From c966d8cddd9001e4f47c7778b3865584599cde13 Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Fri, 23 Sep 2016 12:32:17 -0700 Subject: [PATCH 001/892] Moving all bigtable files into subdirectory. Done via: $ mkdir -p bigtable/google/cloud $ cp google/__init__.py bigtable/google/__init__.py $ git add bigtable/google/__init__.py $ cp google/cloud/__init__.py bigtable/google/cloud/__init__.py $ git add bigtable/google/cloud/__init__.py $ git mv google/cloud/bigtable bigtable/google/cloud/bigtable $ git mv unit_tests/bigtable bigtable/unit_tests --- .../google-cloud-bigtable/google/__init__.py | 20 + .../google/cloud/__init__.py | 20 + .../google/cloud/bigtable/__init__.py | 18 + .../cloud/bigtable/_generated/__init__.py | 15 + .../cloud/bigtable/_generated/_bigtable.proto | 321 +++++ .../_generated/_bigtable_instance_admin.proto | 232 +++ .../_generated/_bigtable_table_admin.proto | 195 +++ .../cloud/bigtable/_generated/_common.proto | 37 + .../cloud/bigtable/_generated/_data.proto | 532 +++++++ .../cloud/bigtable/_generated/_instance.proto | 113 ++ .../bigtable/_generated/_operations.proto | 144 ++ .../cloud/bigtable/_generated/_table.proto | 115 ++ .../_generated/bigtable_instance_admin_pb2.py | 1061 ++++++++++++++ .../cloud/bigtable/_generated/bigtable_pb2.py | 1100 ++++++++++++++ .../_generated/bigtable_table_admin_pb2.py | 784 ++++++++++ .../cloud/bigtable/_generated/common_pb2.py | 67 + .../cloud/bigtable/_generated/data_pb2.py | 1260 +++++++++++++++++ .../cloud/bigtable/_generated/instance_pb2.py | 222 +++ .../_generated/operations_grpc_pb2.py | 264 ++++ .../cloud/bigtable/_generated/table_pb2.py | 393 +++++ .../google/cloud/bigtable/client.py | 355 +++++ .../google/cloud/bigtable/cluster.py | 277 ++++ .../google/cloud/bigtable/column_family.py | 338 +++++ .../google/cloud/bigtable/instance.py | 356 +++++ .../google/cloud/bigtable/row.py | 887 ++++++++++++ .../google/cloud/bigtable/row_data.py | 441 ++++++ .../google/cloud/bigtable/row_filters.py | 768 ++++++++++ .../google/cloud/bigtable/table.py | 375 +++++ .../unit_tests/__init__.py | 13 + .../unit_tests/_testing.py | 46 + .../unit_tests/read-rows-acceptance-test.json | 1178 +++++++++++++++ .../unit_tests/test_client.py | 649 +++++++++ .../unit_tests/test_cluster.py | 442 ++++++ .../unit_tests/test_column_family.py | 682 +++++++++ .../unit_tests/test_instance.py | 587 ++++++++ .../unit_tests/test_row.py | 909 ++++++++++++ .../unit_tests/test_row_data.py | 730 ++++++++++ .../unit_tests/test_row_filters.py | 1010 +++++++++++++ .../unit_tests/test_table.py | 585 ++++++++ 39 files changed, 17541 insertions(+) create mode 100644 packages/google-cloud-bigtable/google/__init__.py create mode 100644 packages/google-cloud-bigtable/google/cloud/__init__.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/__init__.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/_generated/__init__.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_bigtable.proto create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_bigtable_instance_admin.proto create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_bigtable_table_admin.proto create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_common.proto create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_data.proto create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_instance.proto 
create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_operations.proto create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_table.proto create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/_generated/bigtable_instance_admin_pb2.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/_generated/bigtable_pb2.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/_generated/bigtable_table_admin_pb2.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/_generated/common_pb2.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/_generated/data_pb2.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/_generated/instance_pb2.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/_generated/operations_grpc_pb2.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/_generated/table_pb2.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/client.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/column_family.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/instance.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/row.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/row_filters.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/table.py create mode 100644 packages/google-cloud-bigtable/unit_tests/__init__.py create mode 100644 packages/google-cloud-bigtable/unit_tests/_testing.py create mode 100644 packages/google-cloud-bigtable/unit_tests/read-rows-acceptance-test.json create mode 100644 packages/google-cloud-bigtable/unit_tests/test_client.py create mode 100644 packages/google-cloud-bigtable/unit_tests/test_cluster.py create mode 100644 packages/google-cloud-bigtable/unit_tests/test_column_family.py create mode 100644 packages/google-cloud-bigtable/unit_tests/test_instance.py create mode 100644 packages/google-cloud-bigtable/unit_tests/test_row.py create mode 100644 packages/google-cloud-bigtable/unit_tests/test_row_data.py create mode 100644 packages/google-cloud-bigtable/unit_tests/test_row_filters.py create mode 100644 packages/google-cloud-bigtable/unit_tests/test_table.py diff --git a/packages/google-cloud-bigtable/google/__init__.py b/packages/google-cloud-bigtable/google/__init__.py new file mode 100644 index 000000000000..b2b833373882 --- /dev/null +++ b/packages/google-cloud-bigtable/google/__init__.py @@ -0,0 +1,20 @@ +# Copyright 2016 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +try: + import pkg_resources + pkg_resources.declare_namespace(__name__) +except ImportError: + import pkgutil + __path__ = pkgutil.extend_path(__path__, __name__) diff --git a/packages/google-cloud-bigtable/google/cloud/__init__.py b/packages/google-cloud-bigtable/google/cloud/__init__.py new file mode 100644 index 000000000000..8ac7b74af136 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/__init__.py @@ -0,0 +1,20 @@ +# Copyright 2014 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +try: + import pkg_resources + pkg_resources.declare_namespace(__name__) +except ImportError: + import pkgutil + __path__ = pkgutil.extend_path(__path__, __name__) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable/__init__.py new file mode 100644 index 000000000000..c22cb3fc5379 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/__init__.py @@ -0,0 +1,18 @@ +# Copyright 2015 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Google Cloud Bigtable API package.""" + + +from google.cloud.bigtable.client import Client diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/__init__.py new file mode 100644 index 000000000000..cd48e25f08d8 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2015 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Generated protobuf modules for Google Cloud Bigtable API.""" diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_bigtable.proto b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_bigtable.proto new file mode 100644 index 000000000000..49e27ca2ff5f --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_bigtable.proto @@ -0,0 +1,321 @@ +// Copyright 2016 Google Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.bigtable.v2; + +import "google/api/annotations.proto"; +import "google/bigtable/v2/data.proto"; +import "google/protobuf/wrappers.proto"; +import "google/rpc/status.proto"; + +option java_multiple_files = true; +option java_outer_classname = "BigtableProto"; +option java_package = "com.google.bigtable.v2"; + + +// Service for reading from and writing to existing Bigtable tables. +service Bigtable { + // Streams back the contents of all requested rows, optionally + // applying the same Reader filter to each. Depending on their size, + // rows and cells may be broken up across multiple responses, but + // atomicity of each row will still be preserved. See the + // ReadRowsResponse documentation for details. + rpc ReadRows(ReadRowsRequest) returns (stream ReadRowsResponse) { + option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:readRows" body: "*" }; + } + + // Returns a sample of row keys in the table. The returned row keys will + // delimit contiguous sections of the table of approximately equal size, + // which can be used to break up the data for distributed tasks like + // mapreduces. + rpc SampleRowKeys(SampleRowKeysRequest) returns (stream SampleRowKeysResponse) { + option (google.api.http) = { get: "/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys" }; + } + + // Mutates a row atomically. Cells already present in the row are left + // unchanged unless explicitly changed by `mutation`. + rpc MutateRow(MutateRowRequest) returns (MutateRowResponse) { + option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow" body: "*" }; + } + + // Mutates multiple rows in a batch. Each individual row is mutated + // atomically as in MutateRow, but the entire batch is not executed + // atomically. + rpc MutateRows(MutateRowsRequest) returns (stream MutateRowsResponse) { + option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows" body: "*" }; + } + + // Mutates a row atomically based on the output of a predicate Reader filter. + rpc CheckAndMutateRow(CheckAndMutateRowRequest) returns (CheckAndMutateRowResponse) { + option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow" body: "*" }; + } + + // Modifies a row atomically. The method reads the latest existing timestamp + // and value from the specified columns and writes a new entry based on + // pre-defined read/modify/write rules. The new value for the timestamp is the + // greater of the existing timestamp or the current server time. The method + // returns the new contents of all modified cells. + rpc ReadModifyWriteRow(ReadModifyWriteRowRequest) returns (ReadModifyWriteRowResponse) { + option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow" body: "*" }; + } +} + +// Request message for Bigtable.ReadRows. 
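An illustrative sketch, assuming the generated bigtable_pb2 and data_pb2 modules listed in the diffstat above: the service is invoked with the request messages defined below, and building one from Python looks roughly like this. The project, instance, and table names are placeholders, not values taken from this patch.

from google.cloud.bigtable._generated import bigtable_pb2, data_pb2

# Read two specific rows, returning at most 10 rows (0, the default, means no limit).
request = bigtable_pb2.ReadRowsRequest(
    table_name='projects/my-project/instances/my-instance/tables/my-table',
    rows=data_pb2.RowSet(row_keys=[b'row-key-1', b'row-key-2']),
    rows_limit=10,
)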
+message ReadRowsRequest { + // The unique name of the table from which to read. + // Values are of the form + // projects/<project>/instances/<instance>/tables/<table> + string table_name = 1; + + // The row keys and/or ranges to read. If not specified, reads from all rows. + RowSet rows = 2; + + // The filter to apply to the contents of the specified row(s). If unset, + // reads the entirety of each row. + RowFilter filter = 3; + + // The read will terminate after committing to N rows' worth of results. The + // default (zero) is to return all results. + int64 rows_limit = 4; +} + +// Response message for Bigtable.ReadRows. +message ReadRowsResponse { + // Specifies a piece of a row's contents returned as part of the read + // response stream. + message CellChunk { + // The row key for this chunk of data. If the row key is empty, + // this CellChunk is a continuation of the same row as the previous + // CellChunk in the response stream, even if that CellChunk was in a + // previous ReadRowsResponse message. + bytes row_key = 1; + + // The column family name for this chunk of data. If this message + // is not present this CellChunk is a continuation of the same column + // family as the previous CellChunk. The empty string can occur as a + // column family name in a response so clients must check + // explicitly for the presence of this message, not just for + // `family_name.value` being non-empty. + google.protobuf.StringValue family_name = 2; + + // The column qualifier for this chunk of data. If this message + // is not present, this CellChunk is a continuation of the same column + // as the previous CellChunk. Column qualifiers may be empty so + // clients must check for the presence of this message, not just + // for `qualifier.value` being non-empty. + google.protobuf.BytesValue qualifier = 3; + + // The cell's stored timestamp, which also uniquely identifies it + // within its column. Values are always expressed in + // microseconds, but individual tables may set a coarser + // granularity to further restrict the allowed values. For + // example, a table which specifies millisecond granularity will + // only allow values of `timestamp_micros` which are multiples of + // 1000. Timestamps are only set in the first CellChunk per cell + // (for cells split into multiple chunks). + int64 timestamp_micros = 4; + + // Labels applied to the cell by a + // [RowFilter][google.bigtable.v2.RowFilter]. Labels are only set + // on the first CellChunk per cell. + repeated string labels = 5; + + // The value stored in the cell. Cell values can be split across + // multiple CellChunks. In that case only the value field will be + // set in CellChunks after the first: the timestamp and labels + // will only be present in the first CellChunk, even if the first + // CellChunk came in a previous ReadRowsResponse. + bytes value = 6; + + // If this CellChunk is part of a chunked cell value and this is + // not the final chunk of that cell, value_size will be set to the + // total length of the cell value. The client can use this size + // to pre-allocate memory to hold the full cell value. + int32 value_size = 7; + + oneof row_status { + // Indicates that the client should drop all previous chunks for + // `row_key`, as it will be re-read from the beginning. + bool reset_row = 8; + + // Indicates that the client can safely process all previous chunks for + // `row_key`, as its data has been fully read. 
+ bool commit_row = 9; + } + } + + repeated CellChunk chunks = 1; + + // Optionally the server might return the row key of the last row it + // has scanned. The client can use this to construct a more + // efficient retry request if needed: any row keys or portions of + // ranges less than this row key can be dropped from the request. + // This is primarily useful for cases where the server has read a + // lot of data that was filtered out since the last committed row + // key, allowing the client to skip that work on a retry. + bytes last_scanned_row_key = 2; +} + +// Request message for Bigtable.SampleRowKeys. +message SampleRowKeysRequest { + // The unique name of the table from which to sample row keys. + // Values are of the form + // projects/<project>/instances/<instance>/tables/<table> + string table_name = 1; +} + +// Response message for Bigtable.SampleRowKeys. +message SampleRowKeysResponse { + // Sorted streamed sequence of sample row keys in the table. The table might + // have contents before the first row key in the list and after the last one, + // but a key containing the empty string indicates "end of table" and will be + // the last response given, if present. + // Note that row keys in this list may not have ever been written to or read + // from, and users should therefore not make any assumptions about the row key + // structure that are specific to their use case. + bytes row_key = 1; + + // Approximate total storage space used by all rows in the table which precede + // `row_key`. Buffering the contents of all rows between two subsequent + // samples would require space roughly equal to the difference in their + // `offset_bytes` fields. + int64 offset_bytes = 2; +} + +// Request message for Bigtable.MutateRow. +message MutateRowRequest { + // The unique name of the table to which the mutation should be applied. + // Values are of the form + // projects/<project>/instances/<instance>/tables/<table> + string table_name = 1; + + // The key of the row to which the mutation should be applied. + bytes row_key = 2; + + // Changes to be atomically applied to the specified row. Entries are applied + // in order, meaning that earlier mutations can be masked by later ones. + // Must contain at least one entry and at most 100000. + repeated Mutation mutations = 3; +} + +// Response message for Bigtable.MutateRow. +message MutateRowResponse { + +} + +// Request message for BigtableService.MutateRows. +message MutateRowsRequest { + message Entry { + // The key of the row to which the `mutations` should be applied. + bytes row_key = 1; + + // Changes to be atomically applied to the specified row. Mutations are + // applied in order, meaning that earlier mutations can be masked by + // later ones. + // You must specify at least one mutation. + repeated Mutation mutations = 2; + } + + // The unique name of the table to which the mutations should be applied. + string table_name = 1; + + // The row keys and corresponding mutations to be applied in bulk. + // Each entry is applied as an atomic mutation, but the entries may be + // applied in arbitrary order (even between entries for the same row). + // At least one entry must be specified, and in total the entries can + // contain at most 100000 mutations. + repeated Entry entries = 2; +} + +// Response message for BigtableService.MutateRows. +message MutateRowsResponse { + message Entry { + // The index into the original request's `entries` list of the Entry + // for which a result is being reported. 
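Returning to the CellChunk stream defined above: the contract is that buffered chunks for a row may only be processed once commit_row is seen, and must be discarded on reset_row. The client's real merging logic lives in row_data.py and is validated against read-rows-acceptance-test.json (both added by this commit); the sketch below is a simplified illustration only and ignores split cell values (value_size) and labels.

def merge_chunks(chunks):
    """Yield (row_key, family, qualifier, timestamp_micros, value) tuples."""
    cells = []
    row_key = family = qualifier = None
    for chunk in chunks:
        if chunk.reset_row:
            # Drop everything buffered for this row; it will be re-read.
            cells, row_key, family, qualifier = [], None, None, None
            continue
        if chunk.row_key:
            row_key = chunk.row_key
        if chunk.HasField('family_name'):
            family = chunk.family_name.value
        if chunk.HasField('qualifier'):
            qualifier = chunk.qualifier.value
        cells.append(
            (row_key, family, qualifier, chunk.timestamp_micros, chunk.value))
        if chunk.commit_row:
            # The row is complete, so its buffered cells are safe to emit.
            for cell in cells:
                yield cell
            cells = []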
+ int64 index = 1; + + // The result of the request Entry identified by `index`. + // Depending on how requests are batched during execution, it is possible + // for one Entry to fail due to an error with another Entry. In the event + // that this occurs, the same error will be reported for both entries. + google.rpc.Status status = 2; + } + + // One or more results for Entries from the batch request. + repeated Entry entries = 1; +} + +// Request message for Bigtable.CheckAndMutateRow. +message CheckAndMutateRowRequest { + // The unique name of the table to which the conditional mutation should be + // applied. + // Values are of the form + // projects/<project>/instances/<instance>/tables/<table> + string table_name = 1; + + // The key of the row to which the conditional mutation should be applied. + bytes row_key = 2; + + // The filter to be applied to the contents of the specified row. Depending + // on whether or not any results are yielded, either `true_mutations` or + // `false_mutations` will be executed. If unset, checks that the row contains + // any values at all. + RowFilter predicate_filter = 6; + + // Changes to be atomically applied to the specified row if `predicate_filter` + // yields at least one cell when applied to `row_key`. Entries are applied in + // order, meaning that earlier mutations can be masked by later ones. + // Must contain at least one entry if `false_mutations` is empty, and at most + // 100000. + repeated Mutation true_mutations = 4; + + // Changes to be atomically applied to the specified row if `predicate_filter` + // does not yield any cells when applied to `row_key`. Entries are applied in + // order, meaning that earlier mutations can be masked by later ones. + // Must contain at least one entry if `true_mutations` is empty, and at most + // 100000. + repeated Mutation false_mutations = 5; +} + +// Response message for Bigtable.CheckAndMutateRow. +message CheckAndMutateRowResponse { + // Whether or not the request's `predicate_filter` yielded any results for + // the specified row. + bool predicate_matched = 1; +} + +// Request message for Bigtable.ReadModifyWriteRow. +message ReadModifyWriteRowRequest { + // The unique name of the table to which the read/modify/write rules should be + // applied. + // Values are of the form + // projects/<project>/instances/<instance>/tables/<table> + string table_name = 1; + + // The key of the row to which the read/modify/write rules should be applied. + bytes row_key = 2; + + // Rules specifying how the specified row's contents are to be transformed + // into writes. Entries are applied in order, meaning that earlier rules will + // affect the results of later ones. + repeated ReadModifyWriteRule rules = 3; +} + +// Response message for Bigtable.ReadModifyWriteRow. +message ReadModifyWriteRowResponse { + // A Row containing the new contents of all cells modified by the request. + Row row = 1; +} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_bigtable_instance_admin.proto b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_bigtable_instance_admin.proto new file mode 100644 index 000000000000..bda5d2163532 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_bigtable_instance_admin.proto @@ -0,0 +1,232 @@ +// Copyright 2016 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.bigtable.admin.v2; + +import "google/api/annotations.proto"; +import "google/bigtable/admin/v2/instance.proto"; +import "google/longrunning/operations.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/timestamp.proto"; + +option java_multiple_files = true; +option java_outer_classname = "BigtableInstanceAdminProto"; +option java_package = "com.google.bigtable.admin.v2"; + + +// Service for creating, configuring, and deleting Cloud Bigtable Instances and +// Clusters. Provides access to the Instance and Cluster schemas only, not the +// tables metadata or data stored in those tables. +service BigtableInstanceAdmin { + // Create an instance within a project. + rpc CreateInstance(CreateInstanceRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { post: "/v2/{parent=projects/*}/instances" body: "*" }; + } + + // Gets information about an instance. + rpc GetInstance(GetInstanceRequest) returns (Instance) { + option (google.api.http) = { get: "/v2/{name=projects/*/instances/*}" }; + } + + // Lists information about instances in a project. + rpc ListInstances(ListInstancesRequest) returns (ListInstancesResponse) { + option (google.api.http) = { get: "/v2/{parent=projects/*}/instances" }; + } + + // Updates an instance within a project. + rpc UpdateInstance(Instance) returns (Instance) { + option (google.api.http) = { put: "/v2/{name=projects/*/instances/*}" body: "*" }; + } + + // Delete an instance from a project. + rpc DeleteInstance(DeleteInstanceRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { delete: "/v2/{name=projects/*/instances/*}" }; + } + + // Creates a cluster within an instance. + rpc CreateCluster(CreateClusterRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { post: "/v2/{parent=projects/*/instances/*}/clusters" body: "cluster" }; + } + + // Gets information about a cluster. + rpc GetCluster(GetClusterRequest) returns (Cluster) { + option (google.api.http) = { get: "/v2/{name=projects/*/instances/*/clusters/*}" }; + } + + // Lists information about clusters in an instance. + rpc ListClusters(ListClustersRequest) returns (ListClustersResponse) { + option (google.api.http) = { get: "/v2/{parent=projects/*/instances/*}/clusters" }; + } + + // Updates a cluster within an instance. + rpc UpdateCluster(Cluster) returns (google.longrunning.Operation) { + option (google.api.http) = { put: "/v2/{name=projects/*/instances/*/clusters/*}" body: "*" }; + } + + // Deletes a cluster from an instance. + rpc DeleteCluster(DeleteClusterRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { delete: "/v2/{name=projects/*/instances/*/clusters/*}" }; + } +} + +// Request message for BigtableInstanceAdmin.CreateInstance. +message CreateInstanceRequest { + // The unique name of the project in which to create the new instance. + // Values are of the form projects/ + string parent = 1; + + // The id to be used when referring to the new instance within its project, + // e.g. 
just the "myinstance" section of the full name + // "projects/myproject/instances/myinstance" + string instance_id = 2; + + // The instance to create. + // Fields marked "@OutputOnly" must be left blank. + Instance instance = 3; + + // The clusters to be created within the instance, mapped by desired + // cluster ID (e.g. just the "mycluster" part of the full name + // "projects/myproject/instances/myinstance/clusters/mycluster"). + // Fields marked "@OutputOnly" must be left blank. + // Currently exactly one cluster must be specified. + map clusters = 4; +} + +// Request message for BigtableInstanceAdmin.GetInstance. +message GetInstanceRequest { + // The unique name of the requested instance. Values are of the form + // projects//instances/ + string name = 1; +} + +// Request message for BigtableInstanceAdmin.ListInstances. +message ListInstancesRequest { + // The unique name of the project for which a list of instances is requested. + // Values are of the form projects/ + string parent = 1; + + // The value of `next_page_token` returned by a previous call. + string page_token = 2; +} + +// Response message for BigtableInstanceAdmin.ListInstances. +message ListInstancesResponse { + // The list of requested instances. + repeated Instance instances = 1; + + // Locations from which Instance information could not be retrieved, + // due to an outage or some other transient condition. + // Instances whose Clusters are all in one of the failed locations + // may be missing from 'instances', and Instances with at least one + // Cluster in a failed location may only have partial information returned. + repeated string failed_locations = 2; + + // Set if not all instances could be returned in a single response. + // Pass this value to `page_token` in another request to get the next + // page of results. + string next_page_token = 3; +} + +// Request message for BigtableInstanceAdmin.DeleteInstance. +message DeleteInstanceRequest { + // The unique name of the instance to be deleted. + // Values are of the form projects//instances/ + string name = 1; +} + +// Request message for BigtableInstanceAdmin.CreateCluster. +message CreateClusterRequest { + // The unique name of the instance in which to create the new cluster. + // Values are of the form + // projects//instances//clusters/[a-z][-a-z0-9]* + string parent = 1; + + // The id to be used when referring to the new cluster within its instance, + // e.g. just the "mycluster" section of the full name + // "projects/myproject/instances/myinstance/clusters/mycluster" + string cluster_id = 2; + + // The cluster to be created. + // Fields marked "@OutputOnly" must be left blank. + Cluster cluster = 3; +} + +// Request message for BigtableInstanceAdmin.GetCluster. +message GetClusterRequest { + // The unique name of the requested cluster. Values are of the form + // projects//instances//clusters/ + string name = 1; +} + +// Request message for BigtableInstanceAdmin.ListClusters. +message ListClustersRequest { + // The unique name of the instance for which a list of clusters is requested. + // Values are of the form projects//instances/ + // Use = '-' to list Clusters for all Instances in a project, + // for example "projects/myproject/instances/-" + string parent = 1; + + // The value of `next_page_token` returned by a previous call. + string page_token = 2; +} + +// Response message for BigtableInstanceAdmin.ListClusters. +message ListClustersResponse { + // The list of requested clusters. 
+ repeated Cluster clusters = 1; + + // Locations from which Cluster information could not be retrieved, + // due to an outage or some other transient condition. + // Clusters from these locations may be missing from 'clusters', + // or may only have partial information returned. + repeated string failed_locations = 2; + + // Set if not all clusters could be returned in a single response. + // Pass this value to `page_token` in another request to get the next + // page of results. + string next_page_token = 3; +} + +// Request message for BigtableInstanceAdmin.DeleteCluster. +message DeleteClusterRequest { + // The unique name of the cluster to be deleted. Values are of the form + // projects//instances//clusters/ + string name = 1; +} + +// The metadata for the Operation returned by CreateInstance. +message CreateInstanceMetadata { + // The request that prompted the initiation of this CreateInstance operation. + CreateInstanceRequest original_request = 1; + + // The time at which the original request was received. + google.protobuf.Timestamp request_time = 2; + + // The time at which the operation failed or was completed successfully. + google.protobuf.Timestamp finish_time = 3; +} + +// The metadata for the Operation returned by UpdateCluster. +message UpdateClusterMetadata { + // The request that prompted the initiation of this UpdateCluster operation. + Cluster original_request = 1; + + // The time at which the original request was received. + google.protobuf.Timestamp request_time = 2; + + // The time at which the operation failed or was completed successfully. + google.protobuf.Timestamp finish_time = 3; +} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_bigtable_table_admin.proto b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_bigtable_table_admin.proto new file mode 100644 index 000000000000..0a39e298359c --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_bigtable_table_admin.proto @@ -0,0 +1,195 @@ +// Copyright 2016 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.bigtable.admin.v2; + +import "google/api/annotations.proto"; +import "google/bigtable/admin/v2/table.proto"; +import "google/protobuf/empty.proto"; + +option java_multiple_files = true; +option java_outer_classname = "BigtableTableAdminProto"; +option java_package = "com.google.bigtable.admin.v2"; + + +// Service for creating, configuring, and deleting Cloud Bigtable tables. +// Provides access to the table schemas only, not the data stored within +// the tables. +service BigtableTableAdmin { + // Creates a new table in the specified instance. + // The table can be created with a full set of initial column families, + // specified in the request. + rpc CreateTable(CreateTableRequest) returns (Table) { + option (google.api.http) = { post: "/v2/{parent=projects/*/instances/*}/tables" body: "*" }; + } + + // Lists all tables served from a specified instance. 
+ rpc ListTables(ListTablesRequest) returns (ListTablesResponse) { + option (google.api.http) = { get: "/v2/{parent=projects/*/instances/*}/tables" }; + } + + // Gets metadata information about the specified table. + rpc GetTable(GetTableRequest) returns (Table) { + option (google.api.http) = { get: "/v2/{name=projects/*/instances/*/tables/*}" }; + } + + // Permanently deletes a specified table and all of its data. + rpc DeleteTable(DeleteTableRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { delete: "/v2/{name=projects/*/instances/*/tables/*}" }; + } + + // Atomically performs a series of column family modifications + // on the specified table. + rpc ModifyColumnFamilies(ModifyColumnFamiliesRequest) returns (Table) { + option (google.api.http) = { post: "/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies" body: "*" }; + } + + // Permanently drop/delete a row range from a specified table. The request can + // specify whether to delete all rows in a table, or only those that match a + // particular prefix. + rpc DropRowRange(DropRowRangeRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { post: "/v2/{name=projects/*/instances/*/tables/*}:dropRowRange" body: "*" }; + } +} + +// Request message for [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] +message CreateTableRequest { + // An initial split point for a newly created table. + message Split { + // Row key to use as an initial tablet boundary. + bytes key = 1; + } + + // The unique name of the instance in which to create the table. + // Values are of the form projects//instances/ + string parent = 1; + + // The name by which the new table should be referred to within the parent + // instance, e.g. "foobar" rather than "/tables/foobar". + string table_id = 2; + + // The Table to create. + Table table = 3; + + // The optional list of row keys that will be used to initially split the + // table into several tablets (Tablets are similar to HBase regions). + // Given two split keys, "s1" and "s2", three tablets will be created, + // spanning the key ranges: [, s1), [s1, s2), [s2, ). + // + // Example: + // * Row keys := ["a", "apple", "custom", "customer_1", "customer_2", + // "other", "zz"] + // * initial_split_keys := ["apple", "customer_1", "customer_2", "other"] + // * Key assignment: + // - Tablet 1 [, apple) => {"a"}. + // - Tablet 2 [apple, customer_1) => {"apple", "custom"}. + // - Tablet 3 [customer_1, customer_2) => {"customer_1"}. + // - Tablet 4 [customer_2, other) => {"customer_2"}. + // - Tablet 5 [other, ) => {"other", "zz"}. + repeated Split initial_splits = 4; +} + +// Request message for [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] +message DropRowRangeRequest { + // The unique name of the table on which to drop a range of rows. + // Values are of the form projects//instances//tables/ + string name = 1; + + oneof target { + // Delete all rows that start with this row key prefix. Prefix cannot be + // zero length. + bytes row_key_prefix = 2; + + // Delete all rows in the table. Setting this to false is a no-op. + bool delete_all_data_from_table = 3; + } +} + +// Request message for [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] +message ListTablesRequest { + // The unique name of the instance for which tables should be listed. 
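An illustrative aside on CreateTableRequest above: the split example in its comment translates directly into the generated message. Field names come from this proto; the parent path and table id are placeholders, and Table comes from the sibling table_pb2 module added in this commit.

from google.cloud.bigtable._generated import bigtable_table_admin_pb2, table_pb2

Split = bigtable_table_admin_pb2.CreateTableRequest.Split
request = bigtable_table_admin_pb2.CreateTableRequest(
    parent='projects/my-project/instances/my-instance',
    table_id='my-table',
    table=table_pb2.Table(),
    # Three split keys produce four tablets:
    # [, customer_1), [customer_1, customer_2), [customer_2, other), [other, ).
    initial_splits=[Split(key=b'customer_1'),
                    Split(key=b'customer_2'),
                    Split(key=b'other')],
)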
+ // Values are of the form projects//instances/ + string parent = 1; + + // The view to be applied to the returned tables' fields. + // Defaults to NAME_ONLY if unspecified (no others are currently supported). + Table.View view = 2; + + // The value of `next_page_token` returned by a previous call. + string page_token = 3; +} + +// Response message for [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] +message ListTablesResponse { + // The tables present in the requested cluster. + repeated Table tables = 1; + + // Set if not all tables could be returned in a single response. + // Pass this value to `page_token` in another request to get the next + // page of results. + string next_page_token = 2; +} + +// Request message for [google.bigtable.admin.v2.BigtableTableAdmin.GetTable][google.bigtable.admin.v2.BigtableTableAdmin.GetTable] +message GetTableRequest { + // The unique name of the requested table. + // Values are of the form projects//instances//tables/
+ string name = 1; + + // The view to be applied to the returned table's fields. + // Defaults to SCHEMA_ONLY if unspecified. + Table.View view = 2; +} + +// Request message for [google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable] +message DeleteTableRequest { + // The unique name of the table to be deleted. + // Values are of the form projects//instances//tables/
+ string name = 1; +} + +// Request message for [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies] +message ModifyColumnFamiliesRequest { + // A create, update, or delete of a particular column family. + message Modification { + // The ID of the column family to be modified. + string id = 1; + + oneof mod { + // Create a new column family with the specified schema, or fail if + // one already exists with the given ID. + ColumnFamily create = 2; + + // Update an existing column family to the specified schema, or fail + // if no column family exists with the given ID. + ColumnFamily update = 3; + + // Drop (delete) the column family with the given ID, or fail if no such + // family exists. + bool drop = 4; + } + } + + // The unique name of the table whose families should be modified. + // Values are of the form projects//instances//tables/
+ string name = 1; + + // Modifications to be atomically applied to the specified table's families. + // Entries are applied in order, meaning that earlier modifications can be + // masked by later ones (in the case of repeated updates to the same family, + // for example). + repeated Modification modifications = 2; +} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_common.proto b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_common.proto new file mode 100644 index 000000000000..1912e03e0446 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_common.proto @@ -0,0 +1,37 @@ +// Copyright 2016 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.bigtable.admin.v2; + +import "google/api/annotations.proto"; +import "google/protobuf/timestamp.proto"; + +option java_multiple_files = true; +option java_outer_classname = "CommonProto"; +option java_package = "com.google.bigtable.admin.v2"; + + +// Storage media types for persisting Bigtable data. +enum StorageType { + // The user did not specify a storage type. + STORAGE_TYPE_UNSPECIFIED = 0; + + // Flash (SSD) storage should be used. + SSD = 1; + + // Magnetic drive (HDD) storage should be used. + HDD = 2; +} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_data.proto b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_data.proto new file mode 100644 index 000000000000..720f48279b8f --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_data.proto @@ -0,0 +1,532 @@ +// Copyright 2016 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.bigtable.v2; + +option java_multiple_files = true; +option java_outer_classname = "DataProto"; +option java_package = "com.google.bigtable.v2"; + + +// Specifies the complete (requested) contents of a single row of a table. +// Rows which exceed 256MiB in size cannot be read in full. +message Row { + // The unique key which identifies this row within its table. This is the same + // key that's used to identify the row in, for example, a MutateRowRequest. + // May contain any non-empty byte string up to 4KiB in length. + bytes key = 1; + + // May be empty, but only if the entire row is empty. + // The mutual ordering of column families is not specified. 
+ repeated Family families = 2; +} + +// Specifies (some of) the contents of a single row/column family intersection +// of a table. +message Family { + // The unique key which identifies this family within its row. This is the + // same key that's used to identify the family in, for example, a RowFilter + // which sets its "family_name_regex_filter" field. + // Must match `[-_.a-zA-Z0-9]+`, except that AggregatingRowProcessors may + // produce cells in a sentinel family with an empty name. + // Must be no greater than 64 characters in length. + string name = 1; + + // Must not be empty. Sorted in order of increasing "qualifier". + repeated Column columns = 2; +} + +// Specifies (some of) the contents of a single row/column intersection of a +// table. +message Column { + // The unique key which identifies this column within its family. This is the + // same key that's used to identify the column in, for example, a RowFilter + // which sets its `column_qualifier_regex_filter` field. + // May contain any byte string, including the empty string, up to 16kiB in + // length. + bytes qualifier = 1; + + // Must not be empty. Sorted in order of decreasing "timestamp_micros". + repeated Cell cells = 2; +} + +// Specifies (some of) the contents of a single row/column/timestamp of a table. +message Cell { + // The cell's stored timestamp, which also uniquely identifies it within + // its column. + // Values are always expressed in microseconds, but individual tables may set + // a coarser granularity to further restrict the allowed values. For + // example, a table which specifies millisecond granularity will only allow + // values of `timestamp_micros` which are multiples of 1000. + int64 timestamp_micros = 1; + + // The value stored in the cell. + // May contain any byte string, including the empty string, up to 100MiB in + // length. + bytes value = 2; + + // Labels applied to the cell by a [RowFilter][google.bigtable.v2.RowFilter]. + repeated string labels = 3; +} + +// Specifies a contiguous range of rows. +message RowRange { + // The row key at which to start the range. + // If neither field is set, interpreted as the empty string, inclusive. + oneof start_key { + // Used when giving an inclusive lower bound for the range. + bytes start_key_closed = 1; + + // Used when giving an exclusive lower bound for the range. + bytes start_key_open = 2; + } + + // The row key at which to end the range. + // If neither field is set, interpreted as the infinite row key, exclusive. + oneof end_key { + // Used when giving an exclusive upper bound for the range. + bytes end_key_open = 3; + + // Used when giving an inclusive upper bound for the range. + bytes end_key_closed = 4; + } +} + +// Specifies a non-contiguous set of rows. +message RowSet { + // Single rows included in the set. + repeated bytes row_keys = 1; + + // Contiguous row ranges included in the set. + repeated RowRange row_ranges = 2; +} + +// Specifies a contiguous range of columns within a single column family. +// The range spans from <column_family>:<start_qualifier> to +// <column_family>:<end_qualifier>, where both bounds can be either +// inclusive or exclusive. +message ColumnRange { + // The name of the column family within which this range falls. + string family_name = 1; + + // The column qualifier at which to start the range (within `column_family`). + // If neither field is set, interpreted as the empty string, inclusive. + oneof start_qualifier { + // Used when giving an inclusive lower bound for the range.
+ bytes start_qualifier_closed = 2; + + // Used when giving an exclusive lower bound for the range. + bytes start_qualifier_open = 3; + } + + // The column qualifier at which to end the range (within `column_family`). + // If neither field is set, interpreted as the infinite string, exclusive. + oneof end_qualifier { + // Used when giving an inclusive upper bound for the range. + bytes end_qualifier_closed = 4; + + // Used when giving an exclusive upper bound for the range. + bytes end_qualifier_open = 5; + } +} + +// Specified a contiguous range of microsecond timestamps. +message TimestampRange { + // Inclusive lower bound. If left empty, interpreted as 0. + int64 start_timestamp_micros = 1; + + // Exclusive upper bound. If left empty, interpreted as infinity. + int64 end_timestamp_micros = 2; +} + +// Specifies a contiguous range of raw byte values. +message ValueRange { + // The value at which to start the range. + // If neither field is set, interpreted as the empty string, inclusive. + oneof start_value { + // Used when giving an inclusive lower bound for the range. + bytes start_value_closed = 1; + + // Used when giving an exclusive lower bound for the range. + bytes start_value_open = 2; + } + + // The value at which to end the range. + // If neither field is set, interpreted as the infinite string, exclusive. + oneof end_value { + // Used when giving an inclusive upper bound for the range. + bytes end_value_closed = 3; + + // Used when giving an exclusive upper bound for the range. + bytes end_value_open = 4; + } +} + +// Takes a row as input and produces an alternate view of the row based on +// specified rules. For example, a RowFilter might trim down a row to include +// just the cells from columns matching a given regular expression, or might +// return all the cells of a row but not their values. More complicated filters +// can be composed out of these components to express requests such as, "within +// every column of a particular family, give just the two most recent cells +// which are older than timestamp X." +// +// There are two broad categories of RowFilters (true filters and transformers), +// as well as two ways to compose simple filters into more complex ones +// (chains and interleaves). They work as follows: +// +// * True filters alter the input row by excluding some of its cells wholesale +// from the output row. An example of a true filter is the `value_regex_filter`, +// which excludes cells whose values don't match the specified pattern. All +// regex true filters use RE2 syntax (https://github.com/google/re2/wiki/Syntax) +// in raw byte mode (RE2::Latin1), and are evaluated as full matches. An +// important point to keep in mind is that `RE2(.)` is equivalent by default to +// `RE2([^\n])`, meaning that it does not match newlines. When attempting to +// match an arbitrary byte, you should therefore use the escape sequence `\C`, +// which may need to be further escaped as `\\C` in your client language. +// +// * Transformers alter the input row by changing the values of some of its +// cells in the output, without excluding them completely. Currently, the only +// supported transformer is the `strip_value_transformer`, which replaces every +// cell's value with the empty string. +// +// * Chains and interleaves are described in more detail in the +// RowFilter.Chain and RowFilter.Interleave documentation. 
+// +// The total serialized size of a RowFilter message must not +// exceed 4096 bytes, and RowFilters may not be nested within each other +// (in Chains or Interleaves) to a depth of more than 20. +message RowFilter { + // A RowFilter which sends rows through several RowFilters in sequence. + message Chain { + // The elements of "filters" are chained together to process the input row: + // in row -> f(0) -> intermediate row -> f(1) -> ... -> f(N) -> out row + // The full chain is executed atomically. + repeated RowFilter filters = 1; + } + + // A RowFilter which sends each row to each of several component + // RowFilters and interleaves the results. + message Interleave { + // The elements of "filters" all process a copy of the input row, and the + // results are pooled, sorted, and combined into a single output row. + // If multiple cells are produced with the same column and timestamp, + // they will all appear in the output row in an unspecified mutual order. + // Consider the following example, with three filters: + // + // input row + // | + // ----------------------------------------------------- + // | | | + // f(0) f(1) f(2) + // | | | + // 1: foo,bar,10,x foo,bar,10,z far,bar,7,a + // 2: foo,blah,11,z far,blah,5,x far,blah,5,x + // | | | + // ----------------------------------------------------- + // | + // 1: foo,bar,10,z // could have switched with #2 + // 2: foo,bar,10,x // could have switched with #1 + // 3: foo,blah,11,z + // 4: far,bar,7,a + // 5: far,blah,5,x // identical to #6 + // 6: far,blah,5,x // identical to #5 + // + // All interleaved filters are executed atomically. + repeated RowFilter filters = 1; + } + + // A RowFilter which evaluates one of two possible RowFilters, depending on + // whether or not a predicate RowFilter outputs any cells from the input row. + // + // IMPORTANT NOTE: The predicate filter does not execute atomically with the + // true and false filters, which may lead to inconsistent or unexpected + // results. Additionally, Condition filters have poor performance, especially + // when filters are set for the false condition. + message Condition { + // If `predicate_filter` outputs any cells, then `true_filter` will be + // evaluated on the input row. Otherwise, `false_filter` will be evaluated. + RowFilter predicate_filter = 1; + + // The filter to apply to the input row if `predicate_filter` returns any + // results. If not provided, no results will be returned in the true case. + RowFilter true_filter = 2; + + // The filter to apply to the input row if `predicate_filter` does not + // return any results. If not provided, no results will be returned in the + // false case. + RowFilter false_filter = 3; + } + + // Which of the possible RowFilter types to apply. If none are set, this + // RowFilter returns all cells in the input row. + oneof filter { + // Applies several RowFilters to the data in sequence, progressively + // narrowing the results. + Chain chain = 1; + + // Applies several RowFilters to the data in parallel and combines the + // results. + Interleave interleave = 2; + + // Applies one of two possible RowFilters to the data based on the output of + // a predicate RowFilter. + Condition condition = 3; + + // ADVANCED USE ONLY. + // Hook for introspection into the RowFilter. Outputs all cells directly to + // the output of the read rather than to any parent filter. 
Consider the + // following example: + // + // Chain( + // FamilyRegex("A"), + // Interleave( + // All(), + // Chain(Label("foo"), Sink()) + // ), + // QualifierRegex("B") + // ) + // + // A,A,1,w + // A,B,2,x + // B,B,4,z + // | + // FamilyRegex("A") + // | + // A,A,1,w + // A,B,2,x + // | + // +------------+-------------+ + // | | + // All() Label(foo) + // | | + // A,A,1,w A,A,1,w,labels:[foo] + // A,B,2,x A,B,2,x,labels:[foo] + // | | + // | Sink() --------------+ + // | | | + // +------------+ x------+ A,A,1,w,labels:[foo] + // | A,B,2,x,labels:[foo] + // A,A,1,w | + // A,B,2,x | + // | | + // QualifierRegex("B") | + // | | + // A,B,2,x | + // | | + // +--------------------------------+ + // | + // A,A,1,w,labels:[foo] + // A,B,2,x,labels:[foo] // could be switched + // A,B,2,x // could be switched + // + // Despite being excluded by the qualifier filter, a copy of every cell + // that reaches the sink is present in the final result. + // + // As with an [Interleave][google.bigtable.v2.RowFilter.Interleave], + // duplicate cells are possible, and appear in an unspecified mutual order. + // In this case we have a duplicate with column "A:B" and timestamp 2, + // because one copy passed through the all filter while the other was + // passed through the label and sink. Note that one copy has label "foo", + // while the other does not. + // + // Cannot be used within the `predicate_filter`, `true_filter`, or + // `false_filter` of a [Condition][google.bigtable.v2.RowFilter.Condition]. + bool sink = 16; + + // Matches all cells, regardless of input. Functionally equivalent to + // leaving `filter` unset, but included for completeness. + bool pass_all_filter = 17; + + // Does not match any cells, regardless of input. Useful for temporarily + // disabling just part of a filter. + bool block_all_filter = 18; + + // Matches only cells from rows whose keys satisfy the given RE2 regex. In + // other words, passes through the entire row when the key matches, and + // otherwise produces an empty row. + // Note that, since row keys can contain arbitrary bytes, the `\C` escape + // sequence must be used if a true wildcard is desired. The `.` character + // will not match the new line character `\n`, which may be present in a + // binary key. + bytes row_key_regex_filter = 4; + + // Matches all cells from a row with probability p, and matches no cells + // from the row with probability 1-p. + double row_sample_filter = 14; + + // Matches only cells from columns whose families satisfy the given RE2 + // regex. For technical reasons, the regex must not contain the `:` + // character, even if it is not being used as a literal. + // Note that, since column families cannot contain the new line character + // `\n`, it is sufficient to use `.` as a full wildcard when matching + // column family names. + string family_name_regex_filter = 5; + + // Matches only cells from columns whose qualifiers satisfy the given RE2 + // regex. + // Note that, since column qualifiers can contain arbitrary bytes, the `\C` + // escape sequence must be used if a true wildcard is desired. The `.` + // character will not match the new line character `\n`, which may be + // present in a binary qualifier. + bytes column_qualifier_regex_filter = 6; + + // Matches only cells from columns within the given range. + ColumnRange column_range_filter = 7; + + // Matches only cells with timestamps within the given range. 
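An illustrative aside on composing the filters defined above: a Chain applies its sub-filters in sequence, so the sketch below keeps only cells whose family is "cf1" and whose qualifier matches "greeting" (regexes are evaluated as full matches, per the note earlier in this file). The wrapper classes in row_filters.py, added by this commit, offer a friendlier interface over the same generated message; the family and qualifier here are placeholders.

from google.cloud.bigtable._generated import data_pb2

row_filter = data_pb2.RowFilter(
    chain=data_pb2.RowFilter.Chain(filters=[
        data_pb2.RowFilter(family_name_regex_filter='cf1'),
        data_pb2.RowFilter(column_qualifier_regex_filter=b'greeting'),
    ]))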
+ TimestampRange timestamp_range_filter = 8; + + // Matches only cells with values that satisfy the given regular expression. + // Note that, since cell values can contain arbitrary bytes, the `\C` escape + // sequence must be used if a true wildcard is desired. The `.` character + // will not match the new line character `\n`, which may be present in a + // binary value. + bytes value_regex_filter = 9; + + // Matches only cells with values that fall within the given range. + ValueRange value_range_filter = 15; + + // Skips the first N cells of each row, matching all subsequent cells. + // If duplicate cells are present, as is possible when using an Interleave, + // each copy of the cell is counted separately. + int32 cells_per_row_offset_filter = 10; + + // Matches only the first N cells of each row. + // If duplicate cells are present, as is possible when using an Interleave, + // each copy of the cell is counted separately. + int32 cells_per_row_limit_filter = 11; + + // Matches only the most recent N cells within each column. For example, + // if N=2, this filter would match column `foo:bar` at timestamps 10 and 9, + // skip all earlier cells in `foo:bar`, and then begin matching again in + // column `foo:bar2`. + // If duplicate cells are present, as is possible when using an Interleave, + // each copy of the cell is counted separately. + int32 cells_per_column_limit_filter = 12; + + // Replaces each cell's value with the empty string. + bool strip_value_transformer = 13; + + // Applies the given label to all cells in the output row. This allows + // the client to determine which results were produced from which part of + // the filter. + // + // Values must be at most 15 characters in length, and match the RE2 + // pattern `[a-z0-9\\-]+` + // + // Due to a technical limitation, it is not currently possible to apply + // multiple labels to a cell. As a result, a Chain may have no more than + // one sub-filter which contains a `apply_label_transformer`. It is okay for + // an Interleave to contain multiple `apply_label_transformers`, as they + // will be applied to separate copies of the input. This may be relaxed in + // the future. + string apply_label_transformer = 19; + } +} + +// Specifies a particular change to be made to the contents of a row. +message Mutation { + // A Mutation which sets the value of the specified cell. + message SetCell { + // The name of the family into which new data should be written. + // Must match `[-_.a-zA-Z0-9]+` + string family_name = 1; + + // The qualifier of the column into which new data should be written. + // Can be any byte string, including the empty string. + bytes column_qualifier = 2; + + // The timestamp of the cell into which new data should be written. + // Use -1 for current Bigtable server time. + // Otherwise, the client should set this value itself, noting that the + // default value is a timestamp of zero if the field is left unspecified. + // Values must match the granularity of the table (e.g. micros, millis). + int64 timestamp_micros = 3; + + // The value to be written into the specified cell. + bytes value = 4; + } + + // A Mutation which deletes cells from the specified column, optionally + // restricting the deletions to a given timestamp range. + message DeleteFromColumn { + // The name of the family from which cells should be deleted. + // Must match `[-_.a-zA-Z0-9]+` + string family_name = 1; + + // The qualifier of the column from which cells should be deleted. + // Can be any byte string, including the empty string. 
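A minimal usage sketch of how the RowFilter message above is composed through the wrappers this package adds in row_filters.py; the family, qualifier, and label values are placeholders, and the Label/Sink branch mirrors the Sink example in the comments above:

    from google.cloud.bigtable import row_filters

    # Chain: each filter consumes the output of the previous one, as in
    # RowFilter.Chain above.
    recent_view_count = row_filters.RowFilterChain(filters=[
        row_filters.FamilyNameRegexFilter('stats'),        # family_name_regex_filter
        row_filters.ColumnQualifierRegexFilter(b'views'),  # column_qualifier_regex_filter
        row_filters.CellsColumnLimitFilter(1),             # cells_per_column_limit_filter
    ])

    # Interleave: every branch sees a copy of the row and the results are
    # pooled, as in RowFilter.Interleave above.
    combined = row_filters.RowFilterUnion(filters=[
        recent_view_count,
        row_filters.RowFilterChain(filters=[
            row_filters.ApplyLabelFilter('raw'),  # apply_label_transformer
            row_filters.SinkFilter(True),          # sink
        ]),
    ])

    # Condition: choose between two filters based on whether the predicate
    # emits any cells, as in RowFilter.Condition above. Note that Sink may
    # not appear inside a Condition, so `combined` is not reused here.
    conditional = row_filters.ConditionalRowFilter(
        row_filters.RowSampleFilter(0.25),  # row_sample_filter as the predicate
        true_filter=recent_view_count,
        false_filter=row_filters.StripValueTransformerFilter(True),
    )

The composed filter object can then be handed to the read calls in this package's table.py.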
+ bytes column_qualifier = 2; + + // The range of timestamps within which cells should be deleted. + TimestampRange time_range = 3; + } + + // A Mutation which deletes all cells from the specified column family. + message DeleteFromFamily { + // The name of the family from which cells should be deleted. + // Must match `[-_.a-zA-Z0-9]+` + string family_name = 1; + } + + // A Mutation which deletes all cells from the containing row. + message DeleteFromRow { + + } + + // Which of the possible Mutation types to apply. + oneof mutation { + // Set a cell's value. + SetCell set_cell = 1; + + // Deletes cells from a column. + DeleteFromColumn delete_from_column = 2; + + // Deletes cells from a column family. + DeleteFromFamily delete_from_family = 3; + + // Deletes cells from the entire row. + DeleteFromRow delete_from_row = 4; + } +} + +// Specifies an atomic read/modify/write operation on the latest value of the +// specified column. +message ReadModifyWriteRule { + // The name of the family to which the read/modify/write should be applied. + // Must match `[-_.a-zA-Z0-9]+` + string family_name = 1; + + // The qualifier of the column to which the read/modify/write should be + // applied. + // Can be any byte string, including the empty string. + bytes column_qualifier = 2; + + // The rule used to determine the column's new latest value from its current + // latest value. + oneof rule { + // Rule specifying that `append_value` be appended to the existing value. + // If the targeted cell is unset, it will be treated as containing the + // empty string. + bytes append_value = 3; + + // Rule specifying that `increment_amount` be added to the existing value. + // If the targeted cell is unset, it will be treated as containing a zero. + // Otherwise, the targeted cell must contain an 8-byte value (interpreted + // as a 64-bit big-endian signed integer), or the entire request will fail. + int64 increment_amount = 4; + } +} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_instance.proto b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_instance.proto new file mode 100644 index 000000000000..4aa3f9d06dd3 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_instance.proto @@ -0,0 +1,113 @@ +// Copyright 2016 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.bigtable.admin.v2; + +import "google/api/annotations.proto"; +import "google/bigtable/admin/v2/common.proto"; + +option java_multiple_files = true; +option java_outer_classname = "InstanceProto"; +option java_package = "com.google.bigtable.admin.v2"; + + +// A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and +// the resources that serve them. +// All tables in an instance are served from a single +// [Cluster][google.bigtable.admin.v2.Cluster]. +message Instance { + // Possible states of an instance. + enum State { + // The state of the instance could not be determined. 
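A minimal sketch of how the Mutation and ReadModifyWriteRule messages above surface through the Row helpers in this package's row.py, assuming the Client/Instance/Table factories in client.py and table.py; all project, instance, table, family, and column names are placeholders:

    from google.cloud.bigtable.client import Client

    # All IDs are placeholders; the instance and table must already exist.
    client = Client(project='my-project')
    table = client.instance('my-instance', 'us-central1-b').table('my-table')

    # Each Row call below queues one Mutation variant defined above, and
    # commit() sends the accumulated mutations atomically for that row.
    row = table.row(b'phone#1')
    row.set_cell('stats', b'title', b'Pixel')  # Mutation.SetCell (server timestamp)
    row.delete_cell('stats', b'legacy')        # Mutation.DeleteFromColumn
    row.commit()

    # ReadModifyWriteRule is exposed through the append/increment helpers and
    # committed separately, because the server computes the new values.
    rmw = table.row(b'phone#1')
    rmw.append_cell_value('stats', b'tags', b',new')  # append_value
    rmw.increment_cell_value('stats', b'views', 1)    # increment_amount
    rmw.commit_modifications()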
+ STATE_NOT_KNOWN = 0; + + // The instance has been successfully created and can serve requests + // to its tables. + READY = 1; + + // The instance is currently being created, and may be destroyed + // if the creation process encounters an error. + CREATING = 2; + } + + // @OutputOnly + // The unique name of the instance. Values are of the form + // projects//instances/[a-z][a-z0-9\\-]+[a-z0-9] + string name = 1; + + // The descriptive name for this instance as it appears in UIs. + // Can be changed at any time, but should be kept globally unique + // to avoid confusion. + string display_name = 2; + + // + // The current state of the instance. + State state = 3; +} + +// A resizable group of nodes in a particular cloud location, capable +// of serving all [Tables][google.bigtable.admin.v2.Table] in the parent +// [Instance][google.bigtable.admin.v2.Instance]. +message Cluster { + // Possible states of a cluster. + enum State { + // The state of the cluster could not be determined. + STATE_NOT_KNOWN = 0; + + // The cluster has been successfully created and is ready to serve requests. + READY = 1; + + // The cluster is currently being created, and may be destroyed + // if the creation process encounters an error. + // A cluster may not be able to serve requests while being created. + CREATING = 2; + + // The cluster is currently being resized, and may revert to its previous + // node count if the process encounters an error. + // A cluster is still capable of serving requests while being resized, + // but may exhibit performance as if its number of allocated nodes is + // between the starting and requested states. + RESIZING = 3; + + // The cluster has no backing nodes. The data (tables) still + // exist, but no operations can be performed on the cluster. + DISABLED = 4; + } + + // @OutputOnly + // The unique name of the cluster. Values are of the form + // projects//instances//clusters/[a-z][-a-z0-9]* + string name = 1; + + // @CreationOnly + // The location where this cluster's nodes and storage reside. For best + // performance, clients should be located as close as possible to this cluster. + // Currently only zones are supported, e.g. projects/*/locations/us-central1-b + string location = 2; + + // @OutputOnly + // The current state of the cluster. + State state = 3; + + // The number of nodes allocated to this cluster. More nodes enable higher + // throughput and more consistent performance. + int32 serve_nodes = 4; + + // @CreationOnly + // The type of storage used by this cluster to serve its + // parent instance's tables, unless explicitly overridden. + StorageType default_storage_type = 5; +} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_operations.proto b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_operations.proto new file mode 100644 index 000000000000..a358d0a38787 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_operations.proto @@ -0,0 +1,144 @@ +// Copyright (c) 2015, Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.longrunning; + +import "google/api/annotations.proto"; +import "google/protobuf/any.proto"; +import "google/protobuf/empty.proto"; +import "google/rpc/status.proto"; + +option java_multiple_files = true; +option java_outer_classname = "OperationsProto"; +option java_package = "com.google.longrunning"; + + +// Manages long-running operations with an API service. +// +// When an API method normally takes long time to complete, it can be designed +// to return [Operation][google.longrunning.Operation] to the client, and the client can use this +// interface to receive the real response asynchronously by polling the +// operation resource, or using `google.watcher.v1.Watcher` interface to watch +// the response, or pass the operation resource to another API (such as Google +// Cloud Pub/Sub API) to receive the response. Any API service that returns +// long-running operations should implement the `Operations` interface so +// developers can have a consistent client experience. +service Operations { + // Gets the latest state of a long-running operation. Clients may use this + // method to poll the operation result at intervals as recommended by the API + // service. + rpc GetOperation(GetOperationRequest) returns (Operation) { + option (google.api.http) = { get: "/v1/{name=operations/**}" }; + } + + // Lists operations that match the specified filter in the request. If the + // server doesn't support this method, it returns + // `google.rpc.Code.UNIMPLEMENTED`. + rpc ListOperations(ListOperationsRequest) returns (ListOperationsResponse) { + option (google.api.http) = { get: "/v1/{name=operations}" }; + } + + // Starts asynchronous cancellation on a long-running operation. The server + // makes a best effort to cancel the operation, but success is not + // guaranteed. If the server doesn't support this method, it returns + // `google.rpc.Code.UNIMPLEMENTED`. Clients may use + // [Operations.GetOperation] or other methods to check whether the + // cancellation succeeded or the operation completed despite cancellation. + rpc CancelOperation(CancelOperationRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { post: "/v1/{name=operations/**}:cancel" body: "*" }; + } + + // Deletes a long-running operation. It indicates the client is no longer + // interested in the operation result. It does not cancel the operation. + rpc DeleteOperation(DeleteOperationRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { delete: "/v1/{name=operations/**}" }; + } +} + +// This resource represents a long-running operation that is the result of a +// network API call. +message Operation { + // The name of the operation resource, which is only unique within the same + // service that originally returns it. + string name = 1; + + // Some service-specific metadata associated with the operation. It typically + // contains progress information and common metadata such as create time. + // Some services may not provide such metadata. Any method that returns a + // long-running operation should document the metadata type, if any. + google.protobuf.Any metadata = 2; + + // If the value is false, it means the operation is still in progress. + // If true, the operation is completed and the `result` is available. + bool done = 3; + + oneof result { + // The error result of the operation in case of failure. 
+ google.rpc.Status error = 4; + + // The normal response of the operation in case of success. If the original + // method returns no data on success, such as `Delete`, the response will be + // `google.protobuf.Empty`. If the original method is standard + // `Get`/`Create`/`Update`, the response should be the resource. For other + // methods, the response should have the type `XxxResponse`, where `Xxx` + // is the original method name. For example, if the original method name + // is `TakeSnapshot()`, the inferred response type will be + // `TakeSnapshotResponse`. + google.protobuf.Any response = 5; + } +} + +// The request message for [Operations.GetOperation][google.longrunning.Operations.GetOperation]. +message GetOperationRequest { + // The name of the operation resource. + string name = 1; +} + +// The request message for [Operations.ListOperations][google.longrunning.Operations.ListOperations]. +message ListOperationsRequest { + // The name of the operation collection. + string name = 4; + + // The standard List filter. + string filter = 1; + + // The standard List page size. + int32 page_size = 2; + + // The standard List page token. + string page_token = 3; +} + +// The response message for [Operations.ListOperations][google.longrunning.Operations.ListOperations]. +message ListOperationsResponse { + // A list of operations that match the specified filter in the request. + repeated Operation operations = 1; + + // The standard List next-page token. + string next_page_token = 2; +} + +// The request message for [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]. +message CancelOperationRequest { + // The name of the operation resource to be cancelled. + string name = 1; +} + +// The request message for [Operations.DeleteOperation][google.longrunning.Operations.DeleteOperation]. +message DeleteOperationRequest { + // The name of the operation resource to be deleted. + string name = 1; +} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_table.proto b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_table.proto new file mode 100644 index 000000000000..63e41103e42f --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_table.proto @@ -0,0 +1,115 @@ +// Copyright 2016 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.bigtable.admin.v2; + +import "google/api/annotations.proto"; +import "google/protobuf/duration.proto"; + +option java_multiple_files = true; +option java_outer_classname = "TableProto"; +option java_package = "com.google.bigtable.admin.v2"; + + +// A collection of user data indexed by row, column, and timestamp. +// Each table is served using the resources of its parent cluster. +message Table { + // Possible timestamp granularities to use when keeping multiple versions + // of data in a table. + enum TimestampGranularity { + // The user did not specify a granularity. Should not be returned. 
+ // When specified during table creation, MILLIS will be used. + TIMESTAMP_GRANULARITY_UNSPECIFIED = 0; + + // The table keeps data versioned at a granularity of 1ms. + MILLIS = 1; + } + + // Defines a view over a table's fields. + enum View { + // Uses the default view for each method as documented in its request. + VIEW_UNSPECIFIED = 0; + + // Only populates `name`. + NAME_ONLY = 1; + + // Only populates `name` and fields related to the table's schema. + SCHEMA_VIEW = 2; + + // Populates all fields. + FULL = 4; + } + + // The unique name of the table. Values are of the form + // projects//instances//tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]* + // Views: NAME_ONLY, SCHEMA_VIEW, REPLICATION_VIEW, FULL + // @OutputOnly + string name = 1; + + // The column families configured for this table, mapped by column family ID. + // Views: SCHEMA_VIEW, FULL + // @CreationOnly + map column_families = 3; + + // The granularity (e.g. MILLIS, MICROS) at which timestamps are stored in + // this table. Timestamps not matching the granularity will be rejected. + // If unspecified at creation time, the value will be set to MILLIS. + // Views: SCHEMA_VIEW, FULL + // @CreationOnly + TimestampGranularity granularity = 4; +} + +// A set of columns within a table which share a common configuration. +message ColumnFamily { + // Garbage collection rule specified as a protobuf. + // Must serialize to at most 500 bytes. + // + // NOTE: Garbage collection executes opportunistically in the background, and + // so it's possible for reads to return a cell even if it matches the active + // GC expression for its family. + GcRule gc_rule = 1; +} + +// Rule for determining which cells to delete during garbage collection. +message GcRule { + // A GcRule which deletes cells matching all of the given rules. + message Intersection { + // Only delete cells which would be deleted by every element of `rules`. + repeated GcRule rules = 1; + } + + // A GcRule which deletes cells matching any of the given rules. + message Union { + // Delete cells which would be deleted by any element of `rules`. + repeated GcRule rules = 1; + } + + oneof rule { + // Delete all cells in a column except the most recent N. + int32 max_num_versions = 1; + + // Delete cells in a column older than the given age. + // Values must be at least one millisecond, and will be truncated to + // microsecond granularity. + google.protobuf.Duration max_age = 2; + + // Delete cells that would be deleted by every nested rule. + Intersection intersection = 3; + + // Delete cells that would be deleted by any nested rule. + Union union = 4; + } +} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/bigtable_instance_admin_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/bigtable_instance_admin_pb2.py new file mode 100644 index 000000000000..e24d4b26ae26 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/bigtable_instance_admin_pb2.py @@ -0,0 +1,1061 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! 
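A minimal sketch of how the GcRule message above is expressed through this package's column_family.py; `table` is assumed to be an existing Table from this client, and 'stats' is a placeholder column family ID:

    import datetime

    from google.cloud.bigtable import column_family

    # Garbage-collect a cell once it is past the 3 most recent versions *or*
    # older than 7 days: a GcRule.Union over max_num_versions and max_age.
    gc_rule = column_family.GCRuleUnion(rules=[
        column_family.MaxVersionsGCRule(3),
        column_family.MaxAgeGCRule(datetime.timedelta(days=7)),
    ])

    # Create the family with the rule attached; `table` is assumed to exist.
    table.column_family('stats', gc_rule=gc_rule).create()

A GCRuleIntersection is available in the same module for the "deleted by every nested rule" case.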
+# source: google/bigtable/admin/v2/bigtable_instance_admin.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 +from google.cloud.bigtable._generated import instance_pb2 as google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2 +from google.longrunning import operations_pb2 as google_dot_longrunning_dot_operations__pb2 +from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 +from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='google/bigtable/admin/v2/bigtable_instance_admin.proto', + package='google.bigtable.admin.v2', + syntax='proto3', + serialized_pb=_b('\n6google/bigtable/admin/v2/bigtable_instance_admin.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\'google/bigtable/admin/v2/instance.proto\x1a#google/longrunning/operations.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\x97\x02\n\x15\x43reateInstanceRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x13\n\x0binstance_id\x18\x02 \x01(\t\x12\x34\n\x08instance\x18\x03 \x01(\x0b\x32\".google.bigtable.admin.v2.Instance\x12O\n\x08\x63lusters\x18\x04 \x03(\x0b\x32=.google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry\x1aR\n\rClustersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x30\n\x05value\x18\x02 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster:\x02\x38\x01\"\"\n\x12GetInstanceRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\":\n\x14ListInstancesRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x12\n\npage_token\x18\x02 \x01(\t\"\x81\x01\n\x15ListInstancesResponse\x12\x35\n\tinstances\x18\x01 \x03(\x0b\x32\".google.bigtable.admin.v2.Instance\x12\x18\n\x10\x66\x61iled_locations\x18\x02 \x03(\t\x12\x17\n\x0fnext_page_token\x18\x03 \x01(\t\"%\n\x15\x44\x65leteInstanceRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"n\n\x14\x43reateClusterRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x12\n\ncluster_id\x18\x02 \x01(\t\x12\x32\n\x07\x63luster\x18\x03 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster\"!\n\x11GetClusterRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"9\n\x13ListClustersRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x12\n\npage_token\x18\x02 \x01(\t\"~\n\x14ListClustersResponse\x12\x33\n\x08\x63lusters\x18\x01 \x03(\x0b\x32!.google.bigtable.admin.v2.Cluster\x12\x18\n\x10\x66\x61iled_locations\x18\x02 \x03(\t\x12\x17\n\x0fnext_page_token\x18\x03 \x01(\t\"$\n\x14\x44\x65leteClusterRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\xc6\x01\n\x16\x43reateInstanceMetadata\x12I\n\x10original_request\x18\x01 \x01(\x0b\x32/.google.bigtable.admin.v2.CreateInstanceRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xb7\x01\n\x15UpdateClusterMetadata\x12;\n\x10original_request\x18\x01 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp2\xdb\x0b\n\x15\x42igtableInstanceAdmin\x12\x8e\x01\n\x0e\x43reateInstance\x12/.google.bigtable.admin.v2.CreateInstanceRequest\x1a\x1d.google.longrunning.Operation\",\x82\xd3\xe4\x93\x02&\"!/v2/{parent=projects/*}/instances:\x01*\x12\x8a\x01\n\x0bGetInstance\x12,.google.bigtable.admin.v2.GetInstanceRequest\x1a\".google.bigtable.admin.v2.Instance\")\x82\xd3\xe4\x93\x02#\x12!/v2/{name=projects/*/instances/*}\x12\x9b\x01\n\rListInstances\x12..google.bigtable.admin.v2.ListInstancesRequest\x1a/.google.bigtable.admin.v2.ListInstancesResponse\")\x82\xd3\xe4\x93\x02#\x12!/v2/{parent=projects/*}/instances\x12\x86\x01\n\x0eUpdateInstance\x12\".google.bigtable.admin.v2.Instance\x1a\".google.bigtable.admin.v2.Instance\",\x82\xd3\xe4\x93\x02&\x1a!/v2/{name=projects/*/instances/*}:\x01*\x12\x84\x01\n\x0e\x44\x65leteInstance\x12/.google.bigtable.admin.v2.DeleteInstanceRequest\x1a\x16.google.protobuf.Empty\")\x82\xd3\xe4\x93\x02#*!/v2/{name=projects/*/instances/*}\x12\x9d\x01\n\rCreateCluster\x12..google.bigtable.admin.v2.CreateClusterRequest\x1a\x1d.google.longrunning.Operation\"=\x82\xd3\xe4\x93\x02\x37\",/v2/{parent=projects/*/instances/*}/clusters:\x07\x63luster\x12\x92\x01\n\nGetCluster\x12+.google.bigtable.admin.v2.GetClusterRequest\x1a!.google.bigtable.admin.v2.Cluster\"4\x82\xd3\xe4\x93\x02.\x12,/v2/{name=projects/*/instances/*/clusters/*}\x12\xa3\x01\n\x0cListClusters\x12-.google.bigtable.admin.v2.ListClustersRequest\x1a..google.bigtable.admin.v2.ListClustersResponse\"4\x82\xd3\xe4\x93\x02.\x12,/v2/{parent=projects/*/instances/*}/clusters\x12\x8a\x01\n\rUpdateCluster\x12!.google.bigtable.admin.v2.Cluster\x1a\x1d.google.longrunning.Operation\"7\x82\xd3\xe4\x93\x02\x31\x1a,/v2/{name=projects/*/instances/*/clusters/*}:\x01*\x12\x8d\x01\n\rDeleteCluster\x12..google.bigtable.admin.v2.DeleteClusterRequest\x1a\x16.google.protobuf.Empty\"4\x82\xd3\xe4\x93\x02.*,/v2/{name=projects/*/instances/*/clusters/*}B<\n\x1c\x63om.google.bigtable.admin.v2B\x1a\x42igtableInstanceAdminProtoP\x01\x62\x06proto3') + , + dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.DESCRIPTOR,google_dot_longrunning_dot_operations__pb2.DESCRIPTOR,google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,]) +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + + + + +_CREATEINSTANCEREQUEST_CLUSTERSENTRY = _descriptor.Descriptor( + name='ClustersEntry', + full_name='google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + 
oneofs=[ + ], + serialized_start=452, + serialized_end=534, +) + +_CREATEINSTANCEREQUEST = _descriptor.Descriptor( + name='CreateInstanceRequest', + full_name='google.bigtable.admin.v2.CreateInstanceRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='parent', full_name='google.bigtable.admin.v2.CreateInstanceRequest.parent', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='instance_id', full_name='google.bigtable.admin.v2.CreateInstanceRequest.instance_id', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='instance', full_name='google.bigtable.admin.v2.CreateInstanceRequest.instance', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='clusters', full_name='google.bigtable.admin.v2.CreateInstanceRequest.clusters', index=3, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_CREATEINSTANCEREQUEST_CLUSTERSENTRY, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=255, + serialized_end=534, +) + + +_GETINSTANCEREQUEST = _descriptor.Descriptor( + name='GetInstanceRequest', + full_name='google.bigtable.admin.v2.GetInstanceRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.GetInstanceRequest.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=536, + serialized_end=570, +) + + +_LISTINSTANCESREQUEST = _descriptor.Descriptor( + name='ListInstancesRequest', + full_name='google.bigtable.admin.v2.ListInstancesRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='parent', full_name='google.bigtable.admin.v2.ListInstancesRequest.parent', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='page_token', full_name='google.bigtable.admin.v2.ListInstancesRequest.page_token', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + 
is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=572, + serialized_end=630, +) + + +_LISTINSTANCESRESPONSE = _descriptor.Descriptor( + name='ListInstancesResponse', + full_name='google.bigtable.admin.v2.ListInstancesResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='instances', full_name='google.bigtable.admin.v2.ListInstancesResponse.instances', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='failed_locations', full_name='google.bigtable.admin.v2.ListInstancesResponse.failed_locations', index=1, + number=2, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='next_page_token', full_name='google.bigtable.admin.v2.ListInstancesResponse.next_page_token', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=633, + serialized_end=762, +) + + +_DELETEINSTANCEREQUEST = _descriptor.Descriptor( + name='DeleteInstanceRequest', + full_name='google.bigtable.admin.v2.DeleteInstanceRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.DeleteInstanceRequest.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=764, + serialized_end=801, +) + + +_CREATECLUSTERREQUEST = _descriptor.Descriptor( + name='CreateClusterRequest', + full_name='google.bigtable.admin.v2.CreateClusterRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='parent', full_name='google.bigtable.admin.v2.CreateClusterRequest.parent', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='cluster_id', full_name='google.bigtable.admin.v2.CreateClusterRequest.cluster_id', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='cluster', 
full_name='google.bigtable.admin.v2.CreateClusterRequest.cluster', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=803, + serialized_end=913, +) + + +_GETCLUSTERREQUEST = _descriptor.Descriptor( + name='GetClusterRequest', + full_name='google.bigtable.admin.v2.GetClusterRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.GetClusterRequest.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=915, + serialized_end=948, +) + + +_LISTCLUSTERSREQUEST = _descriptor.Descriptor( + name='ListClustersRequest', + full_name='google.bigtable.admin.v2.ListClustersRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='parent', full_name='google.bigtable.admin.v2.ListClustersRequest.parent', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='page_token', full_name='google.bigtable.admin.v2.ListClustersRequest.page_token', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=950, + serialized_end=1007, +) + + +_LISTCLUSTERSRESPONSE = _descriptor.Descriptor( + name='ListClustersResponse', + full_name='google.bigtable.admin.v2.ListClustersResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='clusters', full_name='google.bigtable.admin.v2.ListClustersResponse.clusters', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='failed_locations', full_name='google.bigtable.admin.v2.ListClustersResponse.failed_locations', index=1, + number=2, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='next_page_token', full_name='google.bigtable.admin.v2.ListClustersResponse.next_page_token', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, 
enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1009, + serialized_end=1135, +) + + +_DELETECLUSTERREQUEST = _descriptor.Descriptor( + name='DeleteClusterRequest', + full_name='google.bigtable.admin.v2.DeleteClusterRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.DeleteClusterRequest.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1137, + serialized_end=1173, +) + + +_CREATEINSTANCEMETADATA = _descriptor.Descriptor( + name='CreateInstanceMetadata', + full_name='google.bigtable.admin.v2.CreateInstanceMetadata', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='original_request', full_name='google.bigtable.admin.v2.CreateInstanceMetadata.original_request', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='request_time', full_name='google.bigtable.admin.v2.CreateInstanceMetadata.request_time', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='finish_time', full_name='google.bigtable.admin.v2.CreateInstanceMetadata.finish_time', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1176, + serialized_end=1374, +) + + +_UPDATECLUSTERMETADATA = _descriptor.Descriptor( + name='UpdateClusterMetadata', + full_name='google.bigtable.admin.v2.UpdateClusterMetadata', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='original_request', full_name='google.bigtable.admin.v2.UpdateClusterMetadata.original_request', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='request_time', full_name='google.bigtable.admin.v2.UpdateClusterMetadata.request_time', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='finish_time', 
full_name='google.bigtable.admin.v2.UpdateClusterMetadata.finish_time', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1377, + serialized_end=1560, +) + +_CREATEINSTANCEREQUEST_CLUSTERSENTRY.fields_by_name['value'].message_type = google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2._CLUSTER +_CREATEINSTANCEREQUEST_CLUSTERSENTRY.containing_type = _CREATEINSTANCEREQUEST +_CREATEINSTANCEREQUEST.fields_by_name['instance'].message_type = google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2._INSTANCE +_CREATEINSTANCEREQUEST.fields_by_name['clusters'].message_type = _CREATEINSTANCEREQUEST_CLUSTERSENTRY +_LISTINSTANCESRESPONSE.fields_by_name['instances'].message_type = google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2._INSTANCE +_CREATECLUSTERREQUEST.fields_by_name['cluster'].message_type = google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2._CLUSTER +_LISTCLUSTERSRESPONSE.fields_by_name['clusters'].message_type = google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2._CLUSTER +_CREATEINSTANCEMETADATA.fields_by_name['original_request'].message_type = _CREATEINSTANCEREQUEST +_CREATEINSTANCEMETADATA.fields_by_name['request_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_CREATEINSTANCEMETADATA.fields_by_name['finish_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_UPDATECLUSTERMETADATA.fields_by_name['original_request'].message_type = google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2._CLUSTER +_UPDATECLUSTERMETADATA.fields_by_name['request_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_UPDATECLUSTERMETADATA.fields_by_name['finish_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +DESCRIPTOR.message_types_by_name['CreateInstanceRequest'] = _CREATEINSTANCEREQUEST +DESCRIPTOR.message_types_by_name['GetInstanceRequest'] = _GETINSTANCEREQUEST +DESCRIPTOR.message_types_by_name['ListInstancesRequest'] = _LISTINSTANCESREQUEST +DESCRIPTOR.message_types_by_name['ListInstancesResponse'] = _LISTINSTANCESRESPONSE +DESCRIPTOR.message_types_by_name['DeleteInstanceRequest'] = _DELETEINSTANCEREQUEST +DESCRIPTOR.message_types_by_name['CreateClusterRequest'] = _CREATECLUSTERREQUEST +DESCRIPTOR.message_types_by_name['GetClusterRequest'] = _GETCLUSTERREQUEST +DESCRIPTOR.message_types_by_name['ListClustersRequest'] = _LISTCLUSTERSREQUEST +DESCRIPTOR.message_types_by_name['ListClustersResponse'] = _LISTCLUSTERSRESPONSE +DESCRIPTOR.message_types_by_name['DeleteClusterRequest'] = _DELETECLUSTERREQUEST +DESCRIPTOR.message_types_by_name['CreateInstanceMetadata'] = _CREATEINSTANCEMETADATA +DESCRIPTOR.message_types_by_name['UpdateClusterMetadata'] = _UPDATECLUSTERMETADATA + +CreateInstanceRequest = _reflection.GeneratedProtocolMessageType('CreateInstanceRequest', (_message.Message,), dict( + + ClustersEntry = _reflection.GeneratedProtocolMessageType('ClustersEntry', (_message.Message,), dict( + DESCRIPTOR = _CREATEINSTANCEREQUEST_CLUSTERSENTRY, + __module__ = 'google.bigtable.admin.v2.bigtable_instance_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry) + )) + , + DESCRIPTOR = _CREATEINSTANCEREQUEST, + 
__module__ = 'google.bigtable.admin.v2.bigtable_instance_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateInstanceRequest) + )) +_sym_db.RegisterMessage(CreateInstanceRequest) +_sym_db.RegisterMessage(CreateInstanceRequest.ClustersEntry) + +GetInstanceRequest = _reflection.GeneratedProtocolMessageType('GetInstanceRequest', (_message.Message,), dict( + DESCRIPTOR = _GETINSTANCEREQUEST, + __module__ = 'google.bigtable.admin.v2.bigtable_instance_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetInstanceRequest) + )) +_sym_db.RegisterMessage(GetInstanceRequest) + +ListInstancesRequest = _reflection.GeneratedProtocolMessageType('ListInstancesRequest', (_message.Message,), dict( + DESCRIPTOR = _LISTINSTANCESREQUEST, + __module__ = 'google.bigtable.admin.v2.bigtable_instance_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListInstancesRequest) + )) +_sym_db.RegisterMessage(ListInstancesRequest) + +ListInstancesResponse = _reflection.GeneratedProtocolMessageType('ListInstancesResponse', (_message.Message,), dict( + DESCRIPTOR = _LISTINSTANCESRESPONSE, + __module__ = 'google.bigtable.admin.v2.bigtable_instance_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListInstancesResponse) + )) +_sym_db.RegisterMessage(ListInstancesResponse) + +DeleteInstanceRequest = _reflection.GeneratedProtocolMessageType('DeleteInstanceRequest', (_message.Message,), dict( + DESCRIPTOR = _DELETEINSTANCEREQUEST, + __module__ = 'google.bigtable.admin.v2.bigtable_instance_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteInstanceRequest) + )) +_sym_db.RegisterMessage(DeleteInstanceRequest) + +CreateClusterRequest = _reflection.GeneratedProtocolMessageType('CreateClusterRequest', (_message.Message,), dict( + DESCRIPTOR = _CREATECLUSTERREQUEST, + __module__ = 'google.bigtable.admin.v2.bigtable_instance_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateClusterRequest) + )) +_sym_db.RegisterMessage(CreateClusterRequest) + +GetClusterRequest = _reflection.GeneratedProtocolMessageType('GetClusterRequest', (_message.Message,), dict( + DESCRIPTOR = _GETCLUSTERREQUEST, + __module__ = 'google.bigtable.admin.v2.bigtable_instance_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetClusterRequest) + )) +_sym_db.RegisterMessage(GetClusterRequest) + +ListClustersRequest = _reflection.GeneratedProtocolMessageType('ListClustersRequest', (_message.Message,), dict( + DESCRIPTOR = _LISTCLUSTERSREQUEST, + __module__ = 'google.bigtable.admin.v2.bigtable_instance_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListClustersRequest) + )) +_sym_db.RegisterMessage(ListClustersRequest) + +ListClustersResponse = _reflection.GeneratedProtocolMessageType('ListClustersResponse', (_message.Message,), dict( + DESCRIPTOR = _LISTCLUSTERSRESPONSE, + __module__ = 'google.bigtable.admin.v2.bigtable_instance_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListClustersResponse) + )) +_sym_db.RegisterMessage(ListClustersResponse) + +DeleteClusterRequest = _reflection.GeneratedProtocolMessageType('DeleteClusterRequest', (_message.Message,), dict( + DESCRIPTOR = _DELETECLUSTERREQUEST, + __module__ = 'google.bigtable.admin.v2.bigtable_instance_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteClusterRequest) + )) +_sym_db.RegisterMessage(DeleteClusterRequest) + 
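For orientation, a minimal sketch of how the generated message classes registered above are used on their own (all names and locations are placeholders); the RPC plumbing later in this module is what actually sends such a request:

    from google.cloud.bigtable._generated import bigtable_instance_admin_pb2
    from google.cloud.bigtable._generated import instance_pb2

    # Build a CreateInstanceRequest exactly as the descriptor above defines it:
    # parent, instance_id, instance, and a map<string, Cluster> of clusters.
    request = bigtable_instance_admin_pb2.CreateInstanceRequest(
        parent='projects/my-project',
        instance_id='my-instance',
        instance=instance_pb2.Instance(display_name='My Instance'),
    )
    request.clusters['my-instance-cluster'].CopyFrom(instance_pb2.Cluster(
        location='projects/my-project/locations/us-central1-b',
        serve_nodes=3,
    ))

    # Generated messages round-trip through their wire format.
    data = request.SerializeToString()
    assert bigtable_instance_admin_pb2.CreateInstanceRequest.FromString(data) == request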
+CreateInstanceMetadata = _reflection.GeneratedProtocolMessageType('CreateInstanceMetadata', (_message.Message,), dict( + DESCRIPTOR = _CREATEINSTANCEMETADATA, + __module__ = 'google.bigtable.admin.v2.bigtable_instance_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateInstanceMetadata) + )) +_sym_db.RegisterMessage(CreateInstanceMetadata) + +UpdateClusterMetadata = _reflection.GeneratedProtocolMessageType('UpdateClusterMetadata', (_message.Message,), dict( + DESCRIPTOR = _UPDATECLUSTERMETADATA, + __module__ = 'google.bigtable.admin.v2.bigtable_instance_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.UpdateClusterMetadata) + )) +_sym_db.RegisterMessage(UpdateClusterMetadata) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\034com.google.bigtable.admin.v2B\032BigtableInstanceAdminProtoP\001')) +_CREATEINSTANCEREQUEST_CLUSTERSENTRY.has_options = True +_CREATEINSTANCEREQUEST_CLUSTERSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) + +from grpc.beta import implementations as beta_implementations +from grpc.beta import interfaces as beta_interfaces +from grpc.framework.common import cardinality +from grpc.framework.interfaces.face import utilities as face_utilities + + +class BigtableInstanceAdminStub(object): + """Service for creating, configuring, and deleting Cloud Bigtable Instances and + Clusters. Provides access to the Instance and Cluster schemas only, not the + tables metadata or data stored in those tables. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. + """ + self.CreateInstance = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateInstance', + request_serializer=CreateInstanceRequest.SerializeToString, + response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, + ) + self.GetInstance = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/GetInstance', + request_serializer=GetInstanceRequest.SerializeToString, + response_deserializer=google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Instance.FromString, + ) + self.ListInstances = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/ListInstances', + request_serializer=ListInstancesRequest.SerializeToString, + response_deserializer=ListInstancesResponse.FromString, + ) + self.UpdateInstance = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateInstance', + request_serializer=google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Instance.SerializeToString, + response_deserializer=google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Instance.FromString, + ) + self.DeleteInstance = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteInstance', + request_serializer=DeleteInstanceRequest.SerializeToString, + response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ) + self.CreateCluster = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateCluster', + request_serializer=CreateClusterRequest.SerializeToString, + response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, + ) + self.GetCluster = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/GetCluster', + request_serializer=GetClusterRequest.SerializeToString, + 
response_deserializer=google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Cluster.FromString, + ) + self.ListClusters = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/ListClusters', + request_serializer=ListClustersRequest.SerializeToString, + response_deserializer=ListClustersResponse.FromString, + ) + self.UpdateCluster = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateCluster', + request_serializer=google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Cluster.SerializeToString, + response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, + ) + self.DeleteCluster = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteCluster', + request_serializer=DeleteClusterRequest.SerializeToString, + response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ) + + +class BigtableInstanceAdminServicer(object): + """Service for creating, configuring, and deleting Cloud Bigtable Instances and + Clusters. Provides access to the Instance and Cluster schemas only, not the + tables metadata or data stored in those tables. + """ + + def CreateInstance(self, request, context): + """Create an instance within a project. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetInstance(self, request, context): + """Gets information about an instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ListInstances(self, request, context): + """Lists information about instances in a project. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def UpdateInstance(self, request, context): + """Updates an instance within a project. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def DeleteInstance(self, request, context): + """Delete an instance from a project. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def CreateCluster(self, request, context): + """Creates a cluster within an instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetCluster(self, request, context): + """Gets information about a cluster. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ListClusters(self, request, context): + """Lists information about clusters in an instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def UpdateCluster(self, request, context): + """Updates a cluster within an instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def DeleteCluster(self, request, context): + """Deletes a cluster from an instance. 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_BigtableInstanceAdminServicer_to_server(servicer, server): + rpc_method_handlers = { + 'CreateInstance': grpc.unary_unary_rpc_method_handler( + servicer.CreateInstance, + request_deserializer=CreateInstanceRequest.FromString, + response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, + ), + 'GetInstance': grpc.unary_unary_rpc_method_handler( + servicer.GetInstance, + request_deserializer=GetInstanceRequest.FromString, + response_serializer=google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Instance.SerializeToString, + ), + 'ListInstances': grpc.unary_unary_rpc_method_handler( + servicer.ListInstances, + request_deserializer=ListInstancesRequest.FromString, + response_serializer=ListInstancesResponse.SerializeToString, + ), + 'UpdateInstance': grpc.unary_unary_rpc_method_handler( + servicer.UpdateInstance, + request_deserializer=google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Instance.FromString, + response_serializer=google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Instance.SerializeToString, + ), + 'DeleteInstance': grpc.unary_unary_rpc_method_handler( + servicer.DeleteInstance, + request_deserializer=DeleteInstanceRequest.FromString, + response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ), + 'CreateCluster': grpc.unary_unary_rpc_method_handler( + servicer.CreateCluster, + request_deserializer=CreateClusterRequest.FromString, + response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, + ), + 'GetCluster': grpc.unary_unary_rpc_method_handler( + servicer.GetCluster, + request_deserializer=GetClusterRequest.FromString, + response_serializer=google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Cluster.SerializeToString, + ), + 'ListClusters': grpc.unary_unary_rpc_method_handler( + servicer.ListClusters, + request_deserializer=ListClustersRequest.FromString, + response_serializer=ListClustersResponse.SerializeToString, + ), + 'UpdateCluster': grpc.unary_unary_rpc_method_handler( + servicer.UpdateCluster, + request_deserializer=google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Cluster.FromString, + response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, + ), + 'DeleteCluster': grpc.unary_unary_rpc_method_handler( + servicer.DeleteCluster, + request_deserializer=DeleteClusterRequest.FromString, + response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'google.bigtable.admin.v2.BigtableInstanceAdmin', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + +class BetaBigtableInstanceAdminServicer(object): + """Service for creating, configuring, and deleting Cloud Bigtable Instances and + Clusters. Provides access to the Instance and Cluster schemas only, not the + tables metadata or data stored in those tables. + """ + def CreateInstance(self, request, context): + """Create an instance within a project. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def GetInstance(self, request, context): + """Gets information about an instance. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def ListInstances(self, request, context): + """Lists information about instances in a project. 
+ """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def UpdateInstance(self, request, context): + """Updates an instance within a project. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def DeleteInstance(self, request, context): + """Delete an instance from a project. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def CreateCluster(self, request, context): + """Creates a cluster within an instance. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def GetCluster(self, request, context): + """Gets information about a cluster. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def ListClusters(self, request, context): + """Lists information about clusters in an instance. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def UpdateCluster(self, request, context): + """Updates a cluster within an instance. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def DeleteCluster(self, request, context): + """Deletes a cluster from an instance. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + + +class BetaBigtableInstanceAdminStub(object): + """Service for creating, configuring, and deleting Cloud Bigtable Instances and + Clusters. Provides access to the Instance and Cluster schemas only, not the + tables metadata or data stored in those tables. + """ + def CreateInstance(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Create an instance within a project. + """ + raise NotImplementedError() + CreateInstance.future = None + def GetInstance(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Gets information about an instance. + """ + raise NotImplementedError() + GetInstance.future = None + def ListInstances(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Lists information about instances in a project. + """ + raise NotImplementedError() + ListInstances.future = None + def UpdateInstance(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Updates an instance within a project. + """ + raise NotImplementedError() + UpdateInstance.future = None + def DeleteInstance(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Delete an instance from a project. + """ + raise NotImplementedError() + DeleteInstance.future = None + def CreateCluster(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Creates a cluster within an instance. + """ + raise NotImplementedError() + CreateCluster.future = None + def GetCluster(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Gets information about a cluster. + """ + raise NotImplementedError() + GetCluster.future = None + def ListClusters(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Lists information about clusters in an instance. + """ + raise NotImplementedError() + ListClusters.future = None + def UpdateCluster(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Updates a cluster within an instance. + """ + raise NotImplementedError() + UpdateCluster.future = None + def DeleteCluster(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Deletes a cluster from an instance. 
+ """ + raise NotImplementedError() + DeleteCluster.future = None + + +def beta_create_BigtableInstanceAdmin_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): + request_deserializers = { + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateCluster'): CreateClusterRequest.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateInstance'): CreateInstanceRequest.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteCluster'): DeleteClusterRequest.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteInstance'): DeleteInstanceRequest.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetCluster'): GetClusterRequest.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetInstance'): GetInstanceRequest.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListClusters'): ListClustersRequest.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListInstances'): ListInstancesRequest.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateCluster'): google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Cluster.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateInstance'): google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Instance.FromString, + } + response_serializers = { + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateCluster'): google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateInstance'): google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteCluster'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteInstance'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetCluster'): google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Cluster.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetInstance'): google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Instance.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListClusters'): ListClustersResponse.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListInstances'): ListInstancesResponse.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateCluster'): google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateInstance'): google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Instance.SerializeToString, + } + method_implementations = { + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateCluster'): face_utilities.unary_unary_inline(servicer.CreateCluster), + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateInstance'): face_utilities.unary_unary_inline(servicer.CreateInstance), + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteCluster'): face_utilities.unary_unary_inline(servicer.DeleteCluster), + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteInstance'): face_utilities.unary_unary_inline(servicer.DeleteInstance), + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetCluster'): face_utilities.unary_unary_inline(servicer.GetCluster), + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetInstance'): face_utilities.unary_unary_inline(servicer.GetInstance), 
+ ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListClusters'): face_utilities.unary_unary_inline(servicer.ListClusters), + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListInstances'): face_utilities.unary_unary_inline(servicer.ListInstances), + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateCluster'): face_utilities.unary_unary_inline(servicer.UpdateCluster), + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateInstance'): face_utilities.unary_unary_inline(servicer.UpdateInstance), + } + server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) + return beta_implementations.server(method_implementations, options=server_options) + + +def beta_create_BigtableInstanceAdmin_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): + request_serializers = { + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateCluster'): CreateClusterRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateInstance'): CreateInstanceRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteCluster'): DeleteClusterRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteInstance'): DeleteInstanceRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetCluster'): GetClusterRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetInstance'): GetInstanceRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListClusters'): ListClustersRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListInstances'): ListInstancesRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateCluster'): google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Cluster.SerializeToString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateInstance'): google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Instance.SerializeToString, + } + response_deserializers = { + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateCluster'): google_dot_longrunning_dot_operations__pb2.Operation.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateInstance'): google_dot_longrunning_dot_operations__pb2.Operation.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteCluster'): google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteInstance'): google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetCluster'): google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Cluster.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetInstance'): google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Instance.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListClusters'): ListClustersResponse.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListInstances'): ListInstancesResponse.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateCluster'): google_dot_longrunning_dot_operations__pb2.Operation.FromString, + ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateInstance'): google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Instance.FromString, + } + 
cardinalities = { + 'CreateCluster': cardinality.Cardinality.UNARY_UNARY, + 'CreateInstance': cardinality.Cardinality.UNARY_UNARY, + 'DeleteCluster': cardinality.Cardinality.UNARY_UNARY, + 'DeleteInstance': cardinality.Cardinality.UNARY_UNARY, + 'GetCluster': cardinality.Cardinality.UNARY_UNARY, + 'GetInstance': cardinality.Cardinality.UNARY_UNARY, + 'ListClusters': cardinality.Cardinality.UNARY_UNARY, + 'ListInstances': cardinality.Cardinality.UNARY_UNARY, + 'UpdateCluster': cardinality.Cardinality.UNARY_UNARY, + 'UpdateInstance': cardinality.Cardinality.UNARY_UNARY, + } + stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) + return beta_implementations.dynamic_stub(channel, 'google.bigtable.admin.v2.BigtableInstanceAdmin', cardinalities, options=stub_options) +# @@protoc_insertion_point(module_scope) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/bigtable_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/bigtable_pb2.py new file mode 100644 index 000000000000..04b269d72bf3 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/bigtable_pb2.py @@ -0,0 +1,1100 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/bigtable/v2/bigtable.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 +from google.cloud.bigtable._generated import data_pb2 as google_dot_bigtable_dot_v2_dot_data__pb2 +from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2 +from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2 + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='google/bigtable/v2/bigtable.proto', + package='google.bigtable.v2', + syntax='proto3', + serialized_pb=_b('\n!google/bigtable/v2/bigtable.proto\x12\x12google.bigtable.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x1dgoogle/bigtable/v2/data.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x17google/rpc/status.proto\"\x92\x01\n\x0fReadRowsRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12(\n\x04rows\x18\x02 \x01(\x0b\x32\x1a.google.bigtable.v2.RowSet\x12-\n\x06\x66ilter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x12\n\nrows_limit\x18\x04 \x01(\x03\"\xf8\x02\n\x10ReadRowsResponse\x12>\n\x06\x63hunks\x18\x01 \x03(\x0b\x32..google.bigtable.v2.ReadRowsResponse.CellChunk\x12\x1c\n\x14last_scanned_row_key\x18\x02 \x01(\x0c\x1a\x85\x02\n\tCellChunk\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x31\n\x0b\x66\x61mily_name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\tqualifier\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.BytesValue\x12\x18\n\x10timestamp_micros\x18\x04 \x01(\x03\x12\x0e\n\x06labels\x18\x05 \x03(\t\x12\r\n\x05value\x18\x06 \x01(\x0c\x12\x12\n\nvalue_size\x18\x07 \x01(\x05\x12\x13\n\treset_row\x18\x08 \x01(\x08H\x00\x12\x14\n\ncommit_row\x18\t 
\x01(\x08H\x00\x42\x0c\n\nrow_status\"*\n\x14SampleRowKeysRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\">\n\x15SampleRowKeysResponse\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x14\n\x0coffset_bytes\x18\x02 \x01(\x03\"h\n\x10MutateRowRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x0f\n\x07row_key\x18\x02 \x01(\x0c\x12/\n\tmutations\x18\x03 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\"\x13\n\x11MutateRowResponse\"\xb0\x01\n\x11MutateRowsRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12<\n\x07\x65ntries\x18\x02 \x03(\x0b\x32+.google.bigtable.v2.MutateRowsRequest.Entry\x1aI\n\x05\x45ntry\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12/\n\tmutations\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\"\x8f\x01\n\x12MutateRowsResponse\x12=\n\x07\x65ntries\x18\x01 \x03(\x0b\x32,.google.bigtable.v2.MutateRowsResponse.Entry\x1a:\n\x05\x45ntry\x12\r\n\x05index\x18\x01 \x01(\x03\x12\"\n\x06status\x18\x02 \x01(\x0b\x32\x12.google.rpc.Status\"\xe5\x01\n\x18\x43heckAndMutateRowRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x0f\n\x07row_key\x18\x02 \x01(\x0c\x12\x37\n\x10predicate_filter\x18\x06 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x34\n\x0etrue_mutations\x18\x04 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\x12\x35\n\x0f\x66\x61lse_mutations\x18\x05 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\"6\n\x19\x43heckAndMutateRowResponse\x12\x19\n\x11predicate_matched\x18\x01 \x01(\x08\"x\n\x19ReadModifyWriteRowRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x0f\n\x07row_key\x18\x02 \x01(\x0c\x12\x36\n\x05rules\x18\x03 \x03(\x0b\x32\'.google.bigtable.v2.ReadModifyWriteRule\"B\n\x1aReadModifyWriteRowResponse\x12$\n\x03row\x18\x01 \x01(\x0b\x32\x17.google.bigtable.v2.Row2\xad\x08\n\x08\x42igtable\x12\x9d\x01\n\x08ReadRows\x12#.google.bigtable.v2.ReadRowsRequest\x1a$.google.bigtable.v2.ReadRowsResponse\"D\x82\xd3\xe4\x93\x02>\"9/v2/{table_name=projects/*/instances/*/tables/*}:readRows:\x01*0\x01\x12\xae\x01\n\rSampleRowKeys\x12(.google.bigtable.v2.SampleRowKeysRequest\x1a).google.bigtable.v2.SampleRowKeysResponse\"F\x82\xd3\xe4\x93\x02@\x12>/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys0\x01\x12\x9f\x01\n\tMutateRow\x12$.google.bigtable.v2.MutateRowRequest\x1a%.google.bigtable.v2.MutateRowResponse\"E\x82\xd3\xe4\x93\x02?\":/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow:\x01*\x12\xa5\x01\n\nMutateRows\x12%.google.bigtable.v2.MutateRowsRequest\x1a&.google.bigtable.v2.MutateRowsResponse\"F\x82\xd3\xe4\x93\x02@\";/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows:\x01*0\x01\x12\xbf\x01\n\x11\x43heckAndMutateRow\x12,.google.bigtable.v2.CheckAndMutateRowRequest\x1a-.google.bigtable.v2.CheckAndMutateRowResponse\"M\x82\xd3\xe4\x93\x02G\"B/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow:\x01*\x12\xc3\x01\n\x12ReadModifyWriteRow\x12-.google.bigtable.v2.ReadModifyWriteRowRequest\x1a..google.bigtable.v2.ReadModifyWriteRowResponse\"N\x82\xd3\xe4\x93\x02H\"C/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow:\x01*B)\n\x16\x63om.google.bigtable.v2B\rBigtableProtoP\x01\x62\x06proto3') + , + dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_bigtable_dot_v2_dot_data__pb2.DESCRIPTOR,google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR,google_dot_rpc_dot_status__pb2.DESCRIPTOR,]) +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + + + + +_READROWSREQUEST = _descriptor.Descriptor( + name='ReadRowsRequest', + full_name='google.bigtable.v2.ReadRowsRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + 
_descriptor.FieldDescriptor( + name='table_name', full_name='google.bigtable.v2.ReadRowsRequest.table_name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='rows', full_name='google.bigtable.v2.ReadRowsRequest.rows', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='filter', full_name='google.bigtable.v2.ReadRowsRequest.filter', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='rows_limit', full_name='google.bigtable.v2.ReadRowsRequest.rows_limit', index=3, + number=4, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=176, + serialized_end=322, +) + + +_READROWSRESPONSE_CELLCHUNK = _descriptor.Descriptor( + name='CellChunk', + full_name='google.bigtable.v2.ReadRowsResponse.CellChunk', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='row_key', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.row_key', index=0, + number=1, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='family_name', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.family_name', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='qualifier', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.qualifier', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='timestamp_micros', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.timestamp_micros', index=3, + number=4, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='labels', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.labels', index=4, + number=5, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.value', index=5, + 
number=6, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value_size', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.value_size', index=6, + number=7, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reset_row', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.reset_row', index=7, + number=8, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='commit_row', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.commit_row', index=8, + number=9, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='row_status', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.row_status', + index=0, containing_type=None, fields=[]), + ], + serialized_start=440, + serialized_end=701, +) + +_READROWSRESPONSE = _descriptor.Descriptor( + name='ReadRowsResponse', + full_name='google.bigtable.v2.ReadRowsResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='chunks', full_name='google.bigtable.v2.ReadRowsResponse.chunks', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='last_scanned_row_key', full_name='google.bigtable.v2.ReadRowsResponse.last_scanned_row_key', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_READROWSRESPONSE_CELLCHUNK, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=325, + serialized_end=701, +) + + +_SAMPLEROWKEYSREQUEST = _descriptor.Descriptor( + name='SampleRowKeysRequest', + full_name='google.bigtable.v2.SampleRowKeysRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='table_name', full_name='google.bigtable.v2.SampleRowKeysRequest.table_name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=703, + serialized_end=745, +) + + +_SAMPLEROWKEYSRESPONSE = _descriptor.Descriptor( + 
name='SampleRowKeysResponse', + full_name='google.bigtable.v2.SampleRowKeysResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='row_key', full_name='google.bigtable.v2.SampleRowKeysResponse.row_key', index=0, + number=1, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='offset_bytes', full_name='google.bigtable.v2.SampleRowKeysResponse.offset_bytes', index=1, + number=2, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=747, + serialized_end=809, +) + + +_MUTATEROWREQUEST = _descriptor.Descriptor( + name='MutateRowRequest', + full_name='google.bigtable.v2.MutateRowRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='table_name', full_name='google.bigtable.v2.MutateRowRequest.table_name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='row_key', full_name='google.bigtable.v2.MutateRowRequest.row_key', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='mutations', full_name='google.bigtable.v2.MutateRowRequest.mutations', index=2, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=811, + serialized_end=915, +) + + +_MUTATEROWRESPONSE = _descriptor.Descriptor( + name='MutateRowResponse', + full_name='google.bigtable.v2.MutateRowResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=917, + serialized_end=936, +) + + +_MUTATEROWSREQUEST_ENTRY = _descriptor.Descriptor( + name='Entry', + full_name='google.bigtable.v2.MutateRowsRequest.Entry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='row_key', full_name='google.bigtable.v2.MutateRowsRequest.Entry.row_key', index=0, + number=1, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='mutations', full_name='google.bigtable.v2.MutateRowsRequest.Entry.mutations', index=1, + number=2, type=11, cpp_type=10, label=3, 
+ has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1042, + serialized_end=1115, +) + +_MUTATEROWSREQUEST = _descriptor.Descriptor( + name='MutateRowsRequest', + full_name='google.bigtable.v2.MutateRowsRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='table_name', full_name='google.bigtable.v2.MutateRowsRequest.table_name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='entries', full_name='google.bigtable.v2.MutateRowsRequest.entries', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_MUTATEROWSREQUEST_ENTRY, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=939, + serialized_end=1115, +) + + +_MUTATEROWSRESPONSE_ENTRY = _descriptor.Descriptor( + name='Entry', + full_name='google.bigtable.v2.MutateRowsResponse.Entry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='index', full_name='google.bigtable.v2.MutateRowsResponse.Entry.index', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='status', full_name='google.bigtable.v2.MutateRowsResponse.Entry.status', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1203, + serialized_end=1261, +) + +_MUTATEROWSRESPONSE = _descriptor.Descriptor( + name='MutateRowsResponse', + full_name='google.bigtable.v2.MutateRowsResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='entries', full_name='google.bigtable.v2.MutateRowsResponse.entries', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_MUTATEROWSRESPONSE_ENTRY, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1118, + serialized_end=1261, +) + + +_CHECKANDMUTATEROWREQUEST = _descriptor.Descriptor( + name='CheckAndMutateRowRequest', + full_name='google.bigtable.v2.CheckAndMutateRowRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + 
_descriptor.FieldDescriptor( + name='table_name', full_name='google.bigtable.v2.CheckAndMutateRowRequest.table_name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='row_key', full_name='google.bigtable.v2.CheckAndMutateRowRequest.row_key', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='predicate_filter', full_name='google.bigtable.v2.CheckAndMutateRowRequest.predicate_filter', index=2, + number=6, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='true_mutations', full_name='google.bigtable.v2.CheckAndMutateRowRequest.true_mutations', index=3, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='false_mutations', full_name='google.bigtable.v2.CheckAndMutateRowRequest.false_mutations', index=4, + number=5, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1264, + serialized_end=1493, +) + + +_CHECKANDMUTATEROWRESPONSE = _descriptor.Descriptor( + name='CheckAndMutateRowResponse', + full_name='google.bigtable.v2.CheckAndMutateRowResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='predicate_matched', full_name='google.bigtable.v2.CheckAndMutateRowResponse.predicate_matched', index=0, + number=1, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1495, + serialized_end=1549, +) + + +_READMODIFYWRITEROWREQUEST = _descriptor.Descriptor( + name='ReadModifyWriteRowRequest', + full_name='google.bigtable.v2.ReadModifyWriteRowRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='table_name', full_name='google.bigtable.v2.ReadModifyWriteRowRequest.table_name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='row_key', full_name='google.bigtable.v2.ReadModifyWriteRowRequest.row_key', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, 
enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='rules', full_name='google.bigtable.v2.ReadModifyWriteRowRequest.rules', index=2, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1551, + serialized_end=1671, +) + + +_READMODIFYWRITEROWRESPONSE = _descriptor.Descriptor( + name='ReadModifyWriteRowResponse', + full_name='google.bigtable.v2.ReadModifyWriteRowResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='row', full_name='google.bigtable.v2.ReadModifyWriteRowResponse.row', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1673, + serialized_end=1739, +) + +_READROWSREQUEST.fields_by_name['rows'].message_type = google_dot_bigtable_dot_v2_dot_data__pb2._ROWSET +_READROWSREQUEST.fields_by_name['filter'].message_type = google_dot_bigtable_dot_v2_dot_data__pb2._ROWFILTER +_READROWSRESPONSE_CELLCHUNK.fields_by_name['family_name'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE +_READROWSRESPONSE_CELLCHUNK.fields_by_name['qualifier'].message_type = google_dot_protobuf_dot_wrappers__pb2._BYTESVALUE +_READROWSRESPONSE_CELLCHUNK.containing_type = _READROWSRESPONSE +_READROWSRESPONSE_CELLCHUNK.oneofs_by_name['row_status'].fields.append( + _READROWSRESPONSE_CELLCHUNK.fields_by_name['reset_row']) +_READROWSRESPONSE_CELLCHUNK.fields_by_name['reset_row'].containing_oneof = _READROWSRESPONSE_CELLCHUNK.oneofs_by_name['row_status'] +_READROWSRESPONSE_CELLCHUNK.oneofs_by_name['row_status'].fields.append( + _READROWSRESPONSE_CELLCHUNK.fields_by_name['commit_row']) +_READROWSRESPONSE_CELLCHUNK.fields_by_name['commit_row'].containing_oneof = _READROWSRESPONSE_CELLCHUNK.oneofs_by_name['row_status'] +_READROWSRESPONSE.fields_by_name['chunks'].message_type = _READROWSRESPONSE_CELLCHUNK +_MUTATEROWREQUEST.fields_by_name['mutations'].message_type = google_dot_bigtable_dot_v2_dot_data__pb2._MUTATION +_MUTATEROWSREQUEST_ENTRY.fields_by_name['mutations'].message_type = google_dot_bigtable_dot_v2_dot_data__pb2._MUTATION +_MUTATEROWSREQUEST_ENTRY.containing_type = _MUTATEROWSREQUEST +_MUTATEROWSREQUEST.fields_by_name['entries'].message_type = _MUTATEROWSREQUEST_ENTRY +_MUTATEROWSRESPONSE_ENTRY.fields_by_name['status'].message_type = google_dot_rpc_dot_status__pb2._STATUS +_MUTATEROWSRESPONSE_ENTRY.containing_type = _MUTATEROWSRESPONSE +_MUTATEROWSRESPONSE.fields_by_name['entries'].message_type = _MUTATEROWSRESPONSE_ENTRY +_CHECKANDMUTATEROWREQUEST.fields_by_name['predicate_filter'].message_type = google_dot_bigtable_dot_v2_dot_data__pb2._ROWFILTER +_CHECKANDMUTATEROWREQUEST.fields_by_name['true_mutations'].message_type = google_dot_bigtable_dot_v2_dot_data__pb2._MUTATION +_CHECKANDMUTATEROWREQUEST.fields_by_name['false_mutations'].message_type = 
google_dot_bigtable_dot_v2_dot_data__pb2._MUTATION +_READMODIFYWRITEROWREQUEST.fields_by_name['rules'].message_type = google_dot_bigtable_dot_v2_dot_data__pb2._READMODIFYWRITERULE +_READMODIFYWRITEROWRESPONSE.fields_by_name['row'].message_type = google_dot_bigtable_dot_v2_dot_data__pb2._ROW +DESCRIPTOR.message_types_by_name['ReadRowsRequest'] = _READROWSREQUEST +DESCRIPTOR.message_types_by_name['ReadRowsResponse'] = _READROWSRESPONSE +DESCRIPTOR.message_types_by_name['SampleRowKeysRequest'] = _SAMPLEROWKEYSREQUEST +DESCRIPTOR.message_types_by_name['SampleRowKeysResponse'] = _SAMPLEROWKEYSRESPONSE +DESCRIPTOR.message_types_by_name['MutateRowRequest'] = _MUTATEROWREQUEST +DESCRIPTOR.message_types_by_name['MutateRowResponse'] = _MUTATEROWRESPONSE +DESCRIPTOR.message_types_by_name['MutateRowsRequest'] = _MUTATEROWSREQUEST +DESCRIPTOR.message_types_by_name['MutateRowsResponse'] = _MUTATEROWSRESPONSE +DESCRIPTOR.message_types_by_name['CheckAndMutateRowRequest'] = _CHECKANDMUTATEROWREQUEST +DESCRIPTOR.message_types_by_name['CheckAndMutateRowResponse'] = _CHECKANDMUTATEROWRESPONSE +DESCRIPTOR.message_types_by_name['ReadModifyWriteRowRequest'] = _READMODIFYWRITEROWREQUEST +DESCRIPTOR.message_types_by_name['ReadModifyWriteRowResponse'] = _READMODIFYWRITEROWRESPONSE + +ReadRowsRequest = _reflection.GeneratedProtocolMessageType('ReadRowsRequest', (_message.Message,), dict( + DESCRIPTOR = _READROWSREQUEST, + __module__ = 'google.bigtable.v2.bigtable_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadRowsRequest) + )) +_sym_db.RegisterMessage(ReadRowsRequest) + +ReadRowsResponse = _reflection.GeneratedProtocolMessageType('ReadRowsResponse', (_message.Message,), dict( + + CellChunk = _reflection.GeneratedProtocolMessageType('CellChunk', (_message.Message,), dict( + DESCRIPTOR = _READROWSRESPONSE_CELLCHUNK, + __module__ = 'google.bigtable.v2.bigtable_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadRowsResponse.CellChunk) + )) + , + DESCRIPTOR = _READROWSRESPONSE, + __module__ = 'google.bigtable.v2.bigtable_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadRowsResponse) + )) +_sym_db.RegisterMessage(ReadRowsResponse) +_sym_db.RegisterMessage(ReadRowsResponse.CellChunk) + +SampleRowKeysRequest = _reflection.GeneratedProtocolMessageType('SampleRowKeysRequest', (_message.Message,), dict( + DESCRIPTOR = _SAMPLEROWKEYSREQUEST, + __module__ = 'google.bigtable.v2.bigtable_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.SampleRowKeysRequest) + )) +_sym_db.RegisterMessage(SampleRowKeysRequest) + +SampleRowKeysResponse = _reflection.GeneratedProtocolMessageType('SampleRowKeysResponse', (_message.Message,), dict( + DESCRIPTOR = _SAMPLEROWKEYSRESPONSE, + __module__ = 'google.bigtable.v2.bigtable_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.SampleRowKeysResponse) + )) +_sym_db.RegisterMessage(SampleRowKeysResponse) + +MutateRowRequest = _reflection.GeneratedProtocolMessageType('MutateRowRequest', (_message.Message,), dict( + DESCRIPTOR = _MUTATEROWREQUEST, + __module__ = 'google.bigtable.v2.bigtable_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowRequest) + )) +_sym_db.RegisterMessage(MutateRowRequest) + +MutateRowResponse = _reflection.GeneratedProtocolMessageType('MutateRowResponse', (_message.Message,), dict( + DESCRIPTOR = _MUTATEROWRESPONSE, + __module__ = 'google.bigtable.v2.bigtable_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowResponse) + )) 
+_sym_db.RegisterMessage(MutateRowResponse) + +MutateRowsRequest = _reflection.GeneratedProtocolMessageType('MutateRowsRequest', (_message.Message,), dict( + + Entry = _reflection.GeneratedProtocolMessageType('Entry', (_message.Message,), dict( + DESCRIPTOR = _MUTATEROWSREQUEST_ENTRY, + __module__ = 'google.bigtable.v2.bigtable_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsRequest.Entry) + )) + , + DESCRIPTOR = _MUTATEROWSREQUEST, + __module__ = 'google.bigtable.v2.bigtable_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsRequest) + )) +_sym_db.RegisterMessage(MutateRowsRequest) +_sym_db.RegisterMessage(MutateRowsRequest.Entry) + +MutateRowsResponse = _reflection.GeneratedProtocolMessageType('MutateRowsResponse', (_message.Message,), dict( + + Entry = _reflection.GeneratedProtocolMessageType('Entry', (_message.Message,), dict( + DESCRIPTOR = _MUTATEROWSRESPONSE_ENTRY, + __module__ = 'google.bigtable.v2.bigtable_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsResponse.Entry) + )) + , + DESCRIPTOR = _MUTATEROWSRESPONSE, + __module__ = 'google.bigtable.v2.bigtable_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsResponse) + )) +_sym_db.RegisterMessage(MutateRowsResponse) +_sym_db.RegisterMessage(MutateRowsResponse.Entry) + +CheckAndMutateRowRequest = _reflection.GeneratedProtocolMessageType('CheckAndMutateRowRequest', (_message.Message,), dict( + DESCRIPTOR = _CHECKANDMUTATEROWREQUEST, + __module__ = 'google.bigtable.v2.bigtable_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.CheckAndMutateRowRequest) + )) +_sym_db.RegisterMessage(CheckAndMutateRowRequest) + +CheckAndMutateRowResponse = _reflection.GeneratedProtocolMessageType('CheckAndMutateRowResponse', (_message.Message,), dict( + DESCRIPTOR = _CHECKANDMUTATEROWRESPONSE, + __module__ = 'google.bigtable.v2.bigtable_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.CheckAndMutateRowResponse) + )) +_sym_db.RegisterMessage(CheckAndMutateRowResponse) + +ReadModifyWriteRowRequest = _reflection.GeneratedProtocolMessageType('ReadModifyWriteRowRequest', (_message.Message,), dict( + DESCRIPTOR = _READMODIFYWRITEROWREQUEST, + __module__ = 'google.bigtable.v2.bigtable_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadModifyWriteRowRequest) + )) +_sym_db.RegisterMessage(ReadModifyWriteRowRequest) + +ReadModifyWriteRowResponse = _reflection.GeneratedProtocolMessageType('ReadModifyWriteRowResponse', (_message.Message,), dict( + DESCRIPTOR = _READMODIFYWRITEROWRESPONSE, + __module__ = 'google.bigtable.v2.bigtable_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadModifyWriteRowResponse) + )) +_sym_db.RegisterMessage(ReadModifyWriteRowResponse) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\026com.google.bigtable.v2B\rBigtableProtoP\001')) + +from grpc.beta import implementations as beta_implementations +from grpc.beta import interfaces as beta_interfaces +from grpc.framework.common import cardinality +from grpc.framework.interfaces.face import utilities as face_utilities + + +class BigtableStub(object): + """Service for reading from and writing to existing Bigtable tables. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. 
+ """ + self.ReadRows = channel.unary_stream( + '/google.bigtable.v2.Bigtable/ReadRows', + request_serializer=ReadRowsRequest.SerializeToString, + response_deserializer=ReadRowsResponse.FromString, + ) + self.SampleRowKeys = channel.unary_stream( + '/google.bigtable.v2.Bigtable/SampleRowKeys', + request_serializer=SampleRowKeysRequest.SerializeToString, + response_deserializer=SampleRowKeysResponse.FromString, + ) + self.MutateRow = channel.unary_unary( + '/google.bigtable.v2.Bigtable/MutateRow', + request_serializer=MutateRowRequest.SerializeToString, + response_deserializer=MutateRowResponse.FromString, + ) + self.MutateRows = channel.unary_stream( + '/google.bigtable.v2.Bigtable/MutateRows', + request_serializer=MutateRowsRequest.SerializeToString, + response_deserializer=MutateRowsResponse.FromString, + ) + self.CheckAndMutateRow = channel.unary_unary( + '/google.bigtable.v2.Bigtable/CheckAndMutateRow', + request_serializer=CheckAndMutateRowRequest.SerializeToString, + response_deserializer=CheckAndMutateRowResponse.FromString, + ) + self.ReadModifyWriteRow = channel.unary_unary( + '/google.bigtable.v2.Bigtable/ReadModifyWriteRow', + request_serializer=ReadModifyWriteRowRequest.SerializeToString, + response_deserializer=ReadModifyWriteRowResponse.FromString, + ) + + +class BigtableServicer(object): + """Service for reading from and writing to existing Bigtable tables. + """ + + def ReadRows(self, request, context): + """Streams back the contents of all requested rows, optionally + applying the same Reader filter to each. Depending on their size, + rows and cells may be broken up across multiple responses, but + atomicity of each row will still be preserved. See the + ReadRowsResponse documentation for details. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def SampleRowKeys(self, request, context): + """Returns a sample of row keys in the table. The returned row keys will + delimit contiguous sections of the table of approximately equal size, + which can be used to break up the data for distributed tasks like + mapreduces. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def MutateRow(self, request, context): + """Mutates a row atomically. Cells already present in the row are left + unchanged unless explicitly changed by `mutation`. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def MutateRows(self, request, context): + """Mutates multiple rows in a batch. Each individual row is mutated + atomically as in MutateRow, but the entire batch is not executed + atomically. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def CheckAndMutateRow(self, request, context): + """Mutates a row atomically based on the output of a predicate Reader filter. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ReadModifyWriteRow(self, request, context): + """Modifies a row atomically. 
The method reads the latest existing timestamp + and value from the specified columns and writes a new entry based on + pre-defined read/modify/write rules. The new value for the timestamp is the + greater of the existing timestamp or the current server time. The method + returns the new contents of all modified cells. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_BigtableServicer_to_server(servicer, server): + rpc_method_handlers = { + 'ReadRows': grpc.unary_stream_rpc_method_handler( + servicer.ReadRows, + request_deserializer=ReadRowsRequest.FromString, + response_serializer=ReadRowsResponse.SerializeToString, + ), + 'SampleRowKeys': grpc.unary_stream_rpc_method_handler( + servicer.SampleRowKeys, + request_deserializer=SampleRowKeysRequest.FromString, + response_serializer=SampleRowKeysResponse.SerializeToString, + ), + 'MutateRow': grpc.unary_unary_rpc_method_handler( + servicer.MutateRow, + request_deserializer=MutateRowRequest.FromString, + response_serializer=MutateRowResponse.SerializeToString, + ), + 'MutateRows': grpc.unary_stream_rpc_method_handler( + servicer.MutateRows, + request_deserializer=MutateRowsRequest.FromString, + response_serializer=MutateRowsResponse.SerializeToString, + ), + 'CheckAndMutateRow': grpc.unary_unary_rpc_method_handler( + servicer.CheckAndMutateRow, + request_deserializer=CheckAndMutateRowRequest.FromString, + response_serializer=CheckAndMutateRowResponse.SerializeToString, + ), + 'ReadModifyWriteRow': grpc.unary_unary_rpc_method_handler( + servicer.ReadModifyWriteRow, + request_deserializer=ReadModifyWriteRowRequest.FromString, + response_serializer=ReadModifyWriteRowResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'google.bigtable.v2.Bigtable', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + +class BetaBigtableServicer(object): + """Service for reading from and writing to existing Bigtable tables. + """ + def ReadRows(self, request, context): + """Streams back the contents of all requested rows, optionally + applying the same Reader filter to each. Depending on their size, + rows and cells may be broken up across multiple responses, but + atomicity of each row will still be preserved. See the + ReadRowsResponse documentation for details. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def SampleRowKeys(self, request, context): + """Returns a sample of row keys in the table. The returned row keys will + delimit contiguous sections of the table of approximately equal size, + which can be used to break up the data for distributed tasks like + mapreduces. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def MutateRow(self, request, context): + """Mutates a row atomically. Cells already present in the row are left + unchanged unless explicitly changed by `mutation`. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def MutateRows(self, request, context): + """Mutates multiple rows in a batch. Each individual row is mutated + atomically as in MutateRow, but the entire batch is not executed + atomically. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def CheckAndMutateRow(self, request, context): + """Mutates a row atomically based on the output of a predicate Reader filter. 
+ """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def ReadModifyWriteRow(self, request, context): + """Modifies a row atomically. The method reads the latest existing timestamp + and value from the specified columns and writes a new entry based on + pre-defined read/modify/write rules. The new value for the timestamp is the + greater of the existing timestamp or the current server time. The method + returns the new contents of all modified cells. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + + +class BetaBigtableStub(object): + """Service for reading from and writing to existing Bigtable tables. + """ + def ReadRows(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Streams back the contents of all requested rows, optionally + applying the same Reader filter to each. Depending on their size, + rows and cells may be broken up across multiple responses, but + atomicity of each row will still be preserved. See the + ReadRowsResponse documentation for details. + """ + raise NotImplementedError() + def SampleRowKeys(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Returns a sample of row keys in the table. The returned row keys will + delimit contiguous sections of the table of approximately equal size, + which can be used to break up the data for distributed tasks like + mapreduces. + """ + raise NotImplementedError() + def MutateRow(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Mutates a row atomically. Cells already present in the row are left + unchanged unless explicitly changed by `mutation`. + """ + raise NotImplementedError() + MutateRow.future = None + def MutateRows(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Mutates multiple rows in a batch. Each individual row is mutated + atomically as in MutateRow, but the entire batch is not executed + atomically. + """ + raise NotImplementedError() + def CheckAndMutateRow(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Mutates a row atomically based on the output of a predicate Reader filter. + """ + raise NotImplementedError() + CheckAndMutateRow.future = None + def ReadModifyWriteRow(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Modifies a row atomically. The method reads the latest existing timestamp + and value from the specified columns and writes a new entry based on + pre-defined read/modify/write rules. The new value for the timestamp is the + greater of the existing timestamp or the current server time. The method + returns the new contents of all modified cells. 
+ """ + raise NotImplementedError() + ReadModifyWriteRow.future = None + + +def beta_create_Bigtable_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): + request_deserializers = { + ('google.bigtable.v2.Bigtable', 'CheckAndMutateRow'): CheckAndMutateRowRequest.FromString, + ('google.bigtable.v2.Bigtable', 'MutateRow'): MutateRowRequest.FromString, + ('google.bigtable.v2.Bigtable', 'MutateRows'): MutateRowsRequest.FromString, + ('google.bigtable.v2.Bigtable', 'ReadModifyWriteRow'): ReadModifyWriteRowRequest.FromString, + ('google.bigtable.v2.Bigtable', 'ReadRows'): ReadRowsRequest.FromString, + ('google.bigtable.v2.Bigtable', 'SampleRowKeys'): SampleRowKeysRequest.FromString, + } + response_serializers = { + ('google.bigtable.v2.Bigtable', 'CheckAndMutateRow'): CheckAndMutateRowResponse.SerializeToString, + ('google.bigtable.v2.Bigtable', 'MutateRow'): MutateRowResponse.SerializeToString, + ('google.bigtable.v2.Bigtable', 'MutateRows'): MutateRowsResponse.SerializeToString, + ('google.bigtable.v2.Bigtable', 'ReadModifyWriteRow'): ReadModifyWriteRowResponse.SerializeToString, + ('google.bigtable.v2.Bigtable', 'ReadRows'): ReadRowsResponse.SerializeToString, + ('google.bigtable.v2.Bigtable', 'SampleRowKeys'): SampleRowKeysResponse.SerializeToString, + } + method_implementations = { + ('google.bigtable.v2.Bigtable', 'CheckAndMutateRow'): face_utilities.unary_unary_inline(servicer.CheckAndMutateRow), + ('google.bigtable.v2.Bigtable', 'MutateRow'): face_utilities.unary_unary_inline(servicer.MutateRow), + ('google.bigtable.v2.Bigtable', 'MutateRows'): face_utilities.unary_stream_inline(servicer.MutateRows), + ('google.bigtable.v2.Bigtable', 'ReadModifyWriteRow'): face_utilities.unary_unary_inline(servicer.ReadModifyWriteRow), + ('google.bigtable.v2.Bigtable', 'ReadRows'): face_utilities.unary_stream_inline(servicer.ReadRows), + ('google.bigtable.v2.Bigtable', 'SampleRowKeys'): face_utilities.unary_stream_inline(servicer.SampleRowKeys), + } + server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) + return beta_implementations.server(method_implementations, options=server_options) + + +def beta_create_Bigtable_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): + request_serializers = { + ('google.bigtable.v2.Bigtable', 'CheckAndMutateRow'): CheckAndMutateRowRequest.SerializeToString, + ('google.bigtable.v2.Bigtable', 'MutateRow'): MutateRowRequest.SerializeToString, + ('google.bigtable.v2.Bigtable', 'MutateRows'): MutateRowsRequest.SerializeToString, + ('google.bigtable.v2.Bigtable', 'ReadModifyWriteRow'): ReadModifyWriteRowRequest.SerializeToString, + ('google.bigtable.v2.Bigtable', 'ReadRows'): ReadRowsRequest.SerializeToString, + ('google.bigtable.v2.Bigtable', 'SampleRowKeys'): SampleRowKeysRequest.SerializeToString, + } + response_deserializers = { + ('google.bigtable.v2.Bigtable', 'CheckAndMutateRow'): CheckAndMutateRowResponse.FromString, + ('google.bigtable.v2.Bigtable', 'MutateRow'): MutateRowResponse.FromString, + ('google.bigtable.v2.Bigtable', 'MutateRows'): MutateRowsResponse.FromString, + ('google.bigtable.v2.Bigtable', 'ReadModifyWriteRow'): ReadModifyWriteRowResponse.FromString, + ('google.bigtable.v2.Bigtable', 'ReadRows'): ReadRowsResponse.FromString, + ('google.bigtable.v2.Bigtable', 'SampleRowKeys'): 
SampleRowKeysResponse.FromString, + } + cardinalities = { + 'CheckAndMutateRow': cardinality.Cardinality.UNARY_UNARY, + 'MutateRow': cardinality.Cardinality.UNARY_UNARY, + 'MutateRows': cardinality.Cardinality.UNARY_STREAM, + 'ReadModifyWriteRow': cardinality.Cardinality.UNARY_UNARY, + 'ReadRows': cardinality.Cardinality.UNARY_STREAM, + 'SampleRowKeys': cardinality.Cardinality.UNARY_STREAM, + } + stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) + return beta_implementations.dynamic_stub(channel, 'google.bigtable.v2.Bigtable', cardinalities, options=stub_options) +# @@protoc_insertion_point(module_scope) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/bigtable_table_admin_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/bigtable_table_admin_pb2.py new file mode 100644 index 000000000000..7368c0bf4e59 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/bigtable_table_admin_pb2.py @@ -0,0 +1,784 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/bigtable/admin/v2/bigtable_table_admin.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 +from google.cloud.bigtable._generated import table_pb2 as google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2 +from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='google/bigtable/admin/v2/bigtable_table_admin.proto', + package='google.bigtable.admin.v2', + syntax='proto3', + serialized_pb=_b('\n3google/bigtable/admin/v2/bigtable_table_admin.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a$google/bigtable/admin/v2/table.proto\x1a\x1bgoogle/protobuf/empty.proto\"\xc8\x01\n\x12\x43reateTableRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x10\n\x08table_id\x18\x02 \x01(\t\x12.\n\x05table\x18\x03 \x01(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12J\n\x0einitial_splits\x18\x04 \x03(\x0b\x32\x32.google.bigtable.admin.v2.CreateTableRequest.Split\x1a\x14\n\x05Split\x12\x0b\n\x03key\x18\x01 \x01(\x0c\"m\n\x13\x44ropRowRangeRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x18\n\x0erow_key_prefix\x18\x02 \x01(\x0cH\x00\x12$\n\x1a\x64\x65lete_all_data_from_table\x18\x03 \x01(\x08H\x00\x42\x08\n\x06target\"k\n\x11ListTablesRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x32\n\x04view\x18\x02 \x01(\x0e\x32$.google.bigtable.admin.v2.Table.View\x12\x12\n\npage_token\x18\x03 \x01(\t\"^\n\x12ListTablesResponse\x12/\n\x06tables\x18\x01 \x03(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"S\n\x0fGetTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x32\n\x04view\x18\x02 \x01(\x0e\x32$.google.bigtable.admin.v2.Table.View\"\"\n\x12\x44\x65leteTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\xae\x02\n\x1bModifyColumnFamiliesRequest\x12\x0c\n\x04name\x18\x01 
\x01(\t\x12Y\n\rmodifications\x18\x02 \x03(\x0b\x32\x42.google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification\x1a\xa5\x01\n\x0cModification\x12\n\n\x02id\x18\x01 \x01(\t\x12\x38\n\x06\x63reate\x18\x02 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamilyH\x00\x12\x38\n\x06update\x18\x03 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamilyH\x00\x12\x0e\n\x04\x64rop\x18\x04 \x01(\x08H\x00\x42\x05\n\x03mod2\xb8\x07\n\x12\x42igtableTableAdmin\x12\x93\x01\n\x0b\x43reateTable\x12,.google.bigtable.admin.v2.CreateTableRequest\x1a\x1f.google.bigtable.admin.v2.Table\"5\x82\xd3\xe4\x93\x02/\"*/v2/{parent=projects/*/instances/*}/tables:\x01*\x12\x9b\x01\n\nListTables\x12+.google.bigtable.admin.v2.ListTablesRequest\x1a,.google.bigtable.admin.v2.ListTablesResponse\"2\x82\xd3\xe4\x93\x02,\x12*/v2/{parent=projects/*/instances/*}/tables\x12\x8a\x01\n\x08GetTable\x12).google.bigtable.admin.v2.GetTableRequest\x1a\x1f.google.bigtable.admin.v2.Table\"2\x82\xd3\xe4\x93\x02,\x12*/v2/{name=projects/*/instances/*/tables/*}\x12\x87\x01\n\x0b\x44\x65leteTable\x12,.google.bigtable.admin.v2.DeleteTableRequest\x1a\x16.google.protobuf.Empty\"2\x82\xd3\xe4\x93\x02,**/v2/{name=projects/*/instances/*/tables/*}\x12\xba\x01\n\x14ModifyColumnFamilies\x12\x35.google.bigtable.admin.v2.ModifyColumnFamiliesRequest\x1a\x1f.google.bigtable.admin.v2.Table\"J\x82\xd3\xe4\x93\x02\x44\"?/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies:\x01*\x12\x99\x01\n\x0c\x44ropRowRange\x12-.google.bigtable.admin.v2.DropRowRangeRequest\x1a\x16.google.protobuf.Empty\"B\x82\xd3\xe4\x93\x02<\"7/v2/{name=projects/*/instances/*/tables/*}:dropRowRange:\x01*B9\n\x1c\x63om.google.bigtable.admin.v2B\x17\x42igtableTableAdminProtoP\x01\x62\x06proto3') + , + dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.DESCRIPTOR,google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,]) +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + + + + +_CREATETABLEREQUEST_SPLIT = _descriptor.Descriptor( + name='Split', + full_name='google.bigtable.admin.v2.CreateTableRequest.Split', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='google.bigtable.admin.v2.CreateTableRequest.Split.key', index=0, + number=1, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=359, + serialized_end=379, +) + +_CREATETABLEREQUEST = _descriptor.Descriptor( + name='CreateTableRequest', + full_name='google.bigtable.admin.v2.CreateTableRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='parent', full_name='google.bigtable.admin.v2.CreateTableRequest.parent', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='table_id', full_name='google.bigtable.admin.v2.CreateTableRequest.table_id', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, 
containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='table', full_name='google.bigtable.admin.v2.CreateTableRequest.table', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='initial_splits', full_name='google.bigtable.admin.v2.CreateTableRequest.initial_splits', index=3, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_CREATETABLEREQUEST_SPLIT, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=179, + serialized_end=379, +) + + +_DROPROWRANGEREQUEST = _descriptor.Descriptor( + name='DropRowRangeRequest', + full_name='google.bigtable.admin.v2.DropRowRangeRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.DropRowRangeRequest.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='row_key_prefix', full_name='google.bigtable.admin.v2.DropRowRangeRequest.row_key_prefix', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='delete_all_data_from_table', full_name='google.bigtable.admin.v2.DropRowRangeRequest.delete_all_data_from_table', index=2, + number=3, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='target', full_name='google.bigtable.admin.v2.DropRowRangeRequest.target', + index=0, containing_type=None, fields=[]), + ], + serialized_start=381, + serialized_end=490, +) + + +_LISTTABLESREQUEST = _descriptor.Descriptor( + name='ListTablesRequest', + full_name='google.bigtable.admin.v2.ListTablesRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='parent', full_name='google.bigtable.admin.v2.ListTablesRequest.parent', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='view', full_name='google.bigtable.admin.v2.ListTablesRequest.view', index=1, + number=2, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + 
name='page_token', full_name='google.bigtable.admin.v2.ListTablesRequest.page_token', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=492, + serialized_end=599, +) + + +_LISTTABLESRESPONSE = _descriptor.Descriptor( + name='ListTablesResponse', + full_name='google.bigtable.admin.v2.ListTablesResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='tables', full_name='google.bigtable.admin.v2.ListTablesResponse.tables', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='next_page_token', full_name='google.bigtable.admin.v2.ListTablesResponse.next_page_token', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=601, + serialized_end=695, +) + + +_GETTABLEREQUEST = _descriptor.Descriptor( + name='GetTableRequest', + full_name='google.bigtable.admin.v2.GetTableRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.GetTableRequest.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='view', full_name='google.bigtable.admin.v2.GetTableRequest.view', index=1, + number=2, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=697, + serialized_end=780, +) + + +_DELETETABLEREQUEST = _descriptor.Descriptor( + name='DeleteTableRequest', + full_name='google.bigtable.admin.v2.DeleteTableRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.DeleteTableRequest.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=782, + serialized_end=816, +) + + +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION = _descriptor.Descriptor( + 
name='Modification', + full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='id', full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.id', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='create', full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.create', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='update', full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.update', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='drop', full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.drop', index=3, + number=4, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='mod', full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.mod', + index=0, containing_type=None, fields=[]), + ], + serialized_start=956, + serialized_end=1121, +) + +_MODIFYCOLUMNFAMILIESREQUEST = _descriptor.Descriptor( + name='ModifyColumnFamiliesRequest', + full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='modifications', full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.modifications', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=819, + serialized_end=1121, +) + +_CREATETABLEREQUEST_SPLIT.containing_type = _CREATETABLEREQUEST +_CREATETABLEREQUEST.fields_by_name['table'].message_type = google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2._TABLE +_CREATETABLEREQUEST.fields_by_name['initial_splits'].message_type = _CREATETABLEREQUEST_SPLIT +_DROPROWRANGEREQUEST.oneofs_by_name['target'].fields.append( + 
_DROPROWRANGEREQUEST.fields_by_name['row_key_prefix']) +_DROPROWRANGEREQUEST.fields_by_name['row_key_prefix'].containing_oneof = _DROPROWRANGEREQUEST.oneofs_by_name['target'] +_DROPROWRANGEREQUEST.oneofs_by_name['target'].fields.append( + _DROPROWRANGEREQUEST.fields_by_name['delete_all_data_from_table']) +_DROPROWRANGEREQUEST.fields_by_name['delete_all_data_from_table'].containing_oneof = _DROPROWRANGEREQUEST.oneofs_by_name['target'] +_LISTTABLESREQUEST.fields_by_name['view'].enum_type = google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2._TABLE_VIEW +_LISTTABLESRESPONSE.fields_by_name['tables'].message_type = google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2._TABLE +_GETTABLEREQUEST.fields_by_name['view'].enum_type = google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2._TABLE_VIEW +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['create'].message_type = google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2._COLUMNFAMILY +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['update'].message_type = google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2._COLUMNFAMILY +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.containing_type = _MODIFYCOLUMNFAMILIESREQUEST +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name['mod'].fields.append( + _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['create']) +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['create'].containing_oneof = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name['mod'] +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name['mod'].fields.append( + _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['update']) +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['update'].containing_oneof = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name['mod'] +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name['mod'].fields.append( + _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['drop']) +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['drop'].containing_oneof = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name['mod'] +_MODIFYCOLUMNFAMILIESREQUEST.fields_by_name['modifications'].message_type = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION +DESCRIPTOR.message_types_by_name['CreateTableRequest'] = _CREATETABLEREQUEST +DESCRIPTOR.message_types_by_name['DropRowRangeRequest'] = _DROPROWRANGEREQUEST +DESCRIPTOR.message_types_by_name['ListTablesRequest'] = _LISTTABLESREQUEST +DESCRIPTOR.message_types_by_name['ListTablesResponse'] = _LISTTABLESRESPONSE +DESCRIPTOR.message_types_by_name['GetTableRequest'] = _GETTABLEREQUEST +DESCRIPTOR.message_types_by_name['DeleteTableRequest'] = _DELETETABLEREQUEST +DESCRIPTOR.message_types_by_name['ModifyColumnFamiliesRequest'] = _MODIFYCOLUMNFAMILIESREQUEST + +CreateTableRequest = _reflection.GeneratedProtocolMessageType('CreateTableRequest', (_message.Message,), dict( + + Split = _reflection.GeneratedProtocolMessageType('Split', (_message.Message,), dict( + DESCRIPTOR = _CREATETABLEREQUEST_SPLIT, + __module__ = 'google.bigtable.admin.v2.bigtable_table_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateTableRequest.Split) + )) + , + DESCRIPTOR = _CREATETABLEREQUEST, + __module__ = 'google.bigtable.admin.v2.bigtable_table_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateTableRequest) + )) +_sym_db.RegisterMessage(CreateTableRequest) +_sym_db.RegisterMessage(CreateTableRequest.Split) + +DropRowRangeRequest = _reflection.GeneratedProtocolMessageType('DropRowRangeRequest', 
(_message.Message,), dict( + DESCRIPTOR = _DROPROWRANGEREQUEST, + __module__ = 'google.bigtable.admin.v2.bigtable_table_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DropRowRangeRequest) + )) +_sym_db.RegisterMessage(DropRowRangeRequest) + +ListTablesRequest = _reflection.GeneratedProtocolMessageType('ListTablesRequest', (_message.Message,), dict( + DESCRIPTOR = _LISTTABLESREQUEST, + __module__ = 'google.bigtable.admin.v2.bigtable_table_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListTablesRequest) + )) +_sym_db.RegisterMessage(ListTablesRequest) + +ListTablesResponse = _reflection.GeneratedProtocolMessageType('ListTablesResponse', (_message.Message,), dict( + DESCRIPTOR = _LISTTABLESRESPONSE, + __module__ = 'google.bigtable.admin.v2.bigtable_table_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListTablesResponse) + )) +_sym_db.RegisterMessage(ListTablesResponse) + +GetTableRequest = _reflection.GeneratedProtocolMessageType('GetTableRequest', (_message.Message,), dict( + DESCRIPTOR = _GETTABLEREQUEST, + __module__ = 'google.bigtable.admin.v2.bigtable_table_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetTableRequest) + )) +_sym_db.RegisterMessage(GetTableRequest) + +DeleteTableRequest = _reflection.GeneratedProtocolMessageType('DeleteTableRequest', (_message.Message,), dict( + DESCRIPTOR = _DELETETABLEREQUEST, + __module__ = 'google.bigtable.admin.v2.bigtable_table_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteTableRequest) + )) +_sym_db.RegisterMessage(DeleteTableRequest) + +ModifyColumnFamiliesRequest = _reflection.GeneratedProtocolMessageType('ModifyColumnFamiliesRequest', (_message.Message,), dict( + + Modification = _reflection.GeneratedProtocolMessageType('Modification', (_message.Message,), dict( + DESCRIPTOR = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION, + __module__ = 'google.bigtable.admin.v2.bigtable_table_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification) + )) + , + DESCRIPTOR = _MODIFYCOLUMNFAMILIESREQUEST, + __module__ = 'google.bigtable.admin.v2.bigtable_table_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ModifyColumnFamiliesRequest) + )) +_sym_db.RegisterMessage(ModifyColumnFamiliesRequest) +_sym_db.RegisterMessage(ModifyColumnFamiliesRequest.Modification) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\034com.google.bigtable.admin.v2B\027BigtableTableAdminProtoP\001')) + +from grpc.beta import implementations as beta_implementations +from grpc.beta import interfaces as beta_interfaces +from grpc.framework.common import cardinality +from grpc.framework.interfaces.face import utilities as face_utilities + + +class BigtableTableAdminStub(object): + """Service for creating, configuring, and deleting Cloud Bigtable tables. + Provides access to the table schemas only, not the data stored within + the tables. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. 
+ """ + self.CreateTable = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/CreateTable', + request_serializer=CreateTableRequest.SerializeToString, + response_deserializer=google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.FromString, + ) + self.ListTables = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/ListTables', + request_serializer=ListTablesRequest.SerializeToString, + response_deserializer=ListTablesResponse.FromString, + ) + self.GetTable = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/GetTable', + request_serializer=GetTableRequest.SerializeToString, + response_deserializer=google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.FromString, + ) + self.DeleteTable = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable', + request_serializer=DeleteTableRequest.SerializeToString, + response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ) + self.ModifyColumnFamilies = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/ModifyColumnFamilies', + request_serializer=ModifyColumnFamiliesRequest.SerializeToString, + response_deserializer=google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.FromString, + ) + self.DropRowRange = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/DropRowRange', + request_serializer=DropRowRangeRequest.SerializeToString, + response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ) + + +class BigtableTableAdminServicer(object): + """Service for creating, configuring, and deleting Cloud Bigtable tables. + Provides access to the table schemas only, not the data stored within + the tables. + """ + + def CreateTable(self, request, context): + """Creates a new table in the specified instance. + The table can be created with a full set of initial column families, + specified in the request. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ListTables(self, request, context): + """Lists all tables served from a specified instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetTable(self, request, context): + """Gets metadata information about the specified table. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def DeleteTable(self, request, context): + """Permanently deletes a specified table and all of its data. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ModifyColumnFamilies(self, request, context): + """Atomically performs a series of column family modifications + on the specified table. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def DropRowRange(self, request, context): + """Permanently drop/delete a row range from a specified table. The request can + specify whether to delete all rows in a table, or only those that match a + particular prefix. 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_BigtableTableAdminServicer_to_server(servicer, server): + rpc_method_handlers = { + 'CreateTable': grpc.unary_unary_rpc_method_handler( + servicer.CreateTable, + request_deserializer=CreateTableRequest.FromString, + response_serializer=google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.SerializeToString, + ), + 'ListTables': grpc.unary_unary_rpc_method_handler( + servicer.ListTables, + request_deserializer=ListTablesRequest.FromString, + response_serializer=ListTablesResponse.SerializeToString, + ), + 'GetTable': grpc.unary_unary_rpc_method_handler( + servicer.GetTable, + request_deserializer=GetTableRequest.FromString, + response_serializer=google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.SerializeToString, + ), + 'DeleteTable': grpc.unary_unary_rpc_method_handler( + servicer.DeleteTable, + request_deserializer=DeleteTableRequest.FromString, + response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ), + 'ModifyColumnFamilies': grpc.unary_unary_rpc_method_handler( + servicer.ModifyColumnFamilies, + request_deserializer=ModifyColumnFamiliesRequest.FromString, + response_serializer=google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.SerializeToString, + ), + 'DropRowRange': grpc.unary_unary_rpc_method_handler( + servicer.DropRowRange, + request_deserializer=DropRowRangeRequest.FromString, + response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'google.bigtable.admin.v2.BigtableTableAdmin', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + +class BetaBigtableTableAdminServicer(object): + """Service for creating, configuring, and deleting Cloud Bigtable tables. + Provides access to the table schemas only, not the data stored within + the tables. + """ + def CreateTable(self, request, context): + """Creates a new table in the specified instance. + The table can be created with a full set of initial column families, + specified in the request. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def ListTables(self, request, context): + """Lists all tables served from a specified instance. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def GetTable(self, request, context): + """Gets metadata information about the specified table. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def DeleteTable(self, request, context): + """Permanently deletes a specified table and all of its data. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def ModifyColumnFamilies(self, request, context): + """Atomically performs a series of column family modifications + on the specified table. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def DropRowRange(self, request, context): + """Permanently drop/delete a row range from a specified table. The request can + specify whether to delete all rows in a table, or only those that match a + particular prefix. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + + +class BetaBigtableTableAdminStub(object): + """Service for creating, configuring, and deleting Cloud Bigtable tables. + Provides access to the table schemas only, not the data stored within + the tables. 
+ """ + def CreateTable(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Creates a new table in the specified instance. + The table can be created with a full set of initial column families, + specified in the request. + """ + raise NotImplementedError() + CreateTable.future = None + def ListTables(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Lists all tables served from a specified instance. + """ + raise NotImplementedError() + ListTables.future = None + def GetTable(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Gets metadata information about the specified table. + """ + raise NotImplementedError() + GetTable.future = None + def DeleteTable(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Permanently deletes a specified table and all of its data. + """ + raise NotImplementedError() + DeleteTable.future = None + def ModifyColumnFamilies(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Atomically performs a series of column family modifications + on the specified table. + """ + raise NotImplementedError() + ModifyColumnFamilies.future = None + def DropRowRange(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Permanently drop/delete a row range from a specified table. The request can + specify whether to delete all rows in a table, or only those that match a + particular prefix. + """ + raise NotImplementedError() + DropRowRange.future = None + + +def beta_create_BigtableTableAdmin_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): + request_deserializers = { + ('google.bigtable.admin.v2.BigtableTableAdmin', 'CreateTable'): CreateTableRequest.FromString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'DeleteTable'): DeleteTableRequest.FromString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'DropRowRange'): DropRowRangeRequest.FromString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'GetTable'): GetTableRequest.FromString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'ListTables'): ListTablesRequest.FromString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'ModifyColumnFamilies'): ModifyColumnFamiliesRequest.FromString, + } + response_serializers = { + ('google.bigtable.admin.v2.BigtableTableAdmin', 'CreateTable'): google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.SerializeToString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'DeleteTable'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'DropRowRange'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'GetTable'): google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.SerializeToString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'ListTables'): ListTablesResponse.SerializeToString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'ModifyColumnFamilies'): google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.SerializeToString, + } + method_implementations = { + ('google.bigtable.admin.v2.BigtableTableAdmin', 'CreateTable'): face_utilities.unary_unary_inline(servicer.CreateTable), + ('google.bigtable.admin.v2.BigtableTableAdmin', 'DeleteTable'): face_utilities.unary_unary_inline(servicer.DeleteTable), + ('google.bigtable.admin.v2.BigtableTableAdmin', 'DropRowRange'): 
face_utilities.unary_unary_inline(servicer.DropRowRange), + ('google.bigtable.admin.v2.BigtableTableAdmin', 'GetTable'): face_utilities.unary_unary_inline(servicer.GetTable), + ('google.bigtable.admin.v2.BigtableTableAdmin', 'ListTables'): face_utilities.unary_unary_inline(servicer.ListTables), + ('google.bigtable.admin.v2.BigtableTableAdmin', 'ModifyColumnFamilies'): face_utilities.unary_unary_inline(servicer.ModifyColumnFamilies), + } + server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) + return beta_implementations.server(method_implementations, options=server_options) + + +def beta_create_BigtableTableAdmin_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): + request_serializers = { + ('google.bigtable.admin.v2.BigtableTableAdmin', 'CreateTable'): CreateTableRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'DeleteTable'): DeleteTableRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'DropRowRange'): DropRowRangeRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'GetTable'): GetTableRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'ListTables'): ListTablesRequest.SerializeToString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'ModifyColumnFamilies'): ModifyColumnFamiliesRequest.SerializeToString, + } + response_deserializers = { + ('google.bigtable.admin.v2.BigtableTableAdmin', 'CreateTable'): google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.FromString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'DeleteTable'): google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'DropRowRange'): google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'GetTable'): google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.FromString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'ListTables'): ListTablesResponse.FromString, + ('google.bigtable.admin.v2.BigtableTableAdmin', 'ModifyColumnFamilies'): google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.FromString, + } + cardinalities = { + 'CreateTable': cardinality.Cardinality.UNARY_UNARY, + 'DeleteTable': cardinality.Cardinality.UNARY_UNARY, + 'DropRowRange': cardinality.Cardinality.UNARY_UNARY, + 'GetTable': cardinality.Cardinality.UNARY_UNARY, + 'ListTables': cardinality.Cardinality.UNARY_UNARY, + 'ModifyColumnFamilies': cardinality.Cardinality.UNARY_UNARY, + } + stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) + return beta_implementations.dynamic_stub(channel, 'google.bigtable.admin.v2.BigtableTableAdmin', cardinalities, options=stub_options) +# @@protoc_insertion_point(module_scope) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/common_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/common_pb2.py new file mode 100644 index 000000000000..298130452971 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/common_pb2.py @@ -0,0 +1,67 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/bigtable/admin/v2/common.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf.internal import enum_type_wrapper +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 +from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='google/bigtable/admin/v2/common.proto', + package='google.bigtable.admin.v2', + syntax='proto3', + serialized_pb=_b('\n%google/bigtable/admin/v2/common.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x1fgoogle/protobuf/timestamp.proto*=\n\x0bStorageType\x12\x1c\n\x18STORAGE_TYPE_UNSPECIFIED\x10\x00\x12\x07\n\x03SSD\x10\x01\x12\x07\n\x03HDD\x10\x02\x42-\n\x1c\x63om.google.bigtable.admin.v2B\x0b\x43ommonProtoP\x01\x62\x06proto3') + , + dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,]) +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +_STORAGETYPE = _descriptor.EnumDescriptor( + name='StorageType', + full_name='google.bigtable.admin.v2.StorageType', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='STORAGE_TYPE_UNSPECIFIED', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SSD', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='HDD', index=2, number=2, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=130, + serialized_end=191, +) +_sym_db.RegisterEnumDescriptor(_STORAGETYPE) + +StorageType = enum_type_wrapper.EnumTypeWrapper(_STORAGETYPE) +STORAGE_TYPE_UNSPECIFIED = 0 +SSD = 1 +HDD = 2 + + +DESCRIPTOR.enum_types_by_name['StorageType'] = _STORAGETYPE + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\034com.google.bigtable.admin.v2B\013CommonProtoP\001')) +# @@protoc_insertion_point(module_scope) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/data_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/data_pb2.py new file mode 100644 index 000000000000..6db08fbd12c3 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/data_pb2.py @@ -0,0 +1,1260 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/bigtable/v2/data.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='google/bigtable/v2/data.proto', + package='google.bigtable.v2', + syntax='proto3', + serialized_pb=_b('\n\x1dgoogle/bigtable/v2/data.proto\x12\x12google.bigtable.v2\"@\n\x03Row\x12\x0b\n\x03key\x18\x01 \x01(\x0c\x12,\n\x08\x66\x61milies\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v2.Family\"C\n\x06\x46\x61mily\x12\x0c\n\x04name\x18\x01 \x01(\t\x12+\n\x07\x63olumns\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v2.Column\"D\n\x06\x43olumn\x12\x11\n\tqualifier\x18\x01 \x01(\x0c\x12\'\n\x05\x63\x65lls\x18\x02 \x03(\x0b\x32\x18.google.bigtable.v2.Cell\"?\n\x04\x43\x65ll\x12\x18\n\x10timestamp_micros\x18\x01 \x01(\x03\x12\r\n\x05value\x18\x02 \x01(\x0c\x12\x0e\n\x06labels\x18\x03 \x03(\t\"\x8a\x01\n\x08RowRange\x12\x1a\n\x10start_key_closed\x18\x01 \x01(\x0cH\x00\x12\x18\n\x0estart_key_open\x18\x02 \x01(\x0cH\x00\x12\x16\n\x0c\x65nd_key_open\x18\x03 \x01(\x0cH\x01\x12\x18\n\x0e\x65nd_key_closed\x18\x04 \x01(\x0cH\x01\x42\x0b\n\tstart_keyB\t\n\x07\x65nd_key\"L\n\x06RowSet\x12\x10\n\x08row_keys\x18\x01 \x03(\x0c\x12\x30\n\nrow_ranges\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.RowRange\"\xc6\x01\n\x0b\x43olumnRange\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12 \n\x16start_qualifier_closed\x18\x02 \x01(\x0cH\x00\x12\x1e\n\x14start_qualifier_open\x18\x03 \x01(\x0cH\x00\x12\x1e\n\x14\x65nd_qualifier_closed\x18\x04 \x01(\x0cH\x01\x12\x1c\n\x12\x65nd_qualifier_open\x18\x05 \x01(\x0cH\x01\x42\x11\n\x0fstart_qualifierB\x0f\n\rend_qualifier\"N\n\x0eTimestampRange\x12\x1e\n\x16start_timestamp_micros\x18\x01 \x01(\x03\x12\x1c\n\x14\x65nd_timestamp_micros\x18\x02 \x01(\x03\"\x98\x01\n\nValueRange\x12\x1c\n\x12start_value_closed\x18\x01 \x01(\x0cH\x00\x12\x1a\n\x10start_value_open\x18\x02 \x01(\x0cH\x00\x12\x1a\n\x10\x65nd_value_closed\x18\x03 \x01(\x0cH\x01\x12\x18\n\x0e\x65nd_value_open\x18\x04 \x01(\x0cH\x01\x42\r\n\x0bstart_valueB\x0b\n\tend_value\"\xdf\x08\n\tRowFilter\x12\x34\n\x05\x63hain\x18\x01 \x01(\x0b\x32#.google.bigtable.v2.RowFilter.ChainH\x00\x12>\n\ninterleave\x18\x02 \x01(\x0b\x32(.google.bigtable.v2.RowFilter.InterleaveH\x00\x12<\n\tcondition\x18\x03 \x01(\x0b\x32\'.google.bigtable.v2.RowFilter.ConditionH\x00\x12\x0e\n\x04sink\x18\x10 \x01(\x08H\x00\x12\x19\n\x0fpass_all_filter\x18\x11 \x01(\x08H\x00\x12\x1a\n\x10\x62lock_all_filter\x18\x12 \x01(\x08H\x00\x12\x1e\n\x14row_key_regex_filter\x18\x04 \x01(\x0cH\x00\x12\x1b\n\x11row_sample_filter\x18\x0e \x01(\x01H\x00\x12\"\n\x18\x66\x61mily_name_regex_filter\x18\x05 \x01(\tH\x00\x12\'\n\x1d\x63olumn_qualifier_regex_filter\x18\x06 \x01(\x0cH\x00\x12>\n\x13\x63olumn_range_filter\x18\x07 \x01(\x0b\x32\x1f.google.bigtable.v2.ColumnRangeH\x00\x12\x44\n\x16timestamp_range_filter\x18\x08 \x01(\x0b\x32\".google.bigtable.v2.TimestampRangeH\x00\x12\x1c\n\x12value_regex_filter\x18\t \x01(\x0cH\x00\x12<\n\x12value_range_filter\x18\x0f \x01(\x0b\x32\x1e.google.bigtable.v2.ValueRangeH\x00\x12%\n\x1b\x63\x65lls_per_row_offset_filter\x18\n \x01(\x05H\x00\x12$\n\x1a\x63\x65lls_per_row_limit_filter\x18\x0b 
\x01(\x05H\x00\x12\'\n\x1d\x63\x65lls_per_column_limit_filter\x18\x0c \x01(\x05H\x00\x12!\n\x17strip_value_transformer\x18\r \x01(\x08H\x00\x12!\n\x17\x61pply_label_transformer\x18\x13 \x01(\tH\x00\x1a\x37\n\x05\x43hain\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x1a<\n\nInterleave\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x1a\xad\x01\n\tCondition\x12\x37\n\x10predicate_filter\x18\x01 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x32\n\x0btrue_filter\x18\x02 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x33\n\x0c\x66\x61lse_filter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilterB\x08\n\x06\x66ilter\"\xc9\x04\n\x08Mutation\x12\x38\n\x08set_cell\x18\x01 \x01(\x0b\x32$.google.bigtable.v2.Mutation.SetCellH\x00\x12K\n\x12\x64\x65lete_from_column\x18\x02 \x01(\x0b\x32-.google.bigtable.v2.Mutation.DeleteFromColumnH\x00\x12K\n\x12\x64\x65lete_from_family\x18\x03 \x01(\x0b\x32-.google.bigtable.v2.Mutation.DeleteFromFamilyH\x00\x12\x45\n\x0f\x64\x65lete_from_row\x18\x04 \x01(\x0b\x32*.google.bigtable.v2.Mutation.DeleteFromRowH\x00\x1a\x61\n\x07SetCell\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x18\n\x10timestamp_micros\x18\x03 \x01(\x03\x12\r\n\x05value\x18\x04 \x01(\x0c\x1ay\n\x10\x44\x65leteFromColumn\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x36\n\ntime_range\x18\x03 \x01(\x0b\x32\".google.bigtable.v2.TimestampRange\x1a\'\n\x10\x44\x65leteFromFamily\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x1a\x0f\n\rDeleteFromRowB\n\n\x08mutation\"\x80\x01\n\x13ReadModifyWriteRule\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x16\n\x0c\x61ppend_value\x18\x03 \x01(\x0cH\x00\x12\x1a\n\x10increment_amount\x18\x04 \x01(\x03H\x00\x42\x06\n\x04ruleB%\n\x16\x63om.google.bigtable.v2B\tDataProtoP\x01\x62\x06proto3') +) +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + + + + +_ROW = _descriptor.Descriptor( + name='Row', + full_name='google.bigtable.v2.Row', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='google.bigtable.v2.Row.key', index=0, + number=1, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='families', full_name='google.bigtable.v2.Row.families', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=53, + serialized_end=117, +) + + +_FAMILY = _descriptor.Descriptor( + name='Family', + full_name='google.bigtable.v2.Family', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.v2.Family.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='columns', 
full_name='google.bigtable.v2.Family.columns', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=119, + serialized_end=186, +) + + +_COLUMN = _descriptor.Descriptor( + name='Column', + full_name='google.bigtable.v2.Column', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='qualifier', full_name='google.bigtable.v2.Column.qualifier', index=0, + number=1, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='cells', full_name='google.bigtable.v2.Column.cells', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=188, + serialized_end=256, +) + + +_CELL = _descriptor.Descriptor( + name='Cell', + full_name='google.bigtable.v2.Cell', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='timestamp_micros', full_name='google.bigtable.v2.Cell.timestamp_micros', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='google.bigtable.v2.Cell.value', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='labels', full_name='google.bigtable.v2.Cell.labels', index=2, + number=3, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=258, + serialized_end=321, +) + + +_ROWRANGE = _descriptor.Descriptor( + name='RowRange', + full_name='google.bigtable.v2.RowRange', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='start_key_closed', full_name='google.bigtable.v2.RowRange.start_key_closed', index=0, + number=1, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='start_key_open', full_name='google.bigtable.v2.RowRange.start_key_open', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, 
containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='end_key_open', full_name='google.bigtable.v2.RowRange.end_key_open', index=2, + number=3, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='end_key_closed', full_name='google.bigtable.v2.RowRange.end_key_closed', index=3, + number=4, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='start_key', full_name='google.bigtable.v2.RowRange.start_key', + index=0, containing_type=None, fields=[]), + _descriptor.OneofDescriptor( + name='end_key', full_name='google.bigtable.v2.RowRange.end_key', + index=1, containing_type=None, fields=[]), + ], + serialized_start=324, + serialized_end=462, +) + + +_ROWSET = _descriptor.Descriptor( + name='RowSet', + full_name='google.bigtable.v2.RowSet', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='row_keys', full_name='google.bigtable.v2.RowSet.row_keys', index=0, + number=1, type=12, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='row_ranges', full_name='google.bigtable.v2.RowSet.row_ranges', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=464, + serialized_end=540, +) + + +_COLUMNRANGE = _descriptor.Descriptor( + name='ColumnRange', + full_name='google.bigtable.v2.ColumnRange', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='family_name', full_name='google.bigtable.v2.ColumnRange.family_name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='start_qualifier_closed', full_name='google.bigtable.v2.ColumnRange.start_qualifier_closed', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='start_qualifier_open', full_name='google.bigtable.v2.ColumnRange.start_qualifier_open', index=2, + number=3, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='end_qualifier_closed', 
full_name='google.bigtable.v2.ColumnRange.end_qualifier_closed', index=3, + number=4, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='end_qualifier_open', full_name='google.bigtable.v2.ColumnRange.end_qualifier_open', index=4, + number=5, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='start_qualifier', full_name='google.bigtable.v2.ColumnRange.start_qualifier', + index=0, containing_type=None, fields=[]), + _descriptor.OneofDescriptor( + name='end_qualifier', full_name='google.bigtable.v2.ColumnRange.end_qualifier', + index=1, containing_type=None, fields=[]), + ], + serialized_start=543, + serialized_end=741, +) + + +_TIMESTAMPRANGE = _descriptor.Descriptor( + name='TimestampRange', + full_name='google.bigtable.v2.TimestampRange', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='start_timestamp_micros', full_name='google.bigtable.v2.TimestampRange.start_timestamp_micros', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='end_timestamp_micros', full_name='google.bigtable.v2.TimestampRange.end_timestamp_micros', index=1, + number=2, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=743, + serialized_end=821, +) + + +_VALUERANGE = _descriptor.Descriptor( + name='ValueRange', + full_name='google.bigtable.v2.ValueRange', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='start_value_closed', full_name='google.bigtable.v2.ValueRange.start_value_closed', index=0, + number=1, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='start_value_open', full_name='google.bigtable.v2.ValueRange.start_value_open', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='end_value_closed', full_name='google.bigtable.v2.ValueRange.end_value_closed', index=2, + number=3, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='end_value_open', 
full_name='google.bigtable.v2.ValueRange.end_value_open', index=3, + number=4, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='start_value', full_name='google.bigtable.v2.ValueRange.start_value', + index=0, containing_type=None, fields=[]), + _descriptor.OneofDescriptor( + name='end_value', full_name='google.bigtable.v2.ValueRange.end_value', + index=1, containing_type=None, fields=[]), + ], + serialized_start=824, + serialized_end=976, +) + + +_ROWFILTER_CHAIN = _descriptor.Descriptor( + name='Chain', + full_name='google.bigtable.v2.RowFilter.Chain', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='filters', full_name='google.bigtable.v2.RowFilter.Chain.filters', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1795, + serialized_end=1850, +) + +_ROWFILTER_INTERLEAVE = _descriptor.Descriptor( + name='Interleave', + full_name='google.bigtable.v2.RowFilter.Interleave', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='filters', full_name='google.bigtable.v2.RowFilter.Interleave.filters', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1852, + serialized_end=1912, +) + +_ROWFILTER_CONDITION = _descriptor.Descriptor( + name='Condition', + full_name='google.bigtable.v2.RowFilter.Condition', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='predicate_filter', full_name='google.bigtable.v2.RowFilter.Condition.predicate_filter', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='true_filter', full_name='google.bigtable.v2.RowFilter.Condition.true_filter', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='false_filter', full_name='google.bigtable.v2.RowFilter.Condition.false_filter', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + 
is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1915, + serialized_end=2088, +) + +_ROWFILTER = _descriptor.Descriptor( + name='RowFilter', + full_name='google.bigtable.v2.RowFilter', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='chain', full_name='google.bigtable.v2.RowFilter.chain', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='interleave', full_name='google.bigtable.v2.RowFilter.interleave', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='condition', full_name='google.bigtable.v2.RowFilter.condition', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='sink', full_name='google.bigtable.v2.RowFilter.sink', index=3, + number=16, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='pass_all_filter', full_name='google.bigtable.v2.RowFilter.pass_all_filter', index=4, + number=17, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='block_all_filter', full_name='google.bigtable.v2.RowFilter.block_all_filter', index=5, + number=18, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='row_key_regex_filter', full_name='google.bigtable.v2.RowFilter.row_key_regex_filter', index=6, + number=4, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='row_sample_filter', full_name='google.bigtable.v2.RowFilter.row_sample_filter', index=7, + number=14, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='family_name_regex_filter', full_name='google.bigtable.v2.RowFilter.family_name_regex_filter', index=8, + number=5, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='column_qualifier_regex_filter', full_name='google.bigtable.v2.RowFilter.column_qualifier_regex_filter', index=9, + number=6, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + 
message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='column_range_filter', full_name='google.bigtable.v2.RowFilter.column_range_filter', index=10, + number=7, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='timestamp_range_filter', full_name='google.bigtable.v2.RowFilter.timestamp_range_filter', index=11, + number=8, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value_regex_filter', full_name='google.bigtable.v2.RowFilter.value_regex_filter', index=12, + number=9, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value_range_filter', full_name='google.bigtable.v2.RowFilter.value_range_filter', index=13, + number=15, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='cells_per_row_offset_filter', full_name='google.bigtable.v2.RowFilter.cells_per_row_offset_filter', index=14, + number=10, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='cells_per_row_limit_filter', full_name='google.bigtable.v2.RowFilter.cells_per_row_limit_filter', index=15, + number=11, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='cells_per_column_limit_filter', full_name='google.bigtable.v2.RowFilter.cells_per_column_limit_filter', index=16, + number=12, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='strip_value_transformer', full_name='google.bigtable.v2.RowFilter.strip_value_transformer', index=17, + number=13, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='apply_label_transformer', full_name='google.bigtable.v2.RowFilter.apply_label_transformer', index=18, + number=19, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_ROWFILTER_CHAIN, _ROWFILTER_INTERLEAVE, _ROWFILTER_CONDITION, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='filter', 
full_name='google.bigtable.v2.RowFilter.filter', + index=0, containing_type=None, fields=[]), + ], + serialized_start=979, + serialized_end=2098, +) + + +_MUTATION_SETCELL = _descriptor.Descriptor( + name='SetCell', + full_name='google.bigtable.v2.Mutation.SetCell', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='family_name', full_name='google.bigtable.v2.Mutation.SetCell.family_name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='column_qualifier', full_name='google.bigtable.v2.Mutation.SetCell.column_qualifier', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='timestamp_micros', full_name='google.bigtable.v2.Mutation.SetCell.timestamp_micros', index=2, + number=3, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='google.bigtable.v2.Mutation.SetCell.value', index=3, + number=4, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2396, + serialized_end=2493, +) + +_MUTATION_DELETEFROMCOLUMN = _descriptor.Descriptor( + name='DeleteFromColumn', + full_name='google.bigtable.v2.Mutation.DeleteFromColumn', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='family_name', full_name='google.bigtable.v2.Mutation.DeleteFromColumn.family_name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='column_qualifier', full_name='google.bigtable.v2.Mutation.DeleteFromColumn.column_qualifier', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='time_range', full_name='google.bigtable.v2.Mutation.DeleteFromColumn.time_range', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2495, + serialized_end=2616, +) + +_MUTATION_DELETEFROMFAMILY = _descriptor.Descriptor( + name='DeleteFromFamily', + full_name='google.bigtable.v2.Mutation.DeleteFromFamily', + filename=None, + 
file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='family_name', full_name='google.bigtable.v2.Mutation.DeleteFromFamily.family_name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2618, + serialized_end=2657, +) + +_MUTATION_DELETEFROMROW = _descriptor.Descriptor( + name='DeleteFromRow', + full_name='google.bigtable.v2.Mutation.DeleteFromRow', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2659, + serialized_end=2674, +) + +_MUTATION = _descriptor.Descriptor( + name='Mutation', + full_name='google.bigtable.v2.Mutation', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='set_cell', full_name='google.bigtable.v2.Mutation.set_cell', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='delete_from_column', full_name='google.bigtable.v2.Mutation.delete_from_column', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='delete_from_family', full_name='google.bigtable.v2.Mutation.delete_from_family', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='delete_from_row', full_name='google.bigtable.v2.Mutation.delete_from_row', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_MUTATION_SETCELL, _MUTATION_DELETEFROMCOLUMN, _MUTATION_DELETEFROMFAMILY, _MUTATION_DELETEFROMROW, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='mutation', full_name='google.bigtable.v2.Mutation.mutation', + index=0, containing_type=None, fields=[]), + ], + serialized_start=2101, + serialized_end=2686, +) + + +_READMODIFYWRITERULE = _descriptor.Descriptor( + name='ReadModifyWriteRule', + full_name='google.bigtable.v2.ReadModifyWriteRule', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='family_name', full_name='google.bigtable.v2.ReadModifyWriteRule.family_name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + 
options=None), + _descriptor.FieldDescriptor( + name='column_qualifier', full_name='google.bigtable.v2.ReadModifyWriteRule.column_qualifier', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='append_value', full_name='google.bigtable.v2.ReadModifyWriteRule.append_value', index=2, + number=3, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='increment_amount', full_name='google.bigtable.v2.ReadModifyWriteRule.increment_amount', index=3, + number=4, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='rule', full_name='google.bigtable.v2.ReadModifyWriteRule.rule', + index=0, containing_type=None, fields=[]), + ], + serialized_start=2689, + serialized_end=2817, +) + +_ROW.fields_by_name['families'].message_type = _FAMILY +_FAMILY.fields_by_name['columns'].message_type = _COLUMN +_COLUMN.fields_by_name['cells'].message_type = _CELL +_ROWRANGE.oneofs_by_name['start_key'].fields.append( + _ROWRANGE.fields_by_name['start_key_closed']) +_ROWRANGE.fields_by_name['start_key_closed'].containing_oneof = _ROWRANGE.oneofs_by_name['start_key'] +_ROWRANGE.oneofs_by_name['start_key'].fields.append( + _ROWRANGE.fields_by_name['start_key_open']) +_ROWRANGE.fields_by_name['start_key_open'].containing_oneof = _ROWRANGE.oneofs_by_name['start_key'] +_ROWRANGE.oneofs_by_name['end_key'].fields.append( + _ROWRANGE.fields_by_name['end_key_open']) +_ROWRANGE.fields_by_name['end_key_open'].containing_oneof = _ROWRANGE.oneofs_by_name['end_key'] +_ROWRANGE.oneofs_by_name['end_key'].fields.append( + _ROWRANGE.fields_by_name['end_key_closed']) +_ROWRANGE.fields_by_name['end_key_closed'].containing_oneof = _ROWRANGE.oneofs_by_name['end_key'] +_ROWSET.fields_by_name['row_ranges'].message_type = _ROWRANGE +_COLUMNRANGE.oneofs_by_name['start_qualifier'].fields.append( + _COLUMNRANGE.fields_by_name['start_qualifier_closed']) +_COLUMNRANGE.fields_by_name['start_qualifier_closed'].containing_oneof = _COLUMNRANGE.oneofs_by_name['start_qualifier'] +_COLUMNRANGE.oneofs_by_name['start_qualifier'].fields.append( + _COLUMNRANGE.fields_by_name['start_qualifier_open']) +_COLUMNRANGE.fields_by_name['start_qualifier_open'].containing_oneof = _COLUMNRANGE.oneofs_by_name['start_qualifier'] +_COLUMNRANGE.oneofs_by_name['end_qualifier'].fields.append( + _COLUMNRANGE.fields_by_name['end_qualifier_closed']) +_COLUMNRANGE.fields_by_name['end_qualifier_closed'].containing_oneof = _COLUMNRANGE.oneofs_by_name['end_qualifier'] +_COLUMNRANGE.oneofs_by_name['end_qualifier'].fields.append( + _COLUMNRANGE.fields_by_name['end_qualifier_open']) +_COLUMNRANGE.fields_by_name['end_qualifier_open'].containing_oneof = _COLUMNRANGE.oneofs_by_name['end_qualifier'] +_VALUERANGE.oneofs_by_name['start_value'].fields.append( + _VALUERANGE.fields_by_name['start_value_closed']) +_VALUERANGE.fields_by_name['start_value_closed'].containing_oneof = 
_VALUERANGE.oneofs_by_name['start_value'] +_VALUERANGE.oneofs_by_name['start_value'].fields.append( + _VALUERANGE.fields_by_name['start_value_open']) +_VALUERANGE.fields_by_name['start_value_open'].containing_oneof = _VALUERANGE.oneofs_by_name['start_value'] +_VALUERANGE.oneofs_by_name['end_value'].fields.append( + _VALUERANGE.fields_by_name['end_value_closed']) +_VALUERANGE.fields_by_name['end_value_closed'].containing_oneof = _VALUERANGE.oneofs_by_name['end_value'] +_VALUERANGE.oneofs_by_name['end_value'].fields.append( + _VALUERANGE.fields_by_name['end_value_open']) +_VALUERANGE.fields_by_name['end_value_open'].containing_oneof = _VALUERANGE.oneofs_by_name['end_value'] +_ROWFILTER_CHAIN.fields_by_name['filters'].message_type = _ROWFILTER +_ROWFILTER_CHAIN.containing_type = _ROWFILTER +_ROWFILTER_INTERLEAVE.fields_by_name['filters'].message_type = _ROWFILTER +_ROWFILTER_INTERLEAVE.containing_type = _ROWFILTER +_ROWFILTER_CONDITION.fields_by_name['predicate_filter'].message_type = _ROWFILTER +_ROWFILTER_CONDITION.fields_by_name['true_filter'].message_type = _ROWFILTER +_ROWFILTER_CONDITION.fields_by_name['false_filter'].message_type = _ROWFILTER +_ROWFILTER_CONDITION.containing_type = _ROWFILTER +_ROWFILTER.fields_by_name['chain'].message_type = _ROWFILTER_CHAIN +_ROWFILTER.fields_by_name['interleave'].message_type = _ROWFILTER_INTERLEAVE +_ROWFILTER.fields_by_name['condition'].message_type = _ROWFILTER_CONDITION +_ROWFILTER.fields_by_name['column_range_filter'].message_type = _COLUMNRANGE +_ROWFILTER.fields_by_name['timestamp_range_filter'].message_type = _TIMESTAMPRANGE +_ROWFILTER.fields_by_name['value_range_filter'].message_type = _VALUERANGE +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['chain']) +_ROWFILTER.fields_by_name['chain'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['interleave']) +_ROWFILTER.fields_by_name['interleave'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['condition']) +_ROWFILTER.fields_by_name['condition'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['sink']) +_ROWFILTER.fields_by_name['sink'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['pass_all_filter']) +_ROWFILTER.fields_by_name['pass_all_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['block_all_filter']) +_ROWFILTER.fields_by_name['block_all_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['row_key_regex_filter']) +_ROWFILTER.fields_by_name['row_key_regex_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['row_sample_filter']) +_ROWFILTER.fields_by_name['row_sample_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['family_name_regex_filter']) +_ROWFILTER.fields_by_name['family_name_regex_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + 
_ROWFILTER.fields_by_name['column_qualifier_regex_filter']) +_ROWFILTER.fields_by_name['column_qualifier_regex_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['column_range_filter']) +_ROWFILTER.fields_by_name['column_range_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['timestamp_range_filter']) +_ROWFILTER.fields_by_name['timestamp_range_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['value_regex_filter']) +_ROWFILTER.fields_by_name['value_regex_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['value_range_filter']) +_ROWFILTER.fields_by_name['value_range_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['cells_per_row_offset_filter']) +_ROWFILTER.fields_by_name['cells_per_row_offset_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['cells_per_row_limit_filter']) +_ROWFILTER.fields_by_name['cells_per_row_limit_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['cells_per_column_limit_filter']) +_ROWFILTER.fields_by_name['cells_per_column_limit_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['strip_value_transformer']) +_ROWFILTER.fields_by_name['strip_value_transformer'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.oneofs_by_name['filter'].fields.append( + _ROWFILTER.fields_by_name['apply_label_transformer']) +_ROWFILTER.fields_by_name['apply_label_transformer'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_MUTATION_SETCELL.containing_type = _MUTATION +_MUTATION_DELETEFROMCOLUMN.fields_by_name['time_range'].message_type = _TIMESTAMPRANGE +_MUTATION_DELETEFROMCOLUMN.containing_type = _MUTATION +_MUTATION_DELETEFROMFAMILY.containing_type = _MUTATION +_MUTATION_DELETEFROMROW.containing_type = _MUTATION +_MUTATION.fields_by_name['set_cell'].message_type = _MUTATION_SETCELL +_MUTATION.fields_by_name['delete_from_column'].message_type = _MUTATION_DELETEFROMCOLUMN +_MUTATION.fields_by_name['delete_from_family'].message_type = _MUTATION_DELETEFROMFAMILY +_MUTATION.fields_by_name['delete_from_row'].message_type = _MUTATION_DELETEFROMROW +_MUTATION.oneofs_by_name['mutation'].fields.append( + _MUTATION.fields_by_name['set_cell']) +_MUTATION.fields_by_name['set_cell'].containing_oneof = _MUTATION.oneofs_by_name['mutation'] +_MUTATION.oneofs_by_name['mutation'].fields.append( + _MUTATION.fields_by_name['delete_from_column']) +_MUTATION.fields_by_name['delete_from_column'].containing_oneof = _MUTATION.oneofs_by_name['mutation'] +_MUTATION.oneofs_by_name['mutation'].fields.append( + _MUTATION.fields_by_name['delete_from_family']) +_MUTATION.fields_by_name['delete_from_family'].containing_oneof = _MUTATION.oneofs_by_name['mutation'] +_MUTATION.oneofs_by_name['mutation'].fields.append( + _MUTATION.fields_by_name['delete_from_row']) +_MUTATION.fields_by_name['delete_from_row'].containing_oneof = 
_MUTATION.oneofs_by_name['mutation'] +_READMODIFYWRITERULE.oneofs_by_name['rule'].fields.append( + _READMODIFYWRITERULE.fields_by_name['append_value']) +_READMODIFYWRITERULE.fields_by_name['append_value'].containing_oneof = _READMODIFYWRITERULE.oneofs_by_name['rule'] +_READMODIFYWRITERULE.oneofs_by_name['rule'].fields.append( + _READMODIFYWRITERULE.fields_by_name['increment_amount']) +_READMODIFYWRITERULE.fields_by_name['increment_amount'].containing_oneof = _READMODIFYWRITERULE.oneofs_by_name['rule'] +DESCRIPTOR.message_types_by_name['Row'] = _ROW +DESCRIPTOR.message_types_by_name['Family'] = _FAMILY +DESCRIPTOR.message_types_by_name['Column'] = _COLUMN +DESCRIPTOR.message_types_by_name['Cell'] = _CELL +DESCRIPTOR.message_types_by_name['RowRange'] = _ROWRANGE +DESCRIPTOR.message_types_by_name['RowSet'] = _ROWSET +DESCRIPTOR.message_types_by_name['ColumnRange'] = _COLUMNRANGE +DESCRIPTOR.message_types_by_name['TimestampRange'] = _TIMESTAMPRANGE +DESCRIPTOR.message_types_by_name['ValueRange'] = _VALUERANGE +DESCRIPTOR.message_types_by_name['RowFilter'] = _ROWFILTER +DESCRIPTOR.message_types_by_name['Mutation'] = _MUTATION +DESCRIPTOR.message_types_by_name['ReadModifyWriteRule'] = _READMODIFYWRITERULE + +Row = _reflection.GeneratedProtocolMessageType('Row', (_message.Message,), dict( + DESCRIPTOR = _ROW, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.Row) + )) +_sym_db.RegisterMessage(Row) + +Family = _reflection.GeneratedProtocolMessageType('Family', (_message.Message,), dict( + DESCRIPTOR = _FAMILY, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.Family) + )) +_sym_db.RegisterMessage(Family) + +Column = _reflection.GeneratedProtocolMessageType('Column', (_message.Message,), dict( + DESCRIPTOR = _COLUMN, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.Column) + )) +_sym_db.RegisterMessage(Column) + +Cell = _reflection.GeneratedProtocolMessageType('Cell', (_message.Message,), dict( + DESCRIPTOR = _CELL, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.Cell) + )) +_sym_db.RegisterMessage(Cell) + +RowRange = _reflection.GeneratedProtocolMessageType('RowRange', (_message.Message,), dict( + DESCRIPTOR = _ROWRANGE, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowRange) + )) +_sym_db.RegisterMessage(RowRange) + +RowSet = _reflection.GeneratedProtocolMessageType('RowSet', (_message.Message,), dict( + DESCRIPTOR = _ROWSET, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowSet) + )) +_sym_db.RegisterMessage(RowSet) + +ColumnRange = _reflection.GeneratedProtocolMessageType('ColumnRange', (_message.Message,), dict( + DESCRIPTOR = _COLUMNRANGE, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.ColumnRange) + )) +_sym_db.RegisterMessage(ColumnRange) + +TimestampRange = _reflection.GeneratedProtocolMessageType('TimestampRange', (_message.Message,), dict( + DESCRIPTOR = _TIMESTAMPRANGE, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.TimestampRange) + )) +_sym_db.RegisterMessage(TimestampRange) + +ValueRange = _reflection.GeneratedProtocolMessageType('ValueRange', (_message.Message,), dict( + DESCRIPTOR = _VALUERANGE, + __module__ = 
'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.ValueRange) + )) +_sym_db.RegisterMessage(ValueRange) + +RowFilter = _reflection.GeneratedProtocolMessageType('RowFilter', (_message.Message,), dict( + + Chain = _reflection.GeneratedProtocolMessageType('Chain', (_message.Message,), dict( + DESCRIPTOR = _ROWFILTER_CHAIN, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowFilter.Chain) + )) + , + + Interleave = _reflection.GeneratedProtocolMessageType('Interleave', (_message.Message,), dict( + DESCRIPTOR = _ROWFILTER_INTERLEAVE, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowFilter.Interleave) + )) + , + + Condition = _reflection.GeneratedProtocolMessageType('Condition', (_message.Message,), dict( + DESCRIPTOR = _ROWFILTER_CONDITION, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowFilter.Condition) + )) + , + DESCRIPTOR = _ROWFILTER, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowFilter) + )) +_sym_db.RegisterMessage(RowFilter) +_sym_db.RegisterMessage(RowFilter.Chain) +_sym_db.RegisterMessage(RowFilter.Interleave) +_sym_db.RegisterMessage(RowFilter.Condition) + +Mutation = _reflection.GeneratedProtocolMessageType('Mutation', (_message.Message,), dict( + + SetCell = _reflection.GeneratedProtocolMessageType('SetCell', (_message.Message,), dict( + DESCRIPTOR = _MUTATION_SETCELL, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation.SetCell) + )) + , + + DeleteFromColumn = _reflection.GeneratedProtocolMessageType('DeleteFromColumn', (_message.Message,), dict( + DESCRIPTOR = _MUTATION_DELETEFROMCOLUMN, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation.DeleteFromColumn) + )) + , + + DeleteFromFamily = _reflection.GeneratedProtocolMessageType('DeleteFromFamily', (_message.Message,), dict( + DESCRIPTOR = _MUTATION_DELETEFROMFAMILY, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation.DeleteFromFamily) + )) + , + + DeleteFromRow = _reflection.GeneratedProtocolMessageType('DeleteFromRow', (_message.Message,), dict( + DESCRIPTOR = _MUTATION_DELETEFROMROW, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation.DeleteFromRow) + )) + , + DESCRIPTOR = _MUTATION, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation) + )) +_sym_db.RegisterMessage(Mutation) +_sym_db.RegisterMessage(Mutation.SetCell) +_sym_db.RegisterMessage(Mutation.DeleteFromColumn) +_sym_db.RegisterMessage(Mutation.DeleteFromFamily) +_sym_db.RegisterMessage(Mutation.DeleteFromRow) + +ReadModifyWriteRule = _reflection.GeneratedProtocolMessageType('ReadModifyWriteRule', (_message.Message,), dict( + DESCRIPTOR = _READMODIFYWRITERULE, + __module__ = 'google.bigtable.v2.data_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadModifyWriteRule) + )) +_sym_db.RegisterMessage(ReadModifyWriteRule) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\026com.google.bigtable.v2B\tDataProtoP\001')) +# @@protoc_insertion_point(module_scope) diff --git 
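The data_pb2 module above defines the Bigtable v2 data-plane messages (Row, RowSet, RowRange, RowFilter, Mutation, ReadModifyWriteRule). A minimal usage sketch follows, assuming the _generated import path added in this patch; the family name, qualifier, and value are illustrative placeholders, not values taken from this module:

from google.cloud.bigtable._generated import data_pb2

# Keep only the newest cell per column, then restrict to one column family.
row_filter = data_pb2.RowFilter(
    chain=data_pb2.RowFilter.Chain(filters=[
        data_pb2.RowFilter(cells_per_column_limit_filter=1),
        data_pb2.RowFilter(family_name_regex_filter=u'cf1'),
    ]))

# Write a single cell; -1 requests a server-assigned timestamp.
mutation = data_pb2.Mutation(
    set_cell=data_pb2.Mutation.SetCell(
        family_name=u'cf1',
        column_qualifier=b'greeting',
        timestamp_micros=-1,
        value=b'hello world'))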
a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/instance_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/instance_pb2.py new file mode 100644 index 000000000000..8d4383d31e79 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/instance_pb2.py @@ -0,0 +1,222 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/bigtable/admin/v2/instance.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 +from google.cloud.bigtable._generated import common_pb2 as google_dot_bigtable_dot_admin_dot_v2_dot_common__pb2 + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='google/bigtable/admin/v2/instance.proto', + package='google.bigtable.admin.v2', + syntax='proto3', + serialized_pb=_b('\n\'google/bigtable/admin/v2/instance.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a%google/bigtable/admin/v2/common.proto\"\x9e\x01\n\x08Instance\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12\x37\n\x05state\x18\x03 \x01(\x0e\x32(.google.bigtable.admin.v2.Instance.State\"5\n\x05State\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\t\n\x05READY\x10\x01\x12\x0c\n\x08\x43REATING\x10\x02\"\x8e\x02\n\x07\x43luster\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x10\n\x08location\x18\x02 \x01(\t\x12\x36\n\x05state\x18\x03 \x01(\x0e\x32\'.google.bigtable.admin.v2.Cluster.State\x12\x13\n\x0bserve_nodes\x18\x04 \x01(\x05\x12\x43\n\x14\x64\x65\x66\x61ult_storage_type\x18\x05 \x01(\x0e\x32%.google.bigtable.admin.v2.StorageType\"Q\n\x05State\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\t\n\x05READY\x10\x01\x12\x0c\n\x08\x43REATING\x10\x02\x12\x0c\n\x08RESIZING\x10\x03\x12\x0c\n\x08\x44ISABLED\x10\x04\x42/\n\x1c\x63om.google.bigtable.admin.v2B\rInstanceProtoP\x01\x62\x06proto3') + , + dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_bigtable_dot_admin_dot_v2_dot_common__pb2.DESCRIPTOR,]) +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + + + +_INSTANCE_STATE = _descriptor.EnumDescriptor( + name='State', + full_name='google.bigtable.admin.v2.Instance.State', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='STATE_NOT_KNOWN', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='READY', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='CREATING', index=2, number=2, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=244, + serialized_end=297, +) +_sym_db.RegisterEnumDescriptor(_INSTANCE_STATE) + +_CLUSTER_STATE = _descriptor.EnumDescriptor( + name='State', + full_name='google.bigtable.admin.v2.Cluster.State', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='STATE_NOT_KNOWN', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='READY', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='CREATING', index=2, number=2, + options=None, + 
type=None), + _descriptor.EnumValueDescriptor( + name='RESIZING', index=3, number=3, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='DISABLED', index=4, number=4, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=489, + serialized_end=570, +) +_sym_db.RegisterEnumDescriptor(_CLUSTER_STATE) + + +_INSTANCE = _descriptor.Descriptor( + name='Instance', + full_name='google.bigtable.admin.v2.Instance', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.Instance.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='display_name', full_name='google.bigtable.admin.v2.Instance.display_name', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='state', full_name='google.bigtable.admin.v2.Instance.state', index=2, + number=3, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _INSTANCE_STATE, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=139, + serialized_end=297, +) + + +_CLUSTER = _descriptor.Descriptor( + name='Cluster', + full_name='google.bigtable.admin.v2.Cluster', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.Cluster.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='location', full_name='google.bigtable.admin.v2.Cluster.location', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='state', full_name='google.bigtable.admin.v2.Cluster.state', index=2, + number=3, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='serve_nodes', full_name='google.bigtable.admin.v2.Cluster.serve_nodes', index=3, + number=4, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='default_storage_type', full_name='google.bigtable.admin.v2.Cluster.default_storage_type', index=4, + number=5, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + 
is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _CLUSTER_STATE, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=300, + serialized_end=570, +) + +_INSTANCE.fields_by_name['state'].enum_type = _INSTANCE_STATE +_INSTANCE_STATE.containing_type = _INSTANCE +_CLUSTER.fields_by_name['state'].enum_type = _CLUSTER_STATE +_CLUSTER.fields_by_name['default_storage_type'].enum_type = google_dot_bigtable_dot_admin_dot_v2_dot_common__pb2._STORAGETYPE +_CLUSTER_STATE.containing_type = _CLUSTER +DESCRIPTOR.message_types_by_name['Instance'] = _INSTANCE +DESCRIPTOR.message_types_by_name['Cluster'] = _CLUSTER + +Instance = _reflection.GeneratedProtocolMessageType('Instance', (_message.Message,), dict( + DESCRIPTOR = _INSTANCE, + __module__ = 'google.bigtable.admin.v2.instance_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Instance) + )) +_sym_db.RegisterMessage(Instance) + +Cluster = _reflection.GeneratedProtocolMessageType('Cluster', (_message.Message,), dict( + DESCRIPTOR = _CLUSTER, + __module__ = 'google.bigtable.admin.v2.instance_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Cluster) + )) +_sym_db.RegisterMessage(Cluster) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\034com.google.bigtable.admin.v2B\rInstanceProtoP\001')) +# @@protoc_insertion_point(module_scope) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/operations_grpc_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/operations_grpc_pb2.py new file mode 100644 index 000000000000..5723e1d99fe0 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/operations_grpc_pb2.py @@ -0,0 +1,264 @@ +from google.longrunning.operations_pb2 import ( + CancelOperationRequest, + DeleteOperationRequest, + GetOperationRequest, + ListOperationsRequest, + ListOperationsResponse, + Operation, + google_dot_protobuf_dot_empty__pb2, +) +from grpc.beta import implementations as beta_implementations +from grpc.beta import interfaces as beta_interfaces +from grpc.framework.common import cardinality +from grpc.framework.interfaces.face import utilities as face_utilities + + +class OperationsStub(object): + """Manages long-running operations with an API service. + + When an API method normally takes long time to complete, it can be designed + to return [Operation][google.longrunning.Operation] to the client, and the client can use this + interface to receive the real response asynchronously by polling the + operation resource, or using `google.watcher.v1.Watcher` interface to watch + the response, or pass the operation resource to another API (such as Google + Cloud Pub/Sub API) to receive the response. Any API service that returns + long-running operations should implement the `Operations` interface so + developers can have a consistent client experience. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. 
+ """ + self.GetOperation = channel.unary_unary( + '/google.longrunning.Operations/GetOperation', + request_serializer=GetOperationRequest.SerializeToString, + response_deserializer=Operation.FromString, + ) + self.ListOperations = channel.unary_unary( + '/google.longrunning.Operations/ListOperations', + request_serializer=ListOperationsRequest.SerializeToString, + response_deserializer=ListOperationsResponse.FromString, + ) + self.CancelOperation = channel.unary_unary( + '/google.longrunning.Operations/CancelOperation', + request_serializer=CancelOperationRequest.SerializeToString, + response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ) + self.DeleteOperation = channel.unary_unary( + '/google.longrunning.Operations/DeleteOperation', + request_serializer=DeleteOperationRequest.SerializeToString, + response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ) + + +class OperationsServicer(object): + """Manages long-running operations with an API service. + + When an API method normally takes long time to complete, it can be designed + to return [Operation][google.longrunning.Operation] to the client, and the client can use this + interface to receive the real response asynchronously by polling the + operation resource, or using `google.watcher.v1.Watcher` interface to watch + the response, or pass the operation resource to another API (such as Google + Cloud Pub/Sub API) to receive the response. Any API service that returns + long-running operations should implement the `Operations` interface so + developers can have a consistent client experience. + """ + + def GetOperation(self, request, context): + """Gets the latest state of a long-running operation. Clients may use this + method to poll the operation result at intervals as recommended by the API + service. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ListOperations(self, request, context): + """Lists operations that match the specified filter in the request. If the + server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def CancelOperation(self, request, context): + """Starts asynchronous cancellation on a long-running operation. The server + makes a best effort to cancel the operation, but success is not + guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. Clients may use + [Operations.GetOperation] or other methods to check whether the + cancellation succeeded or the operation completed despite cancellation. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def DeleteOperation(self, request, context): + """Deletes a long-running operation. It indicates the client is no longer + interested in the operation result. It does not cancel the operation. 
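The add_OperationsServicer_to_server helper defined below registers a concrete servicer's handlers on a grpc server. A registration sketch under assumptions: MyOperationsServicer is a hypothetical subclass of OperationsServicer implementing the four methods above, and grpc is imported explicitly since the generated handlers reference it:

from concurrent import futures
import grpc

# MyOperationsServicer is hypothetical; it would override GetOperation,
# ListOperations, CancelOperation, and DeleteOperation.
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
add_OperationsServicer_to_server(MyOperationsServicer(), server)
server.add_insecure_port('[::]:50051')
server.start()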
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_OperationsServicer_to_server(servicer, server): + rpc_method_handlers = { + 'GetOperation': grpc.unary_unary_rpc_method_handler( + servicer.GetOperation, + request_deserializer=GetOperationRequest.FromString, + response_serializer=Operation.SerializeToString, + ), + 'ListOperations': grpc.unary_unary_rpc_method_handler( + servicer.ListOperations, + request_deserializer=ListOperationsRequest.FromString, + response_serializer=ListOperationsResponse.SerializeToString, + ), + 'CancelOperation': grpc.unary_unary_rpc_method_handler( + servicer.CancelOperation, + request_deserializer=CancelOperationRequest.FromString, + response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ), + 'DeleteOperation': grpc.unary_unary_rpc_method_handler( + servicer.DeleteOperation, + request_deserializer=DeleteOperationRequest.FromString, + response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'google.longrunning.Operations', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + +class BetaOperationsServicer(object): + """Manages long-running operations with an API service. + + When an API method normally takes long time to complete, it can be designed + to return [Operation][google.longrunning.Operation] to the client, and the client can use this + interface to receive the real response asynchronously by polling the + operation resource, or using `google.watcher.v1.Watcher` interface to watch + the response, or pass the operation resource to another API (such as Google + Cloud Pub/Sub API) to receive the response. Any API service that returns + long-running operations should implement the `Operations` interface so + developers can have a consistent client experience. + """ + def GetOperation(self, request, context): + """Gets the latest state of a long-running operation. Clients may use this + method to poll the operation result at intervals as recommended by the API + service. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def ListOperations(self, request, context): + """Lists operations that match the specified filter in the request. If the + server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def CancelOperation(self, request, context): + """Starts asynchronous cancellation on a long-running operation. The server + makes a best effort to cancel the operation, but success is not + guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. Clients may use + [Operations.GetOperation] or other methods to check whether the + cancellation succeeded or the operation completed despite cancellation. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def DeleteOperation(self, request, context): + """Deletes a long-running operation. It indicates the client is no longer + interested in the operation result. It does not cancel the operation. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + + +class BetaOperationsStub(object): + """Manages long-running operations with an API service. 
+ + When an API method normally takes long time to complete, it can be designed + to return [Operation][google.longrunning.Operation] to the client, and the client can use this + interface to receive the real response asynchronously by polling the + operation resource, or using `google.watcher.v1.Watcher` interface to watch + the response, or pass the operation resource to another API (such as Google + Cloud Pub/Sub API) to receive the response. Any API service that returns + long-running operations should implement the `Operations` interface so + developers can have a consistent client experience. + """ + def GetOperation(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Gets the latest state of a long-running operation. Clients may use this + method to poll the operation result at intervals as recommended by the API + service. + """ + raise NotImplementedError() + GetOperation.future = None + def ListOperations(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Lists operations that match the specified filter in the request. If the + server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + """ + raise NotImplementedError() + ListOperations.future = None + def CancelOperation(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Starts asynchronous cancellation on a long-running operation. The server + makes a best effort to cancel the operation, but success is not + guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. Clients may use + [Operations.GetOperation] or other methods to check whether the + cancellation succeeded or the operation completed despite cancellation. + """ + raise NotImplementedError() + CancelOperation.future = None + def DeleteOperation(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Deletes a long-running operation. It indicates the client is no longer + interested in the operation result. It does not cancel the operation. 
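For the non-beta path, the OperationsStub defined earlier wires each RPC through channel.unary_unary, so a caller can poll a long-running operation until it reports done. A minimal polling sketch; the endpoint and operation name are placeholders, and a real client would use a secured, authenticated channel rather than an insecure one:

import time
import grpc
from google.longrunning.operations_pb2 import GetOperationRequest

# Placeholder target; production code would use a secure channel with credentials.
channel = grpc.insecure_channel('bigtableadmin.googleapis.com:443')
stub = OperationsStub(channel)

request = GetOperationRequest(name='operations/some-operation-id')
operation = stub.GetOperation(request)
while not operation.done:
    time.sleep(1)  # crude fixed delay, just for the sketch
    operation = stub.GetOperation(request)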
+ """ + raise NotImplementedError() + DeleteOperation.future = None + + +def beta_create_Operations_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): + request_deserializers = { + ('google.longrunning.Operations', 'CancelOperation'): CancelOperationRequest.FromString, + ('google.longrunning.Operations', 'DeleteOperation'): DeleteOperationRequest.FromString, + ('google.longrunning.Operations', 'GetOperation'): GetOperationRequest.FromString, + ('google.longrunning.Operations', 'ListOperations'): ListOperationsRequest.FromString, + } + response_serializers = { + ('google.longrunning.Operations', 'CancelOperation'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ('google.longrunning.Operations', 'DeleteOperation'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ('google.longrunning.Operations', 'GetOperation'): Operation.SerializeToString, + ('google.longrunning.Operations', 'ListOperations'): ListOperationsResponse.SerializeToString, + } + method_implementations = { + ('google.longrunning.Operations', 'CancelOperation'): face_utilities.unary_unary_inline(servicer.CancelOperation), + ('google.longrunning.Operations', 'DeleteOperation'): face_utilities.unary_unary_inline(servicer.DeleteOperation), + ('google.longrunning.Operations', 'GetOperation'): face_utilities.unary_unary_inline(servicer.GetOperation), + ('google.longrunning.Operations', 'ListOperations'): face_utilities.unary_unary_inline(servicer.ListOperations), + } + server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) + return beta_implementations.server(method_implementations, options=server_options) + + +def beta_create_Operations_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): + request_serializers = { + ('google.longrunning.Operations', 'CancelOperation'): CancelOperationRequest.SerializeToString, + ('google.longrunning.Operations', 'DeleteOperation'): DeleteOperationRequest.SerializeToString, + ('google.longrunning.Operations', 'GetOperation'): GetOperationRequest.SerializeToString, + ('google.longrunning.Operations', 'ListOperations'): ListOperationsRequest.SerializeToString, + } + response_deserializers = { + ('google.longrunning.Operations', 'CancelOperation'): google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ('google.longrunning.Operations', 'DeleteOperation'): google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ('google.longrunning.Operations', 'GetOperation'): Operation.FromString, + ('google.longrunning.Operations', 'ListOperations'): ListOperationsResponse.FromString, + } + cardinalities = { + 'CancelOperation': cardinality.Cardinality.UNARY_UNARY, + 'DeleteOperation': cardinality.Cardinality.UNARY_UNARY, + 'GetOperation': cardinality.Cardinality.UNARY_UNARY, + 'ListOperations': cardinality.Cardinality.UNARY_UNARY, + } + stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) + return beta_implementations.dynamic_stub(channel, 'google.longrunning.Operations', cardinalities, options=stub_options) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/table_pb2.py 
b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/table_pb2.py new file mode 100644 index 000000000000..840076514cc7 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/table_pb2.py @@ -0,0 +1,393 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/bigtable/admin/v2/table.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 +from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='google/bigtable/admin/v2/table.proto', + package='google.bigtable.admin.v2', + syntax='proto3', + serialized_pb=_b('\n$google/bigtable/admin/v2/table.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x1egoogle/protobuf/duration.proto\"\xa0\x03\n\x05Table\x12\x0c\n\x04name\x18\x01 \x01(\t\x12L\n\x0f\x63olumn_families\x18\x03 \x03(\x0b\x32\x33.google.bigtable.admin.v2.Table.ColumnFamiliesEntry\x12I\n\x0bgranularity\x18\x04 \x01(\x0e\x32\x34.google.bigtable.admin.v2.Table.TimestampGranularity\x1a]\n\x13\x43olumnFamiliesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x35\n\x05value\x18\x02 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamily:\x02\x38\x01\"I\n\x14TimestampGranularity\x12%\n!TIMESTAMP_GRANULARITY_UNSPECIFIED\x10\x00\x12\n\n\x06MILLIS\x10\x01\"F\n\x04View\x12\x14\n\x10VIEW_UNSPECIFIED\x10\x00\x12\r\n\tNAME_ONLY\x10\x01\x12\x0f\n\x0bSCHEMA_VIEW\x10\x02\x12\x08\n\x04\x46ULL\x10\x04\"A\n\x0c\x43olumnFamily\x12\x31\n\x07gc_rule\x18\x01 \x01(\x0b\x32 .google.bigtable.admin.v2.GcRule\"\xd5\x02\n\x06GcRule\x12\x1a\n\x10max_num_versions\x18\x01 \x01(\x05H\x00\x12,\n\x07max_age\x18\x02 \x01(\x0b\x32\x19.google.protobuf.DurationH\x00\x12\x45\n\x0cintersection\x18\x03 \x01(\x0b\x32-.google.bigtable.admin.v2.GcRule.IntersectionH\x00\x12\x37\n\x05union\x18\x04 \x01(\x0b\x32&.google.bigtable.admin.v2.GcRule.UnionH\x00\x1a?\n\x0cIntersection\x12/\n\x05rules\x18\x01 \x03(\x0b\x32 .google.bigtable.admin.v2.GcRule\x1a\x38\n\x05Union\x12/\n\x05rules\x18\x01 \x03(\x0b\x32 .google.bigtable.admin.v2.GcRuleB\x06\n\x04ruleB,\n\x1c\x63om.google.bigtable.admin.v2B\nTableProtoP\x01\x62\x06proto3') + , + dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_protobuf_dot_duration__pb2.DESCRIPTOR,]) +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + + + +_TABLE_TIMESTAMPGRANULARITY = _descriptor.EnumDescriptor( + name='TimestampGranularity', + full_name='google.bigtable.admin.v2.Table.TimestampGranularity', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='TIMESTAMP_GRANULARITY_UNSPECIFIED', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='MILLIS', index=1, number=1, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=400, + serialized_end=473, +) +_sym_db.RegisterEnumDescriptor(_TABLE_TIMESTAMPGRANULARITY) + +_TABLE_VIEW = _descriptor.EnumDescriptor( + name='View', + full_name='google.bigtable.admin.v2.Table.View', + filename=None, + file=DESCRIPTOR, + 
values=[ + _descriptor.EnumValueDescriptor( + name='VIEW_UNSPECIFIED', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='NAME_ONLY', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SCHEMA_VIEW', index=2, number=2, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='FULL', index=3, number=4, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=475, + serialized_end=545, +) +_sym_db.RegisterEnumDescriptor(_TABLE_VIEW) + + +_TABLE_COLUMNFAMILIESENTRY = _descriptor.Descriptor( + name='ColumnFamiliesEntry', + full_name='google.bigtable.admin.v2.Table.ColumnFamiliesEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='google.bigtable.admin.v2.Table.ColumnFamiliesEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='google.bigtable.admin.v2.Table.ColumnFamiliesEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=305, + serialized_end=398, +) + +_TABLE = _descriptor.Descriptor( + name='Table', + full_name='google.bigtable.admin.v2.Table', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.Table.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='column_families', full_name='google.bigtable.admin.v2.Table.column_families', index=1, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='granularity', full_name='google.bigtable.admin.v2.Table.granularity', index=2, + number=4, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_TABLE_COLUMNFAMILIESENTRY, ], + enum_types=[ + _TABLE_TIMESTAMPGRANULARITY, + _TABLE_VIEW, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=129, + serialized_end=545, +) + + +_COLUMNFAMILY = _descriptor.Descriptor( + name='ColumnFamily', + full_name='google.bigtable.admin.v2.ColumnFamily', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='gc_rule', full_name='google.bigtable.admin.v2.ColumnFamily.gc_rule', index=0, + number=1, type=11, cpp_type=10, label=1, + 
has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=547, + serialized_end=612, +) + + +_GCRULE_INTERSECTION = _descriptor.Descriptor( + name='Intersection', + full_name='google.bigtable.admin.v2.GcRule.Intersection', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='rules', full_name='google.bigtable.admin.v2.GcRule.Intersection.rules', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=827, + serialized_end=890, +) + +_GCRULE_UNION = _descriptor.Descriptor( + name='Union', + full_name='google.bigtable.admin.v2.GcRule.Union', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='rules', full_name='google.bigtable.admin.v2.GcRule.Union.rules', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=892, + serialized_end=948, +) + +_GCRULE = _descriptor.Descriptor( + name='GcRule', + full_name='google.bigtable.admin.v2.GcRule', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='max_num_versions', full_name='google.bigtable.admin.v2.GcRule.max_num_versions', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='max_age', full_name='google.bigtable.admin.v2.GcRule.max_age', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='intersection', full_name='google.bigtable.admin.v2.GcRule.intersection', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='union', full_name='google.bigtable.admin.v2.GcRule.union', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_GCRULE_INTERSECTION, _GCRULE_UNION, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='rule', 
full_name='google.bigtable.admin.v2.GcRule.rule', + index=0, containing_type=None, fields=[]), + ], + serialized_start=615, + serialized_end=956, +) + +_TABLE_COLUMNFAMILIESENTRY.fields_by_name['value'].message_type = _COLUMNFAMILY +_TABLE_COLUMNFAMILIESENTRY.containing_type = _TABLE +_TABLE.fields_by_name['column_families'].message_type = _TABLE_COLUMNFAMILIESENTRY +_TABLE.fields_by_name['granularity'].enum_type = _TABLE_TIMESTAMPGRANULARITY +_TABLE_TIMESTAMPGRANULARITY.containing_type = _TABLE +_TABLE_VIEW.containing_type = _TABLE +_COLUMNFAMILY.fields_by_name['gc_rule'].message_type = _GCRULE +_GCRULE_INTERSECTION.fields_by_name['rules'].message_type = _GCRULE +_GCRULE_INTERSECTION.containing_type = _GCRULE +_GCRULE_UNION.fields_by_name['rules'].message_type = _GCRULE +_GCRULE_UNION.containing_type = _GCRULE +_GCRULE.fields_by_name['max_age'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION +_GCRULE.fields_by_name['intersection'].message_type = _GCRULE_INTERSECTION +_GCRULE.fields_by_name['union'].message_type = _GCRULE_UNION +_GCRULE.oneofs_by_name['rule'].fields.append( + _GCRULE.fields_by_name['max_num_versions']) +_GCRULE.fields_by_name['max_num_versions'].containing_oneof = _GCRULE.oneofs_by_name['rule'] +_GCRULE.oneofs_by_name['rule'].fields.append( + _GCRULE.fields_by_name['max_age']) +_GCRULE.fields_by_name['max_age'].containing_oneof = _GCRULE.oneofs_by_name['rule'] +_GCRULE.oneofs_by_name['rule'].fields.append( + _GCRULE.fields_by_name['intersection']) +_GCRULE.fields_by_name['intersection'].containing_oneof = _GCRULE.oneofs_by_name['rule'] +_GCRULE.oneofs_by_name['rule'].fields.append( + _GCRULE.fields_by_name['union']) +_GCRULE.fields_by_name['union'].containing_oneof = _GCRULE.oneofs_by_name['rule'] +DESCRIPTOR.message_types_by_name['Table'] = _TABLE +DESCRIPTOR.message_types_by_name['ColumnFamily'] = _COLUMNFAMILY +DESCRIPTOR.message_types_by_name['GcRule'] = _GCRULE + +Table = _reflection.GeneratedProtocolMessageType('Table', (_message.Message,), dict( + + ColumnFamiliesEntry = _reflection.GeneratedProtocolMessageType('ColumnFamiliesEntry', (_message.Message,), dict( + DESCRIPTOR = _TABLE_COLUMNFAMILIESENTRY, + __module__ = 'google.bigtable.admin.v2.table_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table.ColumnFamiliesEntry) + )) + , + DESCRIPTOR = _TABLE, + __module__ = 'google.bigtable.admin.v2.table_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table) + )) +_sym_db.RegisterMessage(Table) +_sym_db.RegisterMessage(Table.ColumnFamiliesEntry) + +ColumnFamily = _reflection.GeneratedProtocolMessageType('ColumnFamily', (_message.Message,), dict( + DESCRIPTOR = _COLUMNFAMILY, + __module__ = 'google.bigtable.admin.v2.table_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ColumnFamily) + )) +_sym_db.RegisterMessage(ColumnFamily) + +GcRule = _reflection.GeneratedProtocolMessageType('GcRule', (_message.Message,), dict( + + Intersection = _reflection.GeneratedProtocolMessageType('Intersection', (_message.Message,), dict( + DESCRIPTOR = _GCRULE_INTERSECTION, + __module__ = 'google.bigtable.admin.v2.table_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GcRule.Intersection) + )) + , + + Union = _reflection.GeneratedProtocolMessageType('Union', (_message.Message,), dict( + DESCRIPTOR = _GCRULE_UNION, + __module__ = 'google.bigtable.admin.v2.table_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GcRule.Union) + )) + , + DESCRIPTOR = _GCRULE, + __module__ = 
'google.bigtable.admin.v2.table_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GcRule) + )) +_sym_db.RegisterMessage(GcRule) +_sym_db.RegisterMessage(GcRule.Intersection) +_sym_db.RegisterMessage(GcRule.Union) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\034com.google.bigtable.admin.v2B\nTableProtoP\001')) +_TABLE_COLUMNFAMILIESENTRY.has_options = True +_TABLE_COLUMNFAMILIESENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) +# @@protoc_insertion_point(module_scope) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py new file mode 100644 index 000000000000..1ad0f6bae193 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py @@ -0,0 +1,355 @@ +# Copyright 2015 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Parent client for calling the Google Cloud Bigtable API. + +This is the base from which all interactions with the API occur. + +In the hierarchy of API concepts + +* a :class:`Client` owns an :class:`.Instance` +* a :class:`.Instance` owns a :class:`~google.cloud.bigtable.table.Table` +* a :class:`~google.cloud.bigtable.table.Table` owns a + :class:`~.column_family.ColumnFamily` +* a :class:`~google.cloud.bigtable.table.Table` owns a :class:`~.row.Row` + (and all the cells in the row) +""" + + +import os + +from google.cloud._helpers import make_insecure_stub +from google.cloud._helpers import make_secure_stub +from google.cloud.bigtable._generated import bigtable_instance_admin_pb2 +from google.cloud.bigtable._generated import bigtable_pb2 +from google.cloud.bigtable._generated import bigtable_table_admin_pb2 +from google.cloud.bigtable._generated import operations_grpc_pb2 +from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES +from google.cloud.bigtable.instance import Instance +from google.cloud.bigtable.instance import _EXISTING_INSTANCE_LOCATION_ID +from google.cloud.client import _ClientFactoryMixin +from google.cloud.client import _ClientProjectMixin +from google.cloud.connection import DEFAULT_USER_AGENT +from google.cloud.credentials import get_credentials +from google.cloud.environment_vars import BIGTABLE_EMULATOR + + +TABLE_ADMIN_HOST = 'bigtableadmin.googleapis.com' +"""Table Admin API request host.""" + +INSTANCE_ADMIN_HOST = 'bigtableadmin.googleapis.com' +"""Cluster Admin API request host.""" + +DATA_API_HOST = 'bigtable.googleapis.com' +"""Data API request host.""" + +OPERATIONS_API_HOST = INSTANCE_ADMIN_HOST + +ADMIN_SCOPE = 'https://www.googleapis.com/auth/bigtable.admin' +"""Scope for interacting with the Cluster Admin and Table Admin APIs.""" +DATA_SCOPE = 'https://www.googleapis.com/auth/bigtable.data' +"""Scope for reading and writing table data.""" +READ_ONLY_SCOPE = 'https://www.googleapis.com/auth/bigtable.data.readonly' +"""Scope for reading table data.""" + + +def _make_data_stub(client): 
+ """Creates gRPC stub to make requests to the Data API. + + :type client: :class:`Client` + :param client: The client that will hold the stub. + + :rtype: :class:`._generated.bigtable_pb2.BigtableStub` + :returns: A gRPC stub object. + """ + if client.emulator_host is None: + return make_secure_stub(client.credentials, client.user_agent, + bigtable_pb2.BigtableStub, DATA_API_HOST) + else: + return make_insecure_stub(bigtable_pb2.BigtableStub, + client.emulator_host) + + +def _make_instance_stub(client): + """Creates gRPC stub to make requests to the Instance Admin API. + + :type client: :class:`Client` + :param client: The client that will hold the stub. + + :rtype: :class:`.bigtable_instance_admin_pb2.BigtableInstanceAdminStub` + :returns: A gRPC stub object. + """ + if client.emulator_host is None: + return make_secure_stub( + client.credentials, client.user_agent, + bigtable_instance_admin_pb2.BigtableInstanceAdminStub, + INSTANCE_ADMIN_HOST) + else: + return make_insecure_stub( + bigtable_instance_admin_pb2.BigtableInstanceAdminStub, + client.emulator_host) + + +def _make_operations_stub(client): + """Creates gRPC stub to make requests to the Operations API. + + These are for long-running operations of the Instance Admin API, + hence the host and port matching. + + :type client: :class:`Client` + :param client: The client that will hold the stub. + + :rtype: :class:`._generated.operations_grpc_pb2.OperationsStub` + :returns: A gRPC stub object. + """ + if client.emulator_host is None: + return make_secure_stub(client.credentials, client.user_agent, + operations_grpc_pb2.OperationsStub, + OPERATIONS_API_HOST) + else: + return make_insecure_stub(operations_grpc_pb2.OperationsStub, + client.emulator_host) + + +def _make_table_stub(client): + """Creates gRPC stub to make requests to the Table Admin API. + + :type client: :class:`Client` + :param client: The client that will hold the stub. + + :rtype: :class:`.bigtable_instance_admin_pb2.BigtableTableAdminStub` + :returns: A gRPC stub object. + """ + if client.emulator_host is None: + return make_secure_stub( + client.credentials, client.user_agent, + bigtable_table_admin_pb2.BigtableTableAdminStub, + TABLE_ADMIN_HOST) + else: + return make_insecure_stub( + bigtable_table_admin_pb2.BigtableTableAdminStub, + client.emulator_host) + + +class Client(_ClientFactoryMixin, _ClientProjectMixin): + """Client for interacting with Google Cloud Bigtable API. + + .. note:: + + Since the Cloud Bigtable API requires the gRPC transport, no + ``http`` argument is accepted by this class. + + :type project: :class:`str` or :func:`unicode ` + :param project: (Optional) The ID of the project which owns the + instances, tables and data. If not provided, will + attempt to determine from the environment. + + :type credentials: + :class:`OAuth2Credentials ` or + :data:`NoneType ` + :param credentials: (Optional) The OAuth2 Credentials to use for this + client. If not provided, defaults to the Google + Application Default Credentials. + + :type read_only: bool + :param read_only: (Optional) Boolean indicating if the data scope should be + for reading only (or for writing as well). Defaults to + :data:`False`. + + :type admin: bool + :param admin: (Optional) Boolean indicating if the client will be used to + interact with the Instance Admin or Table Admin APIs. This + requires the :const:`ADMIN_SCOPE`. Defaults to :data:`False`. + + :type user_agent: str + :param user_agent: (Optional) The user agent to be used with API request. 
+ Defaults to :const:`DEFAULT_USER_AGENT`. + + :raises: :class:`ValueError ` if both ``read_only`` + and ``admin`` are :data:`True` + """ + + _instance_stub_internal = None + _operations_stub_internal = None + _table_stub_internal = None + + def __init__(self, project=None, credentials=None, + read_only=False, admin=False, user_agent=DEFAULT_USER_AGENT): + _ClientProjectMixin.__init__(self, project=project) + if credentials is None: + credentials = get_credentials() + + if read_only and admin: + raise ValueError('A read-only client cannot also perform' + 'administrative actions.') + + scopes = [] + if read_only: + scopes.append(READ_ONLY_SCOPE) + else: + scopes.append(DATA_SCOPE) + + if admin: + scopes.append(ADMIN_SCOPE) + + self._admin = bool(admin) + try: + credentials = credentials.create_scoped(scopes) + except AttributeError: + pass + self._credentials = credentials + self.user_agent = user_agent + self.emulator_host = os.getenv(BIGTABLE_EMULATOR) + + # Create gRPC stubs for making requests. + self._data_stub = _make_data_stub(self) + if self._admin: + self._instance_stub_internal = _make_instance_stub(self) + self._operations_stub_internal = _make_operations_stub(self) + self._table_stub_internal = _make_table_stub(self) + + def copy(self): + """Make a copy of this client. + + Copies the local data stored as simple types but does not copy the + current state of any open connections with the Cloud Bigtable API. + + :rtype: :class:`.Client` + :returns: A copy of the current client. + """ + credentials = self._credentials + copied_creds = credentials.create_scoped(credentials.scopes) + return self.__class__( + self.project, + copied_creds, + READ_ONLY_SCOPE in copied_creds.scopes, + self._admin, + self.user_agent, + ) + + @property + def credentials(self): + """Getter for client's credentials. + + :rtype: + :class:`OAuth2Credentials ` + :returns: The credentials stored on the client. + """ + return self._credentials + + @property + def project_name(self): + """Project name to be used with Instance Admin API. + + .. note:: + + This property will not change if ``project`` does not, but the + return value is not cached. + + The project name is of the form + + ``"projects/{project}"`` + + :rtype: str + :returns: The project name to be used with the Cloud Bigtable Admin + API RPC service. + """ + return 'projects/' + self.project + + @property + def _instance_stub(self): + """Getter for the gRPC stub used for the Instance Admin API. + + :rtype: :class:`.bigtable_instance_admin_pb2.BigtableInstanceAdminStub` + :returns: A gRPC stub object. + :raises: :class:`ValueError ` if the current + client is not an admin client or if it has not been + :meth:`start`-ed. + """ + if not self._admin: + raise ValueError('Client is not an admin client.') + return self._instance_stub_internal + + @property + def _operations_stub(self): + """Getter for the gRPC stub used for the Operations API. + + :rtype: :class:`._generated.operations_grpc_pb2.OperationsStub` + :returns: A gRPC stub object. + :raises: :class:`ValueError ` if the current + client is not an admin client or if it has not been + :meth:`start`-ed. + """ + if not self._admin: + raise ValueError('Client is not an admin client.') + return self._operations_stub_internal + + @property + def _table_stub(self): + """Getter for the gRPC stub used for the Table Admin API. + + :rtype: :class:`.bigtable_instance_admin_pb2.BigtableTableAdminStub` + :returns: A gRPC stub object. 
+ :raises: :class:`ValueError ` if the current + client is not an admin client or if it has not been + :meth:`start`-ed. + """ + if not self._admin: + raise ValueError('Client is not an admin client.') + return self._table_stub_internal + + def instance(self, instance_id, location=_EXISTING_INSTANCE_LOCATION_ID, + display_name=None, serve_nodes=DEFAULT_SERVE_NODES): + """Factory to create a instance associated with this client. + + :type instance_id: str + :param instance_id: The ID of the instance. + + :type location: string + :param location: location name, in form + ``projects//locations/``; used to + set up the instance's cluster. + + :type display_name: str + :param display_name: (Optional) The display name for the instance in + the Cloud Console UI. (Must be between 4 and 30 + characters.) If this value is not set in the + constructor, will fall back to the instance ID. + + :type serve_nodes: int + :param serve_nodes: (Optional) The number of nodes in the instance's + cluster; used to set up the instance's cluster. + + :rtype: :class:`.Instance` + :returns: an instance owned by this client. + """ + return Instance(instance_id, self, location, + display_name=display_name, serve_nodes=serve_nodes) + + def list_instances(self): + """List instances owned by the project. + + :rtype: tuple + :returns: A pair of results, the first is a list of + :class:`.Instance` objects returned and the second is a + list of strings (the failed locations in the request). + """ + request_pb = bigtable_instance_admin_pb2.ListInstancesRequest( + parent=self.project_name) + + response = self._instance_stub.ListInstances(request_pb) + + instances = [Instance.from_pb(instance_pb, self) + for instance_pb in response.instances] + return instances, response.failed_locations diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py new file mode 100644 index 000000000000..e22f383bed95 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py @@ -0,0 +1,277 @@ +# Copyright 2015 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
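+
+# NOTE (illustrative usage, identifiers are hypothetical): a Cluster is
+# normally obtained from an Instance rather than constructed directly::
+#
+#     client = Client(admin=True)
+#     instance = client.instance('my-instance')
+#     cluster = instance.cluster('my-cluster', serve_nodes=3)
+#     operation = cluster.create()  # long-running CreateCluster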
+ +"""User friendly container for Google Cloud Bigtable Cluster.""" + + +import re + +from google.cloud.bigtable._generated import ( + instance_pb2 as data_v2_pb2) +from google.cloud.bigtable._generated import ( + bigtable_instance_admin_pb2 as messages_v2_pb2) +from google.cloud.operation import Operation +from google.cloud.operation import _compute_type_url +from google.cloud.operation import _register_type_url + + +_CLUSTER_NAME_RE = re.compile(r'^projects/(?P[^/]+)/' + r'instances/(?P[^/]+)/clusters/' + r'(?P[a-z][-a-z0-9]*)$') + +DEFAULT_SERVE_NODES = 3 +"""Default number of nodes to use when creating a cluster.""" + + +_UPDATE_CLUSTER_METADATA_URL = _compute_type_url( + messages_v2_pb2.UpdateClusterMetadata) +_register_type_url( + _UPDATE_CLUSTER_METADATA_URL, messages_v2_pb2.UpdateClusterMetadata) + + +def _prepare_create_request(cluster): + """Creates a protobuf request for a CreateCluster request. + + :type cluster: :class:`Cluster` + :param cluster: The cluster to be created. + + :rtype: :class:`.messages_v2_pb2.CreateClusterRequest` + :returns: The CreateCluster request object containing the cluster info. + """ + return messages_v2_pb2.CreateClusterRequest( + parent=cluster._instance.name, + cluster_id=cluster.cluster_id, + cluster=data_v2_pb2.Cluster( + serve_nodes=cluster.serve_nodes, + ), + ) + + +class Cluster(object): + """Representation of a Google Cloud Bigtable Cluster. + + We can use a :class:`Cluster` to: + + * :meth:`reload` itself + * :meth:`create` itself + * :meth:`update` itself + * :meth:`delete` itself + + .. note:: + + For now, we leave out the ``default_storage_type`` (an enum) + which if not sent will end up as :data:`.data_v2_pb2.STORAGE_SSD`. + + :type cluster_id: str + :param cluster_id: The ID of the cluster. + + :type instance: :class:`.instance.Instance` + :param instance: The instance where the cluster resides. + + :type serve_nodes: int + :param serve_nodes: (Optional) The number of nodes in the cluster. + Defaults to :data:`DEFAULT_SERVE_NODES`. + """ + + def __init__(self, cluster_id, instance, + serve_nodes=DEFAULT_SERVE_NODES): + self.cluster_id = cluster_id + self._instance = instance + self.serve_nodes = serve_nodes + self.location = None + + def _update_from_pb(self, cluster_pb): + """Refresh self from the server-provided protobuf. + + Helper for :meth:`from_pb` and :meth:`reload`. + """ + if not cluster_pb.serve_nodes: # Simple field (int32) + raise ValueError('Cluster protobuf does not contain serve_nodes') + self.serve_nodes = cluster_pb.serve_nodes + self.location = cluster_pb.location + + @classmethod + def from_pb(cls, cluster_pb, instance): + """Creates a cluster instance from a protobuf. + + :type cluster_pb: :class:`instance_pb2.Cluster` + :param cluster_pb: A cluster protobuf object. + + :type instance: :class:`.instance.Instance>` + :param instance: The instance that owns the cluster. + + :rtype: :class:`Cluster` + :returns: The cluster parsed from the protobuf response. + :raises: + :class:`ValueError ` if the cluster + name does not match + ``projects/{project}/instances/{instance}/clusters/{cluster_id}`` + or if the parsed project ID does not match the project ID + on the client. 
+ """ + match = _CLUSTER_NAME_RE.match(cluster_pb.name) + if match is None: + raise ValueError('Cluster protobuf name was not in the ' + 'expected format.', cluster_pb.name) + if match.group('project') != instance._client.project: + raise ValueError('Project ID on cluster does not match the ' + 'project ID on the client') + if match.group('instance') != instance.instance_id: + raise ValueError('Instance ID on cluster does not match the ' + 'instance ID on the client') + + result = cls(match.group('cluster_id'), instance) + result._update_from_pb(cluster_pb) + return result + + def copy(self): + """Make a copy of this cluster. + + Copies the local data stored as simple types and copies the client + attached to this instance. + + :rtype: :class:`.Cluster` + :returns: A copy of the current cluster. + """ + new_instance = self._instance.copy() + return self.__class__(self.cluster_id, new_instance, + serve_nodes=self.serve_nodes) + + @property + def name(self): + """Cluster name used in requests. + + .. note:: + This property will not change if ``_instance`` and ``cluster_id`` + do not, but the return value is not cached. + + The cluster name is of the form + + ``"projects/{project}/instances/{instance}/clusters/{cluster_id}"`` + + :rtype: str + :returns: The cluster name. + """ + return self._instance.name + '/clusters/' + self.cluster_id + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + # NOTE: This does not compare the configuration values, such as + # the serve_nodes. Instead, it only compares + # identifying values instance, cluster ID and client. This is + # intentional, since the same cluster can be in different states + # if not synchronized. Clusters with similar instance/cluster + # settings but different clients can't be used in the same way. + return (other.cluster_id == self.cluster_id and + other._instance == self._instance) + + def __ne__(self, other): + return not self.__eq__(other) + + def reload(self): + """Reload the metadata for this cluster.""" + request_pb = messages_v2_pb2.GetClusterRequest(name=self.name) + # We expect a `._generated.instance_pb2.Cluster`. + cluster_pb = self._instance._client._instance_stub.GetCluster( + request_pb) + + # NOTE: _update_from_pb does not check that the project, instance and + # cluster ID on the response match the request. + self._update_from_pb(cluster_pb) + + def create(self): + """Create this cluster. + + .. note:: + + Uses the ``project``, ``instance`` and ``cluster_id`` on the + current :class:`Cluster` in addition to the ``serve_nodes``. + To change them before creating, reset the values via + + .. code:: python + + cluster.serve_nodes = 8 + cluster.cluster_id = 'i-changed-my-mind' + + before calling :meth:`create`. + + :rtype: :class:`Operation` + :returns: The long-running operation corresponding to the + create operation. + """ + request_pb = _prepare_create_request(self) + # We expect a `google.longrunning.operations_pb2.Operation`. + client = self._instance._client + operation_pb = client._instance_stub.CreateCluster(request_pb) + + operation = Operation.from_pb(operation_pb, client) + operation.target = self + operation.metadata['request_type'] = 'CreateCluster' + return operation + + def update(self): + """Update this cluster. + + .. note:: + + Updates the ``serve_nodes``. If you'd like to + change them before updating, reset the values via + + .. code:: python + + cluster.serve_nodes = 8 + + before calling :meth:`update`. 
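+
+        The returned :class:`Operation` wraps the long-running
+        ``UpdateCluster`` RPC; its ``target`` is set to this cluster and
+        its ``metadata['request_type']`` to ``'UpdateCluster'``.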
+ + :rtype: :class:`Operation` + :returns: The long-running operation corresponding to the + update operation. + """ + request_pb = data_v2_pb2.Cluster( + name=self.name, + serve_nodes=self.serve_nodes, + ) + # We expect a `google.longrunning.operations_pb2.Operation`. + client = self._instance._client + operation_pb = client._instance_stub.UpdateCluster(request_pb) + + operation = Operation.from_pb(operation_pb, client) + operation.target = self + operation.metadata['request_type'] = 'UpdateCluster' + return operation + + def delete(self): + """Delete this cluster. + + Marks a cluster and all of its tables for permanent deletion in 7 days. + + Immediately upon completion of the request: + + * Billing will cease for all of the cluster's reserved resources. + * The cluster's ``delete_time`` field will be set 7 days in the future. + + Soon afterward: + + * All tables within the cluster will become unavailable. + + At the cluster's ``delete_time``: + + * The cluster and **all of its tables** will immediately and + irrevocably disappear from the API, and their data will be + permanently deleted. + """ + request_pb = messages_v2_pb2.DeleteClusterRequest(name=self.name) + # We expect a `google.protobuf.empty_pb2.Empty` + self._instance._client._instance_stub.DeleteCluster(request_pb) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/column_family.py b/packages/google-cloud-bigtable/google/cloud/bigtable/column_family.py new file mode 100644 index 000000000000..cb7dcdc4ff60 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/column_family.py @@ -0,0 +1,338 @@ +# Copyright 2015 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""User friendly container for Google Cloud Bigtable Column Family.""" + + +import datetime + +from google.protobuf import duration_pb2 + +from google.cloud.bigtable._generated import ( + table_pb2 as table_v2_pb2) +from google.cloud.bigtable._generated import ( + bigtable_table_admin_pb2 as table_admin_v2_pb2) + + +def _timedelta_to_duration_pb(timedelta_val): + """Convert a Python timedelta object to a duration protobuf. + + .. note:: + + The Python timedelta has a granularity of microseconds while + the protobuf duration type has a duration of nanoseconds. + + :type timedelta_val: :class:`datetime.timedelta` + :param timedelta_val: A timedelta object. + + :rtype: :class:`google.protobuf.duration_pb2.Duration` + :returns: A duration object equivalent to the time delta. + """ + seconds_decimal = timedelta_val.total_seconds() + # Truncate the parts other than the integer. + seconds = int(seconds_decimal) + if seconds_decimal < 0: + signed_micros = timedelta_val.microseconds - 10**6 + else: + signed_micros = timedelta_val.microseconds + # Convert nanoseconds to microseconds. + nanos = 1000 * signed_micros + return duration_pb2.Duration(seconds=seconds, nanos=nanos) + + +def _duration_pb_to_timedelta(duration_pb): + """Convert a duration protobuf to a Python timedelta object. + + .. 
note:: + + The Python timedelta has a granularity of microseconds while + the protobuf duration type has a duration of nanoseconds. + + :type duration_pb: :class:`google.protobuf.duration_pb2.Duration` + :param duration_pb: A protobuf duration object. + + :rtype: :class:`datetime.timedelta` + :returns: The converted timedelta object. + """ + return datetime.timedelta( + seconds=duration_pb.seconds, + microseconds=(duration_pb.nanos / 1000.0), + ) + + +class GarbageCollectionRule(object): + """Garbage collection rule for column families within a table. + + Cells in the column family (within a table) fitting the rule will be + deleted during garbage collection. + + .. note:: + + This class is a do-nothing base class for all GC rules. + + .. note:: + + A string ``gc_expression`` can also be used with API requests, but + that value would be superceded by a ``gc_rule``. As a result, we + don't support that feature and instead support via native classes. + """ + + def __ne__(self, other): + return not self.__eq__(other) + + +class MaxVersionsGCRule(GarbageCollectionRule): + """Garbage collection limiting the number of versions of a cell. + + :type max_num_versions: int + :param max_num_versions: The maximum number of versions + """ + + def __init__(self, max_num_versions): + self.max_num_versions = max_num_versions + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + return other.max_num_versions == self.max_num_versions + + def to_pb(self): + """Converts the garbage collection rule to a protobuf. + + :rtype: :class:`.table_v2_pb2.GcRule` + :returns: The converted current object. + """ + return table_v2_pb2.GcRule(max_num_versions=self.max_num_versions) + + +class MaxAgeGCRule(GarbageCollectionRule): + """Garbage collection limiting the age of a cell. + + :type max_age: :class:`datetime.timedelta` + :param max_age: The maximum age allowed for a cell in the table. + """ + + def __init__(self, max_age): + self.max_age = max_age + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + return other.max_age == self.max_age + + def to_pb(self): + """Converts the garbage collection rule to a protobuf. + + :rtype: :class:`.table_v2_pb2.GcRule` + :returns: The converted current object. + """ + max_age = _timedelta_to_duration_pb(self.max_age) + return table_v2_pb2.GcRule(max_age=max_age) + + +class GCRuleUnion(GarbageCollectionRule): + """Union of garbage collection rules. + + :type rules: list + :param rules: List of :class:`GarbageCollectionRule`. + """ + + def __init__(self, rules): + self.rules = rules + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + return other.rules == self.rules + + def to_pb(self): + """Converts the union into a single GC rule as a protobuf. + + :rtype: :class:`.table_v2_pb2.GcRule` + :returns: The converted current object. + """ + union = table_v2_pb2.GcRule.Union( + rules=[rule.to_pb() for rule in self.rules]) + return table_v2_pb2.GcRule(union=union) + + +class GCRuleIntersection(GarbageCollectionRule): + """Intersection of garbage collection rules. + + :type rules: list + :param rules: List of :class:`GarbageCollectionRule`. + """ + + def __init__(self, rules): + self.rules = rules + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + return other.rules == self.rules + + def to_pb(self): + """Converts the intersection into a single GC rule as a protobuf. 
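+
+        For example (an illustrative sketch, reusing the rule classes
+        defined above):
+
+        .. code:: python
+
+            import datetime
+
+            rule = GCRuleIntersection([
+                MaxVersionsGCRule(3),
+                MaxAgeGCRule(datetime.timedelta(days=7)),
+            ])
+            rule_pb = rule.to_pb()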
+ + :rtype: :class:`.table_v2_pb2.GcRule` + :returns: The converted current object. + """ + intersection = table_v2_pb2.GcRule.Intersection( + rules=[rule.to_pb() for rule in self.rules]) + return table_v2_pb2.GcRule(intersection=intersection) + + +class ColumnFamily(object): + """Representation of a Google Cloud Bigtable Column Family. + + We can use a :class:`ColumnFamily` to: + + * :meth:`create` itself + * :meth:`update` itself + * :meth:`delete` itself + + :type column_family_id: str + :param column_family_id: The ID of the column family. Must be of the + form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + + :type table: :class:`Table ` + :param table: The table that owns the column family. + + :type gc_rule: :class:`GarbageCollectionRule` + :param gc_rule: (Optional) The garbage collection settings for this + column family. + """ + + def __init__(self, column_family_id, table, gc_rule=None): + self.column_family_id = column_family_id + self._table = table + self.gc_rule = gc_rule + + @property + def name(self): + """Column family name used in requests. + + .. note:: + + This property will not change if ``column_family_id`` does not, but + the return value is not cached. + + The table name is of the form + + ``"projects/../zones/../clusters/../tables/../columnFamilies/.."`` + + :rtype: str + :returns: The column family name. + """ + return self._table.name + '/columnFamilies/' + self.column_family_id + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + return (other.column_family_id == self.column_family_id and + other._table == self._table and + other.gc_rule == self.gc_rule) + + def __ne__(self, other): + return not self.__eq__(other) + + def to_pb(self): + """Converts the column family to a protobuf. + + :rtype: :class:`.table_v2_pb2.ColumnFamily` + :returns: The converted current object. + """ + if self.gc_rule is None: + return table_v2_pb2.ColumnFamily() + else: + return table_v2_pb2.ColumnFamily(gc_rule=self.gc_rule.to_pb()) + + def create(self): + """Create this column family.""" + column_family = self.to_pb() + request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest( + name=self._table.name) + request_pb.modifications.add( + id=self.column_family_id, + create=column_family, + ) + client = self._table._instance._client + # We expect a `.table_v2_pb2.ColumnFamily`. We ignore it since the only + # data it contains are the GC rule and the column family ID already + # stored on this instance. + client._table_stub.ModifyColumnFamilies(request_pb) + + def update(self): + """Update this column family. + + .. note:: + + Only the GC rule can be updated. By changing the column family ID, + you will simply be referring to a different column family. + """ + column_family = self.to_pb() + request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest( + name=self._table.name) + request_pb.modifications.add( + id=self.column_family_id, + update=column_family) + client = self._table._instance._client + # We expect a `.table_v2_pb2.ColumnFamily`. We ignore it since the only + # data it contains are the GC rule and the column family ID already + # stored on this instance. 
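+        # For reference, :meth:`create`, :meth:`update` and :meth:`delete`
+        # all build the same ``ModifyColumnFamiliesRequest``; they differ
+        # only in which field of the added modification is set
+        # (``create=``, ``update=`` or ``drop=True`` respectively).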
+ client._table_stub.ModifyColumnFamilies(request_pb) + + def delete(self): + """Delete this column family.""" + request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest( + name=self._table.name) + request_pb.modifications.add( + id=self.column_family_id, + drop=True) + client = self._table._instance._client + # We expect a `google.protobuf.empty_pb2.Empty` + client._table_stub.ModifyColumnFamilies(request_pb) + + +def _gc_rule_from_pb(gc_rule_pb): + """Convert a protobuf GC rule to a native object. + + :type gc_rule_pb: :class:`.table_v2_pb2.GcRule` + :param gc_rule_pb: The GC rule to convert. + + :rtype: :class:`GarbageCollectionRule` or :data:`NoneType ` + :returns: An instance of one of the native rules defined + in :module:`column_family` or :data:`None` if no values were + set on the protobuf passed in. + :raises: :class:`ValueError ` if the rule name + is unexpected. + """ + rule_name = gc_rule_pb.WhichOneof('rule') + if rule_name is None: + return None + + if rule_name == 'max_num_versions': + return MaxVersionsGCRule(gc_rule_pb.max_num_versions) + elif rule_name == 'max_age': + max_age = _duration_pb_to_timedelta(gc_rule_pb.max_age) + return MaxAgeGCRule(max_age) + elif rule_name == 'union': + return GCRuleUnion([_gc_rule_from_pb(rule) + for rule in gc_rule_pb.union.rules]) + elif rule_name == 'intersection': + rules = [_gc_rule_from_pb(rule) + for rule in gc_rule_pb.intersection.rules] + return GCRuleIntersection(rules) + else: + raise ValueError('Unexpected rule name', rule_name) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py new file mode 100644 index 000000000000..8730836622d0 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py @@ -0,0 +1,356 @@ +# Copyright 2015 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""User friendly container for Google Cloud Bigtable Instance.""" + + +import re + +from google.cloud.bigtable._generated import ( + instance_pb2 as data_v2_pb2) +from google.cloud.bigtable._generated import ( + bigtable_instance_admin_pb2 as messages_v2_pb2) +from google.cloud.bigtable._generated import ( + bigtable_table_admin_pb2 as table_messages_v2_pb2) +from google.cloud.bigtable.cluster import Cluster +from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES +from google.cloud.bigtable.table import Table +from google.cloud.operation import Operation +from google.cloud.operation import _compute_type_url +from google.cloud.operation import _register_type_url + + +_EXISTING_INSTANCE_LOCATION_ID = 'see-existing-cluster' +_INSTANCE_NAME_RE = re.compile(r'^projects/(?P[^/]+)/' + r'instances/(?P[a-z][-a-z0-9]*)$') + + +_CREATE_INSTANCE_METADATA_URL = _compute_type_url( + messages_v2_pb2.CreateInstanceMetadata) +_register_type_url( + _CREATE_INSTANCE_METADATA_URL, messages_v2_pb2.CreateInstanceMetadata) + + +def _prepare_create_request(instance): + """Creates a protobuf request for a CreateInstance request. 
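+
+    The request also carries a single cluster, keyed by the instance ID,
+    whose location and ``serve_nodes`` are taken from the values stored
+    on the instance.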
+ + :type instance: :class:`Instance` + :param instance: The instance to be created. + + :rtype: :class:`.messages_v2_pb2.CreateInstanceRequest` + :returns: The CreateInstance request object containing the instance info. + """ + parent_name = ('projects/' + instance._client.project) + message = messages_v2_pb2.CreateInstanceRequest( + parent=parent_name, + instance_id=instance.instance_id, + instance=data_v2_pb2.Instance( + display_name=instance.display_name, + ), + ) + cluster = message.clusters[instance.instance_id] + cluster.name = instance.name + '/clusters/' + instance.instance_id + cluster.location = ( + parent_name + '/locations/' + instance._cluster_location_id) + cluster.serve_nodes = instance._cluster_serve_nodes + return message + + +class Instance(object): + """Representation of a Google Cloud Bigtable Instance. + + We can use a :class:`Instance` to: + + * :meth:`reload` itself + * :meth:`create` itself + * :meth:`update` itself + * :meth:`delete` itself + + .. note:: + + For now, we leave out the ``default_storage_type`` (an enum) + which if not sent will end up as :data:`.data_v2_pb2.STORAGE_SSD`. + + :type instance_id: str + :param instance_id: The ID of the instance. + + :type client: :class:`Client ` + :param client: The client that owns the instance. Provides + authorization and a project ID. + + :type location_id: str + :param location_id: ID of the location in which the instance will be + created. Required for instances which do not yet + exist. + + :type display_name: str + :param display_name: (Optional) The display name for the instance in the + Cloud Console UI. (Must be between 4 and 30 + characters.) If this value is not set in the + constructor, will fall back to the instance ID. + + :type serve_nodes: int + :param serve_nodes: (Optional) The number of nodes in the instance's + cluster; used to set up the instance's cluster. + """ + + def __init__(self, instance_id, client, + location_id=_EXISTING_INSTANCE_LOCATION_ID, + display_name=None, + serve_nodes=DEFAULT_SERVE_NODES): + self.instance_id = instance_id + self.display_name = display_name or instance_id + self._cluster_location_id = location_id + self._cluster_serve_nodes = serve_nodes + self._client = client + + def _update_from_pb(self, instance_pb): + """Refresh self from the server-provided protobuf. + + Helper for :meth:`from_pb` and :meth:`reload`. + """ + if not instance_pb.display_name: # Simple field (string) + raise ValueError('Instance protobuf does not contain display_name') + self.display_name = instance_pb.display_name + + @classmethod + def from_pb(cls, instance_pb, client): + """Creates a instance instance from a protobuf. + + :type instance_pb: :class:`instance_pb2.Instance` + :param instance_pb: A instance protobuf object. + + :type client: :class:`Client ` + :param client: The client that owns the instance. + + :rtype: :class:`Instance` + :returns: The instance parsed from the protobuf response. + :raises: :class:`ValueError ` if the instance + name does not match + ``projects/{project}/instances/{instance_id}`` + or if the parsed project ID does not match the project ID + on the client. 
+ """ + match = _INSTANCE_NAME_RE.match(instance_pb.name) + if match is None: + raise ValueError('Instance protobuf name was not in the ' + 'expected format.', instance_pb.name) + if match.group('project') != client.project: + raise ValueError('Project ID on instance does not match the ' + 'project ID on the client') + instance_id = match.group('instance_id') + + result = cls(instance_id, client, _EXISTING_INSTANCE_LOCATION_ID) + result._update_from_pb(instance_pb) + return result + + def copy(self): + """Make a copy of this instance. + + Copies the local data stored as simple types and copies the client + attached to this instance. + + :rtype: :class:`.Instance` + :returns: A copy of the current instance. + """ + new_client = self._client.copy() + return self.__class__(self.instance_id, new_client, + self._cluster_location_id, + display_name=self.display_name) + + @property + def name(self): + """Instance name used in requests. + + .. note:: + This property will not change if ``instance_id`` does not, + but the return value is not cached. + + The instance name is of the form + + ``"projects/{project}/instances/{instance_id}"`` + + :rtype: str + :returns: The instance name. + """ + return self._client.project_name + '/instances/' + self.instance_id + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + # NOTE: This does not compare the configuration values, such as + # the display_name. Instead, it only compares + # identifying values instance ID and client. This is + # intentional, since the same instance can be in different states + # if not synchronized. Instances with similar instance + # settings but different clients can't be used in the same way. + return (other.instance_id == self.instance_id and + other._client == self._client) + + def __ne__(self, other): + return not self.__eq__(other) + + def reload(self): + """Reload the metadata for this instance.""" + request_pb = messages_v2_pb2.GetInstanceRequest(name=self.name) + # We expect `data_v2_pb2.Instance`. + instance_pb = self._client._instance_stub.GetInstance(request_pb) + + # NOTE: _update_from_pb does not check that the project and + # instance ID on the response match the request. + self._update_from_pb(instance_pb) + + def create(self): + """Create this instance. + + .. note:: + + Uses the ``project`` and ``instance_id`` on the current + :class:`Instance` in addition to the ``display_name``. + To change them before creating, reset the values via + + .. code:: python + + instance.display_name = 'New display name' + instance.instance_id = 'i-changed-my-mind' + + before calling :meth:`create`. + + :rtype: :class:`Operation` + :returns: The long-running operation corresponding to the + create operation. + """ + request_pb = _prepare_create_request(self) + # We expect a `google.longrunning.operations_pb2.Operation`. + operation_pb = self._client._instance_stub.CreateInstance(request_pb) + + operation = Operation.from_pb(operation_pb, self._client) + operation.target = self + operation.metadata['request_type'] = 'CreateInstance' + return operation + + def update(self): + """Update this instance. + + .. note:: + + Updates the ``display_name``. To change that value before + updating, reset its values via + + .. code:: python + + instance.display_name = 'New display name' + + before calling :meth:`update`. + """ + request_pb = data_v2_pb2.Instance( + name=self.name, + display_name=self.display_name, + ) + # Ignore the expected `data_v2_pb2.Instance`. 
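+        # Unlike :meth:`create`, this RPC is not long-running: the stub
+        # returns the updated instance directly rather than an Operation,
+        # so there is nothing to poll.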
+ self._client._instance_stub.UpdateInstance(request_pb) + + def delete(self): + """Delete this instance. + + Marks a instance and all of its tables for permanent deletion + in 7 days. + + Immediately upon completion of the request: + + * Billing will cease for all of the instance's reserved resources. + * The instance's ``delete_time`` field will be set 7 days in + the future. + + Soon afterward: + + * All tables within the instance will become unavailable. + + At the instance's ``delete_time``: + + * The instance and **all of its tables** will immediately and + irrevocably disappear from the API, and their data will be + permanently deleted. + """ + request_pb = messages_v2_pb2.DeleteInstanceRequest(name=self.name) + # We expect a `google.protobuf.empty_pb2.Empty` + self._client._instance_stub.DeleteInstance(request_pb) + + def cluster(self, cluster_id, serve_nodes=3): + """Factory to create a cluster associated with this client. + + :type cluster_id: str + :param cluster_id: The ID of the cluster. + + :type serve_nodes: int + :param serve_nodes: (Optional) The number of nodes in the cluster. + Defaults to 3. + + :rtype: :class:`.Cluster` + :returns: The cluster owned by this client. + """ + return Cluster(cluster_id, self, serve_nodes=serve_nodes) + + def list_clusters(self): + """Lists clusters in this instance. + + :rtype: tuple + :returns: A pair of results, the first is a list of :class:`.Cluster` s + returned and the second is a list of strings (the failed + locations in the request). + """ + request_pb = messages_v2_pb2.ListClustersRequest(parent=self.name) + # We expect a `.cluster_messages_v1_pb2.ListClustersResponse` + list_clusters_response = self._client._instance_stub.ListClusters( + request_pb) + + failed_locations = [ + location for location in list_clusters_response.failed_locations] + clusters = [Cluster.from_pb(cluster_pb, self) + for cluster_pb in list_clusters_response.clusters] + return clusters, failed_locations + + def table(self, table_id): + """Factory to create a table associated with this instance. + + :type table_id: str + :param table_id: The ID of the table. + + :rtype: :class:`Table ` + :returns: The table owned by this instance. + """ + return Table(table_id, self) + + def list_tables(self): + """List the tables in this instance. + + :rtype: list of :class:`Table ` + :returns: The list of tables owned by the instance. + :raises: :class:`ValueError ` if one of the + returned tables has a name that is not of the expected format. + """ + request_pb = table_messages_v2_pb2.ListTablesRequest(parent=self.name) + # We expect a `table_messages_v2_pb2.ListTablesResponse` + table_list_pb = self._client._table_stub.ListTables(request_pb) + + result = [] + for table_pb in table_list_pb.tables: + table_prefix = self.name + '/tables/' + if not table_pb.name.startswith(table_prefix): + raise ValueError('Table name %s not of expected format' % ( + table_pb.name,)) + table_id = table_pb.name[len(table_prefix):] + result.append(self.table(table_id)) + + return result diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row.py new file mode 100644 index 000000000000..8ec1c63b1327 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row.py @@ -0,0 +1,887 @@ +# Copyright 2015 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""User friendly container for Google Cloud Bigtable Row.""" + + +import struct + +import six + +from google.cloud._helpers import _datetime_from_microseconds +from google.cloud._helpers import _microseconds_from_datetime +from google.cloud._helpers import _to_bytes +from google.cloud.bigtable._generated import ( + data_pb2 as data_v2_pb2) +from google.cloud.bigtable._generated import ( + bigtable_pb2 as messages_v2_pb2) + + +_PACK_I64 = struct.Struct('>q').pack + +MAX_MUTATIONS = 100000 +"""The maximum number of mutations that a row can accumulate.""" + + +class Row(object): + """Base representation of a Google Cloud Bigtable Row. + + This class has three subclasses corresponding to the three + RPC methods for sending row mutations: + + * :class:`DirectRow` for ``MutateRow`` + * :class:`ConditionalRow` for ``CheckAndMutateRow`` + * :class:`AppendRow` for ``ReadModifyWriteRow`` + + :type row_key: bytes + :param row_key: The key for the current row. + + :type table: :class:`Table ` + :param table: The table that owns the row. + """ + + def __init__(self, row_key, table): + self._row_key = _to_bytes(row_key) + self._table = table + + +class _SetDeleteRow(Row): + """Row helper for setting or deleting cell values. + + Implements helper methods to add mutations to set or delete cell contents: + + * :meth:`set_cell` + * :meth:`delete` + * :meth:`delete_cell` + * :meth:`delete_cells` + + :type row_key: bytes + :param row_key: The key for the current row. + + :type table: :class:`Table ` + :param table: The table that owns the row. + """ + + ALL_COLUMNS = object() + """Sentinel value used to indicate all columns in a column family.""" + + def _get_mutations(self, state): + """Gets the list of mutations for a given state. + + This method intended to be implemented by subclasses. + + ``state`` may not need to be used by all subclasses. + + :type state: bool + :param state: The state that the mutation should be + applied in. + + :raises: :class:`NotImplementedError ` + always. + """ + raise NotImplementedError + + def _set_cell(self, column_family_id, column, value, timestamp=None, + state=None): + """Helper for :meth:`set_cell` + + Adds a mutation to set the value in a specific cell. + + ``state`` is unused by :class:`DirectRow` but is used by + subclasses. + + :type column_family_id: str + :param column_family_id: The column family that contains the column. + Must be of the form + ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + + :type column: bytes + :param column: The column within the column family where the cell + is located. + + :type value: bytes or :class:`int` + :param value: The value to set in the cell. If an integer is used, + will be interpreted as a 64-bit big-endian signed + integer (8 bytes). + + :type timestamp: :class:`datetime.datetime` + :param timestamp: (Optional) The timestamp of the operation. + + :type state: bool + :param state: (Optional) The state that is passed along to + :meth:`_get_mutations`. 
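+
+        A hedged sketch of the integer handling described above, shown on a
+        :class:`DirectRow` (the family, column and value are illustrative
+        only):
+
+        .. code:: python
+
+            import struct
+
+            packed = struct.Struct('>q').pack(1000)
+            # Passing the integer directly is equivalent to passing the
+            # pre-packed 8-byte big-endian value:
+            row.set_cell(u'fam', b'col', 1000)
+            row.set_cell(u'fam', b'col', packed)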
+ """ + column = _to_bytes(column) + if isinstance(value, six.integer_types): + value = _PACK_I64(value) + value = _to_bytes(value) + if timestamp is None: + # Use -1 for current Bigtable server time. + timestamp_micros = -1 + else: + timestamp_micros = _microseconds_from_datetime(timestamp) + # Truncate to millisecond granularity. + timestamp_micros -= (timestamp_micros % 1000) + + mutation_val = data_v2_pb2.Mutation.SetCell( + family_name=column_family_id, + column_qualifier=column, + timestamp_micros=timestamp_micros, + value=value, + ) + mutation_pb = data_v2_pb2.Mutation(set_cell=mutation_val) + self._get_mutations(state).append(mutation_pb) + + def _delete(self, state=None): + """Helper for :meth:`delete` + + Adds a delete mutation (for the entire row) to the accumulated + mutations. + + ``state`` is unused by :class:`DirectRow` but is used by + subclasses. + + :type state: bool + :param state: (Optional) The state that is passed along to + :meth:`_get_mutations`. + """ + mutation_val = data_v2_pb2.Mutation.DeleteFromRow() + mutation_pb = data_v2_pb2.Mutation(delete_from_row=mutation_val) + self._get_mutations(state).append(mutation_pb) + + def _delete_cells(self, column_family_id, columns, time_range=None, + state=None): + """Helper for :meth:`delete_cell` and :meth:`delete_cells`. + + ``state`` is unused by :class:`DirectRow` but is used by + subclasses. + + :type column_family_id: str + :param column_family_id: The column family that contains the column + or columns with cells being deleted. Must be + of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + + :type columns: :class:`list` of :class:`str` / + :func:`unicode `, or :class:`object` + :param columns: The columns within the column family that will have + cells deleted. If :attr:`ALL_COLUMNS` is used then + the entire column family will be deleted from the row. + + :type time_range: :class:`TimestampRange` + :param time_range: (Optional) The range of time within which cells + should be deleted. + + :type state: bool + :param state: (Optional) The state that is passed along to + :meth:`_get_mutations`. + """ + mutations_list = self._get_mutations(state) + if columns is self.ALL_COLUMNS: + mutation_val = data_v2_pb2.Mutation.DeleteFromFamily( + family_name=column_family_id, + ) + mutation_pb = data_v2_pb2.Mutation(delete_from_family=mutation_val) + mutations_list.append(mutation_pb) + else: + delete_kwargs = {} + if time_range is not None: + delete_kwargs['time_range'] = time_range.to_pb() + + to_append = [] + for column in columns: + column = _to_bytes(column) + # time_range will never change if present, but the rest of + # delete_kwargs will + delete_kwargs.update( + family_name=column_family_id, + column_qualifier=column, + ) + mutation_val = data_v2_pb2.Mutation.DeleteFromColumn( + **delete_kwargs) + mutation_pb = data_v2_pb2.Mutation( + delete_from_column=mutation_val) + to_append.append(mutation_pb) + + # We don't add the mutations until all columns have been + # processed without error. + mutations_list.extend(to_append) + + +class DirectRow(_SetDeleteRow): + """Google Cloud Bigtable Row for sending "direct" mutations. + + These mutations directly set or delete cell contents: + + * :meth:`set_cell` + * :meth:`delete` + * :meth:`delete_cell` + * :meth:`delete_cells` + + These methods can be used directly:: + + >>> row = table.row(b'row-key1') + >>> row.set_cell(u'fam', b'col1', b'cell-val') + >>> row.delete_cell(u'fam', b'col2') + + .. 
note:: + + A :class:`DirectRow` accumulates mutations locally via the + :meth:`set_cell`, :meth:`delete`, :meth:`delete_cell` and + :meth:`delete_cells` methods. To actually send these mutations to the + Google Cloud Bigtable API, you must call :meth:`commit`. + + :type row_key: bytes + :param row_key: The key for the current row. + + :type table: :class:`Table ` + :param table: The table that owns the row. + """ + + def __init__(self, row_key, table): + super(DirectRow, self).__init__(row_key, table) + self._pb_mutations = [] + + def _get_mutations(self, state): # pylint: disable=unused-argument + """Gets the list of mutations for a given state. + + ``state`` is unused by :class:`DirectRow` but is used by + subclasses. + + :type state: bool + :param state: The state that the mutation should be + applied in. + + :rtype: list + :returns: The list to add new mutations to (for the current state). + """ + return self._pb_mutations + + def set_cell(self, column_family_id, column, value, timestamp=None): + """Sets a value in this row. + + The cell is determined by the ``row_key`` of this :class:`DirectRow` + and the ``column``. The ``column`` must be in an existing + :class:`.ColumnFamily` (as determined by ``column_family_id``). + + .. note:: + + This method adds a mutation to the accumulated mutations on this + row, but does not make an API request. To actually + send an API request (with the mutations) to the Google Cloud + Bigtable API, call :meth:`commit`. + + :type column_family_id: str + :param column_family_id: The column family that contains the column. + Must be of the form + ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + + :type column: bytes + :param column: The column within the column family where the cell + is located. + + :type value: bytes or :class:`int` + :param value: The value to set in the cell. If an integer is used, + will be interpreted as a 64-bit big-endian signed + integer (8 bytes). + + :type timestamp: :class:`datetime.datetime` + :param timestamp: (Optional) The timestamp of the operation. + """ + self._set_cell(column_family_id, column, value, timestamp=timestamp, + state=None) + + def delete(self): + """Deletes this row from the table. + + .. note:: + + This method adds a mutation to the accumulated mutations on this + row, but does not make an API request. To actually + send an API request (with the mutations) to the Google Cloud + Bigtable API, call :meth:`commit`. + """ + self._delete(state=None) + + def delete_cell(self, column_family_id, column, time_range=None): + """Deletes cell in this row. + + .. note:: + + This method adds a mutation to the accumulated mutations on this + row, but does not make an API request. To actually + send an API request (with the mutations) to the Google Cloud + Bigtable API, call :meth:`commit`. + + :type column_family_id: str + :param column_family_id: The column family that contains the column + or columns with cells being deleted. Must be + of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + + :type column: bytes + :param column: The column within the column family that will have a + cell deleted. + + :type time_range: :class:`TimestampRange` + :param time_range: (Optional) The range of time within which cells + should be deleted. + """ + self._delete_cells(column_family_id, [column], time_range=time_range, + state=None) + + def delete_cells(self, column_family_id, columns, time_range=None): + """Deletes cells in this row. + + .. 
note:: + + This method adds a mutation to the accumulated mutations on this + row, but does not make an API request. To actually + send an API request (with the mutations) to the Google Cloud + Bigtable API, call :meth:`commit`. + + :type column_family_id: str + :param column_family_id: The column family that contains the column + or columns with cells being deleted. Must be + of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + + :type columns: :class:`list` of :class:`str` / + :func:`unicode `, or :class:`object` + :param columns: The columns within the column family that will have + cells deleted. If :attr:`ALL_COLUMNS` is used then + the entire column family will be deleted from the row. + + :type time_range: :class:`TimestampRange` + :param time_range: (Optional) The range of time within which cells + should be deleted. + """ + self._delete_cells(column_family_id, columns, time_range=time_range, + state=None) + + def commit(self): + """Makes a ``MutateRow`` API request. + + If no mutations have been created in the row, no request is made. + + Mutations are applied atomically and in order, meaning that earlier + mutations can be masked / negated by later ones. Cells already present + in the row are left unchanged unless explicitly changed by a mutation. + + After committing the accumulated mutations, resets the local + mutations to an empty list. + + :raises: :class:`ValueError ` if the number of + mutations exceeds the :data:`MAX_MUTATIONS`. + """ + mutations_list = self._get_mutations(None) + num_mutations = len(mutations_list) + if num_mutations == 0: + return + if num_mutations > MAX_MUTATIONS: + raise ValueError('%d total mutations exceed the maximum allowable ' + '%d.' % (num_mutations, MAX_MUTATIONS)) + request_pb = messages_v2_pb2.MutateRowRequest( + table_name=self._table.name, + row_key=self._row_key, + mutations=mutations_list, + ) + # We expect a `google.protobuf.empty_pb2.Empty` + client = self._table._instance._client + client._data_stub.MutateRow(request_pb) + self.clear() + + def clear(self): + """Removes all currently accumulated mutations on the current row.""" + del self._pb_mutations[:] + + +class ConditionalRow(_SetDeleteRow): + """Google Cloud Bigtable Row for sending mutations conditionally. + + Each mutation has an associated state: :data:`True` or :data:`False`. + When :meth:`commit`-ed, the mutations for the :data:`True` + state will be applied if the filter matches any cells in + the row, otherwise the :data:`False` state will be applied. + + A :class:`ConditionalRow` accumulates mutations in the same way a + :class:`DirectRow` does: + + * :meth:`set_cell` + * :meth:`delete` + * :meth:`delete_cell` + * :meth:`delete_cells` + + with the only change the extra ``state`` parameter:: + + >>> row_cond = table.row(b'row-key2', filter_=row_filter) + >>> row_cond.set_cell(u'fam', b'col', b'cell-val', state=True) + >>> row_cond.delete_cell(u'fam', b'col', state=False) + + .. note:: + + As with :class:`DirectRow`, to actually send these mutations to the + Google Cloud Bigtable API, you must call :meth:`commit`. + + :type row_key: bytes + :param row_key: The key for the current row. + + :type table: :class:`Table ` + :param table: The table that owns the row. + + :type filter_: :class:`.RowFilter` + :param filter_: Filter to be used for conditional mutations. 
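+
+    A hedged end-to-end sketch (``table`` and ``row_filter`` are assumed to
+    already exist; :meth:`commit` returns whether the filter matched)::
+
+        >>> row_cond = table.row(b'row-key2', filter_=row_filter)
+        >>> row_cond.set_cell(u'fam', b'col', b'cell-val', state=True)
+        >>> row_cond.delete_cell(u'fam', b'col', state=False)
+        >>> matched = row_cond.commit()  # True if ``row_filter`` matched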
+ """ + def __init__(self, row_key, table, filter_): + super(ConditionalRow, self).__init__(row_key, table) + self._filter = filter_ + self._true_pb_mutations = [] + self._false_pb_mutations = [] + + def _get_mutations(self, state): + """Gets the list of mutations for a given state. + + Over-ridden so that the state can be used in: + + * :meth:`set_cell` + * :meth:`delete` + * :meth:`delete_cell` + * :meth:`delete_cells` + + :type state: bool + :param state: The state that the mutation should be + applied in. + + :rtype: list + :returns: The list to add new mutations to (for the current state). + """ + if state: + return self._true_pb_mutations + else: + return self._false_pb_mutations + + def commit(self): + """Makes a ``CheckAndMutateRow`` API request. + + If no mutations have been created in the row, no request is made. + + The mutations will be applied conditionally, based on whether the + filter matches any cells in the :class:`ConditionalRow` or not. (Each + method which adds a mutation has a ``state`` parameter for this + purpose.) + + Mutations are applied atomically and in order, meaning that earlier + mutations can be masked / negated by later ones. Cells already present + in the row are left unchanged unless explicitly changed by a mutation. + + After committing the accumulated mutations, resets the local + mutations. + + :rtype: bool + :returns: Flag indicating if the filter was matched (which also + indicates which set of mutations were applied by the server). + :raises: :class:`ValueError ` if the number of + mutations exceeds the :data:`MAX_MUTATIONS`. + """ + true_mutations = self._get_mutations(state=True) + false_mutations = self._get_mutations(state=False) + num_true_mutations = len(true_mutations) + num_false_mutations = len(false_mutations) + if num_true_mutations == 0 and num_false_mutations == 0: + return + if (num_true_mutations > MAX_MUTATIONS or + num_false_mutations > MAX_MUTATIONS): + raise ValueError( + 'Exceed the maximum allowable mutations (%d). Had %s true ' + 'mutations and %d false mutations.' % ( + MAX_MUTATIONS, num_true_mutations, num_false_mutations)) + + request_pb = messages_v2_pb2.CheckAndMutateRowRequest( + table_name=self._table.name, + row_key=self._row_key, + predicate_filter=self._filter.to_pb(), + true_mutations=true_mutations, + false_mutations=false_mutations, + ) + # We expect a `.messages_v2_pb2.CheckAndMutateRowResponse` + client = self._table._instance._client + resp = client._data_stub.CheckAndMutateRow(request_pb) + self.clear() + return resp.predicate_matched + + # pylint: disable=arguments-differ + def set_cell(self, column_family_id, column, value, timestamp=None, + state=True): + """Sets a value in this row. + + The cell is determined by the ``row_key`` of this + :class:`ConditionalRow` and the ``column``. The ``column`` must be in + an existing :class:`.ColumnFamily` (as determined by + ``column_family_id``). + + .. note:: + + This method adds a mutation to the accumulated mutations on this + row, but does not make an API request. To actually + send an API request (with the mutations) to the Google Cloud + Bigtable API, call :meth:`commit`. + + :type column_family_id: str + :param column_family_id: The column family that contains the column. + Must be of the form + ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + + :type column: bytes + :param column: The column within the column family where the cell + is located. + + :type value: bytes or :class:`int` + :param value: The value to set in the cell. 
If an integer is used, + will be interpreted as a 64-bit big-endian signed + integer (8 bytes). + + :type timestamp: :class:`datetime.datetime` + :param timestamp: (Optional) The timestamp of the operation. + + :type state: bool + :param state: (Optional) The state that the mutation should be + applied in. Defaults to :data:`True`. + """ + self._set_cell(column_family_id, column, value, timestamp=timestamp, + state=state) + + def delete(self, state=True): + """Deletes this row from the table. + + .. note:: + + This method adds a mutation to the accumulated mutations on this + row, but does not make an API request. To actually + send an API request (with the mutations) to the Google Cloud + Bigtable API, call :meth:`commit`. + + :type state: bool + :param state: (Optional) The state that the mutation should be + applied in. Defaults to :data:`True`. + """ + self._delete(state=state) + + def delete_cell(self, column_family_id, column, time_range=None, + state=True): + """Deletes cell in this row. + + .. note:: + + This method adds a mutation to the accumulated mutations on this + row, but does not make an API request. To actually + send an API request (with the mutations) to the Google Cloud + Bigtable API, call :meth:`commit`. + + :type column_family_id: str + :param column_family_id: The column family that contains the column + or columns with cells being deleted. Must be + of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + + :type column: bytes + :param column: The column within the column family that will have a + cell deleted. + + :type time_range: :class:`TimestampRange` + :param time_range: (Optional) The range of time within which cells + should be deleted. + + :type state: bool + :param state: (Optional) The state that the mutation should be + applied in. Defaults to :data:`True`. + """ + self._delete_cells(column_family_id, [column], time_range=time_range, + state=state) + + def delete_cells(self, column_family_id, columns, time_range=None, + state=True): + """Deletes cells in this row. + + .. note:: + + This method adds a mutation to the accumulated mutations on this + row, but does not make an API request. To actually + send an API request (with the mutations) to the Google Cloud + Bigtable API, call :meth:`commit`. + + :type column_family_id: str + :param column_family_id: The column family that contains the column + or columns with cells being deleted. Must be + of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + + :type columns: :class:`list` of :class:`str` / + :func:`unicode `, or :class:`object` + :param columns: The columns within the column family that will have + cells deleted. If :attr:`ALL_COLUMNS` is used then the + entire column family will be deleted from the row. + + :type time_range: :class:`TimestampRange` + :param time_range: (Optional) The range of time within which cells + should be deleted. + + :type state: bool + :param state: (Optional) The state that the mutation should be + applied in. Defaults to :data:`True`. + """ + self._delete_cells(column_family_id, columns, time_range=time_range, + state=state) + # pylint: enable=arguments-differ + + def clear(self): + """Removes all currently accumulated mutations on the current row.""" + del self._true_pb_mutations[:] + del self._false_pb_mutations[:] + + +class AppendRow(Row): + """Google Cloud Bigtable Row for sending append mutations. 
+ + These mutations are intended to augment the value of an existing cell + and uses the methods: + + * :meth:`append_cell_value` + * :meth:`increment_cell_value` + + The first works by appending bytes and the second by incrementing an + integer (stored in the cell as 8 bytes). In either case, if the + cell is empty, assumes the default empty value (empty string for + bytes or and 0 for integer). + + :type row_key: bytes + :param row_key: The key for the current row. + + :type table: :class:`Table ` + :param table: The table that owns the row. + """ + + def __init__(self, row_key, table): + super(AppendRow, self).__init__(row_key, table) + self._rule_pb_list = [] + + def clear(self): + """Removes all currently accumulated modifications on current row.""" + del self._rule_pb_list[:] + + def append_cell_value(self, column_family_id, column, value): + """Appends a value to an existing cell. + + .. note:: + + This method adds a read-modify rule protobuf to the accumulated + read-modify rules on this row, but does not make an API + request. To actually send an API request (with the rules) to the + Google Cloud Bigtable API, call :meth:`commit`. + + :type column_family_id: str + :param column_family_id: The column family that contains the column. + Must be of the form + ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + + :type column: bytes + :param column: The column within the column family where the cell + is located. + + :type value: bytes + :param value: The value to append to the existing value in the cell. If + the targeted cell is unset, it will be treated as + containing the empty string. + """ + column = _to_bytes(column) + value = _to_bytes(value) + rule_pb = data_v2_pb2.ReadModifyWriteRule( + family_name=column_family_id, + column_qualifier=column, + append_value=value) + self._rule_pb_list.append(rule_pb) + + def increment_cell_value(self, column_family_id, column, int_value): + """Increments a value in an existing cell. + + Assumes the value in the cell is stored as a 64 bit integer + serialized to bytes. + + .. note:: + + This method adds a read-modify rule protobuf to the accumulated + read-modify rules on this row, but does not make an API + request. To actually send an API request (with the rules) to the + Google Cloud Bigtable API, call :meth:`commit`. + + :type column_family_id: str + :param column_family_id: The column family that contains the column. + Must be of the form + ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + + :type column: bytes + :param column: The column within the column family where the cell + is located. + + :type int_value: int + :param int_value: The value to increment the existing value in the cell + by. If the targeted cell is unset, it will be treated + as containing a zero. Otherwise, the targeted cell + must contain an 8-byte value (interpreted as a 64-bit + big-endian signed integer), or the entire request + will fail. + """ + column = _to_bytes(column) + rule_pb = data_v2_pb2.ReadModifyWriteRule( + family_name=column_family_id, + column_qualifier=column, + increment_amount=int_value) + self._rule_pb_list.append(rule_pb) + + def commit(self): + """Makes a ``ReadModifyWriteRow`` API request. + + This commits modifications made by :meth:`append_cell_value` and + :meth:`increment_cell_value`. If no modifications were made, makes + no API request and just returns ``{}``. + + Modifies a row atomically, reading the latest existing + timestamp / value from the specified columns and writing a new value by + appending / incrementing. 
The new cell created uses either the current + server time or the highest timestamp of a cell in that column (if it + exceeds the server time). + + After committing the accumulated mutations, resets the local mutations. + + .. code:: python + + >>> append_row.commit() + { + u'col-fam-id': { + b'col-name1': [ + (b'cell-val', datetime.datetime(...)), + (b'cell-val-newer', datetime.datetime(...)), + ], + b'col-name2': [ + (b'altcol-cell-val', datetime.datetime(...)), + ], + }, + u'col-fam-id2': { + b'col-name3-but-other-fam': [ + (b'foo', datetime.datetime(...)), + ], + }, + } + + :rtype: dict + :returns: The new contents of all modified cells. Returned as a + dictionary of column families, each of which holds a + dictionary of columns. Each column contains a list of cells + modified. Each cell is represented with a two-tuple with the + value (in bytes) and the timestamp for the cell. + :raises: :class:`ValueError ` if the number of + mutations exceeds the :data:`MAX_MUTATIONS`. + """ + num_mutations = len(self._rule_pb_list) + if num_mutations == 0: + return {} + if num_mutations > MAX_MUTATIONS: + raise ValueError('%d total append mutations exceed the maximum ' + 'allowable %d.' % (num_mutations, MAX_MUTATIONS)) + request_pb = messages_v2_pb2.ReadModifyWriteRowRequest( + table_name=self._table.name, + row_key=self._row_key, + rules=self._rule_pb_list, + ) + # We expect a `.data_v2_pb2.Row` + client = self._table._instance._client + row_response = client._data_stub.ReadModifyWriteRow(request_pb) + + # Reset modifications after commit-ing request. + self.clear() + + # NOTE: We expect row_response.key == self._row_key but don't check. + return _parse_rmw_row_response(row_response) + + +def _parse_rmw_row_response(row_response): + """Parses the response to a ``ReadModifyWriteRow`` request. + + :type row_response: :class:`.data_v2_pb2.Row` + :param row_response: The response row (with only modified cells) from a + ``ReadModifyWriteRow`` request. + + :rtype: dict + :returns: The new contents of all modified cells. Returned as a + dictionary of column families, each of which holds a + dictionary of columns. Each column contains a list of cells + modified. Each cell is represented with a two-tuple with the + value (in bytes) and the timestamp for the cell. For example: + + .. code:: python + + { + u'col-fam-id': { + b'col-name1': [ + (b'cell-val', datetime.datetime(...)), + (b'cell-val-newer', datetime.datetime(...)), + ], + b'col-name2': [ + (b'altcol-cell-val', datetime.datetime(...)), + ], + }, + u'col-fam-id2': { + b'col-name3-but-other-fam': [ + (b'foo', datetime.datetime(...)), + ], + }, + } + """ + result = {} + for column_family in row_response.row.families: + column_family_id, curr_family = _parse_family_pb(column_family) + result[column_family_id] = curr_family + return result + + +def _parse_family_pb(family_pb): + """Parses a Family protobuf into a dictionary. + + :type family_pb: :class:`._generated.data_pb2.Family` + :param family_pb: A protobuf + + :rtype: tuple + :returns: A string and dictionary. The string is the name of the + column family and the dictionary has column names (within the + family) as keys and cell lists as values. Each cell is + represented with a two-tuple with the value (in bytes) and the + timestamp for the cell. For example: + + .. 
code:: python + + { + b'col-name1': [ + (b'cell-val', datetime.datetime(...)), + (b'cell-val-newer', datetime.datetime(...)), + ], + b'col-name2': [ + (b'altcol-cell-val', datetime.datetime(...)), + ], + } + """ + result = {} + for column in family_pb.columns: + result[column.qualifier] = cells = [] + for cell in column.cells: + val_pair = ( + cell.value, + _datetime_from_microseconds(cell.timestamp_micros), + ) + cells.append(val_pair) + + return family_pb.name, result diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py new file mode 100644 index 000000000000..f293c93d3c43 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py @@ -0,0 +1,441 @@ +# Copyright 2016 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Container for Google Cloud Bigtable Cells and Streaming Row Contents.""" + + +import copy +import six + +from google.cloud._helpers import _datetime_from_microseconds +from google.cloud._helpers import _to_bytes + + +class Cell(object): + """Representation of a Google Cloud Bigtable Cell. + + :type value: bytes + :param value: The value stored in the cell. + + :type timestamp: :class:`datetime.datetime` + :param timestamp: The timestamp when the cell was stored. + + :type labels: list + :param labels: (Optional) List of strings. Labels applied to the cell. + """ + + def __init__(self, value, timestamp, labels=()): + self.value = value + self.timestamp = timestamp + self.labels = list(labels) + + @classmethod + def from_pb(cls, cell_pb): + """Create a new cell from a Cell protobuf. + + :type cell_pb: :class:`._generated.data_pb2.Cell` + :param cell_pb: The protobuf to convert. + + :rtype: :class:`Cell` + :returns: The cell corresponding to the protobuf. + """ + timestamp = _datetime_from_microseconds(cell_pb.timestamp_micros) + if cell_pb.labels: + return cls(cell_pb.value, timestamp, labels=cell_pb.labels) + else: + return cls(cell_pb.value, timestamp) + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + return (other.value == self.value and + other.timestamp == self.timestamp and + other.labels == self.labels) + + def __ne__(self, other): + return not self.__eq__(other) + + +class PartialCellData(object): + """Representation of partial cell in a Google Cloud Bigtable Table. + + These are expected to be updated directly from a + :class:`._generated.bigtable_service_messages_pb2.ReadRowsResponse` + + :type row_key: bytes + :param row_key: The key for the row holding the (partial) cell. + + :type family_name: str + :param family_name: The family name of the (partial) cell. + + :type qualifier: bytes + :param qualifier: The column qualifier of the (partial) cell. + + :type timestamp_micros: int + :param timestamp_micros: The timestamp (in microsecods) of the + (partial) cell. 
+ + :type labels: list of str + :param labels: labels assigned to the (partial) cell + + :type value: bytes + :param value: The (accumulated) value of the (partial) cell. + """ + def __init__(self, row_key, family_name, qualifier, timestamp_micros, + labels=(), value=b''): + self.row_key = row_key + self.family_name = family_name + self.qualifier = qualifier + self.timestamp_micros = timestamp_micros + self.labels = labels + self.value = value + + def append_value(self, value): + """Append bytes from a new chunk to value. + + :type value: bytes + :param value: bytes to append + """ + self.value += value + + +class PartialRowData(object): + """Representation of partial row in a Google Cloud Bigtable Table. + + These are expected to be updated directly from a + :class:`._generated.bigtable_service_messages_pb2.ReadRowsResponse` + + :type row_key: bytes + :param row_key: The key for the row holding the (partial) data. + """ + + def __init__(self, row_key): + self._row_key = row_key + self._cells = {} + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + return (other._row_key == self._row_key and + other._cells == self._cells) + + def __ne__(self, other): + return not self.__eq__(other) + + def to_dict(self): + """Convert the cells to a dictionary. + + This is intended to be used with HappyBase, so the column family and + column qualiers are combined (with ``:``). + + :rtype: dict + :returns: Dictionary containing all the data in the cells of this row. + """ + result = {} + for column_family_id, columns in six.iteritems(self._cells): + for column_qual, cells in six.iteritems(columns): + key = (_to_bytes(column_family_id) + b':' + + _to_bytes(column_qual)) + result[key] = cells + return result + + @property + def cells(self): + """Property returning all the cells accumulated on this partial row. + + :rtype: dict + :returns: Dictionary of the :class:`Cell` objects accumulated. This + dictionary has two-levels of keys (first for column families + and second for column names/qualifiers within a family). For + a given column, a list of :class:`Cell` objects is stored. + """ + return copy.deepcopy(self._cells) + + @property + def row_key(self): + """Getter for the current (partial) row's key. + + :rtype: bytes + :returns: The current (partial) row's key. + """ + return self._row_key + + +class InvalidReadRowsResponse(RuntimeError): + """Exception raised to to invalid response data from back-end.""" + + +class InvalidChunk(RuntimeError): + """Exception raised to to invalid chunk data from back-end.""" + + +class PartialRowsData(object): + """Convenience wrapper for consuming a ``ReadRows`` streaming response. + + :type response_iterator: :class:`~google.cloud.exceptions.GrpcRendezvous` + :param response_iterator: A streaming iterator returned from a + ``ReadRows`` request. + """ + START = "Start" # No responses yet processed. 
+ NEW_ROW = "New row" # No cells yet complete for row + ROW_IN_PROGRESS = "Row in progress" # Some cells complete for row + CELL_IN_PROGRESS = "Cell in progress" # Incomplete cell for row + + def __init__(self, response_iterator): + self._response_iterator = response_iterator + # Fully-processed rows, keyed by `row_key` + self._rows = {} + # Counter for responses pulled from iterator + self._counter = 0 + # Maybe cached from previous response + self._last_scanned_row_key = None + # In-progress row, unset until first response, after commit/reset + self._row = None + # Last complete row, unset until first commit + self._previous_row = None + # In-progress cell, unset until first response, after completion + self._cell = None + # Last complete cell, unset until first completion, after new row + self._previous_cell = None + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + return other._response_iterator == self._response_iterator + + def __ne__(self, other): + return not self.__eq__(other) + + @property + def state(self): + """State machine state. + + :rtype: str + :returns: name of state corresponding to currrent row / chunk + processing. + """ + if self._last_scanned_row_key is None: + return self.START + if self._row is None: + assert self._cell is None + assert self._previous_cell is None + return self.NEW_ROW + if self._cell is not None: + return self.CELL_IN_PROGRESS + if self._previous_cell is not None: + return self.ROW_IN_PROGRESS + return self.NEW_ROW # row added, no chunk yet processed + + @property + def rows(self): + """Property returning all rows accumulated from the stream. + + :rtype: dict + :returns: row_key -> :class:`PartialRowData`. + """ + # NOTE: To avoid duplicating large objects, this is just the + # mutable private data. + return self._rows + + def cancel(self): + """Cancels the iterator, closing the stream.""" + self._response_iterator.cancel() + + def consume_next(self): + """Consume the next ``ReadRowsResponse`` from the stream. + + Parse the response and its chunks into a new/existing row in + :attr:`_rows` + """ + response = six.next(self._response_iterator) + self._counter += 1 + + if self._last_scanned_row_key is None: # first response + if response.last_scanned_row_key: + raise InvalidReadRowsResponse() + + self._last_scanned_row_key = response.last_scanned_row_key + + row = self._row + cell = self._cell + + for chunk in response.chunks: + + self._validate_chunk(chunk) + + if chunk.reset_row: + row = self._row = None + cell = self._cell = self._previous_cell = None + continue + + if row is None: + row = self._row = PartialRowData(chunk.row_key) + + if cell is None: + cell = self._cell = PartialCellData( + chunk.row_key, + chunk.family_name.value, + chunk.qualifier.value, + chunk.timestamp_micros, + chunk.labels, + chunk.value) + self._copy_from_previous(cell) + else: + cell.append_value(chunk.value) + + if chunk.commit_row: + self._save_current_row() + row = cell = None + continue + + if chunk.value_size == 0: + self._save_current_cell() + cell = None + + def consume_all(self, max_loops=None): + """Consume the streamed responses until there are no more. + + This simply calls :meth:`consume_next` until there are no + more to consume. + + :type max_loops: int + :param max_loops: (Optional) Maximum number of times to try to consume + an additional ``ReadRowsResponse``. You can use this + to avoid long wait times. 
+ """ + curr_loop = 0 + if max_loops is None: + max_loops = float('inf') + while curr_loop < max_loops: + curr_loop += 1 + try: + self.consume_next() + except StopIteration: + break + + @staticmethod + def _validate_chunk_status(chunk): + """Helper for :meth:`_validate_chunk_row_in_progress`, etc.""" + # No reseet with other keys + if chunk.reset_row: + _raise_if(chunk.row_key) + _raise_if(chunk.HasField('family_name')) + _raise_if(chunk.HasField('qualifier')) + _raise_if(chunk.timestamp_micros) + _raise_if(chunk.labels) + _raise_if(chunk.value_size) + _raise_if(chunk.value) + # No commit with value size + _raise_if(chunk.commit_row and chunk.value_size > 0) + # No negative value_size (inferred as a general constraint). + _raise_if(chunk.value_size < 0) + + def _validate_chunk_new_row(self, chunk): + """Helper for :meth:`_validate_chunk`.""" + assert self.state == self.NEW_ROW + _raise_if(chunk.reset_row) + _raise_if(not chunk.row_key) + _raise_if(not chunk.family_name) + _raise_if(not chunk.qualifier) + # This constraint is not enforced in the Go example. + _raise_if(chunk.value_size > 0 and chunk.commit_row is not False) + # This constraint is from the Go example, not the spec. + _raise_if(self._previous_row is not None and + chunk.row_key <= self._previous_row.row_key) + + def _same_as_previous(self, chunk): + """Helper for :meth:`_validate_chunk_row_in_progress`""" + previous = self._previous_cell + return (chunk.row_key == previous.row_key and + chunk.family_name == previous.family_name and + chunk.qualifier == previous.qualifier and + chunk.labels == previous.labels) + + def _validate_chunk_row_in_progress(self, chunk): + """Helper for :meth:`_validate_chunk`""" + assert self.state == self.ROW_IN_PROGRESS + self._validate_chunk_status(chunk) + if not chunk.HasField('commit_row') and not chunk.reset_row: + _raise_if(not chunk.timestamp_micros or not chunk.value) + _raise_if(chunk.row_key and + chunk.row_key != self._row.row_key) + _raise_if(chunk.HasField('family_name') and + not chunk.HasField('qualifier')) + previous = self._previous_cell + _raise_if(self._same_as_previous(chunk) and + chunk.timestamp_micros <= previous.timestamp_micros) + + def _validate_chunk_cell_in_progress(self, chunk): + """Helper for :meth:`_validate_chunk`""" + assert self.state == self.CELL_IN_PROGRESS + self._validate_chunk_status(chunk) + self._copy_from_current(chunk) + + def _validate_chunk(self, chunk): + """Helper for :meth:`consume_next`.""" + if self.state == self.NEW_ROW: + self._validate_chunk_new_row(chunk) + if self.state == self.ROW_IN_PROGRESS: + self._validate_chunk_row_in_progress(chunk) + if self.state == self.CELL_IN_PROGRESS: + self._validate_chunk_cell_in_progress(chunk) + + def _save_current_cell(self): + """Helper for :meth:`consume_next`.""" + row, cell = self._row, self._cell + family = row._cells.setdefault(cell.family_name, {}) + qualified = family.setdefault(cell.qualifier, []) + complete = Cell.from_pb(self._cell) + qualified.append(complete) + self._cell, self._previous_cell = None, cell + + def _copy_from_current(self, chunk): + """Helper for :meth:`consume_next`.""" + current = self._cell + if current is not None: + if not chunk.row_key: + chunk.row_key = current.row_key + if not chunk.HasField('family_name'): + chunk.family_name.value = current.family_name + if not chunk.HasField('qualifier'): + chunk.qualifier.value = current.qualifier + if not chunk.timestamp_micros: + chunk.timestamp_micros = current.timestamp_micros + if not chunk.labels: + 
chunk.labels.extend(current.labels) + + def _copy_from_previous(self, cell): + """Helper for :meth:`consume_next`.""" + previous = self._previous_cell + if previous is not None: + if not cell.row_key: + cell.row_key = previous.row_key + if not cell.family_name: + cell.family_name = previous.family_name + if not cell.qualifier: + cell.qualifier = previous.qualifier + + def _save_current_row(self): + """Helper for :meth:`consume_next`.""" + if self._cell: + self._save_current_cell() + self._rows[self._row.row_key] = self._row + self._row, self._previous_row = None, self._row + self._previous_cell = None + + +def _raise_if(predicate, *args): + """Helper for validation methods.""" + if predicate: + raise InvalidChunk(*args) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_filters.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_filters.py new file mode 100644 index 000000000000..e3f3006df286 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_filters.py @@ -0,0 +1,768 @@ +# Copyright 2016 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Filters for Google Cloud Bigtable Row classes.""" + + +from google.cloud._helpers import _microseconds_from_datetime +from google.cloud._helpers import _to_bytes +from google.cloud.bigtable._generated import ( + data_pb2 as data_v2_pb2) + + +class RowFilter(object): + """Basic filter to apply to cells in a row. + + These values can be combined via :class:`RowFilterChain`, + :class:`RowFilterUnion` and :class:`ConditionalRowFilter`. + + .. note:: + + This class is a do-nothing base class for all row filters. + """ + + def __ne__(self, other): + return not self.__eq__(other) + + +class _BoolFilter(RowFilter): + """Row filter that uses a boolean flag. + + :type flag: bool + :param flag: An indicator if a setting is turned on or off. + """ + + def __init__(self, flag): + self.flag = flag + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + return other.flag == self.flag + + +class SinkFilter(_BoolFilter): + """Advanced row filter to skip parent filters. + + :type flag: bool + :param flag: ADVANCED USE ONLY. Hook for introspection into the row filter. + Outputs all cells directly to the output of the read rather + than to any parent filter. Cannot be used within the + ``predicate_filter``, ``true_filter``, or ``false_filter`` + of a :class:`ConditionalRowFilter`. + """ + + def to_pb(self): + """Converts the row filter to a protobuf. + + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. + """ + return data_v2_pb2.RowFilter(sink=self.flag) + + +class PassAllFilter(_BoolFilter): + """Row filter equivalent to not filtering at all. + + :type flag: bool + :param flag: Matches all cells, regardless of input. Functionally + equivalent to leaving ``filter`` unset, but included for + completeness. + """ + + def to_pb(self): + """Converts the row filter to a protobuf. 
+ + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. + """ + return data_v2_pb2.RowFilter(pass_all_filter=self.flag) + + +class BlockAllFilter(_BoolFilter): + """Row filter that doesn't match any cells. + + :type flag: bool + :param flag: Does not match any cells, regardless of input. Useful for + temporarily disabling just part of a filter. + """ + + def to_pb(self): + """Converts the row filter to a protobuf. + + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. + """ + return data_v2_pb2.RowFilter(block_all_filter=self.flag) + + +class _RegexFilter(RowFilter): + """Row filter that uses a regular expression. + + The ``regex`` must be valid RE2 patterns. See Google's + `RE2 reference`_ for the accepted syntax. + + .. _RE2 reference: https://github.com/google/re2/wiki/Syntax + + :type regex: bytes or str + :param regex: A regular expression (RE2) for some row filter. + """ + + def __init__(self, regex): + self.regex = _to_bytes(regex) + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + return other.regex == self.regex + + +class RowKeyRegexFilter(_RegexFilter): + """Row filter for a row key regular expression. + + The ``regex`` must be valid RE2 patterns. See Google's + `RE2 reference`_ for the accepted syntax. + + .. _RE2 reference: https://github.com/google/re2/wiki/Syntax + + .. note:: + + Special care need be used with the expression used. Since + each of these properties can contain arbitrary bytes, the ``\\C`` + escape sequence must be used if a true wildcard is desired. The ``.`` + character will not match the new line character ``\\n``, which may be + present in a binary value. + + :type regex: bytes + :param regex: A regular expression (RE2) to match cells from rows with row + keys that satisfy this regex. For a + ``CheckAndMutateRowRequest``, this filter is unnecessary + since the row key is already specified. + """ + + def to_pb(self): + """Converts the row filter to a protobuf. + + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. + """ + return data_v2_pb2.RowFilter(row_key_regex_filter=self.regex) + + +class RowSampleFilter(RowFilter): + """Matches all cells from a row with probability p. + + :type sample: float + :param sample: The probability of matching a cell (must be in the + interval ``[0, 1]``). + """ + + def __init__(self, sample): + self.sample = sample + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + return other.sample == self.sample + + def to_pb(self): + """Converts the row filter to a protobuf. + + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. + """ + return data_v2_pb2.RowFilter(row_sample_filter=self.sample) + + +class FamilyNameRegexFilter(_RegexFilter): + """Row filter for a family name regular expression. + + The ``regex`` must be valid RE2 patterns. See Google's + `RE2 reference`_ for the accepted syntax. + + .. _RE2 reference: https://github.com/google/re2/wiki/Syntax + + :type regex: str + :param regex: A regular expression (RE2) to match cells from columns in a + given column family. For technical reasons, the regex must + not contain the ``':'`` character, even if it is not being + used as a literal. + """ + + def to_pb(self): + """Converts the row filter to a protobuf. + + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. 
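+
+        A hedged construction sketch (the family-name pattern is
+        illustrative only):
+
+        .. code:: python
+
+            filter_ = FamilyNameRegexFilter('fam[12]')
+            filter_pb = filter_.to_pb()  # a ``data_v2_pb2.RowFilter``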
+ """ + return data_v2_pb2.RowFilter(family_name_regex_filter=self.regex) + + +class ColumnQualifierRegexFilter(_RegexFilter): + """Row filter for a column qualifier regular expression. + + The ``regex`` must be valid RE2 patterns. See Google's + `RE2 reference`_ for the accepted syntax. + + .. _RE2 reference: https://github.com/google/re2/wiki/Syntax + + .. note:: + + Special care need be used with the expression used. Since + each of these properties can contain arbitrary bytes, the ``\\C`` + escape sequence must be used if a true wildcard is desired. The ``.`` + character will not match the new line character ``\\n``, which may be + present in a binary value. + + :type regex: bytes + :param regex: A regular expression (RE2) to match cells from column that + match this regex (irrespective of column family). + """ + + def to_pb(self): + """Converts the row filter to a protobuf. + + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. + """ + return data_v2_pb2.RowFilter(column_qualifier_regex_filter=self.regex) + + +class TimestampRange(object): + """Range of time with inclusive lower and exclusive upper bounds. + + :type start: :class:`datetime.datetime` + :param start: (Optional) The (inclusive) lower bound of the timestamp + range. If omitted, defaults to Unix epoch. + + :type end: :class:`datetime.datetime` + :param end: (Optional) The (exclusive) upper bound of the timestamp + range. If omitted, no upper bound is used. + """ + + def __init__(self, start=None, end=None): + self.start = start + self.end = end + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + return (other.start == self.start and + other.end == self.end) + + def __ne__(self, other): + return not self.__eq__(other) + + def to_pb(self): + """Converts the :class:`TimestampRange` to a protobuf. + + :rtype: :class:`.data_v2_pb2.TimestampRange` + :returns: The converted current object. + """ + timestamp_range_kwargs = {} + if self.start is not None: + timestamp_range_kwargs['start_timestamp_micros'] = ( + _microseconds_from_datetime(self.start)) + if self.end is not None: + timestamp_range_kwargs['end_timestamp_micros'] = ( + _microseconds_from_datetime(self.end)) + return data_v2_pb2.TimestampRange(**timestamp_range_kwargs) + + +class TimestampRangeFilter(RowFilter): + """Row filter that limits cells to a range of time. + + :type range_: :class:`TimestampRange` + :param range_: Range of time that cells should match against. + """ + + def __init__(self, range_): + self.range_ = range_ + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + return other.range_ == self.range_ + + def to_pb(self): + """Converts the row filter to a protobuf. + + First converts the ``range_`` on the current object to a protobuf and + then uses it in the ``timestamp_range_filter`` field. + + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. + """ + return data_v2_pb2.RowFilter( + timestamp_range_filter=self.range_.to_pb()) + + +class ColumnRangeFilter(RowFilter): + """A row filter to restrict to a range of columns. + + Both the start and end column can be included or excluded in the range. + By default, we include them both, but this can be changed with optional + flags. + + :type column_family_id: str + :param column_family_id: The column family that contains the columns. Must + be of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + + :type start_column: bytes + :param start_column: The start of the range of columns. 
If no value is + used, the backend applies no upper bound to the + values. + + :type end_column: bytes + :param end_column: The end of the range of columns. If no value is used, + the backend applies no upper bound to the values. + + :type inclusive_start: bool + :param inclusive_start: Boolean indicating if the start column should be + included in the range (or excluded). Defaults + to :data:`True` if ``start_column`` is passed and + no ``inclusive_start`` was given. + + :type inclusive_end: bool + :param inclusive_end: Boolean indicating if the end column should be + included in the range (or excluded). Defaults + to :data:`True` if ``end_column`` is passed and + no ``inclusive_end`` was given. + + :raises: :class:`ValueError ` if ``inclusive_start`` + is set but no ``start_column`` is given or if ``inclusive_end`` + is set but no ``end_column`` is given + """ + + def __init__(self, column_family_id, start_column=None, end_column=None, + inclusive_start=None, inclusive_end=None): + self.column_family_id = column_family_id + + if inclusive_start is None: + inclusive_start = True + elif start_column is None: + raise ValueError('Inclusive start was specified but no ' + 'start column was given.') + self.start_column = start_column + self.inclusive_start = inclusive_start + + if inclusive_end is None: + inclusive_end = True + elif end_column is None: + raise ValueError('Inclusive end was specified but no ' + 'end column was given.') + self.end_column = end_column + self.inclusive_end = inclusive_end + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + return (other.column_family_id == self.column_family_id and + other.start_column == self.start_column and + other.end_column == self.end_column and + other.inclusive_start == self.inclusive_start and + other.inclusive_end == self.inclusive_end) + + def to_pb(self): + """Converts the row filter to a protobuf. + + First converts to a :class:`.data_v2_pb2.ColumnRange` and then uses it + in the ``column_range_filter`` field. + + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. + """ + column_range_kwargs = {'family_name': self.column_family_id} + if self.start_column is not None: + if self.inclusive_start: + key = 'start_qualifier_closed' + else: + key = 'start_qualifier_open' + column_range_kwargs[key] = _to_bytes(self.start_column) + if self.end_column is not None: + if self.inclusive_end: + key = 'end_qualifier_closed' + else: + key = 'end_qualifier_open' + column_range_kwargs[key] = _to_bytes(self.end_column) + + column_range = data_v2_pb2.ColumnRange(**column_range_kwargs) + return data_v2_pb2.RowFilter(column_range_filter=column_range) + + +class ValueRegexFilter(_RegexFilter): + """Row filter for a value regular expression. + + The ``regex`` must be valid RE2 patterns. See Google's + `RE2 reference`_ for the accepted syntax. + + .. _RE2 reference: https://github.com/google/re2/wiki/Syntax + + .. note:: + + Special care need be used with the expression used. Since + each of these properties can contain arbitrary bytes, the ``\\C`` + escape sequence must be used if a true wildcard is desired. The ``.`` + character will not match the new line character ``\\n``, which may be + present in a binary value. + + :type regex: bytes + :param regex: A regular expression (RE2) to match cells with values that + match this regex. + """ + + def to_pb(self): + """Converts the row filter to a protobuf. + + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. 
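+
+        A hedged construction sketch (``str`` patterns are coerced to
+        ``bytes``; the pattern below is illustrative only):
+
+        .. code:: python
+
+            filter_ = ValueRegexFilter(b'cell-val.*')
+            filter_pb = filter_.to_pb()  # a ``data_v2_pb2.RowFilter``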
+ """ + return data_v2_pb2.RowFilter(value_regex_filter=self.regex) + + +class ValueRangeFilter(RowFilter): + """A range of values to restrict to in a row filter. + + Will only match cells that have values in this range. + + Both the start and end value can be included or excluded in the range. + By default, we include them both, but this can be changed with optional + flags. + + :type start_value: bytes + :param start_value: The start of the range of values. If no value is used, + the backend applies no lower bound to the values. + + :type end_value: bytes + :param end_value: The end of the range of values. If no value is used, + the backend applies no upper bound to the values. + + :type inclusive_start: bool + :param inclusive_start: Boolean indicating if the start value should be + included in the range (or excluded). Defaults + to :data:`True` if ``start_value`` is passed and + no ``inclusive_start`` was given. + + :type inclusive_end: bool + :param inclusive_end: Boolean indicating if the end value should be + included in the range (or excluded). Defaults + to :data:`True` if ``end_value`` is passed and + no ``inclusive_end`` was given. + + :raises: :class:`ValueError ` if ``inclusive_start`` + is set but no ``start_value`` is given or if ``inclusive_end`` + is set but no ``end_value`` is given + """ + + def __init__(self, start_value=None, end_value=None, + inclusive_start=None, inclusive_end=None): + if inclusive_start is None: + inclusive_start = True + elif start_value is None: + raise ValueError('Inclusive start was specified but no ' + 'start value was given.') + self.start_value = start_value + self.inclusive_start = inclusive_start + + if inclusive_end is None: + inclusive_end = True + elif end_value is None: + raise ValueError('Inclusive end was specified but no ' + 'end value was given.') + self.end_value = end_value + self.inclusive_end = inclusive_end + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + return (other.start_value == self.start_value and + other.end_value == self.end_value and + other.inclusive_start == self.inclusive_start and + other.inclusive_end == self.inclusive_end) + + def to_pb(self): + """Converts the row filter to a protobuf. + + First converts to a :class:`.data_v2_pb2.ValueRange` and then uses + it to create a row filter protobuf. + + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. + """ + value_range_kwargs = {} + if self.start_value is not None: + if self.inclusive_start: + key = 'start_value_closed' + else: + key = 'start_value_open' + value_range_kwargs[key] = _to_bytes(self.start_value) + if self.end_value is not None: + if self.inclusive_end: + key = 'end_value_closed' + else: + key = 'end_value_open' + value_range_kwargs[key] = _to_bytes(self.end_value) + + value_range = data_v2_pb2.ValueRange(**value_range_kwargs) + return data_v2_pb2.RowFilter(value_range_filter=value_range) + + +class _CellCountFilter(RowFilter): + """Row filter that uses an integer count of cells. + + The cell count is used as an offset or a limit for the number + of results returned. + + :type num_cells: int + :param num_cells: An integer count / offset / limit. + """ + + def __init__(self, num_cells): + self.num_cells = num_cells + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + return other.num_cells == self.num_cells + + +class CellsRowOffsetFilter(_CellCountFilter): + """Row filter to skip cells in a row. 
+ + :type num_cells: int + :param num_cells: Skips the first N cells of the row. + """ + + def to_pb(self): + """Converts the row filter to a protobuf. + + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. + """ + return data_v2_pb2.RowFilter( + cells_per_row_offset_filter=self.num_cells) + + +class CellsRowLimitFilter(_CellCountFilter): + """Row filter to limit cells in a row. + + :type num_cells: int + :param num_cells: Matches only the first N cells of the row. + """ + + def to_pb(self): + """Converts the row filter to a protobuf. + + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. + """ + return data_v2_pb2.RowFilter(cells_per_row_limit_filter=self.num_cells) + + +class CellsColumnLimitFilter(_CellCountFilter): + """Row filter to limit cells in a column. + + :type num_cells: int + :param num_cells: Matches only the most recent N cells within each column. + This filters a (family name, column) pair, based on + timestamps of each cell. + """ + + def to_pb(self): + """Converts the row filter to a protobuf. + + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. + """ + return data_v2_pb2.RowFilter( + cells_per_column_limit_filter=self.num_cells) + + +class StripValueTransformerFilter(_BoolFilter): + """Row filter that transforms cells into empty string (0 bytes). + + :type flag: bool + :param flag: If :data:`True`, replaces each cell's value with the empty + string. As the name indicates, this is more useful as a + transformer than a generic query / filter. + """ + + def to_pb(self): + """Converts the row filter to a protobuf. + + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. + """ + return data_v2_pb2.RowFilter(strip_value_transformer=self.flag) + + +class ApplyLabelFilter(RowFilter): + """Filter to apply labels to cells. + + Intended to be used as an intermediate filter on a pre-existing filtered + result set. This way if two sets are combined, the label can tell where + the cell(s) originated.This allows the client to determine which results + were produced from which part of the filter. + + .. note:: + + Due to a technical limitation of the backend, it is not currently + possible to apply multiple labels to a cell. + + :type label: str + :param label: Label to apply to cells in the output row. Values must be + at most 15 characters long, and match the pattern + ``[a-z0-9\\-]+``. + """ + + def __init__(self, label): + self.label = label + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + return other.label == self.label + + def to_pb(self): + """Converts the row filter to a protobuf. + + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. + """ + return data_v2_pb2.RowFilter(apply_label_transformer=self.label) + + +class _FilterCombination(RowFilter): + """Chain of row filters. + + Sends rows through several filters in sequence. The filters are "chained" + together to process a row. After the first filter is applied, the second + is applied to the filtered output and so on for subsequent filters. + + :type filters: list + :param filters: List of :class:`RowFilter` + """ + + def __init__(self, filters=None): + if filters is None: + filters = [] + self.filters = filters + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + return other.filters == self.filters + + +class RowFilterChain(_FilterCombination): + """Chain of row filters. 
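+
+    For example, an illustrative combination (the specific filters chosen
+    here are arbitrary; chaining itself is described below)::
+
+        chain = RowFilterChain(filters=[
+            CellsColumnLimitFilter(1),
+            StripValueTransformerFilter(True),
+        ])
+        chain_pb = chain.to_pb()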
+ + Sends rows through several filters in sequence. The filters are "chained" + together to process a row. After the first filter is applied, the second + is applied to the filtered output and so on for subsequent filters. + + :type filters: list + :param filters: List of :class:`RowFilter` + """ + + def to_pb(self): + """Converts the row filter to a protobuf. + + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. + """ + chain = data_v2_pb2.RowFilter.Chain( + filters=[row_filter.to_pb() for row_filter in self.filters]) + return data_v2_pb2.RowFilter(chain=chain) + + +class RowFilterUnion(_FilterCombination): + """Union of row filters. + + Sends rows through several filters simultaneously, then + merges / interleaves all the filtered results together. + + If multiple cells are produced with the same column and timestamp, + they will all appear in the output row in an unspecified mutual order. + + :type filters: list + :param filters: List of :class:`RowFilter` + """ + + def to_pb(self): + """Converts the row filter to a protobuf. + + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. + """ + interleave = data_v2_pb2.RowFilter.Interleave( + filters=[row_filter.to_pb() for row_filter in self.filters]) + return data_v2_pb2.RowFilter(interleave=interleave) + + +class ConditionalRowFilter(RowFilter): + """Conditional row filter which exhibits ternary behavior. + + Executes one of two filters based on another filter. If the ``base_filter`` + returns any cells in the row, then ``true_filter`` is executed. If not, + then ``false_filter`` is executed. + + .. note:: + + The ``base_filter`` does not execute atomically with the true and false + filters, which may lead to inconsistent or unexpected results. + + Additionally, executing a :class:`ConditionalRowFilter` has poor + performance on the server, especially when ``false_filter`` is set. + + :type base_filter: :class:`RowFilter` + :param base_filter: The filter to condition on before executing the + true/false filters. + + :type true_filter: :class:`RowFilter` + :param true_filter: (Optional) The filter to execute if there are any cells + matching ``base_filter``. If not provided, no results + will be returned in the true case. + + :type false_filter: :class:`RowFilter` + :param false_filter: (Optional) The filter to execute if there are no cells + matching ``base_filter``. If not provided, no results + will be returned in the false case. + """ + + def __init__(self, base_filter, true_filter=None, false_filter=None): + self.base_filter = base_filter + self.true_filter = true_filter + self.false_filter = false_filter + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + return (other.base_filter == self.base_filter and + other.true_filter == self.true_filter and + other.false_filter == self.false_filter) + + def to_pb(self): + """Converts the row filter to a protobuf. + + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. 
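+
+        A minimal sketch (the regex and label below are arbitrary,
+        illustrative values)::
+
+            cond_filter = ConditionalRowFilter(
+                base_filter=ValueRegexFilter(b'value-[0-9]+'),
+                true_filter=ApplyLabelFilter('matched'),
+                false_filter=StripValueTransformerFilter(True),
+            )
+            filter_pb = cond_filter.to_pb()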
+ """ + condition_kwargs = {'predicate_filter': self.base_filter.to_pb()} + if self.true_filter is not None: + condition_kwargs['true_filter'] = self.true_filter.to_pb() + if self.false_filter is not None: + condition_kwargs['false_filter'] = self.false_filter.to_pb() + condition = data_v2_pb2.RowFilter.Condition(**condition_kwargs) + return data_v2_pb2.RowFilter(condition=condition) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py new file mode 100644 index 000000000000..f2120ddc5416 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -0,0 +1,375 @@ +# Copyright 2015 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""User friendly container for Google Cloud Bigtable Table.""" + +from google.cloud._helpers import _to_bytes +from google.cloud.bigtable._generated import ( + bigtable_pb2 as data_messages_v2_pb2) +from google.cloud.bigtable._generated import ( + bigtable_table_admin_pb2 as table_admin_messages_v2_pb2) +from google.cloud.bigtable._generated import ( + table_pb2 as table_v2_pb2) +from google.cloud.bigtable.column_family import _gc_rule_from_pb +from google.cloud.bigtable.column_family import ColumnFamily +from google.cloud.bigtable.row import AppendRow +from google.cloud.bigtable.row import ConditionalRow +from google.cloud.bigtable.row import DirectRow +from google.cloud.bigtable.row_data import PartialRowsData + + +class Table(object): + """Representation of a Google Cloud Bigtable Table. + + .. note:: + + We don't define any properties on a table other than the name. + The only other fields are ``column_families`` and ``granularity``, + The ``column_families`` are not stored locally and + ``granularity`` is an enum with only one value. + + We can use a :class:`Table` to: + + * :meth:`create` the table + * :meth:`rename` the table + * :meth:`delete` the table + * :meth:`list_column_families` in the table + + :type table_id: str + :param table_id: The ID of the table. + + :type instance: :class:`Instance <.instance.Instance>` + :param instance: The instance that owns the table. + """ + + def __init__(self, table_id, instance): + self.table_id = table_id + self._instance = instance + + @property + def name(self): + """Table name used in requests. + + .. note:: + + This property will not change if ``table_id`` does not, but the + return value is not cached. + + The table name is of the form + + ``"projects/../instances/../tables/{table_id}"`` + + :rtype: str + :returns: The table name. + """ + return self._instance.name + '/tables/' + self.table_id + + def column_family(self, column_family_id, gc_rule=None): + """Factory to create a column family associated with this table. + + :type column_family_id: str + :param column_family_id: The ID of the column family. Must be of the + form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. 
+ + :type gc_rule: :class:`.GarbageCollectionRule` + :param gc_rule: (Optional) The garbage collection settings for this + column family. + + :rtype: :class:`.ColumnFamily` + :returns: A column family owned by this table. + """ + return ColumnFamily(column_family_id, self, gc_rule=gc_rule) + + def row(self, row_key, filter_=None, append=False): + """Factory to create a row associated with this table. + + .. warning:: + + At most one of ``filter_`` and ``append`` can be used in a + :class:`Row`. + + :type row_key: bytes + :param row_key: The key for the row being created. + + :type filter_: :class:`.RowFilter` + :param filter_: (Optional) Filter to be used for conditional mutations. + See :class:`.DirectRow` for more details. + + :type append: bool + :param append: (Optional) Flag to determine if the row should be used + for append mutations. + + :rtype: :class:`.DirectRow` + :returns: A row owned by this table. + :raises: :class:`ValueError ` if both + ``filter_`` and ``append`` are used. + """ + if append and filter_ is not None: + raise ValueError('At most one of filter_ and append can be set') + if append: + return AppendRow(row_key, self) + elif filter_ is not None: + return ConditionalRow(row_key, self, filter_=filter_) + else: + return DirectRow(row_key, self) + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + return (other.table_id == self.table_id and + other._instance == self._instance) + + def __ne__(self, other): + return not self.__eq__(other) + + def create(self, initial_split_keys=None, column_families=()): + """Creates this table. + + .. note:: + + A create request returns a + :class:`._generated.table_pb2.Table` but we don't use + this response. + + :type initial_split_keys: list + :param initial_split_keys: (Optional) List of row keys that will be + used to initially split the table into + several tablets (Tablets are similar to + HBase regions). Given two split keys, + ``"s1"`` and ``"s2"``, three tablets will be + created, spanning the key ranges: + ``[, s1)``, ``[s1, s2)``, ``[s2, )``. + + :type column_families: list + :param column_families: (Optional) List or other iterable of + :class:`.ColumnFamily` instances. + """ + if initial_split_keys is not None: + split_pb = table_admin_messages_v2_pb2.CreateTableRequest.Split + initial_split_keys = [ + split_pb(key=key) for key in initial_split_keys] + + table_pb = None + if column_families: + table_pb = table_v2_pb2.Table() + for col_fam in column_families: + curr_id = col_fam.column_family_id + table_pb.column_families[curr_id].MergeFrom(col_fam.to_pb()) + + request_pb = table_admin_messages_v2_pb2.CreateTableRequest( + initial_splits=initial_split_keys or [], + parent=self._instance.name, + table_id=self.table_id, + table=table_pb, + ) + client = self._instance._client + # We expect a `._generated.table_pb2.Table` + client._table_stub.CreateTable(request_pb) + + def delete(self): + """Delete this table.""" + request_pb = table_admin_messages_v2_pb2.DeleteTableRequest( + name=self.name) + client = self._instance._client + # We expect a `google.protobuf.empty_pb2.Empty` + client._table_stub.DeleteTable(request_pb) + + def list_column_families(self): + """List the column families owned by this table. + + :rtype: dict + :returns: Dictionary of column families attached to this table. Keys + are strings (column family names) and values are + :class:`.ColumnFamily` instances. 
+ :raises: :class:`ValueError ` if the column + family name from the response does not agree with the computed + name from the column family ID. + """ + request_pb = table_admin_messages_v2_pb2.GetTableRequest( + name=self.name) + client = self._instance._client + # We expect a `._generated.table_pb2.Table` + table_pb = client._table_stub.GetTable(request_pb) + + result = {} + for column_family_id, value_pb in table_pb.column_families.items(): + gc_rule = _gc_rule_from_pb(value_pb.gc_rule) + column_family = self.column_family(column_family_id, + gc_rule=gc_rule) + result[column_family_id] = column_family + return result + + def read_row(self, row_key, filter_=None): + """Read a single row from this table. + + :type row_key: bytes + :param row_key: The key of the row to read from. + + :type filter_: :class:`.RowFilter` + :param filter_: (Optional) The filter to apply to the contents of the + row. If unset, returns the entire row. + + :rtype: :class:`.PartialRowData`, :data:`NoneType ` + :returns: The contents of the row if any chunks were returned in + the response, otherwise :data:`None`. + :raises: :class:`ValueError ` if a commit row + chunk is never encountered. + """ + request_pb = _create_row_request(self.name, row_key=row_key, + filter_=filter_) + client = self._instance._client + response_iterator = client._data_stub.ReadRows(request_pb) + rows_data = PartialRowsData(response_iterator) + rows_data.consume_all() + if rows_data.state not in (rows_data.NEW_ROW, rows_data.START): + raise ValueError('The row remains partial / is not committed.') + + if len(rows_data.rows) == 0: + return None + + return rows_data.rows[row_key] + + def read_rows(self, start_key=None, end_key=None, limit=None, + filter_=None): + """Read rows from this table. + + :type start_key: bytes + :param start_key: (Optional) The beginning of a range of row keys to + read from. The range will include ``start_key``. If + left empty, will be interpreted as the empty string. + + :type end_key: bytes + :param end_key: (Optional) The end of a range of row keys to read from. + The range will not include ``end_key``. If left empty, + will be interpreted as an infinite string. + + :type limit: int + :param limit: (Optional) The read will terminate after committing to N + rows' worth of results. The default (zero) is to return + all results. + + :type filter_: :class:`.RowFilter` + :param filter_: (Optional) The filter to apply to the contents of the + specified row(s). If unset, reads every column in + each row. + + :rtype: :class:`.PartialRowsData` + :returns: A :class:`.PartialRowsData` convenience wrapper for consuming + the streamed results. + """ + request_pb = _create_row_request( + self.name, start_key=start_key, end_key=end_key, filter_=filter_, + limit=limit) + client = self._instance._client + response_iterator = client._data_stub.ReadRows(request_pb) + # We expect an iterator of `data_messages_v2_pb2.ReadRowsResponse` + return PartialRowsData(response_iterator) + + def sample_row_keys(self): + """Read a sample of row keys in the table. + + The returned row keys will delimit contiguous sections of the table of + approximately equal size, which can be used to break up the data for + distributed tasks like mapreduces. + + The elements in the iterator are a SampleRowKeys response and they have + the properties ``offset_bytes`` and ``row_key``. They occur in sorted + order. 
The table might have contents before the first row key in the + list and after the last one, but a key containing the empty string + indicates "end of table" and will be the last response given, if + present. + + .. note:: + + Row keys in this list may not have ever been written to or read + from, and users should therefore not make any assumptions about the + row key structure that are specific to their use case. + + The ``offset_bytes`` field on a response indicates the approximate + total storage space used by all rows in the table which precede + ``row_key``. Buffering the contents of all rows between two subsequent + samples would require space roughly equal to the difference in their + ``offset_bytes`` fields. + + :rtype: :class:`~google.cloud.exceptions.GrpcRendezvous` + :returns: A cancel-able iterator. Can be consumed by calling ``next()`` + or by casting to a :class:`list` and can be cancelled by + calling ``cancel()``. + """ + request_pb = data_messages_v2_pb2.SampleRowKeysRequest( + table_name=self.name) + client = self._instance._client + response_iterator = client._data_stub.SampleRowKeys(request_pb) + return response_iterator + + +def _create_row_request(table_name, row_key=None, start_key=None, end_key=None, + filter_=None, limit=None): + """Creates a request to read rows in a table. + + :type table_name: str + :param table_name: The name of the table to read from. + + :type row_key: bytes + :param row_key: (Optional) The key of a specific row to read from. + + :type start_key: bytes + :param start_key: (Optional) The beginning of a range of row keys to + read from. The range will include ``start_key``. If + left empty, will be interpreted as the empty string. + + :type end_key: bytes + :param end_key: (Optional) The end of a range of row keys to read from. + The range will not include ``end_key``. If left empty, + will be interpreted as an infinite string. + + :type filter_: :class:`.RowFilter` + :param filter_: (Optional) The filter to apply to the contents of the + specified row(s). If unset, reads the entire table. + + :type limit: int + :param limit: (Optional) The read will terminate after committing to N + rows' worth of results. The default (zero) is to return + all results. + + :rtype: :class:`data_messages_v2_pb2.ReadRowsRequest` + :returns: The ``ReadRowsRequest`` protobuf corresponding to the inputs. 
+ :raises: :class:`ValueError ` if both + ``row_key`` and one of ``start_key`` and ``end_key`` are set + """ + request_kwargs = {'table_name': table_name} + if (row_key is not None and + (start_key is not None or end_key is not None)): + raise ValueError('Row key and row range cannot be ' + 'set simultaneously') + range_kwargs = {} + if start_key is not None or end_key is not None: + if start_key is not None: + range_kwargs['start_key_closed'] = _to_bytes(start_key) + if end_key is not None: + range_kwargs['end_key_open'] = _to_bytes(end_key) + if filter_ is not None: + request_kwargs['filter'] = filter_.to_pb() + if limit is not None: + request_kwargs['rows_limit'] = limit + + message = data_messages_v2_pb2.ReadRowsRequest(**request_kwargs) + + if row_key is not None: + message.rows.row_keys.append(_to_bytes(row_key)) + + if range_kwargs: + message.rows.row_ranges.add(**range_kwargs) + + return message diff --git a/packages/google-cloud-bigtable/unit_tests/__init__.py b/packages/google-cloud-bigtable/unit_tests/__init__.py new file mode 100644 index 000000000000..58e0d9153632 --- /dev/null +++ b/packages/google-cloud-bigtable/unit_tests/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2016 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/packages/google-cloud-bigtable/unit_tests/_testing.py b/packages/google-cloud-bigtable/unit_tests/_testing.py new file mode 100644 index 000000000000..e67af6a1498c --- /dev/null +++ b/packages/google-cloud-bigtable/unit_tests/_testing.py @@ -0,0 +1,46 @@ +# Copyright 2015 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Mocks used to emulate gRPC generated objects.""" + + +class _FakeStub(object): + """Acts as a gPRC stub.""" + + def __init__(self, *results): + self.results = results + self.method_calls = [] + + def __getattr__(self, name): + # We need not worry about attributes set in constructor + # since __getattribute__ will handle them. + return _MethodMock(name, self) + + +class _MethodMock(object): + """Mock for API method attached to a gRPC stub. + + These are of type :class:`grpc._channel._UnaryUnaryMultiCallable`. 
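+
+    Roughly, a :class:`_FakeStub` is seeded with the responses it should
+    return, and each attribute access yields a :class:`_MethodMock` that
+    records the call (the names below are illustrative)::
+
+        stub = _FakeStub(response_pb)
+        result = stub.GetTable(request_pb)
+        # result is response_pb
+        # stub.method_calls == [('GetTable', (request_pb,), {})]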
+ """ + + def __init__(self, name, stub): + self._name = name + self._stub = stub + + def __call__(self, *args, **kwargs): + """Sync method meant to mock a gRPC stub request.""" + self._stub.method_calls.append((self._name, args, kwargs)) + curr_result, self._stub.results = (self._stub.results[0], + self._stub.results[1:]) + return curr_result diff --git a/packages/google-cloud-bigtable/unit_tests/read-rows-acceptance-test.json b/packages/google-cloud-bigtable/unit_tests/read-rows-acceptance-test.json new file mode 100644 index 000000000000..4973831f4979 --- /dev/null +++ b/packages/google-cloud-bigtable/unit_tests/read-rows-acceptance-test.json @@ -0,0 +1,1178 @@ +{ + "tests": [ + { + "name": "invalid - no commit", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - no cell key before commit", + "chunks": [ + "commit_row: true\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - no cell key before value", + "chunks": [ + "timestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - new col family must specify qualifier", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "family_name: \u003c\n value: \"B\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "bare commit implies ts=0", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", + "commit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "", + "label": "", + "error": false + } + ] + }, + { + "name": "simple row with timestamp", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + } + ] + }, + { + "name": "missing timestamp, implied ts=0", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "value-VAL", + "label": "", + "error": false + } + ] + }, + { + "name": "empty cell value", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "", + "label": 
"", + "error": false + } + ] + }, + { + "name": "two unsplit cells", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "timestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "two qualifiers", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "qualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "D", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "two families", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "family_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"E\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "B", + "qual": "E", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "with labels", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nlabels: \"L_1\"\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "timestamp_micros: 102\nlabels: \"L_2\"\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "L_1", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 102, + "value": "value-VAL_2", + "label": "L_2", + "error": false + } + ] + }, + { + "name": "split cell, bare commit", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL\"\ncommit_row: false\n", + "commit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "", + "label": "", + "error": false + } + ] + }, + { + "name": "split cell", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + } + ] + }, + { + "name": "split four ways", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: 
\u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nlabels: \"L\"\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"a\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"l\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"ue-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "L", + "error": false + } + ] + }, + { + "name": "two split cells", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_1\"\ncommit_row: false\n", + "timestamp_micros: 102\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "multi-qualifier splits", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_1\"\ncommit_row: false\n", + "qualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 102\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "D", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "multi-qualifier multi-split", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"a\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"lue-VAL_1\"\ncommit_row: false\n", + "qualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 102\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"a\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"lue-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "D", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "multi-family split", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_1\"\ncommit_row: false\n", + "family_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"E\"\n\u003e\ntimestamp_micros: 102\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "B", + "qual": "E", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "invalid - no commit between rows", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: 
\u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - no commit after first row", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - last row missing commit", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - duplicate row key", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n", + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - new row missing row key", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n", + "timestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "two rows", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + } + ] + }, + { + "name": "two rows implicit timestamp", + "chunks": 
[ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\nvalue: \"value-VAL\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + } + ] + }, + { + "name": "two rows empty value", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + } + ] + }, + { + "name": "two rows, one with multiple cells", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "timestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "B", + "qual": "D", + "ts": 103, + "value": "value-VAL_3", + "label": "", + "error": false + } + ] + }, + { + "name": "two rows, multiple cells", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "qualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"E\"\n\u003e\ntimestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: false\n", + "qualifier: \u003c\n value: \"F\"\n\u003e\ntimestamp_micros: 104\nvalue: \"value-VAL_4\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK_1", + "fm": "A", + "qual": "D", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "B", + "qual": "E", + "ts": 103, + "value": "value-VAL_3", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "B", + "qual": "F", + "ts": 104, + "value": "value-VAL_4", + "label": "", + "error": false + } + ] + }, + { + "name": "two rows, multiple cells, multiple families", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "family_name: \u003c\n value: 
\"B\"\n\u003e\nqualifier: \u003c\n value: \"E\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"M\"\n\u003e\nqualifier: \u003c\n value: \"O\"\n\u003e\ntimestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: false\n", + "family_name: \u003c\n value: \"N\"\n\u003e\nqualifier: \u003c\n value: \"P\"\n\u003e\ntimestamp_micros: 104\nvalue: \"value-VAL_4\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK_1", + "fm": "B", + "qual": "E", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "M", + "qual": "O", + "ts": 103, + "value": "value-VAL_3", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "N", + "qual": "P", + "ts": 104, + "value": "value-VAL_4", + "label": "", + "error": false + } + ] + }, + { + "name": "two rows, four cells, 2 labels", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nlabels: \"L_1\"\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "timestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 103\nlabels: \"L_3\"\nvalue: \"value-VAL_3\"\ncommit_row: false\n", + "timestamp_micros: 104\nvalue: \"value-VAL_4\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "L_1", + "error": false + }, + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "B", + "qual": "D", + "ts": 103, + "value": "value-VAL_3", + "label": "L_3", + "error": false + }, + { + "rk": "RK_2", + "fm": "B", + "qual": "D", + "ts": 104, + "value": "value-VAL_4", + "label": "", + "error": false + } + ] + }, + { + "name": "two rows with splits, same timestamp", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_1\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "invalid - bare reset", + "chunks": [ + "reset_row: true\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - bad reset, no commit", + "chunks": [ + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - missing key after reset", + 
"chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", + "reset_row: true\n", + "timestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "no data after reset", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", + "reset_row: true\n" + ], + "results": null + }, + { + "name": "simple reset", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + } + ] + }, + { + "name": "reset to new val", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "reset to new qual", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "D", + "ts": 100, + "value": "value-VAL_1", + "label": "", + "error": false + } + ] + }, + { + "name": "reset with splits", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "timestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "reset two cells", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: false\n", + "timestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + 
"fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_2", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 103, + "value": "value-VAL_3", + "label": "", + "error": false + } + ] + }, + { + "name": "two resets", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_3\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_3", + "label": "", + "error": false + } + ] + }, + { + "name": "reset then two cells", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: false\n", + "qualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "B", + "qual": "C", + "ts": 100, + "value": "value-VAL_2", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "B", + "qual": "D", + "ts": 103, + "value": "value-VAL_3", + "label": "", + "error": false + } + ] + }, + { + "name": "reset to new row", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_2", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "reset in between chunks", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nlabels: \"L\"\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"a\"\nvalue_size: 10\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_1", + "label": "", + "error": false + } + ] + }, + { + "name": "invalid - reset with chunk", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nlabels: \"L\"\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"a\"\nvalue_size: 10\nreset_row: true\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - commit with chunk", + "chunks": [ + "row_key: 
\"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nlabels: \"L\"\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"a\"\nvalue_size: 10\ncommit_row: true\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "empty cell chunk", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", + "commit_row: false\n", + "commit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "", + "label": "", + "error": false + } + ] + } + ] +} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/unit_tests/test_client.py b/packages/google-cloud-bigtable/unit_tests/test_client.py new file mode 100644 index 000000000000..237011c9807d --- /dev/null +++ b/packages/google-cloud-bigtable/unit_tests/test_client.py @@ -0,0 +1,649 @@ +# Copyright 2015 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import unittest + + +class Test__make_data_stub(unittest.TestCase): + + def _callFUT(self, client): + from google.cloud.bigtable.client import _make_data_stub + return _make_data_stub(client) + + def test_without_emulator(self): + from google.cloud._testing import _Monkey + from google.cloud.bigtable import client as MUT + + credentials = _Credentials() + user_agent = 'you-sir-age-int' + client = _Client(credentials, user_agent) + + fake_stub = object() + make_secure_stub_args = [] + + def mock_make_secure_stub(*args): + make_secure_stub_args.append(args) + return fake_stub + + with _Monkey(MUT, make_secure_stub=mock_make_secure_stub): + result = self._callFUT(client) + + self.assertIs(result, fake_stub) + self.assertEqual(make_secure_stub_args, [ + ( + client.credentials, + client.user_agent, + MUT.bigtable_pb2.BigtableStub, + MUT.DATA_API_HOST, + ), + ]) + + def test_with_emulator(self): + from google.cloud._testing import _Monkey + from google.cloud.bigtable import client as MUT + + emulator_host = object() + client = _Client(None, None, emulator_host=emulator_host) + + fake_stub = object() + make_insecure_stub_args = [] + + def mock_make_insecure_stub(*args): + make_insecure_stub_args.append(args) + return fake_stub + + with _Monkey(MUT, make_insecure_stub=mock_make_insecure_stub): + result = self._callFUT(client) + + self.assertIs(result, fake_stub) + self.assertEqual(make_insecure_stub_args, [ + ( + MUT.bigtable_pb2.BigtableStub, + emulator_host, + ), + ]) + + +class Test__make_instance_stub(unittest.TestCase): + + def _callFUT(self, client): + from google.cloud.bigtable.client import _make_instance_stub + return _make_instance_stub(client) + + def test_without_emulator(self): + from google.cloud._testing import _Monkey + from google.cloud.bigtable import client as MUT + + credentials = _Credentials() + user_agent = 'you-sir-age-int' + client = _Client(credentials, user_agent) + + fake_stub = object() + make_secure_stub_args = [] + + def mock_make_secure_stub(*args): + make_secure_stub_args.append(args) + return fake_stub + + with _Monkey(MUT, make_secure_stub=mock_make_secure_stub): + result = self._callFUT(client) + + self.assertIs(result, fake_stub) + self.assertEqual(make_secure_stub_args, [ + ( + client.credentials, + client.user_agent, + MUT.bigtable_instance_admin_pb2.BigtableInstanceAdminStub, + MUT.INSTANCE_ADMIN_HOST, + ), + ]) + + def test_with_emulator(self): + from google.cloud._testing import _Monkey + from google.cloud.bigtable import client as MUT + + emulator_host = object() + client = _Client(None, None, emulator_host=emulator_host) + + fake_stub = object() + make_insecure_stub_args = [] + + def mock_make_insecure_stub(*args): + make_insecure_stub_args.append(args) + return fake_stub + + with _Monkey(MUT, make_insecure_stub=mock_make_insecure_stub): + result = self._callFUT(client) + + self.assertIs(result, fake_stub) + self.assertEqual(make_insecure_stub_args, [ + ( + MUT.bigtable_instance_admin_pb2.BigtableInstanceAdminStub, + emulator_host, + ), + ]) + + +class Test__make_operations_stub(unittest.TestCase): + + def _callFUT(self, client): + from google.cloud.bigtable.client import _make_operations_stub + return _make_operations_stub(client) + + def test_without_emulator(self): + from google.cloud._testing import _Monkey + from google.cloud.bigtable import client as MUT + + credentials = _Credentials() + user_agent = 'you-sir-age-int' + client = _Client(credentials, user_agent) + + fake_stub = object() + make_secure_stub_args = [] + + def 
mock_make_secure_stub(*args): + make_secure_stub_args.append(args) + return fake_stub + + with _Monkey(MUT, make_secure_stub=mock_make_secure_stub): + result = self._callFUT(client) + + self.assertIs(result, fake_stub) + self.assertEqual(make_secure_stub_args, [ + ( + client.credentials, + client.user_agent, + MUT.operations_grpc_pb2.OperationsStub, + MUT.OPERATIONS_API_HOST, + ), + ]) + + def test_with_emulator(self): + from google.cloud._testing import _Monkey + from google.cloud.bigtable import client as MUT + + emulator_host = object() + client = _Client(None, None, emulator_host=emulator_host) + + fake_stub = object() + make_insecure_stub_args = [] + + def mock_make_insecure_stub(*args): + make_insecure_stub_args.append(args) + return fake_stub + + with _Monkey(MUT, make_insecure_stub=mock_make_insecure_stub): + result = self._callFUT(client) + + self.assertIs(result, fake_stub) + self.assertEqual(make_insecure_stub_args, [ + ( + MUT.operations_grpc_pb2.OperationsStub, + emulator_host, + ), + ]) + + +class Test__make_table_stub(unittest.TestCase): + + def _callFUT(self, client): + from google.cloud.bigtable.client import _make_table_stub + return _make_table_stub(client) + + def test_without_emulator(self): + from google.cloud._testing import _Monkey + from google.cloud.bigtable import client as MUT + + credentials = _Credentials() + user_agent = 'you-sir-age-int' + client = _Client(credentials, user_agent) + + fake_stub = object() + make_secure_stub_args = [] + + def mock_make_secure_stub(*args): + make_secure_stub_args.append(args) + return fake_stub + + with _Monkey(MUT, make_secure_stub=mock_make_secure_stub): + result = self._callFUT(client) + + self.assertIs(result, fake_stub) + self.assertEqual(make_secure_stub_args, [ + ( + client.credentials, + client.user_agent, + MUT.bigtable_table_admin_pb2.BigtableTableAdminStub, + MUT.TABLE_ADMIN_HOST, + ), + ]) + + def test_with_emulator(self): + from google.cloud._testing import _Monkey + from google.cloud.bigtable import client as MUT + + emulator_host = object() + client = _Client(None, None, emulator_host=emulator_host) + + fake_stub = object() + make_insecure_stub_args = [] + + def mock_make_insecure_stub(*args): + make_insecure_stub_args.append(args) + return fake_stub + + with _Monkey(MUT, make_insecure_stub=mock_make_insecure_stub): + result = self._callFUT(client) + + self.assertIs(result, fake_stub) + self.assertEqual(make_insecure_stub_args, [ + ( + MUT.bigtable_table_admin_pb2.BigtableTableAdminStub, + emulator_host, + ), + ]) + + +class TestClient(unittest.TestCase): + + PROJECT = 'PROJECT' + INSTANCE_ID = 'instance-id' + DISPLAY_NAME = 'display-name' + USER_AGENT = 'you-sir-age-int' + + def _getTargetClass(self): + from google.cloud.bigtable.client import Client + return Client + + def _makeOne(self, *args, **kwargs): + return self._getTargetClass()(*args, **kwargs) + + def _makeOneWithMocks(self, *args, **kwargs): + from google.cloud._testing import _Monkey + from google.cloud.bigtable import client as MUT + + mock_make_data_stub = _MakeStubMock() + mock_make_instance_stub = _MakeStubMock() + mock_make_operations_stub = _MakeStubMock() + mock_make_table_stub = _MakeStubMock() + with _Monkey(MUT, _make_data_stub=mock_make_data_stub, + _make_instance_stub=mock_make_instance_stub, + _make_operations_stub=mock_make_operations_stub, + _make_table_stub=mock_make_table_stub): + return self._makeOne(*args, **kwargs) + + def _constructor_test_helper(self, expected_scopes, creds, + read_only=False, admin=False, + user_agent=None, 
expected_creds=None): + from google.cloud._testing import _Monkey + from google.cloud.bigtable import client as MUT + + user_agent = user_agent or MUT.DEFAULT_USER_AGENT + + mock_make_data_stub = _MakeStubMock() + mock_make_instance_stub = _MakeStubMock() + mock_make_operations_stub = _MakeStubMock() + mock_make_table_stub = _MakeStubMock() + with _Monkey(MUT, _make_data_stub=mock_make_data_stub, + _make_instance_stub=mock_make_instance_stub, + _make_operations_stub=mock_make_operations_stub, + _make_table_stub=mock_make_table_stub): + client = self._makeOne(project=self.PROJECT, credentials=creds, + read_only=read_only, admin=admin, + user_agent=user_agent) + + # Verify the mocks. + self.assertEqual(mock_make_data_stub.calls, [client]) + if admin: + self.assertSequenceEqual(mock_make_instance_stub.calls, [client]) + self.assertSequenceEqual(mock_make_operations_stub.calls, [client]) + self.assertSequenceEqual(mock_make_table_stub.calls, [client]) + else: + self.assertSequenceEqual(mock_make_instance_stub.calls, []) + self.assertSequenceEqual(mock_make_operations_stub.calls, []) + self.assertSequenceEqual(mock_make_table_stub.calls, []) + + expected_creds = expected_creds or creds + self.assertIs(client._credentials, expected_creds) + if expected_scopes is not None: + self.assertEqual(client._credentials.scopes, expected_scopes) + + self.assertEqual(client.project, self.PROJECT) + self.assertEqual(client.user_agent, user_agent) + # Check gRPC stubs (or mocks of them) are set + self.assertIs(client._data_stub, mock_make_data_stub.result) + if admin: + self.assertIs(client._instance_stub_internal, + mock_make_instance_stub.result) + self.assertIs(client._operations_stub_internal, + mock_make_operations_stub.result) + self.assertIs(client._table_stub_internal, + mock_make_table_stub.result) + else: + self.assertIsNone(client._instance_stub_internal) + self.assertIsNone(client._operations_stub_internal) + self.assertIsNone(client._table_stub_internal) + + def test_constructor_default_scopes(self): + from google.cloud.bigtable import client as MUT + + expected_scopes = [MUT.DATA_SCOPE] + creds = _Credentials() + self._constructor_test_helper(expected_scopes, creds) + + def test_constructor_custom_user_agent(self): + from google.cloud.bigtable import client as MUT + + CUSTOM_USER_AGENT = 'custom-application' + expected_scopes = [MUT.DATA_SCOPE] + creds = _Credentials() + self._constructor_test_helper(expected_scopes, creds, + user_agent=CUSTOM_USER_AGENT) + + def test_constructor_with_admin(self): + from google.cloud.bigtable import client as MUT + + expected_scopes = [MUT.DATA_SCOPE, MUT.ADMIN_SCOPE] + creds = _Credentials() + self._constructor_test_helper(expected_scopes, creds, admin=True) + + def test_constructor_with_read_only(self): + from google.cloud.bigtable import client as MUT + + expected_scopes = [MUT.READ_ONLY_SCOPE] + creds = _Credentials() + self._constructor_test_helper(expected_scopes, creds, read_only=True) + + def test_constructor_both_admin_and_read_only(self): + creds = _Credentials() + with self.assertRaises(ValueError): + self._constructor_test_helper([], creds, admin=True, + read_only=True) + + def test_constructor_implicit_credentials(self): + from google.cloud._testing import _Monkey + from google.cloud.bigtable import client as MUT + + creds = _Credentials() + expected_scopes = [MUT.DATA_SCOPE] + + def mock_get_credentials(): + return creds + + with _Monkey(MUT, get_credentials=mock_get_credentials): + self._constructor_test_helper(expected_scopes, None, + 
expected_creds=creds) + + def test_constructor_credentials_wo_create_scoped(self): + creds = object() + expected_scopes = None + self._constructor_test_helper(expected_scopes, creds) + + def _copy_test_helper(self, read_only=False, admin=False): + from google.cloud._testing import _Monkey + from google.cloud.bigtable import client as MUT + + credentials = _Credentials('value') + client = self._makeOneWithMocks( + project=self.PROJECT, + credentials=credentials, + read_only=read_only, + admin=admin, + user_agent=self.USER_AGENT) + # Put some fake stubs in place so that we can verify they don't + # get copied. In the admin=False case, only the data stub will + # not be None, so we over-ride all the internal values. + client._data_stub = object() + client._instance_stub_internal = object() + client._operations_stub_internal = object() + client._table_stub_internal = object() + + mock_make_data_stub = _MakeStubMock() + mock_make_instance_stub = _MakeStubMock() + mock_make_operations_stub = _MakeStubMock() + mock_make_table_stub = _MakeStubMock() + with _Monkey(MUT, _make_data_stub=mock_make_data_stub, + _make_instance_stub=mock_make_instance_stub, + _make_operations_stub=mock_make_operations_stub, + _make_table_stub=mock_make_table_stub): + new_client = client.copy() + self.assertEqual(new_client._admin, client._admin) + self.assertEqual(new_client._credentials, client._credentials) + self.assertEqual(new_client.project, client.project) + self.assertEqual(new_client.user_agent, client.user_agent) + # Make sure stubs are not preserved. + self.assertNotEqual(new_client._data_stub, client._data_stub) + self.assertNotEqual(new_client._instance_stub_internal, + client._instance_stub_internal) + self.assertNotEqual(new_client._operations_stub_internal, + client._operations_stub_internal) + self.assertNotEqual(new_client._table_stub_internal, + client._table_stub_internal) + + def test_copy(self): + self._copy_test_helper() + + def test_copy_admin(self): + self._copy_test_helper(admin=True) + + def test_copy_read_only(self): + self._copy_test_helper(read_only=True) + + def test_credentials_getter(self): + credentials = _Credentials() + project = 'PROJECT' + client = self._makeOneWithMocks(project=project, + credentials=credentials) + self.assertIs(client.credentials, credentials) + + def test_project_name_property(self): + credentials = _Credentials() + project = 'PROJECT' + client = self._makeOneWithMocks(project=project, + credentials=credentials) + project_name = 'projects/' + project + self.assertEqual(client.project_name, project_name) + + def test_instance_stub_getter(self): + credentials = _Credentials() + project = 'PROJECT' + client = self._makeOneWithMocks(project=project, + credentials=credentials, admin=True) + self.assertIs(client._instance_stub, client._instance_stub_internal) + + def test_instance_stub_non_admin_failure(self): + credentials = _Credentials() + project = 'PROJECT' + client = self._makeOneWithMocks(project=project, + credentials=credentials, admin=False) + with self.assertRaises(ValueError): + getattr(client, '_instance_stub') + + def test_operations_stub_getter(self): + credentials = _Credentials() + project = 'PROJECT' + client = self._makeOneWithMocks(project=project, + credentials=credentials, admin=True) + self.assertIs(client._operations_stub, + client._operations_stub_internal) + + def test_operations_stub_non_admin_failure(self): + credentials = _Credentials() + project = 'PROJECT' + client = self._makeOneWithMocks(project=project, + credentials=credentials, 
admin=False) + with self.assertRaises(ValueError): + getattr(client, '_operations_stub') + + def test_table_stub_getter(self): + credentials = _Credentials() + project = 'PROJECT' + client = self._makeOneWithMocks(project=project, + credentials=credentials, admin=True) + self.assertIs(client._table_stub, client._table_stub_internal) + + def test_table_stub_non_admin_failure(self): + credentials = _Credentials() + project = 'PROJECT' + client = self._makeOneWithMocks(project=project, + credentials=credentials, admin=False) + with self.assertRaises(ValueError): + getattr(client, '_table_stub') + + def test_instance_factory_defaults(self): + from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES + from google.cloud.bigtable.instance import Instance + from google.cloud.bigtable.instance import ( + _EXISTING_INSTANCE_LOCATION_ID) + + PROJECT = 'PROJECT' + INSTANCE_ID = 'instance-id' + DISPLAY_NAME = 'display-name' + credentials = _Credentials() + client = self._makeOneWithMocks(project=PROJECT, + credentials=credentials) + + instance = client.instance(INSTANCE_ID, display_name=DISPLAY_NAME) + + self.assertIsInstance(instance, Instance) + self.assertEqual(instance.instance_id, INSTANCE_ID) + self.assertEqual(instance.display_name, DISPLAY_NAME) + self.assertEqual(instance._cluster_location_id, + _EXISTING_INSTANCE_LOCATION_ID) + self.assertEqual(instance._cluster_serve_nodes, DEFAULT_SERVE_NODES) + self.assertIs(instance._client, client) + + def test_instance_factory_w_explicit_serve_nodes(self): + from google.cloud.bigtable.instance import Instance + + PROJECT = 'PROJECT' + INSTANCE_ID = 'instance-id' + DISPLAY_NAME = 'display-name' + LOCATION_ID = 'locname' + SERVE_NODES = 5 + credentials = _Credentials() + client = self._makeOneWithMocks(project=PROJECT, + credentials=credentials) + + instance = client.instance( + INSTANCE_ID, display_name=DISPLAY_NAME, + location=LOCATION_ID, serve_nodes=SERVE_NODES) + + self.assertIsInstance(instance, Instance) + self.assertEqual(instance.instance_id, INSTANCE_ID) + self.assertEqual(instance.display_name, DISPLAY_NAME) + self.assertEqual(instance._cluster_location_id, LOCATION_ID) + self.assertEqual(instance._cluster_serve_nodes, SERVE_NODES) + self.assertIs(instance._client, client) + + def test_list_instances(self): + from google.cloud.bigtable._generated import ( + instance_pb2 as data_v2_pb2) + from google.cloud.bigtable._generated import ( + bigtable_instance_admin_pb2 as messages_v2_pb2) + from unit_tests.bigtable._testing import _FakeStub + + LOCATION = 'projects/' + self.PROJECT + '/locations/locname' + FAILED_LOCATION = 'FAILED' + INSTANCE_ID1 = 'instance-id1' + INSTANCE_ID2 = 'instance-id2' + INSTANCE_NAME1 = ( + 'projects/' + self.PROJECT + '/instances/' + INSTANCE_ID1) + INSTANCE_NAME2 = ( + 'projects/' + self.PROJECT + '/instances/' + INSTANCE_ID2) + + credentials = _Credentials() + client = self._makeOneWithMocks( + project=self.PROJECT, + credentials=credentials, + admin=True, + ) + + # Create request_pb + request_pb = messages_v2_pb2.ListInstancesRequest( + parent='projects/' + self.PROJECT, + ) + + # Create response_pb + response_pb = messages_v2_pb2.ListInstancesResponse( + failed_locations=[ + FAILED_LOCATION, + ], + instances=[ + data_v2_pb2.Instance( + name=INSTANCE_NAME1, + display_name=INSTANCE_NAME1, + ), + data_v2_pb2.Instance( + name=INSTANCE_NAME2, + display_name=INSTANCE_NAME2, + ), + ], + ) + + # Patch the stub used by the API method. 
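+        # (A note on the test pattern: _FakeStub, imported above, stands in
+        # for the real gRPC stub; as these tests use it, it replays the
+        # canned response_pb and records each call in stub.method_calls
+        # for the assertions below.)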
+ client._instance_stub_internal = stub = _FakeStub(response_pb) + + # Create expected_result. + failed_locations = [FAILED_LOCATION] + instances = [ + client.instance(INSTANCE_ID1, LOCATION), + client.instance(INSTANCE_ID2, LOCATION), + ] + expected_result = (instances, failed_locations) + + # Perform the method and check the result. + result = client.list_instances() + self.assertEqual(result, expected_result) + self.assertEqual(stub.method_calls, [( + 'ListInstances', + (request_pb,), + {}, + )]) + + +class _Credentials(object): + + scopes = None + + def __init__(self, access_token=None): + self._access_token = access_token + self._tokens = [] + + def create_scoped(self, scope): + self.scopes = scope + return self + + def __eq__(self, other): + return self._access_token == other._access_token + + +class _Client(object): + + def __init__(self, credentials, user_agent, emulator_host=None): + self.credentials = credentials + self.user_agent = user_agent + self.emulator_host = emulator_host + + +class _MakeStubMock(object): + + def __init__(self): + self.result = object() + self.calls = [] + + def __call__(self, client): + self.calls.append(client) + return self.result diff --git a/packages/google-cloud-bigtable/unit_tests/test_cluster.py b/packages/google-cloud-bigtable/unit_tests/test_cluster.py new file mode 100644 index 000000000000..82335b293f0f --- /dev/null +++ b/packages/google-cloud-bigtable/unit_tests/test_cluster.py @@ -0,0 +1,442 @@ +# Copyright 2015 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import unittest + + +class TestCluster(unittest.TestCase): + + PROJECT = 'project' + INSTANCE_ID = 'instance-id' + CLUSTER_ID = 'cluster-id' + CLUSTER_NAME = ('projects/' + PROJECT + + '/instances/' + INSTANCE_ID + + '/clusters/' + CLUSTER_ID) + + def _getTargetClass(self): + from google.cloud.bigtable.cluster import Cluster + return Cluster + + def _makeOne(self, *args, **kwargs): + return self._getTargetClass()(*args, **kwargs) + + def test_constructor_defaults(self): + from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) + + cluster = self._makeOne(self.CLUSTER_ID, instance) + self.assertEqual(cluster.cluster_id, self.CLUSTER_ID) + self.assertIs(cluster._instance, instance) + self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES) + + def test_constructor_non_default(self): + SERVE_NODES = 8 + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) + + cluster = self._makeOne(self.CLUSTER_ID, instance, + serve_nodes=SERVE_NODES) + self.assertEqual(cluster.cluster_id, self.CLUSTER_ID) + self.assertIs(cluster._instance, instance) + self.assertEqual(cluster.serve_nodes, SERVE_NODES) + + def test_copy(self): + SERVE_NODES = 8 + + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) + cluster = self._makeOne(self.CLUSTER_ID, instance, + serve_nodes=SERVE_NODES) + new_cluster = cluster.copy() + + # Make sure the client copy succeeded. + self.assertIsNot(new_cluster._instance, instance) + self.assertEqual(new_cluster.serve_nodes, SERVE_NODES) + # Make sure the client got copied to a new instance. + self.assertIsNot(cluster, new_cluster) + self.assertEqual(cluster, new_cluster) + + def test__update_from_pb_success(self): + from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES + + SERVE_NODES = 8 + cluster_pb = _ClusterPB( + serve_nodes=SERVE_NODES, + ) + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) + + cluster = self._makeOne(self.CLUSTER_ID, instance) + self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES) + cluster._update_from_pb(cluster_pb) + self.assertEqual(cluster.serve_nodes, SERVE_NODES) + + def test__update_from_pb_no_serve_nodes(self): + from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES + + cluster_pb = _ClusterPB() + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) + + cluster = self._makeOne(self.CLUSTER_ID, instance) + self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES) + with self.assertRaises(ValueError): + cluster._update_from_pb(cluster_pb) + self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES) + + def test_from_pb_success(self): + SERVE_NODES = 331 + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) + + cluster_pb = _ClusterPB( + name=self.CLUSTER_NAME, + serve_nodes=SERVE_NODES, + ) + + klass = self._getTargetClass() + cluster = klass.from_pb(cluster_pb, instance) + self.assertIsInstance(cluster, klass) + self.assertIs(cluster._instance, instance) + self.assertEqual(cluster.cluster_id, self.CLUSTER_ID) + self.assertEqual(cluster.serve_nodes, SERVE_NODES) + + def test_from_pb_bad_cluster_name(self): + BAD_CLUSTER_NAME = 'INCORRECT_FORMAT' + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) + cluster_pb = _ClusterPB(name=BAD_CLUSTER_NAME) + + klass = self._getTargetClass() + with self.assertRaises(ValueError): + klass.from_pb(cluster_pb, instance) + + def 
test_from_pb_project_mismatch(self):
+        ALT_PROJECT = 'ALT_PROJECT'
+        client = _Client(ALT_PROJECT)
+        instance = _Instance(self.INSTANCE_ID, client)
+
+        self.assertNotEqual(self.PROJECT, ALT_PROJECT)
+
+        cluster_pb = _ClusterPB(name=self.CLUSTER_NAME)
+
+        klass = self._getTargetClass()
+        with self.assertRaises(ValueError):
+            klass.from_pb(cluster_pb, instance)
+
+    def test_from_pb_instance_mismatch(self):
+        ALT_INSTANCE_ID = 'ALT_INSTANCE_ID'
+        client = _Client(self.PROJECT)
+        instance = _Instance(ALT_INSTANCE_ID, client)
+
+        self.assertNotEqual(self.INSTANCE_ID, ALT_INSTANCE_ID)
+
+        cluster_pb = _ClusterPB(name=self.CLUSTER_NAME)
+
+        klass = self._getTargetClass()
+        with self.assertRaises(ValueError):
+            klass.from_pb(cluster_pb, instance)
+
+    def test_name_property(self):
+        client = _Client(self.PROJECT)
+        instance = _Instance(self.INSTANCE_ID, client)
+
+        cluster = self._makeOne(self.CLUSTER_ID, instance)
+        self.assertEqual(cluster.name, self.CLUSTER_NAME)
+
+    def test___eq__(self):
+        client = _Client(self.PROJECT)
+        instance = _Instance(self.INSTANCE_ID, client)
+        cluster1 = self._makeOne(self.CLUSTER_ID, instance)
+        cluster2 = self._makeOne(self.CLUSTER_ID, instance)
+        self.assertEqual(cluster1, cluster2)
+
+    def test___eq__type_differ(self):
+        client = _Client(self.PROJECT)
+        instance = _Instance(self.INSTANCE_ID, client)
+        cluster1 = self._makeOne(self.CLUSTER_ID, instance)
+        cluster2 = object()
+        self.assertNotEqual(cluster1, cluster2)
+
+    def test___ne__same_value(self):
+        client = _Client(self.PROJECT)
+        instance = _Instance(self.INSTANCE_ID, client)
+        cluster1 = self._makeOne(self.CLUSTER_ID, instance)
+        cluster2 = self._makeOne(self.CLUSTER_ID, instance)
+        comparison_val = (cluster1 != cluster2)
+        self.assertFalse(comparison_val)
+
+    def test___ne__(self):
+        client = _Client(self.PROJECT)
+        instance = _Instance(self.INSTANCE_ID, client)
+        cluster1 = self._makeOne('cluster_id1', instance)
+        cluster2 = self._makeOne('cluster_id2', instance)
+        self.assertNotEqual(cluster1, cluster2)
+
+    def test_reload(self):
+        from unit_tests.bigtable._testing import _FakeStub
+        from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES
+
+        SERVE_NODES = 31
+        LOCATION = 'LOCATION'
+        client = _Client(self.PROJECT)
+        instance = _Instance(self.INSTANCE_ID, client)
+        cluster = self._makeOne(self.CLUSTER_ID, instance)
+
+        # Create request_pb
+        request_pb = _GetClusterRequestPB(name=self.CLUSTER_NAME)
+
+        # Create response_pb
+        response_pb = _ClusterPB(
+            serve_nodes=SERVE_NODES,
+            location=LOCATION,
+        )
+
+        # Patch the stub used by the API method.
+        client._instance_stub = stub = _FakeStub(response_pb)
+
+        # Create expected_result.
+        expected_result = None  # reload() has no return value.
+
+        # Check Cluster optional config values before.
+        self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES)
+
+        # Perform the method and check the result.
+        result = cluster.reload()
+        self.assertEqual(result, expected_result)
+        self.assertEqual(stub.method_calls, [(
+            'GetCluster',
+            (request_pb,),
+            {},
+        )])
+
+        # Check Cluster optional config values after.
+ self.assertEqual(cluster.serve_nodes, SERVE_NODES) + self.assertEqual(cluster.location, LOCATION) + + def test_create(self): + from google.longrunning import operations_pb2 + from google.cloud.operation import Operation + from google.cloud.bigtable._generated import ( + bigtable_instance_admin_pb2 as messages_v2_pb2) + from unit_tests.bigtable._testing import _FakeStub + + SERVE_NODES = 4 + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) + cluster = self._makeOne( + self.CLUSTER_ID, instance, serve_nodes=SERVE_NODES) + + # Create response_pb + OP_ID = 5678 + OP_NAME = ( + 'operations/projects/%s/instances/%s/clusters/%s/operations/%d' % + (self.PROJECT, self.INSTANCE_ID, self.CLUSTER_ID, OP_ID)) + response_pb = operations_pb2.Operation(name=OP_NAME) + + # Patch the stub used by the API method. + client._instance_stub = stub = _FakeStub(response_pb) + + # Perform the method and check the result. + result = cluster.create() + + self.assertIsInstance(result, Operation) + self.assertEqual(result.name, OP_NAME) + self.assertIs(result.target, cluster) + self.assertIs(result.client, client) + self.assertIsNone(result.pb_metadata) + self.assertEqual(result.metadata, {'request_type': 'CreateCluster'}) + + self.assertEqual(len(stub.method_calls), 1) + api_name, args, kwargs = stub.method_calls[0] + self.assertEqual(api_name, 'CreateCluster') + request_pb, = args + self.assertIsInstance(request_pb, + messages_v2_pb2.CreateClusterRequest) + self.assertEqual(request_pb.parent, instance.name) + self.assertEqual(request_pb.cluster_id, self.CLUSTER_ID) + self.assertEqual(request_pb.cluster.serve_nodes, SERVE_NODES) + self.assertEqual(kwargs, {}) + + def test_update(self): + import datetime + from google.longrunning import operations_pb2 + from google.cloud.operation import Operation + from google.protobuf.any_pb2 import Any + from google.cloud._helpers import _datetime_to_pb_timestamp + from google.cloud.bigtable._generated import ( + instance_pb2 as data_v2_pb2) + from google.cloud.bigtable._generated import ( + bigtable_instance_admin_pb2 as messages_v2_pb2) + from unit_tests.bigtable._testing import _FakeStub + from google.cloud.bigtable.cluster import _UPDATE_CLUSTER_METADATA_URL + + NOW = datetime.datetime.utcnow() + NOW_PB = _datetime_to_pb_timestamp(NOW) + + SERVE_NODES = 81 + + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) + cluster = self._makeOne(self.CLUSTER_ID, instance, + serve_nodes=SERVE_NODES) + + # Create request_pb + request_pb = _ClusterPB( + name=self.CLUSTER_NAME, + serve_nodes=SERVE_NODES, + ) + + # Create response_pb + OP_ID = 5678 + OP_NAME = ( + 'operations/projects/%s/instances/%s/clusters/%s/operations/%d' % + (self.PROJECT, self.INSTANCE_ID, self.CLUSTER_ID, OP_ID)) + metadata = messages_v2_pb2.UpdateClusterMetadata(request_time=NOW_PB) + response_pb = operations_pb2.Operation( + name=OP_NAME, + metadata=Any( + type_url=_UPDATE_CLUSTER_METADATA_URL, + value=metadata.SerializeToString() + ) + ) + + # Patch the stub used by the API method. 
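+        # (The UpdateClusterMetadata packed into the Any above is expected
+        # to come back out as result.pb_metadata on the returned Operation;
+        # see the assertions below.)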
+ client._instance_stub = stub = _FakeStub(response_pb) + + result = cluster.update() + + self.assertIsInstance(result, Operation) + self.assertEqual(result.name, OP_NAME) + self.assertIs(result.target, cluster) + self.assertIs(result.client, client) + self.assertIsInstance(result.pb_metadata, + messages_v2_pb2.UpdateClusterMetadata) + self.assertEqual(result.pb_metadata.request_time, NOW_PB) + self.assertEqual(result.metadata, {'request_type': 'UpdateCluster'}) + + self.assertEqual(len(stub.method_calls), 1) + api_name, args, kwargs = stub.method_calls[0] + self.assertEqual(api_name, 'UpdateCluster') + request_pb, = args + self.assertIsInstance(request_pb, data_v2_pb2.Cluster) + self.assertEqual(request_pb.name, self.CLUSTER_NAME) + self.assertEqual(request_pb.serve_nodes, SERVE_NODES) + self.assertEqual(kwargs, {}) + + def test_delete(self): + from google.protobuf import empty_pb2 + from unit_tests.bigtable._testing import _FakeStub + + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) + cluster = self._makeOne(self.CLUSTER_ID, instance) + + # Create request_pb + request_pb = _DeleteClusterRequestPB(name=self.CLUSTER_NAME) + + # Create response_pb + response_pb = empty_pb2.Empty() + + # Patch the stub used by the API method. + client._instance_stub = stub = _FakeStub(response_pb) + + # Create expected_result. + expected_result = None # delete() has no return value. + + # Perform the method and check the result. + result = cluster.delete() + + self.assertEqual(result, expected_result) + self.assertEqual(stub.method_calls, [( + 'DeleteCluster', + (request_pb,), + {}, + )]) + + +class Test__prepare_create_request(unittest.TestCase): + + def _callFUT(self, cluster): + from google.cloud.bigtable.cluster import _prepare_create_request + return _prepare_create_request(cluster) + + def test_it(self): + from google.cloud.bigtable.cluster import Cluster + + PROJECT = 'PROJECT' + INSTANCE_ID = 'instance-id' + CLUSTER_ID = 'cluster-id' + SERVE_NODES = 8 + + client = _Client(PROJECT) + instance = _Instance(INSTANCE_ID, client) + cluster = Cluster(CLUSTER_ID, instance, + serve_nodes=SERVE_NODES) + + request_pb = self._callFUT(cluster) + + self.assertEqual(request_pb.cluster_id, CLUSTER_ID) + self.assertEqual(request_pb.parent, instance.name) + self.assertEqual(request_pb.cluster.serve_nodes, SERVE_NODES) + + +def _ClusterPB(*args, **kw): + from google.cloud.bigtable._generated import ( + instance_pb2 as instance_v2_pb2) + return instance_v2_pb2.Cluster(*args, **kw) + + +def _DeleteClusterRequestPB(*args, **kw): + from google.cloud.bigtable._generated import ( + bigtable_instance_admin_pb2 as messages_v2_pb2) + return messages_v2_pb2.DeleteClusterRequest(*args, **kw) + + +def _GetClusterRequestPB(*args, **kw): + from google.cloud.bigtable._generated import ( + bigtable_instance_admin_pb2 as messages_v2_pb2) + return messages_v2_pb2.GetClusterRequest(*args, **kw) + + +class _Instance(object): + + def __init__(self, instance_id, client): + self.instance_id = instance_id + self._client = client + + @property + def name(self): + return 'projects/%s/instances/%s' % ( + self._client.project, self.instance_id) + + def copy(self): + return self.__class__(self.instance_id, self._client) + + def __eq__(self, other): + return (other.instance_id == self.instance_id and + other._client == self._client) + + +class _Client(object): + + def __init__(self, project): + self.project = project + self.project_name = 'projects/' + self.project + + def __eq__(self, other): + return 
(other.project == self.project and + other.project_name == self.project_name) diff --git a/packages/google-cloud-bigtable/unit_tests/test_column_family.py b/packages/google-cloud-bigtable/unit_tests/test_column_family.py new file mode 100644 index 000000000000..c2ad847694b7 --- /dev/null +++ b/packages/google-cloud-bigtable/unit_tests/test_column_family.py @@ -0,0 +1,682 @@ +# Copyright 2015 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import unittest + + +class Test__timedelta_to_duration_pb(unittest.TestCase): + + def _callFUT(self, *args, **kwargs): + from google.cloud.bigtable.column_family import ( + _timedelta_to_duration_pb) + return _timedelta_to_duration_pb(*args, **kwargs) + + def test_it(self): + import datetime + from google.protobuf import duration_pb2 + + seconds = microseconds = 1 + timedelta_val = datetime.timedelta(seconds=seconds, + microseconds=microseconds) + result = self._callFUT(timedelta_val) + self.assertIsInstance(result, duration_pb2.Duration) + self.assertEqual(result.seconds, seconds) + self.assertEqual(result.nanos, 1000 * microseconds) + + def test_with_negative_microseconds(self): + import datetime + from google.protobuf import duration_pb2 + + seconds = 1 + microseconds = -5 + timedelta_val = datetime.timedelta(seconds=seconds, + microseconds=microseconds) + result = self._callFUT(timedelta_val) + self.assertIsInstance(result, duration_pb2.Duration) + self.assertEqual(result.seconds, seconds - 1) + self.assertEqual(result.nanos, 10**9 + 1000 * microseconds) + + def test_with_negative_seconds(self): + import datetime + from google.protobuf import duration_pb2 + + seconds = -1 + microseconds = 5 + timedelta_val = datetime.timedelta(seconds=seconds, + microseconds=microseconds) + result = self._callFUT(timedelta_val) + self.assertIsInstance(result, duration_pb2.Duration) + self.assertEqual(result.seconds, seconds + 1) + self.assertEqual(result.nanos, -(10**9 - 1000 * microseconds)) + + +class Test__duration_pb_to_timedelta(unittest.TestCase): + + def _callFUT(self, *args, **kwargs): + from google.cloud.bigtable.column_family import ( + _duration_pb_to_timedelta) + return _duration_pb_to_timedelta(*args, **kwargs) + + def test_it(self): + import datetime + from google.protobuf import duration_pb2 + + seconds = microseconds = 1 + duration_pb = duration_pb2.Duration(seconds=seconds, + nanos=1000 * microseconds) + timedelta_val = datetime.timedelta(seconds=seconds, + microseconds=microseconds) + result = self._callFUT(duration_pb) + self.assertIsInstance(result, datetime.timedelta) + self.assertEqual(result, timedelta_val) + + +class TestMaxVersionsGCRule(unittest.TestCase): + + def _getTargetClass(self): + from google.cloud.bigtable.column_family import MaxVersionsGCRule + return MaxVersionsGCRule + + def _makeOne(self, *args, **kwargs): + return self._getTargetClass()(*args, **kwargs) + + def test___eq__type_differ(self): + gc_rule1 = self._makeOne(10) + gc_rule2 = object() + self.assertNotEqual(gc_rule1, gc_rule2) + + def 
test___eq__same_value(self): + gc_rule1 = self._makeOne(2) + gc_rule2 = self._makeOne(2) + self.assertEqual(gc_rule1, gc_rule2) + + def test___ne__same_value(self): + gc_rule1 = self._makeOne(99) + gc_rule2 = self._makeOne(99) + comparison_val = (gc_rule1 != gc_rule2) + self.assertFalse(comparison_val) + + def test_to_pb(self): + max_num_versions = 1337 + gc_rule = self._makeOne(max_num_versions=max_num_versions) + pb_val = gc_rule.to_pb() + expected = _GcRulePB(max_num_versions=max_num_versions) + self.assertEqual(pb_val, expected) + + +class TestMaxAgeGCRule(unittest.TestCase): + + def _getTargetClass(self): + from google.cloud.bigtable.column_family import MaxAgeGCRule + return MaxAgeGCRule + + def _makeOne(self, *args, **kwargs): + return self._getTargetClass()(*args, **kwargs) + + def test___eq__type_differ(self): + max_age = object() + gc_rule1 = self._makeOne(max_age=max_age) + gc_rule2 = object() + self.assertNotEqual(gc_rule1, gc_rule2) + + def test___eq__same_value(self): + max_age = object() + gc_rule1 = self._makeOne(max_age=max_age) + gc_rule2 = self._makeOne(max_age=max_age) + self.assertEqual(gc_rule1, gc_rule2) + + def test___ne__same_value(self): + max_age = object() + gc_rule1 = self._makeOne(max_age=max_age) + gc_rule2 = self._makeOne(max_age=max_age) + comparison_val = (gc_rule1 != gc_rule2) + self.assertFalse(comparison_val) + + def test_to_pb(self): + import datetime + from google.protobuf import duration_pb2 + + max_age = datetime.timedelta(seconds=1) + duration = duration_pb2.Duration(seconds=1) + gc_rule = self._makeOne(max_age=max_age) + pb_val = gc_rule.to_pb() + self.assertEqual(pb_val, _GcRulePB(max_age=duration)) + + +class TestGCRuleUnion(unittest.TestCase): + + def _getTargetClass(self): + from google.cloud.bigtable.column_family import GCRuleUnion + return GCRuleUnion + + def _makeOne(self, *args, **kwargs): + return self._getTargetClass()(*args, **kwargs) + + def test_constructor(self): + rules = object() + rule_union = self._makeOne(rules) + self.assertIs(rule_union.rules, rules) + + def test___eq__(self): + rules = object() + gc_rule1 = self._makeOne(rules) + gc_rule2 = self._makeOne(rules) + self.assertEqual(gc_rule1, gc_rule2) + + def test___eq__type_differ(self): + rules = object() + gc_rule1 = self._makeOne(rules) + gc_rule2 = object() + self.assertNotEqual(gc_rule1, gc_rule2) + + def test___ne__same_value(self): + rules = object() + gc_rule1 = self._makeOne(rules) + gc_rule2 = self._makeOne(rules) + comparison_val = (gc_rule1 != gc_rule2) + self.assertFalse(comparison_val) + + def test_to_pb(self): + import datetime + from google.protobuf import duration_pb2 + from google.cloud.bigtable.column_family import MaxAgeGCRule + from google.cloud.bigtable.column_family import MaxVersionsGCRule + + max_num_versions = 42 + rule1 = MaxVersionsGCRule(max_num_versions) + pb_rule1 = _GcRulePB(max_num_versions=max_num_versions) + + max_age = datetime.timedelta(seconds=1) + rule2 = MaxAgeGCRule(max_age) + pb_rule2 = _GcRulePB( + max_age=duration_pb2.Duration(seconds=1)) + + rule3 = self._makeOne(rules=[rule1, rule2]) + pb_rule3 = _GcRulePB( + union=_GcRuleUnionPB(rules=[pb_rule1, pb_rule2])) + + gc_rule_pb = rule3.to_pb() + self.assertEqual(gc_rule_pb, pb_rule3) + + def test_to_pb_nested(self): + import datetime + from google.protobuf import duration_pb2 + from google.cloud.bigtable.column_family import MaxAgeGCRule + from google.cloud.bigtable.column_family import MaxVersionsGCRule + + max_num_versions1 = 42 + rule1 = MaxVersionsGCRule(max_num_versions1) + 
pb_rule1 = _GcRulePB(max_num_versions=max_num_versions1) + + max_age = datetime.timedelta(seconds=1) + rule2 = MaxAgeGCRule(max_age) + pb_rule2 = _GcRulePB( + max_age=duration_pb2.Duration(seconds=1)) + + rule3 = self._makeOne(rules=[rule1, rule2]) + pb_rule3 = _GcRulePB( + union=_GcRuleUnionPB(rules=[pb_rule1, pb_rule2])) + + max_num_versions2 = 1337 + rule4 = MaxVersionsGCRule(max_num_versions2) + pb_rule4 = _GcRulePB(max_num_versions=max_num_versions2) + + rule5 = self._makeOne(rules=[rule3, rule4]) + pb_rule5 = _GcRulePB( + union=_GcRuleUnionPB(rules=[pb_rule3, pb_rule4])) + + gc_rule_pb = rule5.to_pb() + self.assertEqual(gc_rule_pb, pb_rule5) + + +class TestGCRuleIntersection(unittest.TestCase): + + def _getTargetClass(self): + from google.cloud.bigtable.column_family import GCRuleIntersection + return GCRuleIntersection + + def _makeOne(self, *args, **kwargs): + return self._getTargetClass()(*args, **kwargs) + + def test_constructor(self): + rules = object() + rule_intersection = self._makeOne(rules) + self.assertIs(rule_intersection.rules, rules) + + def test___eq__(self): + rules = object() + gc_rule1 = self._makeOne(rules) + gc_rule2 = self._makeOne(rules) + self.assertEqual(gc_rule1, gc_rule2) + + def test___eq__type_differ(self): + rules = object() + gc_rule1 = self._makeOne(rules) + gc_rule2 = object() + self.assertNotEqual(gc_rule1, gc_rule2) + + def test___ne__same_value(self): + rules = object() + gc_rule1 = self._makeOne(rules) + gc_rule2 = self._makeOne(rules) + comparison_val = (gc_rule1 != gc_rule2) + self.assertFalse(comparison_val) + + def test_to_pb(self): + import datetime + from google.protobuf import duration_pb2 + from google.cloud.bigtable.column_family import MaxAgeGCRule + from google.cloud.bigtable.column_family import MaxVersionsGCRule + + max_num_versions = 42 + rule1 = MaxVersionsGCRule(max_num_versions) + pb_rule1 = _GcRulePB(max_num_versions=max_num_versions) + + max_age = datetime.timedelta(seconds=1) + rule2 = MaxAgeGCRule(max_age) + pb_rule2 = _GcRulePB( + max_age=duration_pb2.Duration(seconds=1)) + + rule3 = self._makeOne(rules=[rule1, rule2]) + pb_rule3 = _GcRulePB( + intersection=_GcRuleIntersectionPB( + rules=[pb_rule1, pb_rule2])) + + gc_rule_pb = rule3.to_pb() + self.assertEqual(gc_rule_pb, pb_rule3) + + def test_to_pb_nested(self): + import datetime + from google.protobuf import duration_pb2 + from google.cloud.bigtable.column_family import MaxAgeGCRule + from google.cloud.bigtable.column_family import MaxVersionsGCRule + + max_num_versions1 = 42 + rule1 = MaxVersionsGCRule(max_num_versions1) + pb_rule1 = _GcRulePB(max_num_versions=max_num_versions1) + + max_age = datetime.timedelta(seconds=1) + rule2 = MaxAgeGCRule(max_age) + pb_rule2 = _GcRulePB( + max_age=duration_pb2.Duration(seconds=1)) + + rule3 = self._makeOne(rules=[rule1, rule2]) + pb_rule3 = _GcRulePB( + intersection=_GcRuleIntersectionPB( + rules=[pb_rule1, pb_rule2])) + + max_num_versions2 = 1337 + rule4 = MaxVersionsGCRule(max_num_versions2) + pb_rule4 = _GcRulePB(max_num_versions=max_num_versions2) + + rule5 = self._makeOne(rules=[rule3, rule4]) + pb_rule5 = _GcRulePB( + intersection=_GcRuleIntersectionPB( + rules=[pb_rule3, pb_rule4])) + + gc_rule_pb = rule5.to_pb() + self.assertEqual(gc_rule_pb, pb_rule5) + + +class TestColumnFamily(unittest.TestCase): + + def _getTargetClass(self): + from google.cloud.bigtable.column_family import ColumnFamily + return ColumnFamily + + def _makeOne(self, *args, **kwargs): + return self._getTargetClass()(*args, **kwargs) + + def 
test_constructor(self): + column_family_id = u'column-family-id' + table = object() + gc_rule = object() + column_family = self._makeOne( + column_family_id, table, gc_rule=gc_rule) + + self.assertEqual(column_family.column_family_id, column_family_id) + self.assertIs(column_family._table, table) + self.assertIs(column_family.gc_rule, gc_rule) + + def test_name_property(self): + column_family_id = u'column-family-id' + table_name = 'table_name' + table = _Table(table_name) + column_family = self._makeOne(column_family_id, table) + + expected_name = table_name + '/columnFamilies/' + column_family_id + self.assertEqual(column_family.name, expected_name) + + def test___eq__(self): + column_family_id = 'column_family_id' + table = object() + gc_rule = object() + column_family1 = self._makeOne(column_family_id, table, + gc_rule=gc_rule) + column_family2 = self._makeOne(column_family_id, table, + gc_rule=gc_rule) + self.assertEqual(column_family1, column_family2) + + def test___eq__type_differ(self): + column_family1 = self._makeOne('column_family_id', None) + column_family2 = object() + self.assertNotEqual(column_family1, column_family2) + + def test___ne__same_value(self): + column_family_id = 'column_family_id' + table = object() + gc_rule = object() + column_family1 = self._makeOne(column_family_id, table, + gc_rule=gc_rule) + column_family2 = self._makeOne(column_family_id, table, + gc_rule=gc_rule) + comparison_val = (column_family1 != column_family2) + self.assertFalse(comparison_val) + + def test___ne__(self): + column_family1 = self._makeOne('column_family_id1', None) + column_family2 = self._makeOne('column_family_id2', None) + self.assertNotEqual(column_family1, column_family2) + + def test_to_pb_no_rules(self): + column_family = self._makeOne('column_family_id', None) + pb_val = column_family.to_pb() + expected = _ColumnFamilyPB() + self.assertEqual(pb_val, expected) + + def test_to_pb_with_rule(self): + from google.cloud.bigtable.column_family import MaxVersionsGCRule + + gc_rule = MaxVersionsGCRule(1) + column_family = self._makeOne('column_family_id', None, + gc_rule=gc_rule) + pb_val = column_family.to_pb() + expected = _ColumnFamilyPB(gc_rule=gc_rule.to_pb()) + self.assertEqual(pb_val, expected) + + def _create_test_helper(self, gc_rule=None): + from google.cloud.bigtable._generated import ( + bigtable_table_admin_pb2 as table_admin_v2_pb2) + from unit_tests.bigtable._testing import _FakeStub + + project_id = 'project-id' + zone = 'zone' + cluster_id = 'cluster-id' + table_id = 'table-id' + column_family_id = 'column-family-id' + table_name = ('projects/' + project_id + '/zones/' + zone + + '/clusters/' + cluster_id + '/tables/' + table_id) + + client = _Client() + table = _Table(table_name, client=client) + column_family = self._makeOne( + column_family_id, table, gc_rule=gc_rule) + + # Create request_pb + if gc_rule is None: + column_family_pb = _ColumnFamilyPB() + else: + column_family_pb = _ColumnFamilyPB(gc_rule=gc_rule.to_pb()) + request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest( + name=table_name) + request_pb.modifications.add( + id=column_family_id, + create=column_family_pb, + ) + + # Create response_pb + response_pb = _ColumnFamilyPB() + + # Patch the stub used by the API method. + client._table_stub = stub = _FakeStub(response_pb) + + # Create expected_result. + expected_result = None # create() has no return value. + + # Perform the method and check the result. 
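+        # (stub.results starts as the one-element tuple of canned responses
+        # and is drained as calls are made, so checking it before and after
+        # shows that create() consumed exactly one response.)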
+ self.assertEqual(stub.results, (response_pb,)) + result = column_family.create() + self.assertEqual(stub.results, ()) + self.assertEqual(result, expected_result) + self.assertEqual(stub.method_calls, [( + 'ModifyColumnFamilies', + (request_pb,), + {}, + )]) + + def test_create(self): + self._create_test_helper(gc_rule=None) + + def test_create_with_gc_rule(self): + from google.cloud.bigtable.column_family import MaxVersionsGCRule + gc_rule = MaxVersionsGCRule(1337) + self._create_test_helper(gc_rule=gc_rule) + + def _update_test_helper(self, gc_rule=None): + from unit_tests.bigtable._testing import _FakeStub + from google.cloud.bigtable._generated import ( + bigtable_table_admin_pb2 as table_admin_v2_pb2) + + project_id = 'project-id' + zone = 'zone' + cluster_id = 'cluster-id' + table_id = 'table-id' + column_family_id = 'column-family-id' + table_name = ('projects/' + project_id + '/zones/' + zone + + '/clusters/' + cluster_id + '/tables/' + table_id) + + client = _Client() + table = _Table(table_name, client=client) + column_family = self._makeOne( + column_family_id, table, gc_rule=gc_rule) + + # Create request_pb + if gc_rule is None: + column_family_pb = _ColumnFamilyPB() + else: + column_family_pb = _ColumnFamilyPB(gc_rule=gc_rule.to_pb()) + request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest( + name=table_name) + request_pb.modifications.add( + id=column_family_id, + update=column_family_pb, + ) + + # Create response_pb + response_pb = _ColumnFamilyPB() + + # Patch the stub used by the API method. + client._table_stub = stub = _FakeStub(response_pb) + + # Create expected_result. + expected_result = None # update() has no return value. + + # Perform the method and check the result. + self.assertEqual(stub.results, (response_pb,)) + result = column_family.update() + self.assertEqual(stub.results, ()) + self.assertEqual(result, expected_result) + self.assertEqual(stub.method_calls, [( + 'ModifyColumnFamilies', + (request_pb,), + {}, + )]) + + def test_update(self): + self._update_test_helper(gc_rule=None) + + def test_update_with_gc_rule(self): + from google.cloud.bigtable.column_family import MaxVersionsGCRule + gc_rule = MaxVersionsGCRule(1337) + self._update_test_helper(gc_rule=gc_rule) + + def test_delete(self): + from google.protobuf import empty_pb2 + from google.cloud.bigtable._generated import ( + bigtable_table_admin_pb2 as table_admin_v2_pb2) + from unit_tests.bigtable._testing import _FakeStub + + project_id = 'project-id' + zone = 'zone' + cluster_id = 'cluster-id' + table_id = 'table-id' + column_family_id = 'column-family-id' + table_name = ('projects/' + project_id + '/zones/' + zone + + '/clusters/' + cluster_id + '/tables/' + table_id) + + client = _Client() + table = _Table(table_name, client=client) + column_family = self._makeOne(column_family_id, table) + + # Create request_pb + request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest( + name=table_name) + request_pb.modifications.add( + id=column_family_id, + drop=True) + + # Create response_pb + response_pb = empty_pb2.Empty() + + # Patch the stub used by the API method. + client._table_stub = stub = _FakeStub(response_pb) + + # Create expected_result. + expected_result = None # delete() has no return value. + + # Perform the method and check the result. 
+ self.assertEqual(stub.results, (response_pb,)) + result = column_family.delete() + self.assertEqual(stub.results, ()) + self.assertEqual(result, expected_result) + self.assertEqual(stub.method_calls, [( + 'ModifyColumnFamilies', + (request_pb,), + {}, + )]) + + +class Test__gc_rule_from_pb(unittest.TestCase): + + def _callFUT(self, *args, **kwargs): + from google.cloud.bigtable.column_family import _gc_rule_from_pb + return _gc_rule_from_pb(*args, **kwargs) + + def test_empty(self): + + gc_rule_pb = _GcRulePB() + self.assertIsNone(self._callFUT(gc_rule_pb)) + + def test_max_num_versions(self): + from google.cloud.bigtable.column_family import MaxVersionsGCRule + + orig_rule = MaxVersionsGCRule(1) + gc_rule_pb = orig_rule.to_pb() + result = self._callFUT(gc_rule_pb) + self.assertIsInstance(result, MaxVersionsGCRule) + self.assertEqual(result, orig_rule) + + def test_max_age(self): + import datetime + from google.cloud.bigtable.column_family import MaxAgeGCRule + + orig_rule = MaxAgeGCRule(datetime.timedelta(seconds=1)) + gc_rule_pb = orig_rule.to_pb() + result = self._callFUT(gc_rule_pb) + self.assertIsInstance(result, MaxAgeGCRule) + self.assertEqual(result, orig_rule) + + def test_union(self): + import datetime + from google.cloud.bigtable.column_family import GCRuleUnion + from google.cloud.bigtable.column_family import MaxAgeGCRule + from google.cloud.bigtable.column_family import MaxVersionsGCRule + + rule1 = MaxVersionsGCRule(1) + rule2 = MaxAgeGCRule(datetime.timedelta(seconds=1)) + orig_rule = GCRuleUnion([rule1, rule2]) + gc_rule_pb = orig_rule.to_pb() + result = self._callFUT(gc_rule_pb) + self.assertIsInstance(result, GCRuleUnion) + self.assertEqual(result, orig_rule) + + def test_intersection(self): + import datetime + from google.cloud.bigtable.column_family import GCRuleIntersection + from google.cloud.bigtable.column_family import MaxAgeGCRule + from google.cloud.bigtable.column_family import MaxVersionsGCRule + + rule1 = MaxVersionsGCRule(1) + rule2 = MaxAgeGCRule(datetime.timedelta(seconds=1)) + orig_rule = GCRuleIntersection([rule1, rule2]) + gc_rule_pb = orig_rule.to_pb() + result = self._callFUT(gc_rule_pb) + self.assertIsInstance(result, GCRuleIntersection) + self.assertEqual(result, orig_rule) + + def test_unknown_field_name(self): + class MockProto(object): + + names = [] + + @classmethod + def WhichOneof(cls, name): + cls.names.append(name) + return 'unknown' + + self.assertEqual(MockProto.names, []) + self.assertRaises(ValueError, self._callFUT, MockProto) + self.assertEqual(MockProto.names, ['rule']) + + +def _GcRulePB(*args, **kw): + from google.cloud.bigtable._generated import ( + table_pb2 as table_v2_pb2) + return table_v2_pb2.GcRule(*args, **kw) + + +def _GcRuleIntersectionPB(*args, **kw): + from google.cloud.bigtable._generated import ( + table_pb2 as table_v2_pb2) + return table_v2_pb2.GcRule.Intersection(*args, **kw) + + +def _GcRuleUnionPB(*args, **kw): + from google.cloud.bigtable._generated import ( + table_pb2 as table_v2_pb2) + return table_v2_pb2.GcRule.Union(*args, **kw) + + +def _ColumnFamilyPB(*args, **kw): + from google.cloud.bigtable._generated import ( + table_pb2 as table_v2_pb2) + return table_v2_pb2.ColumnFamily(*args, **kw) + + +class _Instance(object): + + def __init__(self, client=None): + self._client = client + + +class _Client(object): + pass + + +class _Table(object): + + def __init__(self, name, client=None): + self.name = name + self._instance = _Instance(client) diff --git 
a/packages/google-cloud-bigtable/unit_tests/test_instance.py b/packages/google-cloud-bigtable/unit_tests/test_instance.py new file mode 100644 index 000000000000..cc2c49366c90 --- /dev/null +++ b/packages/google-cloud-bigtable/unit_tests/test_instance.py @@ -0,0 +1,587 @@ +# Copyright 2015 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import unittest + + +class TestInstance(unittest.TestCase): + + PROJECT = 'project' + INSTANCE_ID = 'instance-id' + INSTANCE_NAME = 'projects/' + PROJECT + '/instances/' + INSTANCE_ID + LOCATION_ID = 'locname' + LOCATION = 'projects/' + PROJECT + '/locations/' + LOCATION_ID + DISPLAY_NAME = 'display_name' + OP_ID = 8915 + OP_NAME = ('operations/projects/%s/instances/%soperations/%d' % + (PROJECT, INSTANCE_ID, OP_ID)) + TABLE_ID = 'table_id' + TABLE_NAME = INSTANCE_NAME + '/tables/' + TABLE_ID + + def _getTargetClass(self): + from google.cloud.bigtable.instance import Instance + return Instance + + def _makeOne(self, *args, **kwargs): + return self._getTargetClass()(*args, **kwargs) + + def test_constructor_defaults(self): + from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES + + client = object() + instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID) + self.assertEqual(instance.instance_id, self.INSTANCE_ID) + self.assertEqual(instance.display_name, self.INSTANCE_ID) + self.assertIs(instance._client, client) + self.assertEqual(instance._cluster_location_id, self.LOCATION_ID) + self.assertEqual(instance._cluster_serve_nodes, DEFAULT_SERVE_NODES) + + def test_constructor_non_default(self): + display_name = 'display_name' + client = object() + + instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID, + display_name=display_name) + self.assertEqual(instance.instance_id, self.INSTANCE_ID) + self.assertEqual(instance.display_name, display_name) + self.assertIs(instance._client, client) + + def test_copy(self): + display_name = 'display_name' + + client = _Client(self.PROJECT) + instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID, + display_name=display_name) + new_instance = instance.copy() + + # Make sure the client copy succeeded. + self.assertIsNot(new_instance._client, client) + self.assertEqual(new_instance._client, client) + # Make sure the client got copied to a new instance. 
+ self.assertIsNot(instance, new_instance) + self.assertEqual(instance, new_instance) + + def test_table_factory(self): + from google.cloud.bigtable.table import Table + + instance = self._makeOne(self.INSTANCE_ID, None, self.LOCATION_ID) + + table = instance.table(self.TABLE_ID) + self.assertIsInstance(table, Table) + self.assertEqual(table.table_id, self.TABLE_ID) + self.assertEqual(table._instance, instance) + + def test__update_from_pb_success(self): + from google.cloud.bigtable._generated import ( + instance_pb2 as data_v2_pb2) + + display_name = 'display_name' + instance_pb = data_v2_pb2.Instance( + display_name=display_name, + ) + + instance = self._makeOne(None, None, None, None) + self.assertIsNone(instance.display_name) + instance._update_from_pb(instance_pb) + self.assertEqual(instance.display_name, display_name) + + def test__update_from_pb_no_display_name(self): + from google.cloud.bigtable._generated import ( + instance_pb2 as data_v2_pb2) + + instance_pb = data_v2_pb2.Instance() + instance = self._makeOne(None, None, None, None) + self.assertIsNone(instance.display_name) + with self.assertRaises(ValueError): + instance._update_from_pb(instance_pb) + self.assertIsNone(instance.display_name) + + def test_from_pb_success(self): + from google.cloud.bigtable.instance import ( + _EXISTING_INSTANCE_LOCATION_ID) + from google.cloud.bigtable._generated import ( + instance_pb2 as data_v2_pb2) + + client = _Client(project=self.PROJECT) + + instance_pb = data_v2_pb2.Instance( + name=self.INSTANCE_NAME, + display_name=self.INSTANCE_ID, + ) + + klass = self._getTargetClass() + instance = klass.from_pb(instance_pb, client) + self.assertIsInstance(instance, klass) + self.assertEqual(instance._client, client) + self.assertEqual(instance.instance_id, self.INSTANCE_ID) + self.assertEqual(instance._cluster_location_id, + _EXISTING_INSTANCE_LOCATION_ID) + + def test_from_pb_bad_instance_name(self): + from google.cloud.bigtable._generated import ( + instance_pb2 as data_v2_pb2) + + instance_name = 'INCORRECT_FORMAT' + instance_pb = data_v2_pb2.Instance(name=instance_name) + + klass = self._getTargetClass() + with self.assertRaises(ValueError): + klass.from_pb(instance_pb, None) + + def test_from_pb_project_mistmatch(self): + from google.cloud.bigtable._generated import ( + instance_pb2 as data_v2_pb2) + + ALT_PROJECT = 'ALT_PROJECT' + client = _Client(project=ALT_PROJECT) + + self.assertNotEqual(self.PROJECT, ALT_PROJECT) + + instance_pb = data_v2_pb2.Instance(name=self.INSTANCE_NAME) + + klass = self._getTargetClass() + with self.assertRaises(ValueError): + klass.from_pb(instance_pb, client) + + def test_name_property(self): + client = _Client(project=self.PROJECT) + + instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID) + self.assertEqual(instance.name, self.INSTANCE_NAME) + + def test___eq__(self): + client = object() + instance1 = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID) + instance2 = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID) + self.assertEqual(instance1, instance2) + + def test___eq__type_differ(self): + client = object() + instance1 = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID) + instance2 = object() + self.assertNotEqual(instance1, instance2) + + def test___ne__same_value(self): + client = object() + instance1 = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID) + instance2 = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID) + comparison_val = (instance1 != instance2) + self.assertFalse(comparison_val) + + def 
test___ne__(self): + instance1 = self._makeOne('instance_id1', 'client1', self.LOCATION_ID) + instance2 = self._makeOne('instance_id2', 'client2', self.LOCATION_ID) + self.assertNotEqual(instance1, instance2) + + def test_reload(self): + from google.cloud.bigtable._generated import ( + instance_pb2 as data_v2_pb2) + from google.cloud.bigtable._generated import ( + bigtable_instance_admin_pb2 as messages_v2_pb) + from unit_tests.bigtable._testing import _FakeStub + + client = _Client(self.PROJECT) + instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID) + + # Create request_pb + request_pb = messages_v2_pb.GetInstanceRequest( + name=self.INSTANCE_NAME) + + # Create response_pb + DISPLAY_NAME = u'hey-hi-hello' + response_pb = data_v2_pb2.Instance( + display_name=DISPLAY_NAME, + ) + + # Patch the stub used by the API method. + client._instance_stub = stub = _FakeStub(response_pb) + + # Create expected_result. + expected_result = None # reload() has no return value. + + # Check Instance optional config values before. + self.assertEqual(instance.display_name, self.INSTANCE_ID) + + # Perform the method and check the result. + result = instance.reload() + self.assertEqual(result, expected_result) + self.assertEqual(stub.method_calls, [( + 'GetInstance', + (request_pb,), + {}, + )]) + + # Check Instance optional config values before. + self.assertEqual(instance.display_name, DISPLAY_NAME) + + def test_create(self): + import datetime + from google.longrunning import operations_pb2 + from google.protobuf.any_pb2 import Any + from google.cloud.bigtable._generated import ( + bigtable_instance_admin_pb2 as messages_v2_pb2) + from google.cloud._helpers import _datetime_to_pb_timestamp + from unit_tests.bigtable._testing import _FakeStub + from google.cloud.operation import Operation + from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES + from google.cloud.bigtable.instance import ( + _CREATE_INSTANCE_METADATA_URL) + + NOW = datetime.datetime.utcnow() + NOW_PB = _datetime_to_pb_timestamp(NOW) + client = _Client(self.PROJECT) + instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID, + display_name=self.DISPLAY_NAME) + + # Create response_pb + metadata = messages_v2_pb2.CreateInstanceMetadata(request_time=NOW_PB) + response_pb = operations_pb2.Operation( + name=self.OP_NAME, + metadata=Any( + type_url=_CREATE_INSTANCE_METADATA_URL, + value=metadata.SerializeToString(), + ) + ) + + # Patch the stub used by the API method. + client._instance_stub = stub = _FakeStub(response_pb) + + # Perform the method and check the result. 
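+        # (create() is expected to wrap the long-running CreateInstance
+        # operation in a google.cloud.operation.Operation whose target is
+        # this instance and whose client is the Bigtable client; see the
+        # assertions below.)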
+ result = instance.create() + + self.assertIsInstance(result, Operation) + self.assertEqual(result.name, self.OP_NAME) + self.assertIs(result.target, instance) + self.assertIs(result.client, client) + self.assertIsInstance(result.pb_metadata, + messages_v2_pb2.CreateInstanceMetadata) + self.assertEqual(result.pb_metadata.request_time, NOW_PB) + self.assertEqual(result.metadata, {'request_type': 'CreateInstance'}) + + self.assertEqual(len(stub.method_calls), 1) + api_name, args, kwargs = stub.method_calls[0] + self.assertEqual(api_name, 'CreateInstance') + request_pb, = args + self.assertIsInstance(request_pb, + messages_v2_pb2.CreateInstanceRequest) + self.assertEqual(request_pb.parent, 'projects/%s' % (self.PROJECT,)) + self.assertEqual(request_pb.instance_id, self.INSTANCE_ID) + self.assertEqual(request_pb.instance.display_name, self.DISPLAY_NAME) + cluster = request_pb.clusters[self.INSTANCE_ID] + self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES) + self.assertEqual(kwargs, {}) + + def test_create_w_explicit_serve_nodes(self): + from google.longrunning import operations_pb2 + from google.cloud.bigtable._generated import ( + bigtable_instance_admin_pb2 as messages_v2_pb2) + from unit_tests.bigtable._testing import _FakeStub + from google.cloud.operation import Operation + + SERVE_NODES = 5 + + client = _Client(self.PROJECT) + instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID, + serve_nodes=SERVE_NODES) + + # Create response_pb + response_pb = operations_pb2.Operation(name=self.OP_NAME) + + # Patch the stub used by the API method. + client._instance_stub = stub = _FakeStub(response_pb) + + # Perform the method and check the result. + result = instance.create() + + self.assertIsInstance(result, Operation) + self.assertEqual(result.name, self.OP_NAME) + self.assertIs(result.target, instance) + self.assertIs(result.client, client) + + self.assertEqual(len(stub.method_calls), 1) + api_name, args, kwargs = stub.method_calls[0] + self.assertEqual(api_name, 'CreateInstance') + request_pb, = args + self.assertIsInstance(request_pb, + messages_v2_pb2.CreateInstanceRequest) + self.assertEqual(request_pb.parent, 'projects/%s' % (self.PROJECT,)) + self.assertEqual(request_pb.instance_id, self.INSTANCE_ID) + self.assertEqual(request_pb.instance.display_name, self.INSTANCE_ID) + cluster = request_pb.clusters[self.INSTANCE_ID] + self.assertEqual(cluster.serve_nodes, SERVE_NODES) + self.assertEqual(kwargs, {}) + + def test_update(self): + from google.cloud.bigtable._generated import ( + instance_pb2 as data_v2_pb2) + from unit_tests.bigtable._testing import _FakeStub + + client = _Client(self.PROJECT) + instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID, + display_name=self.DISPLAY_NAME) + + # Create request_pb + request_pb = data_v2_pb2.Instance( + name=self.INSTANCE_NAME, + display_name=self.DISPLAY_NAME, + ) + + # Create response_pb + response_pb = data_v2_pb2.Instance() + + # Patch the stub used by the API method. + client._instance_stub = stub = _FakeStub(response_pb) + + # Create expected_result. + expected_result = None + + # Perform the method and check the result. 
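+        # (UpdateInstance takes the Instance message itself as the request,
+        # which is why request_pb above is a plain Instance protobuf;
+        # update() has no return value.)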
+ result = instance.update() + + self.assertEqual(result, expected_result) + self.assertEqual(stub.method_calls, [( + 'UpdateInstance', + (request_pb,), + {}, + )]) + + def test_delete(self): + from google.protobuf import empty_pb2 + from google.cloud.bigtable._generated import ( + bigtable_instance_admin_pb2 as messages_v2_pb) + from unit_tests.bigtable._testing import _FakeStub + + client = _Client(self.PROJECT) + instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID) + + # Create request_pb + request_pb = messages_v2_pb.DeleteInstanceRequest( + name=self.INSTANCE_NAME) + + # Create response_pb + response_pb = empty_pb2.Empty() + + # Patch the stub used by the API method. + client._instance_stub = stub = _FakeStub(response_pb) + + # Create expected_result. + expected_result = None # delete() has no return value. + + # Perform the method and check the result. + result = instance.delete() + + self.assertEqual(result, expected_result) + self.assertEqual(stub.method_calls, [( + 'DeleteInstance', + (request_pb,), + {}, + )]) + + def test_list_clusters(self): + from google.cloud.bigtable._generated import ( + instance_pb2 as instance_v2_pb2) + from google.cloud.bigtable._generated import ( + bigtable_instance_admin_pb2 as messages_v2_pb2) + from unit_tests.bigtable._testing import _FakeStub + + FAILED_LOCATION = 'FAILED' + FAILED_LOCATIONS = [FAILED_LOCATION] + CLUSTER_ID1 = 'cluster-id1' + CLUSTER_ID2 = 'cluster-id2' + SERVE_NODES = 4 + + client = _Client(self.PROJECT) + instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID) + + CLUSTER_NAME1 = (instance.name + '/clusters/' + CLUSTER_ID1) + CLUSTER_NAME2 = (instance.name + '/clusters/' + CLUSTER_ID2) + # Create request_pb + request_pb = messages_v2_pb2.ListClustersRequest( + parent=instance.name, + ) + + # Create response_pb + response_pb = messages_v2_pb2.ListClustersResponse( + failed_locations=[FAILED_LOCATION], + clusters=[ + instance_v2_pb2.Cluster( + name=CLUSTER_NAME1, + serve_nodes=SERVE_NODES, + ), + instance_v2_pb2.Cluster( + name=CLUSTER_NAME2, + serve_nodes=SERVE_NODES, + ), + ], + ) + + # Patch the stub used by the API method. + client._instance_stub = stub = _FakeStub(response_pb) + + # Create expected_result. + clusters = [ + instance.cluster(CLUSTER_ID1), + instance.cluster(CLUSTER_ID2), + ] + expected_result = (clusters, FAILED_LOCATIONS) + + # Perform the method and check the result. + result = instance.list_clusters() + self.assertEqual(result, expected_result) + self.assertEqual(stub.method_calls, [( + 'ListClusters', + (request_pb,), + {}, + )]) + + def _list_tables_helper(self, table_name=None): + from google.cloud.bigtable._generated import ( + table_pb2 as table_data_v2_pb2) + from google.cloud.bigtable._generated import ( + bigtable_table_admin_pb2 as table_messages_v1_pb2) + from unit_tests.bigtable._testing import _FakeStub + + client = _Client(self.PROJECT) + instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID) + + # Create request_ + request_pb = table_messages_v1_pb2.ListTablesRequest( + parent=self.INSTANCE_NAME) + + # Create response_pb + if table_name is None: + table_name = self.TABLE_NAME + + response_pb = table_messages_v1_pb2.ListTablesResponse( + tables=[ + table_data_v2_pb2.Table(name=table_name), + ], + ) + + # Patch the stub used by the API method. + client._table_stub = stub = _FakeStub(response_pb) + + # Create expected_result. 
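+ # list_tables() should extract the trailing table ID from each returned
+ # table name; names that do not match the expected
+ # .../instances/<instance_id>/tables/<table_id> form are expected to
+ # raise ValueError (exercised by the failure tests below).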
+ expected_table = instance.table(self.TABLE_ID) + expected_result = [expected_table] + + # Perform the method and check the result. + result = instance.list_tables() + + self.assertEqual(result, expected_result) + self.assertEqual(stub.method_calls, [( + 'ListTables', + (request_pb,), + {}, + )]) + + def test_list_tables(self): + self._list_tables_helper() + + def test_list_tables_failure_bad_split(self): + with self.assertRaises(ValueError): + self._list_tables_helper(table_name='wrong-format') + + def test_list_tables_failure_name_bad_before(self): + BAD_TABLE_NAME = ('nonempty-section-before' + + 'projects/' + self.PROJECT + + '/instances/' + self.INSTANCE_ID + + '/tables/' + self.TABLE_ID) + with self.assertRaises(ValueError): + self._list_tables_helper(table_name=BAD_TABLE_NAME) + + +class Test__prepare_create_request(unittest.TestCase): + PROJECT = 'PROJECT' + PARENT = 'projects/' + PROJECT + LOCATION_ID = 'locname' + LOCATION_NAME = 'projects/' + PROJECT + '/locations/' + LOCATION_ID + INSTANCE_ID = 'instance-id' + INSTANCE_NAME = PARENT + '/instances/' + INSTANCE_ID + CLUSTER_NAME = INSTANCE_NAME + '/clusters/' + INSTANCE_ID + + def _callFUT(self, instance, **kw): + from google.cloud.bigtable.instance import _prepare_create_request + return _prepare_create_request(instance, **kw) + + def test_w_defaults(self): + from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES + from google.cloud.bigtable._generated import ( + instance_pb2 as data_v2_pb2) + from google.cloud.bigtable._generated import ( + bigtable_instance_admin_pb2 as messages_v2_pb) + from google.cloud.bigtable.instance import Instance + + client = _Client(self.PROJECT) + + instance = Instance(self.INSTANCE_ID, client, self.LOCATION_ID) + request_pb = self._callFUT(instance) + self.assertIsInstance(request_pb, + messages_v2_pb.CreateInstanceRequest) + self.assertEqual(request_pb.instance_id, self.INSTANCE_ID) + self.assertEqual(request_pb.parent, self.PARENT) + self.assertIsInstance(request_pb.instance, data_v2_pb2.Instance) + self.assertEqual(request_pb.instance.name, u'') + self.assertEqual(request_pb.instance.display_name, self.INSTANCE_ID) + + # An instance must also define a same-named cluster + cluster = request_pb.clusters[self.INSTANCE_ID] + self.assertIsInstance(cluster, data_v2_pb2.Cluster) + self.assertEqual(cluster.name, self.CLUSTER_NAME) + self.assertEqual(cluster.location, self.LOCATION_NAME) + self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES) + + def test_w_explicit_serve_nodes(self): + from google.cloud.bigtable._generated import ( + instance_pb2 as data_v2_pb2) + from google.cloud.bigtable._generated import ( + bigtable_instance_admin_pb2 as messages_v2_pb) + from google.cloud.bigtable.instance import Instance + DISPLAY_NAME = u'DISPLAY_NAME' + SERVE_NODES = 5 + client = _Client(self.PROJECT) + instance = Instance(self.INSTANCE_ID, client, self.LOCATION_ID, + display_name=DISPLAY_NAME, + serve_nodes=SERVE_NODES) + + request_pb = self._callFUT(instance) + + self.assertIsInstance(request_pb, + messages_v2_pb.CreateInstanceRequest) + self.assertEqual(request_pb.instance_id, self.INSTANCE_ID) + self.assertEqual(request_pb.parent, + 'projects/' + self.PROJECT) + self.assertIsInstance(request_pb.instance, data_v2_pb2.Instance) + self.assertEqual(request_pb.instance.display_name, DISPLAY_NAME) + # An instance must also define a same-named cluster + cluster = request_pb.clusters[self.INSTANCE_ID] + self.assertIsInstance(cluster, data_v2_pb2.Cluster) + self.assertEqual(cluster.location, 
self.LOCATION_NAME) + self.assertEqual(cluster.serve_nodes, SERVE_NODES) + + +class _Client(object): + + def __init__(self, project): + self.project = project + self.project_name = 'projects/' + self.project + + def copy(self): + from copy import deepcopy + return deepcopy(self) + + def __eq__(self, other): + return (other.project == self.project and + other.project_name == self.project_name) diff --git a/packages/google-cloud-bigtable/unit_tests/test_row.py b/packages/google-cloud-bigtable/unit_tests/test_row.py new file mode 100644 index 000000000000..0a495bbbe433 --- /dev/null +++ b/packages/google-cloud-bigtable/unit_tests/test_row.py @@ -0,0 +1,909 @@ +# Copyright 2015 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import unittest + + +class Test_SetDeleteRow(unittest.TestCase): + + def _getTargetClass(self): + from google.cloud.bigtable.row import _SetDeleteRow + return _SetDeleteRow + + def _makeOne(self, *args, **kwargs): + return self._getTargetClass()(*args, **kwargs) + + def test__get_mutations_virtual(self): + row = self._makeOne(b'row-key', None) + with self.assertRaises(NotImplementedError): + row._get_mutations(None) + + +class TestDirectRow(unittest.TestCase): + + def _getTargetClass(self): + from google.cloud.bigtable.row import DirectRow + return DirectRow + + def _makeOne(self, *args, **kwargs): + return self._getTargetClass()(*args, **kwargs) + + def test_constructor(self): + row_key = b'row_key' + table = object() + + row = self._makeOne(row_key, table) + self.assertEqual(row._row_key, row_key) + self.assertIs(row._table, table) + self.assertEqual(row._pb_mutations, []) + + def test_constructor_with_unicode(self): + row_key = u'row_key' + row_key_bytes = b'row_key' + table = object() + + row = self._makeOne(row_key, table) + self.assertEqual(row._row_key, row_key_bytes) + self.assertIs(row._table, table) + + def test_constructor_with_non_bytes(self): + row_key = object() + with self.assertRaises(TypeError): + self._makeOne(row_key, None) + + def test__get_mutations(self): + row_key = b'row_key' + row = self._makeOne(row_key, None) + + row._pb_mutations = mutations = object() + self.assertIs(mutations, row._get_mutations(None)) + + def _set_cell_helper(self, column=None, column_bytes=None, + value=b'foobar', timestamp=None, + timestamp_micros=-1): + import six + import struct + row_key = b'row_key' + column_family_id = u'column_family_id' + if column is None: + column = b'column' + table = object() + row = self._makeOne(row_key, table) + self.assertEqual(row._pb_mutations, []) + row.set_cell(column_family_id, column, + value, timestamp=timestamp) + + if isinstance(value, six.integer_types): + value = struct.pack('>q', value) + expected_pb = _MutationPB( + set_cell=_MutationSetCellPB( + family_name=column_family_id, + column_qualifier=column_bytes or column, + timestamp_micros=timestamp_micros, + value=value, + ), + ) + self.assertEqual(row._pb_mutations, [expected_pb]) + + def test_set_cell(self): + self._set_cell_helper() + + def 
test_set_cell_with_string_column(self): + column_bytes = b'column' + column_non_bytes = u'column' + self._set_cell_helper(column=column_non_bytes, + column_bytes=column_bytes) + + def test_set_cell_with_integer_value(self): + value = 1337 + self._set_cell_helper(value=value) + + def test_set_cell_with_non_bytes_value(self): + row_key = b'row_key' + column = b'column' + column_family_id = u'column_family_id' + table = object() + + row = self._makeOne(row_key, table) + value = object() # Not bytes + with self.assertRaises(TypeError): + row.set_cell(column_family_id, column, value) + + def test_set_cell_with_non_null_timestamp(self): + import datetime + from google.cloud._helpers import _EPOCH + + microseconds = 898294371 + millis_granularity = microseconds - (microseconds % 1000) + timestamp = _EPOCH + datetime.timedelta(microseconds=microseconds) + self._set_cell_helper(timestamp=timestamp, + timestamp_micros=millis_granularity) + + def test_delete(self): + row_key = b'row_key' + row = self._makeOne(row_key, object()) + self.assertEqual(row._pb_mutations, []) + row.delete() + + expected_pb = _MutationPB( + delete_from_row=_MutationDeleteFromRowPB(), + ) + self.assertEqual(row._pb_mutations, [expected_pb]) + + def test_delete_cell(self): + klass = self._getTargetClass() + + class MockRow(klass): + + def __init__(self, *args, **kwargs): + super(MockRow, self).__init__(*args, **kwargs) + self._args = [] + self._kwargs = [] + + # Replace the called method with one that logs arguments. + def _delete_cells(self, *args, **kwargs): + self._args.append(args) + self._kwargs.append(kwargs) + + row_key = b'row_key' + column = b'column' + column_family_id = u'column_family_id' + table = object() + + mock_row = MockRow(row_key, table) + # Make sure no values are set before calling the method. + self.assertEqual(mock_row._pb_mutations, []) + self.assertEqual(mock_row._args, []) + self.assertEqual(mock_row._kwargs, []) + + # Actually make the request against the mock class. 
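+ # delete_cell() is expected to delegate to _delete_cells() with a
+ # one-element column list, forwarding state and time_range unchanged, so
+ # the logging MockRow records a single call and no direct mutations.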
+ time_range = object() + mock_row.delete_cell(column_family_id, column, time_range=time_range) + self.assertEqual(mock_row._pb_mutations, []) + self.assertEqual(mock_row._args, [(column_family_id, [column])]) + self.assertEqual(mock_row._kwargs, [{ + 'state': None, + 'time_range': time_range, + }]) + + def test_delete_cells_non_iterable(self): + row_key = b'row_key' + column_family_id = u'column_family_id' + table = object() + + row = self._makeOne(row_key, table) + columns = object() # Not iterable + with self.assertRaises(TypeError): + row.delete_cells(column_family_id, columns) + + def test_delete_cells_all_columns(self): + row_key = b'row_key' + column_family_id = u'column_family_id' + table = object() + + row = self._makeOne(row_key, table) + klass = self._getTargetClass() + self.assertEqual(row._pb_mutations, []) + row.delete_cells(column_family_id, klass.ALL_COLUMNS) + + expected_pb = _MutationPB( + delete_from_family=_MutationDeleteFromFamilyPB( + family_name=column_family_id, + ), + ) + self.assertEqual(row._pb_mutations, [expected_pb]) + + def test_delete_cells_no_columns(self): + row_key = b'row_key' + column_family_id = u'column_family_id' + table = object() + + row = self._makeOne(row_key, table) + columns = [] + self.assertEqual(row._pb_mutations, []) + row.delete_cells(column_family_id, columns) + self.assertEqual(row._pb_mutations, []) + + def _delete_cells_helper(self, time_range=None): + row_key = b'row_key' + column = b'column' + column_family_id = u'column_family_id' + table = object() + + row = self._makeOne(row_key, table) + columns = [column] + self.assertEqual(row._pb_mutations, []) + row.delete_cells(column_family_id, columns, time_range=time_range) + + expected_pb = _MutationPB( + delete_from_column=_MutationDeleteFromColumnPB( + family_name=column_family_id, + column_qualifier=column, + ), + ) + if time_range is not None: + expected_pb.delete_from_column.time_range.CopyFrom( + time_range.to_pb()) + self.assertEqual(row._pb_mutations, [expected_pb]) + + def test_delete_cells_no_time_range(self): + self._delete_cells_helper() + + def test_delete_cells_with_time_range(self): + import datetime + from google.cloud._helpers import _EPOCH + from google.cloud.bigtable.row_filters import TimestampRange + + microseconds = 30871000 # Makes sure already milliseconds granularity + start = _EPOCH + datetime.timedelta(microseconds=microseconds) + time_range = TimestampRange(start=start) + self._delete_cells_helper(time_range=time_range) + + def test_delete_cells_with_bad_column(self): + # This makes sure a failure on one of the columns doesn't leave + # the row's mutations in a bad state. 
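+ # A TypeError from the second (non-bytes) column should leave
+ # _pb_mutations untouched, as asserted after the failed call below.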
+ row_key = b'row_key' + column = b'column' + column_family_id = u'column_family_id' + table = object() + + row = self._makeOne(row_key, table) + columns = [column, object()] + self.assertEqual(row._pb_mutations, []) + with self.assertRaises(TypeError): + row.delete_cells(column_family_id, columns) + self.assertEqual(row._pb_mutations, []) + + def test_delete_cells_with_string_columns(self): + row_key = b'row_key' + column_family_id = u'column_family_id' + column1 = u'column1' + column1_bytes = b'column1' + column2 = u'column2' + column2_bytes = b'column2' + table = object() + + row = self._makeOne(row_key, table) + columns = [column1, column2] + self.assertEqual(row._pb_mutations, []) + row.delete_cells(column_family_id, columns) + + expected_pb1 = _MutationPB( + delete_from_column=_MutationDeleteFromColumnPB( + family_name=column_family_id, + column_qualifier=column1_bytes, + ), + ) + expected_pb2 = _MutationPB( + delete_from_column=_MutationDeleteFromColumnPB( + family_name=column_family_id, + column_qualifier=column2_bytes, + ), + ) + self.assertEqual(row._pb_mutations, [expected_pb1, expected_pb2]) + + def test_commit(self): + from google.protobuf import empty_pb2 + from unit_tests.bigtable._testing import _FakeStub + + row_key = b'row_key' + table_name = 'projects/more-stuff' + column_family_id = u'column_family_id' + column = b'column' + client = _Client() + table = _Table(table_name, client=client) + row = self._makeOne(row_key, table) + + # Create request_pb + value = b'bytes-value' + mutation = _MutationPB( + set_cell=_MutationSetCellPB( + family_name=column_family_id, + column_qualifier=column, + timestamp_micros=-1, # Default value. + value=value, + ), + ) + request_pb = _MutateRowRequestPB( + table_name=table_name, + row_key=row_key, + mutations=[mutation], + ) + + # Create response_pb + response_pb = empty_pb2.Empty() + + # Patch the stub used by the API method. + client._data_stub = stub = _FakeStub(response_pb) + + # Create expected_result. + expected_result = None # commit() has no return value when no filter. + + # Perform the method and check the result. + row.set_cell(column_family_id, column, value) + result = row.commit() + self.assertEqual(result, expected_result) + self.assertEqual(stub.method_calls, [( + 'MutateRow', + (request_pb,), + {}, + )]) + self.assertEqual(row._pb_mutations, []) + + def test_commit_too_many_mutations(self): + from google.cloud._testing import _Monkey + from google.cloud.bigtable import row as MUT + + row_key = b'row_key' + table = object() + row = self._makeOne(row_key, table) + row._pb_mutations = [1, 2, 3] + num_mutations = len(row._pb_mutations) + with _Monkey(MUT, MAX_MUTATIONS=num_mutations - 1): + with self.assertRaises(ValueError): + row.commit() + + def test_commit_no_mutations(self): + from unit_tests.bigtable._testing import _FakeStub + + row_key = b'row_key' + client = _Client() + table = _Table(None, client=client) + row = self._makeOne(row_key, table) + self.assertEqual(row._pb_mutations, []) + + # Patch the stub used by the API method. + client._data_stub = stub = _FakeStub() + + # Perform the method and check the result. + result = row.commit() + self.assertIsNone(result) + # Make sure no request was sent. 
+ self.assertEqual(stub.method_calls, []) + + +class TestConditionalRow(unittest.TestCase): + + def _getTargetClass(self): + from google.cloud.bigtable.row import ConditionalRow + return ConditionalRow + + def _makeOne(self, *args, **kwargs): + return self._getTargetClass()(*args, **kwargs) + + def test_constructor(self): + row_key = b'row_key' + table = object() + filter_ = object() + + row = self._makeOne(row_key, table, filter_=filter_) + self.assertEqual(row._row_key, row_key) + self.assertIs(row._table, table) + self.assertIs(row._filter, filter_) + self.assertEqual(row._true_pb_mutations, []) + self.assertEqual(row._false_pb_mutations, []) + + def test__get_mutations(self): + row_key = b'row_key' + filter_ = object() + row = self._makeOne(row_key, None, filter_=filter_) + + row._true_pb_mutations = true_mutations = object() + row._false_pb_mutations = false_mutations = object() + self.assertIs(true_mutations, row._get_mutations(True)) + self.assertIs(false_mutations, row._get_mutations(False)) + self.assertIs(false_mutations, row._get_mutations(None)) + + def test_commit(self): + from unit_tests.bigtable._testing import _FakeStub + from google.cloud.bigtable.row_filters import RowSampleFilter + + row_key = b'row_key' + table_name = 'projects/more-stuff' + column_family_id1 = u'column_family_id1' + column_family_id2 = u'column_family_id2' + column_family_id3 = u'column_family_id3' + column1 = b'column1' + column2 = b'column2' + client = _Client() + table = _Table(table_name, client=client) + row_filter = RowSampleFilter(0.33) + row = self._makeOne(row_key, table, filter_=row_filter) + + # Create request_pb + value1 = b'bytes-value' + mutation1 = _MutationPB( + set_cell=_MutationSetCellPB( + family_name=column_family_id1, + column_qualifier=column1, + timestamp_micros=-1, # Default value. + value=value1, + ), + ) + mutation2 = _MutationPB( + delete_from_row=_MutationDeleteFromRowPB(), + ) + mutation3 = _MutationPB( + delete_from_column=_MutationDeleteFromColumnPB( + family_name=column_family_id2, + column_qualifier=column2, + ), + ) + mutation4 = _MutationPB( + delete_from_family=_MutationDeleteFromFamilyPB( + family_name=column_family_id3, + ), + ) + request_pb = _CheckAndMutateRowRequestPB( + table_name=table_name, + row_key=row_key, + predicate_filter=row_filter.to_pb(), + true_mutations=[mutation1, mutation3, mutation4], + false_mutations=[mutation2], + ) + + # Create response_pb + predicate_matched = True + response_pb = _CheckAndMutateRowResponsePB( + predicate_matched=predicate_matched) + + # Patch the stub used by the API method. + client._data_stub = stub = _FakeStub(response_pb) + + # Create expected_result. + expected_result = predicate_matched + + # Perform the method and check the result. 
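+ # Mutations are routed by the state flag: state=True entries are expected
+ # to land in true_mutations and state=False entries in false_mutations of
+ # the CheckAndMutateRow request above, with commit() returning the
+ # response's predicate_matched value.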
+ row.set_cell(column_family_id1, column1, value1, state=True) + row.delete(state=False) + row.delete_cell(column_family_id2, column2, state=True) + row.delete_cells(column_family_id3, row.ALL_COLUMNS, state=True) + result = row.commit() + self.assertEqual(result, expected_result) + self.assertEqual(stub.method_calls, [( + 'CheckAndMutateRow', + (request_pb,), + {}, + )]) + self.assertEqual(row._true_pb_mutations, []) + self.assertEqual(row._false_pb_mutations, []) + + def test_commit_too_many_mutations(self): + from google.cloud._testing import _Monkey + from google.cloud.bigtable import row as MUT + + row_key = b'row_key' + table = object() + filter_ = object() + row = self._makeOne(row_key, table, filter_=filter_) + row._true_pb_mutations = [1, 2, 3] + num_mutations = len(row._true_pb_mutations) + with _Monkey(MUT, MAX_MUTATIONS=num_mutations - 1): + with self.assertRaises(ValueError): + row.commit() + + def test_commit_no_mutations(self): + from unit_tests.bigtable._testing import _FakeStub + + row_key = b'row_key' + client = _Client() + table = _Table(None, client=client) + filter_ = object() + row = self._makeOne(row_key, table, filter_=filter_) + self.assertEqual(row._true_pb_mutations, []) + self.assertEqual(row._false_pb_mutations, []) + + # Patch the stub used by the API method. + client._data_stub = stub = _FakeStub() + + # Perform the method and check the result. + result = row.commit() + self.assertIsNone(result) + # Make sure no request was sent. + self.assertEqual(stub.method_calls, []) + + +class TestAppendRow(unittest.TestCase): + + def _getTargetClass(self): + from google.cloud.bigtable.row import AppendRow + return AppendRow + + def _makeOne(self, *args, **kwargs): + return self._getTargetClass()(*args, **kwargs) + + def test_constructor(self): + row_key = b'row_key' + table = object() + + row = self._makeOne(row_key, table) + self.assertEqual(row._row_key, row_key) + self.assertIs(row._table, table) + self.assertEqual(row._rule_pb_list, []) + + def test_clear(self): + row_key = b'row_key' + table = object() + row = self._makeOne(row_key, table) + row._rule_pb_list = [1, 2, 3] + row.clear() + self.assertEqual(row._rule_pb_list, []) + + def test_append_cell_value(self): + table = object() + row_key = b'row_key' + row = self._makeOne(row_key, table) + self.assertEqual(row._rule_pb_list, []) + + column = b'column' + column_family_id = u'column_family_id' + value = b'bytes-val' + row.append_cell_value(column_family_id, column, value) + expected_pb = _ReadModifyWriteRulePB( + family_name=column_family_id, column_qualifier=column, + append_value=value) + self.assertEqual(row._rule_pb_list, [expected_pb]) + + def test_increment_cell_value(self): + table = object() + row_key = b'row_key' + row = self._makeOne(row_key, table) + self.assertEqual(row._rule_pb_list, []) + + column = b'column' + column_family_id = u'column_family_id' + int_value = 281330 + row.increment_cell_value(column_family_id, column, int_value) + expected_pb = _ReadModifyWriteRulePB( + family_name=column_family_id, column_qualifier=column, + increment_amount=int_value) + self.assertEqual(row._rule_pb_list, [expected_pb]) + + def test_commit(self): + from google.cloud._testing import _Monkey + from unit_tests.bigtable._testing import _FakeStub + from google.cloud.bigtable import row as MUT + + row_key = b'row_key' + table_name = 'projects/more-stuff' + column_family_id = u'column_family_id' + column = b'column' + client = _Client() + table = _Table(table_name, client=client) + row = self._makeOne(row_key, table) 
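+ # For AppendRow, commit() is expected to send a ReadModifyWriteRow request
+ # built from the accumulated rules and to pass the raw response through
+ # _parse_rmw_row_response (monkey-patched below) before returning.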
+ + # Create request_pb + value = b'bytes-value' + # We will call row.append_cell_value(COLUMN_FAMILY_ID, COLUMN, value). + request_pb = _ReadModifyWriteRowRequestPB( + table_name=table_name, + row_key=row_key, + rules=[ + _ReadModifyWriteRulePB( + family_name=column_family_id, + column_qualifier=column, + append_value=value, + ), + ], + ) + + # Create response_pb + response_pb = object() + + # Patch the stub used by the API method. + client._data_stub = stub = _FakeStub(response_pb) + + # Create expected_result. + row_responses = [] + expected_result = object() + + def mock_parse_rmw_row_response(row_response): + row_responses.append(row_response) + return expected_result + + # Perform the method and check the result. + with _Monkey(MUT, _parse_rmw_row_response=mock_parse_rmw_row_response): + row.append_cell_value(column_family_id, column, value) + result = row.commit() + + self.assertEqual(result, expected_result) + self.assertEqual(stub.method_calls, [( + 'ReadModifyWriteRow', + (request_pb,), + {}, + )]) + self.assertEqual(row_responses, [response_pb]) + self.assertEqual(row._rule_pb_list, []) + + def test_commit_no_rules(self): + from unit_tests.bigtable._testing import _FakeStub + + row_key = b'row_key' + client = _Client() + table = _Table(None, client=client) + row = self._makeOne(row_key, table) + self.assertEqual(row._rule_pb_list, []) + + # Patch the stub used by the API method. + client._data_stub = stub = _FakeStub() + + # Perform the method and check the result. + result = row.commit() + self.assertEqual(result, {}) + # Make sure no request was sent. + self.assertEqual(stub.method_calls, []) + + def test_commit_too_many_mutations(self): + from google.cloud._testing import _Monkey + from google.cloud.bigtable import row as MUT + + row_key = b'row_key' + table = object() + row = self._makeOne(row_key, table) + row._rule_pb_list = [1, 2, 3] + num_mutations = len(row._rule_pb_list) + with _Monkey(MUT, MAX_MUTATIONS=num_mutations - 1): + with self.assertRaises(ValueError): + row.commit() + + +class Test__parse_rmw_row_response(unittest.TestCase): + + def _callFUT(self, row_response): + from google.cloud.bigtable.row import _parse_rmw_row_response + return _parse_rmw_row_response(row_response) + + def test_it(self): + from google.cloud._helpers import _datetime_from_microseconds + col_fam1 = u'col-fam-id' + col_fam2 = u'col-fam-id2' + col_name1 = b'col-name1' + col_name2 = b'col-name2' + col_name3 = b'col-name3-but-other-fam' + cell_val1 = b'cell-val' + cell_val2 = b'cell-val-newer' + cell_val3 = b'altcol-cell-val' + cell_val4 = b'foo' + + microseconds = 1000871 + timestamp = _datetime_from_microseconds(microseconds) + expected_output = { + col_fam1: { + col_name1: [ + (cell_val1, timestamp), + (cell_val2, timestamp), + ], + col_name2: [ + (cell_val3, timestamp), + ], + }, + col_fam2: { + col_name3: [ + (cell_val4, timestamp), + ], + }, + } + response_row = _RowPB( + families=[ + _FamilyPB( + name=col_fam1, + columns=[ + _ColumnPB( + qualifier=col_name1, + cells=[ + _CellPB( + value=cell_val1, + timestamp_micros=microseconds, + ), + _CellPB( + value=cell_val2, + timestamp_micros=microseconds, + ), + ], + ), + _ColumnPB( + qualifier=col_name2, + cells=[ + _CellPB( + value=cell_val3, + timestamp_micros=microseconds, + ), + ], + ), + ], + ), + _FamilyPB( + name=col_fam2, + columns=[ + _ColumnPB( + qualifier=col_name3, + cells=[ + _CellPB( + value=cell_val4, + timestamp_micros=microseconds, + ), + ], + ), + ], + ), + ], + ) + sample_input = 
_ReadModifyWriteRowResponsePB(row=response_row) + self.assertEqual(expected_output, self._callFUT(sample_input)) + + +class Test__parse_family_pb(unittest.TestCase): + + def _callFUT(self, family_pb): + from google.cloud.bigtable.row import _parse_family_pb + return _parse_family_pb(family_pb) + + def test_it(self): + from google.cloud._helpers import _datetime_from_microseconds + col_fam1 = u'col-fam-id' + col_name1 = b'col-name1' + col_name2 = b'col-name2' + cell_val1 = b'cell-val' + cell_val2 = b'cell-val-newer' + cell_val3 = b'altcol-cell-val' + + microseconds = 5554441037 + timestamp = _datetime_from_microseconds(microseconds) + expected_dict = { + col_name1: [ + (cell_val1, timestamp), + (cell_val2, timestamp), + ], + col_name2: [ + (cell_val3, timestamp), + ], + } + expected_output = (col_fam1, expected_dict) + sample_input = _FamilyPB( + name=col_fam1, + columns=[ + _ColumnPB( + qualifier=col_name1, + cells=[ + _CellPB( + value=cell_val1, + timestamp_micros=microseconds, + ), + _CellPB( + value=cell_val2, + timestamp_micros=microseconds, + ), + ], + ), + _ColumnPB( + qualifier=col_name2, + cells=[ + _CellPB( + value=cell_val3, + timestamp_micros=microseconds, + ), + ], + ), + ], + ) + self.assertEqual(expected_output, self._callFUT(sample_input)) + + +def _CheckAndMutateRowRequestPB(*args, **kw): + from google.cloud.bigtable._generated import ( + bigtable_pb2 as messages_v2_pb2) + return messages_v2_pb2.CheckAndMutateRowRequest(*args, **kw) + + +def _CheckAndMutateRowResponsePB(*args, **kw): + from google.cloud.bigtable._generated import ( + bigtable_pb2 as messages_v2_pb2) + return messages_v2_pb2.CheckAndMutateRowResponse(*args, **kw) + + +def _MutateRowRequestPB(*args, **kw): + from google.cloud.bigtable._generated import ( + bigtable_pb2 as messages_v2_pb2) + return messages_v2_pb2.MutateRowRequest(*args, **kw) + + +def _ReadModifyWriteRowRequestPB(*args, **kw): + from google.cloud.bigtable._generated import ( + bigtable_pb2 as messages_v2_pb2) + return messages_v2_pb2.ReadModifyWriteRowRequest(*args, **kw) + + +def _ReadModifyWriteRowResponsePB(*args, **kw): + from google.cloud.bigtable._generated import ( + bigtable_pb2 as messages_v2_pb2) + return messages_v2_pb2.ReadModifyWriteRowResponse(*args, **kw) + + +def _CellPB(*args, **kw): + from google.cloud.bigtable._generated import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.Cell(*args, **kw) + + +def _ColumnPB(*args, **kw): + from google.cloud.bigtable._generated import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.Column(*args, **kw) + + +def _FamilyPB(*args, **kw): + from google.cloud.bigtable._generated import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.Family(*args, **kw) + + +def _MutationPB(*args, **kw): + from google.cloud.bigtable._generated import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.Mutation(*args, **kw) + + +def _MutationSetCellPB(*args, **kw): + from google.cloud.bigtable._generated import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.Mutation.SetCell(*args, **kw) + + +def _MutationDeleteFromColumnPB(*args, **kw): + from google.cloud.bigtable._generated import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.Mutation.DeleteFromColumn(*args, **kw) + + +def _MutationDeleteFromFamilyPB(*args, **kw): + from google.cloud.bigtable._generated import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.Mutation.DeleteFromFamily(*args, **kw) + + +def _MutationDeleteFromRowPB(*args, **kw): + from google.cloud.bigtable._generated import ( + data_pb2 as data_v2_pb2) + return 
data_v2_pb2.Mutation.DeleteFromRow(*args, **kw) + + +def _RowPB(*args, **kw): + from google.cloud.bigtable._generated import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.Row(*args, **kw) + + +def _ReadModifyWriteRulePB(*args, **kw): + from google.cloud.bigtable._generated import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.ReadModifyWriteRule(*args, **kw) + + +class _Client(object): + + data_stub = None + + +class _Instance(object): + + def __init__(self, client=None): + self._client = client + + +class _Table(object): + + def __init__(self, name, client=None): + self.name = name + self._instance = _Instance(client) diff --git a/packages/google-cloud-bigtable/unit_tests/test_row_data.py b/packages/google-cloud-bigtable/unit_tests/test_row_data.py new file mode 100644 index 000000000000..daa823aeee1b --- /dev/null +++ b/packages/google-cloud-bigtable/unit_tests/test_row_data.py @@ -0,0 +1,730 @@ +# Copyright 2016 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import unittest + + +class TestCell(unittest.TestCase): + + def _getTargetClass(self): + from google.cloud.bigtable.row_data import Cell + return Cell + + def _makeOne(self, *args, **kwargs): + return self._getTargetClass()(*args, **kwargs) + + def _from_pb_test_helper(self, labels=None): + import datetime + from google.cloud._helpers import _EPOCH + from google.cloud.bigtable._generated import ( + data_pb2 as data_v2_pb2) + + timestamp_micros = 18738724000 # Make sure millis granularity + timestamp = _EPOCH + datetime.timedelta(microseconds=timestamp_micros) + value = b'value-bytes' + + if labels is None: + cell_pb = data_v2_pb2.Cell( + value=value, timestamp_micros=timestamp_micros) + cell_expected = self._makeOne(value, timestamp) + else: + cell_pb = data_v2_pb2.Cell( + value=value, timestamp_micros=timestamp_micros, labels=labels) + cell_expected = self._makeOne(value, timestamp, labels=labels) + + klass = self._getTargetClass() + result = klass.from_pb(cell_pb) + self.assertEqual(result, cell_expected) + + def test_from_pb(self): + self._from_pb_test_helper() + + def test_from_pb_with_labels(self): + labels = [u'label1', u'label2'] + self._from_pb_test_helper(labels) + + def test_constructor(self): + value = object() + timestamp = object() + cell = self._makeOne(value, timestamp) + self.assertEqual(cell.value, value) + self.assertEqual(cell.timestamp, timestamp) + + def test___eq__(self): + value = object() + timestamp = object() + cell1 = self._makeOne(value, timestamp) + cell2 = self._makeOne(value, timestamp) + self.assertEqual(cell1, cell2) + + def test___eq__type_differ(self): + cell1 = self._makeOne(None, None) + cell2 = object() + self.assertNotEqual(cell1, cell2) + + def test___ne__same_value(self): + value = object() + timestamp = object() + cell1 = self._makeOne(value, timestamp) + cell2 = self._makeOne(value, timestamp) + comparison_val = (cell1 != cell2) + self.assertFalse(comparison_val) + + def test___ne__(self): + value1 = 'value1' + value2 = 'value2' + timestamp = object() + cell1 = 
self._makeOne(value1, timestamp) + cell2 = self._makeOne(value2, timestamp) + self.assertNotEqual(cell1, cell2) + + +class TestPartialRowData(unittest.TestCase): + + def _getTargetClass(self): + from google.cloud.bigtable.row_data import PartialRowData + return PartialRowData + + def _makeOne(self, *args, **kwargs): + return self._getTargetClass()(*args, **kwargs) + + def test_constructor(self): + row_key = object() + partial_row_data = self._makeOne(row_key) + self.assertIs(partial_row_data._row_key, row_key) + self.assertEqual(partial_row_data._cells, {}) + + def test___eq__(self): + row_key = object() + partial_row_data1 = self._makeOne(row_key) + partial_row_data2 = self._makeOne(row_key) + self.assertEqual(partial_row_data1, partial_row_data2) + + def test___eq__type_differ(self): + partial_row_data1 = self._makeOne(None) + partial_row_data2 = object() + self.assertNotEqual(partial_row_data1, partial_row_data2) + + def test___ne__same_value(self): + row_key = object() + partial_row_data1 = self._makeOne(row_key) + partial_row_data2 = self._makeOne(row_key) + comparison_val = (partial_row_data1 != partial_row_data2) + self.assertFalse(comparison_val) + + def test___ne__(self): + row_key1 = object() + partial_row_data1 = self._makeOne(row_key1) + row_key2 = object() + partial_row_data2 = self._makeOne(row_key2) + self.assertNotEqual(partial_row_data1, partial_row_data2) + + def test___ne__cells(self): + row_key = object() + partial_row_data1 = self._makeOne(row_key) + partial_row_data1._cells = object() + partial_row_data2 = self._makeOne(row_key) + self.assertNotEqual(partial_row_data1, partial_row_data2) + + def test_to_dict(self): + cell1 = object() + cell2 = object() + cell3 = object() + + family_name1 = u'name1' + family_name2 = u'name2' + qual1 = b'col1' + qual2 = b'col2' + qual3 = b'col3' + + partial_row_data = self._makeOne(None) + partial_row_data._cells = { + family_name1: { + qual1: cell1, + qual2: cell2, + }, + family_name2: { + qual3: cell3, + }, + } + + result = partial_row_data.to_dict() + expected_result = { + b'name1:col1': cell1, + b'name1:col2': cell2, + b'name2:col3': cell3, + } + self.assertEqual(result, expected_result) + + def test_cells_property(self): + partial_row_data = self._makeOne(None) + cells = {1: 2} + partial_row_data._cells = cells + # Make sure we get a copy, not the original. 
+ self.assertIsNot(partial_row_data.cells, cells) + self.assertEqual(partial_row_data.cells, cells) + + def test_row_key_getter(self): + row_key = object() + partial_row_data = self._makeOne(row_key) + self.assertIs(partial_row_data.row_key, row_key) + + +class TestPartialRowsData(unittest.TestCase): + + def _getTargetClass(self): + from google.cloud.bigtable.row_data import PartialRowsData + return PartialRowsData + + def _getDoNothingClass(self): + klass = self._getTargetClass() + + class FakePartialRowsData(klass): + + def __init__(self, *args, **kwargs): + super(FakePartialRowsData, self).__init__(*args, **kwargs) + self._consumed = [] + + def consume_next(self): + value = self._response_iterator.next() + self._consumed.append(value) + return value + + return FakePartialRowsData + + def _makeOne(self, *args, **kwargs): + return self._getTargetClass()(*args, **kwargs) + + def test_constructor(self): + response_iterator = object() + partial_rows_data = self._makeOne(response_iterator) + self.assertIs(partial_rows_data._response_iterator, + response_iterator) + self.assertEqual(partial_rows_data._rows, {}) + + def test___eq__(self): + response_iterator = object() + partial_rows_data1 = self._makeOne(response_iterator) + partial_rows_data2 = self._makeOne(response_iterator) + self.assertEqual(partial_rows_data1, partial_rows_data2) + + def test___eq__type_differ(self): + partial_rows_data1 = self._makeOne(None) + partial_rows_data2 = object() + self.assertNotEqual(partial_rows_data1, partial_rows_data2) + + def test___ne__same_value(self): + response_iterator = object() + partial_rows_data1 = self._makeOne(response_iterator) + partial_rows_data2 = self._makeOne(response_iterator) + comparison_val = (partial_rows_data1 != partial_rows_data2) + self.assertFalse(comparison_val) + + def test___ne__(self): + response_iterator1 = object() + partial_rows_data1 = self._makeOne(response_iterator1) + response_iterator2 = object() + partial_rows_data2 = self._makeOne(response_iterator2) + self.assertNotEqual(partial_rows_data1, partial_rows_data2) + + def test_state_start(self): + prd = self._makeOne([]) + self.assertEqual(prd.state, prd.START) + + def test_state_new_row_w_row(self): + prd = self._makeOne([]) + prd._last_scanned_row_key = '' + prd._row = object() + self.assertEqual(prd.state, prd.NEW_ROW) + + def test_rows_getter(self): + partial_rows_data = self._makeOne(None) + partial_rows_data._rows = value = object() + self.assertIs(partial_rows_data.rows, value) + + def test_cancel(self): + response_iterator = _MockCancellableIterator() + partial_rows_data = self._makeOne(response_iterator) + self.assertEqual(response_iterator.cancel_calls, 0) + partial_rows_data.cancel() + self.assertEqual(response_iterator.cancel_calls, 1) + + # 'consume_nest' tested via 'TestPartialRowsData_JSON_acceptance_tests' + + def test_consume_all(self): + klass = self._getDoNothingClass() + + value1, value2, value3 = object(), object(), object() + response_iterator = _MockCancellableIterator(value1, value2, value3) + partial_rows_data = klass(response_iterator) + self.assertEqual(partial_rows_data._consumed, []) + partial_rows_data.consume_all() + self.assertEqual( + partial_rows_data._consumed, [value1, value2, value3]) + + def test_consume_all_with_max_loops(self): + klass = self._getDoNothingClass() + + value1, value2, value3 = object(), object(), object() + response_iterator = _MockCancellableIterator(value1, value2, value3) + partial_rows_data = klass(response_iterator) + 
self.assertEqual(partial_rows_data._consumed, []) + partial_rows_data.consume_all(max_loops=1) + self.assertEqual(partial_rows_data._consumed, [value1]) + # Make sure the iterator still has the remaining values. + self.assertEqual( + list(response_iterator.iter_values), [value2, value3]) + + def test__copy_from_current_unset(self): + prd = self._makeOne([]) + chunks = _generate_cell_chunks(['']) + chunk = chunks[0] + prd._copy_from_current(chunk) + self.assertEqual(chunk.row_key, b'') + self.assertEqual(chunk.family_name.value, u'') + self.assertEqual(chunk.qualifier.value, b'') + self.assertEqual(chunk.timestamp_micros, 0) + self.assertEqual(chunk.labels, []) + + def test__copy_from_current_blank(self): + ROW_KEY = b'RK' + FAMILY_NAME = u'A' + QUALIFIER = b'C' + TIMESTAMP_MICROS = 100 + LABELS = ['L1', 'L2'] + prd = self._makeOne([]) + prd._cell = _PartialCellData() + chunks = _generate_cell_chunks(['']) + chunk = chunks[0] + chunk.row_key = ROW_KEY + chunk.family_name.value = FAMILY_NAME + chunk.qualifier.value = QUALIFIER + chunk.timestamp_micros = TIMESTAMP_MICROS + chunk.labels.extend(LABELS) + prd._copy_from_current(chunk) + self.assertEqual(chunk.row_key, ROW_KEY) + self.assertEqual(chunk.family_name.value, FAMILY_NAME) + self.assertEqual(chunk.qualifier.value, QUALIFIER) + self.assertEqual(chunk.timestamp_micros, TIMESTAMP_MICROS) + self.assertEqual(chunk.labels, LABELS) + + def test__copy_from_previous_unset(self): + prd = self._makeOne([]) + cell = _PartialCellData() + prd._copy_from_previous(cell) + self.assertEqual(cell.row_key, '') + self.assertEqual(cell.family_name, u'') + self.assertEqual(cell.qualifier, b'') + self.assertEqual(cell.timestamp_micros, 0) + self.assertEqual(cell.labels, []) + + def test__copy_from_previous_blank(self): + ROW_KEY = 'RK' + FAMILY_NAME = u'A' + QUALIFIER = b'C' + TIMESTAMP_MICROS = 100 + LABELS = ['L1', 'L2'] + prd = self._makeOne([]) + cell = _PartialCellData( + row_key=ROW_KEY, + family_name=FAMILY_NAME, + qualifier=QUALIFIER, + timestamp_micros=TIMESTAMP_MICROS, + labels=LABELS, + ) + prd._previous_cell = _PartialCellData() + prd._copy_from_previous(cell) + self.assertEqual(cell.row_key, ROW_KEY) + self.assertEqual(cell.family_name, FAMILY_NAME) + self.assertEqual(cell.qualifier, QUALIFIER) + self.assertEqual(cell.timestamp_micros, TIMESTAMP_MICROS) + self.assertEqual(cell.labels, LABELS) + + def test__copy_from_previous_filled(self): + ROW_KEY = 'RK' + FAMILY_NAME = u'A' + QUALIFIER = b'C' + TIMESTAMP_MICROS = 100 + LABELS = ['L1', 'L2'] + prd = self._makeOne([]) + prd._previous_cell = _PartialCellData( + row_key=ROW_KEY, + family_name=FAMILY_NAME, + qualifier=QUALIFIER, + timestamp_micros=TIMESTAMP_MICROS, + labels=LABELS, + ) + cell = _PartialCellData() + prd._copy_from_previous(cell) + self.assertEqual(cell.row_key, ROW_KEY) + self.assertEqual(cell.family_name, FAMILY_NAME) + self.assertEqual(cell.qualifier, QUALIFIER) + self.assertEqual(cell.timestamp_micros, 0) + self.assertEqual(cell.labels, []) + + def test__save_row_no_cell(self): + ROW_KEY = 'RK' + prd = self._makeOne([]) + row = prd._row = _Dummy(row_key=ROW_KEY) + prd._cell = None + prd._save_current_row() + self.assertIs(prd._rows[ROW_KEY], row) + + def test_invalid_last_scanned_row_key_on_start(self): + from google.cloud.bigtable.row_data import InvalidReadRowsResponse + response = _ReadRowsResponseV2(chunks=(), last_scanned_row_key='ABC') + iterator = _MockCancellableIterator(response) + prd = self._makeOne(iterator) + with self.assertRaises(InvalidReadRowsResponse): + 
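+ # With no row scanned yet, a first response that already carries a
+ # last_scanned_row_key should be rejected with InvalidReadRowsResponse
+ # (contrast with the following test, where a prior key has been set).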
prd.consume_next() + + def test_valid_last_scanned_row_key_on_start(self): + response = _ReadRowsResponseV2( + chunks=(), last_scanned_row_key='AFTER') + iterator = _MockCancellableIterator(response) + prd = self._makeOne(iterator) + prd._last_scanned_row_key = 'BEFORE' + prd.consume_next() + self.assertEqual(prd._last_scanned_row_key, 'AFTER') + + def test_invalid_empty_chunk(self): + from google.cloud.bigtable.row_data import InvalidChunk + chunks = _generate_cell_chunks(['']) + response = _ReadRowsResponseV2(chunks) + iterator = _MockCancellableIterator(response) + prd = self._makeOne(iterator) + with self.assertRaises(InvalidChunk): + prd.consume_next() + + def test_invalid_empty_second_chunk(self): + from google.cloud.bigtable.row_data import InvalidChunk + chunks = _generate_cell_chunks(['', '']) + first = chunks[0] + first.row_key = b'RK' + first.family_name.value = 'A' + first.qualifier.value = b'C' + response = _ReadRowsResponseV2(chunks) + iterator = _MockCancellableIterator(response) + prd = self._makeOne(iterator) + with self.assertRaises(InvalidChunk): + prd.consume_next() + + +class TestPartialRowsData_JSON_acceptance_tests(unittest.TestCase): + + _json_tests = None + + def _getTargetClass(self): + from google.cloud.bigtable.row_data import PartialRowsData + return PartialRowsData + + def _makeOne(self, *args, **kwargs): + return self._getTargetClass()(*args, **kwargs) + + def _load_json_test(self, test_name): + import os + if self.__class__._json_tests is None: + dirname = os.path.dirname(__file__) + filename = os.path.join(dirname, 'read-rows-acceptance-test.json') + raw = _parse_readrows_acceptance_tests(filename) + tests = self.__class__._json_tests = {} + for (name, chunks, results) in raw: + tests[name] = chunks, results + return self.__class__._json_tests[test_name] + + # JSON Error cases: invalid chunks + + def _fail_during_consume(self, testcase_name): + from google.cloud.bigtable.row_data import InvalidChunk + chunks, results = self._load_json_test(testcase_name) + response = _ReadRowsResponseV2(chunks) + iterator = _MockCancellableIterator(response) + prd = self._makeOne(iterator) + with self.assertRaises(InvalidChunk): + prd.consume_next() + expected_result = self._sort_flattend_cells( + [result for result in results if not result['error']]) + flattened = self._sort_flattend_cells(_flatten_cells(prd)) + self.assertEqual(flattened, expected_result) + + def test_invalid_no_cell_key_before_commit(self): + self._fail_during_consume('invalid - no cell key before commit') + + def test_invalid_no_cell_key_before_value(self): + self._fail_during_consume('invalid - no cell key before value') + + def test_invalid_new_col_family_wo_qualifier(self): + self._fail_during_consume( + 'invalid - new col family must specify qualifier') + + def test_invalid_no_commit_between_rows(self): + self._fail_during_consume('invalid - no commit between rows') + + def test_invalid_no_commit_after_first_row(self): + self._fail_during_consume('invalid - no commit after first row') + + def test_invalid_duplicate_row_key(self): + self._fail_during_consume('invalid - duplicate row key') + + def test_invalid_new_row_missing_row_key(self): + self._fail_during_consume('invalid - new row missing row key') + + def test_invalid_bare_reset(self): + self._fail_during_consume('invalid - bare reset') + + def test_invalid_bad_reset_no_commit(self): + self._fail_during_consume('invalid - bad reset, no commit') + + def test_invalid_missing_key_after_reset(self): + self._fail_during_consume('invalid - missing 
key after reset') + + def test_invalid_reset_with_chunk(self): + self._fail_during_consume('invalid - reset with chunk') + + def test_invalid_commit_with_chunk(self): + self._fail_during_consume('invalid - commit with chunk') + + # JSON Error cases: incomplete final row + + def _sort_flattend_cells(self, flattened): + import operator + key_func = operator.itemgetter('rk', 'fm', 'qual') + return sorted(flattened, key=key_func) + + def _incomplete_final_row(self, testcase_name): + chunks, results = self._load_json_test(testcase_name) + response = _ReadRowsResponseV2(chunks) + iterator = _MockCancellableIterator(response) + prd = self._makeOne(iterator) + prd.consume_next() + self.assertEqual(prd.state, prd.ROW_IN_PROGRESS) + expected_result = self._sort_flattend_cells( + [result for result in results if not result['error']]) + flattened = self._sort_flattend_cells(_flatten_cells(prd)) + self.assertEqual(flattened, expected_result) + + def test_invalid_no_commit(self): + self._incomplete_final_row('invalid - no commit') + + def test_invalid_last_row_missing_commit(self): + self._incomplete_final_row('invalid - last row missing commit') + + # Non-error cases + + _marker = object() + + def _match_results(self, testcase_name, expected_result=_marker): + chunks, results = self._load_json_test(testcase_name) + response = _ReadRowsResponseV2(chunks) + iterator = _MockCancellableIterator(response) + prd = self._makeOne(iterator) + prd.consume_next() + flattened = self._sort_flattend_cells(_flatten_cells(prd)) + if expected_result is self._marker: + expected_result = self._sort_flattend_cells(results) + self.assertEqual(flattened, expected_result) + + def test_bare_commit_implies_ts_zero(self): + self._match_results('bare commit implies ts=0') + + def test_simple_row_with_timestamp(self): + self._match_results('simple row with timestamp') + + def test_missing_timestamp_implies_ts_zero(self): + self._match_results('missing timestamp, implied ts=0') + + def test_empty_cell_value(self): + self._match_results('empty cell value') + + def test_two_unsplit_cells(self): + self._match_results('two unsplit cells') + + def test_two_qualifiers(self): + self._match_results('two qualifiers') + + def test_two_families(self): + self._match_results('two families') + + def test_with_labels(self): + self._match_results('with labels') + + def test_split_cell_bare_commit(self): + self._match_results('split cell, bare commit') + + def test_split_cell(self): + self._match_results('split cell') + + def test_split_four_ways(self): + self._match_results('split four ways') + + def test_two_split_cells(self): + self._match_results('two split cells') + + def test_multi_qualifier_splits(self): + self._match_results('multi-qualifier splits') + + def test_multi_qualifier_multi_split(self): + self._match_results('multi-qualifier multi-split') + + def test_multi_family_split(self): + self._match_results('multi-family split') + + def test_two_rows(self): + self._match_results('two rows') + + def test_two_rows_implicit_timestamp(self): + self._match_results('two rows implicit timestamp') + + def test_two_rows_empty_value(self): + self._match_results('two rows empty value') + + def test_two_rows_one_with_multiple_cells(self): + self._match_results('two rows, one with multiple cells') + + def test_two_rows_multiple_cells_multiple_families(self): + self._match_results('two rows, multiple cells, multiple families') + + def test_two_rows_multiple_cells(self): + self._match_results('two rows, multiple cells') + + def 
test_two_rows_four_cells_two_labels(self): + self._match_results('two rows, four cells, 2 labels') + + def test_two_rows_with_splits_same_timestamp(self): + self._match_results('two rows with splits, same timestamp') + + def test_no_data_after_reset(self): + # JSON testcase has `"results": null` + self._match_results('no data after reset', expected_result=[]) + + def test_simple_reset(self): + self._match_results('simple reset') + + def test_reset_to_new_val(self): + self._match_results('reset to new val') + + def test_reset_to_new_qual(self): + self._match_results('reset to new qual') + + def test_reset_with_splits(self): + self._match_results('reset with splits') + + def test_two_resets(self): + self._match_results('two resets') + + def test_reset_to_new_row(self): + self._match_results('reset to new row') + + def test_reset_in_between_chunks(self): + self._match_results('reset in between chunks') + + def test_empty_cell_chunk(self): + self._match_results('empty cell chunk') + + +def _flatten_cells(prd): + # Match results format from JSON testcases. + # Doesn't handle error cases. + from google.cloud._helpers import _bytes_to_unicode + from google.cloud._helpers import _microseconds_from_datetime + for row_key, row in prd.rows.items(): + for family_name, family in row.cells.items(): + for qualifier, column in family.items(): + for cell in column: + yield { + u'rk': _bytes_to_unicode(row_key), + u'fm': family_name, + u'qual': _bytes_to_unicode(qualifier), + u'ts': _microseconds_from_datetime(cell.timestamp), + u'value': _bytes_to_unicode(cell.value), + u'label': u' '.join(cell.labels), + u'error': False, + } + + +class _MockCancellableIterator(object): + + cancel_calls = 0 + + def __init__(self, *values): + self.iter_values = iter(values) + + def cancel(self): + self.cancel_calls += 1 + + def next(self): + return next(self.iter_values) + + def __next__(self): # pragma: NO COVER Py3k + return self.next() + + +class _Dummy(object): + + def __init__(self, **kw): + self.__dict__.update(kw) + + +class _PartialCellData(object): + + row_key = '' + family_name = u'' + qualifier = b'' + timestamp_micros = 0 + + def __init__(self, **kw): + self.labels = kw.pop('labels', []) + self.__dict__.update(kw) + + +class _ReadRowsResponseV2(object): + + def __init__(self, chunks, last_scanned_row_key=''): + self.chunks = chunks + self.last_scanned_row_key = last_scanned_row_key + + +def _generate_cell_chunks(chunk_text_pbs): + from google.protobuf.text_format import Merge + from google.cloud.bigtable._generated.bigtable_pb2 import ReadRowsResponse + + chunks = [] + + for chunk_text_pb in chunk_text_pbs: + chunk = ReadRowsResponse.CellChunk() + chunks.append(Merge(chunk_text_pb, chunk)) + + return chunks + + +def _parse_readrows_acceptance_tests(filename): + """Parse acceptance tests from JSON + + See: + https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/\ + 4d3185662ca61bc9fa1bdf1ec0166f6e5ecf86c6/bigtable-client-core/src/\ + test/resources/com/google/cloud/bigtable/grpc/scanner/v2/ + read-rows-acceptance-test.json + """ + import json + + with open(filename) as json_file: + test_json = json.load(json_file) + + for test in test_json['tests']: + name = test['name'] + chunks = _generate_cell_chunks(test['chunks']) + results = test['results'] + yield name, chunks, results diff --git a/packages/google-cloud-bigtable/unit_tests/test_row_filters.py b/packages/google-cloud-bigtable/unit_tests/test_row_filters.py new file mode 100644 index 000000000000..cb63856ac981 --- /dev/null +++ 
b/packages/google-cloud-bigtable/unit_tests/test_row_filters.py @@ -0,0 +1,1010 @@ +# Copyright 2016 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import unittest + + +class Test_BoolFilter(unittest.TestCase): + + def _getTargetClass(self): + from google.cloud.bigtable.row_filters import _BoolFilter + return _BoolFilter + + def _makeOne(self, *args, **kwargs): + return self._getTargetClass()(*args, **kwargs) + + def test_constructor(self): + flag = object() + row_filter = self._makeOne(flag) + self.assertIs(row_filter.flag, flag) + + def test___eq__type_differ(self): + flag = object() + row_filter1 = self._makeOne(flag) + row_filter2 = object() + self.assertNotEqual(row_filter1, row_filter2) + + def test___eq__same_value(self): + flag = object() + row_filter1 = self._makeOne(flag) + row_filter2 = self._makeOne(flag) + self.assertEqual(row_filter1, row_filter2) + + def test___ne__same_value(self): + flag = object() + row_filter1 = self._makeOne(flag) + row_filter2 = self._makeOne(flag) + comparison_val = (row_filter1 != row_filter2) + self.assertFalse(comparison_val) + + +class TestSinkFilter(unittest.TestCase): + + def _getTargetClass(self): + from google.cloud.bigtable.row_filters import SinkFilter + return SinkFilter + + def _makeOne(self, *args, **kwargs): + return self._getTargetClass()(*args, **kwargs) + + def test_to_pb(self): + flag = True + row_filter = self._makeOne(flag) + pb_val = row_filter.to_pb() + expected_pb = _RowFilterPB(sink=flag) + self.assertEqual(pb_val, expected_pb) + + +class TestPassAllFilter(unittest.TestCase): + + def _getTargetClass(self): + from google.cloud.bigtable.row_filters import PassAllFilter + return PassAllFilter + + def _makeOne(self, *args, **kwargs): + return self._getTargetClass()(*args, **kwargs) + + def test_to_pb(self): + flag = True + row_filter = self._makeOne(flag) + pb_val = row_filter.to_pb() + expected_pb = _RowFilterPB(pass_all_filter=flag) + self.assertEqual(pb_val, expected_pb) + + +class TestBlockAllFilter(unittest.TestCase): + + def _getTargetClass(self): + from google.cloud.bigtable.row_filters import BlockAllFilter + return BlockAllFilter + + def _makeOne(self, *args, **kwargs): + return self._getTargetClass()(*args, **kwargs) + + def test_to_pb(self): + flag = True + row_filter = self._makeOne(flag) + pb_val = row_filter.to_pb() + expected_pb = _RowFilterPB(block_all_filter=flag) + self.assertEqual(pb_val, expected_pb) + + +class Test_RegexFilter(unittest.TestCase): + + def _getTargetClass(self): + from google.cloud.bigtable.row_filters import _RegexFilter + return _RegexFilter + + def _makeOne(self, *args, **kwargs): + return self._getTargetClass()(*args, **kwargs) + + def test_constructor(self): + regex = b'abc' + row_filter = self._makeOne(regex) + self.assertIs(row_filter.regex, regex) + + def test_constructor_non_bytes(self): + regex = u'abc' + row_filter = self._makeOne(regex) + self.assertEqual(row_filter.regex, b'abc') + + def test___eq__type_differ(self): + regex = b'def-rgx' + row_filter1 = 
self._makeOne(regex) + row_filter2 = object() + self.assertNotEqual(row_filter1, row_filter2) + + def test___eq__same_value(self): + regex = b'trex-regex' + row_filter1 = self._makeOne(regex) + row_filter2 = self._makeOne(regex) + self.assertEqual(row_filter1, row_filter2) + + def test___ne__same_value(self): + regex = b'abc' + row_filter1 = self._makeOne(regex) + row_filter2 = self._makeOne(regex) + comparison_val = (row_filter1 != row_filter2) + self.assertFalse(comparison_val) + + +class TestRowKeyRegexFilter(unittest.TestCase): + + def _getTargetClass(self): + from google.cloud.bigtable.row_filters import RowKeyRegexFilter + return RowKeyRegexFilter + + def _makeOne(self, *args, **kwargs): + return self._getTargetClass()(*args, **kwargs) + + def test_to_pb(self): + regex = b'row-key-regex' + row_filter = self._makeOne(regex) + pb_val = row_filter.to_pb() + expected_pb = _RowFilterPB(row_key_regex_filter=regex) + self.assertEqual(pb_val, expected_pb) + + +class TestRowSampleFilter(unittest.TestCase): + + def _getTargetClass(self): + from google.cloud.bigtable.row_filters import RowSampleFilter + return RowSampleFilter + + def _makeOne(self, *args, **kwargs): + return self._getTargetClass()(*args, **kwargs) + + def test_constructor(self): + sample = object() + row_filter = self._makeOne(sample) + self.assertIs(row_filter.sample, sample) + + def test___eq__type_differ(self): + sample = object() + row_filter1 = self._makeOne(sample) + row_filter2 = object() + self.assertNotEqual(row_filter1, row_filter2) + + def test___eq__same_value(self): + sample = object() + row_filter1 = self._makeOne(sample) + row_filter2 = self._makeOne(sample) + self.assertEqual(row_filter1, row_filter2) + + def test_to_pb(self): + sample = 0.25 + row_filter = self._makeOne(sample) + pb_val = row_filter.to_pb() + expected_pb = _RowFilterPB(row_sample_filter=sample) + self.assertEqual(pb_val, expected_pb) + + +class TestFamilyNameRegexFilter(unittest.TestCase): + + def _getTargetClass(self): + from google.cloud.bigtable.row_filters import FamilyNameRegexFilter + return FamilyNameRegexFilter + + def _makeOne(self, *args, **kwargs): + return self._getTargetClass()(*args, **kwargs) + + def test_to_pb(self): + regex = u'family-regex' + row_filter = self._makeOne(regex) + pb_val = row_filter.to_pb() + expected_pb = _RowFilterPB(family_name_regex_filter=regex) + self.assertEqual(pb_val, expected_pb) + + +class TestColumnQualifierRegexFilter(unittest.TestCase): + + def _getTargetClass(self): + from google.cloud.bigtable.row_filters import ( + ColumnQualifierRegexFilter) + return ColumnQualifierRegexFilter + + def _makeOne(self, *args, **kwargs): + return self._getTargetClass()(*args, **kwargs) + + def test_to_pb(self): + regex = b'column-regex' + row_filter = self._makeOne(regex) + pb_val = row_filter.to_pb() + expected_pb = _RowFilterPB( + column_qualifier_regex_filter=regex) + self.assertEqual(pb_val, expected_pb) + + +class TestTimestampRange(unittest.TestCase): + + def _getTargetClass(self): + from google.cloud.bigtable.row_filters import TimestampRange + return TimestampRange + + def _makeOne(self, *args, **kwargs): + return self._getTargetClass()(*args, **kwargs) + + def test_constructor(self): + start = object() + end = object() + time_range = self._makeOne(start=start, end=end) + self.assertIs(time_range.start, start) + self.assertIs(time_range.end, end) + + def test___eq__(self): + start = object() + end = object() + time_range1 = self._makeOne(start=start, end=end) + time_range2 = self._makeOne(start=start, 
end=end) + self.assertEqual(time_range1, time_range2) + + def test___eq__type_differ(self): + start = object() + end = object() + time_range1 = self._makeOne(start=start, end=end) + time_range2 = object() + self.assertNotEqual(time_range1, time_range2) + + def test___ne__same_value(self): + start = object() + end = object() + time_range1 = self._makeOne(start=start, end=end) + time_range2 = self._makeOne(start=start, end=end) + comparison_val = (time_range1 != time_range2) + self.assertFalse(comparison_val) + + def _to_pb_helper(self, start_micros=None, end_micros=None): + import datetime + from google.cloud._helpers import _EPOCH + pb_kwargs = {} + + start = None + if start_micros is not None: + start = _EPOCH + datetime.timedelta(microseconds=start_micros) + pb_kwargs['start_timestamp_micros'] = start_micros + end = None + if end_micros is not None: + end = _EPOCH + datetime.timedelta(microseconds=end_micros) + pb_kwargs['end_timestamp_micros'] = end_micros + time_range = self._makeOne(start=start, end=end) + + expected_pb = _TimestampRangePB(**pb_kwargs) + self.assertEqual(time_range.to_pb(), expected_pb) + + def test_to_pb(self): + # Makes sure already milliseconds granularity + start_micros = 30871000 + end_micros = 12939371000 + self._to_pb_helper(start_micros=start_micros, + end_micros=end_micros) + + def test_to_pb_start_only(self): + # Makes sure already milliseconds granularity + start_micros = 30871000 + self._to_pb_helper(start_micros=start_micros) + + def test_to_pb_end_only(self): + # Makes sure already milliseconds granularity + end_micros = 12939371000 + self._to_pb_helper(end_micros=end_micros) + + +class TestTimestampRangeFilter(unittest.TestCase): + + def _getTargetClass(self): + from google.cloud.bigtable.row_filters import TimestampRangeFilter + return TimestampRangeFilter + + def _makeOne(self, *args, **kwargs): + return self._getTargetClass()(*args, **kwargs) + + def test_constructor(self): + range_ = object() + row_filter = self._makeOne(range_) + self.assertIs(row_filter.range_, range_) + + def test___eq__type_differ(self): + range_ = object() + row_filter1 = self._makeOne(range_) + row_filter2 = object() + self.assertNotEqual(row_filter1, row_filter2) + + def test___eq__same_value(self): + range_ = object() + row_filter1 = self._makeOne(range_) + row_filter2 = self._makeOne(range_) + self.assertEqual(row_filter1, row_filter2) + + def test_to_pb(self): + from google.cloud.bigtable.row_filters import TimestampRange + + range_ = TimestampRange() + row_filter = self._makeOne(range_) + pb_val = row_filter.to_pb() + expected_pb = _RowFilterPB( + timestamp_range_filter=_TimestampRangePB()) + self.assertEqual(pb_val, expected_pb) + + +class TestColumnRangeFilter(unittest.TestCase): + + def _getTargetClass(self): + from google.cloud.bigtable.row_filters import ColumnRangeFilter + return ColumnRangeFilter + + def _makeOne(self, *args, **kwargs): + return self._getTargetClass()(*args, **kwargs) + + def test_constructor_defaults(self): + column_family_id = object() + row_filter = self._makeOne(column_family_id) + self.assertIs(row_filter.column_family_id, column_family_id) + self.assertIsNone(row_filter.start_column) + self.assertIsNone(row_filter.end_column) + self.assertTrue(row_filter.inclusive_start) + self.assertTrue(row_filter.inclusive_end) + + def test_constructor_explicit(self): + column_family_id = object() + start_column = object() + end_column = object() + inclusive_start = object() + inclusive_end = object() + row_filter = self._makeOne( + column_family_id, + 
start_column=start_column, + end_column=end_column, + inclusive_start=inclusive_start, + inclusive_end=inclusive_end) + self.assertIs(row_filter.column_family_id, column_family_id) + self.assertIs(row_filter.start_column, start_column) + self.assertIs(row_filter.end_column, end_column) + self.assertIs(row_filter.inclusive_start, inclusive_start) + self.assertIs(row_filter.inclusive_end, inclusive_end) + + def test_constructor_bad_start(self): + column_family_id = object() + self.assertRaises(ValueError, self._makeOne, + column_family_id, inclusive_start=True) + + def test_constructor_bad_end(self): + column_family_id = object() + self.assertRaises(ValueError, self._makeOne, + column_family_id, inclusive_end=True) + + def test___eq__(self): + column_family_id = object() + start_column = object() + end_column = object() + inclusive_start = object() + inclusive_end = object() + row_filter1 = self._makeOne(column_family_id, + start_column=start_column, + end_column=end_column, + inclusive_start=inclusive_start, + inclusive_end=inclusive_end) + row_filter2 = self._makeOne(column_family_id, + start_column=start_column, + end_column=end_column, + inclusive_start=inclusive_start, + inclusive_end=inclusive_end) + self.assertEqual(row_filter1, row_filter2) + + def test___eq__type_differ(self): + column_family_id = object() + row_filter1 = self._makeOne(column_family_id) + row_filter2 = object() + self.assertNotEqual(row_filter1, row_filter2) + + def test_to_pb(self): + column_family_id = u'column-family-id' + row_filter = self._makeOne(column_family_id) + col_range_pb = _ColumnRangePB(family_name=column_family_id) + expected_pb = _RowFilterPB(column_range_filter=col_range_pb) + self.assertEqual(row_filter.to_pb(), expected_pb) + + def test_to_pb_inclusive_start(self): + column_family_id = u'column-family-id' + column = b'column' + row_filter = self._makeOne(column_family_id, start_column=column) + col_range_pb = _ColumnRangePB( + family_name=column_family_id, + start_qualifier_closed=column, + ) + expected_pb = _RowFilterPB(column_range_filter=col_range_pb) + self.assertEqual(row_filter.to_pb(), expected_pb) + + def test_to_pb_exclusive_start(self): + column_family_id = u'column-family-id' + column = b'column' + row_filter = self._makeOne(column_family_id, start_column=column, + inclusive_start=False) + col_range_pb = _ColumnRangePB( + family_name=column_family_id, + start_qualifier_open=column, + ) + expected_pb = _RowFilterPB(column_range_filter=col_range_pb) + self.assertEqual(row_filter.to_pb(), expected_pb) + + def test_to_pb_inclusive_end(self): + column_family_id = u'column-family-id' + column = b'column' + row_filter = self._makeOne(column_family_id, end_column=column) + col_range_pb = _ColumnRangePB( + family_name=column_family_id, + end_qualifier_closed=column, + ) + expected_pb = _RowFilterPB(column_range_filter=col_range_pb) + self.assertEqual(row_filter.to_pb(), expected_pb) + + def test_to_pb_exclusive_end(self): + column_family_id = u'column-family-id' + column = b'column' + row_filter = self._makeOne(column_family_id, end_column=column, + inclusive_end=False) + col_range_pb = _ColumnRangePB( + family_name=column_family_id, + end_qualifier_open=column, + ) + expected_pb = _RowFilterPB(column_range_filter=col_range_pb) + self.assertEqual(row_filter.to_pb(), expected_pb) + + +class TestValueRegexFilter(unittest.TestCase): + + def _getTargetClass(self): + from google.cloud.bigtable.row_filters import ValueRegexFilter + return ValueRegexFilter + + def _makeOne(self, *args, **kwargs): + 
return self._getTargetClass()(*args, **kwargs) + + def test_to_pb(self): + regex = b'value-regex' + row_filter = self._makeOne(regex) + pb_val = row_filter.to_pb() + expected_pb = _RowFilterPB(value_regex_filter=regex) + self.assertEqual(pb_val, expected_pb) + + +class TestValueRangeFilter(unittest.TestCase): + + def _getTargetClass(self): + from google.cloud.bigtable.row_filters import ValueRangeFilter + return ValueRangeFilter + + def _makeOne(self, *args, **kwargs): + return self._getTargetClass()(*args, **kwargs) + + def test_constructor_defaults(self): + row_filter = self._makeOne() + self.assertIsNone(row_filter.start_value) + self.assertIsNone(row_filter.end_value) + self.assertTrue(row_filter.inclusive_start) + self.assertTrue(row_filter.inclusive_end) + + def test_constructor_explicit(self): + start_value = object() + end_value = object() + inclusive_start = object() + inclusive_end = object() + row_filter = self._makeOne(start_value=start_value, + end_value=end_value, + inclusive_start=inclusive_start, + inclusive_end=inclusive_end) + self.assertIs(row_filter.start_value, start_value) + self.assertIs(row_filter.end_value, end_value) + self.assertIs(row_filter.inclusive_start, inclusive_start) + self.assertIs(row_filter.inclusive_end, inclusive_end) + + def test_constructor_bad_start(self): + self.assertRaises(ValueError, self._makeOne, inclusive_start=True) + + def test_constructor_bad_end(self): + self.assertRaises(ValueError, self._makeOne, inclusive_end=True) + + def test___eq__(self): + start_value = object() + end_value = object() + inclusive_start = object() + inclusive_end = object() + row_filter1 = self._makeOne(start_value=start_value, + end_value=end_value, + inclusive_start=inclusive_start, + inclusive_end=inclusive_end) + row_filter2 = self._makeOne(start_value=start_value, + end_value=end_value, + inclusive_start=inclusive_start, + inclusive_end=inclusive_end) + self.assertEqual(row_filter1, row_filter2) + + def test___eq__type_differ(self): + row_filter1 = self._makeOne() + row_filter2 = object() + self.assertNotEqual(row_filter1, row_filter2) + + def test_to_pb(self): + row_filter = self._makeOne() + expected_pb = _RowFilterPB( + value_range_filter=_ValueRangePB()) + self.assertEqual(row_filter.to_pb(), expected_pb) + + def test_to_pb_inclusive_start(self): + value = b'some-value' + row_filter = self._makeOne(start_value=value) + val_range_pb = _ValueRangePB(start_value_closed=value) + expected_pb = _RowFilterPB(value_range_filter=val_range_pb) + self.assertEqual(row_filter.to_pb(), expected_pb) + + def test_to_pb_exclusive_start(self): + value = b'some-value' + row_filter = self._makeOne(start_value=value, inclusive_start=False) + val_range_pb = _ValueRangePB(start_value_open=value) + expected_pb = _RowFilterPB(value_range_filter=val_range_pb) + self.assertEqual(row_filter.to_pb(), expected_pb) + + def test_to_pb_inclusive_end(self): + value = b'some-value' + row_filter = self._makeOne(end_value=value) + val_range_pb = _ValueRangePB(end_value_closed=value) + expected_pb = _RowFilterPB(value_range_filter=val_range_pb) + self.assertEqual(row_filter.to_pb(), expected_pb) + + def test_to_pb_exclusive_end(self): + value = b'some-value' + row_filter = self._makeOne(end_value=value, inclusive_end=False) + val_range_pb = _ValueRangePB(end_value_open=value) + expected_pb = _RowFilterPB(value_range_filter=val_range_pb) + self.assertEqual(row_filter.to_pb(), expected_pb) + + +class Test_CellCountFilter(unittest.TestCase): + + def _getTargetClass(self): + from 
google.cloud.bigtable.row_filters import _CellCountFilter + return _CellCountFilter + + def _makeOne(self, *args, **kwargs): + return self._getTargetClass()(*args, **kwargs) + + def test_constructor(self): + num_cells = object() + row_filter = self._makeOne(num_cells) + self.assertIs(row_filter.num_cells, num_cells) + + def test___eq__type_differ(self): + num_cells = object() + row_filter1 = self._makeOne(num_cells) + row_filter2 = object() + self.assertNotEqual(row_filter1, row_filter2) + + def test___eq__same_value(self): + num_cells = object() + row_filter1 = self._makeOne(num_cells) + row_filter2 = self._makeOne(num_cells) + self.assertEqual(row_filter1, row_filter2) + + def test___ne__same_value(self): + num_cells = object() + row_filter1 = self._makeOne(num_cells) + row_filter2 = self._makeOne(num_cells) + comparison_val = (row_filter1 != row_filter2) + self.assertFalse(comparison_val) + + +class TestCellsRowOffsetFilter(unittest.TestCase): + + def _getTargetClass(self): + from google.cloud.bigtable.row_filters import CellsRowOffsetFilter + return CellsRowOffsetFilter + + def _makeOne(self, *args, **kwargs): + return self._getTargetClass()(*args, **kwargs) + + def test_to_pb(self): + num_cells = 76 + row_filter = self._makeOne(num_cells) + pb_val = row_filter.to_pb() + expected_pb = _RowFilterPB( + cells_per_row_offset_filter=num_cells) + self.assertEqual(pb_val, expected_pb) + + +class TestCellsRowLimitFilter(unittest.TestCase): + + def _getTargetClass(self): + from google.cloud.bigtable.row_filters import CellsRowLimitFilter + return CellsRowLimitFilter + + def _makeOne(self, *args, **kwargs): + return self._getTargetClass()(*args, **kwargs) + + def test_to_pb(self): + num_cells = 189 + row_filter = self._makeOne(num_cells) + pb_val = row_filter.to_pb() + expected_pb = _RowFilterPB( + cells_per_row_limit_filter=num_cells) + self.assertEqual(pb_val, expected_pb) + + +class TestCellsColumnLimitFilter(unittest.TestCase): + + def _getTargetClass(self): + from google.cloud.bigtable.row_filters import CellsColumnLimitFilter + return CellsColumnLimitFilter + + def _makeOne(self, *args, **kwargs): + return self._getTargetClass()(*args, **kwargs) + + def test_to_pb(self): + num_cells = 10 + row_filter = self._makeOne(num_cells) + pb_val = row_filter.to_pb() + expected_pb = _RowFilterPB( + cells_per_column_limit_filter=num_cells) + self.assertEqual(pb_val, expected_pb) + + +class TestStripValueTransformerFilter(unittest.TestCase): + + def _getTargetClass(self): + from google.cloud.bigtable.row_filters import ( + StripValueTransformerFilter) + return StripValueTransformerFilter + + def _makeOne(self, *args, **kwargs): + return self._getTargetClass()(*args, **kwargs) + + def test_to_pb(self): + flag = True + row_filter = self._makeOne(flag) + pb_val = row_filter.to_pb() + expected_pb = _RowFilterPB(strip_value_transformer=flag) + self.assertEqual(pb_val, expected_pb) + + +class TestApplyLabelFilter(unittest.TestCase): + + def _getTargetClass(self): + from google.cloud.bigtable.row_filters import ApplyLabelFilter + return ApplyLabelFilter + + def _makeOne(self, *args, **kwargs): + return self._getTargetClass()(*args, **kwargs) + + def test_constructor(self): + label = object() + row_filter = self._makeOne(label) + self.assertIs(row_filter.label, label) + + def test___eq__type_differ(self): + label = object() + row_filter1 = self._makeOne(label) + row_filter2 = object() + self.assertNotEqual(row_filter1, row_filter2) + + def test___eq__same_value(self): + label = object() + row_filter1 = 
self._makeOne(label) + row_filter2 = self._makeOne(label) + self.assertEqual(row_filter1, row_filter2) + + def test_to_pb(self): + label = u'label' + row_filter = self._makeOne(label) + pb_val = row_filter.to_pb() + expected_pb = _RowFilterPB(apply_label_transformer=label) + self.assertEqual(pb_val, expected_pb) + + +class Test_FilterCombination(unittest.TestCase): + + def _getTargetClass(self): + from google.cloud.bigtable.row_filters import _FilterCombination + return _FilterCombination + + def _makeOne(self, *args, **kwargs): + return self._getTargetClass()(*args, **kwargs) + + def test_constructor_defaults(self): + row_filter = self._makeOne() + self.assertEqual(row_filter.filters, []) + + def test_constructor_explicit(self): + filters = object() + row_filter = self._makeOne(filters=filters) + self.assertIs(row_filter.filters, filters) + + def test___eq__(self): + filters = object() + row_filter1 = self._makeOne(filters=filters) + row_filter2 = self._makeOne(filters=filters) + self.assertEqual(row_filter1, row_filter2) + + def test___eq__type_differ(self): + filters = object() + row_filter1 = self._makeOne(filters=filters) + row_filter2 = object() + self.assertNotEqual(row_filter1, row_filter2) + + +class TestRowFilterChain(unittest.TestCase): + + def _getTargetClass(self): + from google.cloud.bigtable.row_filters import RowFilterChain + return RowFilterChain + + def _makeOne(self, *args, **kwargs): + return self._getTargetClass()(*args, **kwargs) + + def test_to_pb(self): + from google.cloud.bigtable.row_filters import RowSampleFilter + from google.cloud.bigtable.row_filters import ( + StripValueTransformerFilter) + + row_filter1 = StripValueTransformerFilter(True) + row_filter1_pb = row_filter1.to_pb() + + row_filter2 = RowSampleFilter(0.25) + row_filter2_pb = row_filter2.to_pb() + + row_filter3 = self._makeOne(filters=[row_filter1, row_filter2]) + filter_pb = row_filter3.to_pb() + + expected_pb = _RowFilterPB( + chain=_RowFilterChainPB( + filters=[row_filter1_pb, row_filter2_pb], + ), + ) + self.assertEqual(filter_pb, expected_pb) + + def test_to_pb_nested(self): + from google.cloud.bigtable.row_filters import CellsRowLimitFilter + from google.cloud.bigtable.row_filters import RowSampleFilter + from google.cloud.bigtable.row_filters import ( + StripValueTransformerFilter) + + row_filter1 = StripValueTransformerFilter(True) + row_filter2 = RowSampleFilter(0.25) + + row_filter3 = self._makeOne(filters=[row_filter1, row_filter2]) + row_filter3_pb = row_filter3.to_pb() + + row_filter4 = CellsRowLimitFilter(11) + row_filter4_pb = row_filter4.to_pb() + + row_filter5 = self._makeOne(filters=[row_filter3, row_filter4]) + filter_pb = row_filter5.to_pb() + + expected_pb = _RowFilterPB( + chain=_RowFilterChainPB( + filters=[row_filter3_pb, row_filter4_pb], + ), + ) + self.assertEqual(filter_pb, expected_pb) + + +class TestRowFilterUnion(unittest.TestCase): + + def _getTargetClass(self): + from google.cloud.bigtable.row_filters import RowFilterUnion + return RowFilterUnion + + def _makeOne(self, *args, **kwargs): + return self._getTargetClass()(*args, **kwargs) + + def test_to_pb(self): + from google.cloud.bigtable.row_filters import RowSampleFilter + from google.cloud.bigtable.row_filters import ( + StripValueTransformerFilter) + + row_filter1 = StripValueTransformerFilter(True) + row_filter1_pb = row_filter1.to_pb() + + row_filter2 = RowSampleFilter(0.25) + row_filter2_pb = row_filter2.to_pb() + + row_filter3 = self._makeOne(filters=[row_filter1, row_filter2]) + filter_pb = 
row_filter3.to_pb() + + expected_pb = _RowFilterPB( + interleave=_RowFilterInterleavePB( + filters=[row_filter1_pb, row_filter2_pb], + ), + ) + self.assertEqual(filter_pb, expected_pb) + + def test_to_pb_nested(self): + from google.cloud.bigtable.row_filters import CellsRowLimitFilter + from google.cloud.bigtable.row_filters import RowSampleFilter + from google.cloud.bigtable.row_filters import ( + StripValueTransformerFilter) + + row_filter1 = StripValueTransformerFilter(True) + row_filter2 = RowSampleFilter(0.25) + + row_filter3 = self._makeOne(filters=[row_filter1, row_filter2]) + row_filter3_pb = row_filter3.to_pb() + + row_filter4 = CellsRowLimitFilter(11) + row_filter4_pb = row_filter4.to_pb() + + row_filter5 = self._makeOne(filters=[row_filter3, row_filter4]) + filter_pb = row_filter5.to_pb() + + expected_pb = _RowFilterPB( + interleave=_RowFilterInterleavePB( + filters=[row_filter3_pb, row_filter4_pb], + ), + ) + self.assertEqual(filter_pb, expected_pb) + + +class TestConditionalRowFilter(unittest.TestCase): + + def _getTargetClass(self): + from google.cloud.bigtable.row_filters import ConditionalRowFilter + return ConditionalRowFilter + + def _makeOne(self, *args, **kwargs): + return self._getTargetClass()(*args, **kwargs) + + def test_constructor(self): + base_filter = object() + true_filter = object() + false_filter = object() + cond_filter = self._makeOne(base_filter, + true_filter=true_filter, + false_filter=false_filter) + self.assertIs(cond_filter.base_filter, base_filter) + self.assertIs(cond_filter.true_filter, true_filter) + self.assertIs(cond_filter.false_filter, false_filter) + + def test___eq__(self): + base_filter = object() + true_filter = object() + false_filter = object() + cond_filter1 = self._makeOne(base_filter, + true_filter=true_filter, + false_filter=false_filter) + cond_filter2 = self._makeOne(base_filter, + true_filter=true_filter, + false_filter=false_filter) + self.assertEqual(cond_filter1, cond_filter2) + + def test___eq__type_differ(self): + base_filter = object() + true_filter = object() + false_filter = object() + cond_filter1 = self._makeOne(base_filter, + true_filter=true_filter, + false_filter=false_filter) + cond_filter2 = object() + self.assertNotEqual(cond_filter1, cond_filter2) + + def test_to_pb(self): + from google.cloud.bigtable.row_filters import CellsRowOffsetFilter + from google.cloud.bigtable.row_filters import RowSampleFilter + from google.cloud.bigtable.row_filters import ( + StripValueTransformerFilter) + + row_filter1 = StripValueTransformerFilter(True) + row_filter1_pb = row_filter1.to_pb() + + row_filter2 = RowSampleFilter(0.25) + row_filter2_pb = row_filter2.to_pb() + + row_filter3 = CellsRowOffsetFilter(11) + row_filter3_pb = row_filter3.to_pb() + + row_filter4 = self._makeOne(row_filter1, true_filter=row_filter2, + false_filter=row_filter3) + filter_pb = row_filter4.to_pb() + + expected_pb = _RowFilterPB( + condition=_RowFilterConditionPB( + predicate_filter=row_filter1_pb, + true_filter=row_filter2_pb, + false_filter=row_filter3_pb, + ), + ) + self.assertEqual(filter_pb, expected_pb) + + def test_to_pb_true_only(self): + from google.cloud.bigtable.row_filters import RowSampleFilter + from google.cloud.bigtable.row_filters import ( + StripValueTransformerFilter) + + row_filter1 = StripValueTransformerFilter(True) + row_filter1_pb = row_filter1.to_pb() + + row_filter2 = RowSampleFilter(0.25) + row_filter2_pb = row_filter2.to_pb() + + row_filter3 = self._makeOne(row_filter1, true_filter=row_filter2) + filter_pb = 
row_filter3.to_pb() + + expected_pb = _RowFilterPB( + condition=_RowFilterConditionPB( + predicate_filter=row_filter1_pb, + true_filter=row_filter2_pb, + ), + ) + self.assertEqual(filter_pb, expected_pb) + + def test_to_pb_false_only(self): + from google.cloud.bigtable.row_filters import RowSampleFilter + from google.cloud.bigtable.row_filters import ( + StripValueTransformerFilter) + + row_filter1 = StripValueTransformerFilter(True) + row_filter1_pb = row_filter1.to_pb() + + row_filter2 = RowSampleFilter(0.25) + row_filter2_pb = row_filter2.to_pb() + + row_filter3 = self._makeOne(row_filter1, false_filter=row_filter2) + filter_pb = row_filter3.to_pb() + + expected_pb = _RowFilterPB( + condition=_RowFilterConditionPB( + predicate_filter=row_filter1_pb, + false_filter=row_filter2_pb, + ), + ) + self.assertEqual(filter_pb, expected_pb) + + +def _ColumnRangePB(*args, **kw): + from google.cloud.bigtable._generated import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.ColumnRange(*args, **kw) + + +def _RowFilterPB(*args, **kw): + from google.cloud.bigtable._generated import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.RowFilter(*args, **kw) + + +def _RowFilterChainPB(*args, **kw): + from google.cloud.bigtable._generated import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.RowFilter.Chain(*args, **kw) + + +def _RowFilterConditionPB(*args, **kw): + from google.cloud.bigtable._generated import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.RowFilter.Condition(*args, **kw) + + +def _RowFilterInterleavePB(*args, **kw): + from google.cloud.bigtable._generated import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.RowFilter.Interleave(*args, **kw) + + +def _TimestampRangePB(*args, **kw): + from google.cloud.bigtable._generated import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.TimestampRange(*args, **kw) + + +def _ValueRangePB(*args, **kw): + from google.cloud.bigtable._generated import ( + data_pb2 as data_v2_pb2) + return data_v2_pb2.ValueRange(*args, **kw) diff --git a/packages/google-cloud-bigtable/unit_tests/test_table.py b/packages/google-cloud-bigtable/unit_tests/test_table.py new file mode 100644 index 000000000000..113cfa0bbd64 --- /dev/null +++ b/packages/google-cloud-bigtable/unit_tests/test_table.py @@ -0,0 +1,585 @@ +# Copyright 2015 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
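+
+# (Editorial note, not part of the original commit.) The TestTable cases in
+# this module patch the client's gRPC stubs with _FakeStub, build the protobuf
+# messages they expect through the *_PB helper factories defined at the bottom
+# of the file, and then assert via stub.method_calls that each Table method
+# issued exactly that request.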
+ + +import unittest + + +class TestTable(unittest.TestCase): + + PROJECT_ID = 'project-id' + INSTANCE_ID = 'instance-id' + INSTANCE_NAME = ('projects/' + PROJECT_ID + '/instances/' + INSTANCE_ID) + TABLE_ID = 'table-id' + TABLE_NAME = INSTANCE_NAME + '/tables/' + TABLE_ID + ROW_KEY = b'row-key' + FAMILY_NAME = u'family' + QUALIFIER = b'qualifier' + TIMESTAMP_MICROS = 100 + VALUE = b'value' + + def _getTargetClass(self): + from google.cloud.bigtable.table import Table + return Table + + def _makeOne(self, *args, **kwargs): + return self._getTargetClass()(*args, **kwargs) + + def test_constructor(self): + table_id = 'table-id' + instance = object() + + table = self._makeOne(table_id, instance) + self.assertEqual(table.table_id, table_id) + self.assertIs(table._instance, instance) + + def test_name_property(self): + table_id = 'table-id' + instance_name = 'instance_name' + + instance = _Instance(instance_name) + table = self._makeOne(table_id, instance) + expected_name = instance_name + '/tables/' + table_id + self.assertEqual(table.name, expected_name) + + def test_column_family_factory(self): + from google.cloud.bigtable.column_family import ColumnFamily + + table_id = 'table-id' + gc_rule = object() + table = self._makeOne(table_id, None) + column_family_id = 'column_family_id' + column_family = table.column_family(column_family_id, gc_rule=gc_rule) + + self.assertIsInstance(column_family, ColumnFamily) + self.assertEqual(column_family.column_family_id, column_family_id) + self.assertIs(column_family.gc_rule, gc_rule) + self.assertEqual(column_family._table, table) + + def test_row_factory_direct(self): + from google.cloud.bigtable.row import DirectRow + + table_id = 'table-id' + table = self._makeOne(table_id, None) + row_key = b'row_key' + row = table.row(row_key) + + self.assertIsInstance(row, DirectRow) + self.assertEqual(row._row_key, row_key) + self.assertEqual(row._table, table) + + def test_row_factory_conditional(self): + from google.cloud.bigtable.row import ConditionalRow + + table_id = 'table-id' + table = self._makeOne(table_id, None) + row_key = b'row_key' + filter_ = object() + row = table.row(row_key, filter_=filter_) + + self.assertIsInstance(row, ConditionalRow) + self.assertEqual(row._row_key, row_key) + self.assertEqual(row._table, table) + + def test_row_factory_append(self): + from google.cloud.bigtable.row import AppendRow + + table_id = 'table-id' + table = self._makeOne(table_id, None) + row_key = b'row_key' + row = table.row(row_key, append=True) + + self.assertIsInstance(row, AppendRow) + self.assertEqual(row._row_key, row_key) + self.assertEqual(row._table, table) + + def test_row_factory_failure(self): + table = self._makeOne(self.TABLE_ID, None) + with self.assertRaises(ValueError): + table.row(b'row_key', filter_=object(), append=True) + + def test___eq__(self): + instance = object() + table1 = self._makeOne(self.TABLE_ID, instance) + table2 = self._makeOne(self.TABLE_ID, instance) + self.assertEqual(table1, table2) + + def test___eq__type_differ(self): + table1 = self._makeOne(self.TABLE_ID, None) + table2 = object() + self.assertNotEqual(table1, table2) + + def test___ne__same_value(self): + instance = object() + table1 = self._makeOne(self.TABLE_ID, instance) + table2 = self._makeOne(self.TABLE_ID, instance) + comparison_val = (table1 != table2) + self.assertFalse(comparison_val) + + def test___ne__(self): + table1 = self._makeOne('table_id1', 'instance1') + table2 = self._makeOne('table_id2', 'instance2') + self.assertNotEqual(table1, table2) + + def 
_create_test_helper(self, initial_split_keys, column_families=()): + from google.cloud._helpers import _to_bytes + from unit_tests.bigtable._testing import _FakeStub + + client = _Client() + instance = _Instance(self.INSTANCE_NAME, client=client) + table = self._makeOne(self.TABLE_ID, instance) + + # Create request_pb + splits_pb = [ + _CreateTableRequestSplitPB(key=_to_bytes(key)) + for key in initial_split_keys or ()] + table_pb = None + if column_families: + table_pb = _TablePB() + for cf in column_families: + cf_pb = table_pb.column_families[cf.column_family_id] + if cf.gc_rule is not None: + cf_pb.gc_rule.MergeFrom(cf.gc_rule.to_pb()) + request_pb = _CreateTableRequestPB( + initial_splits=splits_pb, + parent=self.INSTANCE_NAME, + table_id=self.TABLE_ID, + table=table_pb, + ) + + # Create response_pb + response_pb = _TablePB() + + # Patch the stub used by the API method. + client._table_stub = stub = _FakeStub(response_pb) + + # Create expected_result. + expected_result = None # create() has no return value. + + # Perform the method and check the result. + result = table.create(initial_split_keys=initial_split_keys, + column_families=column_families) + self.assertEqual(result, expected_result) + self.assertEqual(stub.method_calls, [( + 'CreateTable', + (request_pb,), + {}, + )]) + + def test_create(self): + initial_split_keys = None + self._create_test_helper(initial_split_keys) + + def test_create_with_split_keys(self): + initial_split_keys = [b's1', b's2'] + self._create_test_helper(initial_split_keys) + + def test_create_with_column_families(self): + from google.cloud.bigtable.column_family import ColumnFamily + from google.cloud.bigtable.column_family import MaxVersionsGCRule + + cf_id1 = 'col-fam-id1' + cf1 = ColumnFamily(cf_id1, None) + cf_id2 = 'col-fam-id2' + gc_rule = MaxVersionsGCRule(42) + cf2 = ColumnFamily(cf_id2, None, gc_rule=gc_rule) + + initial_split_keys = None + column_families = [cf1, cf2] + self._create_test_helper(initial_split_keys, + column_families=column_families) + + def _list_column_families_helper(self): + from unit_tests.bigtable._testing import _FakeStub + + client = _Client() + instance = _Instance(self.INSTANCE_NAME, client=client) + table = self._makeOne(self.TABLE_ID, instance) + + # Create request_pb + request_pb = _GetTableRequestPB(name=self.TABLE_NAME) + + # Create response_pb + COLUMN_FAMILY_ID = 'foo' + column_family = _ColumnFamilyPB() + response_pb = _TablePB( + column_families={COLUMN_FAMILY_ID: column_family}, + ) + + # Patch the stub used by the API method. + client._table_stub = stub = _FakeStub(response_pb) + + # Create expected_result. + expected_result = { + COLUMN_FAMILY_ID: table.column_family(COLUMN_FAMILY_ID), + } + + # Perform the method and check the result. + result = table.list_column_families() + self.assertEqual(result, expected_result) + self.assertEqual(stub.method_calls, [( + 'GetTable', + (request_pb,), + {}, + )]) + + def test_list_column_families(self): + self._list_column_families_helper() + + def test_delete(self): + from google.protobuf import empty_pb2 + from unit_tests.bigtable._testing import _FakeStub + + client = _Client() + instance = _Instance(self.INSTANCE_NAME, client=client) + table = self._makeOne(self.TABLE_ID, instance) + + # Create request_pb + request_pb = _DeleteTableRequestPB(name=self.TABLE_NAME) + + # Create response_pb + response_pb = empty_pb2.Empty() + + # Patch the stub used by the API method. + client._table_stub = stub = _FakeStub(response_pb) + + # Create expected_result. 
+ expected_result = None # delete() has no return value. + + # Perform the method and check the result. + result = table.delete() + self.assertEqual(result, expected_result) + self.assertEqual(stub.method_calls, [( + 'DeleteTable', + (request_pb,), + {}, + )]) + + def _read_row_helper(self, chunks, expected_result): + from google.cloud._testing import _Monkey + from unit_tests.bigtable._testing import _FakeStub + from google.cloud.bigtable import table as MUT + + client = _Client() + instance = _Instance(self.INSTANCE_NAME, client=client) + table = self._makeOne(self.TABLE_ID, instance) + + # Create request_pb + request_pb = object() # Returned by our mock. + mock_created = [] + + def mock_create_row_request(table_name, row_key, filter_): + mock_created.append((table_name, row_key, filter_)) + return request_pb + + # Create response_iterator + if chunks is None: + response_iterator = iter(()) # no responses at all + else: + response_pb = _ReadRowsResponsePB(chunks=chunks) + response_iterator = iter([response_pb]) + + # Patch the stub used by the API method. + client._data_stub = stub = _FakeStub(response_iterator) + + # Perform the method and check the result. + filter_obj = object() + with _Monkey(MUT, _create_row_request=mock_create_row_request): + result = table.read_row(self.ROW_KEY, filter_=filter_obj) + + self.assertEqual(result, expected_result) + self.assertEqual(stub.method_calls, [( + 'ReadRows', + (request_pb,), + {}, + )]) + self.assertEqual(mock_created, + [(table.name, self.ROW_KEY, filter_obj)]) + + def test_read_row_miss_no__responses(self): + self._read_row_helper(None, None) + + def test_read_row_miss_no_chunks_in_response(self): + chunks = [] + self._read_row_helper(chunks, None) + + def test_read_row_complete(self): + from google.cloud.bigtable.row_data import Cell + from google.cloud.bigtable.row_data import PartialRowData + + chunk = _ReadRowsResponseCellChunkPB( + row_key=self.ROW_KEY, + family_name=self.FAMILY_NAME, + qualifier=self.QUALIFIER, + timestamp_micros=self.TIMESTAMP_MICROS, + value=self.VALUE, + commit_row=True, + ) + chunks = [chunk] + expected_result = PartialRowData(row_key=self.ROW_KEY) + family = expected_result._cells.setdefault(self.FAMILY_NAME, {}) + column = family.setdefault(self.QUALIFIER, []) + column.append(Cell.from_pb(chunk)) + self._read_row_helper(chunks, expected_result) + + def test_read_row_still_partial(self): + chunk = _ReadRowsResponseCellChunkPB( + row_key=self.ROW_KEY, + family_name=self.FAMILY_NAME, + qualifier=self.QUALIFIER, + timestamp_micros=self.TIMESTAMP_MICROS, + value=self.VALUE, + ) + # No "commit row". + chunks = [chunk] + with self.assertRaises(ValueError): + self._read_row_helper(chunks, None) + + def test_read_rows(self): + from google.cloud._testing import _Monkey + from unit_tests.bigtable._testing import _FakeStub + from google.cloud.bigtable.row_data import PartialRowsData + from google.cloud.bigtable import table as MUT + + client = _Client() + instance = _Instance(self.INSTANCE_NAME, client=client) + table = self._makeOne(self.TABLE_ID, instance) + + # Create request_pb + request_pb = object() # Returned by our mock. + mock_created = [] + + def mock_create_row_request(table_name, **kwargs): + mock_created.append((table_name, kwargs)) + return request_pb + + # Create response_iterator + response_iterator = object() + + # Patch the stub used by the API method. + client._data_stub = stub = _FakeStub(response_iterator) + + # Create expected_result. 
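+ # (Editorial note, not part of the original commit.) The same
+ # response_iterator sentinel is handed to both _FakeStub and
+ # PartialRowsData, so the assertEqual below presumably relies on
+ # PartialRowsData defining equality in terms of its wrapped iterator.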
+ expected_result = PartialRowsData(response_iterator) + + # Perform the method and check the result. + start_key = b'start-key' + end_key = b'end-key' + filter_obj = object() + limit = 22 + with _Monkey(MUT, _create_row_request=mock_create_row_request): + result = table.read_rows( + start_key=start_key, end_key=end_key, filter_=filter_obj, + limit=limit) + + self.assertEqual(result, expected_result) + self.assertEqual(stub.method_calls, [( + 'ReadRows', + (request_pb,), + {}, + )]) + created_kwargs = { + 'start_key': start_key, + 'end_key': end_key, + 'filter_': filter_obj, + 'limit': limit, + } + self.assertEqual(mock_created, [(table.name, created_kwargs)]) + + def test_sample_row_keys(self): + from unit_tests.bigtable._testing import _FakeStub + + client = _Client() + instance = _Instance(self.INSTANCE_NAME, client=client) + table = self._makeOne(self.TABLE_ID, instance) + + # Create request_pb + request_pb = _SampleRowKeysRequestPB(table_name=self.TABLE_NAME) + + # Create response_iterator + response_iterator = object() # Just passed to a mock. + + # Patch the stub used by the API method. + client._data_stub = stub = _FakeStub(response_iterator) + + # Create expected_result. + expected_result = response_iterator + + # Perform the method and check the result. + result = table.sample_row_keys() + self.assertEqual(result, expected_result) + self.assertEqual(stub.method_calls, [( + 'SampleRowKeys', + (request_pb,), + {}, + )]) + + +class Test__create_row_request(unittest.TestCase): + + def _callFUT(self, table_name, row_key=None, start_key=None, end_key=None, + filter_=None, limit=None): + from google.cloud.bigtable.table import _create_row_request + return _create_row_request( + table_name, row_key=row_key, start_key=start_key, end_key=end_key, + filter_=filter_, limit=limit) + + def test_table_name_only(self): + table_name = 'table_name' + result = self._callFUT(table_name) + expected_result = _ReadRowsRequestPB( + table_name=table_name) + self.assertEqual(result, expected_result) + + def test_row_key_row_range_conflict(self): + with self.assertRaises(ValueError): + self._callFUT(None, row_key=object(), end_key=object()) + + def test_row_key(self): + table_name = 'table_name' + row_key = b'row_key' + result = self._callFUT(table_name, row_key=row_key) + expected_result = _ReadRowsRequestPB( + table_name=table_name, + ) + expected_result.rows.row_keys.append(row_key) + self.assertEqual(result, expected_result) + + def test_row_range_start_key(self): + table_name = 'table_name' + start_key = b'start_key' + result = self._callFUT(table_name, start_key=start_key) + expected_result = _ReadRowsRequestPB(table_name=table_name) + expected_result.rows.row_ranges.add(start_key_closed=start_key) + self.assertEqual(result, expected_result) + + def test_row_range_end_key(self): + table_name = 'table_name' + end_key = b'end_key' + result = self._callFUT(table_name, end_key=end_key) + expected_result = _ReadRowsRequestPB(table_name=table_name) + expected_result.rows.row_ranges.add(end_key_open=end_key) + self.assertEqual(result, expected_result) + + def test_row_range_both_keys(self): + table_name = 'table_name' + start_key = b'start_key' + end_key = b'end_key' + result = self._callFUT(table_name, start_key=start_key, + end_key=end_key) + expected_result = _ReadRowsRequestPB(table_name=table_name) + expected_result.rows.row_ranges.add( + start_key_closed=start_key, end_key_open=end_key) + self.assertEqual(result, expected_result) + + def test_with_filter(self): + from google.cloud.bigtable.row_filters 
import RowSampleFilter + table_name = 'table_name' + row_filter = RowSampleFilter(0.33) + result = self._callFUT(table_name, filter_=row_filter) + expected_result = _ReadRowsRequestPB( + table_name=table_name, + filter=row_filter.to_pb(), + ) + self.assertEqual(result, expected_result) + + def test_with_limit(self): + table_name = 'table_name' + limit = 1337 + result = self._callFUT(table_name, limit=limit) + expected_result = _ReadRowsRequestPB( + table_name=table_name, + rows_limit=limit, + ) + self.assertEqual(result, expected_result) + + +def _CreateTableRequestPB(*args, **kw): + from google.cloud.bigtable._generated import ( + bigtable_table_admin_pb2 as table_admin_v2_pb2) + return table_admin_v2_pb2.CreateTableRequest(*args, **kw) + + +def _CreateTableRequestSplitPB(*args, **kw): + from google.cloud.bigtable._generated import ( + bigtable_table_admin_pb2 as table_admin_v2_pb2) + return table_admin_v2_pb2.CreateTableRequest.Split(*args, **kw) + + +def _DeleteTableRequestPB(*args, **kw): + from google.cloud.bigtable._generated import ( + bigtable_table_admin_pb2 as table_admin_v2_pb2) + return table_admin_v2_pb2.DeleteTableRequest(*args, **kw) + + +def _GetTableRequestPB(*args, **kw): + from google.cloud.bigtable._generated import ( + bigtable_table_admin_pb2 as table_admin_v2_pb2) + return table_admin_v2_pb2.GetTableRequest(*args, **kw) + + +def _ReadRowsRequestPB(*args, **kw): + from google.cloud.bigtable._generated import ( + bigtable_pb2 as messages_v2_pb2) + return messages_v2_pb2.ReadRowsRequest(*args, **kw) + + +def _ReadRowsResponseCellChunkPB(*args, **kw): + from google.cloud.bigtable._generated import ( + bigtable_pb2 as messages_v2_pb2) + family_name = kw.pop('family_name') + qualifier = kw.pop('qualifier') + message = messages_v2_pb2.ReadRowsResponse.CellChunk(*args, **kw) + message.family_name.value = family_name + message.qualifier.value = qualifier + return message + + +def _ReadRowsResponsePB(*args, **kw): + from google.cloud.bigtable._generated import ( + bigtable_pb2 as messages_v2_pb2) + return messages_v2_pb2.ReadRowsResponse(*args, **kw) + + +def _SampleRowKeysRequestPB(*args, **kw): + from google.cloud.bigtable._generated import ( + bigtable_pb2 as messages_v2_pb2) + return messages_v2_pb2.SampleRowKeysRequest(*args, **kw) + + +def _TablePB(*args, **kw): + from google.cloud.bigtable._generated import ( + table_pb2 as table_v2_pb2) + return table_v2_pb2.Table(*args, **kw) + + +def _ColumnFamilyPB(*args, **kw): + from google.cloud.bigtable._generated import ( + table_pb2 as table_v2_pb2) + return table_v2_pb2.ColumnFamily(*args, **kw) + + +class _Client(object): + + data_stub = None + instance_stub = None + operations_stub = None + table_stub = None + + +class _Instance(object): + + def __init__(self, name, client=None): + self.name = name + self._client = client From d73ff64ec9e7b6c024b2da384d3b50594a6d685b Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Fri, 23 Sep 2016 13:14:38 -0700 Subject: [PATCH 002/892] Making bigtable subpackage into a proper package. 
- Adding setup.py, MANIFEST.in, README, .coveragerc and tox.ini - Adding google-cloud-bigtable as a dependency to the umbrella package - Adding the bigtable subdirectory into the list of packages for verifying the docs - Incorporating the bigtable subdirectory into the umbrella coverage report - Adding the bigtable only tox tests to the Travis config - Updating the project main README to refer the bigtable subdirectory - Renamed bigtable _testing imports (since in a new place) - Adding {toxinidir}/../core as a dependency for the bigtable tox config - Updating the location of the ignored bigtable generated files both in the `pycodestyle` config in `tox.ini` and in `run_pylint` Changed the bigtable test helper import via: $ git grep -l 'from unit_tests.bigtable._testing import _FakeStub' | > xargs sed -i s/'from unit_tests.bigtable._testing import _FakeStub'/'from unit_tests._testing import _FakeStub'/g --- packages/google-cloud-bigtable/.coveragerc | 13 ++++ packages/google-cloud-bigtable/MANIFEST.in | 4 ++ packages/google-cloud-bigtable/README.rst | 44 ++++++++++++ packages/google-cloud-bigtable/setup.py | 69 +++++++++++++++++++ packages/google-cloud-bigtable/tox.ini | 30 ++++++++ .../unit_tests/test_client.py | 2 +- .../unit_tests/test_cluster.py | 8 +-- .../unit_tests/test_column_family.py | 6 +- .../unit_tests/test_instance.py | 14 ++-- .../unit_tests/test_row.py | 12 ++-- .../unit_tests/test_table.py | 12 ++-- 11 files changed, 187 insertions(+), 27 deletions(-) create mode 100644 packages/google-cloud-bigtable/.coveragerc create mode 100644 packages/google-cloud-bigtable/MANIFEST.in create mode 100644 packages/google-cloud-bigtable/README.rst create mode 100644 packages/google-cloud-bigtable/setup.py create mode 100644 packages/google-cloud-bigtable/tox.ini diff --git a/packages/google-cloud-bigtable/.coveragerc b/packages/google-cloud-bigtable/.coveragerc new file mode 100644 index 000000000000..08f3fdea2433 --- /dev/null +++ b/packages/google-cloud-bigtable/.coveragerc @@ -0,0 +1,13 @@ +[run] +branch = True + +[report] +omit = + */_generated/*.py +fail_under = 100 +show_missing = True +exclude_lines = + # Re-enable the standard pragma + pragma: NO COVER + # Ignore debug-only repr + def __repr__ diff --git a/packages/google-cloud-bigtable/MANIFEST.in b/packages/google-cloud-bigtable/MANIFEST.in new file mode 100644 index 000000000000..cb3a2b9ef4fa --- /dev/null +++ b/packages/google-cloud-bigtable/MANIFEST.in @@ -0,0 +1,4 @@ +include README.rst +graft google +graft unit_tests +global-exclude *.pyc diff --git a/packages/google-cloud-bigtable/README.rst b/packages/google-cloud-bigtable/README.rst new file mode 100644 index 000000000000..619a0e15663d --- /dev/null +++ b/packages/google-cloud-bigtable/README.rst @@ -0,0 +1,44 @@ +Python Client for Google Cloud Bigtable +======================================= + + Python idiomatic client for `Google Cloud Bigtable`_ + +.. _Google Cloud Bigtable: https://cloud.google.com/bigtable/docs/ + +- `Homepage`_ +- `API Documentation`_ + +.. _Homepage: https://googlecloudplatform.github.io/google-cloud-python/ +.. _API Documentation: http://googlecloudplatform.github.io/google-cloud-python/ + +Quick Start +----------- + +:: + + $ pip install --upgrade google-cloud-bigtable + +Authentication +-------------- + +With ``google-cloud-python`` we try to make authentication as painless as +possible. Check out the `Authentication section`_ in our documentation to +learn more. 
You may also find the `authentication document`_ shared by all +the ``google-cloud-*`` libraries to be helpful. + +.. _Authentication section: http://google-cloud-python.readthedocs.io/en/latest/google-cloud-auth.html +.. _authentication document: https://github.com/GoogleCloudPlatform/gcloud-common/tree/master/authentication + +Using the API +------------- + +Cloud `Bigtable`_ is Google's NoSQL Big Data database service. It's the same +database that powers many core Google services, including Search, +Analytics, Maps, and Gmail. + +.. _Bigtable: https://cloud.google.com/bigtable/docs/ + +See the ``google-cloud-python`` API `Bigtable documentation`_ to learn +how to manage your data in Bigtable tables. + +.. _Bigtable documentation: https://google-cloud-python.readthedocs.io/en/stable/bigtable-usage.html diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py new file mode 100644 index 000000000000..71b82d119b6c --- /dev/null +++ b/packages/google-cloud-bigtable/setup.py @@ -0,0 +1,69 @@ +# Copyright 2016 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os + +from setuptools import find_packages +from setuptools import setup + + +PACKAGE_ROOT = os.path.abspath(os.path.dirname(__file__)) + +with open(os.path.join(PACKAGE_ROOT, 'README.rst')) as file_obj: + README = file_obj.read() + +# NOTE: This is duplicated throughout and we should try to +# consolidate. 
+SETUP_BASE = { + 'author': 'Google Cloud Platform', + 'author_email': 'jjg+google-cloud-python@google.com', + 'scripts': [], + 'url': 'https://github.com/GoogleCloudPlatform/google-cloud-python', + 'license': 'Apache 2.0', + 'platforms': 'Posix; MacOS X; Windows', + 'include_package_data': True, + 'zip_safe': False, + 'classifiers': [ + 'Development Status :: 4 - Beta', + 'Intended Audience :: Developers', + 'License :: OSI Approved :: Apache Software License', + 'Operating System :: OS Independent', + 'Programming Language :: Python :: 2', + 'Programming Language :: Python :: 2.7', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.4', + 'Programming Language :: Python :: 3.5', + 'Topic :: Internet', + ], +} + + +REQUIREMENTS = [ + 'google-cloud-core', + 'grpcio >= 1.0.0', +] + +setup( + name='google-cloud-bigtable', + version='0.20.0dev', + description='Python Client for Google Cloud Bigtable', + long_description=README, + namespace_packages=[ + 'google', + 'google.cloud', + ], + packages=find_packages(), + install_requires=REQUIREMENTS, + **SETUP_BASE +) diff --git a/packages/google-cloud-bigtable/tox.ini b/packages/google-cloud-bigtable/tox.ini new file mode 100644 index 000000000000..b5b080543a4c --- /dev/null +++ b/packages/google-cloud-bigtable/tox.ini @@ -0,0 +1,30 @@ +[tox] +envlist = + py27,py34,py35,cover + +[testing] +deps = + {toxinidir}/../core + pytest +covercmd = + py.test --quiet \ + --cov=google.cloud.bigtable \ + --cov=unit_tests \ + --cov-config {toxinidir}/.coveragerc \ + unit_tests + +[testenv] +commands = + py.test --quiet {posargs} unit_tests +deps = + {[testing]deps} + +[testenv:cover] +basepython = + python2.7 +commands = + {[testing]covercmd} +deps = + {[testenv]deps} + coverage + pytest-cov diff --git a/packages/google-cloud-bigtable/unit_tests/test_client.py b/packages/google-cloud-bigtable/unit_tests/test_client.py index 237011c9807d..eb1048d35ec4 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_client.py +++ b/packages/google-cloud-bigtable/unit_tests/test_client.py @@ -553,7 +553,7 @@ def test_list_instances(self): instance_pb2 as data_v2_pb2) from google.cloud.bigtable._generated import ( bigtable_instance_admin_pb2 as messages_v2_pb2) - from unit_tests.bigtable._testing import _FakeStub + from unit_tests._testing import _FakeStub LOCATION = 'projects/' + self.PROJECT + '/locations/locname' FAILED_LOCATION = 'FAILED' diff --git a/packages/google-cloud-bigtable/unit_tests/test_cluster.py b/packages/google-cloud-bigtable/unit_tests/test_cluster.py index 82335b293f0f..e497a025fd2c 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_cluster.py +++ b/packages/google-cloud-bigtable/unit_tests/test_cluster.py @@ -187,7 +187,7 @@ def test___ne__(self): self.assertNotEqual(cluster1, cluster2) def test_reload(self): - from unit_tests.bigtable._testing import _FakeStub + from unit_tests._testing import _FakeStub from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES SERVE_NODES = 31 @@ -232,7 +232,7 @@ def test_create(self): from google.cloud.operation import Operation from google.cloud.bigtable._generated import ( bigtable_instance_admin_pb2 as messages_v2_pb2) - from unit_tests.bigtable._testing import _FakeStub + from unit_tests._testing import _FakeStub SERVE_NODES = 4 client = _Client(self.PROJECT) @@ -281,7 +281,7 @@ def test_update(self): instance_pb2 as data_v2_pb2) from google.cloud.bigtable._generated import ( bigtable_instance_admin_pb2 as messages_v2_pb2) - from unit_tests.bigtable._testing import 
_FakeStub + from unit_tests._testing import _FakeStub from google.cloud.bigtable.cluster import _UPDATE_CLUSTER_METADATA_URL NOW = datetime.datetime.utcnow() @@ -339,7 +339,7 @@ def test_update(self): def test_delete(self): from google.protobuf import empty_pb2 - from unit_tests.bigtable._testing import _FakeStub + from unit_tests._testing import _FakeStub client = _Client(self.PROJECT) instance = _Instance(self.INSTANCE_ID, client) diff --git a/packages/google-cloud-bigtable/unit_tests/test_column_family.py b/packages/google-cloud-bigtable/unit_tests/test_column_family.py index c2ad847694b7..dee8db4e169b 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_column_family.py +++ b/packages/google-cloud-bigtable/unit_tests/test_column_family.py @@ -409,7 +409,7 @@ def test_to_pb_with_rule(self): def _create_test_helper(self, gc_rule=None): from google.cloud.bigtable._generated import ( bigtable_table_admin_pb2 as table_admin_v2_pb2) - from unit_tests.bigtable._testing import _FakeStub + from unit_tests._testing import _FakeStub project_id = 'project-id' zone = 'zone' @@ -465,7 +465,7 @@ def test_create_with_gc_rule(self): self._create_test_helper(gc_rule=gc_rule) def _update_test_helper(self, gc_rule=None): - from unit_tests.bigtable._testing import _FakeStub + from unit_tests._testing import _FakeStub from google.cloud.bigtable._generated import ( bigtable_table_admin_pb2 as table_admin_v2_pb2) @@ -526,7 +526,7 @@ def test_delete(self): from google.protobuf import empty_pb2 from google.cloud.bigtable._generated import ( bigtable_table_admin_pb2 as table_admin_v2_pb2) - from unit_tests.bigtable._testing import _FakeStub + from unit_tests._testing import _FakeStub project_id = 'project-id' zone = 'zone' diff --git a/packages/google-cloud-bigtable/unit_tests/test_instance.py b/packages/google-cloud-bigtable/unit_tests/test_instance.py index cc2c49366c90..bf47ab4f62a3 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_instance.py +++ b/packages/google-cloud-bigtable/unit_tests/test_instance.py @@ -190,7 +190,7 @@ def test_reload(self): instance_pb2 as data_v2_pb2) from google.cloud.bigtable._generated import ( bigtable_instance_admin_pb2 as messages_v2_pb) - from unit_tests.bigtable._testing import _FakeStub + from unit_tests._testing import _FakeStub client = _Client(self.PROJECT) instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID) @@ -233,7 +233,7 @@ def test_create(self): from google.cloud.bigtable._generated import ( bigtable_instance_admin_pb2 as messages_v2_pb2) from google.cloud._helpers import _datetime_to_pb_timestamp - from unit_tests.bigtable._testing import _FakeStub + from unit_tests._testing import _FakeStub from google.cloud.operation import Operation from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES from google.cloud.bigtable.instance import ( @@ -287,7 +287,7 @@ def test_create_w_explicit_serve_nodes(self): from google.longrunning import operations_pb2 from google.cloud.bigtable._generated import ( bigtable_instance_admin_pb2 as messages_v2_pb2) - from unit_tests.bigtable._testing import _FakeStub + from unit_tests._testing import _FakeStub from google.cloud.operation import Operation SERVE_NODES = 5 @@ -326,7 +326,7 @@ def test_create_w_explicit_serve_nodes(self): def test_update(self): from google.cloud.bigtable._generated import ( instance_pb2 as data_v2_pb2) - from unit_tests.bigtable._testing import _FakeStub + from unit_tests._testing import _FakeStub client = _Client(self.PROJECT) instance = self._makeOne(self.INSTANCE_ID, 
client, self.LOCATION_ID, @@ -361,7 +361,7 @@ def test_delete(self): from google.protobuf import empty_pb2 from google.cloud.bigtable._generated import ( bigtable_instance_admin_pb2 as messages_v2_pb) - from unit_tests.bigtable._testing import _FakeStub + from unit_tests._testing import _FakeStub client = _Client(self.PROJECT) instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID) @@ -394,7 +394,7 @@ def test_list_clusters(self): instance_pb2 as instance_v2_pb2) from google.cloud.bigtable._generated import ( bigtable_instance_admin_pb2 as messages_v2_pb2) - from unit_tests.bigtable._testing import _FakeStub + from unit_tests._testing import _FakeStub FAILED_LOCATION = 'FAILED' FAILED_LOCATIONS = [FAILED_LOCATION] @@ -451,7 +451,7 @@ def _list_tables_helper(self, table_name=None): table_pb2 as table_data_v2_pb2) from google.cloud.bigtable._generated import ( bigtable_table_admin_pb2 as table_messages_v1_pb2) - from unit_tests.bigtable._testing import _FakeStub + from unit_tests._testing import _FakeStub client = _Client(self.PROJECT) instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID) diff --git a/packages/google-cloud-bigtable/unit_tests/test_row.py b/packages/google-cloud-bigtable/unit_tests/test_row.py index 0a495bbbe433..d40d6b64b720 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_row.py +++ b/packages/google-cloud-bigtable/unit_tests/test_row.py @@ -296,7 +296,7 @@ def test_delete_cells_with_string_columns(self): def test_commit(self): from google.protobuf import empty_pb2 - from unit_tests.bigtable._testing import _FakeStub + from unit_tests._testing import _FakeStub row_key = b'row_key' table_name = 'projects/more-stuff' @@ -356,7 +356,7 @@ def test_commit_too_many_mutations(self): row.commit() def test_commit_no_mutations(self): - from unit_tests.bigtable._testing import _FakeStub + from unit_tests._testing import _FakeStub row_key = b'row_key' client = _Client() @@ -407,7 +407,7 @@ def test__get_mutations(self): self.assertIs(false_mutations, row._get_mutations(None)) def test_commit(self): - from unit_tests.bigtable._testing import _FakeStub + from unit_tests._testing import _FakeStub from google.cloud.bigtable.row_filters import RowSampleFilter row_key = b'row_key' @@ -495,7 +495,7 @@ def test_commit_too_many_mutations(self): row.commit() def test_commit_no_mutations(self): - from unit_tests.bigtable._testing import _FakeStub + from unit_tests._testing import _FakeStub row_key = b'row_key' client = _Client() @@ -573,7 +573,7 @@ def test_increment_cell_value(self): def test_commit(self): from google.cloud._testing import _Monkey - from unit_tests.bigtable._testing import _FakeStub + from unit_tests._testing import _FakeStub from google.cloud.bigtable import row as MUT row_key = b'row_key' @@ -628,7 +628,7 @@ def mock_parse_rmw_row_response(row_response): self.assertEqual(row._rule_pb_list, []) def test_commit_no_rules(self): - from unit_tests.bigtable._testing import _FakeStub + from unit_tests._testing import _FakeStub row_key = b'row_key' client = _Client() diff --git a/packages/google-cloud-bigtable/unit_tests/test_table.py b/packages/google-cloud-bigtable/unit_tests/test_table.py index 113cfa0bbd64..7dca91768861 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_table.py +++ b/packages/google-cloud-bigtable/unit_tests/test_table.py @@ -134,7 +134,7 @@ def test___ne__(self): def _create_test_helper(self, initial_split_keys, column_families=()): from google.cloud._helpers import _to_bytes - from unit_tests.bigtable._testing 
import _FakeStub + from unit_tests._testing import _FakeStub client = _Client() instance = _Instance(self.INSTANCE_NAME, client=client) @@ -201,7 +201,7 @@ def test_create_with_column_families(self): column_families=column_families) def _list_column_families_helper(self): - from unit_tests.bigtable._testing import _FakeStub + from unit_tests._testing import _FakeStub client = _Client() instance = _Instance(self.INSTANCE_NAME, client=client) @@ -239,7 +239,7 @@ def test_list_column_families(self): def test_delete(self): from google.protobuf import empty_pb2 - from unit_tests.bigtable._testing import _FakeStub + from unit_tests._testing import _FakeStub client = _Client() instance = _Instance(self.INSTANCE_NAME, client=client) @@ -268,7 +268,7 @@ def test_delete(self): def _read_row_helper(self, chunks, expected_result): from google.cloud._testing import _Monkey - from unit_tests.bigtable._testing import _FakeStub + from unit_tests._testing import _FakeStub from google.cloud.bigtable import table as MUT client = _Client() @@ -348,7 +348,7 @@ def test_read_row_still_partial(self): def test_read_rows(self): from google.cloud._testing import _Monkey - from unit_tests.bigtable._testing import _FakeStub + from unit_tests._testing import _FakeStub from google.cloud.bigtable.row_data import PartialRowsData from google.cloud.bigtable import table as MUT @@ -398,7 +398,7 @@ def mock_create_row_request(table_name, **kwargs): self.assertEqual(mock_created, [(table.name, created_kwargs)]) def test_sample_row_keys(self): - from unit_tests.bigtable._testing import _FakeStub + from unit_tests._testing import _FakeStub client = _Client() instance = _Instance(self.INSTANCE_NAME, client=client) From 0ffdfa6d7b8f9fc7865d26f9e55e5a7c0af26329 Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Mon, 26 Sep 2016 12:27:17 -0700 Subject: [PATCH 003/892] Removing custom generated long-running operations. --- .../bigtable/_generated/_operations.proto | 144 ---------- .../_generated/operations_grpc_pb2.py | 264 ------------------ .../google/cloud/bigtable/client.py | 11 +- .../unit_tests/test_client.py | 8 +- 4 files changed, 12 insertions(+), 415 deletions(-) delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_operations.proto delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/_generated/operations_grpc_pb2.py diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_operations.proto b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_operations.proto deleted file mode 100644 index a358d0a38787..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_operations.proto +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright (c) 2015, Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -syntax = "proto3"; - -package google.longrunning; - -import "google/api/annotations.proto"; -import "google/protobuf/any.proto"; -import "google/protobuf/empty.proto"; -import "google/rpc/status.proto"; - -option java_multiple_files = true; -option java_outer_classname = "OperationsProto"; -option java_package = "com.google.longrunning"; - - -// Manages long-running operations with an API service. -// -// When an API method normally takes long time to complete, it can be designed -// to return [Operation][google.longrunning.Operation] to the client, and the client can use this -// interface to receive the real response asynchronously by polling the -// operation resource, or using `google.watcher.v1.Watcher` interface to watch -// the response, or pass the operation resource to another API (such as Google -// Cloud Pub/Sub API) to receive the response. Any API service that returns -// long-running operations should implement the `Operations` interface so -// developers can have a consistent client experience. -service Operations { - // Gets the latest state of a long-running operation. Clients may use this - // method to poll the operation result at intervals as recommended by the API - // service. - rpc GetOperation(GetOperationRequest) returns (Operation) { - option (google.api.http) = { get: "/v1/{name=operations/**}" }; - } - - // Lists operations that match the specified filter in the request. If the - // server doesn't support this method, it returns - // `google.rpc.Code.UNIMPLEMENTED`. - rpc ListOperations(ListOperationsRequest) returns (ListOperationsResponse) { - option (google.api.http) = { get: "/v1/{name=operations}" }; - } - - // Starts asynchronous cancellation on a long-running operation. The server - // makes a best effort to cancel the operation, but success is not - // guaranteed. If the server doesn't support this method, it returns - // `google.rpc.Code.UNIMPLEMENTED`. Clients may use - // [Operations.GetOperation] or other methods to check whether the - // cancellation succeeded or the operation completed despite cancellation. - rpc CancelOperation(CancelOperationRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { post: "/v1/{name=operations/**}:cancel" body: "*" }; - } - - // Deletes a long-running operation. It indicates the client is no longer - // interested in the operation result. It does not cancel the operation. - rpc DeleteOperation(DeleteOperationRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { delete: "/v1/{name=operations/**}" }; - } -} - -// This resource represents a long-running operation that is the result of a -// network API call. -message Operation { - // The name of the operation resource, which is only unique within the same - // service that originally returns it. - string name = 1; - - // Some service-specific metadata associated with the operation. It typically - // contains progress information and common metadata such as create time. - // Some services may not provide such metadata. Any method that returns a - // long-running operation should document the metadata type, if any. - google.protobuf.Any metadata = 2; - - // If the value is false, it means the operation is still in progress. - // If true, the operation is completed and the `result` is available. - bool done = 3; - - oneof result { - // The error result of the operation in case of failure. - google.rpc.Status error = 4; - - // The normal response of the operation in case of success. 
If the original - // method returns no data on success, such as `Delete`, the response will be - // `google.protobuf.Empty`. If the original method is standard - // `Get`/`Create`/`Update`, the response should be the resource. For other - // methods, the response should have the type `XxxResponse`, where `Xxx` - // is the original method name. For example, if the original method name - // is `TakeSnapshot()`, the inferred response type will be - // `TakeSnapshotResponse`. - google.protobuf.Any response = 5; - } -} - -// The request message for [Operations.GetOperation][google.longrunning.Operations.GetOperation]. -message GetOperationRequest { - // The name of the operation resource. - string name = 1; -} - -// The request message for [Operations.ListOperations][google.longrunning.Operations.ListOperations]. -message ListOperationsRequest { - // The name of the operation collection. - string name = 4; - - // The standard List filter. - string filter = 1; - - // The standard List page size. - int32 page_size = 2; - - // The standard List page token. - string page_token = 3; -} - -// The response message for [Operations.ListOperations][google.longrunning.Operations.ListOperations]. -message ListOperationsResponse { - // A list of operations that match the specified filter in the request. - repeated Operation operations = 1; - - // The standard List next-page token. - string next_page_token = 2; -} - -// The request message for [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]. -message CancelOperationRequest { - // The name of the operation resource to be cancelled. - string name = 1; -} - -// The request message for [Operations.DeleteOperation][google.longrunning.Operations.DeleteOperation]. -message DeleteOperationRequest { - // The name of the operation resource to be deleted. - string name = 1; -} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/operations_grpc_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/operations_grpc_pb2.py deleted file mode 100644 index 5723e1d99fe0..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/operations_grpc_pb2.py +++ /dev/null @@ -1,264 +0,0 @@ -from google.longrunning.operations_pb2 import ( - CancelOperationRequest, - DeleteOperationRequest, - GetOperationRequest, - ListOperationsRequest, - ListOperationsResponse, - Operation, - google_dot_protobuf_dot_empty__pb2, -) -from grpc.beta import implementations as beta_implementations -from grpc.beta import interfaces as beta_interfaces -from grpc.framework.common import cardinality -from grpc.framework.interfaces.face import utilities as face_utilities - - -class OperationsStub(object): - """Manages long-running operations with an API service. - - When an API method normally takes long time to complete, it can be designed - to return [Operation][google.longrunning.Operation] to the client, and the client can use this - interface to receive the real response asynchronously by polling the - operation resource, or using `google.watcher.v1.Watcher` interface to watch - the response, or pass the operation resource to another API (such as Google - Cloud Pub/Sub API) to receive the response. Any API service that returns - long-running operations should implement the `Operations` interface so - developers can have a consistent client experience. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. 
- """ - self.GetOperation = channel.unary_unary( - '/google.longrunning.Operations/GetOperation', - request_serializer=GetOperationRequest.SerializeToString, - response_deserializer=Operation.FromString, - ) - self.ListOperations = channel.unary_unary( - '/google.longrunning.Operations/ListOperations', - request_serializer=ListOperationsRequest.SerializeToString, - response_deserializer=ListOperationsResponse.FromString, - ) - self.CancelOperation = channel.unary_unary( - '/google.longrunning.Operations/CancelOperation', - request_serializer=CancelOperationRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - self.DeleteOperation = channel.unary_unary( - '/google.longrunning.Operations/DeleteOperation', - request_serializer=DeleteOperationRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - - -class OperationsServicer(object): - """Manages long-running operations with an API service. - - When an API method normally takes long time to complete, it can be designed - to return [Operation][google.longrunning.Operation] to the client, and the client can use this - interface to receive the real response asynchronously by polling the - operation resource, or using `google.watcher.v1.Watcher` interface to watch - the response, or pass the operation resource to another API (such as Google - Cloud Pub/Sub API) to receive the response. Any API service that returns - long-running operations should implement the `Operations` interface so - developers can have a consistent client experience. - """ - - def GetOperation(self, request, context): - """Gets the latest state of a long-running operation. Clients may use this - method to poll the operation result at intervals as recommended by the API - service. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def ListOperations(self, request, context): - """Lists operations that match the specified filter in the request. If the - server doesn't support this method, it returns - `google.rpc.Code.UNIMPLEMENTED`. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def CancelOperation(self, request, context): - """Starts asynchronous cancellation on a long-running operation. The server - makes a best effort to cancel the operation, but success is not - guaranteed. If the server doesn't support this method, it returns - `google.rpc.Code.UNIMPLEMENTED`. Clients may use - [Operations.GetOperation] or other methods to check whether the - cancellation succeeded or the operation completed despite cancellation. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def DeleteOperation(self, request, context): - """Deletes a long-running operation. It indicates the client is no longer - interested in the operation result. It does not cancel the operation. 
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - -def add_OperationsServicer_to_server(servicer, server): - rpc_method_handlers = { - 'GetOperation': grpc.unary_unary_rpc_method_handler( - servicer.GetOperation, - request_deserializer=GetOperationRequest.FromString, - response_serializer=Operation.SerializeToString, - ), - 'ListOperations': grpc.unary_unary_rpc_method_handler( - servicer.ListOperations, - request_deserializer=ListOperationsRequest.FromString, - response_serializer=ListOperationsResponse.SerializeToString, - ), - 'CancelOperation': grpc.unary_unary_rpc_method_handler( - servicer.CancelOperation, - request_deserializer=CancelOperationRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - 'DeleteOperation': grpc.unary_unary_rpc_method_handler( - servicer.DeleteOperation, - request_deserializer=DeleteOperationRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - 'google.longrunning.Operations', rpc_method_handlers) - server.add_generic_rpc_handlers((generic_handler,)) - - -class BetaOperationsServicer(object): - """Manages long-running operations with an API service. - - When an API method normally takes long time to complete, it can be designed - to return [Operation][google.longrunning.Operation] to the client, and the client can use this - interface to receive the real response asynchronously by polling the - operation resource, or using `google.watcher.v1.Watcher` interface to watch - the response, or pass the operation resource to another API (such as Google - Cloud Pub/Sub API) to receive the response. Any API service that returns - long-running operations should implement the `Operations` interface so - developers can have a consistent client experience. - """ - def GetOperation(self, request, context): - """Gets the latest state of a long-running operation. Clients may use this - method to poll the operation result at intervals as recommended by the API - service. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - def ListOperations(self, request, context): - """Lists operations that match the specified filter in the request. If the - server doesn't support this method, it returns - `google.rpc.Code.UNIMPLEMENTED`. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - def CancelOperation(self, request, context): - """Starts asynchronous cancellation on a long-running operation. The server - makes a best effort to cancel the operation, but success is not - guaranteed. If the server doesn't support this method, it returns - `google.rpc.Code.UNIMPLEMENTED`. Clients may use - [Operations.GetOperation] or other methods to check whether the - cancellation succeeded or the operation completed despite cancellation. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - def DeleteOperation(self, request, context): - """Deletes a long-running operation. It indicates the client is no longer - interested in the operation result. It does not cancel the operation. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - - -class BetaOperationsStub(object): - """Manages long-running operations with an API service. 
- - When an API method normally takes long time to complete, it can be designed - to return [Operation][google.longrunning.Operation] to the client, and the client can use this - interface to receive the real response asynchronously by polling the - operation resource, or using `google.watcher.v1.Watcher` interface to watch - the response, or pass the operation resource to another API (such as Google - Cloud Pub/Sub API) to receive the response. Any API service that returns - long-running operations should implement the `Operations` interface so - developers can have a consistent client experience. - """ - def GetOperation(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - """Gets the latest state of a long-running operation. Clients may use this - method to poll the operation result at intervals as recommended by the API - service. - """ - raise NotImplementedError() - GetOperation.future = None - def ListOperations(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - """Lists operations that match the specified filter in the request. If the - server doesn't support this method, it returns - `google.rpc.Code.UNIMPLEMENTED`. - """ - raise NotImplementedError() - ListOperations.future = None - def CancelOperation(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - """Starts asynchronous cancellation on a long-running operation. The server - makes a best effort to cancel the operation, but success is not - guaranteed. If the server doesn't support this method, it returns - `google.rpc.Code.UNIMPLEMENTED`. Clients may use - [Operations.GetOperation] or other methods to check whether the - cancellation succeeded or the operation completed despite cancellation. - """ - raise NotImplementedError() - CancelOperation.future = None - def DeleteOperation(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - """Deletes a long-running operation. It indicates the client is no longer - interested in the operation result. It does not cancel the operation. 
- """ - raise NotImplementedError() - DeleteOperation.future = None - - -def beta_create_Operations_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): - request_deserializers = { - ('google.longrunning.Operations', 'CancelOperation'): CancelOperationRequest.FromString, - ('google.longrunning.Operations', 'DeleteOperation'): DeleteOperationRequest.FromString, - ('google.longrunning.Operations', 'GetOperation'): GetOperationRequest.FromString, - ('google.longrunning.Operations', 'ListOperations'): ListOperationsRequest.FromString, - } - response_serializers = { - ('google.longrunning.Operations', 'CancelOperation'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ('google.longrunning.Operations', 'DeleteOperation'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ('google.longrunning.Operations', 'GetOperation'): Operation.SerializeToString, - ('google.longrunning.Operations', 'ListOperations'): ListOperationsResponse.SerializeToString, - } - method_implementations = { - ('google.longrunning.Operations', 'CancelOperation'): face_utilities.unary_unary_inline(servicer.CancelOperation), - ('google.longrunning.Operations', 'DeleteOperation'): face_utilities.unary_unary_inline(servicer.DeleteOperation), - ('google.longrunning.Operations', 'GetOperation'): face_utilities.unary_unary_inline(servicer.GetOperation), - ('google.longrunning.Operations', 'ListOperations'): face_utilities.unary_unary_inline(servicer.ListOperations), - } - server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) - return beta_implementations.server(method_implementations, options=server_options) - - -def beta_create_Operations_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): - request_serializers = { - ('google.longrunning.Operations', 'CancelOperation'): CancelOperationRequest.SerializeToString, - ('google.longrunning.Operations', 'DeleteOperation'): DeleteOperationRequest.SerializeToString, - ('google.longrunning.Operations', 'GetOperation'): GetOperationRequest.SerializeToString, - ('google.longrunning.Operations', 'ListOperations'): ListOperationsRequest.SerializeToString, - } - response_deserializers = { - ('google.longrunning.Operations', 'CancelOperation'): google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ('google.longrunning.Operations', 'DeleteOperation'): google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ('google.longrunning.Operations', 'GetOperation'): Operation.FromString, - ('google.longrunning.Operations', 'ListOperations'): ListOperationsResponse.FromString, - } - cardinalities = { - 'CancelOperation': cardinality.Cardinality.UNARY_UNARY, - 'DeleteOperation': cardinality.Cardinality.UNARY_UNARY, - 'GetOperation': cardinality.Cardinality.UNARY_UNARY, - 'ListOperations': cardinality.Cardinality.UNARY_UNARY, - } - stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) - return beta_implementations.dynamic_stub(channel, 'google.longrunning.Operations', cardinalities, options=stub_options) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py index 
1ad0f6bae193..cb5c71f252e1 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py @@ -29,12 +29,13 @@ import os +from google.longrunning import operations_grpc + from google.cloud._helpers import make_insecure_stub from google.cloud._helpers import make_secure_stub from google.cloud.bigtable._generated import bigtable_instance_admin_pb2 from google.cloud.bigtable._generated import bigtable_pb2 from google.cloud.bigtable._generated import bigtable_table_admin_pb2 -from google.cloud.bigtable._generated import operations_grpc_pb2 from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES from google.cloud.bigtable.instance import Instance from google.cloud.bigtable.instance import _EXISTING_INSTANCE_LOCATION_ID @@ -110,15 +111,15 @@ def _make_operations_stub(client): :type client: :class:`Client` :param client: The client that will hold the stub. - :rtype: :class:`._generated.operations_grpc_pb2.OperationsStub` + :rtype: :class:`google.longrunning.operations_grpc.OperationsStub` :returns: A gRPC stub object. """ if client.emulator_host is None: return make_secure_stub(client.credentials, client.user_agent, - operations_grpc_pb2.OperationsStub, + operations_grpc.OperationsStub, OPERATIONS_API_HOST) else: - return make_insecure_stub(operations_grpc_pb2.OperationsStub, + return make_insecure_stub(operations_grpc.OperationsStub, client.emulator_host) @@ -285,7 +286,7 @@ def _instance_stub(self): def _operations_stub(self): """Getter for the gRPC stub used for the Operations API. - :rtype: :class:`._generated.operations_grpc_pb2.OperationsStub` + :rtype: :class:`google.longrunning.operations_grpc.OperationsStub` :returns: A gRPC stub object. :raises: :class:`ValueError ` if the current client is not an admin client or if it has not been diff --git a/packages/google-cloud-bigtable/unit_tests/test_client.py b/packages/google-cloud-bigtable/unit_tests/test_client.py index eb1048d35ec4..94989e244775 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_client.py +++ b/packages/google-cloud-bigtable/unit_tests/test_client.py @@ -143,6 +143,8 @@ def _callFUT(self, client): return _make_operations_stub(client) def test_without_emulator(self): + from google.longrunning import operations_grpc + from google.cloud._testing import _Monkey from google.cloud.bigtable import client as MUT @@ -165,12 +167,14 @@ def mock_make_secure_stub(*args): ( client.credentials, client.user_agent, - MUT.operations_grpc_pb2.OperationsStub, + operations_grpc.OperationsStub, MUT.OPERATIONS_API_HOST, ), ]) def test_with_emulator(self): + from google.longrunning import operations_grpc + from google.cloud._testing import _Monkey from google.cloud.bigtable import client as MUT @@ -190,7 +194,7 @@ def mock_make_insecure_stub(*args): self.assertIs(result, fake_stub) self.assertEqual(make_insecure_stub_args, [ ( - MUT.operations_grpc_pb2.OperationsStub, + operations_grpc.OperationsStub, emulator_host, ), ]) From 7807a2a1b4b2b63d957c048490ff7ecfe06dd4e6 Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Tue, 27 Sep 2016 08:53:24 -0700 Subject: [PATCH 004/892] Preparing for a release of all packages. Towards #2441. 
- Updating umbrella README to point at all packages - Putting upper bounds on grpcio in dependencies - Putting lower bounds on all google-cloud-* packages listed as dependencies - Adding `setup.cfg` for universal wheels --- packages/google-cloud-bigtable/setup.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 71b82d119b6c..6c36042e9484 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -50,13 +50,13 @@ REQUIREMENTS = [ - 'google-cloud-core', - 'grpcio >= 1.0.0', + 'google-cloud-core >= 0.20.0', + 'grpcio >= 1.0.0, < 2.0dev', ] setup( name='google-cloud-bigtable', - version='0.20.0dev', + version='0.20.0', description='Python Client for Google Cloud Bigtable', long_description=README, namespace_packages=[ From 53b1f70fc92cbc206bf567dbd79ec9f0717df294 Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Tue, 27 Sep 2016 08:57:43 -0700 Subject: [PATCH 005/892] Adding setup.cfg to all packages. --- packages/google-cloud-bigtable/setup.cfg | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 packages/google-cloud-bigtable/setup.cfg diff --git a/packages/google-cloud-bigtable/setup.cfg b/packages/google-cloud-bigtable/setup.cfg new file mode 100644 index 000000000000..2a9acf13daa9 --- /dev/null +++ b/packages/google-cloud-bigtable/setup.cfg @@ -0,0 +1,2 @@ +[bdist_wheel] +universal = 1 From 53db0d96c322eb0061644569448e2ef91a4c3aee Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Tue, 27 Sep 2016 11:08:22 -0700 Subject: [PATCH 006/892] Fixing up some whitespace issues. Checked for - Tab characters - Extra newlines at the end of files - Trailing whitespace in lines - Missing newlines at the end of files - Windows line feed and only found 1 file (.json) missing a newline and 7 files (.rst) with extra newlines at the end. --- .../unit_tests/read-rows-acceptance-test.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/unit_tests/read-rows-acceptance-test.json b/packages/google-cloud-bigtable/unit_tests/read-rows-acceptance-test.json index 4973831f4979..84023567dd9b 100644 --- a/packages/google-cloud-bigtable/unit_tests/read-rows-acceptance-test.json +++ b/packages/google-cloud-bigtable/unit_tests/read-rows-acceptance-test.json @@ -1175,4 +1175,4 @@ ] } ] -} \ No newline at end of file +} From a7b2ea517c69e547542640f3eedf727051e9b1e1 Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Tue, 27 Sep 2016 12:13:08 -0700 Subject: [PATCH 007/892] Updating Bigtable and Datastore autogenerate makefiles. In the process, also updating a hardcoded path in make_datastore_grpc. Also, re-running `make generate` in each of these introduced small changes to the autogenerated bigtable modules. 
--- .../cloud/bigtable/_generated/bigtable_instance_admin_pb2.py | 2 +- .../google/cloud/bigtable/_generated/bigtable_pb2.py | 2 +- .../cloud/bigtable/_generated/bigtable_table_admin_pb2.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/bigtable_instance_admin_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/bigtable_instance_admin_pb2.py index e24d4b26ae26..00e64c8a6976 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/bigtable_instance_admin_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/bigtable_instance_admin_pb2.py @@ -668,7 +668,7 @@ DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\034com.google.bigtable.admin.v2B\032BigtableInstanceAdminProtoP\001')) _CREATEINSTANCEREQUEST_CLUSTERSENTRY.has_options = True _CREATEINSTANCEREQUEST_CLUSTERSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) - +import grpc from grpc.beta import implementations as beta_implementations from grpc.beta import interfaces as beta_interfaces from grpc.framework.common import cardinality diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/bigtable_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/bigtable_pb2.py index 04b269d72bf3..8701efd166dd 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/bigtable_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/bigtable_pb2.py @@ -804,7 +804,7 @@ DESCRIPTOR.has_options = True DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\026com.google.bigtable.v2B\rBigtableProtoP\001')) - +import grpc from grpc.beta import implementations as beta_implementations from grpc.beta import interfaces as beta_interfaces from grpc.framework.common import cardinality diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/bigtable_table_admin_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/bigtable_table_admin_pb2.py index 7368c0bf4e59..9cafeed3e426 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/bigtable_table_admin_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/bigtable_table_admin_pb2.py @@ -504,7 +504,7 @@ DESCRIPTOR.has_options = True DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\034com.google.bigtable.admin.v2B\027BigtableTableAdminProtoP\001')) - +import grpc from grpc.beta import implementations as beta_implementations from grpc.beta import interfaces as beta_interfaces from grpc.framework.common import cardinality From 10450772213bba86e619d8e8fccc89a50cc84436 Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Thu, 29 Sep 2016 16:17:02 -0700 Subject: [PATCH 008/892] General clean-up after rename. - Removing "graft google" from MANIFEST for umbrella package. It isn't needed since the umbrella package has no source - Updating license year on copy-pasted namespace package __init__.py files. Done via: https://gist.github.com/dhermes/a0e88f891ffffc3ecea5c9bb2f13e4f5 - Removing unused HTML context from docs/conf.py - Setting GH_OWNER AND GH_PROJECT_NAME (which together make the REPO_SLUG) manually in the docs update scripts. This way the env. variables don't need to be set in the Travis UI / CLI. 
Also updating tox.ini to stop passing those variables through - Removing the root package from `verify_included_modules.py` since it no longer has any source - Updated a docstring reference to a moved class in the Bigtable system test - Removing redundant `GOOGLE_CLOUD_*` in `tox` system test `passenv` (already covered by `GOOGLE_*`) --- packages/google-cloud-bigtable/google/cloud/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/google/cloud/__init__.py b/packages/google-cloud-bigtable/google/cloud/__init__.py index 8ac7b74af136..b2b833373882 100644 --- a/packages/google-cloud-bigtable/google/cloud/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2014 Google Inc. +# Copyright 2016 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. From 09721a3d1ea0d1f448d4868b16b26d268180573b Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Mon, 3 Oct 2016 21:32:48 -0700 Subject: [PATCH 009/892] Updating package README's with more useful doc links. Also removing duplicate "Homepage" links (duplicate of "API Documentation" links). --- packages/google-cloud-bigtable/README.rst | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/packages/google-cloud-bigtable/README.rst b/packages/google-cloud-bigtable/README.rst index 619a0e15663d..9e3b08ca67bb 100644 --- a/packages/google-cloud-bigtable/README.rst +++ b/packages/google-cloud-bigtable/README.rst @@ -5,11 +5,9 @@ Python Client for Google Cloud Bigtable .. _Google Cloud Bigtable: https://cloud.google.com/bigtable/docs/ -- `Homepage`_ -- `API Documentation`_ +- `Documentation`_ -.. _Homepage: https://googlecloudplatform.github.io/google-cloud-python/ -.. _API Documentation: http://googlecloudplatform.github.io/google-cloud-python/ +.. _Documentation: https://googlecloudplatform.github.io/google-cloud-python/stable/bigtable-usage.html Quick Start ----------- From 329170b9e3c8221829f5f0a977f1e4465c021225 Mon Sep 17 00:00:00 2001 From: Tim Swast Date: Thu, 20 Oct 2016 15:50:55 -0700 Subject: [PATCH 010/892] Replace types string with str. Uses command: ag -l 'type ([^:]+): string' | \ xargs gsed -r -i.bak -e 's/type ([^:]+): string/type \1: str/g' Note: [-r for gsed (GNU sed) is needed for group matching](http://superuser.com/a/336819/125262). --- packages/google-cloud-bigtable/google/cloud/bigtable/client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py index cb5c71f252e1..e6d8173f8b39 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py @@ -317,7 +317,7 @@ def instance(self, instance_id, location=_EXISTING_INSTANCE_LOCATION_ID, :type instance_id: str :param instance_id: The ID of the instance. - :type location: string + :type location: str :param location: location name, in form ``projects//locations/``; used to set up the instance's cluster. From 16d10f02d8f61f2e4cc1d2a58378e9acf3b7f49e Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Thu, 20 Oct 2016 17:04:00 -0700 Subject: [PATCH 011/892] Replace :: with `.. code-block:: console`. Towards #2404. 
--- packages/google-cloud-bigtable/README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/README.rst b/packages/google-cloud-bigtable/README.rst index 9e3b08ca67bb..859a9ea536bd 100644 --- a/packages/google-cloud-bigtable/README.rst +++ b/packages/google-cloud-bigtable/README.rst @@ -12,7 +12,7 @@ Python Client for Google Cloud Bigtable Quick Start ----------- -:: +.. code-block:: console $ pip install --upgrade google-cloud-bigtable From 9aaa2bec70986403670b0e0146df2392d70d92e1 Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Fri, 28 Oct 2016 14:47:01 -0700 Subject: [PATCH 012/892] Re-factoring Operation base class. This is in preparation to support JSON/HTTP operations as well and also to ensure that **all** of the operation PB is parsed when polling. --- .../google/cloud/bigtable/cluster.py | 8 ++++---- .../google/cloud/bigtable/instance.py | 8 +++++--- .../google-cloud-bigtable/unit_tests/test_cluster.py | 12 +++++++----- .../unit_tests/test_instance.py | 7 ++++--- 4 files changed, 20 insertions(+), 15 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py index e22f383bed95..48b335c5196e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py @@ -23,7 +23,7 @@ bigtable_instance_admin_pb2 as messages_v2_pb2) from google.cloud.operation import Operation from google.cloud.operation import _compute_type_url -from google.cloud.operation import _register_type_url +from google.cloud.operation import register_type_url _CLUSTER_NAME_RE = re.compile(r'^projects/(?P[^/]+)/' @@ -36,7 +36,7 @@ _UPDATE_CLUSTER_METADATA_URL = _compute_type_url( messages_v2_pb2.UpdateClusterMetadata) -_register_type_url( +register_type_url( _UPDATE_CLUSTER_METADATA_URL, messages_v2_pb2.UpdateClusterMetadata) @@ -218,7 +218,7 @@ def create(self): operation = Operation.from_pb(operation_pb, client) operation.target = self - operation.metadata['request_type'] = 'CreateCluster' + operation.caller_metadata['request_type'] = 'CreateCluster' return operation def update(self): @@ -249,7 +249,7 @@ def update(self): operation = Operation.from_pb(operation_pb, client) operation.target = self - operation.metadata['request_type'] = 'UpdateCluster' + operation.caller_metadata['request_type'] = 'UpdateCluster' return operation def delete(self): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py index 8730836622d0..afa4066a75b0 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py @@ -28,7 +28,7 @@ from google.cloud.bigtable.table import Table from google.cloud.operation import Operation from google.cloud.operation import _compute_type_url -from google.cloud.operation import _register_type_url +from google.cloud.operation import register_type_url _EXISTING_INSTANCE_LOCATION_ID = 'see-existing-cluster' @@ -38,8 +38,10 @@ _CREATE_INSTANCE_METADATA_URL = _compute_type_url( messages_v2_pb2.CreateInstanceMetadata) -_register_type_url( +register_type_url( _CREATE_INSTANCE_METADATA_URL, messages_v2_pb2.CreateInstanceMetadata) +_INSTANCE_METADATA_URL = _compute_type_url(data_v2_pb2.Instance) +register_type_url(_INSTANCE_METADATA_URL, data_v2_pb2.Instance) def _prepare_create_request(instance): @@ -237,7 +239,7 @@ 
def create(self): operation = Operation.from_pb(operation_pb, self._client) operation.target = self - operation.metadata['request_type'] = 'CreateInstance' + operation.caller_metadata['request_type'] = 'CreateInstance' return operation def update(self): diff --git a/packages/google-cloud-bigtable/unit_tests/test_cluster.py b/packages/google-cloud-bigtable/unit_tests/test_cluster.py index e497a025fd2c..82185cef030f 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_cluster.py +++ b/packages/google-cloud-bigtable/unit_tests/test_cluster.py @@ -257,8 +257,9 @@ def test_create(self): self.assertEqual(result.name, OP_NAME) self.assertIs(result.target, cluster) self.assertIs(result.client, client) - self.assertIsNone(result.pb_metadata) - self.assertEqual(result.metadata, {'request_type': 'CreateCluster'}) + self.assertIsNone(result.metadata) + self.assertEqual(result.caller_metadata, + {'request_type': 'CreateCluster'}) self.assertEqual(len(stub.method_calls), 1) api_name, args, kwargs = stub.method_calls[0] @@ -323,10 +324,11 @@ def test_update(self): self.assertEqual(result.name, OP_NAME) self.assertIs(result.target, cluster) self.assertIs(result.client, client) - self.assertIsInstance(result.pb_metadata, + self.assertIsInstance(result.metadata, messages_v2_pb2.UpdateClusterMetadata) - self.assertEqual(result.pb_metadata.request_time, NOW_PB) - self.assertEqual(result.metadata, {'request_type': 'UpdateCluster'}) + self.assertEqual(result.metadata.request_time, NOW_PB) + self.assertEqual(result.caller_metadata, + {'request_type': 'UpdateCluster'}) self.assertEqual(len(stub.method_calls), 1) api_name, args, kwargs = stub.method_calls[0] diff --git a/packages/google-cloud-bigtable/unit_tests/test_instance.py b/packages/google-cloud-bigtable/unit_tests/test_instance.py index bf47ab4f62a3..223cfd2033ff 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_instance.py +++ b/packages/google-cloud-bigtable/unit_tests/test_instance.py @@ -265,10 +265,11 @@ def test_create(self): self.assertEqual(result.name, self.OP_NAME) self.assertIs(result.target, instance) self.assertIs(result.client, client) - self.assertIsInstance(result.pb_metadata, + self.assertIsInstance(result.metadata, messages_v2_pb2.CreateInstanceMetadata) - self.assertEqual(result.pb_metadata.request_time, NOW_PB) - self.assertEqual(result.metadata, {'request_type': 'CreateInstance'}) + self.assertEqual(result.metadata.request_time, NOW_PB) + self.assertEqual(result.caller_metadata, + {'request_type': 'CreateInstance'}) self.assertEqual(len(stub.method_calls), 1) api_name, args, kwargs = stub.method_calls[0] From 58b844043db32ce5ec70dee6406acc9ca3e5215b Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Fri, 28 Oct 2016 14:57:16 -0700 Subject: [PATCH 013/892] Make type_url optional when registering types. Also renaming register_type_url to register_type. 
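For illustration, a rough before/after sketch of the caller-side registration change — it uses only names that appear in the surrounding diffs (register_type, register_type_url, _compute_type_url, and the CreateInstanceMetadata message) and is an assumed caller-side view, not code taken from the patch itself:

    # Before: callers computed a type URL themselves and passed it in
    # alongside the message class (illustrative; mirrors the removed lines
    # in cluster.py / instance.py below).
    from google.cloud.operation import _compute_type_url, register_type_url
    from google.cloud.bigtable._generated import (
        bigtable_instance_admin_pb2 as messages_v2_pb2)

    type_url = _compute_type_url(messages_v2_pb2.CreateInstanceMetadata)
    register_type_url(type_url, messages_v2_pb2.CreateInstanceMetadata)

    # After: the type URL is derived from the message class internally,
    # so registration collapses to a single call.
    from google.cloud.operation import register_type

    register_type(messages_v2_pb2.CreateInstanceMetadata)
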
--- .../google/cloud/bigtable/cluster.py | 8 ++------ .../google/cloud/bigtable/instance.py | 11 +++-------- .../google-cloud-bigtable/unit_tests/test_cluster.py | 5 +++-- .../google-cloud-bigtable/unit_tests/test_instance.py | 6 +++--- 4 files changed, 11 insertions(+), 19 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py index 48b335c5196e..c2418576dde9 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py @@ -22,8 +22,7 @@ from google.cloud.bigtable._generated import ( bigtable_instance_admin_pb2 as messages_v2_pb2) from google.cloud.operation import Operation -from google.cloud.operation import _compute_type_url -from google.cloud.operation import register_type_url +from google.cloud.operation import register_type _CLUSTER_NAME_RE = re.compile(r'^projects/(?P[^/]+)/' @@ -34,10 +33,7 @@ """Default number of nodes to use when creating a cluster.""" -_UPDATE_CLUSTER_METADATA_URL = _compute_type_url( - messages_v2_pb2.UpdateClusterMetadata) -register_type_url( - _UPDATE_CLUSTER_METADATA_URL, messages_v2_pb2.UpdateClusterMetadata) +register_type(messages_v2_pb2.UpdateClusterMetadata) def _prepare_create_request(cluster): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py index afa4066a75b0..41dda563c843 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py @@ -27,8 +27,7 @@ from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES from google.cloud.bigtable.table import Table from google.cloud.operation import Operation -from google.cloud.operation import _compute_type_url -from google.cloud.operation import register_type_url +from google.cloud.operation import register_type _EXISTING_INSTANCE_LOCATION_ID = 'see-existing-cluster' @@ -36,12 +35,8 @@ r'instances/(?P[a-z][-a-z0-9]*)$') -_CREATE_INSTANCE_METADATA_URL = _compute_type_url( - messages_v2_pb2.CreateInstanceMetadata) -register_type_url( - _CREATE_INSTANCE_METADATA_URL, messages_v2_pb2.CreateInstanceMetadata) -_INSTANCE_METADATA_URL = _compute_type_url(data_v2_pb2.Instance) -register_type_url(_INSTANCE_METADATA_URL, data_v2_pb2.Instance) +register_type(messages_v2_pb2.CreateInstanceMetadata) +register_type(data_v2_pb2.Instance) def _prepare_create_request(instance): diff --git a/packages/google-cloud-bigtable/unit_tests/test_cluster.py b/packages/google-cloud-bigtable/unit_tests/test_cluster.py index 82185cef030f..d55510ec5147 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_cluster.py +++ b/packages/google-cloud-bigtable/unit_tests/test_cluster.py @@ -283,7 +283,6 @@ def test_update(self): from google.cloud.bigtable._generated import ( bigtable_instance_admin_pb2 as messages_v2_pb2) from unit_tests._testing import _FakeStub - from google.cloud.bigtable.cluster import _UPDATE_CLUSTER_METADATA_URL NOW = datetime.datetime.utcnow() NOW_PB = _datetime_to_pb_timestamp(NOW) @@ -307,10 +306,12 @@ def test_update(self): 'operations/projects/%s/instances/%s/clusters/%s/operations/%d' % (self.PROJECT, self.INSTANCE_ID, self.CLUSTER_ID, OP_ID)) metadata = messages_v2_pb2.UpdateClusterMetadata(request_time=NOW_PB) + type_url = 'type.googleapis.com/%s' % ( + messages_v2_pb2.UpdateClusterMetadata.DESCRIPTOR.full_name,) response_pb = 
operations_pb2.Operation( name=OP_NAME, metadata=Any( - type_url=_UPDATE_CLUSTER_METADATA_URL, + type_url=type_url, value=metadata.SerializeToString() ) ) diff --git a/packages/google-cloud-bigtable/unit_tests/test_instance.py b/packages/google-cloud-bigtable/unit_tests/test_instance.py index 223cfd2033ff..73d1cce7ff82 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_instance.py +++ b/packages/google-cloud-bigtable/unit_tests/test_instance.py @@ -236,8 +236,6 @@ def test_create(self): from unit_tests._testing import _FakeStub from google.cloud.operation import Operation from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES - from google.cloud.bigtable.instance import ( - _CREATE_INSTANCE_METADATA_URL) NOW = datetime.datetime.utcnow() NOW_PB = _datetime_to_pb_timestamp(NOW) @@ -247,10 +245,12 @@ def test_create(self): # Create response_pb metadata = messages_v2_pb2.CreateInstanceMetadata(request_time=NOW_PB) + type_url = 'type.googleapis.com/%s' % ( + messages_v2_pb2.CreateInstanceMetadata.DESCRIPTOR.full_name,) response_pb = operations_pb2.Operation( name=self.OP_NAME, metadata=Any( - type_url=_CREATE_INSTANCE_METADATA_URL, + type_url=type_url, value=metadata.SerializeToString(), ) ) From 37ae89de0e1b6b8bc35e98f4d72aeb885e13bdd2 Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Fri, 4 Nov 2016 10:12:12 -0700 Subject: [PATCH 014/892] Adding PyPI badges to package READMEs. --- packages/google-cloud-bigtable/README.rst | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/packages/google-cloud-bigtable/README.rst b/packages/google-cloud-bigtable/README.rst index 859a9ea536bd..51a0f25c7b49 100644 --- a/packages/google-cloud-bigtable/README.rst +++ b/packages/google-cloud-bigtable/README.rst @@ -5,6 +5,8 @@ Python Client for Google Cloud Bigtable .. _Google Cloud Bigtable: https://cloud.google.com/bigtable/docs/ +|pypi| |versions| + - `Documentation`_ .. _Documentation: https://googlecloudplatform.github.io/google-cloud-python/stable/bigtable-usage.html @@ -40,3 +42,8 @@ See the ``google-cloud-python`` API `Bigtable documentation`_ to learn how to manage your data in Bigtable tables. .. _Bigtable documentation: https://google-cloud-python.readthedocs.io/en/stable/bigtable-usage.html + +.. |pypi| image:: https://img.shields.io/pypi/v/google-cloud-bigtable.svg + :target: https://pypi.python.org/pypi/google-cloud-bigtable +.. |versions| image:: https://img.shields.io/pypi/pyversions/google-cloud-bigtable.svg + :target: https://pypi.python.org/pypi/google-cloud-bigtable From b0bb1f9ce2d9f00b9bb5b36dbf02e37d73285cb1 Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Mon, 7 Nov 2016 21:26:07 -0800 Subject: [PATCH 015/892] Avoiding using filesystem deps in package tox.ini configs. 
--- packages/google-cloud-bigtable/tox.ini | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/tox.ini b/packages/google-cloud-bigtable/tox.ini index b5b080543a4c..f6336268a816 100644 --- a/packages/google-cloud-bigtable/tox.ini +++ b/packages/google-cloud-bigtable/tox.ini @@ -3,8 +3,9 @@ envlist = py27,py34,py35,cover [testing] +localdeps = + pip install --upgrade {toxinidir}/../core deps = - {toxinidir}/../core pytest covercmd = py.test --quiet \ @@ -15,6 +16,7 @@ covercmd = [testenv] commands = + {[testing]localdeps} py.test --quiet {posargs} unit_tests deps = {[testing]deps} @@ -23,6 +25,7 @@ deps = basepython = python2.7 commands = + {[testing]localdeps} {[testing]covercmd} deps = {[testenv]deps} From ffcf0e0d64b27cc73e357d15ca24ef88e6f14c31 Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Tue, 8 Nov 2016 20:20:59 -0800 Subject: [PATCH 016/892] Renaming _getTargetClass to _get_target_class. Done via: $ git grep -l 'def _getTargetClass(self)' | \ > xargs sed -i s/'def _getTargetClass(self)'/'@staticmethod\n def _get_target_class()'/g --- .../unit_tests/test_client.py | 3 +- .../unit_tests/test_cluster.py | 3 +- .../unit_tests/test_column_family.py | 15 ++-- .../unit_tests/test_instance.py | 3 +- .../unit_tests/test_row.py | 12 ++-- .../unit_tests/test_row_data.py | 12 ++-- .../unit_tests/test_row_filters.py | 72 ++++++++++++------- .../unit_tests/test_table.py | 3 +- 8 files changed, 82 insertions(+), 41 deletions(-) diff --git a/packages/google-cloud-bigtable/unit_tests/test_client.py b/packages/google-cloud-bigtable/unit_tests/test_client.py index 94989e244775..d8a554139062 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_client.py +++ b/packages/google-cloud-bigtable/unit_tests/test_client.py @@ -267,7 +267,8 @@ class TestClient(unittest.TestCase): DISPLAY_NAME = 'display-name' USER_AGENT = 'you-sir-age-int' - def _getTargetClass(self): + @staticmethod + def _get_target_class(): from google.cloud.bigtable.client import Client return Client diff --git a/packages/google-cloud-bigtable/unit_tests/test_cluster.py b/packages/google-cloud-bigtable/unit_tests/test_cluster.py index d55510ec5147..f08eccf3604b 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_cluster.py +++ b/packages/google-cloud-bigtable/unit_tests/test_cluster.py @@ -25,7 +25,8 @@ class TestCluster(unittest.TestCase): '/instances/' + INSTANCE_ID + '/clusters/' + CLUSTER_ID) - def _getTargetClass(self): + @staticmethod + def _get_target_class(): from google.cloud.bigtable.cluster import Cluster return Cluster diff --git a/packages/google-cloud-bigtable/unit_tests/test_column_family.py b/packages/google-cloud-bigtable/unit_tests/test_column_family.py index dee8db4e169b..03dc6cfbd2cf 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_column_family.py +++ b/packages/google-cloud-bigtable/unit_tests/test_column_family.py @@ -85,7 +85,8 @@ def test_it(self): class TestMaxVersionsGCRule(unittest.TestCase): - def _getTargetClass(self): + @staticmethod + def _get_target_class(): from google.cloud.bigtable.column_family import MaxVersionsGCRule return MaxVersionsGCRule @@ -118,7 +119,8 @@ def test_to_pb(self): class TestMaxAgeGCRule(unittest.TestCase): - def _getTargetClass(self): + @staticmethod + def _get_target_class(): from google.cloud.bigtable.column_family import MaxAgeGCRule return MaxAgeGCRule @@ -157,7 +159,8 @@ def test_to_pb(self): class TestGCRuleUnion(unittest.TestCase): - def _getTargetClass(self): + @staticmethod + def 
_get_target_class(): from google.cloud.bigtable.column_family import GCRuleUnion return GCRuleUnion @@ -243,7 +246,8 @@ def test_to_pb_nested(self): class TestGCRuleIntersection(unittest.TestCase): - def _getTargetClass(self): + @staticmethod + def _get_target_class(): from google.cloud.bigtable.column_family import GCRuleIntersection return GCRuleIntersection @@ -332,7 +336,8 @@ def test_to_pb_nested(self): class TestColumnFamily(unittest.TestCase): - def _getTargetClass(self): + @staticmethod + def _get_target_class(): from google.cloud.bigtable.column_family import ColumnFamily return ColumnFamily diff --git a/packages/google-cloud-bigtable/unit_tests/test_instance.py b/packages/google-cloud-bigtable/unit_tests/test_instance.py index 73d1cce7ff82..b9e8ca53ef69 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_instance.py +++ b/packages/google-cloud-bigtable/unit_tests/test_instance.py @@ -30,7 +30,8 @@ class TestInstance(unittest.TestCase): TABLE_ID = 'table_id' TABLE_NAME = INSTANCE_NAME + '/tables/' + TABLE_ID - def _getTargetClass(self): + @staticmethod + def _get_target_class(): from google.cloud.bigtable.instance import Instance return Instance diff --git a/packages/google-cloud-bigtable/unit_tests/test_row.py b/packages/google-cloud-bigtable/unit_tests/test_row.py index d40d6b64b720..24656cbc16ca 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_row.py +++ b/packages/google-cloud-bigtable/unit_tests/test_row.py @@ -18,7 +18,8 @@ class Test_SetDeleteRow(unittest.TestCase): - def _getTargetClass(self): + @staticmethod + def _get_target_class(): from google.cloud.bigtable.row import _SetDeleteRow return _SetDeleteRow @@ -33,7 +34,8 @@ def test__get_mutations_virtual(self): class TestDirectRow(unittest.TestCase): - def _getTargetClass(self): + @staticmethod + def _get_target_class(): from google.cloud.bigtable.row import DirectRow return DirectRow @@ -376,7 +378,8 @@ def test_commit_no_mutations(self): class TestConditionalRow(unittest.TestCase): - def _getTargetClass(self): + @staticmethod + def _get_target_class(): from google.cloud.bigtable.row import ConditionalRow return ConditionalRow @@ -517,7 +520,8 @@ def test_commit_no_mutations(self): class TestAppendRow(unittest.TestCase): - def _getTargetClass(self): + @staticmethod + def _get_target_class(): from google.cloud.bigtable.row import AppendRow return AppendRow diff --git a/packages/google-cloud-bigtable/unit_tests/test_row_data.py b/packages/google-cloud-bigtable/unit_tests/test_row_data.py index daa823aeee1b..df04a08ddf27 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_row_data.py +++ b/packages/google-cloud-bigtable/unit_tests/test_row_data.py @@ -18,7 +18,8 @@ class TestCell(unittest.TestCase): - def _getTargetClass(self): + @staticmethod + def _get_target_class(): from google.cloud.bigtable.row_data import Cell return Cell @@ -93,7 +94,8 @@ def test___ne__(self): class TestPartialRowData(unittest.TestCase): - def _getTargetClass(self): + @staticmethod + def _get_target_class(): from google.cloud.bigtable.row_data import PartialRowData return PartialRowData @@ -184,7 +186,8 @@ def test_row_key_getter(self): class TestPartialRowsData(unittest.TestCase): - def _getTargetClass(self): + @staticmethod + def _get_target_class(): from google.cloud.bigtable.row_data import PartialRowsData return PartialRowsData @@ -426,7 +429,8 @@ class TestPartialRowsData_JSON_acceptance_tests(unittest.TestCase): _json_tests = None - def _getTargetClass(self): + @staticmethod + def _get_target_class(): from 
google.cloud.bigtable.row_data import PartialRowsData return PartialRowsData diff --git a/packages/google-cloud-bigtable/unit_tests/test_row_filters.py b/packages/google-cloud-bigtable/unit_tests/test_row_filters.py index cb63856ac981..7a166a941e02 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_row_filters.py +++ b/packages/google-cloud-bigtable/unit_tests/test_row_filters.py @@ -18,7 +18,8 @@ class Test_BoolFilter(unittest.TestCase): - def _getTargetClass(self): + @staticmethod + def _get_target_class(): from google.cloud.bigtable.row_filters import _BoolFilter return _BoolFilter @@ -52,7 +53,8 @@ def test___ne__same_value(self): class TestSinkFilter(unittest.TestCase): - def _getTargetClass(self): + @staticmethod + def _get_target_class(): from google.cloud.bigtable.row_filters import SinkFilter return SinkFilter @@ -69,7 +71,8 @@ def test_to_pb(self): class TestPassAllFilter(unittest.TestCase): - def _getTargetClass(self): + @staticmethod + def _get_target_class(): from google.cloud.bigtable.row_filters import PassAllFilter return PassAllFilter @@ -86,7 +89,8 @@ def test_to_pb(self): class TestBlockAllFilter(unittest.TestCase): - def _getTargetClass(self): + @staticmethod + def _get_target_class(): from google.cloud.bigtable.row_filters import BlockAllFilter return BlockAllFilter @@ -103,7 +107,8 @@ def test_to_pb(self): class Test_RegexFilter(unittest.TestCase): - def _getTargetClass(self): + @staticmethod + def _get_target_class(): from google.cloud.bigtable.row_filters import _RegexFilter return _RegexFilter @@ -142,7 +147,8 @@ def test___ne__same_value(self): class TestRowKeyRegexFilter(unittest.TestCase): - def _getTargetClass(self): + @staticmethod + def _get_target_class(): from google.cloud.bigtable.row_filters import RowKeyRegexFilter return RowKeyRegexFilter @@ -159,7 +165,8 @@ def test_to_pb(self): class TestRowSampleFilter(unittest.TestCase): - def _getTargetClass(self): + @staticmethod + def _get_target_class(): from google.cloud.bigtable.row_filters import RowSampleFilter return RowSampleFilter @@ -193,7 +200,8 @@ def test_to_pb(self): class TestFamilyNameRegexFilter(unittest.TestCase): - def _getTargetClass(self): + @staticmethod + def _get_target_class(): from google.cloud.bigtable.row_filters import FamilyNameRegexFilter return FamilyNameRegexFilter @@ -210,7 +218,8 @@ def test_to_pb(self): class TestColumnQualifierRegexFilter(unittest.TestCase): - def _getTargetClass(self): + @staticmethod + def _get_target_class(): from google.cloud.bigtable.row_filters import ( ColumnQualifierRegexFilter) return ColumnQualifierRegexFilter @@ -229,7 +238,8 @@ def test_to_pb(self): class TestTimestampRange(unittest.TestCase): - def _getTargetClass(self): + @staticmethod + def _get_target_class(): from google.cloud.bigtable.row_filters import TimestampRange return TimestampRange @@ -303,7 +313,8 @@ def test_to_pb_end_only(self): class TestTimestampRangeFilter(unittest.TestCase): - def _getTargetClass(self): + @staticmethod + def _get_target_class(): from google.cloud.bigtable.row_filters import TimestampRangeFilter return TimestampRangeFilter @@ -340,7 +351,8 @@ def test_to_pb(self): class TestColumnRangeFilter(unittest.TestCase): - def _getTargetClass(self): + @staticmethod + def _get_target_class(): from google.cloud.bigtable.row_filters import ColumnRangeFilter return ColumnRangeFilter @@ -464,7 +476,8 @@ def test_to_pb_exclusive_end(self): class TestValueRegexFilter(unittest.TestCase): - def _getTargetClass(self): + @staticmethod + def _get_target_class(): from 
google.cloud.bigtable.row_filters import ValueRegexFilter return ValueRegexFilter @@ -481,7 +494,8 @@ def test_to_pb(self): class TestValueRangeFilter(unittest.TestCase): - def _getTargetClass(self): + @staticmethod + def _get_target_class(): from google.cloud.bigtable.row_filters import ValueRangeFilter return ValueRangeFilter @@ -572,7 +586,8 @@ def test_to_pb_exclusive_end(self): class Test_CellCountFilter(unittest.TestCase): - def _getTargetClass(self): + @staticmethod + def _get_target_class(): from google.cloud.bigtable.row_filters import _CellCountFilter return _CellCountFilter @@ -606,7 +621,8 @@ def test___ne__same_value(self): class TestCellsRowOffsetFilter(unittest.TestCase): - def _getTargetClass(self): + @staticmethod + def _get_target_class(): from google.cloud.bigtable.row_filters import CellsRowOffsetFilter return CellsRowOffsetFilter @@ -624,7 +640,8 @@ def test_to_pb(self): class TestCellsRowLimitFilter(unittest.TestCase): - def _getTargetClass(self): + @staticmethod + def _get_target_class(): from google.cloud.bigtable.row_filters import CellsRowLimitFilter return CellsRowLimitFilter @@ -642,7 +659,8 @@ def test_to_pb(self): class TestCellsColumnLimitFilter(unittest.TestCase): - def _getTargetClass(self): + @staticmethod + def _get_target_class(): from google.cloud.bigtable.row_filters import CellsColumnLimitFilter return CellsColumnLimitFilter @@ -660,7 +678,8 @@ def test_to_pb(self): class TestStripValueTransformerFilter(unittest.TestCase): - def _getTargetClass(self): + @staticmethod + def _get_target_class(): from google.cloud.bigtable.row_filters import ( StripValueTransformerFilter) return StripValueTransformerFilter @@ -678,7 +697,8 @@ def test_to_pb(self): class TestApplyLabelFilter(unittest.TestCase): - def _getTargetClass(self): + @staticmethod + def _get_target_class(): from google.cloud.bigtable.row_filters import ApplyLabelFilter return ApplyLabelFilter @@ -712,7 +732,8 @@ def test_to_pb(self): class Test_FilterCombination(unittest.TestCase): - def _getTargetClass(self): + @staticmethod + def _get_target_class(): from google.cloud.bigtable.row_filters import _FilterCombination return _FilterCombination @@ -743,7 +764,8 @@ def test___eq__type_differ(self): class TestRowFilterChain(unittest.TestCase): - def _getTargetClass(self): + @staticmethod + def _get_target_class(): from google.cloud.bigtable.row_filters import RowFilterChain return RowFilterChain @@ -799,7 +821,8 @@ def test_to_pb_nested(self): class TestRowFilterUnion(unittest.TestCase): - def _getTargetClass(self): + @staticmethod + def _get_target_class(): from google.cloud.bigtable.row_filters import RowFilterUnion return RowFilterUnion @@ -855,7 +878,8 @@ def test_to_pb_nested(self): class TestConditionalRowFilter(unittest.TestCase): - def _getTargetClass(self): + @staticmethod + def _get_target_class(): from google.cloud.bigtable.row_filters import ConditionalRowFilter return ConditionalRowFilter diff --git a/packages/google-cloud-bigtable/unit_tests/test_table.py b/packages/google-cloud-bigtable/unit_tests/test_table.py index 7dca91768861..a1ea086a6b96 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_table.py +++ b/packages/google-cloud-bigtable/unit_tests/test_table.py @@ -29,7 +29,8 @@ class TestTable(unittest.TestCase): TIMESTAMP_MICROS = 100 VALUE = b'value' - def _getTargetClass(self): + @staticmethod + def _get_target_class(): from google.cloud.bigtable.table import Table return Table From 0dafa7066d8aa8346cb4569ffa30f02b5b22541e Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: 
Tue, 8 Nov 2016 20:22:12 -0800 Subject: [PATCH 017/892] Changing uses of _getTargetClass to _get_target_class. Done via: $ git grep -l _getTargetClass | \ > xargs sed -i s/_getTargetClass/_get_target_class/g --- .../unit_tests/test_client.py | 2 +- .../unit_tests/test_cluster.py | 10 ++-- .../unit_tests/test_column_family.py | 10 ++-- .../unit_tests/test_instance.py | 8 ++-- .../unit_tests/test_row.py | 12 ++--- .../unit_tests/test_row_data.py | 12 ++--- .../unit_tests/test_row_filters.py | 48 +++++++++---------- .../unit_tests/test_table.py | 2 +- 8 files changed, 52 insertions(+), 52 deletions(-) diff --git a/packages/google-cloud-bigtable/unit_tests/test_client.py b/packages/google-cloud-bigtable/unit_tests/test_client.py index d8a554139062..9eea7e13a840 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_client.py +++ b/packages/google-cloud-bigtable/unit_tests/test_client.py @@ -273,7 +273,7 @@ def _get_target_class(): return Client def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) + return self._get_target_class()(*args, **kwargs) def _makeOneWithMocks(self, *args, **kwargs): from google.cloud._testing import _Monkey diff --git a/packages/google-cloud-bigtable/unit_tests/test_cluster.py b/packages/google-cloud-bigtable/unit_tests/test_cluster.py index f08eccf3604b..337ea0abbba5 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_cluster.py +++ b/packages/google-cloud-bigtable/unit_tests/test_cluster.py @@ -31,7 +31,7 @@ def _get_target_class(): return Cluster def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) + return self._get_target_class()(*args, **kwargs) def test_constructor_defaults(self): from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES @@ -108,7 +108,7 @@ def test_from_pb_success(self): serve_nodes=SERVE_NODES, ) - klass = self._getTargetClass() + klass = self._get_target_class() cluster = klass.from_pb(cluster_pb, instance) self.assertIsInstance(cluster, klass) self.assertIs(cluster._instance, instance) @@ -121,7 +121,7 @@ def test_from_pb_bad_cluster_name(self): instance = _Instance(self.INSTANCE_ID, client) cluster_pb = _ClusterPB(name=BAD_CLUSTER_NAME) - klass = self._getTargetClass() + klass = self._get_target_class() with self.assertRaises(ValueError): klass.from_pb(cluster_pb, instance) @@ -134,7 +134,7 @@ def test_from_pb_project_mistmatch(self): cluster_pb = _ClusterPB(name=self.CLUSTER_NAME) - klass = self._getTargetClass() + klass = self._get_target_class() with self.assertRaises(ValueError): klass.from_pb(cluster_pb, instance) @@ -147,7 +147,7 @@ def test_from_pb_instance_mistmatch(self): cluster_pb = _ClusterPB(name=self.CLUSTER_NAME) - klass = self._getTargetClass() + klass = self._get_target_class() with self.assertRaises(ValueError): klass.from_pb(cluster_pb, instance) diff --git a/packages/google-cloud-bigtable/unit_tests/test_column_family.py b/packages/google-cloud-bigtable/unit_tests/test_column_family.py index 03dc6cfbd2cf..1202194d54e2 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_column_family.py +++ b/packages/google-cloud-bigtable/unit_tests/test_column_family.py @@ -91,7 +91,7 @@ def _get_target_class(): return MaxVersionsGCRule def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) + return self._get_target_class()(*args, **kwargs) def test___eq__type_differ(self): gc_rule1 = self._makeOne(10) @@ -125,7 +125,7 @@ def _get_target_class(): return MaxAgeGCRule def _makeOne(self, *args, **kwargs): - return 
self._getTargetClass()(*args, **kwargs) + return self._get_target_class()(*args, **kwargs) def test___eq__type_differ(self): max_age = object() @@ -165,7 +165,7 @@ def _get_target_class(): return GCRuleUnion def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) + return self._get_target_class()(*args, **kwargs) def test_constructor(self): rules = object() @@ -252,7 +252,7 @@ def _get_target_class(): return GCRuleIntersection def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) + return self._get_target_class()(*args, **kwargs) def test_constructor(self): rules = object() @@ -342,7 +342,7 @@ def _get_target_class(): return ColumnFamily def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) + return self._get_target_class()(*args, **kwargs) def test_constructor(self): column_family_id = u'column-family-id' diff --git a/packages/google-cloud-bigtable/unit_tests/test_instance.py b/packages/google-cloud-bigtable/unit_tests/test_instance.py index b9e8ca53ef69..3e93a064d7aa 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_instance.py +++ b/packages/google-cloud-bigtable/unit_tests/test_instance.py @@ -36,7 +36,7 @@ def _get_target_class(): return Instance def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) + return self._get_target_class()(*args, **kwargs) def test_constructor_defaults(self): from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES @@ -122,7 +122,7 @@ def test_from_pb_success(self): display_name=self.INSTANCE_ID, ) - klass = self._getTargetClass() + klass = self._get_target_class() instance = klass.from_pb(instance_pb, client) self.assertIsInstance(instance, klass) self.assertEqual(instance._client, client) @@ -137,7 +137,7 @@ def test_from_pb_bad_instance_name(self): instance_name = 'INCORRECT_FORMAT' instance_pb = data_v2_pb2.Instance(name=instance_name) - klass = self._getTargetClass() + klass = self._get_target_class() with self.assertRaises(ValueError): klass.from_pb(instance_pb, None) @@ -152,7 +152,7 @@ def test_from_pb_project_mistmatch(self): instance_pb = data_v2_pb2.Instance(name=self.INSTANCE_NAME) - klass = self._getTargetClass() + klass = self._get_target_class() with self.assertRaises(ValueError): klass.from_pb(instance_pb, client) diff --git a/packages/google-cloud-bigtable/unit_tests/test_row.py b/packages/google-cloud-bigtable/unit_tests/test_row.py index 24656cbc16ca..5caf9bf92c01 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_row.py +++ b/packages/google-cloud-bigtable/unit_tests/test_row.py @@ -24,7 +24,7 @@ def _get_target_class(): return _SetDeleteRow def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) + return self._get_target_class()(*args, **kwargs) def test__get_mutations_virtual(self): row = self._makeOne(b'row-key', None) @@ -40,7 +40,7 @@ def _get_target_class(): return DirectRow def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) + return self._get_target_class()(*args, **kwargs) def test_constructor(self): row_key = b'row_key' @@ -145,7 +145,7 @@ def test_delete(self): self.assertEqual(row._pb_mutations, [expected_pb]) def test_delete_cell(self): - klass = self._getTargetClass() + klass = self._get_target_class() class MockRow(klass): @@ -196,7 +196,7 @@ def test_delete_cells_all_columns(self): table = object() row = self._makeOne(row_key, table) - klass = self._getTargetClass() + klass = self._get_target_class() 
self.assertEqual(row._pb_mutations, []) row.delete_cells(column_family_id, klass.ALL_COLUMNS) @@ -384,7 +384,7 @@ def _get_target_class(): return ConditionalRow def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) + return self._get_target_class()(*args, **kwargs) def test_constructor(self): row_key = b'row_key' @@ -526,7 +526,7 @@ def _get_target_class(): return AppendRow def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) + return self._get_target_class()(*args, **kwargs) def test_constructor(self): row_key = b'row_key' diff --git a/packages/google-cloud-bigtable/unit_tests/test_row_data.py b/packages/google-cloud-bigtable/unit_tests/test_row_data.py index df04a08ddf27..b3985c8a4f8a 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_row_data.py +++ b/packages/google-cloud-bigtable/unit_tests/test_row_data.py @@ -24,7 +24,7 @@ def _get_target_class(): return Cell def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) + return self._get_target_class()(*args, **kwargs) def _from_pb_test_helper(self, labels=None): import datetime @@ -45,7 +45,7 @@ def _from_pb_test_helper(self, labels=None): value=value, timestamp_micros=timestamp_micros, labels=labels) cell_expected = self._makeOne(value, timestamp, labels=labels) - klass = self._getTargetClass() + klass = self._get_target_class() result = klass.from_pb(cell_pb) self.assertEqual(result, cell_expected) @@ -100,7 +100,7 @@ def _get_target_class(): return PartialRowData def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) + return self._get_target_class()(*args, **kwargs) def test_constructor(self): row_key = object() @@ -192,7 +192,7 @@ def _get_target_class(): return PartialRowsData def _getDoNothingClass(self): - klass = self._getTargetClass() + klass = self._get_target_class() class FakePartialRowsData(klass): @@ -208,7 +208,7 @@ def consume_next(self): return FakePartialRowsData def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) + return self._get_target_class()(*args, **kwargs) def test_constructor(self): response_iterator = object() @@ -435,7 +435,7 @@ def _get_target_class(): return PartialRowsData def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) + return self._get_target_class()(*args, **kwargs) def _load_json_test(self, test_name): import os diff --git a/packages/google-cloud-bigtable/unit_tests/test_row_filters.py b/packages/google-cloud-bigtable/unit_tests/test_row_filters.py index 7a166a941e02..560bd3316025 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_row_filters.py +++ b/packages/google-cloud-bigtable/unit_tests/test_row_filters.py @@ -24,7 +24,7 @@ def _get_target_class(): return _BoolFilter def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) + return self._get_target_class()(*args, **kwargs) def test_constructor(self): flag = object() @@ -59,7 +59,7 @@ def _get_target_class(): return SinkFilter def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) + return self._get_target_class()(*args, **kwargs) def test_to_pb(self): flag = True @@ -77,7 +77,7 @@ def _get_target_class(): return PassAllFilter def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) + return self._get_target_class()(*args, **kwargs) def test_to_pb(self): flag = True @@ -95,7 +95,7 @@ def _get_target_class(): return BlockAllFilter def 
_makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) + return self._get_target_class()(*args, **kwargs) def test_to_pb(self): flag = True @@ -113,7 +113,7 @@ def _get_target_class(): return _RegexFilter def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) + return self._get_target_class()(*args, **kwargs) def test_constructor(self): regex = b'abc' @@ -153,7 +153,7 @@ def _get_target_class(): return RowKeyRegexFilter def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) + return self._get_target_class()(*args, **kwargs) def test_to_pb(self): regex = b'row-key-regex' @@ -171,7 +171,7 @@ def _get_target_class(): return RowSampleFilter def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) + return self._get_target_class()(*args, **kwargs) def test_constructor(self): sample = object() @@ -206,7 +206,7 @@ def _get_target_class(): return FamilyNameRegexFilter def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) + return self._get_target_class()(*args, **kwargs) def test_to_pb(self): regex = u'family-regex' @@ -225,7 +225,7 @@ def _get_target_class(): return ColumnQualifierRegexFilter def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) + return self._get_target_class()(*args, **kwargs) def test_to_pb(self): regex = b'column-regex' @@ -244,7 +244,7 @@ def _get_target_class(): return TimestampRange def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) + return self._get_target_class()(*args, **kwargs) def test_constructor(self): start = object() @@ -319,7 +319,7 @@ def _get_target_class(): return TimestampRangeFilter def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) + return self._get_target_class()(*args, **kwargs) def test_constructor(self): range_ = object() @@ -357,7 +357,7 @@ def _get_target_class(): return ColumnRangeFilter def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) + return self._get_target_class()(*args, **kwargs) def test_constructor_defaults(self): column_family_id = object() @@ -482,7 +482,7 @@ def _get_target_class(): return ValueRegexFilter def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) + return self._get_target_class()(*args, **kwargs) def test_to_pb(self): regex = b'value-regex' @@ -500,7 +500,7 @@ def _get_target_class(): return ValueRangeFilter def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) + return self._get_target_class()(*args, **kwargs) def test_constructor_defaults(self): row_filter = self._makeOne() @@ -592,7 +592,7 @@ def _get_target_class(): return _CellCountFilter def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) + return self._get_target_class()(*args, **kwargs) def test_constructor(self): num_cells = object() @@ -627,7 +627,7 @@ def _get_target_class(): return CellsRowOffsetFilter def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) + return self._get_target_class()(*args, **kwargs) def test_to_pb(self): num_cells = 76 @@ -646,7 +646,7 @@ def _get_target_class(): return CellsRowLimitFilter def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) + return self._get_target_class()(*args, **kwargs) def test_to_pb(self): num_cells = 189 @@ -665,7 +665,7 @@ def _get_target_class(): return 
CellsColumnLimitFilter def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) + return self._get_target_class()(*args, **kwargs) def test_to_pb(self): num_cells = 10 @@ -685,7 +685,7 @@ def _get_target_class(): return StripValueTransformerFilter def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) + return self._get_target_class()(*args, **kwargs) def test_to_pb(self): flag = True @@ -703,7 +703,7 @@ def _get_target_class(): return ApplyLabelFilter def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) + return self._get_target_class()(*args, **kwargs) def test_constructor(self): label = object() @@ -738,7 +738,7 @@ def _get_target_class(): return _FilterCombination def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) + return self._get_target_class()(*args, **kwargs) def test_constructor_defaults(self): row_filter = self._makeOne() @@ -770,7 +770,7 @@ def _get_target_class(): return RowFilterChain def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) + return self._get_target_class()(*args, **kwargs) def test_to_pb(self): from google.cloud.bigtable.row_filters import RowSampleFilter @@ -827,7 +827,7 @@ def _get_target_class(): return RowFilterUnion def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) + return self._get_target_class()(*args, **kwargs) def test_to_pb(self): from google.cloud.bigtable.row_filters import RowSampleFilter @@ -884,7 +884,7 @@ def _get_target_class(): return ConditionalRowFilter def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) + return self._get_target_class()(*args, **kwargs) def test_constructor(self): base_filter = object() diff --git a/packages/google-cloud-bigtable/unit_tests/test_table.py b/packages/google-cloud-bigtable/unit_tests/test_table.py index a1ea086a6b96..9d2a8ce6131b 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_table.py +++ b/packages/google-cloud-bigtable/unit_tests/test_table.py @@ -35,7 +35,7 @@ def _get_target_class(): return Table def _makeOne(self, *args, **kwargs): - return self._getTargetClass()(*args, **kwargs) + return self._get_target_class()(*args, **kwargs) def test_constructor(self): table_id = 'table-id' From bf7d201fecb3fece31cacd43a2395a9db43d1472 Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Thu, 10 Nov 2016 11:05:35 -0800 Subject: [PATCH 018/892] Changing all instances of _makeOne to _make_one. 
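For reference, a minimal sketch (not part of this patch) of the test-helper idiom that patches 016-018 converge on, condensed from the TestDirectRow hunks below; it assumes the google.cloud.bigtable.row.DirectRow import those tests use:

    import unittest

    class TestDirectRow(unittest.TestCase):

        @staticmethod
        def _get_target_class():
            # Deferred import, mirroring the pattern used across these tests.
            from google.cloud.bigtable.row import DirectRow
            return DirectRow

        def _make_one(self, *args, **kwargs):
            # Instantiate the class under test with the given arguments.
            return self._get_target_class()(*args, **kwargs)

        def test_constructor(self):
            row_key = b'row_key'
            table = object()
            row = self._make_one(row_key, table)
            self.assertEqual(row._row_key, row_key)
            self.assertIs(row._table, table)

The @staticmethod form lets _get_target_class be used without an instance, while _make_one keeps construction of the object under test in one place.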
Done via: $ git grep -l _makeOne | \ > xargs sed -i s/_makeOne/_make_one/g --- .../unit_tests/test_client.py | 32 +-- .../unit_tests/test_cluster.py | 36 +-- .../unit_tests/test_column_family.py | 98 ++++---- .../unit_tests/test_instance.py | 44 ++-- .../unit_tests/test_row.py | 66 ++--- .../unit_tests/test_row_data.py | 104 ++++---- .../unit_tests/test_row_filters.py | 236 +++++++++--------- .../unit_tests/test_table.py | 42 ++-- 8 files changed, 329 insertions(+), 329 deletions(-) diff --git a/packages/google-cloud-bigtable/unit_tests/test_client.py b/packages/google-cloud-bigtable/unit_tests/test_client.py index 9eea7e13a840..2b13d2a61a37 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_client.py +++ b/packages/google-cloud-bigtable/unit_tests/test_client.py @@ -272,10 +272,10 @@ def _get_target_class(): from google.cloud.bigtable.client import Client return Client - def _makeOne(self, *args, **kwargs): + def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) - def _makeOneWithMocks(self, *args, **kwargs): + def _make_oneWithMocks(self, *args, **kwargs): from google.cloud._testing import _Monkey from google.cloud.bigtable import client as MUT @@ -287,7 +287,7 @@ def _makeOneWithMocks(self, *args, **kwargs): _make_instance_stub=mock_make_instance_stub, _make_operations_stub=mock_make_operations_stub, _make_table_stub=mock_make_table_stub): - return self._makeOne(*args, **kwargs) + return self._make_one(*args, **kwargs) def _constructor_test_helper(self, expected_scopes, creds, read_only=False, admin=False, @@ -305,7 +305,7 @@ def _constructor_test_helper(self, expected_scopes, creds, _make_instance_stub=mock_make_instance_stub, _make_operations_stub=mock_make_operations_stub, _make_table_stub=mock_make_table_stub): - client = self._makeOne(project=self.PROJECT, credentials=creds, + client = self._make_one(project=self.PROJECT, credentials=creds, read_only=read_only, admin=admin, user_agent=user_agent) @@ -401,7 +401,7 @@ def _copy_test_helper(self, read_only=False, admin=False): from google.cloud.bigtable import client as MUT credentials = _Credentials('value') - client = self._makeOneWithMocks( + client = self._make_oneWithMocks( project=self.PROJECT, credentials=credentials, read_only=read_only, @@ -449,14 +449,14 @@ def test_copy_read_only(self): def test_credentials_getter(self): credentials = _Credentials() project = 'PROJECT' - client = self._makeOneWithMocks(project=project, + client = self._make_oneWithMocks(project=project, credentials=credentials) self.assertIs(client.credentials, credentials) def test_project_name_property(self): credentials = _Credentials() project = 'PROJECT' - client = self._makeOneWithMocks(project=project, + client = self._make_oneWithMocks(project=project, credentials=credentials) project_name = 'projects/' + project self.assertEqual(client.project_name, project_name) @@ -464,14 +464,14 @@ def test_project_name_property(self): def test_instance_stub_getter(self): credentials = _Credentials() project = 'PROJECT' - client = self._makeOneWithMocks(project=project, + client = self._make_oneWithMocks(project=project, credentials=credentials, admin=True) self.assertIs(client._instance_stub, client._instance_stub_internal) def test_instance_stub_non_admin_failure(self): credentials = _Credentials() project = 'PROJECT' - client = self._makeOneWithMocks(project=project, + client = self._make_oneWithMocks(project=project, credentials=credentials, admin=False) with self.assertRaises(ValueError): getattr(client, 
'_instance_stub') @@ -479,7 +479,7 @@ def test_instance_stub_non_admin_failure(self): def test_operations_stub_getter(self): credentials = _Credentials() project = 'PROJECT' - client = self._makeOneWithMocks(project=project, + client = self._make_oneWithMocks(project=project, credentials=credentials, admin=True) self.assertIs(client._operations_stub, client._operations_stub_internal) @@ -487,7 +487,7 @@ def test_operations_stub_getter(self): def test_operations_stub_non_admin_failure(self): credentials = _Credentials() project = 'PROJECT' - client = self._makeOneWithMocks(project=project, + client = self._make_oneWithMocks(project=project, credentials=credentials, admin=False) with self.assertRaises(ValueError): getattr(client, '_operations_stub') @@ -495,14 +495,14 @@ def test_operations_stub_non_admin_failure(self): def test_table_stub_getter(self): credentials = _Credentials() project = 'PROJECT' - client = self._makeOneWithMocks(project=project, + client = self._make_oneWithMocks(project=project, credentials=credentials, admin=True) self.assertIs(client._table_stub, client._table_stub_internal) def test_table_stub_non_admin_failure(self): credentials = _Credentials() project = 'PROJECT' - client = self._makeOneWithMocks(project=project, + client = self._make_oneWithMocks(project=project, credentials=credentials, admin=False) with self.assertRaises(ValueError): getattr(client, '_table_stub') @@ -517,7 +517,7 @@ def test_instance_factory_defaults(self): INSTANCE_ID = 'instance-id' DISPLAY_NAME = 'display-name' credentials = _Credentials() - client = self._makeOneWithMocks(project=PROJECT, + client = self._make_oneWithMocks(project=PROJECT, credentials=credentials) instance = client.instance(INSTANCE_ID, display_name=DISPLAY_NAME) @@ -539,7 +539,7 @@ def test_instance_factory_w_explicit_serve_nodes(self): LOCATION_ID = 'locname' SERVE_NODES = 5 credentials = _Credentials() - client = self._makeOneWithMocks(project=PROJECT, + client = self._make_oneWithMocks(project=PROJECT, credentials=credentials) instance = client.instance( @@ -570,7 +570,7 @@ def test_list_instances(self): 'projects/' + self.PROJECT + '/instances/' + INSTANCE_ID2) credentials = _Credentials() - client = self._makeOneWithMocks( + client = self._make_oneWithMocks( project=self.PROJECT, credentials=credentials, admin=True, diff --git a/packages/google-cloud-bigtable/unit_tests/test_cluster.py b/packages/google-cloud-bigtable/unit_tests/test_cluster.py index 337ea0abbba5..c8f99a6011e9 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_cluster.py +++ b/packages/google-cloud-bigtable/unit_tests/test_cluster.py @@ -30,7 +30,7 @@ def _get_target_class(): from google.cloud.bigtable.cluster import Cluster return Cluster - def _makeOne(self, *args, **kwargs): + def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def test_constructor_defaults(self): @@ -38,7 +38,7 @@ def test_constructor_defaults(self): client = _Client(self.PROJECT) instance = _Instance(self.INSTANCE_ID, client) - cluster = self._makeOne(self.CLUSTER_ID, instance) + cluster = self._make_one(self.CLUSTER_ID, instance) self.assertEqual(cluster.cluster_id, self.CLUSTER_ID) self.assertIs(cluster._instance, instance) self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES) @@ -48,7 +48,7 @@ def test_constructor_non_default(self): client = _Client(self.PROJECT) instance = _Instance(self.INSTANCE_ID, client) - cluster = self._makeOne(self.CLUSTER_ID, instance, + cluster = self._make_one(self.CLUSTER_ID, instance, 
serve_nodes=SERVE_NODES) self.assertEqual(cluster.cluster_id, self.CLUSTER_ID) self.assertIs(cluster._instance, instance) @@ -59,7 +59,7 @@ def test_copy(self): client = _Client(self.PROJECT) instance = _Instance(self.INSTANCE_ID, client) - cluster = self._makeOne(self.CLUSTER_ID, instance, + cluster = self._make_one(self.CLUSTER_ID, instance, serve_nodes=SERVE_NODES) new_cluster = cluster.copy() @@ -80,7 +80,7 @@ def test__update_from_pb_success(self): client = _Client(self.PROJECT) instance = _Instance(self.INSTANCE_ID, client) - cluster = self._makeOne(self.CLUSTER_ID, instance) + cluster = self._make_one(self.CLUSTER_ID, instance) self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES) cluster._update_from_pb(cluster_pb) self.assertEqual(cluster.serve_nodes, SERVE_NODES) @@ -92,7 +92,7 @@ def test__update_from_pb_no_serve_nodes(self): client = _Client(self.PROJECT) instance = _Instance(self.INSTANCE_ID, client) - cluster = self._makeOne(self.CLUSTER_ID, instance) + cluster = self._make_one(self.CLUSTER_ID, instance) self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES) with self.assertRaises(ValueError): cluster._update_from_pb(cluster_pb) @@ -155,36 +155,36 @@ def test_name_property(self): client = _Client(self.PROJECT) instance = _Instance(self.INSTANCE_ID, client) - cluster = self._makeOne(self.CLUSTER_ID, instance) + cluster = self._make_one(self.CLUSTER_ID, instance) self.assertEqual(cluster.name, self.CLUSTER_NAME) def test___eq__(self): client = _Client(self.PROJECT) instance = _Instance(self.INSTANCE_ID, client) - cluster1 = self._makeOne(self.CLUSTER_ID, instance) - cluster2 = self._makeOne(self.CLUSTER_ID, instance) + cluster1 = self._make_one(self.CLUSTER_ID, instance) + cluster2 = self._make_one(self.CLUSTER_ID, instance) self.assertEqual(cluster1, cluster2) def test___eq__type_differ(self): client = _Client(self.PROJECT) instance = _Instance(self.INSTANCE_ID, client) - cluster1 = self._makeOne(self.CLUSTER_ID, instance) + cluster1 = self._make_one(self.CLUSTER_ID, instance) cluster2 = object() self.assertNotEqual(cluster1, cluster2) def test___ne__same_value(self): client = _Client(self.PROJECT) instance = _Instance(self.INSTANCE_ID, client) - cluster1 = self._makeOne(self.CLUSTER_ID, instance) - cluster2 = self._makeOne(self.CLUSTER_ID, instance) + cluster1 = self._make_one(self.CLUSTER_ID, instance) + cluster2 = self._make_one(self.CLUSTER_ID, instance) comparison_val = (cluster1 != cluster2) self.assertFalse(comparison_val) def test___ne__(self): client = _Client(self.PROJECT) instance = _Instance(self.INSTANCE_ID, client) - cluster1 = self._makeOne('cluster_id1', instance) - cluster2 = self._makeOne('cluster_id2', instance) + cluster1 = self._make_one('cluster_id1', instance) + cluster2 = self._make_one('cluster_id2', instance) self.assertNotEqual(cluster1, cluster2) def test_reload(self): @@ -195,7 +195,7 @@ def test_reload(self): LOCATION = 'LOCATION' client = _Client(self.PROJECT) instance = _Instance(self.INSTANCE_ID, client) - cluster = self._makeOne(self.CLUSTER_ID, instance) + cluster = self._make_one(self.CLUSTER_ID, instance) # Create request_pb request_pb = _GetClusterRequestPB(name=self.CLUSTER_NAME) @@ -238,7 +238,7 @@ def test_create(self): SERVE_NODES = 4 client = _Client(self.PROJECT) instance = _Instance(self.INSTANCE_ID, client) - cluster = self._makeOne( + cluster = self._make_one( self.CLUSTER_ID, instance, serve_nodes=SERVE_NODES) # Create response_pb @@ -292,7 +292,7 @@ def test_update(self): client = _Client(self.PROJECT) instance = 
_Instance(self.INSTANCE_ID, client) - cluster = self._makeOne(self.CLUSTER_ID, instance, + cluster = self._make_one(self.CLUSTER_ID, instance, serve_nodes=SERVE_NODES) # Create request_pb @@ -347,7 +347,7 @@ def test_delete(self): client = _Client(self.PROJECT) instance = _Instance(self.INSTANCE_ID, client) - cluster = self._makeOne(self.CLUSTER_ID, instance) + cluster = self._make_one(self.CLUSTER_ID, instance) # Create request_pb request_pb = _DeleteClusterRequestPB(name=self.CLUSTER_NAME) diff --git a/packages/google-cloud-bigtable/unit_tests/test_column_family.py b/packages/google-cloud-bigtable/unit_tests/test_column_family.py index 1202194d54e2..af3249fd7e3f 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_column_family.py +++ b/packages/google-cloud-bigtable/unit_tests/test_column_family.py @@ -90,28 +90,28 @@ def _get_target_class(): from google.cloud.bigtable.column_family import MaxVersionsGCRule return MaxVersionsGCRule - def _makeOne(self, *args, **kwargs): + def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def test___eq__type_differ(self): - gc_rule1 = self._makeOne(10) + gc_rule1 = self._make_one(10) gc_rule2 = object() self.assertNotEqual(gc_rule1, gc_rule2) def test___eq__same_value(self): - gc_rule1 = self._makeOne(2) - gc_rule2 = self._makeOne(2) + gc_rule1 = self._make_one(2) + gc_rule2 = self._make_one(2) self.assertEqual(gc_rule1, gc_rule2) def test___ne__same_value(self): - gc_rule1 = self._makeOne(99) - gc_rule2 = self._makeOne(99) + gc_rule1 = self._make_one(99) + gc_rule2 = self._make_one(99) comparison_val = (gc_rule1 != gc_rule2) self.assertFalse(comparison_val) def test_to_pb(self): max_num_versions = 1337 - gc_rule = self._makeOne(max_num_versions=max_num_versions) + gc_rule = self._make_one(max_num_versions=max_num_versions) pb_val = gc_rule.to_pb() expected = _GcRulePB(max_num_versions=max_num_versions) self.assertEqual(pb_val, expected) @@ -124,25 +124,25 @@ def _get_target_class(): from google.cloud.bigtable.column_family import MaxAgeGCRule return MaxAgeGCRule - def _makeOne(self, *args, **kwargs): + def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def test___eq__type_differ(self): max_age = object() - gc_rule1 = self._makeOne(max_age=max_age) + gc_rule1 = self._make_one(max_age=max_age) gc_rule2 = object() self.assertNotEqual(gc_rule1, gc_rule2) def test___eq__same_value(self): max_age = object() - gc_rule1 = self._makeOne(max_age=max_age) - gc_rule2 = self._makeOne(max_age=max_age) + gc_rule1 = self._make_one(max_age=max_age) + gc_rule2 = self._make_one(max_age=max_age) self.assertEqual(gc_rule1, gc_rule2) def test___ne__same_value(self): max_age = object() - gc_rule1 = self._makeOne(max_age=max_age) - gc_rule2 = self._makeOne(max_age=max_age) + gc_rule1 = self._make_one(max_age=max_age) + gc_rule2 = self._make_one(max_age=max_age) comparison_val = (gc_rule1 != gc_rule2) self.assertFalse(comparison_val) @@ -152,7 +152,7 @@ def test_to_pb(self): max_age = datetime.timedelta(seconds=1) duration = duration_pb2.Duration(seconds=1) - gc_rule = self._makeOne(max_age=max_age) + gc_rule = self._make_one(max_age=max_age) pb_val = gc_rule.to_pb() self.assertEqual(pb_val, _GcRulePB(max_age=duration)) @@ -164,30 +164,30 @@ def _get_target_class(): from google.cloud.bigtable.column_family import GCRuleUnion return GCRuleUnion - def _makeOne(self, *args, **kwargs): + def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def test_constructor(self): 
rules = object() - rule_union = self._makeOne(rules) + rule_union = self._make_one(rules) self.assertIs(rule_union.rules, rules) def test___eq__(self): rules = object() - gc_rule1 = self._makeOne(rules) - gc_rule2 = self._makeOne(rules) + gc_rule1 = self._make_one(rules) + gc_rule2 = self._make_one(rules) self.assertEqual(gc_rule1, gc_rule2) def test___eq__type_differ(self): rules = object() - gc_rule1 = self._makeOne(rules) + gc_rule1 = self._make_one(rules) gc_rule2 = object() self.assertNotEqual(gc_rule1, gc_rule2) def test___ne__same_value(self): rules = object() - gc_rule1 = self._makeOne(rules) - gc_rule2 = self._makeOne(rules) + gc_rule1 = self._make_one(rules) + gc_rule2 = self._make_one(rules) comparison_val = (gc_rule1 != gc_rule2) self.assertFalse(comparison_val) @@ -206,7 +206,7 @@ def test_to_pb(self): pb_rule2 = _GcRulePB( max_age=duration_pb2.Duration(seconds=1)) - rule3 = self._makeOne(rules=[rule1, rule2]) + rule3 = self._make_one(rules=[rule1, rule2]) pb_rule3 = _GcRulePB( union=_GcRuleUnionPB(rules=[pb_rule1, pb_rule2])) @@ -228,7 +228,7 @@ def test_to_pb_nested(self): pb_rule2 = _GcRulePB( max_age=duration_pb2.Duration(seconds=1)) - rule3 = self._makeOne(rules=[rule1, rule2]) + rule3 = self._make_one(rules=[rule1, rule2]) pb_rule3 = _GcRulePB( union=_GcRuleUnionPB(rules=[pb_rule1, pb_rule2])) @@ -236,7 +236,7 @@ def test_to_pb_nested(self): rule4 = MaxVersionsGCRule(max_num_versions2) pb_rule4 = _GcRulePB(max_num_versions=max_num_versions2) - rule5 = self._makeOne(rules=[rule3, rule4]) + rule5 = self._make_one(rules=[rule3, rule4]) pb_rule5 = _GcRulePB( union=_GcRuleUnionPB(rules=[pb_rule3, pb_rule4])) @@ -251,30 +251,30 @@ def _get_target_class(): from google.cloud.bigtable.column_family import GCRuleIntersection return GCRuleIntersection - def _makeOne(self, *args, **kwargs): + def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def test_constructor(self): rules = object() - rule_intersection = self._makeOne(rules) + rule_intersection = self._make_one(rules) self.assertIs(rule_intersection.rules, rules) def test___eq__(self): rules = object() - gc_rule1 = self._makeOne(rules) - gc_rule2 = self._makeOne(rules) + gc_rule1 = self._make_one(rules) + gc_rule2 = self._make_one(rules) self.assertEqual(gc_rule1, gc_rule2) def test___eq__type_differ(self): rules = object() - gc_rule1 = self._makeOne(rules) + gc_rule1 = self._make_one(rules) gc_rule2 = object() self.assertNotEqual(gc_rule1, gc_rule2) def test___ne__same_value(self): rules = object() - gc_rule1 = self._makeOne(rules) - gc_rule2 = self._makeOne(rules) + gc_rule1 = self._make_one(rules) + gc_rule2 = self._make_one(rules) comparison_val = (gc_rule1 != gc_rule2) self.assertFalse(comparison_val) @@ -293,7 +293,7 @@ def test_to_pb(self): pb_rule2 = _GcRulePB( max_age=duration_pb2.Duration(seconds=1)) - rule3 = self._makeOne(rules=[rule1, rule2]) + rule3 = self._make_one(rules=[rule1, rule2]) pb_rule3 = _GcRulePB( intersection=_GcRuleIntersectionPB( rules=[pb_rule1, pb_rule2])) @@ -316,7 +316,7 @@ def test_to_pb_nested(self): pb_rule2 = _GcRulePB( max_age=duration_pb2.Duration(seconds=1)) - rule3 = self._makeOne(rules=[rule1, rule2]) + rule3 = self._make_one(rules=[rule1, rule2]) pb_rule3 = _GcRulePB( intersection=_GcRuleIntersectionPB( rules=[pb_rule1, pb_rule2])) @@ -325,7 +325,7 @@ def test_to_pb_nested(self): rule4 = MaxVersionsGCRule(max_num_versions2) pb_rule4 = _GcRulePB(max_num_versions=max_num_versions2) - rule5 = self._makeOne(rules=[rule3, rule4]) + rule5 = 
self._make_one(rules=[rule3, rule4]) pb_rule5 = _GcRulePB( intersection=_GcRuleIntersectionPB( rules=[pb_rule3, pb_rule4])) @@ -341,14 +341,14 @@ def _get_target_class(): from google.cloud.bigtable.column_family import ColumnFamily return ColumnFamily - def _makeOne(self, *args, **kwargs): + def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def test_constructor(self): column_family_id = u'column-family-id' table = object() gc_rule = object() - column_family = self._makeOne( + column_family = self._make_one( column_family_id, table, gc_rule=gc_rule) self.assertEqual(column_family.column_family_id, column_family_id) @@ -359,7 +359,7 @@ def test_name_property(self): column_family_id = u'column-family-id' table_name = 'table_name' table = _Table(table_name) - column_family = self._makeOne(column_family_id, table) + column_family = self._make_one(column_family_id, table) expected_name = table_name + '/columnFamilies/' + column_family_id self.assertEqual(column_family.name, expected_name) @@ -368,14 +368,14 @@ def test___eq__(self): column_family_id = 'column_family_id' table = object() gc_rule = object() - column_family1 = self._makeOne(column_family_id, table, + column_family1 = self._make_one(column_family_id, table, gc_rule=gc_rule) - column_family2 = self._makeOne(column_family_id, table, + column_family2 = self._make_one(column_family_id, table, gc_rule=gc_rule) self.assertEqual(column_family1, column_family2) def test___eq__type_differ(self): - column_family1 = self._makeOne('column_family_id', None) + column_family1 = self._make_one('column_family_id', None) column_family2 = object() self.assertNotEqual(column_family1, column_family2) @@ -383,20 +383,20 @@ def test___ne__same_value(self): column_family_id = 'column_family_id' table = object() gc_rule = object() - column_family1 = self._makeOne(column_family_id, table, + column_family1 = self._make_one(column_family_id, table, gc_rule=gc_rule) - column_family2 = self._makeOne(column_family_id, table, + column_family2 = self._make_one(column_family_id, table, gc_rule=gc_rule) comparison_val = (column_family1 != column_family2) self.assertFalse(comparison_val) def test___ne__(self): - column_family1 = self._makeOne('column_family_id1', None) - column_family2 = self._makeOne('column_family_id2', None) + column_family1 = self._make_one('column_family_id1', None) + column_family2 = self._make_one('column_family_id2', None) self.assertNotEqual(column_family1, column_family2) def test_to_pb_no_rules(self): - column_family = self._makeOne('column_family_id', None) + column_family = self._make_one('column_family_id', None) pb_val = column_family.to_pb() expected = _ColumnFamilyPB() self.assertEqual(pb_val, expected) @@ -405,7 +405,7 @@ def test_to_pb_with_rule(self): from google.cloud.bigtable.column_family import MaxVersionsGCRule gc_rule = MaxVersionsGCRule(1) - column_family = self._makeOne('column_family_id', None, + column_family = self._make_one('column_family_id', None, gc_rule=gc_rule) pb_val = column_family.to_pb() expected = _ColumnFamilyPB(gc_rule=gc_rule.to_pb()) @@ -426,7 +426,7 @@ def _create_test_helper(self, gc_rule=None): client = _Client() table = _Table(table_name, client=client) - column_family = self._makeOne( + column_family = self._make_one( column_family_id, table, gc_rule=gc_rule) # Create request_pb @@ -484,7 +484,7 @@ def _update_test_helper(self, gc_rule=None): client = _Client() table = _Table(table_name, client=client) - column_family = self._makeOne( + column_family = 
self._make_one( column_family_id, table, gc_rule=gc_rule) # Create request_pb @@ -543,7 +543,7 @@ def test_delete(self): client = _Client() table = _Table(table_name, client=client) - column_family = self._makeOne(column_family_id, table) + column_family = self._make_one(column_family_id, table) # Create request_pb request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest( diff --git a/packages/google-cloud-bigtable/unit_tests/test_instance.py b/packages/google-cloud-bigtable/unit_tests/test_instance.py index 3e93a064d7aa..a1dab7c6f31e 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_instance.py +++ b/packages/google-cloud-bigtable/unit_tests/test_instance.py @@ -35,14 +35,14 @@ def _get_target_class(): from google.cloud.bigtable.instance import Instance return Instance - def _makeOne(self, *args, **kwargs): + def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def test_constructor_defaults(self): from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES client = object() - instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID) + instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) self.assertEqual(instance.instance_id, self.INSTANCE_ID) self.assertEqual(instance.display_name, self.INSTANCE_ID) self.assertIs(instance._client, client) @@ -53,7 +53,7 @@ def test_constructor_non_default(self): display_name = 'display_name' client = object() - instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID, + instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID, display_name=display_name) self.assertEqual(instance.instance_id, self.INSTANCE_ID) self.assertEqual(instance.display_name, display_name) @@ -63,7 +63,7 @@ def test_copy(self): display_name = 'display_name' client = _Client(self.PROJECT) - instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID, + instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID, display_name=display_name) new_instance = instance.copy() @@ -77,7 +77,7 @@ def test_copy(self): def test_table_factory(self): from google.cloud.bigtable.table import Table - instance = self._makeOne(self.INSTANCE_ID, None, self.LOCATION_ID) + instance = self._make_one(self.INSTANCE_ID, None, self.LOCATION_ID) table = instance.table(self.TABLE_ID) self.assertIsInstance(table, Table) @@ -93,7 +93,7 @@ def test__update_from_pb_success(self): display_name=display_name, ) - instance = self._makeOne(None, None, None, None) + instance = self._make_one(None, None, None, None) self.assertIsNone(instance.display_name) instance._update_from_pb(instance_pb) self.assertEqual(instance.display_name, display_name) @@ -103,7 +103,7 @@ def test__update_from_pb_no_display_name(self): instance_pb2 as data_v2_pb2) instance_pb = data_v2_pb2.Instance() - instance = self._makeOne(None, None, None, None) + instance = self._make_one(None, None, None, None) self.assertIsNone(instance.display_name) with self.assertRaises(ValueError): instance._update_from_pb(instance_pb) @@ -159,31 +159,31 @@ def test_from_pb_project_mistmatch(self): def test_name_property(self): client = _Client(project=self.PROJECT) - instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID) + instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) self.assertEqual(instance.name, self.INSTANCE_NAME) def test___eq__(self): client = object() - instance1 = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID) - instance2 = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID) + instance1 = 
self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) + instance2 = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) self.assertEqual(instance1, instance2) def test___eq__type_differ(self): client = object() - instance1 = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID) + instance1 = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) instance2 = object() self.assertNotEqual(instance1, instance2) def test___ne__same_value(self): client = object() - instance1 = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID) - instance2 = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID) + instance1 = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) + instance2 = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) comparison_val = (instance1 != instance2) self.assertFalse(comparison_val) def test___ne__(self): - instance1 = self._makeOne('instance_id1', 'client1', self.LOCATION_ID) - instance2 = self._makeOne('instance_id2', 'client2', self.LOCATION_ID) + instance1 = self._make_one('instance_id1', 'client1', self.LOCATION_ID) + instance2 = self._make_one('instance_id2', 'client2', self.LOCATION_ID) self.assertNotEqual(instance1, instance2) def test_reload(self): @@ -194,7 +194,7 @@ def test_reload(self): from unit_tests._testing import _FakeStub client = _Client(self.PROJECT) - instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID) + instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) # Create request_pb request_pb = messages_v2_pb.GetInstanceRequest( @@ -241,7 +241,7 @@ def test_create(self): NOW = datetime.datetime.utcnow() NOW_PB = _datetime_to_pb_timestamp(NOW) client = _Client(self.PROJECT) - instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID, + instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID, display_name=self.DISPLAY_NAME) # Create response_pb @@ -295,7 +295,7 @@ def test_create_w_explicit_serve_nodes(self): SERVE_NODES = 5 client = _Client(self.PROJECT) - instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID, + instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID, serve_nodes=SERVE_NODES) # Create response_pb @@ -331,7 +331,7 @@ def test_update(self): from unit_tests._testing import _FakeStub client = _Client(self.PROJECT) - instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID, + instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID, display_name=self.DISPLAY_NAME) # Create request_pb @@ -366,7 +366,7 @@ def test_delete(self): from unit_tests._testing import _FakeStub client = _Client(self.PROJECT) - instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID) + instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) # Create request_pb request_pb = messages_v2_pb.DeleteInstanceRequest( @@ -405,7 +405,7 @@ def test_list_clusters(self): SERVE_NODES = 4 client = _Client(self.PROJECT) - instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID) + instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) CLUSTER_NAME1 = (instance.name + '/clusters/' + CLUSTER_ID1) CLUSTER_NAME2 = (instance.name + '/clusters/' + CLUSTER_ID2) @@ -456,7 +456,7 @@ def _list_tables_helper(self, table_name=None): from unit_tests._testing import _FakeStub client = _Client(self.PROJECT) - instance = self._makeOne(self.INSTANCE_ID, client, self.LOCATION_ID) + instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) # Create request_ request_pb = 
table_messages_v1_pb2.ListTablesRequest( diff --git a/packages/google-cloud-bigtable/unit_tests/test_row.py b/packages/google-cloud-bigtable/unit_tests/test_row.py index 5caf9bf92c01..e8dd847ed811 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_row.py +++ b/packages/google-cloud-bigtable/unit_tests/test_row.py @@ -23,11 +23,11 @@ def _get_target_class(): from google.cloud.bigtable.row import _SetDeleteRow return _SetDeleteRow - def _makeOne(self, *args, **kwargs): + def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def test__get_mutations_virtual(self): - row = self._makeOne(b'row-key', None) + row = self._make_one(b'row-key', None) with self.assertRaises(NotImplementedError): row._get_mutations(None) @@ -39,14 +39,14 @@ def _get_target_class(): from google.cloud.bigtable.row import DirectRow return DirectRow - def _makeOne(self, *args, **kwargs): + def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def test_constructor(self): row_key = b'row_key' table = object() - row = self._makeOne(row_key, table) + row = self._make_one(row_key, table) self.assertEqual(row._row_key, row_key) self.assertIs(row._table, table) self.assertEqual(row._pb_mutations, []) @@ -56,18 +56,18 @@ def test_constructor_with_unicode(self): row_key_bytes = b'row_key' table = object() - row = self._makeOne(row_key, table) + row = self._make_one(row_key, table) self.assertEqual(row._row_key, row_key_bytes) self.assertIs(row._table, table) def test_constructor_with_non_bytes(self): row_key = object() with self.assertRaises(TypeError): - self._makeOne(row_key, None) + self._make_one(row_key, None) def test__get_mutations(self): row_key = b'row_key' - row = self._makeOne(row_key, None) + row = self._make_one(row_key, None) row._pb_mutations = mutations = object() self.assertIs(mutations, row._get_mutations(None)) @@ -82,7 +82,7 @@ def _set_cell_helper(self, column=None, column_bytes=None, if column is None: column = b'column' table = object() - row = self._makeOne(row_key, table) + row = self._make_one(row_key, table) self.assertEqual(row._pb_mutations, []) row.set_cell(column_family_id, column, value, timestamp=timestamp) @@ -118,7 +118,7 @@ def test_set_cell_with_non_bytes_value(self): column_family_id = u'column_family_id' table = object() - row = self._makeOne(row_key, table) + row = self._make_one(row_key, table) value = object() # Not bytes with self.assertRaises(TypeError): row.set_cell(column_family_id, column, value) @@ -135,7 +135,7 @@ def test_set_cell_with_non_null_timestamp(self): def test_delete(self): row_key = b'row_key' - row = self._makeOne(row_key, object()) + row = self._make_one(row_key, object()) self.assertEqual(row._pb_mutations, []) row.delete() @@ -185,7 +185,7 @@ def test_delete_cells_non_iterable(self): column_family_id = u'column_family_id' table = object() - row = self._makeOne(row_key, table) + row = self._make_one(row_key, table) columns = object() # Not iterable with self.assertRaises(TypeError): row.delete_cells(column_family_id, columns) @@ -195,7 +195,7 @@ def test_delete_cells_all_columns(self): column_family_id = u'column_family_id' table = object() - row = self._makeOne(row_key, table) + row = self._make_one(row_key, table) klass = self._get_target_class() self.assertEqual(row._pb_mutations, []) row.delete_cells(column_family_id, klass.ALL_COLUMNS) @@ -212,7 +212,7 @@ def test_delete_cells_no_columns(self): column_family_id = u'column_family_id' table = object() - row = self._makeOne(row_key, table) + 
row = self._make_one(row_key, table) columns = [] self.assertEqual(row._pb_mutations, []) row.delete_cells(column_family_id, columns) @@ -224,7 +224,7 @@ def _delete_cells_helper(self, time_range=None): column_family_id = u'column_family_id' table = object() - row = self._makeOne(row_key, table) + row = self._make_one(row_key, table) columns = [column] self.assertEqual(row._pb_mutations, []) row.delete_cells(column_family_id, columns, time_range=time_range) @@ -261,7 +261,7 @@ def test_delete_cells_with_bad_column(self): column_family_id = u'column_family_id' table = object() - row = self._makeOne(row_key, table) + row = self._make_one(row_key, table) columns = [column, object()] self.assertEqual(row._pb_mutations, []) with self.assertRaises(TypeError): @@ -277,7 +277,7 @@ def test_delete_cells_with_string_columns(self): column2_bytes = b'column2' table = object() - row = self._makeOne(row_key, table) + row = self._make_one(row_key, table) columns = [column1, column2] self.assertEqual(row._pb_mutations, []) row.delete_cells(column_family_id, columns) @@ -306,7 +306,7 @@ def test_commit(self): column = b'column' client = _Client() table = _Table(table_name, client=client) - row = self._makeOne(row_key, table) + row = self._make_one(row_key, table) # Create request_pb value = b'bytes-value' @@ -350,7 +350,7 @@ def test_commit_too_many_mutations(self): row_key = b'row_key' table = object() - row = self._makeOne(row_key, table) + row = self._make_one(row_key, table) row._pb_mutations = [1, 2, 3] num_mutations = len(row._pb_mutations) with _Monkey(MUT, MAX_MUTATIONS=num_mutations - 1): @@ -363,7 +363,7 @@ def test_commit_no_mutations(self): row_key = b'row_key' client = _Client() table = _Table(None, client=client) - row = self._makeOne(row_key, table) + row = self._make_one(row_key, table) self.assertEqual(row._pb_mutations, []) # Patch the stub used by the API method. 
@@ -383,7 +383,7 @@ def _get_target_class(): from google.cloud.bigtable.row import ConditionalRow return ConditionalRow - def _makeOne(self, *args, **kwargs): + def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def test_constructor(self): @@ -391,7 +391,7 @@ def test_constructor(self): table = object() filter_ = object() - row = self._makeOne(row_key, table, filter_=filter_) + row = self._make_one(row_key, table, filter_=filter_) self.assertEqual(row._row_key, row_key) self.assertIs(row._table, table) self.assertIs(row._filter, filter_) @@ -401,7 +401,7 @@ def test_constructor(self): def test__get_mutations(self): row_key = b'row_key' filter_ = object() - row = self._makeOne(row_key, None, filter_=filter_) + row = self._make_one(row_key, None, filter_=filter_) row._true_pb_mutations = true_mutations = object() row._false_pb_mutations = false_mutations = object() @@ -423,7 +423,7 @@ def test_commit(self): client = _Client() table = _Table(table_name, client=client) row_filter = RowSampleFilter(0.33) - row = self._makeOne(row_key, table, filter_=row_filter) + row = self._make_one(row_key, table, filter_=row_filter) # Create request_pb value1 = b'bytes-value' @@ -490,7 +490,7 @@ def test_commit_too_many_mutations(self): row_key = b'row_key' table = object() filter_ = object() - row = self._makeOne(row_key, table, filter_=filter_) + row = self._make_one(row_key, table, filter_=filter_) row._true_pb_mutations = [1, 2, 3] num_mutations = len(row._true_pb_mutations) with _Monkey(MUT, MAX_MUTATIONS=num_mutations - 1): @@ -504,7 +504,7 @@ def test_commit_no_mutations(self): client = _Client() table = _Table(None, client=client) filter_ = object() - row = self._makeOne(row_key, table, filter_=filter_) + row = self._make_one(row_key, table, filter_=filter_) self.assertEqual(row._true_pb_mutations, []) self.assertEqual(row._false_pb_mutations, []) @@ -525,14 +525,14 @@ def _get_target_class(): from google.cloud.bigtable.row import AppendRow return AppendRow - def _makeOne(self, *args, **kwargs): + def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def test_constructor(self): row_key = b'row_key' table = object() - row = self._makeOne(row_key, table) + row = self._make_one(row_key, table) self.assertEqual(row._row_key, row_key) self.assertIs(row._table, table) self.assertEqual(row._rule_pb_list, []) @@ -540,7 +540,7 @@ def test_constructor(self): def test_clear(self): row_key = b'row_key' table = object() - row = self._makeOne(row_key, table) + row = self._make_one(row_key, table) row._rule_pb_list = [1, 2, 3] row.clear() self.assertEqual(row._rule_pb_list, []) @@ -548,7 +548,7 @@ def test_clear(self): def test_append_cell_value(self): table = object() row_key = b'row_key' - row = self._makeOne(row_key, table) + row = self._make_one(row_key, table) self.assertEqual(row._rule_pb_list, []) column = b'column' @@ -563,7 +563,7 @@ def test_append_cell_value(self): def test_increment_cell_value(self): table = object() row_key = b'row_key' - row = self._makeOne(row_key, table) + row = self._make_one(row_key, table) self.assertEqual(row._rule_pb_list, []) column = b'column' @@ -586,7 +586,7 @@ def test_commit(self): column = b'column' client = _Client() table = _Table(table_name, client=client) - row = self._makeOne(row_key, table) + row = self._make_one(row_key, table) # Create request_pb value = b'bytes-value' @@ -637,7 +637,7 @@ def test_commit_no_rules(self): row_key = b'row_key' client = _Client() table = _Table(None, client=client) 
- row = self._makeOne(row_key, table) + row = self._make_one(row_key, table) self.assertEqual(row._rule_pb_list, []) # Patch the stub used by the API method. @@ -655,7 +655,7 @@ def test_commit_too_many_mutations(self): row_key = b'row_key' table = object() - row = self._makeOne(row_key, table) + row = self._make_one(row_key, table) row._rule_pb_list = [1, 2, 3] num_mutations = len(row._rule_pb_list) with _Monkey(MUT, MAX_MUTATIONS=num_mutations - 1): diff --git a/packages/google-cloud-bigtable/unit_tests/test_row_data.py b/packages/google-cloud-bigtable/unit_tests/test_row_data.py index b3985c8a4f8a..8e5d72125f5d 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_row_data.py +++ b/packages/google-cloud-bigtable/unit_tests/test_row_data.py @@ -23,7 +23,7 @@ def _get_target_class(): from google.cloud.bigtable.row_data import Cell return Cell - def _makeOne(self, *args, **kwargs): + def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def _from_pb_test_helper(self, labels=None): @@ -39,11 +39,11 @@ def _from_pb_test_helper(self, labels=None): if labels is None: cell_pb = data_v2_pb2.Cell( value=value, timestamp_micros=timestamp_micros) - cell_expected = self._makeOne(value, timestamp) + cell_expected = self._make_one(value, timestamp) else: cell_pb = data_v2_pb2.Cell( value=value, timestamp_micros=timestamp_micros, labels=labels) - cell_expected = self._makeOne(value, timestamp, labels=labels) + cell_expected = self._make_one(value, timestamp, labels=labels) klass = self._get_target_class() result = klass.from_pb(cell_pb) @@ -59,27 +59,27 @@ def test_from_pb_with_labels(self): def test_constructor(self): value = object() timestamp = object() - cell = self._makeOne(value, timestamp) + cell = self._make_one(value, timestamp) self.assertEqual(cell.value, value) self.assertEqual(cell.timestamp, timestamp) def test___eq__(self): value = object() timestamp = object() - cell1 = self._makeOne(value, timestamp) - cell2 = self._makeOne(value, timestamp) + cell1 = self._make_one(value, timestamp) + cell2 = self._make_one(value, timestamp) self.assertEqual(cell1, cell2) def test___eq__type_differ(self): - cell1 = self._makeOne(None, None) + cell1 = self._make_one(None, None) cell2 = object() self.assertNotEqual(cell1, cell2) def test___ne__same_value(self): value = object() timestamp = object() - cell1 = self._makeOne(value, timestamp) - cell2 = self._makeOne(value, timestamp) + cell1 = self._make_one(value, timestamp) + cell2 = self._make_one(value, timestamp) comparison_val = (cell1 != cell2) self.assertFalse(comparison_val) @@ -87,8 +87,8 @@ def test___ne__(self): value1 = 'value1' value2 = 'value2' timestamp = object() - cell1 = self._makeOne(value1, timestamp) - cell2 = self._makeOne(value2, timestamp) + cell1 = self._make_one(value1, timestamp) + cell2 = self._make_one(value2, timestamp) self.assertNotEqual(cell1, cell2) @@ -99,45 +99,45 @@ def _get_target_class(): from google.cloud.bigtable.row_data import PartialRowData return PartialRowData - def _makeOne(self, *args, **kwargs): + def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def test_constructor(self): row_key = object() - partial_row_data = self._makeOne(row_key) + partial_row_data = self._make_one(row_key) self.assertIs(partial_row_data._row_key, row_key) self.assertEqual(partial_row_data._cells, {}) def test___eq__(self): row_key = object() - partial_row_data1 = self._makeOne(row_key) - partial_row_data2 = self._makeOne(row_key) + partial_row_data1 = 
self._make_one(row_key) + partial_row_data2 = self._make_one(row_key) self.assertEqual(partial_row_data1, partial_row_data2) def test___eq__type_differ(self): - partial_row_data1 = self._makeOne(None) + partial_row_data1 = self._make_one(None) partial_row_data2 = object() self.assertNotEqual(partial_row_data1, partial_row_data2) def test___ne__same_value(self): row_key = object() - partial_row_data1 = self._makeOne(row_key) - partial_row_data2 = self._makeOne(row_key) + partial_row_data1 = self._make_one(row_key) + partial_row_data2 = self._make_one(row_key) comparison_val = (partial_row_data1 != partial_row_data2) self.assertFalse(comparison_val) def test___ne__(self): row_key1 = object() - partial_row_data1 = self._makeOne(row_key1) + partial_row_data1 = self._make_one(row_key1) row_key2 = object() - partial_row_data2 = self._makeOne(row_key2) + partial_row_data2 = self._make_one(row_key2) self.assertNotEqual(partial_row_data1, partial_row_data2) def test___ne__cells(self): row_key = object() - partial_row_data1 = self._makeOne(row_key) + partial_row_data1 = self._make_one(row_key) partial_row_data1._cells = object() - partial_row_data2 = self._makeOne(row_key) + partial_row_data2 = self._make_one(row_key) self.assertNotEqual(partial_row_data1, partial_row_data2) def test_to_dict(self): @@ -151,7 +151,7 @@ def test_to_dict(self): qual2 = b'col2' qual3 = b'col3' - partial_row_data = self._makeOne(None) + partial_row_data = self._make_one(None) partial_row_data._cells = { family_name1: { qual1: cell1, @@ -171,7 +171,7 @@ def test_to_dict(self): self.assertEqual(result, expected_result) def test_cells_property(self): - partial_row_data = self._makeOne(None) + partial_row_data = self._make_one(None) cells = {1: 2} partial_row_data._cells = cells # Make sure we get a copy, not the original. 
@@ -180,7 +180,7 @@ def test_cells_property(self): def test_row_key_getter(self): row_key = object() - partial_row_data = self._makeOne(row_key) + partial_row_data = self._make_one(row_key) self.assertIs(partial_row_data.row_key, row_key) @@ -207,59 +207,59 @@ def consume_next(self): return FakePartialRowsData - def _makeOne(self, *args, **kwargs): + def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def test_constructor(self): response_iterator = object() - partial_rows_data = self._makeOne(response_iterator) + partial_rows_data = self._make_one(response_iterator) self.assertIs(partial_rows_data._response_iterator, response_iterator) self.assertEqual(partial_rows_data._rows, {}) def test___eq__(self): response_iterator = object() - partial_rows_data1 = self._makeOne(response_iterator) - partial_rows_data2 = self._makeOne(response_iterator) + partial_rows_data1 = self._make_one(response_iterator) + partial_rows_data2 = self._make_one(response_iterator) self.assertEqual(partial_rows_data1, partial_rows_data2) def test___eq__type_differ(self): - partial_rows_data1 = self._makeOne(None) + partial_rows_data1 = self._make_one(None) partial_rows_data2 = object() self.assertNotEqual(partial_rows_data1, partial_rows_data2) def test___ne__same_value(self): response_iterator = object() - partial_rows_data1 = self._makeOne(response_iterator) - partial_rows_data2 = self._makeOne(response_iterator) + partial_rows_data1 = self._make_one(response_iterator) + partial_rows_data2 = self._make_one(response_iterator) comparison_val = (partial_rows_data1 != partial_rows_data2) self.assertFalse(comparison_val) def test___ne__(self): response_iterator1 = object() - partial_rows_data1 = self._makeOne(response_iterator1) + partial_rows_data1 = self._make_one(response_iterator1) response_iterator2 = object() - partial_rows_data2 = self._makeOne(response_iterator2) + partial_rows_data2 = self._make_one(response_iterator2) self.assertNotEqual(partial_rows_data1, partial_rows_data2) def test_state_start(self): - prd = self._makeOne([]) + prd = self._make_one([]) self.assertEqual(prd.state, prd.START) def test_state_new_row_w_row(self): - prd = self._makeOne([]) + prd = self._make_one([]) prd._last_scanned_row_key = '' prd._row = object() self.assertEqual(prd.state, prd.NEW_ROW) def test_rows_getter(self): - partial_rows_data = self._makeOne(None) + partial_rows_data = self._make_one(None) partial_rows_data._rows = value = object() self.assertIs(partial_rows_data.rows, value) def test_cancel(self): response_iterator = _MockCancellableIterator() - partial_rows_data = self._makeOne(response_iterator) + partial_rows_data = self._make_one(response_iterator) self.assertEqual(response_iterator.cancel_calls, 0) partial_rows_data.cancel() self.assertEqual(response_iterator.cancel_calls, 1) @@ -291,7 +291,7 @@ def test_consume_all_with_max_loops(self): list(response_iterator.iter_values), [value2, value3]) def test__copy_from_current_unset(self): - prd = self._makeOne([]) + prd = self._make_one([]) chunks = _generate_cell_chunks(['']) chunk = chunks[0] prd._copy_from_current(chunk) @@ -307,7 +307,7 @@ def test__copy_from_current_blank(self): QUALIFIER = b'C' TIMESTAMP_MICROS = 100 LABELS = ['L1', 'L2'] - prd = self._makeOne([]) + prd = self._make_one([]) prd._cell = _PartialCellData() chunks = _generate_cell_chunks(['']) chunk = chunks[0] @@ -324,7 +324,7 @@ def test__copy_from_current_blank(self): self.assertEqual(chunk.labels, LABELS) def test__copy_from_previous_unset(self): - prd = 
self._makeOne([]) + prd = self._make_one([]) cell = _PartialCellData() prd._copy_from_previous(cell) self.assertEqual(cell.row_key, '') @@ -339,7 +339,7 @@ def test__copy_from_previous_blank(self): QUALIFIER = b'C' TIMESTAMP_MICROS = 100 LABELS = ['L1', 'L2'] - prd = self._makeOne([]) + prd = self._make_one([]) cell = _PartialCellData( row_key=ROW_KEY, family_name=FAMILY_NAME, @@ -361,7 +361,7 @@ def test__copy_from_previous_filled(self): QUALIFIER = b'C' TIMESTAMP_MICROS = 100 LABELS = ['L1', 'L2'] - prd = self._makeOne([]) + prd = self._make_one([]) prd._previous_cell = _PartialCellData( row_key=ROW_KEY, family_name=FAMILY_NAME, @@ -379,7 +379,7 @@ def test__copy_from_previous_filled(self): def test__save_row_no_cell(self): ROW_KEY = 'RK' - prd = self._makeOne([]) + prd = self._make_one([]) row = prd._row = _Dummy(row_key=ROW_KEY) prd._cell = None prd._save_current_row() @@ -389,7 +389,7 @@ def test_invalid_last_scanned_row_key_on_start(self): from google.cloud.bigtable.row_data import InvalidReadRowsResponse response = _ReadRowsResponseV2(chunks=(), last_scanned_row_key='ABC') iterator = _MockCancellableIterator(response) - prd = self._makeOne(iterator) + prd = self._make_one(iterator) with self.assertRaises(InvalidReadRowsResponse): prd.consume_next() @@ -397,7 +397,7 @@ def test_valid_last_scanned_row_key_on_start(self): response = _ReadRowsResponseV2( chunks=(), last_scanned_row_key='AFTER') iterator = _MockCancellableIterator(response) - prd = self._makeOne(iterator) + prd = self._make_one(iterator) prd._last_scanned_row_key = 'BEFORE' prd.consume_next() self.assertEqual(prd._last_scanned_row_key, 'AFTER') @@ -407,7 +407,7 @@ def test_invalid_empty_chunk(self): chunks = _generate_cell_chunks(['']) response = _ReadRowsResponseV2(chunks) iterator = _MockCancellableIterator(response) - prd = self._makeOne(iterator) + prd = self._make_one(iterator) with self.assertRaises(InvalidChunk): prd.consume_next() @@ -420,7 +420,7 @@ def test_invalid_empty_second_chunk(self): first.qualifier.value = b'C' response = _ReadRowsResponseV2(chunks) iterator = _MockCancellableIterator(response) - prd = self._makeOne(iterator) + prd = self._make_one(iterator) with self.assertRaises(InvalidChunk): prd.consume_next() @@ -434,7 +434,7 @@ def _get_target_class(): from google.cloud.bigtable.row_data import PartialRowsData return PartialRowsData - def _makeOne(self, *args, **kwargs): + def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def _load_json_test(self, test_name): @@ -455,7 +455,7 @@ def _fail_during_consume(self, testcase_name): chunks, results = self._load_json_test(testcase_name) response = _ReadRowsResponseV2(chunks) iterator = _MockCancellableIterator(response) - prd = self._makeOne(iterator) + prd = self._make_one(iterator) with self.assertRaises(InvalidChunk): prd.consume_next() expected_result = self._sort_flattend_cells( @@ -511,7 +511,7 @@ def _incomplete_final_row(self, testcase_name): chunks, results = self._load_json_test(testcase_name) response = _ReadRowsResponseV2(chunks) iterator = _MockCancellableIterator(response) - prd = self._makeOne(iterator) + prd = self._make_one(iterator) prd.consume_next() self.assertEqual(prd.state, prd.ROW_IN_PROGRESS) expected_result = self._sort_flattend_cells( @@ -533,7 +533,7 @@ def _match_results(self, testcase_name, expected_result=_marker): chunks, results = self._load_json_test(testcase_name) response = _ReadRowsResponseV2(chunks) iterator = _MockCancellableIterator(response) - prd = self._makeOne(iterator) + prd 
= self._make_one(iterator) prd.consume_next() flattened = self._sort_flattend_cells(_flatten_cells(prd)) if expected_result is self._marker: diff --git a/packages/google-cloud-bigtable/unit_tests/test_row_filters.py b/packages/google-cloud-bigtable/unit_tests/test_row_filters.py index 560bd3316025..bb3d92edf491 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_row_filters.py +++ b/packages/google-cloud-bigtable/unit_tests/test_row_filters.py @@ -23,30 +23,30 @@ def _get_target_class(): from google.cloud.bigtable.row_filters import _BoolFilter return _BoolFilter - def _makeOne(self, *args, **kwargs): + def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def test_constructor(self): flag = object() - row_filter = self._makeOne(flag) + row_filter = self._make_one(flag) self.assertIs(row_filter.flag, flag) def test___eq__type_differ(self): flag = object() - row_filter1 = self._makeOne(flag) + row_filter1 = self._make_one(flag) row_filter2 = object() self.assertNotEqual(row_filter1, row_filter2) def test___eq__same_value(self): flag = object() - row_filter1 = self._makeOne(flag) - row_filter2 = self._makeOne(flag) + row_filter1 = self._make_one(flag) + row_filter2 = self._make_one(flag) self.assertEqual(row_filter1, row_filter2) def test___ne__same_value(self): flag = object() - row_filter1 = self._makeOne(flag) - row_filter2 = self._makeOne(flag) + row_filter1 = self._make_one(flag) + row_filter2 = self._make_one(flag) comparison_val = (row_filter1 != row_filter2) self.assertFalse(comparison_val) @@ -58,12 +58,12 @@ def _get_target_class(): from google.cloud.bigtable.row_filters import SinkFilter return SinkFilter - def _makeOne(self, *args, **kwargs): + def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def test_to_pb(self): flag = True - row_filter = self._makeOne(flag) + row_filter = self._make_one(flag) pb_val = row_filter.to_pb() expected_pb = _RowFilterPB(sink=flag) self.assertEqual(pb_val, expected_pb) @@ -76,12 +76,12 @@ def _get_target_class(): from google.cloud.bigtable.row_filters import PassAllFilter return PassAllFilter - def _makeOne(self, *args, **kwargs): + def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def test_to_pb(self): flag = True - row_filter = self._makeOne(flag) + row_filter = self._make_one(flag) pb_val = row_filter.to_pb() expected_pb = _RowFilterPB(pass_all_filter=flag) self.assertEqual(pb_val, expected_pb) @@ -94,12 +94,12 @@ def _get_target_class(): from google.cloud.bigtable.row_filters import BlockAllFilter return BlockAllFilter - def _makeOne(self, *args, **kwargs): + def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def test_to_pb(self): flag = True - row_filter = self._makeOne(flag) + row_filter = self._make_one(flag) pb_val = row_filter.to_pb() expected_pb = _RowFilterPB(block_all_filter=flag) self.assertEqual(pb_val, expected_pb) @@ -112,35 +112,35 @@ def _get_target_class(): from google.cloud.bigtable.row_filters import _RegexFilter return _RegexFilter - def _makeOne(self, *args, **kwargs): + def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def test_constructor(self): regex = b'abc' - row_filter = self._makeOne(regex) + row_filter = self._make_one(regex) self.assertIs(row_filter.regex, regex) def test_constructor_non_bytes(self): regex = u'abc' - row_filter = self._makeOne(regex) + row_filter = self._make_one(regex) self.assertEqual(row_filter.regex, b'abc') def 
test___eq__type_differ(self): regex = b'def-rgx' - row_filter1 = self._makeOne(regex) + row_filter1 = self._make_one(regex) row_filter2 = object() self.assertNotEqual(row_filter1, row_filter2) def test___eq__same_value(self): regex = b'trex-regex' - row_filter1 = self._makeOne(regex) - row_filter2 = self._makeOne(regex) + row_filter1 = self._make_one(regex) + row_filter2 = self._make_one(regex) self.assertEqual(row_filter1, row_filter2) def test___ne__same_value(self): regex = b'abc' - row_filter1 = self._makeOne(regex) - row_filter2 = self._makeOne(regex) + row_filter1 = self._make_one(regex) + row_filter2 = self._make_one(regex) comparison_val = (row_filter1 != row_filter2) self.assertFalse(comparison_val) @@ -152,12 +152,12 @@ def _get_target_class(): from google.cloud.bigtable.row_filters import RowKeyRegexFilter return RowKeyRegexFilter - def _makeOne(self, *args, **kwargs): + def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def test_to_pb(self): regex = b'row-key-regex' - row_filter = self._makeOne(regex) + row_filter = self._make_one(regex) pb_val = row_filter.to_pb() expected_pb = _RowFilterPB(row_key_regex_filter=regex) self.assertEqual(pb_val, expected_pb) @@ -170,29 +170,29 @@ def _get_target_class(): from google.cloud.bigtable.row_filters import RowSampleFilter return RowSampleFilter - def _makeOne(self, *args, **kwargs): + def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def test_constructor(self): sample = object() - row_filter = self._makeOne(sample) + row_filter = self._make_one(sample) self.assertIs(row_filter.sample, sample) def test___eq__type_differ(self): sample = object() - row_filter1 = self._makeOne(sample) + row_filter1 = self._make_one(sample) row_filter2 = object() self.assertNotEqual(row_filter1, row_filter2) def test___eq__same_value(self): sample = object() - row_filter1 = self._makeOne(sample) - row_filter2 = self._makeOne(sample) + row_filter1 = self._make_one(sample) + row_filter2 = self._make_one(sample) self.assertEqual(row_filter1, row_filter2) def test_to_pb(self): sample = 0.25 - row_filter = self._makeOne(sample) + row_filter = self._make_one(sample) pb_val = row_filter.to_pb() expected_pb = _RowFilterPB(row_sample_filter=sample) self.assertEqual(pb_val, expected_pb) @@ -205,12 +205,12 @@ def _get_target_class(): from google.cloud.bigtable.row_filters import FamilyNameRegexFilter return FamilyNameRegexFilter - def _makeOne(self, *args, **kwargs): + def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def test_to_pb(self): regex = u'family-regex' - row_filter = self._makeOne(regex) + row_filter = self._make_one(regex) pb_val = row_filter.to_pb() expected_pb = _RowFilterPB(family_name_regex_filter=regex) self.assertEqual(pb_val, expected_pb) @@ -224,12 +224,12 @@ def _get_target_class(): ColumnQualifierRegexFilter) return ColumnQualifierRegexFilter - def _makeOne(self, *args, **kwargs): + def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def test_to_pb(self): regex = b'column-regex' - row_filter = self._makeOne(regex) + row_filter = self._make_one(regex) pb_val = row_filter.to_pb() expected_pb = _RowFilterPB( column_qualifier_regex_filter=regex) @@ -243,35 +243,35 @@ def _get_target_class(): from google.cloud.bigtable.row_filters import TimestampRange return TimestampRange - def _makeOne(self, *args, **kwargs): + def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def 
test_constructor(self): start = object() end = object() - time_range = self._makeOne(start=start, end=end) + time_range = self._make_one(start=start, end=end) self.assertIs(time_range.start, start) self.assertIs(time_range.end, end) def test___eq__(self): start = object() end = object() - time_range1 = self._makeOne(start=start, end=end) - time_range2 = self._makeOne(start=start, end=end) + time_range1 = self._make_one(start=start, end=end) + time_range2 = self._make_one(start=start, end=end) self.assertEqual(time_range1, time_range2) def test___eq__type_differ(self): start = object() end = object() - time_range1 = self._makeOne(start=start, end=end) + time_range1 = self._make_one(start=start, end=end) time_range2 = object() self.assertNotEqual(time_range1, time_range2) def test___ne__same_value(self): start = object() end = object() - time_range1 = self._makeOne(start=start, end=end) - time_range2 = self._makeOne(start=start, end=end) + time_range1 = self._make_one(start=start, end=end) + time_range2 = self._make_one(start=start, end=end) comparison_val = (time_range1 != time_range2) self.assertFalse(comparison_val) @@ -288,7 +288,7 @@ def _to_pb_helper(self, start_micros=None, end_micros=None): if end_micros is not None: end = _EPOCH + datetime.timedelta(microseconds=end_micros) pb_kwargs['end_timestamp_micros'] = end_micros - time_range = self._makeOne(start=start, end=end) + time_range = self._make_one(start=start, end=end) expected_pb = _TimestampRangePB(**pb_kwargs) self.assertEqual(time_range.to_pb(), expected_pb) @@ -318,31 +318,31 @@ def _get_target_class(): from google.cloud.bigtable.row_filters import TimestampRangeFilter return TimestampRangeFilter - def _makeOne(self, *args, **kwargs): + def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def test_constructor(self): range_ = object() - row_filter = self._makeOne(range_) + row_filter = self._make_one(range_) self.assertIs(row_filter.range_, range_) def test___eq__type_differ(self): range_ = object() - row_filter1 = self._makeOne(range_) + row_filter1 = self._make_one(range_) row_filter2 = object() self.assertNotEqual(row_filter1, row_filter2) def test___eq__same_value(self): range_ = object() - row_filter1 = self._makeOne(range_) - row_filter2 = self._makeOne(range_) + row_filter1 = self._make_one(range_) + row_filter2 = self._make_one(range_) self.assertEqual(row_filter1, row_filter2) def test_to_pb(self): from google.cloud.bigtable.row_filters import TimestampRange range_ = TimestampRange() - row_filter = self._makeOne(range_) + row_filter = self._make_one(range_) pb_val = row_filter.to_pb() expected_pb = _RowFilterPB( timestamp_range_filter=_TimestampRangePB()) @@ -356,12 +356,12 @@ def _get_target_class(): from google.cloud.bigtable.row_filters import ColumnRangeFilter return ColumnRangeFilter - def _makeOne(self, *args, **kwargs): + def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def test_constructor_defaults(self): column_family_id = object() - row_filter = self._makeOne(column_family_id) + row_filter = self._make_one(column_family_id) self.assertIs(row_filter.column_family_id, column_family_id) self.assertIsNone(row_filter.start_column) self.assertIsNone(row_filter.end_column) @@ -374,7 +374,7 @@ def test_constructor_explicit(self): end_column = object() inclusive_start = object() inclusive_end = object() - row_filter = self._makeOne( + row_filter = self._make_one( column_family_id, start_column=start_column, end_column=end_column, @@ -388,12 
+388,12 @@ def test_constructor_explicit(self): def test_constructor_bad_start(self): column_family_id = object() - self.assertRaises(ValueError, self._makeOne, + self.assertRaises(ValueError, self._make_one, column_family_id, inclusive_start=True) def test_constructor_bad_end(self): column_family_id = object() - self.assertRaises(ValueError, self._makeOne, + self.assertRaises(ValueError, self._make_one, column_family_id, inclusive_end=True) def test___eq__(self): @@ -402,12 +402,12 @@ def test___eq__(self): end_column = object() inclusive_start = object() inclusive_end = object() - row_filter1 = self._makeOne(column_family_id, + row_filter1 = self._make_one(column_family_id, start_column=start_column, end_column=end_column, inclusive_start=inclusive_start, inclusive_end=inclusive_end) - row_filter2 = self._makeOne(column_family_id, + row_filter2 = self._make_one(column_family_id, start_column=start_column, end_column=end_column, inclusive_start=inclusive_start, @@ -416,13 +416,13 @@ def test___eq__(self): def test___eq__type_differ(self): column_family_id = object() - row_filter1 = self._makeOne(column_family_id) + row_filter1 = self._make_one(column_family_id) row_filter2 = object() self.assertNotEqual(row_filter1, row_filter2) def test_to_pb(self): column_family_id = u'column-family-id' - row_filter = self._makeOne(column_family_id) + row_filter = self._make_one(column_family_id) col_range_pb = _ColumnRangePB(family_name=column_family_id) expected_pb = _RowFilterPB(column_range_filter=col_range_pb) self.assertEqual(row_filter.to_pb(), expected_pb) @@ -430,7 +430,7 @@ def test_to_pb(self): def test_to_pb_inclusive_start(self): column_family_id = u'column-family-id' column = b'column' - row_filter = self._makeOne(column_family_id, start_column=column) + row_filter = self._make_one(column_family_id, start_column=column) col_range_pb = _ColumnRangePB( family_name=column_family_id, start_qualifier_closed=column, @@ -441,7 +441,7 @@ def test_to_pb_inclusive_start(self): def test_to_pb_exclusive_start(self): column_family_id = u'column-family-id' column = b'column' - row_filter = self._makeOne(column_family_id, start_column=column, + row_filter = self._make_one(column_family_id, start_column=column, inclusive_start=False) col_range_pb = _ColumnRangePB( family_name=column_family_id, @@ -453,7 +453,7 @@ def test_to_pb_exclusive_start(self): def test_to_pb_inclusive_end(self): column_family_id = u'column-family-id' column = b'column' - row_filter = self._makeOne(column_family_id, end_column=column) + row_filter = self._make_one(column_family_id, end_column=column) col_range_pb = _ColumnRangePB( family_name=column_family_id, end_qualifier_closed=column, @@ -464,7 +464,7 @@ def test_to_pb_inclusive_end(self): def test_to_pb_exclusive_end(self): column_family_id = u'column-family-id' column = b'column' - row_filter = self._makeOne(column_family_id, end_column=column, + row_filter = self._make_one(column_family_id, end_column=column, inclusive_end=False) col_range_pb = _ColumnRangePB( family_name=column_family_id, @@ -481,12 +481,12 @@ def _get_target_class(): from google.cloud.bigtable.row_filters import ValueRegexFilter return ValueRegexFilter - def _makeOne(self, *args, **kwargs): + def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def test_to_pb(self): regex = b'value-regex' - row_filter = self._makeOne(regex) + row_filter = self._make_one(regex) pb_val = row_filter.to_pb() expected_pb = _RowFilterPB(value_regex_filter=regex) self.assertEqual(pb_val, 
expected_pb) @@ -499,11 +499,11 @@ def _get_target_class(): from google.cloud.bigtable.row_filters import ValueRangeFilter return ValueRangeFilter - def _makeOne(self, *args, **kwargs): + def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def test_constructor_defaults(self): - row_filter = self._makeOne() + row_filter = self._make_one() self.assertIsNone(row_filter.start_value) self.assertIsNone(row_filter.end_value) self.assertTrue(row_filter.inclusive_start) @@ -514,7 +514,7 @@ def test_constructor_explicit(self): end_value = object() inclusive_start = object() inclusive_end = object() - row_filter = self._makeOne(start_value=start_value, + row_filter = self._make_one(start_value=start_value, end_value=end_value, inclusive_start=inclusive_start, inclusive_end=inclusive_end) @@ -524,61 +524,61 @@ def test_constructor_explicit(self): self.assertIs(row_filter.inclusive_end, inclusive_end) def test_constructor_bad_start(self): - self.assertRaises(ValueError, self._makeOne, inclusive_start=True) + self.assertRaises(ValueError, self._make_one, inclusive_start=True) def test_constructor_bad_end(self): - self.assertRaises(ValueError, self._makeOne, inclusive_end=True) + self.assertRaises(ValueError, self._make_one, inclusive_end=True) def test___eq__(self): start_value = object() end_value = object() inclusive_start = object() inclusive_end = object() - row_filter1 = self._makeOne(start_value=start_value, + row_filter1 = self._make_one(start_value=start_value, end_value=end_value, inclusive_start=inclusive_start, inclusive_end=inclusive_end) - row_filter2 = self._makeOne(start_value=start_value, + row_filter2 = self._make_one(start_value=start_value, end_value=end_value, inclusive_start=inclusive_start, inclusive_end=inclusive_end) self.assertEqual(row_filter1, row_filter2) def test___eq__type_differ(self): - row_filter1 = self._makeOne() + row_filter1 = self._make_one() row_filter2 = object() self.assertNotEqual(row_filter1, row_filter2) def test_to_pb(self): - row_filter = self._makeOne() + row_filter = self._make_one() expected_pb = _RowFilterPB( value_range_filter=_ValueRangePB()) self.assertEqual(row_filter.to_pb(), expected_pb) def test_to_pb_inclusive_start(self): value = b'some-value' - row_filter = self._makeOne(start_value=value) + row_filter = self._make_one(start_value=value) val_range_pb = _ValueRangePB(start_value_closed=value) expected_pb = _RowFilterPB(value_range_filter=val_range_pb) self.assertEqual(row_filter.to_pb(), expected_pb) def test_to_pb_exclusive_start(self): value = b'some-value' - row_filter = self._makeOne(start_value=value, inclusive_start=False) + row_filter = self._make_one(start_value=value, inclusive_start=False) val_range_pb = _ValueRangePB(start_value_open=value) expected_pb = _RowFilterPB(value_range_filter=val_range_pb) self.assertEqual(row_filter.to_pb(), expected_pb) def test_to_pb_inclusive_end(self): value = b'some-value' - row_filter = self._makeOne(end_value=value) + row_filter = self._make_one(end_value=value) val_range_pb = _ValueRangePB(end_value_closed=value) expected_pb = _RowFilterPB(value_range_filter=val_range_pb) self.assertEqual(row_filter.to_pb(), expected_pb) def test_to_pb_exclusive_end(self): value = b'some-value' - row_filter = self._makeOne(end_value=value, inclusive_end=False) + row_filter = self._make_one(end_value=value, inclusive_end=False) val_range_pb = _ValueRangePB(end_value_open=value) expected_pb = _RowFilterPB(value_range_filter=val_range_pb) self.assertEqual(row_filter.to_pb(), 
expected_pb) @@ -591,30 +591,30 @@ def _get_target_class(): from google.cloud.bigtable.row_filters import _CellCountFilter return _CellCountFilter - def _makeOne(self, *args, **kwargs): + def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def test_constructor(self): num_cells = object() - row_filter = self._makeOne(num_cells) + row_filter = self._make_one(num_cells) self.assertIs(row_filter.num_cells, num_cells) def test___eq__type_differ(self): num_cells = object() - row_filter1 = self._makeOne(num_cells) + row_filter1 = self._make_one(num_cells) row_filter2 = object() self.assertNotEqual(row_filter1, row_filter2) def test___eq__same_value(self): num_cells = object() - row_filter1 = self._makeOne(num_cells) - row_filter2 = self._makeOne(num_cells) + row_filter1 = self._make_one(num_cells) + row_filter2 = self._make_one(num_cells) self.assertEqual(row_filter1, row_filter2) def test___ne__same_value(self): num_cells = object() - row_filter1 = self._makeOne(num_cells) - row_filter2 = self._makeOne(num_cells) + row_filter1 = self._make_one(num_cells) + row_filter2 = self._make_one(num_cells) comparison_val = (row_filter1 != row_filter2) self.assertFalse(comparison_val) @@ -626,12 +626,12 @@ def _get_target_class(): from google.cloud.bigtable.row_filters import CellsRowOffsetFilter return CellsRowOffsetFilter - def _makeOne(self, *args, **kwargs): + def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def test_to_pb(self): num_cells = 76 - row_filter = self._makeOne(num_cells) + row_filter = self._make_one(num_cells) pb_val = row_filter.to_pb() expected_pb = _RowFilterPB( cells_per_row_offset_filter=num_cells) @@ -645,12 +645,12 @@ def _get_target_class(): from google.cloud.bigtable.row_filters import CellsRowLimitFilter return CellsRowLimitFilter - def _makeOne(self, *args, **kwargs): + def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def test_to_pb(self): num_cells = 189 - row_filter = self._makeOne(num_cells) + row_filter = self._make_one(num_cells) pb_val = row_filter.to_pb() expected_pb = _RowFilterPB( cells_per_row_limit_filter=num_cells) @@ -664,12 +664,12 @@ def _get_target_class(): from google.cloud.bigtable.row_filters import CellsColumnLimitFilter return CellsColumnLimitFilter - def _makeOne(self, *args, **kwargs): + def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def test_to_pb(self): num_cells = 10 - row_filter = self._makeOne(num_cells) + row_filter = self._make_one(num_cells) pb_val = row_filter.to_pb() expected_pb = _RowFilterPB( cells_per_column_limit_filter=num_cells) @@ -684,12 +684,12 @@ def _get_target_class(): StripValueTransformerFilter) return StripValueTransformerFilter - def _makeOne(self, *args, **kwargs): + def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def test_to_pb(self): flag = True - row_filter = self._makeOne(flag) + row_filter = self._make_one(flag) pb_val = row_filter.to_pb() expected_pb = _RowFilterPB(strip_value_transformer=flag) self.assertEqual(pb_val, expected_pb) @@ -702,29 +702,29 @@ def _get_target_class(): from google.cloud.bigtable.row_filters import ApplyLabelFilter return ApplyLabelFilter - def _makeOne(self, *args, **kwargs): + def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def test_constructor(self): label = object() - row_filter = self._makeOne(label) + row_filter = self._make_one(label) self.assertIs(row_filter.label, 
label) def test___eq__type_differ(self): label = object() - row_filter1 = self._makeOne(label) + row_filter1 = self._make_one(label) row_filter2 = object() self.assertNotEqual(row_filter1, row_filter2) def test___eq__same_value(self): label = object() - row_filter1 = self._makeOne(label) - row_filter2 = self._makeOne(label) + row_filter1 = self._make_one(label) + row_filter2 = self._make_one(label) self.assertEqual(row_filter1, row_filter2) def test_to_pb(self): label = u'label' - row_filter = self._makeOne(label) + row_filter = self._make_one(label) pb_val = row_filter.to_pb() expected_pb = _RowFilterPB(apply_label_transformer=label) self.assertEqual(pb_val, expected_pb) @@ -737,27 +737,27 @@ def _get_target_class(): from google.cloud.bigtable.row_filters import _FilterCombination return _FilterCombination - def _makeOne(self, *args, **kwargs): + def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def test_constructor_defaults(self): - row_filter = self._makeOne() + row_filter = self._make_one() self.assertEqual(row_filter.filters, []) def test_constructor_explicit(self): filters = object() - row_filter = self._makeOne(filters=filters) + row_filter = self._make_one(filters=filters) self.assertIs(row_filter.filters, filters) def test___eq__(self): filters = object() - row_filter1 = self._makeOne(filters=filters) - row_filter2 = self._makeOne(filters=filters) + row_filter1 = self._make_one(filters=filters) + row_filter2 = self._make_one(filters=filters) self.assertEqual(row_filter1, row_filter2) def test___eq__type_differ(self): filters = object() - row_filter1 = self._makeOne(filters=filters) + row_filter1 = self._make_one(filters=filters) row_filter2 = object() self.assertNotEqual(row_filter1, row_filter2) @@ -769,7 +769,7 @@ def _get_target_class(): from google.cloud.bigtable.row_filters import RowFilterChain return RowFilterChain - def _makeOne(self, *args, **kwargs): + def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def test_to_pb(self): @@ -783,7 +783,7 @@ def test_to_pb(self): row_filter2 = RowSampleFilter(0.25) row_filter2_pb = row_filter2.to_pb() - row_filter3 = self._makeOne(filters=[row_filter1, row_filter2]) + row_filter3 = self._make_one(filters=[row_filter1, row_filter2]) filter_pb = row_filter3.to_pb() expected_pb = _RowFilterPB( @@ -802,13 +802,13 @@ def test_to_pb_nested(self): row_filter1 = StripValueTransformerFilter(True) row_filter2 = RowSampleFilter(0.25) - row_filter3 = self._makeOne(filters=[row_filter1, row_filter2]) + row_filter3 = self._make_one(filters=[row_filter1, row_filter2]) row_filter3_pb = row_filter3.to_pb() row_filter4 = CellsRowLimitFilter(11) row_filter4_pb = row_filter4.to_pb() - row_filter5 = self._makeOne(filters=[row_filter3, row_filter4]) + row_filter5 = self._make_one(filters=[row_filter3, row_filter4]) filter_pb = row_filter5.to_pb() expected_pb = _RowFilterPB( @@ -826,7 +826,7 @@ def _get_target_class(): from google.cloud.bigtable.row_filters import RowFilterUnion return RowFilterUnion - def _makeOne(self, *args, **kwargs): + def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def test_to_pb(self): @@ -840,7 +840,7 @@ def test_to_pb(self): row_filter2 = RowSampleFilter(0.25) row_filter2_pb = row_filter2.to_pb() - row_filter3 = self._makeOne(filters=[row_filter1, row_filter2]) + row_filter3 = self._make_one(filters=[row_filter1, row_filter2]) filter_pb = row_filter3.to_pb() expected_pb = _RowFilterPB( @@ -859,13 +859,13 @@ def 
test_to_pb_nested(self): row_filter1 = StripValueTransformerFilter(True) row_filter2 = RowSampleFilter(0.25) - row_filter3 = self._makeOne(filters=[row_filter1, row_filter2]) + row_filter3 = self._make_one(filters=[row_filter1, row_filter2]) row_filter3_pb = row_filter3.to_pb() row_filter4 = CellsRowLimitFilter(11) row_filter4_pb = row_filter4.to_pb() - row_filter5 = self._makeOne(filters=[row_filter3, row_filter4]) + row_filter5 = self._make_one(filters=[row_filter3, row_filter4]) filter_pb = row_filter5.to_pb() expected_pb = _RowFilterPB( @@ -883,14 +883,14 @@ def _get_target_class(): from google.cloud.bigtable.row_filters import ConditionalRowFilter return ConditionalRowFilter - def _makeOne(self, *args, **kwargs): + def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def test_constructor(self): base_filter = object() true_filter = object() false_filter = object() - cond_filter = self._makeOne(base_filter, + cond_filter = self._make_one(base_filter, true_filter=true_filter, false_filter=false_filter) self.assertIs(cond_filter.base_filter, base_filter) @@ -901,10 +901,10 @@ def test___eq__(self): base_filter = object() true_filter = object() false_filter = object() - cond_filter1 = self._makeOne(base_filter, + cond_filter1 = self._make_one(base_filter, true_filter=true_filter, false_filter=false_filter) - cond_filter2 = self._makeOne(base_filter, + cond_filter2 = self._make_one(base_filter, true_filter=true_filter, false_filter=false_filter) self.assertEqual(cond_filter1, cond_filter2) @@ -913,7 +913,7 @@ def test___eq__type_differ(self): base_filter = object() true_filter = object() false_filter = object() - cond_filter1 = self._makeOne(base_filter, + cond_filter1 = self._make_one(base_filter, true_filter=true_filter, false_filter=false_filter) cond_filter2 = object() @@ -934,7 +934,7 @@ def test_to_pb(self): row_filter3 = CellsRowOffsetFilter(11) row_filter3_pb = row_filter3.to_pb() - row_filter4 = self._makeOne(row_filter1, true_filter=row_filter2, + row_filter4 = self._make_one(row_filter1, true_filter=row_filter2, false_filter=row_filter3) filter_pb = row_filter4.to_pb() @@ -958,7 +958,7 @@ def test_to_pb_true_only(self): row_filter2 = RowSampleFilter(0.25) row_filter2_pb = row_filter2.to_pb() - row_filter3 = self._makeOne(row_filter1, true_filter=row_filter2) + row_filter3 = self._make_one(row_filter1, true_filter=row_filter2) filter_pb = row_filter3.to_pb() expected_pb = _RowFilterPB( @@ -980,7 +980,7 @@ def test_to_pb_false_only(self): row_filter2 = RowSampleFilter(0.25) row_filter2_pb = row_filter2.to_pb() - row_filter3 = self._makeOne(row_filter1, false_filter=row_filter2) + row_filter3 = self._make_one(row_filter1, false_filter=row_filter2) filter_pb = row_filter3.to_pb() expected_pb = _RowFilterPB( diff --git a/packages/google-cloud-bigtable/unit_tests/test_table.py b/packages/google-cloud-bigtable/unit_tests/test_table.py index 9d2a8ce6131b..08b9f51e0ab0 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_table.py +++ b/packages/google-cloud-bigtable/unit_tests/test_table.py @@ -34,14 +34,14 @@ def _get_target_class(): from google.cloud.bigtable.table import Table return Table - def _makeOne(self, *args, **kwargs): + def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def test_constructor(self): table_id = 'table-id' instance = object() - table = self._makeOne(table_id, instance) + table = self._make_one(table_id, instance) self.assertEqual(table.table_id, table_id) self.assertIs(table._instance, 
instance) @@ -50,7 +50,7 @@ def test_name_property(self): instance_name = 'instance_name' instance = _Instance(instance_name) - table = self._makeOne(table_id, instance) + table = self._make_one(table_id, instance) expected_name = instance_name + '/tables/' + table_id self.assertEqual(table.name, expected_name) @@ -59,7 +59,7 @@ def test_column_family_factory(self): table_id = 'table-id' gc_rule = object() - table = self._makeOne(table_id, None) + table = self._make_one(table_id, None) column_family_id = 'column_family_id' column_family = table.column_family(column_family_id, gc_rule=gc_rule) @@ -72,7 +72,7 @@ def test_row_factory_direct(self): from google.cloud.bigtable.row import DirectRow table_id = 'table-id' - table = self._makeOne(table_id, None) + table = self._make_one(table_id, None) row_key = b'row_key' row = table.row(row_key) @@ -84,7 +84,7 @@ def test_row_factory_conditional(self): from google.cloud.bigtable.row import ConditionalRow table_id = 'table-id' - table = self._makeOne(table_id, None) + table = self._make_one(table_id, None) row_key = b'row_key' filter_ = object() row = table.row(row_key, filter_=filter_) @@ -97,7 +97,7 @@ def test_row_factory_append(self): from google.cloud.bigtable.row import AppendRow table_id = 'table-id' - table = self._makeOne(table_id, None) + table = self._make_one(table_id, None) row_key = b'row_key' row = table.row(row_key, append=True) @@ -106,31 +106,31 @@ def test_row_factory_append(self): self.assertEqual(row._table, table) def test_row_factory_failure(self): - table = self._makeOne(self.TABLE_ID, None) + table = self._make_one(self.TABLE_ID, None) with self.assertRaises(ValueError): table.row(b'row_key', filter_=object(), append=True) def test___eq__(self): instance = object() - table1 = self._makeOne(self.TABLE_ID, instance) - table2 = self._makeOne(self.TABLE_ID, instance) + table1 = self._make_one(self.TABLE_ID, instance) + table2 = self._make_one(self.TABLE_ID, instance) self.assertEqual(table1, table2) def test___eq__type_differ(self): - table1 = self._makeOne(self.TABLE_ID, None) + table1 = self._make_one(self.TABLE_ID, None) table2 = object() self.assertNotEqual(table1, table2) def test___ne__same_value(self): instance = object() - table1 = self._makeOne(self.TABLE_ID, instance) - table2 = self._makeOne(self.TABLE_ID, instance) + table1 = self._make_one(self.TABLE_ID, instance) + table2 = self._make_one(self.TABLE_ID, instance) comparison_val = (table1 != table2) self.assertFalse(comparison_val) def test___ne__(self): - table1 = self._makeOne('table_id1', 'instance1') - table2 = self._makeOne('table_id2', 'instance2') + table1 = self._make_one('table_id1', 'instance1') + table2 = self._make_one('table_id2', 'instance2') self.assertNotEqual(table1, table2) def _create_test_helper(self, initial_split_keys, column_families=()): @@ -139,7 +139,7 @@ def _create_test_helper(self, initial_split_keys, column_families=()): client = _Client() instance = _Instance(self.INSTANCE_NAME, client=client) - table = self._makeOne(self.TABLE_ID, instance) + table = self._make_one(self.TABLE_ID, instance) # Create request_pb splits_pb = [ @@ -206,7 +206,7 @@ def _list_column_families_helper(self): client = _Client() instance = _Instance(self.INSTANCE_NAME, client=client) - table = self._makeOne(self.TABLE_ID, instance) + table = self._make_one(self.TABLE_ID, instance) # Create request_pb request_pb = _GetTableRequestPB(name=self.TABLE_NAME) @@ -244,7 +244,7 @@ def test_delete(self): client = _Client() instance = _Instance(self.INSTANCE_NAME, 
client=client) - table = self._makeOne(self.TABLE_ID, instance) + table = self._make_one(self.TABLE_ID, instance) # Create request_pb request_pb = _DeleteTableRequestPB(name=self.TABLE_NAME) @@ -274,7 +274,7 @@ def _read_row_helper(self, chunks, expected_result): client = _Client() instance = _Instance(self.INSTANCE_NAME, client=client) - table = self._makeOne(self.TABLE_ID, instance) + table = self._make_one(self.TABLE_ID, instance) # Create request_pb request_pb = object() # Returned by our mock. @@ -355,7 +355,7 @@ def test_read_rows(self): client = _Client() instance = _Instance(self.INSTANCE_NAME, client=client) - table = self._makeOne(self.TABLE_ID, instance) + table = self._make_one(self.TABLE_ID, instance) # Create request_pb request_pb = object() # Returned by our mock. @@ -403,7 +403,7 @@ def test_sample_row_keys(self): client = _Client() instance = _Instance(self.INSTANCE_NAME, client=client) - table = self._makeOne(self.TABLE_ID, instance) + table = self._make_one(self.TABLE_ID, instance) # Create request_pb request_pb = _SampleRowKeysRequestPB(table_name=self.TABLE_NAME) From db9022714de05f3d026758a6510e62a71f898c6a Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Thu, 10 Nov 2016 11:06:21 -0800 Subject: [PATCH 019/892] Changing all instances of _callFUT to _call_fut. Done via: $ git grep -l _callFUT | \ > xargs sed -i s/_callFUT/_call_fut/g --- .../unit_tests/test_client.py | 24 ++++++++--------- .../unit_tests/test_cluster.py | 4 +-- .../unit_tests/test_column_family.py | 26 +++++++++---------- .../unit_tests/test_instance.py | 6 ++--- .../unit_tests/test_row.py | 8 +++--- .../unit_tests/test_table.py | 18 ++++++------- 6 files changed, 43 insertions(+), 43 deletions(-) diff --git a/packages/google-cloud-bigtable/unit_tests/test_client.py b/packages/google-cloud-bigtable/unit_tests/test_client.py index 2b13d2a61a37..23aa223a046f 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_client.py +++ b/packages/google-cloud-bigtable/unit_tests/test_client.py @@ -18,7 +18,7 @@ class Test__make_data_stub(unittest.TestCase): - def _callFUT(self, client): + def _call_fut(self, client): from google.cloud.bigtable.client import _make_data_stub return _make_data_stub(client) @@ -38,7 +38,7 @@ def mock_make_secure_stub(*args): return fake_stub with _Monkey(MUT, make_secure_stub=mock_make_secure_stub): - result = self._callFUT(client) + result = self._call_fut(client) self.assertIs(result, fake_stub) self.assertEqual(make_secure_stub_args, [ @@ -65,7 +65,7 @@ def mock_make_insecure_stub(*args): return fake_stub with _Monkey(MUT, make_insecure_stub=mock_make_insecure_stub): - result = self._callFUT(client) + result = self._call_fut(client) self.assertIs(result, fake_stub) self.assertEqual(make_insecure_stub_args, [ @@ -78,7 +78,7 @@ def mock_make_insecure_stub(*args): class Test__make_instance_stub(unittest.TestCase): - def _callFUT(self, client): + def _call_fut(self, client): from google.cloud.bigtable.client import _make_instance_stub return _make_instance_stub(client) @@ -98,7 +98,7 @@ def mock_make_secure_stub(*args): return fake_stub with _Monkey(MUT, make_secure_stub=mock_make_secure_stub): - result = self._callFUT(client) + result = self._call_fut(client) self.assertIs(result, fake_stub) self.assertEqual(make_secure_stub_args, [ @@ -125,7 +125,7 @@ def mock_make_insecure_stub(*args): return fake_stub with _Monkey(MUT, make_insecure_stub=mock_make_insecure_stub): - result = self._callFUT(client) + result = self._call_fut(client) self.assertIs(result, fake_stub) 
self.assertEqual(make_insecure_stub_args, [ @@ -138,7 +138,7 @@ def mock_make_insecure_stub(*args): class Test__make_operations_stub(unittest.TestCase): - def _callFUT(self, client): + def _call_fut(self, client): from google.cloud.bigtable.client import _make_operations_stub return _make_operations_stub(client) @@ -160,7 +160,7 @@ def mock_make_secure_stub(*args): return fake_stub with _Monkey(MUT, make_secure_stub=mock_make_secure_stub): - result = self._callFUT(client) + result = self._call_fut(client) self.assertIs(result, fake_stub) self.assertEqual(make_secure_stub_args, [ @@ -189,7 +189,7 @@ def mock_make_insecure_stub(*args): return fake_stub with _Monkey(MUT, make_insecure_stub=mock_make_insecure_stub): - result = self._callFUT(client) + result = self._call_fut(client) self.assertIs(result, fake_stub) self.assertEqual(make_insecure_stub_args, [ @@ -202,7 +202,7 @@ def mock_make_insecure_stub(*args): class Test__make_table_stub(unittest.TestCase): - def _callFUT(self, client): + def _call_fut(self, client): from google.cloud.bigtable.client import _make_table_stub return _make_table_stub(client) @@ -222,7 +222,7 @@ def mock_make_secure_stub(*args): return fake_stub with _Monkey(MUT, make_secure_stub=mock_make_secure_stub): - result = self._callFUT(client) + result = self._call_fut(client) self.assertIs(result, fake_stub) self.assertEqual(make_secure_stub_args, [ @@ -249,7 +249,7 @@ def mock_make_insecure_stub(*args): return fake_stub with _Monkey(MUT, make_insecure_stub=mock_make_insecure_stub): - result = self._callFUT(client) + result = self._call_fut(client) self.assertIs(result, fake_stub) self.assertEqual(make_insecure_stub_args, [ diff --git a/packages/google-cloud-bigtable/unit_tests/test_cluster.py b/packages/google-cloud-bigtable/unit_tests/test_cluster.py index c8f99a6011e9..4a4e40cdf1fd 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_cluster.py +++ b/packages/google-cloud-bigtable/unit_tests/test_cluster.py @@ -374,7 +374,7 @@ def test_delete(self): class Test__prepare_create_request(unittest.TestCase): - def _callFUT(self, cluster): + def _call_fut(self, cluster): from google.cloud.bigtable.cluster import _prepare_create_request return _prepare_create_request(cluster) @@ -391,7 +391,7 @@ def test_it(self): cluster = Cluster(CLUSTER_ID, instance, serve_nodes=SERVE_NODES) - request_pb = self._callFUT(cluster) + request_pb = self._call_fut(cluster) self.assertEqual(request_pb.cluster_id, CLUSTER_ID) self.assertEqual(request_pb.parent, instance.name) diff --git a/packages/google-cloud-bigtable/unit_tests/test_column_family.py b/packages/google-cloud-bigtable/unit_tests/test_column_family.py index af3249fd7e3f..d10b12703a65 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_column_family.py +++ b/packages/google-cloud-bigtable/unit_tests/test_column_family.py @@ -18,7 +18,7 @@ class Test__timedelta_to_duration_pb(unittest.TestCase): - def _callFUT(self, *args, **kwargs): + def _call_fut(self, *args, **kwargs): from google.cloud.bigtable.column_family import ( _timedelta_to_duration_pb) return _timedelta_to_duration_pb(*args, **kwargs) @@ -30,7 +30,7 @@ def test_it(self): seconds = microseconds = 1 timedelta_val = datetime.timedelta(seconds=seconds, microseconds=microseconds) - result = self._callFUT(timedelta_val) + result = self._call_fut(timedelta_val) self.assertIsInstance(result, duration_pb2.Duration) self.assertEqual(result.seconds, seconds) self.assertEqual(result.nanos, 1000 * microseconds) @@ -43,7 +43,7 @@ def 
test_with_negative_microseconds(self): microseconds = -5 timedelta_val = datetime.timedelta(seconds=seconds, microseconds=microseconds) - result = self._callFUT(timedelta_val) + result = self._call_fut(timedelta_val) self.assertIsInstance(result, duration_pb2.Duration) self.assertEqual(result.seconds, seconds - 1) self.assertEqual(result.nanos, 10**9 + 1000 * microseconds) @@ -56,7 +56,7 @@ def test_with_negative_seconds(self): microseconds = 5 timedelta_val = datetime.timedelta(seconds=seconds, microseconds=microseconds) - result = self._callFUT(timedelta_val) + result = self._call_fut(timedelta_val) self.assertIsInstance(result, duration_pb2.Duration) self.assertEqual(result.seconds, seconds + 1) self.assertEqual(result.nanos, -(10**9 - 1000 * microseconds)) @@ -64,7 +64,7 @@ def test_with_negative_seconds(self): class Test__duration_pb_to_timedelta(unittest.TestCase): - def _callFUT(self, *args, **kwargs): + def _call_fut(self, *args, **kwargs): from google.cloud.bigtable.column_family import ( _duration_pb_to_timedelta) return _duration_pb_to_timedelta(*args, **kwargs) @@ -78,7 +78,7 @@ def test_it(self): nanos=1000 * microseconds) timedelta_val = datetime.timedelta(seconds=seconds, microseconds=microseconds) - result = self._callFUT(duration_pb) + result = self._call_fut(duration_pb) self.assertIsInstance(result, datetime.timedelta) self.assertEqual(result, timedelta_val) @@ -575,21 +575,21 @@ def test_delete(self): class Test__gc_rule_from_pb(unittest.TestCase): - def _callFUT(self, *args, **kwargs): + def _call_fut(self, *args, **kwargs): from google.cloud.bigtable.column_family import _gc_rule_from_pb return _gc_rule_from_pb(*args, **kwargs) def test_empty(self): gc_rule_pb = _GcRulePB() - self.assertIsNone(self._callFUT(gc_rule_pb)) + self.assertIsNone(self._call_fut(gc_rule_pb)) def test_max_num_versions(self): from google.cloud.bigtable.column_family import MaxVersionsGCRule orig_rule = MaxVersionsGCRule(1) gc_rule_pb = orig_rule.to_pb() - result = self._callFUT(gc_rule_pb) + result = self._call_fut(gc_rule_pb) self.assertIsInstance(result, MaxVersionsGCRule) self.assertEqual(result, orig_rule) @@ -599,7 +599,7 @@ def test_max_age(self): orig_rule = MaxAgeGCRule(datetime.timedelta(seconds=1)) gc_rule_pb = orig_rule.to_pb() - result = self._callFUT(gc_rule_pb) + result = self._call_fut(gc_rule_pb) self.assertIsInstance(result, MaxAgeGCRule) self.assertEqual(result, orig_rule) @@ -613,7 +613,7 @@ def test_union(self): rule2 = MaxAgeGCRule(datetime.timedelta(seconds=1)) orig_rule = GCRuleUnion([rule1, rule2]) gc_rule_pb = orig_rule.to_pb() - result = self._callFUT(gc_rule_pb) + result = self._call_fut(gc_rule_pb) self.assertIsInstance(result, GCRuleUnion) self.assertEqual(result, orig_rule) @@ -627,7 +627,7 @@ def test_intersection(self): rule2 = MaxAgeGCRule(datetime.timedelta(seconds=1)) orig_rule = GCRuleIntersection([rule1, rule2]) gc_rule_pb = orig_rule.to_pb() - result = self._callFUT(gc_rule_pb) + result = self._call_fut(gc_rule_pb) self.assertIsInstance(result, GCRuleIntersection) self.assertEqual(result, orig_rule) @@ -642,7 +642,7 @@ def WhichOneof(cls, name): return 'unknown' self.assertEqual(MockProto.names, []) - self.assertRaises(ValueError, self._callFUT, MockProto) + self.assertRaises(ValueError, self._call_fut, MockProto) self.assertEqual(MockProto.names, ['rule']) diff --git a/packages/google-cloud-bigtable/unit_tests/test_instance.py b/packages/google-cloud-bigtable/unit_tests/test_instance.py index a1dab7c6f31e..3821f9db75b6 100644 --- 
a/packages/google-cloud-bigtable/unit_tests/test_instance.py +++ b/packages/google-cloud-bigtable/unit_tests/test_instance.py @@ -514,7 +514,7 @@ class Test__prepare_create_request(unittest.TestCase): INSTANCE_NAME = PARENT + '/instances/' + INSTANCE_ID CLUSTER_NAME = INSTANCE_NAME + '/clusters/' + INSTANCE_ID - def _callFUT(self, instance, **kw): + def _call_fut(self, instance, **kw): from google.cloud.bigtable.instance import _prepare_create_request return _prepare_create_request(instance, **kw) @@ -529,7 +529,7 @@ def test_w_defaults(self): client = _Client(self.PROJECT) instance = Instance(self.INSTANCE_ID, client, self.LOCATION_ID) - request_pb = self._callFUT(instance) + request_pb = self._call_fut(instance) self.assertIsInstance(request_pb, messages_v2_pb.CreateInstanceRequest) self.assertEqual(request_pb.instance_id, self.INSTANCE_ID) @@ -558,7 +558,7 @@ def test_w_explicit_serve_nodes(self): display_name=DISPLAY_NAME, serve_nodes=SERVE_NODES) - request_pb = self._callFUT(instance) + request_pb = self._call_fut(instance) self.assertIsInstance(request_pb, messages_v2_pb.CreateInstanceRequest) diff --git a/packages/google-cloud-bigtable/unit_tests/test_row.py b/packages/google-cloud-bigtable/unit_tests/test_row.py index e8dd847ed811..c3321a12eec1 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_row.py +++ b/packages/google-cloud-bigtable/unit_tests/test_row.py @@ -665,7 +665,7 @@ def test_commit_too_many_mutations(self): class Test__parse_rmw_row_response(unittest.TestCase): - def _callFUT(self, row_response): + def _call_fut(self, row_response): from google.cloud.bigtable.row import _parse_rmw_row_response return _parse_rmw_row_response(row_response) @@ -745,12 +745,12 @@ def test_it(self): ], ) sample_input = _ReadModifyWriteRowResponsePB(row=response_row) - self.assertEqual(expected_output, self._callFUT(sample_input)) + self.assertEqual(expected_output, self._call_fut(sample_input)) class Test__parse_family_pb(unittest.TestCase): - def _callFUT(self, family_pb): + def _call_fut(self, family_pb): from google.cloud.bigtable.row import _parse_family_pb return _parse_family_pb(family_pb) @@ -802,7 +802,7 @@ def test_it(self): ), ], ) - self.assertEqual(expected_output, self._callFUT(sample_input)) + self.assertEqual(expected_output, self._call_fut(sample_input)) def _CheckAndMutateRowRequestPB(*args, **kw): diff --git a/packages/google-cloud-bigtable/unit_tests/test_table.py b/packages/google-cloud-bigtable/unit_tests/test_table.py index 08b9f51e0ab0..14e359b044d2 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_table.py +++ b/packages/google-cloud-bigtable/unit_tests/test_table.py @@ -429,7 +429,7 @@ def test_sample_row_keys(self): class Test__create_row_request(unittest.TestCase): - def _callFUT(self, table_name, row_key=None, start_key=None, end_key=None, + def _call_fut(self, table_name, row_key=None, start_key=None, end_key=None, filter_=None, limit=None): from google.cloud.bigtable.table import _create_row_request return _create_row_request( @@ -438,19 +438,19 @@ def _callFUT(self, table_name, row_key=None, start_key=None, end_key=None, def test_table_name_only(self): table_name = 'table_name' - result = self._callFUT(table_name) + result = self._call_fut(table_name) expected_result = _ReadRowsRequestPB( table_name=table_name) self.assertEqual(result, expected_result) def test_row_key_row_range_conflict(self): with self.assertRaises(ValueError): - self._callFUT(None, row_key=object(), end_key=object()) + self._call_fut(None, row_key=object(), 
end_key=object()) def test_row_key(self): table_name = 'table_name' row_key = b'row_key' - result = self._callFUT(table_name, row_key=row_key) + result = self._call_fut(table_name, row_key=row_key) expected_result = _ReadRowsRequestPB( table_name=table_name, ) @@ -460,7 +460,7 @@ def test_row_key(self): def test_row_range_start_key(self): table_name = 'table_name' start_key = b'start_key' - result = self._callFUT(table_name, start_key=start_key) + result = self._call_fut(table_name, start_key=start_key) expected_result = _ReadRowsRequestPB(table_name=table_name) expected_result.rows.row_ranges.add(start_key_closed=start_key) self.assertEqual(result, expected_result) @@ -468,7 +468,7 @@ def test_row_range_start_key(self): def test_row_range_end_key(self): table_name = 'table_name' end_key = b'end_key' - result = self._callFUT(table_name, end_key=end_key) + result = self._call_fut(table_name, end_key=end_key) expected_result = _ReadRowsRequestPB(table_name=table_name) expected_result.rows.row_ranges.add(end_key_open=end_key) self.assertEqual(result, expected_result) @@ -477,7 +477,7 @@ def test_row_range_both_keys(self): table_name = 'table_name' start_key = b'start_key' end_key = b'end_key' - result = self._callFUT(table_name, start_key=start_key, + result = self._call_fut(table_name, start_key=start_key, end_key=end_key) expected_result = _ReadRowsRequestPB(table_name=table_name) expected_result.rows.row_ranges.add( @@ -488,7 +488,7 @@ def test_with_filter(self): from google.cloud.bigtable.row_filters import RowSampleFilter table_name = 'table_name' row_filter = RowSampleFilter(0.33) - result = self._callFUT(table_name, filter_=row_filter) + result = self._call_fut(table_name, filter_=row_filter) expected_result = _ReadRowsRequestPB( table_name=table_name, filter=row_filter.to_pb(), @@ -498,7 +498,7 @@ def test_with_filter(self): def test_with_limit(self): table_name = 'table_name' limit = 1337 - result = self._callFUT(table_name, limit=limit) + result = self._call_fut(table_name, limit=limit) expected_result = _ReadRowsRequestPB( table_name=table_name, rows_limit=limit, From 0302148003ed55a8727925ed839c4417eb99cd57 Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Tue, 8 Nov 2016 21:02:17 -0800 Subject: [PATCH 020/892] Manually fixing up bad indents / long lines after renames. --- .../unit_tests/test_client.py | 24 ++++---- .../unit_tests/test_cluster.py | 6 +- .../unit_tests/test_column_family.py | 10 ++-- .../unit_tests/test_instance.py | 10 ++-- .../unit_tests/test_row_filters.py | 56 +++++++++---------- .../unit_tests/test_table.py | 4 +- 6 files changed, 55 insertions(+), 55 deletions(-) diff --git a/packages/google-cloud-bigtable/unit_tests/test_client.py b/packages/google-cloud-bigtable/unit_tests/test_client.py index 23aa223a046f..bd880c6dc669 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_client.py +++ b/packages/google-cloud-bigtable/unit_tests/test_client.py @@ -306,8 +306,8 @@ def _constructor_test_helper(self, expected_scopes, creds, _make_operations_stub=mock_make_operations_stub, _make_table_stub=mock_make_table_stub): client = self._make_one(project=self.PROJECT, credentials=creds, - read_only=read_only, admin=admin, - user_agent=user_agent) + read_only=read_only, admin=admin, + user_agent=user_agent) # Verify the mocks. 
self.assertEqual(mock_make_data_stub.calls, [client]) @@ -450,14 +450,14 @@ def test_credentials_getter(self): credentials = _Credentials() project = 'PROJECT' client = self._make_oneWithMocks(project=project, - credentials=credentials) + credentials=credentials) self.assertIs(client.credentials, credentials) def test_project_name_property(self): credentials = _Credentials() project = 'PROJECT' client = self._make_oneWithMocks(project=project, - credentials=credentials) + credentials=credentials) project_name = 'projects/' + project self.assertEqual(client.project_name, project_name) @@ -465,14 +465,14 @@ def test_instance_stub_getter(self): credentials = _Credentials() project = 'PROJECT' client = self._make_oneWithMocks(project=project, - credentials=credentials, admin=True) + credentials=credentials, admin=True) self.assertIs(client._instance_stub, client._instance_stub_internal) def test_instance_stub_non_admin_failure(self): credentials = _Credentials() project = 'PROJECT' client = self._make_oneWithMocks(project=project, - credentials=credentials, admin=False) + credentials=credentials, admin=False) with self.assertRaises(ValueError): getattr(client, '_instance_stub') @@ -480,7 +480,7 @@ def test_operations_stub_getter(self): credentials = _Credentials() project = 'PROJECT' client = self._make_oneWithMocks(project=project, - credentials=credentials, admin=True) + credentials=credentials, admin=True) self.assertIs(client._operations_stub, client._operations_stub_internal) @@ -488,7 +488,7 @@ def test_operations_stub_non_admin_failure(self): credentials = _Credentials() project = 'PROJECT' client = self._make_oneWithMocks(project=project, - credentials=credentials, admin=False) + credentials=credentials, admin=False) with self.assertRaises(ValueError): getattr(client, '_operations_stub') @@ -496,14 +496,14 @@ def test_table_stub_getter(self): credentials = _Credentials() project = 'PROJECT' client = self._make_oneWithMocks(project=project, - credentials=credentials, admin=True) + credentials=credentials, admin=True) self.assertIs(client._table_stub, client._table_stub_internal) def test_table_stub_non_admin_failure(self): credentials = _Credentials() project = 'PROJECT' client = self._make_oneWithMocks(project=project, - credentials=credentials, admin=False) + credentials=credentials, admin=False) with self.assertRaises(ValueError): getattr(client, '_table_stub') @@ -518,7 +518,7 @@ def test_instance_factory_defaults(self): DISPLAY_NAME = 'display-name' credentials = _Credentials() client = self._make_oneWithMocks(project=PROJECT, - credentials=credentials) + credentials=credentials) instance = client.instance(INSTANCE_ID, display_name=DISPLAY_NAME) @@ -540,7 +540,7 @@ def test_instance_factory_w_explicit_serve_nodes(self): SERVE_NODES = 5 credentials = _Credentials() client = self._make_oneWithMocks(project=PROJECT, - credentials=credentials) + credentials=credentials) instance = client.instance( INSTANCE_ID, display_name=DISPLAY_NAME, diff --git a/packages/google-cloud-bigtable/unit_tests/test_cluster.py b/packages/google-cloud-bigtable/unit_tests/test_cluster.py index 4a4e40cdf1fd..9730d9b5ab44 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_cluster.py +++ b/packages/google-cloud-bigtable/unit_tests/test_cluster.py @@ -49,7 +49,7 @@ def test_constructor_non_default(self): instance = _Instance(self.INSTANCE_ID, client) cluster = self._make_one(self.CLUSTER_ID, instance, - serve_nodes=SERVE_NODES) + serve_nodes=SERVE_NODES) self.assertEqual(cluster.cluster_id, 
self.CLUSTER_ID) self.assertIs(cluster._instance, instance) self.assertEqual(cluster.serve_nodes, SERVE_NODES) @@ -60,7 +60,7 @@ def test_copy(self): client = _Client(self.PROJECT) instance = _Instance(self.INSTANCE_ID, client) cluster = self._make_one(self.CLUSTER_ID, instance, - serve_nodes=SERVE_NODES) + serve_nodes=SERVE_NODES) new_cluster = cluster.copy() # Make sure the client copy succeeded. @@ -293,7 +293,7 @@ def test_update(self): client = _Client(self.PROJECT) instance = _Instance(self.INSTANCE_ID, client) cluster = self._make_one(self.CLUSTER_ID, instance, - serve_nodes=SERVE_NODES) + serve_nodes=SERVE_NODES) # Create request_pb request_pb = _ClusterPB( diff --git a/packages/google-cloud-bigtable/unit_tests/test_column_family.py b/packages/google-cloud-bigtable/unit_tests/test_column_family.py index d10b12703a65..90baf9193691 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_column_family.py +++ b/packages/google-cloud-bigtable/unit_tests/test_column_family.py @@ -369,9 +369,9 @@ def test___eq__(self): table = object() gc_rule = object() column_family1 = self._make_one(column_family_id, table, - gc_rule=gc_rule) + gc_rule=gc_rule) column_family2 = self._make_one(column_family_id, table, - gc_rule=gc_rule) + gc_rule=gc_rule) self.assertEqual(column_family1, column_family2) def test___eq__type_differ(self): @@ -384,9 +384,9 @@ def test___ne__same_value(self): table = object() gc_rule = object() column_family1 = self._make_one(column_family_id, table, - gc_rule=gc_rule) + gc_rule=gc_rule) column_family2 = self._make_one(column_family_id, table, - gc_rule=gc_rule) + gc_rule=gc_rule) comparison_val = (column_family1 != column_family2) self.assertFalse(comparison_val) @@ -406,7 +406,7 @@ def test_to_pb_with_rule(self): gc_rule = MaxVersionsGCRule(1) column_family = self._make_one('column_family_id', None, - gc_rule=gc_rule) + gc_rule=gc_rule) pb_val = column_family.to_pb() expected = _ColumnFamilyPB(gc_rule=gc_rule.to_pb()) self.assertEqual(pb_val, expected) diff --git a/packages/google-cloud-bigtable/unit_tests/test_instance.py b/packages/google-cloud-bigtable/unit_tests/test_instance.py index 3821f9db75b6..84bc0e6346ea 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_instance.py +++ b/packages/google-cloud-bigtable/unit_tests/test_instance.py @@ -54,7 +54,7 @@ def test_constructor_non_default(self): client = object() instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID, - display_name=display_name) + display_name=display_name) self.assertEqual(instance.instance_id, self.INSTANCE_ID) self.assertEqual(instance.display_name, display_name) self.assertIs(instance._client, client) @@ -64,7 +64,7 @@ def test_copy(self): client = _Client(self.PROJECT) instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID, - display_name=display_name) + display_name=display_name) new_instance = instance.copy() # Make sure the client copy succeeded. 
@@ -242,7 +242,7 @@ def test_create(self): NOW_PB = _datetime_to_pb_timestamp(NOW) client = _Client(self.PROJECT) instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID, - display_name=self.DISPLAY_NAME) + display_name=self.DISPLAY_NAME) # Create response_pb metadata = messages_v2_pb2.CreateInstanceMetadata(request_time=NOW_PB) @@ -296,7 +296,7 @@ def test_create_w_explicit_serve_nodes(self): client = _Client(self.PROJECT) instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID, - serve_nodes=SERVE_NODES) + serve_nodes=SERVE_NODES) # Create response_pb response_pb = operations_pb2.Operation(name=self.OP_NAME) @@ -332,7 +332,7 @@ def test_update(self): client = _Client(self.PROJECT) instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID, - display_name=self.DISPLAY_NAME) + display_name=self.DISPLAY_NAME) # Create request_pb request_pb = data_v2_pb2.Instance( diff --git a/packages/google-cloud-bigtable/unit_tests/test_row_filters.py b/packages/google-cloud-bigtable/unit_tests/test_row_filters.py index bb3d92edf491..a49911acc0c3 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_row_filters.py +++ b/packages/google-cloud-bigtable/unit_tests/test_row_filters.py @@ -403,15 +403,15 @@ def test___eq__(self): inclusive_start = object() inclusive_end = object() row_filter1 = self._make_one(column_family_id, - start_column=start_column, - end_column=end_column, - inclusive_start=inclusive_start, - inclusive_end=inclusive_end) + start_column=start_column, + end_column=end_column, + inclusive_start=inclusive_start, + inclusive_end=inclusive_end) row_filter2 = self._make_one(column_family_id, - start_column=start_column, - end_column=end_column, - inclusive_start=inclusive_start, - inclusive_end=inclusive_end) + start_column=start_column, + end_column=end_column, + inclusive_start=inclusive_start, + inclusive_end=inclusive_end) self.assertEqual(row_filter1, row_filter2) def test___eq__type_differ(self): @@ -442,7 +442,7 @@ def test_to_pb_exclusive_start(self): column_family_id = u'column-family-id' column = b'column' row_filter = self._make_one(column_family_id, start_column=column, - inclusive_start=False) + inclusive_start=False) col_range_pb = _ColumnRangePB( family_name=column_family_id, start_qualifier_open=column, @@ -465,7 +465,7 @@ def test_to_pb_exclusive_end(self): column_family_id = u'column-family-id' column = b'column' row_filter = self._make_one(column_family_id, end_column=column, - inclusive_end=False) + inclusive_end=False) col_range_pb = _ColumnRangePB( family_name=column_family_id, end_qualifier_open=column, @@ -515,9 +515,9 @@ def test_constructor_explicit(self): inclusive_start = object() inclusive_end = object() row_filter = self._make_one(start_value=start_value, - end_value=end_value, - inclusive_start=inclusive_start, - inclusive_end=inclusive_end) + end_value=end_value, + inclusive_start=inclusive_start, + inclusive_end=inclusive_end) self.assertIs(row_filter.start_value, start_value) self.assertIs(row_filter.end_value, end_value) self.assertIs(row_filter.inclusive_start, inclusive_start) @@ -535,13 +535,13 @@ def test___eq__(self): inclusive_start = object() inclusive_end = object() row_filter1 = self._make_one(start_value=start_value, - end_value=end_value, - inclusive_start=inclusive_start, - inclusive_end=inclusive_end) + end_value=end_value, + inclusive_start=inclusive_start, + inclusive_end=inclusive_end) row_filter2 = self._make_one(start_value=start_value, - end_value=end_value, - inclusive_start=inclusive_start, - 
inclusive_end=inclusive_end) + end_value=end_value, + inclusive_start=inclusive_start, + inclusive_end=inclusive_end) self.assertEqual(row_filter1, row_filter2) def test___eq__type_differ(self): @@ -891,8 +891,8 @@ def test_constructor(self): true_filter = object() false_filter = object() cond_filter = self._make_one(base_filter, - true_filter=true_filter, - false_filter=false_filter) + true_filter=true_filter, + false_filter=false_filter) self.assertIs(cond_filter.base_filter, base_filter) self.assertIs(cond_filter.true_filter, true_filter) self.assertIs(cond_filter.false_filter, false_filter) @@ -902,11 +902,11 @@ def test___eq__(self): true_filter = object() false_filter = object() cond_filter1 = self._make_one(base_filter, - true_filter=true_filter, - false_filter=false_filter) + true_filter=true_filter, + false_filter=false_filter) cond_filter2 = self._make_one(base_filter, - true_filter=true_filter, - false_filter=false_filter) + true_filter=true_filter, + false_filter=false_filter) self.assertEqual(cond_filter1, cond_filter2) def test___eq__type_differ(self): @@ -914,8 +914,8 @@ def test___eq__type_differ(self): true_filter = object() false_filter = object() cond_filter1 = self._make_one(base_filter, - true_filter=true_filter, - false_filter=false_filter) + true_filter=true_filter, + false_filter=false_filter) cond_filter2 = object() self.assertNotEqual(cond_filter1, cond_filter2) @@ -935,7 +935,7 @@ def test_to_pb(self): row_filter3_pb = row_filter3.to_pb() row_filter4 = self._make_one(row_filter1, true_filter=row_filter2, - false_filter=row_filter3) + false_filter=row_filter3) filter_pb = row_filter4.to_pb() expected_pb = _RowFilterPB( diff --git a/packages/google-cloud-bigtable/unit_tests/test_table.py b/packages/google-cloud-bigtable/unit_tests/test_table.py index 14e359b044d2..f96ddcc8e704 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_table.py +++ b/packages/google-cloud-bigtable/unit_tests/test_table.py @@ -430,7 +430,7 @@ def test_sample_row_keys(self): class Test__create_row_request(unittest.TestCase): def _call_fut(self, table_name, row_key=None, start_key=None, end_key=None, - filter_=None, limit=None): + filter_=None, limit=None): from google.cloud.bigtable.table import _create_row_request return _create_row_request( table_name, row_key=row_key, start_key=start_key, end_key=end_key, @@ -478,7 +478,7 @@ def test_row_range_both_keys(self): start_key = b'start_key' end_key = b'end_key' result = self._call_fut(table_name, start_key=start_key, - end_key=end_key) + end_key=end_key) expected_result = _ReadRowsRequestPB(table_name=table_name) expected_result.rows.row_ranges.add( start_key_closed=start_key, end_key_open=end_key) From 4a4594c345a31706da97c87f6d81800a3606a7d9 Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Thu, 10 Nov 2016 21:17:51 -0800 Subject: [PATCH 021/892] Adding quiet flag to pip command for local deps. 
--- packages/google-cloud-bigtable/tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/tox.ini b/packages/google-cloud-bigtable/tox.ini index f6336268a816..36f5e1d2a950 100644 --- a/packages/google-cloud-bigtable/tox.ini +++ b/packages/google-cloud-bigtable/tox.ini @@ -4,7 +4,7 @@ envlist = [testing] localdeps = - pip install --upgrade {toxinidir}/../core + pip install --quiet --upgrade {toxinidir}/../core deps = pytest covercmd = From ca5827258d55d999f6c5c74afd8e76cf4e7d74fa Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Mon, 7 Nov 2016 18:25:50 -0800 Subject: [PATCH 022/892] Updating connection -> _connection attribute in some packages. In particular: bigquery, bigtable and datastore. (The only change in bigtable was an import, and that attribute should probably go elsewhere.) --- packages/google-cloud-bigtable/google/cloud/bigtable/client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py index e6d8173f8b39..c2c160592228 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py @@ -41,7 +41,7 @@ from google.cloud.bigtable.instance import _EXISTING_INSTANCE_LOCATION_ID from google.cloud.client import _ClientFactoryMixin from google.cloud.client import _ClientProjectMixin -from google.cloud.connection import DEFAULT_USER_AGENT +from google.cloud._http import DEFAULT_USER_AGENT from google.cloud.credentials import get_credentials from google.cloud.environment_vars import BIGTABLE_EMULATOR From 4304e5fc08b2102c380646fb2942f63d5a972950 Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Mon, 14 Nov 2016 12:44:19 -0800 Subject: [PATCH 023/892] Upgrading core to version to 0.21.0. As a result, also upgrading the umbrella package and all packages to 0.21.0 (since they all depend on core). --- packages/google-cloud-bigtable/setup.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 6c36042e9484..2f4fe451fb28 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -50,13 +50,13 @@ REQUIREMENTS = [ - 'google-cloud-core >= 0.20.0', + 'google-cloud-core >= 0.21.0', 'grpcio >= 1.0.0, < 2.0dev', ] setup( name='google-cloud-bigtable', - version='0.20.0', + version='0.21.0', description='Python Client for Google Cloud Bigtable', long_description=README, namespace_packages=[ From b0b29c24ed2667510d84d6bbd9349fa4e50364a0 Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Mon, 14 Nov 2016 14:11:34 -0800 Subject: [PATCH 024/892] Need to install from local deps first. The `pip install --upgrade` still is needed to ensure freshness but by removing the filesystem paths from deps we made the initial install grab from PyPI (by mistake). This way, all local package deps are grabbed from the local filesystem. 
--- packages/google-cloud-bigtable/tox.ini | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/tox.ini b/packages/google-cloud-bigtable/tox.ini index 36f5e1d2a950..94006dd1c3ad 100644 --- a/packages/google-cloud-bigtable/tox.ini +++ b/packages/google-cloud-bigtable/tox.ini @@ -6,6 +6,7 @@ envlist = localdeps = pip install --quiet --upgrade {toxinidir}/../core deps = + {toxinidir}/../core pytest covercmd = py.test --quiet \ @@ -16,7 +17,6 @@ covercmd = [testenv] commands = - {[testing]localdeps} py.test --quiet {posargs} unit_tests deps = {[testing]deps} @@ -25,7 +25,6 @@ deps = basepython = python2.7 commands = - {[testing]localdeps} {[testing]covercmd} deps = {[testenv]deps} From 5f0c2ca9bf32ade80bb7b0ce4fd680cceb2319cc Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Mon, 14 Nov 2016 14:58:42 -0800 Subject: [PATCH 025/892] Fixing accidental removal of {localdeps} Also - adding RTD dependency for runtimeconfig. - adding local paths to umbrella tox config "deps" as was done in #2733. --- packages/google-cloud-bigtable/tox.ini | 2 ++ 1 file changed, 2 insertions(+) diff --git a/packages/google-cloud-bigtable/tox.ini b/packages/google-cloud-bigtable/tox.ini index 94006dd1c3ad..ac77cf8fe314 100644 --- a/packages/google-cloud-bigtable/tox.ini +++ b/packages/google-cloud-bigtable/tox.ini @@ -17,6 +17,7 @@ covercmd = [testenv] commands = + {[testing]localdeps} py.test --quiet {posargs} unit_tests deps = {[testing]deps} @@ -25,6 +26,7 @@ deps = basepython = python2.7 commands = + {[testing]localdeps} {[testing]covercmd} deps = {[testenv]deps} From 29e11aa79056de4e011fd2714146de2dd332f88d Mon Sep 17 00:00:00 2001 From: Misha Brukman Date: Sat, 19 Nov 2016 00:11:57 -0500 Subject: [PATCH 026/892] Fixed grammar in comments and docs. Summary of edits: * "a instance" -> "an instance" consistency (some are correct, some not) * removed extraneous words, added missing words in sentences * minor grammatical edits for readability --- .../google/cloud/bigtable/client.py | 2 +- .../google/cloud/bigtable/instance.py | 10 +++++----- .../google-cloud-bigtable/google/cloud/bigtable/row.py | 4 ++-- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py index c2c160592228..18d9c685916f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py @@ -19,7 +19,7 @@ In the hierarchy of API concepts * a :class:`Client` owns an :class:`.Instance` -* a :class:`.Instance` owns a :class:`~google.cloud.bigtable.table.Table` +* an :class:`.Instance` owns a :class:`~google.cloud.bigtable.table.Table` * a :class:`~google.cloud.bigtable.table.Table` owns a :class:`~.column_family.ColumnFamily` * a :class:`~google.cloud.bigtable.table.Table` owns a :class:`~.row.Row` diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py index 41dda563c843..1de3cbcea814 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-"""User friendly container for Google Cloud Bigtable Instance.""" +"""User-friendly container for Google Cloud Bigtable Instance.""" import re @@ -67,7 +67,7 @@ def _prepare_create_request(instance): class Instance(object): """Representation of a Google Cloud Bigtable Instance. - We can use a :class:`Instance` to: + We can use an :class:`Instance` to: * :meth:`reload` itself * :meth:`create` itself @@ -123,10 +123,10 @@ def _update_from_pb(self, instance_pb): @classmethod def from_pb(cls, instance_pb, client): - """Creates a instance instance from a protobuf. + """Creates an instance instance from a protobuf. :type instance_pb: :class:`instance_pb2.Instance` - :param instance_pb: A instance protobuf object. + :param instance_pb: An instance protobuf object. :type client: :class:`Client ` :param client: The client that owns the instance. @@ -261,7 +261,7 @@ def update(self): def delete(self): """Delete this instance. - Marks a instance and all of its tables for permanent deletion + Marks an instance and all of its tables for permanent deletion in 7 days. Immediately upon completion of the request: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row.py index 8ec1c63b1327..5e9075ef8eec 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""User friendly container for Google Cloud Bigtable Row.""" +"""User-friendly container for Google Cloud Bigtable Row.""" import struct @@ -657,7 +657,7 @@ class AppendRow(Row): The first works by appending bytes and the second by incrementing an integer (stored in the cell as 8 bytes). In either case, if the cell is empty, assumes the default empty value (empty string for - bytes or and 0 for integer). + bytes or 0 for integer). :type row_key: bytes :param row_key: The key for the current row. From bea18408cb11f751bf4e3dd71e6095b6065f9dbf Mon Sep 17 00:00:00 2001 From: Thomas Schultz Date: Wed, 16 Nov 2016 11:09:27 -0500 Subject: [PATCH 027/892] Set core version compatible specifier to packages. --- packages/google-cloud-bigtable/setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 2f4fe451fb28..a5e7387ac813 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -50,7 +50,7 @@ REQUIREMENTS = [ - 'google-cloud-core >= 0.21.0', + 'google-cloud-core >= 0.21.0, < 0.22dev', 'grpcio >= 1.0.0, < 2.0dev', ] From 2e3570ed1181305ae7582c5aefc43a5c296acc08 Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Fri, 2 Dec 2016 15:02:25 -0800 Subject: [PATCH 028/892] Switch from oauth2client to google-auth (#2726) * Removes all use of oauth2client from every package and tests. * Updates core to use google-auth's default credentials, project ID, and scoping logic. * Updates bigtable to use google-auth's scoping logic. 
--- .../google/cloud/bigtable/client.py | 17 ++-- packages/google-cloud-bigtable/tox.ini | 1 + .../unit_tests/test_client.py | 79 +++++++++---------- 3 files changed, 46 insertions(+), 51 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py index 18d9c685916f..037b6efa15ea 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py @@ -29,6 +29,7 @@ import os +import google.auth.credentials from google.longrunning import operations_grpc from google.cloud._helpers import make_insecure_stub @@ -201,14 +202,16 @@ def __init__(self, project=None, credentials=None, else: scopes.append(DATA_SCOPE) + self._read_only = bool(read_only) + if admin: scopes.append(ADMIN_SCOPE) self._admin = bool(admin) - try: - credentials = credentials.create_scoped(scopes) - except AttributeError: - pass + + credentials = google.auth.credentials.with_scopes_if_required( + credentials, scopes) + self._credentials = credentials self.user_agent = user_agent self.emulator_host = os.getenv(BIGTABLE_EMULATOR) @@ -229,12 +232,10 @@ def copy(self): :rtype: :class:`.Client` :returns: A copy of the current client. """ - credentials = self._credentials - copied_creds = credentials.create_scoped(credentials.scopes) return self.__class__( self.project, - copied_creds, - READ_ONLY_SCOPE in copied_creds.scopes, + self._credentials, + self._read_only, self._admin, self.user_agent, ) diff --git a/packages/google-cloud-bigtable/tox.ini b/packages/google-cloud-bigtable/tox.ini index ac77cf8fe314..257e21288b27 100644 --- a/packages/google-cloud-bigtable/tox.ini +++ b/packages/google-cloud-bigtable/tox.ini @@ -7,6 +7,7 @@ localdeps = pip install --quiet --upgrade {toxinidir}/../core deps = {toxinidir}/../core + mock pytest covercmd = py.test --quiet \ diff --git a/packages/google-cloud-bigtable/unit_tests/test_client.py b/packages/google-cloud-bigtable/unit_tests/test_client.py index bd880c6dc669..a6b82cde0aae 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_client.py +++ b/packages/google-cloud-bigtable/unit_tests/test_client.py @@ -26,7 +26,7 @@ def test_without_emulator(self): from google.cloud._testing import _Monkey from google.cloud.bigtable import client as MUT - credentials = _Credentials() + credentials = object() user_agent = 'you-sir-age-int' client = _Client(credentials, user_agent) @@ -86,7 +86,7 @@ def test_without_emulator(self): from google.cloud._testing import _Monkey from google.cloud.bigtable import client as MUT - credentials = _Credentials() + credentials = object() user_agent = 'you-sir-age-int' client = _Client(credentials, user_agent) @@ -148,7 +148,7 @@ def test_without_emulator(self): from google.cloud._testing import _Monkey from google.cloud.bigtable import client as MUT - credentials = _Credentials() + credentials = object() user_agent = 'you-sir-age-int' client = _Client(credentials, user_agent) @@ -210,7 +210,7 @@ def test_without_emulator(self): from google.cloud._testing import _Monkey from google.cloud.bigtable import client as MUT - credentials = _Credentials() + credentials = object() user_agent = 'you-sir-age-int' client = _Client(credentials, user_agent) @@ -289,6 +289,11 @@ def _make_oneWithMocks(self, *args, **kwargs): _make_table_stub=mock_make_table_stub): return self._make_one(*args, **kwargs) + def _make_credentials(self): + import mock + import google.auth.credentials + return 
mock.Mock(spec=google.auth.credentials.Scoped) + def _constructor_test_helper(self, expected_scopes, creds, read_only=False, admin=False, user_agent=None, expected_creds=None): @@ -320,10 +325,11 @@ def _constructor_test_helper(self, expected_scopes, creds, self.assertSequenceEqual(mock_make_operations_stub.calls, []) self.assertSequenceEqual(mock_make_table_stub.calls, []) - expected_creds = expected_creds or creds + expected_creds = expected_creds or creds.with_scopes.return_value self.assertIs(client._credentials, expected_creds) + if expected_scopes is not None: - self.assertEqual(client._credentials.scopes, expected_scopes) + creds.with_scopes.assert_called_once_with(expected_scopes) self.assertEqual(client.project, self.PROJECT) self.assertEqual(client.user_agent, user_agent) @@ -345,7 +351,7 @@ def test_constructor_default_scopes(self): from google.cloud.bigtable import client as MUT expected_scopes = [MUT.DATA_SCOPE] - creds = _Credentials() + creds = self._make_credentials() self._constructor_test_helper(expected_scopes, creds) def test_constructor_custom_user_agent(self): @@ -353,7 +359,7 @@ def test_constructor_custom_user_agent(self): CUSTOM_USER_AGENT = 'custom-application' expected_scopes = [MUT.DATA_SCOPE] - creds = _Credentials() + creds = self._make_credentials() self._constructor_test_helper(expected_scopes, creds, user_agent=CUSTOM_USER_AGENT) @@ -361,18 +367,18 @@ def test_constructor_with_admin(self): from google.cloud.bigtable import client as MUT expected_scopes = [MUT.DATA_SCOPE, MUT.ADMIN_SCOPE] - creds = _Credentials() + creds = self._make_credentials() self._constructor_test_helper(expected_scopes, creds, admin=True) def test_constructor_with_read_only(self): from google.cloud.bigtable import client as MUT expected_scopes = [MUT.READ_ONLY_SCOPE] - creds = _Credentials() + creds = self._make_credentials() self._constructor_test_helper(expected_scopes, creds, read_only=True) def test_constructor_both_admin_and_read_only(self): - creds = _Credentials() + creds = self._make_credentials() with self.assertRaises(ValueError): self._constructor_test_helper([], creds, admin=True, read_only=True) @@ -381,18 +387,21 @@ def test_constructor_implicit_credentials(self): from google.cloud._testing import _Monkey from google.cloud.bigtable import client as MUT - creds = _Credentials() + creds = self._make_credentials() expected_scopes = [MUT.DATA_SCOPE] def mock_get_credentials(): return creds with _Monkey(MUT, get_credentials=mock_get_credentials): - self._constructor_test_helper(expected_scopes, None, - expected_creds=creds) + self._constructor_test_helper( + None, None, + expected_creds=creds.with_scopes.return_value) + + creds.with_scopes.assert_called_once_with(expected_scopes) def test_constructor_credentials_wo_create_scoped(self): - creds = object() + creds = self._make_credentials() expected_scopes = None self._constructor_test_helper(expected_scopes, creds) @@ -400,7 +409,7 @@ def _copy_test_helper(self, read_only=False, admin=False): from google.cloud._testing import _Monkey from google.cloud.bigtable import client as MUT - credentials = _Credentials('value') + credentials = self._make_credentials() client = self._make_oneWithMocks( project=self.PROJECT, credentials=credentials, @@ -447,14 +456,14 @@ def test_copy_read_only(self): self._copy_test_helper(read_only=True) def test_credentials_getter(self): - credentials = _Credentials() + credentials = object() project = 'PROJECT' client = self._make_oneWithMocks(project=project, credentials=credentials) 
self.assertIs(client.credentials, credentials) def test_project_name_property(self): - credentials = _Credentials() + credentials = object() project = 'PROJECT' client = self._make_oneWithMocks(project=project, credentials=credentials) @@ -462,14 +471,14 @@ def test_project_name_property(self): self.assertEqual(client.project_name, project_name) def test_instance_stub_getter(self): - credentials = _Credentials() + credentials = object() project = 'PROJECT' client = self._make_oneWithMocks(project=project, credentials=credentials, admin=True) self.assertIs(client._instance_stub, client._instance_stub_internal) def test_instance_stub_non_admin_failure(self): - credentials = _Credentials() + credentials = object() project = 'PROJECT' client = self._make_oneWithMocks(project=project, credentials=credentials, admin=False) @@ -477,7 +486,7 @@ def test_instance_stub_non_admin_failure(self): getattr(client, '_instance_stub') def test_operations_stub_getter(self): - credentials = _Credentials() + credentials = object() project = 'PROJECT' client = self._make_oneWithMocks(project=project, credentials=credentials, admin=True) @@ -485,7 +494,7 @@ def test_operations_stub_getter(self): client._operations_stub_internal) def test_operations_stub_non_admin_failure(self): - credentials = _Credentials() + credentials = object() project = 'PROJECT' client = self._make_oneWithMocks(project=project, credentials=credentials, admin=False) @@ -493,14 +502,14 @@ def test_operations_stub_non_admin_failure(self): getattr(client, '_operations_stub') def test_table_stub_getter(self): - credentials = _Credentials() + credentials = object() project = 'PROJECT' client = self._make_oneWithMocks(project=project, credentials=credentials, admin=True) self.assertIs(client._table_stub, client._table_stub_internal) def test_table_stub_non_admin_failure(self): - credentials = _Credentials() + credentials = object() project = 'PROJECT' client = self._make_oneWithMocks(project=project, credentials=credentials, admin=False) @@ -516,7 +525,7 @@ def test_instance_factory_defaults(self): PROJECT = 'PROJECT' INSTANCE_ID = 'instance-id' DISPLAY_NAME = 'display-name' - credentials = _Credentials() + credentials = object() client = self._make_oneWithMocks(project=PROJECT, credentials=credentials) @@ -538,7 +547,7 @@ def test_instance_factory_w_explicit_serve_nodes(self): DISPLAY_NAME = 'display-name' LOCATION_ID = 'locname' SERVE_NODES = 5 - credentials = _Credentials() + credentials = object() client = self._make_oneWithMocks(project=PROJECT, credentials=credentials) @@ -569,7 +578,7 @@ def test_list_instances(self): INSTANCE_NAME2 = ( 'projects/' + self.PROJECT + '/instances/' + INSTANCE_ID2) - credentials = _Credentials() + credentials = object() client = self._make_oneWithMocks( project=self.PROJECT, credentials=credentials, @@ -619,22 +628,6 @@ def test_list_instances(self): )]) -class _Credentials(object): - - scopes = None - - def __init__(self, access_token=None): - self._access_token = access_token - self._tokens = [] - - def create_scoped(self, scope): - self.scopes = scope - return self - - def __eq__(self, other): - return self._access_token == other._access_token - - class _Client(object): def __init__(self, credentials, user_agent, emulator_host=None): From bb2ad270fb27e75bccca18efe970286bc42e8df3 Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Mon, 5 Dec 2016 13:24:39 -0800 Subject: [PATCH 029/892] Removing links to "stable" build of RTD. See #2810 for context. 
--- packages/google-cloud-bigtable/README.rst | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/packages/google-cloud-bigtable/README.rst b/packages/google-cloud-bigtable/README.rst index 51a0f25c7b49..3b37f5ec6880 100644 --- a/packages/google-cloud-bigtable/README.rst +++ b/packages/google-cloud-bigtable/README.rst @@ -38,11 +38,9 @@ Analytics, Maps, and Gmail. .. _Bigtable: https://cloud.google.com/bigtable/docs/ -See the ``google-cloud-python`` API `Bigtable documentation`_ to learn +See the ``google-cloud-python`` API Bigtable `Documentation`_ to learn how to manage your data in Bigtable tables. -.. _Bigtable documentation: https://google-cloud-python.readthedocs.io/en/stable/bigtable-usage.html - .. |pypi| image:: https://img.shields.io/pypi/v/google-cloud-bigtable.svg :target: https://pypi.python.org/pypi/google-cloud-bigtable .. |versions| image:: https://img.shields.io/pypi/pyversions/google-cloud-bigtable.svg From 9537b7b1903cb7f5c70fe5a6e127460b4b057839 Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Wed, 7 Dec 2016 16:00:24 -0800 Subject: [PATCH 030/892] Raise ValueError if credentials are not from google-auth (#2828) --- .../unit_tests/test_client.py | 66 +++++++++++-------- 1 file changed, 37 insertions(+), 29 deletions(-) diff --git a/packages/google-cloud-bigtable/unit_tests/test_client.py b/packages/google-cloud-bigtable/unit_tests/test_client.py index a6b82cde0aae..4c912b4794d3 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_client.py +++ b/packages/google-cloud-bigtable/unit_tests/test_client.py @@ -15,6 +15,19 @@ import unittest +import mock + + +def _make_credentials(): + import google.auth.credentials + + class _CredentialsWithScopes( + google.auth.credentials.Credentials, + google.auth.credentials.Scoped): + pass + + return mock.Mock(spec=_CredentialsWithScopes) + class Test__make_data_stub(unittest.TestCase): @@ -26,7 +39,7 @@ def test_without_emulator(self): from google.cloud._testing import _Monkey from google.cloud.bigtable import client as MUT - credentials = object() + credentials = _make_credentials() user_agent = 'you-sir-age-int' client = _Client(credentials, user_agent) @@ -86,7 +99,7 @@ def test_without_emulator(self): from google.cloud._testing import _Monkey from google.cloud.bigtable import client as MUT - credentials = object() + credentials = _make_credentials() user_agent = 'you-sir-age-int' client = _Client(credentials, user_agent) @@ -148,7 +161,7 @@ def test_without_emulator(self): from google.cloud._testing import _Monkey from google.cloud.bigtable import client as MUT - credentials = object() + credentials = _make_credentials() user_agent = 'you-sir-age-int' client = _Client(credentials, user_agent) @@ -210,7 +223,7 @@ def test_without_emulator(self): from google.cloud._testing import _Monkey from google.cloud.bigtable import client as MUT - credentials = object() + credentials = _make_credentials() user_agent = 'you-sir-age-int' client = _Client(credentials, user_agent) @@ -289,11 +302,6 @@ def _make_oneWithMocks(self, *args, **kwargs): _make_table_stub=mock_make_table_stub): return self._make_one(*args, **kwargs) - def _make_credentials(self): - import mock - import google.auth.credentials - return mock.Mock(spec=google.auth.credentials.Scoped) - def _constructor_test_helper(self, expected_scopes, creds, read_only=False, admin=False, user_agent=None, expected_creds=None): @@ -351,7 +359,7 @@ def test_constructor_default_scopes(self): from google.cloud.bigtable import client as MUT expected_scopes = 
[MUT.DATA_SCOPE] - creds = self._make_credentials() + creds = _make_credentials() self._constructor_test_helper(expected_scopes, creds) def test_constructor_custom_user_agent(self): @@ -359,7 +367,7 @@ def test_constructor_custom_user_agent(self): CUSTOM_USER_AGENT = 'custom-application' expected_scopes = [MUT.DATA_SCOPE] - creds = self._make_credentials() + creds = _make_credentials() self._constructor_test_helper(expected_scopes, creds, user_agent=CUSTOM_USER_AGENT) @@ -367,18 +375,18 @@ def test_constructor_with_admin(self): from google.cloud.bigtable import client as MUT expected_scopes = [MUT.DATA_SCOPE, MUT.ADMIN_SCOPE] - creds = self._make_credentials() + creds = _make_credentials() self._constructor_test_helper(expected_scopes, creds, admin=True) def test_constructor_with_read_only(self): from google.cloud.bigtable import client as MUT expected_scopes = [MUT.READ_ONLY_SCOPE] - creds = self._make_credentials() + creds = _make_credentials() self._constructor_test_helper(expected_scopes, creds, read_only=True) def test_constructor_both_admin_and_read_only(self): - creds = self._make_credentials() + creds = _make_credentials() with self.assertRaises(ValueError): self._constructor_test_helper([], creds, admin=True, read_only=True) @@ -387,7 +395,7 @@ def test_constructor_implicit_credentials(self): from google.cloud._testing import _Monkey from google.cloud.bigtable import client as MUT - creds = self._make_credentials() + creds = _make_credentials() expected_scopes = [MUT.DATA_SCOPE] def mock_get_credentials(): @@ -401,7 +409,7 @@ def mock_get_credentials(): creds.with_scopes.assert_called_once_with(expected_scopes) def test_constructor_credentials_wo_create_scoped(self): - creds = self._make_credentials() + creds = _make_credentials() expected_scopes = None self._constructor_test_helper(expected_scopes, creds) @@ -409,7 +417,7 @@ def _copy_test_helper(self, read_only=False, admin=False): from google.cloud._testing import _Monkey from google.cloud.bigtable import client as MUT - credentials = self._make_credentials() + credentials = _make_credentials() client = self._make_oneWithMocks( project=self.PROJECT, credentials=credentials, @@ -456,14 +464,14 @@ def test_copy_read_only(self): self._copy_test_helper(read_only=True) def test_credentials_getter(self): - credentials = object() + credentials = _make_credentials() project = 'PROJECT' client = self._make_oneWithMocks(project=project, credentials=credentials) - self.assertIs(client.credentials, credentials) + self.assertIs(client.credentials, credentials.with_scopes.return_value) def test_project_name_property(self): - credentials = object() + credentials = _make_credentials() project = 'PROJECT' client = self._make_oneWithMocks(project=project, credentials=credentials) @@ -471,14 +479,14 @@ def test_project_name_property(self): self.assertEqual(client.project_name, project_name) def test_instance_stub_getter(self): - credentials = object() + credentials = _make_credentials() project = 'PROJECT' client = self._make_oneWithMocks(project=project, credentials=credentials, admin=True) self.assertIs(client._instance_stub, client._instance_stub_internal) def test_instance_stub_non_admin_failure(self): - credentials = object() + credentials = _make_credentials() project = 'PROJECT' client = self._make_oneWithMocks(project=project, credentials=credentials, admin=False) @@ -486,7 +494,7 @@ def test_instance_stub_non_admin_failure(self): getattr(client, '_instance_stub') def test_operations_stub_getter(self): - credentials = object() + 
credentials = _make_credentials() project = 'PROJECT' client = self._make_oneWithMocks(project=project, credentials=credentials, admin=True) @@ -494,7 +502,7 @@ def test_operations_stub_getter(self): client._operations_stub_internal) def test_operations_stub_non_admin_failure(self): - credentials = object() + credentials = _make_credentials() project = 'PROJECT' client = self._make_oneWithMocks(project=project, credentials=credentials, admin=False) @@ -502,14 +510,14 @@ def test_operations_stub_non_admin_failure(self): getattr(client, '_operations_stub') def test_table_stub_getter(self): - credentials = object() + credentials = _make_credentials() project = 'PROJECT' client = self._make_oneWithMocks(project=project, credentials=credentials, admin=True) self.assertIs(client._table_stub, client._table_stub_internal) def test_table_stub_non_admin_failure(self): - credentials = object() + credentials = _make_credentials() project = 'PROJECT' client = self._make_oneWithMocks(project=project, credentials=credentials, admin=False) @@ -525,7 +533,7 @@ def test_instance_factory_defaults(self): PROJECT = 'PROJECT' INSTANCE_ID = 'instance-id' DISPLAY_NAME = 'display-name' - credentials = object() + credentials = _make_credentials() client = self._make_oneWithMocks(project=PROJECT, credentials=credentials) @@ -547,7 +555,7 @@ def test_instance_factory_w_explicit_serve_nodes(self): DISPLAY_NAME = 'display-name' LOCATION_ID = 'locname' SERVE_NODES = 5 - credentials = object() + credentials = _make_credentials() client = self._make_oneWithMocks(project=PROJECT, credentials=credentials) @@ -578,7 +586,7 @@ def test_list_instances(self): INSTANCE_NAME2 = ( 'projects/' + self.PROJECT + '/instances/' + INSTANCE_ID2) - credentials = object() + credentials = _make_credentials() client = self._make_oneWithMocks( project=self.PROJECT, credentials=credentials, From d8ee305056c00cbd229d5a9a2c030fc4b5414b3a Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Thu, 8 Dec 2016 15:17:03 -0800 Subject: [PATCH 031/892] Update versions for mega-release. We want to update - `google-cloud-bigquery` - `google-cloud-datastore` - `google-cloud-logging` - `google-cloud-storage` - `google-cloud-core` And then update `google-cloud` to re-wrap the latest versions of each. However, to avoid having packages in `google-cloud` with conflicting versions of `google-cloud-core`, we must release all packages. --- packages/google-cloud-bigtable/setup.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index a5e7387ac813..36a9ac76eff8 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -50,13 +50,13 @@ REQUIREMENTS = [ - 'google-cloud-core >= 0.21.0, < 0.22dev', + 'google-cloud-core >= 0.22.1, < 0.23dev', 'grpcio >= 1.0.0, < 2.0dev', ] setup( name='google-cloud-bigtable', - version='0.21.0', + version='0.22.0', description='Python Client for Google Cloud Bigtable', long_description=README, namespace_packages=[ From e15601eb6b25e2589a6e5546d3a48f6e05e0a930 Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Fri, 9 Dec 2016 16:57:17 -0800 Subject: [PATCH 032/892] Explicitly putting 1.0.2 lower bound on grpcio. 
Also upgrading logging from 0.14.x to 0.90.x --- packages/google-cloud-bigtable/setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 36a9ac76eff8..08cd2eb7d6b4 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -51,7 +51,7 @@ REQUIREMENTS = [ 'google-cloud-core >= 0.22.1, < 0.23dev', - 'grpcio >= 1.0.0, < 2.0dev', + 'grpcio >= 1.0.2, < 2.0dev', ] setup( From 22c88fd1116cf67c950ab56ae31f839db6464cfc Mon Sep 17 00:00:00 2001 From: Thomas Schultz Date: Fri, 23 Dec 2016 07:58:49 -0500 Subject: [PATCH 033/892] Add max_receive_message_length for larger rows. --- .../google/cloud/bigtable/client.py | 11 ++++++++++- .../google-cloud-bigtable/unit_tests/test_client.py | 8 +++++++- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py index 037b6efa15ea..38a103c7c005 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py @@ -65,6 +65,14 @@ READ_ONLY_SCOPE = 'https://www.googleapis.com/auth/bigtable.data.readonly' """Scope for reading table data.""" +# NOTE: 'grpc.max_message_length' will no longer be recognized in +# grpcio 1.1 and later. +_MAX_MSG_LENGTH_100MB = 100 * 1024 * 1024 +_GRPC_MAX_LENGTH_OPTIONS = ( + ('grpc.max_message_length', _MAX_MSG_LENGTH_100MB), + ('grpc.max_receive_message_length', _MAX_MSG_LENGTH_100MB), +) + def _make_data_stub(client): """Creates gRPC stub to make requests to the Data API. @@ -77,7 +85,8 @@ def _make_data_stub(client): """ if client.emulator_host is None: return make_secure_stub(client.credentials, client.user_agent, - bigtable_pb2.BigtableStub, DATA_API_HOST) + bigtable_pb2.BigtableStub, DATA_API_HOST, + extra_options=_GRPC_MAX_LENGTH_OPTIONS) else: return make_insecure_stub(bigtable_pb2.BigtableStub, client.emulator_host) diff --git a/packages/google-cloud-bigtable/unit_tests/test_client.py b/packages/google-cloud-bigtable/unit_tests/test_client.py index 4c912b4794d3..24fee441a6c9 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_client.py +++ b/packages/google-cloud-bigtable/unit_tests/test_client.py @@ -46,13 +46,18 @@ def test_without_emulator(self): fake_stub = object() make_secure_stub_args = [] - def mock_make_secure_stub(*args): + def mock_make_secure_stub(*args, **kwargs): make_secure_stub_args.append(args) + make_secure_stub_args.append(kwargs) return fake_stub with _Monkey(MUT, make_secure_stub=mock_make_secure_stub): result = self._call_fut(client) + extra_options = {'extra_options': ( + ('grpc.max_message_length', 104857600), + ('grpc.max_receive_message_length', 104857600) + )} self.assertIs(result, fake_stub) self.assertEqual(make_secure_stub_args, [ ( @@ -61,6 +66,7 @@ def mock_make_secure_stub(*args): MUT.bigtable_pb2.BigtableStub, MUT.DATA_API_HOST, ), + extra_options, ]) def test_with_emulator(self): From 18975c7261e445c03df5a0250b3e5b277e05dcd1 Mon Sep 17 00:00:00 2001 From: Thomas Schultz Date: Wed, 18 Jan 2017 13:14:03 -0500 Subject: [PATCH 034/892] Update import spacing part 2. 
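A brief aside on the earlier "Add max_receive_message_length for larger rows" patch:
the tuples it introduces are ordinary gRPC channel options, forwarded through
make_secure_stub via extra_options. A minimal, self-contained sketch of passing the
same options when opening a channel directly (illustrative only; the target assumes
the public Bigtable data endpoint, DATA_API_HOST in client.py, plus port 443):

    import grpc

    _MAX_MSG_LENGTH_100MB = 100 * 1024 * 1024

    # 'grpc.max_message_length' is the pre-1.1 grpcio option name;
    # 'grpc.max_receive_message_length' is its replacement in later releases.
    options = [
        ('grpc.max_message_length', _MAX_MSG_LENGTH_100MB),
        ('grpc.max_receive_message_length', _MAX_MSG_LENGTH_100MB),
    ]

    channel = grpc.secure_channel(
        'bigtable.googleapis.com:443',
        grpc.ssl_channel_credentials(),
        options=options,
    )
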
--- .../unit_tests/test_client.py | 5 +++ .../unit_tests/test_cluster.py | 6 ++++ .../unit_tests/test_column_family.py | 14 ++++++++ .../unit_tests/test_instance.py | 4 +++ .../unit_tests/test_row.py | 24 ++++++++++++++ .../unit_tests/test_row_data.py | 11 +++++++ .../unit_tests/test_row_filters.py | 32 +++++++++++++++++++ .../unit_tests/test_table.py | 13 ++++++++ 8 files changed, 109 insertions(+) diff --git a/packages/google-cloud-bigtable/unit_tests/test_client.py b/packages/google-cloud-bigtable/unit_tests/test_client.py index 24fee441a6c9..b7b666e819df 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_client.py +++ b/packages/google-cloud-bigtable/unit_tests/test_client.py @@ -33,6 +33,7 @@ class Test__make_data_stub(unittest.TestCase): def _call_fut(self, client): from google.cloud.bigtable.client import _make_data_stub + return _make_data_stub(client) def test_without_emulator(self): @@ -99,6 +100,7 @@ class Test__make_instance_stub(unittest.TestCase): def _call_fut(self, client): from google.cloud.bigtable.client import _make_instance_stub + return _make_instance_stub(client) def test_without_emulator(self): @@ -159,6 +161,7 @@ class Test__make_operations_stub(unittest.TestCase): def _call_fut(self, client): from google.cloud.bigtable.client import _make_operations_stub + return _make_operations_stub(client) def test_without_emulator(self): @@ -223,6 +226,7 @@ class Test__make_table_stub(unittest.TestCase): def _call_fut(self, client): from google.cloud.bigtable.client import _make_table_stub + return _make_table_stub(client) def test_without_emulator(self): @@ -289,6 +293,7 @@ class TestClient(unittest.TestCase): @staticmethod def _get_target_class(): from google.cloud.bigtable.client import Client + return Client def _make_one(self, *args, **kwargs): diff --git a/packages/google-cloud-bigtable/unit_tests/test_cluster.py b/packages/google-cloud-bigtable/unit_tests/test_cluster.py index 9730d9b5ab44..9472fde29f59 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_cluster.py +++ b/packages/google-cloud-bigtable/unit_tests/test_cluster.py @@ -28,6 +28,7 @@ class TestCluster(unittest.TestCase): @staticmethod def _get_target_class(): from google.cloud.bigtable.cluster import Cluster + return Cluster def _make_one(self, *args, **kwargs): @@ -35,6 +36,7 @@ def _make_one(self, *args, **kwargs): def test_constructor_defaults(self): from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES + client = _Client(self.PROJECT) instance = _Instance(self.INSTANCE_ID, client) @@ -376,6 +378,7 @@ class Test__prepare_create_request(unittest.TestCase): def _call_fut(self, cluster): from google.cloud.bigtable.cluster import _prepare_create_request + return _prepare_create_request(cluster) def test_it(self): @@ -401,18 +404,21 @@ def test_it(self): def _ClusterPB(*args, **kw): from google.cloud.bigtable._generated import ( instance_pb2 as instance_v2_pb2) + return instance_v2_pb2.Cluster(*args, **kw) def _DeleteClusterRequestPB(*args, **kw): from google.cloud.bigtable._generated import ( bigtable_instance_admin_pb2 as messages_v2_pb2) + return messages_v2_pb2.DeleteClusterRequest(*args, **kw) def _GetClusterRequestPB(*args, **kw): from google.cloud.bigtable._generated import ( bigtable_instance_admin_pb2 as messages_v2_pb2) + return messages_v2_pb2.GetClusterRequest(*args, **kw) diff --git a/packages/google-cloud-bigtable/unit_tests/test_column_family.py b/packages/google-cloud-bigtable/unit_tests/test_column_family.py index 90baf9193691..36f4c99c0032 100644 --- 
a/packages/google-cloud-bigtable/unit_tests/test_column_family.py +++ b/packages/google-cloud-bigtable/unit_tests/test_column_family.py @@ -21,6 +21,7 @@ class Test__timedelta_to_duration_pb(unittest.TestCase): def _call_fut(self, *args, **kwargs): from google.cloud.bigtable.column_family import ( _timedelta_to_duration_pb) + return _timedelta_to_duration_pb(*args, **kwargs) def test_it(self): @@ -67,6 +68,7 @@ class Test__duration_pb_to_timedelta(unittest.TestCase): def _call_fut(self, *args, **kwargs): from google.cloud.bigtable.column_family import ( _duration_pb_to_timedelta) + return _duration_pb_to_timedelta(*args, **kwargs) def test_it(self): @@ -88,6 +90,7 @@ class TestMaxVersionsGCRule(unittest.TestCase): @staticmethod def _get_target_class(): from google.cloud.bigtable.column_family import MaxVersionsGCRule + return MaxVersionsGCRule def _make_one(self, *args, **kwargs): @@ -122,6 +125,7 @@ class TestMaxAgeGCRule(unittest.TestCase): @staticmethod def _get_target_class(): from google.cloud.bigtable.column_family import MaxAgeGCRule + return MaxAgeGCRule def _make_one(self, *args, **kwargs): @@ -162,6 +166,7 @@ class TestGCRuleUnion(unittest.TestCase): @staticmethod def _get_target_class(): from google.cloud.bigtable.column_family import GCRuleUnion + return GCRuleUnion def _make_one(self, *args, **kwargs): @@ -249,6 +254,7 @@ class TestGCRuleIntersection(unittest.TestCase): @staticmethod def _get_target_class(): from google.cloud.bigtable.column_family import GCRuleIntersection + return GCRuleIntersection def _make_one(self, *args, **kwargs): @@ -339,6 +345,7 @@ class TestColumnFamily(unittest.TestCase): @staticmethod def _get_target_class(): from google.cloud.bigtable.column_family import ColumnFamily + return ColumnFamily def _make_one(self, *args, **kwargs): @@ -466,6 +473,7 @@ def test_create(self): def test_create_with_gc_rule(self): from google.cloud.bigtable.column_family import MaxVersionsGCRule + gc_rule = MaxVersionsGCRule(1337) self._create_test_helper(gc_rule=gc_rule) @@ -524,6 +532,7 @@ def test_update(self): def test_update_with_gc_rule(self): from google.cloud.bigtable.column_family import MaxVersionsGCRule + gc_rule = MaxVersionsGCRule(1337) self._update_test_helper(gc_rule=gc_rule) @@ -577,6 +586,7 @@ class Test__gc_rule_from_pb(unittest.TestCase): def _call_fut(self, *args, **kwargs): from google.cloud.bigtable.column_family import _gc_rule_from_pb + return _gc_rule_from_pb(*args, **kwargs) def test_empty(self): @@ -649,24 +659,28 @@ def WhichOneof(cls, name): def _GcRulePB(*args, **kw): from google.cloud.bigtable._generated import ( table_pb2 as table_v2_pb2) + return table_v2_pb2.GcRule(*args, **kw) def _GcRuleIntersectionPB(*args, **kw): from google.cloud.bigtable._generated import ( table_pb2 as table_v2_pb2) + return table_v2_pb2.GcRule.Intersection(*args, **kw) def _GcRuleUnionPB(*args, **kw): from google.cloud.bigtable._generated import ( table_pb2 as table_v2_pb2) + return table_v2_pb2.GcRule.Union(*args, **kw) def _ColumnFamilyPB(*args, **kw): from google.cloud.bigtable._generated import ( table_pb2 as table_v2_pb2) + return table_v2_pb2.ColumnFamily(*args, **kw) diff --git a/packages/google-cloud-bigtable/unit_tests/test_instance.py b/packages/google-cloud-bigtable/unit_tests/test_instance.py index 84bc0e6346ea..c243ca77a135 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_instance.py +++ b/packages/google-cloud-bigtable/unit_tests/test_instance.py @@ -33,6 +33,7 @@ class TestInstance(unittest.TestCase): @staticmethod def _get_target_class(): 
from google.cloud.bigtable.instance import Instance + return Instance def _make_one(self, *args, **kwargs): @@ -516,6 +517,7 @@ class Test__prepare_create_request(unittest.TestCase): def _call_fut(self, instance, **kw): from google.cloud.bigtable.instance import _prepare_create_request + return _prepare_create_request(instance, **kw) def test_w_defaults(self): @@ -551,6 +553,7 @@ def test_w_explicit_serve_nodes(self): from google.cloud.bigtable._generated import ( bigtable_instance_admin_pb2 as messages_v2_pb) from google.cloud.bigtable.instance import Instance + DISPLAY_NAME = u'DISPLAY_NAME' SERVE_NODES = 5 client = _Client(self.PROJECT) @@ -582,6 +585,7 @@ def __init__(self, project): def copy(self): from copy import deepcopy + return deepcopy(self) def __eq__(self, other): diff --git a/packages/google-cloud-bigtable/unit_tests/test_row.py b/packages/google-cloud-bigtable/unit_tests/test_row.py index c3321a12eec1..60d53d5ccdf6 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_row.py +++ b/packages/google-cloud-bigtable/unit_tests/test_row.py @@ -21,6 +21,7 @@ class Test_SetDeleteRow(unittest.TestCase): @staticmethod def _get_target_class(): from google.cloud.bigtable.row import _SetDeleteRow + return _SetDeleteRow def _make_one(self, *args, **kwargs): @@ -37,6 +38,7 @@ class TestDirectRow(unittest.TestCase): @staticmethod def _get_target_class(): from google.cloud.bigtable.row import DirectRow + return DirectRow def _make_one(self, *args, **kwargs): @@ -77,6 +79,7 @@ def _set_cell_helper(self, column=None, column_bytes=None, timestamp_micros=-1): import six import struct + row_key = b'row_key' column_family_id = u'column_family_id' if column is None: @@ -381,6 +384,7 @@ class TestConditionalRow(unittest.TestCase): @staticmethod def _get_target_class(): from google.cloud.bigtable.row import ConditionalRow + return ConditionalRow def _make_one(self, *args, **kwargs): @@ -523,6 +527,7 @@ class TestAppendRow(unittest.TestCase): @staticmethod def _get_target_class(): from google.cloud.bigtable.row import AppendRow + return AppendRow def _make_one(self, *args, **kwargs): @@ -667,10 +672,12 @@ class Test__parse_rmw_row_response(unittest.TestCase): def _call_fut(self, row_response): from google.cloud.bigtable.row import _parse_rmw_row_response + return _parse_rmw_row_response(row_response) def test_it(self): from google.cloud._helpers import _datetime_from_microseconds + col_fam1 = u'col-fam-id' col_fam2 = u'col-fam-id2' col_name1 = b'col-name1' @@ -752,10 +759,12 @@ class Test__parse_family_pb(unittest.TestCase): def _call_fut(self, family_pb): from google.cloud.bigtable.row import _parse_family_pb + return _parse_family_pb(family_pb) def test_it(self): from google.cloud._helpers import _datetime_from_microseconds + col_fam1 = u'col-fam-id' col_name1 = b'col-name1' col_name2 = b'col-name2' @@ -808,90 +817,105 @@ def test_it(self): def _CheckAndMutateRowRequestPB(*args, **kw): from google.cloud.bigtable._generated import ( bigtable_pb2 as messages_v2_pb2) + return messages_v2_pb2.CheckAndMutateRowRequest(*args, **kw) def _CheckAndMutateRowResponsePB(*args, **kw): from google.cloud.bigtable._generated import ( bigtable_pb2 as messages_v2_pb2) + return messages_v2_pb2.CheckAndMutateRowResponse(*args, **kw) def _MutateRowRequestPB(*args, **kw): from google.cloud.bigtable._generated import ( bigtable_pb2 as messages_v2_pb2) + return messages_v2_pb2.MutateRowRequest(*args, **kw) def _ReadModifyWriteRowRequestPB(*args, **kw): from google.cloud.bigtable._generated import ( bigtable_pb2 as 
messages_v2_pb2) + return messages_v2_pb2.ReadModifyWriteRowRequest(*args, **kw) def _ReadModifyWriteRowResponsePB(*args, **kw): from google.cloud.bigtable._generated import ( bigtable_pb2 as messages_v2_pb2) + return messages_v2_pb2.ReadModifyWriteRowResponse(*args, **kw) def _CellPB(*args, **kw): from google.cloud.bigtable._generated import ( data_pb2 as data_v2_pb2) + return data_v2_pb2.Cell(*args, **kw) def _ColumnPB(*args, **kw): from google.cloud.bigtable._generated import ( data_pb2 as data_v2_pb2) + return data_v2_pb2.Column(*args, **kw) def _FamilyPB(*args, **kw): from google.cloud.bigtable._generated import ( data_pb2 as data_v2_pb2) + return data_v2_pb2.Family(*args, **kw) def _MutationPB(*args, **kw): from google.cloud.bigtable._generated import ( data_pb2 as data_v2_pb2) + return data_v2_pb2.Mutation(*args, **kw) def _MutationSetCellPB(*args, **kw): from google.cloud.bigtable._generated import ( data_pb2 as data_v2_pb2) + return data_v2_pb2.Mutation.SetCell(*args, **kw) def _MutationDeleteFromColumnPB(*args, **kw): from google.cloud.bigtable._generated import ( data_pb2 as data_v2_pb2) + return data_v2_pb2.Mutation.DeleteFromColumn(*args, **kw) def _MutationDeleteFromFamilyPB(*args, **kw): from google.cloud.bigtable._generated import ( data_pb2 as data_v2_pb2) + return data_v2_pb2.Mutation.DeleteFromFamily(*args, **kw) def _MutationDeleteFromRowPB(*args, **kw): from google.cloud.bigtable._generated import ( data_pb2 as data_v2_pb2) + return data_v2_pb2.Mutation.DeleteFromRow(*args, **kw) def _RowPB(*args, **kw): from google.cloud.bigtable._generated import ( data_pb2 as data_v2_pb2) + return data_v2_pb2.Row(*args, **kw) def _ReadModifyWriteRulePB(*args, **kw): from google.cloud.bigtable._generated import ( data_pb2 as data_v2_pb2) + return data_v2_pb2.ReadModifyWriteRule(*args, **kw) diff --git a/packages/google-cloud-bigtable/unit_tests/test_row_data.py b/packages/google-cloud-bigtable/unit_tests/test_row_data.py index 8e5d72125f5d..8a2fb6a7e5e6 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_row_data.py +++ b/packages/google-cloud-bigtable/unit_tests/test_row_data.py @@ -21,6 +21,7 @@ class TestCell(unittest.TestCase): @staticmethod def _get_target_class(): from google.cloud.bigtable.row_data import Cell + return Cell def _make_one(self, *args, **kwargs): @@ -97,6 +98,7 @@ class TestPartialRowData(unittest.TestCase): @staticmethod def _get_target_class(): from google.cloud.bigtable.row_data import PartialRowData + return PartialRowData def _make_one(self, *args, **kwargs): @@ -189,6 +191,7 @@ class TestPartialRowsData(unittest.TestCase): @staticmethod def _get_target_class(): from google.cloud.bigtable.row_data import PartialRowsData + return PartialRowsData def _getDoNothingClass(self): @@ -387,6 +390,7 @@ def test__save_row_no_cell(self): def test_invalid_last_scanned_row_key_on_start(self): from google.cloud.bigtable.row_data import InvalidReadRowsResponse + response = _ReadRowsResponseV2(chunks=(), last_scanned_row_key='ABC') iterator = _MockCancellableIterator(response) prd = self._make_one(iterator) @@ -404,6 +408,7 @@ def test_valid_last_scanned_row_key_on_start(self): def test_invalid_empty_chunk(self): from google.cloud.bigtable.row_data import InvalidChunk + chunks = _generate_cell_chunks(['']) response = _ReadRowsResponseV2(chunks) iterator = _MockCancellableIterator(response) @@ -413,6 +418,7 @@ def test_invalid_empty_chunk(self): def test_invalid_empty_second_chunk(self): from google.cloud.bigtable.row_data import InvalidChunk + chunks = 
_generate_cell_chunks(['', '']) first = chunks[0] first.row_key = b'RK' @@ -432,6 +438,7 @@ class TestPartialRowsData_JSON_acceptance_tests(unittest.TestCase): @staticmethod def _get_target_class(): from google.cloud.bigtable.row_data import PartialRowsData + return PartialRowsData def _make_one(self, *args, **kwargs): @@ -439,6 +446,7 @@ def _make_one(self, *args, **kwargs): def _load_json_test(self, test_name): import os + if self.__class__._json_tests is None: dirname = os.path.dirname(__file__) filename = os.path.join(dirname, 'read-rows-acceptance-test.json') @@ -452,6 +460,7 @@ def _load_json_test(self, test_name): def _fail_during_consume(self, testcase_name): from google.cloud.bigtable.row_data import InvalidChunk + chunks, results = self._load_json_test(testcase_name) response = _ReadRowsResponseV2(chunks) iterator = _MockCancellableIterator(response) @@ -504,6 +513,7 @@ def test_invalid_commit_with_chunk(self): def _sort_flattend_cells(self, flattened): import operator + key_func = operator.itemgetter('rk', 'fm', 'qual') return sorted(flattened, key=key_func) @@ -643,6 +653,7 @@ def _flatten_cells(prd): # Doesn't handle error cases. from google.cloud._helpers import _bytes_to_unicode from google.cloud._helpers import _microseconds_from_datetime + for row_key, row in prd.rows.items(): for family_name, family in row.cells.items(): for qualifier, column in family.items(): diff --git a/packages/google-cloud-bigtable/unit_tests/test_row_filters.py b/packages/google-cloud-bigtable/unit_tests/test_row_filters.py index a49911acc0c3..b3715a1337ad 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_row_filters.py +++ b/packages/google-cloud-bigtable/unit_tests/test_row_filters.py @@ -21,6 +21,7 @@ class Test_BoolFilter(unittest.TestCase): @staticmethod def _get_target_class(): from google.cloud.bigtable.row_filters import _BoolFilter + return _BoolFilter def _make_one(self, *args, **kwargs): @@ -56,6 +57,7 @@ class TestSinkFilter(unittest.TestCase): @staticmethod def _get_target_class(): from google.cloud.bigtable.row_filters import SinkFilter + return SinkFilter def _make_one(self, *args, **kwargs): @@ -74,6 +76,7 @@ class TestPassAllFilter(unittest.TestCase): @staticmethod def _get_target_class(): from google.cloud.bigtable.row_filters import PassAllFilter + return PassAllFilter def _make_one(self, *args, **kwargs): @@ -92,6 +95,7 @@ class TestBlockAllFilter(unittest.TestCase): @staticmethod def _get_target_class(): from google.cloud.bigtable.row_filters import BlockAllFilter + return BlockAllFilter def _make_one(self, *args, **kwargs): @@ -110,6 +114,7 @@ class Test_RegexFilter(unittest.TestCase): @staticmethod def _get_target_class(): from google.cloud.bigtable.row_filters import _RegexFilter + return _RegexFilter def _make_one(self, *args, **kwargs): @@ -150,6 +155,7 @@ class TestRowKeyRegexFilter(unittest.TestCase): @staticmethod def _get_target_class(): from google.cloud.bigtable.row_filters import RowKeyRegexFilter + return RowKeyRegexFilter def _make_one(self, *args, **kwargs): @@ -168,6 +174,7 @@ class TestRowSampleFilter(unittest.TestCase): @staticmethod def _get_target_class(): from google.cloud.bigtable.row_filters import RowSampleFilter + return RowSampleFilter def _make_one(self, *args, **kwargs): @@ -203,6 +210,7 @@ class TestFamilyNameRegexFilter(unittest.TestCase): @staticmethod def _get_target_class(): from google.cloud.bigtable.row_filters import FamilyNameRegexFilter + return FamilyNameRegexFilter def _make_one(self, *args, **kwargs): @@ -222,6 +230,7 @@ class 
TestColumnQualifierRegexFilter(unittest.TestCase): def _get_target_class(): from google.cloud.bigtable.row_filters import ( ColumnQualifierRegexFilter) + return ColumnQualifierRegexFilter def _make_one(self, *args, **kwargs): @@ -241,6 +250,7 @@ class TestTimestampRange(unittest.TestCase): @staticmethod def _get_target_class(): from google.cloud.bigtable.row_filters import TimestampRange + return TimestampRange def _make_one(self, *args, **kwargs): @@ -278,6 +288,7 @@ def test___ne__same_value(self): def _to_pb_helper(self, start_micros=None, end_micros=None): import datetime from google.cloud._helpers import _EPOCH + pb_kwargs = {} start = None @@ -316,6 +327,7 @@ class TestTimestampRangeFilter(unittest.TestCase): @staticmethod def _get_target_class(): from google.cloud.bigtable.row_filters import TimestampRangeFilter + return TimestampRangeFilter def _make_one(self, *args, **kwargs): @@ -354,6 +366,7 @@ class TestColumnRangeFilter(unittest.TestCase): @staticmethod def _get_target_class(): from google.cloud.bigtable.row_filters import ColumnRangeFilter + return ColumnRangeFilter def _make_one(self, *args, **kwargs): @@ -479,6 +492,7 @@ class TestValueRegexFilter(unittest.TestCase): @staticmethod def _get_target_class(): from google.cloud.bigtable.row_filters import ValueRegexFilter + return ValueRegexFilter def _make_one(self, *args, **kwargs): @@ -497,6 +511,7 @@ class TestValueRangeFilter(unittest.TestCase): @staticmethod def _get_target_class(): from google.cloud.bigtable.row_filters import ValueRangeFilter + return ValueRangeFilter def _make_one(self, *args, **kwargs): @@ -589,6 +604,7 @@ class Test_CellCountFilter(unittest.TestCase): @staticmethod def _get_target_class(): from google.cloud.bigtable.row_filters import _CellCountFilter + return _CellCountFilter def _make_one(self, *args, **kwargs): @@ -624,6 +640,7 @@ class TestCellsRowOffsetFilter(unittest.TestCase): @staticmethod def _get_target_class(): from google.cloud.bigtable.row_filters import CellsRowOffsetFilter + return CellsRowOffsetFilter def _make_one(self, *args, **kwargs): @@ -643,6 +660,7 @@ class TestCellsRowLimitFilter(unittest.TestCase): @staticmethod def _get_target_class(): from google.cloud.bigtable.row_filters import CellsRowLimitFilter + return CellsRowLimitFilter def _make_one(self, *args, **kwargs): @@ -662,6 +680,7 @@ class TestCellsColumnLimitFilter(unittest.TestCase): @staticmethod def _get_target_class(): from google.cloud.bigtable.row_filters import CellsColumnLimitFilter + return CellsColumnLimitFilter def _make_one(self, *args, **kwargs): @@ -682,6 +701,7 @@ class TestStripValueTransformerFilter(unittest.TestCase): def _get_target_class(): from google.cloud.bigtable.row_filters import ( StripValueTransformerFilter) + return StripValueTransformerFilter def _make_one(self, *args, **kwargs): @@ -700,6 +720,7 @@ class TestApplyLabelFilter(unittest.TestCase): @staticmethod def _get_target_class(): from google.cloud.bigtable.row_filters import ApplyLabelFilter + return ApplyLabelFilter def _make_one(self, *args, **kwargs): @@ -735,6 +756,7 @@ class Test_FilterCombination(unittest.TestCase): @staticmethod def _get_target_class(): from google.cloud.bigtable.row_filters import _FilterCombination + return _FilterCombination def _make_one(self, *args, **kwargs): @@ -767,6 +789,7 @@ class TestRowFilterChain(unittest.TestCase): @staticmethod def _get_target_class(): from google.cloud.bigtable.row_filters import RowFilterChain + return RowFilterChain def _make_one(self, *args, **kwargs): @@ -824,6 +847,7 @@ class 
TestRowFilterUnion(unittest.TestCase): @staticmethod def _get_target_class(): from google.cloud.bigtable.row_filters import RowFilterUnion + return RowFilterUnion def _make_one(self, *args, **kwargs): @@ -881,6 +905,7 @@ class TestConditionalRowFilter(unittest.TestCase): @staticmethod def _get_target_class(): from google.cloud.bigtable.row_filters import ConditionalRowFilter + return ConditionalRowFilter def _make_one(self, *args, **kwargs): @@ -995,40 +1020,47 @@ def test_to_pb_false_only(self): def _ColumnRangePB(*args, **kw): from google.cloud.bigtable._generated import ( data_pb2 as data_v2_pb2) + return data_v2_pb2.ColumnRange(*args, **kw) def _RowFilterPB(*args, **kw): from google.cloud.bigtable._generated import ( data_pb2 as data_v2_pb2) + return data_v2_pb2.RowFilter(*args, **kw) def _RowFilterChainPB(*args, **kw): from google.cloud.bigtable._generated import ( data_pb2 as data_v2_pb2) + return data_v2_pb2.RowFilter.Chain(*args, **kw) def _RowFilterConditionPB(*args, **kw): from google.cloud.bigtable._generated import ( data_pb2 as data_v2_pb2) + return data_v2_pb2.RowFilter.Condition(*args, **kw) def _RowFilterInterleavePB(*args, **kw): from google.cloud.bigtable._generated import ( data_pb2 as data_v2_pb2) + return data_v2_pb2.RowFilter.Interleave(*args, **kw) def _TimestampRangePB(*args, **kw): from google.cloud.bigtable._generated import ( data_pb2 as data_v2_pb2) + return data_v2_pb2.TimestampRange(*args, **kw) def _ValueRangePB(*args, **kw): from google.cloud.bigtable._generated import ( data_pb2 as data_v2_pb2) + return data_v2_pb2.ValueRange(*args, **kw) diff --git a/packages/google-cloud-bigtable/unit_tests/test_table.py b/packages/google-cloud-bigtable/unit_tests/test_table.py index f96ddcc8e704..4ad6afe1596f 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_table.py +++ b/packages/google-cloud-bigtable/unit_tests/test_table.py @@ -32,6 +32,7 @@ class TestTable(unittest.TestCase): @staticmethod def _get_target_class(): from google.cloud.bigtable.table import Table + return Table def _make_one(self, *args, **kwargs): @@ -432,6 +433,7 @@ class Test__create_row_request(unittest.TestCase): def _call_fut(self, table_name, row_key=None, start_key=None, end_key=None, filter_=None, limit=None): from google.cloud.bigtable.table import _create_row_request + return _create_row_request( table_name, row_key=row_key, start_key=start_key, end_key=end_key, filter_=filter_, limit=limit) @@ -486,6 +488,7 @@ def test_row_range_both_keys(self): def test_with_filter(self): from google.cloud.bigtable.row_filters import RowSampleFilter + table_name = 'table_name' row_filter = RowSampleFilter(0.33) result = self._call_fut(table_name, filter_=row_filter) @@ -509,36 +512,42 @@ def test_with_limit(self): def _CreateTableRequestPB(*args, **kw): from google.cloud.bigtable._generated import ( bigtable_table_admin_pb2 as table_admin_v2_pb2) + return table_admin_v2_pb2.CreateTableRequest(*args, **kw) def _CreateTableRequestSplitPB(*args, **kw): from google.cloud.bigtable._generated import ( bigtable_table_admin_pb2 as table_admin_v2_pb2) + return table_admin_v2_pb2.CreateTableRequest.Split(*args, **kw) def _DeleteTableRequestPB(*args, **kw): from google.cloud.bigtable._generated import ( bigtable_table_admin_pb2 as table_admin_v2_pb2) + return table_admin_v2_pb2.DeleteTableRequest(*args, **kw) def _GetTableRequestPB(*args, **kw): from google.cloud.bigtable._generated import ( bigtable_table_admin_pb2 as table_admin_v2_pb2) + return table_admin_v2_pb2.GetTableRequest(*args, **kw) def 
_ReadRowsRequestPB(*args, **kw): from google.cloud.bigtable._generated import ( bigtable_pb2 as messages_v2_pb2) + return messages_v2_pb2.ReadRowsRequest(*args, **kw) def _ReadRowsResponseCellChunkPB(*args, **kw): from google.cloud.bigtable._generated import ( bigtable_pb2 as messages_v2_pb2) + family_name = kw.pop('family_name') qualifier = kw.pop('qualifier') message = messages_v2_pb2.ReadRowsResponse.CellChunk(*args, **kw) @@ -550,24 +559,28 @@ def _ReadRowsResponseCellChunkPB(*args, **kw): def _ReadRowsResponsePB(*args, **kw): from google.cloud.bigtable._generated import ( bigtable_pb2 as messages_v2_pb2) + return messages_v2_pb2.ReadRowsResponse(*args, **kw) def _SampleRowKeysRequestPB(*args, **kw): from google.cloud.bigtable._generated import ( bigtable_pb2 as messages_v2_pb2) + return messages_v2_pb2.SampleRowKeysRequest(*args, **kw) def _TablePB(*args, **kw): from google.cloud.bigtable._generated import ( table_pb2 as table_v2_pb2) + return table_v2_pb2.Table(*args, **kw) def _ColumnFamilyPB(*args, **kw): from google.cloud.bigtable._generated import ( table_pb2 as table_v2_pb2) + return table_v2_pb2.ColumnFamily(*args, **kw) From f94f6161967a1111aaa4477f52bffb046a8dc264 Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Fri, 20 Jan 2017 11:08:42 -0800 Subject: [PATCH 035/892] Moving Bigtable helpers for duration protobufs into core. --- .../google/cloud/bigtable/column_family.py | 55 +-------------- .../unit_tests/test_column_family.py | 69 ------------------- 2 files changed, 3 insertions(+), 121 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/column_family.py b/packages/google-cloud-bigtable/google/cloud/bigtable/column_family.py index cb7dcdc4ff60..c34e75ed2c1f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/column_family.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/column_family.py @@ -15,62 +15,13 @@ """User friendly container for Google Cloud Bigtable Column Family.""" -import datetime - -from google.protobuf import duration_pb2 - +from google.cloud import _helpers from google.cloud.bigtable._generated import ( table_pb2 as table_v2_pb2) from google.cloud.bigtable._generated import ( bigtable_table_admin_pb2 as table_admin_v2_pb2) -def _timedelta_to_duration_pb(timedelta_val): - """Convert a Python timedelta object to a duration protobuf. - - .. note:: - - The Python timedelta has a granularity of microseconds while - the protobuf duration type has a duration of nanoseconds. - - :type timedelta_val: :class:`datetime.timedelta` - :param timedelta_val: A timedelta object. - - :rtype: :class:`google.protobuf.duration_pb2.Duration` - :returns: A duration object equivalent to the time delta. - """ - seconds_decimal = timedelta_val.total_seconds() - # Truncate the parts other than the integer. - seconds = int(seconds_decimal) - if seconds_decimal < 0: - signed_micros = timedelta_val.microseconds - 10**6 - else: - signed_micros = timedelta_val.microseconds - # Convert nanoseconds to microseconds. - nanos = 1000 * signed_micros - return duration_pb2.Duration(seconds=seconds, nanos=nanos) - - -def _duration_pb_to_timedelta(duration_pb): - """Convert a duration protobuf to a Python timedelta object. - - .. note:: - - The Python timedelta has a granularity of microseconds while - the protobuf duration type has a duration of nanoseconds. - - :type duration_pb: :class:`google.protobuf.duration_pb2.Duration` - :param duration_pb: A protobuf duration object. 
- - :rtype: :class:`datetime.timedelta` - :returns: The converted timedelta object. - """ - return datetime.timedelta( - seconds=duration_pb.seconds, - microseconds=(duration_pb.nanos / 1000.0), - ) - - class GarbageCollectionRule(object): """Garbage collection rule for column families within a table. @@ -137,7 +88,7 @@ def to_pb(self): :rtype: :class:`.table_v2_pb2.GcRule` :returns: The converted current object. """ - max_age = _timedelta_to_duration_pb(self.max_age) + max_age = _helpers._timedelta_to_duration_pb(self.max_age) return table_v2_pb2.GcRule(max_age=max_age) @@ -325,7 +276,7 @@ def _gc_rule_from_pb(gc_rule_pb): if rule_name == 'max_num_versions': return MaxVersionsGCRule(gc_rule_pb.max_num_versions) elif rule_name == 'max_age': - max_age = _duration_pb_to_timedelta(gc_rule_pb.max_age) + max_age = _helpers._duration_pb_to_timedelta(gc_rule_pb.max_age) return MaxAgeGCRule(max_age) elif rule_name == 'union': return GCRuleUnion([_gc_rule_from_pb(rule) diff --git a/packages/google-cloud-bigtable/unit_tests/test_column_family.py b/packages/google-cloud-bigtable/unit_tests/test_column_family.py index 36f4c99c0032..126a18da3003 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_column_family.py +++ b/packages/google-cloud-bigtable/unit_tests/test_column_family.py @@ -16,75 +16,6 @@ import unittest -class Test__timedelta_to_duration_pb(unittest.TestCase): - - def _call_fut(self, *args, **kwargs): - from google.cloud.bigtable.column_family import ( - _timedelta_to_duration_pb) - - return _timedelta_to_duration_pb(*args, **kwargs) - - def test_it(self): - import datetime - from google.protobuf import duration_pb2 - - seconds = microseconds = 1 - timedelta_val = datetime.timedelta(seconds=seconds, - microseconds=microseconds) - result = self._call_fut(timedelta_val) - self.assertIsInstance(result, duration_pb2.Duration) - self.assertEqual(result.seconds, seconds) - self.assertEqual(result.nanos, 1000 * microseconds) - - def test_with_negative_microseconds(self): - import datetime - from google.protobuf import duration_pb2 - - seconds = 1 - microseconds = -5 - timedelta_val = datetime.timedelta(seconds=seconds, - microseconds=microseconds) - result = self._call_fut(timedelta_val) - self.assertIsInstance(result, duration_pb2.Duration) - self.assertEqual(result.seconds, seconds - 1) - self.assertEqual(result.nanos, 10**9 + 1000 * microseconds) - - def test_with_negative_seconds(self): - import datetime - from google.protobuf import duration_pb2 - - seconds = -1 - microseconds = 5 - timedelta_val = datetime.timedelta(seconds=seconds, - microseconds=microseconds) - result = self._call_fut(timedelta_val) - self.assertIsInstance(result, duration_pb2.Duration) - self.assertEqual(result.seconds, seconds + 1) - self.assertEqual(result.nanos, -(10**9 - 1000 * microseconds)) - - -class Test__duration_pb_to_timedelta(unittest.TestCase): - - def _call_fut(self, *args, **kwargs): - from google.cloud.bigtable.column_family import ( - _duration_pb_to_timedelta) - - return _duration_pb_to_timedelta(*args, **kwargs) - - def test_it(self): - import datetime - from google.protobuf import duration_pb2 - - seconds = microseconds = 1 - duration_pb = duration_pb2.Duration(seconds=seconds, - nanos=1000 * microseconds) - timedelta_val = datetime.timedelta(seconds=seconds, - microseconds=microseconds) - result = self._call_fut(duration_pb) - self.assertIsInstance(result, datetime.timedelta) - self.assertEqual(result, timedelta_val) - - class TestMaxVersionsGCRule(unittest.TestCase): @staticmethod From 
73cc800a27efdbacd7114f7900e27ee910c8fa9b Mon Sep 17 00:00:00 2001 From: Thomas Schultz Date: Tue, 31 Jan 2017 09:17:12 -0500 Subject: [PATCH 036/892] Updates for pycodestyle. (#2973) --- packages/google-cloud-bigtable/unit_tests/test_instance.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/unit_tests/test_instance.py b/packages/google-cloud-bigtable/unit_tests/test_instance.py index c243ca77a135..aefba45d9158 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_instance.py +++ b/packages/google-cloud-bigtable/unit_tests/test_instance.py @@ -254,8 +254,8 @@ def test_create(self): metadata=Any( type_url=type_url, value=metadata.SerializeToString(), - ) ) + ) # Patch the stub used by the API method. client._instance_stub = stub = _FakeStub(response_pb) From 4628c66bfeb21252fcf091c5907b9d59d26c5ab6 Mon Sep 17 00:00:00 2001 From: Gary Elliott Date: Thu, 9 Feb 2017 10:21:45 -0500 Subject: [PATCH 037/892] Remove invalid chunk validation Fixes #2980 --- .../google-cloud-bigtable/google/cloud/bigtable/row_data.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py index f293c93d3c43..60fc1f0ef1e8 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py @@ -366,8 +366,6 @@ def _validate_chunk_row_in_progress(self, chunk): """Helper for :meth:`_validate_chunk`""" assert self.state == self.ROW_IN_PROGRESS self._validate_chunk_status(chunk) - if not chunk.HasField('commit_row') and not chunk.reset_row: - _raise_if(not chunk.timestamp_micros or not chunk.value) _raise_if(chunk.row_key and chunk.row_key != self._row.row_key) _raise_if(chunk.HasField('family_name') and From fc6a8daa75bdd91b9f7d0e08ac721fa3631fbe7d Mon Sep 17 00:00:00 2001 From: Gary Elliott Date: Mon, 13 Feb 2017 15:49:03 -0500 Subject: [PATCH 038/892] remove invalid test case --- .../unit_tests/test_row_data.py | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/packages/google-cloud-bigtable/unit_tests/test_row_data.py b/packages/google-cloud-bigtable/unit_tests/test_row_data.py index 8a2fb6a7e5e6..b7fd9eec80b9 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_row_data.py +++ b/packages/google-cloud-bigtable/unit_tests/test_row_data.py @@ -416,21 +416,6 @@ def test_invalid_empty_chunk(self): with self.assertRaises(InvalidChunk): prd.consume_next() - def test_invalid_empty_second_chunk(self): - from google.cloud.bigtable.row_data import InvalidChunk - - chunks = _generate_cell_chunks(['', '']) - first = chunks[0] - first.row_key = b'RK' - first.family_name.value = 'A' - first.qualifier.value = b'C' - response = _ReadRowsResponseV2(chunks) - iterator = _MockCancellableIterator(response) - prd = self._make_one(iterator) - with self.assertRaises(InvalidChunk): - prd.consume_next() - - class TestPartialRowsData_JSON_acceptance_tests(unittest.TestCase): _json_tests = None From 404277e896712cc06f75fd4cde6f855e56cda780 Mon Sep 17 00:00:00 2001 From: Gary Elliott Date: Mon, 13 Feb 2017 15:59:00 -0500 Subject: [PATCH 039/892] fix spacing --- packages/google-cloud-bigtable/unit_tests/test_row_data.py | 1 + 1 file changed, 1 insertion(+) diff --git a/packages/google-cloud-bigtable/unit_tests/test_row_data.py b/packages/google-cloud-bigtable/unit_tests/test_row_data.py index b7fd9eec80b9..7e993bca5d0d 100644 --- 
a/packages/google-cloud-bigtable/unit_tests/test_row_data.py +++ b/packages/google-cloud-bigtable/unit_tests/test_row_data.py @@ -416,6 +416,7 @@ def test_invalid_empty_chunk(self): with self.assertRaises(InvalidChunk): prd.consume_next() + class TestPartialRowsData_JSON_acceptance_tests(unittest.TestCase): _json_tests = None From 943f0e8ddc052d8a3e2bae6032166afb7171a68f Mon Sep 17 00:00:00 2001 From: Gary Elliott Date: Tue, 14 Feb 2017 09:54:35 -0500 Subject: [PATCH 040/892] update whitespace --- packages/google-cloud-bigtable/unit_tests/test_row_data.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/unit_tests/test_row_data.py b/packages/google-cloud-bigtable/unit_tests/test_row_data.py index 7e993bca5d0d..cc90ced67dcc 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_row_data.py +++ b/packages/google-cloud-bigtable/unit_tests/test_row_data.py @@ -416,7 +416,7 @@ def test_invalid_empty_chunk(self): with self.assertRaises(InvalidChunk): prd.consume_next() - + class TestPartialRowsData_JSON_acceptance_tests(unittest.TestCase): _json_tests = None From 064bd137acdddf9b388dfc1dea5135d2299d5f11 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 14 Feb 2017 12:25:25 -0500 Subject: [PATCH 041/892] Prep spanner release. --- .../google/cloud/bigtable/client.py | 17 ++++++++++------- .../google/cloud/bigtable/cluster.py | 4 ++-- .../google/cloud/bigtable/table.py | 2 +- 3 files changed, 13 insertions(+), 10 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py index 38a103c7c005..06b35c6d9e94 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py @@ -18,12 +18,14 @@ In the hierarchy of API concepts -* a :class:`Client` owns an :class:`.Instance` -* an :class:`.Instance` owns a :class:`~google.cloud.bigtable.table.Table` +* a :class:`~google.cloud.bigtable.client.Client` owns an + :class:`~google.cloud.bigtable.instance.Instance` +* an :class:`~google.cloud.bigtable.instance.Instance` owns a + :class:`~google.cloud.bigtable.table.Table` * a :class:`~google.cloud.bigtable.table.Table` owns a :class:`~.column_family.ColumnFamily` -* a :class:`~google.cloud.bigtable.table.Table` owns a :class:`~.row.Row` - (and all the cells in the row) +* a :class:`~google.cloud.bigtable.table.Table` owns a + :class:`~google.cloud.bigtable.row.Row` (and all the cells in the row) """ @@ -342,7 +344,7 @@ def instance(self, instance_id, location=_EXISTING_INSTANCE_LOCATION_ID, :param serve_nodes: (Optional) The number of nodes in the instance's cluster; used to set up the instance's cluster. - :rtype: :class:`.Instance` + :rtype: :class:`~google.cloud.bigtable.instance.Instance` :returns: an instance owned by this client. """ return Instance(instance_id, self, location, @@ -353,8 +355,9 @@ def list_instances(self): :rtype: tuple :returns: A pair of results, the first is a list of - :class:`.Instance` objects returned and the second is a - list of strings (the failed locations in the request). + :class:`~google.cloud.bigtable.instance.Instance` objects + returned and the second is a list of strings (the failed + locations in the request). 
""" request_pb = bigtable_instance_admin_pb2.ListInstancesRequest( parent=self.project_name) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py index c2418576dde9..80b9068958db 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py @@ -72,7 +72,7 @@ class Cluster(object): :type cluster_id: str :param cluster_id: The ID of the cluster. - :type instance: :class:`.instance.Instance` + :type instance: :class:`~google.cloud.bigtable.instance.Instance` :param instance: The instance where the cluster resides. :type serve_nodes: int @@ -104,7 +104,7 @@ def from_pb(cls, cluster_pb, instance): :type cluster_pb: :class:`instance_pb2.Cluster` :param cluster_pb: A cluster protobuf object. - :type instance: :class:`.instance.Instance>` + :type instance: :class:`~google.cloud.bigtable.instance.Instance>` :param instance: The instance that owns the cluster. :rtype: :class:`Cluster` diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index f2120ddc5416..3fbd198d6b65 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -49,7 +49,7 @@ class Table(object): :type table_id: str :param table_id: The ID of the table. - :type instance: :class:`Instance <.instance.Instance>` + :type instance: :class:`~google.cloud.bigtable.instance.Instance` :param instance: The instance that owns the table. """ From 77b01d4715c6d4727962a49e139b8a4d394a3188 Mon Sep 17 00:00:00 2001 From: Thomas Schultz Date: Wed, 15 Feb 2017 15:34:59 -0500 Subject: [PATCH 042/892] Update Beta classifiers to Alpha for specified services. --- packages/google-cloud-bigtable/setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 08cd2eb7d6b4..7def41afac93 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -35,7 +35,7 @@ 'include_package_data': True, 'zip_safe': False, 'classifiers': [ - 'Development Status :: 4 - Beta', + 'Development Status :: 3 - Alpha', 'Intended Audience :: Developers', 'License :: OSI Approved :: Apache Software License', 'Operating System :: OS Independent', From 9688b3359e66519b7b73a8f0bf747ad216e1f4d1 Mon Sep 17 00:00:00 2001 From: Thomas Schultz Date: Thu, 16 Feb 2017 14:19:53 -0500 Subject: [PATCH 043/892] Update core dependency to google-cloud-core >= 0.23.0, < 0.24dev. (#3028) * Update core dependency to google-cloud-core >= 0.23.0, < 0.24dev. --- packages/google-cloud-bigtable/setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 7def41afac93..fc35e8da83b1 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -50,7 +50,7 @@ REQUIREMENTS = [ - 'google-cloud-core >= 0.22.1, < 0.23dev', + 'google-cloud-core >= 0.23.0, < 0.24dev', 'grpcio >= 1.0.2, < 2.0dev', ] From 3a4dcd3ebaffb87bb3103083e306e792b481ecf8 Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Fri, 24 Feb 2017 11:30:18 -0800 Subject: [PATCH 044/892] Upgrading all versions for umbrella release. 
--- packages/google-cloud-bigtable/setup.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index fc35e8da83b1..06db1ac10ce1 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -50,13 +50,13 @@ REQUIREMENTS = [ - 'google-cloud-core >= 0.23.0, < 0.24dev', + 'google-cloud-core >= 0.23.1, < 0.24dev', 'grpcio >= 1.0.2, < 2.0dev', ] setup( name='google-cloud-bigtable', - version='0.22.0', + version='0.23.0', description='Python Client for Google Cloud Bigtable', long_description=README, namespace_packages=[ From 56bd7d190c47a22cbee0608bf8efcf23a7b84353 Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Thu, 2 Mar 2017 16:54:21 -0800 Subject: [PATCH 045/892] Sending x-goog-api-client header in Bigtable. We missed this because it does not use GAPIC (the most recent generated Bigtable GAPIC surface is 5+ months old). --- .../google/cloud/bigtable/__init__.py | 3 + .../google/cloud/bigtable/client.py | 33 +++-- packages/google-cloud-bigtable/setup.py | 2 +- .../unit_tests/test_client.py | 139 ++++++------------ 4 files changed, 74 insertions(+), 103 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable/__init__.py index c22cb3fc5379..0c815b8b0988 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/__init__.py @@ -15,4 +15,7 @@ """Google Cloud Bigtable API package.""" +from pkg_resources import get_distribution +__version__ = get_distribution('google-cloud-bigtable').version + from google.cloud.bigtable.client import Client diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py index 06b35c6d9e94..2f552b1c2564 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py @@ -32,21 +32,24 @@ import os import google.auth.credentials +from google.gax.utils import metrics from google.longrunning import operations_grpc from google.cloud._helpers import make_insecure_stub from google.cloud._helpers import make_secure_stub +from google.cloud._http import DEFAULT_USER_AGENT +from google.cloud.client import _ClientFactoryMixin +from google.cloud.client import _ClientProjectMixin +from google.cloud.credentials import get_credentials +from google.cloud.environment_vars import BIGTABLE_EMULATOR + +from google.cloud.bigtable import __version__ from google.cloud.bigtable._generated import bigtable_instance_admin_pb2 from google.cloud.bigtable._generated import bigtable_pb2 from google.cloud.bigtable._generated import bigtable_table_admin_pb2 from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES from google.cloud.bigtable.instance import Instance from google.cloud.bigtable.instance import _EXISTING_INSTANCE_LOCATION_ID -from google.cloud.client import _ClientFactoryMixin -from google.cloud.client import _ClientProjectMixin -from google.cloud._http import DEFAULT_USER_AGENT -from google.cloud.credentials import get_credentials -from google.cloud.environment_vars import BIGTABLE_EMULATOR TABLE_ADMIN_HOST = 'bigtableadmin.googleapis.com' @@ -67,10 +70,17 @@ READ_ONLY_SCOPE = 'https://www.googleapis.com/auth/bigtable.data.readonly' """Scope for reading table data.""" +_METRICS_HEADERS = ( + ('gccl', __version__), +) 
+_HEADER_STR = metrics.stringify(metrics.fill(_METRICS_HEADERS)) +_GRPC_EXTRA_OPTIONS = ( + ('x-goog-api-client', _HEADER_STR), +) # NOTE: 'grpc.max_message_length' will no longer be recognized in # grpcio 1.1 and later. _MAX_MSG_LENGTH_100MB = 100 * 1024 * 1024 -_GRPC_MAX_LENGTH_OPTIONS = ( +_GRPC_MAX_LENGTH_OPTIONS = _GRPC_EXTRA_OPTIONS + ( ('grpc.max_message_length', _MAX_MSG_LENGTH_100MB), ('grpc.max_receive_message_length', _MAX_MSG_LENGTH_100MB), ) @@ -107,7 +117,7 @@ def _make_instance_stub(client): return make_secure_stub( client.credentials, client.user_agent, bigtable_instance_admin_pb2.BigtableInstanceAdminStub, - INSTANCE_ADMIN_HOST) + INSTANCE_ADMIN_HOST, extra_options=_GRPC_EXTRA_OPTIONS) else: return make_insecure_stub( bigtable_instance_admin_pb2.BigtableInstanceAdminStub, @@ -127,9 +137,10 @@ def _make_operations_stub(client): :returns: A gRPC stub object. """ if client.emulator_host is None: - return make_secure_stub(client.credentials, client.user_agent, - operations_grpc.OperationsStub, - OPERATIONS_API_HOST) + return make_secure_stub( + client.credentials, client.user_agent, + operations_grpc.OperationsStub, + OPERATIONS_API_HOST, extra_options=_GRPC_EXTRA_OPTIONS) else: return make_insecure_stub(operations_grpc.OperationsStub, client.emulator_host) @@ -148,7 +159,7 @@ def _make_table_stub(client): return make_secure_stub( client.credentials, client.user_agent, bigtable_table_admin_pb2.BigtableTableAdminStub, - TABLE_ADMIN_HOST) + TABLE_ADMIN_HOST, extra_options=_GRPC_EXTRA_OPTIONS) else: return make_insecure_stub( bigtable_table_admin_pb2.BigtableTableAdminStub, diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 06db1ac10ce1..25e12f76b011 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -51,7 +51,7 @@ REQUIREMENTS = [ 'google-cloud-core >= 0.23.1, < 0.24dev', - 'grpcio >= 1.0.2, < 2.0dev', + 'google-gax>=0.15.7, <0.16dev', ] setup( diff --git a/packages/google-cloud-bigtable/unit_tests/test_client.py b/packages/google-cloud-bigtable/unit_tests/test_client.py index b7b666e819df..8761dc360c81 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_client.py +++ b/packages/google-cloud-bigtable/unit_tests/test_client.py @@ -36,39 +36,24 @@ def _call_fut(self, client): return _make_data_stub(client) - def test_without_emulator(self): - from google.cloud._testing import _Monkey + @mock.patch('google.cloud.bigtable.client.make_secure_stub', + return_value=mock.sentinel.stub) + def test_without_emulator(self, make_stub): from google.cloud.bigtable import client as MUT credentials = _make_credentials() user_agent = 'you-sir-age-int' client = _Client(credentials, user_agent) - fake_stub = object() - make_secure_stub_args = [] - - def mock_make_secure_stub(*args, **kwargs): - make_secure_stub_args.append(args) - make_secure_stub_args.append(kwargs) - return fake_stub - - with _Monkey(MUT, make_secure_stub=mock_make_secure_stub): - result = self._call_fut(client) - - extra_options = {'extra_options': ( - ('grpc.max_message_length', 104857600), - ('grpc.max_receive_message_length', 104857600) - )} - self.assertIs(result, fake_stub) - self.assertEqual(make_secure_stub_args, [ - ( - client.credentials, - client.user_agent, - MUT.bigtable_pb2.BigtableStub, - MUT.DATA_API_HOST, - ), - extra_options, - ]) + result = self._call_fut(client) + self.assertIs(result, mock.sentinel.stub) + make_stub.assert_called_once_with( + client.credentials, + client.user_agent, + 
MUT.bigtable_pb2.BigtableStub, + MUT.DATA_API_HOST, + extra_options=MUT._GRPC_MAX_LENGTH_OPTIONS, + ) def test_with_emulator(self): from google.cloud._testing import _Monkey @@ -103,33 +88,24 @@ def _call_fut(self, client): return _make_instance_stub(client) - def test_without_emulator(self): - from google.cloud._testing import _Monkey + @mock.patch('google.cloud.bigtable.client.make_secure_stub', + return_value=mock.sentinel.stub) + def test_without_emulator(self, make_stub): from google.cloud.bigtable import client as MUT credentials = _make_credentials() user_agent = 'you-sir-age-int' client = _Client(credentials, user_agent) - fake_stub = object() - make_secure_stub_args = [] - - def mock_make_secure_stub(*args): - make_secure_stub_args.append(args) - return fake_stub - - with _Monkey(MUT, make_secure_stub=mock_make_secure_stub): - result = self._call_fut(client) - - self.assertIs(result, fake_stub) - self.assertEqual(make_secure_stub_args, [ - ( - client.credentials, - client.user_agent, - MUT.bigtable_instance_admin_pb2.BigtableInstanceAdminStub, - MUT.INSTANCE_ADMIN_HOST, - ), - ]) + result = self._call_fut(client) + self.assertIs(result, mock.sentinel.stub) + make_stub.assert_called_once_with( + client.credentials, + client.user_agent, + MUT.bigtable_instance_admin_pb2.BigtableInstanceAdminStub, + MUT.INSTANCE_ADMIN_HOST, + extra_options=MUT._GRPC_EXTRA_OPTIONS, + ) def test_with_emulator(self): from google.cloud._testing import _Monkey @@ -164,35 +140,25 @@ def _call_fut(self, client): return _make_operations_stub(client) - def test_without_emulator(self): + @mock.patch('google.cloud.bigtable.client.make_secure_stub', + return_value=mock.sentinel.stub) + def test_without_emulator(self, make_stub): from google.longrunning import operations_grpc - - from google.cloud._testing import _Monkey from google.cloud.bigtable import client as MUT credentials = _make_credentials() user_agent = 'you-sir-age-int' client = _Client(credentials, user_agent) - fake_stub = object() - make_secure_stub_args = [] - - def mock_make_secure_stub(*args): - make_secure_stub_args.append(args) - return fake_stub - - with _Monkey(MUT, make_secure_stub=mock_make_secure_stub): - result = self._call_fut(client) - - self.assertIs(result, fake_stub) - self.assertEqual(make_secure_stub_args, [ - ( - client.credentials, - client.user_agent, - operations_grpc.OperationsStub, - MUT.OPERATIONS_API_HOST, - ), - ]) + result = self._call_fut(client) + self.assertIs(result, mock.sentinel.stub) + make_stub.assert_called_once_with( + client.credentials, + client.user_agent, + operations_grpc.OperationsStub, + MUT.OPERATIONS_API_HOST, + extra_options=MUT._GRPC_EXTRA_OPTIONS, + ) def test_with_emulator(self): from google.longrunning import operations_grpc @@ -229,33 +195,24 @@ def _call_fut(self, client): return _make_table_stub(client) - def test_without_emulator(self): - from google.cloud._testing import _Monkey + @mock.patch('google.cloud.bigtable.client.make_secure_stub', + return_value=mock.sentinel.stub) + def test_without_emulator(self, make_stub): from google.cloud.bigtable import client as MUT credentials = _make_credentials() user_agent = 'you-sir-age-int' client = _Client(credentials, user_agent) - fake_stub = object() - make_secure_stub_args = [] - - def mock_make_secure_stub(*args): - make_secure_stub_args.append(args) - return fake_stub - - with _Monkey(MUT, make_secure_stub=mock_make_secure_stub): - result = self._call_fut(client) - - self.assertIs(result, fake_stub) - self.assertEqual(make_secure_stub_args, [ - 
( - client.credentials, - client.user_agent, - MUT.bigtable_table_admin_pb2.BigtableTableAdminStub, - MUT.TABLE_ADMIN_HOST, - ), - ]) + result = self._call_fut(client) + self.assertIs(result, mock.sentinel.stub) + make_stub.assert_called_once_with( + client.credentials, + client.user_agent, + MUT.bigtable_table_admin_pb2.BigtableTableAdminStub, + MUT.TABLE_ADMIN_HOST, + extra_options=MUT._GRPC_EXTRA_OPTIONS, + ) def test_with_emulator(self): from google.cloud._testing import _Monkey From 5f450c834836c3aa3a6ba16e25cfd26f0351bf42 Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Fri, 3 Mar 2017 11:18:05 -0800 Subject: [PATCH 046/892] Cut version 0.23.1 of Bigtable. --- packages/google-cloud-bigtable/setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 25e12f76b011..a7013bc4259c 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -56,7 +56,7 @@ setup( name='google-cloud-bigtable', - version='0.23.0', + version='0.23.1', description='Python Client for Google Cloud Bigtable', long_description=README, namespace_packages=[ From f76922c0e288d595c3a85b013c98e916d8c8b224 Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Thu, 9 Mar 2017 09:41:49 -0800 Subject: [PATCH 047/892] Removing _Dummy test helper classes (in favor of mocks). --- .../google-cloud-bigtable/unit_tests/test_row_data.py | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/packages/google-cloud-bigtable/unit_tests/test_row_data.py b/packages/google-cloud-bigtable/unit_tests/test_row_data.py index cc90ced67dcc..eed5e77c5630 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_row_data.py +++ b/packages/google-cloud-bigtable/unit_tests/test_row_data.py @@ -15,6 +15,8 @@ import unittest +import mock + class TestCell(unittest.TestCase): @@ -383,7 +385,7 @@ def test__copy_from_previous_filled(self): def test__save_row_no_cell(self): ROW_KEY = 'RK' prd = self._make_one([]) - row = prd._row = _Dummy(row_key=ROW_KEY) + row = prd._row = mock.Mock(row_key=ROW_KEY, spec=['row_key']) prd._cell = None prd._save_current_row() self.assertIs(prd._rows[ROW_KEY], row) @@ -672,12 +674,6 @@ def __next__(self): # pragma: NO COVER Py3k return self.next() -class _Dummy(object): - - def __init__(self, **kw): - self.__dict__.update(kw) - - class _PartialCellData(object): row_key = '' From 771b0988d77bb844f0e77ecb48c25d1c59dd4543 Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Thu, 23 Mar 2017 14:49:26 -0700 Subject: [PATCH 048/892] CI Rehash (#3146) --- packages/google-cloud-bigtable/.flake8 | 11 + packages/google-cloud-bigtable/LICENSE | 202 ++++++++ packages/google-cloud-bigtable/MANIFEST.in | 8 +- .../google/cloud/bigtable/__init__.py | 3 + packages/google-cloud-bigtable/nox.py | 86 +++ packages/google-cloud-bigtable/setup.py | 2 +- .../google-cloud-bigtable/tests/__init__.py | 0 .../google-cloud-bigtable/tests/system.py | 490 ++++++++++++++++++ .../{unit_tests => tests/unit}/__init__.py | 0 .../{unit_tests => tests/unit}/_testing.py | 0 .../unit}/read-rows-acceptance-test.json | 0 .../{unit_tests => tests/unit}/test_client.py | 2 +- .../unit}/test_cluster.py | 8 +- .../unit}/test_column_family.py | 6 +- .../unit}/test_instance.py | 14 +- .../{unit_tests => tests/unit}/test_row.py | 12 +- .../unit}/test_row_data.py | 0 .../unit}/test_row_filters.py | 0 .../{unit_tests => tests/unit}/test_table.py | 12 +- packages/google-cloud-bigtable/tox.ini | 35 -- 20 files 
changed, 824 insertions(+), 67 deletions(-) create mode 100644 packages/google-cloud-bigtable/.flake8 create mode 100644 packages/google-cloud-bigtable/LICENSE create mode 100644 packages/google-cloud-bigtable/nox.py create mode 100644 packages/google-cloud-bigtable/tests/__init__.py create mode 100644 packages/google-cloud-bigtable/tests/system.py rename packages/google-cloud-bigtable/{unit_tests => tests/unit}/__init__.py (100%) rename packages/google-cloud-bigtable/{unit_tests => tests/unit}/_testing.py (100%) rename packages/google-cloud-bigtable/{unit_tests => tests/unit}/read-rows-acceptance-test.json (100%) rename packages/google-cloud-bigtable/{unit_tests => tests/unit}/test_client.py (99%) rename packages/google-cloud-bigtable/{unit_tests => tests/unit}/test_cluster.py (98%) rename packages/google-cloud-bigtable/{unit_tests => tests/unit}/test_column_family.py (99%) rename packages/google-cloud-bigtable/{unit_tests => tests/unit}/test_instance.py (98%) rename packages/google-cloud-bigtable/{unit_tests => tests/unit}/test_row.py (99%) rename packages/google-cloud-bigtable/{unit_tests => tests/unit}/test_row_data.py (100%) rename packages/google-cloud-bigtable/{unit_tests => tests/unit}/test_row_filters.py (100%) rename packages/google-cloud-bigtable/{unit_tests => tests/unit}/test_table.py (98%) delete mode 100644 packages/google-cloud-bigtable/tox.ini diff --git a/packages/google-cloud-bigtable/.flake8 b/packages/google-cloud-bigtable/.flake8 new file mode 100644 index 000000000000..5a380e53da40 --- /dev/null +++ b/packages/google-cloud-bigtable/.flake8 @@ -0,0 +1,11 @@ +[flake8] +exclude = + # BigTable includes generated code in the manual layer; + # do not lint this. + google/cloud/bigtable/_generated/*.py, + + # Standard linting exemptions. + __pycache__, + .git, + *.pyc, + conf.py diff --git a/packages/google-cloud-bigtable/LICENSE b/packages/google-cloud-bigtable/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/packages/google-cloud-bigtable/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/packages/google-cloud-bigtable/MANIFEST.in b/packages/google-cloud-bigtable/MANIFEST.in index cb3a2b9ef4fa..9f7100c9528a 100644 --- a/packages/google-cloud-bigtable/MANIFEST.in +++ b/packages/google-cloud-bigtable/MANIFEST.in @@ -1,4 +1,4 @@ -include README.rst -graft google -graft unit_tests -global-exclude *.pyc +include README.rst LICENSE +recursive-include google *.json *.proto +recursive-include unit_tests * +global-exclude *.pyc __pycache__ diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable/__init__.py index 0c815b8b0988..2886f24b67a1 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/__init__.py @@ -19,3 +19,6 @@ __version__ = get_distribution('google-cloud-bigtable').version from google.cloud.bigtable.client import Client + + +__all__ = ['__version__', 'Client'] diff --git a/packages/google-cloud-bigtable/nox.py b/packages/google-cloud-bigtable/nox.py new file mode 100644 index 000000000000..eb8fe574c0c0 --- /dev/null +++ b/packages/google-cloud-bigtable/nox.py @@ -0,0 +1,86 @@ +# Copyright 2016 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +import os + +import nox + + +@nox.session +@nox.parametrize('python_version', ['2.7', '3.4', '3.5', '3.6']) +def unit_tests(session, python_version): + """Run the unit test suite.""" + + # Run unit tests against all supported versions of Python. + session.interpreter = 'python%s' % python_version + + # Install all test dependencies, then install this package in-place. + session.install('mock', 'pytest', 'pytest-cov', '../core/') + session.install('-e', '.') + + # Run py.test against the unit tests. + session.run('py.test', '--quiet', + '--cov=google.cloud.bigtable', '--cov=tests.unit', '--cov-append', + '--cov-config=.coveragerc', '--cov-report=', '--cov-fail-under=97', + 'tests/unit', + ) + + +@nox.session +@nox.parametrize('python_version', ['2.7', '3.6']) +def system_tests(session, python_version): + """Run the system test suite.""" + + # Sanity check: Only run system tests if the environment variable is set. + if not os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', ''): + return + + # Run the system tests against latest Python 2 and Python 3 only. + session.interpreter = 'python%s' % python_version + + # Install all test dependencies, then install this package into the + # virutalenv's dist-packages. + session.install('mock', 'pytest', + '../core/', '../test_utils/') + session.install('.') + + # Run py.test against the system tests. + session.run('py.test', '--quiet', 'tests/system.py') + + +@nox.session +def lint(session): + """Run flake8. + + Returns a failure if flake8 finds linting errors or sufficiently + serious code quality issues. 
+ """ + session.interpreter = 'python3.6' + session.install('flake8') + session.install('.') + session.run('flake8', 'google/cloud/bigtable') + + +@nox.session +def cover(session): + """Run the final coverage report. + + This outputs the coverage report aggregating coverage from the unit + test runs (not system test runs), and then erases coverage data. + """ + session.interpreter = 'python3.6' + session.install('coverage', 'pytest-cov') + session.run('coverage', 'report', '--show-missing', '--fail-under=100') + session.run('coverage', 'erase') diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index a7013bc4259c..b4fa4483490a 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -63,7 +63,7 @@ 'google', 'google.cloud', ], - packages=find_packages(), + packages=find_packages(exclude=('unit_tests*',)), install_requires=REQUIREMENTS, **SETUP_BASE ) diff --git a/packages/google-cloud-bigtable/tests/__init__.py b/packages/google-cloud-bigtable/tests/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py new file mode 100644 index 000000000000..faed85fdb302 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/system.py @@ -0,0 +1,490 @@ +# Copyright 2016 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import datetime +import operator +import os + +import unittest + +from google.cloud._helpers import _datetime_from_microseconds +from google.cloud._helpers import _microseconds_from_datetime +from google.cloud._helpers import UTC +from google.cloud.bigtable.client import Client +from google.cloud.bigtable.column_family import MaxVersionsGCRule +from google.cloud.bigtable.row_filters import ApplyLabelFilter +from google.cloud.bigtable.row_filters import ColumnQualifierRegexFilter +from google.cloud.bigtable.row_filters import RowFilterChain +from google.cloud.bigtable.row_filters import RowFilterUnion +from google.cloud.bigtable.row_data import Cell +from google.cloud.bigtable.row_data import PartialRowData +from google.cloud.environment_vars import BIGTABLE_EMULATOR + +from test_utils.retry import RetryErrors +from test_utils.retry import RetryResult +from test_utils.system import EmulatorCreds +from test_utils.system import unique_resource_id + + +LOCATION_ID = 'us-central1-c' +INSTANCE_ID = 'g-c-p' + unique_resource_id('-') +TABLE_ID = 'google-cloud-python-test-table' +COLUMN_FAMILY_ID1 = u'col-fam-id1' +COLUMN_FAMILY_ID2 = u'col-fam-id2' +COL_NAME1 = b'col-name1' +COL_NAME2 = b'col-name2' +COL_NAME3 = b'col-name3-but-other-fam' +CELL_VAL1 = b'cell-val' +CELL_VAL2 = b'cell-val-newer' +CELL_VAL3 = b'altcol-cell-val' +CELL_VAL4 = b'foo' +ROW_KEY = b'row-key' +ROW_KEY_ALT = b'row-key-alt' +EXISTING_INSTANCES = [] + + +class Config(object): + """Run-time configuration to be modified at set-up. + + This is a mutable stand-in to allow test set-up to modify + global state. 
+ """ + CLIENT = None + INSTANCE = None + IN_EMULATOR = False + + +def _wait_until_complete(operation, max_attempts=5): + """Wait until an operation has completed. + + :type operation: :class:`google.cloud.operation.Operation` + :param operation: Operation that has not completed. + + :type max_attempts: int + :param max_attempts: (Optional) The maximum number of times to check if + the operation has completed. Defaults to 5. + + :rtype: bool + :returns: Boolean indicating if the operation is complete. + """ + + def _operation_complete(result): + return result + + retry = RetryResult(_operation_complete, max_tries=max_attempts) + return retry(operation.poll)() + + +def _retry_on_unavailable(exc): + """Retry only errors whose status code is 'UNAVAILABLE'.""" + from grpc import StatusCode + return exc.code() == StatusCode.UNAVAILABLE + + +def setUpModule(): + from google.cloud.exceptions import GrpcRendezvous + + Config.IN_EMULATOR = os.getenv(BIGTABLE_EMULATOR) is not None + + if Config.IN_EMULATOR: + credentials = EmulatorCreds() + Config.CLIENT = Client(admin=True, credentials=credentials) + else: + Config.CLIENT = Client(admin=True) + + Config.INSTANCE = Config.CLIENT.instance(INSTANCE_ID, LOCATION_ID) + + if not Config.IN_EMULATOR: + retry = RetryErrors(GrpcRendezvous, + error_predicate=_retry_on_unavailable) + instances, failed_locations = retry(Config.CLIENT.list_instances)() + + if len(failed_locations) != 0: + raise ValueError('List instances failed in module set up.') + + EXISTING_INSTANCES[:] = instances + + # After listing, create the test instance. + created_op = Config.INSTANCE.create() + if not _wait_until_complete(created_op): + raise RuntimeError('Instance creation exceed 5 seconds.') + + +def tearDownModule(): + if not Config.IN_EMULATOR: + Config.INSTANCE.delete() + + +class TestInstanceAdminAPI(unittest.TestCase): + + def setUp(self): + if Config.IN_EMULATOR: + self.skipTest( + 'Instance Admin API not supported in Bigtable emulator') + self.instances_to_delete = [] + + def tearDown(self): + for instance in self.instances_to_delete: + instance.delete() + + def test_list_instances(self): + instances, failed_locations = Config.CLIENT.list_instances() + self.assertEqual(failed_locations, []) + # We have added one new instance in `setUpModule`. + self.assertEqual(len(instances), len(EXISTING_INSTANCES) + 1) + for instance in instances: + instance_existence = (instance in EXISTING_INSTANCES or + instance == Config.INSTANCE) + self.assertTrue(instance_existence) + + def test_reload(self): + # Use same arguments as Config.INSTANCE (created in `setUpModule`) + # so we can use reload() on a fresh instance. + instance = Config.CLIENT.instance(INSTANCE_ID, LOCATION_ID) + # Make sure metadata unset before reloading. + instance.display_name = None + + instance.reload() + self.assertEqual(instance.display_name, Config.INSTANCE.display_name) + + def test_create_instance(self): + ALT_INSTANCE_ID = 'new' + unique_resource_id('-') + instance = Config.CLIENT.instance(ALT_INSTANCE_ID, LOCATION_ID) + operation = instance.create() + # Make sure this instance gets deleted after the test case. + self.instances_to_delete.append(instance) + + # We want to make sure the operation completes. + self.assertTrue(_wait_until_complete(operation)) + + # Create a new instance instance and make sure it is the same. 
+ instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID, LOCATION_ID) + instance_alt.reload() + + self.assertEqual(instance, instance_alt) + self.assertEqual(instance.display_name, instance_alt.display_name) + + def test_update(self): + OLD_DISPLAY_NAME = Config.INSTANCE.display_name + NEW_DISPLAY_NAME = 'Foo Bar Baz' + Config.INSTANCE.display_name = NEW_DISPLAY_NAME + Config.INSTANCE.update() + + # Create a new instance instance and reload it. + instance_alt = Config.CLIENT.instance(INSTANCE_ID, None) + self.assertNotEqual(instance_alt.display_name, NEW_DISPLAY_NAME) + instance_alt.reload() + self.assertEqual(instance_alt.display_name, NEW_DISPLAY_NAME) + + # Make sure to put the instance back the way it was for the + # other test cases. + Config.INSTANCE.display_name = OLD_DISPLAY_NAME + Config.INSTANCE.update() + + +class TestTableAdminAPI(unittest.TestCase): + + @classmethod + def setUpClass(cls): + cls._table = Config.INSTANCE.table(TABLE_ID) + cls._table.create() + + @classmethod + def tearDownClass(cls): + cls._table.delete() + + def setUp(self): + self.tables_to_delete = [] + + def tearDown(self): + for table in self.tables_to_delete: + table.delete() + + def test_list_tables(self): + # Since `Config.INSTANCE` is newly created in `setUpModule`, the table + # created in `setUpClass` here will be the only one. + tables = Config.INSTANCE.list_tables() + self.assertEqual(tables, [self._table]) + + def test_create_table(self): + temp_table_id = 'foo-bar-baz-table' + temp_table = Config.INSTANCE.table(temp_table_id) + temp_table.create() + self.tables_to_delete.append(temp_table) + + # First, create a sorted version of our expected result. + name_attr = operator.attrgetter('name') + expected_tables = sorted([temp_table, self._table], key=name_attr) + + # Then query for the tables in the instance and sort them by + # name as well. + tables = Config.INSTANCE.list_tables() + sorted_tables = sorted(tables, key=name_attr) + self.assertEqual(sorted_tables, expected_tables) + + def test_create_column_family(self): + temp_table_id = 'foo-bar-baz-table' + temp_table = Config.INSTANCE.table(temp_table_id) + temp_table.create() + self.tables_to_delete.append(temp_table) + + self.assertEqual(temp_table.list_column_families(), {}) + gc_rule = MaxVersionsGCRule(1) + column_family = temp_table.column_family(COLUMN_FAMILY_ID1, + gc_rule=gc_rule) + column_family.create() + + col_fams = temp_table.list_column_families() + + self.assertEqual(len(col_fams), 1) + retrieved_col_fam = col_fams[COLUMN_FAMILY_ID1] + self.assertIs(retrieved_col_fam._table, column_family._table) + self.assertEqual(retrieved_col_fam.column_family_id, + column_family.column_family_id) + self.assertEqual(retrieved_col_fam.gc_rule, gc_rule) + + def test_update_column_family(self): + temp_table_id = 'foo-bar-baz-table' + temp_table = Config.INSTANCE.table(temp_table_id) + temp_table.create() + self.tables_to_delete.append(temp_table) + + gc_rule = MaxVersionsGCRule(1) + column_family = temp_table.column_family(COLUMN_FAMILY_ID1, + gc_rule=gc_rule) + column_family.create() + + # Check that our created table is as expected. + col_fams = temp_table.list_column_families() + self.assertEqual(col_fams, {COLUMN_FAMILY_ID1: column_family}) + + # Update the column family's GC rule and then try to update. + column_family.gc_rule = None + column_family.update() + + # Check that the update has propagated. 
+ col_fams = temp_table.list_column_families() + self.assertIsNone(col_fams[COLUMN_FAMILY_ID1].gc_rule) + + def test_delete_column_family(self): + temp_table_id = 'foo-bar-baz-table' + temp_table = Config.INSTANCE.table(temp_table_id) + temp_table.create() + self.tables_to_delete.append(temp_table) + + self.assertEqual(temp_table.list_column_families(), {}) + column_family = temp_table.column_family(COLUMN_FAMILY_ID1) + column_family.create() + + # Make sure the family is there before deleting it. + col_fams = temp_table.list_column_families() + self.assertEqual(list(col_fams.keys()), [COLUMN_FAMILY_ID1]) + + column_family.delete() + # Make sure we have successfully deleted it. + self.assertEqual(temp_table.list_column_families(), {}) + + +class TestDataAPI(unittest.TestCase): + + @classmethod + def setUpClass(cls): + cls._table = table = Config.INSTANCE.table(TABLE_ID) + table.create() + table.column_family(COLUMN_FAMILY_ID1).create() + table.column_family(COLUMN_FAMILY_ID2).create() + + @classmethod + def tearDownClass(cls): + # Will also delete any data contained in the table. + cls._table.delete() + + def _maybe_emulator_skip(self, message): + # NOTE: This method is necessary because ``Config.IN_EMULATOR`` + # is set at runtime rather than import time, which means we + # can't use the @unittest.skipIf decorator. + if Config.IN_EMULATOR: + self.skipTest(message) + + def setUp(self): + self.rows_to_delete = [] + + def tearDown(self): + for row in self.rows_to_delete: + row.clear() + row.delete() + row.commit() + + def _write_to_row(self, row1=None, row2=None, row3=None, row4=None): + timestamp1 = datetime.datetime.utcnow().replace(tzinfo=UTC) + timestamp1_micros = _microseconds_from_datetime(timestamp1) + # Truncate to millisecond granularity. + timestamp1_micros -= (timestamp1_micros % 1000) + timestamp1 = _datetime_from_microseconds(timestamp1_micros) + # 1000 microseconds is a millisecond + timestamp2 = timestamp1 + datetime.timedelta(microseconds=1000) + timestamp3 = timestamp1 + datetime.timedelta(microseconds=2000) + timestamp4 = timestamp1 + datetime.timedelta(microseconds=3000) + if row1 is not None: + row1.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1, + timestamp=timestamp1) + if row2 is not None: + row2.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL2, + timestamp=timestamp2) + if row3 is not None: + row3.set_cell(COLUMN_FAMILY_ID1, COL_NAME2, CELL_VAL3, + timestamp=timestamp3) + if row4 is not None: + row4.set_cell(COLUMN_FAMILY_ID2, COL_NAME3, CELL_VAL4, + timestamp=timestamp4) + + # Create the cells we will check. + cell1 = Cell(CELL_VAL1, timestamp1) + cell2 = Cell(CELL_VAL2, timestamp2) + cell3 = Cell(CELL_VAL3, timestamp3) + cell4 = Cell(CELL_VAL4, timestamp4) + return cell1, cell2, cell3, cell4 + + def test_read_large_cell_limit(self): + row = self._table.row(ROW_KEY) + self.rows_to_delete.append(row) + + number_of_bytes = 10 * 1024 * 1024 + data = b'1' * number_of_bytes # 10MB of 1's. + row.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, data) + row.commit() + + # Read back the contents of the row. + partial_row_data = self._table.read_row(ROW_KEY) + self.assertEqual(partial_row_data.row_key, ROW_KEY) + cell = partial_row_data.cells[COLUMN_FAMILY_ID1] + column = cell[COL_NAME1] + self.assertEqual(len(column), 1) + self.assertEqual(column[0].value, data) + + def test_read_row(self): + row = self._table.row(ROW_KEY) + self.rows_to_delete.append(row) + + cell1, cell2, cell3, cell4 = self._write_to_row(row, row, row, row) + row.commit() + + # Read back the contents of the row. 
+ partial_row_data = self._table.read_row(ROW_KEY) + self.assertEqual(partial_row_data.row_key, ROW_KEY) + + # Check the cells match. + ts_attr = operator.attrgetter('timestamp') + expected_row_contents = { + COLUMN_FAMILY_ID1: { + COL_NAME1: sorted([cell1, cell2], key=ts_attr, reverse=True), + COL_NAME2: [cell3], + }, + COLUMN_FAMILY_ID2: { + COL_NAME3: [cell4], + }, + } + self.assertEqual(partial_row_data.cells, expected_row_contents) + + def test_read_rows(self): + row = self._table.row(ROW_KEY) + row_alt = self._table.row(ROW_KEY_ALT) + self.rows_to_delete.extend([row, row_alt]) + + cell1, cell2, cell3, cell4 = self._write_to_row(row, row_alt, + row, row_alt) + row.commit() + row_alt.commit() + + rows_data = self._table.read_rows() + self.assertEqual(rows_data.rows, {}) + rows_data.consume_all() + + # NOTE: We should refrain from editing protected data on instances. + # Instead we should make the values public or provide factories + # for constructing objects with them. + row_data = PartialRowData(ROW_KEY) + row_data._chunks_encountered = True + row_data._committed = True + row_data._cells = { + COLUMN_FAMILY_ID1: { + COL_NAME1: [cell1], + COL_NAME2: [cell3], + }, + } + + row_alt_data = PartialRowData(ROW_KEY_ALT) + row_alt_data._chunks_encountered = True + row_alt_data._committed = True + row_alt_data._cells = { + COLUMN_FAMILY_ID1: { + COL_NAME1: [cell2], + }, + COLUMN_FAMILY_ID2: { + COL_NAME3: [cell4], + }, + } + + expected_rows = { + ROW_KEY: row_data, + ROW_KEY_ALT: row_alt_data, + } + self.assertEqual(rows_data.rows, expected_rows) + + def test_read_with_label_applied(self): + self._maybe_emulator_skip('Labels not supported by Bigtable emulator') + row = self._table.row(ROW_KEY) + self.rows_to_delete.append(row) + + cell1, _, cell3, _ = self._write_to_row(row, None, row) + row.commit() + + # Combine a label with column 1. + label1 = u'label-red' + label1_filter = ApplyLabelFilter(label1) + col1_filter = ColumnQualifierRegexFilter(COL_NAME1) + chain1 = RowFilterChain(filters=[col1_filter, label1_filter]) + + # Combine a label with column 2. + label2 = u'label-blue' + label2_filter = ApplyLabelFilter(label2) + col2_filter = ColumnQualifierRegexFilter(COL_NAME2) + chain2 = RowFilterChain(filters=[col2_filter, label2_filter]) + + # Bring our two labeled columns together. + row_filter = RowFilterUnion(filters=[chain1, chain2]) + partial_row_data = self._table.read_row(ROW_KEY, filter_=row_filter) + self.assertEqual(partial_row_data.row_key, ROW_KEY) + + cells_returned = partial_row_data.cells + col_fam1 = cells_returned.pop(COLUMN_FAMILY_ID1) + # Make sure COLUMN_FAMILY_ID1 was the only key. + self.assertEqual(len(cells_returned), 0) + + cell1_new, = col_fam1.pop(COL_NAME1) + cell3_new, = col_fam1.pop(COL_NAME2) + # Make sure COL_NAME1 and COL_NAME2 were the only keys. + self.assertEqual(len(col_fam1), 0) + + # Check that cell1 has matching values and gained a label. + self.assertEqual(cell1_new.value, cell1.value) + self.assertEqual(cell1_new.timestamp, cell1.timestamp) + self.assertEqual(cell1.labels, []) + self.assertEqual(cell1_new.labels, [label1]) + + # Check that cell3 has matching values and gained a label. 
+ self.assertEqual(cell3_new.value, cell3.value) + self.assertEqual(cell3_new.timestamp, cell3.timestamp) + self.assertEqual(cell3.labels, []) + self.assertEqual(cell3_new.labels, [label2]) diff --git a/packages/google-cloud-bigtable/unit_tests/__init__.py b/packages/google-cloud-bigtable/tests/unit/__init__.py similarity index 100% rename from packages/google-cloud-bigtable/unit_tests/__init__.py rename to packages/google-cloud-bigtable/tests/unit/__init__.py diff --git a/packages/google-cloud-bigtable/unit_tests/_testing.py b/packages/google-cloud-bigtable/tests/unit/_testing.py similarity index 100% rename from packages/google-cloud-bigtable/unit_tests/_testing.py rename to packages/google-cloud-bigtable/tests/unit/_testing.py diff --git a/packages/google-cloud-bigtable/unit_tests/read-rows-acceptance-test.json b/packages/google-cloud-bigtable/tests/unit/read-rows-acceptance-test.json similarity index 100% rename from packages/google-cloud-bigtable/unit_tests/read-rows-acceptance-test.json rename to packages/google-cloud-bigtable/tests/unit/read-rows-acceptance-test.json diff --git a/packages/google-cloud-bigtable/unit_tests/test_client.py b/packages/google-cloud-bigtable/tests/unit/test_client.py similarity index 99% rename from packages/google-cloud-bigtable/unit_tests/test_client.py rename to packages/google-cloud-bigtable/tests/unit/test_client.py index 8761dc360c81..17656be60c00 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/test_client.py @@ -543,7 +543,7 @@ def test_list_instances(self): instance_pb2 as data_v2_pb2) from google.cloud.bigtable._generated import ( bigtable_instance_admin_pb2 as messages_v2_pb2) - from unit_tests._testing import _FakeStub + from tests.unit._testing import _FakeStub LOCATION = 'projects/' + self.PROJECT + '/locations/locname' FAILED_LOCATION = 'FAILED' diff --git a/packages/google-cloud-bigtable/unit_tests/test_cluster.py b/packages/google-cloud-bigtable/tests/unit/test_cluster.py similarity index 98% rename from packages/google-cloud-bigtable/unit_tests/test_cluster.py rename to packages/google-cloud-bigtable/tests/unit/test_cluster.py index 9472fde29f59..3cc40964ba49 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_cluster.py +++ b/packages/google-cloud-bigtable/tests/unit/test_cluster.py @@ -190,7 +190,7 @@ def test___ne__(self): self.assertNotEqual(cluster1, cluster2) def test_reload(self): - from unit_tests._testing import _FakeStub + from tests.unit._testing import _FakeStub from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES SERVE_NODES = 31 @@ -235,7 +235,7 @@ def test_create(self): from google.cloud.operation import Operation from google.cloud.bigtable._generated import ( bigtable_instance_admin_pb2 as messages_v2_pb2) - from unit_tests._testing import _FakeStub + from tests.unit._testing import _FakeStub SERVE_NODES = 4 client = _Client(self.PROJECT) @@ -285,7 +285,7 @@ def test_update(self): instance_pb2 as data_v2_pb2) from google.cloud.bigtable._generated import ( bigtable_instance_admin_pb2 as messages_v2_pb2) - from unit_tests._testing import _FakeStub + from tests.unit._testing import _FakeStub NOW = datetime.datetime.utcnow() NOW_PB = _datetime_to_pb_timestamp(NOW) @@ -345,7 +345,7 @@ def test_update(self): def test_delete(self): from google.protobuf import empty_pb2 - from unit_tests._testing import _FakeStub + from tests.unit._testing import _FakeStub client = _Client(self.PROJECT) instance = _Instance(self.INSTANCE_ID, client) diff --git 
a/packages/google-cloud-bigtable/unit_tests/test_column_family.py b/packages/google-cloud-bigtable/tests/unit/test_column_family.py similarity index 99% rename from packages/google-cloud-bigtable/unit_tests/test_column_family.py rename to packages/google-cloud-bigtable/tests/unit/test_column_family.py index 126a18da3003..6fa408fdb07e 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_column_family.py +++ b/packages/google-cloud-bigtable/tests/unit/test_column_family.py @@ -352,7 +352,7 @@ def test_to_pb_with_rule(self): def _create_test_helper(self, gc_rule=None): from google.cloud.bigtable._generated import ( bigtable_table_admin_pb2 as table_admin_v2_pb2) - from unit_tests._testing import _FakeStub + from tests.unit._testing import _FakeStub project_id = 'project-id' zone = 'zone' @@ -409,7 +409,7 @@ def test_create_with_gc_rule(self): self._create_test_helper(gc_rule=gc_rule) def _update_test_helper(self, gc_rule=None): - from unit_tests._testing import _FakeStub + from tests.unit._testing import _FakeStub from google.cloud.bigtable._generated import ( bigtable_table_admin_pb2 as table_admin_v2_pb2) @@ -471,7 +471,7 @@ def test_delete(self): from google.protobuf import empty_pb2 from google.cloud.bigtable._generated import ( bigtable_table_admin_pb2 as table_admin_v2_pb2) - from unit_tests._testing import _FakeStub + from tests.unit._testing import _FakeStub project_id = 'project-id' zone = 'zone' diff --git a/packages/google-cloud-bigtable/unit_tests/test_instance.py b/packages/google-cloud-bigtable/tests/unit/test_instance.py similarity index 98% rename from packages/google-cloud-bigtable/unit_tests/test_instance.py rename to packages/google-cloud-bigtable/tests/unit/test_instance.py index aefba45d9158..cdad3c376d0a 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_instance.py +++ b/packages/google-cloud-bigtable/tests/unit/test_instance.py @@ -192,7 +192,7 @@ def test_reload(self): instance_pb2 as data_v2_pb2) from google.cloud.bigtable._generated import ( bigtable_instance_admin_pb2 as messages_v2_pb) - from unit_tests._testing import _FakeStub + from tests.unit._testing import _FakeStub client = _Client(self.PROJECT) instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) @@ -235,7 +235,7 @@ def test_create(self): from google.cloud.bigtable._generated import ( bigtable_instance_admin_pb2 as messages_v2_pb2) from google.cloud._helpers import _datetime_to_pb_timestamp - from unit_tests._testing import _FakeStub + from tests.unit._testing import _FakeStub from google.cloud.operation import Operation from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES @@ -290,7 +290,7 @@ def test_create_w_explicit_serve_nodes(self): from google.longrunning import operations_pb2 from google.cloud.bigtable._generated import ( bigtable_instance_admin_pb2 as messages_v2_pb2) - from unit_tests._testing import _FakeStub + from tests.unit._testing import _FakeStub from google.cloud.operation import Operation SERVE_NODES = 5 @@ -329,7 +329,7 @@ def test_create_w_explicit_serve_nodes(self): def test_update(self): from google.cloud.bigtable._generated import ( instance_pb2 as data_v2_pb2) - from unit_tests._testing import _FakeStub + from tests.unit._testing import _FakeStub client = _Client(self.PROJECT) instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID, @@ -364,7 +364,7 @@ def test_delete(self): from google.protobuf import empty_pb2 from google.cloud.bigtable._generated import ( bigtable_instance_admin_pb2 as messages_v2_pb) - from 
unit_tests._testing import _FakeStub + from tests.unit._testing import _FakeStub client = _Client(self.PROJECT) instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) @@ -397,7 +397,7 @@ def test_list_clusters(self): instance_pb2 as instance_v2_pb2) from google.cloud.bigtable._generated import ( bigtable_instance_admin_pb2 as messages_v2_pb2) - from unit_tests._testing import _FakeStub + from tests.unit._testing import _FakeStub FAILED_LOCATION = 'FAILED' FAILED_LOCATIONS = [FAILED_LOCATION] @@ -454,7 +454,7 @@ def _list_tables_helper(self, table_name=None): table_pb2 as table_data_v2_pb2) from google.cloud.bigtable._generated import ( bigtable_table_admin_pb2 as table_messages_v1_pb2) - from unit_tests._testing import _FakeStub + from tests.unit._testing import _FakeStub client = _Client(self.PROJECT) instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) diff --git a/packages/google-cloud-bigtable/unit_tests/test_row.py b/packages/google-cloud-bigtable/tests/unit/test_row.py similarity index 99% rename from packages/google-cloud-bigtable/unit_tests/test_row.py rename to packages/google-cloud-bigtable/tests/unit/test_row.py index 60d53d5ccdf6..3e2d4fd60e0f 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_row.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row.py @@ -301,7 +301,7 @@ def test_delete_cells_with_string_columns(self): def test_commit(self): from google.protobuf import empty_pb2 - from unit_tests._testing import _FakeStub + from tests.unit._testing import _FakeStub row_key = b'row_key' table_name = 'projects/more-stuff' @@ -361,7 +361,7 @@ def test_commit_too_many_mutations(self): row.commit() def test_commit_no_mutations(self): - from unit_tests._testing import _FakeStub + from tests.unit._testing import _FakeStub row_key = b'row_key' client = _Client() @@ -414,7 +414,7 @@ def test__get_mutations(self): self.assertIs(false_mutations, row._get_mutations(None)) def test_commit(self): - from unit_tests._testing import _FakeStub + from tests.unit._testing import _FakeStub from google.cloud.bigtable.row_filters import RowSampleFilter row_key = b'row_key' @@ -502,7 +502,7 @@ def test_commit_too_many_mutations(self): row.commit() def test_commit_no_mutations(self): - from unit_tests._testing import _FakeStub + from tests.unit._testing import _FakeStub row_key = b'row_key' client = _Client() @@ -582,7 +582,7 @@ def test_increment_cell_value(self): def test_commit(self): from google.cloud._testing import _Monkey - from unit_tests._testing import _FakeStub + from tests.unit._testing import _FakeStub from google.cloud.bigtable import row as MUT row_key = b'row_key' @@ -637,7 +637,7 @@ def mock_parse_rmw_row_response(row_response): self.assertEqual(row._rule_pb_list, []) def test_commit_no_rules(self): - from unit_tests._testing import _FakeStub + from tests.unit._testing import _FakeStub row_key = b'row_key' client = _Client() diff --git a/packages/google-cloud-bigtable/unit_tests/test_row_data.py b/packages/google-cloud-bigtable/tests/unit/test_row_data.py similarity index 100% rename from packages/google-cloud-bigtable/unit_tests/test_row_data.py rename to packages/google-cloud-bigtable/tests/unit/test_row_data.py diff --git a/packages/google-cloud-bigtable/unit_tests/test_row_filters.py b/packages/google-cloud-bigtable/tests/unit/test_row_filters.py similarity index 100% rename from packages/google-cloud-bigtable/unit_tests/test_row_filters.py rename to packages/google-cloud-bigtable/tests/unit/test_row_filters.py diff --git 
a/packages/google-cloud-bigtable/unit_tests/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py similarity index 98% rename from packages/google-cloud-bigtable/unit_tests/test_table.py rename to packages/google-cloud-bigtable/tests/unit/test_table.py index 4ad6afe1596f..63844f5d48b7 100644 --- a/packages/google-cloud-bigtable/unit_tests/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -136,7 +136,7 @@ def test___ne__(self): def _create_test_helper(self, initial_split_keys, column_families=()): from google.cloud._helpers import _to_bytes - from unit_tests._testing import _FakeStub + from tests.unit._testing import _FakeStub client = _Client() instance = _Instance(self.INSTANCE_NAME, client=client) @@ -203,7 +203,7 @@ def test_create_with_column_families(self): column_families=column_families) def _list_column_families_helper(self): - from unit_tests._testing import _FakeStub + from tests.unit._testing import _FakeStub client = _Client() instance = _Instance(self.INSTANCE_NAME, client=client) @@ -241,7 +241,7 @@ def test_list_column_families(self): def test_delete(self): from google.protobuf import empty_pb2 - from unit_tests._testing import _FakeStub + from tests.unit._testing import _FakeStub client = _Client() instance = _Instance(self.INSTANCE_NAME, client=client) @@ -270,7 +270,7 @@ def test_delete(self): def _read_row_helper(self, chunks, expected_result): from google.cloud._testing import _Monkey - from unit_tests._testing import _FakeStub + from tests.unit._testing import _FakeStub from google.cloud.bigtable import table as MUT client = _Client() @@ -350,7 +350,7 @@ def test_read_row_still_partial(self): def test_read_rows(self): from google.cloud._testing import _Monkey - from unit_tests._testing import _FakeStub + from tests.unit._testing import _FakeStub from google.cloud.bigtable.row_data import PartialRowsData from google.cloud.bigtable import table as MUT @@ -400,7 +400,7 @@ def mock_create_row_request(table_name, **kwargs): self.assertEqual(mock_created, [(table.name, created_kwargs)]) def test_sample_row_keys(self): - from unit_tests._testing import _FakeStub + from tests.unit._testing import _FakeStub client = _Client() instance = _Instance(self.INSTANCE_NAME, client=client) diff --git a/packages/google-cloud-bigtable/tox.ini b/packages/google-cloud-bigtable/tox.ini deleted file mode 100644 index 257e21288b27..000000000000 --- a/packages/google-cloud-bigtable/tox.ini +++ /dev/null @@ -1,35 +0,0 @@ -[tox] -envlist = - py27,py34,py35,cover - -[testing] -localdeps = - pip install --quiet --upgrade {toxinidir}/../core -deps = - {toxinidir}/../core - mock - pytest -covercmd = - py.test --quiet \ - --cov=google.cloud.bigtable \ - --cov=unit_tests \ - --cov-config {toxinidir}/.coveragerc \ - unit_tests - -[testenv] -commands = - {[testing]localdeps} - py.test --quiet {posargs} unit_tests -deps = - {[testing]deps} - -[testenv:cover] -basepython = - python2.7 -commands = - {[testing]localdeps} - {[testing]covercmd} -deps = - {[testenv]deps} - coverage - pytest-cov From f37d4cfe8d7f41db30f108a63ff3b3419b2b0a8e Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Mon, 27 Mar 2017 10:20:16 -0700 Subject: [PATCH 049/892] Fixing up some format strings in nox configs. Using `STRING_TEMPLATE % VARIABLE` can introduce hard-to-find bugs if `VARIABLE` is expected to be a string but ends up being a tuple. Instead of using percent formatting, just using `.format`. 
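To make the failure mode concrete, here is an illustrative Python snippet (it is
not part of the patch itself; the `version` variable is hypothetical) showing how
a value that is accidentally a tuple behaves differently under percent formatting
and `.format`:

    version = ('3.6',)               # accidentally a tuple instead of a str

    'python%s' % version             # -> 'python3.6'; a one-element tuple is
                                     #    silently unpacked, hiding the mistake
    'python%s' % ('3.6', '3.7')      # -> TypeError: not all arguments converted
                                     #    during string formatting
    'python{}'.format(version)       # -> "python('3.6',)"; .format always treats
                                     #    the argument as a single value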
Also making tweaks to `get_target_packages` to make some path manipulation / checks OS-independent. --- packages/google-cloud-bigtable/nox.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/nox.py b/packages/google-cloud-bigtable/nox.py index eb8fe574c0c0..53e6c5d5576d 100644 --- a/packages/google-cloud-bigtable/nox.py +++ b/packages/google-cloud-bigtable/nox.py @@ -24,7 +24,7 @@ def unit_tests(session, python_version): """Run the unit test suite.""" # Run unit tests against all supported versions of Python. - session.interpreter = 'python%s' % python_version + session.interpreter = 'python{}'.format(python_version) # Install all test dependencies, then install this package in-place. session.install('mock', 'pytest', 'pytest-cov', '../core/') @@ -48,7 +48,7 @@ def system_tests(session, python_version): return # Run the system tests against latest Python 2 and Python 3 only. - session.interpreter = 'python%s' % python_version + session.interpreter = 'python{}'.format(python_version) # Install all test dependencies, then install this package into the # virutalenv's dist-packages. From ca22969de5c98b6fdfb82c22c0724cd910eb3e71 Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Thu, 30 Mar 2017 14:45:10 -0700 Subject: [PATCH 050/892] GA and Beta Promotions (#3245) * Make clients explicitly unpickleable. Closes #3211. * Make clients explicitly unpickleable. Closes #3211. * Add GA designator, add 1.0 version numbers. * Version changes. Eep. * Oops, Speech is still alpha. * 0.24.0, not 0.24.1 * Remove double __getstate__ goof. * Version changes. Eep. * Oops, Speech is still alpha. * Remove double __getstate__ goof. * Adding 3.6 classifier where missing and fixing bad versions. Done via "git grep '0\.24'" and "git grep '0\.23'". * Fix Noxfiles forlocal packages. * Fixing copy-pasta issue in error reporting nox config. Also fixing bad indent in same file. * Depend on stable logging in error reporting package. * Fixing lint errors in error_reporting. These were masked because error_reporting's lint nox session was linting the datastore codebase. This also means that the error reporting package has gained __all__. * Fixing a syntax error in nox config for logging. Also fixing an indent error while I was in there. * Revert "Add docs for 'result_index' usage and a system test." This reverts commit b5742aa160f604ec7cd81873ad24ac9aa75e548d. * Fixing docs nox session for umbrella package. Two issues: - error_reporting came BEFORE logging (which means it would try to pull in a logging dep from PyPI that doesn't exist) - dns was NOT in the list of local packages * Updating upper bound on logging in error_reporting. * Un-revert typo fix. --- packages/google-cloud-bigtable/nox.py | 11 +++++++---- packages/google-cloud-bigtable/setup.py | 5 +++-- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/packages/google-cloud-bigtable/nox.py b/packages/google-cloud-bigtable/nox.py index 53e6c5d5576d..bded96fcbe29 100644 --- a/packages/google-cloud-bigtable/nox.py +++ b/packages/google-cloud-bigtable/nox.py @@ -18,6 +18,9 @@ import nox +LOCAL_DEPS = ('../core/',) + + @nox.session @nox.parametrize('python_version', ['2.7', '3.4', '3.5', '3.6']) def unit_tests(session, python_version): @@ -27,7 +30,7 @@ def unit_tests(session, python_version): session.interpreter = 'python{}'.format(python_version) # Install all test dependencies, then install this package in-place. 
- session.install('mock', 'pytest', 'pytest-cov', '../core/') + session.install('mock', 'pytest', 'pytest-cov', *LOCAL_DEPS) session.install('-e', '.') # Run py.test against the unit tests. @@ -52,8 +55,8 @@ def system_tests(session, python_version): # Install all test dependencies, then install this package into the # virutalenv's dist-packages. - session.install('mock', 'pytest', - '../core/', '../test_utils/') + session.install('mock', 'pytest', *LOCAL_DEPS) + session.install('../test_utils/') session.install('.') # Run py.test against the system tests. @@ -68,7 +71,7 @@ def lint(session): serious code quality issues. """ session.interpreter = 'python3.6' - session.install('flake8') + session.install('flake8', *LOCAL_DEPS) session.install('.') session.run('flake8', 'google/cloud/bigtable') diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index b4fa4483490a..708eef10a407 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -44,19 +44,20 @@ 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', + 'Programming Language :: Python :: 3.6', 'Topic :: Internet', ], } REQUIREMENTS = [ - 'google-cloud-core >= 0.23.1, < 0.24dev', + 'google-cloud-core >= 0.24.0, < 0.25dev', 'google-gax>=0.15.7, <0.16dev', ] setup( name='google-cloud-bigtable', - version='0.23.1', + version='0.24.0', description='Python Client for Google Cloud Bigtable', long_description=README, namespace_packages=[ From e6282c9966362aa59d9694d86671a71e6feedf98 Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Thu, 20 Apr 2017 13:00:32 -0700 Subject: [PATCH 051/892] Adding check that **all** setup.py README's are valid RST. (#3318) * Adding check that **all** setup.py README's are valid RST. Follow up to #3316. Fixes #2446. * Fixing duplicate reference in Logging README. * Fixing duplicate reference in Monitoring README. --- packages/google-cloud-bigtable/nox.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/packages/google-cloud-bigtable/nox.py b/packages/google-cloud-bigtable/nox.py index bded96fcbe29..bc60a19c8217 100644 --- a/packages/google-cloud-bigtable/nox.py +++ b/packages/google-cloud-bigtable/nox.py @@ -76,6 +76,15 @@ def lint(session): session.run('flake8', 'google/cloud/bigtable') +@nox.session +def lint_setup_py(session): + """Verify that setup.py is valid (including RST check).""" + session.interpreter = 'python3.6' + session.install('docutils', 'Pygments') + session.run( + 'python', 'setup.py', 'check', '--restructuredtext', '--strict') + + @nox.session def cover(session): """Run the final coverage report. From 10b50f35517b332814862480b2f0c086718ae476 Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Fri, 21 Apr 2017 10:03:56 -0700 Subject: [PATCH 052/892] Ignore tests (rather than unit_tests) in setup.py files. 
(#3319) --- packages/google-cloud-bigtable/setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 708eef10a407..212feda21758 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -64,7 +64,7 @@ 'google', 'google.cloud', ], - packages=find_packages(exclude=('unit_tests*',)), + packages=find_packages(exclude=('tests*',)), install_requires=REQUIREMENTS, **SETUP_BASE ) From 0dae919f671465ec69c8f48a8cfda8df9a41eb53 Mon Sep 17 00:00:00 2001 From: Dima Timofeev Date: Fri, 12 May 2017 21:57:48 +0100 Subject: [PATCH 053/892] Add getters for Bigtable Row.row_key and Row.table (#3408) --- .../google/cloud/bigtable/row.py | 18 ++++++++++++++++++ .../tests/unit/test_row.py | 19 +++++++++++++++++++ 2 files changed, 37 insertions(+) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row.py index 5e9075ef8eec..09d12377a49c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row.py @@ -55,6 +55,24 @@ def __init__(self, row_key, table): self._row_key = _to_bytes(row_key) self._table = table + @property + def row_key(self): + """Row key. + + :rtype: bytes + :returns: The key for the current row. + """ + return self._row_key + + @property + def table(self): + """Row table. + + :rtype: table: :class:`Table ` + :returns: table: The table that owns the row. + """ + return self._table + class _SetDeleteRow(Row): """Row helper for setting or deleting cell values. diff --git a/packages/google-cloud-bigtable/tests/unit/test_row.py b/packages/google-cloud-bigtable/tests/unit/test_row.py index 3e2d4fd60e0f..046934ca1f27 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row.py @@ -16,6 +16,25 @@ import unittest +class TestRow(unittest.TestCase): + + @staticmethod + def _get_target_class(): + from google.cloud.bigtable.row import Row + return Row + + def _make_one(self, *args, **kwargs): + return self._get_target_class()(*args, **kwargs) + + def test_row_key_getter(self): + row = self._make_one(row_key=b'row_key', table='table') + self.assertEqual(b'row_key', row.row_key) + + def test_row_table_getter(self): + row = self._make_one(row_key=b'row_key', table='table') + self.assertEqual('table', row.table) + + class Test_SetDeleteRow(unittest.TestCase): @staticmethod From abb3353a33020124d59641b69e3cdd90b6f7aca5 Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Fri, 2 Jun 2017 14:36:29 -0700 Subject: [PATCH 054/892] Vision semi-GAPIC (#3373) --- packages/google-cloud-bigtable/tests/unit/test_row_data.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_data.py b/packages/google-cloud-bigtable/tests/unit/test_row_data.py index eed5e77c5630..51534138b66c 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_data.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_data.py @@ -709,7 +709,7 @@ def _generate_cell_chunks(chunk_text_pbs): def _parse_readrows_acceptance_tests(filename): """Parse acceptance tests from JSON - See: + See https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/\ 4d3185662ca61bc9fa1bdf1ec0166f6e5ecf86c6/bigtable-client-core/src/\ test/resources/com/google/cloud/bigtable/grpc/scanner/v2/ From 75f2d0cfe2d1cb876c685b12b0ceb733c356d814 
Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Tue, 6 Jun 2017 10:11:59 -0700 Subject: [PATCH 055/892] Adding optional switch to capture project ID in from_service_account_json(). (#3436) Fixes #1883. --- packages/google-cloud-bigtable/google/cloud/bigtable/client.py | 1 + 1 file changed, 1 insertion(+) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py index 2f552b1c2564..764a365dacb2 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py @@ -207,6 +207,7 @@ class Client(_ClientFactoryMixin, _ClientProjectMixin): _instance_stub_internal = None _operations_stub_internal = None _table_stub_internal = None + _SET_PROJECT = True # Used by from_service_account_json() def __init__(self, project=None, credentials=None, read_only=False, admin=False, user_agent=DEFAULT_USER_AGENT): From 1291495079fef97d2fca091bf4d9e0203e8deba9 Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Tue, 6 Jun 2017 13:08:46 -0700 Subject: [PATCH 056/892] Updating Bigtable Client docstring to reflect new credentials. (#3477) --- .../google-cloud-bigtable/google/cloud/bigtable/client.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py index 764a365dacb2..86ee7173c917 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py @@ -179,12 +179,10 @@ class Client(_ClientFactoryMixin, _ClientProjectMixin): instances, tables and data. If not provided, will attempt to determine from the environment. - :type credentials: - :class:`OAuth2Credentials ` or - :data:`NoneType ` + :type credentials: :class:`~google.auth.credentials.Credentials` :param credentials: (Optional) The OAuth2 Credentials to use for this - client. If not provided, defaults to the Google - Application Default Credentials. + client. If not passed, falls back to the default + inferred from the environment. :type read_only: bool :param read_only: (Optional) Boolean indicating if the data scope should be From 46bd168621af806d6a3fc9d6f01fe85754685c21 Mon Sep 17 00:00:00 2001 From: Gary Elliott Date: Mon, 19 Jun 2017 12:04:00 -0400 Subject: [PATCH 057/892] Add sentence about row ordering (#3504) I'm not sure if this is the best place for this, but we want to make sure it's documented that rows are returned in row key order. --- .../google-cloud-bigtable/google/cloud/bigtable/row_data.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py index 60fc1f0ef1e8..78179db25c4e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py @@ -256,7 +256,7 @@ def consume_next(self): """Consume the next ``ReadRowsResponse`` from the stream. Parse the response and its chunks into a new/existing row in - :attr:`_rows` + :attr:`_rows`. Rows are returned in order by row key. 
""" response = six.next(self._response_iterator) self._counter += 1 From 8d2930c60f46015e71f8721458e5b2ce09180637 Mon Sep 17 00:00:00 2001 From: Dima Timofeev Date: Mon, 19 Jun 2017 21:45:23 +0100 Subject: [PATCH 058/892] Allow bulk update of records via 'MutateRows' API (#3401) --- .../google/cloud/bigtable/table.py | 111 ++++++++++++- .../google-cloud-bigtable/tests/system.py | 27 ++++ .../tests/unit/test_row.py | 1 + .../tests/unit/test_table.py | 148 ++++++++++++++++++ 4 files changed, 286 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index 3fbd198d6b65..8dbf8c1ce6fb 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -12,7 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""User friendly container for Google Cloud Bigtable Table.""" +"""User-friendly container for Google Cloud Bigtable Table.""" + + +import six from google.cloud._helpers import _to_bytes from google.cloud.bigtable._generated import ( @@ -29,6 +32,19 @@ from google.cloud.bigtable.row_data import PartialRowsData +# Maximum number of mutations in bulk (MutateRowsRequest message): +# https://cloud.google.com/bigtable/docs/reference/data/rpc/google.bigtable.v2#google.bigtable.v2.MutateRowRequest +_MAX_BULK_MUTATIONS = 100000 + + +class TableMismatchError(ValueError): + """Row from another table.""" + + +class TooManyMutationsError(ValueError): + """The number of mutations for bulk request is too big.""" + + class Table(object): """Representation of a Google Cloud Bigtable Table. @@ -276,6 +292,35 @@ def read_rows(self, start_key=None, end_key=None, limit=None, # We expect an iterator of `data_messages_v2_pb2.ReadRowsResponse` return PartialRowsData(response_iterator) + def mutate_rows(self, rows): + """Mutates multiple rows in bulk. + + The method tries to update all specified rows. + If some of the rows weren't updated, it would not remove mutations. + They can be applied to the row separately. + If row mutations finished successfully, they would be cleaned up. + + :type rows: list + :param rows: List or other iterable of :class:`.DirectRow` instances. + + :rtype: list + :returns: A list of response statuses (`google.rpc.status_pb2.Status`) + corresponding to success or failure of each row mutation + sent. These will be in the same order as the `rows`. + """ + mutate_rows_request = _mutate_rows_request(self.name, rows) + client = self._instance._client + responses = client._data_stub.MutateRows(mutate_rows_request) + + responses_statuses = [ + None for _ in six.moves.xrange(len(mutate_rows_request.entries))] + for response in responses: + for entry in response.entries: + responses_statuses[entry.index] = entry.status + if entry.status.code == 0: + rows[entry.index].clear() + return responses_statuses + def sample_row_keys(self): """Read a sample of row keys in the table. @@ -373,3 +418,67 @@ def _create_row_request(table_name, row_key=None, start_key=None, end_key=None, message.rows.row_ranges.add(**range_kwargs) return message + + +def _mutate_rows_request(table_name, rows): + """Creates a request to mutate rows in a table. + + :type table_name: str + :param table_name: The name of the table to write to. + + :type rows: list + :param rows: List or other iterable of :class:`.DirectRow` instances. 
+ + :rtype: :class:`data_messages_v2_pb2.MutateRowsRequest` + :returns: The ``MutateRowsRequest`` protobuf corresponding to the inputs. + :raises: :exc:`~.table.TooManyMutationsError` if the number of mutations is + greater than 100,000 + """ + request_pb = data_messages_v2_pb2.MutateRowsRequest(table_name=table_name) + mutations_count = 0 + for row in rows: + _check_row_table_name(table_name, row) + _check_row_type(row) + entry = request_pb.entries.add() + entry.row_key = row.row_key + # NOTE: Since `_check_row_type` has verified `row` is a `DirectRow`, + # the mutations have no state. + for mutation in row._get_mutations(None): + mutations_count += 1 + entry.mutations.add().CopyFrom(mutation) + if mutations_count > _MAX_BULK_MUTATIONS: + raise TooManyMutationsError('Maximum number of mutations is %s' % + (_MAX_BULK_MUTATIONS,)) + return request_pb + + +def _check_row_table_name(table_name, row): + """Checks that a row belongs to a table. + + :type table_name: str + :param table_name: The name of the table. + + :type row: :class:`.Row` + :param row: An instance of :class:`.Row` subclasses. + + :raises: :exc:`~.table.TableMismatchError` if the row does not belong to + the table. + """ + if row.table.name != table_name: + raise TableMismatchError( + 'Row %s is a part of %s table. Current table: %s' % + (row.row_key, row.table.name, table_name)) + + +def _check_row_type(row): + """Checks that a row is an instance of :class:`.DirectRow`. + + :type row: :class:`.Row` + :param row: An instance of :class:`.Row` subclasses. + + :raises: :class:`TypeError ` if the row is not an + instance of DirectRow. + """ + if not isinstance(row, DirectRow): + raise TypeError('Bulk processing can not be applied for ' + 'conditional or append mutations.') diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index faed85fdb302..1fcda808db39 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -356,6 +356,33 @@ def _write_to_row(self, row1=None, row2=None, row3=None, row4=None): cell4 = Cell(CELL_VAL4, timestamp4) return cell1, cell2, cell3, cell4 + def test_mutate_rows(self): + row1 = self._table.row(ROW_KEY) + row1.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1) + row1.commit() + self.rows_to_delete.append(row1) + row2 = self._table.row(ROW_KEY_ALT) + row2.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL2) + row2.commit() + self.rows_to_delete.append(row2) + + # Change the contents + row1.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL3) + row2.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL4) + rows = [row1, row2] + statuses = self._table.mutate_rows(rows) + result = [status.code for status in statuses] + expected_result = [0, 0] + self.assertEqual(result, expected_result) + + # Check the contents + row1_data = self._table.read_row(ROW_KEY) + self.assertEqual( + row1_data.cells[COLUMN_FAMILY_ID1][COL_NAME1][0].value, CELL_VAL3) + row2_data = self._table.read_row(ROW_KEY_ALT) + self.assertEqual( + row2_data.cells[COLUMN_FAMILY_ID1][COL_NAME1][0].value, CELL_VAL4) + def test_read_large_cell_limit(self): row = self._table.row(ROW_KEY) self.rows_to_delete.append(row) diff --git a/packages/google-cloud-bigtable/tests/unit/test_row.py b/packages/google-cloud-bigtable/tests/unit/test_row.py index 046934ca1f27..156a517b351a 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row.py @@ -21,6 +21,7 @@ class TestRow(unittest.TestCase): 
@staticmethod def _get_target_class(): from google.cloud.bigtable.row import Row + return Row def _make_one(self, *args, **kwargs): diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index 63844f5d48b7..5867e76aff73 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -15,6 +15,109 @@ import unittest +import mock + + +class Test___mutate_rows_request(unittest.TestCase): + + def _call_fut(self, table_name, rows): + from google.cloud.bigtable.table import _mutate_rows_request + + return _mutate_rows_request(table_name, rows) + + @mock.patch('google.cloud.bigtable.table._MAX_BULK_MUTATIONS', new=3) + def test__mutate_rows_too_many_mutations(self): + from google.cloud.bigtable.row import DirectRow + from google.cloud.bigtable.table import TooManyMutationsError + + table = mock.Mock(name='table', spec=['name']) + table.name = 'table' + rows = [DirectRow(row_key=b'row_key', table=table), + DirectRow(row_key=b'row_key_2', table=table)] + rows[0].set_cell('cf1', b'c1', 1) + rows[0].set_cell('cf1', b'c1', 2) + rows[1].set_cell('cf1', b'c1', 3) + rows[1].set_cell('cf1', b'c1', 4) + with self.assertRaises(TooManyMutationsError): + self._call_fut('table', rows) + + def test__mutate_rows_request(self): + from google.cloud.bigtable.row import DirectRow + + table = mock.Mock(name='table', spec=['name']) + table.name = 'table' + rows = [DirectRow(row_key=b'row_key', table=table), + DirectRow(row_key=b'row_key_2', table=table)] + rows[0].set_cell('cf1', b'c1', b'1') + rows[1].set_cell('cf1', b'c1', b'2') + result = self._call_fut('table', rows) + + expected_result = _mutate_rows_request_pb(table_name='table') + entry1 = expected_result.entries.add() + entry1.row_key = b'row_key' + mutations1 = entry1.mutations.add() + mutations1.set_cell.family_name = 'cf1' + mutations1.set_cell.column_qualifier = b'c1' + mutations1.set_cell.timestamp_micros = -1 + mutations1.set_cell.value = b'1' + entry2 = expected_result.entries.add() + entry2.row_key = b'row_key_2' + mutations2 = entry2.mutations.add() + mutations2.set_cell.family_name = 'cf1' + mutations2.set_cell.column_qualifier = b'c1' + mutations2.set_cell.timestamp_micros = -1 + mutations2.set_cell.value = b'2' + + self.assertEqual(result, expected_result) + + +class Test__check_row_table_name(unittest.TestCase): + + def _call_fut(self, table_name, row): + from google.cloud.bigtable.table import _check_row_table_name + + return _check_row_table_name(table_name, row) + + def test_wrong_table_name(self): + from google.cloud.bigtable.table import TableMismatchError + from google.cloud.bigtable.row import DirectRow + + table = mock.Mock(name='table', spec=['name']) + table.name = 'table' + row = DirectRow(row_key=b'row_key', table=table) + with self.assertRaises(TableMismatchError): + self._call_fut('other_table', row) + + def test_right_table_name(self): + from google.cloud.bigtable.row import DirectRow + + table = mock.Mock(name='table', spec=['name']) + table.name = 'table' + row = DirectRow(row_key=b'row_key', table=table) + result = self._call_fut('table', row) + self.assertFalse(result) + + +class Test__check_row_type(unittest.TestCase): + def _call_fut(self, row): + from google.cloud.bigtable.table import _check_row_type + + return _check_row_type(row) + + def test_test_wrong_row_type(self): + from google.cloud.bigtable.row import ConditionalRow + + row = ConditionalRow(row_key=b'row_key', table='table', 
filter_=None) + with self.assertRaises(TypeError): + self._call_fut(row) + + def test_right_row_type(self): + from google.cloud.bigtable.row import DirectRow + + row = DirectRow(row_key=b'row_key', table='table') + result = self._call_fut(row) + self.assertFalse(result) + class TestTable(unittest.TestCase): @@ -348,6 +451,44 @@ def test_read_row_still_partial(self): with self.assertRaises(ValueError): self._read_row_helper(chunks, None) + def test_mutate_rows(self): + from google.cloud.bigtable._generated.bigtable_pb2 import ( + MutateRowsResponse) + from google.cloud.bigtable.row import DirectRow + from google.rpc.status_pb2 import Status + from tests.unit._testing import _FakeStub + + client = _Client() + instance = _Instance(self.INSTANCE_NAME, client=client) + table = self._make_one(self.TABLE_ID, instance) + + row_1 = DirectRow(row_key=b'row_key', table=table) + row_1.set_cell('cf', b'col', b'value1') + row_2 = DirectRow(row_key=b'row_key_2', table=table) + row_2.set_cell('cf', b'col', b'value2') + + response = MutateRowsResponse( + entries=[ + MutateRowsResponse.Entry( + index=0, + status=Status(code=0), + ), + MutateRowsResponse.Entry( + index=1, + status=Status(code=1), + ), + ], + ) + + # Patch the stub used by the API method. + client._data_stub = _FakeStub([response]) + statuses = table.mutate_rows([row_1, row_2]) + result = [status.code for status in statuses] + expected_result = [0, 1] + + self.assertEqual(result, expected_result) + + def test_read_rows(self): from google.cloud._testing import _Monkey from tests.unit._testing import _FakeStub @@ -570,6 +711,13 @@ def _SampleRowKeysRequestPB(*args, **kw): return messages_v2_pb2.SampleRowKeysRequest(*args, **kw) +def _mutate_rows_request_pb(*args, **kw): + from google.cloud.bigtable._generated import ( + bigtable_pb2 as data_messages_v2_pb2) + + return data_messages_v2_pb2.MutateRowsRequest(*args, **kw) + + def _TablePB(*args, **kw): from google.cloud.bigtable._generated import ( table_pb2 as table_v2_pb2) From 814688a41482b9a0152d81cb988226fad5020d7e Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Fri, 23 Jun 2017 15:08:10 -0700 Subject: [PATCH 059/892] Re-enable pylint in info-only mode for all packages (#3519) --- packages/google-cloud-bigtable/nox.py | 13 +++++++--- .../google-cloud-bigtable/pylint.config.py | 25 +++++++++++++++++++ 2 files changed, 35 insertions(+), 3 deletions(-) create mode 100644 packages/google-cloud-bigtable/pylint.config.py diff --git a/packages/google-cloud-bigtable/nox.py b/packages/google-cloud-bigtable/nox.py index bc60a19c8217..611de0bc9338 100644 --- a/packages/google-cloud-bigtable/nox.py +++ b/packages/google-cloud-bigtable/nox.py @@ -65,15 +65,22 @@ def system_tests(session, python_version): @nox.session def lint(session): - """Run flake8. + """Run linters. - Returns a failure if flake8 finds linting errors or sufficiently + Returns a failure if the linters find linting errors or sufficiently serious code quality issues. """ session.interpreter = 'python3.6' - session.install('flake8', *LOCAL_DEPS) + session.install('flake8', 'pylint', 'gcp-devrel-py-tools', *LOCAL_DEPS) session.install('.') session.run('flake8', 'google/cloud/bigtable') + session.run( + 'gcp-devrel-py-tools', 'run-pylint', + '--config', 'pylint.config.py', + '--library-filesets', 'google', + '--test-filesets', 'tests', + # Temporarily allow this to fail. 
+ success_codes=range(0, 100)) @nox.session diff --git a/packages/google-cloud-bigtable/pylint.config.py b/packages/google-cloud-bigtable/pylint.config.py new file mode 100644 index 000000000000..d8ca7b92e85e --- /dev/null +++ b/packages/google-cloud-bigtable/pylint.config.py @@ -0,0 +1,25 @@ +# Copyright 2017 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""This module is used to configure gcp-devrel-py-tools run-pylint.""" + +# Library configuration + +# library_additions = {} +# library_replacements = {} + +# Test configuration + +# test_additions = copy.deepcopy(library_additions) +# test_replacements = copy.deepcopy(library_replacements) From 7a91fc57aa4057cce59c1aae260846c78070d2aa Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Mon, 26 Jun 2017 18:40:30 -0400 Subject: [PATCH 060/892] Prep bigtable-0.25.0 release. (#3534) --- packages/google-cloud-bigtable/setup.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 212feda21758..6b90c6878ca8 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -51,13 +51,13 @@ REQUIREMENTS = [ - 'google-cloud-core >= 0.24.0, < 0.25dev', + 'google-cloud-core >= 0.25.0, < 0.26dev', 'google-gax>=0.15.7, <0.16dev', ] setup( name='google-cloud-bigtable', - version='0.24.0', + version='0.25.0', description='Python Client for Google Cloud Bigtable', long_description=README, namespace_packages=[ From 63d143b8deb91400729511c3252c8c85fd7870b9 Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Tue, 27 Jun 2017 10:32:30 -0700 Subject: [PATCH 061/892] Fix inclusion of tests in manifest.in (#3552) --- packages/google-cloud-bigtable/MANIFEST.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/MANIFEST.in b/packages/google-cloud-bigtable/MANIFEST.in index 9f7100c9528a..fc77f8c82ff0 100644 --- a/packages/google-cloud-bigtable/MANIFEST.in +++ b/packages/google-cloud-bigtable/MANIFEST.in @@ -1,4 +1,4 @@ include README.rst LICENSE recursive-include google *.json *.proto -recursive-include unit_tests * +recursive-include tests * global-exclude *.pyc __pycache__ From b45a4506a9b31312cb53b20b5db51f38d43eb34f Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Wed, 28 Jun 2017 14:07:25 -0700 Subject: [PATCH 062/892] Making all LICENSE headers "uniform". (#3563) --- packages/google-cloud-bigtable/pylint.config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/pylint.config.py b/packages/google-cloud-bigtable/pylint.config.py index d8ca7b92e85e..b618319b8b61 100644 --- a/packages/google-cloud-bigtable/pylint.config.py +++ b/packages/google-cloud-bigtable/pylint.config.py @@ -4,7 +4,7 @@ # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, From cec0f095cbe5a4dae9022c64ea2d821c8f41c650 Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Thu, 29 Jun 2017 10:56:09 -0700 Subject: [PATCH 063/892] Skipping system tests when credentials env. var is unset. (#3475) --- packages/google-cloud-bigtable/nox.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/nox.py b/packages/google-cloud-bigtable/nox.py index 611de0bc9338..40d997acc88b 100644 --- a/packages/google-cloud-bigtable/nox.py +++ b/packages/google-cloud-bigtable/nox.py @@ -48,7 +48,7 @@ def system_tests(session, python_version): # Sanity check: Only run system tests if the environment variable is set. if not os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', ''): - return + session.skip('Credentials must be set via environment variable.') # Run the system tests against latest Python 2 and Python 3 only. session.interpreter = 'python{}'.format(python_version) From 7cddec819b9bbfe442e2af6f1f6aa9cb3784d6d9 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Thu, 6 Jul 2017 16:41:31 -0400 Subject: [PATCH 064/892] Shorten nox virtualenv names to avoid hashing. (#3585) --- packages/google-cloud-bigtable/nox.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/packages/google-cloud-bigtable/nox.py b/packages/google-cloud-bigtable/nox.py index 40d997acc88b..b43e196a95ff 100644 --- a/packages/google-cloud-bigtable/nox.py +++ b/packages/google-cloud-bigtable/nox.py @@ -29,6 +29,9 @@ def unit_tests(session, python_version): # Run unit tests against all supported versions of Python. session.interpreter = 'python{}'.format(python_version) + # Set the virtualenv dirname. + session.virtualenv_dirname = 'unit-' + python_version + # Install all test dependencies, then install this package in-place. session.install('mock', 'pytest', 'pytest-cov', *LOCAL_DEPS) session.install('-e', '.') @@ -53,6 +56,9 @@ def system_tests(session, python_version): # Run the system tests against latest Python 2 and Python 3 only. session.interpreter = 'python{}'.format(python_version) + # Set the virtualenv dirname. + session.virtualenv_dirname = 'sys-' + python_version + # Install all test dependencies, then install this package into the # virutalenv's dist-packages. session.install('mock', 'pytest', *LOCAL_DEPS) @@ -87,6 +93,10 @@ def lint(session): def lint_setup_py(session): """Verify that setup.py is valid (including RST check).""" session.interpreter = 'python3.6' + + # Set the virtualenv dirname. + session.virtualenv_dirname = 'setup' + session.install('docutils', 'Pygments') session.run( 'python', 'setup.py', 'check', '--restructuredtext', '--strict') From cb670ecfa532d3c77f7a53019183a44727cf4f5b Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Tue, 11 Jul 2017 10:51:40 -0700 Subject: [PATCH 065/892] Updating author_email in all setup.py. (#3598) Done via: $ git grep -l author_email | \ > xargs sed -i s/jjg+google-cloud-python@google.com/googleapis-publisher@google.com/g and manually editing `videointelligence/setup.py` and `vision/setup.py`. 
--- packages/google-cloud-bigtable/setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 6b90c6878ca8..8d5bad6a1ffd 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -27,7 +27,7 @@ # consolidate. SETUP_BASE = { 'author': 'Google Cloud Platform', - 'author_email': 'jjg+google-cloud-python@google.com', + 'author_email': 'googleapis-publisher@google.com', 'scripts': [], 'url': 'https://github.com/GoogleCloudPlatform/google-cloud-python', 'license': 'Apache 2.0', From 208d97ca2b30e65564bcb1a1b6505bacc2645a8a Mon Sep 17 00:00:00 2001 From: Cal Peyser Date: Mon, 17 Jul 2017 11:37:34 -0400 Subject: [PATCH 066/892] RPC retries (second PR) (#3324) --- .../google/cloud/bigtable/retry.py | 169 ++++++++++++++++ .../google/cloud/bigtable/row_data.py | 3 + .../google/cloud/bigtable/table.py | 101 +++------- .../tests/retry_test_script.txt | 38 ++++ .../google-cloud-bigtable/tests/system.py | 78 ++++++++ .../tests/unit/_testing.py | 27 ++- .../tests/unit/test_table.py | 185 +++++++++++++++++- 7 files changed, 520 insertions(+), 81 deletions(-) create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/retry.py create mode 100644 packages/google-cloud-bigtable/tests/retry_test_script.txt diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/retry.py b/packages/google-cloud-bigtable/google/cloud/bigtable/retry.py new file mode 100644 index 000000000000..f20419ce4f8e --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/retry.py @@ -0,0 +1,169 @@ +"""Provides function wrappers that implement retrying.""" +import random +import time +import six +import sys + +from google.cloud._helpers import _to_bytes +from google.cloud.bigtable._generated import ( + bigtable_pb2 as data_messages_v2_pb2) +from google.gax import config, errors +from grpc import RpcError + + +_MILLIS_PER_SECOND = 1000 + + +class ReadRowsIterator(object): + """Creates an iterator equivalent to a_iter, but that retries on certain + exceptions. + """ + + def __init__(self, client, name, start_key, end_key, filter_, limit, + retry_options, **kwargs): + self.client = client + self.retry_options = retry_options + self.name = name + self.start_key = start_key + self.start_key_closed = True + self.end_key = end_key + self.filter_ = filter_ + self.limit = limit + self.delay_mult = retry_options.backoff_settings.retry_delay_multiplier + self.max_delay_millis = \ + retry_options.backoff_settings.max_retry_delay_millis + self.timeout_mult = \ + retry_options.backoff_settings.rpc_timeout_multiplier + self.max_timeout = \ + (retry_options.backoff_settings.max_rpc_timeout_millis / + _MILLIS_PER_SECOND) + self.total_timeout = \ + (retry_options.backoff_settings.total_timeout_millis / + _MILLIS_PER_SECOND) + self.set_stream() + + def set_start_key(self, start_key): + """ + Sets the row key at which this iterator will begin reading. + """ + self.start_key = start_key + self.start_key_closed = False + + def set_stream(self): + """ + Resets the read stream by making an RPC on the 'ReadRows' endpoint. + """ + req_pb = _create_row_request(self.name, start_key=self.start_key, + start_key_closed=self.start_key_closed, + end_key=self.end_key, + filter_=self.filter_, limit=self.limit) + self.stream = self.client._data_stub.ReadRows(req_pb) + + def next(self, *args, **kwargs): + """ + Read and return the next row from the stream. + Retry on idempotent failure. 
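+
+    Used by Table.read_rows(): when a retryable gRPC error occurs, the
+    iterator re-opens the ReadRows stream at an open range starting from
+    the last row key it has already received.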
+ """ + delay = self.retry_options.backoff_settings.initial_retry_delay_millis + exc = errors.RetryError('Retry total timeout exceeded before any' + 'response was received') + timeout = (self.retry_options.backoff_settings + .initial_rpc_timeout_millis / + _MILLIS_PER_SECOND) + + now = time.time() + deadline = now + self.total_timeout + while deadline is None or now < deadline: + try: + return six.next(self.stream) + except StopIteration as stop: + raise stop + except RpcError as error: # pylint: disable=broad-except + code = config.exc_to_code(error) + if code not in self.retry_options.retry_codes: + six.reraise(type(error), error) + + # pylint: disable=redefined-variable-type + exc = errors.RetryError( + 'Retry total timeout exceeded with exception', error) + + # Sleep a random number which will, on average, equal the + # expected delay. + to_sleep = random.uniform(0, delay * 2) + time.sleep(to_sleep / _MILLIS_PER_SECOND) + delay = min(delay * self.delay_mult, self.max_delay_millis) + now = time.time() + timeout = min( + timeout * self.timeout_mult, self.max_timeout, + deadline - now) + self.set_stream() + + six.reraise(errors.RetryError, exc, sys.exc_info()[2]) + + def __next__(self, *args, **kwargs): + return self.next(*args, **kwargs) + + +def _create_row_request(table_name, row_key=None, start_key=None, + start_key_closed=True, end_key=None, filter_=None, + limit=None): + """Creates a request to read rows in a table. + + :type table_name: str + :param table_name: The name of the table to read from. + + :type row_key: bytes + :param row_key: (Optional) The key of a specific row to read from. + + :type start_key: bytes + :param start_key: (Optional) The beginning of a range of row keys to + read from. The range will include ``start_key``. If + left empty, will be interpreted as the empty string. + + :type end_key: bytes + :param end_key: (Optional) The end of a range of row keys to read from. + The range will not include ``end_key``. If left empty, + will be interpreted as an infinite string. + + :type filter_: :class:`.RowFilter` + :param filter_: (Optional) The filter to apply to the contents of the + specified row(s). If unset, reads the entire table. + + :type limit: int + :param limit: (Optional) The read will terminate after committing to N + rows' worth of results. The default (zero) is to return + all results. + + :rtype: :class:`data_messages_v2_pb2.ReadRowsRequest` + :returns: The ``ReadRowsRequest`` protobuf corresponding to the inputs. 
+ :raises: :class:`ValueError ` if both + ``row_key`` and one of ``start_key`` and ``end_key`` are set + """ + request_kwargs = {'table_name': table_name} + if (row_key is not None and + (start_key is not None or end_key is not None)): + raise ValueError('Row key and row range cannot be ' + 'set simultaneously') + range_kwargs = {} + if start_key is not None or end_key is not None: + if start_key is not None: + if start_key_closed: + range_kwargs['start_key_closed'] = _to_bytes(start_key) + else: + range_kwargs['start_key_open'] = _to_bytes(start_key) + if end_key is not None: + range_kwargs['end_key_open'] = _to_bytes(end_key) + if filter_ is not None: + request_kwargs['filter'] = filter_.to_pb() + if limit is not None: + request_kwargs['rows_limit'] = limit + + message = data_messages_v2_pb2.ReadRowsRequest(**request_kwargs) + + if row_key is not None: + message.rows.row_keys.append(_to_bytes(row_key)) + + if range_kwargs: + message.rows.row_ranges.add(**range_kwargs) + + return message diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py index 78179db25c4e..0849e681b7e6 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py @@ -274,6 +274,9 @@ def consume_next(self): self._validate_chunk(chunk) + if hasattr(self._response_iterator, 'set_start_key'): + self._response_iterator.set_start_key(chunk.row_key) + if chunk.reset_row: row = self._row = None cell = self._cell = self._previous_cell = None diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index 8dbf8c1ce6fb..3ed2d20ea975 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -17,7 +17,6 @@ import six -from google.cloud._helpers import _to_bytes from google.cloud.bigtable._generated import ( bigtable_pb2 as data_messages_v2_pb2) from google.cloud.bigtable._generated import ( @@ -30,6 +29,26 @@ from google.cloud.bigtable.row import ConditionalRow from google.cloud.bigtable.row import DirectRow from google.cloud.bigtable.row_data import PartialRowsData +from google.gax import RetryOptions, BackoffSettings +from google.cloud.bigtable.retry import ReadRowsIterator, _create_row_request +from grpc import StatusCode + +BACKOFF_SETTINGS = BackoffSettings( + initial_retry_delay_millis=10, + retry_delay_multiplier=1.3, + max_retry_delay_millis=30000, + initial_rpc_timeout_millis=25 * 60 * 1000, + rpc_timeout_multiplier=1.0, + max_rpc_timeout_millis=25 * 60 * 1000, + total_timeout_millis=30 * 60 * 1000 +) + +RETRY_CODES = [ + StatusCode.DEADLINE_EXCEEDED, + StatusCode.ABORTED, + StatusCode.INTERNAL, + StatusCode.UNAVAILABLE +] # Maximum number of mutations in bulk (MutateRowsRequest message): @@ -257,7 +276,7 @@ def read_row(self, row_key, filter_=None): return rows_data.rows[row_key] def read_rows(self, start_key=None, end_key=None, limit=None, - filter_=None): + filter_=None, backoff_settings=None): """Read rows from this table. :type start_key: bytes @@ -284,13 +303,18 @@ def read_rows(self, start_key=None, end_key=None, limit=None, :returns: A :class:`.PartialRowsData` convenience wrapper for consuming the streamed results. 
""" - request_pb = _create_row_request( - self.name, start_key=start_key, end_key=end_key, filter_=filter_, - limit=limit) client = self._instance._client - response_iterator = client._data_stub.ReadRows(request_pb) - # We expect an iterator of `data_messages_v2_pb2.ReadRowsResponse` - return PartialRowsData(response_iterator) + if backoff_settings is None: + backoff_settings = BACKOFF_SETTINGS + RETRY_OPTIONS = RetryOptions( + retry_codes=RETRY_CODES, + backoff_settings=backoff_settings + ) + + retrying_iterator = ReadRowsIterator(client, self.name, start_key, + end_key, filter_, limit, + RETRY_OPTIONS) + return PartialRowsData(retrying_iterator) def mutate_rows(self, rows): """Mutates multiple rows in bulk. @@ -359,67 +383,6 @@ def sample_row_keys(self): return response_iterator -def _create_row_request(table_name, row_key=None, start_key=None, end_key=None, - filter_=None, limit=None): - """Creates a request to read rows in a table. - - :type table_name: str - :param table_name: The name of the table to read from. - - :type row_key: bytes - :param row_key: (Optional) The key of a specific row to read from. - - :type start_key: bytes - :param start_key: (Optional) The beginning of a range of row keys to - read from. The range will include ``start_key``. If - left empty, will be interpreted as the empty string. - - :type end_key: bytes - :param end_key: (Optional) The end of a range of row keys to read from. - The range will not include ``end_key``. If left empty, - will be interpreted as an infinite string. - - :type filter_: :class:`.RowFilter` - :param filter_: (Optional) The filter to apply to the contents of the - specified row(s). If unset, reads the entire table. - - :type limit: int - :param limit: (Optional) The read will terminate after committing to N - rows' worth of results. The default (zero) is to return - all results. - - :rtype: :class:`data_messages_v2_pb2.ReadRowsRequest` - :returns: The ``ReadRowsRequest`` protobuf corresponding to the inputs. - :raises: :class:`ValueError ` if both - ``row_key`` and one of ``start_key`` and ``end_key`` are set - """ - request_kwargs = {'table_name': table_name} - if (row_key is not None and - (start_key is not None or end_key is not None)): - raise ValueError('Row key and row range cannot be ' - 'set simultaneously') - range_kwargs = {} - if start_key is not None or end_key is not None: - if start_key is not None: - range_kwargs['start_key_closed'] = _to_bytes(start_key) - if end_key is not None: - range_kwargs['end_key_open'] = _to_bytes(end_key) - if filter_ is not None: - request_kwargs['filter'] = filter_.to_pb() - if limit is not None: - request_kwargs['rows_limit'] = limit - - message = data_messages_v2_pb2.ReadRowsRequest(**request_kwargs) - - if row_key is not None: - message.rows.row_keys.append(_to_bytes(row_key)) - - if range_kwargs: - message.rows.row_ranges.add(**range_kwargs) - - return message - - def _mutate_rows_request(table_name, rows): """Creates a request to mutate rows in a table. diff --git a/packages/google-cloud-bigtable/tests/retry_test_script.txt b/packages/google-cloud-bigtable/tests/retry_test_script.txt new file mode 100644 index 000000000000..863662e897ba --- /dev/null +++ b/packages/google-cloud-bigtable/tests/retry_test_script.txt @@ -0,0 +1,38 @@ +# This retry script is processed by the retry server and the client under test. +# Client tests should parse any command beginning with "CLIENT:", send the corresponding RPC +# to the retry server and expect a valid response. 
+# "EXPECT" commands indicate the call the server is expecting the client to send. +# +# The retry server has one table named "table" that should be used for testing. +# There are three types of commands supported: +# READ +# Expect the corresponding rows to be returned with arbitrary values. +# SCAN ... +# Ranges are expressed as an interval with either open or closed start and end, +# such as [1,3) for "1,2" or (1, 3] for "2,3". +# WRITE +# All writes should succeed eventually. Value payload is ignored. +# The server writes PASS or FAIL on a line by itself to STDOUT depending on the result of the test. +# All other server output should be ignored. + +# Echo same scan back after immediate error +CLIENT: SCAN [r1,r3) r1,r2 +EXPECT: SCAN [r1,r3) +SERVER: ERROR Unavailable +EXPECT: SCAN [r1,r3) +SERVER: READ_RESPONSE r1,r2 + +# Retry scans with open interval starting at the least read row key. +# Instead of using open intervals for retry ranges, '\x00' can be +# appended to the last received row key and sent in a closed interval. +CLIENT: SCAN [r1,r9) r1,r2,r3,r4,r5,r6,r7,r8 +EXPECT: SCAN [r1,r9) +SERVER: READ_RESPONSE r1,r2,r3,r4 +SERVER: ERROR Unavailable +EXPECT: SCAN (r4,r9) +SERVER: ERROR Unavailable +EXPECT: SCAN (r4,r9) +SERVER: READ_RESPONSE r5,r6,r7 +SERVER: ERROR Unavailable +EXPECT: SCAN (r7,r9) +SERVER: READ_RESPONSE r8 diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index 1fcda808db39..5a5b4324cbbe 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -295,6 +295,84 @@ def test_delete_column_family(self): # Make sure we have successfully deleted it. self.assertEqual(temp_table.list_column_families(), {}) + def test_retry(self): + import subprocess, os, stat, platform + from google.cloud.bigtable.client import Client + from google.cloud.bigtable.instance import Instance + from google.cloud.bigtable.table import Table + + # import for urlopen based on version + try: + # python 3 + from urllib.request import urlopen + except ImportError: + # python 2 + from urllib2 import urlopen + + + TEST_SCRIPT = 'tests/retry_test_script.txt' + SERVER_NAME = 'retry_server' + SERVER_ZIP = SERVER_NAME + ".tar.gz" + + def process_scan(table, range, ids): + range_chunks = range.split(",") + range_open = range_chunks[0].lstrip("[") + range_close = range_chunks[1].rstrip(")") + rows = table.read_rows(range_open, range_close) + rows.consume_all() + + # Download server + MOCK_SERVER_URLS = { + 'Linux': 'https://storage.googleapis.com/cloud-bigtable-test/retries/retry_server_linux.tar.gz', + 'Darwin': 'https://storage.googleapis.com/cloud-bigtable-test/retries/retry_server_mac.tar.gz', + } + + test_platform = platform.system() + if test_platform not in MOCK_SERVER_URLS: + self.skip('Retry server not available for platform {0}.'.format(test_platform)) + + mock_server_download = urlopen(MOCK_SERVER_URLS[test_platform]).read() + mock_server_file = open(SERVER_ZIP, 'wb') + mock_server_file.write(mock_server_download) + + # Unzip server + subprocess.call(['tar', 'zxvf', SERVER_ZIP, '-C', '.']) + + # Connect to server + server = subprocess.Popen( + ['./' + SERVER_NAME, '--script=' + TEST_SCRIPT], + stdin=subprocess.PIPE, stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + + (endpoint, port) = server.stdout.readline().rstrip("\n").split(":") + os.environ["BIGTABLE_EMULATOR_HOST"] = endpoint + ":" + port + client = Client(project="client", admin=True) + instance = Instance("instance", 
client) + table = instance.table("table") + + # Run test, line by line + with open(TEST_SCRIPT, 'r') as script: + for line in script.readlines(): + if line.startswith("CLIENT:"): + chunks = line.split(" ") + op = chunks[1] + process_scan(table, chunks[2], chunks[3]) + + # Check that the test passed + server.kill() + server_stdout_lines = [] + while True: + line = server.stdout.readline() + if line != '': + server_stdout_lines.append(line) + else: + break + self.assertEqual(server_stdout_lines[-1], "PASS\n") + + # Clean up + os.remove(SERVER_ZIP) + os.remove(SERVER_NAME) class TestDataAPI(unittest.TestCase): diff --git a/packages/google-cloud-bigtable/tests/unit/_testing.py b/packages/google-cloud-bigtable/tests/unit/_testing.py index e67af6a1498c..7587c66c133b 100644 --- a/packages/google-cloud-bigtable/tests/unit/_testing.py +++ b/packages/google-cloud-bigtable/tests/unit/_testing.py @@ -14,7 +14,6 @@ """Mocks used to emulate gRPC generated objects.""" - class _FakeStub(object): """Acts as a gPRC stub.""" @@ -27,6 +26,16 @@ def __getattr__(self, name): # since __getattribute__ will handle them. return _MethodMock(name, self) +class _CustomFakeStub(object): + """Acts as a gRPC stub. Generates a result using an injected callable.""" + def __init__(self, result_callable): + self.result_callable = result_callable + self.method_calls = [] + + def __getattr__(self, name): + # We need not worry about attributes set in constructor + # since __getattribute__ will handle them. + return _CustomMethodMock(name, self) class _MethodMock(object): """Mock for API method attached to a gRPC stub. @@ -42,5 +51,19 @@ def __call__(self, *args, **kwargs): """Sync method meant to mock a gRPC stub request.""" self._stub.method_calls.append((self._name, args, kwargs)) curr_result, self._stub.results = (self._stub.results[0], - self._stub.results[1:]) + self._stub.results[1:]) return curr_result + +class _CustomMethodMock(object): + """ + Same as _MethodMock, but backed by an injected callable. + """ + + def __init__(self, name, stub): + self._name = name + self._stub = stub + + def __call__(self, *args, **kwargs): + """Sync method meant to mock a gRPC stub request.""" + self._stub.method_calls.append((self._name, args, kwargs)) + return self._stub.result_callable() diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index 5867e76aff73..d985f7eb2f0f 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -493,7 +493,8 @@ def test_read_rows(self): from google.cloud._testing import _Monkey from tests.unit._testing import _FakeStub from google.cloud.bigtable.row_data import PartialRowsData - from google.cloud.bigtable import table as MUT + from google.cloud.bigtable import retry as MUT + from google.cloud.bigtable.retry import ReadRowsIterator client = _Client() instance = _Instance(self.INSTANCE_NAME, client=client) @@ -513,20 +514,18 @@ def mock_create_row_request(table_name, **kwargs): # Patch the stub used by the API method. client._data_stub = stub = _FakeStub(response_iterator) - # Create expected_result. - expected_result = PartialRowsData(response_iterator) - - # Perform the method and check the result. start_key = b'start-key' end_key = b'end-key' filter_obj = object() limit = 22 with _Monkey(MUT, _create_row_request=mock_create_row_request): + # Perform the method and check the result. 
result = table.read_rows( start_key=start_key, end_key=end_key, filter_=filter_obj, limit=limit) - self.assertEqual(result, expected_result) + self.assertIsInstance(result._response_iterator, ReadRowsIterator) + self.assertEqual(result._response_iterator.client, client) self.assertEqual(stub.method_calls, [( 'ReadRows', (request_pb,), @@ -537,9 +536,166 @@ def mock_create_row_request(table_name, **kwargs): 'end_key': end_key, 'filter_': filter_obj, 'limit': limit, + 'start_key_closed': True, } self.assertEqual(mock_created, [(table.name, created_kwargs)]) + def test_read_rows_one_chunk(self): + from google.cloud._testing import _Monkey + from tests.unit._testing import _FakeStub + from google.cloud.bigtable import retry as MUT + from google.cloud.bigtable.retry import ReadRowsIterator + from google.cloud.bigtable.row_data import Cell + from google.cloud.bigtable.row_data import PartialRowsData + + client = _Client() + instance = _Instance(self.INSTANCE_NAME, client=client) + table = self._make_one(self.TABLE_ID, instance) + + # Create request_pb + request_pb = object() # Returned by our mock. + mock_created = [] + + def mock_create_row_request(table_name, **kwargs): + mock_created.append((table_name, kwargs)) + return request_pb + + # Create response_iterator + chunk = _ReadRowsResponseCellChunkPB( + row_key=self.ROW_KEY, + family_name=self.FAMILY_NAME, + qualifier=self.QUALIFIER, + timestamp_micros=self.TIMESTAMP_MICROS, + value=self.VALUE, + commit_row=True, + ) + response_pb = _ReadRowsResponsePB(chunks=[chunk]) + response_iterator = iter([response_pb]) + + # Patch the stub used by the API method. + client._data_stub = stub = _FakeStub(response_iterator) + + start_key = b'start-key' + end_key = b'end-key' + filter_obj = object() + limit = 22 + with _Monkey(MUT, _create_row_request=mock_create_row_request): + # Perform the method and check the result. + result = table.read_rows( + start_key=start_key, end_key=end_key, filter_=filter_obj, + limit=limit) + result.consume_all() + + def test_read_rows_retry_timeout(self): + from google.cloud._testing import _Monkey + from tests.unit._testing import _CustomFakeStub + from google.cloud.bigtable.row_data import PartialRowsData + from google.cloud.bigtable import retry as MUT + from google.cloud.bigtable.retry import ReadRowsIterator + from google.gax import BackoffSettings + from google.gax.errors import RetryError + from grpc import StatusCode, RpcError + import time + + client = _Client() + instance = _Instance(self.INSTANCE_NAME, client=client) + table = self._make_one(self.TABLE_ID, instance) + + # Create request_pb + request_pb = object() # Returned by our mock. + mock_created = [] + + def mock_create_row_request(table_name, **kwargs): + mock_created.append((table_name, kwargs)) + return request_pb + + # Create a slow response iterator to cause a timeout + class MockTimeoutError(RpcError): + def code(self): + return StatusCode.DEADLINE_EXCEEDED + + def _wait_then_raise(): + time.sleep(0.1) + raise MockTimeoutError() + + # Patch the stub used by the API method. The stub should create a new + # slow_iterator every time its queried. 
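+ # (ReadRowsIterator.set_stream() calls the stub again on every retry
+ # attempt, so this callable is evaluated once per attempt.)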
+ def make_slow_iterator(): + return (_wait_then_raise() for i in range(10)) + client._data_stub = stub = _CustomFakeStub(make_slow_iterator) + + # Set to timeout before RPC completes + test_backoff_settings = BackoffSettings( + initial_retry_delay_millis=10, + retry_delay_multiplier=0.3, + max_retry_delay_millis=30000, + initial_rpc_timeout_millis=1000, + rpc_timeout_multiplier=1.0, + max_rpc_timeout_millis=25 * 60 * 1000, + total_timeout_millis=1000 + ) + + start_key = b'start-key' + end_key = b'end-key' + filter_obj = object() + limit = 22 + with _Monkey(MUT, _create_row_request=mock_create_row_request): + # Verify that a RetryError is thrown on read. + result = table.read_rows( + start_key=start_key, end_key=end_key, filter_=filter_obj, + limit=limit, backoff_settings=test_backoff_settings) + with self.assertRaises(RetryError): + result.consume_next() + + def test_read_rows_non_idempotent_error_throws(self): + from google.cloud._testing import _Monkey + from tests.unit._testing import _CustomFakeStub + from google.cloud.bigtable.row_data import PartialRowsData + from google.cloud.bigtable import retry as MUT + from google.cloud.bigtable.retry import ReadRowsIterator + from google.gax import BackoffSettings + from google.gax.errors import RetryError + from grpc import StatusCode, RpcError + import time + + client = _Client() + instance = _Instance(self.INSTANCE_NAME, client=client) + table = self._make_one(self.TABLE_ID, instance) + + # Create request_pb + request_pb = object() # Returned by our mock. + mock_created = [] + + def mock_create_row_request(table_name, **kwargs): + mock_created.append((table_name, kwargs)) + return request_pb + + # Create response iterator that raises a non-idempotent exception + class MockNonIdempotentError(RpcError): + def code(self): + return StatusCode.RESOURCE_EXHAUSTED + + def _raise(): + raise MockNonIdempotentError() + + # Patch the stub used by the API method. The stub should create a new + # slow_iterator every time its queried. + def make_raising_iterator(): + return (_raise() for i in range(10)) + client._data_stub = stub = _CustomFakeStub(make_raising_iterator) + + start_key = b'start-key' + end_key = b'end-key' + filter_obj = object() + limit = 22 + with _Monkey(MUT, _create_row_request=mock_create_row_request): + # Verify that a RetryError is thrown on read. 
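+ # DEADLINE_EXCEEDED is a retryable status, so the iterator keeps retrying
+ # until total_timeout_millis (1000 ms above) elapses and then raises
+ # google.gax.errors.RetryError.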
+ result = table.read_rows( + start_key=start_key, end_key=end_key, filter_=filter_obj, + limit=limit) + with self.assertRaises(MockNonIdempotentError): + result.consume_next() + def test_sample_row_keys(self): from tests.unit._testing import _FakeStub @@ -572,12 +728,12 @@ def test_sample_row_keys(self): class Test__create_row_request(unittest.TestCase): def _call_fut(self, table_name, row_key=None, start_key=None, end_key=None, - filter_=None, limit=None): - from google.cloud.bigtable.table import _create_row_request + start_key_closed=True, filter_=None, limit=None): + from google.cloud.bigtable.retry import _create_row_request return _create_row_request( table_name, row_key=row_key, start_key=start_key, end_key=end_key, - filter_=filter_, limit=limit) + start_key_closed=start_key_closed, filter_=filter_, limit=limit) def test_table_name_only(self): table_name = 'table_name' @@ -600,7 +756,7 @@ def test_row_key(self): expected_result.rows.row_keys.append(row_key) self.assertEqual(result, expected_result) - def test_row_range_start_key(self): + def test_row_range_start_key_closed(self): table_name = 'table_name' start_key = b'start_key' result = self._call_fut(table_name, start_key=start_key) @@ -608,6 +764,15 @@ def test_row_range_start_key(self): expected_result.rows.row_ranges.add(start_key_closed=start_key) self.assertEqual(result, expected_result) + def test_row_range_start_key_open(self): + table_name = 'table_name' + start_key = b'start_key' + result = self._call_fut(table_name, start_key=start_key, + start_key_closed=False) + expected_result = _ReadRowsRequestPB(table_name=table_name) + expected_result.rows.row_ranges.add(start_key_open=start_key) + self.assertEqual(result, expected_result) + def test_row_range_end_key(self): table_name = 'table_name' end_key = b'end_key' From fdb4a43b1bbcd092d27d0d93c7136cbe23b13807 Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Mon, 17 Jul 2017 10:03:33 -0700 Subject: [PATCH 067/892] Using `CopyFrom` to set protobuf message fields (instead of `MergeFrom`). (#3612) Fixes #3571. 
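+ # (In this case RESOURCE_EXHAUSTED is not in RETRY_CODES, so the original
+ # MockNonIdempotentError is re-raised instead of being wrapped in a
+ # RetryError.)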
--- packages/google-cloud-bigtable/google/cloud/bigtable/table.py | 2 +- packages/google-cloud-bigtable/tests/unit/test_table.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index 3ed2d20ea975..ad6fab88dcf9 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -200,7 +200,7 @@ def create(self, initial_split_keys=None, column_families=()): table_pb = table_v2_pb2.Table() for col_fam in column_families: curr_id = col_fam.column_family_id - table_pb.column_families[curr_id].MergeFrom(col_fam.to_pb()) + table_pb.column_families[curr_id].CopyFrom(col_fam.to_pb()) request_pb = table_admin_messages_v2_pb2.CreateTableRequest( initial_splits=initial_split_keys or [], diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index d985f7eb2f0f..c59667d6a821 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -255,7 +255,7 @@ def _create_test_helper(self, initial_split_keys, column_families=()): for cf in column_families: cf_pb = table_pb.column_families[cf.column_family_id] if cf.gc_rule is not None: - cf_pb.gc_rule.MergeFrom(cf.gc_rule.to_pb()) + cf_pb.gc_rule.CopyFrom(cf.gc_rule.to_pb()) request_pb = _CreateTableRequestPB( initial_splits=splits_pb, parent=self.INSTANCE_NAME, From ae64579b184e85217760abd98e389ba9cb1c6b02 Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Wed, 19 Jul 2017 14:58:17 -0700 Subject: [PATCH 068/892] Fixing references to "dead" docs links. (#3631) * Fixing references to "dead" docs links. Done via: $ git grep -l 'google-cloud-auth.html' | \ > xargs sed -i s/'google-cloud-auth.html'/'core\/auth.html'/g $ git grep -l 'http\:\/\/google-cloud-python.readthedocs.io' | \ > xargs sed -i s/'http\:\/\/google-cloud-python.readthedocs.io'/\ > 'https\:\/\/google-cloud-python.readthedocs.io'/g Fixes #3531. * Fixing up other docs that were moved in #3459. --- packages/google-cloud-bigtable/README.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/README.rst b/packages/google-cloud-bigtable/README.rst index 3b37f5ec6880..3385b882c28f 100644 --- a/packages/google-cloud-bigtable/README.rst +++ b/packages/google-cloud-bigtable/README.rst @@ -9,7 +9,7 @@ Python Client for Google Cloud Bigtable - `Documentation`_ -.. _Documentation: https://googlecloudplatform.github.io/google-cloud-python/stable/bigtable-usage.html +.. _Documentation: https://googlecloudplatform.github.io/google-cloud-python/stable/bigtable/usage.html Quick Start ----------- @@ -26,7 +26,7 @@ possible. Check out the `Authentication section`_ in our documentation to learn more. You may also find the `authentication document`_ shared by all the ``google-cloud-*`` libraries to be helpful. -.. _Authentication section: http://google-cloud-python.readthedocs.io/en/latest/google-cloud-auth.html +.. _Authentication section: https://google-cloud-python.readthedocs.io/en/latest/core/auth.html .. 
_authentication document: https://github.com/GoogleCloudPlatform/gcloud-common/tree/master/authentication Using the API From 698b5b230c1a9818b725143f452bd260d847551a Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Thu, 20 Jul 2017 09:33:21 -0700 Subject: [PATCH 069/892] Changing all pypi.python.org links to warehouse links. (#3641) Done via $ export OLD='https\:\/\/pypi.python.org\/pypi\/' $ export NEW='https\:\/\/pypi.org\/project\/' $ git grep -l ${OLD} | xargs sed -i s/${OLD}/${NEW}/g Then manually going through and adding a trailing slash to all warehouse links. (Though I did undo changes to `docs/json/`.) --- packages/google-cloud-bigtable/README.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/README.rst b/packages/google-cloud-bigtable/README.rst index 3385b882c28f..ebc202d8d87e 100644 --- a/packages/google-cloud-bigtable/README.rst +++ b/packages/google-cloud-bigtable/README.rst @@ -42,6 +42,6 @@ See the ``google-cloud-python`` API Bigtable `Documentation`_ to learn how to manage your data in Bigtable tables. .. |pypi| image:: https://img.shields.io/pypi/v/google-cloud-bigtable.svg - :target: https://pypi.python.org/pypi/google-cloud-bigtable + :target: https://pypi.org/project/google-cloud-bigtable/ .. |versions| image:: https://img.shields.io/pypi/pyversions/google-cloud-bigtable.svg - :target: https://pypi.python.org/pypi/google-cloud-bigtable + :target: https://pypi.org/project/google-cloud-bigtable/ From 350984b0eca182a62c6536bd88f84396c650dd42 Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Thu, 20 Jul 2017 10:07:12 -0700 Subject: [PATCH 070/892] Revert "RPC retries (second PR) (#3324)" (#3642) This reverts commit 0213b69cdf0177d9158d7608633b4d9c66930e03. --- .../google/cloud/bigtable/retry.py | 169 ---------------- .../google/cloud/bigtable/row_data.py | 3 - .../google/cloud/bigtable/table.py | 101 +++++++--- .../tests/retry_test_script.txt | 38 ---- .../google-cloud-bigtable/tests/system.py | 78 -------- .../tests/unit/_testing.py | 27 +-- .../tests/unit/test_table.py | 185 +----------------- 7 files changed, 81 insertions(+), 520 deletions(-) delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/retry.py delete mode 100644 packages/google-cloud-bigtable/tests/retry_test_script.txt diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/retry.py b/packages/google-cloud-bigtable/google/cloud/bigtable/retry.py deleted file mode 100644 index f20419ce4f8e..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/retry.py +++ /dev/null @@ -1,169 +0,0 @@ -"""Provides function wrappers that implement retrying.""" -import random -import time -import six -import sys - -from google.cloud._helpers import _to_bytes -from google.cloud.bigtable._generated import ( - bigtable_pb2 as data_messages_v2_pb2) -from google.gax import config, errors -from grpc import RpcError - - -_MILLIS_PER_SECOND = 1000 - - -class ReadRowsIterator(object): - """Creates an iterator equivalent to a_iter, but that retries on certain - exceptions. 
- """ - - def __init__(self, client, name, start_key, end_key, filter_, limit, - retry_options, **kwargs): - self.client = client - self.retry_options = retry_options - self.name = name - self.start_key = start_key - self.start_key_closed = True - self.end_key = end_key - self.filter_ = filter_ - self.limit = limit - self.delay_mult = retry_options.backoff_settings.retry_delay_multiplier - self.max_delay_millis = \ - retry_options.backoff_settings.max_retry_delay_millis - self.timeout_mult = \ - retry_options.backoff_settings.rpc_timeout_multiplier - self.max_timeout = \ - (retry_options.backoff_settings.max_rpc_timeout_millis / - _MILLIS_PER_SECOND) - self.total_timeout = \ - (retry_options.backoff_settings.total_timeout_millis / - _MILLIS_PER_SECOND) - self.set_stream() - - def set_start_key(self, start_key): - """ - Sets the row key at which this iterator will begin reading. - """ - self.start_key = start_key - self.start_key_closed = False - - def set_stream(self): - """ - Resets the read stream by making an RPC on the 'ReadRows' endpoint. - """ - req_pb = _create_row_request(self.name, start_key=self.start_key, - start_key_closed=self.start_key_closed, - end_key=self.end_key, - filter_=self.filter_, limit=self.limit) - self.stream = self.client._data_stub.ReadRows(req_pb) - - def next(self, *args, **kwargs): - """ - Read and return the next row from the stream. - Retry on idempotent failure. - """ - delay = self.retry_options.backoff_settings.initial_retry_delay_millis - exc = errors.RetryError('Retry total timeout exceeded before any' - 'response was received') - timeout = (self.retry_options.backoff_settings - .initial_rpc_timeout_millis / - _MILLIS_PER_SECOND) - - now = time.time() - deadline = now + self.total_timeout - while deadline is None or now < deadline: - try: - return six.next(self.stream) - except StopIteration as stop: - raise stop - except RpcError as error: # pylint: disable=broad-except - code = config.exc_to_code(error) - if code not in self.retry_options.retry_codes: - six.reraise(type(error), error) - - # pylint: disable=redefined-variable-type - exc = errors.RetryError( - 'Retry total timeout exceeded with exception', error) - - # Sleep a random number which will, on average, equal the - # expected delay. - to_sleep = random.uniform(0, delay * 2) - time.sleep(to_sleep / _MILLIS_PER_SECOND) - delay = min(delay * self.delay_mult, self.max_delay_millis) - now = time.time() - timeout = min( - timeout * self.timeout_mult, self.max_timeout, - deadline - now) - self.set_stream() - - six.reraise(errors.RetryError, exc, sys.exc_info()[2]) - - def __next__(self, *args, **kwargs): - return self.next(*args, **kwargs) - - -def _create_row_request(table_name, row_key=None, start_key=None, - start_key_closed=True, end_key=None, filter_=None, - limit=None): - """Creates a request to read rows in a table. - - :type table_name: str - :param table_name: The name of the table to read from. - - :type row_key: bytes - :param row_key: (Optional) The key of a specific row to read from. - - :type start_key: bytes - :param start_key: (Optional) The beginning of a range of row keys to - read from. The range will include ``start_key``. If - left empty, will be interpreted as the empty string. - - :type end_key: bytes - :param end_key: (Optional) The end of a range of row keys to read from. - The range will not include ``end_key``. If left empty, - will be interpreted as an infinite string. 
- - :type filter_: :class:`.RowFilter` - :param filter_: (Optional) The filter to apply to the contents of the - specified row(s). If unset, reads the entire table. - - :type limit: int - :param limit: (Optional) The read will terminate after committing to N - rows' worth of results. The default (zero) is to return - all results. - - :rtype: :class:`data_messages_v2_pb2.ReadRowsRequest` - :returns: The ``ReadRowsRequest`` protobuf corresponding to the inputs. - :raises: :class:`ValueError ` if both - ``row_key`` and one of ``start_key`` and ``end_key`` are set - """ - request_kwargs = {'table_name': table_name} - if (row_key is not None and - (start_key is not None or end_key is not None)): - raise ValueError('Row key and row range cannot be ' - 'set simultaneously') - range_kwargs = {} - if start_key is not None or end_key is not None: - if start_key is not None: - if start_key_closed: - range_kwargs['start_key_closed'] = _to_bytes(start_key) - else: - range_kwargs['start_key_open'] = _to_bytes(start_key) - if end_key is not None: - range_kwargs['end_key_open'] = _to_bytes(end_key) - if filter_ is not None: - request_kwargs['filter'] = filter_.to_pb() - if limit is not None: - request_kwargs['rows_limit'] = limit - - message = data_messages_v2_pb2.ReadRowsRequest(**request_kwargs) - - if row_key is not None: - message.rows.row_keys.append(_to_bytes(row_key)) - - if range_kwargs: - message.rows.row_ranges.add(**range_kwargs) - - return message diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py index 0849e681b7e6..78179db25c4e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py @@ -274,9 +274,6 @@ def consume_next(self): self._validate_chunk(chunk) - if hasattr(self._response_iterator, 'set_start_key'): - self._response_iterator.set_start_key(chunk.row_key) - if chunk.reset_row: row = self._row = None cell = self._cell = self._previous_cell = None diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index ad6fab88dcf9..40ef3a2ca2fb 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -17,6 +17,7 @@ import six +from google.cloud._helpers import _to_bytes from google.cloud.bigtable._generated import ( bigtable_pb2 as data_messages_v2_pb2) from google.cloud.bigtable._generated import ( @@ -29,26 +30,6 @@ from google.cloud.bigtable.row import ConditionalRow from google.cloud.bigtable.row import DirectRow from google.cloud.bigtable.row_data import PartialRowsData -from google.gax import RetryOptions, BackoffSettings -from google.cloud.bigtable.retry import ReadRowsIterator, _create_row_request -from grpc import StatusCode - -BACKOFF_SETTINGS = BackoffSettings( - initial_retry_delay_millis=10, - retry_delay_multiplier=1.3, - max_retry_delay_millis=30000, - initial_rpc_timeout_millis=25 * 60 * 1000, - rpc_timeout_multiplier=1.0, - max_rpc_timeout_millis=25 * 60 * 1000, - total_timeout_millis=30 * 60 * 1000 -) - -RETRY_CODES = [ - StatusCode.DEADLINE_EXCEEDED, - StatusCode.ABORTED, - StatusCode.INTERNAL, - StatusCode.UNAVAILABLE -] # Maximum number of mutations in bulk (MutateRowsRequest message): @@ -276,7 +257,7 @@ def read_row(self, row_key, filter_=None): return rows_data.rows[row_key] def read_rows(self, start_key=None, 
end_key=None, limit=None, - filter_=None, backoff_settings=None): + filter_=None): """Read rows from this table. :type start_key: bytes @@ -303,18 +284,13 @@ def read_rows(self, start_key=None, end_key=None, limit=None, :returns: A :class:`.PartialRowsData` convenience wrapper for consuming the streamed results. """ + request_pb = _create_row_request( + self.name, start_key=start_key, end_key=end_key, filter_=filter_, + limit=limit) client = self._instance._client - if backoff_settings is None: - backoff_settings = BACKOFF_SETTINGS - RETRY_OPTIONS = RetryOptions( - retry_codes=RETRY_CODES, - backoff_settings=backoff_settings - ) - - retrying_iterator = ReadRowsIterator(client, self.name, start_key, - end_key, filter_, limit, - RETRY_OPTIONS) - return PartialRowsData(retrying_iterator) + response_iterator = client._data_stub.ReadRows(request_pb) + # We expect an iterator of `data_messages_v2_pb2.ReadRowsResponse` + return PartialRowsData(response_iterator) def mutate_rows(self, rows): """Mutates multiple rows in bulk. @@ -383,6 +359,67 @@ def sample_row_keys(self): return response_iterator +def _create_row_request(table_name, row_key=None, start_key=None, end_key=None, + filter_=None, limit=None): + """Creates a request to read rows in a table. + + :type table_name: str + :param table_name: The name of the table to read from. + + :type row_key: bytes + :param row_key: (Optional) The key of a specific row to read from. + + :type start_key: bytes + :param start_key: (Optional) The beginning of a range of row keys to + read from. The range will include ``start_key``. If + left empty, will be interpreted as the empty string. + + :type end_key: bytes + :param end_key: (Optional) The end of a range of row keys to read from. + The range will not include ``end_key``. If left empty, + will be interpreted as an infinite string. + + :type filter_: :class:`.RowFilter` + :param filter_: (Optional) The filter to apply to the contents of the + specified row(s). If unset, reads the entire table. + + :type limit: int + :param limit: (Optional) The read will terminate after committing to N + rows' worth of results. The default (zero) is to return + all results. + + :rtype: :class:`data_messages_v2_pb2.ReadRowsRequest` + :returns: The ``ReadRowsRequest`` protobuf corresponding to the inputs. + :raises: :class:`ValueError ` if both + ``row_key`` and one of ``start_key`` and ``end_key`` are set + """ + request_kwargs = {'table_name': table_name} + if (row_key is not None and + (start_key is not None or end_key is not None)): + raise ValueError('Row key and row range cannot be ' + 'set simultaneously') + range_kwargs = {} + if start_key is not None or end_key is not None: + if start_key is not None: + range_kwargs['start_key_closed'] = _to_bytes(start_key) + if end_key is not None: + range_kwargs['end_key_open'] = _to_bytes(end_key) + if filter_ is not None: + request_kwargs['filter'] = filter_.to_pb() + if limit is not None: + request_kwargs['rows_limit'] = limit + + message = data_messages_v2_pb2.ReadRowsRequest(**request_kwargs) + + if row_key is not None: + message.rows.row_keys.append(_to_bytes(row_key)) + + if range_kwargs: + message.rows.row_ranges.add(**range_kwargs) + + return message + + def _mutate_rows_request(table_name, rows): """Creates a request to mutate rows in a table. 
diff --git a/packages/google-cloud-bigtable/tests/retry_test_script.txt b/packages/google-cloud-bigtable/tests/retry_test_script.txt deleted file mode 100644 index 863662e897ba..000000000000 --- a/packages/google-cloud-bigtable/tests/retry_test_script.txt +++ /dev/null @@ -1,38 +0,0 @@ -# This retry script is processed by the retry server and the client under test. -# Client tests should parse any command beginning with "CLIENT:", send the corresponding RPC -# to the retry server and expect a valid response. -# "EXPECT" commands indicate the call the server is expecting the client to send. -# -# The retry server has one table named "table" that should be used for testing. -# There are three types of commands supported: -# READ -# Expect the corresponding rows to be returned with arbitrary values. -# SCAN ... -# Ranges are expressed as an interval with either open or closed start and end, -# such as [1,3) for "1,2" or (1, 3] for "2,3". -# WRITE -# All writes should succeed eventually. Value payload is ignored. -# The server writes PASS or FAIL on a line by itself to STDOUT depending on the result of the test. -# All other server output should be ignored. - -# Echo same scan back after immediate error -CLIENT: SCAN [r1,r3) r1,r2 -EXPECT: SCAN [r1,r3) -SERVER: ERROR Unavailable -EXPECT: SCAN [r1,r3) -SERVER: READ_RESPONSE r1,r2 - -# Retry scans with open interval starting at the least read row key. -# Instead of using open intervals for retry ranges, '\x00' can be -# appended to the last received row key and sent in a closed interval. -CLIENT: SCAN [r1,r9) r1,r2,r3,r4,r5,r6,r7,r8 -EXPECT: SCAN [r1,r9) -SERVER: READ_RESPONSE r1,r2,r3,r4 -SERVER: ERROR Unavailable -EXPECT: SCAN (r4,r9) -SERVER: ERROR Unavailable -EXPECT: SCAN (r4,r9) -SERVER: READ_RESPONSE r5,r6,r7 -SERVER: ERROR Unavailable -EXPECT: SCAN (r7,r9) -SERVER: READ_RESPONSE r8 diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index 5a5b4324cbbe..1fcda808db39 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -295,84 +295,6 @@ def test_delete_column_family(self): # Make sure we have successfully deleted it. 
self.assertEqual(temp_table.list_column_families(), {}) - def test_retry(self): - import subprocess, os, stat, platform - from google.cloud.bigtable.client import Client - from google.cloud.bigtable.instance import Instance - from google.cloud.bigtable.table import Table - - # import for urlopen based on version - try: - # python 3 - from urllib.request import urlopen - except ImportError: - # python 2 - from urllib2 import urlopen - - - TEST_SCRIPT = 'tests/retry_test_script.txt' - SERVER_NAME = 'retry_server' - SERVER_ZIP = SERVER_NAME + ".tar.gz" - - def process_scan(table, range, ids): - range_chunks = range.split(",") - range_open = range_chunks[0].lstrip("[") - range_close = range_chunks[1].rstrip(")") - rows = table.read_rows(range_open, range_close) - rows.consume_all() - - # Download server - MOCK_SERVER_URLS = { - 'Linux': 'https://storage.googleapis.com/cloud-bigtable-test/retries/retry_server_linux.tar.gz', - 'Darwin': 'https://storage.googleapis.com/cloud-bigtable-test/retries/retry_server_mac.tar.gz', - } - - test_platform = platform.system() - if test_platform not in MOCK_SERVER_URLS: - self.skip('Retry server not available for platform {0}.'.format(test_platform)) - - mock_server_download = urlopen(MOCK_SERVER_URLS[test_platform]).read() - mock_server_file = open(SERVER_ZIP, 'wb') - mock_server_file.write(mock_server_download) - - # Unzip server - subprocess.call(['tar', 'zxvf', SERVER_ZIP, '-C', '.']) - - # Connect to server - server = subprocess.Popen( - ['./' + SERVER_NAME, '--script=' + TEST_SCRIPT], - stdin=subprocess.PIPE, stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - ) - - (endpoint, port) = server.stdout.readline().rstrip("\n").split(":") - os.environ["BIGTABLE_EMULATOR_HOST"] = endpoint + ":" + port - client = Client(project="client", admin=True) - instance = Instance("instance", client) - table = instance.table("table") - - # Run test, line by line - with open(TEST_SCRIPT, 'r') as script: - for line in script.readlines(): - if line.startswith("CLIENT:"): - chunks = line.split(" ") - op = chunks[1] - process_scan(table, chunks[2], chunks[3]) - - # Check that the test passed - server.kill() - server_stdout_lines = [] - while True: - line = server.stdout.readline() - if line != '': - server_stdout_lines.append(line) - else: - break - self.assertEqual(server_stdout_lines[-1], "PASS\n") - - # Clean up - os.remove(SERVER_ZIP) - os.remove(SERVER_NAME) class TestDataAPI(unittest.TestCase): diff --git a/packages/google-cloud-bigtable/tests/unit/_testing.py b/packages/google-cloud-bigtable/tests/unit/_testing.py index 7587c66c133b..e67af6a1498c 100644 --- a/packages/google-cloud-bigtable/tests/unit/_testing.py +++ b/packages/google-cloud-bigtable/tests/unit/_testing.py @@ -14,6 +14,7 @@ """Mocks used to emulate gRPC generated objects.""" + class _FakeStub(object): """Acts as a gPRC stub.""" @@ -26,16 +27,6 @@ def __getattr__(self, name): # since __getattribute__ will handle them. return _MethodMock(name, self) -class _CustomFakeStub(object): - """Acts as a gRPC stub. Generates a result using an injected callable.""" - def __init__(self, result_callable): - self.result_callable = result_callable - self.method_calls = [] - - def __getattr__(self, name): - # We need not worry about attributes set in constructor - # since __getattribute__ will handle them. - return _CustomMethodMock(name, self) class _MethodMock(object): """Mock for API method attached to a gRPC stub. 
@@ -51,19 +42,5 @@ def __call__(self, *args, **kwargs): """Sync method meant to mock a gRPC stub request.""" self._stub.method_calls.append((self._name, args, kwargs)) curr_result, self._stub.results = (self._stub.results[0], - self._stub.results[1:]) + self._stub.results[1:]) return curr_result - -class _CustomMethodMock(object): - """ - Same as _MethodMock, but backed by an injected callable. - """ - - def __init__(self, name, stub): - self._name = name - self._stub = stub - - def __call__(self, *args, **kwargs): - """Sync method meant to mock a gRPC stub request.""" - self._stub.method_calls.append((self._name, args, kwargs)) - return self._stub.result_callable() diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index c59667d6a821..dc4d2b5bbad0 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -493,8 +493,7 @@ def test_read_rows(self): from google.cloud._testing import _Monkey from tests.unit._testing import _FakeStub from google.cloud.bigtable.row_data import PartialRowsData - from google.cloud.bigtable import retry as MUT - from google.cloud.bigtable.retry import ReadRowsIterator + from google.cloud.bigtable import table as MUT client = _Client() instance = _Instance(self.INSTANCE_NAME, client=client) @@ -514,18 +513,20 @@ def mock_create_row_request(table_name, **kwargs): # Patch the stub used by the API method. client._data_stub = stub = _FakeStub(response_iterator) + # Create expected_result. + expected_result = PartialRowsData(response_iterator) + + # Perform the method and check the result. start_key = b'start-key' end_key = b'end-key' filter_obj = object() limit = 22 with _Monkey(MUT, _create_row_request=mock_create_row_request): - # Perform the method and check the result. result = table.read_rows( start_key=start_key, end_key=end_key, filter_=filter_obj, limit=limit) - self.assertIsInstance(result._response_iterator, ReadRowsIterator) - self.assertEqual(result._response_iterator.client, client) + self.assertEqual(result, expected_result) self.assertEqual(stub.method_calls, [( 'ReadRows', (request_pb,), @@ -536,166 +537,9 @@ def mock_create_row_request(table_name, **kwargs): 'end_key': end_key, 'filter_': filter_obj, 'limit': limit, - 'start_key_closed': True, } self.assertEqual(mock_created, [(table.name, created_kwargs)]) - def test_read_rows_one_chunk(self): - from google.cloud._testing import _Monkey - from tests.unit._testing import _FakeStub - from google.cloud.bigtable import retry as MUT - from google.cloud.bigtable.retry import ReadRowsIterator - from google.cloud.bigtable.row_data import Cell - from google.cloud.bigtable.row_data import PartialRowsData - - client = _Client() - instance = _Instance(self.INSTANCE_NAME, client=client) - table = self._make_one(self.TABLE_ID, instance) - - # Create request_pb - request_pb = object() # Returned by our mock. - mock_created = [] - - def mock_create_row_request(table_name, **kwargs): - mock_created.append((table_name, kwargs)) - return request_pb - - # Create response_iterator - chunk = _ReadRowsResponseCellChunkPB( - row_key=self.ROW_KEY, - family_name=self.FAMILY_NAME, - qualifier=self.QUALIFIER, - timestamp_micros=self.TIMESTAMP_MICROS, - value=self.VALUE, - commit_row=True, - ) - response_pb = _ReadRowsResponsePB(chunks=[chunk]) - response_iterator = iter([response_pb]) - - # Patch the stub used by the API method. 
- client._data_stub = stub = _FakeStub(response_iterator) - - start_key = b'start-key' - end_key = b'end-key' - filter_obj = object() - limit = 22 - with _Monkey(MUT, _create_row_request=mock_create_row_request): - # Perform the method and check the result. - result = table.read_rows( - start_key=start_key, end_key=end_key, filter_=filter_obj, - limit=limit) - result.consume_all() - - def test_read_rows_retry_timeout(self): - from google.cloud._testing import _Monkey - from tests.unit._testing import _CustomFakeStub - from google.cloud.bigtable.row_data import PartialRowsData - from google.cloud.bigtable import retry as MUT - from google.cloud.bigtable.retry import ReadRowsIterator - from google.gax import BackoffSettings - from google.gax.errors import RetryError - from grpc import StatusCode, RpcError - import time - - client = _Client() - instance = _Instance(self.INSTANCE_NAME, client=client) - table = self._make_one(self.TABLE_ID, instance) - - # Create request_pb - request_pb = object() # Returned by our mock. - mock_created = [] - - def mock_create_row_request(table_name, **kwargs): - mock_created.append((table_name, kwargs)) - return request_pb - - # Create a slow response iterator to cause a timeout - class MockTimeoutError(RpcError): - def code(self): - return StatusCode.DEADLINE_EXCEEDED - - def _wait_then_raise(): - time.sleep(0.1) - raise MockTimeoutError() - - # Patch the stub used by the API method. The stub should create a new - # slow_iterator every time its queried. - def make_slow_iterator(): - return (_wait_then_raise() for i in range(10)) - client._data_stub = stub = _CustomFakeStub(make_slow_iterator) - - # Set to timeout before RPC completes - test_backoff_settings = BackoffSettings( - initial_retry_delay_millis=10, - retry_delay_multiplier=0.3, - max_retry_delay_millis=30000, - initial_rpc_timeout_millis=1000, - rpc_timeout_multiplier=1.0, - max_rpc_timeout_millis=25 * 60 * 1000, - total_timeout_millis=1000 - ) - - start_key = b'start-key' - end_key = b'end-key' - filter_obj = object() - limit = 22 - with _Monkey(MUT, _create_row_request=mock_create_row_request): - # Verify that a RetryError is thrown on read. - result = table.read_rows( - start_key=start_key, end_key=end_key, filter_=filter_obj, - limit=limit, backoff_settings=test_backoff_settings) - with self.assertRaises(RetryError): - result.consume_next() - - def test_read_rows_non_idempotent_error_throws(self): - from google.cloud._testing import _Monkey - from tests.unit._testing import _CustomFakeStub - from google.cloud.bigtable.row_data import PartialRowsData - from google.cloud.bigtable import retry as MUT - from google.cloud.bigtable.retry import ReadRowsIterator - from google.gax import BackoffSettings - from google.gax.errors import RetryError - from grpc import StatusCode, RpcError - import time - - client = _Client() - instance = _Instance(self.INSTANCE_NAME, client=client) - table = self._make_one(self.TABLE_ID, instance) - - # Create request_pb - request_pb = object() # Returned by our mock. - mock_created = [] - - def mock_create_row_request(table_name, **kwargs): - mock_created.append((table_name, kwargs)) - return request_pb - - # Create response iterator that raises a non-idempotent exception - class MockNonIdempotentError(RpcError): - def code(self): - return StatusCode.RESOURCE_EXHAUSTED - - def _raise(): - raise MockNonIdempotentError() - - # Patch the stub used by the API method. The stub should create a new - # slow_iterator every time its queried. 
- def make_raising_iterator(): - return (_raise() for i in range(10)) - client._data_stub = stub = _CustomFakeStub(make_raising_iterator) - - start_key = b'start-key' - end_key = b'end-key' - filter_obj = object() - limit = 22 - with _Monkey(MUT, _create_row_request=mock_create_row_request): - # Verify that a RetryError is thrown on read. - result = table.read_rows( - start_key=start_key, end_key=end_key, filter_=filter_obj, - limit=limit) - with self.assertRaises(MockNonIdempotentError): - result.consume_next() - def test_sample_row_keys(self): from tests.unit._testing import _FakeStub @@ -728,12 +572,12 @@ def test_sample_row_keys(self): class Test__create_row_request(unittest.TestCase): def _call_fut(self, table_name, row_key=None, start_key=None, end_key=None, - start_key_closed=True, filter_=None, limit=None): - from google.cloud.bigtable.retry import _create_row_request + filter_=None, limit=None): + from google.cloud.bigtable.table import _create_row_request return _create_row_request( table_name, row_key=row_key, start_key=start_key, end_key=end_key, - start_key_closed=start_key_closed, filter_=filter_, limit=limit) + filter_=filter_, limit=limit) def test_table_name_only(self): table_name = 'table_name' @@ -756,7 +600,7 @@ def test_row_key(self): expected_result.rows.row_keys.append(row_key) self.assertEqual(result, expected_result) - def test_row_range_start_key_closed(self): + def test_row_range_start_key(self): table_name = 'table_name' start_key = b'start_key' result = self._call_fut(table_name, start_key=start_key) @@ -764,15 +608,6 @@ def test_row_range_start_key_closed(self): expected_result.rows.row_ranges.add(start_key_closed=start_key) self.assertEqual(result, expected_result) - def test_row_range_start_key_open(self): - table_name = 'table_name' - start_key = b'start_key' - result = self._call_fut(table_name, start_key=start_key, - start_key_closed=False) - expected_result = _ReadRowsRequestPB(table_name=table_name) - expected_result.rows.row_ranges.add(start_key_open=start_key) - self.assertEqual(result, expected_result) - def test_row_range_end_key(self): table_name = 'table_name' end_key = b'end_key' From 8e0a2e736dd23cd33f397d7880093df39a07b4f1 Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Thu, 20 Jul 2017 10:40:24 -0700 Subject: [PATCH 071/892] Adding Bigtable Cluster location on create() request. 
(#3646) --- packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py | 1 + packages/google-cloud-bigtable/tests/unit/test_cluster.py | 2 ++ 2 files changed, 3 insertions(+) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py index 80b9068958db..8f7321b2a548 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py @@ -49,6 +49,7 @@ def _prepare_create_request(cluster): parent=cluster._instance.name, cluster_id=cluster.cluster_id, cluster=data_v2_pb2.Cluster( + location=cluster.location, serve_nodes=cluster.serve_nodes, ), ) diff --git a/packages/google-cloud-bigtable/tests/unit/test_cluster.py b/packages/google-cloud-bigtable/tests/unit/test_cluster.py index 3cc40964ba49..3eb18f43863d 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_cluster.py +++ b/packages/google-cloud-bigtable/tests/unit/test_cluster.py @@ -393,12 +393,14 @@ def test_it(self): instance = _Instance(INSTANCE_ID, client) cluster = Cluster(CLUSTER_ID, instance, serve_nodes=SERVE_NODES) + cluster.location = u'projects/prahj-ekt/locations/zona-tres' request_pb = self._call_fut(cluster) self.assertEqual(request_pb.cluster_id, CLUSTER_ID) self.assertEqual(request_pb.parent, instance.name) self.assertEqual(request_pb.cluster.serve_nodes, SERVE_NODES) + self.assertEqual(request_pb.cluster.location, cluster.location) def _ClusterPB(*args, **kw): From 2c1d421175c4c09212134b51720564aa82abd348 Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Thu, 20 Jul 2017 11:00:26 -0700 Subject: [PATCH 072/892] Update bigtable to use future.operation (#3623) --- .../google/cloud/bigtable/cluster.py | 37 ++++++++++--------- .../google/cloud/bigtable/instance.py | 17 ++++----- .../google-cloud-bigtable/tests/system.py | 27 +------------- .../tests/unit/test_cluster.py | 23 +++++------- .../tests/unit/test_instance.py | 21 +++++------ 5 files changed, 46 insertions(+), 79 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py index 8f7321b2a548..8d15547efae3 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py @@ -21,9 +21,7 @@ instance_pb2 as data_v2_pb2) from google.cloud.bigtable._generated import ( bigtable_instance_admin_pb2 as messages_v2_pb2) -from google.cloud.operation import Operation -from google.cloud.operation import register_type - +from google.cloud.future import operation _CLUSTER_NAME_RE = re.compile(r'^projects/(?P[^/]+)/' r'instances/(?P[^/]+)/clusters/' @@ -33,9 +31,6 @@ """Default number of nodes to use when creating a cluster.""" -register_type(messages_v2_pb2.UpdateClusterMetadata) - - def _prepare_create_request(cluster): """Creates a protobuf request for a CreateCluster request. @@ -208,15 +203,18 @@ def create(self): :returns: The long-running operation corresponding to the create operation. """ - request_pb = _prepare_create_request(self) - # We expect a `google.longrunning.operations_pb2.Operation`. client = self._instance._client + + # We expect a `google.longrunning.operations_pb2.Operation`. 
+ request_pb = _prepare_create_request(self) operation_pb = client._instance_stub.CreateCluster(request_pb) - operation = Operation.from_pb(operation_pb, client) - operation.target = self - operation.caller_metadata['request_type'] = 'CreateCluster' - return operation + operation_future = operation.from_grpc( + operation_pb, + client._operations_stub, + data_v2_pb2.Cluster, + metadata_type=messages_v2_pb2.UpdateClusterMetadata) + return operation_future def update(self): """Update this cluster. @@ -236,18 +234,21 @@ def update(self): :returns: The long-running operation corresponding to the update operation. """ + client = self._instance._client + + # We expect a `google.longrunning.operations_pb2.Operation`. request_pb = data_v2_pb2.Cluster( name=self.name, serve_nodes=self.serve_nodes, ) - # We expect a `google.longrunning.operations_pb2.Operation`. - client = self._instance._client operation_pb = client._instance_stub.UpdateCluster(request_pb) - operation = Operation.from_pb(operation_pb, client) - operation.target = self - operation.caller_metadata['request_type'] = 'UpdateCluster' - return operation + operation_future = operation.from_grpc( + operation_pb, + client._operations_stub, + data_v2_pb2.Cluster, + metadata_type=messages_v2_pb2.UpdateClusterMetadata) + return operation_future def delete(self): """Delete this cluster. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py index 1de3cbcea814..958f16602953 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py @@ -26,8 +26,7 @@ from google.cloud.bigtable.cluster import Cluster from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES from google.cloud.bigtable.table import Table -from google.cloud.operation import Operation -from google.cloud.operation import register_type +from google.cloud.future import operation _EXISTING_INSTANCE_LOCATION_ID = 'see-existing-cluster' @@ -35,10 +34,6 @@ r'instances/(?P[a-z][-a-z0-9]*)$') -register_type(messages_v2_pb2.CreateInstanceMetadata) -register_type(data_v2_pb2.Instance) - - def _prepare_create_request(instance): """Creates a protobuf request for a CreateInstance request. @@ -232,10 +227,12 @@ def create(self): # We expect a `google.longrunning.operations_pb2.Operation`. operation_pb = self._client._instance_stub.CreateInstance(request_pb) - operation = Operation.from_pb(operation_pb, self._client) - operation.target = self - operation.caller_metadata['request_type'] = 'CreateInstance' - return operation + operation_future = operation.from_grpc( + operation_pb, + self._client._operations_stub, + data_v2_pb2.Instance, + metadata_type=messages_v2_pb2.CreateInstanceMetadata) + return operation_future def update(self): """Update this instance. diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index 1fcda808db39..cfc2cb17f805 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -32,7 +32,6 @@ from google.cloud.environment_vars import BIGTABLE_EMULATOR from test_utils.retry import RetryErrors -from test_utils.retry import RetryResult from test_utils.system import EmulatorCreds from test_utils.system import unique_resource_id @@ -65,27 +64,6 @@ class Config(object): IN_EMULATOR = False -def _wait_until_complete(operation, max_attempts=5): - """Wait until an operation has completed. 
- - :type operation: :class:`google.cloud.operation.Operation` - :param operation: Operation that has not completed. - - :type max_attempts: int - :param max_attempts: (Optional) The maximum number of times to check if - the operation has completed. Defaults to 5. - - :rtype: bool - :returns: Boolean indicating if the operation is complete. - """ - - def _operation_complete(result): - return result - - retry = RetryResult(_operation_complete, max_tries=max_attempts) - return retry(operation.poll)() - - def _retry_on_unavailable(exc): """Retry only errors whose status code is 'UNAVAILABLE'.""" from grpc import StatusCode @@ -117,8 +95,7 @@ def setUpModule(): # After listing, create the test instance. created_op = Config.INSTANCE.create() - if not _wait_until_complete(created_op): - raise RuntimeError('Instance creation exceed 5 seconds.') + created_op.result(timeout=10) def tearDownModule(): @@ -166,7 +143,7 @@ def test_create_instance(self): self.instances_to_delete.append(instance) # We want to make sure the operation completes. - self.assertTrue(_wait_until_complete(operation)) + operation.result(timeout=10) # Create a new instance instance and make sure it is the same. instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID, LOCATION_ID) diff --git a/packages/google-cloud-bigtable/tests/unit/test_cluster.py b/packages/google-cloud-bigtable/tests/unit/test_cluster.py index 3eb18f43863d..e244b55d6dff 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_cluster.py +++ b/packages/google-cloud-bigtable/tests/unit/test_cluster.py @@ -15,6 +15,8 @@ import unittest +import mock + class TestCluster(unittest.TestCase): @@ -232,7 +234,7 @@ def test_reload(self): def test_create(self): from google.longrunning import operations_pb2 - from google.cloud.operation import Operation + from google.cloud.future import operation from google.cloud.bigtable._generated import ( bigtable_instance_admin_pb2 as messages_v2_pb2) from tests.unit._testing import _FakeStub @@ -256,13 +258,9 @@ def test_create(self): # Perform the method and check the result. 
result = cluster.create() - self.assertIsInstance(result, Operation) - self.assertEqual(result.name, OP_NAME) - self.assertIs(result.target, cluster) - self.assertIs(result.client, client) + self.assertIsInstance(result, operation.Operation) + self.assertEqual(result.operation.name, OP_NAME) self.assertIsNone(result.metadata) - self.assertEqual(result.caller_metadata, - {'request_type': 'CreateCluster'}) self.assertEqual(len(stub.method_calls), 1) api_name, args, kwargs = stub.method_calls[0] @@ -278,7 +276,7 @@ def test_create(self): def test_update(self): import datetime from google.longrunning import operations_pb2 - from google.cloud.operation import Operation + from google.cloud.future import operation from google.protobuf.any_pb2 import Any from google.cloud._helpers import _datetime_to_pb_timestamp from google.cloud.bigtable._generated import ( @@ -324,15 +322,11 @@ def test_update(self): result = cluster.update() - self.assertIsInstance(result, Operation) - self.assertEqual(result.name, OP_NAME) - self.assertIs(result.target, cluster) - self.assertIs(result.client, client) + self.assertIsInstance(result, operation.Operation) + self.assertEqual(result.operation.name, OP_NAME) self.assertIsInstance(result.metadata, messages_v2_pb2.UpdateClusterMetadata) self.assertEqual(result.metadata.request_time, NOW_PB) - self.assertEqual(result.caller_metadata, - {'request_type': 'UpdateCluster'}) self.assertEqual(len(stub.method_calls), 1) api_name, args, kwargs = stub.method_calls[0] @@ -448,6 +442,7 @@ class _Client(object): def __init__(self, project): self.project = project self.project_name = 'projects/' + self.project + self._operations_stub = mock.sentinel.operations_stub def __eq__(self, other): return (other.project == self.project and diff --git a/packages/google-cloud-bigtable/tests/unit/test_instance.py b/packages/google-cloud-bigtable/tests/unit/test_instance.py index cdad3c376d0a..03c0034fc49e 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_instance.py +++ b/packages/google-cloud-bigtable/tests/unit/test_instance.py @@ -15,6 +15,8 @@ import unittest +import mock + class TestInstance(unittest.TestCase): @@ -236,7 +238,7 @@ def test_create(self): bigtable_instance_admin_pb2 as messages_v2_pb2) from google.cloud._helpers import _datetime_to_pb_timestamp from tests.unit._testing import _FakeStub - from google.cloud.operation import Operation + from google.cloud.future import operation from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES NOW = datetime.datetime.utcnow() @@ -263,15 +265,11 @@ def test_create(self): # Perform the method and check the result. 
result = instance.create() - self.assertIsInstance(result, Operation) - self.assertEqual(result.name, self.OP_NAME) - self.assertIs(result.target, instance) - self.assertIs(result.client, client) + self.assertIsInstance(result, operation.Operation) + self.assertEqual(result.operation.name, self.OP_NAME) self.assertIsInstance(result.metadata, messages_v2_pb2.CreateInstanceMetadata) self.assertEqual(result.metadata.request_time, NOW_PB) - self.assertEqual(result.caller_metadata, - {'request_type': 'CreateInstance'}) self.assertEqual(len(stub.method_calls), 1) api_name, args, kwargs = stub.method_calls[0] @@ -291,7 +289,7 @@ def test_create_w_explicit_serve_nodes(self): from google.cloud.bigtable._generated import ( bigtable_instance_admin_pb2 as messages_v2_pb2) from tests.unit._testing import _FakeStub - from google.cloud.operation import Operation + from google.cloud.future import operation SERVE_NODES = 5 @@ -308,10 +306,8 @@ def test_create_w_explicit_serve_nodes(self): # Perform the method and check the result. result = instance.create() - self.assertIsInstance(result, Operation) - self.assertEqual(result.name, self.OP_NAME) - self.assertIs(result.target, instance) - self.assertIs(result.client, client) + self.assertIsInstance(result, operation.Operation) + self.assertEqual(result.operation.name, self.OP_NAME) self.assertEqual(len(stub.method_calls), 1) api_name, args, kwargs = stub.method_calls[0] @@ -582,6 +578,7 @@ class _Client(object): def __init__(self, project): self.project = project self.project_name = 'projects/' + self.project + self._operations_stub = mock.sentinel.operations_stub def copy(self): from copy import deepcopy From bbf72647f6949e8eaefc8ae8d67d42a49bd1b2bf Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Tue, 25 Jul 2017 14:13:44 -0700 Subject: [PATCH 073/892] Removing `get_credentials()` from `core`. (#3667) * Removing `get_credentials()` from `core`. In the process also: - Slight re-org on `nox.py` config (to pass posargs) for `core` and `datastore` - Getting rid of last usage of `_Monkey` in datastore This is part of `@jonparrott`'s effort to slim down / stabilize `core`. * Removing `google.cloud.credentials` module from docs. 
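[Editorial note, not part of the patch] To illustrate what the client.py change below means for callers, here is a minimal sketch of constructing a Bigtable client after this commit. It assumes Application Default Credentials are configured (for example via the GOOGLE_APPLICATION_CREDENTIALS environment variable); the project id is a placeholder.

    import google.auth
    from google.cloud.bigtable.client import Client

    # Explicit form: resolve Application Default Credentials ourselves.
    credentials, project = google.auth.default()
    client = Client(project=project, credentials=credentials, admin=True)

    # Implicit form: after this change the constructor falls back to
    # google.auth.default() when credentials is None, instead of the
    # removed google.cloud.credentials.get_credentials() helper.
    implicit_client = Client(project='my-project', admin=True)  # placeholder project id
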
--- .../google/cloud/bigtable/client.py | 4 ++-- .../google-cloud-bigtable/tests/unit/test_client.py | 13 ++++++------- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py index 86ee7173c917..62877371a945 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py @@ -31,6 +31,7 @@ import os +import google.auth import google.auth.credentials from google.gax.utils import metrics from google.longrunning import operations_grpc @@ -40,7 +41,6 @@ from google.cloud._http import DEFAULT_USER_AGENT from google.cloud.client import _ClientFactoryMixin from google.cloud.client import _ClientProjectMixin -from google.cloud.credentials import get_credentials from google.cloud.environment_vars import BIGTABLE_EMULATOR from google.cloud.bigtable import __version__ @@ -211,7 +211,7 @@ def __init__(self, project=None, credentials=None, read_only=False, admin=False, user_agent=DEFAULT_USER_AGENT): _ClientProjectMixin.__init__(self, project=project) if credentials is None: - credentials = get_credentials() + credentials, _ = google.auth.default() if read_only and admin: raise ValueError('A read-only client cannot also perform' diff --git a/packages/google-cloud-bigtable/tests/unit/test_client.py b/packages/google-cloud-bigtable/tests/unit/test_client.py index 17656be60c00..c3ab8d1ed888 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/test_client.py @@ -360,20 +360,19 @@ def test_constructor_both_admin_and_read_only(self): read_only=True) def test_constructor_implicit_credentials(self): - from google.cloud._testing import _Monkey - from google.cloud.bigtable import client as MUT + from google.cloud.bigtable.client import DATA_SCOPE creds = _make_credentials() - expected_scopes = [MUT.DATA_SCOPE] - - def mock_get_credentials(): - return creds + expected_scopes = [DATA_SCOPE] - with _Monkey(MUT, get_credentials=mock_get_credentials): + patch = mock.patch( + 'google.auth.default', return_value=(creds, None)) + with patch as default: self._constructor_test_helper( None, None, expected_creds=creds.with_scopes.return_value) + default.assert_called_once_with() creds.with_scopes.assert_called_once_with(expected_scopes) def test_constructor_credentials_wo_create_scoped(self): From a5db9d5cc0969082cd0308c0ba718efa1d365bb2 Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Wed, 26 Jul 2017 14:43:54 -0700 Subject: [PATCH 074/892] Simplifying Client constructor's for Bigtable and Spanner. (#3672) * Simplifying Client constructor's for Bigtable and Spanner. * Fixing Bigtable unit tests after Client re-factor. Also slightly changing the Client constructor so that it only called `with_scopes()` one time on the credentials (was previously calling with `SCOPE=None` and then again with the custom scope for the instance) * Fixing Spanner unit tests after Client re-factor. Also slightly changing the `copy()` method so that it just passes the same credentials instance. Also updating `nox` config to allow session `posargs`. * Removing unused imports after Bigtable/Spanner Client re-factor. 
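[Editorial note, not part of the patch] As a companion to the diff below, a small sketch of the scope selection that the new `Client._get_scopes()` helper centralizes. The scope constants are imported from the module rather than spelled out here, and the logic mirrors the hunk that follows; note the real constructor rejects `read_only=True` combined with `admin=True`.

    from google.cloud.bigtable.client import (
        ADMIN_SCOPE, DATA_SCOPE, READ_ONLY_SCOPE)

    def pick_scopes(read_only=False, admin=False):
        # Mirrors Client._get_scopes(): read-only replaces the data scope,
        # and the admin scope is appended when admin=True.
        scopes = (READ_ONLY_SCOPE,) if read_only else (DATA_SCOPE,)
        if admin:
            scopes += (ADMIN_SCOPE,)
        return scopes

    assert pick_scopes() == (DATA_SCOPE,)
    assert pick_scopes(admin=True) == (DATA_SCOPE, ADMIN_SCOPE)
    assert pick_scopes(read_only=True) == (READ_ONLY_SCOPE,)
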
--- .../google/cloud/bigtable/client.py | 51 +-- packages/google-cloud-bigtable/nox.py | 13 +- .../tests/unit/test_client.py | 386 ++++++++++-------- 3 files changed, 247 insertions(+), 203 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py index 62877371a945..de6d0768266f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py @@ -31,16 +31,13 @@ import os -import google.auth -import google.auth.credentials from google.gax.utils import metrics from google.longrunning import operations_grpc from google.cloud._helpers import make_insecure_stub from google.cloud._helpers import make_secure_stub from google.cloud._http import DEFAULT_USER_AGENT -from google.cloud.client import _ClientFactoryMixin -from google.cloud.client import _ClientProjectMixin +from google.cloud.client import ClientWithProject from google.cloud.environment_vars import BIGTABLE_EMULATOR from google.cloud.bigtable import __version__ @@ -166,13 +163,13 @@ def _make_table_stub(client): client.emulator_host) -class Client(_ClientFactoryMixin, _ClientProjectMixin): +class Client(ClientWithProject): """Client for interacting with Google Cloud Bigtable API. .. note:: Since the Cloud Bigtable API requires the gRPC transport, no - ``http`` argument is accepted by this class. + ``_http`` argument is accepted by this class. :type project: :class:`str` or :func:`unicode ` :param project: (Optional) The ID of the project which owns the @@ -209,31 +206,21 @@ class Client(_ClientFactoryMixin, _ClientProjectMixin): def __init__(self, project=None, credentials=None, read_only=False, admin=False, user_agent=DEFAULT_USER_AGENT): - _ClientProjectMixin.__init__(self, project=project) - if credentials is None: - credentials, _ = google.auth.default() - if read_only and admin: raise ValueError('A read-only client cannot also perform' 'administrative actions.') - scopes = [] - if read_only: - scopes.append(READ_ONLY_SCOPE) - else: - scopes.append(DATA_SCOPE) - + # NOTE: We set the scopes **before** calling the parent constructor. + # It **may** use those scopes in ``with_scopes_if_required``. self._read_only = bool(read_only) - - if admin: - scopes.append(ADMIN_SCOPE) - self._admin = bool(admin) + self.SCOPE = self._get_scopes() - credentials = google.auth.credentials.with_scopes_if_required( - credentials, scopes) - - self._credentials = credentials + # NOTE: This API has no use for the _http argument, but sending it + # will have no impact since the _http() @property only lazily + # creates a working HTTP object. + super(Client, self).__init__( + project=project, credentials=credentials, _http=None) self.user_agent = user_agent self.emulator_host = os.getenv(BIGTABLE_EMULATOR) @@ -244,6 +231,22 @@ def __init__(self, project=None, credentials=None, self._operations_stub_internal = _make_operations_stub(self) self._table_stub_internal = _make_table_stub(self) + def _get_scopes(self): + """Get the scopes corresponding to admin / read-only state. + + Returns: + Tuple[str, ...]: The tuple of scopes. + """ + if self._read_only: + scopes = (READ_ONLY_SCOPE,) + else: + scopes = (DATA_SCOPE,) + + if self._admin: + scopes += (ADMIN_SCOPE,) + + return scopes + def copy(self): """Make a copy of this client. 
diff --git a/packages/google-cloud-bigtable/nox.py b/packages/google-cloud-bigtable/nox.py index b43e196a95ff..83b56e49d2df 100644 --- a/packages/google-cloud-bigtable/nox.py +++ b/packages/google-cloud-bigtable/nox.py @@ -37,10 +37,17 @@ def unit_tests(session, python_version): session.install('-e', '.') # Run py.test against the unit tests. - session.run('py.test', '--quiet', - '--cov=google.cloud.bigtable', '--cov=tests.unit', '--cov-append', - '--cov-config=.coveragerc', '--cov-report=', '--cov-fail-under=97', + session.run( + 'py.test', + '--quiet', + '--cov=google.cloud.bigtable', + '--cov=tests.unit', + '--cov-append', + '--cov-config=.coveragerc', + '--cov-report=', + '--cov-fail-under=97', 'tests/unit', + *session.posargs ) diff --git a/packages/google-cloud-bigtable/tests/unit/test_client.py b/packages/google-cloud-bigtable/tests/unit/test_client.py index c3ab8d1ed888..9e0485a41554 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/test_client.py @@ -256,170 +256,215 @@ def _get_target_class(): def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) - def _make_oneWithMocks(self, *args, **kwargs): - from google.cloud._testing import _Monkey - from google.cloud.bigtable import client as MUT - - mock_make_data_stub = _MakeStubMock() - mock_make_instance_stub = _MakeStubMock() - mock_make_operations_stub = _MakeStubMock() - mock_make_table_stub = _MakeStubMock() - with _Monkey(MUT, _make_data_stub=mock_make_data_stub, - _make_instance_stub=mock_make_instance_stub, - _make_operations_stub=mock_make_operations_stub, - _make_table_stub=mock_make_table_stub): - return self._make_one(*args, **kwargs) - - def _constructor_test_helper(self, expected_scopes, creds, - read_only=False, admin=False, - user_agent=None, expected_creds=None): - from google.cloud._testing import _Monkey - from google.cloud.bigtable import client as MUT - - user_agent = user_agent or MUT.DEFAULT_USER_AGENT - - mock_make_data_stub = _MakeStubMock() - mock_make_instance_stub = _MakeStubMock() - mock_make_operations_stub = _MakeStubMock() - mock_make_table_stub = _MakeStubMock() - with _Monkey(MUT, _make_data_stub=mock_make_data_stub, - _make_instance_stub=mock_make_instance_stub, - _make_operations_stub=mock_make_operations_stub, - _make_table_stub=mock_make_table_stub): - client = self._make_one(project=self.PROJECT, credentials=creds, - read_only=read_only, admin=admin, - user_agent=user_agent) - - # Verify the mocks. 
- self.assertEqual(mock_make_data_stub.calls, [client]) - if admin: - self.assertSequenceEqual(mock_make_instance_stub.calls, [client]) - self.assertSequenceEqual(mock_make_operations_stub.calls, [client]) - self.assertSequenceEqual(mock_make_table_stub.calls, [client]) - else: - self.assertSequenceEqual(mock_make_instance_stub.calls, []) - self.assertSequenceEqual(mock_make_operations_stub.calls, []) - self.assertSequenceEqual(mock_make_table_stub.calls, []) - - expected_creds = expected_creds or creds.with_scopes.return_value - self.assertIs(client._credentials, expected_creds) + @mock.patch('google.cloud.bigtable.client._make_table_stub') + @mock.patch('google.cloud.bigtable.client._make_operations_stub') + @mock.patch('google.cloud.bigtable.client._make_instance_stub') + @mock.patch('google.cloud.bigtable.client._make_data_stub') + def _make_one_with_mocks( + self, _make_data_stub, _make_instance_stub, + _make_operations_stub, _make_table_stub, + *args, **kwargs): + return self._make_one(*args, **kwargs) + + @mock.patch('google.cloud.bigtable.client._make_table_stub') + @mock.patch('google.cloud.bigtable.client._make_operations_stub') + @mock.patch('google.cloud.bigtable.client._make_instance_stub') + @mock.patch('google.cloud.bigtable.client._make_data_stub') + def test_constructor_default_scopes( + self, _make_data_stub, _make_instance_stub, + _make_operations_stub, _make_table_stub): + from google.cloud.bigtable.client import DATA_SCOPE - if expected_scopes is not None: - creds.with_scopes.assert_called_once_with(expected_scopes) + expected_scopes = (DATA_SCOPE,) + credentials = _make_credentials() + custom_user_agent = 'custom-application' + client = self._make_one( + project=self.PROJECT, credentials=credentials, + user_agent=custom_user_agent) self.assertEqual(client.project, self.PROJECT) - self.assertEqual(client.user_agent, user_agent) - # Check gRPC stubs (or mocks of them) are set - self.assertIs(client._data_stub, mock_make_data_stub.result) - if admin: - self.assertIs(client._instance_stub_internal, - mock_make_instance_stub.result) - self.assertIs(client._operations_stub_internal, - mock_make_operations_stub.result) - self.assertIs(client._table_stub_internal, - mock_make_table_stub.result) - else: - self.assertIsNone(client._instance_stub_internal) - self.assertIsNone(client._operations_stub_internal) - self.assertIsNone(client._table_stub_internal) - - def test_constructor_default_scopes(self): - from google.cloud.bigtable import client as MUT - - expected_scopes = [MUT.DATA_SCOPE] - creds = _make_credentials() - self._constructor_test_helper(expected_scopes, creds) - - def test_constructor_custom_user_agent(self): - from google.cloud.bigtable import client as MUT - - CUSTOM_USER_AGENT = 'custom-application' - expected_scopes = [MUT.DATA_SCOPE] - creds = _make_credentials() - self._constructor_test_helper(expected_scopes, creds, - user_agent=CUSTOM_USER_AGENT) - - def test_constructor_with_admin(self): - from google.cloud.bigtable import client as MUT - - expected_scopes = [MUT.DATA_SCOPE, MUT.ADMIN_SCOPE] - creds = _make_credentials() - self._constructor_test_helper(expected_scopes, creds, admin=True) + self.assertIs( + client._credentials, credentials.with_scopes.return_value) + self.assertIsNone(client._http_internal) + self.assertFalse(client._read_only) + self.assertFalse(client._admin) + self.assertEqual(client.SCOPE, expected_scopes) + self.assertEqual(client.user_agent, custom_user_agent) + self.assertIsNone(client.emulator_host) + 
self.assertIs(client._data_stub, _make_data_stub.return_value) + self.assertIsNone(client._instance_stub_internal) + self.assertIsNone(client._operations_stub_internal) + self.assertIsNone(client._table_stub_internal) + + # Check mocks. + credentials.with_scopes.assert_called_once_with(expected_scopes) + _make_data_stub.assert_called_once_with(client) + _make_instance_stub.assert_not_called() + _make_operations_stub.assert_not_called() + _make_table_stub.assert_not_called() + + @mock.patch('google.cloud.bigtable.client._make_table_stub') + @mock.patch('google.cloud.bigtable.client._make_operations_stub') + @mock.patch('google.cloud.bigtable.client._make_instance_stub') + @mock.patch('google.cloud.bigtable.client._make_data_stub') + def test_constructor_with_admin( + self, _make_data_stub, _make_instance_stub, + _make_operations_stub, _make_table_stub): + from google.cloud._http import DEFAULT_USER_AGENT + from google.cloud.bigtable.client import ADMIN_SCOPE + from google.cloud.bigtable.client import DATA_SCOPE - def test_constructor_with_read_only(self): - from google.cloud.bigtable import client as MUT + expected_scopes = (DATA_SCOPE, ADMIN_SCOPE) + credentials = _make_credentials() + client = self._make_one( + project=self.PROJECT, credentials=credentials, admin=True) - expected_scopes = [MUT.READ_ONLY_SCOPE] - creds = _make_credentials() - self._constructor_test_helper(expected_scopes, creds, read_only=True) + self.assertEqual(client.project, self.PROJECT) + self.assertIs( + client._credentials, credentials.with_scopes.return_value) + self.assertIsNone(client._http_internal) + self.assertFalse(client._read_only) + self.assertTrue(client._admin) + self.assertEqual(client.SCOPE, expected_scopes) + self.assertEqual(client.user_agent, DEFAULT_USER_AGENT) + self.assertIsNone(client.emulator_host) + self.assertIs(client._data_stub, _make_data_stub.return_value) + self.assertIs( + client._instance_stub_internal, _make_instance_stub.return_value) + self.assertIs( + client._operations_stub_internal, + _make_operations_stub.return_value) + self.assertIs( + client._table_stub_internal, _make_table_stub.return_value) + + # Check mocks. 
+ credentials.with_scopes.assert_called_once_with(expected_scopes) + _make_data_stub.assert_called_once_with(client) + _make_instance_stub.assert_called_once_with(client) + _make_operations_stub.assert_called_once_with(client) + _make_table_stub.assert_called_once_with(client) def test_constructor_both_admin_and_read_only(self): - creds = _make_credentials() + credentials = _make_credentials() with self.assertRaises(ValueError): - self._constructor_test_helper([], creds, admin=True, - read_only=True) + self._make_one( + project=self.PROJECT, credentials=credentials, + admin=True, read_only=True) - def test_constructor_implicit_credentials(self): + def test__get_scopes_default(self): from google.cloud.bigtable.client import DATA_SCOPE - creds = _make_credentials() - expected_scopes = [DATA_SCOPE] + client = self._make_one( + project=self.PROJECT, credentials=_make_credentials()) + self.assertEqual(client._get_scopes(), (DATA_SCOPE,)) - patch = mock.patch( - 'google.auth.default', return_value=(creds, None)) - with patch as default: - self._constructor_test_helper( - None, None, - expected_creds=creds.with_scopes.return_value) + def test__get_scopes_admin(self): + from google.cloud.bigtable.client import ADMIN_SCOPE + from google.cloud.bigtable.client import DATA_SCOPE - default.assert_called_once_with() - creds.with_scopes.assert_called_once_with(expected_scopes) + client = self._make_one( + project=self.PROJECT, credentials=_make_credentials(), + admin=True) + expected_scopes = (DATA_SCOPE, ADMIN_SCOPE) + self.assertEqual(client._get_scopes(), expected_scopes) + + def test__get_scopes_read_only(self): + from google.cloud.bigtable.client import READ_ONLY_SCOPE + + client = self._make_one( + project=self.PROJECT, credentials=_make_credentials(), + read_only=True) + self.assertEqual(client._get_scopes(), (READ_ONLY_SCOPE,)) + + def _copy_helper_check_stubs(self, client, new_client): + if client._admin: + # Check the instance stub. + self.assertIs( + client._instance_stub_internal, mock.sentinel.inst_stub1) + self.assertIs( + new_client._instance_stub_internal, mock.sentinel.inst_stub2) + self.assertIsNot( + new_client._instance_stub_internal, + client._instance_stub_internal) + # Check the operations stub. + self.assertIs( + client._operations_stub_internal, mock.sentinel.ops_stub1) + self.assertIs( + new_client._operations_stub_internal, mock.sentinel.ops_stub2) + self.assertIsNot( + new_client._operations_stub_internal, + client._operations_stub_internal) + # Check the table stub. + self.assertIs( + client._table_stub_internal, mock.sentinel.table_stub1) + self.assertIs( + new_client._table_stub_internal, mock.sentinel.table_stub2) + self.assertIsNot( + new_client._table_stub_internal, client._table_stub_internal) + else: + # Check the instance stub. + self.assertIsNone(client._instance_stub_internal) + self.assertIsNone(new_client._instance_stub_internal) + # Check the operations stub. + self.assertIsNone(client._operations_stub_internal) + self.assertIsNone(new_client._operations_stub_internal) + # Check the table stub. 
+ self.assertIsNone(client._table_stub_internal) + self.assertIsNone(new_client._table_stub_internal) + + @mock.patch( + 'google.cloud.bigtable.client._make_table_stub', + side_effect=[mock.sentinel.table_stub1, mock.sentinel.table_stub2], + ) + @mock.patch( + 'google.cloud.bigtable.client._make_operations_stub', + side_effect=[mock.sentinel.ops_stub1, mock.sentinel.ops_stub2], + ) + @mock.patch( + 'google.cloud.bigtable.client._make_instance_stub', + side_effect=[mock.sentinel.inst_stub1, mock.sentinel.inst_stub2], + ) + @mock.patch( + 'google.cloud.bigtable.client._make_data_stub', + side_effect=[mock.sentinel.data_stub1, mock.sentinel.data_stub2], + ) + def _copy_test_helper( + self, _make_data_stub, _make_instance_stub, + _make_operations_stub, _make_table_stub, **kwargs): + credentials = _make_credentials() + # Make sure it "already" is scoped. + credentials.requires_scopes = False - def test_constructor_credentials_wo_create_scoped(self): - creds = _make_credentials() - expected_scopes = None - self._constructor_test_helper(expected_scopes, creds) + client = self._make_one( + project=self.PROJECT, credentials=credentials, **kwargs) + self.assertIs(client._credentials, credentials) - def _copy_test_helper(self, read_only=False, admin=False): - from google.cloud._testing import _Monkey - from google.cloud.bigtable import client as MUT - - credentials = _make_credentials() - client = self._make_oneWithMocks( - project=self.PROJECT, - credentials=credentials, - read_only=read_only, - admin=admin, - user_agent=self.USER_AGENT) - # Put some fake stubs in place so that we can verify they don't - # get copied. In the admin=False case, only the data stub will - # not be None, so we over-ride all the internal values. - client._data_stub = object() - client._instance_stub_internal = object() - client._operations_stub_internal = object() - client._table_stub_internal = object() - - mock_make_data_stub = _MakeStubMock() - mock_make_instance_stub = _MakeStubMock() - mock_make_operations_stub = _MakeStubMock() - mock_make_table_stub = _MakeStubMock() - with _Monkey(MUT, _make_data_stub=mock_make_data_stub, - _make_instance_stub=mock_make_instance_stub, - _make_operations_stub=mock_make_operations_stub, - _make_table_stub=mock_make_table_stub): - new_client = client.copy() + new_client = client.copy() self.assertEqual(new_client._admin, client._admin) self.assertEqual(new_client._credentials, client._credentials) self.assertEqual(new_client.project, client.project) self.assertEqual(new_client.user_agent, client.user_agent) # Make sure stubs are not preserved. - self.assertNotEqual(new_client._data_stub, client._data_stub) - self.assertNotEqual(new_client._instance_stub_internal, - client._instance_stub_internal) - self.assertNotEqual(new_client._operations_stub_internal, - client._operations_stub_internal) - self.assertNotEqual(new_client._table_stub_internal, - client._table_stub_internal) + self.assertIs(client._data_stub, mock.sentinel.data_stub1) + self.assertIs(new_client._data_stub, mock.sentinel.data_stub2) + self.assertIsNot(new_client._data_stub, client._data_stub) + self._copy_helper_check_stubs(client, new_client) + + # Check mocks. 
+ credentials.with_scopes.assert_not_called() + stub_calls = [ + mock.call(client), + mock.call(new_client), + ] + self.assertEqual(_make_data_stub.mock_calls, stub_calls) + if client._admin: + self.assertEqual(_make_instance_stub.mock_calls, stub_calls) + self.assertEqual(_make_operations_stub.mock_calls, stub_calls) + self.assertEqual(_make_table_stub.mock_calls, stub_calls) + else: + _make_instance_stub.assert_not_called() + _make_operations_stub.assert_not_called() + _make_table_stub.assert_not_called() def test_copy(self): self._copy_test_helper() @@ -433,61 +478,61 @@ def test_copy_read_only(self): def test_credentials_getter(self): credentials = _make_credentials() project = 'PROJECT' - client = self._make_oneWithMocks(project=project, - credentials=credentials) + client = self._make_one_with_mocks( + project=project, credentials=credentials) self.assertIs(client.credentials, credentials.with_scopes.return_value) def test_project_name_property(self): credentials = _make_credentials() project = 'PROJECT' - client = self._make_oneWithMocks(project=project, - credentials=credentials) + client = self._make_one_with_mocks( + project=project, credentials=credentials) project_name = 'projects/' + project self.assertEqual(client.project_name, project_name) def test_instance_stub_getter(self): credentials = _make_credentials() project = 'PROJECT' - client = self._make_oneWithMocks(project=project, - credentials=credentials, admin=True) + client = self._make_one_with_mocks( + project=project, credentials=credentials, admin=True) self.assertIs(client._instance_stub, client._instance_stub_internal) def test_instance_stub_non_admin_failure(self): credentials = _make_credentials() project = 'PROJECT' - client = self._make_oneWithMocks(project=project, - credentials=credentials, admin=False) + client = self._make_one_with_mocks( + project=project, credentials=credentials, admin=False) with self.assertRaises(ValueError): getattr(client, '_instance_stub') def test_operations_stub_getter(self): credentials = _make_credentials() project = 'PROJECT' - client = self._make_oneWithMocks(project=project, - credentials=credentials, admin=True) + client = self._make_one_with_mocks( + project=project, credentials=credentials, admin=True) self.assertIs(client._operations_stub, client._operations_stub_internal) def test_operations_stub_non_admin_failure(self): credentials = _make_credentials() project = 'PROJECT' - client = self._make_oneWithMocks(project=project, - credentials=credentials, admin=False) + client = self._make_one_with_mocks( + project=project, credentials=credentials, admin=False) with self.assertRaises(ValueError): getattr(client, '_operations_stub') def test_table_stub_getter(self): credentials = _make_credentials() project = 'PROJECT' - client = self._make_oneWithMocks(project=project, - credentials=credentials, admin=True) + client = self._make_one_with_mocks( + project=project, credentials=credentials, admin=True) self.assertIs(client._table_stub, client._table_stub_internal) def test_table_stub_non_admin_failure(self): credentials = _make_credentials() project = 'PROJECT' - client = self._make_oneWithMocks(project=project, - credentials=credentials, admin=False) + client = self._make_one_with_mocks( + project=project, credentials=credentials, admin=False) with self.assertRaises(ValueError): getattr(client, '_table_stub') @@ -501,8 +546,8 @@ def test_instance_factory_defaults(self): INSTANCE_ID = 'instance-id' DISPLAY_NAME = 'display-name' credentials = _make_credentials() - client = 
self._make_oneWithMocks(project=PROJECT, - credentials=credentials) + client = self._make_one_with_mocks( + project=PROJECT, credentials=credentials) instance = client.instance(INSTANCE_ID, display_name=DISPLAY_NAME) @@ -523,8 +568,8 @@ def test_instance_factory_w_explicit_serve_nodes(self): LOCATION_ID = 'locname' SERVE_NODES = 5 credentials = _make_credentials() - client = self._make_oneWithMocks(project=PROJECT, - credentials=credentials) + client = self._make_one_with_mocks( + project=PROJECT, credentials=credentials) instance = client.instance( INSTANCE_ID, display_name=DISPLAY_NAME, @@ -554,7 +599,7 @@ def test_list_instances(self): 'projects/' + self.PROJECT + '/instances/' + INSTANCE_ID2) credentials = _make_credentials() - client = self._make_oneWithMocks( + client = self._make_one_with_mocks( project=self.PROJECT, credentials=credentials, admin=True, @@ -609,14 +654,3 @@ def __init__(self, credentials, user_agent, emulator_host=None): self.credentials = credentials self.user_agent = user_agent self.emulator_host = emulator_host - - -class _MakeStubMock(object): - - def __init__(self): - self.result = object() - self.calls = [] - - def __call__(self, client): - self.calls.append(client) - return self.result From 57d0ecbaf5783db293066d49dcd23b198f91451c Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Fri, 4 Aug 2017 16:45:43 -0700 Subject: [PATCH 075/892] Updating all affected packages after google-cloud-core update. (#3730) * Updating all affected packages after google-cloud-core update. * Moving 'pip install .' **after** subpackages in nox docs. @lukesneeringer still hasn't explained why it was moved. In it's current location, the depencencies are first retrieved from PyPI (which fails here for the unreleased versions), e.g. https://circleci.com/gh/GoogleCloudPlatform/google-cloud-python/2716 --- packages/google-cloud-bigtable/setup.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 8d5bad6a1ffd..3b164fe8e12f 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -51,13 +51,13 @@ REQUIREMENTS = [ - 'google-cloud-core >= 0.25.0, < 0.26dev', + 'google-cloud-core >= 0.26.0, < 0.27dev', 'google-gax>=0.15.7, <0.16dev', ] setup( name='google-cloud-bigtable', - version='0.25.0', + version='0.26.0', description='Python Client for Google Cloud Bigtable', long_description=README, namespace_packages=[ From 12b4da297c08630195c7d0e81777f1de410b5505 Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Mon, 7 Aug 2017 13:31:10 -0700 Subject: [PATCH 076/892] Allow Table.read_rows to take an inclusive end key. (#3744) This commit adds the `end_inclusive` keyword argument, which can be explicitly passed to get `[start:end]` rather than `[start:end)`. 
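[Editorial note, not part of the patch] A brief usage sketch of the new keyword for readers skimming the diff below; `instance` stands for an already-constructed Bigtable `Instance`, and the table id and row keys are placeholders.

    # Hypothetical usage of the new end_inclusive keyword.
    table = instance.table('my-table')

    # Default: half-open range, rows in [b'row-0001', b'row-0100').
    rows = table.read_rows(start_key=b'row-0001', end_key=b'row-0100')
    rows.consume_all()

    # With end_inclusive=True: closed range [b'row-0001', b'row-0100'],
    # because the request uses end_key_closed instead of end_key_open.
    rows = table.read_rows(
        start_key=b'row-0001', end_key=b'row-0100', end_inclusive=True)
    rows.consume_all()
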
--- .../google/cloud/bigtable/table.py | 19 +++++++++++++++---- .../tests/unit/test_table.py | 16 ++++++++++++++-- 2 files changed, 29 insertions(+), 6 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index 40ef3a2ca2fb..64fbcc93771e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -257,7 +257,7 @@ def read_row(self, row_key, filter_=None): return rows_data.rows[row_key] def read_rows(self, start_key=None, end_key=None, limit=None, - filter_=None): + filter_=None, end_inclusive=False): """Read rows from this table. :type start_key: bytes @@ -280,13 +280,17 @@ def read_rows(self, start_key=None, end_key=None, limit=None, specified row(s). If unset, reads every column in each row. + :type end_inclusive: bool + :param end_inclusive: (Optional) Whether the ``end_key`` should be + considered inclusive. The default is False (exclusive). + :rtype: :class:`.PartialRowsData` :returns: A :class:`.PartialRowsData` convenience wrapper for consuming the streamed results. """ request_pb = _create_row_request( self.name, start_key=start_key, end_key=end_key, filter_=filter_, - limit=limit) + limit=limit, end_inclusive=end_inclusive) client = self._instance._client response_iterator = client._data_stub.ReadRows(request_pb) # We expect an iterator of `data_messages_v2_pb2.ReadRowsResponse` @@ -360,7 +364,7 @@ def sample_row_keys(self): def _create_row_request(table_name, row_key=None, start_key=None, end_key=None, - filter_=None, limit=None): + filter_=None, limit=None, end_inclusive=False): """Creates a request to read rows in a table. :type table_name: str @@ -388,6 +392,10 @@ def _create_row_request(table_name, row_key=None, start_key=None, end_key=None, rows' worth of results. The default (zero) is to return all results. + :type end_inclusive: bool + :param end_inclusive: (Optional) Whether the ``end_key`` should be + considered inclusive. The default is False (exclusive). + :rtype: :class:`data_messages_v2_pb2.ReadRowsRequest` :returns: The ``ReadRowsRequest`` protobuf corresponding to the inputs. 
:raises: :class:`ValueError ` if both @@ -403,7 +411,10 @@ def _create_row_request(table_name, row_key=None, start_key=None, end_key=None, if start_key is not None: range_kwargs['start_key_closed'] = _to_bytes(start_key) if end_key is not None: - range_kwargs['end_key_open'] = _to_bytes(end_key) + end_key_key = 'end_key_open' + if end_inclusive: + end_key_key = 'end_key_closed' + range_kwargs[end_key_key] = _to_bytes(end_key) if filter_ is not None: request_kwargs['filter'] = filter_.to_pb() if limit is not None: diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index dc4d2b5bbad0..3890d097f572 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -537,6 +537,7 @@ def mock_create_row_request(table_name, **kwargs): 'end_key': end_key, 'filter_': filter_obj, 'limit': limit, + 'end_inclusive': False, } self.assertEqual(mock_created, [(table.name, created_kwargs)]) @@ -572,12 +573,12 @@ def test_sample_row_keys(self): class Test__create_row_request(unittest.TestCase): def _call_fut(self, table_name, row_key=None, start_key=None, end_key=None, - filter_=None, limit=None): + filter_=None, limit=None, end_inclusive=False): from google.cloud.bigtable.table import _create_row_request return _create_row_request( table_name, row_key=row_key, start_key=start_key, end_key=end_key, - filter_=filter_, limit=limit) + filter_=filter_, limit=limit, end_inclusive=end_inclusive) def test_table_name_only(self): table_name = 'table_name' @@ -627,6 +628,17 @@ def test_row_range_both_keys(self): start_key_closed=start_key, end_key_open=end_key) self.assertEqual(result, expected_result) + def test_row_range_both_keys_inclusive(self): + table_name = 'table_name' + start_key = b'start_key' + end_key = b'end_key' + result = self._call_fut(table_name, start_key=start_key, + end_key=end_key, end_inclusive=True) + expected_result = _ReadRowsRequestPB(table_name=table_name) + expected_result.rows.row_ranges.add( + start_key_closed=start_key, end_key_closed=end_key) + self.assertEqual(result, expected_result) + def test_with_filter(self): from google.cloud.bigtable.row_filters import RowSampleFilter From f555455c33cbee4ce86f1834b9a05f77126c10d7 Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Tue, 8 Aug 2017 14:03:04 -0700 Subject: [PATCH 077/892] Move google.cloud.future to google.api.core (#3764) --- .../google-cloud-bigtable/google/cloud/bigtable/cluster.py | 2 +- .../google-cloud-bigtable/google/cloud/bigtable/instance.py | 2 +- packages/google-cloud-bigtable/tests/unit/test_cluster.py | 4 ++-- packages/google-cloud-bigtable/tests/unit/test_instance.py | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py index 8d15547efae3..09a34e11bb05 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py @@ -17,11 +17,11 @@ import re +from google.api.core import operation from google.cloud.bigtable._generated import ( instance_pb2 as data_v2_pb2) from google.cloud.bigtable._generated import ( bigtable_instance_admin_pb2 as messages_v2_pb2) -from google.cloud.future import operation _CLUSTER_NAME_RE = re.compile(r'^projects/(?P[^/]+)/' r'instances/(?P[^/]+)/clusters/' diff --git 
a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py index 958f16602953..5e73ed2ba661 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py @@ -17,6 +17,7 @@ import re +from google.api.core import operation from google.cloud.bigtable._generated import ( instance_pb2 as data_v2_pb2) from google.cloud.bigtable._generated import ( @@ -26,7 +27,6 @@ from google.cloud.bigtable.cluster import Cluster from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES from google.cloud.bigtable.table import Table -from google.cloud.future import operation _EXISTING_INSTANCE_LOCATION_ID = 'see-existing-cluster' diff --git a/packages/google-cloud-bigtable/tests/unit/test_cluster.py b/packages/google-cloud-bigtable/tests/unit/test_cluster.py index e244b55d6dff..8ed54846d18e 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_cluster.py +++ b/packages/google-cloud-bigtable/tests/unit/test_cluster.py @@ -233,8 +233,8 @@ def test_reload(self): self.assertEqual(cluster.location, LOCATION) def test_create(self): + from google.api.core import operation from google.longrunning import operations_pb2 - from google.cloud.future import operation from google.cloud.bigtable._generated import ( bigtable_instance_admin_pb2 as messages_v2_pb2) from tests.unit._testing import _FakeStub @@ -275,8 +275,8 @@ def test_create(self): def test_update(self): import datetime + from google.api.core import operation from google.longrunning import operations_pb2 - from google.cloud.future import operation from google.protobuf.any_pb2 import Any from google.cloud._helpers import _datetime_to_pb_timestamp from google.cloud.bigtable._generated import ( diff --git a/packages/google-cloud-bigtable/tests/unit/test_instance.py b/packages/google-cloud-bigtable/tests/unit/test_instance.py index 03c0034fc49e..ce475e0d5a66 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_instance.py +++ b/packages/google-cloud-bigtable/tests/unit/test_instance.py @@ -232,13 +232,13 @@ def test_reload(self): def test_create(self): import datetime + from google.api.core import operation from google.longrunning import operations_pb2 from google.protobuf.any_pb2 import Any from google.cloud.bigtable._generated import ( bigtable_instance_admin_pb2 as messages_v2_pb2) from google.cloud._helpers import _datetime_to_pb_timestamp from tests.unit._testing import _FakeStub - from google.cloud.future import operation from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES NOW = datetime.datetime.utcnow() @@ -285,11 +285,11 @@ def test_create(self): self.assertEqual(kwargs, {}) def test_create_w_explicit_serve_nodes(self): + from google.api.core import operation from google.longrunning import operations_pb2 from google.cloud.bigtable._generated import ( bigtable_instance_admin_pb2 as messages_v2_pb2) from tests.unit._testing import _FakeStub - from google.cloud.future import operation SERVE_NODES = 5 From ee17fdc89a4a66aa8a0464080e7c376e2e0a834f Mon Sep 17 00:00:00 2001 From: Tim Swast Date: Tue, 8 Aug 2017 14:50:31 -0700 Subject: [PATCH 078/892] Use latest/ directory for docs instead of stable/ (#3766) See also https://github.com/GoogleCloudPlatform/google-cloud-python/pull/3763 $ sed -i '' 's/googlecloudplatform.github.io\/google-cloud-python\/stable\//googlecloudplatform.github.io\/google-cloud-python\/latest\//g' **/*.rst --- packages/google-cloud-bigtable/README.rst | 2 
+- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/README.rst b/packages/google-cloud-bigtable/README.rst index ebc202d8d87e..879ddd512c9b 100644 --- a/packages/google-cloud-bigtable/README.rst +++ b/packages/google-cloud-bigtable/README.rst @@ -9,7 +9,7 @@ Python Client for Google Cloud Bigtable - `Documentation`_ -.. _Documentation: https://googlecloudplatform.github.io/google-cloud-python/stable/bigtable/usage.html +.. _Documentation: https://googlecloudplatform.github.io/google-cloud-python/latest/bigtable/usage.html Quick Start ----------- From 6588d35561a8c4157e3593979a4217698dc90caf Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Tue, 8 Aug 2017 14:51:50 -0700 Subject: [PATCH 079/892] Fix __eq__ and __ne__. (#3765) --- .../google/cloud/bigtable/cluster.py | 4 +- .../google/cloud/bigtable/column_family.py | 27 ++++++--- .../google/cloud/bigtable/instance.py | 4 +- .../google/cloud/bigtable/row_data.py | 12 ++-- .../google/cloud/bigtable/row_filters.py | 57 ++++++++++++++----- .../google/cloud/bigtable/table.py | 4 +- .../tests/unit/test_column_family.py | 7 ++- 7 files changed, 76 insertions(+), 39 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py index 09a34e11bb05..21410ca559cd 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py @@ -159,7 +159,7 @@ def name(self): def __eq__(self, other): if not isinstance(other, self.__class__): - return False + return NotImplemented # NOTE: This does not compare the configuration values, such as # the serve_nodes. Instead, it only compares # identifying values instance, cluster ID and client. This is @@ -170,7 +170,7 @@ def __eq__(self, other): other._instance == self._instance) def __ne__(self, other): - return not self.__eq__(other) + return not self == other def reload(self): """Reload the metadata for this cluster.""" diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/column_family.py b/packages/google-cloud-bigtable/google/cloud/bigtable/column_family.py index c34e75ed2c1f..391452880f2f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/column_family.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/column_family.py @@ -39,9 +39,6 @@ class GarbageCollectionRule(object): don't support that feature and instead support via native classes. """ - def __ne__(self, other): - return not self.__eq__(other) - class MaxVersionsGCRule(GarbageCollectionRule): """Garbage collection limiting the number of versions of a cell. @@ -55,9 +52,12 @@ def __init__(self, max_num_versions): def __eq__(self, other): if not isinstance(other, self.__class__): - return False + return NotImplemented return other.max_num_versions == self.max_num_versions + def __ne__(self, other): + return not self == other + def to_pb(self): """Converts the garbage collection rule to a protobuf. @@ -79,9 +79,12 @@ def __init__(self, max_age): def __eq__(self, other): if not isinstance(other, self.__class__): - return False + return NotImplemented return other.max_age == self.max_age + def __ne__(self, other): + return not self == other + def to_pb(self): """Converts the garbage collection rule to a protobuf. 
@@ -104,9 +107,12 @@ def __init__(self, rules): def __eq__(self, other): if not isinstance(other, self.__class__): - return False + return NotImplemented return other.rules == self.rules + def __ne__(self, other): + return not self == other + def to_pb(self): """Converts the union into a single GC rule as a protobuf. @@ -130,9 +136,12 @@ def __init__(self, rules): def __eq__(self, other): if not isinstance(other, self.__class__): - return False + return NotImplemented return other.rules == self.rules + def __ne__(self, other): + return not self == other + def to_pb(self): """Converts the intersection into a single GC rule as a protobuf. @@ -190,13 +199,13 @@ def name(self): def __eq__(self, other): if not isinstance(other, self.__class__): - return False + return NotImplemented return (other.column_family_id == self.column_family_id and other._table == self._table and other.gc_rule == self.gc_rule) def __ne__(self, other): - return not self.__eq__(other) + return not self == other def to_pb(self): """Converts the column family to a protobuf. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py index 5e73ed2ba661..10246ecf6ef2 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py @@ -180,7 +180,7 @@ def name(self): def __eq__(self, other): if not isinstance(other, self.__class__): - return False + return NotImplemented # NOTE: This does not compare the configuration values, such as # the display_name. Instead, it only compares # identifying values instance ID and client. This is @@ -191,7 +191,7 @@ def __eq__(self, other): other._client == self._client) def __ne__(self, other): - return not self.__eq__(other) + return not self == other def reload(self): """Reload the metadata for this instance.""" diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py index 78179db25c4e..56129f6342b8 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py @@ -58,13 +58,13 @@ def from_pb(cls, cell_pb): def __eq__(self, other): if not isinstance(other, self.__class__): - return False + return NotImplemented return (other.value == self.value and other.timestamp == self.timestamp and other.labels == self.labels) def __ne__(self, other): - return not self.__eq__(other) + return not self == other class PartialCellData(object): @@ -126,12 +126,12 @@ def __init__(self, row_key): def __eq__(self, other): if not isinstance(other, self.__class__): - return False + return NotImplemented return (other._row_key == self._row_key and other._cells == self._cells) def __ne__(self, other): - return not self.__eq__(other) + return not self == other def to_dict(self): """Convert the cells to a dictionary. 
@@ -211,11 +211,11 @@ def __init__(self, response_iterator): def __eq__(self, other): if not isinstance(other, self.__class__): - return False + return NotImplemented return other._response_iterator == self._response_iterator def __ne__(self, other): - return not self.__eq__(other) + return not self == other @property def state(self): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_filters.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_filters.py index e3f3006df286..a59be638365c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_filters.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_filters.py @@ -32,9 +32,6 @@ class RowFilter(object): This class is a do-nothing base class for all row filters. """ - def __ne__(self, other): - return not self.__eq__(other) - class _BoolFilter(RowFilter): """Row filter that uses a boolean flag. @@ -48,9 +45,12 @@ def __init__(self, flag): def __eq__(self, other): if not isinstance(other, self.__class__): - return False + return NotImplemented return other.flag == self.flag + def __ne__(self, other): + return not self == other + class SinkFilter(_BoolFilter): """Advanced row filter to skip parent filters. @@ -124,9 +124,12 @@ def __init__(self, regex): def __eq__(self, other): if not isinstance(other, self.__class__): - return False + return NotImplemented return other.regex == self.regex + def __ne__(self, other): + return not self == other + class RowKeyRegexFilter(_RegexFilter): """Row filter for a row key regular expression. @@ -173,9 +176,12 @@ def __init__(self, sample): def __eq__(self, other): if not isinstance(other, self.__class__): - return False + return NotImplemented return other.sample == self.sample + def __ne__(self, other): + return not self == other + def to_pb(self): """Converts the row filter to a protobuf. @@ -257,12 +263,12 @@ def __init__(self, start=None, end=None): def __eq__(self, other): if not isinstance(other, self.__class__): - return False + return NotImplemented return (other.start == self.start and other.end == self.end) def __ne__(self, other): - return not self.__eq__(other) + return not self == other def to_pb(self): """Converts the :class:`TimestampRange` to a protobuf. @@ -292,9 +298,12 @@ def __init__(self, range_): def __eq__(self, other): if not isinstance(other, self.__class__): - return False + return NotImplemented return other.range_ == self.range_ + def __ne__(self, other): + return not self == other + def to_pb(self): """Converts the row filter to a protobuf. @@ -367,13 +376,16 @@ def __init__(self, column_family_id, start_column=None, end_column=None, def __eq__(self, other): if not isinstance(other, self.__class__): - return False + return NotImplemented return (other.column_family_id == self.column_family_id and other.start_column == self.start_column and other.end_column == self.end_column and other.inclusive_start == self.inclusive_start and other.inclusive_end == self.inclusive_end) + def __ne__(self, other): + return not self == other + def to_pb(self): """Converts the row filter to a protobuf. 
@@ -485,12 +497,15 @@ def __init__(self, start_value=None, end_value=None, def __eq__(self, other): if not isinstance(other, self.__class__): - return False + return NotImplemented return (other.start_value == self.start_value and other.end_value == self.end_value and other.inclusive_start == self.inclusive_start and other.inclusive_end == self.inclusive_end) + def __ne__(self, other): + return not self == other + def to_pb(self): """Converts the row filter to a protobuf. @@ -533,9 +548,12 @@ def __init__(self, num_cells): def __eq__(self, other): if not isinstance(other, self.__class__): - return False + return NotImplemented return other.num_cells == self.num_cells + def __ne__(self, other): + return not self == other + class CellsRowOffsetFilter(_CellCountFilter): """Row filter to skip cells in a row. @@ -631,9 +649,12 @@ def __init__(self, label): def __eq__(self, other): if not isinstance(other, self.__class__): - return False + return NotImplemented return other.label == self.label + def __ne__(self, other): + return not self == other + def to_pb(self): """Converts the row filter to a protobuf. @@ -661,9 +682,12 @@ def __init__(self, filters=None): def __eq__(self, other): if not isinstance(other, self.__class__): - return False + return NotImplemented return other.filters == self.filters + def __ne__(self, other): + return not self == other + class RowFilterChain(_FilterCombination): """Chain of row filters. @@ -748,11 +772,14 @@ def __init__(self, base_filter, true_filter=None, false_filter=None): def __eq__(self, other): if not isinstance(other, self.__class__): - return False + return NotImplemented return (other.base_filter == self.base_filter and other.true_filter == self.true_filter and other.false_filter == self.false_filter) + def __ne__(self, other): + return not self == other + def to_pb(self): """Converts the row filter to a protobuf. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index 64fbcc93771e..921fd30fd11b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -142,12 +142,12 @@ def row(self, row_key, filter_=None, append=False): def __eq__(self, other): if not isinstance(other, self.__class__): - return False + return NotImplemented return (other.table_id == self.table_id and other._instance == self._instance) def __ne__(self, other): - return not self.__eq__(other) + return not self == other def create(self, initial_split_keys=None, column_families=()): """Creates this table. diff --git a/packages/google-cloud-bigtable/tests/unit/test_column_family.py b/packages/google-cloud-bigtable/tests/unit/test_column_family.py index 6fa408fdb07e..73b836501b47 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_column_family.py +++ b/packages/google-cloud-bigtable/tests/unit/test_column_family.py @@ -12,9 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
- import unittest +import mock + class TestMaxVersionsGCRule(unittest.TestCase): @@ -29,8 +30,8 @@ def _make_one(self, *args, **kwargs): def test___eq__type_differ(self): gc_rule1 = self._make_one(10) - gc_rule2 = object() - self.assertNotEqual(gc_rule1, gc_rule2) + self.assertNotEqual(gc_rule1, object()) + self.assertEqual(gc_rule1, mock.ANY) def test___eq__same_value(self): gc_rule1 = self._make_one(2) From 7a544a8622a47f5be1509b7a5b3c1d12bd7aa860 Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Thu, 24 Aug 2017 13:28:07 -0700 Subject: [PATCH 080/892] Bump core version number (#3864) --- packages/google-cloud-bigtable/setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 3b164fe8e12f..5de75d35c397 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -51,7 +51,7 @@ REQUIREMENTS = [ - 'google-cloud-core >= 0.26.0, < 0.27dev', + 'google-cloud-core >= 0.27.0, < 0.28dev', 'google-gax>=0.15.7, <0.16dev', ] From c92fab3f88e380f4ff7916766ad22af47d8a20a9 Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Thu, 24 Aug 2017 14:21:24 -0700 Subject: [PATCH 081/892] Bigtable 0.27.0 (#3867) --- packages/google-cloud-bigtable/setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 5de75d35c397..dd9087c60bbf 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -57,7 +57,7 @@ setup( name='google-cloud-bigtable', - version='0.26.0', + version='0.27.0', description='Python Client for Google Cloud Bigtable', long_description=README, namespace_packages=[ From 9aa76074330ccca854a2835f154e01bac7df63b3 Mon Sep 17 00:00:00 2001 From: Michael Tamm Date: Fri, 8 Sep 2017 16:46:58 +0200 Subject: [PATCH 082/892] Fixed references in doc comment of `google.cloud.bigtable.Table#row` (#3934) --- packages/google-cloud-bigtable/google/cloud/bigtable/table.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index 921fd30fd11b..aaec98b6265b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -120,13 +120,13 @@ def row(self, row_key, filter_=None, append=False): :type filter_: :class:`.RowFilter` :param filter_: (Optional) Filter to be used for conditional mutations. - See :class:`.DirectRow` for more details. + See :class:`.ConditionalRow` for more details. :type append: bool :param append: (Optional) Flag to determine if the row should be used for append mutations. - :rtype: :class:`.DirectRow` + :rtype: :class:`.Row` :returns: A row owned by this table. :raises: :class:`ValueError ` if both ``filter_`` and ``append`` are used. From aa1375450a60ed51a25f35cffab61425f52d2586 Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Tue, 3 Oct 2017 13:02:49 -0700 Subject: [PATCH 083/892] Fixing virutal->virtual typo. 
(#4108) Done via: $ git grep -l virutal | xargs sed -i s/virutal/virtual/g --- packages/google-cloud-bigtable/nox.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/nox.py b/packages/google-cloud-bigtable/nox.py index 83b56e49d2df..21e03c67c586 100644 --- a/packages/google-cloud-bigtable/nox.py +++ b/packages/google-cloud-bigtable/nox.py @@ -67,7 +67,7 @@ def system_tests(session, python_version): session.virtualenv_dirname = 'sys-' + python_version # Install all test dependencies, then install this package into the - # virutalenv's dist-packages. + # virtualenv's dist-packages. session.install('mock', 'pytest', *LOCAL_DEPS) session.install('../test_utils/') session.install('.') From da917f4b9782a4672601f602204c7984cc68f986 Mon Sep 17 00:00:00 2001 From: Tim Swast Date: Thu, 12 Oct 2017 17:13:19 -0700 Subject: [PATCH 084/892] s/gcloud-common/google-cloud-common/g (#4180) The gcloud-common repo moved to https://github.com/GoogleCloudPlatform/google-cloud-common --- packages/google-cloud-bigtable/README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/README.rst b/packages/google-cloud-bigtable/README.rst index 879ddd512c9b..f71f9ba826ff 100644 --- a/packages/google-cloud-bigtable/README.rst +++ b/packages/google-cloud-bigtable/README.rst @@ -27,7 +27,7 @@ learn more. You may also find the `authentication document`_ shared by all the ``google-cloud-*`` libraries to be helpful. .. _Authentication section: https://google-cloud-python.readthedocs.io/en/latest/core/auth.html -.. _authentication document: https://github.com/GoogleCloudPlatform/gcloud-common/tree/master/authentication +.. _authentication document: https://github.com/GoogleCloudPlatform/google-cloud-common/tree/master/authentication Using the API ------------- From c949c99b455cd0f8bcbeede5b732c8a48b3fa7b4 Mon Sep 17 00:00:00 2001 From: michaelawyu Date: Fri, 13 Oct 2017 13:46:24 -0700 Subject: [PATCH 085/892] Update Docs with Python Setup Guide (#4187) --- packages/google-cloud-bigtable/README.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/packages/google-cloud-bigtable/README.rst b/packages/google-cloud-bigtable/README.rst index f71f9ba826ff..95c4e515dddf 100644 --- a/packages/google-cloud-bigtable/README.rst +++ b/packages/google-cloud-bigtable/README.rst @@ -18,6 +18,10 @@ Quick Start $ pip install --upgrade google-cloud-bigtable +Fore more information on setting up your Python development environment, such as installing ``pip`` and on your system, please refer to `Python Development Environment Setup Guide`_ for Google Cloud Platform. + +.. _Python Development Environment Setup Guide: https://cloud.google.com/python/setup + Authentication -------------- From 0c9c4fedfe6bbba727d60208c249185d38c7d5ee Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Mon, 16 Oct 2017 13:22:37 -0400 Subject: [PATCH 086/892] Skip running 'pylint' on '_generated' files. 
(#4195) --- packages/google-cloud-bigtable/pylint.config.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/pylint.config.py b/packages/google-cloud-bigtable/pylint.config.py index b618319b8b61..9a706403a78d 100644 --- a/packages/google-cloud-bigtable/pylint.config.py +++ b/packages/google-cloud-bigtable/pylint.config.py @@ -14,10 +14,15 @@ """This module is used to configure gcp-devrel-py-tools run-pylint.""" +import copy + +from gcp_devrel.tools import pylint + # Library configuration # library_additions = {} -# library_replacements = {} +library_replacements = copy.deepcopy(pylint.DEFAULT_LIBRARY_RC_REPLACEMENTS) +library_replacements['MASTER']['ignore'].append('_generated') # Test configuration From 5f2b35221c318b20c74d2a246e34a00e0fdff816 Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Wed, 18 Oct 2017 15:36:57 -0700 Subject: [PATCH 087/892] Replace usage of google.api.core with google.api_core (#4221) * Remove api.core packages from google.cloud.core, make google.cloud.core depend on api_core. * s/google.api.core/google.api_core/g and nox updates * Fixing core tests, addressing review feedback * Fix bigquery --- .../google-cloud-bigtable/google/cloud/bigtable/cluster.py | 2 +- .../google-cloud-bigtable/google/cloud/bigtable/instance.py | 2 +- packages/google-cloud-bigtable/nox.py | 5 ++++- packages/google-cloud-bigtable/tests/unit/test_cluster.py | 4 ++-- packages/google-cloud-bigtable/tests/unit/test_instance.py | 4 ++-- 5 files changed, 10 insertions(+), 7 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py index 21410ca559cd..e028e171a360 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py @@ -17,7 +17,7 @@ import re -from google.api.core import operation +from google.api_core import operation from google.cloud.bigtable._generated import ( instance_pb2 as data_v2_pb2) from google.cloud.bigtable._generated import ( diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py index 10246ecf6ef2..4b21222b0639 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py @@ -17,7 +17,7 @@ import re -from google.api.core import operation +from google.api_core import operation from google.cloud.bigtable._generated import ( instance_pb2 as data_v2_pb2) from google.cloud.bigtable._generated import ( diff --git a/packages/google-cloud-bigtable/nox.py b/packages/google-cloud-bigtable/nox.py index 21e03c67c586..cb0121c9c4c1 100644 --- a/packages/google-cloud-bigtable/nox.py +++ b/packages/google-cloud-bigtable/nox.py @@ -18,7 +18,10 @@ import nox -LOCAL_DEPS = ('../core/',) +LOCAL_DEPS = ( + os.path.join('..', 'api_core'), + os.path.join('..', 'core'), +) @nox.session diff --git a/packages/google-cloud-bigtable/tests/unit/test_cluster.py b/packages/google-cloud-bigtable/tests/unit/test_cluster.py index 8ed54846d18e..b4e67735354d 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_cluster.py +++ b/packages/google-cloud-bigtable/tests/unit/test_cluster.py @@ -233,7 +233,7 @@ def test_reload(self): self.assertEqual(cluster.location, LOCATION) def test_create(self): - from google.api.core import operation + from google.api_core import operation from 
google.longrunning import operations_pb2 from google.cloud.bigtable._generated import ( bigtable_instance_admin_pb2 as messages_v2_pb2) @@ -275,7 +275,7 @@ def test_create(self): def test_update(self): import datetime - from google.api.core import operation + from google.api_core import operation from google.longrunning import operations_pb2 from google.protobuf.any_pb2 import Any from google.cloud._helpers import _datetime_to_pb_timestamp diff --git a/packages/google-cloud-bigtable/tests/unit/test_instance.py b/packages/google-cloud-bigtable/tests/unit/test_instance.py index ce475e0d5a66..756bcd05cee6 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_instance.py +++ b/packages/google-cloud-bigtable/tests/unit/test_instance.py @@ -232,7 +232,7 @@ def test_reload(self): def test_create(self): import datetime - from google.api.core import operation + from google.api_core import operation from google.longrunning import operations_pb2 from google.protobuf.any_pb2 import Any from google.cloud.bigtable._generated import ( @@ -285,7 +285,7 @@ def test_create(self): self.assertEqual(kwargs, {}) def test_create_w_explicit_serve_nodes(self): - from google.api.core import operation + from google.api_core import operation from google.longrunning import operations_pb2 from google.cloud.bigtable._generated import ( bigtable_instance_admin_pb2 as messages_v2_pb2) From da6ca385628ba3e8ac97da75c81141488347dcc6 Mon Sep 17 00:00:00 2001 From: Tim Swast Date: Mon, 23 Oct 2017 14:42:22 -0700 Subject: [PATCH 088/892] BigQuery: make docstrings use bigquery module, like the samples do. (#4236) * BigQuery: make docstrings use bigquery module, like the samples do. All the public classes we expect developers to use are included in the `google.cloud.bigquery` module, and it is this module that we use in code samples. Also, I found one error in the Bigtable docs where `Row` was not being used as a local reference and conflicted with the BigQuery Row. * Adjust heading underline. --- packages/google-cloud-bigtable/google/cloud/bigtable/table.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index aaec98b6265b..d1711f5be704 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -113,7 +113,7 @@ def row(self, row_key, filter_=None, append=False): .. warning:: At most one of ``filter_`` and ``append`` can be used in a - :class:`Row`. + :class:`.Row`. :type row_key: bytes :param row_key: The key for the row being created. From a4bd78fa69edef27047a4a526ed112cd99be8177 Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Mon, 30 Oct 2017 14:41:42 -0700 Subject: [PATCH 089/892] Cutting version 0.28.0 of `google-cloud-core`. (#4280) Also - updating all dependencies of `grpcio` to `>= 1.7.0`. This was due to an issue [1] with `1.6.0`. - updating the version of `google-api-core` (also to be released, This is required since the bounds on `grpcio` of `google-cloud-core==0.28.0` and `google-api-core==0.1.0` are mutually exclusive.) - Updating `google-api-core` CHANGELOG for release. - Updating packages to depend on `google-cloud-core>=0.28.0`. - Installing `nox -s lint` deps locally for vision. 
[1]: https://github.com/grpc/grpc/issues/12455 --- packages/google-cloud-bigtable/setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index dd9087c60bbf..12d856dfb435 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -51,7 +51,7 @@ REQUIREMENTS = [ - 'google-cloud-core >= 0.27.0, < 0.28dev', + 'google-cloud-core >= 0.28.0, < 0.29dev', 'google-gax>=0.15.7, <0.16dev', ] From ed227159fcbceb2c531095f236c10d552b3532d3 Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Tue, 31 Oct 2017 08:57:09 -0700 Subject: [PATCH 090/892] Switch copyright holder to "Google LLC" (#4287) --- packages/google-cloud-bigtable/google/__init__.py | 2 +- packages/google-cloud-bigtable/google/cloud/__init__.py | 2 +- .../google-cloud-bigtable/google/cloud/bigtable/__init__.py | 2 +- .../google/cloud/bigtable/_generated/__init__.py | 2 +- .../google/cloud/bigtable/_generated/_bigtable.proto | 2 +- .../cloud/bigtable/_generated/_bigtable_instance_admin.proto | 2 +- .../cloud/bigtable/_generated/_bigtable_table_admin.proto | 2 +- .../google/cloud/bigtable/_generated/_common.proto | 2 +- .../google/cloud/bigtable/_generated/_data.proto | 2 +- .../google/cloud/bigtable/_generated/_instance.proto | 2 +- .../google/cloud/bigtable/_generated/_table.proto | 2 +- packages/google-cloud-bigtable/google/cloud/bigtable/client.py | 2 +- packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py | 2 +- .../google/cloud/bigtable/column_family.py | 2 +- .../google-cloud-bigtable/google/cloud/bigtable/instance.py | 2 +- packages/google-cloud-bigtable/google/cloud/bigtable/row.py | 2 +- .../google-cloud-bigtable/google/cloud/bigtable/row_data.py | 2 +- .../google-cloud-bigtable/google/cloud/bigtable/row_filters.py | 2 +- packages/google-cloud-bigtable/google/cloud/bigtable/table.py | 2 +- packages/google-cloud-bigtable/nox.py | 2 +- packages/google-cloud-bigtable/pylint.config.py | 2 +- packages/google-cloud-bigtable/setup.py | 2 +- packages/google-cloud-bigtable/tests/system.py | 2 +- packages/google-cloud-bigtable/tests/unit/__init__.py | 2 +- packages/google-cloud-bigtable/tests/unit/_testing.py | 2 +- packages/google-cloud-bigtable/tests/unit/test_client.py | 2 +- packages/google-cloud-bigtable/tests/unit/test_cluster.py | 2 +- packages/google-cloud-bigtable/tests/unit/test_column_family.py | 2 +- packages/google-cloud-bigtable/tests/unit/test_instance.py | 2 +- packages/google-cloud-bigtable/tests/unit/test_row.py | 2 +- packages/google-cloud-bigtable/tests/unit/test_row_data.py | 2 +- packages/google-cloud-bigtable/tests/unit/test_row_filters.py | 2 +- packages/google-cloud-bigtable/tests/unit/test_table.py | 2 +- 33 files changed, 33 insertions(+), 33 deletions(-) diff --git a/packages/google-cloud-bigtable/google/__init__.py b/packages/google-cloud-bigtable/google/__init__.py index b2b833373882..9ee9bf4342ab 100644 --- a/packages/google-cloud-bigtable/google/__init__.py +++ b/packages/google-cloud-bigtable/google/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2016 Google Inc. +# Copyright 2016 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/packages/google-cloud-bigtable/google/cloud/__init__.py b/packages/google-cloud-bigtable/google/cloud/__init__.py index b2b833373882..9ee9bf4342ab 100644 --- a/packages/google-cloud-bigtable/google/cloud/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2016 Google Inc. +# Copyright 2016 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable/__init__.py index 2886f24b67a1..db4a5e0bf130 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2015 Google Inc. +# Copyright 2015 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/__init__.py index cd48e25f08d8..7e33fb434ef5 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2015 Google Inc. +# Copyright 2015 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_bigtable.proto b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_bigtable.proto index 49e27ca2ff5f..254b4963fc4d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_bigtable.proto +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_bigtable.proto @@ -1,4 +1,4 @@ -// Copyright 2016 Google Inc. +// Copyright 2016 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_bigtable_instance_admin.proto b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_bigtable_instance_admin.proto index bda5d2163532..dd629b3e6d8c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_bigtable_instance_admin.proto +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_bigtable_instance_admin.proto @@ -1,4 +1,4 @@ -// Copyright 2016 Google Inc. +// Copyright 2016 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_bigtable_table_admin.proto b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_bigtable_table_admin.proto index 0a39e298359c..1f839436568d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_bigtable_table_admin.proto +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_bigtable_table_admin.proto @@ -1,4 +1,4 @@ -// Copyright 2016 Google Inc. +// Copyright 2016 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_common.proto b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_common.proto index 1912e03e0446..f30e2c5f6782 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_common.proto +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_common.proto @@ -1,4 +1,4 @@ -// Copyright 2016 Google Inc. +// Copyright 2016 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_data.proto b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_data.proto index 720f48279b8f..2abc23820158 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_data.proto +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_data.proto @@ -1,4 +1,4 @@ -// Copyright 2016 Google Inc. +// Copyright 2016 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_instance.proto b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_instance.proto index 4aa3f9d06dd3..ce8ebc9b2031 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_instance.proto +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_instance.proto @@ -1,4 +1,4 @@ -// Copyright 2016 Google Inc. +// Copyright 2016 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_table.proto b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_table.proto index 63e41103e42f..331470d1f14a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_table.proto +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_table.proto @@ -1,4 +1,4 @@ -// Copyright 2016 Google Inc. +// Copyright 2016 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py index de6d0768266f..b30ed1aefb36 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py @@ -1,4 +1,4 @@ -# Copyright 2015 Google Inc. +# Copyright 2015 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py index e028e171a360..97b524ec5dc5 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py @@ -1,4 +1,4 @@ -# Copyright 2015 Google Inc. +# Copyright 2015 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/column_family.py b/packages/google-cloud-bigtable/google/cloud/bigtable/column_family.py index 391452880f2f..4a67313ae4c5 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/column_family.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/column_family.py @@ -1,4 +1,4 @@ -# Copyright 2015 Google Inc. +# Copyright 2015 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py index 4b21222b0639..b5f21a2e08fe 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py @@ -1,4 +1,4 @@ -# Copyright 2015 Google Inc. +# Copyright 2015 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row.py index 09d12377a49c..da9678cdf892 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row.py @@ -1,4 +1,4 @@ -# Copyright 2015 Google Inc. +# Copyright 2015 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py index 56129f6342b8..455da96bdbf2 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py @@ -1,4 +1,4 @@ -# Copyright 2016 Google Inc. +# Copyright 2016 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_filters.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_filters.py index a59be638365c..c6d8d25f0c81 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_filters.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_filters.py @@ -1,4 +1,4 @@ -# Copyright 2016 Google Inc. +# Copyright 2016 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index d1711f5be704..e33abc627250 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -1,4 +1,4 @@ -# Copyright 2015 Google Inc. +# Copyright 2015 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/nox.py b/packages/google-cloud-bigtable/nox.py index cb0121c9c4c1..bd3c28d50065 100644 --- a/packages/google-cloud-bigtable/nox.py +++ b/packages/google-cloud-bigtable/nox.py @@ -1,4 +1,4 @@ -# Copyright 2016 Google Inc. 
+# Copyright 2016 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/pylint.config.py b/packages/google-cloud-bigtable/pylint.config.py index 9a706403a78d..074e6626a6e6 100644 --- a/packages/google-cloud-bigtable/pylint.config.py +++ b/packages/google-cloud-bigtable/pylint.config.py @@ -1,4 +1,4 @@ -# Copyright 2017 Google Inc. +# Copyright 2017 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 12d856dfb435..90c0afc297ca 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -1,4 +1,4 @@ -# Copyright 2016 Google Inc. +# Copyright 2016 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index cfc2cb17f805..c889b181673e 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -1,4 +1,4 @@ -# Copyright 2016 Google Inc. +# Copyright 2016 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/tests/unit/__init__.py b/packages/google-cloud-bigtable/tests/unit/__init__.py index 58e0d9153632..df379f1e9d88 100644 --- a/packages/google-cloud-bigtable/tests/unit/__init__.py +++ b/packages/google-cloud-bigtable/tests/unit/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2016 Google Inc. +# Copyright 2016 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/tests/unit/_testing.py b/packages/google-cloud-bigtable/tests/unit/_testing.py index e67af6a1498c..cfa24c062660 100644 --- a/packages/google-cloud-bigtable/tests/unit/_testing.py +++ b/packages/google-cloud-bigtable/tests/unit/_testing.py @@ -1,4 +1,4 @@ -# Copyright 2015 Google Inc. +# Copyright 2015 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/tests/unit/test_client.py b/packages/google-cloud-bigtable/tests/unit/test_client.py index 9e0485a41554..1458f81b35ca 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/test_client.py @@ -1,4 +1,4 @@ -# Copyright 2015 Google Inc. +# Copyright 2015 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/tests/unit/test_cluster.py b/packages/google-cloud-bigtable/tests/unit/test_cluster.py index b4e67735354d..282df36ab204 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_cluster.py +++ b/packages/google-cloud-bigtable/tests/unit/test_cluster.py @@ -1,4 +1,4 @@ -# Copyright 2015 Google Inc. +# Copyright 2015 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/packages/google-cloud-bigtable/tests/unit/test_column_family.py b/packages/google-cloud-bigtable/tests/unit/test_column_family.py index 73b836501b47..246a086966a9 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_column_family.py +++ b/packages/google-cloud-bigtable/tests/unit/test_column_family.py @@ -1,4 +1,4 @@ -# Copyright 2015 Google Inc. +# Copyright 2015 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/tests/unit/test_instance.py b/packages/google-cloud-bigtable/tests/unit/test_instance.py index 756bcd05cee6..922913c11e24 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_instance.py +++ b/packages/google-cloud-bigtable/tests/unit/test_instance.py @@ -1,4 +1,4 @@ -# Copyright 2015 Google Inc. +# Copyright 2015 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/tests/unit/test_row.py b/packages/google-cloud-bigtable/tests/unit/test_row.py index 156a517b351a..5813e070152a 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row.py @@ -1,4 +1,4 @@ -# Copyright 2015 Google Inc. +# Copyright 2015 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_data.py b/packages/google-cloud-bigtable/tests/unit/test_row_data.py index 51534138b66c..b1ea91839d2a 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_data.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_data.py @@ -1,4 +1,4 @@ -# Copyright 2016 Google Inc. +# Copyright 2016 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_filters.py b/packages/google-cloud-bigtable/tests/unit/test_row_filters.py index b3715a1337ad..e091f8cde542 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_filters.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_filters.py @@ -1,4 +1,4 @@ -# Copyright 2016 Google Inc. +# Copyright 2016 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index 3890d097f572..bbf7b1f7e0a4 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -1,4 +1,4 @@ -# Copyright 2015 Google Inc. +# Copyright 2015 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. From ae9a38e5cea33fe948e902f71dfc570d230b7cdd Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Tue, 31 Oct 2017 14:28:55 -0700 Subject: [PATCH 091/892] Making release for most packages. (#4296) * Making release for most packages. 
Every package except those that have already been released (`google-cloud-core`, `google-api-core`, `google-cloud-bigquery`): - `google-cloud` - `google-cloud-bigtable` - `google-cloud-datastore` - `google-cloud-dns` - `google-cloud-error-reporting` - `google-cloud-firestore` - `google-cloud-language` - `google-cloud-logging` - `google-cloud-monitoring` - `google-cloud-resource-manager` - `google-cloud-runtimeconfig` - `google-cloud-spanner` - `google-cloud-speech` - `google-cloud-storage` - `google-cloud-trace` - `google-cloud-translate` - `google-cloud-videointelligence` - `google-cloud-vision` * Adding changelog files for each package. --- packages/google-cloud-bigtable/CHANGELOG.md | 21 +++++++++++++++++++++ packages/google-cloud-bigtable/setup.py | 5 +++-- 2 files changed, 24 insertions(+), 2 deletions(-) create mode 100644 packages/google-cloud-bigtable/CHANGELOG.md diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md new file mode 100644 index 000000000000..13dd117a2882 --- /dev/null +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -0,0 +1,21 @@ +# Changelog + +[PyPI History][1] + +[1]: https://pypi.org/project/google-cloud-bigtable/#history + +## 0.28.0 + +### Documentation + +- Fixed referenced types in `Table.row` docstring (#3934, h/t to + @MichaelTamm) +- Added link to "Python Development Environment Setup Guide" in + project README (#4187, h/t to @michaelawyu) + +### Dependencies + +- Upgrading to `google-cloud-core >= 0.28.0` and adding dependency + on `google-api-core` (#4221, #4280) + +PyPI: https://pypi.org/project/google-cloud-bigtable/0.28.0/ diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 90c0afc297ca..c7b1fdf83cbd 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -52,12 +52,13 @@ REQUIREMENTS = [ 'google-cloud-core >= 0.28.0, < 0.29dev', - 'google-gax>=0.15.7, <0.16dev', + 'google-api-core >= 0.1.1, < 0.2.0dev', + 'google-gax >= 0.15.7, < 0.16dev', ] setup( name='google-cloud-bigtable', - version='0.27.0', + version='0.28.0', description='Python Client for Google Cloud Bigtable', long_description=README, namespace_packages=[ From 37ddaf212295798098d522ff27cb29fbbd40d968 Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Tue, 31 Oct 2017 15:43:51 -0700 Subject: [PATCH 092/892] Marking all remaining versions as "dev". (#4299) This is to make it clear the code is between releases. Any code that relies on a **new** feature (e.g. of `google-api-core`) will then be able to **explicitly** make this clear by using the lower bound of the `devN` version. Fixes #4208. See: https://snarky.ca/how-i-manage-package-version-numbers/ --- packages/google-cloud-bigtable/setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index c7b1fdf83cbd..f399492c5156 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -58,7 +58,7 @@ setup( name='google-cloud-bigtable', - version='0.28.0', + version='0.28.1.dev1', description='Python Client for Google Cloud Bigtable', long_description=README, namespace_packages=[ From f98d9f4d5fbbc1e56549e46ca5ec56223c45e49b Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Wed, 1 Nov 2017 12:43:23 -0700 Subject: [PATCH 093/892] Fixing "Fore" -> "For" typo in README docs. 
(#4317) Also obeying an 80-column limit for the content and adding a missing "``virtualenv``" in the phrase "``pip`` and ``virtualenv``" in some of the docs. --- packages/google-cloud-bigtable/README.rst | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/README.rst b/packages/google-cloud-bigtable/README.rst index 95c4e515dddf..48df63d52069 100644 --- a/packages/google-cloud-bigtable/README.rst +++ b/packages/google-cloud-bigtable/README.rst @@ -18,7 +18,9 @@ Quick Start $ pip install --upgrade google-cloud-bigtable -Fore more information on setting up your Python development environment, such as installing ``pip`` and on your system, please refer to `Python Development Environment Setup Guide`_ for Google Cloud Platform. +For more information on setting up your Python development environment, +such as installing ``pip`` and ``virtualenv`` on your system, please refer +to `Python Development Environment Setup Guide`_ for Google Cloud Platform. .. _Python Development Environment Setup Guide: https://cloud.google.com/python/setup From f3a342673b3b37f6ecb0b9b262c7d845eab82d76 Mon Sep 17 00:00:00 2001 From: chemelnucfin Date: Wed, 1 Nov 2017 16:53:46 -0700 Subject: [PATCH 094/892] Closes #4319 - shorten test names (#4321) * Closes #4319 - shorten test names * #4319 update docs and config files --- packages/google-cloud-bigtable/nox.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/packages/google-cloud-bigtable/nox.py b/packages/google-cloud-bigtable/nox.py index bd3c28d50065..587d8629208a 100644 --- a/packages/google-cloud-bigtable/nox.py +++ b/packages/google-cloud-bigtable/nox.py @@ -25,15 +25,15 @@ @nox.session -@nox.parametrize('python_version', ['2.7', '3.4', '3.5', '3.6']) -def unit_tests(session, python_version): +@nox.parametrize('py', ['2.7', '3.4', '3.5', '3.6']) +def unit(session, py): """Run the unit test suite.""" # Run unit tests against all supported versions of Python. - session.interpreter = 'python{}'.format(python_version) + session.interpreter = 'python{}'.format(py) # Set the virtualenv dirname. - session.virtualenv_dirname = 'unit-' + python_version + session.virtualenv_dirname = 'unit-' + py # Install all test dependencies, then install this package in-place. session.install('mock', 'pytest', 'pytest-cov', *LOCAL_DEPS) @@ -55,8 +55,8 @@ def unit_tests(session, python_version): @nox.session -@nox.parametrize('python_version', ['2.7', '3.6']) -def system_tests(session, python_version): +@nox.parametrize('py', ['2.7', '3.6']) +def system(session, py): """Run the system test suite.""" # Sanity check: Only run system tests if the environment variable is set. @@ -64,10 +64,10 @@ def system_tests(session, python_version): session.skip('Credentials must be set via environment variable.') # Run the system tests against latest Python 2 and Python 3 only. - session.interpreter = 'python{}'.format(python_version) + session.interpreter = 'python{}'.format(py) # Set the virtualenv dirname. - session.virtualenv_dirname = 'sys-' + python_version + session.virtualenv_dirname = 'sys-' + py # Install all test dependencies, then install this package into the # virtualenv's dist-packages. From 36991f2bbc4ba40849c3c6d54c58d9680b1eec2e Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Wed, 1 Nov 2017 21:47:55 -0700 Subject: [PATCH 095/892] Making a `nox -s default` session for all packages. (#4324) * Making a `nox -s default` session for all packages. * Using "default" `nox` session on AppVeyor. 
This was 32-bit or 64-bit Python can be used, depending on which is the active `python` / the active `nox.exe`. --- packages/google-cloud-bigtable/nox.py | 30 +++++++++++++++++++-------- 1 file changed, 21 insertions(+), 9 deletions(-) diff --git a/packages/google-cloud-bigtable/nox.py b/packages/google-cloud-bigtable/nox.py index 587d8629208a..94893f2472c5 100644 --- a/packages/google-cloud-bigtable/nox.py +++ b/packages/google-cloud-bigtable/nox.py @@ -25,16 +25,14 @@ @nox.session -@nox.parametrize('py', ['2.7', '3.4', '3.5', '3.6']) -def unit(session, py): - """Run the unit test suite.""" - - # Run unit tests against all supported versions of Python. - session.interpreter = 'python{}'.format(py) - - # Set the virtualenv dirname. - session.virtualenv_dirname = 'unit-' + py +def default(session): + """Default unit test session. + This is intended to be run **without** an interpreter set, so + that the current ``python`` (on the ``PATH``) or the version of + Python corresponding to the ``nox`` binary the ``PATH`` can + run the tests. + """ # Install all test dependencies, then install this package in-place. session.install('mock', 'pytest', 'pytest-cov', *LOCAL_DEPS) session.install('-e', '.') @@ -54,6 +52,20 @@ def unit(session, py): ) +@nox.session +@nox.parametrize('py', ['2.7', '3.4', '3.5', '3.6']) +def unit(session, py): + """Run the unit test suite.""" + + # Run unit tests against all supported versions of Python. + session.interpreter = 'python{}'.format(py) + + # Set the virtualenv dirname. + session.virtualenv_dirname = 'unit-' + py + + default(session) + + @nox.session @nox.parametrize('py', ['2.7', '3.6']) def system(session, py): From 9bfc6cebaaf240ddc6e21285288d2fce570f4d76 Mon Sep 17 00:00:00 2001 From: Justin Lin Date: Thu, 2 Nov 2017 12:10:00 -0700 Subject: [PATCH 096/892] Retry for 'mutate_rows' that return retryable errors. 
(#4256) --- .../google/cloud/bigtable/table.py | 133 ++++- .../tests/unit/test_table.py | 512 +++++++++++++++++- 2 files changed, 605 insertions(+), 40 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index e33abc627250..35b876f91913 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -17,6 +17,13 @@ import six +from google.api_core.exceptions import RetryError +from google.api_core.exceptions import Aborted +from google.api_core.exceptions import DeadlineExceeded +from google.api_core.exceptions import ServiceUnavailable +from google.api_core.exceptions import from_grpc_status +from google.api_core.retry import Retry +from google.api_core.retry import if_exception_type from google.cloud._helpers import _to_bytes from google.cloud.bigtable._generated import ( bigtable_pb2 as data_messages_v2_pb2) @@ -30,12 +37,22 @@ from google.cloud.bigtable.row import ConditionalRow from google.cloud.bigtable.row import DirectRow from google.cloud.bigtable.row_data import PartialRowsData +from grpc import StatusCode # Maximum number of mutations in bulk (MutateRowsRequest message): # https://cloud.google.com/bigtable/docs/reference/data/rpc/google.bigtable.v2#google.bigtable.v2.MutateRowRequest _MAX_BULK_MUTATIONS = 100000 +DEFAULT_RETRY = Retry( + predicate=if_exception_type((Aborted, + DeadlineExceeded, + ServiceUnavailable)), + initial=1.0, + maximum=15.0, + multiplier=2.0, + deadline=60.0 * 2.0) + class TableMismatchError(ValueError): """Row from another table.""" @@ -296,34 +313,32 @@ def read_rows(self, start_key=None, end_key=None, limit=None, # We expect an iterator of `data_messages_v2_pb2.ReadRowsResponse` return PartialRowsData(response_iterator) - def mutate_rows(self, rows): + def mutate_rows(self, rows, retry=DEFAULT_RETRY): """Mutates multiple rows in bulk. The method tries to update all specified rows. If some of the rows weren't updated, it would not remove mutations. They can be applied to the row separately. If row mutations finished successfully, they would be cleaned up. + Optionally specify a `retry` to re-attempt rows that return transient + errors, until all rows succeed or the deadline is reached. :type rows: list :param rows: List or other iterable of :class:`.DirectRow` instances. + :type retry: :class:`~google.api_core.retry.Retry` + :param retry: (Optional) Retry delay and deadline arguments. Can be + specified using ``DEFAULT_RETRY.with_delay`` and/or + ``DEFAULT_RETRY.with_deadline``. + :rtype: list :returns: A list of response statuses (`google.rpc.status_pb2.Status`) corresponding to success or failure of each row mutation sent. These will be in the same order as the `rows`. """ - mutate_rows_request = _mutate_rows_request(self.name, rows) - client = self._instance._client - responses = client._data_stub.MutateRows(mutate_rows_request) - - responses_statuses = [ - None for _ in six.moves.xrange(len(mutate_rows_request.entries))] - for response in responses: - for entry in response.entries: - responses_statuses[entry.index] = entry.status - if entry.status.code == 0: - rows[entry.index].clear() - return responses_statuses + retryable_mutate_rows = _RetryableMutateRowsWorker( + self._instance._client, self.name, rows) + return retryable_mutate_rows(retry=retry) def sample_row_keys(self): """Read a sample of row keys in the table. 
@@ -363,6 +378,98 @@ def sample_row_keys(self): return response_iterator +class _RetryableMutateRowsWorker(object): + """A callable worker that can retry to mutate rows with transient errors. + + This class is a callable that can retry mutating rows that result in + transient errors. After all rows are successful or none of the rows + are retryable, any subsequent call on this callable will be a no-op. + """ + + # pylint: disable=unsubscriptable-object + RETRY_CODES = ( + StatusCode.DEADLINE_EXCEEDED.value[0], + StatusCode.ABORTED.value[0], + StatusCode.UNAVAILABLE.value[0], + ) + + def __init__(self, client, table_name, rows): + self.client = client + self.table_name = table_name + self.rows = rows + self.responses_statuses = [ + None for _ in six.moves.xrange(len(self.rows))] + + def __call__(self, retry=DEFAULT_RETRY): + """Attempt to mutate all rows and retry rows with transient errors. + + Will retry the rows with transient errors until all rows succeed or + ``deadline`` specified in the `retry` is reached. + + :rtype: list + :returns: A list of response statuses (`google.rpc.status_pb2.Status`) + corresponding to success or failure of each row mutation + sent. These will be in the same order as the ``rows``. + """ + try: + retry(self.__class__._do_mutate_retryable_rows)(self) + except (RetryError, ValueError) as err: + # Upon timeout or sleep generator error, return responses_statuses + pass + return self.responses_statuses + + def _is_retryable(self, status): # pylint: disable=no-self-use + return (status is None or + status.code in _RetryableMutateRowsWorker.RETRY_CODES) + + def _do_mutate_retryable_rows(self): + """Mutate all the rows that are eligible for retry. + + A row is eligible for retry if it has not been tried or if it resulted + in a transient error in a previous call. + + :rtype: list + :return: ``responses_statuses`` (`google.rpc.status_pb2.Status`) + :raises: :exc:`~google.api_core.exceptions.ServiceUnavailable` if any + row returned a transient error. An artificial exception + to work with ``DEFAULT_RETRY``. + """ + retryable_rows = [] + index_into_all_rows = [] + for i, status in enumerate(self.responses_statuses): + if self._is_retryable(status): + retryable_rows.append(self.rows[i]) + index_into_all_rows.append(i) + + if not retryable_rows: + # All mutations are either successful or non-retryable now. + return self.responses_statuses + + mutate_rows_request = _mutate_rows_request( + self.table_name, retryable_rows) + responses = self.client._data_stub.MutateRows( + mutate_rows_request) + + num_responses = 0 + num_retryable_responses = 0 + for response in responses: + for entry in response.entries: + num_responses += 1 + index = index_into_all_rows[entry.index] + self.responses_statuses[index] = entry.status + if self._is_retryable(entry.status): + num_retryable_responses += 1 + if entry.status.code == 0: + self.rows[index].clear() + + assert len(retryable_rows) == num_responses + + if num_retryable_responses: + raise from_grpc_status(StatusCode.UNAVAILABLE, + 'MutateRows retryable error.') + return self.responses_statuses + + def _create_row_request(table_name, row_key=None, start_key=None, end_key=None, filter_=None, limit=None, end_inclusive=False): """Creates a request to read rows in a table. 
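For reference, the ``retry`` parameter introduced by this patch can be exercised from application code roughly as follows. This is an illustrative sketch only, not part of the patch itself: it assumes an already-constructed ``table`` (a ``google.cloud.bigtable.table.Table`` bound to a live instance), and the column family, qualifier, and row keys are placeholders.

from google.cloud.bigtable.table import DEFAULT_RETRY

# Build DirectRow mutations to send in bulk (placeholder family/qualifier/values).
rows = []
for key in (b'row-key-1', b'row-key-2'):
    row = table.row(key)  # a DirectRow, since neither filter_ nor append is set
    row.set_cell('cf1', b'greeting', b'hello')
    rows.append(row)

# Derive a tighter strategy from DEFAULT_RETRY: shorter backoff, 30 s overall deadline.
retry = DEFAULT_RETRY.with_delay(initial=0.5, maximum=5.0).with_deadline(30.0)

statuses = table.mutate_rows(rows, retry=retry)
for status in statuses:
    print(status.code)  # 0 (OK) means that row's mutations were applied

Rows that come back with a transient status (ABORTED, DEADLINE_EXCEEDED, UNAVAILABLE) are re-sent until they succeed or the deadline expires; passing ``retry=None`` would return the first-attempt statuses unchanged.
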
diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index bbf7b1f7e0a4..10b956ce6839 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -452,43 +452,23 @@ def test_read_row_still_partial(self): self._read_row_helper(chunks, None) def test_mutate_rows(self): - from google.cloud.bigtable._generated.bigtable_pb2 import ( - MutateRowsResponse) - from google.cloud.bigtable.row import DirectRow from google.rpc.status_pb2 import Status - from tests.unit._testing import _FakeStub - client = _Client() - instance = _Instance(self.INSTANCE_NAME, client=client) + instance = mock.MagicMock() table = self._make_one(self.TABLE_ID, instance) - row_1 = DirectRow(row_key=b'row_key', table=table) - row_1.set_cell('cf', b'col', b'value1') - row_2 = DirectRow(row_key=b'row_key_2', table=table) - row_2.set_cell('cf', b'col', b'value2') + response = [Status(code=0), Status(code=1)] - response = MutateRowsResponse( - entries=[ - MutateRowsResponse.Entry( - index=0, - status=Status(code=0), - ), - MutateRowsResponse.Entry( - index=1, - status=Status(code=1), - ), - ], - ) - - # Patch the stub used by the API method. - client._data_stub = _FakeStub([response]) - statuses = table.mutate_rows([row_1, row_2]) + mock_worker = mock.Mock(return_value=response) + with mock.patch( + 'google.cloud.bigtable.table._RetryableMutateRowsWorker', + new=mock.MagicMock(return_value=mock_worker)): + statuses = table.mutate_rows([mock.MagicMock(), mock.MagicMock()]) result = [status.code for status in statuses] expected_result = [0, 1] self.assertEqual(result, expected_result) - def test_read_rows(self): from google.cloud._testing import _Monkey from tests.unit._testing import _FakeStub @@ -570,6 +550,484 @@ def test_sample_row_keys(self): )]) +class Test__RetryableMutateRowsWorker(unittest.TestCase): + PROJECT_ID = 'project-id' + INSTANCE_ID = 'instance-id' + INSTANCE_NAME = ('projects/' + PROJECT_ID + '/instances/' + INSTANCE_ID) + TABLE_ID = 'table-id' + + @staticmethod + def _get_target_class_for_worker(): + from google.cloud.bigtable.table import _RetryableMutateRowsWorker + + return _RetryableMutateRowsWorker + + def _make_worker(self, *args, **kwargs): + return self._get_target_class_for_worker()(*args, **kwargs) + + @staticmethod + def _get_target_class_for_table(): + from google.cloud.bigtable.table import Table + + return Table + + def _make_table(self, *args, **kwargs): + return self._get_target_class_for_table()(*args, **kwargs) + + def _make_responses_statuses(self, codes): + from google.rpc.status_pb2 import Status + + response = [Status(code=code) for code in codes] + return response + + def test_callable_empty_rows(self): + client = _Client() + instance = _Instance(self.INSTANCE_NAME, client=client) + table = self._make_table(self.TABLE_ID, instance) + + worker = self._make_worker(table._instance._client, table.name, []) + statuses = worker() + + self.assertEqual(len(statuses), 0) + + def test_callable_retry(self): + from google.api_core.retry import Retry + from google.cloud.bigtable._generated.bigtable_pb2 import ( + MutateRowsResponse) + from google.cloud.bigtable.row import DirectRow + from google.cloud.bigtable.table import DEFAULT_RETRY + from google.rpc.status_pb2 import Status + + # Setup: + # - Mutate 3 rows. + # Action: + # - Initial attempt will mutate all 3 rows. + # Expectation: + # - First attempt will result in one retryable error. 
+ # - Second attempt will result in success for the retry-ed row. + # - Check MutateRows is called twice. + # - State of responses_statuses should be + # [success, success, non-retryable] + + client = _Client() + instance = _Instance(self.INSTANCE_NAME, client=client) + table = self._make_table(self.TABLE_ID, instance) + + row_1 = DirectRow(row_key=b'row_key', table=table) + row_1.set_cell('cf', b'col', b'value1') + row_2 = DirectRow(row_key=b'row_key_2', table=table) + row_2.set_cell('cf', b'col', b'value2') + row_3 = DirectRow(row_key=b'row_key_3', table=table) + row_3.set_cell('cf', b'col', b'value3') + + response_1 = MutateRowsResponse( + entries=[ + MutateRowsResponse.Entry( + index=0, + status=Status(code=0), + ), + MutateRowsResponse.Entry( + index=1, + status=Status(code=4), + ), + MutateRowsResponse.Entry( + index=2, + status=Status(code=1), + ), + ], + ) + + response_2 = MutateRowsResponse( + entries=[ + MutateRowsResponse.Entry( + index=0, + status=Status(code=0), + ), + ], + ) + + # Patch the stub used by the API method. + client._data_stub = mock.MagicMock() + client._data_stub.MutateRows.side_effect = [[response_1], [response_2]] + + retry = DEFAULT_RETRY.with_delay(initial=0.1) + worker = self._make_worker(client, table.name, [row_1, row_2, row_3]) + statuses = worker(retry=retry) + + result = [status.code for status in statuses] + expected_result = [0, 0, 1] + + client._data_stub.MutateRows.assert_has_calls([mock.call(mock.ANY), mock.call(mock.ANY)]) + self.assertEqual(client._data_stub.MutateRows.call_count, 2) + self.assertEqual(result, expected_result) + + def test_callable_retry_timeout(self): + from google.api_core.retry import Retry + from google.cloud.bigtable._generated.bigtable_pb2 import ( + MutateRowsResponse) + from google.cloud.bigtable.row import DirectRow + from google.cloud.bigtable.table import DEFAULT_RETRY + from google.rpc.status_pb2 import Status + + # Setup: + # - Mutate 2 rows. + # Action: + # - Initial attempt will mutate all 2 rows. + # Expectation: + # - Both rows always return retryable errors. + # - google.api_core.Retry should keep retrying. + # - Check MutateRows is called multiple times. + # - By the time deadline is reached, statuses should be + # [retryable, retryable] + + client = _Client() + instance = _Instance(self.INSTANCE_NAME, client=client) + table = self._make_table(self.TABLE_ID, instance) + + row_1 = DirectRow(row_key=b'row_key', table=table) + row_1.set_cell('cf', b'col', b'value1') + row_2 = DirectRow(row_key=b'row_key_2', table=table) + row_2.set_cell('cf', b'col', b'value2') + + response = MutateRowsResponse( + entries=[ + MutateRowsResponse.Entry( + index=0, + status=Status(code=4), + ), + MutateRowsResponse.Entry( + index=1, + status=Status(code=4), + ), + ], + ) + + # Patch the stub used by the API method. 
+ client._data_stub = mock.MagicMock() + client._data_stub.MutateRows.return_value = [response] + + retry = DEFAULT_RETRY.with_delay( + initial=0.1, maximum=0.2, multiplier=2.0).with_deadline(0.5) + worker = self._make_worker(client, table.name, [row_1, row_2]) + statuses = worker(retry=retry) + + result = [status.code for status in statuses] + expected_result = [4, 4] + + self.assertTrue(client._data_stub.MutateRows.call_count > 1) + self.assertEqual(result, expected_result) + + def test_do_mutate_retryable_rows_empty_rows(self): + client = _Client() + instance = _Instance(self.INSTANCE_NAME, client=client) + table = self._make_table(self.TABLE_ID, instance) + + worker = self._make_worker(table._instance._client, table.name, []) + statuses = worker._do_mutate_retryable_rows() + + self.assertEqual(len(statuses), 0) + + def test_do_mutate_retryable_rows(self): + from google.cloud.bigtable._generated.bigtable_pb2 import ( + MutateRowsResponse) + from google.cloud.bigtable.row import DirectRow + from google.rpc.status_pb2 import Status + from tests.unit._testing import _FakeStub + + # Setup: + # - Mutate 2 rows. + # Action: + # - Initial attempt will mutate all 2 rows. + # Expectation: + # - Expect [success, non-retryable] + + client = _Client() + instance = _Instance(self.INSTANCE_NAME, client=client) + table = self._make_table(self.TABLE_ID, instance) + + row_1 = DirectRow(row_key=b'row_key', table=table) + row_1.set_cell('cf', b'col', b'value1') + row_2 = DirectRow(row_key=b'row_key_2', table=table) + row_2.set_cell('cf', b'col', b'value2') + + response = MutateRowsResponse( + entries=[ + MutateRowsResponse.Entry( + index=0, + status=Status(code=0), + ), + MutateRowsResponse.Entry( + index=1, + status=Status(code=1), + ), + ], + ) + + # Patch the stub used by the API method. + client._data_stub = _FakeStub([response]) + + worker = self._make_worker(table._instance._client, + table.name, [row_1, row_2]) + statuses = worker._do_mutate_retryable_rows() + + result = [status.code for status in statuses] + expected_result = [0, 1] + + self.assertEqual(result, expected_result) + + def test_do_mutate_retryable_rows_retry(self): + from google.api_core.exceptions import ServiceUnavailable + from google.cloud.bigtable._generated.bigtable_pb2 import ( + MutateRowsResponse) + from google.cloud.bigtable.row import DirectRow + from google.rpc.status_pb2 import Status + from tests.unit._testing import _FakeStub + + # Setup: + # - Mutate 3 rows. + # Action: + # - Initial attempt will mutate all 3 rows. + # Expectation: + # - Second row returns retryable error code, so expect a raise. + # - State of responses_statuses should be + # [success, retryable, non-retryable] + + client = _Client() + instance = _Instance(self.INSTANCE_NAME, client=client) + table = self._make_table(self.TABLE_ID, instance) + + row_1 = DirectRow(row_key=b'row_key', table=table) + row_1.set_cell('cf', b'col', b'value1') + row_2 = DirectRow(row_key=b'row_key_2', table=table) + row_2.set_cell('cf', b'col', b'value2') + row_3 = DirectRow(row_key=b'row_key_3', table=table) + row_3.set_cell('cf', b'col', b'value3') + + response = MutateRowsResponse( + entries=[ + MutateRowsResponse.Entry( + index=0, + status=Status(code=0), + ), + MutateRowsResponse.Entry( + index=1, + status=Status(code=4), + ), + MutateRowsResponse.Entry( + index=2, + status=Status(code=1), + ), + ], + ) + + # Patch the stub used by the API method. 
+ client._data_stub = _FakeStub([response]) + + worker = self._make_worker(table._instance._client, + table.name, [row_1, row_2, row_3]) + + with self.assertRaises(ServiceUnavailable): + worker._do_mutate_retryable_rows() + + statuses = worker.responses_statuses + result = [status.code for status in statuses] + expected_result = [0, 4, 1] + + self.assertEqual(result, expected_result) + + def test_do_mutate_retryable_rows_second_retry(self): + from google.api_core.exceptions import ServiceUnavailable + from google.cloud.bigtable._generated.bigtable_pb2 import ( + MutateRowsResponse) + from google.cloud.bigtable.row import DirectRow + from google.rpc.status_pb2 import Status + from tests.unit._testing import _FakeStub + + # Setup: + # - Mutate 4 rows. + # - First try results: + # [success, retryable, non-retryable, retryable] + # Action: + # - Second try should re-attempt the 'retryable' rows. + # Expectation: + # - After second try: + # [success, success, non-retryable, retryable] + # - One of the rows tried second time returns retryable error code, + # so expect a raise. + # - Exception contains response whose index should be '3' even though + # only two rows were retried. + + client = _Client() + instance = _Instance(self.INSTANCE_NAME, client=client) + table = self._make_table(self.TABLE_ID, instance) + + row_1 = DirectRow(row_key=b'row_key', table=table) + row_1.set_cell('cf', b'col', b'value1') + row_2 = DirectRow(row_key=b'row_key_2', table=table) + row_2.set_cell('cf', b'col', b'value2') + row_3 = DirectRow(row_key=b'row_key_3', table=table) + row_3.set_cell('cf', b'col', b'value3') + row_4 = DirectRow(row_key=b'row_key_4', table=table) + row_4.set_cell('cf', b'col', b'value4') + + response = MutateRowsResponse( + entries=[ + MutateRowsResponse.Entry( + index=0, + status=Status(code=0), + ), + MutateRowsResponse.Entry( + index=1, + status=Status(code=4), + ), + ], + ) + + # Patch the stub used by the API method. + client._data_stub = _FakeStub([response]) + + worker = self._make_worker(table._instance._client, + table.name, [row_1, row_2, row_3, row_4]) + worker.responses_statuses = self._make_responses_statuses( + [0, 4, 1, 10]) + + with self.assertRaises(ServiceUnavailable): + worker._do_mutate_retryable_rows() + + statuses = worker.responses_statuses + result = [status.code for status in statuses] + expected_result = [0, 0, 1, 4] + + self.assertEqual(result, expected_result) + + def test_do_mutate_retryable_rows_second_try(self): + from google.cloud.bigtable._generated.bigtable_pb2 import ( + MutateRowsResponse) + from google.cloud.bigtable.row import DirectRow + from google.rpc.status_pb2 import Status + from tests.unit._testing import _FakeStub + + # Setup: + # - Mutate 4 rows. + # - First try results: + # [success, retryable, non-retryable, retryable] + # Action: + # - Second try should re-attempt the 'retryable' rows. 
+ # Expectation: + # - After second try: + # [success, non-retryable, non-retryable, success] + + client = _Client() + instance = _Instance(self.INSTANCE_NAME, client=client) + table = self._make_table(self.TABLE_ID, instance) + + row_1 = DirectRow(row_key=b'row_key', table=table) + row_1.set_cell('cf', b'col', b'value1') + row_2 = DirectRow(row_key=b'row_key_2', table=table) + row_2.set_cell('cf', b'col', b'value2') + row_3 = DirectRow(row_key=b'row_key_3', table=table) + row_3.set_cell('cf', b'col', b'value3') + row_4 = DirectRow(row_key=b'row_key_4', table=table) + row_4.set_cell('cf', b'col', b'value4') + + response = MutateRowsResponse( + entries=[ + MutateRowsResponse.Entry( + index=0, + status=Status(code=1), + ), + MutateRowsResponse.Entry( + index=1, + status=Status(code=0), + ), + ], + ) + + # Patch the stub used by the API method. + client._data_stub = _FakeStub([response]) + + worker = self._make_worker(table._instance._client, + table.name, [row_1, row_2, row_3, row_4]) + worker.responses_statuses = self._make_responses_statuses( + [0, 4, 1, 10]) + + statuses = worker._do_mutate_retryable_rows() + + result = [status.code for status in statuses] + expected_result = [0, 1, 1, 0] + + self.assertEqual(result, expected_result) + + def test_do_mutate_retryable_rows_second_try_no_retryable(self): + from google.cloud.bigtable._generated.bigtable_pb2 import ( + MutateRowsResponse) + from google.cloud.bigtable.row import DirectRow + from google.rpc.status_pb2 import Status + from tests.unit._testing import _FakeStub + + # Setup: + # - Mutate 2 rows. + # - First try results: [success, non-retryable] + # Action: + # - Second try has no row to retry. + # Expectation: + # - After second try: [success, non-retryable] + + client = _Client() + instance = _Instance(self.INSTANCE_NAME, client=client) + table = self._make_table(self.TABLE_ID, instance) + + row_1 = DirectRow(row_key=b'row_key', table=table) + row_1.set_cell('cf', b'col', b'value1') + row_2 = DirectRow(row_key=b'row_key_2', table=table) + row_2.set_cell('cf', b'col', b'value2') + + worker = self._make_worker(table._instance._client, + table.name, [row_1, row_2]) + worker.responses_statuses = self._make_responses_statuses( + [0, 1]) + + statuses = worker._do_mutate_retryable_rows() + + result = [status.code for status in statuses] + expected_result = [0, 1] + + self.assertEqual(result, expected_result) + + def test_do_mutate_retryable_rows_mismatch_num_responses(self): + from google.cloud.bigtable._generated.bigtable_pb2 import ( + MutateRowsResponse) + from google.cloud.bigtable.row import DirectRow + from google.rpc.status_pb2 import Status + from tests.unit._testing import _FakeStub + + client = _Client() + instance = _Instance(self.INSTANCE_NAME, client=client) + table = self._make_table(self.TABLE_ID, instance) + + row_1 = DirectRow(row_key=b'row_key', table=table) + row_1.set_cell('cf', b'col', b'value1') + row_2 = DirectRow(row_key=b'row_key_2', table=table) + row_2.set_cell('cf', b'col', b'value2') + + response = MutateRowsResponse( + entries=[ + MutateRowsResponse.Entry( + index=0, + status=Status(code=0), + ), + ], + ) + + # Patch the stub used by the API method. 
+ client._data_stub = _FakeStub([response]) + + worker = self._make_worker(table._instance._client, + table.name, [row_1, row_2]) + with self.assertRaises(AssertionError): + statuses = worker._do_mutate_retryable_rows() + + class Test__create_row_request(unittest.TestCase): def _call_fut(self, table_name, row_key=None, start_key=None, end_key=None, From c7d39967b0f37266f83f1742649a10be3297068c Mon Sep 17 00:00:00 2001 From: Gary Elliott Date: Thu, 2 Nov 2017 15:43:18 -0400 Subject: [PATCH 097/892] Bugfix: Allow Bigtable ReadRows to handle empty string column qualifier (#4252) --- .../google/cloud/bigtable/row_data.py | 9 +++++-- .../tests/unit/read-rows-acceptance-test.json | 27 +++++++++++++++++++ .../tests/unit/test_row_data.py | 6 +++-- 3 files changed, 38 insertions(+), 4 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py index 455da96bdbf2..9bde1c0cb5a3 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py @@ -283,10 +283,14 @@ def consume_next(self): row = self._row = PartialRowData(chunk.row_key) if cell is None: + qualifier = None + if chunk.HasField('qualifier'): + qualifier = chunk.qualifier.value + cell = self._cell = PartialCellData( chunk.row_key, chunk.family_name.value, - chunk.qualifier.value, + qualifier, chunk.timestamp_micros, chunk.labels, chunk.value) @@ -421,7 +425,8 @@ def _copy_from_previous(self, cell): cell.row_key = previous.row_key if not cell.family_name: cell.family_name = previous.family_name - if not cell.qualifier: + # NOTE: ``cell.qualifier`` **can** be empty string. + if cell.qualifier is None: cell.qualifier = previous.qualifier def _save_current_row(self): diff --git a/packages/google-cloud-bigtable/tests/unit/read-rows-acceptance-test.json b/packages/google-cloud-bigtable/tests/unit/read-rows-acceptance-test.json index 84023567dd9b..cfa8a17f327b 100644 --- a/packages/google-cloud-bigtable/tests/unit/read-rows-acceptance-test.json +++ b/packages/google-cloud-bigtable/tests/unit/read-rows-acceptance-test.json @@ -1173,6 +1173,33 @@ "error": false } ] + }, + { + "name": "empty second qualifier", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 99\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "qualifier: \u003c\n value: \"\"\n\u003e\ntimestamp_micros: 98\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 99, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "", + "ts": 98, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] } ] } diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_data.py b/packages/google-cloud-bigtable/tests/unit/test_row_data.py index b1ea91839d2a..7cfb1dc45d4e 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_data.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_data.py @@ -334,7 +334,7 @@ def test__copy_from_previous_unset(self): prd._copy_from_previous(cell) self.assertEqual(cell.row_key, '') self.assertEqual(cell.family_name, u'') - self.assertEqual(cell.qualifier, b'') + self.assertIsNone(cell.qualifier) self.assertEqual(cell.timestamp_micros, 0) self.assertEqual(cell.labels, []) @@ -635,6 +635,8 @@ def test_reset_in_between_chunks(self): def test_empty_cell_chunk(self): 
self._match_results('empty cell chunk') + def test_empty_second_qualifier(self): + self._match_results('empty second qualifier') def _flatten_cells(prd): # Match results format from JSON testcases. @@ -678,7 +680,7 @@ class _PartialCellData(object): row_key = '' family_name = u'' - qualifier = b'' + qualifier = None timestamp_micros = 0 def __init__(self, **kw): From 42d64643f09f8086e72113f2053e8eb4997b36ca Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Thu, 2 Nov 2017 13:01:36 -0700 Subject: [PATCH 098/892] Cutting release 0.28.1 for Bigtable. (#4331) --- packages/google-cloud-bigtable/CHANGELOG.md | 9 +++++++++ packages/google-cloud-bigtable/setup.py | 2 +- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 13dd117a2882..791610fa2ac7 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,15 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## 0.28.1 + +### Implementation Changes + +- Bugfix: Distinguish between an unset column qualifier and an empty string + column qualifier while parsing a `ReadRows` response (#4252) + +PyPI: https://pypi.org/project/google-cloud-bigtable/0.28.1/ + ## 0.28.0 ### Documentation diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index f399492c5156..dc4a29e9e51a 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -58,7 +58,7 @@ setup( name='google-cloud-bigtable', - version='0.28.1.dev1', + version='0.28.1', description='Python Client for Google Cloud Bigtable', long_description=README, namespace_packages=[ From 4218f729edf1b024e18a38113de760e37a8bb987 Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Thu, 2 Nov 2017 13:38:48 -0700 Subject: [PATCH 099/892] Marking Bigtable as "dev" after release. (#4332) * Marking Bigtable as "dev" after release. * Updating changelog based on #4256. --- packages/google-cloud-bigtable/CHANGELOG.md | 6 ++++++ packages/google-cloud-bigtable/setup.py | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 791610fa2ac7..b70baa849230 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -11,6 +11,12 @@ - Bugfix: Distinguish between an unset column qualifier and an empty string column qualifier while parsing a `ReadRows` response (#4252) +### Features added + +- Add a ``retry`` strategy that will be used for retry-able errors + in ``Table.mutate_rows``. This will be used for gRPC errors of type + ``ABORTED``, ``DEADLINE_EXCEEDED`` and ``SERVICE_UNAVAILABLE``. (#4256) + PyPI: https://pypi.org/project/google-cloud-bigtable/0.28.1/ ## 0.28.0 diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index dc4a29e9e51a..fdd2cc1e16c6 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -58,7 +58,7 @@ setup( name='google-cloud-bigtable', - version='0.28.1', + version='0.28.2.dev1', description='Python Client for Google Cloud Bigtable', long_description=README, namespace_packages=[ From e8ffd16c5bb3ccda90ae5c5d53330f31d34770d0 Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Fri, 3 Nov 2017 09:29:35 -0700 Subject: [PATCH 100/892] BigTable: Small docs/hygiene tweaks after #4256. 
(#4333) --- .../google/cloud/bigtable/table.py | 85 ++++++++++++------- .../tests/unit/test_table.py | 4 +- 2 files changed, 57 insertions(+), 32 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index 35b876f91913..0e2a832d00ee 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -15,15 +15,13 @@ """User-friendly container for Google Cloud Bigtable Table.""" -import six - -from google.api_core.exceptions import RetryError from google.api_core.exceptions import Aborted from google.api_core.exceptions import DeadlineExceeded -from google.api_core.exceptions import ServiceUnavailable from google.api_core.exceptions import from_grpc_status -from google.api_core.retry import Retry +from google.api_core.exceptions import RetryError +from google.api_core.exceptions import ServiceUnavailable from google.api_core.retry import if_exception_type +from google.api_core.retry import Retry from google.cloud._helpers import _to_bytes from google.cloud.bigtable._generated import ( bigtable_pb2 as data_messages_v2_pb2) @@ -41,17 +39,27 @@ # Maximum number of mutations in bulk (MutateRowsRequest message): -# https://cloud.google.com/bigtable/docs/reference/data/rpc/google.bigtable.v2#google.bigtable.v2.MutateRowRequest +# (https://cloud.google.com/bigtable/docs/reference/data/rpc/ +# google.bigtable.v2#google.bigtable.v2.MutateRowRequest) _MAX_BULK_MUTATIONS = 100000 DEFAULT_RETRY = Retry( - predicate=if_exception_type((Aborted, - DeadlineExceeded, - ServiceUnavailable)), - initial=1.0, - maximum=15.0, - multiplier=2.0, - deadline=60.0 * 2.0) + predicate=if_exception_type( + ( + Aborted, + DeadlineExceeded, + ServiceUnavailable, + ), + ), + initial=1.0, + maximum=15.0, + multiplier=2.0, + deadline=120.0, # 2 minutes +) +"""The default retry stategy to be used on retry-able errors. + +Used by :meth:`~google.cloud.bigtable.table.Table.mutate_rows`. +""" class TableMismatchError(ValueError): @@ -320,16 +328,22 @@ def mutate_rows(self, rows, retry=DEFAULT_RETRY): If some of the rows weren't updated, it would not remove mutations. They can be applied to the row separately. If row mutations finished successfully, they would be cleaned up. - Optionally specify a `retry` to re-attempt rows that return transient - errors, until all rows succeed or the deadline is reached. + + Optionally, a ``retry`` strategy can be specified to re-attempt + mutations on rows that return transient errors. This method will retry + until all rows succeed or until the request deadline is reached. To + specify a ``retry`` strategy of "do-nothing", a deadline of ``0.0`` + can be specified. :type rows: list :param rows: List or other iterable of :class:`.DirectRow` instances. :type retry: :class:`~google.api_core.retry.Retry` - :param retry: (Optional) Retry delay and deadline arguments. Can be - specified using ``DEFAULT_RETRY.with_delay`` and/or - ``DEFAULT_RETRY.with_deadline``. + :param retry: + (Optional) Retry delay and deadline arguments. To override, the + default value :attr:`DEFAULT_RETRY` can be used and modified with + the :meth:`~google.api_core.retry.Retry.with_delay` method or the + :meth:`~google.api_core.retry.Retry.with_deadline` method. 
:rtype: list :returns: A list of response statuses (`google.rpc.status_pb2.Status`) @@ -392,13 +406,13 @@ class _RetryableMutateRowsWorker(object): StatusCode.ABORTED.value[0], StatusCode.UNAVAILABLE.value[0], ) + # pylint: enable=unsubscriptable-object def __init__(self, client, table_name, rows): self.client = client self.table_name = table_name self.rows = rows - self.responses_statuses = [ - None for _ in six.moves.xrange(len(self.rows))] + self.responses_statuses = [None] * len(self.rows) def __call__(self, retry=DEFAULT_RETRY): """Attempt to mutate all rows and retry rows with transient errors. @@ -412,13 +426,14 @@ def __call__(self, retry=DEFAULT_RETRY): sent. These will be in the same order as the ``rows``. """ try: - retry(self.__class__._do_mutate_retryable_rows)(self) + retry(self._do_mutate_retryable_rows)() except (RetryError, ValueError) as err: # Upon timeout or sleep generator error, return responses_statuses pass return self.responses_statuses - def _is_retryable(self, status): # pylint: disable=no-self-use + @staticmethod + def _is_retryable(status): return (status is None or status.code in _RetryableMutateRowsWorker.RETRY_CODES) @@ -429,17 +444,23 @@ def _do_mutate_retryable_rows(self): in a transient error in a previous call. :rtype: list - :return: ``responses_statuses`` (`google.rpc.status_pb2.Status`) - :raises: :exc:`~google.api_core.exceptions.ServiceUnavailable` if any - row returned a transient error. An artificial exception - to work with ``DEFAULT_RETRY``. + :return: The responses statuses, which is a list of + :class:`~google.rpc.status_pb2.Status`. + :raises: One of the following: + + * :exc:`~google.api_core.exceptions.ServiceUnavailable` if any + row returned a transient error. This is "artificial" in + the sense that we intentionally raise the error because it + will be caught by the retry strategy. + * :exc:`RuntimeError` if the number of responses doesn't + match the number of rows that were retried """ retryable_rows = [] index_into_all_rows = [] - for i, status in enumerate(self.responses_statuses): + for index, status in enumerate(self.responses_statuses): if self._is_retryable(status): - retryable_rows.append(self.rows[i]) - index_into_all_rows.append(i) + retryable_rows.append(self.rows[index]) + index_into_all_rows.append(index) if not retryable_rows: # All mutations are either successful or non-retryable now. 
@@ -462,11 +483,15 @@ def _do_mutate_retryable_rows(self): if entry.status.code == 0: self.rows[index].clear() - assert len(retryable_rows) == num_responses + if len(retryable_rows) != num_responses: + raise RuntimeError( + 'Unexpected the number of responses', num_responses, + 'Expected', len(retryable_rows)) if num_retryable_responses: raise from_grpc_status(StatusCode.UNAVAILABLE, 'MutateRows retryable error.') + return self.responses_statuses diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index 10b956ce6839..69d49997ecca 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -1024,8 +1024,8 @@ def test_do_mutate_retryable_rows_mismatch_num_responses(self): worker = self._make_worker(table._instance._client, table.name, [row_1, row_2]) - with self.assertRaises(AssertionError): - statuses = worker._do_mutate_retryable_rows() + with self.assertRaises(RuntimeError): + worker._do_mutate_retryable_rows() class Test__create_row_request(unittest.TestCase): From ad1fe5d96dd9f73afb06cd1ba5531eb0efedd629 Mon Sep 17 00:00:00 2001 From: Alix Hamilton Date: Fri, 3 Nov 2017 14:18:09 -0700 Subject: [PATCH 101/892] BigQuery: moves Row class out of helpers and updates docstrings (#4291) * moves Row class out of helpers and updates docstrings to specify Row as rtype * updates docstring references to classes to include the appropriate module * breaks up imports to different lines * renames single letter variables and fixes string formatting * adds todo to fix circular import * updates references to bigtable's Row class to differentiate from bigquery's Row class * Revert "adds todo to fix circular import" This reverts commit 1c2bbb01a5fa70d5ddbaf81576e84e760d8b9f6e. --- .../google/cloud/bigtable/table.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index 0e2a832d00ee..4e77d28e0e15 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -138,7 +138,7 @@ def row(self, row_key, filter_=None, append=False): .. warning:: At most one of ``filter_`` and ``append`` can be used in a - :class:`.Row`. + :class:`~google.cloud.bigtable.row.Row`. :type row_key: bytes :param row_key: The key for the row being created. @@ -151,7 +151,7 @@ def row(self, row_key, filter_=None, append=False): :param append: (Optional) Flag to determine if the row should be used for append mutations. - :rtype: :class:`.Row` + :rtype: :class:`~google.cloud.bigtable.row.Row` :returns: A row owned by this table. :raises: :class:`ValueError ` if both ``filter_`` and ``append`` are used. @@ -601,8 +601,9 @@ def _check_row_table_name(table_name, row): :type table_name: str :param table_name: The name of the table. - :type row: :class:`.Row` - :param row: An instance of :class:`.Row` subclasses. + :type row: :class:`~google.cloud.bigtable.row.Row` + :param row: An instance of :class:`~google.cloud.bigtable.row.Row` + subclasses. :raises: :exc:`~.table.TableMismatchError` if the row does not belong to the table. @@ -616,8 +617,9 @@ def _check_row_table_name(table_name, row): def _check_row_type(row): """Checks that a row is an instance of :class:`.DirectRow`. - :type row: :class:`.Row` - :param row: An instance of :class:`.Row` subclasses. 
+ :type row: :class:`~google.cloud.bigtable.row.Row` + :param row: An instance of :class:`~google.cloud.bigtable.row.Row` + subclasses. :raises: :class:`TypeError ` if the row is not an instance of DirectRow. From c39ddc0605e957a27eac0138be14f637a395e477 Mon Sep 17 00:00:00 2001 From: Justin Lin Date: Mon, 6 Nov 2017 06:23:24 -0800 Subject: [PATCH 102/892] Handle retry=None for mutate_rows, and some refactoring in test (#4341) --- .../google/cloud/bigtable/table.py | 42 +-- .../tests/unit/test_table.py | 245 ++++++++---------- 2 files changed, 128 insertions(+), 159 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index 4e77d28e0e15..100409e5e81c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -15,11 +15,7 @@ """User-friendly container for Google Cloud Bigtable Table.""" -from google.api_core.exceptions import Aborted -from google.api_core.exceptions import DeadlineExceeded -from google.api_core.exceptions import from_grpc_status from google.api_core.exceptions import RetryError -from google.api_core.exceptions import ServiceUnavailable from google.api_core.retry import if_exception_type from google.api_core.retry import Retry from google.cloud._helpers import _to_bytes @@ -43,14 +39,13 @@ # google.bigtable.v2#google.bigtable.v2.MutateRowRequest) _MAX_BULK_MUTATIONS = 100000 + +class _BigtableRetryableError(Exception): + """Retry-able error expected by the default retry strategy.""" + + DEFAULT_RETRY = Retry( - predicate=if_exception_type( - ( - Aborted, - DeadlineExceeded, - ServiceUnavailable, - ), - ), + predicate=if_exception_type(_BigtableRetryableError), initial=1.0, maximum=15.0, multiplier=2.0, @@ -425,11 +420,19 @@ def __call__(self, retry=DEFAULT_RETRY): corresponding to success or failure of each row mutation sent. These will be in the same order as the ``rows``. """ + mutate_rows = self._do_mutate_retryable_rows + if retry: + mutate_rows = retry(self._do_mutate_retryable_rows) + try: - retry(self._do_mutate_retryable_rows)() - except (RetryError, ValueError) as err: - # Upon timeout or sleep generator error, return responses_statuses + mutate_rows() + except (_BigtableRetryableError, RetryError) as err: + # - _BigtableRetryableError raised when no retry strategy is used + # and a retryable error on a mutation occurred. + # - RetryError raised when retry deadline is reached. + # In both cases, just return current `responses_statuses`. pass + return self.responses_statuses @staticmethod @@ -448,10 +451,8 @@ def _do_mutate_retryable_rows(self): :class:`~google.rpc.status_pb2.Status`. :raises: One of the following: - * :exc:`~google.api_core.exceptions.ServiceUnavailable` if any - row returned a transient error. This is "artificial" in - the sense that we intentionally raise the error because it - will be caught by the retry strategy. + * :exc:`~.table._BigtableRetryableError` if any + row returned a transient error. 
* :exc:`RuntimeError` if the number of responses doesn't match the number of rows that were retried """ @@ -485,12 +486,11 @@ def _do_mutate_retryable_rows(self): if len(retryable_rows) != num_responses: raise RuntimeError( - 'Unexpected the number of responses', num_responses, + 'Unexpected number of responses', num_responses, 'Expected', len(retryable_rows)) if num_retryable_responses: - raise from_grpc_status(StatusCode.UNAVAILABLE, - 'MutateRows retryable error.') + raise _BigtableRetryableError return self.responses_statuses diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index 69d49997ecca..5b904f091c15 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -551,11 +551,19 @@ def test_sample_row_keys(self): class Test__RetryableMutateRowsWorker(unittest.TestCase): + from grpc import StatusCode + PROJECT_ID = 'project-id' INSTANCE_ID = 'instance-id' INSTANCE_NAME = ('projects/' + PROJECT_ID + '/instances/' + INSTANCE_ID) TABLE_ID = 'table-id' + # RPC Status Codes + SUCCESS = StatusCode.OK.value[0] + RETRYABLE_1 = StatusCode.DEADLINE_EXCEEDED.value[0] + RETRYABLE_2 = StatusCode.ABORTED.value[0] + NON_RETRYABLE = StatusCode.CANCELLED.value[0] + @staticmethod def _get_target_class_for_worker(): from google.cloud.bigtable.table import _RetryableMutateRowsWorker @@ -580,6 +588,17 @@ def _make_responses_statuses(self, codes): response = [Status(code=code) for code in codes] return response + def _make_responses(self, codes): + import six + from google.cloud.bigtable._generated.bigtable_pb2 import ( + MutateRowsResponse) + from google.rpc.status_pb2 import Status + + entries = [MutateRowsResponse.Entry( + index=i, status=Status(code=codes[i])) + for i in six.moves.xrange(len(codes))] + return MutateRowsResponse(entries=entries) + def test_callable_empty_rows(self): client = _Client() instance = _Instance(self.INSTANCE_NAME, client=client) @@ -590,13 +609,53 @@ def test_callable_empty_rows(self): self.assertEqual(len(statuses), 0) + def test_callable_no_retry_strategy(self): + from google.api_core.retry import Retry + from google.cloud.bigtable.row import DirectRow + + # Setup: + # - Mutate 3 rows. + # Action: + # - Attempt to mutate the rows w/o any retry strategy. + # Expectation: + # - Since no retry, should return statuses as they come back. + # - Even if there are retryable errors, no retry attempt is made. + # - State of responses_statuses should be + # [success, retryable, non-retryable] + + client = _Client() + instance = _Instance(self.INSTANCE_NAME, client=client) + table = self._make_table(self.TABLE_ID, instance) + + row_1 = DirectRow(row_key=b'row_key', table=table) + row_1.set_cell('cf', b'col', b'value1') + row_2 = DirectRow(row_key=b'row_key_2', table=table) + row_2.set_cell('cf', b'col', b'value2') + row_3 = DirectRow(row_key=b'row_key_3', table=table) + row_3.set_cell('cf', b'col', b'value3') + + response = self._make_responses([ + self.SUCCESS, + self.RETRYABLE_1, + self.NON_RETRYABLE]) + + # Patch the stub used by the API method. 
+ client._data_stub = mock.MagicMock() + client._data_stub.MutateRows.return_value = [response] + + worker = self._make_worker(client, table.name, [row_1, row_2, row_3]) + statuses = worker(retry=None) + + result = [status.code for status in statuses] + expected_result = [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE] + + client._data_stub.MutateRows.assert_called_once() + self.assertEqual(result, expected_result) + def test_callable_retry(self): from google.api_core.retry import Retry - from google.cloud.bigtable._generated.bigtable_pb2 import ( - MutateRowsResponse) from google.cloud.bigtable.row import DirectRow from google.cloud.bigtable.table import DEFAULT_RETRY - from google.rpc.status_pb2 import Status # Setup: # - Mutate 3 rows. @@ -620,31 +679,11 @@ def test_callable_retry(self): row_3 = DirectRow(row_key=b'row_key_3', table=table) row_3.set_cell('cf', b'col', b'value3') - response_1 = MutateRowsResponse( - entries=[ - MutateRowsResponse.Entry( - index=0, - status=Status(code=0), - ), - MutateRowsResponse.Entry( - index=1, - status=Status(code=4), - ), - MutateRowsResponse.Entry( - index=2, - status=Status(code=1), - ), - ], - ) - - response_2 = MutateRowsResponse( - entries=[ - MutateRowsResponse.Entry( - index=0, - status=Status(code=0), - ), - ], - ) + response_1 = self._make_responses([ + self.SUCCESS, + self.RETRYABLE_1, + self.NON_RETRYABLE]) + response_2 = self._make_responses([self.SUCCESS]) # Patch the stub used by the API method. client._data_stub = mock.MagicMock() @@ -655,19 +694,18 @@ def test_callable_retry(self): statuses = worker(retry=retry) result = [status.code for status in statuses] - expected_result = [0, 0, 1] + expected_result = [self.SUCCESS, self.SUCCESS, self.NON_RETRYABLE] - client._data_stub.MutateRows.assert_has_calls([mock.call(mock.ANY), mock.call(mock.ANY)]) + client._data_stub.MutateRows.assert_has_calls([ + mock.call(mock.ANY), + mock.call(mock.ANY)]) self.assertEqual(client._data_stub.MutateRows.call_count, 2) self.assertEqual(result, expected_result) def test_callable_retry_timeout(self): from google.api_core.retry import Retry - from google.cloud.bigtable._generated.bigtable_pb2 import ( - MutateRowsResponse) from google.cloud.bigtable.row import DirectRow from google.cloud.bigtable.table import DEFAULT_RETRY - from google.rpc.status_pb2 import Status # Setup: # - Mutate 2 rows. @@ -689,18 +727,7 @@ def test_callable_retry_timeout(self): row_2 = DirectRow(row_key=b'row_key_2', table=table) row_2.set_cell('cf', b'col', b'value2') - response = MutateRowsResponse( - entries=[ - MutateRowsResponse.Entry( - index=0, - status=Status(code=4), - ), - MutateRowsResponse.Entry( - index=1, - status=Status(code=4), - ), - ], - ) + response = self._make_responses([self.RETRYABLE_1, self.RETRYABLE_1]) # Patch the stub used by the API method. 
client._data_stub = mock.MagicMock() @@ -712,7 +739,7 @@ def test_callable_retry_timeout(self): statuses = worker(retry=retry) result = [status.code for status in statuses] - expected_result = [4, 4] + expected_result = [self.RETRYABLE_1, self.RETRYABLE_1] self.assertTrue(client._data_stub.MutateRows.call_count > 1) self.assertEqual(result, expected_result) @@ -728,10 +755,7 @@ def test_do_mutate_retryable_rows_empty_rows(self): self.assertEqual(len(statuses), 0) def test_do_mutate_retryable_rows(self): - from google.cloud.bigtable._generated.bigtable_pb2 import ( - MutateRowsResponse) from google.cloud.bigtable.row import DirectRow - from google.rpc.status_pb2 import Status from tests.unit._testing import _FakeStub # Setup: @@ -750,18 +774,7 @@ def test_do_mutate_retryable_rows(self): row_2 = DirectRow(row_key=b'row_key_2', table=table) row_2.set_cell('cf', b'col', b'value2') - response = MutateRowsResponse( - entries=[ - MutateRowsResponse.Entry( - index=0, - status=Status(code=0), - ), - MutateRowsResponse.Entry( - index=1, - status=Status(code=1), - ), - ], - ) + response = self._make_responses([self.SUCCESS, self.NON_RETRYABLE]) # Patch the stub used by the API method. client._data_stub = _FakeStub([response]) @@ -771,16 +784,13 @@ def test_do_mutate_retryable_rows(self): statuses = worker._do_mutate_retryable_rows() result = [status.code for status in statuses] - expected_result = [0, 1] + expected_result = [self.SUCCESS, self.NON_RETRYABLE] self.assertEqual(result, expected_result) def test_do_mutate_retryable_rows_retry(self): - from google.api_core.exceptions import ServiceUnavailable - from google.cloud.bigtable._generated.bigtable_pb2 import ( - MutateRowsResponse) from google.cloud.bigtable.row import DirectRow - from google.rpc.status_pb2 import Status + from google.cloud.bigtable.table import _BigtableRetryableError from tests.unit._testing import _FakeStub # Setup: @@ -803,22 +813,10 @@ def test_do_mutate_retryable_rows_retry(self): row_3 = DirectRow(row_key=b'row_key_3', table=table) row_3.set_cell('cf', b'col', b'value3') - response = MutateRowsResponse( - entries=[ - MutateRowsResponse.Entry( - index=0, - status=Status(code=0), - ), - MutateRowsResponse.Entry( - index=1, - status=Status(code=4), - ), - MutateRowsResponse.Entry( - index=2, - status=Status(code=1), - ), - ], - ) + response = self._make_responses([ + self.SUCCESS, + self.RETRYABLE_1, + self.NON_RETRYABLE]) # Patch the stub used by the API method. 
client._data_stub = _FakeStub([response]) @@ -826,21 +824,18 @@ def test_do_mutate_retryable_rows_retry(self): worker = self._make_worker(table._instance._client, table.name, [row_1, row_2, row_3]) - with self.assertRaises(ServiceUnavailable): + with self.assertRaises(_BigtableRetryableError): worker._do_mutate_retryable_rows() statuses = worker.responses_statuses result = [status.code for status in statuses] - expected_result = [0, 4, 1] + expected_result = [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE] self.assertEqual(result, expected_result) def test_do_mutate_retryable_rows_second_retry(self): - from google.api_core.exceptions import ServiceUnavailable - from google.cloud.bigtable._generated.bigtable_pb2 import ( - MutateRowsResponse) from google.cloud.bigtable.row import DirectRow - from google.rpc.status_pb2 import Status + from google.cloud.bigtable.table import _BigtableRetryableError from tests.unit._testing import _FakeStub # Setup: @@ -870,41 +865,33 @@ def test_do_mutate_retryable_rows_second_retry(self): row_4 = DirectRow(row_key=b'row_key_4', table=table) row_4.set_cell('cf', b'col', b'value4') - response = MutateRowsResponse( - entries=[ - MutateRowsResponse.Entry( - index=0, - status=Status(code=0), - ), - MutateRowsResponse.Entry( - index=1, - status=Status(code=4), - ), - ], - ) + response = self._make_responses([self.SUCCESS, self.RETRYABLE_1]) # Patch the stub used by the API method. client._data_stub = _FakeStub([response]) worker = self._make_worker(table._instance._client, table.name, [row_1, row_2, row_3, row_4]) - worker.responses_statuses = self._make_responses_statuses( - [0, 4, 1, 10]) + worker.responses_statuses = self._make_responses_statuses([ + self.SUCCESS, + self.RETRYABLE_1, + self.NON_RETRYABLE, + self.RETRYABLE_2]) - with self.assertRaises(ServiceUnavailable): + with self.assertRaises(_BigtableRetryableError): worker._do_mutate_retryable_rows() statuses = worker.responses_statuses result = [status.code for status in statuses] - expected_result = [0, 0, 1, 4] + expected_result = [self.SUCCESS, + self.SUCCESS, + self.NON_RETRYABLE, + self.RETRYABLE_1] self.assertEqual(result, expected_result) def test_do_mutate_retryable_rows_second_try(self): - from google.cloud.bigtable._generated.bigtable_pb2 import ( - MutateRowsResponse) from google.cloud.bigtable.row import DirectRow - from google.rpc.status_pb2 import Status from tests.unit._testing import _FakeStub # Setup: @@ -930,39 +917,31 @@ def test_do_mutate_retryable_rows_second_try(self): row_4 = DirectRow(row_key=b'row_key_4', table=table) row_4.set_cell('cf', b'col', b'value4') - response = MutateRowsResponse( - entries=[ - MutateRowsResponse.Entry( - index=0, - status=Status(code=1), - ), - MutateRowsResponse.Entry( - index=1, - status=Status(code=0), - ), - ], - ) + response = self._make_responses([self.NON_RETRYABLE, self.SUCCESS]) # Patch the stub used by the API method. 
client._data_stub = _FakeStub([response]) worker = self._make_worker(table._instance._client, table.name, [row_1, row_2, row_3, row_4]) - worker.responses_statuses = self._make_responses_statuses( - [0, 4, 1, 10]) + worker.responses_statuses = self._make_responses_statuses([ + self.SUCCESS, + self.RETRYABLE_1, + self.NON_RETRYABLE, + self.RETRYABLE_2]) statuses = worker._do_mutate_retryable_rows() result = [status.code for status in statuses] - expected_result = [0, 1, 1, 0] + expected_result = [self.SUCCESS, + self.NON_RETRYABLE, + self.NON_RETRYABLE, + self.SUCCESS] self.assertEqual(result, expected_result) def test_do_mutate_retryable_rows_second_try_no_retryable(self): - from google.cloud.bigtable._generated.bigtable_pb2 import ( - MutateRowsResponse) from google.cloud.bigtable.row import DirectRow - from google.rpc.status_pb2 import Status from tests.unit._testing import _FakeStub # Setup: @@ -985,20 +964,17 @@ def test_do_mutate_retryable_rows_second_try_no_retryable(self): worker = self._make_worker(table._instance._client, table.name, [row_1, row_2]) worker.responses_statuses = self._make_responses_statuses( - [0, 1]) + [self.SUCCESS, self.NON_RETRYABLE]) statuses = worker._do_mutate_retryable_rows() result = [status.code for status in statuses] - expected_result = [0, 1] + expected_result = [self.SUCCESS, self.NON_RETRYABLE] self.assertEqual(result, expected_result) def test_do_mutate_retryable_rows_mismatch_num_responses(self): - from google.cloud.bigtable._generated.bigtable_pb2 import ( - MutateRowsResponse) from google.cloud.bigtable.row import DirectRow - from google.rpc.status_pb2 import Status from tests.unit._testing import _FakeStub client = _Client() @@ -1010,14 +986,7 @@ def test_do_mutate_retryable_rows_mismatch_num_responses(self): row_2 = DirectRow(row_key=b'row_key_2', table=table) row_2.set_cell('cf', b'col', b'value2') - response = MutateRowsResponse( - entries=[ - MutateRowsResponse.Entry( - index=0, - status=Status(code=0), - ), - ], - ) + response = self._make_responses([self.SUCCESS]) # Patch the stub used by the API method. client._data_stub = _FakeStub([response]) From 8a8c30545df24cbb07b2178a12da90b1b08bfd78 Mon Sep 17 00:00:00 2001 From: Cal Peyser Date: Wed, 15 Nov 2017 12:42:22 -0800 Subject: [PATCH 103/892] Add RPC retries to Bigtable (#3811) --- .../google/cloud/bigtable/retry.py | 205 +++++++++++++ .../google/cloud/bigtable/row_data.py | 13 + .../google/cloud/bigtable/table.py | 107 ++----- .../tests/retry_test_script.txt | 38 +++ .../google-cloud-bigtable/tests/system.py | 83 ++++++ .../tests/unit/_testing.py | 26 +- .../tests/unit/test_table.py | 277 +++++++++++++++++- 7 files changed, 661 insertions(+), 88 deletions(-) create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/retry.py create mode 100644 packages/google-cloud-bigtable/tests/retry_test_script.txt diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/retry.py b/packages/google-cloud-bigtable/google/cloud/bigtable/retry.py new file mode 100644 index 000000000000..687da4bc65cb --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/retry.py @@ -0,0 +1,205 @@ +# Copyright 2017 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Provides function wrappers that implement retrying.""" + +import random +import time +import six +import sys + +from google.cloud._helpers import _to_bytes +from google.cloud.bigtable._generated import ( + bigtable_pb2 as data_messages_v2_pb2) +from google.gax import config, errors +from grpc import RpcError + + +_MILLIS_PER_SECOND = 1000 + + +class ReadRowsIterator(object): + """Creates an iterator equivalent to a_iter, but that retries on certain + exceptions. + """ + + def __init__(self, client, name, start_key, end_key, filter_, limit, + end_inclusive, retry_options, **kwargs): + self.client = client + self.retry_options = retry_options + self.name = name + self.start_key = start_key + self.start_key_closed = True + self.end_key = end_key + self.filter_ = filter_ + self.limit = limit + self.end_inclusive = end_inclusive + self.delay_mult = retry_options.backoff_settings.retry_delay_multiplier + self.max_delay_millis = \ + retry_options.backoff_settings.max_retry_delay_millis + self.timeout_mult = \ + retry_options.backoff_settings.rpc_timeout_multiplier + self.max_timeout = \ + (retry_options.backoff_settings.max_rpc_timeout_millis / + _MILLIS_PER_SECOND) + self.total_timeout = \ + (retry_options.backoff_settings.total_timeout_millis / + _MILLIS_PER_SECOND) + self._responses_for_row = 0 + self.set_stream() + + def set_start_key(self, start_key): + """ + Sets the row key at which this iterator will begin reading. + """ + self.start_key = start_key + self.start_key_closed = False + + def set_stream(self): + """ + Resets the read stream by making an RPC on the 'ReadRows' endpoint. + """ + req_pb = _create_row_request(self.name, start_key=self.start_key, + start_key_closed=self.start_key_closed, + end_key=self.end_key, + filter_=self.filter_, limit=self.limit, + end_inclusive=self.end_inclusive) + self.stream = self.client._data_stub.ReadRows(req_pb) + + @property + def responses_for_row(self): + """ Property that gives the number of calls made so far for the current + row. If 1, then either this row is being read for the first time, + or the most recent response required a retry, causing the row to be + read again + + :rtype: int + :returns: Int that gives the number of calls made so far for the + current row. + """ + return self._responses_for_row + + def clear_responses_for_row(self): + """ + Signals that a new row has been started. + """ + self._responses_for_row = 0 + + def next(self, *args, **kwargs): + """ + Read and return the next chunk from the stream. + Retry on idempotent failure. 
+ """ + delay = self.retry_options.backoff_settings.initial_retry_delay_millis + exc = errors.RetryError('Retry total timeout exceeded before any' + 'response was received') + + now = time.time() + deadline = now + self.total_timeout + while deadline is None or now < deadline: + self._responses_for_row += 1 + try: + return(six.next(self.stream)) + except StopIteration as stop: + raise stop + except RpcError as error: # pylint: disable=broad-except + code = config.exc_to_code(error) + if code not in self.retry_options.retry_codes: + six.reraise(type(error), error) + + # pylint: disable=redefined-variable-type + exc = errors.RetryError( + 'Retry total timeout exceeded with exception', error) + + # Sleep a random number which will, on average, equal the + # expected delay. + to_sleep = random.uniform(0, delay * 2) + time.sleep(to_sleep / _MILLIS_PER_SECOND) + delay = min(delay * self.delay_mult, self.max_delay_millis) + now = time.time() + self._responses_for_row = 0 + self.set_stream() + + six.reraise(errors.RetryError, exc, sys.exc_info()[2]) + + def __next__(self, *args, **kwargs): + return self.next(*args, **kwargs) + + +def _create_row_request(table_name, row_key=None, start_key=None, + start_key_closed=True, end_key=None, filter_=None, + limit=None, end_inclusive=False): + """Creates a request to read rows in a table. + + :type table_name: str + :param table_name: The name of the table to read from. + + :type row_key: bytes + :param row_key: (Optional) The key of a specific row to read from. + + :type start_key: bytes + :param start_key: (Optional) The beginning of a range of row keys to + read from. The range will include ``start_key``. If + left empty, will be interpreted as the empty string. + + :type end_key: bytes + :param end_key: (Optional) The end of a range of row keys to read from. + The range will not include ``end_key``. If left empty, + will be interpreted as an infinite string. + + :type filter_: :class:`.RowFilter` + :param filter_: (Optional) The filter to apply to the contents of the + specified row(s). If unset, reads the entire table. + + :type limit: int + :param limit: (Optional) The read will terminate after committing to N + rows' worth of results. The default (zero) is to return + all results. + + :rtype: :class:`data_messages_v2_pb2.ReadRowsRequest` + :returns: The ``ReadRowsRequest`` protobuf corresponding to the inputs. 
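The retry loop in next() above sleeps a uniformly random interval that averages the current delay, then grows the delay geometrically up to the configured ceiling before re-opening the stream. A standalone sketch of that schedule, reusing the defaults from the BACKOFF_SETTINGS introduced later in this patch (illustration only, not part of the change):

    import random

    def backoff_delays(initial_ms=10, multiplier=1.3, max_ms=30000, attempts=5):
        # Yield jittered sleep times in seconds, mirroring ReadRowsIterator.next().
        delay = initial_ms
        for _ in range(attempts):
            # On average this sleeps `delay` milliseconds.
            yield random.uniform(0, delay * 2) / 1000.0
            delay = min(delay * multiplier, max_ms)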
+ :raises: :class:`ValueError ` if both + ``row_key`` and one of ``start_key`` and ``end_key`` are set + """ + request_kwargs = {'table_name': table_name} + if (row_key is not None and + (start_key is not None or end_key is not None)): + raise ValueError('Row key and row range cannot be ' + 'set simultaneously') + range_kwargs = {} + if start_key is not None or end_key is not None: + if start_key is not None: + if start_key_closed: + range_kwargs['start_key_closed'] = _to_bytes(start_key) + else: + range_kwargs['start_key_open'] = _to_bytes(start_key) + if end_key is not None: + end_key_key = 'end_key_open' + if end_inclusive: + end_key_key = 'end_key_closed' + range_kwargs[end_key_key] = _to_bytes(end_key) + if filter_ is not None: + request_kwargs['filter'] = filter_.to_pb() + if limit is not None: + request_kwargs['rows_limit'] = limit + + message = data_messages_v2_pb2.ReadRowsRequest(**request_kwargs) + + if row_key is not None: + message.rows.row_keys.append(_to_bytes(row_key)) + + if range_kwargs: + message.rows.row_ranges.add(**range_kwargs) + + return message diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py index 9bde1c0cb5a3..f45097f1d8e5 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py @@ -267,6 +267,10 @@ def consume_next(self): self._last_scanned_row_key = response.last_scanned_row_key + if hasattr(self._response_iterator, 'responses_for_row'): + if (self._response_iterator.responses_for_row == 1): + self._clear_accumulated_row() + row = self._row cell = self._cell @@ -300,6 +304,10 @@ def consume_next(self): if chunk.commit_row: self._save_current_row() + if hasattr(self._response_iterator, 'set_start_key'): + self._response_iterator.set_start_key(chunk.row_key) + if hasattr(self._response_iterator, 'clear_responses_for_row'): + self._response_iterator.clear_responses_for_row() row = cell = None continue @@ -345,6 +353,11 @@ def _validate_chunk_status(chunk): # No negative value_size (inferred as a general constraint). 
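Together, the hooks added to consume_next() and _create_row_request() implement scan resumption: every committed row advances the iterator's start key, and when the stream is re-opened after a failure the range uses start_key_open so the last delivered row is not read twice. A usage sketch of the two requests involved (the table name and keys are made up for illustration):

    from google.cloud.bigtable.retry import _create_row_request

    TABLE = 'projects/p/instances/i/tables/t'  # hypothetical name

    # Initial scan: closed start key, i.e. the range [b'r1', b'r9').
    first_pb = _create_row_request(TABLE, start_key=b'r1', end_key=b'r9')

    # After b'r4' was committed and the stream failed, the retry resumes
    # with an open start key, i.e. the range (b'r4', b'r9').
    resume_pb = _create_row_request(TABLE, start_key=b'r4',
                                    start_key_closed=False, end_key=b'r9')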
_raise_if(chunk.value_size < 0) + def _clear_accumulated_row(self): + self._row = None + self._cell = None + self._previous_cell = None + def _validate_chunk_new_row(self, chunk): """Helper for :meth:`_validate_chunk`.""" assert self.state == self.NEW_ROW diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index 100409e5e81c..fbebb58c968b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -18,7 +18,6 @@ from google.api_core.exceptions import RetryError from google.api_core.retry import if_exception_type from google.api_core.retry import Retry -from google.cloud._helpers import _to_bytes from google.cloud.bigtable._generated import ( bigtable_pb2 as data_messages_v2_pb2) from google.cloud.bigtable._generated import ( @@ -31,8 +30,27 @@ from google.cloud.bigtable.row import ConditionalRow from google.cloud.bigtable.row import DirectRow from google.cloud.bigtable.row_data import PartialRowsData +from google.gax import RetryOptions, BackoffSettings +from google.cloud.bigtable.retry import ReadRowsIterator, _create_row_request from grpc import StatusCode +BACKOFF_SETTINGS = BackoffSettings( + initial_retry_delay_millis=10, + retry_delay_multiplier=1.3, + max_retry_delay_millis=30000, + initial_rpc_timeout_millis=25 * 60 * 1000, + rpc_timeout_multiplier=1.0, + max_rpc_timeout_millis=25 * 60 * 1000, + total_timeout_millis=30 * 60 * 1000 +) + +RETRY_CODES = [ + StatusCode.DEADLINE_EXCEEDED, + StatusCode.ABORTED, + StatusCode.INTERNAL, + StatusCode.UNAVAILABLE +] + # Maximum number of mutations in bulk (MutateRowsRequest message): # (https://cloud.google.com/bigtable/docs/reference/data/rpc/ @@ -277,7 +295,7 @@ def read_row(self, row_key, filter_=None): return rows_data.rows[row_key] def read_rows(self, start_key=None, end_key=None, limit=None, - filter_=None, end_inclusive=False): + filter_=None, end_inclusive=False, backoff_settings=None): """Read rows from this table. :type start_key: bytes @@ -308,13 +326,18 @@ def read_rows(self, start_key=None, end_key=None, limit=None, :returns: A :class:`.PartialRowsData` convenience wrapper for consuming the streamed results. """ - request_pb = _create_row_request( - self.name, start_key=start_key, end_key=end_key, filter_=filter_, - limit=limit, end_inclusive=end_inclusive) client = self._instance._client - response_iterator = client._data_stub.ReadRows(request_pb) - # We expect an iterator of `data_messages_v2_pb2.ReadRowsResponse` - return PartialRowsData(response_iterator) + if backoff_settings is None: + backoff_settings = BACKOFF_SETTINGS + RETRY_OPTIONS = RetryOptions( + retry_codes=RETRY_CODES, + backoff_settings=backoff_settings + ) + + retrying_iterator = ReadRowsIterator(client, self.name, start_key, + end_key, filter_, limit, + end_inclusive, RETRY_OPTIONS) + return PartialRowsData(retrying_iterator) def mutate_rows(self, rows, retry=DEFAULT_RETRY): """Mutates multiple rows in bulk. @@ -495,74 +518,6 @@ def _do_mutate_retryable_rows(self): return self.responses_statuses -def _create_row_request(table_name, row_key=None, start_key=None, end_key=None, - filter_=None, limit=None, end_inclusive=False): - """Creates a request to read rows in a table. - - :type table_name: str - :param table_name: The name of the table to read from. - - :type row_key: bytes - :param row_key: (Optional) The key of a specific row to read from. 
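With this change read_rows() builds a ReadRowsIterator instead of calling the stub directly, and callers may pass their own BackoffSettings; omitting the argument falls back to the module-level BACKOFF_SETTINGS. A hedged usage sketch (the timeout values are illustrative, and `table` is assumed to be an existing Table instance):

    from google.gax import BackoffSettings

    one_minute_budget = BackoffSettings(
        initial_retry_delay_millis=10,
        retry_delay_multiplier=1.3,
        max_retry_delay_millis=30000,
        initial_rpc_timeout_millis=60 * 1000,
        rpc_timeout_multiplier=1.0,
        max_rpc_timeout_millis=60 * 1000,
        total_timeout_millis=60 * 1000,
    )

    rows = table.read_rows(start_key=b'r1', end_key=b'r9',
                           backoff_settings=one_minute_budget)
    rows.consume_all()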
- - :type start_key: bytes - :param start_key: (Optional) The beginning of a range of row keys to - read from. The range will include ``start_key``. If - left empty, will be interpreted as the empty string. - - :type end_key: bytes - :param end_key: (Optional) The end of a range of row keys to read from. - The range will not include ``end_key``. If left empty, - will be interpreted as an infinite string. - - :type filter_: :class:`.RowFilter` - :param filter_: (Optional) The filter to apply to the contents of the - specified row(s). If unset, reads the entire table. - - :type limit: int - :param limit: (Optional) The read will terminate after committing to N - rows' worth of results. The default (zero) is to return - all results. - - :type end_inclusive: bool - :param end_inclusive: (Optional) Whether the ``end_key`` should be - considered inclusive. The default is False (exclusive). - - :rtype: :class:`data_messages_v2_pb2.ReadRowsRequest` - :returns: The ``ReadRowsRequest`` protobuf corresponding to the inputs. - :raises: :class:`ValueError ` if both - ``row_key`` and one of ``start_key`` and ``end_key`` are set - """ - request_kwargs = {'table_name': table_name} - if (row_key is not None and - (start_key is not None or end_key is not None)): - raise ValueError('Row key and row range cannot be ' - 'set simultaneously') - range_kwargs = {} - if start_key is not None or end_key is not None: - if start_key is not None: - range_kwargs['start_key_closed'] = _to_bytes(start_key) - if end_key is not None: - end_key_key = 'end_key_open' - if end_inclusive: - end_key_key = 'end_key_closed' - range_kwargs[end_key_key] = _to_bytes(end_key) - if filter_ is not None: - request_kwargs['filter'] = filter_.to_pb() - if limit is not None: - request_kwargs['rows_limit'] = limit - - message = data_messages_v2_pb2.ReadRowsRequest(**request_kwargs) - - if row_key is not None: - message.rows.row_keys.append(_to_bytes(row_key)) - - if range_kwargs: - message.rows.row_ranges.add(**range_kwargs) - - return message - - def _mutate_rows_request(table_name, rows): """Creates a request to mutate rows in a table. diff --git a/packages/google-cloud-bigtable/tests/retry_test_script.txt b/packages/google-cloud-bigtable/tests/retry_test_script.txt new file mode 100644 index 000000000000..863662e897ba --- /dev/null +++ b/packages/google-cloud-bigtable/tests/retry_test_script.txt @@ -0,0 +1,38 @@ +# This retry script is processed by the retry server and the client under test. +# Client tests should parse any command beginning with "CLIENT:", send the corresponding RPC +# to the retry server and expect a valid response. +# "EXPECT" commands indicate the call the server is expecting the client to send. +# +# The retry server has one table named "table" that should be used for testing. +# There are three types of commands supported: +# READ +# Expect the corresponding rows to be returned with arbitrary values. +# SCAN ... +# Ranges are expressed as an interval with either open or closed start and end, +# such as [1,3) for "1,2" or (1, 3] for "2,3". +# WRITE +# All writes should succeed eventually. Value payload is ignored. +# The server writes PASS or FAIL on a line by itself to STDOUT depending on the result of the test. +# All other server output should be ignored. + +# Echo same scan back after immediate error +CLIENT: SCAN [r1,r3) r1,r2 +EXPECT: SCAN [r1,r3) +SERVER: ERROR Unavailable +EXPECT: SCAN [r1,r3) +SERVER: READ_RESPONSE r1,r2 + +# Retry scans with open interval starting at the least read row key. 
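The script expresses scan ranges in interval notation, e.g. [r1,r3) for a closed start and open end. A small helper of the kind a client harness might use to parse that notation (illustration only; the system test below handles just the '[start,end)' form):

    def parse_range(spec):
        # Split '[r1,r3)' or '(r4,r9]' into its four components.
        start_closed = spec.startswith('[')
        end_inclusive = spec.endswith(']')
        start, end = spec[1:-1].split(',')
        return start.strip(), start_closed, end.strip(), end_inclusive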
+# Instead of using open intervals for retry ranges, '\x00' can be +# appended to the last received row key and sent in a closed interval. +CLIENT: SCAN [r1,r9) r1,r2,r3,r4,r5,r6,r7,r8 +EXPECT: SCAN [r1,r9) +SERVER: READ_RESPONSE r1,r2,r3,r4 +SERVER: ERROR Unavailable +EXPECT: SCAN (r4,r9) +SERVER: ERROR Unavailable +EXPECT: SCAN (r4,r9) +SERVER: READ_RESPONSE r5,r6,r7 +SERVER: ERROR Unavailable +EXPECT: SCAN (r7,r9) +SERVER: READ_RESPONSE r8 diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index c889b181673e..4cbe4ef67152 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -272,6 +272,89 @@ def test_delete_column_family(self): # Make sure we have successfully deleted it. self.assertEqual(temp_table.list_column_families(), {}) + def test_retry(self): + import subprocess, os, stat, platform, ssl + from google.cloud.bigtable.client import Client + from google.cloud.bigtable.instance import Instance + from google.cloud.bigtable.table import Table + + # import for urlopen based on version + try: + # python 3 + from urllib.request import urlopen + except ImportError: + # python 2 + from urllib2 import urlopen + + TEST_SCRIPT = 'tests/retry_test_script.txt' + SERVER_NAME = 'retry_server' + SERVER_ZIP = SERVER_NAME + ".tar.gz" + + def download_server(): + MOCK_SERVER_URLS = { + 'Linux': 'https://storage.googleapis.com/cloud-bigtable-test/retries/retry_server_linux.tar.gz', + 'Darwin': 'https://storage.googleapis.com/cloud-bigtable-test/retries/retry_server_mac.tar.gz', + } + + test_platform = platform.system() + if test_platform not in MOCK_SERVER_URLS: + self.skip('Retry server not available for platform {0}.'.format(test_platform)) + + context = ssl._create_unverified_context() + mock_server_download = urlopen(MOCK_SERVER_URLS[test_platform], context=context).read() + mock_server_file = open(SERVER_ZIP, 'wb') + mock_server_file.write(mock_server_download) + + # Extract server binary from archive + subprocess.call(['tar', 'zxvf', SERVER_ZIP, '-C', '.']) + os.remove(SERVER_ZIP) + + def process_scan(table, range, ids): + range_chunks = range.split(',') + range_open = range_chunks[0].lstrip('[]') + range_close = range_chunks[1].rstrip(')') + rows = table.read_rows(range_open, range_close) + rows.consume_all() + + should_download = os.environ.get('DOWNLOAD_BIGTABLE_TEST_SERVER') + if should_download is None or should_download == '1': + if not os.path.isfile(SERVER_NAME): + download_server() + + # Connect to server + server = subprocess.Popen( + ['./' + SERVER_NAME, '--script=' + TEST_SCRIPT], + stdin=subprocess.PIPE, stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + + (endpoint, port) = server.stdout.readline().decode("utf-8").rstrip("\n").split(":") + os.environ["BIGTABLE_EMULATOR_HOST"] = endpoint + ":" + port + client = Client(project="client", admin=True) + instance = Instance("instance", client) + table = instance.table("table") + + # Run test, line by line + with open(TEST_SCRIPT, 'r') as script: + for line in script.readlines(): + if line.startswith("CLIENT:"): + chunks = line.split(" ") + op = chunks[1] + process_scan(table, chunks[2], chunks[3]) + + # Check that the test passed + server.kill() + server_stdout_lines = [] + while True: + line = server.stdout.readline().decode("utf-8") + if line != '': + server_stdout_lines.append(line) + else: + break + self.assertEqual(server_stdout_lines[-1], "PASS\n") + + # Clean up + os.remove(SERVER_NAME) class 
TestDataAPI(unittest.TestCase): diff --git a/packages/google-cloud-bigtable/tests/unit/_testing.py b/packages/google-cloud-bigtable/tests/unit/_testing.py index cfa24c062660..9192a134854e 100644 --- a/packages/google-cloud-bigtable/tests/unit/_testing.py +++ b/packages/google-cloud-bigtable/tests/unit/_testing.py @@ -14,7 +14,6 @@ """Mocks used to emulate gRPC generated objects.""" - class _FakeStub(object): """Acts as a gPRC stub.""" @@ -27,6 +26,17 @@ def __getattr__(self, name): # since __getattribute__ will handle them. return _MethodMock(name, self) +class _CustomFakeStub(object): + """Acts as a gRPC stub. Generates a result from a given iterator + """ + def __init__(self, result): + self.result = result + self.method_calls = [] + + def __getattr__(self, name): + # We need not worry about attributes set in constructor + # since __getattribute__ will handle them. + return _CustomMethodMock(name, self) class _MethodMock(object): """Mock for API method attached to a gRPC stub. @@ -44,3 +54,17 @@ def __call__(self, *args, **kwargs): curr_result, self._stub.results = (self._stub.results[0], self._stub.results[1:]) return curr_result + +class _CustomMethodMock(object): + """ + Same as _MethodMock, but backed by an injected callable. + """ + + def __init__(self, name, stub): + self._name = name + self._stub = stub + + def __call__(self, *args, **kwargs): + """Sync method meant to mock a gRPC stub request.""" + self._stub.method_calls.append((self._name, args, kwargs)) + return self._stub.result diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index 5b904f091c15..943cceb6f6a8 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -473,7 +473,8 @@ def test_read_rows(self): from google.cloud._testing import _Monkey from tests.unit._testing import _FakeStub from google.cloud.bigtable.row_data import PartialRowsData - from google.cloud.bigtable import table as MUT + from google.cloud.bigtable import retry as MUT + from google.cloud.bigtable.retry import ReadRowsIterator client = _Client() instance = _Instance(self.INSTANCE_NAME, client=client) @@ -493,20 +494,18 @@ def mock_create_row_request(table_name, **kwargs): # Patch the stub used by the API method. client._data_stub = stub = _FakeStub(response_iterator) - # Create expected_result. - expected_result = PartialRowsData(response_iterator) - - # Perform the method and check the result. start_key = b'start-key' end_key = b'end-key' filter_obj = object() limit = 22 with _Monkey(MUT, _create_row_request=mock_create_row_request): + # Perform the method and check the result. 
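Unlike _FakeStub, which pops one queued result per call, the _CustomFakeStub added above returns the same injected object on every call; that is what lets ReadRowsIterator.set_stream() 're-open' the stream repeatedly in the retry tests. A minimal usage sketch (the empty-stream class is a stand-in, not part of the patch):

    from tests.unit._testing import _CustomFakeStub

    class _EmptyStream(object):
        # Stand-in response iterator that ends immediately.
        def __next__(self):
            raise StopIteration
        next = __next__

    stub = _CustomFakeStub(_EmptyStream())
    stream = stub.ReadRows(object())        # records ('ReadRows', args, kwargs)
    assert stream is stub.result            # same object handed back every time
    assert stub.method_calls[0][0] == 'ReadRows'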
result = table.read_rows( start_key=start_key, end_key=end_key, filter_=filter_obj, limit=limit) - self.assertEqual(result, expected_result) + self.assertIsInstance(result._response_iterator, ReadRowsIterator) + self.assertEqual(result._response_iterator.client, client) self.assertEqual(stub.method_calls, [( 'ReadRows', (request_pb,), @@ -514,13 +513,258 @@ def mock_create_row_request(table_name, **kwargs): )]) created_kwargs = { 'start_key': start_key, + 'end_inclusive': False, 'end_key': end_key, 'filter_': filter_obj, 'limit': limit, - 'end_inclusive': False, + 'start_key_closed': True, } self.assertEqual(mock_created, [(table.name, created_kwargs)]) + def test_read_rows_one_chunk(self): + from google.cloud._testing import _Monkey + from tests.unit._testing import _FakeStub + from google.cloud.bigtable import retry as MUT + from google.cloud.bigtable.retry import ReadRowsIterator + from google.cloud.bigtable.row_data import Cell + from google.cloud.bigtable.row_data import PartialRowsData + + client = _Client() + instance = _Instance(self.INSTANCE_NAME, client=client) + table = self._make_one(self.TABLE_ID, instance) + + # Create request_pb + request_pb = object() # Returned by our mock. + mock_created = [] + + def mock_create_row_request(table_name, **kwargs): + mock_created.append((table_name, kwargs)) + return request_pb + + # Create response_iterator + chunk = _ReadRowsResponseCellChunkPB( + row_key=self.ROW_KEY, + family_name=self.FAMILY_NAME, + qualifier=self.QUALIFIER, + timestamp_micros=self.TIMESTAMP_MICROS, + value=self.VALUE, + commit_row=True, + ) + response_pb = _ReadRowsResponsePB(chunks=[chunk]) + response_iterator = iter([response_pb]) + + # Patch the stub used by the API method. + client._data_stub = stub = _FakeStub(response_iterator) + + start_key = b'start-key' + end_key = b'end-key' + filter_obj = object() + limit = 22 + with _Monkey(MUT, _create_row_request=mock_create_row_request): + # Perform the method and check the result. + result = table.read_rows( + start_key=start_key, end_key=end_key, filter_=filter_obj, + limit=limit) + result.consume_all() + + def test_read_rows_retry_timeout(self): + from google.cloud._testing import _Monkey + from tests.unit._testing import _CustomFakeStub + from google.cloud.bigtable.row_data import PartialRowsData + from google.cloud.bigtable import retry as MUT + from google.cloud.bigtable.retry import ReadRowsIterator + from google.gax import BackoffSettings + from google.gax.errors import RetryError + from grpc import StatusCode, RpcError + import time + + client = _Client() + instance = _Instance(self.INSTANCE_NAME, client=client) + table = self._make_one(self.TABLE_ID, instance) + + # Create request_pb + request_pb = object() # Returned by our mock. 
+ mock_created = [] + + def mock_create_row_request(table_name, **kwargs): + mock_created.append((table_name, kwargs)) + return request_pb + + # Create a slow response iterator to cause a timeout + class MockTimeoutError(RpcError): + def code(self): + return StatusCode.DEADLINE_EXCEEDED + + class MockTimeoutIterator(object): + def next(self): + return self.__next__() + def __next__(self): + raise MockTimeoutError() + + client._data_stub = stub = _CustomFakeStub(MockTimeoutIterator()) + + # Set to timeout before RPC completes + test_backoff_settings = BackoffSettings( + initial_retry_delay_millis=10, + retry_delay_multiplier=0.3, + max_retry_delay_millis=30000, + initial_rpc_timeout_millis=1000, + rpc_timeout_multiplier=1.0, + max_rpc_timeout_millis=25 * 60 * 1000, + total_timeout_millis=1000 + ) + + start_key = b'start-key' + end_key = b'end-key' + filter_obj = object() + limit = 22 + with _Monkey(MUT, _create_row_request=mock_create_row_request): + # Verify that a RetryError is thrown on read. + result = table.read_rows( + start_key=start_key, end_key=end_key, filter_=filter_obj, + limit=limit, backoff_settings=test_backoff_settings) + with self.assertRaises(RetryError): + result.consume_next() + + def test_read_rows_mid_row_timeout_retry(self): + from google.cloud._testing import _Monkey + from tests.unit._testing import _CustomFakeStub + from google.cloud.bigtable.row_data import PartialRowsData + from google.cloud.bigtable import retry as MUT + from google.cloud.bigtable.retry import ReadRowsIterator + from google.gax import BackoffSettings + from google.gax.errors import RetryError + from grpc import StatusCode, RpcError + import time + + client = _Client() + instance = _Instance(self.INSTANCE_NAME, client=client) + table = self._make_one(self.TABLE_ID, instance) + + # Create request_pb + request_pb = object() # Returned by our mock. 
+ mock_created = [] + + def mock_create_row_request(table_name, **kwargs): + mock_created.append((table_name, kwargs)) + return request_pb + + # Create an iterator that throws an idempotent exception + class MockTimeoutError(RpcError): + def code(self): + return StatusCode.DEADLINE_EXCEEDED + + first_chunk = _ReadRowsResponseCellChunkPB( + row_key=self.ROW_KEY, + family_name=self.FAMILY_NAME, + qualifier=self.QUALIFIER, + timestamp_micros=self.TIMESTAMP_MICROS, + value=self.VALUE, + ) + first_response = _ReadRowsResponsePB(chunks = [first_chunk]) + + second_chunk = _ReadRowsResponseCellChunkPB( + row_key=self.ROW_KEY, + family_name=self.FAMILY_NAME, + qualifier=self.QUALIFIER, + timestamp_micros=self.TIMESTAMP_MICROS, + value=self.VALUE, + commit_row=True, + ) + second_response = _ReadRowsResponsePB(chunks = [second_chunk]) + + class MidRowTimeoutIterator(object): + def __init__(self): + self.invocation_count = 0 + def next(self): + return self.__next__() + def __next__(self): + self.invocation_count += 1 + if (self.invocation_count == 1): + return first_response + elif (self.invocation_count == 2): + raise MockTimeoutError() + elif (self.invocation_count == 3): + return first_response + elif (self.invocation_count == 4): + return second_response + else: + raise StopIteration + + client._data_stub = stub = _CustomFakeStub(MidRowTimeoutIterator()) + + # Set to timeout before RPC completes + test_backoff_settings = BackoffSettings( + initial_retry_delay_millis=10, + retry_delay_multiplier=1, + max_retry_delay_millis=30000, + initial_rpc_timeout_millis=1000, + rpc_timeout_multiplier=1.0, + max_rpc_timeout_millis=25 * 60 * 1000, + total_timeout_millis=1000 + ) + + start_key = b'start-key' + end_key = b'end-key' + filter_obj = object() + limit = 22 + with _Monkey(MUT, _create_row_request=mock_create_row_request): + # Verify that a RetryError is thrown on read. + result = table.read_rows( + start_key=start_key, end_key=end_key, filter_=filter_obj, + limit=limit, backoff_settings=test_backoff_settings) + result.consume_all() + + cells = result.rows[self.ROW_KEY].cells[self.FAMILY_NAME][self.QUALIFIER] + self.assertEquals(len(cells), 2) + + def test_read_rows_non_idempotent_error_throws(self): + from google.cloud._testing import _Monkey + from tests.unit._testing import _CustomFakeStub + from google.cloud.bigtable.row_data import PartialRowsData + from google.cloud.bigtable import retry as MUT + from google.cloud.bigtable.retry import ReadRowsIterator + from google.gax import BackoffSettings + from google.gax.errors import RetryError + from grpc import StatusCode, RpcError + import time + + client = _Client() + instance = _Instance(self.INSTANCE_NAME, client=client) + table = self._make_one(self.TABLE_ID, instance) + + # Create request_pb + request_pb = object() # Returned by our mock. 
+ mock_created = [] + + def mock_create_row_request(table_name, **kwargs): + mock_created.append((table_name, kwargs)) + return request_pb + + # Create response iterator that raises a non-idempotent exception + class MockNonIdempotentError(RpcError): + def code(self): + return StatusCode.RESOURCE_EXHAUSTED + + class MockNonIdempotentIterator(object): + def next(self): + return self.__next__() + def __next__(self): + raise MockNonIdempotentError() + + client._data_stub = stub = _CustomFakeStub(MockNonIdempotentIterator()) + + start_key = b'start-key' + end_key = b'end-key' + filter_obj = object() + limit = 22 + with _Monkey(MUT, _create_row_request=mock_create_row_request): + result = table.read_rows( + start_key=start_key, end_key=end_key, filter_=filter_obj, + limit=limit) + with self.assertRaises(MockNonIdempotentError): + result.consume_next() + def test_sample_row_keys(self): from tests.unit._testing import _FakeStub @@ -1000,12 +1244,14 @@ def test_do_mutate_retryable_rows_mismatch_num_responses(self): class Test__create_row_request(unittest.TestCase): def _call_fut(self, table_name, row_key=None, start_key=None, end_key=None, - filter_=None, limit=None, end_inclusive=False): - from google.cloud.bigtable.table import _create_row_request + filter_=None, limit=None, start_key_closed=True, + end_inclusive=False): + from google.cloud.bigtable.retry import _create_row_request return _create_row_request( table_name, row_key=row_key, start_key=start_key, end_key=end_key, - filter_=filter_, limit=limit, end_inclusive=end_inclusive) + start_key_closed=start_key_closed, filter_=filter_, + limit=limit, end_inclusive=end_inclusive) def test_table_name_only(self): table_name = 'table_name' @@ -1028,7 +1274,7 @@ def test_row_key(self): expected_result.rows.row_keys.append(row_key) self.assertEqual(result, expected_result) - def test_row_range_start_key(self): + def test_row_range_start_key_closed(self): table_name = 'table_name' start_key = b'start_key' result = self._call_fut(table_name, start_key=start_key) @@ -1036,6 +1282,15 @@ def test_row_range_start_key(self): expected_result.rows.row_ranges.add(start_key_closed=start_key) self.assertEqual(result, expected_result) + def test_row_range_start_key_open(self): + table_name = 'table_name' + start_key = b'start_key' + result = self._call_fut(table_name, start_key=start_key, + start_key_closed=False) + expected_result = _ReadRowsRequestPB(table_name=table_name) + expected_result.rows.row_ranges.add(start_key_open=start_key) + self.assertEqual(result, expected_result) + def test_row_range_end_key(self): table_name = 'table_name' end_key = b'end_key' From f425409f0f976b6e85b923156412fcc7335bc10d Mon Sep 17 00:00:00 2001 From: Danny Hermes Date: Mon, 4 Dec 2017 11:15:00 -0800 Subject: [PATCH 104/892] Revert "Add RPC retries to Bigtable (#3811)" (#4524) This reverts commit 1c699f36584d8c507118dae9ab8a2a31bc951ce6 / PR #3811. 
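The revert removes the GAX-based ReadRows retry machinery but leaves the mutate_rows retry path, exercised by the tests earlier in this series, in place; that path is driven by a google.api_core Retry built around _BigtableRetryableError. A sketch of such a retry object, with illustrative timing values rather than the module's actual defaults:

    from google.api_core.retry import Retry, if_exception_type
    from google.cloud.bigtable.table import _BigtableRetryableError

    # Timing values are illustrative; table.py defines its own DEFAULT_RETRY.
    retry = Retry(
        predicate=if_exception_type(_BigtableRetryableError),
        initial=1.0,      # first delay, in seconds
        maximum=15.0,     # ceiling on any single delay
        multiplier=2.0,   # exponential growth factor
        deadline=120.0,   # overall retry budget, in seconds
    )

    # As in test_callable_retry_timeout above, a mutate-rows worker is then
    # invoked with it:  statuses = worker(retry=retry)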
--- .../google/cloud/bigtable/retry.py | 205 ------------- .../google/cloud/bigtable/row_data.py | 13 - .../google/cloud/bigtable/table.py | 107 +++++-- .../tests/retry_test_script.txt | 38 --- .../google-cloud-bigtable/tests/system.py | 83 ------ .../tests/unit/_testing.py | 26 +- .../tests/unit/test_table.py | 277 +----------------- 7 files changed, 88 insertions(+), 661 deletions(-) delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/retry.py delete mode 100644 packages/google-cloud-bigtable/tests/retry_test_script.txt diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/retry.py b/packages/google-cloud-bigtable/google/cloud/bigtable/retry.py deleted file mode 100644 index 687da4bc65cb..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/retry.py +++ /dev/null @@ -1,205 +0,0 @@ -# Copyright 2017 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Provides function wrappers that implement retrying.""" - -import random -import time -import six -import sys - -from google.cloud._helpers import _to_bytes -from google.cloud.bigtable._generated import ( - bigtable_pb2 as data_messages_v2_pb2) -from google.gax import config, errors -from grpc import RpcError - - -_MILLIS_PER_SECOND = 1000 - - -class ReadRowsIterator(object): - """Creates an iterator equivalent to a_iter, but that retries on certain - exceptions. - """ - - def __init__(self, client, name, start_key, end_key, filter_, limit, - end_inclusive, retry_options, **kwargs): - self.client = client - self.retry_options = retry_options - self.name = name - self.start_key = start_key - self.start_key_closed = True - self.end_key = end_key - self.filter_ = filter_ - self.limit = limit - self.end_inclusive = end_inclusive - self.delay_mult = retry_options.backoff_settings.retry_delay_multiplier - self.max_delay_millis = \ - retry_options.backoff_settings.max_retry_delay_millis - self.timeout_mult = \ - retry_options.backoff_settings.rpc_timeout_multiplier - self.max_timeout = \ - (retry_options.backoff_settings.max_rpc_timeout_millis / - _MILLIS_PER_SECOND) - self.total_timeout = \ - (retry_options.backoff_settings.total_timeout_millis / - _MILLIS_PER_SECOND) - self._responses_for_row = 0 - self.set_stream() - - def set_start_key(self, start_key): - """ - Sets the row key at which this iterator will begin reading. - """ - self.start_key = start_key - self.start_key_closed = False - - def set_stream(self): - """ - Resets the read stream by making an RPC on the 'ReadRows' endpoint. - """ - req_pb = _create_row_request(self.name, start_key=self.start_key, - start_key_closed=self.start_key_closed, - end_key=self.end_key, - filter_=self.filter_, limit=self.limit, - end_inclusive=self.end_inclusive) - self.stream = self.client._data_stub.ReadRows(req_pb) - - @property - def responses_for_row(self): - """ Property that gives the number of calls made so far for the current - row. 
If 1, then either this row is being read for the first time, - or the most recent response required a retry, causing the row to be - read again - - :rtype: int - :returns: Int that gives the number of calls made so far for the - current row. - """ - return self._responses_for_row - - def clear_responses_for_row(self): - """ - Signals that a new row has been started. - """ - self._responses_for_row = 0 - - def next(self, *args, **kwargs): - """ - Read and return the next chunk from the stream. - Retry on idempotent failure. - """ - delay = self.retry_options.backoff_settings.initial_retry_delay_millis - exc = errors.RetryError('Retry total timeout exceeded before any' - 'response was received') - - now = time.time() - deadline = now + self.total_timeout - while deadline is None or now < deadline: - self._responses_for_row += 1 - try: - return(six.next(self.stream)) - except StopIteration as stop: - raise stop - except RpcError as error: # pylint: disable=broad-except - code = config.exc_to_code(error) - if code not in self.retry_options.retry_codes: - six.reraise(type(error), error) - - # pylint: disable=redefined-variable-type - exc = errors.RetryError( - 'Retry total timeout exceeded with exception', error) - - # Sleep a random number which will, on average, equal the - # expected delay. - to_sleep = random.uniform(0, delay * 2) - time.sleep(to_sleep / _MILLIS_PER_SECOND) - delay = min(delay * self.delay_mult, self.max_delay_millis) - now = time.time() - self._responses_for_row = 0 - self.set_stream() - - six.reraise(errors.RetryError, exc, sys.exc_info()[2]) - - def __next__(self, *args, **kwargs): - return self.next(*args, **kwargs) - - -def _create_row_request(table_name, row_key=None, start_key=None, - start_key_closed=True, end_key=None, filter_=None, - limit=None, end_inclusive=False): - """Creates a request to read rows in a table. - - :type table_name: str - :param table_name: The name of the table to read from. - - :type row_key: bytes - :param row_key: (Optional) The key of a specific row to read from. - - :type start_key: bytes - :param start_key: (Optional) The beginning of a range of row keys to - read from. The range will include ``start_key``. If - left empty, will be interpreted as the empty string. - - :type end_key: bytes - :param end_key: (Optional) The end of a range of row keys to read from. - The range will not include ``end_key``. If left empty, - will be interpreted as an infinite string. - - :type filter_: :class:`.RowFilter` - :param filter_: (Optional) The filter to apply to the contents of the - specified row(s). If unset, reads the entire table. - - :type limit: int - :param limit: (Optional) The read will terminate after committing to N - rows' worth of results. The default (zero) is to return - all results. - - :rtype: :class:`data_messages_v2_pb2.ReadRowsRequest` - :returns: The ``ReadRowsRequest`` protobuf corresponding to the inputs. 
- :raises: :class:`ValueError ` if both - ``row_key`` and one of ``start_key`` and ``end_key`` are set - """ - request_kwargs = {'table_name': table_name} - if (row_key is not None and - (start_key is not None or end_key is not None)): - raise ValueError('Row key and row range cannot be ' - 'set simultaneously') - range_kwargs = {} - if start_key is not None or end_key is not None: - if start_key is not None: - if start_key_closed: - range_kwargs['start_key_closed'] = _to_bytes(start_key) - else: - range_kwargs['start_key_open'] = _to_bytes(start_key) - if end_key is not None: - end_key_key = 'end_key_open' - if end_inclusive: - end_key_key = 'end_key_closed' - range_kwargs[end_key_key] = _to_bytes(end_key) - if filter_ is not None: - request_kwargs['filter'] = filter_.to_pb() - if limit is not None: - request_kwargs['rows_limit'] = limit - - message = data_messages_v2_pb2.ReadRowsRequest(**request_kwargs) - - if row_key is not None: - message.rows.row_keys.append(_to_bytes(row_key)) - - if range_kwargs: - message.rows.row_ranges.add(**range_kwargs) - - return message diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py index f45097f1d8e5..9bde1c0cb5a3 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py @@ -267,10 +267,6 @@ def consume_next(self): self._last_scanned_row_key = response.last_scanned_row_key - if hasattr(self._response_iterator, 'responses_for_row'): - if (self._response_iterator.responses_for_row == 1): - self._clear_accumulated_row() - row = self._row cell = self._cell @@ -304,10 +300,6 @@ def consume_next(self): if chunk.commit_row: self._save_current_row() - if hasattr(self._response_iterator, 'set_start_key'): - self._response_iterator.set_start_key(chunk.row_key) - if hasattr(self._response_iterator, 'clear_responses_for_row'): - self._response_iterator.clear_responses_for_row() row = cell = None continue @@ -353,11 +345,6 @@ def _validate_chunk_status(chunk): # No negative value_size (inferred as a general constraint). 
_raise_if(chunk.value_size < 0) - def _clear_accumulated_row(self): - self._row = None - self._cell = None - self._previous_cell = None - def _validate_chunk_new_row(self, chunk): """Helper for :meth:`_validate_chunk`.""" assert self.state == self.NEW_ROW diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index fbebb58c968b..100409e5e81c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -18,6 +18,7 @@ from google.api_core.exceptions import RetryError from google.api_core.retry import if_exception_type from google.api_core.retry import Retry +from google.cloud._helpers import _to_bytes from google.cloud.bigtable._generated import ( bigtable_pb2 as data_messages_v2_pb2) from google.cloud.bigtable._generated import ( @@ -30,27 +31,8 @@ from google.cloud.bigtable.row import ConditionalRow from google.cloud.bigtable.row import DirectRow from google.cloud.bigtable.row_data import PartialRowsData -from google.gax import RetryOptions, BackoffSettings -from google.cloud.bigtable.retry import ReadRowsIterator, _create_row_request from grpc import StatusCode -BACKOFF_SETTINGS = BackoffSettings( - initial_retry_delay_millis=10, - retry_delay_multiplier=1.3, - max_retry_delay_millis=30000, - initial_rpc_timeout_millis=25 * 60 * 1000, - rpc_timeout_multiplier=1.0, - max_rpc_timeout_millis=25 * 60 * 1000, - total_timeout_millis=30 * 60 * 1000 -) - -RETRY_CODES = [ - StatusCode.DEADLINE_EXCEEDED, - StatusCode.ABORTED, - StatusCode.INTERNAL, - StatusCode.UNAVAILABLE -] - # Maximum number of mutations in bulk (MutateRowsRequest message): # (https://cloud.google.com/bigtable/docs/reference/data/rpc/ @@ -295,7 +277,7 @@ def read_row(self, row_key, filter_=None): return rows_data.rows[row_key] def read_rows(self, start_key=None, end_key=None, limit=None, - filter_=None, end_inclusive=False, backoff_settings=None): + filter_=None, end_inclusive=False): """Read rows from this table. :type start_key: bytes @@ -326,18 +308,13 @@ def read_rows(self, start_key=None, end_key=None, limit=None, :returns: A :class:`.PartialRowsData` convenience wrapper for consuming the streamed results. """ + request_pb = _create_row_request( + self.name, start_key=start_key, end_key=end_key, filter_=filter_, + limit=limit, end_inclusive=end_inclusive) client = self._instance._client - if backoff_settings is None: - backoff_settings = BACKOFF_SETTINGS - RETRY_OPTIONS = RetryOptions( - retry_codes=RETRY_CODES, - backoff_settings=backoff_settings - ) - - retrying_iterator = ReadRowsIterator(client, self.name, start_key, - end_key, filter_, limit, - end_inclusive, RETRY_OPTIONS) - return PartialRowsData(retrying_iterator) + response_iterator = client._data_stub.ReadRows(request_pb) + # We expect an iterator of `data_messages_v2_pb2.ReadRowsResponse` + return PartialRowsData(response_iterator) def mutate_rows(self, rows, retry=DEFAULT_RETRY): """Mutates multiple rows in bulk. @@ -518,6 +495,74 @@ def _do_mutate_retryable_rows(self): return self.responses_statuses +def _create_row_request(table_name, row_key=None, start_key=None, end_key=None, + filter_=None, limit=None, end_inclusive=False): + """Creates a request to read rows in a table. + + :type table_name: str + :param table_name: The name of the table to read from. + + :type row_key: bytes + :param row_key: (Optional) The key of a specific row to read from. 
+ + :type start_key: bytes + :param start_key: (Optional) The beginning of a range of row keys to + read from. The range will include ``start_key``. If + left empty, will be interpreted as the empty string. + + :type end_key: bytes + :param end_key: (Optional) The end of a range of row keys to read from. + The range will not include ``end_key``. If left empty, + will be interpreted as an infinite string. + + :type filter_: :class:`.RowFilter` + :param filter_: (Optional) The filter to apply to the contents of the + specified row(s). If unset, reads the entire table. + + :type limit: int + :param limit: (Optional) The read will terminate after committing to N + rows' worth of results. The default (zero) is to return + all results. + + :type end_inclusive: bool + :param end_inclusive: (Optional) Whether the ``end_key`` should be + considered inclusive. The default is False (exclusive). + + :rtype: :class:`data_messages_v2_pb2.ReadRowsRequest` + :returns: The ``ReadRowsRequest`` protobuf corresponding to the inputs. + :raises: :class:`ValueError ` if both + ``row_key`` and one of ``start_key`` and ``end_key`` are set + """ + request_kwargs = {'table_name': table_name} + if (row_key is not None and + (start_key is not None or end_key is not None)): + raise ValueError('Row key and row range cannot be ' + 'set simultaneously') + range_kwargs = {} + if start_key is not None or end_key is not None: + if start_key is not None: + range_kwargs['start_key_closed'] = _to_bytes(start_key) + if end_key is not None: + end_key_key = 'end_key_open' + if end_inclusive: + end_key_key = 'end_key_closed' + range_kwargs[end_key_key] = _to_bytes(end_key) + if filter_ is not None: + request_kwargs['filter'] = filter_.to_pb() + if limit is not None: + request_kwargs['rows_limit'] = limit + + message = data_messages_v2_pb2.ReadRowsRequest(**request_kwargs) + + if row_key is not None: + message.rows.row_keys.append(_to_bytes(row_key)) + + if range_kwargs: + message.rows.row_ranges.add(**range_kwargs) + + return message + + def _mutate_rows_request(table_name, rows): """Creates a request to mutate rows in a table. diff --git a/packages/google-cloud-bigtable/tests/retry_test_script.txt b/packages/google-cloud-bigtable/tests/retry_test_script.txt deleted file mode 100644 index 863662e897ba..000000000000 --- a/packages/google-cloud-bigtable/tests/retry_test_script.txt +++ /dev/null @@ -1,38 +0,0 @@ -# This retry script is processed by the retry server and the client under test. -# Client tests should parse any command beginning with "CLIENT:", send the corresponding RPC -# to the retry server and expect a valid response. -# "EXPECT" commands indicate the call the server is expecting the client to send. -# -# The retry server has one table named "table" that should be used for testing. -# There are three types of commands supported: -# READ -# Expect the corresponding rows to be returned with arbitrary values. -# SCAN ... -# Ranges are expressed as an interval with either open or closed start and end, -# such as [1,3) for "1,2" or (1, 3] for "2,3". -# WRITE -# All writes should succeed eventually. Value payload is ignored. -# The server writes PASS or FAIL on a line by itself to STDOUT depending on the result of the test. -# All other server output should be ignored. - -# Echo same scan back after immediate error -CLIENT: SCAN [r1,r3) r1,r2 -EXPECT: SCAN [r1,r3) -SERVER: ERROR Unavailable -EXPECT: SCAN [r1,r3) -SERVER: READ_RESPONSE r1,r2 - -# Retry scans with open interval starting at the least read row key. 
-# Instead of using open intervals for retry ranges, '\x00' can be -# appended to the last received row key and sent in a closed interval. -CLIENT: SCAN [r1,r9) r1,r2,r3,r4,r5,r6,r7,r8 -EXPECT: SCAN [r1,r9) -SERVER: READ_RESPONSE r1,r2,r3,r4 -SERVER: ERROR Unavailable -EXPECT: SCAN (r4,r9) -SERVER: ERROR Unavailable -EXPECT: SCAN (r4,r9) -SERVER: READ_RESPONSE r5,r6,r7 -SERVER: ERROR Unavailable -EXPECT: SCAN (r7,r9) -SERVER: READ_RESPONSE r8 diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index 4cbe4ef67152..c889b181673e 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -272,89 +272,6 @@ def test_delete_column_family(self): # Make sure we have successfully deleted it. self.assertEqual(temp_table.list_column_families(), {}) - def test_retry(self): - import subprocess, os, stat, platform, ssl - from google.cloud.bigtable.client import Client - from google.cloud.bigtable.instance import Instance - from google.cloud.bigtable.table import Table - - # import for urlopen based on version - try: - # python 3 - from urllib.request import urlopen - except ImportError: - # python 2 - from urllib2 import urlopen - - TEST_SCRIPT = 'tests/retry_test_script.txt' - SERVER_NAME = 'retry_server' - SERVER_ZIP = SERVER_NAME + ".tar.gz" - - def download_server(): - MOCK_SERVER_URLS = { - 'Linux': 'https://storage.googleapis.com/cloud-bigtable-test/retries/retry_server_linux.tar.gz', - 'Darwin': 'https://storage.googleapis.com/cloud-bigtable-test/retries/retry_server_mac.tar.gz', - } - - test_platform = platform.system() - if test_platform not in MOCK_SERVER_URLS: - self.skip('Retry server not available for platform {0}.'.format(test_platform)) - - context = ssl._create_unverified_context() - mock_server_download = urlopen(MOCK_SERVER_URLS[test_platform], context=context).read() - mock_server_file = open(SERVER_ZIP, 'wb') - mock_server_file.write(mock_server_download) - - # Extract server binary from archive - subprocess.call(['tar', 'zxvf', SERVER_ZIP, '-C', '.']) - os.remove(SERVER_ZIP) - - def process_scan(table, range, ids): - range_chunks = range.split(',') - range_open = range_chunks[0].lstrip('[]') - range_close = range_chunks[1].rstrip(')') - rows = table.read_rows(range_open, range_close) - rows.consume_all() - - should_download = os.environ.get('DOWNLOAD_BIGTABLE_TEST_SERVER') - if should_download is None or should_download == '1': - if not os.path.isfile(SERVER_NAME): - download_server() - - # Connect to server - server = subprocess.Popen( - ['./' + SERVER_NAME, '--script=' + TEST_SCRIPT], - stdin=subprocess.PIPE, stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - ) - - (endpoint, port) = server.stdout.readline().decode("utf-8").rstrip("\n").split(":") - os.environ["BIGTABLE_EMULATOR_HOST"] = endpoint + ":" + port - client = Client(project="client", admin=True) - instance = Instance("instance", client) - table = instance.table("table") - - # Run test, line by line - with open(TEST_SCRIPT, 'r') as script: - for line in script.readlines(): - if line.startswith("CLIENT:"): - chunks = line.split(" ") - op = chunks[1] - process_scan(table, chunks[2], chunks[3]) - - # Check that the test passed - server.kill() - server_stdout_lines = [] - while True: - line = server.stdout.readline().decode("utf-8") - if line != '': - server_stdout_lines.append(line) - else: - break - self.assertEqual(server_stdout_lines[-1], "PASS\n") - - # Clean up - os.remove(SERVER_NAME) class 
TestDataAPI(unittest.TestCase): diff --git a/packages/google-cloud-bigtable/tests/unit/_testing.py b/packages/google-cloud-bigtable/tests/unit/_testing.py index 9192a134854e..cfa24c062660 100644 --- a/packages/google-cloud-bigtable/tests/unit/_testing.py +++ b/packages/google-cloud-bigtable/tests/unit/_testing.py @@ -14,6 +14,7 @@ """Mocks used to emulate gRPC generated objects.""" + class _FakeStub(object): """Acts as a gPRC stub.""" @@ -26,17 +27,6 @@ def __getattr__(self, name): # since __getattribute__ will handle them. return _MethodMock(name, self) -class _CustomFakeStub(object): - """Acts as a gRPC stub. Generates a result from a given iterator - """ - def __init__(self, result): - self.result = result - self.method_calls = [] - - def __getattr__(self, name): - # We need not worry about attributes set in constructor - # since __getattribute__ will handle them. - return _CustomMethodMock(name, self) class _MethodMock(object): """Mock for API method attached to a gRPC stub. @@ -54,17 +44,3 @@ def __call__(self, *args, **kwargs): curr_result, self._stub.results = (self._stub.results[0], self._stub.results[1:]) return curr_result - -class _CustomMethodMock(object): - """ - Same as _MethodMock, but backed by an injected callable. - """ - - def __init__(self, name, stub): - self._name = name - self._stub = stub - - def __call__(self, *args, **kwargs): - """Sync method meant to mock a gRPC stub request.""" - self._stub.method_calls.append((self._name, args, kwargs)) - return self._stub.result diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index 943cceb6f6a8..5b904f091c15 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -473,8 +473,7 @@ def test_read_rows(self): from google.cloud._testing import _Monkey from tests.unit._testing import _FakeStub from google.cloud.bigtable.row_data import PartialRowsData - from google.cloud.bigtable import retry as MUT - from google.cloud.bigtable.retry import ReadRowsIterator + from google.cloud.bigtable import table as MUT client = _Client() instance = _Instance(self.INSTANCE_NAME, client=client) @@ -494,18 +493,20 @@ def mock_create_row_request(table_name, **kwargs): # Patch the stub used by the API method. client._data_stub = stub = _FakeStub(response_iterator) + # Create expected_result. + expected_result = PartialRowsData(response_iterator) + + # Perform the method and check the result. start_key = b'start-key' end_key = b'end-key' filter_obj = object() limit = 22 with _Monkey(MUT, _create_row_request=mock_create_row_request): - # Perform the method and check the result. 
result = table.read_rows( start_key=start_key, end_key=end_key, filter_=filter_obj, limit=limit) - self.assertIsInstance(result._response_iterator, ReadRowsIterator) - self.assertEqual(result._response_iterator.client, client) + self.assertEqual(result, expected_result) self.assertEqual(stub.method_calls, [( 'ReadRows', (request_pb,), @@ -513,258 +514,13 @@ def mock_create_row_request(table_name, **kwargs): )]) created_kwargs = { 'start_key': start_key, - 'end_inclusive': False, 'end_key': end_key, 'filter_': filter_obj, 'limit': limit, - 'start_key_closed': True, + 'end_inclusive': False, } self.assertEqual(mock_created, [(table.name, created_kwargs)]) - def test_read_rows_one_chunk(self): - from google.cloud._testing import _Monkey - from tests.unit._testing import _FakeStub - from google.cloud.bigtable import retry as MUT - from google.cloud.bigtable.retry import ReadRowsIterator - from google.cloud.bigtable.row_data import Cell - from google.cloud.bigtable.row_data import PartialRowsData - - client = _Client() - instance = _Instance(self.INSTANCE_NAME, client=client) - table = self._make_one(self.TABLE_ID, instance) - - # Create request_pb - request_pb = object() # Returned by our mock. - mock_created = [] - - def mock_create_row_request(table_name, **kwargs): - mock_created.append((table_name, kwargs)) - return request_pb - - # Create response_iterator - chunk = _ReadRowsResponseCellChunkPB( - row_key=self.ROW_KEY, - family_name=self.FAMILY_NAME, - qualifier=self.QUALIFIER, - timestamp_micros=self.TIMESTAMP_MICROS, - value=self.VALUE, - commit_row=True, - ) - response_pb = _ReadRowsResponsePB(chunks=[chunk]) - response_iterator = iter([response_pb]) - - # Patch the stub used by the API method. - client._data_stub = stub = _FakeStub(response_iterator) - - start_key = b'start-key' - end_key = b'end-key' - filter_obj = object() - limit = 22 - with _Monkey(MUT, _create_row_request=mock_create_row_request): - # Perform the method and check the result. - result = table.read_rows( - start_key=start_key, end_key=end_key, filter_=filter_obj, - limit=limit) - result.consume_all() - - def test_read_rows_retry_timeout(self): - from google.cloud._testing import _Monkey - from tests.unit._testing import _CustomFakeStub - from google.cloud.bigtable.row_data import PartialRowsData - from google.cloud.bigtable import retry as MUT - from google.cloud.bigtable.retry import ReadRowsIterator - from google.gax import BackoffSettings - from google.gax.errors import RetryError - from grpc import StatusCode, RpcError - import time - - client = _Client() - instance = _Instance(self.INSTANCE_NAME, client=client) - table = self._make_one(self.TABLE_ID, instance) - - # Create request_pb - request_pb = object() # Returned by our mock. 
- mock_created = [] - - def mock_create_row_request(table_name, **kwargs): - mock_created.append((table_name, kwargs)) - return request_pb - - # Create a slow response iterator to cause a timeout - class MockTimeoutError(RpcError): - def code(self): - return StatusCode.DEADLINE_EXCEEDED - - class MockTimeoutIterator(object): - def next(self): - return self.__next__() - def __next__(self): - raise MockTimeoutError() - - client._data_stub = stub = _CustomFakeStub(MockTimeoutIterator()) - - # Set to timeout before RPC completes - test_backoff_settings = BackoffSettings( - initial_retry_delay_millis=10, - retry_delay_multiplier=0.3, - max_retry_delay_millis=30000, - initial_rpc_timeout_millis=1000, - rpc_timeout_multiplier=1.0, - max_rpc_timeout_millis=25 * 60 * 1000, - total_timeout_millis=1000 - ) - - start_key = b'start-key' - end_key = b'end-key' - filter_obj = object() - limit = 22 - with _Monkey(MUT, _create_row_request=mock_create_row_request): - # Verify that a RetryError is thrown on read. - result = table.read_rows( - start_key=start_key, end_key=end_key, filter_=filter_obj, - limit=limit, backoff_settings=test_backoff_settings) - with self.assertRaises(RetryError): - result.consume_next() - - def test_read_rows_mid_row_timeout_retry(self): - from google.cloud._testing import _Monkey - from tests.unit._testing import _CustomFakeStub - from google.cloud.bigtable.row_data import PartialRowsData - from google.cloud.bigtable import retry as MUT - from google.cloud.bigtable.retry import ReadRowsIterator - from google.gax import BackoffSettings - from google.gax.errors import RetryError - from grpc import StatusCode, RpcError - import time - - client = _Client() - instance = _Instance(self.INSTANCE_NAME, client=client) - table = self._make_one(self.TABLE_ID, instance) - - # Create request_pb - request_pb = object() # Returned by our mock. 
- mock_created = [] - - def mock_create_row_request(table_name, **kwargs): - mock_created.append((table_name, kwargs)) - return request_pb - - # Create an iterator that throws an idempotent exception - class MockTimeoutError(RpcError): - def code(self): - return StatusCode.DEADLINE_EXCEEDED - - first_chunk = _ReadRowsResponseCellChunkPB( - row_key=self.ROW_KEY, - family_name=self.FAMILY_NAME, - qualifier=self.QUALIFIER, - timestamp_micros=self.TIMESTAMP_MICROS, - value=self.VALUE, - ) - first_response = _ReadRowsResponsePB(chunks = [first_chunk]) - - second_chunk = _ReadRowsResponseCellChunkPB( - row_key=self.ROW_KEY, - family_name=self.FAMILY_NAME, - qualifier=self.QUALIFIER, - timestamp_micros=self.TIMESTAMP_MICROS, - value=self.VALUE, - commit_row=True, - ) - second_response = _ReadRowsResponsePB(chunks = [second_chunk]) - - class MidRowTimeoutIterator(object): - def __init__(self): - self.invocation_count = 0 - def next(self): - return self.__next__() - def __next__(self): - self.invocation_count += 1 - if (self.invocation_count == 1): - return first_response - elif (self.invocation_count == 2): - raise MockTimeoutError() - elif (self.invocation_count == 3): - return first_response - elif (self.invocation_count == 4): - return second_response - else: - raise StopIteration - - client._data_stub = stub = _CustomFakeStub(MidRowTimeoutIterator()) - - # Set to timeout before RPC completes - test_backoff_settings = BackoffSettings( - initial_retry_delay_millis=10, - retry_delay_multiplier=1, - max_retry_delay_millis=30000, - initial_rpc_timeout_millis=1000, - rpc_timeout_multiplier=1.0, - max_rpc_timeout_millis=25 * 60 * 1000, - total_timeout_millis=1000 - ) - - start_key = b'start-key' - end_key = b'end-key' - filter_obj = object() - limit = 22 - with _Monkey(MUT, _create_row_request=mock_create_row_request): - # Verify that a RetryError is thrown on read. - result = table.read_rows( - start_key=start_key, end_key=end_key, filter_=filter_obj, - limit=limit, backoff_settings=test_backoff_settings) - result.consume_all() - - cells = result.rows[self.ROW_KEY].cells[self.FAMILY_NAME][self.QUALIFIER] - self.assertEquals(len(cells), 2) - - def test_read_rows_non_idempotent_error_throws(self): - from google.cloud._testing import _Monkey - from tests.unit._testing import _CustomFakeStub - from google.cloud.bigtable.row_data import PartialRowsData - from google.cloud.bigtable import retry as MUT - from google.cloud.bigtable.retry import ReadRowsIterator - from google.gax import BackoffSettings - from google.gax.errors import RetryError - from grpc import StatusCode, RpcError - import time - - client = _Client() - instance = _Instance(self.INSTANCE_NAME, client=client) - table = self._make_one(self.TABLE_ID, instance) - - # Create request_pb - request_pb = object() # Returned by our mock. 
- mock_created = [] - - def mock_create_row_request(table_name, **kwargs): - mock_created.append((table_name, kwargs)) - return request_pb - - # Create response iterator that raises a non-idempotent exception - class MockNonIdempotentError(RpcError): - def code(self): - return StatusCode.RESOURCE_EXHAUSTED - - class MockNonIdempotentIterator(object): - def next(self): - return self.__next__() - def __next__(self): - raise MockNonIdempotentError() - - client._data_stub = stub = _CustomFakeStub(MockNonIdempotentIterator()) - - start_key = b'start-key' - end_key = b'end-key' - filter_obj = object() - limit = 22 - with _Monkey(MUT, _create_row_request=mock_create_row_request): - result = table.read_rows( - start_key=start_key, end_key=end_key, filter_=filter_obj, - limit=limit) - with self.assertRaises(MockNonIdempotentError): - result.consume_next() - def test_sample_row_keys(self): from tests.unit._testing import _FakeStub @@ -1244,14 +1000,12 @@ def test_do_mutate_retryable_rows_mismatch_num_responses(self): class Test__create_row_request(unittest.TestCase): def _call_fut(self, table_name, row_key=None, start_key=None, end_key=None, - filter_=None, limit=None, start_key_closed=True, - end_inclusive=False): - from google.cloud.bigtable.retry import _create_row_request + filter_=None, limit=None, end_inclusive=False): + from google.cloud.bigtable.table import _create_row_request return _create_row_request( table_name, row_key=row_key, start_key=start_key, end_key=end_key, - start_key_closed=start_key_closed, filter_=filter_, - limit=limit, end_inclusive=end_inclusive) + filter_=filter_, limit=limit, end_inclusive=end_inclusive) def test_table_name_only(self): table_name = 'table_name' @@ -1274,7 +1028,7 @@ def test_row_key(self): expected_result.rows.row_keys.append(row_key) self.assertEqual(result, expected_result) - def test_row_range_start_key_closed(self): + def test_row_range_start_key(self): table_name = 'table_name' start_key = b'start_key' result = self._call_fut(table_name, start_key=start_key) @@ -1282,15 +1036,6 @@ def test_row_range_start_key_closed(self): expected_result.rows.row_ranges.add(start_key_closed=start_key) self.assertEqual(result, expected_result) - def test_row_range_start_key_open(self): - table_name = 'table_name' - start_key = b'start_key' - result = self._call_fut(table_name, start_key=start_key, - start_key_closed=False) - expected_result = _ReadRowsRequestPB(table_name=table_name) - expected_result.rows.row_ranges.add(start_key_open=start_key) - self.assertEqual(result, expected_result) - def test_row_range_end_key(self): table_name = 'table_name' end_key = b'end_key' From 78f1421c10c5f1f94ed628de6a127a5a289e866b Mon Sep 17 00:00:00 2001 From: Solomon Duskis Date: Mon, 4 Dec 2017 17:55:51 -0500 Subject: [PATCH 105/892] Removing "rename" from bigtable table.py comments (#4526) Bigtable tables cannot be renamed. I'm removing the comment that says that rename is allowed. 
--- packages/google-cloud-bigtable/google/cloud/bigtable/table.py | 1 - 1 file changed, 1 deletion(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index 100409e5e81c..070d3179a49f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -78,7 +78,6 @@ class Table(object): We can use a :class:`Table` to: * :meth:`create` the table - * :meth:`rename` the table * :meth:`delete` the table * :meth:`list_column_families` in the table From 12d62e7893fb31557586a713081210a7bc4581ed Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Wed, 13 Dec 2017 15:34:22 -0800 Subject: [PATCH 106/892] Add `google.cloud.container` API. (#4577) --- .../google/cloud/bigtable/instance.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py index b5f21a2e08fe..5927b7c4f0ca 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py @@ -291,7 +291,7 @@ def cluster(self, cluster_id, serve_nodes=3): :param serve_nodes: (Optional) The number of nodes in the cluster. Defaults to 3. - :rtype: :class:`.Cluster` + :rtype: :class:`~.bigtable.cluster.Cluster` :returns: The cluster owned by this client. """ return Cluster(cluster_id, self, serve_nodes=serve_nodes) @@ -300,9 +300,10 @@ def list_clusters(self): """Lists clusters in this instance. :rtype: tuple - :returns: A pair of results, the first is a list of :class:`.Cluster` s - returned and the second is a list of strings (the failed - locations in the request). + :returns: A pair of results, the first is a list of + :class:`~.bigtable.cluster.Cluster` objects returned and the + second is a list of strings (the failed locations in the + request). """ request_pb = messages_v2_pb2.ListClustersRequest(parent=self.name) # We expect a `.cluster_messages_v1_pb2.ListClustersResponse` From bc3c664da07902ba83c3f1ce16a5510900cc451e Mon Sep 17 00:00:00 2001 From: chemelnucfin Date: Wed, 27 Dec 2017 10:16:14 -0800 Subject: [PATCH 107/892] Bigtable: Row filter end points documentation error (#4667) --- .../google-cloud-bigtable/google/cloud/bigtable/row_filters.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_filters.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_filters.py index c6d8d25f0c81..653966d3f838 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_filters.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_filters.py @@ -168,7 +168,7 @@ class RowSampleFilter(RowFilter): :type sample: float :param sample: The probability of matching a cell (must be in the - interval ``[0, 1]``). + interval ``(0, 1)`` The end points are excluded). 
""" def __init__(self, sample): From 8a12eecdaaeda62797461de29b4ac6505085e86b Mon Sep 17 00:00:00 2001 From: chemelnucfin Date: Thu, 18 Jan 2018 09:22:26 -0800 Subject: [PATCH 108/892] BigTable: minor typo (#4758) --- .../google-cloud-bigtable/google/cloud/bigtable/row_data.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py index 9bde1c0cb5a3..616059ad0c50 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py @@ -222,7 +222,7 @@ def state(self): """State machine state. :rtype: str - :returns: name of state corresponding to currrent row / chunk + :returns: name of state corresponding to current row / chunk processing. """ if self._last_scanned_row_key is None: From 32235a00e9abaaa51b7e31fbdd41a2ca422c1c5d Mon Sep 17 00:00:00 2001 From: zakons Date: Fri, 19 Jan 2018 08:47:00 -0500 Subject: [PATCH 109/892] BigTable: Cell.from_pb() performance improvement (#4745) --- .../google/cloud/bigtable/row_data.py | 20 ++++++++------ .../tests/unit/test_row_data.py | 27 +++++++++---------- 2 files changed, 24 insertions(+), 23 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py index 616059ad0c50..0c4ecdacced9 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py @@ -28,16 +28,16 @@ class Cell(object): :type value: bytes :param value: The value stored in the cell. - :type timestamp: :class:`datetime.datetime` - :param timestamp: The timestamp when the cell was stored. + :type timestamp_micros: int + :param timestamp_micros: The timestamp_micros when the cell was stored. :type labels: list :param labels: (Optional) List of strings. Labels applied to the cell. """ - def __init__(self, value, timestamp, labels=()): + def __init__(self, value, timestamp_micros, labels=()): self.value = value - self.timestamp = timestamp + self.timestamp_micros = timestamp_micros self.labels = list(labels) @classmethod @@ -50,17 +50,21 @@ def from_pb(cls, cell_pb): :rtype: :class:`Cell` :returns: The cell corresponding to the protobuf. 
""" - timestamp = _datetime_from_microseconds(cell_pb.timestamp_micros) if cell_pb.labels: - return cls(cell_pb.value, timestamp, labels=cell_pb.labels) + return cls(cell_pb.value, cell_pb.timestamp_micros, + labels=cell_pb.labels) else: - return cls(cell_pb.value, timestamp) + return cls(cell_pb.value, cell_pb.timestamp_micros) + + @property + def timestamp(self): + return _datetime_from_microseconds(self.timestamp_micros) def __eq__(self, other): if not isinstance(other, self.__class__): return NotImplemented return (other.value == self.value and - other.timestamp == self.timestamp and + other.timestamp_micros == self.timestamp_micros and other.labels == self.labels) def __ne__(self, other): diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_data.py b/packages/google-cloud-bigtable/tests/unit/test_row_data.py index 7cfb1dc45d4e..375097c54e79 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_data.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_data.py @@ -19,6 +19,7 @@ class TestCell(unittest.TestCase): + timestamp_micros = 18738724000 # Make sure millis granularity @staticmethod def _get_target_class(): @@ -35,22 +36,23 @@ def _from_pb_test_helper(self, labels=None): from google.cloud.bigtable._generated import ( data_pb2 as data_v2_pb2) - timestamp_micros = 18738724000 # Make sure millis granularity + timestamp_micros = TestCell.timestamp_micros timestamp = _EPOCH + datetime.timedelta(microseconds=timestamp_micros) value = b'value-bytes' if labels is None: cell_pb = data_v2_pb2.Cell( value=value, timestamp_micros=timestamp_micros) - cell_expected = self._make_one(value, timestamp) + cell_expected = self._make_one(value, timestamp_micros) else: cell_pb = data_v2_pb2.Cell( value=value, timestamp_micros=timestamp_micros, labels=labels) - cell_expected = self._make_one(value, timestamp, labels=labels) + cell_expected = self._make_one(value, timestamp_micros, labels=labels) klass = self._get_target_class() result = klass.from_pb(cell_pb) self.assertEqual(result, cell_expected) + self.assertEqual(result.timestamp, timestamp) def test_from_pb(self): self._from_pb_test_helper() @@ -61,16 +63,13 @@ def test_from_pb_with_labels(self): def test_constructor(self): value = object() - timestamp = object() - cell = self._make_one(value, timestamp) + cell = self._make_one(value, TestCell.timestamp_micros) self.assertEqual(cell.value, value) - self.assertEqual(cell.timestamp, timestamp) def test___eq__(self): value = object() - timestamp = object() - cell1 = self._make_one(value, timestamp) - cell2 = self._make_one(value, timestamp) + cell1 = self._make_one(value, TestCell.timestamp_micros) + cell2 = self._make_one(value, TestCell.timestamp_micros) self.assertEqual(cell1, cell2) def test___eq__type_differ(self): @@ -80,18 +79,16 @@ def test___eq__type_differ(self): def test___ne__same_value(self): value = object() - timestamp = object() - cell1 = self._make_one(value, timestamp) - cell2 = self._make_one(value, timestamp) + cell1 = self._make_one(value, TestCell.timestamp_micros) + cell2 = self._make_one(value, TestCell.timestamp_micros) comparison_val = (cell1 != cell2) self.assertFalse(comparison_val) def test___ne__(self): value1 = 'value1' value2 = 'value2' - timestamp = object() - cell1 = self._make_one(value1, timestamp) - cell2 = self._make_one(value2, timestamp) + cell1 = self._make_one(value1, TestCell.timestamp_micros) + cell2 = self._make_one(value2, TestCell.timestamp_micros) self.assertNotEqual(cell1, cell2) From 184687e3fcef9f2b79185bec84ddec591a2a881e 
Mon Sep 17 00:00:00 2001 From: chemelnucfin Date: Fri, 19 Jan 2018 13:53:56 -0800 Subject: [PATCH 110/892] BigTable: Timestamp system test fix (#4765) --- packages/google-cloud-bigtable/tests/system.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index c889b181673e..011bf3377343 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -311,8 +311,12 @@ def _write_to_row(self, row1=None, row2=None, row3=None, row4=None): timestamp1 = _datetime_from_microseconds(timestamp1_micros) # 1000 microseconds is a millisecond timestamp2 = timestamp1 + datetime.timedelta(microseconds=1000) + timestamp2_micros = _microseconds_from_datetime(timestamp2) timestamp3 = timestamp1 + datetime.timedelta(microseconds=2000) + timestamp3_micros = _microseconds_from_datetime(timestamp3) timestamp4 = timestamp1 + datetime.timedelta(microseconds=3000) + timestamp4_micros = _microseconds_from_datetime(timestamp4) + if row1 is not None: row1.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1, timestamp=timestamp1) @@ -327,10 +331,10 @@ def _write_to_row(self, row1=None, row2=None, row3=None, row4=None): timestamp=timestamp4) # Create the cells we will check. - cell1 = Cell(CELL_VAL1, timestamp1) - cell2 = Cell(CELL_VAL2, timestamp2) - cell3 = Cell(CELL_VAL3, timestamp3) - cell4 = Cell(CELL_VAL4, timestamp4) + cell1 = Cell(CELL_VAL1, timestamp1_micros) + cell2 = Cell(CELL_VAL2, timestamp2_micros) + cell3 = Cell(CELL_VAL3, timestamp3_micros) + cell4 = Cell(CELL_VAL4, timestamp4_micros) return cell1, cell2, cell3, cell4 def test_mutate_rows(self): From 8c1576edc4d554c281a7e772c4c34f061e0e2a15 Mon Sep 17 00:00:00 2001 From: aneepct Date: Fri, 26 Jan 2018 18:41:10 +0530 Subject: [PATCH 111/892] BigTable: Adding a row generator on a table. (#4679) Allows iteration over the rows in a table instead of reading the rows into an internal dictionary first. As soon as a row has been validated, it is available in the iterator. --- .../google/cloud/bigtable/row_data.py | 190 ++++++++++-------- .../google/cloud/bigtable/table.py | 38 ++++ .../tests/unit/test_row_data.py | 186 +++++++++-------- .../tests/unit/test_table.py | 53 +++++ 4 files changed, 289 insertions(+), 178 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py index 0c4ecdacced9..7ae511c621f6 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py @@ -191,15 +191,64 @@ class PartialRowsData(object): :param response_iterator: A streaming iterator returned from a ``ReadRows`` request. """ - START = "Start" # No responses yet processed. - NEW_ROW = "New row" # No cells yet complete for row - ROW_IN_PROGRESS = "Row in progress" # Some cells complete for row - CELL_IN_PROGRESS = "Cell in progress" # Incomplete cell for row + + START = 'Start' # No responses yet processed. 
+ NEW_ROW = 'New row' # No cells yet complete for row + ROW_IN_PROGRESS = 'Row in progress' # Some cells complete for row + CELL_IN_PROGRESS = 'Cell in progress' # Incomplete cell for row def __init__(self, response_iterator): self._response_iterator = response_iterator + self._generator = YieldRowsData(response_iterator) + # Fully-processed rows, keyed by `row_key` - self._rows = {} + self.rows = {} + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return other._response_iterator == self._response_iterator + + def __ne__(self, other): + return not self == other + + @property + def state(self): + """State machine state. + + :rtype: str + :returns: name of state corresponding to currrent row / chunk + processing. + """ + return self._generator.state + + def consume_all(self, max_loops=None): + """Consume the streamed responses until there are no more. + + :type max_loops: int + :param max_loops: (Optional) Maximum number of times to try to consume + an additional ``ReadRowsResponse``. You can use this + to avoid long wait times. + """ + for row in self._generator.read_rows(): + self.rows[row.row_key] = row + + +class YieldRowsData(object): + """Convenience wrapper for consuming a ``ReadRows`` streaming response. + + :type response_iterator: :class:`~google.cloud.exceptions.GrpcRendezvous` + :param response_iterator: A streaming iterator returned from a + ``ReadRows`` request. + """ + + START = 'Start' # No responses yet processed. + NEW_ROW = 'New row' # No cells yet complete for row + ROW_IN_PROGRESS = 'Row in progress' # Some cells complete for row + CELL_IN_PROGRESS = 'Cell in progress' # Incomplete cell for row + + def __init__(self, response_iterator): + self._response_iterator = response_iterator # Counter for responses pulled from iterator self._counter = 0 # Maybe cached from previous response @@ -213,14 +262,6 @@ def __init__(self, response_iterator): # Last complete cell, unset until first completion, after new row self._previous_cell = None - def __eq__(self, other): - if not isinstance(other, self.__class__): - return NotImplemented - return other._response_iterator == self._response_iterator - - def __ne__(self, other): - return not self == other - @property def state(self): """State machine state. @@ -241,96 +282,75 @@ def state(self): return self.ROW_IN_PROGRESS return self.NEW_ROW # row added, no chunk yet processed - @property - def rows(self): - """Property returning all rows accumulated from the stream. - - :rtype: dict - :returns: row_key -> :class:`PartialRowData`. - """ - # NOTE: To avoid duplicating large objects, this is just the - # mutable private data. - return self._rows - def cancel(self): """Cancels the iterator, closing the stream.""" self._response_iterator.cancel() - def consume_next(self): - """Consume the next ``ReadRowsResponse`` from the stream. + def read_rows(self): + """Consume the ``ReadRowsResponse's`` from the stream. + Read the rows and yield each to the reader Parse the response and its chunks into a new/existing row in :attr:`_rows`. Rows are returned in order by row key. 
""" - response = six.next(self._response_iterator) - self._counter += 1 + while True: + try: + response = six.next(self._response_iterator) + except StopIteration: + break - if self._last_scanned_row_key is None: # first response - if response.last_scanned_row_key: - raise InvalidReadRowsResponse() + self._counter += 1 - self._last_scanned_row_key = response.last_scanned_row_key + if self._last_scanned_row_key is None: # first response + if response.last_scanned_row_key: + raise InvalidReadRowsResponse() - row = self._row - cell = self._cell + self._last_scanned_row_key = response.last_scanned_row_key - for chunk in response.chunks: + row = self._row + cell = self._cell - self._validate_chunk(chunk) + for chunk in response.chunks: - if chunk.reset_row: - row = self._row = None - cell = self._cell = self._previous_cell = None - continue + self._validate_chunk(chunk) - if row is None: - row = self._row = PartialRowData(chunk.row_key) + if chunk.reset_row: + row = self._row = None + cell = self._cell = self._previous_cell = None + continue - if cell is None: - qualifier = None - if chunk.HasField('qualifier'): - qualifier = chunk.qualifier.value + if row is None: + row = self._row = PartialRowData(chunk.row_key) - cell = self._cell = PartialCellData( - chunk.row_key, - chunk.family_name.value, - qualifier, - chunk.timestamp_micros, - chunk.labels, - chunk.value) - self._copy_from_previous(cell) - else: - cell.append_value(chunk.value) + if cell is None: + qualifier = None + if chunk.HasField('qualifier'): + qualifier = chunk.qualifier.value - if chunk.commit_row: - self._save_current_row() - row = cell = None - continue + cell = self._cell = PartialCellData( + chunk.row_key, + chunk.family_name.value, + qualifier, + chunk.timestamp_micros, + chunk.labels, + chunk.value) + self._copy_from_previous(cell) + else: + cell.append_value(chunk.value) - if chunk.value_size == 0: - self._save_current_cell() - cell = None + if chunk.commit_row: + self._save_current_cell() - def consume_all(self, max_loops=None): - """Consume the streamed responses until there are no more. + yield self._row - This simply calls :meth:`consume_next` until there are no - more to consume. + self._row, self._previous_row = None, self._row + self._previous_cell = None + row = cell = None + continue - :type max_loops: int - :param max_loops: (Optional) Maximum number of times to try to consume - an additional ``ReadRowsResponse``. You can use this - to avoid long wait times. 
- """ - curr_loop = 0 - if max_loops is None: - max_loops = float('inf') - while curr_loop < max_loops: - curr_loop += 1 - try: - self.consume_next() - except StopIteration: - break + if chunk.value_size == 0: + self._save_current_cell() + cell = None @staticmethod def _validate_chunk_status(chunk): @@ -433,14 +453,6 @@ def _copy_from_previous(self, cell): if cell.qualifier is None: cell.qualifier = previous.qualifier - def _save_current_row(self): - """Helper for :meth:`consume_next`.""" - if self._cell: - self._save_current_cell() - self._rows[self._row.row_key] = self._row - self._row, self._previous_row = None, self._row - self._previous_cell = None - def _raise_if(predicate, *args): """Helper for validation methods.""" diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index 070d3179a49f..305cf1de5508 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -31,6 +31,7 @@ from google.cloud.bigtable.row import ConditionalRow from google.cloud.bigtable.row import DirectRow from google.cloud.bigtable.row_data import PartialRowsData +from google.cloud.bigtable.row_data import YieldRowsData from grpc import StatusCode @@ -315,6 +316,43 @@ def read_rows(self, start_key=None, end_key=None, limit=None, # We expect an iterator of `data_messages_v2_pb2.ReadRowsResponse` return PartialRowsData(response_iterator) + def yield_rows(self, start_key=None, end_key=None, limit=None, + filter_=None): + """Read rows from this table. + + :type start_key: bytes + :param start_key: (Optional) The beginning of a range of row keys to + read from. The range will include ``start_key``. If + left empty, will be interpreted as the empty string. + + :type end_key: bytes + :param end_key: (Optional) The end of a range of row keys to read from. + The range will not include ``end_key``. If left empty, + will be interpreted as an infinite string. + + :type limit: int + :param limit: (Optional) The read will terminate after committing to N + rows' worth of results. The default (zero) is to return + all results. + + :type filter_: :class:`.RowFilter` + :param filter_: (Optional) The filter to apply to the contents of the + specified row(s). If unset, reads every column in + each row. + + :rtype: :class:`.PartialRowData` + :returns: A :class:`.PartialRowData` for each row returned + """ + request_pb = _create_row_request( + self.name, start_key=start_key, end_key=end_key, filter_=filter_, + limit=limit) + client = self._instance._client + response_iterator = client._data_stub.ReadRows(request_pb) + # We expect an iterator of `data_messages_v2_pb2.ReadRowsResponse` + generator = YieldRowsData(response_iterator) + for row in generator.read_rows(): + yield row + def mutate_rows(self, rows, retry=DEFAULT_RETRY): """Mutates multiple rows in bulk. 
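Note on the generator added above: `Table.yield_rows` streams each `PartialRowData` as soon as its final chunk commits, instead of buffering every row the way `read_rows(...).consume_all()` fills the `rows` dict. A minimal usage sketch, assuming an already-connected `table` object and placeholder family/qualifier names (`cf1`, `greeting`) that are not taken from this patch:

    # `table` is assumed to be a google.cloud.bigtable.table.Table whose
    # client already holds a data connection (client._data_stub).
    for row in table.yield_rows(start_key=b'row-000', end_key=b'row-999'):
        # Each yielded `row` is a PartialRowData; rows are not accumulated
        # into a dict, so large scans stay memory-friendly.
        cells = row.cells[u'cf1'][b'greeting']
        print(row.row_key, cells[0].value)
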
diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_data.py b/packages/google-cloud-bigtable/tests/unit/test_row_data.py index 375097c54e79..c58ea351633b 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_data.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_data.py @@ -14,8 +14,8 @@ import unittest - -import mock +import timeit +import csv class TestCell(unittest.TestCase): @@ -193,22 +193,6 @@ def _get_target_class(): return PartialRowsData - def _getDoNothingClass(self): - klass = self._get_target_class() - - class FakePartialRowsData(klass): - - def __init__(self, *args, **kwargs): - super(FakePartialRowsData, self).__init__(*args, **kwargs) - self._consumed = [] - - def consume_next(self): - value = self._response_iterator.next() - self._consumed.append(value) - return value - - return FakePartialRowsData - def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) @@ -217,7 +201,7 @@ def test_constructor(self): partial_rows_data = self._make_one(response_iterator) self.assertIs(partial_rows_data._response_iterator, response_iterator) - self.assertEqual(partial_rows_data._rows, {}) + self.assertEqual(partial_rows_data.rows, {}) def test___eq__(self): response_iterator = object() @@ -244,59 +228,52 @@ def test___ne__(self): partial_rows_data2 = self._make_one(response_iterator2) self.assertNotEqual(partial_rows_data1, partial_rows_data2) - def test_state_start(self): - prd = self._make_one([]) - self.assertEqual(prd.state, prd.START) - - def test_state_new_row_w_row(self): - prd = self._make_one([]) - prd._last_scanned_row_key = '' - prd._row = object() - self.assertEqual(prd.state, prd.NEW_ROW) - def test_rows_getter(self): partial_rows_data = self._make_one(None) - partial_rows_data._rows = value = object() + partial_rows_data.rows = value = object() self.assertIs(partial_rows_data.rows, value) - def test_cancel(self): - response_iterator = _MockCancellableIterator() - partial_rows_data = self._make_one(response_iterator) - self.assertEqual(response_iterator.cancel_calls, 0) - partial_rows_data.cancel() - self.assertEqual(response_iterator.cancel_calls, 1) - # 'consume_nest' tested via 'TestPartialRowsData_JSON_acceptance_tests' +class TestYieldRowsData(unittest.TestCase): + ROW_KEY = b'row-key' + FAMILY_NAME = u'family' + QUALIFIER = b'qualifier' + TIMESTAMP_MICROS = 100 + VALUE = b'value' + + @staticmethod + def _get_target_class(): + from google.cloud.bigtable.row_data import YieldRowsData + + return YieldRowsData + + def _make_one(self, *args, **kwargs): + return self._get_target_class()(*args, **kwargs) - def test_consume_all(self): - klass = self._getDoNothingClass() + def test_state_start(self): + yrd = self._make_one([]) + self.assertEqual(yrd.state, yrd.START) - value1, value2, value3 = object(), object(), object() - response_iterator = _MockCancellableIterator(value1, value2, value3) - partial_rows_data = klass(response_iterator) - self.assertEqual(partial_rows_data._consumed, []) - partial_rows_data.consume_all() - self.assertEqual( - partial_rows_data._consumed, [value1, value2, value3]) + def test_state_new_row_w_row(self): + yrd = self._make_one([]) + yrd._last_scanned_row_key = '' + yrd._row = object() + self.assertEqual(yrd.state, yrd.NEW_ROW) - def test_consume_all_with_max_loops(self): - klass = self._getDoNothingClass() + def test_cancel(self): + response_iterator = _MockCancellableIterator() + yield_rows_data = self._make_one(response_iterator) + self.assertEqual(response_iterator.cancel_calls, 0) + 
yield_rows_data.cancel() + self.assertEqual(response_iterator.cancel_calls, 1) - value1, value2, value3 = object(), object(), object() - response_iterator = _MockCancellableIterator(value1, value2, value3) - partial_rows_data = klass(response_iterator) - self.assertEqual(partial_rows_data._consumed, []) - partial_rows_data.consume_all(max_loops=1) - self.assertEqual(partial_rows_data._consumed, [value1]) - # Make sure the iterator still has the remaining values. - self.assertEqual( - list(response_iterator.iter_values), [value2, value3]) + # 'consume_next' tested via 'TestPartialRowsData_JSON_acceptance_tests' def test__copy_from_current_unset(self): - prd = self._make_one([]) + yrd = self._make_one([]) chunks = _generate_cell_chunks(['']) chunk = chunks[0] - prd._copy_from_current(chunk) + yrd._copy_from_current(chunk) self.assertEqual(chunk.row_key, b'') self.assertEqual(chunk.family_name.value, u'') self.assertEqual(chunk.qualifier.value, b'') @@ -309,8 +286,8 @@ def test__copy_from_current_blank(self): QUALIFIER = b'C' TIMESTAMP_MICROS = 100 LABELS = ['L1', 'L2'] - prd = self._make_one([]) - prd._cell = _PartialCellData() + yrd = self._make_one([]) + yrd._cell = _PartialCellData() chunks = _generate_cell_chunks(['']) chunk = chunks[0] chunk.row_key = ROW_KEY @@ -318,7 +295,7 @@ def test__copy_from_current_blank(self): chunk.qualifier.value = QUALIFIER chunk.timestamp_micros = TIMESTAMP_MICROS chunk.labels.extend(LABELS) - prd._copy_from_current(chunk) + yrd._copy_from_current(chunk) self.assertEqual(chunk.row_key, ROW_KEY) self.assertEqual(chunk.family_name.value, FAMILY_NAME) self.assertEqual(chunk.qualifier.value, QUALIFIER) @@ -326,9 +303,9 @@ def test__copy_from_current_blank(self): self.assertEqual(chunk.labels, LABELS) def test__copy_from_previous_unset(self): - prd = self._make_one([]) + yrd = self._make_one([]) cell = _PartialCellData() - prd._copy_from_previous(cell) + yrd._copy_from_previous(cell) self.assertEqual(cell.row_key, '') self.assertEqual(cell.family_name, u'') self.assertIsNone(cell.qualifier) @@ -341,7 +318,7 @@ def test__copy_from_previous_blank(self): QUALIFIER = b'C' TIMESTAMP_MICROS = 100 LABELS = ['L1', 'L2'] - prd = self._make_one([]) + yrd = self._make_one([]) cell = _PartialCellData( row_key=ROW_KEY, family_name=FAMILY_NAME, @@ -349,8 +326,8 @@ def test__copy_from_previous_blank(self): timestamp_micros=TIMESTAMP_MICROS, labels=LABELS, ) - prd._previous_cell = _PartialCellData() - prd._copy_from_previous(cell) + yrd._previous_cell = _PartialCellData() + yrd._copy_from_previous(cell) self.assertEqual(cell.row_key, ROW_KEY) self.assertEqual(cell.family_name, FAMILY_NAME) self.assertEqual(cell.qualifier, QUALIFIER) @@ -363,8 +340,8 @@ def test__copy_from_previous_filled(self): QUALIFIER = b'C' TIMESTAMP_MICROS = 100 LABELS = ['L1', 'L2'] - prd = self._make_one([]) - prd._previous_cell = _PartialCellData( + yrd = self._make_one([]) + yrd._previous_cell = _PartialCellData( row_key=ROW_KEY, family_name=FAMILY_NAME, qualifier=QUALIFIER, @@ -372,38 +349,30 @@ def test__copy_from_previous_filled(self): labels=LABELS, ) cell = _PartialCellData() - prd._copy_from_previous(cell) + yrd._copy_from_previous(cell) self.assertEqual(cell.row_key, ROW_KEY) self.assertEqual(cell.family_name, FAMILY_NAME) self.assertEqual(cell.qualifier, QUALIFIER) self.assertEqual(cell.timestamp_micros, 0) self.assertEqual(cell.labels, []) - def test__save_row_no_cell(self): - ROW_KEY = 'RK' - prd = self._make_one([]) - row = prd._row = mock.Mock(row_key=ROW_KEY, spec=['row_key']) - prd._cell 
= None - prd._save_current_row() - self.assertIs(prd._rows[ROW_KEY], row) - def test_invalid_last_scanned_row_key_on_start(self): from google.cloud.bigtable.row_data import InvalidReadRowsResponse response = _ReadRowsResponseV2(chunks=(), last_scanned_row_key='ABC') iterator = _MockCancellableIterator(response) - prd = self._make_one(iterator) + yrd = self._make_one(iterator) with self.assertRaises(InvalidReadRowsResponse): - prd.consume_next() + self._consume_all(yrd) def test_valid_last_scanned_row_key_on_start(self): response = _ReadRowsResponseV2( chunks=(), last_scanned_row_key='AFTER') iterator = _MockCancellableIterator(response) - prd = self._make_one(iterator) - prd._last_scanned_row_key = 'BEFORE' - prd.consume_next() - self.assertEqual(prd._last_scanned_row_key, 'AFTER') + yrd = self._make_one(iterator) + yrd._last_scanned_row_key = 'BEFORE' + self._consume_all(yrd) + self.assertEqual(yrd._last_scanned_row_key, 'AFTER') def test_invalid_empty_chunk(self): from google.cloud.bigtable.row_data import InvalidChunk @@ -411,9 +380,36 @@ def test_invalid_empty_chunk(self): chunks = _generate_cell_chunks(['']) response = _ReadRowsResponseV2(chunks) iterator = _MockCancellableIterator(response) - prd = self._make_one(iterator) + yrd = self._make_one(iterator) with self.assertRaises(InvalidChunk): - prd.consume_next() + self._consume_all(yrd) + + def test_yield_rows_data(self): + from google.cloud.bigtable.row_data import YieldRowsData + + chunk = _ReadRowsResponseCellChunkPB( + row_key=self.ROW_KEY, + family_name=self.FAMILY_NAME, + qualifier=self.QUALIFIER, + timestamp_micros=self.TIMESTAMP_MICROS, + value=self.VALUE, + commit_row=True, + ) + chunks = [chunk] + + response = _ReadRowsResponseV2(chunks) + iterator = _MockCancellableIterator(response) + yrd = YieldRowsData(iterator) + + rows = [] + for row in yrd.read_rows(): + rows.append(row) + result = rows[0] + + self.assertEqual(result.row_key, self.ROW_KEY) + + def _consume_all(self, yrd): + return [row.row_key for row in yrd.read_rows()] class TestPartialRowsData_JSON_acceptance_tests(unittest.TestCase): @@ -451,7 +447,7 @@ def _fail_during_consume(self, testcase_name): iterator = _MockCancellableIterator(response) prd = self._make_one(iterator) with self.assertRaises(InvalidChunk): - prd.consume_next() + prd.consume_all() expected_result = self._sort_flattend_cells( [result for result in results if not result['error']]) flattened = self._sort_flattend_cells(_flatten_cells(prd)) @@ -507,7 +503,7 @@ def _incomplete_final_row(self, testcase_name): response = _ReadRowsResponseV2(chunks) iterator = _MockCancellableIterator(response) prd = self._make_one(iterator) - prd.consume_next() + prd.consume_all() self.assertEqual(prd.state, prd.ROW_IN_PROGRESS) expected_result = self._sort_flattend_cells( [result for result in results if not result['error']]) @@ -529,7 +525,7 @@ def _match_results(self, testcase_name, expected_result=_marker): response = _ReadRowsResponseV2(chunks) iterator = _MockCancellableIterator(response) prd = self._make_one(iterator) - prd.consume_next() + prd.consume_all() flattened = self._sort_flattend_cells(_flatten_cells(prd)) if expected_result is self._marker: expected_result = self._sort_flattend_cells(results) @@ -724,3 +720,15 @@ def _parse_readrows_acceptance_tests(filename): chunks = _generate_cell_chunks(test['chunks']) results = test['results'] yield name, chunks, results + + +def _ReadRowsResponseCellChunkPB(*args, **kw): + from google.cloud.bigtable._generated import ( + bigtable_pb2 as messages_v2_pb2) + + 
family_name = kw.pop('family_name') + qualifier = kw.pop('qualifier') + message = messages_v2_pb2.ReadRowsResponse.CellChunk(*args, **kw) + message.family_name.value = family_name + message.qualifier.value = qualifier + return message diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index 5b904f091c15..5ed8221f4884 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -131,6 +131,7 @@ class TestTable(unittest.TestCase): QUALIFIER = b'qualifier' TIMESTAMP_MICROS = 100 VALUE = b'value' + _json_tests = None @staticmethod def _get_target_class(): @@ -521,6 +522,37 @@ def mock_create_row_request(table_name, **kwargs): } self.assertEqual(mock_created, [(table.name, created_kwargs)]) + def test_yield_rows(self): + from tests.unit._testing import _FakeStub + + client = _Client() + instance = _Instance(self.INSTANCE_NAME, client=client) + table = self._make_one(self.TABLE_ID, instance) + + # Create response_iterator + chunk = _ReadRowsResponseCellChunkPB( + row_key=self.ROW_KEY, + family_name=self.FAMILY_NAME, + qualifier=self.QUALIFIER, + timestamp_micros=self.TIMESTAMP_MICROS, + value=self.VALUE, + commit_row=True, + ) + chunks = [chunk] + + response = _ReadRowsResponseV2(chunks) + response_iterator = _MockCancellableIterator(response) + + # Patch the stub used by the API method. + client._data_stub = _FakeStub(response_iterator) + + rows = [] + for row in table.yield_rows(): + rows.append(row) + result = rows[0] + + self.assertEqual(result.row_key, self.ROW_KEY) + def test_sample_row_keys(self): from tests.unit._testing import _FakeStub @@ -1184,3 +1216,24 @@ class _Instance(object): def __init__(self, name, client=None): self.name = name self._client = client + + +class _MockCancellableIterator(object): + + cancel_calls = 0 + + def __init__(self, *values): + self.iter_values = iter(values) + + def next(self): + return next(self.iter_values) + + def __next__(self): # pragma: NO COVER Py3k + return self.next() + + +class _ReadRowsResponseV2(object): + + def __init__(self, chunks, last_scanned_row_key=''): + self.chunks = chunks + self.last_scanned_row_key = last_scanned_row_key From 48a994bf8da10b5da5bb49b4ff8ababe56d0667e Mon Sep 17 00:00:00 2001 From: Aneep Tandel Date: Tue, 6 Feb 2018 03:04:34 +0530 Subject: [PATCH 112/892] Use `api_core.retry` for `mutate_row` (#4665) --- .../google/cloud/bigtable/row.py | 22 ++++++++++++++++--- .../tests/unit/test_row.py | 20 +++++++++++++++++ 2 files changed, 39 insertions(+), 3 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row.py index da9678cdf892..9b6de8a5a728 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row.py @@ -15,10 +15,14 @@ """User-friendly container for Google Cloud Bigtable Row.""" +import functools import struct +import grpc import six +from google.api_core import exceptions +from google.api_core import retry from google.cloud._helpers import _datetime_from_microseconds from google.cloud._helpers import _microseconds_from_datetime from google.cloud._helpers import _to_bytes @@ -236,6 +240,12 @@ def _delete_cells(self, column_family_id, columns, time_range=None, mutations_list.extend(to_append) +def _retry_commit_exception(exc): + if isinstance(exc, grpc.RpcError): + exc = exceptions.from_grpc_error(exc) + 
return isinstance(exc, exceptions.ServiceUnavailable) + + class DirectRow(_SetDeleteRow): """Google Cloud Bigtable Row for sending "direct" mutations. @@ -412,9 +422,15 @@ def commit(self): row_key=self._row_key, mutations=mutations_list, ) - # We expect a `google.protobuf.empty_pb2.Empty` - client = self._table._instance._client - client._data_stub.MutateRow(request_pb) + + commit = functools.partial( + self._table._instance._client._data_stub.MutateRow, + request_pb) + retry_ = retry.Retry( + predicate=_retry_commit_exception, + deadline=30) + retry_(commit)() + self.clear() def clear(self): diff --git a/packages/google-cloud-bigtable/tests/unit/test_row.py b/packages/google-cloud-bigtable/tests/unit/test_row.py index 5813e070152a..d9682c29c5ca 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row.py @@ -367,6 +367,26 @@ def test_commit(self): )]) self.assertEqual(row._pb_mutations, []) + def test_retry_commit_exception(self): + import grpc + import mock + + from google.cloud.bigtable.row import _retry_commit_exception + + class ErrorUnavailable(grpc.RpcError, grpc.Call): + """ErrorUnavailable exception""" + + message = 'Endpoint read failed' + error = mock.create_autospec(ErrorUnavailable, instance=True) + error.code.return_value = grpc.StatusCode.UNAVAILABLE + error.details.return_value = message + + result = _retry_commit_exception(error) + self.assertEqual(result, True) + + result = _retry_commit_exception(ValueError) + self.assertNotEqual(result, True) + def test_commit_too_many_mutations(self): from google.cloud._testing import _Monkey from google.cloud.bigtable import row as MUT From d9e7fe60f2fbc52eb592adab78bbd29b3895b97a Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Mon, 12 Feb 2018 15:54:10 -0800 Subject: [PATCH 113/892] Remove gax usage from BigTable (#4873) --- .../google/cloud/bigtable/client.py | 12 ++++-------- packages/google-cloud-bigtable/setup.py | 3 +-- 2 files changed, 5 insertions(+), 10 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py index b30ed1aefb36..454cb1816e55 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py @@ -31,7 +31,7 @@ import os -from google.gax.utils import metrics +from google.api_core import gapic_v1 from google.longrunning import operations_grpc from google.cloud._helpers import make_insecure_stub @@ -67,13 +67,9 @@ READ_ONLY_SCOPE = 'https://www.googleapis.com/auth/bigtable.data.readonly' """Scope for reading table data.""" -_METRICS_HEADERS = ( - ('gccl', __version__), -) -_HEADER_STR = metrics.stringify(metrics.fill(_METRICS_HEADERS)) -_GRPC_EXTRA_OPTIONS = ( - ('x-goog-api-client', _HEADER_STR), -) +_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + client_library_version=__version__,) +_GRPC_EXTRA_OPTIONS = (_CLIENT_INFO.to_grpc_metadata(),) # NOTE: 'grpc.max_message_length' will no longer be recognized in # grpcio 1.1 and later. 
_MAX_MSG_LENGTH_100MB = 100 * 1024 * 1024 diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index fdd2cc1e16c6..a4dadf0e389c 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -52,8 +52,7 @@ REQUIREMENTS = [ 'google-cloud-core >= 0.28.0, < 0.29dev', - 'google-api-core >= 0.1.1, < 0.2.0dev', - 'google-gax >= 0.15.7, < 0.16dev', + 'google-api-core[grpc] >= 0.1.1, < 0.2.0dev', ] setup( From f89c0f10b3250e0202b92d5b2e2225e363805085 Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Thu, 22 Feb 2018 10:28:50 -0800 Subject: [PATCH 114/892] Normalize all setup.py files (#4909) --- packages/google-cloud-bigtable/setup.py | 99 +++++++++++++++---------- 1 file changed, 59 insertions(+), 40 deletions(-) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index a4dadf0e389c..175e743dd7b4 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -1,4 +1,4 @@ -# Copyright 2016 Google LLC +# Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,59 +12,78 @@ # See the License for the specific language governing permissions and # limitations under the License. +import io import os -from setuptools import find_packages -from setuptools import setup +import setuptools -PACKAGE_ROOT = os.path.abspath(os.path.dirname(__file__)) +# Package metadata. + +name = 'google-cloud-bigtable' +description = 'Google Cloud Bigtable API client library' +version = '0.28.2.dev1' +# Should be one of: +# 'Development Status :: 3 - Alpha' +# 'Development Status :: 4 - Beta' +# 'Development Status :: 5 - Stable' +release_status = 'Development Status :: 3 - Alpha' +dependencies = [ + 'google-cloud-core<0.29dev,>=0.28.0', + 'google-api-core[grpc]<0.2.0dev,>=0.1.1', +] +extras = { +} + + +# Setup boilerplate below this line. + +package_root = os.path.abspath(os.path.dirname(__file__)) + +readme_filename = os.path.join(package_root, 'README.rst') +with io.open(readme_filename, encoding='utf-8') as readme_file: + readme = readme_file.read() + +# Only include packages under the 'google' namespace. Do not include tests, +# benchmarks, etc. +packages = [ + package for package in setuptools.find_packages() + if package.startswith('google')] + +# Determine which namespaces are needed. +namespaces = ['google'] +if 'google.cloud' in packages: + namespaces.append('google.cloud') -with open(os.path.join(PACKAGE_ROOT, 'README.rst')) as file_obj: - README = file_obj.read() -# NOTE: This is duplicated throughout and we should try to -# consolidate. 
-SETUP_BASE = { - 'author': 'Google Cloud Platform', - 'author_email': 'googleapis-publisher@google.com', - 'scripts': [], - 'url': 'https://github.com/GoogleCloudPlatform/google-cloud-python', - 'license': 'Apache 2.0', - 'platforms': 'Posix; MacOS X; Windows', - 'include_package_data': True, - 'zip_safe': False, - 'classifiers': [ - 'Development Status :: 3 - Alpha', +setuptools.setup( + name=name, + version=version, + description=description, + long_description=readme, + author='Google LLC', + author_email='googleapis-packages@google.com', + license='Apache 2.0', + url='https://github.com/GoogleCloudPlatform/google-cloud-python', + classifiers=[ + release_status, 'Intended Audience :: Developers', 'License :: OSI Approved :: Apache Software License', - 'Operating System :: OS Independent', + 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', + 'Operating System :: OS Independent', 'Topic :: Internet', ], -} - - -REQUIREMENTS = [ - 'google-cloud-core >= 0.28.0, < 0.29dev', - 'google-api-core[grpc] >= 0.1.1, < 0.2.0dev', -] - -setup( - name='google-cloud-bigtable', - version='0.28.2.dev1', - description='Python Client for Google Cloud Bigtable', - long_description=README, - namespace_packages=[ - 'google', - 'google.cloud', - ], - packages=find_packages(exclude=('tests*',)), - install_requires=REQUIREMENTS, - **SETUP_BASE + platforms='Posix; MacOS X; Windows', + packages=packages, + namespace_packages=namespaces, + install_requires=dependencies, + extras_require=extras, + include_package_data=True, + zip_safe=False, ) From af1445608a4f885fc709eebb1695f952927673e5 Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Fri, 23 Feb 2018 16:34:37 -0800 Subject: [PATCH 115/892] Re-enable lint for tests, remove usage of pylint (#4921) --- packages/google-cloud-bigtable/.flake8 | 7 ++-- packages/google-cloud-bigtable/nox.py | 11 ++----- .../tests/unit/test_row_data.py | 6 ++-- .../tests/unit/test_table.py | 33 +++++++++---------- 4 files changed, 25 insertions(+), 32 deletions(-) diff --git a/packages/google-cloud-bigtable/.flake8 b/packages/google-cloud-bigtable/.flake8 index 5a380e53da40..1f44a90f8195 100644 --- a/packages/google-cloud-bigtable/.flake8 +++ b/packages/google-cloud-bigtable/.flake8 @@ -1,8 +1,9 @@ [flake8] exclude = - # BigTable includes generated code in the manual layer; - # do not lint this. - google/cloud/bigtable/_generated/*.py, + # Exclude generated code. + **/proto/** + **/gapic/** + *_pb2.py # Standard linting exemptions. __pycache__, diff --git a/packages/google-cloud-bigtable/nox.py b/packages/google-cloud-bigtable/nox.py index 94893f2472c5..7dbfb79dac1d 100644 --- a/packages/google-cloud-bigtable/nox.py +++ b/packages/google-cloud-bigtable/nox.py @@ -99,16 +99,9 @@ def lint(session): serious code quality issues. """ session.interpreter = 'python3.6' - session.install('flake8', 'pylint', 'gcp-devrel-py-tools', *LOCAL_DEPS) + session.install('flake8') session.install('.') - session.run('flake8', 'google/cloud/bigtable') - session.run( - 'gcp-devrel-py-tools', 'run-pylint', - '--config', 'pylint.config.py', - '--library-filesets', 'google', - '--test-filesets', 'tests', - # Temporarily allow this to fail. 
- success_codes=range(0, 100)) + session.run('flake8', 'google', 'tests') @nox.session diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_data.py b/packages/google-cloud-bigtable/tests/unit/test_row_data.py index c58ea351633b..9dd65750967f 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_data.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_data.py @@ -14,8 +14,6 @@ import unittest -import timeit -import csv class TestCell(unittest.TestCase): @@ -47,7 +45,8 @@ def _from_pb_test_helper(self, labels=None): else: cell_pb = data_v2_pb2.Cell( value=value, timestamp_micros=timestamp_micros, labels=labels) - cell_expected = self._make_one(value, timestamp_micros, labels=labels) + cell_expected = self._make_one( + value, timestamp_micros, labels=labels) klass = self._get_target_class() result = klass.from_pb(cell_pb) @@ -631,6 +630,7 @@ def test_empty_cell_chunk(self): def test_empty_second_qualifier(self): self._match_results('empty second qualifier') + def _flatten_cells(prd): # Match results format from JSON testcases. # Doesn't handle error cases. diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index 5ed8221f4884..b5deca9e4059 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -642,7 +642,6 @@ def test_callable_empty_rows(self): self.assertEqual(len(statuses), 0) def test_callable_no_retry_strategy(self): - from google.api_core.retry import Retry from google.cloud.bigtable.row import DirectRow # Setup: @@ -685,7 +684,6 @@ def test_callable_no_retry_strategy(self): self.assertEqual(result, expected_result) def test_callable_retry(self): - from google.api_core.retry import Retry from google.cloud.bigtable.row import DirectRow from google.cloud.bigtable.table import DEFAULT_RETRY @@ -735,7 +733,6 @@ def test_callable_retry(self): self.assertEqual(result, expected_result) def test_callable_retry_timeout(self): - from google.api_core.retry import Retry from google.cloud.bigtable.row import DirectRow from google.cloud.bigtable.table import DEFAULT_RETRY @@ -811,8 +808,8 @@ def test_do_mutate_retryable_rows(self): # Patch the stub used by the API method. client._data_stub = _FakeStub([response]) - worker = self._make_worker(table._instance._client, - table.name, [row_1, row_2]) + worker = self._make_worker( + table._instance._client, table.name, [row_1, row_2]) statuses = worker._do_mutate_retryable_rows() result = [status.code for status in statuses] @@ -853,8 +850,8 @@ def test_do_mutate_retryable_rows_retry(self): # Patch the stub used by the API method. client._data_stub = _FakeStub([response]) - worker = self._make_worker(table._instance._client, - table.name, [row_1, row_2, row_3]) + worker = self._make_worker( + table._instance._client, table.name, [row_1, row_2, row_3]) with self.assertRaises(_BigtableRetryableError): worker._do_mutate_retryable_rows() @@ -902,8 +899,9 @@ def test_do_mutate_retryable_rows_second_retry(self): # Patch the stub used by the API method. 
client._data_stub = _FakeStub([response]) - worker = self._make_worker(table._instance._client, - table.name, [row_1, row_2, row_3, row_4]) + worker = self._make_worker( + table._instance._client, + table.name, [row_1, row_2, row_3, row_4]) worker.responses_statuses = self._make_responses_statuses([ self.SUCCESS, self.RETRYABLE_1, @@ -954,8 +952,9 @@ def test_do_mutate_retryable_rows_second_try(self): # Patch the stub used by the API method. client._data_stub = _FakeStub([response]) - worker = self._make_worker(table._instance._client, - table.name, [row_1, row_2, row_3, row_4]) + worker = self._make_worker( + table._instance._client, + table.name, [row_1, row_2, row_3, row_4]) worker.responses_statuses = self._make_responses_statuses([ self.SUCCESS, self.RETRYABLE_1, @@ -974,7 +973,6 @@ def test_do_mutate_retryable_rows_second_try(self): def test_do_mutate_retryable_rows_second_try_no_retryable(self): from google.cloud.bigtable.row import DirectRow - from tests.unit._testing import _FakeStub # Setup: # - Mutate 2 rows. @@ -993,10 +991,10 @@ def test_do_mutate_retryable_rows_second_try_no_retryable(self): row_2 = DirectRow(row_key=b'row_key_2', table=table) row_2.set_cell('cf', b'col', b'value2') - worker = self._make_worker(table._instance._client, - table.name, [row_1, row_2]) + worker = self._make_worker( + table._instance._client, table.name, [row_1, row_2]) worker.responses_statuses = self._make_responses_statuses( - [self.SUCCESS, self.NON_RETRYABLE]) + [self.SUCCESS, self.NON_RETRYABLE]) statuses = worker._do_mutate_retryable_rows() @@ -1023,8 +1021,9 @@ def test_do_mutate_retryable_rows_mismatch_num_responses(self): # Patch the stub used by the API method. client._data_stub = _FakeStub([response]) - worker = self._make_worker(table._instance._client, - table.name, [row_1, row_2]) + worker = self._make_worker( + table._instance._client, + table.name, [row_1, row_2]) with self.assertRaises(RuntimeError): worker._do_mutate_retryable_rows() From 5da2c4ac695a478b9aa2d315ac08ced77f3d8717 Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Mon, 26 Feb 2018 14:24:04 -0800 Subject: [PATCH 116/892] Install local dependencies when running lint (#4936) --- packages/google-cloud-bigtable/nox.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/nox.py b/packages/google-cloud-bigtable/nox.py index 7dbfb79dac1d..f8bfb4b33c8a 100644 --- a/packages/google-cloud-bigtable/nox.py +++ b/packages/google-cloud-bigtable/nox.py @@ -99,7 +99,7 @@ def lint(session): serious code quality issues. 
""" session.interpreter = 'python3.6' - session.install('flake8') + session.install('flake8', *LOCAL_DEPS) session.install('.') session.run('flake8', 'google', 'tests') From 67ca8f2b527c83391e57385a4a4ede36c1a1a820 Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Tue, 27 Feb 2018 11:17:14 -0800 Subject: [PATCH 117/892] Update dependency range for api-core to include v1.0.0 releases (#4944) --- packages/google-cloud-bigtable/setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 175e743dd7b4..8ef8c18512ab 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -30,7 +30,7 @@ release_status = 'Development Status :: 3 - Alpha' dependencies = [ 'google-cloud-core<0.29dev,>=0.28.0', - 'google-api-core[grpc]<0.2.0dev,>=0.1.1', + 'google-api-core[grpc]<2.0.0dev,>=0.1.1', ] extras = { } From 316c11c687099ca3e7898daa494fce54dc2dfd69 Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Wed, 28 Feb 2018 09:04:11 -0800 Subject: [PATCH 118/892] Release bigtable 0.29.0 (#4951) --- packages/google-cloud-bigtable/CHANGELOG.md | 30 +++++++++++++++++++++ packages/google-cloud-bigtable/setup.py | 2 +- 2 files changed, 31 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index b70baa849230..9d742c2e55d0 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,36 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## 0.29.0 + +### New features + +- Use `api_core.retry` for `mutate_row` (#4665, #4341) +- Added a row generator on a table. (#4679) + +### Implementation changes + +- Remove gax usage from BigTable (#4873) +- BigTable: Cell.from_pb() performance improvement (#4745) + +### Dependencies + +- Update dependency range for api-core to include v1.0.0 releases (#4944) + +### Documentation + +- Minor typo (#4758) +- Row filter end points documentation error (#4667) +- Removing "rename" from bigtable table.py comments (#4526) +- Small docs/hygiene tweaks after #4256. 
(#4333) + +### Testing and internal changes + +- Install local dependencies when running lint (#4936) +- Re-enable lint for tests, remove usage of pylint (#4921) +- Normalize all setup.py files (#4909) +- Timestamp system test fix (#4765) + ## 0.28.1 ### Implementation Changes diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 8ef8c18512ab..b23b6b16f857 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -22,7 +22,7 @@ name = 'google-cloud-bigtable' description = 'Google Cloud Bigtable API client library' -version = '0.28.2.dev1' +version = '0.29.0' # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' From 544bb9c8cac90667486025964ac395c847523ed9 Mon Sep 17 00:00:00 2001 From: zakons Date: Wed, 14 Mar 2018 14:44:17 -0400 Subject: [PATCH 119/892] BigTable: provide better access to cell values (#4908) --- .../google/cloud/bigtable/row_data.py | 106 ++++++++++++++++++ .../tests/unit/test_row_data.py | 97 ++++++++++++++++ 2 files changed, 203 insertions(+) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py index 7ae511c621f6..b83898a9dd88 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py @@ -21,6 +21,15 @@ from google.cloud._helpers import _datetime_from_microseconds from google.cloud._helpers import _to_bytes +_MISSING_COLUMN_FAMILY = ( + 'Column family {} is not among the cells stored in this row.') +_MISSING_COLUMN = ( + 'Column {} is not among the cells stored in this row in the ' + 'column family {}.') +_MISSING_INDEX = ( + 'Index {!r} is not valid for the cells stored in this row for column {} ' + 'in the column family {}. There are {} such cells.') + class Cell(object): """Representation of a Google Cloud Bigtable Cell. @@ -175,6 +184,103 @@ def row_key(self): """ return self._row_key + def find_cells(self, column_family_id, column): + """Get a time series of cells stored on this instance. + + Args: + column_family_id (str): The ID of the column family. Must be of the + form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + column (bytes): The column within the column family where the cells + are located. + + Returns: + List[~google.cloud.bigtable.row_data.Cell]: The cells stored in the + specified column. + + Raises: + KeyError: If ``column_family_id`` is not among the cells stored + in this row. + KeyError: If ``column`` is not among the cells stored in this row + for the given ``column_family_id``. + """ + try: + column_family = self._cells[column_family_id] + except KeyError: + raise KeyError(_MISSING_COLUMN_FAMILY.format(column_family_id)) + + try: + cells = column_family[column] + except KeyError: + raise KeyError(_MISSING_COLUMN.format(column, column_family_id)) + + return cells + + def cell_value(self, column_family_id, column, index=0): + """Get a single cell value stored on this instance. + + Args: + column_family_id (str): The ID of the column family. Must be of the + form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + column (bytes): The column within the column family where the cell + is located. + index (Optional[int]): The offset within the series of values. If + not specified, will return the first cell. + + Returns: + ~google.cloud.bigtable.row_data.Cell value: The cell value stored + in the specified column and specified index. 
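A usage sketch of the cell accessors this commit introduces (illustrative only, not part of the patch; the 'cf1' family and b'greeting' column are made-up names, and _cells is populated by hand the same way the unit tests further down do):

    from google.cloud.bigtable.row_data import Cell, PartialRowData

    # Build a row in memory, mirroring how the unit tests populate _cells.
    row = PartialRowData(b'row-key')
    row._cells = {
        'cf1': {b'greeting': [Cell(b'hello', 1000000), Cell(b'hi', 2000000)]},
    }

    # find_cells returns the full list of cells for a (family, column) pair.
    cells = row.find_cells('cf1', b'greeting')
    assert len(cells) == 2

    # cell_value returns one value; index defaults to 0 (the first cell).
    assert row.cell_value('cf1', b'greeting') == b'hello'
    assert row.cell_value('cf1', b'greeting', index=1) == b'hi'

    # cell_values yields (value, timestamp_micros) pairs, optionally capped.
    values = [v for v, _ts in row.cell_values('cf1', b'greeting', max_count=1)]
    assert values == [b'hello']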
+ + Raises: + KeyError: If ``column_family_id`` is not among the cells stored + in this row. + KeyError: If ``column`` is not among the cells stored in this row + for the given ``column_family_id``. + IndexError: If ``index`` cannot be found within the cells stored + in this row for the given ``column_family_id``, ``column`` + pair. + """ + cells = self.find_cells(column_family_id, column) + + try: + cell = cells[index] + except (TypeError, IndexError): + num_cells = len(cells) + msg = _MISSING_INDEX.format( + index, column, column_family_id, num_cells) + raise IndexError(msg) + + return cell.value + + def cell_values(self, column_family_id, column, max_count=None): + """Get a time series of cells stored on this instance. + + Args: + column_family_id (str): The ID of the column family. Must be of the + form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + column (bytes): The column within the column family where the cells + are located. + max_count (int): The maximum number of cells to use. + + Returns: + A generator which provides: cell.value, cell.timestamp_micros + for each cell in the list of cells + + Raises: + KeyError: If ``column_family_id`` is not among the cells stored + in this row. + KeyError: If ``column`` is not among the cells stored in this row + for the given ``column_family_id``. + """ + cells = self.find_cells(column_family_id, column) + if max_count is None: + max_count = len(cells) + + for index, cell in enumerate(cells): + if index == max_count: + break + + yield cell.value, cell.timestamp_micros + class InvalidReadRowsResponse(RuntimeError): """Exception raised to to invalid response data from back-end.""" diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_data.py b/packages/google-cloud-bigtable/tests/unit/test_row_data.py index 9dd65750967f..2c9299d213ff 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_data.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_data.py @@ -170,6 +170,97 @@ def test_to_dict(self): } self.assertEqual(result, expected_result) + def test_cell_value(self): + family_name = u'name1' + qualifier = b'col1' + cell = _make_cell(b'value-bytes') + + partial_row_data = self._make_one(None) + partial_row_data._cells = { + family_name: { + qualifier: [cell], + }, + } + + result = partial_row_data.cell_value(family_name, qualifier) + self.assertEqual(result, cell.value) + + def test_cell_value_invalid_index(self): + family_name = u'name1' + qualifier = b'col1' + cell = _make_cell(b'') + + partial_row_data = self._make_one(None) + partial_row_data._cells = { + family_name: { + qualifier: [cell], + }, + } + + with self.assertRaises(IndexError): + partial_row_data.cell_value(family_name, qualifier, index=None) + + def test_cell_value_invalid_column_family_key(self): + family_name = u'name1' + qualifier = b'col1' + + partial_row_data = self._make_one(None) + + with self.assertRaises(KeyError): + partial_row_data.cell_value(family_name, qualifier) + + def test_cell_value_invalid_column_key(self): + family_name = u'name1' + qualifier = b'col1' + + partial_row_data = self._make_one(None) + partial_row_data._cells = { + family_name: {}, + } + + with self.assertRaises(KeyError): + partial_row_data.cell_value(family_name, qualifier) + + def test_cell_values(self): + family_name = u'name1' + qualifier = b'col1' + cell = _make_cell(b'value-bytes') + + partial_row_data = self._make_one(None) + partial_row_data._cells = { + family_name: { + qualifier: [cell], + }, + } + + values = [] + for value, timestamp_micros in 
partial_row_data.cell_values( + family_name, qualifier): + values.append(value) + + self.assertEqual(values[0], cell.value) + + def test_cell_values_with_max_count(self): + family_name = u'name1' + qualifier = b'col1' + cell_1 = _make_cell(b'value-bytes-1') + cell_2 = _make_cell(b'value-bytes-2') + + partial_row_data = self._make_one(None) + partial_row_data._cells = { + family_name: { + qualifier: [cell_1, cell_2], + }, + } + + values = [] + for value, timestamp_micros in partial_row_data.cell_values( + family_name, qualifier, max_count=1): + values.append(value) + + self.assertEqual(1, len(values)) + self.assertEqual(values[0], cell_1.value) + def test_cells_property(self): partial_row_data = self._make_one(None) cells = {1: 2} @@ -732,3 +823,9 @@ def _ReadRowsResponseCellChunkPB(*args, **kw): message.family_name.value = family_name message.qualifier.value = qualifier return message + + +def _make_cell(value): + from google.cloud.bigtable import row_data + + return row_data.Cell(value, TestCell.timestamp_micros) From ce8e3e00d7ca6bbfb882c9bbe9c5d1618c818fc8 Mon Sep 17 00:00:00 2001 From: Jon Wayne Parrott Date: Thu, 15 Mar 2018 08:52:22 -0700 Subject: [PATCH 120/892] Fix bad trove classifier --- packages/google-cloud-bigtable/setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index b23b6b16f857..b7b9e6f7617d 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -26,7 +26,7 @@ # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' -# 'Development Status :: 5 - Stable' +# 'Development Status :: 5 - Production/Stable' release_status = 'Development Status :: 3 - Alpha' dependencies = [ 'google-cloud-core<0.29dev,>=0.28.0', From 3e2227fab4652100ddd50c13537ec99758299206 Mon Sep 17 00:00:00 2001 From: chemelnucfin Date: Thu, 15 Mar 2018 14:40:36 -0700 Subject: [PATCH 121/892] Bigtable: TimestampRanges must be milliseconds granularity (#5002) --- .../google/cloud/bigtable/row_filters.py | 8 +-- .../google-cloud-bigtable/tests/system.py | 10 ++++ .../tests/unit/test_row_filters.py | 54 +++++++++++-------- 3 files changed, 48 insertions(+), 24 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_filters.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_filters.py index 653966d3f838..3b41f9e59052 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_filters.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_filters.py @@ -279,10 +279,12 @@ def to_pb(self): timestamp_range_kwargs = {} if self.start is not None: timestamp_range_kwargs['start_timestamp_micros'] = ( - _microseconds_from_datetime(self.start)) + _microseconds_from_datetime(self.start) // 1000 * 1000) if self.end is not None: - timestamp_range_kwargs['end_timestamp_micros'] = ( - _microseconds_from_datetime(self.end)) + end_time = _microseconds_from_datetime(self.end) + if end_time % 1000 != 0: + end_time = end_time // 1000 * 1000 + 1000 + timestamp_range_kwargs['end_timestamp_micros'] = end_time return data_v2_pb2.TimestampRange(**timestamp_range_kwargs) diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index 011bf3377343..12bd102690c0 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -337,6 +337,16 @@ def _write_to_row(self, row1=None, row2=None, 
row3=None, row4=None): cell4 = Cell(CELL_VAL4, timestamp4_micros) return cell1, cell2, cell3, cell4 + def test_timestamp_filter_millisecond_granularity(self): + from google.cloud.bigtable import row_filters + + end = datetime.datetime.now() + start = end - datetime.timedelta(minutes=60) + timestamp_range = row_filters.TimestampRange(start=start, end=end) + timefilter = row_filters.TimestampRangeFilter(timestamp_range) + row_data = self._table.read_rows(filter_=timefilter) + row_data.consume_all() + def test_mutate_rows(self): row1 = self._table.row(ROW_KEY) row1.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1) diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_filters.py b/packages/google-cloud-bigtable/tests/unit/test_row_filters.py index e091f8cde542..9f485c2a5fb8 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_filters.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_filters.py @@ -285,41 +285,53 @@ def test___ne__same_value(self): comparison_val = (time_range1 != time_range2) self.assertFalse(comparison_val) - def _to_pb_helper(self, start_micros=None, end_micros=None): + def _to_pb_helper(self, pb_kwargs, start=None, end=None): import datetime from google.cloud._helpers import _EPOCH - - pb_kwargs = {} - - start = None - if start_micros is not None: - start = _EPOCH + datetime.timedelta(microseconds=start_micros) - pb_kwargs['start_timestamp_micros'] = start_micros - end = None - if end_micros is not None: - end = _EPOCH + datetime.timedelta(microseconds=end_micros) - pb_kwargs['end_timestamp_micros'] = end_micros + if start is not None: + start = _EPOCH + datetime.timedelta(microseconds=start) + if end is not None: + end = _EPOCH + datetime.timedelta(microseconds=end) time_range = self._make_one(start=start, end=end) - expected_pb = _TimestampRangePB(**pb_kwargs) - self.assertEqual(time_range.to_pb(), expected_pb) + time_pb = time_range.to_pb() + self.assertEqual( + time_pb.start_timestamp_micros, + expected_pb.start_timestamp_micros) + self.assertEqual( + time_pb.end_timestamp_micros, + expected_pb.end_timestamp_micros) + self.assertEqual(time_pb, expected_pb) def test_to_pb(self): - # Makes sure already milliseconds granularity - start_micros = 30871000 - end_micros = 12939371000 - self._to_pb_helper(start_micros=start_micros, - end_micros=end_micros) + start_micros = 30871234 + end_micros = 12939371234 + start_millis = start_micros // 1000 * 1000 + self.assertEqual(start_millis, 30871000) + end_millis = end_micros // 1000 * 1000 + 1000 + self.assertEqual(end_millis, 12939372000) + pb_kwargs = {} + pb_kwargs['start_timestamp_micros'] = start_millis + pb_kwargs['end_timestamp_micros'] = end_millis + self._to_pb_helper(pb_kwargs, start=start_micros, end=end_micros) def test_to_pb_start_only(self): # Makes sure already milliseconds granularity start_micros = 30871000 - self._to_pb_helper(start_micros=start_micros) + start_millis = start_micros // 1000 * 1000 + self.assertEqual(start_millis, 30871000) + pb_kwargs = {} + pb_kwargs['start_timestamp_micros'] = start_millis + self._to_pb_helper(pb_kwargs, start=start_micros, end=None) def test_to_pb_end_only(self): # Makes sure already milliseconds granularity end_micros = 12939371000 - self._to_pb_helper(end_micros=end_micros) + end_millis = end_micros // 1000 * 1000 + self.assertEqual(end_millis, 12939371000) + pb_kwargs = {} + pb_kwargs['end_timestamp_micros'] = end_millis + self._to_pb_helper(pb_kwargs, start=None, end=end_micros) class TestTimestampRangeFilter(unittest.TestCase): From 
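The arithmetic behind the granularity change above, as a standalone sketch (not part of the patch): Cloud Bigtable timestamp ranges only honour millisecond granularity, so to_pb() now floors the start bound and rounds the end bound up to whole milliseconds.

    # Same numbers as the updated unit test.
    start_micros = 30871234
    end_micros = 12939371234

    start_millis = start_micros // 1000 * 1000      # floor to milliseconds
    end_millis = end_micros
    if end_millis % 1000 != 0:
        end_millis = end_millis // 1000 * 1000 + 1000    # round up

    assert start_millis == 30871000
    assert end_millis == 12939372000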
beaafbd7429601cda8645d9c397e4c3aa6f24262 Mon Sep 17 00:00:00 2001 From: Aneep Tandel Date: Tue, 20 Mar 2018 21:28:42 +0530 Subject: [PATCH 122/892] Add retry for yield_rows (#4882) --- .../google/cloud/bigtable/row.py | 3 +- .../google/cloud/bigtable/row_data.py | 91 ++++++++-- .../google/cloud/bigtable/table.py | 25 ++- .../tests/unit/test_row_data.py | 164 +++++++++++++----- .../tests/unit/test_table.py | 112 ++++++++---- 5 files changed, 290 insertions(+), 105 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row.py index 9b6de8a5a728..09bf1fe5ef10 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row.py @@ -243,7 +243,8 @@ def _delete_cells(self, column_family_id, columns, time_range=None, def _retry_commit_exception(exc): if isinstance(exc, grpc.RpcError): exc = exceptions.from_grpc_error(exc) - return isinstance(exc, exceptions.ServiceUnavailable) + return isinstance(exc, (exceptions.ServiceUnavailable, + exceptions.DeadlineExceeded)) class DirectRow(_SetDeleteRow): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py index b83898a9dd88..3a84d8261eb2 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py @@ -18,6 +18,10 @@ import copy import six +import grpc + +from google.api_core import exceptions +from google.api_core import retry from google.cloud._helpers import _datetime_from_microseconds from google.cloud._helpers import _to_bytes @@ -293,9 +297,12 @@ class InvalidChunk(RuntimeError): class PartialRowsData(object): """Convenience wrapper for consuming a ``ReadRows`` streaming response. - :type response_iterator: :class:`~google.cloud.exceptions.GrpcRendezvous` - :param response_iterator: A streaming iterator returned from a - ``ReadRows`` request. + :type read_method: :class:`client._data_stub.ReadRows` + :param read_method: ``ReadRows`` method. + + :type request: :class:`data_messages_v2_pb2.ReadRowsRequest` + :param request: The ``ReadRowsRequest`` message used to create a + ReadRowsResponse iterator. """ START = 'Start' # No responses yet processed. @@ -303,9 +310,8 @@ class PartialRowsData(object): ROW_IN_PROGRESS = 'Row in progress' # Some cells complete for row CELL_IN_PROGRESS = 'Cell in progress' # Incomplete cell for row - def __init__(self, response_iterator): - self._response_iterator = response_iterator - self._generator = YieldRowsData(response_iterator) + def __init__(self, read_method, request): + self._generator = YieldRowsData(read_method, request) # Fully-processed rows, keyed by `row_key` self.rows = {} @@ -313,7 +319,7 @@ def __init__(self, response_iterator): def __eq__(self, other): if not isinstance(other, self.__class__): return NotImplemented - return other._response_iterator == self._response_iterator + return other._generator == self._generator def __ne__(self, other): return not self == other @@ -340,12 +346,27 @@ def consume_all(self, max_loops=None): self.rows[row.row_key] = row +def _retry_read_rows_exception(exc): + if isinstance(exc, grpc.RpcError): + exc = exceptions.from_grpc_error(exc) + return isinstance(exc, (exceptions.ServiceUnavailable, + exceptions.DeadlineExceeded)) + + class YieldRowsData(object): """Convenience wrapper for consuming a ``ReadRows`` streaming response. 
- :type response_iterator: :class:`~google.cloud.exceptions.GrpcRendezvous` - :param response_iterator: A streaming iterator returned from a - ``ReadRows`` request. + :type read_method: :class:`client._data_stub.ReadRows` + :param read_method: ``ReadRows`` method. + + :type request: :class:`data_messages_v2_pb2.ReadRowsRequest` + :param request: The ``ReadRowsRequest`` message used to create a + ReadRowsResponse iterator. If the iterator fails, a new + iterator is created, allowing the scan to continue from + the point just beyond the last successfully read row, + identified by self.last_scanned_row_key. The retry happens + inside of the Retry class, using a predicate for the + expected exceptions during iteration. """ START = 'Start' # No responses yet processed. @@ -353,12 +374,9 @@ class YieldRowsData(object): ROW_IN_PROGRESS = 'Row in progress' # Some cells complete for row CELL_IN_PROGRESS = 'Cell in progress' # Incomplete cell for row - def __init__(self, response_iterator): - self._response_iterator = response_iterator + def __init__(self, read_method, request): # Counter for responses pulled from iterator self._counter = 0 - # Maybe cached from previous response - self._last_scanned_row_key = None # In-progress row, unset until first response, after commit/reset self._row = None # Last complete row, unset until first commit @@ -368,6 +386,12 @@ def __init__(self, response_iterator): # Last complete cell, unset until first completion, after new row self._previous_cell = None + # May be cached from previous response + self.last_scanned_row_key = None + self.read_method = read_method + self.request = request + self.response_iterator = read_method(request) + @property def state(self): """State machine state. @@ -376,7 +400,7 @@ def state(self): :returns: name of state corresponding to current row / chunk processing. """ - if self._last_scanned_row_key is None: + if self.last_scanned_row_key is None: return self.START if self._row is None: assert self._cell is None @@ -390,7 +414,35 @@ def state(self): def cancel(self): """Cancels the iterator, closing the stream.""" - self._response_iterator.cancel() + self.response_iterator.cancel() + + def _create_retry_request(self): + """Helper for :meth:`read_rows`.""" + row_range = self.request.rows.row_ranges.pop() + range_kwargs = {} + # start AFTER the row_key of the last successfully read row + range_kwargs['start_key_open'] = self.last_scanned_row_key + range_kwargs['end_key_open'] = row_range.end_key_open + self.request.rows.row_ranges.add(**range_kwargs) + + def _on_error(self, exc): + """Helper for :meth:`read_rows`.""" + # restart the read scan from AFTER the last successfully read row + if self.last_scanned_row_key: + self._create_retry_request() + + self.response_iterator = self.read_method(self.request) + + def _read_next(self): + """Helper for :meth:`read_rows`.""" + return six.next(self.response_iterator) + + def _read_next_response(self): + """Helper for :meth:`read_rows`.""" + retry_ = retry.Retry( + predicate=_retry_read_rows_exception, + deadline=60) + return retry_(self._read_next, on_error=self._on_error)() def read_rows(self): """Consume the ``ReadRowsResponse's`` from the stream. 
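A standalone sketch of the retry pattern _read_next_response relies on (not part of the patch; flaky_read is an invented stand-in for pulling from the gRPC stream): google.api_core.retry.Retry wraps a callable, the predicate decides which errors are retryable, and on_error lets the caller rebuild state; in YieldRowsData that means rewriting the row range to resume after last_scanned_row_key and reopening the stream.

    from google.api_core import exceptions, retry

    state = {'calls': 0}

    def flaky_read():
        # Stand-in for six.next(self.response_iterator): fail once, then succeed.
        state['calls'] += 1
        if state['calls'] == 1:
            raise exceptions.ServiceUnavailable('transient stream failure')
        return 'next-response'

    def on_error(exc):
        # In YieldRowsData._on_error this is where the request's row range is
        # rewritten to start just past last_scanned_row_key before the
        # ReadRows stream is re-opened.
        pass

    retry_ = retry.Retry(
        predicate=retry.if_exception_type(
            exceptions.ServiceUnavailable, exceptions.DeadlineExceeded),
        initial=0.1, deadline=60)

    assert retry_(flaky_read, on_error=on_error)() == 'next-response'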
@@ -401,17 +453,17 @@ def read_rows(self): """ while True: try: - response = six.next(self._response_iterator) + response = self._read_next_response() except StopIteration: break self._counter += 1 - if self._last_scanned_row_key is None: # first response + if self.last_scanned_row_key is None: # first response if response.last_scanned_row_key: raise InvalidReadRowsResponse() - self._last_scanned_row_key = response.last_scanned_row_key + self.last_scanned_row_key = response.last_scanned_row_key row = self._row cell = self._cell @@ -449,6 +501,7 @@ def read_rows(self): yield self._row + self.last_scanned_row_key = self._row.row_key self._row, self._previous_row = None, self._row self._previous_cell = None row = cell = None diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index 305cf1de5508..a2adec28ba83 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -15,6 +15,8 @@ """User-friendly container for Google Cloud Bigtable Table.""" +from grpc import StatusCode + from google.api_core.exceptions import RetryError from google.api_core.retry import if_exception_type from google.api_core.retry import Retry @@ -32,7 +34,6 @@ from google.cloud.bigtable.row import DirectRow from google.cloud.bigtable.row_data import PartialRowsData from google.cloud.bigtable.row_data import YieldRowsData -from grpc import StatusCode # Maximum number of mutations in bulk (MutateRowsRequest message): @@ -262,11 +263,10 @@ def read_row(self, row_key, filter_=None): :raises: :class:`ValueError ` if a commit row chunk is never encountered. """ - request_pb = _create_row_request(self.name, row_key=row_key, - filter_=filter_) + request = _create_row_request(self.name, row_key=row_key, + filter_=filter_) client = self._instance._client - response_iterator = client._data_stub.ReadRows(request_pb) - rows_data = PartialRowsData(response_iterator) + rows_data = PartialRowsData(client._data_stub.ReadRows, request) rows_data.consume_all() if rows_data.state not in (rows_data.NEW_ROW, rows_data.START): raise ValueError('The row remains partial / is not committed.') @@ -308,13 +308,12 @@ def read_rows(self, start_key=None, end_key=None, limit=None, :returns: A :class:`.PartialRowsData` convenience wrapper for consuming the streamed results. 
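For context, a usage sketch of the two read paths this commit touches (not part of the patch; it assumes a google.cloud.bigtable.table.Table named table has already been constructed elsewhere, and the row keys are placeholders):

    # Streaming, row by row, with the retrying YieldRowsData generator underneath.
    for row in table.yield_rows(start_key=b'row-key-1', end_key=b'row-key-2'):
        print(row.row_key)

    # Or buffer everything through PartialRowsData.
    rows = table.read_rows(start_key=b'row-key-1', end_key=b'row-key-2')
    rows.consume_all()
    for key, row in rows.rows.items():
        print(key, row.cells)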
""" - request_pb = _create_row_request( + request = _create_row_request( self.name, start_key=start_key, end_key=end_key, filter_=filter_, limit=limit, end_inclusive=end_inclusive) client = self._instance._client - response_iterator = client._data_stub.ReadRows(request_pb) - # We expect an iterator of `data_messages_v2_pb2.ReadRowsResponse` - return PartialRowsData(response_iterator) + + return PartialRowsData(client._data_stub.ReadRows, request) def yield_rows(self, start_key=None, end_key=None, limit=None, filter_=None): @@ -343,13 +342,13 @@ def yield_rows(self, start_key=None, end_key=None, limit=None, :rtype: :class:`.PartialRowData` :returns: A :class:`.PartialRowData` for each row returned """ - request_pb = _create_row_request( + + request = _create_row_request( self.name, start_key=start_key, end_key=end_key, filter_=filter_, limit=limit) client = self._instance._client - response_iterator = client._data_stub.ReadRows(request_pb) - # We expect an iterator of `data_messages_v2_pb2.ReadRowsResponse` - generator = YieldRowsData(response_iterator) + + generator = YieldRowsData(client._data_stub.ReadRows, request) for row in generator.read_rows(): yield row diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_data.py b/packages/google-cloud-bigtable/tests/unit/test_row_data.py index 2c9299d213ff..942b4e735d2c 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_data.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_data.py @@ -15,6 +15,8 @@ import unittest +import mock + class TestCell(unittest.TestCase): timestamp_micros = 18738724000 # Make sure millis granularity @@ -275,6 +277,11 @@ def test_row_key_getter(self): self.assertIs(partial_row_data.row_key, row_key) +class _Client(object): + + data_stub = None + + class TestPartialRowsData(unittest.TestCase): @staticmethod @@ -287,39 +294,61 @@ def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def test_constructor(self): - response_iterator = object() - partial_rows_data = self._make_one(response_iterator) - self.assertIs(partial_rows_data._response_iterator, - response_iterator) + client = _Client() + client._data_stub = mock.MagicMock() + request = object() + partial_rows_data = self._make_one(client._data_stub.ReadRows, + request) + self.assertIs(partial_rows_data._generator.request, + request) self.assertEqual(partial_rows_data.rows, {}) def test___eq__(self): - response_iterator = object() - partial_rows_data1 = self._make_one(response_iterator) - partial_rows_data2 = self._make_one(response_iterator) - self.assertEqual(partial_rows_data1, partial_rows_data2) + client = _Client() + client._data_stub = mock.MagicMock() + request = object() + partial_rows_data1 = self._make_one(client._data_stub.ReadRows, + request) + partial_rows_data2 = self._make_one(client._data_stub.ReadRows, + request) + self.assertEqual(partial_rows_data1.rows, partial_rows_data2.rows) def test___eq__type_differ(self): - partial_rows_data1 = self._make_one(None) + client = _Client() + client._data_stub = mock.MagicMock() + request = object() + partial_rows_data1 = self._make_one(client._data_stub.ReadRows, + request) partial_rows_data2 = object() self.assertNotEqual(partial_rows_data1, partial_rows_data2) def test___ne__same_value(self): - response_iterator = object() - partial_rows_data1 = self._make_one(response_iterator) - partial_rows_data2 = self._make_one(response_iterator) + client = _Client() + client._data_stub = mock.MagicMock() + request = object() + partial_rows_data1 = 
self._make_one(client._data_stub.ReadRows, + request) + partial_rows_data2 = self._make_one(client._data_stub.ReadRows, + request) comparison_val = (partial_rows_data1 != partial_rows_data2) - self.assertFalse(comparison_val) + self.assertTrue(comparison_val) def test___ne__(self): - response_iterator1 = object() - partial_rows_data1 = self._make_one(response_iterator1) - response_iterator2 = object() - partial_rows_data2 = self._make_one(response_iterator2) + client = _Client() + client._data_stub = mock.MagicMock() + request = object() + partial_rows_data1 = self._make_one(client._data_stub.ReadRows, + request) + partial_rows_data2 = self._make_one(client._data_stub.ReadRows, + request) self.assertNotEqual(partial_rows_data1, partial_rows_data2) def test_rows_getter(self): - partial_rows_data = self._make_one(None) + client = _Client() + client._data_stub = mock.MagicMock() + request = object() + partial_rows_data = self._make_one(client._data_stub.ReadRows, + request) partial_rows_data.rows = value = object() self.assertIs(partial_rows_data.rows, value) @@ -341,18 +370,32 @@ def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def test_state_start(self): - yrd = self._make_one([]) + client = _Client() + iterator = _MockCancellableIterator() + client._data_stub = mock.MagicMock() + client._data_stub.ReadRows.side_effect = [iterator] + request = object() + yrd = self._make_one(client._data_stub.ReadRows, request) self.assertEqual(yrd.state, yrd.START) def test_state_new_row_w_row(self): - yrd = self._make_one([]) - yrd._last_scanned_row_key = '' + client = _Client() + iterator = _MockCancellableIterator() + client._data_stub = mock.MagicMock() + client._data_stub.ReadRows.side_effect = [iterator] + request = object() + yrd = self._make_one(client._data_stub.ReadRows, request) + yrd.last_scanned_row_key = '' yrd._row = object() self.assertEqual(yrd.state, yrd.NEW_ROW) def test_cancel(self): + client = _Client() response_iterator = _MockCancellableIterator() - yield_rows_data = self._make_one(response_iterator) + client._data_stub = mock.MagicMock() + client._data_stub.ReadRows.side_effect = [response_iterator] + request = object() + yield_rows_data = self._make_one(client._data_stub.ReadRows, request) self.assertEqual(response_iterator.cancel_calls, 0) yield_rows_data.cancel() self.assertEqual(response_iterator.cancel_calls, 1) @@ -360,7 +403,10 @@ def test_cancel(self): # 'consume_next' tested via 'TestPartialRowsData_JSON_acceptance_tests' def test__copy_from_current_unset(self): - yrd = self._make_one([]) + client = _Client() + client._data_stub = mock.MagicMock() + request = object() + yrd = self._make_one(client._data_stub.ReadRows, request) chunks = _generate_cell_chunks(['']) chunk = chunks[0] yrd._copy_from_current(chunk) @@ -376,7 +422,10 @@ def test__copy_from_current_blank(self): QUALIFIER = b'C' TIMESTAMP_MICROS = 100 LABELS = ['L1', 'L2'] - yrd = self._make_one([]) + client = _Client() + client._data_stub = mock.MagicMock() + request = object() + yrd = self._make_one(client._data_stub.ReadRows, request) yrd._cell = _PartialCellData() chunks = _generate_cell_chunks(['']) chunk = chunks[0] @@ -393,7 +442,10 @@ def test__copy_from_current_blank(self): self.assertEqual(chunk.labels, LABELS) def test__copy_from_previous_unset(self): - yrd = self._make_one([]) + client = _Client() + client._data_stub = mock.MagicMock() + request = object() + yrd = self._make_one(client._data_stub.ReadRows, request) cell = _PartialCellData() 
yrd._copy_from_previous(cell) self.assertEqual(cell.row_key, '') @@ -408,7 +460,10 @@ def test__copy_from_previous_blank(self): QUALIFIER = b'C' TIMESTAMP_MICROS = 100 LABELS = ['L1', 'L2'] - yrd = self._make_one([]) + client = _Client() + client._data_stub = mock.MagicMock() + request = object() + yrd = self._make_one(client._data_stub.ReadRows, request) cell = _PartialCellData( row_key=ROW_KEY, family_name=FAMILY_NAME, @@ -430,7 +485,10 @@ def test__copy_from_previous_filled(self): QUALIFIER = b'C' TIMESTAMP_MICROS = 100 LABELS = ['L1', 'L2'] - yrd = self._make_one([]) + client = _Client() + client._data_stub = mock.MagicMock() + request = object() + yrd = self._make_one(client._data_stub.ReadRows, request) yrd._previous_cell = _PartialCellData( row_key=ROW_KEY, family_name=FAMILY_NAME, @@ -449,33 +507,45 @@ def test__copy_from_previous_filled(self): def test_invalid_last_scanned_row_key_on_start(self): from google.cloud.bigtable.row_data import InvalidReadRowsResponse + client = _Client() response = _ReadRowsResponseV2(chunks=(), last_scanned_row_key='ABC') iterator = _MockCancellableIterator(response) - yrd = self._make_one(iterator) + client._data_stub = mock.MagicMock() + client._data_stub.ReadRows.side_effect = [iterator] + request = object() + yrd = self._make_one(client._data_stub.ReadRows, request) with self.assertRaises(InvalidReadRowsResponse): self._consume_all(yrd) def test_valid_last_scanned_row_key_on_start(self): + client = _Client() response = _ReadRowsResponseV2( chunks=(), last_scanned_row_key='AFTER') iterator = _MockCancellableIterator(response) - yrd = self._make_one(iterator) - yrd._last_scanned_row_key = 'BEFORE' + client._data_stub = mock.MagicMock() + client._data_stub.ReadRows.side_effect = [iterator] + request = object() + yrd = self._make_one(client._data_stub.ReadRows, request) + yrd.last_scanned_row_key = 'BEFORE' self._consume_all(yrd) - self.assertEqual(yrd._last_scanned_row_key, 'AFTER') + self.assertEqual(yrd.last_scanned_row_key, 'AFTER') def test_invalid_empty_chunk(self): from google.cloud.bigtable.row_data import InvalidChunk + client = _Client() chunks = _generate_cell_chunks(['']) response = _ReadRowsResponseV2(chunks) iterator = _MockCancellableIterator(response) - yrd = self._make_one(iterator) + client._data_stub = mock.MagicMock() + client._data_stub.ReadRows.side_effect = [iterator] + request = object() + yrd = self._make_one(client._data_stub.ReadRows, request) with self.assertRaises(InvalidChunk): self._consume_all(yrd) def test_yield_rows_data(self): - from google.cloud.bigtable.row_data import YieldRowsData + client = _Client() chunk = _ReadRowsResponseCellChunkPB( row_key=self.ROW_KEY, @@ -489,7 +559,12 @@ def test_yield_rows_data(self): response = _ReadRowsResponseV2(chunks) iterator = _MockCancellableIterator(response) - yrd = YieldRowsData(iterator) + client._data_stub = mock.MagicMock() + client._data_stub.ReadRows.side_effect = [iterator] + + request = object() + + yrd = self._make_one(client._data_stub.ReadRows, request) rows = [] for row in yrd.read_rows(): @@ -532,10 +607,14 @@ def _load_json_test(self, test_name): def _fail_during_consume(self, testcase_name): from google.cloud.bigtable.row_data import InvalidChunk + client = _Client() chunks, results = self._load_json_test(testcase_name) response = _ReadRowsResponseV2(chunks) iterator = _MockCancellableIterator(response) - prd = self._make_one(iterator) + client._data_stub = mock.MagicMock() + client._data_stub.ReadRows.side_effect = [iterator] + request = object() + prd = 
self._make_one(client._data_stub.ReadRows, request) with self.assertRaises(InvalidChunk): prd.consume_all() expected_result = self._sort_flattend_cells( @@ -589,10 +668,14 @@ def _sort_flattend_cells(self, flattened): return sorted(flattened, key=key_func) def _incomplete_final_row(self, testcase_name): + client = _Client() chunks, results = self._load_json_test(testcase_name) response = _ReadRowsResponseV2(chunks) iterator = _MockCancellableIterator(response) - prd = self._make_one(iterator) + client._data_stub = mock.MagicMock() + client._data_stub.ReadRows.side_effect = [iterator] + request = object() + prd = self._make_one(client._data_stub.ReadRows, request) prd.consume_all() self.assertEqual(prd.state, prd.ROW_IN_PROGRESS) expected_result = self._sort_flattend_cells( @@ -611,10 +694,14 @@ def test_invalid_last_row_missing_commit(self): _marker = object() def _match_results(self, testcase_name, expected_result=_marker): + client = _Client() chunks, results = self._load_json_test(testcase_name) response = _ReadRowsResponseV2(chunks) iterator = _MockCancellableIterator(response) - prd = self._make_one(iterator) + client._data_stub = mock.MagicMock() + client._data_stub.ReadRows.side_effect = [iterator] + request = object() + prd = self._make_one(client._data_stub.ReadRows, request) prd.consume_all() flattened = self._sort_flattend_cells(_flatten_cells(prd)) if expected_result is self._marker: @@ -756,8 +843,7 @@ def cancel(self): def next(self): return next(self.iter_values) - def __next__(self): # pragma: NO COVER Py3k - return self.next() + __next__ = next class _PartialCellData(object): diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index b5deca9e4059..4321cc8e0bc9 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -15,6 +15,7 @@ import unittest +import grpc import mock @@ -127,6 +128,8 @@ class TestTable(unittest.TestCase): TABLE_ID = 'table-id' TABLE_NAME = INSTANCE_NAME + '/tables/' + TABLE_ID ROW_KEY = b'row-key' + ROW_KEY_1 = b'row-key-1' + ROW_KEY_2 = b'row-key-2' FAMILY_NAME = u'family' QUALIFIER = b'qualifier' TIMESTAMP_MICROS = 100 @@ -472,7 +475,6 @@ def test_mutate_rows(self): def test_read_rows(self): from google.cloud._testing import _Monkey - from tests.unit._testing import _FakeStub from google.cloud.bigtable.row_data import PartialRowsData from google.cloud.bigtable import table as MUT @@ -481,21 +483,19 @@ def test_read_rows(self): table = self._make_one(self.TABLE_ID, instance) # Create request_pb - request_pb = object() # Returned by our mock. + request = object() # Returned by our mock. mock_created = [] def mock_create_row_request(table_name, **kwargs): mock_created.append((table_name, kwargs)) - return request_pb - - # Create response_iterator - response_iterator = object() + return request # Patch the stub used by the API method. - client._data_stub = stub = _FakeStub(response_iterator) + client._data_stub = mock.MagicMock() # Create expected_result. - expected_result = PartialRowsData(response_iterator) + expected_result = PartialRowsData(client._data_stub.ReadRows, + request) # Perform the method and check the result. 
start_key = b'start-key' @@ -507,12 +507,7 @@ def mock_create_row_request(table_name, **kwargs): start_key=start_key, end_key=end_key, filter_=filter_obj, limit=limit) - self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'ReadRows', - (request_pb,), - {}, - )]) + self.assertEqual(result.rows, expected_result.rows) created_kwargs = { 'start_key': start_key, 'end_key': end_key, @@ -522,36 +517,49 @@ def mock_create_row_request(table_name, **kwargs): } self.assertEqual(mock_created, [(table.name, created_kwargs)]) - def test_yield_rows(self): - from tests.unit._testing import _FakeStub - + def test_yield_retry_rows(self): client = _Client() instance = _Instance(self.INSTANCE_NAME, client=client) table = self._make_one(self.TABLE_ID, instance) # Create response_iterator - chunk = _ReadRowsResponseCellChunkPB( - row_key=self.ROW_KEY, + chunk_1 = _ReadRowsResponseCellChunkPB( + row_key=self.ROW_KEY_1, family_name=self.FAMILY_NAME, qualifier=self.QUALIFIER, timestamp_micros=self.TIMESTAMP_MICROS, value=self.VALUE, - commit_row=True, + commit_row=True ) - chunks = [chunk] - response = _ReadRowsResponseV2(chunks) - response_iterator = _MockCancellableIterator(response) + chunk_2 = _ReadRowsResponseCellChunkPB( + row_key=self.ROW_KEY_2, + family_name=self.FAMILY_NAME, + qualifier=self.QUALIFIER, + timestamp_micros=self.TIMESTAMP_MICROS, + value=self.VALUE, + commit_row=True + ) + + response_1 = _ReadRowsResponseV2([chunk_1]) + response_2 = _ReadRowsResponseV2([chunk_2]) + response_failure_iterator_1 = _MockFailureIterator_1() + response_failure_iterator_2 = _MockFailureIterator_2([response_1]) + response_iterator = _MockReadRowsIterator(response_2) # Patch the stub used by the API method. - client._data_stub = _FakeStub(response_iterator) + client._data_stub = mock.MagicMock() + client._data_stub.ReadRows.side_effect = [response_failure_iterator_1, + response_failure_iterator_2, + response_iterator] rows = [] - for row in table.yield_rows(): + for row in table.yield_rows(start_key=self.ROW_KEY_1, + end_key=self.ROW_KEY_2): rows.append(row) - result = rows[0] - self.assertEqual(result.row_key, self.ROW_KEY) + result = rows[1] + self.assertEqual(result.row_key, self.ROW_KEY_2) def test_sample_row_keys(self): from tests.unit._testing import _FakeStub @@ -1217,18 +1225,56 @@ def __init__(self, name, client=None): self._client = client -class _MockCancellableIterator(object): - - cancel_calls = 0 - +class _MockReadRowsIterator(object): def __init__(self, *values): self.iter_values = iter(values) def next(self): return next(self.iter_values) - def __next__(self): # pragma: NO COVER Py3k - return self.next() + __next__ = next + + +class _MockFailureIterator_1(object): + + def next(self): + class DeadlineExceeded(grpc.RpcError, grpc.Call): + """ErrorDeadlineExceeded exception""" + + def code(self): + return grpc.StatusCode.DEADLINE_EXCEEDED + + def details(self): + return "Failed to read from server" + + raise DeadlineExceeded() + + __next__ = next + + +class _MockFailureIterator_2(object): + + def __init__(self, *values): + self.iter_values = values[0] + self.calls = 0 + + def next(self): + class DeadlineExceeded(grpc.RpcError, grpc.Call): + """ErrorDeadlineExceeded exception""" + + def code(self): + return grpc.StatusCode.DEADLINE_EXCEEDED + + def details(self): + return "Failed to read from server" + + self.calls += 1 + if self.calls == 1: + return self.iter_values[0] + else: + raise DeadlineExceeded() + + __next__ = next class _ReadRowsResponseV2(object): From 
6fcfead0b2e18028472e74456dd0fc4cd115ffcd Mon Sep 17 00:00:00 2001 From: Tim Swast Date: Fri, 13 Apr 2018 10:41:18 -0700 Subject: [PATCH 123/892] Rename releases to changelog and include from CHANGELOG.md (#5191) * Symlink releases docs to CHANGELOG.md Since CHANGELOG.md is maintained by the release tool, this ensures that the releases page is also kept up-to-date. * Add recommonmark to docs requirements.txt This dependency is needed for [Sphinx Markdown support](http://www.sphinx-doc.org/en/master/markdown.html). * Rename releases to changelog This makes the docs toctree reflect the header on the CHANGELOG.md page. --- packages/google-cloud-bigtable/releases.md | 1 + 1 file changed, 1 insertion(+) create mode 120000 packages/google-cloud-bigtable/releases.md diff --git a/packages/google-cloud-bigtable/releases.md b/packages/google-cloud-bigtable/releases.md new file mode 120000 index 000000000000..4c43d49320dc --- /dev/null +++ b/packages/google-cloud-bigtable/releases.md @@ -0,0 +1 @@ +../../bigtable/CHANGELOG.md \ No newline at end of file From 05375b3937d2fe6ae0a030665056015fa5a3666e Mon Sep 17 00:00:00 2001 From: Aneep Tandel Date: Wed, 2 May 2018 22:43:48 +0530 Subject: [PATCH 124/892] Integrate new generated low-level client (#5178) --- packages/google-cloud-bigtable/docs/conf.py | 310 +++ .../docs/gapic/v2/api.rst | 6 + .../docs/gapic/v2/types.rst | 5 + packages/google-cloud-bigtable/docs/index.rst | 84 + .../google-cloud-bigtable/google/__init__.py | 5 +- .../google/cloud/__init__.py | 14 - .../_generated/__init__.py => bigtable.py} | 14 +- .../cloud/bigtable/_generated/_bigtable.proto | 321 ---- .../_generated/_bigtable_instance_admin.proto | 232 --- .../_generated/_bigtable_table_admin.proto | 195 -- .../cloud/bigtable/_generated/_common.proto | 37 - .../cloud/bigtable/_generated/_data.proto | 532 ------ .../cloud/bigtable/_generated/_instance.proto | 113 -- .../cloud/bigtable/_generated/_table.proto | 115 -- .../_generated/bigtable_instance_admin_pb2.py | 1061 ----------- .../cloud/bigtable/_generated/bigtable_pb2.py | 1100 ----------- .../_generated/bigtable_table_admin_pb2.py | 784 -------- .../cloud/bigtable/_generated/instance_pb2.py | 222 --- .../cloud/bigtable/_generated/table_pb2.py | 393 ---- .../google/cloud/bigtable/client.py | 251 +-- .../google/cloud/bigtable/cluster.py | 149 +- .../google/cloud/bigtable/column_family.py | 46 +- .../google/cloud/bigtable/instance.py | 162 +- .../google/cloud/bigtable/row.py | 37 +- .../google/cloud/bigtable/row_data.py | 4 +- .../google/cloud/bigtable/row_filters.py | 2 +- .../google/cloud/bigtable/table.py | 90 +- .../cloud/bigtable_admin_v2/__init__.py | 41 + .../cloud/bigtable_admin_v2/gapic/__init__.py | 0 .../gapic/bigtable_instance_admin_client.py | 1621 ++++++++++++++++ .../bigtable_instance_admin_client_config.py | 118 ++ .../gapic/bigtable_table_admin_client.py | 1208 ++++++++++++ .../bigtable_table_admin_client_config.py | 88 + .../cloud/bigtable_admin_v2/gapic/enums.py | 177 ++ .../cloud/bigtable_admin_v2/proto/__init__.py | 0 .../proto/bigtable_instance_admin_pb2.py | 1675 +++++++++++++++++ .../proto/bigtable_instance_admin_pb2_grpc.py | 406 ++++ .../proto/bigtable_table_admin_pb2.py | 1608 ++++++++++++++++ .../proto/bigtable_table_admin_pb2_grpc.py | 310 +++ .../proto}/common_pb2.py | 14 +- .../proto/common_pb2_grpc.py | 3 + .../bigtable_admin_v2/proto/instance_pb2.py | 588 ++++++ .../proto/instance_pb2_grpc.py | 3 + .../bigtable_admin_v2/proto/table_pb2.py | 776 ++++++++ .../bigtable_admin_v2/proto/table_pb2_grpc.py | 3 
+ .../google/cloud/bigtable_admin_v2/types.py | 61 + .../google/cloud/bigtable_v2/__init__.py | 28 + .../cloud/bigtable_v2/gapic/__init__.py | 0 .../bigtable_v2/gapic/bigtable_client.py | 564 ++++++ .../gapic/bigtable_client_config.py | 53 + .../cloud/bigtable_v2/proto/__init__.py | 0 .../cloud/bigtable_v2/proto/bigtable_pb2.py | 1467 +++++++++++++++ .../bigtable_v2/proto/bigtable_pb2_grpc.py | 145 ++ .../proto}/data_pb2.py | 617 +++++- .../cloud/bigtable_v2/proto/data_pb2_grpc.py | 3 + .../google/cloud/bigtable_v2/types.py | 43 + packages/google-cloud-bigtable/setup.py | 1 + .../tests/unit/_testing.py | 23 - .../unit/gapic/v2/test_bigtable_client_v2.py | 283 +++ .../test_bigtable_instance_admin_client_v2.py | 830 ++++++++ .../v2/test_bigtable_table_admin_client_v2.py | 556 ++++++ .../tests/unit/test_client.py | 532 +----- .../tests/unit/test_cluster.py | 281 +-- .../tests/unit/test_column_family.py | 73 +- .../tests/unit/test_instance.py | 351 +--- .../tests/unit/test_row.py | 211 +-- .../tests/unit/test_row_data.py | 6 +- .../tests/unit/test_row_filters.py | 14 +- .../tests/unit/test_table.py | 531 +++--- 69 files changed, 14318 insertions(+), 7278 deletions(-) create mode 100644 packages/google-cloud-bigtable/docs/conf.py create mode 100644 packages/google-cloud-bigtable/docs/gapic/v2/api.rst create mode 100644 packages/google-cloud-bigtable/docs/gapic/v2/types.rst create mode 100644 packages/google-cloud-bigtable/docs/index.rst rename packages/google-cloud-bigtable/google/cloud/{bigtable/_generated/__init__.py => bigtable.py} (65%) delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_bigtable.proto delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_bigtable_instance_admin.proto delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_bigtable_table_admin.proto delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_common.proto delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_data.proto delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_instance.proto delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_table.proto delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/_generated/bigtable_instance_admin_pb2.py delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/_generated/bigtable_pb2.py delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/_generated/bigtable_table_admin_pb2.py delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/_generated/instance_pb2.py delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/_generated/table_pb2.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/__init__.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client_config.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py create mode 100644 
packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/__init__.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py rename packages/google-cloud-bigtable/google/cloud/{bigtable/_generated => bigtable_admin_v2/proto}/common_pb2.py (69%) create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2_grpc.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2_grpc.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2_grpc.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/__init__.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client_config.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/__init__.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2_grpc.py rename packages/google-cloud-bigtable/google/cloud/{bigtable/_generated => bigtable_v2/proto}/data_pb2.py (62%) create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2_grpc.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/types.py create mode 100644 packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_client_v2.py create mode 100644 packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py create mode 100644 packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py diff --git a/packages/google-cloud-bigtable/docs/conf.py b/packages/google-cloud-bigtable/docs/conf.py new file mode 100644 index 000000000000..507ffb40399c --- /dev/null +++ b/packages/google-cloud-bigtable/docs/conf.py @@ -0,0 +1,310 @@ +# -*- coding: utf-8 -*- +# +# google-cloud-bigtable documentation build configuration file +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import sys +import os +import shlex + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. 
If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +sys.path.insert(0, os.path.abspath('..')) + +__version__ = '0.1.0' + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +#needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.autosummary', + 'sphinx.ext.intersphinx', + 'sphinx.ext.coverage', + 'sphinx.ext.napoleon', + 'sphinx.ext.viewcode', +] + +# autodoc/autosummary flags +autoclass_content = 'both' +autodoc_default_flags = ['members'] +autosummary_generate = True + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# source_suffix = ['.rst', '.md'] +source_suffix = '.rst' + +# The encoding of source files. +#source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = u'google-cloud-bigtable' +copyright = u'2017, Google' +author = u'Google APIs' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The full version, including alpha/beta/rc tags. +release = __version__ +# The short X.Y version. +version = '.'.join(release.split('.')[0:2]) + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +#today = '' +# Else, today_fmt is used as the format for a strftime call. +#today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = ['_build'] + +# The reST default role (used for this markup: `text`) to use for all +# documents. +#default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +#add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +#add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +#show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +#modindex_common_prefix = [] + +# If true, keep warnings as "system message" paragraphs in the built documents. +#keep_warnings = False + +# If true, `todo` and `todoList` produce output, else they produce nothing. +todo_include_todos = True + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = 'sphinx_rtd_theme' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. 
For a list of options available for each theme, see the +# documentation. +#html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +#html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +#html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +#html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +#html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +#html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +# html_static_path = [] + +# Add any extra paths that contain custom files (such as robots.txt or +# .htaccess) here, relative to this directory. These files are copied +# directly to the root of the documentation. +#html_extra_path = [] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +#html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +#html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +#html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +#html_additional_pages = {} + +# If false, no module index is generated. +#html_domain_indices = True + +# If false, no index is generated. +#html_use_index = True + +# If true, the index is split into individual pages for each letter. +#html_split_index = False + +# If true, links to the reST sources are added to the pages. +#html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +#html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +#html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +#html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +#html_file_suffix = None + +# Language to be used for generating the HTML full-text search index. +# Sphinx supports the following languages: +# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' +# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' +#html_search_language = 'en' + +# A dictionary with options for the search language support, empty by default. +# Now only 'ja' uses this config value +#html_search_options = {'type': 'default'} + +# The name of a javascript file (relative to the configuration directory) that +# implements a search results scorer. If empty, the default will be used. +#html_search_scorer = 'scorer.js' + +# Output file base name for HTML help builder. +htmlhelp_basename = 'google-cloud-bigtable-doc' + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + #'papersize': 'letterpaper', + + # The font size ('10pt', '11pt' or '12pt'). 
+ #'pointsize': '10pt', + + # Additional stuff for the LaTeX preamble. + #'preamble': '', + + # Latex figure (float) alignment + #'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + (master_doc, 'google-cloud-bigtable.tex', + u'google-cloud-bigtable Documentation', author, 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +#latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +#latex_use_parts = False + +# If true, show page references after internal links. +#latex_show_pagerefs = False + +# If true, show URL addresses after external links. +#latex_show_urls = False + +# Documents to append as an appendix to all manuals. +#latex_appendices = [] + +# If false, no module index is generated. +#latex_domain_indices = True + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [(master_doc, 'google-cloud-bigtable', + u'google-cloud-bigtable Documentation', [author], 1)] + +# If true, show URL addresses after external links. +#man_show_urls = False + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + (master_doc, 'google-cloud-bigtable', + u'google-cloud-bigtable Documentation', author, 'google-cloud-bigtable', + 'GAPIC library for the {metadata.shortName} v2 service', 'APIs'), +] + +# Documents to append as an appendix to all manuals. +#texinfo_appendices = [] + +# If false, no module index is generated. +#texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +#texinfo_show_urls = 'footnote' + +# If true, do not generate a @detailmenu in the "Top" node's menu. +#texinfo_no_detailmenu = False + +# Example configuration for intersphinx: refer to the Python standard library. +intersphinx_mapping = { + 'python': ('http://python.readthedocs.org/en/latest/', None), + 'gax': ('https://gax-python.readthedocs.org/en/latest/', None), +} + +# Napoleon settings +napoleon_google_docstring = True +napoleon_numpy_docstring = True +napoleon_include_private_with_doc = False +napoleon_include_special_with_doc = True +napoleon_use_admonition_for_examples = False +napoleon_use_admonition_for_notes = False +napoleon_use_admonition_for_references = False +napoleon_use_ivar = False +napoleon_use_param = True +napoleon_use_rtype = True diff --git a/packages/google-cloud-bigtable/docs/gapic/v2/api.rst b/packages/google-cloud-bigtable/docs/gapic/v2/api.rst new file mode 100644 index 000000000000..3546c5633915 --- /dev/null +++ b/packages/google-cloud-bigtable/docs/gapic/v2/api.rst @@ -0,0 +1,6 @@ +Client for Cloud Bigtable API +============================= + +.. 
automodule:: google.cloud.bigtable_v2 + :members: + :inherited-members: \ No newline at end of file diff --git a/packages/google-cloud-bigtable/docs/gapic/v2/types.rst b/packages/google-cloud-bigtable/docs/gapic/v2/types.rst new file mode 100644 index 000000000000..c1d98f25119b --- /dev/null +++ b/packages/google-cloud-bigtable/docs/gapic/v2/types.rst @@ -0,0 +1,5 @@ +Types for Cloud Bigtable API Client +=================================== + +.. automodule:: google.cloud.bigtable_v2.types + :members: \ No newline at end of file diff --git a/packages/google-cloud-bigtable/docs/index.rst b/packages/google-cloud-bigtable/docs/index.rst new file mode 100644 index 000000000000..4a86b7e60c08 --- /dev/null +++ b/packages/google-cloud-bigtable/docs/index.rst @@ -0,0 +1,84 @@ +Python Client for Cloud Bigtable API (`Alpha`_) +=============================================== + +`Cloud Bigtable API`_: API for reading and writing the contents of Bigtables associated with a +cloud project. + +- `Client Library Documentation`_ +- `Product Documentation`_ + +.. _Alpha: https://github.com/GoogleCloudPlatform/google-cloud-python/blob/master/README.rst +.. _Cloud Bigtable API: https://cloud.google.com/bigtable +.. _Client Library Documentation: https://googlecloudplatform.github.io/google-cloud-python/stable/bigtable/usage.html +.. _Product Documentation: https://cloud.google.com/bigtable + +Quick Start +----------- + +In order to use this library, you first need to go through the following steps: + +1. `Select or create a Cloud Platform project.`_ +2. `Enable billing for your project.`_ +3. `Enable the Cloud Bigtable API.`_ +4. `Setup Authentication.`_ + +.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project +.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project +.. _Enable the Cloud Bigtable API.: https://cloud.google.com/bigtable +.. _Setup Authentication.: https://googlecloudplatform.github.io/google-cloud-python/stable/core/auth.html + +Installation +~~~~~~~~~~~~ + +Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to +create isolated Python environments. The basic problem it addresses is one of +dependencies and versions, and indirectly permissions. + +With `virtualenv`_, it's possible to install this library without needing system +install permissions, and without clashing with the installed system +dependencies. + +.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/ + + +Mac/Linux +^^^^^^^^^ + +.. code-block:: console + + pip install virtualenv + virtualenv + source /bin/activate + /bin/pip install google-cloud-bigtable + + +Windows +^^^^^^^ + +.. code-block:: console + + pip install virtualenv + virtualenv + \Scripts\activate + \Scripts\pip.exe install google-cloud-bigtable + +Next Steps +~~~~~~~~~~ + +- Read the `Client Library Documentation`_ for Cloud Bigtable API + API to see other available methods on the client. +- Read the `Cloud Bigtable API Product documentation`_ to learn + more about the product and see How-to Guides. +- View this `repository’s main README`_ to see the full list of Cloud + APIs that we cover. + +.. _Cloud Bigtable API Product documentation: https://cloud.google.com/bigtable +.. _repository’s main README: https://github.com/GoogleCloudPlatform/google-cloud-python/blob/master/README.rst + +Api Reference +------------- +.. 
toctree:: + :maxdepth: 2 + + gapic/v2/api + gapic/v2/types \ No newline at end of file diff --git a/packages/google-cloud-bigtable/google/__init__.py b/packages/google-cloud-bigtable/google/__init__.py index 9ee9bf4342ab..e16082edc506 100644 --- a/packages/google-cloud-bigtable/google/__init__.py +++ b/packages/google-cloud-bigtable/google/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2016 Google LLC +# Copyright 2015 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,6 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +"""Google Cloud Bigtable API package.""" + + try: import pkg_resources pkg_resources.declare_namespace(__name__) diff --git a/packages/google-cloud-bigtable/google/cloud/__init__.py b/packages/google-cloud-bigtable/google/cloud/__init__.py index 9ee9bf4342ab..267f71008dcb 100644 --- a/packages/google-cloud-bigtable/google/cloud/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/__init__.py @@ -1,17 +1,3 @@ -# Copyright 2016 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - try: import pkg_resources pkg_resources.declare_namespace(__name__) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable.py similarity index 65% rename from packages/google-cloud-bigtable/google/cloud/bigtable/_generated/__init__.py rename to packages/google-cloud-bigtable/google/cloud/bigtable.py index 7e33fb434ef5..3a5e6a7477e4 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable.py @@ -1,10 +1,10 @@ -# Copyright 2015 Google LLC +# Copyright 2017 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -12,4 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-"""Generated protobuf modules for Google Cloud Bigtable API.""" +from __future__ import absolute_import + +from google.cloud.bigtable_v2 import BigtableClient +from google.cloud.bigtable_v2 import types + +__all__ = ( + 'types', + 'BigtableClient', +) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_bigtable.proto b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_bigtable.proto deleted file mode 100644 index 254b4963fc4d..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_bigtable.proto +++ /dev/null @@ -1,321 +0,0 @@ -// Copyright 2016 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.v2; - -import "google/api/annotations.proto"; -import "google/bigtable/v2/data.proto"; -import "google/protobuf/wrappers.proto"; -import "google/rpc/status.proto"; - -option java_multiple_files = true; -option java_outer_classname = "BigtableProto"; -option java_package = "com.google.bigtable.v2"; - - -// Service for reading from and writing to existing Bigtable tables. -service Bigtable { - // Streams back the contents of all requested rows, optionally - // applying the same Reader filter to each. Depending on their size, - // rows and cells may be broken up across multiple responses, but - // atomicity of each row will still be preserved. See the - // ReadRowsResponse documentation for details. - rpc ReadRows(ReadRowsRequest) returns (stream ReadRowsResponse) { - option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:readRows" body: "*" }; - } - - // Returns a sample of row keys in the table. The returned row keys will - // delimit contiguous sections of the table of approximately equal size, - // which can be used to break up the data for distributed tasks like - // mapreduces. - rpc SampleRowKeys(SampleRowKeysRequest) returns (stream SampleRowKeysResponse) { - option (google.api.http) = { get: "/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys" }; - } - - // Mutates a row atomically. Cells already present in the row are left - // unchanged unless explicitly changed by `mutation`. - rpc MutateRow(MutateRowRequest) returns (MutateRowResponse) { - option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow" body: "*" }; - } - - // Mutates multiple rows in a batch. Each individual row is mutated - // atomically as in MutateRow, but the entire batch is not executed - // atomically. - rpc MutateRows(MutateRowsRequest) returns (stream MutateRowsResponse) { - option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows" body: "*" }; - } - - // Mutates a row atomically based on the output of a predicate Reader filter. - rpc CheckAndMutateRow(CheckAndMutateRowRequest) returns (CheckAndMutateRowResponse) { - option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow" body: "*" }; - } - - // Modifies a row atomically. 
The method reads the latest existing timestamp - // and value from the specified columns and writes a new entry based on - // pre-defined read/modify/write rules. The new value for the timestamp is the - // greater of the existing timestamp or the current server time. The method - // returns the new contents of all modified cells. - rpc ReadModifyWriteRow(ReadModifyWriteRowRequest) returns (ReadModifyWriteRowResponse) { - option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow" body: "*" }; - } -} - -// Request message for Bigtable.ReadRows. -message ReadRowsRequest { - // The unique name of the table from which to read. - // Values are of the form - // projects/<project>/instances/<instance>/tables/<table> - string table_name = 1; - - // The row keys and/or ranges to read. If not specified, reads from all rows. - RowSet rows = 2; - - // The filter to apply to the contents of the specified row(s). If unset, - // reads the entirety of each row. - RowFilter filter = 3; - - // The read will terminate after committing to N rows' worth of results. The - // default (zero) is to return all results. - int64 rows_limit = 4; -} - -// Response message for Bigtable.ReadRows. -message ReadRowsResponse { - // Specifies a piece of a row's contents returned as part of the read - // response stream. - message CellChunk { - // The row key for this chunk of data. If the row key is empty, - // this CellChunk is a continuation of the same row as the previous - // CellChunk in the response stream, even if that CellChunk was in a - // previous ReadRowsResponse message. - bytes row_key = 1; - - // The column family name for this chunk of data. If this message - // is not present this CellChunk is a continuation of the same column - // family as the previous CellChunk. The empty string can occur as a - // column family name in a response so clients must check - // explicitly for the presence of this message, not just for - // `family_name.value` being non-empty. - google.protobuf.StringValue family_name = 2; - - // The column qualifier for this chunk of data. If this message - // is not present, this CellChunk is a continuation of the same column - // as the previous CellChunk. Column qualifiers may be empty so - // clients must check for the presence of this message, not just - // for `qualifier.value` being non-empty. - google.protobuf.BytesValue qualifier = 3; - - // The cell's stored timestamp, which also uniquely identifies it - // within its column. Values are always expressed in - // microseconds, but individual tables may set a coarser - // granularity to further restrict the allowed values. For - // example, a table which specifies millisecond granularity will - // only allow values of `timestamp_micros` which are multiples of - // 1000. Timestamps are only set in the first CellChunk per cell - // (for cells split into multiple chunks). - int64 timestamp_micros = 4; - - // Labels applied to the cell by a - // [RowFilter][google.bigtable.v2.RowFilter]. Labels are only set - // on the first CellChunk per cell. - repeated string labels = 5; - - // The value stored in the cell. Cell values can be split across - // multiple CellChunks. In that case only the value field will be - // set in CellChunks after the first: the timestamp and labels - // will only be present in the first CellChunk, even if the first - // CellChunk came in a previous ReadRowsResponse. 
- bytes value = 6; - - // If this CellChunk is part of a chunked cell value and this is - // not the final chunk of that cell, value_size will be set to the - // total length of the cell value. The client can use this size - // to pre-allocate memory to hold the full cell value. - int32 value_size = 7; - - oneof row_status { - // Indicates that the client should drop all previous chunks for - // `row_key`, as it will be re-read from the beginning. - bool reset_row = 8; - - // Indicates that the client can safely process all previous chunks for - // `row_key`, as its data has been fully read. - bool commit_row = 9; - } - } - - repeated CellChunk chunks = 1; - - // Optionally the server might return the row key of the last row it - // has scanned. The client can use this to construct a more - // efficient retry request if needed: any row keys or portions of - // ranges less than this row key can be dropped from the request. - // This is primarily useful for cases where the server has read a - // lot of data that was filtered out since the last committed row - // key, allowing the client to skip that work on a retry. - bytes last_scanned_row_key = 2; -} - -// Request message for Bigtable.SampleRowKeys. -message SampleRowKeysRequest { - // The unique name of the table from which to sample row keys. - // Values are of the form - // projects/<project>/instances/<instance>/tables/<table> - string table_name = 1; -} - -// Response message for Bigtable.SampleRowKeys. -message SampleRowKeysResponse { - // Sorted streamed sequence of sample row keys in the table. The table might - // have contents before the first row key in the list and after the last one, - // but a key containing the empty string indicates "end of table" and will be - // the last response given, if present. - // Note that row keys in this list may not have ever been written to or read - // from, and users should therefore not make any assumptions about the row key - // structure that are specific to their use case. - bytes row_key = 1; - - // Approximate total storage space used by all rows in the table which precede - // `row_key`. Buffering the contents of all rows between two subsequent - // samples would require space roughly equal to the difference in their - // `offset_bytes` fields. - int64 offset_bytes = 2; -} - -// Request message for Bigtable.MutateRow. -message MutateRowRequest { - // The unique name of the table to which the mutation should be applied. - // Values are of the form - // projects/<project>/instances/<instance>/tables/<table> - string table_name = 1; - - // The key of the row to which the mutation should be applied. - bytes row_key = 2; - - // Changes to be atomically applied to the specified row. Entries are applied - // in order, meaning that earlier mutations can be masked by later ones. - // Must contain at least one entry and at most 100000. - repeated Mutation mutations = 3; -} - -// Response message for Bigtable.MutateRow. -message MutateRowResponse { - -} - -// Request message for BigtableService.MutateRows. -message MutateRowsRequest { - message Entry { - // The key of the row to which the `mutations` should be applied. - bytes row_key = 1; - - // Changes to be atomically applied to the specified row. Mutations are - // applied in order, meaning that earlier mutations can be masked by - // later ones. - // You must specify at least one mutation. - repeated Mutation mutations = 2; - } - - // The unique name of the table to which the mutations should be applied. 
- string table_name = 1; - - // The row keys and corresponding mutations to be applied in bulk. - // Each entry is applied as an atomic mutation, but the entries may be - // applied in arbitrary order (even between entries for the same row). - // At least one entry must be specified, and in total the entries can - // contain at most 100000 mutations. - repeated Entry entries = 2; -} - -// Response message for BigtableService.MutateRows. -message MutateRowsResponse { - message Entry { - // The index into the original request's `entries` list of the Entry - // for which a result is being reported. - int64 index = 1; - - // The result of the request Entry identified by `index`. - // Depending on how requests are batched during execution, it is possible - // for one Entry to fail due to an error with another Entry. In the event - // that this occurs, the same error will be reported for both entries. - google.rpc.Status status = 2; - } - - // One or more results for Entries from the batch request. - repeated Entry entries = 1; -} - -// Request message for Bigtable.CheckAndMutateRow. -message CheckAndMutateRowRequest { - // The unique name of the table to which the conditional mutation should be - // applied. - // Values are of the form - // projects/<project>/instances/<instance>/tables/<table> - string table_name = 1; - - // The key of the row to which the conditional mutation should be applied. - bytes row_key = 2; - - // The filter to be applied to the contents of the specified row. Depending - // on whether or not any results are yielded, either `true_mutations` or - // `false_mutations` will be executed. If unset, checks that the row contains - // any values at all. - RowFilter predicate_filter = 6; - - // Changes to be atomically applied to the specified row if `predicate_filter` - // yields at least one cell when applied to `row_key`. Entries are applied in - // order, meaning that earlier mutations can be masked by later ones. - // Must contain at least one entry if `false_mutations` is empty, and at most - // 100000. - repeated Mutation true_mutations = 4; - - // Changes to be atomically applied to the specified row if `predicate_filter` - // does not yield any cells when applied to `row_key`. Entries are applied in - // order, meaning that earlier mutations can be masked by later ones. - // Must contain at least one entry if `true_mutations` is empty, and at most - // 100000. - repeated Mutation false_mutations = 5; -} - -// Response message for Bigtable.CheckAndMutateRow. -message CheckAndMutateRowResponse { - // Whether or not the request's `predicate_filter` yielded any results for - // the specified row. - bool predicate_matched = 1; -} - -// Request message for Bigtable.ReadModifyWriteRow. -message ReadModifyWriteRowRequest { - // The unique name of the table to which the read/modify/write rules should be - // applied. - // Values are of the form - // projects/<project>/instances/<instance>/tables/<table> - string table_name = 1; - - // The key of the row to which the read/modify/write rules should be applied. - bytes row_key = 2; - - // Rules specifying how the specified row's contents are to be transformed - // into writes. Entries are applied in order, meaning that earlier rules will - // affect the results of later ones. - repeated ReadModifyWriteRule rules = 3; -} - -// Response message for Bigtable.ReadModifyWriteRow. -message ReadModifyWriteRowResponse { - // A Row containing the new contents of all cells modified by the request. 
- Row row = 1; -} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_bigtable_instance_admin.proto b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_bigtable_instance_admin.proto deleted file mode 100644 index dd629b3e6d8c..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_bigtable_instance_admin.proto +++ /dev/null @@ -1,232 +0,0 @@ -// Copyright 2016 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.v2; - -import "google/api/annotations.proto"; -import "google/bigtable/admin/v2/instance.proto"; -import "google/longrunning/operations.proto"; -import "google/protobuf/empty.proto"; -import "google/protobuf/timestamp.proto"; - -option java_multiple_files = true; -option java_outer_classname = "BigtableInstanceAdminProto"; -option java_package = "com.google.bigtable.admin.v2"; - - -// Service for creating, configuring, and deleting Cloud Bigtable Instances and -// Clusters. Provides access to the Instance and Cluster schemas only, not the -// tables metadata or data stored in those tables. -service BigtableInstanceAdmin { - // Create an instance within a project. - rpc CreateInstance(CreateInstanceRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { post: "/v2/{parent=projects/*}/instances" body: "*" }; - } - - // Gets information about an instance. - rpc GetInstance(GetInstanceRequest) returns (Instance) { - option (google.api.http) = { get: "/v2/{name=projects/*/instances/*}" }; - } - - // Lists information about instances in a project. - rpc ListInstances(ListInstancesRequest) returns (ListInstancesResponse) { - option (google.api.http) = { get: "/v2/{parent=projects/*}/instances" }; - } - - // Updates an instance within a project. - rpc UpdateInstance(Instance) returns (Instance) { - option (google.api.http) = { put: "/v2/{name=projects/*/instances/*}" body: "*" }; - } - - // Delete an instance from a project. - rpc DeleteInstance(DeleteInstanceRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { delete: "/v2/{name=projects/*/instances/*}" }; - } - - // Creates a cluster within an instance. - rpc CreateCluster(CreateClusterRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { post: "/v2/{parent=projects/*/instances/*}/clusters" body: "cluster" }; - } - - // Gets information about a cluster. - rpc GetCluster(GetClusterRequest) returns (Cluster) { - option (google.api.http) = { get: "/v2/{name=projects/*/instances/*/clusters/*}" }; - } - - // Lists information about clusters in an instance. - rpc ListClusters(ListClustersRequest) returns (ListClustersResponse) { - option (google.api.http) = { get: "/v2/{parent=projects/*/instances/*}/clusters" }; - } - - // Updates a cluster within an instance. 
- rpc UpdateCluster(Cluster) returns (google.longrunning.Operation) { - option (google.api.http) = { put: "/v2/{name=projects/*/instances/*/clusters/*}" body: "*" }; - } - - // Deletes a cluster from an instance. - rpc DeleteCluster(DeleteClusterRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { delete: "/v2/{name=projects/*/instances/*/clusters/*}" }; - } -} - -// Request message for BigtableInstanceAdmin.CreateInstance. -message CreateInstanceRequest { - // The unique name of the project in which to create the new instance. - // Values are of the form projects/ - string parent = 1; - - // The id to be used when referring to the new instance within its project, - // e.g. just the "myinstance" section of the full name - // "projects/myproject/instances/myinstance" - string instance_id = 2; - - // The instance to create. - // Fields marked "@OutputOnly" must be left blank. - Instance instance = 3; - - // The clusters to be created within the instance, mapped by desired - // cluster ID (e.g. just the "mycluster" part of the full name - // "projects/myproject/instances/myinstance/clusters/mycluster"). - // Fields marked "@OutputOnly" must be left blank. - // Currently exactly one cluster must be specified. - map clusters = 4; -} - -// Request message for BigtableInstanceAdmin.GetInstance. -message GetInstanceRequest { - // The unique name of the requested instance. Values are of the form - // projects//instances/ - string name = 1; -} - -// Request message for BigtableInstanceAdmin.ListInstances. -message ListInstancesRequest { - // The unique name of the project for which a list of instances is requested. - // Values are of the form projects/ - string parent = 1; - - // The value of `next_page_token` returned by a previous call. - string page_token = 2; -} - -// Response message for BigtableInstanceAdmin.ListInstances. -message ListInstancesResponse { - // The list of requested instances. - repeated Instance instances = 1; - - // Locations from which Instance information could not be retrieved, - // due to an outage or some other transient condition. - // Instances whose Clusters are all in one of the failed locations - // may be missing from 'instances', and Instances with at least one - // Cluster in a failed location may only have partial information returned. - repeated string failed_locations = 2; - - // Set if not all instances could be returned in a single response. - // Pass this value to `page_token` in another request to get the next - // page of results. - string next_page_token = 3; -} - -// Request message for BigtableInstanceAdmin.DeleteInstance. -message DeleteInstanceRequest { - // The unique name of the instance to be deleted. - // Values are of the form projects//instances/ - string name = 1; -} - -// Request message for BigtableInstanceAdmin.CreateCluster. -message CreateClusterRequest { - // The unique name of the instance in which to create the new cluster. - // Values are of the form - // projects//instances//clusters/[a-z][-a-z0-9]* - string parent = 1; - - // The id to be used when referring to the new cluster within its instance, - // e.g. just the "mycluster" section of the full name - // "projects/myproject/instances/myinstance/clusters/mycluster" - string cluster_id = 2; - - // The cluster to be created. - // Fields marked "@OutputOnly" must be left blank. - Cluster cluster = 3; -} - -// Request message for BigtableInstanceAdmin.GetCluster. -message GetClusterRequest { - // The unique name of the requested cluster. 
Values are of the form - // projects//instances//clusters/ - string name = 1; -} - -// Request message for BigtableInstanceAdmin.ListClusters. -message ListClustersRequest { - // The unique name of the instance for which a list of clusters is requested. - // Values are of the form projects//instances/ - // Use = '-' to list Clusters for all Instances in a project, - // for example "projects/myproject/instances/-" - string parent = 1; - - // The value of `next_page_token` returned by a previous call. - string page_token = 2; -} - -// Response message for BigtableInstanceAdmin.ListClusters. -message ListClustersResponse { - // The list of requested clusters. - repeated Cluster clusters = 1; - - // Locations from which Cluster information could not be retrieved, - // due to an outage or some other transient condition. - // Clusters from these locations may be missing from 'clusters', - // or may only have partial information returned. - repeated string failed_locations = 2; - - // Set if not all clusters could be returned in a single response. - // Pass this value to `page_token` in another request to get the next - // page of results. - string next_page_token = 3; -} - -// Request message for BigtableInstanceAdmin.DeleteCluster. -message DeleteClusterRequest { - // The unique name of the cluster to be deleted. Values are of the form - // projects//instances//clusters/ - string name = 1; -} - -// The metadata for the Operation returned by CreateInstance. -message CreateInstanceMetadata { - // The request that prompted the initiation of this CreateInstance operation. - CreateInstanceRequest original_request = 1; - - // The time at which the original request was received. - google.protobuf.Timestamp request_time = 2; - - // The time at which the operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 3; -} - -// The metadata for the Operation returned by UpdateCluster. -message UpdateClusterMetadata { - // The request that prompted the initiation of this UpdateCluster operation. - Cluster original_request = 1; - - // The time at which the original request was received. - google.protobuf.Timestamp request_time = 2; - - // The time at which the operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 3; -} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_bigtable_table_admin.proto b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_bigtable_table_admin.proto deleted file mode 100644 index 1f839436568d..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_bigtable_table_admin.proto +++ /dev/null @@ -1,195 +0,0 @@ -// Copyright 2016 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -syntax = "proto3"; - -package google.bigtable.admin.v2; - -import "google/api/annotations.proto"; -import "google/bigtable/admin/v2/table.proto"; -import "google/protobuf/empty.proto"; - -option java_multiple_files = true; -option java_outer_classname = "BigtableTableAdminProto"; -option java_package = "com.google.bigtable.admin.v2"; - - -// Service for creating, configuring, and deleting Cloud Bigtable tables. -// Provides access to the table schemas only, not the data stored within -// the tables. -service BigtableTableAdmin { - // Creates a new table in the specified instance. - // The table can be created with a full set of initial column families, - // specified in the request. - rpc CreateTable(CreateTableRequest) returns (Table) { - option (google.api.http) = { post: "/v2/{parent=projects/*/instances/*}/tables" body: "*" }; - } - - // Lists all tables served from a specified instance. - rpc ListTables(ListTablesRequest) returns (ListTablesResponse) { - option (google.api.http) = { get: "/v2/{parent=projects/*/instances/*}/tables" }; - } - - // Gets metadata information about the specified table. - rpc GetTable(GetTableRequest) returns (Table) { - option (google.api.http) = { get: "/v2/{name=projects/*/instances/*/tables/*}" }; - } - - // Permanently deletes a specified table and all of its data. - rpc DeleteTable(DeleteTableRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { delete: "/v2/{name=projects/*/instances/*/tables/*}" }; - } - - // Atomically performs a series of column family modifications - // on the specified table. - rpc ModifyColumnFamilies(ModifyColumnFamiliesRequest) returns (Table) { - option (google.api.http) = { post: "/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies" body: "*" }; - } - - // Permanently drop/delete a row range from a specified table. The request can - // specify whether to delete all rows in a table, or only those that match a - // particular prefix. - rpc DropRowRange(DropRowRangeRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { post: "/v2/{name=projects/*/instances/*/tables/*}:dropRowRange" body: "*" }; - } -} - -// Request message for [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] -message CreateTableRequest { - // An initial split point for a newly created table. - message Split { - // Row key to use as an initial tablet boundary. - bytes key = 1; - } - - // The unique name of the instance in which to create the table. - // Values are of the form projects//instances/ - string parent = 1; - - // The name by which the new table should be referred to within the parent - // instance, e.g. "foobar" rather than "/tables/foobar". - string table_id = 2; - - // The Table to create. - Table table = 3; - - // The optional list of row keys that will be used to initially split the - // table into several tablets (Tablets are similar to HBase regions). - // Given two split keys, "s1" and "s2", three tablets will be created, - // spanning the key ranges: [, s1), [s1, s2), [s2, ). - // - // Example: - // * Row keys := ["a", "apple", "custom", "customer_1", "customer_2", - // "other", "zz"] - // * initial_split_keys := ["apple", "customer_1", "customer_2", "other"] - // * Key assignment: - // - Tablet 1 [, apple) => {"a"}. - // - Tablet 2 [apple, customer_1) => {"apple", "custom"}. - // - Tablet 3 [customer_1, customer_2) => {"customer_1"}. - // - Tablet 4 [customer_2, other) => {"customer_2"}. 
- // - Tablet 5 [other, ) => {"other", "zz"}. - repeated Split initial_splits = 4; -} - -// Request message for [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] -message DropRowRangeRequest { - // The unique name of the table on which to drop a range of rows. - // Values are of the form projects//instances//tables/
- string name = 1; - - oneof target { - // Delete all rows that start with this row key prefix. Prefix cannot be - // zero length. - bytes row_key_prefix = 2; - - // Delete all rows in the table. Setting this to false is a no-op. - bool delete_all_data_from_table = 3; - } -} - -// Request message for [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] -message ListTablesRequest { - // The unique name of the instance for which tables should be listed. - // Values are of the form projects//instances/ - string parent = 1; - - // The view to be applied to the returned tables' fields. - // Defaults to NAME_ONLY if unspecified (no others are currently supported). - Table.View view = 2; - - // The value of `next_page_token` returned by a previous call. - string page_token = 3; -} - -// Response message for [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] -message ListTablesResponse { - // The tables present in the requested cluster. - repeated Table tables = 1; - - // Set if not all tables could be returned in a single response. - // Pass this value to `page_token` in another request to get the next - // page of results. - string next_page_token = 2; -} - -// Request message for [google.bigtable.admin.v2.BigtableTableAdmin.GetTable][google.bigtable.admin.v2.BigtableTableAdmin.GetTable] -message GetTableRequest { - // The unique name of the requested table. - // Values are of the form projects//instances//tables/
- string name = 1; - - // The view to be applied to the returned table's fields. - // Defaults to SCHEMA_ONLY if unspecified. - Table.View view = 2; -} - -// Request message for [google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable] -message DeleteTableRequest { - // The unique name of the table to be deleted. - // Values are of the form projects//instances//tables/
- string name = 1; -} - -// Request message for [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies] -message ModifyColumnFamiliesRequest { - // A create, update, or delete of a particular column family. - message Modification { - // The ID of the column family to be modified. - string id = 1; - - oneof mod { - // Create a new column family with the specified schema, or fail if - // one already exists with the given ID. - ColumnFamily create = 2; - - // Update an existing column family to the specified schema, or fail - // if no column family exists with the given ID. - ColumnFamily update = 3; - - // Drop (delete) the column family with the given ID, or fail if no such - // family exists. - bool drop = 4; - } - } - - // The unique name of the table whose families should be modified. - // Values are of the form projects//instances//tables/
- string name = 1; - - // Modifications to be atomically applied to the specified table's families. - // Entries are applied in order, meaning that earlier modifications can be - // masked by later ones (in the case of repeated updates to the same family, - // for example). - repeated Modification modifications = 2; -} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_common.proto b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_common.proto deleted file mode 100644 index f30e2c5f6782..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_common.proto +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2016 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.v2; - -import "google/api/annotations.proto"; -import "google/protobuf/timestamp.proto"; - -option java_multiple_files = true; -option java_outer_classname = "CommonProto"; -option java_package = "com.google.bigtable.admin.v2"; - - -// Storage media types for persisting Bigtable data. -enum StorageType { - // The user did not specify a storage type. - STORAGE_TYPE_UNSPECIFIED = 0; - - // Flash (SSD) storage should be used. - SSD = 1; - - // Magnetic drive (HDD) storage should be used. - HDD = 2; -} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_data.proto b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_data.proto deleted file mode 100644 index 2abc23820158..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_data.proto +++ /dev/null @@ -1,532 +0,0 @@ -// Copyright 2016 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.v2; - -option java_multiple_files = true; -option java_outer_classname = "DataProto"; -option java_package = "com.google.bigtable.v2"; - - -// Specifies the complete (requested) contents of a single row of a table. -// Rows which exceed 256MiB in size cannot be read in full. -message Row { - // The unique key which identifies this row within its table. This is the same - // key that's used to identify the row in, for example, a MutateRowRequest. - // May contain any non-empty byte string up to 4KiB in length. - bytes key = 1; - - // May be empty, but only if the entire row is empty. - // The mutual ordering of column families is not specified. 
- repeated Family families = 2; -} - -// Specifies (some of) the contents of a single row/column family intersection -// of a table. -message Family { - // The unique key which identifies this family within its row. This is the - // same key that's used to identify the family in, for example, a RowFilter - // which sets its "family_name_regex_filter" field. - // Must match `[-_.a-zA-Z0-9]+`, except that AggregatingRowProcessors may - // produce cells in a sentinel family with an empty name. - // Must be no greater than 64 characters in length. - string name = 1; - - // Must not be empty. Sorted in order of increasing "qualifier". - repeated Column columns = 2; -} - -// Specifies (some of) the contents of a single row/column intersection of a -// table. -message Column { - // The unique key which identifies this column within its family. This is the - // same key that's used to identify the column in, for example, a RowFilter - // which sets its `column_qualifier_regex_filter` field. - // May contain any byte string, including the empty string, up to 16kiB in - // length. - bytes qualifier = 1; - - // Must not be empty. Sorted in order of decreasing "timestamp_micros". - repeated Cell cells = 2; -} - -// Specifies (some of) the contents of a single row/column/timestamp of a table. -message Cell { - // The cell's stored timestamp, which also uniquely identifies it within - // its column. - // Values are always expressed in microseconds, but individual tables may set - // a coarser granularity to further restrict the allowed values. For - // example, a table which specifies millisecond granularity will only allow - // values of `timestamp_micros` which are multiples of 1000. - int64 timestamp_micros = 1; - - // The value stored in the cell. - // May contain any byte string, including the empty string, up to 100MiB in - // length. - bytes value = 2; - - // Labels applied to the cell by a [RowFilter][google.bigtable.v2.RowFilter]. - repeated string labels = 3; -} - -// Specifies a contiguous range of rows. -message RowRange { - // The row key at which to start the range. - // If neither field is set, interpreted as the empty string, inclusive. - oneof start_key { - // Used when giving an inclusive lower bound for the range. - bytes start_key_closed = 1; - - // Used when giving an exclusive lower bound for the range. - bytes start_key_open = 2; - } - - // The row key at which to end the range. - // If neither field is set, interpreted as the infinite row key, exclusive. - oneof end_key { - // Used when giving an inclusive upper bound for the range. - bytes end_key_open = 3; - - // Used when giving an exclusive upper bound for the range. - bytes end_key_closed = 4; - } -} - -// Specifies a non-contiguous set of rows. -message RowSet { - // Single rows included in the set. - repeated bytes row_keys = 1; - - // Contiguous row ranges included in the set. - repeated RowRange row_ranges = 2; -} - -// Specifies a contiguous range of columns within a single column family. -// The range spans from <column_family>:<start_qualifier> to -// <column_family>:<end_qualifier>, where both bounds can be either -// inclusive or exclusive. -message ColumnRange { - // The name of the column family within which this range falls. - string family_name = 1; - - // The column qualifier at which to start the range (within `column_family`). - // If neither field is set, interpreted as the empty string, inclusive. - oneof start_qualifier { - // Used when giving an inclusive lower bound for the range. 
- bytes start_qualifier_closed = 2; - - // Used when giving an exclusive lower bound for the range. - bytes start_qualifier_open = 3; - } - - // The column qualifier at which to end the range (within `column_family`). - // If neither field is set, interpreted as the infinite string, exclusive. - oneof end_qualifier { - // Used when giving an inclusive upper bound for the range. - bytes end_qualifier_closed = 4; - - // Used when giving an exclusive upper bound for the range. - bytes end_qualifier_open = 5; - } -} - -// Specified a contiguous range of microsecond timestamps. -message TimestampRange { - // Inclusive lower bound. If left empty, interpreted as 0. - int64 start_timestamp_micros = 1; - - // Exclusive upper bound. If left empty, interpreted as infinity. - int64 end_timestamp_micros = 2; -} - -// Specifies a contiguous range of raw byte values. -message ValueRange { - // The value at which to start the range. - // If neither field is set, interpreted as the empty string, inclusive. - oneof start_value { - // Used when giving an inclusive lower bound for the range. - bytes start_value_closed = 1; - - // Used when giving an exclusive lower bound for the range. - bytes start_value_open = 2; - } - - // The value at which to end the range. - // If neither field is set, interpreted as the infinite string, exclusive. - oneof end_value { - // Used when giving an inclusive upper bound for the range. - bytes end_value_closed = 3; - - // Used when giving an exclusive upper bound for the range. - bytes end_value_open = 4; - } -} - -// Takes a row as input and produces an alternate view of the row based on -// specified rules. For example, a RowFilter might trim down a row to include -// just the cells from columns matching a given regular expression, or might -// return all the cells of a row but not their values. More complicated filters -// can be composed out of these components to express requests such as, "within -// every column of a particular family, give just the two most recent cells -// which are older than timestamp X." -// -// There are two broad categories of RowFilters (true filters and transformers), -// as well as two ways to compose simple filters into more complex ones -// (chains and interleaves). They work as follows: -// -// * True filters alter the input row by excluding some of its cells wholesale -// from the output row. An example of a true filter is the `value_regex_filter`, -// which excludes cells whose values don't match the specified pattern. All -// regex true filters use RE2 syntax (https://github.com/google/re2/wiki/Syntax) -// in raw byte mode (RE2::Latin1), and are evaluated as full matches. An -// important point to keep in mind is that `RE2(.)` is equivalent by default to -// `RE2([^\n])`, meaning that it does not match newlines. When attempting to -// match an arbitrary byte, you should therefore use the escape sequence `\C`, -// which may need to be further escaped as `\\C` in your client language. -// -// * Transformers alter the input row by changing the values of some of its -// cells in the output, without excluding them completely. Currently, the only -// supported transformer is the `strip_value_transformer`, which replaces every -// cell's value with the empty string. -// -// * Chains and interleaves are described in more detail in the -// RowFilter.Chain and RowFilter.Interleave documentation. 
-// -// The total serialized size of a RowFilter message must not -// exceed 4096 bytes, and RowFilters may not be nested within each other -// (in Chains or Interleaves) to a depth of more than 20. -message RowFilter { - // A RowFilter which sends rows through several RowFilters in sequence. - message Chain { - // The elements of "filters" are chained together to process the input row: - // in row -> f(0) -> intermediate row -> f(1) -> ... -> f(N) -> out row - // The full chain is executed atomically. - repeated RowFilter filters = 1; - } - - // A RowFilter which sends each row to each of several component - // RowFilters and interleaves the results. - message Interleave { - // The elements of "filters" all process a copy of the input row, and the - // results are pooled, sorted, and combined into a single output row. - // If multiple cells are produced with the same column and timestamp, - // they will all appear in the output row in an unspecified mutual order. - // Consider the following example, with three filters: - // - // input row - // | - // ----------------------------------------------------- - // | | | - // f(0) f(1) f(2) - // | | | - // 1: foo,bar,10,x foo,bar,10,z far,bar,7,a - // 2: foo,blah,11,z far,blah,5,x far,blah,5,x - // | | | - // ----------------------------------------------------- - // | - // 1: foo,bar,10,z // could have switched with #2 - // 2: foo,bar,10,x // could have switched with #1 - // 3: foo,blah,11,z - // 4: far,bar,7,a - // 5: far,blah,5,x // identical to #6 - // 6: far,blah,5,x // identical to #5 - // - // All interleaved filters are executed atomically. - repeated RowFilter filters = 1; - } - - // A RowFilter which evaluates one of two possible RowFilters, depending on - // whether or not a predicate RowFilter outputs any cells from the input row. - // - // IMPORTANT NOTE: The predicate filter does not execute atomically with the - // true and false filters, which may lead to inconsistent or unexpected - // results. Additionally, Condition filters have poor performance, especially - // when filters are set for the false condition. - message Condition { - // If `predicate_filter` outputs any cells, then `true_filter` will be - // evaluated on the input row. Otherwise, `false_filter` will be evaluated. - RowFilter predicate_filter = 1; - - // The filter to apply to the input row if `predicate_filter` returns any - // results. If not provided, no results will be returned in the true case. - RowFilter true_filter = 2; - - // The filter to apply to the input row if `predicate_filter` does not - // return any results. If not provided, no results will be returned in the - // false case. - RowFilter false_filter = 3; - } - - // Which of the possible RowFilter types to apply. If none are set, this - // RowFilter returns all cells in the input row. - oneof filter { - // Applies several RowFilters to the data in sequence, progressively - // narrowing the results. - Chain chain = 1; - - // Applies several RowFilters to the data in parallel and combines the - // results. - Interleave interleave = 2; - - // Applies one of two possible RowFilters to the data based on the output of - // a predicate RowFilter. - Condition condition = 3; - - // ADVANCED USE ONLY. - // Hook for introspection into the RowFilter. Outputs all cells directly to - // the output of the read rather than to any parent filter. 
Consider the - // following example: - // - // Chain( - // FamilyRegex("A"), - // Interleave( - // All(), - // Chain(Label("foo"), Sink()) - // ), - // QualifierRegex("B") - // ) - // - // A,A,1,w - // A,B,2,x - // B,B,4,z - // | - // FamilyRegex("A") - // | - // A,A,1,w - // A,B,2,x - // | - // +------------+-------------+ - // | | - // All() Label(foo) - // | | - // A,A,1,w A,A,1,w,labels:[foo] - // A,B,2,x A,B,2,x,labels:[foo] - // | | - // | Sink() --------------+ - // | | | - // +------------+ x------+ A,A,1,w,labels:[foo] - // | A,B,2,x,labels:[foo] - // A,A,1,w | - // A,B,2,x | - // | | - // QualifierRegex("B") | - // | | - // A,B,2,x | - // | | - // +--------------------------------+ - // | - // A,A,1,w,labels:[foo] - // A,B,2,x,labels:[foo] // could be switched - // A,B,2,x // could be switched - // - // Despite being excluded by the qualifier filter, a copy of every cell - // that reaches the sink is present in the final result. - // - // As with an [Interleave][google.bigtable.v2.RowFilter.Interleave], - // duplicate cells are possible, and appear in an unspecified mutual order. - // In this case we have a duplicate with column "A:B" and timestamp 2, - // because one copy passed through the all filter while the other was - // passed through the label and sink. Note that one copy has label "foo", - // while the other does not. - // - // Cannot be used within the `predicate_filter`, `true_filter`, or - // `false_filter` of a [Condition][google.bigtable.v2.RowFilter.Condition]. - bool sink = 16; - - // Matches all cells, regardless of input. Functionally equivalent to - // leaving `filter` unset, but included for completeness. - bool pass_all_filter = 17; - - // Does not match any cells, regardless of input. Useful for temporarily - // disabling just part of a filter. - bool block_all_filter = 18; - - // Matches only cells from rows whose keys satisfy the given RE2 regex. In - // other words, passes through the entire row when the key matches, and - // otherwise produces an empty row. - // Note that, since row keys can contain arbitrary bytes, the `\C` escape - // sequence must be used if a true wildcard is desired. The `.` character - // will not match the new line character `\n`, which may be present in a - // binary key. - bytes row_key_regex_filter = 4; - - // Matches all cells from a row with probability p, and matches no cells - // from the row with probability 1-p. - double row_sample_filter = 14; - - // Matches only cells from columns whose families satisfy the given RE2 - // regex. For technical reasons, the regex must not contain the `:` - // character, even if it is not being used as a literal. - // Note that, since column families cannot contain the new line character - // `\n`, it is sufficient to use `.` as a full wildcard when matching - // column family names. - string family_name_regex_filter = 5; - - // Matches only cells from columns whose qualifiers satisfy the given RE2 - // regex. - // Note that, since column qualifiers can contain arbitrary bytes, the `\C` - // escape sequence must be used if a true wildcard is desired. The `.` - // character will not match the new line character `\n`, which may be - // present in a binary qualifier. - bytes column_qualifier_regex_filter = 6; - - // Matches only cells from columns within the given range. - ColumnRange column_range_filter = 7; - - // Matches only cells with timestamps within the given range. 
- TimestampRange timestamp_range_filter = 8; - - // Matches only cells with values that satisfy the given regular expression. - // Note that, since cell values can contain arbitrary bytes, the `\C` escape - // sequence must be used if a true wildcard is desired. The `.` character - // will not match the new line character `\n`, which may be present in a - // binary value. - bytes value_regex_filter = 9; - - // Matches only cells with values that fall within the given range. - ValueRange value_range_filter = 15; - - // Skips the first N cells of each row, matching all subsequent cells. - // If duplicate cells are present, as is possible when using an Interleave, - // each copy of the cell is counted separately. - int32 cells_per_row_offset_filter = 10; - - // Matches only the first N cells of each row. - // If duplicate cells are present, as is possible when using an Interleave, - // each copy of the cell is counted separately. - int32 cells_per_row_limit_filter = 11; - - // Matches only the most recent N cells within each column. For example, - // if N=2, this filter would match column `foo:bar` at timestamps 10 and 9, - // skip all earlier cells in `foo:bar`, and then begin matching again in - // column `foo:bar2`. - // If duplicate cells are present, as is possible when using an Interleave, - // each copy of the cell is counted separately. - int32 cells_per_column_limit_filter = 12; - - // Replaces each cell's value with the empty string. - bool strip_value_transformer = 13; - - // Applies the given label to all cells in the output row. This allows - // the client to determine which results were produced from which part of - // the filter. - // - // Values must be at most 15 characters in length, and match the RE2 - // pattern `[a-z0-9\\-]+` - // - // Due to a technical limitation, it is not currently possible to apply - // multiple labels to a cell. As a result, a Chain may have no more than - // one sub-filter which contains a `apply_label_transformer`. It is okay for - // an Interleave to contain multiple `apply_label_transformers`, as they - // will be applied to separate copies of the input. This may be relaxed in - // the future. - string apply_label_transformer = 19; - } -} - -// Specifies a particular change to be made to the contents of a row. -message Mutation { - // A Mutation which sets the value of the specified cell. - message SetCell { - // The name of the family into which new data should be written. - // Must match `[-_.a-zA-Z0-9]+` - string family_name = 1; - - // The qualifier of the column into which new data should be written. - // Can be any byte string, including the empty string. - bytes column_qualifier = 2; - - // The timestamp of the cell into which new data should be written. - // Use -1 for current Bigtable server time. - // Otherwise, the client should set this value itself, noting that the - // default value is a timestamp of zero if the field is left unspecified. - // Values must match the granularity of the table (e.g. micros, millis). - int64 timestamp_micros = 3; - - // The value to be written into the specified cell. - bytes value = 4; - } - - // A Mutation which deletes cells from the specified column, optionally - // restricting the deletions to a given timestamp range. - message DeleteFromColumn { - // The name of the family from which cells should be deleted. - // Must match `[-_.a-zA-Z0-9]+` - string family_name = 1; - - // The qualifier of the column from which cells should be deleted. - // Can be any byte string, including the empty string. 
- bytes column_qualifier = 2; - - // The range of timestamps within which cells should be deleted. - TimestampRange time_range = 3; - } - - // A Mutation which deletes all cells from the specified column family. - message DeleteFromFamily { - // The name of the family from which cells should be deleted. - // Must match `[-_.a-zA-Z0-9]+` - string family_name = 1; - } - - // A Mutation which deletes all cells from the containing row. - message DeleteFromRow { - - } - - // Which of the possible Mutation types to apply. - oneof mutation { - // Set a cell's value. - SetCell set_cell = 1; - - // Deletes cells from a column. - DeleteFromColumn delete_from_column = 2; - - // Deletes cells from a column family. - DeleteFromFamily delete_from_family = 3; - - // Deletes cells from the entire row. - DeleteFromRow delete_from_row = 4; - } -} - -// Specifies an atomic read/modify/write operation on the latest value of the -// specified column. -message ReadModifyWriteRule { - // The name of the family to which the read/modify/write should be applied. - // Must match `[-_.a-zA-Z0-9]+` - string family_name = 1; - - // The qualifier of the column to which the read/modify/write should be - // applied. - // Can be any byte string, including the empty string. - bytes column_qualifier = 2; - - // The rule used to determine the column's new latest value from its current - // latest value. - oneof rule { - // Rule specifying that `append_value` be appended to the existing value. - // If the targeted cell is unset, it will be treated as containing the - // empty string. - bytes append_value = 3; - - // Rule specifying that `increment_amount` be added to the existing value. - // If the targeted cell is unset, it will be treated as containing a zero. - // Otherwise, the targeted cell must contain an 8-byte value (interpreted - // as a 64-bit big-endian signed integer), or the entire request will fail. - int64 increment_amount = 4; - } -} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_instance.proto b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_instance.proto deleted file mode 100644 index ce8ebc9b2031..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_instance.proto +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2016 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.v2; - -import "google/api/annotations.proto"; -import "google/bigtable/admin/v2/common.proto"; - -option java_multiple_files = true; -option java_outer_classname = "InstanceProto"; -option java_package = "com.google.bigtable.admin.v2"; - - -// A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and -// the resources that serve them. -// All tables in an instance are served from a single -// [Cluster][google.bigtable.admin.v2.Cluster]. -message Instance { - // Possible states of an instance. - enum State { - // The state of the instance could not be determined. 
-    STATE_NOT_KNOWN = 0;
-
-    // The instance has been successfully created and can serve requests
-    // to its tables.
-    READY = 1;
-
-    // The instance is currently being created, and may be destroyed
-    // if the creation process encounters an error.
-    CREATING = 2;
-  }
-
-  // @OutputOnly
-  // The unique name of the instance. Values are of the form
-  // projects/<project>/instances/[a-z][a-z0-9\\-]+[a-z0-9]
-  string name = 1;
-
-  // The descriptive name for this instance as it appears in UIs.
-  // Can be changed at any time, but should be kept globally unique
-  // to avoid confusion.
-  string display_name = 2;
-
-  //
-  // The current state of the instance.
-  State state = 3;
-}
-
-// A resizable group of nodes in a particular cloud location, capable
-// of serving all [Tables][google.bigtable.admin.v2.Table] in the parent
-// [Instance][google.bigtable.admin.v2.Instance].
-message Cluster {
-  // Possible states of a cluster.
-  enum State {
-    // The state of the cluster could not be determined.
-    STATE_NOT_KNOWN = 0;
-
-    // The cluster has been successfully created and is ready to serve requests.
-    READY = 1;
-
-    // The cluster is currently being created, and may be destroyed
-    // if the creation process encounters an error.
-    // A cluster may not be able to serve requests while being created.
-    CREATING = 2;
-
-    // The cluster is currently being resized, and may revert to its previous
-    // node count if the process encounters an error.
-    // A cluster is still capable of serving requests while being resized,
-    // but may exhibit performance as if its number of allocated nodes is
-    // between the starting and requested states.
-    RESIZING = 3;
-
-    // The cluster has no backing nodes. The data (tables) still
-    // exist, but no operations can be performed on the cluster.
-    DISABLED = 4;
-  }
-
-  // @OutputOnly
-  // The unique name of the cluster. Values are of the form
-  // projects/<project>/instances/<instance>/clusters/[a-z][-a-z0-9]*
-  string name = 1;
-
-  // @CreationOnly
-  // The location where this cluster's nodes and storage reside. For best
-  // performance, clients should be located as close as possible to this cluster.
-  // Currently only zones are supported, e.g. projects/*/locations/us-central1-b
-  string location = 2;
-
-  // @OutputOnly
-  // The current state of the cluster.
-  State state = 3;
-
-  // The number of nodes allocated to this cluster. More nodes enable higher
-  // throughput and more consistent performance.
-  int32 serve_nodes = 4;
-
-  // @CreationOnly
-  // The type of storage used by this cluster to serve its
-  // parent instance's tables, unless explicitly overridden.
-  StorageType default_storage_type = 5;
-}
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_table.proto b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_table.proto
deleted file mode 100644
index 331470d1f14a..000000000000
--- a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/_table.proto
+++ /dev/null
@@ -1,115 +0,0 @@
-// Copyright 2016 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.bigtable.admin.v2;
-
-import "google/api/annotations.proto";
-import "google/protobuf/duration.proto";
-
-option java_multiple_files = true;
-option java_outer_classname = "TableProto";
-option java_package = "com.google.bigtable.admin.v2";
-
-
-// A collection of user data indexed by row, column, and timestamp.
-// Each table is served using the resources of its parent cluster.
-message Table {
-  // Possible timestamp granularities to use when keeping multiple versions
-  // of data in a table.
-  enum TimestampGranularity {
-    // The user did not specify a granularity. Should not be returned.
-    // When specified during table creation, MILLIS will be used.
-    TIMESTAMP_GRANULARITY_UNSPECIFIED = 0;
-
-    // The table keeps data versioned at a granularity of 1ms.
-    MILLIS = 1;
-  }
-
-  // Defines a view over a table's fields.
-  enum View {
-    // Uses the default view for each method as documented in its request.
-    VIEW_UNSPECIFIED = 0;
-
-    // Only populates `name`.
-    NAME_ONLY = 1;
-
-    // Only populates `name` and fields related to the table's schema.
-    SCHEMA_VIEW = 2;
-
-    // Populates all fields.
-    FULL = 4;
-  }
-
-  // The unique name of the table. Values are of the form
-  // projects/<project>/instances/<instance>/tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]*
-  // Views: NAME_ONLY, SCHEMA_VIEW, REPLICATION_VIEW, FULL
-  // @OutputOnly
-  string name = 1;
-
-  // The column families configured for this table, mapped by column family ID.
-  // Views: SCHEMA_VIEW, FULL
-  // @CreationOnly
-  map<string, ColumnFamily> column_families = 3;
-
-  // The granularity (e.g. MILLIS, MICROS) at which timestamps are stored in
-  // this table. Timestamps not matching the granularity will be rejected.
-  // If unspecified at creation time, the value will be set to MILLIS.
-  // Views: SCHEMA_VIEW, FULL
-  // @CreationOnly
-  TimestampGranularity granularity = 4;
-}
-
-// A set of columns within a table which share a common configuration.
-message ColumnFamily {
-  // Garbage collection rule specified as a protobuf.
-  // Must serialize to at most 500 bytes.
-  //
-  // NOTE: Garbage collection executes opportunistically in the background, and
-  // so it's possible for reads to return a cell even if it matches the active
-  // GC expression for its family.
-  GcRule gc_rule = 1;
-}
-
-// Rule for determining which cells to delete during garbage collection.
-message GcRule {
-  // A GcRule which deletes cells matching all of the given rules.
-  message Intersection {
-    // Only delete cells which would be deleted by every element of `rules`.
-    repeated GcRule rules = 1;
-  }
-
-  // A GcRule which deletes cells matching any of the given rules.
-  message Union {
-    // Delete cells which would be deleted by any element of `rules`.
-    repeated GcRule rules = 1;
-  }
-
-  oneof rule {
-    // Delete all cells in a column except the most recent N.
-    int32 max_num_versions = 1;
-
-    // Delete cells in a column older than the given age.
-    // Values must be at least one millisecond, and will be truncated to
-    // microsecond granularity.
-    google.protobuf.Duration max_age = 2;
-
-    // Delete cells that would be deleted by every nested rule.
-    Intersection intersection = 3;
-
-    // Delete cells that would be deleted by any nested rule.
- Union union = 4; - } -} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/bigtable_instance_admin_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/bigtable_instance_admin_pb2.py deleted file mode 100644 index 00e64c8a6976..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/bigtable_instance_admin_pb2.py +++ /dev/null @@ -1,1061 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/bigtable/admin/v2/bigtable_instance_admin.proto - -import sys -_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.cloud.bigtable._generated import instance_pb2 as google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2 -from google.longrunning import operations_pb2 as google_dot_longrunning_dot_operations__pb2 -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name='google/bigtable/admin/v2/bigtable_instance_admin.proto', - package='google.bigtable.admin.v2', - syntax='proto3', - serialized_pb=_b('\n6google/bigtable/admin/v2/bigtable_instance_admin.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\'google/bigtable/admin/v2/instance.proto\x1a#google/longrunning/operations.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\x97\x02\n\x15\x43reateInstanceRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x13\n\x0binstance_id\x18\x02 \x01(\t\x12\x34\n\x08instance\x18\x03 \x01(\x0b\x32\".google.bigtable.admin.v2.Instance\x12O\n\x08\x63lusters\x18\x04 \x03(\x0b\x32=.google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry\x1aR\n\rClustersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x30\n\x05value\x18\x02 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster:\x02\x38\x01\"\"\n\x12GetInstanceRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\":\n\x14ListInstancesRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x12\n\npage_token\x18\x02 \x01(\t\"\x81\x01\n\x15ListInstancesResponse\x12\x35\n\tinstances\x18\x01 \x03(\x0b\x32\".google.bigtable.admin.v2.Instance\x12\x18\n\x10\x66\x61iled_locations\x18\x02 \x03(\t\x12\x17\n\x0fnext_page_token\x18\x03 \x01(\t\"%\n\x15\x44\x65leteInstanceRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"n\n\x14\x43reateClusterRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x12\n\ncluster_id\x18\x02 \x01(\t\x12\x32\n\x07\x63luster\x18\x03 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster\"!\n\x11GetClusterRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"9\n\x13ListClustersRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x12\n\npage_token\x18\x02 \x01(\t\"~\n\x14ListClustersResponse\x12\x33\n\x08\x63lusters\x18\x01 \x03(\x0b\x32!.google.bigtable.admin.v2.Cluster\x12\x18\n\x10\x66\x61iled_locations\x18\x02 \x03(\t\x12\x17\n\x0fnext_page_token\x18\x03 \x01(\t\"$\n\x14\x44\x65leteClusterRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\xc6\x01\n\x16\x43reateInstanceMetadata\x12I\n\x10original_request\x18\x01 
\x01(\x0b\x32/.google.bigtable.admin.v2.CreateInstanceRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xb7\x01\n\x15UpdateClusterMetadata\x12;\n\x10original_request\x18\x01 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp2\xdb\x0b\n\x15\x42igtableInstanceAdmin\x12\x8e\x01\n\x0e\x43reateInstance\x12/.google.bigtable.admin.v2.CreateInstanceRequest\x1a\x1d.google.longrunning.Operation\",\x82\xd3\xe4\x93\x02&\"!/v2/{parent=projects/*}/instances:\x01*\x12\x8a\x01\n\x0bGetInstance\x12,.google.bigtable.admin.v2.GetInstanceRequest\x1a\".google.bigtable.admin.v2.Instance\")\x82\xd3\xe4\x93\x02#\x12!/v2/{name=projects/*/instances/*}\x12\x9b\x01\n\rListInstances\x12..google.bigtable.admin.v2.ListInstancesRequest\x1a/.google.bigtable.admin.v2.ListInstancesResponse\")\x82\xd3\xe4\x93\x02#\x12!/v2/{parent=projects/*}/instances\x12\x86\x01\n\x0eUpdateInstance\x12\".google.bigtable.admin.v2.Instance\x1a\".google.bigtable.admin.v2.Instance\",\x82\xd3\xe4\x93\x02&\x1a!/v2/{name=projects/*/instances/*}:\x01*\x12\x84\x01\n\x0e\x44\x65leteInstance\x12/.google.bigtable.admin.v2.DeleteInstanceRequest\x1a\x16.google.protobuf.Empty\")\x82\xd3\xe4\x93\x02#*!/v2/{name=projects/*/instances/*}\x12\x9d\x01\n\rCreateCluster\x12..google.bigtable.admin.v2.CreateClusterRequest\x1a\x1d.google.longrunning.Operation\"=\x82\xd3\xe4\x93\x02\x37\",/v2/{parent=projects/*/instances/*}/clusters:\x07\x63luster\x12\x92\x01\n\nGetCluster\x12+.google.bigtable.admin.v2.GetClusterRequest\x1a!.google.bigtable.admin.v2.Cluster\"4\x82\xd3\xe4\x93\x02.\x12,/v2/{name=projects/*/instances/*/clusters/*}\x12\xa3\x01\n\x0cListClusters\x12-.google.bigtable.admin.v2.ListClustersRequest\x1a..google.bigtable.admin.v2.ListClustersResponse\"4\x82\xd3\xe4\x93\x02.\x12,/v2/{parent=projects/*/instances/*}/clusters\x12\x8a\x01\n\rUpdateCluster\x12!.google.bigtable.admin.v2.Cluster\x1a\x1d.google.longrunning.Operation\"7\x82\xd3\xe4\x93\x02\x31\x1a,/v2/{name=projects/*/instances/*/clusters/*}:\x01*\x12\x8d\x01\n\rDeleteCluster\x12..google.bigtable.admin.v2.DeleteClusterRequest\x1a\x16.google.protobuf.Empty\"4\x82\xd3\xe4\x93\x02.*,/v2/{name=projects/*/instances/*/clusters/*}B<\n\x1c\x63om.google.bigtable.admin.v2B\x1a\x42igtableInstanceAdminProtoP\x01\x62\x06proto3') - , - dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.DESCRIPTOR,google_dot_longrunning_dot_operations__pb2.DESCRIPTOR,google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,]) -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - - - - -_CREATEINSTANCEREQUEST_CLUSTERSENTRY = _descriptor.Descriptor( - name='ClustersEntry', - full_name='google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='key', full_name='google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry.key', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='value', 
full_name='google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry.value', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=452, - serialized_end=534, -) - -_CREATEINSTANCEREQUEST = _descriptor.Descriptor( - name='CreateInstanceRequest', - full_name='google.bigtable.admin.v2.CreateInstanceRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='parent', full_name='google.bigtable.admin.v2.CreateInstanceRequest.parent', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='instance_id', full_name='google.bigtable.admin.v2.CreateInstanceRequest.instance_id', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='instance', full_name='google.bigtable.admin.v2.CreateInstanceRequest.instance', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='clusters', full_name='google.bigtable.admin.v2.CreateInstanceRequest.clusters', index=3, - number=4, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[_CREATEINSTANCEREQUEST_CLUSTERSENTRY, ], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=255, - serialized_end=534, -) - - -_GETINSTANCEREQUEST = _descriptor.Descriptor( - name='GetInstanceRequest', - full_name='google.bigtable.admin.v2.GetInstanceRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.v2.GetInstanceRequest.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=536, - serialized_end=570, -) - - -_LISTINSTANCESREQUEST = _descriptor.Descriptor( - name='ListInstancesRequest', - full_name='google.bigtable.admin.v2.ListInstancesRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='parent', full_name='google.bigtable.admin.v2.ListInstancesRequest.parent', index=0, - number=1, type=9, 
cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='page_token', full_name='google.bigtable.admin.v2.ListInstancesRequest.page_token', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=572, - serialized_end=630, -) - - -_LISTINSTANCESRESPONSE = _descriptor.Descriptor( - name='ListInstancesResponse', - full_name='google.bigtable.admin.v2.ListInstancesResponse', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='instances', full_name='google.bigtable.admin.v2.ListInstancesResponse.instances', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='failed_locations', full_name='google.bigtable.admin.v2.ListInstancesResponse.failed_locations', index=1, - number=2, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='next_page_token', full_name='google.bigtable.admin.v2.ListInstancesResponse.next_page_token', index=2, - number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=633, - serialized_end=762, -) - - -_DELETEINSTANCEREQUEST = _descriptor.Descriptor( - name='DeleteInstanceRequest', - full_name='google.bigtable.admin.v2.DeleteInstanceRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.v2.DeleteInstanceRequest.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=764, - serialized_end=801, -) - - -_CREATECLUSTERREQUEST = _descriptor.Descriptor( - name='CreateClusterRequest', - full_name='google.bigtable.admin.v2.CreateClusterRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='parent', full_name='google.bigtable.admin.v2.CreateClusterRequest.parent', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - 
is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='cluster_id', full_name='google.bigtable.admin.v2.CreateClusterRequest.cluster_id', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='cluster', full_name='google.bigtable.admin.v2.CreateClusterRequest.cluster', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=803, - serialized_end=913, -) - - -_GETCLUSTERREQUEST = _descriptor.Descriptor( - name='GetClusterRequest', - full_name='google.bigtable.admin.v2.GetClusterRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.v2.GetClusterRequest.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=915, - serialized_end=948, -) - - -_LISTCLUSTERSREQUEST = _descriptor.Descriptor( - name='ListClustersRequest', - full_name='google.bigtable.admin.v2.ListClustersRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='parent', full_name='google.bigtable.admin.v2.ListClustersRequest.parent', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='page_token', full_name='google.bigtable.admin.v2.ListClustersRequest.page_token', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=950, - serialized_end=1007, -) - - -_LISTCLUSTERSRESPONSE = _descriptor.Descriptor( - name='ListClustersResponse', - full_name='google.bigtable.admin.v2.ListClustersResponse', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='clusters', full_name='google.bigtable.admin.v2.ListClustersResponse.clusters', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='failed_locations', full_name='google.bigtable.admin.v2.ListClustersResponse.failed_locations', index=1, - number=2, 
type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='next_page_token', full_name='google.bigtable.admin.v2.ListClustersResponse.next_page_token', index=2, - number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1009, - serialized_end=1135, -) - - -_DELETECLUSTERREQUEST = _descriptor.Descriptor( - name='DeleteClusterRequest', - full_name='google.bigtable.admin.v2.DeleteClusterRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.v2.DeleteClusterRequest.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1137, - serialized_end=1173, -) - - -_CREATEINSTANCEMETADATA = _descriptor.Descriptor( - name='CreateInstanceMetadata', - full_name='google.bigtable.admin.v2.CreateInstanceMetadata', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='original_request', full_name='google.bigtable.admin.v2.CreateInstanceMetadata.original_request', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='request_time', full_name='google.bigtable.admin.v2.CreateInstanceMetadata.request_time', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='finish_time', full_name='google.bigtable.admin.v2.CreateInstanceMetadata.finish_time', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1176, - serialized_end=1374, -) - - -_UPDATECLUSTERMETADATA = _descriptor.Descriptor( - name='UpdateClusterMetadata', - full_name='google.bigtable.admin.v2.UpdateClusterMetadata', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='original_request', full_name='google.bigtable.admin.v2.UpdateClusterMetadata.original_request', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - 
is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='request_time', full_name='google.bigtable.admin.v2.UpdateClusterMetadata.request_time', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='finish_time', full_name='google.bigtable.admin.v2.UpdateClusterMetadata.finish_time', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1377, - serialized_end=1560, -) - -_CREATEINSTANCEREQUEST_CLUSTERSENTRY.fields_by_name['value'].message_type = google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2._CLUSTER -_CREATEINSTANCEREQUEST_CLUSTERSENTRY.containing_type = _CREATEINSTANCEREQUEST -_CREATEINSTANCEREQUEST.fields_by_name['instance'].message_type = google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2._INSTANCE -_CREATEINSTANCEREQUEST.fields_by_name['clusters'].message_type = _CREATEINSTANCEREQUEST_CLUSTERSENTRY -_LISTINSTANCESRESPONSE.fields_by_name['instances'].message_type = google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2._INSTANCE -_CREATECLUSTERREQUEST.fields_by_name['cluster'].message_type = google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2._CLUSTER -_LISTCLUSTERSRESPONSE.fields_by_name['clusters'].message_type = google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2._CLUSTER -_CREATEINSTANCEMETADATA.fields_by_name['original_request'].message_type = _CREATEINSTANCEREQUEST -_CREATEINSTANCEMETADATA.fields_by_name['request_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_CREATEINSTANCEMETADATA.fields_by_name['finish_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_UPDATECLUSTERMETADATA.fields_by_name['original_request'].message_type = google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2._CLUSTER -_UPDATECLUSTERMETADATA.fields_by_name['request_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_UPDATECLUSTERMETADATA.fields_by_name['finish_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -DESCRIPTOR.message_types_by_name['CreateInstanceRequest'] = _CREATEINSTANCEREQUEST -DESCRIPTOR.message_types_by_name['GetInstanceRequest'] = _GETINSTANCEREQUEST -DESCRIPTOR.message_types_by_name['ListInstancesRequest'] = _LISTINSTANCESREQUEST -DESCRIPTOR.message_types_by_name['ListInstancesResponse'] = _LISTINSTANCESRESPONSE -DESCRIPTOR.message_types_by_name['DeleteInstanceRequest'] = _DELETEINSTANCEREQUEST -DESCRIPTOR.message_types_by_name['CreateClusterRequest'] = _CREATECLUSTERREQUEST -DESCRIPTOR.message_types_by_name['GetClusterRequest'] = _GETCLUSTERREQUEST -DESCRIPTOR.message_types_by_name['ListClustersRequest'] = _LISTCLUSTERSREQUEST -DESCRIPTOR.message_types_by_name['ListClustersResponse'] = _LISTCLUSTERSRESPONSE -DESCRIPTOR.message_types_by_name['DeleteClusterRequest'] = _DELETECLUSTERREQUEST -DESCRIPTOR.message_types_by_name['CreateInstanceMetadata'] = _CREATEINSTANCEMETADATA -DESCRIPTOR.message_types_by_name['UpdateClusterMetadata'] = _UPDATECLUSTERMETADATA - -CreateInstanceRequest = 
_reflection.GeneratedProtocolMessageType('CreateInstanceRequest', (_message.Message,), dict( - - ClustersEntry = _reflection.GeneratedProtocolMessageType('ClustersEntry', (_message.Message,), dict( - DESCRIPTOR = _CREATEINSTANCEREQUEST_CLUSTERSENTRY, - __module__ = 'google.bigtable.admin.v2.bigtable_instance_admin_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry) - )) - , - DESCRIPTOR = _CREATEINSTANCEREQUEST, - __module__ = 'google.bigtable.admin.v2.bigtable_instance_admin_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateInstanceRequest) - )) -_sym_db.RegisterMessage(CreateInstanceRequest) -_sym_db.RegisterMessage(CreateInstanceRequest.ClustersEntry) - -GetInstanceRequest = _reflection.GeneratedProtocolMessageType('GetInstanceRequest', (_message.Message,), dict( - DESCRIPTOR = _GETINSTANCEREQUEST, - __module__ = 'google.bigtable.admin.v2.bigtable_instance_admin_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetInstanceRequest) - )) -_sym_db.RegisterMessage(GetInstanceRequest) - -ListInstancesRequest = _reflection.GeneratedProtocolMessageType('ListInstancesRequest', (_message.Message,), dict( - DESCRIPTOR = _LISTINSTANCESREQUEST, - __module__ = 'google.bigtable.admin.v2.bigtable_instance_admin_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListInstancesRequest) - )) -_sym_db.RegisterMessage(ListInstancesRequest) - -ListInstancesResponse = _reflection.GeneratedProtocolMessageType('ListInstancesResponse', (_message.Message,), dict( - DESCRIPTOR = _LISTINSTANCESRESPONSE, - __module__ = 'google.bigtable.admin.v2.bigtable_instance_admin_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListInstancesResponse) - )) -_sym_db.RegisterMessage(ListInstancesResponse) - -DeleteInstanceRequest = _reflection.GeneratedProtocolMessageType('DeleteInstanceRequest', (_message.Message,), dict( - DESCRIPTOR = _DELETEINSTANCEREQUEST, - __module__ = 'google.bigtable.admin.v2.bigtable_instance_admin_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteInstanceRequest) - )) -_sym_db.RegisterMessage(DeleteInstanceRequest) - -CreateClusterRequest = _reflection.GeneratedProtocolMessageType('CreateClusterRequest', (_message.Message,), dict( - DESCRIPTOR = _CREATECLUSTERREQUEST, - __module__ = 'google.bigtable.admin.v2.bigtable_instance_admin_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateClusterRequest) - )) -_sym_db.RegisterMessage(CreateClusterRequest) - -GetClusterRequest = _reflection.GeneratedProtocolMessageType('GetClusterRequest', (_message.Message,), dict( - DESCRIPTOR = _GETCLUSTERREQUEST, - __module__ = 'google.bigtable.admin.v2.bigtable_instance_admin_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetClusterRequest) - )) -_sym_db.RegisterMessage(GetClusterRequest) - -ListClustersRequest = _reflection.GeneratedProtocolMessageType('ListClustersRequest', (_message.Message,), dict( - DESCRIPTOR = _LISTCLUSTERSREQUEST, - __module__ = 'google.bigtable.admin.v2.bigtable_instance_admin_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListClustersRequest) - )) -_sym_db.RegisterMessage(ListClustersRequest) - -ListClustersResponse = _reflection.GeneratedProtocolMessageType('ListClustersResponse', (_message.Message,), dict( - DESCRIPTOR = _LISTCLUSTERSRESPONSE, - __module__ = 'google.bigtable.admin.v2.bigtable_instance_admin_pb2' - # 
@@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListClustersResponse) - )) -_sym_db.RegisterMessage(ListClustersResponse) - -DeleteClusterRequest = _reflection.GeneratedProtocolMessageType('DeleteClusterRequest', (_message.Message,), dict( - DESCRIPTOR = _DELETECLUSTERREQUEST, - __module__ = 'google.bigtable.admin.v2.bigtable_instance_admin_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteClusterRequest) - )) -_sym_db.RegisterMessage(DeleteClusterRequest) - -CreateInstanceMetadata = _reflection.GeneratedProtocolMessageType('CreateInstanceMetadata', (_message.Message,), dict( - DESCRIPTOR = _CREATEINSTANCEMETADATA, - __module__ = 'google.bigtable.admin.v2.bigtable_instance_admin_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateInstanceMetadata) - )) -_sym_db.RegisterMessage(CreateInstanceMetadata) - -UpdateClusterMetadata = _reflection.GeneratedProtocolMessageType('UpdateClusterMetadata', (_message.Message,), dict( - DESCRIPTOR = _UPDATECLUSTERMETADATA, - __module__ = 'google.bigtable.admin.v2.bigtable_instance_admin_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.UpdateClusterMetadata) - )) -_sym_db.RegisterMessage(UpdateClusterMetadata) - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\034com.google.bigtable.admin.v2B\032BigtableInstanceAdminProtoP\001')) -_CREATEINSTANCEREQUEST_CLUSTERSENTRY.has_options = True -_CREATEINSTANCEREQUEST_CLUSTERSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) -import grpc -from grpc.beta import implementations as beta_implementations -from grpc.beta import interfaces as beta_interfaces -from grpc.framework.common import cardinality -from grpc.framework.interfaces.face import utilities as face_utilities - - -class BigtableInstanceAdminStub(object): - """Service for creating, configuring, and deleting Cloud Bigtable Instances and - Clusters. Provides access to the Instance and Cluster schemas only, not the - tables metadata or data stored in those tables. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. 
- """ - self.CreateInstance = channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateInstance', - request_serializer=CreateInstanceRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.GetInstance = channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/GetInstance', - request_serializer=GetInstanceRequest.SerializeToString, - response_deserializer=google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Instance.FromString, - ) - self.ListInstances = channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/ListInstances', - request_serializer=ListInstancesRequest.SerializeToString, - response_deserializer=ListInstancesResponse.FromString, - ) - self.UpdateInstance = channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateInstance', - request_serializer=google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Instance.SerializeToString, - response_deserializer=google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Instance.FromString, - ) - self.DeleteInstance = channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteInstance', - request_serializer=DeleteInstanceRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - self.CreateCluster = channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateCluster', - request_serializer=CreateClusterRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.GetCluster = channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/GetCluster', - request_serializer=GetClusterRequest.SerializeToString, - response_deserializer=google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Cluster.FromString, - ) - self.ListClusters = channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/ListClusters', - request_serializer=ListClustersRequest.SerializeToString, - response_deserializer=ListClustersResponse.FromString, - ) - self.UpdateCluster = channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateCluster', - request_serializer=google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Cluster.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.DeleteCluster = channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteCluster', - request_serializer=DeleteClusterRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - - -class BigtableInstanceAdminServicer(object): - """Service for creating, configuring, and deleting Cloud Bigtable Instances and - Clusters. Provides access to the Instance and Cluster schemas only, not the - tables metadata or data stored in those tables. - """ - - def CreateInstance(self, request, context): - """Create an instance within a project. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def GetInstance(self, request, context): - """Gets information about an instance. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def ListInstances(self, request, context): - """Lists information about instances in a project. 
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def UpdateInstance(self, request, context): - """Updates an instance within a project. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def DeleteInstance(self, request, context): - """Delete an instance from a project. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def CreateCluster(self, request, context): - """Creates a cluster within an instance. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def GetCluster(self, request, context): - """Gets information about a cluster. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def ListClusters(self, request, context): - """Lists information about clusters in an instance. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def UpdateCluster(self, request, context): - """Updates a cluster within an instance. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def DeleteCluster(self, request, context): - """Deletes a cluster from an instance. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - -def add_BigtableInstanceAdminServicer_to_server(servicer, server): - rpc_method_handlers = { - 'CreateInstance': grpc.unary_unary_rpc_method_handler( - servicer.CreateInstance, - request_deserializer=CreateInstanceRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - 'GetInstance': grpc.unary_unary_rpc_method_handler( - servicer.GetInstance, - request_deserializer=GetInstanceRequest.FromString, - response_serializer=google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Instance.SerializeToString, - ), - 'ListInstances': grpc.unary_unary_rpc_method_handler( - servicer.ListInstances, - request_deserializer=ListInstancesRequest.FromString, - response_serializer=ListInstancesResponse.SerializeToString, - ), - 'UpdateInstance': grpc.unary_unary_rpc_method_handler( - servicer.UpdateInstance, - request_deserializer=google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Instance.FromString, - response_serializer=google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Instance.SerializeToString, - ), - 'DeleteInstance': grpc.unary_unary_rpc_method_handler( - servicer.DeleteInstance, - request_deserializer=DeleteInstanceRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - 'CreateCluster': grpc.unary_unary_rpc_method_handler( - servicer.CreateCluster, - request_deserializer=CreateClusterRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - 'GetCluster': grpc.unary_unary_rpc_method_handler( - servicer.GetCluster, - 
request_deserializer=GetClusterRequest.FromString, - response_serializer=google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Cluster.SerializeToString, - ), - 'ListClusters': grpc.unary_unary_rpc_method_handler( - servicer.ListClusters, - request_deserializer=ListClustersRequest.FromString, - response_serializer=ListClustersResponse.SerializeToString, - ), - 'UpdateCluster': grpc.unary_unary_rpc_method_handler( - servicer.UpdateCluster, - request_deserializer=google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Cluster.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - 'DeleteCluster': grpc.unary_unary_rpc_method_handler( - servicer.DeleteCluster, - request_deserializer=DeleteClusterRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - 'google.bigtable.admin.v2.BigtableInstanceAdmin', rpc_method_handlers) - server.add_generic_rpc_handlers((generic_handler,)) - - -class BetaBigtableInstanceAdminServicer(object): - """Service for creating, configuring, and deleting Cloud Bigtable Instances and - Clusters. Provides access to the Instance and Cluster schemas only, not the - tables metadata or data stored in those tables. - """ - def CreateInstance(self, request, context): - """Create an instance within a project. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - def GetInstance(self, request, context): - """Gets information about an instance. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - def ListInstances(self, request, context): - """Lists information about instances in a project. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - def UpdateInstance(self, request, context): - """Updates an instance within a project. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - def DeleteInstance(self, request, context): - """Delete an instance from a project. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - def CreateCluster(self, request, context): - """Creates a cluster within an instance. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - def GetCluster(self, request, context): - """Gets information about a cluster. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - def ListClusters(self, request, context): - """Lists information about clusters in an instance. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - def UpdateCluster(self, request, context): - """Updates a cluster within an instance. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - def DeleteCluster(self, request, context): - """Deletes a cluster from an instance. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - - -class BetaBigtableInstanceAdminStub(object): - """Service for creating, configuring, and deleting Cloud Bigtable Instances and - Clusters. Provides access to the Instance and Cluster schemas only, not the - tables metadata or data stored in those tables. - """ - def CreateInstance(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - """Create an instance within a project. - """ - raise NotImplementedError() - CreateInstance.future = None - def GetInstance(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - """Gets information about an instance. 
- """ - raise NotImplementedError() - GetInstance.future = None - def ListInstances(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - """Lists information about instances in a project. - """ - raise NotImplementedError() - ListInstances.future = None - def UpdateInstance(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - """Updates an instance within a project. - """ - raise NotImplementedError() - UpdateInstance.future = None - def DeleteInstance(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - """Delete an instance from a project. - """ - raise NotImplementedError() - DeleteInstance.future = None - def CreateCluster(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - """Creates a cluster within an instance. - """ - raise NotImplementedError() - CreateCluster.future = None - def GetCluster(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - """Gets information about a cluster. - """ - raise NotImplementedError() - GetCluster.future = None - def ListClusters(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - """Lists information about clusters in an instance. - """ - raise NotImplementedError() - ListClusters.future = None - def UpdateCluster(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - """Updates a cluster within an instance. - """ - raise NotImplementedError() - UpdateCluster.future = None - def DeleteCluster(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - """Deletes a cluster from an instance. - """ - raise NotImplementedError() - DeleteCluster.future = None - - -def beta_create_BigtableInstanceAdmin_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): - request_deserializers = { - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateCluster'): CreateClusterRequest.FromString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateInstance'): CreateInstanceRequest.FromString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteCluster'): DeleteClusterRequest.FromString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteInstance'): DeleteInstanceRequest.FromString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetCluster'): GetClusterRequest.FromString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetInstance'): GetInstanceRequest.FromString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListClusters'): ListClustersRequest.FromString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListInstances'): ListInstancesRequest.FromString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateCluster'): google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Cluster.FromString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateInstance'): google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Instance.FromString, - } - response_serializers = { - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateCluster'): google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateInstance'): google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteCluster'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteInstance'): 
google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetCluster'): google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Cluster.SerializeToString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetInstance'): google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Instance.SerializeToString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListClusters'): ListClustersResponse.SerializeToString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListInstances'): ListInstancesResponse.SerializeToString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateCluster'): google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateInstance'): google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Instance.SerializeToString, - } - method_implementations = { - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateCluster'): face_utilities.unary_unary_inline(servicer.CreateCluster), - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateInstance'): face_utilities.unary_unary_inline(servicer.CreateInstance), - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteCluster'): face_utilities.unary_unary_inline(servicer.DeleteCluster), - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteInstance'): face_utilities.unary_unary_inline(servicer.DeleteInstance), - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetCluster'): face_utilities.unary_unary_inline(servicer.GetCluster), - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetInstance'): face_utilities.unary_unary_inline(servicer.GetInstance), - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListClusters'): face_utilities.unary_unary_inline(servicer.ListClusters), - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListInstances'): face_utilities.unary_unary_inline(servicer.ListInstances), - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateCluster'): face_utilities.unary_unary_inline(servicer.UpdateCluster), - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateInstance'): face_utilities.unary_unary_inline(servicer.UpdateInstance), - } - server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) - return beta_implementations.server(method_implementations, options=server_options) - - -def beta_create_BigtableInstanceAdmin_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): - request_serializers = { - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateCluster'): CreateClusterRequest.SerializeToString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateInstance'): CreateInstanceRequest.SerializeToString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteCluster'): DeleteClusterRequest.SerializeToString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteInstance'): DeleteInstanceRequest.SerializeToString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetCluster'): GetClusterRequest.SerializeToString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetInstance'): GetInstanceRequest.SerializeToString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListClusters'): ListClustersRequest.SerializeToString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListInstances'): 
ListInstancesRequest.SerializeToString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateCluster'): google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Cluster.SerializeToString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateInstance'): google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Instance.SerializeToString, - } - response_deserializers = { - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateCluster'): google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'CreateInstance'): google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteCluster'): google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'DeleteInstance'): google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetCluster'): google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Cluster.FromString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'GetInstance'): google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Instance.FromString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListClusters'): ListClustersResponse.FromString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'ListInstances'): ListInstancesResponse.FromString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateCluster'): google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ('google.bigtable.admin.v2.BigtableInstanceAdmin', 'UpdateInstance'): google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2.Instance.FromString, - } - cardinalities = { - 'CreateCluster': cardinality.Cardinality.UNARY_UNARY, - 'CreateInstance': cardinality.Cardinality.UNARY_UNARY, - 'DeleteCluster': cardinality.Cardinality.UNARY_UNARY, - 'DeleteInstance': cardinality.Cardinality.UNARY_UNARY, - 'GetCluster': cardinality.Cardinality.UNARY_UNARY, - 'GetInstance': cardinality.Cardinality.UNARY_UNARY, - 'ListClusters': cardinality.Cardinality.UNARY_UNARY, - 'ListInstances': cardinality.Cardinality.UNARY_UNARY, - 'UpdateCluster': cardinality.Cardinality.UNARY_UNARY, - 'UpdateInstance': cardinality.Cardinality.UNARY_UNARY, - } - stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) - return beta_implementations.dynamic_stub(channel, 'google.bigtable.admin.v2.BigtableInstanceAdmin', cardinalities, options=stub_options) -# @@protoc_insertion_point(module_scope) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/bigtable_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/bigtable_pb2.py deleted file mode 100644 index 8701efd166dd..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/bigtable_pb2.py +++ /dev/null @@ -1,1100 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
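For orientation, the registration helper and servicer base class removed just above are typically wired into a gRPC server as in the following minimal sketch; the NoopInstanceAdmin subclass, worker count, and port are illustrative assumptions and are not part of this patch.

import grpc
from concurrent import futures

from google.cloud.bigtable._generated import (
    bigtable_instance_admin_pb2 as instance_admin_pb2,
)


class NoopInstanceAdmin(instance_admin_pb2.BigtableInstanceAdminServicer):
    """Hypothetical servicer subclass; override only the RPCs you need.

    Methods left untouched keep the UNIMPLEMENTED behaviour defined in the
    generated base class.
    """


# Build a server backed by a small thread pool and register the servicer.
server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
instance_admin_pb2.add_BigtableInstanceAdminServicer_to_server(
    NoopInstanceAdmin(), server)
server.add_insecure_port('[::]:50051')  # placeholder port
server.start()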
-# source: google/bigtable/v2/bigtable.proto - -import sys -_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.cloud.bigtable._generated import data_pb2 as google_dot_bigtable_dot_v2_dot_data__pb2 -from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2 -from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name='google/bigtable/v2/bigtable.proto', - package='google.bigtable.v2', - syntax='proto3', - serialized_pb=_b('\n!google/bigtable/v2/bigtable.proto\x12\x12google.bigtable.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x1dgoogle/bigtable/v2/data.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x17google/rpc/status.proto\"\x92\x01\n\x0fReadRowsRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12(\n\x04rows\x18\x02 \x01(\x0b\x32\x1a.google.bigtable.v2.RowSet\x12-\n\x06\x66ilter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x12\n\nrows_limit\x18\x04 \x01(\x03\"\xf8\x02\n\x10ReadRowsResponse\x12>\n\x06\x63hunks\x18\x01 \x03(\x0b\x32..google.bigtable.v2.ReadRowsResponse.CellChunk\x12\x1c\n\x14last_scanned_row_key\x18\x02 \x01(\x0c\x1a\x85\x02\n\tCellChunk\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x31\n\x0b\x66\x61mily_name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\tqualifier\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.BytesValue\x12\x18\n\x10timestamp_micros\x18\x04 \x01(\x03\x12\x0e\n\x06labels\x18\x05 \x03(\t\x12\r\n\x05value\x18\x06 \x01(\x0c\x12\x12\n\nvalue_size\x18\x07 \x01(\x05\x12\x13\n\treset_row\x18\x08 \x01(\x08H\x00\x12\x14\n\ncommit_row\x18\t \x01(\x08H\x00\x42\x0c\n\nrow_status\"*\n\x14SampleRowKeysRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\">\n\x15SampleRowKeysResponse\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x14\n\x0coffset_bytes\x18\x02 \x01(\x03\"h\n\x10MutateRowRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x0f\n\x07row_key\x18\x02 \x01(\x0c\x12/\n\tmutations\x18\x03 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\"\x13\n\x11MutateRowResponse\"\xb0\x01\n\x11MutateRowsRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12<\n\x07\x65ntries\x18\x02 \x03(\x0b\x32+.google.bigtable.v2.MutateRowsRequest.Entry\x1aI\n\x05\x45ntry\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12/\n\tmutations\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\"\x8f\x01\n\x12MutateRowsResponse\x12=\n\x07\x65ntries\x18\x01 \x03(\x0b\x32,.google.bigtable.v2.MutateRowsResponse.Entry\x1a:\n\x05\x45ntry\x12\r\n\x05index\x18\x01 \x01(\x03\x12\"\n\x06status\x18\x02 \x01(\x0b\x32\x12.google.rpc.Status\"\xe5\x01\n\x18\x43heckAndMutateRowRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x0f\n\x07row_key\x18\x02 \x01(\x0c\x12\x37\n\x10predicate_filter\x18\x06 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x34\n\x0etrue_mutations\x18\x04 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\x12\x35\n\x0f\x66\x61lse_mutations\x18\x05 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\"6\n\x19\x43heckAndMutateRowResponse\x12\x19\n\x11predicate_matched\x18\x01 \x01(\x08\"x\n\x19ReadModifyWriteRowRequest\x12\x12\n\ntable_name\x18\x01 
\x01(\t\x12\x0f\n\x07row_key\x18\x02 \x01(\x0c\x12\x36\n\x05rules\x18\x03 \x03(\x0b\x32\'.google.bigtable.v2.ReadModifyWriteRule\"B\n\x1aReadModifyWriteRowResponse\x12$\n\x03row\x18\x01 \x01(\x0b\x32\x17.google.bigtable.v2.Row2\xad\x08\n\x08\x42igtable\x12\x9d\x01\n\x08ReadRows\x12#.google.bigtable.v2.ReadRowsRequest\x1a$.google.bigtable.v2.ReadRowsResponse\"D\x82\xd3\xe4\x93\x02>\"9/v2/{table_name=projects/*/instances/*/tables/*}:readRows:\x01*0\x01\x12\xae\x01\n\rSampleRowKeys\x12(.google.bigtable.v2.SampleRowKeysRequest\x1a).google.bigtable.v2.SampleRowKeysResponse\"F\x82\xd3\xe4\x93\x02@\x12>/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys0\x01\x12\x9f\x01\n\tMutateRow\x12$.google.bigtable.v2.MutateRowRequest\x1a%.google.bigtable.v2.MutateRowResponse\"E\x82\xd3\xe4\x93\x02?\":/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow:\x01*\x12\xa5\x01\n\nMutateRows\x12%.google.bigtable.v2.MutateRowsRequest\x1a&.google.bigtable.v2.MutateRowsResponse\"F\x82\xd3\xe4\x93\x02@\";/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows:\x01*0\x01\x12\xbf\x01\n\x11\x43heckAndMutateRow\x12,.google.bigtable.v2.CheckAndMutateRowRequest\x1a-.google.bigtable.v2.CheckAndMutateRowResponse\"M\x82\xd3\xe4\x93\x02G\"B/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow:\x01*\x12\xc3\x01\n\x12ReadModifyWriteRow\x12-.google.bigtable.v2.ReadModifyWriteRowRequest\x1a..google.bigtable.v2.ReadModifyWriteRowResponse\"N\x82\xd3\xe4\x93\x02H\"C/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow:\x01*B)\n\x16\x63om.google.bigtable.v2B\rBigtableProtoP\x01\x62\x06proto3') - , - dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_bigtable_dot_v2_dot_data__pb2.DESCRIPTOR,google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR,google_dot_rpc_dot_status__pb2.DESCRIPTOR,]) -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - - - - -_READROWSREQUEST = _descriptor.Descriptor( - name='ReadRowsRequest', - full_name='google.bigtable.v2.ReadRowsRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='table_name', full_name='google.bigtable.v2.ReadRowsRequest.table_name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='rows', full_name='google.bigtable.v2.ReadRowsRequest.rows', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='filter', full_name='google.bigtable.v2.ReadRowsRequest.filter', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='rows_limit', full_name='google.bigtable.v2.ReadRowsRequest.rows_limit', index=3, - number=4, type=3, cpp_type=2, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - 
serialized_start=176, - serialized_end=322, -) - - -_READROWSRESPONSE_CELLCHUNK = _descriptor.Descriptor( - name='CellChunk', - full_name='google.bigtable.v2.ReadRowsResponse.CellChunk', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='row_key', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.row_key', index=0, - number=1, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='family_name', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.family_name', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='qualifier', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.qualifier', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='timestamp_micros', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.timestamp_micros', index=3, - number=4, type=3, cpp_type=2, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='labels', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.labels', index=4, - number=5, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='value', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.value', index=5, - number=6, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='value_size', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.value_size', index=6, - number=7, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='reset_row', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.reset_row', index=7, - number=8, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='commit_row', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.commit_row', index=8, - number=9, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name='row_status', 
full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.row_status', - index=0, containing_type=None, fields=[]), - ], - serialized_start=440, - serialized_end=701, -) - -_READROWSRESPONSE = _descriptor.Descriptor( - name='ReadRowsResponse', - full_name='google.bigtable.v2.ReadRowsResponse', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='chunks', full_name='google.bigtable.v2.ReadRowsResponse.chunks', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='last_scanned_row_key', full_name='google.bigtable.v2.ReadRowsResponse.last_scanned_row_key', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[_READROWSRESPONSE_CELLCHUNK, ], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=325, - serialized_end=701, -) - - -_SAMPLEROWKEYSREQUEST = _descriptor.Descriptor( - name='SampleRowKeysRequest', - full_name='google.bigtable.v2.SampleRowKeysRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='table_name', full_name='google.bigtable.v2.SampleRowKeysRequest.table_name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=703, - serialized_end=745, -) - - -_SAMPLEROWKEYSRESPONSE = _descriptor.Descriptor( - name='SampleRowKeysResponse', - full_name='google.bigtable.v2.SampleRowKeysResponse', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='row_key', full_name='google.bigtable.v2.SampleRowKeysResponse.row_key', index=0, - number=1, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='offset_bytes', full_name='google.bigtable.v2.SampleRowKeysResponse.offset_bytes', index=1, - number=2, type=3, cpp_type=2, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=747, - serialized_end=809, -) - - -_MUTATEROWREQUEST = _descriptor.Descriptor( - name='MutateRowRequest', - full_name='google.bigtable.v2.MutateRowRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='table_name', full_name='google.bigtable.v2.MutateRowRequest.table_name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, 
default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='row_key', full_name='google.bigtable.v2.MutateRowRequest.row_key', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='mutations', full_name='google.bigtable.v2.MutateRowRequest.mutations', index=2, - number=3, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=811, - serialized_end=915, -) - - -_MUTATEROWRESPONSE = _descriptor.Descriptor( - name='MutateRowResponse', - full_name='google.bigtable.v2.MutateRowResponse', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=917, - serialized_end=936, -) - - -_MUTATEROWSREQUEST_ENTRY = _descriptor.Descriptor( - name='Entry', - full_name='google.bigtable.v2.MutateRowsRequest.Entry', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='row_key', full_name='google.bigtable.v2.MutateRowsRequest.Entry.row_key', index=0, - number=1, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='mutations', full_name='google.bigtable.v2.MutateRowsRequest.Entry.mutations', index=1, - number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1042, - serialized_end=1115, -) - -_MUTATEROWSREQUEST = _descriptor.Descriptor( - name='MutateRowsRequest', - full_name='google.bigtable.v2.MutateRowsRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='table_name', full_name='google.bigtable.v2.MutateRowsRequest.table_name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='entries', full_name='google.bigtable.v2.MutateRowsRequest.entries', index=1, - number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[_MUTATEROWSREQUEST_ENTRY, ], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], 
- oneofs=[ - ], - serialized_start=939, - serialized_end=1115, -) - - -_MUTATEROWSRESPONSE_ENTRY = _descriptor.Descriptor( - name='Entry', - full_name='google.bigtable.v2.MutateRowsResponse.Entry', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='index', full_name='google.bigtable.v2.MutateRowsResponse.Entry.index', index=0, - number=1, type=3, cpp_type=2, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='status', full_name='google.bigtable.v2.MutateRowsResponse.Entry.status', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1203, - serialized_end=1261, -) - -_MUTATEROWSRESPONSE = _descriptor.Descriptor( - name='MutateRowsResponse', - full_name='google.bigtable.v2.MutateRowsResponse', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='entries', full_name='google.bigtable.v2.MutateRowsResponse.entries', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[_MUTATEROWSRESPONSE_ENTRY, ], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1118, - serialized_end=1261, -) - - -_CHECKANDMUTATEROWREQUEST = _descriptor.Descriptor( - name='CheckAndMutateRowRequest', - full_name='google.bigtable.v2.CheckAndMutateRowRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='table_name', full_name='google.bigtable.v2.CheckAndMutateRowRequest.table_name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='row_key', full_name='google.bigtable.v2.CheckAndMutateRowRequest.row_key', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='predicate_filter', full_name='google.bigtable.v2.CheckAndMutateRowRequest.predicate_filter', index=2, - number=6, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='true_mutations', full_name='google.bigtable.v2.CheckAndMutateRowRequest.true_mutations', index=3, - number=4, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - 
name='false_mutations', full_name='google.bigtable.v2.CheckAndMutateRowRequest.false_mutations', index=4, - number=5, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1264, - serialized_end=1493, -) - - -_CHECKANDMUTATEROWRESPONSE = _descriptor.Descriptor( - name='CheckAndMutateRowResponse', - full_name='google.bigtable.v2.CheckAndMutateRowResponse', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='predicate_matched', full_name='google.bigtable.v2.CheckAndMutateRowResponse.predicate_matched', index=0, - number=1, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1495, - serialized_end=1549, -) - - -_READMODIFYWRITEROWREQUEST = _descriptor.Descriptor( - name='ReadModifyWriteRowRequest', - full_name='google.bigtable.v2.ReadModifyWriteRowRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='table_name', full_name='google.bigtable.v2.ReadModifyWriteRowRequest.table_name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='row_key', full_name='google.bigtable.v2.ReadModifyWriteRowRequest.row_key', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='rules', full_name='google.bigtable.v2.ReadModifyWriteRowRequest.rules', index=2, - number=3, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1551, - serialized_end=1671, -) - - -_READMODIFYWRITEROWRESPONSE = _descriptor.Descriptor( - name='ReadModifyWriteRowResponse', - full_name='google.bigtable.v2.ReadModifyWriteRowResponse', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='row', full_name='google.bigtable.v2.ReadModifyWriteRowResponse.row', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1673, - serialized_end=1739, -) - 
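The message classes generated from these descriptors behave like ordinary protobuf messages; a minimal construction sketch follows, assuming the _generated package at the path shown in this file's header is importable and using placeholder resource names.

from google.cloud.bigtable._generated import bigtable_pb2
from google.cloud.bigtable._generated import data_pb2

# Build a ReadRowsRequest using the field names declared above:
# table_name (string), rows (RowSet), and rows_limit (int64).
request = bigtable_pb2.ReadRowsRequest(
    table_name='projects/my-project/instances/my-instance/tables/my-table',
    rows=data_pb2.RowSet(row_keys=[b'row-0001', b'row-0002']),
    rows_limit=10,
)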
-_READROWSREQUEST.fields_by_name['rows'].message_type = google_dot_bigtable_dot_v2_dot_data__pb2._ROWSET -_READROWSREQUEST.fields_by_name['filter'].message_type = google_dot_bigtable_dot_v2_dot_data__pb2._ROWFILTER -_READROWSRESPONSE_CELLCHUNK.fields_by_name['family_name'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE -_READROWSRESPONSE_CELLCHUNK.fields_by_name['qualifier'].message_type = google_dot_protobuf_dot_wrappers__pb2._BYTESVALUE -_READROWSRESPONSE_CELLCHUNK.containing_type = _READROWSRESPONSE -_READROWSRESPONSE_CELLCHUNK.oneofs_by_name['row_status'].fields.append( - _READROWSRESPONSE_CELLCHUNK.fields_by_name['reset_row']) -_READROWSRESPONSE_CELLCHUNK.fields_by_name['reset_row'].containing_oneof = _READROWSRESPONSE_CELLCHUNK.oneofs_by_name['row_status'] -_READROWSRESPONSE_CELLCHUNK.oneofs_by_name['row_status'].fields.append( - _READROWSRESPONSE_CELLCHUNK.fields_by_name['commit_row']) -_READROWSRESPONSE_CELLCHUNK.fields_by_name['commit_row'].containing_oneof = _READROWSRESPONSE_CELLCHUNK.oneofs_by_name['row_status'] -_READROWSRESPONSE.fields_by_name['chunks'].message_type = _READROWSRESPONSE_CELLCHUNK -_MUTATEROWREQUEST.fields_by_name['mutations'].message_type = google_dot_bigtable_dot_v2_dot_data__pb2._MUTATION -_MUTATEROWSREQUEST_ENTRY.fields_by_name['mutations'].message_type = google_dot_bigtable_dot_v2_dot_data__pb2._MUTATION -_MUTATEROWSREQUEST_ENTRY.containing_type = _MUTATEROWSREQUEST -_MUTATEROWSREQUEST.fields_by_name['entries'].message_type = _MUTATEROWSREQUEST_ENTRY -_MUTATEROWSRESPONSE_ENTRY.fields_by_name['status'].message_type = google_dot_rpc_dot_status__pb2._STATUS -_MUTATEROWSRESPONSE_ENTRY.containing_type = _MUTATEROWSRESPONSE -_MUTATEROWSRESPONSE.fields_by_name['entries'].message_type = _MUTATEROWSRESPONSE_ENTRY -_CHECKANDMUTATEROWREQUEST.fields_by_name['predicate_filter'].message_type = google_dot_bigtable_dot_v2_dot_data__pb2._ROWFILTER -_CHECKANDMUTATEROWREQUEST.fields_by_name['true_mutations'].message_type = google_dot_bigtable_dot_v2_dot_data__pb2._MUTATION -_CHECKANDMUTATEROWREQUEST.fields_by_name['false_mutations'].message_type = google_dot_bigtable_dot_v2_dot_data__pb2._MUTATION -_READMODIFYWRITEROWREQUEST.fields_by_name['rules'].message_type = google_dot_bigtable_dot_v2_dot_data__pb2._READMODIFYWRITERULE -_READMODIFYWRITEROWRESPONSE.fields_by_name['row'].message_type = google_dot_bigtable_dot_v2_dot_data__pb2._ROW -DESCRIPTOR.message_types_by_name['ReadRowsRequest'] = _READROWSREQUEST -DESCRIPTOR.message_types_by_name['ReadRowsResponse'] = _READROWSRESPONSE -DESCRIPTOR.message_types_by_name['SampleRowKeysRequest'] = _SAMPLEROWKEYSREQUEST -DESCRIPTOR.message_types_by_name['SampleRowKeysResponse'] = _SAMPLEROWKEYSRESPONSE -DESCRIPTOR.message_types_by_name['MutateRowRequest'] = _MUTATEROWREQUEST -DESCRIPTOR.message_types_by_name['MutateRowResponse'] = _MUTATEROWRESPONSE -DESCRIPTOR.message_types_by_name['MutateRowsRequest'] = _MUTATEROWSREQUEST -DESCRIPTOR.message_types_by_name['MutateRowsResponse'] = _MUTATEROWSRESPONSE -DESCRIPTOR.message_types_by_name['CheckAndMutateRowRequest'] = _CHECKANDMUTATEROWREQUEST -DESCRIPTOR.message_types_by_name['CheckAndMutateRowResponse'] = _CHECKANDMUTATEROWRESPONSE -DESCRIPTOR.message_types_by_name['ReadModifyWriteRowRequest'] = _READMODIFYWRITEROWREQUEST -DESCRIPTOR.message_types_by_name['ReadModifyWriteRowResponse'] = _READMODIFYWRITEROWRESPONSE - -ReadRowsRequest = _reflection.GeneratedProtocolMessageType('ReadRowsRequest', (_message.Message,), dict( - DESCRIPTOR = _READROWSREQUEST, - __module__ = 
'google.bigtable.v2.bigtable_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadRowsRequest) - )) -_sym_db.RegisterMessage(ReadRowsRequest) - -ReadRowsResponse = _reflection.GeneratedProtocolMessageType('ReadRowsResponse', (_message.Message,), dict( - - CellChunk = _reflection.GeneratedProtocolMessageType('CellChunk', (_message.Message,), dict( - DESCRIPTOR = _READROWSRESPONSE_CELLCHUNK, - __module__ = 'google.bigtable.v2.bigtable_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadRowsResponse.CellChunk) - )) - , - DESCRIPTOR = _READROWSRESPONSE, - __module__ = 'google.bigtable.v2.bigtable_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadRowsResponse) - )) -_sym_db.RegisterMessage(ReadRowsResponse) -_sym_db.RegisterMessage(ReadRowsResponse.CellChunk) - -SampleRowKeysRequest = _reflection.GeneratedProtocolMessageType('SampleRowKeysRequest', (_message.Message,), dict( - DESCRIPTOR = _SAMPLEROWKEYSREQUEST, - __module__ = 'google.bigtable.v2.bigtable_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v2.SampleRowKeysRequest) - )) -_sym_db.RegisterMessage(SampleRowKeysRequest) - -SampleRowKeysResponse = _reflection.GeneratedProtocolMessageType('SampleRowKeysResponse', (_message.Message,), dict( - DESCRIPTOR = _SAMPLEROWKEYSRESPONSE, - __module__ = 'google.bigtable.v2.bigtable_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v2.SampleRowKeysResponse) - )) -_sym_db.RegisterMessage(SampleRowKeysResponse) - -MutateRowRequest = _reflection.GeneratedProtocolMessageType('MutateRowRequest', (_message.Message,), dict( - DESCRIPTOR = _MUTATEROWREQUEST, - __module__ = 'google.bigtable.v2.bigtable_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowRequest) - )) -_sym_db.RegisterMessage(MutateRowRequest) - -MutateRowResponse = _reflection.GeneratedProtocolMessageType('MutateRowResponse', (_message.Message,), dict( - DESCRIPTOR = _MUTATEROWRESPONSE, - __module__ = 'google.bigtable.v2.bigtable_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowResponse) - )) -_sym_db.RegisterMessage(MutateRowResponse) - -MutateRowsRequest = _reflection.GeneratedProtocolMessageType('MutateRowsRequest', (_message.Message,), dict( - - Entry = _reflection.GeneratedProtocolMessageType('Entry', (_message.Message,), dict( - DESCRIPTOR = _MUTATEROWSREQUEST_ENTRY, - __module__ = 'google.bigtable.v2.bigtable_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsRequest.Entry) - )) - , - DESCRIPTOR = _MUTATEROWSREQUEST, - __module__ = 'google.bigtable.v2.bigtable_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsRequest) - )) -_sym_db.RegisterMessage(MutateRowsRequest) -_sym_db.RegisterMessage(MutateRowsRequest.Entry) - -MutateRowsResponse = _reflection.GeneratedProtocolMessageType('MutateRowsResponse', (_message.Message,), dict( - - Entry = _reflection.GeneratedProtocolMessageType('Entry', (_message.Message,), dict( - DESCRIPTOR = _MUTATEROWSRESPONSE_ENTRY, - __module__ = 'google.bigtable.v2.bigtable_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsResponse.Entry) - )) - , - DESCRIPTOR = _MUTATEROWSRESPONSE, - __module__ = 'google.bigtable.v2.bigtable_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsResponse) - )) -_sym_db.RegisterMessage(MutateRowsResponse) -_sym_db.RegisterMessage(MutateRowsResponse.Entry) - -CheckAndMutateRowRequest = _reflection.GeneratedProtocolMessageType('CheckAndMutateRowRequest', 
(_message.Message,), dict( - DESCRIPTOR = _CHECKANDMUTATEROWREQUEST, - __module__ = 'google.bigtable.v2.bigtable_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v2.CheckAndMutateRowRequest) - )) -_sym_db.RegisterMessage(CheckAndMutateRowRequest) - -CheckAndMutateRowResponse = _reflection.GeneratedProtocolMessageType('CheckAndMutateRowResponse', (_message.Message,), dict( - DESCRIPTOR = _CHECKANDMUTATEROWRESPONSE, - __module__ = 'google.bigtable.v2.bigtable_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v2.CheckAndMutateRowResponse) - )) -_sym_db.RegisterMessage(CheckAndMutateRowResponse) - -ReadModifyWriteRowRequest = _reflection.GeneratedProtocolMessageType('ReadModifyWriteRowRequest', (_message.Message,), dict( - DESCRIPTOR = _READMODIFYWRITEROWREQUEST, - __module__ = 'google.bigtable.v2.bigtable_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadModifyWriteRowRequest) - )) -_sym_db.RegisterMessage(ReadModifyWriteRowRequest) - -ReadModifyWriteRowResponse = _reflection.GeneratedProtocolMessageType('ReadModifyWriteRowResponse', (_message.Message,), dict( - DESCRIPTOR = _READMODIFYWRITEROWRESPONSE, - __module__ = 'google.bigtable.v2.bigtable_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadModifyWriteRowResponse) - )) -_sym_db.RegisterMessage(ReadModifyWriteRowResponse) - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\026com.google.bigtable.v2B\rBigtableProtoP\001')) -import grpc -from grpc.beta import implementations as beta_implementations -from grpc.beta import interfaces as beta_interfaces -from grpc.framework.common import cardinality -from grpc.framework.interfaces.face import utilities as face_utilities - - -class BigtableStub(object): - """Service for reading from and writing to existing Bigtable tables. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. - """ - self.ReadRows = channel.unary_stream( - '/google.bigtable.v2.Bigtable/ReadRows', - request_serializer=ReadRowsRequest.SerializeToString, - response_deserializer=ReadRowsResponse.FromString, - ) - self.SampleRowKeys = channel.unary_stream( - '/google.bigtable.v2.Bigtable/SampleRowKeys', - request_serializer=SampleRowKeysRequest.SerializeToString, - response_deserializer=SampleRowKeysResponse.FromString, - ) - self.MutateRow = channel.unary_unary( - '/google.bigtable.v2.Bigtable/MutateRow', - request_serializer=MutateRowRequest.SerializeToString, - response_deserializer=MutateRowResponse.FromString, - ) - self.MutateRows = channel.unary_stream( - '/google.bigtable.v2.Bigtable/MutateRows', - request_serializer=MutateRowsRequest.SerializeToString, - response_deserializer=MutateRowsResponse.FromString, - ) - self.CheckAndMutateRow = channel.unary_unary( - '/google.bigtable.v2.Bigtable/CheckAndMutateRow', - request_serializer=CheckAndMutateRowRequest.SerializeToString, - response_deserializer=CheckAndMutateRowResponse.FromString, - ) - self.ReadModifyWriteRow = channel.unary_unary( - '/google.bigtable.v2.Bigtable/ReadModifyWriteRow', - request_serializer=ReadModifyWriteRowRequest.SerializeToString, - response_deserializer=ReadModifyWriteRowResponse.FromString, - ) - - -class BigtableServicer(object): - """Service for reading from and writing to existing Bigtable tables. - """ - - def ReadRows(self, request, context): - """Streams back the contents of all requested rows, optionally - applying the same Reader filter to each. 
Depending on their size, - rows and cells may be broken up across multiple responses, but - atomicity of each row will still be preserved. See the - ReadRowsResponse documentation for details. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def SampleRowKeys(self, request, context): - """Returns a sample of row keys in the table. The returned row keys will - delimit contiguous sections of the table of approximately equal size, - which can be used to break up the data for distributed tasks like - mapreduces. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def MutateRow(self, request, context): - """Mutates a row atomically. Cells already present in the row are left - unchanged unless explicitly changed by `mutation`. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def MutateRows(self, request, context): - """Mutates multiple rows in a batch. Each individual row is mutated - atomically as in MutateRow, but the entire batch is not executed - atomically. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def CheckAndMutateRow(self, request, context): - """Mutates a row atomically based on the output of a predicate Reader filter. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def ReadModifyWriteRow(self, request, context): - """Modifies a row atomically. The method reads the latest existing timestamp - and value from the specified columns and writes a new entry based on - pre-defined read/modify/write rules. The new value for the timestamp is the - greater of the existing timestamp or the current server time. The method - returns the new contents of all modified cells. 
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - -def add_BigtableServicer_to_server(servicer, server): - rpc_method_handlers = { - 'ReadRows': grpc.unary_stream_rpc_method_handler( - servicer.ReadRows, - request_deserializer=ReadRowsRequest.FromString, - response_serializer=ReadRowsResponse.SerializeToString, - ), - 'SampleRowKeys': grpc.unary_stream_rpc_method_handler( - servicer.SampleRowKeys, - request_deserializer=SampleRowKeysRequest.FromString, - response_serializer=SampleRowKeysResponse.SerializeToString, - ), - 'MutateRow': grpc.unary_unary_rpc_method_handler( - servicer.MutateRow, - request_deserializer=MutateRowRequest.FromString, - response_serializer=MutateRowResponse.SerializeToString, - ), - 'MutateRows': grpc.unary_stream_rpc_method_handler( - servicer.MutateRows, - request_deserializer=MutateRowsRequest.FromString, - response_serializer=MutateRowsResponse.SerializeToString, - ), - 'CheckAndMutateRow': grpc.unary_unary_rpc_method_handler( - servicer.CheckAndMutateRow, - request_deserializer=CheckAndMutateRowRequest.FromString, - response_serializer=CheckAndMutateRowResponse.SerializeToString, - ), - 'ReadModifyWriteRow': grpc.unary_unary_rpc_method_handler( - servicer.ReadModifyWriteRow, - request_deserializer=ReadModifyWriteRowRequest.FromString, - response_serializer=ReadModifyWriteRowResponse.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - 'google.bigtable.v2.Bigtable', rpc_method_handlers) - server.add_generic_rpc_handlers((generic_handler,)) - - -class BetaBigtableServicer(object): - """Service for reading from and writing to existing Bigtable tables. - """ - def ReadRows(self, request, context): - """Streams back the contents of all requested rows, optionally - applying the same Reader filter to each. Depending on their size, - rows and cells may be broken up across multiple responses, but - atomicity of each row will still be preserved. See the - ReadRowsResponse documentation for details. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - def SampleRowKeys(self, request, context): - """Returns a sample of row keys in the table. The returned row keys will - delimit contiguous sections of the table of approximately equal size, - which can be used to break up the data for distributed tasks like - mapreduces. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - def MutateRow(self, request, context): - """Mutates a row atomically. Cells already present in the row are left - unchanged unless explicitly changed by `mutation`. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - def MutateRows(self, request, context): - """Mutates multiple rows in a batch. Each individual row is mutated - atomically as in MutateRow, but the entire batch is not executed - atomically. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - def CheckAndMutateRow(self, request, context): - """Mutates a row atomically based on the output of a predicate Reader filter. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - def ReadModifyWriteRow(self, request, context): - """Modifies a row atomically. The method reads the latest existing timestamp - and value from the specified columns and writes a new entry based on - pre-defined read/modify/write rules. The new value for the timestamp is the - greater of the existing timestamp or the current server time. 
The method - returns the new contents of all modified cells. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - - -class BetaBigtableStub(object): - """Service for reading from and writing to existing Bigtable tables. - """ - def ReadRows(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - """Streams back the contents of all requested rows, optionally - applying the same Reader filter to each. Depending on their size, - rows and cells may be broken up across multiple responses, but - atomicity of each row will still be preserved. See the - ReadRowsResponse documentation for details. - """ - raise NotImplementedError() - def SampleRowKeys(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - """Returns a sample of row keys in the table. The returned row keys will - delimit contiguous sections of the table of approximately equal size, - which can be used to break up the data for distributed tasks like - mapreduces. - """ - raise NotImplementedError() - def MutateRow(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - """Mutates a row atomically. Cells already present in the row are left - unchanged unless explicitly changed by `mutation`. - """ - raise NotImplementedError() - MutateRow.future = None - def MutateRows(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - """Mutates multiple rows in a batch. Each individual row is mutated - atomically as in MutateRow, but the entire batch is not executed - atomically. - """ - raise NotImplementedError() - def CheckAndMutateRow(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - """Mutates a row atomically based on the output of a predicate Reader filter. - """ - raise NotImplementedError() - CheckAndMutateRow.future = None - def ReadModifyWriteRow(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - """Modifies a row atomically. The method reads the latest existing timestamp - and value from the specified columns and writes a new entry based on - pre-defined read/modify/write rules. The new value for the timestamp is the - greater of the existing timestamp or the current server time. The method - returns the new contents of all modified cells. 
- """ - raise NotImplementedError() - ReadModifyWriteRow.future = None - - -def beta_create_Bigtable_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): - request_deserializers = { - ('google.bigtable.v2.Bigtable', 'CheckAndMutateRow'): CheckAndMutateRowRequest.FromString, - ('google.bigtable.v2.Bigtable', 'MutateRow'): MutateRowRequest.FromString, - ('google.bigtable.v2.Bigtable', 'MutateRows'): MutateRowsRequest.FromString, - ('google.bigtable.v2.Bigtable', 'ReadModifyWriteRow'): ReadModifyWriteRowRequest.FromString, - ('google.bigtable.v2.Bigtable', 'ReadRows'): ReadRowsRequest.FromString, - ('google.bigtable.v2.Bigtable', 'SampleRowKeys'): SampleRowKeysRequest.FromString, - } - response_serializers = { - ('google.bigtable.v2.Bigtable', 'CheckAndMutateRow'): CheckAndMutateRowResponse.SerializeToString, - ('google.bigtable.v2.Bigtable', 'MutateRow'): MutateRowResponse.SerializeToString, - ('google.bigtable.v2.Bigtable', 'MutateRows'): MutateRowsResponse.SerializeToString, - ('google.bigtable.v2.Bigtable', 'ReadModifyWriteRow'): ReadModifyWriteRowResponse.SerializeToString, - ('google.bigtable.v2.Bigtable', 'ReadRows'): ReadRowsResponse.SerializeToString, - ('google.bigtable.v2.Bigtable', 'SampleRowKeys'): SampleRowKeysResponse.SerializeToString, - } - method_implementations = { - ('google.bigtable.v2.Bigtable', 'CheckAndMutateRow'): face_utilities.unary_unary_inline(servicer.CheckAndMutateRow), - ('google.bigtable.v2.Bigtable', 'MutateRow'): face_utilities.unary_unary_inline(servicer.MutateRow), - ('google.bigtable.v2.Bigtable', 'MutateRows'): face_utilities.unary_stream_inline(servicer.MutateRows), - ('google.bigtable.v2.Bigtable', 'ReadModifyWriteRow'): face_utilities.unary_unary_inline(servicer.ReadModifyWriteRow), - ('google.bigtable.v2.Bigtable', 'ReadRows'): face_utilities.unary_stream_inline(servicer.ReadRows), - ('google.bigtable.v2.Bigtable', 'SampleRowKeys'): face_utilities.unary_stream_inline(servicer.SampleRowKeys), - } - server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) - return beta_implementations.server(method_implementations, options=server_options) - - -def beta_create_Bigtable_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): - request_serializers = { - ('google.bigtable.v2.Bigtable', 'CheckAndMutateRow'): CheckAndMutateRowRequest.SerializeToString, - ('google.bigtable.v2.Bigtable', 'MutateRow'): MutateRowRequest.SerializeToString, - ('google.bigtable.v2.Bigtable', 'MutateRows'): MutateRowsRequest.SerializeToString, - ('google.bigtable.v2.Bigtable', 'ReadModifyWriteRow'): ReadModifyWriteRowRequest.SerializeToString, - ('google.bigtable.v2.Bigtable', 'ReadRows'): ReadRowsRequest.SerializeToString, - ('google.bigtable.v2.Bigtable', 'SampleRowKeys'): SampleRowKeysRequest.SerializeToString, - } - response_deserializers = { - ('google.bigtable.v2.Bigtable', 'CheckAndMutateRow'): CheckAndMutateRowResponse.FromString, - ('google.bigtable.v2.Bigtable', 'MutateRow'): MutateRowResponse.FromString, - ('google.bigtable.v2.Bigtable', 'MutateRows'): MutateRowsResponse.FromString, - ('google.bigtable.v2.Bigtable', 'ReadModifyWriteRow'): ReadModifyWriteRowResponse.FromString, - ('google.bigtable.v2.Bigtable', 'ReadRows'): ReadRowsResponse.FromString, - ('google.bigtable.v2.Bigtable', 'SampleRowKeys'): 
SampleRowKeysResponse.FromString, - } - cardinalities = { - 'CheckAndMutateRow': cardinality.Cardinality.UNARY_UNARY, - 'MutateRow': cardinality.Cardinality.UNARY_UNARY, - 'MutateRows': cardinality.Cardinality.UNARY_STREAM, - 'ReadModifyWriteRow': cardinality.Cardinality.UNARY_UNARY, - 'ReadRows': cardinality.Cardinality.UNARY_STREAM, - 'SampleRowKeys': cardinality.Cardinality.UNARY_STREAM, - } - stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) - return beta_implementations.dynamic_stub(channel, 'google.bigtable.v2.Bigtable', cardinalities, options=stub_options) -# @@protoc_insertion_point(module_scope) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/bigtable_table_admin_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/bigtable_table_admin_pb2.py deleted file mode 100644 index 9cafeed3e426..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/bigtable_table_admin_pb2.py +++ /dev/null @@ -1,784 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/bigtable/admin/v2/bigtable_table_admin.proto - -import sys -_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.cloud.bigtable._generated import table_pb2 as google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2 -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name='google/bigtable/admin/v2/bigtable_table_admin.proto', - package='google.bigtable.admin.v2', - syntax='proto3', - serialized_pb=_b('\n3google/bigtable/admin/v2/bigtable_table_admin.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a$google/bigtable/admin/v2/table.proto\x1a\x1bgoogle/protobuf/empty.proto\"\xc8\x01\n\x12\x43reateTableRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x10\n\x08table_id\x18\x02 \x01(\t\x12.\n\x05table\x18\x03 \x01(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12J\n\x0einitial_splits\x18\x04 \x03(\x0b\x32\x32.google.bigtable.admin.v2.CreateTableRequest.Split\x1a\x14\n\x05Split\x12\x0b\n\x03key\x18\x01 \x01(\x0c\"m\n\x13\x44ropRowRangeRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x18\n\x0erow_key_prefix\x18\x02 \x01(\x0cH\x00\x12$\n\x1a\x64\x65lete_all_data_from_table\x18\x03 \x01(\x08H\x00\x42\x08\n\x06target\"k\n\x11ListTablesRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x32\n\x04view\x18\x02 \x01(\x0e\x32$.google.bigtable.admin.v2.Table.View\x12\x12\n\npage_token\x18\x03 \x01(\t\"^\n\x12ListTablesResponse\x12/\n\x06tables\x18\x01 \x03(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"S\n\x0fGetTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x32\n\x04view\x18\x02 \x01(\x0e\x32$.google.bigtable.admin.v2.Table.View\"\"\n\x12\x44\x65leteTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\xae\x02\n\x1bModifyColumnFamiliesRequest\x12\x0c\n\x04name\x18\x01 
\x01(\t\x12Y\n\rmodifications\x18\x02 \x03(\x0b\x32\x42.google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification\x1a\xa5\x01\n\x0cModification\x12\n\n\x02id\x18\x01 \x01(\t\x12\x38\n\x06\x63reate\x18\x02 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamilyH\x00\x12\x38\n\x06update\x18\x03 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamilyH\x00\x12\x0e\n\x04\x64rop\x18\x04 \x01(\x08H\x00\x42\x05\n\x03mod2\xb8\x07\n\x12\x42igtableTableAdmin\x12\x93\x01\n\x0b\x43reateTable\x12,.google.bigtable.admin.v2.CreateTableRequest\x1a\x1f.google.bigtable.admin.v2.Table\"5\x82\xd3\xe4\x93\x02/\"*/v2/{parent=projects/*/instances/*}/tables:\x01*\x12\x9b\x01\n\nListTables\x12+.google.bigtable.admin.v2.ListTablesRequest\x1a,.google.bigtable.admin.v2.ListTablesResponse\"2\x82\xd3\xe4\x93\x02,\x12*/v2/{parent=projects/*/instances/*}/tables\x12\x8a\x01\n\x08GetTable\x12).google.bigtable.admin.v2.GetTableRequest\x1a\x1f.google.bigtable.admin.v2.Table\"2\x82\xd3\xe4\x93\x02,\x12*/v2/{name=projects/*/instances/*/tables/*}\x12\x87\x01\n\x0b\x44\x65leteTable\x12,.google.bigtable.admin.v2.DeleteTableRequest\x1a\x16.google.protobuf.Empty\"2\x82\xd3\xe4\x93\x02,**/v2/{name=projects/*/instances/*/tables/*}\x12\xba\x01\n\x14ModifyColumnFamilies\x12\x35.google.bigtable.admin.v2.ModifyColumnFamiliesRequest\x1a\x1f.google.bigtable.admin.v2.Table\"J\x82\xd3\xe4\x93\x02\x44\"?/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies:\x01*\x12\x99\x01\n\x0c\x44ropRowRange\x12-.google.bigtable.admin.v2.DropRowRangeRequest\x1a\x16.google.protobuf.Empty\"B\x82\xd3\xe4\x93\x02<\"7/v2/{name=projects/*/instances/*/tables/*}:dropRowRange:\x01*B9\n\x1c\x63om.google.bigtable.admin.v2B\x17\x42igtableTableAdminProtoP\x01\x62\x06proto3') - , - dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.DESCRIPTOR,google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,]) -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - - - - -_CREATETABLEREQUEST_SPLIT = _descriptor.Descriptor( - name='Split', - full_name='google.bigtable.admin.v2.CreateTableRequest.Split', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='key', full_name='google.bigtable.admin.v2.CreateTableRequest.Split.key', index=0, - number=1, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=359, - serialized_end=379, -) - -_CREATETABLEREQUEST = _descriptor.Descriptor( - name='CreateTableRequest', - full_name='google.bigtable.admin.v2.CreateTableRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='parent', full_name='google.bigtable.admin.v2.CreateTableRequest.parent', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='table_id', full_name='google.bigtable.admin.v2.CreateTableRequest.table_id', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, 
containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='table', full_name='google.bigtable.admin.v2.CreateTableRequest.table', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='initial_splits', full_name='google.bigtable.admin.v2.CreateTableRequest.initial_splits', index=3, - number=4, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[_CREATETABLEREQUEST_SPLIT, ], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=179, - serialized_end=379, -) - - -_DROPROWRANGEREQUEST = _descriptor.Descriptor( - name='DropRowRangeRequest', - full_name='google.bigtable.admin.v2.DropRowRangeRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.v2.DropRowRangeRequest.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='row_key_prefix', full_name='google.bigtable.admin.v2.DropRowRangeRequest.row_key_prefix', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='delete_all_data_from_table', full_name='google.bigtable.admin.v2.DropRowRangeRequest.delete_all_data_from_table', index=2, - number=3, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name='target', full_name='google.bigtable.admin.v2.DropRowRangeRequest.target', - index=0, containing_type=None, fields=[]), - ], - serialized_start=381, - serialized_end=490, -) - - -_LISTTABLESREQUEST = _descriptor.Descriptor( - name='ListTablesRequest', - full_name='google.bigtable.admin.v2.ListTablesRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='parent', full_name='google.bigtable.admin.v2.ListTablesRequest.parent', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='view', full_name='google.bigtable.admin.v2.ListTablesRequest.view', index=1, - number=2, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - 
name='page_token', full_name='google.bigtable.admin.v2.ListTablesRequest.page_token', index=2, - number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=492, - serialized_end=599, -) - - -_LISTTABLESRESPONSE = _descriptor.Descriptor( - name='ListTablesResponse', - full_name='google.bigtable.admin.v2.ListTablesResponse', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='tables', full_name='google.bigtable.admin.v2.ListTablesResponse.tables', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='next_page_token', full_name='google.bigtable.admin.v2.ListTablesResponse.next_page_token', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=601, - serialized_end=695, -) - - -_GETTABLEREQUEST = _descriptor.Descriptor( - name='GetTableRequest', - full_name='google.bigtable.admin.v2.GetTableRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.v2.GetTableRequest.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='view', full_name='google.bigtable.admin.v2.GetTableRequest.view', index=1, - number=2, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=697, - serialized_end=780, -) - - -_DELETETABLEREQUEST = _descriptor.Descriptor( - name='DeleteTableRequest', - full_name='google.bigtable.admin.v2.DeleteTableRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.v2.DeleteTableRequest.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=782, - serialized_end=816, -) - - -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION = _descriptor.Descriptor( - 
name='Modification', - full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='id', full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.id', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='create', full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.create', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='update', full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.update', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='drop', full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.drop', index=3, - number=4, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name='mod', full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.mod', - index=0, containing_type=None, fields=[]), - ], - serialized_start=956, - serialized_end=1121, -) - -_MODIFYCOLUMNFAMILIESREQUEST = _descriptor.Descriptor( - name='ModifyColumnFamiliesRequest', - full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='modifications', full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.modifications', index=1, - number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION, ], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=819, - serialized_end=1121, -) - -_CREATETABLEREQUEST_SPLIT.containing_type = _CREATETABLEREQUEST -_CREATETABLEREQUEST.fields_by_name['table'].message_type = google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2._TABLE -_CREATETABLEREQUEST.fields_by_name['initial_splits'].message_type = _CREATETABLEREQUEST_SPLIT -_DROPROWRANGEREQUEST.oneofs_by_name['target'].fields.append( - 
_DROPROWRANGEREQUEST.fields_by_name['row_key_prefix']) -_DROPROWRANGEREQUEST.fields_by_name['row_key_prefix'].containing_oneof = _DROPROWRANGEREQUEST.oneofs_by_name['target'] -_DROPROWRANGEREQUEST.oneofs_by_name['target'].fields.append( - _DROPROWRANGEREQUEST.fields_by_name['delete_all_data_from_table']) -_DROPROWRANGEREQUEST.fields_by_name['delete_all_data_from_table'].containing_oneof = _DROPROWRANGEREQUEST.oneofs_by_name['target'] -_LISTTABLESREQUEST.fields_by_name['view'].enum_type = google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2._TABLE_VIEW -_LISTTABLESRESPONSE.fields_by_name['tables'].message_type = google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2._TABLE -_GETTABLEREQUEST.fields_by_name['view'].enum_type = google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2._TABLE_VIEW -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['create'].message_type = google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2._COLUMNFAMILY -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['update'].message_type = google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2._COLUMNFAMILY -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.containing_type = _MODIFYCOLUMNFAMILIESREQUEST -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name['mod'].fields.append( - _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['create']) -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['create'].containing_oneof = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name['mod'] -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name['mod'].fields.append( - _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['update']) -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['update'].containing_oneof = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name['mod'] -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name['mod'].fields.append( - _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['drop']) -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['drop'].containing_oneof = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name['mod'] -_MODIFYCOLUMNFAMILIESREQUEST.fields_by_name['modifications'].message_type = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION -DESCRIPTOR.message_types_by_name['CreateTableRequest'] = _CREATETABLEREQUEST -DESCRIPTOR.message_types_by_name['DropRowRangeRequest'] = _DROPROWRANGEREQUEST -DESCRIPTOR.message_types_by_name['ListTablesRequest'] = _LISTTABLESREQUEST -DESCRIPTOR.message_types_by_name['ListTablesResponse'] = _LISTTABLESRESPONSE -DESCRIPTOR.message_types_by_name['GetTableRequest'] = _GETTABLEREQUEST -DESCRIPTOR.message_types_by_name['DeleteTableRequest'] = _DELETETABLEREQUEST -DESCRIPTOR.message_types_by_name['ModifyColumnFamiliesRequest'] = _MODIFYCOLUMNFAMILIESREQUEST - -CreateTableRequest = _reflection.GeneratedProtocolMessageType('CreateTableRequest', (_message.Message,), dict( - - Split = _reflection.GeneratedProtocolMessageType('Split', (_message.Message,), dict( - DESCRIPTOR = _CREATETABLEREQUEST_SPLIT, - __module__ = 'google.bigtable.admin.v2.bigtable_table_admin_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateTableRequest.Split) - )) - , - DESCRIPTOR = _CREATETABLEREQUEST, - __module__ = 'google.bigtable.admin.v2.bigtable_table_admin_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateTableRequest) - )) -_sym_db.RegisterMessage(CreateTableRequest) -_sym_db.RegisterMessage(CreateTableRequest.Split) - -DropRowRangeRequest = _reflection.GeneratedProtocolMessageType('DropRowRangeRequest', 
(_message.Message,), dict( - DESCRIPTOR = _DROPROWRANGEREQUEST, - __module__ = 'google.bigtable.admin.v2.bigtable_table_admin_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DropRowRangeRequest) - )) -_sym_db.RegisterMessage(DropRowRangeRequest) - -ListTablesRequest = _reflection.GeneratedProtocolMessageType('ListTablesRequest', (_message.Message,), dict( - DESCRIPTOR = _LISTTABLESREQUEST, - __module__ = 'google.bigtable.admin.v2.bigtable_table_admin_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListTablesRequest) - )) -_sym_db.RegisterMessage(ListTablesRequest) - -ListTablesResponse = _reflection.GeneratedProtocolMessageType('ListTablesResponse', (_message.Message,), dict( - DESCRIPTOR = _LISTTABLESRESPONSE, - __module__ = 'google.bigtable.admin.v2.bigtable_table_admin_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListTablesResponse) - )) -_sym_db.RegisterMessage(ListTablesResponse) - -GetTableRequest = _reflection.GeneratedProtocolMessageType('GetTableRequest', (_message.Message,), dict( - DESCRIPTOR = _GETTABLEREQUEST, - __module__ = 'google.bigtable.admin.v2.bigtable_table_admin_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetTableRequest) - )) -_sym_db.RegisterMessage(GetTableRequest) - -DeleteTableRequest = _reflection.GeneratedProtocolMessageType('DeleteTableRequest', (_message.Message,), dict( - DESCRIPTOR = _DELETETABLEREQUEST, - __module__ = 'google.bigtable.admin.v2.bigtable_table_admin_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteTableRequest) - )) -_sym_db.RegisterMessage(DeleteTableRequest) - -ModifyColumnFamiliesRequest = _reflection.GeneratedProtocolMessageType('ModifyColumnFamiliesRequest', (_message.Message,), dict( - - Modification = _reflection.GeneratedProtocolMessageType('Modification', (_message.Message,), dict( - DESCRIPTOR = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION, - __module__ = 'google.bigtable.admin.v2.bigtable_table_admin_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification) - )) - , - DESCRIPTOR = _MODIFYCOLUMNFAMILIESREQUEST, - __module__ = 'google.bigtable.admin.v2.bigtable_table_admin_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ModifyColumnFamiliesRequest) - )) -_sym_db.RegisterMessage(ModifyColumnFamiliesRequest) -_sym_db.RegisterMessage(ModifyColumnFamiliesRequest.Modification) - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\034com.google.bigtable.admin.v2B\027BigtableTableAdminProtoP\001')) -import grpc -from grpc.beta import implementations as beta_implementations -from grpc.beta import interfaces as beta_interfaces -from grpc.framework.common import cardinality -from grpc.framework.interfaces.face import utilities as face_utilities - - -class BigtableTableAdminStub(object): - """Service for creating, configuring, and deleting Cloud Bigtable tables. - Provides access to the table schemas only, not the data stored within - the tables. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. 
- """ - self.CreateTable = channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/CreateTable', - request_serializer=CreateTableRequest.SerializeToString, - response_deserializer=google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.FromString, - ) - self.ListTables = channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/ListTables', - request_serializer=ListTablesRequest.SerializeToString, - response_deserializer=ListTablesResponse.FromString, - ) - self.GetTable = channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/GetTable', - request_serializer=GetTableRequest.SerializeToString, - response_deserializer=google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.FromString, - ) - self.DeleteTable = channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable', - request_serializer=DeleteTableRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - self.ModifyColumnFamilies = channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/ModifyColumnFamilies', - request_serializer=ModifyColumnFamiliesRequest.SerializeToString, - response_deserializer=google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.FromString, - ) - self.DropRowRange = channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/DropRowRange', - request_serializer=DropRowRangeRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - - -class BigtableTableAdminServicer(object): - """Service for creating, configuring, and deleting Cloud Bigtable tables. - Provides access to the table schemas only, not the data stored within - the tables. - """ - - def CreateTable(self, request, context): - """Creates a new table in the specified instance. - The table can be created with a full set of initial column families, - specified in the request. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def ListTables(self, request, context): - """Lists all tables served from a specified instance. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def GetTable(self, request, context): - """Gets metadata information about the specified table. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def DeleteTable(self, request, context): - """Permanently deletes a specified table and all of its data. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def ModifyColumnFamilies(self, request, context): - """Atomically performs a series of column family modifications - on the specified table. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def DropRowRange(self, request, context): - """Permanently drop/delete a row range from a specified table. The request can - specify whether to delete all rows in a table, or only those that match a - particular prefix. 
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - -def add_BigtableTableAdminServicer_to_server(servicer, server): - rpc_method_handlers = { - 'CreateTable': grpc.unary_unary_rpc_method_handler( - servicer.CreateTable, - request_deserializer=CreateTableRequest.FromString, - response_serializer=google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.SerializeToString, - ), - 'ListTables': grpc.unary_unary_rpc_method_handler( - servicer.ListTables, - request_deserializer=ListTablesRequest.FromString, - response_serializer=ListTablesResponse.SerializeToString, - ), - 'GetTable': grpc.unary_unary_rpc_method_handler( - servicer.GetTable, - request_deserializer=GetTableRequest.FromString, - response_serializer=google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.SerializeToString, - ), - 'DeleteTable': grpc.unary_unary_rpc_method_handler( - servicer.DeleteTable, - request_deserializer=DeleteTableRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - 'ModifyColumnFamilies': grpc.unary_unary_rpc_method_handler( - servicer.ModifyColumnFamilies, - request_deserializer=ModifyColumnFamiliesRequest.FromString, - response_serializer=google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.SerializeToString, - ), - 'DropRowRange': grpc.unary_unary_rpc_method_handler( - servicer.DropRowRange, - request_deserializer=DropRowRangeRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - 'google.bigtable.admin.v2.BigtableTableAdmin', rpc_method_handlers) - server.add_generic_rpc_handlers((generic_handler,)) - - -class BetaBigtableTableAdminServicer(object): - """Service for creating, configuring, and deleting Cloud Bigtable tables. - Provides access to the table schemas only, not the data stored within - the tables. - """ - def CreateTable(self, request, context): - """Creates a new table in the specified instance. - The table can be created with a full set of initial column families, - specified in the request. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - def ListTables(self, request, context): - """Lists all tables served from a specified instance. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - def GetTable(self, request, context): - """Gets metadata information about the specified table. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - def DeleteTable(self, request, context): - """Permanently deletes a specified table and all of its data. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - def ModifyColumnFamilies(self, request, context): - """Atomically performs a series of column family modifications - on the specified table. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - def DropRowRange(self, request, context): - """Permanently drop/delete a row range from a specified table. The request can - specify whether to delete all rows in a table, or only those that match a - particular prefix. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - - -class BetaBigtableTableAdminStub(object): - """Service for creating, configuring, and deleting Cloud Bigtable tables. - Provides access to the table schemas only, not the data stored within - the tables. 
- """ - def CreateTable(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - """Creates a new table in the specified instance. - The table can be created with a full set of initial column families, - specified in the request. - """ - raise NotImplementedError() - CreateTable.future = None - def ListTables(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - """Lists all tables served from a specified instance. - """ - raise NotImplementedError() - ListTables.future = None - def GetTable(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - """Gets metadata information about the specified table. - """ - raise NotImplementedError() - GetTable.future = None - def DeleteTable(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - """Permanently deletes a specified table and all of its data. - """ - raise NotImplementedError() - DeleteTable.future = None - def ModifyColumnFamilies(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - """Atomically performs a series of column family modifications - on the specified table. - """ - raise NotImplementedError() - ModifyColumnFamilies.future = None - def DropRowRange(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - """Permanently drop/delete a row range from a specified table. The request can - specify whether to delete all rows in a table, or only those that match a - particular prefix. - """ - raise NotImplementedError() - DropRowRange.future = None - - -def beta_create_BigtableTableAdmin_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): - request_deserializers = { - ('google.bigtable.admin.v2.BigtableTableAdmin', 'CreateTable'): CreateTableRequest.FromString, - ('google.bigtable.admin.v2.BigtableTableAdmin', 'DeleteTable'): DeleteTableRequest.FromString, - ('google.bigtable.admin.v2.BigtableTableAdmin', 'DropRowRange'): DropRowRangeRequest.FromString, - ('google.bigtable.admin.v2.BigtableTableAdmin', 'GetTable'): GetTableRequest.FromString, - ('google.bigtable.admin.v2.BigtableTableAdmin', 'ListTables'): ListTablesRequest.FromString, - ('google.bigtable.admin.v2.BigtableTableAdmin', 'ModifyColumnFamilies'): ModifyColumnFamiliesRequest.FromString, - } - response_serializers = { - ('google.bigtable.admin.v2.BigtableTableAdmin', 'CreateTable'): google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.SerializeToString, - ('google.bigtable.admin.v2.BigtableTableAdmin', 'DeleteTable'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ('google.bigtable.admin.v2.BigtableTableAdmin', 'DropRowRange'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ('google.bigtable.admin.v2.BigtableTableAdmin', 'GetTable'): google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.SerializeToString, - ('google.bigtable.admin.v2.BigtableTableAdmin', 'ListTables'): ListTablesResponse.SerializeToString, - ('google.bigtable.admin.v2.BigtableTableAdmin', 'ModifyColumnFamilies'): google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.SerializeToString, - } - method_implementations = { - ('google.bigtable.admin.v2.BigtableTableAdmin', 'CreateTable'): face_utilities.unary_unary_inline(servicer.CreateTable), - ('google.bigtable.admin.v2.BigtableTableAdmin', 'DeleteTable'): face_utilities.unary_unary_inline(servicer.DeleteTable), - ('google.bigtable.admin.v2.BigtableTableAdmin', 'DropRowRange'): 
face_utilities.unary_unary_inline(servicer.DropRowRange), - ('google.bigtable.admin.v2.BigtableTableAdmin', 'GetTable'): face_utilities.unary_unary_inline(servicer.GetTable), - ('google.bigtable.admin.v2.BigtableTableAdmin', 'ListTables'): face_utilities.unary_unary_inline(servicer.ListTables), - ('google.bigtable.admin.v2.BigtableTableAdmin', 'ModifyColumnFamilies'): face_utilities.unary_unary_inline(servicer.ModifyColumnFamilies), - } - server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) - return beta_implementations.server(method_implementations, options=server_options) - - -def beta_create_BigtableTableAdmin_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): - request_serializers = { - ('google.bigtable.admin.v2.BigtableTableAdmin', 'CreateTable'): CreateTableRequest.SerializeToString, - ('google.bigtable.admin.v2.BigtableTableAdmin', 'DeleteTable'): DeleteTableRequest.SerializeToString, - ('google.bigtable.admin.v2.BigtableTableAdmin', 'DropRowRange'): DropRowRangeRequest.SerializeToString, - ('google.bigtable.admin.v2.BigtableTableAdmin', 'GetTable'): GetTableRequest.SerializeToString, - ('google.bigtable.admin.v2.BigtableTableAdmin', 'ListTables'): ListTablesRequest.SerializeToString, - ('google.bigtable.admin.v2.BigtableTableAdmin', 'ModifyColumnFamilies'): ModifyColumnFamiliesRequest.SerializeToString, - } - response_deserializers = { - ('google.bigtable.admin.v2.BigtableTableAdmin', 'CreateTable'): google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.FromString, - ('google.bigtable.admin.v2.BigtableTableAdmin', 'DeleteTable'): google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ('google.bigtable.admin.v2.BigtableTableAdmin', 'DropRowRange'): google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ('google.bigtable.admin.v2.BigtableTableAdmin', 'GetTable'): google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.FromString, - ('google.bigtable.admin.v2.BigtableTableAdmin', 'ListTables'): ListTablesResponse.FromString, - ('google.bigtable.admin.v2.BigtableTableAdmin', 'ModifyColumnFamilies'): google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.FromString, - } - cardinalities = { - 'CreateTable': cardinality.Cardinality.UNARY_UNARY, - 'DeleteTable': cardinality.Cardinality.UNARY_UNARY, - 'DropRowRange': cardinality.Cardinality.UNARY_UNARY, - 'GetTable': cardinality.Cardinality.UNARY_UNARY, - 'ListTables': cardinality.Cardinality.UNARY_UNARY, - 'ModifyColumnFamilies': cardinality.Cardinality.UNARY_UNARY, - } - stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) - return beta_implementations.dynamic_stub(channel, 'google.bigtable.admin.v2.BigtableTableAdmin', cardinalities, options=stub_options) -# @@protoc_insertion_point(module_scope) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/instance_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/instance_pb2.py deleted file mode 100644 index 8d4383d31e79..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/instance_pb2.py +++ /dev/null @@ -1,222 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: google/bigtable/admin/v2/instance.proto - -import sys -_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.cloud.bigtable._generated import common_pb2 as google_dot_bigtable_dot_admin_dot_v2_dot_common__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name='google/bigtable/admin/v2/instance.proto', - package='google.bigtable.admin.v2', - syntax='proto3', - serialized_pb=_b('\n\'google/bigtable/admin/v2/instance.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a%google/bigtable/admin/v2/common.proto\"\x9e\x01\n\x08Instance\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12\x37\n\x05state\x18\x03 \x01(\x0e\x32(.google.bigtable.admin.v2.Instance.State\"5\n\x05State\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\t\n\x05READY\x10\x01\x12\x0c\n\x08\x43REATING\x10\x02\"\x8e\x02\n\x07\x43luster\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x10\n\x08location\x18\x02 \x01(\t\x12\x36\n\x05state\x18\x03 \x01(\x0e\x32\'.google.bigtable.admin.v2.Cluster.State\x12\x13\n\x0bserve_nodes\x18\x04 \x01(\x05\x12\x43\n\x14\x64\x65\x66\x61ult_storage_type\x18\x05 \x01(\x0e\x32%.google.bigtable.admin.v2.StorageType\"Q\n\x05State\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\t\n\x05READY\x10\x01\x12\x0c\n\x08\x43REATING\x10\x02\x12\x0c\n\x08RESIZING\x10\x03\x12\x0c\n\x08\x44ISABLED\x10\x04\x42/\n\x1c\x63om.google.bigtable.admin.v2B\rInstanceProtoP\x01\x62\x06proto3') - , - dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_bigtable_dot_admin_dot_v2_dot_common__pb2.DESCRIPTOR,]) -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - - - -_INSTANCE_STATE = _descriptor.EnumDescriptor( - name='State', - full_name='google.bigtable.admin.v2.Instance.State', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='STATE_NOT_KNOWN', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='READY', index=1, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CREATING', index=2, number=2, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=244, - serialized_end=297, -) -_sym_db.RegisterEnumDescriptor(_INSTANCE_STATE) - -_CLUSTER_STATE = _descriptor.EnumDescriptor( - name='State', - full_name='google.bigtable.admin.v2.Cluster.State', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='STATE_NOT_KNOWN', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='READY', index=1, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CREATING', index=2, number=2, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='RESIZING', index=3, number=3, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='DISABLED', index=4, number=4, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=489, - serialized_end=570, -) -_sym_db.RegisterEnumDescriptor(_CLUSTER_STATE) - - -_INSTANCE = _descriptor.Descriptor( 
- name='Instance', - full_name='google.bigtable.admin.v2.Instance', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.v2.Instance.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='display_name', full_name='google.bigtable.admin.v2.Instance.display_name', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='state', full_name='google.bigtable.admin.v2.Instance.state', index=2, - number=3, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _INSTANCE_STATE, - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=139, - serialized_end=297, -) - - -_CLUSTER = _descriptor.Descriptor( - name='Cluster', - full_name='google.bigtable.admin.v2.Cluster', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.v2.Cluster.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='location', full_name='google.bigtable.admin.v2.Cluster.location', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='state', full_name='google.bigtable.admin.v2.Cluster.state', index=2, - number=3, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='serve_nodes', full_name='google.bigtable.admin.v2.Cluster.serve_nodes', index=3, - number=4, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='default_storage_type', full_name='google.bigtable.admin.v2.Cluster.default_storage_type', index=4, - number=5, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _CLUSTER_STATE, - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=300, - serialized_end=570, -) - -_INSTANCE.fields_by_name['state'].enum_type = _INSTANCE_STATE -_INSTANCE_STATE.containing_type = _INSTANCE 
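[Editor's note] Before this removal, the Instance and Cluster messages defined by this generated module were ordinary protobuf message classes and could be built directly with keyword fields. A minimal, illustrative example (resource names are placeholders):

    # Illustrative only: constructing the generated admin.v2 messages.
    from google.cloud.bigtable._generated import instance_pb2

    instance = instance_pb2.Instance(
        name='projects/my-project/instances/my-instance',
        display_name='my-instance',
        state=instance_pb2.Instance.READY,
    )
    cluster = instance_pb2.Cluster(
        name=instance.name + '/clusters/my-cluster',
        location='projects/my-project/locations/us-central1-c',
        serve_nodes=3,
    )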
-_CLUSTER.fields_by_name['state'].enum_type = _CLUSTER_STATE -_CLUSTER.fields_by_name['default_storage_type'].enum_type = google_dot_bigtable_dot_admin_dot_v2_dot_common__pb2._STORAGETYPE -_CLUSTER_STATE.containing_type = _CLUSTER -DESCRIPTOR.message_types_by_name['Instance'] = _INSTANCE -DESCRIPTOR.message_types_by_name['Cluster'] = _CLUSTER - -Instance = _reflection.GeneratedProtocolMessageType('Instance', (_message.Message,), dict( - DESCRIPTOR = _INSTANCE, - __module__ = 'google.bigtable.admin.v2.instance_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Instance) - )) -_sym_db.RegisterMessage(Instance) - -Cluster = _reflection.GeneratedProtocolMessageType('Cluster', (_message.Message,), dict( - DESCRIPTOR = _CLUSTER, - __module__ = 'google.bigtable.admin.v2.instance_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Cluster) - )) -_sym_db.RegisterMessage(Cluster) - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\034com.google.bigtable.admin.v2B\rInstanceProtoP\001')) -# @@protoc_insertion_point(module_scope) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/table_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/table_pb2.py deleted file mode 100644 index 840076514cc7..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/table_pb2.py +++ /dev/null @@ -1,393 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/bigtable/admin/v2/table.proto - -import sys -_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name='google/bigtable/admin/v2/table.proto', - package='google.bigtable.admin.v2', - syntax='proto3', - serialized_pb=_b('\n$google/bigtable/admin/v2/table.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x1egoogle/protobuf/duration.proto\"\xa0\x03\n\x05Table\x12\x0c\n\x04name\x18\x01 \x01(\t\x12L\n\x0f\x63olumn_families\x18\x03 \x03(\x0b\x32\x33.google.bigtable.admin.v2.Table.ColumnFamiliesEntry\x12I\n\x0bgranularity\x18\x04 \x01(\x0e\x32\x34.google.bigtable.admin.v2.Table.TimestampGranularity\x1a]\n\x13\x43olumnFamiliesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x35\n\x05value\x18\x02 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamily:\x02\x38\x01\"I\n\x14TimestampGranularity\x12%\n!TIMESTAMP_GRANULARITY_UNSPECIFIED\x10\x00\x12\n\n\x06MILLIS\x10\x01\"F\n\x04View\x12\x14\n\x10VIEW_UNSPECIFIED\x10\x00\x12\r\n\tNAME_ONLY\x10\x01\x12\x0f\n\x0bSCHEMA_VIEW\x10\x02\x12\x08\n\x04\x46ULL\x10\x04\"A\n\x0c\x43olumnFamily\x12\x31\n\x07gc_rule\x18\x01 \x01(\x0b\x32 .google.bigtable.admin.v2.GcRule\"\xd5\x02\n\x06GcRule\x12\x1a\n\x10max_num_versions\x18\x01 \x01(\x05H\x00\x12,\n\x07max_age\x18\x02 \x01(\x0b\x32\x19.google.protobuf.DurationH\x00\x12\x45\n\x0cintersection\x18\x03 \x01(\x0b\x32-.google.bigtable.admin.v2.GcRule.IntersectionH\x00\x12\x37\n\x05union\x18\x04 
\x01(\x0b\x32&.google.bigtable.admin.v2.GcRule.UnionH\x00\x1a?\n\x0cIntersection\x12/\n\x05rules\x18\x01 \x03(\x0b\x32 .google.bigtable.admin.v2.GcRule\x1a\x38\n\x05Union\x12/\n\x05rules\x18\x01 \x03(\x0b\x32 .google.bigtable.admin.v2.GcRuleB\x06\n\x04ruleB,\n\x1c\x63om.google.bigtable.admin.v2B\nTableProtoP\x01\x62\x06proto3') - , - dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_protobuf_dot_duration__pb2.DESCRIPTOR,]) -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - - - -_TABLE_TIMESTAMPGRANULARITY = _descriptor.EnumDescriptor( - name='TimestampGranularity', - full_name='google.bigtable.admin.v2.Table.TimestampGranularity', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='TIMESTAMP_GRANULARITY_UNSPECIFIED', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='MILLIS', index=1, number=1, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=400, - serialized_end=473, -) -_sym_db.RegisterEnumDescriptor(_TABLE_TIMESTAMPGRANULARITY) - -_TABLE_VIEW = _descriptor.EnumDescriptor( - name='View', - full_name='google.bigtable.admin.v2.Table.View', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='VIEW_UNSPECIFIED', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='NAME_ONLY', index=1, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='SCHEMA_VIEW', index=2, number=2, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='FULL', index=3, number=4, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=475, - serialized_end=545, -) -_sym_db.RegisterEnumDescriptor(_TABLE_VIEW) - - -_TABLE_COLUMNFAMILIESENTRY = _descriptor.Descriptor( - name='ColumnFamiliesEntry', - full_name='google.bigtable.admin.v2.Table.ColumnFamiliesEntry', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='key', full_name='google.bigtable.admin.v2.Table.ColumnFamiliesEntry.key', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='value', full_name='google.bigtable.admin.v2.Table.ColumnFamiliesEntry.value', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=305, - serialized_end=398, -) - -_TABLE = _descriptor.Descriptor( - name='Table', - full_name='google.bigtable.admin.v2.Table', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.v2.Table.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='column_families', 
full_name='google.bigtable.admin.v2.Table.column_families', index=1, - number=3, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='granularity', full_name='google.bigtable.admin.v2.Table.granularity', index=2, - number=4, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[_TABLE_COLUMNFAMILIESENTRY, ], - enum_types=[ - _TABLE_TIMESTAMPGRANULARITY, - _TABLE_VIEW, - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=129, - serialized_end=545, -) - - -_COLUMNFAMILY = _descriptor.Descriptor( - name='ColumnFamily', - full_name='google.bigtable.admin.v2.ColumnFamily', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='gc_rule', full_name='google.bigtable.admin.v2.ColumnFamily.gc_rule', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=547, - serialized_end=612, -) - - -_GCRULE_INTERSECTION = _descriptor.Descriptor( - name='Intersection', - full_name='google.bigtable.admin.v2.GcRule.Intersection', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='rules', full_name='google.bigtable.admin.v2.GcRule.Intersection.rules', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=827, - serialized_end=890, -) - -_GCRULE_UNION = _descriptor.Descriptor( - name='Union', - full_name='google.bigtable.admin.v2.GcRule.Union', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='rules', full_name='google.bigtable.admin.v2.GcRule.Union.rules', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=892, - serialized_end=948, -) - -_GCRULE = _descriptor.Descriptor( - name='GcRule', - full_name='google.bigtable.admin.v2.GcRule', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='max_num_versions', full_name='google.bigtable.admin.v2.GcRule.max_num_versions', index=0, - number=1, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, 
extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='max_age', full_name='google.bigtable.admin.v2.GcRule.max_age', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='intersection', full_name='google.bigtable.admin.v2.GcRule.intersection', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='union', full_name='google.bigtable.admin.v2.GcRule.union', index=3, - number=4, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[_GCRULE_INTERSECTION, _GCRULE_UNION, ], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name='rule', full_name='google.bigtable.admin.v2.GcRule.rule', - index=0, containing_type=None, fields=[]), - ], - serialized_start=615, - serialized_end=956, -) - -_TABLE_COLUMNFAMILIESENTRY.fields_by_name['value'].message_type = _COLUMNFAMILY -_TABLE_COLUMNFAMILIESENTRY.containing_type = _TABLE -_TABLE.fields_by_name['column_families'].message_type = _TABLE_COLUMNFAMILIESENTRY -_TABLE.fields_by_name['granularity'].enum_type = _TABLE_TIMESTAMPGRANULARITY -_TABLE_TIMESTAMPGRANULARITY.containing_type = _TABLE -_TABLE_VIEW.containing_type = _TABLE -_COLUMNFAMILY.fields_by_name['gc_rule'].message_type = _GCRULE -_GCRULE_INTERSECTION.fields_by_name['rules'].message_type = _GCRULE -_GCRULE_INTERSECTION.containing_type = _GCRULE -_GCRULE_UNION.fields_by_name['rules'].message_type = _GCRULE -_GCRULE_UNION.containing_type = _GCRULE -_GCRULE.fields_by_name['max_age'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION -_GCRULE.fields_by_name['intersection'].message_type = _GCRULE_INTERSECTION -_GCRULE.fields_by_name['union'].message_type = _GCRULE_UNION -_GCRULE.oneofs_by_name['rule'].fields.append( - _GCRULE.fields_by_name['max_num_versions']) -_GCRULE.fields_by_name['max_num_versions'].containing_oneof = _GCRULE.oneofs_by_name['rule'] -_GCRULE.oneofs_by_name['rule'].fields.append( - _GCRULE.fields_by_name['max_age']) -_GCRULE.fields_by_name['max_age'].containing_oneof = _GCRULE.oneofs_by_name['rule'] -_GCRULE.oneofs_by_name['rule'].fields.append( - _GCRULE.fields_by_name['intersection']) -_GCRULE.fields_by_name['intersection'].containing_oneof = _GCRULE.oneofs_by_name['rule'] -_GCRULE.oneofs_by_name['rule'].fields.append( - _GCRULE.fields_by_name['union']) -_GCRULE.fields_by_name['union'].containing_oneof = _GCRULE.oneofs_by_name['rule'] -DESCRIPTOR.message_types_by_name['Table'] = _TABLE -DESCRIPTOR.message_types_by_name['ColumnFamily'] = _COLUMNFAMILY -DESCRIPTOR.message_types_by_name['GcRule'] = _GCRULE - -Table = _reflection.GeneratedProtocolMessageType('Table', (_message.Message,), dict( - - ColumnFamiliesEntry = _reflection.GeneratedProtocolMessageType('ColumnFamiliesEntry', (_message.Message,), dict( - DESCRIPTOR = _TABLE_COLUMNFAMILIESENTRY, - __module__ = 'google.bigtable.admin.v2.table_pb2' - # 
@@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table.ColumnFamiliesEntry) - )) - , - DESCRIPTOR = _TABLE, - __module__ = 'google.bigtable.admin.v2.table_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table) - )) -_sym_db.RegisterMessage(Table) -_sym_db.RegisterMessage(Table.ColumnFamiliesEntry) - -ColumnFamily = _reflection.GeneratedProtocolMessageType('ColumnFamily', (_message.Message,), dict( - DESCRIPTOR = _COLUMNFAMILY, - __module__ = 'google.bigtable.admin.v2.table_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ColumnFamily) - )) -_sym_db.RegisterMessage(ColumnFamily) - -GcRule = _reflection.GeneratedProtocolMessageType('GcRule', (_message.Message,), dict( - - Intersection = _reflection.GeneratedProtocolMessageType('Intersection', (_message.Message,), dict( - DESCRIPTOR = _GCRULE_INTERSECTION, - __module__ = 'google.bigtable.admin.v2.table_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GcRule.Intersection) - )) - , - - Union = _reflection.GeneratedProtocolMessageType('Union', (_message.Message,), dict( - DESCRIPTOR = _GCRULE_UNION, - __module__ = 'google.bigtable.admin.v2.table_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GcRule.Union) - )) - , - DESCRIPTOR = _GCRULE, - __module__ = 'google.bigtable.admin.v2.table_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GcRule) - )) -_sym_db.RegisterMessage(GcRule) -_sym_db.RegisterMessage(GcRule.Intersection) -_sym_db.RegisterMessage(GcRule.Union) - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\034com.google.bigtable.admin.v2B\nTableProtoP\001')) -_TABLE_COLUMNFAMILIESENTRY.has_options = True -_TABLE_COLUMNFAMILIESENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) -# @@protoc_insertion_point(module_scope) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py index 454cb1816e55..0e6ed808e102 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py @@ -29,36 +29,12 @@ """ -import os - -from google.api_core import gapic_v1 -from google.longrunning import operations_grpc - -from google.cloud._helpers import make_insecure_stub -from google.cloud._helpers import make_secure_stub -from google.cloud._http import DEFAULT_USER_AGENT -from google.cloud.client import ClientWithProject -from google.cloud.environment_vars import BIGTABLE_EMULATOR - -from google.cloud.bigtable import __version__ -from google.cloud.bigtable._generated import bigtable_instance_admin_pb2 -from google.cloud.bigtable._generated import bigtable_pb2 -from google.cloud.bigtable._generated import bigtable_table_admin_pb2 -from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES from google.cloud.bigtable.instance import Instance from google.cloud.bigtable.instance import _EXISTING_INSTANCE_LOCATION_ID +from google.cloud import bigtable_v2 +from google.cloud import bigtable_admin_v2 -TABLE_ADMIN_HOST = 'bigtableadmin.googleapis.com' -"""Table Admin API request host.""" - -INSTANCE_ADMIN_HOST = 'bigtableadmin.googleapis.com' -"""Cluster Admin API request host.""" - -DATA_API_HOST = 'bigtable.googleapis.com' -"""Data API request host.""" - -OPERATIONS_API_HOST = INSTANCE_ADMIN_HOST ADMIN_SCOPE = 'https://www.googleapis.com/auth/bigtable.admin' """Scope for 
interacting with the Cluster Admin and Table Admin APIs.""" @@ -67,99 +43,8 @@ READ_ONLY_SCOPE = 'https://www.googleapis.com/auth/bigtable.data.readonly' """Scope for reading table data.""" -_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - client_library_version=__version__,) -_GRPC_EXTRA_OPTIONS = (_CLIENT_INFO.to_grpc_metadata(),) -# NOTE: 'grpc.max_message_length' will no longer be recognized in -# grpcio 1.1 and later. -_MAX_MSG_LENGTH_100MB = 100 * 1024 * 1024 -_GRPC_MAX_LENGTH_OPTIONS = _GRPC_EXTRA_OPTIONS + ( - ('grpc.max_message_length', _MAX_MSG_LENGTH_100MB), - ('grpc.max_receive_message_length', _MAX_MSG_LENGTH_100MB), -) - - -def _make_data_stub(client): - """Creates gRPC stub to make requests to the Data API. - - :type client: :class:`Client` - :param client: The client that will hold the stub. - :rtype: :class:`._generated.bigtable_pb2.BigtableStub` - :returns: A gRPC stub object. - """ - if client.emulator_host is None: - return make_secure_stub(client.credentials, client.user_agent, - bigtable_pb2.BigtableStub, DATA_API_HOST, - extra_options=_GRPC_MAX_LENGTH_OPTIONS) - else: - return make_insecure_stub(bigtable_pb2.BigtableStub, - client.emulator_host) - - -def _make_instance_stub(client): - """Creates gRPC stub to make requests to the Instance Admin API. - - :type client: :class:`Client` - :param client: The client that will hold the stub. - - :rtype: :class:`.bigtable_instance_admin_pb2.BigtableInstanceAdminStub` - :returns: A gRPC stub object. - """ - if client.emulator_host is None: - return make_secure_stub( - client.credentials, client.user_agent, - bigtable_instance_admin_pb2.BigtableInstanceAdminStub, - INSTANCE_ADMIN_HOST, extra_options=_GRPC_EXTRA_OPTIONS) - else: - return make_insecure_stub( - bigtable_instance_admin_pb2.BigtableInstanceAdminStub, - client.emulator_host) - - -def _make_operations_stub(client): - """Creates gRPC stub to make requests to the Operations API. - - These are for long-running operations of the Instance Admin API, - hence the host and port matching. - - :type client: :class:`Client` - :param client: The client that will hold the stub. - - :rtype: :class:`google.longrunning.operations_grpc.OperationsStub` - :returns: A gRPC stub object. - """ - if client.emulator_host is None: - return make_secure_stub( - client.credentials, client.user_agent, - operations_grpc.OperationsStub, - OPERATIONS_API_HOST, extra_options=_GRPC_EXTRA_OPTIONS) - else: - return make_insecure_stub(operations_grpc.OperationsStub, - client.emulator_host) - - -def _make_table_stub(client): - """Creates gRPC stub to make requests to the Table Admin API. - - :type client: :class:`Client` - :param client: The client that will hold the stub. - - :rtype: :class:`.bigtable_instance_admin_pb2.BigtableTableAdminStub` - :returns: A gRPC stub object. - """ - if client.emulator_host is None: - return make_secure_stub( - client.credentials, client.user_agent, - bigtable_table_admin_pb2.BigtableTableAdminStub, - TABLE_ADMIN_HOST, extra_options=_GRPC_EXTRA_OPTIONS) - else: - return make_insecure_stub( - bigtable_table_admin_pb2.BigtableTableAdminStub, - client.emulator_host) - - -class Client(ClientWithProject): +class Client(object): """Client for interacting with Google Cloud Bigtable API. .. note:: @@ -187,45 +72,31 @@ class Client(ClientWithProject): interact with the Instance Admin or Table Admin APIs. This requires the :const:`ADMIN_SCOPE`. Defaults to :data:`False`. - :type user_agent: str - :param user_agent: (Optional) The user agent to be used with API request. 
- Defaults to :const:`DEFAULT_USER_AGENT`. + :type channel: :instance: grpc.Channel + :param channel (grpc.Channel): (Optional) A ``Channel`` instance + through which to make calls. This argument is mutually + exclusive with ``credentials``; providing both will raise an + exception. :raises: :class:`ValueError ` if both ``read_only`` and ``admin`` are :data:`True` """ - _instance_stub_internal = None - _operations_stub_internal = None - _table_stub_internal = None - _SET_PROJECT = True # Used by from_service_account_json() - def __init__(self, project=None, credentials=None, - read_only=False, admin=False, user_agent=DEFAULT_USER_AGENT): + read_only=False, admin=False, channel=None): if read_only and admin: raise ValueError('A read-only client cannot also perform' 'administrative actions.') # NOTE: We set the scopes **before** calling the parent constructor. # It **may** use those scopes in ``with_scopes_if_required``. + self.project = project self._read_only = bool(read_only) self._admin = bool(admin) + self._channel = channel + self._credentials = credentials self.SCOPE = self._get_scopes() - - # NOTE: This API has no use for the _http argument, but sending it - # will have no impact since the _http() @property only lazily - # creates a working HTTP object. - super(Client, self).__init__( - project=project, credentials=credentials, _http=None) - self.user_agent = user_agent - self.emulator_host = os.getenv(BIGTABLE_EMULATOR) - - # Create gRPC stubs for making requests. - self._data_stub = _make_data_stub(self) - if self._admin: - self._instance_stub_internal = _make_instance_stub(self) - self._operations_stub_internal = _make_operations_stub(self) - self._table_stub_internal = _make_table_stub(self) + super(Client, self).__init__() def _get_scopes(self): """Get the scopes corresponding to admin / read-only state. @@ -243,35 +114,8 @@ def _get_scopes(self): return scopes - def copy(self): - """Make a copy of this client. - - Copies the local data stored as simple types but does not copy the - current state of any open connections with the Cloud Bigtable API. - - :rtype: :class:`.Client` - :returns: A copy of the current client. - """ - return self.__class__( - self.project, - self._credentials, - self._read_only, - self._admin, - self.user_agent, - ) - - @property - def credentials(self): - """Getter for client's credentials. - - :rtype: - :class:`OAuth2Credentials ` - :returns: The credentials stored on the client. - """ - return self._credentials - @property - def project_name(self): + def project_path(self): """Project name to be used with Instance Admin API. .. note:: @@ -284,55 +128,53 @@ def project_name(self): ``"projects/{project}"`` :rtype: str - :returns: The project name to be used with the Cloud Bigtable Admin - API RPC service. + :returns: Return a fully-qualified project string. """ - return 'projects/' + self.project + instance_client = self._instance_admin_client + return instance_client.project_path(self.project) @property - def _instance_stub(self): - """Getter for the gRPC stub used for the Instance Admin API. + def _table_data_client(self): + """Getter for the gRPC stub used for the Table Admin API. - :rtype: :class:`.bigtable_instance_admin_pb2.BigtableInstanceAdminStub` - :returns: A gRPC stub object. - :raises: :class:`ValueError ` if the current - client is not an admin client or if it has not been - :meth:`start`-ed. + :rtype: :class:`.bigtable_v2.BigtableClient` + :returns: A BigtableClient object. 
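For orientation, a rough usage sketch of the reworked client, assuming default application credentials; 'my-project' and 'my-instance' are placeholder IDs, and only calls that appear in this file (the instance factory and ``list_instances`` shown just below) are used.

from google.cloud.bigtable.client import Client

# admin=True is required for the instance/table admin client properties.
client = Client(project='my-project', admin=True)

# Factory for an Instance wrapper bound to this client.
instance = client.instance('my-instance', display_name='My instance')

# list_instances() hands back whatever the GAPIC call returns; for the
# generated admin client that is a ListInstancesResponse whose ``instances``
# field carries the instance protos.
response = client.list_instances()
for instance_pb in response.instances:
    print(instance_pb.name)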
""" - if not self._admin: - raise ValueError('Client is not an admin client.') - return self._instance_stub_internal + return bigtable_v2.BigtableClient(channel=self._channel, + credentials=self._credentials) @property - def _operations_stub(self): - """Getter for the gRPC stub used for the Operations API. + def _table_admin_client(self): + """Getter for the gRPC stub used for the Table Admin API. - :rtype: :class:`google.longrunning.operations_grpc.OperationsStub` - :returns: A gRPC stub object. + :rtype: :class:`.bigtable_admin_pb2.BigtableTableAdmin` + :returns: A BigtableTableAdmin instance. :raises: :class:`ValueError ` if the current client is not an admin client or if it has not been :meth:`start`-ed. """ if not self._admin: raise ValueError('Client is not an admin client.') - return self._operations_stub_internal + return bigtable_admin_v2.BigtableTableAdminClient( + channel=self._channel, credentials=self._credentials) @property - def _table_stub(self): + def _instance_admin_client(self): """Getter for the gRPC stub used for the Table Admin API. - :rtype: :class:`.bigtable_instance_admin_pb2.BigtableTableAdminStub` - :returns: A gRPC stub object. + :rtype: :class:`.bigtable_admin_pb2.BigtableInstanceAdmin` + :returns: A BigtableInstanceAdmin instance. :raises: :class:`ValueError ` if the current client is not an admin client or if it has not been :meth:`start`-ed. """ if not self._admin: raise ValueError('Client is not an admin client.') - return self._table_stub_internal + return bigtable_admin_v2.BigtableInstanceAdminClient( + channel=self._channel, credentials=self._credentials) def instance(self, instance_id, location=_EXISTING_INSTANCE_LOCATION_ID, - display_name=None, serve_nodes=DEFAULT_SERVE_NODES): + display_name=None): """Factory to create a instance associated with this client. :type instance_id: str @@ -349,30 +191,15 @@ def instance(self, instance_id, location=_EXISTING_INSTANCE_LOCATION_ID, characters.) If this value is not set in the constructor, will fall back to the instance ID. - :type serve_nodes: int - :param serve_nodes: (Optional) The number of nodes in the instance's - cluster; used to set up the instance's cluster. - :rtype: :class:`~google.cloud.bigtable.instance.Instance` :returns: an instance owned by this client. """ - return Instance(instance_id, self, location, - display_name=display_name, serve_nodes=serve_nodes) + return Instance(instance_id, self, location, display_name=display_name) def list_instances(self): """List instances owned by the project. - :rtype: tuple - :returns: A pair of results, the first is a list of - :class:`~google.cloud.bigtable.instance.Instance` objects - returned and the second is a list of strings (the failed - locations in the request). + :rtype: :class:`~google.api_core.page_iterator.Iterator` + :returns: A list of Instance. 
""" - request_pb = bigtable_instance_admin_pb2.ListInstancesRequest( - parent=self.project_name) - - response = self._instance_stub.ListInstances(request_pb) - - instances = [Instance.from_pb(instance_pb, self) - for instance_pb in response.instances] - return instances, response.failed_locations + return self._instance_admin_client.list_instances(self.project_path) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py index 97b524ec5dc5..43c200d02512 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py @@ -17,11 +17,6 @@ import re -from google.api_core import operation -from google.cloud.bigtable._generated import ( - instance_pb2 as data_v2_pb2) -from google.cloud.bigtable._generated import ( - bigtable_instance_admin_pb2 as messages_v2_pb2) _CLUSTER_NAME_RE = re.compile(r'^projects/(?P[^/]+)/' r'instances/(?P[^/]+)/clusters/' @@ -31,25 +26,6 @@ """Default number of nodes to use when creating a cluster.""" -def _prepare_create_request(cluster): - """Creates a protobuf request for a CreateCluster request. - - :type cluster: :class:`Cluster` - :param cluster: The cluster to be created. - - :rtype: :class:`.messages_v2_pb2.CreateClusterRequest` - :returns: The CreateCluster request object containing the cluster info. - """ - return messages_v2_pb2.CreateClusterRequest( - parent=cluster._instance.name, - cluster_id=cluster.cluster_id, - cluster=data_v2_pb2.Cluster( - location=cluster.location, - serve_nodes=cluster.serve_nodes, - ), - ) - - class Cluster(object): """Representation of a Google Cloud Bigtable Cluster. @@ -60,11 +36,6 @@ class Cluster(object): * :meth:`update` itself * :meth:`delete` itself - .. note:: - - For now, we leave out the ``default_storage_type`` (an enum) - which if not sent will end up as :data:`.data_v2_pb2.STORAGE_SSD`. - :type cluster_id: str :param cluster_id: The ID of the cluster. @@ -83,63 +54,6 @@ def __init__(self, cluster_id, instance, self.serve_nodes = serve_nodes self.location = None - def _update_from_pb(self, cluster_pb): - """Refresh self from the server-provided protobuf. - - Helper for :meth:`from_pb` and :meth:`reload`. - """ - if not cluster_pb.serve_nodes: # Simple field (int32) - raise ValueError('Cluster protobuf does not contain serve_nodes') - self.serve_nodes = cluster_pb.serve_nodes - self.location = cluster_pb.location - - @classmethod - def from_pb(cls, cluster_pb, instance): - """Creates a cluster instance from a protobuf. - - :type cluster_pb: :class:`instance_pb2.Cluster` - :param cluster_pb: A cluster protobuf object. - - :type instance: :class:`~google.cloud.bigtable.instance.Instance>` - :param instance: The instance that owns the cluster. - - :rtype: :class:`Cluster` - :returns: The cluster parsed from the protobuf response. - :raises: - :class:`ValueError ` if the cluster - name does not match - ``projects/{project}/instances/{instance}/clusters/{cluster_id}`` - or if the parsed project ID does not match the project ID - on the client. 
- """ - match = _CLUSTER_NAME_RE.match(cluster_pb.name) - if match is None: - raise ValueError('Cluster protobuf name was not in the ' - 'expected format.', cluster_pb.name) - if match.group('project') != instance._client.project: - raise ValueError('Project ID on cluster does not match the ' - 'project ID on the client') - if match.group('instance') != instance.instance_id: - raise ValueError('Instance ID on cluster does not match the ' - 'instance ID on the client') - - result = cls(match.group('cluster_id'), instance) - result._update_from_pb(cluster_pb) - return result - - def copy(self): - """Make a copy of this cluster. - - Copies the local data stored as simple types and copies the client - attached to this instance. - - :rtype: :class:`.Cluster` - :returns: A copy of the current cluster. - """ - new_instance = self._instance.copy() - return self.__class__(self.cluster_id, new_instance, - serve_nodes=self.serve_nodes) - @property def name(self): """Cluster name used in requests. @@ -155,7 +69,9 @@ def name(self): :rtype: str :returns: The cluster name. """ - return self._instance.name + '/clusters/' + self.cluster_id + return self._instance._client._instance_admin_client.cluster_path( + self._instance._client.project, self._instance.instance_id, + self.cluster_id) def __eq__(self, other): if not isinstance(other, self.__class__): @@ -174,14 +90,7 @@ def __ne__(self, other): def reload(self): """Reload the metadata for this cluster.""" - request_pb = messages_v2_pb2.GetClusterRequest(name=self.name) - # We expect a `._generated.instance_pb2.Cluster`. - cluster_pb = self._instance._client._instance_stub.GetCluster( - request_pb) - - # NOTE: _update_from_pb does not check that the project, instance and - # cluster ID on the response match the request. - self._update_from_pb(cluster_pb) + self._instance._client._instance_admin_client.get_cluster(self.name) def create(self): """Create this cluster. @@ -199,24 +108,15 @@ def create(self): before calling :meth:`create`. - :rtype: :class:`Operation` + :rtype: :class:`~google.api_core.operation.Operation` :returns: The long-running operation corresponding to the create operation. """ client = self._instance._client + return client._instance_admin_client.create_cluster( + self._instance.name, self.cluster_id, {}) - # We expect a `google.longrunning.operations_pb2.Operation`. - request_pb = _prepare_create_request(self) - operation_pb = client._instance_stub.CreateCluster(request_pb) - - operation_future = operation.from_grpc( - operation_pb, - client._operations_stub, - data_v2_pb2.Cluster, - metadata_type=messages_v2_pb2.UpdateClusterMetadata) - return operation_future - - def update(self): + def update(self, location='', serve_nodes=0): """Update this cluster. .. note:: @@ -230,25 +130,25 @@ def update(self): before calling :meth:`update`. + :type location: :str:``CreationOnly`` + :param location: The location where this cluster's nodes and storage + reside. For best performance, clients should be located as + close as possible to this cluster. Currently only zones are + supported, so values should be of the form + ``projects//locations/``. + + :type serve_nodes: :int + :param serve_nodes: The number of nodes allocated to this cluster. + More nodes enable higher throughput and more consistent + performance. + :rtype: :class:`Operation` :returns: The long-running operation corresponding to the update operation. """ client = self._instance._client - - # We expect a `google.longrunning.operations_pb2.Operation`. 
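A sketch of the reworked cluster calls, reusing the client and instance from the sketch above; the cluster ID is a placeholder and the return values are the operation futures produced by the GAPIC instance-admin client.

from google.cloud.bigtable.cluster import Cluster

cluster = Cluster('my-cluster', instance, serve_nodes=3)

# create() forwards to create_cluster() and returns a long-running operation
# future; note that with this change it sends an empty cluster message, so
# location / serve_nodes are applied through update().
operation = cluster.create()
operation.result()

# update() now takes the new location / serve_nodes as arguments instead of
# reading them from the wrapper's attributes.
cluster.update(serve_nodes=5)

cluster.delete()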
- request_pb = data_v2_pb2.Cluster( - name=self.name, - serve_nodes=self.serve_nodes, - ) - operation_pb = client._instance_stub.UpdateCluster(request_pb) - - operation_future = operation.from_grpc( - operation_pb, - client._operations_stub, - data_v2_pb2.Cluster, - metadata_type=messages_v2_pb2.UpdateClusterMetadata) - return operation_future + return client._instance_admin_client.update_cluster( + self.name, location, serve_nodes) def delete(self): """Delete this cluster. @@ -270,6 +170,5 @@ def delete(self): irrevocably disappear from the API, and their data will be permanently deleted. """ - request_pb = messages_v2_pb2.DeleteClusterRequest(name=self.name) - # We expect a `google.protobuf.empty_pb2.Empty` - self._instance._client._instance_stub.DeleteCluster(request_pb) + client = self._instance._client + client._instance_admin_client.delete_cluster(self.name) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/column_family.py b/packages/google-cloud-bigtable/google/cloud/bigtable/column_family.py index 4a67313ae4c5..3e2f1cae818e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/column_family.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/column_family.py @@ -16,9 +16,9 @@ from google.cloud import _helpers -from google.cloud.bigtable._generated import ( +from google.cloud.bigtable_admin_v2.proto import ( table_pb2 as table_v2_pb2) -from google.cloud.bigtable._generated import ( +from google.cloud.bigtable_admin_v2.proto import ( bigtable_table_admin_pb2 as table_admin_v2_pb2) @@ -221,17 +221,15 @@ def to_pb(self): def create(self): """Create this column family.""" column_family = self.to_pb() - request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest( - name=self._table.name) - request_pb.modifications.add( - id=self.column_family_id, - create=column_family, - ) + modification = ( + table_admin_v2_pb2.ModifyColumnFamiliesRequest.Modification( + id=self.column_family_id, create=column_family)) + client = self._table._instance._client - # We expect a `.table_v2_pb2.ColumnFamily`. We ignore it since the only # data it contains are the GC rule and the column family ID already # stored on this instance. - client._table_stub.ModifyColumnFamilies(request_pb) + client._table_admin_client.modify_column_families( + self._table.name, [modification]) def update(self): """Update this column family. @@ -242,27 +240,27 @@ def update(self): you will simply be referring to a different column family. """ column_family = self.to_pb() - request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest( - name=self._table.name) - request_pb.modifications.add( - id=self.column_family_id, - update=column_family) + modification = ( + table_admin_v2_pb2.ModifyColumnFamiliesRequest.Modification( + id=self.column_family_id, update=column_family)) + client = self._table._instance._client - # We expect a `.table_v2_pb2.ColumnFamily`. We ignore it since the only # data it contains are the GC rule and the column family ID already # stored on this instance. 
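A sketch of the column-family flow these helpers wrap, reusing the placeholder instance from the earlier sketches and assuming the ``MaxVersionsGCRule`` helper from this module; 'my-table' and 'cf1' are placeholders.

from google.cloud.bigtable.column_family import MaxVersionsGCRule

table = instance.table('my-table')

# Each helper builds a single ModifyColumnFamiliesRequest.Modification and
# hands it to the table-admin client, as in the hunks here.
column_family = table.column_family('cf1', gc_rule=MaxVersionsGCRule(2))
column_family.create()     # Modification(id='cf1', create=<ColumnFamily pb>)
column_family.update()     # Modification(id='cf1', update=<ColumnFamily pb>)
column_family.delete()     # Modification(id='cf1', drop=True)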
- client._table_stub.ModifyColumnFamilies(request_pb) + client._table_admin_client.modify_column_families( + self._table.name, [modification]) def delete(self): """Delete this column family.""" - request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest( - name=self._table.name) - request_pb.modifications.add( - id=self.column_family_id, - drop=True) + modification = ( + table_admin_v2_pb2.ModifyColumnFamiliesRequest.Modification( + id=self.column_family_id, drop=True)) + client = self._table._instance._client - # We expect a `google.protobuf.empty_pb2.Empty` - client._table_stub.ModifyColumnFamilies(request_pb) + # data it contains are the GC rule and the column family ID already + # stored on this instance. + client._table_admin_client.modify_column_families( + self._table.name, [modification]) def _gc_rule_from_pb(gc_rule_pb): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py index 5927b7c4f0ca..1030aab9b939 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py @@ -17,48 +17,16 @@ import re -from google.api_core import operation -from google.cloud.bigtable._generated import ( - instance_pb2 as data_v2_pb2) -from google.cloud.bigtable._generated import ( - bigtable_instance_admin_pb2 as messages_v2_pb2) -from google.cloud.bigtable._generated import ( - bigtable_table_admin_pb2 as table_messages_v2_pb2) -from google.cloud.bigtable.cluster import Cluster -from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES from google.cloud.bigtable.table import Table +from google.cloud.bigtable_admin_v2 import enums + _EXISTING_INSTANCE_LOCATION_ID = 'see-existing-cluster' _INSTANCE_NAME_RE = re.compile(r'^projects/(?P[^/]+)/' r'instances/(?P[a-z][-a-z0-9]*)$') -def _prepare_create_request(instance): - """Creates a protobuf request for a CreateInstance request. - - :type instance: :class:`Instance` - :param instance: The instance to be created. - - :rtype: :class:`.messages_v2_pb2.CreateInstanceRequest` - :returns: The CreateInstance request object containing the instance info. - """ - parent_name = ('projects/' + instance._client.project) - message = messages_v2_pb2.CreateInstanceRequest( - parent=parent_name, - instance_id=instance.instance_id, - instance=data_v2_pb2.Instance( - display_name=instance.display_name, - ), - ) - cluster = message.clusters[instance.instance_id] - cluster.name = instance.name + '/clusters/' + instance.instance_id - cluster.location = ( - parent_name + '/locations/' + instance._cluster_location_id) - cluster.serve_nodes = instance._cluster_serve_nodes - return message - - class Instance(object): """Representation of a Google Cloud Bigtable Instance. @@ -91,31 +59,16 @@ class Instance(object): Cloud Console UI. (Must be between 4 and 30 characters.) If this value is not set in the constructor, will fall back to the instance ID. - - :type serve_nodes: int - :param serve_nodes: (Optional) The number of nodes in the instance's - cluster; used to set up the instance's cluster. 
""" def __init__(self, instance_id, client, location_id=_EXISTING_INSTANCE_LOCATION_ID, - display_name=None, - serve_nodes=DEFAULT_SERVE_NODES): + display_name=None): self.instance_id = instance_id self.display_name = display_name or instance_id self._cluster_location_id = location_id - self._cluster_serve_nodes = serve_nodes self._client = client - def _update_from_pb(self, instance_pb): - """Refresh self from the server-provided protobuf. - - Helper for :meth:`from_pb` and :meth:`reload`. - """ - if not instance_pb.display_name: # Simple field (string) - raise ValueError('Instance protobuf does not contain display_name') - self.display_name = instance_pb.display_name - @classmethod def from_pb(cls, instance_pb, client): """Creates an instance instance from a protobuf. @@ -144,22 +97,15 @@ def from_pb(cls, instance_pb, client): instance_id = match.group('instance_id') result = cls(instance_id, client, _EXISTING_INSTANCE_LOCATION_ID) - result._update_from_pb(instance_pb) return result - def copy(self): - """Make a copy of this instance. - - Copies the local data stored as simple types and copies the client - attached to this instance. - - :rtype: :class:`.Instance` - :returns: A copy of the current instance. + def _update_from_pb(self, instance_pb): + """Refresh self from the server-provided protobuf. + Helper for :meth:`from_pb` and :meth:`reload`. """ - new_client = self._client.copy() - return self.__class__(self.instance_id, new_client, - self._cluster_location_id, - display_name=self.display_name) + if not instance_pb.display_name: # Simple field (string) + raise ValueError('Instance protobuf does not contain display_name') + self.display_name = instance_pb.display_name @property def name(self): @@ -174,9 +120,10 @@ def name(self): ``"projects/{project}/instances/{instance_id}"`` :rtype: str - :returns: The instance name. + :returns: Return a fully-qualified instance string. """ - return self._client.project_name + '/instances/' + self.instance_id + return self._client._instance_admin_client.instance_path( + project=self._client.project, instance=self.instance_id) def __eq__(self, other): if not isinstance(other, self.__class__): @@ -193,16 +140,6 @@ def __eq__(self, other): def __ne__(self, other): return not self == other - def reload(self): - """Reload the metadata for this instance.""" - request_pb = messages_v2_pb2.GetInstanceRequest(name=self.name) - # We expect `data_v2_pb2.Instance`. - instance_pb = self._client._instance_stub.GetInstance(request_pb) - - # NOTE: _update_from_pb does not check that the project and - # instance ID on the response match the request. - self._update_from_pb(instance_pb) - def create(self): """Create this instance. @@ -219,20 +156,14 @@ def create(self): before calling :meth:`create`. - :rtype: :class:`Operation` - :returns: The long-running operation corresponding to the - create operation. + :rtype: :class:`~google.api_core.operation.Operation` + :returns: The long-running operation corresponding to the create + operation. """ - request_pb = _prepare_create_request(self) - # We expect a `google.longrunning.operations_pb2.Operation`. 
- operation_pb = self._client._instance_stub.CreateInstance(request_pb) - - operation_future = operation.from_grpc( - operation_pb, - self._client._operations_stub, - data_v2_pb2.Instance, - metadata_type=messages_v2_pb2.CreateInstanceMetadata) - return operation_future + parent = self._client.project_path + return self._client._instance_admin_client.create_instance( + parent=parent, instance_id=self.instance_id, instance={}, + clusters={}) def update(self): """Update this instance. @@ -248,12 +179,10 @@ def update(self): before calling :meth:`update`. """ - request_pb = data_v2_pb2.Instance( - name=self.name, - display_name=self.display_name, - ) - # Ignore the expected `data_v2_pb2.Instance`. - self._client._instance_stub.UpdateInstance(request_pb) + type = enums.Instance.Type.TYPE_UNSPECIFIED + self._client._instance_admin_client.update_instance( + name=self.name, display_name=self.display_name, type_=type, + labels={}) def delete(self): """Delete this instance. @@ -277,44 +206,7 @@ def delete(self): irrevocably disappear from the API, and their data will be permanently deleted. """ - request_pb = messages_v2_pb2.DeleteInstanceRequest(name=self.name) - # We expect a `google.protobuf.empty_pb2.Empty` - self._client._instance_stub.DeleteInstance(request_pb) - - def cluster(self, cluster_id, serve_nodes=3): - """Factory to create a cluster associated with this client. - - :type cluster_id: str - :param cluster_id: The ID of the cluster. - - :type serve_nodes: int - :param serve_nodes: (Optional) The number of nodes in the cluster. - Defaults to 3. - - :rtype: :class:`~.bigtable.cluster.Cluster` - :returns: The cluster owned by this client. - """ - return Cluster(cluster_id, self, serve_nodes=serve_nodes) - - def list_clusters(self): - """Lists clusters in this instance. - - :rtype: tuple - :returns: A pair of results, the first is a list of - :class:`~.bigtable.cluster.Cluster` objects returned and the - second is a list of strings (the failed locations in the - request). - """ - request_pb = messages_v2_pb2.ListClustersRequest(parent=self.name) - # We expect a `.cluster_messages_v1_pb2.ListClustersResponse` - list_clusters_response = self._client._instance_stub.ListClusters( - request_pb) - - failed_locations = [ - location for location in list_clusters_response.failed_locations] - clusters = [Cluster.from_pb(cluster_pb, self) - for cluster_pb in list_clusters_response.clusters] - return clusters, failed_locations + self._client._instance_admin_client.delete_instance(name=self.name) def table(self, table_id): """Factory to create a table associated with this instance. @@ -335,12 +227,10 @@ def list_tables(self): :raises: :class:`ValueError ` if one of the returned tables has a name that is not of the expected format. 
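A sketch of the trimmed-down Instance surface, continuing the client sketch above; IDs are placeholders and ``create()`` is assumed to return the operation future its new docstring describes.

instance = client.instance('my-instance', display_name='My instance')

# create() issues CreateInstance through the instance-admin client and
# returns a long-running operation future.
operation = instance.create()
operation.result()

# update() pushes the locally stored display_name.
instance.update()

# list_tables() iterates the table-admin response and wraps each entry.
for table in instance.list_tables():
    print(table.table_id)

# delete() permanently removes the instance and everything in it.
instance.delete()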
""" - request_pb = table_messages_v2_pb2.ListTablesRequest(parent=self.name) - # We expect a `table_messages_v2_pb2.ListTablesResponse` - table_list_pb = self._client._table_stub.ListTables(request_pb) + table_list_pb = self._client._table_admin_client.list_tables(self.name) result = [] - for table_pb in table_list_pb.tables: + for table_pb in table_list_pb: table_prefix = self.name + '/tables/' if not table_pb.name.startswith(table_prefix): raise ValueError('Table name %s not of expected format' % ( diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row.py index 09bf1fe5ef10..73803801249f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row.py @@ -26,10 +26,8 @@ from google.cloud._helpers import _datetime_from_microseconds from google.cloud._helpers import _microseconds_from_datetime from google.cloud._helpers import _to_bytes -from google.cloud.bigtable._generated import ( +from google.cloud.bigtable_v2.proto import ( data_pb2 as data_v2_pb2) -from google.cloud.bigtable._generated import ( - bigtable_pb2 as messages_v2_pb2) _PACK_I64 = struct.Struct('>q').pack @@ -418,15 +416,10 @@ def commit(self): if num_mutations > MAX_MUTATIONS: raise ValueError('%d total mutations exceed the maximum allowable ' '%d.' % (num_mutations, MAX_MUTATIONS)) - request_pb = messages_v2_pb2.MutateRowRequest( - table_name=self._table.name, - row_key=self._row_key, - mutations=mutations_list, - ) commit = functools.partial( - self._table._instance._client._data_stub.MutateRow, - request_pb) + self._table._instance._client._table_data_client.mutate_row, + self._table.name, self._row_key, mutations_list) retry_ = retry.Retry( predicate=_retry_commit_exception, deadline=30) @@ -539,18 +532,11 @@ def commit(self): 'mutations and %d false mutations.' % ( MAX_MUTATIONS, num_true_mutations, num_false_mutations)) - request_pb = messages_v2_pb2.CheckAndMutateRowRequest( - table_name=self._table.name, - row_key=self._row_key, - predicate_filter=self._filter.to_pb(), - true_mutations=true_mutations, - false_mutations=false_mutations, - ) - # We expect a `.messages_v2_pb2.CheckAndMutateRowResponse` client = self._table._instance._client - resp = client._data_stub.CheckAndMutateRow(request_pb) + resp = client._table_data_client.check_and_mutate_row( + table_name=self._table.name, row_key=self._row_key,) self.clear() - return resp.predicate_matched + return resp[0].predicate_matched # pylint: disable=arguments-differ def set_cell(self, column_family_id, column, value, timestamp=None, @@ -828,14 +814,11 @@ def commit(self): if num_mutations > MAX_MUTATIONS: raise ValueError('%d total append mutations exceed the maximum ' 'allowable %d.' % (num_mutations, MAX_MUTATIONS)) - request_pb = messages_v2_pb2.ReadModifyWriteRowRequest( - table_name=self._table.name, - row_key=self._row_key, - rules=self._rule_pb_list, - ) - # We expect a `.data_v2_pb2.Row` + client = self._table._instance._client - row_response = client._data_stub.ReadModifyWriteRow(request_pb) + row_response = client._table_data_client.read_modify_write_row( + table_name=self._table.name, row_key=self._row_key, + rules=self._rule_pb_list) # Reset modifications after commit-ing request. 
self.clear() diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py index 3a84d8261eb2..3216be84dd2b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py @@ -297,7 +297,7 @@ class InvalidChunk(RuntimeError): class PartialRowsData(object): """Convenience wrapper for consuming a ``ReadRows`` streaming response. - :type read_method: :class:`client._data_stub.ReadRows` + :type read_method: :class:`client._table_data_client.read_rows` :param read_method: ``ReadRows`` method. :type request: :class:`data_messages_v2_pb2.ReadRowsRequest` @@ -356,7 +356,7 @@ def _retry_read_rows_exception(exc): class YieldRowsData(object): """Convenience wrapper for consuming a ``ReadRows`` streaming response. - :type read_method: :class:`client._data_stub.ReadRows` + :type read_method: :class:`client._table_data_client.read_rows` :param read_method: ``ReadRows`` method. :type request: :class:`data_messages_v2_pb2.ReadRowsRequest` diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_filters.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_filters.py index 3b41f9e59052..b318dc3ab3a5 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_filters.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_filters.py @@ -17,7 +17,7 @@ from google.cloud._helpers import _microseconds_from_datetime from google.cloud._helpers import _to_bytes -from google.cloud.bigtable._generated import ( +from google.cloud.bigtable_v2.proto import ( data_pb2 as data_v2_pb2) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index a2adec28ba83..818806ca41b9 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -21,12 +21,8 @@ from google.api_core.retry import if_exception_type from google.api_core.retry import Retry from google.cloud._helpers import _to_bytes -from google.cloud.bigtable._generated import ( +from google.cloud.bigtable_v2.proto import ( bigtable_pb2 as data_messages_v2_pb2) -from google.cloud.bigtable._generated import ( - bigtable_table_admin_pb2 as table_admin_messages_v2_pb2) -from google.cloud.bigtable._generated import ( - table_pb2 as table_v2_pb2) from google.cloud.bigtable.column_family import _gc_rule_from_pb from google.cloud.bigtable.column_family import ColumnFamily from google.cloud.bigtable.row import AppendRow @@ -110,7 +106,10 @@ def name(self): :rtype: str :returns: The table name. """ - return self._instance.name + '/tables/' + self.table_id + project = self._instance._client.project + instance_id = self._instance.instance_id + return self._instance._client._table_admin_client.table_path( + project=project, instance=instance_id, table=self.table_id) def column_family(self, column_family_id, gc_rule=None): """Factory to create a column family associated with this table. @@ -170,7 +169,7 @@ def __eq__(self, other): def __ne__(self, other): return not self == other - def create(self, initial_split_keys=None, column_families=()): + def create(self): """Creates this table. .. note:: @@ -178,49 +177,17 @@ def create(self, initial_split_keys=None, column_families=()): A create request returns a :class:`._generated.table_pb2.Table` but we don't use this response. 
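A sketch of table creation and the row commit paths reworked above, reusing the placeholder instance; keys, families and values are illustrative, and the usual DirectRow/AppendRow helpers are assumed.

table = instance.table('my-table')

# CreateTable is now issued with just the parent and table ID; the empty
# table message means no initial splits or column families.
table.create()

# Direct mutation: batched locally, sent via mutate_row() on commit() and
# retried on transient gRPC errors.
row = table.row(b'row-key-1')
row.set_cell('cf1', b'col', b'value-bytes')
row.commit()

# Append/increment mutations go through read_modify_write_row().
append_row = table.row(b'row-key-1', append=True)
append_row.increment_cell_value('cf1', b'counter', 1)
append_row.commit()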
- - :type initial_split_keys: list - :param initial_split_keys: (Optional) List of row keys that will be - used to initially split the table into - several tablets (Tablets are similar to - HBase regions). Given two split keys, - ``"s1"`` and ``"s2"``, three tablets will be - created, spanning the key ranges: - ``[, s1)``, ``[s1, s2)``, ``[s2, )``. - - :type column_families: list - :param column_families: (Optional) List or other iterable of - :class:`.ColumnFamily` instances. """ - if initial_split_keys is not None: - split_pb = table_admin_messages_v2_pb2.CreateTableRequest.Split - initial_split_keys = [ - split_pb(key=key) for key in initial_split_keys] - - table_pb = None - if column_families: - table_pb = table_v2_pb2.Table() - for col_fam in column_families: - curr_id = col_fam.column_family_id - table_pb.column_families[curr_id].CopyFrom(col_fam.to_pb()) - - request_pb = table_admin_messages_v2_pb2.CreateTableRequest( - initial_splits=initial_split_keys or [], - parent=self._instance.name, - table_id=self.table_id, - table=table_pb, - ) client = self._instance._client - # We expect a `._generated.table_pb2.Table` - client._table_stub.CreateTable(request_pb) + instance_name = self._instance.name + client._table_admin_client.create_table(parent=instance_name, + table_id=self.table_id, + table={}) def delete(self): """Delete this table.""" - request_pb = table_admin_messages_v2_pb2.DeleteTableRequest( - name=self.name) client = self._instance._client - # We expect a `google.protobuf.empty_pb2.Empty` - client._table_stub.DeleteTable(request_pb) + client._table_admin_client.delete_table(name=self.name) def list_column_families(self): """List the column families owned by this table. @@ -233,11 +200,8 @@ def list_column_families(self): family name from the response does not agree with the computed name from the column family ID. """ - request_pb = table_admin_messages_v2_pb2.GetTableRequest( - name=self.name) client = self._instance._client - # We expect a `._generated.table_pb2.Table` - table_pb = client._table_stub.GetTable(request_pb) + table_pb = client._table_admin_client.get_table(self.name) result = {} for column_family_id, value_pb in table_pb.column_families.items(): @@ -263,10 +227,12 @@ def read_row(self, row_key, filter_=None): :raises: :class:`ValueError ` if a commit row chunk is never encountered. """ - request = _create_row_request(self.name, row_key=row_key, - filter_=filter_) + request_pb = _create_row_request(self.name, row_key=row_key, + filter_=filter_) client = self._instance._client - rows_data = PartialRowsData(client._data_stub.ReadRows, request) + rows_data = PartialRowsData(client._table_data_client._read_rows, + request_pb) + rows_data.consume_all() if rows_data.state not in (rows_data.NEW_ROW, rows_data.START): raise ValueError('The row remains partial / is not committed.') @@ -308,12 +274,12 @@ def read_rows(self, start_key=None, end_key=None, limit=None, :returns: A :class:`.PartialRowsData` convenience wrapper for consuming the streamed results. 
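Read paths in the sketch below reuse the same placeholder table; all three wrappers are now fed by the data client's ``_read_rows`` method.

# Single row: returns a PartialRowData, or None if the row does not exist.
row_data = table.read_row(b'row-key-1')

# Range read: PartialRowsData buffers rows as the stream is consumed.
rows = table.read_rows(start_key=b'row-key-0', end_key=b'row-key-9', limit=100)
rows.consume_all()
for row_key, row in rows.rows.items():
    print(row_key)

# Streaming read that yields rows without buffering the whole result set.
for row in table.yield_rows(limit=10):
    print(row.row_key)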
""" - request = _create_row_request( + request_pb = _create_row_request( self.name, start_key=start_key, end_key=end_key, filter_=filter_, limit=limit, end_inclusive=end_inclusive) client = self._instance._client - - return PartialRowsData(client._data_stub.ReadRows, request) + return PartialRowsData(client._table_data_client._read_rows, + request_pb) def yield_rows(self, start_key=None, end_key=None, limit=None, filter_=None): @@ -342,13 +308,12 @@ def yield_rows(self, start_key=None, end_key=None, limit=None, :rtype: :class:`.PartialRowData` :returns: A :class:`.PartialRowData` for each row returned """ - - request = _create_row_request( + request_pb = _create_row_request( self.name, start_key=start_key, end_key=end_key, filter_=filter_, limit=limit) client = self._instance._client - - generator = YieldRowsData(client._data_stub.ReadRows, request) + generator = YieldRowsData(client._table_data_client._read_rows, + request_pb) for row in generator.read_rows(): yield row @@ -416,10 +381,9 @@ def sample_row_keys(self): or by casting to a :class:`list` and can be cancelled by calling ``cancel()``. """ - request_pb = data_messages_v2_pb2.SampleRowKeysRequest( - table_name=self.name) client = self._instance._client - response_iterator = client._data_stub.SampleRowKeys(request_pb) + response_iterator = client._table_data_client.sample_row_keys( + self.name) return response_iterator @@ -505,8 +469,8 @@ def _do_mutate_retryable_rows(self): mutate_rows_request = _mutate_rows_request( self.table_name, retryable_rows) - responses = self.client._data_stub.MutateRows( - mutate_rows_request) + responses = self.client._table_data_client._mutate_rows( + mutate_rows_request, retry=None) num_responses = 0 num_retryable_responses = 0 diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py new file mode 100644 index 000000000000..62e1934aab62 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py @@ -0,0 +1,41 @@ +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import + +from google.cloud.bigtable_admin_v2 import types +from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client +from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client +from google.cloud.bigtable_admin_v2.gapic import enums + + +class BigtableInstanceAdminClient( + bigtable_instance_admin_client.BigtableInstanceAdminClient): + __doc__ = ( + bigtable_instance_admin_client.BigtableInstanceAdminClient.__doc__) + enums = enums + + +class BigtableTableAdminClient( + bigtable_table_admin_client.BigtableTableAdminClient): + __doc__ = bigtable_table_admin_client.BigtableTableAdminClient.__doc__ + enums = enums + + +__all__ = ( + 'enums', + 'types', + 'BigtableInstanceAdminClient', + 'BigtableTableAdminClient', +) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py new file mode 100644 index 000000000000..f61e4a3b9e52 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py @@ -0,0 +1,1621 @@ +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Accesses the google.bigtable.admin.v2 BigtableInstanceAdmin API.""" + +import functools +import pkg_resources + +import google.api_core.gapic_v1.client_info +import google.api_core.gapic_v1.config +import google.api_core.gapic_v1.method +import google.api_core.gapic_v1.routing_header +import google.api_core.grpc_helpers +import google.api_core.operation +import google.api_core.operations_v1 +import google.api_core.page_iterator +import google.api_core.path_template + +from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client_config +from google.cloud.bigtable_admin_v2.gapic import enums +from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2 +from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2_grpc +from google.cloud.bigtable_admin_v2.proto import instance_pb2 +from google.iam.v1 import iam_policy_pb2 +from google.iam.v1 import policy_pb2 +from google.longrunning import operations_pb2 +from google.protobuf import empty_pb2 +from google.protobuf import field_mask_pb2 + +_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( + 'google-cloud-bigtable', ).version + + +class BigtableInstanceAdminClient(object): + """ + Service for creating, configuring, and deleting Cloud Bigtable Instances and + Clusters. Provides access to the Instance and Cluster schemas only, not the + tables' metadata or data stored in those tables. 
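The path helper classmethods defined just below are what the handwritten layer uses for resource names; a small sketch of what they produce, with placeholder IDs.

from google.cloud import bigtable_admin_v2

# 'projects/my-project/instances/my-instance'
instance_name = bigtable_admin_v2.BigtableInstanceAdminClient.instance_path(
    'my-project', 'my-instance')

# 'projects/my-project/instances/my-instance/clusters/my-cluster'
cluster_name = bigtable_admin_v2.BigtableInstanceAdminClient.cluster_path(
    'my-project', 'my-instance', 'my-cluster')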
+ """ + + SERVICE_ADDRESS = 'bigtableadmin.googleapis.com:443' + """The default address of the service.""" + + # The scopes needed to make gRPC calls to all of the methods defined in + # this service + _DEFAULT_SCOPES = ( + 'https://www.googleapis.com/auth/bigtable.admin', + 'https://www.googleapis.com/auth/bigtable.admin.cluster', + 'https://www.googleapis.com/auth/bigtable.admin.instance', + 'https://www.googleapis.com/auth/bigtable.admin.table', + 'https://www.googleapis.com/auth/cloud-bigtable.admin', + 'https://www.googleapis.com/auth/cloud-bigtable.admin.cluster', + 'https://www.googleapis.com/auth/cloud-bigtable.admin.table', + 'https://www.googleapis.com/auth/cloud-platform', + 'https://www.googleapis.com/auth/cloud-platform.read-only', + ) + + # The name of the interface for this client. This is the key used to find + # method configuration in the client_config dictionary. + _INTERFACE_NAME = 'google.bigtable.admin.v2.BigtableInstanceAdmin' + + @classmethod + def project_path(cls, project): + """Return a fully-qualified project string.""" + return google.api_core.path_template.expand( + 'projects/{project}', + project=project, + ) + + @classmethod + def instance_path(cls, project, instance): + """Return a fully-qualified instance string.""" + return google.api_core.path_template.expand( + 'projects/{project}/instances/{instance}', + project=project, + instance=instance, + ) + + @classmethod + def app_profile_path(cls, project, instance, app_profile): + """Return a fully-qualified app_profile string.""" + return google.api_core.path_template.expand( + 'projects/{project}/instances/{instance}/appProfiles/{app_profile}', + project=project, + instance=instance, + app_profile=app_profile, + ) + + @classmethod + def cluster_path(cls, project, instance, cluster): + """Return a fully-qualified cluster string.""" + return google.api_core.path_template.expand( + 'projects/{project}/instances/{instance}/clusters/{cluster}', + project=project, + instance=instance, + cluster=cluster, + ) + + @classmethod + def location_path(cls, project, location): + """Return a fully-qualified location string.""" + return google.api_core.path_template.expand( + 'projects/{project}/locations/{location}', + project=project, + location=location, + ) + + def __init__(self, + channel=None, + credentials=None, + client_config=bigtable_instance_admin_client_config.config, + client_info=None): + """Constructor. + + Args: + channel (grpc.Channel): A ``Channel`` instance through + which to make calls. This argument is mutually exclusive + with ``credentials``; providing both will raise an exception. + credentials (google.auth.credentials.Credentials): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If none + are specified, the client will attempt to ascertain the + credentials from the environment. + client_config (dict): A dictionary of call options for each + method. If not specified, the default configuration is used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + """ + # If both `channel` and `credentials` are specified, raise an + # exception (channels come with credentials baked in already). 
+ if channel is not None and credentials is not None: + raise ValueError( + 'The `channel` and `credentials` arguments to {} are mutually ' + 'exclusive.'.format(self.__class__.__name__), ) + + # Create the channel. + if channel is None: + channel = google.api_core.grpc_helpers.create_channel( + self.SERVICE_ADDRESS, + credentials=credentials, + scopes=self._DEFAULT_SCOPES, + ) + + # Create the gRPC stubs. + self.bigtable_instance_admin_stub = ( + bigtable_instance_admin_pb2_grpc.BigtableInstanceAdminStub(channel)) + + # Operations client for methods that return long-running operations + # futures. + self.operations_client = ( + google.api_core.operations_v1.OperationsClient(channel)) + + if client_info is None: + client_info = ( + google.api_core.gapic_v1.client_info.DEFAULT_CLIENT_INFO) + client_info.gapic_version = _GAPIC_LIBRARY_VERSION + + # Parse out the default settings for retry and timeout for each RPC + # from the client configuration. + # (Ordinarily, these are the defaults specified in the `*_config.py` + # file next to this one.) + method_configs = google.api_core.gapic_v1.config.parse_method_configs( + client_config['interfaces'][self._INTERFACE_NAME], ) + + # Write the "inner API call" methods to the class. + # These are wrapped versions of the gRPC stub methods, with retry and + # timeout configuration applied, called by the public methods on + # this class. + self._create_instance = google.api_core.gapic_v1.method.wrap_method( + self.bigtable_instance_admin_stub.CreateInstance, + default_retry=method_configs['CreateInstance'].retry, + default_timeout=method_configs['CreateInstance'].timeout, + client_info=client_info, + ) + self._get_instance = google.api_core.gapic_v1.method.wrap_method( + self.bigtable_instance_admin_stub.GetInstance, + default_retry=method_configs['GetInstance'].retry, + default_timeout=method_configs['GetInstance'].timeout, + client_info=client_info, + ) + self._list_instances = google.api_core.gapic_v1.method.wrap_method( + self.bigtable_instance_admin_stub.ListInstances, + default_retry=method_configs['ListInstances'].retry, + default_timeout=method_configs['ListInstances'].timeout, + client_info=client_info, + ) + self._update_instance = google.api_core.gapic_v1.method.wrap_method( + self.bigtable_instance_admin_stub.UpdateInstance, + default_retry=method_configs['UpdateInstance'].retry, + default_timeout=method_configs['UpdateInstance'].timeout, + client_info=client_info, + ) + self._partial_update_instance = google.api_core.gapic_v1.method.wrap_method( + self.bigtable_instance_admin_stub.PartialUpdateInstance, + default_retry=method_configs['PartialUpdateInstance'].retry, + default_timeout=method_configs['PartialUpdateInstance'].timeout, + client_info=client_info, + ) + self._delete_instance = google.api_core.gapic_v1.method.wrap_method( + self.bigtable_instance_admin_stub.DeleteInstance, + default_retry=method_configs['DeleteInstance'].retry, + default_timeout=method_configs['DeleteInstance'].timeout, + client_info=client_info, + ) + self._create_cluster = google.api_core.gapic_v1.method.wrap_method( + self.bigtable_instance_admin_stub.CreateCluster, + default_retry=method_configs['CreateCluster'].retry, + default_timeout=method_configs['CreateCluster'].timeout, + client_info=client_info, + ) + self._get_cluster = google.api_core.gapic_v1.method.wrap_method( + self.bigtable_instance_admin_stub.GetCluster, + default_retry=method_configs['GetCluster'].retry, + default_timeout=method_configs['GetCluster'].timeout, + client_info=client_info, + ) + 
self._list_clusters = google.api_core.gapic_v1.method.wrap_method( + self.bigtable_instance_admin_stub.ListClusters, + default_retry=method_configs['ListClusters'].retry, + default_timeout=method_configs['ListClusters'].timeout, + client_info=client_info, + ) + self._update_cluster = google.api_core.gapic_v1.method.wrap_method( + self.bigtable_instance_admin_stub.UpdateCluster, + default_retry=method_configs['UpdateCluster'].retry, + default_timeout=method_configs['UpdateCluster'].timeout, + client_info=client_info, + ) + self._delete_cluster = google.api_core.gapic_v1.method.wrap_method( + self.bigtable_instance_admin_stub.DeleteCluster, + default_retry=method_configs['DeleteCluster'].retry, + default_timeout=method_configs['DeleteCluster'].timeout, + client_info=client_info, + ) + self._create_app_profile = google.api_core.gapic_v1.method.wrap_method( + self.bigtable_instance_admin_stub.CreateAppProfile, + default_retry=method_configs['CreateAppProfile'].retry, + default_timeout=method_configs['CreateAppProfile'].timeout, + client_info=client_info, + ) + self._get_app_profile = google.api_core.gapic_v1.method.wrap_method( + self.bigtable_instance_admin_stub.GetAppProfile, + default_retry=method_configs['GetAppProfile'].retry, + default_timeout=method_configs['GetAppProfile'].timeout, + client_info=client_info, + ) + self._list_app_profiles = google.api_core.gapic_v1.method.wrap_method( + self.bigtable_instance_admin_stub.ListAppProfiles, + default_retry=method_configs['ListAppProfiles'].retry, + default_timeout=method_configs['ListAppProfiles'].timeout, + client_info=client_info, + ) + self._update_app_profile = google.api_core.gapic_v1.method.wrap_method( + self.bigtable_instance_admin_stub.UpdateAppProfile, + default_retry=method_configs['UpdateAppProfile'].retry, + default_timeout=method_configs['UpdateAppProfile'].timeout, + client_info=client_info, + ) + self._delete_app_profile = google.api_core.gapic_v1.method.wrap_method( + self.bigtable_instance_admin_stub.DeleteAppProfile, + default_retry=method_configs['DeleteAppProfile'].retry, + default_timeout=method_configs['DeleteAppProfile'].timeout, + client_info=client_info, + ) + self._get_iam_policy = google.api_core.gapic_v1.method.wrap_method( + self.bigtable_instance_admin_stub.GetIamPolicy, + default_retry=method_configs['GetIamPolicy'].retry, + default_timeout=method_configs['GetIamPolicy'].timeout, + client_info=client_info, + ) + self._set_iam_policy = google.api_core.gapic_v1.method.wrap_method( + self.bigtable_instance_admin_stub.SetIamPolicy, + default_retry=method_configs['SetIamPolicy'].retry, + default_timeout=method_configs['SetIamPolicy'].timeout, + client_info=client_info, + ) + self._test_iam_permissions = google.api_core.gapic_v1.method.wrap_method( + self.bigtable_instance_admin_stub.TestIamPermissions, + default_retry=method_configs['TestIamPermissions'].retry, + default_timeout=method_configs['TestIamPermissions'].timeout, + client_info=client_info, + ) + + # Service calls + def create_instance(self, + parent, + instance_id, + instance, + clusters, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None): + """ + Create an instance within a project. 
+ + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() + >>> + >>> parent = client.project_path('[PROJECT]') + >>> + >>> # TODO: Initialize ``instance_id``: + >>> instance_id = '' + >>> + >>> # TODO: Initialize ``instance``: + >>> instance = {} + >>> + >>> # TODO: Initialize ``clusters``: + >>> clusters = {} + >>> + >>> response = client.create_instance(parent, instance_id, instance, clusters) + >>> + >>> def callback(operation_future): + ... # Handle result. + ... result = operation_future.result() + >>> + >>> response.add_done_callback(callback) + >>> + >>> # Handle metadata. + >>> metadata = response.metadata() + + Args: + parent (str): The unique name of the project in which to create the new instance. + Values are of the form ``projects/``. + instance_id (str): The ID to be used when referring to the new instance within its project, + e.g., just ``myinstance`` rather than + ``projects/myproject/instances/myinstance``. + instance (Union[dict, ~google.cloud.bigtable_admin_v2.types.Instance]): The instance to create. + Fields marked ``OutputOnly`` must be left blank. + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_admin_v2.types.Instance` + clusters (dict[str -> Union[dict, ~google.cloud.bigtable_admin_v2.types.Cluster]]): The clusters to be created within the instance, mapped by desired + cluster ID, e.g., just ``mycluster`` rather than + ``projects/myproject/instances/myinstance/clusters/mycluster``. + Fields marked ``OutputOnly`` must be left blank. + Currently exactly one cluster must be specified. + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_admin_v2.types.Cluster` + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will not + be retried. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + if metadata is None: + metadata = [] + metadata = list(metadata) + request = bigtable_instance_admin_pb2.CreateInstanceRequest( + parent=parent, + instance_id=instance_id, + instance=instance, + clusters=clusters, + ) + + routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + [('parent', parent)], ) + metadata.append(routing_header) + + operation = self._create_instance( + request, retry=retry, timeout=timeout, metadata=metadata) + return google.api_core.operation.from_gapic( + operation, + self.operations_client, + instance_pb2.Instance, + metadata_type=bigtable_instance_admin_pb2.CreateInstanceMetadata, + ) + + def get_instance(self, + name, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None): + """ + Gets information about an instance. 
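Illustrative sketch of consuming the long-running operation returned by ``create_instance`` above; besides the callback style in the docstring, the returned future can simply be blocked on. Project, instance, and cluster names below are placeholders.

    from google.cloud import bigtable_admin_v2

    client = bigtable_admin_v2.BigtableInstanceAdminClient()
    parent = client.project_path('my-project')  # placeholder project ID

    operation_future = client.create_instance(
        parent,
        'my-instance',                          # placeholder instance ID
        {'display_name': 'My instance'},        # Instance given as a dict
        {                                       # clusters keyed by cluster ID
            'my-cluster': {
                'location': 'projects/my-project/locations/us-central1-b',
                'serve_nodes': 3,
            },
        },
    )

    # Block until the operation finishes; raises on failure or timeout.
    instance = operation_future.result(timeout=300)
    print(instance.name)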
+ + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() + >>> + >>> name = client.instance_path('[PROJECT]', '[INSTANCE]') + >>> + >>> response = client.get_instance(name) + + Args: + name (str): The unique name of the requested instance. Values are of the form + ``projects//instances/``. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will not + be retried. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types.Instance` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + if metadata is None: + metadata = [] + metadata = list(metadata) + request = bigtable_instance_admin_pb2.GetInstanceRequest(name=name, ) + + routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + [('name', name)], ) + metadata.append(routing_header) + + return self._get_instance( + request, retry=retry, timeout=timeout, metadata=metadata) + + def list_instances(self, + parent, + page_token=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None): + """ + Lists information about instances in a project. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() + >>> + >>> parent = client.project_path('[PROJECT]') + >>> + >>> response = client.list_instances(parent) + + Args: + parent (str): The unique name of the project for which a list of instances is requested. + Values are of the form ``projects/``. + page_token (str): The value of ``next_page_token`` returned by a previous call. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will not + be retried. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types.ListInstancesResponse` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. 
+ """ + if metadata is None: + metadata = [] + metadata = list(metadata) + request = bigtable_instance_admin_pb2.ListInstancesRequest( + parent=parent, + page_token=page_token, + ) + + routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + [('parent', parent)], ) + metadata.append(routing_header) + + return self._list_instances( + request, retry=retry, timeout=timeout, metadata=metadata) + + def update_instance(self, + name, + display_name, + type_, + labels, + state=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None): + """ + Updates an instance within a project. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> from google.cloud.bigtable_admin_v2 import enums + >>> + >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() + >>> + >>> name = client.instance_path('[PROJECT]', '[INSTANCE]') + >>> + >>> # TODO: Initialize ``display_name``: + >>> display_name = '' + >>> + >>> # TODO: Initialize ``type_``: + >>> type_ = enums.Instance.Type.TYPE_UNSPECIFIED + >>> + >>> # TODO: Initialize ``labels``: + >>> labels = {} + >>> + >>> response = client.update_instance(name, display_name, type_, labels) + + Args: + name (str): (``OutputOnly``) + The unique name of the instance. Values are of the form + ``projects//instances/[a-z][a-z0-9\\-]+[a-z0-9]``. + display_name (str): The descriptive name for this instance as it appears in UIs. + Can be changed at any time, but should be kept globally unique + to avoid confusion. + type_ (~google.cloud.bigtable_admin_v2.types.Type): The type of the instance. Defaults to ``PRODUCTION``. + labels (dict[str -> str]): Labels are a flexible and lightweight mechanism for organizing cloud + resources into groups that reflect a customer's organizational needs and + deployment strategies. They can be used to filter resources and aggregate + metrics. + + * Label keys must be between 1 and 63 characters long and must conform to + the regular expression: ``[\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}``. + * Label values must be between 0 and 63 characters long and must conform to + the regular expression: ``[\p{Ll}\p{Lo}\p{N}_-]{0,63}``. + * No more than 64 labels can be associated with a given resource. + * Keys and values must both be under 128 bytes. + state (~google.cloud.bigtable_admin_v2.types.State): (``OutputOnly``) + The current state of the instance. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will not + be retried. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types.Instance` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. 
+ """ + if metadata is None: + metadata = [] + metadata = list(metadata) + request = instance_pb2.Instance( + name=name, + display_name=display_name, + type=type_, + labels=labels, + state=state, + ) + + routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + [('name', name)], ) + metadata.append(routing_header) + + return self._update_instance( + request, retry=retry, timeout=timeout, metadata=metadata) + + def partial_update_instance( + self, + instance, + update_mask, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None): + """ + Partially updates an instance within a project. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() + >>> + >>> # TODO: Initialize ``instance``: + >>> instance = {} + >>> + >>> # TODO: Initialize ``update_mask``: + >>> update_mask = {} + >>> + >>> response = client.partial_update_instance(instance, update_mask) + >>> + >>> def callback(operation_future): + ... # Handle result. + ... result = operation_future.result() + >>> + >>> response.add_done_callback(callback) + >>> + >>> # Handle metadata. + >>> metadata = response.metadata() + + Args: + instance (Union[dict, ~google.cloud.bigtable_admin_v2.types.Instance]): The Instance which will (partially) replace the current value. + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_admin_v2.types.Instance` + update_mask (Union[dict, ~google.cloud.bigtable_admin_v2.types.FieldMask]): The subset of Instance fields which should be replaced. + Must be explicitly set. + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_admin_v2.types.FieldMask` + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will not + be retried. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + if metadata is None: + metadata = [] + metadata = list(metadata) + request = bigtable_instance_admin_pb2.PartialUpdateInstanceRequest( + instance=instance, + update_mask=update_mask, + ) + + routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + [('instance.name', instance.name)], ) + metadata.append(routing_header) + + operation = self._partial_update_instance( + request, retry=retry, timeout=timeout, metadata=metadata) + return google.api_core.operation.from_gapic( + operation, + self.operations_client, + instance_pb2.Instance, + metadata_type=bigtable_instance_admin_pb2.UpdateInstanceMetadata, + ) + + def delete_instance(self, + name, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None): + """ + Delete an instance from a project. 
+ + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() + >>> + >>> name = client.instance_path('[PROJECT]', '[INSTANCE]') + >>> + >>> client.delete_instance(name) + + Args: + name (str): The unique name of the instance to be deleted. + Values are of the form ``projects//instances/``. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will not + be retried. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + if metadata is None: + metadata = [] + metadata = list(metadata) + request = bigtable_instance_admin_pb2.DeleteInstanceRequest( + name=name, ) + + routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + [('name', name)], ) + metadata.append(routing_header) + + self._delete_instance( + request, retry=retry, timeout=timeout, metadata=metadata) + + def create_cluster(self, + parent, + cluster_id, + cluster, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None): + """ + Creates a cluster within an instance. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() + >>> + >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') + >>> + >>> # TODO: Initialize ``cluster_id``: + >>> cluster_id = '' + >>> + >>> # TODO: Initialize ``cluster``: + >>> cluster = {} + >>> + >>> response = client.create_cluster(parent, cluster_id, cluster) + >>> + >>> def callback(operation_future): + ... # Handle result. + ... result = operation_future.result() + >>> + >>> response.add_done_callback(callback) + >>> + >>> # Handle metadata. + >>> metadata = response.metadata() + + Args: + parent (str): The unique name of the instance in which to create the new cluster. + Values are of the form + ``projects//instances/``. + cluster_id (str): The ID to be used when referring to the new cluster within its instance, + e.g., just ``mycluster`` rather than + ``projects/myproject/instances/myinstance/clusters/mycluster``. + cluster (Union[dict, ~google.cloud.bigtable_admin_v2.types.Cluster]): The cluster to be created. + Fields marked ``OutputOnly`` must be left blank. + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_admin_v2.types.Cluster` + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will not + be retried. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. 
+ google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + if metadata is None: + metadata = [] + metadata = list(metadata) + request = bigtable_instance_admin_pb2.CreateClusterRequest( + parent=parent, + cluster_id=cluster_id, + cluster=cluster, + ) + + routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + [('parent', parent)], ) + metadata.append(routing_header) + + operation = self._create_cluster( + request, retry=retry, timeout=timeout, metadata=metadata) + return google.api_core.operation.from_gapic( + operation, + self.operations_client, + instance_pb2.Cluster, + metadata_type=bigtable_instance_admin_pb2.CreateClusterMetadata, + ) + + def get_cluster(self, + name, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None): + """ + Gets information about a cluster. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() + >>> + >>> name = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') + >>> + >>> response = client.get_cluster(name) + + Args: + name (str): The unique name of the requested cluster. Values are of the form + ``projects//instances//clusters/``. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will not + be retried. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types.Cluster` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + if metadata is None: + metadata = [] + metadata = list(metadata) + request = bigtable_instance_admin_pb2.GetClusterRequest(name=name, ) + + routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + [('name', name)], ) + metadata.append(routing_header) + + return self._get_cluster( + request, retry=retry, timeout=timeout, metadata=metadata) + + def list_clusters(self, + parent, + page_token=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None): + """ + Lists information about clusters in an instance. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() + >>> + >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') + >>> + >>> response = client.list_clusters(parent) + + Args: + parent (str): The unique name of the instance for which a list of clusters is requested. + Values are of the form ``projects//instances/``. + Use `` = '-'`` to list Clusters for all Instances in a project, + e.g., ``projects/myproject/instances/-``. + page_token (str): The value of ``next_page_token`` returned by a previous call. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will not + be retried. 
+ timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types.ListClustersResponse` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + if metadata is None: + metadata = [] + metadata = list(metadata) + request = bigtable_instance_admin_pb2.ListClustersRequest( + parent=parent, + page_token=page_token, + ) + + routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + [('parent', parent)], ) + metadata.append(routing_header) + + return self._list_clusters( + request, retry=retry, timeout=timeout, metadata=metadata) + + def update_cluster(self, + name, + location, + serve_nodes, + state=None, + default_storage_type=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None): + """ + Updates a cluster within an instance. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() + >>> + >>> name = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') + >>> + >>> # TODO: Initialize ``location``: + >>> location = '' + >>> + >>> # TODO: Initialize ``serve_nodes``: + >>> serve_nodes = 0 + >>> + >>> response = client.update_cluster(name, location, serve_nodes) + >>> + >>> def callback(operation_future): + ... # Handle result. + ... result = operation_future.result() + >>> + >>> response.add_done_callback(callback) + >>> + >>> # Handle metadata. + >>> metadata = response.metadata() + + Args: + name (str): (``OutputOnly``) + The unique name of the cluster. Values are of the form + ``projects//instances//clusters/[a-z][-a-z0-9]*``. + location (str): (``CreationOnly``) + The location where this cluster's nodes and storage reside. For best + performance, clients should be located as close as possible to this + cluster. Currently only zones are supported, so values should be of the + form ``projects//locations/``. + serve_nodes (int): The number of nodes allocated to this cluster. More nodes enable higher + throughput and more consistent performance. + state (~google.cloud.bigtable_admin_v2.types.State): (``OutputOnly``) + The current state of the cluster. + default_storage_type (~google.cloud.bigtable_admin_v2.types.StorageType): (``CreationOnly``) + The type of storage used by this cluster to serve its + parent instance's tables, unless explicitly overridden. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will not + be retried. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. 
+ google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + if metadata is None: + metadata = [] + metadata = list(metadata) + request = instance_pb2.Cluster( + name=name, + location=location, + serve_nodes=serve_nodes, + state=state, + default_storage_type=default_storage_type, + ) + + routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + [('name', name)], ) + metadata.append(routing_header) + + operation = self._update_cluster( + request, retry=retry, timeout=timeout, metadata=metadata) + return google.api_core.operation.from_gapic( + operation, + self.operations_client, + instance_pb2.Cluster, + metadata_type=bigtable_instance_admin_pb2.UpdateClusterMetadata, + ) + + def delete_cluster(self, + name, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None): + """ + Deletes a cluster from an instance. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() + >>> + >>> name = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') + >>> + >>> client.delete_cluster(name) + + Args: + name (str): The unique name of the cluster to be deleted. Values are of the form + ``projects//instances//clusters/``. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will not + be retried. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + if metadata is None: + metadata = [] + metadata = list(metadata) + request = bigtable_instance_admin_pb2.DeleteClusterRequest(name=name, ) + + routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + [('name', name)], ) + metadata.append(routing_header) + + self._delete_cluster( + request, retry=retry, timeout=timeout, metadata=metadata) + + def create_app_profile(self, + parent, + app_profile_id, + app_profile, + ignore_warnings=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None): + """ + This is a private alpha release of Cloud Bigtable replication. This feature + is not currently available to most Cloud Bigtable customers. This feature + might be changed in backward-incompatible ways and is not recommended for + production use. It is not subject to any SLA or deprecation policy. + + Creates an app profile within an instance. 
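Referring back to ``update_cluster`` above: an assumption-laden sketch of resizing a cluster by sending a new ``serve_nodes`` count and waiting on the returned operation. Names are placeholders, and the cluster's existing zone is echoed back because ``location`` is a required positional argument (and ``CreationOnly``).

    from google.cloud import bigtable_admin_v2

    client = bigtable_admin_v2.BigtableInstanceAdminClient()
    name = client.cluster_path('my-project', 'my-instance', 'my-cluster')

    operation_future = client.update_cluster(
        name,
        'projects/my-project/locations/us-central1-b',  # existing zone, echoed back
        5,                                              # new serve_nodes count
    )
    cluster = operation_future.result()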
+ + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() + >>> + >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') + >>> + >>> # TODO: Initialize ``app_profile_id``: + >>> app_profile_id = '' + >>> + >>> # TODO: Initialize ``app_profile``: + >>> app_profile = {} + >>> + >>> response = client.create_app_profile(parent, app_profile_id, app_profile) + + Args: + parent (str): The unique name of the instance in which to create the new app profile. + Values are of the form + ``projects//instances/``. + app_profile_id (str): The ID to be used when referring to the new app profile within its + instance, e.g., just ``myprofile`` rather than + ``projects/myproject/instances/myinstance/appProfiles/myprofile``. + app_profile (Union[dict, ~google.cloud.bigtable_admin_v2.types.AppProfile]): The app profile to be created. + Fields marked ``OutputOnly`` will be ignored. + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_admin_v2.types.AppProfile` + ignore_warnings (bool): If true, ignore safety checks when creating the app profile. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will not + be retried. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types.AppProfile` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + if metadata is None: + metadata = [] + metadata = list(metadata) + request = bigtable_instance_admin_pb2.CreateAppProfileRequest( + parent=parent, + app_profile_id=app_profile_id, + app_profile=app_profile, + ignore_warnings=ignore_warnings, + ) + + routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + [('parent', parent)], ) + metadata.append(routing_header) + + return self._create_app_profile( + request, retry=retry, timeout=timeout, metadata=metadata) + + def get_app_profile(self, + name, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None): + """ + This is a private alpha release of Cloud Bigtable replication. This feature + is not currently available to most Cloud Bigtable customers. This feature + might be changed in backward-incompatible ways and is not recommended for + production use. It is not subject to any SLA or deprecation policy. + + Gets information about an app profile. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() + >>> + >>> name = client.app_profile_path('[PROJECT]', '[INSTANCE]', '[APP_PROFILE]') + >>> + >>> response = client.get_app_profile(name) + + Args: + name (str): The unique name of the requested app profile. Values are of the form + ``projects//instances//appProfiles/``. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will not + be retried. 
+ timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types.AppProfile` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + if metadata is None: + metadata = [] + metadata = list(metadata) + request = bigtable_instance_admin_pb2.GetAppProfileRequest(name=name, ) + + routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + [('name', name)], ) + metadata.append(routing_header) + + return self._get_app_profile( + request, retry=retry, timeout=timeout, metadata=metadata) + + def list_app_profiles(self, + parent, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None): + """ + This is a private alpha release of Cloud Bigtable replication. This feature + is not currently available to most Cloud Bigtable customers. This feature + might be changed in backward-incompatible ways and is not recommended for + production use. It is not subject to any SLA or deprecation policy. + + Lists information about app profiles in an instance. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() + >>> + >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') + >>> + >>> + >>> # Iterate over all results + >>> for element in client.list_app_profiles(parent): + ... # process element + ... pass + >>> + >>> # Or iterate over results one page at a time + >>> for page in client.list_app_profiles(parent, options=CallOptions(page_token=INITIAL_PAGE)): + ... for element in page: + ... # process element + ... pass + + Args: + parent (str): The unique name of the instance for which a list of app profiles is + requested. Values are of the form + ``projects//instances/``. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will not + be retried. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.gax.PageIterator` instance. By default, this + is an iterable of :class:`~google.cloud.bigtable_admin_v2.types.AppProfile` instances. + This object can also be configured to iterate over the pages + of the response through the `options` parameter. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. 
+ """ + if metadata is None: + metadata = [] + metadata = list(metadata) + request = bigtable_instance_admin_pb2.ListAppProfilesRequest( + parent=parent, ) + + routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + [('parent', parent)], ) + metadata.append(routing_header) + + iterator = google.api_core.page_iterator.GRPCIterator( + client=None, + method=functools.partial( + self._list_app_profiles, + retry=retry, + timeout=timeout, + metadata=metadata), + request=request, + items_field='app_profiles', + request_token_field='page_token', + response_token_field='next_page_token', + ) + return iterator + + def update_app_profile(self, + app_profile, + update_mask, + ignore_warnings=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None): + """ + This is a private alpha release of Cloud Bigtable replication. This feature + is not currently available to most Cloud Bigtable customers. This feature + might be changed in backward-incompatible ways and is not recommended for + production use. It is not subject to any SLA or deprecation policy. + + Updates an app profile within an instance. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() + >>> + >>> # TODO: Initialize ``app_profile``: + >>> app_profile = {} + >>> + >>> # TODO: Initialize ``update_mask``: + >>> update_mask = {} + >>> + >>> response = client.update_app_profile(app_profile, update_mask) + >>> + >>> def callback(operation_future): + ... # Handle result. + ... result = operation_future.result() + >>> + >>> response.add_done_callback(callback) + >>> + >>> # Handle metadata. + >>> metadata = response.metadata() + + Args: + app_profile (Union[dict, ~google.cloud.bigtable_admin_v2.types.AppProfile]): The app profile which will (partially) replace the current value. + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_admin_v2.types.AppProfile` + update_mask (Union[dict, ~google.cloud.bigtable_admin_v2.types.FieldMask]): The subset of app profile fields which should be replaced. + If unset, all fields will be replaced. + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_admin_v2.types.FieldMask` + ignore_warnings (bool): If true, ignore safety checks when updating the app profile. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will not + be retried. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. 
+ """ + if metadata is None: + metadata = [] + metadata = list(metadata) + request = bigtable_instance_admin_pb2.UpdateAppProfileRequest( + app_profile=app_profile, + update_mask=update_mask, + ignore_warnings=ignore_warnings, + ) + + routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + [('app_profile.name', app_profile.name)], ) + metadata.append(routing_header) + + operation = self._update_app_profile( + request, retry=retry, timeout=timeout, metadata=metadata) + return google.api_core.operation.from_gapic( + operation, + self.operations_client, + instance_pb2.AppProfile, + metadata_type=bigtable_instance_admin_pb2.UpdateAppProfileMetadata, + ) + + def delete_app_profile(self, + name, + ignore_warnings, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None): + """ + This is a private alpha release of Cloud Bigtable replication. This feature + is not currently available to most Cloud Bigtable customers. This feature + might be changed in backward-incompatible ways and is not recommended for + production use. It is not subject to any SLA or deprecation policy. + + Deletes an app profile from an instance. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() + >>> + >>> name = client.app_profile_path('[PROJECT]', '[INSTANCE]', '[APP_PROFILE]') + >>> + >>> # TODO: Initialize ``ignore_warnings``: + >>> ignore_warnings = False + >>> + >>> client.delete_app_profile(name, ignore_warnings) + + Args: + name (str): The unique name of the app profile to be deleted. Values are of the form + ``projects//instances//appProfiles/``. + ignore_warnings (bool): If true, ignore safety checks when deleting the app profile. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will not + be retried. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + if metadata is None: + metadata = [] + metadata = list(metadata) + request = bigtable_instance_admin_pb2.DeleteAppProfileRequest( + name=name, + ignore_warnings=ignore_warnings, + ) + + routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + [('name', name)], ) + metadata.append(routing_header) + + self._delete_app_profile( + request, retry=retry, timeout=timeout, metadata=metadata) + + def get_iam_policy(self, + resource, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None): + """ + This is a private alpha release of Cloud Bigtable instance level + permissions. This feature is not currently available to most Cloud Bigtable + customers. This feature might be changed in backward-incompatible ways and + is not recommended for production use. It is not subject to any SLA or + deprecation policy. + + Gets the access control policy for an instance resource. Returns an empty + policy if an instance exists but does not have a policy set. 
+ + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() + >>> + >>> resource = client.instance_path('[PROJECT]', '[INSTANCE]') + >>> + >>> response = client.get_iam_policy(resource) + + Args: + resource (str): REQUIRED: The resource for which the policy is being requested. + ``resource`` is usually specified as a path. For example, a Project + resource is specified as ``projects/{project}``. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will not + be retried. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types.Policy` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + if metadata is None: + metadata = [] + metadata = list(metadata) + request = iam_policy_pb2.GetIamPolicyRequest(resource=resource, ) + + routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + [('resource', resource)], ) + metadata.append(routing_header) + + return self._get_iam_policy( + request, retry=retry, timeout=timeout, metadata=metadata) + + def set_iam_policy(self, + resource, + policy, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None): + """ + This is a private alpha release of Cloud Bigtable instance level + permissions. This feature is not currently available to most Cloud Bigtable + customers. This feature might be changed in backward-incompatible ways and + is not recommended for production use. It is not subject to any SLA or + deprecation policy. + + Sets the access control policy on an instance resource. Replaces any + existing policy. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() + >>> + >>> resource = client.instance_path('[PROJECT]', '[INSTANCE]') + >>> + >>> # TODO: Initialize ``policy``: + >>> policy = {} + >>> + >>> response = client.set_iam_policy(resource, policy) + + Args: + resource (str): REQUIRED: The resource for which the policy is being specified. + ``resource`` is usually specified as a path. For example, a Project + resource is specified as ``projects/{project}``. + policy (Union[dict, ~google.cloud.bigtable_admin_v2.types.Policy]): REQUIRED: The complete policy to be applied to the ``resource``. The size of + the policy is limited to a few 10s of KB. An empty policy is a + valid policy but certain Cloud Platform services (such as Projects) + might reject them. + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_admin_v2.types.Policy` + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will not + be retried. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. 
+ metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types.Policy` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + if metadata is None: + metadata = [] + metadata = list(metadata) + request = iam_policy_pb2.SetIamPolicyRequest( + resource=resource, + policy=policy, + ) + + routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + [('resource', resource)], ) + metadata.append(routing_header) + + return self._set_iam_policy( + request, retry=retry, timeout=timeout, metadata=metadata) + + def test_iam_permissions(self, + resource, + permissions, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None): + """ + This is a private alpha release of Cloud Bigtable instance level + permissions. This feature is not currently available to most Cloud Bigtable + customers. This feature might be changed in backward-incompatible ways and + is not recommended for production use. It is not subject to any SLA or + deprecation policy. + + Returns permissions that the caller has on the specified instance resource. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() + >>> + >>> resource = client.instance_path('[PROJECT]', '[INSTANCE]') + >>> + >>> # TODO: Initialize ``permissions``: + >>> permissions = [] + >>> + >>> response = client.test_iam_permissions(resource, permissions) + + Args: + resource (str): REQUIRED: The resource for which the policy detail is being requested. + ``resource`` is usually specified as a path. For example, a Project + resource is specified as ``projects/{project}``. + permissions (list[str]): The set of permissions to check for the ``resource``. Permissions with + wildcards (such as '*' or 'storage.*') are not allowed. For more + information see + `IAM Overview `_. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will not + be retried. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types.TestIamPermissionsResponse` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. 
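For the ``get_iam_policy`` / ``set_iam_policy`` pair above, a sketch of the usual read-modify-write cycle. The role and member below are illustrative only, and the bindings manipulation assumes the standard ``google.iam.v1`` ``Policy`` message returned by ``get_iam_policy``.

    from google.cloud import bigtable_admin_v2

    client = bigtable_admin_v2.BigtableInstanceAdminClient()
    resource = client.instance_path('my-project', 'my-instance')  # placeholders

    policy = client.get_iam_policy(resource)

    binding = policy.bindings.add()
    binding.role = 'roles/bigtable.user'                 # illustrative role
    binding.members.append('user:someone@example.com')   # illustrative member

    updated_policy = client.set_iam_policy(resource, policy)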
+ """ + if metadata is None: + metadata = [] + metadata = list(metadata) + request = iam_policy_pb2.TestIamPermissionsRequest( + resource=resource, + permissions=permissions, + ) + + routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + [('resource', resource)], ) + metadata.append(routing_header) + + return self._test_iam_permissions( + request, retry=retry, timeout=timeout, metadata=metadata) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client_config.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client_config.py new file mode 100644 index 000000000000..5b738628f32f --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client_config.py @@ -0,0 +1,118 @@ +config = { + "interfaces": { + "google.bigtable.admin.v2.BigtableInstanceAdmin": { + "retry_codes": { + "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], + "non_idempotent": ["UNAVAILABLE"] + }, + "retry_params": { + "default": { + "initial_retry_delay_millis": 5, + "retry_delay_multiplier": 2.0, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 60000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 60000, + "total_timeout_millis": 600000 + } + }, + "methods": { + "CreateInstance": { + "timeout_millis": 60000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "default" + }, + "GetInstance": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "default" + }, + "ListInstances": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "default" + }, + "UpdateInstance": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "default" + }, + "PartialUpdateInstance": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "default" + }, + "DeleteInstance": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "default" + }, + "CreateCluster": { + "timeout_millis": 60000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "default" + }, + "GetCluster": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "default" + }, + "ListClusters": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "default" + }, + "UpdateCluster": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "default" + }, + "DeleteCluster": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "default" + }, + "CreateAppProfile": { + "timeout_millis": 60000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "default" + }, + "GetAppProfile": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "default" + }, + "ListAppProfiles": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "default" + }, + "UpdateAppProfile": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "default" + }, + "DeleteAppProfile": { + "timeout_millis": 60000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "default" + }, + "GetIamPolicy": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "default" + }, + "SetIamPolicy": { + "timeout_millis": 60000, + "retry_codes_name": "non_idempotent", + 
"retry_params_name": "default" + }, + "TestIamPermissions": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "default" + } + } + } + } +} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py new file mode 100644 index 000000000000..2dbac887a560 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py @@ -0,0 +1,1208 @@ +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Accesses the google.bigtable.admin.v2 BigtableTableAdmin API.""" + +import functools +import pkg_resources + +import google.api_core.gapic_v1.client_info +import google.api_core.gapic_v1.config +import google.api_core.gapic_v1.method +import google.api_core.gapic_v1.routing_header +import google.api_core.grpc_helpers +import google.api_core.operation +import google.api_core.operations_v1 +import google.api_core.page_iterator +import google.api_core.path_template +import google.api_core.protobuf_helpers + +from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client_config +from google.cloud.bigtable_admin_v2.gapic import enums +from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2 +from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2_grpc +from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2 +from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2_grpc +from google.cloud.bigtable_admin_v2.proto import instance_pb2 +from google.cloud.bigtable_admin_v2.proto import table_pb2 +from google.iam.v1 import iam_policy_pb2 +from google.iam.v1 import policy_pb2 +from google.longrunning import operations_pb2 +from google.protobuf import duration_pb2 +from google.protobuf import empty_pb2 +from google.protobuf import field_mask_pb2 + +_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( + 'google-cloud-bigtable', ).version + + +class BigtableTableAdminClient(object): + """ + Service for creating, configuring, and deleting Cloud Bigtable tables. + + + Provides access to the table schemas only, not the data stored within + the tables. 
+ """ + + SERVICE_ADDRESS = 'bigtableadmin.googleapis.com:443' + """The default address of the service.""" + + # The scopes needed to make gRPC calls to all of the methods defined in + # this service + _DEFAULT_SCOPES = ( + 'https://www.googleapis.com/auth/bigtable.admin', + 'https://www.googleapis.com/auth/bigtable.admin.cluster', + 'https://www.googleapis.com/auth/bigtable.admin.instance', + 'https://www.googleapis.com/auth/bigtable.admin.table', + 'https://www.googleapis.com/auth/cloud-bigtable.admin', + 'https://www.googleapis.com/auth/cloud-bigtable.admin.cluster', + 'https://www.googleapis.com/auth/cloud-bigtable.admin.table', + 'https://www.googleapis.com/auth/cloud-platform', + 'https://www.googleapis.com/auth/cloud-platform.read-only', + ) + + # The name of the interface for this client. This is the key used to find + # method configuration in the client_config dictionary. + _INTERFACE_NAME = 'google.bigtable.admin.v2.BigtableTableAdmin' + + @classmethod + def instance_path(cls, project, instance): + """Return a fully-qualified instance string.""" + return google.api_core.path_template.expand( + 'projects/{project}/instances/{instance}', + project=project, + instance=instance, + ) + + @classmethod + def cluster_path(cls, project, instance, cluster): + """Return a fully-qualified cluster string.""" + return google.api_core.path_template.expand( + 'projects/{project}/instances/{instance}/clusters/{cluster}', + project=project, + instance=instance, + cluster=cluster, + ) + + @classmethod + def snapshot_path(cls, project, instance, cluster, snapshot): + """Return a fully-qualified snapshot string.""" + return google.api_core.path_template.expand( + 'projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}', + project=project, + instance=instance, + cluster=cluster, + snapshot=snapshot, + ) + + @classmethod + def table_path(cls, project, instance, table): + """Return a fully-qualified table string.""" + return google.api_core.path_template.expand( + 'projects/{project}/instances/{instance}/tables/{table}', + project=project, + instance=instance, + table=table, + ) + + def __init__(self, + channel=None, + credentials=None, + client_config=bigtable_table_admin_client_config.config, + client_info=None): + """Constructor. + + Args: + channel (grpc.Channel): A ``Channel`` instance through + which to make calls. This argument is mutually exclusive + with ``credentials``; providing both will raise an exception. + credentials (google.auth.credentials.Credentials): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If none + are specified, the client will attempt to ascertain the + credentials from the environment. + client_config (dict): A dictionary of call options for each + method. If not specified, the default configuration is used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + """ + # If both `channel` and `credentials` are specified, raise an + # exception (channels come with credentials baked in already). + if channel is not None and credentials is not None: + raise ValueError( + 'The `channel` and `credentials` arguments to {} are mutually ' + 'exclusive.'.format(self.__class__.__name__), ) + + # Create the channel. 
+ if channel is None: + channel = google.api_core.grpc_helpers.create_channel( + self.SERVICE_ADDRESS, + credentials=credentials, + scopes=self._DEFAULT_SCOPES, + ) + + # Create the gRPC stubs. + self.bigtable_table_admin_stub = ( + bigtable_table_admin_pb2_grpc.BigtableTableAdminStub(channel)) + + # Operations client for methods that return long-running operations + # futures. + self.operations_client = ( + google.api_core.operations_v1.OperationsClient(channel)) + + if client_info is None: + client_info = ( + google.api_core.gapic_v1.client_info.DEFAULT_CLIENT_INFO) + client_info.gapic_version = _GAPIC_LIBRARY_VERSION + + # Parse out the default settings for retry and timeout for each RPC + # from the client configuration. + # (Ordinarily, these are the defaults specified in the `*_config.py` + # file next to this one.) + method_configs = google.api_core.gapic_v1.config.parse_method_configs( + client_config['interfaces'][self._INTERFACE_NAME], ) + + # Write the "inner API call" methods to the class. + # These are wrapped versions of the gRPC stub methods, with retry and + # timeout configuration applied, called by the public methods on + # this class. + self._create_table = google.api_core.gapic_v1.method.wrap_method( + self.bigtable_table_admin_stub.CreateTable, + default_retry=method_configs['CreateTable'].retry, + default_timeout=method_configs['CreateTable'].timeout, + client_info=client_info, + ) + self._create_table_from_snapshot = google.api_core.gapic_v1.method.wrap_method( + self.bigtable_table_admin_stub.CreateTableFromSnapshot, + default_retry=method_configs['CreateTableFromSnapshot'].retry, + default_timeout=method_configs['CreateTableFromSnapshot'].timeout, + client_info=client_info, + ) + self._list_tables = google.api_core.gapic_v1.method.wrap_method( + self.bigtable_table_admin_stub.ListTables, + default_retry=method_configs['ListTables'].retry, + default_timeout=method_configs['ListTables'].timeout, + client_info=client_info, + ) + self._get_table = google.api_core.gapic_v1.method.wrap_method( + self.bigtable_table_admin_stub.GetTable, + default_retry=method_configs['GetTable'].retry, + default_timeout=method_configs['GetTable'].timeout, + client_info=client_info, + ) + self._delete_table = google.api_core.gapic_v1.method.wrap_method( + self.bigtable_table_admin_stub.DeleteTable, + default_retry=method_configs['DeleteTable'].retry, + default_timeout=method_configs['DeleteTable'].timeout, + client_info=client_info, + ) + self._modify_column_families = google.api_core.gapic_v1.method.wrap_method( + self.bigtable_table_admin_stub.ModifyColumnFamilies, + default_retry=method_configs['ModifyColumnFamilies'].retry, + default_timeout=method_configs['ModifyColumnFamilies'].timeout, + client_info=client_info, + ) + self._drop_row_range = google.api_core.gapic_v1.method.wrap_method( + self.bigtable_table_admin_stub.DropRowRange, + default_retry=method_configs['DropRowRange'].retry, + default_timeout=method_configs['DropRowRange'].timeout, + client_info=client_info, + ) + self._generate_consistency_token = google.api_core.gapic_v1.method.wrap_method( + self.bigtable_table_admin_stub.GenerateConsistencyToken, + default_retry=method_configs['GenerateConsistencyToken'].retry, + default_timeout=method_configs['GenerateConsistencyToken'].timeout, + client_info=client_info, + ) + self._check_consistency = google.api_core.gapic_v1.method.wrap_method( + self.bigtable_table_admin_stub.CheckConsistency, + default_retry=method_configs['CheckConsistency'].retry, + 
default_timeout=method_configs['CheckConsistency'].timeout, + client_info=client_info, + ) + self._snapshot_table = google.api_core.gapic_v1.method.wrap_method( + self.bigtable_table_admin_stub.SnapshotTable, + default_retry=method_configs['SnapshotTable'].retry, + default_timeout=method_configs['SnapshotTable'].timeout, + client_info=client_info, + ) + self._get_snapshot = google.api_core.gapic_v1.method.wrap_method( + self.bigtable_table_admin_stub.GetSnapshot, + default_retry=method_configs['GetSnapshot'].retry, + default_timeout=method_configs['GetSnapshot'].timeout, + client_info=client_info, + ) + self._list_snapshots = google.api_core.gapic_v1.method.wrap_method( + self.bigtable_table_admin_stub.ListSnapshots, + default_retry=method_configs['ListSnapshots'].retry, + default_timeout=method_configs['ListSnapshots'].timeout, + client_info=client_info, + ) + self._delete_snapshot = google.api_core.gapic_v1.method.wrap_method( + self.bigtable_table_admin_stub.DeleteSnapshot, + default_retry=method_configs['DeleteSnapshot'].retry, + default_timeout=method_configs['DeleteSnapshot'].timeout, + client_info=client_info, + ) + + # Service calls + def create_table(self, + parent, + table_id, + table, + initial_splits=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None): + """ + Creates a new table in the specified instance. + The table can be created with a full set of initial column families, + specified in the request. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableTableAdminClient() + >>> + >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') + >>> + >>> # TODO: Initialize ``table_id``: + >>> table_id = '' + >>> + >>> # TODO: Initialize ``table``: + >>> table = {} + >>> + >>> response = client.create_table(parent, table_id, table) + + Args: + parent (str): The unique name of the instance in which to create the table. + Values are of the form ``projects//instances/``. + table_id (str): The name by which the new table should be referred to within the parent + instance, e.g., ``foobar`` rather than ``/tables/foobar``. + table (Union[dict, ~google.cloud.bigtable_admin_v2.types.Table]): The Table to create. + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_admin_v2.types.Table` + initial_splits (list[Union[dict, ~google.cloud.bigtable_admin_v2.types.Split]]): The optional list of row keys that will be used to initially split the + table into several tablets (tablets are similar to HBase regions). + Given two split keys, ``s1`` and ``s2``, three tablets will be created, + spanning the key ranges: ``[, s1), [s1, s2), [s2, )``. + + Example: + + * Row keys := ``[\"a\", \"apple\", \"custom\", \"customer_1\", \"customer_2\",`` + :: + + `\"other\", \"zz\"]` + * initial_split_keys := ``[\"apple\", \"customer_1\", \"customer_2\", \"other\"]`` + * Key assignment: + :: + + - Tablet 1 `[, apple) => {\"a\"}.` + - Tablet 2 `[apple, customer_1) => {\"apple\", \"custom\"}.` + - Tablet 3 `[customer_1, customer_2) => {\"customer_1\"}.` + - Tablet 4 `[customer_2, other) => {\"customer_2\"}.` + - Tablet 5 `[other, ) => {\"other\", \"zz\"}.` + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_admin_v2.types.Split` + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will not + be retried. 
+ timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types.Table` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + if metadata is None: + metadata = [] + metadata = list(metadata) + request = bigtable_table_admin_pb2.CreateTableRequest( + parent=parent, + table_id=table_id, + table=table, + initial_splits=initial_splits, + ) + + routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + [('parent', parent)], ) + metadata.append(routing_header) + + return self._create_table( + request, retry=retry, timeout=timeout, metadata=metadata) + + def create_table_from_snapshot( + self, + parent, + table_id, + source_snapshot, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None): + """ + This is a private alpha release of Cloud Bigtable snapshots. This feature + is not currently available to most Cloud Bigtable customers. This feature + might be changed in backward-incompatible ways and is not recommended for + production use. It is not subject to any SLA or deprecation policy. + + Creates a new table from the specified snapshot. The target table must + not exist. The snapshot and the table must be in the same instance. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableTableAdminClient() + >>> + >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') + >>> + >>> # TODO: Initialize ``table_id``: + >>> table_id = '' + >>> + >>> # TODO: Initialize ``source_snapshot``: + >>> source_snapshot = '' + >>> + >>> response = client.create_table_from_snapshot(parent, table_id, source_snapshot) + >>> + >>> def callback(operation_future): + ... # Handle result. + ... result = operation_future.result() + >>> + >>> response.add_done_callback(callback) + >>> + >>> # Handle metadata. + >>> metadata = response.metadata() + + Args: + parent (str): The unique name of the instance in which to create the table. + Values are of the form ``projects//instances/``. + table_id (str): The name by which the new table should be referred to within the parent + instance, e.g., ``foobar`` rather than ``/tables/foobar``. + source_snapshot (str): The unique name of the snapshot from which to restore the table. The + snapshot and the table must be in the same instance. + Values are of the form + ``projects//instances//clusters//snapshots/``. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will not + be retried. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. 
+ google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + if metadata is None: + metadata = [] + metadata = list(metadata) + request = bigtable_table_admin_pb2.CreateTableFromSnapshotRequest( + parent=parent, + table_id=table_id, + source_snapshot=source_snapshot, + ) + + routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + [('parent', parent)], ) + metadata.append(routing_header) + + operation = self._create_table_from_snapshot( + request, retry=retry, timeout=timeout, metadata=metadata) + return google.api_core.operation.from_gapic( + operation, + self.operations_client, + table_pb2.Table, + metadata_type=bigtable_table_admin_pb2. + CreateTableFromSnapshotMetadata, + ) + + def list_tables(self, + parent, + view=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None): + """ + Lists all tables served from a specified instance. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableTableAdminClient() + >>> + >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') + >>> + >>> + >>> # Iterate over all results + >>> for element in client.list_tables(parent): + ... # process element + ... pass + >>> + >>> # Or iterate over results one page at a time + >>> for page in client.list_tables(parent, options=CallOptions(page_token=INITIAL_PAGE)): + ... for element in page: + ... # process element + ... pass + + Args: + parent (str): The unique name of the instance for which tables should be listed. + Values are of the form ``projects//instances/``. + view (~google.cloud.bigtable_admin_v2.types.View): The view to be applied to the returned tables' fields. + Defaults to ``NAME_ONLY`` if unspecified; no others are currently supported. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will not + be retried. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.gax.PageIterator` instance. By default, this + is an iterable of :class:`~google.cloud.bigtable_admin_v2.types.Table` instances. + This object can also be configured to iterate over the pages + of the response through the `options` parameter. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. 
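+
+ Note:
+ The returned iterator fetches pages lazily: each additional page is
+ requested from the service only as the iterator is consumed, using
+ the request's ``page_token`` and the response's ``next_page_token``.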
+ """ + if metadata is None: + metadata = [] + metadata = list(metadata) + request = bigtable_table_admin_pb2.ListTablesRequest( + parent=parent, + view=view, + ) + + routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + [('parent', parent)], ) + metadata.append(routing_header) + + iterator = google.api_core.page_iterator.GRPCIterator( + client=None, + method=functools.partial( + self._list_tables, + retry=retry, + timeout=timeout, + metadata=metadata), + request=request, + items_field='tables', + request_token_field='page_token', + response_token_field='next_page_token', + ) + return iterator + + def get_table(self, + name, + view=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None): + """ + Gets metadata information about the specified table. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableTableAdminClient() + >>> + >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + >>> + >>> response = client.get_table(name) + + Args: + name (str): The unique name of the requested table. + Values are of the form + ``projects//instances//tables/
``. + view (~google.cloud.bigtable_admin_v2.types.View): The view to be applied to the returned table's fields. + Defaults to ``SCHEMA_VIEW`` if unspecified. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will not + be retried. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types.Table` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + if metadata is None: + metadata = [] + metadata = list(metadata) + request = bigtable_table_admin_pb2.GetTableRequest( + name=name, + view=view, + ) + + routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + [('name', name)], ) + metadata.append(routing_header) + + return self._get_table( + request, retry=retry, timeout=timeout, metadata=metadata) + + def delete_table(self, + name, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None): + """ + Permanently deletes a specified table and all of its data. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableTableAdminClient() + >>> + >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + >>> + >>> client.delete_table(name) + + Args: + name (str): The unique name of the table to be deleted. + Values are of the form + ``projects//instances//tables/
``. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will not + be retried. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + if metadata is None: + metadata = [] + metadata = list(metadata) + request = bigtable_table_admin_pb2.DeleteTableRequest(name=name, ) + + routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + [('name', name)], ) + metadata.append(routing_header) + + self._delete_table( + request, retry=retry, timeout=timeout, metadata=metadata) + + def modify_column_families(self, + name, + modifications, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None): + """ + Performs a series of column family modifications on the specified table. + Either all or none of the modifications will occur before this method + returns, but data requests received prior to that point may see a table + where only some modifications have taken effect. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableTableAdminClient() + >>> + >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + >>> + >>> # TODO: Initialize ``modifications``: + >>> modifications = [] + >>> + >>> response = client.modify_column_families(name, modifications) + + Args: + name (str): The unique name of the table whose families should be modified. + Values are of the form + ``projects//instances//tables/
``. + modifications (list[Union[dict, ~google.cloud.bigtable_admin_v2.types.Modification]]): Modifications to be atomically applied to the specified table's families. + Entries are applied in order, meaning that earlier modifications can be + masked by later ones (in the case of repeated updates to the same family, + for example). + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_admin_v2.types.Modification` + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will not + be retried. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types.Table` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + if metadata is None: + metadata = [] + metadata = list(metadata) + request = bigtable_table_admin_pb2.ModifyColumnFamiliesRequest( + name=name, + modifications=modifications, + ) + + routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + [('name', name)], ) + metadata.append(routing_header) + + return self._modify_column_families( + request, retry=retry, timeout=timeout, metadata=metadata) + + def drop_row_range(self, + name, + row_key_prefix=None, + delete_all_data_from_table=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None): + """ + Permanently drop/delete a row range from a specified table. The request can + specify whether to delete all rows in a table, or only those that match a + particular prefix. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableTableAdminClient() + >>> + >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + >>> + >>> client.drop_row_range(name) + + Args: + name (str): The unique name of the table on which to drop a range of rows. + Values are of the form + ``projects//instances//tables/
``. + row_key_prefix (bytes): Delete all rows that start with this row key prefix. Prefix cannot be + zero length. + delete_all_data_from_table (bool): Delete all rows in the table. Setting this to false is a no-op. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will not + be retried. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + if metadata is None: + metadata = [] + metadata = list(metadata) + # Sanity check: We have some fields which are mutually exclusive; + # raise ValueError if more than one is sent. + google.api_core.protobuf_helpers.check_oneof( + row_key_prefix=row_key_prefix, + delete_all_data_from_table=delete_all_data_from_table, + ) + + request = bigtable_table_admin_pb2.DropRowRangeRequest( + name=name, + row_key_prefix=row_key_prefix, + delete_all_data_from_table=delete_all_data_from_table, + ) + + routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + [('name', name)], ) + metadata.append(routing_header) + + self._drop_row_range( + request, retry=retry, timeout=timeout, metadata=metadata) + + def generate_consistency_token( + self, + name, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None): + """ + This is a private alpha release of Cloud Bigtable replication. This feature + is not currently available to most Cloud Bigtable customers. This feature + might be changed in backward-incompatible ways and is not recommended for + production use. It is not subject to any SLA or deprecation policy. + + Generates a consistency token for a Table, which can be used in + CheckConsistency to check whether mutations to the table that finished + before this call started have been replicated. The tokens will be available + for 90 days. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableTableAdminClient() + >>> + >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + >>> + >>> response = client.generate_consistency_token(name) + + Args: + name (str): The unique name of the Table for which to create a consistency token. + Values are of the form + ``projects//instances//tables/
``. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will not + be retried. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenResponse` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + if metadata is None: + metadata = [] + metadata = list(metadata) + request = bigtable_table_admin_pb2.GenerateConsistencyTokenRequest( + name=name, ) + + routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + [('name', name)], ) + metadata.append(routing_header) + + return self._generate_consistency_token( + request, retry=retry, timeout=timeout, metadata=metadata) + + def check_consistency(self, + name, + consistency_token, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None): + """ + This is a private alpha release of Cloud Bigtable replication. This feature + is not currently available to most Cloud Bigtable customers. This feature + might be changed in backward-incompatible ways and is not recommended for + production use. It is not subject to any SLA or deprecation policy. + + Checks replication consistency based on a consistency token, that is, if + replication has caught up based on the conditions specified in the token + and the check request. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableTableAdminClient() + >>> + >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + >>> + >>> # TODO: Initialize ``consistency_token``: + >>> consistency_token = '' + >>> + >>> response = client.check_consistency(name, consistency_token) + + Args: + name (str): The unique name of the Table for which to check replication consistency. + Values are of the form + ``projects//instances//tables/
``. + consistency_token (str): The token created using GenerateConsistencyToken for the Table. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will not + be retried. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types.CheckConsistencyResponse` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + if metadata is None: + metadata = [] + metadata = list(metadata) + request = bigtable_table_admin_pb2.CheckConsistencyRequest( + name=name, + consistency_token=consistency_token, + ) + + routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + [('name', name)], ) + metadata.append(routing_header) + + return self._check_consistency( + request, retry=retry, timeout=timeout, metadata=metadata) + + def snapshot_table(self, + name, + cluster, + snapshot_id, + description, + ttl=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None): + """ + This is a private alpha release of Cloud Bigtable snapshots. This feature + is not currently available to most Cloud Bigtable customers. This feature + might be changed in backward-incompatible ways and is not recommended for + production use. It is not subject to any SLA or deprecation policy. + + Creates a new snapshot in the specified cluster from the specified + source table. The cluster and the table must be in the same instance. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableTableAdminClient() + >>> + >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + >>> + >>> # TODO: Initialize ``cluster``: + >>> cluster = '' + >>> + >>> # TODO: Initialize ``snapshot_id``: + >>> snapshot_id = '' + >>> + >>> # TODO: Initialize ``description``: + >>> description = '' + >>> + >>> response = client.snapshot_table(name, cluster, snapshot_id, description) + >>> + >>> def callback(operation_future): + ... # Handle result. + ... result = operation_future.result() + >>> + >>> response.add_done_callback(callback) + >>> + >>> # Handle metadata. + >>> metadata = response.metadata() + + Args: + name (str): The unique name of the table to have the snapshot taken. + Values are of the form + ``projects//instances//tables/
``. + cluster (str): The name of the cluster where the snapshot will be created in. + Values are of the form + ``projects//instances//clusters/``. + snapshot_id (str): The ID by which the new snapshot should be referred to within the parent + cluster, e.g., ``mysnapshot`` of the form: ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*`` + rather than + ``projects//instances//clusters//snapshots/mysnapshot``. + description (str): Description of the snapshot. + ttl (Union[dict, ~google.cloud.bigtable_admin_v2.types.Duration]): The amount of time that the new snapshot can stay active after it is + created. Once 'ttl' expires, the snapshot will get deleted. The maximum + amount of time a snapshot can stay active is 7 days. If 'ttl' is not + specified, the default value of 24 hours will be used. + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_admin_v2.types.Duration` + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will not + be retried. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + if metadata is None: + metadata = [] + metadata = list(metadata) + request = bigtable_table_admin_pb2.SnapshotTableRequest( + name=name, + cluster=cluster, + snapshot_id=snapshot_id, + description=description, + ttl=ttl, + ) + + routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + [('name', name)], ) + metadata.append(routing_header) + + operation = self._snapshot_table( + request, retry=retry, timeout=timeout, metadata=metadata) + return google.api_core.operation.from_gapic( + operation, + self.operations_client, + table_pb2.Snapshot, + metadata_type=bigtable_table_admin_pb2.SnapshotTableMetadata, + ) + + def get_snapshot(self, + name, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None): + """ + This is a private alpha release of Cloud Bigtable snapshots. This feature + is not currently available to most Cloud Bigtable customers. This feature + might be changed in backward-incompatible ways and is not recommended for + production use. It is not subject to any SLA or deprecation policy. + + Gets metadata information about the specified snapshot. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableTableAdminClient() + >>> + >>> name = client.snapshot_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]', '[SNAPSHOT]') + >>> + >>> response = client.get_snapshot(name) + + Args: + name (str): The unique name of the requested snapshot. + Values are of the form + ``projects//instances//clusters//snapshots/``. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will not + be retried. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. 
Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types.Snapshot` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + if metadata is None: + metadata = [] + metadata = list(metadata) + request = bigtable_table_admin_pb2.GetSnapshotRequest(name=name, ) + + routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + [('name', name)], ) + metadata.append(routing_header) + + return self._get_snapshot( + request, retry=retry, timeout=timeout, metadata=metadata) + + def list_snapshots(self, + parent, + page_size=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None): + """ + This is a private alpha release of Cloud Bigtable snapshots. This feature + is not currently available to most Cloud Bigtable customers. This feature + might be changed in backward-incompatible ways and is not recommended for + production use. It is not subject to any SLA or deprecation policy. + + Lists all snapshots associated with the specified cluster. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableTableAdminClient() + >>> + >>> parent = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') + >>> + >>> + >>> # Iterate over all results + >>> for element in client.list_snapshots(parent): + ... # process element + ... pass + >>> + >>> # Or iterate over results one page at a time + >>> for page in client.list_snapshots(parent, options=CallOptions(page_token=INITIAL_PAGE)): + ... for element in page: + ... # process element + ... pass + + Args: + parent (str): The unique name of the cluster for which snapshots should be listed. + Values are of the form + ``projects//instances//clusters/``. + Use `` = '-'`` to list snapshots for all clusters in an instance, + e.g., ``projects//instances//clusters/-``. + page_size (int): The maximum number of resources contained in the + underlying API response. If page streaming is performed per- + resource, this parameter does not affect the return value. If page + streaming is performed per-page, this determines the maximum number + of resources in a page. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will not + be retried. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.gax.PageIterator` instance. By default, this + is an iterable of :class:`~google.cloud.bigtable_admin_v2.types.Snapshot` instances. + This object can also be configured to iterate over the pages + of the response through the `options` parameter. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. 
+ """ + if metadata is None: + metadata = [] + metadata = list(metadata) + request = bigtable_table_admin_pb2.ListSnapshotsRequest( + parent=parent, + page_size=page_size, + ) + + routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + [('parent', parent)], ) + metadata.append(routing_header) + + iterator = google.api_core.page_iterator.GRPCIterator( + client=None, + method=functools.partial( + self._list_snapshots, + retry=retry, + timeout=timeout, + metadata=metadata), + request=request, + items_field='snapshots', + request_token_field='page_token', + response_token_field='next_page_token', + ) + return iterator + + def delete_snapshot(self, + name, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None): + """ + This is a private alpha release of Cloud Bigtable snapshots. This feature + is not currently available to most Cloud Bigtable customers. This feature + might be changed in backward-incompatible ways and is not recommended for + production use. It is not subject to any SLA or deprecation policy. + + Permanently deletes the specified snapshot. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableTableAdminClient() + >>> + >>> name = client.snapshot_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]', '[SNAPSHOT]') + >>> + >>> client.delete_snapshot(name) + + Args: + name (str): The unique name of the snapshot to be deleted. + Values are of the form + ``projects//instances//clusters//snapshots/``. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will not + be retried. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. 
+ """ + if metadata is None: + metadata = [] + metadata = list(metadata) + request = bigtable_table_admin_pb2.DeleteSnapshotRequest(name=name, ) + + routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + [('name', name)], ) + metadata.append(routing_header) + + self._delete_snapshot( + request, retry=retry, timeout=timeout, metadata=metadata) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py new file mode 100644 index 000000000000..e5e0161aeb4b --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py @@ -0,0 +1,88 @@ +config = { + "interfaces": { + "google.bigtable.admin.v2.BigtableTableAdmin": { + "retry_codes": { + "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], + "non_idempotent": [] + }, + "retry_params": { + "default": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 20000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 20000, + "total_timeout_millis": 600000 + } + }, + "methods": { + "CreateTable": { + "timeout_millis": 130000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "default" + }, + "CreateTableFromSnapshot": { + "timeout_millis": 60000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "default" + }, + "ListTables": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "default" + }, + "GetTable": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "default" + }, + "DeleteTable": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "default" + }, + "ModifyColumnFamilies": { + "timeout_millis": 60000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "default" + }, + "DropRowRange": { + "timeout_millis": 60000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "default" + }, + "GenerateConsistencyToken": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "default" + }, + "CheckConsistency": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "default" + }, + "SnapshotTable": { + "timeout_millis": 60000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "default" + }, + "GetSnapshot": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "default" + }, + "ListSnapshots": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "default" + }, + "DeleteSnapshot": { + "timeout_millis": 60000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "default" + } + } + } + } +} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py new file mode 100644 index 000000000000..36fc4aa470aa --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py @@ -0,0 +1,177 @@ +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Wrappers for protocol buffer enum types.""" + + +class StorageType(object): + """ + Storage media types for persisting Bigtable data. + + Attributes: + STORAGE_TYPE_UNSPECIFIED (int): The user did not specify a storage type. + SSD (int): Flash (SSD) storage should be used. + HDD (int): Magnetic drive (HDD) storage should be used. + """ + STORAGE_TYPE_UNSPECIFIED = 0 + SSD = 1 + HDD = 2 + + +class Instance(object): + class State(object): + """ + Possible states of an instance. + + Attributes: + STATE_NOT_KNOWN (int): The state of the instance could not be determined. + READY (int): The instance has been successfully created and can serve requests + to its tables. + CREATING (int): The instance is currently being created, and may be destroyed + if the creation process encounters an error. + """ + STATE_NOT_KNOWN = 0 + READY = 1 + CREATING = 2 + + class Type(object): + """ + The type of the instance. + + Attributes: + TYPE_UNSPECIFIED (int): The type of the instance is unspecified. If set when creating an + instance, a ``PRODUCTION`` instance will be created. If set when updating + an instance, the type will be left unchanged. + PRODUCTION (int): An instance meant for production use. ``serve_nodes`` must be set + on the cluster. + DEVELOPMENT (int): The instance is meant for development and testing purposes only; it has + no performance or uptime guarantees and is not covered by SLA. + After a development instance is created, it can be upgraded by + updating the instance to type ``PRODUCTION``. An instance created + as a production instance cannot be changed to a development instance. + When creating a development instance, ``serve_nodes`` on the cluster must + not be set. + """ + TYPE_UNSPECIFIED = 0 + PRODUCTION = 1 + DEVELOPMENT = 2 + + +class Cluster(object): + class State(object): + """ + Possible states of a cluster. + + Attributes: + STATE_NOT_KNOWN (int): The state of the cluster could not be determined. + READY (int): The cluster has been successfully created and is ready to serve requests. + CREATING (int): The cluster is currently being created, and may be destroyed + if the creation process encounters an error. + A cluster may not be able to serve requests while being created. + RESIZING (int): The cluster is currently being resized, and may revert to its previous + node count if the process encounters an error. + A cluster is still capable of serving requests while being resized, + but may exhibit performance as if its number of allocated nodes is + between the starting and requested states. + DISABLED (int): The cluster has no backing nodes. The data (tables) still + exist, but no operations can be performed on the cluster. + """ + STATE_NOT_KNOWN = 0 + READY = 1 + CREATING = 2 + RESIZING = 3 + DISABLED = 4 + + +class Table(object): + class TimestampGranularity(object): + """ + Possible timestamp granularities to use when keeping multiple versions + of data in a table. + + Attributes: + TIMESTAMP_GRANULARITY_UNSPECIFIED (int): The user did not specify a granularity. Should not be returned. + When specified during table creation, MILLIS will be used. 
+ MILLIS (int): The table keeps data versioned at a granularity of 1ms. + """ + TIMESTAMP_GRANULARITY_UNSPECIFIED = 0 + MILLIS = 1 + + class View(object): + """ + Defines a view over a table's fields. + + Attributes: + VIEW_UNSPECIFIED (int): Uses the default view for each method as documented in its request. + NAME_ONLY (int): Only populates ``name``. + SCHEMA_VIEW (int): Only populates ``name`` and fields related to the table's schema. + REPLICATION_VIEW (int): This is a private alpha release of Cloud Bigtable replication. This + feature is not currently available to most Cloud Bigtable customers. This + feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or + deprecation policy. + + Only populates ``name`` and fields related to the table's + replication state. + FULL (int): Populates all fields. + """ + VIEW_UNSPECIFIED = 0 + NAME_ONLY = 1 + SCHEMA_VIEW = 2 + REPLICATION_VIEW = 3 + FULL = 4 + + class ClusterState(object): + class ReplicationState(object): + """ + Table replication states. + + Attributes: + STATE_NOT_KNOWN (int): The replication state of the table is unknown in this cluster. + INITIALIZING (int): The cluster was recently created, and the table must finish copying + over pre-existing data from other clusters before it can begin + receiving live replication updates and serving + ``Data API`` requests. + PLANNED_MAINTENANCE (int): The table is temporarily unable to serve + ``Data API`` requests from this + cluster due to planned internal maintenance. + UNPLANNED_MAINTENANCE (int): The table is temporarily unable to serve + ``Data API`` requests from this + cluster due to unplanned or emergency maintenance. + READY (int): The table can serve + ``Data API`` requests from this + cluster. Depending on replication delay, reads may not immediately + reflect the state of the table in other clusters. + """ + STATE_NOT_KNOWN = 0 + INITIALIZING = 1 + PLANNED_MAINTENANCE = 2 + UNPLANNED_MAINTENANCE = 3 + READY = 4 + + +class Snapshot(object): + class State(object): + """ + Possible states of a snapshot. + + Attributes: + STATE_NOT_KNOWN (int): The state of the snapshot could not be determined. + READY (int): The snapshot has been successfully created and can serve all requests. + CREATING (int): The snapshot is currently being created, and may be destroyed if the + creation process encounters an error. A snapshot may not be restored to a + table while it is being created. + """ + STATE_NOT_KNOWN = 0 + READY = 1 + CREATING = 2 diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py new file mode 100644 index 000000000000..eb795e269af4 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py @@ -0,0 +1,1675 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/cloud/bigtable/admin_v2/proto/bigtable_instance_admin.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 +from google.cloud.bigtable_admin_v2.proto import instance_pb2 as google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2 +from google.iam.v1 import iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2 +from google.iam.v1 import policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2 +from google.longrunning import operations_pb2 as google_dot_longrunning_dot_operations__pb2 +from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 +from google.protobuf import field_mask_pb2 as google_dot_protobuf_dot_field__mask__pb2 +from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='google/cloud/bigtable/admin_v2/proto/bigtable_instance_admin.proto', + package='google.bigtable.admin.v2', + syntax='proto3', + serialized_pb=_b('\nBgoogle/cloud/bigtable/admin_v2/proto/bigtable_instance_admin.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x33google/cloud/bigtable/admin_v2/proto/instance.proto\x1a\x1egoogle/iam/v1/iam_policy.proto\x1a\x1agoogle/iam/v1/policy.proto\x1a#google/longrunning/operations.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\x97\x02\n\x15\x43reateInstanceRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x13\n\x0binstance_id\x18\x02 \x01(\t\x12\x34\n\x08instance\x18\x03 \x01(\x0b\x32\".google.bigtable.admin.v2.Instance\x12O\n\x08\x63lusters\x18\x04 \x03(\x0b\x32=.google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry\x1aR\n\rClustersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x30\n\x05value\x18\x02 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster:\x02\x38\x01\"\"\n\x12GetInstanceRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\":\n\x14ListInstancesRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x12\n\npage_token\x18\x02 \x01(\t\"\x81\x01\n\x15ListInstancesResponse\x12\x35\n\tinstances\x18\x01 \x03(\x0b\x32\".google.bigtable.admin.v2.Instance\x12\x18\n\x10\x66\x61iled_locations\x18\x02 \x03(\t\x12\x17\n\x0fnext_page_token\x18\x03 \x01(\t\"\x85\x01\n\x1cPartialUpdateInstanceRequest\x12\x34\n\x08instance\x18\x01 \x01(\x0b\x32\".google.bigtable.admin.v2.Instance\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\"%\n\x15\x44\x65leteInstanceRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"n\n\x14\x43reateClusterRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x12\n\ncluster_id\x18\x02 \x01(\t\x12\x32\n\x07\x63luster\x18\x03 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster\"!\n\x11GetClusterRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"9\n\x13ListClustersRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x12\n\npage_token\x18\x02 \x01(\t\"~\n\x14ListClustersResponse\x12\x33\n\x08\x63lusters\x18\x01 \x03(\x0b\x32!.google.bigtable.admin.v2.Cluster\x12\x18\n\x10\x66\x61iled_locations\x18\x02 \x03(\t\x12\x17\n\x0fnext_page_token\x18\x03 
\x01(\t\"$\n\x14\x44\x65leteClusterRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\xc6\x01\n\x16\x43reateInstanceMetadata\x12I\n\x10original_request\x18\x01 \x01(\x0b\x32/.google.bigtable.admin.v2.CreateInstanceRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xcd\x01\n\x16UpdateInstanceMetadata\x12P\n\x10original_request\x18\x01 \x01(\x0b\x32\x36.google.bigtable.admin.v2.PartialUpdateInstanceRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xc4\x01\n\x15\x43reateClusterMetadata\x12H\n\x10original_request\x18\x01 \x01(\x0b\x32..google.bigtable.admin.v2.CreateClusterRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xb7\x01\n\x15UpdateClusterMetadata\x12;\n\x10original_request\x18\x01 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\x95\x01\n\x17\x43reateAppProfileRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t\x12\x39\n\x0b\x61pp_profile\x18\x03 \x01(\x0b\x32$.google.bigtable.admin.v2.AppProfile\x12\x17\n\x0fignore_warnings\x18\x04 \x01(\x08\"$\n\x14GetAppProfileRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"<\n\x16ListAppProfilesRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x12\n\npage_token\x18\x02 \x01(\t\"n\n\x17ListAppProfilesResponse\x12:\n\x0c\x61pp_profiles\x18\x01 \x03(\x0b\x32$.google.bigtable.admin.v2.AppProfile\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"\x9e\x01\n\x17UpdateAppProfileRequest\x12\x39\n\x0b\x61pp_profile\x18\x01 \x01(\x0b\x32$.google.bigtable.admin.v2.AppProfile\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\x12\x17\n\x0fignore_warnings\x18\x03 \x01(\x08\"@\n\x17\x44\x65leteAppProfileRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x17\n\x0fignore_warnings\x18\x02 
\x01(\x08\"\x1a\n\x18UpdateAppProfileMetadata2\xaa\x17\n\x15\x42igtableInstanceAdmin\x12\x8e\x01\n\x0e\x43reateInstance\x12/.google.bigtable.admin.v2.CreateInstanceRequest\x1a\x1d.google.longrunning.Operation\",\x82\xd3\xe4\x93\x02&\"!/v2/{parent=projects/*}/instances:\x01*\x12\x8a\x01\n\x0bGetInstance\x12,.google.bigtable.admin.v2.GetInstanceRequest\x1a\".google.bigtable.admin.v2.Instance\")\x82\xd3\xe4\x93\x02#\x12!/v2/{name=projects/*/instances/*}\x12\x9b\x01\n\rListInstances\x12..google.bigtable.admin.v2.ListInstancesRequest\x1a/.google.bigtable.admin.v2.ListInstancesResponse\")\x82\xd3\xe4\x93\x02#\x12!/v2/{parent=projects/*}/instances\x12\x86\x01\n\x0eUpdateInstance\x12\".google.bigtable.admin.v2.Instance\x1a\".google.bigtable.admin.v2.Instance\",\x82\xd3\xe4\x93\x02&\x1a!/v2/{name=projects/*/instances/*}:\x01*\x12\xac\x01\n\x15PartialUpdateInstance\x12\x36.google.bigtable.admin.v2.PartialUpdateInstanceRequest\x1a\x1d.google.longrunning.Operation\"<\x82\xd3\xe4\x93\x02\x36\x32*/v2/{instance.name=projects/*/instances/*}:\x08instance\x12\x84\x01\n\x0e\x44\x65leteInstance\x12/.google.bigtable.admin.v2.DeleteInstanceRequest\x1a\x16.google.protobuf.Empty\")\x82\xd3\xe4\x93\x02#*!/v2/{name=projects/*/instances/*}\x12\x9d\x01\n\rCreateCluster\x12..google.bigtable.admin.v2.CreateClusterRequest\x1a\x1d.google.longrunning.Operation\"=\x82\xd3\xe4\x93\x02\x37\",/v2/{parent=projects/*/instances/*}/clusters:\x07\x63luster\x12\x92\x01\n\nGetCluster\x12+.google.bigtable.admin.v2.GetClusterRequest\x1a!.google.bigtable.admin.v2.Cluster\"4\x82\xd3\xe4\x93\x02.\x12,/v2/{name=projects/*/instances/*/clusters/*}\x12\xa3\x01\n\x0cListClusters\x12-.google.bigtable.admin.v2.ListClustersRequest\x1a..google.bigtable.admin.v2.ListClustersResponse\"4\x82\xd3\xe4\x93\x02.\x12,/v2/{parent=projects/*/instances/*}/clusters\x12\x8a\x01\n\rUpdateCluster\x12!.google.bigtable.admin.v2.Cluster\x1a\x1d.google.longrunning.Operation\"7\x82\xd3\xe4\x93\x02\x31\x1a,/v2/{name=projects/*/instances/*/clusters/*}:\x01*\x12\x8d\x01\n\rDeleteCluster\x12..google.bigtable.admin.v2.DeleteClusterRequest\x1a\x16.google.protobuf.Empty\"4\x82\xd3\xe4\x93\x02.*,/v2/{name=projects/*/instances/*/clusters/*}\x12\xb1\x01\n\x10\x43reateAppProfile\x12\x31.google.bigtable.admin.v2.CreateAppProfileRequest\x1a$.google.bigtable.admin.v2.AppProfile\"D\x82\xd3\xe4\x93\x02>\"//v2/{parent=projects/*/instances/*}/appProfiles:\x0b\x61pp_profile\x12\x9e\x01\n\rGetAppProfile\x12..google.bigtable.admin.v2.GetAppProfileRequest\x1a$.google.bigtable.admin.v2.AppProfile\"7\x82\xd3\xe4\x93\x02\x31\x12//v2/{name=projects/*/instances/*/appProfiles/*}\x12\xaf\x01\n\x0fListAppProfiles\x12\x30.google.bigtable.admin.v2.ListAppProfilesRequest\x1a\x31.google.bigtable.admin.v2.ListAppProfilesResponse\"7\x82\xd3\xe4\x93\x02\x31\x12//v2/{parent=projects/*/instances/*}/appProfiles\x12\xb6\x01\n\x10UpdateAppProfile\x12\x31.google.bigtable.admin.v2.UpdateAppProfileRequest\x1a\x1d.google.longrunning.Operation\"P\x82\xd3\xe4\x93\x02J2;/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}:\x0b\x61pp_profile\x12\x96\x01\n\x10\x44\x65leteAppProfile\x12\x31.google.bigtable.admin.v2.DeleteAppProfileRequest\x1a\x16.google.protobuf.Empty\"7\x82\xd3\xe4\x93\x02\x31*//v2/{name=projects/*/instances/*/appProfiles/*}\x12\x88\x01\n\x0cGetIamPolicy\x12\".google.iam.v1.GetIamPolicyRequest\x1a\x15.google.iam.v1.Policy\"=\x82\xd3\xe4\x93\x02\x37\"2/v2/{resource=projects/*/instances/*}:getIamPolicy:\x01*\x12\x88\x01\n\x0cSetIamPolicy\x12\".google.iam.v1.SetIamPolicyRequest\x1a\x15.google.iam
.v1.Policy\"=\x82\xd3\xe4\x93\x02\x37\"2/v2/{resource=projects/*/instances/*}:setIamPolicy:\x01*\x12\xae\x01\n\x12TestIamPermissions\x12(.google.iam.v1.TestIamPermissionsRequest\x1a).google.iam.v1.TestIamPermissionsResponse\"C\x82\xd3\xe4\x93\x02=\"8/v2/{resource=projects/*/instances/*}:testIamPermissions:\x01*B\xbd\x01\n\x1c\x63om.google.bigtable.admin.v2B\x1a\x42igtableInstanceAdminProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2b\x06proto3') + , + dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.DESCRIPTOR,google_dot_iam_dot_v1_dot_iam__policy__pb2.DESCRIPTOR,google_dot_iam_dot_v1_dot_policy__pb2.DESCRIPTOR,google_dot_longrunning_dot_operations__pb2.DESCRIPTOR,google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,google_dot_protobuf_dot_field__mask__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,]) + + + + +_CREATEINSTANCEREQUEST_CLUSTERSENTRY = _descriptor.Descriptor( + name='ClustersEntry', + full_name='google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='value', full_name='google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=570, + serialized_end=652, +) + +_CREATEINSTANCEREQUEST = _descriptor.Descriptor( + name='CreateInstanceRequest', + full_name='google.bigtable.admin.v2.CreateInstanceRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='parent', full_name='google.bigtable.admin.v2.CreateInstanceRequest.parent', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='instance_id', full_name='google.bigtable.admin.v2.CreateInstanceRequest.instance_id', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='instance', full_name='google.bigtable.admin.v2.CreateInstanceRequest.instance', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, 
extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='clusters', full_name='google.bigtable.admin.v2.CreateInstanceRequest.clusters', index=3, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[_CREATEINSTANCEREQUEST_CLUSTERSENTRY, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=373, + serialized_end=652, +) + + +_GETINSTANCEREQUEST = _descriptor.Descriptor( + name='GetInstanceRequest', + full_name='google.bigtable.admin.v2.GetInstanceRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.GetInstanceRequest.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=654, + serialized_end=688, +) + + +_LISTINSTANCESREQUEST = _descriptor.Descriptor( + name='ListInstancesRequest', + full_name='google.bigtable.admin.v2.ListInstancesRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='parent', full_name='google.bigtable.admin.v2.ListInstancesRequest.parent', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='page_token', full_name='google.bigtable.admin.v2.ListInstancesRequest.page_token', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=690, + serialized_end=748, +) + + +_LISTINSTANCESRESPONSE = _descriptor.Descriptor( + name='ListInstancesResponse', + full_name='google.bigtable.admin.v2.ListInstancesResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='instances', full_name='google.bigtable.admin.v2.ListInstancesResponse.instances', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='failed_locations', full_name='google.bigtable.admin.v2.ListInstancesResponse.failed_locations', index=1, + number=2, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + 
_descriptor.FieldDescriptor( + name='next_page_token', full_name='google.bigtable.admin.v2.ListInstancesResponse.next_page_token', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=751, + serialized_end=880, +) + + +_PARTIALUPDATEINSTANCEREQUEST = _descriptor.Descriptor( + name='PartialUpdateInstanceRequest', + full_name='google.bigtable.admin.v2.PartialUpdateInstanceRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='instance', full_name='google.bigtable.admin.v2.PartialUpdateInstanceRequest.instance', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='update_mask', full_name='google.bigtable.admin.v2.PartialUpdateInstanceRequest.update_mask', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=883, + serialized_end=1016, +) + + +_DELETEINSTANCEREQUEST = _descriptor.Descriptor( + name='DeleteInstanceRequest', + full_name='google.bigtable.admin.v2.DeleteInstanceRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.DeleteInstanceRequest.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1018, + serialized_end=1055, +) + + +_CREATECLUSTERREQUEST = _descriptor.Descriptor( + name='CreateClusterRequest', + full_name='google.bigtable.admin.v2.CreateClusterRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='parent', full_name='google.bigtable.admin.v2.CreateClusterRequest.parent', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='cluster_id', full_name='google.bigtable.admin.v2.CreateClusterRequest.cluster_id', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( 
+ name='cluster', full_name='google.bigtable.admin.v2.CreateClusterRequest.cluster', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1057, + serialized_end=1167, +) + + +_GETCLUSTERREQUEST = _descriptor.Descriptor( + name='GetClusterRequest', + full_name='google.bigtable.admin.v2.GetClusterRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.GetClusterRequest.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1169, + serialized_end=1202, +) + + +_LISTCLUSTERSREQUEST = _descriptor.Descriptor( + name='ListClustersRequest', + full_name='google.bigtable.admin.v2.ListClustersRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='parent', full_name='google.bigtable.admin.v2.ListClustersRequest.parent', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='page_token', full_name='google.bigtable.admin.v2.ListClustersRequest.page_token', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1204, + serialized_end=1261, +) + + +_LISTCLUSTERSRESPONSE = _descriptor.Descriptor( + name='ListClustersResponse', + full_name='google.bigtable.admin.v2.ListClustersResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='clusters', full_name='google.bigtable.admin.v2.ListClustersResponse.clusters', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='failed_locations', full_name='google.bigtable.admin.v2.ListClustersResponse.failed_locations', index=1, + number=2, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='next_page_token', full_name='google.bigtable.admin.v2.ListClustersResponse.next_page_token', index=2, + 
number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1263, + serialized_end=1389, +) + + +_DELETECLUSTERREQUEST = _descriptor.Descriptor( + name='DeleteClusterRequest', + full_name='google.bigtable.admin.v2.DeleteClusterRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.DeleteClusterRequest.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1391, + serialized_end=1427, +) + + +_CREATEINSTANCEMETADATA = _descriptor.Descriptor( + name='CreateInstanceMetadata', + full_name='google.bigtable.admin.v2.CreateInstanceMetadata', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='original_request', full_name='google.bigtable.admin.v2.CreateInstanceMetadata.original_request', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='request_time', full_name='google.bigtable.admin.v2.CreateInstanceMetadata.request_time', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='finish_time', full_name='google.bigtable.admin.v2.CreateInstanceMetadata.finish_time', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1430, + serialized_end=1628, +) + + +_UPDATEINSTANCEMETADATA = _descriptor.Descriptor( + name='UpdateInstanceMetadata', + full_name='google.bigtable.admin.v2.UpdateInstanceMetadata', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='original_request', full_name='google.bigtable.admin.v2.UpdateInstanceMetadata.original_request', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='request_time', full_name='google.bigtable.admin.v2.UpdateInstanceMetadata.request_time', index=1, + number=2, type=11, cpp_type=10, label=1, + 
has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='finish_time', full_name='google.bigtable.admin.v2.UpdateInstanceMetadata.finish_time', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1631, + serialized_end=1836, +) + + +_CREATECLUSTERMETADATA = _descriptor.Descriptor( + name='CreateClusterMetadata', + full_name='google.bigtable.admin.v2.CreateClusterMetadata', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='original_request', full_name='google.bigtable.admin.v2.CreateClusterMetadata.original_request', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='request_time', full_name='google.bigtable.admin.v2.CreateClusterMetadata.request_time', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='finish_time', full_name='google.bigtable.admin.v2.CreateClusterMetadata.finish_time', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1839, + serialized_end=2035, +) + + +_UPDATECLUSTERMETADATA = _descriptor.Descriptor( + name='UpdateClusterMetadata', + full_name='google.bigtable.admin.v2.UpdateClusterMetadata', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='original_request', full_name='google.bigtable.admin.v2.UpdateClusterMetadata.original_request', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='request_time', full_name='google.bigtable.admin.v2.UpdateClusterMetadata.request_time', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='finish_time', full_name='google.bigtable.admin.v2.UpdateClusterMetadata.finish_time', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, 
extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2038, + serialized_end=2221, +) + + +_CREATEAPPPROFILEREQUEST = _descriptor.Descriptor( + name='CreateAppProfileRequest', + full_name='google.bigtable.admin.v2.CreateAppProfileRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='parent', full_name='google.bigtable.admin.v2.CreateAppProfileRequest.parent', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='app_profile_id', full_name='google.bigtable.admin.v2.CreateAppProfileRequest.app_profile_id', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='app_profile', full_name='google.bigtable.admin.v2.CreateAppProfileRequest.app_profile', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='ignore_warnings', full_name='google.bigtable.admin.v2.CreateAppProfileRequest.ignore_warnings', index=3, + number=4, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2224, + serialized_end=2373, +) + + +_GETAPPPROFILEREQUEST = _descriptor.Descriptor( + name='GetAppProfileRequest', + full_name='google.bigtable.admin.v2.GetAppProfileRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.GetAppProfileRequest.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2375, + serialized_end=2411, +) + + +_LISTAPPPROFILESREQUEST = _descriptor.Descriptor( + name='ListAppProfilesRequest', + full_name='google.bigtable.admin.v2.ListAppProfilesRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='parent', full_name='google.bigtable.admin.v2.ListAppProfilesRequest.parent', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, 
file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='page_token', full_name='google.bigtable.admin.v2.ListAppProfilesRequest.page_token', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2413, + serialized_end=2473, +) + + +_LISTAPPPROFILESRESPONSE = _descriptor.Descriptor( + name='ListAppProfilesResponse', + full_name='google.bigtable.admin.v2.ListAppProfilesResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='app_profiles', full_name='google.bigtable.admin.v2.ListAppProfilesResponse.app_profiles', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='next_page_token', full_name='google.bigtable.admin.v2.ListAppProfilesResponse.next_page_token', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2475, + serialized_end=2585, +) + + +_UPDATEAPPPROFILEREQUEST = _descriptor.Descriptor( + name='UpdateAppProfileRequest', + full_name='google.bigtable.admin.v2.UpdateAppProfileRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='app_profile', full_name='google.bigtable.admin.v2.UpdateAppProfileRequest.app_profile', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='update_mask', full_name='google.bigtable.admin.v2.UpdateAppProfileRequest.update_mask', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='ignore_warnings', full_name='google.bigtable.admin.v2.UpdateAppProfileRequest.ignore_warnings', index=2, + number=3, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2588, + serialized_end=2746, +) + + +_DELETEAPPPROFILEREQUEST = _descriptor.Descriptor( + name='DeleteAppProfileRequest', + full_name='google.bigtable.admin.v2.DeleteAppProfileRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + 
_descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.DeleteAppProfileRequest.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='ignore_warnings', full_name='google.bigtable.admin.v2.DeleteAppProfileRequest.ignore_warnings', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2748, + serialized_end=2812, +) + + +_UPDATEAPPPROFILEMETADATA = _descriptor.Descriptor( + name='UpdateAppProfileMetadata', + full_name='google.bigtable.admin.v2.UpdateAppProfileMetadata', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2814, + serialized_end=2840, +) + +_CREATEINSTANCEREQUEST_CLUSTERSENTRY.fields_by_name['value'].message_type = google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._CLUSTER +_CREATEINSTANCEREQUEST_CLUSTERSENTRY.containing_type = _CREATEINSTANCEREQUEST +_CREATEINSTANCEREQUEST.fields_by_name['instance'].message_type = google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._INSTANCE +_CREATEINSTANCEREQUEST.fields_by_name['clusters'].message_type = _CREATEINSTANCEREQUEST_CLUSTERSENTRY +_LISTINSTANCESRESPONSE.fields_by_name['instances'].message_type = google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._INSTANCE +_PARTIALUPDATEINSTANCEREQUEST.fields_by_name['instance'].message_type = google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._INSTANCE +_PARTIALUPDATEINSTANCEREQUEST.fields_by_name['update_mask'].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK +_CREATECLUSTERREQUEST.fields_by_name['cluster'].message_type = google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._CLUSTER +_LISTCLUSTERSRESPONSE.fields_by_name['clusters'].message_type = google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._CLUSTER +_CREATEINSTANCEMETADATA.fields_by_name['original_request'].message_type = _CREATEINSTANCEREQUEST +_CREATEINSTANCEMETADATA.fields_by_name['request_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_CREATEINSTANCEMETADATA.fields_by_name['finish_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_UPDATEINSTANCEMETADATA.fields_by_name['original_request'].message_type = _PARTIALUPDATEINSTANCEREQUEST +_UPDATEINSTANCEMETADATA.fields_by_name['request_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_UPDATEINSTANCEMETADATA.fields_by_name['finish_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_CREATECLUSTERMETADATA.fields_by_name['original_request'].message_type = _CREATECLUSTERREQUEST +_CREATECLUSTERMETADATA.fields_by_name['request_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP 
+_CREATECLUSTERMETADATA.fields_by_name['finish_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_UPDATECLUSTERMETADATA.fields_by_name['original_request'].message_type = google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._CLUSTER +_UPDATECLUSTERMETADATA.fields_by_name['request_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_UPDATECLUSTERMETADATA.fields_by_name['finish_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_CREATEAPPPROFILEREQUEST.fields_by_name['app_profile'].message_type = google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._APPPROFILE +_LISTAPPPROFILESRESPONSE.fields_by_name['app_profiles'].message_type = google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._APPPROFILE +_UPDATEAPPPROFILEREQUEST.fields_by_name['app_profile'].message_type = google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._APPPROFILE +_UPDATEAPPPROFILEREQUEST.fields_by_name['update_mask'].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK +DESCRIPTOR.message_types_by_name['CreateInstanceRequest'] = _CREATEINSTANCEREQUEST +DESCRIPTOR.message_types_by_name['GetInstanceRequest'] = _GETINSTANCEREQUEST +DESCRIPTOR.message_types_by_name['ListInstancesRequest'] = _LISTINSTANCESREQUEST +DESCRIPTOR.message_types_by_name['ListInstancesResponse'] = _LISTINSTANCESRESPONSE +DESCRIPTOR.message_types_by_name['PartialUpdateInstanceRequest'] = _PARTIALUPDATEINSTANCEREQUEST +DESCRIPTOR.message_types_by_name['DeleteInstanceRequest'] = _DELETEINSTANCEREQUEST +DESCRIPTOR.message_types_by_name['CreateClusterRequest'] = _CREATECLUSTERREQUEST +DESCRIPTOR.message_types_by_name['GetClusterRequest'] = _GETCLUSTERREQUEST +DESCRIPTOR.message_types_by_name['ListClustersRequest'] = _LISTCLUSTERSREQUEST +DESCRIPTOR.message_types_by_name['ListClustersResponse'] = _LISTCLUSTERSRESPONSE +DESCRIPTOR.message_types_by_name['DeleteClusterRequest'] = _DELETECLUSTERREQUEST +DESCRIPTOR.message_types_by_name['CreateInstanceMetadata'] = _CREATEINSTANCEMETADATA +DESCRIPTOR.message_types_by_name['UpdateInstanceMetadata'] = _UPDATEINSTANCEMETADATA +DESCRIPTOR.message_types_by_name['CreateClusterMetadata'] = _CREATECLUSTERMETADATA +DESCRIPTOR.message_types_by_name['UpdateClusterMetadata'] = _UPDATECLUSTERMETADATA +DESCRIPTOR.message_types_by_name['CreateAppProfileRequest'] = _CREATEAPPPROFILEREQUEST +DESCRIPTOR.message_types_by_name['GetAppProfileRequest'] = _GETAPPPROFILEREQUEST +DESCRIPTOR.message_types_by_name['ListAppProfilesRequest'] = _LISTAPPPROFILESREQUEST +DESCRIPTOR.message_types_by_name['ListAppProfilesResponse'] = _LISTAPPPROFILESRESPONSE +DESCRIPTOR.message_types_by_name['UpdateAppProfileRequest'] = _UPDATEAPPPROFILEREQUEST +DESCRIPTOR.message_types_by_name['DeleteAppProfileRequest'] = _DELETEAPPPROFILEREQUEST +DESCRIPTOR.message_types_by_name['UpdateAppProfileMetadata'] = _UPDATEAPPPROFILEMETADATA +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +CreateInstanceRequest = _reflection.GeneratedProtocolMessageType('CreateInstanceRequest', (_message.Message,), dict( + + ClustersEntry = _reflection.GeneratedProtocolMessageType('ClustersEntry', (_message.Message,), dict( + DESCRIPTOR = _CREATEINSTANCEREQUEST_CLUSTERSENTRY, + __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry) + )) + , + DESCRIPTOR = _CREATEINSTANCEREQUEST, + __module__ = 
'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' + , + __doc__ = """Request message for BigtableInstanceAdmin.CreateInstance. + + + Attributes: + parent: + The unique name of the project in which to create the new + instance. Values are of the form ``projects/``. + instance_id: + The ID to be used when referring to the new instance within + its project, e.g., just ``myinstance`` rather than + ``projects/myproject/instances/myinstance``. + instance: + The instance to create. Fields marked ``OutputOnly`` must be + left blank. + clusters: + The clusters to be created within the instance, mapped by + desired cluster ID, e.g., just ``mycluster`` rather than ``pro + jects/myproject/instances/myinstance/clusters/mycluster``. + Fields marked ``OutputOnly`` must be left blank. Currently + exactly one cluster must be specified. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateInstanceRequest) + )) +_sym_db.RegisterMessage(CreateInstanceRequest) +_sym_db.RegisterMessage(CreateInstanceRequest.ClustersEntry) + +GetInstanceRequest = _reflection.GeneratedProtocolMessageType('GetInstanceRequest', (_message.Message,), dict( + DESCRIPTOR = _GETINSTANCEREQUEST, + __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' + , + __doc__ = """Request message for BigtableInstanceAdmin.GetInstance. + + + Attributes: + name: + The unique name of the requested instance. Values are of the + form ``projects//instances/``. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetInstanceRequest) + )) +_sym_db.RegisterMessage(GetInstanceRequest) + +ListInstancesRequest = _reflection.GeneratedProtocolMessageType('ListInstancesRequest', (_message.Message,), dict( + DESCRIPTOR = _LISTINSTANCESREQUEST, + __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' + , + __doc__ = """Request message for BigtableInstanceAdmin.ListInstances. + + + Attributes: + parent: + The unique name of the project for which a list of instances + is requested. Values are of the form ``projects/``. + page_token: + The value of ``next_page_token`` returned by a previous call. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListInstancesRequest) + )) +_sym_db.RegisterMessage(ListInstancesRequest) + +ListInstancesResponse = _reflection.GeneratedProtocolMessageType('ListInstancesResponse', (_message.Message,), dict( + DESCRIPTOR = _LISTINSTANCESRESPONSE, + __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' + , + __doc__ = """Response message for BigtableInstanceAdmin.ListInstances. + + + Attributes: + instances: + The list of requested instances. + failed_locations: + Locations from which Instance information could not be + retrieved, due to an outage or some other transient condition. + Instances whose Clusters are all in one of the failed + locations may be missing from ``instances``, and Instances + with at least one Cluster in a failed location may only have + partial information returned. + next_page_token: + Set if not all instances could be returned in a single + response. Pass this value to ``page_token`` in another request + to get the next page of results. 
+ """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListInstancesResponse) + )) +_sym_db.RegisterMessage(ListInstancesResponse) + +PartialUpdateInstanceRequest = _reflection.GeneratedProtocolMessageType('PartialUpdateInstanceRequest', (_message.Message,), dict( + DESCRIPTOR = _PARTIALUPDATEINSTANCEREQUEST, + __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' + , + __doc__ = """Request message for BigtableInstanceAdmin.PartialUpdateInstance. + + + Attributes: + instance: + The Instance which will (partially) replace the current value. + update_mask: + The subset of Instance fields which should be replaced. Must + be explicitly set. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.PartialUpdateInstanceRequest) + )) +_sym_db.RegisterMessage(PartialUpdateInstanceRequest) + +DeleteInstanceRequest = _reflection.GeneratedProtocolMessageType('DeleteInstanceRequest', (_message.Message,), dict( + DESCRIPTOR = _DELETEINSTANCEREQUEST, + __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' + , + __doc__ = """Request message for BigtableInstanceAdmin.DeleteInstance. + + + Attributes: + name: + The unique name of the instance to be deleted. Values are of + the form ``projects//instances/``. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteInstanceRequest) + )) +_sym_db.RegisterMessage(DeleteInstanceRequest) + +CreateClusterRequest = _reflection.GeneratedProtocolMessageType('CreateClusterRequest', (_message.Message,), dict( + DESCRIPTOR = _CREATECLUSTERREQUEST, + __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' + , + __doc__ = """Request message for BigtableInstanceAdmin.CreateCluster. + + + Attributes: + parent: + The unique name of the instance in which to create the new + cluster. Values are of the form + ``projects//instances/``. + cluster_id: + The ID to be used when referring to the new cluster within its + instance, e.g., just ``mycluster`` rather than ``projects/mypr + oject/instances/myinstance/clusters/mycluster``. + cluster: + The cluster to be created. Fields marked ``OutputOnly`` must + be left blank. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateClusterRequest) + )) +_sym_db.RegisterMessage(CreateClusterRequest) + +GetClusterRequest = _reflection.GeneratedProtocolMessageType('GetClusterRequest', (_message.Message,), dict( + DESCRIPTOR = _GETCLUSTERREQUEST, + __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' + , + __doc__ = """Request message for BigtableInstanceAdmin.GetCluster. + + + Attributes: + name: + The unique name of the requested cluster. Values are of the + form ``projects//instances//clusters/``. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetClusterRequest) + )) +_sym_db.RegisterMessage(GetClusterRequest) + +ListClustersRequest = _reflection.GeneratedProtocolMessageType('ListClustersRequest', (_message.Message,), dict( + DESCRIPTOR = _LISTCLUSTERSREQUEST, + __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' + , + __doc__ = """Request message for BigtableInstanceAdmin.ListClusters. + + + Attributes: + parent: + The unique name of the instance for which a list of clusters + is requested. Values are of the form + ``projects//instances/``. Use `` + = '-'`` to list Clusters for all Instances in a project, e.g., + ``projects/myproject/instances/-``. 
+ page_token: + The value of ``next_page_token`` returned by a previous call. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListClustersRequest) + )) +_sym_db.RegisterMessage(ListClustersRequest) + +ListClustersResponse = _reflection.GeneratedProtocolMessageType('ListClustersResponse', (_message.Message,), dict( + DESCRIPTOR = _LISTCLUSTERSRESPONSE, + __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' + , + __doc__ = """Response message for BigtableInstanceAdmin.ListClusters. + + + Attributes: + clusters: + The list of requested clusters. + failed_locations: + Locations from which Cluster information could not be + retrieved, due to an outage or some other transient condition. + Clusters from these locations may be missing from + ``clusters``, or may only have partial information returned. + next_page_token: + Set if not all clusters could be returned in a single + response. Pass this value to ``page_token`` in another request + to get the next page of results. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListClustersResponse) + )) +_sym_db.RegisterMessage(ListClustersResponse) + +DeleteClusterRequest = _reflection.GeneratedProtocolMessageType('DeleteClusterRequest', (_message.Message,), dict( + DESCRIPTOR = _DELETECLUSTERREQUEST, + __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' + , + __doc__ = """Request message for BigtableInstanceAdmin.DeleteCluster. + + + Attributes: + name: + The unique name of the cluster to be deleted. Values are of + the form ``projects//instances//clusters/``. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteClusterRequest) + )) +_sym_db.RegisterMessage(DeleteClusterRequest) + +CreateInstanceMetadata = _reflection.GeneratedProtocolMessageType('CreateInstanceMetadata', (_message.Message,), dict( + DESCRIPTOR = _CREATEINSTANCEMETADATA, + __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' + , + __doc__ = """The metadata for the Operation returned by CreateInstance. + + + Attributes: + original_request: + The request that prompted the initiation of this + CreateInstance operation. + request_time: + The time at which the original request was received. + finish_time: + The time at which the operation failed or was completed + successfully. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateInstanceMetadata) + )) +_sym_db.RegisterMessage(CreateInstanceMetadata) + +UpdateInstanceMetadata = _reflection.GeneratedProtocolMessageType('UpdateInstanceMetadata', (_message.Message,), dict( + DESCRIPTOR = _UPDATEINSTANCEMETADATA, + __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' + , + __doc__ = """The metadata for the Operation returned by UpdateInstance. + + + Attributes: + original_request: + The request that prompted the initiation of this + UpdateInstance operation. + request_time: + The time at which the original request was received. + finish_time: + The time at which the operation failed or was completed + successfully. 
+ """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.UpdateInstanceMetadata) + )) +_sym_db.RegisterMessage(UpdateInstanceMetadata) + +CreateClusterMetadata = _reflection.GeneratedProtocolMessageType('CreateClusterMetadata', (_message.Message,), dict( + DESCRIPTOR = _CREATECLUSTERMETADATA, + __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' + , + __doc__ = """The metadata for the Operation returned by CreateCluster. + + + Attributes: + original_request: + The request that prompted the initiation of this CreateCluster + operation. + request_time: + The time at which the original request was received. + finish_time: + The time at which the operation failed or was completed + successfully. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateClusterMetadata) + )) +_sym_db.RegisterMessage(CreateClusterMetadata) + +UpdateClusterMetadata = _reflection.GeneratedProtocolMessageType('UpdateClusterMetadata', (_message.Message,), dict( + DESCRIPTOR = _UPDATECLUSTERMETADATA, + __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' + , + __doc__ = """The metadata for the Operation returned by UpdateCluster. + + + Attributes: + original_request: + The request that prompted the initiation of this UpdateCluster + operation. + request_time: + The time at which the original request was received. + finish_time: + The time at which the operation failed or was completed + successfully. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.UpdateClusterMetadata) + )) +_sym_db.RegisterMessage(UpdateClusterMetadata) + +CreateAppProfileRequest = _reflection.GeneratedProtocolMessageType('CreateAppProfileRequest', (_message.Message,), dict( + DESCRIPTOR = _CREATEAPPPROFILEREQUEST, + __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' + , + __doc__ = """This is a private alpha release of Cloud Bigtable replication. This + feature is not currently available to most Cloud Bigtable customers. + This feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or + deprecation policy. + + Request message for BigtableInstanceAdmin.CreateAppProfile. + + + Attributes: + parent: + The unique name of the instance in which to create the new app + profile. Values are of the form + ``projects//instances/``. + app_profile_id: + The ID to be used when referring to the new app profile within + its instance, e.g., just ``myprofile`` rather than ``projects/ + myproject/instances/myinstance/appProfiles/myprofile``. + app_profile: + The app profile to be created. Fields marked ``OutputOnly`` + will be ignored. + ignore_warnings: + If true, ignore safety checks when creating the app profile. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateAppProfileRequest) + )) +_sym_db.RegisterMessage(CreateAppProfileRequest) + +GetAppProfileRequest = _reflection.GeneratedProtocolMessageType('GetAppProfileRequest', (_message.Message,), dict( + DESCRIPTOR = _GETAPPPROFILEREQUEST, + __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' + , + __doc__ = """This is a private alpha release of Cloud Bigtable replication. This + feature is not currently available to most Cloud Bigtable customers. + This feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or + deprecation policy. 
+ + Request message for BigtableInstanceAdmin.GetAppProfile. + + + Attributes: + name: + The unique name of the requested app profile. Values are of + the form ``projects//instances//appProfiles + /``. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetAppProfileRequest) + )) +_sym_db.RegisterMessage(GetAppProfileRequest) + +ListAppProfilesRequest = _reflection.GeneratedProtocolMessageType('ListAppProfilesRequest', (_message.Message,), dict( + DESCRIPTOR = _LISTAPPPROFILESREQUEST, + __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' + , + __doc__ = """This is a private alpha release of Cloud Bigtable replication. This + feature is not currently available to most Cloud Bigtable customers. + This feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or + deprecation policy. + + Request message for BigtableInstanceAdmin.ListAppProfiles. + + + Attributes: + parent: + The unique name of the instance for which a list of app + profiles is requested. Values are of the form + ``projects//instances/``. + page_token: + The value of ``next_page_token`` returned by a previous call. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListAppProfilesRequest) + )) +_sym_db.RegisterMessage(ListAppProfilesRequest) + +ListAppProfilesResponse = _reflection.GeneratedProtocolMessageType('ListAppProfilesResponse', (_message.Message,), dict( + DESCRIPTOR = _LISTAPPPROFILESRESPONSE, + __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' + , + __doc__ = """This is a private alpha release of Cloud Bigtable replication. This + feature is not currently available to most Cloud Bigtable customers. + This feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or + deprecation policy. + + Response message for BigtableInstanceAdmin.ListAppProfiles. + + + Attributes: + app_profiles: + The list of requested app profiles. + next_page_token: + Set if not all app profiles could be returned in a single + response. Pass this value to ``page_token`` in another request + to get the next page of results. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListAppProfilesResponse) + )) +_sym_db.RegisterMessage(ListAppProfilesResponse) + +UpdateAppProfileRequest = _reflection.GeneratedProtocolMessageType('UpdateAppProfileRequest', (_message.Message,), dict( + DESCRIPTOR = _UPDATEAPPPROFILEREQUEST, + __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' + , + __doc__ = """This is a private alpha release of Cloud Bigtable replication. This + feature is not currently available to most Cloud Bigtable customers. + This feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or + deprecation policy. + + Request message for BigtableInstanceAdmin.UpdateAppProfile. + + + Attributes: + app_profile: + The app profile which will (partially) replace the current + value. + update_mask: + The subset of app profile fields which should be replaced. If + unset, all fields will be replaced. + ignore_warnings: + If true, ignore safety checks when updating the app profile. 
+ """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.UpdateAppProfileRequest) + )) +_sym_db.RegisterMessage(UpdateAppProfileRequest) + +DeleteAppProfileRequest = _reflection.GeneratedProtocolMessageType('DeleteAppProfileRequest', (_message.Message,), dict( + DESCRIPTOR = _DELETEAPPPROFILEREQUEST, + __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' + , + __doc__ = """This is a private alpha release of Cloud Bigtable replication. This + feature is not currently available to most Cloud Bigtable customers. + This feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or + deprecation policy. + + Request message for BigtableInstanceAdmin.DeleteAppProfile. + + + Attributes: + name: + The unique name of the app profile to be deleted. Values are + of the form ``projects//instances//appProfi + les/``. + ignore_warnings: + If true, ignore safety checks when deleting the app profile. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteAppProfileRequest) + )) +_sym_db.RegisterMessage(DeleteAppProfileRequest) + +UpdateAppProfileMetadata = _reflection.GeneratedProtocolMessageType('UpdateAppProfileMetadata', (_message.Message,), dict( + DESCRIPTOR = _UPDATEAPPPROFILEMETADATA, + __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' + , + __doc__ = """This is a private alpha release of Cloud Bigtable replication. This + feature is not currently available to most Cloud Bigtable customers. + This feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or + deprecation policy. + + The metadata for the Operation returned by UpdateAppProfile. 
+ """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.UpdateAppProfileMetadata) + )) +_sym_db.RegisterMessage(UpdateAppProfileMetadata) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\034com.google.bigtable.admin.v2B\032BigtableInstanceAdminProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2')) +_CREATEINSTANCEREQUEST_CLUSTERSENTRY.has_options = True +_CREATEINSTANCEREQUEST_CLUSTERSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) + +_BIGTABLEINSTANCEADMIN = _descriptor.ServiceDescriptor( + name='BigtableInstanceAdmin', + full_name='google.bigtable.admin.v2.BigtableInstanceAdmin', + file=DESCRIPTOR, + index=0, + options=None, + serialized_start=2843, + serialized_end=5829, + methods=[ + _descriptor.MethodDescriptor( + name='CreateInstance', + full_name='google.bigtable.admin.v2.BigtableInstanceAdmin.CreateInstance', + index=0, + containing_service=None, + input_type=_CREATEINSTANCEREQUEST, + output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002&\"!/v2/{parent=projects/*}/instances:\001*')), + ), + _descriptor.MethodDescriptor( + name='GetInstance', + full_name='google.bigtable.admin.v2.BigtableInstanceAdmin.GetInstance', + index=1, + containing_service=None, + input_type=_GETINSTANCEREQUEST, + output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._INSTANCE, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002#\022!/v2/{name=projects/*/instances/*}')), + ), + _descriptor.MethodDescriptor( + name='ListInstances', + full_name='google.bigtable.admin.v2.BigtableInstanceAdmin.ListInstances', + index=2, + containing_service=None, + input_type=_LISTINSTANCESREQUEST, + output_type=_LISTINSTANCESRESPONSE, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002#\022!/v2/{parent=projects/*}/instances')), + ), + _descriptor.MethodDescriptor( + name='UpdateInstance', + full_name='google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateInstance', + index=3, + containing_service=None, + input_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._INSTANCE, + output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._INSTANCE, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002&\032!/v2/{name=projects/*/instances/*}:\001*')), + ), + _descriptor.MethodDescriptor( + name='PartialUpdateInstance', + full_name='google.bigtable.admin.v2.BigtableInstanceAdmin.PartialUpdateInstance', + index=4, + containing_service=None, + input_type=_PARTIALUPDATEINSTANCEREQUEST, + output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\00262*/v2/{instance.name=projects/*/instances/*}:\010instance')), + ), + _descriptor.MethodDescriptor( + name='DeleteInstance', + full_name='google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteInstance', + index=5, + containing_service=None, + input_type=_DELETEINSTANCEREQUEST, + output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002#*!/v2/{name=projects/*/instances/*}')), + 
), + _descriptor.MethodDescriptor( + name='CreateCluster', + full_name='google.bigtable.admin.v2.BigtableInstanceAdmin.CreateCluster', + index=6, + containing_service=None, + input_type=_CREATECLUSTERREQUEST, + output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\0027\",/v2/{parent=projects/*/instances/*}/clusters:\007cluster')), + ), + _descriptor.MethodDescriptor( + name='GetCluster', + full_name='google.bigtable.admin.v2.BigtableInstanceAdmin.GetCluster', + index=7, + containing_service=None, + input_type=_GETCLUSTERREQUEST, + output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._CLUSTER, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002.\022,/v2/{name=projects/*/instances/*/clusters/*}')), + ), + _descriptor.MethodDescriptor( + name='ListClusters', + full_name='google.bigtable.admin.v2.BigtableInstanceAdmin.ListClusters', + index=8, + containing_service=None, + input_type=_LISTCLUSTERSREQUEST, + output_type=_LISTCLUSTERSRESPONSE, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002.\022,/v2/{parent=projects/*/instances/*}/clusters')), + ), + _descriptor.MethodDescriptor( + name='UpdateCluster', + full_name='google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateCluster', + index=9, + containing_service=None, + input_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._CLUSTER, + output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\0021\032,/v2/{name=projects/*/instances/*/clusters/*}:\001*')), + ), + _descriptor.MethodDescriptor( + name='DeleteCluster', + full_name='google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteCluster', + index=10, + containing_service=None, + input_type=_DELETECLUSTERREQUEST, + output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002.*,/v2/{name=projects/*/instances/*/clusters/*}')), + ), + _descriptor.MethodDescriptor( + name='CreateAppProfile', + full_name='google.bigtable.admin.v2.BigtableInstanceAdmin.CreateAppProfile', + index=11, + containing_service=None, + input_type=_CREATEAPPPROFILEREQUEST, + output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._APPPROFILE, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002>\"//v2/{parent=projects/*/instances/*}/appProfiles:\013app_profile')), + ), + _descriptor.MethodDescriptor( + name='GetAppProfile', + full_name='google.bigtable.admin.v2.BigtableInstanceAdmin.GetAppProfile', + index=12, + containing_service=None, + input_type=_GETAPPPROFILEREQUEST, + output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._APPPROFILE, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\0021\022//v2/{name=projects/*/instances/*/appProfiles/*}')), + ), + _descriptor.MethodDescriptor( + name='ListAppProfiles', + full_name='google.bigtable.admin.v2.BigtableInstanceAdmin.ListAppProfiles', + index=13, + containing_service=None, + input_type=_LISTAPPPROFILESREQUEST, + output_type=_LISTAPPPROFILESRESPONSE, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\0021\022//v2/{parent=projects/*/instances/*}/appProfiles')), + ), + 
_descriptor.MethodDescriptor( + name='UpdateAppProfile', + full_name='google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateAppProfile', + index=14, + containing_service=None, + input_type=_UPDATEAPPPROFILEREQUEST, + output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002J2;/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}:\013app_profile')), + ), + _descriptor.MethodDescriptor( + name='DeleteAppProfile', + full_name='google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteAppProfile', + index=15, + containing_service=None, + input_type=_DELETEAPPPROFILEREQUEST, + output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\0021*//v2/{name=projects/*/instances/*/appProfiles/*}')), + ), + _descriptor.MethodDescriptor( + name='GetIamPolicy', + full_name='google.bigtable.admin.v2.BigtableInstanceAdmin.GetIamPolicy', + index=16, + containing_service=None, + input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._GETIAMPOLICYREQUEST, + output_type=google_dot_iam_dot_v1_dot_policy__pb2._POLICY, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\0027\"2/v2/{resource=projects/*/instances/*}:getIamPolicy:\001*')), + ), + _descriptor.MethodDescriptor( + name='SetIamPolicy', + full_name='google.bigtable.admin.v2.BigtableInstanceAdmin.SetIamPolicy', + index=17, + containing_service=None, + input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._SETIAMPOLICYREQUEST, + output_type=google_dot_iam_dot_v1_dot_policy__pb2._POLICY, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\0027\"2/v2/{resource=projects/*/instances/*}:setIamPolicy:\001*')), + ), + _descriptor.MethodDescriptor( + name='TestIamPermissions', + full_name='google.bigtable.admin.v2.BigtableInstanceAdmin.TestIamPermissions', + index=18, + containing_service=None, + input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._TESTIAMPERMISSIONSREQUEST, + output_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._TESTIAMPERMISSIONSRESPONSE, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002=\"8/v2/{resource=projects/*/instances/*}:testIamPermissions:\001*')), + ), +]) +_sym_db.RegisterServiceDescriptor(_BIGTABLEINSTANCEADMIN) + +DESCRIPTOR.services_by_name['BigtableInstanceAdmin'] = _BIGTABLEINSTANCEADMIN + +# @@protoc_insertion_point(module_scope) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py new file mode 100644 index 000000000000..f18f82cc1363 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py @@ -0,0 +1,406 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
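+#
+# Editor's note: the function below is an illustrative usage sketch added for
+# this review; it is not part of the generated output of the gRPC plugin. The
+# endpoint 'bigtableadmin.googleapis.com:443' and the resource name
+# 'projects/my-project/instances/my-instance' are placeholder assumptions, and
+# real calls would also need authenticated call credentials, which are omitted
+# here for brevity.
+def _example_get_instance():
+    """Sketch: drive the generated BigtableInstanceAdminStub over a channel."""
+    import grpc
+    from google.cloud.bigtable_admin_v2.proto import (
+        bigtable_instance_admin_pb2,
+        bigtable_instance_admin_pb2_grpc,
+    )
+    # Open a TLS channel to the (assumed) admin API endpoint.
+    channel = grpc.secure_channel('bigtableadmin.googleapis.com:443',
+                                  grpc.ssl_channel_credentials())
+    stub = bigtable_instance_admin_pb2_grpc.BigtableInstanceAdminStub(channel)
+    # Build the request message defined in bigtable_instance_admin_pb2 and
+    # invoke the unary-unary GetInstance RPC registered on the stub.
+    request = bigtable_instance_admin_pb2.GetInstanceRequest(
+        name='projects/my-project/instances/my-instance')
+    return stub.GetInstance(request)
+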
+import grpc + +from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2 as google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2 +from google.cloud.bigtable_admin_v2.proto import instance_pb2 as google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2 +from google.iam.v1 import iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2 +from google.iam.v1 import policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2 +from google.longrunning import operations_pb2 as google_dot_longrunning_dot_operations__pb2 +from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 + + +class BigtableInstanceAdminStub(object): + """Service for creating, configuring, and deleting Cloud Bigtable Instances and + Clusters. Provides access to the Instance and Cluster schemas only, not the + tables' metadata or data stored in those tables. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. + """ + self.CreateInstance = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateInstance', + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateInstanceRequest.SerializeToString, + response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, + ) + self.GetInstance = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/GetInstance', + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetInstanceRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.Instance.FromString, + ) + self.ListInstances = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/ListInstances', + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListInstancesRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListInstancesResponse.FromString, + ) + self.UpdateInstance = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateInstance', + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.Instance.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.Instance.FromString, + ) + self.PartialUpdateInstance = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/PartialUpdateInstance', + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.PartialUpdateInstanceRequest.SerializeToString, + response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, + ) + self.DeleteInstance = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteInstance', + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteInstanceRequest.SerializeToString, + response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ) + self.CreateCluster = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateCluster', + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateClusterRequest.SerializeToString, + 
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, + ) + self.GetCluster = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/GetCluster', + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetClusterRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.Cluster.FromString, + ) + self.ListClusters = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/ListClusters', + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListClustersRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListClustersResponse.FromString, + ) + self.UpdateCluster = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateCluster', + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.Cluster.SerializeToString, + response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, + ) + self.DeleteCluster = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteCluster', + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteClusterRequest.SerializeToString, + response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ) + self.CreateAppProfile = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateAppProfile', + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateAppProfileRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.AppProfile.FromString, + ) + self.GetAppProfile = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/GetAppProfile', + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetAppProfileRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.AppProfile.FromString, + ) + self.ListAppProfiles = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/ListAppProfiles', + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListAppProfilesRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListAppProfilesResponse.FromString, + ) + self.UpdateAppProfile = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateAppProfile', + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.UpdateAppProfileRequest.SerializeToString, + response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, + ) + self.DeleteAppProfile = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteAppProfile', + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteAppProfileRequest.SerializeToString, + response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ) + self.GetIamPolicy = channel.unary_unary( + 
'/google.bigtable.admin.v2.BigtableInstanceAdmin/GetIamPolicy', + request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, + ) + self.SetIamPolicy = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/SetIamPolicy', + request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, + ) + self.TestIamPermissions = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/TestIamPermissions', + request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.FromString, + ) + + +class BigtableInstanceAdminServicer(object): + """Service for creating, configuring, and deleting Cloud Bigtable Instances and + Clusters. Provides access to the Instance and Cluster schemas only, not the + tables' metadata or data stored in those tables. + """ + + def CreateInstance(self, request, context): + """Create an instance within a project. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetInstance(self, request, context): + """Gets information about an instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ListInstances(self, request, context): + """Lists information about instances in a project. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def UpdateInstance(self, request, context): + """Updates an instance within a project. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def PartialUpdateInstance(self, request, context): + """Partially updates an instance within a project. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def DeleteInstance(self, request, context): + """Delete an instance from a project. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def CreateCluster(self, request, context): + """Creates a cluster within an instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetCluster(self, request, context): + """Gets information about a cluster. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ListClusters(self, request, context): + """Lists information about clusters in an instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def UpdateCluster(self, request, context): + """Updates a cluster within an instance. 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def DeleteCluster(self, request, context): + """Deletes a cluster from an instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def CreateAppProfile(self, request, context): + """This is a private alpha release of Cloud Bigtable replication. This feature + is not currently available to most Cloud Bigtable customers. This feature + might be changed in backward-incompatible ways and is not recommended for + production use. It is not subject to any SLA or deprecation policy. + + Creates an app profile within an instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetAppProfile(self, request, context): + """This is a private alpha release of Cloud Bigtable replication. This feature + is not currently available to most Cloud Bigtable customers. This feature + might be changed in backward-incompatible ways and is not recommended for + production use. It is not subject to any SLA or deprecation policy. + + Gets information about an app profile. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ListAppProfiles(self, request, context): + """This is a private alpha release of Cloud Bigtable replication. This feature + is not currently available to most Cloud Bigtable customers. This feature + might be changed in backward-incompatible ways and is not recommended for + production use. It is not subject to any SLA or deprecation policy. + + Lists information about app profiles in an instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def UpdateAppProfile(self, request, context): + """This is a private alpha release of Cloud Bigtable replication. This feature + is not currently available to most Cloud Bigtable customers. This feature + might be changed in backward-incompatible ways and is not recommended for + production use. It is not subject to any SLA or deprecation policy. + + Updates an app profile within an instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def DeleteAppProfile(self, request, context): + """This is a private alpha release of Cloud Bigtable replication. This feature + is not currently available to most Cloud Bigtable customers. This feature + might be changed in backward-incompatible ways and is not recommended for + production use. It is not subject to any SLA or deprecation policy. + + Deletes an app profile from an instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetIamPolicy(self, request, context): + """This is a private alpha release of Cloud Bigtable instance level + permissions. This feature is not currently available to most Cloud Bigtable + customers. This feature might be changed in backward-incompatible ways and + is not recommended for production use. 
It is not subject to any SLA or + deprecation policy. + + Gets the access control policy for an instance resource. Returns an empty + policy if an instance exists but does not have a policy set. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def SetIamPolicy(self, request, context): + """This is a private alpha release of Cloud Bigtable instance level + permissions. This feature is not currently available to most Cloud Bigtable + customers. This feature might be changed in backward-incompatible ways and + is not recommended for production use. It is not subject to any SLA or + deprecation policy. + + Sets the access control policy on an instance resource. Replaces any + existing policy. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def TestIamPermissions(self, request, context): + """This is a private alpha release of Cloud Bigtable instance level + permissions. This feature is not currently available to most Cloud Bigtable + customers. This feature might be changed in backward-incompatible ways and + is not recommended for production use. It is not subject to any SLA or + deprecation policy. + + Returns permissions that the caller has on the specified instance resource. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_BigtableInstanceAdminServicer_to_server(servicer, server): + rpc_method_handlers = { + 'CreateInstance': grpc.unary_unary_rpc_method_handler( + servicer.CreateInstance, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateInstanceRequest.FromString, + response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, + ), + 'GetInstance': grpc.unary_unary_rpc_method_handler( + servicer.GetInstance, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetInstanceRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.Instance.SerializeToString, + ), + 'ListInstances': grpc.unary_unary_rpc_method_handler( + servicer.ListInstances, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListInstancesRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListInstancesResponse.SerializeToString, + ), + 'UpdateInstance': grpc.unary_unary_rpc_method_handler( + servicer.UpdateInstance, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.Instance.FromString, + response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.Instance.SerializeToString, + ), + 'PartialUpdateInstance': grpc.unary_unary_rpc_method_handler( + servicer.PartialUpdateInstance, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.PartialUpdateInstanceRequest.FromString, + response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, + ), + 'DeleteInstance': grpc.unary_unary_rpc_method_handler( + servicer.DeleteInstance, + 
request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteInstanceRequest.FromString, + response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ), + 'CreateCluster': grpc.unary_unary_rpc_method_handler( + servicer.CreateCluster, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateClusterRequest.FromString, + response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, + ), + 'GetCluster': grpc.unary_unary_rpc_method_handler( + servicer.GetCluster, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetClusterRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.Cluster.SerializeToString, + ), + 'ListClusters': grpc.unary_unary_rpc_method_handler( + servicer.ListClusters, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListClustersRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListClustersResponse.SerializeToString, + ), + 'UpdateCluster': grpc.unary_unary_rpc_method_handler( + servicer.UpdateCluster, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.Cluster.FromString, + response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, + ), + 'DeleteCluster': grpc.unary_unary_rpc_method_handler( + servicer.DeleteCluster, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteClusterRequest.FromString, + response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ), + 'CreateAppProfile': grpc.unary_unary_rpc_method_handler( + servicer.CreateAppProfile, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateAppProfileRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.AppProfile.SerializeToString, + ), + 'GetAppProfile': grpc.unary_unary_rpc_method_handler( + servicer.GetAppProfile, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetAppProfileRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.AppProfile.SerializeToString, + ), + 'ListAppProfiles': grpc.unary_unary_rpc_method_handler( + servicer.ListAppProfiles, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListAppProfilesRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListAppProfilesResponse.SerializeToString, + ), + 'UpdateAppProfile': grpc.unary_unary_rpc_method_handler( + servicer.UpdateAppProfile, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.UpdateAppProfileRequest.FromString, + response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, + ), + 'DeleteAppProfile': grpc.unary_unary_rpc_method_handler( + servicer.DeleteAppProfile, + 
request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteAppProfileRequest.FromString, + response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ), + 'GetIamPolicy': grpc.unary_unary_rpc_method_handler( + servicer.GetIamPolicy, + request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.FromString, + response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString, + ), + 'SetIamPolicy': grpc.unary_unary_rpc_method_handler( + servicer.SetIamPolicy, + request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.FromString, + response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString, + ), + 'TestIamPermissions': grpc.unary_unary_rpc_method_handler( + servicer.TestIamPermissions, + request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.FromString, + response_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'google.bigtable.admin.v2.BigtableInstanceAdmin', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py new file mode 100644 index 000000000000..fbb2ebb46390 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py @@ -0,0 +1,1608 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/cloud/bigtable/admin_v2/proto/bigtable_table_admin.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 +from google.cloud.bigtable_admin_v2.proto import table_pb2 as google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2 +from google.longrunning import operations_pb2 as google_dot_longrunning_dot_operations__pb2 +from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 +from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 +from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='google/cloud/bigtable/admin_v2/proto/bigtable_table_admin.proto', + package='google.bigtable.admin.v2', + syntax='proto3', + serialized_pb=_b('\n?google/cloud/bigtable/admin_v2/proto/bigtable_table_admin.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x30google/cloud/bigtable/admin_v2/proto/table.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\xc8\x01\n\x12\x43reateTableRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x10\n\x08table_id\x18\x02 \x01(\t\x12.\n\x05table\x18\x03 \x01(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12J\n\x0einitial_splits\x18\x04 
\x03(\x0b\x32\x32.google.bigtable.admin.v2.CreateTableRequest.Split\x1a\x14\n\x05Split\x12\x0b\n\x03key\x18\x01 \x01(\x0c\"[\n\x1e\x43reateTableFromSnapshotRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x10\n\x08table_id\x18\x02 \x01(\t\x12\x17\n\x0fsource_snapshot\x18\x03 \x01(\t\"m\n\x13\x44ropRowRangeRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x18\n\x0erow_key_prefix\x18\x02 \x01(\x0cH\x00\x12$\n\x1a\x64\x65lete_all_data_from_table\x18\x03 \x01(\x08H\x00\x42\x08\n\x06target\"k\n\x11ListTablesRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x32\n\x04view\x18\x02 \x01(\x0e\x32$.google.bigtable.admin.v2.Table.View\x12\x12\n\npage_token\x18\x03 \x01(\t\"^\n\x12ListTablesResponse\x12/\n\x06tables\x18\x01 \x03(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"S\n\x0fGetTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x32\n\x04view\x18\x02 \x01(\x0e\x32$.google.bigtable.admin.v2.Table.View\"\"\n\x12\x44\x65leteTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\xae\x02\n\x1bModifyColumnFamiliesRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12Y\n\rmodifications\x18\x02 \x03(\x0b\x32\x42.google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification\x1a\xa5\x01\n\x0cModification\x12\n\n\x02id\x18\x01 \x01(\t\x12\x38\n\x06\x63reate\x18\x02 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamilyH\x00\x12\x38\n\x06update\x18\x03 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamilyH\x00\x12\x0e\n\x04\x64rop\x18\x04 \x01(\x08H\x00\x42\x05\n\x03mod\"/\n\x1fGenerateConsistencyTokenRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"=\n GenerateConsistencyTokenResponse\x12\x19\n\x11\x63onsistency_token\x18\x01 \x01(\t\"B\n\x17\x43heckConsistencyRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x19\n\x11\x63onsistency_token\x18\x02 \x01(\t\".\n\x18\x43heckConsistencyResponse\x12\x12\n\nconsistent\x18\x01 \x01(\x08\"\x87\x01\n\x14SnapshotTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07\x63luster\x18\x02 \x01(\t\x12\x13\n\x0bsnapshot_id\x18\x03 \x01(\t\x12&\n\x03ttl\x18\x04 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x13\n\x0b\x64\x65scription\x18\x05 \x01(\t\"\"\n\x12GetSnapshotRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"M\n\x14ListSnapshotsRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t\"g\n\x15ListSnapshotsResponse\x12\x35\n\tsnapshots\x18\x01 \x03(\x0b\x32\".google.bigtable.admin.v2.Snapshot\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"%\n\x15\x44\x65leteSnapshotRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\xc4\x01\n\x15SnapshotTableMetadata\x12H\n\x10original_request\x18\x01 \x01(\x0b\x32..google.bigtable.admin.v2.SnapshotTableRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xd8\x01\n\x1f\x43reateTableFromSnapshotMetadata\x12R\n\x10original_request\x18\x01 \x01(\x0b\x32\x38.google.bigtable.admin.v2.CreateTableFromSnapshotRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp2\xb7\x11\n\x12\x42igtableTableAdmin\x12\x93\x01\n\x0b\x43reateTable\x12,.google.bigtable.admin.v2.CreateTableRequest\x1a\x1f.google.bigtable.admin.v2.Table\"5\x82\xd3\xe4\x93\x02/\"*/v2/{parent=projects/*/instances/*}/tables:\x01*\x12\xbc\x01\n\x17\x43reateTableFromSnapshot\x12\x38.google.bigtable.admin.v2.CreateTableFromSnapshotRequest\x1a\x1d.google.longrunning.Operation\"H\x82\xd3\xe4\x93\x02\x42\"=/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot:\x01*\x12\x9b\x01\n\nListTables\x12+.google.bigtable.admin.v2.ListTablesRequest\x1a,.google.bigtable.admin.v2.ListTablesResponse\"2\x82\xd3\xe4\x93\x02,\x12*/v2/{parent=projects/*/instances/*}/tables\x12\x8a\x01\n\x08GetTable\x12).google.bigtable.admin.v2.GetTableRequest\x1a\x1f.google.bigtable.admin.v2.Table\"2\x82\xd3\xe4\x93\x02,\x12*/v2/{name=projects/*/instances/*/tables/*}\x12\x87\x01\n\x0b\x44\x65leteTable\x12,.google.bigtable.admin.v2.DeleteTableRequest\x1a\x16.google.protobuf.Empty\"2\x82\xd3\xe4\x93\x02,**/v2/{name=projects/*/instances/*/tables/*}\x12\xba\x01\n\x14ModifyColumnFamilies\x12\x35.google.bigtable.admin.v2.ModifyColumnFamiliesRequest\x1a\x1f.google.bigtable.admin.v2.Table\"J\x82\xd3\xe4\x93\x02\x44\"?/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies:\x01*\x12\x99\x01\n\x0c\x44ropRowRange\x12-.google.bigtable.admin.v2.DropRowRangeRequest\x1a\x16.google.protobuf.Empty\"B\x82\xd3\xe4\x93\x02<\"7/v2/{name=projects/*/instances/*/tables/*}:dropRowRange:\x01*\x12\xe1\x01\n\x18GenerateConsistencyToken\x12\x39.google.bigtable.admin.v2.GenerateConsistencyTokenRequest\x1a:.google.bigtable.admin.v2.GenerateConsistencyTokenResponse\"N\x82\xd3\xe4\x93\x02H\"C/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken:\x01*\x12\xc1\x01\n\x10\x43heckConsistency\x12\x31.google.bigtable.admin.v2.CheckConsistencyRequest\x1a\x32.google.bigtable.admin.v2.CheckConsistencyResponse\"F\x82\xd3\xe4\x93\x02@\";/v2/{name=projects/*/instances/*/tables/*}:checkConsistency:\x01*\x12\x9e\x01\n\rSnapshotTable\x12..google.bigtable.admin.v2.SnapshotTableRequest\x1a\x1d.google.longrunning.Operation\">\x82\xd3\xe4\x93\x02\x38\"3/v2/{name=projects/*/instances/*/tables/*}:snapshot:\x01*\x12\xa1\x01\n\x0bGetSnapshot\x12,.google.bigtable.admin.v2.GetSnapshotRequest\x1a\".google.bigtable.admin.v2.Snapshot\"@\x82\xd3\xe4\x93\x02:\x12\x38/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}\x12\xb2\x01\n\rListSnapshots\x12..google.bigtable.admin.v2.ListSnapshotsRequest\x1a/.google.bigtable.admin.v2.ListSnapshotsResponse\"@\x82\xd3\xe4\x93\x02:\x12\x38/v2/{parent=projects/*/instances/*/clusters/*}/snapshots\x12\x9b\x01\n\x0e\x44\x65leteSnapshot\x12/.google.bigtable.admin.v2.DeleteSnapshotRequest\x1a\x16.google.protobuf.Empty\"@\x82\xd3\xe4\x93\x02:*8/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}B\xba\x01\n\x1c\x63om.google.bigtable.admin.v2B\x17\x42igtableTableAdminProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2b\x06proto3') + , + dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.DESCRIPTOR,google_dot_longrunning_dot_operations__pb2.DESCRIPTOR,google_dot_protobuf_dot_duration__pb2.DESCRIPTOR,google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,]) + + + + +_CREATETABLEREQUEST_SPLIT = _descriptor.Descriptor( + name='Split', + 
full_name='google.bigtable.admin.v2.CreateTableRequest.Split', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='google.bigtable.admin.v2.CreateTableRequest.Split.key', index=0, + number=1, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=485, + serialized_end=505, +) + +_CREATETABLEREQUEST = _descriptor.Descriptor( + name='CreateTableRequest', + full_name='google.bigtable.admin.v2.CreateTableRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='parent', full_name='google.bigtable.admin.v2.CreateTableRequest.parent', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='table_id', full_name='google.bigtable.admin.v2.CreateTableRequest.table_id', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='table', full_name='google.bigtable.admin.v2.CreateTableRequest.table', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='initial_splits', full_name='google.bigtable.admin.v2.CreateTableRequest.initial_splits', index=3, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[_CREATETABLEREQUEST_SPLIT, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=305, + serialized_end=505, +) + + +_CREATETABLEFROMSNAPSHOTREQUEST = _descriptor.Descriptor( + name='CreateTableFromSnapshotRequest', + full_name='google.bigtable.admin.v2.CreateTableFromSnapshotRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='parent', full_name='google.bigtable.admin.v2.CreateTableFromSnapshotRequest.parent', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='table_id', full_name='google.bigtable.admin.v2.CreateTableFromSnapshotRequest.table_id', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + 
options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='source_snapshot', full_name='google.bigtable.admin.v2.CreateTableFromSnapshotRequest.source_snapshot', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=507, + serialized_end=598, +) + + +_DROPROWRANGEREQUEST = _descriptor.Descriptor( + name='DropRowRangeRequest', + full_name='google.bigtable.admin.v2.DropRowRangeRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.DropRowRangeRequest.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='row_key_prefix', full_name='google.bigtable.admin.v2.DropRowRangeRequest.row_key_prefix', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='delete_all_data_from_table', full_name='google.bigtable.admin.v2.DropRowRangeRequest.delete_all_data_from_table', index=2, + number=3, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='target', full_name='google.bigtable.admin.v2.DropRowRangeRequest.target', + index=0, containing_type=None, fields=[]), + ], + serialized_start=600, + serialized_end=709, +) + + +_LISTTABLESREQUEST = _descriptor.Descriptor( + name='ListTablesRequest', + full_name='google.bigtable.admin.v2.ListTablesRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='parent', full_name='google.bigtable.admin.v2.ListTablesRequest.parent', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='view', full_name='google.bigtable.admin.v2.ListTablesRequest.view', index=1, + number=2, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='page_token', full_name='google.bigtable.admin.v2.ListTablesRequest.page_token', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + 
is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=711, + serialized_end=818, +) + + +_LISTTABLESRESPONSE = _descriptor.Descriptor( + name='ListTablesResponse', + full_name='google.bigtable.admin.v2.ListTablesResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='tables', full_name='google.bigtable.admin.v2.ListTablesResponse.tables', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='next_page_token', full_name='google.bigtable.admin.v2.ListTablesResponse.next_page_token', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=820, + serialized_end=914, +) + + +_GETTABLEREQUEST = _descriptor.Descriptor( + name='GetTableRequest', + full_name='google.bigtable.admin.v2.GetTableRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.GetTableRequest.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='view', full_name='google.bigtable.admin.v2.GetTableRequest.view', index=1, + number=2, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=916, + serialized_end=999, +) + + +_DELETETABLEREQUEST = _descriptor.Descriptor( + name='DeleteTableRequest', + full_name='google.bigtable.admin.v2.DeleteTableRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.DeleteTableRequest.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1001, + serialized_end=1035, +) + + +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION = _descriptor.Descriptor( + name='Modification', + full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification', + filename=None, + file=DESCRIPTOR, + 
containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='id', full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.id', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='create', full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.create', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='update', full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.update', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='drop', full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.drop', index=3, + number=4, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='mod', full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.mod', + index=0, containing_type=None, fields=[]), + ], + serialized_start=1175, + serialized_end=1340, +) + +_MODIFYCOLUMNFAMILIESREQUEST = _descriptor.Descriptor( + name='ModifyColumnFamiliesRequest', + full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='modifications', full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.modifications', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1038, + serialized_end=1340, +) + + +_GENERATECONSISTENCYTOKENREQUEST = _descriptor.Descriptor( + name='GenerateConsistencyTokenRequest', + full_name='google.bigtable.admin.v2.GenerateConsistencyTokenRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.GenerateConsistencyTokenRequest.name', index=0, + number=1, type=9, cpp_type=9, label=1, + 
has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1342, + serialized_end=1389, +) + + +_GENERATECONSISTENCYTOKENRESPONSE = _descriptor.Descriptor( + name='GenerateConsistencyTokenResponse', + full_name='google.bigtable.admin.v2.GenerateConsistencyTokenResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='consistency_token', full_name='google.bigtable.admin.v2.GenerateConsistencyTokenResponse.consistency_token', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1391, + serialized_end=1452, +) + + +_CHECKCONSISTENCYREQUEST = _descriptor.Descriptor( + name='CheckConsistencyRequest', + full_name='google.bigtable.admin.v2.CheckConsistencyRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.CheckConsistencyRequest.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='consistency_token', full_name='google.bigtable.admin.v2.CheckConsistencyRequest.consistency_token', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1454, + serialized_end=1520, +) + + +_CHECKCONSISTENCYRESPONSE = _descriptor.Descriptor( + name='CheckConsistencyResponse', + full_name='google.bigtable.admin.v2.CheckConsistencyResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='consistent', full_name='google.bigtable.admin.v2.CheckConsistencyResponse.consistent', index=0, + number=1, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1522, + serialized_end=1568, +) + + +_SNAPSHOTTABLEREQUEST = _descriptor.Descriptor( + name='SnapshotTableRequest', + full_name='google.bigtable.admin.v2.SnapshotTableRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', 
full_name='google.bigtable.admin.v2.SnapshotTableRequest.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='cluster', full_name='google.bigtable.admin.v2.SnapshotTableRequest.cluster', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='snapshot_id', full_name='google.bigtable.admin.v2.SnapshotTableRequest.snapshot_id', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='ttl', full_name='google.bigtable.admin.v2.SnapshotTableRequest.ttl', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='description', full_name='google.bigtable.admin.v2.SnapshotTableRequest.description', index=4, + number=5, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1571, + serialized_end=1706, +) + + +_GETSNAPSHOTREQUEST = _descriptor.Descriptor( + name='GetSnapshotRequest', + full_name='google.bigtable.admin.v2.GetSnapshotRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.GetSnapshotRequest.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1708, + serialized_end=1742, +) + + +_LISTSNAPSHOTSREQUEST = _descriptor.Descriptor( + name='ListSnapshotsRequest', + full_name='google.bigtable.admin.v2.ListSnapshotsRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='parent', full_name='google.bigtable.admin.v2.ListSnapshotsRequest.parent', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='page_size', full_name='google.bigtable.admin.v2.ListSnapshotsRequest.page_size', index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=False, 
default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='page_token', full_name='google.bigtable.admin.v2.ListSnapshotsRequest.page_token', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1744, + serialized_end=1821, +) + + +_LISTSNAPSHOTSRESPONSE = _descriptor.Descriptor( + name='ListSnapshotsResponse', + full_name='google.bigtable.admin.v2.ListSnapshotsResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='snapshots', full_name='google.bigtable.admin.v2.ListSnapshotsResponse.snapshots', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='next_page_token', full_name='google.bigtable.admin.v2.ListSnapshotsResponse.next_page_token', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1823, + serialized_end=1926, +) + + +_DELETESNAPSHOTREQUEST = _descriptor.Descriptor( + name='DeleteSnapshotRequest', + full_name='google.bigtable.admin.v2.DeleteSnapshotRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.DeleteSnapshotRequest.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1928, + serialized_end=1965, +) + + +_SNAPSHOTTABLEMETADATA = _descriptor.Descriptor( + name='SnapshotTableMetadata', + full_name='google.bigtable.admin.v2.SnapshotTableMetadata', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='original_request', full_name='google.bigtable.admin.v2.SnapshotTableMetadata.original_request', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='request_time', full_name='google.bigtable.admin.v2.SnapshotTableMetadata.request_time', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, 
enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='finish_time', full_name='google.bigtable.admin.v2.SnapshotTableMetadata.finish_time', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1968, + serialized_end=2164, +) + + +_CREATETABLEFROMSNAPSHOTMETADATA = _descriptor.Descriptor( + name='CreateTableFromSnapshotMetadata', + full_name='google.bigtable.admin.v2.CreateTableFromSnapshotMetadata', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='original_request', full_name='google.bigtable.admin.v2.CreateTableFromSnapshotMetadata.original_request', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='request_time', full_name='google.bigtable.admin.v2.CreateTableFromSnapshotMetadata.request_time', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='finish_time', full_name='google.bigtable.admin.v2.CreateTableFromSnapshotMetadata.finish_time', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2167, + serialized_end=2383, +) + +_CREATETABLEREQUEST_SPLIT.containing_type = _CREATETABLEREQUEST +_CREATETABLEREQUEST.fields_by_name['table'].message_type = google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._TABLE +_CREATETABLEREQUEST.fields_by_name['initial_splits'].message_type = _CREATETABLEREQUEST_SPLIT +_DROPROWRANGEREQUEST.oneofs_by_name['target'].fields.append( + _DROPROWRANGEREQUEST.fields_by_name['row_key_prefix']) +_DROPROWRANGEREQUEST.fields_by_name['row_key_prefix'].containing_oneof = _DROPROWRANGEREQUEST.oneofs_by_name['target'] +_DROPROWRANGEREQUEST.oneofs_by_name['target'].fields.append( + _DROPROWRANGEREQUEST.fields_by_name['delete_all_data_from_table']) +_DROPROWRANGEREQUEST.fields_by_name['delete_all_data_from_table'].containing_oneof = _DROPROWRANGEREQUEST.oneofs_by_name['target'] +_LISTTABLESREQUEST.fields_by_name['view'].enum_type = google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._TABLE_VIEW +_LISTTABLESRESPONSE.fields_by_name['tables'].message_type = google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._TABLE +_GETTABLEREQUEST.fields_by_name['view'].enum_type = google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._TABLE_VIEW 
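# [Editorial sketch -- not produced by protoc and not part of this patch.] The wiring
# above attaches DropRowRangeRequest's 'row_key_prefix' and 'delete_all_data_from_table'
# fields to the 'target' oneof (the same pattern continues below for the 'mod' oneof of
# ModifyColumnFamiliesRequest.Modification), so a request carries exactly one of those
# fields at runtime. The hypothetical helper below, never called here, illustrates that
# behaviour once the module is importable under the path this patch installs it at;
# the project/instance/table values are placeholders.
def _example_drop_row_range_target():  # illustrative only; not a generated symbol
    from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2 as admin_pb2
    req = admin_pb2.DropRowRangeRequest(
        name='projects/my-project/instances/my-instance/tables/my-table',
        row_key_prefix=b'user#',  # populates one arm of the 'target' oneof
    )
    assert req.WhichOneof('target') == 'row_key_prefix'
    req.delete_all_data_from_table = True  # setting the other arm clears row_key_prefix
    assert req.WhichOneof('target') == 'delete_all_data_from_table'
    return req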
+_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['create'].message_type = google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._COLUMNFAMILY +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['update'].message_type = google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._COLUMNFAMILY +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.containing_type = _MODIFYCOLUMNFAMILIESREQUEST +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name['mod'].fields.append( + _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['create']) +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['create'].containing_oneof = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name['mod'] +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name['mod'].fields.append( + _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['update']) +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['update'].containing_oneof = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name['mod'] +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name['mod'].fields.append( + _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['drop']) +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['drop'].containing_oneof = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name['mod'] +_MODIFYCOLUMNFAMILIESREQUEST.fields_by_name['modifications'].message_type = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION +_SNAPSHOTTABLEREQUEST.fields_by_name['ttl'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION +_LISTSNAPSHOTSRESPONSE.fields_by_name['snapshots'].message_type = google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._SNAPSHOT +_SNAPSHOTTABLEMETADATA.fields_by_name['original_request'].message_type = _SNAPSHOTTABLEREQUEST +_SNAPSHOTTABLEMETADATA.fields_by_name['request_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_SNAPSHOTTABLEMETADATA.fields_by_name['finish_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_CREATETABLEFROMSNAPSHOTMETADATA.fields_by_name['original_request'].message_type = _CREATETABLEFROMSNAPSHOTREQUEST +_CREATETABLEFROMSNAPSHOTMETADATA.fields_by_name['request_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_CREATETABLEFROMSNAPSHOTMETADATA.fields_by_name['finish_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +DESCRIPTOR.message_types_by_name['CreateTableRequest'] = _CREATETABLEREQUEST +DESCRIPTOR.message_types_by_name['CreateTableFromSnapshotRequest'] = _CREATETABLEFROMSNAPSHOTREQUEST +DESCRIPTOR.message_types_by_name['DropRowRangeRequest'] = _DROPROWRANGEREQUEST +DESCRIPTOR.message_types_by_name['ListTablesRequest'] = _LISTTABLESREQUEST +DESCRIPTOR.message_types_by_name['ListTablesResponse'] = _LISTTABLESRESPONSE +DESCRIPTOR.message_types_by_name['GetTableRequest'] = _GETTABLEREQUEST +DESCRIPTOR.message_types_by_name['DeleteTableRequest'] = _DELETETABLEREQUEST +DESCRIPTOR.message_types_by_name['ModifyColumnFamiliesRequest'] = _MODIFYCOLUMNFAMILIESREQUEST +DESCRIPTOR.message_types_by_name['GenerateConsistencyTokenRequest'] = _GENERATECONSISTENCYTOKENREQUEST +DESCRIPTOR.message_types_by_name['GenerateConsistencyTokenResponse'] = _GENERATECONSISTENCYTOKENRESPONSE +DESCRIPTOR.message_types_by_name['CheckConsistencyRequest'] = _CHECKCONSISTENCYREQUEST +DESCRIPTOR.message_types_by_name['CheckConsistencyResponse'] = _CHECKCONSISTENCYRESPONSE +DESCRIPTOR.message_types_by_name['SnapshotTableRequest'] = _SNAPSHOTTABLEREQUEST 
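# [Editorial sketch -- not produced by protoc and not part of this patch.] The statements
# above give SnapshotTableRequest.ttl the google.protobuf.Duration message type and point
# the SnapshotTableMetadata timestamps at google.protobuf.Timestamp. A hypothetical,
# never-invoked helper showing how such a request might be built and serialized once the
# generated classes registered below are available; all resource names are placeholders.
def _example_build_snapshot_table_request():  # illustrative only; not a generated symbol
    from google.protobuf import duration_pb2
    from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2 as admin_pb2
    req = admin_pb2.SnapshotTableRequest(
        name='projects/my-project/instances/my-instance/tables/my-table',
        cluster='projects/my-project/instances/my-instance/clusters/my-cluster',
        snapshot_id='my-snapshot',
        ttl=duration_pb2.Duration(seconds=24 * 60 * 60),  # the documented 24-hour default
        description='illustrative snapshot',
    )
    return req.SerializeToString()  # wire-format bytes, as sent over gRPC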
+DESCRIPTOR.message_types_by_name['GetSnapshotRequest'] = _GETSNAPSHOTREQUEST +DESCRIPTOR.message_types_by_name['ListSnapshotsRequest'] = _LISTSNAPSHOTSREQUEST +DESCRIPTOR.message_types_by_name['ListSnapshotsResponse'] = _LISTSNAPSHOTSRESPONSE +DESCRIPTOR.message_types_by_name['DeleteSnapshotRequest'] = _DELETESNAPSHOTREQUEST +DESCRIPTOR.message_types_by_name['SnapshotTableMetadata'] = _SNAPSHOTTABLEMETADATA +DESCRIPTOR.message_types_by_name['CreateTableFromSnapshotMetadata'] = _CREATETABLEFROMSNAPSHOTMETADATA +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +CreateTableRequest = _reflection.GeneratedProtocolMessageType('CreateTableRequest', (_message.Message,), dict( + + Split = _reflection.GeneratedProtocolMessageType('Split', (_message.Message,), dict( + DESCRIPTOR = _CREATETABLEREQUEST_SPLIT, + __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' + , + __doc__ = """An initial split point for a newly created table. + + + Attributes: + key: + Row key to use as an initial tablet boundary. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateTableRequest.Split) + )) + , + DESCRIPTOR = _CREATETABLEREQUEST, + __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' + , + __doc__ = """Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] + + + Attributes: + parent: + The unique name of the instance in which to create the table. + Values are of the form + ``projects//instances/``. + table_id: + The name by which the new table should be referred to within + the parent instance, e.g., ``foobar`` rather than + ``/tables/foobar``. + table: + The Table to create. + initial_splits: + The optional list of row keys that will be used to initially + split the table into several tablets (tablets are similar to + HBase regions). Given two split keys, ``s1`` and ``s2``, three + tablets will be created, spanning the key ranges: ``[, s1), + [s1, s2), [s2, )``. Example: - Row keys := ``["a", "apple", + "custom", "customer_1", "customer_2",`` ``"other", "zz"]`` + - initial\_split\_keys := ``["apple", "customer_1", + "customer_2", "other"]`` - Key assignment: - Tablet 1 + ``[, apple) => {"a"}.`` - Tablet 2 + ``[apple, customer_1) => {"apple", "custom"}.`` - + Tablet 3 ``[customer_1, customer_2) => {"customer_1"}.`` - + Tablet 4 ``[customer_2, other) => {"customer_2"}.`` - + Tablet 5 ``[other, ) => {"other", "zz"}.`` + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateTableRequest) + )) +_sym_db.RegisterMessage(CreateTableRequest) +_sym_db.RegisterMessage(CreateTableRequest.Split) + +CreateTableFromSnapshotRequest = _reflection.GeneratedProtocolMessageType('CreateTableFromSnapshotRequest', (_message.Message,), dict( + DESCRIPTOR = _CREATETABLEFROMSNAPSHOTREQUEST, + __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' + , + __doc__ = """This is a private alpha release of Cloud Bigtable snapshots. This + feature is not currently available to most Cloud Bigtable customers. + This feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or + deprecation policy. + + Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot] + + + Attributes: + parent: + The unique name of the instance in which to create the table. 
+ Values are of the form + ``projects//instances/``. + table_id: + The name by which the new table should be referred to within + the parent instance, e.g., ``foobar`` rather than + ``/tables/foobar``. + source_snapshot: + The unique name of the snapshot from which to restore the + table. The snapshot and the table must be in the same + instance. Values are of the form ``projects//instance + s//clusters//snapshots/``. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateTableFromSnapshotRequest) + )) +_sym_db.RegisterMessage(CreateTableFromSnapshotRequest) + +DropRowRangeRequest = _reflection.GeneratedProtocolMessageType('DropRowRangeRequest', (_message.Message,), dict( + DESCRIPTOR = _DROPROWRANGEREQUEST, + __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' + , + __doc__ = """Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] + + + Attributes: + name: + The unique name of the table on which to drop a range of rows. + Values are of the form + ``projects//instances//tables/
``. + target: + Delete all rows or by prefix. + row_key_prefix: + Delete all rows that start with this row key prefix. Prefix + cannot be zero length. + delete_all_data_from_table: + Delete all rows in the table. Setting this to false is a no- + op. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DropRowRangeRequest) + )) +_sym_db.RegisterMessage(DropRowRangeRequest) + +ListTablesRequest = _reflection.GeneratedProtocolMessageType('ListTablesRequest', (_message.Message,), dict( + DESCRIPTOR = _LISTTABLESREQUEST, + __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' + , + __doc__ = """Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] + + + Attributes: + parent: + The unique name of the instance for which tables should be + listed. Values are of the form + ``projects//instances/``. + view: + The view to be applied to the returned tables' fields. + Defaults to ``NAME_ONLY`` if unspecified; no others are + currently supported. + page_token: + The value of ``next_page_token`` returned by a previous call. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListTablesRequest) + )) +_sym_db.RegisterMessage(ListTablesRequest) + +ListTablesResponse = _reflection.GeneratedProtocolMessageType('ListTablesResponse', (_message.Message,), dict( + DESCRIPTOR = _LISTTABLESRESPONSE, + __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' + , + __doc__ = """Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] + + + Attributes: + tables: + The tables present in the requested instance. + next_page_token: + Set if not all tables could be returned in a single response. + Pass this value to ``page_token`` in another request to get + the next page of results. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListTablesResponse) + )) +_sym_db.RegisterMessage(ListTablesResponse) + +GetTableRequest = _reflection.GeneratedProtocolMessageType('GetTableRequest', (_message.Message,), dict( + DESCRIPTOR = _GETTABLEREQUEST, + __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' + , + __doc__ = """Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.GetTable][google.bigtable.admin.v2.BigtableTableAdmin.GetTable] + + + Attributes: + name: + The unique name of the requested table. Values are of the form + ``projects//instances//tables/
``. + view: + The view to be applied to the returned table's fields. + Defaults to ``SCHEMA_VIEW`` if unspecified. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetTableRequest) + )) +_sym_db.RegisterMessage(GetTableRequest) + +DeleteTableRequest = _reflection.GeneratedProtocolMessageType('DeleteTableRequest', (_message.Message,), dict( + DESCRIPTOR = _DELETETABLEREQUEST, + __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' + , + __doc__ = """Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable] + + + Attributes: + name: + The unique name of the table to be deleted. Values are of the + form + ``projects//instances//tables/
``. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteTableRequest) + )) +_sym_db.RegisterMessage(DeleteTableRequest) + +ModifyColumnFamiliesRequest = _reflection.GeneratedProtocolMessageType('ModifyColumnFamiliesRequest', (_message.Message,), dict( + + Modification = _reflection.GeneratedProtocolMessageType('Modification', (_message.Message,), dict( + DESCRIPTOR = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION, + __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' + , + __doc__ = """A create, update, or delete of a particular column family. + + + Attributes: + id: + The ID of the column family to be modified. + mod: + Column familiy modifications. + create: + Create a new column family with the specified schema, or fail + if one already exists with the given ID. + update: + Update an existing column family to the specified schema, or + fail if no column family exists with the given ID. + drop: + Drop (delete) the column family with the given ID, or fail if + no such family exists. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification) + )) + , + DESCRIPTOR = _MODIFYCOLUMNFAMILIESREQUEST, + __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' + , + __doc__ = """Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies] + + + Attributes: + name: + The unique name of the table whose families should be + modified. Values are of the form + ``projects//instances//tables/
``. + modifications: + Modifications to be atomically applied to the specified + table's families. Entries are applied in order, meaning that + earlier modifications can be masked by later ones (in the case + of repeated updates to the same family, for example). + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ModifyColumnFamiliesRequest) + )) +_sym_db.RegisterMessage(ModifyColumnFamiliesRequest) +_sym_db.RegisterMessage(ModifyColumnFamiliesRequest.Modification) + +GenerateConsistencyTokenRequest = _reflection.GeneratedProtocolMessageType('GenerateConsistencyTokenRequest', (_message.Message,), dict( + DESCRIPTOR = _GENERATECONSISTENCYTOKENREQUEST, + __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' + , + __doc__ = """This is a private alpha release of Cloud Bigtable replication. This + feature is not currently available to most Cloud Bigtable customers. + This feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or + deprecation policy. + + Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] + + + Attributes: + name: + The unique name of the Table for which to create a consistency + token. Values are of the form + ``projects//instances//tables/
``. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GenerateConsistencyTokenRequest) + )) +_sym_db.RegisterMessage(GenerateConsistencyTokenRequest) + +GenerateConsistencyTokenResponse = _reflection.GeneratedProtocolMessageType('GenerateConsistencyTokenResponse', (_message.Message,), dict( + DESCRIPTOR = _GENERATECONSISTENCYTOKENRESPONSE, + __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' + , + __doc__ = """This is a private alpha release of Cloud Bigtable replication. This + feature is not currently available to most Cloud Bigtable customers. + This feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or + deprecation policy. + + Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] + + + Attributes: + consistency_token: + The generated consistency token. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GenerateConsistencyTokenResponse) + )) +_sym_db.RegisterMessage(GenerateConsistencyTokenResponse) + +CheckConsistencyRequest = _reflection.GeneratedProtocolMessageType('CheckConsistencyRequest', (_message.Message,), dict( + DESCRIPTOR = _CHECKCONSISTENCYREQUEST, + __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' + , + __doc__ = """This is a private alpha release of Cloud Bigtable replication. This + feature is not currently available to most Cloud Bigtable customers. + This feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or + deprecation policy. + + Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] + + + Attributes: + name: + The unique name of the Table for which to check replication + consistency. Values are of the form + ``projects//instances//tables/
``. + consistency_token: + The token created using GenerateConsistencyToken for the + Table. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CheckConsistencyRequest) + )) +_sym_db.RegisterMessage(CheckConsistencyRequest) + +CheckConsistencyResponse = _reflection.GeneratedProtocolMessageType('CheckConsistencyResponse', (_message.Message,), dict( + DESCRIPTOR = _CHECKCONSISTENCYRESPONSE, + __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' + , + __doc__ = """This is a private alpha release of Cloud Bigtable replication. This + feature is not currently available to most Cloud Bigtable customers. + This feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or + deprecation policy. + + Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] + + + Attributes: + consistent: + True only if the token is consistent. A token is consistent if + replication has caught up with the restrictions specified in + the request. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CheckConsistencyResponse) + )) +_sym_db.RegisterMessage(CheckConsistencyResponse) + +SnapshotTableRequest = _reflection.GeneratedProtocolMessageType('SnapshotTableRequest', (_message.Message,), dict( + DESCRIPTOR = _SNAPSHOTTABLEREQUEST, + __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' + , + __doc__ = """This is a private alpha release of Cloud Bigtable snapshots. This + feature is not currently available to most Cloud Bigtable customers. + This feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or + deprecation policy. + + Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable][google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable] + + + Attributes: + name: + The unique name of the table to have the snapshot taken. + Values are of the form + ``projects//instances//tables/
``. + cluster: + The name of the cluster where the snapshot will be created in. + Values are of the form ``projects//instances//clusters/``. + snapshot_id: + The ID by which the new snapshot should be referred to within + the parent cluster, e.g., ``mysnapshot`` of the form: ``[_a- + zA-Z0-9][-_.a-zA-Z0-9]*`` rather than ``projects//ins + tances//clusters//snapshots/mysnapshot``. + ttl: + The amount of time that the new snapshot can stay active after + it is created. Once 'ttl' expires, the snapshot will get + deleted. The maximum amount of time a snapshot can stay active + is 7 days. If 'ttl' is not specified, the default value of 24 + hours will be used. + description: + Description of the snapshot. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.SnapshotTableRequest) + )) +_sym_db.RegisterMessage(SnapshotTableRequest) + +GetSnapshotRequest = _reflection.GeneratedProtocolMessageType('GetSnapshotRequest', (_message.Message,), dict( + DESCRIPTOR = _GETSNAPSHOTREQUEST, + __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' + , + __doc__ = """This is a private alpha release of Cloud Bigtable snapshots. This + feature is not currently available to most Cloud Bigtable customers. + This feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or + deprecation policy. + + Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot] + + + Attributes: + name: + The unique name of the requested snapshot. Values are of the + form ``projects//instances//clusters//snapshots/``. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetSnapshotRequest) + )) +_sym_db.RegisterMessage(GetSnapshotRequest) + +ListSnapshotsRequest = _reflection.GeneratedProtocolMessageType('ListSnapshotsRequest', (_message.Message,), dict( + DESCRIPTOR = _LISTSNAPSHOTSREQUEST, + __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' + , + __doc__ = """This is a private alpha release of Cloud Bigtable snapshots. This + feature is not currently available to most Cloud Bigtable customers. + This feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or + deprecation policy. + + Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] + + + Attributes: + parent: + The unique name of the cluster for which snapshots should be + listed. Values are of the form ``projects//instances/ + /clusters/``. Use `` = '-'`` to + list snapshots for all clusters in an instance, e.g., + ``projects//instances//clusters/-``. + page_size: + The maximum number of snapshots to return. + page_token: + The value of ``next_page_token`` returned by a previous call. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListSnapshotsRequest) + )) +_sym_db.RegisterMessage(ListSnapshotsRequest) + +ListSnapshotsResponse = _reflection.GeneratedProtocolMessageType('ListSnapshotsResponse', (_message.Message,), dict( + DESCRIPTOR = _LISTSNAPSHOTSRESPONSE, + __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' + , + __doc__ = """This is a private alpha release of Cloud Bigtable snapshots. This + feature is not currently available to most Cloud Bigtable customers. 
+ This feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or + deprecation policy. + + Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] + + + Attributes: + snapshots: + The snapshots present in the requested cluster. + next_page_token: + Set if not all snapshots could be returned in a single + response. Pass this value to ``page_token`` in another request + to get the next page of results. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListSnapshotsResponse) + )) +_sym_db.RegisterMessage(ListSnapshotsResponse) + +DeleteSnapshotRequest = _reflection.GeneratedProtocolMessageType('DeleteSnapshotRequest', (_message.Message,), dict( + DESCRIPTOR = _DELETESNAPSHOTREQUEST, + __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' + , + __doc__ = """This is a private alpha release of Cloud Bigtable snapshots. This + feature is not currently available to most Cloud Bigtable customers. + This feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or + deprecation policy. + + Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot] + + + Attributes: + name: + The unique name of the snapshot to be deleted. Values are of + the form ``projects//instances//clusters//snapshots/``. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteSnapshotRequest) + )) +_sym_db.RegisterMessage(DeleteSnapshotRequest) + +SnapshotTableMetadata = _reflection.GeneratedProtocolMessageType('SnapshotTableMetadata', (_message.Message,), dict( + DESCRIPTOR = _SNAPSHOTTABLEMETADATA, + __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' + , + __doc__ = """This is a private alpha release of Cloud Bigtable snapshots. This + feature is not currently available to most Cloud Bigtable customers. + This feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or + deprecation policy. + + The metadata for the Operation returned by SnapshotTable. + + + Attributes: + original_request: + The request that prompted the initiation of this SnapshotTable + operation. + request_time: + The time at which the original request was received. + finish_time: + The time at which the operation failed or was completed + successfully. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.SnapshotTableMetadata) + )) +_sym_db.RegisterMessage(SnapshotTableMetadata) + +CreateTableFromSnapshotMetadata = _reflection.GeneratedProtocolMessageType('CreateTableFromSnapshotMetadata', (_message.Message,), dict( + DESCRIPTOR = _CREATETABLEFROMSNAPSHOTMETADATA, + __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' + , + __doc__ = """This is a private alpha release of Cloud Bigtable snapshots. This + feature is not currently available to most Cloud Bigtable customers. + This feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or + deprecation policy. + + The metadata for the Operation returned by CreateTableFromSnapshot. + + + Attributes: + original_request: + The request that prompted the initiation of this + CreateTableFromSnapshot operation. 
+ request_time: + The time at which the original request was received. + finish_time: + The time at which the operation failed or was completed + successfully. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateTableFromSnapshotMetadata) + )) +_sym_db.RegisterMessage(CreateTableFromSnapshotMetadata) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\034com.google.bigtable.admin.v2B\027BigtableTableAdminProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2')) + +_BIGTABLETABLEADMIN = _descriptor.ServiceDescriptor( + name='BigtableTableAdmin', + full_name='google.bigtable.admin.v2.BigtableTableAdmin', + file=DESCRIPTOR, + index=0, + options=None, + serialized_start=2386, + serialized_end=4617, + methods=[ + _descriptor.MethodDescriptor( + name='CreateTable', + full_name='google.bigtable.admin.v2.BigtableTableAdmin.CreateTable', + index=0, + containing_service=None, + input_type=_CREATETABLEREQUEST, + output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._TABLE, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002/\"*/v2/{parent=projects/*/instances/*}/tables:\001*')), + ), + _descriptor.MethodDescriptor( + name='CreateTableFromSnapshot', + full_name='google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot', + index=1, + containing_service=None, + input_type=_CREATETABLEFROMSNAPSHOTREQUEST, + output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002B\"=/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot:\001*')), + ), + _descriptor.MethodDescriptor( + name='ListTables', + full_name='google.bigtable.admin.v2.BigtableTableAdmin.ListTables', + index=2, + containing_service=None, + input_type=_LISTTABLESREQUEST, + output_type=_LISTTABLESRESPONSE, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002,\022*/v2/{parent=projects/*/instances/*}/tables')), + ), + _descriptor.MethodDescriptor( + name='GetTable', + full_name='google.bigtable.admin.v2.BigtableTableAdmin.GetTable', + index=3, + containing_service=None, + input_type=_GETTABLEREQUEST, + output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._TABLE, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002,\022*/v2/{name=projects/*/instances/*/tables/*}')), + ), + _descriptor.MethodDescriptor( + name='DeleteTable', + full_name='google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable', + index=4, + containing_service=None, + input_type=_DELETETABLEREQUEST, + output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002,**/v2/{name=projects/*/instances/*/tables/*}')), + ), + _descriptor.MethodDescriptor( + name='ModifyColumnFamilies', + full_name='google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies', + index=5, + containing_service=None, + input_type=_MODIFYCOLUMNFAMILIESREQUEST, + output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._TABLE, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002D\"?/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies:\001*')), + ), + 
_descriptor.MethodDescriptor( + name='DropRowRange', + full_name='google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange', + index=6, + containing_service=None, + input_type=_DROPROWRANGEREQUEST, + output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002<\"7/v2/{name=projects/*/instances/*/tables/*}:dropRowRange:\001*')), + ), + _descriptor.MethodDescriptor( + name='GenerateConsistencyToken', + full_name='google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken', + index=7, + containing_service=None, + input_type=_GENERATECONSISTENCYTOKENREQUEST, + output_type=_GENERATECONSISTENCYTOKENRESPONSE, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002H\"C/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken:\001*')), + ), + _descriptor.MethodDescriptor( + name='CheckConsistency', + full_name='google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency', + index=8, + containing_service=None, + input_type=_CHECKCONSISTENCYREQUEST, + output_type=_CHECKCONSISTENCYRESPONSE, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002@\";/v2/{name=projects/*/instances/*/tables/*}:checkConsistency:\001*')), + ), + _descriptor.MethodDescriptor( + name='SnapshotTable', + full_name='google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable', + index=9, + containing_service=None, + input_type=_SNAPSHOTTABLEREQUEST, + output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\0028\"3/v2/{name=projects/*/instances/*/tables/*}:snapshot:\001*')), + ), + _descriptor.MethodDescriptor( + name='GetSnapshot', + full_name='google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot', + index=10, + containing_service=None, + input_type=_GETSNAPSHOTREQUEST, + output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._SNAPSHOT, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002:\0228/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}')), + ), + _descriptor.MethodDescriptor( + name='ListSnapshots', + full_name='google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots', + index=11, + containing_service=None, + input_type=_LISTSNAPSHOTSREQUEST, + output_type=_LISTSNAPSHOTSRESPONSE, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002:\0228/v2/{parent=projects/*/instances/*/clusters/*}/snapshots')), + ), + _descriptor.MethodDescriptor( + name='DeleteSnapshot', + full_name='google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot', + index=12, + containing_service=None, + input_type=_DELETESNAPSHOTREQUEST, + output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002:*8/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}')), + ), +]) +_sym_db.RegisterServiceDescriptor(_BIGTABLETABLEADMIN) + +DESCRIPTOR.services_by_name['BigtableTableAdmin'] = _BIGTABLETABLEADMIN + +# @@protoc_insertion_point(module_scope) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py new file mode 100644 index 000000000000..4a5adbb6bc30 --- /dev/null +++ 
b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py @@ -0,0 +1,310 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +import grpc + +from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2 as google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2 +from google.cloud.bigtable_admin_v2.proto import table_pb2 as google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2 +from google.longrunning import operations_pb2 as google_dot_longrunning_dot_operations__pb2 +from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 + + +class BigtableTableAdminStub(object): + """Service for creating, configuring, and deleting Cloud Bigtable tables. + + + Provides access to the table schemas only, not the data stored within + the tables. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. + """ + self.CreateTable = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/CreateTable', + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Table.FromString, + ) + self.CreateTableFromSnapshot = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/CreateTableFromSnapshot', + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableFromSnapshotRequest.SerializeToString, + response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, + ) + self.ListTables = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/ListTables', + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesResponse.FromString, + ) + self.GetTable = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/GetTable', + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetTableRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Table.FromString, + ) + self.DeleteTable = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable', + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteTableRequest.SerializeToString, + response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ) + self.ModifyColumnFamilies = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/ModifyColumnFamilies', + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ModifyColumnFamiliesRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Table.FromString, + ) + self.DropRowRange = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/DropRowRange', + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DropRowRangeRequest.SerializeToString, + response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ) + 
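        # [Editorial note -- not part of the generated stub.] Each attribute set in this
        # constructor is a callable bound to one unary-unary RPC of the
        # google.bigtable.admin.v2.BigtableTableAdmin service. A hedged usage sketch,
        # assuming a channel created with suitable credentials; the request values are
        # illustrative only:
        #
        #   from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2
        #   channel = grpc.secure_channel('bigtableadmin.googleapis.com:443', creds)
        #   stub = BigtableTableAdminStub(channel)
        #   response = stub.ListTables(bigtable_table_admin_pb2.ListTablesRequest(
        #       parent='projects/my-project/instances/my-instance'))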
self.GenerateConsistencyToken = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/GenerateConsistencyToken', + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenResponse.FromString, + ) + self.CheckConsistency = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/CheckConsistency', + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyResponse.FromString, + ) + self.SnapshotTable = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/SnapshotTable', + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.SnapshotTableRequest.SerializeToString, + response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, + ) + self.GetSnapshot = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/GetSnapshot', + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetSnapshotRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Snapshot.FromString, + ) + self.ListSnapshots = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/ListSnapshots', + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsResponse.FromString, + ) + self.DeleteSnapshot = channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/DeleteSnapshot', + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteSnapshotRequest.SerializeToString, + response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ) + + +class BigtableTableAdminServicer(object): + """Service for creating, configuring, and deleting Cloud Bigtable tables. + + + Provides access to the table schemas only, not the data stored within + the tables. + """ + + def CreateTable(self, request, context): + """Creates a new table in the specified instance. + The table can be created with a full set of initial column families, + specified in the request. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def CreateTableFromSnapshot(self, request, context): + """This is a private alpha release of Cloud Bigtable snapshots. This feature + is not currently available to most Cloud Bigtable customers. This feature + might be changed in backward-incompatible ways and is not recommended for + production use. It is not subject to any SLA or deprecation policy. + + Creates a new table from the specified snapshot. The target table must + not exist. The snapshot and the table must be in the same instance. 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ListTables(self, request, context): + """Lists all tables served from a specified instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetTable(self, request, context): + """Gets metadata information about the specified table. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def DeleteTable(self, request, context): + """Permanently deletes a specified table and all of its data. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ModifyColumnFamilies(self, request, context): + """Performs a series of column family modifications on the specified table. + Either all or none of the modifications will occur before this method + returns, but data requests received prior to that point may see a table + where only some modifications have taken effect. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def DropRowRange(self, request, context): + """Permanently drop/delete a row range from a specified table. The request can + specify whether to delete all rows in a table, or only those that match a + particular prefix. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GenerateConsistencyToken(self, request, context): + """This is a private alpha release of Cloud Bigtable replication. This feature + is not currently available to most Cloud Bigtable customers. This feature + might be changed in backward-incompatible ways and is not recommended for + production use. It is not subject to any SLA or deprecation policy. + + Generates a consistency token for a Table, which can be used in + CheckConsistency to check whether mutations to the table that finished + before this call started have been replicated. The tokens will be available + for 90 days. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def CheckConsistency(self, request, context): + """This is a private alpha release of Cloud Bigtable replication. This feature + is not currently available to most Cloud Bigtable customers. This feature + might be changed in backward-incompatible ways and is not recommended for + production use. It is not subject to any SLA or deprecation policy. + + Checks replication consistency based on a consistency token, that is, if + replication has caught up based on the conditions specified in the token + and the check request. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def SnapshotTable(self, request, context): + """This is a private alpha release of Cloud Bigtable snapshots. This feature + is not currently available to most Cloud Bigtable customers. 
This feature + might be changed in backward-incompatible ways and is not recommended for + production use. It is not subject to any SLA or deprecation policy. + + Creates a new snapshot in the specified cluster from the specified + source table. The cluster and the table must be in the same instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetSnapshot(self, request, context): + """This is a private alpha release of Cloud Bigtable snapshots. This feature + is not currently available to most Cloud Bigtable customers. This feature + might be changed in backward-incompatible ways and is not recommended for + production use. It is not subject to any SLA or deprecation policy. + + Gets metadata information about the specified snapshot. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ListSnapshots(self, request, context): + """This is a private alpha release of Cloud Bigtable snapshots. This feature + is not currently available to most Cloud Bigtable customers. This feature + might be changed in backward-incompatible ways and is not recommended for + production use. It is not subject to any SLA or deprecation policy. + + Lists all snapshots associated with the specified cluster. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def DeleteSnapshot(self, request, context): + """This is a private alpha release of Cloud Bigtable snapshots. This feature + is not currently available to most Cloud Bigtable customers. This feature + might be changed in backward-incompatible ways and is not recommended for + production use. It is not subject to any SLA or deprecation policy. + + Permanently deletes the specified snapshot. 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_BigtableTableAdminServicer_to_server(servicer, server): + rpc_method_handlers = { + 'CreateTable': grpc.unary_unary_rpc_method_handler( + servicer.CreateTable, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Table.SerializeToString, + ), + 'CreateTableFromSnapshot': grpc.unary_unary_rpc_method_handler( + servicer.CreateTableFromSnapshot, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableFromSnapshotRequest.FromString, + response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, + ), + 'ListTables': grpc.unary_unary_rpc_method_handler( + servicer.ListTables, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesResponse.SerializeToString, + ), + 'GetTable': grpc.unary_unary_rpc_method_handler( + servicer.GetTable, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetTableRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Table.SerializeToString, + ), + 'DeleteTable': grpc.unary_unary_rpc_method_handler( + servicer.DeleteTable, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteTableRequest.FromString, + response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ), + 'ModifyColumnFamilies': grpc.unary_unary_rpc_method_handler( + servicer.ModifyColumnFamilies, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ModifyColumnFamiliesRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Table.SerializeToString, + ), + 'DropRowRange': grpc.unary_unary_rpc_method_handler( + servicer.DropRowRange, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DropRowRangeRequest.FromString, + response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ), + 'GenerateConsistencyToken': grpc.unary_unary_rpc_method_handler( + servicer.GenerateConsistencyToken, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenResponse.SerializeToString, + ), + 'CheckConsistency': grpc.unary_unary_rpc_method_handler( + servicer.CheckConsistency, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyResponse.SerializeToString, + ), + 'SnapshotTable': grpc.unary_unary_rpc_method_handler( + servicer.SnapshotTable, + 
request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.SnapshotTableRequest.FromString, + response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, + ), + 'GetSnapshot': grpc.unary_unary_rpc_method_handler( + servicer.GetSnapshot, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetSnapshotRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Snapshot.SerializeToString, + ), + 'ListSnapshots': grpc.unary_unary_rpc_method_handler( + servicer.ListSnapshots, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsResponse.SerializeToString, + ), + 'DeleteSnapshot': grpc.unary_unary_rpc_method_handler( + servicer.DeleteSnapshot, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteSnapshotRequest.FromString, + response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'google.bigtable.admin.v2.BigtableTableAdmin', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/common_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2.py similarity index 69% rename from packages/google-cloud-bigtable/google/cloud/bigtable/_generated/common_pb2.py rename to packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2.py index 298130452971..0b3427c58d93 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/common_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2.py @@ -1,5 +1,5 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: google/bigtable/admin/v2/common.proto +# source: google/cloud/bigtable/admin_v2/proto/common.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) @@ -19,13 +19,12 @@ DESCRIPTOR = _descriptor.FileDescriptor( - name='google/bigtable/admin/v2/common.proto', + name='google/cloud/bigtable/admin_v2/proto/common.proto', package='google.bigtable.admin.v2', syntax='proto3', - serialized_pb=_b('\n%google/bigtable/admin/v2/common.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x1fgoogle/protobuf/timestamp.proto*=\n\x0bStorageType\x12\x1c\n\x18STORAGE_TYPE_UNSPECIFIED\x10\x00\x12\x07\n\x03SSD\x10\x01\x12\x07\n\x03HDD\x10\x02\x42-\n\x1c\x63om.google.bigtable.admin.v2B\x0b\x43ommonProtoP\x01\x62\x06proto3') + serialized_pb=_b('\n1google/cloud/bigtable/admin_v2/proto/common.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x1fgoogle/protobuf/timestamp.proto*=\n\x0bStorageType\x12\x1c\n\x18STORAGE_TYPE_UNSPECIFIED\x10\x00\x12\x07\n\x03SSD\x10\x01\x12\x07\n\x03HDD\x10\x02\x42\xae\x01\n\x1c\x63om.google.bigtable.admin.v2B\x0b\x43ommonProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2b\x06proto3') , dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,]) -_sym_db.RegisterFileDescriptor(DESCRIPTOR) _STORAGETYPE = _descriptor.EnumDescriptor( name='StorageType', @@ -48,8 +47,8 @@ ], containing_type=None, options=None, - serialized_start=130, - serialized_end=191, + serialized_start=142, + serialized_end=203, ) _sym_db.RegisterEnumDescriptor(_STORAGETYPE) @@ -60,8 +59,9 @@ DESCRIPTOR.enum_types_by_name['StorageType'] = _STORAGETYPE +_sym_db.RegisterFileDescriptor(DESCRIPTOR) DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\034com.google.bigtable.admin.v2B\013CommonProtoP\001')) +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\034com.google.bigtable.admin.v2B\013CommonProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2')) # @@protoc_insertion_point(module_scope) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2_grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2_grpc.py new file mode 100644 index 000000000000..a89435267cb2 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2_grpc.py @@ -0,0 +1,3 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +import grpc + diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py new file mode 100644 index 000000000000..eebcdc895abe --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py @@ -0,0 +1,588 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/cloud/bigtable/admin_v2/proto/instance.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 +from google.cloud.bigtable_admin_v2.proto import common_pb2 as google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_common__pb2 + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='google/cloud/bigtable/admin_v2/proto/instance.proto', + package='google.bigtable.admin.v2', + syntax='proto3', + serialized_pb=_b('\n3google/cloud/bigtable/admin_v2/proto/instance.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x31google/cloud/bigtable/admin_v2/proto/common.proto\"\x83\x03\n\x08Instance\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12\x37\n\x05state\x18\x03 \x01(\x0e\x32(.google.bigtable.admin.v2.Instance.State\x12\x35\n\x04type\x18\x04 \x01(\x0e\x32\'.google.bigtable.admin.v2.Instance.Type\x12>\n\x06labels\x18\x05 \x03(\x0b\x32..google.bigtable.admin.v2.Instance.LabelsEntry\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"5\n\x05State\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\t\n\x05READY\x10\x01\x12\x0c\n\x08\x43REATING\x10\x02\"=\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x0e\n\nPRODUCTION\x10\x01\x12\x0f\n\x0b\x44\x45VELOPMENT\x10\x02\"\x8e\x02\n\x07\x43luster\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x10\n\x08location\x18\x02 \x01(\t\x12\x36\n\x05state\x18\x03 \x01(\x0e\x32\'.google.bigtable.admin.v2.Cluster.State\x12\x13\n\x0bserve_nodes\x18\x04 \x01(\x05\x12\x43\n\x14\x64\x65\x66\x61ult_storage_type\x18\x05 \x01(\x0e\x32%.google.bigtable.admin.v2.StorageType\"Q\n\x05State\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\t\n\x05READY\x10\x01\x12\x0c\n\x08\x43REATING\x10\x02\x12\x0c\n\x08RESIZING\x10\x03\x12\x0c\n\x08\x44ISABLED\x10\x04\"\x82\x03\n\nAppProfile\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04\x65tag\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t\x12g\n\x1dmulti_cluster_routing_use_any\x18\x05 \x01(\x0b\x32>.google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAnyH\x00\x12[\n\x16single_cluster_routing\x18\x06 \x01(\x0b\x32\x39.google.bigtable.admin.v2.AppProfile.SingleClusterRoutingH\x00\x1a\x1b\n\x19MultiClusterRoutingUseAny\x1aN\n\x14SingleClusterRouting\x12\x12\n\ncluster_id\x18\x01 \x01(\t\x12\"\n\x1a\x61llow_transactional_writes\x18\x02 \x01(\x08\x42\x10\n\x0erouting_policyB\xb0\x01\n\x1c\x63om.google.bigtable.admin.v2B\rInstanceProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2b\x06proto3') + , + dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_common__pb2.DESCRIPTOR,]) + + + +_INSTANCE_STATE = _descriptor.EnumDescriptor( + name='State', + full_name='google.bigtable.admin.v2.Instance.State', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='STATE_NOT_KNOWN', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + 
name='READY', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='CREATING', index=2, number=2, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=434, + serialized_end=487, +) +_sym_db.RegisterEnumDescriptor(_INSTANCE_STATE) + +_INSTANCE_TYPE = _descriptor.EnumDescriptor( + name='Type', + full_name='google.bigtable.admin.v2.Instance.Type', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='TYPE_UNSPECIFIED', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='PRODUCTION', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='DEVELOPMENT', index=2, number=2, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=489, + serialized_end=550, +) +_sym_db.RegisterEnumDescriptor(_INSTANCE_TYPE) + +_CLUSTER_STATE = _descriptor.EnumDescriptor( + name='State', + full_name='google.bigtable.admin.v2.Cluster.State', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='STATE_NOT_KNOWN', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='READY', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='CREATING', index=2, number=2, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='RESIZING', index=3, number=3, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='DISABLED', index=4, number=4, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=742, + serialized_end=823, +) +_sym_db.RegisterEnumDescriptor(_CLUSTER_STATE) + + +_INSTANCE_LABELSENTRY = _descriptor.Descriptor( + name='LabelsEntry', + full_name='google.bigtable.admin.v2.Instance.LabelsEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='google.bigtable.admin.v2.Instance.LabelsEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='value', full_name='google.bigtable.admin.v2.Instance.LabelsEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=387, + serialized_end=432, +) + +_INSTANCE = _descriptor.Descriptor( + name='Instance', + full_name='google.bigtable.admin.v2.Instance', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.Instance.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='display_name', 
full_name='google.bigtable.admin.v2.Instance.display_name', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='state', full_name='google.bigtable.admin.v2.Instance.state', index=2, + number=3, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='type', full_name='google.bigtable.admin.v2.Instance.type', index=3, + number=4, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='labels', full_name='google.bigtable.admin.v2.Instance.labels', index=4, + number=5, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[_INSTANCE_LABELSENTRY, ], + enum_types=[ + _INSTANCE_STATE, + _INSTANCE_TYPE, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=163, + serialized_end=550, +) + + +_CLUSTER = _descriptor.Descriptor( + name='Cluster', + full_name='google.bigtable.admin.v2.Cluster', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.Cluster.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='location', full_name='google.bigtable.admin.v2.Cluster.location', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='state', full_name='google.bigtable.admin.v2.Cluster.state', index=2, + number=3, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='serve_nodes', full_name='google.bigtable.admin.v2.Cluster.serve_nodes', index=3, + number=4, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='default_storage_type', full_name='google.bigtable.admin.v2.Cluster.default_storage_type', index=4, + number=5, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + 
enum_types=[ + _CLUSTER_STATE, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=553, + serialized_end=823, +) + + +_APPPROFILE_MULTICLUSTERROUTINGUSEANY = _descriptor.Descriptor( + name='MultiClusterRoutingUseAny', + full_name='google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1087, + serialized_end=1114, +) + +_APPPROFILE_SINGLECLUSTERROUTING = _descriptor.Descriptor( + name='SingleClusterRouting', + full_name='google.bigtable.admin.v2.AppProfile.SingleClusterRouting', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='cluster_id', full_name='google.bigtable.admin.v2.AppProfile.SingleClusterRouting.cluster_id', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='allow_transactional_writes', full_name='google.bigtable.admin.v2.AppProfile.SingleClusterRouting.allow_transactional_writes', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1116, + serialized_end=1194, +) + +_APPPROFILE = _descriptor.Descriptor( + name='AppProfile', + full_name='google.bigtable.admin.v2.AppProfile', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.AppProfile.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='etag', full_name='google.bigtable.admin.v2.AppProfile.etag', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='description', full_name='google.bigtable.admin.v2.AppProfile.description', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='multi_cluster_routing_use_any', full_name='google.bigtable.admin.v2.AppProfile.multi_cluster_routing_use_any', index=3, + number=5, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + 
name='single_cluster_routing', full_name='google.bigtable.admin.v2.AppProfile.single_cluster_routing', index=4, + number=6, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[_APPPROFILE_MULTICLUSTERROUTINGUSEANY, _APPPROFILE_SINGLECLUSTERROUTING, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='routing_policy', full_name='google.bigtable.admin.v2.AppProfile.routing_policy', + index=0, containing_type=None, fields=[]), + ], + serialized_start=826, + serialized_end=1212, +) + +_INSTANCE_LABELSENTRY.containing_type = _INSTANCE +_INSTANCE.fields_by_name['state'].enum_type = _INSTANCE_STATE +_INSTANCE.fields_by_name['type'].enum_type = _INSTANCE_TYPE +_INSTANCE.fields_by_name['labels'].message_type = _INSTANCE_LABELSENTRY +_INSTANCE_STATE.containing_type = _INSTANCE +_INSTANCE_TYPE.containing_type = _INSTANCE +_CLUSTER.fields_by_name['state'].enum_type = _CLUSTER_STATE +_CLUSTER.fields_by_name['default_storage_type'].enum_type = google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_common__pb2._STORAGETYPE +_CLUSTER_STATE.containing_type = _CLUSTER +_APPPROFILE_MULTICLUSTERROUTINGUSEANY.containing_type = _APPPROFILE +_APPPROFILE_SINGLECLUSTERROUTING.containing_type = _APPPROFILE +_APPPROFILE.fields_by_name['multi_cluster_routing_use_any'].message_type = _APPPROFILE_MULTICLUSTERROUTINGUSEANY +_APPPROFILE.fields_by_name['single_cluster_routing'].message_type = _APPPROFILE_SINGLECLUSTERROUTING +_APPPROFILE.oneofs_by_name['routing_policy'].fields.append( + _APPPROFILE.fields_by_name['multi_cluster_routing_use_any']) +_APPPROFILE.fields_by_name['multi_cluster_routing_use_any'].containing_oneof = _APPPROFILE.oneofs_by_name['routing_policy'] +_APPPROFILE.oneofs_by_name['routing_policy'].fields.append( + _APPPROFILE.fields_by_name['single_cluster_routing']) +_APPPROFILE.fields_by_name['single_cluster_routing'].containing_oneof = _APPPROFILE.oneofs_by_name['routing_policy'] +DESCRIPTOR.message_types_by_name['Instance'] = _INSTANCE +DESCRIPTOR.message_types_by_name['Cluster'] = _CLUSTER +DESCRIPTOR.message_types_by_name['AppProfile'] = _APPPROFILE +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +Instance = _reflection.GeneratedProtocolMessageType('Instance', (_message.Message,), dict( + + LabelsEntry = _reflection.GeneratedProtocolMessageType('LabelsEntry', (_message.Message,), dict( + DESCRIPTOR = _INSTANCE_LABELSENTRY, + __module__ = 'google.cloud.bigtable.admin_v2.proto.instance_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Instance.LabelsEntry) + )) + , + DESCRIPTOR = _INSTANCE, + __module__ = 'google.cloud.bigtable.admin_v2.proto.instance_pb2' + , + __doc__ = """A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and + the resources that serve them. All tables in an instance are served from + a single [Cluster][google.bigtable.admin.v2.Cluster]. + + + Attributes: + name: + (``OutputOnly``) The unique name of the instance. Values are + of the form + ``projects//instances/[a-z][a-z0-9\\-]+[a-z0-9]``. + display_name: + The descriptive name for this instance as it appears in UIs. + Can be changed at any time, but should be kept globally unique + to avoid confusion. + state: + (``OutputOnly``) The current state of the instance. 
+ type: + The type of the instance. Defaults to ``PRODUCTION``. + labels: + Labels are a flexible and lightweight mechanism for organizing + cloud resources into groups that reflect a customer's + organizational needs and deployment strategies. They can be + used to filter resources and aggregate metrics. - Label keys + must be between 1 and 63 characters long and must conform + to the regular expression: + ``[\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}``. - Label values + must be between 0 and 63 characters long and must conform + to the regular expression: ``[\p{Ll}\p{Lo}\p{N}_-]{0,63}``. - + No more than 64 labels can be associated with a given + resource. - Keys and values must both be under 128 bytes. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Instance) + )) +_sym_db.RegisterMessage(Instance) +_sym_db.RegisterMessage(Instance.LabelsEntry) + +Cluster = _reflection.GeneratedProtocolMessageType('Cluster', (_message.Message,), dict( + DESCRIPTOR = _CLUSTER, + __module__ = 'google.cloud.bigtable.admin_v2.proto.instance_pb2' + , + __doc__ = """A resizable group of nodes in a particular cloud location, capable of + serving all [Tables][google.bigtable.admin.v2.Table] in the parent + [Instance][google.bigtable.admin.v2.Instance]. + + + Attributes: + name: + (``OutputOnly``) The unique name of the cluster. Values are of + the form ``projects//instances//clusters/[a + -z][-a-z0-9]*``. + location: + (``CreationOnly``) The location where this cluster's nodes and + storage reside. For best performance, clients should be + located as close as possible to this cluster. Currently only + zones are supported, so values should be of the form + ``projects//locations/``. + state: + (``OutputOnly``) The current state of the cluster. + serve_nodes: + The number of nodes allocated to this cluster. More nodes + enable higher throughput and more consistent performance. + default_storage_type: + (``CreationOnly``) The type of storage used by this cluster to + serve its parent instance's tables, unless explicitly + overridden. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Cluster) + )) +_sym_db.RegisterMessage(Cluster) + +AppProfile = _reflection.GeneratedProtocolMessageType('AppProfile', (_message.Message,), dict( + + MultiClusterRoutingUseAny = _reflection.GeneratedProtocolMessageType('MultiClusterRoutingUseAny', (_message.Message,), dict( + DESCRIPTOR = _APPPROFILE_MULTICLUSTERROUTINGUSEANY, + __module__ = 'google.cloud.bigtable.admin_v2.proto.instance_pb2' + , + __doc__ = """Read/write requests may be routed to any cluster in the instance, and + will fail over to another cluster in the event of transient errors or + delays. Choosing this option sacrifices read-your-writes consistency to + improve availability. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny) + )) + , + + SingleClusterRouting = _reflection.GeneratedProtocolMessageType('SingleClusterRouting', (_message.Message,), dict( + DESCRIPTOR = _APPPROFILE_SINGLECLUSTERROUTING, + __module__ = 'google.cloud.bigtable.admin_v2.proto.instance_pb2' + , + __doc__ = """Unconditionally routes all read/write requests to a specific cluster. + This option preserves read-your-writes consistency, but does not improve + availability. + + + Attributes: + cluster_id: + The cluster to which read/write requests should be routed. 
+ allow_transactional_writes: + Whether or not ``CheckAndMutateRow`` and + ``ReadModifyWriteRow`` requests are allowed by this app + profile. It is unsafe to send these requests to the same + table/row/column in multiple clusters. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.AppProfile.SingleClusterRouting) + )) + , + DESCRIPTOR = _APPPROFILE, + __module__ = 'google.cloud.bigtable.admin_v2.proto.instance_pb2' + , + __doc__ = """This is a private alpha release of Cloud Bigtable replication. This + feature is not currently available to most Cloud Bigtable customers. + This feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or + deprecation policy. + + A configuration object describing how Cloud Bigtable should treat + traffic from a particular end user application. + + + Attributes: + name: + (``OutputOnly``) The unique name of the app profile. Values + are of the form + ``projects//instances//appProfiles/[_a- + zA-Z0-9][-_.a-zA-Z0-9]*``. + etag: + Strongly validated etag for optimistic concurrency control. + Preserve the value returned from ``GetAppProfile`` when + calling ``UpdateAppProfile`` to fail the request if there has + been a modification in the mean time. The ``update_mask`` of + the request need not include ``etag`` for this protection to + apply. See `Wikipedia + `__ and `RFC 7232 + `__ for more + details. + description: + Optional long form description of the use case for this + AppProfile. + routing_policy: + The routing policy for all read/write requests which use this + app profile. A value must be explicitly set. + multi_cluster_routing_use_any: + Use a multi-cluster routing policy that may pick any cluster. + single_cluster_routing: + Use a single-cluster routing policy. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.AppProfile) + )) +_sym_db.RegisterMessage(AppProfile) +_sym_db.RegisterMessage(AppProfile.MultiClusterRoutingUseAny) +_sym_db.RegisterMessage(AppProfile.SingleClusterRouting) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\034com.google.bigtable.admin.v2B\rInstanceProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2')) +_INSTANCE_LABELSENTRY.has_options = True +_INSTANCE_LABELSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) +# @@protoc_insertion_point(module_scope) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2_grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2_grpc.py new file mode 100644 index 000000000000..a89435267cb2 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2_grpc.py @@ -0,0 +1,3 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +import grpc + diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py new file mode 100644 index 000000000000..07b1fa0e9e8d --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py @@ -0,0 +1,776 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/cloud/bigtable/admin_v2/proto/table.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 +from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 +from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='google/cloud/bigtable/admin_v2/proto/table.proto', + package='google.bigtable.admin.v2', + syntax='proto3', + serialized_pb=_b('\n0google/cloud/bigtable/admin_v2/proto/table.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\xcb\x06\n\x05Table\x12\x0c\n\x04name\x18\x01 \x01(\t\x12J\n\x0e\x63luster_states\x18\x02 \x03(\x0b\x32\x32.google.bigtable.admin.v2.Table.ClusterStatesEntry\x12L\n\x0f\x63olumn_families\x18\x03 \x03(\x0b\x32\x33.google.bigtable.admin.v2.Table.ColumnFamiliesEntry\x12I\n\x0bgranularity\x18\x04 \x01(\x0e\x32\x34.google.bigtable.admin.v2.Table.TimestampGranularity\x1a\xe2\x01\n\x0c\x43lusterState\x12X\n\x11replication_state\x18\x01 \x01(\x0e\x32=.google.bigtable.admin.v2.Table.ClusterState.ReplicationState\"x\n\x10ReplicationState\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\x10\n\x0cINITIALIZING\x10\x01\x12\x17\n\x13PLANNED_MAINTENANCE\x10\x02\x12\x19\n\x15UNPLANNED_MAINTENANCE\x10\x03\x12\t\n\x05READY\x10\x04\x1a\x62\n\x12\x43lusterStatesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12;\n\x05value\x18\x02 \x01(\x0b\x32,.google.bigtable.admin.v2.Table.ClusterState:\x02\x38\x01\x1a]\n\x13\x43olumnFamiliesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x35\n\x05value\x18\x02 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamily:\x02\x38\x01\"I\n\x14TimestampGranularity\x12%\n!TIMESTAMP_GRANULARITY_UNSPECIFIED\x10\x00\x12\n\n\x06MILLIS\x10\x01\"\\\n\x04View\x12\x14\n\x10VIEW_UNSPECIFIED\x10\x00\x12\r\n\tNAME_ONLY\x10\x01\x12\x0f\n\x0bSCHEMA_VIEW\x10\x02\x12\x14\n\x10REPLICATION_VIEW\x10\x03\x12\x08\n\x04\x46ULL\x10\x04\"A\n\x0c\x43olumnFamily\x12\x31\n\x07gc_rule\x18\x01 \x01(\x0b\x32 .google.bigtable.admin.v2.GcRule\"\xd5\x02\n\x06GcRule\x12\x1a\n\x10max_num_versions\x18\x01 \x01(\x05H\x00\x12,\n\x07max_age\x18\x02 \x01(\x0b\x32\x19.google.protobuf.DurationH\x00\x12\x45\n\x0cintersection\x18\x03 \x01(\x0b\x32-.google.bigtable.admin.v2.GcRule.IntersectionH\x00\x12\x37\n\x05union\x18\x04 \x01(\x0b\x32&.google.bigtable.admin.v2.GcRule.UnionH\x00\x1a?\n\x0cIntersection\x12/\n\x05rules\x18\x01 \x03(\x0b\x32 .google.bigtable.admin.v2.GcRule\x1a\x38\n\x05Union\x12/\n\x05rules\x18\x01 \x03(\x0b\x32 .google.bigtable.admin.v2.GcRuleB\x06\n\x04rule\"\xcf\x02\n\x08Snapshot\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x35\n\x0csource_table\x18\x02 \x01(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12\x17\n\x0f\x64\x61ta_size_bytes\x18\x03 \x01(\x03\x12/\n\x0b\x63reate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x64\x65lete_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x37\n\x05state\x18\x06 \x01(\x0e\x32(.google.bigtable.admin.v2.Snapshot.State\x12\x13\n\x0b\x64\x65scription\x18\x07 
\x01(\t\"5\n\x05State\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\t\n\x05READY\x10\x01\x12\x0c\n\x08\x43REATING\x10\x02\x42\xad\x01\n\x1c\x63om.google.bigtable.admin.v2B\nTableProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2b\x06proto3') + , + dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_protobuf_dot_duration__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,]) + + + +_TABLE_CLUSTERSTATE_REPLICATIONSTATE = _descriptor.EnumDescriptor( + name='ReplicationState', + full_name='google.bigtable.admin.v2.Table.ClusterState.ReplicationState', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='STATE_NOT_KNOWN', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='INITIALIZING', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='PLANNED_MAINTENANCE', index=2, number=2, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='UNPLANNED_MAINTENANCE', index=3, number=3, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='READY', index=4, number=4, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=533, + serialized_end=653, +) +_sym_db.RegisterEnumDescriptor(_TABLE_CLUSTERSTATE_REPLICATIONSTATE) + +_TABLE_TIMESTAMPGRANULARITY = _descriptor.EnumDescriptor( + name='TimestampGranularity', + full_name='google.bigtable.admin.v2.Table.TimestampGranularity', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='TIMESTAMP_GRANULARITY_UNSPECIFIED', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='MILLIS', index=1, number=1, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=850, + serialized_end=923, +) +_sym_db.RegisterEnumDescriptor(_TABLE_TIMESTAMPGRANULARITY) + +_TABLE_VIEW = _descriptor.EnumDescriptor( + name='View', + full_name='google.bigtable.admin.v2.Table.View', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='VIEW_UNSPECIFIED', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='NAME_ONLY', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SCHEMA_VIEW', index=2, number=2, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='REPLICATION_VIEW', index=3, number=3, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='FULL', index=4, number=4, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=925, + serialized_end=1017, +) +_sym_db.RegisterEnumDescriptor(_TABLE_VIEW) + +_SNAPSHOT_STATE = _descriptor.EnumDescriptor( + name='State', + full_name='google.bigtable.admin.v2.Snapshot.State', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='STATE_NOT_KNOWN', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='READY', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='CREATING', index=2, number=2, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=1713, + serialized_end=1766, +) +_sym_db.RegisterEnumDescriptor(_SNAPSHOT_STATE) + + +_TABLE_CLUSTERSTATE = _descriptor.Descriptor( + 
name='ClusterState', + full_name='google.bigtable.admin.v2.Table.ClusterState', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='replication_state', full_name='google.bigtable.admin.v2.Table.ClusterState.replication_state', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _TABLE_CLUSTERSTATE_REPLICATIONSTATE, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=427, + serialized_end=653, +) + +_TABLE_CLUSTERSTATESENTRY = _descriptor.Descriptor( + name='ClusterStatesEntry', + full_name='google.bigtable.admin.v2.Table.ClusterStatesEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='google.bigtable.admin.v2.Table.ClusterStatesEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='value', full_name='google.bigtable.admin.v2.Table.ClusterStatesEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=655, + serialized_end=753, +) + +_TABLE_COLUMNFAMILIESENTRY = _descriptor.Descriptor( + name='ColumnFamiliesEntry', + full_name='google.bigtable.admin.v2.Table.ColumnFamiliesEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='google.bigtable.admin.v2.Table.ColumnFamiliesEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='value', full_name='google.bigtable.admin.v2.Table.ColumnFamiliesEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=755, + serialized_end=848, +) + +_TABLE = _descriptor.Descriptor( + name='Table', + full_name='google.bigtable.admin.v2.Table', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.Table.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, 
default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='cluster_states', full_name='google.bigtable.admin.v2.Table.cluster_states', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='column_families', full_name='google.bigtable.admin.v2.Table.column_families', index=2, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='granularity', full_name='google.bigtable.admin.v2.Table.granularity', index=3, + number=4, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[_TABLE_CLUSTERSTATE, _TABLE_CLUSTERSTATESENTRY, _TABLE_COLUMNFAMILIESENTRY, ], + enum_types=[ + _TABLE_TIMESTAMPGRANULARITY, + _TABLE_VIEW, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=174, + serialized_end=1017, +) + + +_COLUMNFAMILY = _descriptor.Descriptor( + name='ColumnFamily', + full_name='google.bigtable.admin.v2.ColumnFamily', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='gc_rule', full_name='google.bigtable.admin.v2.ColumnFamily.gc_rule', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1019, + serialized_end=1084, +) + + +_GCRULE_INTERSECTION = _descriptor.Descriptor( + name='Intersection', + full_name='google.bigtable.admin.v2.GcRule.Intersection', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='rules', full_name='google.bigtable.admin.v2.GcRule.Intersection.rules', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1299, + serialized_end=1362, +) + +_GCRULE_UNION = _descriptor.Descriptor( + name='Union', + full_name='google.bigtable.admin.v2.GcRule.Union', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='rules', full_name='google.bigtable.admin.v2.GcRule.Union.rules', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + 
options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1364, + serialized_end=1420, +) + +_GCRULE = _descriptor.Descriptor( + name='GcRule', + full_name='google.bigtable.admin.v2.GcRule', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='max_num_versions', full_name='google.bigtable.admin.v2.GcRule.max_num_versions', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='max_age', full_name='google.bigtable.admin.v2.GcRule.max_age', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='intersection', full_name='google.bigtable.admin.v2.GcRule.intersection', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='union', full_name='google.bigtable.admin.v2.GcRule.union', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[_GCRULE_INTERSECTION, _GCRULE_UNION, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='rule', full_name='google.bigtable.admin.v2.GcRule.rule', + index=0, containing_type=None, fields=[]), + ], + serialized_start=1087, + serialized_end=1428, +) + + +_SNAPSHOT = _descriptor.Descriptor( + name='Snapshot', + full_name='google.bigtable.admin.v2.Snapshot', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='google.bigtable.admin.v2.Snapshot.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='source_table', full_name='google.bigtable.admin.v2.Snapshot.source_table', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='data_size_bytes', full_name='google.bigtable.admin.v2.Snapshot.data_size_bytes', index=2, + number=3, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='create_time', full_name='google.bigtable.admin.v2.Snapshot.create_time', index=3, + number=4, type=11, 
cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='delete_time', full_name='google.bigtable.admin.v2.Snapshot.delete_time', index=4, + number=5, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='state', full_name='google.bigtable.admin.v2.Snapshot.state', index=5, + number=6, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='description', full_name='google.bigtable.admin.v2.Snapshot.description', index=6, + number=7, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _SNAPSHOT_STATE, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1431, + serialized_end=1766, +) + +_TABLE_CLUSTERSTATE.fields_by_name['replication_state'].enum_type = _TABLE_CLUSTERSTATE_REPLICATIONSTATE +_TABLE_CLUSTERSTATE.containing_type = _TABLE +_TABLE_CLUSTERSTATE_REPLICATIONSTATE.containing_type = _TABLE_CLUSTERSTATE +_TABLE_CLUSTERSTATESENTRY.fields_by_name['value'].message_type = _TABLE_CLUSTERSTATE +_TABLE_CLUSTERSTATESENTRY.containing_type = _TABLE +_TABLE_COLUMNFAMILIESENTRY.fields_by_name['value'].message_type = _COLUMNFAMILY +_TABLE_COLUMNFAMILIESENTRY.containing_type = _TABLE +_TABLE.fields_by_name['cluster_states'].message_type = _TABLE_CLUSTERSTATESENTRY +_TABLE.fields_by_name['column_families'].message_type = _TABLE_COLUMNFAMILIESENTRY +_TABLE.fields_by_name['granularity'].enum_type = _TABLE_TIMESTAMPGRANULARITY +_TABLE_TIMESTAMPGRANULARITY.containing_type = _TABLE +_TABLE_VIEW.containing_type = _TABLE +_COLUMNFAMILY.fields_by_name['gc_rule'].message_type = _GCRULE +_GCRULE_INTERSECTION.fields_by_name['rules'].message_type = _GCRULE +_GCRULE_INTERSECTION.containing_type = _GCRULE +_GCRULE_UNION.fields_by_name['rules'].message_type = _GCRULE +_GCRULE_UNION.containing_type = _GCRULE +_GCRULE.fields_by_name['max_age'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION +_GCRULE.fields_by_name['intersection'].message_type = _GCRULE_INTERSECTION +_GCRULE.fields_by_name['union'].message_type = _GCRULE_UNION +_GCRULE.oneofs_by_name['rule'].fields.append( + _GCRULE.fields_by_name['max_num_versions']) +_GCRULE.fields_by_name['max_num_versions'].containing_oneof = _GCRULE.oneofs_by_name['rule'] +_GCRULE.oneofs_by_name['rule'].fields.append( + _GCRULE.fields_by_name['max_age']) +_GCRULE.fields_by_name['max_age'].containing_oneof = _GCRULE.oneofs_by_name['rule'] +_GCRULE.oneofs_by_name['rule'].fields.append( + _GCRULE.fields_by_name['intersection']) +_GCRULE.fields_by_name['intersection'].containing_oneof = _GCRULE.oneofs_by_name['rule'] +_GCRULE.oneofs_by_name['rule'].fields.append( + _GCRULE.fields_by_name['union']) +_GCRULE.fields_by_name['union'].containing_oneof = _GCRULE.oneofs_by_name['rule'] 
+_SNAPSHOT.fields_by_name['source_table'].message_type = _TABLE +_SNAPSHOT.fields_by_name['create_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_SNAPSHOT.fields_by_name['delete_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_SNAPSHOT.fields_by_name['state'].enum_type = _SNAPSHOT_STATE +_SNAPSHOT_STATE.containing_type = _SNAPSHOT +DESCRIPTOR.message_types_by_name['Table'] = _TABLE +DESCRIPTOR.message_types_by_name['ColumnFamily'] = _COLUMNFAMILY +DESCRIPTOR.message_types_by_name['GcRule'] = _GCRULE +DESCRIPTOR.message_types_by_name['Snapshot'] = _SNAPSHOT +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +Table = _reflection.GeneratedProtocolMessageType('Table', (_message.Message,), dict( + + ClusterState = _reflection.GeneratedProtocolMessageType('ClusterState', (_message.Message,), dict( + DESCRIPTOR = _TABLE_CLUSTERSTATE, + __module__ = 'google.cloud.bigtable.admin_v2.proto.table_pb2' + , + __doc__ = """This is a private alpha release of Cloud Bigtable replication. This + feature is not currently available to most Cloud Bigtable customers. + This feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or + deprecation policy. + + The state of a table's data in a particular cluster. + + + Attributes: + replication_state: + (``OutputOnly``) The state of replication for the table in + this cluster. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table.ClusterState) + )) + , + + ClusterStatesEntry = _reflection.GeneratedProtocolMessageType('ClusterStatesEntry', (_message.Message,), dict( + DESCRIPTOR = _TABLE_CLUSTERSTATESENTRY, + __module__ = 'google.cloud.bigtable.admin_v2.proto.table_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table.ClusterStatesEntry) + )) + , + + ColumnFamiliesEntry = _reflection.GeneratedProtocolMessageType('ColumnFamiliesEntry', (_message.Message,), dict( + DESCRIPTOR = _TABLE_COLUMNFAMILIESENTRY, + __module__ = 'google.cloud.bigtable.admin_v2.proto.table_pb2' + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table.ColumnFamiliesEntry) + )) + , + DESCRIPTOR = _TABLE, + __module__ = 'google.cloud.bigtable.admin_v2.proto.table_pb2' + , + __doc__ = """A collection of user data indexed by row, column, and timestamp. Each + table is served using the resources of its parent cluster. + + + Attributes: + name: + (``OutputOnly``) The unique name of the table. Values are of + the form ``projects//instances//tables/[_a- + zA-Z0-9][-_.a-zA-Z0-9]*``. Views: ``NAME_ONLY``, + ``SCHEMA_VIEW``, ``FULL`` + cluster_states: + This is a private alpha release of Cloud Bigtable replication. + This feature is not currently available to most Cloud Bigtable + customers. This feature might be changed in backward- + incompatible ways and is not recommended for production use. + It is not subject to any SLA or deprecation policy. + (``OutputOnly``) Map from cluster ID to per-cluster table + state. If it could not be determined whether or not the table + has data in a particular cluster (for example, if its zone is + unavailable), then there will be an entry for the cluster with + UNKNOWN ``replication_status``. Views: ``FULL`` + column_families: + (``CreationOnly``) The column families configured for this + table, mapped by column family ID. Views: ``SCHEMA_VIEW``, + ``FULL`` + granularity: + (``CreationOnly``) The granularity (e.g. ``MILLIS``, + ``MICROS``) at which timestamps are stored in this table. 
+ Timestamps not matching the granularity will be rejected. If + unspecified at creation time, the value will be set to + ``MILLIS``. Views: ``SCHEMA_VIEW``, ``FULL`` + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table) + )) +_sym_db.RegisterMessage(Table) +_sym_db.RegisterMessage(Table.ClusterState) +_sym_db.RegisterMessage(Table.ClusterStatesEntry) +_sym_db.RegisterMessage(Table.ColumnFamiliesEntry) + +ColumnFamily = _reflection.GeneratedProtocolMessageType('ColumnFamily', (_message.Message,), dict( + DESCRIPTOR = _COLUMNFAMILY, + __module__ = 'google.cloud.bigtable.admin_v2.proto.table_pb2' + , + __doc__ = """A set of columns within a table which share a common configuration. + + + Attributes: + gc_rule: + Garbage collection rule specified as a protobuf. Must + serialize to at most 500 bytes. NOTE: Garbage collection + executes opportunistically in the background, and so it's + possible for reads to return a cell even if it matches the + active GC expression for its family. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ColumnFamily) + )) +_sym_db.RegisterMessage(ColumnFamily) + +GcRule = _reflection.GeneratedProtocolMessageType('GcRule', (_message.Message,), dict( + + Intersection = _reflection.GeneratedProtocolMessageType('Intersection', (_message.Message,), dict( + DESCRIPTOR = _GCRULE_INTERSECTION, + __module__ = 'google.cloud.bigtable.admin_v2.proto.table_pb2' + , + __doc__ = """A GcRule which deletes cells matching all of the given rules. + + + Attributes: + rules: + Only delete cells which would be deleted by every element of + ``rules``. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GcRule.Intersection) + )) + , + + Union = _reflection.GeneratedProtocolMessageType('Union', (_message.Message,), dict( + DESCRIPTOR = _GCRULE_UNION, + __module__ = 'google.cloud.bigtable.admin_v2.proto.table_pb2' + , + __doc__ = """A GcRule which deletes cells matching any of the given rules. + + + Attributes: + rules: + Delete cells which would be deleted by any element of + ``rules``. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GcRule.Union) + )) + , + DESCRIPTOR = _GCRULE, + __module__ = 'google.cloud.bigtable.admin_v2.proto.table_pb2' + , + __doc__ = """Rule for determining which cells to delete during garbage collection. + + + Attributes: + rule: + Garbage collection rules. + max_num_versions: + Delete all cells in a column except the most recent N. + max_age: + Delete cells in a column older than the given age. Values must + be at least one millisecond, and will be truncated to + microsecond granularity. + intersection: + Delete cells that would be deleted by every nested rule. + union: + Delete cells that would be deleted by any nested rule. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GcRule) + )) +_sym_db.RegisterMessage(GcRule) +_sym_db.RegisterMessage(GcRule.Intersection) +_sym_db.RegisterMessage(GcRule.Union) + +Snapshot = _reflection.GeneratedProtocolMessageType('Snapshot', (_message.Message,), dict( + DESCRIPTOR = _SNAPSHOT, + __module__ = 'google.cloud.bigtable.admin_v2.proto.table_pb2' + , + __doc__ = """This is a private alpha release of Cloud Bigtable snapshots. This + feature is not currently available to most Cloud Bigtable customers. + This feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or + deprecation policy. + + A snapshot of a table at a particular time. 
A snapshot can be used as a + checkpoint for data restoration or a data source for a new table. + + + Attributes: + name: + (``OutputOnly``) The unique name of the snapshot. Values are + of the form ``projects//instances//clusters + //snapshots/``. + source_table: + (``OutputOnly``) The source table at the time the snapshot was + taken. + data_size_bytes: + (``OutputOnly``) The size of the data in the source table at + the time the snapshot was taken. In some cases, this value may + be computed asynchronously via a background process and a + placeholder of 0 will be used in the meantime. + create_time: + (``OutputOnly``) The time when the snapshot is created. + delete_time: + (``OutputOnly``) The time when the snapshot will be deleted. + The maximum amount of time a snapshot can stay active is 365 + days. If 'ttl' is not specified, the default maximum of 365 + days will be used. + state: + (``OutputOnly``) The current state of the snapshot. + description: + (``OutputOnly``) Description of the snapshot. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Snapshot) + )) +_sym_db.RegisterMessage(Snapshot) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\034com.google.bigtable.admin.v2B\nTableProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2')) +_TABLE_CLUSTERSTATESENTRY.has_options = True +_TABLE_CLUSTERSTATESENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) +_TABLE_COLUMNFAMILIESENTRY.has_options = True +_TABLE_COLUMNFAMILIESENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) +# @@protoc_insertion_point(module_scope) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2_grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2_grpc.py new file mode 100644 index 000000000000..a89435267cb2 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2_grpc.py @@ -0,0 +1,3 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +import grpc + diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types.py new file mode 100644 index 000000000000..b796a7c9bf39 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types.py @@ -0,0 +1,61 @@ +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import +import sys + +from google.api_core.protobuf_helpers import get_messages + +from google.api import http_pb2 +from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2 +from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2 +from google.cloud.bigtable_admin_v2.proto import instance_pb2 +from google.cloud.bigtable_admin_v2.proto import table_pb2 +from google.iam.v1 import iam_policy_pb2 +from google.iam.v1 import policy_pb2 +from google.iam.v1.logging import audit_data_pb2 +from google.longrunning import operations_pb2 +from google.protobuf import any_pb2 +from google.protobuf import descriptor_pb2 +from google.protobuf import duration_pb2 +from google.protobuf import empty_pb2 +from google.protobuf import field_mask_pb2 +from google.protobuf import timestamp_pb2 +from google.rpc import status_pb2 + +names = [] +for module in ( + http_pb2, + bigtable_instance_admin_pb2, + bigtable_table_admin_pb2, + instance_pb2, + table_pb2, + iam_policy_pb2, + policy_pb2, + audit_data_pb2, + operations_pb2, + any_pb2, + descriptor_pb2, + duration_pb2, + empty_pb2, + field_mask_pb2, + timestamp_pb2, + status_pb2, +): + for name, message in get_messages(module).items(): + message.__module__ = 'google.cloud.bigtable_admin_v2.types' + setattr(sys.modules[__name__], name, message) + names.append(name) + +__all__ = tuple(sorted(names)) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py new file mode 100644 index 000000000000..751a63d9d40e --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py @@ -0,0 +1,28 @@ +# Copyright 2017 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import + +from google.cloud.bigtable_v2 import types +from google.cloud.bigtable_v2.gapic import bigtable_client + + +class BigtableClient(bigtable_client.BigtableClient): + __doc__ = bigtable_client.BigtableClient.__doc__ + + +__all__ = ( + 'types', + 'BigtableClient', +) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py new file mode 100644 index 000000000000..60eb063290b7 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py @@ -0,0 +1,564 @@ +# Copyright 2017 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Accesses the google.bigtable.v2 Bigtable API.""" + +import pkg_resources + +import google.api_core.gapic_v1.client_info +import google.api_core.gapic_v1.config +import google.api_core.gapic_v1.method +import google.api_core.grpc_helpers +import google.api_core.path_template + +from google.cloud.bigtable_v2.gapic import bigtable_client_config +from google.cloud.bigtable_v2.proto import bigtable_pb2 +from google.cloud.bigtable_v2.proto import data_pb2 + +_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( + 'google-cloud-bigtable', ).version + + +class BigtableClient(object): + """Service for reading from and writing to existing Bigtable tables.""" + + SERVICE_ADDRESS = 'bigtable.googleapis.com:443' + """The default address of the service.""" + + # The scopes needed to make gRPC calls to all of the methods defined in + # this service + _DEFAULT_SCOPES = ( + 'https://www.googleapis.com/auth/bigtable.data', + 'https://www.googleapis.com/auth/bigtable.data.readonly', + 'https://www.googleapis.com/auth/cloud-bigtable.data', + 'https://www.googleapis.com/auth/cloud-bigtable.data.readonly', + 'https://www.googleapis.com/auth/cloud-platform', + 'https://www.googleapis.com/auth/cloud-platform.read-only', + ) + + # The name of the interface for this client. This is the key used to find + # method configuration in the client_config dictionary. + _INTERFACE_NAME = 'google.bigtable.v2.Bigtable' + + @classmethod + def table_path(cls, project, instance, table): + """Return a fully-qualified table string.""" + return google.api_core.path_template.expand( + 'projects/{project}/instances/{instance}/tables/{table}', + project=project, + instance=instance, + table=table, + ) + + def __init__(self, + channel=None, + credentials=None, + client_config=bigtable_client_config.config, + client_info=None): + """Constructor. + + Args: + channel (grpc.Channel): A ``Channel`` instance through + which to make calls. This argument is mutually exclusive + with ``credentials``; providing both will raise an exception. + credentials (google.auth.credentials.Credentials): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If none + are specified, the client will attempt to ascertain the + credentials from the environment. + client_config (dict): A dictionary of call options for each + method. If not specified, the default configuration is used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + """ + # If both `channel` and `credentials` are specified, raise an + # exception (channels come with credentials baked in already). + if channel is not None and credentials is not None: + raise ValueError( + 'The `channel` and `credentials` arguments to {} are mutually ' + 'exclusive.'.format(self.__class__.__name__), ) + + # Create the channel. 
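+        # (When no channel is supplied, ``google.api_core.grpc_helpers.create_channel``
+        # builds a secure gRPC channel to ``SERVICE_ADDRESS``, using the provided
+        # credentials -- or application default credentials -- scoped to
+        # ``_DEFAULT_SCOPES``.)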
+ if channel is None: + channel = google.api_core.grpc_helpers.create_channel( + self.SERVICE_ADDRESS, + credentials=credentials, + scopes=self._DEFAULT_SCOPES, + ) + + # Create the gRPC stubs. + self.bigtable_stub = (bigtable_pb2.BigtableStub(channel)) + + if client_info is None: + client_info = ( + google.api_core.gapic_v1.client_info.DEFAULT_CLIENT_INFO) + client_info.gapic_version = _GAPIC_LIBRARY_VERSION + + # Parse out the default settings for retry and timeout for each RPC + # from the client configuration. + # (Ordinarily, these are the defaults specified in the `*_config.py` + # file next to this one.) + method_configs = google.api_core.gapic_v1.config.parse_method_configs( + client_config['interfaces'][self._INTERFACE_NAME], ) + + # Write the "inner API call" methods to the class. + # These are wrapped versions of the gRPC stub methods, with retry and + # timeout configuration applied, called by the public methods on + # this class. + self._read_rows = google.api_core.gapic_v1.method.wrap_method( + self.bigtable_stub.ReadRows, + default_retry=method_configs['ReadRows'].retry, + default_timeout=method_configs['ReadRows'].timeout, + client_info=client_info, + ) + self._sample_row_keys = google.api_core.gapic_v1.method.wrap_method( + self.bigtable_stub.SampleRowKeys, + default_retry=method_configs['SampleRowKeys'].retry, + default_timeout=method_configs['SampleRowKeys'].timeout, + client_info=client_info, + ) + self._mutate_row = google.api_core.gapic_v1.method.wrap_method( + self.bigtable_stub.MutateRow, + default_retry=method_configs['MutateRow'].retry, + default_timeout=method_configs['MutateRow'].timeout, + client_info=client_info, + ) + self._mutate_rows = google.api_core.gapic_v1.method.wrap_method( + self.bigtable_stub.MutateRows, + default_retry=method_configs['MutateRows'].retry, + default_timeout=method_configs['MutateRows'].timeout, + client_info=client_info, + ) + self._check_and_mutate_row = google.api_core.gapic_v1.method.wrap_method( + self.bigtable_stub.CheckAndMutateRow, + default_retry=method_configs['CheckAndMutateRow'].retry, + default_timeout=method_configs['CheckAndMutateRow'].timeout, + client_info=client_info, + ) + self._read_modify_write_row = google.api_core.gapic_v1.method.wrap_method( + self.bigtable_stub.ReadModifyWriteRow, + default_retry=method_configs['ReadModifyWriteRow'].retry, + default_timeout=method_configs['ReadModifyWriteRow'].timeout, + client_info=client_info, + ) + + # Service calls + def read_rows(self, + table_name, + app_profile_id=None, + rows=None, + filter_=None, + rows_limit=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT): + """ + Streams back the contents of all requested rows in key order, optionally + applying the same Reader filter to each. Depending on their size, + rows and cells may be broken up across multiple responses, but + atomicity of each row will still be preserved. See the + ReadRowsResponse documentation for details. + + Example: + >>> from google.cloud import bigtable_v2 + >>> + >>> client = bigtable_v2.BigtableClient() + >>> + >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + >>> + >>> for element in client.read_rows(table_name): + ... # process element + ... pass + + Args: + table_name (str): The unique name of the table from which to read. + Values are of the form + ``projects//instances//tables/
``. + app_profile_id (str): This is a private alpha release of Cloud Bigtable replication. This feature + is not currently available to most Cloud Bigtable customers. This feature + might be changed in backward-incompatible ways and is not recommended for + production use. It is not subject to any SLA or deprecation policy. + + This value specifies routing for replication. If not specified, the + \"default\" application profile will be used. + rows (Union[dict, ~google.cloud.bigtable_v2.types.RowSet]): The row keys and/or ranges to read. If not specified, reads from all rows. + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_v2.types.RowSet` + filter_ (Union[dict, ~google.cloud.bigtable_v2.types.RowFilter]): The filter to apply to the contents of the specified row(s). If unset, + reads the entirety of each row. + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_v2.types.RowFilter` + rows_limit (long): The read will terminate after committing to N rows' worth of results. The + default (zero) is to return all results. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will not + be retried. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + + Returns: + Iterable[~google.cloud.bigtable_v2.types.ReadRowsResponse]. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + request = bigtable_pb2.ReadRowsRequest( + table_name=table_name, + app_profile_id=app_profile_id, + rows=rows, + filter=filter_, + rows_limit=rows_limit, + ) + return self._read_rows(request, retry=retry, timeout=timeout) + + def sample_row_keys(self, + table_name, + app_profile_id=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT): + """ + Returns a sample of row keys in the table. The returned row keys will + delimit contiguous sections of the table of approximately equal size, + which can be used to break up the data for distributed tasks like + mapreduces. + + Example: + >>> from google.cloud import bigtable_v2 + >>> + >>> client = bigtable_v2.BigtableClient() + >>> + >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + >>> + >>> for element in client.sample_row_keys(table_name): + ... # process element + ... pass + + Args: + table_name (str): The unique name of the table from which to sample row keys. + Values are of the form + ``projects//instances//tables/
``. + app_profile_id (str): This is a private alpha release of Cloud Bigtable replication. This feature + is not currently available to most Cloud Bigtable customers. This feature + might be changed in backward-incompatible ways and is not recommended for + production use. It is not subject to any SLA or deprecation policy. + + This value specifies routing for replication. If not specified, the + \"default\" application profile will be used. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will not + be retried. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + + Returns: + Iterable[~google.cloud.bigtable_v2.types.SampleRowKeysResponse]. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + request = bigtable_pb2.SampleRowKeysRequest( + table_name=table_name, + app_profile_id=app_profile_id, + ) + return self._sample_row_keys(request, retry=retry, timeout=timeout) + + def mutate_row(self, + table_name, + row_key, + mutations, + app_profile_id=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT): + """ + Mutates a row atomically. Cells already present in the row are left + unchanged unless explicitly changed by ``mutation``. + + Example: + >>> from google.cloud import bigtable_v2 + >>> + >>> client = bigtable_v2.BigtableClient() + >>> + >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + >>> row_key = b'' + >>> mutations = [] + >>> + >>> response = client.mutate_row(table_name, row_key, mutations) + + Args: + table_name (str): The unique name of the table to which the mutation should be applied. + Values are of the form + ``projects//instances//tables/
``. + row_key (bytes): The key of the row to which the mutation should be applied. + mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Changes to be atomically applied to the specified row. Entries are applied + in order, meaning that earlier mutations can be masked by later ones. + Must contain at least one entry and at most 100000. + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_v2.types.Mutation` + app_profile_id (str): This is a private alpha release of Cloud Bigtable replication. This feature + is not currently available to most Cloud Bigtable customers. This feature + might be changed in backward-incompatible ways and is not recommended for + production use. It is not subject to any SLA or deprecation policy. + + This value specifies routing for replication. If not specified, the + \"default\" application profile will be used. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will not + be retried. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + + Returns: + A :class:`~google.cloud.bigtable_v2.types.MutateRowResponse` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + request = bigtable_pb2.MutateRowRequest( + table_name=table_name, + row_key=row_key, + mutations=mutations, + app_profile_id=app_profile_id, + ) + return self._mutate_row(request, retry=retry, timeout=timeout) + + def mutate_rows(self, + table_name, + entries, + app_profile_id=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT): + """ + Mutates multiple rows in a batch. Each individual row is mutated + atomically as in MutateRow, but the entire batch is not executed + atomically. + + Example: + >>> from google.cloud import bigtable_v2 + >>> + >>> client = bigtable_v2.BigtableClient() + >>> + >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + >>> entries = [] + >>> + >>> for element in client.mutate_rows(table_name, entries): + ... # process element + ... pass + + Args: + table_name (str): The unique name of the table to which the mutations should be applied. + entries (list[Union[dict, ~google.cloud.bigtable_v2.types.Entry]]): The row keys and corresponding mutations to be applied in bulk. + Each entry is applied as an atomic mutation, but the entries may be + applied in arbitrary order (even between entries for the same row). + At least one entry must be specified, and in total the entries can + contain at most 100000 mutations. + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_v2.types.Entry` + app_profile_id (str): This is a private alpha release of Cloud Bigtable replication. This feature + is not currently available to most Cloud Bigtable customers. This feature + might be changed in backward-incompatible ways and is not recommended for + production use. It is not subject to any SLA or deprecation policy. + + This value specifies routing for replication. If not specified, the + \"default\" application profile will be used. 
+ retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will not + be retried. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + + Returns: + Iterable[~google.cloud.bigtable_v2.types.MutateRowsResponse]. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + request = bigtable_pb2.MutateRowsRequest( + table_name=table_name, + entries=entries, + app_profile_id=app_profile_id, + ) + return self._mutate_rows(request, retry=retry, timeout=timeout) + + def check_and_mutate_row(self, + table_name, + row_key, + app_profile_id=None, + predicate_filter=None, + true_mutations=None, + false_mutations=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT): + """ + Mutates a row atomically based on the output of a predicate Reader filter. + + Example: + >>> from google.cloud import bigtable_v2 + >>> + >>> client = bigtable_v2.BigtableClient() + >>> + >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + >>> row_key = b'' + >>> + >>> response = client.check_and_mutate_row(table_name, row_key) + + Args: + table_name (str): The unique name of the table to which the conditional mutation should be + applied. + Values are of the form + ``projects//instances//tables/
``. + row_key (bytes): The key of the row to which the conditional mutation should be applied. + app_profile_id (str): This is a private alpha release of Cloud Bigtable replication. This feature + is not currently available to most Cloud Bigtable customers. This feature + might be changed in backward-incompatible ways and is not recommended for + production use. It is not subject to any SLA or deprecation policy. + + This value specifies routing for replication. If not specified, the + \"default\" application profile will be used. + predicate_filter (Union[dict, ~google.cloud.bigtable_v2.types.RowFilter]): The filter to be applied to the contents of the specified row. Depending + on whether or not any results are yielded, either ``true_mutations`` or + ``false_mutations`` will be executed. If unset, checks that the row contains + any values at all. + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_v2.types.RowFilter` + true_mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Changes to be atomically applied to the specified row if ``predicate_filter`` + yields at least one cell when applied to ``row_key``. Entries are applied in + order, meaning that earlier mutations can be masked by later ones. + Must contain at least one entry if ``false_mutations`` is empty, and at most + 100000. + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_v2.types.Mutation` + false_mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Changes to be atomically applied to the specified row if ``predicate_filter`` + does not yield any cells when applied to ``row_key``. Entries are applied in + order, meaning that earlier mutations can be masked by later ones. + Must contain at least one entry if ``true_mutations`` is empty, and at most + 100000. + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_v2.types.Mutation` + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will not + be retried. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + + Returns: + A :class:`~google.cloud.bigtable_v2.types.CheckAndMutateRowResponse` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + request = bigtable_pb2.CheckAndMutateRowRequest( + table_name=table_name, + row_key=row_key, + app_profile_id=app_profile_id, + predicate_filter=predicate_filter, + true_mutations=true_mutations, + false_mutations=false_mutations, + ) + return self._check_and_mutate_row( + request, retry=retry, timeout=timeout) + + def read_modify_write_row(self, + table_name, + row_key, + rules, + app_profile_id=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT): + """ + Modifies a row atomically on the server. The method reads the latest + existing timestamp and value from the specified columns and writes a new + entry based on pre-defined read/modify/write rules. 
The new value for the + timestamp is the greater of the existing timestamp or the current server + time. The method returns the new contents of all modified cells. + + Example: + >>> from google.cloud import bigtable_v2 + >>> + >>> client = bigtable_v2.BigtableClient() + >>> + >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + >>> row_key = b'' + >>> rules = [] + >>> + >>> response = client.read_modify_write_row(table_name, row_key, rules) + + Args: + table_name (str): The unique name of the table to which the read/modify/write rules should be + applied. + Values are of the form + ``projects//instances//tables/
``. + row_key (bytes): The key of the row to which the read/modify/write rules should be applied. + rules (list[Union[dict, ~google.cloud.bigtable_v2.types.ReadModifyWriteRule]]): Rules specifying how the specified row's contents are to be transformed + into writes. Entries are applied in order, meaning that earlier rules will + affect the results of later ones. + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_v2.types.ReadModifyWriteRule` + app_profile_id (str): This is a private alpha release of Cloud Bigtable replication. This feature + is not currently available to most Cloud Bigtable customers. This feature + might be changed in backward-incompatible ways and is not recommended for + production use. It is not subject to any SLA or deprecation policy. + + This value specifies routing for replication. If not specified, the + \"default\" application profile will be used. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will not + be retried. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + + Returns: + A :class:`~google.cloud.bigtable_v2.types.ReadModifyWriteRowResponse` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + request = bigtable_pb2.ReadModifyWriteRowRequest( + table_name=table_name, + row_key=row_key, + rules=rules, + app_profile_id=app_profile_id, + ) + return self._read_modify_write_row( + request, retry=retry, timeout=timeout) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client_config.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client_config.py new file mode 100644 index 000000000000..d87d2776f583 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client_config.py @@ -0,0 +1,53 @@ +config = { + "interfaces": { + "google.bigtable.v2.Bigtable": { + "retry_codes": { + "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], + "non_idempotent": [] + }, + "retry_params": { + "default": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 20000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 20000, + "total_timeout_millis": 600000 + } + }, + "methods": { + "ReadRows": { + "timeout_millis": 60000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "default" + }, + "SampleRowKeys": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "default" + }, + "MutateRow": { + "timeout_millis": 60000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "default" + }, + "MutateRows": { + "timeout_millis": 60000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "default" + }, + "CheckAndMutateRow": { + "timeout_millis": 60000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "default" + }, + "ReadModifyWriteRow": { + "timeout_millis": 60000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "default" + } + } + } + } +} diff --git 
a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py new file mode 100644 index 000000000000..87755c24c433 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py @@ -0,0 +1,1467 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/cloud/bigtable_v2/proto/bigtable.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 +from google.cloud.bigtable_v2.proto import data_pb2 as google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2 +from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2 +from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2 + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='google/cloud/bigtable_v2/proto/bigtable.proto', + package='google.bigtable.v2', + syntax='proto3', + serialized_pb=_b('\n-google/cloud/bigtable_v2/proto/bigtable.proto\x12\x12google.bigtable.v2\x1a\x1cgoogle/api/annotations.proto\x1a)google/cloud/bigtable_v2/proto/data.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x17google/rpc/status.proto\"\xaa\x01\n\x0fReadRowsRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x05 \x01(\t\x12(\n\x04rows\x18\x02 \x01(\x0b\x32\x1a.google.bigtable.v2.RowSet\x12-\n\x06\x66ilter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x12\n\nrows_limit\x18\x04 \x01(\x03\"\xf8\x02\n\x10ReadRowsResponse\x12>\n\x06\x63hunks\x18\x01 \x03(\x0b\x32..google.bigtable.v2.ReadRowsResponse.CellChunk\x12\x1c\n\x14last_scanned_row_key\x18\x02 \x01(\x0c\x1a\x85\x02\n\tCellChunk\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x31\n\x0b\x66\x61mily_name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\tqualifier\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.BytesValue\x12\x18\n\x10timestamp_micros\x18\x04 \x01(\x03\x12\x0e\n\x06labels\x18\x05 \x03(\t\x12\r\n\x05value\x18\x06 \x01(\x0c\x12\x12\n\nvalue_size\x18\x07 \x01(\x05\x12\x13\n\treset_row\x18\x08 \x01(\x08H\x00\x12\x14\n\ncommit_row\x18\t \x01(\x08H\x00\x42\x0c\n\nrow_status\"B\n\x14SampleRowKeysRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t\">\n\x15SampleRowKeysResponse\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x14\n\x0coffset_bytes\x18\x02 \x01(\x03\"\x80\x01\n\x10MutateRowRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x04 \x01(\t\x12\x0f\n\x07row_key\x18\x02 \x01(\x0c\x12/\n\tmutations\x18\x03 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\"\x13\n\x11MutateRowResponse\"\xc8\x01\n\x11MutateRowsRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x03 \x01(\t\x12<\n\x07\x65ntries\x18\x02 \x03(\x0b\x32+.google.bigtable.v2.MutateRowsRequest.Entry\x1aI\n\x05\x45ntry\x12\x0f\n\x07row_key\x18\x01 
\x01(\x0c\x12/\n\tmutations\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\"\x8f\x01\n\x12MutateRowsResponse\x12=\n\x07\x65ntries\x18\x01 \x03(\x0b\x32,.google.bigtable.v2.MutateRowsResponse.Entry\x1a:\n\x05\x45ntry\x12\r\n\x05index\x18\x01 \x01(\x03\x12\"\n\x06status\x18\x02 \x01(\x0b\x32\x12.google.rpc.Status\"\xfd\x01\n\x18\x43heckAndMutateRowRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x07 \x01(\t\x12\x0f\n\x07row_key\x18\x02 \x01(\x0c\x12\x37\n\x10predicate_filter\x18\x06 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x34\n\x0etrue_mutations\x18\x04 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\x12\x35\n\x0f\x66\x61lse_mutations\x18\x05 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\"6\n\x19\x43heckAndMutateRowResponse\x12\x19\n\x11predicate_matched\x18\x01 \x01(\x08\"\x90\x01\n\x19ReadModifyWriteRowRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x04 \x01(\t\x12\x0f\n\x07row_key\x18\x02 \x01(\x0c\x12\x36\n\x05rules\x18\x03 \x03(\x0b\x32\'.google.bigtable.v2.ReadModifyWriteRule\"B\n\x1aReadModifyWriteRowResponse\x12$\n\x03row\x18\x01 \x01(\x0b\x32\x17.google.bigtable.v2.Row2\xad\x08\n\x08\x42igtable\x12\x9d\x01\n\x08ReadRows\x12#.google.bigtable.v2.ReadRowsRequest\x1a$.google.bigtable.v2.ReadRowsResponse\"D\x82\xd3\xe4\x93\x02>\"9/v2/{table_name=projects/*/instances/*/tables/*}:readRows:\x01*0\x01\x12\xae\x01\n\rSampleRowKeys\x12(.google.bigtable.v2.SampleRowKeysRequest\x1a).google.bigtable.v2.SampleRowKeysResponse\"F\x82\xd3\xe4\x93\x02@\x12>/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys0\x01\x12\x9f\x01\n\tMutateRow\x12$.google.bigtable.v2.MutateRowRequest\x1a%.google.bigtable.v2.MutateRowResponse\"E\x82\xd3\xe4\x93\x02?\":/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow:\x01*\x12\xa5\x01\n\nMutateRows\x12%.google.bigtable.v2.MutateRowsRequest\x1a&.google.bigtable.v2.MutateRowsResponse\"F\x82\xd3\xe4\x93\x02@\";/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows:\x01*0\x01\x12\xbf\x01\n\x11\x43heckAndMutateRow\x12,.google.bigtable.v2.CheckAndMutateRowRequest\x1a-.google.bigtable.v2.CheckAndMutateRowResponse\"M\x82\xd3\xe4\x93\x02G\"B/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow:\x01*\x12\xc3\x01\n\x12ReadModifyWriteRow\x12-.google.bigtable.v2.ReadModifyWriteRowRequest\x1a..google.bigtable.v2.ReadModifyWriteRowResponse\"N\x82\xd3\xe4\x93\x02H\"C/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow:\x01*B\x9b\x01\n\x16\x63om.google.bigtable.v2B\rBigtableProtoP\x01Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2b\x06proto3') + , + dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2.DESCRIPTOR,google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR,google_dot_rpc_dot_status__pb2.DESCRIPTOR,]) +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + + + + +_READROWSREQUEST = _descriptor.Descriptor( + name='ReadRowsRequest', + full_name='google.bigtable.v2.ReadRowsRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='table_name', full_name='google.bigtable.v2.ReadRowsRequest.table_name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + 
_descriptor.FieldDescriptor( + name='app_profile_id', full_name='google.bigtable.v2.ReadRowsRequest.app_profile_id', index=1, + number=5, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='rows', full_name='google.bigtable.v2.ReadRowsRequest.rows', index=2, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='filter', full_name='google.bigtable.v2.ReadRowsRequest.filter', index=3, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='rows_limit', full_name='google.bigtable.v2.ReadRowsRequest.rows_limit', index=4, + number=4, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=200, + serialized_end=370, +) + + +_READROWSRESPONSE_CELLCHUNK = _descriptor.Descriptor( + name='CellChunk', + full_name='google.bigtable.v2.ReadRowsResponse.CellChunk', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='row_key', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.row_key', index=0, + number=1, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='family_name', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.family_name', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='qualifier', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.qualifier', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='timestamp_micros', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.timestamp_micros', index=3, + number=4, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='labels', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.labels', index=4, + number=5, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.value', index=5, 
+ number=6, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='value_size', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.value_size', index=6, + number=7, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reset_row', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.reset_row', index=7, + number=8, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='commit_row', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.commit_row', index=8, + number=9, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='row_status', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.row_status', + index=0, containing_type=None, fields=[]), + ], + serialized_start=488, + serialized_end=749, +) + +_READROWSRESPONSE = _descriptor.Descriptor( + name='ReadRowsResponse', + full_name='google.bigtable.v2.ReadRowsResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='chunks', full_name='google.bigtable.v2.ReadRowsResponse.chunks', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='last_scanned_row_key', full_name='google.bigtable.v2.ReadRowsResponse.last_scanned_row_key', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_READROWSRESPONSE_CELLCHUNK, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=373, + serialized_end=749, +) + + +_SAMPLEROWKEYSREQUEST = _descriptor.Descriptor( + name='SampleRowKeysRequest', + full_name='google.bigtable.v2.SampleRowKeysRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='table_name', full_name='google.bigtable.v2.SampleRowKeysRequest.table_name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='app_profile_id', full_name='google.bigtable.v2.SampleRowKeysRequest.app_profile_id', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, 
containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=751, + serialized_end=817, +) + + +_SAMPLEROWKEYSRESPONSE = _descriptor.Descriptor( + name='SampleRowKeysResponse', + full_name='google.bigtable.v2.SampleRowKeysResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='row_key', full_name='google.bigtable.v2.SampleRowKeysResponse.row_key', index=0, + number=1, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='offset_bytes', full_name='google.bigtable.v2.SampleRowKeysResponse.offset_bytes', index=1, + number=2, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=819, + serialized_end=881, +) + + +_MUTATEROWREQUEST = _descriptor.Descriptor( + name='MutateRowRequest', + full_name='google.bigtable.v2.MutateRowRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='table_name', full_name='google.bigtable.v2.MutateRowRequest.table_name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='app_profile_id', full_name='google.bigtable.v2.MutateRowRequest.app_profile_id', index=1, + number=4, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='row_key', full_name='google.bigtable.v2.MutateRowRequest.row_key', index=2, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='mutations', full_name='google.bigtable.v2.MutateRowRequest.mutations', index=3, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=884, + serialized_end=1012, +) + + +_MUTATEROWRESPONSE = _descriptor.Descriptor( + name='MutateRowResponse', + full_name='google.bigtable.v2.MutateRowResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1014, + 
serialized_end=1033, +) + + +_MUTATEROWSREQUEST_ENTRY = _descriptor.Descriptor( + name='Entry', + full_name='google.bigtable.v2.MutateRowsRequest.Entry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='row_key', full_name='google.bigtable.v2.MutateRowsRequest.Entry.row_key', index=0, + number=1, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='mutations', full_name='google.bigtable.v2.MutateRowsRequest.Entry.mutations', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1163, + serialized_end=1236, +) + +_MUTATEROWSREQUEST = _descriptor.Descriptor( + name='MutateRowsRequest', + full_name='google.bigtable.v2.MutateRowsRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='table_name', full_name='google.bigtable.v2.MutateRowsRequest.table_name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='app_profile_id', full_name='google.bigtable.v2.MutateRowsRequest.app_profile_id', index=1, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='entries', full_name='google.bigtable.v2.MutateRowsRequest.entries', index=2, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_MUTATEROWSREQUEST_ENTRY, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1036, + serialized_end=1236, +) + + +_MUTATEROWSRESPONSE_ENTRY = _descriptor.Descriptor( + name='Entry', + full_name='google.bigtable.v2.MutateRowsResponse.Entry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='index', full_name='google.bigtable.v2.MutateRowsResponse.Entry.index', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='status', full_name='google.bigtable.v2.MutateRowsResponse.Entry.status', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + 
extension_ranges=[], + oneofs=[ + ], + serialized_start=1324, + serialized_end=1382, +) + +_MUTATEROWSRESPONSE = _descriptor.Descriptor( + name='MutateRowsResponse', + full_name='google.bigtable.v2.MutateRowsResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='entries', full_name='google.bigtable.v2.MutateRowsResponse.entries', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[_MUTATEROWSRESPONSE_ENTRY, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1239, + serialized_end=1382, +) + + +_CHECKANDMUTATEROWREQUEST = _descriptor.Descriptor( + name='CheckAndMutateRowRequest', + full_name='google.bigtable.v2.CheckAndMutateRowRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='table_name', full_name='google.bigtable.v2.CheckAndMutateRowRequest.table_name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='app_profile_id', full_name='google.bigtable.v2.CheckAndMutateRowRequest.app_profile_id', index=1, + number=7, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='row_key', full_name='google.bigtable.v2.CheckAndMutateRowRequest.row_key', index=2, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='predicate_filter', full_name='google.bigtable.v2.CheckAndMutateRowRequest.predicate_filter', index=3, + number=6, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='true_mutations', full_name='google.bigtable.v2.CheckAndMutateRowRequest.true_mutations', index=4, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='false_mutations', full_name='google.bigtable.v2.CheckAndMutateRowRequest.false_mutations', index=5, + number=5, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1385, + serialized_end=1638, +) + + +_CHECKANDMUTATEROWRESPONSE = _descriptor.Descriptor( + name='CheckAndMutateRowResponse', + full_name='google.bigtable.v2.CheckAndMutateRowResponse', + filename=None, 
+ file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='predicate_matched', full_name='google.bigtable.v2.CheckAndMutateRowResponse.predicate_matched', index=0, + number=1, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1640, + serialized_end=1694, +) + + +_READMODIFYWRITEROWREQUEST = _descriptor.Descriptor( + name='ReadModifyWriteRowRequest', + full_name='google.bigtable.v2.ReadModifyWriteRowRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='table_name', full_name='google.bigtable.v2.ReadModifyWriteRowRequest.table_name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='app_profile_id', full_name='google.bigtable.v2.ReadModifyWriteRowRequest.app_profile_id', index=1, + number=4, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='row_key', full_name='google.bigtable.v2.ReadModifyWriteRowRequest.row_key', index=2, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='rules', full_name='google.bigtable.v2.ReadModifyWriteRowRequest.rules', index=3, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1697, + serialized_end=1841, +) + + +_READMODIFYWRITEROWRESPONSE = _descriptor.Descriptor( + name='ReadModifyWriteRowResponse', + full_name='google.bigtable.v2.ReadModifyWriteRowResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='row', full_name='google.bigtable.v2.ReadModifyWriteRowResponse.row', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1843, + serialized_end=1909, +) + +_READROWSREQUEST.fields_by_name['rows'].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._ROWSET +_READROWSREQUEST.fields_by_name['filter'].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._ROWFILTER +_READROWSRESPONSE_CELLCHUNK.fields_by_name['family_name'].message_type = 
google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE +_READROWSRESPONSE_CELLCHUNK.fields_by_name['qualifier'].message_type = google_dot_protobuf_dot_wrappers__pb2._BYTESVALUE +_READROWSRESPONSE_CELLCHUNK.containing_type = _READROWSRESPONSE +_READROWSRESPONSE_CELLCHUNK.oneofs_by_name['row_status'].fields.append( + _READROWSRESPONSE_CELLCHUNK.fields_by_name['reset_row']) +_READROWSRESPONSE_CELLCHUNK.fields_by_name['reset_row'].containing_oneof = _READROWSRESPONSE_CELLCHUNK.oneofs_by_name['row_status'] +_READROWSRESPONSE_CELLCHUNK.oneofs_by_name['row_status'].fields.append( + _READROWSRESPONSE_CELLCHUNK.fields_by_name['commit_row']) +_READROWSRESPONSE_CELLCHUNK.fields_by_name['commit_row'].containing_oneof = _READROWSRESPONSE_CELLCHUNK.oneofs_by_name['row_status'] +_READROWSRESPONSE.fields_by_name['chunks'].message_type = _READROWSRESPONSE_CELLCHUNK +_MUTATEROWREQUEST.fields_by_name['mutations'].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._MUTATION +_MUTATEROWSREQUEST_ENTRY.fields_by_name['mutations'].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._MUTATION +_MUTATEROWSREQUEST_ENTRY.containing_type = _MUTATEROWSREQUEST +_MUTATEROWSREQUEST.fields_by_name['entries'].message_type = _MUTATEROWSREQUEST_ENTRY +_MUTATEROWSRESPONSE_ENTRY.fields_by_name['status'].message_type = google_dot_rpc_dot_status__pb2._STATUS +_MUTATEROWSRESPONSE_ENTRY.containing_type = _MUTATEROWSRESPONSE +_MUTATEROWSRESPONSE.fields_by_name['entries'].message_type = _MUTATEROWSRESPONSE_ENTRY +_CHECKANDMUTATEROWREQUEST.fields_by_name['predicate_filter'].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._ROWFILTER +_CHECKANDMUTATEROWREQUEST.fields_by_name['true_mutations'].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._MUTATION +_CHECKANDMUTATEROWREQUEST.fields_by_name['false_mutations'].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._MUTATION +_READMODIFYWRITEROWREQUEST.fields_by_name['rules'].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._READMODIFYWRITERULE +_READMODIFYWRITEROWRESPONSE.fields_by_name['row'].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._ROW +DESCRIPTOR.message_types_by_name['ReadRowsRequest'] = _READROWSREQUEST +DESCRIPTOR.message_types_by_name['ReadRowsResponse'] = _READROWSRESPONSE +DESCRIPTOR.message_types_by_name['SampleRowKeysRequest'] = _SAMPLEROWKEYSREQUEST +DESCRIPTOR.message_types_by_name['SampleRowKeysResponse'] = _SAMPLEROWKEYSRESPONSE +DESCRIPTOR.message_types_by_name['MutateRowRequest'] = _MUTATEROWREQUEST +DESCRIPTOR.message_types_by_name['MutateRowResponse'] = _MUTATEROWRESPONSE +DESCRIPTOR.message_types_by_name['MutateRowsRequest'] = _MUTATEROWSREQUEST +DESCRIPTOR.message_types_by_name['MutateRowsResponse'] = _MUTATEROWSRESPONSE +DESCRIPTOR.message_types_by_name['CheckAndMutateRowRequest'] = _CHECKANDMUTATEROWREQUEST +DESCRIPTOR.message_types_by_name['CheckAndMutateRowResponse'] = _CHECKANDMUTATEROWRESPONSE +DESCRIPTOR.message_types_by_name['ReadModifyWriteRowRequest'] = _READMODIFYWRITEROWREQUEST +DESCRIPTOR.message_types_by_name['ReadModifyWriteRowResponse'] = _READMODIFYWRITEROWRESPONSE + +ReadRowsRequest = _reflection.GeneratedProtocolMessageType('ReadRowsRequest', (_message.Message,), dict( + DESCRIPTOR = _READROWSREQUEST, + __module__ = 'google.cloud.bigtable_v2.proto.bigtable_pb2' + , + __doc__ = """Request message for Bigtable.ReadRows. 
+ + + Attributes: + table_name: + The unique name of the table from which to read. Values are of + the form + ``projects//instances//tables/
``. + app_profile_id: + This is a private alpha release of Cloud Bigtable replication. + This feature is not currently available to most Cloud Bigtable + customers. This feature might be changed in backward- + incompatible ways and is not recommended for production use. + It is not subject to any SLA or deprecation policy. This + value specifies routing for replication. If not specified, the + "default" application profile will be used. + rows: + The row keys and/or ranges to read. If not specified, reads + from all rows. + filter: + The filter to apply to the contents of the specified row(s). + If unset, reads the entirety of each row. + rows_limit: + The read will terminate after committing to N rows' worth of + results. The default (zero) is to return all results. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadRowsRequest) + )) +_sym_db.RegisterMessage(ReadRowsRequest) + +ReadRowsResponse = _reflection.GeneratedProtocolMessageType('ReadRowsResponse', (_message.Message,), dict( + + CellChunk = _reflection.GeneratedProtocolMessageType('CellChunk', (_message.Message,), dict( + DESCRIPTOR = _READROWSRESPONSE_CELLCHUNK, + __module__ = 'google.cloud.bigtable_v2.proto.bigtable_pb2' + , + __doc__ = """Specifies a piece of a row's contents returned as part of the read + response stream. + + + Attributes: + row_key: + The row key for this chunk of data. If the row key is empty, + this CellChunk is a continuation of the same row as the + previous CellChunk in the response stream, even if that + CellChunk was in a previous ReadRowsResponse message. + family_name: + The column family name for this chunk of data. If this message + is not present this CellChunk is a continuation of the same + column family as the previous CellChunk. The empty string can + occur as a column family name in a response so clients must + check explicitly for the presence of this message, not just + for ``family_name.value`` being non-empty. + qualifier: + The column qualifier for this chunk of data. If this message + is not present, this CellChunk is a continuation of the same + column as the previous CellChunk. Column qualifiers may be + empty so clients must check for the presence of this message, + not just for ``qualifier.value`` being non-empty. + timestamp_micros: + The cell's stored timestamp, which also uniquely identifies it + within its column. Values are always expressed in + microseconds, but individual tables may set a coarser + granularity to further restrict the allowed values. For + example, a table which specifies millisecond granularity will + only allow values of ``timestamp_micros`` which are multiples + of 1000. Timestamps are only set in the first CellChunk per + cell (for cells split into multiple chunks). + labels: + Labels applied to the cell by a + [RowFilter][google.bigtable.v2.RowFilter]. Labels are only set + on the first CellChunk per cell. + value: + The value stored in the cell. Cell values can be split across + multiple CellChunks. In that case only the value field will be + set in CellChunks after the first: the timestamp and labels + will only be present in the first CellChunk, even if the first + CellChunk came in a previous ReadRowsResponse. + value_size: + If this CellChunk is part of a chunked cell value and this is + not the final chunk of that cell, value\_size will be set to + the total length of the cell value. The client can use this + size to pre-allocate memory to hold the full cell value. 
+ reset_row: + Indicates that the client should drop all previous chunks for + ``row_key``, as it will be re-read from the beginning. + commit_row: + Indicates that the client can safely process all previous + chunks for ``row_key``, as its data has been fully read. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadRowsResponse.CellChunk) + )) + , + DESCRIPTOR = _READROWSRESPONSE, + __module__ = 'google.cloud.bigtable_v2.proto.bigtable_pb2' + , + __doc__ = """Response message for Bigtable.ReadRows. + + + Attributes: + last_scanned_row_key: + Optionally the server might return the row key of the last row + it has scanned. The client can use this to construct a more + efficient retry request if needed: any row keys or portions of + ranges less than this row key can be dropped from the request. + This is primarily useful for cases where the server has read a + lot of data that was filtered out since the last committed row + key, allowing the client to skip that work on a retry. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadRowsResponse) + )) +_sym_db.RegisterMessage(ReadRowsResponse) +_sym_db.RegisterMessage(ReadRowsResponse.CellChunk) + +SampleRowKeysRequest = _reflection.GeneratedProtocolMessageType('SampleRowKeysRequest', (_message.Message,), dict( + DESCRIPTOR = _SAMPLEROWKEYSREQUEST, + __module__ = 'google.cloud.bigtable_v2.proto.bigtable_pb2' + , + __doc__ = """Request message for Bigtable.SampleRowKeys. + + + Attributes: + table_name: + The unique name of the table from which to sample row keys. + Values are of the form + ``projects//instances//tables/
``. + app_profile_id: + This is a private alpha release of Cloud Bigtable replication. + This feature is not currently available to most Cloud Bigtable + customers. This feature might be changed in backward- + incompatible ways and is not recommended for production use. + It is not subject to any SLA or deprecation policy. This + value specifies routing for replication. If not specified, the + "default" application profile will be used. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.v2.SampleRowKeysRequest) + )) +_sym_db.RegisterMessage(SampleRowKeysRequest) + +SampleRowKeysResponse = _reflection.GeneratedProtocolMessageType('SampleRowKeysResponse', (_message.Message,), dict( + DESCRIPTOR = _SAMPLEROWKEYSRESPONSE, + __module__ = 'google.cloud.bigtable_v2.proto.bigtable_pb2' + , + __doc__ = """Response message for Bigtable.SampleRowKeys. + + + Attributes: + row_key: + Sorted streamed sequence of sample row keys in the table. The + table might have contents before the first row key in the list + and after the last one, but a key containing the empty string + indicates "end of table" and will be the last response given, + if present. Note that row keys in this list may not have ever + been written to or read from, and users should therefore not + make any assumptions about the row key structure that are + specific to their use case. + offset_bytes: + Approximate total storage space used by all rows in the table + which precede ``row_key``. Buffering the contents of all rows + between two subsequent samples would require space roughly + equal to the difference in their ``offset_bytes`` fields. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.v2.SampleRowKeysResponse) + )) +_sym_db.RegisterMessage(SampleRowKeysResponse) + +MutateRowRequest = _reflection.GeneratedProtocolMessageType('MutateRowRequest', (_message.Message,), dict( + DESCRIPTOR = _MUTATEROWREQUEST, + __module__ = 'google.cloud.bigtable_v2.proto.bigtable_pb2' + , + __doc__ = """Request message for Bigtable.MutateRow. + + + Attributes: + table_name: + The unique name of the table to which the mutation should be + applied. Values are of the form + ``projects//instances//tables/
``. + app_profile_id: + This is a private alpha release of Cloud Bigtable replication. + This feature is not currently available to most Cloud Bigtable + customers. This feature might be changed in backward- + incompatible ways and is not recommended for production use. + It is not subject to any SLA or deprecation policy. This + value specifies routing for replication. If not specified, the + "default" application profile will be used. + row_key: + The key of the row to which the mutation should be applied. + mutations: + Changes to be atomically applied to the specified row. Entries + are applied in order, meaning that earlier mutations can be + masked by later ones. Must contain at least one entry and at + most 100000. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowRequest) + )) +_sym_db.RegisterMessage(MutateRowRequest) + +MutateRowResponse = _reflection.GeneratedProtocolMessageType('MutateRowResponse', (_message.Message,), dict( + DESCRIPTOR = _MUTATEROWRESPONSE, + __module__ = 'google.cloud.bigtable_v2.proto.bigtable_pb2' + , + __doc__ = """Response message for Bigtable.MutateRow. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowResponse) + )) +_sym_db.RegisterMessage(MutateRowResponse) + +MutateRowsRequest = _reflection.GeneratedProtocolMessageType('MutateRowsRequest', (_message.Message,), dict( + + Entry = _reflection.GeneratedProtocolMessageType('Entry', (_message.Message,), dict( + DESCRIPTOR = _MUTATEROWSREQUEST_ENTRY, + __module__ = 'google.cloud.bigtable_v2.proto.bigtable_pb2' + , + __doc__ = """Attributes: + row_key: + The key of the row to which the ``mutations`` should be + applied. + mutations: + Changes to be atomically applied to the specified row. + Mutations are applied in order, meaning that earlier mutations + can be masked by later ones. You must specify at least one + mutation. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsRequest.Entry) + )) + , + DESCRIPTOR = _MUTATEROWSREQUEST, + __module__ = 'google.cloud.bigtable_v2.proto.bigtable_pb2' + , + __doc__ = """Request message for BigtableService.MutateRows. + + + Attributes: + table_name: + The unique name of the table to which the mutations should be + applied. + app_profile_id: + This is a private alpha release of Cloud Bigtable replication. + This feature is not currently available to most Cloud Bigtable + customers. This feature might be changed in backward- + incompatible ways and is not recommended for production use. + It is not subject to any SLA or deprecation policy. This + value specifies routing for replication. If not specified, the + "default" application profile will be used. + entries: + The row keys and corresponding mutations to be applied in + bulk. Each entry is applied as an atomic mutation, but the + entries may be applied in arbitrary order (even between + entries for the same row). At least one entry must be + specified, and in total the entries can contain at most 100000 + mutations. 
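The Entry semantics above read more easily next to a concrete request. As an
illustrative sketch (not part of the generated file), and assuming the modules
are importable as ``google.cloud.bigtable_v2.proto.*`` per the ``__module__``
strings in this file, the following builds a two-entry MutateRowsRequest; the
table name, column family, qualifier, and row keys are placeholders::

    from google.cloud.bigtable_v2.proto import bigtable_pb2, data_pb2

    def build_bulk_request(table_name):
        # One SetCell mutation, copied into each Entry below.
        set_cell = data_pb2.Mutation(
            set_cell=data_pb2.Mutation.SetCell(
                family_name='cf1',
                column_qualifier=b'greeting',
                timestamp_micros=-1,  # conventionally: let the server pick the time
                value=b'hello',
            ))
        entries = [
            bigtable_pb2.MutateRowsRequest.Entry(
                row_key=key, mutations=[set_cell])
            for key in (b'row-0001', b'row-0002')
        ]
        # Each Entry is applied atomically, but ordering between entries is
        # not guaranteed (see the ``entries`` description above).
        return bigtable_pb2.MutateRowsRequest(
            table_name=table_name, entries=entries)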
+ """, + # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsRequest) + )) +_sym_db.RegisterMessage(MutateRowsRequest) +_sym_db.RegisterMessage(MutateRowsRequest.Entry) + +MutateRowsResponse = _reflection.GeneratedProtocolMessageType('MutateRowsResponse', (_message.Message,), dict( + + Entry = _reflection.GeneratedProtocolMessageType('Entry', (_message.Message,), dict( + DESCRIPTOR = _MUTATEROWSRESPONSE_ENTRY, + __module__ = 'google.cloud.bigtable_v2.proto.bigtable_pb2' + , + __doc__ = """Attributes: + index: + The index into the original request's ``entries`` list of the + Entry for which a result is being reported. + status: + The result of the request Entry identified by ``index``. + Depending on how requests are batched during execution, it is + possible for one Entry to fail due to an error with another + Entry. In the event that this occurs, the same error will be + reported for both entries. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsResponse.Entry) + )) + , + DESCRIPTOR = _MUTATEROWSRESPONSE, + __module__ = 'google.cloud.bigtable_v2.proto.bigtable_pb2' + , + __doc__ = """Response message for BigtableService.MutateRows. + + + Attributes: + entries: + One or more results for Entries from the batch request. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsResponse) + )) +_sym_db.RegisterMessage(MutateRowsResponse) +_sym_db.RegisterMessage(MutateRowsResponse.Entry) + +CheckAndMutateRowRequest = _reflection.GeneratedProtocolMessageType('CheckAndMutateRowRequest', (_message.Message,), dict( + DESCRIPTOR = _CHECKANDMUTATEROWREQUEST, + __module__ = 'google.cloud.bigtable_v2.proto.bigtable_pb2' + , + __doc__ = """Request message for Bigtable.CheckAndMutateRow. + + + Attributes: + table_name: + The unique name of the table to which the conditional mutation + should be applied. Values are of the form + ``projects//instances//tables/
``. + app_profile_id: + This is a private alpha release of Cloud Bigtable replication. + This feature is not currently available to most Cloud Bigtable + customers. This feature might be changed in backward- + incompatible ways and is not recommended for production use. + It is not subject to any SLA or deprecation policy. This + value specifies routing for replication. If not specified, the + "default" application profile will be used. + row_key: + The key of the row to which the conditional mutation should be + applied. + predicate_filter: + The filter to be applied to the contents of the specified row. + Depending on whether or not any results are yielded, either + ``true_mutations`` or ``false_mutations`` will be executed. If + unset, checks that the row contains any values at all. + true_mutations: + Changes to be atomically applied to the specified row if + ``predicate_filter`` yields at least one cell when applied to + ``row_key``. Entries are applied in order, meaning that + earlier mutations can be masked by later ones. Must contain at + least one entry if ``false_mutations`` is empty, and at most + 100000. + false_mutations: + Changes to be atomically applied to the specified row if + ``predicate_filter`` does not yield any cells when applied to + ``row_key``. Entries are applied in order, meaning that + earlier mutations can be masked by later ones. Must contain at + least one entry if ``true_mutations`` is empty, and at most + 100000. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.v2.CheckAndMutateRowRequest) + )) +_sym_db.RegisterMessage(CheckAndMutateRowRequest) + +CheckAndMutateRowResponse = _reflection.GeneratedProtocolMessageType('CheckAndMutateRowResponse', (_message.Message,), dict( + DESCRIPTOR = _CHECKANDMUTATEROWRESPONSE, + __module__ = 'google.cloud.bigtable_v2.proto.bigtable_pb2' + , + __doc__ = """Response message for Bigtable.CheckAndMutateRow. + + + Attributes: + predicate_matched: + Whether or not the request's ``predicate_filter`` yielded any + results for the specified row. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.v2.CheckAndMutateRowResponse) + )) +_sym_db.RegisterMessage(CheckAndMutateRowResponse) + +ReadModifyWriteRowRequest = _reflection.GeneratedProtocolMessageType('ReadModifyWriteRowRequest', (_message.Message,), dict( + DESCRIPTOR = _READMODIFYWRITEROWREQUEST, + __module__ = 'google.cloud.bigtable_v2.proto.bigtable_pb2' + , + __doc__ = """Request message for Bigtable.ReadModifyWriteRow. + + + Attributes: + table_name: + The unique name of the table to which the read/modify/write + rules should be applied. Values are of the form + ``projects//instances//tables/
``. + app_profile_id: + This is a private alpha release of Cloud Bigtable replication. + This feature is not currently available to most Cloud Bigtable + customers. This feature might be changed in backward- + incompatible ways and is not recommended for production use. + It is not subject to any SLA or deprecation policy. This + value specifies routing for replication. If not specified, the + "default" application profile will be used. + row_key: + The key of the row to which the read/modify/write rules should + be applied. + rules: + Rules specifying how the specified row's contents are to be + transformed into writes. Entries are applied in order, meaning + that earlier rules will affect the results of later ones. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadModifyWriteRowRequest) + )) +_sym_db.RegisterMessage(ReadModifyWriteRowRequest) + +ReadModifyWriteRowResponse = _reflection.GeneratedProtocolMessageType('ReadModifyWriteRowResponse', (_message.Message,), dict( + DESCRIPTOR = _READMODIFYWRITEROWRESPONSE, + __module__ = 'google.cloud.bigtable_v2.proto.bigtable_pb2' + , + __doc__ = """Response message for Bigtable.ReadModifyWriteRow. + + + Attributes: + row: + A Row containing the new contents of all cells modified by the + request. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadModifyWriteRowResponse) + )) +_sym_db.RegisterMessage(ReadModifyWriteRowResponse) + + +DESCRIPTOR.has_options = True +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\026com.google.bigtable.v2B\rBigtableProtoP\001Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2')) +try: + # THESE ELEMENTS WILL BE DEPRECATED. + # Please use the generated *_pb2_grpc.py files instead. + import grpc + from grpc.beta import implementations as beta_implementations + from grpc.beta import interfaces as beta_interfaces + from grpc.framework.common import cardinality + from grpc.framework.interfaces.face import utilities as face_utilities + + + class BigtableStub(object): + """Service for reading from and writing to existing Bigtable tables. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. 
+ """ + self.ReadRows = channel.unary_stream( + '/google.bigtable.v2.Bigtable/ReadRows', + request_serializer=ReadRowsRequest.SerializeToString, + response_deserializer=ReadRowsResponse.FromString, + ) + self.SampleRowKeys = channel.unary_stream( + '/google.bigtable.v2.Bigtable/SampleRowKeys', + request_serializer=SampleRowKeysRequest.SerializeToString, + response_deserializer=SampleRowKeysResponse.FromString, + ) + self.MutateRow = channel.unary_unary( + '/google.bigtable.v2.Bigtable/MutateRow', + request_serializer=MutateRowRequest.SerializeToString, + response_deserializer=MutateRowResponse.FromString, + ) + self.MutateRows = channel.unary_stream( + '/google.bigtable.v2.Bigtable/MutateRows', + request_serializer=MutateRowsRequest.SerializeToString, + response_deserializer=MutateRowsResponse.FromString, + ) + self.CheckAndMutateRow = channel.unary_unary( + '/google.bigtable.v2.Bigtable/CheckAndMutateRow', + request_serializer=CheckAndMutateRowRequest.SerializeToString, + response_deserializer=CheckAndMutateRowResponse.FromString, + ) + self.ReadModifyWriteRow = channel.unary_unary( + '/google.bigtable.v2.Bigtable/ReadModifyWriteRow', + request_serializer=ReadModifyWriteRowRequest.SerializeToString, + response_deserializer=ReadModifyWriteRowResponse.FromString, + ) + + + class BigtableServicer(object): + """Service for reading from and writing to existing Bigtable tables. + """ + + def ReadRows(self, request, context): + """Streams back the contents of all requested rows in key order, optionally + applying the same Reader filter to each. Depending on their size, + rows and cells may be broken up across multiple responses, but + atomicity of each row will still be preserved. See the + ReadRowsResponse documentation for details. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def SampleRowKeys(self, request, context): + """Returns a sample of row keys in the table. The returned row keys will + delimit contiguous sections of the table of approximately equal size, + which can be used to break up the data for distributed tasks like + mapreduces. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def MutateRow(self, request, context): + """Mutates a row atomically. Cells already present in the row are left + unchanged unless explicitly changed by `mutation`. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def MutateRows(self, request, context): + """Mutates multiple rows in a batch. Each individual row is mutated + atomically as in MutateRow, but the entire batch is not executed + atomically. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def CheckAndMutateRow(self, request, context): + """Mutates a row atomically based on the output of a predicate Reader filter. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ReadModifyWriteRow(self, request, context): + """Modifies a row atomically on the server. 
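As an illustrative usage sketch (not part of the generated file): the stub
wired up above is a thin wrapper over a grpc.Channel, and ReadRows,
SampleRowKeys, and MutateRows are unary-stream calls whose return values are
iterated. The endpoint below assumes a locally running Bigtable emulator; the
real service needs a secure, authenticated channel, and the import path
follows the ``__module__`` strings used in this file::

    import grpc

    from google.cloud.bigtable_v2.proto import bigtable_pb2, bigtable_pb2_grpc

    def iter_chunks(table_name, endpoint='localhost:8086'):
        channel = grpc.insecure_channel(endpoint)
        # bigtable_pb2_grpc is the recommended home for the stub (see the
        # deprecation note above); BigtableStub in this module is equivalent.
        stub = bigtable_pb2_grpc.BigtableStub(channel)
        request = bigtable_pb2.ReadRowsRequest(table_name=table_name,
                                               rows_limit=10)
        # ReadRows is unary-stream: iterating the call yields
        # ReadRowsResponse messages, each carrying zero or more CellChunks.
        for response in stub.ReadRows(request):
            for chunk in response.chunks:
                yield chunk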
The method reads the latest + existing timestamp and value from the specified columns and writes a new + entry based on pre-defined read/modify/write rules. The new value for the + timestamp is the greater of the existing timestamp or the current server + time. The method returns the new contents of all modified cells. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + + def add_BigtableServicer_to_server(servicer, server): + rpc_method_handlers = { + 'ReadRows': grpc.unary_stream_rpc_method_handler( + servicer.ReadRows, + request_deserializer=ReadRowsRequest.FromString, + response_serializer=ReadRowsResponse.SerializeToString, + ), + 'SampleRowKeys': grpc.unary_stream_rpc_method_handler( + servicer.SampleRowKeys, + request_deserializer=SampleRowKeysRequest.FromString, + response_serializer=SampleRowKeysResponse.SerializeToString, + ), + 'MutateRow': grpc.unary_unary_rpc_method_handler( + servicer.MutateRow, + request_deserializer=MutateRowRequest.FromString, + response_serializer=MutateRowResponse.SerializeToString, + ), + 'MutateRows': grpc.unary_stream_rpc_method_handler( + servicer.MutateRows, + request_deserializer=MutateRowsRequest.FromString, + response_serializer=MutateRowsResponse.SerializeToString, + ), + 'CheckAndMutateRow': grpc.unary_unary_rpc_method_handler( + servicer.CheckAndMutateRow, + request_deserializer=CheckAndMutateRowRequest.FromString, + response_serializer=CheckAndMutateRowResponse.SerializeToString, + ), + 'ReadModifyWriteRow': grpc.unary_unary_rpc_method_handler( + servicer.ReadModifyWriteRow, + request_deserializer=ReadModifyWriteRowRequest.FromString, + response_serializer=ReadModifyWriteRowResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'google.bigtable.v2.Bigtable', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + + class BetaBigtableServicer(object): + """The Beta API is deprecated for 0.15.0 and later. + + It is recommended to use the GA API (classes and functions in this + file not marked beta) for all further purposes. This class was generated + only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0.""" + """Service for reading from and writing to existing Bigtable tables. + """ + def ReadRows(self, request, context): + """Streams back the contents of all requested rows in key order, optionally + applying the same Reader filter to each. Depending on their size, + rows and cells may be broken up across multiple responses, but + atomicity of each row will still be preserved. See the + ReadRowsResponse documentation for details. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def SampleRowKeys(self, request, context): + """Returns a sample of row keys in the table. The returned row keys will + delimit contiguous sections of the table of approximately equal size, + which can be used to break up the data for distributed tasks like + mapreduces. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def MutateRow(self, request, context): + """Mutates a row atomically. Cells already present in the row are left + unchanged unless explicitly changed by `mutation`. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def MutateRows(self, request, context): + """Mutates multiple rows in a batch. Each individual row is mutated + atomically as in MutateRow, but the entire batch is not executed + atomically. 
+ """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def CheckAndMutateRow(self, request, context): + """Mutates a row atomically based on the output of a predicate Reader filter. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + def ReadModifyWriteRow(self, request, context): + """Modifies a row atomically on the server. The method reads the latest + existing timestamp and value from the specified columns and writes a new + entry based on pre-defined read/modify/write rules. The new value for the + timestamp is the greater of the existing timestamp or the current server + time. The method returns the new contents of all modified cells. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + + + class BetaBigtableStub(object): + """The Beta API is deprecated for 0.15.0 and later. + + It is recommended to use the GA API (classes and functions in this + file not marked beta) for all further purposes. This class was generated + only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0.""" + """Service for reading from and writing to existing Bigtable tables. + """ + def ReadRows(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Streams back the contents of all requested rows in key order, optionally + applying the same Reader filter to each. Depending on their size, + rows and cells may be broken up across multiple responses, but + atomicity of each row will still be preserved. See the + ReadRowsResponse documentation for details. + """ + raise NotImplementedError() + def SampleRowKeys(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Returns a sample of row keys in the table. The returned row keys will + delimit contiguous sections of the table of approximately equal size, + which can be used to break up the data for distributed tasks like + mapreduces. + """ + raise NotImplementedError() + def MutateRow(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Mutates a row atomically. Cells already present in the row are left + unchanged unless explicitly changed by `mutation`. + """ + raise NotImplementedError() + MutateRow.future = None + def MutateRows(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Mutates multiple rows in a batch. Each individual row is mutated + atomically as in MutateRow, but the entire batch is not executed + atomically. + """ + raise NotImplementedError() + def CheckAndMutateRow(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Mutates a row atomically based on the output of a predicate Reader filter. + """ + raise NotImplementedError() + CheckAndMutateRow.future = None + def ReadModifyWriteRow(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Modifies a row atomically on the server. The method reads the latest + existing timestamp and value from the specified columns and writes a new + entry based on pre-defined read/modify/write rules. The new value for the + timestamp is the greater of the existing timestamp or the current server + time. The method returns the new contents of all modified cells. + """ + raise NotImplementedError() + ReadModifyWriteRow.future = None + + + def beta_create_Bigtable_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): + """The Beta API is deprecated for 0.15.0 and later. 
+ + It is recommended to use the GA API (classes and functions in this + file not marked beta) for all further purposes. This function was + generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0""" + request_deserializers = { + ('google.bigtable.v2.Bigtable', 'CheckAndMutateRow'): CheckAndMutateRowRequest.FromString, + ('google.bigtable.v2.Bigtable', 'MutateRow'): MutateRowRequest.FromString, + ('google.bigtable.v2.Bigtable', 'MutateRows'): MutateRowsRequest.FromString, + ('google.bigtable.v2.Bigtable', 'ReadModifyWriteRow'): ReadModifyWriteRowRequest.FromString, + ('google.bigtable.v2.Bigtable', 'ReadRows'): ReadRowsRequest.FromString, + ('google.bigtable.v2.Bigtable', 'SampleRowKeys'): SampleRowKeysRequest.FromString, + } + response_serializers = { + ('google.bigtable.v2.Bigtable', 'CheckAndMutateRow'): CheckAndMutateRowResponse.SerializeToString, + ('google.bigtable.v2.Bigtable', 'MutateRow'): MutateRowResponse.SerializeToString, + ('google.bigtable.v2.Bigtable', 'MutateRows'): MutateRowsResponse.SerializeToString, + ('google.bigtable.v2.Bigtable', 'ReadModifyWriteRow'): ReadModifyWriteRowResponse.SerializeToString, + ('google.bigtable.v2.Bigtable', 'ReadRows'): ReadRowsResponse.SerializeToString, + ('google.bigtable.v2.Bigtable', 'SampleRowKeys'): SampleRowKeysResponse.SerializeToString, + } + method_implementations = { + ('google.bigtable.v2.Bigtable', 'CheckAndMutateRow'): face_utilities.unary_unary_inline(servicer.CheckAndMutateRow), + ('google.bigtable.v2.Bigtable', 'MutateRow'): face_utilities.unary_unary_inline(servicer.MutateRow), + ('google.bigtable.v2.Bigtable', 'MutateRows'): face_utilities.unary_stream_inline(servicer.MutateRows), + ('google.bigtable.v2.Bigtable', 'ReadModifyWriteRow'): face_utilities.unary_unary_inline(servicer.ReadModifyWriteRow), + ('google.bigtable.v2.Bigtable', 'ReadRows'): face_utilities.unary_stream_inline(servicer.ReadRows), + ('google.bigtable.v2.Bigtable', 'SampleRowKeys'): face_utilities.unary_stream_inline(servicer.SampleRowKeys), + } + server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) + return beta_implementations.server(method_implementations, options=server_options) + + + def beta_create_Bigtable_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): + """The Beta API is deprecated for 0.15.0 and later. + + It is recommended to use the GA API (classes and functions in this + file not marked beta) for all further purposes. 
This function was + generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0""" + request_serializers = { + ('google.bigtable.v2.Bigtable', 'CheckAndMutateRow'): CheckAndMutateRowRequest.SerializeToString, + ('google.bigtable.v2.Bigtable', 'MutateRow'): MutateRowRequest.SerializeToString, + ('google.bigtable.v2.Bigtable', 'MutateRows'): MutateRowsRequest.SerializeToString, + ('google.bigtable.v2.Bigtable', 'ReadModifyWriteRow'): ReadModifyWriteRowRequest.SerializeToString, + ('google.bigtable.v2.Bigtable', 'ReadRows'): ReadRowsRequest.SerializeToString, + ('google.bigtable.v2.Bigtable', 'SampleRowKeys'): SampleRowKeysRequest.SerializeToString, + } + response_deserializers = { + ('google.bigtable.v2.Bigtable', 'CheckAndMutateRow'): CheckAndMutateRowResponse.FromString, + ('google.bigtable.v2.Bigtable', 'MutateRow'): MutateRowResponse.FromString, + ('google.bigtable.v2.Bigtable', 'MutateRows'): MutateRowsResponse.FromString, + ('google.bigtable.v2.Bigtable', 'ReadModifyWriteRow'): ReadModifyWriteRowResponse.FromString, + ('google.bigtable.v2.Bigtable', 'ReadRows'): ReadRowsResponse.FromString, + ('google.bigtable.v2.Bigtable', 'SampleRowKeys'): SampleRowKeysResponse.FromString, + } + cardinalities = { + 'CheckAndMutateRow': cardinality.Cardinality.UNARY_UNARY, + 'MutateRow': cardinality.Cardinality.UNARY_UNARY, + 'MutateRows': cardinality.Cardinality.UNARY_STREAM, + 'ReadModifyWriteRow': cardinality.Cardinality.UNARY_UNARY, + 'ReadRows': cardinality.Cardinality.UNARY_STREAM, + 'SampleRowKeys': cardinality.Cardinality.UNARY_STREAM, + } + stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) + return beta_implementations.dynamic_stub(channel, 'google.bigtable.v2.Bigtable', cardinalities, options=stub_options) +except ImportError: + pass +# @@protoc_insertion_point(module_scope) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2_grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2_grpc.py new file mode 100644 index 000000000000..e3fb9e6ba348 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2_grpc.py @@ -0,0 +1,145 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +import grpc + +import google.cloud.bigtable_v2.proto.bigtable_pb2 as google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2 + + +class BigtableStub(object): + """Service for reading from and writing to existing Bigtable tables. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. 
+ """ + self.ReadRows = channel.unary_stream( + '/google.bigtable.v2.Bigtable/ReadRows', + request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadRowsRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadRowsResponse.FromString, + ) + self.SampleRowKeys = channel.unary_stream( + '/google.bigtable.v2.Bigtable/SampleRowKeys', + request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.SampleRowKeysRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.SampleRowKeysResponse.FromString, + ) + self.MutateRow = channel.unary_unary( + '/google.bigtable.v2.Bigtable/MutateRow', + request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowResponse.FromString, + ) + self.MutateRows = channel.unary_stream( + '/google.bigtable.v2.Bigtable/MutateRows', + request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowsRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowsResponse.FromString, + ) + self.CheckAndMutateRow = channel.unary_unary( + '/google.bigtable.v2.Bigtable/CheckAndMutateRow', + request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.CheckAndMutateRowRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.CheckAndMutateRowResponse.FromString, + ) + self.ReadModifyWriteRow = channel.unary_unary( + '/google.bigtable.v2.Bigtable/ReadModifyWriteRow', + request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadModifyWriteRowRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadModifyWriteRowResponse.FromString, + ) + + +class BigtableServicer(object): + """Service for reading from and writing to existing Bigtable tables. + """ + + def ReadRows(self, request, context): + """Streams back the contents of all requested rows in key order, optionally + applying the same Reader filter to each. Depending on their size, + rows and cells may be broken up across multiple responses, but + atomicity of each row will still be preserved. See the + ReadRowsResponse documentation for details. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def SampleRowKeys(self, request, context): + """Returns a sample of row keys in the table. The returned row keys will + delimit contiguous sections of the table of approximately equal size, + which can be used to break up the data for distributed tasks like + mapreduces. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def MutateRow(self, request, context): + """Mutates a row atomically. Cells already present in the row are left + unchanged unless explicitly changed by `mutation`. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def MutateRows(self, request, context): + """Mutates multiple rows in a batch. 
Each individual row is mutated + atomically as in MutateRow, but the entire batch is not executed + atomically. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def CheckAndMutateRow(self, request, context): + """Mutates a row atomically based on the output of a predicate Reader filter. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ReadModifyWriteRow(self, request, context): + """Modifies a row atomically on the server. The method reads the latest + existing timestamp and value from the specified columns and writes a new + entry based on pre-defined read/modify/write rules. The new value for the + timestamp is the greater of the existing timestamp or the current server + time. The method returns the new contents of all modified cells. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_BigtableServicer_to_server(servicer, server): + rpc_method_handlers = { + 'ReadRows': grpc.unary_stream_rpc_method_handler( + servicer.ReadRows, + request_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadRowsRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadRowsResponse.SerializeToString, + ), + 'SampleRowKeys': grpc.unary_stream_rpc_method_handler( + servicer.SampleRowKeys, + request_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.SampleRowKeysRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.SampleRowKeysResponse.SerializeToString, + ), + 'MutateRow': grpc.unary_unary_rpc_method_handler( + servicer.MutateRow, + request_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowResponse.SerializeToString, + ), + 'MutateRows': grpc.unary_stream_rpc_method_handler( + servicer.MutateRows, + request_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowsRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowsResponse.SerializeToString, + ), + 'CheckAndMutateRow': grpc.unary_unary_rpc_method_handler( + servicer.CheckAndMutateRow, + request_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.CheckAndMutateRowRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.CheckAndMutateRowResponse.SerializeToString, + ), + 'ReadModifyWriteRow': grpc.unary_unary_rpc_method_handler( + servicer.ReadModifyWriteRow, + request_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadModifyWriteRowRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadModifyWriteRowResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'google.bigtable.v2.Bigtable', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/data_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py similarity index 62% 
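The module above also exposes add_BigtableServicer_to_server for the server
side. As an illustrative sketch (not generated by protoc), a minimal
in-process server, for example for tests, might be wired up as follows; the
servicer behavior, port, and sample values are placeholders::

    from concurrent import futures

    import grpc
    from google.cloud.bigtable_v2.proto import bigtable_pb2, bigtable_pb2_grpc

    class EchoKeysServicer(bigtable_pb2_grpc.BigtableServicer):
        def SampleRowKeys(self, request, context):
            # Stream back a single fake sample; a real server derives these
            # from the table's actual key distribution.
            yield bigtable_pb2.SampleRowKeysResponse(row_key=b'',
                                                     offset_bytes=0)

    def serve(port=8086):
        server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
        bigtable_pb2_grpc.add_BigtableServicer_to_server(
            EchoKeysServicer(), server)
        server.add_insecure_port('[::]:%d' % port)
        server.start()
        return server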
rename from packages/google-cloud-bigtable/google/cloud/bigtable/_generated/data_pb2.py rename to packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py index 6db08fbd12c3..a43f75240604 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/_generated/data_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py @@ -1,5 +1,5 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/bigtable/v2/data.proto +# source: google/cloud/bigtable_v2/proto/data.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) @@ -16,10 +16,10 @@ DESCRIPTOR = _descriptor.FileDescriptor( - name='google/bigtable/v2/data.proto', + name='google/cloud/bigtable_v2/proto/data.proto', package='google.bigtable.v2', syntax='proto3', - serialized_pb=_b('\n\x1dgoogle/bigtable/v2/data.proto\x12\x12google.bigtable.v2\"@\n\x03Row\x12\x0b\n\x03key\x18\x01 \x01(\x0c\x12,\n\x08\x66\x61milies\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v2.Family\"C\n\x06\x46\x61mily\x12\x0c\n\x04name\x18\x01 \x01(\t\x12+\n\x07\x63olumns\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v2.Column\"D\n\x06\x43olumn\x12\x11\n\tqualifier\x18\x01 \x01(\x0c\x12\'\n\x05\x63\x65lls\x18\x02 \x03(\x0b\x32\x18.google.bigtable.v2.Cell\"?\n\x04\x43\x65ll\x12\x18\n\x10timestamp_micros\x18\x01 \x01(\x03\x12\r\n\x05value\x18\x02 \x01(\x0c\x12\x0e\n\x06labels\x18\x03 \x03(\t\"\x8a\x01\n\x08RowRange\x12\x1a\n\x10start_key_closed\x18\x01 \x01(\x0cH\x00\x12\x18\n\x0estart_key_open\x18\x02 \x01(\x0cH\x00\x12\x16\n\x0c\x65nd_key_open\x18\x03 \x01(\x0cH\x01\x12\x18\n\x0e\x65nd_key_closed\x18\x04 \x01(\x0cH\x01\x42\x0b\n\tstart_keyB\t\n\x07\x65nd_key\"L\n\x06RowSet\x12\x10\n\x08row_keys\x18\x01 \x03(\x0c\x12\x30\n\nrow_ranges\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.RowRange\"\xc6\x01\n\x0b\x43olumnRange\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12 \n\x16start_qualifier_closed\x18\x02 \x01(\x0cH\x00\x12\x1e\n\x14start_qualifier_open\x18\x03 \x01(\x0cH\x00\x12\x1e\n\x14\x65nd_qualifier_closed\x18\x04 \x01(\x0cH\x01\x12\x1c\n\x12\x65nd_qualifier_open\x18\x05 \x01(\x0cH\x01\x42\x11\n\x0fstart_qualifierB\x0f\n\rend_qualifier\"N\n\x0eTimestampRange\x12\x1e\n\x16start_timestamp_micros\x18\x01 \x01(\x03\x12\x1c\n\x14\x65nd_timestamp_micros\x18\x02 \x01(\x03\"\x98\x01\n\nValueRange\x12\x1c\n\x12start_value_closed\x18\x01 \x01(\x0cH\x00\x12\x1a\n\x10start_value_open\x18\x02 \x01(\x0cH\x00\x12\x1a\n\x10\x65nd_value_closed\x18\x03 \x01(\x0cH\x01\x12\x18\n\x0e\x65nd_value_open\x18\x04 \x01(\x0cH\x01\x42\r\n\x0bstart_valueB\x0b\n\tend_value\"\xdf\x08\n\tRowFilter\x12\x34\n\x05\x63hain\x18\x01 \x01(\x0b\x32#.google.bigtable.v2.RowFilter.ChainH\x00\x12>\n\ninterleave\x18\x02 \x01(\x0b\x32(.google.bigtable.v2.RowFilter.InterleaveH\x00\x12<\n\tcondition\x18\x03 \x01(\x0b\x32\'.google.bigtable.v2.RowFilter.ConditionH\x00\x12\x0e\n\x04sink\x18\x10 \x01(\x08H\x00\x12\x19\n\x0fpass_all_filter\x18\x11 \x01(\x08H\x00\x12\x1a\n\x10\x62lock_all_filter\x18\x12 \x01(\x08H\x00\x12\x1e\n\x14row_key_regex_filter\x18\x04 \x01(\x0cH\x00\x12\x1b\n\x11row_sample_filter\x18\x0e \x01(\x01H\x00\x12\"\n\x18\x66\x61mily_name_regex_filter\x18\x05 \x01(\tH\x00\x12\'\n\x1d\x63olumn_qualifier_regex_filter\x18\x06 \x01(\x0cH\x00\x12>\n\x13\x63olumn_range_filter\x18\x07 \x01(\x0b\x32\x1f.google.bigtable.v2.ColumnRangeH\x00\x12\x44\n\x16timestamp_range_filter\x18\x08 \x01(\x0b\x32\".google.bigtable.v2.TimestampRangeH\x00\x12\x1c\n\x12value_regex_filter\x18\t 
\x01(\x0cH\x00\x12<\n\x12value_range_filter\x18\x0f \x01(\x0b\x32\x1e.google.bigtable.v2.ValueRangeH\x00\x12%\n\x1b\x63\x65lls_per_row_offset_filter\x18\n \x01(\x05H\x00\x12$\n\x1a\x63\x65lls_per_row_limit_filter\x18\x0b \x01(\x05H\x00\x12\'\n\x1d\x63\x65lls_per_column_limit_filter\x18\x0c \x01(\x05H\x00\x12!\n\x17strip_value_transformer\x18\r \x01(\x08H\x00\x12!\n\x17\x61pply_label_transformer\x18\x13 \x01(\tH\x00\x1a\x37\n\x05\x43hain\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x1a<\n\nInterleave\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x1a\xad\x01\n\tCondition\x12\x37\n\x10predicate_filter\x18\x01 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x32\n\x0btrue_filter\x18\x02 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x33\n\x0c\x66\x61lse_filter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilterB\x08\n\x06\x66ilter\"\xc9\x04\n\x08Mutation\x12\x38\n\x08set_cell\x18\x01 \x01(\x0b\x32$.google.bigtable.v2.Mutation.SetCellH\x00\x12K\n\x12\x64\x65lete_from_column\x18\x02 \x01(\x0b\x32-.google.bigtable.v2.Mutation.DeleteFromColumnH\x00\x12K\n\x12\x64\x65lete_from_family\x18\x03 \x01(\x0b\x32-.google.bigtable.v2.Mutation.DeleteFromFamilyH\x00\x12\x45\n\x0f\x64\x65lete_from_row\x18\x04 \x01(\x0b\x32*.google.bigtable.v2.Mutation.DeleteFromRowH\x00\x1a\x61\n\x07SetCell\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x18\n\x10timestamp_micros\x18\x03 \x01(\x03\x12\r\n\x05value\x18\x04 \x01(\x0c\x1ay\n\x10\x44\x65leteFromColumn\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x36\n\ntime_range\x18\x03 \x01(\x0b\x32\".google.bigtable.v2.TimestampRange\x1a\'\n\x10\x44\x65leteFromFamily\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x1a\x0f\n\rDeleteFromRowB\n\n\x08mutation\"\x80\x01\n\x13ReadModifyWriteRule\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x16\n\x0c\x61ppend_value\x18\x03 \x01(\x0cH\x00\x12\x1a\n\x10increment_amount\x18\x04 \x01(\x03H\x00\x42\x06\n\x04ruleB%\n\x16\x63om.google.bigtable.v2B\tDataProtoP\x01\x62\x06proto3') + serialized_pb=_b('\n)google/cloud/bigtable_v2/proto/data.proto\x12\x12google.bigtable.v2\"@\n\x03Row\x12\x0b\n\x03key\x18\x01 \x01(\x0c\x12,\n\x08\x66\x61milies\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v2.Family\"C\n\x06\x46\x61mily\x12\x0c\n\x04name\x18\x01 \x01(\t\x12+\n\x07\x63olumns\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v2.Column\"D\n\x06\x43olumn\x12\x11\n\tqualifier\x18\x01 \x01(\x0c\x12\'\n\x05\x63\x65lls\x18\x02 \x03(\x0b\x32\x18.google.bigtable.v2.Cell\"?\n\x04\x43\x65ll\x12\x18\n\x10timestamp_micros\x18\x01 \x01(\x03\x12\r\n\x05value\x18\x02 \x01(\x0c\x12\x0e\n\x06labels\x18\x03 \x03(\t\"\x8a\x01\n\x08RowRange\x12\x1a\n\x10start_key_closed\x18\x01 \x01(\x0cH\x00\x12\x18\n\x0estart_key_open\x18\x02 \x01(\x0cH\x00\x12\x16\n\x0c\x65nd_key_open\x18\x03 \x01(\x0cH\x01\x12\x18\n\x0e\x65nd_key_closed\x18\x04 \x01(\x0cH\x01\x42\x0b\n\tstart_keyB\t\n\x07\x65nd_key\"L\n\x06RowSet\x12\x10\n\x08row_keys\x18\x01 \x03(\x0c\x12\x30\n\nrow_ranges\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.RowRange\"\xc6\x01\n\x0b\x43olumnRange\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12 \n\x16start_qualifier_closed\x18\x02 \x01(\x0cH\x00\x12\x1e\n\x14start_qualifier_open\x18\x03 \x01(\x0cH\x00\x12\x1e\n\x14\x65nd_qualifier_closed\x18\x04 \x01(\x0cH\x01\x12\x1c\n\x12\x65nd_qualifier_open\x18\x05 
\x01(\x0cH\x01\x42\x11\n\x0fstart_qualifierB\x0f\n\rend_qualifier\"N\n\x0eTimestampRange\x12\x1e\n\x16start_timestamp_micros\x18\x01 \x01(\x03\x12\x1c\n\x14\x65nd_timestamp_micros\x18\x02 \x01(\x03\"\x98\x01\n\nValueRange\x12\x1c\n\x12start_value_closed\x18\x01 \x01(\x0cH\x00\x12\x1a\n\x10start_value_open\x18\x02 \x01(\x0cH\x00\x12\x1a\n\x10\x65nd_value_closed\x18\x03 \x01(\x0cH\x01\x12\x18\n\x0e\x65nd_value_open\x18\x04 \x01(\x0cH\x01\x42\r\n\x0bstart_valueB\x0b\n\tend_value\"\xdf\x08\n\tRowFilter\x12\x34\n\x05\x63hain\x18\x01 \x01(\x0b\x32#.google.bigtable.v2.RowFilter.ChainH\x00\x12>\n\ninterleave\x18\x02 \x01(\x0b\x32(.google.bigtable.v2.RowFilter.InterleaveH\x00\x12<\n\tcondition\x18\x03 \x01(\x0b\x32\'.google.bigtable.v2.RowFilter.ConditionH\x00\x12\x0e\n\x04sink\x18\x10 \x01(\x08H\x00\x12\x19\n\x0fpass_all_filter\x18\x11 \x01(\x08H\x00\x12\x1a\n\x10\x62lock_all_filter\x18\x12 \x01(\x08H\x00\x12\x1e\n\x14row_key_regex_filter\x18\x04 \x01(\x0cH\x00\x12\x1b\n\x11row_sample_filter\x18\x0e \x01(\x01H\x00\x12\"\n\x18\x66\x61mily_name_regex_filter\x18\x05 \x01(\tH\x00\x12\'\n\x1d\x63olumn_qualifier_regex_filter\x18\x06 \x01(\x0cH\x00\x12>\n\x13\x63olumn_range_filter\x18\x07 \x01(\x0b\x32\x1f.google.bigtable.v2.ColumnRangeH\x00\x12\x44\n\x16timestamp_range_filter\x18\x08 \x01(\x0b\x32\".google.bigtable.v2.TimestampRangeH\x00\x12\x1c\n\x12value_regex_filter\x18\t \x01(\x0cH\x00\x12<\n\x12value_range_filter\x18\x0f \x01(\x0b\x32\x1e.google.bigtable.v2.ValueRangeH\x00\x12%\n\x1b\x63\x65lls_per_row_offset_filter\x18\n \x01(\x05H\x00\x12$\n\x1a\x63\x65lls_per_row_limit_filter\x18\x0b \x01(\x05H\x00\x12\'\n\x1d\x63\x65lls_per_column_limit_filter\x18\x0c \x01(\x05H\x00\x12!\n\x17strip_value_transformer\x18\r \x01(\x08H\x00\x12!\n\x17\x61pply_label_transformer\x18\x13 \x01(\tH\x00\x1a\x37\n\x05\x43hain\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x1a<\n\nInterleave\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x1a\xad\x01\n\tCondition\x12\x37\n\x10predicate_filter\x18\x01 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x32\n\x0btrue_filter\x18\x02 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x33\n\x0c\x66\x61lse_filter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilterB\x08\n\x06\x66ilter\"\xc9\x04\n\x08Mutation\x12\x38\n\x08set_cell\x18\x01 \x01(\x0b\x32$.google.bigtable.v2.Mutation.SetCellH\x00\x12K\n\x12\x64\x65lete_from_column\x18\x02 \x01(\x0b\x32-.google.bigtable.v2.Mutation.DeleteFromColumnH\x00\x12K\n\x12\x64\x65lete_from_family\x18\x03 \x01(\x0b\x32-.google.bigtable.v2.Mutation.DeleteFromFamilyH\x00\x12\x45\n\x0f\x64\x65lete_from_row\x18\x04 \x01(\x0b\x32*.google.bigtable.v2.Mutation.DeleteFromRowH\x00\x1a\x61\n\x07SetCell\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x18\n\x10timestamp_micros\x18\x03 \x01(\x03\x12\r\n\x05value\x18\x04 \x01(\x0c\x1ay\n\x10\x44\x65leteFromColumn\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x36\n\ntime_range\x18\x03 \x01(\x0b\x32\".google.bigtable.v2.TimestampRange\x1a\'\n\x10\x44\x65leteFromFamily\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x1a\x0f\n\rDeleteFromRowB\n\n\x08mutation\"\x80\x01\n\x13ReadModifyWriteRule\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x16\n\x0c\x61ppend_value\x18\x03 \x01(\x0cH\x00\x12\x1a\n\x10increment_amount\x18\x04 
\x01(\x03H\x00\x42\x06\n\x04ruleB\x97\x01\n\x16\x63om.google.bigtable.v2B\tDataProtoP\x01Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2b\x06proto3') ) _sym_db.RegisterFileDescriptor(DESCRIPTOR) @@ -59,8 +59,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=53, - serialized_end=117, + serialized_start=65, + serialized_end=129, ) @@ -97,8 +97,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=119, - serialized_end=186, + serialized_start=131, + serialized_end=198, ) @@ -135,8 +135,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=188, - serialized_end=256, + serialized_start=200, + serialized_end=268, ) @@ -180,8 +180,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=258, - serialized_end=321, + serialized_start=270, + serialized_end=333, ) @@ -238,8 +238,8 @@ name='end_key', full_name='google.bigtable.v2.RowRange.end_key', index=1, containing_type=None, fields=[]), ], - serialized_start=324, - serialized_end=462, + serialized_start=336, + serialized_end=474, ) @@ -276,8 +276,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=464, - serialized_end=540, + serialized_start=476, + serialized_end=552, ) @@ -341,8 +341,8 @@ name='end_qualifier', full_name='google.bigtable.v2.ColumnRange.end_qualifier', index=1, containing_type=None, fields=[]), ], - serialized_start=543, - serialized_end=741, + serialized_start=555, + serialized_end=753, ) @@ -379,8 +379,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=743, - serialized_end=821, + serialized_start=755, + serialized_end=833, ) @@ -437,8 +437,8 @@ name='end_value', full_name='google.bigtable.v2.ValueRange.end_value', index=1, containing_type=None, fields=[]), ], - serialized_start=824, - serialized_end=976, + serialized_start=836, + serialized_end=988, ) @@ -468,8 +468,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1795, - serialized_end=1850, + serialized_start=1807, + serialized_end=1862, ) _ROWFILTER_INTERLEAVE = _descriptor.Descriptor( @@ -498,8 +498,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1852, - serialized_end=1912, + serialized_start=1864, + serialized_end=1924, ) _ROWFILTER_CONDITION = _descriptor.Descriptor( @@ -542,8 +542,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1915, - serialized_end=2088, + serialized_start=1927, + serialized_end=2100, ) _ROWFILTER = _descriptor.Descriptor( @@ -701,8 +701,8 @@ name='filter', full_name='google.bigtable.v2.RowFilter.filter', index=0, containing_type=None, fields=[]), ], - serialized_start=979, - serialized_end=2098, + serialized_start=991, + serialized_end=2110, ) @@ -753,8 +753,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2396, - serialized_end=2493, + serialized_start=2408, + serialized_end=2505, ) _MUTATION_DELETEFROMCOLUMN = _descriptor.Descriptor( @@ -797,8 +797,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2495, - serialized_end=2616, + serialized_start=2507, + serialized_end=2628, ) _MUTATION_DELETEFROMFAMILY = _descriptor.Descriptor( @@ -827,8 +827,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2618, - serialized_end=2657, + serialized_start=2630, + serialized_end=2669, ) _MUTATION_DELETEFROMROW = _descriptor.Descriptor( @@ -850,8 +850,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2659, - serialized_end=2674, + serialized_start=2671, + serialized_end=2686, ) _MUTATION = _descriptor.Descriptor( @@ -904,8 +904,8 @@ name='mutation', full_name='google.bigtable.v2.Mutation.mutation', 
index=0, containing_type=None, fields=[]), ], - serialized_start=2101, - serialized_end=2686, + serialized_start=2113, + serialized_end=2698, ) @@ -959,8 +959,8 @@ name='rule', full_name='google.bigtable.v2.ReadModifyWriteRule.rule', index=0, containing_type=None, fields=[]), ], - serialized_start=2689, - serialized_end=2817, + serialized_start=2701, + serialized_end=2829, ) _ROW.fields_by_name['families'].message_type = _FAMILY @@ -1116,63 +1116,219 @@ Row = _reflection.GeneratedProtocolMessageType('Row', (_message.Message,), dict( DESCRIPTOR = _ROW, - __module__ = 'google.bigtable.v2.data_pb2' + __module__ = 'google.cloud.bigtable_v2.proto.data_pb2' + , + __doc__ = """Specifies the complete (requested) contents of a single row of a table. + Rows which exceed 256MiB in size cannot be read in full. + + + Attributes: + key: + The unique key which identifies this row within its table. + This is the same key that's used to identify the row in, for + example, a MutateRowRequest. May contain any non-empty byte + string up to 4KiB in length. + families: + May be empty, but only if the entire row is empty. The mutual + ordering of column families is not specified. + """, # @@protoc_insertion_point(class_scope:google.bigtable.v2.Row) )) _sym_db.RegisterMessage(Row) Family = _reflection.GeneratedProtocolMessageType('Family', (_message.Message,), dict( DESCRIPTOR = _FAMILY, - __module__ = 'google.bigtable.v2.data_pb2' + __module__ = 'google.cloud.bigtable_v2.proto.data_pb2' + , + __doc__ = """Specifies (some of) the contents of a single row/column family + intersection of a table. + + + Attributes: + name: + The unique key which identifies this family within its row. + This is the same key that's used to identify the family in, + for example, a RowFilter which sets its + "family\_name\_regex\_filter" field. Must match + ``[-_.a-zA-Z0-9]+``, except that AggregatingRowProcessors may + produce cells in a sentinel family with an empty name. Must be + no greater than 64 characters in length. + columns: + Must not be empty. Sorted in order of increasing "qualifier". + """, # @@protoc_insertion_point(class_scope:google.bigtable.v2.Family) )) _sym_db.RegisterMessage(Family) Column = _reflection.GeneratedProtocolMessageType('Column', (_message.Message,), dict( DESCRIPTOR = _COLUMN, - __module__ = 'google.bigtable.v2.data_pb2' + __module__ = 'google.cloud.bigtable_v2.proto.data_pb2' + , + __doc__ = """Specifies (some of) the contents of a single row/column intersection of + a table. + + + Attributes: + qualifier: + The unique key which identifies this column within its family. + This is the same key that's used to identify the column in, + for example, a RowFilter which sets its + ``column_qualifier_regex_filter`` field. May contain any byte + string, including the empty string, up to 16kiB in length. + cells: + Must not be empty. Sorted in order of decreasing + "timestamp\_micros". + """, # @@protoc_insertion_point(class_scope:google.bigtable.v2.Column) )) _sym_db.RegisterMessage(Column) Cell = _reflection.GeneratedProtocolMessageType('Cell', (_message.Message,), dict( DESCRIPTOR = _CELL, - __module__ = 'google.bigtable.v2.data_pb2' + __module__ = 'google.cloud.bigtable_v2.proto.data_pb2' + , + __doc__ = """Specifies (some of) the contents of a single row/column/timestamp of a + table. + + + Attributes: + timestamp_micros: + The cell's stored timestamp, which also uniquely identifies it + within its column. 
Values are always expressed in + microseconds, but individual tables may set a coarser + granularity to further restrict the allowed values. For + example, a table which specifies millisecond granularity will + only allow values of ``timestamp_micros`` which are multiples + of 1000. + value: + The value stored in the cell. May contain any byte string, + including the empty string, up to 100MiB in length. + labels: + Labels applied to the cell by a + [RowFilter][google.bigtable.v2.RowFilter]. + """, # @@protoc_insertion_point(class_scope:google.bigtable.v2.Cell) )) _sym_db.RegisterMessage(Cell) RowRange = _reflection.GeneratedProtocolMessageType('RowRange', (_message.Message,), dict( DESCRIPTOR = _ROWRANGE, - __module__ = 'google.bigtable.v2.data_pb2' + __module__ = 'google.cloud.bigtable_v2.proto.data_pb2' + , + __doc__ = """Specifies a contiguous range of rows. + + + Attributes: + start_key: + The row key at which to start the range. If neither field is + set, interpreted as the empty string, inclusive. + start_key_closed: + Used when giving an inclusive lower bound for the range. + start_key_open: + Used when giving an exclusive lower bound for the range. + end_key: + The row key at which to end the range. If neither field is + set, interpreted as the infinite row key, exclusive. + end_key_open: + Used when giving an exclusive upper bound for the range. + end_key_closed: + Used when giving an inclusive upper bound for the range. + """, # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowRange) )) _sym_db.RegisterMessage(RowRange) RowSet = _reflection.GeneratedProtocolMessageType('RowSet', (_message.Message,), dict( DESCRIPTOR = _ROWSET, - __module__ = 'google.bigtable.v2.data_pb2' + __module__ = 'google.cloud.bigtable_v2.proto.data_pb2' + , + __doc__ = """Specifies a non-contiguous set of rows. + + + Attributes: + row_keys: + Single rows included in the set. + row_ranges: + Contiguous row ranges included in the set. + """, # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowSet) )) _sym_db.RegisterMessage(RowSet) ColumnRange = _reflection.GeneratedProtocolMessageType('ColumnRange', (_message.Message,), dict( DESCRIPTOR = _COLUMNRANGE, - __module__ = 'google.bigtable.v2.data_pb2' + __module__ = 'google.cloud.bigtable_v2.proto.data_pb2' + , + __doc__ = """Specifies a contiguous range of columns within a single column family. + The range spans from : to + :, where both bounds can be either + inclusive or exclusive. + + + Attributes: + family_name: + The name of the column family within which this range falls. + start_qualifier: + The column qualifier at which to start the range (within + ``column_family``). If neither field is set, interpreted as + the empty string, inclusive. + start_qualifier_closed: + Used when giving an inclusive lower bound for the range. + start_qualifier_open: + Used when giving an exclusive lower bound for the range. + end_qualifier: + The column qualifier at which to end the range (within + ``column_family``). If neither field is set, interpreted as + the infinite string, exclusive. + end_qualifier_closed: + Used when giving an inclusive upper bound for the range. + end_qualifier_open: + Used when giving an exclusive upper bound for the range. 
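RowRange, RowSet, and ColumnRange above are plain value messages. As an
illustrative sketch (not part of the generated module, with the import path
assumed from the ``__module__`` strings), this is how the ``rows`` field of a
ReadRowsRequest and a ``column_range_filter`` might be populated, using
placeholder keys and qualifiers::

    from google.cloud.bigtable_v2.proto import data_pb2

    # Two explicit keys plus a half-open key range, for ReadRowsRequest.rows.
    row_set = data_pb2.RowSet(
        row_keys=[b'user#0001', b'user#0002'],
        row_ranges=[data_pb2.RowRange(
            start_key_closed=b'user#1000',  # inclusive lower bound
            end_key_open=b'user#2000',      # exclusive upper bound
        )],
    )

    # Qualifiers ['a', 'b') within family 'cf1', for a column_range_filter.
    column_range = data_pb2.ColumnRange(
        family_name='cf1',
        start_qualifier_closed=b'a',
        end_qualifier_open=b'b',
    )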
+ """, # @@protoc_insertion_point(class_scope:google.bigtable.v2.ColumnRange) )) _sym_db.RegisterMessage(ColumnRange) TimestampRange = _reflection.GeneratedProtocolMessageType('TimestampRange', (_message.Message,), dict( DESCRIPTOR = _TIMESTAMPRANGE, - __module__ = 'google.bigtable.v2.data_pb2' + __module__ = 'google.cloud.bigtable_v2.proto.data_pb2' + , + __doc__ = """Specified a contiguous range of microsecond timestamps. + + + Attributes: + start_timestamp_micros: + Inclusive lower bound. If left empty, interpreted as 0. + end_timestamp_micros: + Exclusive upper bound. If left empty, interpreted as infinity. + """, # @@protoc_insertion_point(class_scope:google.bigtable.v2.TimestampRange) )) _sym_db.RegisterMessage(TimestampRange) ValueRange = _reflection.GeneratedProtocolMessageType('ValueRange', (_message.Message,), dict( DESCRIPTOR = _VALUERANGE, - __module__ = 'google.bigtable.v2.data_pb2' + __module__ = 'google.cloud.bigtable_v2.proto.data_pb2' + , + __doc__ = """Specifies a contiguous range of raw byte values. + + + Attributes: + start_value: + The value at which to start the range. If neither field is + set, interpreted as the empty string, inclusive. + start_value_closed: + Used when giving an inclusive lower bound for the range. + start_value_open: + Used when giving an exclusive lower bound for the range. + end_value: + The value at which to end the range. If neither field is set, + interpreted as the infinite string, exclusive. + end_value_closed: + Used when giving an inclusive upper bound for the range. + end_value_open: + Used when giving an exclusive upper bound for the range. + """, # @@protoc_insertion_point(class_scope:google.bigtable.v2.ValueRange) )) _sym_db.RegisterMessage(ValueRange) @@ -1181,26 +1337,264 @@ Chain = _reflection.GeneratedProtocolMessageType('Chain', (_message.Message,), dict( DESCRIPTOR = _ROWFILTER_CHAIN, - __module__ = 'google.bigtable.v2.data_pb2' + __module__ = 'google.cloud.bigtable_v2.proto.data_pb2' + , + __doc__ = """A RowFilter which sends rows through several RowFilters in sequence. + + + Attributes: + filters: + The elements of "filters" are chained together to process the + input row: in row -> f(0) -> intermediate row -> f(1) -> ... + -> f(N) -> out row The full chain is executed atomically. + """, # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowFilter.Chain) )) , Interleave = _reflection.GeneratedProtocolMessageType('Interleave', (_message.Message,), dict( DESCRIPTOR = _ROWFILTER_INTERLEAVE, - __module__ = 'google.bigtable.v2.data_pb2' + __module__ = 'google.cloud.bigtable_v2.proto.data_pb2' + , + __doc__ = """A RowFilter which sends each row to each of several component RowFilters + and interleaves the results. + + + Attributes: + filters: + The elements of "filters" all process a copy of the input row, + and the results are pooled, sorted, and combined into a single + output row. If multiple cells are produced with the same + column and timestamp, they will all appear in the output row + in an unspecified mutual order. 
Consider the following + example, with three filters: :: + input row | + ----------------------------------------------------- + | | | + f(0) f(1) f(2) + | | | 1: + foo,bar,10,x foo,bar,10,z far,bar,7,a + 2: foo,blah,11,z far,blah,5,x + far,blah,5,x | | + | + ----------------------------------------------------- + | 1: foo,bar,10,z // could have + switched with #2 2: foo,bar,10,x // + could have switched with #1 3: + foo,blah,11,z 4: far,bar,7,a 5: + far,blah,5,x // identical to #6 6: + far,blah,5,x // identical to #5 All interleaved filters are + executed atomically. + """, # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowFilter.Interleave) )) , Condition = _reflection.GeneratedProtocolMessageType('Condition', (_message.Message,), dict( DESCRIPTOR = _ROWFILTER_CONDITION, - __module__ = 'google.bigtable.v2.data_pb2' + __module__ = 'google.cloud.bigtable_v2.proto.data_pb2' + , + __doc__ = """A RowFilter which evaluates one of two possible RowFilters, depending on + whether or not a predicate RowFilter outputs any cells from the input + row. + + IMPORTANT NOTE: The predicate filter does not execute atomically with + the true and false filters, which may lead to inconsistent or unexpected + results. Additionally, Condition filters have poor performance, + especially when filters are set for the false condition. + + + Attributes: + predicate_filter: + If ``predicate_filter`` outputs any cells, then + ``true_filter`` will be evaluated on the input row. Otherwise, + ``false_filter`` will be evaluated. + true_filter: + The filter to apply to the input row if ``predicate_filter`` + returns any results. If not provided, no results will be + returned in the true case. + false_filter: + The filter to apply to the input row if ``predicate_filter`` + does not return any results. If not provided, no results will + be returned in the false case. + """, # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowFilter.Condition) )) , DESCRIPTOR = _ROWFILTER, - __module__ = 'google.bigtable.v2.data_pb2' + __module__ = 'google.cloud.bigtable_v2.proto.data_pb2' + , + __doc__ = """Takes a row as input and produces an alternate view of the row based on + specified rules. For example, a RowFilter might trim down a row to + include just the cells from columns matching a given regular expression, + or might return all the cells of a row but not their values. More + complicated filters can be composed out of these components to express + requests such as, "within every column of a particular family, give just + the two most recent cells which are older than timestamp X." + + There are two broad categories of RowFilters (true filters and + transformers), as well as two ways to compose simple filters into more + complex ones (chains and interleaves). They work as follows: + + - True filters alter the input row by excluding some of its cells + wholesale from the output row. An example of a true filter is the + ``value_regex_filter``, which excludes cells whose values don't match + the specified pattern. All regex true filters use RE2 syntax + (https://github.com/google/re2/wiki/Syntax) in raw byte mode + (RE2::Latin1), and are evaluated as full matches. An important point + to keep in mind is that ``RE2(.)`` is equivalent by default to + ``RE2([^\n])``, meaning that it does not match newlines. When + attempting to match an arbitrary byte, you should therefore use the + escape sequence ``\C``, which may need to be further escaped as + ``\\C`` in your client language. 
+ + - Transformers alter the input row by changing the values of some of + its cells in the output, without excluding them completely. + Currently, the only supported transformer is the + ``strip_value_transformer``, which replaces every cell's value with + the empty string. + + - Chains and interleaves are described in more detail in the + RowFilter.Chain and RowFilter.Interleave documentation. + + The total serialized size of a RowFilter message must not exceed 4096 + bytes, and RowFilters may not be nested within each other (in Chains or + Interleaves) to a depth of more than 20. + + + Attributes: + filter: + Which of the possible RowFilter types to apply. If none are + set, this RowFilter returns all cells in the input row. + chain: + Applies several RowFilters to the data in sequence, + progressively narrowing the results. + interleave: + Applies several RowFilters to the data in parallel and + combines the results. + condition: + Applies one of two possible RowFilters to the data based on + the output of a predicate RowFilter. + sink: + ADVANCED USE ONLY. Hook for introspection into the RowFilter. + Outputs all cells directly to the output of the read rather + than to any parent filter. Consider the following example: :: + Chain( FamilyRegex("A"), Interleave( + All(), Chain(Label("foo"), Sink()) ), + QualifierRegex("B") ) A,A,1,w + A,B,2,x B,B,4,z + | FamilyRegex("A") + | A,A,1,w + A,B,2,x | + +------------+-------------+ | + | All() Label(foo) + | | A,A,1,w + A,A,1,w,labels:[foo] A,B,2,x + A,B,2,x,labels:[foo] | + | | Sink() + --------------+ | | + | +------------+ x------+ + A,A,1,w,labels:[foo] | + A,B,2,x,labels:[foo] A,A,1,w + | A,B,2,x + | | + | QualifierRegex("B") + | | + | A,B,2,x + | | + | + +--------------------------------+ + | A,A,1,w,labels:[foo] + A,B,2,x,labels:[foo] // could be switched + A,B,2,x // could be switched Despite being + excluded by the qualifier filter, a copy of every cell that + reaches the sink is present in the final result. As with an + [Interleave][google.bigtable.v2.RowFilter.Interleave], + duplicate cells are possible, and appear in an unspecified + mutual order. In this case we have a duplicate with column + "A:B" and timestamp 2, because one copy passed through the all + filter while the other was passed through the label and sink. + Note that one copy has label "foo", while the other does not. + Cannot be used within the ``predicate_filter``, + ``true_filter``, or ``false_filter`` of a + [Condition][google.bigtable.v2.RowFilter.Condition]. + pass_all_filter: + Matches all cells, regardless of input. Functionally + equivalent to leaving ``filter`` unset, but included for + completeness. + block_all_filter: + Does not match any cells, regardless of input. Useful for + temporarily disabling just part of a filter. + row_key_regex_filter: + Matches only cells from rows whose keys satisfy the given RE2 + regex. In other words, passes through the entire row when the + key matches, and otherwise produces an empty row. Note that, + since row keys can contain arbitrary bytes, the ``\C`` escape + sequence must be used if a true wildcard is desired. The ``.`` + character will not match the new line character ``\n``, which + may be present in a binary key. + row_sample_filter: + Matches all cells from a row with probability p, and matches + no cells from the row with probability 1-p. + family_name_regex_filter: + Matches only cells from columns whose families satisfy the + given RE2 regex. 
For technical reasons, the regex must not + contain the ``:`` character, even if it is not being used as a + literal. Note that, since column families cannot contain the + new line character ``\n``, it is sufficient to use ``.`` as a + full wildcard when matching column family names. + column_qualifier_regex_filter: + Matches only cells from columns whose qualifiers satisfy the + given RE2 regex. Note that, since column qualifiers can + contain arbitrary bytes, the ``\C`` escape sequence must be + used if a true wildcard is desired. The ``.`` character will + not match the new line character ``\n``, which may be present + in a binary qualifier. + column_range_filter: + Matches only cells from columns within the given range. + timestamp_range_filter: + Matches only cells with timestamps within the given range. + value_regex_filter: + Matches only cells with values that satisfy the given regular + expression. Note that, since cell values can contain arbitrary + bytes, the ``\C`` escape sequence must be used if a true + wildcard is desired. The ``.`` character will not match the + new line character ``\n``, which may be present in a binary + value. + value_range_filter: + Matches only cells with values that fall within the given + range. + cells_per_row_offset_filter: + Skips the first N cells of each row, matching all subsequent + cells. If duplicate cells are present, as is possible when + using an Interleave, each copy of the cell is counted + separately. + cells_per_row_limit_filter: + Matches only the first N cells of each row. If duplicate cells + are present, as is possible when using an Interleave, each + copy of the cell is counted separately. + cells_per_column_limit_filter: + Matches only the most recent N cells within each column. For + example, if N=2, this filter would match column ``foo:bar`` at + timestamps 10 and 9, skip all earlier cells in ``foo:bar``, + and then begin matching again in column ``foo:bar2``. If + duplicate cells are present, as is possible when using an + Interleave, each copy of the cell is counted separately. + strip_value_transformer: + Replaces each cell's value with the empty string. + apply_label_transformer: + Applies the given label to all cells in the output row. This + allows the client to determine which results were produced + from which part of the filter. Values must be at most 15 + characters in length, and match the RE2 pattern + ``[a-z0-9\\-]+`` Due to a technical limitation, it is not + currently possible to apply multiple labels to a cell. As a + result, a Chain may have no more than one sub-filter which + contains a ``apply_label_transformer``. It is okay for an + Interleave to contain multiple ``apply_label_transformers``, + as they will be applied to separate copies of the input. This + may be relaxed in the future. + """, # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowFilter) )) _sym_db.RegisterMessage(RowFilter) @@ -1212,33 +1606,97 @@ SetCell = _reflection.GeneratedProtocolMessageType('SetCell', (_message.Message,), dict( DESCRIPTOR = _MUTATION_SETCELL, - __module__ = 'google.bigtable.v2.data_pb2' + __module__ = 'google.cloud.bigtable_v2.proto.data_pb2' + , + __doc__ = """A Mutation which sets the value of the specified cell. + + + Attributes: + family_name: + The name of the family into which new data should be written. + Must match ``[-_.a-zA-Z0-9]+`` + column_qualifier: + The qualifier of the column into which new data should be + written. Can be any byte string, including the empty string. 
+ timestamp_micros: + The timestamp of the cell into which new data should be + written. Use -1 for current Bigtable server time. Otherwise, + the client should set this value itself, noting that the + default value is a timestamp of zero if the field is left + unspecified. Values must match the granularity of the table + (e.g. micros, millis). + value: + The value to be written into the specified cell. + """, # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation.SetCell) )) , DeleteFromColumn = _reflection.GeneratedProtocolMessageType('DeleteFromColumn', (_message.Message,), dict( DESCRIPTOR = _MUTATION_DELETEFROMCOLUMN, - __module__ = 'google.bigtable.v2.data_pb2' + __module__ = 'google.cloud.bigtable_v2.proto.data_pb2' + , + __doc__ = """A Mutation which deletes cells from the specified column, optionally + restricting the deletions to a given timestamp range. + + + Attributes: + family_name: + The name of the family from which cells should be deleted. + Must match ``[-_.a-zA-Z0-9]+`` + column_qualifier: + The qualifier of the column from which cells should be + deleted. Can be any byte string, including the empty string. + time_range: + The range of timestamps within which cells should be deleted. + """, # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation.DeleteFromColumn) )) , DeleteFromFamily = _reflection.GeneratedProtocolMessageType('DeleteFromFamily', (_message.Message,), dict( DESCRIPTOR = _MUTATION_DELETEFROMFAMILY, - __module__ = 'google.bigtable.v2.data_pb2' + __module__ = 'google.cloud.bigtable_v2.proto.data_pb2' + , + __doc__ = """A Mutation which deletes all cells from the specified column family. + + + Attributes: + family_name: + The name of the family from which cells should be deleted. + Must match ``[-_.a-zA-Z0-9]+`` + """, # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation.DeleteFromFamily) )) , DeleteFromRow = _reflection.GeneratedProtocolMessageType('DeleteFromRow', (_message.Message,), dict( DESCRIPTOR = _MUTATION_DELETEFROMROW, - __module__ = 'google.bigtable.v2.data_pb2' + __module__ = 'google.cloud.bigtable_v2.proto.data_pb2' + , + __doc__ = """A Mutation which deletes all cells from the containing row. + """, # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation.DeleteFromRow) )) , DESCRIPTOR = _MUTATION, - __module__ = 'google.bigtable.v2.data_pb2' + __module__ = 'google.cloud.bigtable_v2.proto.data_pb2' + , + __doc__ = """Specifies a particular change to be made to the contents of a row. + + + Attributes: + mutation: + Which of the possible Mutation types to apply. + set_cell: + Set a cell's value. + delete_from_column: + Deletes cells from a column. + delete_from_family: + Deletes cells from a column family. + delete_from_row: + Deletes cells from the entire row. + """, # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation) )) _sym_db.RegisterMessage(Mutation) @@ -1249,12 +1707,49 @@ ReadModifyWriteRule = _reflection.GeneratedProtocolMessageType('ReadModifyWriteRule', (_message.Message,), dict( DESCRIPTOR = _READMODIFYWRITERULE, - __module__ = 'google.bigtable.v2.data_pb2' + __module__ = 'google.cloud.bigtable_v2.proto.data_pb2' + , + __doc__ = """Specifies an atomic read/modify/write operation on the latest value of + the specified column. + + + Attributes: + family_name: + The name of the family to which the read/modify/write should + be applied. Must match ``[-_.a-zA-Z0-9]+`` + column_qualifier: + The qualifier of the column to which the read/modify/write + should be applied. 
Can be any byte string, including the empty + string. + rule: + The rule used to determine the column's new latest value from + its current latest value. + append_value: + Rule specifying that ``append_value`` be appended to the + existing value. If the targeted cell is unset, it will be + treated as containing the empty string. + increment_amount: + Rule specifying that ``increment_amount`` be added to the + existing value. If the targeted cell is unset, it will be + treated as containing a zero. Otherwise, the targeted cell + must contain an 8-byte value (interpreted as a 64-bit big- + endian signed integer), or the entire request will fail. + """, # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadModifyWriteRule) )) _sym_db.RegisterMessage(ReadModifyWriteRule) DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\026com.google.bigtable.v2B\tDataProtoP\001')) +DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\026com.google.bigtable.v2B\tDataProtoP\001Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2')) +try: + # THESE ELEMENTS WILL BE DEPRECATED. + # Please use the generated *_pb2_grpc.py files instead. + import grpc + from grpc.beta import implementations as beta_implementations + from grpc.beta import interfaces as beta_interfaces + from grpc.framework.common import cardinality + from grpc.framework.interfaces.face import utilities as face_utilities +except ImportError: + pass # @@protoc_insertion_point(module_scope) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2_grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2_grpc.py new file mode 100644 index 000000000000..a89435267cb2 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2_grpc.py @@ -0,0 +1,3 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +import grpc + diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types.py new file mode 100644 index 000000000000..b7edfdccdbd5 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types.py @@ -0,0 +1,43 @@ +# Copyright 2017 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import absolute_import +import sys + +from google.api_core.protobuf_helpers import get_messages + +from google.api import http_pb2 +from google.cloud.bigtable_v2.proto import bigtable_pb2 +from google.cloud.bigtable_v2.proto import data_pb2 +from google.protobuf import any_pb2 +from google.protobuf import descriptor_pb2 +from google.protobuf import wrappers_pb2 +from google.rpc import status_pb2 + +names = [] +for module in ( + http_pb2, + bigtable_pb2, + data_pb2, + any_pb2, + descriptor_pb2, + wrappers_pb2, + status_pb2, +): + for name, message in get_messages(module).items(): + message.__module__ = 'google.cloud.bigtable_v2.types' + setattr(sys.modules[__name__], name, message) + names.append(name) + +__all__ = tuple(sorted(names)) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index b7b9e6f7617d..9206dcfad733 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -31,6 +31,7 @@ dependencies = [ 'google-cloud-core<0.29dev,>=0.28.0', 'google-api-core[grpc]<2.0.0dev,>=0.1.1', + 'grpc-google-iam-v1<0.12dev,>=0.11.4' ] extras = { } diff --git a/packages/google-cloud-bigtable/tests/unit/_testing.py b/packages/google-cloud-bigtable/tests/unit/_testing.py index cfa24c062660..06881806de1e 100644 --- a/packages/google-cloud-bigtable/tests/unit/_testing.py +++ b/packages/google-cloud-bigtable/tests/unit/_testing.py @@ -21,26 +21,3 @@ class _FakeStub(object): def __init__(self, *results): self.results = results self.method_calls = [] - - def __getattr__(self, name): - # We need not worry about attributes set in constructor - # since __getattribute__ will handle them. - return _MethodMock(name, self) - - -class _MethodMock(object): - """Mock for API method attached to a gRPC stub. - - These are of type :class:`grpc._channel._UnaryUnaryMultiCallable`. - """ - - def __init__(self, name, stub): - self._name = name - self._stub = stub - - def __call__(self, *args, **kwargs): - """Sync method meant to mock a gRPC stub request.""" - self._stub.method_calls.append((self._name, args, kwargs)) - curr_result, self._stub.results = (self._stub.results[0], - self._stub.results[1:]) - return curr_result diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_client_v2.py b/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_client_v2.py new file mode 100644 index 000000000000..d574049f9b9e --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_client_v2.py @@ -0,0 +1,283 @@ +# Copyright 2017 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Unit tests.""" + +import pytest + +from google.cloud import bigtable_v2 +from google.cloud.bigtable_v2.proto import bigtable_pb2 + + +class MultiCallableStub(object): + """Stub for the grpc.UnaryUnaryMultiCallable interface.""" + + def __init__(self, method, channel_stub): + self.method = method + self.channel_stub = channel_stub + + def __call__(self, request, timeout=None, metadata=None, credentials=None): + self.channel_stub.requests.append((self.method, request)) + + response = None + if self.channel_stub.responses: + response = self.channel_stub.responses.pop() + + if isinstance(response, Exception): + raise response + + if response: + return response + + +class ChannelStub(object): + """Stub for the grpc.Channel interface.""" + + def __init__(self, responses=[]): + self.responses = responses + self.requests = [] + + def unary_unary(self, + method, + request_serializer=None, + response_deserializer=None): + return MultiCallableStub(method, self) + + def unary_stream(self, + method, + request_serializer=None, + response_deserializer=None): + return MultiCallableStub(method, self) + + +class CustomException(Exception): + pass + + +class TestBigtableClient(object): + def test_read_rows(self): + # Setup Expected Response + last_scanned_row_key = b'-126' + expected_response = {'last_scanned_row_key': last_scanned_row_key} + expected_response = bigtable_pb2.ReadRowsResponse(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[iter([expected_response])]) + client = bigtable_v2.BigtableClient(channel=channel) + + # Setup Request + table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + + response = client.read_rows(table_name) + resources = list(response) + assert len(resources) == 1 + assert expected_response == resources[0] + + assert len(channel.requests) == 1 + expected_request = bigtable_pb2.ReadRowsRequest(table_name=table_name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_read_rows_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + client = bigtable_v2.BigtableClient(channel=channel) + + # Setup request + table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + + with pytest.raises(CustomException): + client.read_rows(table_name) + + def test_sample_row_keys(self): + # Setup Expected Response + row_key = b'122' + offset_bytes = 889884095 + expected_response = {'row_key': row_key, 'offset_bytes': offset_bytes} + expected_response = bigtable_pb2.SampleRowKeysResponse( + **expected_response) + + # Mock the API response + channel = ChannelStub(responses=[iter([expected_response])]) + client = bigtable_v2.BigtableClient(channel=channel) + + # Setup Request + table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + + response = client.sample_row_keys(table_name) + resources = list(response) + assert len(resources) == 1 + assert expected_response == resources[0] + + assert len(channel.requests) == 1 + expected_request = bigtable_pb2.SampleRowKeysRequest( + table_name=table_name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_sample_row_keys_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + client = bigtable_v2.BigtableClient(channel=channel) + + # Setup request + table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + + with pytest.raises(CustomException): + client.sample_row_keys(table_name) + + def 
test_mutate_row(self): + # Setup Expected Response + expected_response = {} + expected_response = bigtable_pb2.MutateRowResponse(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + client = bigtable_v2.BigtableClient(channel=channel) + + # Setup Request + table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + row_key = b'122' + mutations = [] + + response = client.mutate_row(table_name, row_key, mutations) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_pb2.MutateRowRequest( + table_name=table_name, row_key=row_key, mutations=mutations) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_mutate_row_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + client = bigtable_v2.BigtableClient(channel=channel) + + # Setup request + table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + row_key = b'122' + mutations = [] + + with pytest.raises(CustomException): + client.mutate_row(table_name, row_key, mutations) + + def test_mutate_rows(self): + # Setup Expected Response + expected_response = {} + expected_response = bigtable_pb2.MutateRowsResponse( + **expected_response) + + # Mock the API response + channel = ChannelStub(responses=[iter([expected_response])]) + client = bigtable_v2.BigtableClient(channel=channel) + + # Setup Request + table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + entries = [] + + response = client.mutate_rows(table_name, entries) + resources = list(response) + assert len(resources) == 1 + assert expected_response == resources[0] + + assert len(channel.requests) == 1 + expected_request = bigtable_pb2.MutateRowsRequest( + table_name=table_name, entries=entries) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_mutate_rows_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + client = bigtable_v2.BigtableClient(channel=channel) + + # Setup request + table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + entries = [] + + with pytest.raises(CustomException): + client.mutate_rows(table_name, entries) + + def test_check_and_mutate_row(self): + # Setup Expected Response + predicate_matched = True + expected_response = {'predicate_matched': predicate_matched} + expected_response = bigtable_pb2.CheckAndMutateRowResponse( + **expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + client = bigtable_v2.BigtableClient(channel=channel) + + # Setup Request + table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + row_key = b'122' + + response = client.check_and_mutate_row(table_name, row_key) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_pb2.CheckAndMutateRowRequest( + table_name=table_name, row_key=row_key) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_check_and_mutate_row_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + client = bigtable_v2.BigtableClient(channel=channel) + + # Setup request + table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + row_key = b'122' + + with pytest.raises(CustomException): + client.check_and_mutate_row(table_name, row_key) + + def 
test_read_modify_write_row(self): + # Setup Expected Response + expected_response = {} + expected_response = bigtable_pb2.ReadModifyWriteRowResponse( + **expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + client = bigtable_v2.BigtableClient(channel=channel) + + # Setup Request + table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + row_key = b'122' + rules = [] + + response = client.read_modify_write_row(table_name, row_key, rules) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_pb2.ReadModifyWriteRowRequest( + table_name=table_name, row_key=row_key, rules=rules) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_read_modify_write_row_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + client = bigtable_v2.BigtableClient(channel=channel) + + # Setup request + table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + row_key = b'122' + rules = [] + + with pytest.raises(CustomException): + client.read_modify_write_row(table_name, row_key, rules) diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py b/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py new file mode 100644 index 000000000000..f7c1a515cdb6 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py @@ -0,0 +1,830 @@ +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Unit tests.""" + +import pytest + +from google.rpc import status_pb2 + +from google.cloud import bigtable_admin_v2 +from google.cloud.bigtable_admin_v2 import enums +from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2 +from google.cloud.bigtable_admin_v2.proto import instance_pb2 +from google.iam.v1 import iam_policy_pb2 +from google.iam.v1 import policy_pb2 +from google.longrunning import operations_pb2 +from google.protobuf import empty_pb2 +from google.protobuf import field_mask_pb2 + + +class MultiCallableStub(object): + """Stub for the grpc.UnaryUnaryMultiCallable interface.""" + + def __init__(self, method, channel_stub): + self.method = method + self.channel_stub = channel_stub + + def __call__(self, request, timeout=None, metadata=None, credentials=None): + self.channel_stub.requests.append((self.method, request)) + + response = None + if self.channel_stub.responses: + response = self.channel_stub.responses.pop() + + if isinstance(response, Exception): + raise response + + if response: + return response + + +class ChannelStub(object): + """Stub for the grpc.Channel interface.""" + + def __init__(self, responses=[]): + self.responses = responses + self.requests = [] + + def unary_unary(self, + method, + request_serializer=None, + response_deserializer=None): + return MultiCallableStub(method, self) + + +class CustomException(Exception): + pass + + +class TestBigtableInstanceAdminClient(object): + def test_create_instance(self): + # Setup Expected Response + name = 'name3373707' + display_name = 'displayName1615086568' + expected_response = {'name': name, 'display_name': display_name} + expected_response = instance_pb2.Instance(**expected_response) + operation = operations_pb2.Operation( + name='operations/test_create_instance', done=True) + operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + + # Setup Request + parent = client.project_path('[PROJECT]') + instance_id = 'instanceId-2101995259' + instance = {} + clusters = {} + + response = client.create_instance(parent, instance_id, instance, + clusters) + result = response.result() + assert expected_response == result + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.CreateInstanceRequest( + parent=parent, + instance_id=instance_id, + instance=instance, + clusters=clusters) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_create_instance_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name='operations/test_create_instance_exception', done=True) + operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + + # Setup Request + parent = client.project_path('[PROJECT]') + instance_id = 'instanceId-2101995259' + instance = {} + clusters = {} + + response = client.create_instance(parent, instance_id, instance, + clusters) + exception = response.exception() + assert exception.errors[0] == error + + def test_get_instance(self): + # Setup Expected Response + name_2 = 'name2-1052831874' + display_name = 'displayName1615086568' + expected_response = {'name': name_2, 'display_name': display_name} + expected_response = instance_pb2.Instance(**expected_response) + + # Mock the API response + channel = 
ChannelStub(responses=[expected_response]) + client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + + # Setup Request + name = client.instance_path('[PROJECT]', '[INSTANCE]') + + response = client.get_instance(name) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.GetInstanceRequest( + name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_get_instance_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + + # Setup request + name = client.instance_path('[PROJECT]', '[INSTANCE]') + + with pytest.raises(CustomException): + client.get_instance(name) + + def test_list_instances(self): + # Setup Expected Response + next_page_token = 'nextPageToken-1530815211' + expected_response = {'next_page_token': next_page_token} + expected_response = bigtable_instance_admin_pb2.ListInstancesResponse( + **expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + + # Setup Request + parent = client.project_path('[PROJECT]') + + response = client.list_instances(parent) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.ListInstancesRequest( + parent=parent) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_list_instances_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + + # Setup request + parent = client.project_path('[PROJECT]') + + with pytest.raises(CustomException): + client.list_instances(parent) + + def test_update_instance(self): + # Setup Expected Response + name_2 = 'name2-1052831874' + display_name_2 = 'displayName21615000987' + expected_response = {'name': name_2, 'display_name': display_name_2} + expected_response = instance_pb2.Instance(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + + # Setup Request + name = client.instance_path('[PROJECT]', '[INSTANCE]') + display_name = 'displayName1615086568' + type_ = enums.Instance.Type.TYPE_UNSPECIFIED + labels = {} + + response = client.update_instance(name, display_name, type_, labels) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = instance_pb2.Instance( + name=name, display_name=display_name, type=type_, labels=labels) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_update_instance_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + + # Setup request + name = client.instance_path('[PROJECT]', '[INSTANCE]') + display_name = 'displayName1615086568' + type_ = enums.Instance.Type.TYPE_UNSPECIFIED + labels = {} + + with pytest.raises(CustomException): + client.update_instance(name, display_name, type_, labels) + + def test_partial_update_instance(self): + # Setup Expected Response + name = 'name3373707' + display_name = 'displayName1615086568' + expected_response = {'name': name, 
'display_name': display_name} + expected_response = instance_pb2.Instance(**expected_response) + operation = operations_pb2.Operation( + name='operations/test_partial_update_instance', done=True) + operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + + # Setup Request + instance = instance_pb2.Instance() + update_mask = {} + + response = client.partial_update_instance(instance, update_mask) + result = response.result() + assert expected_response == result + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.PartialUpdateInstanceRequest( + instance=instance, update_mask=update_mask) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_partial_update_instance_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name='operations/test_partial_update_instance_exception', + done=True) + operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + + # Setup Request + instance = instance_pb2.Instance() + update_mask = {} + + response = client.partial_update_instance(instance, update_mask) + exception = response.exception() + assert exception.errors[0] == error + + def test_delete_instance(self): + channel = ChannelStub() + client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + + # Setup Request + name = client.instance_path('[PROJECT]', '[INSTANCE]') + + client.delete_instance(name) + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.DeleteInstanceRequest( + name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_delete_instance_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + + # Setup request + name = client.instance_path('[PROJECT]', '[INSTANCE]') + + with pytest.raises(CustomException): + client.delete_instance(name) + + def test_create_cluster(self): + # Setup Expected Response + name = 'name3373707' + location = 'location1901043637' + serve_nodes = 1288838783 + expected_response = { + 'name': name, + 'location': location, + 'serve_nodes': serve_nodes + } + expected_response = instance_pb2.Cluster(**expected_response) + operation = operations_pb2.Operation( + name='operations/test_create_cluster', done=True) + operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + + # Setup Request + parent = client.instance_path('[PROJECT]', '[INSTANCE]') + cluster_id = 'clusterId240280960' + cluster = {} + + response = client.create_cluster(parent, cluster_id, cluster) + result = response.result() + assert expected_response == result + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.CreateClusterRequest( + parent=parent, cluster_id=cluster_id, cluster=cluster) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_create_cluster_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + 
name='operations/test_create_cluster_exception', done=True) + operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + + # Setup Request + parent = client.instance_path('[PROJECT]', '[INSTANCE]') + cluster_id = 'clusterId240280960' + cluster = {} + + response = client.create_cluster(parent, cluster_id, cluster) + exception = response.exception() + assert exception.errors[0] == error + + def test_get_cluster(self): + # Setup Expected Response + name_2 = 'name2-1052831874' + location = 'location1901043637' + serve_nodes = 1288838783 + expected_response = { + 'name': name_2, + 'location': location, + 'serve_nodes': serve_nodes + } + expected_response = instance_pb2.Cluster(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + + # Setup Request + name = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') + + response = client.get_cluster(name) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.GetClusterRequest( + name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_get_cluster_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + + # Setup request + name = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') + + with pytest.raises(CustomException): + client.get_cluster(name) + + def test_list_clusters(self): + # Setup Expected Response + next_page_token = 'nextPageToken-1530815211' + expected_response = {'next_page_token': next_page_token} + expected_response = bigtable_instance_admin_pb2.ListClustersResponse( + **expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + + # Setup Request + parent = client.instance_path('[PROJECT]', '[INSTANCE]') + + response = client.list_clusters(parent) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.ListClustersRequest( + parent=parent) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_list_clusters_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + + # Setup request + parent = client.instance_path('[PROJECT]', '[INSTANCE]') + + with pytest.raises(CustomException): + client.list_clusters(parent) + + def test_update_cluster(self): + # Setup Expected Response + name_2 = 'name2-1052831874' + location_2 = 'location21541837352' + serve_nodes_2 = 1623486220 + expected_response = { + 'name': name_2, + 'location': location_2, + 'serve_nodes': serve_nodes_2 + } + expected_response = instance_pb2.Cluster(**expected_response) + operation = operations_pb2.Operation( + name='operations/test_update_cluster', done=True) + operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + + # Setup Request + name = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') 
+ location = 'location1901043637' + serve_nodes = 1288838783 + + response = client.update_cluster(name, location, serve_nodes) + result = response.result() + assert expected_response == result + + assert len(channel.requests) == 1 + expected_request = instance_pb2.Cluster( + name=name, location=location, serve_nodes=serve_nodes) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_update_cluster_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name='operations/test_update_cluster_exception', done=True) + operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + + # Setup Request + name = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') + location = 'location1901043637' + serve_nodes = 1288838783 + + response = client.update_cluster(name, location, serve_nodes) + exception = response.exception() + assert exception.errors[0] == error + + def test_delete_cluster(self): + channel = ChannelStub() + client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + + # Setup Request + name = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') + + client.delete_cluster(name) + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.DeleteClusterRequest( + name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_delete_cluster_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + + # Setup request + name = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') + + with pytest.raises(CustomException): + client.delete_cluster(name) + + def test_create_app_profile(self): + # Setup Expected Response + name = 'name3373707' + etag = 'etag3123477' + description = 'description-1724546052' + expected_response = { + 'name': name, + 'etag': etag, + 'description': description + } + expected_response = instance_pb2.AppProfile(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + + # Setup Request + parent = client.instance_path('[PROJECT]', '[INSTANCE]') + app_profile_id = 'appProfileId1262094415' + app_profile = {} + + response = client.create_app_profile(parent, app_profile_id, + app_profile) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.CreateAppProfileRequest( + parent=parent, + app_profile_id=app_profile_id, + app_profile=app_profile) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_create_app_profile_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + + # Setup request + parent = client.instance_path('[PROJECT]', '[INSTANCE]') + app_profile_id = 'appProfileId1262094415' + app_profile = {} + + with pytest.raises(CustomException): + client.create_app_profile(parent, app_profile_id, app_profile) + + def test_get_app_profile(self): + # Setup Expected Response + name_2 = 'name2-1052831874' + etag = 'etag3123477' + description = 'description-1724546052' + expected_response = { + 
'name': name_2, + 'etag': etag, + 'description': description + } + expected_response = instance_pb2.AppProfile(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + + # Setup Request + name = client.app_profile_path('[PROJECT]', '[INSTANCE]', + '[APP_PROFILE]') + + response = client.get_app_profile(name) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.GetAppProfileRequest( + name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_get_app_profile_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + + # Setup request + name = client.app_profile_path('[PROJECT]', '[INSTANCE]', + '[APP_PROFILE]') + + with pytest.raises(CustomException): + client.get_app_profile(name) + + def test_list_app_profiles(self): + # Setup Expected Response + next_page_token = '' + app_profiles_element = {} + app_profiles = [app_profiles_element] + expected_response = { + 'next_page_token': next_page_token, + 'app_profiles': app_profiles + } + expected_response = bigtable_instance_admin_pb2.ListAppProfilesResponse( + **expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + + # Setup Request + parent = client.instance_path('[PROJECT]', '[INSTANCE]') + + paged_list_response = client.list_app_profiles(parent) + resources = list(paged_list_response) + assert len(resources) == 1 + + assert expected_response.app_profiles[0] == resources[0] + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.ListAppProfilesRequest( + parent=parent) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_list_app_profiles_exception(self): + channel = ChannelStub(responses=[CustomException()]) + client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + + # Setup request + parent = client.instance_path('[PROJECT]', '[INSTANCE]') + + paged_list_response = client.list_app_profiles(parent) + with pytest.raises(CustomException): + list(paged_list_response) + + def test_update_app_profile(self): + # Setup Expected Response + name = 'name3373707' + etag = 'etag3123477' + description = 'description-1724546052' + expected_response = { + 'name': name, + 'etag': etag, + 'description': description + } + expected_response = instance_pb2.AppProfile(**expected_response) + operation = operations_pb2.Operation( + name='operations/test_update_app_profile', done=True) + operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + + # Setup Request + app_profile = instance_pb2.AppProfile() + update_mask = {} + + response = client.update_app_profile(app_profile, update_mask) + result = response.result() + assert expected_response == result + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.UpdateAppProfileRequest( + app_profile=app_profile, update_mask=update_mask) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_update_app_profile_exception(self): + # Setup 
Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name='operations/test_update_app_profile_exception', done=True) + operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + + # Setup Request + app_profile = instance_pb2.AppProfile() + update_mask = {} + + response = client.update_app_profile(app_profile, update_mask) + exception = response.exception() + assert exception.errors[0] == error + + def test_delete_app_profile(self): + channel = ChannelStub() + client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + + # Setup Request + name = client.app_profile_path('[PROJECT]', '[INSTANCE]', + '[APP_PROFILE]') + ignore_warnings = True + + client.delete_app_profile(name, ignore_warnings) + + assert len(channel.requests) == 1 + expected_request = bigtable_instance_admin_pb2.DeleteAppProfileRequest( + name=name, ignore_warnings=ignore_warnings) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_delete_app_profile_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + + # Setup request + name = client.app_profile_path('[PROJECT]', '[INSTANCE]', + '[APP_PROFILE]') + ignore_warnings = True + + with pytest.raises(CustomException): + client.delete_app_profile(name, ignore_warnings) + + def test_get_iam_policy(self): + # Setup Expected Response + version = 351608024 + etag = b'etag3123477' + expected_response = {'version': version, 'etag': etag} + expected_response = policy_pb2.Policy(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + + # Setup Request + resource = client.instance_path('[PROJECT]', '[INSTANCE]') + + response = client.get_iam_policy(resource) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = iam_policy_pb2.GetIamPolicyRequest( + resource=resource) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_get_iam_policy_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + + # Setup request + resource = client.instance_path('[PROJECT]', '[INSTANCE]') + + with pytest.raises(CustomException): + client.get_iam_policy(resource) + + def test_set_iam_policy(self): + # Setup Expected Response + version = 351608024 + etag = b'etag3123477' + expected_response = {'version': version, 'etag': etag} + expected_response = policy_pb2.Policy(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + + # Setup Request + resource = client.instance_path('[PROJECT]', '[INSTANCE]') + policy = {} + + response = client.set_iam_policy(resource, policy) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = iam_policy_pb2.SetIamPolicyRequest( + resource=resource, policy=policy) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_set_iam_policy_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) 
+ client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + + # Setup request + resource = client.instance_path('[PROJECT]', '[INSTANCE]') + policy = {} + + with pytest.raises(CustomException): + client.set_iam_policy(resource, policy) + + def test_test_iam_permissions(self): + # Setup Expected Response + expected_response = {} + expected_response = iam_policy_pb2.TestIamPermissionsResponse( + **expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + + # Setup Request + resource = client.instance_path('[PROJECT]', '[INSTANCE]') + permissions = [] + + response = client.test_iam_permissions(resource, permissions) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = iam_policy_pb2.TestIamPermissionsRequest( + resource=resource, permissions=permissions) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_test_iam_permissions_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + + # Setup request + resource = client.instance_path('[PROJECT]', '[INSTANCE]') + permissions = [] + + with pytest.raises(CustomException): + client.test_iam_permissions(resource, permissions) diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py b/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py new file mode 100644 index 000000000000..f489dee39209 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py @@ -0,0 +1,556 @@ +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Unit tests.""" + +import pytest + +from google.rpc import status_pb2 + +from google.cloud import bigtable_admin_v2 +from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2 +from google.cloud.bigtable_admin_v2.proto import table_pb2 +from google.longrunning import operations_pb2 +from google.protobuf import empty_pb2 + + +class MultiCallableStub(object): + """Stub for the grpc.UnaryUnaryMultiCallable interface.""" + + def __init__(self, method, channel_stub): + self.method = method + self.channel_stub = channel_stub + + def __call__(self, request, timeout=None, metadata=None, credentials=None): + self.channel_stub.requests.append((self.method, request)) + + response = None + if self.channel_stub.responses: + response = self.channel_stub.responses.pop() + + if isinstance(response, Exception): + raise response + + if response: + return response + + +class ChannelStub(object): + """Stub for the grpc.Channel interface.""" + + def __init__(self, responses=[]): + self.responses = responses + self.requests = [] + + def unary_unary(self, + method, + request_serializer=None, + response_deserializer=None): + return MultiCallableStub(method, self) + + +class CustomException(Exception): + pass + + +class TestBigtableTableAdminClient(object): + def test_create_table(self): + # Setup Expected Response + name = 'name3373707' + expected_response = {'name': name} + expected_response = table_pb2.Table(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel) + + # Setup Request + parent = client.instance_path('[PROJECT]', '[INSTANCE]') + table_id = 'tableId-895419604' + table = {} + + response = client.create_table(parent, table_id, table) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.CreateTableRequest( + parent=parent, table_id=table_id, table=table) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_create_table_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel) + + # Setup request + parent = client.instance_path('[PROJECT]', '[INSTANCE]') + table_id = 'tableId-895419604' + table = {} + + with pytest.raises(CustomException): + client.create_table(parent, table_id, table) + + def test_create_table_from_snapshot(self): + # Setup Expected Response + name = 'name3373707' + expected_response = {'name': name} + expected_response = table_pb2.Table(**expected_response) + operation = operations_pb2.Operation( + name='operations/test_create_table_from_snapshot', done=True) + operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel) + + # Setup Request + parent = client.instance_path('[PROJECT]', '[INSTANCE]') + table_id = 'tableId-895419604' + source_snapshot = 'sourceSnapshot-947679896' + + response = client.create_table_from_snapshot(parent, table_id, + source_snapshot) + result = response.result() + assert expected_response == result + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.CreateTableFromSnapshotRequest( + parent=parent, table_id=table_id, source_snapshot=source_snapshot) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def 
test_create_table_from_snapshot_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name='operations/test_create_table_from_snapshot_exception', + done=True) + operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel) + + # Setup Request + parent = client.instance_path('[PROJECT]', '[INSTANCE]') + table_id = 'tableId-895419604' + source_snapshot = 'sourceSnapshot-947679896' + + response = client.create_table_from_snapshot(parent, table_id, + source_snapshot) + exception = response.exception() + assert exception.errors[0] == error + + def test_list_tables(self): + # Setup Expected Response + next_page_token = '' + tables_element = {} + tables = [tables_element] + expected_response = { + 'next_page_token': next_page_token, + 'tables': tables + } + expected_response = bigtable_table_admin_pb2.ListTablesResponse( + **expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel) + + # Setup Request + parent = client.instance_path('[PROJECT]', '[INSTANCE]') + + paged_list_response = client.list_tables(parent) + resources = list(paged_list_response) + assert len(resources) == 1 + + assert expected_response.tables[0] == resources[0] + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.ListTablesRequest( + parent=parent) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_list_tables_exception(self): + channel = ChannelStub(responses=[CustomException()]) + client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel) + + # Setup request + parent = client.instance_path('[PROJECT]', '[INSTANCE]') + + paged_list_response = client.list_tables(parent) + with pytest.raises(CustomException): + list(paged_list_response) + + def test_get_table(self): + # Setup Expected Response + name_2 = 'name2-1052831874' + expected_response = {'name': name_2} + expected_response = table_pb2.Table(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel) + + # Setup Request + name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + + response = client.get_table(name) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.GetTableRequest(name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_get_table_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel) + + # Setup request + name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + + with pytest.raises(CustomException): + client.get_table(name) + + def test_delete_table(self): + channel = ChannelStub() + client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel) + + # Setup Request + name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + + client.delete_table(name) + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.DeleteTableRequest( + name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_delete_table_exception(self): + # Mock the API 
response + channel = ChannelStub(responses=[CustomException()]) + client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel) + + # Setup request + name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + + with pytest.raises(CustomException): + client.delete_table(name) + + def test_modify_column_families(self): + # Setup Expected Response + name_2 = 'name2-1052831874' + expected_response = {'name': name_2} + expected_response = table_pb2.Table(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel) + + # Setup Request + name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + modifications = [] + + response = client.modify_column_families(name, modifications) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.ModifyColumnFamiliesRequest( + name=name, modifications=modifications) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_modify_column_families_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel) + + # Setup request + name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + modifications = [] + + with pytest.raises(CustomException): + client.modify_column_families(name, modifications) + + def test_drop_row_range(self): + channel = ChannelStub() + client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel) + + # Setup Request + name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + + client.drop_row_range(name) + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.DropRowRangeRequest( + name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_drop_row_range_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel) + + # Setup request + name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + + with pytest.raises(CustomException): + client.drop_row_range(name) + + def test_generate_consistency_token(self): + # Setup Expected Response + consistency_token = 'consistencyToken-1090516718' + expected_response = {'consistency_token': consistency_token} + expected_response = bigtable_table_admin_pb2.GenerateConsistencyTokenResponse( + **expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel) + + # Setup Request + name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + + response = client.generate_consistency_token(name) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.GenerateConsistencyTokenRequest( + name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_generate_consistency_token_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel) + + # Setup request + name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + + with pytest.raises(CustomException): + client.generate_consistency_token(name) + + def 
test_check_consistency(self): + # Setup Expected Response + consistent = True + expected_response = {'consistent': consistent} + expected_response = bigtable_table_admin_pb2.CheckConsistencyResponse( + **expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel) + + # Setup Request + name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + consistency_token = 'consistencyToken-1090516718' + + response = client.check_consistency(name, consistency_token) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.CheckConsistencyRequest( + name=name, consistency_token=consistency_token) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_check_consistency_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel) + + # Setup request + name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + consistency_token = 'consistencyToken-1090516718' + + with pytest.raises(CustomException): + client.check_consistency(name, consistency_token) + + def test_snapshot_table(self): + # Setup Expected Response + name_2 = 'name2-1052831874' + data_size_bytes = 2110122398 + description_2 = 'description2568623279' + expected_response = { + 'name': name_2, + 'data_size_bytes': data_size_bytes, + 'description': description_2 + } + expected_response = table_pb2.Snapshot(**expected_response) + operation = operations_pb2.Operation( + name='operations/test_snapshot_table', done=True) + operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel) + + # Setup Request + name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + cluster = 'cluster872092154' + snapshot_id = 'snapshotId-168585866' + description = 'description-1724546052' + + response = client.snapshot_table(name, cluster, snapshot_id, + description) + result = response.result() + assert expected_response == result + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.SnapshotTableRequest( + name=name, + cluster=cluster, + snapshot_id=snapshot_id, + description=description) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_snapshot_table_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name='operations/test_snapshot_table_exception', done=True) + operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel) + + # Setup Request + name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + cluster = 'cluster872092154' + snapshot_id = 'snapshotId-168585866' + description = 'description-1724546052' + + response = client.snapshot_table(name, cluster, snapshot_id, + description) + exception = response.exception() + assert exception.errors[0] == error + + def test_get_snapshot(self): + # Setup Expected Response + name_2 = 'name2-1052831874' + data_size_bytes = 2110122398 + description = 'description-1724546052' + expected_response = { + 'name': name_2, + 'data_size_bytes': data_size_bytes, + 'description': description + } + 
expected_response = table_pb2.Snapshot(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel) + + # Setup Request + name = client.snapshot_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]', + '[SNAPSHOT]') + + response = client.get_snapshot(name) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.GetSnapshotRequest( + name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_get_snapshot_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel) + + # Setup request + name = client.snapshot_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]', + '[SNAPSHOT]') + + with pytest.raises(CustomException): + client.get_snapshot(name) + + def test_list_snapshots(self): + # Setup Expected Response + next_page_token = '' + snapshots_element = {} + snapshots = [snapshots_element] + expected_response = { + 'next_page_token': next_page_token, + 'snapshots': snapshots + } + expected_response = bigtable_table_admin_pb2.ListSnapshotsResponse( + **expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel) + + # Setup Request + parent = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') + + paged_list_response = client.list_snapshots(parent) + resources = list(paged_list_response) + assert len(resources) == 1 + + assert expected_response.snapshots[0] == resources[0] + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.ListSnapshotsRequest( + parent=parent) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_list_snapshots_exception(self): + channel = ChannelStub(responses=[CustomException()]) + client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel) + + # Setup request + parent = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') + + paged_list_response = client.list_snapshots(parent) + with pytest.raises(CustomException): + list(paged_list_response) + + def test_delete_snapshot(self): + channel = ChannelStub() + client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel) + + # Setup Request + name = client.snapshot_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]', + '[SNAPSHOT]') + + client.delete_snapshot(name) + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.DeleteSnapshotRequest( + name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_delete_snapshot_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel) + + # Setup request + name = client.snapshot_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]', + '[SNAPSHOT]') + + with pytest.raises(CustomException): + client.delete_snapshot(name) diff --git a/packages/google-cloud-bigtable/tests/unit/test_client.py b/packages/google-cloud-bigtable/tests/unit/test_client.py index 1458f81b35ca..3725b20bb8fe 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/test_client.py @@ -29,215 +29,15 @@ class _CredentialsWithScopes( return 
mock.Mock(spec=_CredentialsWithScopes) -class Test__make_data_stub(unittest.TestCase): +@mock.patch('google.auth.transport.grpc.secure_authorized_channel') +def _make_channel(secure_authorized_channel): + from google.api_core import grpc_helpers + target = 'example.com:443' - def _call_fut(self, client): - from google.cloud.bigtable.client import _make_data_stub + channel = grpc_helpers.create_channel( + target, credentials=mock.sentinel.credentials) - return _make_data_stub(client) - - @mock.patch('google.cloud.bigtable.client.make_secure_stub', - return_value=mock.sentinel.stub) - def test_without_emulator(self, make_stub): - from google.cloud.bigtable import client as MUT - - credentials = _make_credentials() - user_agent = 'you-sir-age-int' - client = _Client(credentials, user_agent) - - result = self._call_fut(client) - self.assertIs(result, mock.sentinel.stub) - make_stub.assert_called_once_with( - client.credentials, - client.user_agent, - MUT.bigtable_pb2.BigtableStub, - MUT.DATA_API_HOST, - extra_options=MUT._GRPC_MAX_LENGTH_OPTIONS, - ) - - def test_with_emulator(self): - from google.cloud._testing import _Monkey - from google.cloud.bigtable import client as MUT - - emulator_host = object() - client = _Client(None, None, emulator_host=emulator_host) - - fake_stub = object() - make_insecure_stub_args = [] - - def mock_make_insecure_stub(*args): - make_insecure_stub_args.append(args) - return fake_stub - - with _Monkey(MUT, make_insecure_stub=mock_make_insecure_stub): - result = self._call_fut(client) - - self.assertIs(result, fake_stub) - self.assertEqual(make_insecure_stub_args, [ - ( - MUT.bigtable_pb2.BigtableStub, - emulator_host, - ), - ]) - - -class Test__make_instance_stub(unittest.TestCase): - - def _call_fut(self, client): - from google.cloud.bigtable.client import _make_instance_stub - - return _make_instance_stub(client) - - @mock.patch('google.cloud.bigtable.client.make_secure_stub', - return_value=mock.sentinel.stub) - def test_without_emulator(self, make_stub): - from google.cloud.bigtable import client as MUT - - credentials = _make_credentials() - user_agent = 'you-sir-age-int' - client = _Client(credentials, user_agent) - - result = self._call_fut(client) - self.assertIs(result, mock.sentinel.stub) - make_stub.assert_called_once_with( - client.credentials, - client.user_agent, - MUT.bigtable_instance_admin_pb2.BigtableInstanceAdminStub, - MUT.INSTANCE_ADMIN_HOST, - extra_options=MUT._GRPC_EXTRA_OPTIONS, - ) - - def test_with_emulator(self): - from google.cloud._testing import _Monkey - from google.cloud.bigtable import client as MUT - - emulator_host = object() - client = _Client(None, None, emulator_host=emulator_host) - - fake_stub = object() - make_insecure_stub_args = [] - - def mock_make_insecure_stub(*args): - make_insecure_stub_args.append(args) - return fake_stub - - with _Monkey(MUT, make_insecure_stub=mock_make_insecure_stub): - result = self._call_fut(client) - - self.assertIs(result, fake_stub) - self.assertEqual(make_insecure_stub_args, [ - ( - MUT.bigtable_instance_admin_pb2.BigtableInstanceAdminStub, - emulator_host, - ), - ]) - - -class Test__make_operations_stub(unittest.TestCase): - - def _call_fut(self, client): - from google.cloud.bigtable.client import _make_operations_stub - - return _make_operations_stub(client) - - @mock.patch('google.cloud.bigtable.client.make_secure_stub', - return_value=mock.sentinel.stub) - def test_without_emulator(self, make_stub): - from google.longrunning import operations_grpc - from google.cloud.bigtable import 
client as MUT - - credentials = _make_credentials() - user_agent = 'you-sir-age-int' - client = _Client(credentials, user_agent) - - result = self._call_fut(client) - self.assertIs(result, mock.sentinel.stub) - make_stub.assert_called_once_with( - client.credentials, - client.user_agent, - operations_grpc.OperationsStub, - MUT.OPERATIONS_API_HOST, - extra_options=MUT._GRPC_EXTRA_OPTIONS, - ) - - def test_with_emulator(self): - from google.longrunning import operations_grpc - - from google.cloud._testing import _Monkey - from google.cloud.bigtable import client as MUT - - emulator_host = object() - client = _Client(None, None, emulator_host=emulator_host) - - fake_stub = object() - make_insecure_stub_args = [] - - def mock_make_insecure_stub(*args): - make_insecure_stub_args.append(args) - return fake_stub - - with _Monkey(MUT, make_insecure_stub=mock_make_insecure_stub): - result = self._call_fut(client) - - self.assertIs(result, fake_stub) - self.assertEqual(make_insecure_stub_args, [ - ( - operations_grpc.OperationsStub, - emulator_host, - ), - ]) - - -class Test__make_table_stub(unittest.TestCase): - - def _call_fut(self, client): - from google.cloud.bigtable.client import _make_table_stub - - return _make_table_stub(client) - - @mock.patch('google.cloud.bigtable.client.make_secure_stub', - return_value=mock.sentinel.stub) - def test_without_emulator(self, make_stub): - from google.cloud.bigtable import client as MUT - - credentials = _make_credentials() - user_agent = 'you-sir-age-int' - client = _Client(credentials, user_agent) - - result = self._call_fut(client) - self.assertIs(result, mock.sentinel.stub) - make_stub.assert_called_once_with( - client.credentials, - client.user_agent, - MUT.bigtable_table_admin_pb2.BigtableTableAdminStub, - MUT.TABLE_ADMIN_HOST, - extra_options=MUT._GRPC_EXTRA_OPTIONS, - ) - - def test_with_emulator(self): - from google.cloud._testing import _Monkey - from google.cloud.bigtable import client as MUT - - emulator_host = object() - client = _Client(None, None, emulator_host=emulator_host) - - fake_stub = object() - make_insecure_stub_args = [] - - def mock_make_insecure_stub(*args): - make_insecure_stub_args.append(args) - return fake_stub - - with _Monkey(MUT, make_insecure_stub=mock_make_insecure_stub): - result = self._call_fut(client) - - self.assertIs(result, fake_stub) - self.assertEqual(make_insecure_stub_args, [ - ( - MUT.bigtable_table_admin_pb2.BigtableTableAdminStub, - emulator_host, - ), - ]) + return channel class TestClient(unittest.TestCase): @@ -256,94 +56,6 @@ def _get_target_class(): def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) - @mock.patch('google.cloud.bigtable.client._make_table_stub') - @mock.patch('google.cloud.bigtable.client._make_operations_stub') - @mock.patch('google.cloud.bigtable.client._make_instance_stub') - @mock.patch('google.cloud.bigtable.client._make_data_stub') - def _make_one_with_mocks( - self, _make_data_stub, _make_instance_stub, - _make_operations_stub, _make_table_stub, - *args, **kwargs): - return self._make_one(*args, **kwargs) - - @mock.patch('google.cloud.bigtable.client._make_table_stub') - @mock.patch('google.cloud.bigtable.client._make_operations_stub') - @mock.patch('google.cloud.bigtable.client._make_instance_stub') - @mock.patch('google.cloud.bigtable.client._make_data_stub') - def test_constructor_default_scopes( - self, _make_data_stub, _make_instance_stub, - _make_operations_stub, _make_table_stub): - from google.cloud.bigtable.client import DATA_SCOPE - - 
expected_scopes = (DATA_SCOPE,) - credentials = _make_credentials() - custom_user_agent = 'custom-application' - client = self._make_one( - project=self.PROJECT, credentials=credentials, - user_agent=custom_user_agent) - - self.assertEqual(client.project, self.PROJECT) - self.assertIs( - client._credentials, credentials.with_scopes.return_value) - self.assertIsNone(client._http_internal) - self.assertFalse(client._read_only) - self.assertFalse(client._admin) - self.assertEqual(client.SCOPE, expected_scopes) - self.assertEqual(client.user_agent, custom_user_agent) - self.assertIsNone(client.emulator_host) - self.assertIs(client._data_stub, _make_data_stub.return_value) - self.assertIsNone(client._instance_stub_internal) - self.assertIsNone(client._operations_stub_internal) - self.assertIsNone(client._table_stub_internal) - - # Check mocks. - credentials.with_scopes.assert_called_once_with(expected_scopes) - _make_data_stub.assert_called_once_with(client) - _make_instance_stub.assert_not_called() - _make_operations_stub.assert_not_called() - _make_table_stub.assert_not_called() - - @mock.patch('google.cloud.bigtable.client._make_table_stub') - @mock.patch('google.cloud.bigtable.client._make_operations_stub') - @mock.patch('google.cloud.bigtable.client._make_instance_stub') - @mock.patch('google.cloud.bigtable.client._make_data_stub') - def test_constructor_with_admin( - self, _make_data_stub, _make_instance_stub, - _make_operations_stub, _make_table_stub): - from google.cloud._http import DEFAULT_USER_AGENT - from google.cloud.bigtable.client import ADMIN_SCOPE - from google.cloud.bigtable.client import DATA_SCOPE - - expected_scopes = (DATA_SCOPE, ADMIN_SCOPE) - credentials = _make_credentials() - client = self._make_one( - project=self.PROJECT, credentials=credentials, admin=True) - - self.assertEqual(client.project, self.PROJECT) - self.assertIs( - client._credentials, credentials.with_scopes.return_value) - self.assertIsNone(client._http_internal) - self.assertFalse(client._read_only) - self.assertTrue(client._admin) - self.assertEqual(client.SCOPE, expected_scopes) - self.assertEqual(client.user_agent, DEFAULT_USER_AGENT) - self.assertIsNone(client.emulator_host) - self.assertIs(client._data_stub, _make_data_stub.return_value) - self.assertIs( - client._instance_stub_internal, _make_instance_stub.return_value) - self.assertIs( - client._operations_stub_internal, - _make_operations_stub.return_value) - self.assertIs( - client._table_stub_internal, _make_table_stub.return_value) - - # Check mocks. - credentials.with_scopes.assert_called_once_with(expected_scopes) - _make_data_stub.assert_called_once_with(client) - _make_instance_stub.assert_called_once_with(client) - _make_operations_stub.assert_called_once_with(client) - _make_table_stub.assert_called_once_with(client) - def test_constructor_both_admin_and_read_only(self): credentials = _make_credentials() with self.assertRaises(ValueError): @@ -376,177 +88,29 @@ def test__get_scopes_read_only(self): read_only=True) self.assertEqual(client._get_scopes(), (READ_ONLY_SCOPE,)) - def _copy_helper_check_stubs(self, client, new_client): - if client._admin: - # Check the instance stub. - self.assertIs( - client._instance_stub_internal, mock.sentinel.inst_stub1) - self.assertIs( - new_client._instance_stub_internal, mock.sentinel.inst_stub2) - self.assertIsNot( - new_client._instance_stub_internal, - client._instance_stub_internal) - # Check the operations stub. 
- self.assertIs( - client._operations_stub_internal, mock.sentinel.ops_stub1) - self.assertIs( - new_client._operations_stub_internal, mock.sentinel.ops_stub2) - self.assertIsNot( - new_client._operations_stub_internal, - client._operations_stub_internal) - # Check the table stub. - self.assertIs( - client._table_stub_internal, mock.sentinel.table_stub1) - self.assertIs( - new_client._table_stub_internal, mock.sentinel.table_stub2) - self.assertIsNot( - new_client._table_stub_internal, client._table_stub_internal) - else: - # Check the instance stub. - self.assertIsNone(client._instance_stub_internal) - self.assertIsNone(new_client._instance_stub_internal) - # Check the operations stub. - self.assertIsNone(client._operations_stub_internal) - self.assertIsNone(new_client._operations_stub_internal) - # Check the table stub. - self.assertIsNone(client._table_stub_internal) - self.assertIsNone(new_client._table_stub_internal) - - @mock.patch( - 'google.cloud.bigtable.client._make_table_stub', - side_effect=[mock.sentinel.table_stub1, mock.sentinel.table_stub2], - ) - @mock.patch( - 'google.cloud.bigtable.client._make_operations_stub', - side_effect=[mock.sentinel.ops_stub1, mock.sentinel.ops_stub2], - ) - @mock.patch( - 'google.cloud.bigtable.client._make_instance_stub', - side_effect=[mock.sentinel.inst_stub1, mock.sentinel.inst_stub2], - ) - @mock.patch( - 'google.cloud.bigtable.client._make_data_stub', - side_effect=[mock.sentinel.data_stub1, mock.sentinel.data_stub2], - ) - def _copy_test_helper( - self, _make_data_stub, _make_instance_stub, - _make_operations_stub, _make_table_stub, **kwargs): - credentials = _make_credentials() - # Make sure it "already" is scoped. - credentials.requires_scopes = False - - client = self._make_one( - project=self.PROJECT, credentials=credentials, **kwargs) - self.assertIs(client._credentials, credentials) - - new_client = client.copy() - self.assertEqual(new_client._admin, client._admin) - self.assertEqual(new_client._credentials, client._credentials) - self.assertEqual(new_client.project, client.project) - self.assertEqual(new_client.user_agent, client.user_agent) - # Make sure stubs are not preserved. - self.assertIs(client._data_stub, mock.sentinel.data_stub1) - self.assertIs(new_client._data_stub, mock.sentinel.data_stub2) - self.assertIsNot(new_client._data_stub, client._data_stub) - self._copy_helper_check_stubs(client, new_client) - - # Check mocks. 
- credentials.with_scopes.assert_not_called() - stub_calls = [ - mock.call(client), - mock.call(new_client), - ] - self.assertEqual(_make_data_stub.mock_calls, stub_calls) - if client._admin: - self.assertEqual(_make_instance_stub.mock_calls, stub_calls) - self.assertEqual(_make_operations_stub.mock_calls, stub_calls) - self.assertEqual(_make_table_stub.mock_calls, stub_calls) - else: - _make_instance_stub.assert_not_called() - _make_operations_stub.assert_not_called() - _make_table_stub.assert_not_called() - - def test_copy(self): - self._copy_test_helper() - - def test_copy_admin(self): - self._copy_test_helper(admin=True) - - def test_copy_read_only(self): - self._copy_test_helper(read_only=True) - def test_credentials_getter(self): credentials = _make_credentials() project = 'PROJECT' - client = self._make_one_with_mocks( + client = self._make_one( project=project, credentials=credentials) - self.assertIs(client.credentials, credentials.with_scopes.return_value) + self.assertIs(client._credentials, credentials) def test_project_name_property(self): credentials = _make_credentials() project = 'PROJECT' - client = self._make_one_with_mocks( - project=project, credentials=credentials) - project_name = 'projects/' + project - self.assertEqual(client.project_name, project_name) - - def test_instance_stub_getter(self): - credentials = _make_credentials() - project = 'PROJECT' - client = self._make_one_with_mocks( - project=project, credentials=credentials, admin=True) - self.assertIs(client._instance_stub, client._instance_stub_internal) - - def test_instance_stub_non_admin_failure(self): - credentials = _make_credentials() - project = 'PROJECT' - client = self._make_one_with_mocks( - project=project, credentials=credentials, admin=False) - with self.assertRaises(ValueError): - getattr(client, '_instance_stub') - - def test_operations_stub_getter(self): - credentials = _make_credentials() - project = 'PROJECT' - client = self._make_one_with_mocks( - project=project, credentials=credentials, admin=True) - self.assertIs(client._operations_stub, - client._operations_stub_internal) - - def test_operations_stub_non_admin_failure(self): - credentials = _make_credentials() - project = 'PROJECT' - client = self._make_one_with_mocks( - project=project, credentials=credentials, admin=False) - with self.assertRaises(ValueError): - getattr(client, '_operations_stub') - - def test_table_stub_getter(self): - credentials = _make_credentials() - project = 'PROJECT' - client = self._make_one_with_mocks( + client = self._make_one( project=project, credentials=credentials, admin=True) - self.assertIs(client._table_stub, client._table_stub_internal) - - def test_table_stub_non_admin_failure(self): - credentials = _make_credentials() - project = 'PROJECT' - client = self._make_one_with_mocks( - project=project, credentials=credentials, admin=False) - with self.assertRaises(ValueError): - getattr(client, '_table_stub') + project_name = 'projects/' + project + self.assertEqual(client.project_path, project_name) def test_instance_factory_defaults(self): - from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES from google.cloud.bigtable.instance import Instance - from google.cloud.bigtable.instance import ( - _EXISTING_INSTANCE_LOCATION_ID) PROJECT = 'PROJECT' INSTANCE_ID = 'instance-id' DISPLAY_NAME = 'display-name' credentials = _make_credentials() - client = self._make_one_with_mocks( + client = self._make_one( project=PROJECT, credentials=credentials) instance = client.instance(INSTANCE_ID, 
display_name=DISPLAY_NAME) @@ -554,9 +118,6 @@ def test_instance_factory_defaults(self): self.assertIsInstance(instance, Instance) self.assertEqual(instance.instance_id, INSTANCE_ID) self.assertEqual(instance.display_name, DISPLAY_NAME) - self.assertEqual(instance._cluster_location_id, - _EXISTING_INSTANCE_LOCATION_ID) - self.assertEqual(instance._cluster_serve_nodes, DEFAULT_SERVE_NODES) self.assertIs(instance._client, client) def test_instance_factory_w_explicit_serve_nodes(self): @@ -566,49 +127,46 @@ def test_instance_factory_w_explicit_serve_nodes(self): INSTANCE_ID = 'instance-id' DISPLAY_NAME = 'display-name' LOCATION_ID = 'locname' - SERVE_NODES = 5 credentials = _make_credentials() - client = self._make_one_with_mocks( + client = self._make_one( project=PROJECT, credentials=credentials) instance = client.instance( - INSTANCE_ID, display_name=DISPLAY_NAME, - location=LOCATION_ID, serve_nodes=SERVE_NODES) + INSTANCE_ID, display_name=DISPLAY_NAME, location=LOCATION_ID) self.assertIsInstance(instance, Instance) self.assertEqual(instance.instance_id, INSTANCE_ID) self.assertEqual(instance.display_name, DISPLAY_NAME) self.assertEqual(instance._cluster_location_id, LOCATION_ID) - self.assertEqual(instance._cluster_serve_nodes, SERVE_NODES) self.assertIs(instance._client, client) + def test_admin_client_w_value_error(self): + channel = _make_channel() + client = self._make_one(project=self.PROJECT, channel=channel) + + with self.assertRaises(ValueError): + client._table_admin_client() + + with self.assertRaises(ValueError): + client._instance_admin_client() + def test_list_instances(self): - from google.cloud.bigtable._generated import ( + from google.cloud.bigtable_admin_v2.proto import ( instance_pb2 as data_v2_pb2) - from google.cloud.bigtable._generated import ( + from google.cloud.bigtable_admin_v2.proto import ( bigtable_instance_admin_pb2 as messages_v2_pb2) - from tests.unit._testing import _FakeStub - LOCATION = 'projects/' + self.PROJECT + '/locations/locname' FAILED_LOCATION = 'FAILED' INSTANCE_ID1 = 'instance-id1' INSTANCE_ID2 = 'instance-id2' INSTANCE_NAME1 = ( - 'projects/' + self.PROJECT + '/instances/' + INSTANCE_ID1) + 'projects/' + self.PROJECT + '/instances/' + INSTANCE_ID1) INSTANCE_NAME2 = ( - 'projects/' + self.PROJECT + '/instances/' + INSTANCE_ID2) - - credentials = _make_credentials() - client = self._make_one_with_mocks( - project=self.PROJECT, - credentials=credentials, - admin=True, - ) + 'projects/' + self.PROJECT + '/instances/' + INSTANCE_ID2) - # Create request_pb - request_pb = messages_v2_pb2.ListInstancesRequest( - parent='projects/' + self.PROJECT, - ) + channel = _make_channel() + client = self._make_one(project=self.PROJECT, channel=channel, + admin=True) # Create response_pb response_pb = messages_v2_pb2.ListInstancesResponse( @@ -628,29 +186,11 @@ def test_list_instances(self): ) # Patch the stub used by the API method. - client._instance_stub_internal = stub = _FakeStub(response_pb) - - # Create expected_result. - failed_locations = [FAILED_LOCATION] - instances = [ - client.instance(INSTANCE_ID1, LOCATION), - client.instance(INSTANCE_ID2, LOCATION), - ] - expected_result = (instances, failed_locations) + bigtable_instance_stub = ( + client._instance_admin_client.bigtable_instance_admin_stub) + bigtable_instance_stub.ListInstances.side_effect = [response_pb] + expected_result = response_pb # Perform the method and check the result. 
result = client.list_instances() self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'ListInstances', - (request_pb,), - {}, - )]) - - -class _Client(object): - - def __init__(self, credentials, user_agent, emulator_host=None): - self.credentials = credentials - self.user_agent = user_agent - self.emulator_host = emulator_host diff --git a/packages/google-cloud-bigtable/tests/unit/test_cluster.py b/packages/google-cloud-bigtable/tests/unit/test_cluster.py index 282df36ab204..d520bd504ee1 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_cluster.py +++ b/packages/google-cloud-bigtable/tests/unit/test_cluster.py @@ -27,6 +27,16 @@ class TestCluster(unittest.TestCase): '/instances/' + INSTANCE_ID + '/clusters/' + CLUSTER_ID) + @mock.patch('google.auth.transport.grpc.secure_authorized_channel') + def _make_channel(self, secure_authorized_channel): + from google.api_core import grpc_helpers + target = 'example.com:443' + + channel = grpc_helpers.create_channel( + target, credentials=mock.sentinel.credentials) + + return channel + @staticmethod def _get_target_class(): from google.cloud.bigtable.cluster import Cluster @@ -36,6 +46,15 @@ def _get_target_class(): def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) + @staticmethod + def _get_target_client_class(): + from google.cloud.bigtable.client import Client + + return Client + + def _make_client(self, *args, **kwargs): + return self._get_target_client_class()(*args, **kwargs) + def test_constructor_defaults(self): from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES @@ -58,108 +77,16 @@ def test_constructor_non_default(self): self.assertIs(cluster._instance, instance) self.assertEqual(cluster.serve_nodes, SERVE_NODES) - def test_copy(self): - SERVE_NODES = 8 - - client = _Client(self.PROJECT) - instance = _Instance(self.INSTANCE_ID, client) - cluster = self._make_one(self.CLUSTER_ID, instance, - serve_nodes=SERVE_NODES) - new_cluster = cluster.copy() - - # Make sure the client copy succeeded. - self.assertIsNot(new_cluster._instance, instance) - self.assertEqual(new_cluster.serve_nodes, SERVE_NODES) - # Make sure the client got copied to a new instance. 
- self.assertIsNot(cluster, new_cluster) - self.assertEqual(cluster, new_cluster) - - def test__update_from_pb_success(self): - from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES - - SERVE_NODES = 8 - cluster_pb = _ClusterPB( - serve_nodes=SERVE_NODES, - ) - client = _Client(self.PROJECT) - instance = _Instance(self.INSTANCE_ID, client) - - cluster = self._make_one(self.CLUSTER_ID, instance) - self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES) - cluster._update_from_pb(cluster_pb) - self.assertEqual(cluster.serve_nodes, SERVE_NODES) - - def test__update_from_pb_no_serve_nodes(self): - from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES - - cluster_pb = _ClusterPB() - client = _Client(self.PROJECT) - instance = _Instance(self.INSTANCE_ID, client) - - cluster = self._make_one(self.CLUSTER_ID, instance) - self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES) - with self.assertRaises(ValueError): - cluster._update_from_pb(cluster_pb) - self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES) - - def test_from_pb_success(self): - SERVE_NODES = 331 - client = _Client(self.PROJECT) - instance = _Instance(self.INSTANCE_ID, client) - - cluster_pb = _ClusterPB( - name=self.CLUSTER_NAME, - serve_nodes=SERVE_NODES, - ) - - klass = self._get_target_class() - cluster = klass.from_pb(cluster_pb, instance) - self.assertIsInstance(cluster, klass) - self.assertIs(cluster._instance, instance) - self.assertEqual(cluster.cluster_id, self.CLUSTER_ID) - self.assertEqual(cluster.serve_nodes, SERVE_NODES) - - def test_from_pb_bad_cluster_name(self): - BAD_CLUSTER_NAME = 'INCORRECT_FORMAT' - client = _Client(self.PROJECT) - instance = _Instance(self.INSTANCE_ID, client) - cluster_pb = _ClusterPB(name=BAD_CLUSTER_NAME) - - klass = self._get_target_class() - with self.assertRaises(ValueError): - klass.from_pb(cluster_pb, instance) - - def test_from_pb_project_mistmatch(self): - ALT_PROJECT = 'ALT_PROJECT' - client = _Client(ALT_PROJECT) - instance = _Instance(self.INSTANCE_ID, client) - - self.assertNotEqual(self.PROJECT, ALT_PROJECT) - - cluster_pb = _ClusterPB(name=self.CLUSTER_NAME) - - klass = self._get_target_class() - with self.assertRaises(ValueError): - klass.from_pb(cluster_pb, instance) - - def test_from_pb_instance_mistmatch(self): - ALT_INSTANCE_ID = 'ALT_INSTANCE_ID' - client = _Client(self.PROJECT) - instance = _Instance(ALT_INSTANCE_ID, client) - - self.assertNotEqual(self.INSTANCE_ID, ALT_INSTANCE_ID) - - cluster_pb = _ClusterPB(name=self.CLUSTER_NAME) - - klass = self._get_target_class() - with self.assertRaises(ValueError): - klass.from_pb(cluster_pb, instance) - def test_name_property(self): - client = _Client(self.PROJECT) - instance = _Instance(self.INSTANCE_ID, client) + from google.cloud.bigtable.instance import Instance + channel = self._make_channel() + client = self._make_client(project=self.PROJECT, channel=channel, + admin=True) + instance = Instance(self.INSTANCE_ID, client) cluster = self._make_one(self.CLUSTER_ID, instance) + instance = Instance(self.INSTANCE_ID, client) + self.assertEqual(cluster.name, self.CLUSTER_NAME) def test___eq__(self): @@ -192,26 +119,26 @@ def test___ne__(self): self.assertNotEqual(cluster1, cluster2) def test_reload(self): - from tests.unit._testing import _FakeStub from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES + from google.cloud.bigtable.instance import Instance - SERVE_NODES = 31 LOCATION = 'LOCATION' - client = _Client(self.PROJECT) - instance = _Instance(self.INSTANCE_ID, client) + channel = 
self._make_channel() + client = self._make_client(project=self.PROJECT, channel=channel, + admin=True) + instance = Instance(self.INSTANCE_ID, client) cluster = self._make_one(self.CLUSTER_ID, instance) - # Create request_pb - request_pb = _GetClusterRequestPB(name=self.CLUSTER_NAME) - # Create response_pb response_pb = _ClusterPB( - serve_nodes=SERVE_NODES, + serve_nodes=DEFAULT_SERVE_NODES, location=LOCATION, ) # Patch the stub used by the API method. - client._instance_stub = stub = _FakeStub(response_pb) + instance_admin_client = client._instance_admin_client + instance_stub = instance_admin_client.bigtable_instance_admin_stub + instance_stub.GetCluster.side_effect = [response_pb] # Create expected_result. expected_result = None # reload() has no return value. @@ -222,28 +149,17 @@ def test_reload(self): # Perform the method and check the result. result = cluster.reload() self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'GetCluster', - (request_pb,), - {}, - )]) - - # Check Cluster optional config values before. - self.assertEqual(cluster.serve_nodes, SERVE_NODES) - self.assertEqual(cluster.location, LOCATION) def test_create(self): from google.api_core import operation from google.longrunning import operations_pb2 - from google.cloud.bigtable._generated import ( - bigtable_instance_admin_pb2 as messages_v2_pb2) - from tests.unit._testing import _FakeStub + from google.cloud.bigtable.instance import Instance - SERVE_NODES = 4 - client = _Client(self.PROJECT) - instance = _Instance(self.INSTANCE_ID, client) - cluster = self._make_one( - self.CLUSTER_ID, instance, serve_nodes=SERVE_NODES) + channel = self._make_channel() + client = self._make_client(project=self.PROJECT, channel=channel, + admin=True) + instance = Instance(self.INSTANCE_ID, client) + cluster = self._make_one(self.CLUSTER_ID, instance) # Create response_pb OP_ID = 5678 @@ -253,7 +169,9 @@ def test_create(self): response_pb = operations_pb2.Operation(name=OP_NAME) # Patch the stub used by the API method. - client._instance_stub = stub = _FakeStub(response_pb) + instance_admin_client = client._instance_admin_client + instance_stub = instance_admin_client.bigtable_instance_admin_stub + instance_stub.CreateCluster.side_effect = [response_pb] # Perform the method and check the result. 
result = cluster.create() @@ -262,36 +180,27 @@ def test_create(self): self.assertEqual(result.operation.name, OP_NAME) self.assertIsNone(result.metadata) - self.assertEqual(len(stub.method_calls), 1) - api_name, args, kwargs = stub.method_calls[0] - self.assertEqual(api_name, 'CreateCluster') - request_pb, = args - self.assertIsInstance(request_pb, - messages_v2_pb2.CreateClusterRequest) - self.assertEqual(request_pb.parent, instance.name) - self.assertEqual(request_pb.cluster_id, self.CLUSTER_ID) - self.assertEqual(request_pb.cluster.serve_nodes, SERVE_NODES) - self.assertEqual(kwargs, {}) - def test_update(self): import datetime from google.api_core import operation from google.longrunning import operations_pb2 from google.protobuf.any_pb2 import Any from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.bigtable._generated import ( + from google.cloud.bigtable.instance import Instance + from google.cloud.bigtable_admin_v2.proto import ( instance_pb2 as data_v2_pb2) - from google.cloud.bigtable._generated import ( + from google.cloud.bigtable_admin_v2.proto import ( bigtable_instance_admin_pb2 as messages_v2_pb2) - from tests.unit._testing import _FakeStub NOW = datetime.datetime.utcnow() NOW_PB = _datetime_to_pb_timestamp(NOW) SERVE_NODES = 81 - client = _Client(self.PROJECT) - instance = _Instance(self.INSTANCE_ID, client) + channel = self._make_channel() + client = self._make_client(project=self.PROJECT, channel=channel, + admin=True) + instance = Instance(self.INSTANCE_ID, client) cluster = self._make_one(self.CLUSTER_ID, instance, serve_nodes=SERVE_NODES) @@ -318,41 +227,39 @@ def test_update(self): ) # Patch the stub used by the API method. - client._instance_stub = stub = _FakeStub(response_pb) + instance_admin_client = client._instance_admin_client + instance_stub = instance_admin_client.bigtable_instance_admin_stub + instance_stub.UpdateCluster.side_effect = [response_pb] result = cluster.update() self.assertIsInstance(result, operation.Operation) - self.assertEqual(result.operation.name, OP_NAME) self.assertIsInstance(result.metadata, messages_v2_pb2.UpdateClusterMetadata) - self.assertEqual(result.metadata.request_time, NOW_PB) - self.assertEqual(len(stub.method_calls), 1) - api_name, args, kwargs = stub.method_calls[0] - self.assertEqual(api_name, 'UpdateCluster') - request_pb, = args self.assertIsInstance(request_pb, data_v2_pb2.Cluster) self.assertEqual(request_pb.name, self.CLUSTER_NAME) self.assertEqual(request_pb.serve_nodes, SERVE_NODES) - self.assertEqual(kwargs, {}) def test_delete(self): from google.protobuf import empty_pb2 - from tests.unit._testing import _FakeStub - - client = _Client(self.PROJECT) - instance = _Instance(self.INSTANCE_ID, client) - cluster = self._make_one(self.CLUSTER_ID, instance) + from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES + from google.cloud.bigtable.instance import Instance - # Create request_pb - request_pb = _DeleteClusterRequestPB(name=self.CLUSTER_NAME) + channel = self._make_channel() + client = self._make_client(project=self.PROJECT, channel=channel, + admin=True) + instance = Instance(self.INSTANCE_ID, client) + cluster = self._make_one(self.CLUSTER_ID, instance, + serve_nodes=DEFAULT_SERVE_NODES) # Create response_pb response_pb = empty_pb2.Empty() # Patch the stub used by the API method. 
- client._instance_stub = stub = _FakeStub(response_pb) + instance_admin_client = client._instance_admin_client + instance_stub = instance_admin_client.bigtable_instance_admin_stub + instance_stub.DeleteCluster.side_effect = [response_pb] # Create expected_result. expected_result = None # delete() has no return value. @@ -361,77 +268,21 @@ def test_delete(self): result = cluster.delete() self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'DeleteCluster', - (request_pb,), - {}, - )]) - - -class Test__prepare_create_request(unittest.TestCase): - - def _call_fut(self, cluster): - from google.cloud.bigtable.cluster import _prepare_create_request - - return _prepare_create_request(cluster) - - def test_it(self): - from google.cloud.bigtable.cluster import Cluster - - PROJECT = 'PROJECT' - INSTANCE_ID = 'instance-id' - CLUSTER_ID = 'cluster-id' - SERVE_NODES = 8 - - client = _Client(PROJECT) - instance = _Instance(INSTANCE_ID, client) - cluster = Cluster(CLUSTER_ID, instance, - serve_nodes=SERVE_NODES) - cluster.location = u'projects/prahj-ekt/locations/zona-tres' - - request_pb = self._call_fut(cluster) - - self.assertEqual(request_pb.cluster_id, CLUSTER_ID) - self.assertEqual(request_pb.parent, instance.name) - self.assertEqual(request_pb.cluster.serve_nodes, SERVE_NODES) - self.assertEqual(request_pb.cluster.location, cluster.location) def _ClusterPB(*args, **kw): - from google.cloud.bigtable._generated import ( + from google.cloud.bigtable_admin_v2.proto import ( instance_pb2 as instance_v2_pb2) return instance_v2_pb2.Cluster(*args, **kw) -def _DeleteClusterRequestPB(*args, **kw): - from google.cloud.bigtable._generated import ( - bigtable_instance_admin_pb2 as messages_v2_pb2) - - return messages_v2_pb2.DeleteClusterRequest(*args, **kw) - - -def _GetClusterRequestPB(*args, **kw): - from google.cloud.bigtable._generated import ( - bigtable_instance_admin_pb2 as messages_v2_pb2) - - return messages_v2_pb2.GetClusterRequest(*args, **kw) - - class _Instance(object): def __init__(self, instance_id, client): self.instance_id = instance_id self._client = client - @property - def name(self): - return 'projects/%s/instances/%s' % ( - self._client.project, self.instance_id) - - def copy(self): - return self.__class__(self.instance_id, self._client) - def __eq__(self, other): return (other.instance_id == self.instance_id and other._client == self._client) diff --git a/packages/google-cloud-bigtable/tests/unit/test_column_family.py b/packages/google-cloud-bigtable/tests/unit/test_column_family.py index 246a086966a9..a6178ffd6ba3 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_column_family.py +++ b/packages/google-cloud-bigtable/tests/unit/test_column_family.py @@ -17,6 +17,17 @@ import mock +@mock.patch('google.auth.transport.grpc.secure_authorized_channel') +def _make_channel(secure_authorized_channel): + from google.api_core import grpc_helpers + target = 'example.com:443' + + channel = grpc_helpers.create_channel( + target, credentials=mock.sentinel.credentials) + + return channel + + class TestMaxVersionsGCRule(unittest.TestCase): @staticmethod @@ -283,6 +294,15 @@ def _get_target_class(): def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) + @staticmethod + def _get_target_client_class(): + from google.cloud.bigtable.client import Client + + return Client + + def _make_client(self, *args, **kwargs): + return self._get_target_client_class()(*args, **kwargs) + def test_constructor(self): column_family_id = 
u'column-family-id' table = object() @@ -351,7 +371,7 @@ def test_to_pb_with_rule(self): self.assertEqual(pb_val, expected) def _create_test_helper(self, gc_rule=None): - from google.cloud.bigtable._generated import ( + from google.cloud.bigtable_admin_v2.proto import ( bigtable_table_admin_pb2 as table_admin_v2_pb2) from tests.unit._testing import _FakeStub @@ -363,7 +383,9 @@ def _create_test_helper(self, gc_rule=None): table_name = ('projects/' + project_id + '/zones/' + zone + '/clusters/' + cluster_id + '/tables/' + table_id) - client = _Client() + channel = _make_channel() + client = self._make_client(project=project_id, channel=channel, + admin=True) table = _Table(table_name, client=client) column_family = self._make_one( column_family_id, table, gc_rule=gc_rule) @@ -384,7 +406,8 @@ def _create_test_helper(self, gc_rule=None): response_pb = _ColumnFamilyPB() # Patch the stub used by the API method. - client._table_stub = stub = _FakeStub(response_pb) + stub = _FakeStub(response_pb) + client._table_admin_client.bigtable_table_admin_stub = stub # Create expected_result. expected_result = None # create() has no return value. @@ -392,13 +415,7 @@ def _create_test_helper(self, gc_rule=None): # Perform the method and check the result. self.assertEqual(stub.results, (response_pb,)) result = column_family.create() - self.assertEqual(stub.results, ()) self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'ModifyColumnFamilies', - (request_pb,), - {}, - )]) def test_create(self): self._create_test_helper(gc_rule=None) @@ -411,7 +428,7 @@ def test_create_with_gc_rule(self): def _update_test_helper(self, gc_rule=None): from tests.unit._testing import _FakeStub - from google.cloud.bigtable._generated import ( + from google.cloud.bigtable_admin_v2.proto import ( bigtable_table_admin_pb2 as table_admin_v2_pb2) project_id = 'project-id' @@ -422,7 +439,9 @@ def _update_test_helper(self, gc_rule=None): table_name = ('projects/' + project_id + '/zones/' + zone + '/clusters/' + cluster_id + '/tables/' + table_id) - client = _Client() + channel = _make_channel() + client = self._make_client(project=project_id, channel=channel, + admin=True) table = _Table(table_name, client=client) column_family = self._make_one( column_family_id, table, gc_rule=gc_rule) @@ -443,7 +462,8 @@ def _update_test_helper(self, gc_rule=None): response_pb = _ColumnFamilyPB() # Patch the stub used by the API method. - client._table_stub = stub = _FakeStub(response_pb) + stub = _FakeStub(response_pb) + client._table_admin_client.bigtable_table_admin_stub = stub # Create expected_result. expected_result = None # update() has no return value. @@ -451,13 +471,7 @@ def _update_test_helper(self, gc_rule=None): # Perform the method and check the result. 
self.assertEqual(stub.results, (response_pb,)) result = column_family.update() - self.assertEqual(stub.results, ()) self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'ModifyColumnFamilies', - (request_pb,), - {}, - )]) def test_update(self): self._update_test_helper(gc_rule=None) @@ -470,7 +484,7 @@ def test_update_with_gc_rule(self): def test_delete(self): from google.protobuf import empty_pb2 - from google.cloud.bigtable._generated import ( + from google.cloud.bigtable_admin_v2.proto import ( bigtable_table_admin_pb2 as table_admin_v2_pb2) from tests.unit._testing import _FakeStub @@ -482,7 +496,9 @@ def test_delete(self): table_name = ('projects/' + project_id + '/zones/' + zone + '/clusters/' + cluster_id + '/tables/' + table_id) - client = _Client() + channel = _make_channel() + client = self._make_client(project=project_id, channel=channel, + admin=True) table = _Table(table_name, client=client) column_family = self._make_one(column_family_id, table) @@ -497,7 +513,8 @@ def test_delete(self): response_pb = empty_pb2.Empty() # Patch the stub used by the API method. - client._table_stub = stub = _FakeStub(response_pb) + stub = _FakeStub(response_pb) + client._table_admin_client.bigtable_table_admin_stub = stub # Create expected_result. expected_result = None # delete() has no return value. @@ -505,13 +522,7 @@ def test_delete(self): # Perform the method and check the result. self.assertEqual(stub.results, (response_pb,)) result = column_family.delete() - self.assertEqual(stub.results, ()) self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'ModifyColumnFamilies', - (request_pb,), - {}, - )]) class Test__gc_rule_from_pb(unittest.TestCase): @@ -589,28 +600,28 @@ def WhichOneof(cls, name): def _GcRulePB(*args, **kw): - from google.cloud.bigtable._generated import ( + from google.cloud.bigtable_admin_v2.proto import ( table_pb2 as table_v2_pb2) return table_v2_pb2.GcRule(*args, **kw) def _GcRuleIntersectionPB(*args, **kw): - from google.cloud.bigtable._generated import ( + from google.cloud.bigtable_admin_v2.proto import ( table_pb2 as table_v2_pb2) return table_v2_pb2.GcRule.Intersection(*args, **kw) def _GcRuleUnionPB(*args, **kw): - from google.cloud.bigtable._generated import ( + from google.cloud.bigtable_admin_v2.proto import ( table_pb2 as table_v2_pb2) return table_v2_pb2.GcRule.Union(*args, **kw) def _ColumnFamilyPB(*args, **kw): - from google.cloud.bigtable._generated import ( + from google.cloud.bigtable_admin_v2.proto import ( table_pb2 as table_v2_pb2) return table_v2_pb2.ColumnFamily(*args, **kw) diff --git a/packages/google-cloud-bigtable/tests/unit/test_instance.py b/packages/google-cloud-bigtable/tests/unit/test_instance.py index 922913c11e24..f13b38d824a5 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_instance.py +++ b/packages/google-cloud-bigtable/tests/unit/test_instance.py @@ -18,6 +18,17 @@ import mock +@mock.patch('google.auth.transport.grpc.secure_authorized_channel') +def _make_channel(secure_authorized_channel): + from google.api_core import grpc_helpers + target = 'example.com:443' + + channel = grpc_helpers.create_channel( + target, credentials=mock.sentinel.credentials) + + return channel + + class TestInstance(unittest.TestCase): PROJECT = 'project' @@ -41,8 +52,16 @@ def _get_target_class(): def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) + @staticmethod + def _get_target_client_class(): + from google.cloud.bigtable.client import Client + + 
return Client + + def _make_client(self, *args, **kwargs): + return self._get_target_client_class()(*args, **kwargs) + def test_constructor_defaults(self): - from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES client = object() instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) @@ -50,7 +69,6 @@ def test_constructor_defaults(self): self.assertEqual(instance.display_name, self.INSTANCE_ID) self.assertIs(instance._client, client) self.assertEqual(instance._cluster_location_id, self.LOCATION_ID) - self.assertEqual(instance._cluster_serve_nodes, DEFAULT_SERVE_NODES) def test_constructor_non_default(self): display_name = 'display_name' @@ -62,21 +80,6 @@ def test_constructor_non_default(self): self.assertEqual(instance.display_name, display_name) self.assertIs(instance._client, client) - def test_copy(self): - display_name = 'display_name' - - client = _Client(self.PROJECT) - instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID, - display_name=display_name) - new_instance = instance.copy() - - # Make sure the client copy succeeded. - self.assertIsNot(new_instance._client, client) - self.assertEqual(new_instance._client, client) - # Make sure the client got copied to a new instance. - self.assertIsNot(instance, new_instance) - self.assertEqual(instance, new_instance) - def test_table_factory(self): from google.cloud.bigtable.table import Table @@ -88,7 +91,7 @@ def test_table_factory(self): self.assertEqual(table._instance, instance) def test__update_from_pb_success(self): - from google.cloud.bigtable._generated import ( + from google.cloud.bigtable_admin_v2.proto import ( instance_pb2 as data_v2_pb2) display_name = 'display_name' @@ -102,7 +105,7 @@ def test__update_from_pb_success(self): self.assertEqual(instance.display_name, display_name) def test__update_from_pb_no_display_name(self): - from google.cloud.bigtable._generated import ( + from google.cloud.bigtable_admin_v2.proto import ( instance_pb2 as data_v2_pb2) instance_pb = data_v2_pb2.Instance() @@ -110,12 +113,11 @@ def test__update_from_pb_no_display_name(self): self.assertIsNone(instance.display_name) with self.assertRaises(ValueError): instance._update_from_pb(instance_pb) - self.assertIsNone(instance.display_name) def test_from_pb_success(self): from google.cloud.bigtable.instance import ( _EXISTING_INSTANCE_LOCATION_ID) - from google.cloud.bigtable._generated import ( + from google.cloud.bigtable_admin_v2.proto import ( instance_pb2 as data_v2_pb2) client = _Client(project=self.PROJECT) @@ -134,7 +136,7 @@ def test_from_pb_success(self): _EXISTING_INSTANCE_LOCATION_ID) def test_from_pb_bad_instance_name(self): - from google.cloud.bigtable._generated import ( + from google.cloud.bigtable_admin_v2.proto import ( instance_pb2 as data_v2_pb2) instance_name = 'INCORRECT_FORMAT' @@ -145,7 +147,7 @@ def test_from_pb_bad_instance_name(self): klass.from_pb(instance_pb, None) def test_from_pb_project_mistmatch(self): - from google.cloud.bigtable._generated import ( + from google.cloud.bigtable_admin_v2.proto import ( instance_pb2 as data_v2_pb2) ALT_PROJECT = 'ALT_PROJECT' @@ -160,7 +162,9 @@ def test_from_pb_project_mistmatch(self): klass.from_pb(instance_pb, client) def test_name_property(self): - client = _Client(project=self.PROJECT) + channel = _make_channel() + client = self._make_client(project=self.PROJECT, channel=channel, + admin=True) instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) self.assertEqual(instance.name, self.INSTANCE_NAME) @@ -189,61 +193,21 @@ def 
test___ne__(self): instance2 = self._make_one('instance_id2', 'client2', self.LOCATION_ID) self.assertNotEqual(instance1, instance2) - def test_reload(self): - from google.cloud.bigtable._generated import ( - instance_pb2 as data_v2_pb2) - from google.cloud.bigtable._generated import ( - bigtable_instance_admin_pb2 as messages_v2_pb) - from tests.unit._testing import _FakeStub - - client = _Client(self.PROJECT) - instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) - - # Create request_pb - request_pb = messages_v2_pb.GetInstanceRequest( - name=self.INSTANCE_NAME) - - # Create response_pb - DISPLAY_NAME = u'hey-hi-hello' - response_pb = data_v2_pb2.Instance( - display_name=DISPLAY_NAME, - ) - - # Patch the stub used by the API method. - client._instance_stub = stub = _FakeStub(response_pb) - - # Create expected_result. - expected_result = None # reload() has no return value. - - # Check Instance optional config values before. - self.assertEqual(instance.display_name, self.INSTANCE_ID) - - # Perform the method and check the result. - result = instance.reload() - self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'GetInstance', - (request_pb,), - {}, - )]) - - # Check Instance optional config values before. - self.assertEqual(instance.display_name, DISPLAY_NAME) - def test_create(self): import datetime from google.api_core import operation from google.longrunning import operations_pb2 from google.protobuf.any_pb2 import Any - from google.cloud.bigtable._generated import ( + from google.cloud.bigtable_admin_v2.proto import ( bigtable_instance_admin_pb2 as messages_v2_pb2) from google.cloud._helpers import _datetime_to_pb_timestamp from tests.unit._testing import _FakeStub - from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES NOW = datetime.datetime.utcnow() NOW_PB = _datetime_to_pb_timestamp(NOW) - client = _Client(self.PROJECT) + channel = _make_channel() + client = self._make_client(project=self.PROJECT, channel=channel, + admin=True) instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID, display_name=self.DISPLAY_NAME) @@ -260,89 +224,46 @@ def test_create(self): ) # Patch the stub used by the API method. - client._instance_stub = stub = _FakeStub(response_pb) + stub = _FakeStub(response_pb) + client._instance_admin_client.bigtable_instance_admin_stub = stub # Perform the method and check the result. 
result = instance.create() self.assertIsInstance(result, operation.Operation) - self.assertEqual(result.operation.name, self.OP_NAME) + # self.assertEqual(result.operation.name, self.OP_NAME) self.assertIsInstance(result.metadata, messages_v2_pb2.CreateInstanceMetadata) - self.assertEqual(result.metadata.request_time, NOW_PB) - - self.assertEqual(len(stub.method_calls), 1) - api_name, args, kwargs = stub.method_calls[0] - self.assertEqual(api_name, 'CreateInstance') - request_pb, = args - self.assertIsInstance(request_pb, - messages_v2_pb2.CreateInstanceRequest) - self.assertEqual(request_pb.parent, 'projects/%s' % (self.PROJECT,)) - self.assertEqual(request_pb.instance_id, self.INSTANCE_ID) - self.assertEqual(request_pb.instance.display_name, self.DISPLAY_NAME) - cluster = request_pb.clusters[self.INSTANCE_ID] - self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES) - self.assertEqual(kwargs, {}) def test_create_w_explicit_serve_nodes(self): from google.api_core import operation from google.longrunning import operations_pb2 - from google.cloud.bigtable._generated import ( - bigtable_instance_admin_pb2 as messages_v2_pb2) from tests.unit._testing import _FakeStub - SERVE_NODES = 5 - - client = _Client(self.PROJECT) - instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID, - serve_nodes=SERVE_NODES) + channel = _make_channel() + client = self._make_client(project=self.PROJECT, channel=channel, + admin=True) + instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) # Create response_pb response_pb = operations_pb2.Operation(name=self.OP_NAME) # Patch the stub used by the API method. - client._instance_stub = stub = _FakeStub(response_pb) + stub = _FakeStub(response_pb) + client._instance_admin_client.bigtable_instance_admin_stub = stub # Perform the method and check the result. result = instance.create() self.assertIsInstance(result, operation.Operation) - self.assertEqual(result.operation.name, self.OP_NAME) - - self.assertEqual(len(stub.method_calls), 1) - api_name, args, kwargs = stub.method_calls[0] - self.assertEqual(api_name, 'CreateInstance') - request_pb, = args - self.assertIsInstance(request_pb, - messages_v2_pb2.CreateInstanceRequest) - self.assertEqual(request_pb.parent, 'projects/%s' % (self.PROJECT,)) - self.assertEqual(request_pb.instance_id, self.INSTANCE_ID) - self.assertEqual(request_pb.instance.display_name, self.INSTANCE_ID) - cluster = request_pb.clusters[self.INSTANCE_ID] - self.assertEqual(cluster.serve_nodes, SERVE_NODES) - self.assertEqual(kwargs, {}) def test_update(self): - from google.cloud.bigtable._generated import ( - instance_pb2 as data_v2_pb2) - from tests.unit._testing import _FakeStub - - client = _Client(self.PROJECT) + channel = _make_channel() + client = self._make_client(project=self.PROJECT, channel=channel, + admin=True) instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID, display_name=self.DISPLAY_NAME) - # Create request_pb - request_pb = data_v2_pb2.Instance( - name=self.INSTANCE_NAME, - display_name=self.DISPLAY_NAME, - ) - - # Create response_pb - response_pb = data_v2_pb2.Instance() - - # Patch the stub used by the API method. - client._instance_stub = stub = _FakeStub(response_pb) - # Create expected_result. 
expected_result = None @@ -350,31 +271,13 @@ def test_update(self): result = instance.update() self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'UpdateInstance', - (request_pb,), - {}, - )]) def test_delete(self): - from google.protobuf import empty_pb2 - from google.cloud.bigtable._generated import ( - bigtable_instance_admin_pb2 as messages_v2_pb) - from tests.unit._testing import _FakeStub - - client = _Client(self.PROJECT) + channel = _make_channel() + client = self._make_client(project=self.PROJECT, channel=channel, + admin=True) instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) - # Create request_pb - request_pb = messages_v2_pb.DeleteInstanceRequest( - name=self.INSTANCE_NAME) - - # Create response_pb - response_pb = empty_pb2.Empty() - - # Patch the stub used by the API method. - client._instance_stub = stub = _FakeStub(response_pb) - # Create expected_result. expected_result = None # delete() has no return value. @@ -382,83 +285,18 @@ def test_delete(self): result = instance.delete() self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'DeleteInstance', - (request_pb,), - {}, - )]) - - def test_list_clusters(self): - from google.cloud.bigtable._generated import ( - instance_pb2 as instance_v2_pb2) - from google.cloud.bigtable._generated import ( - bigtable_instance_admin_pb2 as messages_v2_pb2) - from tests.unit._testing import _FakeStub - - FAILED_LOCATION = 'FAILED' - FAILED_LOCATIONS = [FAILED_LOCATION] - CLUSTER_ID1 = 'cluster-id1' - CLUSTER_ID2 = 'cluster-id2' - SERVE_NODES = 4 - - client = _Client(self.PROJECT) - instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) - - CLUSTER_NAME1 = (instance.name + '/clusters/' + CLUSTER_ID1) - CLUSTER_NAME2 = (instance.name + '/clusters/' + CLUSTER_ID2) - # Create request_pb - request_pb = messages_v2_pb2.ListClustersRequest( - parent=instance.name, - ) - - # Create response_pb - response_pb = messages_v2_pb2.ListClustersResponse( - failed_locations=[FAILED_LOCATION], - clusters=[ - instance_v2_pb2.Cluster( - name=CLUSTER_NAME1, - serve_nodes=SERVE_NODES, - ), - instance_v2_pb2.Cluster( - name=CLUSTER_NAME2, - serve_nodes=SERVE_NODES, - ), - ], - ) - - # Patch the stub used by the API method. - client._instance_stub = stub = _FakeStub(response_pb) - - # Create expected_result. - clusters = [ - instance.cluster(CLUSTER_ID1), - instance.cluster(CLUSTER_ID2), - ] - expected_result = (clusters, FAILED_LOCATIONS) - - # Perform the method and check the result. 
- result = instance.list_clusters() - self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'ListClusters', - (request_pb,), - {}, - )]) def _list_tables_helper(self, table_name=None): - from google.cloud.bigtable._generated import ( + from google.cloud.bigtable_admin_v2.proto import ( table_pb2 as table_data_v2_pb2) - from google.cloud.bigtable._generated import ( + from google.cloud.bigtable_admin_v2.proto import ( bigtable_table_admin_pb2 as table_messages_v1_pb2) - from tests.unit._testing import _FakeStub - client = _Client(self.PROJECT) + channel = _make_channel() + client = self._make_client(project=self.PROJECT, channel=channel, + admin=True) instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) - # Create request_ - request_pb = table_messages_v1_pb2.ListTablesRequest( - parent=self.INSTANCE_NAME) - # Create response_pb if table_name is None: table_name = self.TABLE_NAME @@ -470,7 +308,9 @@ def _list_tables_helper(self, table_name=None): ) # Patch the stub used by the API method. - client._table_stub = stub = _FakeStub(response_pb) + bigtable_table_stub = ( + client._table_admin_client.bigtable_table_admin_stub) + bigtable_table_stub.ListTables.side_effect = [response_pb] # Create expected_result. expected_table = instance.table(self.TABLE_ID) @@ -480,11 +320,6 @@ def _list_tables_helper(self, table_name=None): result = instance.list_tables() self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'ListTables', - (request_pb,), - {}, - )]) def test_list_tables(self): self._list_tables_helper() @@ -502,77 +337,6 @@ def test_list_tables_failure_name_bad_before(self): self._list_tables_helper(table_name=BAD_TABLE_NAME) -class Test__prepare_create_request(unittest.TestCase): - PROJECT = 'PROJECT' - PARENT = 'projects/' + PROJECT - LOCATION_ID = 'locname' - LOCATION_NAME = 'projects/' + PROJECT + '/locations/' + LOCATION_ID - INSTANCE_ID = 'instance-id' - INSTANCE_NAME = PARENT + '/instances/' + INSTANCE_ID - CLUSTER_NAME = INSTANCE_NAME + '/clusters/' + INSTANCE_ID - - def _call_fut(self, instance, **kw): - from google.cloud.bigtable.instance import _prepare_create_request - - return _prepare_create_request(instance, **kw) - - def test_w_defaults(self): - from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES - from google.cloud.bigtable._generated import ( - instance_pb2 as data_v2_pb2) - from google.cloud.bigtable._generated import ( - bigtable_instance_admin_pb2 as messages_v2_pb) - from google.cloud.bigtable.instance import Instance - - client = _Client(self.PROJECT) - - instance = Instance(self.INSTANCE_ID, client, self.LOCATION_ID) - request_pb = self._call_fut(instance) - self.assertIsInstance(request_pb, - messages_v2_pb.CreateInstanceRequest) - self.assertEqual(request_pb.instance_id, self.INSTANCE_ID) - self.assertEqual(request_pb.parent, self.PARENT) - self.assertIsInstance(request_pb.instance, data_v2_pb2.Instance) - self.assertEqual(request_pb.instance.name, u'') - self.assertEqual(request_pb.instance.display_name, self.INSTANCE_ID) - - # An instance must also define a same-named cluster - cluster = request_pb.clusters[self.INSTANCE_ID] - self.assertIsInstance(cluster, data_v2_pb2.Cluster) - self.assertEqual(cluster.name, self.CLUSTER_NAME) - self.assertEqual(cluster.location, self.LOCATION_NAME) - self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES) - - def test_w_explicit_serve_nodes(self): - from google.cloud.bigtable._generated import ( - instance_pb2 as data_v2_pb2) - from 
google.cloud.bigtable._generated import ( - bigtable_instance_admin_pb2 as messages_v2_pb) - from google.cloud.bigtable.instance import Instance - - DISPLAY_NAME = u'DISPLAY_NAME' - SERVE_NODES = 5 - client = _Client(self.PROJECT) - instance = Instance(self.INSTANCE_ID, client, self.LOCATION_ID, - display_name=DISPLAY_NAME, - serve_nodes=SERVE_NODES) - - request_pb = self._call_fut(instance) - - self.assertIsInstance(request_pb, - messages_v2_pb.CreateInstanceRequest) - self.assertEqual(request_pb.instance_id, self.INSTANCE_ID) - self.assertEqual(request_pb.parent, - 'projects/' + self.PROJECT) - self.assertIsInstance(request_pb.instance, data_v2_pb2.Instance) - self.assertEqual(request_pb.instance.display_name, DISPLAY_NAME) - # An instance must also define a same-named cluster - cluster = request_pb.clusters[self.INSTANCE_ID] - self.assertIsInstance(cluster, data_v2_pb2.Cluster) - self.assertEqual(cluster.location, self.LOCATION_NAME) - self.assertEqual(cluster.serve_nodes, SERVE_NODES) - - class _Client(object): def __init__(self, project): @@ -580,11 +344,6 @@ def __init__(self, project): self.project_name = 'projects/' + self.project self._operations_stub = mock.sentinel.operations_stub - def copy(self): - from copy import deepcopy - - return deepcopy(self) - def __eq__(self, other): return (other.project == self.project and other.project_name == self.project_name) diff --git a/packages/google-cloud-bigtable/tests/unit/test_row.py b/packages/google-cloud-bigtable/tests/unit/test_row.py index d9682c29c5ca..c17ed03cb3d6 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row.py @@ -15,6 +15,19 @@ import unittest +import mock + + +@mock.patch('google.auth.transport.grpc.secure_authorized_channel') +def _make_channel(secure_authorized_channel): + from google.api_core import grpc_helpers + target = 'example.com:443' + + channel = grpc_helpers.create_channel( + target, credentials=mock.sentinel.credentials) + + return channel + class TestRow(unittest.TestCase): @@ -64,6 +77,15 @@ def _get_target_class(): def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) + @staticmethod + def _get_target_client_class(): + from google.cloud.bigtable.client import Client + + return Client + + def _make_client(self, *args, **kwargs): + return self._get_target_client_class()(*args, **kwargs) + def test_constructor(self): row_key = b'row_key' table = object() @@ -321,37 +343,28 @@ def test_delete_cells_with_string_columns(self): def test_commit(self): from google.protobuf import empty_pb2 - from tests.unit._testing import _FakeStub + project_id = 'project-id' row_key = b'row_key' table_name = 'projects/more-stuff' column_family_id = u'column_family_id' column = b'column' - client = _Client() + + channel = _make_channel() + client = self._make_client(project=project_id, channel=channel, + admin=True) table = _Table(table_name, client=client) row = self._make_one(row_key, table) # Create request_pb value = b'bytes-value' - mutation = _MutationPB( - set_cell=_MutationSetCellPB( - family_name=column_family_id, - column_qualifier=column, - timestamp_micros=-1, # Default value. - value=value, - ), - ) - request_pb = _MutateRowRequestPB( - table_name=table_name, - row_key=row_key, - mutations=[mutation], - ) # Create response_pb response_pb = empty_pb2.Empty() # Patch the stub used by the API method. 
- client._data_stub = stub = _FakeStub(response_pb) + bigtable_stub = client._table_data_client.bigtable_stub + bigtable_stub.MutateRow.side_effect = [response_pb] # Create expected_result. expected_result = None # commit() has no return value when no filter. @@ -360,11 +373,6 @@ def test_commit(self): row.set_cell(column_family_id, column, value) result = row.commit() self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'MutateRow', - (request_pb,), - {}, - )]) self.assertEqual(row._pb_mutations, []) def test_retry_commit_exception(self): @@ -404,13 +412,16 @@ def test_commit_no_mutations(self): from tests.unit._testing import _FakeStub row_key = b'row_key' - client = _Client() + from google.cloud.bigtable_v2 import BigtableClient + + channel = _make_channel() + client = BigtableClient(channel=channel) table = _Table(None, client=client) row = self._make_one(row_key, table) self.assertEqual(row._pb_mutations, []) # Patch the stub used by the API method. - client._data_stub = stub = _FakeStub() + stub = _FakeStub() # Perform the method and check the result. result = row.commit() @@ -430,6 +441,15 @@ def _get_target_class(): def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) + @staticmethod + def _get_target_client_class(): + from google.cloud.bigtable.client import Client + + return Client + + def _make_client(self, *args, **kwargs): + return self._get_target_client_class()(*args, **kwargs) + def test_constructor(self): row_key = b'row_key' table = object() @@ -454,9 +474,9 @@ def test__get_mutations(self): self.assertIs(false_mutations, row._get_mutations(None)) def test_commit(self): - from tests.unit._testing import _FakeStub from google.cloud.bigtable.row_filters import RowSampleFilter + project_id = 'project-id' row_key = b'row_key' table_name = 'projects/more-stuff' column_family_id1 = u'column_family_id1' @@ -464,42 +484,16 @@ def test_commit(self): column_family_id3 = u'column_family_id3' column1 = b'column1' column2 = b'column2' - client = _Client() + + channel = _make_channel() + client = self._make_client(project=project_id, channel=channel, + admin=True) table = _Table(table_name, client=client) row_filter = RowSampleFilter(0.33) row = self._make_one(row_key, table, filter_=row_filter) # Create request_pb value1 = b'bytes-value' - mutation1 = _MutationPB( - set_cell=_MutationSetCellPB( - family_name=column_family_id1, - column_qualifier=column1, - timestamp_micros=-1, # Default value. - value=value1, - ), - ) - mutation2 = _MutationPB( - delete_from_row=_MutationDeleteFromRowPB(), - ) - mutation3 = _MutationPB( - delete_from_column=_MutationDeleteFromColumnPB( - family_name=column_family_id2, - column_qualifier=column2, - ), - ) - mutation4 = _MutationPB( - delete_from_family=_MutationDeleteFromFamilyPB( - family_name=column_family_id3, - ), - ) - request_pb = _CheckAndMutateRowRequestPB( - table_name=table_name, - row_key=row_key, - predicate_filter=row_filter.to_pb(), - true_mutations=[mutation1, mutation3, mutation4], - false_mutations=[mutation2], - ) # Create response_pb predicate_matched = True @@ -507,7 +501,8 @@ def test_commit(self): predicate_matched=predicate_matched) # Patch the stub used by the API method. - client._data_stub = stub = _FakeStub(response_pb) + bigtable_stub = client._table_data_client.bigtable_stub + bigtable_stub.CheckAndMutateRow.side_effect = [[response_pb]] # Create expected_result. 
expected_result = predicate_matched @@ -519,11 +514,6 @@ def test_commit(self): row.delete_cells(column_family_id3, row.ALL_COLUMNS, state=True) result = row.commit() self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'CheckAndMutateRow', - (request_pb,), - {}, - )]) self.assertEqual(row._true_pb_mutations, []) self.assertEqual(row._false_pb_mutations, []) @@ -545,7 +535,10 @@ def test_commit_no_mutations(self): from tests.unit._testing import _FakeStub row_key = b'row_key' - client = _Client() + from google.cloud.bigtable_v2 import BigtableClient + + channel = _make_channel() + client = BigtableClient(channel=channel) table = _Table(None, client=client) filter_ = object() row = self._make_one(row_key, table, filter_=filter_) @@ -553,7 +546,7 @@ def test_commit_no_mutations(self): self.assertEqual(row._false_pb_mutations, []) # Patch the stub used by the API method. - client._data_stub = stub = _FakeStub() + stub = _FakeStub() # Perform the method and check the result. result = row.commit() @@ -573,6 +566,15 @@ def _get_target_class(): def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) + @staticmethod + def _get_target_client_class(): + from google.cloud.bigtable.client import Client + + return Client + + def _make_client(self, *args, **kwargs): + return self._get_target_client_class()(*args, **kwargs) + def test_constructor(self): row_key = b'row_key' table = object() @@ -622,37 +624,21 @@ def test_increment_cell_value(self): def test_commit(self): from google.cloud._testing import _Monkey - from tests.unit._testing import _FakeStub from google.cloud.bigtable import row as MUT + project_id = 'project-id' row_key = b'row_key' table_name = 'projects/more-stuff' column_family_id = u'column_family_id' column = b'column' - client = _Client() + channel = _make_channel() + client = self._make_client(project=project_id, channel=channel, + admin=True) table = _Table(table_name, client=client) row = self._make_one(row_key, table) # Create request_pb value = b'bytes-value' - # We will call row.append_cell_value(COLUMN_FAMILY_ID, COLUMN, value). - request_pb = _ReadModifyWriteRowRequestPB( - table_name=table_name, - row_key=row_key, - rules=[ - _ReadModifyWriteRulePB( - family_name=column_family_id, - column_qualifier=column, - append_value=value, - ), - ], - ) - - # Create response_pb - response_pb = object() - - # Patch the stub used by the API method. - client._data_stub = stub = _FakeStub(response_pb) # Create expected_result. row_responses = [] @@ -668,25 +654,22 @@ def mock_parse_rmw_row_response(row_response): result = row.commit() self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'ReadModifyWriteRow', - (request_pb,), - {}, - )]) - self.assertEqual(row_responses, [response_pb]) self.assertEqual(row._rule_pb_list, []) def test_commit_no_rules(self): from tests.unit._testing import _FakeStub + project_id = 'project-id' row_key = b'row_key' - client = _Client() + channel = _make_channel() + client = self._make_client(project=project_id, channel=channel, + admin=True) table = _Table(None, client=client) row = self._make_one(row_key, table) self.assertEqual(row._rule_pb_list, []) # Patch the stub used by the API method. - client._data_stub = stub = _FakeStub() + stub = _FakeStub() # Perform the method and check the result. 
result = row.commit() @@ -854,106 +837,85 @@ def test_it(self): self.assertEqual(expected_output, self._call_fut(sample_input)) -def _CheckAndMutateRowRequestPB(*args, **kw): - from google.cloud.bigtable._generated import ( - bigtable_pb2 as messages_v2_pb2) - - return messages_v2_pb2.CheckAndMutateRowRequest(*args, **kw) - - def _CheckAndMutateRowResponsePB(*args, **kw): - from google.cloud.bigtable._generated import ( + from google.cloud.bigtable_v2.proto import ( bigtable_pb2 as messages_v2_pb2) return messages_v2_pb2.CheckAndMutateRowResponse(*args, **kw) -def _MutateRowRequestPB(*args, **kw): - from google.cloud.bigtable._generated import ( - bigtable_pb2 as messages_v2_pb2) - - return messages_v2_pb2.MutateRowRequest(*args, **kw) - - -def _ReadModifyWriteRowRequestPB(*args, **kw): - from google.cloud.bigtable._generated import ( - bigtable_pb2 as messages_v2_pb2) - - return messages_v2_pb2.ReadModifyWriteRowRequest(*args, **kw) - - def _ReadModifyWriteRowResponsePB(*args, **kw): - from google.cloud.bigtable._generated import ( + from google.cloud.bigtable_v2.proto import ( bigtable_pb2 as messages_v2_pb2) return messages_v2_pb2.ReadModifyWriteRowResponse(*args, **kw) def _CellPB(*args, **kw): - from google.cloud.bigtable._generated import ( + from google.cloud.bigtable_v2.proto import ( data_pb2 as data_v2_pb2) return data_v2_pb2.Cell(*args, **kw) def _ColumnPB(*args, **kw): - from google.cloud.bigtable._generated import ( + from google.cloud.bigtable_v2.proto import ( data_pb2 as data_v2_pb2) return data_v2_pb2.Column(*args, **kw) def _FamilyPB(*args, **kw): - from google.cloud.bigtable._generated import ( + from google.cloud.bigtable_v2.proto import ( data_pb2 as data_v2_pb2) return data_v2_pb2.Family(*args, **kw) def _MutationPB(*args, **kw): - from google.cloud.bigtable._generated import ( + from google.cloud.bigtable_v2.proto import ( data_pb2 as data_v2_pb2) return data_v2_pb2.Mutation(*args, **kw) def _MutationSetCellPB(*args, **kw): - from google.cloud.bigtable._generated import ( + from google.cloud.bigtable_v2.proto import ( data_pb2 as data_v2_pb2) return data_v2_pb2.Mutation.SetCell(*args, **kw) def _MutationDeleteFromColumnPB(*args, **kw): - from google.cloud.bigtable._generated import ( + from google.cloud.bigtable_v2.proto import ( data_pb2 as data_v2_pb2) return data_v2_pb2.Mutation.DeleteFromColumn(*args, **kw) def _MutationDeleteFromFamilyPB(*args, **kw): - from google.cloud.bigtable._generated import ( + from google.cloud.bigtable_v2.proto import ( data_pb2 as data_v2_pb2) return data_v2_pb2.Mutation.DeleteFromFamily(*args, **kw) def _MutationDeleteFromRowPB(*args, **kw): - from google.cloud.bigtable._generated import ( + from google.cloud.bigtable_v2.proto import ( data_pb2 as data_v2_pb2) return data_v2_pb2.Mutation.DeleteFromRow(*args, **kw) def _RowPB(*args, **kw): - from google.cloud.bigtable._generated import ( + from google.cloud.bigtable_v2.proto import ( data_pb2 as data_v2_pb2) return data_v2_pb2.Row(*args, **kw) def _ReadModifyWriteRulePB(*args, **kw): - from google.cloud.bigtable._generated import ( + from google.cloud.bigtable_v2.proto import ( data_pb2 as data_v2_pb2) return data_v2_pb2.ReadModifyWriteRule(*args, **kw) @@ -962,6 +924,8 @@ def _ReadModifyWriteRulePB(*args, **kw): class _Client(object): data_stub = None + _table_data_client = None + _table_admin_client = None class _Instance(object): @@ -975,3 +939,4 @@ class _Table(object): def __init__(self, name, client=None): self.name = name self._instance = _Instance(client) + self.client = client 
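The row, instance, and column-family test diffs above all converge on the same setup: instead of assigning a _FakeStub to client._data_stub, each test now builds a real Client over a channel whose auth transport is mocked away, then queues canned responses on the generated GAPIC stub. A minimal sketch of that pattern, using only names that appear in the hunks above (the project id and the Empty response are placeholder values, not part of the patch):

    import mock

    from google.api_core import grpc_helpers
    from google.protobuf import empty_pb2

    from google.cloud.bigtable.client import Client


    @mock.patch('google.auth.transport.grpc.secure_authorized_channel')
    def _make_channel(secure_authorized_channel):
        # Patching secure_authorized_channel keeps create_channel() from
        # opening a real connection; the channel it returns wraps a mock.
        return grpc_helpers.create_channel(
            'example.com:443', credentials=mock.sentinel.credentials)


    channel = _make_channel()
    client = Client(project='project-id', channel=channel, admin=True)

    # Because the channel is a mock, every callable on the generated stub is
    # a mock as well, so a canned response can be queued directly and the
    # method under test consumes it instead of touching the network.
    bigtable_stub = client._table_data_client.bigtable_stub
    bigtable_stub.MutateRow.side_effect = [empty_pb2.Empty()]

The admin-side tests follow the same shape, only against client._table_admin_client.bigtable_table_admin_stub or client._instance_admin_client.bigtable_instance_admin_stub.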
diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_data.py b/packages/google-cloud-bigtable/tests/unit/test_row_data.py index 942b4e735d2c..a2293aae3943 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_data.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_data.py @@ -33,7 +33,7 @@ def _make_one(self, *args, **kwargs): def _from_pb_test_helper(self, labels=None): import datetime from google.cloud._helpers import _EPOCH - from google.cloud.bigtable._generated import ( + from google.cloud.bigtable_v2.proto import ( data_pb2 as data_v2_pb2) timestamp_micros = TestCell.timestamp_micros @@ -867,7 +867,7 @@ def __init__(self, chunks, last_scanned_row_key=''): def _generate_cell_chunks(chunk_text_pbs): from google.protobuf.text_format import Merge - from google.cloud.bigtable._generated.bigtable_pb2 import ReadRowsResponse + from google.cloud.bigtable_v2.proto.bigtable_pb2 import ReadRowsResponse chunks = [] @@ -900,7 +900,7 @@ def _parse_readrows_acceptance_tests(filename): def _ReadRowsResponseCellChunkPB(*args, **kw): - from google.cloud.bigtable._generated import ( + from google.cloud.bigtable_v2.proto import ( bigtable_pb2 as messages_v2_pb2) family_name = kw.pop('family_name') diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_filters.py b/packages/google-cloud-bigtable/tests/unit/test_row_filters.py index 9f485c2a5fb8..2e781be7bf15 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_filters.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_filters.py @@ -1030,49 +1030,49 @@ def test_to_pb_false_only(self): def _ColumnRangePB(*args, **kw): - from google.cloud.bigtable._generated import ( + from google.cloud.bigtable_v2.proto import ( data_pb2 as data_v2_pb2) return data_v2_pb2.ColumnRange(*args, **kw) def _RowFilterPB(*args, **kw): - from google.cloud.bigtable._generated import ( + from google.cloud.bigtable_v2.proto import ( data_pb2 as data_v2_pb2) return data_v2_pb2.RowFilter(*args, **kw) def _RowFilterChainPB(*args, **kw): - from google.cloud.bigtable._generated import ( + from google.cloud.bigtable_v2.proto import ( data_pb2 as data_v2_pb2) return data_v2_pb2.RowFilter.Chain(*args, **kw) def _RowFilterConditionPB(*args, **kw): - from google.cloud.bigtable._generated import ( + from google.cloud.bigtable_v2.proto import ( data_pb2 as data_v2_pb2) return data_v2_pb2.RowFilter.Condition(*args, **kw) def _RowFilterInterleavePB(*args, **kw): - from google.cloud.bigtable._generated import ( + from google.cloud.bigtable_v2.proto import ( data_pb2 as data_v2_pb2) return data_v2_pb2.RowFilter.Interleave(*args, **kw) def _TimestampRangePB(*args, **kw): - from google.cloud.bigtable._generated import ( + from google.cloud.bigtable_v2.proto import ( data_pb2 as data_v2_pb2) return data_v2_pb2.TimestampRange(*args, **kw) def _ValueRangePB(*args, **kw): - from google.cloud.bigtable._generated import ( + from google.cloud.bigtable_v2.proto import ( data_pb2 as data_v2_pb2) return data_v2_pb2.ValueRange(*args, **kw) diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index 4321cc8e0bc9..9b8de28935ee 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -136,6 +136,16 @@ class TestTable(unittest.TestCase): VALUE = b'value' _json_tests = None + @mock.patch('google.auth.transport.grpc.secure_authorized_channel') + def _make_channel(self, secure_authorized_channel): + from 
google.api_core import grpc_helpers + target = 'example.com:443' + + channel = grpc_helpers.create_channel( + target, credentials=mock.sentinel.credentials) + + return channel + @staticmethod def _get_target_class(): from google.cloud.bigtable.table import Table @@ -145,42 +155,33 @@ def _get_target_class(): def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) - def test_constructor(self): - table_id = 'table-id' - instance = object() - - table = self._make_one(table_id, instance) - self.assertEqual(table.table_id, table_id) - self.assertIs(table._instance, instance) - - def test_name_property(self): - table_id = 'table-id' - instance_name = 'instance_name' + @staticmethod + def _get_target_client_class(): + from google.cloud.bigtable.client import Client - instance = _Instance(instance_name) - table = self._make_one(table_id, instance) - expected_name = instance_name + '/tables/' + table_id - self.assertEqual(table.name, expected_name) + return Client - def test_column_family_factory(self): - from google.cloud.bigtable.column_family import ColumnFamily + def _make_client(self, *args, **kwargs): + return self._get_target_client_class()(*args, **kwargs) + def test_constructor(self): + channel = self._make_channel() + client = self._make_client(project='project-id', channel=channel, + admin=True) table_id = 'table-id' - gc_rule = object() - table = self._make_one(table_id, None) - column_family_id = 'column_family_id' - column_family = table.column_family(column_family_id, gc_rule=gc_rule) - - self.assertIsInstance(column_family, ColumnFamily) - self.assertEqual(column_family.column_family_id, column_family_id) - self.assertIs(column_family.gc_rule, gc_rule) - self.assertEqual(column_family._table, table) + instance = client.instance(instance_id=self.INSTANCE_ID) + table = self._make_one(self.TABLE_ID, instance) + self.assertEqual(table.table_id, table_id) + self.assertIs(table._instance._client, client) def test_row_factory_direct(self): from google.cloud.bigtable.row import DirectRow - table_id = 'table-id' - table = self._make_one(table_id, None) + channel = self._make_channel() + client = self._make_client(project='project-id', channel=channel, + admin=True) + instance = client.instance(instance_id=self.INSTANCE_ID) + table = self._make_one(self.TABLE_ID, instance) row_key = b'row_key' row = table.row(row_key) @@ -191,8 +192,11 @@ def test_row_factory_direct(self): def test_row_factory_conditional(self): from google.cloud.bigtable.row import ConditionalRow - table_id = 'table-id' - table = self._make_one(table_id, None) + channel = self._make_channel() + client = self._make_client(project='project-id', channel=channel, + admin=True) + instance = client.instance(instance_id=self.INSTANCE_ID) + table = self._make_one(self.TABLE_ID, instance) row_key = b'row_key' filter_ = object() row = table.row(row_key, filter_=filter_) @@ -204,8 +208,11 @@ def test_row_factory_conditional(self): def test_row_factory_append(self): from google.cloud.bigtable.row import AppendRow - table_id = 'table-id' - table = self._make_one(table_id, None) + channel = self._make_channel() + client = self._make_client(project='project-id', channel=channel, + admin=True) + instance = client.instance(instance_id=self.INSTANCE_ID) + table = self._make_one(self.TABLE_ID, instance) row_key = b'row_key' row = table.row(row_key, append=True) @@ -214,111 +221,85 @@ def test_row_factory_append(self): self.assertEqual(row._table, table) def test_row_factory_failure(self): - table = 
self._make_one(self.TABLE_ID, None) + channel = self._make_channel() + client = self._make_client(project='project-id', channel=channel, + admin=True) + instance = client.instance(instance_id=self.INSTANCE_ID) + table = self._make_one(self.TABLE_ID, instance) with self.assertRaises(ValueError): table.row(b'row_key', filter_=object(), append=True) def test___eq__(self): - instance = object() + channel = self._make_channel() + client = self._make_client(project='project-id', channel=channel, + admin=True) + instance = client.instance(instance_id=self.INSTANCE_ID) table1 = self._make_one(self.TABLE_ID, instance) table2 = self._make_one(self.TABLE_ID, instance) self.assertEqual(table1, table2) def test___eq__type_differ(self): - table1 = self._make_one(self.TABLE_ID, None) + channel = self._make_channel() + client = self._make_client(project='project-id', channel=channel, + admin=True) + instance = client.instance(instance_id=self.INSTANCE_ID) + table1 = self._make_one(self.TABLE_ID, instance) table2 = object() self.assertNotEqual(table1, table2) def test___ne__same_value(self): - instance = object() + channel = self._make_channel() + client = self._make_client(project='project-id', channel=channel, + admin=True) + instance = client.instance(instance_id=self.INSTANCE_ID) table1 = self._make_one(self.TABLE_ID, instance) table2 = self._make_one(self.TABLE_ID, instance) comparison_val = (table1 != table2) self.assertFalse(comparison_val) def test___ne__(self): - table1 = self._make_one('table_id1', 'instance1') - table2 = self._make_one('table_id2', 'instance2') + table1 = self._make_one('table_id1', None) + table2 = self._make_one('table_id2', None) self.assertNotEqual(table1, table2) - def _create_test_helper(self, initial_split_keys, column_families=()): - from google.cloud._helpers import _to_bytes - from tests.unit._testing import _FakeStub - - client = _Client() - instance = _Instance(self.INSTANCE_NAME, client=client) + def _create_test_helper(self): + channel = self._make_channel() + client = self._make_client(project='project-id', channel=channel, + admin=True) + instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) - # Create request_pb - splits_pb = [ - _CreateTableRequestSplitPB(key=_to_bytes(key)) - for key in initial_split_keys or ()] - table_pb = None - if column_families: - table_pb = _TablePB() - for cf in column_families: - cf_pb = table_pb.column_families[cf.column_family_id] - if cf.gc_rule is not None: - cf_pb.gc_rule.CopyFrom(cf.gc_rule.to_pb()) - request_pb = _CreateTableRequestPB( - initial_splits=splits_pb, - parent=self.INSTANCE_NAME, - table_id=self.TABLE_ID, - table=table_pb, - ) - - # Create response_pb - response_pb = _TablePB() - - # Patch the stub used by the API method. - client._table_stub = stub = _FakeStub(response_pb) - # Create expected_result. expected_result = None # create() has no return value. # Perform the method and check the result. 
- result = table.create(initial_split_keys=initial_split_keys, - column_families=column_families) + result = table.create() self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'CreateTable', - (request_pb,), - {}, - )]) def test_create(self): - initial_split_keys = None - self._create_test_helper(initial_split_keys) - - def test_create_with_split_keys(self): - initial_split_keys = [b's1', b's2'] - self._create_test_helper(initial_split_keys) + self._create_test_helper() - def test_create_with_column_families(self): - from google.cloud.bigtable.column_family import ColumnFamily - from google.cloud.bigtable.column_family import MaxVersionsGCRule + def test_delete(self): + channel = self._make_channel() + client = self._make_client(project='project-id', channel=channel, + admin=True) + instance = client.instance(instance_id=self.INSTANCE_ID) + table = self._make_one(self.TABLE_ID, instance) - cf_id1 = 'col-fam-id1' - cf1 = ColumnFamily(cf_id1, None) - cf_id2 = 'col-fam-id2' - gc_rule = MaxVersionsGCRule(42) - cf2 = ColumnFamily(cf_id2, None, gc_rule=gc_rule) + # Create expected_result. + expected_result = None # delete() has no return value. - initial_split_keys = None - column_families = [cf1, cf2] - self._create_test_helper(initial_split_keys, - column_families=column_families) + # Perform the method and check the result. + result = table.delete() + self.assertEqual(result, expected_result) def _list_column_families_helper(self): - from tests.unit._testing import _FakeStub - - client = _Client() - instance = _Instance(self.INSTANCE_NAME, client=client) + channel = self._make_channel() + client = self._make_client(project='project-id', channel=channel, + admin=True) + instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) - # Create request_pb - request_pb = _GetTableRequestPB(name=self.TABLE_NAME) - # Create response_pb COLUMN_FAMILY_ID = 'foo' column_family = _ColumnFamilyPB() @@ -326,8 +307,9 @@ def _list_column_families_helper(self): column_families={COLUMN_FAMILY_ID: column_family}, ) - # Patch the stub used by the API method. - client._table_stub = stub = _FakeStub(response_pb) + bigtable_table_stub = ( + client._table_admin_client.bigtable_table_admin_stub) + bigtable_table_stub.GetTable.side_effect = [response_pb] # Create expected_result. expected_result = { @@ -337,51 +319,18 @@ def _list_column_families_helper(self): # Perform the method and check the result. result = table.list_column_families() self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'GetTable', - (request_pb,), - {}, - )]) def test_list_column_families(self): self._list_column_families_helper() - def test_delete(self): - from google.protobuf import empty_pb2 - from tests.unit._testing import _FakeStub - - client = _Client() - instance = _Instance(self.INSTANCE_NAME, client=client) - table = self._make_one(self.TABLE_ID, instance) - - # Create request_pb - request_pb = _DeleteTableRequestPB(name=self.TABLE_NAME) - - # Create response_pb - response_pb = empty_pb2.Empty() - - # Patch the stub used by the API method. - client._table_stub = stub = _FakeStub(response_pb) - - # Create expected_result. - expected_result = None # delete() has no return value. - - # Perform the method and check the result. 
- result = table.delete() - self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'DeleteTable', - (request_pb,), - {}, - )]) - def _read_row_helper(self, chunks, expected_result): from google.cloud._testing import _Monkey - from tests.unit._testing import _FakeStub from google.cloud.bigtable import table as MUT - client = _Client() - instance = _Instance(self.INSTANCE_NAME, client=client) + channel = self._make_channel() + client = self._make_client(project='project-id', channel=channel, + admin=True) + instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) # Create request_pb @@ -400,7 +349,8 @@ def mock_create_row_request(table_name, row_key, filter_): response_iterator = iter([response_pb]) # Patch the stub used by the API method. - client._data_stub = stub = _FakeStub(response_iterator) + bigtable_stub = client._table_data_client.bigtable_stub + bigtable_stub.ReadRows.side_effect = [response_iterator] # Perform the method and check the result. filter_obj = object() @@ -408,11 +358,6 @@ def mock_create_row_request(table_name, row_key, filter_): result = table.read_row(self.ROW_KEY, filter_=filter_obj) self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'ReadRows', - (request_pb,), - {}, - )]) self.assertEqual(mock_created, [(table.name, self.ROW_KEY, filter_obj)]) @@ -458,7 +403,10 @@ def test_read_row_still_partial(self): def test_mutate_rows(self): from google.rpc.status_pb2 import Status - instance = mock.MagicMock() + channel = self._make_channel() + client = self._make_client(project='project-id', channel=channel, + admin=True) + instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) response = [Status(code=0), Status(code=1)] @@ -478,8 +426,10 @@ def test_read_rows(self): from google.cloud.bigtable.row_data import PartialRowsData from google.cloud.bigtable import table as MUT - client = _Client() - instance = _Instance(self.INSTANCE_NAME, client=client) + channel = self._make_channel() + client = self._make_client(project='project-id', channel=channel, + admin=True) + instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) # Create request_pb @@ -490,12 +440,10 @@ def mock_create_row_request(table_name, **kwargs): mock_created.append((table_name, kwargs)) return request - # Patch the stub used by the API method. - client._data_stub = mock.MagicMock() - # Create expected_result. - expected_result = PartialRowsData(client._data_stub.ReadRows, - request) + expected_result = PartialRowsData( + client._table_data_client.bigtable_stub.ReadRows, + request) # Perform the method and check the result. start_key = b'start-key' @@ -518,8 +466,10 @@ def mock_create_row_request(table_name, **kwargs): self.assertEqual(mock_created, [(table.name, created_kwargs)]) def test_yield_retry_rows(self): - client = _Client() - instance = _Instance(self.INSTANCE_NAME, client=client) + channel = self._make_channel() + client = self._make_client(project='project-id', channel=channel, + admin=True) + instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) # Create response_iterator @@ -548,10 +498,9 @@ def test_yield_retry_rows(self): response_iterator = _MockReadRowsIterator(response_2) # Patch the stub used by the API method. 
- client._data_stub = mock.MagicMock() - client._data_stub.ReadRows.side_effect = [response_failure_iterator_1, - response_failure_iterator_2, - response_iterator] + client._table_data_client.bigtable_stub.ReadRows.side_effect = [ + response_failure_iterator_1, response_failure_iterator_2, + response_iterator] rows = [] for row in table.yield_rows(start_key=self.ROW_KEY_1, @@ -562,32 +511,25 @@ def test_yield_retry_rows(self): self.assertEqual(result.row_key, self.ROW_KEY_2) def test_sample_row_keys(self): - from tests.unit._testing import _FakeStub - - client = _Client() - instance = _Instance(self.INSTANCE_NAME, client=client) + channel = self._make_channel() + client = self._make_client(project='project-id', channel=channel, + admin=True) + instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) - # Create request_pb - request_pb = _SampleRowKeysRequestPB(table_name=self.TABLE_NAME) - # Create response_iterator response_iterator = object() # Just passed to a mock. # Patch the stub used by the API method. - client._data_stub = stub = _FakeStub(response_iterator) + bigtable_stub = client._table_data_client.bigtable_stub + bigtable_stub.SampleRowKeys.side_effect = [[response_iterator]] # Create expected_result. expected_result = response_iterator # Perform the method and check the result. result = table.sample_row_keys() - self.assertEqual(result, expected_result) - self.assertEqual(stub.method_calls, [( - 'SampleRowKeys', - (request_pb,), - {}, - )]) + self.assertEqual(result[0], expected_result) class Test__RetryableMutateRowsWorker(unittest.TestCase): @@ -604,6 +546,16 @@ class Test__RetryableMutateRowsWorker(unittest.TestCase): RETRYABLE_2 = StatusCode.ABORTED.value[0] NON_RETRYABLE = StatusCode.CANCELLED.value[0] + @mock.patch('google.auth.transport.grpc.secure_authorized_channel') + def _make_channel(self, secure_authorized_channel): + from google.api_core import grpc_helpers + target = 'example.com:443' + + channel = grpc_helpers.create_channel( + target, credentials=mock.sentinel.credentials) + + return channel + @staticmethod def _get_target_class_for_worker(): from google.cloud.bigtable.table import _RetryableMutateRowsWorker @@ -622,6 +574,15 @@ def _get_target_class_for_table(): def _make_table(self, *args, **kwargs): return self._get_target_class_for_table()(*args, **kwargs) + @staticmethod + def _get_target_client_class(): + from google.cloud.bigtable.client import Client + + return Client + + def _make_client(self, *args, **kwargs): + return self._get_target_client_class()(*args, **kwargs) + def _make_responses_statuses(self, codes): from google.rpc.status_pb2 import Status @@ -630,7 +591,7 @@ def _make_responses_statuses(self, codes): def _make_responses(self, codes): import six - from google.cloud.bigtable._generated.bigtable_pb2 import ( + from google.cloud.bigtable_v2.proto.bigtable_pb2 import ( MutateRowsResponse) from google.rpc.status_pb2 import Status @@ -640,11 +601,13 @@ def _make_responses(self, codes): return MutateRowsResponse(entries=entries) def test_callable_empty_rows(self): - client = _Client() - instance = _Instance(self.INSTANCE_NAME, client=client) + channel = self._make_channel() + client = self._make_client(project='project-id', channel=channel, + admin=True) + instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_table(self.TABLE_ID, instance) - worker = self._make_worker(table._instance._client, table.name, []) + worker = self._make_worker(client, table.name, []) statuses = 
worker() self.assertEqual(len(statuses), 0) @@ -662,8 +625,10 @@ def test_callable_no_retry_strategy(self): # - State of responses_statuses should be # [success, retryable, non-retryable] - client = _Client() - instance = _Instance(self.INSTANCE_NAME, client=client) + channel = self._make_channel() + client = self._make_client(project='project-id', channel=channel, + admin=True) + instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_table(self.TABLE_ID, instance) row_1 = DirectRow(row_key=b'row_key', table=table) @@ -679,8 +644,8 @@ def test_callable_no_retry_strategy(self): self.NON_RETRYABLE]) # Patch the stub used by the API method. - client._data_stub = mock.MagicMock() - client._data_stub.MutateRows.return_value = [response] + bigtable_stub = client._table_data_client.bigtable_stub + bigtable_stub.MutateRows.return_value = [response] worker = self._make_worker(client, table.name, [row_1, row_2, row_3]) statuses = worker(retry=None) @@ -688,7 +653,7 @@ def test_callable_no_retry_strategy(self): result = [status.code for status in statuses] expected_result = [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE] - client._data_stub.MutateRows.assert_called_once() + client._table_data_client.bigtable_stub.MutateRows.assert_called_once() self.assertEqual(result, expected_result) def test_callable_retry(self): @@ -706,8 +671,10 @@ def test_callable_retry(self): # - State of responses_statuses should be # [success, success, non-retryable] - client = _Client() - instance = _Instance(self.INSTANCE_NAME, client=client) + channel = self._make_channel() + client = self._make_client(project='project-id', channel=channel, + admin=True) + instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_table(self.TABLE_ID, instance) row_1 = DirectRow(row_key=b'row_key', table=table) @@ -724,8 +691,8 @@ def test_callable_retry(self): response_2 = self._make_responses([self.SUCCESS]) # Patch the stub used by the API method. - client._data_stub = mock.MagicMock() - client._data_stub.MutateRows.side_effect = [[response_1], [response_2]] + client._table_data_client.bigtable_stub.MutateRows.side_effect = [ + [response_1], [response_2]] retry = DEFAULT_RETRY.with_delay(initial=0.1) worker = self._make_worker(client, table.name, [row_1, row_2, row_3]) @@ -734,10 +701,8 @@ def test_callable_retry(self): result = [status.code for status in statuses] expected_result = [self.SUCCESS, self.SUCCESS, self.NON_RETRYABLE] - client._data_stub.MutateRows.assert_has_calls([ - mock.call(mock.ANY), - mock.call(mock.ANY)]) - self.assertEqual(client._data_stub.MutateRows.call_count, 2) + self.assertEqual( + client._table_data_client.bigtable_stub.MutateRows.call_count, 2) self.assertEqual(result, expected_result) def test_callable_retry_timeout(self): @@ -755,8 +720,10 @@ def test_callable_retry_timeout(self): # - By the time deadline is reached, statuses should be # [retryable, retryable] - client = _Client() - instance = _Instance(self.INSTANCE_NAME, client=client) + channel = self._make_channel() + client = self._make_client(project='project-id', channel=channel, + admin=True) + instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_table(self.TABLE_ID, instance) row_1 = DirectRow(row_key=b'row_key', table=table) @@ -767,8 +734,8 @@ def test_callable_retry_timeout(self): response = self._make_responses([self.RETRYABLE_1, self.RETRYABLE_1]) # Patch the stub used by the API method. 
- client._data_stub = mock.MagicMock() - client._data_stub.MutateRows.return_value = [response] + bigtable_stub = client._table_data_client.bigtable_stub + bigtable_stub.MutateRows.return_value = [response] retry = DEFAULT_RETRY.with_delay( initial=0.1, maximum=0.2, multiplier=2.0).with_deadline(0.5) @@ -778,22 +745,24 @@ def test_callable_retry_timeout(self): result = [status.code for status in statuses] expected_result = [self.RETRYABLE_1, self.RETRYABLE_1] - self.assertTrue(client._data_stub.MutateRows.call_count > 1) + self.assertTrue( + client._table_data_client.bigtable_stub.MutateRows.call_count > 1) self.assertEqual(result, expected_result) def test_do_mutate_retryable_rows_empty_rows(self): - client = _Client() - instance = _Instance(self.INSTANCE_NAME, client=client) + channel = self._make_channel() + client = self._make_client(project='project-id', channel=channel, + admin=True) + instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_table(self.TABLE_ID, instance) - worker = self._make_worker(table._instance._client, table.name, []) + worker = self._make_worker(client, table.name, []) statuses = worker._do_mutate_retryable_rows() self.assertEqual(len(statuses), 0) def test_do_mutate_retryable_rows(self): from google.cloud.bigtable.row import DirectRow - from tests.unit._testing import _FakeStub # Setup: # - Mutate 2 rows. @@ -802,8 +771,10 @@ def test_do_mutate_retryable_rows(self): # Expectation: # - Expect [success, non-retryable] - client = _Client() - instance = _Instance(self.INSTANCE_NAME, client=client) + channel = self._make_channel() + client = self._make_client(project='project-id', channel=channel, + admin=True) + instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_table(self.TABLE_ID, instance) row_1 = DirectRow(row_key=b'row_key', table=table) @@ -814,10 +785,10 @@ def test_do_mutate_retryable_rows(self): response = self._make_responses([self.SUCCESS, self.NON_RETRYABLE]) # Patch the stub used by the API method. - client._data_stub = _FakeStub([response]) + bigtable_stub = client._table_data_client.bigtable_stub + bigtable_stub.MutateRows.side_effect = [[response]] - worker = self._make_worker( - table._instance._client, table.name, [row_1, row_2]) + worker = self._make_worker(client, table.name, [row_1, row_2]) statuses = worker._do_mutate_retryable_rows() result = [status.code for status in statuses] @@ -828,7 +799,6 @@ def test_do_mutate_retryable_rows(self): def test_do_mutate_retryable_rows_retry(self): from google.cloud.bigtable.row import DirectRow from google.cloud.bigtable.table import _BigtableRetryableError - from tests.unit._testing import _FakeStub # Setup: # - Mutate 3 rows. @@ -839,8 +809,10 @@ def test_do_mutate_retryable_rows_retry(self): # - State of responses_statuses should be # [success, retryable, non-retryable] - client = _Client() - instance = _Instance(self.INSTANCE_NAME, client=client) + channel = self._make_channel() + client = self._make_client(project='project-id', channel=channel, + admin=True) + instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_table(self.TABLE_ID, instance) row_1 = DirectRow(row_key=b'row_key', table=table) @@ -856,10 +828,10 @@ def test_do_mutate_retryable_rows_retry(self): self.NON_RETRYABLE]) # Patch the stub used by the API method. 
- client._data_stub = _FakeStub([response]) + bigtable_stub = client._table_data_client.bigtable_stub + bigtable_stub.MutateRows.side_effect = [[response]] - worker = self._make_worker( - table._instance._client, table.name, [row_1, row_2, row_3]) + worker = self._make_worker(client, table.name, [row_1, row_2, row_3]) with self.assertRaises(_BigtableRetryableError): worker._do_mutate_retryable_rows() @@ -873,7 +845,6 @@ def test_do_mutate_retryable_rows_retry(self): def test_do_mutate_retryable_rows_second_retry(self): from google.cloud.bigtable.row import DirectRow from google.cloud.bigtable.table import _BigtableRetryableError - from tests.unit._testing import _FakeStub # Setup: # - Mutate 4 rows. @@ -889,8 +860,10 @@ def test_do_mutate_retryable_rows_second_retry(self): # - Exception contains response whose index should be '3' even though # only two rows were retried. - client = _Client() - instance = _Instance(self.INSTANCE_NAME, client=client) + channel = self._make_channel() + client = self._make_client(project='project-id', channel=channel, + admin=True) + instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_table(self.TABLE_ID, instance) row_1 = DirectRow(row_key=b'row_key', table=table) @@ -905,11 +878,11 @@ def test_do_mutate_retryable_rows_second_retry(self): response = self._make_responses([self.SUCCESS, self.RETRYABLE_1]) # Patch the stub used by the API method. - client._data_stub = _FakeStub([response]) + bigtable_stub = client._table_data_client.bigtable_stub + bigtable_stub.MutateRows.side_effect = [[response]] - worker = self._make_worker( - table._instance._client, - table.name, [row_1, row_2, row_3, row_4]) + worker = self._make_worker(client, table.name, + [row_1, row_2, row_3, row_4]) worker.responses_statuses = self._make_responses_statuses([ self.SUCCESS, self.RETRYABLE_1, @@ -930,7 +903,6 @@ def test_do_mutate_retryable_rows_second_retry(self): def test_do_mutate_retryable_rows_second_try(self): from google.cloud.bigtable.row import DirectRow - from tests.unit._testing import _FakeStub # Setup: # - Mutate 4 rows. @@ -942,8 +914,10 @@ def test_do_mutate_retryable_rows_second_try(self): # - After second try: # [success, non-retryable, non-retryable, success] - client = _Client() - instance = _Instance(self.INSTANCE_NAME, client=client) + channel = self._make_channel() + client = self._make_client(project='project-id', channel=channel, + admin=True) + instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_table(self.TABLE_ID, instance) row_1 = DirectRow(row_key=b'row_key', table=table) @@ -958,11 +932,11 @@ def test_do_mutate_retryable_rows_second_try(self): response = self._make_responses([self.NON_RETRYABLE, self.SUCCESS]) # Patch the stub used by the API method. 
- client._data_stub = _FakeStub([response]) + bigtable_stub = client._table_data_client.bigtable_stub + bigtable_stub.MutateRows.side_effect = [[response]] - worker = self._make_worker( - table._instance._client, - table.name, [row_1, row_2, row_3, row_4]) + worker = self._make_worker(client, table.name, + [row_1, row_2, row_3, row_4]) worker.responses_statuses = self._make_responses_statuses([ self.SUCCESS, self.RETRYABLE_1, @@ -990,8 +964,10 @@ def test_do_mutate_retryable_rows_second_try_no_retryable(self): # Expectation: # - After second try: [success, non-retryable] - client = _Client() - instance = _Instance(self.INSTANCE_NAME, client=client) + channel = self._make_channel() + client = self._make_client(project='project-id', channel=channel, + admin=True) + instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_table(self.TABLE_ID, instance) row_1 = DirectRow(row_key=b'row_key', table=table) @@ -999,8 +975,7 @@ def test_do_mutate_retryable_rows_second_try_no_retryable(self): row_2 = DirectRow(row_key=b'row_key_2', table=table) row_2.set_cell('cf', b'col', b'value2') - worker = self._make_worker( - table._instance._client, table.name, [row_1, row_2]) + worker = self._make_worker(client, table.name, [row_1, row_2]) worker.responses_statuses = self._make_responses_statuses( [self.SUCCESS, self.NON_RETRYABLE]) @@ -1013,10 +988,11 @@ def test_do_mutate_retryable_rows_second_try_no_retryable(self): def test_do_mutate_retryable_rows_mismatch_num_responses(self): from google.cloud.bigtable.row import DirectRow - from tests.unit._testing import _FakeStub - client = _Client() - instance = _Instance(self.INSTANCE_NAME, client=client) + channel = self._make_channel() + client = self._make_client(project='project-id', channel=channel, + admin=True) + instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_table(self.TABLE_ID, instance) row_1 = DirectRow(row_key=b'row_key', table=table) @@ -1027,11 +1003,10 @@ def test_do_mutate_retryable_rows_mismatch_num_responses(self): response = self._make_responses([self.SUCCESS]) # Patch the stub used by the API method. 
- client._data_stub = _FakeStub([response]) + bigtable_stub = client._table_data_client.bigtable_stub + bigtable_stub.MutateRows.side_effect = [[response]] - worker = self._make_worker( - table._instance._client, - table.name, [row_1, row_2]) + worker = self._make_worker(client, table.name, [row_1, row_2]) with self.assertRaises(RuntimeError): worker._do_mutate_retryable_rows() @@ -1128,43 +1103,15 @@ def test_with_limit(self): self.assertEqual(result, expected_result) -def _CreateTableRequestPB(*args, **kw): - from google.cloud.bigtable._generated import ( - bigtable_table_admin_pb2 as table_admin_v2_pb2) - - return table_admin_v2_pb2.CreateTableRequest(*args, **kw) - - -def _CreateTableRequestSplitPB(*args, **kw): - from google.cloud.bigtable._generated import ( - bigtable_table_admin_pb2 as table_admin_v2_pb2) - - return table_admin_v2_pb2.CreateTableRequest.Split(*args, **kw) - - -def _DeleteTableRequestPB(*args, **kw): - from google.cloud.bigtable._generated import ( - bigtable_table_admin_pb2 as table_admin_v2_pb2) - - return table_admin_v2_pb2.DeleteTableRequest(*args, **kw) - - -def _GetTableRequestPB(*args, **kw): - from google.cloud.bigtable._generated import ( - bigtable_table_admin_pb2 as table_admin_v2_pb2) - - return table_admin_v2_pb2.GetTableRequest(*args, **kw) - - def _ReadRowsRequestPB(*args, **kw): - from google.cloud.bigtable._generated import ( + from google.cloud.bigtable_v2.proto import ( bigtable_pb2 as messages_v2_pb2) return messages_v2_pb2.ReadRowsRequest(*args, **kw) def _ReadRowsResponseCellChunkPB(*args, **kw): - from google.cloud.bigtable._generated import ( + from google.cloud.bigtable_v2.proto import ( bigtable_pb2 as messages_v2_pb2) family_name = kw.pop('family_name') @@ -1176,55 +1123,19 @@ def _ReadRowsResponseCellChunkPB(*args, **kw): def _ReadRowsResponsePB(*args, **kw): - from google.cloud.bigtable._generated import ( + from google.cloud.bigtable_v2.proto import ( bigtable_pb2 as messages_v2_pb2) return messages_v2_pb2.ReadRowsResponse(*args, **kw) -def _SampleRowKeysRequestPB(*args, **kw): - from google.cloud.bigtable._generated import ( - bigtable_pb2 as messages_v2_pb2) - - return messages_v2_pb2.SampleRowKeysRequest(*args, **kw) - - def _mutate_rows_request_pb(*args, **kw): - from google.cloud.bigtable._generated import ( + from google.cloud.bigtable_v2.proto import ( bigtable_pb2 as data_messages_v2_pb2) return data_messages_v2_pb2.MutateRowsRequest(*args, **kw) -def _TablePB(*args, **kw): - from google.cloud.bigtable._generated import ( - table_pb2 as table_v2_pb2) - - return table_v2_pb2.Table(*args, **kw) - - -def _ColumnFamilyPB(*args, **kw): - from google.cloud.bigtable._generated import ( - table_pb2 as table_v2_pb2) - - return table_v2_pb2.ColumnFamily(*args, **kw) - - -class _Client(object): - - data_stub = None - instance_stub = None - operations_stub = None - table_stub = None - - -class _Instance(object): - - def __init__(self, name, client=None): - self.name = name - self._client = client - - class _MockReadRowsIterator(object): def __init__(self, *values): self.iter_values = iter(values) @@ -1282,3 +1193,17 @@ class _ReadRowsResponseV2(object): def __init__(self, chunks, last_scanned_row_key=''): self.chunks = chunks self.last_scanned_row_key = last_scanned_row_key + + +def _TablePB(*args, **kw): + from google.cloud.bigtable_admin_v2.proto import ( + table_pb2 as table_v2_pb2) + + return table_v2_pb2.Table(*args, **kw) + + +def _ColumnFamilyPB(*args, **kw): + from google.cloud.bigtable_admin_v2.proto import ( + table_pb2 as 
table_v2_pb2) + + return table_v2_pb2.ColumnFamily(*args, **kw) From 1017882e712a001af4fc067fdd60b6e222b83e84 Mon Sep 17 00:00:00 2001 From: Christopher Wilcox Date: Fri, 4 May 2018 09:01:24 -0700 Subject: [PATCH 125/892] Add Test runs for Python 3.7 and remove 3.4 (#5295) * remove 3.4 from unit test runs * add 3.7 to most packages. PubSub, Monitoring, BigQuery not enabled * Fix #5292 by draining queue in a way compatible with SimpleQueue and Queue --- packages/google-cloud-bigtable/nox.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/nox.py b/packages/google-cloud-bigtable/nox.py index f8bfb4b33c8a..3b9a2aad12f4 100644 --- a/packages/google-cloud-bigtable/nox.py +++ b/packages/google-cloud-bigtable/nox.py @@ -53,7 +53,7 @@ def default(session): @nox.session -@nox.parametrize('py', ['2.7', '3.4', '3.5', '3.6']) +@nox.parametrize('py', ['2.7', '3.5', '3.6', '3.7']) def unit(session, py): """Run the unit test suite.""" From 9617a2600ed41d37adb18a3e801d65d1ea03197d Mon Sep 17 00:00:00 2001 From: Christopher Wilcox Date: Wed, 16 May 2018 10:14:30 -0700 Subject: [PATCH 126/892] Modify system tests to use prerelease versions of grpcio (#5304) --- packages/google-cloud-bigtable/nox.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/packages/google-cloud-bigtable/nox.py b/packages/google-cloud-bigtable/nox.py index 3b9a2aad12f4..fe446a4a3a81 100644 --- a/packages/google-cloud-bigtable/nox.py +++ b/packages/google-cloud-bigtable/nox.py @@ -81,6 +81,9 @@ def system(session, py): # Set the virtualenv dirname. session.virtualenv_dirname = 'sys-' + py + # Use pre-release gRPC for system tests. + session.install('--pre', 'grpcio') + # Install all test dependencies, then install this package into the # virtualenv's dist-packages. session.install('mock', 'pytest', *LOCAL_DEPS) From 45a81ebe26c710c3a276cecdaddb4d6fd015c041 Mon Sep 17 00:00:00 2001 From: Aneep Tandel Date: Thu, 17 May 2018 01:16:54 +0530 Subject: [PATCH 127/892] BigTable: Modify system test for new GAPIC code (#5302) * Provide new auto-generated layer for Bigtable. * Change bigtable_pb2 imports to use from gapic library. 
* Add retry for read rows * Add parameter start_inclusive to _create_row_request * Add retry for Deadline Exceeded on read rows * Refactor yield_rows retry * Add grpc google iam v1 on setup.py on bigtable * Change routing_header to use to_grpc_metadata --- .../google/cloud/bigtable/client.py | 65 ++-- .../google/cloud/bigtable/cluster.py | 10 +- .../google/cloud/bigtable/instance.py | 49 ++- .../google-cloud-bigtable/tests/system.py | 16 +- .../tests/unit/_testing.py | 14 + .../tests/unit/test_client.py | 86 +++-- .../tests/unit/test_cluster.py | 67 ++-- .../tests/unit/test_column_family.py | 41 ++- .../tests/unit/test_instance.py | 132 +++++-- .../tests/unit/test_row.py | 63 ++-- .../tests/unit/test_table.py | 342 ++++++++++++------ 11 files changed, 601 insertions(+), 284 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py index 0e6ed808e102..7d509170f8b9 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py @@ -29,13 +29,21 @@ """ -from google.cloud.bigtable.instance import Instance -from google.cloud.bigtable.instance import _EXISTING_INSTANCE_LOCATION_ID +from google.api_core.gapic_v1 import client_info from google.cloud import bigtable_v2 from google.cloud import bigtable_admin_v2 +from google.cloud.bigtable import __version__ +from google.cloud.bigtable.instance import Instance +from google.cloud.bigtable.instance import _EXISTING_INSTANCE_LOCATION_ID + +from google.cloud.client import ClientWithProject + +_CLIENT_INFO = client_info.ClientInfo( + client_library_version=__version__) +SPANNER_ADMIN_SCOPE = 'https://www.googleapis.com/auth/spanner.admin' ADMIN_SCOPE = 'https://www.googleapis.com/auth/bigtable.admin' """Scope for interacting with the Cluster Admin and Table Admin APIs.""" DATA_SCOPE = 'https://www.googleapis.com/auth/bigtable.data' @@ -44,7 +52,7 @@ """Scope for reading table data.""" -class Client(object): +class Client(ClientWithProject): """Client for interacting with Google Cloud Bigtable API. .. note:: @@ -81,6 +89,9 @@ class Client(object): :raises: :class:`ValueError ` if both ``read_only`` and ``admin`` are :data:`True` """ + _table_data_client = None + _table_admin_client = None + _instance_admin_client = None def __init__(self, project=None, credentials=None, read_only=False, admin=False, channel=None): @@ -90,13 +101,11 @@ def __init__(self, project=None, credentials=None, # NOTE: We set the scopes **before** calling the parent constructor. # It **may** use those scopes in ``with_scopes_if_required``. - self.project = project self._read_only = bool(read_only) self._admin = bool(admin) self._channel = channel - self._credentials = credentials self.SCOPE = self._get_scopes() - super(Client, self).__init__() + super(Client, self).__init__(project=project, credentials=credentials) def _get_scopes(self): """Get the scopes corresponding to admin / read-only state. @@ -130,21 +139,27 @@ def project_path(self): :rtype: str :returns: Return a fully-qualified project string. """ - instance_client = self._instance_admin_client + instance_client = self.instance_admin_client return instance_client.project_path(self.project) @property - def _table_data_client(self): + def table_data_client(self): """Getter for the gRPC stub used for the Table Admin API. :rtype: :class:`.bigtable_v2.BigtableClient` :returns: A BigtableClient object. 
""" - return bigtable_v2.BigtableClient(channel=self._channel, - credentials=self._credentials) + if self._table_data_client is None: + if not self._admin: + raise ValueError('Client is not an admin client.') + self._table_data_client = ( + bigtable_v2.BigtableClient(credentials=self._credentials, + client_info=_CLIENT_INFO)) + + return self._table_data_client @property - def _table_admin_client(self): + def table_admin_client(self): """Getter for the gRPC stub used for the Table Admin API. :rtype: :class:`.bigtable_admin_pb2.BigtableTableAdmin` @@ -153,13 +168,17 @@ def _table_admin_client(self): client is not an admin client or if it has not been :meth:`start`-ed. """ - if not self._admin: - raise ValueError('Client is not an admin client.') - return bigtable_admin_v2.BigtableTableAdminClient( - channel=self._channel, credentials=self._credentials) + if self._table_admin_client is None: + if not self._admin: + raise ValueError('Client is not an admin client.') + self._table_admin_client = ( + bigtable_admin_v2.BigtableTableAdminClient( + credentials=self._credentials, client_info=_CLIENT_INFO)) + + return self._table_admin_client @property - def _instance_admin_client(self): + def instance_admin_client(self): """Getter for the gRPC stub used for the Table Admin API. :rtype: :class:`.bigtable_admin_pb2.BigtableInstanceAdmin` @@ -168,10 +187,14 @@ def _instance_admin_client(self): client is not an admin client or if it has not been :meth:`start`-ed. """ - if not self._admin: - raise ValueError('Client is not an admin client.') - return bigtable_admin_v2.BigtableInstanceAdminClient( - channel=self._channel, credentials=self._credentials) + if self._instance_admin_client is None: + if not self._admin: + raise ValueError('Client is not an admin client.') + self._instance_admin_client = ( + bigtable_admin_v2.BigtableInstanceAdminClient( + credentials=self._credentials, client_info=_CLIENT_INFO)) + + return self._instance_admin_client def instance(self, instance_id, location=_EXISTING_INSTANCE_LOCATION_ID, display_name=None): @@ -202,4 +225,4 @@ def list_instances(self): :rtype: :class:`~google.api_core.page_iterator.Iterator` :returns: A list of Instance. """ - return self._instance_admin_client.list_instances(self.project_path) + return self.instance_admin_client.list_instances(self.project_path) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py index 43c200d02512..f5c31d8bc4ce 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py @@ -69,7 +69,7 @@ def name(self): :rtype: str :returns: The cluster name. """ - return self._instance._client._instance_admin_client.cluster_path( + return self._instance._client.instance_admin_client.cluster_path( self._instance._client.project, self._instance.instance_id, self.cluster_id) @@ -90,7 +90,7 @@ def __ne__(self, other): def reload(self): """Reload the metadata for this cluster.""" - self._instance._client._instance_admin_client.get_cluster(self.name) + self._instance._client.instance_admin_client.get_cluster(self.name) def create(self): """Create this cluster. @@ -113,7 +113,7 @@ def create(self): create operation. 
""" client = self._instance._client - return client._instance_admin_client.create_cluster( + return client.instance_admin_client.create_cluster( self._instance.name, self.cluster_id, {}) def update(self, location='', serve_nodes=0): @@ -147,7 +147,7 @@ def update(self, location='', serve_nodes=0): update operation. """ client = self._instance._client - return client._instance_admin_client.update_cluster( + return client.instance_admin_client.update_cluster( self.name, location, serve_nodes) def delete(self): @@ -171,4 +171,4 @@ def delete(self): permanently deleted. """ client = self._instance._client - client._instance_admin_client.delete_cluster(self.name) + client.instance_admin_client.delete_cluster(self.name) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py index 1030aab9b939..8543f88409d4 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py @@ -18,13 +18,16 @@ import re from google.cloud.bigtable.table import Table +from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES from google.cloud.bigtable_admin_v2 import enums +from google.cloud.bigtable_admin_v2.types import instance_pb2 _EXISTING_INSTANCE_LOCATION_ID = 'see-existing-cluster' _INSTANCE_NAME_RE = re.compile(r'^projects/(?P[^/]+)/' r'instances/(?P[a-z][-a-z0-9]*)$') +_STORAGE_TYPE_UNSPECIFIED = enums.StorageType.STORAGE_TYPE_UNSPECIFIED class Instance(object): @@ -59,15 +62,31 @@ class Instance(object): Cloud Console UI. (Must be between 4 and 30 characters.) If this value is not set in the constructor, will fall back to the instance ID. + + :type serve_nodes: int + :param serve_nodes: (Optional) The number of nodes in the instance's + cluster; used to set up the instance's cluster. + + :type default_storage_type: int + :param default_storage_type: (Optional) The default values are + STORAGE_TYPE_UNSPECIFIED = 0: The user did + not specify a storage type. + SSD = 1: Flash (SSD) storage should be + used. + HDD = 2: Magnetic drive (HDD) storage + should be used. """ def __init__(self, instance_id, client, location_id=_EXISTING_INSTANCE_LOCATION_ID, - display_name=None): + display_name=None, serve_nodes=DEFAULT_SERVE_NODES, + default_storage_type=_STORAGE_TYPE_UNSPECIFIED): self.instance_id = instance_id self.display_name = display_name or instance_id self._cluster_location_id = location_id + self._cluster_serve_nodes = serve_nodes self._client = client + self._default_storage_type = default_storage_type @classmethod def from_pb(cls, instance_pb, client): @@ -140,6 +159,15 @@ def __eq__(self, other): def __ne__(self, other): return not self == other + def reload(self): + """Reload the metadata for this instance.""" + instance_pb = self._client._instance_admin_client.get_instance( + self.name) + + # NOTE: _update_from_pb does not check that the project and + # instance ID on the response match the request. + self._update_from_pb(instance_pb) + def create(self): """Create this instance. @@ -160,10 +188,25 @@ def create(self): :returns: The long-running operation corresponding to the create operation. 
""" + clusters = {} + cluster_id = '{}-cluster'.format(self.instance_id) + cluster_name = self._client._instance_admin_client.cluster_path( + self._client.project, self.instance_id, cluster_id) + location = self._client._instance_admin_client.location_path( + self._client.project, self._cluster_location_id) + cluster = instance_pb2.Cluster( + name=cluster_name, location=location, + serve_nodes=self._cluster_serve_nodes, + default_storage_type=self._default_storage_type) + instance = instance_pb2.Instance( + display_name=self.display_name + ) + clusters[cluster_id] = cluster parent = self._client.project_path + return self._client._instance_admin_client.create_instance( - parent=parent, instance_id=self.instance_id, instance={}, - clusters={}) + parent=parent, instance_id=self.instance_id, instance=instance, + clusters=clusters) def update(self): """Update this instance. diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index 12bd102690c0..ff87f1bc9444 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -86,12 +86,13 @@ def setUpModule(): if not Config.IN_EMULATOR: retry = RetryErrors(GrpcRendezvous, error_predicate=_retry_on_unavailable) - instances, failed_locations = retry(Config.CLIENT.list_instances)() - if len(failed_locations) != 0: + instances_response = retry(Config.CLIENT.list_instances)() + + if len(instances_response.failed_locations) != 0: raise ValueError('List instances failed in module set up.') - EXISTING_INSTANCES[:] = instances + EXISTING_INSTANCES[:] = instances_response.instances # After listing, create the test instance. created_op = Config.INSTANCE.create() @@ -116,11 +117,12 @@ def tearDown(self): instance.delete() def test_list_instances(self): - instances, failed_locations = Config.CLIENT.list_instances() - self.assertEqual(failed_locations, []) + instances_response = Config.CLIENT.list_instances() + self.assertEqual(instances_response.failed_locations, []) # We have added one new instance in `setUpModule`. 
- self.assertEqual(len(instances), len(EXISTING_INSTANCES) + 1) - for instance in instances: + self.assertEqual(len(instances_response.instances), + len(EXISTING_INSTANCES) + 1) + for instance in instances_response.instances: instance_existence = (instance in EXISTING_INSTANCES or instance == Config.INSTANCE) self.assertTrue(instance_existence) diff --git a/packages/google-cloud-bigtable/tests/unit/_testing.py b/packages/google-cloud-bigtable/tests/unit/_testing.py index 06881806de1e..3bae0d9ce4a7 100644 --- a/packages/google-cloud-bigtable/tests/unit/_testing.py +++ b/packages/google-cloud-bigtable/tests/unit/_testing.py @@ -15,9 +15,23 @@ """Mocks used to emulate gRPC generated objects.""" +import mock + + class _FakeStub(object): """Acts as a gPRC stub.""" def __init__(self, *results): self.results = results self.method_calls = [] + + +def _make_credentials(): + import google.auth.credentials + + class _CredentialsWithScopes( + google.auth.credentials.Credentials, + google.auth.credentials.Scoped): + pass + + return mock.Mock(spec=_CredentialsWithScopes) diff --git a/packages/google-cloud-bigtable/tests/unit/test_client.py b/packages/google-cloud-bigtable/tests/unit/test_client.py index 3725b20bb8fe..8efbbbba4cdf 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/test_client.py @@ -17,27 +17,7 @@ import mock - -def _make_credentials(): - import google.auth.credentials - - class _CredentialsWithScopes( - google.auth.credentials.Credentials, - google.auth.credentials.Scoped): - pass - - return mock.Mock(spec=_CredentialsWithScopes) - - -@mock.patch('google.auth.transport.grpc.secure_authorized_channel') -def _make_channel(secure_authorized_channel): - from google.api_core import grpc_helpers - target = 'example.com:443' - - channel = grpc_helpers.create_channel( - target, credentials=mock.sentinel.credentials) - - return channel +from ._testing import _make_credentials class TestClient(unittest.TestCase): @@ -93,13 +73,14 @@ def test_credentials_getter(self): project = 'PROJECT' client = self._make_one( project=project, credentials=credentials) - self.assertIs(client._credentials, credentials) + self.assertIs(client._credentials, + credentials.with_scopes.return_value) def test_project_name_property(self): credentials = _make_credentials() project = 'PROJECT' - client = self._make_one( - project=project, credentials=credentials, admin=True) + client = self._make_one(project=project, credentials=credentials, + admin=True) project_name = 'projects/' + project self.assertEqual(client.project_path, project_name) @@ -141,20 +122,53 @@ def test_instance_factory_w_explicit_serve_nodes(self): self.assertIs(instance._client, client) def test_admin_client_w_value_error(self): - channel = _make_channel() - client = self._make_one(project=self.PROJECT, channel=channel) + credentials = _make_credentials() + client = self._make_one(project=self.PROJECT, credentials=credentials) with self.assertRaises(ValueError): - client._table_admin_client() + client.table_admin_client() + + with self.assertRaises(ValueError): + client.instance_admin_client() + + def test_table_data_client(self): + credentials = _make_credentials() + client = self._make_one(project=self.PROJECT, credentials=credentials, + admin=True) + + table_data_client = client.table_data_client + self.assertEqual(client._table_data_client, table_data_client) + + client._table_data_client = object() + table_data_client = client.table_data_client + 
self.assertEqual(client.table_data_client, table_data_client) + + def test_table_admin_client(self): + credentials = _make_credentials() + client = self._make_one(project=self.PROJECT, credentials=credentials, + admin=True) + + table_admin_client = client.table_admin_client + self.assertEqual(client._table_admin_client, table_admin_client) + + client._table_admin_client = object() + table_admin_client = client.table_admin_client + self.assertEqual(client._table_admin_client, table_admin_client) + + def test_table_data_client_w_value_error(self): + credentials = _make_credentials() + client = self._make_one(project=self.PROJECT, credentials=credentials) with self.assertRaises(ValueError): - client._instance_admin_client() + client.table_data_client() def test_list_instances(self): from google.cloud.bigtable_admin_v2.proto import ( instance_pb2 as data_v2_pb2) from google.cloud.bigtable_admin_v2.proto import ( bigtable_instance_admin_pb2 as messages_v2_pb2) + from google.cloud.bigtable_admin_v2.gapic import \ + bigtable_instance_admin_client FAILED_LOCATION = 'FAILED' INSTANCE_ID1 = 'instance-id1' @@ -164,8 +178,10 @@ def test_list_instances(self): INSTANCE_NAME2 = ( 'projects/' + self.PROJECT + '/instances/' + INSTANCE_ID2) - channel = _make_channel() - client = self._make_one(project=self.PROJECT, channel=channel, + credentials = _make_credentials() + api = bigtable_instance_admin_client.BigtableInstanceAdminClient( + mock.Mock()) + client = self._make_one(project=self.PROJECT, credentials=credentials, admin=True) # Create response_pb @@ -185,12 +201,14 @@ def test_list_instances(self): ], ) + expected_result = response_pb + # Patch the stub used by the API method. + client._instance_admin_client = api bigtable_instance_stub = ( - client._instance_admin_client.bigtable_instance_admin_stub) + client.instance_admin_client.bigtable_instance_admin_stub) bigtable_instance_stub.ListInstances.side_effect = [response_pb] - expected_result = response_pb # Perform the method and check the result. 
- result = client.list_instances() - self.assertEqual(result, expected_result) + response = client.list_instances() + self.assertEqual(response, expected_result) diff --git a/packages/google-cloud-bigtable/tests/unit/test_cluster.py b/packages/google-cloud-bigtable/tests/unit/test_cluster.py index d520bd504ee1..ec3887fc3ef0 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_cluster.py +++ b/packages/google-cloud-bigtable/tests/unit/test_cluster.py @@ -17,6 +17,8 @@ import mock +from ._testing import _make_credentials + class TestCluster(unittest.TestCase): @@ -27,16 +29,6 @@ class TestCluster(unittest.TestCase): '/instances/' + INSTANCE_ID + '/clusters/' + CLUSTER_ID) - @mock.patch('google.auth.transport.grpc.secure_authorized_channel') - def _make_channel(self, secure_authorized_channel): - from google.api_core import grpc_helpers - target = 'example.com:443' - - channel = grpc_helpers.create_channel( - target, credentials=mock.sentinel.credentials) - - return channel - @staticmethod def _get_target_class(): from google.cloud.bigtable.cluster import Cluster @@ -80,12 +72,11 @@ def test_constructor_non_default(self): def test_name_property(self): from google.cloud.bigtable.instance import Instance - channel = self._make_channel() - client = self._make_client(project=self.PROJECT, channel=channel, - admin=True) + credentials = _make_credentials() + client = self._make_client(project=self.PROJECT, + credentials=credentials, admin=True) instance = Instance(self.INSTANCE_ID, client) cluster = self._make_one(self.CLUSTER_ID, instance) - instance = Instance(self.INSTANCE_ID, client) self.assertEqual(cluster.name, self.CLUSTER_NAME) @@ -121,11 +112,15 @@ def test___ne__(self): def test_reload(self): from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES from google.cloud.bigtable.instance import Instance + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_instance_admin_client) LOCATION = 'LOCATION' - channel = self._make_channel() - client = self._make_client(project=self.PROJECT, channel=channel, - admin=True) + api = bigtable_instance_admin_client.BigtableInstanceAdminClient( + mock.Mock()) + credentials = _make_credentials() + client = self._make_client(project=self.PROJECT, + credentials=credentials, admin=True) instance = Instance(self.INSTANCE_ID, client) cluster = self._make_one(self.CLUSTER_ID, instance) @@ -136,6 +131,7 @@ def test_reload(self): ) # Patch the stub used by the API method. + client._instance_admin_client = api instance_admin_client = client._instance_admin_client instance_stub = instance_admin_client.bigtable_instance_admin_stub instance_stub.GetCluster.side_effect = [response_pb] @@ -154,10 +150,14 @@ def test_create(self): from google.api_core import operation from google.longrunning import operations_pb2 from google.cloud.bigtable.instance import Instance - - channel = self._make_channel() - client = self._make_client(project=self.PROJECT, channel=channel, - admin=True) + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_instance_admin_client) + + api = bigtable_instance_admin_client.BigtableInstanceAdminClient( + mock.Mock()) + credentials = _make_credentials() + client = self._make_client(project=self.PROJECT, + credentials=credentials, admin=True) instance = Instance(self.INSTANCE_ID, client) cluster = self._make_one(self.CLUSTER_ID, instance) @@ -169,6 +169,7 @@ def test_create(self): response_pb = operations_pb2.Operation(name=OP_NAME) # Patch the stub used by the API method. 
+ client._instance_admin_client = api instance_admin_client = client._instance_admin_client instance_stub = instance_admin_client.bigtable_instance_admin_stub instance_stub.CreateCluster.side_effect = [response_pb] @@ -191,15 +192,19 @@ def test_update(self): instance_pb2 as data_v2_pb2) from google.cloud.bigtable_admin_v2.proto import ( bigtable_instance_admin_pb2 as messages_v2_pb2) + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_instance_admin_client) NOW = datetime.datetime.utcnow() NOW_PB = _datetime_to_pb_timestamp(NOW) SERVE_NODES = 81 - channel = self._make_channel() - client = self._make_client(project=self.PROJECT, channel=channel, - admin=True) + api = bigtable_instance_admin_client.BigtableInstanceAdminClient( + mock.Mock()) + credentials = _make_credentials() + client = self._make_client(project=self.PROJECT, + credentials=credentials, admin=True) instance = Instance(self.INSTANCE_ID, client) cluster = self._make_one(self.CLUSTER_ID, instance, serve_nodes=SERVE_NODES) @@ -227,6 +232,7 @@ def test_update(self): ) # Patch the stub used by the API method. + client._instance_admin_client = api instance_admin_client = client._instance_admin_client instance_stub = instance_admin_client.bigtable_instance_admin_stub instance_stub.UpdateCluster.side_effect = [response_pb] @@ -245,10 +251,14 @@ def test_delete(self): from google.protobuf import empty_pb2 from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES from google.cloud.bigtable.instance import Instance - - channel = self._make_channel() - client = self._make_client(project=self.PROJECT, channel=channel, - admin=True) + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_instance_admin_client) + + api = bigtable_instance_admin_client.BigtableInstanceAdminClient( + mock.Mock()) + credentials = _make_credentials() + client = self._make_client(project=self.PROJECT, + credentials=credentials, admin=True) instance = Instance(self.INSTANCE_ID, client) cluster = self._make_one(self.CLUSTER_ID, instance, serve_nodes=DEFAULT_SERVE_NODES) @@ -257,6 +267,7 @@ def test_delete(self): response_pb = empty_pb2.Empty() # Patch the stub used by the API method. 
+ client._instance_admin_client = api instance_admin_client = client._instance_admin_client instance_stub = instance_admin_client.bigtable_instance_admin_stub instance_stub.DeleteCluster.side_effect = [response_pb] diff --git a/packages/google-cloud-bigtable/tests/unit/test_column_family.py b/packages/google-cloud-bigtable/tests/unit/test_column_family.py index a6178ffd6ba3..9443a198093b 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_column_family.py +++ b/packages/google-cloud-bigtable/tests/unit/test_column_family.py @@ -16,16 +16,7 @@ import mock - -@mock.patch('google.auth.transport.grpc.secure_authorized_channel') -def _make_channel(secure_authorized_channel): - from google.api_core import grpc_helpers - target = 'example.com:443' - - channel = grpc_helpers.create_channel( - target, credentials=mock.sentinel.credentials) - - return channel +from ._testing import _make_credentials class TestMaxVersionsGCRule(unittest.TestCase): @@ -374,6 +365,8 @@ def _create_test_helper(self, gc_rule=None): from google.cloud.bigtable_admin_v2.proto import ( bigtable_table_admin_pb2 as table_admin_v2_pb2) from tests.unit._testing import _FakeStub + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_table_admin_client) project_id = 'project-id' zone = 'zone' @@ -383,9 +376,10 @@ def _create_test_helper(self, gc_rule=None): table_name = ('projects/' + project_id + '/zones/' + zone + '/clusters/' + cluster_id + '/tables/' + table_id) - channel = _make_channel() - client = self._make_client(project=project_id, channel=channel, - admin=True) + api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) + credentials = _make_credentials() + client = self._make_client(project=project_id, + credentials=credentials, admin=True) table = _Table(table_name, client=client) column_family = self._make_one( column_family_id, table, gc_rule=gc_rule) @@ -407,6 +401,7 @@ def _create_test_helper(self, gc_rule=None): # Patch the stub used by the API method. stub = _FakeStub(response_pb) + client._table_admin_client = api client._table_admin_client.bigtable_table_admin_stub = stub # Create expected_result. @@ -430,6 +425,8 @@ def _update_test_helper(self, gc_rule=None): from tests.unit._testing import _FakeStub from google.cloud.bigtable_admin_v2.proto import ( bigtable_table_admin_pb2 as table_admin_v2_pb2) + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_table_admin_client) project_id = 'project-id' zone = 'zone' @@ -439,9 +436,10 @@ def _update_test_helper(self, gc_rule=None): table_name = ('projects/' + project_id + '/zones/' + zone + '/clusters/' + cluster_id + '/tables/' + table_id) - channel = _make_channel() - client = self._make_client(project=project_id, channel=channel, - admin=True) + api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) + credentials = _make_credentials() + client = self._make_client(project=project_id, + credentials=credentials, admin=True) table = _Table(table_name, client=client) column_family = self._make_one( column_family_id, table, gc_rule=gc_rule) @@ -463,6 +461,7 @@ def _update_test_helper(self, gc_rule=None): # Patch the stub used by the API method. stub = _FakeStub(response_pb) + client._table_admin_client = api client._table_admin_client.bigtable_table_admin_stub = stub # Create expected_result. 
@@ -487,6 +486,8 @@ def test_delete(self): from google.cloud.bigtable_admin_v2.proto import ( bigtable_table_admin_pb2 as table_admin_v2_pb2) from tests.unit._testing import _FakeStub + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_table_admin_client) project_id = 'project-id' zone = 'zone' @@ -496,9 +497,10 @@ def test_delete(self): table_name = ('projects/' + project_id + '/zones/' + zone + '/clusters/' + cluster_id + '/tables/' + table_id) - channel = _make_channel() - client = self._make_client(project=project_id, channel=channel, - admin=True) + api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) + credentials = _make_credentials() + client = self._make_client(project=project_id, + credentials=credentials, admin=True) table = _Table(table_name, client=client) column_family = self._make_one(column_family_id, table) @@ -514,6 +516,7 @@ def test_delete(self): # Patch the stub used by the API method. stub = _FakeStub(response_pb) + client._table_admin_client = api client._table_admin_client.bigtable_table_admin_stub = stub # Create expected_result. diff --git a/packages/google-cloud-bigtable/tests/unit/test_instance.py b/packages/google-cloud-bigtable/tests/unit/test_instance.py index f13b38d824a5..b3654ce8091e 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_instance.py +++ b/packages/google-cloud-bigtable/tests/unit/test_instance.py @@ -17,16 +17,7 @@ import mock - -@mock.patch('google.auth.transport.grpc.secure_authorized_channel') -def _make_channel(secure_authorized_channel): - from google.api_core import grpc_helpers - target = 'example.com:443' - - channel = grpc_helpers.create_channel( - target, credentials=mock.sentinel.credentials) - - return channel +from ._testing import _make_credentials class TestInstance(unittest.TestCase): @@ -162,9 +153,17 @@ def test_from_pb_project_mistmatch(self): klass.from_pb(instance_pb, client) def test_name_property(self): - channel = _make_channel() - client = self._make_client(project=self.PROJECT, channel=channel, - admin=True) + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_instance_admin_client) + + api = bigtable_instance_admin_client.BigtableInstanceAdminClient( + mock.Mock()) + credentials = _make_credentials() + client = self._make_client(project=self.PROJECT, + credentials=credentials, admin=True) + + # Patch the the API method. + client._instance_admin_client = api instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) self.assertEqual(instance.name, self.INSTANCE_NAME) @@ -193,6 +192,44 @@ def test___ne__(self): instance2 = self._make_one('instance_id2', 'client2', self.LOCATION_ID) self.assertNotEqual(instance1, instance2) + def test_reload(self): + from google.cloud.bigtable_admin_v2.proto import ( + instance_pb2 as data_v2_pb2) + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_instance_admin_client) + + api = bigtable_instance_admin_client.BigtableInstanceAdminClient( + mock.Mock()) + credentials = _make_credentials() + client = self._make_client(project=self.PROJECT, + credentials=credentials, admin=True) + instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) + + # Create response_pb + DISPLAY_NAME = u'hey-hi-hello' + response_pb = data_v2_pb2.Instance( + display_name=DISPLAY_NAME, + ) + + # Patch the stub used by the API method. 
+ client._instance_admin_client = api + bigtable_instance_stub = ( + client._instance_admin_client.bigtable_instance_admin_stub) + bigtable_instance_stub.GetInstance.side_effect = [response_pb] + + # Create expected_result. + expected_result = None # reload() has no return value. + + # Check Instance optional config values before. + self.assertEqual(instance.display_name, self.INSTANCE_ID) + + # Perform the method and check the result. + result = instance.reload() + self.assertEqual(result, expected_result) + + # Check Instance optional config values before. + self.assertEqual(instance.display_name, DISPLAY_NAME) + def test_create(self): import datetime from google.api_core import operation @@ -202,12 +239,16 @@ def test_create(self): bigtable_instance_admin_pb2 as messages_v2_pb2) from google.cloud._helpers import _datetime_to_pb_timestamp from tests.unit._testing import _FakeStub + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_instance_admin_client) NOW = datetime.datetime.utcnow() NOW_PB = _datetime_to_pb_timestamp(NOW) - channel = _make_channel() - client = self._make_client(project=self.PROJECT, channel=channel, - admin=True) + api = bigtable_instance_admin_client.BigtableInstanceAdminClient( + mock.Mock()) + credentials = _make_credentials() + client = self._make_client(project=self.PROJECT, + credentials=credentials, admin=True) instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID, display_name=self.DISPLAY_NAME) @@ -225,6 +266,7 @@ def test_create(self): # Patch the stub used by the API method. stub = _FakeStub(response_pb) + client._instance_admin_client = api client._instance_admin_client.bigtable_instance_admin_stub = stub # Perform the method and check the result. @@ -239,10 +281,14 @@ def test_create_w_explicit_serve_nodes(self): from google.api_core import operation from google.longrunning import operations_pb2 from tests.unit._testing import _FakeStub - - channel = _make_channel() - client = self._make_client(project=self.PROJECT, channel=channel, - admin=True) + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_instance_admin_client) + + api = bigtable_instance_admin_client.BigtableInstanceAdminClient( + mock.Mock()) + credentials = _make_credentials() + client = self._make_client(project=self.PROJECT, + credentials=credentials, admin=True) instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) # Create response_pb @@ -250,6 +296,7 @@ def test_create_w_explicit_serve_nodes(self): # Patch the stub used by the API method. stub = _FakeStub(response_pb) + client._instance_admin_client = api client._instance_admin_client.bigtable_instance_admin_stub = stub # Perform the method and check the result. @@ -258,12 +305,20 @@ def test_create_w_explicit_serve_nodes(self): self.assertIsInstance(result, operation.Operation) def test_update(self): - channel = _make_channel() - client = self._make_client(project=self.PROJECT, channel=channel, - admin=True) + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_instance_admin_client) + + api = bigtable_instance_admin_client.BigtableInstanceAdminClient( + mock.Mock()) + credentials = _make_credentials() + client = self._make_client(project=self.PROJECT, + credentials=credentials, admin=True) instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID, display_name=self.DISPLAY_NAME) + # Mock api calls + client._instance_admin_client = api + # Create expected_result. 
expected_result = None @@ -273,11 +328,19 @@ def test_update(self): self.assertEqual(result, expected_result) def test_delete(self): - channel = _make_channel() - client = self._make_client(project=self.PROJECT, channel=channel, - admin=True) + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_instance_admin_client) + + api = bigtable_instance_admin_client.BigtableInstanceAdminClient( + mock.Mock()) + credentials = _make_credentials() + client = self._make_client(project=self.PROJECT, + credentials=credentials, admin=True) instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) + # Mock api calls + client._instance_admin_client = api + # Create expected_result. expected_result = None # delete() has no return value. @@ -291,10 +354,17 @@ def _list_tables_helper(self, table_name=None): table_pb2 as table_data_v2_pb2) from google.cloud.bigtable_admin_v2.proto import ( bigtable_table_admin_pb2 as table_messages_v1_pb2) - - channel = _make_channel() - client = self._make_client(project=self.PROJECT, channel=channel, - admin=True) + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_table_admin_client, bigtable_instance_admin_client) + + table_api = bigtable_table_admin_client.BigtableTableAdminClient( + mock.Mock()) + instance_api = ( + bigtable_instance_admin_client.BigtableInstanceAdminClient( + mock.Mock())) + credentials = _make_credentials() + client = self._make_client(project=self.PROJECT, + credentials=credentials, admin=True) instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) # Create response_pb @@ -308,6 +378,8 @@ def _list_tables_helper(self, table_name=None): ) # Patch the stub used by the API method. + client._table_admin_client = table_api + client._instance_admin_client = instance_api bigtable_table_stub = ( client._table_admin_client.bigtable_table_admin_stub) bigtable_table_stub.ListTables.side_effect = [response_pb] diff --git a/packages/google-cloud-bigtable/tests/unit/test_row.py b/packages/google-cloud-bigtable/tests/unit/test_row.py index c17ed03cb3d6..39e701adfb26 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row.py @@ -17,16 +17,7 @@ import mock - -@mock.patch('google.auth.transport.grpc.secure_authorized_channel') -def _make_channel(secure_authorized_channel): - from google.api_core import grpc_helpers - target = 'example.com:443' - - channel = grpc_helpers.create_channel( - target, credentials=mock.sentinel.credentials) - - return channel +from ._testing import _make_credentials class TestRow(unittest.TestCase): @@ -343,6 +334,7 @@ def test_delete_cells_with_string_columns(self): def test_commit(self): from google.protobuf import empty_pb2 + from google.cloud.bigtable_v2.gapic import bigtable_client project_id = 'project-id' row_key = b'row_key' @@ -350,9 +342,10 @@ def test_commit(self): column_family_id = u'column_family_id' column = b'column' - channel = _make_channel() - client = self._make_client(project=project_id, channel=channel, - admin=True) + api = bigtable_client.BigtableClient(mock.Mock()) + credentials = _make_credentials() + client = self._make_client(project=project_id, + credentials=credentials, admin=True) table = _Table(table_name, client=client) row = self._make_one(row_key, table) @@ -363,6 +356,7 @@ def test_commit(self): response_pb = empty_pb2.Empty() # Patch the stub used by the API method. 
+ client._table_data_client = api bigtable_stub = client._table_data_client.bigtable_stub bigtable_stub.MutateRow.side_effect = [response_pb] @@ -411,11 +405,12 @@ def test_commit_too_many_mutations(self): def test_commit_no_mutations(self): from tests.unit._testing import _FakeStub + project_id = 'project-id' row_key = b'row_key' - from google.cloud.bigtable_v2 import BigtableClient - channel = _make_channel() - client = BigtableClient(channel=channel) + credentials = _make_credentials() + client = self._make_client(project=project_id, + credentials=credentials, admin=True) table = _Table(None, client=client) row = self._make_one(row_key, table) self.assertEqual(row._pb_mutations, []) @@ -475,6 +470,7 @@ def test__get_mutations(self): def test_commit(self): from google.cloud.bigtable.row_filters import RowSampleFilter + from google.cloud.bigtable_v2.gapic import bigtable_client project_id = 'project-id' row_key = b'row_key' @@ -485,8 +481,9 @@ def test_commit(self): column1 = b'column1' column2 = b'column2' - channel = _make_channel() - client = self._make_client(project=project_id, channel=channel, + api = bigtable_client.BigtableClient(mock.Mock()) + credentials = _make_credentials() + client = self._make_client(project=project_id, credentials=credentials, admin=True) table = _Table(table_name, client=client) row_filter = RowSampleFilter(0.33) @@ -501,6 +498,7 @@ def test_commit(self): predicate_matched=predicate_matched) # Patch the stub used by the API method. + client._table_data_client = api bigtable_stub = client._table_data_client.bigtable_stub bigtable_stub.CheckAndMutateRow.side_effect = [[response_pb]] @@ -534,11 +532,12 @@ def test_commit_too_many_mutations(self): def test_commit_no_mutations(self): from tests.unit._testing import _FakeStub + project_id = 'project-id' row_key = b'row_key' - from google.cloud.bigtable_v2 import BigtableClient - channel = _make_channel() - client = BigtableClient(channel=channel) + credentials = _make_credentials() + client = self._make_client(project=project_id, credentials=credentials, + admin=True) table = _Table(None, client=client) filter_ = object() row = self._make_one(row_key, table, filter_=filter_) @@ -625,14 +624,17 @@ def test_increment_cell_value(self): def test_commit(self): from google.cloud._testing import _Monkey from google.cloud.bigtable import row as MUT + from google.cloud.bigtable_v2.gapic import bigtable_client project_id = 'project-id' row_key = b'row_key' table_name = 'projects/more-stuff' column_family_id = u'column_family_id' column = b'column' - channel = _make_channel() - client = self._make_client(project=project_id, channel=channel, + + api = bigtable_client.BigtableClient(mock.Mock()) + credentials = _make_credentials() + client = self._make_client(project=project_id, credentials=credentials, admin=True) table = _Table(table_name, client=client) row = self._make_one(row_key, table) @@ -644,6 +646,9 @@ def test_commit(self): row_responses = [] expected_result = object() + # Patch API calls + client._table_data_client = api + def mock_parse_rmw_row_response(row_response): row_responses.append(row_response) return expected_result @@ -661,8 +666,9 @@ def test_commit_no_rules(self): project_id = 'project-id' row_key = b'row_key' - channel = _make_channel() - client = self._make_client(project=project_id, channel=channel, + + credentials = _make_credentials() + client = self._make_client(project=project_id, credentials=credentials, admin=True) table = _Table(None, client=client) row = self._make_one(row_key, table) @@ 
-921,13 +927,6 @@ def _ReadModifyWriteRulePB(*args, **kw): return data_v2_pb2.ReadModifyWriteRule(*args, **kw) -class _Client(object): - - data_stub = None - _table_data_client = None - _table_admin_client = None - - class _Instance(object): def __init__(self, client=None): diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index 9b8de28935ee..037a57235391 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -18,6 +18,8 @@ import grpc import mock +from ._testing import _make_credentials + class Test___mutate_rows_request(unittest.TestCase): @@ -136,16 +138,6 @@ class TestTable(unittest.TestCase): VALUE = b'value' _json_tests = None - @mock.patch('google.auth.transport.grpc.secure_authorized_channel') - def _make_channel(self, secure_authorized_channel): - from google.api_core import grpc_helpers - target = 'example.com:443' - - channel = grpc_helpers.create_channel( - target, credentials=mock.sentinel.credentials) - - return channel - @staticmethod def _get_target_class(): from google.cloud.bigtable.table import Table @@ -165,9 +157,9 @@ def _make_client(self, *args, **kwargs): return self._get_target_client_class()(*args, **kwargs) def test_constructor(self): - channel = self._make_channel() - client = self._make_client(project='project-id', channel=channel, - admin=True) + credentials = _make_credentials() + client = self._make_client(project='project-id', + credentials=credentials, admin=True) table_id = 'table-id' instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) @@ -177,9 +169,9 @@ def test_constructor(self): def test_row_factory_direct(self): from google.cloud.bigtable.row import DirectRow - channel = self._make_channel() - client = self._make_client(project='project-id', channel=channel, - admin=True) + credentials = _make_credentials() + client = self._make_client(project='project-id', + credentials=credentials, admin=True) instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) row_key = b'row_key' @@ -192,9 +184,9 @@ def test_row_factory_direct(self): def test_row_factory_conditional(self): from google.cloud.bigtable.row import ConditionalRow - channel = self._make_channel() - client = self._make_client(project='project-id', channel=channel, - admin=True) + credentials = _make_credentials() + client = self._make_client(project='project-id', + credentials=credentials, admin=True) instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) row_key = b'row_key' @@ -208,9 +200,9 @@ def test_row_factory_conditional(self): def test_row_factory_append(self): from google.cloud.bigtable.row import AppendRow - channel = self._make_channel() - client = self._make_client(project='project-id', channel=channel, - admin=True) + credentials = _make_credentials() + client = self._make_client(project='project-id', + credentials=credentials, admin=True) instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) row_key = b'row_key' @@ -221,36 +213,36 @@ def test_row_factory_append(self): self.assertEqual(row._table, table) def test_row_factory_failure(self): - channel = self._make_channel() - client = self._make_client(project='project-id', channel=channel, - admin=True) + credentials = _make_credentials() + client = 
self._make_client(project='project-id', + credentials=credentials, admin=True) instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) with self.assertRaises(ValueError): table.row(b'row_key', filter_=object(), append=True) def test___eq__(self): - channel = self._make_channel() - client = self._make_client(project='project-id', channel=channel, - admin=True) + credentials = _make_credentials() + client = self._make_client(project='project-id', + credentials=credentials, admin=True) instance = client.instance(instance_id=self.INSTANCE_ID) table1 = self._make_one(self.TABLE_ID, instance) table2 = self._make_one(self.TABLE_ID, instance) self.assertEqual(table1, table2) def test___eq__type_differ(self): - channel = self._make_channel() - client = self._make_client(project='project-id', channel=channel, - admin=True) + credentials = _make_credentials() + client = self._make_client(project='project-id', + credentials=credentials, admin=True) instance = client.instance(instance_id=self.INSTANCE_ID) table1 = self._make_one(self.TABLE_ID, instance) table2 = object() self.assertNotEqual(table1, table2) def test___ne__same_value(self): - channel = self._make_channel() - client = self._make_client(project='project-id', channel=channel, - admin=True) + credentials = _make_credentials() + client = self._make_client(project='project-id', + credentials=credentials, admin=True) instance = client.instance(instance_id=self.INSTANCE_ID) table1 = self._make_one(self.TABLE_ID, instance) table2 = self._make_one(self.TABLE_ID, instance) @@ -263,12 +255,24 @@ def test___ne__(self): self.assertNotEqual(table1, table2) def _create_test_helper(self): - channel = self._make_channel() - client = self._make_client(project='project-id', channel=channel, - admin=True) + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_instance_admin_client, bigtable_table_admin_client) + + table_api = bigtable_table_admin_client.BigtableTableAdminClient( + mock.Mock()) + instance_api = ( + bigtable_instance_admin_client.BigtableInstanceAdminClient( + mock.Mock())) + credentials = _make_credentials() + client = self._make_client(project='project-id', + credentials=credentials, admin=True) instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) + # Patch API calls + client._table_admin_client = table_api + client._instance_admin_client = instance_api + # Create expected_result. expected_result = None # create() has no return value. @@ -280,12 +284,20 @@ def test_create(self): self._create_test_helper() def test_delete(self): - channel = self._make_channel() - client = self._make_client(project='project-id', channel=channel, - admin=True) + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_table_admin_client) + + api = bigtable_table_admin_client.BigtableTableAdminClient( + mock.Mock()) + credentials = _make_credentials() + client = self._make_client(project='project-id', + credentials=credentials, admin=True) instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) + # Patch API calls + client._table_admin_client = api + # Create expected_result. expected_result = None # delete() has no return value. 
@@ -294,9 +306,14 @@ def test_delete(self): self.assertEqual(result, expected_result) def _list_column_families_helper(self): - channel = self._make_channel() - client = self._make_client(project='project-id', channel=channel, - admin=True) + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_table_admin_client) + + api = bigtable_table_admin_client.BigtableTableAdminClient( + mock.Mock()) + credentials = _make_credentials() + client = self._make_client(project='project-id', + credentials=credentials, admin=True) instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) @@ -307,6 +324,8 @@ def _list_column_families_helper(self): column_families={COLUMN_FAMILY_ID: column_family}, ) + # Patch the stub used by the API method. + client._table_admin_client = api bigtable_table_stub = ( client._table_admin_client.bigtable_table_admin_stub) bigtable_table_stub.GetTable.side_effect = [response_pb] @@ -326,10 +345,16 @@ def test_list_column_families(self): def _read_row_helper(self, chunks, expected_result): from google.cloud._testing import _Monkey from google.cloud.bigtable import table as MUT - - channel = self._make_channel() - client = self._make_client(project='project-id', channel=channel, - admin=True) + from google.cloud.bigtable_v2.gapic import bigtable_client + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_table_admin_client) + + data_api = bigtable_client.BigtableClient(mock.Mock()) + table_api = bigtable_table_admin_client.BigtableTableAdminClient( + mock.Mock()) + credentials = _make_credentials() + client = self._make_client(project='project-id', + credentials=credentials, admin=True) instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) @@ -349,6 +374,8 @@ def mock_create_row_request(table_name, row_key, filter_): response_iterator = iter([response_pb]) # Patch the stub used by the API method. 
+ client._table_data_client = data_api + client._table_admin_client = table_api bigtable_stub = client._table_data_client.bigtable_stub bigtable_stub.ReadRows.side_effect = [response_iterator] @@ -402,11 +429,16 @@ def test_read_row_still_partial(self): def test_mutate_rows(self): from google.rpc.status_pb2 import Status - - channel = self._make_channel() - client = self._make_client(project='project-id', channel=channel, - admin=True) + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_table_admin_client) + + api = bigtable_table_admin_client.BigtableTableAdminClient( + mock.Mock()) + credentials = _make_credentials() + client = self._make_client(project='project-id', + credentials=credentials, admin=True) instance = client.instance(instance_id=self.INSTANCE_ID) + client._table_admin_client = api table = self._make_one(self.TABLE_ID, instance) response = [Status(code=0), Status(code=1)] @@ -425,10 +457,18 @@ def test_read_rows(self): from google.cloud._testing import _Monkey from google.cloud.bigtable.row_data import PartialRowsData from google.cloud.bigtable import table as MUT - - channel = self._make_channel() - client = self._make_client(project='project-id', channel=channel, - admin=True) + from google.cloud.bigtable_v2.gapic import bigtable_client + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_table_admin_client) + + data_api = bigtable_client.BigtableClient(mock.Mock()) + table_api = bigtable_table_admin_client.BigtableTableAdminClient( + mock.Mock()) + credentials = _make_credentials() + client = self._make_client(project='project-id', + credentials=credentials, admin=True) + client._table_data_client = data_api + client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) @@ -466,9 +506,18 @@ def mock_create_row_request(table_name, **kwargs): self.assertEqual(mock_created, [(table.name, created_kwargs)]) def test_yield_retry_rows(self): - channel = self._make_channel() - client = self._make_client(project='project-id', channel=channel, - admin=True) + from google.cloud.bigtable_v2.gapic import bigtable_client + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_table_admin_client) + + data_api = bigtable_client.BigtableClient(mock.Mock()) + table_api = bigtable_table_admin_client.BigtableTableAdminClient( + mock.Mock()) + credentials = _make_credentials() + client = self._make_client(project='project-id', + credentials=credentials, admin=True) + client._table_data_client = data_api + client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) @@ -511,9 +560,18 @@ def test_yield_retry_rows(self): self.assertEqual(result.row_key, self.ROW_KEY_2) def test_sample_row_keys(self): - channel = self._make_channel() - client = self._make_client(project='project-id', channel=channel, - admin=True) + from google.cloud.bigtable_v2.gapic import bigtable_client + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_table_admin_client) + + data_api = bigtable_client.BigtableClient(mock.Mock()) + table_api = bigtable_table_admin_client.BigtableTableAdminClient( + mock.Mock()) + credentials = _make_credentials() + client = self._make_client(project='project-id', + credentials=credentials, admin=True) + client._table_data_client = data_api + client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) 
@@ -546,16 +604,6 @@ class Test__RetryableMutateRowsWorker(unittest.TestCase): RETRYABLE_2 = StatusCode.ABORTED.value[0] NON_RETRYABLE = StatusCode.CANCELLED.value[0] - @mock.patch('google.auth.transport.grpc.secure_authorized_channel') - def _make_channel(self, secure_authorized_channel): - from google.api_core import grpc_helpers - target = 'example.com:443' - - channel = grpc_helpers.create_channel( - target, credentials=mock.sentinel.credentials) - - return channel - @staticmethod def _get_target_class_for_worker(): from google.cloud.bigtable.table import _RetryableMutateRowsWorker @@ -601,9 +649,18 @@ def _make_responses(self, codes): return MutateRowsResponse(entries=entries) def test_callable_empty_rows(self): - channel = self._make_channel() - client = self._make_client(project='project-id', channel=channel, - admin=True) + from google.cloud.bigtable_v2.gapic import bigtable_client + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_table_admin_client) + + data_api = bigtable_client.BigtableClient(mock.Mock()) + table_api = bigtable_table_admin_client.BigtableTableAdminClient( + mock.Mock()) + credentials = _make_credentials() + client = self._make_client(project='project-id', + credentials=credentials, admin=True) + client._table_data_client = data_api + client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_table(self.TABLE_ID, instance) @@ -614,6 +671,9 @@ def test_callable_empty_rows(self): def test_callable_no_retry_strategy(self): from google.cloud.bigtable.row import DirectRow + from google.cloud.bigtable_v2.gapic import bigtable_client + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_table_admin_client) # Setup: # - Mutate 3 rows. @@ -625,9 +685,14 @@ def test_callable_no_retry_strategy(self): # - State of responses_statuses should be # [success, retryable, non-retryable] - channel = self._make_channel() - client = self._make_client(project='project-id', channel=channel, - admin=True) + data_api = bigtable_client.BigtableClient(mock.Mock()) + table_api = bigtable_table_admin_client.BigtableTableAdminClient( + mock.Mock()) + credentials = _make_credentials() + client = self._make_client(project='project-id', + credentials=credentials, admin=True) + client._table_data_client = data_api + client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_table(self.TABLE_ID, instance) @@ -659,6 +724,9 @@ def test_callable_no_retry_strategy(self): def test_callable_retry(self): from google.cloud.bigtable.row import DirectRow from google.cloud.bigtable.table import DEFAULT_RETRY + from google.cloud.bigtable_v2.gapic import bigtable_client + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_table_admin_client) # Setup: # - Mutate 3 rows. 
@@ -671,9 +739,14 @@ def test_callable_retry(self): # - State of responses_statuses should be # [success, success, non-retryable] - channel = self._make_channel() - client = self._make_client(project='project-id', channel=channel, - admin=True) + data_api = bigtable_client.BigtableClient(mock.Mock()) + table_api = bigtable_table_admin_client.BigtableTableAdminClient( + mock.Mock()) + credentials = _make_credentials() + client = self._make_client(project='project-id', + credentials=credentials, admin=True) + client._table_data_client = data_api + client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_table(self.TABLE_ID, instance) @@ -708,6 +781,9 @@ def test_callable_retry(self): def test_callable_retry_timeout(self): from google.cloud.bigtable.row import DirectRow from google.cloud.bigtable.table import DEFAULT_RETRY + from google.cloud.bigtable_v2.gapic import bigtable_client + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_table_admin_client) # Setup: # - Mutate 2 rows. @@ -720,9 +796,14 @@ def test_callable_retry_timeout(self): # - By the time deadline is reached, statuses should be # [retryable, retryable] - channel = self._make_channel() - client = self._make_client(project='project-id', channel=channel, - admin=True) + data_api = bigtable_client.BigtableClient(mock.Mock()) + table_api = bigtable_table_admin_client.BigtableTableAdminClient( + mock.Mock()) + credentials = _make_credentials() + client = self._make_client(project='project-id', + credentials=credentials, admin=True) + client._table_data_client = data_api + client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_table(self.TABLE_ID, instance) @@ -750,9 +831,15 @@ def test_callable_retry_timeout(self): self.assertEqual(result, expected_result) def test_do_mutate_retryable_rows_empty_rows(self): - channel = self._make_channel() - client = self._make_client(project='project-id', channel=channel, - admin=True) + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_table_admin_client) + + table_api = bigtable_table_admin_client.BigtableTableAdminClient( + mock.Mock()) + credentials = _make_credentials() + client = self._make_client(project='project-id', + credentials=credentials, admin=True) + client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_table(self.TABLE_ID, instance) @@ -763,6 +850,9 @@ def test_do_mutate_retryable_rows_empty_rows(self): def test_do_mutate_retryable_rows(self): from google.cloud.bigtable.row import DirectRow + from google.cloud.bigtable_v2.gapic import bigtable_client + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_table_admin_client) # Setup: # - Mutate 2 rows. 
@@ -771,9 +861,14 @@ def test_do_mutate_retryable_rows(self): # Expectation: # - Expect [success, non-retryable] - channel = self._make_channel() - client = self._make_client(project='project-id', channel=channel, - admin=True) + data_api = bigtable_client.BigtableClient(mock.Mock()) + table_api = bigtable_table_admin_client.BigtableTableAdminClient( + mock.Mock()) + credentials = _make_credentials() + client = self._make_client(project='project-id', + credentials=credentials, admin=True) + client._table_data_client = data_api + client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_table(self.TABLE_ID, instance) @@ -799,6 +894,9 @@ def test_do_mutate_retryable_rows(self): def test_do_mutate_retryable_rows_retry(self): from google.cloud.bigtable.row import DirectRow from google.cloud.bigtable.table import _BigtableRetryableError + from google.cloud.bigtable_v2.gapic import bigtable_client + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_table_admin_client) # Setup: # - Mutate 3 rows. @@ -809,9 +907,14 @@ def test_do_mutate_retryable_rows_retry(self): # - State of responses_statuses should be # [success, retryable, non-retryable] - channel = self._make_channel() - client = self._make_client(project='project-id', channel=channel, - admin=True) + data_api = bigtable_client.BigtableClient(mock.Mock()) + table_api = bigtable_table_admin_client.BigtableTableAdminClient( + mock.Mock()) + credentials = _make_credentials() + client = self._make_client(project='project-id', + credentials=credentials, admin=True) + client._table_data_client = data_api + client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_table(self.TABLE_ID, instance) @@ -845,6 +948,9 @@ def test_do_mutate_retryable_rows_retry(self): def test_do_mutate_retryable_rows_second_retry(self): from google.cloud.bigtable.row import DirectRow from google.cloud.bigtable.table import _BigtableRetryableError + from google.cloud.bigtable_v2.gapic import bigtable_client + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_table_admin_client) # Setup: # - Mutate 4 rows. @@ -860,9 +966,14 @@ def test_do_mutate_retryable_rows_second_retry(self): # - Exception contains response whose index should be '3' even though # only two rows were retried. - channel = self._make_channel() - client = self._make_client(project='project-id', channel=channel, - admin=True) + data_api = bigtable_client.BigtableClient(mock.Mock()) + table_api = bigtable_table_admin_client.BigtableTableAdminClient( + mock.Mock()) + credentials = _make_credentials() + client = self._make_client(project='project-id', + credentials=credentials, admin=True) + client._table_data_client = data_api + client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_table(self.TABLE_ID, instance) @@ -903,6 +1014,9 @@ def test_do_mutate_retryable_rows_second_retry(self): def test_do_mutate_retryable_rows_second_try(self): from google.cloud.bigtable.row import DirectRow + from google.cloud.bigtable_v2.gapic import bigtable_client + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_table_admin_client) # Setup: # - Mutate 4 rows. 
@@ -914,9 +1028,14 @@ def test_do_mutate_retryable_rows_second_try(self): # - After second try: # [success, non-retryable, non-retryable, success] - channel = self._make_channel() - client = self._make_client(project='project-id', channel=channel, - admin=True) + data_api = bigtable_client.BigtableClient(mock.Mock()) + table_api = bigtable_table_admin_client.BigtableTableAdminClient( + mock.Mock()) + credentials = _make_credentials() + client = self._make_client(project='project-id', + credentials=credentials, admin=True) + client._table_data_client = data_api + client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_table(self.TABLE_ID, instance) @@ -955,6 +1074,8 @@ def test_do_mutate_retryable_rows_second_try(self): def test_do_mutate_retryable_rows_second_try_no_retryable(self): from google.cloud.bigtable.row import DirectRow + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_table_admin_client) # Setup: # - Mutate 2 rows. @@ -964,9 +1085,12 @@ def test_do_mutate_retryable_rows_second_try_no_retryable(self): # Expectation: # - After second try: [success, non-retryable] - channel = self._make_channel() - client = self._make_client(project='project-id', channel=channel, - admin=True) + table_api = bigtable_table_admin_client.BigtableTableAdminClient( + mock.Mock()) + credentials = _make_credentials() + client = self._make_client(project='project-id', + credentials=credentials, admin=True) + client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_table(self.TABLE_ID, instance) @@ -988,10 +1112,18 @@ def test_do_mutate_retryable_rows_second_try_no_retryable(self): def test_do_mutate_retryable_rows_mismatch_num_responses(self): from google.cloud.bigtable.row import DirectRow - - channel = self._make_channel() - client = self._make_client(project='project-id', channel=channel, - admin=True) + from google.cloud.bigtable_v2.gapic import bigtable_client + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_table_admin_client) + + data_api = bigtable_client.BigtableClient(mock.Mock()) + table_api = bigtable_table_admin_client.BigtableTableAdminClient( + mock.Mock()) + credentials = _make_credentials() + client = self._make_client(project='project-id', + credentials=credentials, admin=True) + client._table_data_client = data_api + client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_table(self.TABLE_ID, instance) From fdfff328f96f9e33edb8655fcdf170b4f1832743 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 22 May 2018 16:03:07 -0400 Subject: [PATCH 128/892] Avoid overwriting '__module__' of messages from shared modules. (#5364) Note that we *are* still overwriting it for messages from modules defined within the current package. See #4715. 
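For reference, a condensed sketch of the pattern this change applies (the
import and module lists here are abbreviated stand-ins; the real
`_shared_modules` / `_local_modules` lists and both affected files appear in
the diff below):

    import sys

    from google.api_core.protobuf_helpers import get_messages
    from google.protobuf import timestamp_pb2                    # shared with other packages
    from google.cloud.bigtable_admin_v2.proto import table_pb2   # generated inside this package

    _shared_modules = [timestamp_pb2]   # abbreviated; see the diff for the full list
    _local_modules = [table_pb2]        # abbreviated; see the diff for the full list

    names = []

    # Shared protobuf modules: re-export their message classes, but leave
    # __module__ untouched so other packages importing the same classes
    # are not affected.
    for module in _shared_modules:
        for name, message in get_messages(module).items():
            setattr(sys.modules[__name__], name, message)
            names.append(name)

    # Locally generated modules: these messages are owned by this package,
    # so rewriting __module__ to point at the types module is still done.
    for module in _local_modules:
        for name, message in get_messages(module).items():
            message.__module__ = 'google.cloud.bigtable_admin_v2.types'
            setattr(sys.modules[__name__], name, message)
            names.append(name)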
--- .../google/cloud/bigtable_admin_v2/types.py | 60 +++++++++++-------- .../google/cloud/bigtable_v2/types.py | 38 ++++++++---- 2 files changed, 61 insertions(+), 37 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types.py index b796a7c9bf39..6f91fac2db0c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types.py @@ -15,13 +15,7 @@ from __future__ import absolute_import import sys -from google.api_core.protobuf_helpers import get_messages - from google.api import http_pb2 -from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2 -from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2 -from google.cloud.bigtable_admin_v2.proto import instance_pb2 -from google.cloud.bigtable_admin_v2.proto import table_pb2 from google.iam.v1 import iam_policy_pb2 from google.iam.v1 import policy_pb2 from google.iam.v1.logging import audit_data_pb2 @@ -34,25 +28,43 @@ from google.protobuf import timestamp_pb2 from google.rpc import status_pb2 +from google.api_core.protobuf_helpers import get_messages +from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2 +from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2 +from google.cloud.bigtable_admin_v2.proto import instance_pb2 +from google.cloud.bigtable_admin_v2.proto import table_pb2 + + +_shared_modules = [ + http_pb2, + iam_policy_pb2, + policy_pb2, + audit_data_pb2, + operations_pb2, + any_pb2, + descriptor_pb2, + duration_pb2, + empty_pb2, + field_mask_pb2, + timestamp_pb2, + status_pb2, +] + +_local_modules = [ + bigtable_instance_admin_pb2, + bigtable_table_admin_pb2, + instance_pb2, + table_pb2, +] + names = [] -for module in ( - http_pb2, - bigtable_instance_admin_pb2, - bigtable_table_admin_pb2, - instance_pb2, - table_pb2, - iam_policy_pb2, - policy_pb2, - audit_data_pb2, - operations_pb2, - any_pb2, - descriptor_pb2, - duration_pb2, - empty_pb2, - field_mask_pb2, - timestamp_pb2, - status_pb2, -): + +for module in _shared_modules: + for name, message in get_messages(module).items(): + setattr(sys.modules[__name__], name, message) + names.append(name) + +for module in _local_modules: for name, message in get_messages(module).items(): message.__module__ = 'google.cloud.bigtable_admin_v2.types' setattr(sys.modules[__name__], name, message) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types.py index b7edfdccdbd5..a5d64f46ef07 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types.py @@ -15,26 +15,38 @@ from __future__ import absolute_import import sys -from google.api_core.protobuf_helpers import get_messages - from google.api import http_pb2 -from google.cloud.bigtable_v2.proto import bigtable_pb2 -from google.cloud.bigtable_v2.proto import data_pb2 from google.protobuf import any_pb2 from google.protobuf import descriptor_pb2 from google.protobuf import wrappers_pb2 from google.rpc import status_pb2 +from google.api_core.protobuf_helpers import get_messages +from google.cloud.bigtable_v2.proto import bigtable_pb2 +from google.cloud.bigtable_v2.proto import data_pb2 + + +_shared_modules = [ + http_pb2, + any_pb2, + descriptor_pb2, + wrappers_pb2, + status_pb2, +] + +_local_modules = [ 
+ bigtable_pb2, + data_pb2, +] + names = [] -for module in ( - http_pb2, - bigtable_pb2, - data_pb2, - any_pb2, - descriptor_pb2, - wrappers_pb2, - status_pb2, -): + +for module in _shared_modules: + for name, message in get_messages(module).items(): + setattr(sys.modules[__name__], name, message) + names.append(name) + +for module in _local_modules: for name, message in get_messages(module).items(): message.__module__ = 'google.cloud.bigtable_v2.types' setattr(sys.modules[__name__], name, message) From c8ebd30e6a528055090f1798284addef01b0a165 Mon Sep 17 00:00:00 2001 From: Christopher Wilcox Date: Thu, 24 May 2018 13:02:47 -0700 Subject: [PATCH 129/892] disable bigtable system tests (#5381) --- .../google-cloud-bigtable/tests/system.py | 25 +++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index ff87f1bc9444..067bd1b86a2c 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -35,6 +35,7 @@ from test_utils.system import EmulatorCreds from test_utils.system import unique_resource_id +import pytest LOCATION_ID = 'us-central1-c' INSTANCE_ID = 'g-c-p' + unique_resource_id('-') @@ -116,6 +117,8 @@ def tearDown(self): for instance in self.instances_to_delete: instance.delete() + @pytest.mark.xfail(reason="https://github.com/GoogleCloudPlatform/" + "google-cloud-python/issues/5362") def test_list_instances(self): instances_response = Config.CLIENT.list_instances() self.assertEqual(instances_response.failed_locations, []) @@ -190,12 +193,16 @@ def tearDown(self): for table in self.tables_to_delete: table.delete() + @pytest.mark.xfail(reason="https://github.com/GoogleCloudPlatform/" + "google-cloud-python/issues/5362") def test_list_tables(self): # Since `Config.INSTANCE` is newly created in `setUpModule`, the table # created in `setUpClass` here will be the only one. 
tables = Config.INSTANCE.list_tables() self.assertEqual(tables, [self._table]) + @pytest.mark.xfail(reason="https://github.com/GoogleCloudPlatform/" + "google-cloud-python/issues/5362") def test_create_table(self): temp_table_id = 'foo-bar-baz-table' temp_table = Config.INSTANCE.table(temp_table_id) @@ -212,6 +219,8 @@ def test_create_table(self): sorted_tables = sorted(tables, key=name_attr) self.assertEqual(sorted_tables, expected_tables) + @pytest.mark.xfail(reason="https://github.com/GoogleCloudPlatform/" + "google-cloud-python/issues/5362") def test_create_column_family(self): temp_table_id = 'foo-bar-baz-table' temp_table = Config.INSTANCE.table(temp_table_id) @@ -233,6 +242,8 @@ def test_create_column_family(self): column_family.column_family_id) self.assertEqual(retrieved_col_fam.gc_rule, gc_rule) + @pytest.mark.xfail(reason="https://github.com/GoogleCloudPlatform/" + "google-cloud-python/issues/5362") def test_update_column_family(self): temp_table_id = 'foo-bar-baz-table' temp_table = Config.INSTANCE.table(temp_table_id) @@ -256,6 +267,8 @@ def test_update_column_family(self): col_fams = temp_table.list_column_families() self.assertIsNone(col_fams[COLUMN_FAMILY_ID1].gc_rule) + @pytest.mark.xfail(reason="https://github.com/GoogleCloudPlatform/" + "google-cloud-python/issues/5362") def test_delete_column_family(self): temp_table_id = 'foo-bar-baz-table' temp_table = Config.INSTANCE.table(temp_table_id) @@ -339,6 +352,8 @@ def _write_to_row(self, row1=None, row2=None, row3=None, row4=None): cell4 = Cell(CELL_VAL4, timestamp4_micros) return cell1, cell2, cell3, cell4 + @pytest.mark.xfail(reason="https://github.com/GoogleCloudPlatform/" + "google-cloud-python/issues/5362") def test_timestamp_filter_millisecond_granularity(self): from google.cloud.bigtable import row_filters @@ -349,6 +364,8 @@ def test_timestamp_filter_millisecond_granularity(self): row_data = self._table.read_rows(filter_=timefilter) row_data.consume_all() + @pytest.mark.xfail(reason="https://github.com/GoogleCloudPlatform/" + "google-cloud-python/issues/5362") def test_mutate_rows(self): row1 = self._table.row(ROW_KEY) row1.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1) @@ -376,6 +393,8 @@ def test_mutate_rows(self): self.assertEqual( row2_data.cells[COLUMN_FAMILY_ID1][COL_NAME1][0].value, CELL_VAL4) + @pytest.mark.xfail(reason="https://github.com/GoogleCloudPlatform/" + "google-cloud-python/issues/5362") def test_read_large_cell_limit(self): row = self._table.row(ROW_KEY) self.rows_to_delete.append(row) @@ -393,6 +412,8 @@ def test_read_large_cell_limit(self): self.assertEqual(len(column), 1) self.assertEqual(column[0].value, data) + @pytest.mark.xfail(reason="https://github.com/GoogleCloudPlatform/" + "google-cloud-python/issues/5362") def test_read_row(self): row = self._table.row(ROW_KEY) self.rows_to_delete.append(row) @@ -417,6 +438,8 @@ def test_read_row(self): } self.assertEqual(partial_row_data.cells, expected_row_contents) + @pytest.mark.xfail(reason="https://github.com/GoogleCloudPlatform/" + "google-cloud-python/issues/5362") def test_read_rows(self): row = self._table.row(ROW_KEY) row_alt = self._table.row(ROW_KEY_ALT) @@ -462,6 +485,8 @@ def test_read_rows(self): } self.assertEqual(rows_data.rows, expected_rows) + @pytest.mark.xfail(reason="https://github.com/GoogleCloudPlatform/" + "google-cloud-python/issues/5362") def test_read_with_label_applied(self): self._maybe_emulator_skip('Labels not supported by Bigtable emulator') row = self._table.row(ROW_KEY) From 
5ebc0fa2c25c05ec62da99b6e651871a302abc70 Mon Sep 17 00:00:00 2001 From: Aneep Tandel Date: Tue, 29 May 2018 18:19:12 +0530 Subject: [PATCH 130/892] BigTable: Add data app profile id (#5369) --- .../google/cloud/bigtable/table.py | 43 +++++++++++++------ .../tests/unit/test_table.py | 40 +++++++++++++---- 2 files changed, 62 insertions(+), 21 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index 818806ca41b9..22e889588d32 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -84,11 +84,15 @@ class Table(object): :type instance: :class:`~google.cloud.bigtable.instance.Instance` :param instance: The instance that owns the table. + + :type: app_profile_id: str + :param app_profile_id: (Optional) The unique name of the AppProfile. """ - def __init__(self, table_id, instance): + def __init__(self, table_id, instance, app_profile_id=None): self.table_id = table_id self._instance = instance + self._app_profile_id = app_profile_id @property def name(self): @@ -227,8 +231,9 @@ def read_row(self, row_key, filter_=None): :raises: :class:`ValueError ` if a commit row chunk is never encountered. """ - request_pb = _create_row_request(self.name, row_key=row_key, - filter_=filter_) + request_pb = _create_row_request( + self.name, row_key=row_key, filter_=filter_, + app_profile_id=self._app_profile_id) client = self._instance._client rows_data = PartialRowsData(client._table_data_client._read_rows, request_pb) @@ -276,7 +281,8 @@ def read_rows(self, start_key=None, end_key=None, limit=None, """ request_pb = _create_row_request( self.name, start_key=start_key, end_key=end_key, filter_=filter_, - limit=limit, end_inclusive=end_inclusive) + limit=limit, end_inclusive=end_inclusive, + app_profile_id=self._app_profile_id) client = self._instance._client return PartialRowsData(client._table_data_client._read_rows, request_pb) @@ -310,7 +316,7 @@ def yield_rows(self, start_key=None, end_key=None, limit=None, """ request_pb = _create_row_request( self.name, start_key=start_key, end_key=end_key, filter_=filter_, - limit=limit) + limit=limit, app_profile_id=self._app_profile_id) client = self._instance._client generator = YieldRowsData(client._table_data_client._read_rows, request_pb) @@ -347,7 +353,8 @@ def mutate_rows(self, rows, retry=DEFAULT_RETRY): sent. These will be in the same order as the `rows`. 
""" retryable_mutate_rows = _RetryableMutateRowsWorker( - self._instance._client, self.name, rows) + self._instance._client, self.name, rows, + app_profile_id=self._app_profile_id) return retryable_mutate_rows(retry=retry) def sample_row_keys(self): @@ -383,7 +390,7 @@ def sample_row_keys(self): """ client = self._instance._client response_iterator = client._table_data_client.sample_row_keys( - self.name) + self.name, app_profile_id=self._app_profile_id) return response_iterator @@ -403,10 +410,11 @@ class _RetryableMutateRowsWorker(object): ) # pylint: enable=unsubscriptable-object - def __init__(self, client, table_name, rows): + def __init__(self, client, table_name, rows, app_profile_id=None): self.client = client self.table_name = table_name self.rows = rows + self.app_profile_id = app_profile_id self.responses_statuses = [None] * len(self.rows) def __call__(self, retry=DEFAULT_RETRY): @@ -468,7 +476,8 @@ def _do_mutate_retryable_rows(self): return self.responses_statuses mutate_rows_request = _mutate_rows_request( - self.table_name, retryable_rows) + self.table_name, retryable_rows, + app_profile_id=self.app_profile_id) responses = self.client._table_data_client._mutate_rows( mutate_rows_request, retry=None) @@ -496,7 +505,8 @@ def _do_mutate_retryable_rows(self): def _create_row_request(table_name, row_key=None, start_key=None, end_key=None, - filter_=None, limit=None, end_inclusive=False): + filter_=None, limit=None, end_inclusive=False, + app_profile_id=None): """Creates a request to read rows in a table. :type table_name: str @@ -528,6 +538,9 @@ def _create_row_request(table_name, row_key=None, start_key=None, end_key=None, :param end_inclusive: (Optional) Whether the ``end_key`` should be considered inclusive. The default is False (exclusive). + :type: app_profile_id: str + :param app_profile_id: (Optional) The unique name of the AppProfile. + :rtype: :class:`data_messages_v2_pb2.ReadRowsRequest` :returns: The ``ReadRowsRequest`` protobuf corresponding to the inputs. :raises: :class:`ValueError ` if both @@ -551,6 +564,8 @@ def _create_row_request(table_name, row_key=None, start_key=None, end_key=None, request_kwargs['filter'] = filter_.to_pb() if limit is not None: request_kwargs['rows_limit'] = limit + if app_profile_id is not None: + request_kwargs['app_profile_id'] = app_profile_id message = data_messages_v2_pb2.ReadRowsRequest(**request_kwargs) @@ -563,7 +578,7 @@ def _create_row_request(table_name, row_key=None, start_key=None, end_key=None, return message -def _mutate_rows_request(table_name, rows): +def _mutate_rows_request(table_name, rows, app_profile_id=None): """Creates a request to mutate rows in a table. :type table_name: str @@ -572,12 +587,16 @@ def _mutate_rows_request(table_name, rows): :type rows: list :param rows: List or other iterable of :class:`.DirectRow` instances. + :type: app_profile_id: str + :param app_profile_id: (Optional) The unique name of the AppProfile. + :rtype: :class:`data_messages_v2_pb2.MutateRowsRequest` :returns: The ``MutateRowsRequest`` protobuf corresponding to the inputs. 
:raises: :exc:`~.table.TooManyMutationsError` if the number of mutations is greater than 100,000 """ - request_pb = data_messages_v2_pb2.MutateRowsRequest(table_name=table_name) + request_pb = data_messages_v2_pb2.MutateRowsRequest( + table_name=table_name, app_profile_id=app_profile_id) mutations_count = 0 for row in rows: _check_row_table_name(table_name, row) diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index 037a57235391..485d22278033 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -342,7 +342,7 @@ def _list_column_families_helper(self): def test_list_column_families(self): self._list_column_families_helper() - def _read_row_helper(self, chunks, expected_result): + def _read_row_helper(self, chunks, expected_result, app_profile_id=None): from google.cloud._testing import _Monkey from google.cloud.bigtable import table as MUT from google.cloud.bigtable_v2.gapic import bigtable_client @@ -356,14 +356,16 @@ def _read_row_helper(self, chunks, expected_result): client = self._make_client(project='project-id', credentials=credentials, admin=True) instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_one(self.TABLE_ID, instance) + table = self._make_one(self.TABLE_ID, instance, + app_profile_id=app_profile_id) # Create request_pb request_pb = object() # Returned by our mock. mock_created = [] - def mock_create_row_request(table_name, row_key, filter_): - mock_created.append((table_name, row_key, filter_)) + def mock_create_row_request(table_name, row_key, filter_, + app_profile_id=app_profile_id): + mock_created.append((table_name, row_key, filter_, app_profile_id)) return request_pb # Create response_iterator @@ -386,7 +388,8 @@ def mock_create_row_request(table_name, row_key, filter_): self.assertEqual(result, expected_result) self.assertEqual(mock_created, - [(table.name, self.ROW_KEY, filter_obj)]) + [(table.name, self.ROW_KEY, filter_obj, + app_profile_id)]) def test_read_row_miss_no__responses(self): self._read_row_helper(None, None) @@ -399,6 +402,7 @@ def test_read_row_complete(self): from google.cloud.bigtable.row_data import Cell from google.cloud.bigtable.row_data import PartialRowData + app_profile_id = 'app-profile-id' chunk = _ReadRowsResponseCellChunkPB( row_key=self.ROW_KEY, family_name=self.FAMILY_NAME, @@ -412,7 +416,7 @@ def test_read_row_complete(self): family = expected_result._cells.setdefault(self.FAMILY_NAME, {}) column = family.setdefault(self.QUALIFIER, []) column.append(Cell.from_pb(chunk)) - self._read_row_helper(chunks, expected_result) + self._read_row_helper(chunks, expected_result, app_profile_id) def test_read_row_still_partial(self): chunk = _ReadRowsResponseCellChunkPB( @@ -470,7 +474,9 @@ def test_read_rows(self): client._table_data_client = data_api client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_one(self.TABLE_ID, instance) + app_profile_id = 'app-profile-id' + table = self._make_one(self.TABLE_ID, instance, + app_profile_id=app_profile_id) # Create request_pb request = object() # Returned by our mock. 
@@ -502,6 +508,7 @@ def mock_create_row_request(table_name, **kwargs): 'filter_': filter_obj, 'limit': limit, 'end_inclusive': False, + 'app_profile_id': app_profile_id } self.assertEqual(mock_created, [(table.name, created_kwargs)]) @@ -1146,12 +1153,14 @@ def test_do_mutate_retryable_rows_mismatch_num_responses(self): class Test__create_row_request(unittest.TestCase): def _call_fut(self, table_name, row_key=None, start_key=None, end_key=None, - filter_=None, limit=None, end_inclusive=False): + filter_=None, limit=None, end_inclusive=False, + app_profile_id=None): from google.cloud.bigtable.table import _create_row_request return _create_row_request( table_name, row_key=row_key, start_key=start_key, end_key=end_key, - filter_=filter_, limit=limit, end_inclusive=end_inclusive) + filter_=filter_, limit=limit, end_inclusive=end_inclusive, + app_profile_id=app_profile_id) def test_table_name_only(self): table_name = 'table_name' @@ -1234,6 +1243,19 @@ def test_with_limit(self): ) self.assertEqual(result, expected_result) + def test_with_app_profile_id(self): + table_name = 'table_name' + limit = 1337 + app_profile_id = 'app-profile-id' + result = self._call_fut(table_name, limit=limit, + app_profile_id=app_profile_id) + expected_result = _ReadRowsRequestPB( + table_name=table_name, + rows_limit=limit, + app_profile_id=app_profile_id + ) + self.assertEqual(result, expected_result) + def _ReadRowsRequestPB(*args, **kw): from google.cloud.bigtable_v2.proto import ( From dff05edb040f37a9eadff0528fc1d5474de36429 Mon Sep 17 00:00:00 2001 From: Aneep Tandel Date: Thu, 31 May 2018 02:49:38 +0530 Subject: [PATCH 131/892] BigTable: improve read rows validation performance (#5390) Modify validation code to minimize access to the chunk, since this access is very expensive. Instead, copy chunk to a cell data and validate it. Also, minimize access to chunk state variables, such as reset_row and commit_row. --- .../google/cloud/bigtable/row_data.py | 184 +++++++++--------- .../tests/unit/test_row_data.py | 143 +++++++++++++- 2 files changed, 223 insertions(+), 104 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py index 3216be84dd2b..13db91b1268d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py @@ -48,10 +48,10 @@ class Cell(object): :param labels: (Optional) List of strings. Labels applied to the cell. """ - def __init__(self, value, timestamp_micros, labels=()): + def __init__(self, value, timestamp_micros, labels=None): self.value = value self.timestamp_micros = timestamp_micros - self.labels = list(labels) + self.labels = list(labels) if labels is not None else [] @classmethod def from_pb(cls, cell_pb): @@ -374,6 +374,15 @@ class YieldRowsData(object): ROW_IN_PROGRESS = 'Row in progress' # Some cells complete for row CELL_IN_PROGRESS = 'Cell in progress' # Incomplete cell for row + STATE_START = 0 + STATE_NEW_ROW = 1 + STATE_ROW_IN_PROGRESS = 2 + STATE_CELL_IN_PROGRESS = 3 + + read_states = {STATE_START: START, STATE_NEW_ROW: NEW_ROW, + STATE_ROW_IN_PROGRESS: ROW_IN_PROGRESS, + STATE_CELL_IN_PROGRESS: CELL_IN_PROGRESS} + def __init__(self, read_method, request): # Counter for responses pulled from iterator self._counter = 0 @@ -400,17 +409,24 @@ def state(self): :returns: name of state corresponding to current row / chunk processing. 
""" + return self.read_states[self._state] + + @property + def _state(self): + """State machine state. + :rtype: int + :returns: id of state corresponding to currrent row / chunk + processing. + """ + if self._previous_cell is not None: + return self.STATE_ROW_IN_PROGRESS if self.last_scanned_row_key is None: - return self.START + return self.STATE_START if self._row is None: - assert self._cell is None - assert self._previous_cell is None - return self.NEW_ROW + return self.STATE_NEW_ROW if self._cell is not None: - return self.CELL_IN_PROGRESS - if self._previous_cell is not None: - return self.ROW_IN_PROGRESS - return self.NEW_ROW # row added, no chunk yet processed + return self.STATE_CELL_IN_PROGRESS + return self.STATE_NEW_ROW # row added, no chunk yet processed def cancel(self): """Cancels the iterator, closing the stream.""" @@ -470,33 +486,37 @@ def read_rows(self): for chunk in response.chunks: - self._validate_chunk(chunk) - if chunk.reset_row: + self._validate_chunk_reset_row(chunk) row = self._row = None cell = self._cell = self._previous_cell = None continue - if row is None: - row = self._row = PartialRowData(chunk.row_key) - if cell is None: - qualifier = None - if chunk.HasField('qualifier'): - qualifier = chunk.qualifier.value + qualifier = chunk.qualifier.value + if qualifier == b'' and not chunk.HasField('qualifier'): + qualifier = None - cell = self._cell = PartialCellData( + cell = PartialCellData( chunk.row_key, chunk.family_name.value, qualifier, chunk.timestamp_micros, chunk.labels, chunk.value) + self._validate_cell_data(cell) + self._cell = cell self._copy_from_previous(cell) else: cell.append_value(chunk.value) + if row is None: + row = self._row = PartialRowData(cell.row_key) + if chunk.commit_row: + if chunk.value_size > 0: + raise InvalidChunk() + self._save_current_cell() yield self._row @@ -511,94 +531,66 @@ def read_rows(self): self._save_current_cell() cell = None - @staticmethod - def _validate_chunk_status(chunk): - """Helper for :meth:`_validate_chunk_row_in_progress`, etc.""" - # No reseet with other keys - if chunk.reset_row: - _raise_if(chunk.row_key) - _raise_if(chunk.HasField('family_name')) - _raise_if(chunk.HasField('qualifier')) - _raise_if(chunk.timestamp_micros) - _raise_if(chunk.labels) - _raise_if(chunk.value_size) - _raise_if(chunk.value) - # No commit with value size - _raise_if(chunk.commit_row and chunk.value_size > 0) - # No negative value_size (inferred as a general constraint). - _raise_if(chunk.value_size < 0) - - def _validate_chunk_new_row(self, chunk): - """Helper for :meth:`_validate_chunk`.""" - assert self.state == self.NEW_ROW - _raise_if(chunk.reset_row) - _raise_if(not chunk.row_key) - _raise_if(not chunk.family_name) - _raise_if(not chunk.qualifier) - # This constraint is not enforced in the Go example. - _raise_if(chunk.value_size > 0 and chunk.commit_row is not False) - # This constraint is from the Go example, not the spec. 
- _raise_if(self._previous_row is not None and - chunk.row_key <= self._previous_row.row_key) - - def _same_as_previous(self, chunk): - """Helper for :meth:`_validate_chunk_row_in_progress`""" - previous = self._previous_cell - return (chunk.row_key == previous.row_key and - chunk.family_name == previous.family_name and - chunk.qualifier == previous.qualifier and - chunk.labels == previous.labels) - - def _validate_chunk_row_in_progress(self, chunk): - """Helper for :meth:`_validate_chunk`""" - assert self.state == self.ROW_IN_PROGRESS - self._validate_chunk_status(chunk) - _raise_if(chunk.row_key and - chunk.row_key != self._row.row_key) - _raise_if(chunk.HasField('family_name') and - not chunk.HasField('qualifier')) - previous = self._previous_cell - _raise_if(self._same_as_previous(chunk) and - chunk.timestamp_micros <= previous.timestamp_micros) - - def _validate_chunk_cell_in_progress(self, chunk): - """Helper for :meth:`_validate_chunk`""" - assert self.state == self.CELL_IN_PROGRESS - self._validate_chunk_status(chunk) - self._copy_from_current(chunk) - - def _validate_chunk(self, chunk): - """Helper for :meth:`consume_next`.""" - if self.state == self.NEW_ROW: - self._validate_chunk_new_row(chunk) - if self.state == self.ROW_IN_PROGRESS: - self._validate_chunk_row_in_progress(chunk) - if self.state == self.CELL_IN_PROGRESS: - self._validate_chunk_cell_in_progress(chunk) + def _validate_cell_data(self, cell): + if self._state == self.STATE_ROW_IN_PROGRESS: + self._validate_cell_data_row_in_progress(cell) + if self._state == self.STATE_NEW_ROW: + self._validate_cell_data_new_row(cell) + if self._state == self.STATE_CELL_IN_PROGRESS: + self._copy_from_current(cell) + + def _validate_cell_data_new_row(self, cell): + if (not cell.row_key or + not cell.family_name or + cell.qualifier is None): + raise InvalidChunk() + + if (self._previous_row is not None and + cell.row_key <= self._previous_row.row_key): + raise InvalidChunk() + + def _validate_cell_data_row_in_progress(self, cell): + if ((cell.row_key and + cell.row_key != self._row.row_key) or + (cell.family_name and cell.qualifier is None)): + raise InvalidChunk() + + def _validate_chunk_reset_row(self, chunk): + # No reset for new row + _raise_if(self._state == self.STATE_NEW_ROW) + + # No reset with other keys + _raise_if(chunk.row_key) + _raise_if(chunk.HasField('family_name')) + _raise_if(chunk.HasField('qualifier')) + _raise_if(chunk.timestamp_micros) + _raise_if(chunk.labels) + _raise_if(chunk.value_size) + _raise_if(chunk.value) def _save_current_cell(self): """Helper for :meth:`consume_next`.""" row, cell = self._row, self._cell family = row._cells.setdefault(cell.family_name, {}) qualified = family.setdefault(cell.qualifier, []) - complete = Cell.from_pb(self._cell) + complete = Cell.from_pb(cell) qualified.append(complete) self._cell, self._previous_cell = None, cell - def _copy_from_current(self, chunk): - """Helper for :meth:`consume_next`.""" + def _copy_from_current(self, cell): current = self._cell if current is not None: - if not chunk.row_key: - chunk.row_key = current.row_key - if not chunk.HasField('family_name'): - chunk.family_name.value = current.family_name - if not chunk.HasField('qualifier'): - chunk.qualifier.value = current.qualifier - if not chunk.timestamp_micros: - chunk.timestamp_micros = current.timestamp_micros - if not chunk.labels: - chunk.labels.extend(current.labels) + if not cell.row_key: + cell.row_key = current.row_key + if not cell.family_name: + cell.family_name = current.family_name + # NOTE: 
``cell.qualifier`` **can** be empty string. + if cell.qualifier is None: + cell.qualifier = current.qualifier + if not cell.timestamp_micros: + cell.timestamp_micros = current.timestamp_micros + if not cell.labels: + cell.labels.extend(current.labels) def _copy_from_previous(self, cell): """Helper for :meth:`consume_next`.""" diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_data.py b/packages/google-cloud-bigtable/tests/unit/test_row_data.py index a2293aae3943..b5f146c47715 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_data.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_data.py @@ -17,6 +17,41 @@ import mock +from ._testing import _make_credentials + + +class MultiCallableStub(object): + """Stub for the grpc.UnaryUnaryMultiCallable interface.""" + + def __init__(self, method, channel_stub): + self.method = method + self.channel_stub = channel_stub + + def __call__(self, request, timeout=None, metadata=None, credentials=None): + self.channel_stub.requests.append((self.method, request)) + + return self.channel_stub.responses.pop() + + +class ChannelStub(object): + """Stub for the grpc.Channel interface.""" + + def __init__(self, responses=[]): + self.responses = responses + self.requests = [] + + def unary_unary(self, + method, + request_serializer=None, + response_deserializer=None): + return MultiCallableStub(method, self) + + def unary_stream(self, + method, + request_serializer=None, + response_deserializer=None): + return MultiCallableStub(method, self) + class TestCell(unittest.TestCase): timestamp_micros = 18738724000 # Make sure millis granularity @@ -369,6 +404,15 @@ def _get_target_class(): def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) + @staticmethod + def _get_target_client_class(): + from google.cloud.bigtable.client import Client + + return Client + + def _make_client(self, *args, **kwargs): + return self._get_target_client_class()(*args, **kwargs) + def test_state_start(self): client = _Client() iterator = _MockCancellableIterator() @@ -379,13 +423,36 @@ def test_state_start(self): self.assertEqual(yrd.state, yrd.START) def test_state_new_row_w_row(self): - client = _Client() - iterator = _MockCancellableIterator() - client._data_stub = mock.MagicMock() - client._data_stub.ReadRows.side_effect = [iterator] + from google.cloud.bigtable_v2.gapic import bigtable_client + + chunk = _ReadRowsResponseCellChunkPB( + row_key=self.ROW_KEY, + family_name=self.FAMILY_NAME, + qualifier=self.QUALIFIER, + timestamp_micros=self.TIMESTAMP_MICROS, + value=self.VALUE, + commit_row=True, + ) + chunks = [chunk] + + response = _ReadRowsResponseV2(chunks) + iterator = _MockCancellableIterator(response) + channel = ChannelStub(responses=[iterator]) + data_api = bigtable_client.BigtableClient(channel=channel) + credentials = _make_credentials() + client = self._make_client(project='project-id', + credentials=credentials, admin=True) + client._table_data_client = data_api request = object() - yrd = self._make_one(client._data_stub.ReadRows, request) - yrd.last_scanned_row_key = '' + yrd = self._make_one( + client._table_data_client.bigtable_stub.ReadRows, request) + yrd._response_iterator = iterator + yrd._last_scanned_row_key = '' + rows = [row for row in yrd.read_rows()] + + result = rows[0] + self.assertEqual(result.row_key, self.ROW_KEY) + yrd._row = object() self.assertEqual(yrd.state, yrd.NEW_ROW) @@ -441,6 +508,22 @@ def test__copy_from_current_blank(self): self.assertEqual(chunk.timestamp_micros, 
TIMESTAMP_MICROS) self.assertEqual(chunk.labels, LABELS) + def test__copy_from_current_empty_chunk(self): + client = _Client() + client._data_stub = mock.MagicMock() + request = object() + yrd = self._make_one(client._data_stub.ReadRows, request) + yrd._cell = _PartialCellData() + yrd._cell.qualifier = b'' + chunks = _generate_cell_chunks(['']) + chunk = chunks[0] + yrd._copy_from_current(chunk) + self.assertEqual(chunk.row_key, b'') + self.assertEqual(chunk.family_name.value, '') + self.assertEqual(chunk.qualifier.value, b'') + self.assertEqual(chunk.timestamp_micros, 0) + self.assertEqual(chunk.labels, []) + def test__copy_from_previous_unset(self): client = _Client() client._data_stub = mock.MagicMock() @@ -448,7 +531,7 @@ def test__copy_from_previous_unset(self): yrd = self._make_one(client._data_stub.ReadRows, request) cell = _PartialCellData() yrd._copy_from_previous(cell) - self.assertEqual(cell.row_key, '') + self.assertEqual(cell.row_key, b'') self.assertEqual(cell.family_name, u'') self.assertIsNone(cell.qualifier) self.assertEqual(cell.timestamp_micros, 0) @@ -544,6 +627,50 @@ def test_invalid_empty_chunk(self): with self.assertRaises(InvalidChunk): self._consume_all(yrd) + def test_state_cell_in_progress(self): + LABELS = ['L1', 'L2'] + + client = _Client() + chunk = _ReadRowsResponseCellChunkPB( + row_key=self.ROW_KEY, + family_name=self.FAMILY_NAME, + qualifier=self.QUALIFIER, + timestamp_micros=self.TIMESTAMP_MICROS, + value=self.VALUE, + commit_row=True, + ) + chunks = [chunk] + response = _ReadRowsResponseV2(chunks) + iterator = _MockCancellableIterator(response) + client._data_stub = mock.MagicMock() + client._data_stub.ReadRows.side_effect = [iterator] + request = object() + yrd = self._make_one(client._data_stub.ReadRows, request) + self._consume_all(yrd) + yrd._last_scanned_row_key = '' + yrd._row = object() + cell = _PartialCellData( + row_key=self.ROW_KEY, + family_name=self.FAMILY_NAME, + qualifier=self.QUALIFIER, + timestamp_micros=self.TIMESTAMP_MICROS, + labels=LABELS + ) + + yrd._cell = cell + more_cell_data = _PartialCellData( + value=self.VALUE + ) + + yrd._validate_cell_data(more_cell_data) + + self.assertEqual(more_cell_data.row_key, self.ROW_KEY) + self.assertEqual(more_cell_data.family_name, self.FAMILY_NAME) + self.assertEqual(more_cell_data.qualifier, self.QUALIFIER) + self.assertEqual(more_cell_data.timestamp_micros, + self.TIMESTAMP_MICROS) + self.assertEqual(more_cell_data.labels, LABELS) + def test_yield_rows_data(self): client = _Client() @@ -848,7 +975,7 @@ def next(self): class _PartialCellData(object): - row_key = '' + row_key = b'' family_name = u'' qualifier = None timestamp_micros = 0 From eb9eb1981d8b83c3514d99b5a8144daefbcbcb45 Mon Sep 17 00:00:00 2001 From: Aneep Tandel Date: Thu, 31 May 2018 19:18:51 +0530 Subject: [PATCH 132/892] BigTable: Add admin app profile methods on Instance (#5315) --- .../google/cloud/bigtable/instance.py | 200 ++++++++++ .../google-cloud-bigtable/tests/system.py | 33 ++ .../tests/unit/test_instance.py | 361 ++++++++++++++++++ 3 files changed, 594 insertions(+) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py index 8543f88409d4..97b90e1ab9d4 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py @@ -20,6 +20,8 @@ from google.cloud.bigtable.table import Table from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES +from 
google.protobuf import field_mask_pb2 + from google.cloud.bigtable_admin_v2 import enums from google.cloud.bigtable_admin_v2.types import instance_pb2 @@ -27,6 +29,8 @@ _EXISTING_INSTANCE_LOCATION_ID = 'see-existing-cluster' _INSTANCE_NAME_RE = re.compile(r'^projects/(?P[^/]+)/' r'instances/(?P[a-z][-a-z0-9]*)$') +ROUTING_POLICY_TYPE_ANY = 1 +ROUTING_POLICY_TYPE_SINGLE = 2 _STORAGE_TYPE_UNSPECIFIED = enums.StorageType.STORAGE_TYPE_UNSPECIFIED @@ -282,3 +286,199 @@ def list_tables(self): result.append(self.table(table_id)) return result + + def create_app_profile(self, app_profile_id, routing_policy_type, + description='', ignore_warnings=None, + cluster_id=None, allow_transactional_writes=False): + """Creates an app profile within an instance. + + :type: app_profile_id: str + :param app_profile_id: The unique name for the new app profile. + + :type: routing_policy_type: int + :param: routing_policy_type: There are two routing policies + ROUTING_POLICY_TYPE_ANY = 1 and + ROUTING_POLICY_TYPE_SINGLE = 2. + If ROUTING_POLICY_TYPE_ANY + which will create a + MultiClusterRoutingUseAny policy and if + ROUTING_POLICY_TYPE_ANY is specified, a + SingleClusterRouting policy will be created + using the cluster_id and + allow_transactional_writes parameters. + + :type: description: str + :param: description: (Optional) Long form description of the use + case for this AppProfile. + + :type: ignore_warnings: bool + :param: ignore_warnings: (Optional) If true, ignore safety checks when + creating the app profile. + + :type: cluster_id: str + :param: cluster_id: (Optional) Unique cluster_id which is only required + when routing_policy_type is + ROUTING_POLICY_TYPE_SINGLE. + + :type: allow_transactional_writes: bool + :param: allow_transactional_writes: (Optional) If true, allow + transactional writes for + ROUTING_POLICY_TYPE_SINGLE. + + :rtype: :class:`~google.cloud.bigtable_admin_v2.types.AppProfile` + :return: The AppProfile instance. + :raises: :class:`ValueError ` If routing + policy is not set. + """ + if not routing_policy_type: + raise ValueError('AppProfile required routing policy.') + + single_cluster_routing = None + multi_cluster_routing_use_any = None + instance_admin_client = self._client._instance_admin_client + name = instance_admin_client.app_profile_path( + self._client.project, self.instance_id, app_profile_id) + + if routing_policy_type == ROUTING_POLICY_TYPE_ANY: + multi_cluster_routing_use_any = ( + instance_pb2.AppProfile.MultiClusterRoutingUseAny()) + + if routing_policy_type == ROUTING_POLICY_TYPE_SINGLE: + single_cluster_routing = ( + instance_pb2.AppProfile.SingleClusterRouting( + cluster_id=cluster_id, + allow_transactional_writes=allow_transactional_writes + )) + + app_profile = instance_pb2.AppProfile( + name=name, description=description, + multi_cluster_routing_use_any=multi_cluster_routing_use_any, + single_cluster_routing=single_cluster_routing + ) + + return self._client._instance_admin_client.create_app_profile( + parent=self.name, app_profile_id=app_profile_id, + app_profile=app_profile, ignore_warnings=ignore_warnings) + + def get_app_profile(self, app_profile_id): + """Gets information about an app profile. + + :type: app_profile_id: str + :param app_profile_id: The unique name for the app profile. + + :rtype: :class:`~google.cloud.bigtable_admin_v2.types.AppProfile` + :return: The AppProfile instance. 
+ """ + instance_admin_client = self._client._instance_admin_client + name = instance_admin_client.app_profile_path( + self._client.project, self.instance_id, app_profile_id) + return self._client._instance_admin_client.get_app_profile(name) + + def list_app_profiles(self): + """Lists information about app profiles in an instance. + + :rtype: :list:[`~google.cloud.bigtable_admin_v2.types.AppProfile`] + :return: A :list:[`~google.cloud.bigtable_admin_v2.types.AppProfile`]. + By default, this is a list of + :class:`~google.cloud.bigtable_admin_v2.types.AppProfile` + instances. + """ + list_app_profiles = list( + self._client._instance_admin_client.list_app_profiles(self.name)) + return list_app_profiles + + def update_app_profile(self, app_profile_id, update_mask, + routing_policy_type, description='', + ignore_warnings=None, + cluster_id=None, allow_transactional_writes=False): + """Updates an app profile within an instance. + + :type: app_profile_id: str + :param app_profile_id: The unique name for the new app profile. + + :type: update_mask: list + :param: update_mask: Name of the parameters of AppProfiles that + needed to update. + + :type: routing_policy_type: int + :param: routing_policy_type: There are two routing policies + ROUTING_POLICY_TYPE_ANY = 1 and + ROUTING_POLICY_TYPE_SINGLE = 2. + If ROUTING_POLICY_TYPE_ANY + which will create a + MultiClusterRoutingUseAny policy and if + ROUTING_POLICY_TYPE_ANY is specified, a + SingleClusterRouting policy will be created + using the cluster_id and + allow_transactional_writes parameters. + + :type: description: str + :param: description: (Optional) Optional long form description of the + use case for this AppProfile. + + :type: ignore_warnings: bool + :param: ignore_warnings: (Optional) If true, ignore safety checks when + creating the app profile. + + :type: cluster_id: str + :param: cluster_id: (Optional) Unique cluster_id which is only required + when routing_policy_type is + ROUTING_POLICY_TYPE_SINGLE. + + :type: allow_transactional_writes: bool + :param: allow_transactional_writes: (Optional) If true, allow + transactional writes for + ROUTING_POLICY_TYPE_SINGLE. + + :rtype: :class:`~google.cloud.bigtable_admin_v2.types.AppProfile` + :return: The AppProfile instance. + :raises: :class:`ValueError ` If routing + policy is not set. 
+ """ + if not routing_policy_type: + raise ValueError('AppProfile required routing policy.') + + single_cluster_routing = None + multi_cluster_routing_use_any = None + instance_admin_client = self._client._instance_admin_client + name = instance_admin_client.app_profile_path( + self._client.project, self.instance_id, app_profile_id) + + if routing_policy_type == ROUTING_POLICY_TYPE_ANY: + multi_cluster_routing_use_any = ( + instance_pb2.AppProfile.MultiClusterRoutingUseAny()) + + if routing_policy_type == ROUTING_POLICY_TYPE_SINGLE: + single_cluster_routing = ( + instance_pb2.AppProfile.SingleClusterRouting( + cluster_id=cluster_id, + allow_transactional_writes=allow_transactional_writes + )) + + update_app_profile = instance_pb2.AppProfile( + name=name, description=description, + multi_cluster_routing_use_any=multi_cluster_routing_use_any, + single_cluster_routing=single_cluster_routing + ) + update_mask = field_mask_pb2.FieldMask(paths=update_mask) + + return self._client._instance_admin_client.update_app_profile( + app_profile=update_app_profile, update_mask=update_mask, + ignore_warnings=ignore_warnings) + + def delete_app_profile(self, app_profile_id, ignore_warnings=False): + """Deletes an app profile from an instance. + + :type: app_profile_id: str + :param app_profile_id: The unique name for the app profile to delete. + + :raises: google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. google.api_core.exceptions.RetryError: + If the request failed due to a retryable error and retry + attempts failed. ValueError: If the parameters are invalid. + """ + instance_admin_client = self._client._instance_admin_client + app_profile_path = instance_admin_client.app_profile_path( + self._client.project, self.instance_id, app_profile_id) + self._client._instance_admin_client.delete_app_profile( + app_profile_path, ignore_warnings) diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index 067bd1b86a2c..b7896d19e294 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -40,6 +40,8 @@ LOCATION_ID = 'us-central1-c' INSTANCE_ID = 'g-c-p' + unique_resource_id('-') TABLE_ID = 'google-cloud-python-test-table' +APP_PROFILE_ID = 'app-profile-id' +CLUSTER_ID = INSTANCE_ID+'-cluster' COLUMN_FAMILY_ID1 = u'col-fam-id1' COLUMN_FAMILY_ID2 = u'col-fam-id2' COL_NAME1 = b'col-name1' @@ -51,6 +53,8 @@ CELL_VAL4 = b'foo' ROW_KEY = b'row-key' ROW_KEY_ALT = b'row-key-alt' +ROUTING_POLICY_TYPE_ANY = 1 +ROUTING_POLICY_TYPE_SINGLE = 2 EXISTING_INSTANCES = [] @@ -174,6 +178,35 @@ def test_update(self): Config.INSTANCE.display_name = OLD_DISPLAY_NAME Config.INSTANCE.update() + def test_create_app_profile_with_multi_routing_policy(self): + # Create a new instance instance and reload it. + description = 'Foo App Profile' + instance = Config.INSTANCE + + app_profile = instance.create_app_profile( + app_profile_id=APP_PROFILE_ID+'-multi', + routing_policy_type=ROUTING_POLICY_TYPE_ANY, + description=description, + ignore_warnings=True + ) + + self.assertEqual(app_profile.description, description) + + def test_create_app_profile_with_single_routing_policy(self): + # Create a new instance instance and reload it. 
+ description = 'Foo App Profile' + instance = Config.INSTANCE + + app_profile = instance.create_app_profile( + app_profile_id=APP_PROFILE_ID+'-single', + routing_policy_type=ROUTING_POLICY_TYPE_SINGLE, + description=description, + cluster_id=CLUSTER_ID, + ignore_warnings=True + ) + + self.assertEqual(app_profile.description, description) + class TestTableAdminAPI(unittest.TestCase): diff --git a/packages/google-cloud-bigtable/tests/unit/test_instance.py b/packages/google-cloud-bigtable/tests/unit/test_instance.py index b3654ce8091e..46bb52799b20 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_instance.py +++ b/packages/google-cloud-bigtable/tests/unit/test_instance.py @@ -20,6 +20,33 @@ from ._testing import _make_credentials +class MultiCallableStub(object): + """Stub for the grpc.UnaryUnaryMultiCallable interface.""" + + def __init__(self, method, channel_stub): + self.method = method + self.channel_stub = channel_stub + + def __call__(self, request, timeout=None, metadata=None, credentials=None): + self.channel_stub.requests.append((self.method, request)) + + return self.channel_stub.responses.pop() + + +class ChannelStub(object): + """Stub for the grpc.Channel interface.""" + + def __init__(self, responses=[]): + self.responses = responses + self.requests = [] + + def unary_unary(self, + method, + request_serializer=None, + response_deserializer=None): + return MultiCallableStub(method, self) + + class TestInstance(unittest.TestCase): PROJECT = 'project' @@ -27,6 +54,9 @@ class TestInstance(unittest.TestCase): INSTANCE_NAME = 'projects/' + PROJECT + '/instances/' + INSTANCE_ID LOCATION_ID = 'locname' LOCATION = 'projects/' + PROJECT + '/locations/' + LOCATION_ID + APP_PROFILE_PATH = ( + 'projects/' + PROJECT + '/instances/' + INSTANCE_ID + + '/appProfiles/') DISPLAY_NAME = 'display_name' OP_ID = 8915 OP_NAME = ('operations/projects/%s/instances/%soperations/%d' % @@ -408,6 +438,330 @@ def test_list_tables_failure_name_bad_before(self): with self.assertRaises(ValueError): self._list_tables_helper(table_name=BAD_TABLE_NAME) + def test_create_app_profile_with_wrong_routing_policy(self): + credentials = _make_credentials() + client = self._make_client(project=self.PROJECT, + credentials=credentials, admin=True) + instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) + + app_profile_id = 'appProfileId1262094415' + update_mask = [] + + # Create AppProfile with exception + with self.assertRaises(ValueError): + instance.create_app_profile(app_profile_id=app_profile_id, + routing_policy_type=None) + + with self.assertRaises(ValueError): + instance.update_app_profile(app_profile_id, + update_mask=update_mask, + routing_policy_type=None) + + def test_create_app_profile_with_multi_routing_policy(self): + from google.cloud.bigtable_admin_v2.proto import instance_pb2 + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_instance_admin_client) + + credentials = _make_credentials() + client = self._make_client(project=self.PROJECT, + credentials=credentials, admin=True) + instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) + + description = 'description-1724546052' + app_profile_id = 'appProfileId1262094415' + expected_response = { + 'name': self.APP_PROFILE_PATH + app_profile_id, + 'description': description, + 'multi_cluster_routing_use_any': + instance_pb2.AppProfile.MultiClusterRoutingUseAny() + } + expected_request = { + 'app_profile_id': app_profile_id, + 'routing_policy_type': 1, + 'description': description + } + expected_response = 
instance_pb2.AppProfile(**expected_response) + + channel = ChannelStub(responses=[expected_response]) + instance_api = ( + bigtable_instance_admin_client.BigtableInstanceAdminClient( + channel=channel)) + + # Patch the stub used by the API method. + client._instance_admin_client = instance_api + + # Perform the method and check the result. + result = instance.create_app_profile(**expected_request) + + parent = client._instance_admin_client.instance_path( + self.PROJECT, self.INSTANCE_ID) + expected_request = _CreateAppProfileRequestPB( + parent=parent, app_profile_id=app_profile_id, + app_profile=expected_response, + ) + + actual_request = channel.requests[0][1] + assert expected_request == actual_request + self.assertEqual(result, expected_response) + + def test_create_app_profile_with_single_routing_policy(self): + from google.cloud.bigtable_admin_v2.proto import instance_pb2 + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_instance_admin_client) + + credentials = _make_credentials() + client = self._make_client(project=self.PROJECT, + credentials=credentials, admin=True) + instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) + + description = 'description-1724546052' + app_profile_id = 'appProfileId1262094415' + cluster_id = 'cluster-id' + expected_response = { + 'name': self.APP_PROFILE_PATH + app_profile_id, + 'description': description, + 'single_cluster_routing': + instance_pb2.AppProfile.SingleClusterRouting( + cluster_id=cluster_id, + allow_transactional_writes=False + ) + } + expected_request = { + 'app_profile_id': app_profile_id, + 'routing_policy_type': 2, + 'description': description, + 'cluster_id': cluster_id + } + expected_response = instance_pb2.AppProfile(**expected_response) + + channel = ChannelStub(responses=[expected_response]) + instance_api = ( + bigtable_instance_admin_client.BigtableInstanceAdminClient( + channel=channel)) + + # Patch the stub used by the API method. + client._instance_admin_client = instance_api + + # Perform the method and check the result. + result = instance.create_app_profile(**expected_request) + + parent = client._instance_admin_client.instance_path( + self.PROJECT, self.INSTANCE_ID) + expected_request = _CreateAppProfileRequestPB( + parent=parent, app_profile_id=app_profile_id, + app_profile=expected_response, + ) + + actual_request = channel.requests[0][1] + assert expected_request == actual_request + self.assertEqual(result, expected_response) + + def test_get_app_profile(self): + from google.cloud.bigtable_admin_v2.proto import ( + instance_pb2 as instance_data_v2_pb2) + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_instance_admin_client) + + instance_api = ( + bigtable_instance_admin_client.BigtableInstanceAdminClient( + mock.Mock())) + + credentials = _make_credentials() + client = self._make_client(project=self.PROJECT, + credentials=credentials, admin=True) + instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) + + name = 'name3373707' + etag = 'etag3123477' + description = 'description-1724546052' + expected_response = { + 'name': name, + 'etag': etag, + 'description': description + } + expected_response = instance_data_v2_pb2.AppProfile( + **expected_response) + + response_pb = instance_data_v2_pb2.AppProfile( + name=name, + etag=etag, + description=description + ) + + # Patch the stub used by the API method. 
+ client._instance_admin_client = instance_api + bigtable_instance_stub = ( + client._instance_admin_client.bigtable_instance_admin_stub) + bigtable_instance_stub.GetAppProfile.side_effect = [response_pb] + + # Perform the method and check the result. + app_profile_id = 'appProfileId1262094415' + result = instance.get_app_profile(app_profile_id=app_profile_id) + + self.assertEqual(result, expected_response) + + def test_list_app_profiles(self): + from google.cloud.bigtable_admin_v2.proto import ( + bigtable_instance_admin_pb2 as instance_messages_v1_pb2) + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_instance_admin_client) + + instance_api = ( + bigtable_instance_admin_client.BigtableInstanceAdminClient( + mock.Mock())) + + credentials = _make_credentials() + client = self._make_client(project=self.PROJECT, + credentials=credentials, admin=True) + instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) + + # Setup Expected Response + next_page_token = '' + app_profiles_element = {} + app_profiles = [app_profiles_element] + expected_response = { + 'next_page_token': next_page_token, + 'app_profiles': app_profiles + } + expected_response = instance_messages_v1_pb2.ListAppProfilesResponse( + **expected_response) + + # Patch the stub used by the API method. + client._instance_admin_client = instance_api + bigtable_instance_stub = ( + client._instance_admin_client.bigtable_instance_admin_stub) + bigtable_instance_stub.ListAppProfiles.side_effect = [ + expected_response] + + # Perform the method and check the result. + response = instance.list_app_profiles() + + self.assertEqual(response[0], expected_response.app_profiles[0]) + + def test_update_app_profile(self): + import datetime + from google.api_core import operation + from google.longrunning import operations_pb2 + from google.protobuf.any_pb2 import Any + from google.cloud.bigtable_admin_v2.proto import ( + bigtable_instance_admin_pb2 as messages_v2_pb2) + from google.cloud._helpers import _datetime_to_pb_timestamp + from tests.unit._testing import _FakeStub + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_instance_admin_client) + + instance_api = ( + bigtable_instance_admin_client.BigtableInstanceAdminClient( + mock.Mock())) + + credentials = _make_credentials() + client = self._make_client(project=self.PROJECT, + credentials=credentials, admin=True) + instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) + + # Create response_pb + NOW = datetime.datetime.utcnow() + NOW_PB = _datetime_to_pb_timestamp(NOW) + metadata = messages_v2_pb2.CreateInstanceMetadata(request_time=NOW_PB) + type_url = 'type.googleapis.com/%s' % ( + messages_v2_pb2.CreateInstanceMetadata.DESCRIPTOR.full_name,) + response_pb = operations_pb2.Operation( + name=self.OP_NAME, + metadata=Any( + type_url=type_url, + value=metadata.SerializeToString(), + ) + ) + + # Patch the stub used by the API method. + client._instance_admin_client = instance_api + stub = _FakeStub(response_pb) + client._instance_admin_client.bigtable_instance_admin_stub = stub + update_mask = [] + + # Perform the method and check the result. 
+ app_profile_id = 'appProfileId1262094415' + result = instance.update_app_profile(app_profile_id, + update_mask=update_mask, + routing_policy_type=1) + + self.assertIsInstance(result, operation.Operation) + + def test_update_app_profile_with_single_routing_policy(self): + import datetime + from google.api_core import operation + from google.longrunning import operations_pb2 + from google.protobuf.any_pb2 import Any + from google.cloud.bigtable_admin_v2.proto import ( + bigtable_instance_admin_pb2 as messages_v2_pb2) + from google.cloud._helpers import _datetime_to_pb_timestamp + from tests.unit._testing import _FakeStub + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_instance_admin_client) + + instance_api = ( + bigtable_instance_admin_client.BigtableInstanceAdminClient( + mock.Mock())) + + credentials = _make_credentials() + client = self._make_client(project=self.PROJECT, + credentials=credentials, admin=True) + instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) + + # Create response_pb + NOW = datetime.datetime.utcnow() + NOW_PB = _datetime_to_pb_timestamp(NOW) + metadata = messages_v2_pb2.CreateInstanceMetadata(request_time=NOW_PB) + type_url = 'type.googleapis.com/%s' % ( + messages_v2_pb2.CreateInstanceMetadata.DESCRIPTOR.full_name,) + response_pb = operations_pb2.Operation( + name=self.OP_NAME, + metadata=Any( + type_url=type_url, + value=metadata.SerializeToString(), + ) + ) + + # Patch the stub used by the API method. + client._instance_admin_client = instance_api + stub = _FakeStub(response_pb) + client._instance_admin_client.bigtable_instance_admin_stub = stub + update_mask = [] + + # Perform the method and check the result. + app_profile_id = 'appProfileId1262094415' + cluster_id = 'cluster-id' + result = instance.update_app_profile(app_profile_id, + update_mask=update_mask, + routing_policy_type=2, + cluster_id=cluster_id) + + self.assertIsInstance(result, operation.Operation) + + def test_delete_app_profile(self): + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_instance_admin_client) + + instance_api = ( + bigtable_instance_admin_client.BigtableInstanceAdminClient( + mock.Mock())) + credentials = _make_credentials() + client = self._make_client(project=self.PROJECT, + credentials=credentials, admin=True) + instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) + + # Patch the stub used by the API method. + client._instance_admin_client = instance_api + + ignore_warnings = True + + expected_result = None # delete() has no return value. + + app_profile_id = 'appProfileId1262094415' + result = instance.delete_app_profile(app_profile_id, ignore_warnings) + + self.assertEqual(expected_result, result) + class _Client(object): @@ -419,3 +773,10 @@ def __init__(self, project): def __eq__(self, other): return (other.project == self.project and other.project_name == self.project_name) + + +def _CreateAppProfileRequestPB(*args, **kw): + from google.cloud.bigtable_admin_v2.proto import ( + bigtable_instance_admin_pb2 as instance_v2_pb2) + + return instance_v2_pb2.CreateAppProfileRequest(*args, **kw) From a858f34de1bff68f7cfca83f46d3fb683da278fb Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Thu, 31 May 2018 13:17:46 -0400 Subject: [PATCH 133/892] Pass through 'session.posargs' when running Bigtable system tests. (#5418) While we're add it, use a develop install in system tests. 
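For context, a minimal sketch of the pattern this change applies (not the exact
session from nox.py below; it assumes nox's standard session API): extra
command-line arguments are forwarded to py.test via ``session.posargs``, and the
package is installed in develop (editable) mode.

    import nox

    @nox.session
    def system(session):
        # Editable ("develop") install: local edits are picked up without
        # reinstalling the package between runs.
        session.install('-e', '.')
        session.install('mock', 'pytest')
        # Anything passed after '--' on the nox command line, e.g.
        #   nox -s system -- -k test_list_instances
        # arrives in session.posargs and is forwarded to py.test.
        session.run('py.test', '--quiet', 'tests/system.py', *session.posargs)
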
--- packages/google-cloud-bigtable/nox.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/nox.py b/packages/google-cloud-bigtable/nox.py index fe446a4a3a81..2abcce3ae2a5 100644 --- a/packages/google-cloud-bigtable/nox.py +++ b/packages/google-cloud-bigtable/nox.py @@ -88,10 +88,10 @@ def system(session, py): # virtualenv's dist-packages. session.install('mock', 'pytest', *LOCAL_DEPS) session.install('../test_utils/') - session.install('.') + session.install('-e', '.') # Run py.test against the system tests. - session.run('py.test', '--quiet', 'tests/system.py') + session.run('py.test', '--quiet', 'tests/system.py', *session.posargs) @nox.session From 12ff12db2ad1f910265847df396540b291068bff Mon Sep 17 00:00:00 2001 From: Aneep Tandel Date: Thu, 31 May 2018 23:06:42 +0530 Subject: [PATCH 134/892] BigTable: use client properties rather than private attrs (#5398) --- .../google/cloud/bigtable/column_family.py | 6 +-- .../google/cloud/bigtable/instance.py | 16 +++---- .../google/cloud/bigtable/row.py | 11 +++-- .../google/cloud/bigtable/table.py | 47 +++++++++---------- 4 files changed, 40 insertions(+), 40 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/column_family.py b/packages/google-cloud-bigtable/google/cloud/bigtable/column_family.py index 3e2f1cae818e..c0c5c47d6da3 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/column_family.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/column_family.py @@ -228,7 +228,7 @@ def create(self): client = self._table._instance._client # data it contains are the GC rule and the column family ID already # stored on this instance. - client._table_admin_client.modify_column_families( + client.table_admin_client.modify_column_families( self._table.name, [modification]) def update(self): @@ -247,7 +247,7 @@ def update(self): client = self._table._instance._client # data it contains are the GC rule and the column family ID already # stored on this instance. - client._table_admin_client.modify_column_families( + client.table_admin_client.modify_column_families( self._table.name, [modification]) def delete(self): @@ -259,7 +259,7 @@ def delete(self): client = self._table._instance._client # data it contains are the GC rule and the column family ID already # stored on this instance. - client._table_admin_client.modify_column_families( + client.table_admin_client.modify_column_families( self._table.name, [modification]) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py index 97b90e1ab9d4..67177c040b6c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py @@ -145,7 +145,7 @@ def name(self): :rtype: str :returns: Return a fully-qualified instance string. 
""" - return self._client._instance_admin_client.instance_path( + return self._client.instance_admin_client.instance_path( project=self._client.project, instance=self.instance_id) def __eq__(self, other): @@ -165,7 +165,7 @@ def __ne__(self, other): def reload(self): """Reload the metadata for this instance.""" - instance_pb = self._client._instance_admin_client.get_instance( + instance_pb = self._client.instance_admin_client.get_instance( self.name) # NOTE: _update_from_pb does not check that the project and @@ -194,9 +194,9 @@ def create(self): """ clusters = {} cluster_id = '{}-cluster'.format(self.instance_id) - cluster_name = self._client._instance_admin_client.cluster_path( + cluster_name = self._client.instance_admin_client.cluster_path( self._client.project, self.instance_id, cluster_id) - location = self._client._instance_admin_client.location_path( + location = self._client.instance_admin_client.location_path( self._client.project, self._cluster_location_id) cluster = instance_pb2.Cluster( name=cluster_name, location=location, @@ -208,7 +208,7 @@ def create(self): clusters[cluster_id] = cluster parent = self._client.project_path - return self._client._instance_admin_client.create_instance( + return self._client.instance_admin_client.create_instance( parent=parent, instance_id=self.instance_id, instance=instance, clusters=clusters) @@ -227,7 +227,7 @@ def update(self): before calling :meth:`update`. """ type = enums.Instance.Type.TYPE_UNSPECIFIED - self._client._instance_admin_client.update_instance( + self._client.instance_admin_client.update_instance( name=self.name, display_name=self.display_name, type_=type, labels={}) @@ -253,7 +253,7 @@ def delete(self): irrevocably disappear from the API, and their data will be permanently deleted. """ - self._client._instance_admin_client.delete_instance(name=self.name) + self._client.instance_admin_client.delete_instance(name=self.name) def table(self, table_id): """Factory to create a table associated with this instance. @@ -274,7 +274,7 @@ def list_tables(self): :raises: :class:`ValueError ` if one of the returned tables has a name that is not of the expected format. """ - table_list_pb = self._client._table_admin_client.list_tables(self.name) + table_list_pb = self._client.table_admin_client.list_tables(self.name) result = [] for table_pb in table_list_pb: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row.py index 73803801249f..f900e42d4720 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row.py @@ -417,8 +417,9 @@ def commit(self): raise ValueError('%d total mutations exceed the maximum allowable ' '%d.' % (num_mutations, MAX_MUTATIONS)) + data_client = self._table._instance._client.table_data_client commit = functools.partial( - self._table._instance._client._table_data_client.mutate_row, + data_client.mutate_row, self._table.name, self._row_key, mutations_list) retry_ = retry.Retry( predicate=_retry_commit_exception, @@ -532,8 +533,8 @@ def commit(self): 'mutations and %d false mutations.' 
% ( MAX_MUTATIONS, num_true_mutations, num_false_mutations)) - client = self._table._instance._client - resp = client._table_data_client.check_and_mutate_row( + data_client = self._table._instance._client.table_data_client + resp = data_client.check_and_mutate_row( table_name=self._table.name, row_key=self._row_key,) self.clear() return resp[0].predicate_matched @@ -815,8 +816,8 @@ def commit(self): raise ValueError('%d total append mutations exceed the maximum ' 'allowable %d.' % (num_mutations, MAX_MUTATIONS)) - client = self._table._instance._client - row_response = client._table_data_client.read_modify_write_row( + data_client = self._table._instance._client.table_data_client + row_response = data_client.read_modify_write_row( table_name=self._table.name, row_key=self._row_key, rules=self._rule_pb_list) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index 22e889588d32..0e37fc28d77a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -112,7 +112,8 @@ def name(self): """ project = self._instance._client.project instance_id = self._instance.instance_id - return self._instance._client._table_admin_client.table_path( + table_client = self._instance._client.table_admin_client + return table_client.table_path( project=project, instance=instance_id, table=self.table_id) def column_family(self, column_family_id, gc_rule=None): @@ -182,16 +183,15 @@ def create(self): :class:`._generated.table_pb2.Table` but we don't use this response. """ - client = self._instance._client + table_client = self._instance._client.table_admin_client instance_name = self._instance.name - client._table_admin_client.create_table(parent=instance_name, - table_id=self.table_id, - table={}) + table_client.create_table( + parent=instance_name, table_id=self.table_id, table={}) def delete(self): """Delete this table.""" - client = self._instance._client - client._table_admin_client.delete_table(name=self.name) + table_client = self._instance._client.table_admin_client + table_client.delete_table(name=self.name) def list_column_families(self): """List the column families owned by this table. @@ -204,8 +204,8 @@ def list_column_families(self): family name from the response does not agree with the computed name from the column family ID. """ - client = self._instance._client - table_pb = client._table_admin_client.get_table(self.name) + table_client = self._instance._client.table_admin_client + table_pb = table_client.get_table(self.name) result = {} for column_family_id, value_pb in table_pb.column_families.items(): @@ -234,9 +234,8 @@ def read_row(self, row_key, filter_=None): request_pb = _create_row_request( self.name, row_key=row_key, filter_=filter_, app_profile_id=self._app_profile_id) - client = self._instance._client - rows_data = PartialRowsData(client._table_data_client._read_rows, - request_pb) + data_client = self._instance._client.table_data_client + rows_data = PartialRowsData(data_client._read_rows, request_pb) rows_data.consume_all() if rows_data.state not in (rows_data.NEW_ROW, rows_data.START): @@ -280,12 +279,11 @@ def read_rows(self, start_key=None, end_key=None, limit=None, the streamed results. 
""" request_pb = _create_row_request( - self.name, start_key=start_key, end_key=end_key, filter_=filter_, - limit=limit, end_inclusive=end_inclusive, + self.name, start_key=start_key, end_key=end_key, + filter_=filter_, limit=limit, end_inclusive=end_inclusive, app_profile_id=self._app_profile_id) - client = self._instance._client - return PartialRowsData(client._table_data_client._read_rows, - request_pb) + data_client = self._instance._client.table_data_client + return PartialRowsData(data_client._read_rows, request_pb) def yield_rows(self, start_key=None, end_key=None, limit=None, filter_=None): @@ -317,9 +315,9 @@ def yield_rows(self, start_key=None, end_key=None, limit=None, request_pb = _create_row_request( self.name, start_key=start_key, end_key=end_key, filter_=filter_, limit=limit, app_profile_id=self._app_profile_id) - client = self._instance._client - generator = YieldRowsData(client._table_data_client._read_rows, - request_pb) + data_client = self._instance._client.table_data_client + generator = YieldRowsData(data_client._read_rows, request_pb) + for row in generator.read_rows(): yield row @@ -388,9 +386,10 @@ def sample_row_keys(self): or by casting to a :class:`list` and can be cancelled by calling ``cancel()``. """ - client = self._instance._client - response_iterator = client._table_data_client.sample_row_keys( + data_client = self._instance._client.table_data_client + response_iterator = data_client.sample_row_keys( self.name, app_profile_id=self._app_profile_id) + return response_iterator @@ -478,8 +477,8 @@ def _do_mutate_retryable_rows(self): mutate_rows_request = _mutate_rows_request( self.table_name, retryable_rows, app_profile_id=self.app_profile_id) - responses = self.client._table_data_client._mutate_rows( - mutate_rows_request, retry=None) + data_client = self.client.table_data_client + responses = data_client._mutate_rows(mutate_rows_request, retry=None) num_responses = 0 num_retryable_responses = 0 From b5e1c0d77f014c9c576b2df48d478f15ac257711 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Thu, 7 Jun 2018 13:08:57 -0400 Subject: [PATCH 135/892] Avoid sharing table names across unrelated systests. (#5421) --- .../google-cloud-bigtable/tests/system.py | 30 ++++--------------- 1 file changed, 5 insertions(+), 25 deletions(-) diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index b7896d19e294..9e8329bb226b 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -226,18 +226,14 @@ def tearDown(self): for table in self.tables_to_delete: table.delete() - @pytest.mark.xfail(reason="https://github.com/GoogleCloudPlatform/" - "google-cloud-python/issues/5362") def test_list_tables(self): # Since `Config.INSTANCE` is newly created in `setUpModule`, the table # created in `setUpClass` here will be the only one. 
tables = Config.INSTANCE.list_tables() self.assertEqual(tables, [self._table]) - @pytest.mark.xfail(reason="https://github.com/GoogleCloudPlatform/" - "google-cloud-python/issues/5362") def test_create_table(self): - temp_table_id = 'foo-bar-baz-table' + temp_table_id = 'test-create-table' temp_table = Config.INSTANCE.table(temp_table_id) temp_table.create() self.tables_to_delete.append(temp_table) @@ -252,10 +248,8 @@ def test_create_table(self): sorted_tables = sorted(tables, key=name_attr) self.assertEqual(sorted_tables, expected_tables) - @pytest.mark.xfail(reason="https://github.com/GoogleCloudPlatform/" - "google-cloud-python/issues/5362") def test_create_column_family(self): - temp_table_id = 'foo-bar-baz-table' + temp_table_id = 'test-create-column-family' temp_table = Config.INSTANCE.table(temp_table_id) temp_table.create() self.tables_to_delete.append(temp_table) @@ -275,10 +269,8 @@ def test_create_column_family(self): column_family.column_family_id) self.assertEqual(retrieved_col_fam.gc_rule, gc_rule) - @pytest.mark.xfail(reason="https://github.com/GoogleCloudPlatform/" - "google-cloud-python/issues/5362") def test_update_column_family(self): - temp_table_id = 'foo-bar-baz-table' + temp_table_id = 'test-update-column-family' temp_table = Config.INSTANCE.table(temp_table_id) temp_table.create() self.tables_to_delete.append(temp_table) @@ -300,10 +292,8 @@ def test_update_column_family(self): col_fams = temp_table.list_column_families() self.assertIsNone(col_fams[COLUMN_FAMILY_ID1].gc_rule) - @pytest.mark.xfail(reason="https://github.com/GoogleCloudPlatform/" - "google-cloud-python/issues/5362") def test_delete_column_family(self): - temp_table_id = 'foo-bar-baz-table' + temp_table_id = 'test-delete-column-family' temp_table = Config.INSTANCE.table(temp_table_id) temp_table.create() self.tables_to_delete.append(temp_table) @@ -325,7 +315,7 @@ class TestDataAPI(unittest.TestCase): @classmethod def setUpClass(cls): - cls._table = table = Config.INSTANCE.table(TABLE_ID) + cls._table = table = Config.INSTANCE.table('test-data-api') table.create() table.column_family(COLUMN_FAMILY_ID1).create() table.column_family(COLUMN_FAMILY_ID2).create() @@ -385,8 +375,6 @@ def _write_to_row(self, row1=None, row2=None, row3=None, row4=None): cell4 = Cell(CELL_VAL4, timestamp4_micros) return cell1, cell2, cell3, cell4 - @pytest.mark.xfail(reason="https://github.com/GoogleCloudPlatform/" - "google-cloud-python/issues/5362") def test_timestamp_filter_millisecond_granularity(self): from google.cloud.bigtable import row_filters @@ -397,8 +385,6 @@ def test_timestamp_filter_millisecond_granularity(self): row_data = self._table.read_rows(filter_=timefilter) row_data.consume_all() - @pytest.mark.xfail(reason="https://github.com/GoogleCloudPlatform/" - "google-cloud-python/issues/5362") def test_mutate_rows(self): row1 = self._table.row(ROW_KEY) row1.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1) @@ -445,8 +431,6 @@ def test_read_large_cell_limit(self): self.assertEqual(len(column), 1) self.assertEqual(column[0].value, data) - @pytest.mark.xfail(reason="https://github.com/GoogleCloudPlatform/" - "google-cloud-python/issues/5362") def test_read_row(self): row = self._table.row(ROW_KEY) self.rows_to_delete.append(row) @@ -471,8 +455,6 @@ def test_read_row(self): } self.assertEqual(partial_row_data.cells, expected_row_contents) - @pytest.mark.xfail(reason="https://github.com/GoogleCloudPlatform/" - "google-cloud-python/issues/5362") def test_read_rows(self): row = self._table.row(ROW_KEY) row_alt = 
self._table.row(ROW_KEY_ALT) @@ -518,8 +500,6 @@ def test_read_rows(self): } self.assertEqual(rows_data.rows, expected_rows) - @pytest.mark.xfail(reason="https://github.com/GoogleCloudPlatform/" - "google-cloud-python/issues/5362") def test_read_with_label_applied(self): self._maybe_emulator_skip('Labels not supported by Bigtable emulator') row = self._table.row(ROW_KEY) From bd29c92fd45166bc517b8f5af5decb1e505da053 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Thu, 7 Jun 2018 13:28:29 -0400 Subject: [PATCH 136/892] Make 'Client.list_instances' return actual instance objects, not protos. (#5420) --- .../google/cloud/bigtable/client.py | 13 ++++++++++--- packages/google-cloud-bigtable/tests/system.py | 18 +++++++----------- .../tests/unit/test_client.py | 18 ++++++++++++++---- 3 files changed, 31 insertions(+), 18 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py index 7d509170f8b9..b81ba6dd7d7e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py @@ -222,7 +222,14 @@ def instance(self, instance_id, location=_EXISTING_INSTANCE_LOCATION_ID, def list_instances(self): """List instances owned by the project. - :rtype: :class:`~google.api_core.page_iterator.Iterator` - :returns: A list of Instance. + :rtype: tuple + :returns: + (instances, failed_locations), where 'instances' is list of + :class:`google.cloud.bigtable.instance.Instance`, and + 'failed_locations' is a list of locations which could not + be resolved. """ - return self.instance_admin_client.list_instances(self.project_path) + resp = self.instance_admin_client.list_instances(self.project_path) + instances = [ + Instance.from_pb(instance, self) for instance in resp.instances] + return instances, resp.failed_locations diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index 9e8329bb226b..2a1bbdc7d306 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -91,13 +91,12 @@ def setUpModule(): if not Config.IN_EMULATOR: retry = RetryErrors(GrpcRendezvous, error_predicate=_retry_on_unavailable) + instances, failed_locations = retry(Config.CLIENT.list_instances)() - instances_response = retry(Config.CLIENT.list_instances)() - - if len(instances_response.failed_locations) != 0: + if len(failed_locations) != 0: raise ValueError('List instances failed in module set up.') - EXISTING_INSTANCES[:] = instances_response.instances + EXISTING_INSTANCES[:] = instances # After listing, create the test instance. created_op = Config.INSTANCE.create() @@ -121,15 +120,12 @@ def tearDown(self): for instance in self.instances_to_delete: instance.delete() - @pytest.mark.xfail(reason="https://github.com/GoogleCloudPlatform/" - "google-cloud-python/issues/5362") def test_list_instances(self): - instances_response = Config.CLIENT.list_instances() - self.assertEqual(instances_response.failed_locations, []) + instances, failed_locations = Config.CLIENT.list_instances() + self.assertEqual(failed_locations, []) # We have added one new instance in `setUpModule`. 
- self.assertEqual(len(instances_response.instances), - len(EXISTING_INSTANCES) + 1) - for instance in instances_response.instances: + self.assertEqual(len(instances), len(EXISTING_INSTANCES) + 1) + for instance in instances: instance_existence = (instance in EXISTING_INSTANCES or instance == Config.INSTANCE) self.assertTrue(instance_existence) diff --git a/packages/google-cloud-bigtable/tests/unit/test_client.py b/packages/google-cloud-bigtable/tests/unit/test_client.py index 8efbbbba4cdf..5a2db9e830e4 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/test_client.py @@ -169,6 +169,7 @@ def test_list_instances(self): bigtable_instance_admin_pb2 as messages_v2_pb2) from google.cloud.bigtable_admin_v2.gapic import \ bigtable_instance_admin_client + from google.cloud.bigtable.instance import Instance FAILED_LOCATION = 'FAILED' INSTANCE_ID1 = 'instance-id1' @@ -201,8 +202,6 @@ def test_list_instances(self): ], ) - expected_result = response_pb - # Patch the stub used by the API method. client._instance_admin_client = api bigtable_instance_stub = ( @@ -210,5 +209,16 @@ def test_list_instances(self): bigtable_instance_stub.ListInstances.side_effect = [response_pb] # Perform the method and check the result. - response = client.list_instances() - self.assertEqual(response, expected_result) + instances, failed_locations = client.list_instances() + + instance_1, instance_2 = instances + + self.assertIsInstance(instance_1, Instance) + self.assertEqual(instance_1.name, INSTANCE_NAME1) + self.assertTrue(instance_1._client is client) + + self.assertIsInstance(instance_2, Instance) + self.assertEqual(instance_2.name, INSTANCE_NAME2) + self.assertTrue(instance_2._client is client) + + self.assertEqual(failed_locations, [FAILED_LOCATION]) From a6617f85be4f8fd54e2861e9d2e49f7f4939a422 Mon Sep 17 00:00:00 2001 From: Aneep Tandel Date: Mon, 11 Jun 2018 23:59:48 +0530 Subject: [PATCH 137/892] BigTable: Add truncate table and drop by prefix on top of GAPIC integration (#5360) --- .../google/cloud/bigtable/table.py | 48 ++++++ .../google-cloud-bigtable/tests/system.py | 43 +++++ .../tests/unit/test_table.py | 150 +++++++++++++++--- 3 files changed, 217 insertions(+), 24 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index 0e37fc28d77a..773dcf0b1a77 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -392,6 +392,54 @@ def sample_row_keys(self): return response_iterator + def truncate(self, timeout=None): + """Truncate the table + + :type timeout: float + :param timeout: (Optional) The amount of time, in seconds, to wait + for the request to complete. + + :raise: google.api_core.exceptions.GoogleAPICallError: If the + request failed for any reason. + google.api_core.exceptions.RetryError: If the request failed + due to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. 
+ """ + client = self._instance._client + table_admin_client = client.table_admin_client + if timeout: + table_admin_client.drop_row_range( + self.name, delete_all_data_from_table=True, timeout=timeout) + else: + table_admin_client.drop_row_range( + self.name, delete_all_data_from_table=True) + + def drop_by_prefix(self, row_key_prefix, timeout=None): + """ + :type row_prefix: bytes + :param row_prefix: Delete all rows that start with this row key + prefix. Prefix cannot be zero length. + + :type timeout: float + :param timeout: (Optional) The amount of time, in seconds, to wait + for the request to complete. + + :raise: google.api_core.exceptions.GoogleAPICallError: If the + request failed for any reason. + google.api_core.exceptions.RetryError: If the request failed + due to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + client = self._instance._client + table_admin_client = client.table_admin_client + if timeout: + table_admin_client.drop_row_range( + self.name, row_key_prefix=_to_bytes(row_key_prefix), + timeout=timeout) + else: + table_admin_client.drop_row_range( + self.name, row_key_prefix=_to_bytes(row_key_prefix)) + class _RetryableMutateRowsWorker(object): """A callable worker that can retry to mutate rows with transient errors. diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index 2a1bbdc7d306..9ff868404c07 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -408,6 +408,49 @@ def test_mutate_rows(self): self.assertEqual( row2_data.cells[COLUMN_FAMILY_ID1][COL_NAME1][0].value, CELL_VAL4) + def test_truncate_table(self): + row_keys = [ + b'row_key_1', b'row_key_2', b'row_key_3', b'row_key_4', + b'row_key_5', b'row_key_pr_1', b'row_key_pr_2', b'row_key_pr_3', + b'row_key_pr_4', b'row_key_pr_5'] + + for row_key in row_keys: + row = self._table.row(row_key) + row.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1) + row.commit() + self.rows_to_delete.append(row) + + self._table.truncate(timeout=200) + + read_rows = self._table.yield_rows() + + for row in read_rows: + self.assertNotIn(row.row_key.decode('utf-8'), row_keys) + + def test_drop_by_prefix_table(self): + row_keys = [ + b'row_key_1', b'row_key_2', b'row_key_3', b'row_key_4', + b'row_key_5', b'row_key_pr_1', b'row_key_pr_2', b'row_key_pr_3', + b'row_key_pr_4', b'row_key_pr_5'] + + for row_key in row_keys: + row = self._table.row(row_key) + row.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1) + row.commit() + self.rows_to_delete.append(row) + + self._table.drop_by_prefix(row_key_prefix='row_key_pr', timeout=200) + + read_rows = self._table.yield_rows() + expected_rows_count = 5 + read_rows_count = 0 + + for row in read_rows: + if row.row_key.decode('utf-8') in row_keys: + read_rows_count += 1 + + self.assertEqual(expected_rows_count, read_rows_count) + @pytest.mark.xfail(reason="https://github.com/GoogleCloudPlatform/" "google-cloud-python/issues/5362") def test_read_large_cell_limit(self): diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index 485d22278033..84d48b03a892 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -258,11 +258,10 @@ def _create_test_helper(self): from google.cloud.bigtable_admin_v2.gapic import ( bigtable_instance_admin_client, bigtable_table_admin_client) - table_api = 
bigtable_table_admin_client.BigtableTableAdminClient( - mock.Mock()) - instance_api = ( - bigtable_instance_admin_client.BigtableInstanceAdminClient( - mock.Mock())) + table_api = mock.create_autospec( + bigtable_table_admin_client.BigtableTableAdminClient) + instance_api = mock.create_autospec( + bigtable_instance_admin_client.BigtableInstanceAdminClient) credentials = _make_credentials() client = self._make_client(project='project-id', credentials=credentials, admin=True) @@ -287,8 +286,8 @@ def test_delete(self): from google.cloud.bigtable_admin_v2.gapic import ( bigtable_table_admin_client) - api = bigtable_table_admin_client.BigtableTableAdminClient( - mock.Mock()) + table_api = mock.create_autospec( + bigtable_table_admin_client.BigtableTableAdminClient) credentials = _make_credentials() client = self._make_client(project='project-id', credentials=credentials, admin=True) @@ -296,7 +295,7 @@ def test_delete(self): table = self._make_one(self.TABLE_ID, instance) # Patch API calls - client._table_admin_client = api + client._table_admin_client = table_api # Create expected_result. expected_result = None # delete() has no return value. @@ -309,7 +308,7 @@ def _list_column_families_helper(self): from google.cloud.bigtable_admin_v2.gapic import ( bigtable_table_admin_client) - api = bigtable_table_admin_client.BigtableTableAdminClient( + table_api = bigtable_table_admin_client.BigtableTableAdminClient( mock.Mock()) credentials = _make_credentials() client = self._make_client(project='project-id', @@ -325,7 +324,7 @@ def _list_column_families_helper(self): ) # Patch the stub used by the API method. - client._table_admin_client = api + client._table_admin_client = table_api bigtable_table_stub = ( client._table_admin_client.bigtable_table_admin_stub) bigtable_table_stub.GetTable.side_effect = [response_pb] @@ -350,8 +349,8 @@ def _read_row_helper(self, chunks, expected_result, app_profile_id=None): bigtable_table_admin_client) data_api = bigtable_client.BigtableClient(mock.Mock()) - table_api = bigtable_table_admin_client.BigtableTableAdminClient( - mock.Mock()) + table_api = mock.create_autospec( + bigtable_table_admin_client.BigtableTableAdminClient) credentials = _make_credentials() client = self._make_client(project='project-id', credentials=credentials, admin=True) @@ -436,13 +435,13 @@ def test_mutate_rows(self): from google.cloud.bigtable_admin_v2.gapic import ( bigtable_table_admin_client) - api = bigtable_table_admin_client.BigtableTableAdminClient( - mock.Mock()) + table_api = mock.create_autospec( + bigtable_table_admin_client.BigtableTableAdminClient) credentials = _make_credentials() client = self._make_client(project='project-id', credentials=credentials, admin=True) instance = client.instance(instance_id=self.INSTANCE_ID) - client._table_admin_client = api + client._table_admin_client = table_api table = self._make_one(self.TABLE_ID, instance) response = [Status(code=0), Status(code=1)] @@ -466,8 +465,8 @@ def test_read_rows(self): bigtable_table_admin_client) data_api = bigtable_client.BigtableClient(mock.Mock()) - table_api = bigtable_table_admin_client.BigtableTableAdminClient( - mock.Mock()) + table_api = mock.create_autospec( + bigtable_table_admin_client.BigtableTableAdminClient) credentials = _make_credentials() client = self._make_client(project='project-id', credentials=credentials, admin=True) @@ -596,6 +595,109 @@ def test_sample_row_keys(self): result = table.sample_row_keys() self.assertEqual(result[0], expected_result) + def test_truncate(self): + from 
google.cloud.bigtable_v2.gapic import bigtable_client + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_table_admin_client) + + data_api = mock.create_autospec(bigtable_client.BigtableClient) + table_api = mock.create_autospec( + bigtable_table_admin_client.BigtableTableAdminClient) + credentials = _make_credentials() + client = self._make_client(project='project-id', + credentials=credentials, admin=True) + client._table_data_client = data_api + client._table_admin_client = table_api + instance = client.instance(instance_id=self.INSTANCE_ID) + table = self._make_one(self.TABLE_ID, instance) + table.name.return_value = client._table_data_client.table_path( + self.PROJECT_ID, self.INSTANCE_ID, self.TABLE_ID) + + expected_result = None # truncate() has no return value. + with mock.patch('google.cloud.bigtable.table.Table.name', + new=self.TABLE_NAME): + result = table.truncate() + + table_api.drop_row_range.assert_called_once_with( + name=self.TABLE_NAME, + delete_all_data_from_table=True, + ) + + self.assertEqual(result, expected_result) + + def test_truncate_w_timeout(self): + from google.cloud.bigtable_v2.gapic import bigtable_client + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_table_admin_client) + + data_api = mock.create_autospec(bigtable_client.BigtableClient) + table_api = mock.create_autospec( + bigtable_table_admin_client.BigtableTableAdminClient) + credentials = _make_credentials() + client = self._make_client(project='project-id', + credentials=credentials, admin=True) + client._table_data_client = data_api + client._table_admin_client = table_api + instance = client.instance(instance_id=self.INSTANCE_ID) + table = self._make_one(self.TABLE_ID, instance) + + expected_result = None # truncate() has no return value. + + timeout = 120 + result = table.truncate(timeout=timeout) + + self.assertEqual(result, expected_result) + + def test_drop_by_prefix(self): + from google.cloud.bigtable_v2.gapic import bigtable_client + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_table_admin_client) + + data_api = mock.create_autospec(bigtable_client.BigtableClient) + table_api = mock.create_autospec( + bigtable_table_admin_client.BigtableTableAdminClient) + credentials = _make_credentials() + client = self._make_client(project='project-id', + credentials=credentials, admin=True) + client._table_data_client = data_api + client._table_admin_client = table_api + instance = client.instance(instance_id=self.INSTANCE_ID) + table = self._make_one(self.TABLE_ID, instance) + + expected_result = None # drop_by_prefix() has no return value. + + row_key_prefix = 'row-key-prefix' + + result = table.drop_by_prefix(row_key_prefix=row_key_prefix) + + self.assertEqual(result, expected_result) + + def test_drop_by_prefix_w_timeout(self): + from google.cloud.bigtable_v2.gapic import bigtable_client + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_table_admin_client) + + data_api = mock.create_autospec(bigtable_client.BigtableClient) + table_api = mock.create_autospec( + bigtable_table_admin_client.BigtableTableAdminClient) + credentials = _make_credentials() + client = self._make_client(project='project-id', + credentials=credentials, admin=True) + client._table_data_client = data_api + client._table_admin_client = table_api + instance = client.instance(instance_id=self.INSTANCE_ID) + table = self._make_one(self.TABLE_ID, instance) + + expected_result = None # drop_by_prefix() has no return value. 
+ + row_key_prefix = 'row-key-prefix' + + timeout = 120 + result = table.drop_by_prefix(row_key_prefix=row_key_prefix, + timeout=timeout) + + self.assertEqual(result, expected_result) + class Test__RetryableMutateRowsWorker(unittest.TestCase): from grpc import StatusCode @@ -660,9 +762,9 @@ def test_callable_empty_rows(self): from google.cloud.bigtable_admin_v2.gapic import ( bigtable_table_admin_client) - data_api = bigtable_client.BigtableClient(mock.Mock()) - table_api = bigtable_table_admin_client.BigtableTableAdminClient( - mock.Mock()) + data_api = mock.create_autospec(bigtable_client.BigtableClient) + table_api = mock.create_autospec( + bigtable_table_admin_client.BigtableTableAdminClient) credentials = _make_credentials() client = self._make_client(project='project-id', credentials=credentials, admin=True) @@ -841,8 +943,8 @@ def test_do_mutate_retryable_rows_empty_rows(self): from google.cloud.bigtable_admin_v2.gapic import ( bigtable_table_admin_client) - table_api = bigtable_table_admin_client.BigtableTableAdminClient( - mock.Mock()) + table_api = mock.create_autospec( + bigtable_table_admin_client.BigtableTableAdminClient) credentials = _make_credentials() client = self._make_client(project='project-id', credentials=credentials, admin=True) @@ -1092,8 +1194,8 @@ def test_do_mutate_retryable_rows_second_try_no_retryable(self): # Expectation: # - After second try: [success, non-retryable] - table_api = bigtable_table_admin_client.BigtableTableAdminClient( - mock.Mock()) + table_api = mock.create_autospec( + bigtable_table_admin_client.BigtableTableAdminClient) credentials = _make_credentials() client = self._make_client(project='project-id', credentials=credentials, admin=True) From 54207a2e76249b51795ae312db683c52909de771 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 12 Jun 2018 12:09:00 -0400 Subject: [PATCH 138/892] Fix Py3 breakage in new system test. (#5474) Added in PR #5360. --- packages/google-cloud-bigtable/tests/system.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index 9ff868404c07..2dd9d050c02b 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -446,7 +446,7 @@ def test_drop_by_prefix_table(self): read_rows_count = 0 for row in read_rows: - if row.row_key.decode('utf-8') in row_keys: + if row.row_key in row_keys: read_rows_count += 1 self.assertEqual(expected_rows_count, read_rows_count) From c5bc1bff4cb7f6e5de868b632a622a1705c5bb08 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 12 Jun 2018 15:38:06 -0400 Subject: [PATCH 139/892] Harden 'test_list_instances' against simultaneous test runs. (#5476) --- packages/google-cloud-bigtable/tests/system.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index 2dd9d050c02b..8f48a30af110 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -121,14 +121,14 @@ def tearDown(self): instance.delete() def test_list_instances(self): + expected = set([instance.name for instance in EXISTING_INSTANCES]) + expected.add(Config.INSTANCE.name) + instances, failed_locations = Config.CLIENT.list_instances() + self.assertEqual(failed_locations, []) - # We have added one new instance in `setUpModule`. 
- self.assertEqual(len(instances), len(EXISTING_INSTANCES) + 1) - for instance in instances: - instance_existence = (instance in EXISTING_INSTANCES or - instance == Config.INSTANCE) - self.assertTrue(instance_existence) + found = set([instance.name for instance in instances]) + self.assertTrue(expected.issubset(found)) def test_reload(self): # Use same arguments as Config.INSTANCE (created in `setUpModule`) From 34d91f76fb4ec588918a083301274c65f6fe0900 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Fri, 15 Jun 2018 16:51:18 -0400 Subject: [PATCH 140/892] Override gRPC max message lengths. (#5498) Remove 'xfail' for the large cell test which needs the override. Closes #5362. * Lint. --- .../google/cloud/bigtable_v2/gapic/bigtable_client.py | 4 ++++ packages/google-cloud-bigtable/tests/system.py | 4 ---- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py index 60eb063290b7..5172fb50ee79 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py @@ -97,6 +97,10 @@ def __init__(self, self.SERVICE_ADDRESS, credentials=credentials, scopes=self._DEFAULT_SCOPES, + options={ + 'grpc.max_send_message_length': -1, + 'grpc.max_receive_message_length': -1, + }.items(), ) # Create the gRPC stubs. diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index 8f48a30af110..47a8095f19a7 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -35,8 +35,6 @@ from test_utils.system import EmulatorCreds from test_utils.system import unique_resource_id -import pytest - LOCATION_ID = 'us-central1-c' INSTANCE_ID = 'g-c-p' + unique_resource_id('-') TABLE_ID = 'google-cloud-python-test-table' @@ -451,8 +449,6 @@ def test_drop_by_prefix_table(self): self.assertEqual(expected_rows_count, read_rows_count) - @pytest.mark.xfail(reason="https://github.com/GoogleCloudPlatform/" - "google-cloud-python/issues/5362") def test_read_large_cell_limit(self): row = self._table.row(ROW_KEY) self.rows_to_delete.append(row) From e6cc4cbb2bf6ec83f2f483133a4f30a7dd69d14b Mon Sep 17 00:00:00 2001 From: Solomon Duskis Date: Wed, 27 Jun 2018 13:12:16 -0400 Subject: [PATCH 141/892] Bigtable: Improve testing of create instance (#5544) Adds checks to make sure that the create_instance requests are as expected. 
--- .../tests/unit/test_instance.py | 80 ++++++++++++++++--- 1 file changed, 67 insertions(+), 13 deletions(-) diff --git a/packages/google-cloud-bigtable/tests/unit/test_instance.py b/packages/google-cloud-bigtable/tests/unit/test_instance.py index 46bb52799b20..bead53c280a1 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_instance.py +++ b/packages/google-cloud-bigtable/tests/unit/test_instance.py @@ -268,14 +268,13 @@ def test_create(self): from google.cloud.bigtable_admin_v2.proto import ( bigtable_instance_admin_pb2 as messages_v2_pb2) from google.cloud._helpers import _datetime_to_pb_timestamp - from tests.unit._testing import _FakeStub + from google.cloud.bigtable_admin_v2 import enums from google.cloud.bigtable_admin_v2.gapic import ( bigtable_instance_admin_client) + from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES NOW = datetime.datetime.utcnow() NOW_PB = _datetime_to_pb_timestamp(NOW) - api = bigtable_instance_admin_client.BigtableInstanceAdminClient( - mock.Mock()) credentials = _make_credentials() client = self._make_client(project=self.PROJECT, credentials=credentials, admin=True) @@ -295,13 +294,26 @@ def test_create(self): ) # Patch the stub used by the API method. - stub = _FakeStub(response_pb) - client._instance_admin_client = api - client._instance_admin_client.bigtable_instance_admin_stub = stub + channel = ChannelStub(responses=[response_pb]) + instance_api = ( + bigtable_instance_admin_client.BigtableInstanceAdminClient( + channel=channel)) + client._instance_admin_client = instance_api # Perform the method and check the result. result = instance.create() + actual_request = channel.requests[0][1] + + cluster_id = '{}-cluster'.format(self.INSTANCE_ID) + cluster = self._create_cluster( + instance_api, cluster_id, self.LOCATION_ID, DEFAULT_SERVE_NODES, + enums.StorageType.STORAGE_TYPE_UNSPECIFIED) + expected_request = self._create_instance_request( + self.DISPLAY_NAME, + {cluster_id: cluster} + ) + self.assertEqual(expected_request, actual_request) self.assertIsInstance(result, operation.Operation) # self.assertEqual(result.operation.name, self.OP_NAME) self.assertIsInstance(result.metadata, @@ -310,30 +322,72 @@ def test_create(self): def test_create_w_explicit_serve_nodes(self): from google.api_core import operation from google.longrunning import operations_pb2 - from tests.unit._testing import _FakeStub + from google.cloud.bigtable_admin_v2 import enums from google.cloud.bigtable_admin_v2.gapic import ( bigtable_instance_admin_client) - api = bigtable_instance_admin_client.BigtableInstanceAdminClient( - mock.Mock()) + serve_nodes = 10 credentials = _make_credentials() client = self._make_client(project=self.PROJECT, credentials=credentials, admin=True) - instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) + instance = self._make_one( + self.INSTANCE_ID, client, location_id=self.LOCATION_ID, + display_name=self.DISPLAY_NAME, serve_nodes=serve_nodes, + default_storage_type=enums.StorageType.SSD) # Create response_pb response_pb = operations_pb2.Operation(name=self.OP_NAME) # Patch the stub used by the API method. - stub = _FakeStub(response_pb) - client._instance_admin_client = api - client._instance_admin_client.bigtable_instance_admin_stub = stub + channel = ChannelStub(responses=[response_pb]) + instance_api = ( + bigtable_instance_admin_client.BigtableInstanceAdminClient( + channel=channel)) + client._instance_admin_client = instance_api # Perform the method and check the result. 
result = instance.create() + actual_request = channel.requests[0][1] + cluster_id = '{}-cluster'.format(self.INSTANCE_ID) + cluster = self._create_cluster( + instance_api, cluster_id, self.LOCATION_ID, serve_nodes, + enums.StorageType.SSD) + + expected_request = self._create_instance_request( + self.DISPLAY_NAME, + {cluster_id: cluster} + ) + self.assertEqual(expected_request, actual_request) self.assertIsInstance(result, operation.Operation) + def _create_cluster(self, instance_api, cluster_id, location_id, + server_nodes, storage_type): + from google.cloud.bigtable_admin_v2.types import instance_pb2 + + cluster_name = instance_api.cluster_path( + self.PROJECT, self.INSTANCE_ID, cluster_id) + location = instance_api.location_path( + self.PROJECT, location_id) + return instance_pb2.Cluster( + name=cluster_name, location=location, + serve_nodes=server_nodes, + default_storage_type=storage_type) + + def _create_instance_request(self, display_name, clusters): + from google.cloud.bigtable_admin_v2.proto import ( + bigtable_instance_admin_pb2 as messages_v2_pb2) + from google.cloud.bigtable_admin_v2.types import instance_pb2 + + instance = instance_pb2.Instance(display_name=display_name) + + return messages_v2_pb2.CreateInstanceRequest( + parent='projects/%s' % (self.PROJECT), + instance_id=self.INSTANCE_ID, + instance=instance, + clusters=clusters + ) + def test_update(self): from google.cloud.bigtable_admin_v2.gapic import ( bigtable_instance_admin_client) From 36eb774aaa705582448ca192d57a22409e82c488 Mon Sep 17 00:00:00 2001 From: Solomon Duskis Date: Wed, 27 Jun 2018 14:34:26 -0400 Subject: [PATCH 142/892] Bigtable: Instance creation cleanup. (#5542) A Cloud Bigtable instance is a collection of clusters. Each cluster has a node count and a location. The python Cloud Bigtable client needs a bit of refactoring to better represent the distinction between instances and clusters. Long term, 'Instance.create()' should take in a complete set of variables required for creating an [instance](https://github.com/googleapis/googleapis/blob/c89b6330b6386298f8ea65e47e0b77b28294e3e7/google/bigtable/admin/v2/instance.proto#L34) and a collection of [clusters](https://github.com/googleapis/googleapis/blob/c89b6330b6386298f8ea65e47e0b77b28294e3e7/google/bigtable/admin/v2/instance.proto#L104). --- .../google/cloud/bigtable/client.py | 11 +-- .../google/cloud/bigtable/instance.py | 58 ++++++++-------- .../google-cloud-bigtable/tests/system.py | 14 ++-- .../tests/unit/test_client.py | 5 +- .../tests/unit/test_instance.py | 69 +++++++++---------- 5 files changed, 70 insertions(+), 87 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py index b81ba6dd7d7e..6c4178e1daad 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py @@ -36,7 +36,6 @@ from google.cloud.bigtable import __version__ from google.cloud.bigtable.instance import Instance -from google.cloud.bigtable.instance import _EXISTING_INSTANCE_LOCATION_ID from google.cloud.client import ClientWithProject @@ -196,18 +195,12 @@ def instance_admin_client(self): return self._instance_admin_client - def instance(self, instance_id, location=_EXISTING_INSTANCE_LOCATION_ID, - display_name=None): + def instance(self, instance_id, display_name=None): """Factory to create a instance associated with this client. :type instance_id: str :param instance_id: The ID of the instance. 
- :type location: str - :param location: location name, in form - ``projects//locations/``; used to - set up the instance's cluster. - :type display_name: str :param display_name: (Optional) The display name for the instance in the Cloud Console UI. (Must be between 4 and 30 @@ -217,7 +210,7 @@ def instance(self, instance_id, location=_EXISTING_INSTANCE_LOCATION_ID, :rtype: :class:`~google.cloud.bigtable.instance.Instance` :returns: an instance owned by this client. """ - return Instance(instance_id, self, location, display_name=display_name) + return Instance(instance_id, self, display_name=display_name) def list_instances(self): """List instances owned by the project. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py index 67177c040b6c..37c0a96887dc 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py @@ -56,41 +56,17 @@ class Instance(object): :param client: The client that owns the instance. Provides authorization and a project ID. - :type location_id: str - :param location_id: ID of the location in which the instance will be - created. Required for instances which do not yet - exist. - :type display_name: str :param display_name: (Optional) The display name for the instance in the Cloud Console UI. (Must be between 4 and 30 characters.) If this value is not set in the constructor, will fall back to the instance ID. - - :type serve_nodes: int - :param serve_nodes: (Optional) The number of nodes in the instance's - cluster; used to set up the instance's cluster. - - :type default_storage_type: int - :param default_storage_type: (Optional) The default values are - STORAGE_TYPE_UNSPECIFIED = 0: The user did - not specify a storage type. - SSD = 1: Flash (SSD) storage should be - used. - HDD = 2: Magnetic drive (HDD) storage - should be used. """ - def __init__(self, instance_id, client, - location_id=_EXISTING_INSTANCE_LOCATION_ID, - display_name=None, serve_nodes=DEFAULT_SERVE_NODES, - default_storage_type=_STORAGE_TYPE_UNSPECIFIED): + def __init__(self, instance_id, client, display_name=None): self.instance_id = instance_id self.display_name = display_name or instance_id - self._cluster_location_id = location_id - self._cluster_serve_nodes = serve_nodes self._client = client - self._default_storage_type = default_storage_type @classmethod def from_pb(cls, instance_pb, client): @@ -119,7 +95,8 @@ def from_pb(cls, instance_pb, client): 'project ID on the client') instance_id = match.group('instance_id') - result = cls(instance_id, client, _EXISTING_INSTANCE_LOCATION_ID) + result = cls(instance_id, client, + display_name=instance_pb.display_name) return result def _update_from_pb(self, instance_pb): @@ -172,7 +149,9 @@ def reload(self): # instance ID on the response match the request. self._update_from_pb(instance_pb) - def create(self): + def create(self, location_id=_EXISTING_INSTANCE_LOCATION_ID, + serve_nodes=DEFAULT_SERVE_NODES, + default_storage_type=_STORAGE_TYPE_UNSPECIFIED): """Create this instance. .. note:: @@ -188,6 +167,25 @@ def create(self): before calling :meth:`create`. + :type location_id: str + :param location_id: ID of the location in which the instance will be + created. Required for instances which do not yet + exist. + + + :type serve_nodes: int + :param serve_nodes: (Optional) The number of nodes in the instance's + cluster; used to set up the instance's cluster. 
+ + :type default_storage_type: int + :param default_storage_type: (Optional) The default values are + STORAGE_TYPE_UNSPECIFIED = 0: The user + did not specify a storage type. + SSD = 1: Flash (SSD) storage should be + used. + HDD = 2: Magnetic drive (HDD) storage + should be used. + :rtype: :class:`~google.api_core.operation.Operation` :returns: The long-running operation corresponding to the create operation. @@ -197,11 +195,11 @@ def create(self): cluster_name = self._client.instance_admin_client.cluster_path( self._client.project, self.instance_id, cluster_id) location = self._client.instance_admin_client.location_path( - self._client.project, self._cluster_location_id) + self._client.project, location_id) cluster = instance_pb2.Cluster( name=cluster_name, location=location, - serve_nodes=self._cluster_serve_nodes, - default_storage_type=self._default_storage_type) + serve_nodes=serve_nodes, + default_storage_type=default_storage_type) instance = instance_pb2.Instance( display_name=self.display_name ) diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index 47a8095f19a7..ec27a2d6d29c 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -84,7 +84,7 @@ def setUpModule(): else: Config.CLIENT = Client(admin=True) - Config.INSTANCE = Config.CLIENT.instance(INSTANCE_ID, LOCATION_ID) + Config.INSTANCE = Config.CLIENT.instance(INSTANCE_ID) if not Config.IN_EMULATOR: retry = RetryErrors(GrpcRendezvous, @@ -97,7 +97,7 @@ def setUpModule(): EXISTING_INSTANCES[:] = instances # After listing, create the test instance. - created_op = Config.INSTANCE.create() + created_op = Config.INSTANCE.create(location_id=LOCATION_ID) created_op.result(timeout=10) @@ -131,7 +131,7 @@ def test_list_instances(self): def test_reload(self): # Use same arguments as Config.INSTANCE (created in `setUpModule`) # so we can use reload() on a fresh instance. - instance = Config.CLIENT.instance(INSTANCE_ID, LOCATION_ID) + instance = Config.CLIENT.instance(INSTANCE_ID) # Make sure metadata unset before reloading. instance.display_name = None @@ -140,8 +140,8 @@ def test_reload(self): def test_create_instance(self): ALT_INSTANCE_ID = 'new' + unique_resource_id('-') - instance = Config.CLIENT.instance(ALT_INSTANCE_ID, LOCATION_ID) - operation = instance.create() + instance = Config.CLIENT.instance(ALT_INSTANCE_ID) + operation = instance.create(location_id=LOCATION_ID) # Make sure this instance gets deleted after the test case. self.instances_to_delete.append(instance) @@ -149,7 +149,7 @@ def test_create_instance(self): operation.result(timeout=10) # Create a new instance instance and make sure it is the same. - instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID, LOCATION_ID) + instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID) instance_alt.reload() self.assertEqual(instance, instance_alt) @@ -162,7 +162,7 @@ def test_update(self): Config.INSTANCE.update() # Create a new instance instance and reload it. 
- instance_alt = Config.CLIENT.instance(INSTANCE_ID, None) + instance_alt = Config.CLIENT.instance(INSTANCE_ID) self.assertNotEqual(instance_alt.display_name, NEW_DISPLAY_NAME) instance_alt.reload() self.assertEqual(instance_alt.display_name, NEW_DISPLAY_NAME) diff --git a/packages/google-cloud-bigtable/tests/unit/test_client.py b/packages/google-cloud-bigtable/tests/unit/test_client.py index 5a2db9e830e4..97df171b4cb6 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/test_client.py @@ -107,18 +107,15 @@ def test_instance_factory_w_explicit_serve_nodes(self): PROJECT = 'PROJECT' INSTANCE_ID = 'instance-id' DISPLAY_NAME = 'display-name' - LOCATION_ID = 'locname' credentials = _make_credentials() client = self._make_one( project=PROJECT, credentials=credentials) - instance = client.instance( - INSTANCE_ID, display_name=DISPLAY_NAME, location=LOCATION_ID) + instance = client.instance(INSTANCE_ID, display_name=DISPLAY_NAME) self.assertIsInstance(instance, Instance) self.assertEqual(instance.instance_id, INSTANCE_ID) self.assertEqual(instance.display_name, DISPLAY_NAME) - self.assertEqual(instance._cluster_location_id, LOCATION_ID) self.assertIs(instance._client, client) def test_admin_client_w_value_error(self): diff --git a/packages/google-cloud-bigtable/tests/unit/test_instance.py b/packages/google-cloud-bigtable/tests/unit/test_instance.py index bead53c280a1..870f85e168ad 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_instance.py +++ b/packages/google-cloud-bigtable/tests/unit/test_instance.py @@ -85,17 +85,16 @@ def _make_client(self, *args, **kwargs): def test_constructor_defaults(self): client = object() - instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) + instance = self._make_one(self.INSTANCE_ID, client) self.assertEqual(instance.instance_id, self.INSTANCE_ID) self.assertEqual(instance.display_name, self.INSTANCE_ID) self.assertIs(instance._client, client) - self.assertEqual(instance._cluster_location_id, self.LOCATION_ID) def test_constructor_non_default(self): display_name = 'display_name' client = object() - instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID, + instance = self._make_one(self.INSTANCE_ID, client, display_name=display_name) self.assertEqual(instance.instance_id, self.INSTANCE_ID) self.assertEqual(instance.display_name, display_name) @@ -104,7 +103,7 @@ def test_constructor_non_default(self): def test_table_factory(self): from google.cloud.bigtable.table import Table - instance = self._make_one(self.INSTANCE_ID, None, self.LOCATION_ID) + instance = self._make_one(self.INSTANCE_ID, None) table = instance.table(self.TABLE_ID) self.assertIsInstance(table, Table) @@ -120,7 +119,7 @@ def test__update_from_pb_success(self): display_name=display_name, ) - instance = self._make_one(None, None, None, None) + instance = self._make_one(None, None) self.assertIsNone(instance.display_name) instance._update_from_pb(instance_pb) self.assertEqual(instance.display_name, display_name) @@ -130,14 +129,12 @@ def test__update_from_pb_no_display_name(self): instance_pb2 as data_v2_pb2) instance_pb = data_v2_pb2.Instance() - instance = self._make_one(None, None, None, None) + instance = self._make_one(None, None) self.assertIsNone(instance.display_name) with self.assertRaises(ValueError): instance._update_from_pb(instance_pb) def test_from_pb_success(self): - from google.cloud.bigtable.instance import ( - _EXISTING_INSTANCE_LOCATION_ID) from 
google.cloud.bigtable_admin_v2.proto import ( instance_pb2 as data_v2_pb2) @@ -153,8 +150,6 @@ def test_from_pb_success(self): self.assertIsInstance(instance, klass) self.assertEqual(instance._client, client) self.assertEqual(instance.instance_id, self.INSTANCE_ID) - self.assertEqual(instance._cluster_location_id, - _EXISTING_INSTANCE_LOCATION_ID) def test_from_pb_bad_instance_name(self): from google.cloud.bigtable_admin_v2.proto import ( @@ -195,31 +190,31 @@ def test_name_property(self): # Patch the the API method. client._instance_admin_client = api - instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) + instance = self._make_one(self.INSTANCE_ID, client) self.assertEqual(instance.name, self.INSTANCE_NAME) def test___eq__(self): client = object() - instance1 = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) - instance2 = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) + instance1 = self._make_one(self.INSTANCE_ID, client) + instance2 = self._make_one(self.INSTANCE_ID, client) self.assertEqual(instance1, instance2) def test___eq__type_differ(self): client = object() - instance1 = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) + instance1 = self._make_one(self.INSTANCE_ID, client) instance2 = object() self.assertNotEqual(instance1, instance2) def test___ne__same_value(self): client = object() - instance1 = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) - instance2 = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) + instance1 = self._make_one(self.INSTANCE_ID, client) + instance2 = self._make_one(self.INSTANCE_ID, client) comparison_val = (instance1 != instance2) self.assertFalse(comparison_val) def test___ne__(self): - instance1 = self._make_one('instance_id1', 'client1', self.LOCATION_ID) - instance2 = self._make_one('instance_id2', 'client2', self.LOCATION_ID) + instance1 = self._make_one('instance_id1', 'client1') + instance2 = self._make_one('instance_id2', 'client2') self.assertNotEqual(instance1, instance2) def test_reload(self): @@ -233,7 +228,7 @@ def test_reload(self): credentials = _make_credentials() client = self._make_client(project=self.PROJECT, credentials=credentials, admin=True) - instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) + instance = self._make_one(self.INSTANCE_ID, client) # Create response_pb DISPLAY_NAME = u'hey-hi-hello' @@ -278,7 +273,7 @@ def test_create(self): credentials = _make_credentials() client = self._make_client(project=self.PROJECT, credentials=credentials, admin=True) - instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID, + instance = self._make_one(self.INSTANCE_ID, client, display_name=self.DISPLAY_NAME) # Create response_pb @@ -301,7 +296,7 @@ def test_create(self): client._instance_admin_client = instance_api # Perform the method and check the result. 
- result = instance.create() + result = instance.create(location_id=self.LOCATION_ID) actual_request = channel.requests[0][1] cluster_id = '{}-cluster'.format(self.INSTANCE_ID) @@ -330,10 +325,8 @@ def test_create_w_explicit_serve_nodes(self): credentials = _make_credentials() client = self._make_client(project=self.PROJECT, credentials=credentials, admin=True) - instance = self._make_one( - self.INSTANCE_ID, client, location_id=self.LOCATION_ID, - display_name=self.DISPLAY_NAME, serve_nodes=serve_nodes, - default_storage_type=enums.StorageType.SSD) + instance = self._make_one(self.INSTANCE_ID, client, + display_name=self.DISPLAY_NAME) # Create response_pb response_pb = operations_pb2.Operation(name=self.OP_NAME) @@ -346,7 +339,9 @@ def test_create_w_explicit_serve_nodes(self): client._instance_admin_client = instance_api # Perform the method and check the result. - result = instance.create() + result = instance.create( + location_id=self.LOCATION_ID, serve_nodes=serve_nodes, + default_storage_type=enums.StorageType.SSD) actual_request = channel.requests[0][1] cluster_id = '{}-cluster'.format(self.INSTANCE_ID) @@ -397,7 +392,7 @@ def test_update(self): credentials = _make_credentials() client = self._make_client(project=self.PROJECT, credentials=credentials, admin=True) - instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID, + instance = self._make_one(self.INSTANCE_ID, client, display_name=self.DISPLAY_NAME) # Mock api calls @@ -420,7 +415,7 @@ def test_delete(self): credentials = _make_credentials() client = self._make_client(project=self.PROJECT, credentials=credentials, admin=True) - instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) + instance = self._make_one(self.INSTANCE_ID, client) # Mock api calls client._instance_admin_client = api @@ -449,7 +444,7 @@ def _list_tables_helper(self, table_name=None): credentials = _make_credentials() client = self._make_client(project=self.PROJECT, credentials=credentials, admin=True) - instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) + instance = self._make_one(self.INSTANCE_ID, client) # Create response_pb if table_name is None: @@ -496,7 +491,7 @@ def test_create_app_profile_with_wrong_routing_policy(self): credentials = _make_credentials() client = self._make_client(project=self.PROJECT, credentials=credentials, admin=True) - instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) + instance = self._make_one(self.INSTANCE_ID, client) app_profile_id = 'appProfileId1262094415' update_mask = [] @@ -519,7 +514,7 @@ def test_create_app_profile_with_multi_routing_policy(self): credentials = _make_credentials() client = self._make_client(project=self.PROJECT, credentials=credentials, admin=True) - instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) + instance = self._make_one(self.INSTANCE_ID, client) description = 'description-1724546052' app_profile_id = 'appProfileId1262094415' @@ -566,7 +561,7 @@ def test_create_app_profile_with_single_routing_policy(self): credentials = _make_credentials() client = self._make_client(project=self.PROJECT, credentials=credentials, admin=True) - instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) + instance = self._make_one(self.INSTANCE_ID, client) description = 'description-1724546052' app_profile_id = 'appProfileId1262094415' @@ -623,7 +618,7 @@ def test_get_app_profile(self): credentials = _make_credentials() client = self._make_client(project=self.PROJECT, credentials=credentials, admin=True) - instance = 
self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) + instance = self._make_one(self.INSTANCE_ID, client) name = 'name3373707' etag = 'etag3123477' @@ -667,7 +662,7 @@ def test_list_app_profiles(self): credentials = _make_credentials() client = self._make_client(project=self.PROJECT, credentials=credentials, admin=True) - instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) + instance = self._make_one(self.INSTANCE_ID, client) # Setup Expected Response next_page_token = '' @@ -711,7 +706,7 @@ def test_update_app_profile(self): credentials = _make_credentials() client = self._make_client(project=self.PROJECT, credentials=credentials, admin=True) - instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) + instance = self._make_one(self.INSTANCE_ID, client) # Create response_pb NOW = datetime.datetime.utcnow() @@ -760,7 +755,7 @@ def test_update_app_profile_with_single_routing_policy(self): credentials = _make_credentials() client = self._make_client(project=self.PROJECT, credentials=credentials, admin=True) - instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) + instance = self._make_one(self.INSTANCE_ID, client) # Create response_pb NOW = datetime.datetime.utcnow() @@ -802,7 +797,7 @@ def test_delete_app_profile(self): credentials = _make_credentials() client = self._make_client(project=self.PROJECT, credentials=credentials, admin=True) - instance = self._make_one(self.INSTANCE_ID, client, self.LOCATION_ID) + instance = self._make_one(self.INSTANCE_ID, client) # Patch the stub used by the API method. client._instance_admin_client = instance_api From 12a7f3cf57e06f0938ad65f79fc8fb2f2d0c31e3 Mon Sep 17 00:00:00 2001 From: vikas-jamdar <39574687+vikas-jamdar@users.noreply.github.com> Date: Mon, 2 Jul 2018 21:53:04 +0530 Subject: [PATCH 143/892] Add 'Table.exists' method (#5545) --- .../google/cloud/bigtable/table.py | 15 +++++++ .../tests/unit/test_table.py | 45 +++++++++++++++++++ 2 files changed, 60 insertions(+) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index 773dcf0b1a77..21ccd6d8f729 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -18,6 +18,7 @@ from grpc import StatusCode from google.api_core.exceptions import RetryError +from google.api_core.exceptions import NotFound from google.api_core.retry import if_exception_type from google.api_core.retry import Retry from google.cloud._helpers import _to_bytes @@ -188,6 +189,20 @@ def create(self): table_client.create_table( parent=instance_name, table_id=self.table_id, table={}) + def exists(self): + """Check whether the table exists. + + :rtype: bool + :returns: True if the table exists, else False. 
+ """ + table_client = self._instance._client.table_admin_client + try: + table_client.get_table(name=self.name) + except NotFound: + return False + else: + return True + def delete(self): """Delete this table.""" table_client = self._instance._client.table_admin_client diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index 84d48b03a892..10184b479c7b 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -282,6 +282,51 @@ def _create_test_helper(self): def test_create(self): self._create_test_helper() + def test_exists(self): + from google.cloud.bigtable_admin_v2.proto import ( + table_pb2 as table_data_v2_pb2) + from google.cloud.bigtable_admin_v2.proto import ( + bigtable_table_admin_pb2 as table_messages_v1_pb2) + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_instance_admin_client, bigtable_table_admin_client) + from google.api_core.exceptions import NotFound + + table_api = bigtable_table_admin_client.BigtableTableAdminClient( + mock.Mock()) + instance_api = ( + bigtable_instance_admin_client.BigtableInstanceAdminClient( + mock.Mock())) + credentials = _make_credentials() + client = self._make_client(project='project-id', + credentials=credentials, admin=True) + instance = client.instance(instance_id=self.INSTANCE_ID) + # Create response_pb + response_pb = table_messages_v1_pb2.ListTablesResponse( + tables=[ + table_data_v2_pb2.Table(name=self.TABLE_NAME), + ], + ) + + # Patch API calls + client._table_admin_client = table_api + client._instance_admin_client = instance_api + bigtable_table_stub = ( + client._table_admin_client.bigtable_table_admin_stub) + bigtable_table_stub.ListTables.side_effect = [ + response_pb, + NotFound('testing'), + ] + + # Perform the method and check the result. 
+ table1 = instance.table(self.TABLE_ID) + table2 = instance.table('table-id2') + + result = table1.exists() + self.assertEqual(True, result) + + result = table2.exists() + self.assertEqual(False, result) + def test_delete(self): from google.cloud.bigtable_admin_v2.gapic import ( bigtable_table_admin_client) From fb4b7885b40e1ebf7097aefd04de4a146e6c82b1 Mon Sep 17 00:00:00 2001 From: Aneep Tandel Date: Thu, 12 Jul 2018 02:17:33 +0530 Subject: [PATCH 144/892] BigTable: Add split keys on create table - v2 (#5513) --- .../google/cloud/bigtable/table.py | 28 +++++++++++--- .../google-cloud-bigtable/tests/system.py | 13 +++++++ .../tests/unit/test_table.py | 38 +++++++++++++++++++ 3 files changed, 74 insertions(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index 21ccd6d8f729..86f84cc0d4b3 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -22,8 +22,6 @@ from google.api_core.retry import if_exception_type from google.api_core.retry import Retry from google.cloud._helpers import _to_bytes -from google.cloud.bigtable_v2.proto import ( - bigtable_pb2 as data_messages_v2_pb2) from google.cloud.bigtable.column_family import _gc_rule_from_pb from google.cloud.bigtable.column_family import ColumnFamily from google.cloud.bigtable.row import AppendRow @@ -31,6 +29,10 @@ from google.cloud.bigtable.row import DirectRow from google.cloud.bigtable.row_data import PartialRowsData from google.cloud.bigtable.row_data import YieldRowsData +from google.cloud.bigtable_v2.proto import ( + bigtable_pb2 as data_messages_v2_pb2) +from google.cloud.bigtable_admin_v2.proto import ( + bigtable_table_admin_pb2 as table_admin_messages_v2_pb2) # Maximum number of mutations in bulk (MutateRowsRequest message): @@ -175,7 +177,7 @@ def __eq__(self, other): def __ne__(self, other): return not self == other - def create(self): + def create(self, initial_split_keys=None): """Creates this table. .. note:: @@ -183,11 +185,27 @@ def create(self): A create request returns a :class:`._generated.table_pb2.Table` but we don't use this response. + + :type initial_split_keys: list + :param initial_split_keys: The optional list of row keys in bytes that + will be used to initially split the table + into several tablets. """ table_client = self._instance._client.table_admin_client instance_name = self._instance.name - table_client.create_table( - parent=instance_name, table_id=self.table_id, table={}) + + if initial_split_keys is not None: + splits = [] + for initial_split_key in initial_split_keys: + splits.append( + table_admin_messages_v2_pb2.CreateTableRequest.Split( + key=initial_split_key)) + else: + splits = None + + table_client.create_table(parent=instance_name, + table_id=self.table_id, table={}, + initial_splits=splits) def exists(self): """Check whether the table exists. 
diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index ec27a2d6d29c..dd610dbaecaa 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -242,6 +242,19 @@ def test_create_table(self): sorted_tables = sorted(tables, key=name_attr) self.assertEqual(sorted_tables, expected_tables) + def test_create_table_with_split_keys(self): + temp_table_id = 'foo-bar-baz-split-table' + initial_split_keys = [b'split_key_1', b'split_key_10', + b'split_key_20', b''] + temp_table = Config.INSTANCE.table(temp_table_id) + temp_table.create(initial_split_keys=initial_split_keys) + self.tables_to_delete.append(temp_table) + + # Read Sample Row Keys for created splits + sample_row_keys = temp_table.sample_row_keys() + self.assertEqual(set([srk.row_key for srk in sample_row_keys]), + set(initial_split_keys)) + def test_create_column_family(self): temp_table_id = 'test-create-column-family' temp_table = Config.INSTANCE.table(temp_table_id) diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index 10184b479c7b..1ef7ebc25a04 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -282,6 +282,44 @@ def _create_test_helper(self): def test_create(self): self._create_test_helper() + def test_create_with_split_keys(self): + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_instance_admin_client, bigtable_table_admin_client) + from google.cloud.bigtable_admin_v2.proto import ( + bigtable_table_admin_pb2 as table_admin_messages_v2_pb2) + + table_api = mock.create_autospec( + bigtable_table_admin_client.BigtableTableAdminClient) + instance_api = ( + bigtable_instance_admin_client.BigtableInstanceAdminClient( + mock.Mock())) + credentials = _make_credentials() + client = self._make_client(project='project-id', + credentials=credentials, admin=True) + instance = client.instance(instance_id=self.INSTANCE_ID) + table = self._make_one(self.TABLE_ID, instance) + + split_keys = [b'split1', b'split2', b'split3'] + + # Patch API calls + client._table_admin_client = table_api + client._instance_admin_client = instance_api + + # Perform the method and check the result. + table.create(split_keys) + + splits = [] + for split_key in split_keys: + splits.append( + table_admin_messages_v2_pb2.CreateTableRequest.Split( + key=split_key)) + + table_api.create_table.assert_called_once_with( + parent=self.INSTANCE_NAME, + table={}, + table_id=self.TABLE_ID, + initial_splits=splits) + def test_exists(self): from google.cloud.bigtable_admin_v2.proto import ( table_pb2 as table_data_v2_pb2) From dca0c80fc213cc52be3e8c85f3b81bb818f6df0b Mon Sep 17 00:00:00 2001 From: Solomon Duskis Date: Thu, 12 Jul 2018 11:08:07 -0400 Subject: [PATCH 145/892] Bigtable: DirectRow without a table (#5567) Allowing `DirectRow`s to be created without a call to `table.row()` or without a table passed in the constructor. `DirectRow`s that are passed to `Table.mutate_rows()` don't need their own reference to a table. More importantly, Dataflow needs the separation of concerns between the data that needs to be operated on, adn the Service which will perform operations. This PR also changes the dynamics of how `DirectRow.commit()` is performed. `DirectRow.commit()` now defers implementation details to `Table.mutate_rows()`. 
Until now, there was validation duplication, as well as some duplication around complexities of retrying. Validation and retries now only happen in `Table.mutate_rows()` simplifying the codebase. --- .../google/cloud/bigtable/row.py | 50 +++--------- .../google/cloud/bigtable/table.py | 10 +-- .../tests/unit/test_row.py | 81 ++----------------- 3 files changed, 22 insertions(+), 119 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row.py index f900e42d4720..08378c7b2b20 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row.py @@ -15,14 +15,10 @@ """User-friendly container for Google Cloud Bigtable Row.""" -import functools import struct -import grpc import six -from google.api_core import exceptions -from google.api_core import retry from google.cloud._helpers import _datetime_from_microseconds from google.cloud._helpers import _microseconds_from_datetime from google.cloud._helpers import _to_bytes @@ -50,10 +46,10 @@ class Row(object): :param row_key: The key for the current row. :type table: :class:`Table ` - :param table: The table that owns the row. + :param table: (Optional) The table that owns the row. """ - def __init__(self, row_key, table): + def __init__(self, row_key, table=None): self._row_key = _to_bytes(row_key) self._table = table @@ -96,7 +92,7 @@ class _SetDeleteRow(Row): ALL_COLUMNS = object() """Sentinel value used to indicate all columns in a column family.""" - def _get_mutations(self, state): + def _get_mutations(self, state=None): """Gets the list of mutations for a given state. This method intended to be implemented by subclasses. @@ -238,13 +234,6 @@ def _delete_cells(self, column_family_id, columns, time_range=None, mutations_list.extend(to_append) -def _retry_commit_exception(exc): - if isinstance(exc, grpc.RpcError): - exc = exceptions.from_grpc_error(exc) - return isinstance(exc, (exceptions.ServiceUnavailable, - exceptions.DeadlineExceeded)) - - class DirectRow(_SetDeleteRow): """Google Cloud Bigtable Row for sending "direct" mutations. @@ -272,14 +261,17 @@ class DirectRow(_SetDeleteRow): :param row_key: The key for the current row. :type table: :class:`Table ` - :param table: The table that owns the row. + :param table: (Optional) The table that owns the row. This is + used for the :meth: `commit` only. Alternatively, + DirectRows can be persisted via + :meth:`~google.cloud.bigtable.table.Table.mutate_rows`. """ - def __init__(self, row_key, table): + def __init__(self, row_key, table=None): super(DirectRow, self).__init__(row_key, table) self._pb_mutations = [] - def _get_mutations(self, state): # pylint: disable=unused-argument + def _get_mutations(self, state=None): # pylint: disable=unused-argument """Gets the list of mutations for a given state. ``state`` is unused by :class:`DirectRow` but is used by @@ -406,26 +398,10 @@ def commit(self): After committing the accumulated mutations, resets the local mutations to an empty list. - :raises: :class:`ValueError ` if the number of - mutations exceeds the :data:`MAX_MUTATIONS`. + :raises: :exc:`~.table.TooManyMutationsError` if the number of + mutations is greater than 100,000. """ - mutations_list = self._get_mutations(None) - num_mutations = len(mutations_list) - if num_mutations == 0: - return - if num_mutations > MAX_MUTATIONS: - raise ValueError('%d total mutations exceed the maximum allowable ' - '%d.' 
% (num_mutations, MAX_MUTATIONS)) - - data_client = self._table._instance._client.table_data_client - commit = functools.partial( - data_client.mutate_row, - self._table.name, self._row_key, mutations_list) - retry_ = retry.Retry( - predicate=_retry_commit_exception, - deadline=30) - retry_(commit)() - + self._table.mutate_rows([self]) self.clear() def clear(self): @@ -475,7 +451,7 @@ def __init__(self, row_key, table, filter_): self._true_pb_mutations = [] self._false_pb_mutations = [] - def _get_mutations(self, state): + def _get_mutations(self, state=None): """Gets the list of mutations for a given state. Over-ridden so that the state can be used in: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index 86f84cc0d4b3..d3a1cbdb4203 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -681,13 +681,9 @@ def _mutate_rows_request(table_name, rows, app_profile_id=None): for row in rows: _check_row_table_name(table_name, row) _check_row_type(row) - entry = request_pb.entries.add() - entry.row_key = row.row_key - # NOTE: Since `_check_row_type` has verified `row` is a `DirectRow`, - # the mutations have no state. - for mutation in row._get_mutations(None): - mutations_count += 1 - entry.mutations.add().CopyFrom(mutation) + mutations = row._get_mutations() + request_pb.entries.add(row_key=row.row_key, mutations=mutations) + mutations_count += len(mutations) if mutations_count > _MAX_BULK_MUTATIONS: raise TooManyMutationsError('Maximum number of mutations is %s' % (_MAX_BULK_MUTATIONS,)) diff --git a/packages/google-cloud-bigtable/tests/unit/test_row.py b/packages/google-cloud-bigtable/tests/unit/test_row.py index 39e701adfb26..2e5e9bbfe1f8 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row.py @@ -333,96 +333,23 @@ def test_delete_cells_with_string_columns(self): self.assertEqual(row._pb_mutations, [expected_pb1, expected_pb2]) def test_commit(self): - from google.protobuf import empty_pb2 - from google.cloud.bigtable_v2.gapic import bigtable_client - project_id = 'project-id' row_key = b'row_key' table_name = 'projects/more-stuff' column_family_id = u'column_family_id' column = b'column' - api = bigtable_client.BigtableClient(mock.Mock()) credentials = _make_credentials() client = self._make_client(project=project_id, credentials=credentials, admin=True) table = _Table(table_name, client=client) row = self._make_one(row_key, table) - - # Create request_pb value = b'bytes-value' - # Create response_pb - response_pb = empty_pb2.Empty() - - # Patch the stub used by the API method. - client._table_data_client = api - bigtable_stub = client._table_data_client.bigtable_stub - bigtable_stub.MutateRow.side_effect = [response_pb] - - # Create expected_result. - expected_result = None # commit() has no return value when no filter. - # Perform the method and check the result. 
row.set_cell(column_family_id, column, value) - result = row.commit() - self.assertEqual(result, expected_result) - self.assertEqual(row._pb_mutations, []) - - def test_retry_commit_exception(self): - import grpc - import mock - - from google.cloud.bigtable.row import _retry_commit_exception - - class ErrorUnavailable(grpc.RpcError, grpc.Call): - """ErrorUnavailable exception""" - - message = 'Endpoint read failed' - error = mock.create_autospec(ErrorUnavailable, instance=True) - error.code.return_value = grpc.StatusCode.UNAVAILABLE - error.details.return_value = message - - result = _retry_commit_exception(error) - self.assertEqual(result, True) - - result = _retry_commit_exception(ValueError) - self.assertNotEqual(result, True) - - def test_commit_too_many_mutations(self): - from google.cloud._testing import _Monkey - from google.cloud.bigtable import row as MUT - - row_key = b'row_key' - table = object() - row = self._make_one(row_key, table) - row._pb_mutations = [1, 2, 3] - num_mutations = len(row._pb_mutations) - with _Monkey(MUT, MAX_MUTATIONS=num_mutations - 1): - with self.assertRaises(ValueError): - row.commit() - - def test_commit_no_mutations(self): - from tests.unit._testing import _FakeStub - - project_id = 'project-id' - row_key = b'row_key' - - credentials = _make_credentials() - client = self._make_client(project=project_id, - credentials=credentials, admin=True) - table = _Table(None, client=client) - row = self._make_one(row_key, table) - self.assertEqual(row._pb_mutations, []) - - # Patch the stub used by the API method. - stub = _FakeStub() - - # Perform the method and check the result. - result = row.commit() - self.assertIsNone(result) - # Make sure no request was sent. - self.assertEqual(stub.method_calls, []) + row.commit() + self.assertEqual(table.mutated_rows, [row]) class TestConditionalRow(unittest.TestCase): @@ -939,3 +866,7 @@ def __init__(self, name, client=None): self.name = name self._instance = _Instance(client) self.client = client + self.mutated_rows = [] + + def mutate_rows(self, rows): + self.mutated_rows.extend(rows) From 5998b9441c40748e1fc04965168b7b51936326c1 Mon Sep 17 00:00:00 2001 From: Solomon Duskis Date: Thu, 12 Jul 2018 11:10:30 -0400 Subject: [PATCH 146/892] Bigtable: Allow 'Table.create()' to create column families. (#5576) `Table.create` method now takes an optional `column_families` parameter. --- .../google/cloud/bigtable/table.py | 36 +++++----- .../google-cloud-bigtable/tests/system.py | 16 +++++ .../tests/unit/test_table.py | 67 +++++++------------ 3 files changed, 60 insertions(+), 59 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index d3a1cbdb4203..f56e05f0c890 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -31,6 +31,8 @@ from google.cloud.bigtable.row_data import YieldRowsData from google.cloud.bigtable_v2.proto import ( bigtable_pb2 as data_messages_v2_pb2) +from google.cloud.bigtable_admin_v2.proto import ( + table_pb2 as admin_messages_v2_pb2) from google.cloud.bigtable_admin_v2.proto import ( bigtable_table_admin_pb2 as table_admin_messages_v2_pb2) @@ -88,7 +90,7 @@ class Table(object): :type instance: :class:`~google.cloud.bigtable.instance.Instance` :param instance: The instance that owns the table. - :type: app_profile_id: str + :type app_profile_id: str :param app_profile_id: (Optional) The unique name of the AppProfile. 
""" @@ -177,7 +179,7 @@ def __eq__(self, other): def __ne__(self, other): return not self == other - def create(self, initial_split_keys=None): + def create(self, initial_split_keys=[], column_families={}): """Creates this table. .. note:: @@ -187,25 +189,27 @@ def create(self, initial_split_keys=None): this response. :type initial_split_keys: list - :param initial_split_keys: The optional list of row keys in bytes that - will be used to initially split the table - into several tablets. + :param initial_split_keys: (Optional) list of row keys in bytes that + will be used to initially split the table + into several tablets. + + :type column_families: dict + :param column_failies: (Optional) A map columns to create. The key is + the column_id str and the value is a + :class:`GarbageCollectionRule` """ table_client = self._instance._client.table_admin_client instance_name = self._instance.name - if initial_split_keys is not None: - splits = [] - for initial_split_key in initial_split_keys: - splits.append( - table_admin_messages_v2_pb2.CreateTableRequest.Split( - key=initial_split_key)) - else: - splits = None + families = {id: ColumnFamily(id, self, rule).to_pb() + for (id, rule) in column_families.items()} + table = admin_messages_v2_pb2.Table(column_families=families) + + split = table_admin_messages_v2_pb2.CreateTableRequest.Split + splits = [split(key=key) for key in initial_split_keys] - table_client.create_table(parent=instance_name, - table_id=self.table_id, table={}, - initial_splits=splits) + table_client.create_table(parent=instance_name, table_id=self.table_id, + table=table, initial_splits=splits) def exists(self): """Check whether the table exists. diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index dd610dbaecaa..689571ad7cf0 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -242,6 +242,22 @@ def test_create_table(self): sorted_tables = sorted(tables, key=name_attr) self.assertEqual(sorted_tables, expected_tables) + def test_create_table_with_families(self): + temp_table_id = 'test-create-table-with-failies' + temp_table = Config.INSTANCE.table(temp_table_id) + gc_rule = MaxVersionsGCRule(1) + temp_table.create(column_families={COLUMN_FAMILY_ID1: gc_rule}) + self.tables_to_delete.append(temp_table) + + col_fams = temp_table.list_column_families() + + self.assertEqual(len(col_fams), 1) + retrieved_col_fam = col_fams[COLUMN_FAMILY_ID1] + self.assertIs(retrieved_col_fam._table, temp_table) + self.assertEqual(retrieved_col_fam.column_family_id, + COLUMN_FAMILY_ID1) + self.assertEqual(retrieved_col_fam.gc_rule, gc_rule) + def test_create_table_with_split_keys(self): temp_table_id = 'foo-bar-baz-split-table' initial_split_keys = [b'split_key_1', b'split_key_10', diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index 1ef7ebc25a04..8f83f1c74674 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -254,72 +254,53 @@ def test___ne__(self): table2 = self._make_one('table_id2', None) self.assertNotEqual(table1, table2) - def _create_test_helper(self): + def _create_test_helper(self, split_keys=[], column_families={}): from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_instance_admin_client, bigtable_table_admin_client) - - table_api = mock.create_autospec( - 
bigtable_table_admin_client.BigtableTableAdminClient) - instance_api = mock.create_autospec( - bigtable_instance_admin_client.BigtableInstanceAdminClient) - credentials = _make_credentials() - client = self._make_client(project='project-id', - credentials=credentials, admin=True) - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_one(self.TABLE_ID, instance) - - # Patch API calls - client._table_admin_client = table_api - client._instance_admin_client = instance_api - - # Create expected_result. - expected_result = None # create() has no return value. - - # Perform the method and check the result. - result = table.create() - self.assertEqual(result, expected_result) - - def test_create(self): - self._create_test_helper() - - def test_create_with_split_keys(self): - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_instance_admin_client, bigtable_table_admin_client) + bigtable_table_admin_client) + from google.cloud.bigtable_admin_v2.proto import table_pb2 from google.cloud.bigtable_admin_v2.proto import ( bigtable_table_admin_pb2 as table_admin_messages_v2_pb2) + from google.cloud.bigtable.column_family import ColumnFamily table_api = mock.create_autospec( bigtable_table_admin_client.BigtableTableAdminClient) - instance_api = ( - bigtable_instance_admin_client.BigtableInstanceAdminClient( - mock.Mock())) credentials = _make_credentials() client = self._make_client(project='project-id', credentials=credentials, admin=True) instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) - split_keys = [b'split1', b'split2', b'split3'] - # Patch API calls client._table_admin_client = table_api - client._instance_admin_client = instance_api # Perform the method and check the result. 
- table.create(split_keys) + table.create(column_families=column_families, + initial_split_keys=split_keys) + + families = {id: ColumnFamily(id, self, rule).to_pb() + for (id, rule) in column_families.items()} - splits = [] - for split_key in split_keys: - splits.append( - table_admin_messages_v2_pb2.CreateTableRequest.Split( - key=split_key)) + split = table_admin_messages_v2_pb2.CreateTableRequest.Split + splits = [split(key=split_key) for split_key in split_keys] table_api.create_table.assert_called_once_with( parent=self.INSTANCE_NAME, - table={}, + table=table_pb2.Table(column_families=families), table_id=self.TABLE_ID, initial_splits=splits) + def test_create(self): + self._create_test_helper() + + def test_create_with_families(self): + from google.cloud.bigtable.column_family import MaxVersionsGCRule + + families = {"family": MaxVersionsGCRule(5)} + self._create_test_helper(column_families=families) + + def test_create_with_split_keys(self): + self._create_test_helper(split_keys=[b'split1', b'split2', b'split3']) + def test_exists(self): from google.cloud.bigtable_admin_v2.proto import ( table_pb2 as table_data_v2_pb2) From 18e78dba9181479d9d583bb15aed938c38691cc2 Mon Sep 17 00:00:00 2001 From: sumit-ql <39561577+sumit-ql@users.noreply.github.com> Date: Fri, 13 Jul 2018 03:07:41 +0530 Subject: [PATCH 147/892] Bigtable : Implement row set for yield_rows (#5506) --- .../google/cloud/bigtable/row_data.py | 87 ++++++++++- .../google/cloud/bigtable/row_set.py | 131 ++++++++++++++++ .../google/cloud/bigtable/table.py | 47 ++++-- .../google-cloud-bigtable/tests/system.py | 28 ++++ .../tests/unit/test_row_data.py | 144 ++++++++++++++++++ .../tests/unit/test_row_set.py | 118 ++++++++++++++ .../tests/unit/test_table.py | 89 ++++++++++- 7 files changed, 621 insertions(+), 23 deletions(-) create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py create mode 100644 packages/google-cloud-bigtable/tests/unit/test_row_set.py diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py index 13db91b1268d..f3ada1dd3f02 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py @@ -24,6 +24,10 @@ from google.api_core import retry from google.cloud._helpers import _datetime_from_microseconds from google.cloud._helpers import _to_bytes +from google.cloud.bigtable_v2.proto import ( + bigtable_pb2 as data_messages_v2_pb2) +from google.cloud.bigtable_v2.proto import ( + data_pb2 as data_v2_pb2) _MISSING_COLUMN_FAMILY = ( 'Column family {} is not among the cells stored in this row.') @@ -434,12 +438,10 @@ def cancel(self): def _create_retry_request(self): """Helper for :meth:`read_rows`.""" - row_range = self.request.rows.row_ranges.pop() - range_kwargs = {} - # start AFTER the row_key of the last successfully read row - range_kwargs['start_key_open'] = self.last_scanned_row_key - range_kwargs['end_key_open'] = row_range.end_key_open - self.request.rows.row_ranges.add(**range_kwargs) + req_manager = _ReadRowsRequestManager(self.request, + self.last_scanned_row_key, + self._counter) + self.request = req_manager.build_updated_request() def _on_error(self, exc): """Helper for :meth:`read_rows`.""" @@ -605,6 +607,79 @@ def _copy_from_previous(self, cell): cell.qualifier = previous.qualifier +class _ReadRowsRequestManager(object): + """ Update the ReadRowsRequest message in case of failures by + filtering the already 
read keys. + + :type message: class:`data_messages_v2_pb2.ReadRowsRequest` + :param message: Original ReadRowsRequest containing all of the parameters + of API call + + :type last_scanned_key: bytes + :param last_scanned_key: last successfully scanned key + + :type rows_read_so_far: int + :param rows_read_so_far: total no of rows successfully read so far. + this will be used for updating rows_limit + + """ + + def __init__(self, message, last_scanned_key, rows_read_so_far): + self.message = message + self.last_scanned_key = last_scanned_key + self.rows_read_so_far = rows_read_so_far + + def build_updated_request(self): + """ Updates the given message request as per last scanned key + """ + r_kwargs = {'table_name': self.message.table_name, + 'filter': self.message.filter} + + if self.message.rows_limit != 0: + r_kwargs['rows_limit'] = max(1, self.message.rows_limit - + self.rows_read_so_far) + + row_keys = self._filter_rows_keys() + row_ranges = self._filter_row_ranges() + r_kwargs['rows'] = data_v2_pb2.RowSet(row_keys=row_keys, + row_ranges=row_ranges) + + return data_messages_v2_pb2.ReadRowsRequest(**r_kwargs) + + def _filter_rows_keys(self): + """ Helper for :meth:`build_updated_request`""" + return [row_key for row_key in self.message.rows.row_keys + if row_key > self.last_scanned_key] + + def _filter_row_ranges(self): + """ Helper for :meth:`build_updated_request`""" + new_row_ranges = [] + + for row_range in self.message.rows.row_ranges: + if((row_range.end_key_open and + self._key_already_read(row_range.end_key_open)) or + (row_range.end_key_closed and + self._key_already_read(row_range.end_key_closed))): + continue + + if ((row_range.start_key_open and + self._key_already_read(row_range.start_key_open)) or + (row_range.start_key_closed and + self._key_already_read(row_range.start_key_closed))): + row_range.start_key_closed = _to_bytes("") + row_range.start_key_open = self.last_scanned_key + + new_row_ranges.append(row_range) + else: + new_row_ranges.append(row_range) + + return new_row_ranges + + def _key_already_read(self, key): + """ Helper for :meth:`_filter_row_ranges`""" + return key <= self.last_scanned_key + + def _raise_if(predicate, *args): """Helper for validation methods.""" if predicate: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py new file mode 100644 index 000000000000..0d5ae9903473 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py @@ -0,0 +1,131 @@ +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""User-friendly container for Google Cloud Bigtable RowSet """ + + +from google.cloud._helpers import _to_bytes + + +class RowSet(object): + """ Convenience wrapper of google.bigtable.v2.RowSet + + Useful for creating a set of row keys and row ranges, which can + be passed to yield_rows method of class:`.Table.yield_rows`. 
+ """ + + def __init__(self): + self.row_keys = [] + self.row_ranges = [] + + def add_row_key(self, row_key): + """Add row key to row_keys list. + + :type row_key: bytes + :param row_key: The key of a row to read + """ + self.row_keys.append(row_key) + + def add_row_range(self, row_range): + """Add row_range to row_ranges list. + + :type row_range: class:`RowRange` + :param row_range: The row range object having start and end key + """ + self.row_ranges.append(row_range) + + def add_row_range_from_keys(self, start_key=None, end_key=None, + start_inclusive=True, end_inclusive=False): + """Add row range to row_ranges list from the row keys + + :type start_key: bytes + :param start_key: (Optional) Start key of the row range. If left empty, + will be interpreted as the empty string. + + :type end_key: bytes + :param end_key: (Optional) End key of the row range. If left empty, + will be interpreted as the empty string and range will + be unbounded on the high end. + + :type start_inclusive: bool + :param start_inclusive: (Optional) Whether the ``start_key`` should be + considered inclusive. The default is True (inclusive). + + :type end_inclusive: bool + :param end_inclusive: (Optional) Whether the ``end_key`` should be + considered inclusive. The default is False (exclusive). + """ + row_range = RowRange(start_key, end_key, + start_inclusive, end_inclusive) + self.row_ranges.append(row_range) + + def _update_message_request(self, message): + """Add row keys and row range to given request message + + :type message: class:`data_messages_v2_pb2.ReadRowsRequest` + :param message: The ``ReadRowsRequest`` protobuf + """ + for each in self.row_keys: + message.rows.row_keys.append(_to_bytes(each)) + + for each in self.row_ranges: + r_kwrags = each.get_range_kwargs() + message.rows.row_ranges.add(**r_kwrags) + + +class RowRange(object): + """ Convenience wrapper of google.bigtable.v2.RowRange + + :type start_key: bytes + :param start_key: (Optional) Start key of the row range. If left empty, + will be interpreted as the empty string. + + :type end_key: bytes + :param end_key: (Optional) End key of the row range. If left empty, + will be interpreted as the empty string and range will + be unbounded on the high end. + + :type start_inclusive: bool + :param start_inclusive: (Optional) Whether the ``start_key`` should be + considered inclusive. The default is True (inclusive). + + :type end_inclusive: bool + :param end_inclusive: (Optional) Whether the ``end_key`` should be + considered inclusive. The default is False (exclusive). + """ + + def __init__(self, start_key=None, end_key=None, + start_inclusive=True, end_inclusive=False): + self.start_key = start_key + self.start_inclusive = start_inclusive + self.end_key = end_key + self.end_inclusive = end_inclusive + + def get_range_kwargs(self): + """ Convert row range object to dict which can be passed to + google.bigtable.v2.RowRange add method. 
+ """ + range_kwargs = {} + if self.start_key is not None: + start_key_key = 'start_key_open' + if self.start_inclusive: + start_key_key = 'start_key_closed' + range_kwargs[start_key_key] = _to_bytes(self.start_key) + + if self.end_key is not None: + end_key_key = 'end_key_open' + if self.end_inclusive: + end_key_key = 'end_key_closed' + range_kwargs[end_key_key] = _to_bytes(self.end_key) + return range_kwargs diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index f56e05f0c890..f8602a44c748 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -29,6 +29,8 @@ from google.cloud.bigtable.row import DirectRow from google.cloud.bigtable.row_data import PartialRowsData from google.cloud.bigtable.row_data import YieldRowsData +from google.cloud.bigtable.row_set import RowSet +from google.cloud.bigtable.row_set import RowRange from google.cloud.bigtable_v2.proto import ( bigtable_pb2 as data_messages_v2_pb2) from google.cloud.bigtable_admin_v2.proto import ( @@ -323,7 +325,7 @@ def read_rows(self, start_key=None, end_key=None, limit=None, return PartialRowsData(data_client._read_rows, request_pb) def yield_rows(self, start_key=None, end_key=None, limit=None, - filter_=None): + filter_=None, row_set=None): """Read rows from this table. :type start_key: bytes @@ -346,12 +348,16 @@ def yield_rows(self, start_key=None, end_key=None, limit=None, specified row(s). If unset, reads every column in each row. + :type row_set: :class:`row_set.RowSet` + :param filter_: (Optional) The row set containing multiple row keys and + row_ranges. + :rtype: :class:`.PartialRowData` :returns: A :class:`.PartialRowData` for each row returned """ request_pb = _create_row_request( self.name, start_key=start_key, end_key=end_key, filter_=filter_, - limit=limit, app_profile_id=self._app_profile_id) + limit=limit, app_profile_id=self._app_profile_id, row_set=row_set) data_client = self._instance._client.table_data_client generator = YieldRowsData(data_client._read_rows, request_pb) @@ -590,7 +596,7 @@ def _do_mutate_retryable_rows(self): def _create_row_request(table_name, row_key=None, start_key=None, end_key=None, filter_=None, limit=None, end_inclusive=False, - app_profile_id=None): + app_profile_id=None, row_set=None): """Creates a request to read rows in a table. :type table_name: str @@ -625,6 +631,10 @@ def _create_row_request(table_name, row_key=None, start_key=None, end_key=None, :type: app_profile_id: str :param app_profile_id: (Optional) The unique name of the AppProfile. + :type row_set: :class:`row_set.RowSet` + :param filter_: (Optional) The row set containing multiple row keys and + row_ranges. + :rtype: :class:`data_messages_v2_pb2.ReadRowsRequest` :returns: The ``ReadRowsRequest`` protobuf corresponding to the inputs. 
:raises: :class:`ValueError ` if both @@ -635,15 +645,16 @@ def _create_row_request(table_name, row_key=None, start_key=None, end_key=None, (start_key is not None or end_key is not None)): raise ValueError('Row key and row range cannot be ' 'set simultaneously') - range_kwargs = {} - if start_key is not None or end_key is not None: - if start_key is not None: - range_kwargs['start_key_closed'] = _to_bytes(start_key) - if end_key is not None: - end_key_key = 'end_key_open' - if end_inclusive: - end_key_key = 'end_key_closed' - range_kwargs[end_key_key] = _to_bytes(end_key) + + if (row_key is not None and row_set is not None): + raise ValueError('Row key and row set cannot be ' + 'set simultaneously') + + if ((start_key is not None or end_key is not None) and + row_set is not None): + raise ValueError('Row range and row set cannot be ' + 'set simultaneously') + if filter_ is not None: request_kwargs['filter'] = filter_.to_pb() if limit is not None: @@ -654,10 +665,16 @@ def _create_row_request(table_name, row_key=None, start_key=None, end_key=None, message = data_messages_v2_pb2.ReadRowsRequest(**request_kwargs) if row_key is not None: - message.rows.row_keys.append(_to_bytes(row_key)) + row_set = RowSet() + row_set.add_row_key(row_key) + + if start_key is not None or end_key is not None: + row_set = RowSet() + row_set.add_row_range(RowRange(start_key, end_key, + end_inclusive=end_inclusive)) - if range_kwargs: - message.rows.row_ranges.add(**range_kwargs) + if row_set is not None: + row_set._update_message_request(message) return message diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index 689571ad7cf0..be65c1f03388 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -30,6 +30,8 @@ from google.cloud.bigtable.row_data import Cell from google.cloud.bigtable.row_data import PartialRowData from google.cloud.environment_vars import BIGTABLE_EMULATOR +from google.cloud.bigtable.row_set import RowSet +from google.cloud.bigtable.row_set import RowRange from test_utils.retry import RetryErrors from test_utils.system import EmulatorCreds @@ -478,6 +480,32 @@ def test_drop_by_prefix_table(self): self.assertEqual(expected_rows_count, read_rows_count) + def test_yield_rows_with_row_set(self): + row_keys = [ + b'row_key_1', b'row_key_2', b'row_key_3', b'row_key_4', + b'row_key_5', b'row_key_6', b'row_key_7', b'row_key_8', + b'row_key_9'] + + rows = [] + for row_key in row_keys: + row = self._table.row(row_key) + row.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1) + rows.append(row) + self.rows_to_delete.append(row) + self._table.mutate_rows(rows) + + row_set = RowSet() + row_set.add_row_range(RowRange(start_key=b'row_key_3', + end_key=b'row_key_7')) + row_set.add_row_key(b'row_key_1') + + read_rows = self._table.yield_rows(row_set=row_set) + + expected_row_keys = set([b'row_key_1', b'row_key_3', b'row_key_4', + b'row_key_5', b'row_key_6']) + found_row_keys = set([row.row_key for row in read_rows]) + self.assertEqual(found_row_keys, set(expected_row_keys)) + def test_read_large_cell_limit(self): row = self._table.row(ROW_KEY) self.rows_to_delete.append(row) diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_data.py b/packages/google-cloud-bigtable/tests/unit/test_row_data.py index b5f146c47715..12b1093ac4de 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_data.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_data.py @@ -18,6 +18,9 @@ import 
mock from ._testing import _make_credentials +from google.cloud.bigtable.row_set import RowRange +from google.cloud.bigtable_v2.proto import ( + data_pb2 as data_v2_pb2) class MultiCallableStub(object): @@ -704,6 +707,140 @@ def _consume_all(self, yrd): return [row.row_key for row in yrd.read_rows()] +class Test_ReadRowsRequestManager(unittest.TestCase): + + @classmethod + def setUpClass(cls): + cls.table_name = 'table_name' + cls.row_range1 = RowRange(b"row_key21", b"row_key29") + cls.row_range2 = RowRange(b"row_key31", b"row_key39") + cls.row_range3 = RowRange(b"row_key41", b"row_key49") + + cls.request = _ReadRowsRequestPB(table_name=cls.table_name) + cls.request.rows.row_ranges.add(**cls.row_range1.get_range_kwargs()) + cls.request.rows.row_ranges.add(**cls.row_range2.get_range_kwargs()) + cls.request.rows.row_ranges.add(**cls.row_range3.get_range_kwargs()) + + @staticmethod + def _get_target_class(): + from google.cloud.bigtable.row_data import _ReadRowsRequestManager + return _ReadRowsRequestManager + + def _make_one(self, *args, **kwargs): + return self._get_target_class()(*args, **kwargs) + + def test_constructor(self): + request = mock.Mock() + last_scanned_key = "last_key" + rows_read_so_far = 10 + + request_manager = self._make_one(request, last_scanned_key, + rows_read_so_far) + self.assertEqual(request, request_manager.message) + self.assertEqual(last_scanned_key, request_manager.last_scanned_key) + self.assertEqual(rows_read_so_far, request_manager.rows_read_so_far) + + def test__filter_row_key(self): + table_name = 'table_name' + request = _ReadRowsRequestPB(table_name=table_name) + request.rows.row_keys.extend([b'row_key1', b'row_key2', + b'row_key3', b'row_key4']) + + last_scanned_key = b"row_key2" + request_manager = self._make_one(request, last_scanned_key, 2) + row_keys = request_manager._filter_rows_keys() + + expected_row_keys = [b'row_key3', b'row_key4'] + self.assertEqual(expected_row_keys, row_keys) + + def test__filter_row_ranges_all_ranges_added_back(self): + last_scanned_key = b"row_key14" + request_manager = self._make_one(self.request, last_scanned_key, 2) + row_ranges = request_manager._filter_row_ranges() + + exp_row_range1 = data_v2_pb2.RowRange(start_key_closed=b"row_key21", + end_key_open=b"row_key29") + exp_row_range2 = data_v2_pb2.RowRange(start_key_closed=b"row_key31", + end_key_open=b"row_key39") + exp_row_range3 = data_v2_pb2.RowRange(start_key_closed=b"row_key41", + end_key_open=b"row_key49") + exp_row_ranges = [exp_row_range1, exp_row_range2, exp_row_range3] + + self.assertEqual(exp_row_ranges, row_ranges) + + def test__filter_row_ranges_all_ranges_already_read(self): + last_scanned_key = b"row_key54" + request_manager = self._make_one(self.request, last_scanned_key, 2) + row_ranges = request_manager._filter_row_ranges() + + self.assertEqual(row_ranges, []) + + def test__filter_row_ranges_all_ranges_already_read_open_closed(self): + last_scanned_key = b"row_key54" + + row_range1 = RowRange(b"row_key21", b"row_key29", False, True) + row_range2 = RowRange(b"row_key31", b"row_key39") + row_range3 = RowRange(b"row_key41", b"row_key49", False, True) + + request = _ReadRowsRequestPB(table_name=self.table_name) + request.rows.row_ranges.add(**row_range1.get_range_kwargs()) + request.rows.row_ranges.add(**row_range2.get_range_kwargs()) + request.rows.row_ranges.add(**row_range3.get_range_kwargs()) + + request_manager = self._make_one(request, last_scanned_key, 2) + request_manager.new_message = _ReadRowsRequestPB( + table_name=self.table_name) + row_ranges 
= request_manager._filter_row_ranges() + + self.assertEqual(row_ranges, []) + + def test__filter_row_ranges_some_ranges_already_read(self): + last_scanned_key = b"row_key22" + request_manager = self._make_one(self.request, last_scanned_key, 2) + request_manager.new_message = _ReadRowsRequestPB( + table_name=self.table_name) + row_ranges = request_manager._filter_row_ranges() + + exp_row_range1 = data_v2_pb2.RowRange(start_key_open=b"row_key22", + end_key_open=b"row_key29") + exp_row_range2 = data_v2_pb2.RowRange(start_key_closed=b"row_key31", + end_key_open=b"row_key39") + exp_row_range3 = data_v2_pb2.RowRange(start_key_closed=b"row_key41", + end_key_open=b"row_key49") + exp_row_ranges = [exp_row_range1, exp_row_range2, exp_row_range3] + + self.assertEqual(row_ranges, exp_row_ranges) + + def test_build_updated_request(self): + from google.cloud.bigtable.row_filters import RowSampleFilter + row_filter = RowSampleFilter(0.33) + last_scanned_key = b"row_key14" + request = _ReadRowsRequestPB(filter=row_filter.to_pb(), + rows_limit=8, + table_name=self.table_name) + request.rows.row_ranges.add(**self.row_range1.get_range_kwargs()) + + request_manager = self._make_one(request, last_scanned_key, 2) + + result = request_manager.build_updated_request() + + expected_result = _ReadRowsRequestPB(table_name=self.table_name, + filter=row_filter.to_pb(), + rows_limit=6) + expected_result.rows.row_ranges.add(**self.row_range1. + get_range_kwargs()) + + self.assertEqual(expected_result, result) + + def test__key_already_read(self): + last_scanned_key = b"row_key14" + request = _ReadRowsRequestPB(table_name=self.table_name) + request_manager = self._make_one(request, last_scanned_key, 2) + + self.assertTrue(request_manager._key_already_read(b"row_key11")) + self.assertFalse(request_manager._key_already_read(b"row_key16")) + + class TestPartialRowsData_JSON_acceptance_tests(unittest.TestCase): _json_tests = None @@ -1042,3 +1179,10 @@ def _make_cell(value): from google.cloud.bigtable import row_data return row_data.Cell(value, TestCell.timestamp_micros) + + +def _ReadRowsRequestPB(*args, **kw): + from google.cloud.bigtable_v2.proto import ( + bigtable_pb2 as messages_v2_pb2) + + return messages_v2_pb2.ReadRowsRequest(*args, **kw) diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_set.py b/packages/google-cloud-bigtable/tests/unit/test_row_set.py new file mode 100644 index 000000000000..84640b616f98 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/test_row_set.py @@ -0,0 +1,118 @@ +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
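# Illustrative usage sketch (not part of the patch above): how the RowSet and
# RowRange API introduced in this change is intended to be used for reads.
# The project, instance, and table IDs are hypothetical placeholders, and
# default application credentials are assumed to be available.
from google.cloud.bigtable import Client
from google.cloud.bigtable.row_set import RowRange
from google.cloud.bigtable.row_set import RowSet

client = Client(project='my-project')
table = client.instance('my-instance').table('my-table')

# Combine explicit row keys with a half-open key range in a single read.
row_set = RowSet()
row_set.add_row_key(b'row_key_1')
row_set.add_row_range(RowRange(start_key=b'row_key_3', end_key=b'row_key_7'))

for row in table.yield_rows(row_set=row_set):
    print(row.row_key)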
+ + +import unittest +from google.cloud.bigtable.row_set import RowRange +from google.cloud._helpers import _to_bytes + + +class TestRowSet(unittest.TestCase): + @staticmethod + def _get_target_class(): + from google.cloud.bigtable.row_set import RowSet + return RowSet + + def _make_one(self): + return self._get_target_class()() + + def test_constructor(self): + row_set = self._make_one() + self.assertEqual([], row_set.row_keys) + self.assertEqual([], row_set.row_ranges) + + def test_add_row_key(self): + row_set = self._make_one() + row_set.add_row_key("row_key1") + row_set.add_row_key("row_key2") + self.assertEqual(["row_key1", "row_key2"], row_set.row_keys) + + def test_add_row_range(self): + row_set = self._make_one() + row_range1 = RowRange(b"row_key1", b"row_key9") + row_range2 = RowRange(b"row_key21", b"row_key29") + row_set.add_row_range(row_range1) + row_set.add_row_range(row_range2) + expected = [row_range1, row_range2] + self.assertEqual(expected, row_set.row_ranges) + + def test_add_row_range_from_keys(self): + row_set = self._make_one() + row_set.add_row_range_from_keys(start_key=b"row_key1", + end_key=b"row_key9", + start_inclusive=False, + end_inclusive=True) + self.assertEqual(row_set.row_ranges[0].end_key, b"row_key9") + + def test__update_message_request(self): + row_set = self._make_one() + table_name = 'table_name' + row_set.add_row_key("row_key1") + row_range1 = RowRange(b"row_key21", b"row_key29") + row_set.add_row_range(row_range1) + + request = _ReadRowsRequestPB(table_name=table_name) + row_set._update_message_request(request) + + expected_request = _ReadRowsRequestPB(table_name=table_name) + expected_request.rows.row_keys.append(_to_bytes("row_key1")) + + expected_request.rows.row_ranges.add(**row_range1.get_range_kwargs()) + + self.assertEqual(request, expected_request) + + +class TestRowRange(unittest.TestCase): + + @staticmethod + def _get_target_class(): + from google.cloud.bigtable.row_set import RowRange + return RowRange + + def _make_one(self, *args, **kwargs): + return self._get_target_class()(*args, **kwargs) + + def test_constructor(self): + start_key = "row_key1" + end_key = "row_key9" + row_range = self._make_one(start_key, end_key) + self.assertEqual(start_key, row_range.start_key) + self.assertEqual(end_key, row_range.end_key) + self.assertTrue(row_range.start_inclusive) + self.assertFalse(row_range.end_inclusive) + + def test_get_range_kwargs_closed_open(self): + start_key = b"row_key1" + end_key = b"row_key9" + expected_result = {'start_key_closed': start_key, + 'end_key_open': end_key} + row_range = self._make_one(start_key, end_key) + actual_result = row_range.get_range_kwargs() + self.assertEqual(expected_result, actual_result) + + def test_get_range_kwargs_open_closed(self): + start_key = b"row_key1" + end_key = b"row_key9" + expected_result = {'start_key_open': start_key, + 'end_key_closed': end_key} + row_range = self._make_one(start_key, end_key, False, True) + actual_result = row_range.get_range_kwargs() + self.assertEqual(expected_result, actual_result) + + +def _ReadRowsRequestPB(*args, **kw): + from google.cloud.bigtable_v2.proto import ( + bigtable_pb2 as messages_v2_pb2) + + return messages_v2_pb2.ReadRowsRequest(*args, **kw) diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index 8f83f1c74674..fa611471276f 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -132,6 +132,7 @@ 
class TestTable(unittest.TestCase): ROW_KEY = b'row-key' ROW_KEY_1 = b'row-key-1' ROW_KEY_2 = b'row-key-2' + ROW_KEY_3 = b'row-key-3' FAMILY_NAME = u'family' QUALIFIER = b'qualifier' TIMESTAMP_MICROS = 100 @@ -629,6 +630,74 @@ def test_yield_retry_rows(self): result = rows[1] self.assertEqual(result.row_key, self.ROW_KEY_2) + def test_yield_rows_with_row_set(self): + from google.cloud.bigtable_v2.gapic import bigtable_client + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_table_admin_client) + from google.cloud.bigtable.row_set import RowSet + from google.cloud.bigtable.row_set import RowRange + + data_api = bigtable_client.BigtableClient(mock.Mock()) + table_api = bigtable_table_admin_client.BigtableTableAdminClient( + mock.Mock()) + credentials = _make_credentials() + client = self._make_client(project='project-id', + credentials=credentials, admin=True) + client._table_data_client = data_api + client._table_admin_client = table_api + instance = client.instance(instance_id=self.INSTANCE_ID) + table = self._make_one(self.TABLE_ID, instance) + + # Create response_iterator + chunk_1 = _ReadRowsResponseCellChunkPB( + row_key=self.ROW_KEY_1, + family_name=self.FAMILY_NAME, + qualifier=self.QUALIFIER, + timestamp_micros=self.TIMESTAMP_MICROS, + value=self.VALUE, + commit_row=True + ) + + chunk_2 = _ReadRowsResponseCellChunkPB( + row_key=self.ROW_KEY_2, + family_name=self.FAMILY_NAME, + qualifier=self.QUALIFIER, + timestamp_micros=self.TIMESTAMP_MICROS, + value=self.VALUE, + commit_row=True + ) + + chunk_3 = _ReadRowsResponseCellChunkPB( + row_key=self.ROW_KEY_3, + family_name=self.FAMILY_NAME, + qualifier=self.QUALIFIER, + timestamp_micros=self.TIMESTAMP_MICROS, + value=self.VALUE, + commit_row=True + ) + + response_1 = _ReadRowsResponseV2([chunk_1]) + response_2 = _ReadRowsResponseV2([chunk_2]) + response_3 = _ReadRowsResponseV2([chunk_3]) + response_iterator = _MockReadRowsIterator(response_1, response_2, + response_3) + + # Patch the stub used by the API method. 
+ client._table_data_client.bigtable_stub.ReadRows.side_effect = [ + response_iterator] + + rows = [] + row_set = RowSet() + row_set.add_row_range(RowRange(start_key=self.ROW_KEY_1, + end_key=self.ROW_KEY_2)) + row_set.add_row_key(self.ROW_KEY_3) + for row in table.yield_rows(row_set=row_set): + rows.append(row) + + self.assertEqual(rows[0].row_key, self.ROW_KEY_1) + self.assertEqual(rows[1].row_key, self.ROW_KEY_2) + self.assertEqual(rows[2].row_key, self.ROW_KEY_3) + def test_sample_row_keys(self): from google.cloud.bigtable_v2.gapic import bigtable_client from google.cloud.bigtable_admin_v2.gapic import ( @@ -1320,13 +1389,13 @@ class Test__create_row_request(unittest.TestCase): def _call_fut(self, table_name, row_key=None, start_key=None, end_key=None, filter_=None, limit=None, end_inclusive=False, - app_profile_id=None): + app_profile_id=None, row_set=None): from google.cloud.bigtable.table import _create_row_request return _create_row_request( table_name, row_key=row_key, start_key=start_key, end_key=end_key, filter_=filter_, limit=limit, end_inclusive=end_inclusive, - app_profile_id=app_profile_id) + app_profile_id=app_profile_id, row_set=row_set) def test_table_name_only(self): table_name = 'table_name' @@ -1339,6 +1408,14 @@ def test_row_key_row_range_conflict(self): with self.assertRaises(ValueError): self._call_fut(None, row_key=object(), end_key=object()) + def test_row_key_row_set_conflict(self): + with self.assertRaises(ValueError): + self._call_fut(None, row_key=object(), row_set=object()) + + def test_row_range_row_set_conflict(self): + with self.assertRaises(ValueError): + self._call_fut(None, end_key=object(), row_set=object()) + def test_row_key(self): table_name = 'table_name' row_key = b'row_key' @@ -1409,6 +1486,14 @@ def test_with_limit(self): ) self.assertEqual(result, expected_result) + def test_with_row_set(self): + from google.cloud.bigtable.row_set import RowSet + table_name = 'table_name' + row_set = RowSet() + result = self._call_fut(table_name, row_set=row_set) + expected_result = _ReadRowsRequestPB(table_name=table_name) + self.assertEqual(result, expected_result) + def test_with_app_profile_id(self): table_name = 'table_name' limit = 1337 From 2015949be83e1f6e2ef3296b9feed6c9a86c649f Mon Sep 17 00:00:00 2001 From: Solomon Duskis Date: Thu, 12 Jul 2018 23:42:28 -0400 Subject: [PATCH 148/892] Fixing the broken Bigtable system test. 
(#5607) --- .../google/cloud/bigtable/table.py | 2 +- packages/google-cloud-bigtable/tests/system.py | 10 +++++++--- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index f8602a44c748..ddba8e6b17a6 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -208,7 +208,7 @@ def create(self, initial_split_keys=[], column_families={}): table = admin_messages_v2_pb2.Table(column_families=families) split = table_admin_messages_v2_pb2.CreateTableRequest.Split - splits = [split(key=key) for key in initial_split_keys] + splits = [split(key=_to_bytes(key)) for key in initial_split_keys] table_client.create_table(parent=instance_name, table_id=self.table_id, table=table, initial_splits=splits) diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index be65c1f03388..2b1003889e02 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -263,15 +263,19 @@ def test_create_table_with_families(self): def test_create_table_with_split_keys(self): temp_table_id = 'foo-bar-baz-split-table' initial_split_keys = [b'split_key_1', b'split_key_10', - b'split_key_20', b''] + b'split_key_20'] temp_table = Config.INSTANCE.table(temp_table_id) temp_table.create(initial_split_keys=initial_split_keys) self.tables_to_delete.append(temp_table) # Read Sample Row Keys for created splits sample_row_keys = temp_table.sample_row_keys() - self.assertEqual(set([srk.row_key for srk in sample_row_keys]), - set(initial_split_keys)) + actual_keys = [srk.row_key for srk in sample_row_keys] + + expected_keys = initial_split_keys + expected_keys.append(b'') + + self.assertEqual(actual_keys, expected_keys) def test_create_column_family(self): temp_table_id = 'test-create-column-family' From 6c41e0647d65cbe4cf655aa6297b7f813ee23e23 Mon Sep 17 00:00:00 2001 From: Solomon Duskis Date: Fri, 13 Jul 2018 00:02:36 -0400 Subject: [PATCH 149/892] Adding optional app profile on instance.table() (#5605) --- .../google/cloud/bigtable/instance.py | 7 +++++-- packages/google-cloud-bigtable/tests/unit/test_instance.py | 4 +++- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py index 37c0a96887dc..57d3385a9f12 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py @@ -253,16 +253,19 @@ def delete(self): """ self._client.instance_admin_client.delete_instance(name=self.name) - def table(self, table_id): + def table(self, table_id, app_profile_id=None): """Factory to create a table associated with this instance. :type table_id: str :param table_id: The ID of the table. + :type app_profile_id: str + :param app_profile_id: (Optional) The unique name of the AppProfile. + :rtype: :class:`Table ` :returns: The table owned by this instance. """ - return Table(table_id, self) + return Table(table_id, self, app_profile_id=app_profile_id) def list_tables(self): """List the tables in this instance. 
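For context, a minimal sketch of how the optional ``app_profile_id`` argument added to ``Instance.table()`` above can be used. Illustrative only: the project, instance, table, and app profile IDs are hypothetical placeholders, and default application credentials are assumed.

from google.cloud.bigtable import Client

client = Client(project='my-project')
instance = client.instance('my-instance')

# The app profile ID is stored on the Table and passed through to the
# read requests it builds.
table = instance.table('my-table', app_profile_id='my-app-profile')
for row in table.yield_rows(limit=5):
    print(row.row_key)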
diff --git a/packages/google-cloud-bigtable/tests/unit/test_instance.py b/packages/google-cloud-bigtable/tests/unit/test_instance.py index 870f85e168ad..1d5e1f1c2ea8 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_instance.py +++ b/packages/google-cloud-bigtable/tests/unit/test_instance.py @@ -103,12 +103,14 @@ def test_constructor_non_default(self): def test_table_factory(self): from google.cloud.bigtable.table import Table + app_profile_id = 'appProfileId1262094415' instance = self._make_one(self.INSTANCE_ID, None) - table = instance.table(self.TABLE_ID) + table = instance.table(self.TABLE_ID, app_profile_id=app_profile_id) self.assertIsInstance(table, Table) self.assertEqual(table.table_id, self.TABLE_ID) self.assertEqual(table._instance, instance) + self.assertEqual(table._app_profile_id, app_profile_id) def test__update_from_pb_success(self): from google.cloud.bigtable_admin_v2.proto import ( From 79bb1bf7f69df3ce59bfd665964efa5064de9af4 Mon Sep 17 00:00:00 2001 From: Alex <7764119+AVaksman@users.noreply.github.com> Date: Tue, 17 Jul 2018 14:08:59 -0400 Subject: [PATCH 150/892] Bigtable: Add 'instance_type', 'labels' to 'Instance' ctor (#5614) --- .../google/cloud/bigtable/client.py | 32 ++++++- .../google/cloud/bigtable/enums.py | 55 +++++++++++ .../google/cloud/bigtable/instance.py | 68 ++++++++++---- .../google-cloud-bigtable/tests/system.py | 41 ++++++++- .../tests/unit/test_client.py | 17 +++- .../tests/unit/test_instance.py | 91 ++++++++++++++----- 6 files changed, 252 insertions(+), 52 deletions(-) create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/enums.py diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py index 6c4178e1daad..4ddc56ea091c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py @@ -39,7 +39,12 @@ from google.cloud.client import ClientWithProject +from google.cloud.bigtable_admin_v2 import enums + +INSTANCE_TYPE_PRODUCTION = enums.Instance.Type.PRODUCTION +INSTANCE_TYPE_DEVELOPMENT = enums.Instance.Type.DEVELOPMENT +INSTANCE_TYPE_UNSPECIFIED = enums.Instance.Type.TYPE_UNSPECIFIED _CLIENT_INFO = client_info.ClientInfo( client_library_version=__version__) SPANNER_ADMIN_SCOPE = 'https://www.googleapis.com/auth/spanner.admin' @@ -192,10 +197,10 @@ def instance_admin_client(self): self._instance_admin_client = ( bigtable_admin_v2.BigtableInstanceAdminClient( credentials=self._credentials, client_info=_CLIENT_INFO)) - return self._instance_admin_client - def instance(self, instance_id, display_name=None): + def instance(self, instance_id, display_name=None, + instance_type=None, labels=None): """Factory to create a instance associated with this client. :type instance_id: str @@ -207,10 +212,31 @@ def instance(self, instance_id, display_name=None): characters.) If this value is not set in the constructor, will fall back to the instance ID. + :type instance_type: int + :param instance_type: (Optional) The type of the instance. + Possible values are represented + by the following constants: + :data:`google.cloud.bigtable.enums.InstanceType.PRODUCTION`. + :data:`google.cloud.bigtable.enums.InstanceType.DEVELOPMENT`, + Defaults to + :data:`google.cloud.bigtable.enums.InstanceType.UNSPECIFIED`. 
+ + :type labels: dict + :param labels: (Optional) Labels are a flexible and lightweight + mechanism for organizing cloud resources into groups + that reflect a customer's organizational needs and + deployment strategies. They can be used to filter + resources and aggregate metrics. Label keys must be + between 1 and 63 characters long. Maximum 64 labels can + be associated with a given resource. Label values must + be between 0 and 63 characters long. Keys and values + must both be under 128 bytes. + :rtype: :class:`~google.cloud.bigtable.instance.Instance` :returns: an instance owned by this client. """ - return Instance(instance_id, self, display_name=display_name) + return Instance(instance_id, self, display_name=display_name, + instance_type=instance_type, labels=labels) def list_instances(self): """List instances owned by the project. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/enums.py b/packages/google-cloud-bigtable/google/cloud/bigtable/enums.py new file mode 100644 index 000000000000..0f50793f6e10 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/enums.py @@ -0,0 +1,55 @@ +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Wrappers for gapic enum types.""" + +from google.cloud.bigtable_admin_v2 import enums + + +class StorageType(object): + """ + Storage media types for persisting Bigtable data. + + Attributes: + UNSPECIFIED (int): The user did not specify a storage type. + SSD (int): Flash (SSD) storage should be used. + HDD (int): Magnetic drive (HDD) storage should be used. + """ + UNSPECIFIED = enums.StorageType.STORAGE_TYPE_UNSPECIFIED + SSD = enums.StorageType.SSD + HDD = enums.StorageType.HDD + + +class InstanceType(object): + """ + The type of the instance. + + Attributes: + UNSPECIFIED (int): The type of the instance is unspecified. + If set when creating an instance, a ``PRODUCTION`` instance will + be created. If set when updating an instance, the type will be + left unchanged. + PRODUCTION (int): An instance meant for production use. + ``serve_nodes`` must be set on the cluster. + DEVELOPMENT (int): The instance is meant for development and testing + purposes only; it has no performance or uptime guarantees and is not + covered by SLA. + After a development instance is created, it can be upgraded by + updating the instance to type ``PRODUCTION``. An instance created + as a production instance cannot be changed to a development instance. + When creating a development instance, ``serve_nodes`` on the cluster + must not be set. 
+ """ + UNSPECIFIED = enums.Instance.Type.TYPE_UNSPECIFIED + PRODUCTION = enums.Instance.Type.PRODUCTION + DEVELOPMENT = enums.Instance.Type.DEVELOPMENT diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py index 57d3385a9f12..a067af2696ee 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py @@ -22,16 +22,16 @@ from google.protobuf import field_mask_pb2 -from google.cloud.bigtable_admin_v2 import enums from google.cloud.bigtable_admin_v2.types import instance_pb2 _EXISTING_INSTANCE_LOCATION_ID = 'see-existing-cluster' _INSTANCE_NAME_RE = re.compile(r'^projects/(?P[^/]+)/' r'instances/(?P[a-z][-a-z0-9]*)$') + + ROUTING_POLICY_TYPE_ANY = 1 ROUTING_POLICY_TYPE_SINGLE = 2 -_STORAGE_TYPE_UNSPECIFIED = enums.StorageType.STORAGE_TYPE_UNSPECIFIED class Instance(object): @@ -61,12 +61,39 @@ class Instance(object): Cloud Console UI. (Must be between 4 and 30 characters.) If this value is not set in the constructor, will fall back to the instance ID. + + :type instance_type: int + :param instance_type: (Optional) The type of the instance. + Possible values are represented + by the following constants: + :data:`google.cloud.bigtable.enums.InstanceType.PRODUCTION`. + :data:`google.cloud.bigtable.enums.InstanceType.DEVELOPMENT`, + Defaults to + :data:`google.cloud.bigtable.enums.InstanceType.UNSPECIFIED`. + + :type labels: dict + :param labels: (Optional) Labels are a flexible and lightweight + mechanism for organizing cloud resources into groups + that reflect a customer's organizational needs and + deployment strategies. They can be used to filter + resources and aggregate metrics. Label keys must be + between 1 and 63 characters long. Maximum 64 labels can + be associated with a given resource. Label values must + be between 0 and 63 characters long. Keys and values + must both be under 128 bytes. """ - def __init__(self, instance_id, client, display_name=None): + def __init__(self, + instance_id, + client, + display_name=None, + instance_type=None, + labels=None): self.instance_id = instance_id - self.display_name = display_name or instance_id self._client = client + self.display_name = display_name or instance_id + self.type_ = instance_type + self.labels = labels @classmethod def from_pb(cls, instance_pb, client): @@ -95,8 +122,8 @@ def from_pb(cls, instance_pb, client): 'project ID on the client') instance_id = match.group('instance_id') - result = cls(instance_id, client, - display_name=instance_pb.display_name) + result = cls(instance_id, client, instance_pb.display_name, + instance_pb.type, instance_pb.labels) return result def _update_from_pb(self, instance_pb): @@ -106,6 +133,8 @@ def _update_from_pb(self, instance_pb): if not instance_pb.display_name: # Simple field (string) raise ValueError('Instance protobuf does not contain display_name') self.display_name = instance_pb.display_name + self.type_ = instance_pb.type + self.labels = instance_pb.labels @property def name(self): @@ -151,7 +180,7 @@ def reload(self): def create(self, location_id=_EXISTING_INSTANCE_LOCATION_ID, serve_nodes=DEFAULT_SERVE_NODES, - default_storage_type=_STORAGE_TYPE_UNSPECIFIED): + default_storage_type=None): """Create this instance. .. note:: @@ -178,13 +207,14 @@ def create(self, location_id=_EXISTING_INSTANCE_LOCATION_ID, cluster; used to set up the instance's cluster. 
         :type default_storage_type: int
-        :param default_storage_type: (Optional) The default values are
-                                     STORAGE_TYPE_UNSPECIFIED = 0: The user
-                                     did not specify a storage type.
-                                     SSD = 1: Flash (SSD) storage should be
-                                     used.
-                                     HDD = 2: Magnetic drive (HDD) storage
-                                     should be used.
+        :param default_storage_type: (Optional) The storage media type for
+                                     persisting Bigtable data.
+                                     Possible values are represented
+                                     by the following constants:
+                                     :data:`google.cloud.bigtable.enums.StorageType.SSD`,
+                                     :data:`google.cloud.bigtable.enums.StorageType.HDD`.
+                                     Defaults to
+                                     :data:`google.cloud.bigtable.enums.StorageType.UNSPECIFIED`.
 
         :rtype: :class:`~google.api_core.operation.Operation`
         :returns: The long-running operation corresponding to the create
@@ -201,8 +231,8 @@ def create(self, location_id=_EXISTING_INSTANCE_LOCATION_ID,
             serve_nodes=serve_nodes,
             default_storage_type=default_storage_type)
         instance = instance_pb2.Instance(
-            display_name=self.display_name
-        )
+            display_name=self.display_name, type=self.type_,
+            labels=self.labels)
         clusters[cluster_id] = cluster
 
         parent = self._client.project_path
@@ -224,10 +254,10 @@ def update(self):
             before calling :meth:`update`.
         """
-        type = enums.Instance.Type.TYPE_UNSPECIFIED
         self._client.instance_admin_client.update_instance(
-            name=self.name, display_name=self.display_name, type_=type,
-            labels={})
+            name=self.name, display_name=self.display_name,
+            type_=self.type_,
+            labels=self.labels)
 
     def delete(self):
         """Delete this instance.
diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py
index 2b1003889e02..ca5600a71aaa 100644
--- a/packages/google-cloud-bigtable/tests/system.py
+++ b/packages/google-cloud-bigtable/tests/system.py
@@ -39,6 +39,7 @@
 LOCATION_ID = 'us-central1-c'
 INSTANCE_ID = 'g-c-p' + unique_resource_id('-')
+LABELS = {u'foo': u'bar'}
 TABLE_ID = 'google-cloud-python-test-table'
 APP_PROFILE_ID = 'app-profile-id'
 CLUSTER_ID = INSTANCE_ID+'-cluster'
@@ -86,7 +87,7 @@ def setUpModule():
     else:
         Config.CLIENT = Client(admin=True)
 
-    Config.INSTANCE = Config.CLIENT.instance(INSTANCE_ID)
+    Config.INSTANCE = Config.CLIENT.instance(INSTANCE_ID, labels=LABELS)
 
     if not Config.IN_EMULATOR:
         retry = RetryErrors(GrpcRendezvous,
@@ -131,6 +132,7 @@ def test_list_instances(self):
         self.assertTrue(expected.issubset(found))
 
     def test_reload(self):
+        from google.cloud.bigtable import enums
         # Use same arguments as Config.INSTANCE (created in `setUpModule`)
         # so we can use reload() on a fresh instance.
         instance = Config.CLIENT.instance(INSTANCE_ID)
@@ -139,9 +141,13 @@ def test_reload(self):
         instance.reload()
 
         self.assertEqual(instance.display_name, Config.INSTANCE.display_name)
+        self.assertEqual(instance.labels, Config.INSTANCE.labels)
+        self.assertEqual(instance.type_, enums.InstanceType.PRODUCTION)
 
-    def test_create_instance(self):
-        ALT_INSTANCE_ID = 'new' + unique_resource_id('-')
+    def test_create_instance_defaults(self):
+        from google.cloud.bigtable import enums
+
+        ALT_INSTANCE_ID = 'new-def' + unique_resource_id('-')
         instance = Config.CLIENT.instance(ALT_INSTANCE_ID)
         operation = instance.create(location_id=LOCATION_ID)
         # Make sure this instance gets deleted after the test case.
@@ -156,6 +162,35 @@ def test_create_instance(self): self.assertEqual(instance, instance_alt) self.assertEqual(instance.display_name, instance_alt.display_name) + # Make sure that by default a PRODUCTION type instance is created + self.assertIsNone(instance.type_) + self.assertEqual(instance_alt.type_, enums.InstanceType.PRODUCTION) + self.assertIsNone(instance.labels) + self.assertFalse(instance_alt.labels) + + def test_create_instance(self): + from google.cloud.bigtable import enums + _DEVELOPMENT = enums.InstanceType.DEVELOPMENT + + ALT_INSTANCE_ID = 'new' + unique_resource_id('-') + instance = Config.CLIENT.instance(ALT_INSTANCE_ID, + instance_type=_DEVELOPMENT, + labels=LABELS) + operation = instance.create(location_id=LOCATION_ID, serve_nodes=None) + # Make sure this instance gets deleted after the test case. + self.instances_to_delete.append(instance) + + # We want to make sure the operation completes. + operation.result(timeout=10) + + # Create a new instance instance and make sure it is the same. + instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID) + instance_alt.reload() + + self.assertEqual(instance, instance_alt) + self.assertEqual(instance.display_name, instance_alt.display_name) + self.assertEqual(instance.type_, instance_alt.type_) + self.assertEqual(instance.labels, instance_alt.labels) def test_update(self): OLD_DISPLAY_NAME = Config.INSTANCE.display_name diff --git a/packages/google-cloud-bigtable/tests/unit/test_client.py b/packages/google-cloud-bigtable/tests/unit/test_client.py index 97df171b4cb6..a74260cc84ac 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/test_client.py @@ -89,33 +89,40 @@ def test_instance_factory_defaults(self): PROJECT = 'PROJECT' INSTANCE_ID = 'instance-id' - DISPLAY_NAME = 'display-name' credentials = _make_credentials() client = self._make_one( project=PROJECT, credentials=credentials) - instance = client.instance(INSTANCE_ID, display_name=DISPLAY_NAME) + instance = client.instance(INSTANCE_ID) self.assertIsInstance(instance, Instance) self.assertEqual(instance.instance_id, INSTANCE_ID) - self.assertEqual(instance.display_name, DISPLAY_NAME) + self.assertEqual(instance.display_name, INSTANCE_ID) + self.assertIsNone(instance.type_) + self.assertIsNone(instance.labels) self.assertIs(instance._client, client) - def test_instance_factory_w_explicit_serve_nodes(self): + def test_instance_factory_non_defaults(self): from google.cloud.bigtable.instance import Instance + from google.cloud.bigtable import enums PROJECT = 'PROJECT' INSTANCE_ID = 'instance-id' DISPLAY_NAME = 'display-name' + instance_type = enums.InstanceType.DEVELOPMENT + labels = {'foo': 'bar'} credentials = _make_credentials() client = self._make_one( project=PROJECT, credentials=credentials) - instance = client.instance(INSTANCE_ID, display_name=DISPLAY_NAME) + instance = client.instance(INSTANCE_ID, display_name=DISPLAY_NAME, + instance_type=instance_type, labels=labels) self.assertIsInstance(instance, Instance) self.assertEqual(instance.instance_id, INSTANCE_ID) self.assertEqual(instance.display_name, DISPLAY_NAME) + self.assertEqual(instance.type_, instance_type) + self.assertEqual(instance.labels, labels) self.assertIs(instance._client, client) def test_admin_client_w_value_error(self): diff --git a/packages/google-cloud-bigtable/tests/unit/test_instance.py b/packages/google-cloud-bigtable/tests/unit/test_instance.py index 1d5e1f1c2ea8..1639bc7da960 100644 --- 
a/packages/google-cloud-bigtable/tests/unit/test_instance.py +++ b/packages/google-cloud-bigtable/tests/unit/test_instance.py @@ -58,6 +58,7 @@ class TestInstance(unittest.TestCase): 'projects/' + PROJECT + '/instances/' + INSTANCE_ID + '/appProfiles/') DISPLAY_NAME = 'display_name' + LABELS = {'foo': 'bar'} OP_ID = 8915 OP_NAME = ('operations/projects/%s/instances/%soperations/%d' % (PROJECT, INSTANCE_ID, OP_ID)) @@ -88,16 +89,25 @@ def test_constructor_defaults(self): instance = self._make_one(self.INSTANCE_ID, client) self.assertEqual(instance.instance_id, self.INSTANCE_ID) self.assertEqual(instance.display_name, self.INSTANCE_ID) + self.assertIsNone(instance.type_) + self.assertIsNone(instance.labels) self.assertIs(instance._client, client) def test_constructor_non_default(self): - display_name = 'display_name' + from google.cloud.bigtable import enums + + instance_type = enums.InstanceType.DEVELOPMENT + labels = {'test': 'test'} client = object() instance = self._make_one(self.INSTANCE_ID, client, - display_name=display_name) + display_name=self.DISPLAY_NAME, + instance_type=instance_type, + labels=labels) self.assertEqual(instance.instance_id, self.INSTANCE_ID) - self.assertEqual(instance.display_name, display_name) + self.assertEqual(instance.display_name, self.DISPLAY_NAME) + self.assertEqual(instance.type_, instance_type) + self.assertEqual(instance.labels, labels) self.assertIs(instance._client, client) def test_table_factory(self): @@ -115,16 +125,42 @@ def test_table_factory(self): def test__update_from_pb_success(self): from google.cloud.bigtable_admin_v2.proto import ( instance_pb2 as data_v2_pb2) + from google.cloud.bigtable import enums - display_name = 'display_name' + instance_type = enums.InstanceType.PRODUCTION instance_pb = data_v2_pb2.Instance( - display_name=display_name, + display_name=self.DISPLAY_NAME, + type=instance_type, + labels=self.LABELS ) instance = self._make_one(None, None) self.assertIsNone(instance.display_name) + self.assertIsNone(instance.type_) + self.assertIsNone(instance.labels) instance._update_from_pb(instance_pb) - self.assertEqual(instance.display_name, display_name) + self.assertEqual(instance.display_name, self.DISPLAY_NAME) + self.assertEqual(instance.type_, instance_type) + self.assertEqual(instance.labels, self.LABELS) + + def test__update_from_pb_success_defaults(self): + from google.cloud.bigtable_admin_v2.proto import ( + instance_pb2 as data_v2_pb2) + from google.cloud.bigtable import enums + + instance_pb = data_v2_pb2.Instance( + display_name=self.DISPLAY_NAME, + ) + + instance = self._make_one(None, None) + self.assertIsNone(instance.display_name) + self.assertIsNone(instance.type_) + self.assertIsNone(instance.labels) + instance._update_from_pb(instance_pb) + self.assertEqual(instance.display_name, self.DISPLAY_NAME) + self.assertEqual(instance.type_, + enums.InstanceType.UNSPECIFIED) + self.assertFalse(instance.labels) def test__update_from_pb_no_display_name(self): from google.cloud.bigtable_admin_v2.proto import ( @@ -139,12 +175,16 @@ def test__update_from_pb_no_display_name(self): def test_from_pb_success(self): from google.cloud.bigtable_admin_v2.proto import ( instance_pb2 as data_v2_pb2) + from google.cloud.bigtable import enums client = _Client(project=self.PROJECT) + instance_type = enums.InstanceType.PRODUCTION instance_pb = data_v2_pb2.Instance( name=self.INSTANCE_NAME, display_name=self.INSTANCE_ID, + type=instance_type, + labels=self.LABELS ) klass = self._get_target_class() @@ -152,6 +192,9 @@ def 
test_from_pb_success(self): self.assertIsInstance(instance, klass) self.assertEqual(instance._client, client) self.assertEqual(instance.instance_id, self.INSTANCE_ID) + self.assertEqual(instance.display_name, self.INSTANCE_ID) + self.assertEqual(instance.type_, instance_type) + self.assertEqual(instance.labels, self.LABELS) def test_from_pb_bad_instance_name(self): from google.cloud.bigtable_admin_v2.proto import ( @@ -224,6 +267,7 @@ def test_reload(self): instance_pb2 as data_v2_pb2) from google.cloud.bigtable_admin_v2.gapic import ( bigtable_instance_admin_client) + from google.cloud.bigtable import enums api = bigtable_instance_admin_client.BigtableInstanceAdminClient( mock.Mock()) @@ -234,8 +278,11 @@ def test_reload(self): # Create response_pb DISPLAY_NAME = u'hey-hi-hello' + instance_type = enums.InstanceType.PRODUCTION response_pb = data_v2_pb2.Instance( display_name=DISPLAY_NAME, + type=instance_type, + labels=self.LABELS ) # Patch the stub used by the API method. @@ -265,7 +312,7 @@ def test_create(self): from google.cloud.bigtable_admin_v2.proto import ( bigtable_instance_admin_pb2 as messages_v2_pb2) from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.bigtable_admin_v2 import enums + from google.cloud.bigtable import enums from google.cloud.bigtable_admin_v2.gapic import ( bigtable_instance_admin_client) from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES @@ -276,7 +323,9 @@ def test_create(self): client = self._make_client(project=self.PROJECT, credentials=credentials, admin=True) instance = self._make_one(self.INSTANCE_ID, client, - display_name=self.DISPLAY_NAME) + self.DISPLAY_NAME, + enums.InstanceType.PRODUCTION, + self.LABELS) # Create response_pb metadata = messages_v2_pb2.CreateInstanceMetadata(request_time=NOW_PB) @@ -304,12 +353,9 @@ def test_create(self): cluster_id = '{}-cluster'.format(self.INSTANCE_ID) cluster = self._create_cluster( instance_api, cluster_id, self.LOCATION_ID, DEFAULT_SERVE_NODES, - enums.StorageType.STORAGE_TYPE_UNSPECIFIED) + enums.StorageType.UNSPECIFIED) - expected_request = self._create_instance_request( - self.DISPLAY_NAME, - {cluster_id: cluster} - ) + expected_request = self._create_instance_request({cluster_id: cluster}) self.assertEqual(expected_request, actual_request) self.assertIsInstance(result, operation.Operation) # self.assertEqual(result.operation.name, self.OP_NAME) @@ -319,7 +365,7 @@ def test_create(self): def test_create_w_explicit_serve_nodes(self): from google.api_core import operation from google.longrunning import operations_pb2 - from google.cloud.bigtable_admin_v2 import enums + from google.cloud.bigtable import enums from google.cloud.bigtable_admin_v2.gapic import ( bigtable_instance_admin_client) @@ -328,8 +374,9 @@ def test_create_w_explicit_serve_nodes(self): client = self._make_client(project=self.PROJECT, credentials=credentials, admin=True) instance = self._make_one(self.INSTANCE_ID, client, - display_name=self.DISPLAY_NAME) - + self.DISPLAY_NAME, + enums.InstanceType.PRODUCTION, + self.LABELS) # Create response_pb response_pb = operations_pb2.Operation(name=self.OP_NAME) @@ -351,10 +398,7 @@ def test_create_w_explicit_serve_nodes(self): instance_api, cluster_id, self.LOCATION_ID, serve_nodes, enums.StorageType.SSD) - expected_request = self._create_instance_request( - self.DISPLAY_NAME, - {cluster_id: cluster} - ) + expected_request = self._create_instance_request({cluster_id: cluster}) self.assertEqual(expected_request, actual_request) self.assertIsInstance(result, 
operation.Operation) @@ -371,12 +415,15 @@ def _create_cluster(self, instance_api, cluster_id, location_id, serve_nodes=server_nodes, default_storage_type=storage_type) - def _create_instance_request(self, display_name, clusters): + def _create_instance_request(self, clusters): from google.cloud.bigtable_admin_v2.proto import ( bigtable_instance_admin_pb2 as messages_v2_pb2) from google.cloud.bigtable_admin_v2.types import instance_pb2 + from google.cloud.bigtable import enums - instance = instance_pb2.Instance(display_name=display_name) + instance = instance_pb2.Instance(display_name=self.DISPLAY_NAME, + type=enums.InstanceType.PRODUCTION, + labels=self.LABELS) return messages_v2_pb2.CreateInstanceRequest( parent='projects/%s' % (self.PROJECT), From 1b9c96e4d4e8b19b56bc176180443ff386efae4b Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 17 Jul 2018 15:01:42 -0400 Subject: [PATCH 151/892] Shorten instance / cluster name to fix CI breakage. (#5641) --- packages/google-cloud-bigtable/tests/system.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index ca5600a71aaa..376bf993bb88 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -147,7 +147,7 @@ def test_reload(self): def test_create_instance_defaults(self): from google.cloud.bigtable import enums - ALT_INSTANCE_ID = 'new-def' + unique_resource_id('-') + ALT_INSTANCE_ID = 'ndef' + unique_resource_id('-') instance = Config.CLIENT.instance(ALT_INSTANCE_ID) operation = instance.create(location_id=LOCATION_ID) # Make sure this instance gets deleted after the test case. From fb68361d819fb09693ea6c688d950978e94f1533 Mon Sep 17 00:00:00 2001 From: sumit-ql <39561577+sumit-ql@users.noreply.github.com> Date: Mon, 23 Jul 2018 23:51:02 +0530 Subject: [PATCH 152/892] Bigtable: Ability to create an instance with multiple clusters (#5622) --- .../google/cloud/bigtable/cluster.py | 41 +++++++++- .../google/cloud/bigtable/instance.py | 38 ++++++---- .../tests/unit/test_cluster.py | 28 +++---- .../tests/unit/test_instance.py | 74 +++++++++++++++++++ 4 files changed, 152 insertions(+), 29 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py index f5c31d8bc4ce..4bac94d1c8d0 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py @@ -16,6 +16,8 @@ import re +from google.cloud.bigtable_admin_v2 import enums +from google.cloud.bigtable_admin_v2.types import instance_pb2 _CLUSTER_NAME_RE = re.compile(r'^projects/(?P[^/]+)/' @@ -25,6 +27,8 @@ DEFAULT_SERVE_NODES = 3 """Default number of nodes to use when creating a cluster.""" +_STORAGE_TYPE_UNSPECIFIED = enums.StorageType.STORAGE_TYPE_UNSPECIFIED + class Cluster(object): """Representation of a Google Cloud Bigtable Cluster. @@ -42,17 +46,35 @@ class Cluster(object): :type instance: :class:`~google.cloud.bigtable.instance.Instance` :param instance: The instance where the cluster resides. + :type location_id: str + :param location_id: ID of the location in which the cluster will be + created. For e.g "us-central1-b" + List of locations id is : + https://cloud.google.com/bigtable/docs/locations + For best performance, clients should be located + as close as possible to this cluster. 
+ :type serve_nodes: int :param serve_nodes: (Optional) The number of nodes in the cluster. Defaults to :data:`DEFAULT_SERVE_NODES`. + + :type default_storage_type: int + :param default_storage_type: (Optional) The default values are + STORAGE_TYPE_UNSPECIFIED = 0: The user + did not specify a storage type. + SSD = 1: Flash (SSD) storage should be used. + HDD = 2: Magnetic drive (HDD) storage + should be used. """ - def __init__(self, cluster_id, instance, - serve_nodes=DEFAULT_SERVE_NODES): + def __init__(self, cluster_id, instance, location_id, + serve_nodes=DEFAULT_SERVE_NODES, + default_storage_type=_STORAGE_TYPE_UNSPECIFIED): self.cluster_id = cluster_id self._instance = instance + self.location = location_id self.serve_nodes = serve_nodes - self.location = None + self.default_storage_type = default_storage_type @property def name(self): @@ -172,3 +194,16 @@ def delete(self): """ client = self._instance._client client.instance_admin_client.delete_cluster(self.name) + + def _create_pb_request(self): + """ Create cluster proto buff message for API calls """ + client = self._instance._client + cluster_name = client.instance_admin_client.cluster_path( + client.project, self._instance.instance_id, self.cluster_id) + location = client.instance_admin_client.location_path( + client.project, self.location) + cluster_message = instance_pb2.Cluster( + name=cluster_name, location=location, + serve_nodes=self.serve_nodes, + default_storage_type=self.default_storage_type) + return cluster_message diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py index a067af2696ee..6d31092322fc 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py @@ -19,6 +19,7 @@ from google.cloud.bigtable.table import Table from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES +from google.cloud.bigtable.cluster import Cluster from google.protobuf import field_mask_pb2 @@ -180,7 +181,7 @@ def reload(self): def create(self, location_id=_EXISTING_INSTANCE_LOCATION_ID, serve_nodes=DEFAULT_SERVE_NODES, - default_storage_type=None): + default_storage_type=None, clusters=None): """Create this instance. .. note:: @@ -216,29 +217,40 @@ def create(self, location_id=_EXISTING_INSTANCE_LOCATION_ID, Defaults to :data:`google.cloud.bigtable.enums.StorageType.UNSPECIFIED`. + :type clusters: class:`~[~google.cloud.bigtable.cluster.Cluster]` + :param clusters: List of clusters to be created. + :rtype: :class:`~google.api_core.operation.Operation` :returns: The long-running operation corresponding to the create operation. + + :raises: :class:`ValueError ` if both + ``clusters`` and one of ``location_id``, ``serve_nodes`` + and ``default_storage_type`` are set. 
""" - clusters = {} - cluster_id = '{}-cluster'.format(self.instance_id) - cluster_name = self._client.instance_admin_client.cluster_path( - self._client.project, self.instance_id, cluster_id) - location = self._client.instance_admin_client.location_path( - self._client.project, location_id) - cluster = instance_pb2.Cluster( - name=cluster_name, location=location, - serve_nodes=serve_nodes, - default_storage_type=default_storage_type) + + if clusters is None: + cluster_id = '{}-cluster'.format(self.instance_id) + + clusters = [Cluster(cluster_id, self, location_id, + serve_nodes, default_storage_type)] + + elif (location_id is not None or + serve_nodes is not None or + default_storage_type is not None): + raise ValueError("clusters and one of location_id, serve_nodes, \ + default_storage_type can not be set \ + simultaneously.") + instance = instance_pb2.Instance( display_name=self.display_name, type=self.type_, labels=self.labels) - clusters[cluster_id] = cluster + parent = self._client.project_path return self._client.instance_admin_client.create_instance( parent=parent, instance_id=self.instance_id, instance=instance, - clusters=clusters) + clusters={c.cluster_id: c._create_pb_request() for c in clusters}) def update(self): """Update this instance. diff --git a/packages/google-cloud-bigtable/tests/unit/test_cluster.py b/packages/google-cloud-bigtable/tests/unit/test_cluster.py index ec3887fc3ef0..c8caf36ebba9 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_cluster.py +++ b/packages/google-cloud-bigtable/tests/unit/test_cluster.py @@ -25,6 +25,7 @@ class TestCluster(unittest.TestCase): PROJECT = 'project' INSTANCE_ID = 'instance-id' CLUSTER_ID = 'cluster-id' + LOCATION_ID = 'location-id' CLUSTER_NAME = ('projects/' + PROJECT + '/instances/' + INSTANCE_ID + '/clusters/' + CLUSTER_ID) @@ -53,7 +54,7 @@ def test_constructor_defaults(self): client = _Client(self.PROJECT) instance = _Instance(self.INSTANCE_ID, client) - cluster = self._make_one(self.CLUSTER_ID, instance) + cluster = self._make_one(self.CLUSTER_ID, instance, self.LOCATION_ID) self.assertEqual(cluster.cluster_id, self.CLUSTER_ID) self.assertIs(cluster._instance, instance) self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES) @@ -64,7 +65,7 @@ def test_constructor_non_default(self): instance = _Instance(self.INSTANCE_ID, client) cluster = self._make_one(self.CLUSTER_ID, instance, - serve_nodes=SERVE_NODES) + self.LOCATION_ID, serve_nodes=SERVE_NODES) self.assertEqual(cluster.cluster_id, self.CLUSTER_ID) self.assertIs(cluster._instance, instance) self.assertEqual(cluster.serve_nodes, SERVE_NODES) @@ -76,37 +77,37 @@ def test_name_property(self): client = self._make_client(project=self.PROJECT, credentials=credentials, admin=True) instance = Instance(self.INSTANCE_ID, client) - cluster = self._make_one(self.CLUSTER_ID, instance) + cluster = self._make_one(self.CLUSTER_ID, instance, self.LOCATION_ID) self.assertEqual(cluster.name, self.CLUSTER_NAME) def test___eq__(self): client = _Client(self.PROJECT) instance = _Instance(self.INSTANCE_ID, client) - cluster1 = self._make_one(self.CLUSTER_ID, instance) - cluster2 = self._make_one(self.CLUSTER_ID, instance) + cluster1 = self._make_one(self.CLUSTER_ID, instance, self.LOCATION_ID) + cluster2 = self._make_one(self.CLUSTER_ID, instance, self.LOCATION_ID) self.assertEqual(cluster1, cluster2) def test___eq__type_differ(self): client = _Client(self.PROJECT) instance = _Instance(self.INSTANCE_ID, client) - cluster1 = self._make_one(self.CLUSTER_ID, instance) + cluster1 = 
self._make_one(self.CLUSTER_ID, instance, self.LOCATION_ID) cluster2 = object() self.assertNotEqual(cluster1, cluster2) def test___ne__same_value(self): client = _Client(self.PROJECT) instance = _Instance(self.INSTANCE_ID, client) - cluster1 = self._make_one(self.CLUSTER_ID, instance) - cluster2 = self._make_one(self.CLUSTER_ID, instance) + cluster1 = self._make_one(self.CLUSTER_ID, instance, self.LOCATION_ID) + cluster2 = self._make_one(self.CLUSTER_ID, instance, self.LOCATION_ID) comparison_val = (cluster1 != cluster2) self.assertFalse(comparison_val) def test___ne__(self): client = _Client(self.PROJECT) instance = _Instance(self.INSTANCE_ID, client) - cluster1 = self._make_one('cluster_id1', instance) - cluster2 = self._make_one('cluster_id2', instance) + cluster1 = self._make_one('cluster_id1', instance, self.LOCATION_ID) + cluster2 = self._make_one('cluster_id2', instance, self.LOCATION_ID) self.assertNotEqual(cluster1, cluster2) def test_reload(self): @@ -122,7 +123,7 @@ def test_reload(self): client = self._make_client(project=self.PROJECT, credentials=credentials, admin=True) instance = Instance(self.INSTANCE_ID, client) - cluster = self._make_one(self.CLUSTER_ID, instance) + cluster = self._make_one(self.CLUSTER_ID, instance, self.LOCATION_ID) # Create response_pb response_pb = _ClusterPB( @@ -159,7 +160,7 @@ def test_create(self): client = self._make_client(project=self.PROJECT, credentials=credentials, admin=True) instance = Instance(self.INSTANCE_ID, client) - cluster = self._make_one(self.CLUSTER_ID, instance) + cluster = self._make_one(self.CLUSTER_ID, instance, self.LOCATION_ID) # Create response_pb OP_ID = 5678 @@ -207,7 +208,7 @@ def test_update(self): credentials=credentials, admin=True) instance = Instance(self.INSTANCE_ID, client) cluster = self._make_one(self.CLUSTER_ID, instance, - serve_nodes=SERVE_NODES) + self.LOCATION_ID, serve_nodes=SERVE_NODES) # Create request_pb request_pb = _ClusterPB( @@ -261,6 +262,7 @@ def test_delete(self): credentials=credentials, admin=True) instance = Instance(self.INSTANCE_ID, client) cluster = self._make_one(self.CLUSTER_ID, instance, + self.LOCATION_ID, serve_nodes=DEFAULT_SERVE_NODES) # Create response_pb diff --git a/packages/google-cloud-bigtable/tests/unit/test_instance.py b/packages/google-cloud-bigtable/tests/unit/test_instance.py index 1639bc7da960..a6bc1bf5ab97 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_instance.py +++ b/packages/google-cloud-bigtable/tests/unit/test_instance.py @@ -18,6 +18,7 @@ import mock from ._testing import _make_credentials +from google.cloud.bigtable.cluster import Cluster class MultiCallableStub(object): @@ -304,6 +305,13 @@ def test_reload(self): # Check Instance optional config values before. 
self.assertEqual(instance.display_name, DISPLAY_NAME) + def test_create_check_conflicts(self): + client = object() + instance = self._make_one(self.INSTANCE_ID, client) + with self.assertRaises(ValueError): + instance.create(location_id=self.LOCATION_ID, + clusters=[object(), object()]) + def test_create(self): import datetime from google.api_core import operation @@ -402,6 +410,72 @@ def test_create_w_explicit_serve_nodes(self): self.assertEqual(expected_request, actual_request) self.assertIsInstance(result, operation.Operation) + def test_create_w_clusters(self): + import datetime + from google.api_core import operation + from google.longrunning import operations_pb2 + from google.protobuf.any_pb2 import Any + from google.cloud.bigtable_admin_v2.proto import ( + bigtable_instance_admin_pb2 as messages_v2_pb2) + from google.cloud._helpers import _datetime_to_pb_timestamp + from google.cloud.bigtable import enums + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_instance_admin_client) + from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES + + NOW = datetime.datetime.utcnow() + NOW_PB = _datetime_to_pb_timestamp(NOW) + credentials = _make_credentials() + client = self._make_client(project=self.PROJECT, + credentials=credentials, admin=True) + instance = self._make_one(self.INSTANCE_ID, client, + self.DISPLAY_NAME, + enums.InstanceType.PRODUCTION, + self.LABELS) + + # Create response_pb + metadata = messages_v2_pb2.CreateInstanceMetadata(request_time=NOW_PB) + type_url = 'type.googleapis.com/%s' % ( + messages_v2_pb2.CreateInstanceMetadata.DESCRIPTOR.full_name,) + response_pb = operations_pb2.Operation( + name=self.OP_NAME, + metadata=Any( + type_url=type_url, + value=metadata.SerializeToString(), + ) + ) + + # Patch the stub used by the API method. + channel = ChannelStub(responses=[response_pb]) + instance_api = ( + bigtable_instance_admin_client.BigtableInstanceAdminClient( + channel=channel)) + client._instance_admin_client = instance_api + + # Perform the method and check the result. 
+ clusters = [Cluster('cluster-id1', instance, 'location-id1'), + Cluster('cluster-id2', instance, 'location-id2')] + result = instance.create(None, None, None, clusters) + actual_request = channel.requests[0][1] + + cluster1 = self._create_cluster( + instance_api, 'cluster-id1', 'location-id1', DEFAULT_SERVE_NODES, + enums.StorageType.UNSPECIFIED) + + cluster2 = self._create_cluster( + instance_api, 'cluster-id2', 'location-id2', DEFAULT_SERVE_NODES, + enums.StorageType.UNSPECIFIED) + + expected_request = self._create_instance_request( + {'cluster-id1': cluster1, + 'cluster-id2': cluster2} + ) + self.assertEqual(expected_request, actual_request) + self.assertIsInstance(result, operation.Operation) + # self.assertEqual(result.operation.name, self.OP_NAME) + self.assertIsInstance(result.metadata, + messages_v2_pb2.CreateInstanceMetadata) + def _create_cluster(self, instance_api, cluster_id, location_id, server_nodes, storage_type): from google.cloud.bigtable_admin_v2.types import instance_pb2 From 7e9ada496fc9a6082a4ac3bb3ab1d8069946b41a Mon Sep 17 00:00:00 2001 From: Alex <7764119+AVaksman@users.noreply.github.com> Date: Thu, 26 Jul 2018 14:30:57 -0400 Subject: [PATCH 153/892] Bigtable: refactor update_app_profile() to remove update_mask argument (#5684) --- .../google/cloud/bigtable/enums.py | 24 ++++ .../google/cloud/bigtable/instance.py | 65 ++++----- .../cloud/bigtable_admin_v2/gapic/enums.py | 24 ++++ .../google-cloud-bigtable/tests/system.py | 100 ++++++++++++-- .../tests/unit/test_instance.py | 129 ++++++++++++------ 5 files changed, 256 insertions(+), 86 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/enums.py b/packages/google-cloud-bigtable/google/cloud/bigtable/enums.py index 0f50793f6e10..2f2246435b2e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/enums.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/enums.py @@ -53,3 +53,27 @@ class InstanceType(object): UNSPECIFIED = enums.Instance.Type.TYPE_UNSPECIFIED PRODUCTION = enums.Instance.Type.PRODUCTION DEVELOPMENT = enums.Instance.Type.DEVELOPMENT + + +class RoutingPolicyType(object): + """ + The type of the routing policy for app_profile. + + Attributes: + ANY (int): Read/write requests may be routed to any cluster in the + instance, and will fail over to another cluster in the event of + transient errors or delays. + Choosing this option sacrifices read-your-writes consistency to + improve availability. + See + https://cloud.google.com/bigtable/docs/reference/admin/rpc/google.bigtable.admin.v2#google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny + + SINGLE (int): Unconditionally routes all read/write requests to a + specific cluster. + This option preserves read-your-writes consistency, but does not improve + availability. 
+ See + https://cloud.google.com/bigtable/docs/reference/admin/rpc/google.bigtable.admin.v2#google.bigtable.admin.v2.AppProfile.SingleClusterRouting + """ + ANY = enums.RoutingPolicyType.ANY + SINGLE = enums.RoutingPolicyType.SINGLE diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py index 6d31092322fc..e3b26d55f386 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py @@ -25,16 +25,14 @@ from google.cloud.bigtable_admin_v2.types import instance_pb2 +from google.cloud.bigtable.enums import RoutingPolicyType + _EXISTING_INSTANCE_LOCATION_ID = 'see-existing-cluster' _INSTANCE_NAME_RE = re.compile(r'^projects/(?P[^/]+)/' r'instances/(?P[a-z][-a-z0-9]*)$') -ROUTING_POLICY_TYPE_ANY = 1 -ROUTING_POLICY_TYPE_SINGLE = 2 - - class Instance(object): """Representation of a Google Cloud Bigtable Instance. @@ -331,7 +329,7 @@ def list_tables(self): return result def create_app_profile(self, app_profile_id, routing_policy_type, - description='', ignore_warnings=None, + description=None, ignore_warnings=None, cluster_id=None, allow_transactional_writes=False): """Creates an app profile within an instance. @@ -339,16 +337,11 @@ def create_app_profile(self, app_profile_id, routing_policy_type, :param app_profile_id: The unique name for the new app profile. :type: routing_policy_type: int - :param: routing_policy_type: There are two routing policies - ROUTING_POLICY_TYPE_ANY = 1 and - ROUTING_POLICY_TYPE_SINGLE = 2. - If ROUTING_POLICY_TYPE_ANY - which will create a - MultiClusterRoutingUseAny policy and if - ROUTING_POLICY_TYPE_ANY is specified, a - SingleClusterRouting policy will be created - using the cluster_id and - allow_transactional_writes parameters. + :param: routing_policy_type: The type of the routing policy. + Possible values are represented + by the following constants: + :data:`google.cloud.bigtable.enums.RoutingPolicyType.ANY` + :data:`google.cloud.bigtable.enums.RoutingPolicyType.SINGLE` :type: description: str :param: description: (Optional) Long form description of the use @@ -382,11 +375,11 @@ def create_app_profile(self, app_profile_id, routing_policy_type, name = instance_admin_client.app_profile_path( self._client.project, self.instance_id, app_profile_id) - if routing_policy_type == ROUTING_POLICY_TYPE_ANY: + if routing_policy_type == RoutingPolicyType.ANY: multi_cluster_routing_use_any = ( instance_pb2.AppProfile.MultiClusterRoutingUseAny()) - if routing_policy_type == ROUTING_POLICY_TYPE_SINGLE: + if routing_policy_type == RoutingPolicyType.SINGLE: single_cluster_routing = ( instance_pb2.AppProfile.SingleClusterRouting( cluster_id=cluster_id, @@ -430,10 +423,11 @@ def list_app_profiles(self): self._client._instance_admin_client.list_app_profiles(self.name)) return list_app_profiles - def update_app_profile(self, app_profile_id, update_mask, - routing_policy_type, description='', + def update_app_profile(self, app_profile_id, + routing_policy_type, description=None, ignore_warnings=None, - cluster_id=None, allow_transactional_writes=False): + cluster_id=None, + allow_transactional_writes=False): """Updates an app profile within an instance. :type: app_profile_id: str @@ -444,16 +438,11 @@ def update_app_profile(self, app_profile_id, update_mask, needed to update. 
:type: routing_policy_type: int - :param: routing_policy_type: There are two routing policies - ROUTING_POLICY_TYPE_ANY = 1 and - ROUTING_POLICY_TYPE_SINGLE = 2. - If ROUTING_POLICY_TYPE_ANY - which will create a - MultiClusterRoutingUseAny policy and if - ROUTING_POLICY_TYPE_ANY is specified, a - SingleClusterRouting policy will be created - using the cluster_id and - allow_transactional_writes parameters. + :param: routing_policy_type: The type of the routing policy. + Possible values are represented + by the following constants: + :data:`google.cloud.bigtable.enums.RoutingPolicyType.ANY` + :data:`google.cloud.bigtable.enums.RoutingPolicyType.SINGLE` :type: description: str :param: description: (Optional) Optional long form description of the @@ -481,32 +470,36 @@ def update_app_profile(self, app_profile_id, update_mask, if not routing_policy_type: raise ValueError('AppProfile required routing policy.') + update_mask_pb = field_mask_pb2.FieldMask() single_cluster_routing = None multi_cluster_routing_use_any = None instance_admin_client = self._client._instance_admin_client name = instance_admin_client.app_profile_path( self._client.project, self.instance_id, app_profile_id) - if routing_policy_type == ROUTING_POLICY_TYPE_ANY: + if description is not None: + update_mask_pb.paths.append('description') + + if routing_policy_type == RoutingPolicyType.ANY: multi_cluster_routing_use_any = ( instance_pb2.AppProfile.MultiClusterRoutingUseAny()) + update_mask_pb.paths.append('multi_cluster_routing_use_any') - if routing_policy_type == ROUTING_POLICY_TYPE_SINGLE: + if routing_policy_type == RoutingPolicyType.SINGLE: single_cluster_routing = ( instance_pb2.AppProfile.SingleClusterRouting( cluster_id=cluster_id, allow_transactional_writes=allow_transactional_writes )) + update_mask_pb.paths.append('single_cluster_routing') - update_app_profile = instance_pb2.AppProfile( + update_app_profile_pb = instance_pb2.AppProfile( name=name, description=description, multi_cluster_routing_use_any=multi_cluster_routing_use_any, single_cluster_routing=single_cluster_routing ) - update_mask = field_mask_pb2.FieldMask(paths=update_mask) - return self._client._instance_admin_client.update_app_profile( - app_profile=update_app_profile, update_mask=update_mask, + app_profile=update_app_profile_pb, update_mask=update_mask_pb, ignore_warnings=ignore_warnings) def delete_app_profile(self, app_profile_id, ignore_warnings=False): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py index 36fc4aa470aa..db0317891abf 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py @@ -175,3 +175,27 @@ class State(object): STATE_NOT_KNOWN = 0 READY = 1 CREATING = 2 + + +class RoutingPolicyType(object): + """ + The type of the routing policy for app_profile. + + Attributes: + ANY (int): Read/write requests may be routed to any cluster in the + instance, and will fail over to another cluster in the event of + transient errors or delays. + Choosing this option sacrifices read-your-writes consistency to + improve availability. + See + https://cloud.google.com/bigtable/docs/reference/admin/rpc/google.bigtable.admin.v2#google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny + + SINGLE (int): Unconditionally routes all read/write requests to a + specific cluster. 
+ This option preserves read-your-writes consistency, but does not improve + availability. + See + https://cloud.google.com/bigtable/docs/reference/admin/rpc/google.bigtable.admin.v2#google.bigtable.admin.v2.AppProfile.SingleClusterRouting + """ + ANY = 1 + SINGLE = 2 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index 376bf993bb88..43920f180bc2 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -41,7 +41,6 @@ INSTANCE_ID = 'g-c-p' + unique_resource_id('-') LABELS = {u'foo': u'bar'} TABLE_ID = 'google-cloud-python-test-table' -APP_PROFILE_ID = 'app-profile-id' CLUSTER_ID = INSTANCE_ID+'-cluster' COLUMN_FAMILY_ID1 = u'col-fam-id1' COLUMN_FAMILY_ID2 = u'col-fam-id2' @@ -210,33 +209,116 @@ def test_update(self): Config.INSTANCE.update() def test_create_app_profile_with_multi_routing_policy(self): - # Create a new instance instance and reload it. + from google.cloud.bigtable_admin_v2.types import instance_pb2 + description = 'Foo App Profile' instance = Config.INSTANCE + ignore_warnings = True + app_profile_id = 'app_profile_id_1' app_profile = instance.create_app_profile( - app_profile_id=APP_PROFILE_ID+'-multi', + app_profile_id=app_profile_id, routing_policy_type=ROUTING_POLICY_TYPE_ANY, description=description, - ignore_warnings=True + ignore_warnings=ignore_warnings ) - self.assertEqual(app_profile.description, description) + # Load a different app_profile objec form the server and + # verrify that it is the same + alt_app_profile = instance.get_app_profile(app_profile_id) + self.assertEqual(app_profile, alt_app_profile) + + # Modify existing app_profile to singly routing policy and confirm + new_description = 'To single routing policy' + allow_transactional_writes = False + operation = instance.update_app_profile( + app_profile_id=app_profile_id, + routing_policy_type=ROUTING_POLICY_TYPE_SINGLE, + description=new_description, + cluster_id=CLUSTER_ID, + allow_transactional_writes=allow_transactional_writes) + operation.result(timeout=10) + + alt_app_profile = instance.get_app_profile(app_profile_id) + self.assertEqual(alt_app_profile.description, new_description) + self.assertIsInstance( + alt_app_profile.single_cluster_routing, + instance_pb2.AppProfile.SingleClusterRouting) + self.assertEqual( + alt_app_profile.single_cluster_routing.cluster_id, CLUSTER_ID) + self.assertEqual( + alt_app_profile.single_cluster_routing.allow_transactional_writes, + allow_transactional_writes) + + # Delete app_profile + instance.delete_app_profile(app_profile_id=app_profile_id, + ignore_warnings=ignore_warnings) + self.assertFalse(self._app_profile_exists(app_profile_id)) def test_create_app_profile_with_single_routing_policy(self): - # Create a new instance instance and reload it. 
+ from google.cloud.bigtable_admin_v2.types import instance_pb2 + description = 'Foo App Profile' instance = Config.INSTANCE + ignore_warnings = True + app_profile_id = 'app_profile_id_2' app_profile = instance.create_app_profile( - app_profile_id=APP_PROFILE_ID+'-single', + app_profile_id=app_profile_id, routing_policy_type=ROUTING_POLICY_TYPE_SINGLE, description=description, cluster_id=CLUSTER_ID, - ignore_warnings=True ) - self.assertEqual(app_profile.description, description) + # Load a different app_profile objec form the server and + # verrify that it is the same + alt_app_profile = instance.get_app_profile(app_profile_id) + self.assertEqual(app_profile, alt_app_profile) + + # Modify existing app_profile to allow_transactional_writes + new_description = 'Allow transactional writes' + allow_transactional_writes = True + # Note: Do not need to ignore warnings when switching + # to allow transactional writes. + # Do need to set ignore_warnings to True, when switching to + # disallow the transactional writes. + operation = instance.update_app_profile( + app_profile_id=app_profile_id, + routing_policy_type=ROUTING_POLICY_TYPE_SINGLE, + description=new_description, + cluster_id=CLUSTER_ID, + allow_transactional_writes=allow_transactional_writes) + operation.result(timeout=10) + + alt_app_profile = instance.get_app_profile(app_profile_id) + self.assertEqual(alt_app_profile.description, new_description) + self.assertEqual( + alt_app_profile.single_cluster_routing.allow_transactional_writes, + allow_transactional_writes) + + # Modify existing app_proflie to multi cluster routing + new_description = 'To multi cluster routing' + operation = instance.update_app_profile( + app_profile_id=app_profile_id, + routing_policy_type=ROUTING_POLICY_TYPE_ANY, + description=new_description, + ignore_warnings=ignore_warnings) + operation.result(timeout=10) + + alt_app_profile = instance.get_app_profile(app_profile_id) + self.assertEqual(alt_app_profile.description, new_description) + self.assertIsInstance( + alt_app_profile.multi_cluster_routing_use_any, + instance_pb2.AppProfile.MultiClusterRoutingUseAny) + + def _app_profile_exists(self, app_profile_id): + from google.api_core import exceptions + try: + Config.INSTANCE.get_app_profile(app_profile_id) + except exceptions.NotFound: + return False + else: + return True class TestTableAdminAPI(unittest.TestCase): diff --git a/packages/google-cloud-bigtable/tests/unit/test_instance.py b/packages/google-cloud-bigtable/tests/unit/test_instance.py index a6bc1bf5ab97..290aec674928 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_instance.py +++ b/packages/google-cloud-bigtable/tests/unit/test_instance.py @@ -617,7 +617,6 @@ def test_create_app_profile_with_wrong_routing_policy(self): instance = self._make_one(self.INSTANCE_ID, client) app_profile_id = 'appProfileId1262094415' - update_mask = [] # Create AppProfile with exception with self.assertRaises(ValueError): @@ -626,13 +625,13 @@ def test_create_app_profile_with_wrong_routing_policy(self): with self.assertRaises(ValueError): instance.update_app_profile(app_profile_id, - update_mask=update_mask, routing_policy_type=None) def test_create_app_profile_with_multi_routing_policy(self): from google.cloud.bigtable_admin_v2.proto import instance_pb2 from google.cloud.bigtable_admin_v2.gapic import ( bigtable_instance_admin_client) + from google.cloud.bigtable.enums import RoutingPolicyType credentials = _make_credentials() client = self._make_client(project=self.PROJECT, @@ -649,7 +648,7 @@ def 
test_create_app_profile_with_multi_routing_policy(self): } expected_request = { 'app_profile_id': app_profile_id, - 'routing_policy_type': 1, + 'routing_policy_type': RoutingPolicyType.ANY, 'description': description } expected_response = instance_pb2.AppProfile(**expected_response) @@ -673,13 +672,14 @@ def test_create_app_profile_with_multi_routing_policy(self): ) actual_request = channel.requests[0][1] - assert expected_request == actual_request + self.assertEqual(expected_request, actual_request) self.assertEqual(result, expected_response) def test_create_app_profile_with_single_routing_policy(self): from google.cloud.bigtable_admin_v2.proto import instance_pb2 from google.cloud.bigtable_admin_v2.gapic import ( bigtable_instance_admin_client) + from google.cloud.bigtable.enums import RoutingPolicyType credentials = _make_credentials() client = self._make_client(project=self.PROJECT, @@ -700,7 +700,7 @@ def test_create_app_profile_with_single_routing_policy(self): } expected_request = { 'app_profile_id': app_profile_id, - 'routing_policy_type': 2, + 'routing_policy_type': RoutingPolicyType.SINGLE, 'description': description, 'cluster_id': cluster_id } @@ -725,7 +725,7 @@ def test_create_app_profile_with_single_routing_policy(self): ) actual_request = channel.requests[0][1] - assert expected_request == actual_request + self.assertEqual(expected_request, actual_request) self.assertEqual(result, expected_response) def test_get_app_profile(self): @@ -810,21 +810,19 @@ def test_list_app_profiles(self): self.assertEqual(response[0], expected_response.app_profiles[0]) - def test_update_app_profile(self): - import datetime + def test_update_app_profile_multi_cluster_routing_policy(self): from google.api_core import operation from google.longrunning import operations_pb2 from google.protobuf.any_pb2 import Any from google.cloud.bigtable_admin_v2.proto import ( bigtable_instance_admin_pb2 as messages_v2_pb2) - from google.cloud._helpers import _datetime_to_pb_timestamp - from tests.unit._testing import _FakeStub from google.cloud.bigtable_admin_v2.gapic import ( bigtable_instance_admin_client) - - instance_api = ( - bigtable_instance_admin_client.BigtableInstanceAdminClient( - mock.Mock())) + from google.cloud.bigtable_admin_v2.types import instance_pb2 + from google.protobuf import field_mask_pb2 + from google.cloud.bigtable_admin_v2.proto import ( + bigtable_instance_admin_pb2 as instance_v2_pb2) + from google.cloud.bigtable.enums import RoutingPolicyType credentials = _make_credentials() client = self._make_client(project=self.PROJECT, @@ -832,11 +830,9 @@ def test_update_app_profile(self): instance = self._make_one(self.INSTANCE_ID, client) # Create response_pb - NOW = datetime.datetime.utcnow() - NOW_PB = _datetime_to_pb_timestamp(NOW) - metadata = messages_v2_pb2.CreateInstanceMetadata(request_time=NOW_PB) + metadata = messages_v2_pb2.UpdateAppProfileMetadata() type_url = 'type.googleapis.com/%s' % ( - messages_v2_pb2.CreateInstanceMetadata.DESCRIPTOR.full_name,) + messages_v2_pb2.UpdateAppProfileMetadata.DESCRIPTOR.full_name,) response_pb = operations_pb2.Operation( name=self.OP_NAME, metadata=Any( @@ -846,34 +842,58 @@ def test_update_app_profile(self): ) # Patch the stub used by the API method. 
+ channel = ChannelStub(responses=[response_pb]) + instance_api = ( + bigtable_instance_admin_client.BigtableInstanceAdminClient( + channel=channel)) + # Mock api calls client._instance_admin_client = instance_api - stub = _FakeStub(response_pb) - client._instance_admin_client.bigtable_instance_admin_stub = stub - update_mask = [] # Perform the method and check the result. + description = 'description-1724546052' app_profile_id = 'appProfileId1262094415' + ignore_warnings = True + multi_cluster_routing_use_any = ( + instance_pb2.AppProfile.MultiClusterRoutingUseAny()) + expected_request_app_profile = instance_pb2.AppProfile( + name=self.APP_PROFILE_PATH + app_profile_id, + description=description, + multi_cluster_routing_use_any=multi_cluster_routing_use_any + ) + expected_request_update_mask = field_mask_pb2.FieldMask( + paths=['description', 'multi_cluster_routing_use_any'] + ) + expected_request = instance_v2_pb2.UpdateAppProfileRequest( + app_profile=expected_request_app_profile, + update_mask=expected_request_update_mask, + ignore_warnings=ignore_warnings + ) + result = instance.update_app_profile(app_profile_id, - update_mask=update_mask, - routing_policy_type=1) + RoutingPolicyType.ANY, + description=description, + ignore_warnings=ignore_warnings) + actual_request = channel.requests[0][1] + self.assertEqual(actual_request, expected_request) self.assertIsInstance(result, operation.Operation) + self.assertEqual(result.operation.name, self.OP_NAME) + self.assertIsInstance(result.metadata, + messages_v2_pb2.UpdateAppProfileMetadata) - def test_update_app_profile_with_single_routing_policy(self): - import datetime + def test_update_app_profile_single_routing_policy(self): from google.api_core import operation from google.longrunning import operations_pb2 from google.protobuf.any_pb2 import Any from google.cloud.bigtable_admin_v2.proto import ( bigtable_instance_admin_pb2 as messages_v2_pb2) - from google.cloud._helpers import _datetime_to_pb_timestamp - from tests.unit._testing import _FakeStub from google.cloud.bigtable_admin_v2.gapic import ( bigtable_instance_admin_client) - - instance_api = ( - bigtable_instance_admin_client.BigtableInstanceAdminClient( - mock.Mock())) + from google.cloud.bigtable_admin_v2.types import instance_pb2 + from google.protobuf import field_mask_pb2 + from google.cloud.bigtable_admin_v2.proto import ( + bigtable_instance_admin_pb2 as instance_v2_pb2) + from google.cloud.bigtable.enums import RoutingPolicyType credentials = _make_credentials() client = self._make_client(project=self.PROJECT, @@ -881,11 +901,9 @@ def test_update_app_profile_with_single_routing_policy(self): instance = self._make_one(self.INSTANCE_ID, client) # Create response_pb - NOW = datetime.datetime.utcnow() - NOW_PB = _datetime_to_pb_timestamp(NOW) - metadata = messages_v2_pb2.CreateInstanceMetadata(request_time=NOW_PB) + metadata = messages_v2_pb2.UpdateAppProfileMetadata() type_url = 'type.googleapis.com/%s' % ( - messages_v2_pb2.CreateInstanceMetadata.DESCRIPTOR.full_name,) + messages_v2_pb2.UpdateAppProfileMetadata.DESCRIPTOR.full_name,) response_pb = operations_pb2.Operation( name=self.OP_NAME, metadata=Any( @@ -895,20 +913,49 @@ def test_update_app_profile_with_single_routing_policy(self): ) # Patch the stub used by the API method. 
+ channel = ChannelStub(responses=[response_pb]) + instance_api = ( + bigtable_instance_admin_client.BigtableInstanceAdminClient( + channel=channel)) + # Mock api calls client._instance_admin_client = instance_api - stub = _FakeStub(response_pb) - client._instance_admin_client.bigtable_instance_admin_stub = stub - update_mask = [] # Perform the method and check the result. app_profile_id = 'appProfileId1262094415' cluster_id = 'cluster-id' + allow_transactional_writes = True + ignore_warnings = True + single_cluster_routing = ( + instance_pb2.AppProfile.SingleClusterRouting( + cluster_id=cluster_id, + allow_transactional_writes=allow_transactional_writes + )) + expected_request_app_profile = instance_pb2.AppProfile( + name=self.APP_PROFILE_PATH + app_profile_id, + single_cluster_routing=single_cluster_routing + ) + expected_request_update_mask = field_mask_pb2.FieldMask( + paths=['single_cluster_routing'] + ) + expected_request = instance_v2_pb2.UpdateAppProfileRequest( + app_profile=expected_request_app_profile, + update_mask=expected_request_update_mask, + ignore_warnings=ignore_warnings + ) + result = instance.update_app_profile(app_profile_id, - update_mask=update_mask, - routing_policy_type=2, - cluster_id=cluster_id) + RoutingPolicyType.SINGLE, + ignore_warnings=ignore_warnings, + cluster_id=cluster_id, + allow_transactional_writes=( + allow_transactional_writes)) + actual_request = channel.requests[0][1] + self.assertEqual(actual_request, expected_request) self.assertIsInstance(result, operation.Operation) + self.assertEqual(result.operation.name, self.OP_NAME) + self.assertIsInstance(result.metadata, + messages_v2_pb2.UpdateAppProfileMetadata) def test_delete_app_profile(self): from google.cloud.bigtable_admin_v2.gapic import ( From 5dd3fbccf1fccc99db3b450e9bb3fa02e8357512 Mon Sep 17 00:00:00 2001 From: Alex <7764119+AVaksman@users.noreply.github.com> Date: Thu, 26 Jul 2018 14:40:01 -0400 Subject: [PATCH 154/892] Bigtable: 'Instance.update()' now uses 'instance.partial_instance_update' API (#5643) 'Instance.update()' now returns 'google.longrunning.Operation' --- .../google/cloud/bigtable/instance.py | 35 ++++-- .../google-cloud-bigtable/tests/system.py | 51 +++++++- .../tests/unit/test_cluster.py | 14 +-- .../tests/unit/test_instance.py | 114 +++++++++++++++--- 4 files changed, 178 insertions(+), 36 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py index e3b26d55f386..5677c02cfc0f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py @@ -251,23 +251,41 @@ def create(self, location_id=_EXISTING_INSTANCE_LOCATION_ID, clusters={c.cluster_id: c._create_pb_request() for c in clusters}) def update(self): - """Update this instance. + """Updates an instance within a project. .. note:: - Updates the ``display_name``. To change that value before - updating, reset its values via + Updates any or all of the following values: + ``display_name`` + ``type`` + ``labels`` + To change a value before + updating, assign that values via .. code:: python instance.display_name = 'New display name' before calling :meth:`update`. + + :rtype: :class:`~google.api_core.operation.Operation` + :returns: The long-running operation corresponding to the update + operation. 
""" - self._client.instance_admin_client.update_instance( + update_mask_pb = field_mask_pb2.FieldMask() + if self.display_name is not None: + update_mask_pb.paths.append('display_name') + if self.type_ is not None: + update_mask_pb.paths.append('type') + if self.labels is not None: + update_mask_pb.paths.append('labels') + instance_pb = instance_pb2.Instance( name=self.name, display_name=self.display_name, - type_=self.type_, - labels=self.labels) + type=self.type_, labels=self.labels) + + return self._client.instance_admin_client.partial_update_instance( + instance=instance_pb, + update_mask=update_mask_pb) def delete(self): """Delete this instance. @@ -321,8 +339,9 @@ def list_tables(self): for table_pb in table_list_pb: table_prefix = self.name + '/tables/' if not table_pb.name.startswith(table_prefix): - raise ValueError('Table name %s not of expected format' % ( - table_pb.name,)) + raise ValueError( + 'Table name {} not of expected format'.format( + table_pb.name)) table_id = table_pb.name[len(table_prefix):] result.append(self.table(table_id)) diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index 43920f180bc2..64d14e8a2acc 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -191,22 +191,63 @@ def test_create_instance(self): self.assertEqual(instance.type_, instance_alt.type_) self.assertEqual(instance.labels, instance_alt.labels) - def test_update(self): + def test_update_display_name_and_labels(self): OLD_DISPLAY_NAME = Config.INSTANCE.display_name NEW_DISPLAY_NAME = 'Foo Bar Baz' + NEW_LABELS = {'foo_bar': 'foo_bar'} Config.INSTANCE.display_name = NEW_DISPLAY_NAME - Config.INSTANCE.update() + Config.INSTANCE.labels = NEW_LABELS + operation = Config.INSTANCE.update() + + # We want to make sure the operation completes. + operation.result(timeout=10) # Create a new instance instance and reload it. - instance_alt = Config.CLIENT.instance(INSTANCE_ID) - self.assertNotEqual(instance_alt.display_name, NEW_DISPLAY_NAME) + instance_alt = Config.CLIENT.instance(INSTANCE_ID, labels=LABELS) + self.assertEqual(instance_alt.display_name, OLD_DISPLAY_NAME) + self.assertEqual(instance_alt.labels, LABELS) instance_alt.reload() self.assertEqual(instance_alt.display_name, NEW_DISPLAY_NAME) + self.assertEqual(instance_alt.labels, NEW_LABELS) # Make sure to put the instance back the way it was for the # other test cases. Config.INSTANCE.display_name = OLD_DISPLAY_NAME - Config.INSTANCE.update() + Config.INSTANCE.labels = LABELS + operation = Config.INSTANCE.update() + + # We want to make sure the operation completes. + operation.result(timeout=10) + + def test_update_type(self): + from google.cloud.bigtable.enums import InstanceType + + _DEVELOPMENT = InstanceType.DEVELOPMENT + _PRODUCTION = InstanceType.PRODUCTION + ALT_INSTANCE_ID = 'new' + unique_resource_id('-') + instance = Config.CLIENT.instance(ALT_INSTANCE_ID, + instance_type=_DEVELOPMENT) + operation = instance.create(location_id=LOCATION_ID, serve_nodes=None) + # Make sure this instance gets deleted after the test case. + self.instances_to_delete.append(instance) + + # We want to make sure the operation completes. + operation.result(timeout=10) + + # Unset the display_name + instance.display_name = None + + instance.type_ = _PRODUCTION + operation = instance.update() + + # We want to make sure the operation completes. + operation.result(timeout=10) + + # Create a new instance instance and reload it. 
+ instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID) + self.assertIsNone(instance_alt.type_) + instance_alt.reload() + self.assertEqual(instance_alt.type_, _PRODUCTION) def test_create_app_profile_with_multi_routing_policy(self): from google.cloud.bigtable_admin_v2.types import instance_pb2 diff --git a/packages/google-cloud-bigtable/tests/unit/test_cluster.py b/packages/google-cloud-bigtable/tests/unit/test_cluster.py index c8caf36ebba9..98118eebc026 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_cluster.py +++ b/packages/google-cloud-bigtable/tests/unit/test_cluster.py @@ -165,8 +165,8 @@ def test_create(self): # Create response_pb OP_ID = 5678 OP_NAME = ( - 'operations/projects/%s/instances/%s/clusters/%s/operations/%d' % - (self.PROJECT, self.INSTANCE_ID, self.CLUSTER_ID, OP_ID)) + 'operations/projects/{}/instances/{}/clusters/{}/operations/{}' + .format(self.PROJECT, self.INSTANCE_ID, self.CLUSTER_ID, OP_ID)) response_pb = operations_pb2.Operation(name=OP_NAME) # Patch the stub used by the API method. @@ -219,16 +219,16 @@ def test_update(self): # Create response_pb OP_ID = 5678 OP_NAME = ( - 'operations/projects/%s/instances/%s/clusters/%s/operations/%d' % - (self.PROJECT, self.INSTANCE_ID, self.CLUSTER_ID, OP_ID)) + 'operations/projects/{}/instances/{}/clusters/{}/operations/{}' + .format(self.PROJECT, self.INSTANCE_ID, self.CLUSTER_ID, OP_ID)) metadata = messages_v2_pb2.UpdateClusterMetadata(request_time=NOW_PB) - type_url = 'type.googleapis.com/%s' % ( - messages_v2_pb2.UpdateClusterMetadata.DESCRIPTOR.full_name,) + type_url = 'type.googleapis.com/{}'.format( + messages_v2_pb2.UpdateClusterMetadata.DESCRIPTOR.full_name) response_pb = operations_pb2.Operation( name=OP_NAME, metadata=Any( type_url=type_url, - value=metadata.SerializeToString() + value=metadata.SerializeToString(), ) ) diff --git a/packages/google-cloud-bigtable/tests/unit/test_instance.py b/packages/google-cloud-bigtable/tests/unit/test_instance.py index 290aec674928..fb6334ef288e 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_instance.py +++ b/packages/google-cloud-bigtable/tests/unit/test_instance.py @@ -61,8 +61,8 @@ class TestInstance(unittest.TestCase): DISPLAY_NAME = 'display_name' LABELS = {'foo': 'bar'} OP_ID = 8915 - OP_NAME = ('operations/projects/%s/instances/%soperations/%d' % - (PROJECT, INSTANCE_ID, OP_ID)) + OP_NAME = ('operations/projects/{}/instances/{}operations/{}' + .format(PROJECT, INSTANCE_ID, OP_ID)) TABLE_ID = 'table_id' TABLE_NAME = INSTANCE_NAME + '/tables/' + TABLE_ID @@ -337,8 +337,8 @@ def test_create(self): # Create response_pb metadata = messages_v2_pb2.CreateInstanceMetadata(request_time=NOW_PB) - type_url = 'type.googleapis.com/%s' % ( - messages_v2_pb2.CreateInstanceMetadata.DESCRIPTOR.full_name,) + type_url = 'type.googleapis.com/{}'.format( + messages_v2_pb2.CreateInstanceMetadata.DESCRIPTOR.full_name) response_pb = operations_pb2.Operation( name=self.OP_NAME, metadata=Any( @@ -500,34 +500,116 @@ def _create_instance_request(self, clusters): labels=self.LABELS) return messages_v2_pb2.CreateInstanceRequest( - parent='projects/%s' % (self.PROJECT), + parent='projects/{}'.format(self.PROJECT), instance_id=self.INSTANCE_ID, instance=instance, clusters=clusters ) def test_update(self): + import datetime + from google.api_core import operation + from google.longrunning import operations_pb2 + from google.protobuf.any_pb2 import Any + from google.cloud.bigtable_admin_v2.proto import ( + bigtable_instance_admin_pb2 as messages_v2_pb2) + from 
google.cloud._helpers import _datetime_to_pb_timestamp + from google.cloud.bigtable import enums from google.cloud.bigtable_admin_v2.gapic import ( bigtable_instance_admin_client) + from google.protobuf import field_mask_pb2 + from google.cloud.bigtable_admin_v2.types import instance_pb2 + from google.cloud.bigtable_admin_v2.proto import ( + bigtable_instance_admin_pb2 as instance_v2_pb2) - api = bigtable_instance_admin_client.BigtableInstanceAdminClient( - mock.Mock()) + NOW = datetime.datetime.utcnow() + NOW_PB = _datetime_to_pb_timestamp(NOW) credentials = _make_credentials() client = self._make_client(project=self.PROJECT, credentials=credentials, admin=True) instance = self._make_one(self.INSTANCE_ID, client, - display_name=self.DISPLAY_NAME) + display_name=self.DISPLAY_NAME, + instance_type=enums.InstanceType.DEVELOPMENT, + labels=self.LABELS) + + expected_request_instance = instance_pb2.Instance( + name=instance.name, display_name=instance.display_name, + type=instance.type_, labels=instance.labels) + expected_request_update_mask = field_mask_pb2.FieldMask( + paths=['display_name', 'type', 'labels']) + expected_request = instance_v2_pb2.PartialUpdateInstanceRequest( + instance=expected_request_instance, + update_mask=expected_request_update_mask) + + metadata = messages_v2_pb2.UpdateInstanceMetadata( + request_time=NOW_PB) + type_url = 'type.googleapis.com/{}'.format( + messages_v2_pb2.UpdateInstanceMetadata.DESCRIPTOR.full_name) + response_pb = operations_pb2.Operation( + name=self.OP_NAME, + metadata=Any( + type_url=type_url, + value=metadata.SerializeToString(), + ) + ) + + channel = ChannelStub(responses=[response_pb]) + instance_api = ( + bigtable_instance_admin_client.BigtableInstanceAdminClient( + channel=channel)) # Mock api calls - client._instance_admin_client = api + client._instance_admin_client = instance_api - # Create expected_result. - expected_result = None + # Perform the method and check the result. 
+ result = instance.update() + actual_request = channel.requests[0][1] + + self.assertEqual(actual_request, expected_request) + self.assertIsInstance(result, operation.Operation) + self.assertEqual(result.operation.name, self.OP_NAME) + self.assertIsInstance(result.metadata, + messages_v2_pb2.UpdateInstanceMetadata) + + def test_update_empty(self): + from google.api_core import operation + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_instance_admin_client) + from google.longrunning import operations_pb2 + from google.protobuf import field_mask_pb2 + from google.cloud.bigtable_admin_v2.types import instance_pb2 + from google.cloud.bigtable_admin_v2.proto import ( + bigtable_instance_admin_pb2 as instance_v2_pb2) + + credentials = _make_credentials() + client = self._make_client(project=self.PROJECT, + credentials=credentials, admin=True) + instance = self._make_one(None, client) + + expected_request_instance = instance_pb2.Instance( + name=instance.name, display_name=instance.display_name, + type=instance.type_, labels=instance.labels) + expected_request_update_mask = field_mask_pb2.FieldMask() + expected_request = instance_v2_pb2.PartialUpdateInstanceRequest( + instance=expected_request_instance, + update_mask=expected_request_update_mask) + + response_pb = operations_pb2.Operation(name=self.OP_NAME) + + channel = ChannelStub(responses=[response_pb]) + instance_api = ( + bigtable_instance_admin_client.BigtableInstanceAdminClient( + channel=channel)) + + # Mock api calls + client._instance_admin_client = instance_api # Perform the method and check the result. result = instance.update() + actual_request = channel.requests[0][1] - self.assertEqual(result, expected_result) + self.assertIsInstance(result, operation.Operation) + self.assertEqual(actual_request, expected_request) def test_delete(self): from google.cloud.bigtable_admin_v2.gapic import ( @@ -831,8 +913,8 @@ def test_update_app_profile_multi_cluster_routing_policy(self): # Create response_pb metadata = messages_v2_pb2.UpdateAppProfileMetadata() - type_url = 'type.googleapis.com/%s' % ( - messages_v2_pb2.UpdateAppProfileMetadata.DESCRIPTOR.full_name,) + type_url = 'type.googleapis.com/{}'.format( + messages_v2_pb2.UpdateAppProfileMetadata.DESCRIPTOR.full_name) response_pb = operations_pb2.Operation( name=self.OP_NAME, metadata=Any( @@ -902,8 +984,8 @@ def test_update_app_profile_single_routing_policy(self): # Create response_pb metadata = messages_v2_pb2.UpdateAppProfileMetadata() - type_url = 'type.googleapis.com/%s' % ( - messages_v2_pb2.UpdateAppProfileMetadata.DESCRIPTOR.full_name,) + type_url = 'type.googleapis.com/{}'.format( + messages_v2_pb2.UpdateAppProfileMetadata.DESCRIPTOR.full_name) response_pb = operations_pb2.Operation( name=self.OP_NAME, metadata=Any( From 064cde89adddf1336135eaa880ea780277a47d56 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Fri, 27 Jul 2018 13:55:39 -0400 Subject: [PATCH 155/892] Harden 'test_list_instances' further. (#5696) An overlapping CI run might delete one or more instances which were present at the beginning of a given run. Closes #5695. 
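
For clarity, this is roughly what the hardened test body looks like once the change below is applied. It is only a sketch: it assumes the Config.CLIENT and Config.INSTANCE module-level fixtures already defined in tests/system.py, and it is not part of the patch itself.

    def test_list_instances(self):
        # List instances via the shared admin client fixture.
        instances, failed_locations = Config.CLIENT.list_instances()
        self.assertEqual(failed_locations, [])

        # Only assert that this run's own instance is present. Instances
        # belonging to other (overlapping) CI runs may be created or
        # deleted at any time, so a superset check over pre-existing
        # instances is inherently flaky.
        found = set([instance.name for instance in instances])
        self.assertTrue(Config.INSTANCE.name in found)
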
--- packages/google-cloud-bigtable/tests/system.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index 64d14e8a2acc..ad9237fb03a5 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -121,14 +121,11 @@ def tearDown(self): instance.delete() def test_list_instances(self): - expected = set([instance.name for instance in EXISTING_INSTANCES]) - expected.add(Config.INSTANCE.name) - instances, failed_locations = Config.CLIENT.list_instances() self.assertEqual(failed_locations, []) found = set([instance.name for instance in instances]) - self.assertTrue(expected.issubset(found)) + self.assertTrue(Config.INSTANCE.name in found) def test_reload(self): from google.cloud.bigtable import enums From 647d5b2c73362d0d655bd42145aab850e120c2b3 Mon Sep 17 00:00:00 2001 From: Alex <7764119+AVaksman@users.noreply.github.com> Date: Tue, 31 Jul 2018 17:06:15 -0400 Subject: [PATCH 156/892] Bigtable: Reshaping cluster.py, adding cluster() factory to instance.py (#5663) * Reshaping cluster.py with full functionality Added factory to create cluster object in instance.py * adding assert for state in cluster() factory * adding asserts for expected_request and result.metadata for cluster.create() * Added asserts for expected_request and result.metadata for cluster.update() * addressing comments * lint * Fixing a reference to location_id in cluster.py * Fixing test_constructor_defaults in cluster unit test. * fixing current tests after pulling changes from master * added requested functionality some clean up after merging changes from master added unit tests added system tests * add system test for cluster.exists() * enums.py instance.py lint fix * Cluster.from_pb() description correction * correcting lint * reverting changes in bigtable_instance_admin_client.py and unit test * fixing tests * addressing the comments * added instance.list_clusters with unit and system tests other clean up * addressing more comments * renaming 'state' to '_state' in 'cluster.py' using keyword arguments in 'cluster._from_pb()' * change param to '_state' add read-only 'state' property in 'cluster.py' * fixing test coverage ->testing against property 'state' va attribure '_state' --- .../google/cloud/bigtable/cluster.py | 152 ++++++-- .../google/cloud/bigtable/enums.py | 88 +++-- .../google/cloud/bigtable/instance.py | 78 ++++- .../google-cloud-bigtable/tests/system.py | 174 ++++++++- .../tests/unit/test_client.py | 2 +- .../tests/unit/test_cluster.py | 330 ++++++++++++++---- .../tests/unit/test_instance.py | 218 +++++++----- 7 files changed, 803 insertions(+), 239 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py index 4bac94d1c8d0..e7e56f5e0ce6 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py @@ -16,19 +16,14 @@ import re -from google.cloud.bigtable_admin_v2 import enums from google.cloud.bigtable_admin_v2.types import instance_pb2 +from google.api_core.exceptions import NotFound _CLUSTER_NAME_RE = re.compile(r'^projects/(?P[^/]+)/' r'instances/(?P[^/]+)/clusters/' r'(?P[a-z][-a-z0-9]*)$') -DEFAULT_SERVE_NODES = 3 -"""Default number of nodes to use when creating a cluster.""" - -_STORAGE_TYPE_UNSPECIFIED = enums.StorageType.STORAGE_TYPE_UNSPECIFIED - class 
Cluster(object): """Representation of a Google Cloud Bigtable Cluster. @@ -47,34 +42,98 @@ class Cluster(object): :param instance: The instance where the cluster resides. :type location_id: str - :param location_id: ID of the location in which the cluster will be - created. For e.g "us-central1-b" - List of locations id is : + :param location_id: (Creation Only) The location where this cluster's + nodes and storage reside . For best performance, + clients should be located as close as possible to + this cluster. + For list of supported locations refer to https://cloud.google.com/bigtable/docs/locations - For best performance, clients should be located - as close as possible to this cluster. :type serve_nodes: int :param serve_nodes: (Optional) The number of nodes in the cluster. - Defaults to :data:`DEFAULT_SERVE_NODES`. :type default_storage_type: int - :param default_storage_type: (Optional) The default values are - STORAGE_TYPE_UNSPECIFIED = 0: The user - did not specify a storage type. - SSD = 1: Flash (SSD) storage should be used. - HDD = 2: Magnetic drive (HDD) storage - should be used. + :param default_storage_type: (Optional) The type of storage + Possible values are represented by the + following constants: + :data:`google.cloud.bigtable.enums.StorageType.SSD`. + :data:`google.cloud.bigtable.enums.StorageType.SHD`, + Defaults to + :data:`google.cloud.bigtable.enums.StorageType.UNSPECIFIED`. + + :type _state: int + :param _state: (`OutputOnly`) + The current state of the cluster. + Possible values are represented by the following constants: + :data:`google.cloud.bigtable.enums.Cluster.State.NOT_KNOWN`. + :data:`google.cloud.bigtable.enums.Cluster.State.READY`. + :data:`google.cloud.bigtable.enums.Cluster.State.CREATING`. + :data:`google.cloud.bigtable.enums.Cluster.State.RESIZING`. + :data:`google.cloud.bigtable.enums.Cluster.State.DISABLED`. """ - def __init__(self, cluster_id, instance, location_id, - serve_nodes=DEFAULT_SERVE_NODES, - default_storage_type=_STORAGE_TYPE_UNSPECIFIED): + def __init__(self, + cluster_id, + instance, + location_id=None, + serve_nodes=None, + default_storage_type=None, + _state=None): self.cluster_id = cluster_id self._instance = instance - self.location = location_id + self.location_id = location_id self.serve_nodes = serve_nodes self.default_storage_type = default_storage_type + self._state = _state + + @classmethod + def from_pb(cls, cluster_pb, instance): + """Creates an cluster instance from a protobuf. + + :type cluster_pb: :class:`instance_pb2.Cluster` + :param cluster_pb: An instance protobuf object. + + :type instance: :class:`google.cloud.bigtable.instance.Instance` + :param instance: The instance that owns the cluster. + + :rtype: :class:`Cluster` + :returns: The Cluster parsed from the protobuf response. + :raises: :class:`ValueError ` if the cluster + name does not match + ``projects/{project}/instances/{instance_id}/clusters/{cluster_id}`` + or if the parsed instance ID does not match the istance ID + on the client. + or if the parsed project ID does not match the project ID + on the client. 
+ """ + match_cluster_name = _CLUSTER_NAME_RE.match(cluster_pb.name) + if match_cluster_name is None: + raise ValueError('Cluster protobuf name was not in the ' + 'expected format.', cluster_pb.name) + if match_cluster_name.group('instance') != instance.instance_id: + raise ValueError('Instance ID on cluster does not match the ' + 'instance ID on the client') + if match_cluster_name.group('project') != instance._client.project: + raise ValueError('Project ID on cluster does not match the ' + 'project ID on the client') + cluster_id = match_cluster_name.group('cluster_id') + location_id = cluster_pb.location.split('/')[-1] + + result = cls(cluster_id, instance, location_id=location_id, + serve_nodes=cluster_pb.serve_nodes, + default_storage_type=cluster_pb.default_storage_type, + _state=cluster_pb.state) + return result + + def _update_from_pb(self, cluster_pb): + """Refresh self from the server-provided protobuf. + Helper for :meth:`from_pb` and :meth:`reload`. + """ + + self.location_id = cluster_pb.location.split('/')[-1] + self.serve_nodes = cluster_pb.serve_nodes + self.default_storage_type = cluster_pb.default_storage_type + self._state = cluster_pb.state @property def name(self): @@ -95,6 +154,11 @@ def name(self): self._instance._client.project, self._instance.instance_id, self.cluster_id) + @property + def state(self): + """google.cloud.bigtable.enums.Cluster.State: state of cluster.""" + return self._state + def __eq__(self, other): if not isinstance(other, self.__class__): return NotImplemented @@ -112,7 +176,26 @@ def __ne__(self, other): def reload(self): """Reload the metadata for this cluster.""" - self._instance._client.instance_admin_client.get_cluster(self.name) + cluster_pb = self._instance._client.instance_admin_client.get_cluster( + self.name) + + # NOTE: _update_from_pb does not check that the project and + # cluster ID on the response match the request. + self._update_from_pb(cluster_pb) + + def exists(self): + """Check whether the cluster already exists. + + :rtype: bool + :returns: True if the table exists, else False. + """ + client = self._instance._client + try: + client.instance_admin_client.get_cluster(name=self.name) + return True + # NOTE: There could be other exceptions that are returned to the user. + except NotFound: + return False def create(self): """Create this cluster. @@ -135,10 +218,12 @@ def create(self): create operation. """ client = self._instance._client + cluster_pb = self._to_pb() + return client.instance_admin_client.create_cluster( - self._instance.name, self.cluster_id, {}) + self._instance.name, self.cluster_id, cluster_pb) - def update(self, location='', serve_nodes=0): + def update(self): """Update this cluster. .. note:: @@ -169,8 +254,11 @@ def update(self, location='', serve_nodes=0): update operation. """ client = self._instance._client + # We are passing `None` for second argument location. + # Location is set only at the time of creation of a cluster + # and can not be changed after cluster has been created. return client.instance_admin_client.update_cluster( - self.name, location, serve_nodes) + self.name, None, self.serve_nodes) def delete(self): """Delete this cluster. 
@@ -195,15 +283,13 @@ def delete(self): client = self._instance._client client.instance_admin_client.delete_cluster(self.name) - def _create_pb_request(self): + def _to_pb(self): """ Create cluster proto buff message for API calls """ client = self._instance._client - cluster_name = client.instance_admin_client.cluster_path( - client.project, self._instance.instance_id, self.cluster_id) location = client.instance_admin_client.location_path( - client.project, self.location) - cluster_message = instance_pb2.Cluster( - name=cluster_name, location=location, + client.project, self.location_id) + cluster_pb = instance_pb2.Cluster( + location=location, serve_nodes=self.serve_nodes, default_storage_type=self.default_storage_type) - return cluster_message + return cluster_pb diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/enums.py b/packages/google-cloud-bigtable/google/cloud/bigtable/enums.py index 2f2246435b2e..3adcef8267c9 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/enums.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/enums.py @@ -30,29 +30,73 @@ class StorageType(object): HDD = enums.StorageType.HDD -class InstanceType(object): - """ - The type of the instance. +class Instance(object): + class State(object): + """ + Possible states of an instance. - Attributes: - UNSPECIFIED (int): The type of the instance is unspecified. - If set when creating an instance, a ``PRODUCTION`` instance will - be created. If set when updating an instance, the type will be - left unchanged. - PRODUCTION (int): An instance meant for production use. - ``serve_nodes`` must be set on the cluster. - DEVELOPMENT (int): The instance is meant for development and testing - purposes only; it has no performance or uptime guarantees and is not - covered by SLA. - After a development instance is created, it can be upgraded by - updating the instance to type ``PRODUCTION``. An instance created - as a production instance cannot be changed to a development instance. - When creating a development instance, ``serve_nodes`` on the cluster - must not be set. - """ - UNSPECIFIED = enums.Instance.Type.TYPE_UNSPECIFIED - PRODUCTION = enums.Instance.Type.PRODUCTION - DEVELOPMENT = enums.Instance.Type.DEVELOPMENT + Attributes: + STATE_NOT_KNOWN (int): The state of the instance could not be + determined. + READY (int): The instance has been successfully created and can + serve requests to its tables. + CREATING (int): The instance is currently being created, and may be + destroyed if the creation process encounters an error. + """ + NOT_KNOWN = enums.Instance.State.STATE_NOT_KNOWN + READY = enums.Instance.State.READY + CREATING = enums.Instance.State.CREATING + + class Type(object): + """ + The type of the instance. + + Attributes: + UNSPECIFIED (int): The type of the instance is unspecified. + If set when creating an instance, a ``PRODUCTION`` instance will + be created. If set when updating an instance, the type will be + left unchanged. + PRODUCTION (int): An instance meant for production use. + ``serve_nodes`` must be set on the cluster. + DEVELOPMENT (int): The instance is meant for development and testing + purposes only; it has no performance or uptime guarantees and is not + covered by SLA. + After a development instance is created, it can be upgraded by + updating the instance to type ``PRODUCTION``. An instance created + as a production instance cannot be changed to a development instance. + When creating a development instance, ``serve_nodes`` on the cluster + must not be set. 
+ """ + UNSPECIFIED = enums.Instance.Type.TYPE_UNSPECIFIED + PRODUCTION = enums.Instance.Type.PRODUCTION + DEVELOPMENT = enums.Instance.Type.DEVELOPMENT + + +class Cluster(object): + class State(object): + """ + Possible states of a cluster. + + Attributes: + NOT_KNOWN (int): The state of the cluster could not be determined. + READY (int): The cluster has been successfully created and is ready + to serve requests. + CREATING (int): The cluster is currently being created, and may be + destroyed if the creation process encounters an error. + A cluster may not be able to serve requests while being created. + RESIZING (int): The cluster is currently being resized, and may + revert to its previous node count if the process encounters an error. + A cluster is still capable of serving requests while being resized, + but may exhibit performance as if its number of allocated nodes is + between the starting and requested states. + DISABLED (int): The cluster has no backing nodes. The data (tables) + still exist, but no operations can be performed on the cluster. + """ + NOT_KNOWN = enums.Cluster.State.STATE_NOT_KNOWN + READY = enums.Cluster.State.READY + CREATING = enums.Cluster.State.CREATING + RESIZING = enums.Cluster.State.RESIZING + DISABLED = enums.Cluster.State.DISABLED class RoutingPolicyType(object): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py index 5677c02cfc0f..f4f86fb55eca 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py @@ -18,7 +18,6 @@ import re from google.cloud.bigtable.table import Table -from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES from google.cloud.bigtable.cluster import Cluster from google.protobuf import field_mask_pb2 @@ -177,8 +176,8 @@ def reload(self): # instance ID on the response match the request. self._update_from_pb(instance_pb) - def create(self, location_id=_EXISTING_INSTANCE_LOCATION_ID, - serve_nodes=DEFAULT_SERVE_NODES, + def create(self, location_id=None, + serve_nodes=None, default_storage_type=None, clusters=None): """Create this instance. @@ -196,9 +195,12 @@ def create(self, location_id=_EXISTING_INSTANCE_LOCATION_ID, before calling :meth:`create`. :type location_id: str - :param location_id: ID of the location in which the instance will be - created. Required for instances which do not yet - exist. + :param location_id: (Creation Only) The location where nodes and + storage of the cluster owned by this instance + reside. For best performance, clients should be + located as close as possible to cluster's location. 
+ For list of supported locations refer to + https://cloud.google.com/bigtable/docs/locations :type serve_nodes: int @@ -230,9 +232,9 @@ def create(self, location_id=_EXISTING_INSTANCE_LOCATION_ID, if clusters is None: cluster_id = '{}-cluster'.format(self.instance_id) - clusters = [Cluster(cluster_id, self, location_id, - serve_nodes, default_storage_type)] - + clusters = [self.cluster(cluster_id, location_id=location_id, + serve_nodes=serve_nodes, + default_storage_type=default_storage_type)] elif (location_id is not None or serve_nodes is not None or default_storage_type is not None): @@ -240,15 +242,15 @@ def create(self, location_id=_EXISTING_INSTANCE_LOCATION_ID, default_storage_type can not be set \ simultaneously.") - instance = instance_pb2.Instance( + instance_pb = instance_pb2.Instance( display_name=self.display_name, type=self.type_, labels=self.labels) parent = self._client.project_path return self._client.instance_admin_client.create_instance( - parent=parent, instance_id=self.instance_id, instance=instance, - clusters={c.cluster_id: c._create_pb_request() for c in clusters}) + parent=parent, instance_id=self.instance_id, instance=instance_pb, + clusters={c.cluster_id: c._to_pb() for c in clusters}) def update(self): """Updates an instance within a project. @@ -311,6 +313,58 @@ def delete(self): """ self._client.instance_admin_client.delete_instance(name=self.name) + def cluster(self, cluster_id, location_id=None, + serve_nodes=None, default_storage_type=None): + """Factory to create a cluster associated with this instance. + + :type cluster_id: str + :param cluster_id: The ID of the cluster. + + :type instance: :class:`~google.cloud.bigtable.instance.Instance` + :param instance: The instance where the cluster resides. + + :type location_id: str + :param location_id: (Creation Only) The location where this cluster's + nodes and storage reside. For best performance, + clients should be located as close as possible to + this cluster. + For list of supported locations refer to + https://cloud.google.com/bigtable/docs/locations + + :type serve_nodes: int + :param serve_nodes: (Optional) The number of nodes in the cluster. + + :type default_storage_type: int + :param default_storage_type: (Optional) The type of storage + Possible values are represented by the + following constants: + :data:`google.cloud.bigtable.enums.StorageType.SSD`. + :data:`google.cloud.bigtable.enums.StorageType.SHD`, + Defaults to + :data:`google.cloud.bigtable.enums.StorageType.UNSPECIFIED`. + + :rtype: :class:`~google.cloud.bigtable.instance.Cluster` + :returns: a cluster owned by this instance. + """ + return Cluster(cluster_id, self, location_id=location_id, + serve_nodes=serve_nodes, + default_storage_type=default_storage_type) + + def list_clusters(self): + """List the clusters in this instance. + + :rtype: tuple + :returns: + (clusters, failed_locations), where 'clusters' is list of + :class:`google.cloud.bigtable.instance.Cluster`, and + 'failed_locations' is a list of locations which could not + be resolved. + """ + resp = self._client.instance_admin_client.list_clusters(self.name) + clusters = [ + Cluster.from_pb(cluster, self) for cluster in resp.clusters] + return clusters, resp.failed_locations + def table(self, table_id, app_profile_id=None): """Factory to create a table associated with this instance. 
diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index ad9237fb03a5..f308b6e7ca2c 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -42,6 +42,7 @@ LABELS = {u'foo': u'bar'} TABLE_ID = 'google-cloud-python-test-table' CLUSTER_ID = INSTANCE_ID+'-cluster' +SERVE_NODES = 3 COLUMN_FAMILY_ID1 = u'col-fam-id1' COLUMN_FAMILY_ID2 = u'col-fam-id2' COL_NAME1 = b'col-name1' @@ -66,6 +67,7 @@ class Config(object): """ CLIENT = None INSTANCE = None + CLUSTER = None IN_EMULATOR = False @@ -87,6 +89,8 @@ def setUpModule(): Config.CLIENT = Client(admin=True) Config.INSTANCE = Config.CLIENT.instance(INSTANCE_ID, labels=LABELS) + Config.CLUSTER = Config.INSTANCE.cluster( + CLUSTER_ID, location_id=LOCATION_ID, serve_nodes=SERVE_NODES) if not Config.IN_EMULATOR: retry = RetryErrors(GrpcRendezvous, @@ -99,7 +103,7 @@ def setUpModule(): EXISTING_INSTANCES[:] = instances # After listing, create the test instance. - created_op = Config.INSTANCE.create(location_id=LOCATION_ID) + created_op = Config.INSTANCE.create(clusters=[Config.CLUSTER]) created_op.result(timeout=10) @@ -124,6 +128,7 @@ def test_list_instances(self): instances, failed_locations = Config.CLIENT.list_instances() self.assertEqual(failed_locations, []) + found = set([instance.name for instance in instances]) self.assertTrue(Config.INSTANCE.name in found) @@ -131,27 +136,31 @@ def test_reload(self): from google.cloud.bigtable import enums # Use same arguments as Config.INSTANCE (created in `setUpModule`) # so we can use reload() on a fresh instance. - instance = Config.CLIENT.instance(INSTANCE_ID) + alt_instance = Config.CLIENT.instance(INSTANCE_ID) # Make sure metadata unset before reloading. - instance.display_name = None + alt_instance.display_name = None - instance.reload() - self.assertEqual(instance.display_name, Config.INSTANCE.display_name) - self.assertEqual(instance.labels, Config.INSTANCE.labels) - self.assertEqual(instance.type_, enums.InstanceType.PRODUCTION) + alt_instance.reload() + self.assertEqual(alt_instance.display_name, + Config.INSTANCE.display_name) + self.assertEqual(alt_instance.labels, Config.INSTANCE.labels) + self.assertEqual(alt_instance.type_, enums.Instance.Type.PRODUCTION) def test_create_instance_defaults(self): from google.cloud.bigtable import enums ALT_INSTANCE_ID = 'ndef' + unique_resource_id('-') instance = Config.CLIENT.instance(ALT_INSTANCE_ID) - operation = instance.create(location_id=LOCATION_ID) - # Make sure this instance gets deleted after the test case. - self.instances_to_delete.append(instance) - + ALT_CLUSTER_ID = ALT_INSTANCE_ID+'-cluster' + cluster = instance.cluster( + ALT_CLUSTER_ID, location_id=LOCATION_ID, serve_nodes=SERVE_NODES) + operation = instance.create(clusters=[cluster]) # We want to make sure the operation completes. operation.result(timeout=10) + # Make sure this instance gets deleted after the test case. + self.instances_to_delete.append(instance) + # Create a new instance instance and make sure it is the same. 
instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID) instance_alt.reload() @@ -160,25 +169,68 @@ def test_create_instance_defaults(self): self.assertEqual(instance.display_name, instance_alt.display_name) # Make sure that by default a PRODUCTION type instance is created self.assertIsNone(instance.type_) - self.assertEqual(instance_alt.type_, enums.InstanceType.PRODUCTION) + self.assertEqual(instance_alt.type_, enums.Instance.Type.PRODUCTION) self.assertIsNone(instance.labels) self.assertFalse(instance_alt.labels) def test_create_instance(self): from google.cloud.bigtable import enums - _DEVELOPMENT = enums.InstanceType.DEVELOPMENT + _DEVELOPMENT = enums.Instance.Type.DEVELOPMENT ALT_INSTANCE_ID = 'new' + unique_resource_id('-') instance = Config.CLIENT.instance(ALT_INSTANCE_ID, instance_type=_DEVELOPMENT, labels=LABELS) - operation = instance.create(location_id=LOCATION_ID, serve_nodes=None) + ALT_CLUSTER_ID = ALT_INSTANCE_ID+'-cluster' + cluster = instance.cluster(ALT_CLUSTER_ID, location_id=LOCATION_ID) + operation = instance.create(clusters=[cluster]) + # We want to make sure the operation completes. + operation.result(timeout=10) + # Make sure this instance gets deleted after the test case. self.instances_to_delete.append(instance) + # Create a new instance instance and make sure it is the same. + instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID) + instance_alt.reload() + + self.assertEqual(instance, instance_alt) + self.assertEqual(instance.display_name, instance_alt.display_name) + self.assertEqual(instance.type_, instance_alt.type_) + self.assertEqual(instance.labels, instance_alt.labels) + + def test_cluster_exists(self): + NONEXISTING_CLUSTER_ID = 'cluster-id' + + cluster = Config.INSTANCE.cluster(CLUSTER_ID) + alt_cluster = Config.INSTANCE.cluster(NONEXISTING_CLUSTER_ID) + self.assertTrue(cluster.exists()) + self.assertFalse(alt_cluster.exists()) + + def test_create_instance_w_two_clusters(self): + from google.cloud.bigtable import enums + _PRODUCTION = enums.Instance.Type.PRODUCTION + ALT_INSTANCE_ID = 'dif' + unique_resource_id('-') + instance = Config.CLIENT.instance(ALT_INSTANCE_ID, + instance_type=_PRODUCTION) + + ALT_CLUSTER_ID_1 = ALT_INSTANCE_ID+'-cluster-1' + ALT_CLUSTER_ID_2 = ALT_INSTANCE_ID+'-cluster-2' + LOCATION_ID_2 = 'us-central1-f' + STORAGE_TYPE = enums.StorageType.HDD + cluster_1 = instance.cluster( + ALT_CLUSTER_ID_1, location_id=LOCATION_ID, serve_nodes=SERVE_NODES, + default_storage_type=STORAGE_TYPE) + cluster_2 = instance.cluster( + ALT_CLUSTER_ID_2, location_id=LOCATION_ID_2, + serve_nodes=SERVE_NODES, default_storage_type=STORAGE_TYPE) + operation = instance.create(clusters=[cluster_1, cluster_2]) # We want to make sure the operation completes. operation.result(timeout=10) + # Make sure this instance gets deleted after the test case. + self.instances_to_delete.append(instance) + # Create a new instance instance and make sure it is the same. 
instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID) instance_alt.reload() @@ -186,7 +238,23 @@ def test_create_instance(self): self.assertEqual(instance, instance_alt) self.assertEqual(instance.display_name, instance_alt.display_name) self.assertEqual(instance.type_, instance_alt.type_) - self.assertEqual(instance.labels, instance_alt.labels) + + clusters, failed_locations = instance_alt.list_clusters() + self.assertEqual(failed_locations, []) + + clusters.sort(key=lambda x: x.name) + alt_cluster_1, alt_cluster_2 = clusters + + self.assertEqual(cluster_1.location_id, alt_cluster_1.location_id) + self.assertEqual(alt_cluster_1.state, enums.Cluster.State.READY) + self.assertEqual(cluster_1.serve_nodes, alt_cluster_1.serve_nodes) + self.assertEqual(cluster_1.default_storage_type, + alt_cluster_1.default_storage_type) + self.assertEqual(cluster_2.location_id, alt_cluster_2.location_id) + self.assertEqual(alt_cluster_2.state, enums.Cluster.State.READY) + self.assertEqual(cluster_2.serve_nodes, alt_cluster_2.serve_nodes) + self.assertEqual(cluster_2.default_storage_type, + alt_cluster_2.default_storage_type) def test_update_display_name_and_labels(self): OLD_DISPLAY_NAME = Config.INSTANCE.display_name @@ -217,10 +285,10 @@ def test_update_display_name_and_labels(self): operation.result(timeout=10) def test_update_type(self): - from google.cloud.bigtable.enums import InstanceType + from google.cloud.bigtable.enums import Instance - _DEVELOPMENT = InstanceType.DEVELOPMENT - _PRODUCTION = InstanceType.PRODUCTION + _DEVELOPMENT = Instance.Type.DEVELOPMENT + _PRODUCTION = Instance.Type.PRODUCTION ALT_INSTANCE_ID = 'new' + unique_resource_id('-') instance = Config.CLIENT.instance(ALT_INSTANCE_ID, instance_type=_DEVELOPMENT) @@ -358,6 +426,76 @@ def _app_profile_exists(self, app_profile_id): else: return True + def test_reload_cluster(self): + from google.cloud.bigtable.enums import StorageType + from google.cloud.bigtable.enums import Cluster + # Use same arguments as Config.INSTANCE.cluster + # (created in `setUpModule`) so we can use reload() + # on a fresh cluster. + cluster = Config.INSTANCE.cluster(CLUSTER_ID) + + cluster.reload() + self.assertEqual(cluster.location_id, LOCATION_ID) + self.assertEqual(cluster.state, Cluster.State.READY) + self.assertEqual(cluster.serve_nodes, SERVE_NODES) + # Make sure that by default an StorageType.SSD storage is used. + self.assertEqual(cluster.default_storage_type, StorageType.SSD) + + def test_update_cluster(self): + NEW_SERVE_NODES = 4 + + Config.CLUSTER.serve_nodes = NEW_SERVE_NODES + + operation = Config.CLUSTER.update() + + # We want to make sure the operation completes. + operation.result(timeout=10) + + # Create a new cluster instance and reload it. + alt_cluster = Config.INSTANCE.cluster(CLUSTER_ID) + alt_cluster.reload() + self.assertEqual(alt_cluster.serve_nodes, NEW_SERVE_NODES) + + # Make sure to put the cluster back the way it was for the + # other test cases. 
+ Config.CLUSTER.serve_nodes = SERVE_NODES + operation = Config.CLUSTER.update() + operation.result(timeout=10) + + def test_create_cluster(self): + from google.cloud.bigtable.enums import StorageType + from google.cloud.bigtable.enums import Cluster + + ALT_CLUSTER_ID = INSTANCE_ID+'-cluster-2' + ALT_LOCATION_ID = 'us-central1-f' + ALT_SERVE_NODES = 4 + + cluster_2 = Config.INSTANCE.cluster(ALT_CLUSTER_ID, + location_id=ALT_LOCATION_ID, + serve_nodes=ALT_SERVE_NODES, + default_storage_type=( + StorageType.SSD)) + operation = cluster_2.create() + + # We want to make sure the operation completes. + operation.result(timeout=10) + + # Create a new object instance, reload and make sure it is the same. + alt_cluster = Config.INSTANCE.cluster(ALT_CLUSTER_ID) + alt_cluster.reload() + + self.assertEqual(cluster_2, alt_cluster) + self.assertEqual(cluster_2.location_id, alt_cluster.location_id) + self.assertEqual(alt_cluster.state, Cluster.State.READY) + self.assertEqual(cluster_2.serve_nodes, alt_cluster.serve_nodes) + self.assertEqual(cluster_2.default_storage_type, + alt_cluster.default_storage_type) + + # Delete the newly created cluster and confirm + self.assertTrue(cluster_2.exists()) + cluster_2.delete() + self.assertFalse(cluster_2.exists()) + class TestTableAdminAPI(unittest.TestCase): diff --git a/packages/google-cloud-bigtable/tests/unit/test_client.py b/packages/google-cloud-bigtable/tests/unit/test_client.py index a74260cc84ac..e8ba2d75307e 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/test_client.py @@ -109,7 +109,7 @@ def test_instance_factory_non_defaults(self): PROJECT = 'PROJECT' INSTANCE_ID = 'instance-id' DISPLAY_NAME = 'display-name' - instance_type = enums.InstanceType.DEVELOPMENT + instance_type = enums.Instance.Type.DEVELOPMENT labels = {'foo': 'bar'} credentials = _make_credentials() client = self._make_one( diff --git a/packages/google-cloud-bigtable/tests/unit/test_cluster.py b/packages/google-cloud-bigtable/tests/unit/test_cluster.py index 98118eebc026..5d34f95ced79 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_cluster.py +++ b/packages/google-cloud-bigtable/tests/unit/test_cluster.py @@ -20,15 +20,48 @@ from ._testing import _make_credentials +class MultiCallableStub(object): + """Stub for the grpc.UnaryUnaryMultiCallable interface.""" + + def __init__(self, method, channel_stub): + self.method = method + self.channel_stub = channel_stub + + def __call__(self, request, timeout=None, metadata=None, credentials=None): + self.channel_stub.requests.append((self.method, request)) + + return self.channel_stub.responses.pop() + + +class ChannelStub(object): + """Stub for the grpc.Channel interface.""" + + def __init__(self, responses=[]): + self.responses = responses + self.requests = [] + + def unary_unary(self, + method, + request_serializer=None, + response_deserializer=None): + return MultiCallableStub(method, self) + + class TestCluster(unittest.TestCase): PROJECT = 'project' INSTANCE_ID = 'instance-id' + LOCATION_ID = 'location-id' CLUSTER_ID = 'cluster-id' LOCATION_ID = 'location-id' CLUSTER_NAME = ('projects/' + PROJECT + '/instances/' + INSTANCE_ID + '/clusters/' + CLUSTER_ID) + LOCATION_PATH = 'projects/' + PROJECT + '/locations/' + SERVE_NODES = 5 + OP_ID = 5678 + OP_NAME = ('operations/projects/{}/instances/{}/clusters/{}/operations/{}' + .format(PROJECT, INSTANCE_ID, CLUSTER_ID, OP_ID)) @staticmethod def _get_target_class(): @@ -49,38 +82,117 @@ def _make_client(self, *args, 
**kwargs): return self._get_target_client_class()(*args, **kwargs) def test_constructor_defaults(self): - from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES - client = _Client(self.PROJECT) instance = _Instance(self.INSTANCE_ID, client) - cluster = self._make_one(self.CLUSTER_ID, instance, self.LOCATION_ID) + cluster = self._make_one(self.CLUSTER_ID, instance) self.assertEqual(cluster.cluster_id, self.CLUSTER_ID) self.assertIs(cluster._instance, instance) - self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES) + self.assertIsNone(cluster.location_id) + self.assertIsNone(cluster.state) + self.assertIsNone(cluster.serve_nodes) + self.assertIsNone(cluster.default_storage_type) def test_constructor_non_default(self): - SERVE_NODES = 8 + from google.cloud.bigtable.enums import StorageType + from google.cloud.bigtable.enums import Cluster + STATE = Cluster.State.READY + STORAGE_TYPE_SSD = StorageType.SSD client = _Client(self.PROJECT) instance = _Instance(self.INSTANCE_ID, client) cluster = self._make_one(self.CLUSTER_ID, instance, - self.LOCATION_ID, serve_nodes=SERVE_NODES) + location_id=self.LOCATION_ID, + _state=STATE, + serve_nodes=self.SERVE_NODES, + default_storage_type=STORAGE_TYPE_SSD) self.assertEqual(cluster.cluster_id, self.CLUSTER_ID) self.assertIs(cluster._instance, instance) - self.assertEqual(cluster.serve_nodes, SERVE_NODES) + self.assertEqual(cluster.location_id, self.LOCATION_ID) + self.assertEqual(cluster.state, STATE) + self.assertEqual(cluster.serve_nodes, self.SERVE_NODES) + self.assertEqual(cluster.default_storage_type, STORAGE_TYPE_SSD) def test_name_property(self): - from google.cloud.bigtable.instance import Instance - credentials = _make_credentials() client = self._make_client(project=self.PROJECT, credentials=credentials, admin=True) - instance = Instance(self.INSTANCE_ID, client) - cluster = self._make_one(self.CLUSTER_ID, instance, self.LOCATION_ID) + instance = _Instance(self.INSTANCE_ID, client) + cluster = self._make_one(self.CLUSTER_ID, instance) self.assertEqual(cluster.name, self.CLUSTER_NAME) + def test_from_pb_success(self): + from google.cloud.bigtable_admin_v2.proto import ( + instance_pb2 as data_v2_pb2) + from google.cloud.bigtable import enums + + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) + + location = self.LOCATION_PATH + self.LOCATION_ID + state = enums.Cluster.State.RESIZING + storage_type = enums.StorageType.SSD + cluster_pb = data_v2_pb2.Cluster( + name=self.CLUSTER_NAME, + location=location, + state=state, + serve_nodes=self.SERVE_NODES, + default_storage_type=storage_type + ) + + klass = self._get_target_class() + cluster = klass.from_pb(cluster_pb, instance) + self.assertIsInstance(cluster, klass) + self.assertEqual(cluster._instance, instance) + self.assertEqual(cluster.cluster_id, self.CLUSTER_ID) + self.assertEqual(cluster.location_id, self.LOCATION_ID) + self.assertEqual(cluster.state, state) + self.assertEqual(cluster.serve_nodes, self.SERVE_NODES) + self.assertEqual(cluster.default_storage_type, storage_type) + + def test_from_pb_bad_cluster_name(self): + from google.cloud.bigtable_admin_v2.proto import ( + instance_pb2 as data_v2_pb2) + + bad_cluster_name = 'BAD_NAME' + + cluster_pb = data_v2_pb2.Cluster(name=bad_cluster_name) + + klass = self._get_target_class() + with self.assertRaises(ValueError): + klass.from_pb(cluster_pb, None) + + def test_from_pb_instance_id_mistmatch(self): + from google.cloud.bigtable_admin_v2.proto import ( + instance_pb2 as data_v2_pb2) + + 
ALT_INSTANCE_ID = 'ALT_INSTANCE_ID' + client = _Client(self.PROJECT) + instance = _Instance(ALT_INSTANCE_ID, client) + + self.assertNotEqual(self.INSTANCE_ID, ALT_INSTANCE_ID) + cluster_pb = data_v2_pb2.Cluster(name=self.CLUSTER_NAME) + + klass = self._get_target_class() + with self.assertRaises(ValueError): + klass.from_pb(cluster_pb, instance) + + def test_from_pb_project_mistmatch(self): + from google.cloud.bigtable_admin_v2.proto import ( + instance_pb2 as data_v2_pb2) + + ALT_PROJECT = 'ALT_PROJECT' + client = _Client(project=ALT_PROJECT) + instance = _Instance(self.INSTANCE_ID, client) + + self.assertNotEqual(self.PROJECT, ALT_PROJECT) + cluster_pb = data_v2_pb2.Cluster(name=self.CLUSTER_NAME) + + klass = self._get_target_class() + with self.assertRaises(ValueError): + klass.from_pb(cluster_pb, instance) + def test___eq__(self): client = _Client(self.PROJECT) instance = _Instance(self.INSTANCE_ID, client) @@ -111,24 +223,37 @@ def test___ne__(self): self.assertNotEqual(cluster1, cluster2) def test_reload(self): - from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES - from google.cloud.bigtable.instance import Instance from google.cloud.bigtable_admin_v2.gapic import ( bigtable_instance_admin_client) + from google.cloud.bigtable_admin_v2.proto import ( + instance_pb2 as data_v2_pb2) + from google.cloud.bigtable.enums import StorageType + from google.cloud.bigtable.enums import Cluster - LOCATION = 'LOCATION' api = bigtable_instance_admin_client.BigtableInstanceAdminClient( mock.Mock()) credentials = _make_credentials() client = self._make_client(project=self.PROJECT, credentials=credentials, admin=True) - instance = Instance(self.INSTANCE_ID, client) - cluster = self._make_one(self.CLUSTER_ID, instance, self.LOCATION_ID) + STORAGE_TYPE_SSD = StorageType.SSD + instance = _Instance(self.INSTANCE_ID, client) + cluster = self._make_one(self.CLUSTER_ID, instance, + location_id=self.LOCATION_ID, + serve_nodes=self.SERVE_NODES, + default_storage_type=STORAGE_TYPE_SSD) # Create response_pb - response_pb = _ClusterPB( - serve_nodes=DEFAULT_SERVE_NODES, - location=LOCATION, + LOCATION_ID_FROM_SERVER = 'new-location-id' + STATE = Cluster.State.READY + SERVE_NODES_FROM_SERVER = 10 + STORAGE_TYPE_FROM_SERVER = StorageType.HDD + + response_pb = data_v2_pb2.Cluster( + name=cluster.name, + location=self.LOCATION_PATH + LOCATION_ID_FROM_SERVER, + state=STATE, + serve_nodes=SERVE_NODES_FROM_SERVER, + default_storage_type=STORAGE_TYPE_FROM_SERVER ) # Patch the stub used by the API method. @@ -141,46 +266,122 @@ def test_reload(self): expected_result = None # reload() has no return value. # Check Cluster optional config values before. - self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES) + self.assertEqual(cluster.location_id, self.LOCATION_ID) + self.assertIsNone(cluster.state) + self.assertEqual(cluster.serve_nodes, self.SERVE_NODES) + self.assertEqual(cluster.default_storage_type, STORAGE_TYPE_SSD) # Perform the method and check the result. 
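        # (reload() below is expected to issue GetCluster through the patched
        #  instance-admin stub and refresh the locally cached location_id,
        #  state, serve_nodes and default_storage_type from the returned
        #  protobuf -- which is exactly what the assertions that follow the
        #  call verify.)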
result = cluster.reload() self.assertEqual(result, expected_result) + self.assertEqual(cluster.location_id, LOCATION_ID_FROM_SERVER) + self.assertEqual(cluster.state, STATE) + self.assertEqual(cluster.serve_nodes, SERVE_NODES_FROM_SERVER) + self.assertEqual(cluster.default_storage_type, + STORAGE_TYPE_FROM_SERVER) + + def test_exists(self): + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_instance_admin_client) + from google.cloud.bigtable_admin_v2.proto import ( + instance_pb2 as data_v2_pb2) + from google.cloud.bigtable.instance import Instance + from google.api_core import exceptions + + instance_api = ( + bigtable_instance_admin_client.BigtableInstanceAdminClient( + mock.Mock())) + credentials = _make_credentials() + client = self._make_client(project=self.PROJECT, + credentials=credentials, admin=True) + instance = Instance(self.INSTANCE_ID, client) + + # Create response_pb + cluster_name = client.instance_admin_client.cluster_path( + self.PROJECT, self.INSTANCE_ID, self.CLUSTER_ID) + response_pb = data_v2_pb2.Cluster(name=cluster_name) + + # Patch the stub used by the API method. + client._instance_admin_client = instance_api + instance_admin_client = client._instance_admin_client + instance_stub = instance_admin_client.bigtable_instance_admin_stub + instance_stub.GetCluster.side_effect = [ + response_pb, + exceptions.NotFound('testing'), + exceptions.BadRequest('testing') + ] + + # Perform the method and check the result. + non_existing_cluster_id = 'cluster-id-2' + alt_cluster_1 = self._make_one(self.CLUSTER_ID, instance) + alt_cluster_2 = self._make_one(non_existing_cluster_id, instance) + self.assertTrue(alt_cluster_1.exists()) + self.assertFalse(alt_cluster_2.exists()) + with self.assertRaises(exceptions.BadRequest): + alt_cluster_1.exists() def test_create(self): + import datetime from google.api_core import operation from google.longrunning import operations_pb2 + from google.protobuf.any_pb2 import Any + from google.cloud.bigtable_admin_v2.proto import ( + bigtable_instance_admin_pb2 as messages_v2_pb2) + from google.cloud._helpers import _datetime_to_pb_timestamp from google.cloud.bigtable.instance import Instance + from google.cloud.bigtable_admin_v2.types import instance_pb2 from google.cloud.bigtable_admin_v2.gapic import ( bigtable_instance_admin_client) + from google.cloud.bigtable_admin_v2.proto import ( + bigtable_instance_admin_pb2 as instance_v2_pb2) + from google.cloud.bigtable.enums import StorageType - api = bigtable_instance_admin_client.BigtableInstanceAdminClient( - mock.Mock()) + NOW = datetime.datetime.utcnow() + NOW_PB = _datetime_to_pb_timestamp(NOW) credentials = _make_credentials() client = self._make_client(project=self.PROJECT, credentials=credentials, admin=True) + STORAGE_TYPE_SSD = StorageType.SSD + LOCATION = self.LOCATION_PATH + self.LOCATION_ID instance = Instance(self.INSTANCE_ID, client) - cluster = self._make_one(self.CLUSTER_ID, instance, self.LOCATION_ID) + cluster = self._make_one(self.CLUSTER_ID, instance, + location_id=self.LOCATION_ID, + serve_nodes=self.SERVE_NODES, + default_storage_type=STORAGE_TYPE_SSD) + expected_request_cluster = instance_pb2.Cluster( + location=LOCATION, + serve_nodes=cluster.serve_nodes, + default_storage_type=cluster.default_storage_type) + expected_request = instance_v2_pb2.CreateClusterRequest( + parent=instance.name, cluster_id=self.CLUSTER_ID, + cluster=expected_request_cluster) - # Create response_pb - OP_ID = 5678 - OP_NAME = ( - 
'operations/projects/{}/instances/{}/clusters/{}/operations/{}' - .format(self.PROJECT, self.INSTANCE_ID, self.CLUSTER_ID, OP_ID)) - response_pb = operations_pb2.Operation(name=OP_NAME) + metadata = messages_v2_pb2.CreateClusterMetadata(request_time=NOW_PB) + type_url = 'type.googleapis.com/{}'.format( + messages_v2_pb2.CreateClusterMetadata.DESCRIPTOR.full_name) + response_pb = operations_pb2.Operation( + name=self.OP_NAME, + metadata=Any( + type_url=type_url, + value=metadata.SerializeToString() + ) + ) # Patch the stub used by the API method. + channel = ChannelStub(responses=[response_pb]) + api = bigtable_instance_admin_client.BigtableInstanceAdminClient( + channel=channel) client._instance_admin_client = api - instance_admin_client = client._instance_admin_client - instance_stub = instance_admin_client.bigtable_instance_admin_stub - instance_stub.CreateCluster.side_effect = [response_pb] # Perform the method and check the result. result = cluster.create() + actual_request = channel.requests[0][1] + self.assertEqual(actual_request, expected_request) self.assertIsInstance(result, operation.Operation) - self.assertEqual(result.operation.name, OP_NAME) - self.assertIsNone(result.metadata) + self.assertEqual(result.operation.name, self.OP_NAME) + self.assertIsInstance(result.metadata, + messages_v2_pb2.CreateClusterMetadata) def test_update(self): import datetime @@ -188,44 +389,35 @@ def test_update(self): from google.longrunning import operations_pb2 from google.protobuf.any_pb2 import Any from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.bigtable.instance import Instance - from google.cloud.bigtable_admin_v2.proto import ( - instance_pb2 as data_v2_pb2) from google.cloud.bigtable_admin_v2.proto import ( bigtable_instance_admin_pb2 as messages_v2_pb2) + from google.cloud.bigtable_admin_v2.types import instance_pb2 from google.cloud.bigtable_admin_v2.gapic import ( bigtable_instance_admin_client) + from google.cloud.bigtable.enums import StorageType NOW = datetime.datetime.utcnow() NOW_PB = _datetime_to_pb_timestamp(NOW) - SERVE_NODES = 81 - - api = bigtable_instance_admin_client.BigtableInstanceAdminClient( - mock.Mock()) credentials = _make_credentials() client = self._make_client(project=self.PROJECT, credentials=credentials, admin=True) - instance = Instance(self.INSTANCE_ID, client) + STORAGE_TYPE_SSD = StorageType.SSD + instance = _Instance(self.INSTANCE_ID, client) cluster = self._make_one(self.CLUSTER_ID, instance, - self.LOCATION_ID, serve_nodes=SERVE_NODES) + location_id=self.LOCATION_ID, + serve_nodes=self.SERVE_NODES, + default_storage_type=STORAGE_TYPE_SSD) + # Create expected_request + expected_request = instance_pb2.Cluster( + name=cluster.name, + serve_nodes=self.SERVE_NODES) - # Create request_pb - request_pb = _ClusterPB( - name=self.CLUSTER_NAME, - serve_nodes=SERVE_NODES, - ) - - # Create response_pb - OP_ID = 5678 - OP_NAME = ( - 'operations/projects/{}/instances/{}/clusters/{}/operations/{}' - .format(self.PROJECT, self.INSTANCE_ID, self.CLUSTER_ID, OP_ID)) metadata = messages_v2_pb2.UpdateClusterMetadata(request_time=NOW_PB) type_url = 'type.googleapis.com/{}'.format( messages_v2_pb2.UpdateClusterMetadata.DESCRIPTOR.full_name) response_pb = operations_pb2.Operation( - name=OP_NAME, + name=self.OP_NAME, metadata=Any( type_url=type_url, value=metadata.SerializeToString(), @@ -233,25 +425,23 @@ def test_update(self): ) # Patch the stub used by the API method. 
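        # (ChannelStub stands in for the gRPC channel: every unary-unary call
        #  made through the GAPIC client is recorded in channel.requests and
        #  answered from the canned responses list, so the test can later
        #  compare the exact CreateClusterRequest protobuf that was built
        #  against expected_request.)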
+ channel = ChannelStub(responses=[response_pb]) + api = bigtable_instance_admin_client.BigtableInstanceAdminClient( + channel=channel) client._instance_admin_client = api - instance_admin_client = client._instance_admin_client - instance_stub = instance_admin_client.bigtable_instance_admin_stub - instance_stub.UpdateCluster.side_effect = [response_pb] + # Perform the method and check the result. result = cluster.update() + actual_request = channel.requests[0][1] + self.assertEqual(actual_request, expected_request) self.assertIsInstance(result, operation.Operation) + self.assertEqual(result.operation.name, self.OP_NAME) self.assertIsInstance(result.metadata, messages_v2_pb2.UpdateClusterMetadata) - self.assertIsInstance(request_pb, data_v2_pb2.Cluster) - self.assertEqual(request_pb.name, self.CLUSTER_NAME) - self.assertEqual(request_pb.serve_nodes, SERVE_NODES) - def test_delete(self): from google.protobuf import empty_pb2 - from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES - from google.cloud.bigtable.instance import Instance from google.cloud.bigtable_admin_v2.gapic import ( bigtable_instance_admin_client) @@ -260,10 +450,9 @@ def test_delete(self): credentials = _make_credentials() client = self._make_client(project=self.PROJECT, credentials=credentials, admin=True) - instance = Instance(self.INSTANCE_ID, client) + instance = _Instance(self.INSTANCE_ID, client) cluster = self._make_one(self.CLUSTER_ID, instance, - self.LOCATION_ID, - serve_nodes=DEFAULT_SERVE_NODES) + self.LOCATION_ID) # Create response_pb response_pb = empty_pb2.Empty() @@ -283,13 +472,6 @@ def test_delete(self): self.assertEqual(result, expected_result) -def _ClusterPB(*args, **kw): - from google.cloud.bigtable_admin_v2.proto import ( - instance_pb2 as instance_v2_pb2) - - return instance_v2_pb2.Cluster(*args, **kw) - - class _Instance(object): def __init__(self, instance_id, client): diff --git a/packages/google-cloud-bigtable/tests/unit/test_instance.py b/packages/google-cloud-bigtable/tests/unit/test_instance.py index fb6334ef288e..34ef0ba1bff8 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_instance.py +++ b/packages/google-cloud-bigtable/tests/unit/test_instance.py @@ -53,7 +53,7 @@ class TestInstance(unittest.TestCase): PROJECT = 'project' INSTANCE_ID = 'instance-id' INSTANCE_NAME = 'projects/' + PROJECT + '/instances/' + INSTANCE_ID - LOCATION_ID = 'locname' + LOCATION_ID = 'locid' LOCATION = 'projects/' + PROJECT + '/locations/' + LOCATION_ID APP_PROFILE_PATH = ( 'projects/' + PROJECT + '/instances/' + INSTANCE_ID @@ -97,7 +97,7 @@ def test_constructor_defaults(self): def test_constructor_non_default(self): from google.cloud.bigtable import enums - instance_type = enums.InstanceType.DEVELOPMENT + instance_type = enums.Instance.Type.DEVELOPMENT labels = {'test': 'test'} client = object() @@ -123,12 +123,92 @@ def test_table_factory(self): self.assertEqual(table._instance, instance) self.assertEqual(table._app_profile_id, app_profile_id) + def test_cluster_factory(self): + from google.cloud.bigtable import enums + + CLUSTER_ID = '{}-cluster'.format(self.INSTANCE_ID) + LOCATION_ID = 'us-central1-c' + SERVE_NODES = 3 + STORAGE_TYPE = enums.StorageType.HDD + + instance = self._make_one(self.INSTANCE_ID, None) + + cluster = instance.cluster(CLUSTER_ID, location_id=LOCATION_ID, + serve_nodes=SERVE_NODES, + default_storage_type=STORAGE_TYPE) + self.assertIsInstance(cluster, Cluster) + self.assertEqual(cluster.cluster_id, CLUSTER_ID) + self.assertEqual(cluster.location_id, LOCATION_ID) + 
self.assertIsNone(cluster._state) + self.assertEqual(cluster.serve_nodes, SERVE_NODES) + self.assertEqual(cluster.default_storage_type, STORAGE_TYPE) + + def test_list_clusters(self): + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_instance_admin_client) + from google.cloud.bigtable_admin_v2.proto import ( + bigtable_instance_admin_pb2 as messages_v2_pb2) + from google.cloud.bigtable_admin_v2.proto import ( + instance_pb2 as data_v2_pb2) + from google.cloud.bigtable.instance import Instance + from google.cloud.bigtable.instance import Cluster + + instance_api = ( + bigtable_instance_admin_client.BigtableInstanceAdminClient( + mock.Mock())) + credentials = _make_credentials() + client = self._make_client(project=self.PROJECT, + credentials=credentials, admin=True) + instance = Instance(self.INSTANCE_ID, client) + + failed_location = 'FAILED' + cluster_id1 = 'cluster-id1' + cluster_id2 = 'ckuster-id2' + cluster_name1 = (client.instance_admin_client.cluster_path( + self.PROJECT, self.INSTANCE_ID, cluster_id1)) + cluster_name2 = (client.instance_admin_client.cluster_path( + self.PROJECT, self.INSTANCE_ID, cluster_id2)) + + # Create response_pb + response_pb = messages_v2_pb2.ListClustersResponse( + failed_locations=[ + failed_location + ], + clusters=[ + data_v2_pb2.Cluster( + name=cluster_name1, + ), + data_v2_pb2.Cluster( + name=cluster_name2, + ), + ], + ) + + # Patch the stub used by the API method. + client._instance_admin_client = instance_api + instance_admin_client = client._instance_admin_client + instance_stub = instance_admin_client.bigtable_instance_admin_stub + instance_stub.ListClusters.side_effect = [response_pb] + + # Perform the method and check the result. + clusters, failed_locations = instance.list_clusters() + + cluster_1, cluster_2 = clusters + + self.assertIsInstance(cluster_1, Cluster) + self.assertEqual(cluster_1.name, cluster_name1) + + self.assertIsInstance(cluster_2, Cluster) + self.assertEqual(cluster_2.name, cluster_name2) + + self.assertEqual(failed_locations, [failed_location]) + def test__update_from_pb_success(self): from google.cloud.bigtable_admin_v2.proto import ( instance_pb2 as data_v2_pb2) from google.cloud.bigtable import enums - instance_type = enums.InstanceType.PRODUCTION + instance_type = enums.Instance.Type.PRODUCTION instance_pb = data_v2_pb2.Instance( display_name=self.DISPLAY_NAME, type=instance_type, @@ -160,7 +240,7 @@ def test__update_from_pb_success_defaults(self): instance._update_from_pb(instance_pb) self.assertEqual(instance.display_name, self.DISPLAY_NAME) self.assertEqual(instance.type_, - enums.InstanceType.UNSPECIFIED) + enums.Instance.Type.UNSPECIFIED) self.assertFalse(instance.labels) def test__update_from_pb_no_display_name(self): @@ -180,7 +260,7 @@ def test_from_pb_success(self): client = _Client(project=self.PROJECT) - instance_type = enums.InstanceType.PRODUCTION + instance_type = enums.Instance.Type.PRODUCTION instance_pb = data_v2_pb2.Instance( name=self.INSTANCE_NAME, display_name=self.INSTANCE_ID, @@ -279,7 +359,7 @@ def test_reload(self): # Create response_pb DISPLAY_NAME = u'hey-hi-hello' - instance_type = enums.InstanceType.PRODUCTION + instance_type = enums.Instance.Type.PRODUCTION response_pb = data_v2_pb2.Instance( display_name=DISPLAY_NAME, type=instance_type, @@ -306,11 +386,16 @@ def test_reload(self): self.assertEqual(instance.display_name, DISPLAY_NAME) def test_create_check_conflicts(self): - client = object() - instance = self._make_one(self.INSTANCE_ID, client) + instance = 
self._make_one(self.INSTANCE_ID, None) with self.assertRaises(ValueError): instance.create(location_id=self.LOCATION_ID, clusters=[object(), object()]) + with self.assertRaises(ValueError): + instance.create(serve_nodes=3, + clusters=[object(), object()]) + with self.assertRaises(ValueError): + instance.create(default_storage_type=1, + clusters=[object(), object()]) def test_create(self): import datetime @@ -323,7 +408,6 @@ def test_create(self): from google.cloud.bigtable import enums from google.cloud.bigtable_admin_v2.gapic import ( bigtable_instance_admin_client) - from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES NOW = datetime.datetime.utcnow() NOW_PB = _datetime_to_pb_timestamp(NOW) @@ -332,7 +416,7 @@ def test_create(self): credentials=credentials, admin=True) instance = self._make_one(self.INSTANCE_ID, client, self.DISPLAY_NAME, - enums.InstanceType.PRODUCTION, + enums.Instance.Type.PRODUCTION, self.LABELS) # Create response_pb @@ -355,60 +439,29 @@ def test_create(self): client._instance_admin_client = instance_api # Perform the method and check the result. - result = instance.create(location_id=self.LOCATION_ID) - actual_request = channel.requests[0][1] - + serve_nodes = 3 cluster_id = '{}-cluster'.format(self.INSTANCE_ID) - cluster = self._create_cluster( - instance_api, cluster_id, self.LOCATION_ID, DEFAULT_SERVE_NODES, - enums.StorageType.UNSPECIFIED) + # cluster = instance.cluster(cluster_id, location_id=self.LOCATION_ID, + # serve_nodes=serve_nodes) + # result = instance.create(clusters=[cluster]) - expected_request = self._create_instance_request({cluster_id: cluster}) - self.assertEqual(expected_request, actual_request) - self.assertIsInstance(result, operation.Operation) - # self.assertEqual(result.operation.name, self.OP_NAME) - self.assertIsInstance(result.metadata, - messages_v2_pb2.CreateInstanceMetadata) + # TODO: replace this example with above once the otpion is removed + # from instance.create() method + result = instance.create(location_id=self.LOCATION_ID, + serve_nodes=serve_nodes) - def test_create_w_explicit_serve_nodes(self): - from google.api_core import operation - from google.longrunning import operations_pb2 - from google.cloud.bigtable import enums - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_instance_admin_client) - - serve_nodes = 10 - credentials = _make_credentials() - client = self._make_client(project=self.PROJECT, - credentials=credentials, admin=True) - instance = self._make_one(self.INSTANCE_ID, client, - self.DISPLAY_NAME, - enums.InstanceType.PRODUCTION, - self.LABELS) - # Create response_pb - response_pb = operations_pb2.Operation(name=self.OP_NAME) - - # Patch the stub used by the API method. - channel = ChannelStub(responses=[response_pb]) - instance_api = ( - bigtable_instance_admin_client.BigtableInstanceAdminClient( - channel=channel)) - client._instance_admin_client = instance_api - - # Perform the method and check the result. 
- result = instance.create( - location_id=self.LOCATION_ID, serve_nodes=serve_nodes, - default_storage_type=enums.StorageType.SSD) actual_request = channel.requests[0][1] - cluster_id = '{}-cluster'.format(self.INSTANCE_ID) - cluster = self._create_cluster( + cluster = self._create_cluster_pb( instance_api, cluster_id, self.LOCATION_ID, serve_nodes, - enums.StorageType.SSD) + enums.StorageType.UNSPECIFIED) expected_request = self._create_instance_request({cluster_id: cluster}) self.assertEqual(expected_request, actual_request) self.assertIsInstance(result, operation.Operation) + self.assertEqual(result.operation.name, self.OP_NAME) + self.assertIsInstance(result.metadata, + messages_v2_pb2.CreateInstanceMetadata) def test_create_w_clusters(self): import datetime @@ -421,7 +474,6 @@ def test_create_w_clusters(self): from google.cloud.bigtable import enums from google.cloud.bigtable_admin_v2.gapic import ( bigtable_instance_admin_client) - from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES NOW = datetime.datetime.utcnow() NOW_PB = _datetime_to_pb_timestamp(NOW) @@ -430,13 +482,13 @@ def test_create_w_clusters(self): credentials=credentials, admin=True) instance = self._make_one(self.INSTANCE_ID, client, self.DISPLAY_NAME, - enums.InstanceType.PRODUCTION, + enums.Instance.Type.PRODUCTION, self.LABELS) # Create response_pb metadata = messages_v2_pb2.CreateInstanceMetadata(request_time=NOW_PB) - type_url = 'type.googleapis.com/%s' % ( - messages_v2_pb2.CreateInstanceMetadata.DESCRIPTOR.full_name,) + type_url = 'type.googleapis.com/{}'.format( + messages_v2_pb2.CreateInstanceMetadata.DESCRIPTOR.full_name) response_pb = operations_pb2.Operation( name=self.OP_NAME, metadata=Any( @@ -453,40 +505,49 @@ def test_create_w_clusters(self): client._instance_admin_client = instance_api # Perform the method and check the result. 
- clusters = [Cluster('cluster-id1', instance, 'location-id1'), - Cluster('cluster-id2', instance, 'location-id2')] - result = instance.create(None, None, None, clusters) + cluster_id_1 = 'cluster-1' + cluster_id_2 = 'cluster-2' + location_id_1 = 'location-id-1' + location_id_2 = 'location-id-2' + serve_nodes_1 = 3 + serve_nodes_2 = 5 + clusters = [ + Cluster(cluster_id_1, instance, + location_id=location_id_1, + serve_nodes=serve_nodes_1), + Cluster(cluster_id_2, instance, + location_id=location_id_2, + serve_nodes=serve_nodes_2)] + result = instance.create(clusters=clusters) actual_request = channel.requests[0][1] - cluster1 = self._create_cluster( - instance_api, 'cluster-id1', 'location-id1', DEFAULT_SERVE_NODES, + cluster_1_pb = self._create_cluster_pb( + instance_api, cluster_id_1, location_id_1, serve_nodes_1, enums.StorageType.UNSPECIFIED) - cluster2 = self._create_cluster( - instance_api, 'cluster-id2', 'location-id2', DEFAULT_SERVE_NODES, + cluster_2_pb = self._create_cluster_pb( + instance_api, cluster_id_2, location_id_2, serve_nodes_2, enums.StorageType.UNSPECIFIED) expected_request = self._create_instance_request( - {'cluster-id1': cluster1, - 'cluster-id2': cluster2} + {cluster_id_1: cluster_1_pb, + cluster_id_2: cluster_2_pb} ) self.assertEqual(expected_request, actual_request) self.assertIsInstance(result, operation.Operation) - # self.assertEqual(result.operation.name, self.OP_NAME) + self.assertEqual(result.operation.name, self.OP_NAME) self.assertIsInstance(result.metadata, messages_v2_pb2.CreateInstanceMetadata) - def _create_cluster(self, instance_api, cluster_id, location_id, - server_nodes, storage_type): + def _create_cluster_pb(self, instance_api, cluster_id, location_id, + serve_nodes, storage_type): from google.cloud.bigtable_admin_v2.types import instance_pb2 - cluster_name = instance_api.cluster_path( - self.PROJECT, self.INSTANCE_ID, cluster_id) location = instance_api.location_path( self.PROJECT, location_id) return instance_pb2.Cluster( - name=cluster_name, location=location, - serve_nodes=server_nodes, + location=location, + serve_nodes=serve_nodes, default_storage_type=storage_type) def _create_instance_request(self, clusters): @@ -496,7 +557,7 @@ def _create_instance_request(self, clusters): from google.cloud.bigtable import enums instance = instance_pb2.Instance(display_name=self.DISPLAY_NAME, - type=enums.InstanceType.PRODUCTION, + type=enums.Instance.Type.PRODUCTION, labels=self.LABELS) return messages_v2_pb2.CreateInstanceRequest( @@ -527,10 +588,9 @@ def test_update(self): credentials = _make_credentials() client = self._make_client(project=self.PROJECT, credentials=credentials, admin=True) - instance = self._make_one(self.INSTANCE_ID, client, - display_name=self.DISPLAY_NAME, - instance_type=enums.InstanceType.DEVELOPMENT, - labels=self.LABELS) + instance = self._make_one( + self.INSTANCE_ID, client, display_name=self.DISPLAY_NAME, + instance_type=enums.Instance.Type.DEVELOPMENT, labels=self.LABELS) expected_request_instance = instance_pb2.Instance( name=instance.name, display_name=instance.display_name, From 662d7ea445433fc01380b7a01485b94963c3f3ea Mon Sep 17 00:00:00 2001 From: Alex <7764119+AVaksman@users.noreply.github.com> Date: Wed, 1 Aug 2018 11:25:25 -0400 Subject: [PATCH 157/892] shortenning cluster ID in system test (#5719) --- packages/google-cloud-bigtable/tests/system.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py 
index f308b6e7ca2c..219c2f54fde5 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -214,8 +214,8 @@ def test_create_instance_w_two_clusters(self): instance = Config.CLIENT.instance(ALT_INSTANCE_ID, instance_type=_PRODUCTION) - ALT_CLUSTER_ID_1 = ALT_INSTANCE_ID+'-cluster-1' - ALT_CLUSTER_ID_2 = ALT_INSTANCE_ID+'-cluster-2' + ALT_CLUSTER_ID_1 = ALT_INSTANCE_ID+'-c1' + ALT_CLUSTER_ID_2 = ALT_INSTANCE_ID+'-c2' LOCATION_ID_2 = 'us-central1-f' STORAGE_TYPE = enums.StorageType.HDD cluster_1 = instance.cluster( @@ -466,7 +466,7 @@ def test_create_cluster(self): from google.cloud.bigtable.enums import StorageType from google.cloud.bigtable.enums import Cluster - ALT_CLUSTER_ID = INSTANCE_ID+'-cluster-2' + ALT_CLUSTER_ID = INSTANCE_ID+'-c2' ALT_LOCATION_ID = 'us-central1-f' ALT_SERVE_NODES = 4 From 8d6f9e54f0b0521fad049168eadd2833a71ae063 Mon Sep 17 00:00:00 2001 From: Alex <7764119+AVaksman@users.noreply.github.com> Date: Fri, 3 Aug 2018 10:08:38 -0400 Subject: [PATCH 158/892] BIgtable: return 'instance.labels' as dictionary (#5728) Also, now using '_update_from_pb' to set parameters for instance and cluster objects --- .../google-cloud-bigtable/google/cloud/bigtable/cluster.py | 7 ++----- .../google/cloud/bigtable/instance.py | 7 +++---- packages/google-cloud-bigtable/tests/system.py | 2 +- 3 files changed, 6 insertions(+), 10 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py index e7e56f5e0ce6..0ee4d9cfb146 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py @@ -117,12 +117,9 @@ def from_pb(cls, cluster_pb, instance): raise ValueError('Project ID on cluster does not match the ' 'project ID on the client') cluster_id = match_cluster_name.group('cluster_id') - location_id = cluster_pb.location.split('/')[-1] - result = cls(cluster_id, instance, location_id=location_id, - serve_nodes=cluster_pb.serve_nodes, - default_storage_type=cluster_pb.default_storage_type, - _state=cluster_pb.state) + result = cls(cluster_id, instance) + result._update_from_pb(cluster_pb) return result def _update_from_pb(self, cluster_pb): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py index f4f86fb55eca..39d8ea4468cf 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py @@ -119,9 +119,8 @@ def from_pb(cls, instance_pb, client): raise ValueError('Project ID on instance does not match the ' 'project ID on the client') instance_id = match.group('instance_id') - - result = cls(instance_id, client, instance_pb.display_name, - instance_pb.type, instance_pb.labels) + result = cls(instance_id, client) + result._update_from_pb(instance_pb) return result def _update_from_pb(self, instance_pb): @@ -132,7 +131,7 @@ def _update_from_pb(self, instance_pb): raise ValueError('Instance protobuf does not contain display_name') self.display_name = instance_pb.display_name self.type_ = instance_pb.type - self.labels = instance_pb.labels + self.labels = dict(instance_pb.labels) @property def name(self): diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index 219c2f54fde5..fa3fd29ec285 100644 --- 
a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -197,7 +197,7 @@ def test_create_instance(self): self.assertEqual(instance, instance_alt) self.assertEqual(instance.display_name, instance_alt.display_name) self.assertEqual(instance.type_, instance_alt.type_) - self.assertEqual(instance.labels, instance_alt.labels) + self.assertEqual(instance_alt.labels, LABELS) def test_cluster_exists(self): NONEXISTING_CLUSTER_ID = 'cluster-id' From 1b6bad4fcf67e045571053423696d445868beef7 Mon Sep 17 00:00:00 2001 From: Aneep Tandel Date: Fri, 3 Aug 2018 19:54:02 +0530 Subject: [PATCH 159/892] BigTable: Create MutationBatcher for bigtable (#5651) --- .../google/cloud/bigtable/batcher.py | 151 +++++++++++++++ .../google/cloud/bigtable/row.py | 9 + .../google/cloud/bigtable/table.py | 23 +++ .../tests/unit/test_batcher.py | 179 ++++++++++++++++++ .../tests/unit/test_row.py | 20 ++ .../tests/unit/test_table.py | 11 ++ 6 files changed, 393 insertions(+) create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py create mode 100644 packages/google-cloud-bigtable/tests/unit/test_batcher.py diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py b/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py new file mode 100644 index 000000000000..eb697ff54df7 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py @@ -0,0 +1,151 @@ +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""User friendly container for Google Cloud Bigtable MutationBatcher.""" + + +FLUSH_COUNT = 1000 +MAX_MUTATIONS = 100000 +MAX_ROW_BYTES = 5242880 # 5MB + + +class MaxMutationsError(ValueError): + """The number of mutations for bulk request is too big.""" + + +class MutationsBatcher(object): + """ A MutationsBatcher is used in batch cases where the number of mutations + is large or unknown. It will store DirectRows in memory until one of the + size limits is reached, or an explicit call to flush() is performed. When + a flush event occurs, the DirectRows in memory will be sent to Cloud + Bigtable. Batching mutations is more efficient than sending individual + request. + + This class is not suited for usage in systems where each mutation + needs to guaranteed to be sent, since calling mutate may only result in an + in-memory change. In a case of a system crash, any DirectRows remaining in + memory will not necessarily be sent to the service, even after the + completion of the mutate() method. + + TODO: Performance would dramatically improve if this class had the + capability of asynchronous, parallel RPCs. + + :type table: class + :param table: class:`~google.cloud.bigtable.table.Table`. + + :type flush_count: int + :param flush_count: (Optional) Max number of rows to flush. If it + reaches the max number of rows it calls finish_batch() to mutate the + current row batch. Default is FLUSH_COUNT (1000 rows). 
+ + :type max_row_bytes: int + :param max_row_bytes: (Optional) Max number of row mutations size to + flush. If it reaches the max number of row mutations size it calls + finish_batch() to mutate the current row batch. Default is MAX_ROW_BYTES + (5 MB). + """ + + def __init__(self, table, flush_count=FLUSH_COUNT, + max_row_bytes=MAX_ROW_BYTES): + self.rows = [] + self.total_mutation_count = 0 + self.total_size = 0 + self.table = table + self.flush_count = flush_count + self.max_row_bytes = max_row_bytes + + def mutate(self, row): + """ Add a row to the batch. If the current batch meets one of the size + limits, the batch is sent synchronously. + + Example: + >>> # Batcher for max row bytes + >>> batcher = table.mutations_batcher(max_row_bytes=1024) + >>> + >>> row = table.row(b'row_key') + >>> + >>> # In batcher mutate will flush current batch if it + >>> # reaches the max_row_bytes + >>> batcher.mutate(row) + >>> + >>> batcher.flush() + + :type row: class + :param row: class:`~google.cloud.bigtable.row.DirectRow`. + + :raises: One of the following: + * :exc:`~.table._BigtableRetryableError` if any + row returned a transient error. + * :exc:`RuntimeError` if the number of responses doesn't + match the number of rows that were retried + * :exc:`.batcher.MaxMutationsError` if any row exceeds max + mutations count. + """ + mutation_count = len(row._get_mutations()) + if mutation_count > MAX_MUTATIONS: + raise MaxMutationsError( + 'The row key {} exceeds the number of mutations {}.'.format( + row.row_key, mutation_count), ) + + if (self.total_mutation_count + mutation_count) >= MAX_MUTATIONS: + self.flush() + + self.rows.append(row) + self.total_mutation_count += mutation_count + self.total_size += row.get_mutations_size() + + if (self.total_size >= self.max_row_bytes or + len(self.rows) >= self.flush_count): + self.flush() + + def mutate_rows(self, rows): + """ Add a row to the batch. If the current batch meets one of the size + limits, the batch is sent synchronously. + + Example: + >>> # Batcher for flush count + >>> batcher = table.mutations_batcher(flush_count=2) + >>> + >>> row1 = table.row(b'row_key_1') + >>> row2 = table.row(b'row_key_2') + >>> row3 = table.row(b'row_key_3') + >>> row4 = table.row(b'row_key_4') + >>> + >>> # In batcher mutate will flush current batch if it + >>> # reaches the max flush_count + >>> batcher.mutate_rows([row_1, row_2, row_3, row_4]) + >>> + >>> batcher.flush() + + :type rows: list:[`~google.cloud.bigtable.row.DirectRow`] + :param rows: list:[`~google.cloud.bigtable.row.DirectRow`]. + + :raises: One of the following: + * :exc:`~.table._BigtableRetryableError` if any + row returned a transient error. + * :exc:`RuntimeError` if the number of responses doesn't + match the number of rows that were retried + * :exc:`.batcher.MaxMutationsError` if any row exceeds max + mutations count. + """ + for row in rows: + self.mutate(row) + + def flush(self): + """ Sends the current. batch to Cloud Bigtable. 
""" + if len(self.rows) is not 0: + self.table.mutate_rows(self.rows) + self.total_mutation_count = 0 + self.total_size = 0 + self.rows = [] diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row.py index 08378c7b2b20..ee6c8d0ea5fa 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row.py @@ -286,6 +286,15 @@ def _get_mutations(self, state=None): # pylint: disable=unused-argument """ return self._pb_mutations + def get_mutations_size(self): + """ Gets the total mutations size for current row """ + + mutation_size = 0 + for mutation in self._get_mutations(): + mutation_size += mutation.ByteSize() + + return mutation_size + def set_cell(self, column_family_id, column, value, timestamp=None): """Sets a value in this row. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index ddba8e6b17a6..cbda4f15ccb0 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -24,6 +24,8 @@ from google.cloud._helpers import _to_bytes from google.cloud.bigtable.column_family import _gc_rule_from_pb from google.cloud.bigtable.column_family import ColumnFamily +from google.cloud.bigtable.batcher import MutationsBatcher +from google.cloud.bigtable.batcher import (FLUSH_COUNT, MAX_ROW_BYTES) from google.cloud.bigtable.row import AppendRow from google.cloud.bigtable.row import ConditionalRow from google.cloud.bigtable.row import DirectRow @@ -483,6 +485,27 @@ def drop_by_prefix(self, row_key_prefix, timeout=None): table_admin_client.drop_row_range( self.name, row_key_prefix=_to_bytes(row_key_prefix)) + def mutations_batcher(self, flush_count=FLUSH_COUNT, + max_row_bytes=MAX_ROW_BYTES): + """Factory to create a mutation batcher associated with this instance. + + :type table: class + :param table: class:`~google.cloud.bigtable.table.Table`. + + :type flush_count: int + :param flush_count: (Optional) Maximum number of rows per batch. If it + reaches the max number of rows it calls finish_batch() to + mutate the current row batch. Default is FLUSH_COUNT (1000 + rows). + + :type max_row_bytes: int + :param max_row_bytes: (Optional) Max number of row mutations size to + flush. If it reaches the max number of row mutations size it + calls finish_batch() to mutate the current row batch. + Default is MAX_ROW_BYTES (5 MB). + """ + return MutationsBatcher(self, flush_count, max_row_bytes) + class _RetryableMutateRowsWorker(object): """A callable worker that can retry to mutate rows with transient errors. diff --git a/packages/google-cloud-bigtable/tests/unit/test_batcher.py b/packages/google-cloud-bigtable/tests/unit/test_batcher.py new file mode 100644 index 000000000000..4666f28d1776 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/test_batcher.py @@ -0,0 +1,179 @@ +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +import unittest + +import mock + +from ._testing import _make_credentials + +from google.cloud.bigtable.batcher import MutationsBatcher +from google.cloud.bigtable.row import DirectRow + + +class TestMutationsBatcher(unittest.TestCase): + from grpc import StatusCode + + TABLE_ID = 'table-id' + TABLE_NAME = '/tables/' + TABLE_ID + + # RPC Status Codes + SUCCESS = StatusCode.OK.value[0] + + @staticmethod + def _get_target_class(): + from google.cloud.bigtable.table import Table + + return Table + + def _make_table(self, *args, **kwargs): + return self._get_target_class()(*args, **kwargs) + + @staticmethod + def _get_target_client_class(): + from google.cloud.bigtable.client import Client + + return Client + + def _make_client(self, *args, **kwargs): + return self._get_target_client_class()(*args, **kwargs) + + def test_constructor(self): + credentials = _make_credentials() + client = self._make_client(project='project-id', + credentials=credentials, admin=True) + + instance = client.instance(instance_id='instance-id') + table = self._make_table(self.TABLE_ID, instance) + + mutation_batcher = MutationsBatcher(table) + self.assertEqual(table, mutation_batcher.table) + + def test_mutate_row(self): + table = _Table(self.TABLE_NAME) + mutation_batcher = MutationsBatcher(table=table) + + rows = [DirectRow(row_key=b'row_key'), + DirectRow(row_key=b'row_key_2'), + DirectRow(row_key=b'row_key_3'), + DirectRow(row_key=b'row_key_4')] + + mutation_batcher.mutate_rows(rows) + mutation_batcher.flush() + + self.assertEqual(table.mutation_calls, 1) + + def test_mutate_rows(self): + table = _Table(self.TABLE_NAME) + mutation_batcher = MutationsBatcher(table=table) + + row = DirectRow(row_key=b'row_key') + row.set_cell('cf1', b'c1', 1) + row.set_cell('cf1', b'c2', 2) + row.set_cell('cf1', b'c3', 3) + row.set_cell('cf1', b'c4', 4) + + mutation_batcher.mutate(row) + + mutation_batcher.flush() + + self.assertEqual(table.mutation_calls, 1) + + def test_flush_with_no_rows(self): + table = _Table(self.TABLE_NAME) + mutation_batcher = MutationsBatcher(table=table) + mutation_batcher.flush() + + self.assertEqual(table.mutation_calls, 0) + + def test_add_row_with_max_flush_count(self): + table = _Table(self.TABLE_NAME) + mutation_batcher = MutationsBatcher(table=table, flush_count=3) + + row_1 = DirectRow(row_key=b'row_key_1') + row_2 = DirectRow(row_key=b'row_key_2') + row_3 = DirectRow(row_key=b'row_key_3') + + mutation_batcher.mutate(row_1) + mutation_batcher.mutate(row_2) + mutation_batcher.mutate(row_3) + + self.assertEqual(table.mutation_calls, 1) + + @mock.patch('google.cloud.bigtable.batcher.MAX_MUTATIONS', new=3) + def test_mutate_row_with_max_mutations_failure(self): + from google.cloud.bigtable.batcher import MaxMutationsError + + table = _Table(self.TABLE_NAME) + mutation_batcher = MutationsBatcher(table=table) + + row = DirectRow(row_key=b'row_key') + row.set_cell('cf1', b'c1', 1) + row.set_cell('cf1', b'c2', 2) + row.set_cell('cf1', b'c3', 3) + row.set_cell('cf1', b'c4', 4) + + with self.assertRaises(MaxMutationsError): + mutation_batcher.mutate(row) + + @mock.patch('google.cloud.bigtable.batcher.MAX_MUTATIONS', new=3) + def test_mutate_row_with_max_mutations(self): + table = _Table(self.TABLE_NAME) + mutation_batcher = MutationsBatcher(table=table) + + row = DirectRow(row_key=b'row_key') + row.set_cell('cf1', b'c1', 1) + row.set_cell('cf1', b'c2', 2) + row.set_cell('cf1', b'c3', 3) + + 
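        # (MAX_MUTATIONS is patched down to 3 above, and this row carries
        #  exactly 3 mutations: it is still accepted, since MaxMutationsError
        #  is raised only when a single row *exceeds* the limit.  The explicit
        #  flush() below is what sends the batch, so the stub table records
        #  exactly one mutate_rows call.)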
mutation_batcher.mutate(row) + mutation_batcher.flush() + + self.assertEqual(table.mutation_calls, 1) + + def test_mutate_row_with_max_row_bytes(self): + table = _Table(self.TABLE_NAME) + mutation_batcher = MutationsBatcher(table=table, + max_row_bytes=3 * 1024 * 1024) + + number_of_bytes = 1 * 1024 * 1024 + max_value = b'1' * number_of_bytes + + row = DirectRow(row_key=b'row_key') + row.set_cell('cf1', b'c1', max_value) + row.set_cell('cf1', b'c2', max_value) + row.set_cell('cf1', b'c3', max_value) + + mutation_batcher.mutate(row) + + self.assertEqual(table.mutation_calls, 1) + + +class _Instance(object): + + def __init__(self, client=None): + self._client = client + + +class _Table(object): + + def __init__(self, name, client=None): + self.name = name + self._instance = _Instance(client) + self.mutation_calls = 0 + + def mutate_rows(self, rows): + self.mutation_calls += 1 + return rows diff --git a/packages/google-cloud-bigtable/tests/unit/test_row.py b/packages/google-cloud-bigtable/tests/unit/test_row.py index 2e5e9bbfe1f8..b6601aede200 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row.py @@ -107,6 +107,26 @@ def test__get_mutations(self): row._pb_mutations = mutations = object() self.assertIs(mutations, row._get_mutations(None)) + def test_get_mutations_size(self): + row_key = b'row_key' + row = self._make_one(row_key, None) + + column_family_id1 = u'column_family_id1' + column_family_id2 = u'column_family_id2' + column1 = b'column1' + column2 = b'column2' + number_of_bytes = 1 * 1024 * 1024 + value = b'1' * number_of_bytes + + row.set_cell(column_family_id1, column1, value) + row.set_cell(column_family_id2, column2, value) + + total_mutations_size = 0 + for mutation in row._get_mutations(): + total_mutations_size += mutation.ByteSize() + + self.assertEqual(row.get_mutations_size(), total_mutations_size) + def _set_cell_helper(self, column=None, column_bytes=None, value=b'foobar', timestamp=None, timestamp_micros=-1): diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index fa611471276f..805dbc497339 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -831,6 +831,17 @@ def test_drop_by_prefix_w_timeout(self): self.assertEqual(result, expected_result) + def test_mutations_batcher_factory(self): + flush_count = 100 + max_row_bytes = 1000 + table = self._make_one(self.TABLE_ID, None) + mutation_batcher = table.mutations_batcher( + flush_count=flush_count, max_row_bytes=max_row_bytes) + + self.assertEqual(mutation_batcher.table.table_id, self.TABLE_ID) + self.assertEqual(mutation_batcher.flush_count, flush_count) + self.assertEqual(mutation_batcher.max_row_bytes, max_row_bytes) + class Test__RetryableMutateRowsWorker(unittest.TestCase): from grpc import StatusCode From 69ed5b6bcd2b8534d6fe48bfd6ed612f34a93f4d Mon Sep 17 00:00:00 2001 From: sumit-ql <39561577+sumit-ql@users.noreply.github.com> Date: Fri, 3 Aug 2018 19:59:33 +0530 Subject: [PATCH 160/892] Bigtable : Add 'Instance._state' property (#5736) --- .../google/cloud/bigtable/instance.py | 19 ++++++++++++++++++- .../google-cloud-bigtable/tests/system.py | 2 ++ .../tests/unit/test_instance.py | 15 ++++++++++++--- 3 files changed, 32 insertions(+), 4 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py 
index 39d8ea4468cf..61c44bd98d2d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py @@ -79,6 +79,14 @@ class Instance(object): be associated with a given resource. Label values must be between 0 and 63 characters long. Keys and values must both be under 128 bytes. + + :type _state: int + :param _state: (`OutputOnly`) + The current state of the instance. + Possible values are represented by the following constants: + :data:`google.cloud.bigtable.enums.Instance.State.STATE_NOT_KNOWN`. + :data:`google.cloud.bigtable.enums.Instance.State.READY`. + :data:`google.cloud.bigtable.enums.Instance.State.CREATING`. """ def __init__(self, @@ -86,12 +94,14 @@ def __init__(self, client, display_name=None, instance_type=None, - labels=None): + labels=None, + _state=None): self.instance_id = instance_id self._client = client self.display_name = display_name or instance_id self.type_ = instance_type self.labels = labels + self._state = _state @classmethod def from_pb(cls, instance_pb, client): @@ -119,6 +129,7 @@ def from_pb(cls, instance_pb, client): raise ValueError('Project ID on instance does not match the ' 'project ID on the client') instance_id = match.group('instance_id') + result = cls(instance_id, client) result._update_from_pb(instance_pb) return result @@ -132,6 +143,7 @@ def _update_from_pb(self, instance_pb): self.display_name = instance_pb.display_name self.type_ = instance_pb.type self.labels = dict(instance_pb.labels) + self._state = instance_pb.state @property def name(self): @@ -151,6 +163,11 @@ def name(self): return self._client.instance_admin_client.instance_path( project=self._client.project, instance=self.instance_id) + @property + def state(self): + """google.cloud.bigtable.enums.Instance.State: state of Instance.""" + return self._state + def __eq__(self, other): if not isinstance(other, self.__class__): return NotImplemented diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index fa3fd29ec285..fcffd458e951 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -176,6 +176,7 @@ def test_create_instance_defaults(self): def test_create_instance(self): from google.cloud.bigtable import enums _DEVELOPMENT = enums.Instance.Type.DEVELOPMENT + _STATE = enums.Instance.State.READY ALT_INSTANCE_ID = 'new' + unique_resource_id('-') instance = Config.CLIENT.instance(ALT_INSTANCE_ID, @@ -198,6 +199,7 @@ def test_create_instance(self): self.assertEqual(instance.display_name, instance_alt.display_name) self.assertEqual(instance.type_, instance_alt.type_) self.assertEqual(instance_alt.labels, LABELS) + self.assertEqual(_STATE, instance_alt.state) def test_cluster_exists(self): NONEXISTING_CLUSTER_ID = 'cluster-id' diff --git a/packages/google-cloud-bigtable/tests/unit/test_instance.py b/packages/google-cloud-bigtable/tests/unit/test_instance.py index 34ef0ba1bff8..1f60348d27dc 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_instance.py +++ b/packages/google-cloud-bigtable/tests/unit/test_instance.py @@ -93,23 +93,26 @@ def test_constructor_defaults(self): self.assertIsNone(instance.type_) self.assertIsNone(instance.labels) self.assertIs(instance._client, client) + self.assertIsNone(instance.state) def test_constructor_non_default(self): from google.cloud.bigtable import enums instance_type = enums.Instance.Type.DEVELOPMENT + state = enums.Instance.State.READY labels = 
{'test': 'test'} client = object() instance = self._make_one(self.INSTANCE_ID, client, display_name=self.DISPLAY_NAME, instance_type=instance_type, - labels=labels) + labels=labels, _state=state) self.assertEqual(instance.instance_id, self.INSTANCE_ID) self.assertEqual(instance.display_name, self.DISPLAY_NAME) self.assertEqual(instance.type_, instance_type) self.assertEqual(instance.labels, labels) self.assertIs(instance._client, client) + self.assertEqual(instance.state, state) def test_table_factory(self): from google.cloud.bigtable.table import Table @@ -209,10 +212,12 @@ def test__update_from_pb_success(self): from google.cloud.bigtable import enums instance_type = enums.Instance.Type.PRODUCTION + state = enums.Instance.State.READY instance_pb = data_v2_pb2.Instance( display_name=self.DISPLAY_NAME, type=instance_type, - labels=self.LABELS + labels=self.LABELS, + state=state ) instance = self._make_one(None, None) @@ -223,6 +228,7 @@ def test__update_from_pb_success(self): self.assertEqual(instance.display_name, self.DISPLAY_NAME) self.assertEqual(instance.type_, instance_type) self.assertEqual(instance.labels, self.LABELS) + self.assertEqual(instance._state, state) def test__update_from_pb_success_defaults(self): from google.cloud.bigtable_admin_v2.proto import ( @@ -261,11 +267,13 @@ def test_from_pb_success(self): client = _Client(project=self.PROJECT) instance_type = enums.Instance.Type.PRODUCTION + state = enums.Instance.State.READY instance_pb = data_v2_pb2.Instance( name=self.INSTANCE_NAME, display_name=self.INSTANCE_ID, type=instance_type, - labels=self.LABELS + labels=self.LABELS, + state=state ) klass = self._get_target_class() @@ -276,6 +284,7 @@ def test_from_pb_success(self): self.assertEqual(instance.display_name, self.INSTANCE_ID) self.assertEqual(instance.type_, instance_type) self.assertEqual(instance.labels, self.LABELS) + self.assertEqual(instance._state, state) def test_from_pb_bad_instance_name(self): from google.cloud.bigtable_admin_v2.proto import ( From 77a3dcb9a580829f10c32a1c91ec679af8bbca6f Mon Sep 17 00:00:00 2001 From: Alex <7764119+AVaksman@users.noreply.github.com> Date: Fri, 3 Aug 2018 11:34:41 -0400 Subject: [PATCH 161/892] Bigtable: add labels {'python-system': ISO-timestamp} to systest instances (#5729) --- .../google-cloud-bigtable/tests/system.py | 24 ++++++++++++------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index fcffd458e951..43daf3e94b80 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -39,7 +39,6 @@ LOCATION_ID = 'us-central1-c' INSTANCE_ID = 'g-c-p' + unique_resource_id('-') -LABELS = {u'foo': u'bar'} TABLE_ID = 'google-cloud-python-test-table' CLUSTER_ID = INSTANCE_ID+'-cluster' SERVE_NODES = 3 @@ -57,6 +56,11 @@ ROUTING_POLICY_TYPE_ANY = 1 ROUTING_POLICY_TYPE_SINGLE = 2 EXISTING_INSTANCES = [] +LABEL_KEY = u'python-system' +label_stamp = datetime.datetime.utcnow() \ + .replace(microsecond=0, tzinfo=UTC,) \ + .strftime("%Y-%m-%dt%H-%M-%S") +LABELS = {LABEL_KEY: str(label_stamp)} class Config(object): @@ -150,7 +154,7 @@ def test_create_instance_defaults(self): from google.cloud.bigtable import enums ALT_INSTANCE_ID = 'ndef' + unique_resource_id('-') - instance = Config.CLIENT.instance(ALT_INSTANCE_ID) + instance = Config.CLIENT.instance(ALT_INSTANCE_ID, labels=LABELS) ALT_CLUSTER_ID = ALT_INSTANCE_ID+'-cluster' cluster = instance.cluster( ALT_CLUSTER_ID, 
location_id=LOCATION_ID, serve_nodes=SERVE_NODES) @@ -170,8 +174,6 @@ def test_create_instance_defaults(self): # Make sure that by default a PRODUCTION type instance is created self.assertIsNone(instance.type_) self.assertEqual(instance_alt.type_, enums.Instance.Type.PRODUCTION) - self.assertIsNone(instance.labels) - self.assertFalse(instance_alt.labels) def test_create_instance(self): from google.cloud.bigtable import enums @@ -214,7 +216,8 @@ def test_create_instance_w_two_clusters(self): _PRODUCTION = enums.Instance.Type.PRODUCTION ALT_INSTANCE_ID = 'dif' + unique_resource_id('-') instance = Config.CLIENT.instance(ALT_INSTANCE_ID, - instance_type=_PRODUCTION) + instance_type=_PRODUCTION, + labels=LABELS) ALT_CLUSTER_ID_1 = ALT_INSTANCE_ID+'-c1' ALT_CLUSTER_ID_2 = ALT_INSTANCE_ID+'-c2' @@ -261,7 +264,11 @@ def test_create_instance_w_two_clusters(self): def test_update_display_name_and_labels(self): OLD_DISPLAY_NAME = Config.INSTANCE.display_name NEW_DISPLAY_NAME = 'Foo Bar Baz' - NEW_LABELS = {'foo_bar': 'foo_bar'} + n_label_stamp = datetime.datetime.utcnow() \ + .replace(microsecond=0, tzinfo=UTC) \ + .strftime("%Y-%m-%dt%H-%M-%S") + + NEW_LABELS = {LABEL_KEY: str(n_label_stamp)} Config.INSTANCE.display_name = NEW_DISPLAY_NAME Config.INSTANCE.labels = NEW_LABELS operation = Config.INSTANCE.update() @@ -291,9 +298,10 @@ def test_update_type(self): _DEVELOPMENT = Instance.Type.DEVELOPMENT _PRODUCTION = Instance.Type.PRODUCTION - ALT_INSTANCE_ID = 'new' + unique_resource_id('-') + ALT_INSTANCE_ID = 'ndif' + unique_resource_id('-') instance = Config.CLIENT.instance(ALT_INSTANCE_ID, - instance_type=_DEVELOPMENT) + instance_type=_DEVELOPMENT, + labels=LABELS) operation = instance.create(location_id=LOCATION_ID, serve_nodes=None) # Make sure this instance gets deleted after the test case. self.instances_to_delete.append(instance) From 743caad54842a142dcc0ad0bacc67fb1aaf5c261 Mon Sep 17 00:00:00 2001 From: sumit-ql <39561577+sumit-ql@users.noreply.github.com> Date: Tue, 7 Aug 2018 01:47:44 +0530 Subject: [PATCH 162/892] Bigtable: optimize 'Table.exists' performance (#5749) --- .../google/cloud/bigtable/enums.py | 28 +++++++++++++++++++ .../google/cloud/bigtable/table.py | 7 +++-- .../google-cloud-bigtable/tests/system.py | 9 ++++++ .../tests/unit/test_table.py | 7 ++++- 4 files changed, 47 insertions(+), 4 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/enums.py b/packages/google-cloud-bigtable/google/cloud/bigtable/enums.py index 3adcef8267c9..140cebdd0305 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/enums.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/enums.py @@ -121,3 +121,31 @@ class RoutingPolicyType(object): """ ANY = enums.RoutingPolicyType.ANY SINGLE = enums.RoutingPolicyType.SINGLE + + +class Table(object): + class View(object): + """ + Defines a view over a table's fields. + + Attributes: + VIEW_UNSPECIFIED (int): Uses the default view for each method + as documented in its request. + NAME_ONLY (int): Only populates ``name``. + SCHEMA_VIEW (int): Only populates ``name`` and fields related + to the table's schema. + REPLICATION_VIEW (int): This is a private alpha release of + Cloud Bigtable replication. This feature is not currently available + to most Cloud Bigtable customers. This feature might be changed in + backward-incompatible ways and is not recommended for production use. + It is not subject to any SLA or deprecation policy. + + Only populates ``name`` and fields related to the table's + replication state. 
+ FULL (int): Populates all fields. + """ + VIEW_UNSPECIFIED = enums.Table.View.VIEW_UNSPECIFIED + NAME_ONLY = enums.Table.View.NAME_ONLY + SCHEMA_VIEW = enums.Table.View.SCHEMA_VIEW + REPLICATION_VIEW = enums.Table.View.REPLICATION_VIEW + FULL = enums.Table.View.FULL diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index cbda4f15ccb0..28ec2e355be6 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -33,6 +33,7 @@ from google.cloud.bigtable.row_data import YieldRowsData from google.cloud.bigtable.row_set import RowSet from google.cloud.bigtable.row_set import RowRange +from google.cloud.bigtable_admin_v2 import enums from google.cloud.bigtable_v2.proto import ( bigtable_pb2 as data_messages_v2_pb2) from google.cloud.bigtable_admin_v2.proto import ( @@ -45,6 +46,7 @@ # (https://cloud.google.com/bigtable/docs/reference/data/rpc/ # google.bigtable.v2#google.bigtable.v2.MutateRowRequest) _MAX_BULK_MUTATIONS = 100000 +VIEW_NAME_ONLY = enums.Table.View.NAME_ONLY class _BigtableRetryableError(Exception): @@ -223,11 +225,10 @@ def exists(self): """ table_client = self._instance._client.table_admin_client try: - table_client.get_table(name=self.name) + table_client.get_table(name=self.name, view=VIEW_NAME_ONLY) + return True except NotFound: return False - else: - return True def delete(self): """Delete this table.""" diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index 43daf3e94b80..b43fccf776eb 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -531,6 +531,15 @@ def test_list_tables(self): tables = Config.INSTANCE.list_tables() self.assertEqual(tables, [self._table]) + def test_exists(self): + temp_table_id = 'test-table_existence' + temp_table = Config.INSTANCE.table(temp_table_id) + self.assertFalse(temp_table.exists()) + temp_table.create() + self.assertTrue(temp_table.exists()) + temp_table.delete() + self.assertFalse(temp_table.exists()) + def test_create_table(self): temp_table_id = 'test-create-table' temp_table = Config.INSTANCE.table(temp_table_id) diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index 805dbc497339..4507a16dc096 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -310,6 +310,7 @@ def test_exists(self): from google.cloud.bigtable_admin_v2.gapic import ( bigtable_instance_admin_client, bigtable_table_admin_client) from google.api_core.exceptions import NotFound + from google.api_core.exceptions import BadRequest table_api = bigtable_table_admin_client.BigtableTableAdminClient( mock.Mock()) @@ -332,9 +333,10 @@ def test_exists(self): client._instance_admin_client = instance_api bigtable_table_stub = ( client._table_admin_client.bigtable_table_admin_stub) - bigtable_table_stub.ListTables.side_effect = [ + bigtable_table_stub.GetTable.side_effect = [ response_pb, NotFound('testing'), + BadRequest('testing') ] # Perform the method and check the result. 
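
# Sketch of how the optimized existence check above is meant to be used,
# assuming an existing Instance object named `instance`; the table ID is a
# placeholder. Requesting only the NAME_ONLY view keeps the GetTable
# response payload minimal, which is the point of this change.

table = instance.table('my-table-id')
if not table.exists():
    table.create()
assert table.exists()
table.delete()
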
@@ -347,6 +349,9 @@ def test_exists(self): result = table2.exists() self.assertEqual(False, result) + with self.assertRaises(BadRequest): + table2.exists() + def test_delete(self): from google.cloud.bigtable_admin_v2.gapic import ( bigtable_table_admin_client) From 98611e9971dab879c280f1f18cf8aed7ccd1214c Mon Sep 17 00:00:00 2001 From: Alex <7764119+AVaksman@users.noreply.github.com> Date: Mon, 6 Aug 2018 16:18:33 -0400 Subject: [PATCH 163/892] Bigtable: add 'Client.list_clusters()' (#5715) --- .../google/cloud/bigtable/client.py | 24 ++++++ .../google-cloud-bigtable/tests/system.py | 8 ++ .../tests/unit/test_client.py | 77 +++++++++++++++++++ .../tests/unit/test_instance.py | 2 +- 4 files changed, 110 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py index 4ddc56ea091c..a473f3158ea9 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py @@ -36,10 +36,12 @@ from google.cloud.bigtable import __version__ from google.cloud.bigtable.instance import Instance +from google.cloud.bigtable.cluster import Cluster from google.cloud.client import ClientWithProject from google.cloud.bigtable_admin_v2 import enums +from google.cloud.bigtable.cluster import _CLUSTER_NAME_RE INSTANCE_TYPE_PRODUCTION = enums.Instance.Type.PRODUCTION @@ -252,3 +254,25 @@ def list_instances(self): instances = [ Instance.from_pb(instance, self) for instance in resp.instances] return instances, resp.failed_locations + + def list_clusters(self): + """List the clusters in the project. + + :rtype: tuple + :returns: + (clusters, failed_locations), where 'clusters' is list of + :class:`google.cloud.bigtable.instance.Cluster`, and + 'failed_locations' is a list of strings representing + locations which could not be resolved. 
+ """ + resp = (self.instance_admin_client.list_clusters( + self.instance_admin_client.instance_path(self.project, '-'))) + clusters = [] + instances = {} + for cluster in resp.clusters: + match_cluster_name = _CLUSTER_NAME_RE.match(cluster.name) + instance_id = match_cluster_name.group('instance') + if instance_id not in instances: + instances[instance_id] = self.instance(instance_id) + clusters.append(Cluster.from_pb(cluster, instances[instance_id])) + return clusters, resp.failed_locations diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index b43fccf776eb..f4461fc3605b 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -261,6 +261,14 @@ def test_create_instance_w_two_clusters(self): self.assertEqual(cluster_2.default_storage_type, alt_cluster_2.default_storage_type) + # Test list clusters in project via 'client.list_clusters' + clusters, failed_locations = Config.CLIENT.list_clusters() + self.assertFalse(failed_locations) + found = set([cluster.name for cluster in clusters]) + self.assertTrue({alt_cluster_1.name, + alt_cluster_2.name, + Config.CLUSTER.name}.issubset(found)) + def test_update_display_name_and_labels(self): OLD_DISPLAY_NAME = Config.INSTANCE.display_name NEW_DISPLAY_NAME = 'Foo Bar Baz' diff --git a/packages/google-cloud-bigtable/tests/unit/test_client.py b/packages/google-cloud-bigtable/tests/unit/test_client.py index e8ba2d75307e..3dac94eb4819 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/test_client.py @@ -226,3 +226,80 @@ def test_list_instances(self): self.assertTrue(instance_2._client is client) self.assertEqual(failed_locations, [FAILED_LOCATION]) + + def test_list_clusters(self): + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_instance_admin_client) + from google.cloud.bigtable_admin_v2.proto import ( + bigtable_instance_admin_pb2 as messages_v2_pb2) + from google.cloud.bigtable_admin_v2.proto import ( + instance_pb2 as data_v2_pb2) + from google.cloud.bigtable.instance import Cluster + + instance_api = ( + bigtable_instance_admin_client.BigtableInstanceAdminClient( + mock.Mock())) + credentials = _make_credentials() + client = self._make_one(project=self.PROJECT, credentials=credentials, + admin=True) + + INSTANCE_ID1 = 'instance-id1' + INSTANCE_ID2 = 'instance-id2' + + failed_location = 'FAILED' + cluster_id1 = '{}-cluster'.format(INSTANCE_ID1) + cluster_id2 = '{}-cluster-1'.format(INSTANCE_ID2) + cluster_id3 = '{}-cluster-2'.format(INSTANCE_ID2) + cluster_name1 = (client.instance_admin_client.cluster_path( + self.PROJECT, INSTANCE_ID1, cluster_id1)) + cluster_name2 = (client.instance_admin_client.cluster_path( + self.PROJECT, INSTANCE_ID2, cluster_id2)) + cluster_name3 = (client.instance_admin_client.cluster_path( + self.PROJECT, INSTANCE_ID2, cluster_id3)) + + # Create response_pb + response_pb = messages_v2_pb2.ListClustersResponse( + failed_locations=[ + failed_location + ], + clusters=[ + data_v2_pb2.Cluster( + name=cluster_name1, + ), + data_v2_pb2.Cluster( + name=cluster_name2, + ), + data_v2_pb2.Cluster( + name=cluster_name3, + ), + + ], + ) + + # Patch the stub used by the API method. + client._instance_admin_client = instance_api + instance_stub = ( + client._instance_admin_client.bigtable_instance_admin_stub) + instance_stub.ListClusters.side_effect = [response_pb] + + # Perform the method and check the result. 
+ clusters, failed_locations = client.list_clusters() + + cluster_1, cluster_2, cluster_3 = clusters + + self.assertIsInstance(cluster_1, Cluster) + self.assertEqual(cluster_1.name, cluster_name1) + self.assertEqual(cluster_1._instance.instance_id, + INSTANCE_ID1) + + self.assertIsInstance(cluster_2, Cluster) + self.assertEqual(cluster_2.name, cluster_name2) + self.assertEqual(cluster_2._instance.instance_id, + INSTANCE_ID2) + + self.assertIsInstance(cluster_3, Cluster) + self.assertEqual(cluster_3.name, cluster_name3) + self.assertEqual(cluster_3._instance.instance_id, + INSTANCE_ID2) + + self.assertEqual(failed_locations, [failed_location]) diff --git a/packages/google-cloud-bigtable/tests/unit/test_instance.py b/packages/google-cloud-bigtable/tests/unit/test_instance.py index 1f60348d27dc..282a64cfa6a7 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_instance.py +++ b/packages/google-cloud-bigtable/tests/unit/test_instance.py @@ -166,7 +166,7 @@ def test_list_clusters(self): failed_location = 'FAILED' cluster_id1 = 'cluster-id1' - cluster_id2 = 'ckuster-id2' + cluster_id2 = 'cluster-id2' cluster_name1 = (client.instance_admin_client.cluster_path( self.PROJECT, self.INSTANCE_ID, cluster_id1)) cluster_name2 = (client.instance_admin_client.cluster_path( From 77c1fc622a2dae0ee5799b85ef5b67777388ade6 Mon Sep 17 00:00:00 2001 From: Alex <7764119+AVaksman@users.noreply.github.com> Date: Wed, 15 Aug 2018 23:07:31 -0400 Subject: [PATCH 164/892] added instance.exists(), system and unit tests (#5802) --- .../google/cloud/bigtable/instance.py | 14 +++++++ .../google-cloud-bigtable/tests/system.py | 7 ++++ .../tests/unit/test_instance.py | 38 +++++++++++++++++++ 3 files changed, 59 insertions(+) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py index 61c44bd98d2d..5b0e5952988d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py @@ -25,6 +25,7 @@ from google.cloud.bigtable_admin_v2.types import instance_pb2 from google.cloud.bigtable.enums import RoutingPolicyType +from google.api_core.exceptions import NotFound _EXISTING_INSTANCE_LOCATION_ID = 'see-existing-cluster' @@ -192,6 +193,19 @@ def reload(self): # instance ID on the response match the request. self._update_from_pb(instance_pb) + def exists(self): + """Check whether the instance already exists. + + :rtype: bool + :returns: True if the table exists, else False. + """ + try: + self._client.instance_admin_client.get_instance(name=self.name) + return True + # NOTE: There could be other exceptions that are returned to the user. 
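
# Sketch of calling the new Client.list_clusters() outside of a test,
# assuming application default credentials; the project ID is a placeholder.

from google.cloud.bigtable.client import Client

client = Client(project='my-project', admin=True)
clusters, failed_locations = client.list_clusters()
for cluster in clusters:
    print(cluster.name)
if failed_locations:
    print('Locations that could not be reached:', failed_locations)
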
+ except NotFound: + return False + def create(self, location_id=None, serve_nodes=None, default_storage_type=None, clusters=None): diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index f4461fc3605b..4830655557b2 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -211,6 +211,13 @@ def test_cluster_exists(self): self.assertTrue(cluster.exists()) self.assertFalse(alt_cluster.exists()) + def test_instance_exists(self): + NONEXISTING_INSTANCE_ID = 'instancer-id' + + alt_instance = Config.CLIENT.instance(NONEXISTING_INSTANCE_ID) + self.assertTrue(Config.INSTANCE.exists()) + self.assertFalse(alt_instance.exists()) + def test_create_instance_w_two_clusters(self): from google.cloud.bigtable import enums _PRODUCTION = enums.Instance.Type.PRODUCTION diff --git a/packages/google-cloud-bigtable/tests/unit/test_instance.py b/packages/google-cloud-bigtable/tests/unit/test_instance.py index 282a64cfa6a7..23bcdab4602e 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_instance.py +++ b/packages/google-cloud-bigtable/tests/unit/test_instance.py @@ -394,6 +394,44 @@ def test_reload(self): # Check Instance optional config values before. self.assertEqual(instance.display_name, DISPLAY_NAME) + def test_exists(self): + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_instance_admin_client) + from google.cloud.bigtable_admin_v2.proto import ( + instance_pb2 as data_v2_pb2) + from google.api_core import exceptions + + api = ( + bigtable_instance_admin_client.BigtableInstanceAdminClient( + mock.Mock())) + credentials = _make_credentials() + client = self._make_client(project=self.PROJECT, + credentials=credentials, admin=True) + + # Create response_pb + instance_name = client.instance_admin_client.instance_path( + self.PROJECT, self.INSTANCE_ID) + response_pb = data_v2_pb2.Instance(name=instance_name) + + # Patch the stub used by the API method. + client._instance_admin_client = api + instance_admin_client = client._instance_admin_client + instance_stub = instance_admin_client.bigtable_instance_admin_stub + instance_stub.GetCluster.side_effect = [ + response_pb, + exceptions.NotFound('testing'), + exceptions.BadRequest('testing') + ] + + # Perform the method and check the result. 
+ non_existing_instance_id = 'instance-id-2' + alt_instance_1 = self._make_one(self.INSTANCE_ID, client) + alt_instance_2 = self._make_one(non_existing_instance_id, client) + self.assertTrue(alt_instance_1.exists()) + self.assertFalse(alt_instance_2.exists()) + with self.assertRaises(exceptions.BadRequest): + alt_instance_2.exists() + def test_create_check_conflicts(self): instance = self._make_one(self.INSTANCE_ID, None) with self.assertRaises(ValueError): From 66d85edcab1e9c3fd13f5fc8aacca444e7af7bc6 Mon Sep 17 00:00:00 2001 From: Alex <7764119+AVaksman@users.noreply.github.com> Date: Thu, 16 Aug 2018 14:43:55 -0400 Subject: [PATCH 165/892] Bigtable: app_profile_object (#5782) * Initial commit + name property * WIP AppProfile object implementation complete added app_profile factory to instance.py now 'instance.list_app_profiles' returns list of new AppProfile objects system tests partial unit tetsts * completing unit tests for app_profile * addressing comments for app_profile.py and instance.py * refactor system tests for app_profile * addressing formatting comments * Removed extra line in test_app_profile.py * addressing comments * added instance.exists(), system and unit tests (#5802) * fixing cover * Updating app_profile.py top comment --- .../google/cloud/bigtable/app_profile.py | 305 ++++++++ .../google/cloud/bigtable/instance.py | 188 +---- .../google-cloud-bigtable/tests/system.py | 282 ++++---- .../tests/unit/test_app_profile.py | 661 ++++++++++++++++++ .../tests/unit/test_instance.py | 404 ++--------- 5 files changed, 1205 insertions(+), 635 deletions(-) create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/app_profile.py create mode 100644 packages/google-cloud-bigtable/tests/unit/test_app_profile.py diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/app_profile.py b/packages/google-cloud-bigtable/google/cloud/bigtable/app_profile.py new file mode 100644 index 000000000000..fc0dfd9c5cf8 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/app_profile.py @@ -0,0 +1,305 @@ +# Copyright 2018 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""User-friendly container for Google Cloud Bigtable AppProfile.""" + + +import re + +from google.cloud.bigtable.enums import RoutingPolicyType +from google.cloud.bigtable_admin_v2.types import instance_pb2 +from google.protobuf import field_mask_pb2 +from google.api_core.exceptions import NotFound + +_APP_PROFILE_NAME_RE = re.compile( + r'^projects/(?P[^/]+)/' + r'instances/(?P[^/]+)/' + r'appProfiles/(?P[_a-zA-Z0-9][-_.a-zA-Z0-9]*)$') + + +class AppProfile(object): + """Representation of a Google Cloud Bigtable AppProfile. + + We can use a :class:`AppProfile` to: + + * :meth:`reload` itself + * :meth:`create` itself + * :meth:`update` itself + * :meth:`delete` itself + + :type app_profile_id: str + :param app_profile_id: The ID of the AppProfile. Must be of the form + ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. 
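
# Sketch of the new Instance.exists() check, assuming a configured admin
# Client named `client`; the instance ID is a placeholder.

instance = client.instance('my-instance-id')
if instance.exists():
    instance.reload()
else:
    print('Instance does not exist')
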
+ + :type: routing_policy_type: int + :param: routing_policy_type: (Optional) The type of the routing policy. + Possible values are represented + by the following constants: + :data:`google.cloud.bigtable.enums.RoutingPolicyType.ANY` + :data:`google.cloud.bigtable.enums.RoutingPolicyType.SINGLE` + + :type: description: str + :param: description: (Optional) Long form description of the use + case for this AppProfile. + + :type: cluster_id: str + :param: cluster_id: (Optional) Unique cluster_id which is only required + when routing_policy_type is + ROUTING_POLICY_TYPE_SINGLE. + + :type: allow_transactional_writes: bool + :param: allow_transactional_writes: (Optional) If true, allow + transactional writes for + ROUTING_POLICY_TYPE_SINGLE. + """ + + def __init__(self, app_profile_id, instance, + routing_policy_type=None, + description=None, cluster_id=None, + allow_transactional_writes=None): + self.app_profile_id = app_profile_id + self._instance = instance + self.routing_policy_type = routing_policy_type + self.description = description + self.cluster_id = cluster_id + self.allow_transactional_writes = allow_transactional_writes + + @property + def name(self): + """AppProfile name used in requests. + + .. note:: + + This property will not change if ``app_profile_id`` does not, but + the return value is not cached. + + The AppProfile name is of the form + ``"projects/../instances/../app_profile/{app_profile_id}"`` + + :rtype: str + :returns: The AppProfile name. + """ + return self.instance_admin_client.app_profile_path( + self._instance._client.project, self._instance.instance_id, + self.app_profile_id) + + @property + def instance_admin_client(self): + """Shortcut to instance_admin_client + + :rtype: :class:`.bigtable_admin_pb2.BigtableInstanceAdmin` + :returns: A BigtableInstanceAdmin instance. + """ + return self._instance._client.instance_admin_client + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + # NOTE: This does not compare the configuration values, such as + # the routing_policy_type. Instead, it only compares + # identifying values instance, AppProfile ID and client. This is + # intentional, since the same AppProfile can be in different + # states if not synchronized. + return (other.app_profile_id == self.app_profile_id and + other._instance == self._instance) + + def __ne__(self, other): + return not self == other + + @classmethod + def from_pb(cls, app_profile_pb, instance): + """Creates an instance app_profile from a protobuf. + + :type app_profile_pb: :class:`instance_pb2.app_profile_pb` + :param app_profile_pb: An instance protobuf object. + + :type instance: :class:`google.cloud.bigtable.instance.Instance` + :param instance: The instance that owns the cluster. + + :rtype: :class:`AppProfile` + :returns: The AppProfile parsed from the protobuf response. + + :raises: :class:`ValueError ` if the AppProfile + name does not match + ``projects/{project}/instances/{instance_id}/appProfiles/{app_profile_id}`` + or if the parsed instance ID does not match the istance ID + on the client. + or if the parsed project ID does not match the project ID + on the client. 
+ """ + match_app_profile_name = ( + _APP_PROFILE_NAME_RE.match(app_profile_pb.name)) + if match_app_profile_name is None: + raise ValueError('AppProfile protobuf name was not in the ' + 'expected format.', app_profile_pb.name) + if match_app_profile_name.group('instance') != instance.instance_id: + raise ValueError('Instance ID on app_profile does not match the ' + 'instance ID on the client') + if match_app_profile_name.group('project') != instance._client.project: + raise ValueError('Project ID on app_profile does not match the ' + 'project ID on the client') + app_profile_id = match_app_profile_name.group('app_profile_id') + + result = cls(app_profile_id, instance) + result._update_from_pb(app_profile_pb) + return result + + def _update_from_pb(self, app_profile_pb): + """Refresh self from the server-provided protobuf. + Helper for :meth:`from_pb` and :meth:`reload`. + """ + self.routing_policy_type = None + self.allow_transactional_writes = None + self.cluster_id = None + + self.description = app_profile_pb.description + + routing_policy_type = None + if app_profile_pb.HasField('multi_cluster_routing_use_any'): + routing_policy_type = RoutingPolicyType.ANY + self.allow_transactional_writes = False + else: + routing_policy_type = RoutingPolicyType.SINGLE + self.cluster_id = app_profile_pb.single_cluster_routing.cluster_id + self.allow_transactional_writes = ( + app_profile_pb.single_cluster_routing + .allow_transactional_writes) + self.routing_policy_type = routing_policy_type + + def _to_pb(self): + """Create an AppProfile proto buff message for API calls + :rtype: :class:`.instance_pb2.AppProfile` + :returns: The converted current object. + + :raises: :class:`ValueError ` if the AppProfile + routing_policy_type is not set + """ + if not self.routing_policy_type: + raise ValueError('AppProfile required routing policy.') + + single_cluster_routing = None + multi_cluster_routing_use_any = None + + if self.routing_policy_type == RoutingPolicyType.ANY: + multi_cluster_routing_use_any = ( + instance_pb2.AppProfile.MultiClusterRoutingUseAny()) + else: + single_cluster_routing = ( + instance_pb2.AppProfile.SingleClusterRouting( + cluster_id=self.cluster_id, + allow_transactional_writes=self.allow_transactional_writes) + ) + + app_profile_pb = instance_pb2.AppProfile( + name=self.name, description=self.description, + multi_cluster_routing_use_any=multi_cluster_routing_use_any, + single_cluster_routing=single_cluster_routing + ) + return app_profile_pb + + def reload(self): + """Reload the metadata for this cluster""" + + app_profile_pb = ( + self.instance_admin_client.get_app_profile( + self.name)) + + # NOTE: _update_from_pb does not check that the project and + # app_profile ID on the response match the request. + self._update_from_pb(app_profile_pb) + + def exists(self): + """Check whether the AppProfile already exists. + + :rtype: bool + :returns: True if the AppProfile exists, else False. + """ + try: + self.instance_admin_client.get_app_profile(self.name) + return True + # NOTE: There could be other exceptions that are returned to the user. + except NotFound: + return False + + def create(self, ignore_warnings=None): + """Create this AppProfile. + + .. note:: + + Uses the ``instance`` and ``app_profile_id`` on the current + :class:`AppProfile` in addition to the ``routing_policy_type``, + ``description``, ``cluster_id`` and ``allow_transactional_writes``. + To change them before creating, reset the values via + + .. 
code:: python + + app_profile.app_profile_id = 'i-changed-my-mind' + app_profile.routing_policy_type = ( + google.cloud.bigtable.enums.RoutingPolicyType.SINGLE + ) + app_profile.description = 'new-description' + app-profile.cluster_id = 'other-cluster-id' + app-profile.allow_transactional_writes = True + + before calling :meth:`create`. + + :type: ignore_warnings: bool + :param: ignore_warnings: (Optional) If true, ignore safety checks when + creating the AppProfile. + """ + return self.from_pb(self.instance_admin_client.create_app_profile( + parent=self._instance.name, app_profile_id=self.app_profile_id, + app_profile=self._to_pb(), ignore_warnings=ignore_warnings), + self._instance) + + def update(self, ignore_warnings=None): + """Update this app_profile. + + .. note:: + + Update any or all of the following values: + ``routing_policy_type`` + ``description`` + ``cluster_id`` + ``allow_transactional_writes`` + + """ + update_mask_pb = field_mask_pb2.FieldMask() + + if self.description is not None: + update_mask_pb.paths.append('description') + + if self.routing_policy_type == RoutingPolicyType.ANY: + update_mask_pb.paths.append('multi_cluster_routing_use_any') + else: + update_mask_pb.paths.append('single_cluster_routing') + + return self.instance_admin_client.update_app_profile( + app_profile=self._to_pb(), update_mask=update_mask_pb, + ignore_warnings=ignore_warnings) + + def delete(self, ignore_warnings=None): + """Delete this AppProfile. + + :type: ignore_warnings: bool + :param: ignore_warnings: If true, ignore safety checks when deleting + the AppProfile. + + :raises: google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. google.api_core.exceptions.RetryError: + If the request failed due to a retryable error and retry + attempts failed. ValueError: If the parameters are invalid. + """ + self.instance_admin_client.delete_app_profile( + self.name, ignore_warnings) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py index 5b0e5952988d..b9e5fa17b87d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py @@ -19,12 +19,12 @@ from google.cloud.bigtable.table import Table from google.cloud.bigtable.cluster import Cluster +from google.cloud.bigtable.app_profile import AppProfile from google.protobuf import field_mask_pb2 from google.cloud.bigtable_admin_v2.types import instance_pb2 -from google.cloud.bigtable.enums import RoutingPolicyType from google.api_core.exceptions import NotFound @@ -431,13 +431,15 @@ def list_tables(self): return result - def create_app_profile(self, app_profile_id, routing_policy_type, - description=None, ignore_warnings=None, - cluster_id=None, allow_transactional_writes=False): - """Creates an app profile within an instance. + def app_profile(self, app_profile_id, + routing_policy_type=None, + description=None, cluster_id=None, + allow_transactional_writes=None): + """Factory to create AppProfile associated with this instance. - :type: app_profile_id: str - :param app_profile_id: The unique name for the new app profile. + :type app_profile_id: str + :param app_profile_id: The ID of the AppProfile. Must be of the form + ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. :type: routing_policy_type: int :param: routing_policy_type: The type of the routing policy. 
@@ -448,11 +450,7 @@ def create_app_profile(self, app_profile_id, routing_policy_type, :type: description: str :param: description: (Optional) Long form description of the use - case for this AppProfile. - - :type: ignore_warnings: bool - :param: ignore_warnings: (Optional) If true, ignore safety checks when - creating the app profile. + case for this AppProfile. :type: cluster_id: str :param: cluster_id: (Optional) Unique cluster_id which is only required @@ -464,160 +462,22 @@ def create_app_profile(self, app_profile_id, routing_policy_type, transactional writes for ROUTING_POLICY_TYPE_SINGLE. - :rtype: :class:`~google.cloud.bigtable_admin_v2.types.AppProfile` - :return: The AppProfile instance. - :raises: :class:`ValueError ` If routing - policy is not set. + :rtype: :class:`~google.cloud.bigtable.app_profile.AppProfile>` + :returns: AppProfile for this instance. """ - if not routing_policy_type: - raise ValueError('AppProfile required routing policy.') - - single_cluster_routing = None - multi_cluster_routing_use_any = None - instance_admin_client = self._client._instance_admin_client - name = instance_admin_client.app_profile_path( - self._client.project, self.instance_id, app_profile_id) - - if routing_policy_type == RoutingPolicyType.ANY: - multi_cluster_routing_use_any = ( - instance_pb2.AppProfile.MultiClusterRoutingUseAny()) - - if routing_policy_type == RoutingPolicyType.SINGLE: - single_cluster_routing = ( - instance_pb2.AppProfile.SingleClusterRouting( - cluster_id=cluster_id, - allow_transactional_writes=allow_transactional_writes - )) - - app_profile = instance_pb2.AppProfile( - name=name, description=description, - multi_cluster_routing_use_any=multi_cluster_routing_use_any, - single_cluster_routing=single_cluster_routing - ) - - return self._client._instance_admin_client.create_app_profile( - parent=self.name, app_profile_id=app_profile_id, - app_profile=app_profile, ignore_warnings=ignore_warnings) - - def get_app_profile(self, app_profile_id): - """Gets information about an app profile. - - :type: app_profile_id: str - :param app_profile_id: The unique name for the app profile. - - :rtype: :class:`~google.cloud.bigtable_admin_v2.types.AppProfile` - :return: The AppProfile instance. - """ - instance_admin_client = self._client._instance_admin_client - name = instance_admin_client.app_profile_path( - self._client.project, self.instance_id, app_profile_id) - return self._client._instance_admin_client.get_app_profile(name) + return AppProfile( + app_profile_id, self, routing_policy_type=routing_policy_type, + description=description, cluster_id=cluster_id, + allow_transactional_writes=allow_transactional_writes) def list_app_profiles(self): - """Lists information about app profiles in an instance. - - :rtype: :list:[`~google.cloud.bigtable_admin_v2.types.AppProfile`] - :return: A :list:[`~google.cloud.bigtable_admin_v2.types.AppProfile`]. - By default, this is a list of - :class:`~google.cloud.bigtable_admin_v2.types.AppProfile` - instances. - """ - list_app_profiles = list( - self._client._instance_admin_client.list_app_profiles(self.name)) - return list_app_profiles - - def update_app_profile(self, app_profile_id, - routing_policy_type, description=None, - ignore_warnings=None, - cluster_id=None, - allow_transactional_writes=False): - """Updates an app profile within an instance. - - :type: app_profile_id: str - :param app_profile_id: The unique name for the new app profile. 
- - :type: update_mask: list - :param: update_mask: Name of the parameters of AppProfiles that - needed to update. + """Lists information about AppProfiles in an instance. - :type: routing_policy_type: int - :param: routing_policy_type: The type of the routing policy. - Possible values are represented - by the following constants: - :data:`google.cloud.bigtable.enums.RoutingPolicyType.ANY` - :data:`google.cloud.bigtable.enums.RoutingPolicyType.SINGLE` - - :type: description: str - :param: description: (Optional) Optional long form description of the - use case for this AppProfile. - - :type: ignore_warnings: bool - :param: ignore_warnings: (Optional) If true, ignore safety checks when - creating the app profile. - - :type: cluster_id: str - :param: cluster_id: (Optional) Unique cluster_id which is only required - when routing_policy_type is - ROUTING_POLICY_TYPE_SINGLE. - - :type: allow_transactional_writes: bool - :param: allow_transactional_writes: (Optional) If true, allow - transactional writes for - ROUTING_POLICY_TYPE_SINGLE. - - :rtype: :class:`~google.cloud.bigtable_admin_v2.types.AppProfile` - :return: The AppProfile instance. - :raises: :class:`ValueError ` If routing - policy is not set. - """ - if not routing_policy_type: - raise ValueError('AppProfile required routing policy.') - - update_mask_pb = field_mask_pb2.FieldMask() - single_cluster_routing = None - multi_cluster_routing_use_any = None - instance_admin_client = self._client._instance_admin_client - name = instance_admin_client.app_profile_path( - self._client.project, self.instance_id, app_profile_id) - - if description is not None: - update_mask_pb.paths.append('description') - - if routing_policy_type == RoutingPolicyType.ANY: - multi_cluster_routing_use_any = ( - instance_pb2.AppProfile.MultiClusterRoutingUseAny()) - update_mask_pb.paths.append('multi_cluster_routing_use_any') - - if routing_policy_type == RoutingPolicyType.SINGLE: - single_cluster_routing = ( - instance_pb2.AppProfile.SingleClusterRouting( - cluster_id=cluster_id, - allow_transactional_writes=allow_transactional_writes - )) - update_mask_pb.paths.append('single_cluster_routing') - - update_app_profile_pb = instance_pb2.AppProfile( - name=name, description=description, - multi_cluster_routing_use_any=multi_cluster_routing_use_any, - single_cluster_routing=single_cluster_routing - ) - return self._client._instance_admin_client.update_app_profile( - app_profile=update_app_profile_pb, update_mask=update_mask_pb, - ignore_warnings=ignore_warnings) - - def delete_app_profile(self, app_profile_id, ignore_warnings=False): - """Deletes an app profile from an instance. - - :type: app_profile_id: str - :param app_profile_id: The unique name for the app profile to delete. - - :raises: google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. google.api_core.exceptions.RetryError: - If the request failed due to a retryable error and retry - attempts failed. ValueError: If the parameters are invalid. + :rtype: :list:[`~google.cloud.bigtable.app_profile.AppProfile`] + :returns: A :list:[`~google.cloud.bigtable.app_profile.AppProfile`]. + By default, this is a list of + :class:`~google.cloud.bigtable.app_profile.AppProfile` + instances. 
""" - instance_admin_client = self._client._instance_admin_client - app_profile_path = instance_admin_client.app_profile_path( - self._client.project, self.instance_id, app_profile_id) - self._client._instance_admin_client.delete_app_profile( - app_profile_path, ignore_warnings) + resp = self._client._instance_admin_client.list_app_profiles(self.name) + return [AppProfile.from_pb(app_profile, self) for app_profile in resp] diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index 4830655557b2..64cc6b94c105 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -53,8 +53,6 @@ CELL_VAL4 = b'foo' ROW_KEY = b'row-key' ROW_KEY_ALT = b'row-key-alt' -ROUTING_POLICY_TYPE_ANY = 1 -ROUTING_POLICY_TYPE_SINGLE = 2 EXISTING_INSTANCES = [] LABEL_KEY = u'python-system' label_stamp = datetime.datetime.utcnow() \ @@ -276,6 +274,89 @@ def test_create_instance_w_two_clusters(self): alt_cluster_2.name, Config.CLUSTER.name}.issubset(found)) + # Test create app profile with multi_cluster_routing policy + app_profiles_to_delete = [] + description = 'routing policy-multy' + app_profile_id_1 = 'app_profile_id_1' + routing = enums.RoutingPolicyType.ANY + self._test_create_app_profile_helper( + app_profile_id_1, instance, + routing_policy_type=routing, + description=description, + ignore_warnings=True + ) + app_profiles_to_delete.append(app_profile_id_1) + + # Test list app profiles + self._test_list_app_profiles_helper(instance, [app_profile_id_1]) + + # Test modify app profile app_profile_id_1 + # routing policy to single cluster policy, + # cluster -> ALT_CLUSTER_ID_1, + # allow_transactional_writes -> disallowed + # modify description + description = 'to routing policy-single' + routing = enums.RoutingPolicyType.SINGLE + self._test_modify_app_profile_helper( + app_profile_id_1, instance, + routing_policy_type=routing, + description=description, cluster_id=ALT_CLUSTER_ID_1, + allow_transactional_writes=False) + + # Test modify app profile app_profile_id_1 + # cluster -> ALT_CLUSTER_ID_2, + # allow_transactional_writes -> allowed + self._test_modify_app_profile_helper( + app_profile_id_1, instance, + routing_policy_type=routing, + description=description, + cluster_id=ALT_CLUSTER_ID_2, + allow_transactional_writes=True, + ignore_warnings=True) + + # Test create app profile with single cluster routing policy + description = 'routing policy-single' + app_profile_id_2 = 'app_profile_id_2' + routing = enums.RoutingPolicyType.SINGLE + self._test_create_app_profile_helper( + app_profile_id_2, instance, + routing_policy_type=routing, + description=description, cluster_id=ALT_CLUSTER_ID_2, + allow_transactional_writes=False) + app_profiles_to_delete.append(app_profile_id_2) + + # Test list app profiles + self._test_list_app_profiles_helper( + instance, [app_profile_id_1, app_profile_id_2]) + + # Test modify app profile app_profile_id_2 to + # allow transactional writes + # Note: no need to set ``ignore_warnings`` to True + # since we are not restrictings anything with this modification. 
+ self._test_modify_app_profile_helper( + app_profile_id_2, instance, + routing_policy_type=routing, + description=description, + cluster_id=ALT_CLUSTER_ID_2, + allow_transactional_writes=True) + + # Test modify app profile app_profile_id_2 routing policy + # to multi_cluster_routing policy + # modify description + description = 'to routing policy-multy' + routing = enums.RoutingPolicyType.ANY + self._test_modify_app_profile_helper( + app_profile_id_2, + instance, + routing_policy_type=routing, + description=description, + allow_transactional_writes=False, + ignore_warnings=True) + + # Test delete app profiles + for app_profile_id in app_profiles_to_delete: + self._test_delete_app_profile_helper(app_profile_id, instance) + def test_update_display_name_and_labels(self): OLD_DISPLAY_NAME = Config.INSTANCE.display_name NEW_DISPLAY_NAME = 'Foo Bar Baz' @@ -339,133 +420,6 @@ def test_update_type(self): instance_alt.reload() self.assertEqual(instance_alt.type_, _PRODUCTION) - def test_create_app_profile_with_multi_routing_policy(self): - from google.cloud.bigtable_admin_v2.types import instance_pb2 - - description = 'Foo App Profile' - instance = Config.INSTANCE - ignore_warnings = True - app_profile_id = 'app_profile_id_1' - - app_profile = instance.create_app_profile( - app_profile_id=app_profile_id, - routing_policy_type=ROUTING_POLICY_TYPE_ANY, - description=description, - ignore_warnings=ignore_warnings - ) - - # Load a different app_profile objec form the server and - # verrify that it is the same - alt_app_profile = instance.get_app_profile(app_profile_id) - self.assertEqual(app_profile, alt_app_profile) - - # Modify existing app_profile to singly routing policy and confirm - new_description = 'To single routing policy' - allow_transactional_writes = False - operation = instance.update_app_profile( - app_profile_id=app_profile_id, - routing_policy_type=ROUTING_POLICY_TYPE_SINGLE, - description=new_description, - cluster_id=CLUSTER_ID, - allow_transactional_writes=allow_transactional_writes) - operation.result(timeout=10) - - alt_app_profile = instance.get_app_profile(app_profile_id) - self.assertEqual(alt_app_profile.description, new_description) - self.assertIsInstance( - alt_app_profile.single_cluster_routing, - instance_pb2.AppProfile.SingleClusterRouting) - self.assertEqual( - alt_app_profile.single_cluster_routing.cluster_id, CLUSTER_ID) - self.assertEqual( - alt_app_profile.single_cluster_routing.allow_transactional_writes, - allow_transactional_writes) - - # Delete app_profile - instance.delete_app_profile(app_profile_id=app_profile_id, - ignore_warnings=ignore_warnings) - self.assertFalse(self._app_profile_exists(app_profile_id)) - - def test_create_app_profile_with_single_routing_policy(self): - from google.cloud.bigtable_admin_v2.types import instance_pb2 - - description = 'Foo App Profile' - instance = Config.INSTANCE - ignore_warnings = True - app_profile_id = 'app_profile_id_2' - - app_profile = instance.create_app_profile( - app_profile_id=app_profile_id, - routing_policy_type=ROUTING_POLICY_TYPE_SINGLE, - description=description, - cluster_id=CLUSTER_ID, - ) - - # Load a different app_profile objec form the server and - # verrify that it is the same - alt_app_profile = instance.get_app_profile(app_profile_id) - self.assertEqual(app_profile, alt_app_profile) - - # Modify existing app_profile to allow_transactional_writes - new_description = 'Allow transactional writes' - allow_transactional_writes = True - # Note: Do not need to ignore warnings when switching - # to allow 
transactional writes. - # Do need to set ignore_warnings to True, when switching to - # disallow the transactional writes. - operation = instance.update_app_profile( - app_profile_id=app_profile_id, - routing_policy_type=ROUTING_POLICY_TYPE_SINGLE, - description=new_description, - cluster_id=CLUSTER_ID, - allow_transactional_writes=allow_transactional_writes) - operation.result(timeout=10) - - alt_app_profile = instance.get_app_profile(app_profile_id) - self.assertEqual(alt_app_profile.description, new_description) - self.assertEqual( - alt_app_profile.single_cluster_routing.allow_transactional_writes, - allow_transactional_writes) - - # Modify existing app_proflie to multi cluster routing - new_description = 'To multi cluster routing' - operation = instance.update_app_profile( - app_profile_id=app_profile_id, - routing_policy_type=ROUTING_POLICY_TYPE_ANY, - description=new_description, - ignore_warnings=ignore_warnings) - operation.result(timeout=10) - - alt_app_profile = instance.get_app_profile(app_profile_id) - self.assertEqual(alt_app_profile.description, new_description) - self.assertIsInstance( - alt_app_profile.multi_cluster_routing_use_any, - instance_pb2.AppProfile.MultiClusterRoutingUseAny) - - def _app_profile_exists(self, app_profile_id): - from google.api_core import exceptions - try: - Config.INSTANCE.get_app_profile(app_profile_id) - except exceptions.NotFound: - return False - else: - return True - - def test_reload_cluster(self): - from google.cloud.bigtable.enums import StorageType - from google.cloud.bigtable.enums import Cluster - # Use same arguments as Config.INSTANCE.cluster - # (created in `setUpModule`) so we can use reload() - # on a fresh cluster. - cluster = Config.INSTANCE.cluster(CLUSTER_ID) - - cluster.reload() - self.assertEqual(cluster.location_id, LOCATION_ID) - self.assertEqual(cluster.state, Cluster.State.READY) - self.assertEqual(cluster.serve_nodes, SERVE_NODES) - # Make sure that by default an StorageType.SSD storage is used. 
- self.assertEqual(cluster.default_storage_type, StorageType.SSD) - def test_update_cluster(self): NEW_SERVE_NODES = 4 @@ -521,6 +475,76 @@ def test_create_cluster(self): cluster_2.delete() self.assertFalse(cluster_2.exists()) + def _test_create_app_profile_helper( + self, app_profile_id, instance, routing_policy_type, + description=None, cluster_id=None, + allow_transactional_writes=None, + ignore_warnings=None): + + app_profile = instance.app_profile( + app_profile_id=app_profile_id, + routing_policy_type=routing_policy_type, + description=description, + cluster_id=cluster_id, + allow_transactional_writes=allow_transactional_writes + ) + self.assertEqual(app_profile.allow_transactional_writes, + allow_transactional_writes) + + app_profile = app_profile.create(ignore_warnings=ignore_warnings) + + # Load a different app_profile objec form the server and + # verrify that it is the same + alt_app_profile = instance.app_profile(app_profile_id) + alt_app_profile.reload() + + self.assertEqual(app_profile.app_profile_id, + alt_app_profile.app_profile_id) + self.assertEqual(app_profile.routing_policy_type, + routing_policy_type) + self.assertEqual(alt_app_profile.routing_policy_type, + routing_policy_type) + self.assertEqual(app_profile.description, + alt_app_profile.description) + self.assertFalse(app_profile.allow_transactional_writes) + self.assertFalse(alt_app_profile.allow_transactional_writes) + + def _test_list_app_profiles_helper(self, instance, app_profile_ids): + app_profiles = instance.list_app_profiles() + found = [app_prof.app_profile_id for app_prof in app_profiles] + for app_profile_id in app_profile_ids: + self.assertTrue(app_profile_id in found) + + def _test_modify_app_profile_helper( + self, app_profile_id, instance, routing_policy_type, + description=None, cluster_id=None, + allow_transactional_writes=None, + ignore_warnings=None): + app_profile = instance.app_profile( + app_profile_id=app_profile_id, + routing_policy_type=routing_policy_type, + description=description, + cluster_id=cluster_id, + allow_transactional_writes=allow_transactional_writes) + + operation = app_profile.update(ignore_warnings) + operation.result(timeout=10) + + alt_app_profile = instance.app_profile(app_profile_id) + alt_app_profile.reload() + self.assertEqual(alt_app_profile.description, description) + self.assertEqual(alt_app_profile.routing_policy_type, + routing_policy_type) + self.assertEqual(alt_app_profile.cluster_id, cluster_id) + self.assertEqual(alt_app_profile.allow_transactional_writes, + allow_transactional_writes) + + def _test_delete_app_profile_helper(self, app_profile_id, instance): + app_profile = instance.app_profile(app_profile_id) + self.assertTrue(app_profile.exists()) + app_profile.delete(ignore_warnings=True) + self.assertFalse(app_profile.exists()) + class TestTableAdminAPI(unittest.TestCase): diff --git a/packages/google-cloud-bigtable/tests/unit/test_app_profile.py b/packages/google-cloud-bigtable/tests/unit/test_app_profile.py new file mode 100644 index 000000000000..378cefb431e8 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/test_app_profile.py @@ -0,0 +1,661 @@ +# Copyright 2018 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import unittest + +import mock + +from ._testing import _make_credentials + + +class MultiCallableStub(object): + """Stub for the grpc.UnaryUnaryMultiCallable interface.""" + + def __init__(self, method, channel_stub): + self.method = method + self.channel_stub = channel_stub + + def __call__(self, request, timeout=None, metadata=None, credentials=None): + self.channel_stub.requests.append((self.method, request)) + return self.channel_stub.responses.pop() + + +class ChannelStub(object): + """Stub for the grpc.Channel interface.""" + + def __init__(self, responses=[]): + self.responses = responses + self.requests = [] + + def unary_unary(self, + method, + request_serializer=None, + response_deserializer=None): + return MultiCallableStub(method, self) + + +class TestAppProfile(unittest.TestCase): + + PROJECT = 'project' + INSTANCE_ID = 'instance-id' + APP_PROFILE_ID = 'app-profile-id' + APP_PROFILE_NAME = ('projects/{}/instances/{}/appProfiles/{}' + .format(PROJECT, INSTANCE_ID, APP_PROFILE_ID)) + CLUSTER_ID = 'cluster-id' + OP_ID = 8765 + OP_NAME = ( + 'operations/projects/{}/instances/{}/appProfiles/{}/operations/{}' + .format(PROJECT, INSTANCE_ID, APP_PROFILE_ID, OP_ID)) + + @staticmethod + def _get_target_class(): + from google.cloud.bigtable.app_profile import AppProfile + + return AppProfile + + def _make_one(self, *args, **kwargs): + return self._get_target_class()(*args, **kwargs) + + @staticmethod + def _get_target_client_class(): + from google.cloud.bigtable.client import Client + + return Client + + def _make_client(self, *args, **kwargs): + return self._get_target_client_class()(*args, **kwargs) + + def test_constructor_defaults(self): + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) + + app_profile = self._make_one(self.APP_PROFILE_ID, instance) + self.assertIsInstance(app_profile, self._get_target_class()) + self.assertEqual(app_profile._instance, instance) + self.assertIsNone(app_profile.routing_policy_type) + self.assertIsNone(app_profile.description) + self.assertIsNone(app_profile.cluster_id) + self.assertIsNone(app_profile.allow_transactional_writes) + + def test_constructor_non_defaults(self): + from google.cloud.bigtable.enums import RoutingPolicyType + + ANY = RoutingPolicyType.ANY + DESCRIPTION_1 = 'routing policy any' + APP_PROFILE_ID_2 = 'app-profile-id-2' + SINGLE = RoutingPolicyType.SINGLE + DESCRIPTION_2 = 'routing policy single' + ALLOW_WRITES = True + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) + + app_profile1 = self._make_one(self.APP_PROFILE_ID, instance, + routing_policy_type=ANY, + description=DESCRIPTION_1) + app_profile2 = self._make_one(APP_PROFILE_ID_2, instance, + routing_policy_type=SINGLE, + description=DESCRIPTION_2, + cluster_id=self.CLUSTER_ID, + allow_transactional_writes=ALLOW_WRITES) + self.assertEqual(app_profile1.app_profile_id, self.APP_PROFILE_ID) + self.assertIs(app_profile1._instance, instance) + self.assertEqual(app_profile1.routing_policy_type, ANY) + self.assertEqual(app_profile1.description, DESCRIPTION_1) + self.assertEqual(app_profile2.app_profile_id, 
APP_PROFILE_ID_2) + self.assertIs(app_profile2._instance, instance) + self.assertEqual(app_profile2.routing_policy_type, SINGLE) + self.assertEqual(app_profile2.description, DESCRIPTION_2) + self.assertEqual(app_profile2.cluster_id, self.CLUSTER_ID) + self.assertEqual(app_profile2.allow_transactional_writes, ALLOW_WRITES) + + def test_name_property(self): + credentials = _make_credentials() + client = self._make_client(project=self.PROJECT, + credentials=credentials, admin=True) + instance = _Instance(self.INSTANCE_ID, client) + + app_profile = self._make_one(self.APP_PROFILE_ID, instance) + self.assertEqual(app_profile.name, self.APP_PROFILE_NAME) + + def test___eq__(self): + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) + app_profile1 = self._make_one(self.APP_PROFILE_ID, instance) + app_profile2 = self._make_one(self.APP_PROFILE_ID, instance) + self.assertTrue(app_profile1 == app_profile2) + + def test___eq__type_instance_differ(self): + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) + alt_instance = _Instance('other-instance', client) + other_object = _Other(self.APP_PROFILE_ID, instance) + app_profile1 = self._make_one(self.APP_PROFILE_ID, instance) + app_profile2 = self._make_one(self.APP_PROFILE_ID, alt_instance) + self.assertFalse(app_profile1 == other_object) + self.assertFalse(app_profile1 == app_profile2) + + def test___ne__same_value(self): + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) + app_profile1 = self._make_one(self.APP_PROFILE_ID, instance) + app_profile2 = self._make_one(self.APP_PROFILE_ID, instance) + self.assertFalse(app_profile1 != app_profile2) + + def test___ne__(self): + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) + app_profile1 = self._make_one('app_profile_id1', instance) + app_profile2 = self._make_one('app_profile_id2', instance) + self.assertTrue(app_profile1 != app_profile2) + + def test_from_pb_success_routing_any(self): + from google.cloud.bigtable_admin_v2.types import ( + instance_pb2 as data_v2_pb2) + from google.cloud.bigtable.enums import RoutingPolicyType + + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) + + desctiption = 'routing any' + routing = RoutingPolicyType.ANY + multi_cluster_routing_use_any = ( + data_v2_pb2.AppProfile.MultiClusterRoutingUseAny()) + + app_profile_pb = data_v2_pb2.AppProfile( + name=self.APP_PROFILE_NAME, + description=desctiption, + multi_cluster_routing_use_any=multi_cluster_routing_use_any) + + klass = self._get_target_class() + app_profile = klass.from_pb(app_profile_pb, instance) + self.assertIsInstance(app_profile, klass) + self.assertIs(app_profile._instance, instance) + self.assertEqual(app_profile.app_profile_id, self.APP_PROFILE_ID) + self.assertEqual(app_profile.description, desctiption) + self.assertEqual(app_profile.routing_policy_type, routing) + self.assertIsNone(app_profile.cluster_id) + self.assertEqual(app_profile.allow_transactional_writes, False) + + def test_from_pb_success_routing_single(self): + from google.cloud.bigtable_admin_v2.types import ( + instance_pb2 as data_v2_pb2) + from google.cloud.bigtable.enums import RoutingPolicyType + + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) + + desctiption = 'routing single' + allow_transactional_writes = True + routing = RoutingPolicyType.SINGLE + single_cluster_routing = ( + data_v2_pb2.AppProfile.SingleClusterRouting( + cluster_id=self.CLUSTER_ID, + 
allow_transactional_writes=allow_transactional_writes)) + + app_profile_pb = data_v2_pb2.AppProfile( + name=self.APP_PROFILE_NAME, + description=desctiption, + single_cluster_routing=single_cluster_routing) + + klass = self._get_target_class() + app_profile = klass.from_pb(app_profile_pb, instance) + self.assertIsInstance(app_profile, klass) + self.assertIs(app_profile._instance, instance) + self.assertEqual(app_profile.app_profile_id, self.APP_PROFILE_ID) + self.assertEqual(app_profile.description, desctiption) + self.assertEqual(app_profile.routing_policy_type, routing) + self.assertEqual(app_profile.cluster_id, self.CLUSTER_ID) + self.assertEqual(app_profile.allow_transactional_writes, + allow_transactional_writes) + + def test_from_pb_bad_app_profile_name(self): + from google.cloud.bigtable_admin_v2.proto import ( + instance_pb2 as data_v2_pb2) + + bad_app_profile_name = 'BAD_NAME' + + app_profile_pb = data_v2_pb2.AppProfile(name=bad_app_profile_name) + + klass = self._get_target_class() + with self.assertRaises(ValueError): + klass.from_pb(app_profile_pb, None) + + def test_from_pb_instance_id_mistmatch(self): + from google.cloud.bigtable_admin_v2.proto import ( + instance_pb2 as data_v2_pb2) + + ALT_INSTANCE_ID = 'ALT_INSTANCE_ID' + client = _Client(self.PROJECT) + instance = _Instance(ALT_INSTANCE_ID, client) + self.assertEqual(instance.instance_id, ALT_INSTANCE_ID) + + app_profile_pb = data_v2_pb2.AppProfile(name=self.APP_PROFILE_NAME) + + klass = self._get_target_class() + with self.assertRaises(ValueError): + klass.from_pb(app_profile_pb, instance) + + def test_from_pb_project_mistmatch(self): + from google.cloud.bigtable_admin_v2.proto import ( + instance_pb2 as data_v2_pb2) + + ALT_PROJECT = 'ALT_PROJECT' + client = _Client(project=ALT_PROJECT) + instance = _Instance(self.INSTANCE_ID, client) + self.assertEqual(client.project, ALT_PROJECT) + + app_profile_pb = data_v2_pb2.AppProfile(name=self.APP_PROFILE_NAME) + + klass = self._get_target_class() + with self.assertRaises(ValueError): + klass.from_pb(app_profile_pb, instance) + + def test_reload_routing_any(self): + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_instance_admin_client) + from google.cloud.bigtable_admin_v2.proto import ( + instance_pb2 as data_v2_pb2) + from google.cloud.bigtable.enums import RoutingPolicyType + + api = bigtable_instance_admin_client.BigtableInstanceAdminClient( + mock.Mock()) + credentials = _make_credentials() + client = self._make_client(project=self.PROJECT, + credentials=credentials, admin=True) + instance = _Instance(self.INSTANCE_ID, client) + + routing = RoutingPolicyType.ANY + description = 'routing policy any' + + app_profile = self._make_one(self.APP_PROFILE_ID, instance, + routing_policy_type=routing, + description=description) + + # Create response_pb + description_from_server = 'routing policy switched to single' + cluster_id_from_server = self.CLUSTER_ID + allow_transactional_writes = True + single_cluster_routing = ( + data_v2_pb2.AppProfile.SingleClusterRouting( + cluster_id=cluster_id_from_server, + allow_transactional_writes=allow_transactional_writes)) + + response_pb = data_v2_pb2.AppProfile( + name=app_profile.name, + single_cluster_routing=single_cluster_routing, + description=description_from_server) + + # Patch the stub used by the API method. + client._instance_admin_client = api + instance_stub = ( + client._instance_admin_client.bigtable_instance_admin_stub) + instance_stub.GetCluster.side_effect = [response_pb] + + # Create expected_result. 
+ expected_result = None # reload() has no return value. + + # Check app_profile config values before. + self.assertEqual(app_profile.routing_policy_type, routing) + self.assertEqual(app_profile.description, description) + self.assertIsNone(app_profile.cluster_id) + self.assertIsNone(app_profile.allow_transactional_writes) + + # Perform the method and check the result. + result = app_profile.reload() + self.assertEqual(result, expected_result) + self.assertEqual(app_profile.routing_policy_type, + RoutingPolicyType.SINGLE) + self.assertEqual(app_profile.description, description_from_server) + self.assertEqual(app_profile.cluster_id, cluster_id_from_server) + self.assertEqual(app_profile.allow_transactional_writes, + allow_transactional_writes) + + def test_exists(self): + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_instance_admin_client) + from google.cloud.bigtable_admin_v2.proto import ( + instance_pb2 as data_v2_pb2) + from google.api_core import exceptions + + instance_api = ( + bigtable_instance_admin_client.BigtableInstanceAdminClient( + mock.Mock())) + credentials = _make_credentials() + client = self._make_client(project=self.PROJECT, + credentials=credentials, admin=True) + instance = client.instance(self.INSTANCE_ID) + + # Create response_pb + response_pb = data_v2_pb2.AppProfile(name=self.APP_PROFILE_NAME) + client._instance_admin_client = instance_api + + # Patch the stub used by the API method. + client._instance_admin_client = instance_api + instance_stub = ( + client._instance_admin_client.bigtable_instance_admin_stub) + instance_stub.GetCluster.side_effect = [ + response_pb, + exceptions.NotFound('testing'), + exceptions.BadRequest('testing'), + ] + + # Perform the method and check the result. + non_existing_app_profile_id = 'other-app-profile-id' + app_profile = self._make_one(self.APP_PROFILE_ID, instance) + alt_app_profile = self._make_one(non_existing_app_profile_id, instance) + self.assertTrue(app_profile.exists()) + self.assertFalse(alt_app_profile.exists()) + with self.assertRaises(exceptions.BadRequest): + alt_app_profile.exists() + + def test_create_routing_any(self): + from google.cloud.bigtable_admin_v2.proto import ( + bigtable_instance_admin_pb2 as messages_v2_pb2) + from google.cloud.bigtable.enums import RoutingPolicyType + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_instance_admin_client) + + credentials = _make_credentials() + client = self._make_client(project=self.PROJECT, + credentials=credentials, admin=True) + instance = client.instance(self.INSTANCE_ID) + + routing = RoutingPolicyType.ANY + description = 'routing policy any' + ignore_warnings = True + + app_profile = self._make_one(self.APP_PROFILE_ID, instance, + routing_policy_type=routing, + description=description) + expected_request_app_profile = app_profile._to_pb() + expected_request = messages_v2_pb2.CreateAppProfileRequest( + parent=instance.name, app_profile_id=self.APP_PROFILE_ID, + app_profile=expected_request_app_profile, + ignore_warnings=ignore_warnings + ) + + # Patch the stub used by the API method. + channel = ChannelStub(responses=[expected_request_app_profile]) + instance_api = ( + bigtable_instance_admin_client.BigtableInstanceAdminClient( + channel=channel)) + client._instance_admin_client = instance_api + # Perform the method and check the result. 
+ result = app_profile.create(ignore_warnings) + actual_request = channel.requests[0][1] + + self.assertEqual(actual_request, expected_request) + self.assertIsInstance(result, self._get_target_class()) + self.assertEqual(result.app_profile_id, self.APP_PROFILE_ID) + self.assertIs(result._instance, instance) + self.assertEqual(result.routing_policy_type, routing) + self.assertEqual(result.description, description) + self.assertEqual(result.allow_transactional_writes, False) + self.assertIsNone(result.cluster_id) + + def test_create_routing_single(self): + from google.cloud.bigtable_admin_v2.proto import ( + bigtable_instance_admin_pb2 as messages_v2_pb2) + from google.cloud.bigtable.enums import RoutingPolicyType + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_instance_admin_client) + + credentials = _make_credentials() + client = self._make_client(project=self.PROJECT, + credentials=credentials, admin=True) + instance = client.instance(self.INSTANCE_ID) + + routing = RoutingPolicyType.SINGLE + description = 'routing policy single' + allow_writes = False + ignore_warnings = True + + app_profile = self._make_one(self.APP_PROFILE_ID, instance, + routing_policy_type=routing, + description=description, + cluster_id=self.CLUSTER_ID, + allow_transactional_writes=allow_writes) + expected_request_app_profile = app_profile._to_pb() + expected_request = messages_v2_pb2.CreateAppProfileRequest( + parent=instance.name, app_profile_id=self.APP_PROFILE_ID, + app_profile=expected_request_app_profile, + ignore_warnings=ignore_warnings + ) + + # Patch the stub used by the API method. + channel = ChannelStub(responses=[expected_request_app_profile]) + instance_api = ( + bigtable_instance_admin_client.BigtableInstanceAdminClient( + channel=channel)) + client._instance_admin_client = instance_api + # Perform the method and check the result. 
+ result = app_profile.create(ignore_warnings) + actual_request = channel.requests[0][1] + + self.assertEqual(actual_request, expected_request) + self.assertIsInstance(result, self._get_target_class()) + self.assertEqual(result.app_profile_id, self.APP_PROFILE_ID) + self.assertIs(result._instance, instance) + self.assertEqual(result.routing_policy_type, routing) + self.assertEqual(result.description, description) + self.assertEqual(result.allow_transactional_writes, allow_writes) + self.assertEqual(result.cluster_id, self.CLUSTER_ID) + + def test_create_app_profile_with_wrong_routing_policy(self): + credentials = _make_credentials() + client = self._make_client(project=self.PROJECT, + credentials=credentials, admin=True) + instance = client.instance(self.INSTANCE_ID) + app_profile = self._make_one(self.APP_PROFILE_ID, instance, + routing_policy_type=None) + with self.assertRaises(ValueError): + app_profile.create() + + def test_update_app_profile_routing_any(self): + from google.api_core import operation + from google.longrunning import operations_pb2 + from google.protobuf.any_pb2 import Any + from google.cloud.bigtable_admin_v2.proto import ( + bigtable_instance_admin_pb2 as messages_v2_pb2) + from google.cloud.bigtable.enums import RoutingPolicyType + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_instance_admin_client) + from google.protobuf import field_mask_pb2 + + credentials = _make_credentials() + client = self._make_client(project=self.PROJECT, + credentials=credentials, admin=True) + instance = client.instance(self.INSTANCE_ID) + + routing = RoutingPolicyType.SINGLE + description = 'to routing policy single' + allow_writes = True + app_profile = self._make_one(self.APP_PROFILE_ID, instance, + routing_policy_type=routing, + description=description, + cluster_id=self.CLUSTER_ID, + allow_transactional_writes=allow_writes) + + # Create response_pb + metadata = messages_v2_pb2.UpdateAppProfileMetadata() + type_url = 'type.googleapis.com/{}'.format( + messages_v2_pb2.UpdateAppProfileMetadata.DESCRIPTOR.full_name) + response_pb = operations_pb2.Operation( + name=self.OP_NAME, + metadata=Any( + type_url=type_url, + value=metadata.SerializeToString(), + ) + ) + + # Patch the stub used by the API method. + channel = ChannelStub(responses=[response_pb]) + instance_api = ( + bigtable_instance_admin_client.BigtableInstanceAdminClient( + channel=channel)) + # Mock api calls + client._instance_admin_client = instance_api + + # Perform the method and check the result. 
+ ignore_warnings = True + expected_request_update_mask = field_mask_pb2.FieldMask( + paths=['description', 'single_cluster_routing'] + ) + expected_request = messages_v2_pb2.UpdateAppProfileRequest( + app_profile=app_profile._to_pb(), + update_mask=expected_request_update_mask, + ignore_warnings=ignore_warnings + ) + + result = app_profile.update(ignore_warnings=ignore_warnings) + actual_request = channel.requests[0][1] + + self.assertEqual(actual_request, expected_request) + self.assertIsInstance(result, operation.Operation) + self.assertEqual(result.operation.name, self.OP_NAME) + self.assertIsInstance(result.metadata, + messages_v2_pb2.UpdateAppProfileMetadata) + + def test_update_app_profile_routing_single(self): + from google.api_core import operation + from google.longrunning import operations_pb2 + from google.protobuf.any_pb2 import Any + from google.cloud.bigtable_admin_v2.proto import ( + bigtable_instance_admin_pb2 as messages_v2_pb2) + from google.cloud.bigtable.enums import RoutingPolicyType + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_instance_admin_client) + from google.protobuf import field_mask_pb2 + + credentials = _make_credentials() + client = self._make_client(project=self.PROJECT, + credentials=credentials, admin=True) + instance = client.instance(self.INSTANCE_ID) + + routing = RoutingPolicyType.ANY + app_profile = self._make_one(self.APP_PROFILE_ID, instance, + routing_policy_type=routing) + + # Create response_pb + metadata = messages_v2_pb2.UpdateAppProfileMetadata() + type_url = 'type.googleapis.com/{}'.format( + messages_v2_pb2.UpdateAppProfileMetadata.DESCRIPTOR.full_name) + response_pb = operations_pb2.Operation( + name=self.OP_NAME, + metadata=Any( + type_url=type_url, + value=metadata.SerializeToString(), + ) + ) + + # Patch the stub used by the API method. + channel = ChannelStub(responses=[response_pb]) + instance_api = ( + bigtable_instance_admin_client.BigtableInstanceAdminClient( + channel=channel)) + # Mock api calls + client._instance_admin_client = instance_api + + # Perform the method and check the result. 
+ ignore_warnings = True + expected_request_update_mask = field_mask_pb2.FieldMask( + paths=['multi_cluster_routing_use_any'] + ) + expected_request = messages_v2_pb2.UpdateAppProfileRequest( + app_profile=app_profile._to_pb(), + update_mask=expected_request_update_mask, + ignore_warnings=ignore_warnings + ) + + result = app_profile.update(ignore_warnings=ignore_warnings) + actual_request = channel.requests[0][1] + + self.assertEqual(actual_request, expected_request) + self.assertIsInstance(result, operation.Operation) + self.assertEqual(result.operation.name, self.OP_NAME) + self.assertIsInstance(result.metadata, + messages_v2_pb2.UpdateAppProfileMetadata) + + def test_update_app_profile_with_wrong_routing_policy(self): + credentials = _make_credentials() + client = self._make_client(project=self.PROJECT, + credentials=credentials, admin=True) + instance = client.instance(self.INSTANCE_ID) + app_profile = self._make_one(self.APP_PROFILE_ID, instance, + routing_policy_type=None) + with self.assertRaises(ValueError): + app_profile.update() + + def test_delete(self): + from google.protobuf import empty_pb2 + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_instance_admin_client) + + instance_api = ( + bigtable_instance_admin_client.BigtableInstanceAdminClient( + mock.Mock())) + + credentials = _make_credentials() + client = self._make_client(project=self.PROJECT, + credentials=credentials, admin=True) + instance = client.instance(self.INSTANCE_ID) + app_profile = self._make_one(self.APP_PROFILE_ID, instance) + + # Create response_pb + response_pb = empty_pb2.Empty() + + # Patch the stub used by the API method. + client._instance_admin_client = instance_api + instance_stub = ( + client._instance_admin_client.bigtable_instance_admin_stub) + instance_stub.DeleteCluster.side_effect = [response_pb] + + # Create expected_result. + expected_result = None # delete() has no return value. + + # Perform the method and check the result. 
+ result = app_profile.delete() + + self.assertEqual(result, expected_result) + + +class _Client(object): + + def __init__(self, project): + self.project = project + self.project_name = 'projects/' + self.project + self._operations_stub = mock.sentinel.operations_stub + + def __eq__(self, other): + return (other.project == self.project and + other.project_name == self.project_name) + + +class _Instance(object): + + def __init__(self, instance_id, client): + self.instance_id = instance_id + self._client = client + + def __eq__(self, other): + return (other.instance_id == self.instance_id and + other._client == self._client) + + +class _Other(object): + + def __init__(self, app_profile_id, instance): + self.app_profile_id = app_profile_id + self._instance = instance diff --git a/packages/google-cloud-bigtable/tests/unit/test_instance.py b/packages/google-cloud-bigtable/tests/unit/test_instance.py index 23bcdab4602e..c31d09ad163f 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_instance.py +++ b/packages/google-cloud-bigtable/tests/unit/test_instance.py @@ -799,173 +799,52 @@ def test_list_tables_failure_name_bad_before(self): with self.assertRaises(ValueError): self._list_tables_helper(table_name=BAD_TABLE_NAME) - def test_create_app_profile_with_wrong_routing_policy(self): - credentials = _make_credentials() - client = self._make_client(project=self.PROJECT, - credentials=credentials, admin=True) - instance = self._make_one(self.INSTANCE_ID, client) - - app_profile_id = 'appProfileId1262094415' - - # Create AppProfile with exception - with self.assertRaises(ValueError): - instance.create_app_profile(app_profile_id=app_profile_id, - routing_policy_type=None) - - with self.assertRaises(ValueError): - instance.update_app_profile(app_profile_id, - routing_policy_type=None) - - def test_create_app_profile_with_multi_routing_policy(self): - from google.cloud.bigtable_admin_v2.proto import instance_pb2 - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_instance_admin_client) + def test_app_profile_factory(self): from google.cloud.bigtable.enums import RoutingPolicyType - credentials = _make_credentials() - client = self._make_client(project=self.PROJECT, - credentials=credentials, admin=True) - instance = self._make_one(self.INSTANCE_ID, client) + APP_PROFILE_ID_1 = 'app-profile-id-1' + ANY = RoutingPolicyType.ANY + DESCRIPTION_1 = 'routing policy any' + APP_PROFILE_ID_2 = 'app-profile-id-2' + SINGLE = RoutingPolicyType.SINGLE + DESCRIPTION_2 = 'routing policy single' + ALLOW_WRITES = True + CLUSTER_ID = 'cluster-id' - description = 'description-1724546052' - app_profile_id = 'appProfileId1262094415' - expected_response = { - 'name': self.APP_PROFILE_PATH + app_profile_id, - 'description': description, - 'multi_cluster_routing_use_any': - instance_pb2.AppProfile.MultiClusterRoutingUseAny() - } - expected_request = { - 'app_profile_id': app_profile_id, - 'routing_policy_type': RoutingPolicyType.ANY, - 'description': description - } - expected_response = instance_pb2.AppProfile(**expected_response) - - channel = ChannelStub(responses=[expected_response]) - instance_api = ( - bigtable_instance_admin_client.BigtableInstanceAdminClient( - channel=channel)) - - # Patch the stub used by the API method. - client._instance_admin_client = instance_api - - # Perform the method and check the result. 
- result = instance.create_app_profile(**expected_request) + instance = self._make_one(self.INSTANCE_ID, None) - parent = client._instance_admin_client.instance_path( - self.PROJECT, self.INSTANCE_ID) - expected_request = _CreateAppProfileRequestPB( - parent=parent, app_profile_id=app_profile_id, - app_profile=expected_response, + app_profile1 = instance.app_profile( + APP_PROFILE_ID_1, + routing_policy_type=ANY, + description=DESCRIPTION_1, ) - actual_request = channel.requests[0][1] - self.assertEqual(expected_request, actual_request) - self.assertEqual(result, expected_response) - - def test_create_app_profile_with_single_routing_policy(self): - from google.cloud.bigtable_admin_v2.proto import instance_pb2 - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_instance_admin_client) - from google.cloud.bigtable.enums import RoutingPolicyType - - credentials = _make_credentials() - client = self._make_client(project=self.PROJECT, - credentials=credentials, admin=True) - instance = self._make_one(self.INSTANCE_ID, client) - - description = 'description-1724546052' - app_profile_id = 'appProfileId1262094415' - cluster_id = 'cluster-id' - expected_response = { - 'name': self.APP_PROFILE_PATH + app_profile_id, - 'description': description, - 'single_cluster_routing': - instance_pb2.AppProfile.SingleClusterRouting( - cluster_id=cluster_id, - allow_transactional_writes=False - ) - } - expected_request = { - 'app_profile_id': app_profile_id, - 'routing_policy_type': RoutingPolicyType.SINGLE, - 'description': description, - 'cluster_id': cluster_id - } - expected_response = instance_pb2.AppProfile(**expected_response) - - channel = ChannelStub(responses=[expected_response]) - instance_api = ( - bigtable_instance_admin_client.BigtableInstanceAdminClient( - channel=channel)) - - # Patch the stub used by the API method. - client._instance_admin_client = instance_api - - # Perform the method and check the result. - result = instance.create_app_profile(**expected_request) - - parent = client._instance_admin_client.instance_path( - self.PROJECT, self.INSTANCE_ID) - expected_request = _CreateAppProfileRequestPB( - parent=parent, app_profile_id=app_profile_id, - app_profile=expected_response, + app_profile2 = instance.app_profile( + APP_PROFILE_ID_2, + routing_policy_type=SINGLE, + description=DESCRIPTION_2, + cluster_id=CLUSTER_ID, + allow_transactional_writes=ALLOW_WRITES, ) - - actual_request = channel.requests[0][1] - self.assertEqual(expected_request, actual_request) - self.assertEqual(result, expected_response) - - def test_get_app_profile(self): - from google.cloud.bigtable_admin_v2.proto import ( - instance_pb2 as instance_data_v2_pb2) - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_instance_admin_client) - - instance_api = ( - bigtable_instance_admin_client.BigtableInstanceAdminClient( - mock.Mock())) - - credentials = _make_credentials() - client = self._make_client(project=self.PROJECT, - credentials=credentials, admin=True) - instance = self._make_one(self.INSTANCE_ID, client) - - name = 'name3373707' - etag = 'etag3123477' - description = 'description-1724546052' - expected_response = { - 'name': name, - 'etag': etag, - 'description': description - } - expected_response = instance_data_v2_pb2.AppProfile( - **expected_response) - - response_pb = instance_data_v2_pb2.AppProfile( - name=name, - etag=etag, - description=description - ) - - # Patch the stub used by the API method. 
- client._instance_admin_client = instance_api - bigtable_instance_stub = ( - client._instance_admin_client.bigtable_instance_admin_stub) - bigtable_instance_stub.GetAppProfile.side_effect = [response_pb] - - # Perform the method and check the result. - app_profile_id = 'appProfileId1262094415' - result = instance.get_app_profile(app_profile_id=app_profile_id) - - self.assertEqual(result, expected_response) + self.assertEqual(app_profile1.app_profile_id, APP_PROFILE_ID_1) + self.assertIs(app_profile1._instance, instance) + self.assertEqual(app_profile1.routing_policy_type, ANY) + self.assertEqual(app_profile1.description, DESCRIPTION_1) + self.assertEqual(app_profile2.app_profile_id, APP_PROFILE_ID_2) + self.assertIs(app_profile2._instance, instance) + self.assertEqual(app_profile2.routing_policy_type, SINGLE) + self.assertEqual(app_profile2.description, DESCRIPTION_2) + self.assertEqual(app_profile2.cluster_id, CLUSTER_ID) + self.assertEqual(app_profile2.allow_transactional_writes, ALLOW_WRITES) def test_list_app_profiles(self): - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as instance_messages_v1_pb2) from google.cloud.bigtable_admin_v2.gapic import ( bigtable_instance_admin_client) + from google.cloud.bigtable_admin_v2.proto import ( + bigtable_instance_admin_pb2 as messages_v2_pb2) + from google.cloud.bigtable_admin_v2.proto import ( + instance_pb2 as data_v2_pb2) + from google.cloud.bigtable.app_profile import AppProfile instance_api = ( bigtable_instance_admin_client.BigtableInstanceAdminClient( @@ -978,14 +857,27 @@ def test_list_app_profiles(self): # Setup Expected Response next_page_token = '' - app_profiles_element = {} - app_profiles = [app_profiles_element] - expected_response = { - 'next_page_token': next_page_token, - 'app_profiles': app_profiles - } - expected_response = instance_messages_v1_pb2.ListAppProfilesResponse( - **expected_response) + app_profile_id1 = 'app-profile-id1' + app_profile_id2 = 'app-profile-id2' + app_profile_name1 = (client.instance_admin_client.app_profile_path( + self.PROJECT, self.INSTANCE_ID, app_profile_id1)) + app_profile_name2 = (client.instance_admin_client.app_profile_path( + self.PROJECT, self.INSTANCE_ID, app_profile_id2)) + routing_policy = data_v2_pb2.AppProfile.MultiClusterRoutingUseAny() + + expected_response = messages_v2_pb2.ListAppProfilesResponse( + next_page_token=next_page_token, + app_profiles=[ + data_v2_pb2.AppProfile( + name=app_profile_name1, + multi_cluster_routing_use_any=routing_policy, + ), + data_v2_pb2.AppProfile( + name=app_profile_name2, + multi_cluster_routing_use_any=routing_policy, + ) + ], + ) # Patch the stub used by the API method. client._instance_admin_client = instance_api @@ -995,180 +887,15 @@ def test_list_app_profiles(self): expected_response] # Perform the method and check the result. 
- response = instance.list_app_profiles() + app_profiles = instance.list_app_profiles() - self.assertEqual(response[0], expected_response.app_profiles[0]) + app_profile_1, app_profile_2 = app_profiles - def test_update_app_profile_multi_cluster_routing_policy(self): - from google.api_core import operation - from google.longrunning import operations_pb2 - from google.protobuf.any_pb2 import Any - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as messages_v2_pb2) - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_instance_admin_client) - from google.cloud.bigtable_admin_v2.types import instance_pb2 - from google.protobuf import field_mask_pb2 - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as instance_v2_pb2) - from google.cloud.bigtable.enums import RoutingPolicyType + self.assertIsInstance(app_profile_1, AppProfile) + self.assertEqual(app_profile_1.name, app_profile_name1) - credentials = _make_credentials() - client = self._make_client(project=self.PROJECT, - credentials=credentials, admin=True) - instance = self._make_one(self.INSTANCE_ID, client) - - # Create response_pb - metadata = messages_v2_pb2.UpdateAppProfileMetadata() - type_url = 'type.googleapis.com/{}'.format( - messages_v2_pb2.UpdateAppProfileMetadata.DESCRIPTOR.full_name) - response_pb = operations_pb2.Operation( - name=self.OP_NAME, - metadata=Any( - type_url=type_url, - value=metadata.SerializeToString(), - ) - ) - - # Patch the stub used by the API method. - channel = ChannelStub(responses=[response_pb]) - instance_api = ( - bigtable_instance_admin_client.BigtableInstanceAdminClient( - channel=channel)) - # Mock api calls - client._instance_admin_client = instance_api - - # Perform the method and check the result. 
- description = 'description-1724546052' - app_profile_id = 'appProfileId1262094415' - ignore_warnings = True - multi_cluster_routing_use_any = ( - instance_pb2.AppProfile.MultiClusterRoutingUseAny()) - expected_request_app_profile = instance_pb2.AppProfile( - name=self.APP_PROFILE_PATH + app_profile_id, - description=description, - multi_cluster_routing_use_any=multi_cluster_routing_use_any - ) - expected_request_update_mask = field_mask_pb2.FieldMask( - paths=['description', 'multi_cluster_routing_use_any'] - ) - expected_request = instance_v2_pb2.UpdateAppProfileRequest( - app_profile=expected_request_app_profile, - update_mask=expected_request_update_mask, - ignore_warnings=ignore_warnings - ) - - result = instance.update_app_profile(app_profile_id, - RoutingPolicyType.ANY, - description=description, - ignore_warnings=ignore_warnings) - actual_request = channel.requests[0][1] - - self.assertEqual(actual_request, expected_request) - self.assertIsInstance(result, operation.Operation) - self.assertEqual(result.operation.name, self.OP_NAME) - self.assertIsInstance(result.metadata, - messages_v2_pb2.UpdateAppProfileMetadata) - - def test_update_app_profile_single_routing_policy(self): - from google.api_core import operation - from google.longrunning import operations_pb2 - from google.protobuf.any_pb2 import Any - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as messages_v2_pb2) - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_instance_admin_client) - from google.cloud.bigtable_admin_v2.types import instance_pb2 - from google.protobuf import field_mask_pb2 - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as instance_v2_pb2) - from google.cloud.bigtable.enums import RoutingPolicyType - - credentials = _make_credentials() - client = self._make_client(project=self.PROJECT, - credentials=credentials, admin=True) - instance = self._make_one(self.INSTANCE_ID, client) - - # Create response_pb - metadata = messages_v2_pb2.UpdateAppProfileMetadata() - type_url = 'type.googleapis.com/{}'.format( - messages_v2_pb2.UpdateAppProfileMetadata.DESCRIPTOR.full_name) - response_pb = operations_pb2.Operation( - name=self.OP_NAME, - metadata=Any( - type_url=type_url, - value=metadata.SerializeToString(), - ) - ) - - # Patch the stub used by the API method. - channel = ChannelStub(responses=[response_pb]) - instance_api = ( - bigtable_instance_admin_client.BigtableInstanceAdminClient( - channel=channel)) - # Mock api calls - client._instance_admin_client = instance_api - - # Perform the method and check the result. 
- app_profile_id = 'appProfileId1262094415' - cluster_id = 'cluster-id' - allow_transactional_writes = True - ignore_warnings = True - single_cluster_routing = ( - instance_pb2.AppProfile.SingleClusterRouting( - cluster_id=cluster_id, - allow_transactional_writes=allow_transactional_writes - )) - expected_request_app_profile = instance_pb2.AppProfile( - name=self.APP_PROFILE_PATH + app_profile_id, - single_cluster_routing=single_cluster_routing - ) - expected_request_update_mask = field_mask_pb2.FieldMask( - paths=['single_cluster_routing'] - ) - expected_request = instance_v2_pb2.UpdateAppProfileRequest( - app_profile=expected_request_app_profile, - update_mask=expected_request_update_mask, - ignore_warnings=ignore_warnings - ) - - result = instance.update_app_profile(app_profile_id, - RoutingPolicyType.SINGLE, - ignore_warnings=ignore_warnings, - cluster_id=cluster_id, - allow_transactional_writes=( - allow_transactional_writes)) - actual_request = channel.requests[0][1] - - self.assertEqual(actual_request, expected_request) - self.assertIsInstance(result, operation.Operation) - self.assertEqual(result.operation.name, self.OP_NAME) - self.assertIsInstance(result.metadata, - messages_v2_pb2.UpdateAppProfileMetadata) - - def test_delete_app_profile(self): - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_instance_admin_client) - - instance_api = ( - bigtable_instance_admin_client.BigtableInstanceAdminClient( - mock.Mock())) - credentials = _make_credentials() - client = self._make_client(project=self.PROJECT, - credentials=credentials, admin=True) - instance = self._make_one(self.INSTANCE_ID, client) - - # Patch the stub used by the API method. - client._instance_admin_client = instance_api - - ignore_warnings = True - - expected_result = None # delete() has no return value. 
- - app_profile_id = 'appProfileId1262094415' - result = instance.delete_app_profile(app_profile_id, ignore_warnings) - - self.assertEqual(expected_result, result) + self.assertIsInstance(app_profile_2, AppProfile) + self.assertEqual(app_profile_2.name, app_profile_name2) class _Client(object): @@ -1181,10 +908,3 @@ def __init__(self, project): def __eq__(self, other): return (other.project == self.project and other.project_name == self.project_name) - - -def _CreateAppProfileRequestPB(*args, **kw): - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as instance_v2_pb2) - - return instance_v2_pb2.CreateAppProfileRequest(*args, **kw) From 073370d794b89bf41348659abfc5905051bb1395 Mon Sep 17 00:00:00 2001 From: vikas-jamdar <39574687+vikas-jamdar@users.noreply.github.com> Date: Wed, 22 Aug 2018 01:02:51 +0530 Subject: [PATCH 166/892] Bigtable: Add 'Table.get_cluster_states' method (#5790) --- .../google/cloud/bigtable/enums.py | 32 +++++ .../google/cloud/bigtable/table.py | 95 ++++++++++++- .../google-cloud-bigtable/tests/system.py | 31 +++++ .../tests/unit/test_table.py | 127 ++++++++++++++++++ 4 files changed, 284 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/enums.py b/packages/google-cloud-bigtable/google/cloud/bigtable/enums.py index 140cebdd0305..a65d5651e416 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/enums.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/enums.py @@ -149,3 +149,35 @@ class View(object): SCHEMA_VIEW = enums.Table.View.SCHEMA_VIEW REPLICATION_VIEW = enums.Table.View.REPLICATION_VIEW FULL = enums.Table.View.FULL + + class ReplicationState(object): + """ + Table replication states. + + Attributes: + STATE_NOT_KNOWN (int): The replication state of the table is unknown + in this cluster. + INITIALIZING (int): The cluster was recently created, and the table + must finish copying + over pre-existing data from other clusters before it can begin + receiving live replication updates and serving + ``Data API`` requests. + PLANNED_MAINTENANCE (int): The table is temporarily unable to serve + ``Data API`` requests from this + cluster due to planned internal maintenance. + UNPLANNED_MAINTENANCE (int): The table is temporarily unable to serve + ``Data API`` requests from this + cluster due to unplanned or emergency maintenance. + READY (int): The table can serve + ``Data API`` requests from this + cluster. Depending on replication delay, reads may not immediately + reflect the state of the table in other clusters. 
+ """ + STATE_NOT_KNOWN = enums.Table.ClusterState.ReplicationState.\ + STATE_NOT_KNOWN + INITIALIZING = enums.Table.ClusterState.ReplicationState.INITIALIZING + PLANNED_MAINTENANCE = enums.Table.ClusterState.ReplicationState.\ + PLANNED_MAINTENANCE + UNPLANNED_MAINTENANCE = enums.Table.ClusterState.ReplicationState.\ + UNPLANNED_MAINTENANCE + READY = enums.Table.ClusterState.ReplicationState.READY diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index 28ec2e355be6..281f96eca379 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -33,7 +33,7 @@ from google.cloud.bigtable.row_data import YieldRowsData from google.cloud.bigtable.row_set import RowSet from google.cloud.bigtable.row_set import RowRange -from google.cloud.bigtable_admin_v2 import enums +from google.cloud.bigtable import enums from google.cloud.bigtable_v2.proto import ( bigtable_pb2 as data_messages_v2_pb2) from google.cloud.bigtable_admin_v2.proto import ( @@ -257,6 +257,22 @@ def list_column_families(self): result[column_family_id] = column_family return result + def get_cluster_states(self): + """List the cluster states owned by this table. + + :rtype: dict + :returns: Dictionary of cluster states for this table. + Keys are cluster ids and values are + :class: 'ClusterState' instances. + """ + + REPLICATION_VIEW = enums.Table.View.REPLICATION_VIEW + table_client = self._instance._client.table_admin_client + table_pb = table_client.get_table(self.name, view=REPLICATION_VIEW) + + return {cluster_id: ClusterState(value_pb.replication_state) + for cluster_id, value_pb in table_pb.cluster_states.items()} + def read_row(self, row_key, filter_=None): """Read a single row from this table. @@ -618,6 +634,83 @@ def _do_mutate_retryable_rows(self): return self.responses_statuses +class ClusterState(object): + """Representation of a Cluster State. + + :type replication_state: int + :param replication_state: enum value for cluster state + Possible replications_state values are + 0 for STATE_NOT_KNOWN: The replication state of the table is + unknown in this cluster. + 1 for INITIALIZING: The cluster was recently created, and the + table must finish copying + over pre-existing data from other clusters before it can + begin receiving live replication updates and serving + ``Data API`` requests. + 2 for PLANNED_MAINTENANCE: The table is temporarily unable to + serve + ``Data API`` requests from this + cluster due to planned internal maintenance. + 3 for UNPLANNED_MAINTENANCE: The table is temporarily unable + to serve + ``Data API`` requests from this + cluster due to unplanned or emergency maintenance. + 4 for READY: The table can serve + ``Data API`` requests from this + cluster. Depending on replication delay, reads may not + immediately reflect the state of the table in other clusters. + """ + + def __init__(self, replication_state): + self.replication_state = replication_state + + def __repr__(self): + """Representation of cluster state instance as string value + for cluster state. + + :rtype: ClusterState instance + :returns: ClusterState instance as representation of string + value for cluster state. 
+ """ + replication_dict = { + enums.Table.ReplicationState.STATE_NOT_KNOWN: "STATE_NOT_KNOWN", + enums.Table.ReplicationState.INITIALIZING: "INITIALIZING", + enums.Table.ReplicationState.PLANNED_MAINTENANCE: + "PLANNED_MAINTENANCE", + enums.Table.ReplicationState.UNPLANNED_MAINTENANCE: + "UNPLANNED_MAINTENANCE", + enums.Table.ReplicationState.READY: "READY" + } + return replication_dict[self.replication_state] + + def __eq__(self, other): + """Checks if two ClusterState instances(self and other) are + equal on the basis of instance variable 'replication_state'. + + :type other: ClusterState + :param other: ClusterState instance to compare with. + + :rtype: Boolean value + :returns: True if two cluster state instances have same + replication_state. + """ + if not isinstance(other, self.__class__): + return False + return self.replication_state == other.replication_state + + def __ne__(self, other): + """Checks if two ClusterState instances(self and other) are + not equal. + + :type other: ClusterState. + :param other: ClusterState instance to compare with. + + :rtype: Boolean value. + :returns: True if two cluster state instances are not equal. + """ + return not self == other + + def _create_row_request(table_name, row_key=None, start_key=None, end_key=None, filter_=None, limit=None, end_inclusive=False, app_profile_id=None, row_set=None): diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index 64cc6b94c105..8388542fe284 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -273,6 +273,37 @@ def test_create_instance_w_two_clusters(self): self.assertTrue({alt_cluster_1.name, alt_cluster_2.name, Config.CLUSTER.name}.issubset(found)) + self._test_state_helper(instance, ALT_CLUSTER_ID_1, + ALT_CLUSTER_ID_2) + + def _test_state_helper(self, instance, clusterid1, clusterid2): + # test get_cluster_states for a table in instance + from google.cloud.bigtable.enums import Table as enum_table + from google.cloud.bigtable.table import ClusterState + STATE_NOT_KNOWN = enum_table.ReplicationState.STATE_NOT_KNOWN + INITIALIZING = enum_table.ReplicationState.INITIALIZING + PLANNED_MAINTENANCE = enum_table.ReplicationState. \ + PLANNED_MAINTENANCE + UNPLANNED_MAINTENANCE = enum_table.ReplicationState. 
\ + UNPLANNED_MAINTENANCE + READY = enum_table.ReplicationState.READY + temp_table_id = 'test-get-cluster-states' + temp_table = instance.table(temp_table_id) + temp_table.create() + result = temp_table.get_cluster_states() + expected_results = [ + ClusterState(STATE_NOT_KNOWN), + ClusterState(INITIALIZING), + ClusterState(PLANNED_MAINTENANCE), + ClusterState(UNPLANNED_MAINTENANCE), + ClusterState(READY) + ] + cluster_id_list = result.keys() + self.assertEqual(len(cluster_id_list), 2) + self.assertIn(clusterid1, cluster_id_list) + self.assertIn(clusterid2, cluster_id_list) + for clusterstate in result.values(): + self.assertIn(clusterstate, expected_results) # Test create app profile with multi_cluster_routing policy app_profiles_to_delete = [] diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index 4507a16dc096..923032f2da29 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -411,6 +411,48 @@ def _list_column_families_helper(self): def test_list_column_families(self): self._list_column_families_helper() + def test_get_cluster_states(self): + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_table_admin_client) + from google.cloud.bigtable.enums import Table as enum_table + from google.cloud.bigtable.table import ClusterState + INITIALIZING = enum_table.ReplicationState.INITIALIZING + PLANNED_MAINTENANCE = enum_table.ReplicationState.PLANNED_MAINTENANCE + READY = enum_table.ReplicationState.READY + + table_api = bigtable_table_admin_client.BigtableTableAdminClient( + mock.Mock()) + credentials = _make_credentials() + client = self._make_client(project='project-id', + credentials=credentials, admin=True) + instance = client.instance(instance_id=self.INSTANCE_ID) + table = self._make_one(self.TABLE_ID, instance) + + response_pb = _TablePB( + cluster_states={'cluster-id1': _ClusterStatePB(INITIALIZING), + 'cluster-id2': _ClusterStatePB( + PLANNED_MAINTENANCE), + 'cluster-id3': _ClusterStatePB(READY), + }, + ) + + # Patch the stub used by the API method. + client._table_admin_client = table_api + bigtable_table_stub = ( + client._table_admin_client.bigtable_table_admin_stub) + bigtable_table_stub.GetTable.side_effect = [response_pb] + + # build expected result + expected_result = { + u'cluster-id1': ClusterState(INITIALIZING), + u'cluster-id2': ClusterState(PLANNED_MAINTENANCE), + u'cluster-id3': ClusterState(READY) + } + + # Perform the method and check the result. 
+ result = table.get_cluster_states() + self.assertEqual(result, expected_result) + def _read_row_helper(self, chunks, expected_result, app_profile_id=None): from google.cloud._testing import _Monkey from google.cloud.bigtable import table as MUT @@ -1531,6 +1573,82 @@ def _ReadRowsRequestPB(*args, **kw): return messages_v2_pb2.ReadRowsRequest(*args, **kw) +class Test_ClusterState(unittest.TestCase): + def test___eq__(self): + from google.cloud.bigtable.enums import Table as enum_table + from google.cloud.bigtable.table import ClusterState + READY = enum_table.ReplicationState.READY + state1 = ClusterState(READY) + state2 = ClusterState(READY) + self.assertEqual(state1, state2) + + def test___eq__type_differ(self): + from google.cloud.bigtable.enums import Table as enum_table + from google.cloud.bigtable.table import ClusterState + READY = enum_table.ReplicationState.READY + state1 = ClusterState(READY) + state2 = object() + self.assertNotEqual(state1, state2) + + def test___ne__same_value(self): + from google.cloud.bigtable.enums import Table as enum_table + from google.cloud.bigtable.table import ClusterState + READY = enum_table.ReplicationState.READY + state1 = ClusterState(READY) + state2 = ClusterState(READY) + comparison_val = (state1 != state2) + self.assertFalse(comparison_val) + + def test___ne__(self): + from google.cloud.bigtable.enums import Table as enum_table + from google.cloud.bigtable.table import ClusterState + READY = enum_table.ReplicationState.READY + INITIALIZING = enum_table.ReplicationState.INITIALIZING + state1 = ClusterState(READY) + state2 = ClusterState(INITIALIZING) + self.assertNotEqual(state1, state2) + + def test__repr__(self): + from google.cloud.bigtable.enums import Table as enum_table + from google.cloud.bigtable.table import ClusterState + STATE_NOT_KNOWN = enum_table.ReplicationState.STATE_NOT_KNOWN + INITIALIZING = enum_table.ReplicationState.INITIALIZING + PLANNED_MAINTENANCE = enum_table.ReplicationState.PLANNED_MAINTENANCE + UNPLANNED_MAINTENANCE = enum_table.ReplicationState. \ + UNPLANNED_MAINTENANCE + READY = enum_table.ReplicationState.READY + + replication_dict = { + STATE_NOT_KNOWN: "STATE_NOT_KNOWN", + INITIALIZING: "INITIALIZING", + PLANNED_MAINTENANCE: "PLANNED_MAINTENANCE", + UNPLANNED_MAINTENANCE: "UNPLANNED_MAINTENANCE", + READY: "READY" + } + + self.assertEqual(str(ClusterState(STATE_NOT_KNOWN)), + replication_dict[STATE_NOT_KNOWN]) + self.assertEqual(str(ClusterState(INITIALIZING)), + replication_dict[INITIALIZING]) + self.assertEqual(str(ClusterState(PLANNED_MAINTENANCE)), + replication_dict[PLANNED_MAINTENANCE]) + self.assertEqual(str(ClusterState(UNPLANNED_MAINTENANCE)), + replication_dict[UNPLANNED_MAINTENANCE]) + self.assertEqual(str(ClusterState(READY)), + replication_dict[READY]) + + self.assertEqual(ClusterState(STATE_NOT_KNOWN).replication_state, + STATE_NOT_KNOWN) + self.assertEqual(ClusterState(INITIALIZING).replication_state, + INITIALIZING) + self.assertEqual(ClusterState(PLANNED_MAINTENANCE).replication_state, + PLANNED_MAINTENANCE) + self.assertEqual(ClusterState(UNPLANNED_MAINTENANCE). 
+ replication_state, UNPLANNED_MAINTENANCE) + self.assertEqual(ClusterState(READY).replication_state, + READY) + + def _ReadRowsResponseCellChunkPB(*args, **kw): from google.cloud.bigtable_v2.proto import ( bigtable_pb2 as messages_v2_pb2) @@ -1628,3 +1746,12 @@ def _ColumnFamilyPB(*args, **kw): table_pb2 as table_v2_pb2) return table_v2_pb2.ColumnFamily(*args, **kw) + + +def _ClusterStatePB(replication_state): + from google.cloud.bigtable_admin_v2.proto import ( + table_pb2 as table_v2_pb2) + + return table_v2_pb2.Table.ClusterState( + replication_state=replication_state + ) From a6d7eb116f5e91ef423981c74d93fbbde236e9d5 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Wed, 22 Aug 2018 14:45:00 -0400 Subject: [PATCH 167/892] Fix failing systest: 'test_create_instance_w_two_clusters'. (#5836) Closes #5835. --- .../google-cloud-bigtable/tests/system.py | 33 +++++++------------ 1 file changed, 11 insertions(+), 22 deletions(-) diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index 8388542fe284..8b39cb9f25d2 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -218,14 +218,15 @@ def test_instance_exists(self): def test_create_instance_w_two_clusters(self): from google.cloud.bigtable import enums + from google.cloud.bigtable.table import ClusterState _PRODUCTION = enums.Instance.Type.PRODUCTION ALT_INSTANCE_ID = 'dif' + unique_resource_id('-') instance = Config.CLIENT.instance(ALT_INSTANCE_ID, instance_type=_PRODUCTION, labels=LABELS) - ALT_CLUSTER_ID_1 = ALT_INSTANCE_ID+'-c1' - ALT_CLUSTER_ID_2 = ALT_INSTANCE_ID+'-c2' + ALT_CLUSTER_ID_1 = ALT_INSTANCE_ID + '-c1' + ALT_CLUSTER_ID_2 = ALT_INSTANCE_ID + '-c2' LOCATION_ID_2 = 'us-central1-f' STORAGE_TYPE = enums.StorageType.HDD cluster_1 = instance.cluster( @@ -273,35 +274,23 @@ def test_create_instance_w_two_clusters(self): self.assertTrue({alt_cluster_1.name, alt_cluster_2.name, Config.CLUSTER.name}.issubset(found)) - self._test_state_helper(instance, ALT_CLUSTER_ID_1, - ALT_CLUSTER_ID_2) - def _test_state_helper(self, instance, clusterid1, clusterid2): - # test get_cluster_states for a table in instance - from google.cloud.bigtable.enums import Table as enum_table - from google.cloud.bigtable.table import ClusterState - STATE_NOT_KNOWN = enum_table.ReplicationState.STATE_NOT_KNOWN - INITIALIZING = enum_table.ReplicationState.INITIALIZING - PLANNED_MAINTENANCE = enum_table.ReplicationState. \ - PLANNED_MAINTENANCE - UNPLANNED_MAINTENANCE = enum_table.ReplicationState. 
\ - UNPLANNED_MAINTENANCE - READY = enum_table.ReplicationState.READY temp_table_id = 'test-get-cluster-states' temp_table = instance.table(temp_table_id) temp_table.create() result = temp_table.get_cluster_states() + ReplicationState = enums.Table.ReplicationState expected_results = [ - ClusterState(STATE_NOT_KNOWN), - ClusterState(INITIALIZING), - ClusterState(PLANNED_MAINTENANCE), - ClusterState(UNPLANNED_MAINTENANCE), - ClusterState(READY) + ClusterState(ReplicationState.STATE_NOT_KNOWN), + ClusterState(ReplicationState.INITIALIZING), + ClusterState(ReplicationState.PLANNED_MAINTENANCE), + ClusterState(ReplicationState.UNPLANNED_MAINTENANCE), + ClusterState(ReplicationState.READY) ] cluster_id_list = result.keys() self.assertEqual(len(cluster_id_list), 2) - self.assertIn(clusterid1, cluster_id_list) - self.assertIn(clusterid2, cluster_id_list) + self.assertIn(ALT_CLUSTER_ID_1, cluster_id_list) + self.assertIn(ALT_CLUSTER_ID_2, cluster_id_list) for clusterstate in result.values(): self.assertIn(clusterstate, expected_results) From 4f97351ab656ab8cd7001315ce72e97f1edcd5b2 Mon Sep 17 00:00:00 2001 From: Solomon Duskis Date: Mon, 27 Aug 2018 14:48:19 -0400 Subject: [PATCH 168/892] Bigtable: consolidating read_rows and yield_rows (#5840) - Fold `YieldRowsData` functionality into `PartialRowsData` - Use the name `__iter__()` instead of `read_rows()` so that `PartialRowsData` is now an iterable. - Retain obsolete `Table.yield_rows()` for backwards compatiblity. --- .../google/cloud/bigtable/row_data.py | 81 ++++++------------- .../google/cloud/bigtable/table.py | 27 +++---- .../google-cloud-bigtable/tests/system.py | 8 +- .../tests/unit/test_row_data.py | 48 ++++------- .../tests/unit/test_table.py | 3 +- 5 files changed, 58 insertions(+), 109 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py index f3ada1dd3f02..ab561655a16a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py @@ -298,58 +298,6 @@ class InvalidChunk(RuntimeError): """Exception raised to to invalid chunk data from back-end.""" -class PartialRowsData(object): - """Convenience wrapper for consuming a ``ReadRows`` streaming response. - - :type read_method: :class:`client._table_data_client.read_rows` - :param read_method: ``ReadRows`` method. - - :type request: :class:`data_messages_v2_pb2.ReadRowsRequest` - :param request: The ``ReadRowsRequest`` message used to create a - ReadRowsResponse iterator. - """ - - START = 'Start' # No responses yet processed. - NEW_ROW = 'New row' # No cells yet complete for row - ROW_IN_PROGRESS = 'Row in progress' # Some cells complete for row - CELL_IN_PROGRESS = 'Cell in progress' # Incomplete cell for row - - def __init__(self, read_method, request): - self._generator = YieldRowsData(read_method, request) - - # Fully-processed rows, keyed by `row_key` - self.rows = {} - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return NotImplemented - return other._generator == self._generator - - def __ne__(self, other): - return not self == other - - @property - def state(self): - """State machine state. - - :rtype: str - :returns: name of state corresponding to currrent row / chunk - processing. - """ - return self._generator.state - - def consume_all(self, max_loops=None): - """Consume the streamed responses until there are no more. 
- - :type max_loops: int - :param max_loops: (Optional) Maximum number of times to try to consume - an additional ``ReadRowsResponse``. You can use this - to avoid long wait times. - """ - for row in self._generator.read_rows(): - self.rows[row.row_key] = row - - def _retry_read_rows_exception(exc): if isinstance(exc, grpc.RpcError): exc = exceptions.from_grpc_error(exc) @@ -357,7 +305,7 @@ def _retry_read_rows_exception(exc): exceptions.DeadlineExceeded)) -class YieldRowsData(object): +class PartialRowsData(object): """Convenience wrapper for consuming a ``ReadRows`` streaming response. :type read_method: :class:`client._table_data_client.read_rows` @@ -405,6 +353,8 @@ def __init__(self, read_method, request): self.request = request self.response_iterator = read_method(request) + self.rows = {} + @property def state(self): """State machine state. @@ -436,15 +386,30 @@ def cancel(self): """Cancels the iterator, closing the stream.""" self.response_iterator.cancel() + def consume_all(self, max_loops=None): + """Consume the streamed responses until there are no more. + + .. warning:: + This method will be removed in future releases. Please use this + class as a generator instead. + + :type max_loops: int + :param max_loops: (Optional) Maximum number of times to try to consume + an additional ``ReadRowsResponse``. You can use this + to avoid long wait times. + """ + for row in self: + self.rows[row.row_key] = row + def _create_retry_request(self): - """Helper for :meth:`read_rows`.""" + """Helper for :meth:`__iter__`.""" req_manager = _ReadRowsRequestManager(self.request, self.last_scanned_row_key, self._counter) self.request = req_manager.build_updated_request() def _on_error(self, exc): - """Helper for :meth:`read_rows`.""" + """Helper for :meth:`__iter__`.""" # restart the read scan from AFTER the last successfully read row if self.last_scanned_row_key: self._create_retry_request() @@ -452,17 +417,17 @@ def _on_error(self, exc): self.response_iterator = self.read_method(self.request) def _read_next(self): - """Helper for :meth:`read_rows`.""" + """Helper for :meth:`__iter__`.""" return six.next(self.response_iterator) def _read_next_response(self): - """Helper for :meth:`read_rows`.""" + """Helper for :meth:`__iter__`.""" retry_ = retry.Retry( predicate=_retry_read_rows_exception, deadline=60) return retry_(self._read_next, on_error=self._on_error)() - def read_rows(self): + def __iter__(self): """Consume the ``ReadRowsResponse's`` from the stream. Read the rows and yield each to the reader diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index 281f96eca379..3b9d56451f58 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -30,7 +30,6 @@ from google.cloud.bigtable.row import ConditionalRow from google.cloud.bigtable.row import DirectRow from google.cloud.bigtable.row_data import PartialRowsData -from google.cloud.bigtable.row_data import YieldRowsData from google.cloud.bigtable.row_set import RowSet from google.cloud.bigtable.row_set import RowRange from google.cloud.bigtable import enums @@ -305,7 +304,7 @@ def read_row(self, row_key, filter_=None): return rows_data.rows[row_key] def read_rows(self, start_key=None, end_key=None, limit=None, - filter_=None, end_inclusive=False): + filter_=None, end_inclusive=False, row_set=None): """Read rows from this table. 
:type start_key: bytes @@ -332,21 +331,28 @@ def read_rows(self, start_key=None, end_key=None, limit=None, :param end_inclusive: (Optional) Whether the ``end_key`` should be considered inclusive. The default is False (exclusive). + :type row_set: :class:`row_set.RowSet` + :param filter_: (Optional) The row set containing multiple row keys and + row_ranges. + :rtype: :class:`.PartialRowsData` - :returns: A :class:`.PartialRowsData` convenience wrapper for consuming + :returns: A :class:`.PartialRowsData` a generator for consuming the streamed results. """ request_pb = _create_row_request( self.name, start_key=start_key, end_key=end_key, filter_=filter_, limit=limit, end_inclusive=end_inclusive, - app_profile_id=self._app_profile_id) + app_profile_id=self._app_profile_id, row_set=row_set) data_client = self._instance._client.table_data_client return PartialRowsData(data_client._read_rows, request_pb) - def yield_rows(self, start_key=None, end_key=None, limit=None, - filter_=None, row_set=None): + def yield_rows(self, **kwargs): """Read rows from this table. + .. warning:: + This method will be removed in future releases. Please use + ``read_rows`` instead. + :type start_key: bytes :param start_key: (Optional) The beginning of a range of row keys to read from. The range will include ``start_key``. If @@ -374,14 +380,7 @@ def yield_rows(self, start_key=None, end_key=None, limit=None, :rtype: :class:`.PartialRowData` :returns: A :class:`.PartialRowData` for each row returned """ - request_pb = _create_row_request( - self.name, start_key=start_key, end_key=end_key, filter_=filter_, - limit=limit, app_profile_id=self._app_profile_id, row_set=row_set) - data_client = self._instance._client.table_data_client - generator = YieldRowsData(data_client._read_rows, request_pb) - - for row in generator.read_rows(): - yield row + return self.read_rows(**kwargs) def mutate_rows(self, rows, retry=DEFAULT_RETRY): """Mutates multiple rows in bulk. 
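A minimal usage sketch of the consolidated read API described in this patch (illustrative only; the project, instance, and table IDs are hypothetical placeholders). `Table.read_rows()` now returns a `PartialRowsData` that can be iterated directly, while `consume_all()` and `yield_rows()` remain as thin compatibility shims:

    from google.cloud.bigtable import Client

    client = Client(project='my-project')        # hypothetical project ID
    instance = client.instance('my-instance')    # hypothetical instance ID
    table = instance.table('my-table')           # hypothetical table ID

    # New style: PartialRowsData is iterable, so stream rows directly.
    for row in table.read_rows(start_key=b'row_key_1', end_key=b'row_key_9'):
        print(row.row_key)

    # Old style still works for now: consume_all() collects rows into the
    # .rows dict, but it is slated for removal in a future release.
    rows_data = table.read_rows()
    rows_data.consume_all()
    print(sorted(rows_data.rows))
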
diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index 8b39cb9f25d2..9a9c8afaf2d5 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -876,10 +876,10 @@ def test_yield_rows_with_row_set(self): read_rows = self._table.yield_rows(row_set=row_set) - expected_row_keys = set([b'row_key_1', b'row_key_3', b'row_key_4', - b'row_key_5', b'row_key_6']) - found_row_keys = set([row.row_key for row in read_rows]) - self.assertEqual(found_row_keys, set(expected_row_keys)) + expected_row_keys = [b'row_key_1', b'row_key_3', b'row_key_4', + b'row_key_5', b'row_key_6'] + found_row_keys = [row.row_key for row in read_rows] + self.assertEqual(found_row_keys, expected_row_keys) def test_read_large_cell_limit(self): row = self._table.row(ROW_KEY) diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_data.py b/packages/google-cloud-bigtable/tests/unit/test_row_data.py index 12b1093ac4de..067948c26b17 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_data.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_data.py @@ -321,6 +321,11 @@ class _Client(object): class TestPartialRowsData(unittest.TestCase): + ROW_KEY = b'row-key' + FAMILY_NAME = u'family' + QUALIFIER = b'qualifier' + TIMESTAMP_MICROS = 100 + VALUE = b'value' @staticmethod def _get_target_class(): @@ -328,6 +333,12 @@ def _get_target_class(): return PartialRowsData + @staticmethod + def _get_target_client_class(): + from google.cloud.bigtable.client import Client + + return Client + def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) @@ -337,8 +348,7 @@ def test_constructor(self): request = object() partial_rows_data = self._make_one(client._data_stub.ReadRows, request) - self.assertIs(partial_rows_data._generator.request, - request) + self.assertIs(partial_rows_data.request, request) self.assertEqual(partial_rows_data.rows, {}) def test___eq__(self): @@ -390,29 +400,6 @@ def test_rows_getter(self): partial_rows_data.rows = value = object() self.assertIs(partial_rows_data.rows, value) - -class TestYieldRowsData(unittest.TestCase): - ROW_KEY = b'row-key' - FAMILY_NAME = u'family' - QUALIFIER = b'qualifier' - TIMESTAMP_MICROS = 100 - VALUE = b'value' - - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_data import YieldRowsData - - return YieldRowsData - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - @staticmethod - def _get_target_client_class(): - from google.cloud.bigtable.client import Client - - return Client - def _make_client(self, *args, **kwargs): return self._get_target_client_class()(*args, **kwargs) @@ -451,7 +438,7 @@ def test_state_new_row_w_row(self): client._table_data_client.bigtable_stub.ReadRows, request) yrd._response_iterator = iterator yrd._last_scanned_row_key = '' - rows = [row for row in yrd.read_rows()] + rows = [row for row in yrd] result = rows[0] self.assertEqual(result.row_key, self.ROW_KEY) @@ -696,15 +683,12 @@ def test_yield_rows_data(self): yrd = self._make_one(client._data_stub.ReadRows, request) - rows = [] - for row in yrd.read_rows(): - rows.append(row) - result = rows[0] + result = self._consume_all(yrd)[0] - self.assertEqual(result.row_key, self.ROW_KEY) + self.assertEqual(result, self.ROW_KEY) def _consume_all(self, yrd): - return [row.row_key for row in yrd.read_rows()] + return [row.row_key for row in yrd] class 
Test_ReadRowsRequestManager(unittest.TestCase): diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index 923032f2da29..95e6b1a42d0b 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -619,7 +619,8 @@ def mock_create_row_request(table_name, **kwargs): 'filter_': filter_obj, 'limit': limit, 'end_inclusive': False, - 'app_profile_id': app_profile_id + 'app_profile_id': app_profile_id, + 'row_set': None } self.assertEqual(mock_created, [(table.name, created_kwargs)]) From 11beb4be075363f3b11c9257a590282c71016046 Mon Sep 17 00:00:00 2001 From: Solomon Duskis Date: Tue, 28 Aug 2018 11:42:11 -0400 Subject: [PATCH 169/892] Release bigtable 0.30.0 (#5853) --- packages/google-cloud-bigtable/CHANGELOG.md | 64 +++++++++++++++++++++ packages/google-cloud-bigtable/setup.py | 2 +- 2 files changed, 65 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 9d742c2e55d0..7a81ec6d4323 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,70 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## 0.30.0 + +### New Features + +- Improve performance and capabilities of reads. `read_rows` now returns a generator; has automatic retries; and can read an arbitrary set of keys and ranges + - Consolidate read_rows and yield_rows (#5840) + - Implement row set for yield_rows (#5506) + - Improve read rows validation performance (#5390) + - Add retry for yield_rows (#4882) + - Require TimestampRanges to be milliseconds granularity (#5002) + - Provide better access to cell values (#4908) + - Add data app profile id (#5369) + +- Improve writes: Writes are usable in Beam + - Create MutationBatcher for bigtable (#5651) + - Allow DirectRow to be created without a table (#5567) + - Add data app profile id (#5369) + +- Improve table admin: Table creation now can also create families in a single RPC. Add an `exist()` method. Add `get_cluster_states` for information about replication + - Add 'Table.get_cluster_states' method (#5790) + - Optimize 'Table.exists' performance (#5749) + - Add column creation in 'Table.create()'. (#5576) + - Add 'Table.exists' method (#5545) + - Add split keys on create table - v2 (#5513) + - Avoid sharing table names across unrelated systests. (#5421) + - Add truncate table and drop by prefix on top of GAPIC integration (#5360) + +- Improve instance admin: Instance creation allows for the creation of multiple clusters. Instance label management is now enabled. + - Create app_profile_object (#5782) + - Add 'Instance.exists' method (#5802) + - Add 'InstanceAdminClient.list_clusters' method (#5715) + - Add 'Instance._state' property (#5736) + - Convert 'instance.labels' to return a dictionary (#5728) + - Reshape cluster.py, adding cluster() factory to instance.py (#5663) + - Convert 'Instance.update' to use 'instance.partial_instance_update' API (#5643) + - Refactor 'InstanceAdminClient.update_app_profile' to remove update_mask argument (#5684) + - Add the ability to create an instance with multiple clusters (#5622) + - Add 'instance_type', 'labels' to 'Instance' ctor (#5614) + - Add optional app profile to 'Instance.table' (#5605) + - Clean up Instance creation. (#5542) + - Make 'InstanceAdminClient.list_instances' return actual instance objects, not protos. 
(#5420) + - Add admin app profile methods on Instance (#5315) + +### Internal / Testing Changes +- Rename releases to changelog and include from CHANGELOG.md (#5191) +- Fix bad trove classifier +- Integrate new generated low-level client (#5178) +- Override gRPC max message lengths. (#5498) +- Use client properties rather than private attrs (#5398) +- Fix the broken Bigtable system test. (#5607) +- Fix Py3 breakage in new system test. (#5474) +- Modify system test for new GAPIC code (#5302) +- Add Test runs for Python 3.7 and remove 3.4 (#5295) +- Disable Bigtable system tests (#5381) +- Modify system tests to use prerelease versions of grpcio (#5304) +- Pass through 'session.posargs' when running Bigtable system tests. (#5418) +- Harden 'test_list_instances' against simultaneous test runs. (#5476) +- Shorten instance / cluster name to fix CI breakage. (#5641) +- Fix failing systest: 'test_create_instance_w_two_clusters'. (#5836) +- Add labels {'python-system': ISO-timestamp} to systest instances (#5729) +- Shorten cluster ID in system test (#5719) +- Harden 'test_list_instances' further. (#5696) +- Improve testing of create instance (#5544) + ## 0.29.0 ### New features diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 9206dcfad733..001c99757b7f 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -22,7 +22,7 @@ name = 'google-cloud-bigtable' description = 'Google Cloud Bigtable API client library' -version = '0.29.0' +version = '0.30.0' # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' From 7b607692a5f42b29158468cb7cf363bb1e65e943 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Thu, 30 Aug 2018 11:50:23 -0400 Subject: [PATCH 170/892] Nox: use inplace installs (#5865) --- packages/google-cloud-bigtable/nox.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/packages/google-cloud-bigtable/nox.py b/packages/google-cloud-bigtable/nox.py index 2abcce3ae2a5..e8c0ac93df88 100644 --- a/packages/google-cloud-bigtable/nox.py +++ b/packages/google-cloud-bigtable/nox.py @@ -33,8 +33,10 @@ def default(session): Python corresponding to the ``nox`` binary the ``PATH`` can run the tests. """ - # Install all test dependencies, then install this package in-place. - session.install('mock', 'pytest', 'pytest-cov', *LOCAL_DEPS) + # Install all test dependencies, then install local packages in-place. + session.install('mock', 'pytest', 'pytest-cov') + for local_dep in LOCAL_DEPS: + session.install('-e', local_dep) session.install('-e', '.') # Run py.test against the unit tests. @@ -86,8 +88,10 @@ def system(session, py): # Install all test dependencies, then install this package into the # virtualenv's dist-packages. - session.install('mock', 'pytest', *LOCAL_DEPS) - session.install('../test_utils/') + session.install('mock', 'pytest') + for local_dep in LOCAL_DEPS: + session.install('-e', local_dep) + session.install('-e', '../test_utils/') session.install('-e', '.') # Run py.test against the system tests. 
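A condensed sketch of the in-place install pattern the sessions above now use (the LOCAL_DEPS value below is a hypothetical stand-in for the tuple defined at the top of nox.py). Installing with pip's editable mode (`-e`) means changes to the sibling packages are picked up without reinstalling between runs:

    # Hypothetical stand-in for the LOCAL_DEPS tuple declared in nox.py.
    LOCAL_DEPS = ('../api_core/', '../core/')

    def install_test_deps(session):
        """Install test dependencies, then local packages in editable mode."""
        session.install('mock', 'pytest', 'pytest-cov')
        for local_dep in LOCAL_DEPS:
            session.install('-e', local_dep)   # in-place (editable) install
        session.install('-e', '.')             # this package itself, in-place
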
From 04a4f70f4c519cbdd63737e029e668777fe222d4 Mon Sep 17 00:00:00 2001 From: Christopher Wilcox Date: Tue, 4 Sep 2018 08:35:08 -0700 Subject: [PATCH 171/892] synth bigtable and bigtable admin (#5867) * synth bigtable and bigtable admin * adjust gapic lib version * Modify hand-written code and tests for new generated code * Initialize inner_api_calls when used in table.py * set grpc message lengths * modify tests to create inner api call --- .../google/cloud/bigtable/cluster.py | 2 +- .../google/cloud/bigtable/enums.py | 4 +- .../google/cloud/bigtable/table.py | 34 +- .../cloud/bigtable_admin_v2/__init__.py | 5 +- .../gapic/bigtable_instance_admin_client.py | 919 +++++++++++------- .../gapic/bigtable_table_admin_client.py | 699 +++++++------ .../bigtable_table_admin_client_config.py | 24 +- .../cloud/bigtable_admin_v2/gapic/enums.py | 68 +- .../gapic/transports/__init__.py | 0 .../bigtable_instance_admin_grpc_transport.py | 361 +++++++ .../bigtable_table_admin_grpc_transport.py | 328 +++++++ .../proto/bigtable_instance_admin_pb2.py | 56 +- .../proto/bigtable_instance_admin_pb2_grpc.py | 59 +- .../proto/bigtable_table_admin_pb2.py | 92 +- .../proto/bigtable_table_admin_pb2_grpc.py | 73 +- .../bigtable_admin_v2/proto/instance_pb2.py | 8 +- .../bigtable_admin_v2/proto/table_pb2.py | 36 +- .../google/cloud/bigtable_admin_v2/types.py | 16 +- .../google/cloud/bigtable_v2/__init__.py | 4 +- .../bigtable_v2/gapic/bigtable_client.py | 403 +++++--- .../gapic/bigtable_client_config.py | 19 +- .../bigtable_v2/gapic/transports/__init__.py | 0 .../transports/bigtable_grpc_transport.py | 195 ++++ .../cloud/bigtable_v2/proto/bigtable_pb2.py | 527 +++------- .../bigtable_v2/proto/bigtable_pb2_grpc.py | 2 +- .../cloud/bigtable_v2/proto/data_pb2.py | 144 ++- .../google/cloud/bigtable_v2/types.py | 14 +- packages/google-cloud-bigtable/nox.py | 2 +- packages/google-cloud-bigtable/synth.py | 63 ++ .../unit/gapic/v2/test_bigtable_client_v2.py | 4 +- .../test_bigtable_instance_admin_client_v2.py | 22 +- .../v2/test_bigtable_table_admin_client_v2.py | 2 + .../tests/unit/test_app_profile.py | 12 +- .../tests/unit/test_client.py | 8 +- .../tests/unit/test_cluster.py | 12 +- .../tests/unit/test_column_family.py | 6 +- .../tests/unit/test_instance.py | 20 +- .../tests/unit/test_row.py | 3 +- .../tests/unit/test_row_data.py | 4 +- .../tests/unit/test_table.py | 101 +- 40 files changed, 2701 insertions(+), 1650 deletions(-) create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/__init__.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/transports/__init__.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py create mode 100644 packages/google-cloud-bigtable/synth.py diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py index 0ee4d9cfb146..b5032f805f10 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py @@ -255,7 +255,7 @@ def update(self): # Location is set only at the time of creation of a cluster # and can not be changed 
after cluster has been created. return client.instance_admin_client.update_cluster( - self.name, None, self.serve_nodes) + self.name, self.serve_nodes, None) def delete(self): """Delete this cluster. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/enums.py b/packages/google-cloud-bigtable/google/cloud/bigtable/enums.py index a65d5651e416..3f695b86ce47 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/enums.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/enums.py @@ -119,8 +119,8 @@ class RoutingPolicyType(object): See https://cloud.google.com/bigtable/docs/reference/admin/rpc/google.bigtable.admin.v2#google.bigtable.admin.v2.AppProfile.SingleClusterRouting """ - ANY = enums.RoutingPolicyType.ANY - SINGLE = enums.RoutingPolicyType.SINGLE + ANY = 1 + SINGLE = 2 class Table(object): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index 3b9d56451f58..29463e034ea1 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -21,6 +21,7 @@ from google.api_core.exceptions import NotFound from google.api_core.retry import if_exception_type from google.api_core.retry import Retry +from google.api_core.gapic_v1.method import wrap_method from google.cloud._helpers import _to_bytes from google.cloud.bigtable.column_family import _gc_rule_from_pb from google.cloud.bigtable.column_family import ColumnFamily @@ -292,7 +293,19 @@ def read_row(self, row_key, filter_=None): self.name, row_key=row_key, filter_=filter_, app_profile_id=self._app_profile_id) data_client = self._instance._client.table_data_client - rows_data = PartialRowsData(data_client._read_rows, request_pb) + if 'read_rows' not in data_client._inner_api_calls: + default_retry = data_client._method_configs['ReadRows'].retry + timeout = data_client._method_configs['ReadRows'].timeout + data_client._inner_api_calls['read_rows'] = \ + wrap_method( + data_client.transport.read_rows, + default_retry=default_retry, + default_timeout=timeout, + client_info=data_client._client_info, + ) + rows_data = PartialRowsData( + data_client._inner_api_calls['read_rows'], + request_pb) rows_data.consume_all() if rows_data.state not in (rows_data.NEW_ROW, rows_data.START): @@ -344,7 +357,9 @@ def read_rows(self, start_key=None, end_key=None, limit=None, filter_=filter_, limit=limit, end_inclusive=end_inclusive, app_profile_id=self._app_profile_id, row_set=row_set) data_client = self._instance._client.table_data_client - return PartialRowsData(data_client._read_rows, request_pb) + return PartialRowsData( + data_client.transport.read_rows, + request_pb) def yield_rows(self, **kwargs): """Read rows from this table. 
@@ -608,7 +623,20 @@ def _do_mutate_retryable_rows(self): self.table_name, retryable_rows, app_profile_id=self.app_profile_id) data_client = self.client.table_data_client - responses = data_client._mutate_rows(mutate_rows_request, retry=None) + inner_api_calls = data_client._inner_api_calls + if 'mutate_rows' not in inner_api_calls: + default_retry = data_client._method_configs['MutateRows'].retry, + default_timeout = data_client._method_configs['MutateRows'].timeout + data_client._inner_api_calls[ + 'mutate_rows'] = wrap_method( + data_client.transport.mutate_rows, + default_retry=default_retry, + default_timeout=default_timeout, + client_info=data_client._client_info, + ) + + responses = data_client._inner_api_calls['mutate_rows']( + mutate_rows_request, retry=None) num_responses = 0 num_retryable_responses = 0 diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py index 62e1934aab62..bcb3e9e24d8e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- +# # Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -23,7 +25,8 @@ class BigtableInstanceAdminClient( bigtable_instance_admin_client.BigtableInstanceAdminClient): __doc__ = ( - bigtable_instance_admin_client.BigtableInstanceAdminClient.__doc__) + bigtable_instance_admin_client.BigtableInstanceAdminClient.__doc__ + ) enums = enums diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py index f61e4a3b9e52..a854964aed01 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- +# # Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,7 +17,9 @@ import functools import pkg_resources +import warnings +from google.oauth2 import service_account import google.api_core.gapic_v1.client_info import google.api_core.gapic_v1.config import google.api_core.gapic_v1.method @@ -25,9 +29,11 @@ import google.api_core.operations_v1 import google.api_core.page_iterator import google.api_core.path_template +import grpc from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client_config from google.cloud.bigtable_admin_v2.gapic import enums +from google.cloud.bigtable_admin_v2.gapic.transports import bigtable_instance_admin_grpc_transport from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2 from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2_grpc from google.cloud.bigtable_admin_v2.proto import instance_pb2 @@ -51,24 +57,31 @@ class BigtableInstanceAdminClient(object): SERVICE_ADDRESS = 'bigtableadmin.googleapis.com:443' """The default address of the service.""" - # The scopes needed to make gRPC calls to all of the methods defined in - # this service - _DEFAULT_SCOPES = ( - 'https://www.googleapis.com/auth/bigtable.admin', - 'https://www.googleapis.com/auth/bigtable.admin.cluster', - 'https://www.googleapis.com/auth/bigtable.admin.instance', - 
'https://www.googleapis.com/auth/bigtable.admin.table', - 'https://www.googleapis.com/auth/cloud-bigtable.admin', - 'https://www.googleapis.com/auth/cloud-bigtable.admin.cluster', - 'https://www.googleapis.com/auth/cloud-bigtable.admin.table', - 'https://www.googleapis.com/auth/cloud-platform', - 'https://www.googleapis.com/auth/cloud-platform.read-only', - ) - - # The name of the interface for this client. This is the key used to find - # method configuration in the client_config dictionary. + # The name of the interface for this client. This is the key used to + # find the method configuration in the client_config dictionary. _INTERFACE_NAME = 'google.bigtable.admin.v2.BigtableInstanceAdmin' + @classmethod + def from_service_account_file(cls, filename, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + BigtableInstanceAdminClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs['credentials'] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + @classmethod def project_path(cls, project): """Return a fully-qualified project string.""" @@ -116,6 +129,7 @@ def location_path(cls, project, location): ) def __init__(self, + transport=None, channel=None, credentials=None, client_config=bigtable_instance_admin_client_config.config, @@ -123,176 +137,83 @@ def __init__(self, """Constructor. Args: - channel (grpc.Channel): A ``Channel`` instance through - which to make calls. This argument is mutually exclusive + transport (Union[~.BigtableInstanceAdminGrpcTransport, + Callable[[~.Credentials, type], ~.BigtableInstanceAdminGrpcTransport]): A transport + instance, responsible for actually making the API calls. + The default transport uses the gRPC protocol. + This argument may also be a callable which returns a + transport instance. Callables will be sent the credentials + as the first argument and the default transport class as + the second argument. + channel (grpc.Channel): DEPRECATED. A ``Channel`` instance + through which to make calls. This argument is mutually exclusive with ``credentials``; providing both will raise an exception. credentials (google.auth.credentials.Credentials): The authorization credentials to attach to requests. These credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - client_config (dict): A dictionary of call options for each - method. If not specified, the default configuration is used. + This argument is mutually exclusive with providing a + transport instance to ``transport``; doing so will raise + an exception. + client_config (dict): DEPRECATED. A dictionary of call options for + each method. If not specified, the default configuration is used. client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. """ - # If both `channel` and `credentials` are specified, raise an - # exception (channels come with credentials baked in already). 
- if channel is not None and credentials is not None: - raise ValueError( - 'The `channel` and `credentials` arguments to {} are mutually ' - 'exclusive.'.format(self.__class__.__name__), ) - - # Create the channel. - if channel is None: - channel = google.api_core.grpc_helpers.create_channel( - self.SERVICE_ADDRESS, + # Raise deprecation warnings for things we want to go away. + if client_config: + warnings.warn('The `client_config` argument is deprecated.', + PendingDeprecationWarning) + if channel: + warnings.warn( + 'The `channel` argument is deprecated; use ' + '`transport` instead.', PendingDeprecationWarning) + + # Instantiate the transport. + # The transport is responsible for handling serialization and + # deserialization and actually sending data to the service. + if transport: + if callable(transport): + self.transport = transport( + credentials=credentials, + default_class=bigtable_instance_admin_grpc_transport. + BigtableInstanceAdminGrpcTransport, + ) + else: + if credentials: + raise ValueError( + 'Received both a transport instance and ' + 'credentials; these are mutually exclusive.') + self.transport = transport + else: + self.transport = bigtable_instance_admin_grpc_transport.BigtableInstanceAdminGrpcTransport( + address=self.SERVICE_ADDRESS, + channel=channel, credentials=credentials, - scopes=self._DEFAULT_SCOPES, ) - # Create the gRPC stubs. - self.bigtable_instance_admin_stub = ( - bigtable_instance_admin_pb2_grpc.BigtableInstanceAdminStub(channel)) - - # Operations client for methods that return long-running operations - # futures. - self.operations_client = ( - google.api_core.operations_v1.OperationsClient(channel)) - if client_info is None: client_info = ( google.api_core.gapic_v1.client_info.DEFAULT_CLIENT_INFO) client_info.gapic_version = _GAPIC_LIBRARY_VERSION + self._client_info = client_info # Parse out the default settings for retry and timeout for each RPC # from the client configuration. # (Ordinarily, these are the defaults specified in the `*_config.py` # file next to this one.) - method_configs = google.api_core.gapic_v1.config.parse_method_configs( + self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( client_config['interfaces'][self._INTERFACE_NAME], ) - # Write the "inner API call" methods to the class. - # These are wrapped versions of the gRPC stub methods, with retry and - # timeout configuration applied, called by the public methods on - # this class. 
- self._create_instance = google.api_core.gapic_v1.method.wrap_method( - self.bigtable_instance_admin_stub.CreateInstance, - default_retry=method_configs['CreateInstance'].retry, - default_timeout=method_configs['CreateInstance'].timeout, - client_info=client_info, - ) - self._get_instance = google.api_core.gapic_v1.method.wrap_method( - self.bigtable_instance_admin_stub.GetInstance, - default_retry=method_configs['GetInstance'].retry, - default_timeout=method_configs['GetInstance'].timeout, - client_info=client_info, - ) - self._list_instances = google.api_core.gapic_v1.method.wrap_method( - self.bigtable_instance_admin_stub.ListInstances, - default_retry=method_configs['ListInstances'].retry, - default_timeout=method_configs['ListInstances'].timeout, - client_info=client_info, - ) - self._update_instance = google.api_core.gapic_v1.method.wrap_method( - self.bigtable_instance_admin_stub.UpdateInstance, - default_retry=method_configs['UpdateInstance'].retry, - default_timeout=method_configs['UpdateInstance'].timeout, - client_info=client_info, - ) - self._partial_update_instance = google.api_core.gapic_v1.method.wrap_method( - self.bigtable_instance_admin_stub.PartialUpdateInstance, - default_retry=method_configs['PartialUpdateInstance'].retry, - default_timeout=method_configs['PartialUpdateInstance'].timeout, - client_info=client_info, - ) - self._delete_instance = google.api_core.gapic_v1.method.wrap_method( - self.bigtable_instance_admin_stub.DeleteInstance, - default_retry=method_configs['DeleteInstance'].retry, - default_timeout=method_configs['DeleteInstance'].timeout, - client_info=client_info, - ) - self._create_cluster = google.api_core.gapic_v1.method.wrap_method( - self.bigtable_instance_admin_stub.CreateCluster, - default_retry=method_configs['CreateCluster'].retry, - default_timeout=method_configs['CreateCluster'].timeout, - client_info=client_info, - ) - self._get_cluster = google.api_core.gapic_v1.method.wrap_method( - self.bigtable_instance_admin_stub.GetCluster, - default_retry=method_configs['GetCluster'].retry, - default_timeout=method_configs['GetCluster'].timeout, - client_info=client_info, - ) - self._list_clusters = google.api_core.gapic_v1.method.wrap_method( - self.bigtable_instance_admin_stub.ListClusters, - default_retry=method_configs['ListClusters'].retry, - default_timeout=method_configs['ListClusters'].timeout, - client_info=client_info, - ) - self._update_cluster = google.api_core.gapic_v1.method.wrap_method( - self.bigtable_instance_admin_stub.UpdateCluster, - default_retry=method_configs['UpdateCluster'].retry, - default_timeout=method_configs['UpdateCluster'].timeout, - client_info=client_info, - ) - self._delete_cluster = google.api_core.gapic_v1.method.wrap_method( - self.bigtable_instance_admin_stub.DeleteCluster, - default_retry=method_configs['DeleteCluster'].retry, - default_timeout=method_configs['DeleteCluster'].timeout, - client_info=client_info, - ) - self._create_app_profile = google.api_core.gapic_v1.method.wrap_method( - self.bigtable_instance_admin_stub.CreateAppProfile, - default_retry=method_configs['CreateAppProfile'].retry, - default_timeout=method_configs['CreateAppProfile'].timeout, - client_info=client_info, - ) - self._get_app_profile = google.api_core.gapic_v1.method.wrap_method( - self.bigtable_instance_admin_stub.GetAppProfile, - default_retry=method_configs['GetAppProfile'].retry, - default_timeout=method_configs['GetAppProfile'].timeout, - client_info=client_info, - ) - self._list_app_profiles = 
google.api_core.gapic_v1.method.wrap_method( - self.bigtable_instance_admin_stub.ListAppProfiles, - default_retry=method_configs['ListAppProfiles'].retry, - default_timeout=method_configs['ListAppProfiles'].timeout, - client_info=client_info, - ) - self._update_app_profile = google.api_core.gapic_v1.method.wrap_method( - self.bigtable_instance_admin_stub.UpdateAppProfile, - default_retry=method_configs['UpdateAppProfile'].retry, - default_timeout=method_configs['UpdateAppProfile'].timeout, - client_info=client_info, - ) - self._delete_app_profile = google.api_core.gapic_v1.method.wrap_method( - self.bigtable_instance_admin_stub.DeleteAppProfile, - default_retry=method_configs['DeleteAppProfile'].retry, - default_timeout=method_configs['DeleteAppProfile'].timeout, - client_info=client_info, - ) - self._get_iam_policy = google.api_core.gapic_v1.method.wrap_method( - self.bigtable_instance_admin_stub.GetIamPolicy, - default_retry=method_configs['GetIamPolicy'].retry, - default_timeout=method_configs['GetIamPolicy'].timeout, - client_info=client_info, - ) - self._set_iam_policy = google.api_core.gapic_v1.method.wrap_method( - self.bigtable_instance_admin_stub.SetIamPolicy, - default_retry=method_configs['SetIamPolicy'].retry, - default_timeout=method_configs['SetIamPolicy'].timeout, - client_info=client_info, - ) - self._test_iam_permissions = google.api_core.gapic_v1.method.wrap_method( - self.bigtable_instance_admin_stub.TestIamPermissions, - default_retry=method_configs['TestIamPermissions'].retry, - default_timeout=method_configs['TestIamPermissions'].timeout, - client_info=client_info, - ) + # Save a dictionary of cached API call functions. + # These are the actual callables which invoke the proper + # transport methods, wrapped with `wrap_method` to add retry, + # timeout, and the like. + self._inner_api_calls = {} # Service calls def create_instance(self, @@ -369,25 +290,40 @@ def create_instance(self, to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ - if metadata is None: - metadata = [] - metadata = list(metadata) + # Wrap the transport method to add retry and timeout logic. + if 'create_instance' not in self._inner_api_calls: + self._inner_api_calls[ + 'create_instance'] = google.api_core.gapic_v1.method.wrap_method( + self.transport.create_instance, + default_retry=self._method_configs['CreateInstance'].retry, + default_timeout=self._method_configs['CreateInstance'] + .timeout, + client_info=self._client_info, + ) + request = bigtable_instance_admin_pb2.CreateInstanceRequest( parent=parent, instance_id=instance_id, instance=instance, clusters=clusters, ) - - routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - [('parent', parent)], ) - metadata.append(routing_header) - - operation = self._create_instance( + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [('parent', parent)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header) + metadata.append(routing_metadata) + + operation = self._inner_api_calls['create_instance']( request, retry=retry, timeout=timeout, metadata=metadata) return google.api_core.operation.from_gapic( operation, - self.operations_client, + self.transport._operations_client, instance_pb2.Instance, metadata_type=bigtable_instance_admin_pb2.CreateInstanceMetadata, ) @@ -431,16 +367,31 @@ def get_instance(self, to a retryable error and retry attempts failed. 
ValueError: If the parameters are invalid. """ + # Wrap the transport method to add retry and timeout logic. + if 'get_instance' not in self._inner_api_calls: + self._inner_api_calls[ + 'get_instance'] = google.api_core.gapic_v1.method.wrap_method( + self.transport.get_instance, + default_retry=self._method_configs['GetInstance'].retry, + default_timeout=self._method_configs['GetInstance'] + .timeout, + client_info=self._client_info, + ) + + request = bigtable_instance_admin_pb2.GetInstanceRequest(name=name, ) if metadata is None: metadata = [] metadata = list(metadata) - request = bigtable_instance_admin_pb2.GetInstanceRequest(name=name, ) - - routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - [('name', name)], ) - metadata.append(routing_header) - - return self._get_instance( + try: + routing_header = [('name', name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header) + metadata.append(routing_metadata) + + return self._inner_api_calls['get_instance']( request, retry=retry, timeout=timeout, metadata=metadata) def list_instances(self, @@ -484,19 +435,34 @@ def list_instances(self, to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ - if metadata is None: - metadata = [] - metadata = list(metadata) + # Wrap the transport method to add retry and timeout logic. + if 'list_instances' not in self._inner_api_calls: + self._inner_api_calls[ + 'list_instances'] = google.api_core.gapic_v1.method.wrap_method( + self.transport.list_instances, + default_retry=self._method_configs['ListInstances'].retry, + default_timeout=self._method_configs['ListInstances'] + .timeout, + client_info=self._client_info, + ) + request = bigtable_instance_admin_pb2.ListInstancesRequest( parent=parent, page_token=page_token, ) - - routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - [('parent', parent)], ) - metadata.append(routing_header) - - return self._list_instances( + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [('parent', parent)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header) + metadata.append(routing_metadata) + + return self._inner_api_calls['list_instances']( request, retry=retry, timeout=timeout, metadata=metadata) def update_instance(self, @@ -570,9 +536,17 @@ def update_instance(self, to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ - if metadata is None: - metadata = [] - metadata = list(metadata) + # Wrap the transport method to add retry and timeout logic. 
+ if 'update_instance' not in self._inner_api_calls: + self._inner_api_calls[ + 'update_instance'] = google.api_core.gapic_v1.method.wrap_method( + self.transport.update_instance, + default_retry=self._method_configs['UpdateInstance'].retry, + default_timeout=self._method_configs['UpdateInstance'] + .timeout, + client_info=self._client_info, + ) + request = instance_pb2.Instance( name=name, display_name=display_name, @@ -580,12 +554,19 @@ def update_instance(self, labels=labels, state=state, ) - - routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - [('name', name)], ) - metadata.append(routing_header) - - return self._update_instance( + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [('name', name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header) + metadata.append(routing_metadata) + + return self._inner_api_calls['update_instance']( request, retry=retry, timeout=timeout, metadata=metadata) def partial_update_instance( @@ -647,23 +628,39 @@ def partial_update_instance( to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ - if metadata is None: - metadata = [] - metadata = list(metadata) + # Wrap the transport method to add retry and timeout logic. + if 'partial_update_instance' not in self._inner_api_calls: + self._inner_api_calls[ + 'partial_update_instance'] = google.api_core.gapic_v1.method.wrap_method( + self.transport.partial_update_instance, + default_retry=self._method_configs['PartialUpdateInstance'] + .retry, + default_timeout=self._method_configs[ + 'PartialUpdateInstance'].timeout, + client_info=self._client_info, + ) + request = bigtable_instance_admin_pb2.PartialUpdateInstanceRequest( instance=instance, update_mask=update_mask, ) - - routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - [('instance.name', instance.name)], ) - metadata.append(routing_header) - - operation = self._partial_update_instance( + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [('instance.name', instance.name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header) + metadata.append(routing_metadata) + + operation = self._inner_api_calls['partial_update_instance']( request, retry=retry, timeout=timeout, metadata=metadata) return google.api_core.operation.from_gapic( operation, - self.operations_client, + self.transport._operations_client, instance_pb2.Instance, metadata_type=bigtable_instance_admin_pb2.UpdateInstanceMetadata, ) @@ -704,17 +701,32 @@ def delete_instance(self, to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ + # Wrap the transport method to add retry and timeout logic. 
+ if 'delete_instance' not in self._inner_api_calls: + self._inner_api_calls[ + 'delete_instance'] = google.api_core.gapic_v1.method.wrap_method( + self.transport.delete_instance, + default_retry=self._method_configs['DeleteInstance'].retry, + default_timeout=self._method_configs['DeleteInstance'] + .timeout, + client_info=self._client_info, + ) + + request = bigtable_instance_admin_pb2.DeleteInstanceRequest( + name=name, ) if metadata is None: metadata = [] metadata = list(metadata) - request = bigtable_instance_admin_pb2.DeleteInstanceRequest( - name=name, ) - - routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - [('name', name)], ) - metadata.append(routing_header) - - self._delete_instance( + try: + routing_header = [('name', name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header) + metadata.append(routing_metadata) + + self._inner_api_calls['delete_instance']( request, retry=retry, timeout=timeout, metadata=metadata) def create_cluster(self, @@ -781,24 +793,39 @@ def create_cluster(self, to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ - if metadata is None: - metadata = [] - metadata = list(metadata) + # Wrap the transport method to add retry and timeout logic. + if 'create_cluster' not in self._inner_api_calls: + self._inner_api_calls[ + 'create_cluster'] = google.api_core.gapic_v1.method.wrap_method( + self.transport.create_cluster, + default_retry=self._method_configs['CreateCluster'].retry, + default_timeout=self._method_configs['CreateCluster'] + .timeout, + client_info=self._client_info, + ) + request = bigtable_instance_admin_pb2.CreateClusterRequest( parent=parent, cluster_id=cluster_id, cluster=cluster, ) - - routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - [('parent', parent)], ) - metadata.append(routing_header) - - operation = self._create_cluster( + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [('parent', parent)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header) + metadata.append(routing_metadata) + + operation = self._inner_api_calls['create_cluster']( request, retry=retry, timeout=timeout, metadata=metadata) return google.api_core.operation.from_gapic( operation, - self.operations_client, + self.transport._operations_client, instance_pb2.Cluster, metadata_type=bigtable_instance_admin_pb2.CreateClusterMetadata, ) @@ -842,16 +869,30 @@ def get_cluster(self, to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ + # Wrap the transport method to add retry and timeout logic. 
+ if 'get_cluster' not in self._inner_api_calls: + self._inner_api_calls[ + 'get_cluster'] = google.api_core.gapic_v1.method.wrap_method( + self.transport.get_cluster, + default_retry=self._method_configs['GetCluster'].retry, + default_timeout=self._method_configs['GetCluster'].timeout, + client_info=self._client_info, + ) + + request = bigtable_instance_admin_pb2.GetClusterRequest(name=name, ) if metadata is None: metadata = [] metadata = list(metadata) - request = bigtable_instance_admin_pb2.GetClusterRequest(name=name, ) - - routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - [('name', name)], ) - metadata.append(routing_header) - - return self._get_cluster( + try: + routing_header = [('name', name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header) + metadata.append(routing_metadata) + + return self._inner_api_calls['get_cluster']( request, retry=retry, timeout=timeout, metadata=metadata) def list_clusters(self, @@ -897,25 +938,40 @@ def list_clusters(self, to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ - if metadata is None: - metadata = [] - metadata = list(metadata) + # Wrap the transport method to add retry and timeout logic. + if 'list_clusters' not in self._inner_api_calls: + self._inner_api_calls[ + 'list_clusters'] = google.api_core.gapic_v1.method.wrap_method( + self.transport.list_clusters, + default_retry=self._method_configs['ListClusters'].retry, + default_timeout=self._method_configs['ListClusters'] + .timeout, + client_info=self._client_info, + ) + request = bigtable_instance_admin_pb2.ListClustersRequest( parent=parent, page_token=page_token, ) - - routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - [('parent', parent)], ) - metadata.append(routing_header) - - return self._list_clusters( + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [('parent', parent)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header) + metadata.append(routing_metadata) + + return self._inner_api_calls['list_clusters']( request, retry=retry, timeout=timeout, metadata=metadata) def update_cluster(self, name, - location, serve_nodes, + location=None, state=None, default_storage_type=None, retry=google.api_core.gapic_v1.method.DEFAULT, @@ -931,13 +987,10 @@ def update_cluster(self, >>> >>> name = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') >>> - >>> # TODO: Initialize ``location``: - >>> location = '' - >>> >>> # TODO: Initialize ``serve_nodes``: >>> serve_nodes = 0 >>> - >>> response = client.update_cluster(name, location, serve_nodes) + >>> response = client.update_cluster(name, serve_nodes) >>> >>> def callback(operation_future): ... # Handle result. @@ -952,13 +1005,13 @@ def update_cluster(self, name (str): (``OutputOnly``) The unique name of the cluster. Values are of the form ``projects//instances//clusters/[a-z][-a-z0-9]*``. + serve_nodes (int): The number of nodes allocated to this cluster. More nodes enable higher + throughput and more consistent performance. location (str): (``CreationOnly``) The location where this cluster's nodes and storage reside. For best performance, clients should be located as close as possible to this cluster. Currently only zones are supported, so values should be of the form ``projects//locations/``. 
- serve_nodes (int): The number of nodes allocated to this cluster. More nodes enable higher - throughput and more consistent performance. state (~google.cloud.bigtable_admin_v2.types.State): (``OutputOnly``) The current state of the cluster. default_storage_type (~google.cloud.bigtable_admin_v2.types.StorageType): (``CreationOnly``) @@ -983,26 +1036,41 @@ def update_cluster(self, to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ - if metadata is None: - metadata = [] - metadata = list(metadata) + # Wrap the transport method to add retry and timeout logic. + if 'update_cluster' not in self._inner_api_calls: + self._inner_api_calls[ + 'update_cluster'] = google.api_core.gapic_v1.method.wrap_method( + self.transport.update_cluster, + default_retry=self._method_configs['UpdateCluster'].retry, + default_timeout=self._method_configs['UpdateCluster'] + .timeout, + client_info=self._client_info, + ) + request = instance_pb2.Cluster( name=name, - location=location, serve_nodes=serve_nodes, + location=location, state=state, default_storage_type=default_storage_type, ) - - routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - [('name', name)], ) - metadata.append(routing_header) - - operation = self._update_cluster( + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [('name', name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header) + metadata.append(routing_metadata) + + operation = self._inner_api_calls['update_cluster']( request, retry=retry, timeout=timeout, metadata=metadata) return google.api_core.operation.from_gapic( operation, - self.operations_client, + self.transport._operations_client, instance_pb2.Cluster, metadata_type=bigtable_instance_admin_pb2.UpdateClusterMetadata, ) @@ -1043,16 +1111,31 @@ def delete_cluster(self, to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ + # Wrap the transport method to add retry and timeout logic. + if 'delete_cluster' not in self._inner_api_calls: + self._inner_api_calls[ + 'delete_cluster'] = google.api_core.gapic_v1.method.wrap_method( + self.transport.delete_cluster, + default_retry=self._method_configs['DeleteCluster'].retry, + default_timeout=self._method_configs['DeleteCluster'] + .timeout, + client_info=self._client_info, + ) + + request = bigtable_instance_admin_pb2.DeleteClusterRequest(name=name, ) if metadata is None: metadata = [] metadata = list(metadata) - request = bigtable_instance_admin_pb2.DeleteClusterRequest(name=name, ) - - routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - [('name', name)], ) - metadata.append(routing_header) - - self._delete_cluster( + try: + routing_header = [('name', name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header) + metadata.append(routing_metadata) + + self._inner_api_calls['delete_cluster']( request, retry=retry, timeout=timeout, metadata=metadata) def create_app_profile(self, @@ -1064,11 +1147,6 @@ def create_app_profile(self, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None): """ - This is a private alpha release of Cloud Bigtable replication. This feature - is not currently available to most Cloud Bigtable customers. This feature - might be changed in backward-incompatible ways and is not recommended for - production use. 
It is not subject to any SLA or deprecation policy. - Creates an app profile within an instance. Example: @@ -1117,21 +1195,37 @@ def create_app_profile(self, to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ - if metadata is None: - metadata = [] - metadata = list(metadata) + # Wrap the transport method to add retry and timeout logic. + if 'create_app_profile' not in self._inner_api_calls: + self._inner_api_calls[ + 'create_app_profile'] = google.api_core.gapic_v1.method.wrap_method( + self.transport.create_app_profile, + default_retry=self._method_configs[ + 'CreateAppProfile'].retry, + default_timeout=self._method_configs['CreateAppProfile'] + .timeout, + client_info=self._client_info, + ) + request = bigtable_instance_admin_pb2.CreateAppProfileRequest( parent=parent, app_profile_id=app_profile_id, app_profile=app_profile, ignore_warnings=ignore_warnings, ) - - routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - [('parent', parent)], ) - metadata.append(routing_header) - - return self._create_app_profile( + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [('parent', parent)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header) + metadata.append(routing_metadata) + + return self._inner_api_calls['create_app_profile']( request, retry=retry, timeout=timeout, metadata=metadata) def get_app_profile(self, @@ -1140,11 +1234,6 @@ def get_app_profile(self, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None): """ - This is a private alpha release of Cloud Bigtable replication. This feature - is not currently available to most Cloud Bigtable customers. This feature - might be changed in backward-incompatible ways and is not recommended for - production use. It is not subject to any SLA or deprecation policy. - Gets information about an app profile. Example: @@ -1178,16 +1267,31 @@ def get_app_profile(self, to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ + # Wrap the transport method to add retry and timeout logic. + if 'get_app_profile' not in self._inner_api_calls: + self._inner_api_calls[ + 'get_app_profile'] = google.api_core.gapic_v1.method.wrap_method( + self.transport.get_app_profile, + default_retry=self._method_configs['GetAppProfile'].retry, + default_timeout=self._method_configs['GetAppProfile'] + .timeout, + client_info=self._client_info, + ) + + request = bigtable_instance_admin_pb2.GetAppProfileRequest(name=name, ) if metadata is None: metadata = [] metadata = list(metadata) - request = bigtable_instance_admin_pb2.GetAppProfileRequest(name=name, ) - - routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - [('name', name)], ) - metadata.append(routing_header) - - return self._get_app_profile( + try: + routing_header = [('name', name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header) + metadata.append(routing_metadata) + + return self._inner_api_calls['get_app_profile']( request, retry=retry, timeout=timeout, metadata=metadata) def list_app_profiles(self, @@ -1196,11 +1300,6 @@ def list_app_profiles(self, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None): """ - This is a private alpha release of Cloud Bigtable replication. This feature - is not currently available to most Cloud Bigtable customers. 
This feature - might be changed in backward-incompatible ways and is not recommended for - production use. It is not subject to any SLA or deprecation policy. - Lists information about app profiles in an instance. Example: @@ -1210,13 +1309,15 @@ def list_app_profiles(self, >>> >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') >>> - >>> >>> # Iterate over all results >>> for element in client.list_app_profiles(parent): ... # process element ... pass >>> - >>> # Or iterate over results one page at a time + >>> + >>> # Alternatively: + >>> + >>> # Iterate over results one page at a time >>> for page in client.list_app_profiles(parent, options=CallOptions(page_token=INITIAL_PAGE)): ... for element in page: ... # process element @@ -1248,20 +1349,36 @@ def list_app_profiles(self, to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ + # Wrap the transport method to add retry and timeout logic. + if 'list_app_profiles' not in self._inner_api_calls: + self._inner_api_calls[ + 'list_app_profiles'] = google.api_core.gapic_v1.method.wrap_method( + self.transport.list_app_profiles, + default_retry=self._method_configs[ + 'ListAppProfiles'].retry, + default_timeout=self._method_configs['ListAppProfiles'] + .timeout, + client_info=self._client_info, + ) + + request = bigtable_instance_admin_pb2.ListAppProfilesRequest( + parent=parent, ) if metadata is None: metadata = [] metadata = list(metadata) - request = bigtable_instance_admin_pb2.ListAppProfilesRequest( - parent=parent, ) - - routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - [('parent', parent)], ) - metadata.append(routing_header) + try: + routing_header = [('parent', parent)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header) + metadata.append(routing_metadata) iterator = google.api_core.page_iterator.GRPCIterator( client=None, method=functools.partial( - self._list_app_profiles, + self._inner_api_calls['list_app_profiles'], retry=retry, timeout=timeout, metadata=metadata), @@ -1280,11 +1397,6 @@ def update_app_profile(self, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None): """ - This is a private alpha release of Cloud Bigtable replication. This feature - is not currently available to most Cloud Bigtable customers. This feature - might be changed in backward-incompatible ways and is not recommended for - production use. It is not subject to any SLA or deprecation policy. - Updates an app profile within an instance. Example: @@ -1337,24 +1449,40 @@ def update_app_profile(self, to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ - if metadata is None: - metadata = [] - metadata = list(metadata) + # Wrap the transport method to add retry and timeout logic. 
+ if 'update_app_profile' not in self._inner_api_calls: + self._inner_api_calls[ + 'update_app_profile'] = google.api_core.gapic_v1.method.wrap_method( + self.transport.update_app_profile, + default_retry=self._method_configs[ + 'UpdateAppProfile'].retry, + default_timeout=self._method_configs['UpdateAppProfile'] + .timeout, + client_info=self._client_info, + ) + request = bigtable_instance_admin_pb2.UpdateAppProfileRequest( app_profile=app_profile, update_mask=update_mask, ignore_warnings=ignore_warnings, ) - - routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - [('app_profile.name', app_profile.name)], ) - metadata.append(routing_header) - - operation = self._update_app_profile( + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [('app_profile.name', app_profile.name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header) + metadata.append(routing_metadata) + + operation = self._inner_api_calls['update_app_profile']( request, retry=retry, timeout=timeout, metadata=metadata) return google.api_core.operation.from_gapic( operation, - self.operations_client, + self.transport._operations_client, instance_pb2.AppProfile, metadata_type=bigtable_instance_admin_pb2.UpdateAppProfileMetadata, ) @@ -1366,11 +1494,6 @@ def delete_app_profile(self, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None): """ - This is a private alpha release of Cloud Bigtable replication. This feature - is not currently available to most Cloud Bigtable customers. This feature - might be changed in backward-incompatible ways and is not recommended for - production use. It is not subject to any SLA or deprecation policy. - Deletes an app profile from an instance. Example: @@ -1405,19 +1528,35 @@ def delete_app_profile(self, to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ - if metadata is None: - metadata = [] - metadata = list(metadata) + # Wrap the transport method to add retry and timeout logic. + if 'delete_app_profile' not in self._inner_api_calls: + self._inner_api_calls[ + 'delete_app_profile'] = google.api_core.gapic_v1.method.wrap_method( + self.transport.delete_app_profile, + default_retry=self._method_configs[ + 'DeleteAppProfile'].retry, + default_timeout=self._method_configs['DeleteAppProfile'] + .timeout, + client_info=self._client_info, + ) + request = bigtable_instance_admin_pb2.DeleteAppProfileRequest( name=name, ignore_warnings=ignore_warnings, ) - - routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - [('name', name)], ) - metadata.append(routing_header) - - self._delete_app_profile( + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [('name', name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header) + metadata.append(routing_metadata) + + self._inner_api_calls['delete_app_profile']( request, retry=retry, timeout=timeout, metadata=metadata) def get_iam_policy(self, @@ -1426,12 +1565,6 @@ def get_iam_policy(self, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None): """ - This is a private alpha release of Cloud Bigtable instance level - permissions. This feature is not currently available to most Cloud Bigtable - customers. This feature might be changed in backward-incompatible ways and - is not recommended for production use. 
It is not subject to any SLA or - deprecation policy. - Gets the access control policy for an instance resource. Returns an empty policy if an instance exists but does not have a policy set. @@ -1467,16 +1600,31 @@ def get_iam_policy(self, to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ + # Wrap the transport method to add retry and timeout logic. + if 'get_iam_policy' not in self._inner_api_calls: + self._inner_api_calls[ + 'get_iam_policy'] = google.api_core.gapic_v1.method.wrap_method( + self.transport.get_iam_policy, + default_retry=self._method_configs['GetIamPolicy'].retry, + default_timeout=self._method_configs['GetIamPolicy'] + .timeout, + client_info=self._client_info, + ) + + request = iam_policy_pb2.GetIamPolicyRequest(resource=resource, ) if metadata is None: metadata = [] metadata = list(metadata) - request = iam_policy_pb2.GetIamPolicyRequest(resource=resource, ) - - routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - [('resource', resource)], ) - metadata.append(routing_header) - - return self._get_iam_policy( + try: + routing_header = [('resource', resource)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header) + metadata.append(routing_metadata) + + return self._inner_api_calls['get_iam_policy']( request, retry=retry, timeout=timeout, metadata=metadata) def set_iam_policy(self, @@ -1486,12 +1634,6 @@ def set_iam_policy(self, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None): """ - This is a private alpha release of Cloud Bigtable instance level - permissions. This feature is not currently available to most Cloud Bigtable - customers. This feature might be changed in backward-incompatible ways and - is not recommended for production use. It is not subject to any SLA or - deprecation policy. - Sets the access control policy on an instance resource. Replaces any existing policy. @@ -1536,19 +1678,34 @@ def set_iam_policy(self, to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ - if metadata is None: - metadata = [] - metadata = list(metadata) + # Wrap the transport method to add retry and timeout logic. + if 'set_iam_policy' not in self._inner_api_calls: + self._inner_api_calls[ + 'set_iam_policy'] = google.api_core.gapic_v1.method.wrap_method( + self.transport.set_iam_policy, + default_retry=self._method_configs['SetIamPolicy'].retry, + default_timeout=self._method_configs['SetIamPolicy'] + .timeout, + client_info=self._client_info, + ) + request = iam_policy_pb2.SetIamPolicyRequest( resource=resource, policy=policy, ) - - routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - [('resource', resource)], ) - metadata.append(routing_header) - - return self._set_iam_policy( + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [('resource', resource)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header) + metadata.append(routing_metadata) + + return self._inner_api_calls['set_iam_policy']( request, retry=retry, timeout=timeout, metadata=metadata) def test_iam_permissions(self, @@ -1558,12 +1715,6 @@ def test_iam_permissions(self, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None): """ - This is a private alpha release of Cloud Bigtable instance level - permissions. 
This feature is not currently available to most Cloud Bigtable - customers. This feature might be changed in backward-incompatible ways and - is not recommended for production use. It is not subject to any SLA or - deprecation policy. - Returns permissions that the caller has on the specified instance resource. Example: @@ -1605,17 +1756,33 @@ def test_iam_permissions(self, to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ - if metadata is None: - metadata = [] - metadata = list(metadata) + # Wrap the transport method to add retry and timeout logic. + if 'test_iam_permissions' not in self._inner_api_calls: + self._inner_api_calls[ + 'test_iam_permissions'] = google.api_core.gapic_v1.method.wrap_method( + self.transport.test_iam_permissions, + default_retry=self._method_configs[ + 'TestIamPermissions'].retry, + default_timeout=self._method_configs['TestIamPermissions'] + .timeout, + client_info=self._client_info, + ) + request = iam_policy_pb2.TestIamPermissionsRequest( resource=resource, permissions=permissions, ) - - routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - [('resource', resource)], ) - metadata.append(routing_header) - - return self._test_iam_permissions( + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [('resource', resource)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header) + metadata.append(routing_metadata) + + return self._inner_api_calls['test_iam_permissions']( request, retry=retry, timeout=timeout, metadata=metadata) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py index 2dbac887a560..e821241a9808 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- +# # Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,7 +17,9 @@ import functools import pkg_resources +import warnings +from google.oauth2 import service_account import google.api_core.gapic_v1.client_info import google.api_core.gapic_v1.config import google.api_core.gapic_v1.method @@ -26,9 +30,11 @@ import google.api_core.page_iterator import google.api_core.path_template import google.api_core.protobuf_helpers +import grpc from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client_config from google.cloud.bigtable_admin_v2.gapic import enums +from google.cloud.bigtable_admin_v2.gapic.transports import bigtable_table_admin_grpc_transport from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2 from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2_grpc from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2 @@ -58,24 +64,31 @@ class BigtableTableAdminClient(object): SERVICE_ADDRESS = 'bigtableadmin.googleapis.com:443' """The default address of the service.""" - # The scopes needed to make gRPC calls to all of the methods defined in - # this service - _DEFAULT_SCOPES = ( - 'https://www.googleapis.com/auth/bigtable.admin', - 'https://www.googleapis.com/auth/bigtable.admin.cluster', - 
'https://www.googleapis.com/auth/bigtable.admin.instance', - 'https://www.googleapis.com/auth/bigtable.admin.table', - 'https://www.googleapis.com/auth/cloud-bigtable.admin', - 'https://www.googleapis.com/auth/cloud-bigtable.admin.cluster', - 'https://www.googleapis.com/auth/cloud-bigtable.admin.table', - 'https://www.googleapis.com/auth/cloud-platform', - 'https://www.googleapis.com/auth/cloud-platform.read-only', - ) - - # The name of the interface for this client. This is the key used to find - # method configuration in the client_config dictionary. + # The name of the interface for this client. This is the key used to + # find the method configuration in the client_config dictionary. _INTERFACE_NAME = 'google.bigtable.admin.v2.BigtableTableAdmin' + @classmethod + def from_service_account_file(cls, filename, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + BigtableTableAdminClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs['credentials'] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + @classmethod def instance_path(cls, project, instance): """Return a fully-qualified instance string.""" @@ -117,6 +130,7 @@ def table_path(cls, project, instance, table): ) def __init__(self, + transport=None, channel=None, credentials=None, client_config=bigtable_table_admin_client_config.config, @@ -124,140 +138,83 @@ def __init__(self, """Constructor. Args: - channel (grpc.Channel): A ``Channel`` instance through - which to make calls. This argument is mutually exclusive + transport (Union[~.BigtableTableAdminGrpcTransport, + Callable[[~.Credentials, type], ~.BigtableTableAdminGrpcTransport]): A transport + instance, responsible for actually making the API calls. + The default transport uses the gRPC protocol. + This argument may also be a callable which returns a + transport instance. Callables will be sent the credentials + as the first argument and the default transport class as + the second argument. + channel (grpc.Channel): DEPRECATED. A ``Channel`` instance + through which to make calls. This argument is mutually exclusive with ``credentials``; providing both will raise an exception. credentials (google.auth.credentials.Credentials): The authorization credentials to attach to requests. These credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - client_config (dict): A dictionary of call options for each - method. If not specified, the default configuration is used. + This argument is mutually exclusive with providing a + transport instance to ``transport``; doing so will raise + an exception. + client_config (dict): DEPRECATED. A dictionary of call options for + each method. If not specified, the default configuration is used. client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. 
""" - # If both `channel` and `credentials` are specified, raise an - # exception (channels come with credentials baked in already). - if channel is not None and credentials is not None: - raise ValueError( - 'The `channel` and `credentials` arguments to {} are mutually ' - 'exclusive.'.format(self.__class__.__name__), ) - - # Create the channel. - if channel is None: - channel = google.api_core.grpc_helpers.create_channel( - self.SERVICE_ADDRESS, + # Raise deprecation warnings for things we want to go away. + if client_config: + warnings.warn('The `client_config` argument is deprecated.', + PendingDeprecationWarning) + if channel: + warnings.warn( + 'The `channel` argument is deprecated; use ' + '`transport` instead.', PendingDeprecationWarning) + + # Instantiate the transport. + # The transport is responsible for handling serialization and + # deserialization and actually sending data to the service. + if transport: + if callable(transport): + self.transport = transport( + credentials=credentials, + default_class=bigtable_table_admin_grpc_transport. + BigtableTableAdminGrpcTransport, + ) + else: + if credentials: + raise ValueError( + 'Received both a transport instance and ' + 'credentials; these are mutually exclusive.') + self.transport = transport + else: + self.transport = bigtable_table_admin_grpc_transport.BigtableTableAdminGrpcTransport( + address=self.SERVICE_ADDRESS, + channel=channel, credentials=credentials, - scopes=self._DEFAULT_SCOPES, ) - # Create the gRPC stubs. - self.bigtable_table_admin_stub = ( - bigtable_table_admin_pb2_grpc.BigtableTableAdminStub(channel)) - - # Operations client for methods that return long-running operations - # futures. - self.operations_client = ( - google.api_core.operations_v1.OperationsClient(channel)) - if client_info is None: client_info = ( google.api_core.gapic_v1.client_info.DEFAULT_CLIENT_INFO) client_info.gapic_version = _GAPIC_LIBRARY_VERSION + self._client_info = client_info # Parse out the default settings for retry and timeout for each RPC # from the client configuration. # (Ordinarily, these are the defaults specified in the `*_config.py` # file next to this one.) - method_configs = google.api_core.gapic_v1.config.parse_method_configs( + self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( client_config['interfaces'][self._INTERFACE_NAME], ) - # Write the "inner API call" methods to the class. - # These are wrapped versions of the gRPC stub methods, with retry and - # timeout configuration applied, called by the public methods on - # this class. 
- self._create_table = google.api_core.gapic_v1.method.wrap_method( - self.bigtable_table_admin_stub.CreateTable, - default_retry=method_configs['CreateTable'].retry, - default_timeout=method_configs['CreateTable'].timeout, - client_info=client_info, - ) - self._create_table_from_snapshot = google.api_core.gapic_v1.method.wrap_method( - self.bigtable_table_admin_stub.CreateTableFromSnapshot, - default_retry=method_configs['CreateTableFromSnapshot'].retry, - default_timeout=method_configs['CreateTableFromSnapshot'].timeout, - client_info=client_info, - ) - self._list_tables = google.api_core.gapic_v1.method.wrap_method( - self.bigtable_table_admin_stub.ListTables, - default_retry=method_configs['ListTables'].retry, - default_timeout=method_configs['ListTables'].timeout, - client_info=client_info, - ) - self._get_table = google.api_core.gapic_v1.method.wrap_method( - self.bigtable_table_admin_stub.GetTable, - default_retry=method_configs['GetTable'].retry, - default_timeout=method_configs['GetTable'].timeout, - client_info=client_info, - ) - self._delete_table = google.api_core.gapic_v1.method.wrap_method( - self.bigtable_table_admin_stub.DeleteTable, - default_retry=method_configs['DeleteTable'].retry, - default_timeout=method_configs['DeleteTable'].timeout, - client_info=client_info, - ) - self._modify_column_families = google.api_core.gapic_v1.method.wrap_method( - self.bigtable_table_admin_stub.ModifyColumnFamilies, - default_retry=method_configs['ModifyColumnFamilies'].retry, - default_timeout=method_configs['ModifyColumnFamilies'].timeout, - client_info=client_info, - ) - self._drop_row_range = google.api_core.gapic_v1.method.wrap_method( - self.bigtable_table_admin_stub.DropRowRange, - default_retry=method_configs['DropRowRange'].retry, - default_timeout=method_configs['DropRowRange'].timeout, - client_info=client_info, - ) - self._generate_consistency_token = google.api_core.gapic_v1.method.wrap_method( - self.bigtable_table_admin_stub.GenerateConsistencyToken, - default_retry=method_configs['GenerateConsistencyToken'].retry, - default_timeout=method_configs['GenerateConsistencyToken'].timeout, - client_info=client_info, - ) - self._check_consistency = google.api_core.gapic_v1.method.wrap_method( - self.bigtable_table_admin_stub.CheckConsistency, - default_retry=method_configs['CheckConsistency'].retry, - default_timeout=method_configs['CheckConsistency'].timeout, - client_info=client_info, - ) - self._snapshot_table = google.api_core.gapic_v1.method.wrap_method( - self.bigtable_table_admin_stub.SnapshotTable, - default_retry=method_configs['SnapshotTable'].retry, - default_timeout=method_configs['SnapshotTable'].timeout, - client_info=client_info, - ) - self._get_snapshot = google.api_core.gapic_v1.method.wrap_method( - self.bigtable_table_admin_stub.GetSnapshot, - default_retry=method_configs['GetSnapshot'].retry, - default_timeout=method_configs['GetSnapshot'].timeout, - client_info=client_info, - ) - self._list_snapshots = google.api_core.gapic_v1.method.wrap_method( - self.bigtable_table_admin_stub.ListSnapshots, - default_retry=method_configs['ListSnapshots'].retry, - default_timeout=method_configs['ListSnapshots'].timeout, - client_info=client_info, - ) - self._delete_snapshot = google.api_core.gapic_v1.method.wrap_method( - self.bigtable_table_admin_stub.DeleteSnapshot, - default_retry=method_configs['DeleteSnapshot'].retry, - default_timeout=method_configs['DeleteSnapshot'].timeout, - client_info=client_info, - ) + # Save a dictionary of cached API call functions. 
+ # These are the actual callables which invoke the proper + # transport methods, wrapped with `wrap_method` to add retry, + # timeout, and the like. + self._inner_api_calls = {} # Service calls def create_table(self, @@ -337,21 +294,36 @@ def create_table(self, to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ - if metadata is None: - metadata = [] - metadata = list(metadata) + # Wrap the transport method to add retry and timeout logic. + if 'create_table' not in self._inner_api_calls: + self._inner_api_calls[ + 'create_table'] = google.api_core.gapic_v1.method.wrap_method( + self.transport.create_table, + default_retry=self._method_configs['CreateTable'].retry, + default_timeout=self._method_configs['CreateTable'] + .timeout, + client_info=self._client_info, + ) + request = bigtable_table_admin_pb2.CreateTableRequest( parent=parent, table_id=table_id, table=table, initial_splits=initial_splits, ) - - routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - [('parent', parent)], ) - metadata.append(routing_header) - - return self._create_table( + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [('parent', parent)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header) + metadata.append(routing_metadata) + + return self._inner_api_calls['create_table']( request, retry=retry, timeout=timeout, metadata=metadata) def create_table_from_snapshot( @@ -363,14 +335,15 @@ def create_table_from_snapshot( timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None): """ - This is a private alpha release of Cloud Bigtable snapshots. This feature - is not currently available to most Cloud Bigtable customers. This feature - might be changed in backward-incompatible ways and is not recommended for - production use. It is not subject to any SLA or deprecation policy. - Creates a new table from the specified snapshot. The target table must not exist. The snapshot and the table must be in the same instance. + Note: This is a private alpha release of Cloud Bigtable snapshots. This + feature is not currently available to most Cloud Bigtable customers. This + feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or deprecation + policy. + Example: >>> from google.cloud import bigtable_admin_v2 >>> @@ -423,24 +396,40 @@ def create_table_from_snapshot( to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ - if metadata is None: - metadata = [] - metadata = list(metadata) + # Wrap the transport method to add retry and timeout logic. 
+ if 'create_table_from_snapshot' not in self._inner_api_calls: + self._inner_api_calls[ + 'create_table_from_snapshot'] = google.api_core.gapic_v1.method.wrap_method( + self.transport.create_table_from_snapshot, + default_retry=self._method_configs[ + 'CreateTableFromSnapshot'].retry, + default_timeout=self._method_configs[ + 'CreateTableFromSnapshot'].timeout, + client_info=self._client_info, + ) + request = bigtable_table_admin_pb2.CreateTableFromSnapshotRequest( parent=parent, table_id=table_id, source_snapshot=source_snapshot, ) - - routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - [('parent', parent)], ) - metadata.append(routing_header) - - operation = self._create_table_from_snapshot( + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [('parent', parent)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header) + metadata.append(routing_metadata) + + operation = self._inner_api_calls['create_table_from_snapshot']( request, retry=retry, timeout=timeout, metadata=metadata) return google.api_core.operation.from_gapic( operation, - self.operations_client, + self.transport._operations_client, table_pb2.Table, metadata_type=bigtable_table_admin_pb2. CreateTableFromSnapshotMetadata, @@ -462,13 +451,15 @@ def list_tables(self, >>> >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') >>> - >>> >>> # Iterate over all results >>> for element in client.list_tables(parent): ... # process element ... pass >>> - >>> # Or iterate over results one page at a time + >>> + >>> # Alternatively: + >>> + >>> # Iterate over results one page at a time >>> for page in client.list_tables(parent, options=CallOptions(page_token=INITIAL_PAGE)): ... for element in page: ... # process element @@ -501,22 +492,36 @@ def list_tables(self, to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ - if metadata is None: - metadata = [] - metadata = list(metadata) + # Wrap the transport method to add retry and timeout logic. + if 'list_tables' not in self._inner_api_calls: + self._inner_api_calls[ + 'list_tables'] = google.api_core.gapic_v1.method.wrap_method( + self.transport.list_tables, + default_retry=self._method_configs['ListTables'].retry, + default_timeout=self._method_configs['ListTables'].timeout, + client_info=self._client_info, + ) + request = bigtable_table_admin_pb2.ListTablesRequest( parent=parent, view=view, ) - - routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - [('parent', parent)], ) - metadata.append(routing_header) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [('parent', parent)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header) + metadata.append(routing_metadata) iterator = google.api_core.page_iterator.GRPCIterator( client=None, method=functools.partial( - self._list_tables, + self._inner_api_calls['list_tables'], retry=retry, timeout=timeout, metadata=metadata), @@ -570,19 +575,33 @@ def get_table(self, to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ - if metadata is None: - metadata = [] - metadata = list(metadata) + # Wrap the transport method to add retry and timeout logic. 
+ if 'get_table' not in self._inner_api_calls: + self._inner_api_calls[ + 'get_table'] = google.api_core.gapic_v1.method.wrap_method( + self.transport.get_table, + default_retry=self._method_configs['GetTable'].retry, + default_timeout=self._method_configs['GetTable'].timeout, + client_info=self._client_info, + ) + request = bigtable_table_admin_pb2.GetTableRequest( name=name, view=view, ) - - routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - [('name', name)], ) - metadata.append(routing_header) - - return self._get_table( + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [('name', name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header) + metadata.append(routing_metadata) + + return self._inner_api_calls['get_table']( request, retry=retry, timeout=timeout, metadata=metadata) def delete_table(self, @@ -622,16 +641,31 @@ def delete_table(self, to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ + # Wrap the transport method to add retry and timeout logic. + if 'delete_table' not in self._inner_api_calls: + self._inner_api_calls[ + 'delete_table'] = google.api_core.gapic_v1.method.wrap_method( + self.transport.delete_table, + default_retry=self._method_configs['DeleteTable'].retry, + default_timeout=self._method_configs['DeleteTable'] + .timeout, + client_info=self._client_info, + ) + + request = bigtable_table_admin_pb2.DeleteTableRequest(name=name, ) if metadata is None: metadata = [] metadata = list(metadata) - request = bigtable_table_admin_pb2.DeleteTableRequest(name=name, ) - - routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - [('name', name)], ) - metadata.append(routing_header) - - self._delete_table( + try: + routing_header = [('name', name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header) + metadata.append(routing_metadata) + + self._inner_api_calls['delete_table']( request, retry=retry, timeout=timeout, metadata=metadata) def modify_column_families(self, @@ -687,19 +721,35 @@ def modify_column_families(self, to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ - if metadata is None: - metadata = [] - metadata = list(metadata) + # Wrap the transport method to add retry and timeout logic. 
+ if 'modify_column_families' not in self._inner_api_calls: + self._inner_api_calls[ + 'modify_column_families'] = google.api_core.gapic_v1.method.wrap_method( + self.transport.modify_column_families, + default_retry=self._method_configs['ModifyColumnFamilies'] + .retry, + default_timeout=self._method_configs[ + 'ModifyColumnFamilies'].timeout, + client_info=self._client_info, + ) + request = bigtable_table_admin_pb2.ModifyColumnFamiliesRequest( name=name, modifications=modifications, ) - - routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - [('name', name)], ) - metadata.append(routing_header) - - return self._modify_column_families( + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [('name', name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header) + metadata.append(routing_metadata) + + return self._inner_api_calls['modify_column_families']( request, retry=retry, timeout=timeout, metadata=metadata) def drop_row_range(self, @@ -746,9 +796,17 @@ def drop_row_range(self, to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ - if metadata is None: - metadata = [] - metadata = list(metadata) + # Wrap the transport method to add retry and timeout logic. + if 'drop_row_range' not in self._inner_api_calls: + self._inner_api_calls[ + 'drop_row_range'] = google.api_core.gapic_v1.method.wrap_method( + self.transport.drop_row_range, + default_retry=self._method_configs['DropRowRange'].retry, + default_timeout=self._method_configs['DropRowRange'] + .timeout, + client_info=self._client_info, + ) + # Sanity check: We have some fields which are mutually exclusive; # raise ValueError if more than one is sent. google.api_core.protobuf_helpers.check_oneof( @@ -761,12 +819,19 @@ def drop_row_range(self, row_key_prefix=row_key_prefix, delete_all_data_from_table=delete_all_data_from_table, ) - - routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - [('name', name)], ) - metadata.append(routing_header) - - self._drop_row_range( + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [('name', name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header) + metadata.append(routing_metadata) + + self._inner_api_calls['drop_row_range']( request, retry=retry, timeout=timeout, metadata=metadata) def generate_consistency_token( @@ -776,11 +841,6 @@ def generate_consistency_token( timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None): """ - This is a private alpha release of Cloud Bigtable replication. This feature - is not currently available to most Cloud Bigtable customers. This feature - might be changed in backward-incompatible ways and is not recommended for - production use. It is not subject to any SLA or deprecation policy. - Generates a consistency token for a Table, which can be used in CheckConsistency to check whether mutations to the table that finished before this call started have been replicated. The tokens will be available @@ -818,17 +878,33 @@ def generate_consistency_token( to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ + # Wrap the transport method to add retry and timeout logic. 
+ if 'generate_consistency_token' not in self._inner_api_calls: + self._inner_api_calls[ + 'generate_consistency_token'] = google.api_core.gapic_v1.method.wrap_method( + self.transport.generate_consistency_token, + default_retry=self._method_configs[ + 'GenerateConsistencyToken'].retry, + default_timeout=self._method_configs[ + 'GenerateConsistencyToken'].timeout, + client_info=self._client_info, + ) + + request = bigtable_table_admin_pb2.GenerateConsistencyTokenRequest( + name=name, ) if metadata is None: metadata = [] metadata = list(metadata) - request = bigtable_table_admin_pb2.GenerateConsistencyTokenRequest( - name=name, ) - - routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - [('name', name)], ) - metadata.append(routing_header) - - return self._generate_consistency_token( + try: + routing_header = [('name', name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header) + metadata.append(routing_metadata) + + return self._inner_api_calls['generate_consistency_token']( request, retry=retry, timeout=timeout, metadata=metadata) def check_consistency(self, @@ -838,11 +914,6 @@ def check_consistency(self, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None): """ - This is a private alpha release of Cloud Bigtable replication. This feature - is not currently available to most Cloud Bigtable customers. This feature - might be changed in backward-incompatible ways and is not recommended for - production use. It is not subject to any SLA or deprecation policy. - Checks replication consistency based on a consistency token, that is, if replication has caught up based on the conditions specified in the token and the check request. @@ -883,19 +954,35 @@ def check_consistency(self, to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ - if metadata is None: - metadata = [] - metadata = list(metadata) + # Wrap the transport method to add retry and timeout logic. + if 'check_consistency' not in self._inner_api_calls: + self._inner_api_calls[ + 'check_consistency'] = google.api_core.gapic_v1.method.wrap_method( + self.transport.check_consistency, + default_retry=self._method_configs[ + 'CheckConsistency'].retry, + default_timeout=self._method_configs['CheckConsistency'] + .timeout, + client_info=self._client_info, + ) + request = bigtable_table_admin_pb2.CheckConsistencyRequest( name=name, consistency_token=consistency_token, ) - - routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - [('name', name)], ) - metadata.append(routing_header) - - return self._check_consistency( + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [('name', name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header) + metadata.append(routing_metadata) + + return self._inner_api_calls['check_consistency']( request, retry=retry, timeout=timeout, metadata=metadata) def snapshot_table(self, @@ -908,14 +995,15 @@ def snapshot_table(self, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None): """ - This is a private alpha release of Cloud Bigtable snapshots. This feature - is not currently available to most Cloud Bigtable customers. This feature - might be changed in backward-incompatible ways and is not recommended for - production use. It is not subject to any SLA or deprecation policy. 
- Creates a new snapshot in the specified cluster from the specified source table. The cluster and the table must be in the same instance. + Note: This is a private alpha release of Cloud Bigtable snapshots. This + feature is not currently available to most Cloud Bigtable customers. This + feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or deprecation + policy. + Example: >>> from google.cloud import bigtable_admin_v2 >>> @@ -980,9 +1068,17 @@ def snapshot_table(self, to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ - if metadata is None: - metadata = [] - metadata = list(metadata) + # Wrap the transport method to add retry and timeout logic. + if 'snapshot_table' not in self._inner_api_calls: + self._inner_api_calls[ + 'snapshot_table'] = google.api_core.gapic_v1.method.wrap_method( + self.transport.snapshot_table, + default_retry=self._method_configs['SnapshotTable'].retry, + default_timeout=self._method_configs['SnapshotTable'] + .timeout, + client_info=self._client_info, + ) + request = bigtable_table_admin_pb2.SnapshotTableRequest( name=name, cluster=cluster, @@ -990,16 +1086,23 @@ def snapshot_table(self, description=description, ttl=ttl, ) - - routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - [('name', name)], ) - metadata.append(routing_header) - - operation = self._snapshot_table( + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [('name', name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header) + metadata.append(routing_metadata) + + operation = self._inner_api_calls['snapshot_table']( request, retry=retry, timeout=timeout, metadata=metadata) return google.api_core.operation.from_gapic( operation, - self.operations_client, + self.transport._operations_client, table_pb2.Snapshot, metadata_type=bigtable_table_admin_pb2.SnapshotTableMetadata, ) @@ -1010,13 +1113,14 @@ def get_snapshot(self, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None): """ - This is a private alpha release of Cloud Bigtable snapshots. This feature - is not currently available to most Cloud Bigtable customers. This feature - might be changed in backward-incompatible ways and is not recommended for - production use. It is not subject to any SLA or deprecation policy. - Gets metadata information about the specified snapshot. + Note: This is a private alpha release of Cloud Bigtable snapshots. This + feature is not currently available to most Cloud Bigtable customers. This + feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or deprecation + policy. + Example: >>> from google.cloud import bigtable_admin_v2 >>> @@ -1049,16 +1153,31 @@ def get_snapshot(self, to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ + # Wrap the transport method to add retry and timeout logic. 
+ if 'get_snapshot' not in self._inner_api_calls: + self._inner_api_calls[ + 'get_snapshot'] = google.api_core.gapic_v1.method.wrap_method( + self.transport.get_snapshot, + default_retry=self._method_configs['GetSnapshot'].retry, + default_timeout=self._method_configs['GetSnapshot'] + .timeout, + client_info=self._client_info, + ) + + request = bigtable_table_admin_pb2.GetSnapshotRequest(name=name, ) if metadata is None: metadata = [] metadata = list(metadata) - request = bigtable_table_admin_pb2.GetSnapshotRequest(name=name, ) - - routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - [('name', name)], ) - metadata.append(routing_header) - - return self._get_snapshot( + try: + routing_header = [('name', name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header) + metadata.append(routing_metadata) + + return self._inner_api_calls['get_snapshot']( request, retry=retry, timeout=timeout, metadata=metadata) def list_snapshots(self, @@ -1068,13 +1187,14 @@ def list_snapshots(self, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None): """ - This is a private alpha release of Cloud Bigtable snapshots. This feature - is not currently available to most Cloud Bigtable customers. This feature - might be changed in backward-incompatible ways and is not recommended for - production use. It is not subject to any SLA or deprecation policy. - Lists all snapshots associated with the specified cluster. + Note: This is a private alpha release of Cloud Bigtable snapshots. This + feature is not currently available to most Cloud Bigtable customers. This + feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or deprecation + policy. + Example: >>> from google.cloud import bigtable_admin_v2 >>> @@ -1082,13 +1202,15 @@ def list_snapshots(self, >>> >>> parent = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') >>> - >>> >>> # Iterate over all results >>> for element in client.list_snapshots(parent): ... # process element ... pass >>> - >>> # Or iterate over results one page at a time + >>> + >>> # Alternatively: + >>> + >>> # Iterate over results one page at a time >>> for page in client.list_snapshots(parent, options=CallOptions(page_token=INITIAL_PAGE)): ... for element in page: ... # process element @@ -1127,22 +1249,37 @@ def list_snapshots(self, to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ - if metadata is None: - metadata = [] - metadata = list(metadata) + # Wrap the transport method to add retry and timeout logic. 
+ if 'list_snapshots' not in self._inner_api_calls: + self._inner_api_calls[ + 'list_snapshots'] = google.api_core.gapic_v1.method.wrap_method( + self.transport.list_snapshots, + default_retry=self._method_configs['ListSnapshots'].retry, + default_timeout=self._method_configs['ListSnapshots'] + .timeout, + client_info=self._client_info, + ) + request = bigtable_table_admin_pb2.ListSnapshotsRequest( parent=parent, page_size=page_size, ) - - routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - [('parent', parent)], ) - metadata.append(routing_header) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [('parent', parent)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header) + metadata.append(routing_metadata) iterator = google.api_core.page_iterator.GRPCIterator( client=None, method=functools.partial( - self._list_snapshots, + self._inner_api_calls['list_snapshots'], retry=retry, timeout=timeout, metadata=metadata), @@ -1159,13 +1296,14 @@ def delete_snapshot(self, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None): """ - This is a private alpha release of Cloud Bigtable snapshots. This feature - is not currently available to most Cloud Bigtable customers. This feature - might be changed in backward-incompatible ways and is not recommended for - production use. It is not subject to any SLA or deprecation policy. - Permanently deletes the specified snapshot. + Note: This is a private alpha release of Cloud Bigtable snapshots. This + feature is not currently available to most Cloud Bigtable customers. This + feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or deprecation + policy. + Example: >>> from google.cloud import bigtable_admin_v2 >>> @@ -1195,14 +1333,29 @@ def delete_snapshot(self, to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ + # Wrap the transport method to add retry and timeout logic. 
+ if 'delete_snapshot' not in self._inner_api_calls: + self._inner_api_calls[ + 'delete_snapshot'] = google.api_core.gapic_v1.method.wrap_method( + self.transport.delete_snapshot, + default_retry=self._method_configs['DeleteSnapshot'].retry, + default_timeout=self._method_configs['DeleteSnapshot'] + .timeout, + client_info=self._client_info, + ) + + request = bigtable_table_admin_pb2.DeleteSnapshotRequest(name=name, ) if metadata is None: metadata = [] metadata = list(metadata) - request = bigtable_table_admin_pb2.DeleteSnapshotRequest(name=name, ) - - routing_header = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - [('name', name)], ) - metadata.append(routing_header) - - self._delete_snapshot( + try: + routing_header = [('name', name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header) + metadata.append(routing_metadata) + + self._inner_api_calls['delete_snapshot']( request, retry=retry, timeout=timeout, metadata=metadata) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py index e5e0161aeb4b..61d5fcc95194 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py @@ -14,13 +14,31 @@ "rpc_timeout_multiplier": 1.0, "max_rpc_timeout_millis": 20000, "total_timeout_millis": 600000 + }, + "create_table": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 130000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 130000, + "total_timeout_millis": 3600000 + }, + "drop_row_range": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 900000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 900000, + "total_timeout_millis": 3600000 } }, "methods": { "CreateTable": { "timeout_millis": 130000, "retry_codes_name": "non_idempotent", - "retry_params_name": "default" + "retry_params_name": "create_table" }, "CreateTableFromSnapshot": { "timeout_millis": 60000, @@ -48,9 +66,9 @@ "retry_params_name": "default" }, "DropRowRange": { - "timeout_millis": 60000, + "timeout_millis": 900000, "retry_codes_name": "non_idempotent", - "retry_params_name": "default" + "retry_params_name": "drop_row_range" }, "GenerateConsistencyToken": { "timeout_millis": 60000, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py index db0317891abf..ac1e2d44e8bf 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- +# # Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -13,8 +15,10 @@ # limitations under the License. """Wrappers for protocol buffer enum types.""" +import enum + -class StorageType(object): +class StorageType(enum.IntEnum): """ Storage media types for persisting Bigtable data. 
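
The enums.py hunks here switch the generated wrappers from plain classes to ``enum.IntEnum``. As a rough illustration only (the member names and values below are reproduced for the sketch, not taken verbatim from this hunk), an ``IntEnum`` stays interchangeable with the raw protobuf integers while adding readable names:

    >>> import enum
    >>>
    >>> class StorageType(enum.IntEnum):
    ...     STORAGE_TYPE_UNSPECIFIED = 0
    ...     SSD = 1
    ...     HDD = 2
    ...
    >>> StorageType.SSD == 1   # still compares equal to the raw proto int
    True
    >>> StorageType(2).name    # wire values now map back to readable names
    'HDD'
    >>> int(StorageType.HDD)   # and serialize back to plain ints for requests
    2

Because ``IntEnum`` subclasses ``int``, existing comparisons and request fields that expect plain integers keep working unchanged.
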
@@ -29,7 +33,7 @@ class StorageType(object): class Instance(object): - class State(object): + class State(enum.IntEnum): """ Possible states of an instance. @@ -44,7 +48,7 @@ class State(object): READY = 1 CREATING = 2 - class Type(object): + class Type(enum.IntEnum): """ The type of the instance. @@ -68,7 +72,7 @@ class Type(object): class Cluster(object): - class State(object): + class State(enum.IntEnum): """ Possible states of a cluster. @@ -94,7 +98,7 @@ class State(object): class Table(object): - class TimestampGranularity(object): + class TimestampGranularity(enum.IntEnum): """ Possible timestamp granularities to use when keeping multiple versions of data in a table. @@ -107,7 +111,7 @@ class TimestampGranularity(object): TIMESTAMP_GRANULARITY_UNSPECIFIED = 0 MILLIS = 1 - class View(object): + class View(enum.IntEnum): """ Defines a view over a table's fields. @@ -115,13 +119,7 @@ class View(object): VIEW_UNSPECIFIED (int): Uses the default view for each method as documented in its request. NAME_ONLY (int): Only populates ``name``. SCHEMA_VIEW (int): Only populates ``name`` and fields related to the table's schema. - REPLICATION_VIEW (int): This is a private alpha release of Cloud Bigtable replication. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or - deprecation policy. - - Only populates ``name`` and fields related to the table's + REPLICATION_VIEW (int): Only populates ``name`` and fields related to the table's replication state. FULL (int): Populates all fields. """ @@ -132,7 +130,7 @@ class View(object): FULL = 4 class ClusterState(object): - class ReplicationState(object): + class ReplicationState(enum.IntEnum): """ Table replication states. @@ -140,18 +138,14 @@ class ReplicationState(object): STATE_NOT_KNOWN (int): The replication state of the table is unknown in this cluster. INITIALIZING (int): The cluster was recently created, and the table must finish copying over pre-existing data from other clusters before it can begin - receiving live replication updates and serving - ``Data API`` requests. - PLANNED_MAINTENANCE (int): The table is temporarily unable to serve - ``Data API`` requests from this + receiving live replication updates and serving Data API requests. + PLANNED_MAINTENANCE (int): The table is temporarily unable to serve Data API requests from this cluster due to planned internal maintenance. - UNPLANNED_MAINTENANCE (int): The table is temporarily unable to serve - ``Data API`` requests from this + UNPLANNED_MAINTENANCE (int): The table is temporarily unable to serve Data API requests from this cluster due to unplanned or emergency maintenance. - READY (int): The table can serve - ``Data API`` requests from this - cluster. Depending on replication delay, reads may not immediately - reflect the state of the table in other clusters. + READY (int): The table can serve Data API requests from this cluster. Depending on + replication delay, reads may not immediately reflect the state of the + table in other clusters. """ STATE_NOT_KNOWN = 0 INITIALIZING = 1 @@ -161,7 +155,7 @@ class ReplicationState(object): class Snapshot(object): - class State(object): + class State(enum.IntEnum): """ Possible states of a snapshot. @@ -175,27 +169,3 @@ class State(object): STATE_NOT_KNOWN = 0 READY = 1 CREATING = 2 - - -class RoutingPolicyType(object): - """ - The type of the routing policy for app_profile. 
- - Attributes: - ANY (int): Read/write requests may be routed to any cluster in the - instance, and will fail over to another cluster in the event of - transient errors or delays. - Choosing this option sacrifices read-your-writes consistency to - improve availability. - See - https://cloud.google.com/bigtable/docs/reference/admin/rpc/google.bigtable.admin.v2#google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny - - SINGLE (int): Unconditionally routes all read/write requests to a - specific cluster. - This option preserves read-your-writes consistency, but does not improve - availability. - See - https://cloud.google.com/bigtable/docs/reference/admin/rpc/google.bigtable.admin.v2#google.bigtable.admin.v2.AppProfile.SingleClusterRouting - """ - ANY = 1 - SINGLE = 2 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py new file mode 100644 index 000000000000..bcb20263aae9 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py @@ -0,0 +1,361 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import google.api_core.grpc_helpers +import google.api_core.operations_v1 + +from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2_grpc + + +class BigtableInstanceAdminGrpcTransport(object): + """gRPC transport class providing stubs for + google.bigtable.admin.v2 BigtableInstanceAdmin API. + + The transport provides access to the raw gRPC stubs, + which can be used to take advantage of advanced + features of gRPC. + """ + # The scopes needed to make gRPC calls to all of the methods defined + # in this service. + _OAUTH_SCOPES = ( + 'https://www.googleapis.com/auth/bigtable.admin', + 'https://www.googleapis.com/auth/bigtable.admin.cluster', + 'https://www.googleapis.com/auth/bigtable.admin.instance', + 'https://www.googleapis.com/auth/bigtable.admin.table', + 'https://www.googleapis.com/auth/cloud-bigtable.admin', + 'https://www.googleapis.com/auth/cloud-bigtable.admin.cluster', + 'https://www.googleapis.com/auth/cloud-bigtable.admin.table', + 'https://www.googleapis.com/auth/cloud-platform', + 'https://www.googleapis.com/auth/cloud-platform.read-only', + ) + + def __init__(self, + channel=None, + credentials=None, + address='bigtableadmin.googleapis.com:443'): + """Instantiate the transport class. + + Args: + channel (grpc.Channel): A ``Channel`` instance through + which to make calls. 
This argument is mutually exclusive + with ``credentials``; providing both will raise an exception. + credentials (google.auth.credentials.Credentials): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If none + are specified, the client will attempt to ascertain the + credentials from the environment. + address (str): The address where the service is hosted. + """ + # If both `channel` and `credentials` are specified, raise an + # exception (channels come with credentials baked in already). + if channel is not None and credentials is not None: + raise ValueError( + 'The `channel` and `credentials` arguments are mutually ' + 'exclusive.', ) + + # Create the channel. + if channel is None: + channel = self.create_channel( + address=address, + credentials=credentials, + ) + + # gRPC uses objects called "stubs" that are bound to the + # channel and provide a basic method for each RPC. + self._stubs = { + 'bigtable_instance_admin_stub': + bigtable_instance_admin_pb2_grpc.BigtableInstanceAdminStub( + channel), + } + + # Because this API includes a method that returns a + # long-running operation (proto: google.longrunning.Operation), + # instantiate an LRO client. + self._operations_client = google.api_core.operations_v1.OperationsClient( + channel) + + @classmethod + def create_channel(cls, + address='bigtableadmin.googleapis.com:443', + credentials=None): + """Create and return a gRPC channel object. + + Args: + address (str): The host for the channel to use. + credentials (~.Credentials): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + + Returns: + grpc.Channel: A gRPC channel object. + """ + return google.api_core.grpc_helpers.create_channel( + address, + credentials=credentials, + scopes=cls._OAUTH_SCOPES, + ) + + @property + def create_instance(self): + """Return the gRPC stub for {$apiMethod.name}. + + Create an instance within a project. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs['bigtable_instance_admin_stub'].CreateInstance + + @property + def get_instance(self): + """Return the gRPC stub for {$apiMethod.name}. + + Gets information about an instance. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs['bigtable_instance_admin_stub'].GetInstance + + @property + def list_instances(self): + """Return the gRPC stub for {$apiMethod.name}. + + Lists information about instances in a project. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs['bigtable_instance_admin_stub'].ListInstances + + @property + def update_instance(self): + """Return the gRPC stub for {$apiMethod.name}. + + Updates an instance within a project. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs['bigtable_instance_admin_stub'].UpdateInstance + + @property + def partial_update_instance(self): + """Return the gRPC stub for {$apiMethod.name}. + + Partially updates an instance within a project. 
+ + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs[ + 'bigtable_instance_admin_stub'].PartialUpdateInstance + + @property + def delete_instance(self): + """Return the gRPC stub for {$apiMethod.name}. + + Delete an instance from a project. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs['bigtable_instance_admin_stub'].DeleteInstance + + @property + def create_cluster(self): + """Return the gRPC stub for {$apiMethod.name}. + + Creates a cluster within an instance. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs['bigtable_instance_admin_stub'].CreateCluster + + @property + def get_cluster(self): + """Return the gRPC stub for {$apiMethod.name}. + + Gets information about a cluster. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs['bigtable_instance_admin_stub'].GetCluster + + @property + def list_clusters(self): + """Return the gRPC stub for {$apiMethod.name}. + + Lists information about clusters in an instance. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs['bigtable_instance_admin_stub'].ListClusters + + @property + def update_cluster(self): + """Return the gRPC stub for {$apiMethod.name}. + + Updates a cluster within an instance. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs['bigtable_instance_admin_stub'].UpdateCluster + + @property + def delete_cluster(self): + """Return the gRPC stub for {$apiMethod.name}. + + Deletes a cluster from an instance. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs['bigtable_instance_admin_stub'].DeleteCluster + + @property + def create_app_profile(self): + """Return the gRPC stub for {$apiMethod.name}. + + Creates an app profile within an instance. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs['bigtable_instance_admin_stub'].CreateAppProfile + + @property + def get_app_profile(self): + """Return the gRPC stub for {$apiMethod.name}. + + Gets information about an app profile. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs['bigtable_instance_admin_stub'].GetAppProfile + + @property + def list_app_profiles(self): + """Return the gRPC stub for {$apiMethod.name}. + + Lists information about app profiles in an instance. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs['bigtable_instance_admin_stub'].ListAppProfiles + + @property + def update_app_profile(self): + """Return the gRPC stub for {$apiMethod.name}. + + Updates an app profile within an instance. 
+ + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs['bigtable_instance_admin_stub'].UpdateAppProfile + + @property + def delete_app_profile(self): + """Return the gRPC stub for {$apiMethod.name}. + + Deletes an app profile from an instance. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs['bigtable_instance_admin_stub'].DeleteAppProfile + + @property + def get_iam_policy(self): + """Return the gRPC stub for {$apiMethod.name}. + + Gets the access control policy for an instance resource. Returns an empty + policy if an instance exists but does not have a policy set. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs['bigtable_instance_admin_stub'].GetIamPolicy + + @property + def set_iam_policy(self): + """Return the gRPC stub for {$apiMethod.name}. + + Sets the access control policy on an instance resource. Replaces any + existing policy. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs['bigtable_instance_admin_stub'].SetIamPolicy + + @property + def test_iam_permissions(self): + """Return the gRPC stub for {$apiMethod.name}. + + Returns permissions that the caller has on the specified instance resource. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs['bigtable_instance_admin_stub'].TestIamPermissions diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py new file mode 100644 index 000000000000..e655db6f03d0 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py @@ -0,0 +1,328 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import google.api_core.grpc_helpers +import google.api_core.operations_v1 + +from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2_grpc + + +class BigtableTableAdminGrpcTransport(object): + """gRPC transport class providing stubs for + google.bigtable.admin.v2 BigtableTableAdmin API. + + The transport provides access to the raw gRPC stubs, + which can be used to take advantage of advanced + features of gRPC. + """ + # The scopes needed to make gRPC calls to all of the methods defined + # in this service. 
+ _OAUTH_SCOPES = ( + 'https://www.googleapis.com/auth/bigtable.admin', + 'https://www.googleapis.com/auth/bigtable.admin.cluster', + 'https://www.googleapis.com/auth/bigtable.admin.instance', + 'https://www.googleapis.com/auth/bigtable.admin.table', + 'https://www.googleapis.com/auth/cloud-bigtable.admin', + 'https://www.googleapis.com/auth/cloud-bigtable.admin.cluster', + 'https://www.googleapis.com/auth/cloud-bigtable.admin.table', + 'https://www.googleapis.com/auth/cloud-platform', + 'https://www.googleapis.com/auth/cloud-platform.read-only', + ) + + def __init__(self, + channel=None, + credentials=None, + address='bigtableadmin.googleapis.com:443'): + """Instantiate the transport class. + + Args: + channel (grpc.Channel): A ``Channel`` instance through + which to make calls. This argument is mutually exclusive + with ``credentials``; providing both will raise an exception. + credentials (google.auth.credentials.Credentials): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If none + are specified, the client will attempt to ascertain the + credentials from the environment. + address (str): The address where the service is hosted. + """ + # If both `channel` and `credentials` are specified, raise an + # exception (channels come with credentials baked in already). + if channel is not None and credentials is not None: + raise ValueError( + 'The `channel` and `credentials` arguments are mutually ' + 'exclusive.', ) + + # Create the channel. + if channel is None: + channel = self.create_channel( + address=address, + credentials=credentials, + ) + + # gRPC uses objects called "stubs" that are bound to the + # channel and provide a basic method for each RPC. + self._stubs = { + 'bigtable_table_admin_stub': + bigtable_table_admin_pb2_grpc.BigtableTableAdminStub(channel), + } + + # Because this API includes a method that returns a + # long-running operation (proto: google.longrunning.Operation), + # instantiate an LRO client. + self._operations_client = google.api_core.operations_v1.OperationsClient( + channel) + + @classmethod + def create_channel(cls, + address='bigtableadmin.googleapis.com:443', + credentials=None): + """Create and return a gRPC channel object. + + Args: + address (str): The host for the channel to use. + credentials (~.Credentials): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + + Returns: + grpc.Channel: A gRPC channel object. + """ + return google.api_core.grpc_helpers.create_channel( + address, + credentials=credentials, + scopes=cls._OAUTH_SCOPES, + options={ + 'grpc.max_send_message_length': -1, + 'grpc.max_receive_message_length': -1, + }.items(), + ) + + @property + def create_table(self): + """Return the gRPC stub for {$apiMethod.name}. + + Creates a new table in the specified instance. + The table can be created with a full set of initial column families, + specified in the request. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs['bigtable_table_admin_stub'].CreateTable + + @property + def create_table_from_snapshot(self): + """Return the gRPC stub for {$apiMethod.name}. + + Creates a new table from the specified snapshot. The target table must + not exist. 
The snapshot and the table must be in the same instance. + + Note: This is a private alpha release of Cloud Bigtable snapshots. This + feature is not currently available to most Cloud Bigtable customers. This + feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or deprecation + policy. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs['bigtable_table_admin_stub'].CreateTableFromSnapshot + + @property + def list_tables(self): + """Return the gRPC stub for {$apiMethod.name}. + + Lists all tables served from a specified instance. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs['bigtable_table_admin_stub'].ListTables + + @property + def get_table(self): + """Return the gRPC stub for {$apiMethod.name}. + + Gets metadata information about the specified table. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs['bigtable_table_admin_stub'].GetTable + + @property + def delete_table(self): + """Return the gRPC stub for {$apiMethod.name}. + + Permanently deletes a specified table and all of its data. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs['bigtable_table_admin_stub'].DeleteTable + + @property + def modify_column_families(self): + """Return the gRPC stub for {$apiMethod.name}. + + Performs a series of column family modifications on the specified table. + Either all or none of the modifications will occur before this method + returns, but data requests received prior to that point may see a table + where only some modifications have taken effect. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs['bigtable_table_admin_stub'].ModifyColumnFamilies + + @property + def drop_row_range(self): + """Return the gRPC stub for {$apiMethod.name}. + + Permanently drop/delete a row range from a specified table. The request can + specify whether to delete all rows in a table, or only those that match a + particular prefix. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs['bigtable_table_admin_stub'].DropRowRange + + @property + def generate_consistency_token(self): + """Return the gRPC stub for {$apiMethod.name}. + + Generates a consistency token for a Table, which can be used in + CheckConsistency to check whether mutations to the table that finished + before this call started have been replicated. The tokens will be available + for 90 days. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs[ + 'bigtable_table_admin_stub'].GenerateConsistencyToken + + @property + def check_consistency(self): + """Return the gRPC stub for {$apiMethod.name}. 
+ + Checks replication consistency based on a consistency token, that is, if + replication has caught up based on the conditions specified in the token + and the check request. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs['bigtable_table_admin_stub'].CheckConsistency + + @property + def snapshot_table(self): + """Return the gRPC stub for {$apiMethod.name}. + + Creates a new snapshot in the specified cluster from the specified + source table. The cluster and the table must be in the same instance. + + Note: This is a private alpha release of Cloud Bigtable snapshots. This + feature is not currently available to most Cloud Bigtable customers. This + feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or deprecation + policy. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs['bigtable_table_admin_stub'].SnapshotTable + + @property + def get_snapshot(self): + """Return the gRPC stub for {$apiMethod.name}. + + Gets metadata information about the specified snapshot. + + Note: This is a private alpha release of Cloud Bigtable snapshots. This + feature is not currently available to most Cloud Bigtable customers. This + feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or deprecation + policy. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs['bigtable_table_admin_stub'].GetSnapshot + + @property + def list_snapshots(self): + """Return the gRPC stub for {$apiMethod.name}. + + Lists all snapshots associated with the specified cluster. + + Note: This is a private alpha release of Cloud Bigtable snapshots. This + feature is not currently available to most Cloud Bigtable customers. This + feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or deprecation + policy. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs['bigtable_table_admin_stub'].ListSnapshots + + @property + def delete_snapshot(self): + """Return the gRPC stub for {$apiMethod.name}. + + Permanently deletes the specified snapshot. + + Note: This is a private alpha release of Cloud Bigtable snapshots. This + feature is not currently available to most Cloud Bigtable customers. This + feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or deprecation + policy. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. 
+ """ + return self._stubs['bigtable_table_admin_stub'].DeleteSnapshot diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py index eb795e269af4..3142362adef0 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py @@ -1312,13 +1312,7 @@ DESCRIPTOR = _CREATEAPPPROFILEREQUEST, __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' , - __doc__ = """This is a private alpha release of Cloud Bigtable replication. This - feature is not currently available to most Cloud Bigtable customers. - This feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or - deprecation policy. - - Request message for BigtableInstanceAdmin.CreateAppProfile. + __doc__ = """Request message for BigtableInstanceAdmin.CreateAppProfile. Attributes: @@ -1344,13 +1338,7 @@ DESCRIPTOR = _GETAPPPROFILEREQUEST, __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' , - __doc__ = """This is a private alpha release of Cloud Bigtable replication. This - feature is not currently available to most Cloud Bigtable customers. - This feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or - deprecation policy. - - Request message for BigtableInstanceAdmin.GetAppProfile. + __doc__ = """Request message for BigtableInstanceAdmin.GetAppProfile. Attributes: @@ -1367,13 +1355,7 @@ DESCRIPTOR = _LISTAPPPROFILESREQUEST, __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' , - __doc__ = """This is a private alpha release of Cloud Bigtable replication. This - feature is not currently available to most Cloud Bigtable customers. - This feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or - deprecation policy. - - Request message for BigtableInstanceAdmin.ListAppProfiles. + __doc__ = """Request message for BigtableInstanceAdmin.ListAppProfiles. Attributes: @@ -1392,13 +1374,7 @@ DESCRIPTOR = _LISTAPPPROFILESRESPONSE, __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' , - __doc__ = """This is a private alpha release of Cloud Bigtable replication. This - feature is not currently available to most Cloud Bigtable customers. - This feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or - deprecation policy. - - Response message for BigtableInstanceAdmin.ListAppProfiles. + __doc__ = """Response message for BigtableInstanceAdmin.ListAppProfiles. Attributes: @@ -1417,13 +1393,7 @@ DESCRIPTOR = _UPDATEAPPPROFILEREQUEST, __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' , - __doc__ = """This is a private alpha release of Cloud Bigtable replication. This - feature is not currently available to most Cloud Bigtable customers. - This feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or - deprecation policy. - - Request message for BigtableInstanceAdmin.UpdateAppProfile. 
+ __doc__ = """Request message for BigtableInstanceAdmin.UpdateAppProfile. Attributes: @@ -1444,13 +1414,7 @@ DESCRIPTOR = _DELETEAPPPROFILEREQUEST, __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' , - __doc__ = """This is a private alpha release of Cloud Bigtable replication. This - feature is not currently available to most Cloud Bigtable customers. - This feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or - deprecation policy. - - Request message for BigtableInstanceAdmin.DeleteAppProfile. + __doc__ = """Request message for BigtableInstanceAdmin.DeleteAppProfile. Attributes: @@ -1469,13 +1433,7 @@ DESCRIPTOR = _UPDATEAPPPROFILEMETADATA, __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' , - __doc__ = """This is a private alpha release of Cloud Bigtable replication. This - feature is not currently available to most Cloud Bigtable customers. - This feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or - deprecation policy. - - The metadata for the Operation returned by UpdateAppProfile. + __doc__ = """The metadata for the Operation returned by UpdateAppProfile. """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.UpdateAppProfileMetadata) )) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py index f18f82cc1363..f1ea31abdbba 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py @@ -202,73 +202,42 @@ def DeleteCluster(self, request, context): raise NotImplementedError('Method not implemented!') def CreateAppProfile(self, request, context): - """This is a private alpha release of Cloud Bigtable replication. This feature - is not currently available to most Cloud Bigtable customers. This feature - might be changed in backward-incompatible ways and is not recommended for - production use. It is not subject to any SLA or deprecation policy. - - Creates an app profile within an instance. + """Creates an app profile within an instance. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def GetAppProfile(self, request, context): - """This is a private alpha release of Cloud Bigtable replication. This feature - is not currently available to most Cloud Bigtable customers. This feature - might be changed in backward-incompatible ways and is not recommended for - production use. It is not subject to any SLA or deprecation policy. - - Gets information about an app profile. + """Gets information about an app profile. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def ListAppProfiles(self, request, context): - """This is a private alpha release of Cloud Bigtable replication. This feature - is not currently available to most Cloud Bigtable customers. This feature - might be changed in backward-incompatible ways and is not recommended for - production use. It is not subject to any SLA or deprecation policy. 
- - Lists information about app profiles in an instance. + """Lists information about app profiles in an instance. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def UpdateAppProfile(self, request, context): - """This is a private alpha release of Cloud Bigtable replication. This feature - is not currently available to most Cloud Bigtable customers. This feature - might be changed in backward-incompatible ways and is not recommended for - production use. It is not subject to any SLA or deprecation policy. - - Updates an app profile within an instance. + """Updates an app profile within an instance. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def DeleteAppProfile(self, request, context): - """This is a private alpha release of Cloud Bigtable replication. This feature - is not currently available to most Cloud Bigtable customers. This feature - might be changed in backward-incompatible ways and is not recommended for - production use. It is not subject to any SLA or deprecation policy. - - Deletes an app profile from an instance. + """Deletes an app profile from an instance. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def GetIamPolicy(self, request, context): - """This is a private alpha release of Cloud Bigtable instance level - permissions. This feature is not currently available to most Cloud Bigtable - customers. This feature might be changed in backward-incompatible ways and - is not recommended for production use. It is not subject to any SLA or - deprecation policy. - - Gets the access control policy for an instance resource. Returns an empty + """Gets the access control policy for an instance resource. Returns an empty policy if an instance exists but does not have a policy set. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) @@ -276,13 +245,7 @@ def GetIamPolicy(self, request, context): raise NotImplementedError('Method not implemented!') def SetIamPolicy(self, request, context): - """This is a private alpha release of Cloud Bigtable instance level - permissions. This feature is not currently available to most Cloud Bigtable - customers. This feature might be changed in backward-incompatible ways and - is not recommended for production use. It is not subject to any SLA or - deprecation policy. - - Sets the access control policy on an instance resource. Replaces any + """Sets the access control policy on an instance resource. Replaces any existing policy. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) @@ -290,13 +253,7 @@ def SetIamPolicy(self, request, context): raise NotImplementedError('Method not implemented!') def TestIamPermissions(self, request, context): - """This is a private alpha release of Cloud Bigtable instance level - permissions. This feature is not currently available to most Cloud Bigtable - customers. This feature might be changed in backward-incompatible ways and - is not recommended for production use. It is not subject to any SLA or - deprecation policy. - - Returns permissions that the caller has on the specified instance resource. + """Returns permissions that the caller has on the specified instance resource. 
""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py index fbb2ebb46390..9d650846856c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py @@ -987,15 +987,15 @@ DESCRIPTOR = _CREATETABLEFROMSNAPSHOTREQUEST, __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' , - __doc__ = """This is a private alpha release of Cloud Bigtable snapshots. This + __doc__ = """Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot] + + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be changed in backward-incompatible ways and is not recommended for production use. It is not subject to any SLA or deprecation policy. - Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot] - Attributes: parent: @@ -1178,13 +1178,7 @@ DESCRIPTOR = _GENERATECONSISTENCYTOKENREQUEST, __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' , - __doc__ = """This is a private alpha release of Cloud Bigtable replication. This - feature is not currently available to most Cloud Bigtable customers. - This feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or - deprecation policy. - - Request message for + __doc__ = """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] @@ -1202,13 +1196,7 @@ DESCRIPTOR = _GENERATECONSISTENCYTOKENRESPONSE, __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' , - __doc__ = """This is a private alpha release of Cloud Bigtable replication. This - feature is not currently available to most Cloud Bigtable customers. - This feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or - deprecation policy. - - Response message for + __doc__ = """Response message for [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] @@ -1224,13 +1212,7 @@ DESCRIPTOR = _CHECKCONSISTENCYREQUEST, __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' , - __doc__ = """This is a private alpha release of Cloud Bigtable replication. This - feature is not currently available to most Cloud Bigtable customers. - This feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or - deprecation policy. 
- - Request message for + __doc__ = """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] @@ -1251,13 +1233,7 @@ DESCRIPTOR = _CHECKCONSISTENCYRESPONSE, __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' , - __doc__ = """This is a private alpha release of Cloud Bigtable replication. This - feature is not currently available to most Cloud Bigtable customers. - This feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or - deprecation policy. - - Response message for + __doc__ = """Response message for [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] @@ -1275,15 +1251,15 @@ DESCRIPTOR = _SNAPSHOTTABLEREQUEST, __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' , - __doc__ = """This is a private alpha release of Cloud Bigtable snapshots. This + __doc__ = """Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable][google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable] + + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be changed in backward-incompatible ways and is not recommended for production use. It is not subject to any SLA or deprecation policy. - Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable][google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable] - Attributes: name: @@ -1316,15 +1292,15 @@ DESCRIPTOR = _GETSNAPSHOTREQUEST, __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' , - __doc__ = """This is a private alpha release of Cloud Bigtable snapshots. This + __doc__ = """Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot] + + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be changed in backward-incompatible ways and is not recommended for production use. It is not subject to any SLA or deprecation policy. - Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot] - Attributes: name: @@ -1340,15 +1316,15 @@ DESCRIPTOR = _LISTSNAPSHOTSREQUEST, __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' , - __doc__ = """This is a private alpha release of Cloud Bigtable snapshots. This + __doc__ = """Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] + + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be changed in backward-incompatible ways and is not recommended for production use. It is not subject to any SLA or deprecation policy. - Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] - Attributes: parent: @@ -1370,15 +1346,15 @@ DESCRIPTOR = _LISTSNAPSHOTSRESPONSE, __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' , - __doc__ = """This is a private alpha release of Cloud Bigtable snapshots. 
This + __doc__ = """Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] + + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be changed in backward-incompatible ways and is not recommended for production use. It is not subject to any SLA or deprecation policy. - Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] - Attributes: snapshots: @@ -1396,15 +1372,15 @@ DESCRIPTOR = _DELETESNAPSHOTREQUEST, __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' , - __doc__ = """This is a private alpha release of Cloud Bigtable snapshots. This + __doc__ = """Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot] + + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be changed in backward-incompatible ways and is not recommended for production use. It is not subject to any SLA or deprecation policy. - Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot] - Attributes: name: @@ -1420,14 +1396,14 @@ DESCRIPTOR = _SNAPSHOTTABLEMETADATA, __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' , - __doc__ = """This is a private alpha release of Cloud Bigtable snapshots. This + __doc__ = """The metadata for the Operation returned by SnapshotTable. + + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be changed in backward-incompatible ways and is not recommended for production use. It is not subject to any SLA or deprecation policy. - The metadata for the Operation returned by SnapshotTable. - Attributes: original_request: @@ -1447,14 +1423,14 @@ DESCRIPTOR = _CREATETABLEFROMSNAPSHOTMETADATA, __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' , - __doc__ = """This is a private alpha release of Cloud Bigtable snapshots. This + __doc__ = """The metadata for the Operation returned by CreateTableFromSnapshot. + + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be changed in backward-incompatible ways and is not recommended for production use. It is not subject to any SLA or deprecation policy. - The metadata for the Operation returned by CreateTableFromSnapshot. 
- Attributes: original_request: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py index 4a5adbb6bc30..278c914f023b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py @@ -106,13 +106,14 @@ def CreateTable(self, request, context): raise NotImplementedError('Method not implemented!') def CreateTableFromSnapshot(self, request, context): - """This is a private alpha release of Cloud Bigtable snapshots. This feature - is not currently available to most Cloud Bigtable customers. This feature - might be changed in backward-incompatible ways and is not recommended for - production use. It is not subject to any SLA or deprecation policy. - - Creates a new table from the specified snapshot. The target table must + """Creates a new table from the specified snapshot. The target table must not exist. The snapshot and the table must be in the same instance. + + Note: This is a private alpha release of Cloud Bigtable snapshots. This + feature is not currently available to most Cloud Bigtable customers. This + feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or deprecation + policy. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') @@ -159,12 +160,7 @@ def DropRowRange(self, request, context): raise NotImplementedError('Method not implemented!') def GenerateConsistencyToken(self, request, context): - """This is a private alpha release of Cloud Bigtable replication. This feature - is not currently available to most Cloud Bigtable customers. This feature - might be changed in backward-incompatible ways and is not recommended for - production use. It is not subject to any SLA or deprecation policy. - - Generates a consistency token for a Table, which can be used in + """Generates a consistency token for a Table, which can be used in CheckConsistency to check whether mutations to the table that finished before this call started have been replicated. The tokens will be available for 90 days. @@ -174,12 +170,7 @@ def GenerateConsistencyToken(self, request, context): raise NotImplementedError('Method not implemented!') def CheckConsistency(self, request, context): - """This is a private alpha release of Cloud Bigtable replication. This feature - is not currently available to most Cloud Bigtable customers. This feature - might be changed in backward-incompatible ways and is not recommended for - production use. It is not subject to any SLA or deprecation policy. - - Checks replication consistency based on a consistency token, that is, if + """Checks replication consistency based on a consistency token, that is, if replication has caught up based on the conditions specified in the token and the check request. """ @@ -188,49 +179,53 @@ def CheckConsistency(self, request, context): raise NotImplementedError('Method not implemented!') def SnapshotTable(self, request, context): - """This is a private alpha release of Cloud Bigtable snapshots. This feature - is not currently available to most Cloud Bigtable customers. This feature - might be changed in backward-incompatible ways and is not recommended for - production use. 
It is not subject to any SLA or deprecation policy. - - Creates a new snapshot in the specified cluster from the specified + """Creates a new snapshot in the specified cluster from the specified source table. The cluster and the table must be in the same instance. + + Note: This is a private alpha release of Cloud Bigtable snapshots. This + feature is not currently available to most Cloud Bigtable customers. This + feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or deprecation + policy. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def GetSnapshot(self, request, context): - """This is a private alpha release of Cloud Bigtable snapshots. This feature - is not currently available to most Cloud Bigtable customers. This feature - might be changed in backward-incompatible ways and is not recommended for - production use. It is not subject to any SLA or deprecation policy. + """Gets metadata information about the specified snapshot. - Gets metadata information about the specified snapshot. + Note: This is a private alpha release of Cloud Bigtable snapshots. This + feature is not currently available to most Cloud Bigtable customers. This + feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or deprecation + policy. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def ListSnapshots(self, request, context): - """This is a private alpha release of Cloud Bigtable snapshots. This feature - is not currently available to most Cloud Bigtable customers. This feature - might be changed in backward-incompatible ways and is not recommended for - production use. It is not subject to any SLA or deprecation policy. + """Lists all snapshots associated with the specified cluster. - Lists all snapshots associated with the specified cluster. + Note: This is a private alpha release of Cloud Bigtable snapshots. This + feature is not currently available to most Cloud Bigtable customers. This + feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or deprecation + policy. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def DeleteSnapshot(self, request, context): - """This is a private alpha release of Cloud Bigtable snapshots. This feature - is not currently available to most Cloud Bigtable customers. This feature - might be changed in backward-incompatible ways and is not recommended for - production use. It is not subject to any SLA or deprecation policy. + """Permanently deletes the specified snapshot. - Permanently deletes the specified snapshot. + Note: This is a private alpha release of Cloud Bigtable snapshots. This + feature is not currently available to most Cloud Bigtable customers. This + feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or deprecation + policy. 
""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py index eebcdc895abe..3e44d81aa2e4 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py @@ -537,13 +537,7 @@ DESCRIPTOR = _APPPROFILE, __module__ = 'google.cloud.bigtable.admin_v2.proto.instance_pb2' , - __doc__ = """This is a private alpha release of Cloud Bigtable replication. This - feature is not currently available to most Cloud Bigtable customers. - This feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or - deprecation policy. - - A configuration object describing how Cloud Bigtable should treat + __doc__ = """A configuration object describing how Cloud Bigtable should treat traffic from a particular end user application. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py index 07b1fa0e9e8d..4d7625703694 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py @@ -572,13 +572,7 @@ DESCRIPTOR = _TABLE_CLUSTERSTATE, __module__ = 'google.cloud.bigtable.admin_v2.proto.table_pb2' , - __doc__ = """This is a private alpha release of Cloud Bigtable replication. This - feature is not currently available to most Cloud Bigtable customers. - This feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or - deprecation policy. - - The state of a table's data in a particular cluster. + __doc__ = """The state of a table's data in a particular cluster. Attributes: @@ -615,28 +609,24 @@ (``OutputOnly``) The unique name of the table. Values are of the form ``projects//instances//tables/[_a- zA-Z0-9][-_.a-zA-Z0-9]*``. Views: ``NAME_ONLY``, - ``SCHEMA_VIEW``, ``FULL`` + ``SCHEMA_VIEW``, ``REPLICATION_VIEW``, ``FULL`` cluster_states: - This is a private alpha release of Cloud Bigtable replication. - This feature is not currently available to most Cloud Bigtable - customers. This feature might be changed in backward- - incompatible ways and is not recommended for production use. - It is not subject to any SLA or deprecation policy. (``OutputOnly``) Map from cluster ID to per-cluster table state. If it could not be determined whether or not the table has data in a particular cluster (for example, if its zone is unavailable), then there will be an entry for the cluster with - UNKNOWN ``replication_status``. Views: ``FULL`` + UNKNOWN ``replication_status``. Views: ``REPLICATION_VIEW``, + ``FULL`` column_families: (``CreationOnly``) The column families configured for this table, mapped by column family ID. Views: ``SCHEMA_VIEW``, ``FULL`` granularity: - (``CreationOnly``) The granularity (e.g. ``MILLIS``, - ``MICROS``) at which timestamps are stored in this table. - Timestamps not matching the granularity will be rejected. If - unspecified at creation time, the value will be set to - ``MILLIS``. Views: ``SCHEMA_VIEW``, ``FULL`` + (``CreationOnly``) The granularity (i.e. 
``MILLIS``) at which + timestamps are stored in this table. Timestamps not matching + the granularity will be rejected. If unspecified at creation + time, the value will be set to ``MILLIS``. Views: + ``SCHEMA_VIEW``, ``FULL`` """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table) )) @@ -727,15 +717,15 @@ DESCRIPTOR = _SNAPSHOT, __module__ = 'google.cloud.bigtable.admin_v2.proto.table_pb2' , - __doc__ = """This is a private alpha release of Cloud Bigtable snapshots. This + __doc__ = """A snapshot of a table at a particular time. A snapshot can be used as a + checkpoint for data restoration or a data source for a new table. + + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be changed in backward-incompatible ways and is not recommended for production use. It is not subject to any SLA or deprecation policy. - A snapshot of a table at a particular time. A snapshot can be used as a - checkpoint for data restoration or a data source for a new table. - Attributes: name: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types.py index 6f91fac2db0c..e67c6f585fbc 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- +# # Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,7 +17,13 @@ from __future__ import absolute_import import sys +from google.api_core.protobuf_helpers import get_messages + from google.api import http_pb2 +from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2 +from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2 +from google.cloud.bigtable_admin_v2.proto import instance_pb2 +from google.cloud.bigtable_admin_v2.proto import table_pb2 from google.iam.v1 import iam_policy_pb2 from google.iam.v1 import policy_pb2 from google.iam.v1.logging import audit_data_pb2 @@ -28,13 +36,6 @@ from google.protobuf import timestamp_pb2 from google.rpc import status_pb2 -from google.api_core.protobuf_helpers import get_messages -from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2 -from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2 -from google.cloud.bigtable_admin_v2.proto import instance_pb2 -from google.cloud.bigtable_admin_v2.proto import table_pb2 - - _shared_modules = [ http_pb2, iam_policy_pb2, @@ -63,7 +64,6 @@ for name, message in get_messages(module).items(): setattr(sys.modules[__name__], name, message) names.append(name) - for module in _local_modules: for name, message in get_messages(module).items(): message.__module__ = 'google.cloud.bigtable_admin_v2.types' diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py index 751a63d9d40e..bedc50962146 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py @@ -1,4 +1,6 @@ -# Copyright 2017 Google LLC +# -*- coding: utf-8 -*- +# +# Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
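The reworked ``types`` module above re-exports every protobuf message from the shared and local proto modules as a top-level attribute. A small usage sketch; the project, instance, and table names are placeholders:

    from google.cloud.bigtable_admin_v2 import types

    # Messages aggregated via get_messages() can be constructed directly.
    rule = types.GcRule(max_num_versions=1)       # defined in table_pb2
    family = types.ColumnFamily(gc_rule=rule)     # defined in table_pb2
    request = types.GetTableRequest(              # defined in bigtable_table_admin_pb2
        name='projects/my-project/instances/my-instance/tables/my-table')
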
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py index 5172fb50ee79..b7a3476e0a94 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py @@ -1,4 +1,6 @@ -# Copyright 2017 Google LLC +# -*- coding: utf-8 -*- +# +# Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,15 +16,21 @@ """Accesses the google.bigtable.v2 Bigtable API.""" import pkg_resources +import warnings +from google.oauth2 import service_account import google.api_core.gapic_v1.client_info import google.api_core.gapic_v1.config import google.api_core.gapic_v1.method +import google.api_core.gapic_v1.routing_header import google.api_core.grpc_helpers import google.api_core.path_template +import grpc from google.cloud.bigtable_v2.gapic import bigtable_client_config +from google.cloud.bigtable_v2.gapic.transports import bigtable_grpc_transport from google.cloud.bigtable_v2.proto import bigtable_pb2 +from google.cloud.bigtable_v2.proto import bigtable_pb2_grpc from google.cloud.bigtable_v2.proto import data_pb2 _GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( @@ -35,21 +43,31 @@ class BigtableClient(object): SERVICE_ADDRESS = 'bigtable.googleapis.com:443' """The default address of the service.""" - # The scopes needed to make gRPC calls to all of the methods defined in - # this service - _DEFAULT_SCOPES = ( - 'https://www.googleapis.com/auth/bigtable.data', - 'https://www.googleapis.com/auth/bigtable.data.readonly', - 'https://www.googleapis.com/auth/cloud-bigtable.data', - 'https://www.googleapis.com/auth/cloud-bigtable.data.readonly', - 'https://www.googleapis.com/auth/cloud-platform', - 'https://www.googleapis.com/auth/cloud-platform.read-only', - ) - - # The name of the interface for this client. This is the key used to find - # method configuration in the client_config dictionary. + # The name of the interface for this client. This is the key used to + # find the method configuration in the client_config dictionary. _INTERFACE_NAME = 'google.bigtable.v2.Bigtable' + @classmethod + def from_service_account_file(cls, filename, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + BigtableClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs['credentials'] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + @classmethod def table_path(cls, project, instance, table): """Return a fully-qualified table string.""" @@ -61,6 +79,7 @@ def table_path(cls, project, instance, table): ) def __init__(self, + transport=None, channel=None, credentials=None, client_config=bigtable_client_config.config, @@ -68,96 +87,83 @@ def __init__(self, """Constructor. Args: - channel (grpc.Channel): A ``Channel`` instance through - which to make calls. 
This argument is mutually exclusive + transport (Union[~.BigtableGrpcTransport, + Callable[[~.Credentials, type], ~.BigtableGrpcTransport]): A transport + instance, responsible for actually making the API calls. + The default transport uses the gRPC protocol. + This argument may also be a callable which returns a + transport instance. Callables will be sent the credentials + as the first argument and the default transport class as + the second argument. + channel (grpc.Channel): DEPRECATED. A ``Channel`` instance + through which to make calls. This argument is mutually exclusive with ``credentials``; providing both will raise an exception. credentials (google.auth.credentials.Credentials): The authorization credentials to attach to requests. These credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - client_config (dict): A dictionary of call options for each - method. If not specified, the default configuration is used. + This argument is mutually exclusive with providing a + transport instance to ``transport``; doing so will raise + an exception. + client_config (dict): DEPRECATED. A dictionary of call options for + each method. If not specified, the default configuration is used. client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. """ - # If both `channel` and `credentials` are specified, raise an - # exception (channels come with credentials baked in already). - if channel is not None and credentials is not None: - raise ValueError( - 'The `channel` and `credentials` arguments to {} are mutually ' - 'exclusive.'.format(self.__class__.__name__), ) - - # Create the channel. - if channel is None: - channel = google.api_core.grpc_helpers.create_channel( - self.SERVICE_ADDRESS, + # Raise deprecation warnings for things we want to go away. + if client_config: + warnings.warn('The `client_config` argument is deprecated.', + PendingDeprecationWarning) + if channel: + warnings.warn( + 'The `channel` argument is deprecated; use ' + '`transport` instead.', PendingDeprecationWarning) + + # Instantiate the transport. + # The transport is responsible for handling serialization and + # deserialization and actually sending data to the service. + if transport: + if callable(transport): + self.transport = transport( + credentials=credentials, + default_class=bigtable_grpc_transport. + BigtableGrpcTransport, + ) + else: + if credentials: + raise ValueError( + 'Received both a transport instance and ' + 'credentials; these are mutually exclusive.') + self.transport = transport + else: + self.transport = bigtable_grpc_transport.BigtableGrpcTransport( + address=self.SERVICE_ADDRESS, + channel=channel, credentials=credentials, - scopes=self._DEFAULT_SCOPES, - options={ - 'grpc.max_send_message_length': -1, - 'grpc.max_receive_message_length': -1, - }.items(), ) - # Create the gRPC stubs. - self.bigtable_stub = (bigtable_pb2.BigtableStub(channel)) - if client_info is None: client_info = ( google.api_core.gapic_v1.client_info.DEFAULT_CLIENT_INFO) client_info.gapic_version = _GAPIC_LIBRARY_VERSION + self._client_info = client_info # Parse out the default settings for retry and timeout for each RPC # from the client configuration. 
# (Ordinarily, these are the defaults specified in the `*_config.py` # file next to this one.) - method_configs = google.api_core.gapic_v1.config.parse_method_configs( + self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( client_config['interfaces'][self._INTERFACE_NAME], ) - # Write the "inner API call" methods to the class. - # These are wrapped versions of the gRPC stub methods, with retry and - # timeout configuration applied, called by the public methods on - # this class. - self._read_rows = google.api_core.gapic_v1.method.wrap_method( - self.bigtable_stub.ReadRows, - default_retry=method_configs['ReadRows'].retry, - default_timeout=method_configs['ReadRows'].timeout, - client_info=client_info, - ) - self._sample_row_keys = google.api_core.gapic_v1.method.wrap_method( - self.bigtable_stub.SampleRowKeys, - default_retry=method_configs['SampleRowKeys'].retry, - default_timeout=method_configs['SampleRowKeys'].timeout, - client_info=client_info, - ) - self._mutate_row = google.api_core.gapic_v1.method.wrap_method( - self.bigtable_stub.MutateRow, - default_retry=method_configs['MutateRow'].retry, - default_timeout=method_configs['MutateRow'].timeout, - client_info=client_info, - ) - self._mutate_rows = google.api_core.gapic_v1.method.wrap_method( - self.bigtable_stub.MutateRows, - default_retry=method_configs['MutateRows'].retry, - default_timeout=method_configs['MutateRows'].timeout, - client_info=client_info, - ) - self._check_and_mutate_row = google.api_core.gapic_v1.method.wrap_method( - self.bigtable_stub.CheckAndMutateRow, - default_retry=method_configs['CheckAndMutateRow'].retry, - default_timeout=method_configs['CheckAndMutateRow'].timeout, - client_info=client_info, - ) - self._read_modify_write_row = google.api_core.gapic_v1.method.wrap_method( - self.bigtable_stub.ReadModifyWriteRow, - default_retry=method_configs['ReadModifyWriteRow'].retry, - default_timeout=method_configs['ReadModifyWriteRow'].timeout, - client_info=client_info, - ) + # Save a dictionary of cached API call functions. + # These are the actual callables which invoke the proper + # transport methods, wrapped with `wrap_method` to add retry, + # timeout, and the like. + self._inner_api_calls = {} # Service calls def read_rows(self, @@ -167,7 +173,8 @@ def read_rows(self, filter_=None, rows_limit=None, retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT): + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None): """ Streams back the contents of all requested rows in key order, optionally applying the same Reader filter to each. Depending on their size, @@ -190,12 +197,7 @@ def read_rows(self, table_name (str): The unique name of the table from which to read. Values are of the form ``projects//instances//tables/
``. - app_profile_id (str): This is a private alpha release of Cloud Bigtable replication. This feature - is not currently available to most Cloud Bigtable customers. This feature - might be changed in backward-incompatible ways and is not recommended for - production use. It is not subject to any SLA or deprecation policy. - - This value specifies routing for replication. If not specified, the + app_profile_id (str): This value specifies routing for replication. If not specified, the \"default\" application profile will be used. rows (Union[dict, ~google.cloud.bigtable_v2.types.RowSet]): The row keys and/or ranges to read. If not specified, reads from all rows. If a dict is provided, it must be of the same form as the protobuf @@ -212,6 +214,8 @@ def read_rows(self, timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. Returns: Iterable[~google.cloud.bigtable_v2.types.ReadRowsResponse]. @@ -223,6 +227,16 @@ def read_rows(self, to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ + # Wrap the transport method to add retry and timeout logic. + if 'read_rows' not in self._inner_api_calls: + self._inner_api_calls[ + 'read_rows'] = google.api_core.gapic_v1.method.wrap_method( + self.transport.read_rows, + default_retry=self._method_configs['ReadRows'].retry, + default_timeout=self._method_configs['ReadRows'].timeout, + client_info=self._client_info, + ) + request = bigtable_pb2.ReadRowsRequest( table_name=table_name, app_profile_id=app_profile_id, @@ -230,13 +244,27 @@ def read_rows(self, filter=filter_, rows_limit=rows_limit, ) - return self._read_rows(request, retry=retry, timeout=timeout) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [('table_name', table_name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header) + metadata.append(routing_metadata) + + return self._inner_api_calls['read_rows']( + request, retry=retry, timeout=timeout, metadata=metadata) def sample_row_keys(self, table_name, app_profile_id=None, retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT): + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None): """ Returns a sample of row keys in the table. The returned row keys will delimit contiguous sections of the table of approximately equal size, @@ -258,12 +286,7 @@ def sample_row_keys(self, table_name (str): The unique name of the table from which to sample row keys. Values are of the form ``projects//instances//tables/
``. - app_profile_id (str): This is a private alpha release of Cloud Bigtable replication. This feature - is not currently available to most Cloud Bigtable customers. This feature - might be changed in backward-incompatible ways and is not recommended for - production use. It is not subject to any SLA or deprecation policy. - - This value specifies routing for replication. If not specified, the + app_profile_id (str): This value specifies routing for replication. If not specified, the \"default\" application profile will be used. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not @@ -271,6 +294,8 @@ def sample_row_keys(self, timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. Returns: Iterable[~google.cloud.bigtable_v2.types.SampleRowKeysResponse]. @@ -282,11 +307,35 @@ def sample_row_keys(self, to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ + # Wrap the transport method to add retry and timeout logic. + if 'sample_row_keys' not in self._inner_api_calls: + self._inner_api_calls[ + 'sample_row_keys'] = google.api_core.gapic_v1.method.wrap_method( + self.transport.sample_row_keys, + default_retry=self._method_configs['SampleRowKeys'].retry, + default_timeout=self._method_configs['SampleRowKeys'] + .timeout, + client_info=self._client_info, + ) + request = bigtable_pb2.SampleRowKeysRequest( table_name=table_name, app_profile_id=app_profile_id, ) - return self._sample_row_keys(request, retry=retry, timeout=timeout) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [('table_name', table_name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header) + metadata.append(routing_metadata) + + return self._inner_api_calls['sample_row_keys']( + request, retry=retry, timeout=timeout, metadata=metadata) def mutate_row(self, table_name, @@ -294,7 +343,8 @@ def mutate_row(self, mutations, app_profile_id=None, retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT): + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None): """ Mutates a row atomically. Cells already present in the row are left unchanged unless explicitly changed by ``mutation``. @@ -305,7 +355,11 @@ def mutate_row(self, >>> client = bigtable_v2.BigtableClient() >>> >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + >>> + >>> # TODO: Initialize ``row_key``: >>> row_key = b'' + >>> + >>> # TODO: Initialize ``mutations``: >>> mutations = [] >>> >>> response = client.mutate_row(table_name, row_key, mutations) @@ -320,12 +374,7 @@ def mutate_row(self, Must contain at least one entry and at most 100000. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_v2.types.Mutation` - app_profile_id (str): This is a private alpha release of Cloud Bigtable replication. This feature - is not currently available to most Cloud Bigtable customers. This feature - might be changed in backward-incompatible ways and is not recommended for - production use. It is not subject to any SLA or deprecation policy. - - This value specifies routing for replication. 
If not specified, the + app_profile_id (str): This value specifies routing for replication. If not specified, the \"default\" application profile will be used. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not @@ -333,6 +382,8 @@ def mutate_row(self, timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. Returns: A :class:`~google.cloud.bigtable_v2.types.MutateRowResponse` instance. @@ -344,20 +395,44 @@ def mutate_row(self, to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ + # Wrap the transport method to add retry and timeout logic. + if 'mutate_row' not in self._inner_api_calls: + self._inner_api_calls[ + 'mutate_row'] = google.api_core.gapic_v1.method.wrap_method( + self.transport.mutate_row, + default_retry=self._method_configs['MutateRow'].retry, + default_timeout=self._method_configs['MutateRow'].timeout, + client_info=self._client_info, + ) + request = bigtable_pb2.MutateRowRequest( table_name=table_name, row_key=row_key, mutations=mutations, app_profile_id=app_profile_id, ) - return self._mutate_row(request, retry=retry, timeout=timeout) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [('table_name', table_name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header) + metadata.append(routing_metadata) + + return self._inner_api_calls['mutate_row']( + request, retry=retry, timeout=timeout, metadata=metadata) def mutate_rows(self, table_name, entries, app_profile_id=None, retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT): + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None): """ Mutates multiple rows in a batch. Each individual row is mutated atomically as in MutateRow, but the entire batch is not executed @@ -369,6 +444,8 @@ def mutate_rows(self, >>> client = bigtable_v2.BigtableClient() >>> >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + >>> + >>> # TODO: Initialize ``entries``: >>> entries = [] >>> >>> for element in client.mutate_rows(table_name, entries): @@ -384,12 +461,7 @@ def mutate_rows(self, contain at most 100000 mutations. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_v2.types.Entry` - app_profile_id (str): This is a private alpha release of Cloud Bigtable replication. This feature - is not currently available to most Cloud Bigtable customers. This feature - might be changed in backward-incompatible ways and is not recommended for - production use. It is not subject to any SLA or deprecation policy. - - This value specifies routing for replication. If not specified, the + app_profile_id (str): This value specifies routing for replication. If not specified, the \"default\" application profile will be used. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not @@ -397,6 +469,8 @@ def mutate_rows(self, timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. 
Note that if ``retry`` is specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. Returns: Iterable[~google.cloud.bigtable_v2.types.MutateRowsResponse]. @@ -408,12 +482,35 @@ def mutate_rows(self, to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ + # Wrap the transport method to add retry and timeout logic. + if 'mutate_rows' not in self._inner_api_calls: + self._inner_api_calls[ + 'mutate_rows'] = google.api_core.gapic_v1.method.wrap_method( + self.transport.mutate_rows, + default_retry=self._method_configs['MutateRows'].retry, + default_timeout=self._method_configs['MutateRows'].timeout, + client_info=self._client_info, + ) + request = bigtable_pb2.MutateRowsRequest( table_name=table_name, entries=entries, app_profile_id=app_profile_id, ) - return self._mutate_rows(request, retry=retry, timeout=timeout) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [('table_name', table_name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header) + metadata.append(routing_metadata) + + return self._inner_api_calls['mutate_rows']( + request, retry=retry, timeout=timeout, metadata=metadata) def check_and_mutate_row(self, table_name, @@ -423,7 +520,8 @@ def check_and_mutate_row(self, true_mutations=None, false_mutations=None, retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT): + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None): """ Mutates a row atomically based on the output of a predicate Reader filter. @@ -433,6 +531,8 @@ def check_and_mutate_row(self, >>> client = bigtable_v2.BigtableClient() >>> >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + >>> + >>> # TODO: Initialize ``row_key``: >>> row_key = b'' >>> >>> response = client.check_and_mutate_row(table_name, row_key) @@ -443,12 +543,7 @@ def check_and_mutate_row(self, Values are of the form ``projects//instances//tables/
``. row_key (bytes): The key of the row to which the conditional mutation should be applied. - app_profile_id (str): This is a private alpha release of Cloud Bigtable replication. This feature - is not currently available to most Cloud Bigtable customers. This feature - might be changed in backward-incompatible ways and is not recommended for - production use. It is not subject to any SLA or deprecation policy. - - This value specifies routing for replication. If not specified, the + app_profile_id (str): This value specifies routing for replication. If not specified, the \"default\" application profile will be used. predicate_filter (Union[dict, ~google.cloud.bigtable_v2.types.RowFilter]): The filter to be applied to the contents of the specified row. Depending on whether or not any results are yielded, either ``true_mutations`` or @@ -476,6 +571,8 @@ def check_and_mutate_row(self, timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. Returns: A :class:`~google.cloud.bigtable_v2.types.CheckAndMutateRowResponse` instance. @@ -487,6 +584,18 @@ def check_and_mutate_row(self, to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ + # Wrap the transport method to add retry and timeout logic. + if 'check_and_mutate_row' not in self._inner_api_calls: + self._inner_api_calls[ + 'check_and_mutate_row'] = google.api_core.gapic_v1.method.wrap_method( + self.transport.check_and_mutate_row, + default_retry=self._method_configs[ + 'CheckAndMutateRow'].retry, + default_timeout=self._method_configs['CheckAndMutateRow'] + .timeout, + client_info=self._client_info, + ) + request = bigtable_pb2.CheckAndMutateRowRequest( table_name=table_name, row_key=row_key, @@ -495,8 +604,20 @@ def check_and_mutate_row(self, true_mutations=true_mutations, false_mutations=false_mutations, ) - return self._check_and_mutate_row( - request, retry=retry, timeout=timeout) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [('table_name', table_name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header) + metadata.append(routing_metadata) + + return self._inner_api_calls['check_and_mutate_row']( + request, retry=retry, timeout=timeout, metadata=metadata) def read_modify_write_row(self, table_name, @@ -504,7 +625,8 @@ def read_modify_write_row(self, rules, app_profile_id=None, retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT): + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None): """ Modifies a row atomically on the server. The method reads the latest existing timestamp and value from the specified columns and writes a new @@ -518,7 +640,11 @@ def read_modify_write_row(self, >>> client = bigtable_v2.BigtableClient() >>> >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + >>> + >>> # TODO: Initialize ``row_key``: >>> row_key = b'' + >>> + >>> # TODO: Initialize ``rules``: >>> rules = [] >>> >>> response = client.read_modify_write_row(table_name, row_key, rules) @@ -534,12 +660,7 @@ def read_modify_write_row(self, affect the results of later ones. 
If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_v2.types.ReadModifyWriteRule` - app_profile_id (str): This is a private alpha release of Cloud Bigtable replication. This feature - is not currently available to most Cloud Bigtable customers. This feature - might be changed in backward-incompatible ways and is not recommended for - production use. It is not subject to any SLA or deprecation policy. - - This value specifies routing for replication. If not specified, the + app_profile_id (str): This value specifies routing for replication. If not specified, the \"default\" application profile will be used. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not @@ -547,6 +668,8 @@ def read_modify_write_row(self, timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. Returns: A :class:`~google.cloud.bigtable_v2.types.ReadModifyWriteRowResponse` instance. @@ -558,11 +681,35 @@ def read_modify_write_row(self, to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ + # Wrap the transport method to add retry and timeout logic. + if 'read_modify_write_row' not in self._inner_api_calls: + self._inner_api_calls[ + 'read_modify_write_row'] = google.api_core.gapic_v1.method.wrap_method( + self.transport.read_modify_write_row, + default_retry=self._method_configs[ + 'ReadModifyWriteRow'].retry, + default_timeout=self._method_configs['ReadModifyWriteRow'] + .timeout, + client_info=self._client_info, + ) + request = bigtable_pb2.ReadModifyWriteRowRequest( table_name=table_name, row_key=row_key, rules=rules, app_profile_id=app_profile_id, ) - return self._read_modify_write_row( - request, retry=retry, timeout=timeout) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [('table_name', table_name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header) + metadata.append(routing_metadata) + + return self._inner_api_calls['read_modify_write_row']( + request, retry=retry, timeout=timeout, metadata=metadata) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client_config.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client_config.py index d87d2776f583..04e214427b08 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client_config.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client_config.py @@ -14,13 +14,22 @@ "rpc_timeout_multiplier": 1.0, "max_rpc_timeout_millis": 20000, "total_timeout_millis": 600000 + }, + "streaming": { + "initial_retry_delay_millis": 100, + "retry_delay_multiplier": 1.3, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 20000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 20000, + "total_timeout_millis": 3600000 } }, "methods": { "ReadRows": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default" + "timeout_millis": 3600000, + "retry_codes_name": "idempotent", + "retry_params_name": "streaming" }, "SampleRowKeys": { "timeout_millis": 60000, @@ -29,12 +38,12 @@ }, 
"MutateRow": { "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", + "retry_codes_name": "idempotent", "retry_params_name": "default" }, "MutateRows": { "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", + "retry_codes_name": "idempotent", "retry_params_name": "default" }, "CheckAndMutateRow": { diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/transports/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/transports/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py new file mode 100644 index 000000000000..0f6e7dfdf4be --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py @@ -0,0 +1,195 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import google.api_core.grpc_helpers + +from google.cloud.bigtable_v2.proto import bigtable_pb2_grpc + + +class BigtableGrpcTransport(object): + """gRPC transport class providing stubs for + google.bigtable.v2 Bigtable API. + + The transport provides access to the raw gRPC stubs, + which can be used to take advantage of advanced + features of gRPC. + """ + # The scopes needed to make gRPC calls to all of the methods defined + # in this service. + _OAUTH_SCOPES = ( + 'https://www.googleapis.com/auth/bigtable.data', + 'https://www.googleapis.com/auth/bigtable.data.readonly', + 'https://www.googleapis.com/auth/cloud-bigtable.data', + 'https://www.googleapis.com/auth/cloud-bigtable.data.readonly', + 'https://www.googleapis.com/auth/cloud-platform', + 'https://www.googleapis.com/auth/cloud-platform.read-only', + ) + + def __init__(self, + channel=None, + credentials=None, + address='bigtable.googleapis.com:443'): + """Instantiate the transport class. + + Args: + channel (grpc.Channel): A ``Channel`` instance through + which to make calls. This argument is mutually exclusive + with ``credentials``; providing both will raise an exception. + credentials (google.auth.credentials.Credentials): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If none + are specified, the client will attempt to ascertain the + credentials from the environment. + address (str): The address where the service is hosted. + """ + # If both `channel` and `credentials` are specified, raise an + # exception (channels come with credentials baked in already). + if channel is not None and credentials is not None: + raise ValueError( + 'The `channel` and `credentials` arguments are mutually ' + 'exclusive.', ) + + # Create the channel. 
+ if channel is None: + channel = self.create_channel( + address=address, + credentials=credentials, + ) + + # gRPC uses objects called "stubs" that are bound to the + # channel and provide a basic method for each RPC. + self._stubs = { + 'bigtable_stub': bigtable_pb2_grpc.BigtableStub(channel), + } + + @classmethod + def create_channel(cls, + address='bigtable.googleapis.com:443', + credentials=None): + """Create and return a gRPC channel object. + + Args: + address (str): The host for the channel to use. + credentials (~.Credentials): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + + Returns: + grpc.Channel: A gRPC channel object. + """ + return google.api_core.grpc_helpers.create_channel( + address, + credentials=credentials, + scopes=cls._OAUTH_SCOPES, + options={ + 'grpc.max_send_message_length': -1, + 'grpc.max_receive_message_length': -1, + }.items(), + ) + + @property + def read_rows(self): + """Return the gRPC stub for {$apiMethod.name}. + + Streams back the contents of all requested rows in key order, optionally + applying the same Reader filter to each. Depending on their size, + rows and cells may be broken up across multiple responses, but + atomicity of each row will still be preserved. See the + ReadRowsResponse documentation for details. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs['bigtable_stub'].ReadRows + + @property + def sample_row_keys(self): + """Return the gRPC stub for {$apiMethod.name}. + + Returns a sample of row keys in the table. The returned row keys will + delimit contiguous sections of the table of approximately equal size, + which can be used to break up the data for distributed tasks like + mapreduces. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs['bigtable_stub'].SampleRowKeys + + @property + def mutate_row(self): + """Return the gRPC stub for {$apiMethod.name}. + + Mutates a row atomically. Cells already present in the row are left + unchanged unless explicitly changed by ``mutation``. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs['bigtable_stub'].MutateRow + + @property + def mutate_rows(self): + """Return the gRPC stub for {$apiMethod.name}. + + Mutates multiple rows in a batch. Each individual row is mutated + atomically as in MutateRow, but the entire batch is not executed + atomically. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs['bigtable_stub'].MutateRows + + @property + def check_and_mutate_row(self): + """Return the gRPC stub for {$apiMethod.name}. + + Mutates a row atomically based on the output of a predicate Reader filter. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs['bigtable_stub'].CheckAndMutateRow + + @property + def read_modify_write_row(self): + """Return the gRPC stub for {$apiMethod.name}. + + Modifies a row atomically on the server. 
The method reads the latest + existing timestamp and value from the specified columns and writes a new + entry based on pre-defined read/modify/write rules. The new value for the + timestamp is the greater of the existing timestamp or the current server + time. The method returns the new contents of all modified cells. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs['bigtable_stub'].ReadModifyWriteRow diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py index 87755c24c433..da10ffd36bda 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py @@ -26,7 +26,6 @@ serialized_pb=_b('\n-google/cloud/bigtable_v2/proto/bigtable.proto\x12\x12google.bigtable.v2\x1a\x1cgoogle/api/annotations.proto\x1a)google/cloud/bigtable_v2/proto/data.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x17google/rpc/status.proto\"\xaa\x01\n\x0fReadRowsRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x05 \x01(\t\x12(\n\x04rows\x18\x02 \x01(\x0b\x32\x1a.google.bigtable.v2.RowSet\x12-\n\x06\x66ilter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x12\n\nrows_limit\x18\x04 \x01(\x03\"\xf8\x02\n\x10ReadRowsResponse\x12>\n\x06\x63hunks\x18\x01 \x03(\x0b\x32..google.bigtable.v2.ReadRowsResponse.CellChunk\x12\x1c\n\x14last_scanned_row_key\x18\x02 \x01(\x0c\x1a\x85\x02\n\tCellChunk\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x31\n\x0b\x66\x61mily_name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\tqualifier\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.BytesValue\x12\x18\n\x10timestamp_micros\x18\x04 \x01(\x03\x12\x0e\n\x06labels\x18\x05 \x03(\t\x12\r\n\x05value\x18\x06 \x01(\x0c\x12\x12\n\nvalue_size\x18\x07 \x01(\x05\x12\x13\n\treset_row\x18\x08 \x01(\x08H\x00\x12\x14\n\ncommit_row\x18\t \x01(\x08H\x00\x42\x0c\n\nrow_status\"B\n\x14SampleRowKeysRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t\">\n\x15SampleRowKeysResponse\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x14\n\x0coffset_bytes\x18\x02 \x01(\x03\"\x80\x01\n\x10MutateRowRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x04 \x01(\t\x12\x0f\n\x07row_key\x18\x02 \x01(\x0c\x12/\n\tmutations\x18\x03 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\"\x13\n\x11MutateRowResponse\"\xc8\x01\n\x11MutateRowsRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x03 \x01(\t\x12<\n\x07\x65ntries\x18\x02 \x03(\x0b\x32+.google.bigtable.v2.MutateRowsRequest.Entry\x1aI\n\x05\x45ntry\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12/\n\tmutations\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\"\x8f\x01\n\x12MutateRowsResponse\x12=\n\x07\x65ntries\x18\x01 \x03(\x0b\x32,.google.bigtable.v2.MutateRowsResponse.Entry\x1a:\n\x05\x45ntry\x12\r\n\x05index\x18\x01 \x01(\x03\x12\"\n\x06status\x18\x02 \x01(\x0b\x32\x12.google.rpc.Status\"\xfd\x01\n\x18\x43heckAndMutateRowRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x07 \x01(\t\x12\x0f\n\x07row_key\x18\x02 \x01(\x0c\x12\x37\n\x10predicate_filter\x18\x06 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x34\n\x0etrue_mutations\x18\x04 
\x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\x12\x35\n\x0f\x66\x61lse_mutations\x18\x05 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\"6\n\x19\x43heckAndMutateRowResponse\x12\x19\n\x11predicate_matched\x18\x01 \x01(\x08\"\x90\x01\n\x19ReadModifyWriteRowRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x04 \x01(\t\x12\x0f\n\x07row_key\x18\x02 \x01(\x0c\x12\x36\n\x05rules\x18\x03 \x03(\x0b\x32\'.google.bigtable.v2.ReadModifyWriteRule\"B\n\x1aReadModifyWriteRowResponse\x12$\n\x03row\x18\x01 \x01(\x0b\x32\x17.google.bigtable.v2.Row2\xad\x08\n\x08\x42igtable\x12\x9d\x01\n\x08ReadRows\x12#.google.bigtable.v2.ReadRowsRequest\x1a$.google.bigtable.v2.ReadRowsResponse\"D\x82\xd3\xe4\x93\x02>\"9/v2/{table_name=projects/*/instances/*/tables/*}:readRows:\x01*0\x01\x12\xae\x01\n\rSampleRowKeys\x12(.google.bigtable.v2.SampleRowKeysRequest\x1a).google.bigtable.v2.SampleRowKeysResponse\"F\x82\xd3\xe4\x93\x02@\x12>/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys0\x01\x12\x9f\x01\n\tMutateRow\x12$.google.bigtable.v2.MutateRowRequest\x1a%.google.bigtable.v2.MutateRowResponse\"E\x82\xd3\xe4\x93\x02?\":/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow:\x01*\x12\xa5\x01\n\nMutateRows\x12%.google.bigtable.v2.MutateRowsRequest\x1a&.google.bigtable.v2.MutateRowsResponse\"F\x82\xd3\xe4\x93\x02@\";/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows:\x01*0\x01\x12\xbf\x01\n\x11\x43heckAndMutateRow\x12,.google.bigtable.v2.CheckAndMutateRowRequest\x1a-.google.bigtable.v2.CheckAndMutateRowResponse\"M\x82\xd3\xe4\x93\x02G\"B/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow:\x01*\x12\xc3\x01\n\x12ReadModifyWriteRow\x12-.google.bigtable.v2.ReadModifyWriteRowRequest\x1a..google.bigtable.v2.ReadModifyWriteRowResponse\"N\x82\xd3\xe4\x93\x02H\"C/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow:\x01*B\x9b\x01\n\x16\x63om.google.bigtable.v2B\rBigtableProtoP\x01Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2b\x06proto3') , dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2.DESCRIPTOR,google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR,google_dot_rpc_dot_status__pb2.DESCRIPTOR,]) -_sym_db.RegisterFileDescriptor(DESCRIPTOR) @@ -44,35 +43,35 @@ has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='app_profile_id', full_name='google.bigtable.v2.ReadRowsRequest.app_profile_id', index=1, number=5, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='rows', full_name='google.bigtable.v2.ReadRowsRequest.rows', index=2, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='filter', full_name='google.bigtable.v2.ReadRowsRequest.filter', index=3, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, 
containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='rows_limit', full_name='google.bigtable.v2.ReadRowsRequest.rows_limit', index=4, number=4, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), ], extensions=[ ], @@ -103,63 +102,63 @@ has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='family_name', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.family_name', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='qualifier', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.qualifier', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='timestamp_micros', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.timestamp_micros', index=3, number=4, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='labels', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.labels', index=4, number=5, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.value', index=5, number=6, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value_size', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.value_size', index=6, number=7, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='reset_row', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.reset_row', index=7, number=8, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='commit_row', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.commit_row', index=8, number=9, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + 
options=None, file=DESCRIPTOR), ], extensions=[ ], @@ -192,14 +191,14 @@ has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='last_scanned_row_key', full_name='google.bigtable.v2.ReadRowsResponse.last_scanned_row_key', index=1, number=2, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), ], extensions=[ ], @@ -230,14 +229,14 @@ has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='app_profile_id', full_name='google.bigtable.v2.SampleRowKeysRequest.app_profile_id', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), ], extensions=[ ], @@ -268,14 +267,14 @@ has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='offset_bytes', full_name='google.bigtable.v2.SampleRowKeysResponse.offset_bytes', index=1, number=2, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), ], extensions=[ ], @@ -306,28 +305,28 @@ has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='app_profile_id', full_name='google.bigtable.v2.MutateRowRequest.app_profile_id', index=1, number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='row_key', full_name='google.bigtable.v2.MutateRowRequest.row_key', index=2, number=2, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='mutations', full_name='google.bigtable.v2.MutateRowRequest.mutations', index=3, number=3, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), ], extensions=[ ], @@ -382,14 +381,14 @@ has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='mutations', full_name='google.bigtable.v2.MutateRowsRequest.Entry.mutations', 
index=1, number=2, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), ], extensions=[ ], @@ -419,21 +418,21 @@ has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='app_profile_id', full_name='google.bigtable.v2.MutateRowsRequest.app_profile_id', index=1, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='entries', full_name='google.bigtable.v2.MutateRowsRequest.entries', index=2, number=2, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), ], extensions=[ ], @@ -464,14 +463,14 @@ has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='status', full_name='google.bigtable.v2.MutateRowsResponse.Entry.status', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), ], extensions=[ ], @@ -501,7 +500,7 @@ has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), ], extensions=[ ], @@ -532,42 +531,42 @@ has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='app_profile_id', full_name='google.bigtable.v2.CheckAndMutateRowRequest.app_profile_id', index=1, number=7, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='row_key', full_name='google.bigtable.v2.CheckAndMutateRowRequest.row_key', index=2, number=2, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='predicate_filter', full_name='google.bigtable.v2.CheckAndMutateRowRequest.predicate_filter', index=3, number=6, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='true_mutations', full_name='google.bigtable.v2.CheckAndMutateRowRequest.true_mutations', index=4, number=4, 
type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='false_mutations', full_name='google.bigtable.v2.CheckAndMutateRowRequest.false_mutations', index=5, number=5, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), ], extensions=[ ], @@ -598,7 +597,7 @@ has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), ], extensions=[ ], @@ -629,28 +628,28 @@ has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='app_profile_id', full_name='google.bigtable.v2.ReadModifyWriteRowRequest.app_profile_id', index=1, number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='row_key', full_name='google.bigtable.v2.ReadModifyWriteRowRequest.row_key', index=2, number=2, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='rules', full_name='google.bigtable.v2.ReadModifyWriteRowRequest.rules', index=3, number=3, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), ], extensions=[ ], @@ -681,7 +680,7 @@ has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), ], extensions=[ ], @@ -734,6 +733,7 @@ DESCRIPTOR.message_types_by_name['CheckAndMutateRowResponse'] = _CHECKANDMUTATEROWRESPONSE DESCRIPTOR.message_types_by_name['ReadModifyWriteRowRequest'] = _READMODIFYWRITEROWREQUEST DESCRIPTOR.message_types_by_name['ReadModifyWriteRowResponse'] = _READMODIFYWRITEROWRESPONSE +_sym_db.RegisterFileDescriptor(DESCRIPTOR) ReadRowsRequest = _reflection.GeneratedProtocolMessageType('ReadRowsRequest', (_message.Message,), dict( DESCRIPTOR = _READROWSREQUEST, @@ -748,13 +748,8 @@ the form ``projects//instances//tables/
``. app_profile_id: - This is a private alpha release of Cloud Bigtable replication. - This feature is not currently available to most Cloud Bigtable - customers. This feature might be changed in backward- - incompatible ways and is not recommended for production use. - It is not subject to any SLA or deprecation policy. This - value specifies routing for replication. If not specified, the - "default" application profile will be used. + This value specifies routing for replication. If not + specified, the "default" application profile will be used. rows: The row keys and/or ranges to read. If not specified, reads from all rows. @@ -866,13 +861,8 @@ Values are of the form ``projects//instances//tables/
``. app_profile_id: - This is a private alpha release of Cloud Bigtable replication. - This feature is not currently available to most Cloud Bigtable - customers. This feature might be changed in backward- - incompatible ways and is not recommended for production use. - It is not subject to any SLA or deprecation policy. This - value specifies routing for replication. If not specified, the - "default" application profile will be used. + This value specifies routing for replication. If not + specified, the "default" application profile will be used. """, # @@protoc_insertion_point(class_scope:google.bigtable.v2.SampleRowKeysRequest) )) @@ -918,13 +908,8 @@ applied. Values are of the form ``projects//instances//tables/
``. app_profile_id: - This is a private alpha release of Cloud Bigtable replication. - This feature is not currently available to most Cloud Bigtable - customers. This feature might be changed in backward- - incompatible ways and is not recommended for production use. - It is not subject to any SLA or deprecation policy. This - value specifies routing for replication. If not specified, the - "default" application profile will be used. + This value specifies routing for replication. If not + specified, the "default" application profile will be used. row_key: The key of the row to which the mutation should be applied. mutations: @@ -977,13 +962,8 @@ The unique name of the table to which the mutations should be applied. app_profile_id: - This is a private alpha release of Cloud Bigtable replication. - This feature is not currently available to most Cloud Bigtable - customers. This feature might be changed in backward- - incompatible ways and is not recommended for production use. - It is not subject to any SLA or deprecation policy. This - value specifies routing for replication. If not specified, the - "default" application profile will be used. + This value specifies routing for replication. If not + specified, the "default" application profile will be used. entries: The row keys and corresponding mutations to be applied in bulk. Each entry is applied as an atomic mutation, but the @@ -1045,13 +1025,8 @@ should be applied. Values are of the form ``projects//instances//tables/
``. app_profile_id: - This is a private alpha release of Cloud Bigtable replication. - This feature is not currently available to most Cloud Bigtable - customers. This feature might be changed in backward- - incompatible ways and is not recommended for production use. - It is not subject to any SLA or deprecation policy. This - value specifies routing for replication. If not specified, the - "default" application profile will be used. + This value specifies routing for replication. If not + specified, the "default" application profile will be used. row_key: The key of the row to which the conditional mutation should be applied. @@ -1108,13 +1083,8 @@ rules should be applied. Values are of the form ``projects//instances//tables/
``. app_profile_id: - This is a private alpha release of Cloud Bigtable replication. - This feature is not currently available to most Cloud Bigtable - customers. This feature might be changed in backward- - incompatible ways and is not recommended for production use. - It is not subject to any SLA or deprecation policy. This - value specifies routing for replication. If not specified, the - "default" application profile will be used. + This value specifies routing for replication. If not + specified, the "default" application profile will be used. row_key: The key of the row to which the read/modify/write rules should be applied. @@ -1146,322 +1116,73 @@ DESCRIPTOR.has_options = True DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\026com.google.bigtable.v2B\rBigtableProtoP\001Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2')) -try: - # THESE ELEMENTS WILL BE DEPRECATED. - # Please use the generated *_pb2_grpc.py files instead. - import grpc - from grpc.beta import implementations as beta_implementations - from grpc.beta import interfaces as beta_interfaces - from grpc.framework.common import cardinality - from grpc.framework.interfaces.face import utilities as face_utilities - - - class BigtableStub(object): - """Service for reading from and writing to existing Bigtable tables. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. - """ - self.ReadRows = channel.unary_stream( - '/google.bigtable.v2.Bigtable/ReadRows', - request_serializer=ReadRowsRequest.SerializeToString, - response_deserializer=ReadRowsResponse.FromString, - ) - self.SampleRowKeys = channel.unary_stream( - '/google.bigtable.v2.Bigtable/SampleRowKeys', - request_serializer=SampleRowKeysRequest.SerializeToString, - response_deserializer=SampleRowKeysResponse.FromString, - ) - self.MutateRow = channel.unary_unary( - '/google.bigtable.v2.Bigtable/MutateRow', - request_serializer=MutateRowRequest.SerializeToString, - response_deserializer=MutateRowResponse.FromString, - ) - self.MutateRows = channel.unary_stream( - '/google.bigtable.v2.Bigtable/MutateRows', - request_serializer=MutateRowsRequest.SerializeToString, - response_deserializer=MutateRowsResponse.FromString, - ) - self.CheckAndMutateRow = channel.unary_unary( - '/google.bigtable.v2.Bigtable/CheckAndMutateRow', - request_serializer=CheckAndMutateRowRequest.SerializeToString, - response_deserializer=CheckAndMutateRowResponse.FromString, - ) - self.ReadModifyWriteRow = channel.unary_unary( - '/google.bigtable.v2.Bigtable/ReadModifyWriteRow', - request_serializer=ReadModifyWriteRowRequest.SerializeToString, - response_deserializer=ReadModifyWriteRowResponse.FromString, - ) - - - class BigtableServicer(object): - """Service for reading from and writing to existing Bigtable tables. - """ - - def ReadRows(self, request, context): - """Streams back the contents of all requested rows in key order, optionally - applying the same Reader filter to each. Depending on their size, - rows and cells may be broken up across multiple responses, but - atomicity of each row will still be preserved. See the - ReadRowsResponse documentation for details. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def SampleRowKeys(self, request, context): - """Returns a sample of row keys in the table. 
The returned row keys will - delimit contiguous sections of the table of approximately equal size, - which can be used to break up the data for distributed tasks like - mapreduces. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def MutateRow(self, request, context): - """Mutates a row atomically. Cells already present in the row are left - unchanged unless explicitly changed by `mutation`. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def MutateRows(self, request, context): - """Mutates multiple rows in a batch. Each individual row is mutated - atomically as in MutateRow, but the entire batch is not executed - atomically. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def CheckAndMutateRow(self, request, context): - """Mutates a row atomically based on the output of a predicate Reader filter. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def ReadModifyWriteRow(self, request, context): - """Modifies a row atomically on the server. The method reads the latest - existing timestamp and value from the specified columns and writes a new - entry based on pre-defined read/modify/write rules. The new value for the - timestamp is the greater of the existing timestamp or the current server - time. The method returns the new contents of all modified cells. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') +_BIGTABLE = _descriptor.ServiceDescriptor( + name='Bigtable', + full_name='google.bigtable.v2.Bigtable', + file=DESCRIPTOR, + index=0, + options=None, + serialized_start=1912, + serialized_end=2981, + methods=[ + _descriptor.MethodDescriptor( + name='ReadRows', + full_name='google.bigtable.v2.Bigtable.ReadRows', + index=0, + containing_service=None, + input_type=_READROWSREQUEST, + output_type=_READROWSRESPONSE, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002>\"9/v2/{table_name=projects/*/instances/*/tables/*}:readRows:\001*')), + ), + _descriptor.MethodDescriptor( + name='SampleRowKeys', + full_name='google.bigtable.v2.Bigtable.SampleRowKeys', + index=1, + containing_service=None, + input_type=_SAMPLEROWKEYSREQUEST, + output_type=_SAMPLEROWKEYSRESPONSE, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002@\022>/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys')), + ), + _descriptor.MethodDescriptor( + name='MutateRow', + full_name='google.bigtable.v2.Bigtable.MutateRow', + index=2, + containing_service=None, + input_type=_MUTATEROWREQUEST, + output_type=_MUTATEROWRESPONSE, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002?\":/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow:\001*')), + ), + _descriptor.MethodDescriptor( + name='MutateRows', + full_name='google.bigtable.v2.Bigtable.MutateRows', + index=3, + containing_service=None, + input_type=_MUTATEROWSREQUEST, + output_type=_MUTATEROWSRESPONSE, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), 
_b('\202\323\344\223\002@\";/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows:\001*')), + ), + _descriptor.MethodDescriptor( + name='CheckAndMutateRow', + full_name='google.bigtable.v2.Bigtable.CheckAndMutateRow', + index=4, + containing_service=None, + input_type=_CHECKANDMUTATEROWREQUEST, + output_type=_CHECKANDMUTATEROWRESPONSE, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002G\"B/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow:\001*')), + ), + _descriptor.MethodDescriptor( + name='ReadModifyWriteRow', + full_name='google.bigtable.v2.Bigtable.ReadModifyWriteRow', + index=5, + containing_service=None, + input_type=_READMODIFYWRITEROWREQUEST, + output_type=_READMODIFYWRITEROWRESPONSE, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002H\"C/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow:\001*')), + ), +]) +_sym_db.RegisterServiceDescriptor(_BIGTABLE) + +DESCRIPTOR.services_by_name['Bigtable'] = _BIGTABLE - def add_BigtableServicer_to_server(servicer, server): - rpc_method_handlers = { - 'ReadRows': grpc.unary_stream_rpc_method_handler( - servicer.ReadRows, - request_deserializer=ReadRowsRequest.FromString, - response_serializer=ReadRowsResponse.SerializeToString, - ), - 'SampleRowKeys': grpc.unary_stream_rpc_method_handler( - servicer.SampleRowKeys, - request_deserializer=SampleRowKeysRequest.FromString, - response_serializer=SampleRowKeysResponse.SerializeToString, - ), - 'MutateRow': grpc.unary_unary_rpc_method_handler( - servicer.MutateRow, - request_deserializer=MutateRowRequest.FromString, - response_serializer=MutateRowResponse.SerializeToString, - ), - 'MutateRows': grpc.unary_stream_rpc_method_handler( - servicer.MutateRows, - request_deserializer=MutateRowsRequest.FromString, - response_serializer=MutateRowsResponse.SerializeToString, - ), - 'CheckAndMutateRow': grpc.unary_unary_rpc_method_handler( - servicer.CheckAndMutateRow, - request_deserializer=CheckAndMutateRowRequest.FromString, - response_serializer=CheckAndMutateRowResponse.SerializeToString, - ), - 'ReadModifyWriteRow': grpc.unary_unary_rpc_method_handler( - servicer.ReadModifyWriteRow, - request_deserializer=ReadModifyWriteRowRequest.FromString, - response_serializer=ReadModifyWriteRowResponse.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - 'google.bigtable.v2.Bigtable', rpc_method_handlers) - server.add_generic_rpc_handlers((generic_handler,)) - - - class BetaBigtableServicer(object): - """The Beta API is deprecated for 0.15.0 and later. - - It is recommended to use the GA API (classes and functions in this - file not marked beta) for all further purposes. This class was generated - only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0.""" - """Service for reading from and writing to existing Bigtable tables. - """ - def ReadRows(self, request, context): - """Streams back the contents of all requested rows in key order, optionally - applying the same Reader filter to each. Depending on their size, - rows and cells may be broken up across multiple responses, but - atomicity of each row will still be preserved. See the - ReadRowsResponse documentation for details. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - def SampleRowKeys(self, request, context): - """Returns a sample of row keys in the table. 
The returned row keys will - delimit contiguous sections of the table of approximately equal size, - which can be used to break up the data for distributed tasks like - mapreduces. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - def MutateRow(self, request, context): - """Mutates a row atomically. Cells already present in the row are left - unchanged unless explicitly changed by `mutation`. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - def MutateRows(self, request, context): - """Mutates multiple rows in a batch. Each individual row is mutated - atomically as in MutateRow, but the entire batch is not executed - atomically. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - def CheckAndMutateRow(self, request, context): - """Mutates a row atomically based on the output of a predicate Reader filter. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - def ReadModifyWriteRow(self, request, context): - """Modifies a row atomically on the server. The method reads the latest - existing timestamp and value from the specified columns and writes a new - entry based on pre-defined read/modify/write rules. The new value for the - timestamp is the greater of the existing timestamp or the current server - time. The method returns the new contents of all modified cells. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - - - class BetaBigtableStub(object): - """The Beta API is deprecated for 0.15.0 and later. - - It is recommended to use the GA API (classes and functions in this - file not marked beta) for all further purposes. This class was generated - only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0.""" - """Service for reading from and writing to existing Bigtable tables. - """ - def ReadRows(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - """Streams back the contents of all requested rows in key order, optionally - applying the same Reader filter to each. Depending on their size, - rows and cells may be broken up across multiple responses, but - atomicity of each row will still be preserved. See the - ReadRowsResponse documentation for details. - """ - raise NotImplementedError() - def SampleRowKeys(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - """Returns a sample of row keys in the table. The returned row keys will - delimit contiguous sections of the table of approximately equal size, - which can be used to break up the data for distributed tasks like - mapreduces. - """ - raise NotImplementedError() - def MutateRow(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - """Mutates a row atomically. Cells already present in the row are left - unchanged unless explicitly changed by `mutation`. - """ - raise NotImplementedError() - MutateRow.future = None - def MutateRows(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - """Mutates multiple rows in a batch. Each individual row is mutated - atomically as in MutateRow, but the entire batch is not executed - atomically. - """ - raise NotImplementedError() - def CheckAndMutateRow(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - """Mutates a row atomically based on the output of a predicate Reader filter. 
- """ - raise NotImplementedError() - CheckAndMutateRow.future = None - def ReadModifyWriteRow(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - """Modifies a row atomically on the server. The method reads the latest - existing timestamp and value from the specified columns and writes a new - entry based on pre-defined read/modify/write rules. The new value for the - timestamp is the greater of the existing timestamp or the current server - time. The method returns the new contents of all modified cells. - """ - raise NotImplementedError() - ReadModifyWriteRow.future = None - - - def beta_create_Bigtable_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): - """The Beta API is deprecated for 0.15.0 and later. - - It is recommended to use the GA API (classes and functions in this - file not marked beta) for all further purposes. This function was - generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0""" - request_deserializers = { - ('google.bigtable.v2.Bigtable', 'CheckAndMutateRow'): CheckAndMutateRowRequest.FromString, - ('google.bigtable.v2.Bigtable', 'MutateRow'): MutateRowRequest.FromString, - ('google.bigtable.v2.Bigtable', 'MutateRows'): MutateRowsRequest.FromString, - ('google.bigtable.v2.Bigtable', 'ReadModifyWriteRow'): ReadModifyWriteRowRequest.FromString, - ('google.bigtable.v2.Bigtable', 'ReadRows'): ReadRowsRequest.FromString, - ('google.bigtable.v2.Bigtable', 'SampleRowKeys'): SampleRowKeysRequest.FromString, - } - response_serializers = { - ('google.bigtable.v2.Bigtable', 'CheckAndMutateRow'): CheckAndMutateRowResponse.SerializeToString, - ('google.bigtable.v2.Bigtable', 'MutateRow'): MutateRowResponse.SerializeToString, - ('google.bigtable.v2.Bigtable', 'MutateRows'): MutateRowsResponse.SerializeToString, - ('google.bigtable.v2.Bigtable', 'ReadModifyWriteRow'): ReadModifyWriteRowResponse.SerializeToString, - ('google.bigtable.v2.Bigtable', 'ReadRows'): ReadRowsResponse.SerializeToString, - ('google.bigtable.v2.Bigtable', 'SampleRowKeys'): SampleRowKeysResponse.SerializeToString, - } - method_implementations = { - ('google.bigtable.v2.Bigtable', 'CheckAndMutateRow'): face_utilities.unary_unary_inline(servicer.CheckAndMutateRow), - ('google.bigtable.v2.Bigtable', 'MutateRow'): face_utilities.unary_unary_inline(servicer.MutateRow), - ('google.bigtable.v2.Bigtable', 'MutateRows'): face_utilities.unary_stream_inline(servicer.MutateRows), - ('google.bigtable.v2.Bigtable', 'ReadModifyWriteRow'): face_utilities.unary_unary_inline(servicer.ReadModifyWriteRow), - ('google.bigtable.v2.Bigtable', 'ReadRows'): face_utilities.unary_stream_inline(servicer.ReadRows), - ('google.bigtable.v2.Bigtable', 'SampleRowKeys'): face_utilities.unary_stream_inline(servicer.SampleRowKeys), - } - server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) - return beta_implementations.server(method_implementations, options=server_options) - - - def beta_create_Bigtable_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): - """The Beta API is deprecated for 0.15.0 and later. - - It is recommended to use the GA API (classes and functions in this - file not marked beta) for all further purposes. 
This function was - generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0""" - request_serializers = { - ('google.bigtable.v2.Bigtable', 'CheckAndMutateRow'): CheckAndMutateRowRequest.SerializeToString, - ('google.bigtable.v2.Bigtable', 'MutateRow'): MutateRowRequest.SerializeToString, - ('google.bigtable.v2.Bigtable', 'MutateRows'): MutateRowsRequest.SerializeToString, - ('google.bigtable.v2.Bigtable', 'ReadModifyWriteRow'): ReadModifyWriteRowRequest.SerializeToString, - ('google.bigtable.v2.Bigtable', 'ReadRows'): ReadRowsRequest.SerializeToString, - ('google.bigtable.v2.Bigtable', 'SampleRowKeys'): SampleRowKeysRequest.SerializeToString, - } - response_deserializers = { - ('google.bigtable.v2.Bigtable', 'CheckAndMutateRow'): CheckAndMutateRowResponse.FromString, - ('google.bigtable.v2.Bigtable', 'MutateRow'): MutateRowResponse.FromString, - ('google.bigtable.v2.Bigtable', 'MutateRows'): MutateRowsResponse.FromString, - ('google.bigtable.v2.Bigtable', 'ReadModifyWriteRow'): ReadModifyWriteRowResponse.FromString, - ('google.bigtable.v2.Bigtable', 'ReadRows'): ReadRowsResponse.FromString, - ('google.bigtable.v2.Bigtable', 'SampleRowKeys'): SampleRowKeysResponse.FromString, - } - cardinalities = { - 'CheckAndMutateRow': cardinality.Cardinality.UNARY_UNARY, - 'MutateRow': cardinality.Cardinality.UNARY_UNARY, - 'MutateRows': cardinality.Cardinality.UNARY_STREAM, - 'ReadModifyWriteRow': cardinality.Cardinality.UNARY_UNARY, - 'ReadRows': cardinality.Cardinality.UNARY_STREAM, - 'SampleRowKeys': cardinality.Cardinality.UNARY_STREAM, - } - stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) - return beta_implementations.dynamic_stub(channel, 'google.bigtable.v2.Bigtable', cardinalities, options=stub_options) -except ImportError: - pass # @@protoc_insertion_point(module_scope) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2_grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2_grpc.py index e3fb9e6ba348..950b89f98023 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2_grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2_grpc.py @@ -1,7 +1,7 @@ # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
import grpc -import google.cloud.bigtable_v2.proto.bigtable_pb2 as google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2 +from google.cloud.bigtable_v2.proto import bigtable_pb2 as google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2 class BigtableStub(object): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py index a43f75240604..70a305b87aa3 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py @@ -21,7 +21,6 @@ syntax='proto3', serialized_pb=_b('\n)google/cloud/bigtable_v2/proto/data.proto\x12\x12google.bigtable.v2\"@\n\x03Row\x12\x0b\n\x03key\x18\x01 \x01(\x0c\x12,\n\x08\x66\x61milies\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v2.Family\"C\n\x06\x46\x61mily\x12\x0c\n\x04name\x18\x01 \x01(\t\x12+\n\x07\x63olumns\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v2.Column\"D\n\x06\x43olumn\x12\x11\n\tqualifier\x18\x01 \x01(\x0c\x12\'\n\x05\x63\x65lls\x18\x02 \x03(\x0b\x32\x18.google.bigtable.v2.Cell\"?\n\x04\x43\x65ll\x12\x18\n\x10timestamp_micros\x18\x01 \x01(\x03\x12\r\n\x05value\x18\x02 \x01(\x0c\x12\x0e\n\x06labels\x18\x03 \x03(\t\"\x8a\x01\n\x08RowRange\x12\x1a\n\x10start_key_closed\x18\x01 \x01(\x0cH\x00\x12\x18\n\x0estart_key_open\x18\x02 \x01(\x0cH\x00\x12\x16\n\x0c\x65nd_key_open\x18\x03 \x01(\x0cH\x01\x12\x18\n\x0e\x65nd_key_closed\x18\x04 \x01(\x0cH\x01\x42\x0b\n\tstart_keyB\t\n\x07\x65nd_key\"L\n\x06RowSet\x12\x10\n\x08row_keys\x18\x01 \x03(\x0c\x12\x30\n\nrow_ranges\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.RowRange\"\xc6\x01\n\x0b\x43olumnRange\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12 \n\x16start_qualifier_closed\x18\x02 \x01(\x0cH\x00\x12\x1e\n\x14start_qualifier_open\x18\x03 \x01(\x0cH\x00\x12\x1e\n\x14\x65nd_qualifier_closed\x18\x04 \x01(\x0cH\x01\x12\x1c\n\x12\x65nd_qualifier_open\x18\x05 \x01(\x0cH\x01\x42\x11\n\x0fstart_qualifierB\x0f\n\rend_qualifier\"N\n\x0eTimestampRange\x12\x1e\n\x16start_timestamp_micros\x18\x01 \x01(\x03\x12\x1c\n\x14\x65nd_timestamp_micros\x18\x02 \x01(\x03\"\x98\x01\n\nValueRange\x12\x1c\n\x12start_value_closed\x18\x01 \x01(\x0cH\x00\x12\x1a\n\x10start_value_open\x18\x02 \x01(\x0cH\x00\x12\x1a\n\x10\x65nd_value_closed\x18\x03 \x01(\x0cH\x01\x12\x18\n\x0e\x65nd_value_open\x18\x04 \x01(\x0cH\x01\x42\r\n\x0bstart_valueB\x0b\n\tend_value\"\xdf\x08\n\tRowFilter\x12\x34\n\x05\x63hain\x18\x01 \x01(\x0b\x32#.google.bigtable.v2.RowFilter.ChainH\x00\x12>\n\ninterleave\x18\x02 \x01(\x0b\x32(.google.bigtable.v2.RowFilter.InterleaveH\x00\x12<\n\tcondition\x18\x03 \x01(\x0b\x32\'.google.bigtable.v2.RowFilter.ConditionH\x00\x12\x0e\n\x04sink\x18\x10 \x01(\x08H\x00\x12\x19\n\x0fpass_all_filter\x18\x11 \x01(\x08H\x00\x12\x1a\n\x10\x62lock_all_filter\x18\x12 \x01(\x08H\x00\x12\x1e\n\x14row_key_regex_filter\x18\x04 \x01(\x0cH\x00\x12\x1b\n\x11row_sample_filter\x18\x0e \x01(\x01H\x00\x12\"\n\x18\x66\x61mily_name_regex_filter\x18\x05 \x01(\tH\x00\x12\'\n\x1d\x63olumn_qualifier_regex_filter\x18\x06 \x01(\x0cH\x00\x12>\n\x13\x63olumn_range_filter\x18\x07 \x01(\x0b\x32\x1f.google.bigtable.v2.ColumnRangeH\x00\x12\x44\n\x16timestamp_range_filter\x18\x08 \x01(\x0b\x32\".google.bigtable.v2.TimestampRangeH\x00\x12\x1c\n\x12value_regex_filter\x18\t \x01(\x0cH\x00\x12<\n\x12value_range_filter\x18\x0f \x01(\x0b\x32\x1e.google.bigtable.v2.ValueRangeH\x00\x12%\n\x1b\x63\x65lls_per_row_offset_filter\x18\n 
\x01(\x05H\x00\x12$\n\x1a\x63\x65lls_per_row_limit_filter\x18\x0b \x01(\x05H\x00\x12\'\n\x1d\x63\x65lls_per_column_limit_filter\x18\x0c \x01(\x05H\x00\x12!\n\x17strip_value_transformer\x18\r \x01(\x08H\x00\x12!\n\x17\x61pply_label_transformer\x18\x13 \x01(\tH\x00\x1a\x37\n\x05\x43hain\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x1a<\n\nInterleave\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x1a\xad\x01\n\tCondition\x12\x37\n\x10predicate_filter\x18\x01 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x32\n\x0btrue_filter\x18\x02 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x33\n\x0c\x66\x61lse_filter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilterB\x08\n\x06\x66ilter\"\xc9\x04\n\x08Mutation\x12\x38\n\x08set_cell\x18\x01 \x01(\x0b\x32$.google.bigtable.v2.Mutation.SetCellH\x00\x12K\n\x12\x64\x65lete_from_column\x18\x02 \x01(\x0b\x32-.google.bigtable.v2.Mutation.DeleteFromColumnH\x00\x12K\n\x12\x64\x65lete_from_family\x18\x03 \x01(\x0b\x32-.google.bigtable.v2.Mutation.DeleteFromFamilyH\x00\x12\x45\n\x0f\x64\x65lete_from_row\x18\x04 \x01(\x0b\x32*.google.bigtable.v2.Mutation.DeleteFromRowH\x00\x1a\x61\n\x07SetCell\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x18\n\x10timestamp_micros\x18\x03 \x01(\x03\x12\r\n\x05value\x18\x04 \x01(\x0c\x1ay\n\x10\x44\x65leteFromColumn\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x36\n\ntime_range\x18\x03 \x01(\x0b\x32\".google.bigtable.v2.TimestampRange\x1a\'\n\x10\x44\x65leteFromFamily\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x1a\x0f\n\rDeleteFromRowB\n\n\x08mutation\"\x80\x01\n\x13ReadModifyWriteRule\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x16\n\x0c\x61ppend_value\x18\x03 \x01(\x0cH\x00\x12\x1a\n\x10increment_amount\x18\x04 \x01(\x03H\x00\x42\x06\n\x04ruleB\x97\x01\n\x16\x63om.google.bigtable.v2B\tDataProtoP\x01Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2b\x06proto3') ) -_sym_db.RegisterFileDescriptor(DESCRIPTOR) @@ -39,14 +38,14 @@ has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='families', full_name='google.bigtable.v2.Row.families', index=1, number=2, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), ], extensions=[ ], @@ -77,14 +76,14 @@ has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='columns', full_name='google.bigtable.v2.Family.columns', index=1, number=2, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), ], extensions=[ ], @@ -115,14 +114,14 @@ has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - 
options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='cells', full_name='google.bigtable.v2.Column.cells', index=1, number=2, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), ], extensions=[ ], @@ -153,21 +152,21 @@ has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='google.bigtable.v2.Cell.value', index=1, number=2, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='labels', full_name='google.bigtable.v2.Cell.labels', index=2, number=3, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), ], extensions=[ ], @@ -198,28 +197,28 @@ has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='start_key_open', full_name='google.bigtable.v2.RowRange.start_key_open', index=1, number=2, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='end_key_open', full_name='google.bigtable.v2.RowRange.end_key_open', index=2, number=3, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='end_key_closed', full_name='google.bigtable.v2.RowRange.end_key_closed', index=3, number=4, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), ], extensions=[ ], @@ -256,14 +255,14 @@ has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='row_ranges', full_name='google.bigtable.v2.RowSet.row_ranges', index=1, number=2, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), ], extensions=[ ], @@ -294,35 +293,35 @@ has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='start_qualifier_closed', full_name='google.bigtable.v2.ColumnRange.start_qualifier_closed', index=1, number=2, type=12, 
cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='start_qualifier_open', full_name='google.bigtable.v2.ColumnRange.start_qualifier_open', index=2, number=3, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='end_qualifier_closed', full_name='google.bigtable.v2.ColumnRange.end_qualifier_closed', index=3, number=4, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='end_qualifier_open', full_name='google.bigtable.v2.ColumnRange.end_qualifier_open', index=4, number=5, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), ], extensions=[ ], @@ -359,14 +358,14 @@ has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='end_timestamp_micros', full_name='google.bigtable.v2.TimestampRange.end_timestamp_micros', index=1, number=2, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), ], extensions=[ ], @@ -397,28 +396,28 @@ has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='start_value_open', full_name='google.bigtable.v2.ValueRange.start_value_open', index=1, number=2, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='end_value_closed', full_name='google.bigtable.v2.ValueRange.end_value_closed', index=2, number=3, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='end_value_open', full_name='google.bigtable.v2.ValueRange.end_value_open', index=3, number=4, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), ], extensions=[ ], @@ -455,7 +454,7 @@ has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), ], extensions=[ ], @@ -485,7 +484,7 @@ has_default_value=False, default_value=[], 
message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), ], extensions=[ ], @@ -515,21 +514,21 @@ has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='true_filter', full_name='google.bigtable.v2.RowFilter.Condition.true_filter', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='false_filter', full_name='google.bigtable.v2.RowFilter.Condition.false_filter', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), ], extensions=[ ], @@ -559,133 +558,133 @@ has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='interleave', full_name='google.bigtable.v2.RowFilter.interleave', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='condition', full_name='google.bigtable.v2.RowFilter.condition', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='sink', full_name='google.bigtable.v2.RowFilter.sink', index=3, number=16, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='pass_all_filter', full_name='google.bigtable.v2.RowFilter.pass_all_filter', index=4, number=17, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='block_all_filter', full_name='google.bigtable.v2.RowFilter.block_all_filter', index=5, number=18, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='row_key_regex_filter', full_name='google.bigtable.v2.RowFilter.row_key_regex_filter', index=6, number=4, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='row_sample_filter', 
full_name='google.bigtable.v2.RowFilter.row_sample_filter', index=7, number=14, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='family_name_regex_filter', full_name='google.bigtable.v2.RowFilter.family_name_regex_filter', index=8, number=5, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='column_qualifier_regex_filter', full_name='google.bigtable.v2.RowFilter.column_qualifier_regex_filter', index=9, number=6, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='column_range_filter', full_name='google.bigtable.v2.RowFilter.column_range_filter', index=10, number=7, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='timestamp_range_filter', full_name='google.bigtable.v2.RowFilter.timestamp_range_filter', index=11, number=8, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value_regex_filter', full_name='google.bigtable.v2.RowFilter.value_regex_filter', index=12, number=9, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value_range_filter', full_name='google.bigtable.v2.RowFilter.value_range_filter', index=13, number=15, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='cells_per_row_offset_filter', full_name='google.bigtable.v2.RowFilter.cells_per_row_offset_filter', index=14, number=10, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='cells_per_row_limit_filter', full_name='google.bigtable.v2.RowFilter.cells_per_row_limit_filter', index=15, number=11, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='cells_per_column_limit_filter', full_name='google.bigtable.v2.RowFilter.cells_per_column_limit_filter', index=16, number=12, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, 
enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='strip_value_transformer', full_name='google.bigtable.v2.RowFilter.strip_value_transformer', index=17, number=13, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='apply_label_transformer', full_name='google.bigtable.v2.RowFilter.apply_label_transformer', index=18, number=19, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), ], extensions=[ ], @@ -719,28 +718,28 @@ has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='column_qualifier', full_name='google.bigtable.v2.Mutation.SetCell.column_qualifier', index=1, number=2, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='timestamp_micros', full_name='google.bigtable.v2.Mutation.SetCell.timestamp_micros', index=2, number=3, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='google.bigtable.v2.Mutation.SetCell.value', index=3, number=4, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), ], extensions=[ ], @@ -770,21 +769,21 @@ has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='column_qualifier', full_name='google.bigtable.v2.Mutation.DeleteFromColumn.column_qualifier', index=1, number=2, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='time_range', full_name='google.bigtable.v2.Mutation.DeleteFromColumn.time_range', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), ], extensions=[ ], @@ -814,7 +813,7 @@ has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), ], extensions=[ ], @@ -867,28 +866,28 @@ has_default_value=False, default_value=None, 
message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='delete_from_column', full_name='google.bigtable.v2.Mutation.delete_from_column', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='delete_from_family', full_name='google.bigtable.v2.Mutation.delete_from_family', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='delete_from_row', full_name='google.bigtable.v2.Mutation.delete_from_row', index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), ], extensions=[ ], @@ -922,28 +921,28 @@ has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='column_qualifier', full_name='google.bigtable.v2.ReadModifyWriteRule.column_qualifier', index=1, number=2, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='append_value', full_name='google.bigtable.v2.ReadModifyWriteRule.append_value', index=2, number=3, type=12, cpp_type=9, label=1, has_default_value=False, default_value=_b(""), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='increment_amount', full_name='google.bigtable.v2.ReadModifyWriteRule.increment_amount', index=3, number=4, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=None), + options=None, file=DESCRIPTOR), ], extensions=[ ], @@ -1113,6 +1112,7 @@ DESCRIPTOR.message_types_by_name['RowFilter'] = _ROWFILTER DESCRIPTOR.message_types_by_name['Mutation'] = _MUTATION DESCRIPTOR.message_types_by_name['ReadModifyWriteRule'] = _READMODIFYWRITERULE +_sym_db.RegisterFileDescriptor(DESCRIPTOR) Row = _reflection.GeneratedProtocolMessageType('Row', (_message.Message,), dict( DESCRIPTOR = _ROW, @@ -1742,14 +1742,4 @@ DESCRIPTOR.has_options = True DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\026com.google.bigtable.v2B\tDataProtoP\001Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2')) -try: - # THESE ELEMENTS WILL BE DEPRECATED. - # Please use the generated *_pb2_grpc.py files instead. 
- import grpc - from grpc.beta import implementations as beta_implementations - from grpc.beta import interfaces as beta_interfaces - from grpc.framework.common import cardinality - from grpc.framework.interfaces.face import utilities as face_utilities -except ImportError: - pass # @@protoc_insertion_point(module_scope) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types.py index a5d64f46ef07..15db79060479 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types.py @@ -1,4 +1,6 @@ -# Copyright 2017 Google LLC +# -*- coding: utf-8 -*- +# +# Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,17 +17,16 @@ from __future__ import absolute_import import sys +from google.api_core.protobuf_helpers import get_messages + from google.api import http_pb2 +from google.cloud.bigtable_v2.proto import bigtable_pb2 +from google.cloud.bigtable_v2.proto import data_pb2 from google.protobuf import any_pb2 from google.protobuf import descriptor_pb2 from google.protobuf import wrappers_pb2 from google.rpc import status_pb2 -from google.api_core.protobuf_helpers import get_messages -from google.cloud.bigtable_v2.proto import bigtable_pb2 -from google.cloud.bigtable_v2.proto import data_pb2 - - _shared_modules = [ http_pb2, any_pb2, @@ -45,7 +46,6 @@ for name, message in get_messages(module).items(): setattr(sys.modules[__name__], name, message) names.append(name) - for module in _local_modules: for name, message in get_messages(module).items(): message.__module__ = 'google.cloud.bigtable_v2.types' diff --git a/packages/google-cloud-bigtable/nox.py b/packages/google-cloud-bigtable/nox.py index e8c0ac93df88..1e8ce157ee81 100644 --- a/packages/google-cloud-bigtable/nox.py +++ b/packages/google-cloud-bigtable/nox.py @@ -69,7 +69,7 @@ def unit(session, py): @nox.session -@nox.parametrize('py', ['2.7', '3.6']) +@nox.parametrize('py', ['2.7', '3.7']) def system(session, py): """Run the system test suite.""" diff --git a/packages/google-cloud-bigtable/synth.py b/packages/google-cloud-bigtable/synth.py new file mode 100644 index 000000000000..effddc9d0615 --- /dev/null +++ b/packages/google-cloud-bigtable/synth.py @@ -0,0 +1,63 @@ +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""This script is used to synthesize generated parts of this library.""" + +import synthtool as s +from synthtool import gcp + +gapic = gcp.GAPICGenerator() + + +# Generate client +library = gapic.py_library( + 'bigtable', + 'v2', + config_path='/google/bigtable/artman_bigtable.yaml', + artman_output_name='bigtable-v2') + +s.move(library / 'google/cloud/bigtable_v2') +s.move(library / 'tests') + +# Generate admin client +library = gapic.py_library( + 'bigtable_admin', + 'v2', + config_path='/google/bigtable/admin/artman_bigtableadmin.yaml', + artman_output_name='bigtable-admin-v2') + +s.move(library / 'google/cloud/bigtable_admin_v2') +s.move(library / 'tests') + +s.replace( + ['google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py', + 'google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py'], + "'google-cloud-bigtable-admin'", + "'google-cloud-bigtable'") + +s.replace( + "**/*.py", + 'from google\.cloud\.bigtable\.admin_v2.proto', + 'from google.cloud.bigtable_admin_v2.proto') + +s.replace( + ['google/cloud/bigtable_admin_v2/gapic/transports/' + 'bigtable_table_admin_grpc_transport.py', + 'google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py'], + 'google.api_core.grpc_helpers.create_channel\(\n' + '(\s+)address.*\n\s+credentials.*\n\s+scopes.*\n', + "\g<0>\g<1>options={\n\g<1> 'grpc.max_send_message_length': -1,\n" + "\g<1> 'grpc.max_receive_message_length': -1,\n" + "\g<1>}.items(),\n" +) diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_client_v2.py b/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_client_v2.py index d574049f9b9e..b432e0c716c7 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_client_v2.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_client_v2.py @@ -1,4 +1,6 @@ -# Copyright 2017 Google LLC +# -*- coding: utf-8 -*- +# +# Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py b/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py index f7c1a515cdb6..37c701c7bb51 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- +# # Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -247,7 +249,7 @@ def test_partial_update_instance(self): client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) # Setup Request - instance = instance_pb2.Instance() + instance = {} update_mask = {} response = client.partial_update_instance(instance, update_mask) @@ -273,7 +275,7 @@ def test_partial_update_instance_exception(self): client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) # Setup Request - instance = instance_pb2.Instance() + instance = {} update_mask = {} response = client.partial_update_instance(instance, update_mask) @@ -436,11 +438,11 @@ def test_list_clusters_exception(self): def test_update_cluster(self): # Setup Expected Response name_2 = 'name2-1052831874' - location_2 = 'location21541837352' + location = 'location1901043637' serve_nodes_2 = 1623486220 expected_response = { 'name': name_2, - 'location': location_2, + 'location': location, 'serve_nodes': serve_nodes_2 } expected_response = instance_pb2.Cluster(**expected_response) @@ -454,16 +456,15 @@ def test_update_cluster(self): # Setup Request name = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') - location = 'location1901043637' serve_nodes = 1288838783 - response = client.update_cluster(name, location, serve_nodes) + response = client.update_cluster(name, serve_nodes) result = response.result() assert expected_response == result assert len(channel.requests) == 1 expected_request = instance_pb2.Cluster( - name=name, location=location, serve_nodes=serve_nodes) + name=name, serve_nodes=serve_nodes) actual_request = channel.requests[0][1] assert expected_request == actual_request @@ -480,10 +481,9 @@ def test_update_cluster_exception(self): # Setup Request name = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') - location = 'location1901043637' serve_nodes = 1288838783 - response = client.update_cluster(name, location, serve_nodes) + response = client.update_cluster(name, serve_nodes) exception = response.exception() assert exception.errors[0] == error @@ -662,7 +662,7 @@ def test_update_app_profile(self): client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) # Setup Request - app_profile = instance_pb2.AppProfile() + app_profile = {} update_mask = {} response = client.update_app_profile(app_profile, update_mask) @@ -687,7 +687,7 @@ def test_update_app_profile_exception(self): client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) # Setup Request - app_profile = instance_pb2.AppProfile() + app_profile = {} update_mask = {} response = client.update_app_profile(app_profile, update_mask) diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py b/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py index f489dee39209..f9d81b01afb1 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py +++ 
b/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- +# # Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/packages/google-cloud-bigtable/tests/unit/test_app_profile.py b/packages/google-cloud-bigtable/tests/unit/test_app_profile.py index 378cefb431e8..17cadc49f789 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_app_profile.py +++ b/packages/google-cloud-bigtable/tests/unit/test_app_profile.py @@ -300,8 +300,8 @@ def test_reload_routing_any(self): # Patch the stub used by the API method. client._instance_admin_client = api instance_stub = ( - client._instance_admin_client.bigtable_instance_admin_stub) - instance_stub.GetCluster.side_effect = [response_pb] + client._instance_admin_client.transport) + instance_stub.get_app_profile.side_effect = [response_pb] # Create expected_result. expected_result = None # reload() has no return value. @@ -344,8 +344,8 @@ def test_exists(self): # Patch the stub used by the API method. client._instance_admin_client = instance_api instance_stub = ( - client._instance_admin_client.bigtable_instance_admin_stub) - instance_stub.GetCluster.side_effect = [ + client._instance_admin_client.transport) + instance_stub.get_app_profile.side_effect = [ response_pb, exceptions.NotFound('testing'), exceptions.BadRequest('testing'), @@ -619,8 +619,8 @@ def test_delete(self): # Patch the stub used by the API method. client._instance_admin_client = instance_api instance_stub = ( - client._instance_admin_client.bigtable_instance_admin_stub) - instance_stub.DeleteCluster.side_effect = [response_pb] + client._instance_admin_client.transport) + instance_stub.delete_cluster.side_effect = [response_pb] # Create expected_result. expected_result = None # delete() has no return value. diff --git a/packages/google-cloud-bigtable/tests/unit/test_client.py b/packages/google-cloud-bigtable/tests/unit/test_client.py index 3dac94eb4819..414f39ae3cf9 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/test_client.py @@ -209,8 +209,8 @@ def test_list_instances(self): # Patch the stub used by the API method. client._instance_admin_client = api bigtable_instance_stub = ( - client.instance_admin_client.bigtable_instance_admin_stub) - bigtable_instance_stub.ListInstances.side_effect = [response_pb] + client.instance_admin_client.transport) + bigtable_instance_stub.list_instances.side_effect = [response_pb] # Perform the method and check the result. instances, failed_locations = client.list_instances() @@ -279,8 +279,8 @@ def test_list_clusters(self): # Patch the stub used by the API method. client._instance_admin_client = instance_api instance_stub = ( - client._instance_admin_client.bigtable_instance_admin_stub) - instance_stub.ListClusters.side_effect = [response_pb] + client._instance_admin_client.transport) + instance_stub.list_clusters.side_effect = [response_pb] # Perform the method and check the result. clusters, failed_locations = client.list_clusters() diff --git a/packages/google-cloud-bigtable/tests/unit/test_cluster.py b/packages/google-cloud-bigtable/tests/unit/test_cluster.py index 5d34f95ced79..9ee8b36540b4 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_cluster.py +++ b/packages/google-cloud-bigtable/tests/unit/test_cluster.py @@ -259,8 +259,8 @@ def test_reload(self): # Patch the stub used by the API method. 
client._instance_admin_client = api instance_admin_client = client._instance_admin_client - instance_stub = instance_admin_client.bigtable_instance_admin_stub - instance_stub.GetCluster.side_effect = [response_pb] + instance_stub = instance_admin_client.transport + instance_stub.get_cluster.side_effect = [response_pb] # Create expected_result. expected_result = None # reload() has no return value. @@ -304,8 +304,8 @@ def test_exists(self): # Patch the stub used by the API method. client._instance_admin_client = instance_api instance_admin_client = client._instance_admin_client - instance_stub = instance_admin_client.bigtable_instance_admin_stub - instance_stub.GetCluster.side_effect = [ + instance_stub = instance_admin_client.transport + instance_stub.get_cluster.side_effect = [ response_pb, exceptions.NotFound('testing'), exceptions.BadRequest('testing') @@ -460,8 +460,8 @@ def test_delete(self): # Patch the stub used by the API method. client._instance_admin_client = api instance_admin_client = client._instance_admin_client - instance_stub = instance_admin_client.bigtable_instance_admin_stub - instance_stub.DeleteCluster.side_effect = [response_pb] + instance_stub = instance_admin_client.transport + instance_stub.delete_cluster.side_effect = [response_pb] # Create expected_result. expected_result = None # delete() has no return value. diff --git a/packages/google-cloud-bigtable/tests/unit/test_column_family.py b/packages/google-cloud-bigtable/tests/unit/test_column_family.py index 9443a198093b..140504072f25 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_column_family.py +++ b/packages/google-cloud-bigtable/tests/unit/test_column_family.py @@ -402,7 +402,7 @@ def _create_test_helper(self, gc_rule=None): # Patch the stub used by the API method. stub = _FakeStub(response_pb) client._table_admin_client = api - client._table_admin_client.bigtable_table_admin_stub = stub + client._table_admin_client.transport.create = stub # Create expected_result. expected_result = None # create() has no return value. @@ -462,7 +462,7 @@ def _update_test_helper(self, gc_rule=None): # Patch the stub used by the API method. stub = _FakeStub(response_pb) client._table_admin_client = api - client._table_admin_client.bigtable_table_admin_stub = stub + client._table_admin_client.transport.update = stub # Create expected_result. expected_result = None # update() has no return value. @@ -517,7 +517,7 @@ def test_delete(self): # Patch the stub used by the API method. stub = _FakeStub(response_pb) client._table_admin_client = api - client._table_admin_client.bigtable_table_admin_stub = stub + client._table_admin_client.transport.delete = stub # Create expected_result. expected_result = None # delete() has no return value. diff --git a/packages/google-cloud-bigtable/tests/unit/test_instance.py b/packages/google-cloud-bigtable/tests/unit/test_instance.py index c31d09ad163f..23f1616c23e4 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_instance.py +++ b/packages/google-cloud-bigtable/tests/unit/test_instance.py @@ -190,8 +190,8 @@ def test_list_clusters(self): # Patch the stub used by the API method. client._instance_admin_client = instance_api instance_admin_client = client._instance_admin_client - instance_stub = instance_admin_client.bigtable_instance_admin_stub - instance_stub.ListClusters.side_effect = [response_pb] + instance_stub = instance_admin_client.transport + instance_stub.list_clusters.side_effect = [response_pb] # Perform the method and check the result. 
clusters, failed_locations = instance.list_clusters() @@ -378,8 +378,8 @@ def test_reload(self): # Patch the stub used by the API method. client._instance_admin_client = api bigtable_instance_stub = ( - client._instance_admin_client.bigtable_instance_admin_stub) - bigtable_instance_stub.GetInstance.side_effect = [response_pb] + client._instance_admin_client.transport) + bigtable_instance_stub.get_instance.side_effect = [response_pb] # Create expected_result. expected_result = None # reload() has no return value. @@ -416,8 +416,8 @@ def test_exists(self): # Patch the stub used by the API method. client._instance_admin_client = api instance_admin_client = client._instance_admin_client - instance_stub = instance_admin_client.bigtable_instance_admin_stub - instance_stub.GetCluster.side_effect = [ + instance_stub = instance_admin_client.transport + instance_stub.get_instance.side_effect = [ response_pb, exceptions.NotFound('testing'), exceptions.BadRequest('testing') @@ -772,8 +772,8 @@ def _list_tables_helper(self, table_name=None): client._table_admin_client = table_api client._instance_admin_client = instance_api bigtable_table_stub = ( - client._table_admin_client.bigtable_table_admin_stub) - bigtable_table_stub.ListTables.side_effect = [response_pb] + client._table_admin_client.transport) + bigtable_table_stub.list_tables.side_effect = [response_pb] # Create expected_result. expected_table = instance.table(self.TABLE_ID) @@ -882,8 +882,8 @@ def test_list_app_profiles(self): # Patch the stub used by the API method. client._instance_admin_client = instance_api bigtable_instance_stub = ( - client._instance_admin_client.bigtable_instance_admin_stub) - bigtable_instance_stub.ListAppProfiles.side_effect = [ + client._instance_admin_client.transport) + bigtable_instance_stub.list_app_profiles.side_effect = [ expected_response] # Perform the method and check the result. diff --git a/packages/google-cloud-bigtable/tests/unit/test_row.py b/packages/google-cloud-bigtable/tests/unit/test_row.py index b6601aede200..4e87c15c6bf6 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row.py @@ -445,9 +445,8 @@ def test_commit(self): predicate_matched=predicate_matched) # Patch the stub used by the API method. + api.transport.check_and_mutate_row.side_effect = [[response_pb]] client._table_data_client = api - bigtable_stub = client._table_data_client.bigtable_stub - bigtable_stub.CheckAndMutateRow.side_effect = [[response_pb]] # Create expected_result. 
expected_result = predicate_matched diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_data.py b/packages/google-cloud-bigtable/tests/unit/test_row_data.py index 067948c26b17..e6ba31f4decd 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_data.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_data.py @@ -434,8 +434,10 @@ def test_state_new_row_w_row(self): credentials=credentials, admin=True) client._table_data_client = data_api request = object() + yrd = self._make_one( - client._table_data_client.bigtable_stub.ReadRows, request) + client._table_data_client.transport.read_rows, request) + yrd._response_iterator = iterator yrd._last_scanned_row_key = '' rows = [row for row in yrd] diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index 95e6b1a42d0b..261eba449c64 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -17,7 +17,6 @@ import grpc import mock - from ._testing import _make_credentials @@ -332,8 +331,8 @@ def test_exists(self): client._table_admin_client = table_api client._instance_admin_client = instance_api bigtable_table_stub = ( - client._table_admin_client.bigtable_table_admin_stub) - bigtable_table_stub.GetTable.side_effect = [ + client._table_admin_client.transport) + bigtable_table_stub.get_table.side_effect = [ response_pb, NotFound('testing'), BadRequest('testing') @@ -396,8 +395,8 @@ def _list_column_families_helper(self): # Patch the stub used by the API method. client._table_admin_client = table_api bigtable_table_stub = ( - client._table_admin_client.bigtable_table_admin_stub) - bigtable_table_stub.GetTable.side_effect = [response_pb] + client._table_admin_client.transport) + bigtable_table_stub.get_table.side_effect = [response_pb] # Create expected_result. expected_result = { @@ -439,8 +438,8 @@ def test_get_cluster_states(self): # Patch the stub used by the API method. client._table_admin_client = table_api bigtable_table_stub = ( - client._table_admin_client.bigtable_table_admin_stub) - bigtable_table_stub.GetTable.side_effect = [response_pb] + client._table_admin_client.transport) + bigtable_table_stub.get_table.side_effect = [response_pb] # build expected result expected_result = { @@ -453,7 +452,8 @@ def test_get_cluster_states(self): result = table.get_cluster_states() self.assertEqual(result, expected_result) - def _read_row_helper(self, chunks, expected_result, app_profile_id=None): + def _read_row_helper(self, chunks, expected_result, app_profile_id=None, + initialized_read_row=True): from google.cloud._testing import _Monkey from google.cloud.bigtable import table as MUT from google.cloud.bigtable_v2.gapic import bigtable_client @@ -489,8 +489,11 @@ def mock_create_row_request(table_name, row_key, filter_, # Patch the stub used by the API method. client._table_data_client = data_api client._table_admin_client = table_api - bigtable_stub = client._table_data_client.bigtable_stub - bigtable_stub.ReadRows.side_effect = [response_iterator] + + inner_api_calls = client._table_data_client._inner_api_calls + if initialized_read_row: + inner_api_calls['read_rows'] = mock.Mock( + side_effect=[response_iterator]) # Perform the method and check the result. 
filter_obj = object() @@ -542,6 +545,14 @@ def test_read_row_still_partial(self): with self.assertRaises(ValueError): self._read_row_helper(chunks, None) + def test_read_row_no_inner_api(self): + chunks = [] + with mock.patch( + 'google.cloud.bigtable.table.wrap_method') as patched: + patched.return_value = mock.Mock( + return_value=iter(())) + self._read_row_helper(chunks, None, initialized_read_row=False) + def test_mutate_rows(self): from google.rpc.status_pb2 import Status from google.cloud.bigtable_admin_v2.gapic import ( @@ -599,7 +610,7 @@ def mock_create_row_request(table_name, **kwargs): # Create expected_result. expected_result = PartialRowsData( - client._table_data_client.bigtable_stub.ReadRows, + client._table_data_client.transport.read_rows, request) # Perform the method and check the result. @@ -666,9 +677,10 @@ def test_yield_retry_rows(self): response_iterator = _MockReadRowsIterator(response_2) # Patch the stub used by the API method. - client._table_data_client.bigtable_stub.ReadRows.side_effect = [ - response_failure_iterator_1, response_failure_iterator_2, - response_iterator] + client._table_data_client.transport.read_rows = mock.Mock( + side_effect=[ + response_failure_iterator_1, response_failure_iterator_2, + response_iterator]) rows = [] for row in table.yield_rows(start_key=self.ROW_KEY_1, @@ -731,8 +743,8 @@ def test_yield_rows_with_row_set(self): response_3) # Patch the stub used by the API method. - client._table_data_client.bigtable_stub.ReadRows.side_effect = [ - response_iterator] + client._table_data_client.transport.read_rows = mock.Mock( + side_effect=[response_iterator]) rows = [] row_set = RowSet() @@ -766,8 +778,9 @@ def test_sample_row_keys(self): response_iterator = object() # Just passed to a mock. # Patch the stub used by the API method. - bigtable_stub = client._table_data_client.bigtable_stub - bigtable_stub.SampleRowKeys.side_effect = [[response_iterator]] + inner_api_calls = client._table_data_client._inner_api_calls + inner_api_calls['sample_row_keys'] = mock.Mock( + side_effect=[[response_iterator]]) # Create expected_result. expected_result = response_iterator @@ -1009,17 +1022,22 @@ def test_callable_no_retry_strategy(self): self.RETRYABLE_1, self.NON_RETRYABLE]) - # Patch the stub used by the API method. - bigtable_stub = client._table_data_client.bigtable_stub - bigtable_stub.MutateRows.return_value = [response] + with mock.patch( + 'google.cloud.bigtable.table.wrap_method') as patched: + patched.return_value = mock.Mock( + return_value=[response]) - worker = self._make_worker(client, table.name, [row_1, row_2, row_3]) - statuses = worker(retry=None) + worker = self._make_worker( + client, + table.name, + [row_1, row_2, row_3]) + statuses = worker(retry=None) result = [status.code for status in statuses] expected_result = [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE] - client._table_data_client.bigtable_stub.MutateRows.assert_called_once() + client._table_data_client._inner_api_calls[ + 'mutate_rows'].assert_called_once() self.assertEqual(result, expected_result) def test_callable_retry(self): @@ -1065,8 +1083,8 @@ def test_callable_retry(self): response_2 = self._make_responses([self.SUCCESS]) # Patch the stub used by the API method. 
- client._table_data_client.bigtable_stub.MutateRows.side_effect = [ - [response_1], [response_2]] + client._table_data_client._inner_api_calls['mutate_rows'] = mock.Mock( + side_effect=[[response_1], [response_2]]) retry = DEFAULT_RETRY.with_delay(initial=0.1) worker = self._make_worker(client, table.name, [row_1, row_2, row_3]) @@ -1076,7 +1094,9 @@ def test_callable_retry(self): expected_result = [self.SUCCESS, self.SUCCESS, self.NON_RETRYABLE] self.assertEqual( - client._table_data_client.bigtable_stub.MutateRows.call_count, 2) + client._table_data_client._inner_api_calls[ + 'mutate_rows'].call_count, + 2) self.assertEqual(result, expected_result) def test_callable_retry_timeout(self): @@ -1116,8 +1136,8 @@ def test_callable_retry_timeout(self): response = self._make_responses([self.RETRYABLE_1, self.RETRYABLE_1]) # Patch the stub used by the API method. - bigtable_stub = client._table_data_client.bigtable_stub - bigtable_stub.MutateRows.return_value = [response] + inner_api_calls = client._table_data_client._inner_api_calls + inner_api_calls['mutate_rows'] = mock.Mock(return_value=[response]) retry = DEFAULT_RETRY.with_delay( initial=0.1, maximum=0.2, multiplier=2.0).with_deadline(0.5) @@ -1128,7 +1148,8 @@ def test_callable_retry_timeout(self): expected_result = [self.RETRYABLE_1, self.RETRYABLE_1] self.assertTrue( - client._table_data_client.bigtable_stub.MutateRows.call_count > 1) + client._table_data_client._inner_api_calls[ + 'mutate_rows'].call_count > 1) self.assertEqual(result, expected_result) def test_do_mutate_retryable_rows_empty_rows(self): @@ -1181,8 +1202,8 @@ def test_do_mutate_retryable_rows(self): response = self._make_responses([self.SUCCESS, self.NON_RETRYABLE]) # Patch the stub used by the API method. - bigtable_stub = client._table_data_client.bigtable_stub - bigtable_stub.MutateRows.side_effect = [[response]] + inner_api_calls = client._table_data_client._inner_api_calls + inner_api_calls['mutate_rows'] = mock.Mock(side_effect=[[response]]) worker = self._make_worker(client, table.name, [row_1, row_2]) statuses = worker._do_mutate_retryable_rows() @@ -1232,8 +1253,8 @@ def test_do_mutate_retryable_rows_retry(self): self.NON_RETRYABLE]) # Patch the stub used by the API method. - bigtable_stub = client._table_data_client.bigtable_stub - bigtable_stub.MutateRows.side_effect = [[response]] + inner_api_calls = client._table_data_client._inner_api_calls + inner_api_calls['mutate_rows'] = mock.Mock(side_effect=[[response]]) worker = self._make_worker(client, table.name, [row_1, row_2, row_3]) @@ -1290,8 +1311,8 @@ def test_do_mutate_retryable_rows_second_retry(self): response = self._make_responses([self.SUCCESS, self.RETRYABLE_1]) # Patch the stub used by the API method. - bigtable_stub = client._table_data_client.bigtable_stub - bigtable_stub.MutateRows.side_effect = [[response]] + inner_api_calls = client._table_data_client._inner_api_calls + inner_api_calls['mutate_rows'] = mock.Mock(side_effect=[[response]]) worker = self._make_worker(client, table.name, [row_1, row_2, row_3, row_4]) @@ -1352,8 +1373,8 @@ def test_do_mutate_retryable_rows_second_try(self): response = self._make_responses([self.NON_RETRYABLE, self.SUCCESS]) # Patch the stub used by the API method. 
- bigtable_stub = client._table_data_client.bigtable_stub - bigtable_stub.MutateRows.side_effect = [[response]] + inner_api_calls = client._table_data_client._inner_api_calls + inner_api_calls['mutate_rows'] = mock.Mock(side_effect=[[response]]) worker = self._make_worker(client, table.name, [row_1, row_2, row_3, row_4]) @@ -1436,8 +1457,8 @@ def test_do_mutate_retryable_rows_mismatch_num_responses(self): response = self._make_responses([self.SUCCESS]) # Patch the stub used by the API method. - bigtable_stub = client._table_data_client.bigtable_stub - bigtable_stub.MutateRows.side_effect = [[response]] + inner_api_calls = client._table_data_client._inner_api_calls + inner_api_calls['mutate_rows'] = mock.Mock(side_effect=[[response]]) worker = self._make_worker(client, table.name, [row_1, row_2]) with self.assertRaises(RuntimeError): From ebf1bdc4e89d7d413d554b9170db5e0f65a39d4e Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Wed, 5 Sep 2018 16:43:40 -0400 Subject: [PATCH 172/892] Bigtable: fix non admin client access. (#5890) Closes #5874. --- .../google/cloud/bigtable/client.py | 2 - .../google/cloud/bigtable/table.py | 2 +- .../google-cloud-bigtable/tests/system.py | 6 ++ .../tests/unit/test_client.py | 70 +++++++++++++------ .../tests/unit/test_table.py | 20 ++++-- 5 files changed, 70 insertions(+), 30 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py index a473f3158ea9..1ef9e072199c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py @@ -156,8 +156,6 @@ def table_data_client(self): :returns: A BigtableClient object. """ if self._table_data_client is None: - if not self._admin: - raise ValueError('Client is not an admin client.') self._table_data_client = ( bigtable_v2.BigtableClient(credentials=self._credentials, client_info=_CLIENT_INFO)) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index 29463e034ea1..62e22c67eb4a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -123,7 +123,7 @@ def name(self): """ project = self._instance._client.project instance_id = self._instance.instance_id - table_client = self._instance._client.table_admin_client + table_client = self._instance._client.table_data_client return table_client.table_path( project=project, instance=instance_id, table=self.table_id) diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index 9a9c8afaf2d5..ffe747b6d66e 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -1013,3 +1013,9 @@ def test_read_with_label_applied(self): self.assertEqual(cell3_new.timestamp, cell3.timestamp) self.assertEqual(cell3.labels, []) self.assertEqual(cell3_new.labels, [label2]) + + def test_access_with_non_admin_client(self): + client = Client(admin=False) + instance = client.instance(INSTANCE_ID) + table = instance.table(self._table.table_id) + self.assertIsNone(table.read_row('nonesuch')) diff --git a/packages/google-cloud-bigtable/tests/unit/test_client.py b/packages/google-cloud-bigtable/tests/unit/test_client.py index 414f39ae3cf9..95937f0957ed 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_client.py +++ 
b/packages/google-cloud-bigtable/tests/unit/test_client.py @@ -125,46 +125,74 @@ def test_instance_factory_non_defaults(self): self.assertEqual(instance.labels, labels) self.assertIs(instance._client, client) - def test_admin_client_w_value_error(self): + def test_table_data_client_not_initialized(self): + from google.cloud.bigtable_v2 import BigtableClient + credentials = _make_credentials() client = self._make_one(project=self.PROJECT, credentials=credentials) - with self.assertRaises(ValueError): - client.table_admin_client() - - with self.assertRaises(ValueError): - client.instance_admin_client() + table_data_client = client.table_data_client + self.assertIsInstance(table_data_client, BigtableClient) + self.assertIs(client._table_data_client, table_data_client) - def test_table_data_client(self): + def test_table_data_client_initialized(self): credentials = _make_credentials() client = self._make_one(project=self.PROJECT, credentials=credentials, admin=True) - table_data_client = client.table_data_client - self.assertEqual(client._table_data_client, table_data_client) + already = client._table_data_client = object() + self.assertIs(client.table_data_client, already) - client._table_data_client = object() - table_data_client = client.table_data_client - self.assertEqual(client.table_data_client, table_data_client) + def test_table_admin_client_not_initialized_no_admin_flag(self): + credentials = _make_credentials() + client = self._make_one(project=self.PROJECT, credentials=credentials) + + with self.assertRaises(ValueError): + client.table_admin_client() + + def test_table_admin_client_not_initialized_w_admin_flag(self): + from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient - def test_table_admin_client(self): credentials = _make_credentials() - client = self._make_one(project=self.PROJECT, credentials=credentials, - admin=True) + client = self._make_one( + project=self.PROJECT, credentials=credentials, admin=True) table_admin_client = client.table_admin_client - self.assertEqual(client._table_admin_client, table_admin_client) + self.assertIsInstance(table_admin_client, BigtableTableAdminClient) - client._table_admin_client = object() - table_admin_client = client.table_admin_client - self.assertEqual(client._table_admin_client, table_admin_client) + def test_table_admin_client_initialized(self): + credentials = _make_credentials() + client = self._make_one(project=self.PROJECT, credentials=credentials, + admin=True) - def test_table_data_client_w_value_error(self): + already = client._table_admin_client = object() + self.assertIs(client.table_admin_client, already) + + def test_instance_admin_client_not_initialized_no_admin_flag(self): credentials = _make_credentials() client = self._make_one(project=self.PROJECT, credentials=credentials) with self.assertRaises(ValueError): - client.table_data_client() + client.instance_admin_client() + + def test_instance_admin_client_not_initialized_w_admin_flag(self): + from google.cloud.bigtable_admin_v2 import BigtableInstanceAdminClient + + credentials = _make_credentials() + client = self._make_one( + project=self.PROJECT, credentials=credentials, admin=True) + + instance_admin_client = client.instance_admin_client + self.assertIsInstance( + instance_admin_client, BigtableInstanceAdminClient) + + def test_instance_admin_client_initialized(self): + credentials = _make_credentials() + client = self._make_one(project=self.PROJECT, credentials=credentials, + admin=True) + + already = client._instance_admin_client = object() + 
self.assertIs(client.instance_admin_client, already) def test_list_instances(self): from google.cloud.bigtable_admin_v2.proto import ( diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index 261eba449c64..208139710635 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -156,15 +156,25 @@ def _get_target_client_class(): def _make_client(self, *args, **kwargs): return self._get_target_client_class()(*args, **kwargs) - def test_constructor(self): + def test_constructor_w_admin(self): credentials = _make_credentials() - client = self._make_client(project='project-id', + client = self._make_client(project=self.PROJECT_ID, credentials=credentials, admin=True) - table_id = 'table-id' instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) - self.assertEqual(table.table_id, table_id) + self.assertEqual(table.table_id, self.TABLE_ID) + self.assertIs(table._instance._client, client) + self.assertEqual(table.name, self.TABLE_NAME) + + def test_constructor_wo_admin(self): + credentials = _make_credentials() + client = self._make_client(project=self.PROJECT_ID, + credentials=credentials, admin=False) + instance = client.instance(instance_id=self.INSTANCE_ID) + table = self._make_one(self.TABLE_ID, instance) + self.assertEqual(table.table_id, self.TABLE_ID) self.assertIs(table._instance._client, client) + self.assertEqual(table.name, self.TABLE_NAME) def test_row_factory_direct(self): from google.cloud.bigtable.row import DirectRow @@ -804,8 +814,6 @@ def test_truncate(self): client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) - table.name.return_value = client._table_data_client.table_path( - self.PROJECT_ID, self.INSTANCE_ID, self.TABLE_ID) expected_result = None # truncate() has no return value. with mock.patch('google.cloud.bigtable.table.Table.name', From b8ac2de37d002d5d2ca3cb00e489907ab70366ef Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Thu, 6 Sep 2018 11:29:52 -0400 Subject: [PATCH 173/892] Prepare bigtable-0.30.1 release. (#5891) --- packages/google-cloud-bigtable/CHANGELOG.md | 11 +++++++++++ packages/google-cloud-bigtable/setup.py | 2 +- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 7a81ec6d4323..db4581862ca0 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,17 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## 0.30.1 + +### Implementation changes + +- Fix non-admin access to table data. (#5875) +- Synth bigtable and bigtable admin GAPIC clients. (#5867) + +### Testing and internal changes + +- Nox: use in-place installs for local packages. 
(#5865) + ## 0.30.0 ### New Features diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 001c99757b7f..54ac831a81fb 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -22,7 +22,7 @@ name = 'google-cloud-bigtable' description = 'Google Cloud Bigtable API client library' -version = '0.30.0' +version = '0.30.1' # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' From e47e329948698f26e37a5897195f4c375703199c Mon Sep 17 00:00:00 2001 From: sumit-ql <39561577+sumit-ql@users.noreply.github.com> Date: Thu, 6 Sep 2018 21:46:24 +0530 Subject: [PATCH 174/892] Bigtable: add iam policy implementation for an instance. (#5838) --- .../google/cloud/bigtable/instance.py | 102 +++++++++++++++ .../google/cloud/bigtable/policy.py | 108 ++++++++++++++++ .../tests/unit/test_instance.py | 121 ++++++++++++++++++ .../tests/unit/test_policy.py | 85 ++++++++++++ 4 files changed, 416 insertions(+) create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/policy.py create mode 100644 packages/google-cloud-bigtable/tests/unit/test_policy.py diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py index b9e5fa17b87d..b09470dc60b7 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py @@ -27,6 +27,8 @@ from google.api_core.exceptions import NotFound +from google.cloud.bigtable.policy import Policy + _EXISTING_INSTANCE_LOCATION_ID = 'see-existing-cluster' _INSTANCE_NAME_RE = re.compile(r'^projects/(?P[^/]+)/' @@ -481,3 +483,103 @@ def list_app_profiles(self): """ resp = self._client._instance_admin_client.list_app_profiles(self.name) return [AppProfile.from_pb(app_profile, self) for app_profile in resp] + + def get_iam_policy(self): + """Gets the access control policy for an instance resource. + + .. code-block:: python + + from google.cloud.bigtable.client import Client + from google.cloud.bigtable.policy import Policy + + client = Client(admin=True) + instance = client.instance('[INSTANCE_ID]') + policy_latest = instance.get_iam_policy() + print (policy_latest.bigtable_viewers) + + :rtype: :class:`google.cloud.bigtable.policy.Policy` + :returns: The current IAM policy of this instance + """ + instance_admin_client = self._client._instance_admin_client + resp = instance_admin_client.get_iam_policy(resource=self.name) + return Policy.from_api_repr(self._to_dict_from_policy_pb(resp)) + + def set_iam_policy(self, policy): + """Sets the access control policy on an instance resource. Replaces any + existing policy. + + For more information about policy, please see documentation of + class `google.cloud.bigtable.policy.Policy` + + .. 
code-block:: python + + from google.cloud.bigtable.client import Client + from google.cloud.bigtable.policy import Policy + from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE + + client = Client(admin=True) + instance = client.instance('[INSTANCE_ID]') + ins_policy = instance.get_iam_policy() + ins_policy[BIGTABLE_ADMIN_ROLE] = [ + Policy.user("test_iam@test.com"), + Policy.service_account("sv_account@gmail.com")] + + policy_latest = instance.set_iam_policy() + print (policy_latest.bigtable_admins) + + :type policy: :class:`google.cloud.bigtable.policy.Policy` + :param policy: A new IAM policy to replace the current IAM policy + of this instance + + :rtype: :class:`google.cloud.bigtable.policy.Policy` + :returns: The current IAM policy of this instance. + """ + instance_admin_client = self._client._instance_admin_client + resp = instance_admin_client.set_iam_policy( + resource=self.name, policy=policy.to_api_repr()) + return Policy.from_api_repr(self._to_dict_from_policy_pb(resp)) + + def test_iam_permissions(self, permissions): + """Returns permissions that the caller has on the specified instance + resource. + + .. code-block:: python + + from google.cloud.bigtable.client import Client + + client = Client(admin=True) + instance = client.instance('[INSTANCE_ID]') + permissions = ["bigtable.tables.create", + "bigtable.clusters.create"] + permissions_allowed = instance.test_iam_permissions(permissions) + print (permissions_allowed) + + :type permissions: list + :param permissions: The set of permissions to check for + the ``resource``. Permissions with wildcards (such as '*' + or 'storage.*') are not allowed. For more information see + `IAM Overview + `_. + `Bigtable Permissions + `_. + + :rtype: list + :returns: A List(string) of permissions allowed on the instance + """ + instance_admin_client = self._client._instance_admin_client + resp = instance_admin_client.test_iam_permissions( + resource=self.name, permissions=permissions) + return list(resp.permissions) + + def _to_dict_from_policy_pb(self, policy): + """Returns a dictionary representation of resource returned from + the getIamPolicy API to use as parameter for + :meth: google.cloud.iam.Policy.from_api_repr + """ + pb_dict = {} + bindings = [{'role': binding.role, 'members': binding.members} + for binding in policy.bindings] + pb_dict['etag'] = policy.etag + pb_dict['version'] = policy.version + pb_dict['bindings'] = bindings + return pb_dict diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/policy.py b/packages/google-cloud-bigtable/google/cloud/bigtable/policy.py new file mode 100644 index 000000000000..99523404258c --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/policy.py @@ -0,0 +1,108 @@ +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from google.cloud.iam import Policy as BasePolicy +from google.cloud._helpers import _to_bytes + +"""IAM roles supported by Bigtable Instance resource""" +BIGTABLE_ADMIN_ROLE = 'roles/bigtable.admin' +"""Administers all instances within a project, including the data stored +within tables. Can create new instances. Intended for project administrators. +""" +BIGTABLE_USER_ROLE = 'roles/bigtable.user' +"""Provides read-write access to the data stored within tables. Intended for +application developers or service accounts. +""" +BIGTABLE_READER_ROLE = 'roles/bigtable.reader' +"""Provides read-only access to the data stored within tables. Intended for +data scientists, dashboard generators, and other data-analysis scenarios. +""" +BIGTABLE_VIEWER_ROLE = 'roles/bigtable.viewer' +"""Provides no data access. Intended as a minimal set of permissions to access +the GCP Console for Cloud Bigtable. +""" +"""For detailed information +See +https://cloud.google.com/bigtable/docs/access-control#roles +""" + + +class Policy(BasePolicy): + """IAM Policy + + See + https://cloud.google.com/bigtable/docs/reference/admin/rpc/google.iam.v1#policy + + A Policy consists of a list of bindings. A binding binds a list of + members to a role, where the members can be user accounts, Google + groups, Google domains, and service accounts. A role is a named list + of permissions defined by IAM. + For more information about predefined roles currently supoprted + by Bigtable Instance please see + `Predefined roles + `_. + For more information about custom roles please see + `Custom roles + `_. + + :type etag: str + :param etag: etag is used for optimistic concurrency control as a way to + help prevent simultaneous updates of a policy from overwriting + each other. It is strongly suggested that systems make use + of the etag in the read-modify-write cycle to perform policy + updates in order to avoid race conditions: + An etag is returned in the response to getIamPolicy, and + systems are expected to put that etag in the request to + setIamPolicy to ensure that their change will be applied to + the same version of the policy. + + If no etag is provided in the call to setIamPolicy, then the + existing policy is overwritten blindly. 
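As an aside on the etag-based read-modify-write cycle described above, here is a hedged sketch of how the new instance IAM methods and this Policy class are intended to be combined; the instance ID and member address are placeholders, not values from this patch:

.. code-block:: python

    from google.cloud.bigtable.client import Client
    from google.cloud.bigtable.policy import BIGTABLE_READER_ROLE, Policy

    client = Client(admin=True)
    instance = client.instance('my-instance')  # placeholder instance ID

    # Read the current policy; the response carries the server-supplied etag.
    policy = instance.get_iam_policy()

    # Modify it locally: grant the reader role to one more member.
    readers = set(policy.get(BIGTABLE_READER_ROLE, ()))
    readers.add(Policy.user('analyst@example.com'))  # placeholder member
    policy[BIGTABLE_READER_ROLE] = list(readers)

    # Write it back; because the etag round-trips, the server can reject the
    # update if the policy changed underneath us.
    updated = instance.set_iam_policy(policy)
    print(updated.bigtable_readers)
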
+ """ + def __init__(self, etag=None, version=None): + BasePolicy.__init__(self, + etag=etag if etag is None else _to_bytes(etag), + version=version) + + @property + def bigtable_admins(self): + """Access to bigtable.admin role memebers""" + result = set() + for member in self._bindings.get(BIGTABLE_ADMIN_ROLE, ()): + result.add(member) + return frozenset(result) + + @property + def bigtable_readers(self): + """Access to bigtable.reader role memebers""" + result = set() + for member in self._bindings.get(BIGTABLE_READER_ROLE, ()): + result.add(member) + return frozenset(result) + + @property + def bigtable_users(self): + """Access to bigtable.user role memebers""" + result = set() + for member in self._bindings.get(BIGTABLE_USER_ROLE, ()): + result.add(member) + return frozenset(result) + + @property + def bigtable_viewers(self): + """Access to bigtable.viewer role memebers""" + result = set() + for member in self._bindings.get(BIGTABLE_VIEWER_ROLE, ()): + result.add(member) + return frozenset(result) diff --git a/packages/google-cloud-bigtable/tests/unit/test_instance.py b/packages/google-cloud-bigtable/tests/unit/test_instance.py index 23f1616c23e4..f6bc42bc7292 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_instance.py +++ b/packages/google-cloud-bigtable/tests/unit/test_instance.py @@ -897,6 +897,127 @@ def test_list_app_profiles(self): self.assertIsInstance(app_profile_2, AppProfile) self.assertEqual(app_profile_2.name, app_profile_name2) + def test_get_iam_policy(self): + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_instance_admin_client) + from google.iam.v1 import iam_policy_pb2 + from google.iam.v1 import policy_pb2 + from google.cloud.bigtable.policy import Policy + from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE + + credentials = _make_credentials() + client = self._make_client(project=self.PROJECT, + credentials=credentials, admin=True) + instance = self._make_one(self.INSTANCE_ID, client) + + version = 1 + etag = b'etag_v1' + bindings = [{'role': BIGTABLE_ADMIN_ROLE, + 'members': ['serviceAccount:service_acc1@test.com', + 'user:user1@test.com']}] + + expected_request_policy = policy_pb2.Policy(version=version, + etag=etag, + bindings=bindings) + + expected_request = iam_policy_pb2.GetIamPolicyRequest( + resource=instance.name + ) + + # Patch the stub used by the API method. + channel = ChannelStub(responses=[expected_request_policy]) + instance_api = ( + bigtable_instance_admin_client.BigtableInstanceAdminClient( + channel=channel)) + client._instance_admin_client = instance_api + # Perform the method and check the result. 
+ policy_request = Policy(etag=etag, version=version) + policy_request[BIGTABLE_ADMIN_ROLE] = [Policy.user("user1@test.com"), + Policy.service_account( + "service_acc1@test.com")] + + result = instance.get_iam_policy() + actual_request = channel.requests[0][1] + + self.assertEqual(actual_request, expected_request) + self.assertEqual(result.bigtable_admins, + policy_request.bigtable_admins) + + def test_set_iam_policy(self): + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_instance_admin_client) + from google.iam.v1 import iam_policy_pb2 + from google.iam.v1 import policy_pb2 + from google.cloud.bigtable.policy import Policy + from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE + + credentials = _make_credentials() + client = self._make_client(project=self.PROJECT, + credentials=credentials, admin=True) + instance = self._make_one(self.INSTANCE_ID, client) + + version = 1 + etag = b'etag_v1' + bindings = [{'role': BIGTABLE_ADMIN_ROLE, + 'members': ['serviceAccount:service_acc1@test.com', + 'user:user1@test.com']}] + + expected_request_policy = policy_pb2.Policy(version=version, + etag=etag, + bindings=bindings) + + expected_request = iam_policy_pb2.SetIamPolicyRequest( + resource=instance.name, + policy=expected_request_policy + ) + + # Patch the stub used by the API method. + channel = ChannelStub(responses=[expected_request_policy]) + instance_api = ( + bigtable_instance_admin_client.BigtableInstanceAdminClient( + channel=channel)) + client._instance_admin_client = instance_api + # Perform the method and check the result. + policy_request = Policy(etag=etag, version=version) + policy_request[BIGTABLE_ADMIN_ROLE] = [Policy.user("user1@test.com"), + Policy.service_account( + "service_acc1@test.com")] + + result = instance.set_iam_policy(policy_request) + actual_request = channel.requests[0][1] + + self.assertEqual(actual_request, expected_request) + self.assertEqual(result.bigtable_admins, + policy_request.bigtable_admins) + + def test_test_iam_permissions(self): + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_instance_admin_client) + from google.iam.v1 import iam_policy_pb2 + + credentials = _make_credentials() + client = self._make_client(project=self.PROJECT, + credentials=credentials, admin=True) + instance = self._make_one(self.INSTANCE_ID, client) + + permissions = ["bigtable.tables.create", "bigtable.clusters.create"] + + expected_request = iam_policy_pb2.TestIamPermissionsRequest( + resource=instance.name, + permissions=permissions) + + # Patch the stub used by the API method. + channel = ChannelStub(responses=[expected_request]) + instance_api = ( + bigtable_instance_admin_client.BigtableInstanceAdminClient( + channel=channel)) + client._instance_admin_client = instance_api + + result = instance.test_iam_permissions(permissions) + actual_request = channel.requests[0][1] + self.assertEqual(actual_request, expected_request) + self.assertEqual(result, permissions) + class _Client(object): diff --git a/packages/google-cloud-bigtable/tests/unit/test_policy.py b/packages/google-cloud-bigtable/tests/unit/test_policy.py new file mode 100644 index 000000000000..b0ffe6afed36 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/test_policy.py @@ -0,0 +1,85 @@ +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + + +class TestPolicy(unittest.TestCase): + + @staticmethod + def _get_target_class(): + from google.cloud.bigtable.policy import Policy + + return Policy + + def _make_one(self, *args, **kw): + return self._get_target_class()(*args, **kw) + + def test_ctor_defaults(self): + empty = frozenset() + policy = self._make_one() + self.assertIsNone(policy.etag) + self.assertIsNone(policy.version) + self.assertEqual(policy.bigtable_admins, empty) + self.assertEqual(policy.bigtable_readers, empty) + self.assertEqual(policy.bigtable_users, empty) + self.assertEqual(policy.bigtable_viewers, empty) + self.assertEqual(len(policy), 0) + self.assertEqual(dict(policy), {}) + + def test_ctor_explicit(self): + VERSION = 17 + ETAG = b'ETAG' + empty = frozenset() + policy = self._make_one(ETAG, VERSION) + self.assertEqual(policy.etag, ETAG) + self.assertEqual(policy.version, VERSION) + self.assertEqual(policy.bigtable_admins, empty) + self.assertEqual(policy.bigtable_readers, empty) + self.assertEqual(policy.bigtable_users, empty) + self.assertEqual(policy.bigtable_viewers, empty) + self.assertEqual(len(policy), 0) + self.assertEqual(dict(policy), {}) + + def test_bigtable_admins_getter(self): + from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE + MEMBER = 'user:phred@example.com' + expected = frozenset([MEMBER]) + policy = self._make_one() + policy[BIGTABLE_ADMIN_ROLE] = [MEMBER] + self.assertEqual(policy.bigtable_admins, expected) + + def test_bigtable_readers_getter(self): + from google.cloud.bigtable.policy import BIGTABLE_READER_ROLE + MEMBER = 'user:phred@example.com' + expected = frozenset([MEMBER]) + policy = self._make_one() + policy[BIGTABLE_READER_ROLE] = [MEMBER] + self.assertEqual(policy.bigtable_readers, expected) + + def test_bigtable_users_getter(self): + from google.cloud.bigtable.policy import BIGTABLE_USER_ROLE + MEMBER = 'user:phred@example.com' + expected = frozenset([MEMBER]) + policy = self._make_one() + policy[BIGTABLE_USER_ROLE] = [MEMBER] + self.assertEqual(policy.bigtable_users, expected) + + def test_bigtable_viewers_getter(self): + from google.cloud.bigtable.policy import BIGTABLE_VIEWER_ROLE + MEMBER = 'user:phred@example.com' + expected = frozenset([MEMBER]) + policy = self._make_one() + policy[BIGTABLE_VIEWER_ROLE] = [MEMBER] + self.assertEqual(policy.bigtable_viewers, expected) From 692556e226d8f5b3fe8a3740417e1328fc0893bd Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Mon, 10 Sep 2018 15:55:01 -0400 Subject: [PATCH 175/892] Docs: Replace links to '/stable/' with '/latest/'. (#5901) * Replace links to '/stable/' with '/latest/'. * DRY out duplicated 'README.rst' vs. 'docs/index.rst'. * Include websecurityscanner in docs. Toward #5894. --- packages/google-cloud-bigtable/docs/index.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/google-cloud-bigtable/docs/index.rst b/packages/google-cloud-bigtable/docs/index.rst index 4a86b7e60c08..11906f60d979 100644 --- a/packages/google-cloud-bigtable/docs/index.rst +++ b/packages/google-cloud-bigtable/docs/index.rst @@ -9,7 +9,7 @@ cloud project. .. 
_Alpha: https://github.com/GoogleCloudPlatform/google-cloud-python/blob/master/README.rst .. _Cloud Bigtable API: https://cloud.google.com/bigtable -.. _Client Library Documentation: https://googlecloudplatform.github.io/google-cloud-python/stable/bigtable/usage.html +.. _Client Library Documentation: https://googlecloudplatform.github.io/google-cloud-python/latest/bigtable/usage.html .. _Product Documentation: https://cloud.google.com/bigtable Quick Start @@ -25,7 +25,7 @@ In order to use this library, you first need to go through the following steps: .. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project .. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project .. _Enable the Cloud Bigtable API.: https://cloud.google.com/bigtable -.. _Setup Authentication.: https://googlecloudplatform.github.io/google-cloud-python/stable/core/auth.html +.. _Setup Authentication.: https://googlecloudplatform.github.io/google-cloud-python/latest/core/auth.html Installation ~~~~~~~~~~~~ @@ -81,4 +81,4 @@ Api Reference :maxdepth: 2 gapic/v2/api - gapic/v2/types \ No newline at end of file + gapic/v2/types From 946b22892363451681bdb8983405bb9a521454c0 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Wed, 12 Sep 2018 15:13:06 -0700 Subject: [PATCH 176/892] Re-generate library using bigtable/synth.py (#5948) --- .../cloud/bigtable_admin_v2/__init__.py | 3 +- .../gapic/bigtable_instance_admin_client.py | 15 +- .../gapic/bigtable_table_admin_client.py | 4 + .../cloud/bigtable_admin_v2/gapic/enums.py | 130 +++++++++--------- .../proto/bigtable_instance_admin_pb2.py | 74 ++++++---- .../proto/bigtable_table_admin_pb2.py | 87 +++++++----- packages/google-cloud-bigtable/synth.py | 15 +- 7 files changed, 193 insertions(+), 135 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py index bcb3e9e24d8e..f5555e0c70bd 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py @@ -25,8 +25,7 @@ class BigtableInstanceAdminClient( bigtable_instance_admin_client.BigtableInstanceAdminClient): __doc__ = ( - bigtable_instance_admin_client.BigtableInstanceAdminClient.__doc__ - ) + bigtable_instance_admin_client.BigtableInstanceAdminClient.__doc__) enums = enums diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py index a854964aed01..60646e45b5f9 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py @@ -268,7 +268,7 @@ def create_instance(self, cluster ID, e.g., just ``mycluster`` rather than ``projects/myproject/instances/myinstance/clusters/mycluster``. Fields marked ``OutputOnly`` must be left blank. - Currently exactly one cluster must be specified. + Currently, at most two clusters can be specified. 
If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_admin_v2.types.Cluster` retry (Optional[google.api_core.retry.Retry]): A retry object used @@ -415,7 +415,7 @@ def list_instances(self, Args: parent (str): The unique name of the project for which a list of instances is requested. Values are of the form ``projects/``. - page_token (str): The value of ``next_page_token`` returned by a previous call. + page_token (str): DEPRECATED: This field is unused and ignored. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. @@ -918,7 +918,7 @@ def list_clusters(self, Values are of the form ``projects//instances/``. Use `` = '-'`` to list Clusters for all Instances in a project, e.g., ``projects/myproject/instances/-``. - page_token (str): The value of ``next_page_token`` returned by a previous call. + page_token (str): DEPRECATED: This field is unused and ignored. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. @@ -1296,6 +1296,7 @@ def get_app_profile(self, def list_app_profiles(self, parent, + page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None): @@ -1327,6 +1328,10 @@ def list_app_profiles(self, parent (str): The unique name of the instance for which a list of app profiles is requested. Values are of the form ``projects//instances/``. + Use `` = '-'`` to list AppProfiles for all Instances in a project, + e.g., ``projects/myproject/instances/-``. + page_size (int): Maximum number of results per page. + CURRENTLY UNIMPLEMENTED AND IGNORED. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. @@ -1362,7 +1367,9 @@ def list_app_profiles(self, ) request = bigtable_instance_admin_pb2.ListAppProfilesRequest( - parent=parent, ) + parent=parent, + page_size=page_size, + ) if metadata is None: metadata = [] metadata = list(metadata) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py index e821241a9808..f550e8ef3013 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py @@ -438,6 +438,7 @@ def create_table_from_snapshot( def list_tables(self, parent, view=None, + page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None): @@ -470,6 +471,8 @@ def list_tables(self, Values are of the form ``projects//instances/``. view (~google.cloud.bigtable_admin_v2.types.View): The view to be applied to the returned tables' fields. Defaults to ``NAME_ONLY`` if unspecified; no others are currently supported. + page_size (int): Maximum number of results per page. + CURRENTLY UNIMPLEMENTED AND IGNORED. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. 
@@ -505,6 +508,7 @@ def list_tables(self, request = bigtable_table_admin_pb2.ListTablesRequest( parent=parent, view=view, + page_size=page_size, ) if metadata is None: metadata = [] diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py index ac1e2d44e8bf..6081506d671a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py @@ -32,71 +32,6 @@ class StorageType(enum.IntEnum): HDD = 2 -class Instance(object): - class State(enum.IntEnum): - """ - Possible states of an instance. - - Attributes: - STATE_NOT_KNOWN (int): The state of the instance could not be determined. - READY (int): The instance has been successfully created and can serve requests - to its tables. - CREATING (int): The instance is currently being created, and may be destroyed - if the creation process encounters an error. - """ - STATE_NOT_KNOWN = 0 - READY = 1 - CREATING = 2 - - class Type(enum.IntEnum): - """ - The type of the instance. - - Attributes: - TYPE_UNSPECIFIED (int): The type of the instance is unspecified. If set when creating an - instance, a ``PRODUCTION`` instance will be created. If set when updating - an instance, the type will be left unchanged. - PRODUCTION (int): An instance meant for production use. ``serve_nodes`` must be set - on the cluster. - DEVELOPMENT (int): The instance is meant for development and testing purposes only; it has - no performance or uptime guarantees and is not covered by SLA. - After a development instance is created, it can be upgraded by - updating the instance to type ``PRODUCTION``. An instance created - as a production instance cannot be changed to a development instance. - When creating a development instance, ``serve_nodes`` on the cluster must - not be set. - """ - TYPE_UNSPECIFIED = 0 - PRODUCTION = 1 - DEVELOPMENT = 2 - - -class Cluster(object): - class State(enum.IntEnum): - """ - Possible states of a cluster. - - Attributes: - STATE_NOT_KNOWN (int): The state of the cluster could not be determined. - READY (int): The cluster has been successfully created and is ready to serve requests. - CREATING (int): The cluster is currently being created, and may be destroyed - if the creation process encounters an error. - A cluster may not be able to serve requests while being created. - RESIZING (int): The cluster is currently being resized, and may revert to its previous - node count if the process encounters an error. - A cluster is still capable of serving requests while being resized, - but may exhibit performance as if its number of allocated nodes is - between the starting and requested states. - DISABLED (int): The cluster has no backing nodes. The data (tables) still - exist, but no operations can be performed on the cluster. - """ - STATE_NOT_KNOWN = 0 - READY = 1 - CREATING = 2 - RESIZING = 3 - DISABLED = 4 - - class Table(object): class TimestampGranularity(enum.IntEnum): """ @@ -169,3 +104,68 @@ class State(enum.IntEnum): STATE_NOT_KNOWN = 0 READY = 1 CREATING = 2 + + +class Instance(object): + class State(enum.IntEnum): + """ + Possible states of an instance. + + Attributes: + STATE_NOT_KNOWN (int): The state of the instance could not be determined. + READY (int): The instance has been successfully created and can serve requests + to its tables. 
+ CREATING (int): The instance is currently being created, and may be destroyed + if the creation process encounters an error. + """ + STATE_NOT_KNOWN = 0 + READY = 1 + CREATING = 2 + + class Type(enum.IntEnum): + """ + The type of the instance. + + Attributes: + TYPE_UNSPECIFIED (int): The type of the instance is unspecified. If set when creating an + instance, a ``PRODUCTION`` instance will be created. If set when updating + an instance, the type will be left unchanged. + PRODUCTION (int): An instance meant for production use. ``serve_nodes`` must be set + on the cluster. + DEVELOPMENT (int): The instance is meant for development and testing purposes only; it has + no performance or uptime guarantees and is not covered by SLA. + After a development instance is created, it can be upgraded by + updating the instance to type ``PRODUCTION``. An instance created + as a production instance cannot be changed to a development instance. + When creating a development instance, ``serve_nodes`` on the cluster must + not be set. + """ + TYPE_UNSPECIFIED = 0 + PRODUCTION = 1 + DEVELOPMENT = 2 + + +class Cluster(object): + class State(enum.IntEnum): + """ + Possible states of a cluster. + + Attributes: + STATE_NOT_KNOWN (int): The state of the cluster could not be determined. + READY (int): The cluster has been successfully created and is ready to serve requests. + CREATING (int): The cluster is currently being created, and may be destroyed + if the creation process encounters an error. + A cluster may not be able to serve requests while being created. + RESIZING (int): The cluster is currently being resized, and may revert to its previous + node count if the process encounters an error. + A cluster is still capable of serving requests while being resized, + but may exhibit performance as if its number of allocated nodes is + between the starting and requested states. + DISABLED (int): The cluster has no backing nodes. The data (tables) still + exist, but no operations can be performed on the cluster. 
+ """ + STATE_NOT_KNOWN = 0 + READY = 1 + CREATING = 2 + RESIZING = 3 + DISABLED = 4 diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py index 3142362adef0..1f223ce39dc3 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py @@ -27,7 +27,7 @@ name='google/cloud/bigtable/admin_v2/proto/bigtable_instance_admin.proto', package='google.bigtable.admin.v2', syntax='proto3', - serialized_pb=_b('\nBgoogle/cloud/bigtable/admin_v2/proto/bigtable_instance_admin.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x33google/cloud/bigtable/admin_v2/proto/instance.proto\x1a\x1egoogle/iam/v1/iam_policy.proto\x1a\x1agoogle/iam/v1/policy.proto\x1a#google/longrunning/operations.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\x97\x02\n\x15\x43reateInstanceRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x13\n\x0binstance_id\x18\x02 \x01(\t\x12\x34\n\x08instance\x18\x03 \x01(\x0b\x32\".google.bigtable.admin.v2.Instance\x12O\n\x08\x63lusters\x18\x04 \x03(\x0b\x32=.google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry\x1aR\n\rClustersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x30\n\x05value\x18\x02 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster:\x02\x38\x01\"\"\n\x12GetInstanceRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\":\n\x14ListInstancesRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x12\n\npage_token\x18\x02 \x01(\t\"\x81\x01\n\x15ListInstancesResponse\x12\x35\n\tinstances\x18\x01 \x03(\x0b\x32\".google.bigtable.admin.v2.Instance\x12\x18\n\x10\x66\x61iled_locations\x18\x02 \x03(\t\x12\x17\n\x0fnext_page_token\x18\x03 \x01(\t\"\x85\x01\n\x1cPartialUpdateInstanceRequest\x12\x34\n\x08instance\x18\x01 \x01(\x0b\x32\".google.bigtable.admin.v2.Instance\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\"%\n\x15\x44\x65leteInstanceRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"n\n\x14\x43reateClusterRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x12\n\ncluster_id\x18\x02 \x01(\t\x12\x32\n\x07\x63luster\x18\x03 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster\"!\n\x11GetClusterRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"9\n\x13ListClustersRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x12\n\npage_token\x18\x02 \x01(\t\"~\n\x14ListClustersResponse\x12\x33\n\x08\x63lusters\x18\x01 \x03(\x0b\x32!.google.bigtable.admin.v2.Cluster\x12\x18\n\x10\x66\x61iled_locations\x18\x02 \x03(\t\x12\x17\n\x0fnext_page_token\x18\x03 \x01(\t\"$\n\x14\x44\x65leteClusterRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\xc6\x01\n\x16\x43reateInstanceMetadata\x12I\n\x10original_request\x18\x01 \x01(\x0b\x32/.google.bigtable.admin.v2.CreateInstanceRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xcd\x01\n\x16UpdateInstanceMetadata\x12P\n\x10original_request\x18\x01 \x01(\x0b\x32\x36.google.bigtable.admin.v2.PartialUpdateInstanceRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xc4\x01\n\x15\x43reateClusterMetadata\x12H\n\x10original_request\x18\x01 
\x01(\x0b\x32..google.bigtable.admin.v2.CreateClusterRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xb7\x01\n\x15UpdateClusterMetadata\x12;\n\x10original_request\x18\x01 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\x95\x01\n\x17\x43reateAppProfileRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t\x12\x39\n\x0b\x61pp_profile\x18\x03 \x01(\x0b\x32$.google.bigtable.admin.v2.AppProfile\x12\x17\n\x0fignore_warnings\x18\x04 \x01(\x08\"$\n\x14GetAppProfileRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"<\n\x16ListAppProfilesRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x12\n\npage_token\x18\x02 \x01(\t\"n\n\x17ListAppProfilesResponse\x12:\n\x0c\x61pp_profiles\x18\x01 \x03(\x0b\x32$.google.bigtable.admin.v2.AppProfile\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"\x9e\x01\n\x17UpdateAppProfileRequest\x12\x39\n\x0b\x61pp_profile\x18\x01 \x01(\x0b\x32$.google.bigtable.admin.v2.AppProfile\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\x12\x17\n\x0fignore_warnings\x18\x03 \x01(\x08\"@\n\x17\x44\x65leteAppProfileRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x17\n\x0fignore_warnings\x18\x02 \x01(\x08\"\x1a\n\x18UpdateAppProfileMetadata2\xaa\x17\n\x15\x42igtableInstanceAdmin\x12\x8e\x01\n\x0e\x43reateInstance\x12/.google.bigtable.admin.v2.CreateInstanceRequest\x1a\x1d.google.longrunning.Operation\",\x82\xd3\xe4\x93\x02&\"!/v2/{parent=projects/*}/instances:\x01*\x12\x8a\x01\n\x0bGetInstance\x12,.google.bigtable.admin.v2.GetInstanceRequest\x1a\".google.bigtable.admin.v2.Instance\")\x82\xd3\xe4\x93\x02#\x12!/v2/{name=projects/*/instances/*}\x12\x9b\x01\n\rListInstances\x12..google.bigtable.admin.v2.ListInstancesRequest\x1a/.google.bigtable.admin.v2.ListInstancesResponse\")\x82\xd3\xe4\x93\x02#\x12!/v2/{parent=projects/*}/instances\x12\x86\x01\n\x0eUpdateInstance\x12\".google.bigtable.admin.v2.Instance\x1a\".google.bigtable.admin.v2.Instance\",\x82\xd3\xe4\x93\x02&\x1a!/v2/{name=projects/*/instances/*}:\x01*\x12\xac\x01\n\x15PartialUpdateInstance\x12\x36.google.bigtable.admin.v2.PartialUpdateInstanceRequest\x1a\x1d.google.longrunning.Operation\"<\x82\xd3\xe4\x93\x02\x36\x32*/v2/{instance.name=projects/*/instances/*}:\x08instance\x12\x84\x01\n\x0e\x44\x65leteInstance\x12/.google.bigtable.admin.v2.DeleteInstanceRequest\x1a\x16.google.protobuf.Empty\")\x82\xd3\xe4\x93\x02#*!/v2/{name=projects/*/instances/*}\x12\x9d\x01\n\rCreateCluster\x12..google.bigtable.admin.v2.CreateClusterRequest\x1a\x1d.google.longrunning.Operation\"=\x82\xd3\xe4\x93\x02\x37\",/v2/{parent=projects/*/instances/*}/clusters:\x07\x63luster\x12\x92\x01\n\nGetCluster\x12+.google.bigtable.admin.v2.GetClusterRequest\x1a!.google.bigtable.admin.v2.Cluster\"4\x82\xd3\xe4\x93\x02.\x12,/v2/{name=projects/*/instances/*/clusters/*}\x12\xa3\x01\n\x0cListClusters\x12-.google.bigtable.admin.v2.ListClustersRequest\x1a..google.bigtable.admin.v2.ListClustersResponse\"4\x82\xd3\xe4\x93\x02.\x12,/v2/{parent=projects/*/instances/*}/clusters\x12\x8a\x01\n\rUpdateCluster\x12!.google.bigtable.admin.v2.Cluster\x1a\x1d.google.longrunning.Operation\"7\x82\xd3\xe4\x93\x02\x31\x1a,/v2/{name=projects/*/instances/*/clusters/*}:\x01*\x12\x8d\x01\n\rDeleteCluster\x12..google.bigtable.admin.v2.DeleteClusterRequest\x1a\x16.google.protobuf.E
mpty\"4\x82\xd3\xe4\x93\x02.*,/v2/{name=projects/*/instances/*/clusters/*}\x12\xb1\x01\n\x10\x43reateAppProfile\x12\x31.google.bigtable.admin.v2.CreateAppProfileRequest\x1a$.google.bigtable.admin.v2.AppProfile\"D\x82\xd3\xe4\x93\x02>\"//v2/{parent=projects/*/instances/*}/appProfiles:\x0b\x61pp_profile\x12\x9e\x01\n\rGetAppProfile\x12..google.bigtable.admin.v2.GetAppProfileRequest\x1a$.google.bigtable.admin.v2.AppProfile\"7\x82\xd3\xe4\x93\x02\x31\x12//v2/{name=projects/*/instances/*/appProfiles/*}\x12\xaf\x01\n\x0fListAppProfiles\x12\x30.google.bigtable.admin.v2.ListAppProfilesRequest\x1a\x31.google.bigtable.admin.v2.ListAppProfilesResponse\"7\x82\xd3\xe4\x93\x02\x31\x12//v2/{parent=projects/*/instances/*}/appProfiles\x12\xb6\x01\n\x10UpdateAppProfile\x12\x31.google.bigtable.admin.v2.UpdateAppProfileRequest\x1a\x1d.google.longrunning.Operation\"P\x82\xd3\xe4\x93\x02J2;/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}:\x0b\x61pp_profile\x12\x96\x01\n\x10\x44\x65leteAppProfile\x12\x31.google.bigtable.admin.v2.DeleteAppProfileRequest\x1a\x16.google.protobuf.Empty\"7\x82\xd3\xe4\x93\x02\x31*//v2/{name=projects/*/instances/*/appProfiles/*}\x12\x88\x01\n\x0cGetIamPolicy\x12\".google.iam.v1.GetIamPolicyRequest\x1a\x15.google.iam.v1.Policy\"=\x82\xd3\xe4\x93\x02\x37\"2/v2/{resource=projects/*/instances/*}:getIamPolicy:\x01*\x12\x88\x01\n\x0cSetIamPolicy\x12\".google.iam.v1.SetIamPolicyRequest\x1a\x15.google.iam.v1.Policy\"=\x82\xd3\xe4\x93\x02\x37\"2/v2/{resource=projects/*/instances/*}:setIamPolicy:\x01*\x12\xae\x01\n\x12TestIamPermissions\x12(.google.iam.v1.TestIamPermissionsRequest\x1a).google.iam.v1.TestIamPermissionsResponse\"C\x82\xd3\xe4\x93\x02=\"8/v2/{resource=projects/*/instances/*}:testIamPermissions:\x01*B\xbd\x01\n\x1c\x63om.google.bigtable.admin.v2B\x1a\x42igtableInstanceAdminProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2b\x06proto3') + serialized_pb=_b('\nBgoogle/cloud/bigtable/admin_v2/proto/bigtable_instance_admin.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x33google/cloud/bigtable/admin_v2/proto/instance.proto\x1a\x1egoogle/iam/v1/iam_policy.proto\x1a\x1agoogle/iam/v1/policy.proto\x1a#google/longrunning/operations.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\x97\x02\n\x15\x43reateInstanceRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x13\n\x0binstance_id\x18\x02 \x01(\t\x12\x34\n\x08instance\x18\x03 \x01(\x0b\x32\".google.bigtable.admin.v2.Instance\x12O\n\x08\x63lusters\x18\x04 \x03(\x0b\x32=.google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry\x1aR\n\rClustersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x30\n\x05value\x18\x02 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster:\x02\x38\x01\"\"\n\x12GetInstanceRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\":\n\x14ListInstancesRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x12\n\npage_token\x18\x02 \x01(\t\"\x81\x01\n\x15ListInstancesResponse\x12\x35\n\tinstances\x18\x01 \x03(\x0b\x32\".google.bigtable.admin.v2.Instance\x12\x18\n\x10\x66\x61iled_locations\x18\x02 \x03(\t\x12\x17\n\x0fnext_page_token\x18\x03 \x01(\t\"\x85\x01\n\x1cPartialUpdateInstanceRequest\x12\x34\n\x08instance\x18\x01 \x01(\x0b\x32\".google.bigtable.admin.v2.Instance\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\"%\n\x15\x44\x65leteInstanceRequest\x12\x0c\n\x04name\x18\x01 
\x01(\t\"n\n\x14\x43reateClusterRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x12\n\ncluster_id\x18\x02 \x01(\t\x12\x32\n\x07\x63luster\x18\x03 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster\"!\n\x11GetClusterRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"9\n\x13ListClustersRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x12\n\npage_token\x18\x02 \x01(\t\"~\n\x14ListClustersResponse\x12\x33\n\x08\x63lusters\x18\x01 \x03(\x0b\x32!.google.bigtable.admin.v2.Cluster\x12\x18\n\x10\x66\x61iled_locations\x18\x02 \x03(\t\x12\x17\n\x0fnext_page_token\x18\x03 \x01(\t\"$\n\x14\x44\x65leteClusterRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\xc6\x01\n\x16\x43reateInstanceMetadata\x12I\n\x10original_request\x18\x01 \x01(\x0b\x32/.google.bigtable.admin.v2.CreateInstanceRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xcd\x01\n\x16UpdateInstanceMetadata\x12P\n\x10original_request\x18\x01 \x01(\x0b\x32\x36.google.bigtable.admin.v2.PartialUpdateInstanceRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xc4\x01\n\x15\x43reateClusterMetadata\x12H\n\x10original_request\x18\x01 \x01(\x0b\x32..google.bigtable.admin.v2.CreateClusterRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xb7\x01\n\x15UpdateClusterMetadata\x12;\n\x10original_request\x18\x01 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\x95\x01\n\x17\x43reateAppProfileRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t\x12\x39\n\x0b\x61pp_profile\x18\x03 \x01(\x0b\x32$.google.bigtable.admin.v2.AppProfile\x12\x17\n\x0fignore_warnings\x18\x04 \x01(\x08\"$\n\x14GetAppProfileRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"O\n\x16ListAppProfilesRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x11\n\tpage_size\x18\x03 \x01(\x05\x12\x12\n\npage_token\x18\x02 \x01(\t\"\x88\x01\n\x17ListAppProfilesResponse\x12:\n\x0c\x61pp_profiles\x18\x01 \x03(\x0b\x32$.google.bigtable.admin.v2.AppProfile\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\x12\x18\n\x10\x66\x61iled_locations\x18\x03 \x03(\t\"\x9e\x01\n\x17UpdateAppProfileRequest\x12\x39\n\x0b\x61pp_profile\x18\x01 \x01(\x0b\x32$.google.bigtable.admin.v2.AppProfile\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\x12\x17\n\x0fignore_warnings\x18\x03 \x01(\x08\"@\n\x17\x44\x65leteAppProfileRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x17\n\x0fignore_warnings\x18\x02 
\x01(\x08\"\x1a\n\x18UpdateAppProfileMetadata2\xaa\x17\n\x15\x42igtableInstanceAdmin\x12\x8e\x01\n\x0e\x43reateInstance\x12/.google.bigtable.admin.v2.CreateInstanceRequest\x1a\x1d.google.longrunning.Operation\",\x82\xd3\xe4\x93\x02&\"!/v2/{parent=projects/*}/instances:\x01*\x12\x8a\x01\n\x0bGetInstance\x12,.google.bigtable.admin.v2.GetInstanceRequest\x1a\".google.bigtable.admin.v2.Instance\")\x82\xd3\xe4\x93\x02#\x12!/v2/{name=projects/*/instances/*}\x12\x9b\x01\n\rListInstances\x12..google.bigtable.admin.v2.ListInstancesRequest\x1a/.google.bigtable.admin.v2.ListInstancesResponse\")\x82\xd3\xe4\x93\x02#\x12!/v2/{parent=projects/*}/instances\x12\x86\x01\n\x0eUpdateInstance\x12\".google.bigtable.admin.v2.Instance\x1a\".google.bigtable.admin.v2.Instance\",\x82\xd3\xe4\x93\x02&\x1a!/v2/{name=projects/*/instances/*}:\x01*\x12\xac\x01\n\x15PartialUpdateInstance\x12\x36.google.bigtable.admin.v2.PartialUpdateInstanceRequest\x1a\x1d.google.longrunning.Operation\"<\x82\xd3\xe4\x93\x02\x36\x32*/v2/{instance.name=projects/*/instances/*}:\x08instance\x12\x84\x01\n\x0e\x44\x65leteInstance\x12/.google.bigtable.admin.v2.DeleteInstanceRequest\x1a\x16.google.protobuf.Empty\")\x82\xd3\xe4\x93\x02#*!/v2/{name=projects/*/instances/*}\x12\x9d\x01\n\rCreateCluster\x12..google.bigtable.admin.v2.CreateClusterRequest\x1a\x1d.google.longrunning.Operation\"=\x82\xd3\xe4\x93\x02\x37\",/v2/{parent=projects/*/instances/*}/clusters:\x07\x63luster\x12\x92\x01\n\nGetCluster\x12+.google.bigtable.admin.v2.GetClusterRequest\x1a!.google.bigtable.admin.v2.Cluster\"4\x82\xd3\xe4\x93\x02.\x12,/v2/{name=projects/*/instances/*/clusters/*}\x12\xa3\x01\n\x0cListClusters\x12-.google.bigtable.admin.v2.ListClustersRequest\x1a..google.bigtable.admin.v2.ListClustersResponse\"4\x82\xd3\xe4\x93\x02.\x12,/v2/{parent=projects/*/instances/*}/clusters\x12\x8a\x01\n\rUpdateCluster\x12!.google.bigtable.admin.v2.Cluster\x1a\x1d.google.longrunning.Operation\"7\x82\xd3\xe4\x93\x02\x31\x1a,/v2/{name=projects/*/instances/*/clusters/*}:\x01*\x12\x8d\x01\n\rDeleteCluster\x12..google.bigtable.admin.v2.DeleteClusterRequest\x1a\x16.google.protobuf.Empty\"4\x82\xd3\xe4\x93\x02.*,/v2/{name=projects/*/instances/*/clusters/*}\x12\xb1\x01\n\x10\x43reateAppProfile\x12\x31.google.bigtable.admin.v2.CreateAppProfileRequest\x1a$.google.bigtable.admin.v2.AppProfile\"D\x82\xd3\xe4\x93\x02>\"//v2/{parent=projects/*/instances/*}/appProfiles:\x0b\x61pp_profile\x12\x9e\x01\n\rGetAppProfile\x12..google.bigtable.admin.v2.GetAppProfileRequest\x1a$.google.bigtable.admin.v2.AppProfile\"7\x82\xd3\xe4\x93\x02\x31\x12//v2/{name=projects/*/instances/*/appProfiles/*}\x12\xaf\x01\n\x0fListAppProfiles\x12\x30.google.bigtable.admin.v2.ListAppProfilesRequest\x1a\x31.google.bigtable.admin.v2.ListAppProfilesResponse\"7\x82\xd3\xe4\x93\x02\x31\x12//v2/{parent=projects/*/instances/*}/appProfiles\x12\xb6\x01\n\x10UpdateAppProfile\x12\x31.google.bigtable.admin.v2.UpdateAppProfileRequest\x1a\x1d.google.longrunning.Operation\"P\x82\xd3\xe4\x93\x02J2;/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}:\x0b\x61pp_profile\x12\x96\x01\n\x10\x44\x65leteAppProfile\x12\x31.google.bigtable.admin.v2.DeleteAppProfileRequest\x1a\x16.google.protobuf.Empty\"7\x82\xd3\xe4\x93\x02\x31*//v2/{name=projects/*/instances/*/appProfiles/*}\x12\x88\x01\n\x0cGetIamPolicy\x12\".google.iam.v1.GetIamPolicyRequest\x1a\x15.google.iam.v1.Policy\"=\x82\xd3\xe4\x93\x02\x37\"2/v2/{resource=projects/*/instances/*}:getIamPolicy:\x01*\x12\x88\x01\n\x0cSetIamPolicy\x12\".google.iam.v1.SetIamPolicyRequest\x1a\x15.google.iam
.v1.Policy\"=\x82\xd3\xe4\x93\x02\x37\"2/v2/{resource=projects/*/instances/*}:setIamPolicy:\x01*\x12\xae\x01\n\x12TestIamPermissions\x12(.google.iam.v1.TestIamPermissionsRequest\x1a).google.iam.v1.TestIamPermissionsResponse\"C\x82\xd3\xe4\x93\x02=\"8/v2/{resource=projects/*/instances/*}:testIamPermissions:\x01*B\xbd\x01\n\x1c\x63om.google.bigtable.admin.v2B\x1a\x42igtableInstanceAdminProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2b\x06proto3') , dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.DESCRIPTOR,google_dot_iam_dot_v1_dot_iam__policy__pb2.DESCRIPTOR,google_dot_iam_dot_v1_dot_policy__pb2.DESCRIPTOR,google_dot_longrunning_dot_operations__pb2.DESCRIPTOR,google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,google_dot_protobuf_dot_field__mask__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,]) @@ -774,7 +774,14 @@ is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='page_token', full_name='google.bigtable.admin.v2.ListAppProfilesRequest.page_token', index=1, + name='page_size', full_name='google.bigtable.admin.v2.ListAppProfilesRequest.page_size', index=1, + number=3, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='page_token', full_name='google.bigtable.admin.v2.ListAppProfilesRequest.page_token', index=2, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, @@ -793,7 +800,7 @@ oneofs=[ ], serialized_start=2413, - serialized_end=2473, + serialized_end=2492, ) @@ -818,6 +825,13 @@ message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='failed_locations', full_name='google.bigtable.admin.v2.ListAppProfilesResponse.failed_locations', index=2, + number=3, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), ], extensions=[ ], @@ -830,8 +844,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2475, - serialized_end=2585, + serialized_start=2495, + serialized_end=2631, ) @@ -875,8 +889,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2588, - serialized_end=2746, + serialized_start=2634, + serialized_end=2792, ) @@ -913,8 +927,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2748, - serialized_end=2812, + serialized_start=2794, + serialized_end=2858, ) @@ -937,8 +951,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2814, - serialized_end=2840, + serialized_start=2860, + serialized_end=2886, ) _CREATEINSTANCEREQUEST_CLUSTERSENTRY.fields_by_name['value'].message_type = google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._CLUSTER @@ -1019,8 +1033,8 @@ The clusters to be created within the instance, mapped by desired cluster ID, e.g., just ``mycluster`` rather than ``pro jects/myproject/instances/myinstance/clusters/mycluster``. - Fields marked ``OutputOnly`` must be left blank. 
Currently - exactly one cluster must be specified. + Fields marked ``OutputOnly`` must be left blank. Currently, at + most two clusters can be specified. """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateInstanceRequest) )) @@ -1055,7 +1069,7 @@ The unique name of the project for which a list of instances is requested. Values are of the form ``projects/``. page_token: - The value of ``next_page_token`` returned by a previous call. + DEPRECATED: This field is unused and ignored. """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListInstancesRequest) )) @@ -1077,11 +1091,10 @@ Instances whose Clusters are all in one of the failed locations may be missing from ``instances``, and Instances with at least one Cluster in a failed location may only have - partial information returned. + partial information returned. Values are of the form + ``projects//locations/`` next_page_token: - Set if not all instances could be returned in a single - response. Pass this value to ``page_token`` in another request - to get the next page of results. + DEPRECATED: This field is unused and ignored. """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListInstancesResponse) )) @@ -1177,7 +1190,7 @@ = '-'`` to list Clusters for all Instances in a project, e.g., ``projects/myproject/instances/-``. page_token: - The value of ``next_page_token`` returned by a previous call. + DEPRECATED: This field is unused and ignored. """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListClustersRequest) )) @@ -1198,10 +1211,10 @@ retrieved, due to an outage or some other transient condition. Clusters from these locations may be missing from ``clusters``, or may only have partial information returned. + Values are of the form + ``projects//locations/`` next_page_token: - Set if not all clusters could be returned in a single - response. Pass this value to ``page_token`` in another request - to get the next page of results. + DEPRECATED: This field is unused and ignored. """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListClustersResponse) )) @@ -1362,7 +1375,12 @@ parent: The unique name of the instance for which a list of app profiles is requested. Values are of the form - ``projects//instances/``. + ``projects//instances/``. Use `` + = '-'`` to list AppProfiles for all Instances in a project, + e.g., ``projects/myproject/instances/-``. + page_size: + Maximum number of results per page. CURRENTLY UNIMPLEMENTED + AND IGNORED. page_token: The value of ``next_page_token`` returned by a previous call. """, @@ -1384,6 +1402,12 @@ Set if not all app profiles could be returned in a single response. Pass this value to ``page_token`` in another request to get the next page of results. + failed_locations: + Locations from which AppProfile information could not be + retrieved, due to an outage or some other transient condition. + AppProfiles from these locations may be missing from + ``app_profiles``. 
Values are of the form + ``projects//locations/`` """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListAppProfilesResponse) )) @@ -1451,8 +1475,8 @@ file=DESCRIPTOR, index=0, options=None, - serialized_start=2843, - serialized_end=5829, + serialized_start=2889, + serialized_end=5875, methods=[ _descriptor.MethodDescriptor( name='CreateInstance', diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py index 9d650846856c..fd373785bc22 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py @@ -25,7 +25,7 @@ name='google/cloud/bigtable/admin_v2/proto/bigtable_table_admin.proto', package='google.bigtable.admin.v2', syntax='proto3', - serialized_pb=_b('\n?google/cloud/bigtable/admin_v2/proto/bigtable_table_admin.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x30google/cloud/bigtable/admin_v2/proto/table.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\xc8\x01\n\x12\x43reateTableRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x10\n\x08table_id\x18\x02 \x01(\t\x12.\n\x05table\x18\x03 \x01(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12J\n\x0einitial_splits\x18\x04 \x03(\x0b\x32\x32.google.bigtable.admin.v2.CreateTableRequest.Split\x1a\x14\n\x05Split\x12\x0b\n\x03key\x18\x01 \x01(\x0c\"[\n\x1e\x43reateTableFromSnapshotRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x10\n\x08table_id\x18\x02 \x01(\t\x12\x17\n\x0fsource_snapshot\x18\x03 \x01(\t\"m\n\x13\x44ropRowRangeRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x18\n\x0erow_key_prefix\x18\x02 \x01(\x0cH\x00\x12$\n\x1a\x64\x65lete_all_data_from_table\x18\x03 \x01(\x08H\x00\x42\x08\n\x06target\"k\n\x11ListTablesRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x32\n\x04view\x18\x02 \x01(\x0e\x32$.google.bigtable.admin.v2.Table.View\x12\x12\n\npage_token\x18\x03 \x01(\t\"^\n\x12ListTablesResponse\x12/\n\x06tables\x18\x01 \x03(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"S\n\x0fGetTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x32\n\x04view\x18\x02 \x01(\x0e\x32$.google.bigtable.admin.v2.Table.View\"\"\n\x12\x44\x65leteTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\xae\x02\n\x1bModifyColumnFamiliesRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12Y\n\rmodifications\x18\x02 \x03(\x0b\x32\x42.google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification\x1a\xa5\x01\n\x0cModification\x12\n\n\x02id\x18\x01 \x01(\t\x12\x38\n\x06\x63reate\x18\x02 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamilyH\x00\x12\x38\n\x06update\x18\x03 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamilyH\x00\x12\x0e\n\x04\x64rop\x18\x04 \x01(\x08H\x00\x42\x05\n\x03mod\"/\n\x1fGenerateConsistencyTokenRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"=\n GenerateConsistencyTokenResponse\x12\x19\n\x11\x63onsistency_token\x18\x01 \x01(\t\"B\n\x17\x43heckConsistencyRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x19\n\x11\x63onsistency_token\x18\x02 \x01(\t\".\n\x18\x43heckConsistencyResponse\x12\x12\n\nconsistent\x18\x01 \x01(\x08\"\x87\x01\n\x14SnapshotTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07\x63luster\x18\x02 \x01(\t\x12\x13\n\x0bsnapshot_id\x18\x03 
\x01(\t\x12&\n\x03ttl\x18\x04 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x13\n\x0b\x64\x65scription\x18\x05 \x01(\t\"\"\n\x12GetSnapshotRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"M\n\x14ListSnapshotsRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t\"g\n\x15ListSnapshotsResponse\x12\x35\n\tsnapshots\x18\x01 \x03(\x0b\x32\".google.bigtable.admin.v2.Snapshot\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"%\n\x15\x44\x65leteSnapshotRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\xc4\x01\n\x15SnapshotTableMetadata\x12H\n\x10original_request\x18\x01 \x01(\x0b\x32..google.bigtable.admin.v2.SnapshotTableRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xd8\x01\n\x1f\x43reateTableFromSnapshotMetadata\x12R\n\x10original_request\x18\x01 \x01(\x0b\x32\x38.google.bigtable.admin.v2.CreateTableFromSnapshotRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp2\xb7\x11\n\x12\x42igtableTableAdmin\x12\x93\x01\n\x0b\x43reateTable\x12,.google.bigtable.admin.v2.CreateTableRequest\x1a\x1f.google.bigtable.admin.v2.Table\"5\x82\xd3\xe4\x93\x02/\"*/v2/{parent=projects/*/instances/*}/tables:\x01*\x12\xbc\x01\n\x17\x43reateTableFromSnapshot\x12\x38.google.bigtable.admin.v2.CreateTableFromSnapshotRequest\x1a\x1d.google.longrunning.Operation\"H\x82\xd3\xe4\x93\x02\x42\"=/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot:\x01*\x12\x9b\x01\n\nListTables\x12+.google.bigtable.admin.v2.ListTablesRequest\x1a,.google.bigtable.admin.v2.ListTablesResponse\"2\x82\xd3\xe4\x93\x02,\x12*/v2/{parent=projects/*/instances/*}/tables\x12\x8a\x01\n\x08GetTable\x12).google.bigtable.admin.v2.GetTableRequest\x1a\x1f.google.bigtable.admin.v2.Table\"2\x82\xd3\xe4\x93\x02,\x12*/v2/{name=projects/*/instances/*/tables/*}\x12\x87\x01\n\x0b\x44\x65leteTable\x12,.google.bigtable.admin.v2.DeleteTableRequest\x1a\x16.google.protobuf.Empty\"2\x82\xd3\xe4\x93\x02,**/v2/{name=projects/*/instances/*/tables/*}\x12\xba\x01\n\x14ModifyColumnFamilies\x12\x35.google.bigtable.admin.v2.ModifyColumnFamiliesRequest\x1a\x1f.google.bigtable.admin.v2.Table\"J\x82\xd3\xe4\x93\x02\x44\"?/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies:\x01*\x12\x99\x01\n\x0c\x44ropRowRange\x12-.google.bigtable.admin.v2.DropRowRangeRequest\x1a\x16.google.protobuf.Empty\"B\x82\xd3\xe4\x93\x02<\"7/v2/{name=projects/*/instances/*/tables/*}:dropRowRange:\x01*\x12\xe1\x01\n\x18GenerateConsistencyToken\x12\x39.google.bigtable.admin.v2.GenerateConsistencyTokenRequest\x1a:.google.bigtable.admin.v2.GenerateConsistencyTokenResponse\"N\x82\xd3\xe4\x93\x02H\"C/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken:\x01*\x12\xc1\x01\n\x10\x43heckConsistency\x12\x31.google.bigtable.admin.v2.CheckConsistencyRequest\x1a\x32.google.bigtable.admin.v2.CheckConsistencyResponse\"F\x82\xd3\xe4\x93\x02@\";/v2/{name=projects/*/instances/*/tables/*}:checkConsistency:\x01*\x12\x9e\x01\n\rSnapshotTable\x12..google.bigtable.admin.v2.SnapshotTableRequest\x1a\x1d.google.longrunning.Operation\">\x82\xd3\xe4\x93\x02\x38\"3/v2/{name=projects/*/instances/*/tables/*}:snapshot:\x01*\x12\xa1\x01\n\x0bGetSnapshot\x12,.google.bigtable.admin.v2.GetSnapshotRequest\x1a\".google.bigtable.admin.v2.Snapshot\"@\x82\xd3\xe4\x93\x02:\x12\x38/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}\x12\xb2\x01\
n\rListSnapshots\x12..google.bigtable.admin.v2.ListSnapshotsRequest\x1a/.google.bigtable.admin.v2.ListSnapshotsResponse\"@\x82\xd3\xe4\x93\x02:\x12\x38/v2/{parent=projects/*/instances/*/clusters/*}/snapshots\x12\x9b\x01\n\x0e\x44\x65leteSnapshot\x12/.google.bigtable.admin.v2.DeleteSnapshotRequest\x1a\x16.google.protobuf.Empty\"@\x82\xd3\xe4\x93\x02:*8/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}B\xba\x01\n\x1c\x63om.google.bigtable.admin.v2B\x17\x42igtableTableAdminProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2b\x06proto3') + serialized_pb=_b('\n?google/cloud/bigtable/admin_v2/proto/bigtable_table_admin.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x30google/cloud/bigtable/admin_v2/proto/table.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\xc8\x01\n\x12\x43reateTableRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x10\n\x08table_id\x18\x02 \x01(\t\x12.\n\x05table\x18\x03 \x01(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12J\n\x0einitial_splits\x18\x04 \x03(\x0b\x32\x32.google.bigtable.admin.v2.CreateTableRequest.Split\x1a\x14\n\x05Split\x12\x0b\n\x03key\x18\x01 \x01(\x0c\"[\n\x1e\x43reateTableFromSnapshotRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x10\n\x08table_id\x18\x02 \x01(\t\x12\x17\n\x0fsource_snapshot\x18\x03 \x01(\t\"m\n\x13\x44ropRowRangeRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x18\n\x0erow_key_prefix\x18\x02 \x01(\x0cH\x00\x12$\n\x1a\x64\x65lete_all_data_from_table\x18\x03 \x01(\x08H\x00\x42\x08\n\x06target\"~\n\x11ListTablesRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x32\n\x04view\x18\x02 \x01(\x0e\x32$.google.bigtable.admin.v2.Table.View\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t\"^\n\x12ListTablesResponse\x12/\n\x06tables\x18\x01 \x03(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"S\n\x0fGetTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x32\n\x04view\x18\x02 \x01(\x0e\x32$.google.bigtable.admin.v2.Table.View\"\"\n\x12\x44\x65leteTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\xae\x02\n\x1bModifyColumnFamiliesRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12Y\n\rmodifications\x18\x02 \x03(\x0b\x32\x42.google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification\x1a\xa5\x01\n\x0cModification\x12\n\n\x02id\x18\x01 \x01(\t\x12\x38\n\x06\x63reate\x18\x02 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamilyH\x00\x12\x38\n\x06update\x18\x03 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamilyH\x00\x12\x0e\n\x04\x64rop\x18\x04 \x01(\x08H\x00\x42\x05\n\x03mod\"/\n\x1fGenerateConsistencyTokenRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"=\n GenerateConsistencyTokenResponse\x12\x19\n\x11\x63onsistency_token\x18\x01 \x01(\t\"B\n\x17\x43heckConsistencyRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x19\n\x11\x63onsistency_token\x18\x02 \x01(\t\".\n\x18\x43heckConsistencyResponse\x12\x12\n\nconsistent\x18\x01 \x01(\x08\"\x87\x01\n\x14SnapshotTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07\x63luster\x18\x02 \x01(\t\x12\x13\n\x0bsnapshot_id\x18\x03 \x01(\t\x12&\n\x03ttl\x18\x04 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x13\n\x0b\x64\x65scription\x18\x05 \x01(\t\"\"\n\x12GetSnapshotRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"M\n\x14ListSnapshotsRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x11\n\tpage_size\x18\x02 
\x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t\"g\n\x15ListSnapshotsResponse\x12\x35\n\tsnapshots\x18\x01 \x03(\x0b\x32\".google.bigtable.admin.v2.Snapshot\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"%\n\x15\x44\x65leteSnapshotRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\xc4\x01\n\x15SnapshotTableMetadata\x12H\n\x10original_request\x18\x01 \x01(\x0b\x32..google.bigtable.admin.v2.SnapshotTableRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xd8\x01\n\x1f\x43reateTableFromSnapshotMetadata\x12R\n\x10original_request\x18\x01 \x01(\x0b\x32\x38.google.bigtable.admin.v2.CreateTableFromSnapshotRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp2\xb7\x11\n\x12\x42igtableTableAdmin\x12\x93\x01\n\x0b\x43reateTable\x12,.google.bigtable.admin.v2.CreateTableRequest\x1a\x1f.google.bigtable.admin.v2.Table\"5\x82\xd3\xe4\x93\x02/\"*/v2/{parent=projects/*/instances/*}/tables:\x01*\x12\xbc\x01\n\x17\x43reateTableFromSnapshot\x12\x38.google.bigtable.admin.v2.CreateTableFromSnapshotRequest\x1a\x1d.google.longrunning.Operation\"H\x82\xd3\xe4\x93\x02\x42\"=/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot:\x01*\x12\x9b\x01\n\nListTables\x12+.google.bigtable.admin.v2.ListTablesRequest\x1a,.google.bigtable.admin.v2.ListTablesResponse\"2\x82\xd3\xe4\x93\x02,\x12*/v2/{parent=projects/*/instances/*}/tables\x12\x8a\x01\n\x08GetTable\x12).google.bigtable.admin.v2.GetTableRequest\x1a\x1f.google.bigtable.admin.v2.Table\"2\x82\xd3\xe4\x93\x02,\x12*/v2/{name=projects/*/instances/*/tables/*}\x12\x87\x01\n\x0b\x44\x65leteTable\x12,.google.bigtable.admin.v2.DeleteTableRequest\x1a\x16.google.protobuf.Empty\"2\x82\xd3\xe4\x93\x02,**/v2/{name=projects/*/instances/*/tables/*}\x12\xba\x01\n\x14ModifyColumnFamilies\x12\x35.google.bigtable.admin.v2.ModifyColumnFamiliesRequest\x1a\x1f.google.bigtable.admin.v2.Table\"J\x82\xd3\xe4\x93\x02\x44\"?/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies:\x01*\x12\x99\x01\n\x0c\x44ropRowRange\x12-.google.bigtable.admin.v2.DropRowRangeRequest\x1a\x16.google.protobuf.Empty\"B\x82\xd3\xe4\x93\x02<\"7/v2/{name=projects/*/instances/*/tables/*}:dropRowRange:\x01*\x12\xe1\x01\n\x18GenerateConsistencyToken\x12\x39.google.bigtable.admin.v2.GenerateConsistencyTokenRequest\x1a:.google.bigtable.admin.v2.GenerateConsistencyTokenResponse\"N\x82\xd3\xe4\x93\x02H\"C/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken:\x01*\x12\xc1\x01\n\x10\x43heckConsistency\x12\x31.google.bigtable.admin.v2.CheckConsistencyRequest\x1a\x32.google.bigtable.admin.v2.CheckConsistencyResponse\"F\x82\xd3\xe4\x93\x02@\";/v2/{name=projects/*/instances/*/tables/*}:checkConsistency:\x01*\x12\x9e\x01\n\rSnapshotTable\x12..google.bigtable.admin.v2.SnapshotTableRequest\x1a\x1d.google.longrunning.Operation\">\x82\xd3\xe4\x93\x02\x38\"3/v2/{name=projects/*/instances/*/tables/*}:snapshot:\x01*\x12\xa1\x01\n\x0bGetSnapshot\x12,.google.bigtable.admin.v2.GetSnapshotRequest\x1a\".google.bigtable.admin.v2.Snapshot\"@\x82\xd3\xe4\x93\x02:\x12\x38/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}\x12\xb2\x01\n\rListSnapshots\x12..google.bigtable.admin.v2.ListSnapshotsRequest\x1a/.google.bigtable.admin.v2.ListSnapshotsResponse\"@\x82\xd3\xe4\x93\x02:\x12\x38/v2/{parent=projects/*/instances/*/clusters/*}/snapshots\x12\x9b\x01\n\x0e\x44\x65leteSnapshot\x12/.google.bigtable.admin.v2.
DeleteSnapshotRequest\x1a\x16.google.protobuf.Empty\"@\x82\xd3\xe4\x93\x02:*8/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}B\xba\x01\n\x1c\x63om.google.bigtable.admin.v2B\x17\x42igtableTableAdminProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2b\x06proto3') , dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.DESCRIPTOR,google_dot_longrunning_dot_operations__pb2.DESCRIPTOR,google_dot_protobuf_dot_duration__pb2.DESCRIPTOR,google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,]) @@ -229,7 +229,14 @@ is_extension=False, extension_scope=None, options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( - name='page_token', full_name='google.bigtable.admin.v2.ListTablesRequest.page_token', index=2, + name='page_size', full_name='google.bigtable.admin.v2.ListTablesRequest.page_size', index=2, + number=4, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='page_token', full_name='google.bigtable.admin.v2.ListTablesRequest.page_token', index=3, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, @@ -248,7 +255,7 @@ oneofs=[ ], serialized_start=711, - serialized_end=818, + serialized_end=837, ) @@ -285,8 +292,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=820, - serialized_end=914, + serialized_start=839, + serialized_end=933, ) @@ -323,8 +330,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=916, - serialized_end=999, + serialized_start=935, + serialized_end=1018, ) @@ -354,8 +361,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1001, - serialized_end=1035, + serialized_start=1020, + serialized_end=1054, ) @@ -409,8 +416,8 @@ name='mod', full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.mod', index=0, containing_type=None, fields=[]), ], - serialized_start=1175, - serialized_end=1340, + serialized_start=1194, + serialized_end=1359, ) _MODIFYCOLUMNFAMILIESREQUEST = _descriptor.Descriptor( @@ -446,8 +453,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1038, - serialized_end=1340, + serialized_start=1057, + serialized_end=1359, ) @@ -477,8 +484,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1342, - serialized_end=1389, + serialized_start=1361, + serialized_end=1408, ) @@ -508,8 +515,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1391, - serialized_end=1452, + serialized_start=1410, + serialized_end=1471, ) @@ -546,8 +553,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1454, - serialized_end=1520, + serialized_start=1473, + serialized_end=1539, ) @@ -577,8 +584,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1522, - serialized_end=1568, + serialized_start=1541, + serialized_end=1587, ) @@ -636,8 +643,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1571, - serialized_end=1706, + serialized_start=1590, + serialized_end=1725, ) @@ -667,8 +674,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1708, - serialized_end=1742, + serialized_start=1727, + serialized_end=1761, ) @@ -712,8 +719,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1744, - 
serialized_end=1821, + serialized_start=1763, + serialized_end=1840, ) @@ -750,8 +757,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1823, - serialized_end=1926, + serialized_start=1842, + serialized_end=1945, ) @@ -781,8 +788,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1928, - serialized_end=1965, + serialized_start=1947, + serialized_end=1984, ) @@ -826,8 +833,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1968, - serialized_end=2164, + serialized_start=1987, + serialized_end=2183, ) @@ -871,8 +878,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2167, - serialized_end=2383, + serialized_start=2186, + serialized_end=2402, ) _CREATETABLEREQUEST_SPLIT.containing_type = _CREATETABLEREQUEST @@ -1059,6 +1066,9 @@ The view to be applied to the returned tables' fields. Defaults to ``NAME_ONLY`` if unspecified; no others are currently supported. + page_size: + Maximum number of results per page. CURRENTLY UNIMPLEMENTED + AND IGNORED. page_token: The value of ``next_page_token`` returned by a previous call. """, @@ -1334,7 +1344,8 @@ list snapshots for all clusters in an instance, e.g., ``projects//instances//clusters/-``. page_size: - The maximum number of snapshots to return. + The maximum number of snapshots to return per page. CURRENTLY + UNIMPLEMENTED AND IGNORED. page_token: The value of ``next_page_token`` returned by a previous call. """, @@ -1456,8 +1467,8 @@ file=DESCRIPTOR, index=0, options=None, - serialized_start=2386, - serialized_end=4617, + serialized_start=2405, + serialized_end=4636, methods=[ _descriptor.MethodDescriptor( name='CreateTable', diff --git a/packages/google-cloud-bigtable/synth.py b/packages/google-cloud-bigtable/synth.py index effddc9d0615..5e73c768a5b3 100644 --- a/packages/google-cloud-bigtable/synth.py +++ b/packages/google-cloud-bigtable/synth.py @@ -47,7 +47,11 @@ "'google-cloud-bigtable'") s.replace( - "**/*.py", + "google/**/*.py", + 'from google\.cloud\.bigtable\.admin_v2.proto', + 'from google.cloud.bigtable_admin_v2.proto') +s.replace( + "tests/**/*.py", 'from google\.cloud\.bigtable\.admin_v2.proto', 'from google.cloud.bigtable_admin_v2.proto') @@ -61,3 +65,12 @@ "\g<1> 'grpc.max_receive_message_length': -1,\n" "\g<1>}.items(),\n" ) + +s.replace( + ['google/cloud/bigtable_admin_v2/__init__.py'], + ' __doc__ = bigtable_instance_admin_client.' + 'BigtableInstanceAdminClient.__doc__\n', + ' __doc__ = (\n' + ' bigtable_instance_admin_client.BigtableInstanceAdminClient.' + '__doc__)\n', +) From 91b38be7140c5ef1067c8bd86f8f1ec337fdf533 Mon Sep 17 00:00:00 2001 From: Solomon Duskis Date: Fri, 14 Sep 2018 14:06:59 -0400 Subject: [PATCH 177/892] Bigtable: refactoring read_rows infrastructure (#5963) * Bigtable: refactoring read_rows infrastructure This work is on behalf of understanding the issues behind issue #5876 where InvalidChunk is thrown due to row keys being processed out of order. About this PR - introducing `PartialRowsData._process_chunk` to break up logic in `__iter__`, and allow for additional testability. `__iter__` is responsible to get chunks, call `_process_chunk`, and manage `last_scanned_row_key`. - removed `_copy_from_current` - _state is now explicitly set - fixed a bug in `PartialRowsData.counter` which was counting chunks instead of rows. This counter is used in reconstructing requests for "smart retries" that update the request on transient exceptions * Fixing some failures. 
* Fixing lint issues * Fixing more lint * removing _validate_cell_data_cell_in_progress * Updating documentation about `_counter` --- .../google/cloud/bigtable/row_data.py | 180 +++++++----------- .../google/cloud/bigtable/table.py | 2 +- .../tests/unit/test_row_data.py | 171 +++++++---------- 3 files changed, 135 insertions(+), 218 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py index ab561655a16a..f2dd0be01a35 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py @@ -321,22 +321,20 @@ class PartialRowsData(object): expected exceptions during iteration. """ - START = 'Start' # No responses yet processed. NEW_ROW = 'New row' # No cells yet complete for row ROW_IN_PROGRESS = 'Row in progress' # Some cells complete for row CELL_IN_PROGRESS = 'Cell in progress' # Incomplete cell for row - STATE_START = 0 STATE_NEW_ROW = 1 STATE_ROW_IN_PROGRESS = 2 STATE_CELL_IN_PROGRESS = 3 - read_states = {STATE_START: START, STATE_NEW_ROW: NEW_ROW, + read_states = {STATE_NEW_ROW: NEW_ROW, STATE_ROW_IN_PROGRESS: ROW_IN_PROGRESS, STATE_CELL_IN_PROGRESS: CELL_IN_PROGRESS} def __init__(self, read_method, request): - # Counter for responses pulled from iterator + # Counter for rows returned to the user self._counter = 0 # In-progress row, unset until first response, after commit/reset self._row = None @@ -354,6 +352,7 @@ def __init__(self, read_method, request): self.response_iterator = read_method(request) self.rows = {} + self._state = self.STATE_NEW_ROW @property def state(self): @@ -365,23 +364,6 @@ def state(self): """ return self.read_states[self._state] - @property - def _state(self): - """State machine state. - :rtype: int - :returns: id of state corresponding to currrent row / chunk - processing. - """ - if self._previous_cell is not None: - return self.STATE_ROW_IN_PROGRESS - if self.last_scanned_row_key is None: - return self.STATE_START - if self._row is None: - return self.STATE_NEW_ROW - if self._cell is not None: - return self.STATE_CELL_IN_PROGRESS - return self.STATE_NEW_ROW # row added, no chunk yet processed - def cancel(self): """Cancels the iterator, closing the stream.""" self.response_iterator.cancel() @@ -428,7 +410,7 @@ def _read_next_response(self): return retry_(self._read_next, on_error=self._on_error)() def __iter__(self): - """Consume the ``ReadRowsResponse's`` from the stream. + """Consume the ``ReadRowsResponse``s from the stream. 
Read the rows and yield each to the reader Parse the response and its chunks into a new/existing row in @@ -440,86 +422,78 @@ def __iter__(self): except StopIteration: break - self._counter += 1 - - if self.last_scanned_row_key is None: # first response - if response.last_scanned_row_key: - raise InvalidReadRowsResponse() - - self.last_scanned_row_key = response.last_scanned_row_key - - row = self._row - cell = self._cell - for chunk in response.chunks: - - if chunk.reset_row: - self._validate_chunk_reset_row(chunk) - row = self._row = None - cell = self._cell = self._previous_cell = None - continue - - if cell is None: - qualifier = chunk.qualifier.value - if qualifier == b'' and not chunk.HasField('qualifier'): - qualifier = None - - cell = PartialCellData( - chunk.row_key, - chunk.family_name.value, - qualifier, - chunk.timestamp_micros, - chunk.labels, - chunk.value) - self._validate_cell_data(cell) - self._cell = cell - self._copy_from_previous(cell) - else: - cell.append_value(chunk.value) - - if row is None: - row = self._row = PartialRowData(cell.row_key) - + self._process_chunk(chunk) if chunk.commit_row: - if chunk.value_size > 0: - raise InvalidChunk() - - self._save_current_cell() + self.last_scanned_row_key = self._previous_row.row_key + self._counter += 1 + yield self._previous_row - yield self._row + resp_last_key = response.last_scanned_row_key + if resp_last_key and resp_last_key > self.last_scanned_row_key: + self.last_scanned_row_key = resp_last_key - self.last_scanned_row_key = self._row.row_key - self._row, self._previous_row = None, self._row - self._previous_cell = None - row = cell = None - continue + def _process_chunk(self, chunk): + if chunk.reset_row: + self._validate_chunk_reset_row(chunk) + self._row = None + self._cell = self._previous_cell = None + self._state = self.STATE_NEW_ROW + return - if chunk.value_size == 0: - self._save_current_cell() - cell = None + self._update_cell(chunk) - def _validate_cell_data(self, cell): - if self._state == self.STATE_ROW_IN_PROGRESS: - self._validate_cell_data_row_in_progress(cell) - if self._state == self.STATE_NEW_ROW: - self._validate_cell_data_new_row(cell) - if self._state == self.STATE_CELL_IN_PROGRESS: - self._copy_from_current(cell) + if self._row is None: + if (self._previous_row is not None and + self._cell.row_key <= self._previous_row.row_key): + raise InvalidChunk() + self._row = PartialRowData(self._cell.row_key) + + if chunk.value_size == 0: + self._state = self.STATE_ROW_IN_PROGRESS + self._save_current_cell() + else: + self._state = self.STATE_CELL_IN_PROGRESS + + if chunk.commit_row: + if chunk.value_size > 0: + raise InvalidChunk() + + self._previous_row = self._row + self._row = None + self._previous_cell = None + self._state = self.STATE_NEW_ROW + + def _update_cell(self, chunk): + if self._cell is None: + qualifier = None + if chunk.HasField('qualifier'): + qualifier = chunk.qualifier.value + family = None + if chunk.HasField('family_name'): + family = chunk.family_name.value + + self._cell = PartialCellData( + chunk.row_key, + family, + qualifier, + chunk.timestamp_micros, + chunk.labels, + chunk.value) + self._copy_from_previous(self._cell) + self._validate_cell_data_new_cell() + else: + self._cell.append_value(chunk.value) - def _validate_cell_data_new_row(self, cell): + def _validate_cell_data_new_cell(self): + cell = self._cell if (not cell.row_key or not cell.family_name or cell.qualifier is None): raise InvalidChunk() - if (self._previous_row is not None and - cell.row_key <= 
self._previous_row.row_key): - raise InvalidChunk() - - def _validate_cell_data_row_in_progress(self, cell): - if ((cell.row_key and - cell.row_key != self._row.row_key) or - (cell.family_name and cell.qualifier is None)): + prev = self._previous_cell + if prev and prev.row_key != cell.row_key: raise InvalidChunk() def _validate_chunk_reset_row(self, chunk): @@ -534,6 +508,7 @@ def _validate_chunk_reset_row(self, chunk): _raise_if(chunk.labels) _raise_if(chunk.value_size) _raise_if(chunk.value) + _raise_if(chunk.commit_row) def _save_current_cell(self): """Helper for :meth:`consume_next`.""" @@ -544,32 +519,17 @@ def _save_current_cell(self): qualified.append(complete) self._cell, self._previous_cell = None, cell - def _copy_from_current(self, cell): - current = self._cell - if current is not None: - if not cell.row_key: - cell.row_key = current.row_key - if not cell.family_name: - cell.family_name = current.family_name - # NOTE: ``cell.qualifier`` **can** be empty string. - if cell.qualifier is None: - cell.qualifier = current.qualifier - if not cell.timestamp_micros: - cell.timestamp_micros = current.timestamp_micros - if not cell.labels: - cell.labels.extend(current.labels) - def _copy_from_previous(self, cell): """Helper for :meth:`consume_next`.""" previous = self._previous_cell if previous is not None: if not cell.row_key: cell.row_key = previous.row_key - if not cell.family_name: - cell.family_name = previous.family_name - # NOTE: ``cell.qualifier`` **can** be empty string. - if cell.qualifier is None: - cell.qualifier = previous.qualifier + if not cell.family_name: + cell.family_name = previous.family_name + # NOTE: ``cell.qualifier`` **can** be empty string. + if cell.qualifier is None: + cell.qualifier = previous.qualifier class _ReadRowsRequestManager(object): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index 62e22c67eb4a..53660a4546db 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -308,7 +308,7 @@ def read_row(self, row_key, filter_=None): request_pb) rows_data.consume_all() - if rows_data.state not in (rows_data.NEW_ROW, rows_data.START): + if rows_data.state != rows_data.NEW_ROW: raise ValueError('The row remains partial / is not committed.') if len(rows_data.rows) == 0: diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_data.py b/packages/google-cloud-bigtable/tests/unit/test_row_data.py index e6ba31f4decd..c11182ae6a8e 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_data.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_data.py @@ -410,7 +410,7 @@ def test_state_start(self): client._data_stub.ReadRows.side_effect = [iterator] request = object() yrd = self._make_one(client._data_stub.ReadRows, request) - self.assertEqual(yrd.state, yrd.START) + self.assertEqual(yrd.state, yrd.NEW_ROW) def test_state_new_row_w_row(self): from google.cloud.bigtable_v2.gapic import bigtable_client @@ -439,13 +439,50 @@ def test_state_new_row_w_row(self): client._table_data_client.transport.read_rows, request) yrd._response_iterator = iterator - yrd._last_scanned_row_key = '' rows = [row for row in yrd] result = rows[0] self.assertEqual(result.row_key, self.ROW_KEY) + self.assertEqual(yrd._counter, 1) + self.assertEqual(yrd.state, yrd.NEW_ROW) + + def test_multiple_chunks(self): + from google.cloud.bigtable_v2.gapic import bigtable_client + + chunk1 = 
_ReadRowsResponseCellChunkPB( + row_key=self.ROW_KEY, + family_name=self.FAMILY_NAME, + qualifier=self.QUALIFIER, + timestamp_micros=self.TIMESTAMP_MICROS, + value=self.VALUE, + commit_row=False, + ) + chunk2 = _ReadRowsResponseCellChunkPB( + qualifier=self.QUALIFIER + b'1', + timestamp_micros=self.TIMESTAMP_MICROS, + value=self.VALUE, + commit_row=True, + ) + chunks = [chunk1, chunk2] + + response = _ReadRowsResponseV2(chunks) + iterator = _MockCancellableIterator(response) + channel = ChannelStub(responses=[iterator]) + data_api = bigtable_client.BigtableClient(channel=channel) + credentials = _make_credentials() + client = self._make_client(project='project-id', + credentials=credentials, admin=True) + client._table_data_client = data_api + request = object() - yrd._row = object() + yrd = self._make_one( + client._table_data_client.transport.read_rows, request) + + yrd._response_iterator = iterator + rows = [row for row in yrd] + result = rows[0] + self.assertEqual(result.row_key, self.ROW_KEY) + self.assertEqual(yrd._counter, 1) self.assertEqual(yrd.state, yrd.NEW_ROW) def test_cancel(self): @@ -461,61 +498,6 @@ def test_cancel(self): # 'consume_next' tested via 'TestPartialRowsData_JSON_acceptance_tests' - def test__copy_from_current_unset(self): - client = _Client() - client._data_stub = mock.MagicMock() - request = object() - yrd = self._make_one(client._data_stub.ReadRows, request) - chunks = _generate_cell_chunks(['']) - chunk = chunks[0] - yrd._copy_from_current(chunk) - self.assertEqual(chunk.row_key, b'') - self.assertEqual(chunk.family_name.value, u'') - self.assertEqual(chunk.qualifier.value, b'') - self.assertEqual(chunk.timestamp_micros, 0) - self.assertEqual(chunk.labels, []) - - def test__copy_from_current_blank(self): - ROW_KEY = b'RK' - FAMILY_NAME = u'A' - QUALIFIER = b'C' - TIMESTAMP_MICROS = 100 - LABELS = ['L1', 'L2'] - client = _Client() - client._data_stub = mock.MagicMock() - request = object() - yrd = self._make_one(client._data_stub.ReadRows, request) - yrd._cell = _PartialCellData() - chunks = _generate_cell_chunks(['']) - chunk = chunks[0] - chunk.row_key = ROW_KEY - chunk.family_name.value = FAMILY_NAME - chunk.qualifier.value = QUALIFIER - chunk.timestamp_micros = TIMESTAMP_MICROS - chunk.labels.extend(LABELS) - yrd._copy_from_current(chunk) - self.assertEqual(chunk.row_key, ROW_KEY) - self.assertEqual(chunk.family_name.value, FAMILY_NAME) - self.assertEqual(chunk.qualifier.value, QUALIFIER) - self.assertEqual(chunk.timestamp_micros, TIMESTAMP_MICROS) - self.assertEqual(chunk.labels, LABELS) - - def test__copy_from_current_empty_chunk(self): - client = _Client() - client._data_stub = mock.MagicMock() - request = object() - yrd = self._make_one(client._data_stub.ReadRows, request) - yrd._cell = _PartialCellData() - yrd._cell.qualifier = b'' - chunks = _generate_cell_chunks(['']) - chunk = chunks[0] - yrd._copy_from_current(chunk) - self.assertEqual(chunk.row_key, b'') - self.assertEqual(chunk.family_name.value, '') - self.assertEqual(chunk.qualifier.value, b'') - self.assertEqual(chunk.timestamp_micros, 0) - self.assertEqual(chunk.labels, []) - def test__copy_from_previous_unset(self): client = _Client() client._data_stub = mock.MagicMock() @@ -579,31 +561,18 @@ def test__copy_from_previous_filled(self): self.assertEqual(cell.timestamp_micros, 0) self.assertEqual(cell.labels, []) - def test_invalid_last_scanned_row_key_on_start(self): - from google.cloud.bigtable.row_data import InvalidReadRowsResponse - - client = _Client() - response = 
_ReadRowsResponseV2(chunks=(), last_scanned_row_key='ABC') - iterator = _MockCancellableIterator(response) - client._data_stub = mock.MagicMock() - client._data_stub.ReadRows.side_effect = [iterator] - request = object() - yrd = self._make_one(client._data_stub.ReadRows, request) - with self.assertRaises(InvalidReadRowsResponse): - self._consume_all(yrd) - def test_valid_last_scanned_row_key_on_start(self): client = _Client() response = _ReadRowsResponseV2( - chunks=(), last_scanned_row_key='AFTER') + chunks=(), last_scanned_row_key='2.AFTER') iterator = _MockCancellableIterator(response) client._data_stub = mock.MagicMock() client._data_stub.ReadRows.side_effect = [iterator] request = object() yrd = self._make_one(client._data_stub.ReadRows, request) - yrd.last_scanned_row_key = 'BEFORE' + yrd.last_scanned_row_key = '1.BEFORE' self._consume_all(yrd) - self.assertEqual(yrd.last_scanned_row_key, 'AFTER') + self.assertEqual(yrd.last_scanned_row_key, '2.AFTER') def test_invalid_empty_chunk(self): from google.cloud.bigtable.row_data import InvalidChunk @@ -622,46 +591,30 @@ def test_invalid_empty_chunk(self): def test_state_cell_in_progress(self): LABELS = ['L1', 'L2'] - client = _Client() + request = object() + read_rows = mock.MagicMock() + yrd = self._make_one(read_rows, request) + chunk = _ReadRowsResponseCellChunkPB( row_key=self.ROW_KEY, family_name=self.FAMILY_NAME, qualifier=self.QUALIFIER, timestamp_micros=self.TIMESTAMP_MICROS, value=self.VALUE, - commit_row=True, - ) - chunks = [chunk] - response = _ReadRowsResponseV2(chunks) - iterator = _MockCancellableIterator(response) - client._data_stub = mock.MagicMock() - client._data_stub.ReadRows.side_effect = [iterator] - request = object() - yrd = self._make_one(client._data_stub.ReadRows, request) - self._consume_all(yrd) - yrd._last_scanned_row_key = '' - yrd._row = object() - cell = _PartialCellData( - row_key=self.ROW_KEY, - family_name=self.FAMILY_NAME, - qualifier=self.QUALIFIER, - timestamp_micros=self.TIMESTAMP_MICROS, labels=LABELS ) + yrd._update_cell(chunk) - yrd._cell = cell - more_cell_data = _PartialCellData( - value=self.VALUE - ) - - yrd._validate_cell_data(more_cell_data) + more_cell_data = _ReadRowsResponseCellChunkPB(value=self.VALUE) + yrd._update_cell(more_cell_data) - self.assertEqual(more_cell_data.row_key, self.ROW_KEY) - self.assertEqual(more_cell_data.family_name, self.FAMILY_NAME) - self.assertEqual(more_cell_data.qualifier, self.QUALIFIER) - self.assertEqual(more_cell_data.timestamp_micros, + self.assertEqual(yrd._cell.row_key, self.ROW_KEY) + self.assertEqual(yrd._cell.family_name, self.FAMILY_NAME) + self.assertEqual(yrd._cell.qualifier, self.QUALIFIER) + self.assertEqual(yrd._cell.timestamp_micros, self.TIMESTAMP_MICROS) - self.assertEqual(more_cell_data.labels, LABELS) + self.assertEqual(yrd._cell.labels, LABELS) + self.assertEqual(yrd._cell.value, self.VALUE + self.VALUE) def test_yield_rows_data(self): client = _Client() @@ -1153,11 +1106,15 @@ def _ReadRowsResponseCellChunkPB(*args, **kw): from google.cloud.bigtable_v2.proto import ( bigtable_pb2 as messages_v2_pb2) - family_name = kw.pop('family_name') - qualifier = kw.pop('qualifier') + family_name = kw.pop('family_name', None) + qualifier = kw.pop('qualifier', None) message = messages_v2_pb2.ReadRowsResponse.CellChunk(*args, **kw) - message.family_name.value = family_name - message.qualifier.value = qualifier + + if family_name: + message.family_name.value = family_name + if qualifier: + message.qualifier.value = qualifier + return message From 
22c93d270512f013f87f41334fde19f2027deebf Mon Sep 17 00:00:00 2001 From: DPE bot Date: Fri, 14 Sep 2018 11:32:19 -0700 Subject: [PATCH 178/892] Re-generate library using bigtable/synth.py (#5974) --- .../gapic/bigtable_instance_admin_client.py | 96 +++++++++---------- .../gapic/bigtable_table_admin_client.py | 60 ++++++------ .../bigtable_v2/gapic/bigtable_client.py | 20 ++-- 3 files changed, 88 insertions(+), 88 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py index 60646e45b5f9..6e2e7f25063b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py @@ -296,8 +296,8 @@ def create_instance(self, 'create_instance'] = google.api_core.gapic_v1.method.wrap_method( self.transport.create_instance, default_retry=self._method_configs['CreateInstance'].retry, - default_timeout=self._method_configs['CreateInstance'] - .timeout, + default_timeout=self._method_configs['CreateInstance']. + timeout, client_info=self._client_info, ) @@ -373,8 +373,8 @@ def get_instance(self, 'get_instance'] = google.api_core.gapic_v1.method.wrap_method( self.transport.get_instance, default_retry=self._method_configs['GetInstance'].retry, - default_timeout=self._method_configs['GetInstance'] - .timeout, + default_timeout=self._method_configs['GetInstance']. + timeout, client_info=self._client_info, ) @@ -441,8 +441,8 @@ def list_instances(self, 'list_instances'] = google.api_core.gapic_v1.method.wrap_method( self.transport.list_instances, default_retry=self._method_configs['ListInstances'].retry, - default_timeout=self._method_configs['ListInstances'] - .timeout, + default_timeout=self._method_configs['ListInstances']. + timeout, client_info=self._client_info, ) @@ -542,8 +542,8 @@ def update_instance(self, 'update_instance'] = google.api_core.gapic_v1.method.wrap_method( self.transport.update_instance, default_retry=self._method_configs['UpdateInstance'].retry, - default_timeout=self._method_configs['UpdateInstance'] - .timeout, + default_timeout=self._method_configs['UpdateInstance']. + timeout, client_info=self._client_info, ) @@ -633,10 +633,10 @@ def partial_update_instance( self._inner_api_calls[ 'partial_update_instance'] = google.api_core.gapic_v1.method.wrap_method( self.transport.partial_update_instance, - default_retry=self._method_configs['PartialUpdateInstance'] - .retry, - default_timeout=self._method_configs[ - 'PartialUpdateInstance'].timeout, + default_retry=self. + _method_configs['PartialUpdateInstance'].retry, + default_timeout=self. + _method_configs['PartialUpdateInstance'].timeout, client_info=self._client_info, ) @@ -707,8 +707,8 @@ def delete_instance(self, 'delete_instance'] = google.api_core.gapic_v1.method.wrap_method( self.transport.delete_instance, default_retry=self._method_configs['DeleteInstance'].retry, - default_timeout=self._method_configs['DeleteInstance'] - .timeout, + default_timeout=self._method_configs['DeleteInstance']. 
+ timeout, client_info=self._client_info, ) @@ -799,8 +799,8 @@ def create_cluster(self, 'create_cluster'] = google.api_core.gapic_v1.method.wrap_method( self.transport.create_cluster, default_retry=self._method_configs['CreateCluster'].retry, - default_timeout=self._method_configs['CreateCluster'] - .timeout, + default_timeout=self._method_configs['CreateCluster']. + timeout, client_info=self._client_info, ) @@ -944,8 +944,8 @@ def list_clusters(self, 'list_clusters'] = google.api_core.gapic_v1.method.wrap_method( self.transport.list_clusters, default_retry=self._method_configs['ListClusters'].retry, - default_timeout=self._method_configs['ListClusters'] - .timeout, + default_timeout=self._method_configs['ListClusters']. + timeout, client_info=self._client_info, ) @@ -1042,8 +1042,8 @@ def update_cluster(self, 'update_cluster'] = google.api_core.gapic_v1.method.wrap_method( self.transport.update_cluster, default_retry=self._method_configs['UpdateCluster'].retry, - default_timeout=self._method_configs['UpdateCluster'] - .timeout, + default_timeout=self._method_configs['UpdateCluster']. + timeout, client_info=self._client_info, ) @@ -1117,8 +1117,8 @@ def delete_cluster(self, 'delete_cluster'] = google.api_core.gapic_v1.method.wrap_method( self.transport.delete_cluster, default_retry=self._method_configs['DeleteCluster'].retry, - default_timeout=self._method_configs['DeleteCluster'] - .timeout, + default_timeout=self._method_configs['DeleteCluster']. + timeout, client_info=self._client_info, ) @@ -1200,10 +1200,10 @@ def create_app_profile(self, self._inner_api_calls[ 'create_app_profile'] = google.api_core.gapic_v1.method.wrap_method( self.transport.create_app_profile, - default_retry=self._method_configs[ - 'CreateAppProfile'].retry, - default_timeout=self._method_configs['CreateAppProfile'] - .timeout, + default_retry=self._method_configs['CreateAppProfile']. + retry, + default_timeout=self._method_configs['CreateAppProfile']. + timeout, client_info=self._client_info, ) @@ -1273,8 +1273,8 @@ def get_app_profile(self, 'get_app_profile'] = google.api_core.gapic_v1.method.wrap_method( self.transport.get_app_profile, default_retry=self._method_configs['GetAppProfile'].retry, - default_timeout=self._method_configs['GetAppProfile'] - .timeout, + default_timeout=self._method_configs['GetAppProfile']. + timeout, client_info=self._client_info, ) @@ -1359,10 +1359,10 @@ def list_app_profiles(self, self._inner_api_calls[ 'list_app_profiles'] = google.api_core.gapic_v1.method.wrap_method( self.transport.list_app_profiles, - default_retry=self._method_configs[ - 'ListAppProfiles'].retry, - default_timeout=self._method_configs['ListAppProfiles'] - .timeout, + default_retry=self._method_configs['ListAppProfiles']. + retry, + default_timeout=self._method_configs['ListAppProfiles']. + timeout, client_info=self._client_info, ) @@ -1461,10 +1461,10 @@ def update_app_profile(self, self._inner_api_calls[ 'update_app_profile'] = google.api_core.gapic_v1.method.wrap_method( self.transport.update_app_profile, - default_retry=self._method_configs[ - 'UpdateAppProfile'].retry, - default_timeout=self._method_configs['UpdateAppProfile'] - .timeout, + default_retry=self._method_configs['UpdateAppProfile']. + retry, + default_timeout=self._method_configs['UpdateAppProfile']. 
+ timeout, client_info=self._client_info, ) @@ -1540,10 +1540,10 @@ def delete_app_profile(self, self._inner_api_calls[ 'delete_app_profile'] = google.api_core.gapic_v1.method.wrap_method( self.transport.delete_app_profile, - default_retry=self._method_configs[ - 'DeleteAppProfile'].retry, - default_timeout=self._method_configs['DeleteAppProfile'] - .timeout, + default_retry=self._method_configs['DeleteAppProfile']. + retry, + default_timeout=self._method_configs['DeleteAppProfile']. + timeout, client_info=self._client_info, ) @@ -1613,8 +1613,8 @@ def get_iam_policy(self, 'get_iam_policy'] = google.api_core.gapic_v1.method.wrap_method( self.transport.get_iam_policy, default_retry=self._method_configs['GetIamPolicy'].retry, - default_timeout=self._method_configs['GetIamPolicy'] - .timeout, + default_timeout=self._method_configs['GetIamPolicy']. + timeout, client_info=self._client_info, ) @@ -1691,8 +1691,8 @@ def set_iam_policy(self, 'set_iam_policy'] = google.api_core.gapic_v1.method.wrap_method( self.transport.set_iam_policy, default_retry=self._method_configs['SetIamPolicy'].retry, - default_timeout=self._method_configs['SetIamPolicy'] - .timeout, + default_timeout=self._method_configs['SetIamPolicy']. + timeout, client_info=self._client_info, ) @@ -1768,10 +1768,10 @@ def test_iam_permissions(self, self._inner_api_calls[ 'test_iam_permissions'] = google.api_core.gapic_v1.method.wrap_method( self.transport.test_iam_permissions, - default_retry=self._method_configs[ - 'TestIamPermissions'].retry, - default_timeout=self._method_configs['TestIamPermissions'] - .timeout, + default_retry=self._method_configs['TestIamPermissions']. + retry, + default_timeout=self._method_configs['TestIamPermissions']. + timeout, client_info=self._client_info, ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py index f550e8ef3013..f758608a88ac 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py @@ -300,8 +300,8 @@ def create_table(self, 'create_table'] = google.api_core.gapic_v1.method.wrap_method( self.transport.create_table, default_retry=self._method_configs['CreateTable'].retry, - default_timeout=self._method_configs['CreateTable'] - .timeout, + default_timeout=self._method_configs['CreateTable']. + timeout, client_info=self._client_info, ) @@ -401,10 +401,10 @@ def create_table_from_snapshot( self._inner_api_calls[ 'create_table_from_snapshot'] = google.api_core.gapic_v1.method.wrap_method( self.transport.create_table_from_snapshot, - default_retry=self._method_configs[ - 'CreateTableFromSnapshot'].retry, - default_timeout=self._method_configs[ - 'CreateTableFromSnapshot'].timeout, + default_retry=self. + _method_configs['CreateTableFromSnapshot'].retry, + default_timeout=self. + _method_configs['CreateTableFromSnapshot'].timeout, client_info=self._client_info, ) @@ -651,8 +651,8 @@ def delete_table(self, 'delete_table'] = google.api_core.gapic_v1.method.wrap_method( self.transport.delete_table, default_retry=self._method_configs['DeleteTable'].retry, - default_timeout=self._method_configs['DeleteTable'] - .timeout, + default_timeout=self._method_configs['DeleteTable']. 
+ timeout, client_info=self._client_info, ) @@ -730,10 +730,10 @@ def modify_column_families(self, self._inner_api_calls[ 'modify_column_families'] = google.api_core.gapic_v1.method.wrap_method( self.transport.modify_column_families, - default_retry=self._method_configs['ModifyColumnFamilies'] - .retry, - default_timeout=self._method_configs[ - 'ModifyColumnFamilies'].timeout, + default_retry=self._method_configs['ModifyColumnFamilies']. + retry, + default_timeout=self. + _method_configs['ModifyColumnFamilies'].timeout, client_info=self._client_info, ) @@ -806,8 +806,8 @@ def drop_row_range(self, 'drop_row_range'] = google.api_core.gapic_v1.method.wrap_method( self.transport.drop_row_range, default_retry=self._method_configs['DropRowRange'].retry, - default_timeout=self._method_configs['DropRowRange'] - .timeout, + default_timeout=self._method_configs['DropRowRange']. + timeout, client_info=self._client_info, ) @@ -887,10 +887,10 @@ def generate_consistency_token( self._inner_api_calls[ 'generate_consistency_token'] = google.api_core.gapic_v1.method.wrap_method( self.transport.generate_consistency_token, - default_retry=self._method_configs[ - 'GenerateConsistencyToken'].retry, - default_timeout=self._method_configs[ - 'GenerateConsistencyToken'].timeout, + default_retry=self. + _method_configs['GenerateConsistencyToken'].retry, + default_timeout=self. + _method_configs['GenerateConsistencyToken'].timeout, client_info=self._client_info, ) @@ -963,10 +963,10 @@ def check_consistency(self, self._inner_api_calls[ 'check_consistency'] = google.api_core.gapic_v1.method.wrap_method( self.transport.check_consistency, - default_retry=self._method_configs[ - 'CheckConsistency'].retry, - default_timeout=self._method_configs['CheckConsistency'] - .timeout, + default_retry=self._method_configs['CheckConsistency']. + retry, + default_timeout=self._method_configs['CheckConsistency']. + timeout, client_info=self._client_info, ) @@ -1078,8 +1078,8 @@ def snapshot_table(self, 'snapshot_table'] = google.api_core.gapic_v1.method.wrap_method( self.transport.snapshot_table, default_retry=self._method_configs['SnapshotTable'].retry, - default_timeout=self._method_configs['SnapshotTable'] - .timeout, + default_timeout=self._method_configs['SnapshotTable']. + timeout, client_info=self._client_info, ) @@ -1163,8 +1163,8 @@ def get_snapshot(self, 'get_snapshot'] = google.api_core.gapic_v1.method.wrap_method( self.transport.get_snapshot, default_retry=self._method_configs['GetSnapshot'].retry, - default_timeout=self._method_configs['GetSnapshot'] - .timeout, + default_timeout=self._method_configs['GetSnapshot']. + timeout, client_info=self._client_info, ) @@ -1259,8 +1259,8 @@ def list_snapshots(self, 'list_snapshots'] = google.api_core.gapic_v1.method.wrap_method( self.transport.list_snapshots, default_retry=self._method_configs['ListSnapshots'].retry, - default_timeout=self._method_configs['ListSnapshots'] - .timeout, + default_timeout=self._method_configs['ListSnapshots']. + timeout, client_info=self._client_info, ) @@ -1343,8 +1343,8 @@ def delete_snapshot(self, 'delete_snapshot'] = google.api_core.gapic_v1.method.wrap_method( self.transport.delete_snapshot, default_retry=self._method_configs['DeleteSnapshot'].retry, - default_timeout=self._method_configs['DeleteSnapshot'] - .timeout, + default_timeout=self._method_configs['DeleteSnapshot']. 
+ timeout, client_info=self._client_info, ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py index b7a3476e0a94..c4149b0d214a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py @@ -313,8 +313,8 @@ def sample_row_keys(self, 'sample_row_keys'] = google.api_core.gapic_v1.method.wrap_method( self.transport.sample_row_keys, default_retry=self._method_configs['SampleRowKeys'].retry, - default_timeout=self._method_configs['SampleRowKeys'] - .timeout, + default_timeout=self._method_configs['SampleRowKeys']. + timeout, client_info=self._client_info, ) @@ -589,10 +589,10 @@ def check_and_mutate_row(self, self._inner_api_calls[ 'check_and_mutate_row'] = google.api_core.gapic_v1.method.wrap_method( self.transport.check_and_mutate_row, - default_retry=self._method_configs[ - 'CheckAndMutateRow'].retry, - default_timeout=self._method_configs['CheckAndMutateRow'] - .timeout, + default_retry=self._method_configs['CheckAndMutateRow']. + retry, + default_timeout=self._method_configs['CheckAndMutateRow']. + timeout, client_info=self._client_info, ) @@ -686,10 +686,10 @@ def read_modify_write_row(self, self._inner_api_calls[ 'read_modify_write_row'] = google.api_core.gapic_v1.method.wrap_method( self.transport.read_modify_write_row, - default_retry=self._method_configs[ - 'ReadModifyWriteRow'].retry, - default_timeout=self._method_configs['ReadModifyWriteRow'] - .timeout, + default_retry=self._method_configs['ReadModifyWriteRow']. + retry, + default_timeout=self._method_configs['ReadModifyWriteRow']. + timeout, client_info=self._client_info, ) From dff4d2822fdd2f8804e734e7ed20da4056ebab3a Mon Sep 17 00:00:00 2001 From: Alex <7764119+AVaksman@users.noreply.github.com> Date: Mon, 17 Sep 2018 12:58:25 -0400 Subject: [PATCH 179/892] Bigtable: smart retry for 'read_rows()' and 'read_rows(rows_limit)' requests (#5966) Closes #5876. 
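For illustration only (not part of the patch): a minimal, library-independent sketch of the retry idea. Given the row keys / ranges of the original request and the last row key that was successfully scanned, a retry should only ask for rows that are still outstanding. The helper name 'filter_retry_rows' and the plain ``(start_key, end_key)`` tuples are hypothetical, used only to show the filtering logic; the actual change (below) applies the same idea to the ReadRowsRequest protobuf via '_ReadRowsRequestManager'.

    # Hypothetical sketch; names here are illustrative, not library API.
    def filter_retry_rows(row_keys, row_ranges, last_scanned_key,
                          rows_read, rows_limit=None):
        """Compute what a retried read should still ask for.

        ``row_ranges`` is a list of ``(start_key, end_key)`` byte tuples,
        where ``b''`` means open-ended (beginning / end of table).
        """
        # Row keys at or before the last scanned key were already returned.
        retry_keys = [key for key in row_keys if key > last_scanned_key]

        retry_ranges = []
        for start, end in row_ranges:
            if end and end <= last_scanned_key:
                continue  # the entire range was already read; drop it
            if start <= last_scanned_key:
                # Resume from the last scanned key; the real request marks
                # it as an *open* start key so that row is not re-read.
                start = last_scanned_key
            retry_ranges.append((start, end))

        # Only request the rows that have not been returned yet.
        retry_limit = None
        if rows_limit is not None:
            retry_limit = max(1, rows_limit - rows_read)
        return retry_keys, retry_ranges, retry_limit

    # Example: 3 of 8 requested rows were read, ending at b'row_key4'.
    keys, ranges, limit = filter_retry_rows(
        row_keys=[b'row_key1', b'row_key5', b'row_key9'],
        row_ranges=[(b'row_key1', b'row_key4'), (b'row_key6', b'')],
        last_scanned_key=b'row_key4',
        rows_read=3,
        rows_limit=8,
    )
    # keys   == [b'row_key5', b'row_key9']
    # ranges == [(b'row_key6', b'')]
    # limit  == 5

The patch below implements this on the protobuf request in '_ReadRowsRequestManager.build_updated_request', adding a 'start_key_open' row range when the original request targeted the whole table.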
--- .../google/cloud/bigtable/row_data.py | 73 +++++++++----- .../tests/unit/test_row_data.py | 99 ++++++++++++++++++- 2 files changed, 144 insertions(+), 28 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py index f2dd0be01a35..510f28ca2b70 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py @@ -388,15 +388,16 @@ def _create_retry_request(self): req_manager = _ReadRowsRequestManager(self.request, self.last_scanned_row_key, self._counter) - self.request = req_manager.build_updated_request() + return req_manager.build_updated_request() def _on_error(self, exc): """Helper for :meth:`__iter__`.""" # restart the read scan from AFTER the last successfully read row + retry_request = self.request if self.last_scanned_row_key: - self._create_retry_request() + retry_request = self._create_retry_request() - self.response_iterator = self.read_method(self.request) + self.response_iterator = self.read_method(retry_request) def _read_next(self): """Helper for :meth:`__iter__`.""" @@ -564,11 +565,18 @@ def build_updated_request(self): r_kwargs['rows_limit'] = max(1, self.message.rows_limit - self.rows_read_so_far) - row_keys = self._filter_rows_keys() - row_ranges = self._filter_row_ranges() - r_kwargs['rows'] = data_v2_pb2.RowSet(row_keys=row_keys, - row_ranges=row_ranges) - + # if neither RowSet.row_keys nor RowSet.row_ranges currently exist, + # add row_range that starts with last_scanned_key as start_key_open + # to request only rows that have not been returned yet + if not self.message.HasField('rows'): + row_range = data_v2_pb2.RowRange( + start_key_open=self.last_scanned_key) + r_kwargs['rows'] = data_v2_pb2.RowSet(row_ranges=[row_range]) + else: + row_keys = self._filter_rows_keys() + row_ranges = self._filter_row_ranges() + r_kwargs['rows'] = data_v2_pb2.RowSet(row_keys=row_keys, + row_ranges=row_ranges) return data_messages_v2_pb2.ReadRowsRequest(**r_kwargs) def _filter_rows_keys(self): @@ -581,22 +589,29 @@ def _filter_row_ranges(self): new_row_ranges = [] for row_range in self.message.rows.row_ranges: - if((row_range.end_key_open and - self._key_already_read(row_range.end_key_open)) or - (row_range.end_key_closed and - self._key_already_read(row_range.end_key_closed))): - continue - - if ((row_range.start_key_open and - self._key_already_read(row_range.start_key_open)) or - (row_range.start_key_closed and - self._key_already_read(row_range.start_key_closed))): - row_range.start_key_closed = _to_bytes("") - row_range.start_key_open = self.last_scanned_key - - new_row_ranges.append(row_range) - else: - new_row_ranges.append(row_range) + # if current end_key (open or closed) is set, return its value, + # if not, set to empty string (''). 
+ # NOTE: Empty string in end_key means "end of table" + end_key = self._end_key_set(row_range) + # if end_key is already read, skip to the next row_range + if(end_key and self._key_already_read(end_key)): + continue + + # if current start_key (open or closed) is set, return its value, + # if not, then set to empty string ('') + # NOTE: Empty string in start_key means "beginning of table" + start_key = self._start_key_set(row_range) + + # if start_key was already read or doesn't exist, + # create a row_range with last_scanned_key as start_key_open + # to be passed to retry request + retry_row_range = row_range + if(self._key_already_read(start_key)): + retry_row_range = copy.deepcopy(row_range) + retry_row_range.start_key_closed = _to_bytes("") + retry_row_range.start_key_open = self.last_scanned_key + + new_row_ranges.append(retry_row_range) return new_row_ranges @@ -604,6 +619,16 @@ def _key_already_read(self, key): """ Helper for :meth:`_filter_row_ranges`""" return key <= self.last_scanned_key + @staticmethod + def _start_key_set(row_range): + """ Helper for :meth:`_filter_row_ranges`""" + return row_range.start_key_open or row_range.start_key_closed + + @staticmethod + def _end_key_set(row_range): + """ Helper for :meth:`_filter_row_ranges`""" + return row_range.end_key_open or row_range.end_key_closed + def _raise_if(predicate, *args): """Helper for validation methods.""" diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_data.py b/packages/google-cloud-bigtable/tests/unit/test_row_data.py index c11182ae6a8e..5278d8eb98d8 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_data.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_data.py @@ -748,12 +748,12 @@ def test__filter_row_ranges_some_ranges_already_read(self): end_key_open=b"row_key49") exp_row_ranges = [exp_row_range1, exp_row_range2, exp_row_range3] - self.assertEqual(row_ranges, exp_row_ranges) + self.assertEqual(exp_row_ranges, row_ranges) def test_build_updated_request(self): from google.cloud.bigtable.row_filters import RowSampleFilter row_filter = RowSampleFilter(0.33) - last_scanned_key = b"row_key14" + last_scanned_key = b"row_key25" request = _ReadRowsRequestPB(filter=row_filter.to_pb(), rows_limit=8, table_name=self.table_name) @@ -766,9 +766,100 @@ def test_build_updated_request(self): expected_result = _ReadRowsRequestPB(table_name=self.table_name, filter=row_filter.to_pb(), rows_limit=6) - expected_result.rows.row_ranges.add(**self.row_range1. 
- get_range_kwargs()) + expected_result.rows.row_ranges.add( + start_key_open=last_scanned_key, + end_key_open=self.row_range1.end_key + ) + + self.assertEqual(expected_result, result) + + def test_build_updated_request_full_table(self): + last_scanned_key = b"row_key14" + + request = _ReadRowsRequestPB(table_name=self.table_name) + request_manager = self._make_one(request, last_scanned_key, 2) + + result = request_manager.build_updated_request() + expected_result = _ReadRowsRequestPB(table_name=self.table_name, + filter={}) + expected_result.rows.row_ranges.add(start_key_open=last_scanned_key) + self.assertEqual(expected_result, result) + + def test_build_updated_request_no_start_key(self): + from google.cloud.bigtable.row_filters import RowSampleFilter + row_filter = RowSampleFilter(0.33) + last_scanned_key = b"row_key25" + request = _ReadRowsRequestPB(filter=row_filter.to_pb(), + rows_limit=8, + table_name=self.table_name) + request.rows.row_ranges.add(end_key_open=b"row_key29") + + request_manager = self._make_one(request, last_scanned_key, 2) + + result = request_manager.build_updated_request() + + expected_result = _ReadRowsRequestPB(table_name=self.table_name, + filter=row_filter.to_pb(), + rows_limit=6) + expected_result.rows.row_ranges.add(start_key_open=last_scanned_key, + end_key_open=b"row_key29") + + self.assertEqual(expected_result, result) + + def test_build_updated_request_no_end_key(self): + from google.cloud.bigtable.row_filters import RowSampleFilter + row_filter = RowSampleFilter(0.33) + last_scanned_key = b"row_key25" + request = _ReadRowsRequestPB(filter=row_filter.to_pb(), + rows_limit=8, + table_name=self.table_name) + request.rows.row_ranges.add(start_key_closed=b"row_key20") + + request_manager = self._make_one(request, last_scanned_key, 2) + + result = request_manager.build_updated_request() + + expected_result = _ReadRowsRequestPB(table_name=self.table_name, + filter=row_filter.to_pb(), + rows_limit=6) + expected_result.rows.row_ranges.add(start_key_open=last_scanned_key) + + self.assertEqual(expected_result, result) + def test_build_updated_request_rows(self): + from google.cloud.bigtable.row_filters import RowSampleFilter + row_filter = RowSampleFilter(0.33) + last_scanned_key = b"row_key4" + request = _ReadRowsRequestPB(filter=row_filter.to_pb(), + rows_limit=5, + table_name=self.table_name) + request.rows.row_keys.extend([b'row_key1', b'row_key2', + b'row_key4', b'row_key5', + b'row_key7', b'row_key9']) + + request_manager = self._make_one(request, last_scanned_key, 3) + + result = request_manager.build_updated_request() + + expected_result = _ReadRowsRequestPB(table_name=self.table_name, + filter=row_filter.to_pb(), + rows_limit=2) + expected_result.rows.row_keys.extend([b'row_key5', b'row_key7', + b'row_key9']) + + self.assertEqual(expected_result, result) + + def test_build_updated_request_rows_limit(self): + last_scanned_key = b"row_key14" + + request = _ReadRowsRequestPB(table_name=self.table_name, rows_limit=10) + request_manager = self._make_one(request, last_scanned_key, 2) + + result = request_manager.build_updated_request() + expected_result = _ReadRowsRequestPB(table_name=self.table_name, + filter={}, + rows_limit=8) + expected_result.rows.row_ranges.add(start_key_open=last_scanned_key) self.assertEqual(expected_result, result) def test__key_already_read(self): From 0eb4d84083037156cfc121461e7d316beb193b6e Mon Sep 17 00:00:00 2001 From: Solomon Duskis Date: Mon, 17 Sep 2018 13:22:42 -0400 Subject: [PATCH 180/892] Release bigtable 0.30.2 (#5997) 
--- packages/google-cloud-bigtable/CHANGELOG.md | 15 +++++++++++++++ packages/google-cloud-bigtable/setup.py | 2 +- 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index db4581862ca0..c8a3cd1d07a9 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,21 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## 0.30.2 + +### New Features +- Add iam policy implementation for an instance. (#5838) + +### Implementation Changes +- Fix smart retries for 'read_rows()' when reading the full table (#5966) + +### Documentation +- Replace links to `/stable/` with `/latest/`. (#5901) + +### Internal / Testing Changes +- Re-generate library using bigtable/synth.py (#5974) +- Refactor `read_rows` infrastructure (#5963) + ## 0.30.1 ### Implementation changes diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 54ac831a81fb..4666a502f1b8 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -22,7 +22,7 @@ name = 'google-cloud-bigtable' description = 'Google Cloud Bigtable API client library' -version = '0.30.1' +version = '0.30.2' # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' From aa70573cbd5c3edb48d4de23959b92f59748010c Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 18 Sep 2018 17:17:27 -0400 Subject: [PATCH 181/892] Re-synth to pick up new deadline configuration. (#6010) Closes #5928. --- .../bigtable_instance_admin_client_config.py | 32 ++--- .../bigtable_table_admin_client_config.py | 34 ++--- .../cloud/bigtable_admin_v2/gapic/enums.py | 130 +++++++++--------- 3 files changed, 89 insertions(+), 107 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client_config.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client_config.py index 5b738628f32f..9107ed551462 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client_config.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client_config.py @@ -23,12 +23,12 @@ "retry_params_name": "default" }, "GetInstance": { - "timeout_millis": 60000, + "timeout_millis": 120000, "retry_codes_name": "idempotent", "retry_params_name": "default" }, "ListInstances": { - "timeout_millis": 60000, + "timeout_millis": 120000, "retry_codes_name": "idempotent", "retry_params_name": "default" }, @@ -43,8 +43,8 @@ "retry_params_name": "default" }, "DeleteInstance": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", + "timeout_millis": 150000, + "retry_codes_name": "non_idempotent", "retry_params_name": "default" }, "CreateCluster": { @@ -53,12 +53,12 @@ "retry_params_name": "default" }, "GetCluster": { - "timeout_millis": 60000, + "timeout_millis": 120000, "retry_codes_name": "idempotent", "retry_params_name": "default" }, "ListClusters": { - "timeout_millis": 60000, + "timeout_millis": 120000, "retry_codes_name": "idempotent", "retry_params_name": "default" }, @@ -68,47 +68,47 @@ "retry_params_name": "default" }, "DeleteCluster": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", + "timeout_millis": 120000, + "retry_codes_name": "non_idempotent", "retry_params_name": "default" }, "CreateAppProfile": { - 
"timeout_millis": 60000, + "timeout_millis": 120000, "retry_codes_name": "non_idempotent", "retry_params_name": "default" }, "GetAppProfile": { - "timeout_millis": 60000, + "timeout_millis": 120000, "retry_codes_name": "idempotent", "retry_params_name": "default" }, "ListAppProfiles": { - "timeout_millis": 60000, + "timeout_millis": 120000, "retry_codes_name": "idempotent", "retry_params_name": "default" }, "UpdateAppProfile": { - "timeout_millis": 60000, + "timeout_millis": 120000, "retry_codes_name": "idempotent", "retry_params_name": "default" }, "DeleteAppProfile": { - "timeout_millis": 60000, + "timeout_millis": 120000, "retry_codes_name": "non_idempotent", "retry_params_name": "default" }, "GetIamPolicy": { - "timeout_millis": 60000, + "timeout_millis": 120000, "retry_codes_name": "idempotent", "retry_params_name": "default" }, "SetIamPolicy": { - "timeout_millis": 60000, + "timeout_millis": 120000, "retry_codes_name": "non_idempotent", "retry_params_name": "default" }, "TestIamPermissions": { - "timeout_millis": 60000, + "timeout_millis": 120000, "retry_codes_name": "idempotent", "retry_params_name": "default" } diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py index 61d5fcc95194..895c81a25c78 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py @@ -14,31 +14,13 @@ "rpc_timeout_multiplier": 1.0, "max_rpc_timeout_millis": 20000, "total_timeout_millis": 600000 - }, - "create_table": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 130000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 130000, - "total_timeout_millis": 3600000 - }, - "drop_row_range": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 900000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 900000, - "total_timeout_millis": 3600000 } }, "methods": { "CreateTable": { - "timeout_millis": 130000, + "timeout_millis": 900000, "retry_codes_name": "non_idempotent", - "retry_params_name": "create_table" + "retry_params_name": "default" }, "CreateTableFromSnapshot": { "timeout_millis": 60000, @@ -46,29 +28,29 @@ "retry_params_name": "default" }, "ListTables": { - "timeout_millis": 60000, + "timeout_millis": 120000, "retry_codes_name": "idempotent", "retry_params_name": "default" }, "GetTable": { - "timeout_millis": 60000, + "timeout_millis": 120000, "retry_codes_name": "idempotent", "retry_params_name": "default" }, "DeleteTable": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", + "timeout_millis": 120000, + "retry_codes_name": "non_idempotent", "retry_params_name": "default" }, "ModifyColumnFamilies": { - "timeout_millis": 60000, + "timeout_millis": 900000, "retry_codes_name": "non_idempotent", "retry_params_name": "default" }, "DropRowRange": { "timeout_millis": 900000, "retry_codes_name": "non_idempotent", - "retry_params_name": "drop_row_range" + "retry_params_name": "default" }, "GenerateConsistencyToken": { "timeout_millis": 60000, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py 
b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py index 6081506d671a..ac1e2d44e8bf 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py @@ -32,6 +32,71 @@ class StorageType(enum.IntEnum): HDD = 2 +class Instance(object): + class State(enum.IntEnum): + """ + Possible states of an instance. + + Attributes: + STATE_NOT_KNOWN (int): The state of the instance could not be determined. + READY (int): The instance has been successfully created and can serve requests + to its tables. + CREATING (int): The instance is currently being created, and may be destroyed + if the creation process encounters an error. + """ + STATE_NOT_KNOWN = 0 + READY = 1 + CREATING = 2 + + class Type(enum.IntEnum): + """ + The type of the instance. + + Attributes: + TYPE_UNSPECIFIED (int): The type of the instance is unspecified. If set when creating an + instance, a ``PRODUCTION`` instance will be created. If set when updating + an instance, the type will be left unchanged. + PRODUCTION (int): An instance meant for production use. ``serve_nodes`` must be set + on the cluster. + DEVELOPMENT (int): The instance is meant for development and testing purposes only; it has + no performance or uptime guarantees and is not covered by SLA. + After a development instance is created, it can be upgraded by + updating the instance to type ``PRODUCTION``. An instance created + as a production instance cannot be changed to a development instance. + When creating a development instance, ``serve_nodes`` on the cluster must + not be set. + """ + TYPE_UNSPECIFIED = 0 + PRODUCTION = 1 + DEVELOPMENT = 2 + + +class Cluster(object): + class State(enum.IntEnum): + """ + Possible states of a cluster. + + Attributes: + STATE_NOT_KNOWN (int): The state of the cluster could not be determined. + READY (int): The cluster has been successfully created and is ready to serve requests. + CREATING (int): The cluster is currently being created, and may be destroyed + if the creation process encounters an error. + A cluster may not be able to serve requests while being created. + RESIZING (int): The cluster is currently being resized, and may revert to its previous + node count if the process encounters an error. + A cluster is still capable of serving requests while being resized, + but may exhibit performance as if its number of allocated nodes is + between the starting and requested states. + DISABLED (int): The cluster has no backing nodes. The data (tables) still + exist, but no operations can be performed on the cluster. + """ + STATE_NOT_KNOWN = 0 + READY = 1 + CREATING = 2 + RESIZING = 3 + DISABLED = 4 + + class Table(object): class TimestampGranularity(enum.IntEnum): """ @@ -104,68 +169,3 @@ class State(enum.IntEnum): STATE_NOT_KNOWN = 0 READY = 1 CREATING = 2 - - -class Instance(object): - class State(enum.IntEnum): - """ - Possible states of an instance. - - Attributes: - STATE_NOT_KNOWN (int): The state of the instance could not be determined. - READY (int): The instance has been successfully created and can serve requests - to its tables. - CREATING (int): The instance is currently being created, and may be destroyed - if the creation process encounters an error. - """ - STATE_NOT_KNOWN = 0 - READY = 1 - CREATING = 2 - - class Type(enum.IntEnum): - """ - The type of the instance. - - Attributes: - TYPE_UNSPECIFIED (int): The type of the instance is unspecified. 
If set when creating an - instance, a ``PRODUCTION`` instance will be created. If set when updating - an instance, the type will be left unchanged. - PRODUCTION (int): An instance meant for production use. ``serve_nodes`` must be set - on the cluster. - DEVELOPMENT (int): The instance is meant for development and testing purposes only; it has - no performance or uptime guarantees and is not covered by SLA. - After a development instance is created, it can be upgraded by - updating the instance to type ``PRODUCTION``. An instance created - as a production instance cannot be changed to a development instance. - When creating a development instance, ``serve_nodes`` on the cluster must - not be set. - """ - TYPE_UNSPECIFIED = 0 - PRODUCTION = 1 - DEVELOPMENT = 2 - - -class Cluster(object): - class State(enum.IntEnum): - """ - Possible states of a cluster. - - Attributes: - STATE_NOT_KNOWN (int): The state of the cluster could not be determined. - READY (int): The cluster has been successfully created and is ready to serve requests. - CREATING (int): The cluster is currently being created, and may be destroyed - if the creation process encounters an error. - A cluster may not be able to serve requests while being created. - RESIZING (int): The cluster is currently being resized, and may revert to its previous - node count if the process encounters an error. - A cluster is still capable of serving requests while being resized, - but may exhibit performance as if its number of allocated nodes is - between the starting and requested states. - DISABLED (int): The cluster has no backing nodes. The data (tables) still - exist, but no operations can be performed on the cluster. - """ - STATE_NOT_KNOWN = 0 - READY = 1 - CREATING = 2 - RESIZING = 3 - DISABLED = 4 From dc01c0e449662953ef8b4026bc766483397fc240 Mon Sep 17 00:00:00 2001 From: DPE bot Date: Thu, 20 Sep 2018 09:48:12 -0700 Subject: [PATCH 182/892] Re-generate library using bigtable/synth.py (#6036) --- .../cloud/bigtable_admin_v2/gapic/enums.py | 130 +++++++++--------- 1 file changed, 65 insertions(+), 65 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py index ac1e2d44e8bf..6081506d671a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py @@ -32,71 +32,6 @@ class StorageType(enum.IntEnum): HDD = 2 -class Instance(object): - class State(enum.IntEnum): - """ - Possible states of an instance. - - Attributes: - STATE_NOT_KNOWN (int): The state of the instance could not be determined. - READY (int): The instance has been successfully created and can serve requests - to its tables. - CREATING (int): The instance is currently being created, and may be destroyed - if the creation process encounters an error. - """ - STATE_NOT_KNOWN = 0 - READY = 1 - CREATING = 2 - - class Type(enum.IntEnum): - """ - The type of the instance. - - Attributes: - TYPE_UNSPECIFIED (int): The type of the instance is unspecified. If set when creating an - instance, a ``PRODUCTION`` instance will be created. If set when updating - an instance, the type will be left unchanged. - PRODUCTION (int): An instance meant for production use. ``serve_nodes`` must be set - on the cluster. 
- DEVELOPMENT (int): The instance is meant for development and testing purposes only; it has - no performance or uptime guarantees and is not covered by SLA. - After a development instance is created, it can be upgraded by - updating the instance to type ``PRODUCTION``. An instance created - as a production instance cannot be changed to a development instance. - When creating a development instance, ``serve_nodes`` on the cluster must - not be set. - """ - TYPE_UNSPECIFIED = 0 - PRODUCTION = 1 - DEVELOPMENT = 2 - - -class Cluster(object): - class State(enum.IntEnum): - """ - Possible states of a cluster. - - Attributes: - STATE_NOT_KNOWN (int): The state of the cluster could not be determined. - READY (int): The cluster has been successfully created and is ready to serve requests. - CREATING (int): The cluster is currently being created, and may be destroyed - if the creation process encounters an error. - A cluster may not be able to serve requests while being created. - RESIZING (int): The cluster is currently being resized, and may revert to its previous - node count if the process encounters an error. - A cluster is still capable of serving requests while being resized, - but may exhibit performance as if its number of allocated nodes is - between the starting and requested states. - DISABLED (int): The cluster has no backing nodes. The data (tables) still - exist, but no operations can be performed on the cluster. - """ - STATE_NOT_KNOWN = 0 - READY = 1 - CREATING = 2 - RESIZING = 3 - DISABLED = 4 - - class Table(object): class TimestampGranularity(enum.IntEnum): """ @@ -169,3 +104,68 @@ class State(enum.IntEnum): STATE_NOT_KNOWN = 0 READY = 1 CREATING = 2 + + +class Instance(object): + class State(enum.IntEnum): + """ + Possible states of an instance. + + Attributes: + STATE_NOT_KNOWN (int): The state of the instance could not be determined. + READY (int): The instance has been successfully created and can serve requests + to its tables. + CREATING (int): The instance is currently being created, and may be destroyed + if the creation process encounters an error. + """ + STATE_NOT_KNOWN = 0 + READY = 1 + CREATING = 2 + + class Type(enum.IntEnum): + """ + The type of the instance. + + Attributes: + TYPE_UNSPECIFIED (int): The type of the instance is unspecified. If set when creating an + instance, a ``PRODUCTION`` instance will be created. If set when updating + an instance, the type will be left unchanged. + PRODUCTION (int): An instance meant for production use. ``serve_nodes`` must be set + on the cluster. + DEVELOPMENT (int): The instance is meant for development and testing purposes only; it has + no performance or uptime guarantees and is not covered by SLA. + After a development instance is created, it can be upgraded by + updating the instance to type ``PRODUCTION``. An instance created + as a production instance cannot be changed to a development instance. + When creating a development instance, ``serve_nodes`` on the cluster must + not be set. + """ + TYPE_UNSPECIFIED = 0 + PRODUCTION = 1 + DEVELOPMENT = 2 + + +class Cluster(object): + class State(enum.IntEnum): + """ + Possible states of a cluster. + + Attributes: + STATE_NOT_KNOWN (int): The state of the cluster could not be determined. + READY (int): The cluster has been successfully created and is ready to serve requests. + CREATING (int): The cluster is currently being created, and may be destroyed + if the creation process encounters an error. + A cluster may not be able to serve requests while being created. 
+ RESIZING (int): The cluster is currently being resized, and may revert to its previous + node count if the process encounters an error. + A cluster is still capable of serving requests while being resized, + but may exhibit performance as if its number of allocated nodes is + between the starting and requested states. + DISABLED (int): The cluster has no backing nodes. The data (tables) still + exist, but no operations can be performed on the cluster. + """ + STATE_NOT_KNOWN = 0 + READY = 1 + CREATING = 2 + RESIZING = 3 + DISABLED = 4 From 2a8e11669ccd1cf13cae15958cc0188b1989330f Mon Sep 17 00:00:00 2001 From: sumit-ql <39561577+sumit-ql@users.noreply.github.com> Date: Fri, 21 Sep 2018 00:01:43 +0530 Subject: [PATCH 183/892] Bigtable: Add '{RowSet,RowRange}.{__eq__,.__ne__}' (#6025) --- .../google/cloud/bigtable/row_set.py | 47 ++++++ .../tests/unit/test_row_set.py | 159 ++++++++++++++++++ 2 files changed, 206 insertions(+) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py index 0d5ae9903473..ab2f15231903 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py @@ -29,6 +29,27 @@ def __init__(self): self.row_keys = [] self.row_ranges = [] + def __eq__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + + if len(other.row_keys) != len(self.row_keys): + return False + + if len(other.row_ranges) != len(self.row_ranges): + return False + + if not set(other.row_keys) == set(self.row_keys): + return False + + if not set(other.row_ranges) == set(self.row_ranges): + return False + + return True + + def __ne__(self, other): + return not self == other + def add_row_key(self, row_key): """Add row key to row_keys list. @@ -112,6 +133,32 @@ def __init__(self, start_key=None, end_key=None, self.end_key = end_key self.end_inclusive = end_inclusive + def _key(self): + """A tuple key that uniquely describes this field. + + Used to compute this instance's hashcode and evaluate equality. + + Returns: + Tuple[str]: The contents of this :class:`.RowRange`. + """ + return ( + self.start_key, + self.start_inclusive, + self.end_key, + self.end_inclusive, + ) + + def __hash__(self): + return hash(self._key()) + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return self._key() == other._key() + + def __ne__(self, other): + return not self == other + def get_range_kwargs(self): """ Convert row range object to dict which can be passed to google.bigtable.v2.RowRange add method. 
diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_set.py b/packages/google-cloud-bigtable/tests/unit/test_row_set.py index 84640b616f98..990173b376c1 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_set.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_set.py @@ -32,6 +32,115 @@ def test_constructor(self): self.assertEqual([], row_set.row_keys) self.assertEqual([], row_set.row_ranges) + def test__eq__(self): + row_key1 = b"row_key1" + row_key2 = b"row_key1" + row_range1 = RowRange(b"row_key4", b"row_key9") + row_range2 = RowRange(b"row_key4", b"row_key9") + + row_set1 = self._make_one() + row_set2 = self._make_one() + + row_set1.add_row_key(row_key1) + row_set2.add_row_key(row_key2) + row_set1.add_row_range(row_range1) + row_set2.add_row_range(row_range2) + + self.assertEqual(row_set1, row_set2) + + def test__eq__type_differ(self): + row_set1 = self._make_one() + row_set2 = object() + self.assertNotEqual(row_set1, row_set2) + + def test__eq__len_row_keys_differ(self): + row_key1 = b"row_key1" + row_key2 = b"row_key1" + + row_set1 = self._make_one() + row_set2 = self._make_one() + + row_set1.add_row_key(row_key1) + row_set1.add_row_key(row_key2) + row_set2.add_row_key(row_key2) + + self.assertNotEqual(row_set1, row_set2) + + def test__eq__len_row_ranges_differ(self): + row_range1 = RowRange(b"row_key4", b"row_key9") + row_range2 = RowRange(b"row_key4", b"row_key9") + + row_set1 = self._make_one() + row_set2 = self._make_one() + + row_set1.add_row_range(row_range1) + row_set1.add_row_range(row_range2) + row_set2.add_row_range(row_range2) + + self.assertNotEqual(row_set1, row_set2) + + def test__eq__row_keys_differ(self): + row_set1 = self._make_one() + row_set2 = self._make_one() + + row_set1.add_row_key(b"row_key1") + row_set1.add_row_key(b"row_key2") + row_set1.add_row_key(b"row_key3") + row_set2.add_row_key(b"row_key1") + row_set2.add_row_key(b"row_key2") + row_set2.add_row_key(b"row_key4") + + self.assertNotEqual(row_set1, row_set2) + + def test__eq__row_ranges_differ(self): + row_range1 = RowRange(b"row_key4", b"row_key9") + row_range2 = RowRange(b"row_key14", b"row_key19") + row_range3 = RowRange(b"row_key24", b"row_key29") + + row_set1 = self._make_one() + row_set2 = self._make_one() + + row_set1.add_row_range(row_range1) + row_set1.add_row_range(row_range2) + row_set1.add_row_range(row_range3) + row_set2.add_row_range(row_range1) + row_set2.add_row_range(row_range2) + + self.assertNotEqual(row_set1, row_set2) + + def test__ne__(self): + row_key1 = b"row_key1" + row_key2 = b"row_key1" + row_range1 = RowRange(b"row_key4", b"row_key9") + row_range2 = RowRange(b"row_key5", b"row_key9") + + row_set1 = self._make_one() + row_set2 = self._make_one() + + row_set1.add_row_key(row_key1) + row_set2.add_row_key(row_key2) + row_set1.add_row_range(row_range1) + row_set2.add_row_range(row_range2) + + self.assertNotEqual(row_set1, row_set2) + + def test__ne__same_value(self): + row_key1 = b"row_key1" + row_key2 = b"row_key1" + row_range1 = RowRange(b"row_key4", b"row_key9") + row_range2 = RowRange(b"row_key4", b"row_key9") + + row_set1 = self._make_one() + row_set2 = self._make_one() + + row_set1.add_row_key(row_key1) + row_set2.add_row_key(row_key2) + row_set1.add_row_range(row_range1) + row_set2.add_row_range(row_range2) + + comparison_val = (row_set1 != row_set2) + self.assertFalse(comparison_val) + def test_add_row_key(self): row_set = self._make_one() row_set.add_row_key("row_key1") @@ -92,6 +201,56 @@ def test_constructor(self): 
self.assertTrue(row_range.start_inclusive) self.assertFalse(row_range.end_inclusive) + def test___hash__set_equality(self): + row_range1 = self._make_one('row_key1', 'row_key9') + row_range2 = self._make_one('row_key1', 'row_key9') + set_one = {row_range1, row_range2} + set_two = {row_range1, row_range2} + self.assertEqual(set_one, set_two) + + def test___hash__not_equals(self): + row_range1 = self._make_one('row_key1', 'row_key9') + row_range2 = self._make_one('row_key1', 'row_key19') + set_one = {row_range1} + set_two = {row_range2} + self.assertNotEqual(set_one, set_two) + + def test__eq__(self): + start_key = b"row_key1" + end_key = b"row_key9" + row_range1 = self._make_one(start_key, end_key, + True, False) + row_range2 = self._make_one(start_key, end_key, + True, False) + self.assertEqual(row_range1, row_range2) + + def test___eq__type_differ(self): + start_key = b"row_key1" + end_key = b"row_key9" + row_range1 = self._make_one(start_key, end_key, + True, False) + row_range2 = object() + self.assertNotEqual(row_range1, row_range2) + + def test__ne__(self): + start_key = b"row_key1" + end_key = b"row_key9" + row_range1 = self._make_one(start_key, end_key, + True, False) + row_range2 = self._make_one(start_key, end_key, + False, True) + self.assertNotEqual(row_range1, row_range2) + + def test__ne__same_value(self): + start_key = b"row_key1" + end_key = b"row_key9" + row_range1 = self._make_one(start_key, end_key, + True, False) + row_range2 = self._make_one(start_key, end_key, + True, False) + comparison_val = (row_range1 != row_range2) + self.assertFalse(comparison_val) + def test_get_range_kwargs_closed_open(self): start_key = b"row_key1" end_key = b"row_key9" From cc695935af800e9b9ac06b486d1df30a6422d6ff Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Fri, 21 Sep 2018 11:39:45 -0400 Subject: [PATCH 184/892] Bigtable: prep docs for repo split. (#6014) - Move 'docs/bigtable' -> 'bigtable/docs', leaving a symlink. - Harmonize / DRY 'bigtable/README.rst' and 'bigtable/docs/index.rst'. - Remove generated GAPIC docs (not part of the surface). 
--- packages/google-cloud-bigtable/README.rst | 103 ++++-- .../google-cloud-bigtable/docs/changelog.md | 1 + .../docs/client-intro.rst | 90 +++++ .../google-cloud-bigtable/docs/client.rst | 6 + .../google-cloud-bigtable/docs/cluster.rst | 6 + .../docs/column-family.rst | 49 +++ .../google-cloud-bigtable/docs/data-api.rst | 344 ++++++++++++++++++ .../docs/gapic/v2/api.rst | 6 - .../docs/gapic/v2/types.rst | 5 - packages/google-cloud-bigtable/docs/index.rst | 90 +---- .../docs/instance-api.rst | 135 +++++++ .../google-cloud-bigtable/docs/instance.rst | 6 + .../google-cloud-bigtable/docs/row-data.rst | 6 + .../docs/row-filters.rst | 67 ++++ packages/google-cloud-bigtable/docs/row.rst | 7 + .../google-cloud-bigtable/docs/table-api.rst | 154 ++++++++ packages/google-cloud-bigtable/docs/table.rst | 6 + packages/google-cloud-bigtable/docs/usage.rst | 28 ++ 18 files changed, 993 insertions(+), 116 deletions(-) create mode 120000 packages/google-cloud-bigtable/docs/changelog.md create mode 100644 packages/google-cloud-bigtable/docs/client-intro.rst create mode 100644 packages/google-cloud-bigtable/docs/client.rst create mode 100644 packages/google-cloud-bigtable/docs/cluster.rst create mode 100644 packages/google-cloud-bigtable/docs/column-family.rst create mode 100644 packages/google-cloud-bigtable/docs/data-api.rst delete mode 100644 packages/google-cloud-bigtable/docs/gapic/v2/api.rst delete mode 100644 packages/google-cloud-bigtable/docs/gapic/v2/types.rst create mode 100644 packages/google-cloud-bigtable/docs/instance-api.rst create mode 100644 packages/google-cloud-bigtable/docs/instance.rst create mode 100644 packages/google-cloud-bigtable/docs/row-data.rst create mode 100644 packages/google-cloud-bigtable/docs/row-filters.rst create mode 100644 packages/google-cloud-bigtable/docs/row.rst create mode 100644 packages/google-cloud-bigtable/docs/table-api.rst create mode 100644 packages/google-cloud-bigtable/docs/table.rst create mode 100644 packages/google-cloud-bigtable/docs/usage.rst diff --git a/packages/google-cloud-bigtable/README.rst b/packages/google-cloud-bigtable/README.rst index 48df63d52069..c93c7e7be327 100644 --- a/packages/google-cloud-bigtable/README.rst +++ b/packages/google-cloud-bigtable/README.rst @@ -1,53 +1,90 @@ -Python Client for Google Cloud Bigtable -======================================= +Python Client for Google Cloud Bigtable (`Alpha`_) +================================================== - Python idiomatic client for `Google Cloud Bigtable`_ +|pypi| |versions| -.. _Google Cloud Bigtable: https://cloud.google.com/bigtable/docs/ -|pypi| |versions| +`Google Cloud Bigtable`_ is Google's NoSQL Big Data database service. It's the +same database that powers many core Google services, including Search, +Analytics, Maps, and Gmail. -- `Documentation`_ +- `Client Library Documentation`_ +- `Product Documentation`_ -.. _Documentation: https://googlecloudplatform.github.io/google-cloud-python/latest/bigtable/usage.html +.. _Alpha: https://github.com/GoogleCloudPlatform/google-cloud-python/blob/master/README.rst +.. |pypi| image:: https://img.shields.io/pypi/v/google-cloud-bigtable.svg + :target: https://pypi.org/project/google-cloud-bigtable/ +.. |versions| image:: https://img.shields.io/pypi/pyversions/google-cloud-bigtable.svg + :target: https://pypi.org/project/google-cloud-bigtable/ +.. _Google Cloud Bigtable: https://cloud.google.com/bigtable +.. _Client Library Documentation: https://googlecloudplatform.github.io/google-cloud-python/latest/bigtable/usage.html +.. 
_Product Documentation: https://cloud.google.com/bigtable/docs Quick Start ----------- -.. code-block:: console +In order to use this library, you first need to go through the following steps: - $ pip install --upgrade google-cloud-bigtable +1. `Select or create a Cloud Platform project.`_ +2. `Enable billing for your project.`_ +3. `Enable the Cloud Bigtable API.`_ +4. `Setup Authentication.`_ -For more information on setting up your Python development environment, -such as installing ``pip`` and ``virtualenv`` on your system, please refer -to `Python Development Environment Setup Guide`_ for Google Cloud Platform. +.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project +.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project +.. _Enable the Cloud Bigtable API.: https://cloud.google.com/bigtable +.. _Setup Authentication.: https://googlecloudplatform.github.io/google-cloud-python/latest/core/auth.html -.. _Python Development Environment Setup Guide: https://cloud.google.com/python/setup +Installation +~~~~~~~~~~~~ -Authentication --------------- +Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to +create isolated Python environments. The basic problem it addresses is one of +dependencies and versions, and indirectly permissions. -With ``google-cloud-python`` we try to make authentication as painless as -possible. Check out the `Authentication section`_ in our documentation to -learn more. You may also find the `authentication document`_ shared by all -the ``google-cloud-*`` libraries to be helpful. +With `virtualenv`_, it's possible to install this library without needing system +install permissions, and without clashing with the installed system +dependencies. -.. _Authentication section: https://google-cloud-python.readthedocs.io/en/latest/core/auth.html -.. _authentication document: https://github.com/GoogleCloudPlatform/google-cloud-common/tree/master/authentication +.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/ -Using the API -------------- -Cloud `Bigtable`_ is Google's NoSQL Big Data database service. It's the same -database that powers many core Google services, including Search, -Analytics, Maps, and Gmail. +Mac/Linux +^^^^^^^^^ -.. _Bigtable: https://cloud.google.com/bigtable/docs/ +.. code-block:: console -See the ``google-cloud-python`` API Bigtable `Documentation`_ to learn -how to manage your data in Bigtable tables. + pip install virtualenv + virtualenv + source /bin/activate + /bin/pip install google-cloud-bigtable -.. |pypi| image:: https://img.shields.io/pypi/v/google-cloud-bigtable.svg - :target: https://pypi.org/project/google-cloud-bigtable/ -.. |versions| image:: https://img.shields.io/pypi/pyversions/google-cloud-bigtable.svg - :target: https://pypi.org/project/google-cloud-bigtable/ + +Windows +^^^^^^^ + +.. code-block:: console + + pip install virtualenv + virtualenv + \Scripts\activate + \Scripts\pip.exe install google-cloud-bigtable + +Next Steps +~~~~~~~~~~ + +- Read the `Client Library Documentation`_ for Cloud Bigtable API + to see other available methods on the client. +- Read the `Product documentation`_ to learn + more about the product and see How-to Guides. + +``google-cloud-happybase`` +-------------------------- + +In addition to the core ``google-cloud-bigtable``, we provide a +`google-cloud-happybase +`__ library +with the same interface as the popular `HappyBase +`__ library. 
Unlike HappyBase, +``google-cloud-happybase`` uses ``google-cloud-bigtable`` under the covers, +rather than Apache HBase. diff --git a/packages/google-cloud-bigtable/docs/changelog.md b/packages/google-cloud-bigtable/docs/changelog.md new file mode 120000 index 000000000000..04c99a55caae --- /dev/null +++ b/packages/google-cloud-bigtable/docs/changelog.md @@ -0,0 +1 @@ +../CHANGELOG.md \ No newline at end of file diff --git a/packages/google-cloud-bigtable/docs/client-intro.rst b/packages/google-cloud-bigtable/docs/client-intro.rst new file mode 100644 index 000000000000..cb31767f3c26 --- /dev/null +++ b/packages/google-cloud-bigtable/docs/client-intro.rst @@ -0,0 +1,90 @@ +Base for Everything +=================== + +To use the API, the :class:`Client ` +class defines a high-level interface which handles authorization +and creating other objects: + +.. code:: python + + from google.cloud.bigtable.client import Client + client = Client() + +Long-lived Defaults +------------------- + +When creating a :class:`Client `, the +``user_agent`` argument has sensible a default +(:data:`DEFAULT_USER_AGENT `). +However, you may over-ride it and the value will be used throughout all API +requests made with the ``client`` you create. + +Configuration +------------- + +- For an overview of authentication in ``google-cloud-python``, + see :doc:`/core/auth`. + +- In addition to any authentication configuration, you can also set the + :envvar:`GOOGLE_CLOUD_PROJECT` environment variable for the Google Cloud Console + project you'd like to interact with. If your code is running in Google App + Engine or Google Compute Engine the project will be detected automatically. + (Setting this environment variable is not required, you may instead pass the + ``project`` explicitly when constructing a + :class:`Client `). + +- After configuring your environment, create a + :class:`Client ` + + .. code:: + + >>> from google.cloud import bigtable + >>> client = bigtable.Client() + + or pass in ``credentials`` and ``project`` explicitly + + .. code:: + + >>> from google.cloud import bigtable + >>> client = bigtable.Client(project='my-project', credentials=creds) + +.. tip:: + + Be sure to use the **Project ID**, not the **Project Number**. + +Admin API Access +---------------- + +If you'll be using your client to make `Instance Admin`_ and `Table Admin`_ +API requests, you'll need to pass the ``admin`` argument: + +.. code:: python + + client = bigtable.Client(admin=True) + +Read-Only Mode +-------------- + +If, on the other hand, you only have (or want) read access to the data, +you can pass the ``read_only`` argument: + +.. code:: python + + client = bigtable.Client(read_only=True) + +This will ensure that the +:data:`READ_ONLY_SCOPE ` is used +for API requests (so any accidental requests that would modify data will +fail). + +Next Step +--------- + +After a :class:`Client `, the next highest-level +object is an :class:`Instance `. You'll need +one before you can interact with tables or data. + +Head next to learn about the :doc:`instance-api`. + +.. _Instance Admin: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/tree/master/bigtable-protos/src/main/proto/google/bigtable/admin/instance/v1 +.. 
_Table Admin: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/tree/master/bigtable-protos/src/main/proto/google/bigtable/admin/table/v1 diff --git a/packages/google-cloud-bigtable/docs/client.rst b/packages/google-cloud-bigtable/docs/client.rst new file mode 100644 index 000000000000..c48595c8ac0b --- /dev/null +++ b/packages/google-cloud-bigtable/docs/client.rst @@ -0,0 +1,6 @@ +Client +~~~~~~ + +.. automodule:: google.cloud.bigtable.client + :members: + :show-inheritance: diff --git a/packages/google-cloud-bigtable/docs/cluster.rst b/packages/google-cloud-bigtable/docs/cluster.rst new file mode 100644 index 000000000000..ad33aae5e0b8 --- /dev/null +++ b/packages/google-cloud-bigtable/docs/cluster.rst @@ -0,0 +1,6 @@ +Cluster +~~~~~~~ + +.. automodule:: google.cloud.bigtable.cluster + :members: + :show-inheritance: diff --git a/packages/google-cloud-bigtable/docs/column-family.rst b/packages/google-cloud-bigtable/docs/column-family.rst new file mode 100644 index 000000000000..de6c1eb1f5df --- /dev/null +++ b/packages/google-cloud-bigtable/docs/column-family.rst @@ -0,0 +1,49 @@ +Column Families +=============== + +When creating a +:class:`ColumnFamily `, it is +possible to set garbage collection rules for expired data. + +By setting a rule, cells in the table matching the rule will be deleted +during periodic garbage collection (which executes opportunistically in the +background). + +The types +:class:`MaxAgeGCRule `, +:class:`MaxVersionsGCRule `, +:class:`GarbageCollectionRuleUnion ` and +:class:`GarbageCollectionRuleIntersection ` +can all be used as the optional ``gc_rule`` argument in the +:class:`ColumnFamily ` +constructor. This value is then used in the +:meth:`create() ` and +:meth:`update() ` methods. + +These rules can be nested arbitrarily, with a +:class:`MaxAgeGCRule ` or +:class:`MaxVersionsGCRule ` +at the lowest level of the nesting: + +.. code:: python + + import datetime + + max_age = datetime.timedelta(days=3) + rule1 = MaxAgeGCRule(max_age) + rule2 = MaxVersionsGCRule(1) + + # Make a composite that matches anything older than 3 days **AND** + # with more than 1 version. + rule3 = GarbageCollectionIntersection(rules=[rule1, rule2]) + + # Make another composite that matches our previous intersection + # **OR** anything that has more than 3 versions. + rule4 = GarbageCollectionRule(max_num_versions=3) + rule5 = GarbageCollectionUnion(rules=[rule3, rule4]) + +---- + +.. automodule:: google.cloud.bigtable.column_family + :members: + :show-inheritance: diff --git a/packages/google-cloud-bigtable/docs/data-api.rst b/packages/google-cloud-bigtable/docs/data-api.rst new file mode 100644 index 000000000000..d35b50079426 --- /dev/null +++ b/packages/google-cloud-bigtable/docs/data-api.rst @@ -0,0 +1,344 @@ +Data API +======== + +After creating a :class:`Table ` and some +column families, you are ready to store and retrieve data. + +Cells vs. Columns vs. Column Families ++++++++++++++++++++++++++++++++++++++ + +* As explained in the :doc:`table overview `, tables can + have many column families. +* As described below, a table can also have many rows which are + specified by row keys. +* Within a row, data is stored in a cell. A cell simply has a value (as + bytes) and a timestamp. The number of cells in each row can be + different, depending on what was stored in each row. +* Each cell lies in a column (**not** a column family). A column is really + just a more **specific** modifier within a column family. 
A column + can be present in every column family, in only one or anywhere in between. +* Within a column family there can be many columns. For example, within + the column family ``foo`` we could have columns ``bar`` and ``baz``. + These would typically be represented as ``foo:bar`` and ``foo:baz``. + +Modifying Data +++++++++++++++ + +Since data is stored in cells, which are stored in rows, we +use the metaphor of a **row** in classes that are used to modify +(write, update, delete) data in a +:class:`Table `. + +Direct vs. Conditional vs. Append +--------------------------------- + +There are three ways to modify data in a table, described by the +`MutateRow`_, `CheckAndMutateRow`_ and `ReadModifyWriteRow`_ API +methods. + +* The **direct** way is via `MutateRow`_ which involves simply + adding, overwriting or deleting cells. The + :class:`DirectRow ` class + handles direct mutations. +* The **conditional** way is via `CheckAndMutateRow`_. This method + first checks if some filter is matched in a given row, then + applies one of two sets of mutations, depending on if a match + occurred or not. (These mutation sets are called the "true + mutations" and "false mutations".) The + :class:`ConditionalRow ` class + handles conditional mutations. +* The **append** way is via `ReadModifyWriteRow`_. This simply + appends (as bytes) or increments (as an integer) data in a presumed + existing cell in a row. The + :class:`AppendRow ` class + handles append mutations. + +Row Factory +----------- + +A single factory can be used to create any of the three row types. +To create a :class:`DirectRow `: + +.. code:: python + + row = table.row(row_key) + +Unlike the previous string values we've used before, the row key must +be ``bytes``. + +To create a :class:`ConditionalRow `, +first create a :class:`RowFilter ` and +then + +.. code:: python + + cond_row = table.row(row_key, filter_=filter_) + +To create an :class:`AppendRow ` + +.. code:: python + + append_row = table.row(row_key, append=True) + +Building Up Mutations +--------------------- + +In all three cases, a set of mutations (or two sets) are built up +on a row before they are sent of in a batch via + +.. code:: python + + row.commit() + +Direct Mutations +---------------- + +Direct mutations can be added via one of four methods + +* :meth:`set_cell() ` allows a + single value to be written to a column + + .. code:: python + + row.set_cell(column_family_id, column, value, + timestamp=timestamp) + + If the ``timestamp`` is omitted, the current time on the Google Cloud + Bigtable server will be used when the cell is stored. + + The value can either be bytes or an integer, which will be converted to + bytes as a signed 64-bit integer. + +* :meth:`delete_cell() ` deletes + all cells (i.e. for all timestamps) in a given column + + .. code:: python + + row.delete_cell(column_family_id, column) + + Remember, this only happens in the ``row`` we are using. + + If we only want to delete cells from a limited range of time, a + :class:`TimestampRange ` can + be used + + .. code:: python + + row.delete_cell(column_family_id, column, + time_range=time_range) + +* :meth:`delete_cells() ` does + the same thing as + :meth:`delete_cell() `, + but accepts a list of columns in a column family rather than a single one. + + .. code:: python + + row.delete_cells(column_family_id, [column1, column2], + time_range=time_range) + + In addition, if we want to delete cells from every column in a column family, + the special :attr:`ALL_COLUMNS ` + value can be used + + .. 
code:: python + + row.delete_cells(column_family_id, row.ALL_COLUMNS, + time_range=time_range) + +* :meth:`delete() ` will delete the + entire row + + .. code:: python + + row.delete() + +Conditional Mutations +--------------------- + +Making **conditional** modifications is essentially identical +to **direct** modifications: it uses the exact same methods +to accumulate mutations. + +However, each mutation added must specify a ``state``: will the mutation be +applied if the filter matches or if it fails to match. + +For example: + +.. code:: python + + cond_row.set_cell(column_family_id, column, value, + timestamp=timestamp, state=True) + +will add to the set of true mutations. + +Append Mutations +---------------- + +Append mutations can be added via one of two methods + +* :meth:`append_cell_value() ` + appends a bytes value to an existing cell: + + .. code:: python + + append_row.append_cell_value(column_family_id, column, bytes_value) + +* :meth:`increment_cell_value() ` + increments an integer value in an existing cell: + + .. code:: python + + append_row.increment_cell_value(column_family_id, column, int_value) + + Since only bytes are stored in a cell, the cell value is decoded as + a signed 64-bit integer before being incremented. (This happens on + the Google Cloud Bigtable server, not in the library.) + +Notice that no timestamp was specified. This is because **append** mutations +operate on the latest value of the specified column. + +If there are no cells in the specified column, then the empty string (bytes +case) or zero (integer case) are the assumed values. + +Starting Fresh +-------------- + +If accumulated mutations need to be dropped, use + +.. code:: python + + row.clear() + +Reading Data +++++++++++++ + +Read Single Row from a Table +---------------------------- + +To make a `ReadRows`_ API request for a single row key, use +:meth:`Table.read_row() `: + +.. code:: python + + >>> row_data = table.read_row(row_key) + >>> row_data.cells + { + u'fam1': { + b'col1': [ + , + , + ], + b'col2': [ + , + ], + }, + u'fam2': { + b'col3': [ + , + , + , + ], + }, + } + >>> cell = row_data.cells[u'fam1'][b'col1'][0] + >>> cell + + >>> cell.value + b'val1' + >>> cell.timestamp + datetime.datetime(2016, 2, 27, 3, 41, 18, 122823, tzinfo=) + +Rather than returning a :class:`DirectRow ` +or similar class, this method returns a +:class:`PartialRowData ` +instance. This class is used for reading and parsing data rather than for +modifying data (as :class:`DirectRow ` is). + +A filter can also be applied to the results: + +.. code:: python + + row_data = table.read_row(row_key, filter_=filter_val) + +The allowable ``filter_`` values are the same as those used for a +:class:`ConditionalRow `. For +more information, see the +:meth:`Table.read_row() ` documentation. + +Stream Many Rows from a Table +----------------------------- + +To make a `ReadRows`_ API request for a stream of rows, use +:meth:`Table.read_rows() `: + +.. code:: python + + row_data = table.read_rows() + +Using gRPC over HTTP/2, a continual stream of responses will be delivered. +In particular + +* :meth:`consume_next() ` + pulls the next result from the stream, parses it and stores it on the + :class:`PartialRowsData ` instance +* :meth:`consume_all() ` + pulls results from the stream until there are no more +* :meth:`cancel() ` closes + the stream + +See the :class:`PartialRowsData ` +documentation for more information. + +As with +:meth:`Table.read_row() `, an optional +``filter_`` can be applied. 
In addition a ``start_key`` and / or ``end_key`` +can be supplied for the stream, a ``limit`` can be set and a boolean +``allow_row_interleaving`` can be specified to allow faster streamed results +at the potential cost of non-sequential reads. + +See the :meth:`Table.read_rows() ` +documentation for more information on the optional arguments. + +Sample Keys in a Table +---------------------- + +Make a `SampleRowKeys`_ API request with +:meth:`Table.sample_row_keys() `: + +.. code:: python + + keys_iterator = table.sample_row_keys() + +The returned row keys will delimit contiguous sections of the table of +approximately equal size, which can be used to break up the data for +distributed tasks like mapreduces. + +As with +:meth:`Table.read_rows() `, the +returned ``keys_iterator`` is connected to a cancellable HTTP/2 stream. + +The next key in the result can be accessed via + +.. code:: python + + next_key = keys_iterator.next() + +or all keys can be iterated over via + +.. code:: python + + for curr_key in keys_iterator: + do_something(curr_key) + +Just as with reading, the stream can be canceled: + +.. code:: python + + keys_iterator.cancel() + +.. _ReadRows: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/v1/bigtable_service.proto#L36-L38 +.. _SampleRowKeys: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/v1/bigtable_service.proto#L44-L46 +.. _MutateRow: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/v1/bigtable_service.proto#L50-L52 +.. _CheckAndMutateRow: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/v1/bigtable_service.proto#L62-L64 +.. _ReadModifyWriteRow: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/v1/bigtable_service.proto#L70-L72 diff --git a/packages/google-cloud-bigtable/docs/gapic/v2/api.rst b/packages/google-cloud-bigtable/docs/gapic/v2/api.rst deleted file mode 100644 index 3546c5633915..000000000000 --- a/packages/google-cloud-bigtable/docs/gapic/v2/api.rst +++ /dev/null @@ -1,6 +0,0 @@ -Client for Cloud Bigtable API -============================= - -.. automodule:: google.cloud.bigtable_v2 - :members: - :inherited-members: \ No newline at end of file diff --git a/packages/google-cloud-bigtable/docs/gapic/v2/types.rst b/packages/google-cloud-bigtable/docs/gapic/v2/types.rst deleted file mode 100644 index c1d98f25119b..000000000000 --- a/packages/google-cloud-bigtable/docs/gapic/v2/types.rst +++ /dev/null @@ -1,5 +0,0 @@ -Types for Cloud Bigtable API Client -=================================== - -.. automodule:: google.cloud.bigtable_v2.types - :members: \ No newline at end of file diff --git a/packages/google-cloud-bigtable/docs/index.rst b/packages/google-cloud-bigtable/docs/index.rst index 11906f60d979..89277952bf29 100644 --- a/packages/google-cloud-bigtable/docs/index.rst +++ b/packages/google-cloud-bigtable/docs/index.rst @@ -1,84 +1,30 @@ -Python Client for Cloud Bigtable API (`Alpha`_) -=============================================== +.. 
include:: /../bigtable/README.rst -`Cloud Bigtable API`_: API for reading and writing the contents of Bigtables associated with a -cloud project. -- `Client Library Documentation`_ -- `Product Documentation`_ - -.. _Alpha: https://github.com/GoogleCloudPlatform/google-cloud-python/blob/master/README.rst -.. _Cloud Bigtable API: https://cloud.google.com/bigtable -.. _Client Library Documentation: https://googlecloudplatform.github.io/google-cloud-python/latest/bigtable/usage.html -.. _Product Documentation: https://cloud.google.com/bigtable - -Quick Start ------------ - -In order to use this library, you first need to go through the following steps: - -1. `Select or create a Cloud Platform project.`_ -2. `Enable billing for your project.`_ -3. `Enable the Cloud Bigtable API.`_ -4. `Setup Authentication.`_ - -.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project -.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project -.. _Enable the Cloud Bigtable API.: https://cloud.google.com/bigtable -.. _Setup Authentication.: https://googlecloudplatform.github.io/google-cloud-python/latest/core/auth.html - -Installation -~~~~~~~~~~~~ - -Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to -create isolated Python environments. The basic problem it addresses is one of -dependencies and versions, and indirectly permissions. - -With `virtualenv`_, it's possible to install this library without needing system -install permissions, and without clashing with the installed system -dependencies. - -.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/ - - -Mac/Linux -^^^^^^^^^ - -.. code-block:: console - - pip install virtualenv - virtualenv - source /bin/activate - /bin/pip install google-cloud-bigtable +Using the API +------------- +.. toctree:: + :maxdepth: 2 + usage -Windows -^^^^^^^ -.. code-block:: console +API Reference +------------- +.. toctree:: + :maxdepth: 2 - pip install virtualenv - virtualenv - \Scripts\activate - \Scripts\pip.exe install google-cloud-bigtable + instance-api + table-api + data-api -Next Steps -~~~~~~~~~~ -- Read the `Client Library Documentation`_ for Cloud Bigtable API - API to see other available methods on the client. -- Read the `Cloud Bigtable API Product documentation`_ to learn - more about the product and see How-to Guides. -- View this `repository’s main README`_ to see the full list of Cloud - APIs that we cover. +Changelog +--------- -.. _Cloud Bigtable API Product documentation: https://cloud.google.com/bigtable -.. _repository’s main README: https://github.com/GoogleCloudPlatform/google-cloud-python/blob/master/README.rst +For a list of all ``google-cloud-datastore`` releases: -Api Reference -------------- .. toctree:: - :maxdepth: 2 + :maxdepth: 2 - gapic/v2/api - gapic/v2/types + changelog diff --git a/packages/google-cloud-bigtable/docs/instance-api.rst b/packages/google-cloud-bigtable/docs/instance-api.rst new file mode 100644 index 000000000000..bc338d7c7ca9 --- /dev/null +++ b/packages/google-cloud-bigtable/docs/instance-api.rst @@ -0,0 +1,135 @@ +Instance Admin API +================== + +After creating a :class:`Client `, you can +interact with individual instances for a project. + +List Instances +-------------- + +If you want a comprehensive list of all existing instances, make a +`ListInstances`_ API request with +:meth:`Client.list_instances() `: + +.. 
code:: python + + instances = client.list_instances() + +Instance Factory +---------------- + +To create an :class:`Instance ` object: + +.. code:: python + + instance = client.instance(instance_id, location_id, + display_name=display_name) + +- ``location_id`` is the ID of the location in which the instance's cluster + will be hosted, e.g. ``'us-central1-c'``. ``location_id`` is required for + instances which do not already exist. + +- ``display_name`` is optional. When not provided, ``display_name`` defaults + to the ``instance_id`` value. + +You can also use :meth:`Client.instance` to create a local wrapper for +instances that have already been created with the API, or through the web +console: + +.. code:: python + + instance = client.instance(existing_instance_id) + instance.reload() + +Create a new Instance +--------------------- + +After creating the instance object, make a `CreateInstance`_ API request +with :meth:`create() `: + +.. code:: python + + instance.display_name = 'My very own instance' + instance.create() + +Check on Current Operation +-------------------------- + +.. note:: + + When modifying an instance (via a `CreateInstance`_ request), the Bigtable + API will return a `long-running operation`_ and a corresponding + :class:`Operation ` object + will be returned by + :meth:`create() `. + +You can check if a long-running operation (for a +:meth:`create() ` has finished +by making a `GetOperation`_ request with +:meth:`Operation.finished() `: + +.. code:: python + + >>> operation = instance.create() + >>> operation.finished() + True + +.. note:: + + Once an :class:`Operation ` object + has returned :data:`True` from + :meth:`finished() `, the + object should not be re-used. Subsequent calls to + :meth:`finished() ` + will result in a :class:`ValueError `. + +Get metadata for an existing Instance +------------------------------------- + +After creating the instance object, make a `GetInstance`_ API request +with :meth:`reload() `: + +.. code:: python + + instance.reload() + +This will load ``display_name`` for the existing ``instance`` object. + +Update an existing Instance +--------------------------- + +After creating the instance object, make an `UpdateInstance`_ API request +with :meth:`update() `: + +.. code:: python + + client.display_name = 'New display_name' + instance.update() + +Delete an existing Instance +--------------------------- + +Make a `DeleteInstance`_ API request with +:meth:`delete() `: + +.. code:: python + + instance.delete() + +Next Step +--------- + +Now we go down the hierarchy from +:class:`Instance ` to a +:class:`Table `. + +Head next to learn about the :doc:`table-api`. + +.. _Instance Admin API: https://cloud.google.com/bigtable/docs/creating-instance +.. _CreateInstance: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/instance/v1/bigtable_instance_service.proto#L66-L68 +.. _GetInstance: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/instance/v1/bigtable_instance_service.proto#L38-L40 +.. _UpdateInstance: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/instance/v1/bigtable_instance_service.proto#L93-L95 +.. 
_DeleteInstance: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/instance/v1/bigtable_instance_service.proto#L109-L111 +.. _ListInstances: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/instance/v1/bigtable_instance_service.proto#L44-L46 +.. _GetOperation: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/longrunning/operations.proto#L43-L45 +.. _long-running operation: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/longrunning/operations.proto#L73-L102 diff --git a/packages/google-cloud-bigtable/docs/instance.rst b/packages/google-cloud-bigtable/docs/instance.rst new file mode 100644 index 000000000000..f9be9672fc64 --- /dev/null +++ b/packages/google-cloud-bigtable/docs/instance.rst @@ -0,0 +1,6 @@ +Instance +~~~~~~~~ + +.. automodule:: google.cloud.bigtable.instance + :members: + :show-inheritance: diff --git a/packages/google-cloud-bigtable/docs/row-data.rst b/packages/google-cloud-bigtable/docs/row-data.rst new file mode 100644 index 000000000000..503f9b1cbdfd --- /dev/null +++ b/packages/google-cloud-bigtable/docs/row-data.rst @@ -0,0 +1,6 @@ +Row Data +~~~~~~~~ + +.. automodule:: google.cloud.bigtable.row_data + :members: + :show-inheritance: diff --git a/packages/google-cloud-bigtable/docs/row-filters.rst b/packages/google-cloud-bigtable/docs/row-filters.rst new file mode 100644 index 000000000000..292ae9dfb6aa --- /dev/null +++ b/packages/google-cloud-bigtable/docs/row-filters.rst @@ -0,0 +1,67 @@ +Bigtable Row Filters +==================== + +It is possible to use a +:class:`RowFilter ` +when adding mutations to a +:class:`ConditionalRow ` and when +reading row data with :meth:`read_row() ` +or :meth:`read_rows() `. + +As laid out in the `RowFilter definition`_, the following basic filters +are provided: + +* :class:`SinkFilter <.row_filters.SinkFilter>` +* :class:`PassAllFilter <.row_filters.PassAllFilter>` +* :class:`BlockAllFilter <.row_filters.BlockAllFilter>` +* :class:`RowKeyRegexFilter <.row_filters.RowKeyRegexFilter>` +* :class:`RowSampleFilter <.row_filters.RowSampleFilter>` +* :class:`FamilyNameRegexFilter <.row_filters.FamilyNameRegexFilter>` +* :class:`ColumnQualifierRegexFilter <.row_filters.ColumnQualifierRegexFilter>` +* :class:`TimestampRangeFilter <.row_filters.TimestampRangeFilter>` +* :class:`ColumnRangeFilter <.row_filters.ColumnRangeFilter>` +* :class:`ValueRegexFilter <.row_filters.ValueRegexFilter>` +* :class:`ValueRangeFilter <.row_filters.ValueRangeFilter>` +* :class:`CellsRowOffsetFilter <.row_filters.CellsRowOffsetFilter>` +* :class:`CellsRowLimitFilter <.row_filters.CellsRowLimitFilter>` +* :class:`CellsColumnLimitFilter <.row_filters.CellsColumnLimitFilter>` +* :class:`StripValueTransformerFilter <.row_filters.StripValueTransformerFilter>` +* :class:`ApplyLabelFilter <.row_filters.ApplyLabelFilter>` + +In addition, these filters can be combined into composite filters with + +* :class:`RowFilterChain <.row_filters.RowFilterChain>` +* :class:`RowFilterUnion <.row_filters.RowFilterUnion>` +* :class:`ConditionalRowFilter <.row_filters.ConditionalRowFilter>` + +These rules can be nested arbitrarily, with a basic filter at the lowest +level. 
For example: + +.. code:: python + + # Filter in a specified column (matching any column family). + col1_filter = ColumnQualifierRegexFilter(b'columnbia') + + # Create a filter to label results. + label1 = u'label-red' + label1_filter = ApplyLabelFilter(label1) + + # Combine the filters to label all the cells in columnbia. + chain1 = RowFilterChain(filters=[col1_filter, label1_filter]) + + # Create a similar filter to label cells blue. + col2_filter = ColumnQualifierRegexFilter(b'columnseeya') + label2 = u'label-blue' + label2_filter = ApplyLabelFilter(label2) + chain2 = RowFilterChain(filters=[col2_filter, label2_filter]) + + # Bring our two labeled columns together. + row_filter = RowFilterUnion(filters=[chain1, chain2]) + +---- + +.. automodule:: google.cloud.bigtable.row_filters + :members: + :show-inheritance: + +.. _RowFilter definition: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/1ff247c2e3b7cd0a2dd49071b2d95beaf6563092/bigtable-protos/src/main/proto/google/bigtable/v1/bigtable_data.proto#L195 diff --git a/packages/google-cloud-bigtable/docs/row.rst b/packages/google-cloud-bigtable/docs/row.rst new file mode 100644 index 000000000000..33686608b363 --- /dev/null +++ b/packages/google-cloud-bigtable/docs/row.rst @@ -0,0 +1,7 @@ +Bigtable Row +============ + +.. automodule:: google.cloud.bigtable.row + :members: + :show-inheritance: + :inherited-members: diff --git a/packages/google-cloud-bigtable/docs/table-api.rst b/packages/google-cloud-bigtable/docs/table-api.rst new file mode 100644 index 000000000000..5168aad49ff7 --- /dev/null +++ b/packages/google-cloud-bigtable/docs/table-api.rst @@ -0,0 +1,154 @@ +Table Admin API +=============== + +After creating an :class:`Instance `, you can +interact with individual tables, groups of tables or column families within +a table. + +List Tables +----------- + +If you want a comprehensive list of all existing tables in a instance, make a +`ListTables`_ API request with +:meth:`Instance.list_tables() `: + +.. code:: python + + >>> instance.list_tables() + [, + ] + +Table Factory +------------- + +To create a :class:`Table ` object: + +.. code:: python + + table = instance.table(table_id) + +Even if this :class:`Table ` already +has been created with the API, you'll want this object to use as a +parent of a :class:`ColumnFamily ` +or :class:`Row `. + +Create a new Table +------------------ + +After creating the table object, make a `CreateTable`_ API request +with :meth:`create() `: + +.. code:: python + + table.create() + +If you would like to initially split the table into several tablets (tablets are +similar to HBase regions): + +.. code:: python + + table.create(initial_split_keys=['s1', 's2']) + +Delete an existing Table +------------------------ + +Make a `DeleteTable`_ API request with +:meth:`delete() `: + +.. code:: python + + table.delete() + +List Column Families in a Table +------------------------------- + +Though there is no **official** method for retrieving `column families`_ +associated with a table, the `GetTable`_ API method returns a +table object with the names of the column families. + +To retrieve the list of column families use +:meth:`list_column_families() `: + +.. code:: python + + column_families = table.list_column_families() + +Column Family Factory +--------------------- + +To create a +:class:`ColumnFamily ` object: + +.. 
code:: python + + column_family = table.column_family(column_family_id) + +There is no real reason to use this factory unless you intend to +create or delete a column family. + +In addition, you can specify an optional ``gc_rule`` (a +:class:`GarbageCollectionRule ` +or similar): + +.. code:: python + + column_family = table.column_family(column_family_id, + gc_rule=gc_rule) + +This rule helps the backend determine when and how to clean up old cells +in the column family. + +See :doc:`column-family` for more information about +:class:`GarbageCollectionRule ` +and related classes. + +Create a new Column Family +-------------------------- + +After creating the column family object, make a `CreateColumnFamily`_ API +request with +:meth:`ColumnFamily.create() ` + +.. code:: python + + column_family.create() + +Delete an existing Column Family +-------------------------------- + +Make a `DeleteColumnFamily`_ API request with +:meth:`ColumnFamily.delete() ` + +.. code:: python + + column_family.delete() + +Update an existing Column Family +-------------------------------- + +Make an `UpdateColumnFamily`_ API request with +:meth:`ColumnFamily.delete() ` + +.. code:: python + + column_family.update() + +Next Step +--------- + +Now we go down the final step of the hierarchy from +:class:`Table ` to +:class:`Row ` as well as streaming +data directly via a :class:`Table `. + +Head next to learn about the :doc:`data-api`. + +.. _ListTables: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/table/v1/bigtable_table_service.proto#L40-L42 +.. _CreateTable: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/table/v1/bigtable_table_service.proto#L35-L37 +.. _DeleteTable: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/table/v1/bigtable_table_service.proto#L50-L52 +.. _RenameTable: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/table/v1/bigtable_table_service.proto#L56-L58 +.. _GetTable: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/table/v1/bigtable_table_service.proto#L45-L47 +.. _CreateColumnFamily: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/table/v1/bigtable_table_service.proto#L61-L63 +.. _UpdateColumnFamily: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/table/v1/bigtable_table_service.proto#L66-L68 +.. _DeleteColumnFamily: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/table/v1/bigtable_table_service.proto#L71-L73 +.. 
_column families: https://cloud.google.com/bigtable/docs/schema-design#column_families_and_column_qualifiers diff --git a/packages/google-cloud-bigtable/docs/table.rst b/packages/google-cloud-bigtable/docs/table.rst new file mode 100644 index 000000000000..c230725d1351 --- /dev/null +++ b/packages/google-cloud-bigtable/docs/table.rst @@ -0,0 +1,6 @@ +Table +~~~~~ + +.. automodule:: google.cloud.bigtable.table + :members: + :show-inheritance: diff --git a/packages/google-cloud-bigtable/docs/usage.rst b/packages/google-cloud-bigtable/docs/usage.rst new file mode 100644 index 000000000000..aa8d899d58cb --- /dev/null +++ b/packages/google-cloud-bigtable/docs/usage.rst @@ -0,0 +1,28 @@ +Using the API +============= + +.. toctree:: + :maxdepth: 2 + + client-intro + client + cluster + instance + table + column-family + row + row-data + row-filters + + +In the hierarchy of API concepts + +* a :class:`Client ` owns an + :class:`Instance ` +* an :class:`Instance ` owns a + :class:`Table ` +* a :class:`Table ` owns a + :class:`ColumnFamily ` +* a :class:`Table ` owns a + :class:`Row ` + (and all the cells in the row) From cafe8dbf2996be41aac811340700d6b930579801 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 25 Sep 2018 19:02:25 -0400 Subject: [PATCH 185/892] Harden instance teardown against '429 Too Many Requests'. (#6102) Closes #4935. --- packages/google-cloud-bigtable/tests/system.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index ffe747b6d66e..737dc3fa0d39 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -18,6 +18,7 @@ import unittest +from google.api_core.exceptions import TooManyRequests from google.cloud._helpers import _datetime_from_microseconds from google.cloud._helpers import _microseconds_from_datetime from google.cloud._helpers import UTC @@ -79,6 +80,9 @@ def _retry_on_unavailable(exc): return exc.code() == StatusCode.UNAVAILABLE +retry_429 = RetryErrors(TooManyRequests) + + def setUpModule(): from google.cloud.exceptions import GrpcRendezvous @@ -111,7 +115,7 @@ def setUpModule(): def tearDownModule(): if not Config.IN_EMULATOR: - Config.INSTANCE.delete() + retry_429(Config.INSTANCE.delete)() class TestInstanceAdminAPI(unittest.TestCase): @@ -124,7 +128,7 @@ def setUp(self): def tearDown(self): for instance in self.instances_to_delete: - instance.delete() + retry_429(instance.delete)() def test_list_instances(self): instances, failed_locations = Config.CLIENT.list_instances() From dcb48781952f745382532f170a4e6399bfa29a71 Mon Sep 17 00:00:00 2001 From: Solomon Duskis Date: Fri, 28 Sep 2018 12:24:22 -0400 Subject: [PATCH 186/892] Preparing Cloud Bigtable for beta (#6129) --- packages/google-cloud-bigtable/README.rst | 4 ++-- packages/google-cloud-bigtable/setup.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/google-cloud-bigtable/README.rst b/packages/google-cloud-bigtable/README.rst index c93c7e7be327..76ddbb35c076 100644 --- a/packages/google-cloud-bigtable/README.rst +++ b/packages/google-cloud-bigtable/README.rst @@ -1,4 +1,4 @@ -Python Client for Google Cloud Bigtable (`Alpha`_) +Python Client for Google Cloud Bigtable (`Beta`_) ================================================== |pypi| |versions| @@ -11,7 +11,7 @@ Analytics, Maps, and Gmail. - `Client Library Documentation`_ - `Product Documentation`_ -.. 
_Alpha: https://github.com/GoogleCloudPlatform/google-cloud-python/blob/master/README.rst +.. _Beta: https://github.com/GoogleCloudPlatform/google-cloud-python/blob/master/README.rst .. |pypi| image:: https://img.shields.io/pypi/v/google-cloud-bigtable.svg :target: https://pypi.org/project/google-cloud-bigtable/ .. |versions| image:: https://img.shields.io/pypi/pyversions/google-cloud-bigtable.svg diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 4666a502f1b8..96df78be2a9d 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -27,7 +27,7 @@ # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' # 'Development Status :: 5 - Production/Stable' -release_status = 'Development Status :: 3 - Alpha' +release_status = 'Development Status :: 4 - Beta' dependencies = [ 'google-cloud-core<0.29dev,>=0.28.0', 'google-api-core[grpc]<2.0.0dev,>=0.1.1', From b134c1671191a97b5a65d0f1f21f811853dcdc62 Mon Sep 17 00:00:00 2001 From: Alex <7764119+AVaksman@users.noreply.github.com> Date: Fri, 5 Oct 2018 10:14:53 -0400 Subject: [PATCH 187/892] Bigtable: refactor 'read_row' to call 'read_rows' (#6137) --- .../google/cloud/bigtable/row_data.py | 3 + .../google/cloud/bigtable/table.py | 53 ++--------- .../tests/unit/test_row_data.py | 3 +- .../tests/unit/test_table.py | 87 ++++++++++--------- 4 files changed, 58 insertions(+), 88 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py index 510f28ca2b70..44a01bc0751b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py @@ -421,6 +421,9 @@ def __iter__(self): try: response = self._read_next_response() except StopIteration: + if self.state != self.NEW_ROW: + raise ValueError( + 'The row remains partial / is not committed.') break for chunk in response.chunks: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index 53660a4546db..a164b3b58b4f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -289,32 +289,13 @@ def read_row(self, row_key, filter_=None): :raises: :class:`ValueError ` if a commit row chunk is never encountered. 
""" - request_pb = _create_row_request( - self.name, row_key=row_key, filter_=filter_, - app_profile_id=self._app_profile_id) - data_client = self._instance._client.table_data_client - if 'read_rows' not in data_client._inner_api_calls: - default_retry = data_client._method_configs['ReadRows'].retry - timeout = data_client._method_configs['ReadRows'].timeout - data_client._inner_api_calls['read_rows'] = \ - wrap_method( - data_client.transport.read_rows, - default_retry=default_retry, - default_timeout=timeout, - client_info=data_client._client_info, - ) - rows_data = PartialRowsData( - data_client._inner_api_calls['read_rows'], - request_pb) - - rows_data.consume_all() - if rows_data.state != rows_data.NEW_ROW: - raise ValueError('The row remains partial / is not committed.') - - if len(rows_data.rows) == 0: - return None - - return rows_data.rows[row_key] + row_set = RowSet() + row_set.add_row_key(row_key) + result_iter = iter(self.read_rows(filter_=filter_, row_set=row_set)) + row = next(result_iter, None) + if next(result_iter, None) is not None: + raise ValueError('More than one row was returned.') + return row def read_rows(self, start_key=None, end_key=None, limit=None, filter_=None, end_inclusive=False, row_set=None): @@ -738,7 +719,7 @@ def __ne__(self, other): return not self == other -def _create_row_request(table_name, row_key=None, start_key=None, end_key=None, +def _create_row_request(table_name, start_key=None, end_key=None, filter_=None, limit=None, end_inclusive=False, app_profile_id=None, row_set=None): """Creates a request to read rows in a table. @@ -746,9 +727,6 @@ def _create_row_request(table_name, row_key=None, start_key=None, end_key=None, :type table_name: str :param table_name: The name of the table to read from. - :type row_key: bytes - :param row_key: (Optional) The key of a specific row to read from. - :type start_key: bytes :param start_key: (Optional) The beginning of a range of row keys to read from. The range will include ``start_key``. If @@ -782,18 +760,9 @@ def _create_row_request(table_name, row_key=None, start_key=None, end_key=None, :rtype: :class:`data_messages_v2_pb2.ReadRowsRequest` :returns: The ``ReadRowsRequest`` protobuf corresponding to the inputs. 
:raises: :class:`ValueError ` if both - ``row_key`` and one of ``start_key`` and ``end_key`` are set + ``row_set`` and one of ``start_key`` or ``end_key`` are set """ request_kwargs = {'table_name': table_name} - if (row_key is not None and - (start_key is not None or end_key is not None)): - raise ValueError('Row key and row range cannot be ' - 'set simultaneously') - - if (row_key is not None and row_set is not None): - raise ValueError('Row key and row set cannot be ' - 'set simultaneously') - if ((start_key is not None or end_key is not None) and row_set is not None): raise ValueError('Row range and row set cannot be ' @@ -808,10 +777,6 @@ def _create_row_request(table_name, row_key=None, start_key=None, end_key=None, message = data_messages_v2_pb2.ReadRowsRequest(**request_kwargs) - if row_key is not None: - row_set = RowSet() - row_set.add_row_key(row_key) - if start_key is not None or end_key is not None: row_set = RowSet() row_set.add_row_range(RowRange(start_key, end_key, diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_data.py b/packages/google-cloud-bigtable/tests/unit/test_row_data.py index 5278d8eb98d8..98e682dc4537 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_data.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_data.py @@ -970,7 +970,8 @@ def _incomplete_final_row(self, testcase_name): client._data_stub.ReadRows.side_effect = [iterator] request = object() prd = self._make_one(client._data_stub.ReadRows, request) - prd.consume_all() + with self.assertRaises(ValueError): + prd.consume_all() self.assertEqual(prd.state, prd.ROW_IN_PROGRESS) expected_result = self._sort_flattend_cells( [result for result in results if not result['error']]) diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index 208139710635..b9dea7e5dab0 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -462,13 +462,15 @@ def test_get_cluster_states(self): result = table.get_cluster_states() self.assertEqual(result, expected_result) - def _read_row_helper(self, chunks, expected_result, app_profile_id=None, - initialized_read_row=True): + def _read_row_helper(self, chunks, expected_result, app_profile_id=None): + from google.cloud._testing import _Monkey from google.cloud.bigtable import table as MUT + from google.cloud.bigtable.row_set import RowSet from google.cloud.bigtable_v2.gapic import bigtable_client from google.cloud.bigtable_admin_v2.gapic import ( bigtable_table_admin_client) + from google.cloud.bigtable.row_filters import RowSampleFilter data_api = bigtable_client.BigtableClient(mock.Mock()) table_api = mock.create_autospec( @@ -484,9 +486,8 @@ def _read_row_helper(self, chunks, expected_result, app_profile_id=None, request_pb = object() # Returned by our mock. mock_created = [] - def mock_create_row_request(table_name, row_key, filter_, - app_profile_id=app_profile_id): - mock_created.append((table_name, row_key, filter_, app_profile_id)) + def mock_create_row_request(table_name, **kwargs): + mock_created.append((table_name, kwargs)) return request_pb # Create response_iterator @@ -499,21 +500,23 @@ def mock_create_row_request(table_name, row_key, filter_, # Patch the stub used by the API method. 
client._table_data_client = data_api client._table_admin_client = table_api - - inner_api_calls = client._table_data_client._inner_api_calls - if initialized_read_row: - inner_api_calls['read_rows'] = mock.Mock( - side_effect=[response_iterator]) + client._table_data_client.transport.read_rows = mock.Mock( + side_effect=[response_iterator]) # Perform the method and check the result. - filter_obj = object() + filter_obj = RowSampleFilter(0.33) + result = None with _Monkey(MUT, _create_row_request=mock_create_row_request): result = table.read_row(self.ROW_KEY, filter_=filter_obj) - + row_set = RowSet() + row_set.add_row_key(self.ROW_KEY) + expected_request = [(table.name, { + 'end_inclusive': False, 'row_set': row_set, + 'app_profile_id': app_profile_id, 'end_key': None, + 'limit': None, 'start_key': None, 'filter_': filter_obj + })] self.assertEqual(result, expected_result) - self.assertEqual(mock_created, - [(table.name, self.ROW_KEY, filter_obj, - app_profile_id)]) + self.assertEqual(mock_created, expected_request) def test_read_row_miss_no__responses(self): self._read_row_helper(None, None) @@ -542,6 +545,29 @@ def test_read_row_complete(self): column.append(Cell.from_pb(chunk)) self._read_row_helper(chunks, expected_result, app_profile_id) + def test_read_row_more_than_one_row_returned(self): + app_profile_id = 'app-profile-id' + chunk_1 = _ReadRowsResponseCellChunkPB( + row_key=self.ROW_KEY, + family_name=self.FAMILY_NAME, + qualifier=self.QUALIFIER, + timestamp_micros=self.TIMESTAMP_MICROS, + value=self.VALUE, + commit_row=True, + ) + chunk_2 = _ReadRowsResponseCellChunkPB( + row_key=self.ROW_KEY_2, + family_name=self.FAMILY_NAME, + qualifier=self.QUALIFIER, + timestamp_micros=self.TIMESTAMP_MICROS, + value=self.VALUE, + commit_row=True + ) + + chunks = [chunk_1, chunk_2] + with self.assertRaises(ValueError): + self._read_row_helper(chunks, None, app_profile_id) + def test_read_row_still_partial(self): chunk = _ReadRowsResponseCellChunkPB( row_key=self.ROW_KEY, @@ -555,14 +581,6 @@ def test_read_row_still_partial(self): with self.assertRaises(ValueError): self._read_row_helper(chunks, None) - def test_read_row_no_inner_api(self): - chunks = [] - with mock.patch( - 'google.cloud.bigtable.table.wrap_method') as patched: - patched.return_value = mock.Mock( - return_value=iter(())) - self._read_row_helper(chunks, None, initialized_read_row=False) - def test_mutate_rows(self): from google.rpc.status_pb2 import Status from google.cloud.bigtable_admin_v2.gapic import ( @@ -1475,13 +1493,14 @@ def test_do_mutate_retryable_rows_mismatch_num_responses(self): class Test__create_row_request(unittest.TestCase): - def _call_fut(self, table_name, row_key=None, start_key=None, end_key=None, + def _call_fut(self, table_name, start_key=None, end_key=None, filter_=None, limit=None, end_inclusive=False, app_profile_id=None, row_set=None): + from google.cloud.bigtable.table import _create_row_request return _create_row_request( - table_name, row_key=row_key, start_key=start_key, end_key=end_key, + table_name, start_key=start_key, end_key=end_key, filter_=filter_, limit=limit, end_inclusive=end_inclusive, app_profile_id=app_profile_id, row_set=row_set) @@ -1492,28 +1511,10 @@ def test_table_name_only(self): table_name=table_name) self.assertEqual(result, expected_result) - def test_row_key_row_range_conflict(self): - with self.assertRaises(ValueError): - self._call_fut(None, row_key=object(), end_key=object()) - - def test_row_key_row_set_conflict(self): - with self.assertRaises(ValueError): - 
self._call_fut(None, row_key=object(), row_set=object()) - def test_row_range_row_set_conflict(self): with self.assertRaises(ValueError): self._call_fut(None, end_key=object(), row_set=object()) - def test_row_key(self): - table_name = 'table_name' - row_key = b'row_key' - result = self._call_fut(table_name, row_key=row_key) - expected_result = _ReadRowsRequestPB( - table_name=table_name, - ) - expected_result.rows.row_keys.append(row_key) - self.assertEqual(result, expected_result) - def test_row_range_start_key(self): table_name = 'table_name' start_key = b'start_key' From 1ba8ec5bb7fb52d6c2307a57dacfe91686df9190 Mon Sep 17 00:00:00 2001 From: Solomon Duskis Date: Fri, 5 Oct 2018 11:38:05 -0400 Subject: [PATCH 188/892] Release bigtable 0.31.0 (#6166) --- packages/google-cloud-bigtable/CHANGELOG.md | 17 +++++++++++++++++ packages/google-cloud-bigtable/setup.py | 2 +- 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index c8a3cd1d07a9..263f82231785 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,23 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## 0.31.0 + +### New Features +- Upgrade support level from `alpha` to `beta`. ([#6129](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6129)) + +### Implementation Changes +- Improve admin operation timeouts. ([#6010](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6010)) + +### Documentation +- Prepare docs for repo split. ([#6014](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6014)) + +### Internal / Testing Changes +- Refactor 'read_row' to call 'read_rows' ([#6137](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6102)) +- Harden instance teardown against '429 Too Many Requests'. ([#6102](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6102)) +- Add `{RowSet,RowRange}.{__eq__,.__ne__}` ([#6025](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6025)) +- Regenerate low-level GAPIC code ([#6036](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6036)) + ## 0.30.2 ### New Features diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 96df78be2a9d..9b93abfb40e8 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -22,7 +22,7 @@ name = 'google-cloud-bigtable' description = 'Google Cloud Bigtable API client library' -version = '0.30.2' +version = '0.31.0' # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' From 6d3e91e3f84ca86d8f82a00c843afa7906f265e7 Mon Sep 17 00:00:00 2001 From: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Date: Wed, 10 Oct 2018 11:04:44 -0700 Subject: [PATCH 189/892] Use new Nox (#6175) --- .../{nox.py => noxfile.py} | 36 ++++--------------- 1 file changed, 7 insertions(+), 29 deletions(-) rename packages/google-cloud-bigtable/{nox.py => noxfile.py} (80%) diff --git a/packages/google-cloud-bigtable/nox.py b/packages/google-cloud-bigtable/noxfile.py similarity index 80% rename from packages/google-cloud-bigtable/nox.py rename to packages/google-cloud-bigtable/noxfile.py index 1e8ce157ee81..28738b0c42fb 100644 --- a/packages/google-cloud-bigtable/nox.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -24,7 +24,6 @@ ) -@nox.session def default(session): """Default unit test session. 
@@ -54,35 +53,20 @@ def default(session): ) -@nox.session -@nox.parametrize('py', ['2.7', '3.5', '3.6', '3.7']) -def unit(session, py): +@nox.session(python=['2.7', '3.5', '3.6', '3.7']) +def unit(session): """Run the unit test suite.""" - - # Run unit tests against all supported versions of Python. - session.interpreter = 'python{}'.format(py) - - # Set the virtualenv dirname. - session.virtualenv_dirname = 'unit-' + py - default(session) -@nox.session -@nox.parametrize('py', ['2.7', '3.7']) -def system(session, py): +@nox.session(python=['2.7', '3.7']) +def system(session): """Run the system test suite.""" # Sanity check: Only run system tests if the environment variable is set. if not os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', ''): session.skip('Credentials must be set via environment variable.') - # Run the system tests against latest Python 2 and Python 3 only. - session.interpreter = 'python{}'.format(py) - - # Set the virtualenv dirname. - session.virtualenv_dirname = 'sys-' + py - # Use pre-release gRPC for system tests. session.install('--pre', 'grpcio') @@ -98,40 +82,34 @@ def system(session, py): session.run('py.test', '--quiet', 'tests/system.py', *session.posargs) -@nox.session +@nox.session(python='3.6') def lint(session): """Run linters. Returns a failure if the linters find linting errors or sufficiently serious code quality issues. """ - session.interpreter = 'python3.6' session.install('flake8', *LOCAL_DEPS) session.install('.') session.run('flake8', 'google', 'tests') -@nox.session +@nox.session(python='3.6') def lint_setup_py(session): """Verify that setup.py is valid (including RST check).""" - session.interpreter = 'python3.6' - - # Set the virtualenv dirname. - session.virtualenv_dirname = 'setup' session.install('docutils', 'Pygments') session.run( 'python', 'setup.py', 'check', '--restructuredtext', '--strict') -@nox.session +@nox.session(python='3.6') def cover(session): """Run the final coverage report. This outputs the coverage report aggregating coverage from the unit test runs (not system test runs), and then erases coverage data. """ - session.interpreter = 'python3.6' session.install('coverage', 'pytest-cov') session.run('coverage', 'report', '--show-missing', '--fail-under=100') session.run('coverage', 'erase') From a20be215b455eb802ddc5f7dfff57e3c98754d25 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Wed, 17 Oct 2018 12:51:09 -0400 Subject: [PATCH 190/892] Docs: normalize use of support level badges (#6159) * Remove badges for deprecated umbrella 'google-cloud' package. * Clarify support levels. - Add explicit section to support linking from sub-package README badges. - Move explanatory text for a support level above the list of packages at that level. * Normalize use of support-level badges in READMEs. - Note that 'error_reporting/README.rst' and 'monitoring/README.rst' are undergoing other edits; they are left out here to avoid conflicts. * Use 'General Avaialblity' for support level. Fix linkx in related API READMEs. * Fix links for alpha support in API READMEs. * Fix links for beta support in API READMEs. 
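Returning briefly to the Nox migration in the preceding patch: the old ``@nox.parametrize`` / ``session.interpreter`` pattern is replaced by passing the interpreters directly to the ``@nox.session`` decorator. A minimal sketch of a session written in the new style follows; the install and test commands are illustrative assumptions, not copied from the repository's ``noxfile.py``:

    import nox

    @nox.session(python=['2.7', '3.5', '3.6', '3.7'])
    def unit(session):
        """Run the unit test suite once per configured interpreter."""
        session.install('mock', 'pytest', 'pytest-cov')
        session.install('-e', '.')
        session.run('py.test', '--quiet', 'tests/unit', *session.posargs)
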
--- packages/google-cloud-bigtable/README.rst | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/README.rst b/packages/google-cloud-bigtable/README.rst index 76ddbb35c076..24632069f1f6 100644 --- a/packages/google-cloud-bigtable/README.rst +++ b/packages/google-cloud-bigtable/README.rst @@ -1,8 +1,7 @@ -Python Client for Google Cloud Bigtable (`Beta`_) -================================================== - -|pypi| |versions| +Python Client for Google Cloud Bigtable +======================================= +|beta| |pypi| |versions| `Google Cloud Bigtable`_ is Google's NoSQL Big Data database service. It's the same database that powers many core Google services, including Search, @@ -11,7 +10,8 @@ Analytics, Maps, and Gmail. - `Client Library Documentation`_ - `Product Documentation`_ -.. _Beta: https://github.com/GoogleCloudPlatform/google-cloud-python/blob/master/README.rst +.. |beta| image:: https://img.shields.io/badge/support-beta-silver.svg + :target: https://github.com/googleapis/google-cloud-python/blob/master/README.rst#beta-support .. |pypi| image:: https://img.shields.io/pypi/v/google-cloud-bigtable.svg :target: https://pypi.org/project/google-cloud-bigtable/ .. |versions| image:: https://img.shields.io/pypi/pyversions/google-cloud-bigtable.svg From 8362250a4f9b854062b2ae0cc02bcbaf68508b30 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 23 Oct 2018 15:43:33 -0400 Subject: [PATCH 191/892] Bigtable: deprecate 'channel' arg to 'Client' (#6279) We can't do anything with it, because it conflicts with the credentials we (always) have. Also, add explicit tests for 'Client' constructor, including assertions for all attributes. --- .../google/cloud/bigtable/client.py | 19 ++- .../tests/unit/test_client.py | 135 +++++++++++------- 2 files changed, 97 insertions(+), 57 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py index 1ef9e072199c..f4786a7f44e3 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py @@ -27,7 +27,7 @@ * a :class:`~google.cloud.bigtable.table.Table` owns a :class:`~google.cloud.bigtable.row.Row` (and all the cells in the row) """ - +import warnings from google.api_core.gapic_v1 import client_info @@ -87,10 +87,10 @@ class Client(ClientWithProject): requires the :const:`ADMIN_SCOPE`. Defaults to :data:`False`. :type channel: :instance: grpc.Channel - :param channel (grpc.Channel): (Optional) A ``Channel`` instance - through which to make calls. This argument is mutually - exclusive with ``credentials``; providing both will raise an - exception. + :param channel (grpc.Channel): (Optional) DEPRECATED: + A ``Channel`` instance through which to make calls. + This argument is mutually exclusive with ``credentials``; + providing both will raise an exception. No longer used. :raises: :class:`ValueError ` if both ``read_only`` and ``admin`` are :data:`True` @@ -109,6 +109,12 @@ def __init__(self, project=None, credentials=None, # It **may** use those scopes in ``with_scopes_if_required``. 
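Deprecating the 'channel' argument means a caller who still passes it gets a DeprecationWarning while the client is constructed normally and the argument is otherwise ignored. A rough sketch of that behaviour; the anonymous credentials and project ID below are stand-ins for illustration only:

    import warnings

    from google.auth.credentials import AnonymousCredentials
    from google.cloud.bigtable import Client

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        # 'channel' is still accepted for backwards compatibility,
        # but it is no longer used by the client.
        client = Client(project='my-project',
                        credentials=AnonymousCredentials(),
                        channel=object())

    assert any(issubclass(w.category, DeprecationWarning) for w in caught)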
self._read_only = bool(read_only) self._admin = bool(admin) + + if channel is not None: + warnings.warn( + "'channel' is deprecated and no longer used.", + DeprecationWarning, stacklevel=2) + self._channel = channel self.SCOPE = self._get_scopes() super(Client, self).__init__(project=project, credentials=credentials) @@ -145,8 +151,7 @@ def project_path(self): :rtype: str :returns: Return a fully-qualified project string. """ - instance_client = self.instance_admin_client - return instance_client.project_path(self.project) + return self.instance_admin_client.project_path(self.project) @property def table_data_client(self): diff --git a/packages/google-cloud-bigtable/tests/unit/test_client.py b/packages/google-cloud-bigtable/tests/unit/test_client.py index 95937f0957ed..343e4e556114 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/test_client.py @@ -36,6 +36,49 @@ def _get_target_class(): def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) + def test_constructor_defaults(self): + from google.cloud.bigtable.client import DATA_SCOPE + + credentials = _make_credentials() + + with mock.patch('google.auth.default') as mocked: + mocked.return_value = credentials, self.PROJECT + client = self._make_one() + + self.assertEqual(client.project, self.PROJECT) + self.assertIs( + client._credentials, credentials.with_scopes.return_value) + self.assertFalse(client._read_only) + self.assertFalse(client._admin) + self.assertIsNone(client._channel) + self.assertEqual(client.SCOPE, (DATA_SCOPE,)) + + def test_constructor_explicit(self): + import warnings + from google.cloud.bigtable.client import ADMIN_SCOPE + from google.cloud.bigtable.client import DATA_SCOPE + + credentials = _make_credentials() + + with warnings.catch_warnings(record=True) as warned: + client = self._make_one( + project=self.PROJECT, + credentials=credentials, + read_only=False, + admin=True, + channel=mock.sentinel.channel, + ) + + self.assertEqual(len(warned), 1) + + self.assertEqual(client.project, self.PROJECT) + self.assertIs( + client._credentials, credentials.with_scopes.return_value) + self.assertFalse(client._read_only) + self.assertTrue(client._admin) + self.assertIs(client._channel, mock.sentinel.channel) + self.assertEqual(client.SCOPE, (DATA_SCOPE, ADMIN_SCOPE)) + def test_constructor_both_admin_and_read_only(self): credentials = _make_credentials() with self.assertRaises(ValueError): @@ -68,15 +111,7 @@ def test__get_scopes_read_only(self): read_only=True) self.assertEqual(client._get_scopes(), (READ_ONLY_SCOPE,)) - def test_credentials_getter(self): - credentials = _make_credentials() - project = 'PROJECT' - client = self._make_one( - project=project, credentials=credentials) - self.assertIs(client._credentials, - credentials.with_scopes.return_value) - - def test_project_name_property(self): + def test_project_path_property(self): credentials = _make_credentials() project = 'PROJECT' client = self._make_one(project=project, credentials=credentials, @@ -84,47 +119,6 @@ def test_project_name_property(self): project_name = 'projects/' + project self.assertEqual(client.project_path, project_name) - def test_instance_factory_defaults(self): - from google.cloud.bigtable.instance import Instance - - PROJECT = 'PROJECT' - INSTANCE_ID = 'instance-id' - credentials = _make_credentials() - client = self._make_one( - project=PROJECT, credentials=credentials) - - instance = client.instance(INSTANCE_ID) - - 
self.assertIsInstance(instance, Instance) - self.assertEqual(instance.instance_id, INSTANCE_ID) - self.assertEqual(instance.display_name, INSTANCE_ID) - self.assertIsNone(instance.type_) - self.assertIsNone(instance.labels) - self.assertIs(instance._client, client) - - def test_instance_factory_non_defaults(self): - from google.cloud.bigtable.instance import Instance - from google.cloud.bigtable import enums - - PROJECT = 'PROJECT' - INSTANCE_ID = 'instance-id' - DISPLAY_NAME = 'display-name' - instance_type = enums.Instance.Type.DEVELOPMENT - labels = {'foo': 'bar'} - credentials = _make_credentials() - client = self._make_one( - project=PROJECT, credentials=credentials) - - instance = client.instance(INSTANCE_ID, display_name=DISPLAY_NAME, - instance_type=instance_type, labels=labels) - - self.assertIsInstance(instance, Instance) - self.assertEqual(instance.instance_id, INSTANCE_ID) - self.assertEqual(instance.display_name, DISPLAY_NAME) - self.assertEqual(instance.type_, instance_type) - self.assertEqual(instance.labels, labels) - self.assertIs(instance._client, client) - def test_table_data_client_not_initialized(self): from google.cloud.bigtable_v2 import BigtableClient @@ -194,6 +188,47 @@ def test_instance_admin_client_initialized(self): already = client._instance_admin_client = object() self.assertIs(client.instance_admin_client, already) + def test_instance_factory_defaults(self): + from google.cloud.bigtable.instance import Instance + + PROJECT = 'PROJECT' + INSTANCE_ID = 'instance-id' + credentials = _make_credentials() + client = self._make_one( + project=PROJECT, credentials=credentials) + + instance = client.instance(INSTANCE_ID) + + self.assertIsInstance(instance, Instance) + self.assertEqual(instance.instance_id, INSTANCE_ID) + self.assertEqual(instance.display_name, INSTANCE_ID) + self.assertIsNone(instance.type_) + self.assertIsNone(instance.labels) + self.assertIs(instance._client, client) + + def test_instance_factory_non_defaults(self): + from google.cloud.bigtable.instance import Instance + from google.cloud.bigtable import enums + + PROJECT = 'PROJECT' + INSTANCE_ID = 'instance-id' + DISPLAY_NAME = 'display-name' + instance_type = enums.Instance.Type.DEVELOPMENT + labels = {'foo': 'bar'} + credentials = _make_credentials() + client = self._make_one( + project=PROJECT, credentials=credentials) + + instance = client.instance(INSTANCE_ID, display_name=DISPLAY_NAME, + instance_type=instance_type, labels=labels) + + self.assertIsInstance(instance, Instance) + self.assertEqual(instance.instance_id, INSTANCE_ID) + self.assertEqual(instance.display_name, DISPLAY_NAME) + self.assertEqual(instance.type_, instance_type) + self.assertEqual(instance.labels, labels) + self.assertIs(instance._client, client) + def test_list_instances(self): from google.cloud.bigtable_admin_v2.proto import ( instance_pb2 as data_v2_pb2) From 76bf194b35e770341c04e303153c6da4bc348408 Mon Sep 17 00:00:00 2001 From: Nico Kemnitz Date: Wed, 24 Oct 2018 17:14:39 -0400 Subject: [PATCH 192/892] Bigtable: Fix ConditionalRow interaction with check_and_mutate_row (#6296) --- .../google-cloud-bigtable/google/cloud/bigtable/row.py | 8 ++++++-- packages/google-cloud-bigtable/tests/unit/test_row.py | 2 +- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row.py index ee6c8d0ea5fa..13fcbca885b6 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row.py +++ 
b/packages/google-cloud-bigtable/google/cloud/bigtable/row.py @@ -520,9 +520,13 @@ def commit(self): data_client = self._table._instance._client.table_data_client resp = data_client.check_and_mutate_row( - table_name=self._table.name, row_key=self._row_key,) + table_name=self._table.name, + row_key=self._row_key, + predicate_filter=self._filter.to_pb(), + true_mutations=true_mutations, + false_mutations=false_mutations) self.clear() - return resp[0].predicate_matched + return resp.predicate_matched # pylint: disable=arguments-differ def set_cell(self, column_family_id, column, value, timestamp=None, diff --git a/packages/google-cloud-bigtable/tests/unit/test_row.py b/packages/google-cloud-bigtable/tests/unit/test_row.py index 4e87c15c6bf6..f4b94f9d2f8a 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row.py @@ -445,7 +445,7 @@ def test_commit(self): predicate_matched=predicate_matched) # Patch the stub used by the API method. - api.transport.check_and_mutate_row.side_effect = [[response_pb]] + api.transport.check_and_mutate_row.side_effect = [response_pb] client._table_data_client = api # Create expected_result. From 7b3d8f55bfbe85beb33563705e59526358ef76a2 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Thu, 25 Oct 2018 18:02:14 -0400 Subject: [PATCH 193/892] Fix error from new flake8 version. (#6309) Presumably, anyway, as the line of code in question hasn't changed for almost a year. --- packages/google-cloud-bigtable/google/cloud/bigtable/table.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index a164b3b58b4f..ccf6c039b39a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -559,7 +559,7 @@ def __call__(self, retry=DEFAULT_RETRY): try: mutate_rows() - except (_BigtableRetryableError, RetryError) as err: + except (_BigtableRetryableError, RetryError): # - _BigtableRetryableError raised when no retry strategy is used # and a retryable error on a mutation occurred. # - RetryError raised when retry deadline is reached. From cafcbbc2ddaae9047c2bbdaa1eecaf6616b0443d Mon Sep 17 00:00:00 2001 From: sangramql <39852271+sangramql@users.noreply.github.com> Date: Tue, 30 Oct 2018 00:10:48 +0530 Subject: [PATCH 194/892] Bigtable: add 'docs/snippets.py' and test (#6012) --- .../google-cloud-bigtable/docs/snippets.py | 406 ++++++++++++++++++ .../google/cloud/bigtable/client.py | 18 + .../google/cloud/bigtable/cluster.py | 33 +- .../google/cloud/bigtable/instance.py | 112 +++-- packages/google-cloud-bigtable/noxfile.py | 21 +- 5 files changed, 556 insertions(+), 34 deletions(-) create mode 100644 packages/google-cloud-bigtable/docs/snippets.py diff --git a/packages/google-cloud-bigtable/docs/snippets.py b/packages/google-cloud-bigtable/docs/snippets.py new file mode 100644 index 000000000000..01564fca50b5 --- /dev/null +++ b/packages/google-cloud-bigtable/docs/snippets.py @@ -0,0 +1,406 @@ +#!/usr/bin/env python + +# Copyright 2018, Google LLC +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Testable usage examples for Google Cloud Bigtable API wrapper + +Each example function takes a ``client`` argument (which must be an instance +of :class:`google.cloud.bigtable.client.Client`) and uses it to perform a task +with the API. + +To facilitate running the examples as system tests, each example is also passed +a ``to_delete`` list; the function adds to the list any objects created which +need to be deleted during teardown. + +.. note:: + This file is under progress and will be updated with more guidance from + the team. Unit tests will be added with guidance from the team. + +""" + +import datetime +import pytest + +from test_utils.system import unique_resource_id +from google.cloud._helpers import UTC +from google.cloud.bigtable import Client +from google.cloud.bigtable import enums + + +INSTANCE_ID = "snippet-" + unique_resource_id('-') +CLUSTER_ID = "clus-1-" + unique_resource_id('-') +LOCATION_ID = 'us-central1-f' +ALT_LOCATION_ID = 'us-central1-a' +PRODUCTION = enums.Instance.Type.PRODUCTION +SERVER_NODES = 3 +STORAGE_TYPE = enums.StorageType.SSD +LABEL_KEY = u'python-snippet' +LABEL_STAMP = datetime.datetime.utcnow() \ + .replace(microsecond=0, tzinfo=UTC,) \ + .strftime("%Y-%m-%dt%H-%M-%S") +LABELS = {LABEL_KEY: str(LABEL_STAMP)} + + +class Config(object): + """Run-time configuration to be modified at set-up. + + This is a mutable stand-in to allow test set-up to modify + global state. + """ + INSTANCE = None + + +def setup_module(): + client = Client(admin=True) + Config.INSTANCE = client.instance(INSTANCE_ID, + instance_type=PRODUCTION, + labels=LABELS) + cluster = Config.INSTANCE.cluster(CLUSTER_ID, + location_id=LOCATION_ID, + serve_nodes=SERVER_NODES, + default_storage_type=STORAGE_TYPE) + operation = Config.INSTANCE.create(clusters=[cluster]) + # We want to make sure the operation completes. + operation.result(timeout=100) + + +def teardown_module(): + Config.INSTANCE.delete() + + +def test_bigtable_create_instance(): + # [START bigtable_create_prod_instance] + from google.cloud.bigtable import Client + from google.cloud.bigtable import enums + + my_instance_id = "inst-my-" + unique_resource_id('-') + my_cluster_id = "clus-my-" + unique_resource_id('-') + location_id = 'us-central1-f' + serve_nodes = 3 + storage_type = enums.StorageType.SSD + production = enums.Instance.Type.PRODUCTION + labels = {'prod-label': 'prod-label'} + + client = Client(admin=True) + instance = client.instance(my_instance_id, instance_type=production, + labels=labels) + cluster = instance.cluster(my_cluster_id, location_id=location_id, + serve_nodes=serve_nodes, + default_storage_type=storage_type) + operation = instance.create(clusters=[cluster]) + # We want to make sure the operation completes. + operation.result(timeout=100) + # [END bigtable_create_prod_instance] + assert instance.exists() + instance.delete() + + +def test_bigtable_create_additional_cluster(): + # [START bigtable_create_cluster] + from google.cloud.bigtable import Client + from google.cloud.bigtable import enums + + # Assuming that there is an existing instance with `INSTANCE_ID` + # on the server already. 
+ # to create an instance see + # 'https://cloud.google.com/bigtable/docs/creating-instance' + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + + cluster_id = "clus-my-" + unique_resource_id('-') + location_id = 'us-central1-a' + serve_nodes = 3 + storage_type = enums.StorageType.SSD + + cluster = instance.cluster(cluster_id, location_id=location_id, + serve_nodes=serve_nodes, + default_storage_type=storage_type) + operation = cluster.create() + # We want to make sure the operation completes. + operation.result(timeout=100) + # [END bigtable_create_cluster] + assert cluster.exists() + + cluster.delete() + + +def test_bigtable_create_app_profile(): + # [START bigtable_create_app_profile] + from google.cloud.bigtable import Client + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + + app_profile_id = "app-prof-" + unique_resource_id('-') + description = 'routing policy-multy' + routing_policy_type = enums.RoutingPolicyType.ANY + + app_profile = instance.app_profile( + app_profile_id=app_profile_id, + routing_policy_type=routing_policy_type, + description=description, + cluster_id=CLUSTER_ID) + + app_profile = app_profile.create(ignore_warnings=True) + # [END bigtable_create_app_profile] + assert app_profile.exists() + + app_profile.delete(ignore_warnings=True) + + +def test_bigtable_list_instances(): + # [START bigtable_list_instances] + from google.cloud.bigtable import Client + + client = Client(admin=True) + (instances_list, failed_locations_list) = client.list_instances() + # [END bigtable_list_instances] + assert len(instances_list) is not 0 + + +def test_bigtable_list_clusters_on_instance(): + # [START bigtable_list_clusters_on_instance] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + (clusters_list, failed_locations_list) = instance.list_clusters() + # [END bigtable_list_clusters_on_instance] + assert len(clusters_list) is not 0 + + +def test_bigtable_list_clusters_in_project(): + # [START bigtable_list_clusters_in_project] + from google.cloud.bigtable import Client + + client = Client(admin=True) + (clusters_list, failed_locations_list) = client.list_clusters() + # [END bigtable_list_clusters_in_project] + assert len(clusters_list) is not 0 + + +def test_bigtable_list_app_profiles(): + # [START bigtable_list_app_profiles] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + # [END bigtable_list_app_profiles] + + app_profile = instance.app_profile( + app_profile_id="app-prof-" + unique_resource_id('-'), + routing_policy_type=enums.RoutingPolicyType.ANY) + app_profile = app_profile.create(ignore_warnings=True) + + # [START bigtable_list_app_profiles] + app_profiles_list = instance.list_app_profiles() + # [END bigtable_list_app_profiles] + assert len(app_profiles_list) is not 0 + + +def test_bigtable_instance_exists(): + # [START bigtable_check_instance_exists] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + instance_exists = instance.exists() + # [END bigtable_check_instance_exists] + assert instance_exists + + +def test_bigtable_cluster_exists(): + # [START bigtable_check_cluster_exists] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + cluster = instance.cluster(CLUSTER_ID) + cluster_exists = cluster.exists() + # [END bigtable_check_cluster_exists] + assert 
cluster_exists + + +def test_bigtable_reload_instance(): + # [START bigtable_reload_instance] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + instance.reload() + # [END bigtable_reload_instance] + assert instance.type_ is PRODUCTION.value + + +def test_bigtable_reload_cluster(): + # [START bigtable_reload_cluster] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + cluster = instance.cluster(CLUSTER_ID) + cluster.reload() + # [END bigtable_reload_cluster] + assert cluster.serve_nodes is SERVER_NODES + + +def test_bigtable_update_instance(): + # [START bigtable_update_instance] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + display_name = "My new instance" + instance.display_name = display_name + instance.update() + # [END bigtable_update_instance] + assert instance.display_name is display_name + + +def test_bigtable_update_cluster(): + # [START bigtable_update_cluster] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + cluster = instance.cluster(CLUSTER_ID) + cluster.serve_nodes = 8 + cluster.update() + # [END bigtable_update_cluster] + assert cluster.serve_nodes is 8 + + +def test_bigtable_create_table(): + # [START bigtable_create_table] + from google.cloud.bigtable import Client + from google.cloud.bigtable import column_family + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table("table_my") + # Define the GC policy to retain only the most recent 2 versions. + max_versions_rule = column_family.MaxVersionsGCRule(2) + table.create(column_families={'cf1': max_versions_rule}) + # [END bigtable_create_table] + assert table.exists() + + +def test_bigtable_list_tables(): + # [START bigtable_list_tables] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + tables_list = instance.list_tables() + # [END bigtable_list_tables] + assert len(tables_list) is not 0 + + +def test_bigtable_delete_cluster(): + # [START bigtable_delete_cluster] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + cluster_id = "clus-my-" + unique_resource_id('-') + # [END bigtable_delete_cluster] + + cluster = instance.cluster(cluster_id, location_id=ALT_LOCATION_ID, + serve_nodes=SERVER_NODES, + default_storage_type=STORAGE_TYPE) + operation = cluster.create() + # We want to make sure the operation completes. + operation.result(timeout=1000) + + # [START bigtable_delete_cluster] + cluster_to_delete = instance.cluster(cluster_id) + cluster_to_delete.delete() + # [END bigtable_delete_cluster] + assert not cluster_to_delete.exists() + + +def test_bigtable_delete_instance(): + # [START bigtable_delete_instance] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance_id_to_delete = "inst-my-" + unique_resource_id('-') + # [END bigtable_delete_instance] + cluster_id = "clus-my-" + unique_resource_id('-') + + instance = client.instance(instance_id_to_delete, + instance_type=PRODUCTION, + labels=LABELS) + cluster = instance.cluster(cluster_id, + location_id=ALT_LOCATION_ID, + serve_nodes=SERVER_NODES, + default_storage_type=STORAGE_TYPE) + operation = instance.create(clusters=[cluster]) + # We want to make sure the operation completes. 
+ operation.result(timeout=100) + + # [START bigtable_delete_instance] + instance_to_delete = client.instance(instance_id_to_delete) + instance_to_delete.delete() + # [END bigtable_delete_instance] + assert not instance_to_delete.exists() + + +def test_bigtable_test_iam_permissions(): + # [START bigtable_test_iam_permissions] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + instance.reload() + permissions = ["bigtable.clusters.create", "bigtable.tables.create"] + permissions_allowed = instance.test_iam_permissions(permissions) + # [END bigtable_test_iam_permissions] + assert permissions_allowed == permissions + + +def test_bigtable_get_iam_policy(): + # [START bigtable_get_iam_policy] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + policy_latest = instance.get_iam_policy() + # [END bigtable_get_iam_policy] + + assert len(policy_latest.bigtable_viewers) is not 0 + + +def test_bigtable_set_iam_policy(): + # [START bigtable_set_iam_policy] + from google.cloud.bigtable import Client + from google.cloud.bigtable.policy import Policy + from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + instance.reload() + ins_policy = Policy() + ins_policy[BIGTABLE_ADMIN_ROLE] = [ + Policy.user("test_iam@test.com"), + Policy.service_account("sv_account@gmail.com")] + + policy_latest = instance.set_iam_policy(ins_policy) + # [END bigtable_set_iam_policy] + + assert len(policy_latest.bigtable_admins) is not 0 + + +if __name__ == '__main__': + pytest.main() diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py index f4786a7f44e3..4ac61913ba4c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py @@ -208,6 +208,12 @@ def instance(self, instance_id, display_name=None, instance_type=None, labels=None): """Factory to create a instance associated with this client. + For example: + + .. literalinclude:: snippets.py + :start-after: [START bigtable_create_prod_instance] + :end-before: [END bigtable_create_prod_instance] + :type instance_id: str :param instance_id: The ID of the instance. @@ -246,6 +252,12 @@ def instance(self, instance_id, display_name=None, def list_instances(self): """List instances owned by the project. + For example: + + .. literalinclude:: snippets.py + :start-after: [START bigtable_list_instances] + :end-before: [END bigtable_list_instances] + :rtype: tuple :returns: (instances, failed_locations), where 'instances' is list of @@ -261,6 +273,12 @@ def list_instances(self): def list_clusters(self): """List the clusters in the project. + For example: + + .. 
literalinclude:: snippets.py + :start-after: [START bigtable_list_clusters_in_project] + :end-before: [END bigtable_list_clusters_in_project] + :rtype: tuple :returns: (clusters, failed_locations), where 'clusters' is list of diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py index b5032f805f10..1b3fe559c3c7 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py @@ -172,7 +172,14 @@ def __ne__(self, other): return not self == other def reload(self): - """Reload the metadata for this cluster.""" + """Reload the metadata for this cluster. + + For example: + + .. literalinclude:: snippets.py + :start-after: [START bigtable_reload_cluster] + :end-before: [END bigtable_reload_cluster] + """ cluster_pb = self._instance._client.instance_admin_client.get_cluster( self.name) @@ -183,6 +190,12 @@ def reload(self): def exists(self): """Check whether the cluster already exists. + For example: + + .. literalinclude:: snippets.py + :start-after: [START bigtable_check_cluster_exists] + :end-before: [END bigtable_check_cluster_exists] + :rtype: bool :returns: True if the table exists, else False. """ @@ -197,6 +210,12 @@ def exists(self): def create(self): """Create this cluster. + For example: + + .. literalinclude:: snippets.py + :start-after: [START bigtable_create_cluster] + :end-before: [END bigtable_create_cluster] + .. note:: Uses the ``project``, ``instance`` and ``cluster_id`` on the @@ -223,6 +242,12 @@ def create(self): def update(self): """Update this cluster. + For example: + + .. literalinclude:: snippets.py + :start-after: [START bigtable_update_cluster] + :end-before: [END bigtable_update_cluster] + .. note:: Updates the ``serve_nodes``. If you'd like to @@ -260,6 +285,12 @@ def update(self): def delete(self): """Delete this cluster. + For example: + + .. literalinclude:: snippets.py + :start-after: [START bigtable_delete_cluster] + :end-before: [END bigtable_delete_cluster] + Marks a cluster and all of its tables for permanent deletion in 7 days. Immediately upon completion of the request: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py index b09470dc60b7..17e373673f64 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py @@ -187,7 +187,14 @@ def __ne__(self, other): return not self == other def reload(self): - """Reload the metadata for this instance.""" + """Reload the metadata for this instance. + + For example: + + .. literalinclude:: snippets.py + :start-after: [START bigtable_reload_instance] + :end-before: [END bigtable_reload_instance] + """ instance_pb = self._client.instance_admin_client.get_instance( self.name) @@ -198,6 +205,12 @@ def reload(self): def exists(self): """Check whether the instance already exists. + For example: + + .. literalinclude:: snippets.py + :start-after: [START bigtable_check_instance_exists] + :end-before: [END bigtable_check_instance_exists] + :rtype: bool :returns: True if the table exists, else False. """ @@ -213,6 +226,12 @@ def create(self, location_id=None, default_storage_type=None, clusters=None): """Create this instance. + For example: + + .. 
literalinclude:: snippets.py + :start-after: [START bigtable_create_prod_instance] + :end-before: [END bigtable_create_prod_instance] + .. note:: Uses the ``project`` and ``instance_id`` on the current @@ -287,6 +306,12 @@ def create(self, location_id=None, def update(self): """Updates an instance within a project. + For example: + + .. literalinclude:: snippets.py + :start-after: [START bigtable_update_instance] + :end-before: [END bigtable_update_instance] + .. note:: Updates any or all of the following values: @@ -324,6 +349,12 @@ def update(self): def delete(self): """Delete this instance. + For example: + + .. literalinclude:: snippets.py + :start-after: [START bigtable_delete_instance] + :end-before: [END bigtable_delete_instance] + Marks an instance and all of its tables for permanent deletion in 7 days. @@ -349,6 +380,12 @@ def cluster(self, cluster_id, location_id=None, serve_nodes=None, default_storage_type=None): """Factory to create a cluster associated with this instance. + For example: + + .. literalinclude:: snippets.py + :start-after: [START bigtable_create_cluster] + :end-before: [END bigtable_create_cluster] + :type cluster_id: str :param cluster_id: The ID of the cluster. @@ -385,6 +422,12 @@ def cluster(self, cluster_id, location_id=None, def list_clusters(self): """List the clusters in this instance. + For example: + + .. literalinclude:: snippets.py + :start-after: [START bigtable_list_clusters_on_instance] + :end-before: [END bigtable_list_clusters_on_instance] + :rtype: tuple :returns: (clusters, failed_locations), where 'clusters' is list of @@ -400,6 +443,12 @@ def list_clusters(self): def table(self, table_id, app_profile_id=None): """Factory to create a table associated with this instance. + For example: + + .. literalinclude:: snippets.py + :start-after: [START bigtable_create_table] + :end-before: [END bigtable_create_table] + :type table_id: str :param table_id: The ID of the table. @@ -414,6 +463,12 @@ def table(self, table_id, app_profile_id=None): def list_tables(self): """List the tables in this instance. + For example: + + .. literalinclude:: snippets.py + :start-after: [START bigtable_list_tables] + :end-before: [END bigtable_list_tables] + :rtype: list of :class:`Table ` :returns: The list of tables owned by the instance. :raises: :class:`ValueError ` if one of the @@ -439,6 +494,12 @@ def app_profile(self, app_profile_id, allow_transactional_writes=None): """Factory to create AppProfile associated with this instance. + For example: + + .. literalinclude:: snippets.py + :start-after: [START bigtable_create_app_profile] + :end-before: [END bigtable_create_app_profile] + :type app_profile_id: str :param app_profile_id: The ID of the AppProfile. Must be of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. @@ -475,6 +536,12 @@ def app_profile(self, app_profile_id, def list_app_profiles(self): """Lists information about AppProfiles in an instance. + For example: + + .. literalinclude:: snippets.py + :start-after: [START bigtable_list_app_profiles] + :end-before: [END bigtable_list_app_profiles] + :rtype: :list:[`~google.cloud.bigtable.app_profile.AppProfile`] :returns: A :list:[`~google.cloud.bigtable.app_profile.AppProfile`]. By default, this is a list of @@ -487,15 +554,11 @@ def list_app_profiles(self): def get_iam_policy(self): """Gets the access control policy for an instance resource. - .. 
code-block:: python - - from google.cloud.bigtable.client import Client - from google.cloud.bigtable.policy import Policy + For example: - client = Client(admin=True) - instance = client.instance('[INSTANCE_ID]') - policy_latest = instance.get_iam_policy() - print (policy_latest.bigtable_viewers) + .. literalinclude:: snippets.py + :start-after: [START bigtable_get_iam_policy] + :end-before: [END bigtable_get_iam_policy] :rtype: :class:`google.cloud.bigtable.policy.Policy` :returns: The current IAM policy of this instance @@ -511,21 +574,11 @@ def set_iam_policy(self, policy): For more information about policy, please see documentation of class `google.cloud.bigtable.policy.Policy` - .. code-block:: python + For example: - from google.cloud.bigtable.client import Client - from google.cloud.bigtable.policy import Policy - from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE - - client = Client(admin=True) - instance = client.instance('[INSTANCE_ID]') - ins_policy = instance.get_iam_policy() - ins_policy[BIGTABLE_ADMIN_ROLE] = [ - Policy.user("test_iam@test.com"), - Policy.service_account("sv_account@gmail.com")] - - policy_latest = instance.set_iam_policy() - print (policy_latest.bigtable_admins) + .. literalinclude:: snippets.py + :start-after: [START bigtable_set_iam_policy] + :end-before: [END bigtable_set_iam_policy] :type policy: :class:`google.cloud.bigtable.policy.Policy` :param policy: A new IAM policy to replace the current IAM policy @@ -543,16 +596,11 @@ def test_iam_permissions(self, permissions): """Returns permissions that the caller has on the specified instance resource. - .. code-block:: python - - from google.cloud.bigtable.client import Client + For example: - client = Client(admin=True) - instance = client.instance('[INSTANCE_ID]') - permissions = ["bigtable.tables.create", - "bigtable.clusters.create"] - permissions_allowed = instance.test_iam_permissions(permissions) - print (permissions_allowed) + .. literalinclude:: snippets.py + :start-after: [START bigtable_test_iam_permissions] + :end-before: [END bigtable_test_iam_permissions] :type permissions: list :param permissions: The set of permissions to check for diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index 28738b0c42fb..dfef10881d03 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -91,7 +91,7 @@ def lint(session): """ session.install('flake8', *LOCAL_DEPS) session.install('.') - session.run('flake8', 'google', 'tests') + session.run('flake8', 'google', 'tests', 'docs') @nox.session(python='3.6') @@ -113,3 +113,22 @@ def cover(session): session.install('coverage', 'pytest-cov') session.run('coverage', 'report', '--show-missing', '--fail-under=100') session.run('coverage', 'erase') + + +@nox.session(python='3.7') +def snippets(session): + """Run the system test suite.""" + # Sanity check: Only run system tests if the environment variable is set. + if not os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', ''): + session.skip('Credentials must be set via environment variable.') + + # Install all test dependencies, then install local packages in place. 
+ session.install('mock', 'pytest') + for local_dep in LOCAL_DEPS: + session.install('-e', local_dep) + session.install('-e', os.path.join('..', 'bigtable')) + session.install('-e', '../test_utils/') + session.install('-e', '.') + session.run('py.test', '--quiet', \ + os.path.join('docs', 'snippets.py'), \ + *session.posargs) From 3431c4cba05fa9b46a17c5d6cd9d00f015344066 Mon Sep 17 00:00:00 2001 From: sumit-ql <39561577+sumit-ql@users.noreply.github.com> Date: Wed, 31 Oct 2018 00:01:47 +0530 Subject: [PATCH 195/892] Bigtable: Add retry parameter to 'Table.read_rows()'. (#6281) Closes #6186 --- .../google/cloud/bigtable/row_data.py | 31 +++++- .../google/cloud/bigtable/table.py | 16 +++- .../tests/unit/test_row_data.py | 62 +++++++++++- .../tests/unit/test_table.py | 96 ++++++++++++++----- 4 files changed, 172 insertions(+), 33 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py index 44a01bc0751b..6e09d5fc6a84 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py @@ -305,6 +305,20 @@ def _retry_read_rows_exception(exc): exceptions.DeadlineExceeded)) +DEFAULT_RETRY_READ_ROWS = retry.Retry( + predicate=_retry_read_rows_exception, + initial=1.0, + maximum=15.0, + multiplier=2.0, + deadline=60.0, # 60 seconds +) +"""The default retry strategy to be used on retry-able errors. + +Used by +:meth:`~google.cloud.bigtable.row_data.PartialRowsData._read_next_response`. +""" + + class PartialRowsData(object): """Convenience wrapper for consuming a ``ReadRows`` streaming response. @@ -319,6 +333,14 @@ class PartialRowsData(object): identified by self.last_scanned_row_key. The retry happens inside of the Retry class, using a predicate for the expected exceptions during iteration. + + :type retry: :class:`~google.api_core.retry.Retry` + :param retry: (Optional) Retry delay and deadline arguments. To override, + the default value :attr:`DEFAULT_RETRY_READ_ROWS` can be + used and modified with the + :meth:`~google.api_core.retry.Retry.with_delay` method + or the + :meth:`~google.api_core.retry.Retry.with_deadline` method. """ NEW_ROW = 'New row' # No cells yet complete for row @@ -333,7 +355,8 @@ class PartialRowsData(object): STATE_ROW_IN_PROGRESS: ROW_IN_PROGRESS, STATE_CELL_IN_PROGRESS: CELL_IN_PROGRESS} - def __init__(self, read_method, request): + def __init__(self, read_method, request, + retry=DEFAULT_RETRY_READ_ROWS): # Counter for rows returned to the user self._counter = 0 # In-progress row, unset until first response, after commit/reset @@ -349,6 +372,7 @@ def __init__(self, read_method, request): self.last_scanned_row_key = None self.read_method = read_method self.request = request + self.retry = retry self.response_iterator = read_method(request) self.rows = {} @@ -405,10 +429,7 @@ def _read_next(self): def _read_next_response(self): """Helper for :meth:`__iter__`.""" - retry_ = retry.Retry( - predicate=_retry_read_rows_exception, - deadline=60) - return retry_(self._read_next, on_error=self._on_error)() + return self.retry(self._read_next, on_error=self._on_error)() def __iter__(self): """Consume the ``ReadRowsResponse``s from the stream. 
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index ccf6c039b39a..bf1da4e7cc00 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -31,6 +31,7 @@ from google.cloud.bigtable.row import ConditionalRow from google.cloud.bigtable.row import DirectRow from google.cloud.bigtable.row_data import PartialRowsData +from google.cloud.bigtable.row_data import DEFAULT_RETRY_READ_ROWS from google.cloud.bigtable.row_set import RowSet from google.cloud.bigtable.row_set import RowRange from google.cloud.bigtable import enums @@ -60,7 +61,7 @@ class _BigtableRetryableError(Exception): multiplier=2.0, deadline=120.0, # 2 minutes ) -"""The default retry stategy to be used on retry-able errors. +"""The default retry strategy to be used on retry-able errors. Used by :meth:`~google.cloud.bigtable.table.Table.mutate_rows`. """ @@ -298,7 +299,8 @@ def read_row(self, row_key, filter_=None): return row def read_rows(self, start_key=None, end_key=None, limit=None, - filter_=None, end_inclusive=False, row_set=None): + filter_=None, end_inclusive=False, row_set=None, + retry=DEFAULT_RETRY_READ_ROWS): """Read rows from this table. :type start_key: bytes @@ -329,6 +331,14 @@ def read_rows(self, start_key=None, end_key=None, limit=None, :param filter_: (Optional) The row set containing multiple row keys and row_ranges. + :type retry: :class:`~google.api_core.retry.Retry` + :param retry: + (Optional) Retry delay and deadline arguments. To override, the + default value :attr:`DEFAULT_RETRY_READ_ROWS` can be used and + modified with the :meth:`~google.api_core.retry.Retry.with_delay` + method or the :meth:`~google.api_core.retry.Retry.with_deadline` + method. + :rtype: :class:`.PartialRowsData` :returns: A :class:`.PartialRowsData` a generator for consuming the streamed results. @@ -340,7 +350,7 @@ def read_rows(self, start_key=None, end_key=None, limit=None, data_client = self._instance._client.table_data_client return PartialRowsData( data_client.transport.read_rows, - request_pb) + request_pb, retry) def yield_rows(self, **kwargs): """Read rows from this table. 
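The new 'retry' argument lets a caller tune how transient read errors are retried without replacing the defaults baked into the client. A minimal sketch of overriding it; the instance and table IDs are placeholders and the client is assumed to pick up default application credentials:

    from google.cloud.bigtable import Client
    from google.cloud.bigtable.row_data import DEFAULT_RETRY_READ_ROWS

    client = Client()
    table = client.instance('my-instance').table('my-table')

    # Keep the default predicate and backoff, but allow up to three minutes
    # of retries on retryable read errors before giving up.
    patient_retry = DEFAULT_RETRY_READ_ROWS.with_deadline(180.0)

    rows = table.read_rows(start_key=b'row-000', end_key=b'row-999',
                           retry=patient_retry)
    for row in rows:
        print(row.row_key)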
diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_data.py b/packages/google-cloud-bigtable/tests/unit/test_row_data.py index 98e682dc4537..5044dd7c5942 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_data.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_data.py @@ -14,9 +14,9 @@ import unittest - import mock +from google.api_core.exceptions import DeadlineExceeded from ._testing import _make_credentials from google.cloud.bigtable.row_set import RowRange from google.cloud.bigtable_v2.proto import ( @@ -343,6 +343,7 @@ def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def test_constructor(self): + from google.cloud.bigtable.row_data import DEFAULT_RETRY_READ_ROWS client = _Client() client._data_stub = mock.MagicMock() request = object() @@ -350,6 +351,19 @@ def test_constructor(self): request) self.assertIs(partial_rows_data.request, request) self.assertEqual(partial_rows_data.rows, {}) + self.assertEqual(partial_rows_data.retry, + DEFAULT_RETRY_READ_ROWS) + + def test_constructor_with_retry(self): + client = _Client() + client._data_stub = mock.MagicMock() + request = retry = object() + partial_rows_data = self._make_one(client._data_stub.ReadRows, + request, retry) + self.assertIs(partial_rows_data.request, request) + self.assertEqual(partial_rows_data.rows, {}) + self.assertEqual(partial_rows_data.retry, + retry) def test___eq__(self): client = _Client() @@ -642,6 +656,40 @@ def test_yield_rows_data(self): self.assertEqual(result, self.ROW_KEY) + def test_yield_retry_rows_data(self): + from google.api_core import retry + client = _Client() + + retry_read_rows = retry.Retry( + predicate=_read_rows_retry_exception, + ) + + chunk = _ReadRowsResponseCellChunkPB( + row_key=self.ROW_KEY, + family_name=self.FAMILY_NAME, + qualifier=self.QUALIFIER, + timestamp_micros=self.TIMESTAMP_MICROS, + value=self.VALUE, + commit_row=True, + ) + chunks = [chunk] + + response = _ReadRowsResponseV2(chunks) + failure_iterator = _MockFailureIterator_1() + iterator = _MockCancellableIterator(response) + client._data_stub = mock.MagicMock() + client._data_stub.ReadRows.side_effect = [failure_iterator, + iterator] + + request = object() + + yrd = self._make_one(client._data_stub.ReadRows, request, + retry_read_rows) + + result = self._consume_all(yrd)[0] + + self.assertEqual(result, self.ROW_KEY) + def _consume_all(self, yrd): return [row.row_key for row in yrd] @@ -1141,6 +1189,14 @@ def next(self): __next__ = next +class _MockFailureIterator_1(object): + + def next(self): + raise DeadlineExceeded("Failed to read from server") + + __next__ = next + + class _PartialCellData(object): row_key = b'' @@ -1221,3 +1277,7 @@ def _ReadRowsRequestPB(*args, **kw): bigtable_pb2 as messages_v2_pb2) return messages_v2_pb2.ReadRowsRequest(*args, **kw) + + +def _read_rows_retry_exception(exc): + return isinstance(exc, DeadlineExceeded) diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index b9dea7e5dab0..4c6f075afa70 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -15,9 +15,9 @@ import unittest -import grpc import mock from ._testing import _make_credentials +from google.api_core.exceptions import DeadlineExceeded class Test___mutate_rows_request(unittest.TestCase): @@ -629,7 +629,7 @@ def test_read_rows(self): app_profile_id=app_profile_id) # Create request_pb - request = object() # Returned 
by our mock. + request = retry = object() # Returned by our mock. mock_created = [] def mock_create_row_request(table_name, **kwargs): @@ -639,7 +639,7 @@ def mock_create_row_request(table_name, **kwargs): # Create expected_result. expected_result = PartialRowsData( client._table_data_client.transport.read_rows, - request) + request, retry) # Perform the method and check the result. start_key = b'start-key' @@ -649,9 +649,10 @@ def mock_create_row_request(table_name, **kwargs): with _Monkey(MUT, _create_row_request=mock_create_row_request): result = table.read_rows( start_key=start_key, end_key=end_key, filter_=filter_obj, - limit=limit) + limit=limit, retry=retry) self.assertEqual(result.rows, expected_result.rows) + self.assertEqual(result.retry, expected_result.retry) created_kwargs = { 'start_key': start_key, 'end_key': end_key, @@ -663,6 +664,67 @@ def mock_create_row_request(table_name, **kwargs): } self.assertEqual(mock_created, [(table.name, created_kwargs)]) + def test_read_retry_rows(self): + from google.cloud.bigtable_v2.gapic import bigtable_client + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_table_admin_client) + from google.api_core import retry + + data_api = bigtable_client.BigtableClient(mock.Mock()) + table_api = bigtable_table_admin_client.BigtableTableAdminClient( + mock.Mock()) + credentials = _make_credentials() + client = self._make_client(project='project-id', + credentials=credentials, admin=True) + client._table_data_client = data_api + client._table_admin_client = table_api + instance = client.instance(instance_id=self.INSTANCE_ID) + table = self._make_one(self.TABLE_ID, instance) + + retry_read_rows = retry.Retry( + predicate=_read_rows_retry_exception, + ) + + # Create response_iterator + chunk_1 = _ReadRowsResponseCellChunkPB( + row_key=self.ROW_KEY_1, + family_name=self.FAMILY_NAME, + qualifier=self.QUALIFIER, + timestamp_micros=self.TIMESTAMP_MICROS, + value=self.VALUE, + commit_row=True + ) + + chunk_2 = _ReadRowsResponseCellChunkPB( + row_key=self.ROW_KEY_2, + family_name=self.FAMILY_NAME, + qualifier=self.QUALIFIER, + timestamp_micros=self.TIMESTAMP_MICROS, + value=self.VALUE, + commit_row=True + ) + + response_1 = _ReadRowsResponseV2([chunk_1]) + response_2 = _ReadRowsResponseV2([chunk_2]) + response_failure_iterator_1 = _MockFailureIterator_1() + response_failure_iterator_2 = _MockFailureIterator_2([response_1]) + response_iterator = _MockReadRowsIterator(response_2) + + # Patch the stub used by the API method. 
+ client._table_data_client.transport.read_rows = mock.Mock( + side_effect=[ + response_failure_iterator_1, response_failure_iterator_2, + response_iterator]) + + rows = [] + for row in table.read_rows(start_key=self.ROW_KEY_1, + end_key=self.ROW_KEY_2, + retry=retry_read_rows): + rows.append(row) + + result = rows[1] + self.assertEqual(result.row_key, self.ROW_KEY_2) + def test_yield_retry_rows(self): from google.cloud.bigtable_v2.gapic import bigtable_client from google.cloud.bigtable_admin_v2.gapic import ( @@ -1719,16 +1781,7 @@ def next(self): class _MockFailureIterator_1(object): def next(self): - class DeadlineExceeded(grpc.RpcError, grpc.Call): - """ErrorDeadlineExceeded exception""" - - def code(self): - return grpc.StatusCode.DEADLINE_EXCEEDED - - def details(self): - return "Failed to read from server" - - raise DeadlineExceeded() + raise DeadlineExceeded("Failed to read from server") __next__ = next @@ -1740,20 +1793,11 @@ def __init__(self, *values): self.calls = 0 def next(self): - class DeadlineExceeded(grpc.RpcError, grpc.Call): - """ErrorDeadlineExceeded exception""" - - def code(self): - return grpc.StatusCode.DEADLINE_EXCEEDED - - def details(self): - return "Failed to read from server" - self.calls += 1 if self.calls == 1: return self.iter_values[0] else: - raise DeadlineExceeded() + raise DeadlineExceeded("Failed to read from server") __next__ = next @@ -1786,3 +1830,7 @@ def _ClusterStatePB(replication_state): return table_v2_pb2.Table.ClusterState( replication_state=replication_state ) + + +def _read_rows_retry_exception(exc): + return isinstance(exc, DeadlineExceeded) From d30f24562a2587f6e323161ccbd82b8f03853830 Mon Sep 17 00:00:00 2001 From: Solomon Duskis Date: Tue, 30 Oct 2018 14:39:17 -0400 Subject: [PATCH 196/892] Bigtable: Support DirectRow without a table (#6336) --- packages/google-cloud-bigtable/google/cloud/bigtable/table.py | 2 +- packages/google-cloud-bigtable/tests/unit/test_table.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index bf1da4e7cc00..e7dfbf010191 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -843,7 +843,7 @@ def _check_row_table_name(table_name, row): :raises: :exc:`~.table.TableMismatchError` if the row does not belong to the table. """ - if row.table.name != table_name: + if row.table is not None and row.table.name != table_name: raise TableMismatchError( 'Row %s is a part of %s table. 
Current table: %s' % (row.row_key, row.table.name, table_name)) diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index 4c6f075afa70..39d03a33743e 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -49,7 +49,7 @@ def test__mutate_rows_request(self): table = mock.Mock(name='table', spec=['name']) table.name = 'table' rows = [DirectRow(row_key=b'row_key', table=table), - DirectRow(row_key=b'row_key_2', table=table)] + DirectRow(row_key=b'row_key_2')] rows[0].set_cell('cf1', b'c1', b'1') rows[1].set_cell('cf1', b'c1', b'2') result = self._call_fut('table', rows) From ab0e09ff9ac25d7102c0559d879e429dabcb08f9 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 30 Oct 2018 16:04:28 -0400 Subject: [PATCH 197/892] Bigtable: fix instance IAM methods (#6343) Refactor instance tests to mock 'instance_admin_client' directly, rather than underlying channel. Normalize instance method order, and make testcase order match. Closes #6337. --- .../google-cloud-bigtable/docs/snippets.py | 45 +- .../google/cloud/bigtable/instance.py | 256 ++--- packages/google-cloud-bigtable/noxfile.py | 12 +- .../tests/unit/test_instance.py | 1003 ++++++++--------- 4 files changed, 635 insertions(+), 681 deletions(-) diff --git a/packages/google-cloud-bigtable/docs/snippets.py b/packages/google-cloud-bigtable/docs/snippets.py index 01564fca50b5..b829555f5212 100644 --- a/packages/google-cloud-bigtable/docs/snippets.py +++ b/packages/google-cloud-bigtable/docs/snippets.py @@ -58,11 +58,12 @@ class Config(object): This is a mutable stand-in to allow test set-up to modify global state. """ + CLIENT = None INSTANCE = None def setup_module(): - client = Client(admin=True) + client = Config.CLIENT = Client(admin=True) Config.INSTANCE = client.instance(INSTANCE_ID, instance_type=PRODUCTION, labels=LABELS) @@ -337,6 +338,7 @@ def test_bigtable_delete_instance(): client = Client(admin=True) instance_id_to_delete = "inst-my-" + unique_resource_id('-') # [END bigtable_delete_instance] + cluster_id = "clus-my-" + unique_resource_id('-') instance = client.instance(instance_id_to_delete, @@ -354,6 +356,7 @@ def test_bigtable_delete_instance(): instance_to_delete = client.instance(instance_id_to_delete) instance_to_delete.delete() # [END bigtable_delete_instance] + assert not instance_to_delete.exists() @@ -367,40 +370,44 @@ def test_bigtable_test_iam_permissions(): permissions = ["bigtable.clusters.create", "bigtable.tables.create"] permissions_allowed = instance.test_iam_permissions(permissions) # [END bigtable_test_iam_permissions] - assert permissions_allowed == permissions - -def test_bigtable_get_iam_policy(): - # [START bigtable_get_iam_policy] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - policy_latest = instance.get_iam_policy() - # [END bigtable_get_iam_policy] - - assert len(policy_latest.bigtable_viewers) is not 0 + assert permissions_allowed == permissions -def test_bigtable_set_iam_policy(): +def test_bigtable_set_iam_policy_then_get_iam_policy(): # [START bigtable_set_iam_policy] from google.cloud.bigtable import Client from google.cloud.bigtable.policy import Policy from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE + # [END bigtable_set_iam_policy] + + service_account_email = Config.CLIENT._credentials.service_account_email + + # [START bigtable_set_iam_policy] client = 
Client(admin=True) instance = client.instance(INSTANCE_ID) instance.reload() - ins_policy = Policy() - ins_policy[BIGTABLE_ADMIN_ROLE] = [ - Policy.user("test_iam@test.com"), - Policy.service_account("sv_account@gmail.com")] + new_policy = Policy() + new_policy[BIGTABLE_ADMIN_ROLE] = [ + Policy.service_account(service_account_email), + ] - policy_latest = instance.set_iam_policy(ins_policy) + policy_latest = instance.set_iam_policy(new_policy) # [END bigtable_set_iam_policy] assert len(policy_latest.bigtable_admins) is not 0 + # [START bigtable_get_iam_policy] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + policy = instance.get_iam_policy() + # [END bigtable_get_iam_policy] + + assert len(policy.bigtable_admins) is not 0 + if __name__ == '__main__': pytest.main() diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py index 17e373673f64..6dd0eecb8a50 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py @@ -106,6 +106,17 @@ def __init__(self, self.labels = labels self._state = _state + def _update_from_pb(self, instance_pb): + """Refresh self from the server-provided protobuf. + Helper for :meth:`from_pb` and :meth:`reload`. + """ + if not instance_pb.display_name: # Simple field (string) + raise ValueError('Instance protobuf does not contain display_name') + self.display_name = instance_pb.display_name + self.type_ = instance_pb.type + self.labels = dict(instance_pb.labels) + self._state = instance_pb.state + @classmethod def from_pb(cls, instance_pb, client): """Creates an instance instance from a protobuf. @@ -137,17 +148,6 @@ def from_pb(cls, instance_pb, client): result._update_from_pb(instance_pb) return result - def _update_from_pb(self, instance_pb): - """Refresh self from the server-provided protobuf. - Helper for :meth:`from_pb` and :meth:`reload`. - """ - if not instance_pb.display_name: # Simple field (string) - raise ValueError('Instance protobuf does not contain display_name') - self.display_name = instance_pb.display_name - self.type_ = instance_pb.type - self.labels = dict(instance_pb.labels) - self._state = instance_pb.state - @property def name(self): """Instance name used in requests. @@ -186,41 +186,6 @@ def __eq__(self, other): def __ne__(self, other): return not self == other - def reload(self): - """Reload the metadata for this instance. - - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_reload_instance] - :end-before: [END bigtable_reload_instance] - """ - instance_pb = self._client.instance_admin_client.get_instance( - self.name) - - # NOTE: _update_from_pb does not check that the project and - # instance ID on the response match the request. - self._update_from_pb(instance_pb) - - def exists(self): - """Check whether the instance already exists. - - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_check_instance_exists] - :end-before: [END bigtable_check_instance_exists] - - :rtype: bool - :returns: True if the table exists, else False. - """ - try: - self._client.instance_admin_client.get_instance(name=self.name) - return True - # NOTE: There could be other exceptions that are returned to the user. 
- except NotFound: - return False - def create(self, location_id=None, serve_nodes=None, default_storage_type=None, clusters=None): @@ -303,6 +268,41 @@ def create(self, location_id=None, parent=parent, instance_id=self.instance_id, instance=instance_pb, clusters={c.cluster_id: c._to_pb() for c in clusters}) + def exists(self): + """Check whether the instance already exists. + + For example: + + .. literalinclude:: snippets.py + :start-after: [START bigtable_check_instance_exists] + :end-before: [END bigtable_check_instance_exists] + + :rtype: bool + :returns: True if the table exists, else False. + """ + try: + self._client.instance_admin_client.get_instance(name=self.name) + return True + # NOTE: There could be other exceptions that are returned to the user. + except NotFound: + return False + + def reload(self): + """Reload the metadata for this instance. + + For example: + + .. literalinclude:: snippets.py + :start-after: [START bigtable_reload_instance] + :end-before: [END bigtable_reload_instance] + """ + instance_pb = self._client.instance_admin_client.get_instance( + self.name) + + # NOTE: _update_from_pb does not check that the project and + # instance ID on the response match the request. + self._update_from_pb(instance_pb) + def update(self): """Updates an instance within a project. @@ -376,6 +376,87 @@ def delete(self): """ self._client.instance_admin_client.delete_instance(name=self.name) + def get_iam_policy(self): + """Gets the access control policy for an instance resource. + + For example: + + .. literalinclude:: snippets.py + :start-after: [START bigtable_get_iam_policy] + :end-before: [END bigtable_get_iam_policy] + + :rtype: :class:`google.cloud.bigtable.policy.Policy` + :returns: The current IAM policy of this instance + """ + instance_admin_client = self._client.instance_admin_client + resp = instance_admin_client.get_iam_policy(resource=self.name) + return Policy.from_api_repr(self._to_dict_from_policy_pb(resp)) + + def set_iam_policy(self, policy): + """Sets the access control policy on an instance resource. Replaces any + existing policy. + + For more information about policy, please see documentation of + class `google.cloud.bigtable.policy.Policy` + + For example: + + .. literalinclude:: snippets.py + :start-after: [START bigtable_set_iam_policy] + :end-before: [END bigtable_set_iam_policy] + + :type policy: :class:`google.cloud.bigtable.policy.Policy` + :param policy: A new IAM policy to replace the current IAM policy + of this instance + + :rtype: :class:`google.cloud.bigtable.policy.Policy` + :returns: The current IAM policy of this instance. + """ + instance_admin_client = self._client.instance_admin_client + resp = instance_admin_client.set_iam_policy( + resource=self.name, policy=policy.to_api_repr()) + return Policy.from_api_repr(self._to_dict_from_policy_pb(resp)) + + def test_iam_permissions(self, permissions): + """Returns permissions that the caller has on the specified instance + resource. + + For example: + + .. literalinclude:: snippets.py + :start-after: [START bigtable_test_iam_permissions] + :end-before: [END bigtable_test_iam_permissions] + + :type permissions: list + :param permissions: The set of permissions to check for + the ``resource``. Permissions with wildcards (such as '*' + or 'storage.*') are not allowed. For more information see + `IAM Overview + `_. + `Bigtable Permissions + `_. 
+ + :rtype: list + :returns: A List(string) of permissions allowed on the instance + """ + instance_admin_client = self._client.instance_admin_client + resp = instance_admin_client.test_iam_permissions( + resource=self.name, permissions=permissions) + return list(resp.permissions) + + def _to_dict_from_policy_pb(self, policy): + """Returns a dictionary representation of resource returned from + the getIamPolicy API to use as parameter for + :meth: google.cloud.iam.Policy.from_api_repr + """ + pb_dict = {} + bindings = [{'role': binding.role, 'members': binding.members} + for binding in policy.bindings] + pb_dict['etag'] = policy.etag + pb_dict['version'] = policy.version + pb_dict['bindings'] = bindings + return pb_dict + def cluster(self, cluster_id, location_id=None, serve_nodes=None, default_storage_type=None): """Factory to create a cluster associated with this instance. @@ -548,86 +629,5 @@ def list_app_profiles(self): :class:`~google.cloud.bigtable.app_profile.AppProfile` instances. """ - resp = self._client._instance_admin_client.list_app_profiles(self.name) + resp = self._client.instance_admin_client.list_app_profiles(self.name) return [AppProfile.from_pb(app_profile, self) for app_profile in resp] - - def get_iam_policy(self): - """Gets the access control policy for an instance resource. - - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_get_iam_policy] - :end-before: [END bigtable_get_iam_policy] - - :rtype: :class:`google.cloud.bigtable.policy.Policy` - :returns: The current IAM policy of this instance - """ - instance_admin_client = self._client._instance_admin_client - resp = instance_admin_client.get_iam_policy(resource=self.name) - return Policy.from_api_repr(self._to_dict_from_policy_pb(resp)) - - def set_iam_policy(self, policy): - """Sets the access control policy on an instance resource. Replaces any - existing policy. - - For more information about policy, please see documentation of - class `google.cloud.bigtable.policy.Policy` - - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_set_iam_policy] - :end-before: [END bigtable_set_iam_policy] - - :type policy: :class:`google.cloud.bigtable.policy.Policy` - :param policy: A new IAM policy to replace the current IAM policy - of this instance - - :rtype: :class:`google.cloud.bigtable.policy.Policy` - :returns: The current IAM policy of this instance. - """ - instance_admin_client = self._client._instance_admin_client - resp = instance_admin_client.set_iam_policy( - resource=self.name, policy=policy.to_api_repr()) - return Policy.from_api_repr(self._to_dict_from_policy_pb(resp)) - - def test_iam_permissions(self, permissions): - """Returns permissions that the caller has on the specified instance - resource. - - For example: - - .. literalinclude:: snippets.py - :start-after: [START bigtable_test_iam_permissions] - :end-before: [END bigtable_test_iam_permissions] - - :type permissions: list - :param permissions: The set of permissions to check for - the ``resource``. Permissions with wildcards (such as '*' - or 'storage.*') are not allowed. For more information see - `IAM Overview - `_. - `Bigtable Permissions - `_. 
- - :rtype: list - :returns: A List(string) of permissions allowed on the instance - """ - instance_admin_client = self._client._instance_admin_client - resp = instance_admin_client.test_iam_permissions( - resource=self.name, permissions=permissions) - return list(resp.permissions) - - def _to_dict_from_policy_pb(self, policy): - """Returns a dictionary representation of resource returned from - the getIamPolicy API to use as parameter for - :meth: google.cloud.iam.Policy.from_api_repr - """ - pb_dict = {} - bindings = [{'role': binding.role, 'members': binding.members} - for binding in policy.bindings] - pb_dict['etag'] = policy.etag - pb_dict['version'] = policy.version - pb_dict['bindings'] = bindings - return pb_dict diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index dfef10881d03..ce44f4ce16e6 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -115,7 +115,7 @@ def cover(session): session.run('coverage', 'erase') -@nox.session(python='3.7') +@nox.session(python=['2.7', '3.7']) def snippets(session): """Run the system test suite.""" # Sanity check: Only run system tests if the environment variable is set. @@ -126,9 +126,11 @@ def snippets(session): session.install('mock', 'pytest') for local_dep in LOCAL_DEPS: session.install('-e', local_dep) - session.install('-e', os.path.join('..', 'bigtable')) session.install('-e', '../test_utils/') session.install('-e', '.') - session.run('py.test', '--quiet', \ - os.path.join('docs', 'snippets.py'), \ - *session.posargs) + session.run( + 'py.test', + '--quiet', + os.path.join('docs', 'snippets.py'), + *session.posargs, + ) diff --git a/packages/google-cloud-bigtable/tests/unit/test_instance.py b/packages/google-cloud-bigtable/tests/unit/test_instance.py index f6bc42bc7292..cf902df5ca5f 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_instance.py +++ b/packages/google-cloud-bigtable/tests/unit/test_instance.py @@ -21,33 +21,6 @@ from google.cloud.bigtable.cluster import Cluster -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - return self.channel_stub.responses.pop() - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, - method, - request_serializer=None, - response_deserializer=None): - return MultiCallableStub(method, self) - - class TestInstance(unittest.TestCase): PROJECT = 'project' @@ -114,98 +87,6 @@ def test_constructor_non_default(self): self.assertIs(instance._client, client) self.assertEqual(instance.state, state) - def test_table_factory(self): - from google.cloud.bigtable.table import Table - - app_profile_id = 'appProfileId1262094415' - instance = self._make_one(self.INSTANCE_ID, None) - - table = instance.table(self.TABLE_ID, app_profile_id=app_profile_id) - self.assertIsInstance(table, Table) - self.assertEqual(table.table_id, self.TABLE_ID) - self.assertEqual(table._instance, instance) - self.assertEqual(table._app_profile_id, app_profile_id) - - def test_cluster_factory(self): - from google.cloud.bigtable import enums - - CLUSTER_ID = '{}-cluster'.format(self.INSTANCE_ID) - LOCATION_ID = 
'us-central1-c' - SERVE_NODES = 3 - STORAGE_TYPE = enums.StorageType.HDD - - instance = self._make_one(self.INSTANCE_ID, None) - - cluster = instance.cluster(CLUSTER_ID, location_id=LOCATION_ID, - serve_nodes=SERVE_NODES, - default_storage_type=STORAGE_TYPE) - self.assertIsInstance(cluster, Cluster) - self.assertEqual(cluster.cluster_id, CLUSTER_ID) - self.assertEqual(cluster.location_id, LOCATION_ID) - self.assertIsNone(cluster._state) - self.assertEqual(cluster.serve_nodes, SERVE_NODES) - self.assertEqual(cluster.default_storage_type, STORAGE_TYPE) - - def test_list_clusters(self): - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_instance_admin_client) - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as messages_v2_pb2) - from google.cloud.bigtable_admin_v2.proto import ( - instance_pb2 as data_v2_pb2) - from google.cloud.bigtable.instance import Instance - from google.cloud.bigtable.instance import Cluster - - instance_api = ( - bigtable_instance_admin_client.BigtableInstanceAdminClient( - mock.Mock())) - credentials = _make_credentials() - client = self._make_client(project=self.PROJECT, - credentials=credentials, admin=True) - instance = Instance(self.INSTANCE_ID, client) - - failed_location = 'FAILED' - cluster_id1 = 'cluster-id1' - cluster_id2 = 'cluster-id2' - cluster_name1 = (client.instance_admin_client.cluster_path( - self.PROJECT, self.INSTANCE_ID, cluster_id1)) - cluster_name2 = (client.instance_admin_client.cluster_path( - self.PROJECT, self.INSTANCE_ID, cluster_id2)) - - # Create response_pb - response_pb = messages_v2_pb2.ListClustersResponse( - failed_locations=[ - failed_location - ], - clusters=[ - data_v2_pb2.Cluster( - name=cluster_name1, - ), - data_v2_pb2.Cluster( - name=cluster_name2, - ), - ], - ) - - # Patch the stub used by the API method. - client._instance_admin_client = instance_api - instance_admin_client = client._instance_admin_client - instance_stub = instance_admin_client.transport - instance_stub.list_clusters.side_effect = [response_pb] - - # Perform the method and check the result. 
- clusters, failed_locations = instance.list_clusters() - - cluster_1, cluster_2 = clusters - - self.assertIsInstance(cluster_1, Cluster) - self.assertEqual(cluster_1.name, cluster_name1) - - self.assertIsInstance(cluster_2, Cluster) - self.assertEqual(cluster_2.name, cluster_name2) - - self.assertEqual(failed_locations, [failed_location]) - def test__update_from_pb_success(self): from google.cloud.bigtable_admin_v2.proto import ( instance_pb2 as data_v2_pb2) @@ -264,8 +145,9 @@ def test_from_pb_success(self): instance_pb2 as data_v2_pb2) from google.cloud.bigtable import enums - client = _Client(project=self.PROJECT) - + credentials = _make_credentials() + client = self._make_client( + project=self.PROJECT, credentials=credentials, admin=True) instance_type = enums.Instance.Type.PRODUCTION state = enums.Instance.State.READY instance_pb = data_v2_pb2.Instance( @@ -302,7 +184,9 @@ def test_from_pb_project_mistmatch(self): instance_pb2 as data_v2_pb2) ALT_PROJECT = 'ALT_PROJECT' - client = _Client(project=ALT_PROJECT) + credentials = _make_credentials() + client = self._make_client( + project=ALT_PROJECT, credentials=credentials, admin=True) self.assertNotEqual(self.PROJECT, ALT_PROJECT) @@ -352,47 +236,172 @@ def test___ne__(self): instance2 = self._make_one('instance_id2', 'client2') self.assertNotEqual(instance1, instance2) - def test_reload(self): - from google.cloud.bigtable_admin_v2.proto import ( - instance_pb2 as data_v2_pb2) + def test_create_check_location_and_clusters(self): + instance = self._make_one(self.INSTANCE_ID, None) + + with self.assertRaises(ValueError): + instance.create( + location_id=self.LOCATION_ID, clusters=[object(), object()]) + + def test_create_check_serve_nodes_and_clusters(self): + instance = self._make_one(self.INSTANCE_ID, None) + + with self.assertRaises(ValueError): + instance.create( + serve_nodes=3, clusters=[object(), object()]) + + def test_create_check_default_storage_type_and_clusters(self): + instance = self._make_one(self.INSTANCE_ID, None) + + with self.assertRaises(ValueError): + instance.create( + default_storage_type=1, clusters=[object(), object()]) + + def _instance_api_response_for_create(self): + import datetime + from google.api_core import operation + from google.longrunning import operations_pb2 + from google.protobuf.any_pb2 import Any + from google.cloud._helpers import _datetime_to_pb_timestamp from google.cloud.bigtable_admin_v2.gapic import ( bigtable_instance_admin_client) + from google.cloud.bigtable_admin_v2.proto import ( + bigtable_instance_admin_pb2 as messages_v2_pb2) + from google.cloud.bigtable_admin_v2.types import instance_pb2 + + NOW = datetime.datetime.utcnow() + NOW_PB = _datetime_to_pb_timestamp(NOW) + metadata = messages_v2_pb2.CreateInstanceMetadata(request_time=NOW_PB) + type_url = 'type.googleapis.com/{}'.format( + messages_v2_pb2.CreateInstanceMetadata.DESCRIPTOR.full_name) + response_pb = operations_pb2.Operation( + name=self.OP_NAME, + metadata=Any( + type_url=type_url, + value=metadata.SerializeToString(), + ) + ) + response = operation.from_gapic( + response_pb, + mock.Mock(), + instance_pb2.Instance, + metadata_type=messages_v2_pb2.CreateInstanceMetadata, + ) + project_path_template = 'projects/{}' + location_path_template = 'projects/{}/locations/{}' + instance_api = mock.create_autospec( + bigtable_instance_admin_client.BigtableInstanceAdminClient) + instance_api.create_instance.return_value = response + instance_api.project_path = project_path_template.format + instance_api.location_path = 
location_path_template.format + return instance_api, response + + def test_create(self): from google.cloud.bigtable import enums + from google.cloud.bigtable_admin_v2.types import instance_pb2 - api = bigtable_instance_admin_client.BigtableInstanceAdminClient( - mock.Mock()) credentials = _make_credentials() - client = self._make_client(project=self.PROJECT, - credentials=credentials, admin=True) - instance = self._make_one(self.INSTANCE_ID, client) + client = self._make_client( + project=self.PROJECT, credentials=credentials, admin=True) + instance = self._make_one( + self.INSTANCE_ID, + client, + self.DISPLAY_NAME, + enums.Instance.Type.PRODUCTION, + self.LABELS, + ) + instance_api, response = self._instance_api_response_for_create() + client._instance_admin_client = instance_api + serve_nodes = 3 - # Create response_pb - DISPLAY_NAME = u'hey-hi-hello' - instance_type = enums.Instance.Type.PRODUCTION - response_pb = data_v2_pb2.Instance( - display_name=DISPLAY_NAME, - type=instance_type, - labels=self.LABELS + result = instance.create( + location_id=self.LOCATION_ID, + serve_nodes=serve_nodes, ) - # Patch the stub used by the API method. - client._instance_admin_client = api - bigtable_instance_stub = ( - client._instance_admin_client.transport) - bigtable_instance_stub.get_instance.side_effect = [response_pb] + cluster_pb = instance_pb2.Cluster( + location=instance_api.location_path( + self.PROJECT, self.LOCATION_ID), + serve_nodes=serve_nodes, + default_storage_type=enums.StorageType.UNSPECIFIED, + ) + instance_pb = instance_pb2.Instance( + display_name=self.DISPLAY_NAME, + type=enums.Instance.Type.PRODUCTION, + labels=self.LABELS, + ) + cluster_id = '{}-cluster'.format(self.INSTANCE_ID) + instance_api.create_instance.assert_called_once_with( + parent=instance_api.project_path(self.PROJECT), + instance_id=self.INSTANCE_ID, + instance=instance_pb, + clusters={cluster_id: cluster_pb}, + ) - # Create expected_result. - expected_result = None # reload() has no return value. + self.assertIs(result, response) - # Check Instance optional config values before. - self.assertEqual(instance.display_name, self.INSTANCE_ID) + def test_create_w_clusters(self): + from google.cloud.bigtable import enums + from google.cloud.bigtable_admin_v2.types import instance_pb2 + + credentials = _make_credentials() + client = self._make_client( + project=self.PROJECT, credentials=credentials, admin=True) + instance = self._make_one( + self.INSTANCE_ID, + client, + self.DISPLAY_NAME, + enums.Instance.Type.PRODUCTION, + self.LABELS, + ) + instance_api, response = self._instance_api_response_for_create() + client._instance_admin_client = instance_api # Perform the method and check the result. - result = instance.reload() - self.assertEqual(result, expected_result) + cluster_id_1 = 'cluster-1' + cluster_id_2 = 'cluster-2' + location_id_1 = 'location-id-1' + location_id_2 = 'location-id-2' + serve_nodes_1 = 3 + serve_nodes_2 = 5 + clusters = [ + Cluster(cluster_id_1, instance, + location_id=location_id_1, + serve_nodes=serve_nodes_1), + Cluster(cluster_id_2, instance, + location_id=location_id_2, + serve_nodes=serve_nodes_2)] - # Check Instance optional config values before. 
- self.assertEqual(instance.display_name, DISPLAY_NAME) + result = instance.create(clusters=clusters) + + cluster_pb_1 = instance_pb2.Cluster( + location=instance_api.location_path( + self.PROJECT, location_id_1), + serve_nodes=serve_nodes_1, + default_storage_type=enums.StorageType.UNSPECIFIED, + ) + cluster_pb_2 = instance_pb2.Cluster( + location=instance_api.location_path( + self.PROJECT, location_id_2), + serve_nodes=serve_nodes_2, + default_storage_type=enums.StorageType.UNSPECIFIED, + ) + instance_pb = instance_pb2.Instance( + display_name=self.DISPLAY_NAME, + type=enums.Instance.Type.PRODUCTION, + labels=self.LABELS, + ) + instance_api.create_instance.assert_called_once_with( + parent=instance_api.project_path(self.PROJECT), + instance_id=self.INSTANCE_ID, + instance=instance_pb, + clusters={ + cluster_id_1: cluster_pb_1, + cluster_id_2: cluster_pb_2, + }, + ) + + self.assertIs(result, response) def test_exists(self): from google.cloud.bigtable_admin_v2.gapic import ( @@ -429,113 +438,69 @@ def test_exists(self): alt_instance_2 = self._make_one(non_existing_instance_id, client) self.assertTrue(alt_instance_1.exists()) self.assertFalse(alt_instance_2.exists()) + with self.assertRaises(exceptions.BadRequest): alt_instance_2.exists() - def test_create_check_conflicts(self): - instance = self._make_one(self.INSTANCE_ID, None) - with self.assertRaises(ValueError): - instance.create(location_id=self.LOCATION_ID, - clusters=[object(), object()]) - with self.assertRaises(ValueError): - instance.create(serve_nodes=3, - clusters=[object(), object()]) - with self.assertRaises(ValueError): - instance.create(default_storage_type=1, - clusters=[object(), object()]) - - def test_create(self): - import datetime - from google.api_core import operation - from google.longrunning import operations_pb2 - from google.protobuf.any_pb2 import Any + def test_reload(self): from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as messages_v2_pb2) - from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.bigtable import enums + instance_pb2 as data_v2_pb2) from google.cloud.bigtable_admin_v2.gapic import ( bigtable_instance_admin_client) + from google.cloud.bigtable import enums - NOW = datetime.datetime.utcnow() - NOW_PB = _datetime_to_pb_timestamp(NOW) + api = bigtable_instance_admin_client.BigtableInstanceAdminClient( + mock.Mock()) credentials = _make_credentials() client = self._make_client(project=self.PROJECT, credentials=credentials, admin=True) - instance = self._make_one(self.INSTANCE_ID, client, - self.DISPLAY_NAME, - enums.Instance.Type.PRODUCTION, - self.LABELS) + instance = self._make_one(self.INSTANCE_ID, client) # Create response_pb - metadata = messages_v2_pb2.CreateInstanceMetadata(request_time=NOW_PB) - type_url = 'type.googleapis.com/{}'.format( - messages_v2_pb2.CreateInstanceMetadata.DESCRIPTOR.full_name) - response_pb = operations_pb2.Operation( - name=self.OP_NAME, - metadata=Any( - type_url=type_url, - value=metadata.SerializeToString(), - ) + DISPLAY_NAME = u'hey-hi-hello' + instance_type = enums.Instance.Type.PRODUCTION + response_pb = data_v2_pb2.Instance( + display_name=DISPLAY_NAME, + type=instance_type, + labels=self.LABELS ) # Patch the stub used by the API method. 
- channel = ChannelStub(responses=[response_pb]) - instance_api = ( - bigtable_instance_admin_client.BigtableInstanceAdminClient( - channel=channel)) - client._instance_admin_client = instance_api + client._instance_admin_client = api + bigtable_instance_stub = ( + client._instance_admin_client.transport) + bigtable_instance_stub.get_instance.side_effect = [response_pb] - # Perform the method and check the result. - serve_nodes = 3 - cluster_id = '{}-cluster'.format(self.INSTANCE_ID) - # cluster = instance.cluster(cluster_id, location_id=self.LOCATION_ID, - # serve_nodes=serve_nodes) - # result = instance.create(clusters=[cluster]) - - # TODO: replace this example with above once the otpion is removed - # from instance.create() method - result = instance.create(location_id=self.LOCATION_ID, - serve_nodes=serve_nodes) + # Create expected_result. + expected_result = None # reload() has no return value. - actual_request = channel.requests[0][1] + # Check Instance optional config values before. + self.assertEqual(instance.display_name, self.INSTANCE_ID) - cluster = self._create_cluster_pb( - instance_api, cluster_id, self.LOCATION_ID, serve_nodes, - enums.StorageType.UNSPECIFIED) + # Perform the method and check the result. + result = instance.reload() + self.assertEqual(result, expected_result) - expected_request = self._create_instance_request({cluster_id: cluster}) - self.assertEqual(expected_request, actual_request) - self.assertIsInstance(result, operation.Operation) - self.assertEqual(result.operation.name, self.OP_NAME) - self.assertIsInstance(result.metadata, - messages_v2_pb2.CreateInstanceMetadata) + # Check Instance optional config values before. + self.assertEqual(instance.display_name, DISPLAY_NAME) - def test_create_w_clusters(self): + def _instance_api_response_for_update(self): import datetime from google.api_core import operation from google.longrunning import operations_pb2 from google.protobuf.any_pb2 import Any - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as messages_v2_pb2) from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.bigtable import enums from google.cloud.bigtable_admin_v2.gapic import ( bigtable_instance_admin_client) + from google.cloud.bigtable_admin_v2.proto import ( + bigtable_instance_admin_pb2 as messages_v2_pb2) + from google.cloud.bigtable_admin_v2.types import instance_pb2 NOW = datetime.datetime.utcnow() NOW_PB = _datetime_to_pb_timestamp(NOW) - credentials = _make_credentials() - client = self._make_client(project=self.PROJECT, - credentials=credentials, admin=True) - instance = self._make_one(self.INSTANCE_ID, client, - self.DISPLAY_NAME, - enums.Instance.Type.PRODUCTION, - self.LABELS) - - # Create response_pb - metadata = messages_v2_pb2.CreateInstanceMetadata(request_time=NOW_PB) + metadata = messages_v2_pb2.UpdateInstanceMetadata(request_time=NOW_PB) type_url = 'type.googleapis.com/{}'.format( - messages_v2_pb2.CreateInstanceMetadata.DESCRIPTOR.full_name) + messages_v2_pb2.UpdateInstanceMetadata.DESCRIPTOR.full_name) response_pb = operations_pb2.Operation( name=self.OP_NAME, metadata=Any( @@ -543,202 +508,310 @@ def test_create_w_clusters(self): value=metadata.SerializeToString(), ) ) + response = operation.from_gapic( + response_pb, + mock.Mock(), + instance_pb2.Instance, + metadata_type=messages_v2_pb2.UpdateInstanceMetadata, + ) + instance_path_template = 'projects/{project}/instances/{instance}' + instance_api = mock.create_autospec( + 
bigtable_instance_admin_client.BigtableInstanceAdminClient) + instance_api.partial_update_instance.return_value = response + instance_api.instance_path = instance_path_template.format + return instance_api, response - # Patch the stub used by the API method. - channel = ChannelStub(responses=[response_pb]) - instance_api = ( - bigtable_instance_admin_client.BigtableInstanceAdminClient( - channel=channel)) - client._instance_admin_client = instance_api + def test_update(self): + from google.cloud.bigtable import enums + from google.protobuf import field_mask_pb2 + from google.cloud.bigtable_admin_v2.types import instance_pb2 - # Perform the method and check the result. - cluster_id_1 = 'cluster-1' - cluster_id_2 = 'cluster-2' - location_id_1 = 'location-id-1' - location_id_2 = 'location-id-2' - serve_nodes_1 = 3 - serve_nodes_2 = 5 - clusters = [ - Cluster(cluster_id_1, instance, - location_id=location_id_1, - serve_nodes=serve_nodes_1), - Cluster(cluster_id_2, instance, - location_id=location_id_2, - serve_nodes=serve_nodes_2)] - result = instance.create(clusters=clusters) - actual_request = channel.requests[0][1] + credentials = _make_credentials() + client = self._make_client( + project=self.PROJECT, credentials=credentials, admin=True) + instance = self._make_one( + self.INSTANCE_ID, + client, + display_name=self.DISPLAY_NAME, + instance_type=enums.Instance.Type.DEVELOPMENT, + labels=self.LABELS, + ) + instance_api, response = self._instance_api_response_for_update() + client._instance_admin_client = instance_api - cluster_1_pb = self._create_cluster_pb( - instance_api, cluster_id_1, location_id_1, serve_nodes_1, - enums.StorageType.UNSPECIFIED) + result = instance.update() - cluster_2_pb = self._create_cluster_pb( - instance_api, cluster_id_2, location_id_2, serve_nodes_2, - enums.StorageType.UNSPECIFIED) + instance_pb = instance_pb2.Instance( + name=instance.name, + display_name=instance.display_name, + type=instance.type_, + labels=instance.labels, + ) + update_mask_pb = field_mask_pb2.FieldMask( + paths=['display_name', 'type', 'labels']) - expected_request = self._create_instance_request( - {cluster_id_1: cluster_1_pb, - cluster_id_2: cluster_2_pb} + instance_api.partial_update_instance.assert_called_once_with( + instance=instance_pb, + update_mask=update_mask_pb, ) - self.assertEqual(expected_request, actual_request) - self.assertIsInstance(result, operation.Operation) - self.assertEqual(result.operation.name, self.OP_NAME) - self.assertIsInstance(result.metadata, - messages_v2_pb2.CreateInstanceMetadata) - - def _create_cluster_pb(self, instance_api, cluster_id, location_id, - serve_nodes, storage_type): - from google.cloud.bigtable_admin_v2.types import instance_pb2 - location = instance_api.location_path( - self.PROJECT, location_id) - return instance_pb2.Cluster( - location=location, - serve_nodes=serve_nodes, - default_storage_type=storage_type) + self.assertIs(result, response) - def _create_instance_request(self, clusters): - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as messages_v2_pb2) + def test_update_empty(self): + from google.protobuf import field_mask_pb2 from google.cloud.bigtable_admin_v2.types import instance_pb2 - from google.cloud.bigtable import enums - instance = instance_pb2.Instance(display_name=self.DISPLAY_NAME, - type=enums.Instance.Type.PRODUCTION, - labels=self.LABELS) + credentials = _make_credentials() + client = self._make_client( + project=self.PROJECT, credentials=credentials, admin=True) + instance = 
self._make_one(None, client) + instance_api, response = self._instance_api_response_for_update() + client._instance_admin_client = instance_api - return messages_v2_pb2.CreateInstanceRequest( - parent='projects/{}'.format(self.PROJECT), - instance_id=self.INSTANCE_ID, - instance=instance, - clusters=clusters + result = instance.update() + + instance_pb = instance_pb2.Instance( + name=instance.name, + display_name=instance.display_name, + type=instance.type_, + labels=instance.labels, ) + update_mask_pb = field_mask_pb2.FieldMask() - def test_update(self): - import datetime - from google.api_core import operation - from google.longrunning import operations_pb2 - from google.protobuf.any_pb2 import Any - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as messages_v2_pb2) - from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.bigtable import enums + instance_api.partial_update_instance.assert_called_once_with( + instance=instance_pb, + update_mask=update_mask_pb, + ) + + self.assertIs(result, response) + + def test_delete(self): from google.cloud.bigtable_admin_v2.gapic import ( bigtable_instance_admin_client) - from google.protobuf import field_mask_pb2 - from google.cloud.bigtable_admin_v2.types import instance_pb2 - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as instance_v2_pb2) - NOW = datetime.datetime.utcnow() - NOW_PB = _datetime_to_pb_timestamp(NOW) credentials = _make_credentials() client = self._make_client(project=self.PROJECT, credentials=credentials, admin=True) - instance = self._make_one( - self.INSTANCE_ID, client, display_name=self.DISPLAY_NAME, - instance_type=enums.Instance.Type.DEVELOPMENT, labels=self.LABELS) + instance = self._make_one(self.INSTANCE_ID, client) + instance_api = mock.create_autospec( + bigtable_instance_admin_client.BigtableInstanceAdminClient) + instance_api.delete_instance.return_value = None + client._instance_admin_client = instance_api - expected_request_instance = instance_pb2.Instance( - name=instance.name, display_name=instance.display_name, - type=instance.type_, labels=instance.labels) - expected_request_update_mask = field_mask_pb2.FieldMask( - paths=['display_name', 'type', 'labels']) - expected_request = instance_v2_pb2.PartialUpdateInstanceRequest( - instance=expected_request_instance, - update_mask=expected_request_update_mask) + result = instance.delete() - metadata = messages_v2_pb2.UpdateInstanceMetadata( - request_time=NOW_PB) - type_url = 'type.googleapis.com/{}'.format( - messages_v2_pb2.UpdateInstanceMetadata.DESCRIPTOR.full_name) - response_pb = operations_pb2.Operation( - name=self.OP_NAME, - metadata=Any( - type_url=type_url, - value=metadata.SerializeToString(), - ) - ) + instance_api.delete_instance.assert_called_once_with( + instance.name) - channel = ChannelStub(responses=[response_pb]) - instance_api = ( - bigtable_instance_admin_client.BigtableInstanceAdminClient( - channel=channel)) + self.assertIsNone(result) - # Mock api calls + def test_get_iam_policy(self): + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_instance_admin_client) + from google.iam.v1 import policy_pb2 + from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE + + credentials = _make_credentials() + client = self._make_client(project=self.PROJECT, + credentials=credentials, admin=True) + instance = self._make_one(self.INSTANCE_ID, client) + + version = 1 + etag = b'etag_v1' + members = [ + 'serviceAccount:service_acc1@test.com', + 
'user:user1@test.com', + ] + bindings = [{'role': BIGTABLE_ADMIN_ROLE, 'members': members}] + iam_policy = policy_pb2.Policy( + version=version, etag=etag, bindings=bindings) + + # Patch the stub used by the API method. + instance_api = mock.create_autospec( + bigtable_instance_admin_client.BigtableInstanceAdminClient) client._instance_admin_client = instance_api + instance_api.get_iam_policy.return_value = iam_policy # Perform the method and check the result. - result = instance.update() - actual_request = channel.requests[0][1] + result = instance.get_iam_policy() - self.assertEqual(actual_request, expected_request) - self.assertIsInstance(result, operation.Operation) - self.assertEqual(result.operation.name, self.OP_NAME) - self.assertIsInstance(result.metadata, - messages_v2_pb2.UpdateInstanceMetadata) + instance_api.get_iam_policy.assert_called_once_with( + resource=instance.name) + self.assertEqual(result.version, version) + self.assertEqual(result.etag, etag) + admins = result.bigtable_admins + self.assertEqual(len(admins), len(members)) + for found, expected in zip(sorted(admins), sorted(members)): + self.assertEqual(found, expected) - def test_update_empty(self): - from google.api_core import operation + def test_set_iam_policy(self): from google.cloud.bigtable_admin_v2.gapic import ( bigtable_instance_admin_client) - from google.longrunning import operations_pb2 - from google.protobuf import field_mask_pb2 - from google.cloud.bigtable_admin_v2.types import instance_pb2 - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as instance_v2_pb2) + from google.iam.v1 import policy_pb2 + from google.cloud.bigtable.policy import Policy + from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE credentials = _make_credentials() client = self._make_client(project=self.PROJECT, credentials=credentials, admin=True) - instance = self._make_one(None, client) - - expected_request_instance = instance_pb2.Instance( - name=instance.name, display_name=instance.display_name, - type=instance.type_, labels=instance.labels) - expected_request_update_mask = field_mask_pb2.FieldMask() - expected_request = instance_v2_pb2.PartialUpdateInstanceRequest( - instance=expected_request_instance, - update_mask=expected_request_update_mask) - - response_pb = operations_pb2.Operation(name=self.OP_NAME) + instance = self._make_one(self.INSTANCE_ID, client) - channel = ChannelStub(responses=[response_pb]) - instance_api = ( - bigtable_instance_admin_client.BigtableInstanceAdminClient( - channel=channel)) + version = 1 + etag = b'etag_v1' + members = [ + 'serviceAccount:service_acc1@test.com', + 'user:user1@test.com', + ] + bindings = [{'role': BIGTABLE_ADMIN_ROLE, 'members': members}] + iam_policy_pb = policy_pb2.Policy( + version=version, etag=etag, bindings=bindings) - # Mock api calls + # Patch the stub used by the API method. + instance_api = mock.create_autospec( + bigtable_instance_admin_client.BigtableInstanceAdminClient) + instance_api.set_iam_policy.return_value = iam_policy_pb client._instance_admin_client = instance_api # Perform the method and check the result. 
- result = instance.update() - actual_request = channel.requests[0][1] + iam_policy = Policy(etag=etag, version=version) + iam_policy[BIGTABLE_ADMIN_ROLE] = [ + Policy.user("user1@test.com"), + Policy.service_account("service_acc1@test.com"), + ] - self.assertIsInstance(result, operation.Operation) - self.assertEqual(actual_request, expected_request) + result = instance.set_iam_policy(iam_policy) - def test_delete(self): + instance_api.set_iam_policy.assert_called_once_with( + resource=instance.name, + policy={ + 'version': version, + 'etag': etag, + 'bindings': bindings, + }, + ) + self.assertEqual(result.version, version) + self.assertEqual(result.etag, etag) + admins = result.bigtable_admins + self.assertEqual(len(admins), len(members)) + for found, expected in zip(sorted(admins), sorted(members)): + self.assertEqual(found, expected) + + def test_test_iam_permissions(self): from google.cloud.bigtable_admin_v2.gapic import ( bigtable_instance_admin_client) + from google.iam.v1 import iam_policy_pb2 - api = bigtable_instance_admin_client.BigtableInstanceAdminClient( - mock.Mock()) credentials = _make_credentials() client = self._make_client(project=self.PROJECT, credentials=credentials, admin=True) instance = self._make_one(self.INSTANCE_ID, client) - # Mock api calls - client._instance_admin_client = api + permissions = ["bigtable.tables.create", "bigtable.clusters.create"] - # Create expected_result. - expected_result = None # delete() has no return value. + response = iam_policy_pb2.TestIamPermissionsResponse( + permissions=permissions) + + instance_api = mock.create_autospec( + bigtable_instance_admin_client.BigtableInstanceAdminClient) + instance_api.test_iam_permissions.return_value = response + client._instance_admin_client = instance_api + + result = instance.test_iam_permissions(permissions) + + self.assertEqual(result, permissions) + instance_api.test_iam_permissions.assert_called_once_with( + resource=instance.name, permissions=permissions) + + def test_cluster_factory(self): + from google.cloud.bigtable import enums + + CLUSTER_ID = '{}-cluster'.format(self.INSTANCE_ID) + LOCATION_ID = 'us-central1-c' + SERVE_NODES = 3 + STORAGE_TYPE = enums.StorageType.HDD + + instance = self._make_one(self.INSTANCE_ID, None) + + cluster = instance.cluster(CLUSTER_ID, location_id=LOCATION_ID, + serve_nodes=SERVE_NODES, + default_storage_type=STORAGE_TYPE) + self.assertIsInstance(cluster, Cluster) + self.assertEqual(cluster.cluster_id, CLUSTER_ID) + self.assertEqual(cluster.location_id, LOCATION_ID) + self.assertIsNone(cluster._state) + self.assertEqual(cluster.serve_nodes, SERVE_NODES) + self.assertEqual(cluster.default_storage_type, STORAGE_TYPE) + + def test_list_clusters(self): + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_instance_admin_client) + from google.cloud.bigtable_admin_v2.proto import ( + bigtable_instance_admin_pb2 as messages_v2_pb2) + from google.cloud.bigtable_admin_v2.proto import ( + instance_pb2 as data_v2_pb2) + from google.cloud.bigtable.instance import Instance + from google.cloud.bigtable.instance import Cluster + + credentials = _make_credentials() + client = self._make_client(project=self.PROJECT, + credentials=credentials, admin=True) + instance = Instance(self.INSTANCE_ID, client) + + failed_location = 'FAILED' + cluster_id1 = 'cluster-id1' + cluster_id2 = 'cluster-id2' + cluster_path_template = 'projects/{}/instances/{}/clusters/{}' + cluster_name1 = cluster_path_template.format( + self.PROJECT, self.INSTANCE_ID, cluster_id1) + cluster_name2 = 
cluster_path_template.format( + self.PROJECT, self.INSTANCE_ID, cluster_id2) + + # Create response_pb + response_pb = messages_v2_pb2.ListClustersResponse( + failed_locations=[ + failed_location + ], + clusters=[ + data_v2_pb2.Cluster( + name=cluster_name1, + ), + data_v2_pb2.Cluster( + name=cluster_name2, + ), + ], + ) + + # Patch the stub used by the API method. + instance_api = mock.create_autospec( + bigtable_instance_admin_client.BigtableInstanceAdminClient) + instance_api.list_clusters.side_effect = [response_pb] + instance_api.cluster_path = cluster_path_template.format + client._instance_admin_client = instance_api # Perform the method and check the result. - result = instance.delete() + clusters, failed_locations = instance.list_clusters() - self.assertEqual(result, expected_result) + cluster_1, cluster_2 = clusters + + self.assertIsInstance(cluster_1, Cluster) + self.assertEqual(cluster_1.name, cluster_name1) + + self.assertIsInstance(cluster_2, Cluster) + self.assertEqual(cluster_2.name, cluster_name2) + + self.assertEqual(failed_locations, [failed_location]) + + def test_table_factory(self): + from google.cloud.bigtable.table import Table + + app_profile_id = 'appProfileId1262094415' + instance = self._make_one(self.INSTANCE_ID, None) + + table = instance.table(self.TABLE_ID, app_profile_id=app_profile_id) + self.assertIsInstance(table, Table) + self.assertEqual(table.table_id, self.TABLE_ID) + self.assertEqual(table._instance, instance) + self.assertEqual(table._app_profile_id, app_profile_id) def _list_tables_helper(self, table_name=None): from google.cloud.bigtable_admin_v2.proto import ( @@ -838,17 +911,24 @@ def test_app_profile_factory(self): self.assertEqual(app_profile2.allow_transactional_writes, ALLOW_WRITES) def test_list_app_profiles(self): + from google.api_core.page_iterator import Iterator + from google.api_core.page_iterator import Page from google.cloud.bigtable_admin_v2.gapic import ( bigtable_instance_admin_client) - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as messages_v2_pb2) from google.cloud.bigtable_admin_v2.proto import ( instance_pb2 as data_v2_pb2) from google.cloud.bigtable.app_profile import AppProfile - instance_api = ( - bigtable_instance_admin_client.BigtableInstanceAdminClient( - mock.Mock())) + class _Iterator(Iterator): + + def __init__(self, pages): + super(_Iterator, self).__init__(client=None) + self._pages = pages + + def _next_page(self): + if self._pages: + page, self._pages = self._pages[0], self._pages[1:] + return Page(self, page, self.item_to_value) credentials = _make_credentials() client = self._make_client(project=self.PROJECT, @@ -856,35 +936,33 @@ def test_list_app_profiles(self): instance = self._make_one(self.INSTANCE_ID, client) # Setup Expected Response - next_page_token = '' + app_profile_path_template = 'projects/{}/instances/{}/appProfiles/{}' app_profile_id1 = 'app-profile-id1' app_profile_id2 = 'app-profile-id2' - app_profile_name1 = (client.instance_admin_client.app_profile_path( - self.PROJECT, self.INSTANCE_ID, app_profile_id1)) - app_profile_name2 = (client.instance_admin_client.app_profile_path( - self.PROJECT, self.INSTANCE_ID, app_profile_id2)) + app_profile_name1 = app_profile_path_template.format( + self.PROJECT, self.INSTANCE_ID, app_profile_id1) + app_profile_name2 = app_profile_path_template.format( + self.PROJECT, self.INSTANCE_ID, app_profile_id2) routing_policy = data_v2_pb2.AppProfile.MultiClusterRoutingUseAny() - expected_response = 
messages_v2_pb2.ListAppProfilesResponse( - next_page_token=next_page_token, - app_profiles=[ - data_v2_pb2.AppProfile( - name=app_profile_name1, - multi_cluster_routing_use_any=routing_policy, - ), - data_v2_pb2.AppProfile( - name=app_profile_name2, - multi_cluster_routing_use_any=routing_policy, - ) - ], - ) + app_profiles = [ + data_v2_pb2.AppProfile( + name=app_profile_name1, + multi_cluster_routing_use_any=routing_policy, + ), + data_v2_pb2.AppProfile( + name=app_profile_name2, + multi_cluster_routing_use_any=routing_policy, + ) + ] + iterator = _Iterator(pages=[app_profiles]) # Patch the stub used by the API method. + instance_api = mock.create_autospec( + bigtable_instance_admin_client.BigtableInstanceAdminClient) client._instance_admin_client = instance_api - bigtable_instance_stub = ( - client._instance_admin_client.transport) - bigtable_instance_stub.list_app_profiles.side_effect = [ - expected_response] + instance_api.app_profile_path = app_profile_path_template.format + instance_api.list_app_profiles.return_value = iterator # Perform the method and check the result. app_profiles = instance.list_app_profiles() @@ -896,136 +974,3 @@ def test_list_app_profiles(self): self.assertIsInstance(app_profile_2, AppProfile) self.assertEqual(app_profile_2.name, app_profile_name2) - - def test_get_iam_policy(self): - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_instance_admin_client) - from google.iam.v1 import iam_policy_pb2 - from google.iam.v1 import policy_pb2 - from google.cloud.bigtable.policy import Policy - from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE - - credentials = _make_credentials() - client = self._make_client(project=self.PROJECT, - credentials=credentials, admin=True) - instance = self._make_one(self.INSTANCE_ID, client) - - version = 1 - etag = b'etag_v1' - bindings = [{'role': BIGTABLE_ADMIN_ROLE, - 'members': ['serviceAccount:service_acc1@test.com', - 'user:user1@test.com']}] - - expected_request_policy = policy_pb2.Policy(version=version, - etag=etag, - bindings=bindings) - - expected_request = iam_policy_pb2.GetIamPolicyRequest( - resource=instance.name - ) - - # Patch the stub used by the API method. - channel = ChannelStub(responses=[expected_request_policy]) - instance_api = ( - bigtable_instance_admin_client.BigtableInstanceAdminClient( - channel=channel)) - client._instance_admin_client = instance_api - # Perform the method and check the result. 
- policy_request = Policy(etag=etag, version=version) - policy_request[BIGTABLE_ADMIN_ROLE] = [Policy.user("user1@test.com"), - Policy.service_account( - "service_acc1@test.com")] - - result = instance.get_iam_policy() - actual_request = channel.requests[0][1] - - self.assertEqual(actual_request, expected_request) - self.assertEqual(result.bigtable_admins, - policy_request.bigtable_admins) - - def test_set_iam_policy(self): - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_instance_admin_client) - from google.iam.v1 import iam_policy_pb2 - from google.iam.v1 import policy_pb2 - from google.cloud.bigtable.policy import Policy - from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE - - credentials = _make_credentials() - client = self._make_client(project=self.PROJECT, - credentials=credentials, admin=True) - instance = self._make_one(self.INSTANCE_ID, client) - - version = 1 - etag = b'etag_v1' - bindings = [{'role': BIGTABLE_ADMIN_ROLE, - 'members': ['serviceAccount:service_acc1@test.com', - 'user:user1@test.com']}] - - expected_request_policy = policy_pb2.Policy(version=version, - etag=etag, - bindings=bindings) - - expected_request = iam_policy_pb2.SetIamPolicyRequest( - resource=instance.name, - policy=expected_request_policy - ) - - # Patch the stub used by the API method. - channel = ChannelStub(responses=[expected_request_policy]) - instance_api = ( - bigtable_instance_admin_client.BigtableInstanceAdminClient( - channel=channel)) - client._instance_admin_client = instance_api - # Perform the method and check the result. - policy_request = Policy(etag=etag, version=version) - policy_request[BIGTABLE_ADMIN_ROLE] = [Policy.user("user1@test.com"), - Policy.service_account( - "service_acc1@test.com")] - - result = instance.set_iam_policy(policy_request) - actual_request = channel.requests[0][1] - - self.assertEqual(actual_request, expected_request) - self.assertEqual(result.bigtable_admins, - policy_request.bigtable_admins) - - def test_test_iam_permissions(self): - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_instance_admin_client) - from google.iam.v1 import iam_policy_pb2 - - credentials = _make_credentials() - client = self._make_client(project=self.PROJECT, - credentials=credentials, admin=True) - instance = self._make_one(self.INSTANCE_ID, client) - - permissions = ["bigtable.tables.create", "bigtable.clusters.create"] - - expected_request = iam_policy_pb2.TestIamPermissionsRequest( - resource=instance.name, - permissions=permissions) - - # Patch the stub used by the API method. - channel = ChannelStub(responses=[expected_request]) - instance_api = ( - bigtable_instance_admin_client.BigtableInstanceAdminClient( - channel=channel)) - client._instance_admin_client = instance_api - - result = instance.test_iam_permissions(permissions) - actual_request = channel.requests[0][1] - self.assertEqual(actual_request, expected_request) - self.assertEqual(result, permissions) - - -class _Client(object): - - def __init__(self, project): - self.project = project - self.project_name = 'projects/' + self.project - self._operations_stub = mock.sentinel.operations_stub - - def __eq__(self, other): - return (other.project == self.project and - other.project_name == self.project_name) From ee3acf2ac81e07d1feef2956cf7bcf2f5723fc7c Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Thu, 1 Nov 2018 16:29:46 -0400 Subject: [PATCH 198/892] Add explicit coverage for 'row_data._retry_read_rows_exception'. (#6364) Fixes broken coverage on 'master'. 
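For context, '_retry_read_rows_exception' is a retry predicate: it returns True
for transient errors (ServiceUnavailable, DeadlineExceeded), including ones
wrapped in a grpc call error. A minimal sketch of how such a predicate is
typically consumed via 'google.api_core.retry.Retry' (illustrative only;
'read_rows_with_retry' and its arguments are hypothetical and not part of this
patch):

    from google.api_core import exceptions
    from google.api_core import retry

    def _is_transient(exc):
        # Stand-in for the helper under test: retry only on transient errors.
        return isinstance(
            exc, (exceptions.ServiceUnavailable, exceptions.DeadlineExceeded))

    # 'Retry' re-invokes the decorated callable whenever the predicate returns
    # True for the raised exception, until its deadline expires.
    @retry.Retry(predicate=_is_transient, deadline=30.0)
    def read_rows_with_retry(table, row_set=None):
        # Hypothetical wrapper around Table.read_rows, for illustration only.
        return list(table.read_rows(row_set=row_set))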
--- .../tests/unit/test_row_data.py | 65 +++++++++++++++++++ 1 file changed, 65 insertions(+) diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_data.py b/packages/google-cloud-bigtable/tests/unit/test_row_data.py index 5044dd7c5942..b5a3a797f634 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_data.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_data.py @@ -320,6 +320,71 @@ class _Client(object): data_stub = None +class Test_retry_read_rows_exception(unittest.TestCase): + + @staticmethod + def _call_fut(exc): + from google.cloud.bigtable.row_data import _retry_read_rows_exception + + return _retry_read_rows_exception(exc) + + @staticmethod + def _make_grpc_call_error(exception): + from grpc import Call + from grpc import RpcError + + class TestingException(Call, RpcError): + def __init__(self, exception): + self.exception = exception + + def code(self): + return self.exception.grpc_status_code + + def details(self): + return 'Testing' + + return TestingException(exception) + + def test_w_miss(self): + from google.api_core.exceptions import Conflict + + exception = Conflict('testing') + self.assertFalse(self._call_fut(exception)) + + def test_w_service_unavailable(self): + from google.api_core.exceptions import ServiceUnavailable + + exception = ServiceUnavailable('testing') + self.assertTrue(self._call_fut(exception)) + + def test_w_deadline_exceeded(self): + from google.api_core.exceptions import DeadlineExceeded + + exception = DeadlineExceeded('testing') + self.assertTrue(self._call_fut(exception)) + + def test_w_miss_wrapped_in_grpc(self): + from google.api_core.exceptions import Conflict + + wrapped = Conflict('testing') + exception = self._make_grpc_call_error(wrapped) + self.assertFalse(self._call_fut(exception)) + + def test_w_service_unavailable_wrapped_in_grpc(self): + from google.api_core.exceptions import ServiceUnavailable + + wrapped = ServiceUnavailable('testing') + exception = self._make_grpc_call_error(wrapped) + self.assertTrue(self._call_fut(exception)) + + def test_w_deadline_exceeded_wrapped_in_grpc(self): + from google.api_core.exceptions import DeadlineExceeded + + wrapped = DeadlineExceeded('testing') + exception = self._make_grpc_call_error(wrapped) + self.assertTrue(self._call_fut(exception)) + + class TestPartialRowsData(unittest.TestCase): ROW_KEY = b'row-key' FAMILY_NAME = u'family' From a79added36bb243fc33c20589af8b1e784ec7b44 Mon Sep 17 00:00:00 2001 From: Christopher Wilcox Date: Thu, 1 Nov 2018 14:54:55 -0700 Subject: [PATCH 199/892] Update IAM version in dependencies (#6362) --- packages/google-cloud-bigtable/setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 9b93abfb40e8..83d7cad45bf2 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -31,7 +31,7 @@ dependencies = [ 'google-cloud-core<0.29dev,>=0.28.0', 'google-api-core[grpc]<2.0.0dev,>=0.1.1', - 'grpc-google-iam-v1<0.12dev,>=0.11.4' + 'grpc-google-iam-v1<0.12dev,>=0.11.4', ] extras = { } From 3219134ddba0b1984f1eb807d06cde2eb1675e33 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Thu, 1 Nov 2018 18:06:57 -0400 Subject: [PATCH 200/892] Bigtable: fix flaky systests / snippets (#6367) * Harden table existence checks for eventual consistency. Closes #6245. * Snippet: require fewer serve nodes when updating cluster. Avoid errors due to quota with simultaneous CI runs. Closes #6366. 
* Avoid use of 'is' keyword in asserts where identity is not promised. --- .../google-cloud-bigtable/docs/snippets.py | 24 +++++++++---------- .../google-cloud-bigtable/tests/system.py | 7 ++++-- 2 files changed, 17 insertions(+), 14 deletions(-) diff --git a/packages/google-cloud-bigtable/docs/snippets.py b/packages/google-cloud-bigtable/docs/snippets.py index b829555f5212..a75d89bc3ffb 100644 --- a/packages/google-cloud-bigtable/docs/snippets.py +++ b/packages/google-cloud-bigtable/docs/snippets.py @@ -167,7 +167,7 @@ def test_bigtable_list_instances(): client = Client(admin=True) (instances_list, failed_locations_list) = client.list_instances() # [END bigtable_list_instances] - assert len(instances_list) is not 0 + assert len(instances_list) > 0 def test_bigtable_list_clusters_on_instance(): @@ -178,7 +178,7 @@ def test_bigtable_list_clusters_on_instance(): instance = client.instance(INSTANCE_ID) (clusters_list, failed_locations_list) = instance.list_clusters() # [END bigtable_list_clusters_on_instance] - assert len(clusters_list) is not 0 + assert len(clusters_list) > 0 def test_bigtable_list_clusters_in_project(): @@ -188,7 +188,7 @@ def test_bigtable_list_clusters_in_project(): client = Client(admin=True) (clusters_list, failed_locations_list) = client.list_clusters() # [END bigtable_list_clusters_in_project] - assert len(clusters_list) is not 0 + assert len(clusters_list) > 0 def test_bigtable_list_app_profiles(): @@ -207,7 +207,7 @@ def test_bigtable_list_app_profiles(): # [START bigtable_list_app_profiles] app_profiles_list = instance.list_app_profiles() # [END bigtable_list_app_profiles] - assert len(app_profiles_list) is not 0 + assert len(app_profiles_list) > 0 def test_bigtable_instance_exists(): @@ -241,7 +241,7 @@ def test_bigtable_reload_instance(): instance = client.instance(INSTANCE_ID) instance.reload() # [END bigtable_reload_instance] - assert instance.type_ is PRODUCTION.value + assert instance.type_ == PRODUCTION.value def test_bigtable_reload_cluster(): @@ -253,7 +253,7 @@ def test_bigtable_reload_cluster(): cluster = instance.cluster(CLUSTER_ID) cluster.reload() # [END bigtable_reload_cluster] - assert cluster.serve_nodes is SERVER_NODES + assert cluster.serve_nodes == SERVER_NODES def test_bigtable_update_instance(): @@ -266,7 +266,7 @@ def test_bigtable_update_instance(): instance.display_name = display_name instance.update() # [END bigtable_update_instance] - assert instance.display_name is display_name + assert instance.display_name == display_name def test_bigtable_update_cluster(): @@ -276,10 +276,10 @@ def test_bigtable_update_cluster(): client = Client(admin=True) instance = client.instance(INSTANCE_ID) cluster = instance.cluster(CLUSTER_ID) - cluster.serve_nodes = 8 + cluster.serve_nodes = 4 cluster.update() # [END bigtable_update_cluster] - assert cluster.serve_nodes is 8 + assert cluster.serve_nodes == 4 def test_bigtable_create_table(): @@ -305,7 +305,7 @@ def test_bigtable_list_tables(): instance = client.instance(INSTANCE_ID) tables_list = instance.list_tables() # [END bigtable_list_tables] - assert len(tables_list) is not 0 + assert len(tables_list) > 0 def test_bigtable_delete_cluster(): @@ -396,7 +396,7 @@ def test_bigtable_set_iam_policy_then_get_iam_policy(): policy_latest = instance.set_iam_policy(new_policy) # [END bigtable_set_iam_policy] - assert len(policy_latest.bigtable_admins) is not 0 + assert len(policy_latest.bigtable_admins) > 0 # [START bigtable_get_iam_policy] from google.cloud.bigtable import Client @@ -406,7 +406,7 @@ def 
test_bigtable_set_iam_policy_then_get_iam_policy(): policy = instance.get_iam_policy() # [END bigtable_get_iam_policy] - assert len(policy.bigtable_admins) is not 0 + assert len(policy.bigtable_admins) > 0 if __name__ == '__main__': diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index 737dc3fa0d39..63d8200d1cbe 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -35,6 +35,7 @@ from google.cloud.bigtable.row_set import RowRange from test_utils.retry import RetryErrors +from test_utils.retry import RetryResult from test_utils.system import EmulatorCreds from test_utils.system import unique_resource_id @@ -595,13 +596,15 @@ def test_list_tables(self): self.assertEqual(tables, [self._table]) def test_exists(self): + retry_until_true = RetryResult(lambda result: result) + retry_until_false = RetryResult(lambda result: not result) temp_table_id = 'test-table_existence' temp_table = Config.INSTANCE.table(temp_table_id) self.assertFalse(temp_table.exists()) temp_table.create() - self.assertTrue(temp_table.exists()) + self.assertTrue(retry_until_true(temp_table.exists)()) temp_table.delete() - self.assertFalse(temp_table.exists()) + self.assertFalse(retry_until_false(temp_table.exists)()) def test_create_table(self): temp_table_id = 'test-create-table' From 76a7c0f7b8bd133f28cbad210ab5c396dc06a0ac Mon Sep 17 00:00:00 2001 From: DPE bot Date: Tue, 6 Nov 2018 08:02:14 -0800 Subject: [PATCH 201/892] Fix client_info bug, update docstrings and timeouts. (#6406) --- .../gapic/bigtable_instance_admin_client.py | 151 +++++++++--------- .../bigtable_instance_admin_client_config.py | 28 ++-- .../gapic/bigtable_table_admin_client.py | 114 +++++++------ .../bigtable_table_admin_client_config.py | 10 +- .../cloud/bigtable_admin_v2/gapic/enums.py | 23 ++- .../bigtable_v2/gapic/bigtable_client.py | 79 ++++----- 6 files changed, 205 insertions(+), 200 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py index 6e2e7f25063b..e243b9d32710 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py @@ -197,9 +197,10 @@ def __init__(self, ) if client_info is None: - client_info = ( - google.api_core.gapic_v1.client_info.DEFAULT_CLIENT_INFO) - client_info.gapic_version = _GAPIC_LIBRARY_VERSION + client_info = google.api_core.gapic_v1.client_info.ClientInfo( + gapic_version=_GAPIC_LIBRARY_VERSION, ) + else: + client_info.gapic_version = _GAPIC_LIBRARY_VERSION self._client_info = client_info # Parse out the default settings for retry and timeout for each RPC @@ -234,13 +235,13 @@ def create_instance(self, >>> >>> parent = client.project_path('[PROJECT]') >>> - >>> # TODO: Initialize ``instance_id``: + >>> # TODO: Initialize `instance_id`: >>> instance_id = '' >>> - >>> # TODO: Initialize ``instance``: + >>> # TODO: Initialize `instance`: >>> instance = {} >>> - >>> # TODO: Initialize ``clusters``: + >>> # TODO: Initialize `clusters`: >>> clusters = {} >>> >>> response = client.create_instance(parent, instance_id, instance, clusters) @@ -260,15 +261,16 @@ def create_instance(self, instance_id (str): The ID to be used when referring to the new instance within its 
project, e.g., just ``myinstance`` rather than ``projects/myproject/instances/myinstance``. - instance (Union[dict, ~google.cloud.bigtable_admin_v2.types.Instance]): The instance to create. - Fields marked ``OutputOnly`` must be left blank. + instance (Union[dict, ~google.cloud.bigtable_admin_v2.types.Instance]): The instance to create. Fields marked ``OutputOnly`` must be left blank. + If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_admin_v2.types.Instance` clusters (dict[str -> Union[dict, ~google.cloud.bigtable_admin_v2.types.Cluster]]): The clusters to be created within the instance, mapped by desired cluster ID, e.g., just ``mycluster`` rather than - ``projects/myproject/instances/myinstance/clusters/mycluster``. - Fields marked ``OutputOnly`` must be left blank. - Currently, at most two clusters can be specified. + ``projects/myproject/instances/myinstance/clusters/mycluster``. Fields + marked ``OutputOnly`` must be left blank. Currently, at most two + clusters can be specified. + If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_admin_v2.types.Cluster` retry (Optional[google.api_core.retry.Retry]): A retry object used @@ -413,8 +415,8 @@ def list_instances(self, >>> response = client.list_instances(parent) Args: - parent (str): The unique name of the project for which a list of instances is requested. - Values are of the form ``projects/``. + parent (str): The unique name of the project for which a list of instances is + requested. Values are of the form ``projects/``. page_token (str): DEPRECATED: This field is unused and ignored. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not @@ -485,20 +487,19 @@ def update_instance(self, >>> >>> name = client.instance_path('[PROJECT]', '[INSTANCE]') >>> - >>> # TODO: Initialize ``display_name``: + >>> # TODO: Initialize `display_name`: >>> display_name = '' >>> - >>> # TODO: Initialize ``type_``: + >>> # TODO: Initialize `type_`: >>> type_ = enums.Instance.Type.TYPE_UNSPECIFIED >>> - >>> # TODO: Initialize ``labels``: + >>> # TODO: Initialize `labels`: >>> labels = {} >>> >>> response = client.update_instance(name, display_name, type_, labels) Args: - name (str): (``OutputOnly``) - The unique name of the instance. Values are of the form + name (str): (``OutputOnly``) The unique name of the instance. Values are of the form ``projects//instances/[a-z][a-z0-9\\-]+[a-z0-9]``. display_name (str): The descriptive name for this instance as it appears in UIs. Can be changed at any time, but should be kept globally unique @@ -506,17 +507,17 @@ def update_instance(self, type_ (~google.cloud.bigtable_admin_v2.types.Type): The type of the instance. Defaults to ``PRODUCTION``. labels (dict[str -> str]): Labels are a flexible and lightweight mechanism for organizing cloud resources into groups that reflect a customer's organizational needs and - deployment strategies. They can be used to filter resources and aggregate - metrics. - - * Label keys must be between 1 and 63 characters long and must conform to - the regular expression: ``[\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}``. - * Label values must be between 0 and 63 characters long and must conform to - the regular expression: ``[\p{Ll}\p{Lo}\p{N}_-]{0,63}``. - * No more than 64 labels can be associated with a given resource. - * Keys and values must both be under 128 bytes. 
- state (~google.cloud.bigtable_admin_v2.types.State): (``OutputOnly``) - The current state of the instance. + deployment strategies. They can be used to filter resources and + aggregate metrics. + + - Label keys must be between 1 and 63 characters long and must conform + to the regular expression: + ``[\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}``. + - Label values must be between 0 and 63 characters long and must + conform to the regular expression: ``[\p{Ll}\p{Lo}\p{N}_-]{0,63}``. + - No more than 64 labels can be associated with a given resource. + - Keys and values must both be under 128 bytes. + state (~google.cloud.bigtable_admin_v2.types.State): (``OutputOnly``) The current state of the instance. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. @@ -584,10 +585,10 @@ def partial_update_instance( >>> >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() >>> - >>> # TODO: Initialize ``instance``: + >>> # TODO: Initialize `instance`: >>> instance = {} >>> - >>> # TODO: Initialize ``update_mask``: + >>> # TODO: Initialize `update_mask`: >>> update_mask = {} >>> >>> response = client.partial_update_instance(instance, update_mask) @@ -603,10 +604,12 @@ def partial_update_instance( Args: instance (Union[dict, ~google.cloud.bigtable_admin_v2.types.Instance]): The Instance which will (partially) replace the current value. + If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_admin_v2.types.Instance` update_mask (Union[dict, ~google.cloud.bigtable_admin_v2.types.FieldMask]): The subset of Instance fields which should be replaced. Must be explicitly set. + If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_admin_v2.types.FieldMask` retry (Optional[google.api_core.retry.Retry]): A retry object used @@ -683,8 +686,8 @@ def delete_instance(self, >>> client.delete_instance(name) Args: - name (str): The unique name of the instance to be deleted. - Values are of the form ``projects//instances/``. + name (str): The unique name of the instance to be deleted. Values are of the form + ``projects//instances/``. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. @@ -746,10 +749,10 @@ def create_cluster(self, >>> >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') >>> - >>> # TODO: Initialize ``cluster_id``: + >>> # TODO: Initialize `cluster_id`: >>> cluster_id = '' >>> - >>> # TODO: Initialize ``cluster``: + >>> # TODO: Initialize `cluster`: >>> cluster = {} >>> >>> response = client.create_cluster(parent, cluster_id, cluster) @@ -765,13 +768,13 @@ def create_cluster(self, Args: parent (str): The unique name of the instance in which to create the new cluster. - Values are of the form - ``projects//instances/``. + Values are of the form ``projects//instances/``. cluster_id (str): The ID to be used when referring to the new cluster within its instance, e.g., just ``mycluster`` rather than ``projects/myproject/instances/myinstance/clusters/mycluster``. - cluster (Union[dict, ~google.cloud.bigtable_admin_v2.types.Cluster]): The cluster to be created. - Fields marked ``OutputOnly`` must be left blank. + cluster (Union[dict, ~google.cloud.bigtable_admin_v2.types.Cluster]): The cluster to be created. Fields marked ``OutputOnly`` must be left + blank. 
+ If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_admin_v2.types.Cluster` retry (Optional[google.api_core.retry.Retry]): A retry object used @@ -914,10 +917,11 @@ def list_clusters(self, >>> response = client.list_clusters(parent) Args: - parent (str): The unique name of the instance for which a list of clusters is requested. - Values are of the form ``projects//instances/``. - Use `` = '-'`` to list Clusters for all Instances in a project, - e.g., ``projects/myproject/instances/-``. + parent (str): The unique name of the instance for which a list of clusters is + requested. Values are of the form + ``projects//instances/``. Use `` = '-'`` to + list Clusters for all Instances in a project, e.g., + ``projects/myproject/instances/-``. page_token (str): DEPRECATED: This field is unused and ignored. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not @@ -987,7 +991,7 @@ def update_cluster(self, >>> >>> name = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') >>> - >>> # TODO: Initialize ``serve_nodes``: + >>> # TODO: Initialize `serve_nodes`: >>> serve_nodes = 0 >>> >>> response = client.update_cluster(name, serve_nodes) @@ -1002,20 +1006,16 @@ def update_cluster(self, >>> metadata = response.metadata() Args: - name (str): (``OutputOnly``) - The unique name of the cluster. Values are of the form + name (str): (``OutputOnly``) The unique name of the cluster. Values are of the form ``projects//instances//clusters/[a-z][-a-z0-9]*``. serve_nodes (int): The number of nodes allocated to this cluster. More nodes enable higher throughput and more consistent performance. - location (str): (``CreationOnly``) - The location where this cluster's nodes and storage reside. For best - performance, clients should be located as close as possible to this - cluster. Currently only zones are supported, so values should be of the - form ``projects//locations/``. - state (~google.cloud.bigtable_admin_v2.types.State): (``OutputOnly``) - The current state of the cluster. - default_storage_type (~google.cloud.bigtable_admin_v2.types.StorageType): (``CreationOnly``) - The type of storage used by this cluster to serve its + location (str): (``CreationOnly``) The location where this cluster's nodes and storage + reside. For best performance, clients should be located as close as + possible to this cluster. Currently only zones are supported, so values + should be of the form ``projects//locations/``. + state (~google.cloud.bigtable_admin_v2.types.State): (``OutputOnly``) The current state of the cluster. + default_storage_type (~google.cloud.bigtable_admin_v2.types.StorageType): (``CreationOnly``) The type of storage used by this cluster to serve its parent instance's tables, unless explicitly overridden. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not @@ -1156,23 +1156,23 @@ def create_app_profile(self, >>> >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') >>> - >>> # TODO: Initialize ``app_profile_id``: + >>> # TODO: Initialize `app_profile_id`: >>> app_profile_id = '' >>> - >>> # TODO: Initialize ``app_profile``: + >>> # TODO: Initialize `app_profile`: >>> app_profile = {} >>> >>> response = client.create_app_profile(parent, app_profile_id, app_profile) Args: parent (str): The unique name of the instance in which to create the new app profile. 
- Values are of the form - ``projects//instances/``. + Values are of the form ``projects//instances/``. app_profile_id (str): The ID to be used when referring to the new app profile within its instance, e.g., just ``myprofile`` rather than ``projects/myproject/instances/myinstance/appProfiles/myprofile``. - app_profile (Union[dict, ~google.cloud.bigtable_admin_v2.types.AppProfile]): The app profile to be created. - Fields marked ``OutputOnly`` will be ignored. + app_profile (Union[dict, ~google.cloud.bigtable_admin_v2.types.AppProfile]): The app profile to be created. Fields marked ``OutputOnly`` will be + ignored. + If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_admin_v2.types.AppProfile` ignore_warnings (bool): If true, ignore safety checks when creating the app profile. @@ -1319,7 +1319,7 @@ def list_app_profiles(self, >>> # Alternatively: >>> >>> # Iterate over results one page at a time - >>> for page in client.list_app_profiles(parent, options=CallOptions(page_token=INITIAL_PAGE)): + >>> for page in client.list_app_profiles(parent).pages: ... for element in page: ... # process element ... pass @@ -1327,9 +1327,9 @@ def list_app_profiles(self, Args: parent (str): The unique name of the instance for which a list of app profiles is requested. Values are of the form - ``projects//instances/``. - Use `` = '-'`` to list AppProfiles for all Instances in a project, - e.g., ``projects/myproject/instances/-``. + ``projects//instances/``. Use `` = '-'`` to + list AppProfiles for all Instances in a project, e.g., + ``projects/myproject/instances/-``. page_size (int): Maximum number of results per page. CURRENTLY UNIMPLEMENTED AND IGNORED. retry (Optional[google.api_core.retry.Retry]): A retry object used @@ -1411,10 +1411,10 @@ def update_app_profile(self, >>> >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() >>> - >>> # TODO: Initialize ``app_profile``: + >>> # TODO: Initialize `app_profile`: >>> app_profile = {} >>> - >>> # TODO: Initialize ``update_mask``: + >>> # TODO: Initialize `update_mask`: >>> update_mask = {} >>> >>> response = client.update_app_profile(app_profile, update_mask) @@ -1430,10 +1430,12 @@ def update_app_profile(self, Args: app_profile (Union[dict, ~google.cloud.bigtable_admin_v2.types.AppProfile]): The app profile which will (partially) replace the current value. + If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_admin_v2.types.AppProfile` update_mask (Union[dict, ~google.cloud.bigtable_admin_v2.types.FieldMask]): The subset of app profile fields which should be replaced. If unset, all fields will be replaced. + If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_admin_v2.types.FieldMask` ignore_warnings (bool): If true, ignore safety checks when updating the app profile. 
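
For readers skimming the ``update_app_profile`` docstring touched just above, a minimal usage sketch of the dict-based partial-update pattern it describes. This sketch is not part of the patch; the project, instance, and profile names are invented, and it assumes default application credentials are available.

```python
# Hypothetical sketch of a partial app-profile update via a FieldMask dict.
from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableInstanceAdminClient()

# Dicts are coerced to the corresponding protobuf messages by the client.
app_profile = {
    'name': 'projects/my-project/instances/my-instance/appProfiles/my-profile',
    'description': 'Updated description',
}
update_mask = {'paths': ['description']}  # only 'description' is replaced

operation = client.update_app_profile(
    app_profile, update_mask, ignore_warnings=True)
updated_profile = operation.result()  # wait for the long-running update
```
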
@@ -1510,7 +1512,7 @@ def delete_app_profile(self, >>> >>> name = client.app_profile_path('[PROJECT]', '[INSTANCE]', '[APP_PROFILE]') >>> - >>> # TODO: Initialize ``ignore_warnings``: + >>> # TODO: Initialize `ignore_warnings`: >>> ignore_warnings = False >>> >>> client.delete_app_profile(name, ignore_warnings) @@ -1651,7 +1653,7 @@ def set_iam_policy(self, >>> >>> resource = client.instance_path('[PROJECT]', '[INSTANCE]') >>> - >>> # TODO: Initialize ``policy``: + >>> # TODO: Initialize `policy`: >>> policy = {} >>> >>> response = client.set_iam_policy(resource, policy) @@ -1660,10 +1662,11 @@ def set_iam_policy(self, resource (str): REQUIRED: The resource for which the policy is being specified. ``resource`` is usually specified as a path. For example, a Project resource is specified as ``projects/{project}``. - policy (Union[dict, ~google.cloud.bigtable_admin_v2.types.Policy]): REQUIRED: The complete policy to be applied to the ``resource``. The size of - the policy is limited to a few 10s of KB. An empty policy is a + policy (Union[dict, ~google.cloud.bigtable_admin_v2.types.Policy]): REQUIRED: The complete policy to be applied to the ``resource``. The + size of the policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Cloud Platform services (such as Projects) might reject them. + If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_admin_v2.types.Policy` retry (Optional[google.api_core.retry.Retry]): A retry object used @@ -1731,7 +1734,7 @@ def test_iam_permissions(self, >>> >>> resource = client.instance_path('[PROJECT]', '[INSTANCE]') >>> - >>> # TODO: Initialize ``permissions``: + >>> # TODO: Initialize `permissions`: >>> permissions = [] >>> >>> response = client.test_iam_permissions(resource, permissions) @@ -1742,8 +1745,8 @@ def test_iam_permissions(self, resource is specified as ``projects/{project}``. permissions (list[str]): The set of permissions to check for the ``resource``. Permissions with wildcards (such as '*' or 'storage.*') are not allowed. For more - information see - `IAM Overview `_. + information see `IAM + Overview `__. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. 
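
With the ``client_info`` change above, a fresh ``ClientInfo`` is created when none is supplied instead of reusing (and mutating) the shared ``DEFAULT_CLIENT_INFO``, and a caller-provided object only has its ``gapic_version`` stamped. A minimal sketch of supplying one follows; it is not part of the patch, the version string is invented, and default application credentials are assumed.

```python
# Hypothetical sketch of passing a caller-owned client_info to the client.
from google.api_core.gapic_v1.client_info import ClientInfo
from google.cloud import bigtable_admin_v2

my_info = ClientInfo(client_library_version='my-wrapper/1.2.3')
client = bigtable_admin_v2.BigtableInstanceAdminClient(client_info=my_info)
# Only `my_info` is modified; the shared DEFAULT_CLIENT_INFO is left untouched.
```
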
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client_config.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client_config.py index 9107ed551462..b9e1fc6b385d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client_config.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client_config.py @@ -23,12 +23,12 @@ "retry_params_name": "default" }, "GetInstance": { - "timeout_millis": 120000, + "timeout_millis": 60000, "retry_codes_name": "idempotent", "retry_params_name": "default" }, "ListInstances": { - "timeout_millis": 120000, + "timeout_millis": 60000, "retry_codes_name": "idempotent", "retry_params_name": "default" }, @@ -43,7 +43,7 @@ "retry_params_name": "default" }, "DeleteInstance": { - "timeout_millis": 150000, + "timeout_millis": 60000, "retry_codes_name": "non_idempotent", "retry_params_name": "default" }, @@ -53,12 +53,12 @@ "retry_params_name": "default" }, "GetCluster": { - "timeout_millis": 120000, + "timeout_millis": 60000, "retry_codes_name": "idempotent", "retry_params_name": "default" }, "ListClusters": { - "timeout_millis": 120000, + "timeout_millis": 60000, "retry_codes_name": "idempotent", "retry_params_name": "default" }, @@ -68,47 +68,47 @@ "retry_params_name": "default" }, "DeleteCluster": { - "timeout_millis": 120000, + "timeout_millis": 60000, "retry_codes_name": "non_idempotent", "retry_params_name": "default" }, "CreateAppProfile": { - "timeout_millis": 120000, + "timeout_millis": 60000, "retry_codes_name": "non_idempotent", "retry_params_name": "default" }, "GetAppProfile": { - "timeout_millis": 120000, + "timeout_millis": 60000, "retry_codes_name": "idempotent", "retry_params_name": "default" }, "ListAppProfiles": { - "timeout_millis": 120000, + "timeout_millis": 60000, "retry_codes_name": "idempotent", "retry_params_name": "default" }, "UpdateAppProfile": { - "timeout_millis": 120000, + "timeout_millis": 60000, "retry_codes_name": "idempotent", "retry_params_name": "default" }, "DeleteAppProfile": { - "timeout_millis": 120000, + "timeout_millis": 60000, "retry_codes_name": "non_idempotent", "retry_params_name": "default" }, "GetIamPolicy": { - "timeout_millis": 120000, + "timeout_millis": 60000, "retry_codes_name": "idempotent", "retry_params_name": "default" }, "SetIamPolicy": { - "timeout_millis": 120000, + "timeout_millis": 60000, "retry_codes_name": "non_idempotent", "retry_params_name": "default" }, "TestIamPermissions": { - "timeout_millis": 120000, + "timeout_millis": 60000, "retry_codes_name": "idempotent", "retry_params_name": "default" } diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py index f758608a88ac..98631b20ef92 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py @@ -198,9 +198,10 @@ def __init__(self, ) if client_info is None: - client_info = ( - google.api_core.gapic_v1.client_info.DEFAULT_CLIENT_INFO) - client_info.gapic_version = _GAPIC_LIBRARY_VERSION + client_info = google.api_core.gapic_v1.client_info.ClientInfo( + gapic_version=_GAPIC_LIBRARY_VERSION, ) + else: + client_info.gapic_version = 
_GAPIC_LIBRARY_VERSION self._client_info = client_info # Parse out the default settings for retry and timeout for each RPC @@ -237,42 +238,42 @@ def create_table(self, >>> >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') >>> - >>> # TODO: Initialize ``table_id``: + >>> # TODO: Initialize `table_id`: >>> table_id = '' >>> - >>> # TODO: Initialize ``table``: + >>> # TODO: Initialize `table`: >>> table = {} >>> >>> response = client.create_table(parent, table_id, table) Args: - parent (str): The unique name of the instance in which to create the table. - Values are of the form ``projects//instances/``. + parent (str): The unique name of the instance in which to create the table. Values are + of the form ``projects//instances/``. table_id (str): The name by which the new table should be referred to within the parent instance, e.g., ``foobar`` rather than ``/tables/foobar``. table (Union[dict, ~google.cloud.bigtable_admin_v2.types.Table]): The Table to create. + If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_admin_v2.types.Table` initial_splits (list[Union[dict, ~google.cloud.bigtable_admin_v2.types.Split]]): The optional list of row keys that will be used to initially split the - table into several tablets (tablets are similar to HBase regions). - Given two split keys, ``s1`` and ``s2``, three tablets will be created, + table into several tablets (tablets are similar to HBase regions). Given + two split keys, ``s1`` and ``s2``, three tablets will be created, spanning the key ranges: ``[, s1), [s1, s2), [s2, )``. Example: - * Row keys := ``[\"a\", \"apple\", \"custom\", \"customer_1\", \"customer_2\",`` - :: + - Row keys := ``["a", "apple", "custom", "customer_1", "customer_2",`` + ``"other", "zz"]`` + - initial\_split\_keys := + ``["apple", "customer_1", "customer_2", "other"]`` + - Key assignment: - `\"other\", \"zz\"]` - * initial_split_keys := ``[\"apple\", \"customer_1\", \"customer_2\", \"other\"]`` - * Key assignment: - :: + - Tablet 1 ``[, apple) => {"a"}.`` + - Tablet 2 ``[apple, customer_1) => {"apple", "custom"}.`` + - Tablet 3 ``[customer_1, customer_2) => {"customer_1"}.`` + - Tablet 4 ``[customer_2, other) => {"customer_2"}.`` + - Tablet 5 ``[other, ) => {"other", "zz"}.`` - - Tablet 1 `[, apple) => {\"a\"}.` - - Tablet 2 `[apple, customer_1) => {\"apple\", \"custom\"}.` - - Tablet 3 `[customer_1, customer_2) => {\"customer_1\"}.` - - Tablet 4 `[customer_2, other) => {\"customer_2\"}.` - - Tablet 5 `[other, ) => {\"other\", \"zz\"}.` If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_admin_v2.types.Split` retry (Optional[google.api_core.retry.Retry]): A retry object used @@ -351,10 +352,10 @@ def create_table_from_snapshot( >>> >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') >>> - >>> # TODO: Initialize ``table_id``: + >>> # TODO: Initialize `table_id`: >>> table_id = '' >>> - >>> # TODO: Initialize ``source_snapshot``: + >>> # TODO: Initialize `source_snapshot`: >>> source_snapshot = '' >>> >>> response = client.create_table_from_snapshot(parent, table_id, source_snapshot) @@ -369,13 +370,13 @@ def create_table_from_snapshot( >>> metadata = response.metadata() Args: - parent (str): The unique name of the instance in which to create the table. - Values are of the form ``projects//instances/``. + parent (str): The unique name of the instance in which to create the table. Values are + of the form ``projects//instances/``. 
table_id (str): The name by which the new table should be referred to within the parent instance, e.g., ``foobar`` rather than ``/tables/foobar``. source_snapshot (str): The unique name of the snapshot from which to restore the table. The - snapshot and the table must be in the same instance. - Values are of the form + snapshot and the table must be in the same instance. Values are of the + form ``projects//instances//clusters//snapshots/``. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not @@ -461,7 +462,7 @@ def list_tables(self, >>> # Alternatively: >>> >>> # Iterate over results one page at a time - >>> for page in client.list_tables(parent, options=CallOptions(page_token=INITIAL_PAGE)): + >>> for page in client.list_tables(parent).pages: ... for element in page: ... # process element ... pass @@ -469,8 +470,8 @@ def list_tables(self, Args: parent (str): The unique name of the instance for which tables should be listed. Values are of the form ``projects//instances/``. - view (~google.cloud.bigtable_admin_v2.types.View): The view to be applied to the returned tables' fields. - Defaults to ``NAME_ONLY`` if unspecified; no others are currently supported. + view (~google.cloud.bigtable_admin_v2.types.View): The view to be applied to the returned tables' fields. Defaults to + ``NAME_ONLY`` if unspecified; no others are currently supported. page_size (int): Maximum number of results per page. CURRENTLY UNIMPLEMENTED AND IGNORED. retry (Optional[google.api_core.retry.Retry]): A retry object used @@ -555,11 +556,10 @@ def get_table(self, >>> response = client.get_table(name) Args: - name (str): The unique name of the requested table. - Values are of the form + name (str): The unique name of the requested table. Values are of the form ``projects//instances//tables/
``. - view (~google.cloud.bigtable_admin_v2.types.View): The view to be applied to the returned table's fields. - Defaults to ``SCHEMA_VIEW`` if unspecified. + view (~google.cloud.bigtable_admin_v2.types.View): The view to be applied to the returned table's fields. Defaults to + ``SCHEMA_VIEW`` if unspecified. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. @@ -626,8 +626,7 @@ def delete_table(self, >>> client.delete_table(name) Args: - name (str): The unique name of the table to be deleted. - Values are of the form + name (str): The unique name of the table to be deleted. Values are of the form ``projects//instances//tables/
``. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not @@ -691,19 +690,20 @@ def modify_column_families(self, >>> >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') >>> - >>> # TODO: Initialize ``modifications``: + >>> # TODO: Initialize `modifications`: >>> modifications = [] >>> >>> response = client.modify_column_families(name, modifications) Args: - name (str): The unique name of the table whose families should be modified. - Values are of the form + name (str): The unique name of the table whose families should be modified. Values + are of the form ``projects//instances//tables/
``. modifications (list[Union[dict, ~google.cloud.bigtable_admin_v2.types.Modification]]): Modifications to be atomically applied to the specified table's families. Entries are applied in order, meaning that earlier modifications can be masked by later ones (in the case of repeated updates to the same family, for example). + If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_admin_v2.types.Modification` retry (Optional[google.api_core.retry.Retry]): A retry object used @@ -778,8 +778,8 @@ def drop_row_range(self, >>> client.drop_row_range(name) Args: - name (str): The unique name of the table on which to drop a range of rows. - Values are of the form + name (str): The unique name of the table on which to drop a range of rows. Values + are of the form ``projects//instances//tables/
``. row_key_prefix (bytes): Delete all rows that start with this row key prefix. Prefix cannot be zero length. @@ -929,7 +929,7 @@ def check_consistency(self, >>> >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') >>> - >>> # TODO: Initialize ``consistency_token``: + >>> # TODO: Initialize `consistency_token`: >>> consistency_token = '' >>> >>> response = client.check_consistency(name, consistency_token) @@ -1015,13 +1015,13 @@ def snapshot_table(self, >>> >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') >>> - >>> # TODO: Initialize ``cluster``: + >>> # TODO: Initialize `cluster`: >>> cluster = '' >>> - >>> # TODO: Initialize ``snapshot_id``: + >>> # TODO: Initialize `snapshot_id`: >>> snapshot_id = '' >>> - >>> # TODO: Initialize ``description``: + >>> # TODO: Initialize `description`: >>> description = '' >>> >>> response = client.snapshot_table(name, cluster, snapshot_id, description) @@ -1036,21 +1036,21 @@ def snapshot_table(self, >>> metadata = response.metadata() Args: - name (str): The unique name of the table to have the snapshot taken. - Values are of the form - ``projects//instances//tables/
``. - cluster (str): The name of the cluster where the snapshot will be created in. - Values are of the form + name (str): The unique name of the table to have the snapshot taken. Values are of + the form ``projects//instances//tables/
``. + cluster (str): The name of the cluster where the snapshot will be created in. Values + are of the form ``projects//instances//clusters/``. snapshot_id (str): The ID by which the new snapshot should be referred to within the parent - cluster, e.g., ``mysnapshot`` of the form: ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*`` - rather than + cluster, e.g., ``mysnapshot`` of the form: + ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*`` rather than ``projects//instances//clusters//snapshots/mysnapshot``. description (str): Description of the snapshot. ttl (Union[dict, ~google.cloud.bigtable_admin_v2.types.Duration]): The amount of time that the new snapshot can stay active after it is created. Once 'ttl' expires, the snapshot will get deleted. The maximum amount of time a snapshot can stay active is 7 days. If 'ttl' is not specified, the default value of 24 hours will be used. + If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_admin_v2.types.Duration` retry (Optional[google.api_core.retry.Retry]): A retry object used @@ -1135,8 +1135,7 @@ def get_snapshot(self, >>> response = client.get_snapshot(name) Args: - name (str): The unique name of the requested snapshot. - Values are of the form + name (str): The unique name of the requested snapshot. Values are of the form ``projects//instances//clusters//snapshots/``. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not @@ -1215,7 +1214,7 @@ def list_snapshots(self, >>> # Alternatively: >>> >>> # Iterate over results one page at a time - >>> for page in client.list_snapshots(parent, options=CallOptions(page_token=INITIAL_PAGE)): + >>> for page in client.list_snapshots(parent).pages: ... for element in page: ... # process element ... pass @@ -1223,8 +1222,8 @@ def list_snapshots(self, Args: parent (str): The unique name of the cluster for which snapshots should be listed. Values are of the form - ``projects//instances//clusters/``. - Use `` = '-'`` to list snapshots for all clusters in an instance, + ``projects//instances//clusters/``. Use + `` = '-'`` to list snapshots for all clusters in an instance, e.g., ``projects//instances//clusters/-``. page_size (int): The maximum number of resources contained in the underlying API response. If page streaming is performed per- @@ -1318,8 +1317,7 @@ def delete_snapshot(self, >>> client.delete_snapshot(name) Args: - name (str): The unique name of the snapshot to be deleted. - Values are of the form + name (str): The unique name of the snapshot to be deleted. Values are of the form ``projects//instances//clusters//snapshots/``. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. 
If ``None`` is specified, requests will not diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py index 895c81a25c78..63e08c23aa72 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py @@ -18,7 +18,7 @@ }, "methods": { "CreateTable": { - "timeout_millis": 900000, + "timeout_millis": 130000, "retry_codes_name": "non_idempotent", "retry_params_name": "default" }, @@ -28,22 +28,22 @@ "retry_params_name": "default" }, "ListTables": { - "timeout_millis": 120000, + "timeout_millis": 60000, "retry_codes_name": "idempotent", "retry_params_name": "default" }, "GetTable": { - "timeout_millis": 120000, + "timeout_millis": 60000, "retry_codes_name": "idempotent", "retry_params_name": "default" }, "DeleteTable": { - "timeout_millis": 120000, + "timeout_millis": 60000, "retry_codes_name": "non_idempotent", "retry_params_name": "default" }, "ModifyColumnFamilies": { - "timeout_millis": 900000, + "timeout_millis": 60000, "retry_codes_name": "non_idempotent", "retry_params_name": "default" }, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py index 6081506d671a..a4a88bb17ba1 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py @@ -54,8 +54,8 @@ class View(enum.IntEnum): VIEW_UNSPECIFIED (int): Uses the default view for each method as documented in its request. NAME_ONLY (int): Only populates ``name``. SCHEMA_VIEW (int): Only populates ``name`` and fields related to the table's schema. - REPLICATION_VIEW (int): Only populates ``name`` and fields related to the table's - replication state. + REPLICATION_VIEW (int): Only populates ``name`` and fields related to the table's replication + state. FULL (int): Populates all fields. """ VIEW_UNSPECIFIED = 0 @@ -128,17 +128,16 @@ class Type(enum.IntEnum): Attributes: TYPE_UNSPECIFIED (int): The type of the instance is unspecified. If set when creating an - instance, a ``PRODUCTION`` instance will be created. If set when updating - an instance, the type will be left unchanged. - PRODUCTION (int): An instance meant for production use. ``serve_nodes`` must be set - on the cluster. + instance, a ``PRODUCTION`` instance will be created. If set when + updating an instance, the type will be left unchanged. + PRODUCTION (int): An instance meant for production use. ``serve_nodes`` must be set on the + cluster. DEVELOPMENT (int): The instance is meant for development and testing purposes only; it has - no performance or uptime guarantees and is not covered by SLA. - After a development instance is created, it can be upgraded by - updating the instance to type ``PRODUCTION``. An instance created - as a production instance cannot be changed to a development instance. - When creating a development instance, ``serve_nodes`` on the cluster must - not be set. + no performance or uptime guarantees and is not covered by SLA. After a + development instance is created, it can be upgraded by updating the + instance to type ``PRODUCTION``. 
An instance created as a production + instance cannot be changed to a development instance. When creating a + development instance, ``serve_nodes`` on the cluster must not be set. """ TYPE_UNSPECIFIED = 0 PRODUCTION = 1 diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py index c4149b0d214a..b84cc5f5f3df 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py @@ -147,9 +147,10 @@ def __init__(self, ) if client_info is None: - client_info = ( - google.api_core.gapic_v1.client_info.DEFAULT_CLIENT_INFO) - client_info.gapic_version = _GAPIC_LIBRARY_VERSION + client_info = google.api_core.gapic_v1.client_info.ClientInfo( + gapic_version=_GAPIC_LIBRARY_VERSION, ) + else: + client_info.gapic_version = _GAPIC_LIBRARY_VERSION self._client_info = client_info # Parse out the default settings for retry and timeout for each RPC @@ -194,16 +195,17 @@ def read_rows(self, ... pass Args: - table_name (str): The unique name of the table from which to read. - Values are of the form + table_name (str): The unique name of the table from which to read. Values are of the form ``projects//instances//tables/
``. app_profile_id (str): This value specifies routing for replication. If not specified, the - \"default\" application profile will be used. + "default" application profile will be used. rows (Union[dict, ~google.cloud.bigtable_v2.types.RowSet]): The row keys and/or ranges to read. If not specified, reads from all rows. + If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_v2.types.RowSet` filter_ (Union[dict, ~google.cloud.bigtable_v2.types.RowFilter]): The filter to apply to the contents of the specified row(s). If unset, reads the entirety of each row. + If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_v2.types.RowFilter` rows_limit (long): The read will terminate after committing to N rows' worth of results. The @@ -283,11 +285,10 @@ def sample_row_keys(self, ... pass Args: - table_name (str): The unique name of the table from which to sample row keys. - Values are of the form - ``projects//instances//tables/
``. + table_name (str): The unique name of the table from which to sample row keys. Values are + of the form ``projects//instances//tables/
``. app_profile_id (str): This value specifies routing for replication. If not specified, the - \"default\" application profile will be used. + "default" application profile will be used. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. @@ -356,10 +357,10 @@ def mutate_row(self, >>> >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') >>> - >>> # TODO: Initialize ``row_key``: + >>> # TODO: Initialize `row_key`: >>> row_key = b'' >>> - >>> # TODO: Initialize ``mutations``: + >>> # TODO: Initialize `mutations`: >>> mutations = [] >>> >>> response = client.mutate_row(table_name, row_key, mutations) @@ -372,10 +373,11 @@ def mutate_row(self, mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Changes to be atomically applied to the specified row. Entries are applied in order, meaning that earlier mutations can be masked by later ones. Must contain at least one entry and at most 100000. + If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_v2.types.Mutation` app_profile_id (str): This value specifies routing for replication. If not specified, the - \"default\" application profile will be used. + "default" application profile will be used. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. @@ -445,7 +447,7 @@ def mutate_rows(self, >>> >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') >>> - >>> # TODO: Initialize ``entries``: + >>> # TODO: Initialize `entries`: >>> entries = [] >>> >>> for element in client.mutate_rows(table_name, entries): @@ -459,10 +461,11 @@ def mutate_rows(self, applied in arbitrary order (even between entries for the same row). At least one entry must be specified, and in total the entries can contain at most 100000 mutations. + If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_v2.types.Entry` app_profile_id (str): This value specifies routing for replication. If not specified, the - \"default\" application profile will be used. + "default" application profile will be used. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. @@ -532,37 +535,39 @@ def check_and_mutate_row(self, >>> >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') >>> - >>> # TODO: Initialize ``row_key``: + >>> # TODO: Initialize `row_key`: >>> row_key = b'' >>> >>> response = client.check_and_mutate_row(table_name, row_key) Args: table_name (str): The unique name of the table to which the conditional mutation should be - applied. - Values are of the form + applied. Values are of the form ``projects//instances//tables/
``. row_key (bytes): The key of the row to which the conditional mutation should be applied. app_profile_id (str): This value specifies routing for replication. If not specified, the - \"default\" application profile will be used. + "default" application profile will be used. predicate_filter (Union[dict, ~google.cloud.bigtable_v2.types.RowFilter]): The filter to be applied to the contents of the specified row. Depending on whether or not any results are yielded, either ``true_mutations`` or - ``false_mutations`` will be executed. If unset, checks that the row contains - any values at all. + ``false_mutations`` will be executed. If unset, checks that the row + contains any values at all. + If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_v2.types.RowFilter` - true_mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Changes to be atomically applied to the specified row if ``predicate_filter`` - yields at least one cell when applied to ``row_key``. Entries are applied in - order, meaning that earlier mutations can be masked by later ones. - Must contain at least one entry if ``false_mutations`` is empty, and at most - 100000. + true_mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Changes to be atomically applied to the specified row if + ``predicate_filter`` yields at least one cell when applied to + ``row_key``. Entries are applied in order, meaning that earlier + mutations can be masked by later ones. Must contain at least one entry + if ``false_mutations`` is empty, and at most 100000. + If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_v2.types.Mutation` - false_mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Changes to be atomically applied to the specified row if ``predicate_filter`` - does not yield any cells when applied to ``row_key``. Entries are applied in - order, meaning that earlier mutations can be masked by later ones. - Must contain at least one entry if ``true_mutations`` is empty, and at most - 100000. + false_mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Changes to be atomically applied to the specified row if + ``predicate_filter`` does not yield any cells when applied to + ``row_key``. Entries are applied in order, meaning that earlier + mutations can be masked by later ones. Must contain at least one entry + if ``true_mutations`` is empty, and at most 100000. + If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_v2.types.Mutation` retry (Optional[google.api_core.retry.Retry]): A retry object used @@ -641,27 +646,27 @@ def read_modify_write_row(self, >>> >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') >>> - >>> # TODO: Initialize ``row_key``: + >>> # TODO: Initialize `row_key`: >>> row_key = b'' >>> - >>> # TODO: Initialize ``rules``: + >>> # TODO: Initialize `rules`: >>> rules = [] >>> >>> response = client.read_modify_write_row(table_name, row_key, rules) Args: - table_name (str): The unique name of the table to which the read/modify/write rules should be - applied. - Values are of the form + table_name (str): The unique name of the table to which the read/modify/write rules should + be applied. Values are of the form ``projects//instances//tables/
``. row_key (bytes): The key of the row to which the read/modify/write rules should be applied. rules (list[Union[dict, ~google.cloud.bigtable_v2.types.ReadModifyWriteRule]]): Rules specifying how the specified row's contents are to be transformed into writes. Entries are applied in order, meaning that earlier rules will affect the results of later ones. + If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_v2.types.ReadModifyWriteRule` app_profile_id (str): This value specifies routing for replication. If not specified, the - \"default\" application profile will be used. + "default" application profile will be used. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. From fe45a555fbf5f479ba428252066cb9cc747cf06e Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Wed, 7 Nov 2018 11:50:54 -0500 Subject: [PATCH 202/892] Assorted synth fixups / cleanups (#6400) * Asset: Replace 'stable' -> 'latest' in docs links, caused by out-of-date synth. * AutoML: fix copying generated docs. * Bigtable: remove now-spurious fixup from 'synth.py'. * DLP: remove now-spurious fixups from 'synth.py'. * Monitoring: remove now-spurious fixups from 'synth.py'. * Redis: remove now-spurious fixups from 'synth.py'. * Speech: remove now-spurious fixups from 'synth.py'. --- packages/google-cloud-bigtable/synth.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/packages/google-cloud-bigtable/synth.py b/packages/google-cloud-bigtable/synth.py index 5e73c768a5b3..464b1aafdac5 100644 --- a/packages/google-cloud-bigtable/synth.py +++ b/packages/google-cloud-bigtable/synth.py @@ -50,10 +50,6 @@ "google/**/*.py", 'from google\.cloud\.bigtable\.admin_v2.proto', 'from google.cloud.bigtable_admin_v2.proto') -s.replace( - "tests/**/*.py", - 'from google\.cloud\.bigtable\.admin_v2.proto', - 'from google.cloud.bigtable_admin_v2.proto') s.replace( ['google/cloud/bigtable_admin_v2/gapic/transports/' From a63a809a3b49e2749711e4a56f0fff1490923bbf Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Wed, 7 Nov 2018 14:03:35 -0500 Subject: [PATCH 203/892] Bump minimum 'api_core' version for all GAPIC libs to 1.4.1. (#6391) Closes #6390. --- packages/google-cloud-bigtable/setup.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 83d7cad45bf2..892cd4dcc008 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -29,9 +29,9 @@ # 'Development Status :: 5 - Production/Stable' release_status = 'Development Status :: 4 - Beta' dependencies = [ - 'google-cloud-core<0.29dev,>=0.28.0', - 'google-api-core[grpc]<2.0.0dev,>=0.1.1', - 'grpc-google-iam-v1<0.12dev,>=0.11.4', + 'google-api-core[grpc] >= 1.4.1, < 2.0.0dev', + 'google-cloud-core >= 0.28.0, <0.29dev', + 'grpc-google-iam-v1 >= 0.11.4, < 0.12dev', ] extras = { } From 13e9c93668d0d48ebd7859592887582e8862be27 Mon Sep 17 00:00:00 2001 From: sumit-ql <39561577+sumit-ql@users.noreply.github.com> Date: Sat, 10 Nov 2018 01:21:09 +0530 Subject: [PATCH 204/892] Fix anonymous usage when run with Bigtable emulator (#6385) Closes #6287. 
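
A minimal usage sketch of what this change enables; it is not part of the patch, the emulator host/port and project ID are example values, and a local emulator is assumed to be running.

```python
# Hypothetical sketch: using the handwritten client against a local emulator.
import os

os.environ['BIGTABLE_EMULATOR_HOST'] = 'localhost:8086'  # emulator address

from google.auth.credentials import AnonymousCredentials
from google.cloud.bigtable import Client

# With this fix the GAPIC clients are built on an insecure gRPC channel to
# the emulator host, so anonymous credentials are sufficient.
client = Client(project='emulator-project',
                credentials=AnonymousCredentials(),
                admin=True)
instance = client.instance('test-instance')
```
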
--- .../google/cloud/bigtable/client.py | 38 +++++++---- .../tests/unit/test_client.py | 66 +++++++++++++++++++ 2 files changed, 93 insertions(+), 11 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py index 4ac61913ba4c..f6f8866426b0 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py @@ -27,7 +27,9 @@ * a :class:`~google.cloud.bigtable.table.Table` owns a :class:`~google.cloud.bigtable.row.Row` (and all the cells in the row) """ +import os import warnings +import grpc from google.api_core.gapic_v1 import client_info @@ -42,6 +44,7 @@ from google.cloud.bigtable_admin_v2 import enums from google.cloud.bigtable.cluster import _CLUSTER_NAME_RE +from google.cloud.environment_vars import BIGTABLE_EMULATOR INSTANCE_TYPE_PRODUCTION = enums.Instance.Type.PRODUCTION @@ -58,6 +61,19 @@ """Scope for reading table data.""" +def _create_gapic_client(client_class): + + def inner(self): + if self._emulator_host is None: + return client_class( + credentials=self._credentials, client_info=_CLIENT_INFO) + else: + return client_class( + channel=self._emulator_channel, client_info=_CLIENT_INFO) + + return inner + + class Client(ClientWithProject): """Client for interacting with Google Cloud Bigtable API. @@ -109,6 +125,11 @@ def __init__(self, project=None, credentials=None, # It **may** use those scopes in ``with_scopes_if_required``. self._read_only = bool(read_only) self._admin = bool(admin) + self._emulator_host = os.getenv(BIGTABLE_EMULATOR) + self._emulator_channel = None + + if self._emulator_host is not None: + self._emulator_channel = grpc.insecure_channel(self._emulator_host) if channel is not None: warnings.warn( @@ -161,10 +182,8 @@ def table_data_client(self): :returns: A BigtableClient object. 
""" if self._table_data_client is None: - self._table_data_client = ( - bigtable_v2.BigtableClient(credentials=self._credentials, - client_info=_CLIENT_INFO)) - + self._table_data_client = _create_gapic_client( + bigtable_v2.BigtableClient)(self) return self._table_data_client @property @@ -180,10 +199,8 @@ def table_admin_client(self): if self._table_admin_client is None: if not self._admin: raise ValueError('Client is not an admin client.') - self._table_admin_client = ( - bigtable_admin_v2.BigtableTableAdminClient( - credentials=self._credentials, client_info=_CLIENT_INFO)) - + self._table_admin_client = _create_gapic_client( + bigtable_admin_v2.BigtableTableAdminClient)(self) return self._table_admin_client @property @@ -199,9 +216,8 @@ def instance_admin_client(self): if self._instance_admin_client is None: if not self._admin: raise ValueError('Client is not an admin client.') - self._instance_admin_client = ( - bigtable_admin_v2.BigtableInstanceAdminClient( - credentials=self._credentials, client_info=_CLIENT_INFO)) + self._instance_admin_client = _create_gapic_client( + bigtable_admin_v2.BigtableInstanceAdminClient)(self) return self._instance_admin_client def instance(self, instance_id, display_name=None, diff --git a/packages/google-cloud-bigtable/tests/unit/test_client.py b/packages/google-cloud-bigtable/tests/unit/test_client.py index 343e4e556114..e45c0d44218e 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/test_client.py @@ -20,6 +20,53 @@ from ._testing import _make_credentials +class Test__create_gapic_client(unittest.TestCase): + + def _invoke_client_factory(self, client_class): + from google.cloud.bigtable.client import _create_gapic_client + + return _create_gapic_client(client_class) + + def test_without_emulator(self): + from google.cloud.bigtable.client import _CLIENT_INFO + + client_class = mock.Mock() + credentials = _make_credentials() + client = _Client(credentials) + + result = self._invoke_client_factory(client_class)(client) + + self.assertIs(result, client_class.return_value) + client_class.assert_called_once_with( + credentials=client._credentials, + client_info=_CLIENT_INFO) + + def test_with_emulator(self): + from google.cloud.bigtable.client import _CLIENT_INFO + + client_class = mock.Mock() + emulator_host = emulator_channel = object() + credentials = _make_credentials() + client = _Client(credentials, emulator_host=emulator_host, + emulator_channel=emulator_channel) + + result = self._invoke_client_factory(client_class)(client) + + self.assertIs(result, client_class.return_value) + client_class.assert_called_once_with( + channel=client._emulator_channel, + client_info=_CLIENT_INFO) + + +class _Client(object): + + def __init__(self, credentials, emulator_host=None, + emulator_channel=None): + self._credentials = credentials + self._emulator_host = emulator_host + self._emulator_channel = emulator_channel + + class TestClient(unittest.TestCase): PROJECT = 'PROJECT' @@ -51,6 +98,8 @@ def test_constructor_defaults(self): self.assertFalse(client._read_only) self.assertFalse(client._admin) self.assertIsNone(client._channel) + self.assertIsNone(client._emulator_host) + self.assertIsNone(client._emulator_channel) self.assertEqual(client.SCOPE, (DATA_SCOPE,)) def test_constructor_explicit(self): @@ -86,6 +135,23 @@ def test_constructor_both_admin_and_read_only(self): project=self.PROJECT, credentials=credentials, admin=True, read_only=True) + def test_constructor_with_emulator_host(self): + from 
google.cloud.environment_vars import BIGTABLE_EMULATOR + + credentials = _make_credentials() + emulator_host = "localhost:8081" + with mock.patch('os.getenv') as getenv: + getenv.return_value = emulator_host + with mock.patch('grpc.insecure_channel') as factory: + getenv.return_value = emulator_host + client = self._make_one( + project=self.PROJECT, credentials=credentials) + + self.assertEqual(client._emulator_host, emulator_host) + self.assertIs(client._emulator_channel, factory.return_value) + factory.assert_called_once_with(emulator_host) + getenv.assert_called_once_with(BIGTABLE_EMULATOR) + def test__get_scopes_default(self): from google.cloud.bigtable.client import DATA_SCOPE From 4d66ecef69c70becc9527ef218c5faf0e44f1be2 Mon Sep 17 00:00:00 2001 From: Solomon Duskis Date: Fri, 9 Nov 2018 15:47:39 -0500 Subject: [PATCH 205/892] Release bigtable 0.31.1 (#6378) * Release 0.31.1 * Addressing CHANGLONG as per comments * Applying ` to CHANGLOG * Update CHANGELOG.md * Add changes landed this week. * Update bigtable/CHANGELOG.md Co-Authored-By: sduskis --- packages/google-cloud-bigtable/CHANGELOG.md | 30 ++++++++++++++++++++- packages/google-cloud-bigtable/setup.py | 2 +- 2 files changed, 30 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 263f82231785..714062afb60a 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,34 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## 0.31.1 + +11-02-2018 08:13 PDT + +### Implementation Changes +- Fix anonymous usage under Bigtable emulator ([#6385](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6385)) +- Support `DirectRow` without a `Table` ([#6336](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6336)) +- Add retry parameter to `Table.read_rows()`. ([#6281](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6281)) +- Fix `ConditionalRow` interaction with `check_and_mutate_row` ([#6296](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6296)) +- Deprecate `channel` arg to `Client` ([#6279](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6279)) + +### Dependencies +- Update dependency: `google-api-core >= 1.4.1` ([#6391](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6391)) +- Update IAM version in dependencies ([#6362](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6362)) + +### Documentation +- Add `docs/snippets.py` and test ([#6012](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6012)) +- Normalize use of support level badges ([#6159](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6159)) + +### Internal / Testing Changes +- Fix client_info bug, update docstrings and timeouts. ([#6406)](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6406)) +- Remove now-spurious fixup from 'synth.py'. ([#6400](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6400)) +- Fix flaky systests / snippets ([#6367](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6367)) +- Add explicit coverage for `row_data._retry_read_rows_exception`. ([#6364](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6364)) +- Fix instance IAM test methods ([#6343](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6343)) +- Fix error from new flake8 version. 
([#6309](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6309)) +- Use new Nox ([#6175](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6175)) + ## 0.31.0 ### New Features @@ -16,7 +44,7 @@ - Prepare docs for repo split. ([#6014](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6014)) ### Internal / Testing Changes -- Refactor 'read_row' to call 'read_rows' ([#6137](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6102)) +- Refactor `read_row` to call `read_rows` ([#6137](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6102)) - Harden instance teardown against '429 Too Many Requests'. ([#6102](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6102)) - Add `{RowSet,RowRange}.{__eq__,.__ne__}` ([#6025](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6025)) - Regenerate low-level GAPIC code ([#6036](https://github.com/GoogleCloudPlatform/google-cloud-python/pull/6036)) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 892cd4dcc008..a44311f7d3b9 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -22,7 +22,7 @@ name = 'google-cloud-bigtable' description = 'Google Cloud Bigtable API client library' -version = '0.31.0' +version = '0.31.1' # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' From 9e4f24ee6b8f6cfb14a6b363eb48b699b6f61c2a Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Wed, 21 Nov 2018 10:34:50 -0500 Subject: [PATCH 206/892] Bigtable: pick up fixes to GAPIC generator. (#6630) Includes fixes from these PRs: - googleapis/gapic-generator#2407 - googleapis/gapic-generator#2396 Includes changes to generated tests. Closes #6492. --- .../gapic/bigtable_instance_admin_client.py | 17 +- .../gapic/bigtable_table_admin_client.py | 17 +- .../cloud/bigtable_admin_v2/gapic/enums.py | 128 ++++++------ .../bigtable_instance_admin_grpc_transport.py | 11 + .../bigtable_table_admin_grpc_transport.py | 11 + .../bigtable_v2/gapic/bigtable_client.py | 17 +- .../transports/bigtable_grpc_transport.py | 11 + .../unit/gapic/v2/test_bigtable_client_v2.py | 61 ++++-- .../test_bigtable_instance_admin_client_v2.py | 191 ++++++++++++++---- .../v2/test_bigtable_table_admin_client_v2.py | 131 +++++++++--- 10 files changed, 440 insertions(+), 155 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py index e243b9d32710..33da8341ea3e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py @@ -132,7 +132,7 @@ def __init__(self, transport=None, channel=None, credentials=None, - client_config=bigtable_instance_admin_client_config.config, + client_config=None, client_info=None): """Constructor. @@ -165,13 +165,20 @@ def __init__(self, your own client library. """ # Raise deprecation warnings for things we want to go away. 
- if client_config: - warnings.warn('The `client_config` argument is deprecated.', - PendingDeprecationWarning) + if client_config is not None: + warnings.warn( + 'The `client_config` argument is deprecated.', + PendingDeprecationWarning, + stacklevel=2) + else: + client_config = bigtable_instance_admin_client_config.config + if channel: warnings.warn( 'The `channel` argument is deprecated; use ' - '`transport` instead.', PendingDeprecationWarning) + '`transport` instead.', + PendingDeprecationWarning, + stacklevel=2) # Instantiate the transport. # The transport is responsible for handling serialization and diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py index 98631b20ef92..8014df3ee826 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py @@ -133,7 +133,7 @@ def __init__(self, transport=None, channel=None, credentials=None, - client_config=bigtable_table_admin_client_config.config, + client_config=None, client_info=None): """Constructor. @@ -166,13 +166,20 @@ def __init__(self, your own client library. """ # Raise deprecation warnings for things we want to go away. - if client_config: - warnings.warn('The `client_config` argument is deprecated.', - PendingDeprecationWarning) + if client_config is not None: + warnings.warn( + 'The `client_config` argument is deprecated.', + PendingDeprecationWarning, + stacklevel=2) + else: + client_config = bigtable_table_admin_client_config.config + if channel: warnings.warn( 'The `channel` argument is deprecated; use ' - '`transport` instead.', PendingDeprecationWarning) + '`transport` instead.', + PendingDeprecationWarning, + stacklevel=2) # Instantiate the transport. # The transport is responsible for handling serialization and diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py index a4a88bb17ba1..28f7ff3cad0b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py @@ -32,6 +32,70 @@ class StorageType(enum.IntEnum): HDD = 2 +class Instance(object): + class State(enum.IntEnum): + """ + Possible states of an instance. + + Attributes: + STATE_NOT_KNOWN (int): The state of the instance could not be determined. + READY (int): The instance has been successfully created and can serve requests + to its tables. + CREATING (int): The instance is currently being created, and may be destroyed + if the creation process encounters an error. + """ + STATE_NOT_KNOWN = 0 + READY = 1 + CREATING = 2 + + class Type(enum.IntEnum): + """ + The type of the instance. + + Attributes: + TYPE_UNSPECIFIED (int): The type of the instance is unspecified. If set when creating an + instance, a ``PRODUCTION`` instance will be created. If set when + updating an instance, the type will be left unchanged. + PRODUCTION (int): An instance meant for production use. ``serve_nodes`` must be set on the + cluster. + DEVELOPMENT (int): The instance is meant for development and testing purposes only; it has + no performance or uptime guarantees and is not covered by SLA. 
After a + development instance is created, it can be upgraded by updating the + instance to type ``PRODUCTION``. An instance created as a production + instance cannot be changed to a development instance. When creating a + development instance, ``serve_nodes`` on the cluster must not be set. + """ + TYPE_UNSPECIFIED = 0 + PRODUCTION = 1 + DEVELOPMENT = 2 + + +class Cluster(object): + class State(enum.IntEnum): + """ + Possible states of a cluster. + + Attributes: + STATE_NOT_KNOWN (int): The state of the cluster could not be determined. + READY (int): The cluster has been successfully created and is ready to serve requests. + CREATING (int): The cluster is currently being created, and may be destroyed + if the creation process encounters an error. + A cluster may not be able to serve requests while being created. + RESIZING (int): The cluster is currently being resized, and may revert to its previous + node count if the process encounters an error. + A cluster is still capable of serving requests while being resized, + but may exhibit performance as if its number of allocated nodes is + between the starting and requested states. + DISABLED (int): The cluster has no backing nodes. The data (tables) still + exist, but no operations can be performed on the cluster. + """ + STATE_NOT_KNOWN = 0 + READY = 1 + CREATING = 2 + RESIZING = 3 + DISABLED = 4 + + class Table(object): class TimestampGranularity(enum.IntEnum): """ @@ -104,67 +168,3 @@ class State(enum.IntEnum): STATE_NOT_KNOWN = 0 READY = 1 CREATING = 2 - - -class Instance(object): - class State(enum.IntEnum): - """ - Possible states of an instance. - - Attributes: - STATE_NOT_KNOWN (int): The state of the instance could not be determined. - READY (int): The instance has been successfully created and can serve requests - to its tables. - CREATING (int): The instance is currently being created, and may be destroyed - if the creation process encounters an error. - """ - STATE_NOT_KNOWN = 0 - READY = 1 - CREATING = 2 - - class Type(enum.IntEnum): - """ - The type of the instance. - - Attributes: - TYPE_UNSPECIFIED (int): The type of the instance is unspecified. If set when creating an - instance, a ``PRODUCTION`` instance will be created. If set when - updating an instance, the type will be left unchanged. - PRODUCTION (int): An instance meant for production use. ``serve_nodes`` must be set on the - cluster. - DEVELOPMENT (int): The instance is meant for development and testing purposes only; it has - no performance or uptime guarantees and is not covered by SLA. After a - development instance is created, it can be upgraded by updating the - instance to type ``PRODUCTION``. An instance created as a production - instance cannot be changed to a development instance. When creating a - development instance, ``serve_nodes`` on the cluster must not be set. - """ - TYPE_UNSPECIFIED = 0 - PRODUCTION = 1 - DEVELOPMENT = 2 - - -class Cluster(object): - class State(enum.IntEnum): - """ - Possible states of a cluster. - - Attributes: - STATE_NOT_KNOWN (int): The state of the cluster could not be determined. - READY (int): The cluster has been successfully created and is ready to serve requests. - CREATING (int): The cluster is currently being created, and may be destroyed - if the creation process encounters an error. - A cluster may not be able to serve requests while being created. - RESIZING (int): The cluster is currently being resized, and may revert to its previous - node count if the process encounters an error. 
- A cluster is still capable of serving requests while being resized, - but may exhibit performance as if its number of allocated nodes is - between the starting and requested states. - DISABLED (int): The cluster has no backing nodes. The data (tables) still - exist, but no operations can be performed on the cluster. - """ - STATE_NOT_KNOWN = 0 - READY = 1 - CREATING = 2 - RESIZING = 3 - DISABLED = 4 diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py index bcb20263aae9..094cb6c0ff5e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py @@ -73,6 +73,8 @@ def __init__(self, credentials=credentials, ) + self._channel = channel + # gRPC uses objects called "stubs" that are bound to the # channel and provide a basic method for each RPC. self._stubs = { @@ -110,6 +112,15 @@ def create_channel(cls, scopes=cls._OAUTH_SCOPES, ) + @property + def channel(self): + """The gRPC channel used by the transport. + + Returns: + grpc.Channel: A gRPC channel object. + """ + return self._channel + @property def create_instance(self): """Return the gRPC stub for {$apiMethod.name}. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py index e655db6f03d0..9e9dce76207e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py @@ -73,6 +73,8 @@ def __init__(self, credentials=credentials, ) + self._channel = channel + # gRPC uses objects called "stubs" that are bound to the # channel and provide a basic method for each RPC. self._stubs = { @@ -113,6 +115,15 @@ def create_channel(cls, }.items(), ) + @property + def channel(self): + """The gRPC channel used by the transport. + + Returns: + grpc.Channel: A gRPC channel object. + """ + return self._channel + @property def create_table(self): """Return the gRPC stub for {$apiMethod.name}. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py index b84cc5f5f3df..64adb352f1c8 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py @@ -82,7 +82,7 @@ def __init__(self, transport=None, channel=None, credentials=None, - client_config=bigtable_client_config.config, + client_config=None, client_info=None): """Constructor. @@ -115,13 +115,20 @@ def __init__(self, your own client library. """ # Raise deprecation warnings for things we want to go away. 
- if client_config: - warnings.warn('The `client_config` argument is deprecated.', - PendingDeprecationWarning) + if client_config is not None: + warnings.warn( + 'The `client_config` argument is deprecated.', + PendingDeprecationWarning, + stacklevel=2) + else: + client_config = bigtable_client_config.config + if channel: warnings.warn( 'The `channel` argument is deprecated; use ' - '`transport` instead.', PendingDeprecationWarning) + '`transport` instead.', + PendingDeprecationWarning, + stacklevel=2) # Instantiate the transport. # The transport is responsible for handling serialization and diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py index 0f6e7dfdf4be..6d3577105f37 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py @@ -69,6 +69,8 @@ def __init__(self, credentials=credentials, ) + self._channel = channel + # gRPC uses objects called "stubs" that are bound to the # channel and provide a basic method for each RPC. self._stubs = { @@ -102,6 +104,15 @@ def create_channel(cls, }.items(), ) + @property + def channel(self): + """The gRPC channel used by the transport. + + Returns: + grpc.Channel: A gRPC channel object. + """ + return self._channel + @property def read_rows(self): """Return the gRPC stub for {$apiMethod.name}. diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_client_v2.py b/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_client_v2.py index b432e0c716c7..a8099b30e8cd 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_client_v2.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_client_v2.py @@ -15,6 +15,7 @@ # limitations under the License. 
"""Unit tests.""" +import mock import pytest from google.cloud import bigtable_v2 @@ -75,7 +76,10 @@ def test_read_rows(self): # Mock the API response channel = ChannelStub(responses=[iter([expected_response])]) - client = bigtable_v2.BigtableClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_v2.BigtableClient() # Setup Request table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') @@ -93,7 +97,10 @@ def test_read_rows(self): def test_read_rows_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - client = bigtable_v2.BigtableClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_v2.BigtableClient() # Setup request table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') @@ -111,7 +118,10 @@ def test_sample_row_keys(self): # Mock the API response channel = ChannelStub(responses=[iter([expected_response])]) - client = bigtable_v2.BigtableClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_v2.BigtableClient() # Setup Request table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') @@ -130,7 +140,10 @@ def test_sample_row_keys(self): def test_sample_row_keys_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - client = bigtable_v2.BigtableClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_v2.BigtableClient() # Setup request table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') @@ -145,7 +158,10 @@ def test_mutate_row(self): # Mock the API response channel = ChannelStub(responses=[expected_response]) - client = bigtable_v2.BigtableClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_v2.BigtableClient() # Setup Request table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') @@ -164,7 +180,10 @@ def test_mutate_row(self): def test_mutate_row_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - client = bigtable_v2.BigtableClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_v2.BigtableClient() # Setup request table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') @@ -182,7 +201,10 @@ def test_mutate_rows(self): # Mock the API response channel = ChannelStub(responses=[iter([expected_response])]) - client = bigtable_v2.BigtableClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_v2.BigtableClient() # Setup Request table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') @@ -202,7 +224,10 @@ def test_mutate_rows(self): def test_mutate_rows_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - client = 
bigtable_v2.BigtableClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_v2.BigtableClient() # Setup request table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') @@ -220,7 +245,10 @@ def test_check_and_mutate_row(self): # Mock the API response channel = ChannelStub(responses=[expected_response]) - client = bigtable_v2.BigtableClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_v2.BigtableClient() # Setup Request table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') @@ -238,7 +266,10 @@ def test_check_and_mutate_row(self): def test_check_and_mutate_row_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - client = bigtable_v2.BigtableClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_v2.BigtableClient() # Setup request table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') @@ -255,7 +286,10 @@ def test_read_modify_write_row(self): # Mock the API response channel = ChannelStub(responses=[expected_response]) - client = bigtable_v2.BigtableClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_v2.BigtableClient() # Setup Request table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') @@ -274,7 +308,10 @@ def test_read_modify_write_row(self): def test_read_modify_write_row_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - client = bigtable_v2.BigtableClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_v2.BigtableClient() # Setup request table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py b/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py index 37c701c7bb51..d81ebe590d90 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py @@ -15,6 +15,7 @@ # limitations under the License. 
"""Unit tests.""" +import mock import pytest from google.rpc import status_pb2 @@ -82,7 +83,10 @@ def test_create_instance(self): # Mock the API response channel = ChannelStub(responses=[operation]) - client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request parent = client.project_path('[PROJECT]') @@ -113,7 +117,10 @@ def test_create_instance_exception(self): # Mock the API response channel = ChannelStub(responses=[operation]) - client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request parent = client.project_path('[PROJECT]') @@ -135,7 +142,10 @@ def test_get_instance(self): # Mock the API response channel = ChannelStub(responses=[expected_response]) - client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request name = client.instance_path('[PROJECT]', '[INSTANCE]') @@ -152,7 +162,10 @@ def test_get_instance(self): def test_get_instance_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup request name = client.instance_path('[PROJECT]', '[INSTANCE]') @@ -169,7 +182,10 @@ def test_list_instances(self): # Mock the API response channel = ChannelStub(responses=[expected_response]) - client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request parent = client.project_path('[PROJECT]') @@ -186,7 +202,10 @@ def test_list_instances(self): def test_list_instances_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup request parent = client.project_path('[PROJECT]') @@ -203,7 +222,10 @@ def test_update_instance(self): # Mock the API response channel = ChannelStub(responses=[expected_response]) - client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request name = client.instance_path('[PROJECT]', '[INSTANCE]') @@ -223,7 +245,10 @@ def test_update_instance(self): def test_update_instance_exception(self): # Mock the API response channel = 
ChannelStub(responses=[CustomException()]) - client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup request name = client.instance_path('[PROJECT]', '[INSTANCE]') @@ -246,7 +271,10 @@ def test_partial_update_instance(self): # Mock the API response channel = ChannelStub(responses=[operation]) - client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request instance = {} @@ -272,7 +300,10 @@ def test_partial_update_instance_exception(self): # Mock the API response channel = ChannelStub(responses=[operation]) - client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request instance = {} @@ -284,7 +315,10 @@ def test_partial_update_instance_exception(self): def test_delete_instance(self): channel = ChannelStub() - client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request name = client.instance_path('[PROJECT]', '[INSTANCE]') @@ -300,7 +334,10 @@ def test_delete_instance(self): def test_delete_instance_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup request name = client.instance_path('[PROJECT]', '[INSTANCE]') @@ -325,7 +362,10 @@ def test_create_cluster(self): # Mock the API response channel = ChannelStub(responses=[operation]) - client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request parent = client.instance_path('[PROJECT]', '[INSTANCE]') @@ -351,7 +391,10 @@ def test_create_cluster_exception(self): # Mock the API response channel = ChannelStub(responses=[operation]) - client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request parent = client.instance_path('[PROJECT]', '[INSTANCE]') @@ -376,7 +419,10 @@ def test_get_cluster(self): # Mock the API response channel = ChannelStub(responses=[expected_response]) - client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = 
bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request name = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') @@ -393,7 +439,10 @@ def test_get_cluster(self): def test_get_cluster_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup request name = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') @@ -410,7 +459,10 @@ def test_list_clusters(self): # Mock the API response channel = ChannelStub(responses=[expected_response]) - client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request parent = client.instance_path('[PROJECT]', '[INSTANCE]') @@ -427,7 +479,10 @@ def test_list_clusters(self): def test_list_clusters_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup request parent = client.instance_path('[PROJECT]', '[INSTANCE]') @@ -452,7 +507,10 @@ def test_update_cluster(self): # Mock the API response channel = ChannelStub(responses=[operation]) - client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request name = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') @@ -477,7 +535,10 @@ def test_update_cluster_exception(self): # Mock the API response channel = ChannelStub(responses=[operation]) - client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request name = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') @@ -489,7 +550,10 @@ def test_update_cluster_exception(self): def test_delete_cluster(self): channel = ChannelStub() - client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request name = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') @@ -505,7 +569,10 @@ def test_delete_cluster(self): def test_delete_cluster_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup request name = 
client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') @@ -527,7 +594,10 @@ def test_create_app_profile(self): # Mock the API response channel = ChannelStub(responses=[expected_response]) - client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request parent = client.instance_path('[PROJECT]', '[INSTANCE]') @@ -549,7 +619,10 @@ def test_create_app_profile(self): def test_create_app_profile_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup request parent = client.instance_path('[PROJECT]', '[INSTANCE]') @@ -573,7 +646,10 @@ def test_get_app_profile(self): # Mock the API response channel = ChannelStub(responses=[expected_response]) - client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request name = client.app_profile_path('[PROJECT]', '[INSTANCE]', @@ -591,7 +667,10 @@ def test_get_app_profile(self): def test_get_app_profile_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup request name = client.app_profile_path('[PROJECT]', '[INSTANCE]', @@ -614,7 +693,10 @@ def test_list_app_profiles(self): # Mock the API response channel = ChannelStub(responses=[expected_response]) - client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request parent = client.instance_path('[PROJECT]', '[INSTANCE]') @@ -633,7 +715,10 @@ def test_list_app_profiles(self): def test_list_app_profiles_exception(self): channel = ChannelStub(responses=[CustomException()]) - client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup request parent = client.instance_path('[PROJECT]', '[INSTANCE]') @@ -659,7 +744,10 @@ def test_update_app_profile(self): # Mock the API response channel = ChannelStub(responses=[operation]) - client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request app_profile = {} @@ -684,7 +772,10 @@ def test_update_app_profile_exception(self): # Mock the API 
response channel = ChannelStub(responses=[operation]) - client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request app_profile = {} @@ -696,7 +787,10 @@ def test_update_app_profile_exception(self): def test_delete_app_profile(self): channel = ChannelStub() - client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request name = client.app_profile_path('[PROJECT]', '[INSTANCE]', @@ -714,7 +808,10 @@ def test_delete_app_profile(self): def test_delete_app_profile_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup request name = client.app_profile_path('[PROJECT]', '[INSTANCE]', @@ -733,7 +830,10 @@ def test_get_iam_policy(self): # Mock the API response channel = ChannelStub(responses=[expected_response]) - client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request resource = client.instance_path('[PROJECT]', '[INSTANCE]') @@ -750,7 +850,10 @@ def test_get_iam_policy(self): def test_get_iam_policy_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup request resource = client.instance_path('[PROJECT]', '[INSTANCE]') @@ -767,7 +870,10 @@ def test_set_iam_policy(self): # Mock the API response channel = ChannelStub(responses=[expected_response]) - client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request resource = client.instance_path('[PROJECT]', '[INSTANCE]') @@ -785,7 +891,10 @@ def test_set_iam_policy(self): def test_set_iam_policy_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup request resource = client.instance_path('[PROJECT]', '[INSTANCE]') @@ -802,7 +911,10 @@ def test_test_iam_permissions(self): # Mock the API response channel = ChannelStub(responses=[expected_response]) - client = 
bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request resource = client.instance_path('[PROJECT]', '[INSTANCE]') @@ -820,7 +932,10 @@ def test_test_iam_permissions(self): def test_test_iam_permissions_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - client = bigtable_admin_v2.BigtableInstanceAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup request resource = client.instance_path('[PROJECT]', '[INSTANCE]') diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py b/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py index f9d81b01afb1..4e4cd1b68153 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py @@ -15,6 +15,7 @@ # limitations under the License. """Unit tests.""" +import mock import pytest from google.rpc import status_pb2 @@ -74,7 +75,10 @@ def test_create_table(self): # Mock the API response channel = ChannelStub(responses=[expected_response]) - client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() # Setup Request parent = client.instance_path('[PROJECT]', '[INSTANCE]') @@ -93,7 +97,10 @@ def test_create_table(self): def test_create_table_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() # Setup request parent = client.instance_path('[PROJECT]', '[INSTANCE]') @@ -114,7 +121,10 @@ def test_create_table_from_snapshot(self): # Mock the API response channel = ChannelStub(responses=[operation]) - client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() # Setup Request parent = client.instance_path('[PROJECT]', '[INSTANCE]') @@ -142,7 +152,10 @@ def test_create_table_from_snapshot_exception(self): # Mock the API response channel = ChannelStub(responses=[operation]) - client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() # Setup Request parent = client.instance_path('[PROJECT]', '[INSTANCE]') @@ -168,7 +181,10 @@ def test_list_tables(self): # Mock the API response channel = ChannelStub(responses=[expected_response]) - client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel) + patch 
= mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() # Setup Request parent = client.instance_path('[PROJECT]', '[INSTANCE]') @@ -187,7 +203,10 @@ def test_list_tables(self): def test_list_tables_exception(self): channel = ChannelStub(responses=[CustomException()]) - client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() # Setup request parent = client.instance_path('[PROJECT]', '[INSTANCE]') @@ -204,7 +223,10 @@ def test_get_table(self): # Mock the API response channel = ChannelStub(responses=[expected_response]) - client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() # Setup Request name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') @@ -220,7 +242,10 @@ def test_get_table(self): def test_get_table_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() # Setup request name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') @@ -230,7 +255,10 @@ def test_get_table_exception(self): def test_delete_table(self): channel = ChannelStub() - client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() # Setup Request name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') @@ -246,7 +274,10 @@ def test_delete_table(self): def test_delete_table_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() # Setup request name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') @@ -262,7 +293,10 @@ def test_modify_column_families(self): # Mock the API response channel = ChannelStub(responses=[expected_response]) - client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() # Setup Request name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') @@ -280,7 +314,10 @@ def test_modify_column_families(self): def test_modify_column_families_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value 
= channel + client = bigtable_admin_v2.BigtableTableAdminClient() # Setup request name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') @@ -291,7 +328,10 @@ def test_modify_column_families_exception(self): def test_drop_row_range(self): channel = ChannelStub() - client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() # Setup Request name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') @@ -307,7 +347,10 @@ def test_drop_row_range(self): def test_drop_row_range_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() # Setup request name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') @@ -324,7 +367,10 @@ def test_generate_consistency_token(self): # Mock the API response channel = ChannelStub(responses=[expected_response]) - client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() # Setup Request name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') @@ -341,7 +387,10 @@ def test_generate_consistency_token(self): def test_generate_consistency_token_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() # Setup request name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') @@ -358,7 +407,10 @@ def test_check_consistency(self): # Mock the API response channel = ChannelStub(responses=[expected_response]) - client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() # Setup Request name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') @@ -376,7 +428,10 @@ def test_check_consistency(self): def test_check_consistency_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() # Setup request name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') @@ -402,7 +457,10 @@ def test_snapshot_table(self): # Mock the API response channel = ChannelStub(responses=[operation]) - client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() # Setup 
Request name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') @@ -433,7 +491,10 @@ def test_snapshot_table_exception(self): # Mock the API response channel = ChannelStub(responses=[operation]) - client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() # Setup Request name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') @@ -460,7 +521,10 @@ def test_get_snapshot(self): # Mock the API response channel = ChannelStub(responses=[expected_response]) - client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() # Setup Request name = client.snapshot_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]', @@ -478,7 +542,10 @@ def test_get_snapshot(self): def test_get_snapshot_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() # Setup request name = client.snapshot_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]', @@ -501,7 +568,10 @@ def test_list_snapshots(self): # Mock the API response channel = ChannelStub(responses=[expected_response]) - client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() # Setup Request parent = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') @@ -520,7 +590,10 @@ def test_list_snapshots(self): def test_list_snapshots_exception(self): channel = ChannelStub(responses=[CustomException()]) - client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() # Setup request parent = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') @@ -531,7 +604,10 @@ def test_list_snapshots_exception(self): def test_delete_snapshot(self): channel = ChannelStub() - client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() # Setup Request name = client.snapshot_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]', @@ -548,7 +624,10 @@ def test_delete_snapshot(self): def test_delete_snapshot_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - client = bigtable_admin_v2.BigtableTableAdminClient(channel=channel) + patch = mock.patch('google.api_core.grpc_helpers.create_channel') + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() # Setup request name = client.snapshot_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]', From 
31e3e1698e6606e919354e06af9363add199cc50 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Mon, 26 Nov 2018 14:59:55 -0500 Subject: [PATCH 207/892] Remove 'deepcopy' from 'PartialRowData.cells' property. (#6648) Closes #6643. --- .../google-cloud-bigtable/google/cloud/bigtable/row_data.py | 2 +- packages/google-cloud-bigtable/tests/unit/test_row_data.py | 2 -- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py index 6e09d5fc6a84..8eb8f474660f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py @@ -181,7 +181,7 @@ def cells(self): and second for column names/qualifiers within a family). For a given column, a list of :class:`Cell` objects is stored. """ - return copy.deepcopy(self._cells) + return self._cells @property def row_key(self): diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_data.py b/packages/google-cloud-bigtable/tests/unit/test_row_data.py index b5a3a797f634..cde6f7d21cbf 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_data.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_data.py @@ -305,8 +305,6 @@ def test_cells_property(self): partial_row_data = self._make_one(None) cells = {1: 2} partial_row_data._cells = cells - # Make sure we get a copy, not the original. - self.assertIsNot(partial_row_data.cells, cells) self.assertEqual(partial_row_data.cells, cells) def test_row_key_getter(self): From fb5bd808b0fda3faf4ec5b38760adf18951007a9 Mon Sep 17 00:00:00 2001 From: Christopher Wilcox Date: Wed, 28 Nov 2018 13:55:23 -0800 Subject: [PATCH 208/892] Add templates for flake8, coveragerc, noxfile, and black. (#6642) --- packages/google-cloud-bigtable/.coveragerc | 9 +- packages/google-cloud-bigtable/.flake8 | 1 + packages/google-cloud-bigtable/MANIFEST.in | 3 +- packages/google-cloud-bigtable/noxfile.py | 170 +++++++++++---------- packages/google-cloud-bigtable/synth.py | 78 ++++++---- 5 files changed, 144 insertions(+), 117 deletions(-) diff --git a/packages/google-cloud-bigtable/.coveragerc b/packages/google-cloud-bigtable/.coveragerc index 08f3fdea2433..51fec440cebf 100644 --- a/packages/google-cloud-bigtable/.coveragerc +++ b/packages/google-cloud-bigtable/.coveragerc @@ -2,8 +2,6 @@ branch = True [report] -omit = - */_generated/*.py fail_under = 100 show_missing = True exclude_lines = @@ -11,3 +9,10 @@ exclude_lines = pragma: NO COVER # Ignore debug-only repr def __repr__ + # Ignore abstract methods + raise NotImplementedError +omit = + */gapic/*.py + */proto/*.py + */google-cloud-python/core/*.py + */site-packages/*.py \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.flake8 b/packages/google-cloud-bigtable/.flake8 index 1f44a90f8195..61766fa84d02 100644 --- a/packages/google-cloud-bigtable/.flake8 +++ b/packages/google-cloud-bigtable/.flake8 @@ -1,4 +1,5 @@ [flake8] +ignore = E203, E266, E501, W503 exclude = # Exclude generated code. 
**/proto/** diff --git a/packages/google-cloud-bigtable/MANIFEST.in b/packages/google-cloud-bigtable/MANIFEST.in index fc77f8c82ff0..9cbf175afe6b 100644 --- a/packages/google-cloud-bigtable/MANIFEST.in +++ b/packages/google-cloud-bigtable/MANIFEST.in @@ -1,4 +1,5 @@ include README.rst LICENSE recursive-include google *.json *.proto recursive-include tests * -global-exclude *.pyc __pycache__ +global-exclude *.py[co] +global-exclude __pycache__ diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index ce44f4ce16e6..652b0a9c4e44 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -1,10 +1,12 @@ -# Copyright 2016 Google LLC +# -*- coding: utf-8 -*- +# +# Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -18,119 +20,121 @@ import nox -LOCAL_DEPS = ( - os.path.join('..', 'api_core'), - os.path.join('..', 'core'), -) +LOCAL_DEPS = (os.path.join("..", "api_core"), os.path.join("..", "core")) +@nox.session(python="3.7") +def blacken(session): + """Run black. + + Format code to uniform standard. + """ + session.install("black") + session.run( + "black", + "google", + "tests", + "docs", + "--exclude", + ".*/proto/.*|.*/gapic/.*|.*/.*_pb2.py", + ) -def default(session): - """Default unit test session. - This is intended to be run **without** an interpreter set, so - that the current ``python`` (on the ``PATH``) or the version of - Python corresponding to the ``nox`` binary the ``PATH`` can - run the tests. +@nox.session(python="3.7") +def lint(session): + """Run linters. + + Returns a failure if the linters find linting errors or sufficiently + serious code quality issues. """ - # Install all test dependencies, then install local packages in-place. - session.install('mock', 'pytest', 'pytest-cov') + session.install("flake8", "black", *LOCAL_DEPS) + session.run( + "black", + "--check", + "google", + "tests", + "docs", + "--exclude", + ".*/proto/.*|.*/gapic/.*|.*/.*_pb2.py", + ) + session.run("flake8", "google", "tests") + + +@nox.session(python="3.7") +def lint_setup_py(session): + """Verify that setup.py is valid (including RST check).""" + session.install("docutils", "pygments") + session.run("python", "setup.py", "check", "--restructuredtext", "--strict") + + +def default(session): + # Install all test dependencies, then install this package in-place. + session.install("mock", "pytest", "pytest-cov") for local_dep in LOCAL_DEPS: - session.install('-e', local_dep) - session.install('-e', '.') + session.install("-e", local_dep) + session.install("-e", ".") # Run py.test against the unit tests. 
session.run( - 'py.test', - '--quiet', - '--cov=google.cloud.bigtable', - '--cov=tests.unit', - '--cov-append', - '--cov-config=.coveragerc', - '--cov-report=', - '--cov-fail-under=97', - 'tests/unit', - *session.posargs + "py.test", + "--quiet", + "--cov=google.cloud", + "--cov=tests.unit", + "--cov-append", + "--cov-config=.coveragerc", + "--cov-report=", + "--cov-fail-under=97", + os.path.join("tests", "unit"), + *session.posargs, ) -@nox.session(python=['2.7', '3.5', '3.6', '3.7']) +@nox.session(python=["2.7", "3.5", "3.6", "3.7"]) def unit(session): """Run the unit test suite.""" default(session) -@nox.session(python=['2.7', '3.7']) +@nox.session(python=["2.7", "3.7"]) def system(session): """Run the system test suite.""" - - # Sanity check: Only run system tests if the environment variable is set. - if not os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', ''): - session.skip('Credentials must be set via environment variable.') + system_test_path = os.path.join("tests", "system.py") + system_test_folder_path = os.path.join("tests", "system") + # Sanity check: Only run tests if the environment variable is set. + if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""): + session.skip("Credentials must be set via environment variable") + + system_test_exists = os.path.exists(system_test_path) + system_test_folder_exists = os.path.exists(system_test_folder_path) + # Sanity check: only run tests if found. + if not system_test_exists and not system_test_folder_exists: + session.skip("System tests were not found") # Use pre-release gRPC for system tests. - session.install('--pre', 'grpcio') + session.install("--pre", "grpcio") # Install all test dependencies, then install this package into the # virtualenv's dist-packages. - session.install('mock', 'pytest') + session.install("mock", "pytest") for local_dep in LOCAL_DEPS: - session.install('-e', local_dep) - session.install('-e', '../test_utils/') - session.install('-e', '.') + session.install("-e", local_dep) + session.install("-e", "../test_utils/") + session.install("-e", ".") # Run py.test against the system tests. - session.run('py.test', '--quiet', 'tests/system.py', *session.posargs) + if system_test_exists: + session.run("py.test", "--quiet", system_test_path, *session.posargs) + if system_test_folder_exists: + session.run("py.test", "--quiet", system_test_folder_path, *session.posargs) -@nox.session(python='3.6') -def lint(session): - """Run linters. - - Returns a failure if the linters find linting errors or sufficiently - serious code quality issues. - """ - session.install('flake8', *LOCAL_DEPS) - session.install('.') - session.run('flake8', 'google', 'tests', 'docs') - - -@nox.session(python='3.6') -def lint_setup_py(session): - """Verify that setup.py is valid (including RST check).""" - - session.install('docutils', 'Pygments') - session.run( - 'python', 'setup.py', 'check', '--restructuredtext', '--strict') - - -@nox.session(python='3.6') +@nox.session(python="3.7") def cover(session): """Run the final coverage report. This outputs the coverage report aggregating coverage from the unit test runs (not system test runs), and then erases coverage data. """ - session.install('coverage', 'pytest-cov') - session.run('coverage', 'report', '--show-missing', '--fail-under=100') - session.run('coverage', 'erase') - - -@nox.session(python=['2.7', '3.7']) -def snippets(session): - """Run the system test suite.""" - # Sanity check: Only run system tests if the environment variable is set. 
- if not os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', ''): - session.skip('Credentials must be set via environment variable.') + session.install("coverage", "pytest-cov") + session.run("coverage", "report", "--show-missing", "--fail-under=99") - # Install all test dependencies, then install local packages in place. - session.install('mock', 'pytest') - for local_dep in LOCAL_DEPS: - session.install('-e', local_dep) - session.install('-e', '../test_utils/') - session.install('-e', '.') - session.run( - 'py.test', - '--quiet', - os.path.join('docs', 'snippets.py'), - *session.posargs, - ) + session.run("coverage", "erase") diff --git a/packages/google-cloud-bigtable/synth.py b/packages/google-cloud-bigtable/synth.py index 464b1aafdac5..e3b51d026f6a 100644 --- a/packages/google-cloud-bigtable/synth.py +++ b/packages/google-cloud-bigtable/synth.py @@ -18,55 +18,71 @@ from synthtool import gcp gapic = gcp.GAPICGenerator() +common = gcp.CommonTemplates() - -# Generate client +# ---------------------------------------------------------------------------- +# Generate bigtable and bigtable_admin GAPIC layer +# ---------------------------------------------------------------------------- library = gapic.py_library( - 'bigtable', - 'v2', - config_path='/google/bigtable/artman_bigtable.yaml', - artman_output_name='bigtable-v2') + "bigtable", + "v2", + config_path="/google/bigtable/artman_bigtable.yaml", + artman_output_name="bigtable-v2", +) -s.move(library / 'google/cloud/bigtable_v2') -s.move(library / 'tests') +s.move(library / "google/cloud/bigtable_v2") +s.move(library / "tests") # Generate admin client library = gapic.py_library( - 'bigtable_admin', - 'v2', - config_path='/google/bigtable/admin/artman_bigtableadmin.yaml', - artman_output_name='bigtable-admin-v2') + "bigtable_admin", + "v2", + config_path="/google/bigtable/admin/artman_bigtableadmin.yaml", + artman_output_name="bigtable-admin-v2", +) -s.move(library / 'google/cloud/bigtable_admin_v2') -s.move(library / 'tests') +s.move(library / "google/cloud/bigtable_admin_v2") +s.move(library / "tests") s.replace( - ['google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py', - 'google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py'], + [ + "google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py", + "google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py", + ], "'google-cloud-bigtable-admin'", - "'google-cloud-bigtable'") + "'google-cloud-bigtable'", +) s.replace( "google/**/*.py", - 'from google\.cloud\.bigtable\.admin_v2.proto', - 'from google.cloud.bigtable_admin_v2.proto') + "from google\.cloud\.bigtable\.admin_v2.proto", + "from google.cloud.bigtable_admin_v2.proto", +) s.replace( - ['google/cloud/bigtable_admin_v2/gapic/transports/' - 'bigtable_table_admin_grpc_transport.py', - 'google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py'], - 'google.api_core.grpc_helpers.create_channel\(\n' - '(\s+)address.*\n\s+credentials.*\n\s+scopes.*\n', + [ + "google/cloud/bigtable_admin_v2/gapic/transports/" + "bigtable_table_admin_grpc_transport.py", + "google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py", + ], + "google.api_core.grpc_helpers.create_channel\(\n" + "(\s+)address.*\n\s+credentials.*\n\s+scopes.*\n", "\g<0>\g<1>options={\n\g<1> 'grpc.max_send_message_length': -1,\n" "\g<1> 'grpc.max_receive_message_length': -1,\n" - "\g<1>}.items(),\n" + "\g<1>}.items(),\n", ) s.replace( - ['google/cloud/bigtable_admin_v2/__init__.py'], - ' __doc__ = 
bigtable_instance_admin_client.' - 'BigtableInstanceAdminClient.__doc__\n', - ' __doc__ = (\n' - ' bigtable_instance_admin_client.BigtableInstanceAdminClient.' - '__doc__)\n', + ["google/cloud/bigtable_admin_v2/__init__.py"], + " __doc__ = bigtable_instance_admin_client." + "BigtableInstanceAdminClient.__doc__\n", + " __doc__ = (\n" + " bigtable_instance_admin_client.BigtableInstanceAdminClient." + "__doc__)\n", ) + +# ---------------------------------------------------------------------------- +# Add templated files +# ---------------------------------------------------------------------------- +templated_files = common.py_library(unit_cov_level=97, cov_level=99) +s.move(templated_files) From 0dc4facc5329421257e38733f0af3b5927dbb2b8 Mon Sep 17 00:00:00 2001 From: Christopher Wilcox Date: Thu, 29 Nov 2018 11:02:59 -0800 Subject: [PATCH 209/892] Blackening Continued... (#6667) * blacken bigtable * blacken logging * blacken translate * blacken runtimeconfig * blacken dns --- packages/google-cloud-bigtable/docs/conf.py | 167 +-- .../google-cloud-bigtable/docs/snippets.py | 122 +- .../google-cloud-bigtable/google/__init__.py | 2 + .../google/cloud/__init__.py | 2 + .../google/cloud/bigtable.py | 5 +- .../google/cloud/bigtable/__init__.py | 5 +- .../google/cloud/bigtable/app_profile.py | 114 +- .../google/cloud/bigtable/batcher.py | 12 +- .../google/cloud/bigtable/client.py | 70 +- .../google/cloud/bigtable/cluster.py | 70 +- .../google/cloud/bigtable/column_family.py | 67 +- .../google/cloud/bigtable/enums.py | 20 +- .../google/cloud/bigtable/instance.py | 182 +-- .../google/cloud/bigtable/policy.py | 15 +- .../google/cloud/bigtable/row.py | 99 +- .../google/cloud/bigtable/row_data.py | 128 +- .../google/cloud/bigtable/row_filters.py | 117 +- .../google/cloud/bigtable/row_set.py | 28 +- .../google/cloud/bigtable/table.py | 192 +-- .../cloud/bigtable_admin_v2/__init__.py | 16 +- .../gapic/bigtable_instance_admin_client.py | 909 +++++++------- .../bigtable_instance_admin_client_config.py | 46 +- .../gapic/bigtable_table_admin_client.py | 650 +++++----- .../bigtable_table_admin_client_config.py | 34 +- .../cloud/bigtable_admin_v2/gapic/enums.py | 8 + .../bigtable_instance_admin_grpc_transport.py | 93 +- .../bigtable_table_admin_grpc_transport.py | 80 +- .../google/cloud/bigtable_admin_v2/types.py | 2 +- .../google/cloud/bigtable_v2/__init__.py | 5 +- .../bigtable_v2/gapic/bigtable_client.py | 333 ++--- .../gapic/bigtable_client_config.py | 24 +- .../transports/bigtable_grpc_transport.py | 53 +- .../google/cloud/bigtable_v2/types.py | 17 +- .../google-cloud-bigtable/tests/system.py | 437 ++++--- .../tests/unit/_testing.py | 4 +- .../unit/gapic/v2/test_bigtable_client_v2.py | 107 +- .../test_bigtable_instance_admin_client_v2.py | 395 +++--- .../v2/test_bigtable_table_admin_client_v2.py | 281 +++-- .../tests/unit/test_app_profile.py | 391 +++--- .../tests/unit/test_batcher.py | 74 +- .../tests/unit/test_client.py | 242 ++-- .../tests/unit/test_cluster.py | 247 ++-- .../tests/unit/test_column_family.py | 239 ++-- .../tests/unit/test_instance.py | 505 ++++---- .../tests/unit/test_policy.py | 15 +- .../tests/unit/test_row.py | 400 +++--- .../tests/unit/test_row_data.py | 542 ++++---- .../tests/unit/test_row_filters.py | 309 ++--- .../tests/unit/test_row_set.py | 57 +- .../tests/unit/test_table.py | 1091 +++++++++-------- 50 files changed, 4564 insertions(+), 4459 deletions(-) diff --git a/packages/google-cloud-bigtable/docs/conf.py b/packages/google-cloud-bigtable/docs/conf.py index 
507ffb40399c..51127e3ff14e 100644 --- a/packages/google-cloud-bigtable/docs/conf.py +++ b/packages/google-cloud-bigtable/docs/conf.py @@ -18,50 +18,50 @@ # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.abspath('..')) +sys.path.insert(0, os.path.abspath("..")) -__version__ = '0.1.0' +__version__ = "0.1.0" # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. -#needs_sphinx = '1.0' +# needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ - 'sphinx.ext.autodoc', - 'sphinx.ext.autosummary', - 'sphinx.ext.intersphinx', - 'sphinx.ext.coverage', - 'sphinx.ext.napoleon', - 'sphinx.ext.viewcode', + "sphinx.ext.autodoc", + "sphinx.ext.autosummary", + "sphinx.ext.intersphinx", + "sphinx.ext.coverage", + "sphinx.ext.napoleon", + "sphinx.ext.viewcode", ] # autodoc/autosummary flags -autoclass_content = 'both' -autodoc_default_flags = ['members'] +autoclass_content = "both" +autodoc_default_flags = ["members"] autosummary_generate = True # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = ["_templates"] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] -source_suffix = '.rst' +source_suffix = ".rst" # The encoding of source files. -#source_encoding = 'utf-8-sig' +# source_encoding = 'utf-8-sig' # The master toctree document. -master_doc = 'index' +master_doc = "index" # General information about the project. -project = u'google-cloud-bigtable' -copyright = u'2017, Google' -author = u'Google APIs' +project = u"google-cloud-bigtable" +copyright = u"2017, Google" +author = u"Google APIs" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -70,7 +70,7 @@ # The full version, including alpha/beta/rc tags. release = __version__ # The short X.Y version. -version = '.'.join(release.split('.')[0:2]) +version = ".".join(release.split(".")[0:2]) # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. @@ -81,37 +81,37 @@ # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: -#today = '' +# today = '' # Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' +# today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. -exclude_patterns = ['_build'] +exclude_patterns = ["_build"] # The reST default role (used for this markup: `text`) to use for all # documents. -#default_role = None +# default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True +# add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). -#add_module_names = True +# add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. 
They are ignored by default. -#show_authors = False +# show_authors = False # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' +pygments_style = "sphinx" # A list of ignored prefixes for module index sorting. -#modindex_common_prefix = [] +# modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. -#keep_warnings = False +# keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = True @@ -120,31 +120,31 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. -html_theme = 'sphinx_rtd_theme' +html_theme = "sphinx_rtd_theme" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. -#html_theme_options = {} +# html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. -#html_theme_path = [] +# html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". -#html_title = None +# html_title = None # A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None +# html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. -#html_logo = None +# html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. -#html_favicon = None +# html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, @@ -154,78 +154,75 @@ # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. -#html_extra_path = [] +# html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. -#html_last_updated_fmt = '%b %d, %Y' +# html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. -#html_use_smartypants = True +# html_use_smartypants = True # Custom sidebar templates, maps document names to template names. -#html_sidebars = {} +# html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. -#html_additional_pages = {} +# html_additional_pages = {} # If false, no module index is generated. -#html_domain_indices = True +# html_domain_indices = True # If false, no index is generated. -#html_use_index = True +# html_use_index = True # If true, the index is split into individual pages for each letter. -#html_split_index = False +# html_split_index = False # If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True +# html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -#html_show_sphinx = True +# html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -#html_show_copyright = True +# html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. 
The value of this option must be the # base URL from which the finished HTML is served. -#html_use_opensearch = '' +# html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = None +# html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' -#html_search_language = 'en' +# html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # Now only 'ja' uses this config value -#html_search_options = {'type': 'default'} +# html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. -#html_search_scorer = 'scorer.js' +# html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. -htmlhelp_basename = 'google-cloud-bigtable-doc' +htmlhelp_basename = "google-cloud-bigtable-doc" # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. #'preamble': '', - # Latex figure (float) alignment #'figure_align': 'htbp', } @@ -234,39 +231,51 @@ # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ - (master_doc, 'google-cloud-bigtable.tex', - u'google-cloud-bigtable Documentation', author, 'manual'), + ( + master_doc, + "google-cloud-bigtable.tex", + u"google-cloud-bigtable Documentation", + author, + "manual", + ) ] # The name of an image file (relative to this directory) to place at the top of # the title page. -#latex_logo = None +# latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. -#latex_use_parts = False +# latex_use_parts = False # If true, show page references after internal links. -#latex_show_pagerefs = False +# latex_show_pagerefs = False # If true, show URL addresses after external links. -#latex_show_urls = False +# latex_show_urls = False # Documents to append as an appendix to all manuals. -#latex_appendices = [] +# latex_appendices = [] # If false, no module index is generated. -#latex_domain_indices = True +# latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). -man_pages = [(master_doc, 'google-cloud-bigtable', - u'google-cloud-bigtable Documentation', [author], 1)] +man_pages = [ + ( + master_doc, + "google-cloud-bigtable", + u"google-cloud-bigtable Documentation", + [author], + 1, + ) +] # If true, show URL addresses after external links. 
-#man_show_urls = False +# man_show_urls = False # -- Options for Texinfo output ------------------------------------------- @@ -274,27 +283,33 @@ # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - (master_doc, 'google-cloud-bigtable', - u'google-cloud-bigtable Documentation', author, 'google-cloud-bigtable', - 'GAPIC library for the {metadata.shortName} v2 service', 'APIs'), + ( + master_doc, + "google-cloud-bigtable", + u"google-cloud-bigtable Documentation", + author, + "google-cloud-bigtable", + "GAPIC library for the {metadata.shortName} v2 service", + "APIs", + ) ] # Documents to append as an appendix to all manuals. -#texinfo_appendices = [] +# texinfo_appendices = [] # If false, no module index is generated. -#texinfo_domain_indices = True +# texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. -#texinfo_show_urls = 'footnote' +# texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. -#texinfo_no_detailmenu = False +# texinfo_no_detailmenu = False # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = { - 'python': ('http://python.readthedocs.org/en/latest/', None), - 'gax': ('https://gax-python.readthedocs.org/en/latest/', None), + "python": ("http://python.readthedocs.org/en/latest/", None), + "gax": ("https://gax-python.readthedocs.org/en/latest/", None), } # Napoleon settings diff --git a/packages/google-cloud-bigtable/docs/snippets.py b/packages/google-cloud-bigtable/docs/snippets.py index a75d89bc3ffb..9255ab136771 100644 --- a/packages/google-cloud-bigtable/docs/snippets.py +++ b/packages/google-cloud-bigtable/docs/snippets.py @@ -38,17 +38,19 @@ from google.cloud.bigtable import enums -INSTANCE_ID = "snippet-" + unique_resource_id('-') -CLUSTER_ID = "clus-1-" + unique_resource_id('-') -LOCATION_ID = 'us-central1-f' -ALT_LOCATION_ID = 'us-central1-a' +INSTANCE_ID = "snippet-" + unique_resource_id("-") +CLUSTER_ID = "clus-1-" + unique_resource_id("-") +LOCATION_ID = "us-central1-f" +ALT_LOCATION_ID = "us-central1-a" PRODUCTION = enums.Instance.Type.PRODUCTION SERVER_NODES = 3 STORAGE_TYPE = enums.StorageType.SSD -LABEL_KEY = u'python-snippet' -LABEL_STAMP = datetime.datetime.utcnow() \ - .replace(microsecond=0, tzinfo=UTC,) \ - .strftime("%Y-%m-%dt%H-%M-%S") +LABEL_KEY = u"python-snippet" +LABEL_STAMP = ( + datetime.datetime.utcnow() + .replace(microsecond=0, tzinfo=UTC) + .strftime("%Y-%m-%dt%H-%M-%S") +) LABELS = {LABEL_KEY: str(LABEL_STAMP)} @@ -58,19 +60,22 @@ class Config(object): This is a mutable stand-in to allow test set-up to modify global state. """ + CLIENT = None INSTANCE = None def setup_module(): client = Config.CLIENT = Client(admin=True) - Config.INSTANCE = client.instance(INSTANCE_ID, - instance_type=PRODUCTION, - labels=LABELS) - cluster = Config.INSTANCE.cluster(CLUSTER_ID, - location_id=LOCATION_ID, - serve_nodes=SERVER_NODES, - default_storage_type=STORAGE_TYPE) + Config.INSTANCE = client.instance( + INSTANCE_ID, instance_type=PRODUCTION, labels=LABELS + ) + cluster = Config.INSTANCE.cluster( + CLUSTER_ID, + location_id=LOCATION_ID, + serve_nodes=SERVER_NODES, + default_storage_type=STORAGE_TYPE, + ) operation = Config.INSTANCE.create(clusters=[cluster]) # We want to make sure the operation completes. 
operation.result(timeout=100) @@ -85,20 +90,22 @@ def test_bigtable_create_instance(): from google.cloud.bigtable import Client from google.cloud.bigtable import enums - my_instance_id = "inst-my-" + unique_resource_id('-') - my_cluster_id = "clus-my-" + unique_resource_id('-') - location_id = 'us-central1-f' + my_instance_id = "inst-my-" + unique_resource_id("-") + my_cluster_id = "clus-my-" + unique_resource_id("-") + location_id = "us-central1-f" serve_nodes = 3 storage_type = enums.StorageType.SSD production = enums.Instance.Type.PRODUCTION - labels = {'prod-label': 'prod-label'} + labels = {"prod-label": "prod-label"} client = Client(admin=True) - instance = client.instance(my_instance_id, instance_type=production, - labels=labels) - cluster = instance.cluster(my_cluster_id, location_id=location_id, - serve_nodes=serve_nodes, - default_storage_type=storage_type) + instance = client.instance(my_instance_id, instance_type=production, labels=labels) + cluster = instance.cluster( + my_cluster_id, + location_id=location_id, + serve_nodes=serve_nodes, + default_storage_type=storage_type, + ) operation = instance.create(clusters=[cluster]) # We want to make sure the operation completes. operation.result(timeout=100) @@ -120,14 +127,17 @@ def test_bigtable_create_additional_cluster(): client = Client(admin=True) instance = client.instance(INSTANCE_ID) - cluster_id = "clus-my-" + unique_resource_id('-') - location_id = 'us-central1-a' + cluster_id = "clus-my-" + unique_resource_id("-") + location_id = "us-central1-a" serve_nodes = 3 storage_type = enums.StorageType.SSD - cluster = instance.cluster(cluster_id, location_id=location_id, - serve_nodes=serve_nodes, - default_storage_type=storage_type) + cluster = instance.cluster( + cluster_id, + location_id=location_id, + serve_nodes=serve_nodes, + default_storage_type=storage_type, + ) operation = cluster.create() # We want to make sure the operation completes. operation.result(timeout=100) @@ -140,18 +150,20 @@ def test_bigtable_create_additional_cluster(): def test_bigtable_create_app_profile(): # [START bigtable_create_app_profile] from google.cloud.bigtable import Client + client = Client(admin=True) instance = client.instance(INSTANCE_ID) - app_profile_id = "app-prof-" + unique_resource_id('-') - description = 'routing policy-multy' + app_profile_id = "app-prof-" + unique_resource_id("-") + description = "routing policy-multy" routing_policy_type = enums.RoutingPolicyType.ANY app_profile = instance.app_profile( app_profile_id=app_profile_id, routing_policy_type=routing_policy_type, description=description, - cluster_id=CLUSTER_ID) + cluster_id=CLUSTER_ID, + ) app_profile = app_profile.create(ignore_warnings=True) # [END bigtable_create_app_profile] @@ -200,8 +212,9 @@ def test_bigtable_list_app_profiles(): # [END bigtable_list_app_profiles] app_profile = instance.app_profile( - app_profile_id="app-prof-" + unique_resource_id('-'), - routing_policy_type=enums.RoutingPolicyType.ANY) + app_profile_id="app-prof-" + unique_resource_id("-"), + routing_policy_type=enums.RoutingPolicyType.ANY, + ) app_profile = app_profile.create(ignore_warnings=True) # [START bigtable_list_app_profiles] @@ -292,7 +305,7 @@ def test_bigtable_create_table(): table = instance.table("table_my") # Define the GC policy to retain only the most recent 2 versions. 
max_versions_rule = column_family.MaxVersionsGCRule(2) - table.create(column_families={'cf1': max_versions_rule}) + table.create(column_families={"cf1": max_versions_rule}) # [END bigtable_create_table] assert table.exists() @@ -314,12 +327,15 @@ def test_bigtable_delete_cluster(): client = Client(admin=True) instance = client.instance(INSTANCE_ID) - cluster_id = "clus-my-" + unique_resource_id('-') + cluster_id = "clus-my-" + unique_resource_id("-") # [END bigtable_delete_cluster] - cluster = instance.cluster(cluster_id, location_id=ALT_LOCATION_ID, - serve_nodes=SERVER_NODES, - default_storage_type=STORAGE_TYPE) + cluster = instance.cluster( + cluster_id, + location_id=ALT_LOCATION_ID, + serve_nodes=SERVER_NODES, + default_storage_type=STORAGE_TYPE, + ) operation = cluster.create() # We want to make sure the operation completes. operation.result(timeout=1000) @@ -336,18 +352,20 @@ def test_bigtable_delete_instance(): from google.cloud.bigtable import Client client = Client(admin=True) - instance_id_to_delete = "inst-my-" + unique_resource_id('-') + instance_id_to_delete = "inst-my-" + unique_resource_id("-") # [END bigtable_delete_instance] - cluster_id = "clus-my-" + unique_resource_id('-') - - instance = client.instance(instance_id_to_delete, - instance_type=PRODUCTION, - labels=LABELS) - cluster = instance.cluster(cluster_id, - location_id=ALT_LOCATION_ID, - serve_nodes=SERVER_NODES, - default_storage_type=STORAGE_TYPE) + cluster_id = "clus-my-" + unique_resource_id("-") + + instance = client.instance( + instance_id_to_delete, instance_type=PRODUCTION, labels=LABELS + ) + cluster = instance.cluster( + cluster_id, + location_id=ALT_LOCATION_ID, + serve_nodes=SERVER_NODES, + default_storage_type=STORAGE_TYPE, + ) operation = instance.create(clusters=[cluster]) # We want to make sure the operation completes. 
operation.result(timeout=100) @@ -389,9 +407,7 @@ def test_bigtable_set_iam_policy_then_get_iam_policy(): instance = client.instance(INSTANCE_ID) instance.reload() new_policy = Policy() - new_policy[BIGTABLE_ADMIN_ROLE] = [ - Policy.service_account(service_account_email), - ] + new_policy[BIGTABLE_ADMIN_ROLE] = [Policy.service_account(service_account_email)] policy_latest = instance.set_iam_policy(new_policy) # [END bigtable_set_iam_policy] @@ -409,5 +425,5 @@ def test_bigtable_set_iam_policy_then_get_iam_policy(): assert len(policy.bigtable_admins) > 0 -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() diff --git a/packages/google-cloud-bigtable/google/__init__.py b/packages/google-cloud-bigtable/google/__init__.py index e16082edc506..abc37089339e 100644 --- a/packages/google-cloud-bigtable/google/__init__.py +++ b/packages/google-cloud-bigtable/google/__init__.py @@ -17,7 +17,9 @@ try: import pkg_resources + pkg_resources.declare_namespace(__name__) except ImportError: import pkgutil + __path__ = pkgutil.extend_path(__path__, __name__) diff --git a/packages/google-cloud-bigtable/google/cloud/__init__.py b/packages/google-cloud-bigtable/google/cloud/__init__.py index 267f71008dcb..2f4b4738aee1 100644 --- a/packages/google-cloud-bigtable/google/cloud/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/__init__.py @@ -1,6 +1,8 @@ try: import pkg_resources + pkg_resources.declare_namespace(__name__) except ImportError: import pkgutil + __path__ = pkgutil.extend_path(__path__, __name__) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable.py b/packages/google-cloud-bigtable/google/cloud/bigtable.py index 3a5e6a7477e4..72858878e8a7 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable.py @@ -17,7 +17,4 @@ from google.cloud.bigtable_v2 import BigtableClient from google.cloud.bigtable_v2 import types -__all__ = ( - 'types', - 'BigtableClient', -) +__all__ = ("types", "BigtableClient") diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable/__init__.py index db4a5e0bf130..75b765a8a0da 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/__init__.py @@ -16,9 +16,10 @@ from pkg_resources import get_distribution -__version__ = get_distribution('google-cloud-bigtable').version + +__version__ = get_distribution("google-cloud-bigtable").version from google.cloud.bigtable.client import Client -__all__ = ['__version__', 'Client'] +__all__ = ["__version__", "Client"] diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/app_profile.py b/packages/google-cloud-bigtable/google/cloud/bigtable/app_profile.py index fc0dfd9c5cf8..44246e829551 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/app_profile.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/app_profile.py @@ -23,9 +23,10 @@ from google.api_core.exceptions import NotFound _APP_PROFILE_NAME_RE = re.compile( - r'^projects/(?P[^/]+)/' - r'instances/(?P[^/]+)/' - r'appProfiles/(?P[_a-zA-Z0-9][-_.a-zA-Z0-9]*)$') + r"^projects/(?P[^/]+)/" + r"instances/(?P[^/]+)/" + r"appProfiles/(?P[_a-zA-Z0-9][-_.a-zA-Z0-9]*)$" +) class AppProfile(object): @@ -64,10 +65,15 @@ class AppProfile(object): ROUTING_POLICY_TYPE_SINGLE. 
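Taken together with the snippets above, the reworked AppProfile constructor is normally reached through the instance factory rather than instantiated directly; a minimal usage sketch under that assumption (the instance, profile, and cluster IDs are illustrative, not taken from this patch):

from google.cloud.bigtable import Client
from google.cloud.bigtable import enums

client = Client(admin=True)
instance = client.instance("my-instance")

# Route all requests from this profile to a single cluster and allow
# single-row transactions on it.
app_profile = instance.app_profile(
    app_profile_id="my-app-profile",
    routing_policy_type=enums.RoutingPolicyType.SINGLE,
    description="single-cluster routing",
    cluster_id="my-cluster",
    allow_transactional_writes=False,
)
app_profile = app_profile.create(ignore_warnings=True)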
""" - def __init__(self, app_profile_id, instance, - routing_policy_type=None, - description=None, cluster_id=None, - allow_transactional_writes=None): + def __init__( + self, + app_profile_id, + instance, + routing_policy_type=None, + description=None, + cluster_id=None, + allow_transactional_writes=None, + ): self.app_profile_id = app_profile_id self._instance = instance self.routing_policy_type = routing_policy_type @@ -91,8 +97,10 @@ def name(self): :returns: The AppProfile name. """ return self.instance_admin_client.app_profile_path( - self._instance._client.project, self._instance.instance_id, - self.app_profile_id) + self._instance._client.project, + self._instance.instance_id, + self.app_profile_id, + ) @property def instance_admin_client(self): @@ -111,8 +119,10 @@ def __eq__(self, other): # identifying values instance, AppProfile ID and client. This is # intentional, since the same AppProfile can be in different # states if not synchronized. - return (other.app_profile_id == self.app_profile_id and - other._instance == self._instance) + return ( + other.app_profile_id == self.app_profile_id + and other._instance == self._instance + ) def __ne__(self, other): return not self == other @@ -138,18 +148,23 @@ def from_pb(cls, app_profile_pb, instance): or if the parsed project ID does not match the project ID on the client. """ - match_app_profile_name = ( - _APP_PROFILE_NAME_RE.match(app_profile_pb.name)) + match_app_profile_name = _APP_PROFILE_NAME_RE.match(app_profile_pb.name) if match_app_profile_name is None: - raise ValueError('AppProfile protobuf name was not in the ' - 'expected format.', app_profile_pb.name) - if match_app_profile_name.group('instance') != instance.instance_id: - raise ValueError('Instance ID on app_profile does not match the ' - 'instance ID on the client') - if match_app_profile_name.group('project') != instance._client.project: - raise ValueError('Project ID on app_profile does not match the ' - 'project ID on the client') - app_profile_id = match_app_profile_name.group('app_profile_id') + raise ValueError( + "AppProfile protobuf name was not in the " "expected format.", + app_profile_pb.name, + ) + if match_app_profile_name.group("instance") != instance.instance_id: + raise ValueError( + "Instance ID on app_profile does not match the " + "instance ID on the client" + ) + if match_app_profile_name.group("project") != instance._client.project: + raise ValueError( + "Project ID on app_profile does not match the " + "project ID on the client" + ) + app_profile_id = match_app_profile_name.group("app_profile_id") result = cls(app_profile_id, instance) result._update_from_pb(app_profile_pb) @@ -166,15 +181,15 @@ def _update_from_pb(self, app_profile_pb): self.description = app_profile_pb.description routing_policy_type = None - if app_profile_pb.HasField('multi_cluster_routing_use_any'): + if app_profile_pb.HasField("multi_cluster_routing_use_any"): routing_policy_type = RoutingPolicyType.ANY self.allow_transactional_writes = False else: routing_policy_type = RoutingPolicyType.SINGLE self.cluster_id = app_profile_pb.single_cluster_routing.cluster_id self.allow_transactional_writes = ( - app_profile_pb.single_cluster_routing - .allow_transactional_writes) + app_profile_pb.single_cluster_routing.allow_transactional_writes + ) self.routing_policy_type = routing_policy_type def _to_pb(self): @@ -186,34 +201,33 @@ def _to_pb(self): routing_policy_type is not set """ if not self.routing_policy_type: - raise ValueError('AppProfile required routing policy.') + raise 
ValueError("AppProfile required routing policy.") single_cluster_routing = None multi_cluster_routing_use_any = None if self.routing_policy_type == RoutingPolicyType.ANY: multi_cluster_routing_use_any = ( - instance_pb2.AppProfile.MultiClusterRoutingUseAny()) + instance_pb2.AppProfile.MultiClusterRoutingUseAny() + ) else: - single_cluster_routing = ( - instance_pb2.AppProfile.SingleClusterRouting( - cluster_id=self.cluster_id, - allow_transactional_writes=self.allow_transactional_writes) + single_cluster_routing = instance_pb2.AppProfile.SingleClusterRouting( + cluster_id=self.cluster_id, + allow_transactional_writes=self.allow_transactional_writes, ) app_profile_pb = instance_pb2.AppProfile( - name=self.name, description=self.description, + name=self.name, + description=self.description, multi_cluster_routing_use_any=multi_cluster_routing_use_any, - single_cluster_routing=single_cluster_routing + single_cluster_routing=single_cluster_routing, ) return app_profile_pb def reload(self): """Reload the metadata for this cluster""" - app_profile_pb = ( - self.instance_admin_client.get_app_profile( - self.name)) + app_profile_pb = self.instance_admin_client.get_app_profile(self.name) # NOTE: _update_from_pb does not check that the project and # app_profile ID on the response match the request. @@ -258,10 +272,15 @@ def create(self, ignore_warnings=None): :param: ignore_warnings: (Optional) If true, ignore safety checks when creating the AppProfile. """ - return self.from_pb(self.instance_admin_client.create_app_profile( - parent=self._instance.name, app_profile_id=self.app_profile_id, - app_profile=self._to_pb(), ignore_warnings=ignore_warnings), - self._instance) + return self.from_pb( + self.instance_admin_client.create_app_profile( + parent=self._instance.name, + app_profile_id=self.app_profile_id, + app_profile=self._to_pb(), + ignore_warnings=ignore_warnings, + ), + self._instance, + ) def update(self, ignore_warnings=None): """Update this app_profile. @@ -278,16 +297,18 @@ def update(self, ignore_warnings=None): update_mask_pb = field_mask_pb2.FieldMask() if self.description is not None: - update_mask_pb.paths.append('description') + update_mask_pb.paths.append("description") if self.routing_policy_type == RoutingPolicyType.ANY: - update_mask_pb.paths.append('multi_cluster_routing_use_any') + update_mask_pb.paths.append("multi_cluster_routing_use_any") else: - update_mask_pb.paths.append('single_cluster_routing') + update_mask_pb.paths.append("single_cluster_routing") return self.instance_admin_client.update_app_profile( - app_profile=self._to_pb(), update_mask=update_mask_pb, - ignore_warnings=ignore_warnings) + app_profile=self._to_pb(), + update_mask=update_mask_pb, + ignore_warnings=ignore_warnings, + ) def delete(self, ignore_warnings=None): """Delete this AppProfile. @@ -301,5 +322,4 @@ def delete(self, ignore_warnings=None): If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ - self.instance_admin_client.delete_app_profile( - self.name, ignore_warnings) + self.instance_admin_client.delete_app_profile(self.name, ignore_warnings) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py b/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py index eb697ff54df7..1f22af4a534c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py @@ -56,8 +56,7 @@ class MutationsBatcher(object): (5 MB). 
""" - def __init__(self, table, flush_count=FLUSH_COUNT, - max_row_bytes=MAX_ROW_BYTES): + def __init__(self, table, flush_count=FLUSH_COUNT, max_row_bytes=MAX_ROW_BYTES): self.rows = [] self.total_mutation_count = 0 self.total_size = 0 @@ -95,8 +94,10 @@ def mutate(self, row): mutation_count = len(row._get_mutations()) if mutation_count > MAX_MUTATIONS: raise MaxMutationsError( - 'The row key {} exceeds the number of mutations {}.'.format( - row.row_key, mutation_count), ) + "The row key {} exceeds the number of mutations {}.".format( + row.row_key, mutation_count + ) + ) if (self.total_mutation_count + mutation_count) >= MAX_MUTATIONS: self.flush() @@ -105,8 +106,7 @@ def mutate(self, row): self.total_mutation_count += mutation_count self.total_size += row.get_mutations_size() - if (self.total_size >= self.max_row_bytes or - len(self.rows) >= self.flush_count): + if self.total_size >= self.max_row_bytes or len(self.rows) >= self.flush_count: self.flush() def mutate_rows(self, rows): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py index f6f8866426b0..14b836b9dd5b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py @@ -50,26 +50,24 @@ INSTANCE_TYPE_PRODUCTION = enums.Instance.Type.PRODUCTION INSTANCE_TYPE_DEVELOPMENT = enums.Instance.Type.DEVELOPMENT INSTANCE_TYPE_UNSPECIFIED = enums.Instance.Type.TYPE_UNSPECIFIED -_CLIENT_INFO = client_info.ClientInfo( - client_library_version=__version__) -SPANNER_ADMIN_SCOPE = 'https://www.googleapis.com/auth/spanner.admin' -ADMIN_SCOPE = 'https://www.googleapis.com/auth/bigtable.admin' +_CLIENT_INFO = client_info.ClientInfo(client_library_version=__version__) +SPANNER_ADMIN_SCOPE = "https://www.googleapis.com/auth/spanner.admin" +ADMIN_SCOPE = "https://www.googleapis.com/auth/bigtable.admin" """Scope for interacting with the Cluster Admin and Table Admin APIs.""" -DATA_SCOPE = 'https://www.googleapis.com/auth/bigtable.data' +DATA_SCOPE = "https://www.googleapis.com/auth/bigtable.data" """Scope for reading and writing table data.""" -READ_ONLY_SCOPE = 'https://www.googleapis.com/auth/bigtable.data.readonly' +READ_ONLY_SCOPE = "https://www.googleapis.com/auth/bigtable.data.readonly" """Scope for reading table data.""" def _create_gapic_client(client_class): - def inner(self): if self._emulator_host is None: - return client_class( - credentials=self._credentials, client_info=_CLIENT_INFO) + return client_class(credentials=self._credentials, client_info=_CLIENT_INFO) else: return client_class( - channel=self._emulator_channel, client_info=_CLIENT_INFO) + channel=self._emulator_channel, client_info=_CLIENT_INFO + ) return inner @@ -111,15 +109,18 @@ class Client(ClientWithProject): :raises: :class:`ValueError ` if both ``read_only`` and ``admin`` are :data:`True` """ + _table_data_client = None _table_admin_client = None _instance_admin_client = None - def __init__(self, project=None, credentials=None, - read_only=False, admin=False, channel=None): + def __init__( + self, project=None, credentials=None, read_only=False, admin=False, channel=None + ): if read_only and admin: - raise ValueError('A read-only client cannot also perform' - 'administrative actions.') + raise ValueError( + "A read-only client cannot also perform" "administrative actions." + ) # NOTE: We set the scopes **before** calling the parent constructor. 
# It **may** use those scopes in ``with_scopes_if_required``. @@ -134,7 +135,9 @@ def __init__(self, project=None, credentials=None, if channel is not None: warnings.warn( "'channel' is deprecated and no longer used.", - DeprecationWarning, stacklevel=2) + DeprecationWarning, + stacklevel=2, + ) self._channel = channel self.SCOPE = self._get_scopes() @@ -182,8 +185,9 @@ def table_data_client(self): :returns: A BigtableClient object. """ if self._table_data_client is None: - self._table_data_client = _create_gapic_client( - bigtable_v2.BigtableClient)(self) + self._table_data_client = _create_gapic_client(bigtable_v2.BigtableClient)( + self + ) return self._table_data_client @property @@ -198,9 +202,10 @@ def table_admin_client(self): """ if self._table_admin_client is None: if not self._admin: - raise ValueError('Client is not an admin client.') + raise ValueError("Client is not an admin client.") self._table_admin_client = _create_gapic_client( - bigtable_admin_v2.BigtableTableAdminClient)(self) + bigtable_admin_v2.BigtableTableAdminClient + )(self) return self._table_admin_client @property @@ -215,13 +220,13 @@ def instance_admin_client(self): """ if self._instance_admin_client is None: if not self._admin: - raise ValueError('Client is not an admin client.') + raise ValueError("Client is not an admin client.") self._instance_admin_client = _create_gapic_client( - bigtable_admin_v2.BigtableInstanceAdminClient)(self) + bigtable_admin_v2.BigtableInstanceAdminClient + )(self) return self._instance_admin_client - def instance(self, instance_id, display_name=None, - instance_type=None, labels=None): + def instance(self, instance_id, display_name=None, instance_type=None, labels=None): """Factory to create a instance associated with this client. For example: @@ -262,8 +267,13 @@ def instance(self, instance_id, display_name=None, :rtype: :class:`~google.cloud.bigtable.instance.Instance` :returns: an instance owned by this client. """ - return Instance(instance_id, self, display_name=display_name, - instance_type=instance_type, labels=labels) + return Instance( + instance_id, + self, + display_name=display_name, + instance_type=instance_type, + labels=labels, + ) def list_instances(self): """List instances owned by the project. @@ -282,8 +292,7 @@ def list_instances(self): be resolved. """ resp = self.instance_admin_client.list_instances(self.project_path) - instances = [ - Instance.from_pb(instance, self) for instance in resp.instances] + instances = [Instance.from_pb(instance, self) for instance in resp.instances] return instances, resp.failed_locations def list_clusters(self): @@ -302,13 +311,14 @@ def list_clusters(self): 'failed_locations' is a list of strings representing locations which could not be resolved. 
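As the constructor above enforces, a single Client cannot be both read-only and admin, so data-plane and admin work use separately constructed clients. A short sketch of that split (the project ID is an illustrative assumption):

from google.cloud.bigtable import Client

# Data-plane only: may read table data but cannot perform admin operations.
data_client = Client(project="my-project", read_only=True)

# Admin client: required before instance_admin_client / table_admin_client
# can be used.
admin_client = Client(project="my-project", admin=True)

# list_instances() returns the parsed Instance objects plus any locations
# that could not be resolved.
instances, failed_locations = admin_client.list_instances()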
""" - resp = (self.instance_admin_client.list_clusters( - self.instance_admin_client.instance_path(self.project, '-'))) + resp = self.instance_admin_client.list_clusters( + self.instance_admin_client.instance_path(self.project, "-") + ) clusters = [] instances = {} for cluster in resp.clusters: match_cluster_name = _CLUSTER_NAME_RE.match(cluster.name) - instance_id = match_cluster_name.group('instance') + instance_id = match_cluster_name.group("instance") if instance_id not in instances: instances[instance_id] = self.instance(instance_id) clusters.append(Cluster.from_pb(cluster, instances[instance_id])) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py index 1b3fe559c3c7..b617e36c02a9 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py @@ -20,9 +20,11 @@ from google.api_core.exceptions import NotFound -_CLUSTER_NAME_RE = re.compile(r'^projects/(?P[^/]+)/' - r'instances/(?P[^/]+)/clusters/' - r'(?P[a-z][-a-z0-9]*)$') +_CLUSTER_NAME_RE = re.compile( + r"^projects/(?P[^/]+)/" + r"instances/(?P[^/]+)/clusters/" + r"(?P[a-z][-a-z0-9]*)$" +) class Cluster(object): @@ -72,13 +74,15 @@ class Cluster(object): :data:`google.cloud.bigtable.enums.Cluster.State.DISABLED`. """ - def __init__(self, - cluster_id, - instance, - location_id=None, - serve_nodes=None, - default_storage_type=None, - _state=None): + def __init__( + self, + cluster_id, + instance, + location_id=None, + serve_nodes=None, + default_storage_type=None, + _state=None, + ): self.cluster_id = cluster_id self._instance = instance self.location_id = location_id @@ -108,15 +112,19 @@ def from_pb(cls, cluster_pb, instance): """ match_cluster_name = _CLUSTER_NAME_RE.match(cluster_pb.name) if match_cluster_name is None: - raise ValueError('Cluster protobuf name was not in the ' - 'expected format.', cluster_pb.name) - if match_cluster_name.group('instance') != instance.instance_id: - raise ValueError('Instance ID on cluster does not match the ' - 'instance ID on the client') - if match_cluster_name.group('project') != instance._client.project: - raise ValueError('Project ID on cluster does not match the ' - 'project ID on the client') - cluster_id = match_cluster_name.group('cluster_id') + raise ValueError( + "Cluster protobuf name was not in the " "expected format.", + cluster_pb.name, + ) + if match_cluster_name.group("instance") != instance.instance_id: + raise ValueError( + "Instance ID on cluster does not match the " "instance ID on the client" + ) + if match_cluster_name.group("project") != instance._client.project: + raise ValueError( + "Project ID on cluster does not match the " "project ID on the client" + ) + cluster_id = match_cluster_name.group("cluster_id") result = cls(cluster_id, instance) result._update_from_pb(cluster_pb) @@ -127,7 +135,7 @@ def _update_from_pb(self, cluster_pb): Helper for :meth:`from_pb` and :meth:`reload`. """ - self.location_id = cluster_pb.location.split('/')[-1] + self.location_id = cluster_pb.location.split("/")[-1] self.serve_nodes = cluster_pb.serve_nodes self.default_storage_type = cluster_pb.default_storage_type self._state = cluster_pb.state @@ -148,8 +156,8 @@ def name(self): :returns: The cluster name. 
""" return self._instance._client.instance_admin_client.cluster_path( - self._instance._client.project, self._instance.instance_id, - self.cluster_id) + self._instance._client.project, self._instance.instance_id, self.cluster_id + ) @property def state(self): @@ -165,8 +173,7 @@ def __eq__(self, other): # intentional, since the same cluster can be in different states # if not synchronized. Clusters with similar instance/cluster # settings but different clients can't be used in the same way. - return (other.cluster_id == self.cluster_id and - other._instance == self._instance) + return other.cluster_id == self.cluster_id and other._instance == self._instance def __ne__(self, other): return not self == other @@ -180,8 +187,7 @@ def reload(self): :start-after: [START bigtable_reload_cluster] :end-before: [END bigtable_reload_cluster] """ - cluster_pb = self._instance._client.instance_admin_client.get_cluster( - self.name) + cluster_pb = self._instance._client.instance_admin_client.get_cluster(self.name) # NOTE: _update_from_pb does not check that the project and # cluster ID on the response match the request. @@ -237,7 +243,8 @@ def create(self): cluster_pb = self._to_pb() return client.instance_admin_client.create_cluster( - self._instance.name, self.cluster_id, cluster_pb) + self._instance.name, self.cluster_id, cluster_pb + ) def update(self): """Update this cluster. @@ -280,7 +287,8 @@ def update(self): # Location is set only at the time of creation of a cluster # and can not be changed after cluster has been created. return client.instance_admin_client.update_cluster( - self.name, self.serve_nodes, None) + self.name, self.serve_nodes, None + ) def delete(self): """Delete this cluster. @@ -315,9 +323,11 @@ def _to_pb(self): """ Create cluster proto buff message for API calls """ client = self._instance._client location = client.instance_admin_client.location_path( - client.project, self.location_id) + client.project, self.location_id + ) cluster_pb = instance_pb2.Cluster( location=location, serve_nodes=self.serve_nodes, - default_storage_type=self.default_storage_type) + default_storage_type=self.default_storage_type, + ) return cluster_pb diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/column_family.py b/packages/google-cloud-bigtable/google/cloud/bigtable/column_family.py index c0c5c47d6da3..ec5d4a6eadfb 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/column_family.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/column_family.py @@ -16,10 +16,10 @@ from google.cloud import _helpers +from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_v2_pb2 from google.cloud.bigtable_admin_v2.proto import ( - table_pb2 as table_v2_pb2) -from google.cloud.bigtable_admin_v2.proto import ( - bigtable_table_admin_pb2 as table_admin_v2_pb2) + bigtable_table_admin_pb2 as table_admin_v2_pb2, +) class GarbageCollectionRule(object): @@ -119,8 +119,7 @@ def to_pb(self): :rtype: :class:`.table_v2_pb2.GcRule` :returns: The converted current object. """ - union = table_v2_pb2.GcRule.Union( - rules=[rule.to_pb() for rule in self.rules]) + union = table_v2_pb2.GcRule.Union(rules=[rule.to_pb() for rule in self.rules]) return table_v2_pb2.GcRule(union=union) @@ -149,7 +148,8 @@ def to_pb(self): :returns: The converted current object. 
""" intersection = table_v2_pb2.GcRule.Intersection( - rules=[rule.to_pb() for rule in self.rules]) + rules=[rule.to_pb() for rule in self.rules] + ) return table_v2_pb2.GcRule(intersection=intersection) @@ -195,14 +195,16 @@ def name(self): :rtype: str :returns: The column family name. """ - return self._table.name + '/columnFamilies/' + self.column_family_id + return self._table.name + "/columnFamilies/" + self.column_family_id def __eq__(self, other): if not isinstance(other, self.__class__): return NotImplemented - return (other.column_family_id == self.column_family_id and - other._table == self._table and - other.gc_rule == self.gc_rule) + return ( + other.column_family_id == self.column_family_id + and other._table == self._table + and other.gc_rule == self.gc_rule + ) def __ne__(self, other): return not self == other @@ -221,15 +223,16 @@ def to_pb(self): def create(self): """Create this column family.""" column_family = self.to_pb() - modification = ( - table_admin_v2_pb2.ModifyColumnFamiliesRequest.Modification( - id=self.column_family_id, create=column_family)) + modification = table_admin_v2_pb2.ModifyColumnFamiliesRequest.Modification( + id=self.column_family_id, create=column_family + ) client = self._table._instance._client # data it contains are the GC rule and the column family ID already # stored on this instance. client.table_admin_client.modify_column_families( - self._table.name, [modification]) + self._table.name, [modification] + ) def update(self): """Update this column family. @@ -240,27 +243,29 @@ def update(self): you will simply be referring to a different column family. """ column_family = self.to_pb() - modification = ( - table_admin_v2_pb2.ModifyColumnFamiliesRequest.Modification( - id=self.column_family_id, update=column_family)) + modification = table_admin_v2_pb2.ModifyColumnFamiliesRequest.Modification( + id=self.column_family_id, update=column_family + ) client = self._table._instance._client # data it contains are the GC rule and the column family ID already # stored on this instance. client.table_admin_client.modify_column_families( - self._table.name, [modification]) + self._table.name, [modification] + ) def delete(self): """Delete this column family.""" - modification = ( - table_admin_v2_pb2.ModifyColumnFamiliesRequest.Modification( - id=self.column_family_id, drop=True)) + modification = table_admin_v2_pb2.ModifyColumnFamiliesRequest.Modification( + id=self.column_family_id, drop=True + ) client = self._table._instance._client # data it contains are the GC rule and the column family ID already # stored on this instance. client.table_admin_client.modify_column_families( - self._table.name, [modification]) + self._table.name, [modification] + ) def _gc_rule_from_pb(gc_rule_pb): @@ -276,21 +281,19 @@ def _gc_rule_from_pb(gc_rule_pb): :raises: :class:`ValueError ` if the rule name is unexpected. 
""" - rule_name = gc_rule_pb.WhichOneof('rule') + rule_name = gc_rule_pb.WhichOneof("rule") if rule_name is None: return None - if rule_name == 'max_num_versions': + if rule_name == "max_num_versions": return MaxVersionsGCRule(gc_rule_pb.max_num_versions) - elif rule_name == 'max_age': + elif rule_name == "max_age": max_age = _helpers._duration_pb_to_timedelta(gc_rule_pb.max_age) return MaxAgeGCRule(max_age) - elif rule_name == 'union': - return GCRuleUnion([_gc_rule_from_pb(rule) - for rule in gc_rule_pb.union.rules]) - elif rule_name == 'intersection': - rules = [_gc_rule_from_pb(rule) - for rule in gc_rule_pb.intersection.rules] + elif rule_name == "union": + return GCRuleUnion([_gc_rule_from_pb(rule) for rule in gc_rule_pb.union.rules]) + elif rule_name == "intersection": + rules = [_gc_rule_from_pb(rule) for rule in gc_rule_pb.intersection.rules] return GCRuleIntersection(rules) else: - raise ValueError('Unexpected rule name', rule_name) + raise ValueError("Unexpected rule name", rule_name) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/enums.py b/packages/google-cloud-bigtable/google/cloud/bigtable/enums.py index 3f695b86ce47..f0965779fc8b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/enums.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/enums.py @@ -25,6 +25,7 @@ class StorageType(object): SSD (int): Flash (SSD) storage should be used. HDD (int): Magnetic drive (HDD) storage should be used. """ + UNSPECIFIED = enums.StorageType.STORAGE_TYPE_UNSPECIFIED SSD = enums.StorageType.SSD HDD = enums.StorageType.HDD @@ -43,6 +44,7 @@ class State(object): CREATING (int): The instance is currently being created, and may be destroyed if the creation process encounters an error. """ + NOT_KNOWN = enums.Instance.State.STATE_NOT_KNOWN READY = enums.Instance.State.READY CREATING = enums.Instance.State.CREATING @@ -67,6 +69,7 @@ class Type(object): When creating a development instance, ``serve_nodes`` on the cluster must not be set. """ + UNSPECIFIED = enums.Instance.Type.TYPE_UNSPECIFIED PRODUCTION = enums.Instance.Type.PRODUCTION DEVELOPMENT = enums.Instance.Type.DEVELOPMENT @@ -92,6 +95,7 @@ class State(object): DISABLED (int): The cluster has no backing nodes. The data (tables) still exist, but no operations can be performed on the cluster. """ + NOT_KNOWN = enums.Cluster.State.STATE_NOT_KNOWN READY = enums.Cluster.State.READY CREATING = enums.Cluster.State.CREATING @@ -119,6 +123,7 @@ class RoutingPolicyType(object): See https://cloud.google.com/bigtable/docs/reference/admin/rpc/google.bigtable.admin.v2#google.bigtable.admin.v2.AppProfile.SingleClusterRouting """ + ANY = 1 SINGLE = 2 @@ -144,6 +149,7 @@ class View(object): replication state. FULL (int): Populates all fields. """ + VIEW_UNSPECIFIED = enums.Table.View.VIEW_UNSPECIFIED NAME_ONLY = enums.Table.View.NAME_ONLY SCHEMA_VIEW = enums.Table.View.SCHEMA_VIEW @@ -173,11 +179,13 @@ class ReplicationState(object): cluster. Depending on replication delay, reads may not immediately reflect the state of the table in other clusters. 
""" - STATE_NOT_KNOWN = enums.Table.ClusterState.ReplicationState.\ - STATE_NOT_KNOWN + + STATE_NOT_KNOWN = enums.Table.ClusterState.ReplicationState.STATE_NOT_KNOWN INITIALIZING = enums.Table.ClusterState.ReplicationState.INITIALIZING - PLANNED_MAINTENANCE = enums.Table.ClusterState.ReplicationState.\ - PLANNED_MAINTENANCE - UNPLANNED_MAINTENANCE = enums.Table.ClusterState.ReplicationState.\ - UNPLANNED_MAINTENANCE + PLANNED_MAINTENANCE = ( + enums.Table.ClusterState.ReplicationState.PLANNED_MAINTENANCE + ) + UNPLANNED_MAINTENANCE = ( + enums.Table.ClusterState.ReplicationState.UNPLANNED_MAINTENANCE + ) READY = enums.Table.ClusterState.ReplicationState.READY diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py index 6dd0eecb8a50..d494d7c1b936 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py @@ -30,9 +30,10 @@ from google.cloud.bigtable.policy import Policy -_EXISTING_INSTANCE_LOCATION_ID = 'see-existing-cluster' -_INSTANCE_NAME_RE = re.compile(r'^projects/(?P[^/]+)/' - r'instances/(?P[a-z][-a-z0-9]*)$') +_EXISTING_INSTANCE_LOCATION_ID = "see-existing-cluster" +_INSTANCE_NAME_RE = re.compile( + r"^projects/(?P[^/]+)/" r"instances/(?P[a-z][-a-z0-9]*)$" +) class Instance(object): @@ -92,13 +93,15 @@ class Instance(object): :data:`google.cloud.bigtable.enums.Instance.State.CREATING`. """ - def __init__(self, - instance_id, - client, - display_name=None, - instance_type=None, - labels=None, - _state=None): + def __init__( + self, + instance_id, + client, + display_name=None, + instance_type=None, + labels=None, + _state=None, + ): self.instance_id = instance_id self._client = client self.display_name = display_name or instance_id @@ -111,7 +114,7 @@ def _update_from_pb(self, instance_pb): Helper for :meth:`from_pb` and :meth:`reload`. """ if not instance_pb.display_name: # Simple field (string) - raise ValueError('Instance protobuf does not contain display_name') + raise ValueError("Instance protobuf does not contain display_name") self.display_name = instance_pb.display_name self.type_ = instance_pb.type self.labels = dict(instance_pb.labels) @@ -137,12 +140,15 @@ def from_pb(cls, instance_pb, client): """ match = _INSTANCE_NAME_RE.match(instance_pb.name) if match is None: - raise ValueError('Instance protobuf name was not in the ' - 'expected format.', instance_pb.name) - if match.group('project') != client.project: - raise ValueError('Project ID on instance does not match the ' - 'project ID on the client') - instance_id = match.group('instance_id') + raise ValueError( + "Instance protobuf name was not in the " "expected format.", + instance_pb.name, + ) + if match.group("project") != client.project: + raise ValueError( + "Project ID on instance does not match the " "project ID on the client" + ) + instance_id = match.group("instance_id") result = cls(instance_id, client) result._update_from_pb(instance_pb) @@ -164,7 +170,8 @@ def name(self): :returns: Return a fully-qualified instance string. """ return self._client.instance_admin_client.instance_path( - project=self._client.project, instance=self.instance_id) + project=self._client.project, instance=self.instance_id + ) @property def state(self): @@ -180,15 +187,18 @@ def __eq__(self, other): # intentional, since the same instance can be in different states # if not synchronized. 
Instances with similar instance # settings but different clients can't be used in the same way. - return (other.instance_id == self.instance_id and - other._client == self._client) + return other.instance_id == self.instance_id and other._client == self._client def __ne__(self, other): return not self == other - def create(self, location_id=None, - serve_nodes=None, - default_storage_type=None, clusters=None): + def create( + self, + location_id=None, + serve_nodes=None, + default_storage_type=None, + clusters=None, + ): """Create this instance. For example: @@ -246,27 +256,39 @@ def create(self, location_id=None, """ if clusters is None: - cluster_id = '{}-cluster'.format(self.instance_id) - - clusters = [self.cluster(cluster_id, location_id=location_id, - serve_nodes=serve_nodes, - default_storage_type=default_storage_type)] - elif (location_id is not None or - serve_nodes is not None or - default_storage_type is not None): - raise ValueError("clusters and one of location_id, serve_nodes, \ + cluster_id = "{}-cluster".format(self.instance_id) + + clusters = [ + self.cluster( + cluster_id, + location_id=location_id, + serve_nodes=serve_nodes, + default_storage_type=default_storage_type, + ) + ] + elif ( + location_id is not None + or serve_nodes is not None + or default_storage_type is not None + ): + raise ValueError( + "clusters and one of location_id, serve_nodes, \ default_storage_type can not be set \ - simultaneously.") + simultaneously." + ) instance_pb = instance_pb2.Instance( - display_name=self.display_name, type=self.type_, - labels=self.labels) + display_name=self.display_name, type=self.type_, labels=self.labels + ) parent = self._client.project_path return self._client.instance_admin_client.create_instance( - parent=parent, instance_id=self.instance_id, instance=instance_pb, - clusters={c.cluster_id: c._to_pb() for c in clusters}) + parent=parent, + instance_id=self.instance_id, + instance=instance_pb, + clusters={c.cluster_id: c._to_pb() for c in clusters}, + ) def exists(self): """Check whether the instance already exists. @@ -296,8 +318,7 @@ def reload(self): :start-after: [START bigtable_reload_instance] :end-before: [END bigtable_reload_instance] """ - instance_pb = self._client.instance_admin_client.get_instance( - self.name) + instance_pb = self._client.instance_admin_client.get_instance(self.name) # NOTE: _update_from_pb does not check that the project and # instance ID on the response match the request. @@ -333,18 +354,21 @@ def update(self): """ update_mask_pb = field_mask_pb2.FieldMask() if self.display_name is not None: - update_mask_pb.paths.append('display_name') + update_mask_pb.paths.append("display_name") if self.type_ is not None: - update_mask_pb.paths.append('type') + update_mask_pb.paths.append("type") if self.labels is not None: - update_mask_pb.paths.append('labels') + update_mask_pb.paths.append("labels") instance_pb = instance_pb2.Instance( - name=self.name, display_name=self.display_name, - type=self.type_, labels=self.labels) + name=self.name, + display_name=self.display_name, + type=self.type_, + labels=self.labels, + ) return self._client.instance_admin_client.partial_update_instance( - instance=instance_pb, - update_mask=update_mask_pb) + instance=instance_pb, update_mask=update_mask_pb + ) def delete(self): """Delete this instance. 
@@ -414,7 +438,8 @@ class `google.cloud.bigtable.policy.Policy` """ instance_admin_client = self._client.instance_admin_client resp = instance_admin_client.set_iam_policy( - resource=self.name, policy=policy.to_api_repr()) + resource=self.name, policy=policy.to_api_repr() + ) return Policy.from_api_repr(self._to_dict_from_policy_pb(resp)) def test_iam_permissions(self, permissions): @@ -441,7 +466,8 @@ def test_iam_permissions(self, permissions): """ instance_admin_client = self._client.instance_admin_client resp = instance_admin_client.test_iam_permissions( - resource=self.name, permissions=permissions) + resource=self.name, permissions=permissions + ) return list(resp.permissions) def _to_dict_from_policy_pb(self, policy): @@ -450,15 +476,18 @@ def _to_dict_from_policy_pb(self, policy): :meth: google.cloud.iam.Policy.from_api_repr """ pb_dict = {} - bindings = [{'role': binding.role, 'members': binding.members} - for binding in policy.bindings] - pb_dict['etag'] = policy.etag - pb_dict['version'] = policy.version - pb_dict['bindings'] = bindings + bindings = [ + {"role": binding.role, "members": binding.members} + for binding in policy.bindings + ] + pb_dict["etag"] = policy.etag + pb_dict["version"] = policy.version + pb_dict["bindings"] = bindings return pb_dict - def cluster(self, cluster_id, location_id=None, - serve_nodes=None, default_storage_type=None): + def cluster( + self, cluster_id, location_id=None, serve_nodes=None, default_storage_type=None + ): """Factory to create a cluster associated with this instance. For example: @@ -496,9 +525,13 @@ def cluster(self, cluster_id, location_id=None, :rtype: :class:`~google.cloud.bigtable.instance.Cluster` :returns: a cluster owned by this instance. """ - return Cluster(cluster_id, self, location_id=location_id, - serve_nodes=serve_nodes, - default_storage_type=default_storage_type) + return Cluster( + cluster_id, + self, + location_id=location_id, + serve_nodes=serve_nodes, + default_storage_type=default_storage_type, + ) def list_clusters(self): """List the clusters in this instance. @@ -517,8 +550,7 @@ def list_clusters(self): be resolved. """ resp = self._client.instance_admin_client.list_clusters(self.name) - clusters = [ - Cluster.from_pb(cluster, self) for cluster in resp.clusters] + clusters = [Cluster.from_pb(cluster, self) for cluster in resp.clusters] return clusters, resp.failed_locations def table(self, table_id, app_profile_id=None): @@ -559,20 +591,24 @@ def list_tables(self): result = [] for table_pb in table_list_pb: - table_prefix = self.name + '/tables/' + table_prefix = self.name + "/tables/" if not table_pb.name.startswith(table_prefix): raise ValueError( - 'Table name {} not of expected format'.format( - table_pb.name)) - table_id = table_pb.name[len(table_prefix):] + "Table name {} not of expected format".format(table_pb.name) + ) + table_id = table_pb.name[len(table_prefix) :] result.append(self.table(table_id)) return result - def app_profile(self, app_profile_id, - routing_policy_type=None, - description=None, cluster_id=None, - allow_transactional_writes=None): + def app_profile( + self, + app_profile_id, + routing_policy_type=None, + description=None, + cluster_id=None, + allow_transactional_writes=None, + ): """Factory to create AppProfile associated with this instance. For example: @@ -610,9 +646,13 @@ def app_profile(self, app_profile_id, :returns: AppProfile for this instance. 
""" return AppProfile( - app_profile_id, self, routing_policy_type=routing_policy_type, - description=description, cluster_id=cluster_id, - allow_transactional_writes=allow_transactional_writes) + app_profile_id, + self, + routing_policy_type=routing_policy_type, + description=description, + cluster_id=cluster_id, + allow_transactional_writes=allow_transactional_writes, + ) def list_app_profiles(self): """Lists information about AppProfiles in an instance. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/policy.py b/packages/google-cloud-bigtable/google/cloud/bigtable/policy.py index 99523404258c..87c9a7650c2e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/policy.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/policy.py @@ -16,19 +16,19 @@ from google.cloud._helpers import _to_bytes """IAM roles supported by Bigtable Instance resource""" -BIGTABLE_ADMIN_ROLE = 'roles/bigtable.admin' +BIGTABLE_ADMIN_ROLE = "roles/bigtable.admin" """Administers all instances within a project, including the data stored within tables. Can create new instances. Intended for project administrators. """ -BIGTABLE_USER_ROLE = 'roles/bigtable.user' +BIGTABLE_USER_ROLE = "roles/bigtable.user" """Provides read-write access to the data stored within tables. Intended for application developers or service accounts. """ -BIGTABLE_READER_ROLE = 'roles/bigtable.reader' +BIGTABLE_READER_ROLE = "roles/bigtable.reader" """Provides read-only access to the data stored within tables. Intended for data scientists, dashboard generators, and other data-analysis scenarios. """ -BIGTABLE_VIEWER_ROLE = 'roles/bigtable.viewer' +BIGTABLE_VIEWER_ROLE = "roles/bigtable.viewer" """Provides no data access. Intended as a minimal set of permissions to access the GCP Console for Cloud Bigtable. """ @@ -70,10 +70,11 @@ class Policy(BasePolicy): If no etag is provided in the call to setIamPolicy, then the existing policy is overwritten blindly. """ + def __init__(self, etag=None, version=None): - BasePolicy.__init__(self, - etag=etag if etag is None else _to_bytes(etag), - version=version) + BasePolicy.__init__( + self, etag=etag if etag is None else _to_bytes(etag), version=version + ) @property def bigtable_admins(self): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row.py index 13fcbca885b6..358344cefcb6 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row.py @@ -22,11 +22,10 @@ from google.cloud._helpers import _datetime_from_microseconds from google.cloud._helpers import _microseconds_from_datetime from google.cloud._helpers import _to_bytes -from google.cloud.bigtable_v2.proto import ( - data_pb2 as data_v2_pb2) +from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 -_PACK_I64 = struct.Struct('>q').pack +_PACK_I64 = struct.Struct(">q").pack MAX_MUTATIONS = 100000 """The maximum number of mutations that a row can accumulate.""" @@ -108,8 +107,7 @@ def _get_mutations(self, state=None): """ raise NotImplementedError - def _set_cell(self, column_family_id, column, value, timestamp=None, - state=None): + def _set_cell(self, column_family_id, column, value, timestamp=None, state=None): """Helper for :meth:`set_cell` Adds a mutation to set the value in a specific cell. 
@@ -148,7 +146,7 @@ def _set_cell(self, column_family_id, column, value, timestamp=None, else: timestamp_micros = _microseconds_from_datetime(timestamp) # Truncate to millisecond granularity. - timestamp_micros -= (timestamp_micros % 1000) + timestamp_micros -= timestamp_micros % 1000 mutation_val = data_v2_pb2.Mutation.SetCell( family_name=column_family_id, @@ -176,8 +174,7 @@ def _delete(self, state=None): mutation_pb = data_v2_pb2.Mutation(delete_from_row=mutation_val) self._get_mutations(state).append(mutation_pb) - def _delete_cells(self, column_family_id, columns, time_range=None, - state=None): + def _delete_cells(self, column_family_id, columns, time_range=None, state=None): """Helper for :meth:`delete_cell` and :meth:`delete_cells`. ``state`` is unused by :class:`DirectRow` but is used by @@ -205,14 +202,14 @@ def _delete_cells(self, column_family_id, columns, time_range=None, mutations_list = self._get_mutations(state) if columns is self.ALL_COLUMNS: mutation_val = data_v2_pb2.Mutation.DeleteFromFamily( - family_name=column_family_id, + family_name=column_family_id ) mutation_pb = data_v2_pb2.Mutation(delete_from_family=mutation_val) mutations_list.append(mutation_pb) else: delete_kwargs = {} if time_range is not None: - delete_kwargs['time_range'] = time_range.to_pb() + delete_kwargs["time_range"] = time_range.to_pb() to_append = [] for column in columns: @@ -220,13 +217,10 @@ def _delete_cells(self, column_family_id, columns, time_range=None, # time_range will never change if present, but the rest of # delete_kwargs will delete_kwargs.update( - family_name=column_family_id, - column_qualifier=column, + family_name=column_family_id, column_qualifier=column ) - mutation_val = data_v2_pb2.Mutation.DeleteFromColumn( - **delete_kwargs) - mutation_pb = data_v2_pb2.Mutation( - delete_from_column=mutation_val) + mutation_val = data_v2_pb2.Mutation.DeleteFromColumn(**delete_kwargs) + mutation_pb = data_v2_pb2.Mutation(delete_from_column=mutation_val) to_append.append(mutation_pb) # We don't add the mutations until all columns have been @@ -326,8 +320,7 @@ def set_cell(self, column_family_id, column, value, timestamp=None): :type timestamp: :class:`datetime.datetime` :param timestamp: (Optional) The timestamp of the operation. """ - self._set_cell(column_family_id, column, value, timestamp=timestamp, - state=None) + self._set_cell(column_family_id, column, value, timestamp=timestamp, state=None) def delete(self): """Deletes this row from the table. @@ -364,8 +357,9 @@ def delete_cell(self, column_family_id, column, time_range=None): :param time_range: (Optional) The range of time within which cells should be deleted. """ - self._delete_cells(column_family_id, [column], time_range=time_range, - state=None) + self._delete_cells( + column_family_id, [column], time_range=time_range, state=None + ) def delete_cells(self, column_family_id, columns, time_range=None): """Deletes cells in this row. @@ -392,8 +386,7 @@ def delete_cells(self, column_family_id, columns, time_range=None): :param time_range: (Optional) The range of time within which cells should be deleted. """ - self._delete_cells(column_family_id, columns, time_range=time_range, - state=None) + self._delete_cells(column_family_id, columns, time_range=time_range, state=None) def commit(self): """Makes a ``MutateRow`` API request. @@ -454,6 +447,7 @@ class ConditionalRow(_SetDeleteRow): :type filter_: :class:`.RowFilter` :param filter_: Filter to be used for conditional mutations. 
""" + def __init__(self, row_key, table, filter_): super(ConditionalRow, self).__init__(row_key, table) self._filter = filter_ @@ -511,12 +505,12 @@ def commit(self): num_false_mutations = len(false_mutations) if num_true_mutations == 0 and num_false_mutations == 0: return - if (num_true_mutations > MAX_MUTATIONS or - num_false_mutations > MAX_MUTATIONS): + if num_true_mutations > MAX_MUTATIONS or num_false_mutations > MAX_MUTATIONS: raise ValueError( - 'Exceed the maximum allowable mutations (%d). Had %s true ' - 'mutations and %d false mutations.' % ( - MAX_MUTATIONS, num_true_mutations, num_false_mutations)) + "Exceed the maximum allowable mutations (%d). Had %s true " + "mutations and %d false mutations." + % (MAX_MUTATIONS, num_true_mutations, num_false_mutations) + ) data_client = self._table._instance._client.table_data_client resp = data_client.check_and_mutate_row( @@ -524,13 +518,13 @@ def commit(self): row_key=self._row_key, predicate_filter=self._filter.to_pb(), true_mutations=true_mutations, - false_mutations=false_mutations) + false_mutations=false_mutations, + ) self.clear() return resp.predicate_matched # pylint: disable=arguments-differ - def set_cell(self, column_family_id, column, value, timestamp=None, - state=True): + def set_cell(self, column_family_id, column, value, timestamp=None, state=True): """Sets a value in this row. The cell is determined by the ``row_key`` of this @@ -566,8 +560,9 @@ def set_cell(self, column_family_id, column, value, timestamp=None, :param state: (Optional) The state that the mutation should be applied in. Defaults to :data:`True`. """ - self._set_cell(column_family_id, column, value, timestamp=timestamp, - state=state) + self._set_cell( + column_family_id, column, value, timestamp=timestamp, state=state + ) def delete(self, state=True): """Deletes this row from the table. @@ -585,8 +580,7 @@ def delete(self, state=True): """ self._delete(state=state) - def delete_cell(self, column_family_id, column, time_range=None, - state=True): + def delete_cell(self, column_family_id, column, time_range=None, state=True): """Deletes cell in this row. .. note:: @@ -613,11 +607,11 @@ def delete_cell(self, column_family_id, column, time_range=None, :param state: (Optional) The state that the mutation should be applied in. Defaults to :data:`True`. """ - self._delete_cells(column_family_id, [column], time_range=time_range, - state=state) + self._delete_cells( + column_family_id, [column], time_range=time_range, state=state + ) - def delete_cells(self, column_family_id, columns, time_range=None, - state=True): + def delete_cells(self, column_family_id, columns, time_range=None, state=True): """Deletes cells in this row. .. note:: @@ -646,8 +640,10 @@ def delete_cells(self, column_family_id, columns, time_range=None, :param state: (Optional) The state that the mutation should be applied in. Defaults to :data:`True`. 
""" - self._delete_cells(column_family_id, columns, time_range=time_range, - state=state) + self._delete_cells( + column_family_id, columns, time_range=time_range, state=state + ) + # pylint: enable=arguments-differ def clear(self): @@ -712,9 +708,8 @@ def append_cell_value(self, column_family_id, column, value): column = _to_bytes(column) value = _to_bytes(value) rule_pb = data_v2_pb2.ReadModifyWriteRule( - family_name=column_family_id, - column_qualifier=column, - append_value=value) + family_name=column_family_id, column_qualifier=column, append_value=value + ) self._rule_pb_list.append(rule_pb) def increment_cell_value(self, column_family_id, column, int_value): @@ -751,7 +746,8 @@ def increment_cell_value(self, column_family_id, column, int_value): rule_pb = data_v2_pb2.ReadModifyWriteRule( family_name=column_family_id, column_qualifier=column, - increment_amount=int_value) + increment_amount=int_value, + ) self._rule_pb_list.append(rule_pb) def commit(self): @@ -802,13 +798,15 @@ def commit(self): if num_mutations == 0: return {} if num_mutations > MAX_MUTATIONS: - raise ValueError('%d total append mutations exceed the maximum ' - 'allowable %d.' % (num_mutations, MAX_MUTATIONS)) + raise ValueError( + "%d total append mutations exceed the maximum " + "allowable %d." % (num_mutations, MAX_MUTATIONS) + ) data_client = self._table._instance._client.table_data_client row_response = data_client.read_modify_write_row( - table_name=self._table.name, row_key=self._row_key, - rules=self._rule_pb_list) + table_name=self._table.name, row_key=self._row_key, rules=self._rule_pb_list + ) # Reset modifications after commit-ing request. self.clear() @@ -886,10 +884,7 @@ def _parse_family_pb(family_pb): for column in family_pb.columns: result[column.qualifier] = cells = [] for cell in column.cells: - val_pair = ( - cell.value, - _datetime_from_microseconds(cell.timestamp_micros), - ) + val_pair = (cell.value, _datetime_from_microseconds(cell.timestamp_micros)) cells.append(val_pair) return family_pb.name, result diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py index 8eb8f474660f..f9651efd12b7 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py @@ -24,19 +24,17 @@ from google.api_core import retry from google.cloud._helpers import _datetime_from_microseconds from google.cloud._helpers import _to_bytes -from google.cloud.bigtable_v2.proto import ( - bigtable_pb2 as data_messages_v2_pb2) -from google.cloud.bigtable_v2.proto import ( - data_pb2 as data_v2_pb2) +from google.cloud.bigtable_v2.proto import bigtable_pb2 as data_messages_v2_pb2 +from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 -_MISSING_COLUMN_FAMILY = ( - 'Column family {} is not among the cells stored in this row.') +_MISSING_COLUMN_FAMILY = "Column family {} is not among the cells stored in this row." _MISSING_COLUMN = ( - 'Column {} is not among the cells stored in this row in the ' - 'column family {}.') + "Column {} is not among the cells stored in this row in the " "column family {}." +) _MISSING_INDEX = ( - 'Index {!r} is not valid for the cells stored in this row for column {} ' - 'in the column family {}. There are {} such cells.') + "Index {!r} is not valid for the cells stored in this row for column {} " + "in the column family {}. There are {} such cells." 
+) class Cell(object): @@ -68,8 +66,7 @@ def from_pb(cls, cell_pb): :returns: The cell corresponding to the protobuf. """ if cell_pb.labels: - return cls(cell_pb.value, cell_pb.timestamp_micros, - labels=cell_pb.labels) + return cls(cell_pb.value, cell_pb.timestamp_micros, labels=cell_pb.labels) else: return cls(cell_pb.value, cell_pb.timestamp_micros) @@ -80,9 +77,11 @@ def timestamp(self): def __eq__(self, other): if not isinstance(other, self.__class__): return NotImplemented - return (other.value == self.value and - other.timestamp_micros == self.timestamp_micros and - other.labels == self.labels) + return ( + other.value == self.value + and other.timestamp_micros == self.timestamp_micros + and other.labels == self.labels + ) def __ne__(self, other): return not self == other @@ -113,8 +112,10 @@ class PartialCellData(object): :type value: bytes :param value: The (accumulated) value of the (partial) cell. """ - def __init__(self, row_key, family_name, qualifier, timestamp_micros, - labels=(), value=b''): + + def __init__( + self, row_key, family_name, qualifier, timestamp_micros, labels=(), value=b"" + ): self.row_key = row_key self.family_name = family_name self.qualifier = qualifier @@ -148,8 +149,7 @@ def __init__(self, row_key): def __eq__(self, other): if not isinstance(other, self.__class__): return NotImplemented - return (other._row_key == self._row_key and - other._cells == self._cells) + return other._row_key == self._row_key and other._cells == self._cells def __ne__(self, other): return not self == other @@ -166,8 +166,7 @@ def to_dict(self): result = {} for column_family_id, columns in six.iteritems(self._cells): for column_qual, cells in six.iteritems(columns): - key = (_to_bytes(column_family_id) + b':' + - _to_bytes(column_qual)) + key = _to_bytes(column_family_id) + b":" + _to_bytes(column_qual) result[key] = cells return result @@ -253,8 +252,7 @@ def cell_value(self, column_family_id, column, index=0): cell = cells[index] except (TypeError, IndexError): num_cells = len(cells) - msg = _MISSING_INDEX.format( - index, column, column_family_id, num_cells) + msg = _MISSING_INDEX.format(index, column, column_family_id, num_cells) raise IndexError(msg) return cell.value @@ -301,8 +299,7 @@ class InvalidChunk(RuntimeError): def _retry_read_rows_exception(exc): if isinstance(exc, grpc.RpcError): exc = exceptions.from_grpc_error(exc) - return isinstance(exc, (exceptions.ServiceUnavailable, - exceptions.DeadlineExceeded)) + return isinstance(exc, (exceptions.ServiceUnavailable, exceptions.DeadlineExceeded)) DEFAULT_RETRY_READ_ROWS = retry.Retry( @@ -343,20 +340,21 @@ class PartialRowsData(object): :meth:`~google.api_core.retry.Retry.with_deadline` method. 
""" - NEW_ROW = 'New row' # No cells yet complete for row - ROW_IN_PROGRESS = 'Row in progress' # Some cells complete for row - CELL_IN_PROGRESS = 'Cell in progress' # Incomplete cell for row + NEW_ROW = "New row" # No cells yet complete for row + ROW_IN_PROGRESS = "Row in progress" # Some cells complete for row + CELL_IN_PROGRESS = "Cell in progress" # Incomplete cell for row STATE_NEW_ROW = 1 STATE_ROW_IN_PROGRESS = 2 STATE_CELL_IN_PROGRESS = 3 - read_states = {STATE_NEW_ROW: NEW_ROW, - STATE_ROW_IN_PROGRESS: ROW_IN_PROGRESS, - STATE_CELL_IN_PROGRESS: CELL_IN_PROGRESS} + read_states = { + STATE_NEW_ROW: NEW_ROW, + STATE_ROW_IN_PROGRESS: ROW_IN_PROGRESS, + STATE_CELL_IN_PROGRESS: CELL_IN_PROGRESS, + } - def __init__(self, read_method, request, - retry=DEFAULT_RETRY_READ_ROWS): + def __init__(self, read_method, request, retry=DEFAULT_RETRY_READ_ROWS): # Counter for rows returned to the user self._counter = 0 # In-progress row, unset until first response, after commit/reset @@ -409,9 +407,9 @@ class as a generator instead. def _create_retry_request(self): """Helper for :meth:`__iter__`.""" - req_manager = _ReadRowsRequestManager(self.request, - self.last_scanned_row_key, - self._counter) + req_manager = _ReadRowsRequestManager( + self.request, self.last_scanned_row_key, self._counter + ) return req_manager.build_updated_request() def _on_error(self, exc): @@ -443,8 +441,7 @@ def __iter__(self): response = self._read_next_response() except StopIteration: if self.state != self.NEW_ROW: - raise ValueError( - 'The row remains partial / is not committed.') + raise ValueError("The row remains partial / is not committed.") break for chunk in response.chunks: @@ -469,8 +466,10 @@ def _process_chunk(self, chunk): self._update_cell(chunk) if self._row is None: - if (self._previous_row is not None and - self._cell.row_key <= self._previous_row.row_key): + if ( + self._previous_row is not None + and self._cell.row_key <= self._previous_row.row_key + ): raise InvalidChunk() self._row = PartialRowData(self._cell.row_key) @@ -492,10 +491,10 @@ def _process_chunk(self, chunk): def _update_cell(self, chunk): if self._cell is None: qualifier = None - if chunk.HasField('qualifier'): + if chunk.HasField("qualifier"): qualifier = chunk.qualifier.value family = None - if chunk.HasField('family_name'): + if chunk.HasField("family_name"): family = chunk.family_name.value self._cell = PartialCellData( @@ -504,7 +503,8 @@ def _update_cell(self, chunk): qualifier, chunk.timestamp_micros, chunk.labels, - chunk.value) + chunk.value, + ) self._copy_from_previous(self._cell) self._validate_cell_data_new_cell() else: @@ -512,9 +512,7 @@ def _update_cell(self, chunk): def _validate_cell_data_new_cell(self): cell = self._cell - if (not cell.row_key or - not cell.family_name or - cell.qualifier is None): + if not cell.row_key or not cell.family_name or cell.qualifier is None: raise InvalidChunk() prev = self._previous_cell @@ -527,8 +525,8 @@ def _validate_chunk_reset_row(self, chunk): # No reset with other keys _raise_if(chunk.row_key) - _raise_if(chunk.HasField('family_name')) - _raise_if(chunk.HasField('qualifier')) + _raise_if(chunk.HasField("family_name")) + _raise_if(chunk.HasField("qualifier")) _raise_if(chunk.timestamp_micros) _raise_if(chunk.labels) _raise_if(chunk.value_size) @@ -582,31 +580,37 @@ def __init__(self, message, last_scanned_key, rows_read_so_far): def build_updated_request(self): """ Updates the given message request as per last scanned key """ - r_kwargs = {'table_name': self.message.table_name, - 
'filter': self.message.filter} + r_kwargs = { + "table_name": self.message.table_name, + "filter": self.message.filter, + } if self.message.rows_limit != 0: - r_kwargs['rows_limit'] = max(1, self.message.rows_limit - - self.rows_read_so_far) + r_kwargs["rows_limit"] = max( + 1, self.message.rows_limit - self.rows_read_so_far + ) # if neither RowSet.row_keys nor RowSet.row_ranges currently exist, # add row_range that starts with last_scanned_key as start_key_open # to request only rows that have not been returned yet - if not self.message.HasField('rows'): - row_range = data_v2_pb2.RowRange( - start_key_open=self.last_scanned_key) - r_kwargs['rows'] = data_v2_pb2.RowSet(row_ranges=[row_range]) + if not self.message.HasField("rows"): + row_range = data_v2_pb2.RowRange(start_key_open=self.last_scanned_key) + r_kwargs["rows"] = data_v2_pb2.RowSet(row_ranges=[row_range]) else: row_keys = self._filter_rows_keys() row_ranges = self._filter_row_ranges() - r_kwargs['rows'] = data_v2_pb2.RowSet(row_keys=row_keys, - row_ranges=row_ranges) + r_kwargs["rows"] = data_v2_pb2.RowSet( + row_keys=row_keys, row_ranges=row_ranges + ) return data_messages_v2_pb2.ReadRowsRequest(**r_kwargs) def _filter_rows_keys(self): """ Helper for :meth:`build_updated_request`""" - return [row_key for row_key in self.message.rows.row_keys - if row_key > self.last_scanned_key] + return [ + row_key + for row_key in self.message.rows.row_keys + if row_key > self.last_scanned_key + ] def _filter_row_ranges(self): """ Helper for :meth:`build_updated_request`""" @@ -618,7 +622,7 @@ def _filter_row_ranges(self): # NOTE: Empty string in end_key means "end of table" end_key = self._end_key_set(row_range) # if end_key is already read, skip to the next row_range - if(end_key and self._key_already_read(end_key)): + if end_key and self._key_already_read(end_key): continue # if current start_key (open or closed) is set, return its value, @@ -630,7 +634,7 @@ def _filter_row_ranges(self): # create a row_range with last_scanned_key as start_key_open # to be passed to retry request retry_row_range = row_range - if(self._key_already_read(start_key)): + if self._key_already_read(start_key): retry_row_range = copy.deepcopy(row_range) retry_row_range.start_key_closed = _to_bytes("") retry_row_range.start_key_open = self.last_scanned_key diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_filters.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_filters.py index b318dc3ab3a5..e8a70a9f4add 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_filters.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_filters.py @@ -17,8 +17,7 @@ from google.cloud._helpers import _microseconds_from_datetime from google.cloud._helpers import _to_bytes -from google.cloud.bigtable_v2.proto import ( - data_pb2 as data_v2_pb2) +from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 class RowFilter(object): @@ -264,8 +263,7 @@ def __init__(self, start=None, end=None): def __eq__(self, other): if not isinstance(other, self.__class__): return NotImplemented - return (other.start == self.start and - other.end == self.end) + return other.start == self.start and other.end == self.end def __ne__(self, other): return not self == other @@ -278,13 +276,14 @@ def to_pb(self): """ timestamp_range_kwargs = {} if self.start is not None: - timestamp_range_kwargs['start_timestamp_micros'] = ( - _microseconds_from_datetime(self.start) // 1000 * 1000) + timestamp_range_kwargs["start_timestamp_micros"] = ( + 
_microseconds_from_datetime(self.start) // 1000 * 1000 + ) if self.end is not None: end_time = _microseconds_from_datetime(self.end) if end_time % 1000 != 0: end_time = end_time // 1000 * 1000 + 1000 - timestamp_range_kwargs['end_timestamp_micros'] = end_time + timestamp_range_kwargs["end_timestamp_micros"] = end_time return data_v2_pb2.TimestampRange(**timestamp_range_kwargs) @@ -315,8 +314,7 @@ def to_pb(self): :rtype: :class:`.data_v2_pb2.RowFilter` :returns: The converted current object. """ - return data_v2_pb2.RowFilter( - timestamp_range_filter=self.range_.to_pb()) + return data_v2_pb2.RowFilter(timestamp_range_filter=self.range_.to_pb()) class ColumnRangeFilter(RowFilter): @@ -356,34 +354,44 @@ class ColumnRangeFilter(RowFilter): is set but no ``end_column`` is given """ - def __init__(self, column_family_id, start_column=None, end_column=None, - inclusive_start=None, inclusive_end=None): + def __init__( + self, + column_family_id, + start_column=None, + end_column=None, + inclusive_start=None, + inclusive_end=None, + ): self.column_family_id = column_family_id if inclusive_start is None: inclusive_start = True elif start_column is None: - raise ValueError('Inclusive start was specified but no ' - 'start column was given.') + raise ValueError( + "Inclusive start was specified but no " "start column was given." + ) self.start_column = start_column self.inclusive_start = inclusive_start if inclusive_end is None: inclusive_end = True elif end_column is None: - raise ValueError('Inclusive end was specified but no ' - 'end column was given.') + raise ValueError( + "Inclusive end was specified but no " "end column was given." + ) self.end_column = end_column self.inclusive_end = inclusive_end def __eq__(self, other): if not isinstance(other, self.__class__): return NotImplemented - return (other.column_family_id == self.column_family_id and - other.start_column == self.start_column and - other.end_column == self.end_column and - other.inclusive_start == self.inclusive_start and - other.inclusive_end == self.inclusive_end) + return ( + other.column_family_id == self.column_family_id + and other.start_column == self.start_column + and other.end_column == self.end_column + and other.inclusive_start == self.inclusive_start + and other.inclusive_end == self.inclusive_end + ) def __ne__(self, other): return not self == other @@ -397,18 +405,18 @@ def to_pb(self): :rtype: :class:`.data_v2_pb2.RowFilter` :returns: The converted current object. 
""" - column_range_kwargs = {'family_name': self.column_family_id} + column_range_kwargs = {"family_name": self.column_family_id} if self.start_column is not None: if self.inclusive_start: - key = 'start_qualifier_closed' + key = "start_qualifier_closed" else: - key = 'start_qualifier_open' + key = "start_qualifier_open" column_range_kwargs[key] = _to_bytes(self.start_column) if self.end_column is not None: if self.inclusive_end: - key = 'end_qualifier_closed' + key = "end_qualifier_closed" else: - key = 'end_qualifier_open' + key = "end_qualifier_open" column_range_kwargs[key] = _to_bytes(self.end_column) column_range = data_v2_pb2.ColumnRange(**column_range_kwargs) @@ -479,31 +487,36 @@ class ValueRangeFilter(RowFilter): is set but no ``end_value`` is given """ - def __init__(self, start_value=None, end_value=None, - inclusive_start=None, inclusive_end=None): + def __init__( + self, start_value=None, end_value=None, inclusive_start=None, inclusive_end=None + ): if inclusive_start is None: inclusive_start = True elif start_value is None: - raise ValueError('Inclusive start was specified but no ' - 'start value was given.') + raise ValueError( + "Inclusive start was specified but no " "start value was given." + ) self.start_value = start_value self.inclusive_start = inclusive_start if inclusive_end is None: inclusive_end = True elif end_value is None: - raise ValueError('Inclusive end was specified but no ' - 'end value was given.') + raise ValueError( + "Inclusive end was specified but no " "end value was given." + ) self.end_value = end_value self.inclusive_end = inclusive_end def __eq__(self, other): if not isinstance(other, self.__class__): return NotImplemented - return (other.start_value == self.start_value and - other.end_value == self.end_value and - other.inclusive_start == self.inclusive_start and - other.inclusive_end == self.inclusive_end) + return ( + other.start_value == self.start_value + and other.end_value == self.end_value + and other.inclusive_start == self.inclusive_start + and other.inclusive_end == self.inclusive_end + ) def __ne__(self, other): return not self == other @@ -520,15 +533,15 @@ def to_pb(self): value_range_kwargs = {} if self.start_value is not None: if self.inclusive_start: - key = 'start_value_closed' + key = "start_value_closed" else: - key = 'start_value_open' + key = "start_value_open" value_range_kwargs[key] = _to_bytes(self.start_value) if self.end_value is not None: if self.inclusive_end: - key = 'end_value_closed' + key = "end_value_closed" else: - key = 'end_value_open' + key = "end_value_open" value_range_kwargs[key] = _to_bytes(self.end_value) value_range = data_v2_pb2.ValueRange(**value_range_kwargs) @@ -570,8 +583,7 @@ def to_pb(self): :rtype: :class:`.data_v2_pb2.RowFilter` :returns: The converted current object. """ - return data_v2_pb2.RowFilter( - cells_per_row_offset_filter=self.num_cells) + return data_v2_pb2.RowFilter(cells_per_row_offset_filter=self.num_cells) class CellsRowLimitFilter(_CellCountFilter): @@ -605,8 +617,7 @@ def to_pb(self): :rtype: :class:`.data_v2_pb2.RowFilter` :returns: The converted current object. """ - return data_v2_pb2.RowFilter( - cells_per_column_limit_filter=self.num_cells) + return data_v2_pb2.RowFilter(cells_per_column_limit_filter=self.num_cells) class StripValueTransformerFilter(_BoolFilter): @@ -709,7 +720,8 @@ def to_pb(self): :returns: The converted current object. 
""" chain = data_v2_pb2.RowFilter.Chain( - filters=[row_filter.to_pb() for row_filter in self.filters]) + filters=[row_filter.to_pb() for row_filter in self.filters] + ) return data_v2_pb2.RowFilter(chain=chain) @@ -733,7 +745,8 @@ def to_pb(self): :returns: The converted current object. """ interleave = data_v2_pb2.RowFilter.Interleave( - filters=[row_filter.to_pb() for row_filter in self.filters]) + filters=[row_filter.to_pb() for row_filter in self.filters] + ) return data_v2_pb2.RowFilter(interleave=interleave) @@ -775,9 +788,11 @@ def __init__(self, base_filter, true_filter=None, false_filter=None): def __eq__(self, other): if not isinstance(other, self.__class__): return NotImplemented - return (other.base_filter == self.base_filter and - other.true_filter == self.true_filter and - other.false_filter == self.false_filter) + return ( + other.base_filter == self.base_filter + and other.true_filter == self.true_filter + and other.false_filter == self.false_filter + ) def __ne__(self, other): return not self == other @@ -788,10 +803,10 @@ def to_pb(self): :rtype: :class:`.data_v2_pb2.RowFilter` :returns: The converted current object. """ - condition_kwargs = {'predicate_filter': self.base_filter.to_pb()} + condition_kwargs = {"predicate_filter": self.base_filter.to_pb()} if self.true_filter is not None: - condition_kwargs['true_filter'] = self.true_filter.to_pb() + condition_kwargs["true_filter"] = self.true_filter.to_pb() if self.false_filter is not None: - condition_kwargs['false_filter'] = self.false_filter.to_pb() + condition_kwargs["false_filter"] = self.false_filter.to_pb() condition = data_v2_pb2.RowFilter.Condition(**condition_kwargs) return data_v2_pb2.RowFilter(condition=condition) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py index ab2f15231903..454194a77a1c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py @@ -66,8 +66,9 @@ def add_row_range(self, row_range): """ self.row_ranges.append(row_range) - def add_row_range_from_keys(self, start_key=None, end_key=None, - start_inclusive=True, end_inclusive=False): + def add_row_range_from_keys( + self, start_key=None, end_key=None, start_inclusive=True, end_inclusive=False + ): """Add row range to row_ranges list from the row keys :type start_key: bytes @@ -87,8 +88,7 @@ def add_row_range_from_keys(self, start_key=None, end_key=None, :param end_inclusive: (Optional) Whether the ``end_key`` should be considered inclusive. The default is False (exclusive). """ - row_range = RowRange(start_key, end_key, - start_inclusive, end_inclusive) + row_range = RowRange(start_key, end_key, start_inclusive, end_inclusive) self.row_ranges.append(row_range) def _update_message_request(self, message): @@ -126,8 +126,9 @@ class RowRange(object): considered inclusive. The default is False (exclusive). """ - def __init__(self, start_key=None, end_key=None, - start_inclusive=True, end_inclusive=False): + def __init__( + self, start_key=None, end_key=None, start_inclusive=True, end_inclusive=False + ): self.start_key = start_key self.start_inclusive = start_inclusive self.end_key = end_key @@ -141,12 +142,7 @@ def _key(self): Returns: Tuple[str]: The contents of this :class:`.RowRange`. 
""" - return ( - self.start_key, - self.start_inclusive, - self.end_key, - self.end_inclusive, - ) + return (self.start_key, self.start_inclusive, self.end_key, self.end_inclusive) def __hash__(self): return hash(self._key()) @@ -165,14 +161,14 @@ def get_range_kwargs(self): """ range_kwargs = {} if self.start_key is not None: - start_key_key = 'start_key_open' + start_key_key = "start_key_open" if self.start_inclusive: - start_key_key = 'start_key_closed' + start_key_key = "start_key_closed" range_kwargs[start_key_key] = _to_bytes(self.start_key) if self.end_key is not None: - end_key_key = 'end_key_open' + end_key_key = "end_key_open" if self.end_inclusive: - end_key_key = 'end_key_closed' + end_key_key = "end_key_closed" range_kwargs[end_key_key] = _to_bytes(self.end_key) return range_kwargs diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index e7dfbf010191..ccbb5cf47e91 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -26,7 +26,7 @@ from google.cloud.bigtable.column_family import _gc_rule_from_pb from google.cloud.bigtable.column_family import ColumnFamily from google.cloud.bigtable.batcher import MutationsBatcher -from google.cloud.bigtable.batcher import (FLUSH_COUNT, MAX_ROW_BYTES) +from google.cloud.bigtable.batcher import FLUSH_COUNT, MAX_ROW_BYTES from google.cloud.bigtable.row import AppendRow from google.cloud.bigtable.row import ConditionalRow from google.cloud.bigtable.row import DirectRow @@ -35,12 +35,11 @@ from google.cloud.bigtable.row_set import RowSet from google.cloud.bigtable.row_set import RowRange from google.cloud.bigtable import enums -from google.cloud.bigtable_v2.proto import ( - bigtable_pb2 as data_messages_v2_pb2) +from google.cloud.bigtable_v2.proto import bigtable_pb2 as data_messages_v2_pb2 +from google.cloud.bigtable_admin_v2.proto import table_pb2 as admin_messages_v2_pb2 from google.cloud.bigtable_admin_v2.proto import ( - table_pb2 as admin_messages_v2_pb2) -from google.cloud.bigtable_admin_v2.proto import ( - bigtable_table_admin_pb2 as table_admin_messages_v2_pb2) + bigtable_table_admin_pb2 as table_admin_messages_v2_pb2, +) # Maximum number of mutations in bulk (MutateRowsRequest message): @@ -126,7 +125,8 @@ def name(self): instance_id = self._instance.instance_id table_client = self._instance._client.table_data_client return table_client.table_path( - project=project, instance=instance_id, table=self.table_id) + project=project, instance=instance_id, table=self.table_id + ) def column_family(self, column_family_id, gc_rule=None): """Factory to create a column family associated with this table. @@ -169,7 +169,7 @@ def row(self, row_key, filter_=None, append=False): ``filter_`` and ``append`` are used. 
""" if append and filter_ is not None: - raise ValueError('At most one of filter_ and append can be set') + raise ValueError("At most one of filter_ and append can be set") if append: return AppendRow(row_key, self) elif filter_ is not None: @@ -180,8 +180,7 @@ def row(self, row_key, filter_=None, append=False): def __eq__(self, other): if not isinstance(other, self.__class__): return NotImplemented - return (other.table_id == self.table_id and - other._instance == self._instance) + return other.table_id == self.table_id and other._instance == self._instance def __ne__(self, other): return not self == other @@ -208,15 +207,21 @@ def create(self, initial_split_keys=[], column_families={}): table_client = self._instance._client.table_admin_client instance_name = self._instance.name - families = {id: ColumnFamily(id, self, rule).to_pb() - for (id, rule) in column_families.items()} + families = { + id: ColumnFamily(id, self, rule).to_pb() + for (id, rule) in column_families.items() + } table = admin_messages_v2_pb2.Table(column_families=families) split = table_admin_messages_v2_pb2.CreateTableRequest.Split splits = [split(key=_to_bytes(key)) for key in initial_split_keys] - table_client.create_table(parent=instance_name, table_id=self.table_id, - table=table, initial_splits=splits) + table_client.create_table( + parent=instance_name, + table_id=self.table_id, + table=table, + initial_splits=splits, + ) def exists(self): """Check whether the table exists. @@ -253,8 +258,7 @@ def list_column_families(self): result = {} for column_family_id, value_pb in table_pb.column_families.items(): gc_rule = _gc_rule_from_pb(value_pb.gc_rule) - column_family = self.column_family(column_family_id, - gc_rule=gc_rule) + column_family = self.column_family(column_family_id, gc_rule=gc_rule) result[column_family_id] = column_family return result @@ -271,8 +275,10 @@ def get_cluster_states(self): table_client = self._instance._client.table_admin_client table_pb = table_client.get_table(self.name, view=REPLICATION_VIEW) - return {cluster_id: ClusterState(value_pb.replication_state) - for cluster_id, value_pb in table_pb.cluster_states.items()} + return { + cluster_id: ClusterState(value_pb.replication_state) + for cluster_id, value_pb in table_pb.cluster_states.items() + } def read_row(self, row_key, filter_=None): """Read a single row from this table. @@ -295,12 +301,19 @@ def read_row(self, row_key, filter_=None): result_iter = iter(self.read_rows(filter_=filter_, row_set=row_set)) row = next(result_iter, None) if next(result_iter, None) is not None: - raise ValueError('More than one row was returned.') + raise ValueError("More than one row was returned.") return row - def read_rows(self, start_key=None, end_key=None, limit=None, - filter_=None, end_inclusive=False, row_set=None, - retry=DEFAULT_RETRY_READ_ROWS): + def read_rows( + self, + start_key=None, + end_key=None, + limit=None, + filter_=None, + end_inclusive=False, + row_set=None, + retry=DEFAULT_RETRY_READ_ROWS, + ): """Read rows from this table. :type start_key: bytes @@ -344,13 +357,17 @@ def read_rows(self, start_key=None, end_key=None, limit=None, the streamed results. 
""" request_pb = _create_row_request( - self.name, start_key=start_key, end_key=end_key, - filter_=filter_, limit=limit, end_inclusive=end_inclusive, - app_profile_id=self._app_profile_id, row_set=row_set) + self.name, + start_key=start_key, + end_key=end_key, + filter_=filter_, + limit=limit, + end_inclusive=end_inclusive, + app_profile_id=self._app_profile_id, + row_set=row_set, + ) data_client = self._instance._client.table_data_client - return PartialRowsData( - data_client.transport.read_rows, - request_pb, retry) + return PartialRowsData(data_client.transport.read_rows, request_pb, retry) def yield_rows(self, **kwargs): """Read rows from this table. @@ -418,8 +435,8 @@ def mutate_rows(self, rows, retry=DEFAULT_RETRY): sent. These will be in the same order as the `rows`. """ retryable_mutate_rows = _RetryableMutateRowsWorker( - self._instance._client, self.name, rows, - app_profile_id=self._app_profile_id) + self._instance._client, self.name, rows, app_profile_id=self._app_profile_id + ) return retryable_mutate_rows(retry=retry) def sample_row_keys(self): @@ -455,7 +472,8 @@ def sample_row_keys(self): """ data_client = self._instance._client.table_data_client response_iterator = data_client.sample_row_keys( - self.name, app_profile_id=self._app_profile_id) + self.name, app_profile_id=self._app_profile_id + ) return response_iterator @@ -476,10 +494,12 @@ def truncate(self, timeout=None): table_admin_client = client.table_admin_client if timeout: table_admin_client.drop_row_range( - self.name, delete_all_data_from_table=True, timeout=timeout) + self.name, delete_all_data_from_table=True, timeout=timeout + ) else: table_admin_client.drop_row_range( - self.name, delete_all_data_from_table=True) + self.name, delete_all_data_from_table=True + ) def drop_by_prefix(self, row_key_prefix, timeout=None): """ @@ -501,14 +521,14 @@ def drop_by_prefix(self, row_key_prefix, timeout=None): table_admin_client = client.table_admin_client if timeout: table_admin_client.drop_row_range( - self.name, row_key_prefix=_to_bytes(row_key_prefix), - timeout=timeout) + self.name, row_key_prefix=_to_bytes(row_key_prefix), timeout=timeout + ) else: table_admin_client.drop_row_range( - self.name, row_key_prefix=_to_bytes(row_key_prefix)) + self.name, row_key_prefix=_to_bytes(row_key_prefix) + ) - def mutations_batcher(self, flush_count=FLUSH_COUNT, - max_row_bytes=MAX_ROW_BYTES): + def mutations_batcher(self, flush_count=FLUSH_COUNT, max_row_bytes=MAX_ROW_BYTES): """Factory to create a mutation batcher associated with this instance. :type table: class @@ -580,8 +600,7 @@ def __call__(self, retry=DEFAULT_RETRY): @staticmethod def _is_retryable(status): - return (status is None or - status.code in _RetryableMutateRowsWorker.RETRY_CODES) + return status is None or status.code in _RetryableMutateRowsWorker.RETRY_CODES def _do_mutate_retryable_rows(self): """Mutate all the rows that are eligible for retry. 
@@ -611,23 +630,23 @@ def _do_mutate_retryable_rows(self): return self.responses_statuses mutate_rows_request = _mutate_rows_request( - self.table_name, retryable_rows, - app_profile_id=self.app_profile_id) + self.table_name, retryable_rows, app_profile_id=self.app_profile_id + ) data_client = self.client.table_data_client inner_api_calls = data_client._inner_api_calls - if 'mutate_rows' not in inner_api_calls: - default_retry = data_client._method_configs['MutateRows'].retry, - default_timeout = data_client._method_configs['MutateRows'].timeout - data_client._inner_api_calls[ - 'mutate_rows'] = wrap_method( - data_client.transport.mutate_rows, - default_retry=default_retry, - default_timeout=default_timeout, - client_info=data_client._client_info, - ) - - responses = data_client._inner_api_calls['mutate_rows']( - mutate_rows_request, retry=None) + if "mutate_rows" not in inner_api_calls: + default_retry = (data_client._method_configs["MutateRows"].retry,) + default_timeout = data_client._method_configs["MutateRows"].timeout + data_client._inner_api_calls["mutate_rows"] = wrap_method( + data_client.transport.mutate_rows, + default_retry=default_retry, + default_timeout=default_timeout, + client_info=data_client._client_info, + ) + + responses = data_client._inner_api_calls["mutate_rows"]( + mutate_rows_request, retry=None + ) num_responses = 0 num_retryable_responses = 0 @@ -643,8 +662,11 @@ def _do_mutate_retryable_rows(self): if len(retryable_rows) != num_responses: raise RuntimeError( - 'Unexpected number of responses', num_responses, - 'Expected', len(retryable_rows)) + "Unexpected number of responses", + num_responses, + "Expected", + len(retryable_rows), + ) if num_retryable_responses: raise _BigtableRetryableError @@ -693,11 +715,9 @@ def __repr__(self): replication_dict = { enums.Table.ReplicationState.STATE_NOT_KNOWN: "STATE_NOT_KNOWN", enums.Table.ReplicationState.INITIALIZING: "INITIALIZING", - enums.Table.ReplicationState.PLANNED_MAINTENANCE: - "PLANNED_MAINTENANCE", - enums.Table.ReplicationState.UNPLANNED_MAINTENANCE: - "UNPLANNED_MAINTENANCE", - enums.Table.ReplicationState.READY: "READY" + enums.Table.ReplicationState.PLANNED_MAINTENANCE: "PLANNED_MAINTENANCE", + enums.Table.ReplicationState.UNPLANNED_MAINTENANCE: "UNPLANNED_MAINTENANCE", + enums.Table.ReplicationState.READY: "READY", } return replication_dict[self.replication_state] @@ -729,9 +749,16 @@ def __ne__(self, other): return not self == other -def _create_row_request(table_name, start_key=None, end_key=None, - filter_=None, limit=None, end_inclusive=False, - app_profile_id=None, row_set=None): +def _create_row_request( + table_name, + start_key=None, + end_key=None, + filter_=None, + limit=None, + end_inclusive=False, + app_profile_id=None, + row_set=None, +): """Creates a request to read rows in a table. 
:type table_name: str @@ -772,25 +799,22 @@ def _create_row_request(table_name, start_key=None, end_key=None, :raises: :class:`ValueError ` if both ``row_set`` and one of ``start_key`` or ``end_key`` are set """ - request_kwargs = {'table_name': table_name} - if ((start_key is not None or end_key is not None) and - row_set is not None): - raise ValueError('Row range and row set cannot be ' - 'set simultaneously') + request_kwargs = {"table_name": table_name} + if (start_key is not None or end_key is not None) and row_set is not None: + raise ValueError("Row range and row set cannot be " "set simultaneously") if filter_ is not None: - request_kwargs['filter'] = filter_.to_pb() + request_kwargs["filter"] = filter_.to_pb() if limit is not None: - request_kwargs['rows_limit'] = limit + request_kwargs["rows_limit"] = limit if app_profile_id is not None: - request_kwargs['app_profile_id'] = app_profile_id + request_kwargs["app_profile_id"] = app_profile_id message = data_messages_v2_pb2.ReadRowsRequest(**request_kwargs) if start_key is not None or end_key is not None: row_set = RowSet() - row_set.add_row_range(RowRange(start_key, end_key, - end_inclusive=end_inclusive)) + row_set.add_row_range(RowRange(start_key, end_key, end_inclusive=end_inclusive)) if row_set is not None: row_set._update_message_request(message) @@ -816,7 +840,8 @@ def _mutate_rows_request(table_name, rows, app_profile_id=None): greater than 100,000 """ request_pb = data_messages_v2_pb2.MutateRowsRequest( - table_name=table_name, app_profile_id=app_profile_id) + table_name=table_name, app_profile_id=app_profile_id + ) mutations_count = 0 for row in rows: _check_row_table_name(table_name, row) @@ -825,8 +850,9 @@ def _mutate_rows_request(table_name, rows, app_profile_id=None): request_pb.entries.add(row_key=row.row_key, mutations=mutations) mutations_count += len(mutations) if mutations_count > _MAX_BULK_MUTATIONS: - raise TooManyMutationsError('Maximum number of mutations is %s' % - (_MAX_BULK_MUTATIONS,)) + raise TooManyMutationsError( + "Maximum number of mutations is %s" % (_MAX_BULK_MUTATIONS,) + ) return request_pb @@ -845,8 +871,9 @@ def _check_row_table_name(table_name, row): """ if row.table is not None and row.table.name != table_name: raise TableMismatchError( - 'Row %s is a part of %s table. Current table: %s' % - (row.row_key, row.table.name, table_name)) + "Row %s is a part of %s table. Current table: %s" + % (row.row_key, row.table.name, table_name) + ) def _check_row_type(row): @@ -860,5 +887,6 @@ def _check_row_type(row): instance of DirectRow. """ if not isinstance(row, DirectRow): - raise TypeError('Bulk processing can not be applied for ' - 'conditional or append mutations.') + raise TypeError( + "Bulk processing can not be applied for " "conditional or append mutations." 
+ ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py index f5555e0c70bd..1ce80625ec39 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py @@ -23,21 +23,15 @@ class BigtableInstanceAdminClient( - bigtable_instance_admin_client.BigtableInstanceAdminClient): - __doc__ = ( - bigtable_instance_admin_client.BigtableInstanceAdminClient.__doc__) + bigtable_instance_admin_client.BigtableInstanceAdminClient +): + __doc__ = bigtable_instance_admin_client.BigtableInstanceAdminClient.__doc__ enums = enums -class BigtableTableAdminClient( - bigtable_table_admin_client.BigtableTableAdminClient): +class BigtableTableAdminClient(bigtable_table_admin_client.BigtableTableAdminClient): __doc__ = bigtable_table_admin_client.BigtableTableAdminClient.__doc__ enums = enums -__all__ = ( - 'enums', - 'types', - 'BigtableInstanceAdminClient', - 'BigtableTableAdminClient', -) +__all__ = ("enums", "types", "BigtableInstanceAdminClient", "BigtableTableAdminClient") diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py index 33da8341ea3e..535b65ac54db 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py @@ -33,7 +33,9 @@ from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client_config from google.cloud.bigtable_admin_v2.gapic import enums -from google.cloud.bigtable_admin_v2.gapic.transports import bigtable_instance_admin_grpc_transport +from google.cloud.bigtable_admin_v2.gapic.transports import ( + bigtable_instance_admin_grpc_transport, +) from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2 from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2_grpc from google.cloud.bigtable_admin_v2.proto import instance_pb2 @@ -43,8 +45,7 @@ from google.protobuf import empty_pb2 from google.protobuf import field_mask_pb2 -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( - 'google-cloud-bigtable', ).version +_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-bigtable").version class BigtableInstanceAdminClient(object): @@ -54,12 +55,12 @@ class BigtableInstanceAdminClient(object): tables' metadata or data stored in those tables. """ - SERVICE_ADDRESS = 'bigtableadmin.googleapis.com:443' + SERVICE_ADDRESS = "bigtableadmin.googleapis.com:443" """The default address of the service.""" # The name of the interface for this client. This is the key used to # find the method configuration in the client_config dictionary. - _INTERFACE_NAME = 'google.bigtable.admin.v2.BigtableInstanceAdmin' + _INTERFACE_NAME = "google.bigtable.admin.v2.BigtableInstanceAdmin" @classmethod def from_service_account_file(cls, filename, *args, **kwargs): @@ -75,9 +76,8 @@ def from_service_account_file(cls, filename, *args, **kwargs): Returns: BigtableInstanceAdminClient: The constructed client. 
""" - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs['credentials'] = credentials + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials return cls(*args, **kwargs) from_service_account_json = from_service_account_file @@ -86,15 +86,14 @@ def from_service_account_file(cls, filename, *args, **kwargs): def project_path(cls, project): """Return a fully-qualified project string.""" return google.api_core.path_template.expand( - 'projects/{project}', - project=project, + "projects/{project}", project=project ) @classmethod def instance_path(cls, project, instance): """Return a fully-qualified instance string.""" return google.api_core.path_template.expand( - 'projects/{project}/instances/{instance}', + "projects/{project}/instances/{instance}", project=project, instance=instance, ) @@ -103,7 +102,7 @@ def instance_path(cls, project, instance): def app_profile_path(cls, project, instance, app_profile): """Return a fully-qualified app_profile string.""" return google.api_core.path_template.expand( - 'projects/{project}/instances/{instance}/appProfiles/{app_profile}', + "projects/{project}/instances/{instance}/appProfiles/{app_profile}", project=project, instance=instance, app_profile=app_profile, @@ -113,7 +112,7 @@ def app_profile_path(cls, project, instance, app_profile): def cluster_path(cls, project, instance, cluster): """Return a fully-qualified cluster string.""" return google.api_core.path_template.expand( - 'projects/{project}/instances/{instance}/clusters/{cluster}', + "projects/{project}/instances/{instance}/clusters/{cluster}", project=project, instance=instance, cluster=cluster, @@ -123,17 +122,19 @@ def cluster_path(cls, project, instance, cluster): def location_path(cls, project, location): """Return a fully-qualified location string.""" return google.api_core.path_template.expand( - 'projects/{project}/locations/{location}', + "projects/{project}/locations/{location}", project=project, location=location, ) - def __init__(self, - transport=None, - channel=None, - credentials=None, - client_config=None, - client_info=None): + def __init__( + self, + transport=None, + channel=None, + credentials=None, + client_config=None, + client_info=None, + ): """Constructor. Args: @@ -167,18 +168,19 @@ def __init__(self, # Raise deprecation warnings for things we want to go away. if client_config is not None: warnings.warn( - 'The `client_config` argument is deprecated.', + "The `client_config` argument is deprecated.", PendingDeprecationWarning, - stacklevel=2) + stacklevel=2, + ) else: client_config = bigtable_instance_admin_client_config.config if channel: warnings.warn( - 'The `channel` argument is deprecated; use ' - '`transport` instead.', + "The `channel` argument is deprecated; use " "`transport` instead.", PendingDeprecationWarning, - stacklevel=2) + stacklevel=2, + ) # Instantiate the transport. # The transport is responsible for handling serialization and @@ -187,25 +189,24 @@ def __init__(self, if callable(transport): self.transport = transport( credentials=credentials, - default_class=bigtable_instance_admin_grpc_transport. - BigtableInstanceAdminGrpcTransport, + default_class=bigtable_instance_admin_grpc_transport.BigtableInstanceAdminGrpcTransport, ) else: if credentials: raise ValueError( - 'Received both a transport instance and ' - 'credentials; these are mutually exclusive.') + "Received both a transport instance and " + "credentials; these are mutually exclusive." 
+ ) self.transport = transport else: self.transport = bigtable_instance_admin_grpc_transport.BigtableInstanceAdminGrpcTransport( - address=self.SERVICE_ADDRESS, - channel=channel, - credentials=credentials, + address=self.SERVICE_ADDRESS, channel=channel, credentials=credentials ) if client_info is None: client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION, ) + gapic_version=_GAPIC_LIBRARY_VERSION + ) else: client_info.gapic_version = _GAPIC_LIBRARY_VERSION self._client_info = client_info @@ -215,7 +216,8 @@ def __init__(self, # (Ordinarily, these are the defaults specified in the `*_config.py` # file next to this one.) self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config['interfaces'][self._INTERFACE_NAME], ) + client_config["interfaces"][self._INTERFACE_NAME] + ) # Save a dictionary of cached API call functions. # These are the actual callables which invoke the proper @@ -224,14 +226,16 @@ def __init__(self, self._inner_api_calls = {} # Service calls - def create_instance(self, - parent, - instance_id, - instance, - clusters, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None): + def create_instance( + self, + parent, + instance_id, + instance, + clusters, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): """ Create an instance within a project. @@ -300,36 +304,35 @@ def create_instance(self, ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if 'create_instance' not in self._inner_api_calls: + if "create_instance" not in self._inner_api_calls: self._inner_api_calls[ - 'create_instance'] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_instance, - default_retry=self._method_configs['CreateInstance'].retry, - default_timeout=self._method_configs['CreateInstance']. 
- timeout, - client_info=self._client_info, - ) + "create_instance" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.create_instance, + default_retry=self._method_configs["CreateInstance"].retry, + default_timeout=self._method_configs["CreateInstance"].timeout, + client_info=self._client_info, + ) request = bigtable_instance_admin_pb2.CreateInstanceRequest( - parent=parent, - instance_id=instance_id, - instance=instance, - clusters=clusters, + parent=parent, instance_id=instance_id, instance=instance, clusters=clusters ) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [('parent', parent)] + routing_header = [("parent", parent)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header) + routing_header + ) metadata.append(routing_metadata) - operation = self._inner_api_calls['create_instance']( - request, retry=retry, timeout=timeout, metadata=metadata) + operation = self._inner_api_calls["create_instance"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) return google.api_core.operation.from_gapic( operation, self.transport._operations_client, @@ -337,11 +340,13 @@ def create_instance(self, metadata_type=bigtable_instance_admin_pb2.CreateInstanceMetadata, ) - def get_instance(self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None): + def get_instance( + self, + name, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): """ Gets information about an instance. @@ -377,38 +382,42 @@ def get_instance(self, ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if 'get_instance' not in self._inner_api_calls: + if "get_instance" not in self._inner_api_calls: self._inner_api_calls[ - 'get_instance'] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_instance, - default_retry=self._method_configs['GetInstance'].retry, - default_timeout=self._method_configs['GetInstance']. - timeout, - client_info=self._client_info, - ) + "get_instance" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.get_instance, + default_retry=self._method_configs["GetInstance"].retry, + default_timeout=self._method_configs["GetInstance"].timeout, + client_info=self._client_info, + ) - request = bigtable_instance_admin_pb2.GetInstanceRequest(name=name, ) + request = bigtable_instance_admin_pb2.GetInstanceRequest(name=name) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [('name', name)] + routing_header = [("name", name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header) + routing_header + ) metadata.append(routing_metadata) - return self._inner_api_calls['get_instance']( - request, retry=retry, timeout=timeout, metadata=metadata) + return self._inner_api_calls["get_instance"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) - def list_instances(self, - parent, - page_token=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None): + def list_instances( + self, + parent, + page_token=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): """ Lists information about instances in a project. 
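As a quick illustration of the instance-admin surface touched by the hunks above, the sketch below exercises create_instance and get_instance. Only the method signatures, the path helpers, and the operation wrapper come from this diff; the client construction, the "example-project"/"example-instance" identifiers, and the Instance message contents are assumptions made for the example.

    from google.cloud.bigtable_admin_v2 import BigtableInstanceAdminClient, types

    # Assumes application default credentials; from_service_account_file() is the
    # alternative constructor shown earlier in this diff.
    client = BigtableInstanceAdminClient()

    parent = client.project_path("example-project")        # hypothetical project id
    instance = types.Instance(display_name="Example")      # minimal Instance message (assumed fields)
    clusters = {}                                           # cluster id -> Cluster message

    # create_instance wraps the RPC in a long-running operation (from_gapic above).
    operation = client.create_instance(parent, "example-instance", instance, clusters)
    operation.result()                                      # block until the LRO completes

    # get_instance takes the fully-qualified name produced by instance_path().
    name = client.instance_path("example-project", "example-instance")
    print(client.get_instance(name).display_name)
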
@@ -445,44 +454,47 @@ def list_instances(self, ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if 'list_instances' not in self._inner_api_calls: + if "list_instances" not in self._inner_api_calls: self._inner_api_calls[ - 'list_instances'] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_instances, - default_retry=self._method_configs['ListInstances'].retry, - default_timeout=self._method_configs['ListInstances']. - timeout, - client_info=self._client_info, - ) + "list_instances" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.list_instances, + default_retry=self._method_configs["ListInstances"].retry, + default_timeout=self._method_configs["ListInstances"].timeout, + client_info=self._client_info, + ) request = bigtable_instance_admin_pb2.ListInstancesRequest( - parent=parent, - page_token=page_token, + parent=parent, page_token=page_token ) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [('parent', parent)] + routing_header = [("parent", parent)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header) + routing_header + ) metadata.append(routing_metadata) - return self._inner_api_calls['list_instances']( - request, retry=retry, timeout=timeout, metadata=metadata) - - def update_instance(self, - name, - display_name, - type_, - labels, - state=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None): + return self._inner_api_calls["list_instances"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + + def update_instance( + self, + name, + display_name, + type_, + labels, + state=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): """ Updates an instance within a project. @@ -545,45 +557,44 @@ def update_instance(self, ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if 'update_instance' not in self._inner_api_calls: + if "update_instance" not in self._inner_api_calls: self._inner_api_calls[ - 'update_instance'] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_instance, - default_retry=self._method_configs['UpdateInstance'].retry, - default_timeout=self._method_configs['UpdateInstance']. 
- timeout, - client_info=self._client_info, - ) + "update_instance" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.update_instance, + default_retry=self._method_configs["UpdateInstance"].retry, + default_timeout=self._method_configs["UpdateInstance"].timeout, + client_info=self._client_info, + ) request = instance_pb2.Instance( - name=name, - display_name=display_name, - type=type_, - labels=labels, - state=state, + name=name, display_name=display_name, type=type_, labels=labels, state=state ) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [('name', name)] + routing_header = [("name", name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header) + routing_header + ) metadata.append(routing_metadata) - return self._inner_api_calls['update_instance']( - request, retry=retry, timeout=timeout, metadata=metadata) + return self._inner_api_calls["update_instance"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) def partial_update_instance( - self, - instance, - update_mask, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None): + self, + instance, + update_mask, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): """ Partially updates an instance within a project. @@ -639,35 +650,35 @@ def partial_update_instance( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if 'partial_update_instance' not in self._inner_api_calls: + if "partial_update_instance" not in self._inner_api_calls: self._inner_api_calls[ - 'partial_update_instance'] = google.api_core.gapic_v1.method.wrap_method( - self.transport.partial_update_instance, - default_retry=self. - _method_configs['PartialUpdateInstance'].retry, - default_timeout=self. 
- _method_configs['PartialUpdateInstance'].timeout, - client_info=self._client_info, - ) + "partial_update_instance" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.partial_update_instance, + default_retry=self._method_configs["PartialUpdateInstance"].retry, + default_timeout=self._method_configs["PartialUpdateInstance"].timeout, + client_info=self._client_info, + ) request = bigtable_instance_admin_pb2.PartialUpdateInstanceRequest( - instance=instance, - update_mask=update_mask, + instance=instance, update_mask=update_mask ) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [('instance.name', instance.name)] + routing_header = [("instance.name", instance.name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header) + routing_header + ) metadata.append(routing_metadata) - operation = self._inner_api_calls['partial_update_instance']( - request, retry=retry, timeout=timeout, metadata=metadata) + operation = self._inner_api_calls["partial_update_instance"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) return google.api_core.operation.from_gapic( operation, self.transport._operations_client, @@ -675,11 +686,13 @@ def partial_update_instance( metadata_type=bigtable_instance_admin_pb2.UpdateInstanceMetadata, ) - def delete_instance(self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None): + def delete_instance( + self, + name, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): """ Delete an instance from a project. @@ -712,40 +725,43 @@ def delete_instance(self, ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if 'delete_instance' not in self._inner_api_calls: + if "delete_instance" not in self._inner_api_calls: self._inner_api_calls[ - 'delete_instance'] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_instance, - default_retry=self._method_configs['DeleteInstance'].retry, - default_timeout=self._method_configs['DeleteInstance']. 
- timeout, - client_info=self._client_info, - ) + "delete_instance" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.delete_instance, + default_retry=self._method_configs["DeleteInstance"].retry, + default_timeout=self._method_configs["DeleteInstance"].timeout, + client_info=self._client_info, + ) - request = bigtable_instance_admin_pb2.DeleteInstanceRequest( - name=name, ) + request = bigtable_instance_admin_pb2.DeleteInstanceRequest(name=name) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [('name', name)] + routing_header = [("name", name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header) + routing_header + ) metadata.append(routing_metadata) - self._inner_api_calls['delete_instance']( - request, retry=retry, timeout=timeout, metadata=metadata) + self._inner_api_calls["delete_instance"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) - def create_cluster(self, - parent, - cluster_id, - cluster, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None): + def create_cluster( + self, + parent, + cluster_id, + cluster, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): """ Creates a cluster within an instance. @@ -804,35 +820,35 @@ def create_cluster(self, ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if 'create_cluster' not in self._inner_api_calls: + if "create_cluster" not in self._inner_api_calls: self._inner_api_calls[ - 'create_cluster'] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_cluster, - default_retry=self._method_configs['CreateCluster'].retry, - default_timeout=self._method_configs['CreateCluster']. - timeout, - client_info=self._client_info, - ) + "create_cluster" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.create_cluster, + default_retry=self._method_configs["CreateCluster"].retry, + default_timeout=self._method_configs["CreateCluster"].timeout, + client_info=self._client_info, + ) request = bigtable_instance_admin_pb2.CreateClusterRequest( - parent=parent, - cluster_id=cluster_id, - cluster=cluster, + parent=parent, cluster_id=cluster_id, cluster=cluster ) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [('parent', parent)] + routing_header = [("parent", parent)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header) + routing_header + ) metadata.append(routing_metadata) - operation = self._inner_api_calls['create_cluster']( - request, retry=retry, timeout=timeout, metadata=metadata) + operation = self._inner_api_calls["create_cluster"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) return google.api_core.operation.from_gapic( operation, self.transport._operations_client, @@ -840,11 +856,13 @@ def create_cluster(self, metadata_type=bigtable_instance_admin_pb2.CreateClusterMetadata, ) - def get_cluster(self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None): + def get_cluster( + self, + name, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): """ Gets information about a cluster. 
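A companion sketch for the cluster calls reformatted above. The create_cluster(parent, cluster_id, cluster) signature, the long-running-operation wrapper, and the cluster_path()/location_path() helpers are taken from this diff; the resource ids, node count, and storage type are illustrative assumptions.

    from google.cloud.bigtable_admin_v2 import BigtableInstanceAdminClient, enums, types

    client = BigtableInstanceAdminClient()
    parent = client.instance_path("example-project", "example-instance")

    cluster = types.Cluster(
        location=client.location_path("example-project", "us-central1-b"),  # assumed zone
        serve_nodes=3,
        default_storage_type=enums.StorageType.SSD,
    )

    # create_cluster returns a long-running operation whose result is a Cluster message.
    operation = client.create_cluster(parent, "example-cluster", cluster)
    operation.result()

    # get_cluster takes the fully-qualified name produced by cluster_path().
    name = client.cluster_path("example-project", "example-instance", "example-cluster")
    print(client.get_cluster(name).serve_nodes)
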
@@ -880,37 +898,42 @@ def get_cluster(self, ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if 'get_cluster' not in self._inner_api_calls: + if "get_cluster" not in self._inner_api_calls: self._inner_api_calls[ - 'get_cluster'] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_cluster, - default_retry=self._method_configs['GetCluster'].retry, - default_timeout=self._method_configs['GetCluster'].timeout, - client_info=self._client_info, - ) + "get_cluster" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.get_cluster, + default_retry=self._method_configs["GetCluster"].retry, + default_timeout=self._method_configs["GetCluster"].timeout, + client_info=self._client_info, + ) - request = bigtable_instance_admin_pb2.GetClusterRequest(name=name, ) + request = bigtable_instance_admin_pb2.GetClusterRequest(name=name) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [('name', name)] + routing_header = [("name", name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header) + routing_header + ) metadata.append(routing_metadata) - return self._inner_api_calls['get_cluster']( - request, retry=retry, timeout=timeout, metadata=metadata) + return self._inner_api_calls["get_cluster"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) - def list_clusters(self, - parent, - page_token=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None): + def list_clusters( + self, + parent, + page_token=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): """ Lists information about clusters in an instance. @@ -950,44 +973,47 @@ def list_clusters(self, ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if 'list_clusters' not in self._inner_api_calls: + if "list_clusters" not in self._inner_api_calls: self._inner_api_calls[ - 'list_clusters'] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_clusters, - default_retry=self._method_configs['ListClusters'].retry, - default_timeout=self._method_configs['ListClusters']. 
- timeout, - client_info=self._client_info, - ) + "list_clusters" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.list_clusters, + default_retry=self._method_configs["ListClusters"].retry, + default_timeout=self._method_configs["ListClusters"].timeout, + client_info=self._client_info, + ) request = bigtable_instance_admin_pb2.ListClustersRequest( - parent=parent, - page_token=page_token, + parent=parent, page_token=page_token ) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [('parent', parent)] + routing_header = [("parent", parent)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header) + routing_header + ) metadata.append(routing_metadata) - return self._inner_api_calls['list_clusters']( - request, retry=retry, timeout=timeout, metadata=metadata) - - def update_cluster(self, - name, - serve_nodes, - location=None, - state=None, - default_storage_type=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None): + return self._inner_api_calls["list_clusters"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + + def update_cluster( + self, + name, + serve_nodes, + location=None, + state=None, + default_storage_type=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): """ Updates a cluster within an instance. @@ -1044,15 +1070,15 @@ def update_cluster(self, ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if 'update_cluster' not in self._inner_api_calls: + if "update_cluster" not in self._inner_api_calls: self._inner_api_calls[ - 'update_cluster'] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_cluster, - default_retry=self._method_configs['UpdateCluster'].retry, - default_timeout=self._method_configs['UpdateCluster']. - timeout, - client_info=self._client_info, - ) + "update_cluster" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.update_cluster, + default_retry=self._method_configs["UpdateCluster"].retry, + default_timeout=self._method_configs["UpdateCluster"].timeout, + client_info=self._client_info, + ) request = instance_pb2.Cluster( name=name, @@ -1065,16 +1091,18 @@ def update_cluster(self, metadata = [] metadata = list(metadata) try: - routing_header = [('name', name)] + routing_header = [("name", name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header) + routing_header + ) metadata.append(routing_metadata) - operation = self._inner_api_calls['update_cluster']( - request, retry=retry, timeout=timeout, metadata=metadata) + operation = self._inner_api_calls["update_cluster"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) return google.api_core.operation.from_gapic( operation, self.transport._operations_client, @@ -1082,11 +1110,13 @@ def update_cluster(self, metadata_type=bigtable_instance_admin_pb2.UpdateClusterMetadata, ) - def delete_cluster(self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None): + def delete_cluster( + self, + name, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): """ Deletes a cluster from an instance. 
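One more hedged sketch covering list_clusters and update_cluster as reformatted above. In this diff, list_clusters returns the response directly rather than a page iterator, and update_cluster is a long-running operation; the identifiers, the clusters field access, and the new node count are assumptions for illustration only.

    from google.cloud.bigtable_admin_v2 import BigtableInstanceAdminClient

    client = BigtableInstanceAdminClient()
    parent = client.instance_path("example-project", "example-instance")

    # list_clusters is not wrapped in a GRPCIterator, so the response is used directly.
    response = client.list_clusters(parent)
    for cluster in response.clusters:          # assumed response field name
        print(cluster.name, cluster.serve_nodes)

    # update_cluster resizes a cluster in place and also returns a long-running operation.
    name = client.cluster_path("example-project", "example-instance", "example-cluster")
    operation = client.update_cluster(name, serve_nodes=5)
    operation.result()
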
@@ -1119,40 +1149,44 @@ def delete_cluster(self, ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if 'delete_cluster' not in self._inner_api_calls: + if "delete_cluster" not in self._inner_api_calls: self._inner_api_calls[ - 'delete_cluster'] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_cluster, - default_retry=self._method_configs['DeleteCluster'].retry, - default_timeout=self._method_configs['DeleteCluster']. - timeout, - client_info=self._client_info, - ) + "delete_cluster" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.delete_cluster, + default_retry=self._method_configs["DeleteCluster"].retry, + default_timeout=self._method_configs["DeleteCluster"].timeout, + client_info=self._client_info, + ) - request = bigtable_instance_admin_pb2.DeleteClusterRequest(name=name, ) + request = bigtable_instance_admin_pb2.DeleteClusterRequest(name=name) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [('name', name)] + routing_header = [("name", name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header) + routing_header + ) metadata.append(routing_metadata) - self._inner_api_calls['delete_cluster']( - request, retry=retry, timeout=timeout, metadata=metadata) - - def create_app_profile(self, - parent, - app_profile_id, - app_profile, - ignore_warnings=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None): + self._inner_api_calls["delete_cluster"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + + def create_app_profile( + self, + parent, + app_profile_id, + app_profile, + ignore_warnings=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): """ Creates an app profile within an instance. @@ -1203,16 +1237,15 @@ def create_app_profile(self, ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if 'create_app_profile' not in self._inner_api_calls: + if "create_app_profile" not in self._inner_api_calls: self._inner_api_calls[ - 'create_app_profile'] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_app_profile, - default_retry=self._method_configs['CreateAppProfile']. - retry, - default_timeout=self._method_configs['CreateAppProfile']. 
- timeout, - client_info=self._client_info, - ) + "create_app_profile" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.create_app_profile, + default_retry=self._method_configs["CreateAppProfile"].retry, + default_timeout=self._method_configs["CreateAppProfile"].timeout, + client_info=self._client_info, + ) request = bigtable_instance_admin_pb2.CreateAppProfileRequest( parent=parent, @@ -1224,22 +1257,26 @@ def create_app_profile(self, metadata = [] metadata = list(metadata) try: - routing_header = [('parent', parent)] + routing_header = [("parent", parent)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header) + routing_header + ) metadata.append(routing_metadata) - return self._inner_api_calls['create_app_profile']( - request, retry=retry, timeout=timeout, metadata=metadata) + return self._inner_api_calls["create_app_profile"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) - def get_app_profile(self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None): + def get_app_profile( + self, + name, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): """ Gets information about an app profile. @@ -1275,38 +1312,42 @@ def get_app_profile(self, ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if 'get_app_profile' not in self._inner_api_calls: + if "get_app_profile" not in self._inner_api_calls: self._inner_api_calls[ - 'get_app_profile'] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_app_profile, - default_retry=self._method_configs['GetAppProfile'].retry, - default_timeout=self._method_configs['GetAppProfile']. - timeout, - client_info=self._client_info, - ) + "get_app_profile" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.get_app_profile, + default_retry=self._method_configs["GetAppProfile"].retry, + default_timeout=self._method_configs["GetAppProfile"].timeout, + client_info=self._client_info, + ) - request = bigtable_instance_admin_pb2.GetAppProfileRequest(name=name, ) + request = bigtable_instance_admin_pb2.GetAppProfileRequest(name=name) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [('name', name)] + routing_header = [("name", name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header) + routing_header + ) metadata.append(routing_metadata) - return self._inner_api_calls['get_app_profile']( - request, retry=retry, timeout=timeout, metadata=metadata) + return self._inner_api_calls["get_app_profile"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) - def list_app_profiles(self, - parent, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None): + def list_app_profiles( + self, + parent, + page_size=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): """ Lists information about app profiles in an instance. @@ -1362,54 +1403,56 @@ def list_app_profiles(self, ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
- if 'list_app_profiles' not in self._inner_api_calls: + if "list_app_profiles" not in self._inner_api_calls: self._inner_api_calls[ - 'list_app_profiles'] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_app_profiles, - default_retry=self._method_configs['ListAppProfiles']. - retry, - default_timeout=self._method_configs['ListAppProfiles']. - timeout, - client_info=self._client_info, - ) + "list_app_profiles" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.list_app_profiles, + default_retry=self._method_configs["ListAppProfiles"].retry, + default_timeout=self._method_configs["ListAppProfiles"].timeout, + client_info=self._client_info, + ) request = bigtable_instance_admin_pb2.ListAppProfilesRequest( - parent=parent, - page_size=page_size, + parent=parent, page_size=page_size ) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [('parent', parent)] + routing_header = [("parent", parent)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header) + routing_header + ) metadata.append(routing_metadata) iterator = google.api_core.page_iterator.GRPCIterator( client=None, method=functools.partial( - self._inner_api_calls['list_app_profiles'], + self._inner_api_calls["list_app_profiles"], retry=retry, timeout=timeout, - metadata=metadata), + metadata=metadata, + ), request=request, - items_field='app_profiles', - request_token_field='page_token', - response_token_field='next_page_token', + items_field="app_profiles", + request_token_field="page_token", + response_token_field="next_page_token", ) return iterator - def update_app_profile(self, - app_profile, - update_mask, - ignore_warnings=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None): + def update_app_profile( + self, + app_profile, + update_mask, + ignore_warnings=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): """ Updates an app profile within an instance. @@ -1466,16 +1509,15 @@ def update_app_profile(self, ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if 'update_app_profile' not in self._inner_api_calls: + if "update_app_profile" not in self._inner_api_calls: self._inner_api_calls[ - 'update_app_profile'] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_app_profile, - default_retry=self._method_configs['UpdateAppProfile']. - retry, - default_timeout=self._method_configs['UpdateAppProfile']. 
- timeout, - client_info=self._client_info, - ) + "update_app_profile" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.update_app_profile, + default_retry=self._method_configs["UpdateAppProfile"].retry, + default_timeout=self._method_configs["UpdateAppProfile"].timeout, + client_info=self._client_info, + ) request = bigtable_instance_admin_pb2.UpdateAppProfileRequest( app_profile=app_profile, @@ -1486,16 +1528,18 @@ def update_app_profile(self, metadata = [] metadata = list(metadata) try: - routing_header = [('app_profile.name', app_profile.name)] + routing_header = [("app_profile.name", app_profile.name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header) + routing_header + ) metadata.append(routing_metadata) - operation = self._inner_api_calls['update_app_profile']( - request, retry=retry, timeout=timeout, metadata=metadata) + operation = self._inner_api_calls["update_app_profile"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) return google.api_core.operation.from_gapic( operation, self.transport._operations_client, @@ -1503,12 +1547,14 @@ def update_app_profile(self, metadata_type=bigtable_instance_admin_pb2.UpdateAppProfileMetadata, ) - def delete_app_profile(self, - name, - ignore_warnings, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None): + def delete_app_profile( + self, + name, + ignore_warnings, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): """ Deletes an app profile from an instance. @@ -1545,41 +1591,43 @@ def delete_app_profile(self, ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if 'delete_app_profile' not in self._inner_api_calls: + if "delete_app_profile" not in self._inner_api_calls: self._inner_api_calls[ - 'delete_app_profile'] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_app_profile, - default_retry=self._method_configs['DeleteAppProfile']. - retry, - default_timeout=self._method_configs['DeleteAppProfile']. 
- timeout, - client_info=self._client_info, - ) + "delete_app_profile" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.delete_app_profile, + default_retry=self._method_configs["DeleteAppProfile"].retry, + default_timeout=self._method_configs["DeleteAppProfile"].timeout, + client_info=self._client_info, + ) request = bigtable_instance_admin_pb2.DeleteAppProfileRequest( - name=name, - ignore_warnings=ignore_warnings, + name=name, ignore_warnings=ignore_warnings ) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [('name', name)] + routing_header = [("name", name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header) + routing_header + ) metadata.append(routing_metadata) - self._inner_api_calls['delete_app_profile']( - request, retry=retry, timeout=timeout, metadata=metadata) + self._inner_api_calls["delete_app_profile"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) - def get_iam_policy(self, - resource, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None): + def get_iam_policy( + self, + resource, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): """ Gets the access control policy for an instance resource. Returns an empty policy if an instance exists but does not have a policy set. @@ -1617,38 +1665,42 @@ def get_iam_policy(self, ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if 'get_iam_policy' not in self._inner_api_calls: + if "get_iam_policy" not in self._inner_api_calls: self._inner_api_calls[ - 'get_iam_policy'] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_iam_policy, - default_retry=self._method_configs['GetIamPolicy'].retry, - default_timeout=self._method_configs['GetIamPolicy']. - timeout, - client_info=self._client_info, - ) + "get_iam_policy" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.get_iam_policy, + default_retry=self._method_configs["GetIamPolicy"].retry, + default_timeout=self._method_configs["GetIamPolicy"].timeout, + client_info=self._client_info, + ) - request = iam_policy_pb2.GetIamPolicyRequest(resource=resource, ) + request = iam_policy_pb2.GetIamPolicyRequest(resource=resource) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [('resource', resource)] + routing_header = [("resource", resource)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header) + routing_header + ) metadata.append(routing_metadata) - return self._inner_api_calls['get_iam_policy']( - request, retry=retry, timeout=timeout, metadata=metadata) + return self._inner_api_calls["get_iam_policy"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) - def set_iam_policy(self, - resource, - policy, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None): + def set_iam_policy( + self, + resource, + policy, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): """ Sets the access control policy on an instance resource. Replaces any existing policy. @@ -1696,41 +1748,42 @@ def set_iam_policy(self, ValueError: If the parameters are invalid. 
""" # Wrap the transport method to add retry and timeout logic. - if 'set_iam_policy' not in self._inner_api_calls: + if "set_iam_policy" not in self._inner_api_calls: self._inner_api_calls[ - 'set_iam_policy'] = google.api_core.gapic_v1.method.wrap_method( - self.transport.set_iam_policy, - default_retry=self._method_configs['SetIamPolicy'].retry, - default_timeout=self._method_configs['SetIamPolicy']. - timeout, - client_info=self._client_info, - ) + "set_iam_policy" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.set_iam_policy, + default_retry=self._method_configs["SetIamPolicy"].retry, + default_timeout=self._method_configs["SetIamPolicy"].timeout, + client_info=self._client_info, + ) - request = iam_policy_pb2.SetIamPolicyRequest( - resource=resource, - policy=policy, - ) + request = iam_policy_pb2.SetIamPolicyRequest(resource=resource, policy=policy) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [('resource', resource)] + routing_header = [("resource", resource)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header) + routing_header + ) metadata.append(routing_metadata) - return self._inner_api_calls['set_iam_policy']( - request, retry=retry, timeout=timeout, metadata=metadata) + return self._inner_api_calls["set_iam_policy"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) - def test_iam_permissions(self, - resource, - permissions, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None): + def test_iam_permissions( + self, + resource, + permissions, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): """ Returns permissions that the caller has on the specified instance resource. @@ -1774,32 +1827,32 @@ def test_iam_permissions(self, ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if 'test_iam_permissions' not in self._inner_api_calls: + if "test_iam_permissions" not in self._inner_api_calls: self._inner_api_calls[ - 'test_iam_permissions'] = google.api_core.gapic_v1.method.wrap_method( - self.transport.test_iam_permissions, - default_retry=self._method_configs['TestIamPermissions']. - retry, - default_timeout=self._method_configs['TestIamPermissions']. 
- timeout, - client_info=self._client_info, - ) + "test_iam_permissions" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.test_iam_permissions, + default_retry=self._method_configs["TestIamPermissions"].retry, + default_timeout=self._method_configs["TestIamPermissions"].timeout, + client_info=self._client_info, + ) request = iam_policy_pb2.TestIamPermissionsRequest( - resource=resource, - permissions=permissions, + resource=resource, permissions=permissions ) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [('resource', resource)] + routing_header = [("resource", resource)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header) + routing_header + ) metadata.append(routing_metadata) - return self._inner_api_calls['test_iam_permissions']( - request, retry=retry, timeout=timeout, metadata=metadata) + return self._inner_api_calls["test_iam_permissions"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client_config.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client_config.py index b9e1fc6b385d..355020d508b3 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client_config.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client_config.py @@ -3,7 +3,7 @@ "google.bigtable.admin.v2.BigtableInstanceAdmin": { "retry_codes": { "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], - "non_idempotent": ["UNAVAILABLE"] + "non_idempotent": ["UNAVAILABLE"], }, "retry_params": { "default": { @@ -13,106 +13,106 @@ "initial_rpc_timeout_millis": 60000, "rpc_timeout_multiplier": 1.0, "max_rpc_timeout_millis": 60000, - "total_timeout_millis": 600000 + "total_timeout_millis": 600000, } }, "methods": { "CreateInstance": { "timeout_millis": 60000, "retry_codes_name": "non_idempotent", - "retry_params_name": "default" + "retry_params_name": "default", }, "GetInstance": { "timeout_millis": 60000, "retry_codes_name": "idempotent", - "retry_params_name": "default" + "retry_params_name": "default", }, "ListInstances": { "timeout_millis": 60000, "retry_codes_name": "idempotent", - "retry_params_name": "default" + "retry_params_name": "default", }, "UpdateInstance": { "timeout_millis": 60000, "retry_codes_name": "idempotent", - "retry_params_name": "default" + "retry_params_name": "default", }, "PartialUpdateInstance": { "timeout_millis": 60000, "retry_codes_name": "idempotent", - "retry_params_name": "default" + "retry_params_name": "default", }, "DeleteInstance": { "timeout_millis": 60000, "retry_codes_name": "non_idempotent", - "retry_params_name": "default" + "retry_params_name": "default", }, "CreateCluster": { "timeout_millis": 60000, "retry_codes_name": "non_idempotent", - "retry_params_name": "default" + "retry_params_name": "default", }, "GetCluster": { "timeout_millis": 60000, "retry_codes_name": "idempotent", - "retry_params_name": "default" + "retry_params_name": "default", }, "ListClusters": { "timeout_millis": 60000, "retry_codes_name": "idempotent", - "retry_params_name": "default" + "retry_params_name": "default", }, "UpdateCluster": { "timeout_millis": 60000, "retry_codes_name": "idempotent", - "retry_params_name": "default" + "retry_params_name": "default", }, "DeleteCluster": { "timeout_millis": 60000, 
"retry_codes_name": "non_idempotent", - "retry_params_name": "default" + "retry_params_name": "default", }, "CreateAppProfile": { "timeout_millis": 60000, "retry_codes_name": "non_idempotent", - "retry_params_name": "default" + "retry_params_name": "default", }, "GetAppProfile": { "timeout_millis": 60000, "retry_codes_name": "idempotent", - "retry_params_name": "default" + "retry_params_name": "default", }, "ListAppProfiles": { "timeout_millis": 60000, "retry_codes_name": "idempotent", - "retry_params_name": "default" + "retry_params_name": "default", }, "UpdateAppProfile": { "timeout_millis": 60000, "retry_codes_name": "idempotent", - "retry_params_name": "default" + "retry_params_name": "default", }, "DeleteAppProfile": { "timeout_millis": 60000, "retry_codes_name": "non_idempotent", - "retry_params_name": "default" + "retry_params_name": "default", }, "GetIamPolicy": { "timeout_millis": 60000, "retry_codes_name": "idempotent", - "retry_params_name": "default" + "retry_params_name": "default", }, "SetIamPolicy": { "timeout_millis": 60000, "retry_codes_name": "non_idempotent", - "retry_params_name": "default" + "retry_params_name": "default", }, "TestIamPermissions": { "timeout_millis": 60000, "retry_codes_name": "idempotent", - "retry_params_name": "default" - } - } + "retry_params_name": "default", + }, + }, } } } diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py index 8014df3ee826..bc89075d0508 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py @@ -34,7 +34,9 @@ from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client_config from google.cloud.bigtable_admin_v2.gapic import enums -from google.cloud.bigtable_admin_v2.gapic.transports import bigtable_table_admin_grpc_transport +from google.cloud.bigtable_admin_v2.gapic.transports import ( + bigtable_table_admin_grpc_transport, +) from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2 from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2_grpc from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2 @@ -48,8 +50,7 @@ from google.protobuf import empty_pb2 from google.protobuf import field_mask_pb2 -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( - 'google-cloud-bigtable', ).version +_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-bigtable").version class BigtableTableAdminClient(object): @@ -61,12 +62,12 @@ class BigtableTableAdminClient(object): the tables. """ - SERVICE_ADDRESS = 'bigtableadmin.googleapis.com:443' + SERVICE_ADDRESS = "bigtableadmin.googleapis.com:443" """The default address of the service.""" # The name of the interface for this client. This is the key used to # find the method configuration in the client_config dictionary. - _INTERFACE_NAME = 'google.bigtable.admin.v2.BigtableTableAdmin' + _INTERFACE_NAME = "google.bigtable.admin.v2.BigtableTableAdmin" @classmethod def from_service_account_file(cls, filename, *args, **kwargs): @@ -82,9 +83,8 @@ def from_service_account_file(cls, filename, *args, **kwargs): Returns: BigtableTableAdminClient: The constructed client. 
""" - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs['credentials'] = credentials + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials return cls(*args, **kwargs) from_service_account_json = from_service_account_file @@ -93,7 +93,7 @@ def from_service_account_file(cls, filename, *args, **kwargs): def instance_path(cls, project, instance): """Return a fully-qualified instance string.""" return google.api_core.path_template.expand( - 'projects/{project}/instances/{instance}', + "projects/{project}/instances/{instance}", project=project, instance=instance, ) @@ -102,7 +102,7 @@ def instance_path(cls, project, instance): def cluster_path(cls, project, instance, cluster): """Return a fully-qualified cluster string.""" return google.api_core.path_template.expand( - 'projects/{project}/instances/{instance}/clusters/{cluster}', + "projects/{project}/instances/{instance}/clusters/{cluster}", project=project, instance=instance, cluster=cluster, @@ -112,7 +112,7 @@ def cluster_path(cls, project, instance, cluster): def snapshot_path(cls, project, instance, cluster, snapshot): """Return a fully-qualified snapshot string.""" return google.api_core.path_template.expand( - 'projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}', + "projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}", project=project, instance=instance, cluster=cluster, @@ -123,18 +123,20 @@ def snapshot_path(cls, project, instance, cluster, snapshot): def table_path(cls, project, instance, table): """Return a fully-qualified table string.""" return google.api_core.path_template.expand( - 'projects/{project}/instances/{instance}/tables/{table}', + "projects/{project}/instances/{instance}/tables/{table}", project=project, instance=instance, table=table, ) - def __init__(self, - transport=None, - channel=None, - credentials=None, - client_config=None, - client_info=None): + def __init__( + self, + transport=None, + channel=None, + credentials=None, + client_config=None, + client_info=None, + ): """Constructor. Args: @@ -168,18 +170,19 @@ def __init__(self, # Raise deprecation warnings for things we want to go away. if client_config is not None: warnings.warn( - 'The `client_config` argument is deprecated.', + "The `client_config` argument is deprecated.", PendingDeprecationWarning, - stacklevel=2) + stacklevel=2, + ) else: client_config = bigtable_table_admin_client_config.config if channel: warnings.warn( - 'The `channel` argument is deprecated; use ' - '`transport` instead.', + "The `channel` argument is deprecated; use " "`transport` instead.", PendingDeprecationWarning, - stacklevel=2) + stacklevel=2, + ) # Instantiate the transport. # The transport is responsible for handling serialization and @@ -188,25 +191,24 @@ def __init__(self, if callable(transport): self.transport = transport( credentials=credentials, - default_class=bigtable_table_admin_grpc_transport. - BigtableTableAdminGrpcTransport, + default_class=bigtable_table_admin_grpc_transport.BigtableTableAdminGrpcTransport, ) else: if credentials: raise ValueError( - 'Received both a transport instance and ' - 'credentials; these are mutually exclusive.') + "Received both a transport instance and " + "credentials; these are mutually exclusive." 
+ ) self.transport = transport else: self.transport = bigtable_table_admin_grpc_transport.BigtableTableAdminGrpcTransport( - address=self.SERVICE_ADDRESS, - channel=channel, - credentials=credentials, + address=self.SERVICE_ADDRESS, channel=channel, credentials=credentials ) if client_info is None: client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION, ) + gapic_version=_GAPIC_LIBRARY_VERSION + ) else: client_info.gapic_version = _GAPIC_LIBRARY_VERSION self._client_info = client_info @@ -216,7 +218,8 @@ def __init__(self, # (Ordinarily, these are the defaults specified in the `*_config.py` # file next to this one.) self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config['interfaces'][self._INTERFACE_NAME], ) + client_config["interfaces"][self._INTERFACE_NAME] + ) # Save a dictionary of cached API call functions. # These are the actual callables which invoke the proper @@ -225,14 +228,16 @@ def __init__(self, self._inner_api_calls = {} # Service calls - def create_table(self, - parent, - table_id, - table, - initial_splits=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None): + def create_table( + self, + parent, + table_id, + table, + initial_splits=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): """ Creates a new table in the specified instance. The table can be created with a full set of initial column families, @@ -303,45 +308,45 @@ def create_table(self, ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if 'create_table' not in self._inner_api_calls: + if "create_table" not in self._inner_api_calls: self._inner_api_calls[ - 'create_table'] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_table, - default_retry=self._method_configs['CreateTable'].retry, - default_timeout=self._method_configs['CreateTable']. 
- timeout, - client_info=self._client_info, - ) + "create_table" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.create_table, + default_retry=self._method_configs["CreateTable"].retry, + default_timeout=self._method_configs["CreateTable"].timeout, + client_info=self._client_info, + ) request = bigtable_table_admin_pb2.CreateTableRequest( - parent=parent, - table_id=table_id, - table=table, - initial_splits=initial_splits, + parent=parent, table_id=table_id, table=table, initial_splits=initial_splits ) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [('parent', parent)] + routing_header = [("parent", parent)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header) + routing_header + ) metadata.append(routing_metadata) - return self._inner_api_calls['create_table']( - request, retry=retry, timeout=timeout, metadata=metadata) + return self._inner_api_calls["create_table"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) def create_table_from_snapshot( - self, - parent, - table_id, - source_snapshot, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None): + self, + parent, + table_id, + source_snapshot, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): """ Creates a new table from the specified snapshot. The target table must not exist. The snapshot and the table must be in the same instance. @@ -405,51 +410,51 @@ def create_table_from_snapshot( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if 'create_table_from_snapshot' not in self._inner_api_calls: + if "create_table_from_snapshot" not in self._inner_api_calls: self._inner_api_calls[ - 'create_table_from_snapshot'] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_table_from_snapshot, - default_retry=self. - _method_configs['CreateTableFromSnapshot'].retry, - default_timeout=self. - _method_configs['CreateTableFromSnapshot'].timeout, - client_info=self._client_info, - ) + "create_table_from_snapshot" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.create_table_from_snapshot, + default_retry=self._method_configs["CreateTableFromSnapshot"].retry, + default_timeout=self._method_configs["CreateTableFromSnapshot"].timeout, + client_info=self._client_info, + ) request = bigtable_table_admin_pb2.CreateTableFromSnapshotRequest( - parent=parent, - table_id=table_id, - source_snapshot=source_snapshot, + parent=parent, table_id=table_id, source_snapshot=source_snapshot ) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [('parent', parent)] + routing_header = [("parent", parent)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header) + routing_header + ) metadata.append(routing_metadata) - operation = self._inner_api_calls['create_table_from_snapshot']( - request, retry=retry, timeout=timeout, metadata=metadata) + operation = self._inner_api_calls["create_table_from_snapshot"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) return google.api_core.operation.from_gapic( operation, self.transport._operations_client, table_pb2.Table, - metadata_type=bigtable_table_admin_pb2. 
- CreateTableFromSnapshotMetadata, + metadata_type=bigtable_table_admin_pb2.CreateTableFromSnapshotMetadata, ) - def list_tables(self, - parent, - view=None, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None): + def list_tables( + self, + parent, + view=None, + page_size=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): """ Lists all tables served from a specified instance. @@ -504,52 +509,55 @@ def list_tables(self, ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if 'list_tables' not in self._inner_api_calls: + if "list_tables" not in self._inner_api_calls: self._inner_api_calls[ - 'list_tables'] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_tables, - default_retry=self._method_configs['ListTables'].retry, - default_timeout=self._method_configs['ListTables'].timeout, - client_info=self._client_info, - ) + "list_tables" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.list_tables, + default_retry=self._method_configs["ListTables"].retry, + default_timeout=self._method_configs["ListTables"].timeout, + client_info=self._client_info, + ) request = bigtable_table_admin_pb2.ListTablesRequest( - parent=parent, - view=view, - page_size=page_size, + parent=parent, view=view, page_size=page_size ) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [('parent', parent)] + routing_header = [("parent", parent)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header) + routing_header + ) metadata.append(routing_metadata) iterator = google.api_core.page_iterator.GRPCIterator( client=None, method=functools.partial( - self._inner_api_calls['list_tables'], + self._inner_api_calls["list_tables"], retry=retry, timeout=timeout, - metadata=metadata), + metadata=metadata, + ), request=request, - items_field='tables', - request_token_field='page_token', - response_token_field='next_page_token', + items_field="tables", + request_token_field="page_token", + response_token_field="next_page_token", ) return iterator - def get_table(self, - name, - view=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None): + def get_table( + self, + name, + view=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): """ Gets metadata information about the specified table. @@ -587,39 +595,41 @@ def get_table(self, ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
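# Usage sketch (placeholder ids): list_tables above returns a GRPCIterator that
# pages through ListTablesResponse messages transparently; results can be consumed
# element-by-element or page-by-page.
from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableTableAdminClient()
parent = client.instance_path("my-project", "my-instance")
for table in client.list_tables(parent):
    print(table.name)
for page in client.list_tables(parent).pages:  # explicit paging, if preferred
    for table in page:
        print(table.name)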
- if 'get_table' not in self._inner_api_calls: + if "get_table" not in self._inner_api_calls: self._inner_api_calls[ - 'get_table'] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_table, - default_retry=self._method_configs['GetTable'].retry, - default_timeout=self._method_configs['GetTable'].timeout, - client_info=self._client_info, - ) + "get_table" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.get_table, + default_retry=self._method_configs["GetTable"].retry, + default_timeout=self._method_configs["GetTable"].timeout, + client_info=self._client_info, + ) - request = bigtable_table_admin_pb2.GetTableRequest( - name=name, - view=view, - ) + request = bigtable_table_admin_pb2.GetTableRequest(name=name, view=view) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [('name', name)] + routing_header = [("name", name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header) + routing_header + ) metadata.append(routing_metadata) - return self._inner_api_calls['get_table']( - request, retry=retry, timeout=timeout, metadata=metadata) + return self._inner_api_calls["get_table"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) - def delete_table(self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None): + def delete_table( + self, + name, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): """ Permanently deletes a specified table and all of its data. @@ -652,38 +662,42 @@ def delete_table(self, ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if 'delete_table' not in self._inner_api_calls: + if "delete_table" not in self._inner_api_calls: self._inner_api_calls[ - 'delete_table'] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_table, - default_retry=self._method_configs['DeleteTable'].retry, - default_timeout=self._method_configs['DeleteTable']. 
- timeout, - client_info=self._client_info, - ) + "delete_table" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.delete_table, + default_retry=self._method_configs["DeleteTable"].retry, + default_timeout=self._method_configs["DeleteTable"].timeout, + client_info=self._client_info, + ) - request = bigtable_table_admin_pb2.DeleteTableRequest(name=name, ) + request = bigtable_table_admin_pb2.DeleteTableRequest(name=name) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [('name', name)] + routing_header = [("name", name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header) + routing_header + ) metadata.append(routing_metadata) - self._inner_api_calls['delete_table']( - request, retry=retry, timeout=timeout, metadata=metadata) + self._inner_api_calls["delete_table"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) - def modify_column_families(self, - name, - modifications, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None): + def modify_column_families( + self, + name, + modifications, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): """ Performs a series of column family modifications on the specified table. Either all or none of the modifications will occur before this method @@ -733,43 +747,45 @@ def modify_column_families(self, ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if 'modify_column_families' not in self._inner_api_calls: + if "modify_column_families" not in self._inner_api_calls: self._inner_api_calls[ - 'modify_column_families'] = google.api_core.gapic_v1.method.wrap_method( - self.transport.modify_column_families, - default_retry=self._method_configs['ModifyColumnFamilies']. - retry, - default_timeout=self. 
- _method_configs['ModifyColumnFamilies'].timeout, - client_info=self._client_info, - ) + "modify_column_families" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.modify_column_families, + default_retry=self._method_configs["ModifyColumnFamilies"].retry, + default_timeout=self._method_configs["ModifyColumnFamilies"].timeout, + client_info=self._client_info, + ) request = bigtable_table_admin_pb2.ModifyColumnFamiliesRequest( - name=name, - modifications=modifications, + name=name, modifications=modifications ) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [('name', name)] + routing_header = [("name", name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header) + routing_header + ) metadata.append(routing_metadata) - return self._inner_api_calls['modify_column_families']( - request, retry=retry, timeout=timeout, metadata=metadata) + return self._inner_api_calls["modify_column_families"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) - def drop_row_range(self, - name, - row_key_prefix=None, - delete_all_data_from_table=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None): + def drop_row_range( + self, + name, + row_key_prefix=None, + delete_all_data_from_table=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): """ Permanently drop/delete a row range from a specified table. The request can specify whether to delete all rows in a table, or only those that match a @@ -808,15 +824,15 @@ def drop_row_range(self, ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if 'drop_row_range' not in self._inner_api_calls: + if "drop_row_range" not in self._inner_api_calls: self._inner_api_calls[ - 'drop_row_range'] = google.api_core.gapic_v1.method.wrap_method( - self.transport.drop_row_range, - default_retry=self._method_configs['DropRowRange'].retry, - default_timeout=self._method_configs['DropRowRange']. - timeout, - client_info=self._client_info, - ) + "drop_row_range" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.drop_row_range, + default_retry=self._method_configs["DropRowRange"].retry, + default_timeout=self._method_configs["DropRowRange"].timeout, + client_info=self._client_info, + ) # Sanity check: We have some fields which are mutually exclusive; # raise ValueError if more than one is sent. 
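# Usage sketch (placeholder ids): drop_row_range accepts either a row_key_prefix or
# delete_all_data_from_table=True; the sanity check below rejects requests that set
# both, since the two targets are mutually exclusive.
from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableTableAdminClient()
name = client.table_path("my-project", "my-instance", "my-table")
client.drop_row_range(name, row_key_prefix=b"user#")  # drop one key range
client.drop_row_range(name, delete_all_data_from_table=True)  # or wipe the table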
@@ -834,23 +850,26 @@ def drop_row_range(self, metadata = [] metadata = list(metadata) try: - routing_header = [('name', name)] + routing_header = [("name", name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header) + routing_header + ) metadata.append(routing_metadata) - self._inner_api_calls['drop_row_range']( - request, retry=retry, timeout=timeout, metadata=metadata) + self._inner_api_calls["drop_row_range"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) def generate_consistency_token( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None): + self, + name, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): """ Generates a consistency token for a Table, which can be used in CheckConsistency to check whether mutations to the table that finished @@ -890,40 +909,44 @@ def generate_consistency_token( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if 'generate_consistency_token' not in self._inner_api_calls: + if "generate_consistency_token" not in self._inner_api_calls: self._inner_api_calls[ - 'generate_consistency_token'] = google.api_core.gapic_v1.method.wrap_method( - self.transport.generate_consistency_token, - default_retry=self. - _method_configs['GenerateConsistencyToken'].retry, - default_timeout=self. - _method_configs['GenerateConsistencyToken'].timeout, - client_info=self._client_info, - ) + "generate_consistency_token" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.generate_consistency_token, + default_retry=self._method_configs["GenerateConsistencyToken"].retry, + default_timeout=self._method_configs[ + "GenerateConsistencyToken" + ].timeout, + client_info=self._client_info, + ) - request = bigtable_table_admin_pb2.GenerateConsistencyTokenRequest( - name=name, ) + request = bigtable_table_admin_pb2.GenerateConsistencyTokenRequest(name=name) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [('name', name)] + routing_header = [("name", name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header) + routing_header + ) metadata.append(routing_metadata) - return self._inner_api_calls['generate_consistency_token']( - request, retry=retry, timeout=timeout, metadata=metadata) + return self._inner_api_calls["generate_consistency_token"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) - def check_consistency(self, - name, - consistency_token, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None): + def check_consistency( + self, + name, + consistency_token, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): """ Checks replication consistency based on a consistency token, that is, if replication has caught up based on the conditions specified in the token @@ -966,45 +989,47 @@ def check_consistency(self, ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
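# Usage sketch (placeholder ids): the two methods above are meant to be used
# together; generate a token after writes finish, then poll check_consistency until
# replication has caught up across clusters.
import time

from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableTableAdminClient()
name = client.table_path("my-project", "my-instance", "my-table")
token = client.generate_consistency_token(name).consistency_token
while not client.check_consistency(name, token).consistent:
    time.sleep(1)  # simple fixed-interval poll; real code may back off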
- if 'check_consistency' not in self._inner_api_calls: + if "check_consistency" not in self._inner_api_calls: self._inner_api_calls[ - 'check_consistency'] = google.api_core.gapic_v1.method.wrap_method( - self.transport.check_consistency, - default_retry=self._method_configs['CheckConsistency']. - retry, - default_timeout=self._method_configs['CheckConsistency']. - timeout, - client_info=self._client_info, - ) + "check_consistency" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.check_consistency, + default_retry=self._method_configs["CheckConsistency"].retry, + default_timeout=self._method_configs["CheckConsistency"].timeout, + client_info=self._client_info, + ) request = bigtable_table_admin_pb2.CheckConsistencyRequest( - name=name, - consistency_token=consistency_token, + name=name, consistency_token=consistency_token ) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [('name', name)] + routing_header = [("name", name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header) + routing_header + ) metadata.append(routing_metadata) - return self._inner_api_calls['check_consistency']( - request, retry=retry, timeout=timeout, metadata=metadata) - - def snapshot_table(self, - name, - cluster, - snapshot_id, - description, - ttl=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None): + return self._inner_api_calls["check_consistency"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + + def snapshot_table( + self, + name, + cluster, + snapshot_id, + description, + ttl=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): """ Creates a new snapshot in the specified cluster from the specified source table. The cluster and the table must be in the same instance. @@ -1080,15 +1105,15 @@ def snapshot_table(self, ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if 'snapshot_table' not in self._inner_api_calls: + if "snapshot_table" not in self._inner_api_calls: self._inner_api_calls[ - 'snapshot_table'] = google.api_core.gapic_v1.method.wrap_method( - self.transport.snapshot_table, - default_retry=self._method_configs['SnapshotTable'].retry, - default_timeout=self._method_configs['SnapshotTable']. 
- timeout, - client_info=self._client_info, - ) + "snapshot_table" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.snapshot_table, + default_retry=self._method_configs["SnapshotTable"].retry, + default_timeout=self._method_configs["SnapshotTable"].timeout, + client_info=self._client_info, + ) request = bigtable_table_admin_pb2.SnapshotTableRequest( name=name, @@ -1101,16 +1126,18 @@ def snapshot_table(self, metadata = [] metadata = list(metadata) try: - routing_header = [('name', name)] + routing_header = [("name", name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header) + routing_header + ) metadata.append(routing_metadata) - operation = self._inner_api_calls['snapshot_table']( - request, retry=retry, timeout=timeout, metadata=metadata) + operation = self._inner_api_calls["snapshot_table"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) return google.api_core.operation.from_gapic( operation, self.transport._operations_client, @@ -1118,11 +1145,13 @@ def snapshot_table(self, metadata_type=bigtable_table_admin_pb2.SnapshotTableMetadata, ) - def get_snapshot(self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None): + def get_snapshot( + self, + name, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): """ Gets metadata information about the specified snapshot. @@ -1164,38 +1193,42 @@ def get_snapshot(self, ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if 'get_snapshot' not in self._inner_api_calls: + if "get_snapshot" not in self._inner_api_calls: self._inner_api_calls[ - 'get_snapshot'] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_snapshot, - default_retry=self._method_configs['GetSnapshot'].retry, - default_timeout=self._method_configs['GetSnapshot']. - timeout, - client_info=self._client_info, - ) + "get_snapshot" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.get_snapshot, + default_retry=self._method_configs["GetSnapshot"].retry, + default_timeout=self._method_configs["GetSnapshot"].timeout, + client_info=self._client_info, + ) - request = bigtable_table_admin_pb2.GetSnapshotRequest(name=name, ) + request = bigtable_table_admin_pb2.GetSnapshotRequest(name=name) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [('name', name)] + routing_header = [("name", name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header) + routing_header + ) metadata.append(routing_metadata) - return self._inner_api_calls['get_snapshot']( - request, retry=retry, timeout=timeout, metadata=metadata) + return self._inner_api_calls["get_snapshot"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) - def list_snapshots(self, - parent, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None): + def list_snapshots( + self, + parent, + page_size=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): """ Lists all snapshots associated with the specified cluster. @@ -1260,51 +1293,54 @@ def list_snapshots(self, ValueError: If the parameters are invalid. 
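# Usage sketch (placeholder ids): snapshot_table above wraps the returned
# long-running operation in a google.api_core.operation.Operation, so callers can
# block on result() or attach a done callback.
from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableTableAdminClient()
name = client.table_path("my-project", "my-instance", "my-table")
cluster = client.cluster_path("my-project", "my-instance", "my-cluster")
operation = client.snapshot_table(name, cluster, "my-snapshot", "nightly backup")
snapshot = operation.result()  # blocks until the snapshot has been created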
""" # Wrap the transport method to add retry and timeout logic. - if 'list_snapshots' not in self._inner_api_calls: + if "list_snapshots" not in self._inner_api_calls: self._inner_api_calls[ - 'list_snapshots'] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_snapshots, - default_retry=self._method_configs['ListSnapshots'].retry, - default_timeout=self._method_configs['ListSnapshots']. - timeout, - client_info=self._client_info, - ) + "list_snapshots" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.list_snapshots, + default_retry=self._method_configs["ListSnapshots"].retry, + default_timeout=self._method_configs["ListSnapshots"].timeout, + client_info=self._client_info, + ) request = bigtable_table_admin_pb2.ListSnapshotsRequest( - parent=parent, - page_size=page_size, + parent=parent, page_size=page_size ) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [('parent', parent)] + routing_header = [("parent", parent)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header) + routing_header + ) metadata.append(routing_metadata) iterator = google.api_core.page_iterator.GRPCIterator( client=None, method=functools.partial( - self._inner_api_calls['list_snapshots'], + self._inner_api_calls["list_snapshots"], retry=retry, timeout=timeout, - metadata=metadata), + metadata=metadata, + ), request=request, - items_field='snapshots', - request_token_field='page_token', - response_token_field='next_page_token', + items_field="snapshots", + request_token_field="page_token", + response_token_field="next_page_token", ) return iterator - def delete_snapshot(self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None): + def delete_snapshot( + self, + name, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): """ Permanently deletes the specified snapshot. @@ -1343,28 +1379,30 @@ def delete_snapshot(self, ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if 'delete_snapshot' not in self._inner_api_calls: + if "delete_snapshot" not in self._inner_api_calls: self._inner_api_calls[ - 'delete_snapshot'] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_snapshot, - default_retry=self._method_configs['DeleteSnapshot'].retry, - default_timeout=self._method_configs['DeleteSnapshot']. 
- timeout, - client_info=self._client_info, - ) + "delete_snapshot" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.delete_snapshot, + default_retry=self._method_configs["DeleteSnapshot"].retry, + default_timeout=self._method_configs["DeleteSnapshot"].timeout, + client_info=self._client_info, + ) - request = bigtable_table_admin_pb2.DeleteSnapshotRequest(name=name, ) + request = bigtable_table_admin_pb2.DeleteSnapshotRequest(name=name) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [('name', name)] + routing_header = [("name", name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header) + routing_header + ) metadata.append(routing_metadata) - self._inner_api_calls['delete_snapshot']( - request, retry=retry, timeout=timeout, metadata=metadata) + self._inner_api_calls["delete_snapshot"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py index 63e08c23aa72..4318e93365cb 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py @@ -3,7 +3,7 @@ "google.bigtable.admin.v2.BigtableTableAdmin": { "retry_codes": { "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], - "non_idempotent": [] + "non_idempotent": [], }, "retry_params": { "default": { @@ -13,76 +13,76 @@ "initial_rpc_timeout_millis": 20000, "rpc_timeout_multiplier": 1.0, "max_rpc_timeout_millis": 20000, - "total_timeout_millis": 600000 + "total_timeout_millis": 600000, } }, "methods": { "CreateTable": { "timeout_millis": 130000, "retry_codes_name": "non_idempotent", - "retry_params_name": "default" + "retry_params_name": "default", }, "CreateTableFromSnapshot": { "timeout_millis": 60000, "retry_codes_name": "non_idempotent", - "retry_params_name": "default" + "retry_params_name": "default", }, "ListTables": { "timeout_millis": 60000, "retry_codes_name": "idempotent", - "retry_params_name": "default" + "retry_params_name": "default", }, "GetTable": { "timeout_millis": 60000, "retry_codes_name": "idempotent", - "retry_params_name": "default" + "retry_params_name": "default", }, "DeleteTable": { "timeout_millis": 60000, "retry_codes_name": "non_idempotent", - "retry_params_name": "default" + "retry_params_name": "default", }, "ModifyColumnFamilies": { "timeout_millis": 60000, "retry_codes_name": "non_idempotent", - "retry_params_name": "default" + "retry_params_name": "default", }, "DropRowRange": { "timeout_millis": 900000, "retry_codes_name": "non_idempotent", - "retry_params_name": "default" + "retry_params_name": "default", }, "GenerateConsistencyToken": { "timeout_millis": 60000, "retry_codes_name": "idempotent", - "retry_params_name": "default" + "retry_params_name": "default", }, "CheckConsistency": { "timeout_millis": 60000, "retry_codes_name": "idempotent", - "retry_params_name": "default" + "retry_params_name": "default", }, "SnapshotTable": { "timeout_millis": 60000, "retry_codes_name": "non_idempotent", - "retry_params_name": "default" + "retry_params_name": "default", }, "GetSnapshot": { "timeout_millis": 60000, "retry_codes_name": "idempotent", - "retry_params_name": "default" + 
"retry_params_name": "default", }, "ListSnapshots": { "timeout_millis": 60000, "retry_codes_name": "idempotent", - "retry_params_name": "default" + "retry_params_name": "default", }, "DeleteSnapshot": { "timeout_millis": 60000, "retry_codes_name": "non_idempotent", - "retry_params_name": "default" - } - } + "retry_params_name": "default", + }, + }, } } } diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py index 28f7ff3cad0b..b2e837a12448 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py @@ -27,6 +27,7 @@ class StorageType(enum.IntEnum): SSD (int): Flash (SSD) storage should be used. HDD (int): Magnetic drive (HDD) storage should be used. """ + STORAGE_TYPE_UNSPECIFIED = 0 SSD = 1 HDD = 2 @@ -44,6 +45,7 @@ class State(enum.IntEnum): CREATING (int): The instance is currently being created, and may be destroyed if the creation process encounters an error. """ + STATE_NOT_KNOWN = 0 READY = 1 CREATING = 2 @@ -65,6 +67,7 @@ class Type(enum.IntEnum): instance cannot be changed to a development instance. When creating a development instance, ``serve_nodes`` on the cluster must not be set. """ + TYPE_UNSPECIFIED = 0 PRODUCTION = 1 DEVELOPMENT = 2 @@ -89,6 +92,7 @@ class State(enum.IntEnum): DISABLED (int): The cluster has no backing nodes. The data (tables) still exist, but no operations can be performed on the cluster. """ + STATE_NOT_KNOWN = 0 READY = 1 CREATING = 2 @@ -107,6 +111,7 @@ class TimestampGranularity(enum.IntEnum): When specified during table creation, MILLIS will be used. MILLIS (int): The table keeps data versioned at a granularity of 1ms. """ + TIMESTAMP_GRANULARITY_UNSPECIFIED = 0 MILLIS = 1 @@ -122,6 +127,7 @@ class View(enum.IntEnum): state. FULL (int): Populates all fields. """ + VIEW_UNSPECIFIED = 0 NAME_ONLY = 1 SCHEMA_VIEW = 2 @@ -146,6 +152,7 @@ class ReplicationState(enum.IntEnum): replication delay, reads may not immediately reflect the state of the table in other clusters. """ + STATE_NOT_KNOWN = 0 INITIALIZING = 1 PLANNED_MAINTENANCE = 2 @@ -165,6 +172,7 @@ class State(enum.IntEnum): creation process encounters an error. A snapshot may not be restored to a table while it is being created. """ + STATE_NOT_KNOWN = 0 READY = 1 CREATING = 2 diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py index 094cb6c0ff5e..72d269cba08e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py @@ -28,24 +28,24 @@ class BigtableInstanceAdminGrpcTransport(object): which can be used to take advantage of advanced features of gRPC. """ + # The scopes needed to make gRPC calls to all of the methods defined # in this service. 
_OAUTH_SCOPES = ( - 'https://www.googleapis.com/auth/bigtable.admin', - 'https://www.googleapis.com/auth/bigtable.admin.cluster', - 'https://www.googleapis.com/auth/bigtable.admin.instance', - 'https://www.googleapis.com/auth/bigtable.admin.table', - 'https://www.googleapis.com/auth/cloud-bigtable.admin', - 'https://www.googleapis.com/auth/cloud-bigtable.admin.cluster', - 'https://www.googleapis.com/auth/cloud-bigtable.admin.table', - 'https://www.googleapis.com/auth/cloud-platform', - 'https://www.googleapis.com/auth/cloud-platform.read-only', + "https://www.googleapis.com/auth/bigtable.admin", + "https://www.googleapis.com/auth/bigtable.admin.cluster", + "https://www.googleapis.com/auth/bigtable.admin.instance", + "https://www.googleapis.com/auth/bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-bigtable.admin", + "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster", + "https://www.googleapis.com/auth/cloud-bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", ) - def __init__(self, - channel=None, - credentials=None, - address='bigtableadmin.googleapis.com:443'): + def __init__( + self, channel=None, credentials=None, address="bigtableadmin.googleapis.com:443" + ): """Instantiate the transport class. Args: @@ -63,36 +63,34 @@ def __init__(self, # exception (channels come with credentials baked in already). if channel is not None and credentials is not None: raise ValueError( - 'The `channel` and `credentials` arguments are mutually ' - 'exclusive.', ) + "The `channel` and `credentials` arguments are mutually " "exclusive." + ) # Create the channel. if channel is None: - channel = self.create_channel( - address=address, - credentials=credentials, - ) + channel = self.create_channel(address=address, credentials=credentials) self._channel = channel # gRPC uses objects called "stubs" that are bound to the # channel and provide a basic method for each RPC. self._stubs = { - 'bigtable_instance_admin_stub': - bigtable_instance_admin_pb2_grpc.BigtableInstanceAdminStub( - channel), + "bigtable_instance_admin_stub": bigtable_instance_admin_pb2_grpc.BigtableInstanceAdminStub( + channel + ) } # Because this API includes a method that returns a # long-running operation (proto: google.longrunning.Operation), # instantiate an LRO client. self._operations_client = google.api_core.operations_v1.OperationsClient( - channel) + channel + ) @classmethod - def create_channel(cls, - address='bigtableadmin.googleapis.com:443', - credentials=None): + def create_channel( + cls, address="bigtableadmin.googleapis.com:443", credentials=None + ): """Create and return a gRPC channel object. Args: @@ -107,9 +105,7 @@ def create_channel(cls, grpc.Channel: A gRPC channel object. """ return google.api_core.grpc_helpers.create_channel( - address, - credentials=credentials, - scopes=cls._OAUTH_SCOPES, + address, credentials=credentials, scopes=cls._OAUTH_SCOPES ) @property @@ -132,7 +128,7 @@ def create_instance(self): deserialized request object and returns a deserialized response object. """ - return self._stubs['bigtable_instance_admin_stub'].CreateInstance + return self._stubs["bigtable_instance_admin_stub"].CreateInstance @property def get_instance(self): @@ -145,7 +141,7 @@ def get_instance(self): deserialized request object and returns a deserialized response object. 
""" - return self._stubs['bigtable_instance_admin_stub'].GetInstance + return self._stubs["bigtable_instance_admin_stub"].GetInstance @property def list_instances(self): @@ -158,7 +154,7 @@ def list_instances(self): deserialized request object and returns a deserialized response object. """ - return self._stubs['bigtable_instance_admin_stub'].ListInstances + return self._stubs["bigtable_instance_admin_stub"].ListInstances @property def update_instance(self): @@ -171,7 +167,7 @@ def update_instance(self): deserialized request object and returns a deserialized response object. """ - return self._stubs['bigtable_instance_admin_stub'].UpdateInstance + return self._stubs["bigtable_instance_admin_stub"].UpdateInstance @property def partial_update_instance(self): @@ -184,8 +180,7 @@ def partial_update_instance(self): deserialized request object and returns a deserialized response object. """ - return self._stubs[ - 'bigtable_instance_admin_stub'].PartialUpdateInstance + return self._stubs["bigtable_instance_admin_stub"].PartialUpdateInstance @property def delete_instance(self): @@ -198,7 +193,7 @@ def delete_instance(self): deserialized request object and returns a deserialized response object. """ - return self._stubs['bigtable_instance_admin_stub'].DeleteInstance + return self._stubs["bigtable_instance_admin_stub"].DeleteInstance @property def create_cluster(self): @@ -211,7 +206,7 @@ def create_cluster(self): deserialized request object and returns a deserialized response object. """ - return self._stubs['bigtable_instance_admin_stub'].CreateCluster + return self._stubs["bigtable_instance_admin_stub"].CreateCluster @property def get_cluster(self): @@ -224,7 +219,7 @@ def get_cluster(self): deserialized request object and returns a deserialized response object. """ - return self._stubs['bigtable_instance_admin_stub'].GetCluster + return self._stubs["bigtable_instance_admin_stub"].GetCluster @property def list_clusters(self): @@ -237,7 +232,7 @@ def list_clusters(self): deserialized request object and returns a deserialized response object. """ - return self._stubs['bigtable_instance_admin_stub'].ListClusters + return self._stubs["bigtable_instance_admin_stub"].ListClusters @property def update_cluster(self): @@ -250,7 +245,7 @@ def update_cluster(self): deserialized request object and returns a deserialized response object. """ - return self._stubs['bigtable_instance_admin_stub'].UpdateCluster + return self._stubs["bigtable_instance_admin_stub"].UpdateCluster @property def delete_cluster(self): @@ -263,7 +258,7 @@ def delete_cluster(self): deserialized request object and returns a deserialized response object. """ - return self._stubs['bigtable_instance_admin_stub'].DeleteCluster + return self._stubs["bigtable_instance_admin_stub"].DeleteCluster @property def create_app_profile(self): @@ -276,7 +271,7 @@ def create_app_profile(self): deserialized request object and returns a deserialized response object. """ - return self._stubs['bigtable_instance_admin_stub'].CreateAppProfile + return self._stubs["bigtable_instance_admin_stub"].CreateAppProfile @property def get_app_profile(self): @@ -289,7 +284,7 @@ def get_app_profile(self): deserialized request object and returns a deserialized response object. """ - return self._stubs['bigtable_instance_admin_stub'].GetAppProfile + return self._stubs["bigtable_instance_admin_stub"].GetAppProfile @property def list_app_profiles(self): @@ -302,7 +297,7 @@ def list_app_profiles(self): deserialized request object and returns a deserialized response object. 
""" - return self._stubs['bigtable_instance_admin_stub'].ListAppProfiles + return self._stubs["bigtable_instance_admin_stub"].ListAppProfiles @property def update_app_profile(self): @@ -315,7 +310,7 @@ def update_app_profile(self): deserialized request object and returns a deserialized response object. """ - return self._stubs['bigtable_instance_admin_stub'].UpdateAppProfile + return self._stubs["bigtable_instance_admin_stub"].UpdateAppProfile @property def delete_app_profile(self): @@ -328,7 +323,7 @@ def delete_app_profile(self): deserialized request object and returns a deserialized response object. """ - return self._stubs['bigtable_instance_admin_stub'].DeleteAppProfile + return self._stubs["bigtable_instance_admin_stub"].DeleteAppProfile @property def get_iam_policy(self): @@ -342,7 +337,7 @@ def get_iam_policy(self): deserialized request object and returns a deserialized response object. """ - return self._stubs['bigtable_instance_admin_stub'].GetIamPolicy + return self._stubs["bigtable_instance_admin_stub"].GetIamPolicy @property def set_iam_policy(self): @@ -356,7 +351,7 @@ def set_iam_policy(self): deserialized request object and returns a deserialized response object. """ - return self._stubs['bigtable_instance_admin_stub'].SetIamPolicy + return self._stubs["bigtable_instance_admin_stub"].SetIamPolicy @property def test_iam_permissions(self): @@ -369,4 +364,4 @@ def test_iam_permissions(self): deserialized request object and returns a deserialized response object. """ - return self._stubs['bigtable_instance_admin_stub'].TestIamPermissions + return self._stubs["bigtable_instance_admin_stub"].TestIamPermissions diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py index 9e9dce76207e..4f318857e62f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py @@ -28,24 +28,24 @@ class BigtableTableAdminGrpcTransport(object): which can be used to take advantage of advanced features of gRPC. """ + # The scopes needed to make gRPC calls to all of the methods defined # in this service. 
_OAUTH_SCOPES = ( - 'https://www.googleapis.com/auth/bigtable.admin', - 'https://www.googleapis.com/auth/bigtable.admin.cluster', - 'https://www.googleapis.com/auth/bigtable.admin.instance', - 'https://www.googleapis.com/auth/bigtable.admin.table', - 'https://www.googleapis.com/auth/cloud-bigtable.admin', - 'https://www.googleapis.com/auth/cloud-bigtable.admin.cluster', - 'https://www.googleapis.com/auth/cloud-bigtable.admin.table', - 'https://www.googleapis.com/auth/cloud-platform', - 'https://www.googleapis.com/auth/cloud-platform.read-only', + "https://www.googleapis.com/auth/bigtable.admin", + "https://www.googleapis.com/auth/bigtable.admin.cluster", + "https://www.googleapis.com/auth/bigtable.admin.instance", + "https://www.googleapis.com/auth/bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-bigtable.admin", + "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster", + "https://www.googleapis.com/auth/cloud-bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", ) - def __init__(self, - channel=None, - credentials=None, - address='bigtableadmin.googleapis.com:443'): + def __init__( + self, channel=None, credentials=None, address="bigtableadmin.googleapis.com:443" + ): """Instantiate the transport class. Args: @@ -63,35 +63,34 @@ def __init__(self, # exception (channels come with credentials baked in already). if channel is not None and credentials is not None: raise ValueError( - 'The `channel` and `credentials` arguments are mutually ' - 'exclusive.', ) + "The `channel` and `credentials` arguments are mutually " "exclusive." + ) # Create the channel. if channel is None: - channel = self.create_channel( - address=address, - credentials=credentials, - ) + channel = self.create_channel(address=address, credentials=credentials) self._channel = channel # gRPC uses objects called "stubs" that are bound to the # channel and provide a basic method for each RPC. self._stubs = { - 'bigtable_table_admin_stub': - bigtable_table_admin_pb2_grpc.BigtableTableAdminStub(channel), + "bigtable_table_admin_stub": bigtable_table_admin_pb2_grpc.BigtableTableAdminStub( + channel + ) } # Because this API includes a method that returns a # long-running operation (proto: google.longrunning.Operation), # instantiate an LRO client. self._operations_client = google.api_core.operations_v1.OperationsClient( - channel) + channel + ) @classmethod - def create_channel(cls, - address='bigtableadmin.googleapis.com:443', - credentials=None): + def create_channel( + cls, address="bigtableadmin.googleapis.com:443", credentials=None + ): """Create and return a gRPC channel object. Args: @@ -110,8 +109,8 @@ def create_channel(cls, credentials=credentials, scopes=cls._OAUTH_SCOPES, options={ - 'grpc.max_send_message_length': -1, - 'grpc.max_receive_message_length': -1, + "grpc.max_send_message_length": -1, + "grpc.max_receive_message_length": -1, }.items(), ) @@ -137,7 +136,7 @@ def create_table(self): deserialized request object and returns a deserialized response object. """ - return self._stubs['bigtable_table_admin_stub'].CreateTable + return self._stubs["bigtable_table_admin_stub"].CreateTable @property def create_table_from_snapshot(self): @@ -157,7 +156,7 @@ def create_table_from_snapshot(self): deserialized request object and returns a deserialized response object. 
""" - return self._stubs['bigtable_table_admin_stub'].CreateTableFromSnapshot + return self._stubs["bigtable_table_admin_stub"].CreateTableFromSnapshot @property def list_tables(self): @@ -170,7 +169,7 @@ def list_tables(self): deserialized request object and returns a deserialized response object. """ - return self._stubs['bigtable_table_admin_stub'].ListTables + return self._stubs["bigtable_table_admin_stub"].ListTables @property def get_table(self): @@ -183,7 +182,7 @@ def get_table(self): deserialized request object and returns a deserialized response object. """ - return self._stubs['bigtable_table_admin_stub'].GetTable + return self._stubs["bigtable_table_admin_stub"].GetTable @property def delete_table(self): @@ -196,7 +195,7 @@ def delete_table(self): deserialized request object and returns a deserialized response object. """ - return self._stubs['bigtable_table_admin_stub'].DeleteTable + return self._stubs["bigtable_table_admin_stub"].DeleteTable @property def modify_column_families(self): @@ -212,7 +211,7 @@ def modify_column_families(self): deserialized request object and returns a deserialized response object. """ - return self._stubs['bigtable_table_admin_stub'].ModifyColumnFamilies + return self._stubs["bigtable_table_admin_stub"].ModifyColumnFamilies @property def drop_row_range(self): @@ -227,7 +226,7 @@ def drop_row_range(self): deserialized request object and returns a deserialized response object. """ - return self._stubs['bigtable_table_admin_stub'].DropRowRange + return self._stubs["bigtable_table_admin_stub"].DropRowRange @property def generate_consistency_token(self): @@ -243,8 +242,7 @@ def generate_consistency_token(self): deserialized request object and returns a deserialized response object. """ - return self._stubs[ - 'bigtable_table_admin_stub'].GenerateConsistencyToken + return self._stubs["bigtable_table_admin_stub"].GenerateConsistencyToken @property def check_consistency(self): @@ -259,7 +257,7 @@ def check_consistency(self): deserialized request object and returns a deserialized response object. """ - return self._stubs['bigtable_table_admin_stub'].CheckConsistency + return self._stubs["bigtable_table_admin_stub"].CheckConsistency @property def snapshot_table(self): @@ -279,7 +277,7 @@ def snapshot_table(self): deserialized request object and returns a deserialized response object. """ - return self._stubs['bigtable_table_admin_stub'].SnapshotTable + return self._stubs["bigtable_table_admin_stub"].SnapshotTable @property def get_snapshot(self): @@ -298,7 +296,7 @@ def get_snapshot(self): deserialized request object and returns a deserialized response object. """ - return self._stubs['bigtable_table_admin_stub'].GetSnapshot + return self._stubs["bigtable_table_admin_stub"].GetSnapshot @property def list_snapshots(self): @@ -317,7 +315,7 @@ def list_snapshots(self): deserialized request object and returns a deserialized response object. """ - return self._stubs['bigtable_table_admin_stub'].ListSnapshots + return self._stubs["bigtable_table_admin_stub"].ListSnapshots @property def delete_snapshot(self): @@ -336,4 +334,4 @@ def delete_snapshot(self): deserialized request object and returns a deserialized response object. 
""" - return self._stubs['bigtable_table_admin_stub'].DeleteSnapshot + return self._stubs["bigtable_table_admin_stub"].DeleteSnapshot diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types.py index e67c6f585fbc..ccdda74aeb21 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types.py @@ -66,7 +66,7 @@ names.append(name) for module in _local_modules: for name, message in get_messages(module).items(): - message.__module__ = 'google.cloud.bigtable_admin_v2.types' + message.__module__ = "google.cloud.bigtable_admin_v2.types" setattr(sys.modules[__name__], name, message) names.append(name) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py index bedc50962146..cbd017f4b625 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py @@ -24,7 +24,4 @@ class BigtableClient(bigtable_client.BigtableClient): __doc__ = bigtable_client.BigtableClient.__doc__ -__all__ = ( - 'types', - 'BigtableClient', -) +__all__ = ("types", "BigtableClient") diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py index 64adb352f1c8..783830f18066 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py @@ -33,19 +33,18 @@ from google.cloud.bigtable_v2.proto import bigtable_pb2_grpc from google.cloud.bigtable_v2.proto import data_pb2 -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( - 'google-cloud-bigtable', ).version +_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-bigtable").version class BigtableClient(object): """Service for reading from and writing to existing Bigtable tables.""" - SERVICE_ADDRESS = 'bigtable.googleapis.com:443' + SERVICE_ADDRESS = "bigtable.googleapis.com:443" """The default address of the service.""" # The name of the interface for this client. This is the key used to # find the method configuration in the client_config dictionary. - _INTERFACE_NAME = 'google.bigtable.v2.Bigtable' + _INTERFACE_NAME = "google.bigtable.v2.Bigtable" @classmethod def from_service_account_file(cls, filename, *args, **kwargs): @@ -61,9 +60,8 @@ def from_service_account_file(cls, filename, *args, **kwargs): Returns: BigtableClient: The constructed client. 
""" - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs['credentials'] = credentials + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials return cls(*args, **kwargs) from_service_account_json = from_service_account_file @@ -72,18 +70,20 @@ def from_service_account_file(cls, filename, *args, **kwargs): def table_path(cls, project, instance, table): """Return a fully-qualified table string.""" return google.api_core.path_template.expand( - 'projects/{project}/instances/{instance}/tables/{table}', + "projects/{project}/instances/{instance}/tables/{table}", project=project, instance=instance, table=table, ) - def __init__(self, - transport=None, - channel=None, - credentials=None, - client_config=None, - client_info=None): + def __init__( + self, + transport=None, + channel=None, + credentials=None, + client_config=None, + client_info=None, + ): """Constructor. Args: @@ -117,18 +117,19 @@ def __init__(self, # Raise deprecation warnings for things we want to go away. if client_config is not None: warnings.warn( - 'The `client_config` argument is deprecated.', + "The `client_config` argument is deprecated.", PendingDeprecationWarning, - stacklevel=2) + stacklevel=2, + ) else: client_config = bigtable_client_config.config if channel: warnings.warn( - 'The `channel` argument is deprecated; use ' - '`transport` instead.', + "The `channel` argument is deprecated; use " "`transport` instead.", PendingDeprecationWarning, - stacklevel=2) + stacklevel=2, + ) # Instantiate the transport. # The transport is responsible for handling serialization and @@ -137,25 +138,24 @@ def __init__(self, if callable(transport): self.transport = transport( credentials=credentials, - default_class=bigtable_grpc_transport. - BigtableGrpcTransport, + default_class=bigtable_grpc_transport.BigtableGrpcTransport, ) else: if credentials: raise ValueError( - 'Received both a transport instance and ' - 'credentials; these are mutually exclusive.') + "Received both a transport instance and " + "credentials; these are mutually exclusive." + ) self.transport = transport else: self.transport = bigtable_grpc_transport.BigtableGrpcTransport( - address=self.SERVICE_ADDRESS, - channel=channel, - credentials=credentials, + address=self.SERVICE_ADDRESS, channel=channel, credentials=credentials ) if client_info is None: client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION, ) + gapic_version=_GAPIC_LIBRARY_VERSION + ) else: client_info.gapic_version = _GAPIC_LIBRARY_VERSION self._client_info = client_info @@ -165,7 +165,8 @@ def __init__(self, # (Ordinarily, these are the defaults specified in the `*_config.py` # file next to this one.) self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config['interfaces'][self._INTERFACE_NAME], ) + client_config["interfaces"][self._INTERFACE_NAME] + ) # Save a dictionary of cached API call functions. 
# These are the actual callables which invoke the proper @@ -174,15 +175,17 @@ def __init__(self, self._inner_api_calls = {} # Service calls - def read_rows(self, - table_name, - app_profile_id=None, - rows=None, - filter_=None, - rows_limit=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None): + def read_rows( + self, + table_name, + app_profile_id=None, + rows=None, + filter_=None, + rows_limit=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): """ Streams back the contents of all requested rows in key order, optionally applying the same Reader filter to each. Depending on their size, @@ -237,14 +240,15 @@ def read_rows(self, ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if 'read_rows' not in self._inner_api_calls: + if "read_rows" not in self._inner_api_calls: self._inner_api_calls[ - 'read_rows'] = google.api_core.gapic_v1.method.wrap_method( - self.transport.read_rows, - default_retry=self._method_configs['ReadRows'].retry, - default_timeout=self._method_configs['ReadRows'].timeout, - client_info=self._client_info, - ) + "read_rows" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.read_rows, + default_retry=self._method_configs["ReadRows"].retry, + default_timeout=self._method_configs["ReadRows"].timeout, + client_info=self._client_info, + ) request = bigtable_pb2.ReadRowsRequest( table_name=table_name, @@ -257,23 +261,27 @@ def read_rows(self, metadata = [] metadata = list(metadata) try: - routing_header = [('table_name', table_name)] + routing_header = [("table_name", table_name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header) + routing_header + ) metadata.append(routing_metadata) - return self._inner_api_calls['read_rows']( - request, retry=retry, timeout=timeout, metadata=metadata) + return self._inner_api_calls["read_rows"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) - def sample_row_keys(self, - table_name, - app_profile_id=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None): + def sample_row_keys( + self, + table_name, + app_profile_id=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): """ Returns a sample of row keys in the table. The returned row keys will delimit contiguous sections of the table of approximately equal size, @@ -316,43 +324,46 @@ def sample_row_keys(self, ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if 'sample_row_keys' not in self._inner_api_calls: + if "sample_row_keys" not in self._inner_api_calls: self._inner_api_calls[ - 'sample_row_keys'] = google.api_core.gapic_v1.method.wrap_method( - self.transport.sample_row_keys, - default_retry=self._method_configs['SampleRowKeys'].retry, - default_timeout=self._method_configs['SampleRowKeys']. 
- timeout, - client_info=self._client_info, - ) + "sample_row_keys" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.sample_row_keys, + default_retry=self._method_configs["SampleRowKeys"].retry, + default_timeout=self._method_configs["SampleRowKeys"].timeout, + client_info=self._client_info, + ) request = bigtable_pb2.SampleRowKeysRequest( - table_name=table_name, - app_profile_id=app_profile_id, + table_name=table_name, app_profile_id=app_profile_id ) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [('table_name', table_name)] + routing_header = [("table_name", table_name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header) + routing_header + ) metadata.append(routing_metadata) - return self._inner_api_calls['sample_row_keys']( - request, retry=retry, timeout=timeout, metadata=metadata) - - def mutate_row(self, - table_name, - row_key, - mutations, - app_profile_id=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None): + return self._inner_api_calls["sample_row_keys"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + + def mutate_row( + self, + table_name, + row_key, + mutations, + app_profile_id=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): """ Mutates a row atomically. Cells already present in the row are left unchanged unless explicitly changed by ``mutation``. @@ -405,14 +416,15 @@ def mutate_row(self, ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if 'mutate_row' not in self._inner_api_calls: + if "mutate_row" not in self._inner_api_calls: self._inner_api_calls[ - 'mutate_row'] = google.api_core.gapic_v1.method.wrap_method( - self.transport.mutate_row, - default_retry=self._method_configs['MutateRow'].retry, - default_timeout=self._method_configs['MutateRow'].timeout, - client_info=self._client_info, - ) + "mutate_row" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.mutate_row, + default_retry=self._method_configs["MutateRow"].retry, + default_timeout=self._method_configs["MutateRow"].timeout, + client_info=self._client_info, + ) request = bigtable_pb2.MutateRowRequest( table_name=table_name, @@ -424,24 +436,28 @@ def mutate_row(self, metadata = [] metadata = list(metadata) try: - routing_header = [('table_name', table_name)] + routing_header = [("table_name", table_name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header) + routing_header + ) metadata.append(routing_metadata) - return self._inner_api_calls['mutate_row']( - request, retry=retry, timeout=timeout, metadata=metadata) + return self._inner_api_calls["mutate_row"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) - def mutate_rows(self, - table_name, - entries, - app_profile_id=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None): + def mutate_rows( + self, + table_name, + entries, + app_profile_id=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): """ Mutates multiple rows in a batch. 
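# Usage sketch (placeholder ids; the set_cell dict is an assumed example payload):
# read_rows above returns a server stream of ReadRowsResponse messages, and
# mutate_row applies its mutations to a single row atomically.
from google.cloud import bigtable_v2

client = bigtable_v2.BigtableClient()
table_name = client.table_path("my-project", "my-instance", "my-table")

for response in client.read_rows(table_name, rows_limit=10):
    for chunk in response.chunks:  # CellChunk pieces; callers reassemble rows
        print(chunk.row_key, chunk.value)

mutation = {
    "set_cell": {
        "family_name": "cf1",
        "column_qualifier": b"greeting",
        "timestamp_micros": -1,  # -1 asks the server to use its current time
        "value": b"hello",
    }
}
client.mutate_row(table_name, b"row-key", [mutation])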
Each individual row is mutated atomically as in MutateRow, but the entire batch is not executed @@ -493,45 +509,48 @@ def mutate_rows(self, ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if 'mutate_rows' not in self._inner_api_calls: + if "mutate_rows" not in self._inner_api_calls: self._inner_api_calls[ - 'mutate_rows'] = google.api_core.gapic_v1.method.wrap_method( - self.transport.mutate_rows, - default_retry=self._method_configs['MutateRows'].retry, - default_timeout=self._method_configs['MutateRows'].timeout, - client_info=self._client_info, - ) + "mutate_rows" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.mutate_rows, + default_retry=self._method_configs["MutateRows"].retry, + default_timeout=self._method_configs["MutateRows"].timeout, + client_info=self._client_info, + ) request = bigtable_pb2.MutateRowsRequest( - table_name=table_name, - entries=entries, - app_profile_id=app_profile_id, + table_name=table_name, entries=entries, app_profile_id=app_profile_id ) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [('table_name', table_name)] + routing_header = [("table_name", table_name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header) + routing_header + ) metadata.append(routing_metadata) - return self._inner_api_calls['mutate_rows']( - request, retry=retry, timeout=timeout, metadata=metadata) - - def check_and_mutate_row(self, - table_name, - row_key, - app_profile_id=None, - predicate_filter=None, - true_mutations=None, - false_mutations=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None): + return self._inner_api_calls["mutate_rows"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + + def check_and_mutate_row( + self, + table_name, + row_key, + app_profile_id=None, + predicate_filter=None, + true_mutations=None, + false_mutations=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): """ Mutates a row atomically based on the output of a predicate Reader filter. @@ -597,16 +616,15 @@ def check_and_mutate_row(self, ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if 'check_and_mutate_row' not in self._inner_api_calls: + if "check_and_mutate_row" not in self._inner_api_calls: self._inner_api_calls[ - 'check_and_mutate_row'] = google.api_core.gapic_v1.method.wrap_method( - self.transport.check_and_mutate_row, - default_retry=self._method_configs['CheckAndMutateRow']. - retry, - default_timeout=self._method_configs['CheckAndMutateRow']. 
- timeout, - client_info=self._client_info, - ) + "check_and_mutate_row" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.check_and_mutate_row, + default_retry=self._method_configs["CheckAndMutateRow"].retry, + default_timeout=self._method_configs["CheckAndMutateRow"].timeout, + client_info=self._client_info, + ) request = bigtable_pb2.CheckAndMutateRowRequest( table_name=table_name, @@ -620,25 +638,29 @@ def check_and_mutate_row(self, metadata = [] metadata = list(metadata) try: - routing_header = [('table_name', table_name)] + routing_header = [("table_name", table_name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header) + routing_header + ) metadata.append(routing_metadata) - return self._inner_api_calls['check_and_mutate_row']( - request, retry=retry, timeout=timeout, metadata=metadata) - - def read_modify_write_row(self, - table_name, - row_key, - rules, - app_profile_id=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None): + return self._inner_api_calls["check_and_mutate_row"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + + def read_modify_write_row( + self, + table_name, + row_key, + rules, + app_profile_id=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): """ Modifies a row atomically on the server. The method reads the latest existing timestamp and value from the specified columns and writes a new @@ -694,16 +716,15 @@ def read_modify_write_row(self, ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if 'read_modify_write_row' not in self._inner_api_calls: + if "read_modify_write_row" not in self._inner_api_calls: self._inner_api_calls[ - 'read_modify_write_row'] = google.api_core.gapic_v1.method.wrap_method( - self.transport.read_modify_write_row, - default_retry=self._method_configs['ReadModifyWriteRow']. - retry, - default_timeout=self._method_configs['ReadModifyWriteRow']. 
- timeout, - client_info=self._client_info, - ) + "read_modify_write_row" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.read_modify_write_row, + default_retry=self._method_configs["ReadModifyWriteRow"].retry, + default_timeout=self._method_configs["ReadModifyWriteRow"].timeout, + client_info=self._client_info, + ) request = bigtable_pb2.ReadModifyWriteRowRequest( table_name=table_name, @@ -715,13 +736,15 @@ def read_modify_write_row(self, metadata = [] metadata = list(metadata) try: - routing_header = [('table_name', table_name)] + routing_header = [("table_name", table_name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header) + routing_header + ) metadata.append(routing_metadata) - return self._inner_api_calls['read_modify_write_row']( - request, retry=retry, timeout=timeout, metadata=metadata) + return self._inner_api_calls["read_modify_write_row"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client_config.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client_config.py index 04e214427b08..04a7a2572e6b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client_config.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client_config.py @@ -3,7 +3,7 @@ "google.bigtable.v2.Bigtable": { "retry_codes": { "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], - "non_idempotent": [] + "non_idempotent": [], }, "retry_params": { "default": { @@ -13,7 +13,7 @@ "initial_rpc_timeout_millis": 20000, "rpc_timeout_multiplier": 1.0, "max_rpc_timeout_millis": 20000, - "total_timeout_millis": 600000 + "total_timeout_millis": 600000, }, "streaming": { "initial_retry_delay_millis": 100, @@ -22,41 +22,41 @@ "initial_rpc_timeout_millis": 20000, "rpc_timeout_multiplier": 1.0, "max_rpc_timeout_millis": 20000, - "total_timeout_millis": 3600000 - } + "total_timeout_millis": 3600000, + }, }, "methods": { "ReadRows": { "timeout_millis": 3600000, "retry_codes_name": "idempotent", - "retry_params_name": "streaming" + "retry_params_name": "streaming", }, "SampleRowKeys": { "timeout_millis": 60000, "retry_codes_name": "idempotent", - "retry_params_name": "default" + "retry_params_name": "default", }, "MutateRow": { "timeout_millis": 60000, "retry_codes_name": "idempotent", - "retry_params_name": "default" + "retry_params_name": "default", }, "MutateRows": { "timeout_millis": 60000, "retry_codes_name": "idempotent", - "retry_params_name": "default" + "retry_params_name": "default", }, "CheckAndMutateRow": { "timeout_millis": 60000, "retry_codes_name": "non_idempotent", - "retry_params_name": "default" + "retry_params_name": "default", }, "ReadModifyWriteRow": { "timeout_millis": 60000, "retry_codes_name": "non_idempotent", - "retry_params_name": "default" - } - } + "retry_params_name": "default", + }, + }, } } } diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py index 6d3577105f37..d9fa267e5da8 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py @@ -27,21 +27,21 @@ class BigtableGrpcTransport(object): which can 
be used to take advantage of advanced features of gRPC. """ + # The scopes needed to make gRPC calls to all of the methods defined # in this service. _OAUTH_SCOPES = ( - 'https://www.googleapis.com/auth/bigtable.data', - 'https://www.googleapis.com/auth/bigtable.data.readonly', - 'https://www.googleapis.com/auth/cloud-bigtable.data', - 'https://www.googleapis.com/auth/cloud-bigtable.data.readonly', - 'https://www.googleapis.com/auth/cloud-platform', - 'https://www.googleapis.com/auth/cloud-platform.read-only', + "https://www.googleapis.com/auth/bigtable.data", + "https://www.googleapis.com/auth/bigtable.data.readonly", + "https://www.googleapis.com/auth/cloud-bigtable.data", + "https://www.googleapis.com/auth/cloud-bigtable.data.readonly", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", ) - def __init__(self, - channel=None, - credentials=None, - address='bigtable.googleapis.com:443'): + def __init__( + self, channel=None, credentials=None, address="bigtable.googleapis.com:443" + ): """Instantiate the transport class. Args: @@ -59,28 +59,21 @@ def __init__(self, # exception (channels come with credentials baked in already). if channel is not None and credentials is not None: raise ValueError( - 'The `channel` and `credentials` arguments are mutually ' - 'exclusive.', ) + "The `channel` and `credentials` arguments are mutually " "exclusive." + ) # Create the channel. if channel is None: - channel = self.create_channel( - address=address, - credentials=credentials, - ) + channel = self.create_channel(address=address, credentials=credentials) self._channel = channel # gRPC uses objects called "stubs" that are bound to the # channel and provide a basic method for each RPC. - self._stubs = { - 'bigtable_stub': bigtable_pb2_grpc.BigtableStub(channel), - } + self._stubs = {"bigtable_stub": bigtable_pb2_grpc.BigtableStub(channel)} @classmethod - def create_channel(cls, - address='bigtable.googleapis.com:443', - credentials=None): + def create_channel(cls, address="bigtable.googleapis.com:443", credentials=None): """Create and return a gRPC channel object. Args: @@ -99,8 +92,8 @@ def create_channel(cls, credentials=credentials, scopes=cls._OAUTH_SCOPES, options={ - 'grpc.max_send_message_length': -1, - 'grpc.max_receive_message_length': -1, + "grpc.max_send_message_length": -1, + "grpc.max_receive_message_length": -1, }.items(), ) @@ -128,7 +121,7 @@ def read_rows(self): deserialized request object and returns a deserialized response object. """ - return self._stubs['bigtable_stub'].ReadRows + return self._stubs["bigtable_stub"].ReadRows @property def sample_row_keys(self): @@ -144,7 +137,7 @@ def sample_row_keys(self): deserialized request object and returns a deserialized response object. """ - return self._stubs['bigtable_stub'].SampleRowKeys + return self._stubs["bigtable_stub"].SampleRowKeys @property def mutate_row(self): @@ -158,7 +151,7 @@ def mutate_row(self): deserialized request object and returns a deserialized response object. """ - return self._stubs['bigtable_stub'].MutateRow + return self._stubs["bigtable_stub"].MutateRow @property def mutate_rows(self): @@ -173,7 +166,7 @@ def mutate_rows(self): deserialized request object and returns a deserialized response object. 
""" - return self._stubs['bigtable_stub'].MutateRows + return self._stubs["bigtable_stub"].MutateRows @property def check_and_mutate_row(self): @@ -186,7 +179,7 @@ def check_and_mutate_row(self): deserialized request object and returns a deserialized response object. """ - return self._stubs['bigtable_stub'].CheckAndMutateRow + return self._stubs["bigtable_stub"].CheckAndMutateRow @property def read_modify_write_row(self): @@ -203,4 +196,4 @@ def read_modify_write_row(self): deserialized request object and returns a deserialized response object. """ - return self._stubs['bigtable_stub'].ReadModifyWriteRow + return self._stubs["bigtable_stub"].ReadModifyWriteRow diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types.py index 15db79060479..4dafb23b2d6a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types.py @@ -27,18 +27,9 @@ from google.protobuf import wrappers_pb2 from google.rpc import status_pb2 -_shared_modules = [ - http_pb2, - any_pb2, - descriptor_pb2, - wrappers_pb2, - status_pb2, -] - -_local_modules = [ - bigtable_pb2, - data_pb2, -] +_shared_modules = [http_pb2, any_pb2, descriptor_pb2, wrappers_pb2, status_pb2] + +_local_modules = [bigtable_pb2, data_pb2] names = [] @@ -48,7 +39,7 @@ names.append(name) for module in _local_modules: for name, message in get_messages(module).items(): - message.__module__ = 'google.cloud.bigtable_v2.types' + message.__module__ = "google.cloud.bigtable_v2.types" setattr(sys.modules[__name__], name, message) names.append(name) diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index 63d8200d1cbe..754da098226c 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -39,27 +39,29 @@ from test_utils.system import EmulatorCreds from test_utils.system import unique_resource_id -LOCATION_ID = 'us-central1-c' -INSTANCE_ID = 'g-c-p' + unique_resource_id('-') -TABLE_ID = 'google-cloud-python-test-table' -CLUSTER_ID = INSTANCE_ID+'-cluster' +LOCATION_ID = "us-central1-c" +INSTANCE_ID = "g-c-p" + unique_resource_id("-") +TABLE_ID = "google-cloud-python-test-table" +CLUSTER_ID = INSTANCE_ID + "-cluster" SERVE_NODES = 3 -COLUMN_FAMILY_ID1 = u'col-fam-id1' -COLUMN_FAMILY_ID2 = u'col-fam-id2' -COL_NAME1 = b'col-name1' -COL_NAME2 = b'col-name2' -COL_NAME3 = b'col-name3-but-other-fam' -CELL_VAL1 = b'cell-val' -CELL_VAL2 = b'cell-val-newer' -CELL_VAL3 = b'altcol-cell-val' -CELL_VAL4 = b'foo' -ROW_KEY = b'row-key' -ROW_KEY_ALT = b'row-key-alt' +COLUMN_FAMILY_ID1 = u"col-fam-id1" +COLUMN_FAMILY_ID2 = u"col-fam-id2" +COL_NAME1 = b"col-name1" +COL_NAME2 = b"col-name2" +COL_NAME3 = b"col-name3-but-other-fam" +CELL_VAL1 = b"cell-val" +CELL_VAL2 = b"cell-val-newer" +CELL_VAL3 = b"altcol-cell-val" +CELL_VAL4 = b"foo" +ROW_KEY = b"row-key" +ROW_KEY_ALT = b"row-key-alt" EXISTING_INSTANCES = [] -LABEL_KEY = u'python-system' -label_stamp = datetime.datetime.utcnow() \ - .replace(microsecond=0, tzinfo=UTC,) \ - .strftime("%Y-%m-%dt%H-%M-%S") +LABEL_KEY = u"python-system" +label_stamp = ( + datetime.datetime.utcnow() + .replace(microsecond=0, tzinfo=UTC) + .strftime("%Y-%m-%dt%H-%M-%S") +) LABELS = {LABEL_KEY: str(label_stamp)} @@ -69,6 +71,7 @@ class Config(object): This is a mutable stand-in to allow test set-up to modify global state. 
""" + CLIENT = None INSTANCE = None CLUSTER = None @@ -78,6 +81,7 @@ class Config(object): def _retry_on_unavailable(exc): """Retry only errors whose status code is 'UNAVAILABLE'.""" from grpc import StatusCode + return exc.code() == StatusCode.UNAVAILABLE @@ -97,15 +101,15 @@ def setUpModule(): Config.INSTANCE = Config.CLIENT.instance(INSTANCE_ID, labels=LABELS) Config.CLUSTER = Config.INSTANCE.cluster( - CLUSTER_ID, location_id=LOCATION_ID, serve_nodes=SERVE_NODES) + CLUSTER_ID, location_id=LOCATION_ID, serve_nodes=SERVE_NODES + ) if not Config.IN_EMULATOR: - retry = RetryErrors(GrpcRendezvous, - error_predicate=_retry_on_unavailable) + retry = RetryErrors(GrpcRendezvous, error_predicate=_retry_on_unavailable) instances, failed_locations = retry(Config.CLIENT.list_instances)() if len(failed_locations) != 0: - raise ValueError('List instances failed in module set up.') + raise ValueError("List instances failed in module set up.") EXISTING_INSTANCES[:] = instances @@ -120,11 +124,9 @@ def tearDownModule(): class TestInstanceAdminAPI(unittest.TestCase): - def setUp(self): if Config.IN_EMULATOR: - self.skipTest( - 'Instance Admin API not supported in Bigtable emulator') + self.skipTest("Instance Admin API not supported in Bigtable emulator") self.instances_to_delete = [] def tearDown(self): @@ -141,6 +143,7 @@ def test_list_instances(self): def test_reload(self): from google.cloud.bigtable import enums + # Use same arguments as Config.INSTANCE (created in `setUpModule`) # so we can use reload() on a fresh instance. alt_instance = Config.CLIENT.instance(INSTANCE_ID) @@ -148,19 +151,19 @@ def test_reload(self): alt_instance.display_name = None alt_instance.reload() - self.assertEqual(alt_instance.display_name, - Config.INSTANCE.display_name) + self.assertEqual(alt_instance.display_name, Config.INSTANCE.display_name) self.assertEqual(alt_instance.labels, Config.INSTANCE.labels) self.assertEqual(alt_instance.type_, enums.Instance.Type.PRODUCTION) def test_create_instance_defaults(self): from google.cloud.bigtable import enums - ALT_INSTANCE_ID = 'ndef' + unique_resource_id('-') + ALT_INSTANCE_ID = "ndef" + unique_resource_id("-") instance = Config.CLIENT.instance(ALT_INSTANCE_ID, labels=LABELS) - ALT_CLUSTER_ID = ALT_INSTANCE_ID+'-cluster' + ALT_CLUSTER_ID = ALT_INSTANCE_ID + "-cluster" cluster = instance.cluster( - ALT_CLUSTER_ID, location_id=LOCATION_ID, serve_nodes=SERVE_NODES) + ALT_CLUSTER_ID, location_id=LOCATION_ID, serve_nodes=SERVE_NODES + ) operation = instance.create(clusters=[cluster]) # We want to make sure the operation completes. operation.result(timeout=10) @@ -180,14 +183,15 @@ def test_create_instance_defaults(self): def test_create_instance(self): from google.cloud.bigtable import enums + _DEVELOPMENT = enums.Instance.Type.DEVELOPMENT _STATE = enums.Instance.State.READY - ALT_INSTANCE_ID = 'new' + unique_resource_id('-') - instance = Config.CLIENT.instance(ALT_INSTANCE_ID, - instance_type=_DEVELOPMENT, - labels=LABELS) - ALT_CLUSTER_ID = ALT_INSTANCE_ID+'-cluster' + ALT_INSTANCE_ID = "new" + unique_resource_id("-") + instance = Config.CLIENT.instance( + ALT_INSTANCE_ID, instance_type=_DEVELOPMENT, labels=LABELS + ) + ALT_CLUSTER_ID = ALT_INSTANCE_ID + "-cluster" cluster = instance.cluster(ALT_CLUSTER_ID, location_id=LOCATION_ID) operation = instance.create(clusters=[cluster]) # We want to make sure the operation completes. 
@@ -207,7 +211,7 @@ def test_create_instance(self): self.assertEqual(_STATE, instance_alt.state) def test_cluster_exists(self): - NONEXISTING_CLUSTER_ID = 'cluster-id' + NONEXISTING_CLUSTER_ID = "cluster-id" cluster = Config.INSTANCE.cluster(CLUSTER_ID) alt_cluster = Config.INSTANCE.cluster(NONEXISTING_CLUSTER_ID) @@ -215,7 +219,7 @@ def test_cluster_exists(self): self.assertFalse(alt_cluster.exists()) def test_instance_exists(self): - NONEXISTING_INSTANCE_ID = 'instancer-id' + NONEXISTING_INSTANCE_ID = "instancer-id" alt_instance = Config.CLIENT.instance(NONEXISTING_INSTANCE_ID) self.assertTrue(Config.INSTANCE.exists()) @@ -224,22 +228,29 @@ def test_instance_exists(self): def test_create_instance_w_two_clusters(self): from google.cloud.bigtable import enums from google.cloud.bigtable.table import ClusterState + _PRODUCTION = enums.Instance.Type.PRODUCTION - ALT_INSTANCE_ID = 'dif' + unique_resource_id('-') - instance = Config.CLIENT.instance(ALT_INSTANCE_ID, - instance_type=_PRODUCTION, - labels=LABELS) - - ALT_CLUSTER_ID_1 = ALT_INSTANCE_ID + '-c1' - ALT_CLUSTER_ID_2 = ALT_INSTANCE_ID + '-c2' - LOCATION_ID_2 = 'us-central1-f' + ALT_INSTANCE_ID = "dif" + unique_resource_id("-") + instance = Config.CLIENT.instance( + ALT_INSTANCE_ID, instance_type=_PRODUCTION, labels=LABELS + ) + + ALT_CLUSTER_ID_1 = ALT_INSTANCE_ID + "-c1" + ALT_CLUSTER_ID_2 = ALT_INSTANCE_ID + "-c2" + LOCATION_ID_2 = "us-central1-f" STORAGE_TYPE = enums.StorageType.HDD cluster_1 = instance.cluster( - ALT_CLUSTER_ID_1, location_id=LOCATION_ID, serve_nodes=SERVE_NODES, - default_storage_type=STORAGE_TYPE) + ALT_CLUSTER_ID_1, + location_id=LOCATION_ID, + serve_nodes=SERVE_NODES, + default_storage_type=STORAGE_TYPE, + ) cluster_2 = instance.cluster( - ALT_CLUSTER_ID_2, location_id=LOCATION_ID_2, - serve_nodes=SERVE_NODES, default_storage_type=STORAGE_TYPE) + ALT_CLUSTER_ID_2, + location_id=LOCATION_ID_2, + serve_nodes=SERVE_NODES, + default_storage_type=STORAGE_TYPE, + ) operation = instance.create(clusters=[cluster_1, cluster_2]) # We want to make sure the operation completes. 
operation.result(timeout=10) @@ -264,23 +275,27 @@ def test_create_instance_w_two_clusters(self): self.assertEqual(cluster_1.location_id, alt_cluster_1.location_id) self.assertEqual(alt_cluster_1.state, enums.Cluster.State.READY) self.assertEqual(cluster_1.serve_nodes, alt_cluster_1.serve_nodes) - self.assertEqual(cluster_1.default_storage_type, - alt_cluster_1.default_storage_type) + self.assertEqual( + cluster_1.default_storage_type, alt_cluster_1.default_storage_type + ) self.assertEqual(cluster_2.location_id, alt_cluster_2.location_id) self.assertEqual(alt_cluster_2.state, enums.Cluster.State.READY) self.assertEqual(cluster_2.serve_nodes, alt_cluster_2.serve_nodes) - self.assertEqual(cluster_2.default_storage_type, - alt_cluster_2.default_storage_type) + self.assertEqual( + cluster_2.default_storage_type, alt_cluster_2.default_storage_type + ) # Test list clusters in project via 'client.list_clusters' clusters, failed_locations = Config.CLIENT.list_clusters() self.assertFalse(failed_locations) found = set([cluster.name for cluster in clusters]) - self.assertTrue({alt_cluster_1.name, - alt_cluster_2.name, - Config.CLUSTER.name}.issubset(found)) + self.assertTrue( + {alt_cluster_1.name, alt_cluster_2.name, Config.CLUSTER.name}.issubset( + found + ) + ) - temp_table_id = 'test-get-cluster-states' + temp_table_id = "test-get-cluster-states" temp_table = instance.table(temp_table_id) temp_table.create() result = temp_table.get_cluster_states() @@ -290,7 +305,7 @@ def test_create_instance_w_two_clusters(self): ClusterState(ReplicationState.INITIALIZING), ClusterState(ReplicationState.PLANNED_MAINTENANCE), ClusterState(ReplicationState.UNPLANNED_MAINTENANCE), - ClusterState(ReplicationState.READY) + ClusterState(ReplicationState.READY), ] cluster_id_list = result.keys() self.assertEqual(len(cluster_id_list), 2) @@ -301,14 +316,15 @@ def test_create_instance_w_two_clusters(self): # Test create app profile with multi_cluster_routing policy app_profiles_to_delete = [] - description = 'routing policy-multy' - app_profile_id_1 = 'app_profile_id_1' + description = "routing policy-multy" + app_profile_id_1 = "app_profile_id_1" routing = enums.RoutingPolicyType.ANY self._test_create_app_profile_helper( - app_profile_id_1, instance, + app_profile_id_1, + instance, routing_policy_type=routing, description=description, - ignore_warnings=True + ignore_warnings=True, ) app_profiles_to_delete.append(app_profile_id_1) @@ -320,55 +336,66 @@ def test_create_instance_w_two_clusters(self): # cluster -> ALT_CLUSTER_ID_1, # allow_transactional_writes -> disallowed # modify description - description = 'to routing policy-single' + description = "to routing policy-single" routing = enums.RoutingPolicyType.SINGLE self._test_modify_app_profile_helper( - app_profile_id_1, instance, + app_profile_id_1, + instance, routing_policy_type=routing, - description=description, cluster_id=ALT_CLUSTER_ID_1, - allow_transactional_writes=False) + description=description, + cluster_id=ALT_CLUSTER_ID_1, + allow_transactional_writes=False, + ) # Test modify app profile app_profile_id_1 # cluster -> ALT_CLUSTER_ID_2, # allow_transactional_writes -> allowed self._test_modify_app_profile_helper( - app_profile_id_1, instance, + app_profile_id_1, + instance, routing_policy_type=routing, description=description, cluster_id=ALT_CLUSTER_ID_2, allow_transactional_writes=True, - ignore_warnings=True) + ignore_warnings=True, + ) # Test create app profile with single cluster routing policy - description = 'routing policy-single' - 
app_profile_id_2 = 'app_profile_id_2' + description = "routing policy-single" + app_profile_id_2 = "app_profile_id_2" routing = enums.RoutingPolicyType.SINGLE self._test_create_app_profile_helper( - app_profile_id_2, instance, + app_profile_id_2, + instance, routing_policy_type=routing, - description=description, cluster_id=ALT_CLUSTER_ID_2, - allow_transactional_writes=False) + description=description, + cluster_id=ALT_CLUSTER_ID_2, + allow_transactional_writes=False, + ) app_profiles_to_delete.append(app_profile_id_2) # Test list app profiles self._test_list_app_profiles_helper( - instance, [app_profile_id_1, app_profile_id_2]) + instance, [app_profile_id_1, app_profile_id_2] + ) # Test modify app profile app_profile_id_2 to # allow transactional writes # Note: no need to set ``ignore_warnings`` to True # since we are not restrictings anything with this modification. self._test_modify_app_profile_helper( - app_profile_id_2, instance, + app_profile_id_2, + instance, routing_policy_type=routing, description=description, cluster_id=ALT_CLUSTER_ID_2, - allow_transactional_writes=True) + allow_transactional_writes=True, + ) # Test modify app profile app_profile_id_2 routing policy # to multi_cluster_routing policy # modify description - description = 'to routing policy-multy' + description = "to routing policy-multy" routing = enums.RoutingPolicyType.ANY self._test_modify_app_profile_helper( app_profile_id_2, @@ -376,7 +403,8 @@ def test_create_instance_w_two_clusters(self): routing_policy_type=routing, description=description, allow_transactional_writes=False, - ignore_warnings=True) + ignore_warnings=True, + ) # Test delete app profiles for app_profile_id in app_profiles_to_delete: @@ -384,10 +412,12 @@ def test_create_instance_w_two_clusters(self): def test_update_display_name_and_labels(self): OLD_DISPLAY_NAME = Config.INSTANCE.display_name - NEW_DISPLAY_NAME = 'Foo Bar Baz' - n_label_stamp = datetime.datetime.utcnow() \ - .replace(microsecond=0, tzinfo=UTC) \ - .strftime("%Y-%m-%dt%H-%M-%S") + NEW_DISPLAY_NAME = "Foo Bar Baz" + n_label_stamp = ( + datetime.datetime.utcnow() + .replace(microsecond=0, tzinfo=UTC) + .strftime("%Y-%m-%dt%H-%M-%S") + ) NEW_LABELS = {LABEL_KEY: str(n_label_stamp)} Config.INSTANCE.display_name = NEW_DISPLAY_NAME @@ -419,10 +449,10 @@ def test_update_type(self): _DEVELOPMENT = Instance.Type.DEVELOPMENT _PRODUCTION = Instance.Type.PRODUCTION - ALT_INSTANCE_ID = 'ndif' + unique_resource_id('-') - instance = Config.CLIENT.instance(ALT_INSTANCE_ID, - instance_type=_DEVELOPMENT, - labels=LABELS) + ALT_INSTANCE_ID = "ndif" + unique_resource_id("-") + instance = Config.CLIENT.instance( + ALT_INSTANCE_ID, instance_type=_DEVELOPMENT, labels=LABELS + ) operation = instance.create(location_id=LOCATION_ID, serve_nodes=None) # Make sure this instance gets deleted after the test case. 
self.instances_to_delete.append(instance) @@ -470,15 +500,16 @@ def test_create_cluster(self): from google.cloud.bigtable.enums import StorageType from google.cloud.bigtable.enums import Cluster - ALT_CLUSTER_ID = INSTANCE_ID+'-c2' - ALT_LOCATION_ID = 'us-central1-f' + ALT_CLUSTER_ID = INSTANCE_ID + "-c2" + ALT_LOCATION_ID = "us-central1-f" ALT_SERVE_NODES = 4 - cluster_2 = Config.INSTANCE.cluster(ALT_CLUSTER_ID, - location_id=ALT_LOCATION_ID, - serve_nodes=ALT_SERVE_NODES, - default_storage_type=( - StorageType.SSD)) + cluster_2 = Config.INSTANCE.cluster( + ALT_CLUSTER_ID, + location_id=ALT_LOCATION_ID, + serve_nodes=ALT_SERVE_NODES, + default_storage_type=(StorageType.SSD), + ) operation = cluster_2.create() # We want to make sure the operation completes. @@ -492,8 +523,9 @@ def test_create_cluster(self): self.assertEqual(cluster_2.location_id, alt_cluster.location_id) self.assertEqual(alt_cluster.state, Cluster.State.READY) self.assertEqual(cluster_2.serve_nodes, alt_cluster.serve_nodes) - self.assertEqual(cluster_2.default_storage_type, - alt_cluster.default_storage_type) + self.assertEqual( + cluster_2.default_storage_type, alt_cluster.default_storage_type + ) # Delete the newly created cluster and confirm self.assertTrue(cluster_2.exists()) @@ -501,20 +533,26 @@ def test_create_cluster(self): self.assertFalse(cluster_2.exists()) def _test_create_app_profile_helper( - self, app_profile_id, instance, routing_policy_type, - description=None, cluster_id=None, - allow_transactional_writes=None, - ignore_warnings=None): + self, + app_profile_id, + instance, + routing_policy_type, + description=None, + cluster_id=None, + allow_transactional_writes=None, + ignore_warnings=None, + ): app_profile = instance.app_profile( app_profile_id=app_profile_id, routing_policy_type=routing_policy_type, description=description, cluster_id=cluster_id, - allow_transactional_writes=allow_transactional_writes + allow_transactional_writes=allow_transactional_writes, + ) + self.assertEqual( + app_profile.allow_transactional_writes, allow_transactional_writes ) - self.assertEqual(app_profile.allow_transactional_writes, - allow_transactional_writes) app_profile = app_profile.create(ignore_warnings=ignore_warnings) @@ -523,14 +561,10 @@ def _test_create_app_profile_helper( alt_app_profile = instance.app_profile(app_profile_id) alt_app_profile.reload() - self.assertEqual(app_profile.app_profile_id, - alt_app_profile.app_profile_id) - self.assertEqual(app_profile.routing_policy_type, - routing_policy_type) - self.assertEqual(alt_app_profile.routing_policy_type, - routing_policy_type) - self.assertEqual(app_profile.description, - alt_app_profile.description) + self.assertEqual(app_profile.app_profile_id, alt_app_profile.app_profile_id) + self.assertEqual(app_profile.routing_policy_type, routing_policy_type) + self.assertEqual(alt_app_profile.routing_policy_type, routing_policy_type) + self.assertEqual(app_profile.description, alt_app_profile.description) self.assertFalse(app_profile.allow_transactional_writes) self.assertFalse(alt_app_profile.allow_transactional_writes) @@ -541,16 +575,22 @@ def _test_list_app_profiles_helper(self, instance, app_profile_ids): self.assertTrue(app_profile_id in found) def _test_modify_app_profile_helper( - self, app_profile_id, instance, routing_policy_type, - description=None, cluster_id=None, - allow_transactional_writes=None, - ignore_warnings=None): + self, + app_profile_id, + instance, + routing_policy_type, + description=None, + cluster_id=None, + allow_transactional_writes=None, 
+ ignore_warnings=None, + ): app_profile = instance.app_profile( app_profile_id=app_profile_id, routing_policy_type=routing_policy_type, description=description, cluster_id=cluster_id, - allow_transactional_writes=allow_transactional_writes) + allow_transactional_writes=allow_transactional_writes, + ) operation = app_profile.update(ignore_warnings) operation.result(timeout=10) @@ -558,11 +598,11 @@ def _test_modify_app_profile_helper( alt_app_profile = instance.app_profile(app_profile_id) alt_app_profile.reload() self.assertEqual(alt_app_profile.description, description) - self.assertEqual(alt_app_profile.routing_policy_type, - routing_policy_type) + self.assertEqual(alt_app_profile.routing_policy_type, routing_policy_type) self.assertEqual(alt_app_profile.cluster_id, cluster_id) - self.assertEqual(alt_app_profile.allow_transactional_writes, - allow_transactional_writes) + self.assertEqual( + alt_app_profile.allow_transactional_writes, allow_transactional_writes + ) def _test_delete_app_profile_helper(self, app_profile_id, instance): app_profile = instance.app_profile(app_profile_id) @@ -572,7 +612,6 @@ def _test_delete_app_profile_helper(self, app_profile_id, instance): class TestTableAdminAPI(unittest.TestCase): - @classmethod def setUpClass(cls): cls._table = Config.INSTANCE.table(TABLE_ID) @@ -598,7 +637,7 @@ def test_list_tables(self): def test_exists(self): retry_until_true = RetryResult(lambda result: result) retry_until_false = RetryResult(lambda result: not result) - temp_table_id = 'test-table_existence' + temp_table_id = "test-table_existence" temp_table = Config.INSTANCE.table(temp_table_id) self.assertFalse(temp_table.exists()) temp_table.create() @@ -607,13 +646,13 @@ def test_exists(self): self.assertFalse(retry_until_false(temp_table.exists)()) def test_create_table(self): - temp_table_id = 'test-create-table' + temp_table_id = "test-create-table" temp_table = Config.INSTANCE.table(temp_table_id) temp_table.create() self.tables_to_delete.append(temp_table) # First, create a sorted version of our expected result. 
- name_attr = operator.attrgetter('name') + name_attr = operator.attrgetter("name") expected_tables = sorted([temp_table, self._table], key=name_attr) # Then query for the tables in the instance and sort them by @@ -623,7 +662,7 @@ def test_create_table(self): self.assertEqual(sorted_tables, expected_tables) def test_create_table_with_families(self): - temp_table_id = 'test-create-table-with-failies' + temp_table_id = "test-create-table-with-failies" temp_table = Config.INSTANCE.table(temp_table_id) gc_rule = MaxVersionsGCRule(1) temp_table.create(column_families={COLUMN_FAMILY_ID1: gc_rule}) @@ -634,14 +673,12 @@ def test_create_table_with_families(self): self.assertEqual(len(col_fams), 1) retrieved_col_fam = col_fams[COLUMN_FAMILY_ID1] self.assertIs(retrieved_col_fam._table, temp_table) - self.assertEqual(retrieved_col_fam.column_family_id, - COLUMN_FAMILY_ID1) + self.assertEqual(retrieved_col_fam.column_family_id, COLUMN_FAMILY_ID1) self.assertEqual(retrieved_col_fam.gc_rule, gc_rule) def test_create_table_with_split_keys(self): - temp_table_id = 'foo-bar-baz-split-table' - initial_split_keys = [b'split_key_1', b'split_key_10', - b'split_key_20'] + temp_table_id = "foo-bar-baz-split-table" + initial_split_keys = [b"split_key_1", b"split_key_10", b"split_key_20"] temp_table = Config.INSTANCE.table(temp_table_id) temp_table.create(initial_split_keys=initial_split_keys) self.tables_to_delete.append(temp_table) @@ -651,20 +688,19 @@ def test_create_table_with_split_keys(self): actual_keys = [srk.row_key for srk in sample_row_keys] expected_keys = initial_split_keys - expected_keys.append(b'') + expected_keys.append(b"") self.assertEqual(actual_keys, expected_keys) def test_create_column_family(self): - temp_table_id = 'test-create-column-family' + temp_table_id = "test-create-column-family" temp_table = Config.INSTANCE.table(temp_table_id) temp_table.create() self.tables_to_delete.append(temp_table) self.assertEqual(temp_table.list_column_families(), {}) gc_rule = MaxVersionsGCRule(1) - column_family = temp_table.column_family(COLUMN_FAMILY_ID1, - gc_rule=gc_rule) + column_family = temp_table.column_family(COLUMN_FAMILY_ID1, gc_rule=gc_rule) column_family.create() col_fams = temp_table.list_column_families() @@ -672,19 +708,19 @@ def test_create_column_family(self): self.assertEqual(len(col_fams), 1) retrieved_col_fam = col_fams[COLUMN_FAMILY_ID1] self.assertIs(retrieved_col_fam._table, column_family._table) - self.assertEqual(retrieved_col_fam.column_family_id, - column_family.column_family_id) + self.assertEqual( + retrieved_col_fam.column_family_id, column_family.column_family_id + ) self.assertEqual(retrieved_col_fam.gc_rule, gc_rule) def test_update_column_family(self): - temp_table_id = 'test-update-column-family' + temp_table_id = "test-update-column-family" temp_table = Config.INSTANCE.table(temp_table_id) temp_table.create() self.tables_to_delete.append(temp_table) gc_rule = MaxVersionsGCRule(1) - column_family = temp_table.column_family(COLUMN_FAMILY_ID1, - gc_rule=gc_rule) + column_family = temp_table.column_family(COLUMN_FAMILY_ID1, gc_rule=gc_rule) column_family.create() # Check that our created table is as expected. 
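The table-admin tests around this point exercise column families and their garbage-collection rules. A minimal, hedged sketch of that surface, not part of this change, assuming an admin client and an existing instance (names are made up):

    from google.cloud.bigtable import Client
    from google.cloud.bigtable.column_family import MaxVersionsGCRule

    instance = Client(admin=True).instance("example-instance")
    table = instance.table("example-table")

    # A column family can be created together with the table ...
    table.create(column_families={"col-fam-id1": MaxVersionsGCRule(1)})

    # ... or added to an existing table afterwards.
    column_family = table.column_family("col-fam-id2", gc_rule=MaxVersionsGCRule(1))
    column_family.create()

    # list_column_families() returns a dict keyed by family id, which is what
    # the assertions in these tests index into.
    families = table.list_column_families()
    assert set(families) == {"col-fam-id1", "col-fam-id2"}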
@@ -700,7 +736,7 @@ def test_update_column_family(self): self.assertIsNone(col_fams[COLUMN_FAMILY_ID1].gc_rule) def test_delete_column_family(self): - temp_table_id = 'test-delete-column-family' + temp_table_id = "test-delete-column-family" temp_table = Config.INSTANCE.table(temp_table_id) temp_table.create() self.tables_to_delete.append(temp_table) @@ -719,10 +755,9 @@ def test_delete_column_family(self): class TestDataAPI(unittest.TestCase): - @classmethod def setUpClass(cls): - cls._table = table = Config.INSTANCE.table('test-data-api') + cls._table = table = Config.INSTANCE.table("test-data-api") table.create() table.column_family(COLUMN_FAMILY_ID1).create() table.column_family(COLUMN_FAMILY_ID2).create() @@ -752,7 +787,7 @@ def _write_to_row(self, row1=None, row2=None, row3=None, row4=None): timestamp1 = datetime.datetime.utcnow().replace(tzinfo=UTC) timestamp1_micros = _microseconds_from_datetime(timestamp1) # Truncate to millisecond granularity. - timestamp1_micros -= (timestamp1_micros % 1000) + timestamp1_micros -= timestamp1_micros % 1000 timestamp1 = _datetime_from_microseconds(timestamp1_micros) # 1000 microseconds is a millisecond timestamp2 = timestamp1 + datetime.timedelta(microseconds=1000) @@ -763,17 +798,13 @@ def _write_to_row(self, row1=None, row2=None, row3=None, row4=None): timestamp4_micros = _microseconds_from_datetime(timestamp4) if row1 is not None: - row1.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1, - timestamp=timestamp1) + row1.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1, timestamp=timestamp1) if row2 is not None: - row2.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL2, - timestamp=timestamp2) + row2.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL2, timestamp=timestamp2) if row3 is not None: - row3.set_cell(COLUMN_FAMILY_ID1, COL_NAME2, CELL_VAL3, - timestamp=timestamp3) + row3.set_cell(COLUMN_FAMILY_ID1, COL_NAME2, CELL_VAL3, timestamp=timestamp3) if row4 is not None: - row4.set_cell(COLUMN_FAMILY_ID2, COL_NAME3, CELL_VAL4, - timestamp=timestamp4) + row4.set_cell(COLUMN_FAMILY_ID2, COL_NAME3, CELL_VAL4, timestamp=timestamp4) # Create the cells we will check. 
cell1 = Cell(CELL_VAL1, timestamp1_micros) @@ -814,16 +845,26 @@ def test_mutate_rows(self): # Check the contents row1_data = self._table.read_row(ROW_KEY) self.assertEqual( - row1_data.cells[COLUMN_FAMILY_ID1][COL_NAME1][0].value, CELL_VAL3) + row1_data.cells[COLUMN_FAMILY_ID1][COL_NAME1][0].value, CELL_VAL3 + ) row2_data = self._table.read_row(ROW_KEY_ALT) self.assertEqual( - row2_data.cells[COLUMN_FAMILY_ID1][COL_NAME1][0].value, CELL_VAL4) + row2_data.cells[COLUMN_FAMILY_ID1][COL_NAME1][0].value, CELL_VAL4 + ) def test_truncate_table(self): row_keys = [ - b'row_key_1', b'row_key_2', b'row_key_3', b'row_key_4', - b'row_key_5', b'row_key_pr_1', b'row_key_pr_2', b'row_key_pr_3', - b'row_key_pr_4', b'row_key_pr_5'] + b"row_key_1", + b"row_key_2", + b"row_key_3", + b"row_key_4", + b"row_key_5", + b"row_key_pr_1", + b"row_key_pr_2", + b"row_key_pr_3", + b"row_key_pr_4", + b"row_key_pr_5", + ] for row_key in row_keys: row = self._table.row(row_key) @@ -836,13 +877,21 @@ def test_truncate_table(self): read_rows = self._table.yield_rows() for row in read_rows: - self.assertNotIn(row.row_key.decode('utf-8'), row_keys) + self.assertNotIn(row.row_key.decode("utf-8"), row_keys) def test_drop_by_prefix_table(self): row_keys = [ - b'row_key_1', b'row_key_2', b'row_key_3', b'row_key_4', - b'row_key_5', b'row_key_pr_1', b'row_key_pr_2', b'row_key_pr_3', - b'row_key_pr_4', b'row_key_pr_5'] + b"row_key_1", + b"row_key_2", + b"row_key_3", + b"row_key_4", + b"row_key_5", + b"row_key_pr_1", + b"row_key_pr_2", + b"row_key_pr_3", + b"row_key_pr_4", + b"row_key_pr_5", + ] for row_key in row_keys: row = self._table.row(row_key) @@ -850,7 +899,7 @@ def test_drop_by_prefix_table(self): row.commit() self.rows_to_delete.append(row) - self._table.drop_by_prefix(row_key_prefix='row_key_pr', timeout=200) + self._table.drop_by_prefix(row_key_prefix="row_key_pr", timeout=200) read_rows = self._table.yield_rows() expected_rows_count = 5 @@ -864,9 +913,16 @@ def test_drop_by_prefix_table(self): def test_yield_rows_with_row_set(self): row_keys = [ - b'row_key_1', b'row_key_2', b'row_key_3', b'row_key_4', - b'row_key_5', b'row_key_6', b'row_key_7', b'row_key_8', - b'row_key_9'] + b"row_key_1", + b"row_key_2", + b"row_key_3", + b"row_key_4", + b"row_key_5", + b"row_key_6", + b"row_key_7", + b"row_key_8", + b"row_key_9", + ] rows = [] for row_key in row_keys: @@ -877,14 +933,18 @@ def test_yield_rows_with_row_set(self): self._table.mutate_rows(rows) row_set = RowSet() - row_set.add_row_range(RowRange(start_key=b'row_key_3', - end_key=b'row_key_7')) - row_set.add_row_key(b'row_key_1') + row_set.add_row_range(RowRange(start_key=b"row_key_3", end_key=b"row_key_7")) + row_set.add_row_key(b"row_key_1") read_rows = self._table.yield_rows(row_set=row_set) - expected_row_keys = [b'row_key_1', b'row_key_3', b'row_key_4', - b'row_key_5', b'row_key_6'] + expected_row_keys = [ + b"row_key_1", + b"row_key_3", + b"row_key_4", + b"row_key_5", + b"row_key_6", + ] found_row_keys = [row.row_key for row in read_rows] self.assertEqual(found_row_keys, expected_row_keys) @@ -893,7 +953,7 @@ def test_read_large_cell_limit(self): self.rows_to_delete.append(row) number_of_bytes = 10 * 1024 * 1024 - data = b'1' * number_of_bytes # 10MB of 1's. + data = b"1" * number_of_bytes # 10MB of 1's. row.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, data) row.commit() @@ -917,15 +977,13 @@ def test_read_row(self): self.assertEqual(partial_row_data.row_key, ROW_KEY) # Check the cells match. 
- ts_attr = operator.attrgetter('timestamp') + ts_attr = operator.attrgetter("timestamp") expected_row_contents = { COLUMN_FAMILY_ID1: { COL_NAME1: sorted([cell1, cell2], key=ts_attr, reverse=True), COL_NAME2: [cell3], }, - COLUMN_FAMILY_ID2: { - COL_NAME3: [cell4], - }, + COLUMN_FAMILY_ID2: {COL_NAME3: [cell4]}, } self.assertEqual(partial_row_data.cells, expected_row_contents) @@ -934,8 +992,7 @@ def test_read_rows(self): row_alt = self._table.row(ROW_KEY_ALT) self.rows_to_delete.extend([row, row_alt]) - cell1, cell2, cell3, cell4 = self._write_to_row(row, row_alt, - row, row_alt) + cell1, cell2, cell3, cell4 = self._write_to_row(row, row_alt, row, row_alt) row.commit() row_alt.commit() @@ -949,33 +1006,21 @@ def test_read_rows(self): row_data = PartialRowData(ROW_KEY) row_data._chunks_encountered = True row_data._committed = True - row_data._cells = { - COLUMN_FAMILY_ID1: { - COL_NAME1: [cell1], - COL_NAME2: [cell3], - }, - } + row_data._cells = {COLUMN_FAMILY_ID1: {COL_NAME1: [cell1], COL_NAME2: [cell3]}} row_alt_data = PartialRowData(ROW_KEY_ALT) row_alt_data._chunks_encountered = True row_alt_data._committed = True row_alt_data._cells = { - COLUMN_FAMILY_ID1: { - COL_NAME1: [cell2], - }, - COLUMN_FAMILY_ID2: { - COL_NAME3: [cell4], - }, + COLUMN_FAMILY_ID1: {COL_NAME1: [cell2]}, + COLUMN_FAMILY_ID2: {COL_NAME3: [cell4]}, } - expected_rows = { - ROW_KEY: row_data, - ROW_KEY_ALT: row_alt_data, - } + expected_rows = {ROW_KEY: row_data, ROW_KEY_ALT: row_alt_data} self.assertEqual(rows_data.rows, expected_rows) def test_read_with_label_applied(self): - self._maybe_emulator_skip('Labels not supported by Bigtable emulator') + self._maybe_emulator_skip("Labels not supported by Bigtable emulator") row = self._table.row(ROW_KEY) self.rows_to_delete.append(row) @@ -983,13 +1028,13 @@ def test_read_with_label_applied(self): row.commit() # Combine a label with column 1. - label1 = u'label-red' + label1 = u"label-red" label1_filter = ApplyLabelFilter(label1) col1_filter = ColumnQualifierRegexFilter(COL_NAME1) chain1 = RowFilterChain(filters=[col1_filter, label1_filter]) # Combine a label with column 2. 
- label2 = u'label-blue' + label2 = u"label-blue" label2_filter = ApplyLabelFilter(label2) col2_filter = ColumnQualifierRegexFilter(COL_NAME2) chain2 = RowFilterChain(filters=[col2_filter, label2_filter]) @@ -1025,4 +1070,4 @@ def test_access_with_non_admin_client(self): client = Client(admin=False) instance = client.instance(INSTANCE_ID) table = instance.table(self._table.table_id) - self.assertIsNone(table.read_row('nonesuch')) + self.assertIsNone(table.read_row("nonesuch")) diff --git a/packages/google-cloud-bigtable/tests/unit/_testing.py b/packages/google-cloud-bigtable/tests/unit/_testing.py index 3bae0d9ce4a7..302d33ac1540 100644 --- a/packages/google-cloud-bigtable/tests/unit/_testing.py +++ b/packages/google-cloud-bigtable/tests/unit/_testing.py @@ -30,8 +30,8 @@ def _make_credentials(): import google.auth.credentials class _CredentialsWithScopes( - google.auth.credentials.Credentials, - google.auth.credentials.Scoped): + google.auth.credentials.Credentials, google.auth.credentials.Scoped + ): pass return mock.Mock(spec=_CredentialsWithScopes) diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_client_v2.py b/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_client_v2.py index a8099b30e8cd..587f589aa278 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_client_v2.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_client_v2.py @@ -50,16 +50,10 @@ def __init__(self, responses=[]): self.responses = responses self.requests = [] - def unary_unary(self, - method, - request_serializer=None, - response_deserializer=None): + def unary_unary(self, method, request_serializer=None, response_deserializer=None): return MultiCallableStub(method, self) - def unary_stream(self, - method, - request_serializer=None, - response_deserializer=None): + def unary_stream(self, method, request_serializer=None, response_deserializer=None): return MultiCallableStub(method, self) @@ -70,19 +64,19 @@ class CustomException(Exception): class TestBigtableClient(object): def test_read_rows(self): # Setup Expected Response - last_scanned_row_key = b'-126' - expected_response = {'last_scanned_row_key': last_scanned_row_key} + last_scanned_row_key = b"-126" + expected_response = {"last_scanned_row_key": last_scanned_row_key} expected_response = bigtable_pb2.ReadRowsResponse(**expected_response) # Mock the API response channel = ChannelStub(responses=[iter([expected_response])]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_v2.BigtableClient() # Setup Request - table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") response = client.read_rows(table_name) resources = list(response) @@ -97,34 +91,33 @@ def test_read_rows(self): def test_read_rows_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_v2.BigtableClient() # Setup request - table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") with pytest.raises(CustomException): 
client.read_rows(table_name) def test_sample_row_keys(self): # Setup Expected Response - row_key = b'122' + row_key = b"122" offset_bytes = 889884095 - expected_response = {'row_key': row_key, 'offset_bytes': offset_bytes} - expected_response = bigtable_pb2.SampleRowKeysResponse( - **expected_response) + expected_response = {"row_key": row_key, "offset_bytes": offset_bytes} + expected_response = bigtable_pb2.SampleRowKeysResponse(**expected_response) # Mock the API response channel = ChannelStub(responses=[iter([expected_response])]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_v2.BigtableClient() # Setup Request - table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") response = client.sample_row_keys(table_name) resources = list(response) @@ -132,21 +125,20 @@ def test_sample_row_keys(self): assert expected_response == resources[0] assert len(channel.requests) == 1 - expected_request = bigtable_pb2.SampleRowKeysRequest( - table_name=table_name) + expected_request = bigtable_pb2.SampleRowKeysRequest(table_name=table_name) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_sample_row_keys_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_v2.BigtableClient() # Setup request - table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") with pytest.raises(CustomException): client.sample_row_keys(table_name) @@ -158,14 +150,14 @@ def test_mutate_row(self): # Mock the API response channel = ChannelStub(responses=[expected_response]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_v2.BigtableClient() # Setup Request - table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - row_key = b'122' + table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + row_key = b"122" mutations = [] response = client.mutate_row(table_name, row_key, mutations) @@ -173,21 +165,22 @@ def test_mutate_row(self): assert len(channel.requests) == 1 expected_request = bigtable_pb2.MutateRowRequest( - table_name=table_name, row_key=row_key, mutations=mutations) + table_name=table_name, row_key=row_key, mutations=mutations + ) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_mutate_row_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_v2.BigtableClient() # Setup request - table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - row_key = b'122' + table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + row_key = b"122" mutations = [] with 
pytest.raises(CustomException): @@ -196,18 +189,17 @@ def test_mutate_row_exception(self): def test_mutate_rows(self): # Setup Expected Response expected_response = {} - expected_response = bigtable_pb2.MutateRowsResponse( - **expected_response) + expected_response = bigtable_pb2.MutateRowsResponse(**expected_response) # Mock the API response channel = ChannelStub(responses=[iter([expected_response])]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_v2.BigtableClient() # Setup Request - table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") entries = [] response = client.mutate_rows(table_name, entries) @@ -217,20 +209,21 @@ def test_mutate_rows(self): assert len(channel.requests) == 1 expected_request = bigtable_pb2.MutateRowsRequest( - table_name=table_name, entries=entries) + table_name=table_name, entries=entries + ) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_mutate_rows_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_v2.BigtableClient() # Setup request - table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") entries = [] with pytest.raises(CustomException): @@ -239,41 +232,41 @@ def test_mutate_rows_exception(self): def test_check_and_mutate_row(self): # Setup Expected Response predicate_matched = True - expected_response = {'predicate_matched': predicate_matched} - expected_response = bigtable_pb2.CheckAndMutateRowResponse( - **expected_response) + expected_response = {"predicate_matched": predicate_matched} + expected_response = bigtable_pb2.CheckAndMutateRowResponse(**expected_response) # Mock the API response channel = ChannelStub(responses=[expected_response]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_v2.BigtableClient() # Setup Request - table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - row_key = b'122' + table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + row_key = b"122" response = client.check_and_mutate_row(table_name, row_key) assert expected_response == response assert len(channel.requests) == 1 expected_request = bigtable_pb2.CheckAndMutateRowRequest( - table_name=table_name, row_key=row_key) + table_name=table_name, row_key=row_key + ) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_check_and_mutate_row_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_v2.BigtableClient() # Setup request - table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - row_key = b'122' + table_name = 
client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + row_key = b"122" with pytest.raises(CustomException): client.check_and_mutate_row(table_name, row_key) @@ -281,19 +274,18 @@ def test_check_and_mutate_row_exception(self): def test_read_modify_write_row(self): # Setup Expected Response expected_response = {} - expected_response = bigtable_pb2.ReadModifyWriteRowResponse( - **expected_response) + expected_response = bigtable_pb2.ReadModifyWriteRowResponse(**expected_response) # Mock the API response channel = ChannelStub(responses=[expected_response]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_v2.BigtableClient() # Setup Request - table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - row_key = b'122' + table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + row_key = b"122" rules = [] response = client.read_modify_write_row(table_name, row_key, rules) @@ -301,21 +293,22 @@ def test_read_modify_write_row(self): assert len(channel.requests) == 1 expected_request = bigtable_pb2.ReadModifyWriteRowRequest( - table_name=table_name, row_key=row_key, rules=rules) + table_name=table_name, row_key=row_key, rules=rules + ) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_read_modify_write_row_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_v2.BigtableClient() # Setup request - table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - row_key = b'122' + table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + row_key = b"122" rules = [] with pytest.raises(CustomException): diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py b/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py index d81ebe590d90..b9dcb2214893 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py @@ -59,10 +59,7 @@ def __init__(self, responses=[]): self.responses = responses self.requests = [] - def unary_unary(self, - method, - request_serializer=None, - response_deserializer=None): + def unary_unary(self, method, request_serializer=None, response_deserializer=None): return MultiCallableStub(method, self) @@ -73,38 +70,36 @@ class CustomException(Exception): class TestBigtableInstanceAdminClient(object): def test_create_instance(self): # Setup Expected Response - name = 'name3373707' - display_name = 'displayName1615086568' - expected_response = {'name': name, 'display_name': display_name} + name = "name3373707" + display_name = "displayName1615086568" + expected_response = {"name": name, "display_name": display_name} expected_response = instance_pb2.Instance(**expected_response) operation = operations_pb2.Operation( - name='operations/test_create_instance', done=True) + name="operations/test_create_instance", done=True + ) operation.response.Pack(expected_response) # Mock the API response channel = ChannelStub(responses=[operation]) - patch = 
mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request - parent = client.project_path('[PROJECT]') - instance_id = 'instanceId-2101995259' + parent = client.project_path("[PROJECT]") + instance_id = "instanceId-2101995259" instance = {} clusters = {} - response = client.create_instance(parent, instance_id, instance, - clusters) + response = client.create_instance(parent, instance_id, instance, clusters) result = response.result() assert expected_response == result assert len(channel.requests) == 1 expected_request = bigtable_instance_admin_pb2.CreateInstanceRequest( - parent=parent, - instance_id=instance_id, - instance=instance, - clusters=clusters) + parent=parent, instance_id=instance_id, instance=instance, clusters=clusters + ) actual_request = channel.requests[0][1] assert expected_request == actual_request @@ -112,124 +107,125 @@ def test_create_instance_exception(self): # Setup Response error = status_pb2.Status() operation = operations_pb2.Operation( - name='operations/test_create_instance_exception', done=True) + name="operations/test_create_instance_exception", done=True + ) operation.error.CopyFrom(error) # Mock the API response channel = ChannelStub(responses=[operation]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request - parent = client.project_path('[PROJECT]') - instance_id = 'instanceId-2101995259' + parent = client.project_path("[PROJECT]") + instance_id = "instanceId-2101995259" instance = {} clusters = {} - response = client.create_instance(parent, instance_id, instance, - clusters) + response = client.create_instance(parent, instance_id, instance, clusters) exception = response.exception() assert exception.errors[0] == error def test_get_instance(self): # Setup Expected Response - name_2 = 'name2-1052831874' - display_name = 'displayName1615086568' - expected_response = {'name': name_2, 'display_name': display_name} + name_2 = "name2-1052831874" + display_name = "displayName1615086568" + expected_response = {"name": name_2, "display_name": display_name} expected_response = instance_pb2.Instance(**expected_response) # Mock the API response channel = ChannelStub(responses=[expected_response]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request - name = client.instance_path('[PROJECT]', '[INSTANCE]') + name = client.instance_path("[PROJECT]", "[INSTANCE]") response = client.get_instance(name) assert expected_response == response assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.GetInstanceRequest( - name=name) + expected_request = bigtable_instance_admin_pb2.GetInstanceRequest(name=name) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_get_instance_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = 
mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup request - name = client.instance_path('[PROJECT]', '[INSTANCE]') + name = client.instance_path("[PROJECT]", "[INSTANCE]") with pytest.raises(CustomException): client.get_instance(name) def test_list_instances(self): # Setup Expected Response - next_page_token = 'nextPageToken-1530815211' - expected_response = {'next_page_token': next_page_token} + next_page_token = "nextPageToken-1530815211" + expected_response = {"next_page_token": next_page_token} expected_response = bigtable_instance_admin_pb2.ListInstancesResponse( - **expected_response) + **expected_response + ) # Mock the API response channel = ChannelStub(responses=[expected_response]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request - parent = client.project_path('[PROJECT]') + parent = client.project_path("[PROJECT]") response = client.list_instances(parent) assert expected_response == response assert len(channel.requests) == 1 expected_request = bigtable_instance_admin_pb2.ListInstancesRequest( - parent=parent) + parent=parent + ) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_list_instances_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup request - parent = client.project_path('[PROJECT]') + parent = client.project_path("[PROJECT]") with pytest.raises(CustomException): client.list_instances(parent) def test_update_instance(self): # Setup Expected Response - name_2 = 'name2-1052831874' - display_name_2 = 'displayName21615000987' - expected_response = {'name': name_2, 'display_name': display_name_2} + name_2 = "name2-1052831874" + display_name_2 = "displayName21615000987" + expected_response = {"name": name_2, "display_name": display_name_2} expected_response = instance_pb2.Instance(**expected_response) # Mock the API response channel = ChannelStub(responses=[expected_response]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request - name = client.instance_path('[PROJECT]', '[INSTANCE]') - display_name = 'displayName1615086568' + name = client.instance_path("[PROJECT]", "[INSTANCE]") + display_name = "displayName1615086568" type_ = enums.Instance.Type.TYPE_UNSPECIFIED labels = {} @@ -238,21 +234,22 @@ def test_update_instance(self): assert len(channel.requests) == 1 expected_request = instance_pb2.Instance( - name=name, display_name=display_name, type=type_, labels=labels) + name=name, display_name=display_name, type=type_, labels=labels + ) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_update_instance_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - patch = 
mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup request - name = client.instance_path('[PROJECT]', '[INSTANCE]') - display_name = 'displayName1615086568' + name = client.instance_path("[PROJECT]", "[INSTANCE]") + display_name = "displayName1615086568" type_ = enums.Instance.Type.TYPE_UNSPECIFIED labels = {} @@ -261,17 +258,18 @@ def test_update_instance_exception(self): def test_partial_update_instance(self): # Setup Expected Response - name = 'name3373707' - display_name = 'displayName1615086568' - expected_response = {'name': name, 'display_name': display_name} + name = "name3373707" + display_name = "displayName1615086568" + expected_response = {"name": name, "display_name": display_name} expected_response = instance_pb2.Instance(**expected_response) operation = operations_pb2.Operation( - name='operations/test_partial_update_instance', done=True) + name="operations/test_partial_update_instance", done=True + ) operation.response.Pack(expected_response) # Mock the API response channel = ChannelStub(responses=[operation]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableInstanceAdminClient() @@ -286,7 +284,8 @@ def test_partial_update_instance(self): assert len(channel.requests) == 1 expected_request = bigtable_instance_admin_pb2.PartialUpdateInstanceRequest( - instance=instance, update_mask=update_mask) + instance=instance, update_mask=update_mask + ) actual_request = channel.requests[0][1] assert expected_request == actual_request @@ -294,13 +293,13 @@ def test_partial_update_instance_exception(self): # Setup Response error = status_pb2.Status() operation = operations_pb2.Operation( - name='operations/test_partial_update_instance_exception', - done=True) + name="operations/test_partial_update_instance_exception", done=True + ) operation.error.CopyFrom(error) # Mock the API response channel = ChannelStub(responses=[operation]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableInstanceAdminClient() @@ -315,61 +314,61 @@ def test_partial_update_instance_exception(self): def test_delete_instance(self): channel = ChannelStub() - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request - name = client.instance_path('[PROJECT]', '[INSTANCE]') + name = client.instance_path("[PROJECT]", "[INSTANCE]") client.delete_instance(name) assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.DeleteInstanceRequest( - name=name) + expected_request = bigtable_instance_admin_pb2.DeleteInstanceRequest(name=name) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_delete_instance_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = 
mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup request - name = client.instance_path('[PROJECT]', '[INSTANCE]') + name = client.instance_path("[PROJECT]", "[INSTANCE]") with pytest.raises(CustomException): client.delete_instance(name) def test_create_cluster(self): # Setup Expected Response - name = 'name3373707' - location = 'location1901043637' + name = "name3373707" + location = "location1901043637" serve_nodes = 1288838783 expected_response = { - 'name': name, - 'location': location, - 'serve_nodes': serve_nodes + "name": name, + "location": location, + "serve_nodes": serve_nodes, } expected_response = instance_pb2.Cluster(**expected_response) operation = operations_pb2.Operation( - name='operations/test_create_cluster', done=True) + name="operations/test_create_cluster", done=True + ) operation.response.Pack(expected_response) # Mock the API response channel = ChannelStub(responses=[operation]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request - parent = client.instance_path('[PROJECT]', '[INSTANCE]') - cluster_id = 'clusterId240280960' + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + cluster_id = "clusterId240280960" cluster = {} response = client.create_cluster(parent, cluster_id, cluster) @@ -378,7 +377,8 @@ def test_create_cluster(self): assert len(channel.requests) == 1 expected_request = bigtable_instance_admin_pb2.CreateClusterRequest( - parent=parent, cluster_id=cluster_id, cluster=cluster) + parent=parent, cluster_id=cluster_id, cluster=cluster + ) actual_request = channel.requests[0][1] assert expected_request == actual_request @@ -386,19 +386,20 @@ def test_create_cluster_exception(self): # Setup Response error = status_pb2.Status() operation = operations_pb2.Operation( - name='operations/test_create_cluster_exception', done=True) + name="operations/test_create_cluster_exception", done=True + ) operation.error.CopyFrom(error) # Mock the API response channel = ChannelStub(responses=[operation]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request - parent = client.instance_path('[PROJECT]', '[INSTANCE]') - cluster_id = 'clusterId240280960' + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + cluster_id = "clusterId240280960" cluster = {} response = client.create_cluster(parent, cluster_id, cluster) @@ -407,113 +408,115 @@ def test_create_cluster_exception(self): def test_get_cluster(self): # Setup Expected Response - name_2 = 'name2-1052831874' - location = 'location1901043637' + name_2 = "name2-1052831874" + location = "location1901043637" serve_nodes = 1288838783 expected_response = { - 'name': name_2, - 'location': location, - 'serve_nodes': serve_nodes + "name": name_2, + "location": location, + "serve_nodes": serve_nodes, } expected_response = instance_pb2.Cluster(**expected_response) # Mock the API response channel = ChannelStub(responses=[expected_response]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = 
mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request - name = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') + name = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") response = client.get_cluster(name) assert expected_response == response assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.GetClusterRequest( - name=name) + expected_request = bigtable_instance_admin_pb2.GetClusterRequest(name=name) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_get_cluster_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup request - name = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') + name = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") with pytest.raises(CustomException): client.get_cluster(name) def test_list_clusters(self): # Setup Expected Response - next_page_token = 'nextPageToken-1530815211' - expected_response = {'next_page_token': next_page_token} + next_page_token = "nextPageToken-1530815211" + expected_response = {"next_page_token": next_page_token} expected_response = bigtable_instance_admin_pb2.ListClustersResponse( - **expected_response) + **expected_response + ) # Mock the API response channel = ChannelStub(responses=[expected_response]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request - parent = client.instance_path('[PROJECT]', '[INSTANCE]') + parent = client.instance_path("[PROJECT]", "[INSTANCE]") response = client.list_clusters(parent) assert expected_response == response assert len(channel.requests) == 1 expected_request = bigtable_instance_admin_pb2.ListClustersRequest( - parent=parent) + parent=parent + ) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_list_clusters_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup request - parent = client.instance_path('[PROJECT]', '[INSTANCE]') + parent = client.instance_path("[PROJECT]", "[INSTANCE]") with pytest.raises(CustomException): client.list_clusters(parent) def test_update_cluster(self): # Setup Expected Response - name_2 = 'name2-1052831874' - location = 'location1901043637' + name_2 = "name2-1052831874" + location = "location1901043637" serve_nodes_2 = 1623486220 expected_response = { - 'name': name_2, - 'location': location, - 'serve_nodes': serve_nodes_2 + "name": name_2, + "location": location, + "serve_nodes": serve_nodes_2, } expected_response = instance_pb2.Cluster(**expected_response) operation = operations_pb2.Operation( - name='operations/test_update_cluster', done=True) + 
name="operations/test_update_cluster", done=True + ) operation.response.Pack(expected_response) # Mock the API response channel = ChannelStub(responses=[operation]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request - name = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') + name = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") serve_nodes = 1288838783 response = client.update_cluster(name, serve_nodes) @@ -521,8 +524,7 @@ def test_update_cluster(self): assert expected_response == result assert len(channel.requests) == 1 - expected_request = instance_pb2.Cluster( - name=name, serve_nodes=serve_nodes) + expected_request = instance_pb2.Cluster(name=name, serve_nodes=serve_nodes) actual_request = channel.requests[0][1] assert expected_request == actual_request @@ -530,18 +532,19 @@ def test_update_cluster_exception(self): # Setup Response error = status_pb2.Status() operation = operations_pb2.Operation( - name='operations/test_update_cluster_exception', done=True) + name="operations/test_update_cluster_exception", done=True + ) operation.error.CopyFrom(error) # Mock the API response channel = ChannelStub(responses=[operation]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request - name = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') + name = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") serve_nodes = 1288838783 response = client.update_cluster(name, serve_nodes) @@ -550,83 +553,76 @@ def test_update_cluster_exception(self): def test_delete_cluster(self): channel = ChannelStub() - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request - name = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') + name = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") client.delete_cluster(name) assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.DeleteClusterRequest( - name=name) + expected_request = bigtable_instance_admin_pb2.DeleteClusterRequest(name=name) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_delete_cluster_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup request - name = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') + name = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") with pytest.raises(CustomException): client.delete_cluster(name) def test_create_app_profile(self): # Setup Expected Response - name = 'name3373707' - etag = 'etag3123477' - description = 'description-1724546052' - expected_response = { - 'name': name, - 'etag': etag, - 'description': 
description - } + name = "name3373707" + etag = "etag3123477" + description = "description-1724546052" + expected_response = {"name": name, "etag": etag, "description": description} expected_response = instance_pb2.AppProfile(**expected_response) # Mock the API response channel = ChannelStub(responses=[expected_response]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request - parent = client.instance_path('[PROJECT]', '[INSTANCE]') - app_profile_id = 'appProfileId1262094415' + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + app_profile_id = "appProfileId1262094415" app_profile = {} - response = client.create_app_profile(parent, app_profile_id, - app_profile) + response = client.create_app_profile(parent, app_profile_id, app_profile) assert expected_response == response assert len(channel.requests) == 1 expected_request = bigtable_instance_admin_pb2.CreateAppProfileRequest( - parent=parent, - app_profile_id=app_profile_id, - app_profile=app_profile) + parent=parent, app_profile_id=app_profile_id, app_profile=app_profile + ) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_create_app_profile_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup request - parent = client.instance_path('[PROJECT]', '[INSTANCE]') - app_profile_id = 'appProfileId1262094415' + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + app_profile_id = "appProfileId1262094415" app_profile = {} with pytest.raises(CustomException): @@ -634,72 +630,66 @@ def test_create_app_profile_exception(self): def test_get_app_profile(self): # Setup Expected Response - name_2 = 'name2-1052831874' - etag = 'etag3123477' - description = 'description-1724546052' - expected_response = { - 'name': name_2, - 'etag': etag, - 'description': description - } + name_2 = "name2-1052831874" + etag = "etag3123477" + description = "description-1724546052" + expected_response = {"name": name_2, "etag": etag, "description": description} expected_response = instance_pb2.AppProfile(**expected_response) # Mock the API response channel = ChannelStub(responses=[expected_response]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request - name = client.app_profile_path('[PROJECT]', '[INSTANCE]', - '[APP_PROFILE]') + name = client.app_profile_path("[PROJECT]", "[INSTANCE]", "[APP_PROFILE]") response = client.get_app_profile(name) assert expected_response == response assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.GetAppProfileRequest( - name=name) + expected_request = bigtable_instance_admin_pb2.GetAppProfileRequest(name=name) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_get_app_profile_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - 
patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup request - name = client.app_profile_path('[PROJECT]', '[INSTANCE]', - '[APP_PROFILE]') + name = client.app_profile_path("[PROJECT]", "[INSTANCE]", "[APP_PROFILE]") with pytest.raises(CustomException): client.get_app_profile(name) def test_list_app_profiles(self): # Setup Expected Response - next_page_token = '' + next_page_token = "" app_profiles_element = {} app_profiles = [app_profiles_element] expected_response = { - 'next_page_token': next_page_token, - 'app_profiles': app_profiles + "next_page_token": next_page_token, + "app_profiles": app_profiles, } expected_response = bigtable_instance_admin_pb2.ListAppProfilesResponse( - **expected_response) + **expected_response + ) # Mock the API response channel = ChannelStub(responses=[expected_response]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request - parent = client.instance_path('[PROJECT]', '[INSTANCE]') + parent = client.instance_path("[PROJECT]", "[INSTANCE]") paged_list_response = client.list_app_profiles(parent) resources = list(paged_list_response) @@ -709,19 +699,20 @@ def test_list_app_profiles(self): assert len(channel.requests) == 1 expected_request = bigtable_instance_admin_pb2.ListAppProfilesRequest( - parent=parent) + parent=parent + ) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_list_app_profiles_exception(self): channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup request - parent = client.instance_path('[PROJECT]', '[INSTANCE]') + parent = client.instance_path("[PROJECT]", "[INSTANCE]") paged_list_response = client.list_app_profiles(parent) with pytest.raises(CustomException): @@ -729,22 +720,19 @@ def test_list_app_profiles_exception(self): def test_update_app_profile(self): # Setup Expected Response - name = 'name3373707' - etag = 'etag3123477' - description = 'description-1724546052' - expected_response = { - 'name': name, - 'etag': etag, - 'description': description - } + name = "name3373707" + etag = "etag3123477" + description = "description-1724546052" + expected_response = {"name": name, "etag": etag, "description": description} expected_response = instance_pb2.AppProfile(**expected_response) operation = operations_pb2.Operation( - name='operations/test_update_app_profile', done=True) + name="operations/test_update_app_profile", done=True + ) operation.response.Pack(expected_response) # Mock the API response channel = ChannelStub(responses=[operation]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableInstanceAdminClient() @@ -759,7 +747,8 @@ def test_update_app_profile(self): assert len(channel.requests) == 1 expected_request = 
bigtable_instance_admin_pb2.UpdateAppProfileRequest( - app_profile=app_profile, update_mask=update_mask) + app_profile=app_profile, update_mask=update_mask + ) actual_request = channel.requests[0][1] assert expected_request == actual_request @@ -767,12 +756,13 @@ def test_update_app_profile_exception(self): # Setup Response error = status_pb2.Status() operation = operations_pb2.Operation( - name='operations/test_update_app_profile_exception', done=True) + name="operations/test_update_app_profile_exception", done=True + ) operation.error.CopyFrom(error) # Mock the API response channel = ChannelStub(responses=[operation]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableInstanceAdminClient() @@ -787,35 +777,34 @@ def test_update_app_profile_exception(self): def test_delete_app_profile(self): channel = ChannelStub() - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request - name = client.app_profile_path('[PROJECT]', '[INSTANCE]', - '[APP_PROFILE]') + name = client.app_profile_path("[PROJECT]", "[INSTANCE]", "[APP_PROFILE]") ignore_warnings = True client.delete_app_profile(name, ignore_warnings) assert len(channel.requests) == 1 expected_request = bigtable_instance_admin_pb2.DeleteAppProfileRequest( - name=name, ignore_warnings=ignore_warnings) + name=name, ignore_warnings=ignore_warnings + ) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_delete_app_profile_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup request - name = client.app_profile_path('[PROJECT]', '[INSTANCE]', - '[APP_PROFILE]') + name = client.app_profile_path("[PROJECT]", "[INSTANCE]", "[APP_PROFILE]") ignore_warnings = True with pytest.raises(CustomException): @@ -824,39 +813,38 @@ def test_delete_app_profile_exception(self): def test_get_iam_policy(self): # Setup Expected Response version = 351608024 - etag = b'etag3123477' - expected_response = {'version': version, 'etag': etag} + etag = b"etag3123477" + expected_response = {"version": version, "etag": etag} expected_response = policy_pb2.Policy(**expected_response) # Mock the API response channel = ChannelStub(responses=[expected_response]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request - resource = client.instance_path('[PROJECT]', '[INSTANCE]') + resource = client.instance_path("[PROJECT]", "[INSTANCE]") response = client.get_iam_policy(resource) assert expected_response == response assert len(channel.requests) == 1 - expected_request = iam_policy_pb2.GetIamPolicyRequest( - resource=resource) + expected_request = iam_policy_pb2.GetIamPolicyRequest(resource=resource) actual_request = 
channel.requests[0][1] assert expected_request == actual_request def test_get_iam_policy_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup request - resource = client.instance_path('[PROJECT]', '[INSTANCE]') + resource = client.instance_path("[PROJECT]", "[INSTANCE]") with pytest.raises(CustomException): client.get_iam_policy(resource) @@ -864,19 +852,19 @@ def test_get_iam_policy_exception(self): def test_set_iam_policy(self): # Setup Expected Response version = 351608024 - etag = b'etag3123477' - expected_response = {'version': version, 'etag': etag} + etag = b"etag3123477" + expected_response = {"version": version, "etag": etag} expected_response = policy_pb2.Policy(**expected_response) # Mock the API response channel = ChannelStub(responses=[expected_response]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request - resource = client.instance_path('[PROJECT]', '[INSTANCE]') + resource = client.instance_path("[PROJECT]", "[INSTANCE]") policy = {} response = client.set_iam_policy(resource, policy) @@ -884,20 +872,21 @@ def test_set_iam_policy(self): assert len(channel.requests) == 1 expected_request = iam_policy_pb2.SetIamPolicyRequest( - resource=resource, policy=policy) + resource=resource, policy=policy + ) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_set_iam_policy_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup request - resource = client.instance_path('[PROJECT]', '[INSTANCE]') + resource = client.instance_path("[PROJECT]", "[INSTANCE]") policy = {} with pytest.raises(CustomException): @@ -907,17 +896,18 @@ def test_test_iam_permissions(self): # Setup Expected Response expected_response = {} expected_response = iam_policy_pb2.TestIamPermissionsResponse( - **expected_response) + **expected_response + ) # Mock the API response channel = ChannelStub(responses=[expected_response]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request - resource = client.instance_path('[PROJECT]', '[INSTANCE]') + resource = client.instance_path("[PROJECT]", "[INSTANCE]") permissions = [] response = client.test_iam_permissions(resource, permissions) @@ -925,20 +915,21 @@ def test_test_iam_permissions(self): assert len(channel.requests) == 1 expected_request = iam_policy_pb2.TestIamPermissionsRequest( - resource=resource, permissions=permissions) + resource=resource, permissions=permissions + ) actual_request = channel.requests[0][1] assert expected_request == actual_request def 
test_test_iam_permissions_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup request - resource = client.instance_path('[PROJECT]', '[INSTANCE]') + resource = client.instance_path("[PROJECT]", "[INSTANCE]") permissions = [] with pytest.raises(CustomException): diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py b/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py index 4e4cd1b68153..1b84de2fef13 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py @@ -55,10 +55,7 @@ def __init__(self, responses=[]): self.responses = responses self.requests = [] - def unary_unary(self, - method, - request_serializer=None, - response_deserializer=None): + def unary_unary(self, method, request_serializer=None, response_deserializer=None): return MultiCallableStub(method, self) @@ -69,20 +66,20 @@ class CustomException(Exception): class TestBigtableTableAdminClient(object): def test_create_table(self): # Setup Expected Response - name = 'name3373707' - expected_response = {'name': name} + name = "name3373707" + expected_response = {"name": name} expected_response = table_pb2.Table(**expected_response) # Mock the API response channel = ChannelStub(responses=[expected_response]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableTableAdminClient() # Setup Request - parent = client.instance_path('[PROJECT]', '[INSTANCE]') - table_id = 'tableId-895419604' + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + table_id = "tableId-895419604" table = {} response = client.create_table(parent, table_id, table) @@ -90,21 +87,22 @@ def test_create_table(self): assert len(channel.requests) == 1 expected_request = bigtable_table_admin_pb2.CreateTableRequest( - parent=parent, table_id=table_id, table=table) + parent=parent, table_id=table_id, table=table + ) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_create_table_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableTableAdminClient() # Setup request - parent = client.instance_path('[PROJECT]', '[INSTANCE]') - table_id = 'tableId-895419604' + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + table_id = "tableId-895419604" table = {} with pytest.raises(CustomException): @@ -112,33 +110,34 @@ def test_create_table_exception(self): def test_create_table_from_snapshot(self): # Setup Expected Response - name = 'name3373707' - expected_response = {'name': name} + name = "name3373707" + expected_response = {"name": name} expected_response = table_pb2.Table(**expected_response) operation = operations_pb2.Operation( - 
name='operations/test_create_table_from_snapshot', done=True) + name="operations/test_create_table_from_snapshot", done=True + ) operation.response.Pack(expected_response) # Mock the API response channel = ChannelStub(responses=[operation]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableTableAdminClient() # Setup Request - parent = client.instance_path('[PROJECT]', '[INSTANCE]') - table_id = 'tableId-895419604' - source_snapshot = 'sourceSnapshot-947679896' + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + table_id = "tableId-895419604" + source_snapshot = "sourceSnapshot-947679896" - response = client.create_table_from_snapshot(parent, table_id, - source_snapshot) + response = client.create_table_from_snapshot(parent, table_id, source_snapshot) result = response.result() assert expected_response == result assert len(channel.requests) == 1 expected_request = bigtable_table_admin_pb2.CreateTableFromSnapshotRequest( - parent=parent, table_id=table_id, source_snapshot=source_snapshot) + parent=parent, table_id=table_id, source_snapshot=source_snapshot + ) actual_request = channel.requests[0][1] assert expected_request == actual_request @@ -146,48 +145,45 @@ def test_create_table_from_snapshot_exception(self): # Setup Response error = status_pb2.Status() operation = operations_pb2.Operation( - name='operations/test_create_table_from_snapshot_exception', - done=True) + name="operations/test_create_table_from_snapshot_exception", done=True + ) operation.error.CopyFrom(error) # Mock the API response channel = ChannelStub(responses=[operation]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableTableAdminClient() # Setup Request - parent = client.instance_path('[PROJECT]', '[INSTANCE]') - table_id = 'tableId-895419604' - source_snapshot = 'sourceSnapshot-947679896' + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + table_id = "tableId-895419604" + source_snapshot = "sourceSnapshot-947679896" - response = client.create_table_from_snapshot(parent, table_id, - source_snapshot) + response = client.create_table_from_snapshot(parent, table_id, source_snapshot) exception = response.exception() assert exception.errors[0] == error def test_list_tables(self): # Setup Expected Response - next_page_token = '' + next_page_token = "" tables_element = {} tables = [tables_element] - expected_response = { - 'next_page_token': next_page_token, - 'tables': tables - } + expected_response = {"next_page_token": next_page_token, "tables": tables} expected_response = bigtable_table_admin_pb2.ListTablesResponse( - **expected_response) + **expected_response + ) # Mock the API response channel = ChannelStub(responses=[expected_response]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableTableAdminClient() # Setup Request - parent = client.instance_path('[PROJECT]', '[INSTANCE]') + parent = client.instance_path("[PROJECT]", "[INSTANCE]") paged_list_response = client.list_tables(parent) resources = list(paged_list_response) @@ -196,20 +192,19 @@ 
def test_list_tables(self): assert expected_response.tables[0] == resources[0] assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.ListTablesRequest( - parent=parent) + expected_request = bigtable_table_admin_pb2.ListTablesRequest(parent=parent) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_list_tables_exception(self): channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableTableAdminClient() # Setup request - parent = client.instance_path('[PROJECT]', '[INSTANCE]') + parent = client.instance_path("[PROJECT]", "[INSTANCE]") paged_list_response = client.list_tables(parent) with pytest.raises(CustomException): @@ -217,19 +212,19 @@ def test_list_tables_exception(self): def test_get_table(self): # Setup Expected Response - name_2 = 'name2-1052831874' - expected_response = {'name': name_2} + name_2 = "name2-1052831874" + expected_response = {"name": name_2} expected_response = table_pb2.Table(**expected_response) # Mock the API response channel = ChannelStub(responses=[expected_response]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableTableAdminClient() # Setup Request - name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") response = client.get_table(name) assert expected_response == response @@ -242,64 +237,63 @@ def test_get_table(self): def test_get_table_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableTableAdminClient() # Setup request - name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") with pytest.raises(CustomException): client.get_table(name) def test_delete_table(self): channel = ChannelStub() - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableTableAdminClient() # Setup Request - name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") client.delete_table(name) assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.DeleteTableRequest( - name=name) + expected_request = bigtable_table_admin_pb2.DeleteTableRequest(name=name) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_delete_table_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableTableAdminClient() # Setup request - 
name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") with pytest.raises(CustomException): client.delete_table(name) def test_modify_column_families(self): # Setup Expected Response - name_2 = 'name2-1052831874' - expected_response = {'name': name_2} + name_2 = "name2-1052831874" + expected_response = {"name": name_2} expected_response = table_pb2.Table(**expected_response) # Mock the API response channel = ChannelStub(responses=[expected_response]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableTableAdminClient() # Setup Request - name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") modifications = [] response = client.modify_column_families(name, modifications) @@ -307,20 +301,21 @@ def test_modify_column_families(self): assert len(channel.requests) == 1 expected_request = bigtable_table_admin_pb2.ModifyColumnFamiliesRequest( - name=name, modifications=modifications) + name=name, modifications=modifications + ) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_modify_column_families_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableTableAdminClient() # Setup request - name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") modifications = [] with pytest.raises(CustomException): @@ -328,72 +323,73 @@ def test_modify_column_families_exception(self): def test_drop_row_range(self): channel = ChannelStub() - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableTableAdminClient() # Setup Request - name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") client.drop_row_range(name) assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.DropRowRangeRequest( - name=name) + expected_request = bigtable_table_admin_pb2.DropRowRangeRequest(name=name) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_drop_row_range_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableTableAdminClient() # Setup request - name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") with pytest.raises(CustomException): client.drop_row_range(name) def test_generate_consistency_token(self): # Setup Expected Response - consistency_token = 'consistencyToken-1090516718' - expected_response = {'consistency_token': consistency_token} + 
consistency_token = "consistencyToken-1090516718" + expected_response = {"consistency_token": consistency_token} expected_response = bigtable_table_admin_pb2.GenerateConsistencyTokenResponse( - **expected_response) + **expected_response + ) # Mock the API response channel = ChannelStub(responses=[expected_response]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableTableAdminClient() # Setup Request - name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") response = client.generate_consistency_token(name) assert expected_response == response assert len(channel.requests) == 1 expected_request = bigtable_table_admin_pb2.GenerateConsistencyTokenRequest( - name=name) + name=name + ) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_generate_consistency_token_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableTableAdminClient() # Setup request - name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") with pytest.raises(CustomException): client.generate_consistency_token(name) @@ -401,84 +397,84 @@ def test_generate_consistency_token_exception(self): def test_check_consistency(self): # Setup Expected Response consistent = True - expected_response = {'consistent': consistent} + expected_response = {"consistent": consistent} expected_response = bigtable_table_admin_pb2.CheckConsistencyResponse( - **expected_response) + **expected_response + ) # Mock the API response channel = ChannelStub(responses=[expected_response]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableTableAdminClient() # Setup Request - name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - consistency_token = 'consistencyToken-1090516718' + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + consistency_token = "consistencyToken-1090516718" response = client.check_consistency(name, consistency_token) assert expected_response == response assert len(channel.requests) == 1 expected_request = bigtable_table_admin_pb2.CheckConsistencyRequest( - name=name, consistency_token=consistency_token) + name=name, consistency_token=consistency_token + ) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_check_consistency_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableTableAdminClient() # Setup request - name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - consistency_token = 'consistencyToken-1090516718' + name = client.table_path("[PROJECT]", "[INSTANCE]", 
"[TABLE]") + consistency_token = "consistencyToken-1090516718" with pytest.raises(CustomException): client.check_consistency(name, consistency_token) def test_snapshot_table(self): # Setup Expected Response - name_2 = 'name2-1052831874' + name_2 = "name2-1052831874" data_size_bytes = 2110122398 - description_2 = 'description2568623279' + description_2 = "description2568623279" expected_response = { - 'name': name_2, - 'data_size_bytes': data_size_bytes, - 'description': description_2 + "name": name_2, + "data_size_bytes": data_size_bytes, + "description": description_2, } expected_response = table_pb2.Snapshot(**expected_response) operation = operations_pb2.Operation( - name='operations/test_snapshot_table', done=True) + name="operations/test_snapshot_table", done=True + ) operation.response.Pack(expected_response) # Mock the API response channel = ChannelStub(responses=[operation]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableTableAdminClient() # Setup Request - name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - cluster = 'cluster872092154' - snapshot_id = 'snapshotId-168585866' - description = 'description-1724546052' + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + cluster = "cluster872092154" + snapshot_id = "snapshotId-168585866" + description = "description-1724546052" - response = client.snapshot_table(name, cluster, snapshot_id, - description) + response = client.snapshot_table(name, cluster, snapshot_id, description) result = response.result() assert expected_response == result assert len(channel.requests) == 1 expected_request = bigtable_table_admin_pb2.SnapshotTableRequest( - name=name, - cluster=cluster, - snapshot_id=snapshot_id, - description=description) + name=name, cluster=cluster, snapshot_id=snapshot_id, description=description + ) actual_request = channel.requests[0][1] assert expected_request == actual_request @@ -486,95 +482,94 @@ def test_snapshot_table_exception(self): # Setup Response error = status_pb2.Status() operation = operations_pb2.Operation( - name='operations/test_snapshot_table_exception', done=True) + name="operations/test_snapshot_table_exception", done=True + ) operation.error.CopyFrom(error) # Mock the API response channel = ChannelStub(responses=[operation]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableTableAdminClient() # Setup Request - name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - cluster = 'cluster872092154' - snapshot_id = 'snapshotId-168585866' - description = 'description-1724546052' + name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + cluster = "cluster872092154" + snapshot_id = "snapshotId-168585866" + description = "description-1724546052" - response = client.snapshot_table(name, cluster, snapshot_id, - description) + response = client.snapshot_table(name, cluster, snapshot_id, description) exception = response.exception() assert exception.errors[0] == error def test_get_snapshot(self): # Setup Expected Response - name_2 = 'name2-1052831874' + name_2 = "name2-1052831874" data_size_bytes = 2110122398 - description = 'description-1724546052' + description = "description-1724546052" 
expected_response = { - 'name': name_2, - 'data_size_bytes': data_size_bytes, - 'description': description + "name": name_2, + "data_size_bytes": data_size_bytes, + "description": description, } expected_response = table_pb2.Snapshot(**expected_response) # Mock the API response channel = ChannelStub(responses=[expected_response]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableTableAdminClient() # Setup Request - name = client.snapshot_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]', - '[SNAPSHOT]') + name = client.snapshot_path( + "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" + ) response = client.get_snapshot(name) assert expected_response == response assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.GetSnapshotRequest( - name=name) + expected_request = bigtable_table_admin_pb2.GetSnapshotRequest(name=name) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_get_snapshot_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableTableAdminClient() # Setup request - name = client.snapshot_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]', - '[SNAPSHOT]') + name = client.snapshot_path( + "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" + ) with pytest.raises(CustomException): client.get_snapshot(name) def test_list_snapshots(self): # Setup Expected Response - next_page_token = '' + next_page_token = "" snapshots_element = {} snapshots = [snapshots_element] - expected_response = { - 'next_page_token': next_page_token, - 'snapshots': snapshots - } + expected_response = {"next_page_token": next_page_token, "snapshots": snapshots} expected_response = bigtable_table_admin_pb2.ListSnapshotsResponse( - **expected_response) + **expected_response + ) # Mock the API response channel = ChannelStub(responses=[expected_response]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableTableAdminClient() # Setup Request - parent = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') + parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") paged_list_response = client.list_snapshots(parent) resources = list(paged_list_response) @@ -583,20 +578,19 @@ def test_list_snapshots(self): assert expected_response.snapshots[0] == resources[0] assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.ListSnapshotsRequest( - parent=parent) + expected_request = bigtable_table_admin_pb2.ListSnapshotsRequest(parent=parent) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_list_snapshots_exception(self): channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableTableAdminClient() # Setup request - parent 
= client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') + parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") paged_list_response = client.list_snapshots(parent) with pytest.raises(CustomException): @@ -604,34 +598,35 @@ def test_list_snapshots_exception(self): def test_delete_snapshot(self): channel = ChannelStub() - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableTableAdminClient() # Setup Request - name = client.snapshot_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]', - '[SNAPSHOT]') + name = client.snapshot_path( + "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" + ) client.delete_snapshot(name) assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.DeleteSnapshotRequest( - name=name) + expected_request = bigtable_table_admin_pb2.DeleteSnapshotRequest(name=name) actual_request = channel.requests[0][1] assert expected_request == actual_request def test_delete_snapshot_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch('google.api_core.grpc_helpers.create_channel') + patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = bigtable_admin_v2.BigtableTableAdminClient() # Setup request - name = client.snapshot_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]', - '[SNAPSHOT]') + name = client.snapshot_path( + "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" + ) with pytest.raises(CustomException): client.delete_snapshot(name) diff --git a/packages/google-cloud-bigtable/tests/unit/test_app_profile.py b/packages/google-cloud-bigtable/tests/unit/test_app_profile.py index 17cadc49f789..f7ec0a85511f 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_app_profile.py +++ b/packages/google-cloud-bigtable/tests/unit/test_app_profile.py @@ -39,25 +39,23 @@ def __init__(self, responses=[]): self.responses = responses self.requests = [] - def unary_unary(self, - method, - request_serializer=None, - response_deserializer=None): + def unary_unary(self, method, request_serializer=None, response_deserializer=None): return MultiCallableStub(method, self) class TestAppProfile(unittest.TestCase): - PROJECT = 'project' - INSTANCE_ID = 'instance-id' - APP_PROFILE_ID = 'app-profile-id' - APP_PROFILE_NAME = ('projects/{}/instances/{}/appProfiles/{}' - .format(PROJECT, INSTANCE_ID, APP_PROFILE_ID)) - CLUSTER_ID = 'cluster-id' + PROJECT = "project" + INSTANCE_ID = "instance-id" + APP_PROFILE_ID = "app-profile-id" + APP_PROFILE_NAME = "projects/{}/instances/{}/appProfiles/{}".format( + PROJECT, INSTANCE_ID, APP_PROFILE_ID + ) + CLUSTER_ID = "cluster-id" OP_ID = 8765 - OP_NAME = ( - 'operations/projects/{}/instances/{}/appProfiles/{}/operations/{}' - .format(PROJECT, INSTANCE_ID, APP_PROFILE_ID, OP_ID)) + OP_NAME = "operations/projects/{}/instances/{}/appProfiles/{}/operations/{}".format( + PROJECT, INSTANCE_ID, APP_PROFILE_ID, OP_ID + ) @staticmethod def _get_target_class(): @@ -93,22 +91,28 @@ def test_constructor_non_defaults(self): from google.cloud.bigtable.enums import RoutingPolicyType ANY = RoutingPolicyType.ANY - DESCRIPTION_1 = 'routing policy any' - APP_PROFILE_ID_2 = 'app-profile-id-2' + DESCRIPTION_1 = "routing policy any" + APP_PROFILE_ID_2 = "app-profile-id-2" SINGLE = RoutingPolicyType.SINGLE - DESCRIPTION_2 = 
'routing policy single' + DESCRIPTION_2 = "routing policy single" ALLOW_WRITES = True client = _Client(self.PROJECT) instance = _Instance(self.INSTANCE_ID, client) - app_profile1 = self._make_one(self.APP_PROFILE_ID, instance, - routing_policy_type=ANY, - description=DESCRIPTION_1) - app_profile2 = self._make_one(APP_PROFILE_ID_2, instance, - routing_policy_type=SINGLE, - description=DESCRIPTION_2, - cluster_id=self.CLUSTER_ID, - allow_transactional_writes=ALLOW_WRITES) + app_profile1 = self._make_one( + self.APP_PROFILE_ID, + instance, + routing_policy_type=ANY, + description=DESCRIPTION_1, + ) + app_profile2 = self._make_one( + APP_PROFILE_ID_2, + instance, + routing_policy_type=SINGLE, + description=DESCRIPTION_2, + cluster_id=self.CLUSTER_ID, + allow_transactional_writes=ALLOW_WRITES, + ) self.assertEqual(app_profile1.app_profile_id, self.APP_PROFILE_ID) self.assertIs(app_profile1._instance, instance) self.assertEqual(app_profile1.routing_policy_type, ANY) @@ -122,8 +126,9 @@ def test_constructor_non_defaults(self): def test_name_property(self): credentials = _make_credentials() - client = self._make_client(project=self.PROJECT, - credentials=credentials, admin=True) + client = self._make_client( + project=self.PROJECT, credentials=credentials, admin=True + ) instance = _Instance(self.INSTANCE_ID, client) app_profile = self._make_one(self.APP_PROFILE_ID, instance) @@ -139,7 +144,7 @@ def test___eq__(self): def test___eq__type_instance_differ(self): client = _Client(self.PROJECT) instance = _Instance(self.INSTANCE_ID, client) - alt_instance = _Instance('other-instance', client) + alt_instance = _Instance("other-instance", client) other_object = _Other(self.APP_PROFILE_ID, instance) app_profile1 = self._make_one(self.APP_PROFILE_ID, instance) app_profile2 = self._make_one(self.APP_PROFILE_ID, alt_instance) @@ -156,27 +161,28 @@ def test___ne__same_value(self): def test___ne__(self): client = _Client(self.PROJECT) instance = _Instance(self.INSTANCE_ID, client) - app_profile1 = self._make_one('app_profile_id1', instance) - app_profile2 = self._make_one('app_profile_id2', instance) + app_profile1 = self._make_one("app_profile_id1", instance) + app_profile2 = self._make_one("app_profile_id2", instance) self.assertTrue(app_profile1 != app_profile2) def test_from_pb_success_routing_any(self): - from google.cloud.bigtable_admin_v2.types import ( - instance_pb2 as data_v2_pb2) + from google.cloud.bigtable_admin_v2.types import instance_pb2 as data_v2_pb2 from google.cloud.bigtable.enums import RoutingPolicyType client = _Client(self.PROJECT) instance = _Instance(self.INSTANCE_ID, client) - desctiption = 'routing any' + desctiption = "routing any" routing = RoutingPolicyType.ANY multi_cluster_routing_use_any = ( - data_v2_pb2.AppProfile.MultiClusterRoutingUseAny()) + data_v2_pb2.AppProfile.MultiClusterRoutingUseAny() + ) app_profile_pb = data_v2_pb2.AppProfile( name=self.APP_PROFILE_NAME, description=desctiption, - multi_cluster_routing_use_any=multi_cluster_routing_use_any) + multi_cluster_routing_use_any=multi_cluster_routing_use_any, + ) klass = self._get_target_class() app_profile = klass.from_pb(app_profile_pb, instance) @@ -189,25 +195,25 @@ def test_from_pb_success_routing_any(self): self.assertEqual(app_profile.allow_transactional_writes, False) def test_from_pb_success_routing_single(self): - from google.cloud.bigtable_admin_v2.types import ( - instance_pb2 as data_v2_pb2) + from google.cloud.bigtable_admin_v2.types import instance_pb2 as data_v2_pb2 from google.cloud.bigtable.enums 
import RoutingPolicyType client = _Client(self.PROJECT) instance = _Instance(self.INSTANCE_ID, client) - desctiption = 'routing single' + desctiption = "routing single" allow_transactional_writes = True routing = RoutingPolicyType.SINGLE - single_cluster_routing = ( - data_v2_pb2.AppProfile.SingleClusterRouting( - cluster_id=self.CLUSTER_ID, - allow_transactional_writes=allow_transactional_writes)) + single_cluster_routing = data_v2_pb2.AppProfile.SingleClusterRouting( + cluster_id=self.CLUSTER_ID, + allow_transactional_writes=allow_transactional_writes, + ) app_profile_pb = data_v2_pb2.AppProfile( name=self.APP_PROFILE_NAME, description=desctiption, - single_cluster_routing=single_cluster_routing) + single_cluster_routing=single_cluster_routing, + ) klass = self._get_target_class() app_profile = klass.from_pb(app_profile_pb, instance) @@ -217,14 +223,14 @@ def test_from_pb_success_routing_single(self): self.assertEqual(app_profile.description, desctiption) self.assertEqual(app_profile.routing_policy_type, routing) self.assertEqual(app_profile.cluster_id, self.CLUSTER_ID) - self.assertEqual(app_profile.allow_transactional_writes, - allow_transactional_writes) + self.assertEqual( + app_profile.allow_transactional_writes, allow_transactional_writes + ) def test_from_pb_bad_app_profile_name(self): - from google.cloud.bigtable_admin_v2.proto import ( - instance_pb2 as data_v2_pb2) + from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 - bad_app_profile_name = 'BAD_NAME' + bad_app_profile_name = "BAD_NAME" app_profile_pb = data_v2_pb2.AppProfile(name=bad_app_profile_name) @@ -233,10 +239,9 @@ def test_from_pb_bad_app_profile_name(self): klass.from_pb(app_profile_pb, None) def test_from_pb_instance_id_mistmatch(self): - from google.cloud.bigtable_admin_v2.proto import ( - instance_pb2 as data_v2_pb2) + from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 - ALT_INSTANCE_ID = 'ALT_INSTANCE_ID' + ALT_INSTANCE_ID = "ALT_INSTANCE_ID" client = _Client(self.PROJECT) instance = _Instance(ALT_INSTANCE_ID, client) self.assertEqual(instance.instance_id, ALT_INSTANCE_ID) @@ -248,10 +253,9 @@ def test_from_pb_instance_id_mistmatch(self): klass.from_pb(app_profile_pb, instance) def test_from_pb_project_mistmatch(self): - from google.cloud.bigtable_admin_v2.proto import ( - instance_pb2 as data_v2_pb2) + from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 - ALT_PROJECT = 'ALT_PROJECT' + ALT_PROJECT = "ALT_PROJECT" client = _Client(project=ALT_PROJECT) instance = _Instance(self.INSTANCE_ID, client) self.assertEqual(client.project, ALT_PROJECT) @@ -263,44 +267,45 @@ def test_from_pb_project_mistmatch(self): klass.from_pb(app_profile_pb, instance) def test_reload_routing_any(self): - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_instance_admin_client) - from google.cloud.bigtable_admin_v2.proto import ( - instance_pb2 as data_v2_pb2) + from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client + from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 from google.cloud.bigtable.enums import RoutingPolicyType - api = bigtable_instance_admin_client.BigtableInstanceAdminClient( - mock.Mock()) + api = bigtable_instance_admin_client.BigtableInstanceAdminClient(mock.Mock()) credentials = _make_credentials() - client = self._make_client(project=self.PROJECT, - credentials=credentials, admin=True) + client = self._make_client( + project=self.PROJECT, credentials=credentials, admin=True + ) 
instance = _Instance(self.INSTANCE_ID, client) routing = RoutingPolicyType.ANY - description = 'routing policy any' + description = "routing policy any" - app_profile = self._make_one(self.APP_PROFILE_ID, instance, - routing_policy_type=routing, - description=description) + app_profile = self._make_one( + self.APP_PROFILE_ID, + instance, + routing_policy_type=routing, + description=description, + ) # Create response_pb - description_from_server = 'routing policy switched to single' + description_from_server = "routing policy switched to single" cluster_id_from_server = self.CLUSTER_ID allow_transactional_writes = True - single_cluster_routing = ( - data_v2_pb2.AppProfile.SingleClusterRouting( - cluster_id=cluster_id_from_server, - allow_transactional_writes=allow_transactional_writes)) + single_cluster_routing = data_v2_pb2.AppProfile.SingleClusterRouting( + cluster_id=cluster_id_from_server, + allow_transactional_writes=allow_transactional_writes, + ) response_pb = data_v2_pb2.AppProfile( name=app_profile.name, single_cluster_routing=single_cluster_routing, - description=description_from_server) + description=description_from_server, + ) # Patch the stub used by the API method. client._instance_admin_client = api - instance_stub = ( - client._instance_admin_client.transport) + instance_stub = client._instance_admin_client.transport instance_stub.get_app_profile.side_effect = [response_pb] # Create expected_result. @@ -315,26 +320,25 @@ def test_reload_routing_any(self): # Perform the method and check the result. result = app_profile.reload() self.assertEqual(result, expected_result) - self.assertEqual(app_profile.routing_policy_type, - RoutingPolicyType.SINGLE) + self.assertEqual(app_profile.routing_policy_type, RoutingPolicyType.SINGLE) self.assertEqual(app_profile.description, description_from_server) self.assertEqual(app_profile.cluster_id, cluster_id_from_server) - self.assertEqual(app_profile.allow_transactional_writes, - allow_transactional_writes) + self.assertEqual( + app_profile.allow_transactional_writes, allow_transactional_writes + ) def test_exists(self): - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_instance_admin_client) - from google.cloud.bigtable_admin_v2.proto import ( - instance_pb2 as data_v2_pb2) + from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client + from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 from google.api_core import exceptions - instance_api = ( - bigtable_instance_admin_client.BigtableInstanceAdminClient( - mock.Mock())) + instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient( + mock.Mock() + ) credentials = _make_credentials() - client = self._make_client(project=self.PROJECT, - credentials=credentials, admin=True) + client = self._make_client( + project=self.PROJECT, credentials=credentials, admin=True + ) instance = client.instance(self.INSTANCE_ID) # Create response_pb @@ -343,16 +347,15 @@ def test_exists(self): # Patch the stub used by the API method. client._instance_admin_client = instance_api - instance_stub = ( - client._instance_admin_client.transport) + instance_stub = client._instance_admin_client.transport instance_stub.get_app_profile.side_effect = [ response_pb, - exceptions.NotFound('testing'), - exceptions.BadRequest('testing'), + exceptions.NotFound("testing"), + exceptions.BadRequest("testing"), ] # Perform the method and check the result. 
- non_existing_app_profile_id = 'other-app-profile-id' + non_existing_app_profile_id = "other-app-profile-id" app_profile = self._make_one(self.APP_PROFILE_ID, instance) alt_app_profile = self._make_one(non_existing_app_profile_id, instance) self.assertTrue(app_profile.exists()) @@ -362,35 +365,40 @@ def test_exists(self): def test_create_routing_any(self): from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as messages_v2_pb2) + bigtable_instance_admin_pb2 as messages_v2_pb2, + ) from google.cloud.bigtable.enums import RoutingPolicyType - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_instance_admin_client) + from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client credentials = _make_credentials() - client = self._make_client(project=self.PROJECT, - credentials=credentials, admin=True) + client = self._make_client( + project=self.PROJECT, credentials=credentials, admin=True + ) instance = client.instance(self.INSTANCE_ID) routing = RoutingPolicyType.ANY - description = 'routing policy any' + description = "routing policy any" ignore_warnings = True - app_profile = self._make_one(self.APP_PROFILE_ID, instance, - routing_policy_type=routing, - description=description) + app_profile = self._make_one( + self.APP_PROFILE_ID, + instance, + routing_policy_type=routing, + description=description, + ) expected_request_app_profile = app_profile._to_pb() expected_request = messages_v2_pb2.CreateAppProfileRequest( - parent=instance.name, app_profile_id=self.APP_PROFILE_ID, + parent=instance.name, + app_profile_id=self.APP_PROFILE_ID, app_profile=expected_request_app_profile, - ignore_warnings=ignore_warnings + ignore_warnings=ignore_warnings, ) # Patch the stub used by the API method. channel = ChannelStub(responses=[expected_request_app_profile]) - instance_api = ( - bigtable_instance_admin_client.BigtableInstanceAdminClient( - channel=channel)) + instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient( + channel=channel + ) client._instance_admin_client = instance_api # Perform the method and check the result. 
result = app_profile.create(ignore_warnings) @@ -407,38 +415,43 @@ def test_create_routing_any(self): def test_create_routing_single(self): from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as messages_v2_pb2) + bigtable_instance_admin_pb2 as messages_v2_pb2, + ) from google.cloud.bigtable.enums import RoutingPolicyType - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_instance_admin_client) + from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client credentials = _make_credentials() - client = self._make_client(project=self.PROJECT, - credentials=credentials, admin=True) + client = self._make_client( + project=self.PROJECT, credentials=credentials, admin=True + ) instance = client.instance(self.INSTANCE_ID) routing = RoutingPolicyType.SINGLE - description = 'routing policy single' + description = "routing policy single" allow_writes = False ignore_warnings = True - app_profile = self._make_one(self.APP_PROFILE_ID, instance, - routing_policy_type=routing, - description=description, - cluster_id=self.CLUSTER_ID, - allow_transactional_writes=allow_writes) + app_profile = self._make_one( + self.APP_PROFILE_ID, + instance, + routing_policy_type=routing, + description=description, + cluster_id=self.CLUSTER_ID, + allow_transactional_writes=allow_writes, + ) expected_request_app_profile = app_profile._to_pb() expected_request = messages_v2_pb2.CreateAppProfileRequest( - parent=instance.name, app_profile_id=self.APP_PROFILE_ID, + parent=instance.name, + app_profile_id=self.APP_PROFILE_ID, app_profile=expected_request_app_profile, - ignore_warnings=ignore_warnings + ignore_warnings=ignore_warnings, ) # Patch the stub used by the API method. channel = ChannelStub(responses=[expected_request_app_profile]) - instance_api = ( - bigtable_instance_admin_client.BigtableInstanceAdminClient( - channel=channel)) + instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient( + channel=channel + ) client._instance_admin_client = instance_api # Perform the method and check the result. 
result = app_profile.create(ignore_warnings) @@ -455,11 +468,13 @@ def test_create_routing_single(self): def test_create_app_profile_with_wrong_routing_policy(self): credentials = _make_credentials() - client = self._make_client(project=self.PROJECT, - credentials=credentials, admin=True) + client = self._make_client( + project=self.PROJECT, credentials=credentials, admin=True + ) instance = client.instance(self.INSTANCE_ID) - app_profile = self._make_one(self.APP_PROFILE_ID, instance, - routing_policy_type=None) + app_profile = self._make_one( + self.APP_PROFILE_ID, instance, routing_policy_type=None + ) with self.assertRaises(ValueError): app_profile.create() @@ -468,55 +483,57 @@ def test_update_app_profile_routing_any(self): from google.longrunning import operations_pb2 from google.protobuf.any_pb2 import Any from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as messages_v2_pb2) + bigtable_instance_admin_pb2 as messages_v2_pb2, + ) from google.cloud.bigtable.enums import RoutingPolicyType - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_instance_admin_client) + from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client from google.protobuf import field_mask_pb2 credentials = _make_credentials() - client = self._make_client(project=self.PROJECT, - credentials=credentials, admin=True) + client = self._make_client( + project=self.PROJECT, credentials=credentials, admin=True + ) instance = client.instance(self.INSTANCE_ID) routing = RoutingPolicyType.SINGLE - description = 'to routing policy single' + description = "to routing policy single" allow_writes = True - app_profile = self._make_one(self.APP_PROFILE_ID, instance, - routing_policy_type=routing, - description=description, - cluster_id=self.CLUSTER_ID, - allow_transactional_writes=allow_writes) + app_profile = self._make_one( + self.APP_PROFILE_ID, + instance, + routing_policy_type=routing, + description=description, + cluster_id=self.CLUSTER_ID, + allow_transactional_writes=allow_writes, + ) # Create response_pb metadata = messages_v2_pb2.UpdateAppProfileMetadata() - type_url = 'type.googleapis.com/{}'.format( - messages_v2_pb2.UpdateAppProfileMetadata.DESCRIPTOR.full_name) + type_url = "type.googleapis.com/{}".format( + messages_v2_pb2.UpdateAppProfileMetadata.DESCRIPTOR.full_name + ) response_pb = operations_pb2.Operation( name=self.OP_NAME, - metadata=Any( - type_url=type_url, - value=metadata.SerializeToString(), - ) + metadata=Any(type_url=type_url, value=metadata.SerializeToString()), ) # Patch the stub used by the API method. channel = ChannelStub(responses=[response_pb]) - instance_api = ( - bigtable_instance_admin_client.BigtableInstanceAdminClient( - channel=channel)) + instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient( + channel=channel + ) # Mock api calls client._instance_admin_client = instance_api # Perform the method and check the result. 
ignore_warnings = True expected_request_update_mask = field_mask_pb2.FieldMask( - paths=['description', 'single_cluster_routing'] + paths=["description", "single_cluster_routing"] ) expected_request = messages_v2_pb2.UpdateAppProfileRequest( app_profile=app_profile._to_pb(), update_mask=expected_request_update_mask, - ignore_warnings=ignore_warnings + ignore_warnings=ignore_warnings, ) result = app_profile.update(ignore_warnings=ignore_warnings) @@ -525,58 +542,57 @@ def test_update_app_profile_routing_any(self): self.assertEqual(actual_request, expected_request) self.assertIsInstance(result, operation.Operation) self.assertEqual(result.operation.name, self.OP_NAME) - self.assertIsInstance(result.metadata, - messages_v2_pb2.UpdateAppProfileMetadata) + self.assertIsInstance(result.metadata, messages_v2_pb2.UpdateAppProfileMetadata) def test_update_app_profile_routing_single(self): from google.api_core import operation from google.longrunning import operations_pb2 from google.protobuf.any_pb2 import Any from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as messages_v2_pb2) + bigtable_instance_admin_pb2 as messages_v2_pb2, + ) from google.cloud.bigtable.enums import RoutingPolicyType - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_instance_admin_client) + from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client from google.protobuf import field_mask_pb2 credentials = _make_credentials() - client = self._make_client(project=self.PROJECT, - credentials=credentials, admin=True) + client = self._make_client( + project=self.PROJECT, credentials=credentials, admin=True + ) instance = client.instance(self.INSTANCE_ID) routing = RoutingPolicyType.ANY - app_profile = self._make_one(self.APP_PROFILE_ID, instance, - routing_policy_type=routing) + app_profile = self._make_one( + self.APP_PROFILE_ID, instance, routing_policy_type=routing + ) # Create response_pb metadata = messages_v2_pb2.UpdateAppProfileMetadata() - type_url = 'type.googleapis.com/{}'.format( - messages_v2_pb2.UpdateAppProfileMetadata.DESCRIPTOR.full_name) + type_url = "type.googleapis.com/{}".format( + messages_v2_pb2.UpdateAppProfileMetadata.DESCRIPTOR.full_name + ) response_pb = operations_pb2.Operation( name=self.OP_NAME, - metadata=Any( - type_url=type_url, - value=metadata.SerializeToString(), - ) + metadata=Any(type_url=type_url, value=metadata.SerializeToString()), ) # Patch the stub used by the API method. channel = ChannelStub(responses=[response_pb]) - instance_api = ( - bigtable_instance_admin_client.BigtableInstanceAdminClient( - channel=channel)) + instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient( + channel=channel + ) # Mock api calls client._instance_admin_client = instance_api # Perform the method and check the result. 
ignore_warnings = True expected_request_update_mask = field_mask_pb2.FieldMask( - paths=['multi_cluster_routing_use_any'] + paths=["multi_cluster_routing_use_any"] ) expected_request = messages_v2_pb2.UpdateAppProfileRequest( app_profile=app_profile._to_pb(), update_mask=expected_request_update_mask, - ignore_warnings=ignore_warnings + ignore_warnings=ignore_warnings, ) result = app_profile.update(ignore_warnings=ignore_warnings) @@ -585,31 +601,32 @@ def test_update_app_profile_routing_single(self): self.assertEqual(actual_request, expected_request) self.assertIsInstance(result, operation.Operation) self.assertEqual(result.operation.name, self.OP_NAME) - self.assertIsInstance(result.metadata, - messages_v2_pb2.UpdateAppProfileMetadata) + self.assertIsInstance(result.metadata, messages_v2_pb2.UpdateAppProfileMetadata) def test_update_app_profile_with_wrong_routing_policy(self): credentials = _make_credentials() - client = self._make_client(project=self.PROJECT, - credentials=credentials, admin=True) + client = self._make_client( + project=self.PROJECT, credentials=credentials, admin=True + ) instance = client.instance(self.INSTANCE_ID) - app_profile = self._make_one(self.APP_PROFILE_ID, instance, - routing_policy_type=None) + app_profile = self._make_one( + self.APP_PROFILE_ID, instance, routing_policy_type=None + ) with self.assertRaises(ValueError): app_profile.update() def test_delete(self): from google.protobuf import empty_pb2 - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_instance_admin_client) + from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client - instance_api = ( - bigtable_instance_admin_client.BigtableInstanceAdminClient( - mock.Mock())) + instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient( + mock.Mock() + ) credentials = _make_credentials() - client = self._make_client(project=self.PROJECT, - credentials=credentials, admin=True) + client = self._make_client( + project=self.PROJECT, credentials=credentials, admin=True + ) instance = client.instance(self.INSTANCE_ID) app_profile = self._make_one(self.APP_PROFILE_ID, instance) @@ -618,8 +635,7 @@ def test_delete(self): # Patch the stub used by the API method. client._instance_admin_client = instance_api - instance_stub = ( - client._instance_admin_client.transport) + instance_stub = client._instance_admin_client.transport instance_stub.delete_cluster.side_effect = [response_pb] # Create expected_result. 
@@ -632,30 +648,25 @@ def test_delete(self): class _Client(object): - def __init__(self, project): self.project = project - self.project_name = 'projects/' + self.project + self.project_name = "projects/" + self.project self._operations_stub = mock.sentinel.operations_stub def __eq__(self, other): - return (other.project == self.project and - other.project_name == self.project_name) + return other.project == self.project and other.project_name == self.project_name class _Instance(object): - def __init__(self, instance_id, client): self.instance_id = instance_id self._client = client def __eq__(self, other): - return (other.instance_id == self.instance_id and - other._client == self._client) + return other.instance_id == self.instance_id and other._client == self._client class _Other(object): - def __init__(self, app_profile_id, instance): self.app_profile_id = app_profile_id self._instance = instance diff --git a/packages/google-cloud-bigtable/tests/unit/test_batcher.py b/packages/google-cloud-bigtable/tests/unit/test_batcher.py index 4666f28d1776..8760c3a2de2c 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_batcher.py +++ b/packages/google-cloud-bigtable/tests/unit/test_batcher.py @@ -26,8 +26,8 @@ class TestMutationsBatcher(unittest.TestCase): from grpc import StatusCode - TABLE_ID = 'table-id' - TABLE_NAME = '/tables/' + TABLE_ID + TABLE_ID = "table-id" + TABLE_NAME = "/tables/" + TABLE_ID # RPC Status Codes SUCCESS = StatusCode.OK.value[0] @@ -52,10 +52,11 @@ def _make_client(self, *args, **kwargs): def test_constructor(self): credentials = _make_credentials() - client = self._make_client(project='project-id', - credentials=credentials, admin=True) + client = self._make_client( + project="project-id", credentials=credentials, admin=True + ) - instance = client.instance(instance_id='instance-id') + instance = client.instance(instance_id="instance-id") table = self._make_table(self.TABLE_ID, instance) mutation_batcher = MutationsBatcher(table) @@ -65,10 +66,12 @@ def test_mutate_row(self): table = _Table(self.TABLE_NAME) mutation_batcher = MutationsBatcher(table=table) - rows = [DirectRow(row_key=b'row_key'), - DirectRow(row_key=b'row_key_2'), - DirectRow(row_key=b'row_key_3'), - DirectRow(row_key=b'row_key_4')] + rows = [ + DirectRow(row_key=b"row_key"), + DirectRow(row_key=b"row_key_2"), + DirectRow(row_key=b"row_key_3"), + DirectRow(row_key=b"row_key_4"), + ] mutation_batcher.mutate_rows(rows) mutation_batcher.flush() @@ -79,11 +82,11 @@ def test_mutate_rows(self): table = _Table(self.TABLE_NAME) mutation_batcher = MutationsBatcher(table=table) - row = DirectRow(row_key=b'row_key') - row.set_cell('cf1', b'c1', 1) - row.set_cell('cf1', b'c2', 2) - row.set_cell('cf1', b'c3', 3) - row.set_cell('cf1', b'c4', 4) + row = DirectRow(row_key=b"row_key") + row.set_cell("cf1", b"c1", 1) + row.set_cell("cf1", b"c2", 2) + row.set_cell("cf1", b"c3", 3) + row.set_cell("cf1", b"c4", 4) mutation_batcher.mutate(row) @@ -102,9 +105,9 @@ def test_add_row_with_max_flush_count(self): table = _Table(self.TABLE_NAME) mutation_batcher = MutationsBatcher(table=table, flush_count=3) - row_1 = DirectRow(row_key=b'row_key_1') - row_2 = DirectRow(row_key=b'row_key_2') - row_3 = DirectRow(row_key=b'row_key_3') + row_1 = DirectRow(row_key=b"row_key_1") + row_2 = DirectRow(row_key=b"row_key_2") + row_3 = DirectRow(row_key=b"row_key_3") mutation_batcher.mutate(row_1) mutation_batcher.mutate(row_2) @@ -112,31 +115,31 @@ def test_add_row_with_max_flush_count(self): self.assertEqual(table.mutation_calls, 1) - 
@mock.patch('google.cloud.bigtable.batcher.MAX_MUTATIONS', new=3) + @mock.patch("google.cloud.bigtable.batcher.MAX_MUTATIONS", new=3) def test_mutate_row_with_max_mutations_failure(self): from google.cloud.bigtable.batcher import MaxMutationsError table = _Table(self.TABLE_NAME) mutation_batcher = MutationsBatcher(table=table) - row = DirectRow(row_key=b'row_key') - row.set_cell('cf1', b'c1', 1) - row.set_cell('cf1', b'c2', 2) - row.set_cell('cf1', b'c3', 3) - row.set_cell('cf1', b'c4', 4) + row = DirectRow(row_key=b"row_key") + row.set_cell("cf1", b"c1", 1) + row.set_cell("cf1", b"c2", 2) + row.set_cell("cf1", b"c3", 3) + row.set_cell("cf1", b"c4", 4) with self.assertRaises(MaxMutationsError): mutation_batcher.mutate(row) - @mock.patch('google.cloud.bigtable.batcher.MAX_MUTATIONS', new=3) + @mock.patch("google.cloud.bigtable.batcher.MAX_MUTATIONS", new=3) def test_mutate_row_with_max_mutations(self): table = _Table(self.TABLE_NAME) mutation_batcher = MutationsBatcher(table=table) - row = DirectRow(row_key=b'row_key') - row.set_cell('cf1', b'c1', 1) - row.set_cell('cf1', b'c2', 2) - row.set_cell('cf1', b'c3', 3) + row = DirectRow(row_key=b"row_key") + row.set_cell("cf1", b"c1", 1) + row.set_cell("cf1", b"c2", 2) + row.set_cell("cf1", b"c3", 3) mutation_batcher.mutate(row) mutation_batcher.flush() @@ -145,16 +148,15 @@ def test_mutate_row_with_max_mutations(self): def test_mutate_row_with_max_row_bytes(self): table = _Table(self.TABLE_NAME) - mutation_batcher = MutationsBatcher(table=table, - max_row_bytes=3 * 1024 * 1024) + mutation_batcher = MutationsBatcher(table=table, max_row_bytes=3 * 1024 * 1024) number_of_bytes = 1 * 1024 * 1024 - max_value = b'1' * number_of_bytes + max_value = b"1" * number_of_bytes - row = DirectRow(row_key=b'row_key') - row.set_cell('cf1', b'c1', max_value) - row.set_cell('cf1', b'c2', max_value) - row.set_cell('cf1', b'c3', max_value) + row = DirectRow(row_key=b"row_key") + row.set_cell("cf1", b"c1", max_value) + row.set_cell("cf1", b"c2", max_value) + row.set_cell("cf1", b"c3", max_value) mutation_batcher.mutate(row) @@ -162,13 +164,11 @@ def test_mutate_row_with_max_row_bytes(self): class _Instance(object): - def __init__(self, client=None): self._client = client class _Table(object): - def __init__(self, name, client=None): self.name = name self._instance = _Instance(client) diff --git a/packages/google-cloud-bigtable/tests/unit/test_client.py b/packages/google-cloud-bigtable/tests/unit/test_client.py index e45c0d44218e..7bcbbd2b3db3 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/test_client.py @@ -21,7 +21,6 @@ class Test__create_gapic_client(unittest.TestCase): - def _invoke_client_factory(self, client_class): from google.cloud.bigtable.client import _create_gapic_client @@ -38,8 +37,8 @@ def test_without_emulator(self): self.assertIs(result, client_class.return_value) client_class.assert_called_once_with( - credentials=client._credentials, - client_info=_CLIENT_INFO) + credentials=client._credentials, client_info=_CLIENT_INFO + ) def test_with_emulator(self): from google.cloud.bigtable.client import _CLIENT_INFO @@ -47,21 +46,20 @@ def test_with_emulator(self): client_class = mock.Mock() emulator_host = emulator_channel = object() credentials = _make_credentials() - client = _Client(credentials, emulator_host=emulator_host, - emulator_channel=emulator_channel) + client = _Client( + credentials, emulator_host=emulator_host, emulator_channel=emulator_channel + ) result = 
self._invoke_client_factory(client_class)(client) self.assertIs(result, client_class.return_value) client_class.assert_called_once_with( - channel=client._emulator_channel, - client_info=_CLIENT_INFO) + channel=client._emulator_channel, client_info=_CLIENT_INFO + ) class _Client(object): - - def __init__(self, credentials, emulator_host=None, - emulator_channel=None): + def __init__(self, credentials, emulator_host=None, emulator_channel=None): self._credentials = credentials self._emulator_host = emulator_host self._emulator_channel = emulator_channel @@ -69,10 +67,10 @@ def __init__(self, credentials, emulator_host=None, class TestClient(unittest.TestCase): - PROJECT = 'PROJECT' - INSTANCE_ID = 'instance-id' - DISPLAY_NAME = 'display-name' - USER_AGENT = 'you-sir-age-int' + PROJECT = "PROJECT" + INSTANCE_ID = "instance-id" + DISPLAY_NAME = "display-name" + USER_AGENT = "you-sir-age-int" @staticmethod def _get_target_class(): @@ -88,13 +86,12 @@ def test_constructor_defaults(self): credentials = _make_credentials() - with mock.patch('google.auth.default') as mocked: + with mock.patch("google.auth.default") as mocked: mocked.return_value = credentials, self.PROJECT client = self._make_one() self.assertEqual(client.project, self.PROJECT) - self.assertIs( - client._credentials, credentials.with_scopes.return_value) + self.assertIs(client._credentials, credentials.with_scopes.return_value) self.assertFalse(client._read_only) self.assertFalse(client._admin) self.assertIsNone(client._channel) @@ -121,8 +118,7 @@ def test_constructor_explicit(self): self.assertEqual(len(warned), 1) self.assertEqual(client.project, self.PROJECT) - self.assertIs( - client._credentials, credentials.with_scopes.return_value) + self.assertIs(client._credentials, credentials.with_scopes.return_value) self.assertFalse(client._read_only) self.assertTrue(client._admin) self.assertIs(client._channel, mock.sentinel.channel) @@ -132,20 +128,22 @@ def test_constructor_both_admin_and_read_only(self): credentials = _make_credentials() with self.assertRaises(ValueError): self._make_one( - project=self.PROJECT, credentials=credentials, - admin=True, read_only=True) + project=self.PROJECT, + credentials=credentials, + admin=True, + read_only=True, + ) def test_constructor_with_emulator_host(self): from google.cloud.environment_vars import BIGTABLE_EMULATOR credentials = _make_credentials() emulator_host = "localhost:8081" - with mock.patch('os.getenv') as getenv: + with mock.patch("os.getenv") as getenv: getenv.return_value = emulator_host - with mock.patch('grpc.insecure_channel') as factory: + with mock.patch("grpc.insecure_channel") as factory: getenv.return_value = emulator_host - client = self._make_one( - project=self.PROJECT, credentials=credentials) + client = self._make_one(project=self.PROJECT, credentials=credentials) self.assertEqual(client._emulator_host, emulator_host) self.assertIs(client._emulator_channel, factory.return_value) @@ -155,8 +153,7 @@ def test_constructor_with_emulator_host(self): def test__get_scopes_default(self): from google.cloud.bigtable.client import DATA_SCOPE - client = self._make_one( - project=self.PROJECT, credentials=_make_credentials()) + client = self._make_one(project=self.PROJECT, credentials=_make_credentials()) self.assertEqual(client._get_scopes(), (DATA_SCOPE,)) def test__get_scopes_admin(self): @@ -164,8 +161,8 @@ def test__get_scopes_admin(self): from google.cloud.bigtable.client import DATA_SCOPE client = self._make_one( - project=self.PROJECT, credentials=_make_credentials(), - 
admin=True) + project=self.PROJECT, credentials=_make_credentials(), admin=True + ) expected_scopes = (DATA_SCOPE, ADMIN_SCOPE) self.assertEqual(client._get_scopes(), expected_scopes) @@ -173,16 +170,15 @@ def test__get_scopes_read_only(self): from google.cloud.bigtable.client import READ_ONLY_SCOPE client = self._make_one( - project=self.PROJECT, credentials=_make_credentials(), - read_only=True) + project=self.PROJECT, credentials=_make_credentials(), read_only=True + ) self.assertEqual(client._get_scopes(), (READ_ONLY_SCOPE,)) def test_project_path_property(self): credentials = _make_credentials() - project = 'PROJECT' - client = self._make_one(project=project, credentials=credentials, - admin=True) - project_name = 'projects/' + project + project = "PROJECT" + client = self._make_one(project=project, credentials=credentials, admin=True) + project_name = "projects/" + project self.assertEqual(client.project_path, project_name) def test_table_data_client_not_initialized(self): @@ -197,8 +193,9 @@ def test_table_data_client_not_initialized(self): def test_table_data_client_initialized(self): credentials = _make_credentials() - client = self._make_one(project=self.PROJECT, credentials=credentials, - admin=True) + client = self._make_one( + project=self.PROJECT, credentials=credentials, admin=True + ) already = client._table_data_client = object() self.assertIs(client.table_data_client, already) @@ -215,15 +212,17 @@ def test_table_admin_client_not_initialized_w_admin_flag(self): credentials = _make_credentials() client = self._make_one( - project=self.PROJECT, credentials=credentials, admin=True) + project=self.PROJECT, credentials=credentials, admin=True + ) table_admin_client = client.table_admin_client self.assertIsInstance(table_admin_client, BigtableTableAdminClient) def test_table_admin_client_initialized(self): credentials = _make_credentials() - client = self._make_one(project=self.PROJECT, credentials=credentials, - admin=True) + client = self._make_one( + project=self.PROJECT, credentials=credentials, admin=True + ) already = client._table_admin_client = object() self.assertIs(client.table_admin_client, already) @@ -240,16 +239,17 @@ def test_instance_admin_client_not_initialized_w_admin_flag(self): credentials = _make_credentials() client = self._make_one( - project=self.PROJECT, credentials=credentials, admin=True) + project=self.PROJECT, credentials=credentials, admin=True + ) instance_admin_client = client.instance_admin_client - self.assertIsInstance( - instance_admin_client, BigtableInstanceAdminClient) + self.assertIsInstance(instance_admin_client, BigtableInstanceAdminClient) def test_instance_admin_client_initialized(self): credentials = _make_credentials() - client = self._make_one(project=self.PROJECT, credentials=credentials, - admin=True) + client = self._make_one( + project=self.PROJECT, credentials=credentials, admin=True + ) already = client._instance_admin_client = object() self.assertIs(client.instance_admin_client, already) @@ -257,11 +257,10 @@ def test_instance_admin_client_initialized(self): def test_instance_factory_defaults(self): from google.cloud.bigtable.instance import Instance - PROJECT = 'PROJECT' - INSTANCE_ID = 'instance-id' + PROJECT = "PROJECT" + INSTANCE_ID = "instance-id" credentials = _make_credentials() - client = self._make_one( - project=PROJECT, credentials=credentials) + client = self._make_one(project=PROJECT, credentials=credentials) instance = client.instance(INSTANCE_ID) @@ -276,17 +275,20 @@ def 
test_instance_factory_non_defaults(self): from google.cloud.bigtable.instance import Instance from google.cloud.bigtable import enums - PROJECT = 'PROJECT' - INSTANCE_ID = 'instance-id' - DISPLAY_NAME = 'display-name' + PROJECT = "PROJECT" + INSTANCE_ID = "instance-id" + DISPLAY_NAME = "display-name" instance_type = enums.Instance.Type.DEVELOPMENT - labels = {'foo': 'bar'} + labels = {"foo": "bar"} credentials = _make_credentials() - client = self._make_one( - project=PROJECT, credentials=credentials) + client = self._make_one(project=PROJECT, credentials=credentials) - instance = client.instance(INSTANCE_ID, display_name=DISPLAY_NAME, - instance_type=instance_type, labels=labels) + instance = client.instance( + INSTANCE_ID, + display_name=DISPLAY_NAME, + instance_type=instance_type, + labels=labels, + ) self.assertIsInstance(instance, Instance) self.assertEqual(instance.instance_id, INSTANCE_ID) @@ -296,49 +298,37 @@ def test_instance_factory_non_defaults(self): self.assertIs(instance._client, client) def test_list_instances(self): + from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 from google.cloud.bigtable_admin_v2.proto import ( - instance_pb2 as data_v2_pb2) - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as messages_v2_pb2) - from google.cloud.bigtable_admin_v2.gapic import \ - bigtable_instance_admin_client + bigtable_instance_admin_pb2 as messages_v2_pb2, + ) + from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client from google.cloud.bigtable.instance import Instance - FAILED_LOCATION = 'FAILED' - INSTANCE_ID1 = 'instance-id1' - INSTANCE_ID2 = 'instance-id2' - INSTANCE_NAME1 = ( - 'projects/' + self.PROJECT + '/instances/' + INSTANCE_ID1) - INSTANCE_NAME2 = ( - 'projects/' + self.PROJECT + '/instances/' + INSTANCE_ID2) + FAILED_LOCATION = "FAILED" + INSTANCE_ID1 = "instance-id1" + INSTANCE_ID2 = "instance-id2" + INSTANCE_NAME1 = "projects/" + self.PROJECT + "/instances/" + INSTANCE_ID1 + INSTANCE_NAME2 = "projects/" + self.PROJECT + "/instances/" + INSTANCE_ID2 credentials = _make_credentials() - api = bigtable_instance_admin_client.BigtableInstanceAdminClient( - mock.Mock()) - client = self._make_one(project=self.PROJECT, credentials=credentials, - admin=True) + api = bigtable_instance_admin_client.BigtableInstanceAdminClient(mock.Mock()) + client = self._make_one( + project=self.PROJECT, credentials=credentials, admin=True + ) # Create response_pb response_pb = messages_v2_pb2.ListInstancesResponse( - failed_locations=[ - FAILED_LOCATION, - ], + failed_locations=[FAILED_LOCATION], instances=[ - data_v2_pb2.Instance( - name=INSTANCE_NAME1, - display_name=INSTANCE_NAME1, - ), - data_v2_pb2.Instance( - name=INSTANCE_NAME2, - display_name=INSTANCE_NAME2, - ), + data_v2_pb2.Instance(name=INSTANCE_NAME1, display_name=INSTANCE_NAME1), + data_v2_pb2.Instance(name=INSTANCE_NAME2, display_name=INSTANCE_NAME2), ], ) # Patch the stub used by the API method. client._instance_admin_client = api - bigtable_instance_stub = ( - client.instance_admin_client.transport) + bigtable_instance_stub = client.instance_admin_client.transport bigtable_instance_stub.list_instances.side_effect = [response_pb] # Perform the method and check the result. 
@@ -357,58 +347,51 @@ def test_list_instances(self): self.assertEqual(failed_locations, [FAILED_LOCATION]) def test_list_clusters(self): - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_instance_admin_client) + from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as messages_v2_pb2) - from google.cloud.bigtable_admin_v2.proto import ( - instance_pb2 as data_v2_pb2) + bigtable_instance_admin_pb2 as messages_v2_pb2, + ) + from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 from google.cloud.bigtable.instance import Cluster - instance_api = ( - bigtable_instance_admin_client.BigtableInstanceAdminClient( - mock.Mock())) + instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient( + mock.Mock() + ) credentials = _make_credentials() - client = self._make_one(project=self.PROJECT, credentials=credentials, - admin=True) - - INSTANCE_ID1 = 'instance-id1' - INSTANCE_ID2 = 'instance-id2' - - failed_location = 'FAILED' - cluster_id1 = '{}-cluster'.format(INSTANCE_ID1) - cluster_id2 = '{}-cluster-1'.format(INSTANCE_ID2) - cluster_id3 = '{}-cluster-2'.format(INSTANCE_ID2) - cluster_name1 = (client.instance_admin_client.cluster_path( - self.PROJECT, INSTANCE_ID1, cluster_id1)) - cluster_name2 = (client.instance_admin_client.cluster_path( - self.PROJECT, INSTANCE_ID2, cluster_id2)) - cluster_name3 = (client.instance_admin_client.cluster_path( - self.PROJECT, INSTANCE_ID2, cluster_id3)) + client = self._make_one( + project=self.PROJECT, credentials=credentials, admin=True + ) + + INSTANCE_ID1 = "instance-id1" + INSTANCE_ID2 = "instance-id2" + + failed_location = "FAILED" + cluster_id1 = "{}-cluster".format(INSTANCE_ID1) + cluster_id2 = "{}-cluster-1".format(INSTANCE_ID2) + cluster_id3 = "{}-cluster-2".format(INSTANCE_ID2) + cluster_name1 = client.instance_admin_client.cluster_path( + self.PROJECT, INSTANCE_ID1, cluster_id1 + ) + cluster_name2 = client.instance_admin_client.cluster_path( + self.PROJECT, INSTANCE_ID2, cluster_id2 + ) + cluster_name3 = client.instance_admin_client.cluster_path( + self.PROJECT, INSTANCE_ID2, cluster_id3 + ) # Create response_pb response_pb = messages_v2_pb2.ListClustersResponse( - failed_locations=[ - failed_location - ], + failed_locations=[failed_location], clusters=[ - data_v2_pb2.Cluster( - name=cluster_name1, - ), - data_v2_pb2.Cluster( - name=cluster_name2, - ), - data_v2_pb2.Cluster( - name=cluster_name3, - ), - + data_v2_pb2.Cluster(name=cluster_name1), + data_v2_pb2.Cluster(name=cluster_name2), + data_v2_pb2.Cluster(name=cluster_name3), ], ) # Patch the stub used by the API method. client._instance_admin_client = instance_api - instance_stub = ( - client._instance_admin_client.transport) + instance_stub = client._instance_admin_client.transport instance_stub.list_clusters.side_effect = [response_pb] # Perform the method and check the result. 
@@ -418,17 +401,14 @@ def test_list_clusters(self): self.assertIsInstance(cluster_1, Cluster) self.assertEqual(cluster_1.name, cluster_name1) - self.assertEqual(cluster_1._instance.instance_id, - INSTANCE_ID1) + self.assertEqual(cluster_1._instance.instance_id, INSTANCE_ID1) self.assertIsInstance(cluster_2, Cluster) self.assertEqual(cluster_2.name, cluster_name2) - self.assertEqual(cluster_2._instance.instance_id, - INSTANCE_ID2) + self.assertEqual(cluster_2._instance.instance_id, INSTANCE_ID2) self.assertIsInstance(cluster_3, Cluster) self.assertEqual(cluster_3.name, cluster_name3) - self.assertEqual(cluster_3._instance.instance_id, - INSTANCE_ID2) + self.assertEqual(cluster_3._instance.instance_id, INSTANCE_ID2) self.assertEqual(failed_locations, [failed_location]) diff --git a/packages/google-cloud-bigtable/tests/unit/test_cluster.py b/packages/google-cloud-bigtable/tests/unit/test_cluster.py index 9ee8b36540b4..9a0d39c84977 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_cluster.py +++ b/packages/google-cloud-bigtable/tests/unit/test_cluster.py @@ -40,28 +40,26 @@ def __init__(self, responses=[]): self.responses = responses self.requests = [] - def unary_unary(self, - method, - request_serializer=None, - response_deserializer=None): + def unary_unary(self, method, request_serializer=None, response_deserializer=None): return MultiCallableStub(method, self) class TestCluster(unittest.TestCase): - PROJECT = 'project' - INSTANCE_ID = 'instance-id' - LOCATION_ID = 'location-id' - CLUSTER_ID = 'cluster-id' - LOCATION_ID = 'location-id' - CLUSTER_NAME = ('projects/' + PROJECT + - '/instances/' + INSTANCE_ID + - '/clusters/' + CLUSTER_ID) - LOCATION_PATH = 'projects/' + PROJECT + '/locations/' + PROJECT = "project" + INSTANCE_ID = "instance-id" + LOCATION_ID = "location-id" + CLUSTER_ID = "cluster-id" + LOCATION_ID = "location-id" + CLUSTER_NAME = ( + "projects/" + PROJECT + "/instances/" + INSTANCE_ID + "/clusters/" + CLUSTER_ID + ) + LOCATION_PATH = "projects/" + PROJECT + "/locations/" SERVE_NODES = 5 OP_ID = 5678 - OP_NAME = ('operations/projects/{}/instances/{}/clusters/{}/operations/{}' - .format(PROJECT, INSTANCE_ID, CLUSTER_ID, OP_ID)) + OP_NAME = "operations/projects/{}/instances/{}/clusters/{}/operations/{}".format( + PROJECT, INSTANCE_ID, CLUSTER_ID, OP_ID + ) @staticmethod def _get_target_class(): @@ -96,16 +94,20 @@ def test_constructor_defaults(self): def test_constructor_non_default(self): from google.cloud.bigtable.enums import StorageType from google.cloud.bigtable.enums import Cluster + STATE = Cluster.State.READY STORAGE_TYPE_SSD = StorageType.SSD client = _Client(self.PROJECT) instance = _Instance(self.INSTANCE_ID, client) - cluster = self._make_one(self.CLUSTER_ID, instance, - location_id=self.LOCATION_ID, - _state=STATE, - serve_nodes=self.SERVE_NODES, - default_storage_type=STORAGE_TYPE_SSD) + cluster = self._make_one( + self.CLUSTER_ID, + instance, + location_id=self.LOCATION_ID, + _state=STATE, + serve_nodes=self.SERVE_NODES, + default_storage_type=STORAGE_TYPE_SSD, + ) self.assertEqual(cluster.cluster_id, self.CLUSTER_ID) self.assertIs(cluster._instance, instance) self.assertEqual(cluster.location_id, self.LOCATION_ID) @@ -115,16 +117,16 @@ def test_constructor_non_default(self): def test_name_property(self): credentials = _make_credentials() - client = self._make_client(project=self.PROJECT, - credentials=credentials, admin=True) + client = self._make_client( + project=self.PROJECT, credentials=credentials, admin=True + ) instance = 
_Instance(self.INSTANCE_ID, client) cluster = self._make_one(self.CLUSTER_ID, instance) self.assertEqual(cluster.name, self.CLUSTER_NAME) def test_from_pb_success(self): - from google.cloud.bigtable_admin_v2.proto import ( - instance_pb2 as data_v2_pb2) + from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 from google.cloud.bigtable import enums client = _Client(self.PROJECT) @@ -138,7 +140,7 @@ def test_from_pb_success(self): location=location, state=state, serve_nodes=self.SERVE_NODES, - default_storage_type=storage_type + default_storage_type=storage_type, ) klass = self._get_target_class() @@ -152,10 +154,9 @@ def test_from_pb_success(self): self.assertEqual(cluster.default_storage_type, storage_type) def test_from_pb_bad_cluster_name(self): - from google.cloud.bigtable_admin_v2.proto import ( - instance_pb2 as data_v2_pb2) + from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 - bad_cluster_name = 'BAD_NAME' + bad_cluster_name = "BAD_NAME" cluster_pb = data_v2_pb2.Cluster(name=bad_cluster_name) @@ -164,10 +165,9 @@ def test_from_pb_bad_cluster_name(self): klass.from_pb(cluster_pb, None) def test_from_pb_instance_id_mistmatch(self): - from google.cloud.bigtable_admin_v2.proto import ( - instance_pb2 as data_v2_pb2) + from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 - ALT_INSTANCE_ID = 'ALT_INSTANCE_ID' + ALT_INSTANCE_ID = "ALT_INSTANCE_ID" client = _Client(self.PROJECT) instance = _Instance(ALT_INSTANCE_ID, client) @@ -179,10 +179,9 @@ def test_from_pb_instance_id_mistmatch(self): klass.from_pb(cluster_pb, instance) def test_from_pb_project_mistmatch(self): - from google.cloud.bigtable_admin_v2.proto import ( - instance_pb2 as data_v2_pb2) + from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 - ALT_PROJECT = 'ALT_PROJECT' + ALT_PROJECT = "ALT_PROJECT" client = _Client(project=ALT_PROJECT) instance = _Instance(self.INSTANCE_ID, client) @@ -212,38 +211,39 @@ def test___ne__same_value(self): instance = _Instance(self.INSTANCE_ID, client) cluster1 = self._make_one(self.CLUSTER_ID, instance, self.LOCATION_ID) cluster2 = self._make_one(self.CLUSTER_ID, instance, self.LOCATION_ID) - comparison_val = (cluster1 != cluster2) + comparison_val = cluster1 != cluster2 self.assertFalse(comparison_val) def test___ne__(self): client = _Client(self.PROJECT) instance = _Instance(self.INSTANCE_ID, client) - cluster1 = self._make_one('cluster_id1', instance, self.LOCATION_ID) - cluster2 = self._make_one('cluster_id2', instance, self.LOCATION_ID) + cluster1 = self._make_one("cluster_id1", instance, self.LOCATION_ID) + cluster2 = self._make_one("cluster_id2", instance, self.LOCATION_ID) self.assertNotEqual(cluster1, cluster2) def test_reload(self): - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_instance_admin_client) - from google.cloud.bigtable_admin_v2.proto import ( - instance_pb2 as data_v2_pb2) + from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client + from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 from google.cloud.bigtable.enums import StorageType from google.cloud.bigtable.enums import Cluster - api = bigtable_instance_admin_client.BigtableInstanceAdminClient( - mock.Mock()) + api = bigtable_instance_admin_client.BigtableInstanceAdminClient(mock.Mock()) credentials = _make_credentials() - client = self._make_client(project=self.PROJECT, - credentials=credentials, admin=True) + client = self._make_client( + project=self.PROJECT, 
credentials=credentials, admin=True + ) STORAGE_TYPE_SSD = StorageType.SSD instance = _Instance(self.INSTANCE_ID, client) - cluster = self._make_one(self.CLUSTER_ID, instance, - location_id=self.LOCATION_ID, - serve_nodes=self.SERVE_NODES, - default_storage_type=STORAGE_TYPE_SSD) + cluster = self._make_one( + self.CLUSTER_ID, + instance, + location_id=self.LOCATION_ID, + serve_nodes=self.SERVE_NODES, + default_storage_type=STORAGE_TYPE_SSD, + ) # Create response_pb - LOCATION_ID_FROM_SERVER = 'new-location-id' + LOCATION_ID_FROM_SERVER = "new-location-id" STATE = Cluster.State.READY SERVE_NODES_FROM_SERVER = 10 STORAGE_TYPE_FROM_SERVER = StorageType.HDD @@ -253,7 +253,7 @@ def test_reload(self): location=self.LOCATION_PATH + LOCATION_ID_FROM_SERVER, state=STATE, serve_nodes=SERVE_NODES_FROM_SERVER, - default_storage_type=STORAGE_TYPE_FROM_SERVER + default_storage_type=STORAGE_TYPE_FROM_SERVER, ) # Patch the stub used by the API method. @@ -277,28 +277,27 @@ def test_reload(self): self.assertEqual(cluster.location_id, LOCATION_ID_FROM_SERVER) self.assertEqual(cluster.state, STATE) self.assertEqual(cluster.serve_nodes, SERVE_NODES_FROM_SERVER) - self.assertEqual(cluster.default_storage_type, - STORAGE_TYPE_FROM_SERVER) + self.assertEqual(cluster.default_storage_type, STORAGE_TYPE_FROM_SERVER) def test_exists(self): - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_instance_admin_client) - from google.cloud.bigtable_admin_v2.proto import ( - instance_pb2 as data_v2_pb2) + from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client + from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 from google.cloud.bigtable.instance import Instance from google.api_core import exceptions - instance_api = ( - bigtable_instance_admin_client.BigtableInstanceAdminClient( - mock.Mock())) + instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient( + mock.Mock() + ) credentials = _make_credentials() - client = self._make_client(project=self.PROJECT, - credentials=credentials, admin=True) + client = self._make_client( + project=self.PROJECT, credentials=credentials, admin=True + ) instance = Instance(self.INSTANCE_ID, client) # Create response_pb cluster_name = client.instance_admin_client.cluster_path( - self.PROJECT, self.INSTANCE_ID, self.CLUSTER_ID) + self.PROJECT, self.INSTANCE_ID, self.CLUSTER_ID + ) response_pb = data_v2_pb2.Cluster(name=cluster_name) # Patch the stub used by the API method. @@ -307,12 +306,12 @@ def test_exists(self): instance_stub = instance_admin_client.transport instance_stub.get_cluster.side_effect = [ response_pb, - exceptions.NotFound('testing'), - exceptions.BadRequest('testing') + exceptions.NotFound("testing"), + exceptions.BadRequest("testing"), ] # Perform the method and check the result. 
- non_existing_cluster_id = 'cluster-id-2' + non_existing_cluster_id = "cluster-id-2" alt_cluster_1 = self._make_one(self.CLUSTER_ID, instance) alt_cluster_2 = self._make_one(non_existing_cluster_id, instance) self.assertTrue(alt_cluster_1.exists()) @@ -326,51 +325,58 @@ def test_create(self): from google.longrunning import operations_pb2 from google.protobuf.any_pb2 import Any from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as messages_v2_pb2) + bigtable_instance_admin_pb2 as messages_v2_pb2, + ) from google.cloud._helpers import _datetime_to_pb_timestamp from google.cloud.bigtable.instance import Instance from google.cloud.bigtable_admin_v2.types import instance_pb2 - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_instance_admin_client) + from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as instance_v2_pb2) + bigtable_instance_admin_pb2 as instance_v2_pb2, + ) from google.cloud.bigtable.enums import StorageType NOW = datetime.datetime.utcnow() NOW_PB = _datetime_to_pb_timestamp(NOW) credentials = _make_credentials() - client = self._make_client(project=self.PROJECT, - credentials=credentials, admin=True) + client = self._make_client( + project=self.PROJECT, credentials=credentials, admin=True + ) STORAGE_TYPE_SSD = StorageType.SSD LOCATION = self.LOCATION_PATH + self.LOCATION_ID instance = Instance(self.INSTANCE_ID, client) - cluster = self._make_one(self.CLUSTER_ID, instance, - location_id=self.LOCATION_ID, - serve_nodes=self.SERVE_NODES, - default_storage_type=STORAGE_TYPE_SSD) + cluster = self._make_one( + self.CLUSTER_ID, + instance, + location_id=self.LOCATION_ID, + serve_nodes=self.SERVE_NODES, + default_storage_type=STORAGE_TYPE_SSD, + ) expected_request_cluster = instance_pb2.Cluster( location=LOCATION, serve_nodes=cluster.serve_nodes, - default_storage_type=cluster.default_storage_type) + default_storage_type=cluster.default_storage_type, + ) expected_request = instance_v2_pb2.CreateClusterRequest( - parent=instance.name, cluster_id=self.CLUSTER_ID, - cluster=expected_request_cluster) + parent=instance.name, + cluster_id=self.CLUSTER_ID, + cluster=expected_request_cluster, + ) metadata = messages_v2_pb2.CreateClusterMetadata(request_time=NOW_PB) - type_url = 'type.googleapis.com/{}'.format( - messages_v2_pb2.CreateClusterMetadata.DESCRIPTOR.full_name) + type_url = "type.googleapis.com/{}".format( + messages_v2_pb2.CreateClusterMetadata.DESCRIPTOR.full_name + ) response_pb = operations_pb2.Operation( name=self.OP_NAME, - metadata=Any( - type_url=type_url, - value=metadata.SerializeToString() - ) + metadata=Any(type_url=type_url, value=metadata.SerializeToString()), ) # Patch the stub used by the API method. channel = ChannelStub(responses=[response_pb]) api = bigtable_instance_admin_client.BigtableInstanceAdminClient( - channel=channel) + channel=channel + ) client._instance_admin_client = api # Perform the method and check the result. 
@@ -380,8 +386,7 @@ def test_create(self): self.assertEqual(actual_request, expected_request) self.assertIsInstance(result, operation.Operation) self.assertEqual(result.operation.name, self.OP_NAME) - self.assertIsInstance(result.metadata, - messages_v2_pb2.CreateClusterMetadata) + self.assertIsInstance(result.metadata, messages_v2_pb2.CreateClusterMetadata) def test_update(self): import datetime @@ -390,44 +395,47 @@ def test_update(self): from google.protobuf.any_pb2 import Any from google.cloud._helpers import _datetime_to_pb_timestamp from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as messages_v2_pb2) + bigtable_instance_admin_pb2 as messages_v2_pb2, + ) from google.cloud.bigtable_admin_v2.types import instance_pb2 - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_instance_admin_client) + from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client from google.cloud.bigtable.enums import StorageType NOW = datetime.datetime.utcnow() NOW_PB = _datetime_to_pb_timestamp(NOW) credentials = _make_credentials() - client = self._make_client(project=self.PROJECT, - credentials=credentials, admin=True) + client = self._make_client( + project=self.PROJECT, credentials=credentials, admin=True + ) STORAGE_TYPE_SSD = StorageType.SSD instance = _Instance(self.INSTANCE_ID, client) - cluster = self._make_one(self.CLUSTER_ID, instance, - location_id=self.LOCATION_ID, - serve_nodes=self.SERVE_NODES, - default_storage_type=STORAGE_TYPE_SSD) + cluster = self._make_one( + self.CLUSTER_ID, + instance, + location_id=self.LOCATION_ID, + serve_nodes=self.SERVE_NODES, + default_storage_type=STORAGE_TYPE_SSD, + ) # Create expected_request expected_request = instance_pb2.Cluster( - name=cluster.name, - serve_nodes=self.SERVE_NODES) + name=cluster.name, serve_nodes=self.SERVE_NODES + ) metadata = messages_v2_pb2.UpdateClusterMetadata(request_time=NOW_PB) - type_url = 'type.googleapis.com/{}'.format( - messages_v2_pb2.UpdateClusterMetadata.DESCRIPTOR.full_name) + type_url = "type.googleapis.com/{}".format( + messages_v2_pb2.UpdateClusterMetadata.DESCRIPTOR.full_name + ) response_pb = operations_pb2.Operation( name=self.OP_NAME, - metadata=Any( - type_url=type_url, - value=metadata.SerializeToString(), - ) + metadata=Any(type_url=type_url, value=metadata.SerializeToString()), ) # Patch the stub used by the API method. channel = ChannelStub(responses=[response_pb]) api = bigtable_instance_admin_client.BigtableInstanceAdminClient( - channel=channel) + channel=channel + ) client._instance_admin_client = api # Perform the method and check the result. 
@@ -437,22 +445,19 @@ def test_update(self): self.assertEqual(actual_request, expected_request) self.assertIsInstance(result, operation.Operation) self.assertEqual(result.operation.name, self.OP_NAME) - self.assertIsInstance(result.metadata, - messages_v2_pb2.UpdateClusterMetadata) + self.assertIsInstance(result.metadata, messages_v2_pb2.UpdateClusterMetadata) def test_delete(self): from google.protobuf import empty_pb2 - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_instance_admin_client) + from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client - api = bigtable_instance_admin_client.BigtableInstanceAdminClient( - mock.Mock()) + api = bigtable_instance_admin_client.BigtableInstanceAdminClient(mock.Mock()) credentials = _make_credentials() - client = self._make_client(project=self.PROJECT, - credentials=credentials, admin=True) + client = self._make_client( + project=self.PROJECT, credentials=credentials, admin=True + ) instance = _Instance(self.INSTANCE_ID, client) - cluster = self._make_one(self.CLUSTER_ID, instance, - self.LOCATION_ID) + cluster = self._make_one(self.CLUSTER_ID, instance, self.LOCATION_ID) # Create response_pb response_pb = empty_pb2.Empty() @@ -473,23 +478,19 @@ def test_delete(self): class _Instance(object): - def __init__(self, instance_id, client): self.instance_id = instance_id self._client = client def __eq__(self, other): - return (other.instance_id == self.instance_id and - other._client == self._client) + return other.instance_id == self.instance_id and other._client == self._client class _Client(object): - def __init__(self, project): self.project = project - self.project_name = 'projects/' + self.project + self.project_name = "projects/" + self.project self._operations_stub = mock.sentinel.operations_stub def __eq__(self, other): - return (other.project == self.project and - other.project_name == self.project_name) + return other.project == self.project and other.project_name == self.project_name diff --git a/packages/google-cloud-bigtable/tests/unit/test_column_family.py b/packages/google-cloud-bigtable/tests/unit/test_column_family.py index 140504072f25..d6f6c2672047 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_column_family.py +++ b/packages/google-cloud-bigtable/tests/unit/test_column_family.py @@ -20,7 +20,6 @@ class TestMaxVersionsGCRule(unittest.TestCase): - @staticmethod def _get_target_class(): from google.cloud.bigtable.column_family import MaxVersionsGCRule @@ -43,7 +42,7 @@ def test___eq__same_value(self): def test___ne__same_value(self): gc_rule1 = self._make_one(99) gc_rule2 = self._make_one(99) - comparison_val = (gc_rule1 != gc_rule2) + comparison_val = gc_rule1 != gc_rule2 self.assertFalse(comparison_val) def test_to_pb(self): @@ -55,7 +54,6 @@ def test_to_pb(self): class TestMaxAgeGCRule(unittest.TestCase): - @staticmethod def _get_target_class(): from google.cloud.bigtable.column_family import MaxAgeGCRule @@ -81,7 +79,7 @@ def test___ne__same_value(self): max_age = object() gc_rule1 = self._make_one(max_age=max_age) gc_rule2 = self._make_one(max_age=max_age) - comparison_val = (gc_rule1 != gc_rule2) + comparison_val = gc_rule1 != gc_rule2 self.assertFalse(comparison_val) def test_to_pb(self): @@ -96,7 +94,6 @@ def test_to_pb(self): class TestGCRuleUnion(unittest.TestCase): - @staticmethod def _get_target_class(): from google.cloud.bigtable.column_family import GCRuleUnion @@ -127,7 +124,7 @@ def test___ne__same_value(self): rules = object() gc_rule1 = self._make_one(rules) gc_rule2 = 
self._make_one(rules) - comparison_val = (gc_rule1 != gc_rule2) + comparison_val = gc_rule1 != gc_rule2 self.assertFalse(comparison_val) def test_to_pb(self): @@ -142,12 +139,10 @@ def test_to_pb(self): max_age = datetime.timedelta(seconds=1) rule2 = MaxAgeGCRule(max_age) - pb_rule2 = _GcRulePB( - max_age=duration_pb2.Duration(seconds=1)) + pb_rule2 = _GcRulePB(max_age=duration_pb2.Duration(seconds=1)) rule3 = self._make_one(rules=[rule1, rule2]) - pb_rule3 = _GcRulePB( - union=_GcRuleUnionPB(rules=[pb_rule1, pb_rule2])) + pb_rule3 = _GcRulePB(union=_GcRuleUnionPB(rules=[pb_rule1, pb_rule2])) gc_rule_pb = rule3.to_pb() self.assertEqual(gc_rule_pb, pb_rule3) @@ -164,27 +159,23 @@ def test_to_pb_nested(self): max_age = datetime.timedelta(seconds=1) rule2 = MaxAgeGCRule(max_age) - pb_rule2 = _GcRulePB( - max_age=duration_pb2.Duration(seconds=1)) + pb_rule2 = _GcRulePB(max_age=duration_pb2.Duration(seconds=1)) rule3 = self._make_one(rules=[rule1, rule2]) - pb_rule3 = _GcRulePB( - union=_GcRuleUnionPB(rules=[pb_rule1, pb_rule2])) + pb_rule3 = _GcRulePB(union=_GcRuleUnionPB(rules=[pb_rule1, pb_rule2])) max_num_versions2 = 1337 rule4 = MaxVersionsGCRule(max_num_versions2) pb_rule4 = _GcRulePB(max_num_versions=max_num_versions2) rule5 = self._make_one(rules=[rule3, rule4]) - pb_rule5 = _GcRulePB( - union=_GcRuleUnionPB(rules=[pb_rule3, pb_rule4])) + pb_rule5 = _GcRulePB(union=_GcRuleUnionPB(rules=[pb_rule3, pb_rule4])) gc_rule_pb = rule5.to_pb() self.assertEqual(gc_rule_pb, pb_rule5) class TestGCRuleIntersection(unittest.TestCase): - @staticmethod def _get_target_class(): from google.cloud.bigtable.column_family import GCRuleIntersection @@ -215,7 +206,7 @@ def test___ne__same_value(self): rules = object() gc_rule1 = self._make_one(rules) gc_rule2 = self._make_one(rules) - comparison_val = (gc_rule1 != gc_rule2) + comparison_val = gc_rule1 != gc_rule2 self.assertFalse(comparison_val) def test_to_pb(self): @@ -230,13 +221,12 @@ def test_to_pb(self): max_age = datetime.timedelta(seconds=1) rule2 = MaxAgeGCRule(max_age) - pb_rule2 = _GcRulePB( - max_age=duration_pb2.Duration(seconds=1)) + pb_rule2 = _GcRulePB(max_age=duration_pb2.Duration(seconds=1)) rule3 = self._make_one(rules=[rule1, rule2]) pb_rule3 = _GcRulePB( - intersection=_GcRuleIntersectionPB( - rules=[pb_rule1, pb_rule2])) + intersection=_GcRuleIntersectionPB(rules=[pb_rule1, pb_rule2]) + ) gc_rule_pb = rule3.to_pb() self.assertEqual(gc_rule_pb, pb_rule3) @@ -253,13 +243,12 @@ def test_to_pb_nested(self): max_age = datetime.timedelta(seconds=1) rule2 = MaxAgeGCRule(max_age) - pb_rule2 = _GcRulePB( - max_age=duration_pb2.Duration(seconds=1)) + pb_rule2 = _GcRulePB(max_age=duration_pb2.Duration(seconds=1)) rule3 = self._make_one(rules=[rule1, rule2]) pb_rule3 = _GcRulePB( - intersection=_GcRuleIntersectionPB( - rules=[pb_rule1, pb_rule2])) + intersection=_GcRuleIntersectionPB(rules=[pb_rule1, pb_rule2]) + ) max_num_versions2 = 1337 rule4 = MaxVersionsGCRule(max_num_versions2) @@ -267,15 +256,14 @@ def test_to_pb_nested(self): rule5 = self._make_one(rules=[rule3, rule4]) pb_rule5 = _GcRulePB( - intersection=_GcRuleIntersectionPB( - rules=[pb_rule3, pb_rule4])) + intersection=_GcRuleIntersectionPB(rules=[pb_rule3, pb_rule4]) + ) gc_rule_pb = rule5.to_pb() self.assertEqual(gc_rule_pb, pb_rule5) class TestColumnFamily(unittest.TestCase): - @staticmethod def _get_target_class(): from google.cloud.bigtable.column_family import ColumnFamily @@ -295,58 +283,53 @@ def _make_client(self, *args, **kwargs): return self._get_target_client_class()(*args, 
**kwargs) def test_constructor(self): - column_family_id = u'column-family-id' + column_family_id = u"column-family-id" table = object() gc_rule = object() - column_family = self._make_one( - column_family_id, table, gc_rule=gc_rule) + column_family = self._make_one(column_family_id, table, gc_rule=gc_rule) self.assertEqual(column_family.column_family_id, column_family_id) self.assertIs(column_family._table, table) self.assertIs(column_family.gc_rule, gc_rule) def test_name_property(self): - column_family_id = u'column-family-id' - table_name = 'table_name' + column_family_id = u"column-family-id" + table_name = "table_name" table = _Table(table_name) column_family = self._make_one(column_family_id, table) - expected_name = table_name + '/columnFamilies/' + column_family_id + expected_name = table_name + "/columnFamilies/" + column_family_id self.assertEqual(column_family.name, expected_name) def test___eq__(self): - column_family_id = 'column_family_id' + column_family_id = "column_family_id" table = object() gc_rule = object() - column_family1 = self._make_one(column_family_id, table, - gc_rule=gc_rule) - column_family2 = self._make_one(column_family_id, table, - gc_rule=gc_rule) + column_family1 = self._make_one(column_family_id, table, gc_rule=gc_rule) + column_family2 = self._make_one(column_family_id, table, gc_rule=gc_rule) self.assertEqual(column_family1, column_family2) def test___eq__type_differ(self): - column_family1 = self._make_one('column_family_id', None) + column_family1 = self._make_one("column_family_id", None) column_family2 = object() self.assertNotEqual(column_family1, column_family2) def test___ne__same_value(self): - column_family_id = 'column_family_id' + column_family_id = "column_family_id" table = object() gc_rule = object() - column_family1 = self._make_one(column_family_id, table, - gc_rule=gc_rule) - column_family2 = self._make_one(column_family_id, table, - gc_rule=gc_rule) - comparison_val = (column_family1 != column_family2) + column_family1 = self._make_one(column_family_id, table, gc_rule=gc_rule) + column_family2 = self._make_one(column_family_id, table, gc_rule=gc_rule) + comparison_val = column_family1 != column_family2 self.assertFalse(comparison_val) def test___ne__(self): - column_family1 = self._make_one('column_family_id1', None) - column_family2 = self._make_one('column_family_id2', None) + column_family1 = self._make_one("column_family_id1", None) + column_family2 = self._make_one("column_family_id2", None) self.assertNotEqual(column_family1, column_family2) def test_to_pb_no_rules(self): - column_family = self._make_one('column_family_id', None) + column_family = self._make_one("column_family_id", None) pb_val = column_family.to_pb() expected = _ColumnFamilyPB() self.assertEqual(pb_val, expected) @@ -355,46 +338,49 @@ def test_to_pb_with_rule(self): from google.cloud.bigtable.column_family import MaxVersionsGCRule gc_rule = MaxVersionsGCRule(1) - column_family = self._make_one('column_family_id', None, - gc_rule=gc_rule) + column_family = self._make_one("column_family_id", None, gc_rule=gc_rule) pb_val = column_family.to_pb() expected = _ColumnFamilyPB(gc_rule=gc_rule.to_pb()) self.assertEqual(pb_val, expected) def _create_test_helper(self, gc_rule=None): from google.cloud.bigtable_admin_v2.proto import ( - bigtable_table_admin_pb2 as table_admin_v2_pb2) + bigtable_table_admin_pb2 as table_admin_v2_pb2, + ) from tests.unit._testing import _FakeStub - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_table_admin_client) - - 
project_id = 'project-id' - zone = 'zone' - cluster_id = 'cluster-id' - table_id = 'table-id' - column_family_id = 'column-family-id' - table_name = ('projects/' + project_id + '/zones/' + zone + - '/clusters/' + cluster_id + '/tables/' + table_id) + from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + + project_id = "project-id" + zone = "zone" + cluster_id = "cluster-id" + table_id = "table-id" + column_family_id = "column-family-id" + table_name = ( + "projects/" + + project_id + + "/zones/" + + zone + + "/clusters/" + + cluster_id + + "/tables/" + + table_id + ) api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) credentials = _make_credentials() - client = self._make_client(project=project_id, - credentials=credentials, admin=True) + client = self._make_client( + project=project_id, credentials=credentials, admin=True + ) table = _Table(table_name, client=client) - column_family = self._make_one( - column_family_id, table, gc_rule=gc_rule) + column_family = self._make_one(column_family_id, table, gc_rule=gc_rule) # Create request_pb if gc_rule is None: column_family_pb = _ColumnFamilyPB() else: column_family_pb = _ColumnFamilyPB(gc_rule=gc_rule.to_pb()) - request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest( - name=table_name) - request_pb.modifications.add( - id=column_family_id, - create=column_family_pb, - ) + request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest(name=table_name) + request_pb.modifications.add(id=column_family_id, create=column_family_pb) # Create response_pb response_pb = _ColumnFamilyPB() @@ -424,37 +410,41 @@ def test_create_with_gc_rule(self): def _update_test_helper(self, gc_rule=None): from tests.unit._testing import _FakeStub from google.cloud.bigtable_admin_v2.proto import ( - bigtable_table_admin_pb2 as table_admin_v2_pb2) - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_table_admin_client) - - project_id = 'project-id' - zone = 'zone' - cluster_id = 'cluster-id' - table_id = 'table-id' - column_family_id = 'column-family-id' - table_name = ('projects/' + project_id + '/zones/' + zone + - '/clusters/' + cluster_id + '/tables/' + table_id) + bigtable_table_admin_pb2 as table_admin_v2_pb2, + ) + from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + + project_id = "project-id" + zone = "zone" + cluster_id = "cluster-id" + table_id = "table-id" + column_family_id = "column-family-id" + table_name = ( + "projects/" + + project_id + + "/zones/" + + zone + + "/clusters/" + + cluster_id + + "/tables/" + + table_id + ) api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) credentials = _make_credentials() - client = self._make_client(project=project_id, - credentials=credentials, admin=True) + client = self._make_client( + project=project_id, credentials=credentials, admin=True + ) table = _Table(table_name, client=client) - column_family = self._make_one( - column_family_id, table, gc_rule=gc_rule) + column_family = self._make_one(column_family_id, table, gc_rule=gc_rule) # Create request_pb if gc_rule is None: column_family_pb = _ColumnFamilyPB() else: column_family_pb = _ColumnFamilyPB(gc_rule=gc_rule.to_pb()) - request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest( - name=table_name) - request_pb.modifications.add( - id=column_family_id, - update=column_family_pb, - ) + request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest(name=table_name) + request_pb.modifications.add(id=column_family_id, update=column_family_pb) # Create 
response_pb response_pb = _ColumnFamilyPB() @@ -484,32 +474,38 @@ def test_update_with_gc_rule(self): def test_delete(self): from google.protobuf import empty_pb2 from google.cloud.bigtable_admin_v2.proto import ( - bigtable_table_admin_pb2 as table_admin_v2_pb2) + bigtable_table_admin_pb2 as table_admin_v2_pb2, + ) from tests.unit._testing import _FakeStub - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_table_admin_client) - - project_id = 'project-id' - zone = 'zone' - cluster_id = 'cluster-id' - table_id = 'table-id' - column_family_id = 'column-family-id' - table_name = ('projects/' + project_id + '/zones/' + zone + - '/clusters/' + cluster_id + '/tables/' + table_id) + from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + + project_id = "project-id" + zone = "zone" + cluster_id = "cluster-id" + table_id = "table-id" + column_family_id = "column-family-id" + table_name = ( + "projects/" + + project_id + + "/zones/" + + zone + + "/clusters/" + + cluster_id + + "/tables/" + + table_id + ) api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) credentials = _make_credentials() - client = self._make_client(project=project_id, - credentials=credentials, admin=True) + client = self._make_client( + project=project_id, credentials=credentials, admin=True + ) table = _Table(table_name, client=client) column_family = self._make_one(column_family_id, table) # Create request_pb - request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest( - name=table_name) - request_pb.modifications.add( - id=column_family_id, - drop=True) + request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest(name=table_name) + request_pb.modifications.add(id=column_family_id, drop=True) # Create response_pb response_pb = empty_pb2.Empty() @@ -529,7 +525,6 @@ def test_delete(self): class Test__gc_rule_from_pb(unittest.TestCase): - def _call_fut(self, *args, **kwargs): from google.cloud.bigtable.column_family import _gc_rule_from_pb @@ -595,43 +590,38 @@ class MockProto(object): @classmethod def WhichOneof(cls, name): cls.names.append(name) - return 'unknown' + return "unknown" self.assertEqual(MockProto.names, []) self.assertRaises(ValueError, self._call_fut, MockProto) - self.assertEqual(MockProto.names, ['rule']) + self.assertEqual(MockProto.names, ["rule"]) def _GcRulePB(*args, **kw): - from google.cloud.bigtable_admin_v2.proto import ( - table_pb2 as table_v2_pb2) + from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_v2_pb2 return table_v2_pb2.GcRule(*args, **kw) def _GcRuleIntersectionPB(*args, **kw): - from google.cloud.bigtable_admin_v2.proto import ( - table_pb2 as table_v2_pb2) + from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_v2_pb2 return table_v2_pb2.GcRule.Intersection(*args, **kw) def _GcRuleUnionPB(*args, **kw): - from google.cloud.bigtable_admin_v2.proto import ( - table_pb2 as table_v2_pb2) + from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_v2_pb2 return table_v2_pb2.GcRule.Union(*args, **kw) def _ColumnFamilyPB(*args, **kw): - from google.cloud.bigtable_admin_v2.proto import ( - table_pb2 as table_v2_pb2) + from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_v2_pb2 return table_v2_pb2.ColumnFamily(*args, **kw) class _Instance(object): - def __init__(self, client=None): self._client = client @@ -641,7 +631,6 @@ class _Client(object): class _Table(object): - def __init__(self, name, client=None): self.name = name self._instance = _Instance(client) diff --git 
a/packages/google-cloud-bigtable/tests/unit/test_instance.py b/packages/google-cloud-bigtable/tests/unit/test_instance.py index cf902df5ca5f..6ac1d242c626 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_instance.py +++ b/packages/google-cloud-bigtable/tests/unit/test_instance.py @@ -23,21 +23,22 @@ class TestInstance(unittest.TestCase): - PROJECT = 'project' - INSTANCE_ID = 'instance-id' - INSTANCE_NAME = 'projects/' + PROJECT + '/instances/' + INSTANCE_ID - LOCATION_ID = 'locid' - LOCATION = 'projects/' + PROJECT + '/locations/' + LOCATION_ID + PROJECT = "project" + INSTANCE_ID = "instance-id" + INSTANCE_NAME = "projects/" + PROJECT + "/instances/" + INSTANCE_ID + LOCATION_ID = "locid" + LOCATION = "projects/" + PROJECT + "/locations/" + LOCATION_ID APP_PROFILE_PATH = ( - 'projects/' + PROJECT + '/instances/' + INSTANCE_ID - + '/appProfiles/') - DISPLAY_NAME = 'display_name' - LABELS = {'foo': 'bar'} + "projects/" + PROJECT + "/instances/" + INSTANCE_ID + "/appProfiles/" + ) + DISPLAY_NAME = "display_name" + LABELS = {"foo": "bar"} OP_ID = 8915 - OP_NAME = ('operations/projects/{}/instances/{}operations/{}' - .format(PROJECT, INSTANCE_ID, OP_ID)) - TABLE_ID = 'table_id' - TABLE_NAME = INSTANCE_NAME + '/tables/' + TABLE_ID + OP_NAME = "operations/projects/{}/instances/{}operations/{}".format( + PROJECT, INSTANCE_ID, OP_ID + ) + TABLE_ID = "table_id" + TABLE_NAME = INSTANCE_NAME + "/tables/" + TABLE_ID @staticmethod def _get_target_class(): @@ -73,13 +74,17 @@ def test_constructor_non_default(self): instance_type = enums.Instance.Type.DEVELOPMENT state = enums.Instance.State.READY - labels = {'test': 'test'} + labels = {"test": "test"} client = object() - instance = self._make_one(self.INSTANCE_ID, client, - display_name=self.DISPLAY_NAME, - instance_type=instance_type, - labels=labels, _state=state) + instance = self._make_one( + self.INSTANCE_ID, + client, + display_name=self.DISPLAY_NAME, + instance_type=instance_type, + labels=labels, + _state=state, + ) self.assertEqual(instance.instance_id, self.INSTANCE_ID) self.assertEqual(instance.display_name, self.DISPLAY_NAME) self.assertEqual(instance.type_, instance_type) @@ -88,8 +93,7 @@ def test_constructor_non_default(self): self.assertEqual(instance.state, state) def test__update_from_pb_success(self): - from google.cloud.bigtable_admin_v2.proto import ( - instance_pb2 as data_v2_pb2) + from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 from google.cloud.bigtable import enums instance_type = enums.Instance.Type.PRODUCTION @@ -98,7 +102,7 @@ def test__update_from_pb_success(self): display_name=self.DISPLAY_NAME, type=instance_type, labels=self.LABELS, - state=state + state=state, ) instance = self._make_one(None, None) @@ -112,13 +116,10 @@ def test__update_from_pb_success(self): self.assertEqual(instance._state, state) def test__update_from_pb_success_defaults(self): - from google.cloud.bigtable_admin_v2.proto import ( - instance_pb2 as data_v2_pb2) + from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 from google.cloud.bigtable import enums - instance_pb = data_v2_pb2.Instance( - display_name=self.DISPLAY_NAME, - ) + instance_pb = data_v2_pb2.Instance(display_name=self.DISPLAY_NAME) instance = self._make_one(None, None) self.assertIsNone(instance.display_name) @@ -126,13 +127,11 @@ def test__update_from_pb_success_defaults(self): self.assertIsNone(instance.labels) instance._update_from_pb(instance_pb) self.assertEqual(instance.display_name, self.DISPLAY_NAME) - 
self.assertEqual(instance.type_, - enums.Instance.Type.UNSPECIFIED) + self.assertEqual(instance.type_, enums.Instance.Type.UNSPECIFIED) self.assertFalse(instance.labels) def test__update_from_pb_no_display_name(self): - from google.cloud.bigtable_admin_v2.proto import ( - instance_pb2 as data_v2_pb2) + from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 instance_pb = data_v2_pb2.Instance() instance = self._make_one(None, None) @@ -141,13 +140,13 @@ def test__update_from_pb_no_display_name(self): instance._update_from_pb(instance_pb) def test_from_pb_success(self): - from google.cloud.bigtable_admin_v2.proto import ( - instance_pb2 as data_v2_pb2) + from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 from google.cloud.bigtable import enums credentials = _make_credentials() client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True) + project=self.PROJECT, credentials=credentials, admin=True + ) instance_type = enums.Instance.Type.PRODUCTION state = enums.Instance.State.READY instance_pb = data_v2_pb2.Instance( @@ -155,7 +154,7 @@ def test_from_pb_success(self): display_name=self.INSTANCE_ID, type=instance_type, labels=self.LABELS, - state=state + state=state, ) klass = self._get_target_class() @@ -169,10 +168,9 @@ def test_from_pb_success(self): self.assertEqual(instance._state, state) def test_from_pb_bad_instance_name(self): - from google.cloud.bigtable_admin_v2.proto import ( - instance_pb2 as data_v2_pb2) + from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 - instance_name = 'INCORRECT_FORMAT' + instance_name = "INCORRECT_FORMAT" instance_pb = data_v2_pb2.Instance(name=instance_name) klass = self._get_target_class() @@ -180,13 +178,13 @@ def test_from_pb_bad_instance_name(self): klass.from_pb(instance_pb, None) def test_from_pb_project_mistmatch(self): - from google.cloud.bigtable_admin_v2.proto import ( - instance_pb2 as data_v2_pb2) + from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 - ALT_PROJECT = 'ALT_PROJECT' + ALT_PROJECT = "ALT_PROJECT" credentials = _make_credentials() client = self._make_client( - project=ALT_PROJECT, credentials=credentials, admin=True) + project=ALT_PROJECT, credentials=credentials, admin=True + ) self.assertNotEqual(self.PROJECT, ALT_PROJECT) @@ -197,14 +195,13 @@ def test_from_pb_project_mistmatch(self): klass.from_pb(instance_pb, client) def test_name_property(self): - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_instance_admin_client) + from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client - api = bigtable_instance_admin_client.BigtableInstanceAdminClient( - mock.Mock()) + api = bigtable_instance_admin_client.BigtableInstanceAdminClient(mock.Mock()) credentials = _make_credentials() - client = self._make_client(project=self.PROJECT, - credentials=credentials, admin=True) + client = self._make_client( + project=self.PROJECT, credentials=credentials, admin=True + ) # Patch the the API method. 
client._instance_admin_client = api @@ -228,34 +225,31 @@ def test___ne__same_value(self): client = object() instance1 = self._make_one(self.INSTANCE_ID, client) instance2 = self._make_one(self.INSTANCE_ID, client) - comparison_val = (instance1 != instance2) + comparison_val = instance1 != instance2 self.assertFalse(comparison_val) def test___ne__(self): - instance1 = self._make_one('instance_id1', 'client1') - instance2 = self._make_one('instance_id2', 'client2') + instance1 = self._make_one("instance_id1", "client1") + instance2 = self._make_one("instance_id2", "client2") self.assertNotEqual(instance1, instance2) def test_create_check_location_and_clusters(self): instance = self._make_one(self.INSTANCE_ID, None) with self.assertRaises(ValueError): - instance.create( - location_id=self.LOCATION_ID, clusters=[object(), object()]) + instance.create(location_id=self.LOCATION_ID, clusters=[object(), object()]) def test_create_check_serve_nodes_and_clusters(self): instance = self._make_one(self.INSTANCE_ID, None) with self.assertRaises(ValueError): - instance.create( - serve_nodes=3, clusters=[object(), object()]) + instance.create(serve_nodes=3, clusters=[object(), object()]) def test_create_check_default_storage_type_and_clusters(self): instance = self._make_one(self.INSTANCE_ID, None) with self.assertRaises(ValueError): - instance.create( - default_storage_type=1, clusters=[object(), object()]) + instance.create(default_storage_type=1, clusters=[object(), object()]) def _instance_api_response_for_create(self): import datetime @@ -263,23 +257,21 @@ def _instance_api_response_for_create(self): from google.longrunning import operations_pb2 from google.protobuf.any_pb2 import Any from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_instance_admin_client) + from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as messages_v2_pb2) + bigtable_instance_admin_pb2 as messages_v2_pb2, + ) from google.cloud.bigtable_admin_v2.types import instance_pb2 NOW = datetime.datetime.utcnow() NOW_PB = _datetime_to_pb_timestamp(NOW) metadata = messages_v2_pb2.CreateInstanceMetadata(request_time=NOW_PB) - type_url = 'type.googleapis.com/{}'.format( - messages_v2_pb2.CreateInstanceMetadata.DESCRIPTOR.full_name) + type_url = "type.googleapis.com/{}".format( + messages_v2_pb2.CreateInstanceMetadata.DESCRIPTOR.full_name + ) response_pb = operations_pb2.Operation( name=self.OP_NAME, - metadata=Any( - type_url=type_url, - value=metadata.SerializeToString(), - ) + metadata=Any(type_url=type_url, value=metadata.SerializeToString()), ) response = operation.from_gapic( response_pb, @@ -287,10 +279,11 @@ def _instance_api_response_for_create(self): instance_pb2.Instance, metadata_type=messages_v2_pb2.CreateInstanceMetadata, ) - project_path_template = 'projects/{}' - location_path_template = 'projects/{}/locations/{}' + project_path_template = "projects/{}" + location_path_template = "projects/{}/locations/{}" instance_api = mock.create_autospec( - bigtable_instance_admin_client.BigtableInstanceAdminClient) + bigtable_instance_admin_client.BigtableInstanceAdminClient + ) instance_api.create_instance.return_value = response instance_api.project_path = project_path_template.format instance_api.location_path = location_path_template.format @@ -302,7 +295,8 @@ def test_create(self): credentials = _make_credentials() client = self._make_client( - 
project=self.PROJECT, credentials=credentials, admin=True) + project=self.PROJECT, credentials=credentials, admin=True + ) instance = self._make_one( self.INSTANCE_ID, client, @@ -314,14 +308,10 @@ def test_create(self): client._instance_admin_client = instance_api serve_nodes = 3 - result = instance.create( - location_id=self.LOCATION_ID, - serve_nodes=serve_nodes, - ) + result = instance.create(location_id=self.LOCATION_ID, serve_nodes=serve_nodes) cluster_pb = instance_pb2.Cluster( - location=instance_api.location_path( - self.PROJECT, self.LOCATION_ID), + location=instance_api.location_path(self.PROJECT, self.LOCATION_ID), serve_nodes=serve_nodes, default_storage_type=enums.StorageType.UNSPECIFIED, ) @@ -330,7 +320,7 @@ def test_create(self): type=enums.Instance.Type.PRODUCTION, labels=self.LABELS, ) - cluster_id = '{}-cluster'.format(self.INSTANCE_ID) + cluster_id = "{}-cluster".format(self.INSTANCE_ID) instance_api.create_instance.assert_called_once_with( parent=instance_api.project_path(self.PROJECT), instance_id=self.INSTANCE_ID, @@ -346,7 +336,8 @@ def test_create_w_clusters(self): credentials = _make_credentials() client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True) + project=self.PROJECT, credentials=credentials, admin=True + ) instance = self._make_one( self.INSTANCE_ID, client, @@ -358,31 +349,36 @@ def test_create_w_clusters(self): client._instance_admin_client = instance_api # Perform the method and check the result. - cluster_id_1 = 'cluster-1' - cluster_id_2 = 'cluster-2' - location_id_1 = 'location-id-1' - location_id_2 = 'location-id-2' + cluster_id_1 = "cluster-1" + cluster_id_2 = "cluster-2" + location_id_1 = "location-id-1" + location_id_2 = "location-id-2" serve_nodes_1 = 3 serve_nodes_2 = 5 clusters = [ - Cluster(cluster_id_1, instance, - location_id=location_id_1, - serve_nodes=serve_nodes_1), - Cluster(cluster_id_2, instance, - location_id=location_id_2, - serve_nodes=serve_nodes_2)] + Cluster( + cluster_id_1, + instance, + location_id=location_id_1, + serve_nodes=serve_nodes_1, + ), + Cluster( + cluster_id_2, + instance, + location_id=location_id_2, + serve_nodes=serve_nodes_2, + ), + ] result = instance.create(clusters=clusters) cluster_pb_1 = instance_pb2.Cluster( - location=instance_api.location_path( - self.PROJECT, location_id_1), + location=instance_api.location_path(self.PROJECT, location_id_1), serve_nodes=serve_nodes_1, default_storage_type=enums.StorageType.UNSPECIFIED, ) cluster_pb_2 = instance_pb2.Cluster( - location=instance_api.location_path( - self.PROJECT, location_id_2), + location=instance_api.location_path(self.PROJECT, location_id_2), serve_nodes=serve_nodes_2, default_storage_type=enums.StorageType.UNSPECIFIED, ) @@ -395,31 +391,26 @@ def test_create_w_clusters(self): parent=instance_api.project_path(self.PROJECT), instance_id=self.INSTANCE_ID, instance=instance_pb, - clusters={ - cluster_id_1: cluster_pb_1, - cluster_id_2: cluster_pb_2, - }, + clusters={cluster_id_1: cluster_pb_1, cluster_id_2: cluster_pb_2}, ) self.assertIs(result, response) def test_exists(self): - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_instance_admin_client) - from google.cloud.bigtable_admin_v2.proto import ( - instance_pb2 as data_v2_pb2) + from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client + from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 from google.api_core import exceptions - api = ( - bigtable_instance_admin_client.BigtableInstanceAdminClient( - 
mock.Mock())) + api = bigtable_instance_admin_client.BigtableInstanceAdminClient(mock.Mock()) credentials = _make_credentials() - client = self._make_client(project=self.PROJECT, - credentials=credentials, admin=True) + client = self._make_client( + project=self.PROJECT, credentials=credentials, admin=True + ) # Create response_pb instance_name = client.instance_admin_client.instance_path( - self.PROJECT, self.INSTANCE_ID) + self.PROJECT, self.INSTANCE_ID + ) response_pb = data_v2_pb2.Instance(name=instance_name) # Patch the stub used by the API method. @@ -428,12 +419,12 @@ def test_exists(self): instance_stub = instance_admin_client.transport instance_stub.get_instance.side_effect = [ response_pb, - exceptions.NotFound('testing'), - exceptions.BadRequest('testing') + exceptions.NotFound("testing"), + exceptions.BadRequest("testing"), ] # Perform the method and check the result. - non_existing_instance_id = 'instance-id-2' + non_existing_instance_id = "instance-id-2" alt_instance_1 = self._make_one(self.INSTANCE_ID, client) alt_instance_2 = self._make_one(non_existing_instance_id, client) self.assertTrue(alt_instance_1.exists()) @@ -443,32 +434,27 @@ def test_exists(self): alt_instance_2.exists() def test_reload(self): - from google.cloud.bigtable_admin_v2.proto import ( - instance_pb2 as data_v2_pb2) - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_instance_admin_client) + from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 + from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client from google.cloud.bigtable import enums - api = bigtable_instance_admin_client.BigtableInstanceAdminClient( - mock.Mock()) + api = bigtable_instance_admin_client.BigtableInstanceAdminClient(mock.Mock()) credentials = _make_credentials() - client = self._make_client(project=self.PROJECT, - credentials=credentials, admin=True) + client = self._make_client( + project=self.PROJECT, credentials=credentials, admin=True + ) instance = self._make_one(self.INSTANCE_ID, client) # Create response_pb - DISPLAY_NAME = u'hey-hi-hello' + DISPLAY_NAME = u"hey-hi-hello" instance_type = enums.Instance.Type.PRODUCTION response_pb = data_v2_pb2.Instance( - display_name=DISPLAY_NAME, - type=instance_type, - labels=self.LABELS + display_name=DISPLAY_NAME, type=instance_type, labels=self.LABELS ) # Patch the stub used by the API method. client._instance_admin_client = api - bigtable_instance_stub = ( - client._instance_admin_client.transport) + bigtable_instance_stub = client._instance_admin_client.transport bigtable_instance_stub.get_instance.side_effect = [response_pb] # Create expected_result. 
@@ -490,23 +476,21 @@ def _instance_api_response_for_update(self): from google.longrunning import operations_pb2 from google.protobuf.any_pb2 import Any from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_instance_admin_client) + from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as messages_v2_pb2) + bigtable_instance_admin_pb2 as messages_v2_pb2, + ) from google.cloud.bigtable_admin_v2.types import instance_pb2 NOW = datetime.datetime.utcnow() NOW_PB = _datetime_to_pb_timestamp(NOW) metadata = messages_v2_pb2.UpdateInstanceMetadata(request_time=NOW_PB) - type_url = 'type.googleapis.com/{}'.format( - messages_v2_pb2.UpdateInstanceMetadata.DESCRIPTOR.full_name) + type_url = "type.googleapis.com/{}".format( + messages_v2_pb2.UpdateInstanceMetadata.DESCRIPTOR.full_name + ) response_pb = operations_pb2.Operation( name=self.OP_NAME, - metadata=Any( - type_url=type_url, - value=metadata.SerializeToString(), - ) + metadata=Any(type_url=type_url, value=metadata.SerializeToString()), ) response = operation.from_gapic( response_pb, @@ -514,9 +498,10 @@ def _instance_api_response_for_update(self): instance_pb2.Instance, metadata_type=messages_v2_pb2.UpdateInstanceMetadata, ) - instance_path_template = 'projects/{project}/instances/{instance}' + instance_path_template = "projects/{project}/instances/{instance}" instance_api = mock.create_autospec( - bigtable_instance_admin_client.BigtableInstanceAdminClient) + bigtable_instance_admin_client.BigtableInstanceAdminClient + ) instance_api.partial_update_instance.return_value = response instance_api.instance_path = instance_path_template.format return instance_api, response @@ -528,7 +513,8 @@ def test_update(self): credentials = _make_credentials() client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True) + project=self.PROJECT, credentials=credentials, admin=True + ) instance = self._make_one( self.INSTANCE_ID, client, @@ -548,11 +534,11 @@ def test_update(self): labels=instance.labels, ) update_mask_pb = field_mask_pb2.FieldMask( - paths=['display_name', 'type', 'labels']) + paths=["display_name", "type", "labels"] + ) instance_api.partial_update_instance.assert_called_once_with( - instance=instance_pb, - update_mask=update_mask_pb, + instance=instance_pb, update_mask=update_mask_pb ) self.assertIs(result, response) @@ -563,7 +549,8 @@ def test_update_empty(self): credentials = _make_credentials() client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True) + project=self.PROJECT, credentials=credentials, admin=True + ) instance = self._make_one(None, client) instance_api, response = self._instance_api_response_for_update() client._instance_admin_client = instance_api @@ -579,64 +566,59 @@ def test_update_empty(self): update_mask_pb = field_mask_pb2.FieldMask() instance_api.partial_update_instance.assert_called_once_with( - instance=instance_pb, - update_mask=update_mask_pb, + instance=instance_pb, update_mask=update_mask_pb ) self.assertIs(result, response) def test_delete(self): - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_instance_admin_client) + from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client credentials = _make_credentials() - client = self._make_client(project=self.PROJECT, - credentials=credentials, admin=True) + client = self._make_client( + 
project=self.PROJECT, credentials=credentials, admin=True + ) instance = self._make_one(self.INSTANCE_ID, client) instance_api = mock.create_autospec( - bigtable_instance_admin_client.BigtableInstanceAdminClient) + bigtable_instance_admin_client.BigtableInstanceAdminClient + ) instance_api.delete_instance.return_value = None client._instance_admin_client = instance_api result = instance.delete() - instance_api.delete_instance.assert_called_once_with( - instance.name) + instance_api.delete_instance.assert_called_once_with(instance.name) self.assertIsNone(result) def test_get_iam_policy(self): - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_instance_admin_client) + from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client from google.iam.v1 import policy_pb2 from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE credentials = _make_credentials() - client = self._make_client(project=self.PROJECT, - credentials=credentials, admin=True) + client = self._make_client( + project=self.PROJECT, credentials=credentials, admin=True + ) instance = self._make_one(self.INSTANCE_ID, client) version = 1 - etag = b'etag_v1' - members = [ - 'serviceAccount:service_acc1@test.com', - 'user:user1@test.com', - ] - bindings = [{'role': BIGTABLE_ADMIN_ROLE, 'members': members}] - iam_policy = policy_pb2.Policy( - version=version, etag=etag, bindings=bindings) + etag = b"etag_v1" + members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"] + bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": members}] + iam_policy = policy_pb2.Policy(version=version, etag=etag, bindings=bindings) # Patch the stub used by the API method. instance_api = mock.create_autospec( - bigtable_instance_admin_client.BigtableInstanceAdminClient) + bigtable_instance_admin_client.BigtableInstanceAdminClient + ) client._instance_admin_client = instance_api instance_api.get_iam_policy.return_value = iam_policy # Perform the method and check the result. result = instance.get_iam_policy() - instance_api.get_iam_policy.assert_called_once_with( - resource=instance.name) + instance_api.get_iam_policy.assert_called_once_with(resource=instance.name) self.assertEqual(result.version, version) self.assertEqual(result.etag, etag) admins = result.bigtable_admins @@ -645,30 +627,27 @@ def test_get_iam_policy(self): self.assertEqual(found, expected) def test_set_iam_policy(self): - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_instance_admin_client) + from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client from google.iam.v1 import policy_pb2 from google.cloud.bigtable.policy import Policy from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE credentials = _make_credentials() - client = self._make_client(project=self.PROJECT, - credentials=credentials, admin=True) + client = self._make_client( + project=self.PROJECT, credentials=credentials, admin=True + ) instance = self._make_one(self.INSTANCE_ID, client) version = 1 - etag = b'etag_v1' - members = [ - 'serviceAccount:service_acc1@test.com', - 'user:user1@test.com', - ] - bindings = [{'role': BIGTABLE_ADMIN_ROLE, 'members': members}] - iam_policy_pb = policy_pb2.Policy( - version=version, etag=etag, bindings=bindings) + etag = b"etag_v1" + members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"] + bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": members}] + iam_policy_pb = policy_pb2.Policy(version=version, etag=etag, bindings=bindings) # Patch the stub used by the API method. 
instance_api = mock.create_autospec( - bigtable_instance_admin_client.BigtableInstanceAdminClient) + bigtable_instance_admin_client.BigtableInstanceAdminClient + ) instance_api.set_iam_policy.return_value = iam_policy_pb client._instance_admin_client = instance_api @@ -683,11 +662,7 @@ def test_set_iam_policy(self): instance_api.set_iam_policy.assert_called_once_with( resource=instance.name, - policy={ - 'version': version, - 'etag': etag, - 'bindings': bindings, - }, + policy={"version": version, "etag": etag, "bindings": bindings}, ) self.assertEqual(result.version, version) self.assertEqual(result.etag, etag) @@ -697,22 +672,22 @@ def test_set_iam_policy(self): self.assertEqual(found, expected) def test_test_iam_permissions(self): - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_instance_admin_client) + from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client from google.iam.v1 import iam_policy_pb2 credentials = _make_credentials() - client = self._make_client(project=self.PROJECT, - credentials=credentials, admin=True) + client = self._make_client( + project=self.PROJECT, credentials=credentials, admin=True + ) instance = self._make_one(self.INSTANCE_ID, client) permissions = ["bigtable.tables.create", "bigtable.clusters.create"] - response = iam_policy_pb2.TestIamPermissionsResponse( - permissions=permissions) + response = iam_policy_pb2.TestIamPermissionsResponse(permissions=permissions) instance_api = mock.create_autospec( - bigtable_instance_admin_client.BigtableInstanceAdminClient) + bigtable_instance_admin_client.BigtableInstanceAdminClient + ) instance_api.test_iam_permissions.return_value = response client._instance_admin_client = instance_api @@ -720,21 +695,25 @@ def test_test_iam_permissions(self): self.assertEqual(result, permissions) instance_api.test_iam_permissions.assert_called_once_with( - resource=instance.name, permissions=permissions) + resource=instance.name, permissions=permissions + ) def test_cluster_factory(self): from google.cloud.bigtable import enums - CLUSTER_ID = '{}-cluster'.format(self.INSTANCE_ID) - LOCATION_ID = 'us-central1-c' + CLUSTER_ID = "{}-cluster".format(self.INSTANCE_ID) + LOCATION_ID = "us-central1-c" SERVE_NODES = 3 STORAGE_TYPE = enums.StorageType.HDD instance = self._make_one(self.INSTANCE_ID, None) - cluster = instance.cluster(CLUSTER_ID, location_id=LOCATION_ID, - serve_nodes=SERVE_NODES, - default_storage_type=STORAGE_TYPE) + cluster = instance.cluster( + CLUSTER_ID, + location_id=LOCATION_ID, + serve_nodes=SERVE_NODES, + default_storage_type=STORAGE_TYPE, + ) self.assertIsInstance(cluster, Cluster) self.assertEqual(cluster.cluster_id, CLUSTER_ID) self.assertEqual(cluster.location_id, LOCATION_ID) @@ -743,47 +722,44 @@ def test_cluster_factory(self): self.assertEqual(cluster.default_storage_type, STORAGE_TYPE) def test_list_clusters(self): - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_instance_admin_client) - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as messages_v2_pb2) + from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client from google.cloud.bigtable_admin_v2.proto import ( - instance_pb2 as data_v2_pb2) + bigtable_instance_admin_pb2 as messages_v2_pb2, + ) + from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 from google.cloud.bigtable.instance import Instance from google.cloud.bigtable.instance import Cluster credentials = _make_credentials() - client = 
self._make_client(project=self.PROJECT, - credentials=credentials, admin=True) + client = self._make_client( + project=self.PROJECT, credentials=credentials, admin=True + ) instance = Instance(self.INSTANCE_ID, client) - failed_location = 'FAILED' - cluster_id1 = 'cluster-id1' - cluster_id2 = 'cluster-id2' - cluster_path_template = 'projects/{}/instances/{}/clusters/{}' + failed_location = "FAILED" + cluster_id1 = "cluster-id1" + cluster_id2 = "cluster-id2" + cluster_path_template = "projects/{}/instances/{}/clusters/{}" cluster_name1 = cluster_path_template.format( - self.PROJECT, self.INSTANCE_ID, cluster_id1) + self.PROJECT, self.INSTANCE_ID, cluster_id1 + ) cluster_name2 = cluster_path_template.format( - self.PROJECT, self.INSTANCE_ID, cluster_id2) + self.PROJECT, self.INSTANCE_ID, cluster_id2 + ) # Create response_pb response_pb = messages_v2_pb2.ListClustersResponse( - failed_locations=[ - failed_location - ], + failed_locations=[failed_location], clusters=[ - data_v2_pb2.Cluster( - name=cluster_name1, - ), - data_v2_pb2.Cluster( - name=cluster_name2, - ), + data_v2_pb2.Cluster(name=cluster_name1), + data_v2_pb2.Cluster(name=cluster_name2), ], ) # Patch the stub used by the API method. instance_api = mock.create_autospec( - bigtable_instance_admin_client.BigtableInstanceAdminClient) + bigtable_instance_admin_client.BigtableInstanceAdminClient + ) instance_api.list_clusters.side_effect = [response_pb] instance_api.cluster_path = cluster_path_template.format client._instance_admin_client = instance_api @@ -804,7 +780,7 @@ def test_list_clusters(self): def test_table_factory(self): from google.cloud.bigtable.table import Table - app_profile_id = 'appProfileId1262094415' + app_profile_id = "appProfileId1262094415" instance = self._make_one(self.INSTANCE_ID, None) table = instance.table(self.TABLE_ID, app_profile_id=app_profile_id) @@ -814,21 +790,23 @@ def test_table_factory(self): self.assertEqual(table._app_profile_id, app_profile_id) def _list_tables_helper(self, table_name=None): + from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_data_v2_pb2 from google.cloud.bigtable_admin_v2.proto import ( - table_pb2 as table_data_v2_pb2) - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_table_admin_pb2 as table_messages_v1_pb2) + bigtable_table_admin_pb2 as table_messages_v1_pb2, + ) from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_table_admin_client, bigtable_instance_admin_client) + bigtable_table_admin_client, + bigtable_instance_admin_client, + ) - table_api = bigtable_table_admin_client.BigtableTableAdminClient( - mock.Mock()) - instance_api = ( - bigtable_instance_admin_client.BigtableInstanceAdminClient( - mock.Mock())) + table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) + instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient( + mock.Mock() + ) credentials = _make_credentials() - client = self._make_client(project=self.PROJECT, - credentials=credentials, admin=True) + client = self._make_client( + project=self.PROJECT, credentials=credentials, admin=True + ) instance = self._make_one(self.INSTANCE_ID, client) # Create response_pb @@ -836,16 +814,13 @@ def _list_tables_helper(self, table_name=None): table_name = self.TABLE_NAME response_pb = table_messages_v1_pb2.ListTablesResponse( - tables=[ - table_data_v2_pb2.Table(name=table_name), - ], + tables=[table_data_v2_pb2.Table(name=table_name)] ) # Patch the stub used by the API method. 
client._table_admin_client = table_api client._instance_admin_client = instance_api - bigtable_table_stub = ( - client._table_admin_client.transport) + bigtable_table_stub = client._table_admin_client.transport bigtable_table_stub.list_tables.side_effect = [response_pb] # Create expected_result. @@ -862,34 +837,37 @@ def test_list_tables(self): def test_list_tables_failure_bad_split(self): with self.assertRaises(ValueError): - self._list_tables_helper(table_name='wrong-format') + self._list_tables_helper(table_name="wrong-format") def test_list_tables_failure_name_bad_before(self): - BAD_TABLE_NAME = ('nonempty-section-before' + - 'projects/' + self.PROJECT + - '/instances/' + self.INSTANCE_ID + - '/tables/' + self.TABLE_ID) + BAD_TABLE_NAME = ( + "nonempty-section-before" + + "projects/" + + self.PROJECT + + "/instances/" + + self.INSTANCE_ID + + "/tables/" + + self.TABLE_ID + ) with self.assertRaises(ValueError): self._list_tables_helper(table_name=BAD_TABLE_NAME) def test_app_profile_factory(self): from google.cloud.bigtable.enums import RoutingPolicyType - APP_PROFILE_ID_1 = 'app-profile-id-1' + APP_PROFILE_ID_1 = "app-profile-id-1" ANY = RoutingPolicyType.ANY - DESCRIPTION_1 = 'routing policy any' - APP_PROFILE_ID_2 = 'app-profile-id-2' + DESCRIPTION_1 = "routing policy any" + APP_PROFILE_ID_2 = "app-profile-id-2" SINGLE = RoutingPolicyType.SINGLE - DESCRIPTION_2 = 'routing policy single' + DESCRIPTION_2 = "routing policy single" ALLOW_WRITES = True - CLUSTER_ID = 'cluster-id' + CLUSTER_ID = "cluster-id" instance = self._make_one(self.INSTANCE_ID, None) app_profile1 = instance.app_profile( - APP_PROFILE_ID_1, - routing_policy_type=ANY, - description=DESCRIPTION_1, + APP_PROFILE_ID_1, routing_policy_type=ANY, description=DESCRIPTION_1 ) app_profile2 = instance.app_profile( @@ -913,14 +891,11 @@ def test_app_profile_factory(self): def test_list_app_profiles(self): from google.api_core.page_iterator import Iterator from google.api_core.page_iterator import Page - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_instance_admin_client) - from google.cloud.bigtable_admin_v2.proto import ( - instance_pb2 as data_v2_pb2) + from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client + from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 from google.cloud.bigtable.app_profile import AppProfile class _Iterator(Iterator): - def __init__(self, pages): super(_Iterator, self).__init__(client=None) self._pages = pages @@ -931,35 +906,37 @@ def _next_page(self): return Page(self, page, self.item_to_value) credentials = _make_credentials() - client = self._make_client(project=self.PROJECT, - credentials=credentials, admin=True) + client = self._make_client( + project=self.PROJECT, credentials=credentials, admin=True + ) instance = self._make_one(self.INSTANCE_ID, client) # Setup Expected Response - app_profile_path_template = 'projects/{}/instances/{}/appProfiles/{}' - app_profile_id1 = 'app-profile-id1' - app_profile_id2 = 'app-profile-id2' + app_profile_path_template = "projects/{}/instances/{}/appProfiles/{}" + app_profile_id1 = "app-profile-id1" + app_profile_id2 = "app-profile-id2" app_profile_name1 = app_profile_path_template.format( - self.PROJECT, self.INSTANCE_ID, app_profile_id1) + self.PROJECT, self.INSTANCE_ID, app_profile_id1 + ) app_profile_name2 = app_profile_path_template.format( - self.PROJECT, self.INSTANCE_ID, app_profile_id2) + self.PROJECT, self.INSTANCE_ID, app_profile_id2 + ) routing_policy = 
data_v2_pb2.AppProfile.MultiClusterRoutingUseAny() app_profiles = [ data_v2_pb2.AppProfile( - name=app_profile_name1, - multi_cluster_routing_use_any=routing_policy, + name=app_profile_name1, multi_cluster_routing_use_any=routing_policy ), data_v2_pb2.AppProfile( - name=app_profile_name2, - multi_cluster_routing_use_any=routing_policy, - ) + name=app_profile_name2, multi_cluster_routing_use_any=routing_policy + ), ] iterator = _Iterator(pages=[app_profiles]) # Patch the stub used by the API method. instance_api = mock.create_autospec( - bigtable_instance_admin_client.BigtableInstanceAdminClient) + bigtable_instance_admin_client.BigtableInstanceAdminClient + ) client._instance_admin_client = instance_api instance_api.app_profile_path = app_profile_path_template.format instance_api.list_app_profiles.return_value = iterator diff --git a/packages/google-cloud-bigtable/tests/unit/test_policy.py b/packages/google-cloud-bigtable/tests/unit/test_policy.py index b0ffe6afed36..49eb015e078e 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_policy.py +++ b/packages/google-cloud-bigtable/tests/unit/test_policy.py @@ -16,7 +16,6 @@ class TestPolicy(unittest.TestCase): - @staticmethod def _get_target_class(): from google.cloud.bigtable.policy import Policy @@ -40,7 +39,7 @@ def test_ctor_defaults(self): def test_ctor_explicit(self): VERSION = 17 - ETAG = b'ETAG' + ETAG = b"ETAG" empty = frozenset() policy = self._make_one(ETAG, VERSION) self.assertEqual(policy.etag, ETAG) @@ -54,7 +53,8 @@ def test_ctor_explicit(self): def test_bigtable_admins_getter(self): from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE - MEMBER = 'user:phred@example.com' + + MEMBER = "user:phred@example.com" expected = frozenset([MEMBER]) policy = self._make_one() policy[BIGTABLE_ADMIN_ROLE] = [MEMBER] @@ -62,7 +62,8 @@ def test_bigtable_admins_getter(self): def test_bigtable_readers_getter(self): from google.cloud.bigtable.policy import BIGTABLE_READER_ROLE - MEMBER = 'user:phred@example.com' + + MEMBER = "user:phred@example.com" expected = frozenset([MEMBER]) policy = self._make_one() policy[BIGTABLE_READER_ROLE] = [MEMBER] @@ -70,7 +71,8 @@ def test_bigtable_readers_getter(self): def test_bigtable_users_getter(self): from google.cloud.bigtable.policy import BIGTABLE_USER_ROLE - MEMBER = 'user:phred@example.com' + + MEMBER = "user:phred@example.com" expected = frozenset([MEMBER]) policy = self._make_one() policy[BIGTABLE_USER_ROLE] = [MEMBER] @@ -78,7 +80,8 @@ def test_bigtable_users_getter(self): def test_bigtable_viewers_getter(self): from google.cloud.bigtable.policy import BIGTABLE_VIEWER_ROLE - MEMBER = 'user:phred@example.com' + + MEMBER = "user:phred@example.com" expected = frozenset([MEMBER]) policy = self._make_one() policy[BIGTABLE_VIEWER_ROLE] = [MEMBER] diff --git a/packages/google-cloud-bigtable/tests/unit/test_row.py b/packages/google-cloud-bigtable/tests/unit/test_row.py index f4b94f9d2f8a..b4aaefb862f8 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row.py @@ -21,7 +21,6 @@ class TestRow(unittest.TestCase): - @staticmethod def _get_target_class(): from google.cloud.bigtable.row import Row @@ -32,16 +31,15 @@ def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def test_row_key_getter(self): - row = self._make_one(row_key=b'row_key', table='table') - self.assertEqual(b'row_key', row.row_key) + row = self._make_one(row_key=b"row_key", table="table") + self.assertEqual(b"row_key", row.row_key) 
def test_row_table_getter(self): - row = self._make_one(row_key=b'row_key', table='table') - self.assertEqual('table', row.table) + row = self._make_one(row_key=b"row_key", table="table") + self.assertEqual("table", row.table) class Test_SetDeleteRow(unittest.TestCase): - @staticmethod def _get_target_class(): from google.cloud.bigtable.row import _SetDeleteRow @@ -52,13 +50,12 @@ def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def test__get_mutations_virtual(self): - row = self._make_one(b'row-key', None) + row = self._make_one(b"row-key", None) with self.assertRaises(NotImplementedError): row._get_mutations(None) class TestDirectRow(unittest.TestCase): - @staticmethod def _get_target_class(): from google.cloud.bigtable.row import DirectRow @@ -78,7 +75,7 @@ def _make_client(self, *args, **kwargs): return self._get_target_client_class()(*args, **kwargs) def test_constructor(self): - row_key = b'row_key' + row_key = b"row_key" table = object() row = self._make_one(row_key, table) @@ -87,8 +84,8 @@ def test_constructor(self): self.assertEqual(row._pb_mutations, []) def test_constructor_with_unicode(self): - row_key = u'row_key' - row_key_bytes = b'row_key' + row_key = u"row_key" + row_key_bytes = b"row_key" table = object() row = self._make_one(row_key, table) @@ -101,22 +98,22 @@ def test_constructor_with_non_bytes(self): self._make_one(row_key, None) def test__get_mutations(self): - row_key = b'row_key' + row_key = b"row_key" row = self._make_one(row_key, None) row._pb_mutations = mutations = object() self.assertIs(mutations, row._get_mutations(None)) def test_get_mutations_size(self): - row_key = b'row_key' + row_key = b"row_key" row = self._make_one(row_key, None) - column_family_id1 = u'column_family_id1' - column_family_id2 = u'column_family_id2' - column1 = b'column1' - column2 = b'column2' + column_family_id1 = u"column_family_id1" + column_family_id2 = u"column_family_id2" + column1 = b"column1" + column2 = b"column2" number_of_bytes = 1 * 1024 * 1024 - value = b'1' * number_of_bytes + value = b"1" * number_of_bytes row.set_cell(column_family_id1, column1, value) row.set_cell(column_family_id2, column2, value) @@ -127,31 +124,35 @@ def test_get_mutations_size(self): self.assertEqual(row.get_mutations_size(), total_mutations_size) - def _set_cell_helper(self, column=None, column_bytes=None, - value=b'foobar', timestamp=None, - timestamp_micros=-1): + def _set_cell_helper( + self, + column=None, + column_bytes=None, + value=b"foobar", + timestamp=None, + timestamp_micros=-1, + ): import six import struct - row_key = b'row_key' - column_family_id = u'column_family_id' + row_key = b"row_key" + column_family_id = u"column_family_id" if column is None: - column = b'column' + column = b"column" table = object() row = self._make_one(row_key, table) self.assertEqual(row._pb_mutations, []) - row.set_cell(column_family_id, column, - value, timestamp=timestamp) + row.set_cell(column_family_id, column, value, timestamp=timestamp) if isinstance(value, six.integer_types): - value = struct.pack('>q', value) + value = struct.pack(">q", value) expected_pb = _MutationPB( set_cell=_MutationSetCellPB( family_name=column_family_id, column_qualifier=column_bytes or column, timestamp_micros=timestamp_micros, value=value, - ), + ) ) self.assertEqual(row._pb_mutations, [expected_pb]) @@ -159,19 +160,18 @@ def test_set_cell(self): self._set_cell_helper() def test_set_cell_with_string_column(self): - column_bytes = b'column' - column_non_bytes = u'column' - 
self._set_cell_helper(column=column_non_bytes, - column_bytes=column_bytes) + column_bytes = b"column" + column_non_bytes = u"column" + self._set_cell_helper(column=column_non_bytes, column_bytes=column_bytes) def test_set_cell_with_integer_value(self): value = 1337 self._set_cell_helper(value=value) def test_set_cell_with_non_bytes_value(self): - row_key = b'row_key' - column = b'column' - column_family_id = u'column_family_id' + row_key = b"row_key" + column = b"column" + column_family_id = u"column_family_id" table = object() row = self._make_one(row_key, table) @@ -186,25 +186,21 @@ def test_set_cell_with_non_null_timestamp(self): microseconds = 898294371 millis_granularity = microseconds - (microseconds % 1000) timestamp = _EPOCH + datetime.timedelta(microseconds=microseconds) - self._set_cell_helper(timestamp=timestamp, - timestamp_micros=millis_granularity) + self._set_cell_helper(timestamp=timestamp, timestamp_micros=millis_granularity) def test_delete(self): - row_key = b'row_key' + row_key = b"row_key" row = self._make_one(row_key, object()) self.assertEqual(row._pb_mutations, []) row.delete() - expected_pb = _MutationPB( - delete_from_row=_MutationDeleteFromRowPB(), - ) + expected_pb = _MutationPB(delete_from_row=_MutationDeleteFromRowPB()) self.assertEqual(row._pb_mutations, [expected_pb]) def test_delete_cell(self): klass = self._get_target_class() class MockRow(klass): - def __init__(self, *args, **kwargs): super(MockRow, self).__init__(*args, **kwargs) self._args = [] @@ -215,9 +211,9 @@ def _delete_cells(self, *args, **kwargs): self._args.append(args) self._kwargs.append(kwargs) - row_key = b'row_key' - column = b'column' - column_family_id = u'column_family_id' + row_key = b"row_key" + column = b"column" + column_family_id = u"column_family_id" table = object() mock_row = MockRow(row_key, table) @@ -231,14 +227,11 @@ def _delete_cells(self, *args, **kwargs): mock_row.delete_cell(column_family_id, column, time_range=time_range) self.assertEqual(mock_row._pb_mutations, []) self.assertEqual(mock_row._args, [(column_family_id, [column])]) - self.assertEqual(mock_row._kwargs, [{ - 'state': None, - 'time_range': time_range, - }]) + self.assertEqual(mock_row._kwargs, [{"state": None, "time_range": time_range}]) def test_delete_cells_non_iterable(self): - row_key = b'row_key' - column_family_id = u'column_family_id' + row_key = b"row_key" + column_family_id = u"column_family_id" table = object() row = self._make_one(row_key, table) @@ -247,8 +240,8 @@ def test_delete_cells_non_iterable(self): row.delete_cells(column_family_id, columns) def test_delete_cells_all_columns(self): - row_key = b'row_key' - column_family_id = u'column_family_id' + row_key = b"row_key" + column_family_id = u"column_family_id" table = object() row = self._make_one(row_key, table) @@ -257,15 +250,13 @@ def test_delete_cells_all_columns(self): row.delete_cells(column_family_id, klass.ALL_COLUMNS) expected_pb = _MutationPB( - delete_from_family=_MutationDeleteFromFamilyPB( - family_name=column_family_id, - ), + delete_from_family=_MutationDeleteFromFamilyPB(family_name=column_family_id) ) self.assertEqual(row._pb_mutations, [expected_pb]) def test_delete_cells_no_columns(self): - row_key = b'row_key' - column_family_id = u'column_family_id' + row_key = b"row_key" + column_family_id = u"column_family_id" table = object() row = self._make_one(row_key, table) @@ -275,9 +266,9 @@ def test_delete_cells_no_columns(self): self.assertEqual(row._pb_mutations, []) def _delete_cells_helper(self, time_range=None): - 
row_key = b'row_key' - column = b'column' - column_family_id = u'column_family_id' + row_key = b"row_key" + column = b"column" + column_family_id = u"column_family_id" table = object() row = self._make_one(row_key, table) @@ -287,13 +278,11 @@ def _delete_cells_helper(self, time_range=None): expected_pb = _MutationPB( delete_from_column=_MutationDeleteFromColumnPB( - family_name=column_family_id, - column_qualifier=column, - ), + family_name=column_family_id, column_qualifier=column + ) ) if time_range is not None: - expected_pb.delete_from_column.time_range.CopyFrom( - time_range.to_pb()) + expected_pb.delete_from_column.time_range.CopyFrom(time_range.to_pb()) self.assertEqual(row._pb_mutations, [expected_pb]) def test_delete_cells_no_time_range(self): @@ -312,9 +301,9 @@ def test_delete_cells_with_time_range(self): def test_delete_cells_with_bad_column(self): # This makes sure a failure on one of the columns doesn't leave # the row's mutations in a bad state. - row_key = b'row_key' - column = b'column' - column_family_id = u'column_family_id' + row_key = b"row_key" + column = b"column" + column_family_id = u"column_family_id" table = object() row = self._make_one(row_key, table) @@ -325,12 +314,12 @@ def test_delete_cells_with_bad_column(self): self.assertEqual(row._pb_mutations, []) def test_delete_cells_with_string_columns(self): - row_key = b'row_key' - column_family_id = u'column_family_id' - column1 = u'column1' - column1_bytes = b'column1' - column2 = u'column2' - column2_bytes = b'column2' + row_key = b"row_key" + column_family_id = u"column_family_id" + column1 = u"column1" + column1_bytes = b"column1" + column2 = u"column2" + column2_bytes = b"column2" table = object() row = self._make_one(row_key, table) @@ -340,31 +329,30 @@ def test_delete_cells_with_string_columns(self): expected_pb1 = _MutationPB( delete_from_column=_MutationDeleteFromColumnPB( - family_name=column_family_id, - column_qualifier=column1_bytes, - ), + family_name=column_family_id, column_qualifier=column1_bytes + ) ) expected_pb2 = _MutationPB( delete_from_column=_MutationDeleteFromColumnPB( - family_name=column_family_id, - column_qualifier=column2_bytes, - ), + family_name=column_family_id, column_qualifier=column2_bytes + ) ) self.assertEqual(row._pb_mutations, [expected_pb1, expected_pb2]) def test_commit(self): - project_id = 'project-id' - row_key = b'row_key' - table_name = 'projects/more-stuff' - column_family_id = u'column_family_id' - column = b'column' + project_id = "project-id" + row_key = b"row_key" + table_name = "projects/more-stuff" + column_family_id = u"column_family_id" + column = b"column" credentials = _make_credentials() - client = self._make_client(project=project_id, - credentials=credentials, admin=True) + client = self._make_client( + project=project_id, credentials=credentials, admin=True + ) table = _Table(table_name, client=client) row = self._make_one(row_key, table) - value = b'bytes-value' + value = b"bytes-value" # Perform the method and check the result. 
row.set_cell(column_family_id, column, value) @@ -373,7 +361,6 @@ def test_commit(self): class TestConditionalRow(unittest.TestCase): - @staticmethod def _get_target_class(): from google.cloud.bigtable.row import ConditionalRow @@ -393,7 +380,7 @@ def _make_client(self, *args, **kwargs): return self._get_target_client_class()(*args, **kwargs) def test_constructor(self): - row_key = b'row_key' + row_key = b"row_key" table = object() filter_ = object() @@ -405,7 +392,7 @@ def test_constructor(self): self.assertEqual(row._false_pb_mutations, []) def test__get_mutations(self): - row_key = b'row_key' + row_key = b"row_key" filter_ = object() row = self._make_one(row_key, None, filter_=filter_) @@ -419,30 +406,30 @@ def test_commit(self): from google.cloud.bigtable.row_filters import RowSampleFilter from google.cloud.bigtable_v2.gapic import bigtable_client - project_id = 'project-id' - row_key = b'row_key' - table_name = 'projects/more-stuff' - column_family_id1 = u'column_family_id1' - column_family_id2 = u'column_family_id2' - column_family_id3 = u'column_family_id3' - column1 = b'column1' - column2 = b'column2' + project_id = "project-id" + row_key = b"row_key" + table_name = "projects/more-stuff" + column_family_id1 = u"column_family_id1" + column_family_id2 = u"column_family_id2" + column_family_id3 = u"column_family_id3" + column1 = b"column1" + column2 = b"column2" api = bigtable_client.BigtableClient(mock.Mock()) credentials = _make_credentials() - client = self._make_client(project=project_id, credentials=credentials, - admin=True) + client = self._make_client( + project=project_id, credentials=credentials, admin=True + ) table = _Table(table_name, client=client) row_filter = RowSampleFilter(0.33) row = self._make_one(row_key, table, filter_=row_filter) # Create request_pb - value1 = b'bytes-value' + value1 = b"bytes-value" # Create response_pb predicate_matched = True - response_pb = _CheckAndMutateRowResponsePB( - predicate_matched=predicate_matched) + response_pb = _CheckAndMutateRowResponsePB(predicate_matched=predicate_matched) # Patch the stub used by the API method. 
api.transport.check_and_mutate_row.side_effect = [response_pb] @@ -465,7 +452,7 @@ def test_commit_too_many_mutations(self): from google.cloud._testing import _Monkey from google.cloud.bigtable import row as MUT - row_key = b'row_key' + row_key = b"row_key" table = object() filter_ = object() row = self._make_one(row_key, table, filter_=filter_) @@ -478,12 +465,13 @@ def test_commit_too_many_mutations(self): def test_commit_no_mutations(self): from tests.unit._testing import _FakeStub - project_id = 'project-id' - row_key = b'row_key' + project_id = "project-id" + row_key = b"row_key" credentials = _make_credentials() - client = self._make_client(project=project_id, credentials=credentials, - admin=True) + client = self._make_client( + project=project_id, credentials=credentials, admin=True + ) table = _Table(None, client=client) filter_ = object() row = self._make_one(row_key, table, filter_=filter_) @@ -501,7 +489,6 @@ def test_commit_no_mutations(self): class TestAppendRow(unittest.TestCase): - @staticmethod def _get_target_class(): from google.cloud.bigtable.row import AppendRow @@ -521,7 +508,7 @@ def _make_client(self, *args, **kwargs): return self._get_target_client_class()(*args, **kwargs) def test_constructor(self): - row_key = b'row_key' + row_key = b"row_key" table = object() row = self._make_one(row_key, table) @@ -530,7 +517,7 @@ def test_constructor(self): self.assertEqual(row._rule_pb_list, []) def test_clear(self): - row_key = b'row_key' + row_key = b"row_key" table = object() row = self._make_one(row_key, table) row._rule_pb_list = [1, 2, 3] @@ -539,32 +526,34 @@ def test_clear(self): def test_append_cell_value(self): table = object() - row_key = b'row_key' + row_key = b"row_key" row = self._make_one(row_key, table) self.assertEqual(row._rule_pb_list, []) - column = b'column' - column_family_id = u'column_family_id' - value = b'bytes-val' + column = b"column" + column_family_id = u"column_family_id" + value = b"bytes-val" row.append_cell_value(column_family_id, column, value) expected_pb = _ReadModifyWriteRulePB( - family_name=column_family_id, column_qualifier=column, - append_value=value) + family_name=column_family_id, column_qualifier=column, append_value=value + ) self.assertEqual(row._rule_pb_list, [expected_pb]) def test_increment_cell_value(self): table = object() - row_key = b'row_key' + row_key = b"row_key" row = self._make_one(row_key, table) self.assertEqual(row._rule_pb_list, []) - column = b'column' - column_family_id = u'column_family_id' + column = b"column" + column_family_id = u"column_family_id" int_value = 281330 row.increment_cell_value(column_family_id, column, int_value) expected_pb = _ReadModifyWriteRulePB( - family_name=column_family_id, column_qualifier=column, - increment_amount=int_value) + family_name=column_family_id, + column_qualifier=column, + increment_amount=int_value, + ) self.assertEqual(row._rule_pb_list, [expected_pb]) def test_commit(self): @@ -572,21 +561,22 @@ def test_commit(self): from google.cloud.bigtable import row as MUT from google.cloud.bigtable_v2.gapic import bigtable_client - project_id = 'project-id' - row_key = b'row_key' - table_name = 'projects/more-stuff' - column_family_id = u'column_family_id' - column = b'column' + project_id = "project-id" + row_key = b"row_key" + table_name = "projects/more-stuff" + column_family_id = u"column_family_id" + column = b"column" api = bigtable_client.BigtableClient(mock.Mock()) credentials = _make_credentials() - client = self._make_client(project=project_id, 
credentials=credentials, - admin=True) + client = self._make_client( + project=project_id, credentials=credentials, admin=True + ) table = _Table(table_name, client=client) row = self._make_one(row_key, table) # Create request_pb - value = b'bytes-value' + value = b"bytes-value" # Create expected_result. row_responses = [] @@ -610,12 +600,13 @@ def mock_parse_rmw_row_response(row_response): def test_commit_no_rules(self): from tests.unit._testing import _FakeStub - project_id = 'project-id' - row_key = b'row_key' + project_id = "project-id" + row_key = b"row_key" credentials = _make_credentials() - client = self._make_client(project=project_id, credentials=credentials, - admin=True) + client = self._make_client( + project=project_id, credentials=credentials, admin=True + ) table = _Table(None, client=client) row = self._make_one(row_key, table) self.assertEqual(row._rule_pb_list, []) @@ -633,7 +624,7 @@ def test_commit_too_many_mutations(self): from google.cloud._testing import _Monkey from google.cloud.bigtable import row as MUT - row_key = b'row_key' + row_key = b"row_key" table = object() row = self._make_one(row_key, table) row._rule_pb_list = [1, 2, 3] @@ -644,7 +635,6 @@ def test_commit_too_many_mutations(self): class Test__parse_rmw_row_response(unittest.TestCase): - def _call_fut(self, row_response): from google.cloud.bigtable.row import _parse_rmw_row_response @@ -653,33 +643,24 @@ def _call_fut(self, row_response): def test_it(self): from google.cloud._helpers import _datetime_from_microseconds - col_fam1 = u'col-fam-id' - col_fam2 = u'col-fam-id2' - col_name1 = b'col-name1' - col_name2 = b'col-name2' - col_name3 = b'col-name3-but-other-fam' - cell_val1 = b'cell-val' - cell_val2 = b'cell-val-newer' - cell_val3 = b'altcol-cell-val' - cell_val4 = b'foo' + col_fam1 = u"col-fam-id" + col_fam2 = u"col-fam-id2" + col_name1 = b"col-name1" + col_name2 = b"col-name2" + col_name3 = b"col-name3-but-other-fam" + cell_val1 = b"cell-val" + cell_val2 = b"cell-val-newer" + cell_val3 = b"altcol-cell-val" + cell_val4 = b"foo" microseconds = 1000871 timestamp = _datetime_from_microseconds(microseconds) expected_output = { col_fam1: { - col_name1: [ - (cell_val1, timestamp), - (cell_val2, timestamp), - ], - col_name2: [ - (cell_val3, timestamp), - ], - }, - col_fam2: { - col_name3: [ - (cell_val4, timestamp), - ], + col_name1: [(cell_val1, timestamp), (cell_val2, timestamp)], + col_name2: [(cell_val3, timestamp)], }, + col_fam2: {col_name3: [(cell_val4, timestamp)]}, } response_row = _RowPB( families=[ @@ -689,23 +670,14 @@ def test_it(self): _ColumnPB( qualifier=col_name1, cells=[ - _CellPB( - value=cell_val1, - timestamp_micros=microseconds, - ), - _CellPB( - value=cell_val2, - timestamp_micros=microseconds, - ), + _CellPB(value=cell_val1, timestamp_micros=microseconds), + _CellPB(value=cell_val2, timestamp_micros=microseconds), ], ), _ColumnPB( qualifier=col_name2, cells=[ - _CellPB( - value=cell_val3, - timestamp_micros=microseconds, - ), + _CellPB(value=cell_val3, timestamp_micros=microseconds) ], ), ], @@ -716,22 +688,18 @@ def test_it(self): _ColumnPB( qualifier=col_name3, cells=[ - _CellPB( - value=cell_val4, - timestamp_micros=microseconds, - ), + _CellPB(value=cell_val4, timestamp_micros=microseconds) ], - ), + ) ], ), - ], + ] ) sample_input = _ReadModifyWriteRowResponsePB(row=response_row) self.assertEqual(expected_output, self._call_fut(sample_input)) class Test__parse_family_pb(unittest.TestCase): - def _call_fut(self, family_pb): from google.cloud.bigtable.row import _parse_family_pb 
@@ -740,23 +708,18 @@ def _call_fut(self, family_pb): def test_it(self): from google.cloud._helpers import _datetime_from_microseconds - col_fam1 = u'col-fam-id' - col_name1 = b'col-name1' - col_name2 = b'col-name2' - cell_val1 = b'cell-val' - cell_val2 = b'cell-val-newer' - cell_val3 = b'altcol-cell-val' + col_fam1 = u"col-fam-id" + col_name1 = b"col-name1" + col_name2 = b"col-name2" + cell_val1 = b"cell-val" + cell_val2 = b"cell-val-newer" + cell_val3 = b"altcol-cell-val" microseconds = 5554441037 timestamp = _datetime_from_microseconds(microseconds) expected_dict = { - col_name1: [ - (cell_val1, timestamp), - (cell_val2, timestamp), - ], - col_name2: [ - (cell_val3, timestamp), - ], + col_name1: [(cell_val1, timestamp), (cell_val2, timestamp)], + col_name2: [(cell_val3, timestamp)], } expected_output = (col_fam1, expected_dict) sample_input = _FamilyPB( @@ -765,24 +728,13 @@ def test_it(self): _ColumnPB( qualifier=col_name1, cells=[ - _CellPB( - value=cell_val1, - timestamp_micros=microseconds, - ), - _CellPB( - value=cell_val2, - timestamp_micros=microseconds, - ), + _CellPB(value=cell_val1, timestamp_micros=microseconds), + _CellPB(value=cell_val2, timestamp_micros=microseconds), ], ), _ColumnPB( qualifier=col_name2, - cells=[ - _CellPB( - value=cell_val3, - timestamp_micros=microseconds, - ), - ], + cells=[_CellPB(value=cell_val3, timestamp_micros=microseconds)], ), ], ) @@ -790,97 +742,83 @@ def test_it(self): def _CheckAndMutateRowResponsePB(*args, **kw): - from google.cloud.bigtable_v2.proto import ( - bigtable_pb2 as messages_v2_pb2) + from google.cloud.bigtable_v2.proto import bigtable_pb2 as messages_v2_pb2 return messages_v2_pb2.CheckAndMutateRowResponse(*args, **kw) def _ReadModifyWriteRowResponsePB(*args, **kw): - from google.cloud.bigtable_v2.proto import ( - bigtable_pb2 as messages_v2_pb2) + from google.cloud.bigtable_v2.proto import bigtable_pb2 as messages_v2_pb2 return messages_v2_pb2.ReadModifyWriteRowResponse(*args, **kw) def _CellPB(*args, **kw): - from google.cloud.bigtable_v2.proto import ( - data_pb2 as data_v2_pb2) + from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 return data_v2_pb2.Cell(*args, **kw) def _ColumnPB(*args, **kw): - from google.cloud.bigtable_v2.proto import ( - data_pb2 as data_v2_pb2) + from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 return data_v2_pb2.Column(*args, **kw) def _FamilyPB(*args, **kw): - from google.cloud.bigtable_v2.proto import ( - data_pb2 as data_v2_pb2) + from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 return data_v2_pb2.Family(*args, **kw) def _MutationPB(*args, **kw): - from google.cloud.bigtable_v2.proto import ( - data_pb2 as data_v2_pb2) + from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 return data_v2_pb2.Mutation(*args, **kw) def _MutationSetCellPB(*args, **kw): - from google.cloud.bigtable_v2.proto import ( - data_pb2 as data_v2_pb2) + from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 return data_v2_pb2.Mutation.SetCell(*args, **kw) def _MutationDeleteFromColumnPB(*args, **kw): - from google.cloud.bigtable_v2.proto import ( - data_pb2 as data_v2_pb2) + from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 return data_v2_pb2.Mutation.DeleteFromColumn(*args, **kw) def _MutationDeleteFromFamilyPB(*args, **kw): - from google.cloud.bigtable_v2.proto import ( - data_pb2 as data_v2_pb2) + from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 return data_v2_pb2.Mutation.DeleteFromFamily(*args, **kw) def 
_MutationDeleteFromRowPB(*args, **kw): - from google.cloud.bigtable_v2.proto import ( - data_pb2 as data_v2_pb2) + from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 return data_v2_pb2.Mutation.DeleteFromRow(*args, **kw) def _RowPB(*args, **kw): - from google.cloud.bigtable_v2.proto import ( - data_pb2 as data_v2_pb2) + from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 return data_v2_pb2.Row(*args, **kw) def _ReadModifyWriteRulePB(*args, **kw): - from google.cloud.bigtable_v2.proto import ( - data_pb2 as data_v2_pb2) + from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 return data_v2_pb2.ReadModifyWriteRule(*args, **kw) class _Instance(object): - def __init__(self, client=None): self._client = client class _Table(object): - def __init__(self, name, client=None): self.name = name self._instance = _Instance(client) diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_data.py b/packages/google-cloud-bigtable/tests/unit/test_row_data.py index cde6f7d21cbf..4aeb9e7b58da 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_data.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_data.py @@ -19,8 +19,7 @@ from google.api_core.exceptions import DeadlineExceeded from ._testing import _make_credentials from google.cloud.bigtable.row_set import RowRange -from google.cloud.bigtable_v2.proto import ( - data_pb2 as data_v2_pb2) +from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 class MultiCallableStub(object): @@ -43,16 +42,10 @@ def __init__(self, responses=[]): self.responses = responses self.requests = [] - def unary_unary(self, - method, - request_serializer=None, - response_deserializer=None): + def unary_unary(self, method, request_serializer=None, response_deserializer=None): return MultiCallableStub(method, self) - def unary_stream(self, - method, - request_serializer=None, - response_deserializer=None): + def unary_stream(self, method, request_serializer=None, response_deserializer=None): return MultiCallableStub(method, self) @@ -71,22 +64,20 @@ def _make_one(self, *args, **kwargs): def _from_pb_test_helper(self, labels=None): import datetime from google.cloud._helpers import _EPOCH - from google.cloud.bigtable_v2.proto import ( - data_pb2 as data_v2_pb2) + from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 timestamp_micros = TestCell.timestamp_micros timestamp = _EPOCH + datetime.timedelta(microseconds=timestamp_micros) - value = b'value-bytes' + value = b"value-bytes" if labels is None: - cell_pb = data_v2_pb2.Cell( - value=value, timestamp_micros=timestamp_micros) + cell_pb = data_v2_pb2.Cell(value=value, timestamp_micros=timestamp_micros) cell_expected = self._make_one(value, timestamp_micros) else: cell_pb = data_v2_pb2.Cell( - value=value, timestamp_micros=timestamp_micros, labels=labels) - cell_expected = self._make_one( - value, timestamp_micros, labels=labels) + value=value, timestamp_micros=timestamp_micros, labels=labels + ) + cell_expected = self._make_one(value, timestamp_micros, labels=labels) klass = self._get_target_class() result = klass.from_pb(cell_pb) @@ -97,7 +88,7 @@ def test_from_pb(self): self._from_pb_test_helper() def test_from_pb_with_labels(self): - labels = [u'label1', u'label2'] + labels = [u"label1", u"label2"] self._from_pb_test_helper(labels) def test_constructor(self): @@ -120,19 +111,18 @@ def test___ne__same_value(self): value = object() cell1 = self._make_one(value, TestCell.timestamp_micros) cell2 = self._make_one(value, 
TestCell.timestamp_micros) - comparison_val = (cell1 != cell2) + comparison_val = cell1 != cell2 self.assertFalse(comparison_val) def test___ne__(self): - value1 = 'value1' - value2 = 'value2' + value1 = "value1" + value2 = "value2" cell1 = self._make_one(value1, TestCell.timestamp_micros) cell2 = self._make_one(value2, TestCell.timestamp_micros) self.assertNotEqual(cell1, cell2) class TestPartialRowData(unittest.TestCase): - @staticmethod def _get_target_class(): from google.cloud.bigtable.row_data import PartialRowData @@ -163,7 +153,7 @@ def test___ne__same_value(self): row_key = object() partial_row_data1 = self._make_one(row_key) partial_row_data2 = self._make_one(row_key) - comparison_val = (partial_row_data1 != partial_row_data2) + comparison_val = partial_row_data1 != partial_row_data2 self.assertFalse(comparison_val) def test___ne__(self): @@ -185,64 +175,51 @@ def test_to_dict(self): cell2 = object() cell3 = object() - family_name1 = u'name1' - family_name2 = u'name2' - qual1 = b'col1' - qual2 = b'col2' - qual3 = b'col3' + family_name1 = u"name1" + family_name2 = u"name2" + qual1 = b"col1" + qual2 = b"col2" + qual3 = b"col3" partial_row_data = self._make_one(None) partial_row_data._cells = { - family_name1: { - qual1: cell1, - qual2: cell2, - }, - family_name2: { - qual3: cell3, - }, + family_name1: {qual1: cell1, qual2: cell2}, + family_name2: {qual3: cell3}, } result = partial_row_data.to_dict() expected_result = { - b'name1:col1': cell1, - b'name1:col2': cell2, - b'name2:col3': cell3, + b"name1:col1": cell1, + b"name1:col2": cell2, + b"name2:col3": cell3, } self.assertEqual(result, expected_result) def test_cell_value(self): - family_name = u'name1' - qualifier = b'col1' - cell = _make_cell(b'value-bytes') + family_name = u"name1" + qualifier = b"col1" + cell = _make_cell(b"value-bytes") partial_row_data = self._make_one(None) - partial_row_data._cells = { - family_name: { - qualifier: [cell], - }, - } + partial_row_data._cells = {family_name: {qualifier: [cell]}} result = partial_row_data.cell_value(family_name, qualifier) self.assertEqual(result, cell.value) def test_cell_value_invalid_index(self): - family_name = u'name1' - qualifier = b'col1' - cell = _make_cell(b'') + family_name = u"name1" + qualifier = b"col1" + cell = _make_cell(b"") partial_row_data = self._make_one(None) - partial_row_data._cells = { - family_name: { - qualifier: [cell], - }, - } + partial_row_data._cells = {family_name: {qualifier: [cell]}} with self.assertRaises(IndexError): partial_row_data.cell_value(family_name, qualifier, index=None) def test_cell_value_invalid_column_family_key(self): - family_name = u'name1' - qualifier = b'col1' + family_name = u"name1" + qualifier = b"col1" partial_row_data = self._make_one(None) @@ -250,52 +227,44 @@ def test_cell_value_invalid_column_family_key(self): partial_row_data.cell_value(family_name, qualifier) def test_cell_value_invalid_column_key(self): - family_name = u'name1' - qualifier = b'col1' + family_name = u"name1" + qualifier = b"col1" partial_row_data = self._make_one(None) - partial_row_data._cells = { - family_name: {}, - } + partial_row_data._cells = {family_name: {}} with self.assertRaises(KeyError): partial_row_data.cell_value(family_name, qualifier) def test_cell_values(self): - family_name = u'name1' - qualifier = b'col1' - cell = _make_cell(b'value-bytes') + family_name = u"name1" + qualifier = b"col1" + cell = _make_cell(b"value-bytes") partial_row_data = self._make_one(None) - partial_row_data._cells = { - family_name: { - qualifier: 
[cell], - }, - } + partial_row_data._cells = {family_name: {qualifier: [cell]}} values = [] for value, timestamp_micros in partial_row_data.cell_values( - family_name, qualifier): + family_name, qualifier + ): values.append(value) self.assertEqual(values[0], cell.value) def test_cell_values_with_max_count(self): - family_name = u'name1' - qualifier = b'col1' - cell_1 = _make_cell(b'value-bytes-1') - cell_2 = _make_cell(b'value-bytes-2') + family_name = u"name1" + qualifier = b"col1" + cell_1 = _make_cell(b"value-bytes-1") + cell_2 = _make_cell(b"value-bytes-2") partial_row_data = self._make_one(None) - partial_row_data._cells = { - family_name: { - qualifier: [cell_1, cell_2], - }, - } + partial_row_data._cells = {family_name: {qualifier: [cell_1, cell_2]}} values = [] for value, timestamp_micros in partial_row_data.cell_values( - family_name, qualifier, max_count=1): + family_name, qualifier, max_count=1 + ): values.append(value) self.assertEqual(1, len(values)) @@ -319,7 +288,6 @@ class _Client(object): class Test_retry_read_rows_exception(unittest.TestCase): - @staticmethod def _call_fut(exc): from google.cloud.bigtable.row_data import _retry_read_rows_exception @@ -339,56 +307,56 @@ def code(self): return self.exception.grpc_status_code def details(self): - return 'Testing' + return "Testing" return TestingException(exception) def test_w_miss(self): from google.api_core.exceptions import Conflict - exception = Conflict('testing') + exception = Conflict("testing") self.assertFalse(self._call_fut(exception)) def test_w_service_unavailable(self): from google.api_core.exceptions import ServiceUnavailable - exception = ServiceUnavailable('testing') + exception = ServiceUnavailable("testing") self.assertTrue(self._call_fut(exception)) def test_w_deadline_exceeded(self): from google.api_core.exceptions import DeadlineExceeded - exception = DeadlineExceeded('testing') + exception = DeadlineExceeded("testing") self.assertTrue(self._call_fut(exception)) def test_w_miss_wrapped_in_grpc(self): from google.api_core.exceptions import Conflict - wrapped = Conflict('testing') + wrapped = Conflict("testing") exception = self._make_grpc_call_error(wrapped) self.assertFalse(self._call_fut(exception)) def test_w_service_unavailable_wrapped_in_grpc(self): from google.api_core.exceptions import ServiceUnavailable - wrapped = ServiceUnavailable('testing') + wrapped = ServiceUnavailable("testing") exception = self._make_grpc_call_error(wrapped) self.assertTrue(self._call_fut(exception)) def test_w_deadline_exceeded_wrapped_in_grpc(self): from google.api_core.exceptions import DeadlineExceeded - wrapped = DeadlineExceeded('testing') + wrapped = DeadlineExceeded("testing") exception = self._make_grpc_call_error(wrapped) self.assertTrue(self._call_fut(exception)) class TestPartialRowsData(unittest.TestCase): - ROW_KEY = b'row-key' - FAMILY_NAME = u'family' - QUALIFIER = b'qualifier' + ROW_KEY = b"row-key" + FAMILY_NAME = u"family" + QUALIFIER = b"qualifier" TIMESTAMP_MICROS = 100 - VALUE = b'value' + VALUE = b"value" @staticmethod def _get_target_class(): @@ -407,43 +375,37 @@ def _make_one(self, *args, **kwargs): def test_constructor(self): from google.cloud.bigtable.row_data import DEFAULT_RETRY_READ_ROWS + client = _Client() client._data_stub = mock.MagicMock() request = object() - partial_rows_data = self._make_one(client._data_stub.ReadRows, - request) + partial_rows_data = self._make_one(client._data_stub.ReadRows, request) self.assertIs(partial_rows_data.request, request) 
self.assertEqual(partial_rows_data.rows, {}) - self.assertEqual(partial_rows_data.retry, - DEFAULT_RETRY_READ_ROWS) + self.assertEqual(partial_rows_data.retry, DEFAULT_RETRY_READ_ROWS) def test_constructor_with_retry(self): client = _Client() client._data_stub = mock.MagicMock() request = retry = object() - partial_rows_data = self._make_one(client._data_stub.ReadRows, - request, retry) + partial_rows_data = self._make_one(client._data_stub.ReadRows, request, retry) self.assertIs(partial_rows_data.request, request) self.assertEqual(partial_rows_data.rows, {}) - self.assertEqual(partial_rows_data.retry, - retry) + self.assertEqual(partial_rows_data.retry, retry) def test___eq__(self): client = _Client() client._data_stub = mock.MagicMock() request = object() - partial_rows_data1 = self._make_one(client._data_stub.ReadRows, - request) - partial_rows_data2 = self._make_one(client._data_stub.ReadRows, - request) + partial_rows_data1 = self._make_one(client._data_stub.ReadRows, request) + partial_rows_data2 = self._make_one(client._data_stub.ReadRows, request) self.assertEqual(partial_rows_data1.rows, partial_rows_data2.rows) def test___eq__type_differ(self): client = _Client() client._data_stub = mock.MagicMock() request = object() - partial_rows_data1 = self._make_one(client._data_stub.ReadRows, - request) + partial_rows_data1 = self._make_one(client._data_stub.ReadRows, request) partial_rows_data2 = object() self.assertNotEqual(partial_rows_data1, partial_rows_data2) @@ -451,29 +413,24 @@ def test___ne__same_value(self): client = _Client() client._data_stub = mock.MagicMock() request = object() - partial_rows_data1 = self._make_one(client._data_stub.ReadRows, - request) - partial_rows_data2 = self._make_one(client._data_stub.ReadRows, - request) - comparison_val = (partial_rows_data1 != partial_rows_data2) + partial_rows_data1 = self._make_one(client._data_stub.ReadRows, request) + partial_rows_data2 = self._make_one(client._data_stub.ReadRows, request) + comparison_val = partial_rows_data1 != partial_rows_data2 self.assertTrue(comparison_val) def test___ne__(self): client = _Client() client._data_stub = mock.MagicMock() request = object() - partial_rows_data1 = self._make_one(client._data_stub.ReadRows, - request) - partial_rows_data2 = self._make_one(client._data_stub.ReadRows, - request) + partial_rows_data1 = self._make_one(client._data_stub.ReadRows, request) + partial_rows_data2 = self._make_one(client._data_stub.ReadRows, request) self.assertNotEqual(partial_rows_data1, partial_rows_data2) def test_rows_getter(self): client = _Client() client._data_stub = mock.MagicMock() request = object() - partial_rows_data = self._make_one(client._data_stub.ReadRows, - request) + partial_rows_data = self._make_one(client._data_stub.ReadRows, request) partial_rows_data.rows = value = object() self.assertIs(partial_rows_data.rows, value) @@ -507,13 +464,13 @@ def test_state_new_row_w_row(self): channel = ChannelStub(responses=[iterator]) data_api = bigtable_client.BigtableClient(channel=channel) credentials = _make_credentials() - client = self._make_client(project='project-id', - credentials=credentials, admin=True) + client = self._make_client( + project="project-id", credentials=credentials, admin=True + ) client._table_data_client = data_api request = object() - yrd = self._make_one( - client._table_data_client.transport.read_rows, request) + yrd = self._make_one(client._table_data_client.transport.read_rows, request) yrd._response_iterator = iterator rows = [row for row in yrd] @@ -535,7 +492,7 
@@ def test_multiple_chunks(self): commit_row=False, ) chunk2 = _ReadRowsResponseCellChunkPB( - qualifier=self.QUALIFIER + b'1', + qualifier=self.QUALIFIER + b"1", timestamp_micros=self.TIMESTAMP_MICROS, value=self.VALUE, commit_row=True, @@ -547,13 +504,13 @@ def test_multiple_chunks(self): channel = ChannelStub(responses=[iterator]) data_api = bigtable_client.BigtableClient(channel=channel) credentials = _make_credentials() - client = self._make_client(project='project-id', - credentials=credentials, admin=True) + client = self._make_client( + project="project-id", credentials=credentials, admin=True + ) client._table_data_client = data_api request = object() - yrd = self._make_one( - client._table_data_client.transport.read_rows, request) + yrd = self._make_one(client._table_data_client.transport.read_rows, request) yrd._response_iterator = iterator rows = [row for row in yrd] @@ -582,18 +539,18 @@ def test__copy_from_previous_unset(self): yrd = self._make_one(client._data_stub.ReadRows, request) cell = _PartialCellData() yrd._copy_from_previous(cell) - self.assertEqual(cell.row_key, b'') - self.assertEqual(cell.family_name, u'') + self.assertEqual(cell.row_key, b"") + self.assertEqual(cell.family_name, u"") self.assertIsNone(cell.qualifier) self.assertEqual(cell.timestamp_micros, 0) self.assertEqual(cell.labels, []) def test__copy_from_previous_blank(self): - ROW_KEY = 'RK' - FAMILY_NAME = u'A' - QUALIFIER = b'C' + ROW_KEY = "RK" + FAMILY_NAME = u"A" + QUALIFIER = b"C" TIMESTAMP_MICROS = 100 - LABELS = ['L1', 'L2'] + LABELS = ["L1", "L2"] client = _Client() client._data_stub = mock.MagicMock() request = object() @@ -614,11 +571,11 @@ def test__copy_from_previous_blank(self): self.assertEqual(cell.labels, LABELS) def test__copy_from_previous_filled(self): - ROW_KEY = 'RK' - FAMILY_NAME = u'A' - QUALIFIER = b'C' + ROW_KEY = "RK" + FAMILY_NAME = u"A" + QUALIFIER = b"C" TIMESTAMP_MICROS = 100 - LABELS = ['L1', 'L2'] + LABELS = ["L1", "L2"] client = _Client() client._data_stub = mock.MagicMock() request = object() @@ -640,22 +597,21 @@ def test__copy_from_previous_filled(self): def test_valid_last_scanned_row_key_on_start(self): client = _Client() - response = _ReadRowsResponseV2( - chunks=(), last_scanned_row_key='2.AFTER') + response = _ReadRowsResponseV2(chunks=(), last_scanned_row_key="2.AFTER") iterator = _MockCancellableIterator(response) client._data_stub = mock.MagicMock() client._data_stub.ReadRows.side_effect = [iterator] request = object() yrd = self._make_one(client._data_stub.ReadRows, request) - yrd.last_scanned_row_key = '1.BEFORE' + yrd.last_scanned_row_key = "1.BEFORE" self._consume_all(yrd) - self.assertEqual(yrd.last_scanned_row_key, '2.AFTER') + self.assertEqual(yrd.last_scanned_row_key, "2.AFTER") def test_invalid_empty_chunk(self): from google.cloud.bigtable.row_data import InvalidChunk client = _Client() - chunks = _generate_cell_chunks(['']) + chunks = _generate_cell_chunks([""]) response = _ReadRowsResponseV2(chunks) iterator = _MockCancellableIterator(response) client._data_stub = mock.MagicMock() @@ -666,7 +622,7 @@ def test_invalid_empty_chunk(self): self._consume_all(yrd) def test_state_cell_in_progress(self): - LABELS = ['L1', 'L2'] + LABELS = ["L1", "L2"] request = object() read_rows = mock.MagicMock() @@ -678,7 +634,7 @@ def test_state_cell_in_progress(self): qualifier=self.QUALIFIER, timestamp_micros=self.TIMESTAMP_MICROS, value=self.VALUE, - labels=LABELS + labels=LABELS, ) yrd._update_cell(chunk) @@ -688,8 +644,7 @@ def test_state_cell_in_progress(self): 
self.assertEqual(yrd._cell.row_key, self.ROW_KEY) self.assertEqual(yrd._cell.family_name, self.FAMILY_NAME) self.assertEqual(yrd._cell.qualifier, self.QUALIFIER) - self.assertEqual(yrd._cell.timestamp_micros, - self.TIMESTAMP_MICROS) + self.assertEqual(yrd._cell.timestamp_micros, self.TIMESTAMP_MICROS) self.assertEqual(yrd._cell.labels, LABELS) self.assertEqual(yrd._cell.value, self.VALUE + self.VALUE) @@ -721,11 +676,10 @@ def test_yield_rows_data(self): def test_yield_retry_rows_data(self): from google.api_core import retry + client = _Client() - retry_read_rows = retry.Retry( - predicate=_read_rows_retry_exception, - ) + retry_read_rows = retry.Retry(predicate=_read_rows_retry_exception) chunk = _ReadRowsResponseCellChunkPB( row_key=self.ROW_KEY, @@ -741,13 +695,11 @@ def test_yield_retry_rows_data(self): failure_iterator = _MockFailureIterator_1() iterator = _MockCancellableIterator(response) client._data_stub = mock.MagicMock() - client._data_stub.ReadRows.side_effect = [failure_iterator, - iterator] + client._data_stub.ReadRows.side_effect = [failure_iterator, iterator] request = object() - yrd = self._make_one(client._data_stub.ReadRows, request, - retry_read_rows) + yrd = self._make_one(client._data_stub.ReadRows, request, retry_read_rows) result = self._consume_all(yrd)[0] @@ -758,10 +710,9 @@ def _consume_all(self, yrd): class Test_ReadRowsRequestManager(unittest.TestCase): - @classmethod def setUpClass(cls): - cls.table_name = 'table_name' + cls.table_name = "table_name" cls.row_range1 = RowRange(b"row_key21", b"row_key29") cls.row_range2 = RowRange(b"row_key31", b"row_key39") cls.row_range3 = RowRange(b"row_key41", b"row_key49") @@ -774,6 +725,7 @@ def setUpClass(cls): @staticmethod def _get_target_class(): from google.cloud.bigtable.row_data import _ReadRowsRequestManager + return _ReadRowsRequestManager def _make_one(self, *args, **kwargs): @@ -784,23 +736,23 @@ def test_constructor(self): last_scanned_key = "last_key" rows_read_so_far = 10 - request_manager = self._make_one(request, last_scanned_key, - rows_read_so_far) + request_manager = self._make_one(request, last_scanned_key, rows_read_so_far) self.assertEqual(request, request_manager.message) self.assertEqual(last_scanned_key, request_manager.last_scanned_key) self.assertEqual(rows_read_so_far, request_manager.rows_read_so_far) def test__filter_row_key(self): - table_name = 'table_name' + table_name = "table_name" request = _ReadRowsRequestPB(table_name=table_name) - request.rows.row_keys.extend([b'row_key1', b'row_key2', - b'row_key3', b'row_key4']) + request.rows.row_keys.extend( + [b"row_key1", b"row_key2", b"row_key3", b"row_key4"] + ) last_scanned_key = b"row_key2" request_manager = self._make_one(request, last_scanned_key, 2) row_keys = request_manager._filter_rows_keys() - expected_row_keys = [b'row_key3', b'row_key4'] + expected_row_keys = [b"row_key3", b"row_key4"] self.assertEqual(expected_row_keys, row_keys) def test__filter_row_ranges_all_ranges_added_back(self): @@ -808,12 +760,15 @@ def test__filter_row_ranges_all_ranges_added_back(self): request_manager = self._make_one(self.request, last_scanned_key, 2) row_ranges = request_manager._filter_row_ranges() - exp_row_range1 = data_v2_pb2.RowRange(start_key_closed=b"row_key21", - end_key_open=b"row_key29") - exp_row_range2 = data_v2_pb2.RowRange(start_key_closed=b"row_key31", - end_key_open=b"row_key39") - exp_row_range3 = data_v2_pb2.RowRange(start_key_closed=b"row_key41", - end_key_open=b"row_key49") + exp_row_range1 = data_v2_pb2.RowRange( + 
start_key_closed=b"row_key21", end_key_open=b"row_key29" + ) + exp_row_range2 = data_v2_pb2.RowRange( + start_key_closed=b"row_key31", end_key_open=b"row_key39" + ) + exp_row_range3 = data_v2_pb2.RowRange( + start_key_closed=b"row_key41", end_key_open=b"row_key49" + ) exp_row_ranges = [exp_row_range1, exp_row_range2, exp_row_range3] self.assertEqual(exp_row_ranges, row_ranges) @@ -838,8 +793,7 @@ def test__filter_row_ranges_all_ranges_already_read_open_closed(self): request.rows.row_ranges.add(**row_range3.get_range_kwargs()) request_manager = self._make_one(request, last_scanned_key, 2) - request_manager.new_message = _ReadRowsRequestPB( - table_name=self.table_name) + request_manager.new_message = _ReadRowsRequestPB(table_name=self.table_name) row_ranges = request_manager._filter_row_ranges() self.assertEqual(row_ranges, []) @@ -847,39 +801,41 @@ def test__filter_row_ranges_all_ranges_already_read_open_closed(self): def test__filter_row_ranges_some_ranges_already_read(self): last_scanned_key = b"row_key22" request_manager = self._make_one(self.request, last_scanned_key, 2) - request_manager.new_message = _ReadRowsRequestPB( - table_name=self.table_name) + request_manager.new_message = _ReadRowsRequestPB(table_name=self.table_name) row_ranges = request_manager._filter_row_ranges() - exp_row_range1 = data_v2_pb2.RowRange(start_key_open=b"row_key22", - end_key_open=b"row_key29") - exp_row_range2 = data_v2_pb2.RowRange(start_key_closed=b"row_key31", - end_key_open=b"row_key39") - exp_row_range3 = data_v2_pb2.RowRange(start_key_closed=b"row_key41", - end_key_open=b"row_key49") + exp_row_range1 = data_v2_pb2.RowRange( + start_key_open=b"row_key22", end_key_open=b"row_key29" + ) + exp_row_range2 = data_v2_pb2.RowRange( + start_key_closed=b"row_key31", end_key_open=b"row_key39" + ) + exp_row_range3 = data_v2_pb2.RowRange( + start_key_closed=b"row_key41", end_key_open=b"row_key49" + ) exp_row_ranges = [exp_row_range1, exp_row_range2, exp_row_range3] self.assertEqual(exp_row_ranges, row_ranges) def test_build_updated_request(self): from google.cloud.bigtable.row_filters import RowSampleFilter + row_filter = RowSampleFilter(0.33) last_scanned_key = b"row_key25" - request = _ReadRowsRequestPB(filter=row_filter.to_pb(), - rows_limit=8, - table_name=self.table_name) + request = _ReadRowsRequestPB( + filter=row_filter.to_pb(), rows_limit=8, table_name=self.table_name + ) request.rows.row_ranges.add(**self.row_range1.get_range_kwargs()) request_manager = self._make_one(request, last_scanned_key, 2) result = request_manager.build_updated_request() - expected_result = _ReadRowsRequestPB(table_name=self.table_name, - filter=row_filter.to_pb(), - rows_limit=6) + expected_result = _ReadRowsRequestPB( + table_name=self.table_name, filter=row_filter.to_pb(), rows_limit=6 + ) expected_result.rows.row_ranges.add( - start_key_open=last_scanned_key, - end_key_open=self.row_range1.end_key + start_key_open=last_scanned_key, end_key_open=self.row_range1.end_key ) self.assertEqual(expected_result, result) @@ -891,72 +847,81 @@ def test_build_updated_request_full_table(self): request_manager = self._make_one(request, last_scanned_key, 2) result = request_manager.build_updated_request() - expected_result = _ReadRowsRequestPB(table_name=self.table_name, - filter={}) + expected_result = _ReadRowsRequestPB(table_name=self.table_name, filter={}) expected_result.rows.row_ranges.add(start_key_open=last_scanned_key) self.assertEqual(expected_result, result) def test_build_updated_request_no_start_key(self): from 
google.cloud.bigtable.row_filters import RowSampleFilter + row_filter = RowSampleFilter(0.33) last_scanned_key = b"row_key25" - request = _ReadRowsRequestPB(filter=row_filter.to_pb(), - rows_limit=8, - table_name=self.table_name) + request = _ReadRowsRequestPB( + filter=row_filter.to_pb(), rows_limit=8, table_name=self.table_name + ) request.rows.row_ranges.add(end_key_open=b"row_key29") request_manager = self._make_one(request, last_scanned_key, 2) result = request_manager.build_updated_request() - expected_result = _ReadRowsRequestPB(table_name=self.table_name, - filter=row_filter.to_pb(), - rows_limit=6) - expected_result.rows.row_ranges.add(start_key_open=last_scanned_key, - end_key_open=b"row_key29") + expected_result = _ReadRowsRequestPB( + table_name=self.table_name, filter=row_filter.to_pb(), rows_limit=6 + ) + expected_result.rows.row_ranges.add( + start_key_open=last_scanned_key, end_key_open=b"row_key29" + ) self.assertEqual(expected_result, result) def test_build_updated_request_no_end_key(self): from google.cloud.bigtable.row_filters import RowSampleFilter + row_filter = RowSampleFilter(0.33) last_scanned_key = b"row_key25" - request = _ReadRowsRequestPB(filter=row_filter.to_pb(), - rows_limit=8, - table_name=self.table_name) + request = _ReadRowsRequestPB( + filter=row_filter.to_pb(), rows_limit=8, table_name=self.table_name + ) request.rows.row_ranges.add(start_key_closed=b"row_key20") request_manager = self._make_one(request, last_scanned_key, 2) result = request_manager.build_updated_request() - expected_result = _ReadRowsRequestPB(table_name=self.table_name, - filter=row_filter.to_pb(), - rows_limit=6) + expected_result = _ReadRowsRequestPB( + table_name=self.table_name, filter=row_filter.to_pb(), rows_limit=6 + ) expected_result.rows.row_ranges.add(start_key_open=last_scanned_key) self.assertEqual(expected_result, result) def test_build_updated_request_rows(self): from google.cloud.bigtable.row_filters import RowSampleFilter + row_filter = RowSampleFilter(0.33) last_scanned_key = b"row_key4" - request = _ReadRowsRequestPB(filter=row_filter.to_pb(), - rows_limit=5, - table_name=self.table_name) - request.rows.row_keys.extend([b'row_key1', b'row_key2', - b'row_key4', b'row_key5', - b'row_key7', b'row_key9']) + request = _ReadRowsRequestPB( + filter=row_filter.to_pb(), rows_limit=5, table_name=self.table_name + ) + request.rows.row_keys.extend( + [ + b"row_key1", + b"row_key2", + b"row_key4", + b"row_key5", + b"row_key7", + b"row_key9", + ] + ) request_manager = self._make_one(request, last_scanned_key, 3) result = request_manager.build_updated_request() - expected_result = _ReadRowsRequestPB(table_name=self.table_name, - filter=row_filter.to_pb(), - rows_limit=2) - expected_result.rows.row_keys.extend([b'row_key5', b'row_key7', - b'row_key9']) + expected_result = _ReadRowsRequestPB( + table_name=self.table_name, filter=row_filter.to_pb(), rows_limit=2 + ) + expected_result.rows.row_keys.extend([b"row_key5", b"row_key7", b"row_key9"]) self.assertEqual(expected_result, result) @@ -967,9 +932,9 @@ def test_build_updated_request_rows_limit(self): request_manager = self._make_one(request, last_scanned_key, 2) result = request_manager.build_updated_request() - expected_result = _ReadRowsRequestPB(table_name=self.table_name, - filter={}, - rows_limit=8) + expected_result = _ReadRowsRequestPB( + table_name=self.table_name, filter={}, rows_limit=8 + ) expected_result.rows.row_ranges.add(start_key_open=last_scanned_key) self.assertEqual(expected_result, result) @@ -1000,7 +965,7 @@ 
def _load_json_test(self, test_name): if self.__class__._json_tests is None: dirname = os.path.dirname(__file__) - filename = os.path.join(dirname, 'read-rows-acceptance-test.json') + filename = os.path.join(dirname, "read-rows-acceptance-test.json") raw = _parse_readrows_acceptance_tests(filename) tests = self.__class__._json_tests = {} for (name, chunks, results) in raw: @@ -1023,53 +988,53 @@ def _fail_during_consume(self, testcase_name): with self.assertRaises(InvalidChunk): prd.consume_all() expected_result = self._sort_flattend_cells( - [result for result in results if not result['error']]) + [result for result in results if not result["error"]] + ) flattened = self._sort_flattend_cells(_flatten_cells(prd)) self.assertEqual(flattened, expected_result) def test_invalid_no_cell_key_before_commit(self): - self._fail_during_consume('invalid - no cell key before commit') + self._fail_during_consume("invalid - no cell key before commit") def test_invalid_no_cell_key_before_value(self): - self._fail_during_consume('invalid - no cell key before value') + self._fail_during_consume("invalid - no cell key before value") def test_invalid_new_col_family_wo_qualifier(self): - self._fail_during_consume( - 'invalid - new col family must specify qualifier') + self._fail_during_consume("invalid - new col family must specify qualifier") def test_invalid_no_commit_between_rows(self): - self._fail_during_consume('invalid - no commit between rows') + self._fail_during_consume("invalid - no commit between rows") def test_invalid_no_commit_after_first_row(self): - self._fail_during_consume('invalid - no commit after first row') + self._fail_during_consume("invalid - no commit after first row") def test_invalid_duplicate_row_key(self): - self._fail_during_consume('invalid - duplicate row key') + self._fail_during_consume("invalid - duplicate row key") def test_invalid_new_row_missing_row_key(self): - self._fail_during_consume('invalid - new row missing row key') + self._fail_during_consume("invalid - new row missing row key") def test_invalid_bare_reset(self): - self._fail_during_consume('invalid - bare reset') + self._fail_during_consume("invalid - bare reset") def test_invalid_bad_reset_no_commit(self): - self._fail_during_consume('invalid - bad reset, no commit') + self._fail_during_consume("invalid - bad reset, no commit") def test_invalid_missing_key_after_reset(self): - self._fail_during_consume('invalid - missing key after reset') + self._fail_during_consume("invalid - missing key after reset") def test_invalid_reset_with_chunk(self): - self._fail_during_consume('invalid - reset with chunk') + self._fail_during_consume("invalid - reset with chunk") def test_invalid_commit_with_chunk(self): - self._fail_during_consume('invalid - commit with chunk') + self._fail_during_consume("invalid - commit with chunk") # JSON Error cases: incomplete final row def _sort_flattend_cells(self, flattened): import operator - key_func = operator.itemgetter('rk', 'fm', 'qual') + key_func = operator.itemgetter("rk", "fm", "qual") return sorted(flattened, key=key_func) def _incomplete_final_row(self, testcase_name): @@ -1085,15 +1050,16 @@ def _incomplete_final_row(self, testcase_name): prd.consume_all() self.assertEqual(prd.state, prd.ROW_IN_PROGRESS) expected_result = self._sort_flattend_cells( - [result for result in results if not result['error']]) + [result for result in results if not result["error"]] + ) flattened = self._sort_flattend_cells(_flatten_cells(prd)) self.assertEqual(flattened, expected_result) def 
test_invalid_no_commit(self): - self._incomplete_final_row('invalid - no commit') + self._incomplete_final_row("invalid - no commit") def test_invalid_last_row_missing_commit(self): - self._incomplete_final_row('invalid - last row missing commit') + self._incomplete_final_row("invalid - last row missing commit") # Non-error cases @@ -1115,104 +1081,104 @@ def _match_results(self, testcase_name, expected_result=_marker): self.assertEqual(flattened, expected_result) def test_bare_commit_implies_ts_zero(self): - self._match_results('bare commit implies ts=0') + self._match_results("bare commit implies ts=0") def test_simple_row_with_timestamp(self): - self._match_results('simple row with timestamp') + self._match_results("simple row with timestamp") def test_missing_timestamp_implies_ts_zero(self): - self._match_results('missing timestamp, implied ts=0') + self._match_results("missing timestamp, implied ts=0") def test_empty_cell_value(self): - self._match_results('empty cell value') + self._match_results("empty cell value") def test_two_unsplit_cells(self): - self._match_results('two unsplit cells') + self._match_results("two unsplit cells") def test_two_qualifiers(self): - self._match_results('two qualifiers') + self._match_results("two qualifiers") def test_two_families(self): - self._match_results('two families') + self._match_results("two families") def test_with_labels(self): - self._match_results('with labels') + self._match_results("with labels") def test_split_cell_bare_commit(self): - self._match_results('split cell, bare commit') + self._match_results("split cell, bare commit") def test_split_cell(self): - self._match_results('split cell') + self._match_results("split cell") def test_split_four_ways(self): - self._match_results('split four ways') + self._match_results("split four ways") def test_two_split_cells(self): - self._match_results('two split cells') + self._match_results("two split cells") def test_multi_qualifier_splits(self): - self._match_results('multi-qualifier splits') + self._match_results("multi-qualifier splits") def test_multi_qualifier_multi_split(self): - self._match_results('multi-qualifier multi-split') + self._match_results("multi-qualifier multi-split") def test_multi_family_split(self): - self._match_results('multi-family split') + self._match_results("multi-family split") def test_two_rows(self): - self._match_results('two rows') + self._match_results("two rows") def test_two_rows_implicit_timestamp(self): - self._match_results('two rows implicit timestamp') + self._match_results("two rows implicit timestamp") def test_two_rows_empty_value(self): - self._match_results('two rows empty value') + self._match_results("two rows empty value") def test_two_rows_one_with_multiple_cells(self): - self._match_results('two rows, one with multiple cells') + self._match_results("two rows, one with multiple cells") def test_two_rows_multiple_cells_multiple_families(self): - self._match_results('two rows, multiple cells, multiple families') + self._match_results("two rows, multiple cells, multiple families") def test_two_rows_multiple_cells(self): - self._match_results('two rows, multiple cells') + self._match_results("two rows, multiple cells") def test_two_rows_four_cells_two_labels(self): - self._match_results('two rows, four cells, 2 labels') + self._match_results("two rows, four cells, 2 labels") def test_two_rows_with_splits_same_timestamp(self): - self._match_results('two rows with splits, same timestamp') + self._match_results("two rows with splits, same 
timestamp") def test_no_data_after_reset(self): # JSON testcase has `"results": null` - self._match_results('no data after reset', expected_result=[]) + self._match_results("no data after reset", expected_result=[]) def test_simple_reset(self): - self._match_results('simple reset') + self._match_results("simple reset") def test_reset_to_new_val(self): - self._match_results('reset to new val') + self._match_results("reset to new val") def test_reset_to_new_qual(self): - self._match_results('reset to new qual') + self._match_results("reset to new qual") def test_reset_with_splits(self): - self._match_results('reset with splits') + self._match_results("reset with splits") def test_two_resets(self): - self._match_results('two resets') + self._match_results("two resets") def test_reset_to_new_row(self): - self._match_results('reset to new row') + self._match_results("reset to new row") def test_reset_in_between_chunks(self): - self._match_results('reset in between chunks') + self._match_results("reset in between chunks") def test_empty_cell_chunk(self): - self._match_results('empty cell chunk') + self._match_results("empty cell chunk") def test_empty_second_qualifier(self): - self._match_results('empty second qualifier') + self._match_results("empty second qualifier") def _flatten_cells(prd): @@ -1226,13 +1192,13 @@ def _flatten_cells(prd): for qualifier, column in family.items(): for cell in column: yield { - u'rk': _bytes_to_unicode(row_key), - u'fm': family_name, - u'qual': _bytes_to_unicode(qualifier), - u'ts': _microseconds_from_datetime(cell.timestamp), - u'value': _bytes_to_unicode(cell.value), - u'label': u' '.join(cell.labels), - u'error': False, + u"rk": _bytes_to_unicode(row_key), + u"fm": family_name, + u"qual": _bytes_to_unicode(qualifier), + u"ts": _microseconds_from_datetime(cell.timestamp), + u"value": _bytes_to_unicode(cell.value), + u"label": u" ".join(cell.labels), + u"error": False, } @@ -1253,7 +1219,6 @@ def next(self): class _MockFailureIterator_1(object): - def next(self): raise DeadlineExceeded("Failed to read from server") @@ -1262,19 +1227,18 @@ def next(self): class _PartialCellData(object): - row_key = b'' - family_name = u'' + row_key = b"" + family_name = u"" qualifier = None timestamp_micros = 0 def __init__(self, **kw): - self.labels = kw.pop('labels', []) + self.labels = kw.pop("labels", []) self.__dict__.update(kw) class _ReadRowsResponseV2(object): - - def __init__(self, chunks, last_scanned_row_key=''): + def __init__(self, chunks, last_scanned_row_key=""): self.chunks = chunks self.last_scanned_row_key = last_scanned_row_key @@ -1306,19 +1270,18 @@ def _parse_readrows_acceptance_tests(filename): with open(filename) as json_file: test_json = json.load(json_file) - for test in test_json['tests']: - name = test['name'] - chunks = _generate_cell_chunks(test['chunks']) - results = test['results'] + for test in test_json["tests"]: + name = test["name"] + chunks = _generate_cell_chunks(test["chunks"]) + results = test["results"] yield name, chunks, results def _ReadRowsResponseCellChunkPB(*args, **kw): - from google.cloud.bigtable_v2.proto import ( - bigtable_pb2 as messages_v2_pb2) + from google.cloud.bigtable_v2.proto import bigtable_pb2 as messages_v2_pb2 - family_name = kw.pop('family_name', None) - qualifier = kw.pop('qualifier', None) + family_name = kw.pop("family_name", None) + qualifier = kw.pop("qualifier", None) message = messages_v2_pb2.ReadRowsResponse.CellChunk(*args, **kw) if family_name: @@ -1336,8 +1299,7 @@ def _make_cell(value): def 
_ReadRowsRequestPB(*args, **kw): - from google.cloud.bigtable_v2.proto import ( - bigtable_pb2 as messages_v2_pb2) + from google.cloud.bigtable_v2.proto import bigtable_pb2 as messages_v2_pb2 return messages_v2_pb2.ReadRowsRequest(*args, **kw) diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_filters.py b/packages/google-cloud-bigtable/tests/unit/test_row_filters.py index 2e781be7bf15..1c51651d8c44 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_filters.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_filters.py @@ -17,7 +17,6 @@ class Test_BoolFilter(unittest.TestCase): - @staticmethod def _get_target_class(): from google.cloud.bigtable.row_filters import _BoolFilter @@ -48,12 +47,11 @@ def test___ne__same_value(self): flag = object() row_filter1 = self._make_one(flag) row_filter2 = self._make_one(flag) - comparison_val = (row_filter1 != row_filter2) + comparison_val = row_filter1 != row_filter2 self.assertFalse(comparison_val) class TestSinkFilter(unittest.TestCase): - @staticmethod def _get_target_class(): from google.cloud.bigtable.row_filters import SinkFilter @@ -72,7 +70,6 @@ def test_to_pb(self): class TestPassAllFilter(unittest.TestCase): - @staticmethod def _get_target_class(): from google.cloud.bigtable.row_filters import PassAllFilter @@ -91,7 +88,6 @@ def test_to_pb(self): class TestBlockAllFilter(unittest.TestCase): - @staticmethod def _get_target_class(): from google.cloud.bigtable.row_filters import BlockAllFilter @@ -110,7 +106,6 @@ def test_to_pb(self): class Test_RegexFilter(unittest.TestCase): - @staticmethod def _get_target_class(): from google.cloud.bigtable.row_filters import _RegexFilter @@ -121,37 +116,36 @@ def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def test_constructor(self): - regex = b'abc' + regex = b"abc" row_filter = self._make_one(regex) self.assertIs(row_filter.regex, regex) def test_constructor_non_bytes(self): - regex = u'abc' + regex = u"abc" row_filter = self._make_one(regex) - self.assertEqual(row_filter.regex, b'abc') + self.assertEqual(row_filter.regex, b"abc") def test___eq__type_differ(self): - regex = b'def-rgx' + regex = b"def-rgx" row_filter1 = self._make_one(regex) row_filter2 = object() self.assertNotEqual(row_filter1, row_filter2) def test___eq__same_value(self): - regex = b'trex-regex' + regex = b"trex-regex" row_filter1 = self._make_one(regex) row_filter2 = self._make_one(regex) self.assertEqual(row_filter1, row_filter2) def test___ne__same_value(self): - regex = b'abc' + regex = b"abc" row_filter1 = self._make_one(regex) row_filter2 = self._make_one(regex) - comparison_val = (row_filter1 != row_filter2) + comparison_val = row_filter1 != row_filter2 self.assertFalse(comparison_val) class TestRowKeyRegexFilter(unittest.TestCase): - @staticmethod def _get_target_class(): from google.cloud.bigtable.row_filters import RowKeyRegexFilter @@ -162,7 +156,7 @@ def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def test_to_pb(self): - regex = b'row-key-regex' + regex = b"row-key-regex" row_filter = self._make_one(regex) pb_val = row_filter.to_pb() expected_pb = _RowFilterPB(row_key_regex_filter=regex) @@ -170,7 +164,6 @@ def test_to_pb(self): class TestRowSampleFilter(unittest.TestCase): - @staticmethod def _get_target_class(): from google.cloud.bigtable.row_filters import RowSampleFilter @@ -206,7 +199,6 @@ def test_to_pb(self): class TestFamilyNameRegexFilter(unittest.TestCase): - @staticmethod def _get_target_class(): from 
google.cloud.bigtable.row_filters import FamilyNameRegexFilter @@ -217,7 +209,7 @@ def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def test_to_pb(self): - regex = u'family-regex' + regex = u"family-regex" row_filter = self._make_one(regex) pb_val = row_filter.to_pb() expected_pb = _RowFilterPB(family_name_regex_filter=regex) @@ -225,11 +217,9 @@ def test_to_pb(self): class TestColumnQualifierRegexFilter(unittest.TestCase): - @staticmethod def _get_target_class(): - from google.cloud.bigtable.row_filters import ( - ColumnQualifierRegexFilter) + from google.cloud.bigtable.row_filters import ColumnQualifierRegexFilter return ColumnQualifierRegexFilter @@ -237,16 +227,14 @@ def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def test_to_pb(self): - regex = b'column-regex' + regex = b"column-regex" row_filter = self._make_one(regex) pb_val = row_filter.to_pb() - expected_pb = _RowFilterPB( - column_qualifier_regex_filter=regex) + expected_pb = _RowFilterPB(column_qualifier_regex_filter=regex) self.assertEqual(pb_val, expected_pb) class TestTimestampRange(unittest.TestCase): - @staticmethod def _get_target_class(): from google.cloud.bigtable.row_filters import TimestampRange @@ -282,12 +270,13 @@ def test___ne__same_value(self): end = object() time_range1 = self._make_one(start=start, end=end) time_range2 = self._make_one(start=start, end=end) - comparison_val = (time_range1 != time_range2) + comparison_val = time_range1 != time_range2 self.assertFalse(comparison_val) def _to_pb_helper(self, pb_kwargs, start=None, end=None): import datetime from google.cloud._helpers import _EPOCH + if start is not None: start = _EPOCH + datetime.timedelta(microseconds=start) if end is not None: @@ -296,11 +285,9 @@ def _to_pb_helper(self, pb_kwargs, start=None, end=None): expected_pb = _TimestampRangePB(**pb_kwargs) time_pb = time_range.to_pb() self.assertEqual( - time_pb.start_timestamp_micros, - expected_pb.start_timestamp_micros) - self.assertEqual( - time_pb.end_timestamp_micros, - expected_pb.end_timestamp_micros) + time_pb.start_timestamp_micros, expected_pb.start_timestamp_micros + ) + self.assertEqual(time_pb.end_timestamp_micros, expected_pb.end_timestamp_micros) self.assertEqual(time_pb, expected_pb) def test_to_pb(self): @@ -311,8 +298,8 @@ def test_to_pb(self): end_millis = end_micros // 1000 * 1000 + 1000 self.assertEqual(end_millis, 12939372000) pb_kwargs = {} - pb_kwargs['start_timestamp_micros'] = start_millis - pb_kwargs['end_timestamp_micros'] = end_millis + pb_kwargs["start_timestamp_micros"] = start_millis + pb_kwargs["end_timestamp_micros"] = end_millis self._to_pb_helper(pb_kwargs, start=start_micros, end=end_micros) def test_to_pb_start_only(self): @@ -321,7 +308,7 @@ def test_to_pb_start_only(self): start_millis = start_micros // 1000 * 1000 self.assertEqual(start_millis, 30871000) pb_kwargs = {} - pb_kwargs['start_timestamp_micros'] = start_millis + pb_kwargs["start_timestamp_micros"] = start_millis self._to_pb_helper(pb_kwargs, start=start_micros, end=None) def test_to_pb_end_only(self): @@ -330,12 +317,11 @@ def test_to_pb_end_only(self): end_millis = end_micros // 1000 * 1000 self.assertEqual(end_millis, 12939371000) pb_kwargs = {} - pb_kwargs['end_timestamp_micros'] = end_millis + pb_kwargs["end_timestamp_micros"] = end_millis self._to_pb_helper(pb_kwargs, start=None, end=end_micros) class TestTimestampRangeFilter(unittest.TestCase): - @staticmethod def _get_target_class(): from 
google.cloud.bigtable.row_filters import TimestampRangeFilter @@ -368,13 +354,11 @@ def test_to_pb(self): range_ = TimestampRange() row_filter = self._make_one(range_) pb_val = row_filter.to_pb() - expected_pb = _RowFilterPB( - timestamp_range_filter=_TimestampRangePB()) + expected_pb = _RowFilterPB(timestamp_range_filter=_TimestampRangePB()) self.assertEqual(pb_val, expected_pb) class TestColumnRangeFilter(unittest.TestCase): - @staticmethod def _get_target_class(): from google.cloud.bigtable.row_filters import ColumnRangeFilter @@ -404,7 +388,8 @@ def test_constructor_explicit(self): start_column=start_column, end_column=end_column, inclusive_start=inclusive_start, - inclusive_end=inclusive_end) + inclusive_end=inclusive_end, + ) self.assertIs(row_filter.column_family_id, column_family_id) self.assertIs(row_filter.start_column, start_column) self.assertIs(row_filter.end_column, end_column) @@ -413,13 +398,15 @@ def test_constructor_explicit(self): def test_constructor_bad_start(self): column_family_id = object() - self.assertRaises(ValueError, self._make_one, - column_family_id, inclusive_start=True) + self.assertRaises( + ValueError, self._make_one, column_family_id, inclusive_start=True + ) def test_constructor_bad_end(self): column_family_id = object() - self.assertRaises(ValueError, self._make_one, - column_family_id, inclusive_end=True) + self.assertRaises( + ValueError, self._make_one, column_family_id, inclusive_end=True + ) def test___eq__(self): column_family_id = object() @@ -427,16 +414,20 @@ def test___eq__(self): end_column = object() inclusive_start = object() inclusive_end = object() - row_filter1 = self._make_one(column_family_id, - start_column=start_column, - end_column=end_column, - inclusive_start=inclusive_start, - inclusive_end=inclusive_end) - row_filter2 = self._make_one(column_family_id, - start_column=start_column, - end_column=end_column, - inclusive_start=inclusive_start, - inclusive_end=inclusive_end) + row_filter1 = self._make_one( + column_family_id, + start_column=start_column, + end_column=end_column, + inclusive_start=inclusive_start, + inclusive_end=inclusive_end, + ) + row_filter2 = self._make_one( + column_family_id, + start_column=start_column, + end_column=end_column, + inclusive_start=inclusive_start, + inclusive_end=inclusive_end, + ) self.assertEqual(row_filter1, row_filter2) def test___eq__type_differ(self): @@ -446,61 +437,58 @@ def test___eq__type_differ(self): self.assertNotEqual(row_filter1, row_filter2) def test_to_pb(self): - column_family_id = u'column-family-id' + column_family_id = u"column-family-id" row_filter = self._make_one(column_family_id) col_range_pb = _ColumnRangePB(family_name=column_family_id) expected_pb = _RowFilterPB(column_range_filter=col_range_pb) self.assertEqual(row_filter.to_pb(), expected_pb) def test_to_pb_inclusive_start(self): - column_family_id = u'column-family-id' - column = b'column' + column_family_id = u"column-family-id" + column = b"column" row_filter = self._make_one(column_family_id, start_column=column) col_range_pb = _ColumnRangePB( - family_name=column_family_id, - start_qualifier_closed=column, + family_name=column_family_id, start_qualifier_closed=column ) expected_pb = _RowFilterPB(column_range_filter=col_range_pb) self.assertEqual(row_filter.to_pb(), expected_pb) def test_to_pb_exclusive_start(self): - column_family_id = u'column-family-id' - column = b'column' - row_filter = self._make_one(column_family_id, start_column=column, - inclusive_start=False) + column_family_id = u"column-family-id" + 
column = b"column" + row_filter = self._make_one( + column_family_id, start_column=column, inclusive_start=False + ) col_range_pb = _ColumnRangePB( - family_name=column_family_id, - start_qualifier_open=column, + family_name=column_family_id, start_qualifier_open=column ) expected_pb = _RowFilterPB(column_range_filter=col_range_pb) self.assertEqual(row_filter.to_pb(), expected_pb) def test_to_pb_inclusive_end(self): - column_family_id = u'column-family-id' - column = b'column' + column_family_id = u"column-family-id" + column = b"column" row_filter = self._make_one(column_family_id, end_column=column) col_range_pb = _ColumnRangePB( - family_name=column_family_id, - end_qualifier_closed=column, + family_name=column_family_id, end_qualifier_closed=column ) expected_pb = _RowFilterPB(column_range_filter=col_range_pb) self.assertEqual(row_filter.to_pb(), expected_pb) def test_to_pb_exclusive_end(self): - column_family_id = u'column-family-id' - column = b'column' - row_filter = self._make_one(column_family_id, end_column=column, - inclusive_end=False) + column_family_id = u"column-family-id" + column = b"column" + row_filter = self._make_one( + column_family_id, end_column=column, inclusive_end=False + ) col_range_pb = _ColumnRangePB( - family_name=column_family_id, - end_qualifier_open=column, + family_name=column_family_id, end_qualifier_open=column ) expected_pb = _RowFilterPB(column_range_filter=col_range_pb) self.assertEqual(row_filter.to_pb(), expected_pb) class TestValueRegexFilter(unittest.TestCase): - @staticmethod def _get_target_class(): from google.cloud.bigtable.row_filters import ValueRegexFilter @@ -511,7 +499,7 @@ def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def test_to_pb(self): - regex = b'value-regex' + regex = b"value-regex" row_filter = self._make_one(regex) pb_val = row_filter.to_pb() expected_pb = _RowFilterPB(value_regex_filter=regex) @@ -519,7 +507,6 @@ def test_to_pb(self): class TestValueRangeFilter(unittest.TestCase): - @staticmethod def _get_target_class(): from google.cloud.bigtable.row_filters import ValueRangeFilter @@ -541,10 +528,12 @@ def test_constructor_explicit(self): end_value = object() inclusive_start = object() inclusive_end = object() - row_filter = self._make_one(start_value=start_value, - end_value=end_value, - inclusive_start=inclusive_start, - inclusive_end=inclusive_end) + row_filter = self._make_one( + start_value=start_value, + end_value=end_value, + inclusive_start=inclusive_start, + inclusive_end=inclusive_end, + ) self.assertIs(row_filter.start_value, start_value) self.assertIs(row_filter.end_value, end_value) self.assertIs(row_filter.inclusive_start, inclusive_start) @@ -561,14 +550,18 @@ def test___eq__(self): end_value = object() inclusive_start = object() inclusive_end = object() - row_filter1 = self._make_one(start_value=start_value, - end_value=end_value, - inclusive_start=inclusive_start, - inclusive_end=inclusive_end) - row_filter2 = self._make_one(start_value=start_value, - end_value=end_value, - inclusive_start=inclusive_start, - inclusive_end=inclusive_end) + row_filter1 = self._make_one( + start_value=start_value, + end_value=end_value, + inclusive_start=inclusive_start, + inclusive_end=inclusive_end, + ) + row_filter2 = self._make_one( + start_value=start_value, + end_value=end_value, + inclusive_start=inclusive_start, + inclusive_end=inclusive_end, + ) self.assertEqual(row_filter1, row_filter2) def test___eq__type_differ(self): @@ -578,33 +571,32 @@ def test___eq__type_differ(self): def 
test_to_pb(self): row_filter = self._make_one() - expected_pb = _RowFilterPB( - value_range_filter=_ValueRangePB()) + expected_pb = _RowFilterPB(value_range_filter=_ValueRangePB()) self.assertEqual(row_filter.to_pb(), expected_pb) def test_to_pb_inclusive_start(self): - value = b'some-value' + value = b"some-value" row_filter = self._make_one(start_value=value) val_range_pb = _ValueRangePB(start_value_closed=value) expected_pb = _RowFilterPB(value_range_filter=val_range_pb) self.assertEqual(row_filter.to_pb(), expected_pb) def test_to_pb_exclusive_start(self): - value = b'some-value' + value = b"some-value" row_filter = self._make_one(start_value=value, inclusive_start=False) val_range_pb = _ValueRangePB(start_value_open=value) expected_pb = _RowFilterPB(value_range_filter=val_range_pb) self.assertEqual(row_filter.to_pb(), expected_pb) def test_to_pb_inclusive_end(self): - value = b'some-value' + value = b"some-value" row_filter = self._make_one(end_value=value) val_range_pb = _ValueRangePB(end_value_closed=value) expected_pb = _RowFilterPB(value_range_filter=val_range_pb) self.assertEqual(row_filter.to_pb(), expected_pb) def test_to_pb_exclusive_end(self): - value = b'some-value' + value = b"some-value" row_filter = self._make_one(end_value=value, inclusive_end=False) val_range_pb = _ValueRangePB(end_value_open=value) expected_pb = _RowFilterPB(value_range_filter=val_range_pb) @@ -612,7 +604,6 @@ def test_to_pb_exclusive_end(self): class Test_CellCountFilter(unittest.TestCase): - @staticmethod def _get_target_class(): from google.cloud.bigtable.row_filters import _CellCountFilter @@ -643,12 +634,11 @@ def test___ne__same_value(self): num_cells = object() row_filter1 = self._make_one(num_cells) row_filter2 = self._make_one(num_cells) - comparison_val = (row_filter1 != row_filter2) + comparison_val = row_filter1 != row_filter2 self.assertFalse(comparison_val) class TestCellsRowOffsetFilter(unittest.TestCase): - @staticmethod def _get_target_class(): from google.cloud.bigtable.row_filters import CellsRowOffsetFilter @@ -662,13 +652,11 @@ def test_to_pb(self): num_cells = 76 row_filter = self._make_one(num_cells) pb_val = row_filter.to_pb() - expected_pb = _RowFilterPB( - cells_per_row_offset_filter=num_cells) + expected_pb = _RowFilterPB(cells_per_row_offset_filter=num_cells) self.assertEqual(pb_val, expected_pb) class TestCellsRowLimitFilter(unittest.TestCase): - @staticmethod def _get_target_class(): from google.cloud.bigtable.row_filters import CellsRowLimitFilter @@ -682,13 +670,11 @@ def test_to_pb(self): num_cells = 189 row_filter = self._make_one(num_cells) pb_val = row_filter.to_pb() - expected_pb = _RowFilterPB( - cells_per_row_limit_filter=num_cells) + expected_pb = _RowFilterPB(cells_per_row_limit_filter=num_cells) self.assertEqual(pb_val, expected_pb) class TestCellsColumnLimitFilter(unittest.TestCase): - @staticmethod def _get_target_class(): from google.cloud.bigtable.row_filters import CellsColumnLimitFilter @@ -702,17 +688,14 @@ def test_to_pb(self): num_cells = 10 row_filter = self._make_one(num_cells) pb_val = row_filter.to_pb() - expected_pb = _RowFilterPB( - cells_per_column_limit_filter=num_cells) + expected_pb = _RowFilterPB(cells_per_column_limit_filter=num_cells) self.assertEqual(pb_val, expected_pb) class TestStripValueTransformerFilter(unittest.TestCase): - @staticmethod def _get_target_class(): - from google.cloud.bigtable.row_filters import ( - StripValueTransformerFilter) + from google.cloud.bigtable.row_filters import StripValueTransformerFilter return 
StripValueTransformerFilter @@ -728,7 +711,6 @@ def test_to_pb(self): class TestApplyLabelFilter(unittest.TestCase): - @staticmethod def _get_target_class(): from google.cloud.bigtable.row_filters import ApplyLabelFilter @@ -756,7 +738,7 @@ def test___eq__same_value(self): self.assertEqual(row_filter1, row_filter2) def test_to_pb(self): - label = u'label' + label = u"label" row_filter = self._make_one(label) pb_val = row_filter.to_pb() expected_pb = _RowFilterPB(apply_label_transformer=label) @@ -764,7 +746,6 @@ def test_to_pb(self): class Test_FilterCombination(unittest.TestCase): - @staticmethod def _get_target_class(): from google.cloud.bigtable.row_filters import _FilterCombination @@ -797,7 +778,6 @@ def test___eq__type_differ(self): class TestRowFilterChain(unittest.TestCase): - @staticmethod def _get_target_class(): from google.cloud.bigtable.row_filters import RowFilterChain @@ -809,8 +789,7 @@ def _make_one(self, *args, **kwargs): def test_to_pb(self): from google.cloud.bigtable.row_filters import RowSampleFilter - from google.cloud.bigtable.row_filters import ( - StripValueTransformerFilter) + from google.cloud.bigtable.row_filters import StripValueTransformerFilter row_filter1 = StripValueTransformerFilter(True) row_filter1_pb = row_filter1.to_pb() @@ -822,17 +801,14 @@ def test_to_pb(self): filter_pb = row_filter3.to_pb() expected_pb = _RowFilterPB( - chain=_RowFilterChainPB( - filters=[row_filter1_pb, row_filter2_pb], - ), + chain=_RowFilterChainPB(filters=[row_filter1_pb, row_filter2_pb]) ) self.assertEqual(filter_pb, expected_pb) def test_to_pb_nested(self): from google.cloud.bigtable.row_filters import CellsRowLimitFilter from google.cloud.bigtable.row_filters import RowSampleFilter - from google.cloud.bigtable.row_filters import ( - StripValueTransformerFilter) + from google.cloud.bigtable.row_filters import StripValueTransformerFilter row_filter1 = StripValueTransformerFilter(True) row_filter2 = RowSampleFilter(0.25) @@ -847,15 +823,12 @@ def test_to_pb_nested(self): filter_pb = row_filter5.to_pb() expected_pb = _RowFilterPB( - chain=_RowFilterChainPB( - filters=[row_filter3_pb, row_filter4_pb], - ), + chain=_RowFilterChainPB(filters=[row_filter3_pb, row_filter4_pb]) ) self.assertEqual(filter_pb, expected_pb) class TestRowFilterUnion(unittest.TestCase): - @staticmethod def _get_target_class(): from google.cloud.bigtable.row_filters import RowFilterUnion @@ -867,8 +840,7 @@ def _make_one(self, *args, **kwargs): def test_to_pb(self): from google.cloud.bigtable.row_filters import RowSampleFilter - from google.cloud.bigtable.row_filters import ( - StripValueTransformerFilter) + from google.cloud.bigtable.row_filters import StripValueTransformerFilter row_filter1 = StripValueTransformerFilter(True) row_filter1_pb = row_filter1.to_pb() @@ -880,17 +852,14 @@ def test_to_pb(self): filter_pb = row_filter3.to_pb() expected_pb = _RowFilterPB( - interleave=_RowFilterInterleavePB( - filters=[row_filter1_pb, row_filter2_pb], - ), + interleave=_RowFilterInterleavePB(filters=[row_filter1_pb, row_filter2_pb]) ) self.assertEqual(filter_pb, expected_pb) def test_to_pb_nested(self): from google.cloud.bigtable.row_filters import CellsRowLimitFilter from google.cloud.bigtable.row_filters import RowSampleFilter - from google.cloud.bigtable.row_filters import ( - StripValueTransformerFilter) + from google.cloud.bigtable.row_filters import StripValueTransformerFilter row_filter1 = StripValueTransformerFilter(True) row_filter2 = RowSampleFilter(0.25) @@ -905,15 +874,12 @@ def 
test_to_pb_nested(self): filter_pb = row_filter5.to_pb() expected_pb = _RowFilterPB( - interleave=_RowFilterInterleavePB( - filters=[row_filter3_pb, row_filter4_pb], - ), + interleave=_RowFilterInterleavePB(filters=[row_filter3_pb, row_filter4_pb]) ) self.assertEqual(filter_pb, expected_pb) class TestConditionalRowFilter(unittest.TestCase): - @staticmethod def _get_target_class(): from google.cloud.bigtable.row_filters import ConditionalRowFilter @@ -927,9 +893,9 @@ def test_constructor(self): base_filter = object() true_filter = object() false_filter = object() - cond_filter = self._make_one(base_filter, - true_filter=true_filter, - false_filter=false_filter) + cond_filter = self._make_one( + base_filter, true_filter=true_filter, false_filter=false_filter + ) self.assertIs(cond_filter.base_filter, base_filter) self.assertIs(cond_filter.true_filter, true_filter) self.assertIs(cond_filter.false_filter, false_filter) @@ -938,29 +904,28 @@ def test___eq__(self): base_filter = object() true_filter = object() false_filter = object() - cond_filter1 = self._make_one(base_filter, - true_filter=true_filter, - false_filter=false_filter) - cond_filter2 = self._make_one(base_filter, - true_filter=true_filter, - false_filter=false_filter) + cond_filter1 = self._make_one( + base_filter, true_filter=true_filter, false_filter=false_filter + ) + cond_filter2 = self._make_one( + base_filter, true_filter=true_filter, false_filter=false_filter + ) self.assertEqual(cond_filter1, cond_filter2) def test___eq__type_differ(self): base_filter = object() true_filter = object() false_filter = object() - cond_filter1 = self._make_one(base_filter, - true_filter=true_filter, - false_filter=false_filter) + cond_filter1 = self._make_one( + base_filter, true_filter=true_filter, false_filter=false_filter + ) cond_filter2 = object() self.assertNotEqual(cond_filter1, cond_filter2) def test_to_pb(self): from google.cloud.bigtable.row_filters import CellsRowOffsetFilter from google.cloud.bigtable.row_filters import RowSampleFilter - from google.cloud.bigtable.row_filters import ( - StripValueTransformerFilter) + from google.cloud.bigtable.row_filters import StripValueTransformerFilter row_filter1 = StripValueTransformerFilter(True) row_filter1_pb = row_filter1.to_pb() @@ -971,8 +936,9 @@ def test_to_pb(self): row_filter3 = CellsRowOffsetFilter(11) row_filter3_pb = row_filter3.to_pb() - row_filter4 = self._make_one(row_filter1, true_filter=row_filter2, - false_filter=row_filter3) + row_filter4 = self._make_one( + row_filter1, true_filter=row_filter2, false_filter=row_filter3 + ) filter_pb = row_filter4.to_pb() expected_pb = _RowFilterPB( @@ -980,14 +946,13 @@ def test_to_pb(self): predicate_filter=row_filter1_pb, true_filter=row_filter2_pb, false_filter=row_filter3_pb, - ), + ) ) self.assertEqual(filter_pb, expected_pb) def test_to_pb_true_only(self): from google.cloud.bigtable.row_filters import RowSampleFilter - from google.cloud.bigtable.row_filters import ( - StripValueTransformerFilter) + from google.cloud.bigtable.row_filters import StripValueTransformerFilter row_filter1 = StripValueTransformerFilter(True) row_filter1_pb = row_filter1.to_pb() @@ -1000,16 +965,14 @@ def test_to_pb_true_only(self): expected_pb = _RowFilterPB( condition=_RowFilterConditionPB( - predicate_filter=row_filter1_pb, - true_filter=row_filter2_pb, - ), + predicate_filter=row_filter1_pb, true_filter=row_filter2_pb + ) ) self.assertEqual(filter_pb, expected_pb) def test_to_pb_false_only(self): from google.cloud.bigtable.row_filters import 
RowSampleFilter - from google.cloud.bigtable.row_filters import ( - StripValueTransformerFilter) + from google.cloud.bigtable.row_filters import StripValueTransformerFilter row_filter1 = StripValueTransformerFilter(True) row_filter1_pb = row_filter1.to_pb() @@ -1022,57 +985,49 @@ def test_to_pb_false_only(self): expected_pb = _RowFilterPB( condition=_RowFilterConditionPB( - predicate_filter=row_filter1_pb, - false_filter=row_filter2_pb, - ), + predicate_filter=row_filter1_pb, false_filter=row_filter2_pb + ) ) self.assertEqual(filter_pb, expected_pb) def _ColumnRangePB(*args, **kw): - from google.cloud.bigtable_v2.proto import ( - data_pb2 as data_v2_pb2) + from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 return data_v2_pb2.ColumnRange(*args, **kw) def _RowFilterPB(*args, **kw): - from google.cloud.bigtable_v2.proto import ( - data_pb2 as data_v2_pb2) + from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 return data_v2_pb2.RowFilter(*args, **kw) def _RowFilterChainPB(*args, **kw): - from google.cloud.bigtable_v2.proto import ( - data_pb2 as data_v2_pb2) + from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 return data_v2_pb2.RowFilter.Chain(*args, **kw) def _RowFilterConditionPB(*args, **kw): - from google.cloud.bigtable_v2.proto import ( - data_pb2 as data_v2_pb2) + from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 return data_v2_pb2.RowFilter.Condition(*args, **kw) def _RowFilterInterleavePB(*args, **kw): - from google.cloud.bigtable_v2.proto import ( - data_pb2 as data_v2_pb2) + from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 return data_v2_pb2.RowFilter.Interleave(*args, **kw) def _TimestampRangePB(*args, **kw): - from google.cloud.bigtable_v2.proto import ( - data_pb2 as data_v2_pb2) + from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 return data_v2_pb2.TimestampRange(*args, **kw) def _ValueRangePB(*args, **kw): - from google.cloud.bigtable_v2.proto import ( - data_pb2 as data_v2_pb2) + from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 return data_v2_pb2.ValueRange(*args, **kw) diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_set.py b/packages/google-cloud-bigtable/tests/unit/test_row_set.py index 990173b376c1..c66341b84ec6 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_set.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_set.py @@ -22,6 +22,7 @@ class TestRowSet(unittest.TestCase): @staticmethod def _get_target_class(): from google.cloud.bigtable.row_set import RowSet + return RowSet def _make_one(self): @@ -138,7 +139,7 @@ def test__ne__same_value(self): row_set1.add_row_range(row_range1) row_set2.add_row_range(row_range2) - comparison_val = (row_set1 != row_set2) + comparison_val = row_set1 != row_set2 self.assertFalse(comparison_val) def test_add_row_key(self): @@ -158,15 +159,17 @@ def test_add_row_range(self): def test_add_row_range_from_keys(self): row_set = self._make_one() - row_set.add_row_range_from_keys(start_key=b"row_key1", - end_key=b"row_key9", - start_inclusive=False, - end_inclusive=True) + row_set.add_row_range_from_keys( + start_key=b"row_key1", + end_key=b"row_key9", + start_inclusive=False, + end_inclusive=True, + ) self.assertEqual(row_set.row_ranges[0].end_key, b"row_key9") def test__update_message_request(self): row_set = self._make_one() - table_name = 'table_name' + table_name = "table_name" row_set.add_row_key("row_key1") row_range1 = RowRange(b"row_key21", b"row_key29") 
row_set.add_row_range(row_range1) @@ -183,10 +186,10 @@ def test__update_message_request(self): class TestRowRange(unittest.TestCase): - @staticmethod def _get_target_class(): from google.cloud.bigtable.row_set import RowRange + return RowRange def _make_one(self, *args, **kwargs): @@ -202,15 +205,15 @@ def test_constructor(self): self.assertFalse(row_range.end_inclusive) def test___hash__set_equality(self): - row_range1 = self._make_one('row_key1', 'row_key9') - row_range2 = self._make_one('row_key1', 'row_key9') + row_range1 = self._make_one("row_key1", "row_key9") + row_range2 = self._make_one("row_key1", "row_key9") set_one = {row_range1, row_range2} set_two = {row_range1, row_range2} self.assertEqual(set_one, set_two) def test___hash__not_equals(self): - row_range1 = self._make_one('row_key1', 'row_key9') - row_range2 = self._make_one('row_key1', 'row_key19') + row_range1 = self._make_one("row_key1", "row_key9") + row_range2 = self._make_one("row_key1", "row_key19") set_one = {row_range1} set_two = {row_range2} self.assertNotEqual(set_one, set_two) @@ -218,44 +221,36 @@ def test___hash__not_equals(self): def test__eq__(self): start_key = b"row_key1" end_key = b"row_key9" - row_range1 = self._make_one(start_key, end_key, - True, False) - row_range2 = self._make_one(start_key, end_key, - True, False) + row_range1 = self._make_one(start_key, end_key, True, False) + row_range2 = self._make_one(start_key, end_key, True, False) self.assertEqual(row_range1, row_range2) def test___eq__type_differ(self): start_key = b"row_key1" end_key = b"row_key9" - row_range1 = self._make_one(start_key, end_key, - True, False) + row_range1 = self._make_one(start_key, end_key, True, False) row_range2 = object() self.assertNotEqual(row_range1, row_range2) def test__ne__(self): start_key = b"row_key1" end_key = b"row_key9" - row_range1 = self._make_one(start_key, end_key, - True, False) - row_range2 = self._make_one(start_key, end_key, - False, True) + row_range1 = self._make_one(start_key, end_key, True, False) + row_range2 = self._make_one(start_key, end_key, False, True) self.assertNotEqual(row_range1, row_range2) def test__ne__same_value(self): start_key = b"row_key1" end_key = b"row_key9" - row_range1 = self._make_one(start_key, end_key, - True, False) - row_range2 = self._make_one(start_key, end_key, - True, False) - comparison_val = (row_range1 != row_range2) + row_range1 = self._make_one(start_key, end_key, True, False) + row_range2 = self._make_one(start_key, end_key, True, False) + comparison_val = row_range1 != row_range2 self.assertFalse(comparison_val) def test_get_range_kwargs_closed_open(self): start_key = b"row_key1" end_key = b"row_key9" - expected_result = {'start_key_closed': start_key, - 'end_key_open': end_key} + expected_result = {"start_key_closed": start_key, "end_key_open": end_key} row_range = self._make_one(start_key, end_key) actual_result = row_range.get_range_kwargs() self.assertEqual(expected_result, actual_result) @@ -263,15 +258,13 @@ def test_get_range_kwargs_closed_open(self): def test_get_range_kwargs_open_closed(self): start_key = b"row_key1" end_key = b"row_key9" - expected_result = {'start_key_open': start_key, - 'end_key_closed': end_key} + expected_result = {"start_key_open": start_key, "end_key_closed": end_key} row_range = self._make_one(start_key, end_key, False, True) actual_result = row_range.get_range_kwargs() self.assertEqual(expected_result, actual_result) def _ReadRowsRequestPB(*args, **kw): - from google.cloud.bigtable_v2.proto import ( - bigtable_pb2 as 
messages_v2_pb2) + from google.cloud.bigtable_v2.proto import bigtable_pb2 as messages_v2_pb2 return messages_v2_pb2.ReadRowsRequest(*args, **kw) diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index 39d03a33743e..5e737c872144 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -21,60 +21,62 @@ class Test___mutate_rows_request(unittest.TestCase): - def _call_fut(self, table_name, rows): from google.cloud.bigtable.table import _mutate_rows_request return _mutate_rows_request(table_name, rows) - @mock.patch('google.cloud.bigtable.table._MAX_BULK_MUTATIONS', new=3) + @mock.patch("google.cloud.bigtable.table._MAX_BULK_MUTATIONS", new=3) def test__mutate_rows_too_many_mutations(self): from google.cloud.bigtable.row import DirectRow from google.cloud.bigtable.table import TooManyMutationsError - table = mock.Mock(name='table', spec=['name']) - table.name = 'table' - rows = [DirectRow(row_key=b'row_key', table=table), - DirectRow(row_key=b'row_key_2', table=table)] - rows[0].set_cell('cf1', b'c1', 1) - rows[0].set_cell('cf1', b'c1', 2) - rows[1].set_cell('cf1', b'c1', 3) - rows[1].set_cell('cf1', b'c1', 4) + table = mock.Mock(name="table", spec=["name"]) + table.name = "table" + rows = [ + DirectRow(row_key=b"row_key", table=table), + DirectRow(row_key=b"row_key_2", table=table), + ] + rows[0].set_cell("cf1", b"c1", 1) + rows[0].set_cell("cf1", b"c1", 2) + rows[1].set_cell("cf1", b"c1", 3) + rows[1].set_cell("cf1", b"c1", 4) with self.assertRaises(TooManyMutationsError): - self._call_fut('table', rows) + self._call_fut("table", rows) def test__mutate_rows_request(self): from google.cloud.bigtable.row import DirectRow - table = mock.Mock(name='table', spec=['name']) - table.name = 'table' - rows = [DirectRow(row_key=b'row_key', table=table), - DirectRow(row_key=b'row_key_2')] - rows[0].set_cell('cf1', b'c1', b'1') - rows[1].set_cell('cf1', b'c1', b'2') - result = self._call_fut('table', rows) + table = mock.Mock(name="table", spec=["name"]) + table.name = "table" + rows = [ + DirectRow(row_key=b"row_key", table=table), + DirectRow(row_key=b"row_key_2"), + ] + rows[0].set_cell("cf1", b"c1", b"1") + rows[1].set_cell("cf1", b"c1", b"2") + result = self._call_fut("table", rows) - expected_result = _mutate_rows_request_pb(table_name='table') + expected_result = _mutate_rows_request_pb(table_name="table") entry1 = expected_result.entries.add() - entry1.row_key = b'row_key' + entry1.row_key = b"row_key" mutations1 = entry1.mutations.add() - mutations1.set_cell.family_name = 'cf1' - mutations1.set_cell.column_qualifier = b'c1' + mutations1.set_cell.family_name = "cf1" + mutations1.set_cell.column_qualifier = b"c1" mutations1.set_cell.timestamp_micros = -1 - mutations1.set_cell.value = b'1' + mutations1.set_cell.value = b"1" entry2 = expected_result.entries.add() - entry2.row_key = b'row_key_2' + entry2.row_key = b"row_key_2" mutations2 = entry2.mutations.add() - mutations2.set_cell.family_name = 'cf1' - mutations2.set_cell.column_qualifier = b'c1' + mutations2.set_cell.family_name = "cf1" + mutations2.set_cell.column_qualifier = b"c1" mutations2.set_cell.timestamp_micros = -1 - mutations2.set_cell.value = b'2' + mutations2.set_cell.value = b"2" self.assertEqual(result, expected_result) class Test__check_row_table_name(unittest.TestCase): - def _call_fut(self, table_name, row): from google.cloud.bigtable.table import _check_row_table_name @@ -84,19 
+86,19 @@ def test_wrong_table_name(self): from google.cloud.bigtable.table import TableMismatchError from google.cloud.bigtable.row import DirectRow - table = mock.Mock(name='table', spec=['name']) - table.name = 'table' - row = DirectRow(row_key=b'row_key', table=table) + table = mock.Mock(name="table", spec=["name"]) + table.name = "table" + row = DirectRow(row_key=b"row_key", table=table) with self.assertRaises(TableMismatchError): - self._call_fut('other_table', row) + self._call_fut("other_table", row) def test_right_table_name(self): from google.cloud.bigtable.row import DirectRow - table = mock.Mock(name='table', spec=['name']) - table.name = 'table' - row = DirectRow(row_key=b'row_key', table=table) - result = self._call_fut('table', row) + table = mock.Mock(name="table", spec=["name"]) + table.name = "table" + row = DirectRow(row_key=b"row_key", table=table) + result = self._call_fut("table", row) self.assertFalse(result) @@ -109,33 +111,33 @@ def _call_fut(self, row): def test_test_wrong_row_type(self): from google.cloud.bigtable.row import ConditionalRow - row = ConditionalRow(row_key=b'row_key', table='table', filter_=None) + row = ConditionalRow(row_key=b"row_key", table="table", filter_=None) with self.assertRaises(TypeError): self._call_fut(row) def test_right_row_type(self): from google.cloud.bigtable.row import DirectRow - row = DirectRow(row_key=b'row_key', table='table') + row = DirectRow(row_key=b"row_key", table="table") result = self._call_fut(row) self.assertFalse(result) class TestTable(unittest.TestCase): - PROJECT_ID = 'project-id' - INSTANCE_ID = 'instance-id' - INSTANCE_NAME = ('projects/' + PROJECT_ID + '/instances/' + INSTANCE_ID) - TABLE_ID = 'table-id' - TABLE_NAME = INSTANCE_NAME + '/tables/' + TABLE_ID - ROW_KEY = b'row-key' - ROW_KEY_1 = b'row-key-1' - ROW_KEY_2 = b'row-key-2' - ROW_KEY_3 = b'row-key-3' - FAMILY_NAME = u'family' - QUALIFIER = b'qualifier' + PROJECT_ID = "project-id" + INSTANCE_ID = "instance-id" + INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID + TABLE_ID = "table-id" + TABLE_NAME = INSTANCE_NAME + "/tables/" + TABLE_ID + ROW_KEY = b"row-key" + ROW_KEY_1 = b"row-key-1" + ROW_KEY_2 = b"row-key-2" + ROW_KEY_3 = b"row-key-3" + FAMILY_NAME = u"family" + QUALIFIER = b"qualifier" TIMESTAMP_MICROS = 100 - VALUE = b'value' + VALUE = b"value" _json_tests = None @staticmethod @@ -158,8 +160,9 @@ def _make_client(self, *args, **kwargs): def test_constructor_w_admin(self): credentials = _make_credentials() - client = self._make_client(project=self.PROJECT_ID, - credentials=credentials, admin=True) + client = self._make_client( + project=self.PROJECT_ID, credentials=credentials, admin=True + ) instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) self.assertEqual(table.table_id, self.TABLE_ID) @@ -168,8 +171,9 @@ def test_constructor_w_admin(self): def test_constructor_wo_admin(self): credentials = _make_credentials() - client = self._make_client(project=self.PROJECT_ID, - credentials=credentials, admin=False) + client = self._make_client( + project=self.PROJECT_ID, credentials=credentials, admin=False + ) instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) self.assertEqual(table.table_id, self.TABLE_ID) @@ -180,11 +184,12 @@ def test_row_factory_direct(self): from google.cloud.bigtable.row import DirectRow credentials = _make_credentials() - client = self._make_client(project='project-id', - credentials=credentials, 
admin=True) + client = self._make_client( + project="project-id", credentials=credentials, admin=True + ) instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) - row_key = b'row_key' + row_key = b"row_key" row = table.row(row_key) self.assertIsInstance(row, DirectRow) @@ -195,11 +200,12 @@ def test_row_factory_conditional(self): from google.cloud.bigtable.row import ConditionalRow credentials = _make_credentials() - client = self._make_client(project='project-id', - credentials=credentials, admin=True) + client = self._make_client( + project="project-id", credentials=credentials, admin=True + ) instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) - row_key = b'row_key' + row_key = b"row_key" filter_ = object() row = table.row(row_key, filter_=filter_) @@ -211,11 +217,12 @@ def test_row_factory_append(self): from google.cloud.bigtable.row import AppendRow credentials = _make_credentials() - client = self._make_client(project='project-id', - credentials=credentials, admin=True) + client = self._make_client( + project="project-id", credentials=credentials, admin=True + ) instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) - row_key = b'row_key' + row_key = b"row_key" row = table.row(row_key, append=True) self.assertIsInstance(row, AppendRow) @@ -224,17 +231,19 @@ def test_row_factory_append(self): def test_row_factory_failure(self): credentials = _make_credentials() - client = self._make_client(project='project-id', - credentials=credentials, admin=True) + client = self._make_client( + project="project-id", credentials=credentials, admin=True + ) instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) with self.assertRaises(ValueError): - table.row(b'row_key', filter_=object(), append=True) + table.row(b"row_key", filter_=object(), append=True) def test___eq__(self): credentials = _make_credentials() - client = self._make_client(project='project-id', - credentials=credentials, admin=True) + client = self._make_client( + project="project-id", credentials=credentials, admin=True + ) instance = client.instance(instance_id=self.INSTANCE_ID) table1 = self._make_one(self.TABLE_ID, instance) table2 = self._make_one(self.TABLE_ID, instance) @@ -242,8 +251,9 @@ def test___eq__(self): def test___eq__type_differ(self): credentials = _make_credentials() - client = self._make_client(project='project-id', - credentials=credentials, admin=True) + client = self._make_client( + project="project-id", credentials=credentials, admin=True + ) instance = client.instance(instance_id=self.INSTANCE_ID) table1 = self._make_one(self.TABLE_ID, instance) table2 = object() @@ -251,32 +261,35 @@ def test___eq__type_differ(self): def test___ne__same_value(self): credentials = _make_credentials() - client = self._make_client(project='project-id', - credentials=credentials, admin=True) + client = self._make_client( + project="project-id", credentials=credentials, admin=True + ) instance = client.instance(instance_id=self.INSTANCE_ID) table1 = self._make_one(self.TABLE_ID, instance) table2 = self._make_one(self.TABLE_ID, instance) - comparison_val = (table1 != table2) + comparison_val = table1 != table2 self.assertFalse(comparison_val) def test___ne__(self): - table1 = self._make_one('table_id1', None) - table2 = self._make_one('table_id2', None) + table1 = self._make_one("table_id1", None) + table2 = 
self._make_one("table_id2", None) self.assertNotEqual(table1, table2) def _create_test_helper(self, split_keys=[], column_families={}): - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_table_admin_client) + from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client from google.cloud.bigtable_admin_v2.proto import table_pb2 from google.cloud.bigtable_admin_v2.proto import ( - bigtable_table_admin_pb2 as table_admin_messages_v2_pb2) + bigtable_table_admin_pb2 as table_admin_messages_v2_pb2, + ) from google.cloud.bigtable.column_family import ColumnFamily table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient) + bigtable_table_admin_client.BigtableTableAdminClient + ) credentials = _make_credentials() - client = self._make_client(project='project-id', - credentials=credentials, admin=True) + client = self._make_client( + project="project-id", credentials=credentials, admin=True + ) instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) @@ -284,11 +297,12 @@ def _create_test_helper(self, split_keys=[], column_families={}): client._table_admin_client = table_api # Perform the method and check the result. - table.create(column_families=column_families, - initial_split_keys=split_keys) + table.create(column_families=column_families, initial_split_keys=split_keys) - families = {id: ColumnFamily(id, self, rule).to_pb() - for (id, rule) in column_families.items()} + families = { + id: ColumnFamily(id, self, rule).to_pb() + for (id, rule) in column_families.items() + } split = table_admin_messages_v2_pb2.CreateTableRequest.Split splits = [split(key=split_key) for split_key in split_keys] @@ -297,7 +311,8 @@ def _create_test_helper(self, split_keys=[], column_families={}): parent=self.INSTANCE_NAME, table=table_pb2.Table(column_families=families), table_id=self.TABLE_ID, - initial_splits=splits) + initial_splits=splits, + ) def test_create(self): self._create_test_helper() @@ -309,48 +324,47 @@ def test_create_with_families(self): self._create_test_helper(column_families=families) def test_create_with_split_keys(self): - self._create_test_helper(split_keys=[b'split1', b'split2', b'split3']) + self._create_test_helper(split_keys=[b"split1", b"split2", b"split3"]) def test_exists(self): + from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_data_v2_pb2 from google.cloud.bigtable_admin_v2.proto import ( - table_pb2 as table_data_v2_pb2) - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_table_admin_pb2 as table_messages_v1_pb2) + bigtable_table_admin_pb2 as table_messages_v1_pb2, + ) from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_instance_admin_client, bigtable_table_admin_client) + bigtable_instance_admin_client, + bigtable_table_admin_client, + ) from google.api_core.exceptions import NotFound from google.api_core.exceptions import BadRequest - table_api = bigtable_table_admin_client.BigtableTableAdminClient( - mock.Mock()) - instance_api = ( - bigtable_instance_admin_client.BigtableInstanceAdminClient( - mock.Mock())) + table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) + instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient( + mock.Mock() + ) credentials = _make_credentials() - client = self._make_client(project='project-id', - credentials=credentials, admin=True) + client = self._make_client( + project="project-id", credentials=credentials, admin=True + ) instance = 
client.instance(instance_id=self.INSTANCE_ID) # Create response_pb response_pb = table_messages_v1_pb2.ListTablesResponse( - tables=[ - table_data_v2_pb2.Table(name=self.TABLE_NAME), - ], + tables=[table_data_v2_pb2.Table(name=self.TABLE_NAME)] ) # Patch API calls client._table_admin_client = table_api client._instance_admin_client = instance_api - bigtable_table_stub = ( - client._table_admin_client.transport) + bigtable_table_stub = client._table_admin_client.transport bigtable_table_stub.get_table.side_effect = [ response_pb, - NotFound('testing'), - BadRequest('testing') + NotFound("testing"), + BadRequest("testing"), ] # Perform the method and check the result. table1 = instance.table(self.TABLE_ID) - table2 = instance.table('table-id2') + table2 = instance.table("table-id2") result = table1.exists() self.assertEqual(True, result) @@ -362,14 +376,15 @@ def test_exists(self): table2.exists() def test_delete(self): - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_table_admin_client) + from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient) + bigtable_table_admin_client.BigtableTableAdminClient + ) credentials = _make_credentials() - client = self._make_client(project='project-id', - credentials=credentials, admin=True) + client = self._make_client( + project="project-id", credentials=credentials, admin=True + ) instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) @@ -384,34 +399,28 @@ def test_delete(self): self.assertEqual(result, expected_result) def _list_column_families_helper(self): - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_table_admin_client) + from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client - table_api = bigtable_table_admin_client.BigtableTableAdminClient( - mock.Mock()) + table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) credentials = _make_credentials() - client = self._make_client(project='project-id', - credentials=credentials, admin=True) + client = self._make_client( + project="project-id", credentials=credentials, admin=True + ) instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) # Create response_pb - COLUMN_FAMILY_ID = 'foo' + COLUMN_FAMILY_ID = "foo" column_family = _ColumnFamilyPB() - response_pb = _TablePB( - column_families={COLUMN_FAMILY_ID: column_family}, - ) + response_pb = _TablePB(column_families={COLUMN_FAMILY_ID: column_family}) # Patch the stub used by the API method. client._table_admin_client = table_api - bigtable_table_stub = ( - client._table_admin_client.transport) + bigtable_table_stub = client._table_admin_client.transport bigtable_table_stub.get_table.side_effect = [response_pb] # Create expected_result. - expected_result = { - COLUMN_FAMILY_ID: table.column_family(COLUMN_FAMILY_ID), - } + expected_result = {COLUMN_FAMILY_ID: table.column_family(COLUMN_FAMILY_ID)} # Perform the method and check the result. 
result = table.list_column_families() @@ -421,41 +430,40 @@ def test_list_column_families(self): self._list_column_families_helper() def test_get_cluster_states(self): - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_table_admin_client) + from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client from google.cloud.bigtable.enums import Table as enum_table from google.cloud.bigtable.table import ClusterState + INITIALIZING = enum_table.ReplicationState.INITIALIZING PLANNED_MAINTENANCE = enum_table.ReplicationState.PLANNED_MAINTENANCE READY = enum_table.ReplicationState.READY - table_api = bigtable_table_admin_client.BigtableTableAdminClient( - mock.Mock()) + table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) credentials = _make_credentials() - client = self._make_client(project='project-id', - credentials=credentials, admin=True) + client = self._make_client( + project="project-id", credentials=credentials, admin=True + ) instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) response_pb = _TablePB( - cluster_states={'cluster-id1': _ClusterStatePB(INITIALIZING), - 'cluster-id2': _ClusterStatePB( - PLANNED_MAINTENANCE), - 'cluster-id3': _ClusterStatePB(READY), - }, + cluster_states={ + "cluster-id1": _ClusterStatePB(INITIALIZING), + "cluster-id2": _ClusterStatePB(PLANNED_MAINTENANCE), + "cluster-id3": _ClusterStatePB(READY), + } ) # Patch the stub used by the API method. client._table_admin_client = table_api - bigtable_table_stub = ( - client._table_admin_client.transport) + bigtable_table_stub = client._table_admin_client.transport bigtable_table_stub.get_table.side_effect = [response_pb] # build expected result expected_result = { - u'cluster-id1': ClusterState(INITIALIZING), - u'cluster-id2': ClusterState(PLANNED_MAINTENANCE), - u'cluster-id3': ClusterState(READY) + u"cluster-id1": ClusterState(INITIALIZING), + u"cluster-id2": ClusterState(PLANNED_MAINTENANCE), + u"cluster-id3": ClusterState(READY), } # Perform the method and check the result. @@ -468,19 +476,19 @@ def _read_row_helper(self, chunks, expected_result, app_profile_id=None): from google.cloud.bigtable import table as MUT from google.cloud.bigtable.row_set import RowSet from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_table_admin_client) + from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client from google.cloud.bigtable.row_filters import RowSampleFilter data_api = bigtable_client.BigtableClient(mock.Mock()) table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient) + bigtable_table_admin_client.BigtableTableAdminClient + ) credentials = _make_credentials() - client = self._make_client(project='project-id', - credentials=credentials, admin=True) + client = self._make_client( + project="project-id", credentials=credentials, admin=True + ) instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_one(self.TABLE_ID, instance, - app_profile_id=app_profile_id) + table = self._make_one(self.TABLE_ID, instance, app_profile_id=app_profile_id) # Create request_pb request_pb = object() # Returned by our mock. 
@@ -501,7 +509,8 @@ def mock_create_row_request(table_name, **kwargs): client._table_data_client = data_api client._table_admin_client = table_api client._table_data_client.transport.read_rows = mock.Mock( - side_effect=[response_iterator]) + side_effect=[response_iterator] + ) # Perform the method and check the result. filter_obj = RowSampleFilter(0.33) @@ -510,11 +519,20 @@ def mock_create_row_request(table_name, **kwargs): result = table.read_row(self.ROW_KEY, filter_=filter_obj) row_set = RowSet() row_set.add_row_key(self.ROW_KEY) - expected_request = [(table.name, { - 'end_inclusive': False, 'row_set': row_set, - 'app_profile_id': app_profile_id, 'end_key': None, - 'limit': None, 'start_key': None, 'filter_': filter_obj - })] + expected_request = [ + ( + table.name, + { + "end_inclusive": False, + "row_set": row_set, + "app_profile_id": app_profile_id, + "end_key": None, + "limit": None, + "start_key": None, + "filter_": filter_obj, + }, + ) + ] self.assertEqual(result, expected_result) self.assertEqual(mock_created, expected_request) @@ -529,7 +547,7 @@ def test_read_row_complete(self): from google.cloud.bigtable.row_data import Cell from google.cloud.bigtable.row_data import PartialRowData - app_profile_id = 'app-profile-id' + app_profile_id = "app-profile-id" chunk = _ReadRowsResponseCellChunkPB( row_key=self.ROW_KEY, family_name=self.FAMILY_NAME, @@ -546,7 +564,7 @@ def test_read_row_complete(self): self._read_row_helper(chunks, expected_result, app_profile_id) def test_read_row_more_than_one_row_returned(self): - app_profile_id = 'app-profile-id' + app_profile_id = "app-profile-id" chunk_1 = _ReadRowsResponseCellChunkPB( row_key=self.ROW_KEY, family_name=self.FAMILY_NAME, @@ -561,7 +579,7 @@ def test_read_row_more_than_one_row_returned(self): qualifier=self.QUALIFIER, timestamp_micros=self.TIMESTAMP_MICROS, value=self.VALUE, - commit_row=True + commit_row=True, ) chunks = [chunk_1, chunk_2] @@ -583,14 +601,15 @@ def test_read_row_still_partial(self): def test_mutate_rows(self): from google.rpc.status_pb2 import Status - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_table_admin_client) + from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient) + bigtable_table_admin_client.BigtableTableAdminClient + ) credentials = _make_credentials() - client = self._make_client(project='project-id', - credentials=credentials, admin=True) + client = self._make_client( + project="project-id", credentials=credentials, admin=True + ) instance = client.instance(instance_id=self.INSTANCE_ID) client._table_admin_client = table_api table = self._make_one(self.TABLE_ID, instance) @@ -599,8 +618,9 @@ def test_mutate_rows(self): mock_worker = mock.Mock(return_value=response) with mock.patch( - 'google.cloud.bigtable.table._RetryableMutateRowsWorker', - new=mock.MagicMock(return_value=mock_worker)): + "google.cloud.bigtable.table._RetryableMutateRowsWorker", + new=mock.MagicMock(return_value=mock_worker), + ): statuses = table.mutate_rows([mock.MagicMock(), mock.MagicMock()]) result = [status.code for status in statuses] expected_result = [0, 1] @@ -612,21 +632,21 @@ def test_read_rows(self): from google.cloud.bigtable.row_data import PartialRowsData from google.cloud.bigtable import table as MUT from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_table_admin_client) + from google.cloud.bigtable_admin_v2.gapic 
import bigtable_table_admin_client data_api = bigtable_client.BigtableClient(mock.Mock()) table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient) + bigtable_table_admin_client.BigtableTableAdminClient + ) credentials = _make_credentials() - client = self._make_client(project='project-id', - credentials=credentials, admin=True) + client = self._make_client( + project="project-id", credentials=credentials, admin=True + ) client._table_data_client = data_api client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) - app_profile_id = 'app-profile-id' - table = self._make_one(self.TABLE_ID, instance, - app_profile_id=app_profile_id) + app_profile_id = "app-profile-id" + table = self._make_one(self.TABLE_ID, instance, app_profile_id=app_profile_id) # Create request_pb request = retry = object() # Returned by our mock. @@ -638,52 +658,53 @@ def mock_create_row_request(table_name, **kwargs): # Create expected_result. expected_result = PartialRowsData( - client._table_data_client.transport.read_rows, - request, retry) + client._table_data_client.transport.read_rows, request, retry + ) # Perform the method and check the result. - start_key = b'start-key' - end_key = b'end-key' + start_key = b"start-key" + end_key = b"end-key" filter_obj = object() limit = 22 with _Monkey(MUT, _create_row_request=mock_create_row_request): result = table.read_rows( - start_key=start_key, end_key=end_key, filter_=filter_obj, - limit=limit, retry=retry) + start_key=start_key, + end_key=end_key, + filter_=filter_obj, + limit=limit, + retry=retry, + ) self.assertEqual(result.rows, expected_result.rows) self.assertEqual(result.retry, expected_result.retry) created_kwargs = { - 'start_key': start_key, - 'end_key': end_key, - 'filter_': filter_obj, - 'limit': limit, - 'end_inclusive': False, - 'app_profile_id': app_profile_id, - 'row_set': None + "start_key": start_key, + "end_key": end_key, + "filter_": filter_obj, + "limit": limit, + "end_inclusive": False, + "app_profile_id": app_profile_id, + "row_set": None, } self.assertEqual(mock_created, [(table.name, created_kwargs)]) def test_read_retry_rows(self): from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_table_admin_client) + from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client from google.api_core import retry data_api = bigtable_client.BigtableClient(mock.Mock()) - table_api = bigtable_table_admin_client.BigtableTableAdminClient( - mock.Mock()) + table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) credentials = _make_credentials() - client = self._make_client(project='project-id', - credentials=credentials, admin=True) + client = self._make_client( + project="project-id", credentials=credentials, admin=True + ) client._table_data_client = data_api client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) - retry_read_rows = retry.Retry( - predicate=_read_rows_retry_exception, - ) + retry_read_rows = retry.Retry(predicate=_read_rows_retry_exception) # Create response_iterator chunk_1 = _ReadRowsResponseCellChunkPB( @@ -692,7 +713,7 @@ def test_read_retry_rows(self): qualifier=self.QUALIFIER, timestamp_micros=self.TIMESTAMP_MICROS, value=self.VALUE, - commit_row=True + commit_row=True, ) chunk_2 = _ReadRowsResponseCellChunkPB( @@ -701,7 +722,7 @@ def test_read_retry_rows(self): 
qualifier=self.QUALIFIER, timestamp_micros=self.TIMESTAMP_MICROS, value=self.VALUE, - commit_row=True + commit_row=True, ) response_1 = _ReadRowsResponseV2([chunk_1]) @@ -713,13 +734,16 @@ def test_read_retry_rows(self): # Patch the stub used by the API method. client._table_data_client.transport.read_rows = mock.Mock( side_effect=[ - response_failure_iterator_1, response_failure_iterator_2, - response_iterator]) + response_failure_iterator_1, + response_failure_iterator_2, + response_iterator, + ] + ) rows = [] - for row in table.read_rows(start_key=self.ROW_KEY_1, - end_key=self.ROW_KEY_2, - retry=retry_read_rows): + for row in table.read_rows( + start_key=self.ROW_KEY_1, end_key=self.ROW_KEY_2, retry=retry_read_rows + ): rows.append(row) result = rows[1] @@ -727,15 +751,14 @@ def test_read_retry_rows(self): def test_yield_retry_rows(self): from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_table_admin_client) + from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client data_api = bigtable_client.BigtableClient(mock.Mock()) - table_api = bigtable_table_admin_client.BigtableTableAdminClient( - mock.Mock()) + table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) credentials = _make_credentials() - client = self._make_client(project='project-id', - credentials=credentials, admin=True) + client = self._make_client( + project="project-id", credentials=credentials, admin=True + ) client._table_data_client = data_api client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) @@ -748,7 +771,7 @@ def test_yield_retry_rows(self): qualifier=self.QUALIFIER, timestamp_micros=self.TIMESTAMP_MICROS, value=self.VALUE, - commit_row=True + commit_row=True, ) chunk_2 = _ReadRowsResponseCellChunkPB( @@ -757,7 +780,7 @@ def test_yield_retry_rows(self): qualifier=self.QUALIFIER, timestamp_micros=self.TIMESTAMP_MICROS, value=self.VALUE, - commit_row=True + commit_row=True, ) response_1 = _ReadRowsResponseV2([chunk_1]) @@ -769,12 +792,14 @@ def test_yield_retry_rows(self): # Patch the stub used by the API method. 
client._table_data_client.transport.read_rows = mock.Mock( side_effect=[ - response_failure_iterator_1, response_failure_iterator_2, - response_iterator]) + response_failure_iterator_1, + response_failure_iterator_2, + response_iterator, + ] + ) rows = [] - for row in table.yield_rows(start_key=self.ROW_KEY_1, - end_key=self.ROW_KEY_2): + for row in table.yield_rows(start_key=self.ROW_KEY_1, end_key=self.ROW_KEY_2): rows.append(row) result = rows[1] @@ -782,17 +807,16 @@ def test_yield_retry_rows(self): def test_yield_rows_with_row_set(self): from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_table_admin_client) + from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client from google.cloud.bigtable.row_set import RowSet from google.cloud.bigtable.row_set import RowRange data_api = bigtable_client.BigtableClient(mock.Mock()) - table_api = bigtable_table_admin_client.BigtableTableAdminClient( - mock.Mock()) + table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) credentials = _make_credentials() - client = self._make_client(project='project-id', - credentials=credentials, admin=True) + client = self._make_client( + project="project-id", credentials=credentials, admin=True + ) client._table_data_client = data_api client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) @@ -805,7 +829,7 @@ def test_yield_rows_with_row_set(self): qualifier=self.QUALIFIER, timestamp_micros=self.TIMESTAMP_MICROS, value=self.VALUE, - commit_row=True + commit_row=True, ) chunk_2 = _ReadRowsResponseCellChunkPB( @@ -814,7 +838,7 @@ def test_yield_rows_with_row_set(self): qualifier=self.QUALIFIER, timestamp_micros=self.TIMESTAMP_MICROS, value=self.VALUE, - commit_row=True + commit_row=True, ) chunk_3 = _ReadRowsResponseCellChunkPB( @@ -823,23 +847,24 @@ def test_yield_rows_with_row_set(self): qualifier=self.QUALIFIER, timestamp_micros=self.TIMESTAMP_MICROS, value=self.VALUE, - commit_row=True + commit_row=True, ) response_1 = _ReadRowsResponseV2([chunk_1]) response_2 = _ReadRowsResponseV2([chunk_2]) response_3 = _ReadRowsResponseV2([chunk_3]) - response_iterator = _MockReadRowsIterator(response_1, response_2, - response_3) + response_iterator = _MockReadRowsIterator(response_1, response_2, response_3) # Patch the stub used by the API method. 
client._table_data_client.transport.read_rows = mock.Mock( - side_effect=[response_iterator]) + side_effect=[response_iterator] + ) rows = [] row_set = RowSet() - row_set.add_row_range(RowRange(start_key=self.ROW_KEY_1, - end_key=self.ROW_KEY_2)) + row_set.add_row_range( + RowRange(start_key=self.ROW_KEY_1, end_key=self.ROW_KEY_2) + ) row_set.add_row_key(self.ROW_KEY_3) for row in table.yield_rows(row_set=row_set): rows.append(row) @@ -850,15 +875,14 @@ def test_yield_rows_with_row_set(self): def test_sample_row_keys(self): from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_table_admin_client) + from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client data_api = bigtable_client.BigtableClient(mock.Mock()) - table_api = bigtable_table_admin_client.BigtableTableAdminClient( - mock.Mock()) + table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) credentials = _make_credentials() - client = self._make_client(project='project-id', - credentials=credentials, admin=True) + client = self._make_client( + project="project-id", credentials=credentials, admin=True + ) client._table_data_client = data_api client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) @@ -869,8 +893,9 @@ def test_sample_row_keys(self): # Patch the stub used by the API method. inner_api_calls = client._table_data_client._inner_api_calls - inner_api_calls['sample_row_keys'] = mock.Mock( - side_effect=[[response_iterator]]) + inner_api_calls["sample_row_keys"] = mock.Mock( + side_effect=[[response_iterator]] + ) # Create expected_result. expected_result = response_iterator @@ -881,43 +906,43 @@ def test_sample_row_keys(self): def test_truncate(self): from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_table_admin_client) + from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client data_api = mock.create_autospec(bigtable_client.BigtableClient) table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient) + bigtable_table_admin_client.BigtableTableAdminClient + ) credentials = _make_credentials() - client = self._make_client(project='project-id', - credentials=credentials, admin=True) + client = self._make_client( + project="project-id", credentials=credentials, admin=True + ) client._table_data_client = data_api client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) expected_result = None # truncate() has no return value. 
- with mock.patch('google.cloud.bigtable.table.Table.name', - new=self.TABLE_NAME): + with mock.patch("google.cloud.bigtable.table.Table.name", new=self.TABLE_NAME): result = table.truncate() table_api.drop_row_range.assert_called_once_with( - name=self.TABLE_NAME, - delete_all_data_from_table=True, + name=self.TABLE_NAME, delete_all_data_from_table=True ) self.assertEqual(result, expected_result) def test_truncate_w_timeout(self): from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_table_admin_client) + from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client data_api = mock.create_autospec(bigtable_client.BigtableClient) table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient) + bigtable_table_admin_client.BigtableTableAdminClient + ) credentials = _make_credentials() - client = self._make_client(project='project-id', - credentials=credentials, admin=True) + client = self._make_client( + project="project-id", credentials=credentials, admin=True + ) client._table_data_client = data_api client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) @@ -932,15 +957,16 @@ def test_truncate_w_timeout(self): def test_drop_by_prefix(self): from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_table_admin_client) + from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client data_api = mock.create_autospec(bigtable_client.BigtableClient) table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient) + bigtable_table_admin_client.BigtableTableAdminClient + ) credentials = _make_credentials() - client = self._make_client(project='project-id', - credentials=credentials, admin=True) + client = self._make_client( + project="project-id", credentials=credentials, admin=True + ) client._table_data_client = data_api client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) @@ -948,7 +974,7 @@ def test_drop_by_prefix(self): expected_result = None # drop_by_prefix() has no return value. - row_key_prefix = 'row-key-prefix' + row_key_prefix = "row-key-prefix" result = table.drop_by_prefix(row_key_prefix=row_key_prefix) @@ -956,15 +982,16 @@ def test_drop_by_prefix(self): def test_drop_by_prefix_w_timeout(self): from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_table_admin_client) + from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client data_api = mock.create_autospec(bigtable_client.BigtableClient) table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient) + bigtable_table_admin_client.BigtableTableAdminClient + ) credentials = _make_credentials() - client = self._make_client(project='project-id', - credentials=credentials, admin=True) + client = self._make_client( + project="project-id", credentials=credentials, admin=True + ) client._table_data_client = data_api client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) @@ -972,11 +999,10 @@ def test_drop_by_prefix_w_timeout(self): expected_result = None # drop_by_prefix() has no return value. 
- row_key_prefix = 'row-key-prefix' + row_key_prefix = "row-key-prefix" timeout = 120 - result = table.drop_by_prefix(row_key_prefix=row_key_prefix, - timeout=timeout) + result = table.drop_by_prefix(row_key_prefix=row_key_prefix, timeout=timeout) self.assertEqual(result, expected_result) @@ -985,7 +1011,8 @@ def test_mutations_batcher_factory(self): max_row_bytes = 1000 table = self._make_one(self.TABLE_ID, None) mutation_batcher = table.mutations_batcher( - flush_count=flush_count, max_row_bytes=max_row_bytes) + flush_count=flush_count, max_row_bytes=max_row_bytes + ) self.assertEqual(mutation_batcher.table.table_id, self.TABLE_ID) self.assertEqual(mutation_batcher.flush_count, flush_count) @@ -995,10 +1022,10 @@ def test_mutations_batcher_factory(self): class Test__RetryableMutateRowsWorker(unittest.TestCase): from grpc import StatusCode - PROJECT_ID = 'project-id' - INSTANCE_ID = 'instance-id' - INSTANCE_NAME = ('projects/' + PROJECT_ID + '/instances/' + INSTANCE_ID) - TABLE_ID = 'table-id' + PROJECT_ID = "project-id" + INSTANCE_ID = "instance-id" + INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID + TABLE_ID = "table-id" # RPC Status Codes SUCCESS = StatusCode.OK.value[0] @@ -1041,26 +1068,27 @@ def _make_responses_statuses(self, codes): def _make_responses(self, codes): import six - from google.cloud.bigtable_v2.proto.bigtable_pb2 import ( - MutateRowsResponse) + from google.cloud.bigtable_v2.proto.bigtable_pb2 import MutateRowsResponse from google.rpc.status_pb2 import Status - entries = [MutateRowsResponse.Entry( - index=i, status=Status(code=codes[i])) - for i in six.moves.xrange(len(codes))] + entries = [ + MutateRowsResponse.Entry(index=i, status=Status(code=codes[i])) + for i in six.moves.xrange(len(codes)) + ] return MutateRowsResponse(entries=entries) def test_callable_empty_rows(self): from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_table_admin_client) + from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client data_api = mock.create_autospec(bigtable_client.BigtableClient) table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient) + bigtable_table_admin_client.BigtableTableAdminClient + ) credentials = _make_credentials() - client = self._make_client(project='project-id', - credentials=credentials, admin=True) + client = self._make_client( + project="project-id", credentials=credentials, admin=True + ) client._table_data_client = data_api client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) @@ -1074,8 +1102,7 @@ def test_callable_empty_rows(self): def test_callable_no_retry_strategy(self): from google.cloud.bigtable.row import DirectRow from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_table_admin_client) + from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client # Setup: # - Mutate 3 rows. 
@@ -1088,52 +1115,44 @@ def test_callable_no_retry_strategy(self): # [success, retryable, non-retryable] data_api = bigtable_client.BigtableClient(mock.Mock()) - table_api = bigtable_table_admin_client.BigtableTableAdminClient( - mock.Mock()) + table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) credentials = _make_credentials() - client = self._make_client(project='project-id', - credentials=credentials, admin=True) + client = self._make_client( + project="project-id", credentials=credentials, admin=True + ) client._table_data_client = data_api client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_table(self.TABLE_ID, instance) - row_1 = DirectRow(row_key=b'row_key', table=table) - row_1.set_cell('cf', b'col', b'value1') - row_2 = DirectRow(row_key=b'row_key_2', table=table) - row_2.set_cell('cf', b'col', b'value2') - row_3 = DirectRow(row_key=b'row_key_3', table=table) - row_3.set_cell('cf', b'col', b'value3') + row_1 = DirectRow(row_key=b"row_key", table=table) + row_1.set_cell("cf", b"col", b"value1") + row_2 = DirectRow(row_key=b"row_key_2", table=table) + row_2.set_cell("cf", b"col", b"value2") + row_3 = DirectRow(row_key=b"row_key_3", table=table) + row_3.set_cell("cf", b"col", b"value3") - response = self._make_responses([ - self.SUCCESS, - self.RETRYABLE_1, - self.NON_RETRYABLE]) + response = self._make_responses( + [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE] + ) - with mock.patch( - 'google.cloud.bigtable.table.wrap_method') as patched: - patched.return_value = mock.Mock( - return_value=[response]) + with mock.patch("google.cloud.bigtable.table.wrap_method") as patched: + patched.return_value = mock.Mock(return_value=[response]) - worker = self._make_worker( - client, - table.name, - [row_1, row_2, row_3]) + worker = self._make_worker(client, table.name, [row_1, row_2, row_3]) statuses = worker(retry=None) result = [status.code for status in statuses] expected_result = [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE] - client._table_data_client._inner_api_calls[ - 'mutate_rows'].assert_called_once() + client._table_data_client._inner_api_calls["mutate_rows"].assert_called_once() self.assertEqual(result, expected_result) def test_callable_retry(self): from google.cloud.bigtable.row import DirectRow from google.cloud.bigtable.table import DEFAULT_RETRY from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_table_admin_client) + from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client # Setup: # - Mutate 3 rows. 
@@ -1147,32 +1166,32 @@ def test_callable_retry(self): # [success, success, non-retryable] data_api = bigtable_client.BigtableClient(mock.Mock()) - table_api = bigtable_table_admin_client.BigtableTableAdminClient( - mock.Mock()) + table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) credentials = _make_credentials() - client = self._make_client(project='project-id', - credentials=credentials, admin=True) + client = self._make_client( + project="project-id", credentials=credentials, admin=True + ) client._table_data_client = data_api client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_table(self.TABLE_ID, instance) - row_1 = DirectRow(row_key=b'row_key', table=table) - row_1.set_cell('cf', b'col', b'value1') - row_2 = DirectRow(row_key=b'row_key_2', table=table) - row_2.set_cell('cf', b'col', b'value2') - row_3 = DirectRow(row_key=b'row_key_3', table=table) - row_3.set_cell('cf', b'col', b'value3') + row_1 = DirectRow(row_key=b"row_key", table=table) + row_1.set_cell("cf", b"col", b"value1") + row_2 = DirectRow(row_key=b"row_key_2", table=table) + row_2.set_cell("cf", b"col", b"value2") + row_3 = DirectRow(row_key=b"row_key_3", table=table) + row_3.set_cell("cf", b"col", b"value3") - response_1 = self._make_responses([ - self.SUCCESS, - self.RETRYABLE_1, - self.NON_RETRYABLE]) + response_1 = self._make_responses( + [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE] + ) response_2 = self._make_responses([self.SUCCESS]) # Patch the stub used by the API method. - client._table_data_client._inner_api_calls['mutate_rows'] = mock.Mock( - side_effect=[[response_1], [response_2]]) + client._table_data_client._inner_api_calls["mutate_rows"] = mock.Mock( + side_effect=[[response_1], [response_2]] + ) retry = DEFAULT_RETRY.with_delay(initial=0.1) worker = self._make_worker(client, table.name, [row_1, row_2, row_3]) @@ -1182,17 +1201,15 @@ def test_callable_retry(self): expected_result = [self.SUCCESS, self.SUCCESS, self.NON_RETRYABLE] self.assertEqual( - client._table_data_client._inner_api_calls[ - 'mutate_rows'].call_count, - 2) + client._table_data_client._inner_api_calls["mutate_rows"].call_count, 2 + ) self.assertEqual(result, expected_result) def test_callable_retry_timeout(self): from google.cloud.bigtable.row import DirectRow from google.cloud.bigtable.table import DEFAULT_RETRY from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_table_admin_client) + from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client # Setup: # - Mutate 2 rows. 
@@ -1206,29 +1223,30 @@ def test_callable_retry_timeout(self): # [retryable, retryable] data_api = bigtable_client.BigtableClient(mock.Mock()) - table_api = bigtable_table_admin_client.BigtableTableAdminClient( - mock.Mock()) + table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) credentials = _make_credentials() - client = self._make_client(project='project-id', - credentials=credentials, admin=True) + client = self._make_client( + project="project-id", credentials=credentials, admin=True + ) client._table_data_client = data_api client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_table(self.TABLE_ID, instance) - row_1 = DirectRow(row_key=b'row_key', table=table) - row_1.set_cell('cf', b'col', b'value1') - row_2 = DirectRow(row_key=b'row_key_2', table=table) - row_2.set_cell('cf', b'col', b'value2') + row_1 = DirectRow(row_key=b"row_key", table=table) + row_1.set_cell("cf", b"col", b"value1") + row_2 = DirectRow(row_key=b"row_key_2", table=table) + row_2.set_cell("cf", b"col", b"value2") response = self._make_responses([self.RETRYABLE_1, self.RETRYABLE_1]) # Patch the stub used by the API method. inner_api_calls = client._table_data_client._inner_api_calls - inner_api_calls['mutate_rows'] = mock.Mock(return_value=[response]) + inner_api_calls["mutate_rows"] = mock.Mock(return_value=[response]) retry = DEFAULT_RETRY.with_delay( - initial=0.1, maximum=0.2, multiplier=2.0).with_deadline(0.5) + initial=0.1, maximum=0.2, multiplier=2.0 + ).with_deadline(0.5) worker = self._make_worker(client, table.name, [row_1, row_2]) statuses = worker(retry=retry) @@ -1236,19 +1254,20 @@ def test_callable_retry_timeout(self): expected_result = [self.RETRYABLE_1, self.RETRYABLE_1] self.assertTrue( - client._table_data_client._inner_api_calls[ - 'mutate_rows'].call_count > 1) + client._table_data_client._inner_api_calls["mutate_rows"].call_count > 1 + ) self.assertEqual(result, expected_result) def test_do_mutate_retryable_rows_empty_rows(self): - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_table_admin_client) + from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient) + bigtable_table_admin_client.BigtableTableAdminClient + ) credentials = _make_credentials() - client = self._make_client(project='project-id', - credentials=credentials, admin=True) + client = self._make_client( + project="project-id", credentials=credentials, admin=True + ) client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_table(self.TABLE_ID, instance) @@ -1261,8 +1280,7 @@ def test_do_mutate_retryable_rows_empty_rows(self): def test_do_mutate_retryable_rows(self): from google.cloud.bigtable.row import DirectRow from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_table_admin_client) + from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client # Setup: # - Mutate 2 rows. 
@@ -1272,26 +1290,26 @@ def test_do_mutate_retryable_rows(self): # - Expect [success, non-retryable] data_api = bigtable_client.BigtableClient(mock.Mock()) - table_api = bigtable_table_admin_client.BigtableTableAdminClient( - mock.Mock()) + table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) credentials = _make_credentials() - client = self._make_client(project='project-id', - credentials=credentials, admin=True) + client = self._make_client( + project="project-id", credentials=credentials, admin=True + ) client._table_data_client = data_api client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_table(self.TABLE_ID, instance) - row_1 = DirectRow(row_key=b'row_key', table=table) - row_1.set_cell('cf', b'col', b'value1') - row_2 = DirectRow(row_key=b'row_key_2', table=table) - row_2.set_cell('cf', b'col', b'value2') + row_1 = DirectRow(row_key=b"row_key", table=table) + row_1.set_cell("cf", b"col", b"value1") + row_2 = DirectRow(row_key=b"row_key_2", table=table) + row_2.set_cell("cf", b"col", b"value2") response = self._make_responses([self.SUCCESS, self.NON_RETRYABLE]) # Patch the stub used by the API method. inner_api_calls = client._table_data_client._inner_api_calls - inner_api_calls['mutate_rows'] = mock.Mock(side_effect=[[response]]) + inner_api_calls["mutate_rows"] = mock.Mock(side_effect=[[response]]) worker = self._make_worker(client, table.name, [row_1, row_2]) statuses = worker._do_mutate_retryable_rows() @@ -1305,8 +1323,7 @@ def test_do_mutate_retryable_rows_retry(self): from google.cloud.bigtable.row import DirectRow from google.cloud.bigtable.table import _BigtableRetryableError from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_table_admin_client) + from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client # Setup: # - Mutate 3 rows. @@ -1318,31 +1335,30 @@ def test_do_mutate_retryable_rows_retry(self): # [success, retryable, non-retryable] data_api = bigtable_client.BigtableClient(mock.Mock()) - table_api = bigtable_table_admin_client.BigtableTableAdminClient( - mock.Mock()) + table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) credentials = _make_credentials() - client = self._make_client(project='project-id', - credentials=credentials, admin=True) + client = self._make_client( + project="project-id", credentials=credentials, admin=True + ) client._table_data_client = data_api client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_table(self.TABLE_ID, instance) - row_1 = DirectRow(row_key=b'row_key', table=table) - row_1.set_cell('cf', b'col', b'value1') - row_2 = DirectRow(row_key=b'row_key_2', table=table) - row_2.set_cell('cf', b'col', b'value2') - row_3 = DirectRow(row_key=b'row_key_3', table=table) - row_3.set_cell('cf', b'col', b'value3') + row_1 = DirectRow(row_key=b"row_key", table=table) + row_1.set_cell("cf", b"col", b"value1") + row_2 = DirectRow(row_key=b"row_key_2", table=table) + row_2.set_cell("cf", b"col", b"value2") + row_3 = DirectRow(row_key=b"row_key_3", table=table) + row_3.set_cell("cf", b"col", b"value3") - response = self._make_responses([ - self.SUCCESS, - self.RETRYABLE_1, - self.NON_RETRYABLE]) + response = self._make_responses( + [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE] + ) # Patch the stub used by the API method. 
inner_api_calls = client._table_data_client._inner_api_calls - inner_api_calls['mutate_rows'] = mock.Mock(side_effect=[[response]]) + inner_api_calls["mutate_rows"] = mock.Mock(side_effect=[[response]]) worker = self._make_worker(client, table.name, [row_1, row_2, row_3]) @@ -1359,8 +1375,7 @@ def test_do_mutate_retryable_rows_second_retry(self): from google.cloud.bigtable.row import DirectRow from google.cloud.bigtable.table import _BigtableRetryableError from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_table_admin_client) + from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client # Setup: # - Mutate 4 rows. @@ -1377,56 +1392,54 @@ def test_do_mutate_retryable_rows_second_retry(self): # only two rows were retried. data_api = bigtable_client.BigtableClient(mock.Mock()) - table_api = bigtable_table_admin_client.BigtableTableAdminClient( - mock.Mock()) + table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) credentials = _make_credentials() - client = self._make_client(project='project-id', - credentials=credentials, admin=True) + client = self._make_client( + project="project-id", credentials=credentials, admin=True + ) client._table_data_client = data_api client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_table(self.TABLE_ID, instance) - row_1 = DirectRow(row_key=b'row_key', table=table) - row_1.set_cell('cf', b'col', b'value1') - row_2 = DirectRow(row_key=b'row_key_2', table=table) - row_2.set_cell('cf', b'col', b'value2') - row_3 = DirectRow(row_key=b'row_key_3', table=table) - row_3.set_cell('cf', b'col', b'value3') - row_4 = DirectRow(row_key=b'row_key_4', table=table) - row_4.set_cell('cf', b'col', b'value4') + row_1 = DirectRow(row_key=b"row_key", table=table) + row_1.set_cell("cf", b"col", b"value1") + row_2 = DirectRow(row_key=b"row_key_2", table=table) + row_2.set_cell("cf", b"col", b"value2") + row_3 = DirectRow(row_key=b"row_key_3", table=table) + row_3.set_cell("cf", b"col", b"value3") + row_4 = DirectRow(row_key=b"row_key_4", table=table) + row_4.set_cell("cf", b"col", b"value4") response = self._make_responses([self.SUCCESS, self.RETRYABLE_1]) # Patch the stub used by the API method. 
inner_api_calls = client._table_data_client._inner_api_calls - inner_api_calls['mutate_rows'] = mock.Mock(side_effect=[[response]]) + inner_api_calls["mutate_rows"] = mock.Mock(side_effect=[[response]]) - worker = self._make_worker(client, table.name, - [row_1, row_2, row_3, row_4]) - worker.responses_statuses = self._make_responses_statuses([ - self.SUCCESS, - self.RETRYABLE_1, - self.NON_RETRYABLE, - self.RETRYABLE_2]) + worker = self._make_worker(client, table.name, [row_1, row_2, row_3, row_4]) + worker.responses_statuses = self._make_responses_statuses( + [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE, self.RETRYABLE_2] + ) with self.assertRaises(_BigtableRetryableError): worker._do_mutate_retryable_rows() statuses = worker.responses_statuses result = [status.code for status in statuses] - expected_result = [self.SUCCESS, - self.SUCCESS, - self.NON_RETRYABLE, - self.RETRYABLE_1] + expected_result = [ + self.SUCCESS, + self.SUCCESS, + self.NON_RETRYABLE, + self.RETRYABLE_1, + ] self.assertEqual(result, expected_result) def test_do_mutate_retryable_rows_second_try(self): from google.cloud.bigtable.row import DirectRow from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_table_admin_client) + from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client # Setup: # - Mutate 4 rows. @@ -1439,53 +1452,51 @@ def test_do_mutate_retryable_rows_second_try(self): # [success, non-retryable, non-retryable, success] data_api = bigtable_client.BigtableClient(mock.Mock()) - table_api = bigtable_table_admin_client.BigtableTableAdminClient( - mock.Mock()) + table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) credentials = _make_credentials() - client = self._make_client(project='project-id', - credentials=credentials, admin=True) + client = self._make_client( + project="project-id", credentials=credentials, admin=True + ) client._table_data_client = data_api client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_table(self.TABLE_ID, instance) - row_1 = DirectRow(row_key=b'row_key', table=table) - row_1.set_cell('cf', b'col', b'value1') - row_2 = DirectRow(row_key=b'row_key_2', table=table) - row_2.set_cell('cf', b'col', b'value2') - row_3 = DirectRow(row_key=b'row_key_3', table=table) - row_3.set_cell('cf', b'col', b'value3') - row_4 = DirectRow(row_key=b'row_key_4', table=table) - row_4.set_cell('cf', b'col', b'value4') + row_1 = DirectRow(row_key=b"row_key", table=table) + row_1.set_cell("cf", b"col", b"value1") + row_2 = DirectRow(row_key=b"row_key_2", table=table) + row_2.set_cell("cf", b"col", b"value2") + row_3 = DirectRow(row_key=b"row_key_3", table=table) + row_3.set_cell("cf", b"col", b"value3") + row_4 = DirectRow(row_key=b"row_key_4", table=table) + row_4.set_cell("cf", b"col", b"value4") response = self._make_responses([self.NON_RETRYABLE, self.SUCCESS]) # Patch the stub used by the API method. 
inner_api_calls = client._table_data_client._inner_api_calls - inner_api_calls['mutate_rows'] = mock.Mock(side_effect=[[response]]) + inner_api_calls["mutate_rows"] = mock.Mock(side_effect=[[response]]) - worker = self._make_worker(client, table.name, - [row_1, row_2, row_3, row_4]) - worker.responses_statuses = self._make_responses_statuses([ - self.SUCCESS, - self.RETRYABLE_1, - self.NON_RETRYABLE, - self.RETRYABLE_2]) + worker = self._make_worker(client, table.name, [row_1, row_2, row_3, row_4]) + worker.responses_statuses = self._make_responses_statuses( + [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE, self.RETRYABLE_2] + ) statuses = worker._do_mutate_retryable_rows() result = [status.code for status in statuses] - expected_result = [self.SUCCESS, - self.NON_RETRYABLE, - self.NON_RETRYABLE, - self.SUCCESS] + expected_result = [ + self.SUCCESS, + self.NON_RETRYABLE, + self.NON_RETRYABLE, + self.SUCCESS, + ] self.assertEqual(result, expected_result) def test_do_mutate_retryable_rows_second_try_no_retryable(self): from google.cloud.bigtable.row import DirectRow - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_table_admin_client) + from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client # Setup: # - Mutate 2 rows. @@ -1496,22 +1507,25 @@ def test_do_mutate_retryable_rows_second_try_no_retryable(self): # - After second try: [success, non-retryable] table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient) + bigtable_table_admin_client.BigtableTableAdminClient + ) credentials = _make_credentials() - client = self._make_client(project='project-id', - credentials=credentials, admin=True) + client = self._make_client( + project="project-id", credentials=credentials, admin=True + ) client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_table(self.TABLE_ID, instance) - row_1 = DirectRow(row_key=b'row_key', table=table) - row_1.set_cell('cf', b'col', b'value1') - row_2 = DirectRow(row_key=b'row_key_2', table=table) - row_2.set_cell('cf', b'col', b'value2') + row_1 = DirectRow(row_key=b"row_key", table=table) + row_1.set_cell("cf", b"col", b"value1") + row_2 = DirectRow(row_key=b"row_key_2", table=table) + row_2.set_cell("cf", b"col", b"value2") worker = self._make_worker(client, table.name, [row_1, row_2]) worker.responses_statuses = self._make_responses_statuses( - [self.SUCCESS, self.NON_RETRYABLE]) + [self.SUCCESS, self.NON_RETRYABLE] + ) statuses = worker._do_mutate_retryable_rows() @@ -1523,30 +1537,29 @@ def test_do_mutate_retryable_rows_second_try_no_retryable(self): def test_do_mutate_retryable_rows_mismatch_num_responses(self): from google.cloud.bigtable.row import DirectRow from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_table_admin_client) + from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client data_api = bigtable_client.BigtableClient(mock.Mock()) - table_api = bigtable_table_admin_client.BigtableTableAdminClient( - mock.Mock()) + table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) credentials = _make_credentials() - client = self._make_client(project='project-id', - credentials=credentials, admin=True) + client = self._make_client( + project="project-id", credentials=credentials, admin=True + ) client._table_data_client = data_api client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) table 
= self._make_table(self.TABLE_ID, instance) - row_1 = DirectRow(row_key=b'row_key', table=table) - row_1.set_cell('cf', b'col', b'value1') - row_2 = DirectRow(row_key=b'row_key_2', table=table) - row_2.set_cell('cf', b'col', b'value2') + row_1 = DirectRow(row_key=b"row_key", table=table) + row_1.set_cell("cf", b"col", b"value1") + row_2 = DirectRow(row_key=b"row_key_2", table=table) + row_2.set_cell("cf", b"col", b"value2") response = self._make_responses([self.SUCCESS]) # Patch the stub used by the API method. inner_api_calls = client._table_data_client._inner_api_calls - inner_api_calls['mutate_rows'] = mock.Mock(side_effect=[[response]]) + inner_api_calls["mutate_rows"] = mock.Mock(side_effect=[[response]]) worker = self._make_worker(client, table.name, [row_1, row_2]) with self.assertRaises(RuntimeError): @@ -1554,23 +1567,35 @@ def test_do_mutate_retryable_rows_mismatch_num_responses(self): class Test__create_row_request(unittest.TestCase): - - def _call_fut(self, table_name, start_key=None, end_key=None, - filter_=None, limit=None, end_inclusive=False, - app_profile_id=None, row_set=None): + def _call_fut( + self, + table_name, + start_key=None, + end_key=None, + filter_=None, + limit=None, + end_inclusive=False, + app_profile_id=None, + row_set=None, + ): from google.cloud.bigtable.table import _create_row_request return _create_row_request( - table_name, start_key=start_key, end_key=end_key, - filter_=filter_, limit=limit, end_inclusive=end_inclusive, - app_profile_id=app_profile_id, row_set=row_set) + table_name, + start_key=start_key, + end_key=end_key, + filter_=filter_, + limit=limit, + end_inclusive=end_inclusive, + app_profile_id=app_profile_id, + row_set=row_set, + ) def test_table_name_only(self): - table_name = 'table_name' + table_name = "table_name" result = self._call_fut(table_name) - expected_result = _ReadRowsRequestPB( - table_name=table_name) + expected_result = _ReadRowsRequestPB(table_name=table_name) self.assertEqual(result, expected_result) def test_row_range_row_set_conflict(self): @@ -1578,90 +1603,85 @@ def test_row_range_row_set_conflict(self): self._call_fut(None, end_key=object(), row_set=object()) def test_row_range_start_key(self): - table_name = 'table_name' - start_key = b'start_key' + table_name = "table_name" + start_key = b"start_key" result = self._call_fut(table_name, start_key=start_key) expected_result = _ReadRowsRequestPB(table_name=table_name) expected_result.rows.row_ranges.add(start_key_closed=start_key) self.assertEqual(result, expected_result) def test_row_range_end_key(self): - table_name = 'table_name' - end_key = b'end_key' + table_name = "table_name" + end_key = b"end_key" result = self._call_fut(table_name, end_key=end_key) expected_result = _ReadRowsRequestPB(table_name=table_name) expected_result.rows.row_ranges.add(end_key_open=end_key) self.assertEqual(result, expected_result) def test_row_range_both_keys(self): - table_name = 'table_name' - start_key = b'start_key' - end_key = b'end_key' - result = self._call_fut(table_name, start_key=start_key, - end_key=end_key) + table_name = "table_name" + start_key = b"start_key" + end_key = b"end_key" + result = self._call_fut(table_name, start_key=start_key, end_key=end_key) expected_result = _ReadRowsRequestPB(table_name=table_name) expected_result.rows.row_ranges.add( - start_key_closed=start_key, end_key_open=end_key) + start_key_closed=start_key, end_key_open=end_key + ) self.assertEqual(result, expected_result) def test_row_range_both_keys_inclusive(self): - table_name = 'table_name' 
- start_key = b'start_key' - end_key = b'end_key' - result = self._call_fut(table_name, start_key=start_key, - end_key=end_key, end_inclusive=True) + table_name = "table_name" + start_key = b"start_key" + end_key = b"end_key" + result = self._call_fut( + table_name, start_key=start_key, end_key=end_key, end_inclusive=True + ) expected_result = _ReadRowsRequestPB(table_name=table_name) expected_result.rows.row_ranges.add( - start_key_closed=start_key, end_key_closed=end_key) + start_key_closed=start_key, end_key_closed=end_key + ) self.assertEqual(result, expected_result) def test_with_filter(self): from google.cloud.bigtable.row_filters import RowSampleFilter - table_name = 'table_name' + table_name = "table_name" row_filter = RowSampleFilter(0.33) result = self._call_fut(table_name, filter_=row_filter) expected_result = _ReadRowsRequestPB( - table_name=table_name, - filter=row_filter.to_pb(), + table_name=table_name, filter=row_filter.to_pb() ) self.assertEqual(result, expected_result) def test_with_limit(self): - table_name = 'table_name' + table_name = "table_name" limit = 1337 result = self._call_fut(table_name, limit=limit) - expected_result = _ReadRowsRequestPB( - table_name=table_name, - rows_limit=limit, - ) + expected_result = _ReadRowsRequestPB(table_name=table_name, rows_limit=limit) self.assertEqual(result, expected_result) def test_with_row_set(self): from google.cloud.bigtable.row_set import RowSet - table_name = 'table_name' + + table_name = "table_name" row_set = RowSet() result = self._call_fut(table_name, row_set=row_set) expected_result = _ReadRowsRequestPB(table_name=table_name) self.assertEqual(result, expected_result) def test_with_app_profile_id(self): - table_name = 'table_name' + table_name = "table_name" limit = 1337 - app_profile_id = 'app-profile-id' - result = self._call_fut(table_name, limit=limit, - app_profile_id=app_profile_id) + app_profile_id = "app-profile-id" + result = self._call_fut(table_name, limit=limit, app_profile_id=app_profile_id) expected_result = _ReadRowsRequestPB( - table_name=table_name, - rows_limit=limit, - app_profile_id=app_profile_id + table_name=table_name, rows_limit=limit, app_profile_id=app_profile_id ) self.assertEqual(result, expected_result) def _ReadRowsRequestPB(*args, **kw): - from google.cloud.bigtable_v2.proto import ( - bigtable_pb2 as messages_v2_pb2) + from google.cloud.bigtable_v2.proto import bigtable_pb2 as messages_v2_pb2 return messages_v2_pb2.ReadRowsRequest(*args, **kw) @@ -1670,6 +1690,7 @@ class Test_ClusterState(unittest.TestCase): def test___eq__(self): from google.cloud.bigtable.enums import Table as enum_table from google.cloud.bigtable.table import ClusterState + READY = enum_table.ReplicationState.READY state1 = ClusterState(READY) state2 = ClusterState(READY) @@ -1678,6 +1699,7 @@ def test___eq__(self): def test___eq__type_differ(self): from google.cloud.bigtable.enums import Table as enum_table from google.cloud.bigtable.table import ClusterState + READY = enum_table.ReplicationState.READY state1 = ClusterState(READY) state2 = object() @@ -1686,15 +1708,17 @@ def test___eq__type_differ(self): def test___ne__same_value(self): from google.cloud.bigtable.enums import Table as enum_table from google.cloud.bigtable.table import ClusterState + READY = enum_table.ReplicationState.READY state1 = ClusterState(READY) state2 = ClusterState(READY) - comparison_val = (state1 != state2) + comparison_val = state1 != state2 self.assertFalse(comparison_val) def test___ne__(self): from google.cloud.bigtable.enums import 
Table as enum_table from google.cloud.bigtable.table import ClusterState + READY = enum_table.ReplicationState.READY INITIALIZING = enum_table.ReplicationState.INITIALIZING state1 = ClusterState(READY) @@ -1704,11 +1728,11 @@ def test___ne__(self): def test__repr__(self): from google.cloud.bigtable.enums import Table as enum_table from google.cloud.bigtable.table import ClusterState + STATE_NOT_KNOWN = enum_table.ReplicationState.STATE_NOT_KNOWN INITIALIZING = enum_table.ReplicationState.INITIALIZING PLANNED_MAINTENANCE = enum_table.ReplicationState.PLANNED_MAINTENANCE - UNPLANNED_MAINTENANCE = enum_table.ReplicationState. \ - UNPLANNED_MAINTENANCE + UNPLANNED_MAINTENANCE = enum_table.ReplicationState.UNPLANNED_MAINTENANCE READY = enum_table.ReplicationState.READY replication_dict = { @@ -1716,38 +1740,43 @@ def test__repr__(self): INITIALIZING: "INITIALIZING", PLANNED_MAINTENANCE: "PLANNED_MAINTENANCE", UNPLANNED_MAINTENANCE: "UNPLANNED_MAINTENANCE", - READY: "READY" + READY: "READY", } - self.assertEqual(str(ClusterState(STATE_NOT_KNOWN)), - replication_dict[STATE_NOT_KNOWN]) - self.assertEqual(str(ClusterState(INITIALIZING)), - replication_dict[INITIALIZING]) - self.assertEqual(str(ClusterState(PLANNED_MAINTENANCE)), - replication_dict[PLANNED_MAINTENANCE]) - self.assertEqual(str(ClusterState(UNPLANNED_MAINTENANCE)), - replication_dict[UNPLANNED_MAINTENANCE]) - self.assertEqual(str(ClusterState(READY)), - replication_dict[READY]) - - self.assertEqual(ClusterState(STATE_NOT_KNOWN).replication_state, - STATE_NOT_KNOWN) - self.assertEqual(ClusterState(INITIALIZING).replication_state, - INITIALIZING) - self.assertEqual(ClusterState(PLANNED_MAINTENANCE).replication_state, - PLANNED_MAINTENANCE) - self.assertEqual(ClusterState(UNPLANNED_MAINTENANCE). - replication_state, UNPLANNED_MAINTENANCE) - self.assertEqual(ClusterState(READY).replication_state, - READY) + self.assertEqual( + str(ClusterState(STATE_NOT_KNOWN)), replication_dict[STATE_NOT_KNOWN] + ) + self.assertEqual( + str(ClusterState(INITIALIZING)), replication_dict[INITIALIZING] + ) + self.assertEqual( + str(ClusterState(PLANNED_MAINTENANCE)), + replication_dict[PLANNED_MAINTENANCE], + ) + self.assertEqual( + str(ClusterState(UNPLANNED_MAINTENANCE)), + replication_dict[UNPLANNED_MAINTENANCE], + ) + self.assertEqual(str(ClusterState(READY)), replication_dict[READY]) + + self.assertEqual( + ClusterState(STATE_NOT_KNOWN).replication_state, STATE_NOT_KNOWN + ) + self.assertEqual(ClusterState(INITIALIZING).replication_state, INITIALIZING) + self.assertEqual( + ClusterState(PLANNED_MAINTENANCE).replication_state, PLANNED_MAINTENANCE + ) + self.assertEqual( + ClusterState(UNPLANNED_MAINTENANCE).replication_state, UNPLANNED_MAINTENANCE + ) + self.assertEqual(ClusterState(READY).replication_state, READY) def _ReadRowsResponseCellChunkPB(*args, **kw): - from google.cloud.bigtable_v2.proto import ( - bigtable_pb2 as messages_v2_pb2) + from google.cloud.bigtable_v2.proto import bigtable_pb2 as messages_v2_pb2 - family_name = kw.pop('family_name') - qualifier = kw.pop('qualifier') + family_name = kw.pop("family_name") + qualifier = kw.pop("qualifier") message = messages_v2_pb2.ReadRowsResponse.CellChunk(*args, **kw) message.family_name.value = family_name message.qualifier.value = qualifier @@ -1755,15 +1784,13 @@ def _ReadRowsResponseCellChunkPB(*args, **kw): def _ReadRowsResponsePB(*args, **kw): - from google.cloud.bigtable_v2.proto import ( - bigtable_pb2 as messages_v2_pb2) + from google.cloud.bigtable_v2.proto import bigtable_pb2 as 
messages_v2_pb2 return messages_v2_pb2.ReadRowsResponse(*args, **kw) def _mutate_rows_request_pb(*args, **kw): - from google.cloud.bigtable_v2.proto import ( - bigtable_pb2 as data_messages_v2_pb2) + from google.cloud.bigtable_v2.proto import bigtable_pb2 as data_messages_v2_pb2 return data_messages_v2_pb2.MutateRowsRequest(*args, **kw) @@ -1779,7 +1806,6 @@ def next(self): class _MockFailureIterator_1(object): - def next(self): raise DeadlineExceeded("Failed to read from server") @@ -1787,7 +1813,6 @@ def next(self): class _MockFailureIterator_2(object): - def __init__(self, *values): self.iter_values = values[0] self.calls = 0 @@ -1803,33 +1828,27 @@ def next(self): class _ReadRowsResponseV2(object): - - def __init__(self, chunks, last_scanned_row_key=''): + def __init__(self, chunks, last_scanned_row_key=""): self.chunks = chunks self.last_scanned_row_key = last_scanned_row_key def _TablePB(*args, **kw): - from google.cloud.bigtable_admin_v2.proto import ( - table_pb2 as table_v2_pb2) + from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_v2_pb2 return table_v2_pb2.Table(*args, **kw) def _ColumnFamilyPB(*args, **kw): - from google.cloud.bigtable_admin_v2.proto import ( - table_pb2 as table_v2_pb2) + from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_v2_pb2 return table_v2_pb2.ColumnFamily(*args, **kw) def _ClusterStatePB(replication_state): - from google.cloud.bigtable_admin_v2.proto import ( - table_pb2 as table_v2_pb2) + from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_v2_pb2 - return table_v2_pb2.Table.ClusterState( - replication_state=replication_state - ) + return table_v2_pb2.Table.ClusterState(replication_state=replication_state) def _read_rows_retry_exception(exc): From de8ea6c04604ad278c065353d10dfd3af2b93aa2 Mon Sep 17 00:00:00 2001 From: Christopher Wilcox Date: Thu, 29 Nov 2018 13:13:54 -0800 Subject: [PATCH 210/892] Run black at end of synth.py (#6698) * Run black at end of synth.py * blacken logging --- packages/google-cloud-bigtable/synth.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/packages/google-cloud-bigtable/synth.py b/packages/google-cloud-bigtable/synth.py index e3b51d026f6a..6fb2f37af307 100644 --- a/packages/google-cloud-bigtable/synth.py +++ b/packages/google-cloud-bigtable/synth.py @@ -86,3 +86,5 @@ # ---------------------------------------------------------------------------- templated_files = common.py_library(unit_cov_level=97, cov_level=99) s.move(templated_files) + +s.shell.run(["nox", "-s", "blacken"], hide_output=False) From 8df06d37da1acbffcdcf38a1960d8f15b94bf07c Mon Sep 17 00:00:00 2001 From: Christopher Wilcox Date: Thu, 29 Nov 2018 13:23:53 -0800 Subject: [PATCH 211/892] omit local deps (#6701) --- packages/google-cloud-bigtable/.coveragerc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/.coveragerc b/packages/google-cloud-bigtable/.coveragerc index 51fec440cebf..6b9ab9da4a1b 100644 --- a/packages/google-cloud-bigtable/.coveragerc +++ b/packages/google-cloud-bigtable/.coveragerc @@ -14,5 +14,5 @@ exclude_lines = omit = */gapic/*.py */proto/*.py - */google-cloud-python/core/*.py + */core/*.py */site-packages/*.py \ No newline at end of file From 713700862b7bb7d3c80298b78747ae85d289b1a7 Mon Sep 17 00:00:00 2001 From: Christopher Wilcox Date: Fri, 30 Nov 2018 15:25:18 -0800 Subject: [PATCH 212/892] blacken all gen'd libs (#6792) * blacken all gen'd libs --- .../proto/bigtable_instance_admin_pb2.py | 3221 ++++++++++------- 
.../proto/bigtable_instance_admin_pb2_grpc.py | 563 +-- .../proto/bigtable_table_admin_pb2.py | 2914 +++++++++------ .../proto/bigtable_table_admin_pb2_grpc.py | 395 +- .../bigtable_admin_v2/proto/common_pb2.py | 71 +- .../proto/common_pb2_grpc.py | 1 - .../bigtable_admin_v2/proto/instance_pb2.py | 1038 +++--- .../proto/instance_pb2_grpc.py | 1 - .../bigtable_admin_v2/proto/table_pb2.py | 1381 ++++--- .../bigtable_admin_v2/proto/table_pb2_grpc.py | 1 - .../cloud/bigtable_v2/proto/bigtable_pb2.py | 2223 +++++++----- .../bigtable_v2/proto/bigtable_pb2_grpc.py | 191 +- .../cloud/bigtable_v2/proto/data_pb2.py | 3204 ++++++++++------ .../cloud/bigtable_v2/proto/data_pb2_grpc.py | 1 - 14 files changed, 9301 insertions(+), 5904 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py index 1f223ce39dc3..c110db66b01d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py @@ -2,1020 +2,1622 @@ # source: google/cloud/bigtable/admin_v2/proto/bigtable_instance_admin.proto import sys -_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) + +_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database from google.protobuf import descriptor_pb2 + # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.cloud.bigtable_admin_v2.proto import instance_pb2 as google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2 +from google.cloud.bigtable_admin_v2.proto import ( + instance_pb2 as google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2, +) from google.iam.v1 import iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2 from google.iam.v1 import policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2 -from google.longrunning import operations_pb2 as google_dot_longrunning_dot_operations__pb2 +from google.longrunning import ( + operations_pb2 as google_dot_longrunning_dot_operations__pb2, +) from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 from google.protobuf import field_mask_pb2 as google_dot_protobuf_dot_field__mask__pb2 from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 DESCRIPTOR = _descriptor.FileDescriptor( - name='google/cloud/bigtable/admin_v2/proto/bigtable_instance_admin.proto', - package='google.bigtable.admin.v2', - syntax='proto3', - serialized_pb=_b('\nBgoogle/cloud/bigtable/admin_v2/proto/bigtable_instance_admin.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x33google/cloud/bigtable/admin_v2/proto/instance.proto\x1a\x1egoogle/iam/v1/iam_policy.proto\x1a\x1agoogle/iam/v1/policy.proto\x1a#google/longrunning/operations.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\x97\x02\n\x15\x43reateInstanceRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x13\n\x0binstance_id\x18\x02 \x01(\t\x12\x34\n\x08instance\x18\x03 
\x01(\x0b\x32\".google.bigtable.admin.v2.Instance\x12O\n\x08\x63lusters\x18\x04 \x03(\x0b\x32=.google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry\x1aR\n\rClustersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x30\n\x05value\x18\x02 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster:\x02\x38\x01\"\"\n\x12GetInstanceRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\":\n\x14ListInstancesRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x12\n\npage_token\x18\x02 \x01(\t\"\x81\x01\n\x15ListInstancesResponse\x12\x35\n\tinstances\x18\x01 \x03(\x0b\x32\".google.bigtable.admin.v2.Instance\x12\x18\n\x10\x66\x61iled_locations\x18\x02 \x03(\t\x12\x17\n\x0fnext_page_token\x18\x03 \x01(\t\"\x85\x01\n\x1cPartialUpdateInstanceRequest\x12\x34\n\x08instance\x18\x01 \x01(\x0b\x32\".google.bigtable.admin.v2.Instance\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\"%\n\x15\x44\x65leteInstanceRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"n\n\x14\x43reateClusterRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x12\n\ncluster_id\x18\x02 \x01(\t\x12\x32\n\x07\x63luster\x18\x03 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster\"!\n\x11GetClusterRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"9\n\x13ListClustersRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x12\n\npage_token\x18\x02 \x01(\t\"~\n\x14ListClustersResponse\x12\x33\n\x08\x63lusters\x18\x01 \x03(\x0b\x32!.google.bigtable.admin.v2.Cluster\x12\x18\n\x10\x66\x61iled_locations\x18\x02 \x03(\t\x12\x17\n\x0fnext_page_token\x18\x03 \x01(\t\"$\n\x14\x44\x65leteClusterRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\xc6\x01\n\x16\x43reateInstanceMetadata\x12I\n\x10original_request\x18\x01 \x01(\x0b\x32/.google.bigtable.admin.v2.CreateInstanceRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xcd\x01\n\x16UpdateInstanceMetadata\x12P\n\x10original_request\x18\x01 \x01(\x0b\x32\x36.google.bigtable.admin.v2.PartialUpdateInstanceRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xc4\x01\n\x15\x43reateClusterMetadata\x12H\n\x10original_request\x18\x01 \x01(\x0b\x32..google.bigtable.admin.v2.CreateClusterRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xb7\x01\n\x15UpdateClusterMetadata\x12;\n\x10original_request\x18\x01 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\x95\x01\n\x17\x43reateAppProfileRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t\x12\x39\n\x0b\x61pp_profile\x18\x03 \x01(\x0b\x32$.google.bigtable.admin.v2.AppProfile\x12\x17\n\x0fignore_warnings\x18\x04 \x01(\x08\"$\n\x14GetAppProfileRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"O\n\x16ListAppProfilesRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x11\n\tpage_size\x18\x03 \x01(\x05\x12\x12\n\npage_token\x18\x02 \x01(\t\"\x88\x01\n\x17ListAppProfilesResponse\x12:\n\x0c\x61pp_profiles\x18\x01 \x03(\x0b\x32$.google.bigtable.admin.v2.AppProfile\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\x12\x18\n\x10\x66\x61iled_locations\x18\x03 \x03(\t\"\x9e\x01\n\x17UpdateAppProfileRequest\x12\x39\n\x0b\x61pp_profile\x18\x01 
\x01(\x0b\x32$.google.bigtable.admin.v2.AppProfile\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\x12\x17\n\x0fignore_warnings\x18\x03 \x01(\x08\"@\n\x17\x44\x65leteAppProfileRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x17\n\x0fignore_warnings\x18\x02 \x01(\x08\"\x1a\n\x18UpdateAppProfileMetadata2\xaa\x17\n\x15\x42igtableInstanceAdmin\x12\x8e\x01\n\x0e\x43reateInstance\x12/.google.bigtable.admin.v2.CreateInstanceRequest\x1a\x1d.google.longrunning.Operation\",\x82\xd3\xe4\x93\x02&\"!/v2/{parent=projects/*}/instances:\x01*\x12\x8a\x01\n\x0bGetInstance\x12,.google.bigtable.admin.v2.GetInstanceRequest\x1a\".google.bigtable.admin.v2.Instance\")\x82\xd3\xe4\x93\x02#\x12!/v2/{name=projects/*/instances/*}\x12\x9b\x01\n\rListInstances\x12..google.bigtable.admin.v2.ListInstancesRequest\x1a/.google.bigtable.admin.v2.ListInstancesResponse\")\x82\xd3\xe4\x93\x02#\x12!/v2/{parent=projects/*}/instances\x12\x86\x01\n\x0eUpdateInstance\x12\".google.bigtable.admin.v2.Instance\x1a\".google.bigtable.admin.v2.Instance\",\x82\xd3\xe4\x93\x02&\x1a!/v2/{name=projects/*/instances/*}:\x01*\x12\xac\x01\n\x15PartialUpdateInstance\x12\x36.google.bigtable.admin.v2.PartialUpdateInstanceRequest\x1a\x1d.google.longrunning.Operation\"<\x82\xd3\xe4\x93\x02\x36\x32*/v2/{instance.name=projects/*/instances/*}:\x08instance\x12\x84\x01\n\x0e\x44\x65leteInstance\x12/.google.bigtable.admin.v2.DeleteInstanceRequest\x1a\x16.google.protobuf.Empty\")\x82\xd3\xe4\x93\x02#*!/v2/{name=projects/*/instances/*}\x12\x9d\x01\n\rCreateCluster\x12..google.bigtable.admin.v2.CreateClusterRequest\x1a\x1d.google.longrunning.Operation\"=\x82\xd3\xe4\x93\x02\x37\",/v2/{parent=projects/*/instances/*}/clusters:\x07\x63luster\x12\x92\x01\n\nGetCluster\x12+.google.bigtable.admin.v2.GetClusterRequest\x1a!.google.bigtable.admin.v2.Cluster\"4\x82\xd3\xe4\x93\x02.\x12,/v2/{name=projects/*/instances/*/clusters/*}\x12\xa3\x01\n\x0cListClusters\x12-.google.bigtable.admin.v2.ListClustersRequest\x1a..google.bigtable.admin.v2.ListClustersResponse\"4\x82\xd3\xe4\x93\x02.\x12,/v2/{parent=projects/*/instances/*}/clusters\x12\x8a\x01\n\rUpdateCluster\x12!.google.bigtable.admin.v2.Cluster\x1a\x1d.google.longrunning.Operation\"7\x82\xd3\xe4\x93\x02\x31\x1a,/v2/{name=projects/*/instances/*/clusters/*}:\x01*\x12\x8d\x01\n\rDeleteCluster\x12..google.bigtable.admin.v2.DeleteClusterRequest\x1a\x16.google.protobuf.Empty\"4\x82\xd3\xe4\x93\x02.*,/v2/{name=projects/*/instances/*/clusters/*}\x12\xb1\x01\n\x10\x43reateAppProfile\x12\x31.google.bigtable.admin.v2.CreateAppProfileRequest\x1a$.google.bigtable.admin.v2.AppProfile\"D\x82\xd3\xe4\x93\x02>\"//v2/{parent=projects/*/instances/*}/appProfiles:\x0b\x61pp_profile\x12\x9e\x01\n\rGetAppProfile\x12..google.bigtable.admin.v2.GetAppProfileRequest\x1a$.google.bigtable.admin.v2.AppProfile\"7\x82\xd3\xe4\x93\x02\x31\x12//v2/{name=projects/*/instances/*/appProfiles/*}\x12\xaf\x01\n\x0fListAppProfiles\x12\x30.google.bigtable.admin.v2.ListAppProfilesRequest\x1a\x31.google.bigtable.admin.v2.ListAppProfilesResponse\"7\x82\xd3\xe4\x93\x02\x31\x12//v2/{parent=projects/*/instances/*}/appProfiles\x12\xb6\x01\n\x10UpdateAppProfile\x12\x31.google.bigtable.admin.v2.UpdateAppProfileRequest\x1a\x1d.google.longrunning.Operation\"P\x82\xd3\xe4\x93\x02J2;/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}:\x0b\x61pp_profile\x12\x96\x01\n\x10\x44\x65leteAppProfile\x12\x31.google.bigtable.admin.v2.DeleteAppProfileRequest\x1a\x16.google.protobuf.Empty\"7\x82\xd3\xe4\x93\x02\x31*//v2/{name=projects/*/instances/*/appProf
iles/*}\x12\x88\x01\n\x0cGetIamPolicy\x12\".google.iam.v1.GetIamPolicyRequest\x1a\x15.google.iam.v1.Policy\"=\x82\xd3\xe4\x93\x02\x37\"2/v2/{resource=projects/*/instances/*}:getIamPolicy:\x01*\x12\x88\x01\n\x0cSetIamPolicy\x12\".google.iam.v1.SetIamPolicyRequest\x1a\x15.google.iam.v1.Policy\"=\x82\xd3\xe4\x93\x02\x37\"2/v2/{resource=projects/*/instances/*}:setIamPolicy:\x01*\x12\xae\x01\n\x12TestIamPermissions\x12(.google.iam.v1.TestIamPermissionsRequest\x1a).google.iam.v1.TestIamPermissionsResponse\"C\x82\xd3\xe4\x93\x02=\"8/v2/{resource=projects/*/instances/*}:testIamPermissions:\x01*B\xbd\x01\n\x1c\x63om.google.bigtable.admin.v2B\x1a\x42igtableInstanceAdminProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2b\x06proto3') - , - dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.DESCRIPTOR,google_dot_iam_dot_v1_dot_iam__policy__pb2.DESCRIPTOR,google_dot_iam_dot_v1_dot_policy__pb2.DESCRIPTOR,google_dot_longrunning_dot_operations__pb2.DESCRIPTOR,google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,google_dot_protobuf_dot_field__mask__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,]) - - + name="google/cloud/bigtable/admin_v2/proto/bigtable_instance_admin.proto", + package="google.bigtable.admin.v2", + syntax="proto3", + serialized_pb=_b( + '\nBgoogle/cloud/bigtable/admin_v2/proto/bigtable_instance_admin.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x33google/cloud/bigtable/admin_v2/proto/instance.proto\x1a\x1egoogle/iam/v1/iam_policy.proto\x1a\x1agoogle/iam/v1/policy.proto\x1a#google/longrunning/operations.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\x97\x02\n\x15\x43reateInstanceRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x13\n\x0binstance_id\x18\x02 \x01(\t\x12\x34\n\x08instance\x18\x03 \x01(\x0b\x32".google.bigtable.admin.v2.Instance\x12O\n\x08\x63lusters\x18\x04 \x03(\x0b\x32=.google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry\x1aR\n\rClustersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x30\n\x05value\x18\x02 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster:\x02\x38\x01""\n\x12GetInstanceRequest\x12\x0c\n\x04name\x18\x01 \x01(\t":\n\x14ListInstancesRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x12\n\npage_token\x18\x02 \x01(\t"\x81\x01\n\x15ListInstancesResponse\x12\x35\n\tinstances\x18\x01 \x03(\x0b\x32".google.bigtable.admin.v2.Instance\x12\x18\n\x10\x66\x61iled_locations\x18\x02 \x03(\t\x12\x17\n\x0fnext_page_token\x18\x03 \x01(\t"\x85\x01\n\x1cPartialUpdateInstanceRequest\x12\x34\n\x08instance\x18\x01 \x01(\x0b\x32".google.bigtable.admin.v2.Instance\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask"%\n\x15\x44\x65leteInstanceRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"n\n\x14\x43reateClusterRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x12\n\ncluster_id\x18\x02 \x01(\t\x12\x32\n\x07\x63luster\x18\x03 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster"!\n\x11GetClusterRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"9\n\x13ListClustersRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x12\n\npage_token\x18\x02 \x01(\t"~\n\x14ListClustersResponse\x12\x33\n\x08\x63lusters\x18\x01 \x03(\x0b\x32!.google.bigtable.admin.v2.Cluster\x12\x18\n\x10\x66\x61iled_locations\x18\x02 \x03(\t\x12\x17\n\x0fnext_page_token\x18\x03 
\x01(\t"$\n\x14\x44\x65leteClusterRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"\xc6\x01\n\x16\x43reateInstanceMetadata\x12I\n\x10original_request\x18\x01 \x01(\x0b\x32/.google.bigtable.admin.v2.CreateInstanceRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xcd\x01\n\x16UpdateInstanceMetadata\x12P\n\x10original_request\x18\x01 \x01(\x0b\x32\x36.google.bigtable.admin.v2.PartialUpdateInstanceRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xc4\x01\n\x15\x43reateClusterMetadata\x12H\n\x10original_request\x18\x01 \x01(\x0b\x32..google.bigtable.admin.v2.CreateClusterRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xb7\x01\n\x15UpdateClusterMetadata\x12;\n\x10original_request\x18\x01 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\x95\x01\n\x17\x43reateAppProfileRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t\x12\x39\n\x0b\x61pp_profile\x18\x03 \x01(\x0b\x32$.google.bigtable.admin.v2.AppProfile\x12\x17\n\x0fignore_warnings\x18\x04 \x01(\x08"$\n\x14GetAppProfileRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"O\n\x16ListAppProfilesRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x11\n\tpage_size\x18\x03 \x01(\x05\x12\x12\n\npage_token\x18\x02 \x01(\t"\x88\x01\n\x17ListAppProfilesResponse\x12:\n\x0c\x61pp_profiles\x18\x01 \x03(\x0b\x32$.google.bigtable.admin.v2.AppProfile\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\x12\x18\n\x10\x66\x61iled_locations\x18\x03 \x03(\t"\x9e\x01\n\x17UpdateAppProfileRequest\x12\x39\n\x0b\x61pp_profile\x18\x01 \x01(\x0b\x32$.google.bigtable.admin.v2.AppProfile\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\x12\x17\n\x0fignore_warnings\x18\x03 \x01(\x08"@\n\x17\x44\x65leteAppProfileRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x17\n\x0fignore_warnings\x18\x02 
\x01(\x08"\x1a\n\x18UpdateAppProfileMetadata2\xaa\x17\n\x15\x42igtableInstanceAdmin\x12\x8e\x01\n\x0e\x43reateInstance\x12/.google.bigtable.admin.v2.CreateInstanceRequest\x1a\x1d.google.longrunning.Operation",\x82\xd3\xe4\x93\x02&"!/v2/{parent=projects/*}/instances:\x01*\x12\x8a\x01\n\x0bGetInstance\x12,.google.bigtable.admin.v2.GetInstanceRequest\x1a".google.bigtable.admin.v2.Instance")\x82\xd3\xe4\x93\x02#\x12!/v2/{name=projects/*/instances/*}\x12\x9b\x01\n\rListInstances\x12..google.bigtable.admin.v2.ListInstancesRequest\x1a/.google.bigtable.admin.v2.ListInstancesResponse")\x82\xd3\xe4\x93\x02#\x12!/v2/{parent=projects/*}/instances\x12\x86\x01\n\x0eUpdateInstance\x12".google.bigtable.admin.v2.Instance\x1a".google.bigtable.admin.v2.Instance",\x82\xd3\xe4\x93\x02&\x1a!/v2/{name=projects/*/instances/*}:\x01*\x12\xac\x01\n\x15PartialUpdateInstance\x12\x36.google.bigtable.admin.v2.PartialUpdateInstanceRequest\x1a\x1d.google.longrunning.Operation"<\x82\xd3\xe4\x93\x02\x36\x32*/v2/{instance.name=projects/*/instances/*}:\x08instance\x12\x84\x01\n\x0e\x44\x65leteInstance\x12/.google.bigtable.admin.v2.DeleteInstanceRequest\x1a\x16.google.protobuf.Empty")\x82\xd3\xe4\x93\x02#*!/v2/{name=projects/*/instances/*}\x12\x9d\x01\n\rCreateCluster\x12..google.bigtable.admin.v2.CreateClusterRequest\x1a\x1d.google.longrunning.Operation"=\x82\xd3\xe4\x93\x02\x37",/v2/{parent=projects/*/instances/*}/clusters:\x07\x63luster\x12\x92\x01\n\nGetCluster\x12+.google.bigtable.admin.v2.GetClusterRequest\x1a!.google.bigtable.admin.v2.Cluster"4\x82\xd3\xe4\x93\x02.\x12,/v2/{name=projects/*/instances/*/clusters/*}\x12\xa3\x01\n\x0cListClusters\x12-.google.bigtable.admin.v2.ListClustersRequest\x1a..google.bigtable.admin.v2.ListClustersResponse"4\x82\xd3\xe4\x93\x02.\x12,/v2/{parent=projects/*/instances/*}/clusters\x12\x8a\x01\n\rUpdateCluster\x12!.google.bigtable.admin.v2.Cluster\x1a\x1d.google.longrunning.Operation"7\x82\xd3\xe4\x93\x02\x31\x1a,/v2/{name=projects/*/instances/*/clusters/*}:\x01*\x12\x8d\x01\n\rDeleteCluster\x12..google.bigtable.admin.v2.DeleteClusterRequest\x1a\x16.google.protobuf.Empty"4\x82\xd3\xe4\x93\x02.*,/v2/{name=projects/*/instances/*/clusters/*}\x12\xb1\x01\n\x10\x43reateAppProfile\x12\x31.google.bigtable.admin.v2.CreateAppProfileRequest\x1a$.google.bigtable.admin.v2.AppProfile"D\x82\xd3\xe4\x93\x02>"//v2/{parent=projects/*/instances/*}/appProfiles:\x0b\x61pp_profile\x12\x9e\x01\n\rGetAppProfile\x12..google.bigtable.admin.v2.GetAppProfileRequest\x1a$.google.bigtable.admin.v2.AppProfile"7\x82\xd3\xe4\x93\x02\x31\x12//v2/{name=projects/*/instances/*/appProfiles/*}\x12\xaf\x01\n\x0fListAppProfiles\x12\x30.google.bigtable.admin.v2.ListAppProfilesRequest\x1a\x31.google.bigtable.admin.v2.ListAppProfilesResponse"7\x82\xd3\xe4\x93\x02\x31\x12//v2/{parent=projects/*/instances/*}/appProfiles\x12\xb6\x01\n\x10UpdateAppProfile\x12\x31.google.bigtable.admin.v2.UpdateAppProfileRequest\x1a\x1d.google.longrunning.Operation"P\x82\xd3\xe4\x93\x02J2;/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}:\x0b\x61pp_profile\x12\x96\x01\n\x10\x44\x65leteAppProfile\x12\x31.google.bigtable.admin.v2.DeleteAppProfileRequest\x1a\x16.google.protobuf.Empty"7\x82\xd3\xe4\x93\x02\x31*//v2/{name=projects/*/instances/*/appProfiles/*}\x12\x88\x01\n\x0cGetIamPolicy\x12".google.iam.v1.GetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"=\x82\xd3\xe4\x93\x02\x37"2/v2/{resource=projects/*/instances/*}:getIamPolicy:\x01*\x12\x88\x01\n\x0cSetIamPolicy\x12".google.iam.v1.SetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"=\x82\xd3\xe4\x9
3\x02\x37"2/v2/{resource=projects/*/instances/*}:setIamPolicy:\x01*\x12\xae\x01\n\x12TestIamPermissions\x12(.google.iam.v1.TestIamPermissionsRequest\x1a).google.iam.v1.TestIamPermissionsResponse"C\x82\xd3\xe4\x93\x02="8/v2/{resource=projects/*/instances/*}:testIamPermissions:\x01*B\xbd\x01\n\x1c\x63om.google.bigtable.admin.v2B\x1a\x42igtableInstanceAdminProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2b\x06proto3' + ), + dependencies=[ + google_dot_api_dot_annotations__pb2.DESCRIPTOR, + google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.DESCRIPTOR, + google_dot_iam_dot_v1_dot_iam__policy__pb2.DESCRIPTOR, + google_dot_iam_dot_v1_dot_policy__pb2.DESCRIPTOR, + google_dot_longrunning_dot_operations__pb2.DESCRIPTOR, + google_dot_protobuf_dot_empty__pb2.DESCRIPTOR, + google_dot_protobuf_dot_field__mask__pb2.DESCRIPTOR, + google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, + ], +) _CREATEINSTANCEREQUEST_CLUSTERSENTRY = _descriptor.Descriptor( - name='ClustersEntry', - full_name='google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='key', full_name='google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry.key', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='value', full_name='google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry.value', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=570, - serialized_end=652, + name="ClustersEntry", + full_name="google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="key", + full_name="google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry.key", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="value", + full_name="google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry.value", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b("8\001")), + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=570, + serialized_end=652, ) _CREATEINSTANCEREQUEST = _descriptor.Descriptor( - 
name='CreateInstanceRequest', - full_name='google.bigtable.admin.v2.CreateInstanceRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='parent', full_name='google.bigtable.admin.v2.CreateInstanceRequest.parent', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='instance_id', full_name='google.bigtable.admin.v2.CreateInstanceRequest.instance_id', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='instance', full_name='google.bigtable.admin.v2.CreateInstanceRequest.instance', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='clusters', full_name='google.bigtable.admin.v2.CreateInstanceRequest.clusters', index=3, - number=4, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[_CREATEINSTANCEREQUEST_CLUSTERSENTRY, ], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=373, - serialized_end=652, + name="CreateInstanceRequest", + full_name="google.bigtable.admin.v2.CreateInstanceRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="parent", + full_name="google.bigtable.admin.v2.CreateInstanceRequest.parent", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="instance_id", + full_name="google.bigtable.admin.v2.CreateInstanceRequest.instance_id", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="instance", + full_name="google.bigtable.admin.v2.CreateInstanceRequest.instance", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="clusters", + full_name="google.bigtable.admin.v2.CreateInstanceRequest.clusters", + index=3, + number=4, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + 
file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[_CREATEINSTANCEREQUEST_CLUSTERSENTRY], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=373, + serialized_end=652, ) _GETINSTANCEREQUEST = _descriptor.Descriptor( - name='GetInstanceRequest', - full_name='google.bigtable.admin.v2.GetInstanceRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.v2.GetInstanceRequest.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=654, - serialized_end=688, + name="GetInstanceRequest", + full_name="google.bigtable.admin.v2.GetInstanceRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.admin.v2.GetInstanceRequest.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ) + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=654, + serialized_end=688, ) _LISTINSTANCESREQUEST = _descriptor.Descriptor( - name='ListInstancesRequest', - full_name='google.bigtable.admin.v2.ListInstancesRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='parent', full_name='google.bigtable.admin.v2.ListInstancesRequest.parent', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='page_token', full_name='google.bigtable.admin.v2.ListInstancesRequest.page_token', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=690, - serialized_end=748, + name="ListInstancesRequest", + full_name="google.bigtable.admin.v2.ListInstancesRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="parent", + full_name="google.bigtable.admin.v2.ListInstancesRequest.parent", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="page_token", + 
full_name="google.bigtable.admin.v2.ListInstancesRequest.page_token", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=690, + serialized_end=748, ) _LISTINSTANCESRESPONSE = _descriptor.Descriptor( - name='ListInstancesResponse', - full_name='google.bigtable.admin.v2.ListInstancesResponse', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='instances', full_name='google.bigtable.admin.v2.ListInstancesResponse.instances', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='failed_locations', full_name='google.bigtable.admin.v2.ListInstancesResponse.failed_locations', index=1, - number=2, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='next_page_token', full_name='google.bigtable.admin.v2.ListInstancesResponse.next_page_token', index=2, - number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=751, - serialized_end=880, + name="ListInstancesResponse", + full_name="google.bigtable.admin.v2.ListInstancesResponse", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="instances", + full_name="google.bigtable.admin.v2.ListInstancesResponse.instances", + index=0, + number=1, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="failed_locations", + full_name="google.bigtable.admin.v2.ListInstancesResponse.failed_locations", + index=1, + number=2, + type=9, + cpp_type=9, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="next_page_token", + full_name="google.bigtable.admin.v2.ListInstancesResponse.next_page_token", + index=2, + number=3, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], 
+ oneofs=[], + serialized_start=751, + serialized_end=880, ) _PARTIALUPDATEINSTANCEREQUEST = _descriptor.Descriptor( - name='PartialUpdateInstanceRequest', - full_name='google.bigtable.admin.v2.PartialUpdateInstanceRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='instance', full_name='google.bigtable.admin.v2.PartialUpdateInstanceRequest.instance', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='update_mask', full_name='google.bigtable.admin.v2.PartialUpdateInstanceRequest.update_mask', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=883, - serialized_end=1016, + name="PartialUpdateInstanceRequest", + full_name="google.bigtable.admin.v2.PartialUpdateInstanceRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="instance", + full_name="google.bigtable.admin.v2.PartialUpdateInstanceRequest.instance", + index=0, + number=1, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="update_mask", + full_name="google.bigtable.admin.v2.PartialUpdateInstanceRequest.update_mask", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=883, + serialized_end=1016, ) _DELETEINSTANCEREQUEST = _descriptor.Descriptor( - name='DeleteInstanceRequest', - full_name='google.bigtable.admin.v2.DeleteInstanceRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.v2.DeleteInstanceRequest.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1018, - serialized_end=1055, + name="DeleteInstanceRequest", + full_name="google.bigtable.admin.v2.DeleteInstanceRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.admin.v2.DeleteInstanceRequest.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + 
default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ) + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1018, + serialized_end=1055, ) _CREATECLUSTERREQUEST = _descriptor.Descriptor( - name='CreateClusterRequest', - full_name='google.bigtable.admin.v2.CreateClusterRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='parent', full_name='google.bigtable.admin.v2.CreateClusterRequest.parent', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='cluster_id', full_name='google.bigtable.admin.v2.CreateClusterRequest.cluster_id', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='cluster', full_name='google.bigtable.admin.v2.CreateClusterRequest.cluster', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1057, - serialized_end=1167, + name="CreateClusterRequest", + full_name="google.bigtable.admin.v2.CreateClusterRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="parent", + full_name="google.bigtable.admin.v2.CreateClusterRequest.parent", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="cluster_id", + full_name="google.bigtable.admin.v2.CreateClusterRequest.cluster_id", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="cluster", + full_name="google.bigtable.admin.v2.CreateClusterRequest.cluster", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1057, + serialized_end=1167, ) _GETCLUSTERREQUEST = _descriptor.Descriptor( - name='GetClusterRequest', - 
full_name='google.bigtable.admin.v2.GetClusterRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.v2.GetClusterRequest.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1169, - serialized_end=1202, + name="GetClusterRequest", + full_name="google.bigtable.admin.v2.GetClusterRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.admin.v2.GetClusterRequest.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ) + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1169, + serialized_end=1202, ) _LISTCLUSTERSREQUEST = _descriptor.Descriptor( - name='ListClustersRequest', - full_name='google.bigtable.admin.v2.ListClustersRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='parent', full_name='google.bigtable.admin.v2.ListClustersRequest.parent', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='page_token', full_name='google.bigtable.admin.v2.ListClustersRequest.page_token', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1204, - serialized_end=1261, + name="ListClustersRequest", + full_name="google.bigtable.admin.v2.ListClustersRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="parent", + full_name="google.bigtable.admin.v2.ListClustersRequest.parent", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="page_token", + full_name="google.bigtable.admin.v2.ListClustersRequest.page_token", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + 
extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1204, + serialized_end=1261, ) _LISTCLUSTERSRESPONSE = _descriptor.Descriptor( - name='ListClustersResponse', - full_name='google.bigtable.admin.v2.ListClustersResponse', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='clusters', full_name='google.bigtable.admin.v2.ListClustersResponse.clusters', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='failed_locations', full_name='google.bigtable.admin.v2.ListClustersResponse.failed_locations', index=1, - number=2, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='next_page_token', full_name='google.bigtable.admin.v2.ListClustersResponse.next_page_token', index=2, - number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1263, - serialized_end=1389, + name="ListClustersResponse", + full_name="google.bigtable.admin.v2.ListClustersResponse", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="clusters", + full_name="google.bigtable.admin.v2.ListClustersResponse.clusters", + index=0, + number=1, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="failed_locations", + full_name="google.bigtable.admin.v2.ListClustersResponse.failed_locations", + index=1, + number=2, + type=9, + cpp_type=9, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="next_page_token", + full_name="google.bigtable.admin.v2.ListClustersResponse.next_page_token", + index=2, + number=3, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1263, + serialized_end=1389, ) _DELETECLUSTERREQUEST = _descriptor.Descriptor( - name='DeleteClusterRequest', - full_name='google.bigtable.admin.v2.DeleteClusterRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', 
full_name='google.bigtable.admin.v2.DeleteClusterRequest.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1391, - serialized_end=1427, + name="DeleteClusterRequest", + full_name="google.bigtable.admin.v2.DeleteClusterRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.admin.v2.DeleteClusterRequest.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ) + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1391, + serialized_end=1427, ) _CREATEINSTANCEMETADATA = _descriptor.Descriptor( - name='CreateInstanceMetadata', - full_name='google.bigtable.admin.v2.CreateInstanceMetadata', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='original_request', full_name='google.bigtable.admin.v2.CreateInstanceMetadata.original_request', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='request_time', full_name='google.bigtable.admin.v2.CreateInstanceMetadata.request_time', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='finish_time', full_name='google.bigtable.admin.v2.CreateInstanceMetadata.finish_time', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1430, - serialized_end=1628, + name="CreateInstanceMetadata", + full_name="google.bigtable.admin.v2.CreateInstanceMetadata", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="original_request", + full_name="google.bigtable.admin.v2.CreateInstanceMetadata.original_request", + index=0, + number=1, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="request_time", + full_name="google.bigtable.admin.v2.CreateInstanceMetadata.request_time", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + 
has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="finish_time", + full_name="google.bigtable.admin.v2.CreateInstanceMetadata.finish_time", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1430, + serialized_end=1628, ) _UPDATEINSTANCEMETADATA = _descriptor.Descriptor( - name='UpdateInstanceMetadata', - full_name='google.bigtable.admin.v2.UpdateInstanceMetadata', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='original_request', full_name='google.bigtable.admin.v2.UpdateInstanceMetadata.original_request', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='request_time', full_name='google.bigtable.admin.v2.UpdateInstanceMetadata.request_time', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='finish_time', full_name='google.bigtable.admin.v2.UpdateInstanceMetadata.finish_time', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1631, - serialized_end=1836, + name="UpdateInstanceMetadata", + full_name="google.bigtable.admin.v2.UpdateInstanceMetadata", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="original_request", + full_name="google.bigtable.admin.v2.UpdateInstanceMetadata.original_request", + index=0, + number=1, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="request_time", + full_name="google.bigtable.admin.v2.UpdateInstanceMetadata.request_time", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="finish_time", + full_name="google.bigtable.admin.v2.UpdateInstanceMetadata.finish_time", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + 
containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1631, + serialized_end=1836, ) _CREATECLUSTERMETADATA = _descriptor.Descriptor( - name='CreateClusterMetadata', - full_name='google.bigtable.admin.v2.CreateClusterMetadata', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='original_request', full_name='google.bigtable.admin.v2.CreateClusterMetadata.original_request', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='request_time', full_name='google.bigtable.admin.v2.CreateClusterMetadata.request_time', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='finish_time', full_name='google.bigtable.admin.v2.CreateClusterMetadata.finish_time', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1839, - serialized_end=2035, + name="CreateClusterMetadata", + full_name="google.bigtable.admin.v2.CreateClusterMetadata", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="original_request", + full_name="google.bigtable.admin.v2.CreateClusterMetadata.original_request", + index=0, + number=1, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="request_time", + full_name="google.bigtable.admin.v2.CreateClusterMetadata.request_time", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="finish_time", + full_name="google.bigtable.admin.v2.CreateClusterMetadata.finish_time", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1839, + serialized_end=2035, ) _UPDATECLUSTERMETADATA = _descriptor.Descriptor( - name='UpdateClusterMetadata', - full_name='google.bigtable.admin.v2.UpdateClusterMetadata', - filename=None, - file=DESCRIPTOR, - 
containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='original_request', full_name='google.bigtable.admin.v2.UpdateClusterMetadata.original_request', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='request_time', full_name='google.bigtable.admin.v2.UpdateClusterMetadata.request_time', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='finish_time', full_name='google.bigtable.admin.v2.UpdateClusterMetadata.finish_time', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=2038, - serialized_end=2221, + name="UpdateClusterMetadata", + full_name="google.bigtable.admin.v2.UpdateClusterMetadata", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="original_request", + full_name="google.bigtable.admin.v2.UpdateClusterMetadata.original_request", + index=0, + number=1, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="request_time", + full_name="google.bigtable.admin.v2.UpdateClusterMetadata.request_time", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="finish_time", + full_name="google.bigtable.admin.v2.UpdateClusterMetadata.finish_time", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=2038, + serialized_end=2221, ) _CREATEAPPPROFILEREQUEST = _descriptor.Descriptor( - name='CreateAppProfileRequest', - full_name='google.bigtable.admin.v2.CreateAppProfileRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='parent', full_name='google.bigtable.admin.v2.CreateAppProfileRequest.parent', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='app_profile_id', 
full_name='google.bigtable.admin.v2.CreateAppProfileRequest.app_profile_id', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='app_profile', full_name='google.bigtable.admin.v2.CreateAppProfileRequest.app_profile', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='ignore_warnings', full_name='google.bigtable.admin.v2.CreateAppProfileRequest.ignore_warnings', index=3, - number=4, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=2224, - serialized_end=2373, + name="CreateAppProfileRequest", + full_name="google.bigtable.admin.v2.CreateAppProfileRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="parent", + full_name="google.bigtable.admin.v2.CreateAppProfileRequest.parent", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="app_profile_id", + full_name="google.bigtable.admin.v2.CreateAppProfileRequest.app_profile_id", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="app_profile", + full_name="google.bigtable.admin.v2.CreateAppProfileRequest.app_profile", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="ignore_warnings", + full_name="google.bigtable.admin.v2.CreateAppProfileRequest.ignore_warnings", + index=3, + number=4, + type=8, + cpp_type=7, + label=1, + has_default_value=False, + default_value=False, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=2224, + serialized_end=2373, ) _GETAPPPROFILEREQUEST = _descriptor.Descriptor( - name='GetAppProfileRequest', - full_name='google.bigtable.admin.v2.GetAppProfileRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.v2.GetAppProfileRequest.name', index=0, 
- number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=2375, - serialized_end=2411, + name="GetAppProfileRequest", + full_name="google.bigtable.admin.v2.GetAppProfileRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.admin.v2.GetAppProfileRequest.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ) + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=2375, + serialized_end=2411, ) _LISTAPPPROFILESREQUEST = _descriptor.Descriptor( - name='ListAppProfilesRequest', - full_name='google.bigtable.admin.v2.ListAppProfilesRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='parent', full_name='google.bigtable.admin.v2.ListAppProfilesRequest.parent', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='page_size', full_name='google.bigtable.admin.v2.ListAppProfilesRequest.page_size', index=1, - number=3, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='page_token', full_name='google.bigtable.admin.v2.ListAppProfilesRequest.page_token', index=2, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=2413, - serialized_end=2492, + name="ListAppProfilesRequest", + full_name="google.bigtable.admin.v2.ListAppProfilesRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="parent", + full_name="google.bigtable.admin.v2.ListAppProfilesRequest.parent", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="page_size", + full_name="google.bigtable.admin.v2.ListAppProfilesRequest.page_size", + index=1, + number=3, + type=5, + cpp_type=1, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + 
containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="page_token", + full_name="google.bigtable.admin.v2.ListAppProfilesRequest.page_token", + index=2, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=2413, + serialized_end=2492, ) _LISTAPPPROFILESRESPONSE = _descriptor.Descriptor( - name='ListAppProfilesResponse', - full_name='google.bigtable.admin.v2.ListAppProfilesResponse', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='app_profiles', full_name='google.bigtable.admin.v2.ListAppProfilesResponse.app_profiles', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='next_page_token', full_name='google.bigtable.admin.v2.ListAppProfilesResponse.next_page_token', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='failed_locations', full_name='google.bigtable.admin.v2.ListAppProfilesResponse.failed_locations', index=2, - number=3, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=2495, - serialized_end=2631, + name="ListAppProfilesResponse", + full_name="google.bigtable.admin.v2.ListAppProfilesResponse", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="app_profiles", + full_name="google.bigtable.admin.v2.ListAppProfilesResponse.app_profiles", + index=0, + number=1, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="next_page_token", + full_name="google.bigtable.admin.v2.ListAppProfilesResponse.next_page_token", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="failed_locations", + full_name="google.bigtable.admin.v2.ListAppProfilesResponse.failed_locations", + index=2, + number=3, + type=9, + cpp_type=9, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, 
+ extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=2495, + serialized_end=2631, ) _UPDATEAPPPROFILEREQUEST = _descriptor.Descriptor( - name='UpdateAppProfileRequest', - full_name='google.bigtable.admin.v2.UpdateAppProfileRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='app_profile', full_name='google.bigtable.admin.v2.UpdateAppProfileRequest.app_profile', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='update_mask', full_name='google.bigtable.admin.v2.UpdateAppProfileRequest.update_mask', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='ignore_warnings', full_name='google.bigtable.admin.v2.UpdateAppProfileRequest.ignore_warnings', index=2, - number=3, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=2634, - serialized_end=2792, + name="UpdateAppProfileRequest", + full_name="google.bigtable.admin.v2.UpdateAppProfileRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="app_profile", + full_name="google.bigtable.admin.v2.UpdateAppProfileRequest.app_profile", + index=0, + number=1, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="update_mask", + full_name="google.bigtable.admin.v2.UpdateAppProfileRequest.update_mask", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="ignore_warnings", + full_name="google.bigtable.admin.v2.UpdateAppProfileRequest.ignore_warnings", + index=2, + number=3, + type=8, + cpp_type=7, + label=1, + has_default_value=False, + default_value=False, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=2634, + serialized_end=2792, ) _DELETEAPPPROFILEREQUEST = _descriptor.Descriptor( - name='DeleteAppProfileRequest', - full_name='google.bigtable.admin.v2.DeleteAppProfileRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - 
_descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.v2.DeleteAppProfileRequest.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='ignore_warnings', full_name='google.bigtable.admin.v2.DeleteAppProfileRequest.ignore_warnings', index=1, - number=2, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=2794, - serialized_end=2858, + name="DeleteAppProfileRequest", + full_name="google.bigtable.admin.v2.DeleteAppProfileRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.admin.v2.DeleteAppProfileRequest.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="ignore_warnings", + full_name="google.bigtable.admin.v2.DeleteAppProfileRequest.ignore_warnings", + index=1, + number=2, + type=8, + cpp_type=7, + label=1, + has_default_value=False, + default_value=False, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=2794, + serialized_end=2858, ) _UPDATEAPPPROFILEMETADATA = _descriptor.Descriptor( - name='UpdateAppProfileMetadata', - full_name='google.bigtable.admin.v2.UpdateAppProfileMetadata', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=2860, - serialized_end=2886, + name="UpdateAppProfileMetadata", + full_name="google.bigtable.admin.v2.UpdateAppProfileMetadata", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=2860, + serialized_end=2886, ) -_CREATEINSTANCEREQUEST_CLUSTERSENTRY.fields_by_name['value'].message_type = google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._CLUSTER +_CREATEINSTANCEREQUEST_CLUSTERSENTRY.fields_by_name[ + "value" +].message_type = ( + google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._CLUSTER +) _CREATEINSTANCEREQUEST_CLUSTERSENTRY.containing_type = _CREATEINSTANCEREQUEST -_CREATEINSTANCEREQUEST.fields_by_name['instance'].message_type = google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._INSTANCE -_CREATEINSTANCEREQUEST.fields_by_name['clusters'].message_type = _CREATEINSTANCEREQUEST_CLUSTERSENTRY 
-_LISTINSTANCESRESPONSE.fields_by_name['instances'].message_type = google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._INSTANCE -_PARTIALUPDATEINSTANCEREQUEST.fields_by_name['instance'].message_type = google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._INSTANCE -_PARTIALUPDATEINSTANCEREQUEST.fields_by_name['update_mask'].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK -_CREATECLUSTERREQUEST.fields_by_name['cluster'].message_type = google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._CLUSTER -_LISTCLUSTERSRESPONSE.fields_by_name['clusters'].message_type = google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._CLUSTER -_CREATEINSTANCEMETADATA.fields_by_name['original_request'].message_type = _CREATEINSTANCEREQUEST -_CREATEINSTANCEMETADATA.fields_by_name['request_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_CREATEINSTANCEMETADATA.fields_by_name['finish_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_UPDATEINSTANCEMETADATA.fields_by_name['original_request'].message_type = _PARTIALUPDATEINSTANCEREQUEST -_UPDATEINSTANCEMETADATA.fields_by_name['request_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_UPDATEINSTANCEMETADATA.fields_by_name['finish_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_CREATECLUSTERMETADATA.fields_by_name['original_request'].message_type = _CREATECLUSTERREQUEST -_CREATECLUSTERMETADATA.fields_by_name['request_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_CREATECLUSTERMETADATA.fields_by_name['finish_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_UPDATECLUSTERMETADATA.fields_by_name['original_request'].message_type = google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._CLUSTER -_UPDATECLUSTERMETADATA.fields_by_name['request_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_UPDATECLUSTERMETADATA.fields_by_name['finish_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_CREATEAPPPROFILEREQUEST.fields_by_name['app_profile'].message_type = google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._APPPROFILE -_LISTAPPPROFILESRESPONSE.fields_by_name['app_profiles'].message_type = google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._APPPROFILE -_UPDATEAPPPROFILEREQUEST.fields_by_name['app_profile'].message_type = google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._APPPROFILE -_UPDATEAPPPROFILEREQUEST.fields_by_name['update_mask'].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK -DESCRIPTOR.message_types_by_name['CreateInstanceRequest'] = _CREATEINSTANCEREQUEST -DESCRIPTOR.message_types_by_name['GetInstanceRequest'] = _GETINSTANCEREQUEST -DESCRIPTOR.message_types_by_name['ListInstancesRequest'] = _LISTINSTANCESREQUEST -DESCRIPTOR.message_types_by_name['ListInstancesResponse'] = _LISTINSTANCESRESPONSE -DESCRIPTOR.message_types_by_name['PartialUpdateInstanceRequest'] = _PARTIALUPDATEINSTANCEREQUEST -DESCRIPTOR.message_types_by_name['DeleteInstanceRequest'] = _DELETEINSTANCEREQUEST -DESCRIPTOR.message_types_by_name['CreateClusterRequest'] = _CREATECLUSTERREQUEST -DESCRIPTOR.message_types_by_name['GetClusterRequest'] = _GETCLUSTERREQUEST -DESCRIPTOR.message_types_by_name['ListClustersRequest'] = _LISTCLUSTERSREQUEST -DESCRIPTOR.message_types_by_name['ListClustersResponse'] = 
_LISTCLUSTERSRESPONSE -DESCRIPTOR.message_types_by_name['DeleteClusterRequest'] = _DELETECLUSTERREQUEST -DESCRIPTOR.message_types_by_name['CreateInstanceMetadata'] = _CREATEINSTANCEMETADATA -DESCRIPTOR.message_types_by_name['UpdateInstanceMetadata'] = _UPDATEINSTANCEMETADATA -DESCRIPTOR.message_types_by_name['CreateClusterMetadata'] = _CREATECLUSTERMETADATA -DESCRIPTOR.message_types_by_name['UpdateClusterMetadata'] = _UPDATECLUSTERMETADATA -DESCRIPTOR.message_types_by_name['CreateAppProfileRequest'] = _CREATEAPPPROFILEREQUEST -DESCRIPTOR.message_types_by_name['GetAppProfileRequest'] = _GETAPPPROFILEREQUEST -DESCRIPTOR.message_types_by_name['ListAppProfilesRequest'] = _LISTAPPPROFILESREQUEST -DESCRIPTOR.message_types_by_name['ListAppProfilesResponse'] = _LISTAPPPROFILESRESPONSE -DESCRIPTOR.message_types_by_name['UpdateAppProfileRequest'] = _UPDATEAPPPROFILEREQUEST -DESCRIPTOR.message_types_by_name['DeleteAppProfileRequest'] = _DELETEAPPPROFILEREQUEST -DESCRIPTOR.message_types_by_name['UpdateAppProfileMetadata'] = _UPDATEAPPPROFILEMETADATA +_CREATEINSTANCEREQUEST.fields_by_name[ + "instance" +].message_type = ( + google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._INSTANCE +) +_CREATEINSTANCEREQUEST.fields_by_name[ + "clusters" +].message_type = _CREATEINSTANCEREQUEST_CLUSTERSENTRY +_LISTINSTANCESRESPONSE.fields_by_name[ + "instances" +].message_type = ( + google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._INSTANCE +) +_PARTIALUPDATEINSTANCEREQUEST.fields_by_name[ + "instance" +].message_type = ( + google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._INSTANCE +) +_PARTIALUPDATEINSTANCEREQUEST.fields_by_name[ + "update_mask" +].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK +_CREATECLUSTERREQUEST.fields_by_name[ + "cluster" +].message_type = ( + google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._CLUSTER +) +_LISTCLUSTERSRESPONSE.fields_by_name[ + "clusters" +].message_type = ( + google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._CLUSTER +) +_CREATEINSTANCEMETADATA.fields_by_name[ + "original_request" +].message_type = _CREATEINSTANCEREQUEST +_CREATEINSTANCEMETADATA.fields_by_name[ + "request_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_CREATEINSTANCEMETADATA.fields_by_name[ + "finish_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_UPDATEINSTANCEMETADATA.fields_by_name[ + "original_request" +].message_type = _PARTIALUPDATEINSTANCEREQUEST +_UPDATEINSTANCEMETADATA.fields_by_name[ + "request_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_UPDATEINSTANCEMETADATA.fields_by_name[ + "finish_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_CREATECLUSTERMETADATA.fields_by_name[ + "original_request" +].message_type = _CREATECLUSTERREQUEST +_CREATECLUSTERMETADATA.fields_by_name[ + "request_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_CREATECLUSTERMETADATA.fields_by_name[ + "finish_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_UPDATECLUSTERMETADATA.fields_by_name[ + "original_request" +].message_type = ( + google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._CLUSTER +) +_UPDATECLUSTERMETADATA.fields_by_name[ + "request_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_UPDATECLUSTERMETADATA.fields_by_name[ + "finish_time" +].message_type = 
google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_CREATEAPPPROFILEREQUEST.fields_by_name[ + "app_profile" +].message_type = ( + google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._APPPROFILE +) +_LISTAPPPROFILESRESPONSE.fields_by_name[ + "app_profiles" +].message_type = ( + google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._APPPROFILE +) +_UPDATEAPPPROFILEREQUEST.fields_by_name[ + "app_profile" +].message_type = ( + google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._APPPROFILE +) +_UPDATEAPPPROFILEREQUEST.fields_by_name[ + "update_mask" +].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK +DESCRIPTOR.message_types_by_name["CreateInstanceRequest"] = _CREATEINSTANCEREQUEST +DESCRIPTOR.message_types_by_name["GetInstanceRequest"] = _GETINSTANCEREQUEST +DESCRIPTOR.message_types_by_name["ListInstancesRequest"] = _LISTINSTANCESREQUEST +DESCRIPTOR.message_types_by_name["ListInstancesResponse"] = _LISTINSTANCESRESPONSE +DESCRIPTOR.message_types_by_name[ + "PartialUpdateInstanceRequest" +] = _PARTIALUPDATEINSTANCEREQUEST +DESCRIPTOR.message_types_by_name["DeleteInstanceRequest"] = _DELETEINSTANCEREQUEST +DESCRIPTOR.message_types_by_name["CreateClusterRequest"] = _CREATECLUSTERREQUEST +DESCRIPTOR.message_types_by_name["GetClusterRequest"] = _GETCLUSTERREQUEST +DESCRIPTOR.message_types_by_name["ListClustersRequest"] = _LISTCLUSTERSREQUEST +DESCRIPTOR.message_types_by_name["ListClustersResponse"] = _LISTCLUSTERSRESPONSE +DESCRIPTOR.message_types_by_name["DeleteClusterRequest"] = _DELETECLUSTERREQUEST +DESCRIPTOR.message_types_by_name["CreateInstanceMetadata"] = _CREATEINSTANCEMETADATA +DESCRIPTOR.message_types_by_name["UpdateInstanceMetadata"] = _UPDATEINSTANCEMETADATA +DESCRIPTOR.message_types_by_name["CreateClusterMetadata"] = _CREATECLUSTERMETADATA +DESCRIPTOR.message_types_by_name["UpdateClusterMetadata"] = _UPDATECLUSTERMETADATA +DESCRIPTOR.message_types_by_name["CreateAppProfileRequest"] = _CREATEAPPPROFILEREQUEST +DESCRIPTOR.message_types_by_name["GetAppProfileRequest"] = _GETAPPPROFILEREQUEST +DESCRIPTOR.message_types_by_name["ListAppProfilesRequest"] = _LISTAPPPROFILESREQUEST +DESCRIPTOR.message_types_by_name["ListAppProfilesResponse"] = _LISTAPPPROFILESRESPONSE +DESCRIPTOR.message_types_by_name["UpdateAppProfileRequest"] = _UPDATEAPPPROFILEREQUEST +DESCRIPTOR.message_types_by_name["DeleteAppProfileRequest"] = _DELETEAPPPROFILEREQUEST +DESCRIPTOR.message_types_by_name["UpdateAppProfileMetadata"] = _UPDATEAPPPROFILEMETADATA _sym_db.RegisterFileDescriptor(DESCRIPTOR) -CreateInstanceRequest = _reflection.GeneratedProtocolMessageType('CreateInstanceRequest', (_message.Message,), dict( - - ClustersEntry = _reflection.GeneratedProtocolMessageType('ClustersEntry', (_message.Message,), dict( - DESCRIPTOR = _CREATEINSTANCEREQUEST_CLUSTERSENTRY, - __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry) - )) - , - DESCRIPTOR = _CREATEINSTANCEREQUEST, - __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' - , - __doc__ = """Request message for BigtableInstanceAdmin.CreateInstance. 
+CreateInstanceRequest = _reflection.GeneratedProtocolMessageType( + "CreateInstanceRequest", + (_message.Message,), + dict( + ClustersEntry=_reflection.GeneratedProtocolMessageType( + "ClustersEntry", + (_message.Message,), + dict( + DESCRIPTOR=_CREATEINSTANCEREQUEST_CLUSTERSENTRY, + __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2" + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry) + ), + ), + DESCRIPTOR=_CREATEINSTANCEREQUEST, + __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", + __doc__="""Request message for BigtableInstanceAdmin.CreateInstance. Attributes: @@ -1036,16 +1638,19 @@ Fields marked ``OutputOnly`` must be left blank. Currently, at most two clusters can be specified. """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateInstanceRequest) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateInstanceRequest) + ), +) _sym_db.RegisterMessage(CreateInstanceRequest) _sym_db.RegisterMessage(CreateInstanceRequest.ClustersEntry) -GetInstanceRequest = _reflection.GeneratedProtocolMessageType('GetInstanceRequest', (_message.Message,), dict( - DESCRIPTOR = _GETINSTANCEREQUEST, - __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' - , - __doc__ = """Request message for BigtableInstanceAdmin.GetInstance. +GetInstanceRequest = _reflection.GeneratedProtocolMessageType( + "GetInstanceRequest", + (_message.Message,), + dict( + DESCRIPTOR=_GETINSTANCEREQUEST, + __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", + __doc__="""Request message for BigtableInstanceAdmin.GetInstance. Attributes: @@ -1053,15 +1658,18 @@ The unique name of the requested instance. Values are of the form ``projects//instances/``. """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetInstanceRequest) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetInstanceRequest) + ), +) _sym_db.RegisterMessage(GetInstanceRequest) -ListInstancesRequest = _reflection.GeneratedProtocolMessageType('ListInstancesRequest', (_message.Message,), dict( - DESCRIPTOR = _LISTINSTANCESREQUEST, - __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' - , - __doc__ = """Request message for BigtableInstanceAdmin.ListInstances. +ListInstancesRequest = _reflection.GeneratedProtocolMessageType( + "ListInstancesRequest", + (_message.Message,), + dict( + DESCRIPTOR=_LISTINSTANCESREQUEST, + __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", + __doc__="""Request message for BigtableInstanceAdmin.ListInstances. Attributes: @@ -1071,15 +1679,18 @@ page_token: DEPRECATED: This field is unused and ignored. """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListInstancesRequest) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListInstancesRequest) + ), +) _sym_db.RegisterMessage(ListInstancesRequest) -ListInstancesResponse = _reflection.GeneratedProtocolMessageType('ListInstancesResponse', (_message.Message,), dict( - DESCRIPTOR = _LISTINSTANCESRESPONSE, - __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' - , - __doc__ = """Response message for BigtableInstanceAdmin.ListInstances. 
+ListInstancesResponse = _reflection.GeneratedProtocolMessageType( + "ListInstancesResponse", + (_message.Message,), + dict( + DESCRIPTOR=_LISTINSTANCESRESPONSE, + __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", + __doc__="""Response message for BigtableInstanceAdmin.ListInstances. Attributes: @@ -1096,15 +1707,18 @@ next_page_token: DEPRECATED: This field is unused and ignored. """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListInstancesResponse) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListInstancesResponse) + ), +) _sym_db.RegisterMessage(ListInstancesResponse) -PartialUpdateInstanceRequest = _reflection.GeneratedProtocolMessageType('PartialUpdateInstanceRequest', (_message.Message,), dict( - DESCRIPTOR = _PARTIALUPDATEINSTANCEREQUEST, - __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' - , - __doc__ = """Request message for BigtableInstanceAdmin.PartialUpdateInstance. +PartialUpdateInstanceRequest = _reflection.GeneratedProtocolMessageType( + "PartialUpdateInstanceRequest", + (_message.Message,), + dict( + DESCRIPTOR=_PARTIALUPDATEINSTANCEREQUEST, + __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", + __doc__="""Request message for BigtableInstanceAdmin.PartialUpdateInstance. Attributes: @@ -1114,15 +1728,18 @@ The subset of Instance fields which should be replaced. Must be explicitly set. """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.PartialUpdateInstanceRequest) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.PartialUpdateInstanceRequest) + ), +) _sym_db.RegisterMessage(PartialUpdateInstanceRequest) -DeleteInstanceRequest = _reflection.GeneratedProtocolMessageType('DeleteInstanceRequest', (_message.Message,), dict( - DESCRIPTOR = _DELETEINSTANCEREQUEST, - __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' - , - __doc__ = """Request message for BigtableInstanceAdmin.DeleteInstance. +DeleteInstanceRequest = _reflection.GeneratedProtocolMessageType( + "DeleteInstanceRequest", + (_message.Message,), + dict( + DESCRIPTOR=_DELETEINSTANCEREQUEST, + __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", + __doc__="""Request message for BigtableInstanceAdmin.DeleteInstance. Attributes: @@ -1130,15 +1747,18 @@ The unique name of the instance to be deleted. Values are of the form ``projects//instances/``. """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteInstanceRequest) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteInstanceRequest) + ), +) _sym_db.RegisterMessage(DeleteInstanceRequest) -CreateClusterRequest = _reflection.GeneratedProtocolMessageType('CreateClusterRequest', (_message.Message,), dict( - DESCRIPTOR = _CREATECLUSTERREQUEST, - __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' - , - __doc__ = """Request message for BigtableInstanceAdmin.CreateCluster. +CreateClusterRequest = _reflection.GeneratedProtocolMessageType( + "CreateClusterRequest", + (_message.Message,), + dict( + DESCRIPTOR=_CREATECLUSTERREQUEST, + __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", + __doc__="""Request message for BigtableInstanceAdmin.CreateCluster. Attributes: @@ -1154,15 +1774,18 @@ The cluster to be created. Fields marked ``OutputOnly`` must be left blank. 
""", - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateClusterRequest) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateClusterRequest) + ), +) _sym_db.RegisterMessage(CreateClusterRequest) -GetClusterRequest = _reflection.GeneratedProtocolMessageType('GetClusterRequest', (_message.Message,), dict( - DESCRIPTOR = _GETCLUSTERREQUEST, - __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' - , - __doc__ = """Request message for BigtableInstanceAdmin.GetCluster. +GetClusterRequest = _reflection.GeneratedProtocolMessageType( + "GetClusterRequest", + (_message.Message,), + dict( + DESCRIPTOR=_GETCLUSTERREQUEST, + __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", + __doc__="""Request message for BigtableInstanceAdmin.GetCluster. Attributes: @@ -1171,15 +1794,18 @@ form ``projects//instances//clusters/``. """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetClusterRequest) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetClusterRequest) + ), +) _sym_db.RegisterMessage(GetClusterRequest) -ListClustersRequest = _reflection.GeneratedProtocolMessageType('ListClustersRequest', (_message.Message,), dict( - DESCRIPTOR = _LISTCLUSTERSREQUEST, - __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' - , - __doc__ = """Request message for BigtableInstanceAdmin.ListClusters. +ListClustersRequest = _reflection.GeneratedProtocolMessageType( + "ListClustersRequest", + (_message.Message,), + dict( + DESCRIPTOR=_LISTCLUSTERSREQUEST, + __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", + __doc__="""Request message for BigtableInstanceAdmin.ListClusters. Attributes: @@ -1192,15 +1818,18 @@ page_token: DEPRECATED: This field is unused and ignored. """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListClustersRequest) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListClustersRequest) + ), +) _sym_db.RegisterMessage(ListClustersRequest) -ListClustersResponse = _reflection.GeneratedProtocolMessageType('ListClustersResponse', (_message.Message,), dict( - DESCRIPTOR = _LISTCLUSTERSRESPONSE, - __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' - , - __doc__ = """Response message for BigtableInstanceAdmin.ListClusters. +ListClustersResponse = _reflection.GeneratedProtocolMessageType( + "ListClustersResponse", + (_message.Message,), + dict( + DESCRIPTOR=_LISTCLUSTERSRESPONSE, + __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", + __doc__="""Response message for BigtableInstanceAdmin.ListClusters. Attributes: @@ -1216,15 +1845,18 @@ next_page_token: DEPRECATED: This field is unused and ignored. """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListClustersResponse) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListClustersResponse) + ), +) _sym_db.RegisterMessage(ListClustersResponse) -DeleteClusterRequest = _reflection.GeneratedProtocolMessageType('DeleteClusterRequest', (_message.Message,), dict( - DESCRIPTOR = _DELETECLUSTERREQUEST, - __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' - , - __doc__ = """Request message for BigtableInstanceAdmin.DeleteCluster. 
+DeleteClusterRequest = _reflection.GeneratedProtocolMessageType( + "DeleteClusterRequest", + (_message.Message,), + dict( + DESCRIPTOR=_DELETECLUSTERREQUEST, + __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", + __doc__="""Request message for BigtableInstanceAdmin.DeleteCluster. Attributes: @@ -1233,15 +1865,18 @@ the form ``projects//instances//clusters/``. """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteClusterRequest) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteClusterRequest) + ), +) _sym_db.RegisterMessage(DeleteClusterRequest) -CreateInstanceMetadata = _reflection.GeneratedProtocolMessageType('CreateInstanceMetadata', (_message.Message,), dict( - DESCRIPTOR = _CREATEINSTANCEMETADATA, - __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' - , - __doc__ = """The metadata for the Operation returned by CreateInstance. +CreateInstanceMetadata = _reflection.GeneratedProtocolMessageType( + "CreateInstanceMetadata", + (_message.Message,), + dict( + DESCRIPTOR=_CREATEINSTANCEMETADATA, + __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", + __doc__="""The metadata for the Operation returned by CreateInstance. Attributes: @@ -1254,15 +1889,18 @@ The time at which the operation failed or was completed successfully. """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateInstanceMetadata) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateInstanceMetadata) + ), +) _sym_db.RegisterMessage(CreateInstanceMetadata) -UpdateInstanceMetadata = _reflection.GeneratedProtocolMessageType('UpdateInstanceMetadata', (_message.Message,), dict( - DESCRIPTOR = _UPDATEINSTANCEMETADATA, - __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' - , - __doc__ = """The metadata for the Operation returned by UpdateInstance. +UpdateInstanceMetadata = _reflection.GeneratedProtocolMessageType( + "UpdateInstanceMetadata", + (_message.Message,), + dict( + DESCRIPTOR=_UPDATEINSTANCEMETADATA, + __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", + __doc__="""The metadata for the Operation returned by UpdateInstance. Attributes: @@ -1275,15 +1913,18 @@ The time at which the operation failed or was completed successfully. """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.UpdateInstanceMetadata) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.UpdateInstanceMetadata) + ), +) _sym_db.RegisterMessage(UpdateInstanceMetadata) -CreateClusterMetadata = _reflection.GeneratedProtocolMessageType('CreateClusterMetadata', (_message.Message,), dict( - DESCRIPTOR = _CREATECLUSTERMETADATA, - __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' - , - __doc__ = """The metadata for the Operation returned by CreateCluster. +CreateClusterMetadata = _reflection.GeneratedProtocolMessageType( + "CreateClusterMetadata", + (_message.Message,), + dict( + DESCRIPTOR=_CREATECLUSTERMETADATA, + __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", + __doc__="""The metadata for the Operation returned by CreateCluster. Attributes: @@ -1296,15 +1937,18 @@ The time at which the operation failed or was completed successfully. 
""", - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateClusterMetadata) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateClusterMetadata) + ), +) _sym_db.RegisterMessage(CreateClusterMetadata) -UpdateClusterMetadata = _reflection.GeneratedProtocolMessageType('UpdateClusterMetadata', (_message.Message,), dict( - DESCRIPTOR = _UPDATECLUSTERMETADATA, - __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' - , - __doc__ = """The metadata for the Operation returned by UpdateCluster. +UpdateClusterMetadata = _reflection.GeneratedProtocolMessageType( + "UpdateClusterMetadata", + (_message.Message,), + dict( + DESCRIPTOR=_UPDATECLUSTERMETADATA, + __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", + __doc__="""The metadata for the Operation returned by UpdateCluster. Attributes: @@ -1317,15 +1961,18 @@ The time at which the operation failed or was completed successfully. """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.UpdateClusterMetadata) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.UpdateClusterMetadata) + ), +) _sym_db.RegisterMessage(UpdateClusterMetadata) -CreateAppProfileRequest = _reflection.GeneratedProtocolMessageType('CreateAppProfileRequest', (_message.Message,), dict( - DESCRIPTOR = _CREATEAPPPROFILEREQUEST, - __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' - , - __doc__ = """Request message for BigtableInstanceAdmin.CreateAppProfile. +CreateAppProfileRequest = _reflection.GeneratedProtocolMessageType( + "CreateAppProfileRequest", + (_message.Message,), + dict( + DESCRIPTOR=_CREATEAPPPROFILEREQUEST, + __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", + __doc__="""Request message for BigtableInstanceAdmin.CreateAppProfile. Attributes: @@ -1343,15 +1990,18 @@ ignore_warnings: If true, ignore safety checks when creating the app profile. """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateAppProfileRequest) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateAppProfileRequest) + ), +) _sym_db.RegisterMessage(CreateAppProfileRequest) -GetAppProfileRequest = _reflection.GeneratedProtocolMessageType('GetAppProfileRequest', (_message.Message,), dict( - DESCRIPTOR = _GETAPPPROFILEREQUEST, - __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' - , - __doc__ = """Request message for BigtableInstanceAdmin.GetAppProfile. +GetAppProfileRequest = _reflection.GeneratedProtocolMessageType( + "GetAppProfileRequest", + (_message.Message,), + dict( + DESCRIPTOR=_GETAPPPROFILEREQUEST, + __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", + __doc__="""Request message for BigtableInstanceAdmin.GetAppProfile. Attributes: @@ -1360,15 +2010,18 @@ the form ``projects//instances//appProfiles /``. """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetAppProfileRequest) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetAppProfileRequest) + ), +) _sym_db.RegisterMessage(GetAppProfileRequest) -ListAppProfilesRequest = _reflection.GeneratedProtocolMessageType('ListAppProfilesRequest', (_message.Message,), dict( - DESCRIPTOR = _LISTAPPPROFILESREQUEST, - __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' - , - __doc__ = """Request message for BigtableInstanceAdmin.ListAppProfiles. 
+ListAppProfilesRequest = _reflection.GeneratedProtocolMessageType( + "ListAppProfilesRequest", + (_message.Message,), + dict( + DESCRIPTOR=_LISTAPPPROFILESREQUEST, + __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", + __doc__="""Request message for BigtableInstanceAdmin.ListAppProfiles. Attributes: @@ -1384,15 +2037,18 @@ page_token: The value of ``next_page_token`` returned by a previous call. """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListAppProfilesRequest) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListAppProfilesRequest) + ), +) _sym_db.RegisterMessage(ListAppProfilesRequest) -ListAppProfilesResponse = _reflection.GeneratedProtocolMessageType('ListAppProfilesResponse', (_message.Message,), dict( - DESCRIPTOR = _LISTAPPPROFILESRESPONSE, - __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' - , - __doc__ = """Response message for BigtableInstanceAdmin.ListAppProfiles. +ListAppProfilesResponse = _reflection.GeneratedProtocolMessageType( + "ListAppProfilesResponse", + (_message.Message,), + dict( + DESCRIPTOR=_LISTAPPPROFILESRESPONSE, + __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", + __doc__="""Response message for BigtableInstanceAdmin.ListAppProfiles. Attributes: @@ -1409,15 +2065,18 @@ ``app_profiles``. Values are of the form ``projects//locations/`` """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListAppProfilesResponse) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListAppProfilesResponse) + ), +) _sym_db.RegisterMessage(ListAppProfilesResponse) -UpdateAppProfileRequest = _reflection.GeneratedProtocolMessageType('UpdateAppProfileRequest', (_message.Message,), dict( - DESCRIPTOR = _UPDATEAPPPROFILEREQUEST, - __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' - , - __doc__ = """Request message for BigtableInstanceAdmin.UpdateAppProfile. +UpdateAppProfileRequest = _reflection.GeneratedProtocolMessageType( + "UpdateAppProfileRequest", + (_message.Message,), + dict( + DESCRIPTOR=_UPDATEAPPPROFILEREQUEST, + __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", + __doc__="""Request message for BigtableInstanceAdmin.UpdateAppProfile. Attributes: @@ -1430,15 +2089,18 @@ ignore_warnings: If true, ignore safety checks when updating the app profile. """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.UpdateAppProfileRequest) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.UpdateAppProfileRequest) + ), +) _sym_db.RegisterMessage(UpdateAppProfileRequest) -DeleteAppProfileRequest = _reflection.GeneratedProtocolMessageType('DeleteAppProfileRequest', (_message.Message,), dict( - DESCRIPTOR = _DELETEAPPPROFILEREQUEST, - __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' - , - __doc__ = """Request message for BigtableInstanceAdmin.DeleteAppProfile. +DeleteAppProfileRequest = _reflection.GeneratedProtocolMessageType( + "DeleteAppProfileRequest", + (_message.Message,), + dict( + DESCRIPTOR=_DELETEAPPPROFILEREQUEST, + __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", + __doc__="""Request message for BigtableInstanceAdmin.DeleteAppProfile. Attributes: @@ -1449,209 +2111,306 @@ ignore_warnings: If true, ignore safety checks when deleting the app profile. 
""", - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteAppProfileRequest) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteAppProfileRequest) + ), +) _sym_db.RegisterMessage(DeleteAppProfileRequest) -UpdateAppProfileMetadata = _reflection.GeneratedProtocolMessageType('UpdateAppProfileMetadata', (_message.Message,), dict( - DESCRIPTOR = _UPDATEAPPPROFILEMETADATA, - __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2' - , - __doc__ = """The metadata for the Operation returned by UpdateAppProfile. +UpdateAppProfileMetadata = _reflection.GeneratedProtocolMessageType( + "UpdateAppProfileMetadata", + (_message.Message,), + dict( + DESCRIPTOR=_UPDATEAPPPROFILEMETADATA, + __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", + __doc__="""The metadata for the Operation returned by UpdateAppProfile. """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.UpdateAppProfileMetadata) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.UpdateAppProfileMetadata) + ), +) _sym_db.RegisterMessage(UpdateAppProfileMetadata) DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\034com.google.bigtable.admin.v2B\032BigtableInstanceAdminProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2')) +DESCRIPTOR._options = _descriptor._ParseOptions( + descriptor_pb2.FileOptions(), + _b( + "\n\034com.google.bigtable.admin.v2B\032BigtableInstanceAdminProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2" + ), +) _CREATEINSTANCEREQUEST_CLUSTERSENTRY.has_options = True -_CREATEINSTANCEREQUEST_CLUSTERSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) +_CREATEINSTANCEREQUEST_CLUSTERSENTRY._options = _descriptor._ParseOptions( + descriptor_pb2.MessageOptions(), _b("8\001") +) _BIGTABLEINSTANCEADMIN = _descriptor.ServiceDescriptor( - name='BigtableInstanceAdmin', - full_name='google.bigtable.admin.v2.BigtableInstanceAdmin', - file=DESCRIPTOR, - index=0, - options=None, - serialized_start=2889, - serialized_end=5875, - methods=[ - _descriptor.MethodDescriptor( - name='CreateInstance', - full_name='google.bigtable.admin.v2.BigtableInstanceAdmin.CreateInstance', + name="BigtableInstanceAdmin", + full_name="google.bigtable.admin.v2.BigtableInstanceAdmin", + file=DESCRIPTOR, index=0, - containing_service=None, - input_type=_CREATEINSTANCEREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002&\"!/v2/{parent=projects/*}/instances:\001*')), - ), - _descriptor.MethodDescriptor( - name='GetInstance', - full_name='google.bigtable.admin.v2.BigtableInstanceAdmin.GetInstance', - index=1, - containing_service=None, - input_type=_GETINSTANCEREQUEST, - output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._INSTANCE, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002#\022!/v2/{name=projects/*/instances/*}')), - ), - _descriptor.MethodDescriptor( - name='ListInstances', - full_name='google.bigtable.admin.v2.BigtableInstanceAdmin.ListInstances', - index=2, - containing_service=None, - input_type=_LISTINSTANCESREQUEST, - 
output_type=_LISTINSTANCESRESPONSE, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002#\022!/v2/{parent=projects/*}/instances')), - ), - _descriptor.MethodDescriptor( - name='UpdateInstance', - full_name='google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateInstance', - index=3, - containing_service=None, - input_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._INSTANCE, - output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._INSTANCE, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002&\032!/v2/{name=projects/*/instances/*}:\001*')), - ), - _descriptor.MethodDescriptor( - name='PartialUpdateInstance', - full_name='google.bigtable.admin.v2.BigtableInstanceAdmin.PartialUpdateInstance', - index=4, - containing_service=None, - input_type=_PARTIALUPDATEINSTANCEREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\00262*/v2/{instance.name=projects/*/instances/*}:\010instance')), - ), - _descriptor.MethodDescriptor( - name='DeleteInstance', - full_name='google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteInstance', - index=5, - containing_service=None, - input_type=_DELETEINSTANCEREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002#*!/v2/{name=projects/*/instances/*}')), - ), - _descriptor.MethodDescriptor( - name='CreateCluster', - full_name='google.bigtable.admin.v2.BigtableInstanceAdmin.CreateCluster', - index=6, - containing_service=None, - input_type=_CREATECLUSTERREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\0027\",/v2/{parent=projects/*/instances/*}/clusters:\007cluster')), - ), - _descriptor.MethodDescriptor( - name='GetCluster', - full_name='google.bigtable.admin.v2.BigtableInstanceAdmin.GetCluster', - index=7, - containing_service=None, - input_type=_GETCLUSTERREQUEST, - output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._CLUSTER, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002.\022,/v2/{name=projects/*/instances/*/clusters/*}')), - ), - _descriptor.MethodDescriptor( - name='ListClusters', - full_name='google.bigtable.admin.v2.BigtableInstanceAdmin.ListClusters', - index=8, - containing_service=None, - input_type=_LISTCLUSTERSREQUEST, - output_type=_LISTCLUSTERSRESPONSE, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002.\022,/v2/{parent=projects/*/instances/*}/clusters')), - ), - _descriptor.MethodDescriptor( - name='UpdateCluster', - full_name='google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateCluster', - index=9, - containing_service=None, - input_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._CLUSTER, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\0021\032,/v2/{name=projects/*/instances/*/clusters/*}:\001*')), - ), - _descriptor.MethodDescriptor( - name='DeleteCluster', - full_name='google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteCluster', - index=10, - containing_service=None, - input_type=_DELETECLUSTERREQUEST, - 
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002.*,/v2/{name=projects/*/instances/*/clusters/*}')), - ), - _descriptor.MethodDescriptor( - name='CreateAppProfile', - full_name='google.bigtable.admin.v2.BigtableInstanceAdmin.CreateAppProfile', - index=11, - containing_service=None, - input_type=_CREATEAPPPROFILEREQUEST, - output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._APPPROFILE, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002>\"//v2/{parent=projects/*/instances/*}/appProfiles:\013app_profile')), - ), - _descriptor.MethodDescriptor( - name='GetAppProfile', - full_name='google.bigtable.admin.v2.BigtableInstanceAdmin.GetAppProfile', - index=12, - containing_service=None, - input_type=_GETAPPPROFILEREQUEST, - output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._APPPROFILE, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\0021\022//v2/{name=projects/*/instances/*/appProfiles/*}')), - ), - _descriptor.MethodDescriptor( - name='ListAppProfiles', - full_name='google.bigtable.admin.v2.BigtableInstanceAdmin.ListAppProfiles', - index=13, - containing_service=None, - input_type=_LISTAPPPROFILESREQUEST, - output_type=_LISTAPPPROFILESRESPONSE, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\0021\022//v2/{parent=projects/*/instances/*}/appProfiles')), - ), - _descriptor.MethodDescriptor( - name='UpdateAppProfile', - full_name='google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateAppProfile', - index=14, - containing_service=None, - input_type=_UPDATEAPPPROFILEREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002J2;/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}:\013app_profile')), - ), - _descriptor.MethodDescriptor( - name='DeleteAppProfile', - full_name='google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteAppProfile', - index=15, - containing_service=None, - input_type=_DELETEAPPPROFILEREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\0021*//v2/{name=projects/*/instances/*/appProfiles/*}')), - ), - _descriptor.MethodDescriptor( - name='GetIamPolicy', - full_name='google.bigtable.admin.v2.BigtableInstanceAdmin.GetIamPolicy', - index=16, - containing_service=None, - input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._GETIAMPOLICYREQUEST, - output_type=google_dot_iam_dot_v1_dot_policy__pb2._POLICY, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\0027\"2/v2/{resource=projects/*/instances/*}:getIamPolicy:\001*')), - ), - _descriptor.MethodDescriptor( - name='SetIamPolicy', - full_name='google.bigtable.admin.v2.BigtableInstanceAdmin.SetIamPolicy', - index=17, - containing_service=None, - input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._SETIAMPOLICYREQUEST, - output_type=google_dot_iam_dot_v1_dot_policy__pb2._POLICY, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\0027\"2/v2/{resource=projects/*/instances/*}:setIamPolicy:\001*')), - ), - _descriptor.MethodDescriptor( - name='TestIamPermissions', - full_name='google.bigtable.admin.v2.BigtableInstanceAdmin.TestIamPermissions', - index=18, - 
containing_service=None, - input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._TESTIAMPERMISSIONSREQUEST, - output_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._TESTIAMPERMISSIONSRESPONSE, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002=\"8/v2/{resource=projects/*/instances/*}:testIamPermissions:\001*')), - ), -]) + options=None, + serialized_start=2889, + serialized_end=5875, + methods=[ + _descriptor.MethodDescriptor( + name="CreateInstance", + full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.CreateInstance", + index=0, + containing_service=None, + input_type=_CREATEINSTANCEREQUEST, + output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, + options=_descriptor._ParseOptions( + descriptor_pb2.MethodOptions(), + _b('\202\323\344\223\002&"!/v2/{parent=projects/*}/instances:\001*'), + ), + ), + _descriptor.MethodDescriptor( + name="GetInstance", + full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.GetInstance", + index=1, + containing_service=None, + input_type=_GETINSTANCEREQUEST, + output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._INSTANCE, + options=_descriptor._ParseOptions( + descriptor_pb2.MethodOptions(), + _b("\202\323\344\223\002#\022!/v2/{name=projects/*/instances/*}"), + ), + ), + _descriptor.MethodDescriptor( + name="ListInstances", + full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.ListInstances", + index=2, + containing_service=None, + input_type=_LISTINSTANCESREQUEST, + output_type=_LISTINSTANCESRESPONSE, + options=_descriptor._ParseOptions( + descriptor_pb2.MethodOptions(), + _b("\202\323\344\223\002#\022!/v2/{parent=projects/*}/instances"), + ), + ), + _descriptor.MethodDescriptor( + name="UpdateInstance", + full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateInstance", + index=3, + containing_service=None, + input_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._INSTANCE, + output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._INSTANCE, + options=_descriptor._ParseOptions( + descriptor_pb2.MethodOptions(), + _b("\202\323\344\223\002&\032!/v2/{name=projects/*/instances/*}:\001*"), + ), + ), + _descriptor.MethodDescriptor( + name="PartialUpdateInstance", + full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.PartialUpdateInstance", + index=4, + containing_service=None, + input_type=_PARTIALUPDATEINSTANCEREQUEST, + output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, + options=_descriptor._ParseOptions( + descriptor_pb2.MethodOptions(), + _b( + "\202\323\344\223\00262*/v2/{instance.name=projects/*/instances/*}:\010instance" + ), + ), + ), + _descriptor.MethodDescriptor( + name="DeleteInstance", + full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteInstance", + index=5, + containing_service=None, + input_type=_DELETEINSTANCEREQUEST, + output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, + options=_descriptor._ParseOptions( + descriptor_pb2.MethodOptions(), + _b("\202\323\344\223\002#*!/v2/{name=projects/*/instances/*}"), + ), + ), + _descriptor.MethodDescriptor( + name="CreateCluster", + full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.CreateCluster", + index=6, + containing_service=None, + input_type=_CREATECLUSTERREQUEST, + output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, + options=_descriptor._ParseOptions( + descriptor_pb2.MethodOptions(), + _b( + 
'\202\323\344\223\0027",/v2/{parent=projects/*/instances/*}/clusters:\007cluster' + ), + ), + ), + _descriptor.MethodDescriptor( + name="GetCluster", + full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.GetCluster", + index=7, + containing_service=None, + input_type=_GETCLUSTERREQUEST, + output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._CLUSTER, + options=_descriptor._ParseOptions( + descriptor_pb2.MethodOptions(), + _b( + "\202\323\344\223\002.\022,/v2/{name=projects/*/instances/*/clusters/*}" + ), + ), + ), + _descriptor.MethodDescriptor( + name="ListClusters", + full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.ListClusters", + index=8, + containing_service=None, + input_type=_LISTCLUSTERSREQUEST, + output_type=_LISTCLUSTERSRESPONSE, + options=_descriptor._ParseOptions( + descriptor_pb2.MethodOptions(), + _b( + "\202\323\344\223\002.\022,/v2/{parent=projects/*/instances/*}/clusters" + ), + ), + ), + _descriptor.MethodDescriptor( + name="UpdateCluster", + full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateCluster", + index=9, + containing_service=None, + input_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._CLUSTER, + output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, + options=_descriptor._ParseOptions( + descriptor_pb2.MethodOptions(), + _b( + "\202\323\344\223\0021\032,/v2/{name=projects/*/instances/*/clusters/*}:\001*" + ), + ), + ), + _descriptor.MethodDescriptor( + name="DeleteCluster", + full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteCluster", + index=10, + containing_service=None, + input_type=_DELETECLUSTERREQUEST, + output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, + options=_descriptor._ParseOptions( + descriptor_pb2.MethodOptions(), + _b( + "\202\323\344\223\002.*,/v2/{name=projects/*/instances/*/clusters/*}" + ), + ), + ), + _descriptor.MethodDescriptor( + name="CreateAppProfile", + full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.CreateAppProfile", + index=11, + containing_service=None, + input_type=_CREATEAPPPROFILEREQUEST, + output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._APPPROFILE, + options=_descriptor._ParseOptions( + descriptor_pb2.MethodOptions(), + _b( + '\202\323\344\223\002>"//v2/{parent=projects/*/instances/*}/appProfiles:\013app_profile' + ), + ), + ), + _descriptor.MethodDescriptor( + name="GetAppProfile", + full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.GetAppProfile", + index=12, + containing_service=None, + input_type=_GETAPPPROFILEREQUEST, + output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._APPPROFILE, + options=_descriptor._ParseOptions( + descriptor_pb2.MethodOptions(), + _b( + "\202\323\344\223\0021\022//v2/{name=projects/*/instances/*/appProfiles/*}" + ), + ), + ), + _descriptor.MethodDescriptor( + name="ListAppProfiles", + full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.ListAppProfiles", + index=13, + containing_service=None, + input_type=_LISTAPPPROFILESREQUEST, + output_type=_LISTAPPPROFILESRESPONSE, + options=_descriptor._ParseOptions( + descriptor_pb2.MethodOptions(), + _b( + "\202\323\344\223\0021\022//v2/{parent=projects/*/instances/*}/appProfiles" + ), + ), + ), + _descriptor.MethodDescriptor( + name="UpdateAppProfile", + full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateAppProfile", + index=14, + containing_service=None, + input_type=_UPDATEAPPPROFILEREQUEST, + 
output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, + options=_descriptor._ParseOptions( + descriptor_pb2.MethodOptions(), + _b( + "\202\323\344\223\002J2;/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}:\013app_profile" + ), + ), + ), + _descriptor.MethodDescriptor( + name="DeleteAppProfile", + full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteAppProfile", + index=15, + containing_service=None, + input_type=_DELETEAPPPROFILEREQUEST, + output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, + options=_descriptor._ParseOptions( + descriptor_pb2.MethodOptions(), + _b( + "\202\323\344\223\0021*//v2/{name=projects/*/instances/*/appProfiles/*}" + ), + ), + ), + _descriptor.MethodDescriptor( + name="GetIamPolicy", + full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.GetIamPolicy", + index=16, + containing_service=None, + input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._GETIAMPOLICYREQUEST, + output_type=google_dot_iam_dot_v1_dot_policy__pb2._POLICY, + options=_descriptor._ParseOptions( + descriptor_pb2.MethodOptions(), + _b( + '\202\323\344\223\0027"2/v2/{resource=projects/*/instances/*}:getIamPolicy:\001*' + ), + ), + ), + _descriptor.MethodDescriptor( + name="SetIamPolicy", + full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.SetIamPolicy", + index=17, + containing_service=None, + input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._SETIAMPOLICYREQUEST, + output_type=google_dot_iam_dot_v1_dot_policy__pb2._POLICY, + options=_descriptor._ParseOptions( + descriptor_pb2.MethodOptions(), + _b( + '\202\323\344\223\0027"2/v2/{resource=projects/*/instances/*}:setIamPolicy:\001*' + ), + ), + ), + _descriptor.MethodDescriptor( + name="TestIamPermissions", + full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.TestIamPermissions", + index=18, + containing_service=None, + input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._TESTIAMPERMISSIONSREQUEST, + output_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._TESTIAMPERMISSIONSRESPONSE, + options=_descriptor._ParseOptions( + descriptor_pb2.MethodOptions(), + _b( + '\202\323\344\223\002="8/v2/{resource=projects/*/instances/*}:testIamPermissions:\001*' + ), + ), + ), + ], +) _sym_db.RegisterServiceDescriptor(_BIGTABLEINSTANCEADMIN) -DESCRIPTOR.services_by_name['BigtableInstanceAdmin'] = _BIGTABLEINSTANCEADMIN +DESCRIPTOR.services_by_name["BigtableInstanceAdmin"] = _BIGTABLEINSTANCEADMIN # @@protoc_insertion_point(module_scope) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py index f1ea31abdbba..0ca0445e22db 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py @@ -1,363 +1,370 @@ # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
import grpc -from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2 as google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2 -from google.cloud.bigtable_admin_v2.proto import instance_pb2 as google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2 +from google.cloud.bigtable_admin_v2.proto import ( + bigtable_instance_admin_pb2 as google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2, +) +from google.cloud.bigtable_admin_v2.proto import ( + instance_pb2 as google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2, +) from google.iam.v1 import iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2 from google.iam.v1 import policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2 -from google.longrunning import operations_pb2 as google_dot_longrunning_dot_operations__pb2 +from google.longrunning import ( + operations_pb2 as google_dot_longrunning_dot_operations__pb2, +) from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 class BigtableInstanceAdminStub(object): - """Service for creating, configuring, and deleting Cloud Bigtable Instances and + """Service for creating, configuring, and deleting Cloud Bigtable Instances and Clusters. Provides access to the Instance and Cluster schemas only, not the tables' metadata or data stored in those tables. """ - def __init__(self, channel): - """Constructor. + def __init__(self, channel): + """Constructor. Args: channel: A grpc.Channel. """ - self.CreateInstance = channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateInstance', - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateInstanceRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, + self.CreateInstance = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateInstance", + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateInstanceRequest.SerializeToString, + response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, ) - self.GetInstance = channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/GetInstance', - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetInstanceRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.Instance.FromString, + self.GetInstance = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetInstance", + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetInstanceRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.Instance.FromString, ) - self.ListInstances = channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/ListInstances', - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListInstancesRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListInstancesResponse.FromString, + self.ListInstances = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListInstances", + 
request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListInstancesRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListInstancesResponse.FromString, ) - self.UpdateInstance = channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateInstance', - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.Instance.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.Instance.FromString, + self.UpdateInstance = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateInstance", + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.Instance.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.Instance.FromString, ) - self.PartialUpdateInstance = channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/PartialUpdateInstance', - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.PartialUpdateInstanceRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, + self.PartialUpdateInstance = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/PartialUpdateInstance", + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.PartialUpdateInstanceRequest.SerializeToString, + response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, ) - self.DeleteInstance = channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteInstance', - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteInstanceRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + self.DeleteInstance = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteInstance", + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteInstanceRequest.SerializeToString, + response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) - self.CreateCluster = channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateCluster', - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateClusterRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, + self.CreateCluster = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateCluster", + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateClusterRequest.SerializeToString, + response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, ) - self.GetCluster = channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/GetCluster', - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetClusterRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.Cluster.FromString, + self.GetCluster = channel.unary_unary( + 
"/google.bigtable.admin.v2.BigtableInstanceAdmin/GetCluster", + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetClusterRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.Cluster.FromString, ) - self.ListClusters = channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/ListClusters', - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListClustersRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListClustersResponse.FromString, + self.ListClusters = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListClusters", + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListClustersRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListClustersResponse.FromString, ) - self.UpdateCluster = channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateCluster', - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.Cluster.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, + self.UpdateCluster = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateCluster", + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.Cluster.SerializeToString, + response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, ) - self.DeleteCluster = channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteCluster', - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteClusterRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + self.DeleteCluster = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteCluster", + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteClusterRequest.SerializeToString, + response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) - self.CreateAppProfile = channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateAppProfile', - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateAppProfileRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.AppProfile.FromString, + self.CreateAppProfile = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateAppProfile", + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateAppProfileRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.AppProfile.FromString, ) - self.GetAppProfile = channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/GetAppProfile', - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetAppProfileRequest.SerializeToString, - 
response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.AppProfile.FromString, + self.GetAppProfile = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetAppProfile", + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetAppProfileRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.AppProfile.FromString, ) - self.ListAppProfiles = channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/ListAppProfiles', - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListAppProfilesRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListAppProfilesResponse.FromString, + self.ListAppProfiles = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListAppProfiles", + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListAppProfilesRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListAppProfilesResponse.FromString, ) - self.UpdateAppProfile = channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateAppProfile', - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.UpdateAppProfileRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, + self.UpdateAppProfile = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateAppProfile", + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.UpdateAppProfileRequest.SerializeToString, + response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, ) - self.DeleteAppProfile = channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteAppProfile', - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteAppProfileRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + self.DeleteAppProfile = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteAppProfile", + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteAppProfileRequest.SerializeToString, + response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) - self.GetIamPolicy = channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/GetIamPolicy', - request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.SerializeToString, - response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, + self.GetIamPolicy = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetIamPolicy", + request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, ) - self.SetIamPolicy = channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/SetIamPolicy', - request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.SerializeToString, - 
response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, + self.SetIamPolicy = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/SetIamPolicy", + request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, ) - self.TestIamPermissions = channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/TestIamPermissions', - request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.SerializeToString, - response_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.FromString, + self.TestIamPermissions = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/TestIamPermissions", + request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.FromString, ) class BigtableInstanceAdminServicer(object): - """Service for creating, configuring, and deleting Cloud Bigtable Instances and + """Service for creating, configuring, and deleting Cloud Bigtable Instances and Clusters. Provides access to the Instance and Cluster schemas only, not the tables' metadata or data stored in those tables. """ - def CreateInstance(self, request, context): - """Create an instance within a project. + def CreateInstance(self, request, context): + """Create an instance within a project. """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") - def GetInstance(self, request, context): - """Gets information about an instance. + def GetInstance(self, request, context): + """Gets information about an instance. """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") - def ListInstances(self, request, context): - """Lists information about instances in a project. + def ListInstances(self, request, context): + """Lists information about instances in a project. """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") - def UpdateInstance(self, request, context): - """Updates an instance within a project. + def UpdateInstance(self, request, context): + """Updates an instance within a project. """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") - def PartialUpdateInstance(self, request, context): - """Partially updates an instance within a project. 
+ def PartialUpdateInstance(self, request, context): + """Partially updates an instance within a project. """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") - def DeleteInstance(self, request, context): - """Delete an instance from a project. + def DeleteInstance(self, request, context): + """Delete an instance from a project. """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") - def CreateCluster(self, request, context): - """Creates a cluster within an instance. + def CreateCluster(self, request, context): + """Creates a cluster within an instance. """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") - def GetCluster(self, request, context): - """Gets information about a cluster. + def GetCluster(self, request, context): + """Gets information about a cluster. """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") - def ListClusters(self, request, context): - """Lists information about clusters in an instance. + def ListClusters(self, request, context): + """Lists information about clusters in an instance. """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") - def UpdateCluster(self, request, context): - """Updates a cluster within an instance. + def UpdateCluster(self, request, context): + """Updates a cluster within an instance. """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") - def DeleteCluster(self, request, context): - """Deletes a cluster from an instance. + def DeleteCluster(self, request, context): + """Deletes a cluster from an instance. """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") - def CreateAppProfile(self, request, context): - """Creates an app profile within an instance. + def CreateAppProfile(self, request, context): + """Creates an app profile within an instance. 
""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") - def GetAppProfile(self, request, context): - """Gets information about an app profile. + def GetAppProfile(self, request, context): + """Gets information about an app profile. """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") - def ListAppProfiles(self, request, context): - """Lists information about app profiles in an instance. + def ListAppProfiles(self, request, context): + """Lists information about app profiles in an instance. """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") - def UpdateAppProfile(self, request, context): - """Updates an app profile within an instance. + def UpdateAppProfile(self, request, context): + """Updates an app profile within an instance. """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") - def DeleteAppProfile(self, request, context): - """Deletes an app profile from an instance. + def DeleteAppProfile(self, request, context): + """Deletes an app profile from an instance. """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") - def GetIamPolicy(self, request, context): - """Gets the access control policy for an instance resource. Returns an empty + def GetIamPolicy(self, request, context): + """Gets the access control policy for an instance resource. Returns an empty policy if an instance exists but does not have a policy set. """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") - def SetIamPolicy(self, request, context): - """Sets the access control policy on an instance resource. Replaces any + def SetIamPolicy(self, request, context): + """Sets the access control policy on an instance resource. Replaces any existing policy. 
""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") - def TestIamPermissions(self, request, context): - """Returns permissions that the caller has on the specified instance resource. + def TestIamPermissions(self, request, context): + """Returns permissions that the caller has on the specified instance resource. """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") def add_BigtableInstanceAdminServicer_to_server(servicer, server): - rpc_method_handlers = { - 'CreateInstance': grpc.unary_unary_rpc_method_handler( - servicer.CreateInstance, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateInstanceRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - 'GetInstance': grpc.unary_unary_rpc_method_handler( - servicer.GetInstance, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetInstanceRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.Instance.SerializeToString, - ), - 'ListInstances': grpc.unary_unary_rpc_method_handler( - servicer.ListInstances, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListInstancesRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListInstancesResponse.SerializeToString, - ), - 'UpdateInstance': grpc.unary_unary_rpc_method_handler( - servicer.UpdateInstance, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.Instance.FromString, - response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.Instance.SerializeToString, - ), - 'PartialUpdateInstance': grpc.unary_unary_rpc_method_handler( - servicer.PartialUpdateInstance, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.PartialUpdateInstanceRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - 'DeleteInstance': grpc.unary_unary_rpc_method_handler( - servicer.DeleteInstance, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteInstanceRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - 'CreateCluster': grpc.unary_unary_rpc_method_handler( - servicer.CreateCluster, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateClusterRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - 'GetCluster': grpc.unary_unary_rpc_method_handler( - servicer.GetCluster, - 
request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetClusterRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.Cluster.SerializeToString, - ), - 'ListClusters': grpc.unary_unary_rpc_method_handler( - servicer.ListClusters, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListClustersRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListClustersResponse.SerializeToString, - ), - 'UpdateCluster': grpc.unary_unary_rpc_method_handler( - servicer.UpdateCluster, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.Cluster.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - 'DeleteCluster': grpc.unary_unary_rpc_method_handler( - servicer.DeleteCluster, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteClusterRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - 'CreateAppProfile': grpc.unary_unary_rpc_method_handler( - servicer.CreateAppProfile, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateAppProfileRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.AppProfile.SerializeToString, - ), - 'GetAppProfile': grpc.unary_unary_rpc_method_handler( - servicer.GetAppProfile, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetAppProfileRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.AppProfile.SerializeToString, - ), - 'ListAppProfiles': grpc.unary_unary_rpc_method_handler( - servicer.ListAppProfiles, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListAppProfilesRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListAppProfilesResponse.SerializeToString, - ), - 'UpdateAppProfile': grpc.unary_unary_rpc_method_handler( - servicer.UpdateAppProfile, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.UpdateAppProfileRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - 'DeleteAppProfile': grpc.unary_unary_rpc_method_handler( - servicer.DeleteAppProfile, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteAppProfileRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - 'GetIamPolicy': grpc.unary_unary_rpc_method_handler( - servicer.GetIamPolicy, - request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.FromString, - response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString, - ), - 'SetIamPolicy': grpc.unary_unary_rpc_method_handler( - servicer.SetIamPolicy, - request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.FromString, - 
response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString, - ), - 'TestIamPermissions': grpc.unary_unary_rpc_method_handler( - servicer.TestIamPermissions, - request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.FromString, - response_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - 'google.bigtable.admin.v2.BigtableInstanceAdmin', rpc_method_handlers) - server.add_generic_rpc_handlers((generic_handler,)) + rpc_method_handlers = { + "CreateInstance": grpc.unary_unary_rpc_method_handler( + servicer.CreateInstance, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateInstanceRequest.FromString, + response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, + ), + "GetInstance": grpc.unary_unary_rpc_method_handler( + servicer.GetInstance, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetInstanceRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.Instance.SerializeToString, + ), + "ListInstances": grpc.unary_unary_rpc_method_handler( + servicer.ListInstances, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListInstancesRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListInstancesResponse.SerializeToString, + ), + "UpdateInstance": grpc.unary_unary_rpc_method_handler( + servicer.UpdateInstance, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.Instance.FromString, + response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.Instance.SerializeToString, + ), + "PartialUpdateInstance": grpc.unary_unary_rpc_method_handler( + servicer.PartialUpdateInstance, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.PartialUpdateInstanceRequest.FromString, + response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, + ), + "DeleteInstance": grpc.unary_unary_rpc_method_handler( + servicer.DeleteInstance, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteInstanceRequest.FromString, + response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ), + "CreateCluster": grpc.unary_unary_rpc_method_handler( + servicer.CreateCluster, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateClusterRequest.FromString, + response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, + ), + "GetCluster": grpc.unary_unary_rpc_method_handler( + servicer.GetCluster, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetClusterRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.Cluster.SerializeToString, + ), + "ListClusters": grpc.unary_unary_rpc_method_handler( + servicer.ListClusters, + 
request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListClustersRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListClustersResponse.SerializeToString, + ), + "UpdateCluster": grpc.unary_unary_rpc_method_handler( + servicer.UpdateCluster, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.Cluster.FromString, + response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, + ), + "DeleteCluster": grpc.unary_unary_rpc_method_handler( + servicer.DeleteCluster, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteClusterRequest.FromString, + response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ), + "CreateAppProfile": grpc.unary_unary_rpc_method_handler( + servicer.CreateAppProfile, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateAppProfileRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.AppProfile.SerializeToString, + ), + "GetAppProfile": grpc.unary_unary_rpc_method_handler( + servicer.GetAppProfile, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetAppProfileRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.AppProfile.SerializeToString, + ), + "ListAppProfiles": grpc.unary_unary_rpc_method_handler( + servicer.ListAppProfiles, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListAppProfilesRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListAppProfilesResponse.SerializeToString, + ), + "UpdateAppProfile": grpc.unary_unary_rpc_method_handler( + servicer.UpdateAppProfile, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.UpdateAppProfileRequest.FromString, + response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, + ), + "DeleteAppProfile": grpc.unary_unary_rpc_method_handler( + servicer.DeleteAppProfile, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteAppProfileRequest.FromString, + response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ), + "GetIamPolicy": grpc.unary_unary_rpc_method_handler( + servicer.GetIamPolicy, + request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.FromString, + response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString, + ), + "SetIamPolicy": grpc.unary_unary_rpc_method_handler( + servicer.SetIamPolicy, + request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.FromString, + response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString, + ), + "TestIamPermissions": grpc.unary_unary_rpc_method_handler( + servicer.TestIamPermissions, + request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.FromString, + response_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.SerializeToString, + ), + } + generic_handler = 
grpc.method_handlers_generic_handler( + "google.bigtable.admin.v2.BigtableInstanceAdmin", rpc_method_handlers + ) + server.add_generic_rpc_handlers((generic_handler,)) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py index fd373785bc22..6938656fca6f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py @@ -2,960 +2,1533 @@ # source: google/cloud/bigtable/admin_v2/proto/bigtable_table_admin.proto import sys -_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) + +_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database from google.protobuf import descriptor_pb2 + # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.cloud.bigtable_admin_v2.proto import table_pb2 as google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2 -from google.longrunning import operations_pb2 as google_dot_longrunning_dot_operations__pb2 +from google.cloud.bigtable_admin_v2.proto import ( + table_pb2 as google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2, +) +from google.longrunning import ( + operations_pb2 as google_dot_longrunning_dot_operations__pb2, +) from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 DESCRIPTOR = _descriptor.FileDescriptor( - name='google/cloud/bigtable/admin_v2/proto/bigtable_table_admin.proto', - package='google.bigtable.admin.v2', - syntax='proto3', - serialized_pb=_b('\n?google/cloud/bigtable/admin_v2/proto/bigtable_table_admin.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x30google/cloud/bigtable/admin_v2/proto/table.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\xc8\x01\n\x12\x43reateTableRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x10\n\x08table_id\x18\x02 \x01(\t\x12.\n\x05table\x18\x03 \x01(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12J\n\x0einitial_splits\x18\x04 \x03(\x0b\x32\x32.google.bigtable.admin.v2.CreateTableRequest.Split\x1a\x14\n\x05Split\x12\x0b\n\x03key\x18\x01 \x01(\x0c\"[\n\x1e\x43reateTableFromSnapshotRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x10\n\x08table_id\x18\x02 \x01(\t\x12\x17\n\x0fsource_snapshot\x18\x03 \x01(\t\"m\n\x13\x44ropRowRangeRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x18\n\x0erow_key_prefix\x18\x02 \x01(\x0cH\x00\x12$\n\x1a\x64\x65lete_all_data_from_table\x18\x03 \x01(\x08H\x00\x42\x08\n\x06target\"~\n\x11ListTablesRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x32\n\x04view\x18\x02 \x01(\x0e\x32$.google.bigtable.admin.v2.Table.View\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t\"^\n\x12ListTablesResponse\x12/\n\x06tables\x18\x01 
\x03(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"S\n\x0fGetTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x32\n\x04view\x18\x02 \x01(\x0e\x32$.google.bigtable.admin.v2.Table.View\"\"\n\x12\x44\x65leteTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\xae\x02\n\x1bModifyColumnFamiliesRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12Y\n\rmodifications\x18\x02 \x03(\x0b\x32\x42.google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification\x1a\xa5\x01\n\x0cModification\x12\n\n\x02id\x18\x01 \x01(\t\x12\x38\n\x06\x63reate\x18\x02 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamilyH\x00\x12\x38\n\x06update\x18\x03 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamilyH\x00\x12\x0e\n\x04\x64rop\x18\x04 \x01(\x08H\x00\x42\x05\n\x03mod\"/\n\x1fGenerateConsistencyTokenRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"=\n GenerateConsistencyTokenResponse\x12\x19\n\x11\x63onsistency_token\x18\x01 \x01(\t\"B\n\x17\x43heckConsistencyRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x19\n\x11\x63onsistency_token\x18\x02 \x01(\t\".\n\x18\x43heckConsistencyResponse\x12\x12\n\nconsistent\x18\x01 \x01(\x08\"\x87\x01\n\x14SnapshotTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07\x63luster\x18\x02 \x01(\t\x12\x13\n\x0bsnapshot_id\x18\x03 \x01(\t\x12&\n\x03ttl\x18\x04 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x13\n\x0b\x64\x65scription\x18\x05 \x01(\t\"\"\n\x12GetSnapshotRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"M\n\x14ListSnapshotsRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t\"g\n\x15ListSnapshotsResponse\x12\x35\n\tsnapshots\x18\x01 \x03(\x0b\x32\".google.bigtable.admin.v2.Snapshot\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"%\n\x15\x44\x65leteSnapshotRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\xc4\x01\n\x15SnapshotTableMetadata\x12H\n\x10original_request\x18\x01 \x01(\x0b\x32..google.bigtable.admin.v2.SnapshotTableRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xd8\x01\n\x1f\x43reateTableFromSnapshotMetadata\x12R\n\x10original_request\x18\x01 \x01(\x0b\x32\x38.google.bigtable.admin.v2.CreateTableFromSnapshotRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp2\xb7\x11\n\x12\x42igtableTableAdmin\x12\x93\x01\n\x0b\x43reateTable\x12,.google.bigtable.admin.v2.CreateTableRequest\x1a\x1f.google.bigtable.admin.v2.Table\"5\x82\xd3\xe4\x93\x02/\"*/v2/{parent=projects/*/instances/*}/tables:\x01*\x12\xbc\x01\n\x17\x43reateTableFromSnapshot\x12\x38.google.bigtable.admin.v2.CreateTableFromSnapshotRequest\x1a\x1d.google.longrunning.Operation\"H\x82\xd3\xe4\x93\x02\x42\"=/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot:\x01*\x12\x9b\x01\n\nListTables\x12+.google.bigtable.admin.v2.ListTablesRequest\x1a,.google.bigtable.admin.v2.ListTablesResponse\"2\x82\xd3\xe4\x93\x02,\x12*/v2/{parent=projects/*/instances/*}/tables\x12\x8a\x01\n\x08GetTable\x12).google.bigtable.admin.v2.GetTableRequest\x1a\x1f.google.bigtable.admin.v2.Table\"2\x82\xd3\xe4\x93\x02,\x12*/v2/{name=projects/*/instances/*/tables/*}\x12\x87\x01\n\x0b\x44\x65leteTable\x12,.google.bigtable.admin.v2.DeleteTableRequest\x1a\x16.google.protobuf.Empty\"2\x82\xd3\xe4\x93\x02,**/v2/{name=projects/*/instances/*/tables/*}\x12\xba\x01\n\x14ModifyColumnFamilies\x12\x35.google.bigtable.admin.v2.ModifyColumnFamiliesRequest\x1a\x1f.google.bigtable.admin.v2.Table\"J\x82\xd3\xe4\x93\x02\x44\"?/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies:\x01*\x12\x99\x01\n\x0c\x44ropRowRange\x12-.google.bigtable.admin.v2.DropRowRangeRequest\x1a\x16.google.protobuf.Empty\"B\x82\xd3\xe4\x93\x02<\"7/v2/{name=projects/*/instances/*/tables/*}:dropRowRange:\x01*\x12\xe1\x01\n\x18GenerateConsistencyToken\x12\x39.google.bigtable.admin.v2.GenerateConsistencyTokenRequest\x1a:.google.bigtable.admin.v2.GenerateConsistencyTokenResponse\"N\x82\xd3\xe4\x93\x02H\"C/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken:\x01*\x12\xc1\x01\n\x10\x43heckConsistency\x12\x31.google.bigtable.admin.v2.CheckConsistencyRequest\x1a\x32.google.bigtable.admin.v2.CheckConsistencyResponse\"F\x82\xd3\xe4\x93\x02@\";/v2/{name=projects/*/instances/*/tables/*}:checkConsistency:\x01*\x12\x9e\x01\n\rSnapshotTable\x12..google.bigtable.admin.v2.SnapshotTableRequest\x1a\x1d.google.longrunning.Operation\">\x82\xd3\xe4\x93\x02\x38\"3/v2/{name=projects/*/instances/*/tables/*}:snapshot:\x01*\x12\xa1\x01\n\x0bGetSnapshot\x12,.google.bigtable.admin.v2.GetSnapshotRequest\x1a\".google.bigtable.admin.v2.Snapshot\"@\x82\xd3\xe4\x93\x02:\x12\x38/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}\x12\xb2\x01\n\rListSnapshots\x12..google.bigtable.admin.v2.ListSnapshotsRequest\x1a/.google.bigtable.admin.v2.ListSnapshotsResponse\"@\x82\xd3\xe4\x93\x02:\x12\x38/v2/{parent=projects/*/instances/*/clusters/*}/snapshots\x12\x9b\x01\n\x0e\x44\x65leteSnapshot\x12/.google.bigtable.admin.v2.DeleteSnapshotRequest\x1a\x16.google.protobuf.Empty\"@\x82\xd3\xe4\x93\x02:*8/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}B\xba\x01\n\x1c\x63om.google.bigtable.admin.v2B\x17\x42igtableTableAdminProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2b\x06proto3') - , - dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.DESCRIPTOR,google_dot_longrunning_dot_operations__pb2.DESCRIPTOR,google_dot_protobuf_dot_duration__pb2.DESCRIPTOR,google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,]) - - + name="google/cloud/bigtable/admin_v2/proto/bigtable_table_admin.proto", + 
package="google.bigtable.admin.v2", + syntax="proto3", + serialized_pb=_b( + '\n?google/cloud/bigtable/admin_v2/proto/bigtable_table_admin.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x30google/cloud/bigtable/admin_v2/proto/table.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xc8\x01\n\x12\x43reateTableRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x10\n\x08table_id\x18\x02 \x01(\t\x12.\n\x05table\x18\x03 \x01(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12J\n\x0einitial_splits\x18\x04 \x03(\x0b\x32\x32.google.bigtable.admin.v2.CreateTableRequest.Split\x1a\x14\n\x05Split\x12\x0b\n\x03key\x18\x01 \x01(\x0c"[\n\x1e\x43reateTableFromSnapshotRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x10\n\x08table_id\x18\x02 \x01(\t\x12\x17\n\x0fsource_snapshot\x18\x03 \x01(\t"m\n\x13\x44ropRowRangeRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x18\n\x0erow_key_prefix\x18\x02 \x01(\x0cH\x00\x12$\n\x1a\x64\x65lete_all_data_from_table\x18\x03 \x01(\x08H\x00\x42\x08\n\x06target"~\n\x11ListTablesRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x32\n\x04view\x18\x02 \x01(\x0e\x32$.google.bigtable.admin.v2.Table.View\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t"^\n\x12ListTablesResponse\x12/\n\x06tables\x18\x01 \x03(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"S\n\x0fGetTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x32\n\x04view\x18\x02 \x01(\x0e\x32$.google.bigtable.admin.v2.Table.View""\n\x12\x44\x65leteTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"\xae\x02\n\x1bModifyColumnFamiliesRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12Y\n\rmodifications\x18\x02 \x03(\x0b\x32\x42.google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification\x1a\xa5\x01\n\x0cModification\x12\n\n\x02id\x18\x01 \x01(\t\x12\x38\n\x06\x63reate\x18\x02 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamilyH\x00\x12\x38\n\x06update\x18\x03 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamilyH\x00\x12\x0e\n\x04\x64rop\x18\x04 \x01(\x08H\x00\x42\x05\n\x03mod"/\n\x1fGenerateConsistencyTokenRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"=\n GenerateConsistencyTokenResponse\x12\x19\n\x11\x63onsistency_token\x18\x01 \x01(\t"B\n\x17\x43heckConsistencyRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x19\n\x11\x63onsistency_token\x18\x02 \x01(\t".\n\x18\x43heckConsistencyResponse\x12\x12\n\nconsistent\x18\x01 \x01(\x08"\x87\x01\n\x14SnapshotTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07\x63luster\x18\x02 \x01(\t\x12\x13\n\x0bsnapshot_id\x18\x03 \x01(\t\x12&\n\x03ttl\x18\x04 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x13\n\x0b\x64\x65scription\x18\x05 \x01(\t""\n\x12GetSnapshotRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"M\n\x14ListSnapshotsRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t"g\n\x15ListSnapshotsResponse\x12\x35\n\tsnapshots\x18\x01 \x03(\x0b\x32".google.bigtable.admin.v2.Snapshot\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"%\n\x15\x44\x65leteSnapshotRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"\xc4\x01\n\x15SnapshotTableMetadata\x12H\n\x10original_request\x18\x01 \x01(\x0b\x32..google.bigtable.admin.v2.SnapshotTableRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xd8\x01\n\x1f\x43reateTableFromSnapshotMetadata\x12R\n\x10original_request\x18\x01 \x01(\x0b\x32\x38.google.bigtable.admin.v2.CreateTableFromSnapshotRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp2\xb7\x11\n\x12\x42igtableTableAdmin\x12\x93\x01\n\x0b\x43reateTable\x12,.google.bigtable.admin.v2.CreateTableRequest\x1a\x1f.google.bigtable.admin.v2.Table"5\x82\xd3\xe4\x93\x02/"*/v2/{parent=projects/*/instances/*}/tables:\x01*\x12\xbc\x01\n\x17\x43reateTableFromSnapshot\x12\x38.google.bigtable.admin.v2.CreateTableFromSnapshotRequest\x1a\x1d.google.longrunning.Operation"H\x82\xd3\xe4\x93\x02\x42"=/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot:\x01*\x12\x9b\x01\n\nListTables\x12+.google.bigtable.admin.v2.ListTablesRequest\x1a,.google.bigtable.admin.v2.ListTablesResponse"2\x82\xd3\xe4\x93\x02,\x12*/v2/{parent=projects/*/instances/*}/tables\x12\x8a\x01\n\x08GetTable\x12).google.bigtable.admin.v2.GetTableRequest\x1a\x1f.google.bigtable.admin.v2.Table"2\x82\xd3\xe4\x93\x02,\x12*/v2/{name=projects/*/instances/*/tables/*}\x12\x87\x01\n\x0b\x44\x65leteTable\x12,.google.bigtable.admin.v2.DeleteTableRequest\x1a\x16.google.protobuf.Empty"2\x82\xd3\xe4\x93\x02,**/v2/{name=projects/*/instances/*/tables/*}\x12\xba\x01\n\x14ModifyColumnFamilies\x12\x35.google.bigtable.admin.v2.ModifyColumnFamiliesRequest\x1a\x1f.google.bigtable.admin.v2.Table"J\x82\xd3\xe4\x93\x02\x44"?/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies:\x01*\x12\x99\x01\n\x0c\x44ropRowRange\x12-.google.bigtable.admin.v2.DropRowRangeRequest\x1a\x16.google.protobuf.Empty"B\x82\xd3\xe4\x93\x02<"7/v2/{name=projects/*/instances/*/tables/*}:dropRowRange:\x01*\x12\xe1\x01\n\x18GenerateConsistencyToken\x12\x39.google.bigtable.admin.v2.GenerateConsistencyTokenRequest\x1a:.google.bigtable.admin.v2.GenerateConsistencyTokenResponse"N\x82\xd3\xe4\x93\x02H"C/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken:\x01*\x12\xc1\x01\n\x10\x43heckConsistency\x12\x31.google.bigtable.admin.v2.CheckConsistencyRequest\x1a\x32.google.bigtable.admin.v2.CheckConsistencyResponse"F\x82\xd3\xe4\x93\x02@";/v2/{name=projects/*/instances/*/tables/*}:checkConsistency:\x01*\x12\x9e\x01\n\rSnapshotTable\x12..google.bigtable.admin.v2.SnapshotTableRequest\x1a\x1d.google.longrunning.Operation">\x82\xd3\xe4\x93\x02\x38"3/v2/{name=projects/*/instances/*/tables/*}:snapshot:\x01*\x12\xa1\x01\n\x0bGetSnapshot\x12,.google.bigtable.admin.v2.GetSnapshotRequest\x1a".google.bigtable.admin.v2.Snapshot"@\x82\xd3\xe4\x93\x02:\x12\x38/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}\x12\xb2\x01\n\rListSnapshots\x12..google.bigtable.admin.v2.ListSnapshotsRequest\x1a/.google.bigtable.admin.v2.ListSnapshotsResponse"@\x82\xd3\xe4\x93\x02:\x12\x38/v2/{parent=projects/*/instances/*/clusters/*}/snapshots\x12\x9b\x01\n\x0e\x44\x65leteSnapshot\x12/.google.bigtable.admin.v2.DeleteSnapshotRequest\x1a\x16.google.protobuf.Empty"@\x82\xd3\xe4\x93\x02:*8/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}B\xba\x01\n\x1c\x63om.google.bigtable.admin.v2B\x17\x42igtableTableAdminProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2b\x06proto3' + ), + dependencies=[ + google_dot_api_dot_annotations__pb2.DESCRIPTOR, + google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.DESCRIPTOR, + 
google_dot_longrunning_dot_operations__pb2.DESCRIPTOR, + google_dot_protobuf_dot_duration__pb2.DESCRIPTOR, + google_dot_protobuf_dot_empty__pb2.DESCRIPTOR, + google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, + ], +) _CREATETABLEREQUEST_SPLIT = _descriptor.Descriptor( - name='Split', - full_name='google.bigtable.admin.v2.CreateTableRequest.Split', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='key', full_name='google.bigtable.admin.v2.CreateTableRequest.Split.key', index=0, - number=1, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=485, - serialized_end=505, + name="Split", + full_name="google.bigtable.admin.v2.CreateTableRequest.Split", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="key", + full_name="google.bigtable.admin.v2.CreateTableRequest.Split.key", + index=0, + number=1, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b(""), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ) + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=485, + serialized_end=505, ) _CREATETABLEREQUEST = _descriptor.Descriptor( - name='CreateTableRequest', - full_name='google.bigtable.admin.v2.CreateTableRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='parent', full_name='google.bigtable.admin.v2.CreateTableRequest.parent', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='table_id', full_name='google.bigtable.admin.v2.CreateTableRequest.table_id', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='table', full_name='google.bigtable.admin.v2.CreateTableRequest.table', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='initial_splits', full_name='google.bigtable.admin.v2.CreateTableRequest.initial_splits', index=3, - number=4, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[_CREATETABLEREQUEST_SPLIT, ], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - 
serialized_start=305, - serialized_end=505, + name="CreateTableRequest", + full_name="google.bigtable.admin.v2.CreateTableRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="parent", + full_name="google.bigtable.admin.v2.CreateTableRequest.parent", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="table_id", + full_name="google.bigtable.admin.v2.CreateTableRequest.table_id", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="table", + full_name="google.bigtable.admin.v2.CreateTableRequest.table", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="initial_splits", + full_name="google.bigtable.admin.v2.CreateTableRequest.initial_splits", + index=3, + number=4, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[_CREATETABLEREQUEST_SPLIT], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=305, + serialized_end=505, ) _CREATETABLEFROMSNAPSHOTREQUEST = _descriptor.Descriptor( - name='CreateTableFromSnapshotRequest', - full_name='google.bigtable.admin.v2.CreateTableFromSnapshotRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='parent', full_name='google.bigtable.admin.v2.CreateTableFromSnapshotRequest.parent', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='table_id', full_name='google.bigtable.admin.v2.CreateTableFromSnapshotRequest.table_id', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='source_snapshot', full_name='google.bigtable.admin.v2.CreateTableFromSnapshotRequest.source_snapshot', index=2, - number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=507, - 
serialized_end=598, + name="CreateTableFromSnapshotRequest", + full_name="google.bigtable.admin.v2.CreateTableFromSnapshotRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="parent", + full_name="google.bigtable.admin.v2.CreateTableFromSnapshotRequest.parent", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="table_id", + full_name="google.bigtable.admin.v2.CreateTableFromSnapshotRequest.table_id", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="source_snapshot", + full_name="google.bigtable.admin.v2.CreateTableFromSnapshotRequest.source_snapshot", + index=2, + number=3, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=507, + serialized_end=598, ) _DROPROWRANGEREQUEST = _descriptor.Descriptor( - name='DropRowRangeRequest', - full_name='google.bigtable.admin.v2.DropRowRangeRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.v2.DropRowRangeRequest.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='row_key_prefix', full_name='google.bigtable.admin.v2.DropRowRangeRequest.row_key_prefix', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='delete_all_data_from_table', full_name='google.bigtable.admin.v2.DropRowRangeRequest.delete_all_data_from_table', index=2, - number=3, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name='target', full_name='google.bigtable.admin.v2.DropRowRangeRequest.target', - index=0, containing_type=None, fields=[]), - ], - serialized_start=600, - serialized_end=709, + name="DropRowRangeRequest", + full_name="google.bigtable.admin.v2.DropRowRangeRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="name", + 
full_name="google.bigtable.admin.v2.DropRowRangeRequest.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="row_key_prefix", + full_name="google.bigtable.admin.v2.DropRowRangeRequest.row_key_prefix", + index=1, + number=2, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b(""), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="delete_all_data_from_table", + full_name="google.bigtable.admin.v2.DropRowRangeRequest.delete_all_data_from_table", + index=2, + number=3, + type=8, + cpp_type=7, + label=1, + has_default_value=False, + default_value=False, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name="target", + full_name="google.bigtable.admin.v2.DropRowRangeRequest.target", + index=0, + containing_type=None, + fields=[], + ) + ], + serialized_start=600, + serialized_end=709, ) _LISTTABLESREQUEST = _descriptor.Descriptor( - name='ListTablesRequest', - full_name='google.bigtable.admin.v2.ListTablesRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='parent', full_name='google.bigtable.admin.v2.ListTablesRequest.parent', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='view', full_name='google.bigtable.admin.v2.ListTablesRequest.view', index=1, - number=2, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='page_size', full_name='google.bigtable.admin.v2.ListTablesRequest.page_size', index=2, - number=4, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='page_token', full_name='google.bigtable.admin.v2.ListTablesRequest.page_token', index=3, - number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=711, - serialized_end=837, + name="ListTablesRequest", + full_name="google.bigtable.admin.v2.ListTablesRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="parent", + 
full_name="google.bigtable.admin.v2.ListTablesRequest.parent", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="view", + full_name="google.bigtable.admin.v2.ListTablesRequest.view", + index=1, + number=2, + type=14, + cpp_type=8, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="page_size", + full_name="google.bigtable.admin.v2.ListTablesRequest.page_size", + index=2, + number=4, + type=5, + cpp_type=1, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="page_token", + full_name="google.bigtable.admin.v2.ListTablesRequest.page_token", + index=3, + number=3, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=711, + serialized_end=837, ) _LISTTABLESRESPONSE = _descriptor.Descriptor( - name='ListTablesResponse', - full_name='google.bigtable.admin.v2.ListTablesResponse', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='tables', full_name='google.bigtable.admin.v2.ListTablesResponse.tables', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='next_page_token', full_name='google.bigtable.admin.v2.ListTablesResponse.next_page_token', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=839, - serialized_end=933, + name="ListTablesResponse", + full_name="google.bigtable.admin.v2.ListTablesResponse", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="tables", + full_name="google.bigtable.admin.v2.ListTablesResponse.tables", + index=0, + number=1, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="next_page_token", + full_name="google.bigtable.admin.v2.ListTablesResponse.next_page_token", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + 
default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=839, + serialized_end=933, ) _GETTABLEREQUEST = _descriptor.Descriptor( - name='GetTableRequest', - full_name='google.bigtable.admin.v2.GetTableRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.v2.GetTableRequest.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='view', full_name='google.bigtable.admin.v2.GetTableRequest.view', index=1, - number=2, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=935, - serialized_end=1018, + name="GetTableRequest", + full_name="google.bigtable.admin.v2.GetTableRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.admin.v2.GetTableRequest.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="view", + full_name="google.bigtable.admin.v2.GetTableRequest.view", + index=1, + number=2, + type=14, + cpp_type=8, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=935, + serialized_end=1018, ) _DELETETABLEREQUEST = _descriptor.Descriptor( - name='DeleteTableRequest', - full_name='google.bigtable.admin.v2.DeleteTableRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.v2.DeleteTableRequest.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1020, - serialized_end=1054, + name="DeleteTableRequest", + full_name="google.bigtable.admin.v2.DeleteTableRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + 
name="name", + full_name="google.bigtable.admin.v2.DeleteTableRequest.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ) + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1020, + serialized_end=1054, ) _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION = _descriptor.Descriptor( - name='Modification', - full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='id', full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.id', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='create', full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.create', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='update', full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.update', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='drop', full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.drop', index=3, - number=4, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name='mod', full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.mod', - index=0, containing_type=None, fields=[]), - ], - serialized_start=1194, - serialized_end=1359, + name="Modification", + full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="id", + full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.id", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="create", + full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.create", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + 
is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="update", + full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.update", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="drop", + full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.drop", + index=3, + number=4, + type=8, + cpp_type=7, + label=1, + has_default_value=False, + default_value=False, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name="mod", + full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.mod", + index=0, + containing_type=None, + fields=[], + ) + ], + serialized_start=1194, + serialized_end=1359, ) _MODIFYCOLUMNFAMILIESREQUEST = _descriptor.Descriptor( - name='ModifyColumnFamiliesRequest', - full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='modifications', full_name='google.bigtable.admin.v2.ModifyColumnFamiliesRequest.modifications', index=1, - number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION, ], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1057, - serialized_end=1359, + name="ModifyColumnFamiliesRequest", + full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="modifications", + full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest.modifications", + index=1, + number=2, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION], + enum_types=[], + options=None, 
+ is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1057, + serialized_end=1359, ) _GENERATECONSISTENCYTOKENREQUEST = _descriptor.Descriptor( - name='GenerateConsistencyTokenRequest', - full_name='google.bigtable.admin.v2.GenerateConsistencyTokenRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.v2.GenerateConsistencyTokenRequest.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1361, - serialized_end=1408, + name="GenerateConsistencyTokenRequest", + full_name="google.bigtable.admin.v2.GenerateConsistencyTokenRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.admin.v2.GenerateConsistencyTokenRequest.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ) + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1361, + serialized_end=1408, ) _GENERATECONSISTENCYTOKENRESPONSE = _descriptor.Descriptor( - name='GenerateConsistencyTokenResponse', - full_name='google.bigtable.admin.v2.GenerateConsistencyTokenResponse', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='consistency_token', full_name='google.bigtable.admin.v2.GenerateConsistencyTokenResponse.consistency_token', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1410, - serialized_end=1471, + name="GenerateConsistencyTokenResponse", + full_name="google.bigtable.admin.v2.GenerateConsistencyTokenResponse", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="consistency_token", + full_name="google.bigtable.admin.v2.GenerateConsistencyTokenResponse.consistency_token", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ) + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1410, + serialized_end=1471, ) _CHECKCONSISTENCYREQUEST = _descriptor.Descriptor( - name='CheckConsistencyRequest', - full_name='google.bigtable.admin.v2.CheckConsistencyRequest', 
- filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.v2.CheckConsistencyRequest.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='consistency_token', full_name='google.bigtable.admin.v2.CheckConsistencyRequest.consistency_token', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1473, - serialized_end=1539, + name="CheckConsistencyRequest", + full_name="google.bigtable.admin.v2.CheckConsistencyRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.admin.v2.CheckConsistencyRequest.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="consistency_token", + full_name="google.bigtable.admin.v2.CheckConsistencyRequest.consistency_token", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1473, + serialized_end=1539, ) _CHECKCONSISTENCYRESPONSE = _descriptor.Descriptor( - name='CheckConsistencyResponse', - full_name='google.bigtable.admin.v2.CheckConsistencyResponse', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='consistent', full_name='google.bigtable.admin.v2.CheckConsistencyResponse.consistent', index=0, - number=1, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1541, - serialized_end=1587, + name="CheckConsistencyResponse", + full_name="google.bigtable.admin.v2.CheckConsistencyResponse", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="consistent", + full_name="google.bigtable.admin.v2.CheckConsistencyResponse.consistent", + index=0, + number=1, + type=8, + cpp_type=7, + label=1, + has_default_value=False, + default_value=False, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, 
+ ) + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1541, + serialized_end=1587, ) _SNAPSHOTTABLEREQUEST = _descriptor.Descriptor( - name='SnapshotTableRequest', - full_name='google.bigtable.admin.v2.SnapshotTableRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.v2.SnapshotTableRequest.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='cluster', full_name='google.bigtable.admin.v2.SnapshotTableRequest.cluster', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='snapshot_id', full_name='google.bigtable.admin.v2.SnapshotTableRequest.snapshot_id', index=2, - number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='ttl', full_name='google.bigtable.admin.v2.SnapshotTableRequest.ttl', index=3, - number=4, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='description', full_name='google.bigtable.admin.v2.SnapshotTableRequest.description', index=4, - number=5, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1590, - serialized_end=1725, + name="SnapshotTableRequest", + full_name="google.bigtable.admin.v2.SnapshotTableRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.admin.v2.SnapshotTableRequest.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="cluster", + full_name="google.bigtable.admin.v2.SnapshotTableRequest.cluster", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="snapshot_id", + full_name="google.bigtable.admin.v2.SnapshotTableRequest.snapshot_id", + index=2, + number=3, + type=9, + 
cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="ttl", + full_name="google.bigtable.admin.v2.SnapshotTableRequest.ttl", + index=3, + number=4, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="description", + full_name="google.bigtable.admin.v2.SnapshotTableRequest.description", + index=4, + number=5, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1590, + serialized_end=1725, ) _GETSNAPSHOTREQUEST = _descriptor.Descriptor( - name='GetSnapshotRequest', - full_name='google.bigtable.admin.v2.GetSnapshotRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.v2.GetSnapshotRequest.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1727, - serialized_end=1761, + name="GetSnapshotRequest", + full_name="google.bigtable.admin.v2.GetSnapshotRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.admin.v2.GetSnapshotRequest.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ) + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1727, + serialized_end=1761, ) _LISTSNAPSHOTSREQUEST = _descriptor.Descriptor( - name='ListSnapshotsRequest', - full_name='google.bigtable.admin.v2.ListSnapshotsRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='parent', full_name='google.bigtable.admin.v2.ListSnapshotsRequest.parent', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='page_size', full_name='google.bigtable.admin.v2.ListSnapshotsRequest.page_size', index=1, - number=2, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, 
containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='page_token', full_name='google.bigtable.admin.v2.ListSnapshotsRequest.page_token', index=2, - number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1763, - serialized_end=1840, + name="ListSnapshotsRequest", + full_name="google.bigtable.admin.v2.ListSnapshotsRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="parent", + full_name="google.bigtable.admin.v2.ListSnapshotsRequest.parent", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="page_size", + full_name="google.bigtable.admin.v2.ListSnapshotsRequest.page_size", + index=1, + number=2, + type=5, + cpp_type=1, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="page_token", + full_name="google.bigtable.admin.v2.ListSnapshotsRequest.page_token", + index=2, + number=3, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1763, + serialized_end=1840, ) _LISTSNAPSHOTSRESPONSE = _descriptor.Descriptor( - name='ListSnapshotsResponse', - full_name='google.bigtable.admin.v2.ListSnapshotsResponse', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='snapshots', full_name='google.bigtable.admin.v2.ListSnapshotsResponse.snapshots', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='next_page_token', full_name='google.bigtable.admin.v2.ListSnapshotsResponse.next_page_token', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1842, - serialized_end=1945, + name="ListSnapshotsResponse", + full_name="google.bigtable.admin.v2.ListSnapshotsResponse", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + 
_descriptor.FieldDescriptor( + name="snapshots", + full_name="google.bigtable.admin.v2.ListSnapshotsResponse.snapshots", + index=0, + number=1, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="next_page_token", + full_name="google.bigtable.admin.v2.ListSnapshotsResponse.next_page_token", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1842, + serialized_end=1945, ) _DELETESNAPSHOTREQUEST = _descriptor.Descriptor( - name='DeleteSnapshotRequest', - full_name='google.bigtable.admin.v2.DeleteSnapshotRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.v2.DeleteSnapshotRequest.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1947, - serialized_end=1984, + name="DeleteSnapshotRequest", + full_name="google.bigtable.admin.v2.DeleteSnapshotRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.admin.v2.DeleteSnapshotRequest.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ) + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1947, + serialized_end=1984, ) _SNAPSHOTTABLEMETADATA = _descriptor.Descriptor( - name='SnapshotTableMetadata', - full_name='google.bigtable.admin.v2.SnapshotTableMetadata', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='original_request', full_name='google.bigtable.admin.v2.SnapshotTableMetadata.original_request', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='request_time', full_name='google.bigtable.admin.v2.SnapshotTableMetadata.request_time', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='finish_time', 
full_name='google.bigtable.admin.v2.SnapshotTableMetadata.finish_time', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1987, - serialized_end=2183, + name="SnapshotTableMetadata", + full_name="google.bigtable.admin.v2.SnapshotTableMetadata", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="original_request", + full_name="google.bigtable.admin.v2.SnapshotTableMetadata.original_request", + index=0, + number=1, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="request_time", + full_name="google.bigtable.admin.v2.SnapshotTableMetadata.request_time", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="finish_time", + full_name="google.bigtable.admin.v2.SnapshotTableMetadata.finish_time", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1987, + serialized_end=2183, ) _CREATETABLEFROMSNAPSHOTMETADATA = _descriptor.Descriptor( - name='CreateTableFromSnapshotMetadata', - full_name='google.bigtable.admin.v2.CreateTableFromSnapshotMetadata', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='original_request', full_name='google.bigtable.admin.v2.CreateTableFromSnapshotMetadata.original_request', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='request_time', full_name='google.bigtable.admin.v2.CreateTableFromSnapshotMetadata.request_time', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='finish_time', full_name='google.bigtable.admin.v2.CreateTableFromSnapshotMetadata.finish_time', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - 
extension_ranges=[], - oneofs=[ - ], - serialized_start=2186, - serialized_end=2402, + name="CreateTableFromSnapshotMetadata", + full_name="google.bigtable.admin.v2.CreateTableFromSnapshotMetadata", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="original_request", + full_name="google.bigtable.admin.v2.CreateTableFromSnapshotMetadata.original_request", + index=0, + number=1, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="request_time", + full_name="google.bigtable.admin.v2.CreateTableFromSnapshotMetadata.request_time", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="finish_time", + full_name="google.bigtable.admin.v2.CreateTableFromSnapshotMetadata.finish_time", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=2186, + serialized_end=2402, ) _CREATETABLEREQUEST_SPLIT.containing_type = _CREATETABLEREQUEST -_CREATETABLEREQUEST.fields_by_name['table'].message_type = google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._TABLE -_CREATETABLEREQUEST.fields_by_name['initial_splits'].message_type = _CREATETABLEREQUEST_SPLIT -_DROPROWRANGEREQUEST.oneofs_by_name['target'].fields.append( - _DROPROWRANGEREQUEST.fields_by_name['row_key_prefix']) -_DROPROWRANGEREQUEST.fields_by_name['row_key_prefix'].containing_oneof = _DROPROWRANGEREQUEST.oneofs_by_name['target'] -_DROPROWRANGEREQUEST.oneofs_by_name['target'].fields.append( - _DROPROWRANGEREQUEST.fields_by_name['delete_all_data_from_table']) -_DROPROWRANGEREQUEST.fields_by_name['delete_all_data_from_table'].containing_oneof = _DROPROWRANGEREQUEST.oneofs_by_name['target'] -_LISTTABLESREQUEST.fields_by_name['view'].enum_type = google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._TABLE_VIEW -_LISTTABLESRESPONSE.fields_by_name['tables'].message_type = google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._TABLE -_GETTABLEREQUEST.fields_by_name['view'].enum_type = google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._TABLE_VIEW -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['create'].message_type = google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._COLUMNFAMILY -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['update'].message_type = google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._COLUMNFAMILY +_CREATETABLEREQUEST.fields_by_name[ + "table" +].message_type = ( + google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._TABLE +) +_CREATETABLEREQUEST.fields_by_name[ + "initial_splits" +].message_type = _CREATETABLEREQUEST_SPLIT +_DROPROWRANGEREQUEST.oneofs_by_name["target"].fields.append( + 
_DROPROWRANGEREQUEST.fields_by_name["row_key_prefix"] +) +_DROPROWRANGEREQUEST.fields_by_name[ + "row_key_prefix" +].containing_oneof = _DROPROWRANGEREQUEST.oneofs_by_name["target"] +_DROPROWRANGEREQUEST.oneofs_by_name["target"].fields.append( + _DROPROWRANGEREQUEST.fields_by_name["delete_all_data_from_table"] +) +_DROPROWRANGEREQUEST.fields_by_name[ + "delete_all_data_from_table" +].containing_oneof = _DROPROWRANGEREQUEST.oneofs_by_name["target"] +_LISTTABLESREQUEST.fields_by_name[ + "view" +].enum_type = ( + google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._TABLE_VIEW +) +_LISTTABLESRESPONSE.fields_by_name[ + "tables" +].message_type = ( + google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._TABLE +) +_GETTABLEREQUEST.fields_by_name[ + "view" +].enum_type = ( + google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._TABLE_VIEW +) +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name[ + "create" +].message_type = ( + google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._COLUMNFAMILY +) +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name[ + "update" +].message_type = ( + google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._COLUMNFAMILY +) _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.containing_type = _MODIFYCOLUMNFAMILIESREQUEST -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name['mod'].fields.append( - _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['create']) -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['create'].containing_oneof = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name['mod'] -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name['mod'].fields.append( - _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['update']) -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['update'].containing_oneof = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name['mod'] -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name['mod'].fields.append( - _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['drop']) -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name['drop'].containing_oneof = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name['mod'] -_MODIFYCOLUMNFAMILIESREQUEST.fields_by_name['modifications'].message_type = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION -_SNAPSHOTTABLEREQUEST.fields_by_name['ttl'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION -_LISTSNAPSHOTSRESPONSE.fields_by_name['snapshots'].message_type = google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._SNAPSHOT -_SNAPSHOTTABLEMETADATA.fields_by_name['original_request'].message_type = _SNAPSHOTTABLEREQUEST -_SNAPSHOTTABLEMETADATA.fields_by_name['request_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_SNAPSHOTTABLEMETADATA.fields_by_name['finish_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_CREATETABLEFROMSNAPSHOTMETADATA.fields_by_name['original_request'].message_type = _CREATETABLEFROMSNAPSHOTREQUEST -_CREATETABLEFROMSNAPSHOTMETADATA.fields_by_name['request_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_CREATETABLEFROMSNAPSHOTMETADATA.fields_by_name['finish_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -DESCRIPTOR.message_types_by_name['CreateTableRequest'] = _CREATETABLEREQUEST -DESCRIPTOR.message_types_by_name['CreateTableFromSnapshotRequest'] = _CREATETABLEFROMSNAPSHOTREQUEST 
-DESCRIPTOR.message_types_by_name['DropRowRangeRequest'] = _DROPROWRANGEREQUEST -DESCRIPTOR.message_types_by_name['ListTablesRequest'] = _LISTTABLESREQUEST -DESCRIPTOR.message_types_by_name['ListTablesResponse'] = _LISTTABLESRESPONSE -DESCRIPTOR.message_types_by_name['GetTableRequest'] = _GETTABLEREQUEST -DESCRIPTOR.message_types_by_name['DeleteTableRequest'] = _DELETETABLEREQUEST -DESCRIPTOR.message_types_by_name['ModifyColumnFamiliesRequest'] = _MODIFYCOLUMNFAMILIESREQUEST -DESCRIPTOR.message_types_by_name['GenerateConsistencyTokenRequest'] = _GENERATECONSISTENCYTOKENREQUEST -DESCRIPTOR.message_types_by_name['GenerateConsistencyTokenResponse'] = _GENERATECONSISTENCYTOKENRESPONSE -DESCRIPTOR.message_types_by_name['CheckConsistencyRequest'] = _CHECKCONSISTENCYREQUEST -DESCRIPTOR.message_types_by_name['CheckConsistencyResponse'] = _CHECKCONSISTENCYRESPONSE -DESCRIPTOR.message_types_by_name['SnapshotTableRequest'] = _SNAPSHOTTABLEREQUEST -DESCRIPTOR.message_types_by_name['GetSnapshotRequest'] = _GETSNAPSHOTREQUEST -DESCRIPTOR.message_types_by_name['ListSnapshotsRequest'] = _LISTSNAPSHOTSREQUEST -DESCRIPTOR.message_types_by_name['ListSnapshotsResponse'] = _LISTSNAPSHOTSRESPONSE -DESCRIPTOR.message_types_by_name['DeleteSnapshotRequest'] = _DELETESNAPSHOTREQUEST -DESCRIPTOR.message_types_by_name['SnapshotTableMetadata'] = _SNAPSHOTTABLEMETADATA -DESCRIPTOR.message_types_by_name['CreateTableFromSnapshotMetadata'] = _CREATETABLEFROMSNAPSHOTMETADATA +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name["mod"].fields.append( + _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name["create"] +) +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name[ + "create" +].containing_oneof = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name["mod"] +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name["mod"].fields.append( + _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name["update"] +) +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name[ + "update" +].containing_oneof = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name["mod"] +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name["mod"].fields.append( + _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name["drop"] +) +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name[ + "drop" +].containing_oneof = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name["mod"] +_MODIFYCOLUMNFAMILIESREQUEST.fields_by_name[ + "modifications" +].message_type = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION +_SNAPSHOTTABLEREQUEST.fields_by_name[ + "ttl" +].message_type = google_dot_protobuf_dot_duration__pb2._DURATION +_LISTSNAPSHOTSRESPONSE.fields_by_name[ + "snapshots" +].message_type = ( + google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._SNAPSHOT +) +_SNAPSHOTTABLEMETADATA.fields_by_name[ + "original_request" +].message_type = _SNAPSHOTTABLEREQUEST +_SNAPSHOTTABLEMETADATA.fields_by_name[ + "request_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_SNAPSHOTTABLEMETADATA.fields_by_name[ + "finish_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_CREATETABLEFROMSNAPSHOTMETADATA.fields_by_name[ + "original_request" +].message_type = _CREATETABLEFROMSNAPSHOTREQUEST +_CREATETABLEFROMSNAPSHOTMETADATA.fields_by_name[ + "request_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_CREATETABLEFROMSNAPSHOTMETADATA.fields_by_name[ + "finish_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP 
+DESCRIPTOR.message_types_by_name["CreateTableRequest"] = _CREATETABLEREQUEST +DESCRIPTOR.message_types_by_name[ + "CreateTableFromSnapshotRequest" +] = _CREATETABLEFROMSNAPSHOTREQUEST +DESCRIPTOR.message_types_by_name["DropRowRangeRequest"] = _DROPROWRANGEREQUEST +DESCRIPTOR.message_types_by_name["ListTablesRequest"] = _LISTTABLESREQUEST +DESCRIPTOR.message_types_by_name["ListTablesResponse"] = _LISTTABLESRESPONSE +DESCRIPTOR.message_types_by_name["GetTableRequest"] = _GETTABLEREQUEST +DESCRIPTOR.message_types_by_name["DeleteTableRequest"] = _DELETETABLEREQUEST +DESCRIPTOR.message_types_by_name[ + "ModifyColumnFamiliesRequest" +] = _MODIFYCOLUMNFAMILIESREQUEST +DESCRIPTOR.message_types_by_name[ + "GenerateConsistencyTokenRequest" +] = _GENERATECONSISTENCYTOKENREQUEST +DESCRIPTOR.message_types_by_name[ + "GenerateConsistencyTokenResponse" +] = _GENERATECONSISTENCYTOKENRESPONSE +DESCRIPTOR.message_types_by_name["CheckConsistencyRequest"] = _CHECKCONSISTENCYREQUEST +DESCRIPTOR.message_types_by_name["CheckConsistencyResponse"] = _CHECKCONSISTENCYRESPONSE +DESCRIPTOR.message_types_by_name["SnapshotTableRequest"] = _SNAPSHOTTABLEREQUEST +DESCRIPTOR.message_types_by_name["GetSnapshotRequest"] = _GETSNAPSHOTREQUEST +DESCRIPTOR.message_types_by_name["ListSnapshotsRequest"] = _LISTSNAPSHOTSREQUEST +DESCRIPTOR.message_types_by_name["ListSnapshotsResponse"] = _LISTSNAPSHOTSRESPONSE +DESCRIPTOR.message_types_by_name["DeleteSnapshotRequest"] = _DELETESNAPSHOTREQUEST +DESCRIPTOR.message_types_by_name["SnapshotTableMetadata"] = _SNAPSHOTTABLEMETADATA +DESCRIPTOR.message_types_by_name[ + "CreateTableFromSnapshotMetadata" +] = _CREATETABLEFROMSNAPSHOTMETADATA _sym_db.RegisterFileDescriptor(DESCRIPTOR) -CreateTableRequest = _reflection.GeneratedProtocolMessageType('CreateTableRequest', (_message.Message,), dict( - - Split = _reflection.GeneratedProtocolMessageType('Split', (_message.Message,), dict( - DESCRIPTOR = _CREATETABLEREQUEST_SPLIT, - __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' - , - __doc__ = """An initial split point for a newly created table. +CreateTableRequest = _reflection.GeneratedProtocolMessageType( + "CreateTableRequest", + (_message.Message,), + dict( + Split=_reflection.GeneratedProtocolMessageType( + "Split", + (_message.Message,), + dict( + DESCRIPTOR=_CREATETABLEREQUEST_SPLIT, + __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", + __doc__="""An initial split point for a newly created table. Attributes: key: Row key to use as an initial tablet boundary. 
""", - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateTableRequest.Split) - )) - , - DESCRIPTOR = _CREATETABLEREQUEST, - __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' - , - __doc__ = """Request message for + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateTableRequest.Split) + ), + ), + DESCRIPTOR=_CREATETABLEREQUEST, + __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", + __doc__="""Request message for [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] @@ -985,16 +1558,19 @@ Tablet 4 ``[customer_2, other) => {"customer_2"}.`` - Tablet 5 ``[other, ) => {"other", "zz"}.`` """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateTableRequest) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateTableRequest) + ), +) _sym_db.RegisterMessage(CreateTableRequest) _sym_db.RegisterMessage(CreateTableRequest.Split) -CreateTableFromSnapshotRequest = _reflection.GeneratedProtocolMessageType('CreateTableFromSnapshotRequest', (_message.Message,), dict( - DESCRIPTOR = _CREATETABLEFROMSNAPSHOTREQUEST, - __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' - , - __doc__ = """Request message for +CreateTableFromSnapshotRequest = _reflection.GeneratedProtocolMessageType( + "CreateTableFromSnapshotRequest", + (_message.Message,), + dict( + DESCRIPTOR=_CREATETABLEFROMSNAPSHOTREQUEST, + __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", + __doc__="""Request message for [google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot] Note: This is a private alpha release of Cloud Bigtable snapshots. This @@ -1019,15 +1595,18 @@ instance. Values are of the form ``projects//instance s//clusters//snapshots/``. """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateTableFromSnapshotRequest) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateTableFromSnapshotRequest) + ), +) _sym_db.RegisterMessage(CreateTableFromSnapshotRequest) -DropRowRangeRequest = _reflection.GeneratedProtocolMessageType('DropRowRangeRequest', (_message.Message,), dict( - DESCRIPTOR = _DROPROWRANGEREQUEST, - __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' - , - __doc__ = """Request message for +DropRowRangeRequest = _reflection.GeneratedProtocolMessageType( + "DropRowRangeRequest", + (_message.Message,), + dict( + DESCRIPTOR=_DROPROWRANGEREQUEST, + __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", + __doc__="""Request message for [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] @@ -1045,15 +1624,18 @@ Delete all rows in the table. Setting this to false is a no- op. 
""", - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DropRowRangeRequest) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DropRowRangeRequest) + ), +) _sym_db.RegisterMessage(DropRowRangeRequest) -ListTablesRequest = _reflection.GeneratedProtocolMessageType('ListTablesRequest', (_message.Message,), dict( - DESCRIPTOR = _LISTTABLESREQUEST, - __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' - , - __doc__ = """Request message for +ListTablesRequest = _reflection.GeneratedProtocolMessageType( + "ListTablesRequest", + (_message.Message,), + dict( + DESCRIPTOR=_LISTTABLESREQUEST, + __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", + __doc__="""Request message for [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] @@ -1072,15 +1654,18 @@ page_token: The value of ``next_page_token`` returned by a previous call. """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListTablesRequest) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListTablesRequest) + ), +) _sym_db.RegisterMessage(ListTablesRequest) -ListTablesResponse = _reflection.GeneratedProtocolMessageType('ListTablesResponse', (_message.Message,), dict( - DESCRIPTOR = _LISTTABLESRESPONSE, - __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' - , - __doc__ = """Response message for +ListTablesResponse = _reflection.GeneratedProtocolMessageType( + "ListTablesResponse", + (_message.Message,), + dict( + DESCRIPTOR=_LISTTABLESRESPONSE, + __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", + __doc__="""Response message for [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] @@ -1092,15 +1677,18 @@ Pass this value to ``page_token`` in another request to get the next page of results. """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListTablesResponse) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListTablesResponse) + ), +) _sym_db.RegisterMessage(ListTablesResponse) -GetTableRequest = _reflection.GeneratedProtocolMessageType('GetTableRequest', (_message.Message,), dict( - DESCRIPTOR = _GETTABLEREQUEST, - __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' - , - __doc__ = """Request message for +GetTableRequest = _reflection.GeneratedProtocolMessageType( + "GetTableRequest", + (_message.Message,), + dict( + DESCRIPTOR=_GETTABLEREQUEST, + __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", + __doc__="""Request message for [google.bigtable.admin.v2.BigtableTableAdmin.GetTable][google.bigtable.admin.v2.BigtableTableAdmin.GetTable] @@ -1112,15 +1700,18 @@ The view to be applied to the returned table's fields. Defaults to ``SCHEMA_VIEW`` if unspecified. 
""", - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetTableRequest) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetTableRequest) + ), +) _sym_db.RegisterMessage(GetTableRequest) -DeleteTableRequest = _reflection.GeneratedProtocolMessageType('DeleteTableRequest', (_message.Message,), dict( - DESCRIPTOR = _DELETETABLEREQUEST, - __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' - , - __doc__ = """Request message for +DeleteTableRequest = _reflection.GeneratedProtocolMessageType( + "DeleteTableRequest", + (_message.Message,), + dict( + DESCRIPTOR=_DELETETABLEREQUEST, + __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", + __doc__="""Request message for [google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable] @@ -1130,17 +1721,22 @@ form ``projects//instances//tables/
``. """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteTableRequest) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteTableRequest) + ), +) _sym_db.RegisterMessage(DeleteTableRequest) -ModifyColumnFamiliesRequest = _reflection.GeneratedProtocolMessageType('ModifyColumnFamiliesRequest', (_message.Message,), dict( - - Modification = _reflection.GeneratedProtocolMessageType('Modification', (_message.Message,), dict( - DESCRIPTOR = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION, - __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' - , - __doc__ = """A create, update, or delete of a particular column family. +ModifyColumnFamiliesRequest = _reflection.GeneratedProtocolMessageType( + "ModifyColumnFamiliesRequest", + (_message.Message,), + dict( + Modification=_reflection.GeneratedProtocolMessageType( + "Modification", + (_message.Message,), + dict( + DESCRIPTOR=_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION, + __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", + __doc__="""A create, update, or delete of a particular column family. Attributes: @@ -1158,13 +1754,12 @@ Drop (delete) the column family with the given ID, or fail if no such family exists. """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification) - )) - , - DESCRIPTOR = _MODIFYCOLUMNFAMILIESREQUEST, - __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' - , - __doc__ = """Request message for + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification) + ), + ), + DESCRIPTOR=_MODIFYCOLUMNFAMILIESREQUEST, + __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", + __doc__="""Request message for [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies] @@ -1179,16 +1774,19 @@ earlier modifications can be masked by later ones (in the case of repeated updates to the same family, for example). """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ModifyColumnFamiliesRequest) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ModifyColumnFamiliesRequest) + ), +) _sym_db.RegisterMessage(ModifyColumnFamiliesRequest) _sym_db.RegisterMessage(ModifyColumnFamiliesRequest.Modification) -GenerateConsistencyTokenRequest = _reflection.GeneratedProtocolMessageType('GenerateConsistencyTokenRequest', (_message.Message,), dict( - DESCRIPTOR = _GENERATECONSISTENCYTOKENREQUEST, - __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' - , - __doc__ = """Request message for +GenerateConsistencyTokenRequest = _reflection.GeneratedProtocolMessageType( + "GenerateConsistencyTokenRequest", + (_message.Message,), + dict( + DESCRIPTOR=_GENERATECONSISTENCYTOKENREQUEST, + __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", + __doc__="""Request message for [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] @@ -1198,15 +1796,18 @@ token. Values are of the form ``projects//instances//tables/
``. """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GenerateConsistencyTokenRequest) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GenerateConsistencyTokenRequest) + ), +) _sym_db.RegisterMessage(GenerateConsistencyTokenRequest) -GenerateConsistencyTokenResponse = _reflection.GeneratedProtocolMessageType('GenerateConsistencyTokenResponse', (_message.Message,), dict( - DESCRIPTOR = _GENERATECONSISTENCYTOKENRESPONSE, - __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' - , - __doc__ = """Response message for +GenerateConsistencyTokenResponse = _reflection.GeneratedProtocolMessageType( + "GenerateConsistencyTokenResponse", + (_message.Message,), + dict( + DESCRIPTOR=_GENERATECONSISTENCYTOKENRESPONSE, + __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", + __doc__="""Response message for [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] @@ -1214,15 +1815,18 @@ consistency_token: The generated consistency token. """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GenerateConsistencyTokenResponse) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GenerateConsistencyTokenResponse) + ), +) _sym_db.RegisterMessage(GenerateConsistencyTokenResponse) -CheckConsistencyRequest = _reflection.GeneratedProtocolMessageType('CheckConsistencyRequest', (_message.Message,), dict( - DESCRIPTOR = _CHECKCONSISTENCYREQUEST, - __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' - , - __doc__ = """Request message for +CheckConsistencyRequest = _reflection.GeneratedProtocolMessageType( + "CheckConsistencyRequest", + (_message.Message,), + dict( + DESCRIPTOR=_CHECKCONSISTENCYREQUEST, + __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", + __doc__="""Request message for [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] @@ -1235,15 +1839,18 @@ The token created using GenerateConsistencyToken for the Table. """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CheckConsistencyRequest) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CheckConsistencyRequest) + ), +) _sym_db.RegisterMessage(CheckConsistencyRequest) -CheckConsistencyResponse = _reflection.GeneratedProtocolMessageType('CheckConsistencyResponse', (_message.Message,), dict( - DESCRIPTOR = _CHECKCONSISTENCYRESPONSE, - __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' - , - __doc__ = """Response message for +CheckConsistencyResponse = _reflection.GeneratedProtocolMessageType( + "CheckConsistencyResponse", + (_message.Message,), + dict( + DESCRIPTOR=_CHECKCONSISTENCYRESPONSE, + __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", + __doc__="""Response message for [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] @@ -1253,15 +1860,18 @@ replication has caught up with the restrictions specified in the request. 
""", - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CheckConsistencyResponse) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CheckConsistencyResponse) + ), +) _sym_db.RegisterMessage(CheckConsistencyResponse) -SnapshotTableRequest = _reflection.GeneratedProtocolMessageType('SnapshotTableRequest', (_message.Message,), dict( - DESCRIPTOR = _SNAPSHOTTABLEREQUEST, - __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' - , - __doc__ = """Request message for +SnapshotTableRequest = _reflection.GeneratedProtocolMessageType( + "SnapshotTableRequest", + (_message.Message,), + dict( + DESCRIPTOR=_SNAPSHOTTABLEREQUEST, + __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", + __doc__="""Request message for [google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable][google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable] Note: This is a private alpha release of Cloud Bigtable snapshots. This @@ -1294,15 +1904,18 @@ description: Description of the snapshot. """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.SnapshotTableRequest) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.SnapshotTableRequest) + ), +) _sym_db.RegisterMessage(SnapshotTableRequest) -GetSnapshotRequest = _reflection.GeneratedProtocolMessageType('GetSnapshotRequest', (_message.Message,), dict( - DESCRIPTOR = _GETSNAPSHOTREQUEST, - __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' - , - __doc__ = """Request message for +GetSnapshotRequest = _reflection.GeneratedProtocolMessageType( + "GetSnapshotRequest", + (_message.Message,), + dict( + DESCRIPTOR=_GETSNAPSHOTREQUEST, + __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", + __doc__="""Request message for [google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot] Note: This is a private alpha release of Cloud Bigtable snapshots. This @@ -1318,15 +1931,18 @@ form ``projects//instances//clusters//snapshots/``. """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetSnapshotRequest) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetSnapshotRequest) + ), +) _sym_db.RegisterMessage(GetSnapshotRequest) -ListSnapshotsRequest = _reflection.GeneratedProtocolMessageType('ListSnapshotsRequest', (_message.Message,), dict( - DESCRIPTOR = _LISTSNAPSHOTSREQUEST, - __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' - , - __doc__ = """Request message for +ListSnapshotsRequest = _reflection.GeneratedProtocolMessageType( + "ListSnapshotsRequest", + (_message.Message,), + dict( + DESCRIPTOR=_LISTSNAPSHOTSREQUEST, + __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", + __doc__="""Request message for [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] Note: This is a private alpha release of Cloud Bigtable snapshots. This @@ -1349,15 +1965,18 @@ page_token: The value of ``next_page_token`` returned by a previous call. 
""", - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListSnapshotsRequest) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListSnapshotsRequest) + ), +) _sym_db.RegisterMessage(ListSnapshotsRequest) -ListSnapshotsResponse = _reflection.GeneratedProtocolMessageType('ListSnapshotsResponse', (_message.Message,), dict( - DESCRIPTOR = _LISTSNAPSHOTSRESPONSE, - __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' - , - __doc__ = """Response message for +ListSnapshotsResponse = _reflection.GeneratedProtocolMessageType( + "ListSnapshotsResponse", + (_message.Message,), + dict( + DESCRIPTOR=_LISTSNAPSHOTSRESPONSE, + __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", + __doc__="""Response message for [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] Note: This is a private alpha release of Cloud Bigtable snapshots. This @@ -1375,15 +1994,18 @@ response. Pass this value to ``page_token`` in another request to get the next page of results. """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListSnapshotsResponse) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListSnapshotsResponse) + ), +) _sym_db.RegisterMessage(ListSnapshotsResponse) -DeleteSnapshotRequest = _reflection.GeneratedProtocolMessageType('DeleteSnapshotRequest', (_message.Message,), dict( - DESCRIPTOR = _DELETESNAPSHOTREQUEST, - __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' - , - __doc__ = """Request message for +DeleteSnapshotRequest = _reflection.GeneratedProtocolMessageType( + "DeleteSnapshotRequest", + (_message.Message,), + dict( + DESCRIPTOR=_DELETESNAPSHOTREQUEST, + __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", + __doc__="""Request message for [google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot] Note: This is a private alpha release of Cloud Bigtable snapshots. This @@ -1399,15 +2021,18 @@ the form ``projects//instances//clusters//snapshots/``. """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteSnapshotRequest) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteSnapshotRequest) + ), +) _sym_db.RegisterMessage(DeleteSnapshotRequest) -SnapshotTableMetadata = _reflection.GeneratedProtocolMessageType('SnapshotTableMetadata', (_message.Message,), dict( - DESCRIPTOR = _SNAPSHOTTABLEMETADATA, - __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' - , - __doc__ = """The metadata for the Operation returned by SnapshotTable. +SnapshotTableMetadata = _reflection.GeneratedProtocolMessageType( + "SnapshotTableMetadata", + (_message.Message,), + dict( + DESCRIPTOR=_SNAPSHOTTABLEMETADATA, + __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", + __doc__="""The metadata for the Operation returned by SnapshotTable. Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. @@ -1426,15 +2051,18 @@ The time at which the operation failed or was completed successfully. 
""", - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.SnapshotTableMetadata) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.SnapshotTableMetadata) + ), +) _sym_db.RegisterMessage(SnapshotTableMetadata) -CreateTableFromSnapshotMetadata = _reflection.GeneratedProtocolMessageType('CreateTableFromSnapshotMetadata', (_message.Message,), dict( - DESCRIPTOR = _CREATETABLEFROMSNAPSHOTMETADATA, - __module__ = 'google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2' - , - __doc__ = """The metadata for the Operation returned by CreateTableFromSnapshot. +CreateTableFromSnapshotMetadata = _reflection.GeneratedProtocolMessageType( + "CreateTableFromSnapshotMetadata", + (_message.Message,), + dict( + DESCRIPTOR=_CREATETABLEFROMSNAPSHOTMETADATA, + __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", + __doc__="""The metadata for the Operation returned by CreateTableFromSnapshot. Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. @@ -1453,143 +2081,213 @@ The time at which the operation failed or was completed successfully. """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateTableFromSnapshotMetadata) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateTableFromSnapshotMetadata) + ), +) _sym_db.RegisterMessage(CreateTableFromSnapshotMetadata) DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\034com.google.bigtable.admin.v2B\027BigtableTableAdminProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2')) +DESCRIPTOR._options = _descriptor._ParseOptions( + descriptor_pb2.FileOptions(), + _b( + "\n\034com.google.bigtable.admin.v2B\027BigtableTableAdminProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2" + ), +) _BIGTABLETABLEADMIN = _descriptor.ServiceDescriptor( - name='BigtableTableAdmin', - full_name='google.bigtable.admin.v2.BigtableTableAdmin', - file=DESCRIPTOR, - index=0, - options=None, - serialized_start=2405, - serialized_end=4636, - methods=[ - _descriptor.MethodDescriptor( - name='CreateTable', - full_name='google.bigtable.admin.v2.BigtableTableAdmin.CreateTable', + name="BigtableTableAdmin", + full_name="google.bigtable.admin.v2.BigtableTableAdmin", + file=DESCRIPTOR, index=0, - containing_service=None, - input_type=_CREATETABLEREQUEST, - output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._TABLE, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002/\"*/v2/{parent=projects/*/instances/*}/tables:\001*')), - ), - _descriptor.MethodDescriptor( - name='CreateTableFromSnapshot', - full_name='google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot', - index=1, - containing_service=None, - input_type=_CREATETABLEFROMSNAPSHOTREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002B\"=/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot:\001*')), - ), - _descriptor.MethodDescriptor( - name='ListTables', - full_name='google.bigtable.admin.v2.BigtableTableAdmin.ListTables', - index=2, - containing_service=None, - 
input_type=_LISTTABLESREQUEST, - output_type=_LISTTABLESRESPONSE, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002,\022*/v2/{parent=projects/*/instances/*}/tables')), - ), - _descriptor.MethodDescriptor( - name='GetTable', - full_name='google.bigtable.admin.v2.BigtableTableAdmin.GetTable', - index=3, - containing_service=None, - input_type=_GETTABLEREQUEST, - output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._TABLE, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002,\022*/v2/{name=projects/*/instances/*/tables/*}')), - ), - _descriptor.MethodDescriptor( - name='DeleteTable', - full_name='google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable', - index=4, - containing_service=None, - input_type=_DELETETABLEREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002,**/v2/{name=projects/*/instances/*/tables/*}')), - ), - _descriptor.MethodDescriptor( - name='ModifyColumnFamilies', - full_name='google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies', - index=5, - containing_service=None, - input_type=_MODIFYCOLUMNFAMILIESREQUEST, - output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._TABLE, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002D\"?/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies:\001*')), - ), - _descriptor.MethodDescriptor( - name='DropRowRange', - full_name='google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange', - index=6, - containing_service=None, - input_type=_DROPROWRANGEREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002<\"7/v2/{name=projects/*/instances/*/tables/*}:dropRowRange:\001*')), - ), - _descriptor.MethodDescriptor( - name='GenerateConsistencyToken', - full_name='google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken', - index=7, - containing_service=None, - input_type=_GENERATECONSISTENCYTOKENREQUEST, - output_type=_GENERATECONSISTENCYTOKENRESPONSE, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002H\"C/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken:\001*')), - ), - _descriptor.MethodDescriptor( - name='CheckConsistency', - full_name='google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency', - index=8, - containing_service=None, - input_type=_CHECKCONSISTENCYREQUEST, - output_type=_CHECKCONSISTENCYRESPONSE, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002@\";/v2/{name=projects/*/instances/*/tables/*}:checkConsistency:\001*')), - ), - _descriptor.MethodDescriptor( - name='SnapshotTable', - full_name='google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable', - index=9, - containing_service=None, - input_type=_SNAPSHOTTABLEREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\0028\"3/v2/{name=projects/*/instances/*/tables/*}:snapshot:\001*')), - ), - _descriptor.MethodDescriptor( - name='GetSnapshot', - full_name='google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot', - index=10, - containing_service=None, - input_type=_GETSNAPSHOTREQUEST, - 
output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._SNAPSHOT, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002:\0228/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}')), - ), - _descriptor.MethodDescriptor( - name='ListSnapshots', - full_name='google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots', - index=11, - containing_service=None, - input_type=_LISTSNAPSHOTSREQUEST, - output_type=_LISTSNAPSHOTSRESPONSE, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002:\0228/v2/{parent=projects/*/instances/*/clusters/*}/snapshots')), - ), - _descriptor.MethodDescriptor( - name='DeleteSnapshot', - full_name='google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot', - index=12, - containing_service=None, - input_type=_DELETESNAPSHOTREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002:*8/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}')), - ), -]) + options=None, + serialized_start=2405, + serialized_end=4636, + methods=[ + _descriptor.MethodDescriptor( + name="CreateTable", + full_name="google.bigtable.admin.v2.BigtableTableAdmin.CreateTable", + index=0, + containing_service=None, + input_type=_CREATETABLEREQUEST, + output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._TABLE, + options=_descriptor._ParseOptions( + descriptor_pb2.MethodOptions(), + _b( + '\202\323\344\223\002/"*/v2/{parent=projects/*/instances/*}/tables:\001*' + ), + ), + ), + _descriptor.MethodDescriptor( + name="CreateTableFromSnapshot", + full_name="google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot", + index=1, + containing_service=None, + input_type=_CREATETABLEFROMSNAPSHOTREQUEST, + output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, + options=_descriptor._ParseOptions( + descriptor_pb2.MethodOptions(), + _b( + '\202\323\344\223\002B"=/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot:\001*' + ), + ), + ), + _descriptor.MethodDescriptor( + name="ListTables", + full_name="google.bigtable.admin.v2.BigtableTableAdmin.ListTables", + index=2, + containing_service=None, + input_type=_LISTTABLESREQUEST, + output_type=_LISTTABLESRESPONSE, + options=_descriptor._ParseOptions( + descriptor_pb2.MethodOptions(), + _b( + "\202\323\344\223\002,\022*/v2/{parent=projects/*/instances/*}/tables" + ), + ), + ), + _descriptor.MethodDescriptor( + name="GetTable", + full_name="google.bigtable.admin.v2.BigtableTableAdmin.GetTable", + index=3, + containing_service=None, + input_type=_GETTABLEREQUEST, + output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._TABLE, + options=_descriptor._ParseOptions( + descriptor_pb2.MethodOptions(), + _b( + "\202\323\344\223\002,\022*/v2/{name=projects/*/instances/*/tables/*}" + ), + ), + ), + _descriptor.MethodDescriptor( + name="DeleteTable", + full_name="google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable", + index=4, + containing_service=None, + input_type=_DELETETABLEREQUEST, + output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, + options=_descriptor._ParseOptions( + descriptor_pb2.MethodOptions(), + _b("\202\323\344\223\002,**/v2/{name=projects/*/instances/*/tables/*}"), + ), + ), + _descriptor.MethodDescriptor( + name="ModifyColumnFamilies", + full_name="google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies", + index=5, + containing_service=None, + 
input_type=_MODIFYCOLUMNFAMILIESREQUEST, + output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._TABLE, + options=_descriptor._ParseOptions( + descriptor_pb2.MethodOptions(), + _b( + '\202\323\344\223\002D"?/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies:\001*' + ), + ), + ), + _descriptor.MethodDescriptor( + name="DropRowRange", + full_name="google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange", + index=6, + containing_service=None, + input_type=_DROPROWRANGEREQUEST, + output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, + options=_descriptor._ParseOptions( + descriptor_pb2.MethodOptions(), + _b( + '\202\323\344\223\002<"7/v2/{name=projects/*/instances/*/tables/*}:dropRowRange:\001*' + ), + ), + ), + _descriptor.MethodDescriptor( + name="GenerateConsistencyToken", + full_name="google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken", + index=7, + containing_service=None, + input_type=_GENERATECONSISTENCYTOKENREQUEST, + output_type=_GENERATECONSISTENCYTOKENRESPONSE, + options=_descriptor._ParseOptions( + descriptor_pb2.MethodOptions(), + _b( + '\202\323\344\223\002H"C/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken:\001*' + ), + ), + ), + _descriptor.MethodDescriptor( + name="CheckConsistency", + full_name="google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency", + index=8, + containing_service=None, + input_type=_CHECKCONSISTENCYREQUEST, + output_type=_CHECKCONSISTENCYRESPONSE, + options=_descriptor._ParseOptions( + descriptor_pb2.MethodOptions(), + _b( + '\202\323\344\223\002@";/v2/{name=projects/*/instances/*/tables/*}:checkConsistency:\001*' + ), + ), + ), + _descriptor.MethodDescriptor( + name="SnapshotTable", + full_name="google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable", + index=9, + containing_service=None, + input_type=_SNAPSHOTTABLEREQUEST, + output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, + options=_descriptor._ParseOptions( + descriptor_pb2.MethodOptions(), + _b( + '\202\323\344\223\0028"3/v2/{name=projects/*/instances/*/tables/*}:snapshot:\001*' + ), + ), + ), + _descriptor.MethodDescriptor( + name="GetSnapshot", + full_name="google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot", + index=10, + containing_service=None, + input_type=_GETSNAPSHOTREQUEST, + output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._SNAPSHOT, + options=_descriptor._ParseOptions( + descriptor_pb2.MethodOptions(), + _b( + "\202\323\344\223\002:\0228/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}" + ), + ), + ), + _descriptor.MethodDescriptor( + name="ListSnapshots", + full_name="google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots", + index=11, + containing_service=None, + input_type=_LISTSNAPSHOTSREQUEST, + output_type=_LISTSNAPSHOTSRESPONSE, + options=_descriptor._ParseOptions( + descriptor_pb2.MethodOptions(), + _b( + "\202\323\344\223\002:\0228/v2/{parent=projects/*/instances/*/clusters/*}/snapshots" + ), + ), + ), + _descriptor.MethodDescriptor( + name="DeleteSnapshot", + full_name="google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot", + index=12, + containing_service=None, + input_type=_DELETESNAPSHOTREQUEST, + output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, + options=_descriptor._ParseOptions( + descriptor_pb2.MethodOptions(), + _b( + "\202\323\344\223\002:*8/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}" + ), + ), + ), + ], +) _sym_db.RegisterServiceDescriptor(_BIGTABLETABLEADMIN) 
-DESCRIPTOR.services_by_name['BigtableTableAdmin'] = _BIGTABLETABLEADMIN +DESCRIPTOR.services_by_name["BigtableTableAdmin"] = _BIGTABLETABLEADMIN # @@protoc_insertion_point(module_scope) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py index 278c914f023b..2c702413b4e4 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py @@ -1,112 +1,118 @@ # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! import grpc -from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2 as google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2 -from google.cloud.bigtable_admin_v2.proto import table_pb2 as google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2 -from google.longrunning import operations_pb2 as google_dot_longrunning_dot_operations__pb2 +from google.cloud.bigtable_admin_v2.proto import ( + bigtable_table_admin_pb2 as google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2, +) +from google.cloud.bigtable_admin_v2.proto import ( + table_pb2 as google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2, +) +from google.longrunning import ( + operations_pb2 as google_dot_longrunning_dot_operations__pb2, +) from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 class BigtableTableAdminStub(object): - """Service for creating, configuring, and deleting Cloud Bigtable tables. + """Service for creating, configuring, and deleting Cloud Bigtable tables. Provides access to the table schemas only, not the data stored within the tables. """ - def __init__(self, channel): - """Constructor. + def __init__(self, channel): + """Constructor. Args: channel: A grpc.Channel. 
""" - self.CreateTable = channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/CreateTable', - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Table.FromString, + self.CreateTable = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTable", + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Table.FromString, ) - self.CreateTableFromSnapshot = channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/CreateTableFromSnapshot', - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableFromSnapshotRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, + self.CreateTableFromSnapshot = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTableFromSnapshot", + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableFromSnapshotRequest.SerializeToString, + response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, ) - self.ListTables = channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/ListTables', - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesResponse.FromString, + self.ListTables = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/ListTables", + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesResponse.FromString, ) - self.GetTable = channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/GetTable', - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetTableRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Table.FromString, + self.GetTable = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/GetTable", + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetTableRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Table.FromString, ) - self.DeleteTable = channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable', - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteTableRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + self.DeleteTable = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable", + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteTableRequest.SerializeToString, + 
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) - self.ModifyColumnFamilies = channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/ModifyColumnFamilies', - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ModifyColumnFamiliesRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Table.FromString, + self.ModifyColumnFamilies = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/ModifyColumnFamilies", + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ModifyColumnFamiliesRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Table.FromString, ) - self.DropRowRange = channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/DropRowRange', - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DropRowRangeRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + self.DropRowRange = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/DropRowRange", + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DropRowRangeRequest.SerializeToString, + response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) - self.GenerateConsistencyToken = channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/GenerateConsistencyToken', - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenResponse.FromString, + self.GenerateConsistencyToken = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/GenerateConsistencyToken", + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenResponse.FromString, ) - self.CheckConsistency = channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/CheckConsistency', - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyResponse.FromString, + self.CheckConsistency = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/CheckConsistency", + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyResponse.FromString, ) - self.SnapshotTable = channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/SnapshotTable', - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.SnapshotTableRequest.SerializeToString, - 
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, + self.SnapshotTable = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/SnapshotTable", + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.SnapshotTableRequest.SerializeToString, + response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, ) - self.GetSnapshot = channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/GetSnapshot', - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetSnapshotRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Snapshot.FromString, + self.GetSnapshot = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/GetSnapshot", + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetSnapshotRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Snapshot.FromString, ) - self.ListSnapshots = channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/ListSnapshots', - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsResponse.FromString, + self.ListSnapshots = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/ListSnapshots", + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsResponse.FromString, ) - self.DeleteSnapshot = channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/DeleteSnapshot', - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteSnapshotRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + self.DeleteSnapshot = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteSnapshot", + request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteSnapshotRequest.SerializeToString, + response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) class BigtableTableAdminServicer(object): - """Service for creating, configuring, and deleting Cloud Bigtable tables. + """Service for creating, configuring, and deleting Cloud Bigtable tables. Provides access to the table schemas only, not the data stored within the tables. """ - def CreateTable(self, request, context): - """Creates a new table in the specified instance. + def CreateTable(self, request, context): + """Creates a new table in the specified instance. The table can be created with a full set of initial column families, specified in the request. 
""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") - def CreateTableFromSnapshot(self, request, context): - """Creates a new table from the specified snapshot. The target table must + def CreateTableFromSnapshot(self, request, context): + """Creates a new table from the specified snapshot. The target table must not exist. The snapshot and the table must be in the same instance. Note: This is a private alpha release of Cloud Bigtable snapshots. This @@ -115,71 +121,71 @@ def CreateTableFromSnapshot(self, request, context): recommended for production use. It is not subject to any SLA or deprecation policy. """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") - def ListTables(self, request, context): - """Lists all tables served from a specified instance. + def ListTables(self, request, context): + """Lists all tables served from a specified instance. """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") - def GetTable(self, request, context): - """Gets metadata information about the specified table. + def GetTable(self, request, context): + """Gets metadata information about the specified table. """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") - def DeleteTable(self, request, context): - """Permanently deletes a specified table and all of its data. + def DeleteTable(self, request, context): + """Permanently deletes a specified table and all of its data. """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") - def ModifyColumnFamilies(self, request, context): - """Performs a series of column family modifications on the specified table. + def ModifyColumnFamilies(self, request, context): + """Performs a series of column family modifications on the specified table. Either all or none of the modifications will occur before this method returns, but data requests received prior to that point may see a table where only some modifications have taken effect. 
""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") - def DropRowRange(self, request, context): - """Permanently drop/delete a row range from a specified table. The request can + def DropRowRange(self, request, context): + """Permanently drop/delete a row range from a specified table. The request can specify whether to delete all rows in a table, or only those that match a particular prefix. """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") - def GenerateConsistencyToken(self, request, context): - """Generates a consistency token for a Table, which can be used in + def GenerateConsistencyToken(self, request, context): + """Generates a consistency token for a Table, which can be used in CheckConsistency to check whether mutations to the table that finished before this call started have been replicated. The tokens will be available for 90 days. """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") - def CheckConsistency(self, request, context): - """Checks replication consistency based on a consistency token, that is, if + def CheckConsistency(self, request, context): + """Checks replication consistency based on a consistency token, that is, if replication has caught up based on the conditions specified in the token and the check request. """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") - def SnapshotTable(self, request, context): - """Creates a new snapshot in the specified cluster from the specified + def SnapshotTable(self, request, context): + """Creates a new snapshot in the specified cluster from the specified source table. The cluster and the table must be in the same instance. Note: This is a private alpha release of Cloud Bigtable snapshots. This @@ -188,12 +194,12 @@ def SnapshotTable(self, request, context): recommended for production use. It is not subject to any SLA or deprecation policy. """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") - def GetSnapshot(self, request, context): - """Gets metadata information about the specified snapshot. + def GetSnapshot(self, request, context): + """Gets metadata information about the specified snapshot. Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. 
This @@ -201,12 +207,12 @@ def GetSnapshot(self, request, context): recommended for production use. It is not subject to any SLA or deprecation policy. """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") - def ListSnapshots(self, request, context): - """Lists all snapshots associated with the specified cluster. + def ListSnapshots(self, request, context): + """Lists all snapshots associated with the specified cluster. Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This @@ -214,12 +220,12 @@ def ListSnapshots(self, request, context): recommended for production use. It is not subject to any SLA or deprecation policy. """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") - def DeleteSnapshot(self, request, context): - """Permanently deletes the specified snapshot. + def DeleteSnapshot(self, request, context): + """Permanently deletes the specified snapshot. Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This @@ -227,79 +233,80 @@ def DeleteSnapshot(self, request, context): recommended for production use. It is not subject to any SLA or deprecation policy. """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") def add_BigtableTableAdminServicer_to_server(servicer, server): - rpc_method_handlers = { - 'CreateTable': grpc.unary_unary_rpc_method_handler( - servicer.CreateTable, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Table.SerializeToString, - ), - 'CreateTableFromSnapshot': grpc.unary_unary_rpc_method_handler( - servicer.CreateTableFromSnapshot, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableFromSnapshotRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - 'ListTables': grpc.unary_unary_rpc_method_handler( - servicer.ListTables, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesResponse.SerializeToString, - ), - 'GetTable': grpc.unary_unary_rpc_method_handler( - servicer.GetTable, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetTableRequest.FromString, - 
response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Table.SerializeToString, - ), - 'DeleteTable': grpc.unary_unary_rpc_method_handler( - servicer.DeleteTable, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteTableRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - 'ModifyColumnFamilies': grpc.unary_unary_rpc_method_handler( - servicer.ModifyColumnFamilies, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ModifyColumnFamiliesRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Table.SerializeToString, - ), - 'DropRowRange': grpc.unary_unary_rpc_method_handler( - servicer.DropRowRange, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DropRowRangeRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - 'GenerateConsistencyToken': grpc.unary_unary_rpc_method_handler( - servicer.GenerateConsistencyToken, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenResponse.SerializeToString, - ), - 'CheckConsistency': grpc.unary_unary_rpc_method_handler( - servicer.CheckConsistency, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyResponse.SerializeToString, - ), - 'SnapshotTable': grpc.unary_unary_rpc_method_handler( - servicer.SnapshotTable, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.SnapshotTableRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - 'GetSnapshot': grpc.unary_unary_rpc_method_handler( - servicer.GetSnapshot, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetSnapshotRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Snapshot.SerializeToString, - ), - 'ListSnapshots': grpc.unary_unary_rpc_method_handler( - servicer.ListSnapshots, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsResponse.SerializeToString, - ), - 'DeleteSnapshot': grpc.unary_unary_rpc_method_handler( - servicer.DeleteSnapshot, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteSnapshotRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - 'google.bigtable.admin.v2.BigtableTableAdmin', rpc_method_handlers) - server.add_generic_rpc_handlers((generic_handler,)) + rpc_method_handlers = { + "CreateTable": grpc.unary_unary_rpc_method_handler( + 
servicer.CreateTable, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Table.SerializeToString, + ), + "CreateTableFromSnapshot": grpc.unary_unary_rpc_method_handler( + servicer.CreateTableFromSnapshot, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableFromSnapshotRequest.FromString, + response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, + ), + "ListTables": grpc.unary_unary_rpc_method_handler( + servicer.ListTables, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesResponse.SerializeToString, + ), + "GetTable": grpc.unary_unary_rpc_method_handler( + servicer.GetTable, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetTableRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Table.SerializeToString, + ), + "DeleteTable": grpc.unary_unary_rpc_method_handler( + servicer.DeleteTable, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteTableRequest.FromString, + response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ), + "ModifyColumnFamilies": grpc.unary_unary_rpc_method_handler( + servicer.ModifyColumnFamilies, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ModifyColumnFamiliesRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Table.SerializeToString, + ), + "DropRowRange": grpc.unary_unary_rpc_method_handler( + servicer.DropRowRange, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DropRowRangeRequest.FromString, + response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ), + "GenerateConsistencyToken": grpc.unary_unary_rpc_method_handler( + servicer.GenerateConsistencyToken, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenResponse.SerializeToString, + ), + "CheckConsistency": grpc.unary_unary_rpc_method_handler( + servicer.CheckConsistency, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyResponse.SerializeToString, + ), + "SnapshotTable": grpc.unary_unary_rpc_method_handler( + servicer.SnapshotTable, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.SnapshotTableRequest.FromString, + response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, + ), + "GetSnapshot": grpc.unary_unary_rpc_method_handler( + servicer.GetSnapshot, + 
request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetSnapshotRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Snapshot.SerializeToString, + ), + "ListSnapshots": grpc.unary_unary_rpc_method_handler( + servicer.ListSnapshots, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsResponse.SerializeToString, + ), + "DeleteSnapshot": grpc.unary_unary_rpc_method_handler( + servicer.DeleteSnapshot, + request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteSnapshotRequest.FromString, + response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + "google.bigtable.admin.v2.BigtableTableAdmin", rpc_method_handlers + ) + server.add_generic_rpc_handlers((generic_handler,)) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2.py index 0b3427c58d93..6e8e4ee89b48 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2.py @@ -2,13 +2,15 @@ # source: google/cloud/bigtable/admin_v2/proto/common.proto import sys -_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) + +_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf.internal import enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database from google.protobuf import descriptor_pb2 + # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() @@ -19,36 +21,38 @@ DESCRIPTOR = _descriptor.FileDescriptor( - name='google/cloud/bigtable/admin_v2/proto/common.proto', - package='google.bigtable.admin.v2', - syntax='proto3', - serialized_pb=_b('\n1google/cloud/bigtable/admin_v2/proto/common.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x1fgoogle/protobuf/timestamp.proto*=\n\x0bStorageType\x12\x1c\n\x18STORAGE_TYPE_UNSPECIFIED\x10\x00\x12\x07\n\x03SSD\x10\x01\x12\x07\n\x03HDD\x10\x02\x42\xae\x01\n\x1c\x63om.google.bigtable.admin.v2B\x0b\x43ommonProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2b\x06proto3') - , - dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,]) + name="google/cloud/bigtable/admin_v2/proto/common.proto", + package="google.bigtable.admin.v2", + syntax="proto3", + serialized_pb=_b( + 
"\n1google/cloud/bigtable/admin_v2/proto/common.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x1fgoogle/protobuf/timestamp.proto*=\n\x0bStorageType\x12\x1c\n\x18STORAGE_TYPE_UNSPECIFIED\x10\x00\x12\x07\n\x03SSD\x10\x01\x12\x07\n\x03HDD\x10\x02\x42\xae\x01\n\x1c\x63om.google.bigtable.admin.v2B\x0b\x43ommonProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2b\x06proto3" + ), + dependencies=[ + google_dot_api_dot_annotations__pb2.DESCRIPTOR, + google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, + ], +) _STORAGETYPE = _descriptor.EnumDescriptor( - name='StorageType', - full_name='google.bigtable.admin.v2.StorageType', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='STORAGE_TYPE_UNSPECIFIED', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='SSD', index=1, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='HDD', index=2, number=2, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=142, - serialized_end=203, + name="StorageType", + full_name="google.bigtable.admin.v2.StorageType", + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name="STORAGE_TYPE_UNSPECIFIED", index=0, number=0, options=None, type=None + ), + _descriptor.EnumValueDescriptor( + name="SSD", index=1, number=1, options=None, type=None + ), + _descriptor.EnumValueDescriptor( + name="HDD", index=2, number=2, options=None, type=None + ), + ], + containing_type=None, + options=None, + serialized_start=142, + serialized_end=203, ) _sym_db.RegisterEnumDescriptor(_STORAGETYPE) @@ -58,10 +62,15 @@ HDD = 2 -DESCRIPTOR.enum_types_by_name['StorageType'] = _STORAGETYPE +DESCRIPTOR.enum_types_by_name["StorageType"] = _STORAGETYPE _sym_db.RegisterFileDescriptor(DESCRIPTOR) DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\034com.google.bigtable.admin.v2B\013CommonProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2')) +DESCRIPTOR._options = _descriptor._ParseOptions( + descriptor_pb2.FileOptions(), + _b( + "\n\034com.google.bigtable.admin.v2B\013CommonProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2" + ), +) # @@protoc_insertion_point(module_scope) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2_grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2_grpc.py index a89435267cb2..07cb78fe03a9 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2_grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2_grpc.py @@ -1,3 +1,2 @@ # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
import grpc - diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py index 3e44d81aa2e4..ebf96b37b74a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py @@ -2,433 +2,643 @@ # source: google/cloud/bigtable/admin_v2/proto/instance.proto import sys -_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) + +_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database from google.protobuf import descriptor_pb2 + # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.cloud.bigtable_admin_v2.proto import common_pb2 as google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_common__pb2 +from google.cloud.bigtable_admin_v2.proto import ( + common_pb2 as google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_common__pb2, +) DESCRIPTOR = _descriptor.FileDescriptor( - name='google/cloud/bigtable/admin_v2/proto/instance.proto', - package='google.bigtable.admin.v2', - syntax='proto3', - serialized_pb=_b('\n3google/cloud/bigtable/admin_v2/proto/instance.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x31google/cloud/bigtable/admin_v2/proto/common.proto\"\x83\x03\n\x08Instance\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12\x37\n\x05state\x18\x03 \x01(\x0e\x32(.google.bigtable.admin.v2.Instance.State\x12\x35\n\x04type\x18\x04 \x01(\x0e\x32\'.google.bigtable.admin.v2.Instance.Type\x12>\n\x06labels\x18\x05 \x03(\x0b\x32..google.bigtable.admin.v2.Instance.LabelsEntry\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"5\n\x05State\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\t\n\x05READY\x10\x01\x12\x0c\n\x08\x43REATING\x10\x02\"=\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x0e\n\nPRODUCTION\x10\x01\x12\x0f\n\x0b\x44\x45VELOPMENT\x10\x02\"\x8e\x02\n\x07\x43luster\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x10\n\x08location\x18\x02 \x01(\t\x12\x36\n\x05state\x18\x03 \x01(\x0e\x32\'.google.bigtable.admin.v2.Cluster.State\x12\x13\n\x0bserve_nodes\x18\x04 \x01(\x05\x12\x43\n\x14\x64\x65\x66\x61ult_storage_type\x18\x05 \x01(\x0e\x32%.google.bigtable.admin.v2.StorageType\"Q\n\x05State\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\t\n\x05READY\x10\x01\x12\x0c\n\x08\x43REATING\x10\x02\x12\x0c\n\x08RESIZING\x10\x03\x12\x0c\n\x08\x44ISABLED\x10\x04\"\x82\x03\n\nAppProfile\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04\x65tag\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t\x12g\n\x1dmulti_cluster_routing_use_any\x18\x05 \x01(\x0b\x32>.google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAnyH\x00\x12[\n\x16single_cluster_routing\x18\x06 \x01(\x0b\x32\x39.google.bigtable.admin.v2.AppProfile.SingleClusterRoutingH\x00\x1a\x1b\n\x19MultiClusterRoutingUseAny\x1aN\n\x14SingleClusterRouting\x12\x12\n\ncluster_id\x18\x01 \x01(\t\x12\"\n\x1a\x61llow_transactional_writes\x18\x02 
\x01(\x08\x42\x10\n\x0erouting_policyB\xb0\x01\n\x1c\x63om.google.bigtable.admin.v2B\rInstanceProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2b\x06proto3') - , - dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_common__pb2.DESCRIPTOR,]) - + name="google/cloud/bigtable/admin_v2/proto/instance.proto", + package="google.bigtable.admin.v2", + syntax="proto3", + serialized_pb=_b( + '\n3google/cloud/bigtable/admin_v2/proto/instance.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x31google/cloud/bigtable/admin_v2/proto/common.proto"\x83\x03\n\x08Instance\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12\x37\n\x05state\x18\x03 \x01(\x0e\x32(.google.bigtable.admin.v2.Instance.State\x12\x35\n\x04type\x18\x04 \x01(\x0e\x32\'.google.bigtable.admin.v2.Instance.Type\x12>\n\x06labels\x18\x05 \x03(\x0b\x32..google.bigtable.admin.v2.Instance.LabelsEntry\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"5\n\x05State\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\t\n\x05READY\x10\x01\x12\x0c\n\x08\x43REATING\x10\x02"=\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x0e\n\nPRODUCTION\x10\x01\x12\x0f\n\x0b\x44\x45VELOPMENT\x10\x02"\x8e\x02\n\x07\x43luster\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x10\n\x08location\x18\x02 \x01(\t\x12\x36\n\x05state\x18\x03 \x01(\x0e\x32\'.google.bigtable.admin.v2.Cluster.State\x12\x13\n\x0bserve_nodes\x18\x04 \x01(\x05\x12\x43\n\x14\x64\x65\x66\x61ult_storage_type\x18\x05 \x01(\x0e\x32%.google.bigtable.admin.v2.StorageType"Q\n\x05State\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\t\n\x05READY\x10\x01\x12\x0c\n\x08\x43REATING\x10\x02\x12\x0c\n\x08RESIZING\x10\x03\x12\x0c\n\x08\x44ISABLED\x10\x04"\x82\x03\n\nAppProfile\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04\x65tag\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t\x12g\n\x1dmulti_cluster_routing_use_any\x18\x05 \x01(\x0b\x32>.google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAnyH\x00\x12[\n\x16single_cluster_routing\x18\x06 \x01(\x0b\x32\x39.google.bigtable.admin.v2.AppProfile.SingleClusterRoutingH\x00\x1a\x1b\n\x19MultiClusterRoutingUseAny\x1aN\n\x14SingleClusterRouting\x12\x12\n\ncluster_id\x18\x01 \x01(\t\x12"\n\x1a\x61llow_transactional_writes\x18\x02 \x01(\x08\x42\x10\n\x0erouting_policyB\xb0\x01\n\x1c\x63om.google.bigtable.admin.v2B\rInstanceProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2b\x06proto3' + ), + dependencies=[ + google_dot_api_dot_annotations__pb2.DESCRIPTOR, + google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_common__pb2.DESCRIPTOR, + ], +) _INSTANCE_STATE = _descriptor.EnumDescriptor( - name='State', - full_name='google.bigtable.admin.v2.Instance.State', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='STATE_NOT_KNOWN', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='READY', index=1, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CREATING', index=2, number=2, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=434, - serialized_end=487, + name="State", + full_name="google.bigtable.admin.v2.Instance.State", + 
filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name="STATE_NOT_KNOWN", index=0, number=0, options=None, type=None + ), + _descriptor.EnumValueDescriptor( + name="READY", index=1, number=1, options=None, type=None + ), + _descriptor.EnumValueDescriptor( + name="CREATING", index=2, number=2, options=None, type=None + ), + ], + containing_type=None, + options=None, + serialized_start=434, + serialized_end=487, ) _sym_db.RegisterEnumDescriptor(_INSTANCE_STATE) _INSTANCE_TYPE = _descriptor.EnumDescriptor( - name='Type', - full_name='google.bigtable.admin.v2.Instance.Type', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='TYPE_UNSPECIFIED', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='PRODUCTION', index=1, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='DEVELOPMENT', index=2, number=2, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=489, - serialized_end=550, + name="Type", + full_name="google.bigtable.admin.v2.Instance.Type", + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name="TYPE_UNSPECIFIED", index=0, number=0, options=None, type=None + ), + _descriptor.EnumValueDescriptor( + name="PRODUCTION", index=1, number=1, options=None, type=None + ), + _descriptor.EnumValueDescriptor( + name="DEVELOPMENT", index=2, number=2, options=None, type=None + ), + ], + containing_type=None, + options=None, + serialized_start=489, + serialized_end=550, ) _sym_db.RegisterEnumDescriptor(_INSTANCE_TYPE) _CLUSTER_STATE = _descriptor.EnumDescriptor( - name='State', - full_name='google.bigtable.admin.v2.Cluster.State', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='STATE_NOT_KNOWN', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='READY', index=1, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CREATING', index=2, number=2, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='RESIZING', index=3, number=3, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='DISABLED', index=4, number=4, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=742, - serialized_end=823, + name="State", + full_name="google.bigtable.admin.v2.Cluster.State", + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name="STATE_NOT_KNOWN", index=0, number=0, options=None, type=None + ), + _descriptor.EnumValueDescriptor( + name="READY", index=1, number=1, options=None, type=None + ), + _descriptor.EnumValueDescriptor( + name="CREATING", index=2, number=2, options=None, type=None + ), + _descriptor.EnumValueDescriptor( + name="RESIZING", index=3, number=3, options=None, type=None + ), + _descriptor.EnumValueDescriptor( + name="DISABLED", index=4, number=4, options=None, type=None + ), + ], + containing_type=None, + options=None, + serialized_start=742, + serialized_end=823, ) _sym_db.RegisterEnumDescriptor(_CLUSTER_STATE) _INSTANCE_LABELSENTRY = _descriptor.Descriptor( - name='LabelsEntry', - full_name='google.bigtable.admin.v2.Instance.LabelsEntry', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='key', full_name='google.bigtable.admin.v2.Instance.LabelsEntry.key', index=0, - number=1, type=9, cpp_type=9, label=1, - 
has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='value', full_name='google.bigtable.admin.v2.Instance.LabelsEntry.value', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=387, - serialized_end=432, + name="LabelsEntry", + full_name="google.bigtable.admin.v2.Instance.LabelsEntry", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="key", + full_name="google.bigtable.admin.v2.Instance.LabelsEntry.key", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="value", + full_name="google.bigtable.admin.v2.Instance.LabelsEntry.value", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b("8\001")), + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=387, + serialized_end=432, ) _INSTANCE = _descriptor.Descriptor( - name='Instance', - full_name='google.bigtable.admin.v2.Instance', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.v2.Instance.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='display_name', full_name='google.bigtable.admin.v2.Instance.display_name', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='state', full_name='google.bigtable.admin.v2.Instance.state', index=2, - number=3, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='type', full_name='google.bigtable.admin.v2.Instance.type', index=3, - number=4, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - 
options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='labels', full_name='google.bigtable.admin.v2.Instance.labels', index=4, - number=5, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[_INSTANCE_LABELSENTRY, ], - enum_types=[ - _INSTANCE_STATE, - _INSTANCE_TYPE, - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=163, - serialized_end=550, + name="Instance", + full_name="google.bigtable.admin.v2.Instance", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.admin.v2.Instance.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="display_name", + full_name="google.bigtable.admin.v2.Instance.display_name", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="state", + full_name="google.bigtable.admin.v2.Instance.state", + index=2, + number=3, + type=14, + cpp_type=8, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="type", + full_name="google.bigtable.admin.v2.Instance.type", + index=3, + number=4, + type=14, + cpp_type=8, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="labels", + full_name="google.bigtable.admin.v2.Instance.labels", + index=4, + number=5, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[_INSTANCE_LABELSENTRY], + enum_types=[_INSTANCE_STATE, _INSTANCE_TYPE], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=163, + serialized_end=550, ) _CLUSTER = _descriptor.Descriptor( - name='Cluster', - full_name='google.bigtable.admin.v2.Cluster', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.v2.Cluster.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='location', full_name='google.bigtable.admin.v2.Cluster.location', index=1, - number=2, type=9, cpp_type=9, label=1, - 
has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='state', full_name='google.bigtable.admin.v2.Cluster.state', index=2, - number=3, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='serve_nodes', full_name='google.bigtable.admin.v2.Cluster.serve_nodes', index=3, - number=4, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='default_storage_type', full_name='google.bigtable.admin.v2.Cluster.default_storage_type', index=4, - number=5, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _CLUSTER_STATE, - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=553, - serialized_end=823, + name="Cluster", + full_name="google.bigtable.admin.v2.Cluster", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.admin.v2.Cluster.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="location", + full_name="google.bigtable.admin.v2.Cluster.location", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="state", + full_name="google.bigtable.admin.v2.Cluster.state", + index=2, + number=3, + type=14, + cpp_type=8, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="serve_nodes", + full_name="google.bigtable.admin.v2.Cluster.serve_nodes", + index=3, + number=4, + type=5, + cpp_type=1, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="default_storage_type", + full_name="google.bigtable.admin.v2.Cluster.default_storage_type", + index=4, + number=5, + type=14, + cpp_type=8, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[_CLUSTER_STATE], + options=None, + 
is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=553, + serialized_end=823, ) _APPPROFILE_MULTICLUSTERROUTINGUSEANY = _descriptor.Descriptor( - name='MultiClusterRoutingUseAny', - full_name='google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1087, - serialized_end=1114, + name="MultiClusterRoutingUseAny", + full_name="google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1087, + serialized_end=1114, ) _APPPROFILE_SINGLECLUSTERROUTING = _descriptor.Descriptor( - name='SingleClusterRouting', - full_name='google.bigtable.admin.v2.AppProfile.SingleClusterRouting', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='cluster_id', full_name='google.bigtable.admin.v2.AppProfile.SingleClusterRouting.cluster_id', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='allow_transactional_writes', full_name='google.bigtable.admin.v2.AppProfile.SingleClusterRouting.allow_transactional_writes', index=1, - number=2, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1116, - serialized_end=1194, + name="SingleClusterRouting", + full_name="google.bigtable.admin.v2.AppProfile.SingleClusterRouting", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="cluster_id", + full_name="google.bigtable.admin.v2.AppProfile.SingleClusterRouting.cluster_id", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="allow_transactional_writes", + full_name="google.bigtable.admin.v2.AppProfile.SingleClusterRouting.allow_transactional_writes", + index=1, + number=2, + type=8, + cpp_type=7, + label=1, + has_default_value=False, + default_value=False, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1116, + serialized_end=1194, ) _APPPROFILE = _descriptor.Descriptor( - name='AppProfile', - full_name='google.bigtable.admin.v2.AppProfile', - filename=None, - file=DESCRIPTOR, - 
containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.v2.AppProfile.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='etag', full_name='google.bigtable.admin.v2.AppProfile.etag', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='description', full_name='google.bigtable.admin.v2.AppProfile.description', index=2, - number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='multi_cluster_routing_use_any', full_name='google.bigtable.admin.v2.AppProfile.multi_cluster_routing_use_any', index=3, - number=5, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='single_cluster_routing', full_name='google.bigtable.admin.v2.AppProfile.single_cluster_routing', index=4, - number=6, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[_APPPROFILE_MULTICLUSTERROUTINGUSEANY, _APPPROFILE_SINGLECLUSTERROUTING, ], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name='routing_policy', full_name='google.bigtable.admin.v2.AppProfile.routing_policy', - index=0, containing_type=None, fields=[]), - ], - serialized_start=826, - serialized_end=1212, + name="AppProfile", + full_name="google.bigtable.admin.v2.AppProfile", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.admin.v2.AppProfile.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="etag", + full_name="google.bigtable.admin.v2.AppProfile.etag", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="description", + full_name="google.bigtable.admin.v2.AppProfile.description", + index=2, + number=3, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + 
options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="multi_cluster_routing_use_any", + full_name="google.bigtable.admin.v2.AppProfile.multi_cluster_routing_use_any", + index=3, + number=5, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="single_cluster_routing", + full_name="google.bigtable.admin.v2.AppProfile.single_cluster_routing", + index=4, + number=6, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[ + _APPPROFILE_MULTICLUSTERROUTINGUSEANY, + _APPPROFILE_SINGLECLUSTERROUTING, + ], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name="routing_policy", + full_name="google.bigtable.admin.v2.AppProfile.routing_policy", + index=0, + containing_type=None, + fields=[], + ) + ], + serialized_start=826, + serialized_end=1212, ) _INSTANCE_LABELSENTRY.containing_type = _INSTANCE -_INSTANCE.fields_by_name['state'].enum_type = _INSTANCE_STATE -_INSTANCE.fields_by_name['type'].enum_type = _INSTANCE_TYPE -_INSTANCE.fields_by_name['labels'].message_type = _INSTANCE_LABELSENTRY +_INSTANCE.fields_by_name["state"].enum_type = _INSTANCE_STATE +_INSTANCE.fields_by_name["type"].enum_type = _INSTANCE_TYPE +_INSTANCE.fields_by_name["labels"].message_type = _INSTANCE_LABELSENTRY _INSTANCE_STATE.containing_type = _INSTANCE _INSTANCE_TYPE.containing_type = _INSTANCE -_CLUSTER.fields_by_name['state'].enum_type = _CLUSTER_STATE -_CLUSTER.fields_by_name['default_storage_type'].enum_type = google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_common__pb2._STORAGETYPE +_CLUSTER.fields_by_name["state"].enum_type = _CLUSTER_STATE +_CLUSTER.fields_by_name[ + "default_storage_type" +].enum_type = ( + google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_common__pb2._STORAGETYPE +) _CLUSTER_STATE.containing_type = _CLUSTER _APPPROFILE_MULTICLUSTERROUTINGUSEANY.containing_type = _APPPROFILE _APPPROFILE_SINGLECLUSTERROUTING.containing_type = _APPPROFILE -_APPPROFILE.fields_by_name['multi_cluster_routing_use_any'].message_type = _APPPROFILE_MULTICLUSTERROUTINGUSEANY -_APPPROFILE.fields_by_name['single_cluster_routing'].message_type = _APPPROFILE_SINGLECLUSTERROUTING -_APPPROFILE.oneofs_by_name['routing_policy'].fields.append( - _APPPROFILE.fields_by_name['multi_cluster_routing_use_any']) -_APPPROFILE.fields_by_name['multi_cluster_routing_use_any'].containing_oneof = _APPPROFILE.oneofs_by_name['routing_policy'] -_APPPROFILE.oneofs_by_name['routing_policy'].fields.append( - _APPPROFILE.fields_by_name['single_cluster_routing']) -_APPPROFILE.fields_by_name['single_cluster_routing'].containing_oneof = _APPPROFILE.oneofs_by_name['routing_policy'] -DESCRIPTOR.message_types_by_name['Instance'] = _INSTANCE -DESCRIPTOR.message_types_by_name['Cluster'] = _CLUSTER -DESCRIPTOR.message_types_by_name['AppProfile'] = _APPPROFILE +_APPPROFILE.fields_by_name[ + "multi_cluster_routing_use_any" +].message_type = _APPPROFILE_MULTICLUSTERROUTINGUSEANY +_APPPROFILE.fields_by_name[ + "single_cluster_routing" +].message_type = _APPPROFILE_SINGLECLUSTERROUTING 
+_APPPROFILE.oneofs_by_name["routing_policy"].fields.append( + _APPPROFILE.fields_by_name["multi_cluster_routing_use_any"] +) +_APPPROFILE.fields_by_name[ + "multi_cluster_routing_use_any" +].containing_oneof = _APPPROFILE.oneofs_by_name["routing_policy"] +_APPPROFILE.oneofs_by_name["routing_policy"].fields.append( + _APPPROFILE.fields_by_name["single_cluster_routing"] +) +_APPPROFILE.fields_by_name[ + "single_cluster_routing" +].containing_oneof = _APPPROFILE.oneofs_by_name["routing_policy"] +DESCRIPTOR.message_types_by_name["Instance"] = _INSTANCE +DESCRIPTOR.message_types_by_name["Cluster"] = _CLUSTER +DESCRIPTOR.message_types_by_name["AppProfile"] = _APPPROFILE _sym_db.RegisterFileDescriptor(DESCRIPTOR) -Instance = _reflection.GeneratedProtocolMessageType('Instance', (_message.Message,), dict( - - LabelsEntry = _reflection.GeneratedProtocolMessageType('LabelsEntry', (_message.Message,), dict( - DESCRIPTOR = _INSTANCE_LABELSENTRY, - __module__ = 'google.cloud.bigtable.admin_v2.proto.instance_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Instance.LabelsEntry) - )) - , - DESCRIPTOR = _INSTANCE, - __module__ = 'google.cloud.bigtable.admin_v2.proto.instance_pb2' - , - __doc__ = """A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and +Instance = _reflection.GeneratedProtocolMessageType( + "Instance", + (_message.Message,), + dict( + LabelsEntry=_reflection.GeneratedProtocolMessageType( + "LabelsEntry", + (_message.Message,), + dict( + DESCRIPTOR=_INSTANCE_LABELSENTRY, + __module__="google.cloud.bigtable.admin_v2.proto.instance_pb2" + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Instance.LabelsEntry) + ), + ), + DESCRIPTOR=_INSTANCE, + __module__="google.cloud.bigtable.admin_v2.proto.instance_pb2", + __doc__="""A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and the resources that serve them. All tables in an instance are served from a single [Cluster][google.bigtable.admin.v2.Cluster]. @@ -459,16 +669,19 @@ No more than 64 labels can be associated with a given resource. - Keys and values must both be under 128 bytes. """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Instance) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Instance) + ), +) _sym_db.RegisterMessage(Instance) _sym_db.RegisterMessage(Instance.LabelsEntry) -Cluster = _reflection.GeneratedProtocolMessageType('Cluster', (_message.Message,), dict( - DESCRIPTOR = _CLUSTER, - __module__ = 'google.cloud.bigtable.admin_v2.proto.instance_pb2' - , - __doc__ = """A resizable group of nodes in a particular cloud location, capable of +Cluster = _reflection.GeneratedProtocolMessageType( + "Cluster", + (_message.Message,), + dict( + DESCRIPTOR=_CLUSTER, + __module__="google.cloud.bigtable.admin_v2.proto.instance_pb2", + __doc__="""A resizable group of nodes in a particular cloud location, capable of serving all [Tables][google.bigtable.admin.v2.Table] in the parent [Instance][google.bigtable.admin.v2.Instance]. @@ -494,30 +707,36 @@ serve its parent instance's tables, unless explicitly overridden. 
""", - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Cluster) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Cluster) + ), +) _sym_db.RegisterMessage(Cluster) -AppProfile = _reflection.GeneratedProtocolMessageType('AppProfile', (_message.Message,), dict( - - MultiClusterRoutingUseAny = _reflection.GeneratedProtocolMessageType('MultiClusterRoutingUseAny', (_message.Message,), dict( - DESCRIPTOR = _APPPROFILE_MULTICLUSTERROUTINGUSEANY, - __module__ = 'google.cloud.bigtable.admin_v2.proto.instance_pb2' - , - __doc__ = """Read/write requests may be routed to any cluster in the instance, and +AppProfile = _reflection.GeneratedProtocolMessageType( + "AppProfile", + (_message.Message,), + dict( + MultiClusterRoutingUseAny=_reflection.GeneratedProtocolMessageType( + "MultiClusterRoutingUseAny", + (_message.Message,), + dict( + DESCRIPTOR=_APPPROFILE_MULTICLUSTERROUTINGUSEANY, + __module__="google.cloud.bigtable.admin_v2.proto.instance_pb2", + __doc__="""Read/write requests may be routed to any cluster in the instance, and will fail over to another cluster in the event of transient errors or delays. Choosing this option sacrifices read-your-writes consistency to improve availability. """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny) - )) - , - - SingleClusterRouting = _reflection.GeneratedProtocolMessageType('SingleClusterRouting', (_message.Message,), dict( - DESCRIPTOR = _APPPROFILE_SINGLECLUSTERROUTING, - __module__ = 'google.cloud.bigtable.admin_v2.proto.instance_pb2' - , - __doc__ = """Unconditionally routes all read/write requests to a specific cluster. + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny) + ), + ), + SingleClusterRouting=_reflection.GeneratedProtocolMessageType( + "SingleClusterRouting", + (_message.Message,), + dict( + DESCRIPTOR=_APPPROFILE_SINGLECLUSTERROUTING, + __module__="google.cloud.bigtable.admin_v2.proto.instance_pb2", + __doc__="""Unconditionally routes all read/write requests to a specific cluster. This option preserves read-your-writes consistency, but does not improve availability. @@ -531,13 +750,12 @@ profile. It is unsafe to send these requests to the same table/row/column in multiple clusters. """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.AppProfile.SingleClusterRouting) - )) - , - DESCRIPTOR = _APPPROFILE, - __module__ = 'google.cloud.bigtable.admin_v2.proto.instance_pb2' - , - __doc__ = """A configuration object describing how Cloud Bigtable should treat + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.AppProfile.SingleClusterRouting) + ), + ), + DESCRIPTOR=_APPPROFILE, + __module__="google.cloud.bigtable.admin_v2.proto.instance_pb2", + __doc__="""A configuration object describing how Cloud Bigtable should treat traffic from a particular end user application. @@ -568,15 +786,23 @@ single_cluster_routing: Use a single-cluster routing policy. 
""", - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.AppProfile) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.AppProfile) + ), +) _sym_db.RegisterMessage(AppProfile) _sym_db.RegisterMessage(AppProfile.MultiClusterRoutingUseAny) _sym_db.RegisterMessage(AppProfile.SingleClusterRouting) DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\034com.google.bigtable.admin.v2B\rInstanceProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2')) +DESCRIPTOR._options = _descriptor._ParseOptions( + descriptor_pb2.FileOptions(), + _b( + "\n\034com.google.bigtable.admin.v2B\rInstanceProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2" + ), +) _INSTANCE_LABELSENTRY.has_options = True -_INSTANCE_LABELSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) +_INSTANCE_LABELSENTRY._options = _descriptor._ParseOptions( + descriptor_pb2.MessageOptions(), _b("8\001") +) # @@protoc_insertion_point(module_scope) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2_grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2_grpc.py index a89435267cb2..07cb78fe03a9 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2_grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2_grpc.py @@ -1,3 +1,2 @@ # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! import grpc - diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py index 4d7625703694..8b309a67256f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py @@ -2,12 +2,14 @@ # source: google/cloud/bigtable/admin_v2/proto/table.proto import sys -_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) + +_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database from google.protobuf import descriptor_pb2 + # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() @@ -19,560 +21,795 @@ DESCRIPTOR = _descriptor.FileDescriptor( - name='google/cloud/bigtable/admin_v2/proto/table.proto', - package='google.bigtable.admin.v2', - syntax='proto3', - serialized_pb=_b('\n0google/cloud/bigtable/admin_v2/proto/table.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\xcb\x06\n\x05Table\x12\x0c\n\x04name\x18\x01 \x01(\t\x12J\n\x0e\x63luster_states\x18\x02 \x03(\x0b\x32\x32.google.bigtable.admin.v2.Table.ClusterStatesEntry\x12L\n\x0f\x63olumn_families\x18\x03 \x03(\x0b\x32\x33.google.bigtable.admin.v2.Table.ColumnFamiliesEntry\x12I\n\x0bgranularity\x18\x04 
\x01(\x0e\x32\x34.google.bigtable.admin.v2.Table.TimestampGranularity\x1a\xe2\x01\n\x0c\x43lusterState\x12X\n\x11replication_state\x18\x01 \x01(\x0e\x32=.google.bigtable.admin.v2.Table.ClusterState.ReplicationState\"x\n\x10ReplicationState\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\x10\n\x0cINITIALIZING\x10\x01\x12\x17\n\x13PLANNED_MAINTENANCE\x10\x02\x12\x19\n\x15UNPLANNED_MAINTENANCE\x10\x03\x12\t\n\x05READY\x10\x04\x1a\x62\n\x12\x43lusterStatesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12;\n\x05value\x18\x02 \x01(\x0b\x32,.google.bigtable.admin.v2.Table.ClusterState:\x02\x38\x01\x1a]\n\x13\x43olumnFamiliesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x35\n\x05value\x18\x02 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamily:\x02\x38\x01\"I\n\x14TimestampGranularity\x12%\n!TIMESTAMP_GRANULARITY_UNSPECIFIED\x10\x00\x12\n\n\x06MILLIS\x10\x01\"\\\n\x04View\x12\x14\n\x10VIEW_UNSPECIFIED\x10\x00\x12\r\n\tNAME_ONLY\x10\x01\x12\x0f\n\x0bSCHEMA_VIEW\x10\x02\x12\x14\n\x10REPLICATION_VIEW\x10\x03\x12\x08\n\x04\x46ULL\x10\x04\"A\n\x0c\x43olumnFamily\x12\x31\n\x07gc_rule\x18\x01 \x01(\x0b\x32 .google.bigtable.admin.v2.GcRule\"\xd5\x02\n\x06GcRule\x12\x1a\n\x10max_num_versions\x18\x01 \x01(\x05H\x00\x12,\n\x07max_age\x18\x02 \x01(\x0b\x32\x19.google.protobuf.DurationH\x00\x12\x45\n\x0cintersection\x18\x03 \x01(\x0b\x32-.google.bigtable.admin.v2.GcRule.IntersectionH\x00\x12\x37\n\x05union\x18\x04 \x01(\x0b\x32&.google.bigtable.admin.v2.GcRule.UnionH\x00\x1a?\n\x0cIntersection\x12/\n\x05rules\x18\x01 \x03(\x0b\x32 .google.bigtable.admin.v2.GcRule\x1a\x38\n\x05Union\x12/\n\x05rules\x18\x01 \x03(\x0b\x32 .google.bigtable.admin.v2.GcRuleB\x06\n\x04rule\"\xcf\x02\n\x08Snapshot\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x35\n\x0csource_table\x18\x02 \x01(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12\x17\n\x0f\x64\x61ta_size_bytes\x18\x03 \x01(\x03\x12/\n\x0b\x63reate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x64\x65lete_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x37\n\x05state\x18\x06 \x01(\x0e\x32(.google.bigtable.admin.v2.Snapshot.State\x12\x13\n\x0b\x64\x65scription\x18\x07 \x01(\t\"5\n\x05State\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\t\n\x05READY\x10\x01\x12\x0c\n\x08\x43REATING\x10\x02\x42\xad\x01\n\x1c\x63om.google.bigtable.admin.v2B\nTableProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2b\x06proto3') - , - dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_protobuf_dot_duration__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,]) - + name="google/cloud/bigtable/admin_v2/proto/table.proto", + package="google.bigtable.admin.v2", + syntax="proto3", + serialized_pb=_b( + '\n0google/cloud/bigtable/admin_v2/proto/table.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xcb\x06\n\x05Table\x12\x0c\n\x04name\x18\x01 \x01(\t\x12J\n\x0e\x63luster_states\x18\x02 \x03(\x0b\x32\x32.google.bigtable.admin.v2.Table.ClusterStatesEntry\x12L\n\x0f\x63olumn_families\x18\x03 \x03(\x0b\x32\x33.google.bigtable.admin.v2.Table.ColumnFamiliesEntry\x12I\n\x0bgranularity\x18\x04 \x01(\x0e\x32\x34.google.bigtable.admin.v2.Table.TimestampGranularity\x1a\xe2\x01\n\x0c\x43lusterState\x12X\n\x11replication_state\x18\x01 
\x01(\x0e\x32=.google.bigtable.admin.v2.Table.ClusterState.ReplicationState"x\n\x10ReplicationState\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\x10\n\x0cINITIALIZING\x10\x01\x12\x17\n\x13PLANNED_MAINTENANCE\x10\x02\x12\x19\n\x15UNPLANNED_MAINTENANCE\x10\x03\x12\t\n\x05READY\x10\x04\x1a\x62\n\x12\x43lusterStatesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12;\n\x05value\x18\x02 \x01(\x0b\x32,.google.bigtable.admin.v2.Table.ClusterState:\x02\x38\x01\x1a]\n\x13\x43olumnFamiliesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x35\n\x05value\x18\x02 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamily:\x02\x38\x01"I\n\x14TimestampGranularity\x12%\n!TIMESTAMP_GRANULARITY_UNSPECIFIED\x10\x00\x12\n\n\x06MILLIS\x10\x01"\\\n\x04View\x12\x14\n\x10VIEW_UNSPECIFIED\x10\x00\x12\r\n\tNAME_ONLY\x10\x01\x12\x0f\n\x0bSCHEMA_VIEW\x10\x02\x12\x14\n\x10REPLICATION_VIEW\x10\x03\x12\x08\n\x04\x46ULL\x10\x04"A\n\x0c\x43olumnFamily\x12\x31\n\x07gc_rule\x18\x01 \x01(\x0b\x32 .google.bigtable.admin.v2.GcRule"\xd5\x02\n\x06GcRule\x12\x1a\n\x10max_num_versions\x18\x01 \x01(\x05H\x00\x12,\n\x07max_age\x18\x02 \x01(\x0b\x32\x19.google.protobuf.DurationH\x00\x12\x45\n\x0cintersection\x18\x03 \x01(\x0b\x32-.google.bigtable.admin.v2.GcRule.IntersectionH\x00\x12\x37\n\x05union\x18\x04 \x01(\x0b\x32&.google.bigtable.admin.v2.GcRule.UnionH\x00\x1a?\n\x0cIntersection\x12/\n\x05rules\x18\x01 \x03(\x0b\x32 .google.bigtable.admin.v2.GcRule\x1a\x38\n\x05Union\x12/\n\x05rules\x18\x01 \x03(\x0b\x32 .google.bigtable.admin.v2.GcRuleB\x06\n\x04rule"\xcf\x02\n\x08Snapshot\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x35\n\x0csource_table\x18\x02 \x01(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12\x17\n\x0f\x64\x61ta_size_bytes\x18\x03 \x01(\x03\x12/\n\x0b\x63reate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x64\x65lete_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x37\n\x05state\x18\x06 \x01(\x0e\x32(.google.bigtable.admin.v2.Snapshot.State\x12\x13\n\x0b\x64\x65scription\x18\x07 \x01(\t"5\n\x05State\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\t\n\x05READY\x10\x01\x12\x0c\n\x08\x43REATING\x10\x02\x42\xad\x01\n\x1c\x63om.google.bigtable.admin.v2B\nTableProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2b\x06proto3' + ), + dependencies=[ + google_dot_api_dot_annotations__pb2.DESCRIPTOR, + google_dot_protobuf_dot_duration__pb2.DESCRIPTOR, + google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, + ], +) _TABLE_CLUSTERSTATE_REPLICATIONSTATE = _descriptor.EnumDescriptor( - name='ReplicationState', - full_name='google.bigtable.admin.v2.Table.ClusterState.ReplicationState', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='STATE_NOT_KNOWN', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='INITIALIZING', index=1, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='PLANNED_MAINTENANCE', index=2, number=2, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='UNPLANNED_MAINTENANCE', index=3, number=3, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='READY', index=4, number=4, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=533, - serialized_end=653, + name="ReplicationState", + full_name="google.bigtable.admin.v2.Table.ClusterState.ReplicationState", + filename=None, + file=DESCRIPTOR, + values=[ + 
_descriptor.EnumValueDescriptor( + name="STATE_NOT_KNOWN", index=0, number=0, options=None, type=None + ), + _descriptor.EnumValueDescriptor( + name="INITIALIZING", index=1, number=1, options=None, type=None + ), + _descriptor.EnumValueDescriptor( + name="PLANNED_MAINTENANCE", index=2, number=2, options=None, type=None + ), + _descriptor.EnumValueDescriptor( + name="UNPLANNED_MAINTENANCE", index=3, number=3, options=None, type=None + ), + _descriptor.EnumValueDescriptor( + name="READY", index=4, number=4, options=None, type=None + ), + ], + containing_type=None, + options=None, + serialized_start=533, + serialized_end=653, ) _sym_db.RegisterEnumDescriptor(_TABLE_CLUSTERSTATE_REPLICATIONSTATE) _TABLE_TIMESTAMPGRANULARITY = _descriptor.EnumDescriptor( - name='TimestampGranularity', - full_name='google.bigtable.admin.v2.Table.TimestampGranularity', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='TIMESTAMP_GRANULARITY_UNSPECIFIED', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='MILLIS', index=1, number=1, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=850, - serialized_end=923, + name="TimestampGranularity", + full_name="google.bigtable.admin.v2.Table.TimestampGranularity", + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name="TIMESTAMP_GRANULARITY_UNSPECIFIED", + index=0, + number=0, + options=None, + type=None, + ), + _descriptor.EnumValueDescriptor( + name="MILLIS", index=1, number=1, options=None, type=None + ), + ], + containing_type=None, + options=None, + serialized_start=850, + serialized_end=923, ) _sym_db.RegisterEnumDescriptor(_TABLE_TIMESTAMPGRANULARITY) _TABLE_VIEW = _descriptor.EnumDescriptor( - name='View', - full_name='google.bigtable.admin.v2.Table.View', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='VIEW_UNSPECIFIED', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='NAME_ONLY', index=1, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='SCHEMA_VIEW', index=2, number=2, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='REPLICATION_VIEW', index=3, number=3, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='FULL', index=4, number=4, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=925, - serialized_end=1017, + name="View", + full_name="google.bigtable.admin.v2.Table.View", + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name="VIEW_UNSPECIFIED", index=0, number=0, options=None, type=None + ), + _descriptor.EnumValueDescriptor( + name="NAME_ONLY", index=1, number=1, options=None, type=None + ), + _descriptor.EnumValueDescriptor( + name="SCHEMA_VIEW", index=2, number=2, options=None, type=None + ), + _descriptor.EnumValueDescriptor( + name="REPLICATION_VIEW", index=3, number=3, options=None, type=None + ), + _descriptor.EnumValueDescriptor( + name="FULL", index=4, number=4, options=None, type=None + ), + ], + containing_type=None, + options=None, + serialized_start=925, + serialized_end=1017, ) _sym_db.RegisterEnumDescriptor(_TABLE_VIEW) _SNAPSHOT_STATE = _descriptor.EnumDescriptor( - name='State', - full_name='google.bigtable.admin.v2.Snapshot.State', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='STATE_NOT_KNOWN', index=0, 
number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='READY', index=1, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CREATING', index=2, number=2, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=1713, - serialized_end=1766, + name="State", + full_name="google.bigtable.admin.v2.Snapshot.State", + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name="STATE_NOT_KNOWN", index=0, number=0, options=None, type=None + ), + _descriptor.EnumValueDescriptor( + name="READY", index=1, number=1, options=None, type=None + ), + _descriptor.EnumValueDescriptor( + name="CREATING", index=2, number=2, options=None, type=None + ), + ], + containing_type=None, + options=None, + serialized_start=1713, + serialized_end=1766, ) _sym_db.RegisterEnumDescriptor(_SNAPSHOT_STATE) _TABLE_CLUSTERSTATE = _descriptor.Descriptor( - name='ClusterState', - full_name='google.bigtable.admin.v2.Table.ClusterState', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='replication_state', full_name='google.bigtable.admin.v2.Table.ClusterState.replication_state', index=0, - number=1, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _TABLE_CLUSTERSTATE_REPLICATIONSTATE, - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=427, - serialized_end=653, + name="ClusterState", + full_name="google.bigtable.admin.v2.Table.ClusterState", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="replication_state", + full_name="google.bigtable.admin.v2.Table.ClusterState.replication_state", + index=0, + number=1, + type=14, + cpp_type=8, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ) + ], + extensions=[], + nested_types=[], + enum_types=[_TABLE_CLUSTERSTATE_REPLICATIONSTATE], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=427, + serialized_end=653, ) _TABLE_CLUSTERSTATESENTRY = _descriptor.Descriptor( - name='ClusterStatesEntry', - full_name='google.bigtable.admin.v2.Table.ClusterStatesEntry', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='key', full_name='google.bigtable.admin.v2.Table.ClusterStatesEntry.key', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='value', full_name='google.bigtable.admin.v2.Table.ClusterStatesEntry.value', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - 
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=655, - serialized_end=753, + name="ClusterStatesEntry", + full_name="google.bigtable.admin.v2.Table.ClusterStatesEntry", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="key", + full_name="google.bigtable.admin.v2.Table.ClusterStatesEntry.key", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="value", + full_name="google.bigtable.admin.v2.Table.ClusterStatesEntry.value", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b("8\001")), + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=655, + serialized_end=753, ) _TABLE_COLUMNFAMILIESENTRY = _descriptor.Descriptor( - name='ColumnFamiliesEntry', - full_name='google.bigtable.admin.v2.Table.ColumnFamiliesEntry', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='key', full_name='google.bigtable.admin.v2.Table.ColumnFamiliesEntry.key', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='value', full_name='google.bigtable.admin.v2.Table.ColumnFamiliesEntry.value', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=755, - serialized_end=848, + name="ColumnFamiliesEntry", + full_name="google.bigtable.admin.v2.Table.ColumnFamiliesEntry", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="key", + full_name="google.bigtable.admin.v2.Table.ColumnFamiliesEntry.key", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="value", + full_name="google.bigtable.admin.v2.Table.ColumnFamiliesEntry.value", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], 
+ extensions=[], + nested_types=[], + enum_types=[], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b("8\001")), + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=755, + serialized_end=848, ) _TABLE = _descriptor.Descriptor( - name='Table', - full_name='google.bigtable.admin.v2.Table', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.v2.Table.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='cluster_states', full_name='google.bigtable.admin.v2.Table.cluster_states', index=1, - number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='column_families', full_name='google.bigtable.admin.v2.Table.column_families', index=2, - number=3, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='granularity', full_name='google.bigtable.admin.v2.Table.granularity', index=3, - number=4, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[_TABLE_CLUSTERSTATE, _TABLE_CLUSTERSTATESENTRY, _TABLE_COLUMNFAMILIESENTRY, ], - enum_types=[ - _TABLE_TIMESTAMPGRANULARITY, - _TABLE_VIEW, - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=174, - serialized_end=1017, + name="Table", + full_name="google.bigtable.admin.v2.Table", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.admin.v2.Table.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="cluster_states", + full_name="google.bigtable.admin.v2.Table.cluster_states", + index=1, + number=2, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="column_families", + full_name="google.bigtable.admin.v2.Table.column_families", + index=2, + number=3, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="granularity", + full_name="google.bigtable.admin.v2.Table.granularity", + index=3, + number=4, + type=14, + 
cpp_type=8, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[ + _TABLE_CLUSTERSTATE, + _TABLE_CLUSTERSTATESENTRY, + _TABLE_COLUMNFAMILIESENTRY, + ], + enum_types=[_TABLE_TIMESTAMPGRANULARITY, _TABLE_VIEW], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=174, + serialized_end=1017, ) _COLUMNFAMILY = _descriptor.Descriptor( - name='ColumnFamily', - full_name='google.bigtable.admin.v2.ColumnFamily', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='gc_rule', full_name='google.bigtable.admin.v2.ColumnFamily.gc_rule', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1019, - serialized_end=1084, + name="ColumnFamily", + full_name="google.bigtable.admin.v2.ColumnFamily", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="gc_rule", + full_name="google.bigtable.admin.v2.ColumnFamily.gc_rule", + index=0, + number=1, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ) + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1019, + serialized_end=1084, ) _GCRULE_INTERSECTION = _descriptor.Descriptor( - name='Intersection', - full_name='google.bigtable.admin.v2.GcRule.Intersection', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='rules', full_name='google.bigtable.admin.v2.GcRule.Intersection.rules', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1299, - serialized_end=1362, + name="Intersection", + full_name="google.bigtable.admin.v2.GcRule.Intersection", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="rules", + full_name="google.bigtable.admin.v2.GcRule.Intersection.rules", + index=0, + number=1, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ) + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1299, + serialized_end=1362, ) _GCRULE_UNION = _descriptor.Descriptor( - name='Union', - 
full_name='google.bigtable.admin.v2.GcRule.Union', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='rules', full_name='google.bigtable.admin.v2.GcRule.Union.rules', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1364, - serialized_end=1420, + name="Union", + full_name="google.bigtable.admin.v2.GcRule.Union", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="rules", + full_name="google.bigtable.admin.v2.GcRule.Union.rules", + index=0, + number=1, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ) + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1364, + serialized_end=1420, ) _GCRULE = _descriptor.Descriptor( - name='GcRule', - full_name='google.bigtable.admin.v2.GcRule', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='max_num_versions', full_name='google.bigtable.admin.v2.GcRule.max_num_versions', index=0, - number=1, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='max_age', full_name='google.bigtable.admin.v2.GcRule.max_age', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='intersection', full_name='google.bigtable.admin.v2.GcRule.intersection', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='union', full_name='google.bigtable.admin.v2.GcRule.union', index=3, - number=4, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[_GCRULE_INTERSECTION, _GCRULE_UNION, ], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name='rule', full_name='google.bigtable.admin.v2.GcRule.rule', - index=0, containing_type=None, fields=[]), - ], - serialized_start=1087, - serialized_end=1428, + name="GcRule", + full_name="google.bigtable.admin.v2.GcRule", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="max_num_versions", + 
full_name="google.bigtable.admin.v2.GcRule.max_num_versions", + index=0, + number=1, + type=5, + cpp_type=1, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="max_age", + full_name="google.bigtable.admin.v2.GcRule.max_age", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="intersection", + full_name="google.bigtable.admin.v2.GcRule.intersection", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="union", + full_name="google.bigtable.admin.v2.GcRule.union", + index=3, + number=4, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[_GCRULE_INTERSECTION, _GCRULE_UNION], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name="rule", + full_name="google.bigtable.admin.v2.GcRule.rule", + index=0, + containing_type=None, + fields=[], + ) + ], + serialized_start=1087, + serialized_end=1428, ) _SNAPSHOT = _descriptor.Descriptor( - name='Snapshot', - full_name='google.bigtable.admin.v2.Snapshot', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.v2.Snapshot.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='source_table', full_name='google.bigtable.admin.v2.Snapshot.source_table', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='data_size_bytes', full_name='google.bigtable.admin.v2.Snapshot.data_size_bytes', index=2, - number=3, type=3, cpp_type=2, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='create_time', full_name='google.bigtable.admin.v2.Snapshot.create_time', index=3, - number=4, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='delete_time', full_name='google.bigtable.admin.v2.Snapshot.delete_time', index=4, - number=5, type=11, cpp_type=10, label=1, - 
has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='state', full_name='google.bigtable.admin.v2.Snapshot.state', index=5, - number=6, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='description', full_name='google.bigtable.admin.v2.Snapshot.description', index=6, - number=7, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _SNAPSHOT_STATE, - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1431, - serialized_end=1766, + name="Snapshot", + full_name="google.bigtable.admin.v2.Snapshot", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.admin.v2.Snapshot.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="source_table", + full_name="google.bigtable.admin.v2.Snapshot.source_table", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="data_size_bytes", + full_name="google.bigtable.admin.v2.Snapshot.data_size_bytes", + index=2, + number=3, + type=3, + cpp_type=2, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="create_time", + full_name="google.bigtable.admin.v2.Snapshot.create_time", + index=3, + number=4, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="delete_time", + full_name="google.bigtable.admin.v2.Snapshot.delete_time", + index=4, + number=5, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="state", + full_name="google.bigtable.admin.v2.Snapshot.state", + index=5, + number=6, + type=14, + cpp_type=8, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="description", + 
full_name="google.bigtable.admin.v2.Snapshot.description", + index=6, + number=7, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[_SNAPSHOT_STATE], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1431, + serialized_end=1766, ) -_TABLE_CLUSTERSTATE.fields_by_name['replication_state'].enum_type = _TABLE_CLUSTERSTATE_REPLICATIONSTATE +_TABLE_CLUSTERSTATE.fields_by_name[ + "replication_state" +].enum_type = _TABLE_CLUSTERSTATE_REPLICATIONSTATE _TABLE_CLUSTERSTATE.containing_type = _TABLE _TABLE_CLUSTERSTATE_REPLICATIONSTATE.containing_type = _TABLE_CLUSTERSTATE -_TABLE_CLUSTERSTATESENTRY.fields_by_name['value'].message_type = _TABLE_CLUSTERSTATE +_TABLE_CLUSTERSTATESENTRY.fields_by_name["value"].message_type = _TABLE_CLUSTERSTATE _TABLE_CLUSTERSTATESENTRY.containing_type = _TABLE -_TABLE_COLUMNFAMILIESENTRY.fields_by_name['value'].message_type = _COLUMNFAMILY +_TABLE_COLUMNFAMILIESENTRY.fields_by_name["value"].message_type = _COLUMNFAMILY _TABLE_COLUMNFAMILIESENTRY.containing_type = _TABLE -_TABLE.fields_by_name['cluster_states'].message_type = _TABLE_CLUSTERSTATESENTRY -_TABLE.fields_by_name['column_families'].message_type = _TABLE_COLUMNFAMILIESENTRY -_TABLE.fields_by_name['granularity'].enum_type = _TABLE_TIMESTAMPGRANULARITY +_TABLE.fields_by_name["cluster_states"].message_type = _TABLE_CLUSTERSTATESENTRY +_TABLE.fields_by_name["column_families"].message_type = _TABLE_COLUMNFAMILIESENTRY +_TABLE.fields_by_name["granularity"].enum_type = _TABLE_TIMESTAMPGRANULARITY _TABLE_TIMESTAMPGRANULARITY.containing_type = _TABLE _TABLE_VIEW.containing_type = _TABLE -_COLUMNFAMILY.fields_by_name['gc_rule'].message_type = _GCRULE -_GCRULE_INTERSECTION.fields_by_name['rules'].message_type = _GCRULE +_COLUMNFAMILY.fields_by_name["gc_rule"].message_type = _GCRULE +_GCRULE_INTERSECTION.fields_by_name["rules"].message_type = _GCRULE _GCRULE_INTERSECTION.containing_type = _GCRULE -_GCRULE_UNION.fields_by_name['rules'].message_type = _GCRULE +_GCRULE_UNION.fields_by_name["rules"].message_type = _GCRULE _GCRULE_UNION.containing_type = _GCRULE -_GCRULE.fields_by_name['max_age'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION -_GCRULE.fields_by_name['intersection'].message_type = _GCRULE_INTERSECTION -_GCRULE.fields_by_name['union'].message_type = _GCRULE_UNION -_GCRULE.oneofs_by_name['rule'].fields.append( - _GCRULE.fields_by_name['max_num_versions']) -_GCRULE.fields_by_name['max_num_versions'].containing_oneof = _GCRULE.oneofs_by_name['rule'] -_GCRULE.oneofs_by_name['rule'].fields.append( - _GCRULE.fields_by_name['max_age']) -_GCRULE.fields_by_name['max_age'].containing_oneof = _GCRULE.oneofs_by_name['rule'] -_GCRULE.oneofs_by_name['rule'].fields.append( - _GCRULE.fields_by_name['intersection']) -_GCRULE.fields_by_name['intersection'].containing_oneof = _GCRULE.oneofs_by_name['rule'] -_GCRULE.oneofs_by_name['rule'].fields.append( - _GCRULE.fields_by_name['union']) -_GCRULE.fields_by_name['union'].containing_oneof = _GCRULE.oneofs_by_name['rule'] -_SNAPSHOT.fields_by_name['source_table'].message_type = _TABLE -_SNAPSHOT.fields_by_name['create_time'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_SNAPSHOT.fields_by_name['delete_time'].message_type = 
google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_SNAPSHOT.fields_by_name['state'].enum_type = _SNAPSHOT_STATE +_GCRULE.fields_by_name[ + "max_age" +].message_type = google_dot_protobuf_dot_duration__pb2._DURATION +_GCRULE.fields_by_name["intersection"].message_type = _GCRULE_INTERSECTION +_GCRULE.fields_by_name["union"].message_type = _GCRULE_UNION +_GCRULE.oneofs_by_name["rule"].fields.append(_GCRULE.fields_by_name["max_num_versions"]) +_GCRULE.fields_by_name["max_num_versions"].containing_oneof = _GCRULE.oneofs_by_name[ + "rule" +] +_GCRULE.oneofs_by_name["rule"].fields.append(_GCRULE.fields_by_name["max_age"]) +_GCRULE.fields_by_name["max_age"].containing_oneof = _GCRULE.oneofs_by_name["rule"] +_GCRULE.oneofs_by_name["rule"].fields.append(_GCRULE.fields_by_name["intersection"]) +_GCRULE.fields_by_name["intersection"].containing_oneof = _GCRULE.oneofs_by_name["rule"] +_GCRULE.oneofs_by_name["rule"].fields.append(_GCRULE.fields_by_name["union"]) +_GCRULE.fields_by_name["union"].containing_oneof = _GCRULE.oneofs_by_name["rule"] +_SNAPSHOT.fields_by_name["source_table"].message_type = _TABLE +_SNAPSHOT.fields_by_name[ + "create_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_SNAPSHOT.fields_by_name[ + "delete_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_SNAPSHOT.fields_by_name["state"].enum_type = _SNAPSHOT_STATE _SNAPSHOT_STATE.containing_type = _SNAPSHOT -DESCRIPTOR.message_types_by_name['Table'] = _TABLE -DESCRIPTOR.message_types_by_name['ColumnFamily'] = _COLUMNFAMILY -DESCRIPTOR.message_types_by_name['GcRule'] = _GCRULE -DESCRIPTOR.message_types_by_name['Snapshot'] = _SNAPSHOT +DESCRIPTOR.message_types_by_name["Table"] = _TABLE +DESCRIPTOR.message_types_by_name["ColumnFamily"] = _COLUMNFAMILY +DESCRIPTOR.message_types_by_name["GcRule"] = _GCRULE +DESCRIPTOR.message_types_by_name["Snapshot"] = _SNAPSHOT _sym_db.RegisterFileDescriptor(DESCRIPTOR) -Table = _reflection.GeneratedProtocolMessageType('Table', (_message.Message,), dict( - - ClusterState = _reflection.GeneratedProtocolMessageType('ClusterState', (_message.Message,), dict( - DESCRIPTOR = _TABLE_CLUSTERSTATE, - __module__ = 'google.cloud.bigtable.admin_v2.proto.table_pb2' - , - __doc__ = """The state of a table's data in a particular cluster. +Table = _reflection.GeneratedProtocolMessageType( + "Table", + (_message.Message,), + dict( + ClusterState=_reflection.GeneratedProtocolMessageType( + "ClusterState", + (_message.Message,), + dict( + DESCRIPTOR=_TABLE_CLUSTERSTATE, + __module__="google.cloud.bigtable.admin_v2.proto.table_pb2", + __doc__="""The state of a table's data in a particular cluster. Attributes: @@ -580,27 +817,30 @@ (``OutputOnly``) The state of replication for the table in this cluster. 
""", - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table.ClusterState) - )) - , - - ClusterStatesEntry = _reflection.GeneratedProtocolMessageType('ClusterStatesEntry', (_message.Message,), dict( - DESCRIPTOR = _TABLE_CLUSTERSTATESENTRY, - __module__ = 'google.cloud.bigtable.admin_v2.proto.table_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table.ClusterStatesEntry) - )) - , - - ColumnFamiliesEntry = _reflection.GeneratedProtocolMessageType('ColumnFamiliesEntry', (_message.Message,), dict( - DESCRIPTOR = _TABLE_COLUMNFAMILIESENTRY, - __module__ = 'google.cloud.bigtable.admin_v2.proto.table_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table.ColumnFamiliesEntry) - )) - , - DESCRIPTOR = _TABLE, - __module__ = 'google.cloud.bigtable.admin_v2.proto.table_pb2' - , - __doc__ = """A collection of user data indexed by row, column, and timestamp. Each + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table.ClusterState) + ), + ), + ClusterStatesEntry=_reflection.GeneratedProtocolMessageType( + "ClusterStatesEntry", + (_message.Message,), + dict( + DESCRIPTOR=_TABLE_CLUSTERSTATESENTRY, + __module__="google.cloud.bigtable.admin_v2.proto.table_pb2" + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table.ClusterStatesEntry) + ), + ), + ColumnFamiliesEntry=_reflection.GeneratedProtocolMessageType( + "ColumnFamiliesEntry", + (_message.Message,), + dict( + DESCRIPTOR=_TABLE_COLUMNFAMILIESENTRY, + __module__="google.cloud.bigtable.admin_v2.proto.table_pb2" + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table.ColumnFamiliesEntry) + ), + ), + DESCRIPTOR=_TABLE, + __module__="google.cloud.bigtable.admin_v2.proto.table_pb2", + __doc__="""A collection of user data indexed by row, column, and timestamp. Each table is served using the resources of its parent cluster. @@ -628,18 +868,21 @@ time, the value will be set to ``MILLIS``. Views: ``SCHEMA_VIEW``, ``FULL`` """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table) + ), +) _sym_db.RegisterMessage(Table) _sym_db.RegisterMessage(Table.ClusterState) _sym_db.RegisterMessage(Table.ClusterStatesEntry) _sym_db.RegisterMessage(Table.ColumnFamiliesEntry) -ColumnFamily = _reflection.GeneratedProtocolMessageType('ColumnFamily', (_message.Message,), dict( - DESCRIPTOR = _COLUMNFAMILY, - __module__ = 'google.cloud.bigtable.admin_v2.proto.table_pb2' - , - __doc__ = """A set of columns within a table which share a common configuration. +ColumnFamily = _reflection.GeneratedProtocolMessageType( + "ColumnFamily", + (_message.Message,), + dict( + DESCRIPTOR=_COLUMNFAMILY, + __module__="google.cloud.bigtable.admin_v2.proto.table_pb2", + __doc__="""A set of columns within a table which share a common configuration. Attributes: @@ -650,17 +893,22 @@ possible for reads to return a cell even if it matches the active GC expression for its family. 
""", - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ColumnFamily) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ColumnFamily) + ), +) _sym_db.RegisterMessage(ColumnFamily) -GcRule = _reflection.GeneratedProtocolMessageType('GcRule', (_message.Message,), dict( - - Intersection = _reflection.GeneratedProtocolMessageType('Intersection', (_message.Message,), dict( - DESCRIPTOR = _GCRULE_INTERSECTION, - __module__ = 'google.cloud.bigtable.admin_v2.proto.table_pb2' - , - __doc__ = """A GcRule which deletes cells matching all of the given rules. +GcRule = _reflection.GeneratedProtocolMessageType( + "GcRule", + (_message.Message,), + dict( + Intersection=_reflection.GeneratedProtocolMessageType( + "Intersection", + (_message.Message,), + dict( + DESCRIPTOR=_GCRULE_INTERSECTION, + __module__="google.cloud.bigtable.admin_v2.proto.table_pb2", + __doc__="""A GcRule which deletes cells matching all of the given rules. Attributes: @@ -668,15 +916,16 @@ Only delete cells which would be deleted by every element of ``rules``. """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GcRule.Intersection) - )) - , - - Union = _reflection.GeneratedProtocolMessageType('Union', (_message.Message,), dict( - DESCRIPTOR = _GCRULE_UNION, - __module__ = 'google.cloud.bigtable.admin_v2.proto.table_pb2' - , - __doc__ = """A GcRule which deletes cells matching any of the given rules. + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GcRule.Intersection) + ), + ), + Union=_reflection.GeneratedProtocolMessageType( + "Union", + (_message.Message,), + dict( + DESCRIPTOR=_GCRULE_UNION, + __module__="google.cloud.bigtable.admin_v2.proto.table_pb2", + __doc__="""A GcRule which deletes cells matching any of the given rules. Attributes: @@ -684,13 +933,12 @@ Delete cells which would be deleted by any element of ``rules``. """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GcRule.Union) - )) - , - DESCRIPTOR = _GCRULE, - __module__ = 'google.cloud.bigtable.admin_v2.proto.table_pb2' - , - __doc__ = """Rule for determining which cells to delete during garbage collection. + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GcRule.Union) + ), + ), + DESCRIPTOR=_GCRULE, + __module__="google.cloud.bigtable.admin_v2.proto.table_pb2", + __doc__="""Rule for determining which cells to delete during garbage collection. Attributes: @@ -707,17 +955,20 @@ union: Delete cells that would be deleted by any nested rule. """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GcRule) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GcRule) + ), +) _sym_db.RegisterMessage(GcRule) _sym_db.RegisterMessage(GcRule.Intersection) _sym_db.RegisterMessage(GcRule.Union) -Snapshot = _reflection.GeneratedProtocolMessageType('Snapshot', (_message.Message,), dict( - DESCRIPTOR = _SNAPSHOT, - __module__ = 'google.cloud.bigtable.admin_v2.proto.table_pb2' - , - __doc__ = """A snapshot of a table at a particular time. A snapshot can be used as a +Snapshot = _reflection.GeneratedProtocolMessageType( + "Snapshot", + (_message.Message,), + dict( + DESCRIPTOR=_SNAPSHOT, + __module__="google.cloud.bigtable.admin_v2.proto.table_pb2", + __doc__="""A snapshot of a table at a particular time. A snapshot can be used as a checkpoint for data restoration or a data source for a new table. Note: This is a private alpha release of Cloud Bigtable snapshots. 
This @@ -752,15 +1003,25 @@ description: (``OutputOnly``) Description of the snapshot. """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Snapshot) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Snapshot) + ), +) _sym_db.RegisterMessage(Snapshot) DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\034com.google.bigtable.admin.v2B\nTableProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2')) +DESCRIPTOR._options = _descriptor._ParseOptions( + descriptor_pb2.FileOptions(), + _b( + "\n\034com.google.bigtable.admin.v2B\nTableProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2" + ), +) _TABLE_CLUSTERSTATESENTRY.has_options = True -_TABLE_CLUSTERSTATESENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) +_TABLE_CLUSTERSTATESENTRY._options = _descriptor._ParseOptions( + descriptor_pb2.MessageOptions(), _b("8\001") +) _TABLE_COLUMNFAMILIESENTRY.has_options = True -_TABLE_COLUMNFAMILIESENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) +_TABLE_COLUMNFAMILIESENTRY._options = _descriptor._ParseOptions( + descriptor_pb2.MessageOptions(), _b("8\001") +) # @@protoc_insertion_point(module_scope) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2_grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2_grpc.py index a89435267cb2..07cb78fe03a9 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2_grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2_grpc.py @@ -1,3 +1,2 @@ # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
import grpc - diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py index da10ffd36bda..994ca3d6a6f9 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py @@ -2,744 +2,1236 @@ # source: google/cloud/bigtable_v2/proto/bigtable.proto import sys -_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) + +_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database from google.protobuf import descriptor_pb2 + # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.cloud.bigtable_v2.proto import data_pb2 as google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2 +from google.cloud.bigtable_v2.proto import ( + data_pb2 as google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2, +) from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2 from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2 DESCRIPTOR = _descriptor.FileDescriptor( - name='google/cloud/bigtable_v2/proto/bigtable.proto', - package='google.bigtable.v2', - syntax='proto3', - serialized_pb=_b('\n-google/cloud/bigtable_v2/proto/bigtable.proto\x12\x12google.bigtable.v2\x1a\x1cgoogle/api/annotations.proto\x1a)google/cloud/bigtable_v2/proto/data.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x17google/rpc/status.proto\"\xaa\x01\n\x0fReadRowsRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x05 \x01(\t\x12(\n\x04rows\x18\x02 \x01(\x0b\x32\x1a.google.bigtable.v2.RowSet\x12-\n\x06\x66ilter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x12\n\nrows_limit\x18\x04 \x01(\x03\"\xf8\x02\n\x10ReadRowsResponse\x12>\n\x06\x63hunks\x18\x01 \x03(\x0b\x32..google.bigtable.v2.ReadRowsResponse.CellChunk\x12\x1c\n\x14last_scanned_row_key\x18\x02 \x01(\x0c\x1a\x85\x02\n\tCellChunk\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x31\n\x0b\x66\x61mily_name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\tqualifier\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.BytesValue\x12\x18\n\x10timestamp_micros\x18\x04 \x01(\x03\x12\x0e\n\x06labels\x18\x05 \x03(\t\x12\r\n\x05value\x18\x06 \x01(\x0c\x12\x12\n\nvalue_size\x18\x07 \x01(\x05\x12\x13\n\treset_row\x18\x08 \x01(\x08H\x00\x12\x14\n\ncommit_row\x18\t \x01(\x08H\x00\x42\x0c\n\nrow_status\"B\n\x14SampleRowKeysRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t\">\n\x15SampleRowKeysResponse\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x14\n\x0coffset_bytes\x18\x02 \x01(\x03\"\x80\x01\n\x10MutateRowRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x04 \x01(\t\x12\x0f\n\x07row_key\x18\x02 \x01(\x0c\x12/\n\tmutations\x18\x03 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\"\x13\n\x11MutateRowResponse\"\xc8\x01\n\x11MutateRowsRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x03 \x01(\t\x12<\n\x07\x65ntries\x18\x02 \x03(\x0b\x32+.google.bigtable.v2.MutateRowsRequest.Entry\x1aI\n\x05\x45ntry\x12\x0f\n\x07row_key\x18\x01 
\x01(\x0c\x12/\n\tmutations\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\"\x8f\x01\n\x12MutateRowsResponse\x12=\n\x07\x65ntries\x18\x01 \x03(\x0b\x32,.google.bigtable.v2.MutateRowsResponse.Entry\x1a:\n\x05\x45ntry\x12\r\n\x05index\x18\x01 \x01(\x03\x12\"\n\x06status\x18\x02 \x01(\x0b\x32\x12.google.rpc.Status\"\xfd\x01\n\x18\x43heckAndMutateRowRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x07 \x01(\t\x12\x0f\n\x07row_key\x18\x02 \x01(\x0c\x12\x37\n\x10predicate_filter\x18\x06 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x34\n\x0etrue_mutations\x18\x04 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\x12\x35\n\x0f\x66\x61lse_mutations\x18\x05 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\"6\n\x19\x43heckAndMutateRowResponse\x12\x19\n\x11predicate_matched\x18\x01 \x01(\x08\"\x90\x01\n\x19ReadModifyWriteRowRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x04 \x01(\t\x12\x0f\n\x07row_key\x18\x02 \x01(\x0c\x12\x36\n\x05rules\x18\x03 \x03(\x0b\x32\'.google.bigtable.v2.ReadModifyWriteRule\"B\n\x1aReadModifyWriteRowResponse\x12$\n\x03row\x18\x01 \x01(\x0b\x32\x17.google.bigtable.v2.Row2\xad\x08\n\x08\x42igtable\x12\x9d\x01\n\x08ReadRows\x12#.google.bigtable.v2.ReadRowsRequest\x1a$.google.bigtable.v2.ReadRowsResponse\"D\x82\xd3\xe4\x93\x02>\"9/v2/{table_name=projects/*/instances/*/tables/*}:readRows:\x01*0\x01\x12\xae\x01\n\rSampleRowKeys\x12(.google.bigtable.v2.SampleRowKeysRequest\x1a).google.bigtable.v2.SampleRowKeysResponse\"F\x82\xd3\xe4\x93\x02@\x12>/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys0\x01\x12\x9f\x01\n\tMutateRow\x12$.google.bigtable.v2.MutateRowRequest\x1a%.google.bigtable.v2.MutateRowResponse\"E\x82\xd3\xe4\x93\x02?\":/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow:\x01*\x12\xa5\x01\n\nMutateRows\x12%.google.bigtable.v2.MutateRowsRequest\x1a&.google.bigtable.v2.MutateRowsResponse\"F\x82\xd3\xe4\x93\x02@\";/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows:\x01*0\x01\x12\xbf\x01\n\x11\x43heckAndMutateRow\x12,.google.bigtable.v2.CheckAndMutateRowRequest\x1a-.google.bigtable.v2.CheckAndMutateRowResponse\"M\x82\xd3\xe4\x93\x02G\"B/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow:\x01*\x12\xc3\x01\n\x12ReadModifyWriteRow\x12-.google.bigtable.v2.ReadModifyWriteRowRequest\x1a..google.bigtable.v2.ReadModifyWriteRowResponse\"N\x82\xd3\xe4\x93\x02H\"C/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow:\x01*B\x9b\x01\n\x16\x63om.google.bigtable.v2B\rBigtableProtoP\x01Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2b\x06proto3') - , - dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2.DESCRIPTOR,google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR,google_dot_rpc_dot_status__pb2.DESCRIPTOR,]) - - + name="google/cloud/bigtable_v2/proto/bigtable.proto", + package="google.bigtable.v2", + syntax="proto3", + serialized_pb=_b( + '\n-google/cloud/bigtable_v2/proto/bigtable.proto\x12\x12google.bigtable.v2\x1a\x1cgoogle/api/annotations.proto\x1a)google/cloud/bigtable_v2/proto/data.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x17google/rpc/status.proto"\xaa\x01\n\x0fReadRowsRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x05 \x01(\t\x12(\n\x04rows\x18\x02 \x01(\x0b\x32\x1a.google.bigtable.v2.RowSet\x12-\n\x06\x66ilter\x18\x03 
\x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x12\n\nrows_limit\x18\x04 \x01(\x03"\xf8\x02\n\x10ReadRowsResponse\x12>\n\x06\x63hunks\x18\x01 \x03(\x0b\x32..google.bigtable.v2.ReadRowsResponse.CellChunk\x12\x1c\n\x14last_scanned_row_key\x18\x02 \x01(\x0c\x1a\x85\x02\n\tCellChunk\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x31\n\x0b\x66\x61mily_name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\tqualifier\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.BytesValue\x12\x18\n\x10timestamp_micros\x18\x04 \x01(\x03\x12\x0e\n\x06labels\x18\x05 \x03(\t\x12\r\n\x05value\x18\x06 \x01(\x0c\x12\x12\n\nvalue_size\x18\x07 \x01(\x05\x12\x13\n\treset_row\x18\x08 \x01(\x08H\x00\x12\x14\n\ncommit_row\x18\t \x01(\x08H\x00\x42\x0c\n\nrow_status"B\n\x14SampleRowKeysRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t">\n\x15SampleRowKeysResponse\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x14\n\x0coffset_bytes\x18\x02 \x01(\x03"\x80\x01\n\x10MutateRowRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x04 \x01(\t\x12\x0f\n\x07row_key\x18\x02 \x01(\x0c\x12/\n\tmutations\x18\x03 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation"\x13\n\x11MutateRowResponse"\xc8\x01\n\x11MutateRowsRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x03 \x01(\t\x12<\n\x07\x65ntries\x18\x02 \x03(\x0b\x32+.google.bigtable.v2.MutateRowsRequest.Entry\x1aI\n\x05\x45ntry\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12/\n\tmutations\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation"\x8f\x01\n\x12MutateRowsResponse\x12=\n\x07\x65ntries\x18\x01 \x03(\x0b\x32,.google.bigtable.v2.MutateRowsResponse.Entry\x1a:\n\x05\x45ntry\x12\r\n\x05index\x18\x01 \x01(\x03\x12"\n\x06status\x18\x02 \x01(\x0b\x32\x12.google.rpc.Status"\xfd\x01\n\x18\x43heckAndMutateRowRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x07 \x01(\t\x12\x0f\n\x07row_key\x18\x02 \x01(\x0c\x12\x37\n\x10predicate_filter\x18\x06 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x34\n\x0etrue_mutations\x18\x04 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\x12\x35\n\x0f\x66\x61lse_mutations\x18\x05 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation"6\n\x19\x43heckAndMutateRowResponse\x12\x19\n\x11predicate_matched\x18\x01 \x01(\x08"\x90\x01\n\x19ReadModifyWriteRowRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x04 \x01(\t\x12\x0f\n\x07row_key\x18\x02 \x01(\x0c\x12\x36\n\x05rules\x18\x03 \x03(\x0b\x32\'.google.bigtable.v2.ReadModifyWriteRule"B\n\x1aReadModifyWriteRowResponse\x12$\n\x03row\x18\x01 
\x01(\x0b\x32\x17.google.bigtable.v2.Row2\xad\x08\n\x08\x42igtable\x12\x9d\x01\n\x08ReadRows\x12#.google.bigtable.v2.ReadRowsRequest\x1a$.google.bigtable.v2.ReadRowsResponse"D\x82\xd3\xe4\x93\x02>"9/v2/{table_name=projects/*/instances/*/tables/*}:readRows:\x01*0\x01\x12\xae\x01\n\rSampleRowKeys\x12(.google.bigtable.v2.SampleRowKeysRequest\x1a).google.bigtable.v2.SampleRowKeysResponse"F\x82\xd3\xe4\x93\x02@\x12>/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys0\x01\x12\x9f\x01\n\tMutateRow\x12$.google.bigtable.v2.MutateRowRequest\x1a%.google.bigtable.v2.MutateRowResponse"E\x82\xd3\xe4\x93\x02?":/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow:\x01*\x12\xa5\x01\n\nMutateRows\x12%.google.bigtable.v2.MutateRowsRequest\x1a&.google.bigtable.v2.MutateRowsResponse"F\x82\xd3\xe4\x93\x02@";/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows:\x01*0\x01\x12\xbf\x01\n\x11\x43heckAndMutateRow\x12,.google.bigtable.v2.CheckAndMutateRowRequest\x1a-.google.bigtable.v2.CheckAndMutateRowResponse"M\x82\xd3\xe4\x93\x02G"B/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow:\x01*\x12\xc3\x01\n\x12ReadModifyWriteRow\x12-.google.bigtable.v2.ReadModifyWriteRowRequest\x1a..google.bigtable.v2.ReadModifyWriteRowResponse"N\x82\xd3\xe4\x93\x02H"C/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow:\x01*B\x9b\x01\n\x16\x63om.google.bigtable.v2B\rBigtableProtoP\x01Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2b\x06proto3' + ), + dependencies=[ + google_dot_api_dot_annotations__pb2.DESCRIPTOR, + google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2.DESCRIPTOR, + google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR, + google_dot_rpc_dot_status__pb2.DESCRIPTOR, + ], +) _READROWSREQUEST = _descriptor.Descriptor( - name='ReadRowsRequest', - full_name='google.bigtable.v2.ReadRowsRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='table_name', full_name='google.bigtable.v2.ReadRowsRequest.table_name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='app_profile_id', full_name='google.bigtable.v2.ReadRowsRequest.app_profile_id', index=1, - number=5, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='rows', full_name='google.bigtable.v2.ReadRowsRequest.rows', index=2, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='filter', full_name='google.bigtable.v2.ReadRowsRequest.filter', index=3, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='rows_limit', full_name='google.bigtable.v2.ReadRowsRequest.rows_limit', index=4, - number=4, type=3, 
cpp_type=2, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=200, - serialized_end=370, + name="ReadRowsRequest", + full_name="google.bigtable.v2.ReadRowsRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="table_name", + full_name="google.bigtable.v2.ReadRowsRequest.table_name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="app_profile_id", + full_name="google.bigtable.v2.ReadRowsRequest.app_profile_id", + index=1, + number=5, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="rows", + full_name="google.bigtable.v2.ReadRowsRequest.rows", + index=2, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="filter", + full_name="google.bigtable.v2.ReadRowsRequest.filter", + index=3, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="rows_limit", + full_name="google.bigtable.v2.ReadRowsRequest.rows_limit", + index=4, + number=4, + type=3, + cpp_type=2, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=200, + serialized_end=370, ) _READROWSRESPONSE_CELLCHUNK = _descriptor.Descriptor( - name='CellChunk', - full_name='google.bigtable.v2.ReadRowsResponse.CellChunk', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='row_key', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.row_key', index=0, - number=1, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='family_name', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.family_name', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - 
_descriptor.FieldDescriptor( - name='qualifier', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.qualifier', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='timestamp_micros', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.timestamp_micros', index=3, - number=4, type=3, cpp_type=2, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='labels', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.labels', index=4, - number=5, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='value', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.value', index=5, - number=6, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='value_size', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.value_size', index=6, - number=7, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='reset_row', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.reset_row', index=7, - number=8, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='commit_row', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.commit_row', index=8, - number=9, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name='row_status', full_name='google.bigtable.v2.ReadRowsResponse.CellChunk.row_status', - index=0, containing_type=None, fields=[]), - ], - serialized_start=488, - serialized_end=749, + name="CellChunk", + full_name="google.bigtable.v2.ReadRowsResponse.CellChunk", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="row_key", + full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.row_key", + index=0, + number=1, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b(""), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="family_name", + full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.family_name", + index=1, + number=2, + type=11, + 
cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="qualifier", + full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.qualifier", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="timestamp_micros", + full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.timestamp_micros", + index=3, + number=4, + type=3, + cpp_type=2, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="labels", + full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.labels", + index=4, + number=5, + type=9, + cpp_type=9, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="value", + full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.value", + index=5, + number=6, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b(""), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="value_size", + full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.value_size", + index=6, + number=7, + type=5, + cpp_type=1, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="reset_row", + full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.reset_row", + index=7, + number=8, + type=8, + cpp_type=7, + label=1, + has_default_value=False, + default_value=False, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="commit_row", + full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.commit_row", + index=8, + number=9, + type=8, + cpp_type=7, + label=1, + has_default_value=False, + default_value=False, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name="row_status", + full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.row_status", + index=0, + containing_type=None, + fields=[], + ) + ], + serialized_start=488, + serialized_end=749, ) _READROWSRESPONSE = _descriptor.Descriptor( - name='ReadRowsResponse', - full_name='google.bigtable.v2.ReadRowsResponse', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='chunks', full_name='google.bigtable.v2.ReadRowsResponse.chunks', 
index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='last_scanned_row_key', full_name='google.bigtable.v2.ReadRowsResponse.last_scanned_row_key', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[_READROWSRESPONSE_CELLCHUNK, ], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=373, - serialized_end=749, + name="ReadRowsResponse", + full_name="google.bigtable.v2.ReadRowsResponse", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="chunks", + full_name="google.bigtable.v2.ReadRowsResponse.chunks", + index=0, + number=1, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="last_scanned_row_key", + full_name="google.bigtable.v2.ReadRowsResponse.last_scanned_row_key", + index=1, + number=2, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b(""), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[_READROWSRESPONSE_CELLCHUNK], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=373, + serialized_end=749, ) _SAMPLEROWKEYSREQUEST = _descriptor.Descriptor( - name='SampleRowKeysRequest', - full_name='google.bigtable.v2.SampleRowKeysRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='table_name', full_name='google.bigtable.v2.SampleRowKeysRequest.table_name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='app_profile_id', full_name='google.bigtable.v2.SampleRowKeysRequest.app_profile_id', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=751, - serialized_end=817, + name="SampleRowKeysRequest", + full_name="google.bigtable.v2.SampleRowKeysRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="table_name", + full_name="google.bigtable.v2.SampleRowKeysRequest.table_name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + 
enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="app_profile_id", + full_name="google.bigtable.v2.SampleRowKeysRequest.app_profile_id", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=751, + serialized_end=817, ) _SAMPLEROWKEYSRESPONSE = _descriptor.Descriptor( - name='SampleRowKeysResponse', - full_name='google.bigtable.v2.SampleRowKeysResponse', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='row_key', full_name='google.bigtable.v2.SampleRowKeysResponse.row_key', index=0, - number=1, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='offset_bytes', full_name='google.bigtable.v2.SampleRowKeysResponse.offset_bytes', index=1, - number=2, type=3, cpp_type=2, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=819, - serialized_end=881, + name="SampleRowKeysResponse", + full_name="google.bigtable.v2.SampleRowKeysResponse", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="row_key", + full_name="google.bigtable.v2.SampleRowKeysResponse.row_key", + index=0, + number=1, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b(""), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="offset_bytes", + full_name="google.bigtable.v2.SampleRowKeysResponse.offset_bytes", + index=1, + number=2, + type=3, + cpp_type=2, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=819, + serialized_end=881, ) _MUTATEROWREQUEST = _descriptor.Descriptor( - name='MutateRowRequest', - full_name='google.bigtable.v2.MutateRowRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='table_name', full_name='google.bigtable.v2.MutateRowRequest.table_name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - 
name='app_profile_id', full_name='google.bigtable.v2.MutateRowRequest.app_profile_id', index=1, - number=4, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='row_key', full_name='google.bigtable.v2.MutateRowRequest.row_key', index=2, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='mutations', full_name='google.bigtable.v2.MutateRowRequest.mutations', index=3, - number=3, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=884, - serialized_end=1012, + name="MutateRowRequest", + full_name="google.bigtable.v2.MutateRowRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="table_name", + full_name="google.bigtable.v2.MutateRowRequest.table_name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="app_profile_id", + full_name="google.bigtable.v2.MutateRowRequest.app_profile_id", + index=1, + number=4, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="row_key", + full_name="google.bigtable.v2.MutateRowRequest.row_key", + index=2, + number=2, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b(""), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="mutations", + full_name="google.bigtable.v2.MutateRowRequest.mutations", + index=3, + number=3, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=884, + serialized_end=1012, ) _MUTATEROWRESPONSE = _descriptor.Descriptor( - name='MutateRowResponse', - full_name='google.bigtable.v2.MutateRowResponse', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1014, - serialized_end=1033, + name="MutateRowResponse", + 
full_name="google.bigtable.v2.MutateRowResponse", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1014, + serialized_end=1033, ) _MUTATEROWSREQUEST_ENTRY = _descriptor.Descriptor( - name='Entry', - full_name='google.bigtable.v2.MutateRowsRequest.Entry', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='row_key', full_name='google.bigtable.v2.MutateRowsRequest.Entry.row_key', index=0, - number=1, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='mutations', full_name='google.bigtable.v2.MutateRowsRequest.Entry.mutations', index=1, - number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1163, - serialized_end=1236, + name="Entry", + full_name="google.bigtable.v2.MutateRowsRequest.Entry", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="row_key", + full_name="google.bigtable.v2.MutateRowsRequest.Entry.row_key", + index=0, + number=1, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b(""), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="mutations", + full_name="google.bigtable.v2.MutateRowsRequest.Entry.mutations", + index=1, + number=2, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1163, + serialized_end=1236, ) _MUTATEROWSREQUEST = _descriptor.Descriptor( - name='MutateRowsRequest', - full_name='google.bigtable.v2.MutateRowsRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='table_name', full_name='google.bigtable.v2.MutateRowsRequest.table_name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='app_profile_id', full_name='google.bigtable.v2.MutateRowsRequest.app_profile_id', index=1, - number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='entries', 
full_name='google.bigtable.v2.MutateRowsRequest.entries', index=2, - number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[_MUTATEROWSREQUEST_ENTRY, ], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1036, - serialized_end=1236, + name="MutateRowsRequest", + full_name="google.bigtable.v2.MutateRowsRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="table_name", + full_name="google.bigtable.v2.MutateRowsRequest.table_name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="app_profile_id", + full_name="google.bigtable.v2.MutateRowsRequest.app_profile_id", + index=1, + number=3, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="entries", + full_name="google.bigtable.v2.MutateRowsRequest.entries", + index=2, + number=2, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[_MUTATEROWSREQUEST_ENTRY], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1036, + serialized_end=1236, ) _MUTATEROWSRESPONSE_ENTRY = _descriptor.Descriptor( - name='Entry', - full_name='google.bigtable.v2.MutateRowsResponse.Entry', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='index', full_name='google.bigtable.v2.MutateRowsResponse.Entry.index', index=0, - number=1, type=3, cpp_type=2, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='status', full_name='google.bigtable.v2.MutateRowsResponse.Entry.status', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1324, - serialized_end=1382, + name="Entry", + full_name="google.bigtable.v2.MutateRowsResponse.Entry", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="index", + full_name="google.bigtable.v2.MutateRowsResponse.Entry.index", + index=0, + number=1, + type=3, + cpp_type=2, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + 
containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="status", + full_name="google.bigtable.v2.MutateRowsResponse.Entry.status", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1324, + serialized_end=1382, ) _MUTATEROWSRESPONSE = _descriptor.Descriptor( - name='MutateRowsResponse', - full_name='google.bigtable.v2.MutateRowsResponse', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='entries', full_name='google.bigtable.v2.MutateRowsResponse.entries', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[_MUTATEROWSRESPONSE_ENTRY, ], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1239, - serialized_end=1382, + name="MutateRowsResponse", + full_name="google.bigtable.v2.MutateRowsResponse", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="entries", + full_name="google.bigtable.v2.MutateRowsResponse.entries", + index=0, + number=1, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ) + ], + extensions=[], + nested_types=[_MUTATEROWSRESPONSE_ENTRY], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1239, + serialized_end=1382, ) _CHECKANDMUTATEROWREQUEST = _descriptor.Descriptor( - name='CheckAndMutateRowRequest', - full_name='google.bigtable.v2.CheckAndMutateRowRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='table_name', full_name='google.bigtable.v2.CheckAndMutateRowRequest.table_name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='app_profile_id', full_name='google.bigtable.v2.CheckAndMutateRowRequest.app_profile_id', index=1, - number=7, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='row_key', full_name='google.bigtable.v2.CheckAndMutateRowRequest.row_key', index=2, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( 
- name='predicate_filter', full_name='google.bigtable.v2.CheckAndMutateRowRequest.predicate_filter', index=3, - number=6, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='true_mutations', full_name='google.bigtable.v2.CheckAndMutateRowRequest.true_mutations', index=4, - number=4, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='false_mutations', full_name='google.bigtable.v2.CheckAndMutateRowRequest.false_mutations', index=5, - number=5, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1385, - serialized_end=1638, + name="CheckAndMutateRowRequest", + full_name="google.bigtable.v2.CheckAndMutateRowRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="table_name", + full_name="google.bigtable.v2.CheckAndMutateRowRequest.table_name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="app_profile_id", + full_name="google.bigtable.v2.CheckAndMutateRowRequest.app_profile_id", + index=1, + number=7, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="row_key", + full_name="google.bigtable.v2.CheckAndMutateRowRequest.row_key", + index=2, + number=2, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b(""), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="predicate_filter", + full_name="google.bigtable.v2.CheckAndMutateRowRequest.predicate_filter", + index=3, + number=6, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="true_mutations", + full_name="google.bigtable.v2.CheckAndMutateRowRequest.true_mutations", + index=4, + number=4, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="false_mutations", + full_name="google.bigtable.v2.CheckAndMutateRowRequest.false_mutations", + index=5, + number=5, + 
type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1385, + serialized_end=1638, ) _CHECKANDMUTATEROWRESPONSE = _descriptor.Descriptor( - name='CheckAndMutateRowResponse', - full_name='google.bigtable.v2.CheckAndMutateRowResponse', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='predicate_matched', full_name='google.bigtable.v2.CheckAndMutateRowResponse.predicate_matched', index=0, - number=1, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1640, - serialized_end=1694, + name="CheckAndMutateRowResponse", + full_name="google.bigtable.v2.CheckAndMutateRowResponse", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="predicate_matched", + full_name="google.bigtable.v2.CheckAndMutateRowResponse.predicate_matched", + index=0, + number=1, + type=8, + cpp_type=7, + label=1, + has_default_value=False, + default_value=False, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ) + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1640, + serialized_end=1694, ) _READMODIFYWRITEROWREQUEST = _descriptor.Descriptor( - name='ReadModifyWriteRowRequest', - full_name='google.bigtable.v2.ReadModifyWriteRowRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='table_name', full_name='google.bigtable.v2.ReadModifyWriteRowRequest.table_name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='app_profile_id', full_name='google.bigtable.v2.ReadModifyWriteRowRequest.app_profile_id', index=1, - number=4, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='row_key', full_name='google.bigtable.v2.ReadModifyWriteRowRequest.row_key', index=2, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='rules', full_name='google.bigtable.v2.ReadModifyWriteRowRequest.rules', index=3, - number=3, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, 
enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1697, - serialized_end=1841, + name="ReadModifyWriteRowRequest", + full_name="google.bigtable.v2.ReadModifyWriteRowRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="table_name", + full_name="google.bigtable.v2.ReadModifyWriteRowRequest.table_name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="app_profile_id", + full_name="google.bigtable.v2.ReadModifyWriteRowRequest.app_profile_id", + index=1, + number=4, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="row_key", + full_name="google.bigtable.v2.ReadModifyWriteRowRequest.row_key", + index=2, + number=2, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b(""), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="rules", + full_name="google.bigtable.v2.ReadModifyWriteRowRequest.rules", + index=3, + number=3, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1697, + serialized_end=1841, ) _READMODIFYWRITEROWRESPONSE = _descriptor.Descriptor( - name='ReadModifyWriteRowResponse', - full_name='google.bigtable.v2.ReadModifyWriteRowResponse', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='row', full_name='google.bigtable.v2.ReadModifyWriteRowResponse.row', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1843, - serialized_end=1909, + name="ReadModifyWriteRowResponse", + full_name="google.bigtable.v2.ReadModifyWriteRowResponse", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="row", + full_name="google.bigtable.v2.ReadModifyWriteRowResponse.row", + index=0, + number=1, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ) + 
], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1843, + serialized_end=1909, ) -_READROWSREQUEST.fields_by_name['rows'].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._ROWSET -_READROWSREQUEST.fields_by_name['filter'].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._ROWFILTER -_READROWSRESPONSE_CELLCHUNK.fields_by_name['family_name'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE -_READROWSRESPONSE_CELLCHUNK.fields_by_name['qualifier'].message_type = google_dot_protobuf_dot_wrappers__pb2._BYTESVALUE +_READROWSREQUEST.fields_by_name[ + "rows" +].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._ROWSET +_READROWSREQUEST.fields_by_name[ + "filter" +].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._ROWFILTER +_READROWSRESPONSE_CELLCHUNK.fields_by_name[ + "family_name" +].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE +_READROWSRESPONSE_CELLCHUNK.fields_by_name[ + "qualifier" +].message_type = google_dot_protobuf_dot_wrappers__pb2._BYTESVALUE _READROWSRESPONSE_CELLCHUNK.containing_type = _READROWSRESPONSE -_READROWSRESPONSE_CELLCHUNK.oneofs_by_name['row_status'].fields.append( - _READROWSRESPONSE_CELLCHUNK.fields_by_name['reset_row']) -_READROWSRESPONSE_CELLCHUNK.fields_by_name['reset_row'].containing_oneof = _READROWSRESPONSE_CELLCHUNK.oneofs_by_name['row_status'] -_READROWSRESPONSE_CELLCHUNK.oneofs_by_name['row_status'].fields.append( - _READROWSRESPONSE_CELLCHUNK.fields_by_name['commit_row']) -_READROWSRESPONSE_CELLCHUNK.fields_by_name['commit_row'].containing_oneof = _READROWSRESPONSE_CELLCHUNK.oneofs_by_name['row_status'] -_READROWSRESPONSE.fields_by_name['chunks'].message_type = _READROWSRESPONSE_CELLCHUNK -_MUTATEROWREQUEST.fields_by_name['mutations'].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._MUTATION -_MUTATEROWSREQUEST_ENTRY.fields_by_name['mutations'].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._MUTATION +_READROWSRESPONSE_CELLCHUNK.oneofs_by_name["row_status"].fields.append( + _READROWSRESPONSE_CELLCHUNK.fields_by_name["reset_row"] +) +_READROWSRESPONSE_CELLCHUNK.fields_by_name[ + "reset_row" +].containing_oneof = _READROWSRESPONSE_CELLCHUNK.oneofs_by_name["row_status"] +_READROWSRESPONSE_CELLCHUNK.oneofs_by_name["row_status"].fields.append( + _READROWSRESPONSE_CELLCHUNK.fields_by_name["commit_row"] +) +_READROWSRESPONSE_CELLCHUNK.fields_by_name[ + "commit_row" +].containing_oneof = _READROWSRESPONSE_CELLCHUNK.oneofs_by_name["row_status"] +_READROWSRESPONSE.fields_by_name["chunks"].message_type = _READROWSRESPONSE_CELLCHUNK +_MUTATEROWREQUEST.fields_by_name[ + "mutations" +].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._MUTATION +_MUTATEROWSREQUEST_ENTRY.fields_by_name[ + "mutations" +].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._MUTATION _MUTATEROWSREQUEST_ENTRY.containing_type = _MUTATEROWSREQUEST -_MUTATEROWSREQUEST.fields_by_name['entries'].message_type = _MUTATEROWSREQUEST_ENTRY -_MUTATEROWSRESPONSE_ENTRY.fields_by_name['status'].message_type = google_dot_rpc_dot_status__pb2._STATUS +_MUTATEROWSREQUEST.fields_by_name["entries"].message_type = _MUTATEROWSREQUEST_ENTRY +_MUTATEROWSRESPONSE_ENTRY.fields_by_name[ + "status" +].message_type = google_dot_rpc_dot_status__pb2._STATUS 
_MUTATEROWSRESPONSE_ENTRY.containing_type = _MUTATEROWSRESPONSE -_MUTATEROWSRESPONSE.fields_by_name['entries'].message_type = _MUTATEROWSRESPONSE_ENTRY -_CHECKANDMUTATEROWREQUEST.fields_by_name['predicate_filter'].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._ROWFILTER -_CHECKANDMUTATEROWREQUEST.fields_by_name['true_mutations'].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._MUTATION -_CHECKANDMUTATEROWREQUEST.fields_by_name['false_mutations'].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._MUTATION -_READMODIFYWRITEROWREQUEST.fields_by_name['rules'].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._READMODIFYWRITERULE -_READMODIFYWRITEROWRESPONSE.fields_by_name['row'].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._ROW -DESCRIPTOR.message_types_by_name['ReadRowsRequest'] = _READROWSREQUEST -DESCRIPTOR.message_types_by_name['ReadRowsResponse'] = _READROWSRESPONSE -DESCRIPTOR.message_types_by_name['SampleRowKeysRequest'] = _SAMPLEROWKEYSREQUEST -DESCRIPTOR.message_types_by_name['SampleRowKeysResponse'] = _SAMPLEROWKEYSRESPONSE -DESCRIPTOR.message_types_by_name['MutateRowRequest'] = _MUTATEROWREQUEST -DESCRIPTOR.message_types_by_name['MutateRowResponse'] = _MUTATEROWRESPONSE -DESCRIPTOR.message_types_by_name['MutateRowsRequest'] = _MUTATEROWSREQUEST -DESCRIPTOR.message_types_by_name['MutateRowsResponse'] = _MUTATEROWSRESPONSE -DESCRIPTOR.message_types_by_name['CheckAndMutateRowRequest'] = _CHECKANDMUTATEROWREQUEST -DESCRIPTOR.message_types_by_name['CheckAndMutateRowResponse'] = _CHECKANDMUTATEROWRESPONSE -DESCRIPTOR.message_types_by_name['ReadModifyWriteRowRequest'] = _READMODIFYWRITEROWREQUEST -DESCRIPTOR.message_types_by_name['ReadModifyWriteRowResponse'] = _READMODIFYWRITEROWRESPONSE +_MUTATEROWSRESPONSE.fields_by_name["entries"].message_type = _MUTATEROWSRESPONSE_ENTRY +_CHECKANDMUTATEROWREQUEST.fields_by_name[ + "predicate_filter" +].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._ROWFILTER +_CHECKANDMUTATEROWREQUEST.fields_by_name[ + "true_mutations" +].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._MUTATION +_CHECKANDMUTATEROWREQUEST.fields_by_name[ + "false_mutations" +].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._MUTATION +_READMODIFYWRITEROWREQUEST.fields_by_name[ + "rules" +].message_type = ( + google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._READMODIFYWRITERULE +) +_READMODIFYWRITEROWRESPONSE.fields_by_name[ + "row" +].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._ROW +DESCRIPTOR.message_types_by_name["ReadRowsRequest"] = _READROWSREQUEST +DESCRIPTOR.message_types_by_name["ReadRowsResponse"] = _READROWSRESPONSE +DESCRIPTOR.message_types_by_name["SampleRowKeysRequest"] = _SAMPLEROWKEYSREQUEST +DESCRIPTOR.message_types_by_name["SampleRowKeysResponse"] = _SAMPLEROWKEYSRESPONSE +DESCRIPTOR.message_types_by_name["MutateRowRequest"] = _MUTATEROWREQUEST +DESCRIPTOR.message_types_by_name["MutateRowResponse"] = _MUTATEROWRESPONSE +DESCRIPTOR.message_types_by_name["MutateRowsRequest"] = _MUTATEROWSREQUEST +DESCRIPTOR.message_types_by_name["MutateRowsResponse"] = _MUTATEROWSRESPONSE +DESCRIPTOR.message_types_by_name["CheckAndMutateRowRequest"] = _CHECKANDMUTATEROWREQUEST +DESCRIPTOR.message_types_by_name[ + "CheckAndMutateRowResponse" +] = _CHECKANDMUTATEROWRESPONSE +DESCRIPTOR.message_types_by_name[ + "ReadModifyWriteRowRequest" +] = 
_READMODIFYWRITEROWREQUEST +DESCRIPTOR.message_types_by_name[ + "ReadModifyWriteRowResponse" +] = _READMODIFYWRITEROWRESPONSE _sym_db.RegisterFileDescriptor(DESCRIPTOR) -ReadRowsRequest = _reflection.GeneratedProtocolMessageType('ReadRowsRequest', (_message.Message,), dict( - DESCRIPTOR = _READROWSREQUEST, - __module__ = 'google.cloud.bigtable_v2.proto.bigtable_pb2' - , - __doc__ = """Request message for Bigtable.ReadRows. +ReadRowsRequest = _reflection.GeneratedProtocolMessageType( + "ReadRowsRequest", + (_message.Message,), + dict( + DESCRIPTOR=_READROWSREQUEST, + __module__="google.cloud.bigtable_v2.proto.bigtable_pb2", + __doc__="""Request message for Bigtable.ReadRows. Attributes: @@ -760,17 +1252,22 @@ The read will terminate after committing to N rows' worth of results. The default (zero) is to return all results. """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadRowsRequest) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadRowsRequest) + ), +) _sym_db.RegisterMessage(ReadRowsRequest) -ReadRowsResponse = _reflection.GeneratedProtocolMessageType('ReadRowsResponse', (_message.Message,), dict( - - CellChunk = _reflection.GeneratedProtocolMessageType('CellChunk', (_message.Message,), dict( - DESCRIPTOR = _READROWSRESPONSE_CELLCHUNK, - __module__ = 'google.cloud.bigtable_v2.proto.bigtable_pb2' - , - __doc__ = """Specifies a piece of a row's contents returned as part of the read +ReadRowsResponse = _reflection.GeneratedProtocolMessageType( + "ReadRowsResponse", + (_message.Message,), + dict( + CellChunk=_reflection.GeneratedProtocolMessageType( + "CellChunk", + (_message.Message,), + dict( + DESCRIPTOR=_READROWSRESPONSE_CELLCHUNK, + __module__="google.cloud.bigtable_v2.proto.bigtable_pb2", + __doc__="""Specifies a piece of a row's contents returned as part of the read response stream. @@ -824,13 +1321,12 @@ Indicates that the client can safely process all previous chunks for ``row_key``, as its data has been fully read. """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadRowsResponse.CellChunk) - )) - , - DESCRIPTOR = _READROWSRESPONSE, - __module__ = 'google.cloud.bigtable_v2.proto.bigtable_pb2' - , - __doc__ = """Response message for Bigtable.ReadRows. + # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadRowsResponse.CellChunk) + ), + ), + DESCRIPTOR=_READROWSRESPONSE, + __module__="google.cloud.bigtable_v2.proto.bigtable_pb2", + __doc__="""Response message for Bigtable.ReadRows. Attributes: @@ -843,16 +1339,19 @@ lot of data that was filtered out since the last committed row key, allowing the client to skip that work on a retry. """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadRowsResponse) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadRowsResponse) + ), +) _sym_db.RegisterMessage(ReadRowsResponse) _sym_db.RegisterMessage(ReadRowsResponse.CellChunk) -SampleRowKeysRequest = _reflection.GeneratedProtocolMessageType('SampleRowKeysRequest', (_message.Message,), dict( - DESCRIPTOR = _SAMPLEROWKEYSREQUEST, - __module__ = 'google.cloud.bigtable_v2.proto.bigtable_pb2' - , - __doc__ = """Request message for Bigtable.SampleRowKeys. +SampleRowKeysRequest = _reflection.GeneratedProtocolMessageType( + "SampleRowKeysRequest", + (_message.Message,), + dict( + DESCRIPTOR=_SAMPLEROWKEYSREQUEST, + __module__="google.cloud.bigtable_v2.proto.bigtable_pb2", + __doc__="""Request message for Bigtable.SampleRowKeys. Attributes: @@ -864,15 +1363,18 @@ This value specifies routing for replication. 
If not specified, the "default" application profile will be used. """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.SampleRowKeysRequest) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.v2.SampleRowKeysRequest) + ), +) _sym_db.RegisterMessage(SampleRowKeysRequest) -SampleRowKeysResponse = _reflection.GeneratedProtocolMessageType('SampleRowKeysResponse', (_message.Message,), dict( - DESCRIPTOR = _SAMPLEROWKEYSRESPONSE, - __module__ = 'google.cloud.bigtable_v2.proto.bigtable_pb2' - , - __doc__ = """Response message for Bigtable.SampleRowKeys. +SampleRowKeysResponse = _reflection.GeneratedProtocolMessageType( + "SampleRowKeysResponse", + (_message.Message,), + dict( + DESCRIPTOR=_SAMPLEROWKEYSRESPONSE, + __module__="google.cloud.bigtable_v2.proto.bigtable_pb2", + __doc__="""Response message for Bigtable.SampleRowKeys. Attributes: @@ -891,15 +1393,18 @@ between two subsequent samples would require space roughly equal to the difference in their ``offset_bytes`` fields. """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.SampleRowKeysResponse) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.v2.SampleRowKeysResponse) + ), +) _sym_db.RegisterMessage(SampleRowKeysResponse) -MutateRowRequest = _reflection.GeneratedProtocolMessageType('MutateRowRequest', (_message.Message,), dict( - DESCRIPTOR = _MUTATEROWREQUEST, - __module__ = 'google.cloud.bigtable_v2.proto.bigtable_pb2' - , - __doc__ = """Request message for Bigtable.MutateRow. +MutateRowRequest = _reflection.GeneratedProtocolMessageType( + "MutateRowRequest", + (_message.Message,), + dict( + DESCRIPTOR=_MUTATEROWREQUEST, + __module__="google.cloud.bigtable_v2.proto.bigtable_pb2", + __doc__="""Request message for Bigtable.MutateRow. Attributes: @@ -918,27 +1423,35 @@ masked by later ones. Must contain at least one entry and at most 100000. """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowRequest) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowRequest) + ), +) _sym_db.RegisterMessage(MutateRowRequest) -MutateRowResponse = _reflection.GeneratedProtocolMessageType('MutateRowResponse', (_message.Message,), dict( - DESCRIPTOR = _MUTATEROWRESPONSE, - __module__ = 'google.cloud.bigtable_v2.proto.bigtable_pb2' - , - __doc__ = """Response message for Bigtable.MutateRow. +MutateRowResponse = _reflection.GeneratedProtocolMessageType( + "MutateRowResponse", + (_message.Message,), + dict( + DESCRIPTOR=_MUTATEROWRESPONSE, + __module__="google.cloud.bigtable_v2.proto.bigtable_pb2", + __doc__="""Response message for Bigtable.MutateRow. 
""", - # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowResponse) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowResponse) + ), +) _sym_db.RegisterMessage(MutateRowResponse) -MutateRowsRequest = _reflection.GeneratedProtocolMessageType('MutateRowsRequest', (_message.Message,), dict( - - Entry = _reflection.GeneratedProtocolMessageType('Entry', (_message.Message,), dict( - DESCRIPTOR = _MUTATEROWSREQUEST_ENTRY, - __module__ = 'google.cloud.bigtable_v2.proto.bigtable_pb2' - , - __doc__ = """Attributes: +MutateRowsRequest = _reflection.GeneratedProtocolMessageType( + "MutateRowsRequest", + (_message.Message,), + dict( + Entry=_reflection.GeneratedProtocolMessageType( + "Entry", + (_message.Message,), + dict( + DESCRIPTOR=_MUTATEROWSREQUEST_ENTRY, + __module__="google.cloud.bigtable_v2.proto.bigtable_pb2", + __doc__="""Attributes: row_key: The key of the row to which the ``mutations`` should be applied. @@ -948,13 +1461,12 @@ can be masked by later ones. You must specify at least one mutation. """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsRequest.Entry) - )) - , - DESCRIPTOR = _MUTATEROWSREQUEST, - __module__ = 'google.cloud.bigtable_v2.proto.bigtable_pb2' - , - __doc__ = """Request message for BigtableService.MutateRows. + # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsRequest.Entry) + ), + ), + DESCRIPTOR=_MUTATEROWSREQUEST, + __module__="google.cloud.bigtable_v2.proto.bigtable_pb2", + __doc__="""Request message for BigtableService.MutateRows. Attributes: @@ -972,18 +1484,23 @@ specified, and in total the entries can contain at most 100000 mutations. """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsRequest) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsRequest) + ), +) _sym_db.RegisterMessage(MutateRowsRequest) _sym_db.RegisterMessage(MutateRowsRequest.Entry) -MutateRowsResponse = _reflection.GeneratedProtocolMessageType('MutateRowsResponse', (_message.Message,), dict( - - Entry = _reflection.GeneratedProtocolMessageType('Entry', (_message.Message,), dict( - DESCRIPTOR = _MUTATEROWSRESPONSE_ENTRY, - __module__ = 'google.cloud.bigtable_v2.proto.bigtable_pb2' - , - __doc__ = """Attributes: +MutateRowsResponse = _reflection.GeneratedProtocolMessageType( + "MutateRowsResponse", + (_message.Message,), + dict( + Entry=_reflection.GeneratedProtocolMessageType( + "Entry", + (_message.Message,), + dict( + DESCRIPTOR=_MUTATEROWSRESPONSE_ENTRY, + __module__="google.cloud.bigtable_v2.proto.bigtable_pb2", + __doc__="""Attributes: index: The index into the original request's ``entries`` list of the Entry for which a result is being reported. @@ -994,29 +1511,31 @@ Entry. In the event that this occurs, the same error will be reported for both entries. """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsResponse.Entry) - )) - , - DESCRIPTOR = _MUTATEROWSRESPONSE, - __module__ = 'google.cloud.bigtable_v2.proto.bigtable_pb2' - , - __doc__ = """Response message for BigtableService.MutateRows. + # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsResponse.Entry) + ), + ), + DESCRIPTOR=_MUTATEROWSRESPONSE, + __module__="google.cloud.bigtable_v2.proto.bigtable_pb2", + __doc__="""Response message for BigtableService.MutateRows. Attributes: entries: One or more results for Entries from the batch request. 
""", - # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsResponse) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsResponse) + ), +) _sym_db.RegisterMessage(MutateRowsResponse) _sym_db.RegisterMessage(MutateRowsResponse.Entry) -CheckAndMutateRowRequest = _reflection.GeneratedProtocolMessageType('CheckAndMutateRowRequest', (_message.Message,), dict( - DESCRIPTOR = _CHECKANDMUTATEROWREQUEST, - __module__ = 'google.cloud.bigtable_v2.proto.bigtable_pb2' - , - __doc__ = """Request message for Bigtable.CheckAndMutateRow. +CheckAndMutateRowRequest = _reflection.GeneratedProtocolMessageType( + "CheckAndMutateRowRequest", + (_message.Message,), + dict( + DESCRIPTOR=_CHECKANDMUTATEROWREQUEST, + __module__="google.cloud.bigtable_v2.proto.bigtable_pb2", + __doc__="""Request message for Bigtable.CheckAndMutateRow. Attributes: @@ -1050,15 +1569,18 @@ least one entry if ``true_mutations`` is empty, and at most 100000. """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.CheckAndMutateRowRequest) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.v2.CheckAndMutateRowRequest) + ), +) _sym_db.RegisterMessage(CheckAndMutateRowRequest) -CheckAndMutateRowResponse = _reflection.GeneratedProtocolMessageType('CheckAndMutateRowResponse', (_message.Message,), dict( - DESCRIPTOR = _CHECKANDMUTATEROWRESPONSE, - __module__ = 'google.cloud.bigtable_v2.proto.bigtable_pb2' - , - __doc__ = """Response message for Bigtable.CheckAndMutateRow. +CheckAndMutateRowResponse = _reflection.GeneratedProtocolMessageType( + "CheckAndMutateRowResponse", + (_message.Message,), + dict( + DESCRIPTOR=_CHECKANDMUTATEROWRESPONSE, + __module__="google.cloud.bigtable_v2.proto.bigtable_pb2", + __doc__="""Response message for Bigtable.CheckAndMutateRow. Attributes: @@ -1066,15 +1588,18 @@ Whether or not the request's ``predicate_filter`` yielded any results for the specified row. """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.CheckAndMutateRowResponse) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.v2.CheckAndMutateRowResponse) + ), +) _sym_db.RegisterMessage(CheckAndMutateRowResponse) -ReadModifyWriteRowRequest = _reflection.GeneratedProtocolMessageType('ReadModifyWriteRowRequest', (_message.Message,), dict( - DESCRIPTOR = _READMODIFYWRITEROWREQUEST, - __module__ = 'google.cloud.bigtable_v2.proto.bigtable_pb2' - , - __doc__ = """Request message for Bigtable.ReadModifyWriteRow. +ReadModifyWriteRowRequest = _reflection.GeneratedProtocolMessageType( + "ReadModifyWriteRowRequest", + (_message.Message,), + dict( + DESCRIPTOR=_READMODIFYWRITEROWREQUEST, + __module__="google.cloud.bigtable_v2.proto.bigtable_pb2", + __doc__="""Request message for Bigtable.ReadModifyWriteRow. Attributes: @@ -1093,15 +1618,18 @@ transformed into writes. Entries are applied in order, meaning that earlier rules will affect the results of later ones. """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadModifyWriteRowRequest) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadModifyWriteRowRequest) + ), +) _sym_db.RegisterMessage(ReadModifyWriteRowRequest) -ReadModifyWriteRowResponse = _reflection.GeneratedProtocolMessageType('ReadModifyWriteRowResponse', (_message.Message,), dict( - DESCRIPTOR = _READMODIFYWRITEROWRESPONSE, - __module__ = 'google.cloud.bigtable_v2.proto.bigtable_pb2' - , - __doc__ = """Response message for Bigtable.ReadModifyWriteRow. 
+ReadModifyWriteRowResponse = _reflection.GeneratedProtocolMessageType( + "ReadModifyWriteRowResponse", + (_message.Message,), + dict( + DESCRIPTOR=_READMODIFYWRITEROWRESPONSE, + __module__="google.cloud.bigtable_v2.proto.bigtable_pb2", + __doc__="""Response message for Bigtable.ReadModifyWriteRow. Attributes: @@ -1109,80 +1637,117 @@ A Row containing the new contents of all cells modified by the request. """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadModifyWriteRowResponse) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadModifyWriteRowResponse) + ), +) _sym_db.RegisterMessage(ReadModifyWriteRowResponse) DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\026com.google.bigtable.v2B\rBigtableProtoP\001Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2')) +DESCRIPTOR._options = _descriptor._ParseOptions( + descriptor_pb2.FileOptions(), + _b( + "\n\026com.google.bigtable.v2B\rBigtableProtoP\001Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2" + ), +) _BIGTABLE = _descriptor.ServiceDescriptor( - name='Bigtable', - full_name='google.bigtable.v2.Bigtable', - file=DESCRIPTOR, - index=0, - options=None, - serialized_start=1912, - serialized_end=2981, - methods=[ - _descriptor.MethodDescriptor( - name='ReadRows', - full_name='google.bigtable.v2.Bigtable.ReadRows', + name="Bigtable", + full_name="google.bigtable.v2.Bigtable", + file=DESCRIPTOR, index=0, - containing_service=None, - input_type=_READROWSREQUEST, - output_type=_READROWSRESPONSE, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002>\"9/v2/{table_name=projects/*/instances/*/tables/*}:readRows:\001*')), - ), - _descriptor.MethodDescriptor( - name='SampleRowKeys', - full_name='google.bigtable.v2.Bigtable.SampleRowKeys', - index=1, - containing_service=None, - input_type=_SAMPLEROWKEYSREQUEST, - output_type=_SAMPLEROWKEYSRESPONSE, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002@\022>/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys')), - ), - _descriptor.MethodDescriptor( - name='MutateRow', - full_name='google.bigtable.v2.Bigtable.MutateRow', - index=2, - containing_service=None, - input_type=_MUTATEROWREQUEST, - output_type=_MUTATEROWRESPONSE, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002?\":/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow:\001*')), - ), - _descriptor.MethodDescriptor( - name='MutateRows', - full_name='google.bigtable.v2.Bigtable.MutateRows', - index=3, - containing_service=None, - input_type=_MUTATEROWSREQUEST, - output_type=_MUTATEROWSRESPONSE, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002@\";/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows:\001*')), - ), - _descriptor.MethodDescriptor( - name='CheckAndMutateRow', - full_name='google.bigtable.v2.Bigtable.CheckAndMutateRow', - index=4, - containing_service=None, - input_type=_CHECKANDMUTATEROWREQUEST, - output_type=_CHECKANDMUTATEROWRESPONSE, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002G\"B/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow:\001*')), - ), - _descriptor.MethodDescriptor( - name='ReadModifyWriteRow', - 
full_name='google.bigtable.v2.Bigtable.ReadModifyWriteRow', - index=5, - containing_service=None, - input_type=_READMODIFYWRITEROWREQUEST, - output_type=_READMODIFYWRITEROWRESPONSE, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002H\"C/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow:\001*')), - ), -]) + options=None, + serialized_start=1912, + serialized_end=2981, + methods=[ + _descriptor.MethodDescriptor( + name="ReadRows", + full_name="google.bigtable.v2.Bigtable.ReadRows", + index=0, + containing_service=None, + input_type=_READROWSREQUEST, + output_type=_READROWSRESPONSE, + options=_descriptor._ParseOptions( + descriptor_pb2.MethodOptions(), + _b( + '\202\323\344\223\002>"9/v2/{table_name=projects/*/instances/*/tables/*}:readRows:\001*' + ), + ), + ), + _descriptor.MethodDescriptor( + name="SampleRowKeys", + full_name="google.bigtable.v2.Bigtable.SampleRowKeys", + index=1, + containing_service=None, + input_type=_SAMPLEROWKEYSREQUEST, + output_type=_SAMPLEROWKEYSRESPONSE, + options=_descriptor._ParseOptions( + descriptor_pb2.MethodOptions(), + _b( + "\202\323\344\223\002@\022>/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys" + ), + ), + ), + _descriptor.MethodDescriptor( + name="MutateRow", + full_name="google.bigtable.v2.Bigtable.MutateRow", + index=2, + containing_service=None, + input_type=_MUTATEROWREQUEST, + output_type=_MUTATEROWRESPONSE, + options=_descriptor._ParseOptions( + descriptor_pb2.MethodOptions(), + _b( + '\202\323\344\223\002?":/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow:\001*' + ), + ), + ), + _descriptor.MethodDescriptor( + name="MutateRows", + full_name="google.bigtable.v2.Bigtable.MutateRows", + index=3, + containing_service=None, + input_type=_MUTATEROWSREQUEST, + output_type=_MUTATEROWSRESPONSE, + options=_descriptor._ParseOptions( + descriptor_pb2.MethodOptions(), + _b( + '\202\323\344\223\002@";/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows:\001*' + ), + ), + ), + _descriptor.MethodDescriptor( + name="CheckAndMutateRow", + full_name="google.bigtable.v2.Bigtable.CheckAndMutateRow", + index=4, + containing_service=None, + input_type=_CHECKANDMUTATEROWREQUEST, + output_type=_CHECKANDMUTATEROWRESPONSE, + options=_descriptor._ParseOptions( + descriptor_pb2.MethodOptions(), + _b( + '\202\323\344\223\002G"B/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow:\001*' + ), + ), + ), + _descriptor.MethodDescriptor( + name="ReadModifyWriteRow", + full_name="google.bigtable.v2.Bigtable.ReadModifyWriteRow", + index=5, + containing_service=None, + input_type=_READMODIFYWRITEROWREQUEST, + output_type=_READMODIFYWRITEROWRESPONSE, + options=_descriptor._ParseOptions( + descriptor_pb2.MethodOptions(), + _b( + '\202\323\344\223\002H"C/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow:\001*' + ), + ), + ), + ], +) _sym_db.RegisterServiceDescriptor(_BIGTABLE) -DESCRIPTOR.services_by_name['Bigtable'] = _BIGTABLE +DESCRIPTOR.services_by_name["Bigtable"] = _BIGTABLE # @@protoc_insertion_point(module_scope) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2_grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2_grpc.py index 950b89f98023..4dd6cded9bc4 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2_grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2_grpc.py @@ -1,145 +1,148 @@ # Generated by the gRPC Python 
protocol compiler plugin. DO NOT EDIT! import grpc -from google.cloud.bigtable_v2.proto import bigtable_pb2 as google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2 +from google.cloud.bigtable_v2.proto import ( + bigtable_pb2 as google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2, +) class BigtableStub(object): - """Service for reading from and writing to existing Bigtable tables. + """Service for reading from and writing to existing Bigtable tables. """ - def __init__(self, channel): - """Constructor. + def __init__(self, channel): + """Constructor. Args: channel: A grpc.Channel. """ - self.ReadRows = channel.unary_stream( - '/google.bigtable.v2.Bigtable/ReadRows', - request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadRowsRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadRowsResponse.FromString, + self.ReadRows = channel.unary_stream( + "/google.bigtable.v2.Bigtable/ReadRows", + request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadRowsRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadRowsResponse.FromString, ) - self.SampleRowKeys = channel.unary_stream( - '/google.bigtable.v2.Bigtable/SampleRowKeys', - request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.SampleRowKeysRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.SampleRowKeysResponse.FromString, + self.SampleRowKeys = channel.unary_stream( + "/google.bigtable.v2.Bigtable/SampleRowKeys", + request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.SampleRowKeysRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.SampleRowKeysResponse.FromString, ) - self.MutateRow = channel.unary_unary( - '/google.bigtable.v2.Bigtable/MutateRow', - request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowResponse.FromString, + self.MutateRow = channel.unary_unary( + "/google.bigtable.v2.Bigtable/MutateRow", + request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowResponse.FromString, ) - self.MutateRows = channel.unary_stream( - '/google.bigtable.v2.Bigtable/MutateRows', - request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowsRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowsResponse.FromString, + self.MutateRows = channel.unary_stream( + "/google.bigtable.v2.Bigtable/MutateRows", + request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowsRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowsResponse.FromString, ) - self.CheckAndMutateRow = channel.unary_unary( - '/google.bigtable.v2.Bigtable/CheckAndMutateRow', - request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.CheckAndMutateRowRequest.SerializeToString, - 
response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.CheckAndMutateRowResponse.FromString, + self.CheckAndMutateRow = channel.unary_unary( + "/google.bigtable.v2.Bigtable/CheckAndMutateRow", + request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.CheckAndMutateRowRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.CheckAndMutateRowResponse.FromString, ) - self.ReadModifyWriteRow = channel.unary_unary( - '/google.bigtable.v2.Bigtable/ReadModifyWriteRow', - request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadModifyWriteRowRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadModifyWriteRowResponse.FromString, + self.ReadModifyWriteRow = channel.unary_unary( + "/google.bigtable.v2.Bigtable/ReadModifyWriteRow", + request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadModifyWriteRowRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadModifyWriteRowResponse.FromString, ) class BigtableServicer(object): - """Service for reading from and writing to existing Bigtable tables. + """Service for reading from and writing to existing Bigtable tables. """ - def ReadRows(self, request, context): - """Streams back the contents of all requested rows in key order, optionally + def ReadRows(self, request, context): + """Streams back the contents of all requested rows in key order, optionally applying the same Reader filter to each. Depending on their size, rows and cells may be broken up across multiple responses, but atomicity of each row will still be preserved. See the ReadRowsResponse documentation for details. """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") - def SampleRowKeys(self, request, context): - """Returns a sample of row keys in the table. The returned row keys will + def SampleRowKeys(self, request, context): + """Returns a sample of row keys in the table. The returned row keys will delimit contiguous sections of the table of approximately equal size, which can be used to break up the data for distributed tasks like mapreduces. """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") - def MutateRow(self, request, context): - """Mutates a row atomically. Cells already present in the row are left + def MutateRow(self, request, context): + """Mutates a row atomically. Cells already present in the row are left unchanged unless explicitly changed by `mutation`. """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") - def MutateRows(self, request, context): - """Mutates multiple rows in a batch. 
Each individual row is mutated + def MutateRows(self, request, context): + """Mutates multiple rows in a batch. Each individual row is mutated atomically as in MutateRow, but the entire batch is not executed atomically. """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") - def CheckAndMutateRow(self, request, context): - """Mutates a row atomically based on the output of a predicate Reader filter. + def CheckAndMutateRow(self, request, context): + """Mutates a row atomically based on the output of a predicate Reader filter. """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") - def ReadModifyWriteRow(self, request, context): - """Modifies a row atomically on the server. The method reads the latest + def ReadModifyWriteRow(self, request, context): + """Modifies a row atomically on the server. The method reads the latest existing timestamp and value from the specified columns and writes a new entry based on pre-defined read/modify/write rules. The new value for the timestamp is the greater of the existing timestamp or the current server time. The method returns the new contents of all modified cells. """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") def add_BigtableServicer_to_server(servicer, server): - rpc_method_handlers = { - 'ReadRows': grpc.unary_stream_rpc_method_handler( - servicer.ReadRows, - request_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadRowsRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadRowsResponse.SerializeToString, - ), - 'SampleRowKeys': grpc.unary_stream_rpc_method_handler( - servicer.SampleRowKeys, - request_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.SampleRowKeysRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.SampleRowKeysResponse.SerializeToString, - ), - 'MutateRow': grpc.unary_unary_rpc_method_handler( - servicer.MutateRow, - request_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowResponse.SerializeToString, - ), - 'MutateRows': grpc.unary_stream_rpc_method_handler( - servicer.MutateRows, - request_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowsRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowsResponse.SerializeToString, - ), - 'CheckAndMutateRow': grpc.unary_unary_rpc_method_handler( - servicer.CheckAndMutateRow, - request_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.CheckAndMutateRowRequest.FromString, - 
response_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.CheckAndMutateRowResponse.SerializeToString, - ), - 'ReadModifyWriteRow': grpc.unary_unary_rpc_method_handler( - servicer.ReadModifyWriteRow, - request_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadModifyWriteRowRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadModifyWriteRowResponse.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - 'google.bigtable.v2.Bigtable', rpc_method_handlers) - server.add_generic_rpc_handlers((generic_handler,)) + rpc_method_handlers = { + "ReadRows": grpc.unary_stream_rpc_method_handler( + servicer.ReadRows, + request_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadRowsRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadRowsResponse.SerializeToString, + ), + "SampleRowKeys": grpc.unary_stream_rpc_method_handler( + servicer.SampleRowKeys, + request_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.SampleRowKeysRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.SampleRowKeysResponse.SerializeToString, + ), + "MutateRow": grpc.unary_unary_rpc_method_handler( + servicer.MutateRow, + request_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowResponse.SerializeToString, + ), + "MutateRows": grpc.unary_stream_rpc_method_handler( + servicer.MutateRows, + request_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowsRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowsResponse.SerializeToString, + ), + "CheckAndMutateRow": grpc.unary_unary_rpc_method_handler( + servicer.CheckAndMutateRow, + request_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.CheckAndMutateRowRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.CheckAndMutateRowResponse.SerializeToString, + ), + "ReadModifyWriteRow": grpc.unary_unary_rpc_method_handler( + servicer.ReadModifyWriteRow, + request_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadModifyWriteRowRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadModifyWriteRowResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + "google.bigtable.v2.Bigtable", rpc_method_handlers + ) + server.add_generic_rpc_handlers((generic_handler,)) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py index 70a305b87aa3..af8d88968798 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py @@ -2,1123 +2,1943 @@ # source: google/cloud/bigtable_v2/proto/data.proto import sys -_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) + +_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf 
import reflection as _reflection from google.protobuf import symbol_database as _symbol_database from google.protobuf import descriptor_pb2 + # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() - - DESCRIPTOR = _descriptor.FileDescriptor( - name='google/cloud/bigtable_v2/proto/data.proto', - package='google.bigtable.v2', - syntax='proto3', - serialized_pb=_b('\n)google/cloud/bigtable_v2/proto/data.proto\x12\x12google.bigtable.v2\"@\n\x03Row\x12\x0b\n\x03key\x18\x01 \x01(\x0c\x12,\n\x08\x66\x61milies\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v2.Family\"C\n\x06\x46\x61mily\x12\x0c\n\x04name\x18\x01 \x01(\t\x12+\n\x07\x63olumns\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v2.Column\"D\n\x06\x43olumn\x12\x11\n\tqualifier\x18\x01 \x01(\x0c\x12\'\n\x05\x63\x65lls\x18\x02 \x03(\x0b\x32\x18.google.bigtable.v2.Cell\"?\n\x04\x43\x65ll\x12\x18\n\x10timestamp_micros\x18\x01 \x01(\x03\x12\r\n\x05value\x18\x02 \x01(\x0c\x12\x0e\n\x06labels\x18\x03 \x03(\t\"\x8a\x01\n\x08RowRange\x12\x1a\n\x10start_key_closed\x18\x01 \x01(\x0cH\x00\x12\x18\n\x0estart_key_open\x18\x02 \x01(\x0cH\x00\x12\x16\n\x0c\x65nd_key_open\x18\x03 \x01(\x0cH\x01\x12\x18\n\x0e\x65nd_key_closed\x18\x04 \x01(\x0cH\x01\x42\x0b\n\tstart_keyB\t\n\x07\x65nd_key\"L\n\x06RowSet\x12\x10\n\x08row_keys\x18\x01 \x03(\x0c\x12\x30\n\nrow_ranges\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.RowRange\"\xc6\x01\n\x0b\x43olumnRange\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12 \n\x16start_qualifier_closed\x18\x02 \x01(\x0cH\x00\x12\x1e\n\x14start_qualifier_open\x18\x03 \x01(\x0cH\x00\x12\x1e\n\x14\x65nd_qualifier_closed\x18\x04 \x01(\x0cH\x01\x12\x1c\n\x12\x65nd_qualifier_open\x18\x05 \x01(\x0cH\x01\x42\x11\n\x0fstart_qualifierB\x0f\n\rend_qualifier\"N\n\x0eTimestampRange\x12\x1e\n\x16start_timestamp_micros\x18\x01 \x01(\x03\x12\x1c\n\x14\x65nd_timestamp_micros\x18\x02 \x01(\x03\"\x98\x01\n\nValueRange\x12\x1c\n\x12start_value_closed\x18\x01 \x01(\x0cH\x00\x12\x1a\n\x10start_value_open\x18\x02 \x01(\x0cH\x00\x12\x1a\n\x10\x65nd_value_closed\x18\x03 \x01(\x0cH\x01\x12\x18\n\x0e\x65nd_value_open\x18\x04 \x01(\x0cH\x01\x42\r\n\x0bstart_valueB\x0b\n\tend_value\"\xdf\x08\n\tRowFilter\x12\x34\n\x05\x63hain\x18\x01 \x01(\x0b\x32#.google.bigtable.v2.RowFilter.ChainH\x00\x12>\n\ninterleave\x18\x02 \x01(\x0b\x32(.google.bigtable.v2.RowFilter.InterleaveH\x00\x12<\n\tcondition\x18\x03 \x01(\x0b\x32\'.google.bigtable.v2.RowFilter.ConditionH\x00\x12\x0e\n\x04sink\x18\x10 \x01(\x08H\x00\x12\x19\n\x0fpass_all_filter\x18\x11 \x01(\x08H\x00\x12\x1a\n\x10\x62lock_all_filter\x18\x12 \x01(\x08H\x00\x12\x1e\n\x14row_key_regex_filter\x18\x04 \x01(\x0cH\x00\x12\x1b\n\x11row_sample_filter\x18\x0e \x01(\x01H\x00\x12\"\n\x18\x66\x61mily_name_regex_filter\x18\x05 \x01(\tH\x00\x12\'\n\x1d\x63olumn_qualifier_regex_filter\x18\x06 \x01(\x0cH\x00\x12>\n\x13\x63olumn_range_filter\x18\x07 \x01(\x0b\x32\x1f.google.bigtable.v2.ColumnRangeH\x00\x12\x44\n\x16timestamp_range_filter\x18\x08 \x01(\x0b\x32\".google.bigtable.v2.TimestampRangeH\x00\x12\x1c\n\x12value_regex_filter\x18\t \x01(\x0cH\x00\x12<\n\x12value_range_filter\x18\x0f \x01(\x0b\x32\x1e.google.bigtable.v2.ValueRangeH\x00\x12%\n\x1b\x63\x65lls_per_row_offset_filter\x18\n \x01(\x05H\x00\x12$\n\x1a\x63\x65lls_per_row_limit_filter\x18\x0b \x01(\x05H\x00\x12\'\n\x1d\x63\x65lls_per_column_limit_filter\x18\x0c \x01(\x05H\x00\x12!\n\x17strip_value_transformer\x18\r \x01(\x08H\x00\x12!\n\x17\x61pply_label_transformer\x18\x13 \x01(\tH\x00\x1a\x37\n\x05\x43hain\x12.\n\x07\x66ilters\x18\x01 
\x03(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x1a<\n\nInterleave\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x1a\xad\x01\n\tCondition\x12\x37\n\x10predicate_filter\x18\x01 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x32\n\x0btrue_filter\x18\x02 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x33\n\x0c\x66\x61lse_filter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilterB\x08\n\x06\x66ilter\"\xc9\x04\n\x08Mutation\x12\x38\n\x08set_cell\x18\x01 \x01(\x0b\x32$.google.bigtable.v2.Mutation.SetCellH\x00\x12K\n\x12\x64\x65lete_from_column\x18\x02 \x01(\x0b\x32-.google.bigtable.v2.Mutation.DeleteFromColumnH\x00\x12K\n\x12\x64\x65lete_from_family\x18\x03 \x01(\x0b\x32-.google.bigtable.v2.Mutation.DeleteFromFamilyH\x00\x12\x45\n\x0f\x64\x65lete_from_row\x18\x04 \x01(\x0b\x32*.google.bigtable.v2.Mutation.DeleteFromRowH\x00\x1a\x61\n\x07SetCell\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x18\n\x10timestamp_micros\x18\x03 \x01(\x03\x12\r\n\x05value\x18\x04 \x01(\x0c\x1ay\n\x10\x44\x65leteFromColumn\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x36\n\ntime_range\x18\x03 \x01(\x0b\x32\".google.bigtable.v2.TimestampRange\x1a\'\n\x10\x44\x65leteFromFamily\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x1a\x0f\n\rDeleteFromRowB\n\n\x08mutation\"\x80\x01\n\x13ReadModifyWriteRule\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x16\n\x0c\x61ppend_value\x18\x03 \x01(\x0cH\x00\x12\x1a\n\x10increment_amount\x18\x04 \x01(\x03H\x00\x42\x06\n\x04ruleB\x97\x01\n\x16\x63om.google.bigtable.v2B\tDataProtoP\x01Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2b\x06proto3') + name="google/cloud/bigtable_v2/proto/data.proto", + package="google.bigtable.v2", + syntax="proto3", + serialized_pb=_b( + '\n)google/cloud/bigtable_v2/proto/data.proto\x12\x12google.bigtable.v2"@\n\x03Row\x12\x0b\n\x03key\x18\x01 \x01(\x0c\x12,\n\x08\x66\x61milies\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v2.Family"C\n\x06\x46\x61mily\x12\x0c\n\x04name\x18\x01 \x01(\t\x12+\n\x07\x63olumns\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v2.Column"D\n\x06\x43olumn\x12\x11\n\tqualifier\x18\x01 \x01(\x0c\x12\'\n\x05\x63\x65lls\x18\x02 \x03(\x0b\x32\x18.google.bigtable.v2.Cell"?\n\x04\x43\x65ll\x12\x18\n\x10timestamp_micros\x18\x01 \x01(\x03\x12\r\n\x05value\x18\x02 \x01(\x0c\x12\x0e\n\x06labels\x18\x03 \x03(\t"\x8a\x01\n\x08RowRange\x12\x1a\n\x10start_key_closed\x18\x01 \x01(\x0cH\x00\x12\x18\n\x0estart_key_open\x18\x02 \x01(\x0cH\x00\x12\x16\n\x0c\x65nd_key_open\x18\x03 \x01(\x0cH\x01\x12\x18\n\x0e\x65nd_key_closed\x18\x04 \x01(\x0cH\x01\x42\x0b\n\tstart_keyB\t\n\x07\x65nd_key"L\n\x06RowSet\x12\x10\n\x08row_keys\x18\x01 \x03(\x0c\x12\x30\n\nrow_ranges\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.RowRange"\xc6\x01\n\x0b\x43olumnRange\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12 \n\x16start_qualifier_closed\x18\x02 \x01(\x0cH\x00\x12\x1e\n\x14start_qualifier_open\x18\x03 \x01(\x0cH\x00\x12\x1e\n\x14\x65nd_qualifier_closed\x18\x04 \x01(\x0cH\x01\x12\x1c\n\x12\x65nd_qualifier_open\x18\x05 \x01(\x0cH\x01\x42\x11\n\x0fstart_qualifierB\x0f\n\rend_qualifier"N\n\x0eTimestampRange\x12\x1e\n\x16start_timestamp_micros\x18\x01 \x01(\x03\x12\x1c\n\x14\x65nd_timestamp_micros\x18\x02 \x01(\x03"\x98\x01\n\nValueRange\x12\x1c\n\x12start_value_closed\x18\x01 
\x01(\x0cH\x00\x12\x1a\n\x10start_value_open\x18\x02 \x01(\x0cH\x00\x12\x1a\n\x10\x65nd_value_closed\x18\x03 \x01(\x0cH\x01\x12\x18\n\x0e\x65nd_value_open\x18\x04 \x01(\x0cH\x01\x42\r\n\x0bstart_valueB\x0b\n\tend_value"\xdf\x08\n\tRowFilter\x12\x34\n\x05\x63hain\x18\x01 \x01(\x0b\x32#.google.bigtable.v2.RowFilter.ChainH\x00\x12>\n\ninterleave\x18\x02 \x01(\x0b\x32(.google.bigtable.v2.RowFilter.InterleaveH\x00\x12<\n\tcondition\x18\x03 \x01(\x0b\x32\'.google.bigtable.v2.RowFilter.ConditionH\x00\x12\x0e\n\x04sink\x18\x10 \x01(\x08H\x00\x12\x19\n\x0fpass_all_filter\x18\x11 \x01(\x08H\x00\x12\x1a\n\x10\x62lock_all_filter\x18\x12 \x01(\x08H\x00\x12\x1e\n\x14row_key_regex_filter\x18\x04 \x01(\x0cH\x00\x12\x1b\n\x11row_sample_filter\x18\x0e \x01(\x01H\x00\x12"\n\x18\x66\x61mily_name_regex_filter\x18\x05 \x01(\tH\x00\x12\'\n\x1d\x63olumn_qualifier_regex_filter\x18\x06 \x01(\x0cH\x00\x12>\n\x13\x63olumn_range_filter\x18\x07 \x01(\x0b\x32\x1f.google.bigtable.v2.ColumnRangeH\x00\x12\x44\n\x16timestamp_range_filter\x18\x08 \x01(\x0b\x32".google.bigtable.v2.TimestampRangeH\x00\x12\x1c\n\x12value_regex_filter\x18\t \x01(\x0cH\x00\x12<\n\x12value_range_filter\x18\x0f \x01(\x0b\x32\x1e.google.bigtable.v2.ValueRangeH\x00\x12%\n\x1b\x63\x65lls_per_row_offset_filter\x18\n \x01(\x05H\x00\x12$\n\x1a\x63\x65lls_per_row_limit_filter\x18\x0b \x01(\x05H\x00\x12\'\n\x1d\x63\x65lls_per_column_limit_filter\x18\x0c \x01(\x05H\x00\x12!\n\x17strip_value_transformer\x18\r \x01(\x08H\x00\x12!\n\x17\x61pply_label_transformer\x18\x13 \x01(\tH\x00\x1a\x37\n\x05\x43hain\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x1a<\n\nInterleave\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x1a\xad\x01\n\tCondition\x12\x37\n\x10predicate_filter\x18\x01 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x32\n\x0btrue_filter\x18\x02 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x33\n\x0c\x66\x61lse_filter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilterB\x08\n\x06\x66ilter"\xc9\x04\n\x08Mutation\x12\x38\n\x08set_cell\x18\x01 \x01(\x0b\x32$.google.bigtable.v2.Mutation.SetCellH\x00\x12K\n\x12\x64\x65lete_from_column\x18\x02 \x01(\x0b\x32-.google.bigtable.v2.Mutation.DeleteFromColumnH\x00\x12K\n\x12\x64\x65lete_from_family\x18\x03 \x01(\x0b\x32-.google.bigtable.v2.Mutation.DeleteFromFamilyH\x00\x12\x45\n\x0f\x64\x65lete_from_row\x18\x04 \x01(\x0b\x32*.google.bigtable.v2.Mutation.DeleteFromRowH\x00\x1a\x61\n\x07SetCell\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x18\n\x10timestamp_micros\x18\x03 \x01(\x03\x12\r\n\x05value\x18\x04 \x01(\x0c\x1ay\n\x10\x44\x65leteFromColumn\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x36\n\ntime_range\x18\x03 \x01(\x0b\x32".google.bigtable.v2.TimestampRange\x1a\'\n\x10\x44\x65leteFromFamily\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x1a\x0f\n\rDeleteFromRowB\n\n\x08mutation"\x80\x01\n\x13ReadModifyWriteRule\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x16\n\x0c\x61ppend_value\x18\x03 \x01(\x0cH\x00\x12\x1a\n\x10increment_amount\x18\x04 \x01(\x03H\x00\x42\x06\n\x04ruleB\x97\x01\n\x16\x63om.google.bigtable.v2B\tDataProtoP\x01Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2b\x06proto3' + ), ) - - _ROW = _descriptor.Descriptor( - name='Row', - full_name='google.bigtable.v2.Row', - 
filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='key', full_name='google.bigtable.v2.Row.key', index=0, - number=1, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='families', full_name='google.bigtable.v2.Row.families', index=1, - number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=65, - serialized_end=129, + name="Row", + full_name="google.bigtable.v2.Row", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="key", + full_name="google.bigtable.v2.Row.key", + index=0, + number=1, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b(""), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="families", + full_name="google.bigtable.v2.Row.families", + index=1, + number=2, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=65, + serialized_end=129, ) _FAMILY = _descriptor.Descriptor( - name='Family', - full_name='google.bigtable.v2.Family', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.v2.Family.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='columns', full_name='google.bigtable.v2.Family.columns', index=1, - number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=131, - serialized_end=198, + name="Family", + full_name="google.bigtable.v2.Family", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.v2.Family.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="columns", + 
full_name="google.bigtable.v2.Family.columns", + index=1, + number=2, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=131, + serialized_end=198, ) _COLUMN = _descriptor.Descriptor( - name='Column', - full_name='google.bigtable.v2.Column', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='qualifier', full_name='google.bigtable.v2.Column.qualifier', index=0, - number=1, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='cells', full_name='google.bigtable.v2.Column.cells', index=1, - number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=200, - serialized_end=268, + name="Column", + full_name="google.bigtable.v2.Column", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="qualifier", + full_name="google.bigtable.v2.Column.qualifier", + index=0, + number=1, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b(""), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="cells", + full_name="google.bigtable.v2.Column.cells", + index=1, + number=2, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=200, + serialized_end=268, ) _CELL = _descriptor.Descriptor( - name='Cell', - full_name='google.bigtable.v2.Cell', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='timestamp_micros', full_name='google.bigtable.v2.Cell.timestamp_micros', index=0, - number=1, type=3, cpp_type=2, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='value', full_name='google.bigtable.v2.Cell.value', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='labels', full_name='google.bigtable.v2.Cell.labels', index=2, - number=3, type=9, cpp_type=9, label=3, - 
has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=270, - serialized_end=333, + name="Cell", + full_name="google.bigtable.v2.Cell", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="timestamp_micros", + full_name="google.bigtable.v2.Cell.timestamp_micros", + index=0, + number=1, + type=3, + cpp_type=2, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="value", + full_name="google.bigtable.v2.Cell.value", + index=1, + number=2, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b(""), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="labels", + full_name="google.bigtable.v2.Cell.labels", + index=2, + number=3, + type=9, + cpp_type=9, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=270, + serialized_end=333, ) _ROWRANGE = _descriptor.Descriptor( - name='RowRange', - full_name='google.bigtable.v2.RowRange', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='start_key_closed', full_name='google.bigtable.v2.RowRange.start_key_closed', index=0, - number=1, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='start_key_open', full_name='google.bigtable.v2.RowRange.start_key_open', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='end_key_open', full_name='google.bigtable.v2.RowRange.end_key_open', index=2, - number=3, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='end_key_closed', full_name='google.bigtable.v2.RowRange.end_key_closed', index=3, - number=4, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name='start_key', 
full_name='google.bigtable.v2.RowRange.start_key', - index=0, containing_type=None, fields=[]), - _descriptor.OneofDescriptor( - name='end_key', full_name='google.bigtable.v2.RowRange.end_key', - index=1, containing_type=None, fields=[]), - ], - serialized_start=336, - serialized_end=474, + name="RowRange", + full_name="google.bigtable.v2.RowRange", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="start_key_closed", + full_name="google.bigtable.v2.RowRange.start_key_closed", + index=0, + number=1, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b(""), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="start_key_open", + full_name="google.bigtable.v2.RowRange.start_key_open", + index=1, + number=2, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b(""), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="end_key_open", + full_name="google.bigtable.v2.RowRange.end_key_open", + index=2, + number=3, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b(""), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="end_key_closed", + full_name="google.bigtable.v2.RowRange.end_key_closed", + index=3, + number=4, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b(""), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name="start_key", + full_name="google.bigtable.v2.RowRange.start_key", + index=0, + containing_type=None, + fields=[], + ), + _descriptor.OneofDescriptor( + name="end_key", + full_name="google.bigtable.v2.RowRange.end_key", + index=1, + containing_type=None, + fields=[], + ), + ], + serialized_start=336, + serialized_end=474, ) _ROWSET = _descriptor.Descriptor( - name='RowSet', - full_name='google.bigtable.v2.RowSet', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='row_keys', full_name='google.bigtable.v2.RowSet.row_keys', index=0, - number=1, type=12, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='row_ranges', full_name='google.bigtable.v2.RowSet.row_ranges', index=1, - number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=476, - serialized_end=552, + name="RowSet", + full_name="google.bigtable.v2.RowSet", + filename=None, + 
file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="row_keys", + full_name="google.bigtable.v2.RowSet.row_keys", + index=0, + number=1, + type=12, + cpp_type=9, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="row_ranges", + full_name="google.bigtable.v2.RowSet.row_ranges", + index=1, + number=2, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=476, + serialized_end=552, ) _COLUMNRANGE = _descriptor.Descriptor( - name='ColumnRange', - full_name='google.bigtable.v2.ColumnRange', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='family_name', full_name='google.bigtable.v2.ColumnRange.family_name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='start_qualifier_closed', full_name='google.bigtable.v2.ColumnRange.start_qualifier_closed', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='start_qualifier_open', full_name='google.bigtable.v2.ColumnRange.start_qualifier_open', index=2, - number=3, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='end_qualifier_closed', full_name='google.bigtable.v2.ColumnRange.end_qualifier_closed', index=3, - number=4, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='end_qualifier_open', full_name='google.bigtable.v2.ColumnRange.end_qualifier_open', index=4, - number=5, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name='start_qualifier', full_name='google.bigtable.v2.ColumnRange.start_qualifier', - index=0, containing_type=None, fields=[]), - _descriptor.OneofDescriptor( - name='end_qualifier', full_name='google.bigtable.v2.ColumnRange.end_qualifier', - index=1, containing_type=None, fields=[]), - ], - serialized_start=555, - serialized_end=753, + name="ColumnRange", + 
full_name="google.bigtable.v2.ColumnRange", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="family_name", + full_name="google.bigtable.v2.ColumnRange.family_name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="start_qualifier_closed", + full_name="google.bigtable.v2.ColumnRange.start_qualifier_closed", + index=1, + number=2, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b(""), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="start_qualifier_open", + full_name="google.bigtable.v2.ColumnRange.start_qualifier_open", + index=2, + number=3, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b(""), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="end_qualifier_closed", + full_name="google.bigtable.v2.ColumnRange.end_qualifier_closed", + index=3, + number=4, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b(""), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="end_qualifier_open", + full_name="google.bigtable.v2.ColumnRange.end_qualifier_open", + index=4, + number=5, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b(""), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name="start_qualifier", + full_name="google.bigtable.v2.ColumnRange.start_qualifier", + index=0, + containing_type=None, + fields=[], + ), + _descriptor.OneofDescriptor( + name="end_qualifier", + full_name="google.bigtable.v2.ColumnRange.end_qualifier", + index=1, + containing_type=None, + fields=[], + ), + ], + serialized_start=555, + serialized_end=753, ) _TIMESTAMPRANGE = _descriptor.Descriptor( - name='TimestampRange', - full_name='google.bigtable.v2.TimestampRange', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='start_timestamp_micros', full_name='google.bigtable.v2.TimestampRange.start_timestamp_micros', index=0, - number=1, type=3, cpp_type=2, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='end_timestamp_micros', full_name='google.bigtable.v2.TimestampRange.end_timestamp_micros', index=1, - number=2, type=3, cpp_type=2, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - 
enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=755, - serialized_end=833, + name="TimestampRange", + full_name="google.bigtable.v2.TimestampRange", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="start_timestamp_micros", + full_name="google.bigtable.v2.TimestampRange.start_timestamp_micros", + index=0, + number=1, + type=3, + cpp_type=2, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="end_timestamp_micros", + full_name="google.bigtable.v2.TimestampRange.end_timestamp_micros", + index=1, + number=2, + type=3, + cpp_type=2, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=755, + serialized_end=833, ) _VALUERANGE = _descriptor.Descriptor( - name='ValueRange', - full_name='google.bigtable.v2.ValueRange', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='start_value_closed', full_name='google.bigtable.v2.ValueRange.start_value_closed', index=0, - number=1, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='start_value_open', full_name='google.bigtable.v2.ValueRange.start_value_open', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='end_value_closed', full_name='google.bigtable.v2.ValueRange.end_value_closed', index=2, - number=3, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='end_value_open', full_name='google.bigtable.v2.ValueRange.end_value_open', index=3, - number=4, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name='start_value', full_name='google.bigtable.v2.ValueRange.start_value', - index=0, containing_type=None, fields=[]), - _descriptor.OneofDescriptor( - name='end_value', full_name='google.bigtable.v2.ValueRange.end_value', - index=1, containing_type=None, fields=[]), - ], - serialized_start=836, - serialized_end=988, + name="ValueRange", + full_name="google.bigtable.v2.ValueRange", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + 
name="start_value_closed", + full_name="google.bigtable.v2.ValueRange.start_value_closed", + index=0, + number=1, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b(""), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="start_value_open", + full_name="google.bigtable.v2.ValueRange.start_value_open", + index=1, + number=2, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b(""), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="end_value_closed", + full_name="google.bigtable.v2.ValueRange.end_value_closed", + index=2, + number=3, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b(""), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="end_value_open", + full_name="google.bigtable.v2.ValueRange.end_value_open", + index=3, + number=4, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b(""), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name="start_value", + full_name="google.bigtable.v2.ValueRange.start_value", + index=0, + containing_type=None, + fields=[], + ), + _descriptor.OneofDescriptor( + name="end_value", + full_name="google.bigtable.v2.ValueRange.end_value", + index=1, + containing_type=None, + fields=[], + ), + ], + serialized_start=836, + serialized_end=988, ) _ROWFILTER_CHAIN = _descriptor.Descriptor( - name='Chain', - full_name='google.bigtable.v2.RowFilter.Chain', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='filters', full_name='google.bigtable.v2.RowFilter.Chain.filters', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1807, - serialized_end=1862, + name="Chain", + full_name="google.bigtable.v2.RowFilter.Chain", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="filters", + full_name="google.bigtable.v2.RowFilter.Chain.filters", + index=0, + number=1, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ) + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1807, + serialized_end=1862, ) _ROWFILTER_INTERLEAVE = _descriptor.Descriptor( - name='Interleave', - 
full_name='google.bigtable.v2.RowFilter.Interleave', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='filters', full_name='google.bigtable.v2.RowFilter.Interleave.filters', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1864, - serialized_end=1924, + name="Interleave", + full_name="google.bigtable.v2.RowFilter.Interleave", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="filters", + full_name="google.bigtable.v2.RowFilter.Interleave.filters", + index=0, + number=1, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ) + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1864, + serialized_end=1924, ) _ROWFILTER_CONDITION = _descriptor.Descriptor( - name='Condition', - full_name='google.bigtable.v2.RowFilter.Condition', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='predicate_filter', full_name='google.bigtable.v2.RowFilter.Condition.predicate_filter', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='true_filter', full_name='google.bigtable.v2.RowFilter.Condition.true_filter', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='false_filter', full_name='google.bigtable.v2.RowFilter.Condition.false_filter', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1927, - serialized_end=2100, + name="Condition", + full_name="google.bigtable.v2.RowFilter.Condition", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="predicate_filter", + full_name="google.bigtable.v2.RowFilter.Condition.predicate_filter", + index=0, + number=1, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="true_filter", + full_name="google.bigtable.v2.RowFilter.Condition.true_filter", + index=1, + number=2, + type=11, + 
cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="false_filter", + full_name="google.bigtable.v2.RowFilter.Condition.false_filter", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1927, + serialized_end=2100, ) _ROWFILTER = _descriptor.Descriptor( - name='RowFilter', - full_name='google.bigtable.v2.RowFilter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='chain', full_name='google.bigtable.v2.RowFilter.chain', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='interleave', full_name='google.bigtable.v2.RowFilter.interleave', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='condition', full_name='google.bigtable.v2.RowFilter.condition', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='sink', full_name='google.bigtable.v2.RowFilter.sink', index=3, - number=16, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='pass_all_filter', full_name='google.bigtable.v2.RowFilter.pass_all_filter', index=4, - number=17, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='block_all_filter', full_name='google.bigtable.v2.RowFilter.block_all_filter', index=5, - number=18, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='row_key_regex_filter', full_name='google.bigtable.v2.RowFilter.row_key_regex_filter', index=6, - number=4, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='row_sample_filter', full_name='google.bigtable.v2.RowFilter.row_sample_filter', index=7, - number=14, type=1, cpp_type=5, label=1, - 
has_default_value=False, default_value=float(0), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='family_name_regex_filter', full_name='google.bigtable.v2.RowFilter.family_name_regex_filter', index=8, - number=5, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='column_qualifier_regex_filter', full_name='google.bigtable.v2.RowFilter.column_qualifier_regex_filter', index=9, - number=6, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='column_range_filter', full_name='google.bigtable.v2.RowFilter.column_range_filter', index=10, - number=7, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='timestamp_range_filter', full_name='google.bigtable.v2.RowFilter.timestamp_range_filter', index=11, - number=8, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='value_regex_filter', full_name='google.bigtable.v2.RowFilter.value_regex_filter', index=12, - number=9, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='value_range_filter', full_name='google.bigtable.v2.RowFilter.value_range_filter', index=13, - number=15, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='cells_per_row_offset_filter', full_name='google.bigtable.v2.RowFilter.cells_per_row_offset_filter', index=14, - number=10, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='cells_per_row_limit_filter', full_name='google.bigtable.v2.RowFilter.cells_per_row_limit_filter', index=15, - number=11, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='cells_per_column_limit_filter', full_name='google.bigtable.v2.RowFilter.cells_per_column_limit_filter', index=16, - number=12, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - 
name='strip_value_transformer', full_name='google.bigtable.v2.RowFilter.strip_value_transformer', index=17, - number=13, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='apply_label_transformer', full_name='google.bigtable.v2.RowFilter.apply_label_transformer', index=18, - number=19, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[_ROWFILTER_CHAIN, _ROWFILTER_INTERLEAVE, _ROWFILTER_CONDITION, ], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name='filter', full_name='google.bigtable.v2.RowFilter.filter', - index=0, containing_type=None, fields=[]), - ], - serialized_start=991, - serialized_end=2110, + name="RowFilter", + full_name="google.bigtable.v2.RowFilter", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="chain", + full_name="google.bigtable.v2.RowFilter.chain", + index=0, + number=1, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="interleave", + full_name="google.bigtable.v2.RowFilter.interleave", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="condition", + full_name="google.bigtable.v2.RowFilter.condition", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="sink", + full_name="google.bigtable.v2.RowFilter.sink", + index=3, + number=16, + type=8, + cpp_type=7, + label=1, + has_default_value=False, + default_value=False, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="pass_all_filter", + full_name="google.bigtable.v2.RowFilter.pass_all_filter", + index=4, + number=17, + type=8, + cpp_type=7, + label=1, + has_default_value=False, + default_value=False, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="block_all_filter", + full_name="google.bigtable.v2.RowFilter.block_all_filter", + index=5, + number=18, + type=8, + cpp_type=7, + label=1, + has_default_value=False, + default_value=False, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="row_key_regex_filter", + 
full_name="google.bigtable.v2.RowFilter.row_key_regex_filter", + index=6, + number=4, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b(""), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="row_sample_filter", + full_name="google.bigtable.v2.RowFilter.row_sample_filter", + index=7, + number=14, + type=1, + cpp_type=5, + label=1, + has_default_value=False, + default_value=float(0), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="family_name_regex_filter", + full_name="google.bigtable.v2.RowFilter.family_name_regex_filter", + index=8, + number=5, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="column_qualifier_regex_filter", + full_name="google.bigtable.v2.RowFilter.column_qualifier_regex_filter", + index=9, + number=6, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b(""), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="column_range_filter", + full_name="google.bigtable.v2.RowFilter.column_range_filter", + index=10, + number=7, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="timestamp_range_filter", + full_name="google.bigtable.v2.RowFilter.timestamp_range_filter", + index=11, + number=8, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="value_regex_filter", + full_name="google.bigtable.v2.RowFilter.value_regex_filter", + index=12, + number=9, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b(""), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="value_range_filter", + full_name="google.bigtable.v2.RowFilter.value_range_filter", + index=13, + number=15, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="cells_per_row_offset_filter", + full_name="google.bigtable.v2.RowFilter.cells_per_row_offset_filter", + index=14, + number=10, + type=5, + cpp_type=1, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="cells_per_row_limit_filter", + 
full_name="google.bigtable.v2.RowFilter.cells_per_row_limit_filter", + index=15, + number=11, + type=5, + cpp_type=1, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="cells_per_column_limit_filter", + full_name="google.bigtable.v2.RowFilter.cells_per_column_limit_filter", + index=16, + number=12, + type=5, + cpp_type=1, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="strip_value_transformer", + full_name="google.bigtable.v2.RowFilter.strip_value_transformer", + index=17, + number=13, + type=8, + cpp_type=7, + label=1, + has_default_value=False, + default_value=False, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="apply_label_transformer", + full_name="google.bigtable.v2.RowFilter.apply_label_transformer", + index=18, + number=19, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[_ROWFILTER_CHAIN, _ROWFILTER_INTERLEAVE, _ROWFILTER_CONDITION], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name="filter", + full_name="google.bigtable.v2.RowFilter.filter", + index=0, + containing_type=None, + fields=[], + ) + ], + serialized_start=991, + serialized_end=2110, ) _MUTATION_SETCELL = _descriptor.Descriptor( - name='SetCell', - full_name='google.bigtable.v2.Mutation.SetCell', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='family_name', full_name='google.bigtable.v2.Mutation.SetCell.family_name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='column_qualifier', full_name='google.bigtable.v2.Mutation.SetCell.column_qualifier', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='timestamp_micros', full_name='google.bigtable.v2.Mutation.SetCell.timestamp_micros', index=2, - number=3, type=3, cpp_type=2, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='value', full_name='google.bigtable.v2.Mutation.SetCell.value', index=3, - number=4, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - 
extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=2408, - serialized_end=2505, + name="SetCell", + full_name="google.bigtable.v2.Mutation.SetCell", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="family_name", + full_name="google.bigtable.v2.Mutation.SetCell.family_name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="column_qualifier", + full_name="google.bigtable.v2.Mutation.SetCell.column_qualifier", + index=1, + number=2, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b(""), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="timestamp_micros", + full_name="google.bigtable.v2.Mutation.SetCell.timestamp_micros", + index=2, + number=3, + type=3, + cpp_type=2, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="value", + full_name="google.bigtable.v2.Mutation.SetCell.value", + index=3, + number=4, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b(""), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=2408, + serialized_end=2505, ) _MUTATION_DELETEFROMCOLUMN = _descriptor.Descriptor( - name='DeleteFromColumn', - full_name='google.bigtable.v2.Mutation.DeleteFromColumn', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='family_name', full_name='google.bigtable.v2.Mutation.DeleteFromColumn.family_name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='column_qualifier', full_name='google.bigtable.v2.Mutation.DeleteFromColumn.column_qualifier', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='time_range', full_name='google.bigtable.v2.Mutation.DeleteFromColumn.time_range', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - 
serialized_start=2507, - serialized_end=2628, + name="DeleteFromColumn", + full_name="google.bigtable.v2.Mutation.DeleteFromColumn", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="family_name", + full_name="google.bigtable.v2.Mutation.DeleteFromColumn.family_name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="column_qualifier", + full_name="google.bigtable.v2.Mutation.DeleteFromColumn.column_qualifier", + index=1, + number=2, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b(""), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="time_range", + full_name="google.bigtable.v2.Mutation.DeleteFromColumn.time_range", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=2507, + serialized_end=2628, ) _MUTATION_DELETEFROMFAMILY = _descriptor.Descriptor( - name='DeleteFromFamily', - full_name='google.bigtable.v2.Mutation.DeleteFromFamily', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='family_name', full_name='google.bigtable.v2.Mutation.DeleteFromFamily.family_name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=2630, - serialized_end=2669, + name="DeleteFromFamily", + full_name="google.bigtable.v2.Mutation.DeleteFromFamily", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="family_name", + full_name="google.bigtable.v2.Mutation.DeleteFromFamily.family_name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ) + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=2630, + serialized_end=2669, ) _MUTATION_DELETEFROMROW = _descriptor.Descriptor( - name='DeleteFromRow', - full_name='google.bigtable.v2.Mutation.DeleteFromRow', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=2671, - serialized_end=2686, + 
name="DeleteFromRow", + full_name="google.bigtable.v2.Mutation.DeleteFromRow", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=2671, + serialized_end=2686, ) _MUTATION = _descriptor.Descriptor( - name='Mutation', - full_name='google.bigtable.v2.Mutation', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='set_cell', full_name='google.bigtable.v2.Mutation.set_cell', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='delete_from_column', full_name='google.bigtable.v2.Mutation.delete_from_column', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='delete_from_family', full_name='google.bigtable.v2.Mutation.delete_from_family', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='delete_from_row', full_name='google.bigtable.v2.Mutation.delete_from_row', index=3, - number=4, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[_MUTATION_SETCELL, _MUTATION_DELETEFROMCOLUMN, _MUTATION_DELETEFROMFAMILY, _MUTATION_DELETEFROMROW, ], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name='mutation', full_name='google.bigtable.v2.Mutation.mutation', - index=0, containing_type=None, fields=[]), - ], - serialized_start=2113, - serialized_end=2698, + name="Mutation", + full_name="google.bigtable.v2.Mutation", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="set_cell", + full_name="google.bigtable.v2.Mutation.set_cell", + index=0, + number=1, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="delete_from_column", + full_name="google.bigtable.v2.Mutation.delete_from_column", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="delete_from_family", + full_name="google.bigtable.v2.Mutation.delete_from_family", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + 
is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="delete_from_row", + full_name="google.bigtable.v2.Mutation.delete_from_row", + index=3, + number=4, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[ + _MUTATION_SETCELL, + _MUTATION_DELETEFROMCOLUMN, + _MUTATION_DELETEFROMFAMILY, + _MUTATION_DELETEFROMROW, + ], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name="mutation", + full_name="google.bigtable.v2.Mutation.mutation", + index=0, + containing_type=None, + fields=[], + ) + ], + serialized_start=2113, + serialized_end=2698, ) _READMODIFYWRITERULE = _descriptor.Descriptor( - name='ReadModifyWriteRule', - full_name='google.bigtable.v2.ReadModifyWriteRule', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='family_name', full_name='google.bigtable.v2.ReadModifyWriteRule.family_name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='column_qualifier', full_name='google.bigtable.v2.ReadModifyWriteRule.column_qualifier', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='append_value', full_name='google.bigtable.v2.ReadModifyWriteRule.append_value', index=2, - number=3, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='increment_amount', full_name='google.bigtable.v2.ReadModifyWriteRule.increment_amount', index=3, - number=4, type=3, cpp_type=2, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name='rule', full_name='google.bigtable.v2.ReadModifyWriteRule.rule', - index=0, containing_type=None, fields=[]), - ], - serialized_start=2701, - serialized_end=2829, + name="ReadModifyWriteRule", + full_name="google.bigtable.v2.ReadModifyWriteRule", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="family_name", + full_name="google.bigtable.v2.ReadModifyWriteRule.family_name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="column_qualifier", + 
full_name="google.bigtable.v2.ReadModifyWriteRule.column_qualifier", + index=1, + number=2, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b(""), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="append_value", + full_name="google.bigtable.v2.ReadModifyWriteRule.append_value", + index=2, + number=3, + type=12, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b(""), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="increment_amount", + full_name="google.bigtable.v2.ReadModifyWriteRule.increment_amount", + index=3, + number=4, + type=3, + cpp_type=2, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name="rule", + full_name="google.bigtable.v2.ReadModifyWriteRule.rule", + index=0, + containing_type=None, + fields=[], + ) + ], + serialized_start=2701, + serialized_end=2829, ) -_ROW.fields_by_name['families'].message_type = _FAMILY -_FAMILY.fields_by_name['columns'].message_type = _COLUMN -_COLUMN.fields_by_name['cells'].message_type = _CELL -_ROWRANGE.oneofs_by_name['start_key'].fields.append( - _ROWRANGE.fields_by_name['start_key_closed']) -_ROWRANGE.fields_by_name['start_key_closed'].containing_oneof = _ROWRANGE.oneofs_by_name['start_key'] -_ROWRANGE.oneofs_by_name['start_key'].fields.append( - _ROWRANGE.fields_by_name['start_key_open']) -_ROWRANGE.fields_by_name['start_key_open'].containing_oneof = _ROWRANGE.oneofs_by_name['start_key'] -_ROWRANGE.oneofs_by_name['end_key'].fields.append( - _ROWRANGE.fields_by_name['end_key_open']) -_ROWRANGE.fields_by_name['end_key_open'].containing_oneof = _ROWRANGE.oneofs_by_name['end_key'] -_ROWRANGE.oneofs_by_name['end_key'].fields.append( - _ROWRANGE.fields_by_name['end_key_closed']) -_ROWRANGE.fields_by_name['end_key_closed'].containing_oneof = _ROWRANGE.oneofs_by_name['end_key'] -_ROWSET.fields_by_name['row_ranges'].message_type = _ROWRANGE -_COLUMNRANGE.oneofs_by_name['start_qualifier'].fields.append( - _COLUMNRANGE.fields_by_name['start_qualifier_closed']) -_COLUMNRANGE.fields_by_name['start_qualifier_closed'].containing_oneof = _COLUMNRANGE.oneofs_by_name['start_qualifier'] -_COLUMNRANGE.oneofs_by_name['start_qualifier'].fields.append( - _COLUMNRANGE.fields_by_name['start_qualifier_open']) -_COLUMNRANGE.fields_by_name['start_qualifier_open'].containing_oneof = _COLUMNRANGE.oneofs_by_name['start_qualifier'] -_COLUMNRANGE.oneofs_by_name['end_qualifier'].fields.append( - _COLUMNRANGE.fields_by_name['end_qualifier_closed']) -_COLUMNRANGE.fields_by_name['end_qualifier_closed'].containing_oneof = _COLUMNRANGE.oneofs_by_name['end_qualifier'] -_COLUMNRANGE.oneofs_by_name['end_qualifier'].fields.append( - _COLUMNRANGE.fields_by_name['end_qualifier_open']) -_COLUMNRANGE.fields_by_name['end_qualifier_open'].containing_oneof = _COLUMNRANGE.oneofs_by_name['end_qualifier'] -_VALUERANGE.oneofs_by_name['start_value'].fields.append( - _VALUERANGE.fields_by_name['start_value_closed']) 
-_VALUERANGE.fields_by_name['start_value_closed'].containing_oneof = _VALUERANGE.oneofs_by_name['start_value'] -_VALUERANGE.oneofs_by_name['start_value'].fields.append( - _VALUERANGE.fields_by_name['start_value_open']) -_VALUERANGE.fields_by_name['start_value_open'].containing_oneof = _VALUERANGE.oneofs_by_name['start_value'] -_VALUERANGE.oneofs_by_name['end_value'].fields.append( - _VALUERANGE.fields_by_name['end_value_closed']) -_VALUERANGE.fields_by_name['end_value_closed'].containing_oneof = _VALUERANGE.oneofs_by_name['end_value'] -_VALUERANGE.oneofs_by_name['end_value'].fields.append( - _VALUERANGE.fields_by_name['end_value_open']) -_VALUERANGE.fields_by_name['end_value_open'].containing_oneof = _VALUERANGE.oneofs_by_name['end_value'] -_ROWFILTER_CHAIN.fields_by_name['filters'].message_type = _ROWFILTER +_ROW.fields_by_name["families"].message_type = _FAMILY +_FAMILY.fields_by_name["columns"].message_type = _COLUMN +_COLUMN.fields_by_name["cells"].message_type = _CELL +_ROWRANGE.oneofs_by_name["start_key"].fields.append( + _ROWRANGE.fields_by_name["start_key_closed"] +) +_ROWRANGE.fields_by_name[ + "start_key_closed" +].containing_oneof = _ROWRANGE.oneofs_by_name["start_key"] +_ROWRANGE.oneofs_by_name["start_key"].fields.append( + _ROWRANGE.fields_by_name["start_key_open"] +) +_ROWRANGE.fields_by_name["start_key_open"].containing_oneof = _ROWRANGE.oneofs_by_name[ + "start_key" +] +_ROWRANGE.oneofs_by_name["end_key"].fields.append( + _ROWRANGE.fields_by_name["end_key_open"] +) +_ROWRANGE.fields_by_name["end_key_open"].containing_oneof = _ROWRANGE.oneofs_by_name[ + "end_key" +] +_ROWRANGE.oneofs_by_name["end_key"].fields.append( + _ROWRANGE.fields_by_name["end_key_closed"] +) +_ROWRANGE.fields_by_name["end_key_closed"].containing_oneof = _ROWRANGE.oneofs_by_name[ + "end_key" +] +_ROWSET.fields_by_name["row_ranges"].message_type = _ROWRANGE +_COLUMNRANGE.oneofs_by_name["start_qualifier"].fields.append( + _COLUMNRANGE.fields_by_name["start_qualifier_closed"] +) +_COLUMNRANGE.fields_by_name[ + "start_qualifier_closed" +].containing_oneof = _COLUMNRANGE.oneofs_by_name["start_qualifier"] +_COLUMNRANGE.oneofs_by_name["start_qualifier"].fields.append( + _COLUMNRANGE.fields_by_name["start_qualifier_open"] +) +_COLUMNRANGE.fields_by_name[ + "start_qualifier_open" +].containing_oneof = _COLUMNRANGE.oneofs_by_name["start_qualifier"] +_COLUMNRANGE.oneofs_by_name["end_qualifier"].fields.append( + _COLUMNRANGE.fields_by_name["end_qualifier_closed"] +) +_COLUMNRANGE.fields_by_name[ + "end_qualifier_closed" +].containing_oneof = _COLUMNRANGE.oneofs_by_name["end_qualifier"] +_COLUMNRANGE.oneofs_by_name["end_qualifier"].fields.append( + _COLUMNRANGE.fields_by_name["end_qualifier_open"] +) +_COLUMNRANGE.fields_by_name[ + "end_qualifier_open" +].containing_oneof = _COLUMNRANGE.oneofs_by_name["end_qualifier"] +_VALUERANGE.oneofs_by_name["start_value"].fields.append( + _VALUERANGE.fields_by_name["start_value_closed"] +) +_VALUERANGE.fields_by_name[ + "start_value_closed" +].containing_oneof = _VALUERANGE.oneofs_by_name["start_value"] +_VALUERANGE.oneofs_by_name["start_value"].fields.append( + _VALUERANGE.fields_by_name["start_value_open"] +) +_VALUERANGE.fields_by_name[ + "start_value_open" +].containing_oneof = _VALUERANGE.oneofs_by_name["start_value"] +_VALUERANGE.oneofs_by_name["end_value"].fields.append( + _VALUERANGE.fields_by_name["end_value_closed"] +) +_VALUERANGE.fields_by_name[ + "end_value_closed" +].containing_oneof = _VALUERANGE.oneofs_by_name["end_value"] 
+_VALUERANGE.oneofs_by_name["end_value"].fields.append( + _VALUERANGE.fields_by_name["end_value_open"] +) +_VALUERANGE.fields_by_name[ + "end_value_open" +].containing_oneof = _VALUERANGE.oneofs_by_name["end_value"] +_ROWFILTER_CHAIN.fields_by_name["filters"].message_type = _ROWFILTER _ROWFILTER_CHAIN.containing_type = _ROWFILTER -_ROWFILTER_INTERLEAVE.fields_by_name['filters'].message_type = _ROWFILTER +_ROWFILTER_INTERLEAVE.fields_by_name["filters"].message_type = _ROWFILTER _ROWFILTER_INTERLEAVE.containing_type = _ROWFILTER -_ROWFILTER_CONDITION.fields_by_name['predicate_filter'].message_type = _ROWFILTER -_ROWFILTER_CONDITION.fields_by_name['true_filter'].message_type = _ROWFILTER -_ROWFILTER_CONDITION.fields_by_name['false_filter'].message_type = _ROWFILTER +_ROWFILTER_CONDITION.fields_by_name["predicate_filter"].message_type = _ROWFILTER +_ROWFILTER_CONDITION.fields_by_name["true_filter"].message_type = _ROWFILTER +_ROWFILTER_CONDITION.fields_by_name["false_filter"].message_type = _ROWFILTER _ROWFILTER_CONDITION.containing_type = _ROWFILTER -_ROWFILTER.fields_by_name['chain'].message_type = _ROWFILTER_CHAIN -_ROWFILTER.fields_by_name['interleave'].message_type = _ROWFILTER_INTERLEAVE -_ROWFILTER.fields_by_name['condition'].message_type = _ROWFILTER_CONDITION -_ROWFILTER.fields_by_name['column_range_filter'].message_type = _COLUMNRANGE -_ROWFILTER.fields_by_name['timestamp_range_filter'].message_type = _TIMESTAMPRANGE -_ROWFILTER.fields_by_name['value_range_filter'].message_type = _VALUERANGE -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['chain']) -_ROWFILTER.fields_by_name['chain'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['interleave']) -_ROWFILTER.fields_by_name['interleave'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['condition']) -_ROWFILTER.fields_by_name['condition'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['sink']) -_ROWFILTER.fields_by_name['sink'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['pass_all_filter']) -_ROWFILTER.fields_by_name['pass_all_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['block_all_filter']) -_ROWFILTER.fields_by_name['block_all_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['row_key_regex_filter']) -_ROWFILTER.fields_by_name['row_key_regex_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['row_sample_filter']) -_ROWFILTER.fields_by_name['row_sample_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['family_name_regex_filter']) -_ROWFILTER.fields_by_name['family_name_regex_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['column_qualifier_regex_filter']) -_ROWFILTER.fields_by_name['column_qualifier_regex_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] 
-_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['column_range_filter']) -_ROWFILTER.fields_by_name['column_range_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['timestamp_range_filter']) -_ROWFILTER.fields_by_name['timestamp_range_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['value_regex_filter']) -_ROWFILTER.fields_by_name['value_regex_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['value_range_filter']) -_ROWFILTER.fields_by_name['value_range_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['cells_per_row_offset_filter']) -_ROWFILTER.fields_by_name['cells_per_row_offset_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['cells_per_row_limit_filter']) -_ROWFILTER.fields_by_name['cells_per_row_limit_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['cells_per_column_limit_filter']) -_ROWFILTER.fields_by_name['cells_per_column_limit_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['strip_value_transformer']) -_ROWFILTER.fields_by_name['strip_value_transformer'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['apply_label_transformer']) -_ROWFILTER.fields_by_name['apply_label_transformer'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] +_ROWFILTER.fields_by_name["chain"].message_type = _ROWFILTER_CHAIN +_ROWFILTER.fields_by_name["interleave"].message_type = _ROWFILTER_INTERLEAVE +_ROWFILTER.fields_by_name["condition"].message_type = _ROWFILTER_CONDITION +_ROWFILTER.fields_by_name["column_range_filter"].message_type = _COLUMNRANGE +_ROWFILTER.fields_by_name["timestamp_range_filter"].message_type = _TIMESTAMPRANGE +_ROWFILTER.fields_by_name["value_range_filter"].message_type = _VALUERANGE +_ROWFILTER.oneofs_by_name["filter"].fields.append(_ROWFILTER.fields_by_name["chain"]) +_ROWFILTER.fields_by_name["chain"].containing_oneof = _ROWFILTER.oneofs_by_name[ + "filter" +] +_ROWFILTER.oneofs_by_name["filter"].fields.append( + _ROWFILTER.fields_by_name["interleave"] +) +_ROWFILTER.fields_by_name["interleave"].containing_oneof = _ROWFILTER.oneofs_by_name[ + "filter" +] +_ROWFILTER.oneofs_by_name["filter"].fields.append( + _ROWFILTER.fields_by_name["condition"] +) +_ROWFILTER.fields_by_name["condition"].containing_oneof = _ROWFILTER.oneofs_by_name[ + "filter" +] +_ROWFILTER.oneofs_by_name["filter"].fields.append(_ROWFILTER.fields_by_name["sink"]) +_ROWFILTER.fields_by_name["sink"].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] +_ROWFILTER.oneofs_by_name["filter"].fields.append( + _ROWFILTER.fields_by_name["pass_all_filter"] +) +_ROWFILTER.fields_by_name[ + "pass_all_filter" +].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] +_ROWFILTER.oneofs_by_name["filter"].fields.append( + _ROWFILTER.fields_by_name["block_all_filter"] +) +_ROWFILTER.fields_by_name[ + "block_all_filter" +].containing_oneof = 
_ROWFILTER.oneofs_by_name["filter"] +_ROWFILTER.oneofs_by_name["filter"].fields.append( + _ROWFILTER.fields_by_name["row_key_regex_filter"] +) +_ROWFILTER.fields_by_name[ + "row_key_regex_filter" +].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] +_ROWFILTER.oneofs_by_name["filter"].fields.append( + _ROWFILTER.fields_by_name["row_sample_filter"] +) +_ROWFILTER.fields_by_name[ + "row_sample_filter" +].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] +_ROWFILTER.oneofs_by_name["filter"].fields.append( + _ROWFILTER.fields_by_name["family_name_regex_filter"] +) +_ROWFILTER.fields_by_name[ + "family_name_regex_filter" +].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] +_ROWFILTER.oneofs_by_name["filter"].fields.append( + _ROWFILTER.fields_by_name["column_qualifier_regex_filter"] +) +_ROWFILTER.fields_by_name[ + "column_qualifier_regex_filter" +].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] +_ROWFILTER.oneofs_by_name["filter"].fields.append( + _ROWFILTER.fields_by_name["column_range_filter"] +) +_ROWFILTER.fields_by_name[ + "column_range_filter" +].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] +_ROWFILTER.oneofs_by_name["filter"].fields.append( + _ROWFILTER.fields_by_name["timestamp_range_filter"] +) +_ROWFILTER.fields_by_name[ + "timestamp_range_filter" +].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] +_ROWFILTER.oneofs_by_name["filter"].fields.append( + _ROWFILTER.fields_by_name["value_regex_filter"] +) +_ROWFILTER.fields_by_name[ + "value_regex_filter" +].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] +_ROWFILTER.oneofs_by_name["filter"].fields.append( + _ROWFILTER.fields_by_name["value_range_filter"] +) +_ROWFILTER.fields_by_name[ + "value_range_filter" +].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] +_ROWFILTER.oneofs_by_name["filter"].fields.append( + _ROWFILTER.fields_by_name["cells_per_row_offset_filter"] +) +_ROWFILTER.fields_by_name[ + "cells_per_row_offset_filter" +].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] +_ROWFILTER.oneofs_by_name["filter"].fields.append( + _ROWFILTER.fields_by_name["cells_per_row_limit_filter"] +) +_ROWFILTER.fields_by_name[ + "cells_per_row_limit_filter" +].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] +_ROWFILTER.oneofs_by_name["filter"].fields.append( + _ROWFILTER.fields_by_name["cells_per_column_limit_filter"] +) +_ROWFILTER.fields_by_name[ + "cells_per_column_limit_filter" +].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] +_ROWFILTER.oneofs_by_name["filter"].fields.append( + _ROWFILTER.fields_by_name["strip_value_transformer"] +) +_ROWFILTER.fields_by_name[ + "strip_value_transformer" +].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] +_ROWFILTER.oneofs_by_name["filter"].fields.append( + _ROWFILTER.fields_by_name["apply_label_transformer"] +) +_ROWFILTER.fields_by_name[ + "apply_label_transformer" +].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] _MUTATION_SETCELL.containing_type = _MUTATION -_MUTATION_DELETEFROMCOLUMN.fields_by_name['time_range'].message_type = _TIMESTAMPRANGE +_MUTATION_DELETEFROMCOLUMN.fields_by_name["time_range"].message_type = _TIMESTAMPRANGE _MUTATION_DELETEFROMCOLUMN.containing_type = _MUTATION _MUTATION_DELETEFROMFAMILY.containing_type = _MUTATION _MUTATION_DELETEFROMROW.containing_type = _MUTATION -_MUTATION.fields_by_name['set_cell'].message_type = _MUTATION_SETCELL -_MUTATION.fields_by_name['delete_from_column'].message_type = _MUTATION_DELETEFROMCOLUMN -_MUTATION.fields_by_name['delete_from_family'].message_type = 
_MUTATION_DELETEFROMFAMILY -_MUTATION.fields_by_name['delete_from_row'].message_type = _MUTATION_DELETEFROMROW -_MUTATION.oneofs_by_name['mutation'].fields.append( - _MUTATION.fields_by_name['set_cell']) -_MUTATION.fields_by_name['set_cell'].containing_oneof = _MUTATION.oneofs_by_name['mutation'] -_MUTATION.oneofs_by_name['mutation'].fields.append( - _MUTATION.fields_by_name['delete_from_column']) -_MUTATION.fields_by_name['delete_from_column'].containing_oneof = _MUTATION.oneofs_by_name['mutation'] -_MUTATION.oneofs_by_name['mutation'].fields.append( - _MUTATION.fields_by_name['delete_from_family']) -_MUTATION.fields_by_name['delete_from_family'].containing_oneof = _MUTATION.oneofs_by_name['mutation'] -_MUTATION.oneofs_by_name['mutation'].fields.append( - _MUTATION.fields_by_name['delete_from_row']) -_MUTATION.fields_by_name['delete_from_row'].containing_oneof = _MUTATION.oneofs_by_name['mutation'] -_READMODIFYWRITERULE.oneofs_by_name['rule'].fields.append( - _READMODIFYWRITERULE.fields_by_name['append_value']) -_READMODIFYWRITERULE.fields_by_name['append_value'].containing_oneof = _READMODIFYWRITERULE.oneofs_by_name['rule'] -_READMODIFYWRITERULE.oneofs_by_name['rule'].fields.append( - _READMODIFYWRITERULE.fields_by_name['increment_amount']) -_READMODIFYWRITERULE.fields_by_name['increment_amount'].containing_oneof = _READMODIFYWRITERULE.oneofs_by_name['rule'] -DESCRIPTOR.message_types_by_name['Row'] = _ROW -DESCRIPTOR.message_types_by_name['Family'] = _FAMILY -DESCRIPTOR.message_types_by_name['Column'] = _COLUMN -DESCRIPTOR.message_types_by_name['Cell'] = _CELL -DESCRIPTOR.message_types_by_name['RowRange'] = _ROWRANGE -DESCRIPTOR.message_types_by_name['RowSet'] = _ROWSET -DESCRIPTOR.message_types_by_name['ColumnRange'] = _COLUMNRANGE -DESCRIPTOR.message_types_by_name['TimestampRange'] = _TIMESTAMPRANGE -DESCRIPTOR.message_types_by_name['ValueRange'] = _VALUERANGE -DESCRIPTOR.message_types_by_name['RowFilter'] = _ROWFILTER -DESCRIPTOR.message_types_by_name['Mutation'] = _MUTATION -DESCRIPTOR.message_types_by_name['ReadModifyWriteRule'] = _READMODIFYWRITERULE +_MUTATION.fields_by_name["set_cell"].message_type = _MUTATION_SETCELL +_MUTATION.fields_by_name["delete_from_column"].message_type = _MUTATION_DELETEFROMCOLUMN +_MUTATION.fields_by_name["delete_from_family"].message_type = _MUTATION_DELETEFROMFAMILY +_MUTATION.fields_by_name["delete_from_row"].message_type = _MUTATION_DELETEFROMROW +_MUTATION.oneofs_by_name["mutation"].fields.append(_MUTATION.fields_by_name["set_cell"]) +_MUTATION.fields_by_name["set_cell"].containing_oneof = _MUTATION.oneofs_by_name[ + "mutation" +] +_MUTATION.oneofs_by_name["mutation"].fields.append( + _MUTATION.fields_by_name["delete_from_column"] +) +_MUTATION.fields_by_name[ + "delete_from_column" +].containing_oneof = _MUTATION.oneofs_by_name["mutation"] +_MUTATION.oneofs_by_name["mutation"].fields.append( + _MUTATION.fields_by_name["delete_from_family"] +) +_MUTATION.fields_by_name[ + "delete_from_family" +].containing_oneof = _MUTATION.oneofs_by_name["mutation"] +_MUTATION.oneofs_by_name["mutation"].fields.append( + _MUTATION.fields_by_name["delete_from_row"] +) +_MUTATION.fields_by_name["delete_from_row"].containing_oneof = _MUTATION.oneofs_by_name[ + "mutation" +] +_READMODIFYWRITERULE.oneofs_by_name["rule"].fields.append( + _READMODIFYWRITERULE.fields_by_name["append_value"] +) +_READMODIFYWRITERULE.fields_by_name[ + "append_value" +].containing_oneof = _READMODIFYWRITERULE.oneofs_by_name["rule"] +_READMODIFYWRITERULE.oneofs_by_name["rule"].fields.append( + 
_READMODIFYWRITERULE.fields_by_name["increment_amount"] +) +_READMODIFYWRITERULE.fields_by_name[ + "increment_amount" +].containing_oneof = _READMODIFYWRITERULE.oneofs_by_name["rule"] +DESCRIPTOR.message_types_by_name["Row"] = _ROW +DESCRIPTOR.message_types_by_name["Family"] = _FAMILY +DESCRIPTOR.message_types_by_name["Column"] = _COLUMN +DESCRIPTOR.message_types_by_name["Cell"] = _CELL +DESCRIPTOR.message_types_by_name["RowRange"] = _ROWRANGE +DESCRIPTOR.message_types_by_name["RowSet"] = _ROWSET +DESCRIPTOR.message_types_by_name["ColumnRange"] = _COLUMNRANGE +DESCRIPTOR.message_types_by_name["TimestampRange"] = _TIMESTAMPRANGE +DESCRIPTOR.message_types_by_name["ValueRange"] = _VALUERANGE +DESCRIPTOR.message_types_by_name["RowFilter"] = _ROWFILTER +DESCRIPTOR.message_types_by_name["Mutation"] = _MUTATION +DESCRIPTOR.message_types_by_name["ReadModifyWriteRule"] = _READMODIFYWRITERULE _sym_db.RegisterFileDescriptor(DESCRIPTOR) -Row = _reflection.GeneratedProtocolMessageType('Row', (_message.Message,), dict( - DESCRIPTOR = _ROW, - __module__ = 'google.cloud.bigtable_v2.proto.data_pb2' - , - __doc__ = """Specifies the complete (requested) contents of a single row of a table. +Row = _reflection.GeneratedProtocolMessageType( + "Row", + (_message.Message,), + dict( + DESCRIPTOR=_ROW, + __module__="google.cloud.bigtable_v2.proto.data_pb2", + __doc__="""Specifies the complete (requested) contents of a single row of a table. Rows which exceed 256MiB in size cannot be read in full. @@ -1132,15 +1952,18 @@ May be empty, but only if the entire row is empty. The mutual ordering of column families is not specified. """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.Row) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.v2.Row) + ), +) _sym_db.RegisterMessage(Row) -Family = _reflection.GeneratedProtocolMessageType('Family', (_message.Message,), dict( - DESCRIPTOR = _FAMILY, - __module__ = 'google.cloud.bigtable_v2.proto.data_pb2' - , - __doc__ = """Specifies (some of) the contents of a single row/column family +Family = _reflection.GeneratedProtocolMessageType( + "Family", + (_message.Message,), + dict( + DESCRIPTOR=_FAMILY, + __module__="google.cloud.bigtable_v2.proto.data_pb2", + __doc__="""Specifies (some of) the contents of a single row/column family intersection of a table. @@ -1156,15 +1979,18 @@ columns: Must not be empty. Sorted in order of increasing "qualifier". """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.Family) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.v2.Family) + ), +) _sym_db.RegisterMessage(Family) -Column = _reflection.GeneratedProtocolMessageType('Column', (_message.Message,), dict( - DESCRIPTOR = _COLUMN, - __module__ = 'google.cloud.bigtable_v2.proto.data_pb2' - , - __doc__ = """Specifies (some of) the contents of a single row/column intersection of +Column = _reflection.GeneratedProtocolMessageType( + "Column", + (_message.Message,), + dict( + DESCRIPTOR=_COLUMN, + __module__="google.cloud.bigtable_v2.proto.data_pb2", + __doc__="""Specifies (some of) the contents of a single row/column intersection of a table. @@ -1179,15 +2005,18 @@ Must not be empty. Sorted in order of decreasing "timestamp\_micros". 
""", - # @@protoc_insertion_point(class_scope:google.bigtable.v2.Column) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.v2.Column) + ), +) _sym_db.RegisterMessage(Column) -Cell = _reflection.GeneratedProtocolMessageType('Cell', (_message.Message,), dict( - DESCRIPTOR = _CELL, - __module__ = 'google.cloud.bigtable_v2.proto.data_pb2' - , - __doc__ = """Specifies (some of) the contents of a single row/column/timestamp of a +Cell = _reflection.GeneratedProtocolMessageType( + "Cell", + (_message.Message,), + dict( + DESCRIPTOR=_CELL, + __module__="google.cloud.bigtable_v2.proto.data_pb2", + __doc__="""Specifies (some of) the contents of a single row/column/timestamp of a table. @@ -1207,15 +2036,18 @@ Labels applied to the cell by a [RowFilter][google.bigtable.v2.RowFilter]. """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.Cell) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.v2.Cell) + ), +) _sym_db.RegisterMessage(Cell) -RowRange = _reflection.GeneratedProtocolMessageType('RowRange', (_message.Message,), dict( - DESCRIPTOR = _ROWRANGE, - __module__ = 'google.cloud.bigtable_v2.proto.data_pb2' - , - __doc__ = """Specifies a contiguous range of rows. +RowRange = _reflection.GeneratedProtocolMessageType( + "RowRange", + (_message.Message,), + dict( + DESCRIPTOR=_ROWRANGE, + __module__="google.cloud.bigtable_v2.proto.data_pb2", + __doc__="""Specifies a contiguous range of rows. Attributes: @@ -1234,15 +2066,18 @@ end_key_closed: Used when giving an inclusive upper bound for the range. """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowRange) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowRange) + ), +) _sym_db.RegisterMessage(RowRange) -RowSet = _reflection.GeneratedProtocolMessageType('RowSet', (_message.Message,), dict( - DESCRIPTOR = _ROWSET, - __module__ = 'google.cloud.bigtable_v2.proto.data_pb2' - , - __doc__ = """Specifies a non-contiguous set of rows. +RowSet = _reflection.GeneratedProtocolMessageType( + "RowSet", + (_message.Message,), + dict( + DESCRIPTOR=_ROWSET, + __module__="google.cloud.bigtable_v2.proto.data_pb2", + __doc__="""Specifies a non-contiguous set of rows. Attributes: @@ -1251,15 +2086,18 @@ row_ranges: Contiguous row ranges included in the set. """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowSet) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowSet) + ), +) _sym_db.RegisterMessage(RowSet) -ColumnRange = _reflection.GeneratedProtocolMessageType('ColumnRange', (_message.Message,), dict( - DESCRIPTOR = _COLUMNRANGE, - __module__ = 'google.cloud.bigtable_v2.proto.data_pb2' - , - __doc__ = """Specifies a contiguous range of columns within a single column family. +ColumnRange = _reflection.GeneratedProtocolMessageType( + "ColumnRange", + (_message.Message,), + dict( + DESCRIPTOR=_COLUMNRANGE, + __module__="google.cloud.bigtable_v2.proto.data_pb2", + __doc__="""Specifies a contiguous range of columns within a single column family. The range spans from : to :, where both bounds can be either inclusive or exclusive. @@ -1285,15 +2123,18 @@ end_qualifier_open: Used when giving an exclusive upper bound for the range. 
""", - # @@protoc_insertion_point(class_scope:google.bigtable.v2.ColumnRange) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.v2.ColumnRange) + ), +) _sym_db.RegisterMessage(ColumnRange) -TimestampRange = _reflection.GeneratedProtocolMessageType('TimestampRange', (_message.Message,), dict( - DESCRIPTOR = _TIMESTAMPRANGE, - __module__ = 'google.cloud.bigtable_v2.proto.data_pb2' - , - __doc__ = """Specified a contiguous range of microsecond timestamps. +TimestampRange = _reflection.GeneratedProtocolMessageType( + "TimestampRange", + (_message.Message,), + dict( + DESCRIPTOR=_TIMESTAMPRANGE, + __module__="google.cloud.bigtable_v2.proto.data_pb2", + __doc__="""Specified a contiguous range of microsecond timestamps. Attributes: @@ -1302,15 +2143,18 @@ end_timestamp_micros: Exclusive upper bound. If left empty, interpreted as infinity. """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.TimestampRange) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.v2.TimestampRange) + ), +) _sym_db.RegisterMessage(TimestampRange) -ValueRange = _reflection.GeneratedProtocolMessageType('ValueRange', (_message.Message,), dict( - DESCRIPTOR = _VALUERANGE, - __module__ = 'google.cloud.bigtable_v2.proto.data_pb2' - , - __doc__ = """Specifies a contiguous range of raw byte values. +ValueRange = _reflection.GeneratedProtocolMessageType( + "ValueRange", + (_message.Message,), + dict( + DESCRIPTOR=_VALUERANGE, + __module__="google.cloud.bigtable_v2.proto.data_pb2", + __doc__="""Specifies a contiguous range of raw byte values. Attributes: @@ -1329,17 +2173,22 @@ end_value_open: Used when giving an exclusive upper bound for the range. """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.ValueRange) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.v2.ValueRange) + ), +) _sym_db.RegisterMessage(ValueRange) -RowFilter = _reflection.GeneratedProtocolMessageType('RowFilter', (_message.Message,), dict( - - Chain = _reflection.GeneratedProtocolMessageType('Chain', (_message.Message,), dict( - DESCRIPTOR = _ROWFILTER_CHAIN, - __module__ = 'google.cloud.bigtable_v2.proto.data_pb2' - , - __doc__ = """A RowFilter which sends rows through several RowFilters in sequence. +RowFilter = _reflection.GeneratedProtocolMessageType( + "RowFilter", + (_message.Message,), + dict( + Chain=_reflection.GeneratedProtocolMessageType( + "Chain", + (_message.Message,), + dict( + DESCRIPTOR=_ROWFILTER_CHAIN, + __module__="google.cloud.bigtable_v2.proto.data_pb2", + __doc__="""A RowFilter which sends rows through several RowFilters in sequence. Attributes: @@ -1348,15 +2197,16 @@ input row: in row -> f(0) -> intermediate row -> f(1) -> ... -> f(N) -> out row The full chain is executed atomically. """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowFilter.Chain) - )) - , - - Interleave = _reflection.GeneratedProtocolMessageType('Interleave', (_message.Message,), dict( - DESCRIPTOR = _ROWFILTER_INTERLEAVE, - __module__ = 'google.cloud.bigtable_v2.proto.data_pb2' - , - __doc__ = """A RowFilter which sends each row to each of several component RowFilters + # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowFilter.Chain) + ), + ), + Interleave=_reflection.GeneratedProtocolMessageType( + "Interleave", + (_message.Message,), + dict( + DESCRIPTOR=_ROWFILTER_INTERLEAVE, + __module__="google.cloud.bigtable_v2.proto.data_pb2", + __doc__="""A RowFilter which sends each row to each of several component RowFilters and interleaves the results. 
@@ -1386,15 +2236,16 @@ far,blah,5,x // identical to #5 All interleaved filters are executed atomically. """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowFilter.Interleave) - )) - , - - Condition = _reflection.GeneratedProtocolMessageType('Condition', (_message.Message,), dict( - DESCRIPTOR = _ROWFILTER_CONDITION, - __module__ = 'google.cloud.bigtable_v2.proto.data_pb2' - , - __doc__ = """A RowFilter which evaluates one of two possible RowFilters, depending on + # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowFilter.Interleave) + ), + ), + Condition=_reflection.GeneratedProtocolMessageType( + "Condition", + (_message.Message,), + dict( + DESCRIPTOR=_ROWFILTER_CONDITION, + __module__="google.cloud.bigtable_v2.proto.data_pb2", + __doc__="""A RowFilter which evaluates one of two possible RowFilters, depending on whether or not a predicate RowFilter outputs any cells from the input row. @@ -1418,13 +2269,12 @@ does not return any results. If not provided, no results will be returned in the false case. """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowFilter.Condition) - )) - , - DESCRIPTOR = _ROWFILTER, - __module__ = 'google.cloud.bigtable_v2.proto.data_pb2' - , - __doc__ = """Takes a row as input and produces an alternate view of the row based on + # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowFilter.Condition) + ), + ), + DESCRIPTOR=_ROWFILTER, + __module__="google.cloud.bigtable_v2.proto.data_pb2", + __doc__="""Takes a row as input and produces an alternate view of the row based on specified rules. For example, a RowFilter might trim down a row to include just the cells from columns matching a given regular expression, or might return all the cells of a row but not their values. More @@ -1595,20 +2445,25 @@ as they will be applied to separate copies of the input. This may be relaxed in the future. """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowFilter) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowFilter) + ), +) _sym_db.RegisterMessage(RowFilter) _sym_db.RegisterMessage(RowFilter.Chain) _sym_db.RegisterMessage(RowFilter.Interleave) _sym_db.RegisterMessage(RowFilter.Condition) -Mutation = _reflection.GeneratedProtocolMessageType('Mutation', (_message.Message,), dict( - - SetCell = _reflection.GeneratedProtocolMessageType('SetCell', (_message.Message,), dict( - DESCRIPTOR = _MUTATION_SETCELL, - __module__ = 'google.cloud.bigtable_v2.proto.data_pb2' - , - __doc__ = """A Mutation which sets the value of the specified cell. +Mutation = _reflection.GeneratedProtocolMessageType( + "Mutation", + (_message.Message,), + dict( + SetCell=_reflection.GeneratedProtocolMessageType( + "SetCell", + (_message.Message,), + dict( + DESCRIPTOR=_MUTATION_SETCELL, + __module__="google.cloud.bigtable_v2.proto.data_pb2", + __doc__="""A Mutation which sets the value of the specified cell. Attributes: @@ -1628,15 +2483,16 @@ value: The value to be written into the specified cell. 
""", - # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation.SetCell) - )) - , - - DeleteFromColumn = _reflection.GeneratedProtocolMessageType('DeleteFromColumn', (_message.Message,), dict( - DESCRIPTOR = _MUTATION_DELETEFROMCOLUMN, - __module__ = 'google.cloud.bigtable_v2.proto.data_pb2' - , - __doc__ = """A Mutation which deletes cells from the specified column, optionally + # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation.SetCell) + ), + ), + DeleteFromColumn=_reflection.GeneratedProtocolMessageType( + "DeleteFromColumn", + (_message.Message,), + dict( + DESCRIPTOR=_MUTATION_DELETEFROMCOLUMN, + __module__="google.cloud.bigtable_v2.proto.data_pb2", + __doc__="""A Mutation which deletes cells from the specified column, optionally restricting the deletions to a given timestamp range. @@ -1650,15 +2506,16 @@ time_range: The range of timestamps within which cells should be deleted. """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation.DeleteFromColumn) - )) - , - - DeleteFromFamily = _reflection.GeneratedProtocolMessageType('DeleteFromFamily', (_message.Message,), dict( - DESCRIPTOR = _MUTATION_DELETEFROMFAMILY, - __module__ = 'google.cloud.bigtable_v2.proto.data_pb2' - , - __doc__ = """A Mutation which deletes all cells from the specified column family. + # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation.DeleteFromColumn) + ), + ), + DeleteFromFamily=_reflection.GeneratedProtocolMessageType( + "DeleteFromFamily", + (_message.Message,), + dict( + DESCRIPTOR=_MUTATION_DELETEFROMFAMILY, + __module__="google.cloud.bigtable_v2.proto.data_pb2", + __doc__="""A Mutation which deletes all cells from the specified column family. Attributes: @@ -1666,23 +2523,23 @@ The name of the family from which cells should be deleted. Must match ``[-_.a-zA-Z0-9]+`` """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation.DeleteFromFamily) - )) - , - - DeleteFromRow = _reflection.GeneratedProtocolMessageType('DeleteFromRow', (_message.Message,), dict( - DESCRIPTOR = _MUTATION_DELETEFROMROW, - __module__ = 'google.cloud.bigtable_v2.proto.data_pb2' - , - __doc__ = """A Mutation which deletes all cells from the containing row. + # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation.DeleteFromFamily) + ), + ), + DeleteFromRow=_reflection.GeneratedProtocolMessageType( + "DeleteFromRow", + (_message.Message,), + dict( + DESCRIPTOR=_MUTATION_DELETEFROMROW, + __module__="google.cloud.bigtable_v2.proto.data_pb2", + __doc__="""A Mutation which deletes all cells from the containing row. """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation.DeleteFromRow) - )) - , - DESCRIPTOR = _MUTATION, - __module__ = 'google.cloud.bigtable_v2.proto.data_pb2' - , - __doc__ = """Specifies a particular change to be made to the contents of a row. + # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation.DeleteFromRow) + ), + ), + DESCRIPTOR=_MUTATION, + __module__="google.cloud.bigtable_v2.proto.data_pb2", + __doc__="""Specifies a particular change to be made to the contents of a row. Attributes: @@ -1697,19 +2554,22 @@ delete_from_row: Deletes cells from the entire row. 
""", - # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation) + ), +) _sym_db.RegisterMessage(Mutation) _sym_db.RegisterMessage(Mutation.SetCell) _sym_db.RegisterMessage(Mutation.DeleteFromColumn) _sym_db.RegisterMessage(Mutation.DeleteFromFamily) _sym_db.RegisterMessage(Mutation.DeleteFromRow) -ReadModifyWriteRule = _reflection.GeneratedProtocolMessageType('ReadModifyWriteRule', (_message.Message,), dict( - DESCRIPTOR = _READMODIFYWRITERULE, - __module__ = 'google.cloud.bigtable_v2.proto.data_pb2' - , - __doc__ = """Specifies an atomic read/modify/write operation on the latest value of +ReadModifyWriteRule = _reflection.GeneratedProtocolMessageType( + "ReadModifyWriteRule", + (_message.Message,), + dict( + DESCRIPTOR=_READMODIFYWRITERULE, + __module__="google.cloud.bigtable_v2.proto.data_pb2", + __doc__="""Specifies an atomic read/modify/write operation on the latest value of the specified column. @@ -1735,11 +2595,17 @@ must contain an 8-byte value (interpreted as a 64-bit big- endian signed integer), or the entire request will fail. """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadModifyWriteRule) - )) + # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadModifyWriteRule) + ), +) _sym_db.RegisterMessage(ReadModifyWriteRule) DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\026com.google.bigtable.v2B\tDataProtoP\001Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2')) +DESCRIPTOR._options = _descriptor._ParseOptions( + descriptor_pb2.FileOptions(), + _b( + "\n\026com.google.bigtable.v2B\tDataProtoP\001Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2" + ), +) # @@protoc_insertion_point(module_scope) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2_grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2_grpc.py index a89435267cb2..07cb78fe03a9 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2_grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2_grpc.py @@ -1,3 +1,2 @@ # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
import grpc - From a289b935d04bdb262a5ffeaac59fb3e56c5874bd Mon Sep 17 00:00:00 2001 From: Christopher Wilcox Date: Mon, 3 Dec 2018 13:59:48 -0800 Subject: [PATCH 213/892] Use moved iam.policy now at google.api_core.iam.policy (#6741) * update references to iam to use api-core\ * Update dependency to api_core --- .../google-cloud-bigtable/google/cloud/bigtable/instance.py | 2 +- packages/google-cloud-bigtable/google/cloud/bigtable/policy.py | 2 +- packages/google-cloud-bigtable/setup.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py index d494d7c1b936..47c8d5f539da 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py @@ -473,7 +473,7 @@ def test_iam_permissions(self, permissions): def _to_dict_from_policy_pb(self, policy): """Returns a dictionary representation of resource returned from the getIamPolicy API to use as parameter for - :meth: google.cloud.iam.Policy.from_api_repr + :meth: google.api_core.iam.Policy.from_api_repr """ pb_dict = {} bindings = [ diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/policy.py b/packages/google-cloud-bigtable/google/cloud/bigtable/policy.py index 87c9a7650c2e..039cea9f3e9e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/policy.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/policy.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from google.cloud.iam import Policy as BasePolicy +from google.api_core.iam import Policy as BasePolicy from google.cloud._helpers import _to_bytes """IAM roles supported by Bigtable Instance resource""" diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index a44311f7d3b9..01194d784f49 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -29,7 +29,7 @@ # 'Development Status :: 5 - Production/Stable' release_status = 'Development Status :: 4 - Beta' dependencies = [ - 'google-api-core[grpc] >= 1.4.1, < 2.0.0dev', + 'google-api-core[grpc] >= 1.6.0, < 2.0.0dev', 'google-cloud-core >= 0.28.0, <0.29dev', 'grpc-google-iam-v1 >= 0.11.4, < 0.12dev', ] From c961011abfad34f5fb996dd7c8776030d41b0988 Mon Sep 17 00:00:00 2001 From: Christopher Wilcox Date: Tue, 4 Dec 2018 09:00:08 -0800 Subject: [PATCH 214/892] Update dependency to google-cloud-core (#6835) --- packages/google-cloud-bigtable/setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 01194d784f49..5373c0174f46 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -30,7 +30,7 @@ release_status = 'Development Status :: 4 - Beta' dependencies = [ 'google-api-core[grpc] >= 1.6.0, < 2.0.0dev', - 'google-cloud-core >= 0.28.0, <0.29dev', + 'google-cloud-core >= 0.29.0, < 0.30dev', 'grpc-google-iam-v1 >= 0.11.4, < 0.12dev', ] extras = { From 05d86122a732ab05cea592095512744e79e0af3b Mon Sep 17 00:00:00 2001 From: Christopher Wilcox Date: Mon, 10 Dec 2018 13:36:01 -0800 Subject: [PATCH 215/892] Release bigtable 0.32.0 (#6891) * Release 0.32.0 --- packages/google-cloud-bigtable/CHANGELOG.md | 20 ++++++++++++++++++++ packages/google-cloud-bigtable/setup.py | 2 +- 2 files 
changed, 21 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 714062afb60a..658ff888818a 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,26 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## 0.32.0 + +12-10-2018 12:47 PST + + +### Implementation Changes +- Import `iam.policy` from `google.api_core`. ([#6741](https://github.com/googleapis/google-cloud-python/pull/6741)) +- Remove `deepcopy` from `PartialRowData.cells` property. ([#6648](https://github.com/googleapis/google-cloud-python/pull/6648)) +- Pick up fixes to GAPIC generator. ([#6630](https://github.com/googleapis/google-cloud-python/pull/6630)) + +### Dependencies +- Update dependency to google-cloud-core ([#6835](https://github.com/googleapis/google-cloud-python/pull/6835)) + +### Internal / Testing Changes +- Blacken all gen'd libs ([#6792](https://github.com/googleapis/google-cloud-python/pull/6792)) +- Omit local deps ([#6701](https://github.com/googleapis/google-cloud-python/pull/6701)) +- Run black at end of synth.py ([#6698](https://github.com/googleapis/google-cloud-python/pull/6698)) +- Blackening Continued... ([#6667](https://github.com/googleapis/google-cloud-python/pull/6667)) +- Add templates for flake8, coveragerc, noxfile, and black. ([#6642](https://github.com/googleapis/google-cloud-python/pull/6642)) + ## 0.31.1 11-02-2018 08:13 PDT diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 5373c0174f46..2983341a25bb 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -22,7 +22,7 @@ name = 'google-cloud-bigtable' description = 'Google Cloud Bigtable API client library' -version = '0.31.1' +version = '0.32.0' # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' From eadce594a6ed3904f70db8d00d9fdc8396eaccf1 Mon Sep 17 00:00:00 2001 From: Christopher Wilcox Date: Fri, 14 Dec 2018 12:25:37 -0800 Subject: [PATCH 216/892] Document Python 2 deprecation (#6910) --- packages/google-cloud-bigtable/README.rst | 9 +++++++++ packages/google-cloud-bigtable/setup.py | 2 ++ 2 files changed, 11 insertions(+) diff --git a/packages/google-cloud-bigtable/README.rst b/packages/google-cloud-bigtable/README.rst index 24632069f1f6..8835e2a3bbcf 100644 --- a/packages/google-cloud-bigtable/README.rst +++ b/packages/google-cloud-bigtable/README.rst @@ -49,6 +49,15 @@ dependencies. .. _`virtualenv`: https://virtualenv.pypa.io/en/latest/ +Supported Python Versions +^^^^^^^^^^^^^^^^^^^^^^^^^ +Python >= 3.4 + +Deprecated Python Versions +^^^^^^^^^^^^^^^^^^^^^^^^^^ +Python == 2.7. Python 2.7 support will be removed on January 1, 2020. 
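As context for the deprecation note above: the same patch also encodes the support window in packaging metadata, via the classifier and ``python_requires`` additions in the setup.py hunk below, which pip (9.0 and later) uses to refuse installation on unsupported interpreters. A minimal, abbreviated sketch of that declaration -- the version specifier and classifiers mirror the ones added in this commit, while the surrounding ``setup()`` call is illustrative only, not the full file:

from setuptools import setup

setup(
    name='google-cloud-bigtable',
    # Installable on Python 2.7 and 3.4+, matching the README support statement.
    python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*',
    classifiers=[
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.7',
    ],
)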
+ + Mac/Linux ^^^^^^^^^ diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 2983341a25bb..45999b43cb52 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -77,6 +77,7 @@ 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', 'Operating System :: OS Independent', 'Topic :: Internet', ], @@ -85,6 +86,7 @@ namespace_packages=namespaces, install_requires=dependencies, extras_require=extras, + python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*', include_package_data=True, zip_safe=False, ) From c35fe198a9cde90f1d62eab7a46b20a4adebc3db Mon Sep 17 00:00:00 2001 From: sangramql <39852271+sangramql@users.noreply.github.com> Date: Mon, 17 Dec 2018 22:44:09 +0530 Subject: [PATCH 217/892] Add snippets for table operations. (#6484) --- .../docs/snippets_table.py | 391 ++++++++++++++++++ .../google/cloud/bigtable/table.py | 95 ++++- packages/google-cloud-bigtable/noxfile.py | 28 ++ packages/google-cloud-bigtable/synth.py | 2 +- 4 files changed, 514 insertions(+), 2 deletions(-) create mode 100644 packages/google-cloud-bigtable/docs/snippets_table.py diff --git a/packages/google-cloud-bigtable/docs/snippets_table.py b/packages/google-cloud-bigtable/docs/snippets_table.py new file mode 100644 index 000000000000..966062ac2f12 --- /dev/null +++ b/packages/google-cloud-bigtable/docs/snippets_table.py @@ -0,0 +1,391 @@ +#!/usr/bin/env python + +# Copyright 2018, Google LLC +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Testable usage examples for Google Cloud Bigtable API wrapper + +Each example function takes a ``client`` argument (which must be an instance +of :class:`google.cloud.bigtable.client.Client`) and uses it to perform a task +with the API. + +To facilitate running the examples as system tests, each example is also passed +a ``to_delete`` list; the function adds to the list any objects created which +need to be deleted during teardown. + +.. note:: + This file is under progress and will be updated with more guidance from + the team. Unit tests will be added with guidance from the team. 
+ +""" + +import datetime +import pytest + +from test_utils.system import unique_resource_id +from google.cloud._helpers import UTC +from google.cloud.bigtable import Client +from google.cloud.bigtable import enums +from google.cloud.bigtable import column_family + + +INSTANCE_ID = "snippet-" + unique_resource_id('-') +CLUSTER_ID = "clus-1-" + unique_resource_id('-') +TABLE_ID = "tabl-1-" + unique_resource_id('-') +COLUMN_FAMILY_ID = "col_fam_id-" + unique_resource_id('-') +LOCATION_ID = 'us-central1-f' +ALT_LOCATION_ID = 'us-central1-a' +PRODUCTION = enums.Instance.Type.PRODUCTION +SERVER_NODES = 3 +STORAGE_TYPE = enums.StorageType.SSD +LABEL_KEY = u'python-snippet' +LABEL_STAMP = datetime.datetime.utcnow() \ + .replace(microsecond=0, tzinfo=UTC,) \ + .strftime("%Y-%m-%dt%H-%M-%S") +LABELS = {LABEL_KEY: str(LABEL_STAMP)} +COL_NAME1 = b'col-name1' +CELL_VAL1 = b'cell-val' + + +class Config(object): + """Run-time configuration to be modified at set-up. + + This is a mutable stand-in to allow test set-up to modify + global state. + """ + CLIENT = None + INSTANCE = None + TABLE = None + + +def setup_module(): + client = Config.CLIENT = Client(admin=True) + Config.INSTANCE = client.instance(INSTANCE_ID, + instance_type=PRODUCTION, + labels=LABELS) + cluster = Config.INSTANCE.cluster(CLUSTER_ID, + location_id=LOCATION_ID, + serve_nodes=SERVER_NODES, + default_storage_type=STORAGE_TYPE) + operation = Config.INSTANCE.create(clusters=[cluster]) + # We want to make sure the operation completes. + operation.result(timeout=100) + Config.TABLE = Config.INSTANCE.table(TABLE_ID) + Config.TABLE.create() + gc_rule = column_family.MaxVersionsGCRule(2) + column_family1 = Config.TABLE.column_family(COLUMN_FAMILY_ID, + gc_rule=gc_rule) + column_family1.create() + + +def teardown_module(): + Config.INSTANCE.delete() + + +def test_bigtable_create_table(): + # [START bigtable_create_table] + from google.cloud.bigtable import Client + from google.cloud.bigtable import column_family + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + + # Create table without Column families. + table1 = instance.table("table_id1") + table1.create() + + # Create table with Column families. + table2 = instance.table("table_id2") + # Define the GC policy to retain only the most recent 2 versions. 
+ max_versions_rule = column_family.MaxVersionsGCRule(2) + table2.create(column_families={'cf1': max_versions_rule}) + + # [END bigtable_create_table] + assert table1.exists() + assert table2.exists() + table1.delete() + table2.delete() + + +def test_bigtable_sample_row_keys(): + # [START bigtable_sample_row_keys] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + + table = instance.table("table_id1_samplerow") + # [END bigtable_sample_row_keys] + initial_split_keys = [b'split_key_1', b'split_key_10', + b'split_key_20'] + table.create(initial_split_keys=initial_split_keys) + # [START bigtable_sample_row_keys] + data = table.sample_row_keys() + actual_keys, offset = zip(*[(rk.row_key, rk.offset_bytes) for rk in data]) + # [END bigtable_sample_row_keys] + initial_split_keys.append(b'') + assert list(actual_keys) == initial_split_keys + table.delete() + + +def test_bigtable_write_read_drop_truncate(): + # [START bigtable_mutate_rows] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + row_keys = [b'row_key_1', b'row_key_2', b'row_key_3', b'row_key_4', + b'row_key_20', b'row_key_22', b'row_key_200'] + col_name = b'col-name1' + rows = [] + for i, row_key in enumerate(row_keys): + value = 'value_{}'.format(i).encode() + row = table.row(row_key) + row.set_cell(COLUMN_FAMILY_ID, + col_name, + value, + timestamp=datetime.datetime.utcnow()) + rows.append(row) + response = table.mutate_rows(rows) + # validate that all rows written successfully + for i, status in enumerate(response): + if status.code is not 0: + print('Row number {} failed to write'.format(i)) + # [END bigtable_mutate_rows] + assert len(response) == len(rows) + # [START bigtable_read_row] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + row_key = 'row_key_1' + row = table.read_row(row_key) + # [END bigtable_read_row] + assert row.row_key.decode('utf-8') == row_key + # [START bigtable_read_rows] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + + # Read full table + partial_rows = table.read_rows() + read_rows = [row for row in partial_rows] + # [END bigtable_read_rows] + assert len(read_rows) == len(rows) + # [START bigtable_drop_by_prefix] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + row_key_prefix = b'row_key_2' + table.drop_by_prefix(row_key_prefix, timeout=200) + # [END bigtable_drop_by_prefix] + dropped_row_keys = [b'row_key_2', b'row_key_20', + b'row_key_22', b'row_key_200'] + for row in table.read_rows(): + assert row.row_key.decode('utf-8') not in dropped_row_keys + + # [START bigtable_truncate_table] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + table.truncate(timeout=200) + # [END bigtable_truncate_table] + rows_data_after_truncate = [] + for row in table.read_rows(): + rows_data_after_truncate.append(row.row_key) + assert rows_data_after_truncate == [] + + +def test_bigtable_mutations_batcher(): + # [START bigtable_mutations_batcher] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = 
client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + batcher = table.mutations_batcher() + # [END bigtable_mutations_batcher] + + # Below code will be used while creating batcher.py snippets. + # So not removing this code as of now. + row_keys = [b'row_key_1', b'row_key_2', b'row_key_3', b'row_key_4', + b'row_key_20', b'row_key_22', b'row_key_200'] + column_name = 'column_name'.encode() + # Add a single row + row_key = row_keys[0] + row = table.row(row_key) + row.set_cell(COLUMN_FAMILY_ID, + column_name, + 'value-0', + timestamp=datetime.datetime.utcnow()) + batcher.mutate(row) + # Add a collections of rows + rows = [] + for i in range(1, len(row_keys)): + row = table.row(row_keys[i]) + value = 'value_{}'.format(i).encode() + row.set_cell(COLUMN_FAMILY_ID, + column_name, + value, + timestamp=datetime.datetime.utcnow()) + rows.append(row) + batcher.mutate_rows(rows) + # batcher will flush current batch if it + # reaches the max flush_count + + # Manually send the current batch to Cloud Bigtable + batcher.flush() + rows_on_table = [] + for row in table.read_rows(): + rows_on_table.append(row.row_key) + assert len(rows_on_table) == len(row_keys) + table.truncate(timeout=200) + + +def test_bigtable_table_column_family(): + # [START bigtable_table_column_family] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + + table = instance.table(TABLE_ID) + column_family_obj = table.column_family(COLUMN_FAMILY_ID) + # [END bigtable_table_column_family] + + assert column_family_obj.column_family_id == COLUMN_FAMILY_ID + + +def test_bigtable_list_tables(): + # [START bigtable_list_tables] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + tables_list = instance.list_tables() + # [END bigtable_list_tables] + assert len(tables_list) is not 0 + + +def test_bigtable_table_name(): + import re + # [START bigtable_table_name] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + + table = instance.table(TABLE_ID) + table_name = table.name + # [END bigtable_table_name] + _table_name_re = re.compile(r'^projects/(?P[^/]+)/' + r'instances/(?P[^/]+)/tables/' + r'(?P[_a-zA-Z0-9][-_.a-zA-Z0-9]*)$') + assert _table_name_re.match(table_name) + + +def test_bigtable_list_column_families(): + # [START bigtable_list_column_families] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + + column_family_list = table.list_column_families() + # [END bigtable_list_column_families] + + assert len(column_family_list) > 0 + + +def test_bigtable_get_cluster_states(): + # [START bigtable_get_cluster_states] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + + get_cluster_states = table.get_cluster_states() + # [END bigtable_get_cluster_states] + + assert CLUSTER_ID in get_cluster_states + + +def test_bigtable_table_exists(): + # [START bigtable_check_table_exists] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + table_exists = table.exists() + # [END bigtable_check_table_exists] + assert table_exists + + +def test_bigtable_delete_table(): + # [START bigtable_delete_table] + from google.cloud.bigtable import Client + + 
client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table("table_id_del") + # [END bigtable_delete_table] + + table.create() + assert table.exists() + + # [START bigtable_delete_table] + table.delete() + # [END bigtable_delete_table] + assert not table.exists() + + +def test_bigtable_table_row(): + # [START bigtable_table_row] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + + row_keys = [b'row_key_1', b'row_key_2'] + row1_obj = table.row(row_keys[0]) + row2_obj = table.row(row_keys[1]) + # [END bigtable_table_row] + + row1_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1) + row1_obj.commit() + row2_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1) + row2_obj.commit() + + actual_rows_keys = [] + for row in table.read_rows(): + actual_rows_keys.append(row.row_key) + + assert actual_rows_keys == row_keys + + table.truncate(timeout=300) + + +if __name__ == '__main__': + pytest.main() diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index ccbb5cf47e91..40dc25a5a7e3 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -109,6 +109,12 @@ def __init__(self, table_id, instance, app_profile_id=None): def name(self): """Table name used in requests. + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_table_name] + :end-before: [END bigtable_table_name] + .. note:: This property will not change if ``table_id`` does not, but the @@ -131,6 +137,12 @@ def name(self): def column_family(self, column_family_id, gc_rule=None): """Factory to create a column family associated with this table. + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_table_column_family] + :end-before: [END bigtable_table_column_family] + :type column_family_id: str :param column_family_id: The ID of the column family. Must be of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. @@ -147,6 +159,12 @@ def column_family(self, column_family_id, gc_rule=None): def row(self, row_key, filter_=None, append=False): """Factory to create a row associated with this table. + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_table_row] + :end-before: [END bigtable_table_row] + .. warning:: At most one of ``filter_`` and ``append`` can be used in a @@ -188,6 +206,12 @@ def __ne__(self, other): def create(self, initial_split_keys=[], column_families={}): """Creates this table. + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_create_table] + :end-before: [END bigtable_create_table] + .. note:: A create request returns a @@ -226,6 +250,12 @@ def create(self, initial_split_keys=[], column_families={}): def exists(self): """Check whether the table exists. + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_check_table_exists] + :end-before: [END bigtable_check_table_exists] + :rtype: bool :returns: True if the table exists, else False. """ @@ -237,13 +267,27 @@ def exists(self): return False def delete(self): - """Delete this table.""" + """Delete this table. + + For example: + + .. 
literalinclude:: snippets_table.py + :start-after: [START bigtable_delete_table] + :end-before: [END bigtable_delete_table] + + """ table_client = self._instance._client.table_admin_client table_client.delete_table(name=self.name) def list_column_families(self): """List the column families owned by this table. + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_list_column_families] + :end-before: [END bigtable_list_column_families] + :rtype: dict :returns: Dictionary of column families attached to this table. Keys are strings (column family names) and values are @@ -265,6 +309,12 @@ def list_column_families(self): def get_cluster_states(self): """List the cluster states owned by this table. + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_get_cluster_states] + :end-before: [END bigtable_get_cluster_states] + :rtype: dict :returns: Dictionary of cluster states for this table. Keys are cluster ids and values are @@ -283,6 +333,12 @@ def get_cluster_states(self): def read_row(self, row_key, filter_=None): """Read a single row from this table. + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_read_row] + :end-before: [END bigtable_read_row] + :type row_key: bytes :param row_key: The key of the row to read from. @@ -316,6 +372,12 @@ def read_rows( ): """Read rows from this table. + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_read_rows] + :end-before: [END bigtable_read_rows] + :type start_key: bytes :param start_key: (Optional) The beginning of a range of row keys to read from. The range will include ``start_key``. If @@ -408,6 +470,12 @@ def yield_rows(self, **kwargs): def mutate_rows(self, rows, retry=DEFAULT_RETRY): """Mutates multiple rows in bulk. + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_mutate_rows] + :end-before: [END bigtable_mutate_rows] + The method tries to update all specified rows. If some of the rows weren't updated, it would not remove mutations. They can be applied to the row separately. @@ -442,6 +510,12 @@ def mutate_rows(self, rows, retry=DEFAULT_RETRY): def sample_row_keys(self): """Read a sample of row keys in the table. + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_sample_row_keys] + :end-before: [END bigtable_sample_row_keys] + The returned row keys will delimit contiguous sections of the table of approximately equal size, which can be used to break up the data for distributed tasks like mapreduces. @@ -480,6 +554,12 @@ def sample_row_keys(self): def truncate(self, timeout=None): """Truncate the table + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_truncate_table] + :end-before: [END bigtable_truncate_table] + :type timeout: float :param timeout: (Optional) The amount of time, in seconds, to wait for the request to complete. @@ -503,6 +583,13 @@ def truncate(self, timeout=None): def drop_by_prefix(self, row_key_prefix, timeout=None): """ + + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_drop_by_prefix] + :end-before: [END bigtable_drop_by_prefix] + :type row_prefix: bytes :param row_prefix: Delete all rows that start with this row key prefix. Prefix cannot be zero length. 
@@ -531,6 +618,12 @@ def drop_by_prefix(self, row_key_prefix, timeout=None): def mutations_batcher(self, flush_count=FLUSH_COUNT, max_row_bytes=MAX_ROW_BYTES): """Factory to create a mutation batcher associated with this instance. + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_mutations_batcher] + :end-before: [END bigtable_mutations_batcher] + :type table: class :param table: class:`~google.cloud.bigtable.table.Table`. diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index 652b0a9c4e44..1ccb42c469fa 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -138,3 +138,31 @@ def cover(session): session.run("coverage", "report", "--show-missing", "--fail-under=99") session.run("coverage", "erase") + + +@nox.session(python=['2.7', '3.7']) +def snippets(session): + """Run the documentation example snippets.""" + # Sanity check: Only run snippets system tests if the environment variable + # is set. + if not os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', ''): + session.skip('Credentials must be set via environment variable.') + + # Install all test dependencies, then install local packages in place. + session.install('mock', 'pytest') + for local_dep in LOCAL_DEPS: + session.install('-e', local_dep) + session.install('-e', '../test_utils/') + session.install('-e', '.') + session.run( + 'py.test', + '--quiet', + os.path.join('docs', 'snippets.py'), + *session.posargs + ) + session.run( + 'py.test', + '--quiet', + os.path.join('docs', 'snippets_table.py'), + *session.posargs + ) diff --git a/packages/google-cloud-bigtable/synth.py b/packages/google-cloud-bigtable/synth.py index 6fb2f37af307..f1bec2c6a8e2 100644 --- a/packages/google-cloud-bigtable/synth.py +++ b/packages/google-cloud-bigtable/synth.py @@ -85,6 +85,6 @@ # Add templated files # ---------------------------------------------------------------------------- templated_files = common.py_library(unit_cov_level=97, cov_level=99) -s.move(templated_files) +s.move(templated_files, excludes=['noxfile.py']) s.shell.run(["nox", "-s", "blacken"], hide_output=False) From 5ffe335b5c3e80041fa6a5465ea9a8ad6dae7e56 Mon Sep 17 00:00:00 2001 From: Christopher Wilcox Date: Tue, 18 Dec 2018 12:51:14 -0800 Subject: [PATCH 218/892] Release 0.32.1 (#6944) --- packages/google-cloud-bigtable/CHANGELOG.md | 9 +++++++++ packages/google-cloud-bigtable/setup.py | 2 +- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 658ff888818a..d8f70e35165c 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,15 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## 0.32.1 + +12-17-2018 16:38 PST + + +### Documentation +- Document Python 2 deprecation ([#6910](https://github.com/googleapis/google-cloud-python/pull/6910)) +- Add snippets for table operations. 
([#6484](https://github.com/googleapis/google-cloud-python/pull/6484)) + ## 0.32.0 12-10-2018 12:47 PST diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 45999b43cb52..ea272a24c244 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -22,7 +22,7 @@ name = 'google-cloud-bigtable' description = 'Google Cloud Bigtable API client library' -version = '0.32.0' +version = '0.32.1' # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' From 2a8aa73bfdb10bf53b645ff495c10bfe356beac2 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Wed, 19 Dec 2018 17:39:49 -0500 Subject: [PATCH 219/892] Plug systest instance leaks (#7004) Towards #7003. --- packages/google-cloud-bigtable/tests/system.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index 754da098226c..4147c9fb32ef 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -85,7 +85,7 @@ def _retry_on_unavailable(exc): return exc.code() == StatusCode.UNAVAILABLE -retry_429 = RetryErrors(TooManyRequests) +retry_429 = RetryErrors(TooManyRequests, max_tries=9) def setUpModule(): @@ -165,12 +165,13 @@ def test_create_instance_defaults(self): ALT_CLUSTER_ID, location_id=LOCATION_ID, serve_nodes=SERVE_NODES ) operation = instance.create(clusters=[cluster]) - # We want to make sure the operation completes. - operation.result(timeout=10) # Make sure this instance gets deleted after the test case. self.instances_to_delete.append(instance) + # We want to make sure the operation completes. + operation.result(timeout=10) + # Create a new instance instance and make sure it is the same. instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID) instance_alt.reload() @@ -194,12 +195,13 @@ def test_create_instance(self): ALT_CLUSTER_ID = ALT_INSTANCE_ID + "-cluster" cluster = instance.cluster(ALT_CLUSTER_ID, location_id=LOCATION_ID) operation = instance.create(clusters=[cluster]) - # We want to make sure the operation completes. - operation.result(timeout=10) # Make sure this instance gets deleted after the test case. self.instances_to_delete.append(instance) + # We want to make sure the operation completes. + operation.result(timeout=10) + # Create a new instance instance and make sure it is the same. instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID) instance_alt.reload() @@ -252,12 +254,13 @@ def test_create_instance_w_two_clusters(self): default_storage_type=STORAGE_TYPE, ) operation = instance.create(clusters=[cluster_1, cluster_2]) - # We want to make sure the operation completes. - operation.result(timeout=10) # Make sure this instance gets deleted after the test case. self.instances_to_delete.append(instance) + # We want to make sure the operation completes. + operation.result(timeout=10) + # Create a new instance instance and make sure it is the same. instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID) instance_alt.reload() From 652c44a6096559394340139340153cba7c306dfb Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot <44816363+yoshi-automation@users.noreply.github.com> Date: Fri, 4 Jan 2019 11:47:03 -0800 Subject: [PATCH 220/892] Pick up order-of-enum fix from GAPIC generator. 
(#6879) --- .../cloud/bigtable_admin_v2/gapic/enums.py | 134 +++++++++--------- packages/google-cloud-bigtable/noxfile.py | 28 ++-- packages/google-cloud-bigtable/synth.metadata | 49 +++++++ 3 files changed, 128 insertions(+), 83 deletions(-) create mode 100644 packages/google-cloud-bigtable/synth.metadata diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py index b2e837a12448..6eea1b641ce9 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py @@ -33,73 +33,6 @@ class StorageType(enum.IntEnum): HDD = 2 -class Instance(object): - class State(enum.IntEnum): - """ - Possible states of an instance. - - Attributes: - STATE_NOT_KNOWN (int): The state of the instance could not be determined. - READY (int): The instance has been successfully created and can serve requests - to its tables. - CREATING (int): The instance is currently being created, and may be destroyed - if the creation process encounters an error. - """ - - STATE_NOT_KNOWN = 0 - READY = 1 - CREATING = 2 - - class Type(enum.IntEnum): - """ - The type of the instance. - - Attributes: - TYPE_UNSPECIFIED (int): The type of the instance is unspecified. If set when creating an - instance, a ``PRODUCTION`` instance will be created. If set when - updating an instance, the type will be left unchanged. - PRODUCTION (int): An instance meant for production use. ``serve_nodes`` must be set on the - cluster. - DEVELOPMENT (int): The instance is meant for development and testing purposes only; it has - no performance or uptime guarantees and is not covered by SLA. After a - development instance is created, it can be upgraded by updating the - instance to type ``PRODUCTION``. An instance created as a production - instance cannot be changed to a development instance. When creating a - development instance, ``serve_nodes`` on the cluster must not be set. - """ - - TYPE_UNSPECIFIED = 0 - PRODUCTION = 1 - DEVELOPMENT = 2 - - -class Cluster(object): - class State(enum.IntEnum): - """ - Possible states of a cluster. - - Attributes: - STATE_NOT_KNOWN (int): The state of the cluster could not be determined. - READY (int): The cluster has been successfully created and is ready to serve requests. - CREATING (int): The cluster is currently being created, and may be destroyed - if the creation process encounters an error. - A cluster may not be able to serve requests while being created. - RESIZING (int): The cluster is currently being resized, and may revert to its previous - node count if the process encounters an error. - A cluster is still capable of serving requests while being resized, - but may exhibit performance as if its number of allocated nodes is - between the starting and requested states. - DISABLED (int): The cluster has no backing nodes. The data (tables) still - exist, but no operations can be performed on the cluster. - """ - - STATE_NOT_KNOWN = 0 - READY = 1 - CREATING = 2 - RESIZING = 3 - DISABLED = 4 - - class Table(object): class TimestampGranularity(enum.IntEnum): """ @@ -176,3 +109,70 @@ class State(enum.IntEnum): STATE_NOT_KNOWN = 0 READY = 1 CREATING = 2 + + +class Instance(object): + class State(enum.IntEnum): + """ + Possible states of an instance. + + Attributes: + STATE_NOT_KNOWN (int): The state of the instance could not be determined. 
+ READY (int): The instance has been successfully created and can serve requests + to its tables. + CREATING (int): The instance is currently being created, and may be destroyed + if the creation process encounters an error. + """ + + STATE_NOT_KNOWN = 0 + READY = 1 + CREATING = 2 + + class Type(enum.IntEnum): + """ + The type of the instance. + + Attributes: + TYPE_UNSPECIFIED (int): The type of the instance is unspecified. If set when creating an + instance, a ``PRODUCTION`` instance will be created. If set when + updating an instance, the type will be left unchanged. + PRODUCTION (int): An instance meant for production use. ``serve_nodes`` must be set on the + cluster. + DEVELOPMENT (int): The instance is meant for development and testing purposes only; it has + no performance or uptime guarantees and is not covered by SLA. After a + development instance is created, it can be upgraded by updating the + instance to type ``PRODUCTION``. An instance created as a production + instance cannot be changed to a development instance. When creating a + development instance, ``serve_nodes`` on the cluster must not be set. + """ + + TYPE_UNSPECIFIED = 0 + PRODUCTION = 1 + DEVELOPMENT = 2 + + +class Cluster(object): + class State(enum.IntEnum): + """ + Possible states of a cluster. + + Attributes: + STATE_NOT_KNOWN (int): The state of the cluster could not be determined. + READY (int): The cluster has been successfully created and is ready to serve requests. + CREATING (int): The cluster is currently being created, and may be destroyed + if the creation process encounters an error. + A cluster may not be able to serve requests while being created. + RESIZING (int): The cluster is currently being resized, and may revert to its previous + node count if the process encounters an error. + A cluster is still capable of serving requests while being resized, + but may exhibit performance as if its number of allocated nodes is + between the starting and requested states. + DISABLED (int): The cluster has no backing nodes. The data (tables) still + exist, but no operations can be performed on the cluster. + """ + + STATE_NOT_KNOWN = 0 + READY = 1 + CREATING = 2 + RESIZING = 3 + DISABLED = 4 diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index 1ccb42c469fa..a49d614b3e07 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -23,40 +23,36 @@ LOCAL_DEPS = (os.path.join("..", "api_core"), os.path.join("..", "core")) @nox.session(python="3.7") -def blacken(session): - """Run black. +def lint(session): + """Run linters. - Format code to uniform standard. + Returns a failure if the linters find linting errors or sufficiently + serious code quality issues. """ - session.install("black") + session.install("flake8", "black", *LOCAL_DEPS) session.run( "black", + "--check", "google", "tests", "docs", - "--exclude", - ".*/proto/.*|.*/gapic/.*|.*/.*_pb2.py", ) + session.run("flake8", "google", "tests") -@nox.session(python="3.7") -def lint(session): - """Run linters. +@nox.session(python="3.6") +def blacken(session): + """Run black. - Returns a failure if the linters find linting errors or sufficiently - serious code quality issues. + Format code to uniform standard. 
""" - session.install("flake8", "black", *LOCAL_DEPS) + session.install("black") session.run( "black", - "--check", "google", "tests", "docs", - "--exclude", - ".*/proto/.*|.*/gapic/.*|.*/.*_pb2.py", ) - session.run("flake8", "google", "tests") @nox.session(python="3.7") diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata new file mode 100644 index 000000000000..4c900bde2e0d --- /dev/null +++ b/packages/google-cloud-bigtable/synth.metadata @@ -0,0 +1,49 @@ +{ + "updateTime": "2018-12-08T13:13:19.537274Z", + "sources": [ + { + "generator": { + "name": "artman", + "version": "0.16.2", + "dockerImage": "googleapis/artman@sha256:2f6b261ee7fe1aedf238991c93a20b3820de37a343d0cacf3e3e9555c2aaf2ea" + } + }, + { + "git": { + "name": "googleapis", + "remote": "https://github.com/googleapis/googleapis.git", + "sha": "6f6505a69b2b0a1260c93e890d636eefb859e76e", + "internalRef": "224530961" + } + }, + { + "template": { + "name": "python_library", + "origin": "synthtool.gcp", + "version": "2018.12.6" + } + } + ], + "destinations": [ + { + "client": { + "source": "googleapis", + "apiName": "bigtable", + "apiVersion": "v2", + "language": "python", + "generator": "gapic", + "config": "google/bigtable/artman_bigtable.yaml" + } + }, + { + "client": { + "source": "googleapis", + "apiName": "bigtable_admin", + "apiVersion": "v2", + "language": "python", + "generator": "gapic", + "config": "google/bigtable/admin/artman_bigtableadmin.yaml" + } + } + ] +} \ No newline at end of file From 2a0149e829d47f92d419e8d49d4ba66df22e3956 Mon Sep 17 00:00:00 2001 From: sangramql <39852271+sangramql@users.noreply.github.com> Date: Sat, 5 Jan 2019 01:45:45 +0530 Subject: [PATCH 221/892] Bigtable client snippets (#7020) * add conditional row snippets * add snippets * Add client snippets --- .../google-cloud-bigtable/docs/snippets.py | 45 +++++++++++++++++++ .../google/cloud/bigtable/client.py | 24 ++++++++++ 2 files changed, 69 insertions(+) diff --git a/packages/google-cloud-bigtable/docs/snippets.py b/packages/google-cloud-bigtable/docs/snippets.py index 9255ab136771..d07229aef6b5 100644 --- a/packages/google-cloud-bigtable/docs/snippets.py +++ b/packages/google-cloud-bigtable/docs/snippets.py @@ -425,5 +425,50 @@ def test_bigtable_set_iam_policy_then_get_iam_policy(): assert len(policy.bigtable_admins) > 0 +def test_bigtable_project_path(): + import re + # [START bigtable_project_path] + from google.cloud.bigtable import Client + + client = Client(admin=True) + project_path = client.project_path + # [END bigtable_project_path] + + _project_path_re = re.compile(r'^projects/' + r'(?P' + r'[_a-zA-Z0-9][-_.a-zA-Z0-9]*)$') + assert _project_path_re.match(project_path) + + +def test_bigtable_table_data_client(): + # [START bigtable_table_data_client] + from google.cloud.bigtable import Client + + client = Client(admin=True) + table_data_client = client.table_data_client + # [END bigtable_table_data_client] + assert "BigtableClient" in str(table_data_client) + + +def test_bigtable_table_admin_client(): + # [START bigtable_table_admin_client] + from google.cloud.bigtable import Client + + client = Client(admin=True) + table_admin_client = client.table_admin_client + # [END bigtable_table_admin_client] + assert "BigtableTableAdmin" in str(table_admin_client) + + +def test_bigtable_instance_admin_client(): + # [START bigtable_instance_admin_client] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance_admin_client = client.instance_admin_client + # 
[END bigtable_instance_admin_client] + assert "BigtableInstanceAdmin" in str(instance_admin_client) + + if __name__ == "__main__": pytest.main() diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py index 14b836b9dd5b..8c5d35b09e4f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py @@ -168,6 +168,12 @@ def project_path(self): This property will not change if ``project`` does not, but the return value is not cached. + For example: + + .. literalinclude:: snippets.py + :start-after: [START bigtable_project_path] + :end-before: [END bigtable_project_path] + The project name is of the form ``"projects/{project}"`` @@ -181,6 +187,12 @@ def project_path(self): def table_data_client(self): """Getter for the gRPC stub used for the Table Admin API. + For example: + + .. literalinclude:: snippets.py + :start-after: [START bigtable_table_data_client] + :end-before: [END bigtable_table_data_client] + :rtype: :class:`.bigtable_v2.BigtableClient` :returns: A BigtableClient object. """ @@ -194,6 +206,12 @@ def table_data_client(self): def table_admin_client(self): """Getter for the gRPC stub used for the Table Admin API. + For example: + + .. literalinclude:: snippets.py + :start-after: [START bigtable_table_admin_client] + :end-before: [END bigtable_table_admin_client] + :rtype: :class:`.bigtable_admin_pb2.BigtableTableAdmin` :returns: A BigtableTableAdmin instance. :raises: :class:`ValueError ` if the current @@ -212,6 +230,12 @@ def table_admin_client(self): def instance_admin_client(self): """Getter for the gRPC stub used for the Table Admin API. + For example: + + .. literalinclude:: snippets.py + :start-after: [START bigtable_instance_admin_client] + :end-before: [END bigtable_instance_admin_client] + :rtype: :class:`.bigtable_admin_pb2.BigtableInstanceAdmin` :returns: A BigtableInstanceAdmin instance. :raises: :class:`ValueError ` if the current From aae8805e8114cc35bdc374a9fb1e1d61f6b382e7 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Fri, 4 Jan 2019 16:05:29 -0500 Subject: [PATCH 222/892] Blacken snippets. 
(#7048) --- .../docs/snippets_table.py | 130 ++++++++++-------- 1 file changed, 73 insertions(+), 57 deletions(-) diff --git a/packages/google-cloud-bigtable/docs/snippets_table.py b/packages/google-cloud-bigtable/docs/snippets_table.py index 966062ac2f12..78bbc1ffc13f 100644 --- a/packages/google-cloud-bigtable/docs/snippets_table.py +++ b/packages/google-cloud-bigtable/docs/snippets_table.py @@ -39,22 +39,24 @@ from google.cloud.bigtable import column_family -INSTANCE_ID = "snippet-" + unique_resource_id('-') -CLUSTER_ID = "clus-1-" + unique_resource_id('-') -TABLE_ID = "tabl-1-" + unique_resource_id('-') -COLUMN_FAMILY_ID = "col_fam_id-" + unique_resource_id('-') -LOCATION_ID = 'us-central1-f' -ALT_LOCATION_ID = 'us-central1-a' +INSTANCE_ID = "snippet-" + unique_resource_id("-") +CLUSTER_ID = "clus-1-" + unique_resource_id("-") +TABLE_ID = "tabl-1-" + unique_resource_id("-") +COLUMN_FAMILY_ID = "col_fam_id-" + unique_resource_id("-") +LOCATION_ID = "us-central1-f" +ALT_LOCATION_ID = "us-central1-a" PRODUCTION = enums.Instance.Type.PRODUCTION SERVER_NODES = 3 STORAGE_TYPE = enums.StorageType.SSD -LABEL_KEY = u'python-snippet' -LABEL_STAMP = datetime.datetime.utcnow() \ - .replace(microsecond=0, tzinfo=UTC,) \ - .strftime("%Y-%m-%dt%H-%M-%S") +LABEL_KEY = u"python-snippet" +LABEL_STAMP = ( + datetime.datetime.utcnow() + .replace(microsecond=0, tzinfo=UTC) + .strftime("%Y-%m-%dt%H-%M-%S") +) LABELS = {LABEL_KEY: str(LABEL_STAMP)} -COL_NAME1 = b'col-name1' -CELL_VAL1 = b'cell-val' +COL_NAME1 = b"col-name1" +CELL_VAL1 = b"cell-val" class Config(object): @@ -63,6 +65,7 @@ class Config(object): This is a mutable stand-in to allow test set-up to modify global state. """ + CLIENT = None INSTANCE = None TABLE = None @@ -70,21 +73,22 @@ class Config(object): def setup_module(): client = Config.CLIENT = Client(admin=True) - Config.INSTANCE = client.instance(INSTANCE_ID, - instance_type=PRODUCTION, - labels=LABELS) - cluster = Config.INSTANCE.cluster(CLUSTER_ID, - location_id=LOCATION_ID, - serve_nodes=SERVER_NODES, - default_storage_type=STORAGE_TYPE) + Config.INSTANCE = client.instance( + INSTANCE_ID, instance_type=PRODUCTION, labels=LABELS + ) + cluster = Config.INSTANCE.cluster( + CLUSTER_ID, + location_id=LOCATION_ID, + serve_nodes=SERVER_NODES, + default_storage_type=STORAGE_TYPE, + ) operation = Config.INSTANCE.create(clusters=[cluster]) # We want to make sure the operation completes. operation.result(timeout=100) Config.TABLE = Config.INSTANCE.table(TABLE_ID) Config.TABLE.create() gc_rule = column_family.MaxVersionsGCRule(2) - column_family1 = Config.TABLE.column_family(COLUMN_FAMILY_ID, - gc_rule=gc_rule) + column_family1 = Config.TABLE.column_family(COLUMN_FAMILY_ID, gc_rule=gc_rule) column_family1.create() @@ -108,7 +112,7 @@ def test_bigtable_create_table(): table2 = instance.table("table_id2") # Define the GC policy to retain only the most recent 2 versions. 
max_versions_rule = column_family.MaxVersionsGCRule(2) - table2.create(column_families={'cf1': max_versions_rule}) + table2.create(column_families={"cf1": max_versions_rule}) # [END bigtable_create_table] assert table1.exists() @@ -126,14 +130,13 @@ def test_bigtable_sample_row_keys(): table = instance.table("table_id1_samplerow") # [END bigtable_sample_row_keys] - initial_split_keys = [b'split_key_1', b'split_key_10', - b'split_key_20'] + initial_split_keys = [b"split_key_1", b"split_key_10", b"split_key_20"] table.create(initial_split_keys=initial_split_keys) # [START bigtable_sample_row_keys] data = table.sample_row_keys() actual_keys, offset = zip(*[(rk.row_key, rk.offset_bytes) for rk in data]) # [END bigtable_sample_row_keys] - initial_split_keys.append(b'') + initial_split_keys.append(b"") assert list(actual_keys) == initial_split_keys table.delete() @@ -145,23 +148,29 @@ def test_bigtable_write_read_drop_truncate(): client = Client(admin=True) instance = client.instance(INSTANCE_ID) table = instance.table(TABLE_ID) - row_keys = [b'row_key_1', b'row_key_2', b'row_key_3', b'row_key_4', - b'row_key_20', b'row_key_22', b'row_key_200'] - col_name = b'col-name1' + row_keys = [ + b"row_key_1", + b"row_key_2", + b"row_key_3", + b"row_key_4", + b"row_key_20", + b"row_key_22", + b"row_key_200", + ] + col_name = b"col-name1" rows = [] for i, row_key in enumerate(row_keys): - value = 'value_{}'.format(i).encode() + value = "value_{}".format(i).encode() row = table.row(row_key) - row.set_cell(COLUMN_FAMILY_ID, - col_name, - value, - timestamp=datetime.datetime.utcnow()) + row.set_cell( + COLUMN_FAMILY_ID, col_name, value, timestamp=datetime.datetime.utcnow() + ) rows.append(row) response = table.mutate_rows(rows) # validate that all rows written successfully for i, status in enumerate(response): if status.code is not 0: - print('Row number {} failed to write'.format(i)) + print("Row number {} failed to write".format(i)) # [END bigtable_mutate_rows] assert len(response) == len(rows) # [START bigtable_read_row] @@ -170,10 +179,10 @@ def test_bigtable_write_read_drop_truncate(): client = Client(admin=True) instance = client.instance(INSTANCE_ID) table = instance.table(TABLE_ID) - row_key = 'row_key_1' + row_key = "row_key_1" row = table.read_row(row_key) # [END bigtable_read_row] - assert row.row_key.decode('utf-8') == row_key + assert row.row_key.decode("utf-8") == row_key # [START bigtable_read_rows] from google.cloud.bigtable import Client @@ -192,13 +201,12 @@ def test_bigtable_write_read_drop_truncate(): client = Client(admin=True) instance = client.instance(INSTANCE_ID) table = instance.table(TABLE_ID) - row_key_prefix = b'row_key_2' + row_key_prefix = b"row_key_2" table.drop_by_prefix(row_key_prefix, timeout=200) # [END bigtable_drop_by_prefix] - dropped_row_keys = [b'row_key_2', b'row_key_20', - b'row_key_22', b'row_key_200'] + dropped_row_keys = [b"row_key_2", b"row_key_20", b"row_key_22", b"row_key_200"] for row in table.read_rows(): - assert row.row_key.decode('utf-8') not in dropped_row_keys + assert row.row_key.decode("utf-8") not in dropped_row_keys # [START bigtable_truncate_table] from google.cloud.bigtable import Client @@ -226,26 +234,31 @@ def test_bigtable_mutations_batcher(): # Below code will be used while creating batcher.py snippets. # So not removing this code as of now. 
- row_keys = [b'row_key_1', b'row_key_2', b'row_key_3', b'row_key_4', - b'row_key_20', b'row_key_22', b'row_key_200'] - column_name = 'column_name'.encode() + row_keys = [ + b"row_key_1", + b"row_key_2", + b"row_key_3", + b"row_key_4", + b"row_key_20", + b"row_key_22", + b"row_key_200", + ] + column_name = "column_name".encode() # Add a single row row_key = row_keys[0] row = table.row(row_key) - row.set_cell(COLUMN_FAMILY_ID, - column_name, - 'value-0', - timestamp=datetime.datetime.utcnow()) + row.set_cell( + COLUMN_FAMILY_ID, column_name, "value-0", timestamp=datetime.datetime.utcnow() + ) batcher.mutate(row) # Add a collections of rows rows = [] for i in range(1, len(row_keys)): row = table.row(row_keys[i]) - value = 'value_{}'.format(i).encode() - row.set_cell(COLUMN_FAMILY_ID, - column_name, - value, - timestamp=datetime.datetime.utcnow()) + value = "value_{}".format(i).encode() + row.set_cell( + COLUMN_FAMILY_ID, column_name, value, timestamp=datetime.datetime.utcnow() + ) rows.append(row) batcher.mutate_rows(rows) # batcher will flush current batch if it @@ -287,6 +300,7 @@ def test_bigtable_list_tables(): def test_bigtable_table_name(): import re + # [START bigtable_table_name] from google.cloud.bigtable import Client @@ -296,9 +310,11 @@ def test_bigtable_table_name(): table = instance.table(TABLE_ID) table_name = table.name # [END bigtable_table_name] - _table_name_re = re.compile(r'^projects/(?P[^/]+)/' - r'instances/(?P[^/]+)/tables/' - r'(?P[_a-zA-Z0-9][-_.a-zA-Z0-9]*)$') + _table_name_re = re.compile( + r"^projects/(?P[^/]+)/" + r"instances/(?P[^/]+)/tables/" + r"(?P[_a-zA-Z0-9][-_.a-zA-Z0-9]*)$" + ) assert _table_name_re.match(table_name) @@ -368,7 +384,7 @@ def test_bigtable_table_row(): instance = client.instance(INSTANCE_ID) table = instance.table(TABLE_ID) - row_keys = [b'row_key_1', b'row_key_2'] + row_keys = [b"row_key_1", b"row_key_2"] row1_obj = table.row(row_keys[0]) row2_obj = table.row(row_keys[1]) # [END bigtable_table_row] @@ -387,5 +403,5 @@ def test_bigtable_table_row(): table.truncate(timeout=300) -if __name__ == '__main__': +if __name__ == "__main__": pytest.main() From 6ce83826fd228be1ce1f474929223c67b686c7c7 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Sat, 5 Jan 2019 13:44:16 -0500 Subject: [PATCH 223/892] Bigtable: manual synth (#7060) * Rationalize regex for black. * GAPIC generation fixes: * Pick up stub docstring fix from GAPIC generator. * Pick up order-of-enum fix from GAPIC generator. Closes #7051. 
--- .../google-cloud-bigtable/docs/snippets.py | 6 +- .../cloud/bigtable_admin_v2/gapic/enums.py | 166 +++++++++--------- .../bigtable_instance_admin_grpc_transport.py | 38 ++-- .../bigtable_table_admin_grpc_transport.py | 26 +-- .../transports/bigtable_grpc_transport.py | 12 +- packages/google-cloud-bigtable/synth.metadata | 10 +- 6 files changed, 129 insertions(+), 129 deletions(-) diff --git a/packages/google-cloud-bigtable/docs/snippets.py b/packages/google-cloud-bigtable/docs/snippets.py index d07229aef6b5..c1e1eb4a8820 100644 --- a/packages/google-cloud-bigtable/docs/snippets.py +++ b/packages/google-cloud-bigtable/docs/snippets.py @@ -427,6 +427,7 @@ def test_bigtable_set_iam_policy_then_get_iam_policy(): def test_bigtable_project_path(): import re + # [START bigtable_project_path] from google.cloud.bigtable import Client @@ -434,9 +435,8 @@ def test_bigtable_project_path(): project_path = client.project_path # [END bigtable_project_path] - _project_path_re = re.compile(r'^projects/' - r'(?P' - r'[_a-zA-Z0-9][-_.a-zA-Z0-9]*)$') + _project_path = r"^projects/(?P[_a-zA-Z0-9][-_.a-zA-Z0-9]*)$" + _project_path_re = re.compile(_project_path) assert _project_path_re.match(project_path) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py index 6eea1b641ce9..1fe61b6980b5 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py @@ -33,82 +33,31 @@ class StorageType(enum.IntEnum): HDD = 2 -class Table(object): - class TimestampGranularity(enum.IntEnum): - """ - Possible timestamp granularities to use when keeping multiple versions - of data in a table. - - Attributes: - TIMESTAMP_GRANULARITY_UNSPECIFIED (int): The user did not specify a granularity. Should not be returned. - When specified during table creation, MILLIS will be used. - MILLIS (int): The table keeps data versioned at a granularity of 1ms. - """ - - TIMESTAMP_GRANULARITY_UNSPECIFIED = 0 - MILLIS = 1 - - class View(enum.IntEnum): - """ - Defines a view over a table's fields. - - Attributes: - VIEW_UNSPECIFIED (int): Uses the default view for each method as documented in its request. - NAME_ONLY (int): Only populates ``name``. - SCHEMA_VIEW (int): Only populates ``name`` and fields related to the table's schema. - REPLICATION_VIEW (int): Only populates ``name`` and fields related to the table's replication - state. - FULL (int): Populates all fields. - """ - - VIEW_UNSPECIFIED = 0 - NAME_ONLY = 1 - SCHEMA_VIEW = 2 - REPLICATION_VIEW = 3 - FULL = 4 - - class ClusterState(object): - class ReplicationState(enum.IntEnum): - """ - Table replication states. - - Attributes: - STATE_NOT_KNOWN (int): The replication state of the table is unknown in this cluster. - INITIALIZING (int): The cluster was recently created, and the table must finish copying - over pre-existing data from other clusters before it can begin - receiving live replication updates and serving Data API requests. - PLANNED_MAINTENANCE (int): The table is temporarily unable to serve Data API requests from this - cluster due to planned internal maintenance. - UNPLANNED_MAINTENANCE (int): The table is temporarily unable to serve Data API requests from this - cluster due to unplanned or emergency maintenance. - READY (int): The table can serve Data API requests from this cluster. 
Depending on - replication delay, reads may not immediately reflect the state of the - table in other clusters. - """ - - STATE_NOT_KNOWN = 0 - INITIALIZING = 1 - PLANNED_MAINTENANCE = 2 - UNPLANNED_MAINTENANCE = 3 - READY = 4 - - -class Snapshot(object): +class Cluster(object): class State(enum.IntEnum): """ - Possible states of a snapshot. + Possible states of a cluster. Attributes: - STATE_NOT_KNOWN (int): The state of the snapshot could not be determined. - READY (int): The snapshot has been successfully created and can serve all requests. - CREATING (int): The snapshot is currently being created, and may be destroyed if the - creation process encounters an error. A snapshot may not be restored to a - table while it is being created. + STATE_NOT_KNOWN (int): The state of the cluster could not be determined. + READY (int): The cluster has been successfully created and is ready to serve requests. + CREATING (int): The cluster is currently being created, and may be destroyed + if the creation process encounters an error. + A cluster may not be able to serve requests while being created. + RESIZING (int): The cluster is currently being resized, and may revert to its previous + node count if the process encounters an error. + A cluster is still capable of serving requests while being resized, + but may exhibit performance as if its number of allocated nodes is + between the starting and requested states. + DISABLED (int): The cluster has no backing nodes. The data (tables) still + exist, but no operations can be performed on the cluster. """ STATE_NOT_KNOWN = 0 READY = 1 CREATING = 2 + RESIZING = 3 + DISABLED = 4 class Instance(object): @@ -151,28 +100,79 @@ class Type(enum.IntEnum): DEVELOPMENT = 2 -class Cluster(object): +class Snapshot(object): class State(enum.IntEnum): """ - Possible states of a cluster. + Possible states of a snapshot. Attributes: - STATE_NOT_KNOWN (int): The state of the cluster could not be determined. - READY (int): The cluster has been successfully created and is ready to serve requests. - CREATING (int): The cluster is currently being created, and may be destroyed - if the creation process encounters an error. - A cluster may not be able to serve requests while being created. - RESIZING (int): The cluster is currently being resized, and may revert to its previous - node count if the process encounters an error. - A cluster is still capable of serving requests while being resized, - but may exhibit performance as if its number of allocated nodes is - between the starting and requested states. - DISABLED (int): The cluster has no backing nodes. The data (tables) still - exist, but no operations can be performed on the cluster. + STATE_NOT_KNOWN (int): The state of the snapshot could not be determined. + READY (int): The snapshot has been successfully created and can serve all requests. + CREATING (int): The snapshot is currently being created, and may be destroyed if the + creation process encounters an error. A snapshot may not be restored to a + table while it is being created. """ STATE_NOT_KNOWN = 0 READY = 1 CREATING = 2 - RESIZING = 3 - DISABLED = 4 + + +class Table(object): + class TimestampGranularity(enum.IntEnum): + """ + Possible timestamp granularities to use when keeping multiple versions + of data in a table. + + Attributes: + TIMESTAMP_GRANULARITY_UNSPECIFIED (int): The user did not specify a granularity. Should not be returned. + When specified during table creation, MILLIS will be used. 
+ MILLIS (int): The table keeps data versioned at a granularity of 1ms. + """ + + TIMESTAMP_GRANULARITY_UNSPECIFIED = 0 + MILLIS = 1 + + class View(enum.IntEnum): + """ + Defines a view over a table's fields. + + Attributes: + VIEW_UNSPECIFIED (int): Uses the default view for each method as documented in its request. + NAME_ONLY (int): Only populates ``name``. + SCHEMA_VIEW (int): Only populates ``name`` and fields related to the table's schema. + REPLICATION_VIEW (int): Only populates ``name`` and fields related to the table's replication + state. + FULL (int): Populates all fields. + """ + + VIEW_UNSPECIFIED = 0 + NAME_ONLY = 1 + SCHEMA_VIEW = 2 + REPLICATION_VIEW = 3 + FULL = 4 + + class ClusterState(object): + class ReplicationState(enum.IntEnum): + """ + Table replication states. + + Attributes: + STATE_NOT_KNOWN (int): The replication state of the table is unknown in this cluster. + INITIALIZING (int): The cluster was recently created, and the table must finish copying + over pre-existing data from other clusters before it can begin + receiving live replication updates and serving Data API requests. + PLANNED_MAINTENANCE (int): The table is temporarily unable to serve Data API requests from this + cluster due to planned internal maintenance. + UNPLANNED_MAINTENANCE (int): The table is temporarily unable to serve Data API requests from this + cluster due to unplanned or emergency maintenance. + READY (int): The table can serve Data API requests from this cluster. Depending on + replication delay, reads may not immediately reflect the state of the + table in other clusters. + """ + + STATE_NOT_KNOWN = 0 + INITIALIZING = 1 + PLANNED_MAINTENANCE = 2 + UNPLANNED_MAINTENANCE = 3 + READY = 4 diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py index 72d269cba08e..c8f3e3c0c6d0 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py @@ -119,7 +119,7 @@ def channel(self): @property def create_instance(self): - """Return the gRPC stub for {$apiMethod.name}. + """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.create_instance`. Create an instance within a project. @@ -132,7 +132,7 @@ def create_instance(self): @property def get_instance(self): - """Return the gRPC stub for {$apiMethod.name}. + """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.get_instance`. Gets information about an instance. @@ -145,7 +145,7 @@ def get_instance(self): @property def list_instances(self): - """Return the gRPC stub for {$apiMethod.name}. + """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.list_instances`. Lists information about instances in a project. @@ -158,7 +158,7 @@ def list_instances(self): @property def update_instance(self): - """Return the gRPC stub for {$apiMethod.name}. + """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.update_instance`. Updates an instance within a project. @@ -171,7 +171,7 @@ def update_instance(self): @property def partial_update_instance(self): - """Return the gRPC stub for {$apiMethod.name}. + """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.partial_update_instance`. Partially updates an instance within a project. 
@@ -184,7 +184,7 @@ def partial_update_instance(self): @property def delete_instance(self): - """Return the gRPC stub for {$apiMethod.name}. + """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.delete_instance`. Delete an instance from a project. @@ -197,7 +197,7 @@ def delete_instance(self): @property def create_cluster(self): - """Return the gRPC stub for {$apiMethod.name}. + """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.create_cluster`. Creates a cluster within an instance. @@ -210,7 +210,7 @@ def create_cluster(self): @property def get_cluster(self): - """Return the gRPC stub for {$apiMethod.name}. + """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.get_cluster`. Gets information about a cluster. @@ -223,7 +223,7 @@ def get_cluster(self): @property def list_clusters(self): - """Return the gRPC stub for {$apiMethod.name}. + """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.list_clusters`. Lists information about clusters in an instance. @@ -236,7 +236,7 @@ def list_clusters(self): @property def update_cluster(self): - """Return the gRPC stub for {$apiMethod.name}. + """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.update_cluster`. Updates a cluster within an instance. @@ -249,7 +249,7 @@ def update_cluster(self): @property def delete_cluster(self): - """Return the gRPC stub for {$apiMethod.name}. + """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.delete_cluster`. Deletes a cluster from an instance. @@ -262,7 +262,7 @@ def delete_cluster(self): @property def create_app_profile(self): - """Return the gRPC stub for {$apiMethod.name}. + """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.create_app_profile`. Creates an app profile within an instance. @@ -275,7 +275,7 @@ def create_app_profile(self): @property def get_app_profile(self): - """Return the gRPC stub for {$apiMethod.name}. + """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.get_app_profile`. Gets information about an app profile. @@ -288,7 +288,7 @@ def get_app_profile(self): @property def list_app_profiles(self): - """Return the gRPC stub for {$apiMethod.name}. + """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.list_app_profiles`. Lists information about app profiles in an instance. @@ -301,7 +301,7 @@ def list_app_profiles(self): @property def update_app_profile(self): - """Return the gRPC stub for {$apiMethod.name}. + """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.update_app_profile`. Updates an app profile within an instance. @@ -314,7 +314,7 @@ def update_app_profile(self): @property def delete_app_profile(self): - """Return the gRPC stub for {$apiMethod.name}. + """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.delete_app_profile`. Deletes an app profile from an instance. @@ -327,7 +327,7 @@ def delete_app_profile(self): @property def get_iam_policy(self): - """Return the gRPC stub for {$apiMethod.name}. + """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.get_iam_policy`. Gets the access control policy for an instance resource. Returns an empty policy if an instance exists but does not have a policy set. @@ -341,7 +341,7 @@ def get_iam_policy(self): @property def set_iam_policy(self): - """Return the gRPC stub for {$apiMethod.name}. + """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.set_iam_policy`. Sets the access control policy on an instance resource. Replaces any existing policy. 
@@ -355,7 +355,7 @@ def set_iam_policy(self): @property def test_iam_permissions(self): - """Return the gRPC stub for {$apiMethod.name}. + """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.test_iam_permissions`. Returns permissions that the caller has on the specified instance resource. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py index 4f318857e62f..d30127945565 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py @@ -125,7 +125,7 @@ def channel(self): @property def create_table(self): - """Return the gRPC stub for {$apiMethod.name}. + """Return the gRPC stub for :meth:`BigtableTableAdminClient.create_table`. Creates a new table in the specified instance. The table can be created with a full set of initial column families, @@ -140,7 +140,7 @@ def create_table(self): @property def create_table_from_snapshot(self): - """Return the gRPC stub for {$apiMethod.name}. + """Return the gRPC stub for :meth:`BigtableTableAdminClient.create_table_from_snapshot`. Creates a new table from the specified snapshot. The target table must not exist. The snapshot and the table must be in the same instance. @@ -160,7 +160,7 @@ def create_table_from_snapshot(self): @property def list_tables(self): - """Return the gRPC stub for {$apiMethod.name}. + """Return the gRPC stub for :meth:`BigtableTableAdminClient.list_tables`. Lists all tables served from a specified instance. @@ -173,7 +173,7 @@ def list_tables(self): @property def get_table(self): - """Return the gRPC stub for {$apiMethod.name}. + """Return the gRPC stub for :meth:`BigtableTableAdminClient.get_table`. Gets metadata information about the specified table. @@ -186,7 +186,7 @@ def get_table(self): @property def delete_table(self): - """Return the gRPC stub for {$apiMethod.name}. + """Return the gRPC stub for :meth:`BigtableTableAdminClient.delete_table`. Permanently deletes a specified table and all of its data. @@ -199,7 +199,7 @@ def delete_table(self): @property def modify_column_families(self): - """Return the gRPC stub for {$apiMethod.name}. + """Return the gRPC stub for :meth:`BigtableTableAdminClient.modify_column_families`. Performs a series of column family modifications on the specified table. Either all or none of the modifications will occur before this method @@ -215,7 +215,7 @@ def modify_column_families(self): @property def drop_row_range(self): - """Return the gRPC stub for {$apiMethod.name}. + """Return the gRPC stub for :meth:`BigtableTableAdminClient.drop_row_range`. Permanently drop/delete a row range from a specified table. The request can specify whether to delete all rows in a table, or only those that match a @@ -230,7 +230,7 @@ def drop_row_range(self): @property def generate_consistency_token(self): - """Return the gRPC stub for {$apiMethod.name}. + """Return the gRPC stub for :meth:`BigtableTableAdminClient.generate_consistency_token`. Generates a consistency token for a Table, which can be used in CheckConsistency to check whether mutations to the table that finished @@ -246,7 +246,7 @@ def generate_consistency_token(self): @property def check_consistency(self): - """Return the gRPC stub for {$apiMethod.name}. 
+ """Return the gRPC stub for :meth:`BigtableTableAdminClient.check_consistency`. Checks replication consistency based on a consistency token, that is, if replication has caught up based on the conditions specified in the token @@ -261,7 +261,7 @@ def check_consistency(self): @property def snapshot_table(self): - """Return the gRPC stub for {$apiMethod.name}. + """Return the gRPC stub for :meth:`BigtableTableAdminClient.snapshot_table`. Creates a new snapshot in the specified cluster from the specified source table. The cluster and the table must be in the same instance. @@ -281,7 +281,7 @@ def snapshot_table(self): @property def get_snapshot(self): - """Return the gRPC stub for {$apiMethod.name}. + """Return the gRPC stub for :meth:`BigtableTableAdminClient.get_snapshot`. Gets metadata information about the specified snapshot. @@ -300,7 +300,7 @@ def get_snapshot(self): @property def list_snapshots(self): - """Return the gRPC stub for {$apiMethod.name}. + """Return the gRPC stub for :meth:`BigtableTableAdminClient.list_snapshots`. Lists all snapshots associated with the specified cluster. @@ -319,7 +319,7 @@ def list_snapshots(self): @property def delete_snapshot(self): - """Return the gRPC stub for {$apiMethod.name}. + """Return the gRPC stub for :meth:`BigtableTableAdminClient.delete_snapshot`. Permanently deletes the specified snapshot. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py index d9fa267e5da8..ddbf865a7569 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py @@ -108,7 +108,7 @@ def channel(self): @property def read_rows(self): - """Return the gRPC stub for {$apiMethod.name}. + """Return the gRPC stub for :meth:`BigtableClient.read_rows`. Streams back the contents of all requested rows in key order, optionally applying the same Reader filter to each. Depending on their size, @@ -125,7 +125,7 @@ def read_rows(self): @property def sample_row_keys(self): - """Return the gRPC stub for {$apiMethod.name}. + """Return the gRPC stub for :meth:`BigtableClient.sample_row_keys`. Returns a sample of row keys in the table. The returned row keys will delimit contiguous sections of the table of approximately equal size, @@ -141,7 +141,7 @@ def sample_row_keys(self): @property def mutate_row(self): - """Return the gRPC stub for {$apiMethod.name}. + """Return the gRPC stub for :meth:`BigtableClient.mutate_row`. Mutates a row atomically. Cells already present in the row are left unchanged unless explicitly changed by ``mutation``. @@ -155,7 +155,7 @@ def mutate_row(self): @property def mutate_rows(self): - """Return the gRPC stub for {$apiMethod.name}. + """Return the gRPC stub for :meth:`BigtableClient.mutate_rows`. Mutates multiple rows in a batch. Each individual row is mutated atomically as in MutateRow, but the entire batch is not executed @@ -170,7 +170,7 @@ def mutate_rows(self): @property def check_and_mutate_row(self): - """Return the gRPC stub for {$apiMethod.name}. + """Return the gRPC stub for :meth:`BigtableClient.check_and_mutate_row`. Mutates a row atomically based on the output of a predicate Reader filter. @@ -183,7 +183,7 @@ def check_and_mutate_row(self): @property def read_modify_write_row(self): - """Return the gRPC stub for {$apiMethod.name}. 
+ """Return the gRPC stub for :meth:`BigtableClient.read_modify_write_row`. Modifies a row atomically on the server. The method reads the latest existing timestamp and value from the specified columns and writes a new diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index 4c900bde2e0d..1178badf4e41 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -1,19 +1,19 @@ { - "updateTime": "2018-12-08T13:13:19.537274Z", + "updateTime": "2019-01-05T17:45:03.040890Z", "sources": [ { "generator": { "name": "artman", - "version": "0.16.2", - "dockerImage": "googleapis/artman@sha256:2f6b261ee7fe1aedf238991c93a20b3820de37a343d0cacf3e3e9555c2aaf2ea" + "version": "0.16.4", + "dockerImage": "googleapis/artman@sha256:8b45fae963557c3299921037ecbb86f0689f41b1b4aea73408ebc50562cb2857" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "6f6505a69b2b0a1260c93e890d636eefb859e76e", - "internalRef": "224530961" + "sha": "a111a53c0c6722afcd793b64724ceef7862db5b9", + "internalRef": "227896184" } }, { From ebf46d0c5b93967a95e21ea528caecf093e05df1 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot <44816363+yoshi-automation@users.noreply.github.com> Date: Wed, 9 Jan 2019 11:00:36 -0800 Subject: [PATCH 224/892] Protoc-generated serialization update. (#7077) --- .../proto/bigtable_instance_admin_pb2.py | 294 +++++++----------- .../proto/bigtable_table_admin_pb2.py | 243 ++++++--------- .../bigtable_admin_v2/proto/common_pb2.py | 24 +- .../bigtable_admin_v2/proto/instance_pb2.py | 107 ++++--- .../bigtable_admin_v2/proto/table_pb2.py | 148 +++++---- .../cloud/bigtable_v2/proto/bigtable_pb2.py | 174 +++++------ .../cloud/bigtable_v2/proto/data_pb2.py | 182 ++++++----- packages/google-cloud-bigtable/synth.metadata | 10 +- 8 files changed, 540 insertions(+), 642 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py index c110db66b01d..9f486cbb2cbd 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py @@ -8,7 +8,6 @@ from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) @@ -33,6 +32,9 @@ name="google/cloud/bigtable/admin_v2/proto/bigtable_instance_admin.proto", package="google.bigtable.admin.v2", syntax="proto3", + serialized_options=_b( + "\n\034com.google.bigtable.admin.v2B\032BigtableInstanceAdminProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2" + ), serialized_pb=_b( '\nBgoogle/cloud/bigtable/admin_v2/proto/bigtable_instance_admin.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x33google/cloud/bigtable/admin_v2/proto/instance.proto\x1a\x1egoogle/iam/v1/iam_policy.proto\x1a\x1agoogle/iam/v1/policy.proto\x1a#google/longrunning/operations.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a 
google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\x97\x02\n\x15\x43reateInstanceRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x13\n\x0binstance_id\x18\x02 \x01(\t\x12\x34\n\x08instance\x18\x03 \x01(\x0b\x32".google.bigtable.admin.v2.Instance\x12O\n\x08\x63lusters\x18\x04 \x03(\x0b\x32=.google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry\x1aR\n\rClustersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x30\n\x05value\x18\x02 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster:\x02\x38\x01""\n\x12GetInstanceRequest\x12\x0c\n\x04name\x18\x01 \x01(\t":\n\x14ListInstancesRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x12\n\npage_token\x18\x02 \x01(\t"\x81\x01\n\x15ListInstancesResponse\x12\x35\n\tinstances\x18\x01 \x03(\x0b\x32".google.bigtable.admin.v2.Instance\x12\x18\n\x10\x66\x61iled_locations\x18\x02 \x03(\t\x12\x17\n\x0fnext_page_token\x18\x03 \x01(\t"\x85\x01\n\x1cPartialUpdateInstanceRequest\x12\x34\n\x08instance\x18\x01 \x01(\x0b\x32".google.bigtable.admin.v2.Instance\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask"%\n\x15\x44\x65leteInstanceRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"n\n\x14\x43reateClusterRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x12\n\ncluster_id\x18\x02 \x01(\t\x12\x32\n\x07\x63luster\x18\x03 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster"!\n\x11GetClusterRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"9\n\x13ListClustersRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x12\n\npage_token\x18\x02 \x01(\t"~\n\x14ListClustersResponse\x12\x33\n\x08\x63lusters\x18\x01 \x03(\x0b\x32!.google.bigtable.admin.v2.Cluster\x12\x18\n\x10\x66\x61iled_locations\x18\x02 \x03(\t\x12\x17\n\x0fnext_page_token\x18\x03 \x01(\t"$\n\x14\x44\x65leteClusterRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"\xc6\x01\n\x16\x43reateInstanceMetadata\x12I\n\x10original_request\x18\x01 \x01(\x0b\x32/.google.bigtable.admin.v2.CreateInstanceRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xcd\x01\n\x16UpdateInstanceMetadata\x12P\n\x10original_request\x18\x01 \x01(\x0b\x32\x36.google.bigtable.admin.v2.PartialUpdateInstanceRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xc4\x01\n\x15\x43reateClusterMetadata\x12H\n\x10original_request\x18\x01 \x01(\x0b\x32..google.bigtable.admin.v2.CreateClusterRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xb7\x01\n\x15UpdateClusterMetadata\x12;\n\x10original_request\x18\x01 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\x95\x01\n\x17\x43reateAppProfileRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t\x12\x39\n\x0b\x61pp_profile\x18\x03 \x01(\x0b\x32$.google.bigtable.admin.v2.AppProfile\x12\x17\n\x0fignore_warnings\x18\x04 \x01(\x08"$\n\x14GetAppProfileRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"O\n\x16ListAppProfilesRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x11\n\tpage_size\x18\x03 \x01(\x05\x12\x12\n\npage_token\x18\x02 \x01(\t"\x88\x01\n\x17ListAppProfilesResponse\x12:\n\x0c\x61pp_profiles\x18\x01 \x03(\x0b\x32$.google.bigtable.admin.v2.AppProfile\x12\x17\n\x0fnext_page_token\x18\x02 
\x01(\t\x12\x18\n\x10\x66\x61iled_locations\x18\x03 \x03(\t"\x9e\x01\n\x17UpdateAppProfileRequest\x12\x39\n\x0b\x61pp_profile\x18\x01 \x01(\x0b\x32$.google.bigtable.admin.v2.AppProfile\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\x12\x17\n\x0fignore_warnings\x18\x03 \x01(\x08"@\n\x17\x44\x65leteAppProfileRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x17\n\x0fignore_warnings\x18\x02 \x01(\x08"\x1a\n\x18UpdateAppProfileMetadata2\xaa\x17\n\x15\x42igtableInstanceAdmin\x12\x8e\x01\n\x0e\x43reateInstance\x12/.google.bigtable.admin.v2.CreateInstanceRequest\x1a\x1d.google.longrunning.Operation",\x82\xd3\xe4\x93\x02&"!/v2/{parent=projects/*}/instances:\x01*\x12\x8a\x01\n\x0bGetInstance\x12,.google.bigtable.admin.v2.GetInstanceRequest\x1a".google.bigtable.admin.v2.Instance")\x82\xd3\xe4\x93\x02#\x12!/v2/{name=projects/*/instances/*}\x12\x9b\x01\n\rListInstances\x12..google.bigtable.admin.v2.ListInstancesRequest\x1a/.google.bigtable.admin.v2.ListInstancesResponse")\x82\xd3\xe4\x93\x02#\x12!/v2/{parent=projects/*}/instances\x12\x86\x01\n\x0eUpdateInstance\x12".google.bigtable.admin.v2.Instance\x1a".google.bigtable.admin.v2.Instance",\x82\xd3\xe4\x93\x02&\x1a!/v2/{name=projects/*/instances/*}:\x01*\x12\xac\x01\n\x15PartialUpdateInstance\x12\x36.google.bigtable.admin.v2.PartialUpdateInstanceRequest\x1a\x1d.google.longrunning.Operation"<\x82\xd3\xe4\x93\x02\x36\x32*/v2/{instance.name=projects/*/instances/*}:\x08instance\x12\x84\x01\n\x0e\x44\x65leteInstance\x12/.google.bigtable.admin.v2.DeleteInstanceRequest\x1a\x16.google.protobuf.Empty")\x82\xd3\xe4\x93\x02#*!/v2/{name=projects/*/instances/*}\x12\x9d\x01\n\rCreateCluster\x12..google.bigtable.admin.v2.CreateClusterRequest\x1a\x1d.google.longrunning.Operation"=\x82\xd3\xe4\x93\x02\x37",/v2/{parent=projects/*/instances/*}/clusters:\x07\x63luster\x12\x92\x01\n\nGetCluster\x12+.google.bigtable.admin.v2.GetClusterRequest\x1a!.google.bigtable.admin.v2.Cluster"4\x82\xd3\xe4\x93\x02.\x12,/v2/{name=projects/*/instances/*/clusters/*}\x12\xa3\x01\n\x0cListClusters\x12-.google.bigtable.admin.v2.ListClustersRequest\x1a..google.bigtable.admin.v2.ListClustersResponse"4\x82\xd3\xe4\x93\x02.\x12,/v2/{parent=projects/*/instances/*}/clusters\x12\x8a\x01\n\rUpdateCluster\x12!.google.bigtable.admin.v2.Cluster\x1a\x1d.google.longrunning.Operation"7\x82\xd3\xe4\x93\x02\x31\x1a,/v2/{name=projects/*/instances/*/clusters/*}:\x01*\x12\x8d\x01\n\rDeleteCluster\x12..google.bigtable.admin.v2.DeleteClusterRequest\x1a\x16.google.protobuf.Empty"4\x82\xd3\xe4\x93\x02.*,/v2/{name=projects/*/instances/*/clusters/*}\x12\xb1\x01\n\x10\x43reateAppProfile\x12\x31.google.bigtable.admin.v2.CreateAppProfileRequest\x1a$.google.bigtable.admin.v2.AppProfile"D\x82\xd3\xe4\x93\x02>"//v2/{parent=projects/*/instances/*}/appProfiles:\x0b\x61pp_profile\x12\x9e\x01\n\rGetAppProfile\x12..google.bigtable.admin.v2.GetAppProfileRequest\x1a$.google.bigtable.admin.v2.AppProfile"7\x82\xd3\xe4\x93\x02\x31\x12//v2/{name=projects/*/instances/*/appProfiles/*}\x12\xaf\x01\n\x0fListAppProfiles\x12\x30.google.bigtable.admin.v2.ListAppProfilesRequest\x1a\x31.google.bigtable.admin.v2.ListAppProfilesResponse"7\x82\xd3\xe4\x93\x02\x31\x12//v2/{parent=projects/*/instances/*}/appProfiles\x12\xb6\x01\n\x10UpdateAppProfile\x12\x31.google.bigtable.admin.v2.UpdateAppProfileRequest\x1a\x1d.google.longrunning.Operation"P\x82\xd3\xe4\x93\x02J2;/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}:\x0b\x61pp_profile\x12\x96\x01\n\x10\x44\x65leteAppProfile\x12\x31.google.bigtable.admin.v2.DeleteAppPr
ofileRequest\x1a\x16.google.protobuf.Empty"7\x82\xd3\xe4\x93\x02\x31*//v2/{name=projects/*/instances/*/appProfiles/*}\x12\x88\x01\n\x0cGetIamPolicy\x12".google.iam.v1.GetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"=\x82\xd3\xe4\x93\x02\x37"2/v2/{resource=projects/*/instances/*}:getIamPolicy:\x01*\x12\x88\x01\n\x0cSetIamPolicy\x12".google.iam.v1.SetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"=\x82\xd3\xe4\x93\x02\x37"2/v2/{resource=projects/*/instances/*}:setIamPolicy:\x01*\x12\xae\x01\n\x12TestIamPermissions\x12(.google.iam.v1.TestIamPermissionsRequest\x1a).google.iam.v1.TestIamPermissionsResponse"C\x82\xd3\xe4\x93\x02="8/v2/{resource=projects/*/instances/*}:testIamPermissions:\x01*B\xbd\x01\n\x1c\x63om.google.bigtable.admin.v2B\x1a\x42igtableInstanceAdminProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2b\x06proto3' ), @@ -71,7 +73,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -89,14 +91,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b("8\001")), + serialized_options=_b("8\001"), is_extendable=False, syntax="proto3", extension_ranges=[], @@ -127,7 +129,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -145,7 +147,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -163,7 +165,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -181,14 +183,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[_CREATEINSTANCEREQUEST_CLUSTERSENTRY], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -220,14 +222,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ) ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -259,7 +261,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -277,14 +279,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -316,7 +318,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -334,7 +336,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -352,14 +354,14 @@ containing_type=None, is_extension=False, 
extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -391,7 +393,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -409,14 +411,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -448,14 +450,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ) ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -487,7 +489,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -505,7 +507,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -523,14 +525,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -562,14 +564,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ) ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -601,7 +603,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -619,14 +621,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -658,7 +660,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -676,7 +678,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -694,14 +696,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -733,14 +735,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ) ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -772,7 +774,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), 
_descriptor.FieldDescriptor( @@ -790,7 +792,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -808,14 +810,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -847,7 +849,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -865,7 +867,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -883,14 +885,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -922,7 +924,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -940,7 +942,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -958,14 +960,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -997,7 +999,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1015,7 +1017,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1033,14 +1035,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -1072,7 +1074,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1090,7 +1092,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1108,7 +1110,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1126,14 +1128,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -1165,14 +1167,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ) ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, 
is_extendable=False, syntax="proto3", extension_ranges=[], @@ -1204,7 +1206,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1222,7 +1224,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1240,14 +1242,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -1279,7 +1281,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1297,7 +1299,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1315,14 +1317,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -1354,7 +1356,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1372,7 +1374,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1390,14 +1392,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -1429,7 +1431,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1447,14 +1449,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -1474,7 +1476,7 @@ extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -2130,24 +2132,15 @@ _sym_db.RegisterMessage(UpdateAppProfileMetadata) -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions( - descriptor_pb2.FileOptions(), - _b( - "\n\034com.google.bigtable.admin.v2B\032BigtableInstanceAdminProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2" - ), -) -_CREATEINSTANCEREQUEST_CLUSTERSENTRY.has_options = True -_CREATEINSTANCEREQUEST_CLUSTERSENTRY._options = _descriptor._ParseOptions( - descriptor_pb2.MessageOptions(), _b("8\001") -) +DESCRIPTOR._options = None +_CREATEINSTANCEREQUEST_CLUSTERSENTRY._options = None _BIGTABLEINSTANCEADMIN = _descriptor.ServiceDescriptor( name="BigtableInstanceAdmin", full_name="google.bigtable.admin.v2.BigtableInstanceAdmin", file=DESCRIPTOR, index=0, - 
options=None, + serialized_options=None, serialized_start=2889, serialized_end=5875, methods=[ @@ -2158,9 +2151,8 @@ containing_service=None, input_type=_CREATEINSTANCEREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - options=_descriptor._ParseOptions( - descriptor_pb2.MethodOptions(), - _b('\202\323\344\223\002&"!/v2/{parent=projects/*}/instances:\001*'), + serialized_options=_b( + '\202\323\344\223\002&"!/v2/{parent=projects/*}/instances:\001*' ), ), _descriptor.MethodDescriptor( @@ -2170,9 +2162,8 @@ containing_service=None, input_type=_GETINSTANCEREQUEST, output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._INSTANCE, - options=_descriptor._ParseOptions( - descriptor_pb2.MethodOptions(), - _b("\202\323\344\223\002#\022!/v2/{name=projects/*/instances/*}"), + serialized_options=_b( + "\202\323\344\223\002#\022!/v2/{name=projects/*/instances/*}" ), ), _descriptor.MethodDescriptor( @@ -2182,9 +2173,8 @@ containing_service=None, input_type=_LISTINSTANCESREQUEST, output_type=_LISTINSTANCESRESPONSE, - options=_descriptor._ParseOptions( - descriptor_pb2.MethodOptions(), - _b("\202\323\344\223\002#\022!/v2/{parent=projects/*}/instances"), + serialized_options=_b( + "\202\323\344\223\002#\022!/v2/{parent=projects/*}/instances" ), ), _descriptor.MethodDescriptor( @@ -2194,9 +2184,8 @@ containing_service=None, input_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._INSTANCE, output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._INSTANCE, - options=_descriptor._ParseOptions( - descriptor_pb2.MethodOptions(), - _b("\202\323\344\223\002&\032!/v2/{name=projects/*/instances/*}:\001*"), + serialized_options=_b( + "\202\323\344\223\002&\032!/v2/{name=projects/*/instances/*}:\001*" ), ), _descriptor.MethodDescriptor( @@ -2206,11 +2195,8 @@ containing_service=None, input_type=_PARTIALUPDATEINSTANCEREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - options=_descriptor._ParseOptions( - descriptor_pb2.MethodOptions(), - _b( - "\202\323\344\223\00262*/v2/{instance.name=projects/*/instances/*}:\010instance" - ), + serialized_options=_b( + "\202\323\344\223\00262*/v2/{instance.name=projects/*/instances/*}:\010instance" ), ), _descriptor.MethodDescriptor( @@ -2220,9 +2206,8 @@ containing_service=None, input_type=_DELETEINSTANCEREQUEST, output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - options=_descriptor._ParseOptions( - descriptor_pb2.MethodOptions(), - _b("\202\323\344\223\002#*!/v2/{name=projects/*/instances/*}"), + serialized_options=_b( + "\202\323\344\223\002#*!/v2/{name=projects/*/instances/*}" ), ), _descriptor.MethodDescriptor( @@ -2232,11 +2217,8 @@ containing_service=None, input_type=_CREATECLUSTERREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - options=_descriptor._ParseOptions( - descriptor_pb2.MethodOptions(), - _b( - '\202\323\344\223\0027",/v2/{parent=projects/*/instances/*}/clusters:\007cluster' - ), + serialized_options=_b( + '\202\323\344\223\0027",/v2/{parent=projects/*/instances/*}/clusters:\007cluster' ), ), _descriptor.MethodDescriptor( @@ -2246,11 +2228,8 @@ containing_service=None, input_type=_GETCLUSTERREQUEST, output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._CLUSTER, - options=_descriptor._ParseOptions( - descriptor_pb2.MethodOptions(), - _b( - "\202\323\344\223\002.\022,/v2/{name=projects/*/instances/*/clusters/*}" - ), + serialized_options=_b( + 
"\202\323\344\223\002.\022,/v2/{name=projects/*/instances/*/clusters/*}" ), ), _descriptor.MethodDescriptor( @@ -2260,11 +2239,8 @@ containing_service=None, input_type=_LISTCLUSTERSREQUEST, output_type=_LISTCLUSTERSRESPONSE, - options=_descriptor._ParseOptions( - descriptor_pb2.MethodOptions(), - _b( - "\202\323\344\223\002.\022,/v2/{parent=projects/*/instances/*}/clusters" - ), + serialized_options=_b( + "\202\323\344\223\002.\022,/v2/{parent=projects/*/instances/*}/clusters" ), ), _descriptor.MethodDescriptor( @@ -2274,11 +2250,8 @@ containing_service=None, input_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._CLUSTER, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - options=_descriptor._ParseOptions( - descriptor_pb2.MethodOptions(), - _b( - "\202\323\344\223\0021\032,/v2/{name=projects/*/instances/*/clusters/*}:\001*" - ), + serialized_options=_b( + "\202\323\344\223\0021\032,/v2/{name=projects/*/instances/*/clusters/*}:\001*" ), ), _descriptor.MethodDescriptor( @@ -2288,11 +2261,8 @@ containing_service=None, input_type=_DELETECLUSTERREQUEST, output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - options=_descriptor._ParseOptions( - descriptor_pb2.MethodOptions(), - _b( - "\202\323\344\223\002.*,/v2/{name=projects/*/instances/*/clusters/*}" - ), + serialized_options=_b( + "\202\323\344\223\002.*,/v2/{name=projects/*/instances/*/clusters/*}" ), ), _descriptor.MethodDescriptor( @@ -2302,11 +2272,8 @@ containing_service=None, input_type=_CREATEAPPPROFILEREQUEST, output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._APPPROFILE, - options=_descriptor._ParseOptions( - descriptor_pb2.MethodOptions(), - _b( - '\202\323\344\223\002>"//v2/{parent=projects/*/instances/*}/appProfiles:\013app_profile' - ), + serialized_options=_b( + '\202\323\344\223\002>"//v2/{parent=projects/*/instances/*}/appProfiles:\013app_profile' ), ), _descriptor.MethodDescriptor( @@ -2316,11 +2283,8 @@ containing_service=None, input_type=_GETAPPPROFILEREQUEST, output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._APPPROFILE, - options=_descriptor._ParseOptions( - descriptor_pb2.MethodOptions(), - _b( - "\202\323\344\223\0021\022//v2/{name=projects/*/instances/*/appProfiles/*}" - ), + serialized_options=_b( + "\202\323\344\223\0021\022//v2/{name=projects/*/instances/*/appProfiles/*}" ), ), _descriptor.MethodDescriptor( @@ -2330,11 +2294,8 @@ containing_service=None, input_type=_LISTAPPPROFILESREQUEST, output_type=_LISTAPPPROFILESRESPONSE, - options=_descriptor._ParseOptions( - descriptor_pb2.MethodOptions(), - _b( - "\202\323\344\223\0021\022//v2/{parent=projects/*/instances/*}/appProfiles" - ), + serialized_options=_b( + "\202\323\344\223\0021\022//v2/{parent=projects/*/instances/*}/appProfiles" ), ), _descriptor.MethodDescriptor( @@ -2344,11 +2305,8 @@ containing_service=None, input_type=_UPDATEAPPPROFILEREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - options=_descriptor._ParseOptions( - descriptor_pb2.MethodOptions(), - _b( - "\202\323\344\223\002J2;/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}:\013app_profile" - ), + serialized_options=_b( + "\202\323\344\223\002J2;/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}:\013app_profile" ), ), _descriptor.MethodDescriptor( @@ -2358,11 +2316,8 @@ containing_service=None, input_type=_DELETEAPPPROFILEREQUEST, output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - options=_descriptor._ParseOptions( - 
descriptor_pb2.MethodOptions(), - _b( - "\202\323\344\223\0021*//v2/{name=projects/*/instances/*/appProfiles/*}" - ), + serialized_options=_b( + "\202\323\344\223\0021*//v2/{name=projects/*/instances/*/appProfiles/*}" ), ), _descriptor.MethodDescriptor( @@ -2372,11 +2327,8 @@ containing_service=None, input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._GETIAMPOLICYREQUEST, output_type=google_dot_iam_dot_v1_dot_policy__pb2._POLICY, - options=_descriptor._ParseOptions( - descriptor_pb2.MethodOptions(), - _b( - '\202\323\344\223\0027"2/v2/{resource=projects/*/instances/*}:getIamPolicy:\001*' - ), + serialized_options=_b( + '\202\323\344\223\0027"2/v2/{resource=projects/*/instances/*}:getIamPolicy:\001*' ), ), _descriptor.MethodDescriptor( @@ -2386,11 +2338,8 @@ containing_service=None, input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._SETIAMPOLICYREQUEST, output_type=google_dot_iam_dot_v1_dot_policy__pb2._POLICY, - options=_descriptor._ParseOptions( - descriptor_pb2.MethodOptions(), - _b( - '\202\323\344\223\0027"2/v2/{resource=projects/*/instances/*}:setIamPolicy:\001*' - ), + serialized_options=_b( + '\202\323\344\223\0027"2/v2/{resource=projects/*/instances/*}:setIamPolicy:\001*' ), ), _descriptor.MethodDescriptor( @@ -2400,11 +2349,8 @@ containing_service=None, input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._TESTIAMPERMISSIONSREQUEST, output_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._TESTIAMPERMISSIONSRESPONSE, - options=_descriptor._ParseOptions( - descriptor_pb2.MethodOptions(), - _b( - '\202\323\344\223\002="8/v2/{resource=projects/*/instances/*}:testIamPermissions:\001*' - ), + serialized_options=_b( + '\202\323\344\223\002="8/v2/{resource=projects/*/instances/*}:testIamPermissions:\001*' ), ), ], diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py index 6938656fca6f..4a360a67d508 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py @@ -8,7 +8,6 @@ from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) @@ -31,6 +30,9 @@ name="google/cloud/bigtable/admin_v2/proto/bigtable_table_admin.proto", package="google.bigtable.admin.v2", syntax="proto3", + serialized_options=_b( + "\n\034com.google.bigtable.admin.v2B\027BigtableTableAdminProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2" + ), serialized_pb=_b( '\n?google/cloud/bigtable/admin_v2/proto/bigtable_table_admin.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x30google/cloud/bigtable/admin_v2/proto/table.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xc8\x01\n\x12\x43reateTableRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x10\n\x08table_id\x18\x02 \x01(\t\x12.\n\x05table\x18\x03 \x01(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12J\n\x0einitial_splits\x18\x04 \x03(\x0b\x32\x32.google.bigtable.admin.v2.CreateTableRequest.Split\x1a\x14\n\x05Split\x12\x0b\n\x03key\x18\x01 
\x01(\x0c"[\n\x1e\x43reateTableFromSnapshotRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x10\n\x08table_id\x18\x02 \x01(\t\x12\x17\n\x0fsource_snapshot\x18\x03 \x01(\t"m\n\x13\x44ropRowRangeRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x18\n\x0erow_key_prefix\x18\x02 \x01(\x0cH\x00\x12$\n\x1a\x64\x65lete_all_data_from_table\x18\x03 \x01(\x08H\x00\x42\x08\n\x06target"~\n\x11ListTablesRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x32\n\x04view\x18\x02 \x01(\x0e\x32$.google.bigtable.admin.v2.Table.View\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t"^\n\x12ListTablesResponse\x12/\n\x06tables\x18\x01 \x03(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"S\n\x0fGetTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x32\n\x04view\x18\x02 \x01(\x0e\x32$.google.bigtable.admin.v2.Table.View""\n\x12\x44\x65leteTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"\xae\x02\n\x1bModifyColumnFamiliesRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12Y\n\rmodifications\x18\x02 \x03(\x0b\x32\x42.google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification\x1a\xa5\x01\n\x0cModification\x12\n\n\x02id\x18\x01 \x01(\t\x12\x38\n\x06\x63reate\x18\x02 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamilyH\x00\x12\x38\n\x06update\x18\x03 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamilyH\x00\x12\x0e\n\x04\x64rop\x18\x04 \x01(\x08H\x00\x42\x05\n\x03mod"/\n\x1fGenerateConsistencyTokenRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"=\n GenerateConsistencyTokenResponse\x12\x19\n\x11\x63onsistency_token\x18\x01 \x01(\t"B\n\x17\x43heckConsistencyRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x19\n\x11\x63onsistency_token\x18\x02 \x01(\t".\n\x18\x43heckConsistencyResponse\x12\x12\n\nconsistent\x18\x01 \x01(\x08"\x87\x01\n\x14SnapshotTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07\x63luster\x18\x02 \x01(\t\x12\x13\n\x0bsnapshot_id\x18\x03 \x01(\t\x12&\n\x03ttl\x18\x04 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x13\n\x0b\x64\x65scription\x18\x05 \x01(\t""\n\x12GetSnapshotRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"M\n\x14ListSnapshotsRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t"g\n\x15ListSnapshotsResponse\x12\x35\n\tsnapshots\x18\x01 \x03(\x0b\x32".google.bigtable.admin.v2.Snapshot\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"%\n\x15\x44\x65leteSnapshotRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"\xc4\x01\n\x15SnapshotTableMetadata\x12H\n\x10original_request\x18\x01 \x01(\x0b\x32..google.bigtable.admin.v2.SnapshotTableRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xd8\x01\n\x1f\x43reateTableFromSnapshotMetadata\x12R\n\x10original_request\x18\x01 \x01(\x0b\x32\x38.google.bigtable.admin.v2.CreateTableFromSnapshotRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp2\xb7\x11\n\x12\x42igtableTableAdmin\x12\x93\x01\n\x0b\x43reateTable\x12,.google.bigtable.admin.v2.CreateTableRequest\x1a\x1f.google.bigtable.admin.v2.Table"5\x82\xd3\xe4\x93\x02/"*/v2/{parent=projects/*/instances/*}/tables:\x01*\x12\xbc\x01\n\x17\x43reateTableFromSnapshot\x12\x38.google.bigtable.admin.v2.CreateTableFromSnapshotRequest\x1a\x1d.google.longrunning.Operation"H\x82\xd3\xe4\x93\x02\x42"=/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot:\x01*\x12\x9b\x01\n\nListTables\x12+.google.bigtable.admin.v2.ListTablesRequest\x1a,.google.bigtable.admin.v2.ListTablesResponse"2\x82\xd3\xe4\x93\x02,\x12*/v2/{parent=projects/*/instances/*}/tables\x12\x8a\x01\n\x08GetTable\x12).google.bigtable.admin.v2.GetTableRequest\x1a\x1f.google.bigtable.admin.v2.Table"2\x82\xd3\xe4\x93\x02,\x12*/v2/{name=projects/*/instances/*/tables/*}\x12\x87\x01\n\x0b\x44\x65leteTable\x12,.google.bigtable.admin.v2.DeleteTableRequest\x1a\x16.google.protobuf.Empty"2\x82\xd3\xe4\x93\x02,**/v2/{name=projects/*/instances/*/tables/*}\x12\xba\x01\n\x14ModifyColumnFamilies\x12\x35.google.bigtable.admin.v2.ModifyColumnFamiliesRequest\x1a\x1f.google.bigtable.admin.v2.Table"J\x82\xd3\xe4\x93\x02\x44"?/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies:\x01*\x12\x99\x01\n\x0c\x44ropRowRange\x12-.google.bigtable.admin.v2.DropRowRangeRequest\x1a\x16.google.protobuf.Empty"B\x82\xd3\xe4\x93\x02<"7/v2/{name=projects/*/instances/*/tables/*}:dropRowRange:\x01*\x12\xe1\x01\n\x18GenerateConsistencyToken\x12\x39.google.bigtable.admin.v2.GenerateConsistencyTokenRequest\x1a:.google.bigtable.admin.v2.GenerateConsistencyTokenResponse"N\x82\xd3\xe4\x93\x02H"C/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken:\x01*\x12\xc1\x01\n\x10\x43heckConsistency\x12\x31.google.bigtable.admin.v2.CheckConsistencyRequest\x1a\x32.google.bigtable.admin.v2.CheckConsistencyResponse"F\x82\xd3\xe4\x93\x02@";/v2/{name=projects/*/instances/*/tables/*}:checkConsistency:\x01*\x12\x9e\x01\n\rSnapshotTable\x12..google.bigtable.admin.v2.SnapshotTableRequest\x1a\x1d.google.longrunning.Operation">\x82\xd3\xe4\x93\x02\x38"3/v2/{name=projects/*/instances/*/tables/*}:snapshot:\x01*\x12\xa1\x01\n\x0bGetSnapshot\x12,.google.bigtable.admin.v2.GetSnapshotRequest\x1a".google.bigtable.admin.v2.Snapshot"@\x82\xd3\xe4\x93\x02:\x12\x38/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}\x12\xb2\x01\n\rListSnapshots\x12..google.bigtable.admin.v2.ListSnapshotsRequest\x1a/.google.bigtable.admin.v2.ListSnapshotsResponse"@\x82\xd3\xe4\x93\x02:\x12\x38/v2/{parent=projects/*/instances/*/clusters/*}/snapshots\x12\x9b\x01\n\x0e\x44\x65leteSnapshot\x12/.google.bigtable.admin.v2.DeleteSnapshotRequest\x1a\x16.google.protobuf.Empty"@\x82\xd3\xe4\x93\x02:*8/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}B\xba\x01\n\x1c\x63om.google.bigtable.admin.v2B\x17\x42igtableTableAdminProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2b\x06proto3' ), @@ -67,14 +69,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ) ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -105,7 +107,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), 
_descriptor.FieldDescriptor( @@ -123,7 +125,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -141,7 +143,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -159,14 +161,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[_CREATETABLEREQUEST_SPLIT], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -198,7 +200,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -216,7 +218,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -234,14 +236,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -273,7 +275,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -291,7 +293,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -309,14 +311,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -356,7 +358,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -374,7 +376,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -392,7 +394,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -410,14 +412,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -449,7 +451,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -467,14 +469,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -506,7 +508,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -524,14 +526,14 @@ containing_type=None, 
is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -563,14 +565,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ) ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -602,7 +604,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -620,7 +622,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -638,7 +640,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -656,14 +658,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -702,7 +704,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -720,14 +722,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -759,14 +761,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ) ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -798,14 +800,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ) ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -837,7 +839,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -855,14 +857,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -894,14 +896,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ) ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -933,7 +935,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -951,7 +953,7 @@ containing_type=None, is_extension=False, extension_scope=None, - 
options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -969,7 +971,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -987,7 +989,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1005,14 +1007,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -1044,14 +1046,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ) ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -1083,7 +1085,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1101,7 +1103,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1119,14 +1121,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -1158,7 +1160,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1176,14 +1178,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -1215,14 +1217,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ) ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -1254,7 +1256,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1272,7 +1274,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1290,14 +1292,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -1329,7 +1331,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1347,7 +1349,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1365,14 +1367,14 @@ containing_type=None, 
is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -2087,20 +2089,14 @@ _sym_db.RegisterMessage(CreateTableFromSnapshotMetadata) -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions( - descriptor_pb2.FileOptions(), - _b( - "\n\034com.google.bigtable.admin.v2B\027BigtableTableAdminProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2" - ), -) +DESCRIPTOR._options = None _BIGTABLETABLEADMIN = _descriptor.ServiceDescriptor( name="BigtableTableAdmin", full_name="google.bigtable.admin.v2.BigtableTableAdmin", file=DESCRIPTOR, index=0, - options=None, + serialized_options=None, serialized_start=2405, serialized_end=4636, methods=[ @@ -2111,11 +2107,8 @@ containing_service=None, input_type=_CREATETABLEREQUEST, output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._TABLE, - options=_descriptor._ParseOptions( - descriptor_pb2.MethodOptions(), - _b( - '\202\323\344\223\002/"*/v2/{parent=projects/*/instances/*}/tables:\001*' - ), + serialized_options=_b( + '\202\323\344\223\002/"*/v2/{parent=projects/*/instances/*}/tables:\001*' ), ), _descriptor.MethodDescriptor( @@ -2125,11 +2118,8 @@ containing_service=None, input_type=_CREATETABLEFROMSNAPSHOTREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - options=_descriptor._ParseOptions( - descriptor_pb2.MethodOptions(), - _b( - '\202\323\344\223\002B"=/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot:\001*' - ), + serialized_options=_b( + '\202\323\344\223\002B"=/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot:\001*' ), ), _descriptor.MethodDescriptor( @@ -2139,11 +2129,8 @@ containing_service=None, input_type=_LISTTABLESREQUEST, output_type=_LISTTABLESRESPONSE, - options=_descriptor._ParseOptions( - descriptor_pb2.MethodOptions(), - _b( - "\202\323\344\223\002,\022*/v2/{parent=projects/*/instances/*}/tables" - ), + serialized_options=_b( + "\202\323\344\223\002,\022*/v2/{parent=projects/*/instances/*}/tables" ), ), _descriptor.MethodDescriptor( @@ -2153,11 +2140,8 @@ containing_service=None, input_type=_GETTABLEREQUEST, output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._TABLE, - options=_descriptor._ParseOptions( - descriptor_pb2.MethodOptions(), - _b( - "\202\323\344\223\002,\022*/v2/{name=projects/*/instances/*/tables/*}" - ), + serialized_options=_b( + "\202\323\344\223\002,\022*/v2/{name=projects/*/instances/*/tables/*}" ), ), _descriptor.MethodDescriptor( @@ -2167,9 +2151,8 @@ containing_service=None, input_type=_DELETETABLEREQUEST, output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - options=_descriptor._ParseOptions( - descriptor_pb2.MethodOptions(), - _b("\202\323\344\223\002,**/v2/{name=projects/*/instances/*/tables/*}"), + serialized_options=_b( + "\202\323\344\223\002,**/v2/{name=projects/*/instances/*/tables/*}" ), ), _descriptor.MethodDescriptor( @@ -2179,11 +2162,8 @@ containing_service=None, input_type=_MODIFYCOLUMNFAMILIESREQUEST, output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._TABLE, - options=_descriptor._ParseOptions( - descriptor_pb2.MethodOptions(), - _b( - '\202\323\344\223\002D"?/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies:\001*' - 
), + serialized_options=_b( + '\202\323\344\223\002D"?/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies:\001*' ), ), _descriptor.MethodDescriptor( @@ -2193,11 +2173,8 @@ containing_service=None, input_type=_DROPROWRANGEREQUEST, output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - options=_descriptor._ParseOptions( - descriptor_pb2.MethodOptions(), - _b( - '\202\323\344\223\002<"7/v2/{name=projects/*/instances/*/tables/*}:dropRowRange:\001*' - ), + serialized_options=_b( + '\202\323\344\223\002<"7/v2/{name=projects/*/instances/*/tables/*}:dropRowRange:\001*' ), ), _descriptor.MethodDescriptor( @@ -2207,11 +2184,8 @@ containing_service=None, input_type=_GENERATECONSISTENCYTOKENREQUEST, output_type=_GENERATECONSISTENCYTOKENRESPONSE, - options=_descriptor._ParseOptions( - descriptor_pb2.MethodOptions(), - _b( - '\202\323\344\223\002H"C/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken:\001*' - ), + serialized_options=_b( + '\202\323\344\223\002H"C/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken:\001*' ), ), _descriptor.MethodDescriptor( @@ -2221,11 +2195,8 @@ containing_service=None, input_type=_CHECKCONSISTENCYREQUEST, output_type=_CHECKCONSISTENCYRESPONSE, - options=_descriptor._ParseOptions( - descriptor_pb2.MethodOptions(), - _b( - '\202\323\344\223\002@";/v2/{name=projects/*/instances/*/tables/*}:checkConsistency:\001*' - ), + serialized_options=_b( + '\202\323\344\223\002@";/v2/{name=projects/*/instances/*/tables/*}:checkConsistency:\001*' ), ), _descriptor.MethodDescriptor( @@ -2235,11 +2206,8 @@ containing_service=None, input_type=_SNAPSHOTTABLEREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - options=_descriptor._ParseOptions( - descriptor_pb2.MethodOptions(), - _b( - '\202\323\344\223\0028"3/v2/{name=projects/*/instances/*/tables/*}:snapshot:\001*' - ), + serialized_options=_b( + '\202\323\344\223\0028"3/v2/{name=projects/*/instances/*/tables/*}:snapshot:\001*' ), ), _descriptor.MethodDescriptor( @@ -2249,11 +2217,8 @@ containing_service=None, input_type=_GETSNAPSHOTREQUEST, output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._SNAPSHOT, - options=_descriptor._ParseOptions( - descriptor_pb2.MethodOptions(), - _b( - "\202\323\344\223\002:\0228/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}" - ), + serialized_options=_b( + "\202\323\344\223\002:\0228/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}" ), ), _descriptor.MethodDescriptor( @@ -2263,11 +2228,8 @@ containing_service=None, input_type=_LISTSNAPSHOTSREQUEST, output_type=_LISTSNAPSHOTSRESPONSE, - options=_descriptor._ParseOptions( - descriptor_pb2.MethodOptions(), - _b( - "\202\323\344\223\002:\0228/v2/{parent=projects/*/instances/*/clusters/*}/snapshots" - ), + serialized_options=_b( + "\202\323\344\223\002:\0228/v2/{parent=projects/*/instances/*/clusters/*}/snapshots" ), ), _descriptor.MethodDescriptor( @@ -2277,11 +2239,8 @@ containing_service=None, input_type=_DELETESNAPSHOTREQUEST, output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - options=_descriptor._ParseOptions( - descriptor_pb2.MethodOptions(), - _b( - "\202\323\344\223\002:*8/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}" - ), + serialized_options=_b( + "\202\323\344\223\002:*8/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}" ), ), ], diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2.py index 
6e8e4ee89b48..bd0a50fe17ae 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2.py @@ -9,7 +9,6 @@ from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) @@ -24,6 +23,9 @@ name="google/cloud/bigtable/admin_v2/proto/common.proto", package="google.bigtable.admin.v2", syntax="proto3", + serialized_options=_b( + "\n\034com.google.bigtable.admin.v2B\013CommonProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2" + ), serialized_pb=_b( "\n1google/cloud/bigtable/admin_v2/proto/common.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x1fgoogle/protobuf/timestamp.proto*=\n\x0bStorageType\x12\x1c\n\x18STORAGE_TYPE_UNSPECIFIED\x10\x00\x12\x07\n\x03SSD\x10\x01\x12\x07\n\x03HDD\x10\x02\x42\xae\x01\n\x1c\x63om.google.bigtable.admin.v2B\x0b\x43ommonProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2b\x06proto3" ), @@ -40,17 +42,21 @@ file=DESCRIPTOR, values=[ _descriptor.EnumValueDescriptor( - name="STORAGE_TYPE_UNSPECIFIED", index=0, number=0, options=None, type=None + name="STORAGE_TYPE_UNSPECIFIED", + index=0, + number=0, + serialized_options=None, + type=None, ), _descriptor.EnumValueDescriptor( - name="SSD", index=1, number=1, options=None, type=None + name="SSD", index=1, number=1, serialized_options=None, type=None ), _descriptor.EnumValueDescriptor( - name="HDD", index=2, number=2, options=None, type=None + name="HDD", index=2, number=2, serialized_options=None, type=None ), ], containing_type=None, - options=None, + serialized_options=None, serialized_start=142, serialized_end=203, ) @@ -66,11 +72,5 @@ _sym_db.RegisterFileDescriptor(DESCRIPTOR) -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions( - descriptor_pb2.FileOptions(), - _b( - "\n\034com.google.bigtable.admin.v2B\013CommonProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2" - ), -) +DESCRIPTOR._options = None # @@protoc_insertion_point(module_scope) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py index ebf96b37b74a..be1d54aa8925 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py @@ -8,7 +8,6 @@ from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) @@ -25,6 +24,9 @@ name="google/cloud/bigtable/admin_v2/proto/instance.proto", package="google.bigtable.admin.v2", syntax="proto3", + serialized_options=_b( + 
"\n\034com.google.bigtable.admin.v2B\rInstanceProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2" + ), serialized_pb=_b( '\n3google/cloud/bigtable/admin_v2/proto/instance.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x31google/cloud/bigtable/admin_v2/proto/common.proto"\x83\x03\n\x08Instance\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12\x37\n\x05state\x18\x03 \x01(\x0e\x32(.google.bigtable.admin.v2.Instance.State\x12\x35\n\x04type\x18\x04 \x01(\x0e\x32\'.google.bigtable.admin.v2.Instance.Type\x12>\n\x06labels\x18\x05 \x03(\x0b\x32..google.bigtable.admin.v2.Instance.LabelsEntry\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"5\n\x05State\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\t\n\x05READY\x10\x01\x12\x0c\n\x08\x43REATING\x10\x02"=\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x0e\n\nPRODUCTION\x10\x01\x12\x0f\n\x0b\x44\x45VELOPMENT\x10\x02"\x8e\x02\n\x07\x43luster\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x10\n\x08location\x18\x02 \x01(\t\x12\x36\n\x05state\x18\x03 \x01(\x0e\x32\'.google.bigtable.admin.v2.Cluster.State\x12\x13\n\x0bserve_nodes\x18\x04 \x01(\x05\x12\x43\n\x14\x64\x65\x66\x61ult_storage_type\x18\x05 \x01(\x0e\x32%.google.bigtable.admin.v2.StorageType"Q\n\x05State\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\t\n\x05READY\x10\x01\x12\x0c\n\x08\x43REATING\x10\x02\x12\x0c\n\x08RESIZING\x10\x03\x12\x0c\n\x08\x44ISABLED\x10\x04"\x82\x03\n\nAppProfile\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04\x65tag\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t\x12g\n\x1dmulti_cluster_routing_use_any\x18\x05 \x01(\x0b\x32>.google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAnyH\x00\x12[\n\x16single_cluster_routing\x18\x06 \x01(\x0b\x32\x39.google.bigtable.admin.v2.AppProfile.SingleClusterRoutingH\x00\x1a\x1b\n\x19MultiClusterRoutingUseAny\x1aN\n\x14SingleClusterRouting\x12\x12\n\ncluster_id\x18\x01 \x01(\t\x12"\n\x1a\x61llow_transactional_writes\x18\x02 \x01(\x08\x42\x10\n\x0erouting_policyB\xb0\x01\n\x1c\x63om.google.bigtable.admin.v2B\rInstanceProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2b\x06proto3' ), @@ -42,17 +44,21 @@ file=DESCRIPTOR, values=[ _descriptor.EnumValueDescriptor( - name="STATE_NOT_KNOWN", index=0, number=0, options=None, type=None + name="STATE_NOT_KNOWN", + index=0, + number=0, + serialized_options=None, + type=None, ), _descriptor.EnumValueDescriptor( - name="READY", index=1, number=1, options=None, type=None + name="READY", index=1, number=1, serialized_options=None, type=None ), _descriptor.EnumValueDescriptor( - name="CREATING", index=2, number=2, options=None, type=None + name="CREATING", index=2, number=2, serialized_options=None, type=None ), ], containing_type=None, - options=None, + serialized_options=None, serialized_start=434, serialized_end=487, ) @@ -65,17 +71,21 @@ file=DESCRIPTOR, values=[ _descriptor.EnumValueDescriptor( - name="TYPE_UNSPECIFIED", index=0, number=0, options=None, type=None + name="TYPE_UNSPECIFIED", + index=0, + number=0, + serialized_options=None, + type=None, ), _descriptor.EnumValueDescriptor( - name="PRODUCTION", index=1, number=1, options=None, type=None + name="PRODUCTION", index=1, number=1, serialized_options=None, type=None ), _descriptor.EnumValueDescriptor( - 
name="DEVELOPMENT", index=2, number=2, options=None, type=None + name="DEVELOPMENT", index=2, number=2, serialized_options=None, type=None ), ], containing_type=None, - options=None, + serialized_options=None, serialized_start=489, serialized_end=550, ) @@ -88,23 +98,27 @@ file=DESCRIPTOR, values=[ _descriptor.EnumValueDescriptor( - name="STATE_NOT_KNOWN", index=0, number=0, options=None, type=None + name="STATE_NOT_KNOWN", + index=0, + number=0, + serialized_options=None, + type=None, ), _descriptor.EnumValueDescriptor( - name="READY", index=1, number=1, options=None, type=None + name="READY", index=1, number=1, serialized_options=None, type=None ), _descriptor.EnumValueDescriptor( - name="CREATING", index=2, number=2, options=None, type=None + name="CREATING", index=2, number=2, serialized_options=None, type=None ), _descriptor.EnumValueDescriptor( - name="RESIZING", index=3, number=3, options=None, type=None + name="RESIZING", index=3, number=3, serialized_options=None, type=None ), _descriptor.EnumValueDescriptor( - name="DISABLED", index=4, number=4, options=None, type=None + name="DISABLED", index=4, number=4, serialized_options=None, type=None ), ], containing_type=None, - options=None, + serialized_options=None, serialized_start=742, serialized_end=823, ) @@ -133,7 +147,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -151,14 +165,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b("8\001")), + serialized_options=_b("8\001"), is_extendable=False, syntax="proto3", extension_ranges=[], @@ -189,7 +203,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -207,7 +221,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -225,7 +239,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -243,7 +257,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -261,14 +275,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[_INSTANCE_LABELSENTRY], enum_types=[_INSTANCE_STATE, _INSTANCE_TYPE], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -300,7 +314,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -318,7 +332,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -336,7 +350,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -354,7 +368,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, 
file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -372,14 +386,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[_CLUSTER_STATE], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -399,7 +413,7 @@ extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -430,7 +444,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -448,14 +462,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -486,7 +500,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -504,7 +518,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -522,7 +536,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -540,7 +554,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -558,7 +572,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], @@ -568,7 +582,7 @@ _APPPROFILE_SINGLECLUSTERROUTING, ], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -794,15 +808,6 @@ _sym_db.RegisterMessage(AppProfile.SingleClusterRouting) -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions( - descriptor_pb2.FileOptions(), - _b( - "\n\034com.google.bigtable.admin.v2B\rInstanceProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2" - ), -) -_INSTANCE_LABELSENTRY.has_options = True -_INSTANCE_LABELSENTRY._options = _descriptor._ParseOptions( - descriptor_pb2.MessageOptions(), _b("8\001") -) +DESCRIPTOR._options = None +_INSTANCE_LABELSENTRY._options = None # @@protoc_insertion_point(module_scope) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py index 8b309a67256f..a50d828a246b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py @@ -8,7 +8,6 @@ from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) @@ -24,6 +23,9 @@ name="google/cloud/bigtable/admin_v2/proto/table.proto", package="google.bigtable.admin.v2", syntax="proto3", + serialized_options=_b( + 
"\n\034com.google.bigtable.admin.v2B\nTableProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2" + ), serialized_pb=_b( '\n0google/cloud/bigtable/admin_v2/proto/table.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xcb\x06\n\x05Table\x12\x0c\n\x04name\x18\x01 \x01(\t\x12J\n\x0e\x63luster_states\x18\x02 \x03(\x0b\x32\x32.google.bigtable.admin.v2.Table.ClusterStatesEntry\x12L\n\x0f\x63olumn_families\x18\x03 \x03(\x0b\x32\x33.google.bigtable.admin.v2.Table.ColumnFamiliesEntry\x12I\n\x0bgranularity\x18\x04 \x01(\x0e\x32\x34.google.bigtable.admin.v2.Table.TimestampGranularity\x1a\xe2\x01\n\x0c\x43lusterState\x12X\n\x11replication_state\x18\x01 \x01(\x0e\x32=.google.bigtable.admin.v2.Table.ClusterState.ReplicationState"x\n\x10ReplicationState\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\x10\n\x0cINITIALIZING\x10\x01\x12\x17\n\x13PLANNED_MAINTENANCE\x10\x02\x12\x19\n\x15UNPLANNED_MAINTENANCE\x10\x03\x12\t\n\x05READY\x10\x04\x1a\x62\n\x12\x43lusterStatesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12;\n\x05value\x18\x02 \x01(\x0b\x32,.google.bigtable.admin.v2.Table.ClusterState:\x02\x38\x01\x1a]\n\x13\x43olumnFamiliesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x35\n\x05value\x18\x02 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamily:\x02\x38\x01"I\n\x14TimestampGranularity\x12%\n!TIMESTAMP_GRANULARITY_UNSPECIFIED\x10\x00\x12\n\n\x06MILLIS\x10\x01"\\\n\x04View\x12\x14\n\x10VIEW_UNSPECIFIED\x10\x00\x12\r\n\tNAME_ONLY\x10\x01\x12\x0f\n\x0bSCHEMA_VIEW\x10\x02\x12\x14\n\x10REPLICATION_VIEW\x10\x03\x12\x08\n\x04\x46ULL\x10\x04"A\n\x0c\x43olumnFamily\x12\x31\n\x07gc_rule\x18\x01 \x01(\x0b\x32 .google.bigtable.admin.v2.GcRule"\xd5\x02\n\x06GcRule\x12\x1a\n\x10max_num_versions\x18\x01 \x01(\x05H\x00\x12,\n\x07max_age\x18\x02 \x01(\x0b\x32\x19.google.protobuf.DurationH\x00\x12\x45\n\x0cintersection\x18\x03 \x01(\x0b\x32-.google.bigtable.admin.v2.GcRule.IntersectionH\x00\x12\x37\n\x05union\x18\x04 \x01(\x0b\x32&.google.bigtable.admin.v2.GcRule.UnionH\x00\x1a?\n\x0cIntersection\x12/\n\x05rules\x18\x01 \x03(\x0b\x32 .google.bigtable.admin.v2.GcRule\x1a\x38\n\x05Union\x12/\n\x05rules\x18\x01 \x03(\x0b\x32 .google.bigtable.admin.v2.GcRuleB\x06\n\x04rule"\xcf\x02\n\x08Snapshot\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x35\n\x0csource_table\x18\x02 \x01(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12\x17\n\x0f\x64\x61ta_size_bytes\x18\x03 \x01(\x03\x12/\n\x0b\x63reate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x64\x65lete_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x37\n\x05state\x18\x06 \x01(\x0e\x32(.google.bigtable.admin.v2.Snapshot.State\x12\x13\n\x0b\x64\x65scription\x18\x07 \x01(\t"5\n\x05State\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\t\n\x05READY\x10\x01\x12\x0c\n\x08\x43REATING\x10\x02\x42\xad\x01\n\x1c\x63om.google.bigtable.admin.v2B\nTableProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2b\x06proto3' ), @@ -42,23 +44,35 @@ file=DESCRIPTOR, values=[ _descriptor.EnumValueDescriptor( - name="STATE_NOT_KNOWN", index=0, number=0, options=None, type=None + name="STATE_NOT_KNOWN", + index=0, + number=0, + serialized_options=None, + type=None, ), _descriptor.EnumValueDescriptor( - name="INITIALIZING", index=1, number=1, options=None, type=None + 
name="INITIALIZING", index=1, number=1, serialized_options=None, type=None ), _descriptor.EnumValueDescriptor( - name="PLANNED_MAINTENANCE", index=2, number=2, options=None, type=None + name="PLANNED_MAINTENANCE", + index=2, + number=2, + serialized_options=None, + type=None, ), _descriptor.EnumValueDescriptor( - name="UNPLANNED_MAINTENANCE", index=3, number=3, options=None, type=None + name="UNPLANNED_MAINTENANCE", + index=3, + number=3, + serialized_options=None, + type=None, ), _descriptor.EnumValueDescriptor( - name="READY", index=4, number=4, options=None, type=None + name="READY", index=4, number=4, serialized_options=None, type=None ), ], containing_type=None, - options=None, + serialized_options=None, serialized_start=533, serialized_end=653, ) @@ -74,15 +88,15 @@ name="TIMESTAMP_GRANULARITY_UNSPECIFIED", index=0, number=0, - options=None, + serialized_options=None, type=None, ), _descriptor.EnumValueDescriptor( - name="MILLIS", index=1, number=1, options=None, type=None + name="MILLIS", index=1, number=1, serialized_options=None, type=None ), ], containing_type=None, - options=None, + serialized_options=None, serialized_start=850, serialized_end=923, ) @@ -95,23 +109,31 @@ file=DESCRIPTOR, values=[ _descriptor.EnumValueDescriptor( - name="VIEW_UNSPECIFIED", index=0, number=0, options=None, type=None + name="VIEW_UNSPECIFIED", + index=0, + number=0, + serialized_options=None, + type=None, ), _descriptor.EnumValueDescriptor( - name="NAME_ONLY", index=1, number=1, options=None, type=None + name="NAME_ONLY", index=1, number=1, serialized_options=None, type=None ), _descriptor.EnumValueDescriptor( - name="SCHEMA_VIEW", index=2, number=2, options=None, type=None + name="SCHEMA_VIEW", index=2, number=2, serialized_options=None, type=None ), _descriptor.EnumValueDescriptor( - name="REPLICATION_VIEW", index=3, number=3, options=None, type=None + name="REPLICATION_VIEW", + index=3, + number=3, + serialized_options=None, + type=None, ), _descriptor.EnumValueDescriptor( - name="FULL", index=4, number=4, options=None, type=None + name="FULL", index=4, number=4, serialized_options=None, type=None ), ], containing_type=None, - options=None, + serialized_options=None, serialized_start=925, serialized_end=1017, ) @@ -124,17 +146,21 @@ file=DESCRIPTOR, values=[ _descriptor.EnumValueDescriptor( - name="STATE_NOT_KNOWN", index=0, number=0, options=None, type=None + name="STATE_NOT_KNOWN", + index=0, + number=0, + serialized_options=None, + type=None, ), _descriptor.EnumValueDescriptor( - name="READY", index=1, number=1, options=None, type=None + name="READY", index=1, number=1, serialized_options=None, type=None ), _descriptor.EnumValueDescriptor( - name="CREATING", index=2, number=2, options=None, type=None + name="CREATING", index=2, number=2, serialized_options=None, type=None ), ], containing_type=None, - options=None, + serialized_options=None, serialized_start=1713, serialized_end=1766, ) @@ -163,14 +189,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ) ], extensions=[], nested_types=[], enum_types=[_TABLE_CLUSTERSTATE_REPLICATIONSTATE], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -201,7 +227,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -219,14 +245,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + 
serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b("8\001")), + serialized_options=_b("8\001"), is_extendable=False, syntax="proto3", extension_ranges=[], @@ -257,7 +283,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -275,14 +301,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b("8\001")), + serialized_options=_b("8\001"), is_extendable=False, syntax="proto3", extension_ranges=[], @@ -313,7 +339,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -331,7 +357,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -349,7 +375,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -367,7 +393,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], @@ -378,7 +404,7 @@ _TABLE_COLUMNFAMILIESENTRY, ], enum_types=[_TABLE_TIMESTAMPGRANULARITY, _TABLE_VIEW], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -410,14 +436,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ) ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -449,14 +475,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ) ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -487,14 +513,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ) ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -525,7 +551,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -543,7 +569,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -561,7 +587,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -579,14 +605,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[_GCRULE_INTERSECTION, _GCRULE_UNION], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -626,7 +652,7 @@ containing_type=None, is_extension=False, 
extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -644,7 +670,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -662,7 +688,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -680,7 +706,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -698,7 +724,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -716,7 +742,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -734,14 +760,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[_SNAPSHOT_STATE], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -1009,19 +1035,7 @@ _sym_db.RegisterMessage(Snapshot) -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions( - descriptor_pb2.FileOptions(), - _b( - "\n\034com.google.bigtable.admin.v2B\nTableProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2" - ), -) -_TABLE_CLUSTERSTATESENTRY.has_options = True -_TABLE_CLUSTERSTATESENTRY._options = _descriptor._ParseOptions( - descriptor_pb2.MessageOptions(), _b("8\001") -) -_TABLE_COLUMNFAMILIESENTRY.has_options = True -_TABLE_COLUMNFAMILIESENTRY._options = _descriptor._ParseOptions( - descriptor_pb2.MessageOptions(), _b("8\001") -) +DESCRIPTOR._options = None +_TABLE_CLUSTERSTATESENTRY._options = None +_TABLE_COLUMNFAMILIESENTRY._options = None # @@protoc_insertion_point(module_scope) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py index 994ca3d6a6f9..344ccfe67329 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py @@ -8,7 +8,6 @@ from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) @@ -27,6 +26,9 @@ name="google/cloud/bigtable_v2/proto/bigtable.proto", package="google.bigtable.v2", syntax="proto3", + serialized_options=_b( + "\n\026com.google.bigtable.v2B\rBigtableProtoP\001Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2" + ), serialized_pb=_b( '\n-google/cloud/bigtable_v2/proto/bigtable.proto\x12\x12google.bigtable.v2\x1a\x1cgoogle/api/annotations.proto\x1a)google/cloud/bigtable_v2/proto/data.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x17google/rpc/status.proto"\xaa\x01\n\x0fReadRowsRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x05 \x01(\t\x12(\n\x04rows\x18\x02 
\x01(\x0b\x32\x1a.google.bigtable.v2.RowSet\x12-\n\x06\x66ilter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x12\n\nrows_limit\x18\x04 \x01(\x03"\xf8\x02\n\x10ReadRowsResponse\x12>\n\x06\x63hunks\x18\x01 \x03(\x0b\x32..google.bigtable.v2.ReadRowsResponse.CellChunk\x12\x1c\n\x14last_scanned_row_key\x18\x02 \x01(\x0c\x1a\x85\x02\n\tCellChunk\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x31\n\x0b\x66\x61mily_name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\tqualifier\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.BytesValue\x12\x18\n\x10timestamp_micros\x18\x04 \x01(\x03\x12\x0e\n\x06labels\x18\x05 \x03(\t\x12\r\n\x05value\x18\x06 \x01(\x0c\x12\x12\n\nvalue_size\x18\x07 \x01(\x05\x12\x13\n\treset_row\x18\x08 \x01(\x08H\x00\x12\x14\n\ncommit_row\x18\t \x01(\x08H\x00\x42\x0c\n\nrow_status"B\n\x14SampleRowKeysRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t">\n\x15SampleRowKeysResponse\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x14\n\x0coffset_bytes\x18\x02 \x01(\x03"\x80\x01\n\x10MutateRowRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x04 \x01(\t\x12\x0f\n\x07row_key\x18\x02 \x01(\x0c\x12/\n\tmutations\x18\x03 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation"\x13\n\x11MutateRowResponse"\xc8\x01\n\x11MutateRowsRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x03 \x01(\t\x12<\n\x07\x65ntries\x18\x02 \x03(\x0b\x32+.google.bigtable.v2.MutateRowsRequest.Entry\x1aI\n\x05\x45ntry\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12/\n\tmutations\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation"\x8f\x01\n\x12MutateRowsResponse\x12=\n\x07\x65ntries\x18\x01 \x03(\x0b\x32,.google.bigtable.v2.MutateRowsResponse.Entry\x1a:\n\x05\x45ntry\x12\r\n\x05index\x18\x01 \x01(\x03\x12"\n\x06status\x18\x02 \x01(\x0b\x32\x12.google.rpc.Status"\xfd\x01\n\x18\x43heckAndMutateRowRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x07 \x01(\t\x12\x0f\n\x07row_key\x18\x02 \x01(\x0c\x12\x37\n\x10predicate_filter\x18\x06 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x34\n\x0etrue_mutations\x18\x04 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\x12\x35\n\x0f\x66\x61lse_mutations\x18\x05 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation"6\n\x19\x43heckAndMutateRowResponse\x12\x19\n\x11predicate_matched\x18\x01 \x01(\x08"\x90\x01\n\x19ReadModifyWriteRowRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x04 \x01(\t\x12\x0f\n\x07row_key\x18\x02 \x01(\x0c\x12\x36\n\x05rules\x18\x03 \x03(\x0b\x32\'.google.bigtable.v2.ReadModifyWriteRule"B\n\x1aReadModifyWriteRowResponse\x12$\n\x03row\x18\x01 
\x01(\x0b\x32\x17.google.bigtable.v2.Row2\xad\x08\n\x08\x42igtable\x12\x9d\x01\n\x08ReadRows\x12#.google.bigtable.v2.ReadRowsRequest\x1a$.google.bigtable.v2.ReadRowsResponse"D\x82\xd3\xe4\x93\x02>"9/v2/{table_name=projects/*/instances/*/tables/*}:readRows:\x01*0\x01\x12\xae\x01\n\rSampleRowKeys\x12(.google.bigtable.v2.SampleRowKeysRequest\x1a).google.bigtable.v2.SampleRowKeysResponse"F\x82\xd3\xe4\x93\x02@\x12>/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys0\x01\x12\x9f\x01\n\tMutateRow\x12$.google.bigtable.v2.MutateRowRequest\x1a%.google.bigtable.v2.MutateRowResponse"E\x82\xd3\xe4\x93\x02?":/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow:\x01*\x12\xa5\x01\n\nMutateRows\x12%.google.bigtable.v2.MutateRowsRequest\x1a&.google.bigtable.v2.MutateRowsResponse"F\x82\xd3\xe4\x93\x02@";/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows:\x01*0\x01\x12\xbf\x01\n\x11\x43heckAndMutateRow\x12,.google.bigtable.v2.CheckAndMutateRowRequest\x1a-.google.bigtable.v2.CheckAndMutateRowResponse"M\x82\xd3\xe4\x93\x02G"B/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow:\x01*\x12\xc3\x01\n\x12ReadModifyWriteRow\x12-.google.bigtable.v2.ReadModifyWriteRowRequest\x1a..google.bigtable.v2.ReadModifyWriteRowResponse"N\x82\xd3\xe4\x93\x02H"C/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow:\x01*B\x9b\x01\n\x16\x63om.google.bigtable.v2B\rBigtableProtoP\x01Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2b\x06proto3' ), @@ -61,7 +63,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -79,7 +81,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -97,7 +99,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -115,7 +117,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -133,14 +135,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -172,7 +174,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -190,7 +192,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -208,7 +210,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -226,7 +228,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -244,7 +246,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -262,7 +264,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + 
serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -280,7 +282,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -298,7 +300,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -316,14 +318,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -362,7 +364,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -380,14 +382,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[_READROWSRESPONSE_CELLCHUNK], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -419,7 +421,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -437,14 +439,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -476,7 +478,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -494,14 +496,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -533,7 +535,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -551,7 +553,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -569,7 +571,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -587,14 +589,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -614,7 +616,7 @@ extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -646,7 +648,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -664,14 +666,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], 
enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -702,7 +704,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -720,7 +722,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -738,14 +740,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[_MUTATEROWSREQUEST_ENTRY], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -777,7 +779,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -795,14 +797,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -833,14 +835,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ) ], extensions=[], nested_types=[_MUTATEROWSRESPONSE_ENTRY], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -872,7 +874,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -890,7 +892,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -908,7 +910,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -926,7 +928,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -944,7 +946,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -962,14 +964,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -1001,14 +1003,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ) ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -1040,7 +1042,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1058,7 +1060,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1076,7 +1078,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + 
serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1094,14 +1096,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -1133,14 +1135,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ) ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -1643,20 +1645,14 @@ _sym_db.RegisterMessage(ReadModifyWriteRowResponse) -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions( - descriptor_pb2.FileOptions(), - _b( - "\n\026com.google.bigtable.v2B\rBigtableProtoP\001Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2" - ), -) +DESCRIPTOR._options = None _BIGTABLE = _descriptor.ServiceDescriptor( name="Bigtable", full_name="google.bigtable.v2.Bigtable", file=DESCRIPTOR, index=0, - options=None, + serialized_options=None, serialized_start=1912, serialized_end=2981, methods=[ @@ -1667,11 +1663,8 @@ containing_service=None, input_type=_READROWSREQUEST, output_type=_READROWSRESPONSE, - options=_descriptor._ParseOptions( - descriptor_pb2.MethodOptions(), - _b( - '\202\323\344\223\002>"9/v2/{table_name=projects/*/instances/*/tables/*}:readRows:\001*' - ), + serialized_options=_b( + '\202\323\344\223\002>"9/v2/{table_name=projects/*/instances/*/tables/*}:readRows:\001*' ), ), _descriptor.MethodDescriptor( @@ -1681,11 +1674,8 @@ containing_service=None, input_type=_SAMPLEROWKEYSREQUEST, output_type=_SAMPLEROWKEYSRESPONSE, - options=_descriptor._ParseOptions( - descriptor_pb2.MethodOptions(), - _b( - "\202\323\344\223\002@\022>/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys" - ), + serialized_options=_b( + "\202\323\344\223\002@\022>/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys" ), ), _descriptor.MethodDescriptor( @@ -1695,11 +1685,8 @@ containing_service=None, input_type=_MUTATEROWREQUEST, output_type=_MUTATEROWRESPONSE, - options=_descriptor._ParseOptions( - descriptor_pb2.MethodOptions(), - _b( - '\202\323\344\223\002?":/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow:\001*' - ), + serialized_options=_b( + '\202\323\344\223\002?":/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow:\001*' ), ), _descriptor.MethodDescriptor( @@ -1709,11 +1696,8 @@ containing_service=None, input_type=_MUTATEROWSREQUEST, output_type=_MUTATEROWSRESPONSE, - options=_descriptor._ParseOptions( - descriptor_pb2.MethodOptions(), - _b( - '\202\323\344\223\002@";/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows:\001*' - ), + serialized_options=_b( + '\202\323\344\223\002@";/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows:\001*' ), ), _descriptor.MethodDescriptor( @@ -1723,11 +1707,8 @@ containing_service=None, input_type=_CHECKANDMUTATEROWREQUEST, output_type=_CHECKANDMUTATEROWRESPONSE, - options=_descriptor._ParseOptions( - descriptor_pb2.MethodOptions(), - _b( - '\202\323\344\223\002G"B/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow:\001*' - ), + serialized_options=_b( + '\202\323\344\223\002G"B/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow:\001*' ), ), _descriptor.MethodDescriptor( @@ 
-1737,11 +1718,8 @@ containing_service=None, input_type=_READMODIFYWRITEROWREQUEST, output_type=_READMODIFYWRITEROWRESPONSE, - options=_descriptor._ParseOptions( - descriptor_pb2.MethodOptions(), - _b( - '\202\323\344\223\002H"C/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow:\001*' - ), + serialized_options=_b( + '\202\323\344\223\002H"C/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow:\001*' ), ), ], diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py index af8d88968798..31ee6b4e550c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py @@ -8,7 +8,6 @@ from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) @@ -19,6 +18,9 @@ name="google/cloud/bigtable_v2/proto/data.proto", package="google.bigtable.v2", syntax="proto3", + serialized_options=_b( + "\n\026com.google.bigtable.v2B\tDataProtoP\001Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2" + ), serialized_pb=_b( '\n)google/cloud/bigtable_v2/proto/data.proto\x12\x12google.bigtable.v2"@\n\x03Row\x12\x0b\n\x03key\x18\x01 \x01(\x0c\x12,\n\x08\x66\x61milies\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v2.Family"C\n\x06\x46\x61mily\x12\x0c\n\x04name\x18\x01 \x01(\t\x12+\n\x07\x63olumns\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v2.Column"D\n\x06\x43olumn\x12\x11\n\tqualifier\x18\x01 \x01(\x0c\x12\'\n\x05\x63\x65lls\x18\x02 \x03(\x0b\x32\x18.google.bigtable.v2.Cell"?\n\x04\x43\x65ll\x12\x18\n\x10timestamp_micros\x18\x01 \x01(\x03\x12\r\n\x05value\x18\x02 \x01(\x0c\x12\x0e\n\x06labels\x18\x03 \x03(\t"\x8a\x01\n\x08RowRange\x12\x1a\n\x10start_key_closed\x18\x01 \x01(\x0cH\x00\x12\x18\n\x0estart_key_open\x18\x02 \x01(\x0cH\x00\x12\x16\n\x0c\x65nd_key_open\x18\x03 \x01(\x0cH\x01\x12\x18\n\x0e\x65nd_key_closed\x18\x04 \x01(\x0cH\x01\x42\x0b\n\tstart_keyB\t\n\x07\x65nd_key"L\n\x06RowSet\x12\x10\n\x08row_keys\x18\x01 \x03(\x0c\x12\x30\n\nrow_ranges\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.RowRange"\xc6\x01\n\x0b\x43olumnRange\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12 \n\x16start_qualifier_closed\x18\x02 \x01(\x0cH\x00\x12\x1e\n\x14start_qualifier_open\x18\x03 \x01(\x0cH\x00\x12\x1e\n\x14\x65nd_qualifier_closed\x18\x04 \x01(\x0cH\x01\x12\x1c\n\x12\x65nd_qualifier_open\x18\x05 \x01(\x0cH\x01\x42\x11\n\x0fstart_qualifierB\x0f\n\rend_qualifier"N\n\x0eTimestampRange\x12\x1e\n\x16start_timestamp_micros\x18\x01 \x01(\x03\x12\x1c\n\x14\x65nd_timestamp_micros\x18\x02 \x01(\x03"\x98\x01\n\nValueRange\x12\x1c\n\x12start_value_closed\x18\x01 \x01(\x0cH\x00\x12\x1a\n\x10start_value_open\x18\x02 \x01(\x0cH\x00\x12\x1a\n\x10\x65nd_value_closed\x18\x03 \x01(\x0cH\x01\x12\x18\n\x0e\x65nd_value_open\x18\x04 \x01(\x0cH\x01\x42\r\n\x0bstart_valueB\x0b\n\tend_value"\xdf\x08\n\tRowFilter\x12\x34\n\x05\x63hain\x18\x01 \x01(\x0b\x32#.google.bigtable.v2.RowFilter.ChainH\x00\x12>\n\ninterleave\x18\x02 \x01(\x0b\x32(.google.bigtable.v2.RowFilter.InterleaveH\x00\x12<\n\tcondition\x18\x03 \x01(\x0b\x32\'.google.bigtable.v2.RowFilter.ConditionH\x00\x12\x0e\n\x04sink\x18\x10 \x01(\x08H\x00\x12\x19\n\x0fpass_all_filter\x18\x11 
\x01(\x08H\x00\x12\x1a\n\x10\x62lock_all_filter\x18\x12 \x01(\x08H\x00\x12\x1e\n\x14row_key_regex_filter\x18\x04 \x01(\x0cH\x00\x12\x1b\n\x11row_sample_filter\x18\x0e \x01(\x01H\x00\x12"\n\x18\x66\x61mily_name_regex_filter\x18\x05 \x01(\tH\x00\x12\'\n\x1d\x63olumn_qualifier_regex_filter\x18\x06 \x01(\x0cH\x00\x12>\n\x13\x63olumn_range_filter\x18\x07 \x01(\x0b\x32\x1f.google.bigtable.v2.ColumnRangeH\x00\x12\x44\n\x16timestamp_range_filter\x18\x08 \x01(\x0b\x32".google.bigtable.v2.TimestampRangeH\x00\x12\x1c\n\x12value_regex_filter\x18\t \x01(\x0cH\x00\x12<\n\x12value_range_filter\x18\x0f \x01(\x0b\x32\x1e.google.bigtable.v2.ValueRangeH\x00\x12%\n\x1b\x63\x65lls_per_row_offset_filter\x18\n \x01(\x05H\x00\x12$\n\x1a\x63\x65lls_per_row_limit_filter\x18\x0b \x01(\x05H\x00\x12\'\n\x1d\x63\x65lls_per_column_limit_filter\x18\x0c \x01(\x05H\x00\x12!\n\x17strip_value_transformer\x18\r \x01(\x08H\x00\x12!\n\x17\x61pply_label_transformer\x18\x13 \x01(\tH\x00\x1a\x37\n\x05\x43hain\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x1a<\n\nInterleave\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x1a\xad\x01\n\tCondition\x12\x37\n\x10predicate_filter\x18\x01 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x32\n\x0btrue_filter\x18\x02 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x33\n\x0c\x66\x61lse_filter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilterB\x08\n\x06\x66ilter"\xc9\x04\n\x08Mutation\x12\x38\n\x08set_cell\x18\x01 \x01(\x0b\x32$.google.bigtable.v2.Mutation.SetCellH\x00\x12K\n\x12\x64\x65lete_from_column\x18\x02 \x01(\x0b\x32-.google.bigtable.v2.Mutation.DeleteFromColumnH\x00\x12K\n\x12\x64\x65lete_from_family\x18\x03 \x01(\x0b\x32-.google.bigtable.v2.Mutation.DeleteFromFamilyH\x00\x12\x45\n\x0f\x64\x65lete_from_row\x18\x04 \x01(\x0b\x32*.google.bigtable.v2.Mutation.DeleteFromRowH\x00\x1a\x61\n\x07SetCell\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x18\n\x10timestamp_micros\x18\x03 \x01(\x03\x12\r\n\x05value\x18\x04 \x01(\x0c\x1ay\n\x10\x44\x65leteFromColumn\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x36\n\ntime_range\x18\x03 \x01(\x0b\x32".google.bigtable.v2.TimestampRange\x1a\'\n\x10\x44\x65leteFromFamily\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x1a\x0f\n\rDeleteFromRowB\n\n\x08mutation"\x80\x01\n\x13ReadModifyWriteRule\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x16\n\x0c\x61ppend_value\x18\x03 \x01(\x0cH\x00\x12\x1a\n\x10increment_amount\x18\x04 \x01(\x03H\x00\x42\x06\n\x04ruleB\x97\x01\n\x16\x63om.google.bigtable.v2B\tDataProtoP\x01Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2b\x06proto3' ), @@ -47,7 +49,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -65,14 +67,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -104,7 +106,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -122,14 +124,14 @@ 
containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -161,7 +163,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -179,14 +181,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -218,7 +220,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -236,7 +238,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -254,14 +256,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -293,7 +295,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -311,7 +313,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -329,7 +331,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -347,14 +349,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -401,7 +403,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -419,14 +421,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -458,7 +460,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -476,7 +478,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -494,7 +496,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -512,7 +514,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -530,14 +532,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, 
file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -584,7 +586,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -602,14 +604,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -641,7 +643,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -659,7 +661,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -677,7 +679,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -695,14 +697,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -749,14 +751,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ) ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -787,14 +789,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ) ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -825,7 +827,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -843,7 +845,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -861,14 +863,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -899,7 +901,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -917,7 +919,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -935,7 +937,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -953,7 +955,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -971,7 +973,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + 
serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -989,7 +991,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1007,7 +1009,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1025,7 +1027,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1043,7 +1045,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1061,7 +1063,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1079,7 +1081,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1097,7 +1099,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1115,7 +1117,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1133,7 +1135,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1151,7 +1153,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1169,7 +1171,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1187,7 +1189,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1205,7 +1207,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1223,14 +1225,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[_ROWFILTER_CHAIN, _ROWFILTER_INTERLEAVE, _ROWFILTER_CONDITION], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -1270,7 +1272,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1288,7 +1290,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1306,7 +1308,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1324,14 +1326,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, 
syntax="proto3", extension_ranges=[], @@ -1362,7 +1364,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1380,7 +1382,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1398,14 +1400,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -1436,14 +1438,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ) ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -1462,7 +1464,7 @@ extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -1493,7 +1495,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1511,7 +1513,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1529,7 +1531,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1547,7 +1549,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], @@ -1559,7 +1561,7 @@ _MUTATION_DELETEFROMROW, ], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -1599,7 +1601,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1617,7 +1619,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1635,7 +1637,7 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1653,14 +1655,14 @@ containing_type=None, is_extension=False, extension_scope=None, - options=None, + serialized_options=None, file=DESCRIPTOR, ), ], extensions=[], nested_types=[], enum_types=[], - options=None, + serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], @@ -2601,11 +2603,5 @@ _sym_db.RegisterMessage(ReadModifyWriteRule) -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions( - descriptor_pb2.FileOptions(), - _b( - "\n\026com.google.bigtable.v2B\tDataProtoP\001Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2" - ), -) +DESCRIPTOR._options = None # @@protoc_insertion_point(module_scope) diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index 1178badf4e41..745a69202249 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ 
b/packages/google-cloud-bigtable/synth.metadata @@ -1,19 +1,19 @@ { - "updateTime": "2019-01-05T17:45:03.040890Z", + "updateTime": "2019-01-09T13:13:53.936968Z", "sources": [ { "generator": { "name": "artman", - "version": "0.16.4", - "dockerImage": "googleapis/artman@sha256:8b45fae963557c3299921037ecbb86f0689f41b1b4aea73408ebc50562cb2857" + "version": "0.16.5", + "dockerImage": "googleapis/artman@sha256:5a96c2c5c6f9570cc9556b63dc9ce1838777fd9166b5b64e43ad8e0ecee2fe2c" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "a111a53c0c6722afcd793b64724ceef7862db5b9", - "internalRef": "227896184" + "sha": "659d66ec24bf40b35a41a0b79218d96ba3add3d3", + "internalRef": "228437827" } }, { From 76e5de9b9f46272e9cc628723a4c28b6c12f0d54 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot <44816363+yoshi-automation@users.noreply.github.com> Date: Thu, 17 Jan 2019 15:32:04 -0800 Subject: [PATCH 225/892] Update copyright headers --- .../google/cloud/bigtable_admin_v2/__init__.py | 2 +- .../gapic/bigtable_instance_admin_client.py | 2 +- .../gapic/bigtable_table_admin_client.py | 2 +- .../google/cloud/bigtable_admin_v2/gapic/enums.py | 2 +- .../bigtable_instance_admin_grpc_transport.py | 2 +- .../bigtable_table_admin_grpc_transport.py | 2 +- .../google/cloud/bigtable_admin_v2/types.py | 2 +- .../google/cloud/bigtable_v2/__init__.py | 2 +- .../cloud/bigtable_v2/gapic/bigtable_client.py | 2 +- .../gapic/transports/bigtable_grpc_transport.py | 2 +- .../google/cloud/bigtable_v2/types.py | 2 +- packages/google-cloud-bigtable/synth.metadata | 12 ++++++------ .../tests/unit/gapic/v2/test_bigtable_client_v2.py | 2 +- .../v2/test_bigtable_instance_admin_client_v2.py | 2 +- .../gapic/v2/test_bigtable_table_admin_client_v2.py | 2 +- 15 files changed, 20 insertions(+), 20 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py index 1ce80625ec39..68adeb90d471 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py index 535b65ac54db..5d09c545cacc 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py index bc89075d0508..5e52cea50019 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py index 1fe61b6980b5..65ca75fd4360 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py index c8f3e3c0c6d0..ff1215750cae 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py index d30127945565..04877f5db347 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types.py index ccdda74aeb21..137785b7107c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py index cbd017f4b625..abd6662e60ed 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py index 783830f18066..8f59b1643ffb 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py index ddbf865a7569..d45434f0502a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types.py index 4dafb23b2d6a..1baf5910987c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index 745a69202249..034dbbb17183 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -1,26 +1,26 @@ { - "updateTime": "2019-01-09T13:13:53.936968Z", + "updateTime": "2019-01-17T13:14:02.679846Z", "sources": [ { "generator": { "name": "artman", - "version": "0.16.5", - "dockerImage": "googleapis/artman@sha256:5a96c2c5c6f9570cc9556b63dc9ce1838777fd9166b5b64e43ad8e0ecee2fe2c" + "version": "0.16.6", + "dockerImage": "googleapis/artman@sha256:12722f2ca3fbc3b53cc6aa5f0e569d7d221b46bd876a2136497089dec5e3634e" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "659d66ec24bf40b35a41a0b79218d96ba3add3d3", - "internalRef": "228437827" + "sha": "0ac60e21a1aa86c07c1836865b35308ba8178b05", + "internalRef": "229626798" } }, { "template": { "name": "python_library", "origin": "synthtool.gcp", - "version": "2018.12.6" + "version": "2019.1.16" } } ], diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_client_v2.py b/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_client_v2.py index 587f589aa278..edd5142e424e 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_client_v2.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_client_v2.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py b/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py index b9dcb2214893..6e303c88a665 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py b/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py index 1b84de2fef13..d697f706b749 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2018 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
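The synth.metadata change above records the artman, googleapis, and synthtool versions that produced this regeneration, and the commit that follows (#7205) extends the same pipeline so the raw .proto sources ship inside the package. The repository's actual synth.py is only summarized in that commit's diffstat, so the following is an illustrative sketch under assumed generator names and config paths, not a copy of it.

    # Illustrative synth.py sketch -- config paths and arguments are assumptions.
    import synthtool as s
    from synthtool import gcp

    gapic = gcp.GAPICGenerator()

    # Regenerate the data-plane and admin GAPIC layers from googleapis via artman.
    # include_protos=True also copies the .proto sources into the generated output,
    # which is one way to vendor them as a package artifact.
    v2 = gapic.py_library(
        "bigtable", "v2",
        config_path="/google/bigtable/artman_bigtable.yaml",
        include_protos=True,
    )
    admin = gapic.py_library(
        "bigtable_admin", "v2",
        config_path="/google/bigtable/admin/artman_bigtableadmin.yaml",
        include_protos=True,
    )

    s.move(v2 / "google/cloud/bigtable_v2")
    s.move(admin / "google/cloud/bigtable_admin_v2")

    # Shared Python library templates (noxfile, docs scaffolding, and so on).
    s.move(gcp.CommonTemplates().py_library())
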
From e764e71a376fd4028e8b6613cf863d05e991f04b Mon Sep 17 00:00:00 2001 From: Christopher Wilcox Date: Tue, 29 Jan 2019 13:28:49 -0800 Subject: [PATCH 226/892] Add protos as an artifact to library (#7205) --- .../proto/bigtable_cluster_data.proto | 94 +++ .../proto/bigtable_cluster_service.proto | 130 +++++ .../bigtable_cluster_service_messages.proto | 141 +++++ .../proto/bigtable_instance_admin.proto | 456 +++++++++++++++ .../proto/bigtable_table_admin.proto | 525 +++++++++++++++++ .../proto/bigtable_table_data.proto | 126 +++++ .../proto/bigtable_table_service.proto | 80 +++ .../bigtable_table_service_messages.proto | 116 ++++ .../bigtable_admin_v2/proto/common.proto | 41 ++ .../bigtable_admin_v2/proto/instance.proto | 208 +++++++ .../cloud/bigtable_admin_v2/proto/table.proto | 221 ++++++++ .../cloud/bigtable_v2/proto/bigtable.proto | 365 ++++++++++++ .../proto/bigtable_cluster_data.proto | 94 +++ .../proto/bigtable_cluster_service.proto | 130 +++++ .../bigtable_cluster_service_messages.proto | 141 +++++ .../bigtable_v2/proto/bigtable_data.proto | 516 +++++++++++++++++ .../proto/bigtable_instance_admin.proto | 456 +++++++++++++++ .../bigtable_v2/proto/bigtable_service.proto | 91 +++ .../proto/bigtable_service_messages.proto | 218 +++++++ .../proto/bigtable_table_admin.proto | 525 +++++++++++++++++ .../proto/bigtable_table_data.proto | 126 +++++ .../proto/bigtable_table_service.proto | 80 +++ .../bigtable_table_service_messages.proto | 116 ++++ .../cloud/bigtable_v2/proto/common.proto | 41 ++ .../google/cloud/bigtable_v2/proto/data.proto | 535 ++++++++++++++++++ .../cloud/bigtable_v2/proto/instance.proto | 208 +++++++ .../cloud/bigtable_v2/proto/table.proto | 221 ++++++++ packages/google-cloud-bigtable/synth.metadata | 10 +- packages/google-cloud-bigtable/synth.py | 2 + 29 files changed, 6008 insertions(+), 5 deletions(-) create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_cluster_data.proto create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_cluster_service.proto create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_cluster_service_messages.proto create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_data.proto create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_service.proto create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_service_messages.proto create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common.proto create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance.proto create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table.proto create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable.proto create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_cluster_data.proto create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_cluster_service.proto create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_cluster_service_messages.proto create mode 
100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_data.proto create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_instance_admin.proto create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_service.proto create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_service_messages.proto create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_table_admin.proto create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_table_data.proto create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_table_service.proto create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_table_service_messages.proto create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/common.proto create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data.proto create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/instance.proto create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/table.proto diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_cluster_data.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_cluster_data.proto new file mode 100644 index 000000000000..ca3b663d8661 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_cluster_data.proto @@ -0,0 +1,94 @@ +// Copyright 2017 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.bigtable.admin.cluster.v1; + +import "google/api/annotations.proto"; +import "google/longrunning/operations.proto"; +import "google/protobuf/timestamp.proto"; + +option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/cluster/v1;cluster"; +option java_multiple_files = true; +option java_outer_classname = "BigtableClusterDataProto"; +option java_package = "com.google.bigtable.admin.cluster.v1"; + + +// A physical location in which a particular project can allocate Cloud BigTable +// resources. +message Zone { + // Possible states of a zone. + enum Status { + // The state of the zone is unknown or unspecified. + UNKNOWN = 0; + + // The zone is in a good state. + OK = 1; + + // The zone is down for planned maintenance. + PLANNED_MAINTENANCE = 2; + + // The zone is down for emergency or unplanned maintenance. + EMERGENCY_MAINENANCE = 3; + } + + // A permanent unique identifier for the zone. + // Values are of the form projects//zones/[a-z][-a-z0-9]* + string name = 1; + + // The name of this zone as it appears in UIs. + string display_name = 2; + + // The current state of this zone. + Status status = 3; +} + +// An isolated set of Cloud BigTable resources on which tables can be hosted. 
+message Cluster { + // A permanent unique identifier for the cluster. For technical reasons, the + // zone in which the cluster resides is included here. + // Values are of the form + // projects//zones//clusters/[a-z][-a-z0-9]* + string name = 1; + + // The operation currently running on the cluster, if any. + // This cannot be set directly, only through CreateCluster, UpdateCluster, + // or UndeleteCluster. Calls to these methods will be rejected if + // "current_operation" is already set. + google.longrunning.Operation current_operation = 3; + + // The descriptive name for this cluster as it appears in UIs. + // Must be unique per zone. + string display_name = 4; + + // The number of serve nodes allocated to this cluster. + int32 serve_nodes = 5; + + // What storage type to use for tables in this cluster. Only configurable at + // cluster creation time. If unspecified, STORAGE_SSD will be used. + StorageType default_storage_type = 8; +} + +enum StorageType { + // The storage type used is unspecified. + STORAGE_UNSPECIFIED = 0; + + // Data will be stored in SSD, providing low and consistent latencies. + STORAGE_SSD = 1; + + // Data will be stored in HDD, providing high and less predictable + // latencies. + STORAGE_HDD = 2; +} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_cluster_service.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_cluster_service.proto new file mode 100644 index 000000000000..038fcc46397f --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_cluster_service.proto @@ -0,0 +1,130 @@ +// Copyright 2017 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.bigtable.admin.cluster.v1; + +import "google/api/annotations.proto"; +import "google/bigtable/admin/cluster/v1/bigtable_cluster_data.proto"; +import "google/bigtable/admin/cluster/v1/bigtable_cluster_service_messages.proto"; +import "google/longrunning/operations.proto"; +import "google/protobuf/empty.proto"; + +option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/cluster/v1;cluster"; +option java_multiple_files = true; +option java_outer_classname = "BigtableClusterServicesProto"; +option java_package = "com.google.bigtable.admin.cluster.v1"; + + +// Service for managing zonal Cloud Bigtable resources. +service BigtableClusterService { + // Lists the supported zones for the given project. + rpc ListZones(ListZonesRequest) returns (ListZonesResponse) { + option (google.api.http) = { get: "/v1/{name=projects/*}/zones" }; + } + + // Gets information about a particular cluster. + rpc GetCluster(GetClusterRequest) returns (Cluster) { + option (google.api.http) = { get: "/v1/{name=projects/*/zones/*/clusters/*}" }; + } + + // Lists all clusters in the given project, along with any zones for which + // cluster information could not be retrieved. 
+ rpc ListClusters(ListClustersRequest) returns (ListClustersResponse) { + option (google.api.http) = { get: "/v1/{name=projects/*}/aggregated/clusters" }; + } + + // Creates a cluster and begins preparing it to begin serving. The returned + // cluster embeds as its "current_operation" a long-running operation which + // can be used to track the progress of turning up the new cluster. + // Immediately upon completion of this request: + // * The cluster will be readable via the API, with all requested attributes + // but no allocated resources. + // Until completion of the embedded operation: + // * Cancelling the operation will render the cluster immediately unreadable + // via the API. + // * All other attempts to modify or delete the cluster will be rejected. + // Upon completion of the embedded operation: + // * Billing for all successfully-allocated resources will begin (some types + // may have lower than the requested levels). + // * New tables can be created in the cluster. + // * The cluster's allocated resource levels will be readable via the API. + // The embedded operation's "metadata" field type is + // [CreateClusterMetadata][google.bigtable.admin.cluster.v1.CreateClusterMetadata] The embedded operation's "response" field type is + // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful. + rpc CreateCluster(CreateClusterRequest) returns (Cluster) { + option (google.api.http) = { post: "/v1/{name=projects/*/zones/*}/clusters" body: "*" }; + } + + // Updates a cluster, and begins allocating or releasing resources as + // requested. The returned cluster embeds as its "current_operation" a + // long-running operation which can be used to track the progress of updating + // the cluster. + // Immediately upon completion of this request: + // * For resource types where a decrease in the cluster's allocation has been + // requested, billing will be based on the newly-requested level. + // Until completion of the embedded operation: + // * Cancelling the operation will set its metadata's "cancelled_at_time", + // and begin restoring resources to their pre-request values. The operation + // is guaranteed to succeed at undoing all resource changes, after which + // point it will terminate with a CANCELLED status. + // * All other attempts to modify or delete the cluster will be rejected. + // * Reading the cluster via the API will continue to give the pre-request + // resource levels. + // Upon completion of the embedded operation: + // * Billing will begin for all successfully-allocated resources (some types + // may have lower than the requested levels). + // * All newly-reserved resources will be available for serving the cluster's + // tables. + // * The cluster's new resource levels will be readable via the API. + // [UpdateClusterMetadata][google.bigtable.admin.cluster.v1.UpdateClusterMetadata] The embedded operation's "response" field type is + // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful. + rpc UpdateCluster(Cluster) returns (Cluster) { + option (google.api.http) = { put: "/v1/{name=projects/*/zones/*/clusters/*}" body: "*" }; + } + + // Marks a cluster and all of its tables for permanent deletion in 7 days. + // Immediately upon completion of the request: + // * Billing will cease for all of the cluster's reserved resources. + // * The cluster's "delete_time" field will be set 7 days in the future. + // Soon afterward: + // * All tables within the cluster will become unavailable. 
+ // Prior to the cluster's "delete_time": + // * The cluster can be recovered with a call to UndeleteCluster. + // * All other attempts to modify or delete the cluster will be rejected. + // At the cluster's "delete_time": + // * The cluster and *all of its tables* will immediately and irrevocably + // disappear from the API, and their data will be permanently deleted. + rpc DeleteCluster(DeleteClusterRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { delete: "/v1/{name=projects/*/zones/*/clusters/*}" }; + } + + // Cancels the scheduled deletion of an cluster and begins preparing it to + // resume serving. The returned operation will also be embedded as the + // cluster's "current_operation". + // Immediately upon completion of this request: + // * The cluster's "delete_time" field will be unset, protecting it from + // automatic deletion. + // Until completion of the returned operation: + // * The operation cannot be cancelled. + // Upon completion of the returned operation: + // * Billing for the cluster's resources will resume. + // * All tables within the cluster will be available. + // [UndeleteClusterMetadata][google.bigtable.admin.cluster.v1.UndeleteClusterMetadata] The embedded operation's "response" field type is + // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful. + rpc UndeleteCluster(UndeleteClusterRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*}:undelete" body: "" }; + } +} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_cluster_service_messages.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_cluster_service_messages.proto new file mode 100644 index 000000000000..518d14dac8e0 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_cluster_service_messages.proto @@ -0,0 +1,141 @@ +// Copyright 2017 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.bigtable.admin.cluster.v1; + +import "google/bigtable/admin/cluster/v1/bigtable_cluster_data.proto"; +import "google/protobuf/timestamp.proto"; + +option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/cluster/v1;cluster"; +option java_multiple_files = true; +option java_outer_classname = "BigtableClusterServiceMessagesProto"; +option java_package = "com.google.bigtable.admin.cluster.v1"; + + +// Request message for BigtableClusterService.ListZones. +message ListZonesRequest { + // The unique name of the project for which a list of supported zones is + // requested. + // Values are of the form projects/ + string name = 1; +} + +// Response message for BigtableClusterService.ListZones. +message ListZonesResponse { + // The list of requested zones. + repeated Zone zones = 1; +} + +// Request message for BigtableClusterService.GetCluster. +message GetClusterRequest { + // The unique name of the requested cluster. 
+ // Values are of the form projects//zones//clusters/ + string name = 1; +} + +// Request message for BigtableClusterService.ListClusters. +message ListClustersRequest { + // The unique name of the project for which a list of clusters is requested. + // Values are of the form projects/ + string name = 1; +} + +// Response message for BigtableClusterService.ListClusters. +message ListClustersResponse { + // The list of requested Clusters. + repeated Cluster clusters = 1; + + // The zones for which clusters could not be retrieved. + repeated Zone failed_zones = 2; +} + +// Request message for BigtableClusterService.CreateCluster. +message CreateClusterRequest { + // The unique name of the zone in which to create the cluster. + // Values are of the form projects//zones/ + string name = 1; + + // The id to be used when referring to the new cluster within its zone, + // e.g. just the "test-cluster" section of the full name + // "projects//zones//clusters/test-cluster". + string cluster_id = 2; + + // The cluster to create. + // The "name", "delete_time", and "current_operation" fields must be left + // blank. + Cluster cluster = 3; +} + +// Metadata type for the operation returned by +// BigtableClusterService.CreateCluster. +message CreateClusterMetadata { + // The request which prompted the creation of this operation. + CreateClusterRequest original_request = 1; + + // The time at which original_request was received. + google.protobuf.Timestamp request_time = 2; + + // The time at which this operation failed or was completed successfully. + google.protobuf.Timestamp finish_time = 3; +} + +// Metadata type for the operation returned by +// BigtableClusterService.UpdateCluster. +message UpdateClusterMetadata { + // The request which prompted the creation of this operation. + Cluster original_request = 1; + + // The time at which original_request was received. + google.protobuf.Timestamp request_time = 2; + + // The time at which this operation was cancelled. If set, this operation is + // in the process of undoing itself (which is guaranteed to succeed) and + // cannot be cancelled again. + google.protobuf.Timestamp cancel_time = 3; + + // The time at which this operation failed or was completed successfully. + google.protobuf.Timestamp finish_time = 4; +} + +// Request message for BigtableClusterService.DeleteCluster. +message DeleteClusterRequest { + // The unique name of the cluster to be deleted. + // Values are of the form projects//zones//clusters/ + string name = 1; +} + +// Request message for BigtableClusterService.UndeleteCluster. +message UndeleteClusterRequest { + // The unique name of the cluster to be un-deleted. + // Values are of the form projects//zones//clusters/ + string name = 1; +} + +// Metadata type for the operation returned by +// BigtableClusterService.UndeleteCluster. +message UndeleteClusterMetadata { + // The time at which the original request was received. + google.protobuf.Timestamp request_time = 1; + + // The time at which this operation failed or was completed successfully. + google.protobuf.Timestamp finish_time = 2; +} + +// Metadata type for operations initiated by the V2 BigtableAdmin service. +// More complete information for such operations is available via the V2 API. 
+message V2OperationMetadata { + +} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto new file mode 100644 index 000000000000..ec992ea0f818 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto @@ -0,0 +1,456 @@ +// Copyright 2018 Google LLC. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +syntax = "proto3"; + +package google.bigtable.admin.v2; + +import "google/api/annotations.proto"; +import "google/bigtable/admin/v2/instance.proto"; +import "google/iam/v1/iam_policy.proto"; +import "google/iam/v1/policy.proto"; +import "google/longrunning/operations.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/field_mask.proto"; +import "google/protobuf/timestamp.proto"; + +option csharp_namespace = "Google.Cloud.Bigtable.Admin.V2"; +option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/v2;admin"; +option java_multiple_files = true; +option java_outer_classname = "BigtableInstanceAdminProto"; +option java_package = "com.google.bigtable.admin.v2"; +option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2"; + + +// Service for creating, configuring, and deleting Cloud Bigtable Instances and +// Clusters. Provides access to the Instance and Cluster schemas only, not the +// tables' metadata or data stored in those tables. +service BigtableInstanceAdmin { + // Create an instance within a project. + rpc CreateInstance(CreateInstanceRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v2/{parent=projects/*}/instances" + body: "*" + }; + } + + // Gets information about an instance. + rpc GetInstance(GetInstanceRequest) returns (Instance) { + option (google.api.http) = { + get: "/v2/{name=projects/*/instances/*}" + }; + } + + // Lists information about instances in a project. + rpc ListInstances(ListInstancesRequest) returns (ListInstancesResponse) { + option (google.api.http) = { + get: "/v2/{parent=projects/*}/instances" + }; + } + + // Updates an instance within a project. + rpc UpdateInstance(Instance) returns (Instance) { + option (google.api.http) = { + put: "/v2/{name=projects/*/instances/*}" + body: "*" + }; + } + + // Partially updates an instance within a project. + rpc PartialUpdateInstance(PartialUpdateInstanceRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + patch: "/v2/{instance.name=projects/*/instances/*}" + body: "instance" + }; + } + + // Delete an instance from a project. + rpc DeleteInstance(DeleteInstanceRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v2/{name=projects/*/instances/*}" + }; + } + + // Creates a cluster within an instance. 
+ rpc CreateCluster(CreateClusterRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v2/{parent=projects/*/instances/*}/clusters" + body: "cluster" + }; + } + + // Gets information about a cluster. + rpc GetCluster(GetClusterRequest) returns (Cluster) { + option (google.api.http) = { + get: "/v2/{name=projects/*/instances/*/clusters/*}" + }; + } + + // Lists information about clusters in an instance. + rpc ListClusters(ListClustersRequest) returns (ListClustersResponse) { + option (google.api.http) = { + get: "/v2/{parent=projects/*/instances/*}/clusters" + }; + } + + // Updates a cluster within an instance. + rpc UpdateCluster(Cluster) returns (google.longrunning.Operation) { + option (google.api.http) = { + put: "/v2/{name=projects/*/instances/*/clusters/*}" + body: "*" + }; + } + + // Deletes a cluster from an instance. + rpc DeleteCluster(DeleteClusterRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v2/{name=projects/*/instances/*/clusters/*}" + }; + } + + // Creates an app profile within an instance. + rpc CreateAppProfile(CreateAppProfileRequest) returns (AppProfile) { + option (google.api.http) = { + post: "/v2/{parent=projects/*/instances/*}/appProfiles" + body: "app_profile" + }; + } + + // Gets information about an app profile. + rpc GetAppProfile(GetAppProfileRequest) returns (AppProfile) { + option (google.api.http) = { + get: "/v2/{name=projects/*/instances/*/appProfiles/*}" + }; + } + + // Lists information about app profiles in an instance. + rpc ListAppProfiles(ListAppProfilesRequest) returns (ListAppProfilesResponse) { + option (google.api.http) = { + get: "/v2/{parent=projects/*/instances/*}/appProfiles" + }; + } + + // Updates an app profile within an instance. + rpc UpdateAppProfile(UpdateAppProfileRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + patch: "/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}" + body: "app_profile" + }; + } + + // Deletes an app profile from an instance. + rpc DeleteAppProfile(DeleteAppProfileRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v2/{name=projects/*/instances/*/appProfiles/*}" + }; + } + + // Gets the access control policy for an instance resource. Returns an empty + // policy if an instance exists but does not have a policy set. + rpc GetIamPolicy(google.iam.v1.GetIamPolicyRequest) returns (google.iam.v1.Policy) { + option (google.api.http) = { + post: "/v2/{resource=projects/*/instances/*}:getIamPolicy" + body: "*" + }; + } + + // Sets the access control policy on an instance resource. Replaces any + // existing policy. + rpc SetIamPolicy(google.iam.v1.SetIamPolicyRequest) returns (google.iam.v1.Policy) { + option (google.api.http) = { + post: "/v2/{resource=projects/*/instances/*}:setIamPolicy" + body: "*" + }; + } + + // Returns permissions that the caller has on the specified instance resource. + rpc TestIamPermissions(google.iam.v1.TestIamPermissionsRequest) returns (google.iam.v1.TestIamPermissionsResponse) { + option (google.api.http) = { + post: "/v2/{resource=projects/*/instances/*}:testIamPermissions" + body: "*" + }; + } +} + +// Request message for BigtableInstanceAdmin.CreateInstance. +message CreateInstanceRequest { + // The unique name of the project in which to create the new instance. + // Values are of the form `projects/`. 
+ string parent = 1; + + // The ID to be used when referring to the new instance within its project, + // e.g., just `myinstance` rather than + // `projects/myproject/instances/myinstance`. + string instance_id = 2; + + // The instance to create. + // Fields marked `OutputOnly` must be left blank. + Instance instance = 3; + + // The clusters to be created within the instance, mapped by desired + // cluster ID, e.g., just `mycluster` rather than + // `projects/myproject/instances/myinstance/clusters/mycluster`. + // Fields marked `OutputOnly` must be left blank. + // Currently, at most two clusters can be specified. + map clusters = 4; +} + +// Request message for BigtableInstanceAdmin.GetInstance. +message GetInstanceRequest { + // The unique name of the requested instance. Values are of the form + // `projects//instances/`. + string name = 1; +} + +// Request message for BigtableInstanceAdmin.ListInstances. +message ListInstancesRequest { + // The unique name of the project for which a list of instances is requested. + // Values are of the form `projects/`. + string parent = 1; + + // DEPRECATED: This field is unused and ignored. + string page_token = 2; +} + +// Response message for BigtableInstanceAdmin.ListInstances. +message ListInstancesResponse { + // The list of requested instances. + repeated Instance instances = 1; + + // Locations from which Instance information could not be retrieved, + // due to an outage or some other transient condition. + // Instances whose Clusters are all in one of the failed locations + // may be missing from `instances`, and Instances with at least one + // Cluster in a failed location may only have partial information returned. + // Values are of the form `projects//locations/` + repeated string failed_locations = 2; + + // DEPRECATED: This field is unused and ignored. + string next_page_token = 3; +} + +// Request message for BigtableInstanceAdmin.PartialUpdateInstance. +message PartialUpdateInstanceRequest { + // The Instance which will (partially) replace the current value. + Instance instance = 1; + + // The subset of Instance fields which should be replaced. + // Must be explicitly set. + google.protobuf.FieldMask update_mask = 2; +} + +// Request message for BigtableInstanceAdmin.DeleteInstance. +message DeleteInstanceRequest { + // The unique name of the instance to be deleted. + // Values are of the form `projects//instances/`. + string name = 1; +} + +// Request message for BigtableInstanceAdmin.CreateCluster. +message CreateClusterRequest { + // The unique name of the instance in which to create the new cluster. + // Values are of the form + // `projects//instances/`. + string parent = 1; + + // The ID to be used when referring to the new cluster within its instance, + // e.g., just `mycluster` rather than + // `projects/myproject/instances/myinstance/clusters/mycluster`. + string cluster_id = 2; + + // The cluster to be created. + // Fields marked `OutputOnly` must be left blank. + Cluster cluster = 3; +} + +// Request message for BigtableInstanceAdmin.GetCluster. +message GetClusterRequest { + // The unique name of the requested cluster. Values are of the form + // `projects//instances//clusters/`. + string name = 1; +} + +// Request message for BigtableInstanceAdmin.ListClusters. +message ListClustersRequest { + // The unique name of the instance for which a list of clusters is requested. + // Values are of the form `projects//instances/`. 
+ // Use ` = '-'` to list Clusters for all Instances in a project, + // e.g., `projects/myproject/instances/-`. + string parent = 1; + + // DEPRECATED: This field is unused and ignored. + string page_token = 2; +} + +// Response message for BigtableInstanceAdmin.ListClusters. +message ListClustersResponse { + // The list of requested clusters. + repeated Cluster clusters = 1; + + // Locations from which Cluster information could not be retrieved, + // due to an outage or some other transient condition. + // Clusters from these locations may be missing from `clusters`, + // or may only have partial information returned. + // Values are of the form `projects//locations/` + repeated string failed_locations = 2; + + // DEPRECATED: This field is unused and ignored. + string next_page_token = 3; +} + +// Request message for BigtableInstanceAdmin.DeleteCluster. +message DeleteClusterRequest { + // The unique name of the cluster to be deleted. Values are of the form + // `projects//instances//clusters/`. + string name = 1; +} + +// The metadata for the Operation returned by CreateInstance. +message CreateInstanceMetadata { + // The request that prompted the initiation of this CreateInstance operation. + CreateInstanceRequest original_request = 1; + + // The time at which the original request was received. + google.protobuf.Timestamp request_time = 2; + + // The time at which the operation failed or was completed successfully. + google.protobuf.Timestamp finish_time = 3; +} + +// The metadata for the Operation returned by UpdateInstance. +message UpdateInstanceMetadata { + // The request that prompted the initiation of this UpdateInstance operation. + PartialUpdateInstanceRequest original_request = 1; + + // The time at which the original request was received. + google.protobuf.Timestamp request_time = 2; + + // The time at which the operation failed or was completed successfully. + google.protobuf.Timestamp finish_time = 3; +} + +// The metadata for the Operation returned by CreateCluster. +message CreateClusterMetadata { + // The request that prompted the initiation of this CreateCluster operation. + CreateClusterRequest original_request = 1; + + // The time at which the original request was received. + google.protobuf.Timestamp request_time = 2; + + // The time at which the operation failed or was completed successfully. + google.protobuf.Timestamp finish_time = 3; +} + +// The metadata for the Operation returned by UpdateCluster. +message UpdateClusterMetadata { + // The request that prompted the initiation of this UpdateCluster operation. + Cluster original_request = 1; + + // The time at which the original request was received. + google.protobuf.Timestamp request_time = 2; + + // The time at which the operation failed or was completed successfully. + google.protobuf.Timestamp finish_time = 3; +} + +// Request message for BigtableInstanceAdmin.CreateAppProfile. +message CreateAppProfileRequest { + // The unique name of the instance in which to create the new app profile. + // Values are of the form + // `projects//instances/`. + string parent = 1; + + // The ID to be used when referring to the new app profile within its + // instance, e.g., just `myprofile` rather than + // `projects/myproject/instances/myinstance/appProfiles/myprofile`. + string app_profile_id = 2; + + // The app profile to be created. + // Fields marked `OutputOnly` will be ignored. + AppProfile app_profile = 3; + + // If true, ignore safety checks when creating the app profile. 
+ bool ignore_warnings = 4; +} + +// Request message for BigtableInstanceAdmin.GetAppProfile. +message GetAppProfileRequest { + // The unique name of the requested app profile. Values are of the form + // `projects//instances//appProfiles/`. + string name = 1; +} + +// Request message for BigtableInstanceAdmin.ListAppProfiles. +message ListAppProfilesRequest { + // The unique name of the instance for which a list of app profiles is + // requested. Values are of the form + // `projects//instances/`. + // Use ` = '-'` to list AppProfiles for all Instances in a project, + // e.g., `projects/myproject/instances/-`. + string parent = 1; + + // Maximum number of results per page. + // CURRENTLY UNIMPLEMENTED AND IGNORED. + int32 page_size = 3; + + // The value of `next_page_token` returned by a previous call. + string page_token = 2; +} + +// Response message for BigtableInstanceAdmin.ListAppProfiles. +message ListAppProfilesResponse { + // The list of requested app profiles. + repeated AppProfile app_profiles = 1; + + // Set if not all app profiles could be returned in a single response. + // Pass this value to `page_token` in another request to get the next + // page of results. + string next_page_token = 2; + + // Locations from which AppProfile information could not be retrieved, + // due to an outage or some other transient condition. + // AppProfiles from these locations may be missing from `app_profiles`. + // Values are of the form `projects//locations/` + repeated string failed_locations = 3; +} + +// Request message for BigtableInstanceAdmin.UpdateAppProfile. +message UpdateAppProfileRequest { + // The app profile which will (partially) replace the current value. + AppProfile app_profile = 1; + + // The subset of app profile fields which should be replaced. + // If unset, all fields will be replaced. + google.protobuf.FieldMask update_mask = 2; + + // If true, ignore safety checks when updating the app profile. + bool ignore_warnings = 3; +} + + +// Request message for BigtableInstanceAdmin.DeleteAppProfile. +message DeleteAppProfileRequest { + // The unique name of the app profile to be deleted. Values are of the form + // `projects//instances//appProfiles/`. + string name = 1; + + // If true, ignore safety checks when deleting the app profile. + bool ignore_warnings = 2; +} + +// The metadata for the Operation returned by UpdateAppProfile. +message UpdateAppProfileMetadata { + +} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto new file mode 100644 index 000000000000..2d5bddf302aa --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto @@ -0,0 +1,525 @@ +// Copyright 2018 Google LLC. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +syntax = "proto3"; + +package google.bigtable.admin.v2; + +import "google/api/annotations.proto"; +import "google/bigtable/admin/v2/table.proto"; +import "google/longrunning/operations.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/timestamp.proto"; + +option csharp_namespace = "Google.Cloud.Bigtable.Admin.V2"; +option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/v2;admin"; +option java_multiple_files = true; +option java_outer_classname = "BigtableTableAdminProto"; +option java_package = "com.google.bigtable.admin.v2"; +option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2"; + + +// Service for creating, configuring, and deleting Cloud Bigtable tables. +// +// +// Provides access to the table schemas only, not the data stored within +// the tables. +service BigtableTableAdmin { + // Creates a new table in the specified instance. + // The table can be created with a full set of initial column families, + // specified in the request. + rpc CreateTable(CreateTableRequest) returns (Table) { + option (google.api.http) = { + post: "/v2/{parent=projects/*/instances/*}/tables" + body: "*" + }; + } + + // Creates a new table from the specified snapshot. The target table must + // not exist. The snapshot and the table must be in the same instance. + // + // Note: This is a private alpha release of Cloud Bigtable snapshots. This + // feature is not currently available to most Cloud Bigtable customers. This + // feature might be changed in backward-incompatible ways and is not + // recommended for production use. It is not subject to any SLA or deprecation + // policy. + rpc CreateTableFromSnapshot(CreateTableFromSnapshotRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot" + body: "*" + }; + } + + // Lists all tables served from a specified instance. + rpc ListTables(ListTablesRequest) returns (ListTablesResponse) { + option (google.api.http) = { + get: "/v2/{parent=projects/*/instances/*}/tables" + }; + } + + // Gets metadata information about the specified table. + rpc GetTable(GetTableRequest) returns (Table) { + option (google.api.http) = { + get: "/v2/{name=projects/*/instances/*/tables/*}" + }; + } + + // Permanently deletes a specified table and all of its data. + rpc DeleteTable(DeleteTableRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v2/{name=projects/*/instances/*/tables/*}" + }; + } + + // Performs a series of column family modifications on the specified table. + // Either all or none of the modifications will occur before this method + // returns, but data requests received prior to that point may see a table + // where only some modifications have taken effect. + rpc ModifyColumnFamilies(ModifyColumnFamiliesRequest) returns (Table) { + option (google.api.http) = { + post: "/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies" + body: "*" + }; + } + + // Permanently drop/delete a row range from a specified table. The request can + // specify whether to delete all rows in a table, or only those that match a + // particular prefix. 
+ rpc DropRowRange(DropRowRangeRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + post: "/v2/{name=projects/*/instances/*/tables/*}:dropRowRange" + body: "*" + }; + } + + // Generates a consistency token for a Table, which can be used in + // CheckConsistency to check whether mutations to the table that finished + // before this call started have been replicated. The tokens will be available + // for 90 days. + rpc GenerateConsistencyToken(GenerateConsistencyTokenRequest) returns (GenerateConsistencyTokenResponse) { + option (google.api.http) = { + post: "/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken" + body: "*" + }; + } + + // Checks replication consistency based on a consistency token, that is, if + // replication has caught up based on the conditions specified in the token + // and the check request. + rpc CheckConsistency(CheckConsistencyRequest) returns (CheckConsistencyResponse) { + option (google.api.http) = { + post: "/v2/{name=projects/*/instances/*/tables/*}:checkConsistency" + body: "*" + }; + } + + // Creates a new snapshot in the specified cluster from the specified + // source table. The cluster and the table must be in the same instance. + // + // Note: This is a private alpha release of Cloud Bigtable snapshots. This + // feature is not currently available to most Cloud Bigtable customers. This + // feature might be changed in backward-incompatible ways and is not + // recommended for production use. It is not subject to any SLA or deprecation + // policy. + rpc SnapshotTable(SnapshotTableRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v2/{name=projects/*/instances/*/tables/*}:snapshot" + body: "*" + }; + } + + // Gets metadata information about the specified snapshot. + // + // Note: This is a private alpha release of Cloud Bigtable snapshots. This + // feature is not currently available to most Cloud Bigtable customers. This + // feature might be changed in backward-incompatible ways and is not + // recommended for production use. It is not subject to any SLA or deprecation + // policy. + rpc GetSnapshot(GetSnapshotRequest) returns (Snapshot) { + option (google.api.http) = { + get: "/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}" + }; + } + + // Lists all snapshots associated with the specified cluster. + // + // Note: This is a private alpha release of Cloud Bigtable snapshots. This + // feature is not currently available to most Cloud Bigtable customers. This + // feature might be changed in backward-incompatible ways and is not + // recommended for production use. It is not subject to any SLA or deprecation + // policy. + rpc ListSnapshots(ListSnapshotsRequest) returns (ListSnapshotsResponse) { + option (google.api.http) = { + get: "/v2/{parent=projects/*/instances/*/clusters/*}/snapshots" + }; + } + + // Permanently deletes the specified snapshot. + // + // Note: This is a private alpha release of Cloud Bigtable snapshots. This + // feature is not currently available to most Cloud Bigtable customers. This + // feature might be changed in backward-incompatible ways and is not + // recommended for production use. It is not subject to any SLA or deprecation + // policy. 
+ rpc DeleteSnapshot(DeleteSnapshotRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}" + }; + } +} + +// Request message for +// [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] +message CreateTableRequest { + // An initial split point for a newly created table. + message Split { + // Row key to use as an initial tablet boundary. + bytes key = 1; + } + + // The unique name of the instance in which to create the table. + // Values are of the form `projects//instances/`. + string parent = 1; + + // The name by which the new table should be referred to within the parent + // instance, e.g., `foobar` rather than `/tables/foobar`. + string table_id = 2; + + // The Table to create. + Table table = 3; + + // The optional list of row keys that will be used to initially split the + // table into several tablets (tablets are similar to HBase regions). + // Given two split keys, `s1` and `s2`, three tablets will be created, + // spanning the key ranges: `[, s1), [s1, s2), [s2, )`. + // + // Example: + // + // * Row keys := `["a", "apple", "custom", "customer_1", "customer_2",` + // `"other", "zz"]` + // * initial_split_keys := `["apple", "customer_1", "customer_2", "other"]` + // * Key assignment: + // - Tablet 1 `[, apple) => {"a"}.` + // - Tablet 2 `[apple, customer_1) => {"apple", "custom"}.` + // - Tablet 3 `[customer_1, customer_2) => {"customer_1"}.` + // - Tablet 4 `[customer_2, other) => {"customer_2"}.` + // - Tablet 5 `[other, ) => {"other", "zz"}.` + repeated Split initial_splits = 4; +} + +// Request message for +// [google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot] +// +// Note: This is a private alpha release of Cloud Bigtable snapshots. This +// feature is not currently available to most Cloud Bigtable customers. This +// feature might be changed in backward-incompatible ways and is not recommended +// for production use. It is not subject to any SLA or deprecation policy. +message CreateTableFromSnapshotRequest { + // The unique name of the instance in which to create the table. + // Values are of the form `projects//instances/`. + string parent = 1; + + // The name by which the new table should be referred to within the parent + // instance, e.g., `foobar` rather than `/tables/foobar`. + string table_id = 2; + + // The unique name of the snapshot from which to restore the table. The + // snapshot and the table must be in the same instance. + // Values are of the form + // `projects//instances//clusters//snapshots/`. + string source_snapshot = 3; +} + +// Request message for +// [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] +message DropRowRangeRequest { + // The unique name of the table on which to drop a range of rows. + // Values are of the form + // `projects//instances//tables/
`. + string name = 1; + + // Delete all rows or by prefix. + oneof target { + // Delete all rows that start with this row key prefix. Prefix cannot be + // zero length. + bytes row_key_prefix = 2; + + // Delete all rows in the table. Setting this to false is a no-op. + bool delete_all_data_from_table = 3; + } +} + +// Request message for +// [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] +message ListTablesRequest { + // The unique name of the instance for which tables should be listed. + // Values are of the form `projects//instances/`. + string parent = 1; + + // The view to be applied to the returned tables' fields. + // Defaults to `NAME_ONLY` if unspecified; no others are currently supported. + Table.View view = 2; + + // Maximum number of results per page. + // CURRENTLY UNIMPLEMENTED AND IGNORED. + int32 page_size = 4; + + // The value of `next_page_token` returned by a previous call. + string page_token = 3; +} + +// Response message for +// [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] +message ListTablesResponse { + // The tables present in the requested instance. + repeated Table tables = 1; + + // Set if not all tables could be returned in a single response. + // Pass this value to `page_token` in another request to get the next + // page of results. + string next_page_token = 2; +} + +// Request message for +// [google.bigtable.admin.v2.BigtableTableAdmin.GetTable][google.bigtable.admin.v2.BigtableTableAdmin.GetTable] +message GetTableRequest { + // The unique name of the requested table. + // Values are of the form + // `projects//instances//tables/
`. + string name = 1; + + // The view to be applied to the returned table's fields. + // Defaults to `SCHEMA_VIEW` if unspecified. + Table.View view = 2; +} + +// Request message for +// [google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable] +message DeleteTableRequest { + // The unique name of the table to be deleted. + // Values are of the form + // `projects//instances//tables/
`. + string name = 1; +} + +// Request message for +// [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies] +message ModifyColumnFamiliesRequest { + // A create, update, or delete of a particular column family. + message Modification { + // The ID of the column family to be modified. + string id = 1; + + // Column familiy modifications. + oneof mod { + // Create a new column family with the specified schema, or fail if + // one already exists with the given ID. + ColumnFamily create = 2; + + // Update an existing column family to the specified schema, or fail + // if no column family exists with the given ID. + ColumnFamily update = 3; + + // Drop (delete) the column family with the given ID, or fail if no such + // family exists. + bool drop = 4; + } + } + + // The unique name of the table whose families should be modified. + // Values are of the form + // `projects//instances//tables/
`. + string name = 1; + + // Modifications to be atomically applied to the specified table's families. + // Entries are applied in order, meaning that earlier modifications can be + // masked by later ones (in the case of repeated updates to the same family, + // for example). + repeated Modification modifications = 2; +} + +// Request message for +// [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] +message GenerateConsistencyTokenRequest { + // The unique name of the Table for which to create a consistency token. + // Values are of the form + // `projects//instances//tables/
`. + string name = 1; +} + +// Response message for +// [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] +message GenerateConsistencyTokenResponse { + // The generated consistency token. + string consistency_token = 1; +} + +// Request message for +// [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] +message CheckConsistencyRequest { + // The unique name of the Table for which to check replication consistency. + // Values are of the form + // `projects//instances//tables/
`. + string name = 1; + + // The token created using GenerateConsistencyToken for the Table. + string consistency_token = 2; +} + +// Response message for +// [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] +message CheckConsistencyResponse { + // True only if the token is consistent. A token is consistent if replication + // has caught up with the restrictions specified in the request. + bool consistent = 1; +} + +// Request message for +// [google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable][google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable] +// +// Note: This is a private alpha release of Cloud Bigtable snapshots. This +// feature is not currently available to most Cloud Bigtable customers. This +// feature might be changed in backward-incompatible ways and is not recommended +// for production use. It is not subject to any SLA or deprecation policy. +message SnapshotTableRequest { + // The unique name of the table to have the snapshot taken. + // Values are of the form + // `projects//instances//tables/
`. + string name = 1; + + // The name of the cluster where the snapshot will be created in. + // Values are of the form + // `projects//instances//clusters/`. + string cluster = 2; + + // The ID by which the new snapshot should be referred to within the parent + // cluster, e.g., `mysnapshot` of the form: `[_a-zA-Z0-9][-_.a-zA-Z0-9]*` + // rather than + // `projects//instances//clusters//snapshots/mysnapshot`. + string snapshot_id = 3; + + // The amount of time that the new snapshot can stay active after it is + // created. Once 'ttl' expires, the snapshot will get deleted. The maximum + // amount of time a snapshot can stay active is 7 days. If 'ttl' is not + // specified, the default value of 24 hours will be used. + google.protobuf.Duration ttl = 4; + + // Description of the snapshot. + string description = 5; +} + +// Request message for +// [google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot] +// +// Note: This is a private alpha release of Cloud Bigtable snapshots. This +// feature is not currently available to most Cloud Bigtable customers. This +// feature might be changed in backward-incompatible ways and is not recommended +// for production use. It is not subject to any SLA or deprecation policy. +message GetSnapshotRequest { + // The unique name of the requested snapshot. + // Values are of the form + // `projects//instances//clusters//snapshots/`. + string name = 1; +} + +// Request message for +// [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] +// +// Note: This is a private alpha release of Cloud Bigtable snapshots. This +// feature is not currently available to most Cloud Bigtable customers. This +// feature might be changed in backward-incompatible ways and is not recommended +// for production use. It is not subject to any SLA or deprecation policy. +message ListSnapshotsRequest { + // The unique name of the cluster for which snapshots should be listed. + // Values are of the form + // `projects//instances//clusters/`. + // Use ` = '-'` to list snapshots for all clusters in an instance, + // e.g., `projects//instances//clusters/-`. + string parent = 1; + + // The maximum number of snapshots to return per page. + // CURRENTLY UNIMPLEMENTED AND IGNORED. + int32 page_size = 2; + + // The value of `next_page_token` returned by a previous call. + string page_token = 3; +} + +// Response message for +// [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] +// +// Note: This is a private alpha release of Cloud Bigtable snapshots. This +// feature is not currently available to most Cloud Bigtable customers. This +// feature might be changed in backward-incompatible ways and is not recommended +// for production use. It is not subject to any SLA or deprecation policy. +message ListSnapshotsResponse { + // The snapshots present in the requested cluster. + repeated Snapshot snapshots = 1; + + // Set if not all snapshots could be returned in a single response. + // Pass this value to `page_token` in another request to get the next + // page of results. + string next_page_token = 2; +} + +// Request message for +// [google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot] +// +// Note: This is a private alpha release of Cloud Bigtable snapshots. This +// feature is not currently available to most Cloud Bigtable customers. 
This +// feature might be changed in backward-incompatible ways and is not recommended +// for production use. It is not subject to any SLA or deprecation policy. +message DeleteSnapshotRequest { + // The unique name of the snapshot to be deleted. + // Values are of the form + // `projects//instances//clusters//snapshots/`. + string name = 1; +} + +// The metadata for the Operation returned by SnapshotTable. +// +// Note: This is a private alpha release of Cloud Bigtable snapshots. This +// feature is not currently available to most Cloud Bigtable customers. This +// feature might be changed in backward-incompatible ways and is not recommended +// for production use. It is not subject to any SLA or deprecation policy. +message SnapshotTableMetadata { + // The request that prompted the initiation of this SnapshotTable operation. + SnapshotTableRequest original_request = 1; + + // The time at which the original request was received. + google.protobuf.Timestamp request_time = 2; + + // The time at which the operation failed or was completed successfully. + google.protobuf.Timestamp finish_time = 3; +} + +// The metadata for the Operation returned by CreateTableFromSnapshot. +// +// Note: This is a private alpha release of Cloud Bigtable snapshots. This +// feature is not currently available to most Cloud Bigtable customers. This +// feature might be changed in backward-incompatible ways and is not recommended +// for production use. It is not subject to any SLA or deprecation policy. +message CreateTableFromSnapshotMetadata { + // The request that prompted the initiation of this CreateTableFromSnapshot + // operation. + CreateTableFromSnapshotRequest original_request = 1; + + // The time at which the original request was received. + google.protobuf.Timestamp request_time = 2; + + // The time at which the operation failed or was completed successfully. + google.protobuf.Timestamp finish_time = 3; +} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_data.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_data.proto new file mode 100644 index 000000000000..e4efb74f560e --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_data.proto @@ -0,0 +1,126 @@ +// Copyright 2017 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.bigtable.admin.table.v1; + +import "google/longrunning/operations.proto"; +import "google/protobuf/duration.proto"; + +option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/table/v1;table"; +option java_multiple_files = true; +option java_outer_classname = "BigtableTableDataProto"; +option java_package = "com.google.bigtable.admin.table.v1"; + + +// A collection of user data indexed by row, column, and timestamp. +// Each table is served using the resources of its parent cluster. 
+message Table { + enum TimestampGranularity { + MILLIS = 0; + } + + // A unique identifier of the form + // /tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]* + string name = 1; + + // If this Table is in the process of being created, the Operation used to + // track its progress. As long as this operation is present, the Table will + // not accept any Table Admin or Read/Write requests. + google.longrunning.Operation current_operation = 2; + + // The column families configured for this table, mapped by column family id. + map column_families = 3; + + // The granularity (e.g. MILLIS, MICROS) at which timestamps are stored in + // this table. Timestamps not matching the granularity will be rejected. + // Cannot be changed once the table is created. + TimestampGranularity granularity = 4; +} + +// A set of columns within a table which share a common configuration. +message ColumnFamily { + // A unique identifier of the form /columnFamilies/[-_.a-zA-Z0-9]+ + // The last segment is the same as the "name" field in + // google.bigtable.v1.Family. + string name = 1; + + // Garbage collection expression specified by the following grammar: + // GC = EXPR + // | "" ; + // EXPR = EXPR, "||", EXPR (* lowest precedence *) + // | EXPR, "&&", EXPR + // | "(", EXPR, ")" (* highest precedence *) + // | PROP ; + // PROP = "version() >", NUM32 + // | "age() >", NUM64, [ UNIT ] ; + // NUM32 = non-zero-digit { digit } ; (* # NUM32 <= 2^32 - 1 *) + // NUM64 = non-zero-digit { digit } ; (* # NUM64 <= 2^63 - 1 *) + // UNIT = "d" | "h" | "m" (* d=days, h=hours, m=minutes, else micros *) + // GC expressions can be up to 500 characters in length + // + // The different types of PROP are defined as follows: + // version() - cell index, counting from most recent and starting at 1 + // age() - age of the cell (current time minus cell timestamp) + // + // Example: "version() > 3 || (age() > 3d && version() > 1)" + // drop cells beyond the most recent three, and drop cells older than three + // days unless they're the most recent cell in the row/column + // + // Garbage collection executes opportunistically in the background, and so + // it's possible for reads to return a cell even if it matches the active GC + // expression for its family. + string gc_expression = 2; + + // Garbage collection rule specified as a protobuf. + // Supersedes `gc_expression`. + // Must serialize to at most 500 bytes. + // + // NOTE: Garbage collection executes opportunistically in the background, and + // so it's possible for reads to return a cell even if it matches the active + // GC expression for its family. + GcRule gc_rule = 3; +} + +// Rule for determining which cells to delete during garbage collection. +message GcRule { + // A GcRule which deletes cells matching all of the given rules. + message Intersection { + // Only delete cells which would be deleted by every element of `rules`. + repeated GcRule rules = 1; + } + + // A GcRule which deletes cells matching any of the given rules. + message Union { + // Delete cells which would be deleted by any element of `rules`. + repeated GcRule rules = 1; + } + + oneof rule { + // Delete all cells in a column except the most recent N. + int32 max_num_versions = 1; + + // Delete cells in a column older than the given age. + // Values must be at least one millisecond, and will be truncated to + // microsecond granularity. + google.protobuf.Duration max_age = 2; + + // Delete cells that would be deleted by every nested rule. 
+ Intersection intersection = 3; + + // Delete cells that would be deleted by any nested rule. + Union union = 4; + } +} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_service.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_service.proto new file mode 100644 index 000000000000..6e968fee17c1 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_service.proto @@ -0,0 +1,80 @@ +// Copyright 2017 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.bigtable.admin.table.v1; + +import "google/api/annotations.proto"; +import "google/bigtable/admin/table/v1/bigtable_table_data.proto"; +import "google/bigtable/admin/table/v1/bigtable_table_service_messages.proto"; +import "google/protobuf/empty.proto"; + +option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/table/v1;table"; +option java_multiple_files = true; +option java_outer_classname = "BigtableTableServicesProto"; +option java_package = "com.google.bigtable.admin.table.v1"; + + +// Service for creating, configuring, and deleting Cloud Bigtable tables. +// Provides access to the table schemas only, not the data stored within the tables. +service BigtableTableService { + // Creates a new table, to be served from a specified cluster. + // The table can be created with a full set of initial column families, + // specified in the request. + rpc CreateTable(CreateTableRequest) returns (Table) { + option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*}/tables" body: "*" }; + } + + // Lists the names of all tables served from a specified cluster. + rpc ListTables(ListTablesRequest) returns (ListTablesResponse) { + option (google.api.http) = { get: "/v1/{name=projects/*/zones/*/clusters/*}/tables" }; + } + + // Gets the schema of the specified table, including its column families. + rpc GetTable(GetTableRequest) returns (Table) { + option (google.api.http) = { get: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}" }; + } + + // Permanently deletes a specified table and all of its data. + rpc DeleteTable(DeleteTableRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { delete: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}" }; + } + + // Changes the name of a specified table. + // Cannot be used to move tables between clusters, zones, or projects. + rpc RenameTable(RenameTableRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}:rename" body: "*" }; + } + + // Creates a new column family within a specified table. + rpc CreateColumnFamily(CreateColumnFamilyRequest) returns (ColumnFamily) { + option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}/columnFamilies" body: "*" }; + } + + // Changes the configuration of a specified column family. 
+ rpc UpdateColumnFamily(ColumnFamily) returns (ColumnFamily) { + option (google.api.http) = { put: "/v1/{name=projects/*/zones/*/clusters/*/tables/*/columnFamilies/*}" body: "*" }; + } + + // Permanently deletes a specified column family and all of its data. + rpc DeleteColumnFamily(DeleteColumnFamilyRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { delete: "/v1/{name=projects/*/zones/*/clusters/*/tables/*/columnFamilies/*}" }; + } + + // Delete all rows in a table corresponding to a particular prefix + rpc BulkDeleteRows(BulkDeleteRowsRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}:bulkDeleteRows" body: "*" }; + } +} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_service_messages.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_service_messages.proto new file mode 100644 index 000000000000..617ede65592f --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_service_messages.proto @@ -0,0 +1,116 @@ +// Copyright 2017 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.bigtable.admin.table.v1; + +import "google/bigtable/admin/table/v1/bigtable_table_data.proto"; + +option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/table/v1;table"; +option java_multiple_files = true; +option java_outer_classname = "BigtableTableServiceMessagesProto"; +option java_package = "com.google.bigtable.admin.table.v1"; + + +message CreateTableRequest { + // The unique name of the cluster in which to create the new table. + string name = 1; + + // The name by which the new table should be referred to within the cluster, + // e.g. "foobar" rather than "/tables/foobar". + string table_id = 2; + + // The Table to create. The `name` field of the Table and all of its + // ColumnFamilies must be left blank, and will be populated in the response. + Table table = 3; + + // The optional list of row keys that will be used to initially split the + // table into several tablets (Tablets are similar to HBase regions). + // Given two split keys, "s1" and "s2", three tablets will be created, + // spanning the key ranges: [, s1), [s1, s2), [s2, ). + // + // Example: + // * Row keys := ["a", "apple", "custom", "customer_1", "customer_2", + // "other", "zz"] + // * initial_split_keys := ["apple", "customer_1", "customer_2", "other"] + // * Key assignment: + // - Tablet 1 [, apple) => {"a"}. + // - Tablet 2 [apple, customer_1) => {"apple", "custom"}. + // - Tablet 3 [customer_1, customer_2) => {"customer_1"}. + // - Tablet 4 [customer_2, other) => {"customer_2"}. + // - Tablet 5 [other, ) => {"other", "zz"}. + repeated string initial_split_keys = 4; +} + +message ListTablesRequest { + // The unique name of the cluster for which tables should be listed. 
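For orientation, pre-splitting via initial_split_keys as described above is what Table.create() in this package's high-level client passes through. The following is only a rough sketch, assuming an existing instance and application-default credentials; the project, instance and table IDs are placeholders, and the exact create() keyword set has varied across releases of google-cloud-bigtable.

    # Sketch: create a pre-split table with the client shipped in this package.
    # Assumes an existing Bigtable instance; identifiers are placeholders.
    from google.cloud import bigtable

    client = bigtable.Client(project='my-project', admin=True)
    instance = client.instance('my-instance')
    table = instance.table('my-table')

    # Mirrors initial_split_keys above: three split points yield four tablets
    # covering [, apple), [apple, customer_1), [customer_1, other), [other, ).
    table.create(initial_split_keys=[b'apple', b'customer_1', b'other'])
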
+ string name = 1; +} + +message ListTablesResponse { + // The tables present in the requested cluster. + // At present, only the names of the tables are populated. + repeated Table tables = 1; +} + +message GetTableRequest { + // The unique name of the requested table. + string name = 1; +} + +message DeleteTableRequest { + // The unique name of the table to be deleted. + string name = 1; +} + +message RenameTableRequest { + // The current unique name of the table. + string name = 1; + + // The new name by which the table should be referred to within its containing + // cluster, e.g. "foobar" rather than "/tables/foobar". + string new_id = 2; +} + +message CreateColumnFamilyRequest { + // The unique name of the table in which to create the new column family. + string name = 1; + + // The name by which the new column family should be referred to within the + // table, e.g. "foobar" rather than "/columnFamilies/foobar". + string column_family_id = 2; + + // The column family to create. The `name` field must be left blank. + ColumnFamily column_family = 3; +} + +message DeleteColumnFamilyRequest { + // The unique name of the column family to be deleted. + string name = 1; +} + +message BulkDeleteRowsRequest { + // The unique name of the table on which to perform the bulk delete + string table_name = 1; + + oneof target { + // Delete all rows that start with this row key prefix. Prefix cannot be + // zero length. + bytes row_key_prefix = 2; + + // Delete all rows in the table. Setting this to false is a no-op. + bool delete_all_data_from_table = 3; + } +} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common.proto new file mode 100644 index 000000000000..0ece12780eb9 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common.proto @@ -0,0 +1,41 @@ +// Copyright 2018 Google LLC. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +syntax = "proto3"; + +package google.bigtable.admin.v2; + +import "google/api/annotations.proto"; +import "google/protobuf/timestamp.proto"; + +option csharp_namespace = "Google.Cloud.Bigtable.Admin.V2"; +option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/v2;admin"; +option java_multiple_files = true; +option java_outer_classname = "CommonProto"; +option java_package = "com.google.bigtable.admin.v2"; +option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2"; + + +// Storage media types for persisting Bigtable data. +enum StorageType { + // The user did not specify a storage type. + STORAGE_TYPE_UNSPECIFIED = 0; + + // Flash (SSD) storage should be used. + SSD = 1; + + // Magnetic drive (HDD) storage should be used. 
+ HDD = 2; +} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance.proto new file mode 100644 index 000000000000..bb69b1f66d42 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance.proto @@ -0,0 +1,208 @@ +// Copyright 2018 Google LLC. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +syntax = "proto3"; + +package google.bigtable.admin.v2; + +import "google/api/annotations.proto"; +import "google/bigtable/admin/v2/common.proto"; + +option csharp_namespace = "Google.Cloud.Bigtable.Admin.V2"; +option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/v2;admin"; +option java_multiple_files = true; +option java_outer_classname = "InstanceProto"; +option java_package = "com.google.bigtable.admin.v2"; +option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2"; + + +// A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and +// the resources that serve them. +// All tables in an instance are served from a single +// [Cluster][google.bigtable.admin.v2.Cluster]. +message Instance { + // Possible states of an instance. + enum State { + // The state of the instance could not be determined. + STATE_NOT_KNOWN = 0; + + // The instance has been successfully created and can serve requests + // to its tables. + READY = 1; + + // The instance is currently being created, and may be destroyed + // if the creation process encounters an error. + CREATING = 2; + } + + // The type of the instance. + enum Type { + // The type of the instance is unspecified. If set when creating an + // instance, a `PRODUCTION` instance will be created. If set when updating + // an instance, the type will be left unchanged. + TYPE_UNSPECIFIED = 0; + + // An instance meant for production use. `serve_nodes` must be set + // on the cluster. + PRODUCTION = 1; + + // The instance is meant for development and testing purposes only; it has + // no performance or uptime guarantees and is not covered by SLA. + // After a development instance is created, it can be upgraded by + // updating the instance to type `PRODUCTION`. An instance created + // as a production instance cannot be changed to a development instance. + // When creating a development instance, `serve_nodes` on the cluster must + // not be set. + DEVELOPMENT = 2; + } + + // (`OutputOnly`) + // The unique name of the instance. Values are of the form + // `projects//instances/[a-z][a-z0-9\\-]+[a-z0-9]`. + string name = 1; + + // The descriptive name for this instance as it appears in UIs. + // Can be changed at any time, but should be kept globally unique + // to avoid confusion. + string display_name = 2; + + // (`OutputOnly`) + // The current state of the instance. + State state = 3; + + // The type of the instance. Defaults to `PRODUCTION`. 
+ Type type = 4; + + // Labels are a flexible and lightweight mechanism for organizing cloud + // resources into groups that reflect a customer's organizational needs and + // deployment strategies. They can be used to filter resources and aggregate + // metrics. + // + // * Label keys must be between 1 and 63 characters long and must conform to + // the regular expression: `[\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}`. + // * Label values must be between 0 and 63 characters long and must conform to + // the regular expression: `[\p{Ll}\p{Lo}\p{N}_-]{0,63}`. + // * No more than 64 labels can be associated with a given resource. + // * Keys and values must both be under 128 bytes. + map labels = 5; +} + +// A resizable group of nodes in a particular cloud location, capable +// of serving all [Tables][google.bigtable.admin.v2.Table] in the parent +// [Instance][google.bigtable.admin.v2.Instance]. +message Cluster { + // Possible states of a cluster. + enum State { + // The state of the cluster could not be determined. + STATE_NOT_KNOWN = 0; + + // The cluster has been successfully created and is ready to serve requests. + READY = 1; + + // The cluster is currently being created, and may be destroyed + // if the creation process encounters an error. + // A cluster may not be able to serve requests while being created. + CREATING = 2; + + // The cluster is currently being resized, and may revert to its previous + // node count if the process encounters an error. + // A cluster is still capable of serving requests while being resized, + // but may exhibit performance as if its number of allocated nodes is + // between the starting and requested states. + RESIZING = 3; + + // The cluster has no backing nodes. The data (tables) still + // exist, but no operations can be performed on the cluster. + DISABLED = 4; + } + + // (`OutputOnly`) + // The unique name of the cluster. Values are of the form + // `projects//instances//clusters/[a-z][-a-z0-9]*`. + string name = 1; + + // (`CreationOnly`) + // The location where this cluster's nodes and storage reside. For best + // performance, clients should be located as close as possible to this + // cluster. Currently only zones are supported, so values should be of the + // form `projects//locations/`. + string location = 2; + + // (`OutputOnly`) + // The current state of the cluster. + State state = 3; + + // The number of nodes allocated to this cluster. More nodes enable higher + // throughput and more consistent performance. + int32 serve_nodes = 4; + + // (`CreationOnly`) + // The type of storage used by this cluster to serve its + // parent instance's tables, unless explicitly overridden. + StorageType default_storage_type = 5; +} + +// A configuration object describing how Cloud Bigtable should treat traffic +// from a particular end user application. +message AppProfile { + // Read/write requests may be routed to any cluster in the instance, and will + // fail over to another cluster in the event of transient errors or delays. + // Choosing this option sacrifices read-your-writes consistency to improve + // availability. + message MultiClusterRoutingUseAny { + + } + + // Unconditionally routes all read/write requests to a specific cluster. + // This option preserves read-your-writes consistency, but does not improve + // availability. + message SingleClusterRouting { + // The cluster to which read/write requests should be routed. 
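As a quick orientation for the Instance resource defined above: the admin half of this package's client addresses it roughly as in the sketch below. Project and instance IDs are placeholders, and because instance-creation arguments (zone, node count, instance type) have shifted across releases, only the read path is shown here.

    # Sketch: address an existing Instance through the admin client.
    # Assumes application-default credentials; identifiers are placeholders.
    from google.cloud import bigtable

    client = bigtable.Client(project='my-project', admin=True)

    # Wraps projects/<project>/instances/my-instance.
    instance = client.instance('my-instance')

    # reload() issues GetInstance and refreshes the local metadata
    # (e.g. display_name) from the API.
    instance.reload()
    print(instance.display_name)
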
+ string cluster_id = 1; + + // Whether or not `CheckAndMutateRow` and `ReadModifyWriteRow` requests are + // allowed by this app profile. It is unsafe to send these requests to + // the same table/row/column in multiple clusters. + bool allow_transactional_writes = 2; + } + + // (`OutputOnly`) + // The unique name of the app profile. Values are of the form + // `projects//instances//appProfiles/[_a-zA-Z0-9][-_.a-zA-Z0-9]*`. + string name = 1; + + // Strongly validated etag for optimistic concurrency control. Preserve the + // value returned from `GetAppProfile` when calling `UpdateAppProfile` to + // fail the request if there has been a modification in the mean time. The + // `update_mask` of the request need not include `etag` for this protection + // to apply. + // See [Wikipedia](https://en.wikipedia.org/wiki/HTTP_ETag) and + // [RFC 7232](https://tools.ietf.org/html/rfc7232#section-2.3) for more + // details. + string etag = 2; + + // Optional long form description of the use case for this AppProfile. + string description = 3; + + // The routing policy for all read/write requests which use this app profile. + // A value must be explicitly set. + oneof routing_policy { + // Use a multi-cluster routing policy that may pick any cluster. + MultiClusterRoutingUseAny multi_cluster_routing_use_any = 5; + + // Use a single-cluster routing policy. + SingleClusterRouting single_cluster_routing = 6; + } +} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table.proto new file mode 100644 index 000000000000..5d4374effc59 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table.proto @@ -0,0 +1,221 @@ +// Copyright 2018 Google LLC. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +syntax = "proto3"; + +package google.bigtable.admin.v2; + +import "google/api/annotations.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; + +option csharp_namespace = "Google.Cloud.Bigtable.Admin.V2"; +option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/v2;admin"; +option java_multiple_files = true; +option java_outer_classname = "TableProto"; +option java_package = "com.google.bigtable.admin.v2"; +option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2"; + + +// A collection of user data indexed by row, column, and timestamp. +// Each table is served using the resources of its parent cluster. +message Table { + // The state of a table's data in a particular cluster. + message ClusterState { + // Table replication states. + enum ReplicationState { + // The replication state of the table is unknown in this cluster. + STATE_NOT_KNOWN = 0; + + // The cluster was recently created, and the table must finish copying + // over pre-existing data from other clusters before it can begin + // receiving live replication updates and serving Data API requests. 
+ INITIALIZING = 1; + + // The table is temporarily unable to serve Data API requests from this + // cluster due to planned internal maintenance. + PLANNED_MAINTENANCE = 2; + + // The table is temporarily unable to serve Data API requests from this + // cluster due to unplanned or emergency maintenance. + UNPLANNED_MAINTENANCE = 3; + + // The table can serve Data API requests from this cluster. Depending on + // replication delay, reads may not immediately reflect the state of the + // table in other clusters. + READY = 4; + } + + // (`OutputOnly`) + // The state of replication for the table in this cluster. + ReplicationState replication_state = 1; + } + + // Possible timestamp granularities to use when keeping multiple versions + // of data in a table. + enum TimestampGranularity { + // The user did not specify a granularity. Should not be returned. + // When specified during table creation, MILLIS will be used. + TIMESTAMP_GRANULARITY_UNSPECIFIED = 0; + + // The table keeps data versioned at a granularity of 1ms. + MILLIS = 1; + } + + // Defines a view over a table's fields. + enum View { + // Uses the default view for each method as documented in its request. + VIEW_UNSPECIFIED = 0; + + // Only populates `name`. + NAME_ONLY = 1; + + // Only populates `name` and fields related to the table's schema. + SCHEMA_VIEW = 2; + + // Only populates `name` and fields related to the table's + // replication state. + REPLICATION_VIEW = 3; + + // Populates all fields. + FULL = 4; + } + + // (`OutputOnly`) + // The unique name of the table. Values are of the form + // `projects//instances//tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]*`. + // Views: `NAME_ONLY`, `SCHEMA_VIEW`, `REPLICATION_VIEW`, `FULL` + string name = 1; + + // (`OutputOnly`) + // Map from cluster ID to per-cluster table state. + // If it could not be determined whether or not the table has data in a + // particular cluster (for example, if its zone is unavailable), then + // there will be an entry for the cluster with UNKNOWN `replication_status`. + // Views: `REPLICATION_VIEW`, `FULL` + map cluster_states = 2; + + // (`CreationOnly`) + // The column families configured for this table, mapped by column family ID. + // Views: `SCHEMA_VIEW`, `FULL` + map column_families = 3; + + // (`CreationOnly`) + // The granularity (i.e. `MILLIS`) at which timestamps are stored in + // this table. Timestamps not matching the granularity will be rejected. + // If unspecified at creation time, the value will be set to `MILLIS`. + // Views: `SCHEMA_VIEW`, `FULL` + TimestampGranularity granularity = 4; +} + +// A set of columns within a table which share a common configuration. +message ColumnFamily { + // Garbage collection rule specified as a protobuf. + // Must serialize to at most 500 bytes. + // + // NOTE: Garbage collection executes opportunistically in the background, and + // so it's possible for reads to return a cell even if it matches the active + // GC expression for its family. + GcRule gc_rule = 1; +} + +// Rule for determining which cells to delete during garbage collection. +message GcRule { + // A GcRule which deletes cells matching all of the given rules. + message Intersection { + // Only delete cells which would be deleted by every element of `rules`. + repeated GcRule rules = 1; + } + + // A GcRule which deletes cells matching any of the given rules. + message Union { + // Delete cells which would be deleted by any element of `rules`. + repeated GcRule rules = 1; + } + + // Garbage collection rules. 
+ oneof rule { + // Delete all cells in a column except the most recent N. + int32 max_num_versions = 1; + + // Delete cells in a column older than the given age. + // Values must be at least one millisecond, and will be truncated to + // microsecond granularity. + google.protobuf.Duration max_age = 2; + + // Delete cells that would be deleted by every nested rule. + Intersection intersection = 3; + + // Delete cells that would be deleted by any nested rule. + Union union = 4; + } +} + +// A snapshot of a table at a particular time. A snapshot can be used as a +// checkpoint for data restoration or a data source for a new table. +// +// Note: This is a private alpha release of Cloud Bigtable snapshots. This +// feature is not currently available to most Cloud Bigtable customers. This +// feature might be changed in backward-incompatible ways and is not recommended +// for production use. It is not subject to any SLA or deprecation policy. +message Snapshot { + // Possible states of a snapshot. + enum State { + // The state of the snapshot could not be determined. + STATE_NOT_KNOWN = 0; + + // The snapshot has been successfully created and can serve all requests. + READY = 1; + + // The snapshot is currently being created, and may be destroyed if the + // creation process encounters an error. A snapshot may not be restored to a + // table while it is being created. + CREATING = 2; + } + + // (`OutputOnly`) + // The unique name of the snapshot. + // Values are of the form + // `projects//instances//clusters//snapshots/`. + string name = 1; + + // (`OutputOnly`) + // The source table at the time the snapshot was taken. + Table source_table = 2; + + // (`OutputOnly`) + // The size of the data in the source table at the time the snapshot was + // taken. In some cases, this value may be computed asynchronously via a + // background process and a placeholder of 0 will be used in the meantime. + int64 data_size_bytes = 3; + + // (`OutputOnly`) + // The time when the snapshot is created. + google.protobuf.Timestamp create_time = 4; + + // (`OutputOnly`) + // The time when the snapshot will be deleted. The maximum amount of time a + // snapshot can stay active is 365 days. If 'ttl' is not specified, + // the default maximum of 365 days will be used. + google.protobuf.Timestamp delete_time = 5; + + // (`OutputOnly`) + // The current state of the snapshot. + State state = 6; + + // (`OutputOnly`) + // Description of the snapshot. + string description = 7; +} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable.proto new file mode 100644 index 000000000000..d800c2c97ab8 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable.proto @@ -0,0 +1,365 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
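The GcRule oneof defined just above (max_num_versions, max_age, and the nested Intersection/Union rules) is what the column-family helpers in this package construct. A short sketch follows, assuming an existing instance and table; all identifiers are placeholders.

    # Sketch: compose GcRule messages with the helpers from this package and
    # apply them to a column family. Identifiers are placeholders.
    import datetime

    from google.cloud import bigtable
    from google.cloud.bigtable.column_family import (
        GCRuleUnion,
        MaxAgeGCRule,
        MaxVersionsGCRule,
    )

    client = bigtable.Client(project='my-project', admin=True)
    table = client.instance('my-instance').table('my-table')

    # Union rule: delete cells older than 7 days OR beyond the 3 most recent
    # versions (GcRule.union with max_age and max_num_versions members).
    gc_rule = GCRuleUnion(rules=[
        MaxAgeGCRule(datetime.timedelta(days=7)),
        MaxVersionsGCRule(3),
    ])

    # Create the 'cf1' column family carrying the rule.
    table.column_family('cf1', gc_rule=gc_rule).create()
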
+ +syntax = "proto3"; + +package google.bigtable.v2; + +import "google/api/annotations.proto"; +import "google/bigtable/v2/data.proto"; +import "google/protobuf/wrappers.proto"; +import "google/rpc/status.proto"; + +option csharp_namespace = "Google.Cloud.Bigtable.V2"; +option go_package = "google.golang.org/genproto/googleapis/bigtable/v2;bigtable"; +option java_multiple_files = true; +option java_outer_classname = "BigtableProto"; +option java_package = "com.google.bigtable.v2"; +option php_namespace = "Google\\Cloud\\Bigtable\\V2"; + + +// Service for reading from and writing to existing Bigtable tables. +service Bigtable { + // Streams back the contents of all requested rows in key order, optionally + // applying the same Reader filter to each. Depending on their size, + // rows and cells may be broken up across multiple responses, but + // atomicity of each row will still be preserved. See the + // ReadRowsResponse documentation for details. + rpc ReadRows(ReadRowsRequest) returns (stream ReadRowsResponse) { + option (google.api.http) = { + post: "/v2/{table_name=projects/*/instances/*/tables/*}:readRows" + body: "*" + }; + } + + // Returns a sample of row keys in the table. The returned row keys will + // delimit contiguous sections of the table of approximately equal size, + // which can be used to break up the data for distributed tasks like + // mapreduces. + rpc SampleRowKeys(SampleRowKeysRequest) returns (stream SampleRowKeysResponse) { + option (google.api.http) = { + get: "/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys" + }; + } + + // Mutates a row atomically. Cells already present in the row are left + // unchanged unless explicitly changed by `mutation`. + rpc MutateRow(MutateRowRequest) returns (MutateRowResponse) { + option (google.api.http) = { + post: "/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow" + body: "*" + }; + } + + // Mutates multiple rows in a batch. Each individual row is mutated + // atomically as in MutateRow, but the entire batch is not executed + // atomically. + rpc MutateRows(MutateRowsRequest) returns (stream MutateRowsResponse) { + option (google.api.http) = { + post: "/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows" + body: "*" + }; + } + + // Mutates a row atomically based on the output of a predicate Reader filter. + rpc CheckAndMutateRow(CheckAndMutateRowRequest) returns (CheckAndMutateRowResponse) { + option (google.api.http) = { + post: "/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow" + body: "*" + }; + } + + // Modifies a row atomically on the server. The method reads the latest + // existing timestamp and value from the specified columns and writes a new + // entry based on pre-defined read/modify/write rules. The new value for the + // timestamp is the greater of the existing timestamp or the current server + // time. The method returns the new contents of all modified cells. + rpc ReadModifyWriteRow(ReadModifyWriteRowRequest) returns (ReadModifyWriteRowResponse) { + option (google.api.http) = { + post: "/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow" + body: "*" + }; + } +} + +// Request message for Bigtable.ReadRows. +message ReadRowsRequest { + // The unique name of the table from which to read. + // Values are of the form + // `projects//instances//tables/
`. + string table_name = 1; + + // This value specifies routing for replication. If not specified, the + // "default" application profile will be used. + string app_profile_id = 5; + + // The row keys and/or ranges to read. If not specified, reads from all rows. + RowSet rows = 2; + + // The filter to apply to the contents of the specified row(s). If unset, + // reads the entirety of each row. + RowFilter filter = 3; + + // The read will terminate after committing to N rows' worth of results. The + // default (zero) is to return all results. + int64 rows_limit = 4; +} + +// Response message for Bigtable.ReadRows. +message ReadRowsResponse { + // Specifies a piece of a row's contents returned as part of the read + // response stream. + message CellChunk { + // The row key for this chunk of data. If the row key is empty, + // this CellChunk is a continuation of the same row as the previous + // CellChunk in the response stream, even if that CellChunk was in a + // previous ReadRowsResponse message. + bytes row_key = 1; + + // The column family name for this chunk of data. If this message + // is not present this CellChunk is a continuation of the same column + // family as the previous CellChunk. The empty string can occur as a + // column family name in a response so clients must check + // explicitly for the presence of this message, not just for + // `family_name.value` being non-empty. + google.protobuf.StringValue family_name = 2; + + // The column qualifier for this chunk of data. If this message + // is not present, this CellChunk is a continuation of the same column + // as the previous CellChunk. Column qualifiers may be empty so + // clients must check for the presence of this message, not just + // for `qualifier.value` being non-empty. + google.protobuf.BytesValue qualifier = 3; + + // The cell's stored timestamp, which also uniquely identifies it + // within its column. Values are always expressed in + // microseconds, but individual tables may set a coarser + // granularity to further restrict the allowed values. For + // example, a table which specifies millisecond granularity will + // only allow values of `timestamp_micros` which are multiples of + // 1000. Timestamps are only set in the first CellChunk per cell + // (for cells split into multiple chunks). + int64 timestamp_micros = 4; + + // Labels applied to the cell by a + // [RowFilter][google.bigtable.v2.RowFilter]. Labels are only set + // on the first CellChunk per cell. + repeated string labels = 5; + + // The value stored in the cell. Cell values can be split across + // multiple CellChunks. In that case only the value field will be + // set in CellChunks after the first: the timestamp and labels + // will only be present in the first CellChunk, even if the first + // CellChunk came in a previous ReadRowsResponse. + bytes value = 6; + + // If this CellChunk is part of a chunked cell value and this is + // not the final chunk of that cell, value_size will be set to the + // total length of the cell value. The client can use this size + // to pre-allocate memory to hold the full cell value. + int32 value_size = 7; + + oneof row_status { + // Indicates that the client should drop all previous chunks for + // `row_key`, as it will be re-read from the beginning. + bool reset_row = 8; + + // Indicates that the client can safely process all previous chunks for + // `row_key`, as its data has been fully read. 
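Client code does not normally touch these CellChunks directly; the row-data helpers in this package reassemble the stream (including reset_row/commit_row handling) into whole rows. A rough sketch of the resulting read path, assuming an existing populated table; identifiers and the 'cf1'/'greeting' cells are placeholders, and consume_all() reflects the earlier releases of this client.

    # Sketch: ReadRows via the Python client; the library reassembles the
    # streamed CellChunks into complete rows. Identifiers are placeholders.
    from google.cloud import bigtable
    from google.cloud.bigtable.row_filters import CellsColumnLimitFilter

    client = bigtable.Client(project='my-project')
    table = client.instance('my-instance').table('my-table')

    # Single row (a ReadRowsRequest limited to one key); None if absent.
    row = table.read_row(b'user-0001', filter_=CellsColumnLimitFilter(1))

    # Range scan; start/end keys and limit become the RowSet and rows_limit.
    partial_rows = table.read_rows(start_key=b'user-', end_key=b'user0', limit=10)
    partial_rows.consume_all()  # older releases; newer ones allow iteration
    for row_key, row_data in partial_rows.rows.items():
        # Cells are keyed by column family, then qualifier.
        print(row_key, row_data.cells['cf1'][b'greeting'][0].value)
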
+ bool commit_row = 9; + } + } + + repeated CellChunk chunks = 1; + + // Optionally the server might return the row key of the last row it + // has scanned. The client can use this to construct a more + // efficient retry request if needed: any row keys or portions of + // ranges less than this row key can be dropped from the request. + // This is primarily useful for cases where the server has read a + // lot of data that was filtered out since the last committed row + // key, allowing the client to skip that work on a retry. + bytes last_scanned_row_key = 2; +} + +// Request message for Bigtable.SampleRowKeys. +message SampleRowKeysRequest { + // The unique name of the table from which to sample row keys. + // Values are of the form + // `projects//instances//tables/
`. + string table_name = 1; + + // This value specifies routing for replication. If not specified, the + // "default" application profile will be used. + string app_profile_id = 2; +} + +// Response message for Bigtable.SampleRowKeys. +message SampleRowKeysResponse { + // Sorted streamed sequence of sample row keys in the table. The table might + // have contents before the first row key in the list and after the last one, + // but a key containing the empty string indicates "end of table" and will be + // the last response given, if present. + // Note that row keys in this list may not have ever been written to or read + // from, and users should therefore not make any assumptions about the row key + // structure that are specific to their use case. + bytes row_key = 1; + + // Approximate total storage space used by all rows in the table which precede + // `row_key`. Buffering the contents of all rows between two subsequent + // samples would require space roughly equal to the difference in their + // `offset_bytes` fields. + int64 offset_bytes = 2; +} + +// Request message for Bigtable.MutateRow. +message MutateRowRequest { + // The unique name of the table to which the mutation should be applied. + // Values are of the form + // `projects//instances//tables/
`. + string table_name = 1; + + // This value specifies routing for replication. If not specified, the + // "default" application profile will be used. + string app_profile_id = 4; + + // The key of the row to which the mutation should be applied. + bytes row_key = 2; + + // Changes to be atomically applied to the specified row. Entries are applied + // in order, meaning that earlier mutations can be masked by later ones. + // Must contain at least one entry and at most 100000. + repeated Mutation mutations = 3; +} + +// Response message for Bigtable.MutateRow. +message MutateRowResponse { + +} + +// Request message for BigtableService.MutateRows. +message MutateRowsRequest { + message Entry { + // The key of the row to which the `mutations` should be applied. + bytes row_key = 1; + + // Changes to be atomically applied to the specified row. Mutations are + // applied in order, meaning that earlier mutations can be masked by + // later ones. + // You must specify at least one mutation. + repeated Mutation mutations = 2; + } + + // The unique name of the table to which the mutations should be applied. + string table_name = 1; + + // This value specifies routing for replication. If not specified, the + // "default" application profile will be used. + string app_profile_id = 3; + + // The row keys and corresponding mutations to be applied in bulk. + // Each entry is applied as an atomic mutation, but the entries may be + // applied in arbitrary order (even between entries for the same row). + // At least one entry must be specified, and in total the entries can + // contain at most 100000 mutations. + repeated Entry entries = 2; +} + +// Response message for BigtableService.MutateRows. +message MutateRowsResponse { + message Entry { + // The index into the original request's `entries` list of the Entry + // for which a result is being reported. + int64 index = 1; + + // The result of the request Entry identified by `index`. + // Depending on how requests are batched during execution, it is possible + // for one Entry to fail due to an error with another Entry. In the event + // that this occurs, the same error will be reported for both entries. + google.rpc.Status status = 2; + } + + // One or more results for Entries from the batch request. + repeated Entry entries = 1; +} + +// Request message for Bigtable.CheckAndMutateRow. +message CheckAndMutateRowRequest { + // The unique name of the table to which the conditional mutation should be + // applied. + // Values are of the form + // `projects//instances//tables/
`. + string table_name = 1; + + // This value specifies routing for replication. If not specified, the + // "default" application profile will be used. + string app_profile_id = 7; + + // The key of the row to which the conditional mutation should be applied. + bytes row_key = 2; + + // The filter to be applied to the contents of the specified row. Depending + // on whether or not any results are yielded, either `true_mutations` or + // `false_mutations` will be executed. If unset, checks that the row contains + // any values at all. + RowFilter predicate_filter = 6; + + // Changes to be atomically applied to the specified row if `predicate_filter` + // yields at least one cell when applied to `row_key`. Entries are applied in + // order, meaning that earlier mutations can be masked by later ones. + // Must contain at least one entry if `false_mutations` is empty, and at most + // 100000. + repeated Mutation true_mutations = 4; + + // Changes to be atomically applied to the specified row if `predicate_filter` + // does not yield any cells when applied to `row_key`. Entries are applied in + // order, meaning that earlier mutations can be masked by later ones. + // Must contain at least one entry if `true_mutations` is empty, and at most + // 100000. + repeated Mutation false_mutations = 5; +} + +// Response message for Bigtable.CheckAndMutateRow. +message CheckAndMutateRowResponse { + // Whether or not the request's `predicate_filter` yielded any results for + // the specified row. + bool predicate_matched = 1; +} + +// Request message for Bigtable.ReadModifyWriteRow. +message ReadModifyWriteRowRequest { + // The unique name of the table to which the read/modify/write rules should be + // applied. + // Values are of the form + // `projects//instances//tables/
`. + string table_name = 1; + + // This value specifies routing for replication. If not specified, the + // "default" application profile will be used. + string app_profile_id = 4; + + // The key of the row to which the read/modify/write rules should be applied. + bytes row_key = 2; + + // Rules specifying how the specified row's contents are to be transformed + // into writes. Entries are applied in order, meaning that earlier rules will + // affect the results of later ones. + repeated ReadModifyWriteRule rules = 3; +} + +// Response message for Bigtable.ReadModifyWriteRow. +message ReadModifyWriteRowResponse { + // A Row containing the new contents of all cells modified by the request. + Row row = 1; +} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_cluster_data.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_cluster_data.proto new file mode 100644 index 000000000000..ca3b663d8661 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_cluster_data.proto @@ -0,0 +1,94 @@ +// Copyright 2017 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.bigtable.admin.cluster.v1; + +import "google/api/annotations.proto"; +import "google/longrunning/operations.proto"; +import "google/protobuf/timestamp.proto"; + +option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/cluster/v1;cluster"; +option java_multiple_files = true; +option java_outer_classname = "BigtableClusterDataProto"; +option java_package = "com.google.bigtable.admin.cluster.v1"; + + +// A physical location in which a particular project can allocate Cloud BigTable +// resources. +message Zone { + // Possible states of a zone. + enum Status { + // The state of the zone is unknown or unspecified. + UNKNOWN = 0; + + // The zone is in a good state. + OK = 1; + + // The zone is down for planned maintenance. + PLANNED_MAINTENANCE = 2; + + // The zone is down for emergency or unplanned maintenance. + EMERGENCY_MAINENANCE = 3; + } + + // A permanent unique identifier for the zone. + // Values are of the form projects//zones/[a-z][-a-z0-9]* + string name = 1; + + // The name of this zone as it appears in UIs. + string display_name = 2; + + // The current state of this zone. + Status status = 3; +} + +// An isolated set of Cloud BigTable resources on which tables can be hosted. +message Cluster { + // A permanent unique identifier for the cluster. For technical reasons, the + // zone in which the cluster resides is included here. + // Values are of the form + // projects//zones//clusters/[a-z][-a-z0-9]* + string name = 1; + + // The operation currently running on the cluster, if any. + // This cannot be set directly, only through CreateCluster, UpdateCluster, + // or UndeleteCluster. Calls to these methods will be rejected if + // "current_operation" is already set. 
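Looping back to the data-plane write RPCs defined in bigtable.proto above: the row classes in this package map onto MutateRow, CheckAndMutateRow and ReadModifyWriteRow roughly as in the sketch below. Identifiers are placeholders, the 'stats' column family is assumed to exist, and the keyword names reflect the row and row-filter modules added earlier in this series, so treat the exact signatures as assumptions.

    # Sketch: the three write paths from bigtable.proto via this package's
    # row classes. Identifiers are placeholders.
    from google.cloud import bigtable
    from google.cloud.bigtable.row_filters import ColumnQualifierRegexFilter

    client = bigtable.Client(project='my-project')
    table = client.instance('my-instance').table('my-table')

    # MutateRow: a direct row batches mutations locally and sends them
    # atomically on commit().
    row = table.row(b'user-0001')
    row.set_cell('stats', b'city', b'Berlin')
    row.commit()

    # CheckAndMutateRow: a conditional row applies mutations flagged with
    # state=True only when the predicate filter matches at least one cell.
    cond_row = table.row(b'user-0001', filter_=ColumnQualifierRegexFilter(b'city'))
    cond_row.set_cell('stats', b'seen_city', b'1', state=True)
    cond_row.commit()

    # ReadModifyWriteRow: an append row applies server-side transformations
    # and commit() returns the new cell contents.
    append_row = table.row(b'user-0001', append=True)
    append_row.increment_cell_value('stats', b'visits', 1)
    new_values = append_row.commit()
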
+ google.longrunning.Operation current_operation = 3; + + // The descriptive name for this cluster as it appears in UIs. + // Must be unique per zone. + string display_name = 4; + + // The number of serve nodes allocated to this cluster. + int32 serve_nodes = 5; + + // What storage type to use for tables in this cluster. Only configurable at + // cluster creation time. If unspecified, STORAGE_SSD will be used. + StorageType default_storage_type = 8; +} + +enum StorageType { + // The storage type used is unspecified. + STORAGE_UNSPECIFIED = 0; + + // Data will be stored in SSD, providing low and consistent latencies. + STORAGE_SSD = 1; + + // Data will be stored in HDD, providing high and less predictable + // latencies. + STORAGE_HDD = 2; +} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_cluster_service.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_cluster_service.proto new file mode 100644 index 000000000000..038fcc46397f --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_cluster_service.proto @@ -0,0 +1,130 @@ +// Copyright 2017 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.bigtable.admin.cluster.v1; + +import "google/api/annotations.proto"; +import "google/bigtable/admin/cluster/v1/bigtable_cluster_data.proto"; +import "google/bigtable/admin/cluster/v1/bigtable_cluster_service_messages.proto"; +import "google/longrunning/operations.proto"; +import "google/protobuf/empty.proto"; + +option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/cluster/v1;cluster"; +option java_multiple_files = true; +option java_outer_classname = "BigtableClusterServicesProto"; +option java_package = "com.google.bigtable.admin.cluster.v1"; + + +// Service for managing zonal Cloud Bigtable resources. +service BigtableClusterService { + // Lists the supported zones for the given project. + rpc ListZones(ListZonesRequest) returns (ListZonesResponse) { + option (google.api.http) = { get: "/v1/{name=projects/*}/zones" }; + } + + // Gets information about a particular cluster. + rpc GetCluster(GetClusterRequest) returns (Cluster) { + option (google.api.http) = { get: "/v1/{name=projects/*/zones/*/clusters/*}" }; + } + + // Lists all clusters in the given project, along with any zones for which + // cluster information could not be retrieved. + rpc ListClusters(ListClustersRequest) returns (ListClustersResponse) { + option (google.api.http) = { get: "/v1/{name=projects/*}/aggregated/clusters" }; + } + + // Creates a cluster and begins preparing it to begin serving. The returned + // cluster embeds as its "current_operation" a long-running operation which + // can be used to track the progress of turning up the new cluster. + // Immediately upon completion of this request: + // * The cluster will be readable via the API, with all requested attributes + // but no allocated resources. 
+ // Until completion of the embedded operation: + // * Cancelling the operation will render the cluster immediately unreadable + // via the API. + // * All other attempts to modify or delete the cluster will be rejected. + // Upon completion of the embedded operation: + // * Billing for all successfully-allocated resources will begin (some types + // may have lower than the requested levels). + // * New tables can be created in the cluster. + // * The cluster's allocated resource levels will be readable via the API. + // The embedded operation's "metadata" field type is + // [CreateClusterMetadata][google.bigtable.admin.cluster.v1.CreateClusterMetadata] The embedded operation's "response" field type is + // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful. + rpc CreateCluster(CreateClusterRequest) returns (Cluster) { + option (google.api.http) = { post: "/v1/{name=projects/*/zones/*}/clusters" body: "*" }; + } + + // Updates a cluster, and begins allocating or releasing resources as + // requested. The returned cluster embeds as its "current_operation" a + // long-running operation which can be used to track the progress of updating + // the cluster. + // Immediately upon completion of this request: + // * For resource types where a decrease in the cluster's allocation has been + // requested, billing will be based on the newly-requested level. + // Until completion of the embedded operation: + // * Cancelling the operation will set its metadata's "cancelled_at_time", + // and begin restoring resources to their pre-request values. The operation + // is guaranteed to succeed at undoing all resource changes, after which + // point it will terminate with a CANCELLED status. + // * All other attempts to modify or delete the cluster will be rejected. + // * Reading the cluster via the API will continue to give the pre-request + // resource levels. + // Upon completion of the embedded operation: + // * Billing will begin for all successfully-allocated resources (some types + // may have lower than the requested levels). + // * All newly-reserved resources will be available for serving the cluster's + // tables. + // * The cluster's new resource levels will be readable via the API. + // [UpdateClusterMetadata][google.bigtable.admin.cluster.v1.UpdateClusterMetadata] The embedded operation's "response" field type is + // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful. + rpc UpdateCluster(Cluster) returns (Cluster) { + option (google.api.http) = { put: "/v1/{name=projects/*/zones/*/clusters/*}" body: "*" }; + } + + // Marks a cluster and all of its tables for permanent deletion in 7 days. + // Immediately upon completion of the request: + // * Billing will cease for all of the cluster's reserved resources. + // * The cluster's "delete_time" field will be set 7 days in the future. + // Soon afterward: + // * All tables within the cluster will become unavailable. + // Prior to the cluster's "delete_time": + // * The cluster can be recovered with a call to UndeleteCluster. + // * All other attempts to modify or delete the cluster will be rejected. + // At the cluster's "delete_time": + // * The cluster and *all of its tables* will immediately and irrevocably + // disappear from the API, and their data will be permanently deleted. 
+  rpc DeleteCluster(DeleteClusterRequest) returns (google.protobuf.Empty) {
+    option (google.api.http) = { delete: "/v1/{name=projects/*/zones/*/clusters/*}" };
+  }
+
+  // Cancels the scheduled deletion of a cluster and begins preparing it to
+  // resume serving. The returned operation will also be embedded as the
+  // cluster's "current_operation".
+  // Immediately upon completion of this request:
+  //  * The cluster's "delete_time" field will be unset, protecting it from
+  //    automatic deletion.
+  // Until completion of the returned operation:
+  //  * The operation cannot be cancelled.
+  // Upon completion of the returned operation:
+  //  * Billing for the cluster's resources will resume.
+  //  * All tables within the cluster will be available.
+  // [UndeleteClusterMetadata][google.bigtable.admin.cluster.v1.UndeleteClusterMetadata] The embedded operation's "response" field type is
+  // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful.
+  rpc UndeleteCluster(UndeleteClusterRequest) returns (google.longrunning.Operation) {
+    option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*}:undelete" body: "" };
+  }
+}
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_cluster_service_messages.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_cluster_service_messages.proto
new file mode 100644
index 000000000000..518d14dac8e0
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_cluster_service_messages.proto
@@ -0,0 +1,141 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.bigtable.admin.cluster.v1;
+
+import "google/bigtable/admin/cluster/v1/bigtable_cluster_data.proto";
+import "google/protobuf/timestamp.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/cluster/v1;cluster";
+option java_multiple_files = true;
+option java_outer_classname = "BigtableClusterServiceMessagesProto";
+option java_package = "com.google.bigtable.admin.cluster.v1";
+
+
+// Request message for BigtableClusterService.ListZones.
+message ListZonesRequest {
+  // The unique name of the project for which a list of supported zones is
+  // requested.
+  // Values are of the form projects/
+  string name = 1;
+}
+
+// Response message for BigtableClusterService.ListZones.
+message ListZonesResponse {
+  // The list of requested zones.
+  repeated Zone zones = 1;
+}
+
+// Request message for BigtableClusterService.GetCluster.
+message GetClusterRequest {
+  // The unique name of the requested cluster.
+  // Values are of the form projects//zones//clusters/
+  string name = 1;
+}
+
+// Request message for BigtableClusterService.ListClusters.
+message ListClustersRequest {
+  // The unique name of the project for which a list of clusters is requested.
+  // Values are of the form projects/
+  string name = 1;
+}
+
+// Response message for BigtableClusterService.ListClusters.
+message ListClustersResponse { + // The list of requested Clusters. + repeated Cluster clusters = 1; + + // The zones for which clusters could not be retrieved. + repeated Zone failed_zones = 2; +} + +// Request message for BigtableClusterService.CreateCluster. +message CreateClusterRequest { + // The unique name of the zone in which to create the cluster. + // Values are of the form projects//zones/ + string name = 1; + + // The id to be used when referring to the new cluster within its zone, + // e.g. just the "test-cluster" section of the full name + // "projects//zones//clusters/test-cluster". + string cluster_id = 2; + + // The cluster to create. + // The "name", "delete_time", and "current_operation" fields must be left + // blank. + Cluster cluster = 3; +} + +// Metadata type for the operation returned by +// BigtableClusterService.CreateCluster. +message CreateClusterMetadata { + // The request which prompted the creation of this operation. + CreateClusterRequest original_request = 1; + + // The time at which original_request was received. + google.protobuf.Timestamp request_time = 2; + + // The time at which this operation failed or was completed successfully. + google.protobuf.Timestamp finish_time = 3; +} + +// Metadata type for the operation returned by +// BigtableClusterService.UpdateCluster. +message UpdateClusterMetadata { + // The request which prompted the creation of this operation. + Cluster original_request = 1; + + // The time at which original_request was received. + google.protobuf.Timestamp request_time = 2; + + // The time at which this operation was cancelled. If set, this operation is + // in the process of undoing itself (which is guaranteed to succeed) and + // cannot be cancelled again. + google.protobuf.Timestamp cancel_time = 3; + + // The time at which this operation failed or was completed successfully. + google.protobuf.Timestamp finish_time = 4; +} + +// Request message for BigtableClusterService.DeleteCluster. +message DeleteClusterRequest { + // The unique name of the cluster to be deleted. + // Values are of the form projects//zones//clusters/ + string name = 1; +} + +// Request message for BigtableClusterService.UndeleteCluster. +message UndeleteClusterRequest { + // The unique name of the cluster to be un-deleted. + // Values are of the form projects//zones//clusters/ + string name = 1; +} + +// Metadata type for the operation returned by +// BigtableClusterService.UndeleteCluster. +message UndeleteClusterMetadata { + // The time at which the original request was received. + google.protobuf.Timestamp request_time = 1; + + // The time at which this operation failed or was completed successfully. + google.protobuf.Timestamp finish_time = 2; +} + +// Metadata type for operations initiated by the V2 BigtableAdmin service. +// More complete information for such operations is available via the V2 API. +message V2OperationMetadata { + +} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_data.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_data.proto new file mode 100644 index 000000000000..bd063a925f45 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_data.proto @@ -0,0 +1,516 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.bigtable.v1; + +option go_package = "google.golang.org/genproto/googleapis/bigtable/v1;bigtable"; +option java_multiple_files = true; +option java_outer_classname = "BigtableDataProto"; +option java_package = "com.google.bigtable.v1"; + + +// Specifies the complete (requested) contents of a single row of a table. +// Rows which exceed 256MiB in size cannot be read in full. +message Row { + // The unique key which identifies this row within its table. This is the same + // key that's used to identify the row in, for example, a MutateRowRequest. + // May contain any non-empty byte string up to 4KiB in length. + bytes key = 1; + + // May be empty, but only if the entire row is empty. + // The mutual ordering of column families is not specified. + repeated Family families = 2; +} + +// Specifies (some of) the contents of a single row/column family of a table. +message Family { + // The unique key which identifies this family within its row. This is the + // same key that's used to identify the family in, for example, a RowFilter + // which sets its "family_name_regex_filter" field. + // Must match [-_.a-zA-Z0-9]+, except that AggregatingRowProcessors may + // produce cells in a sentinel family with an empty name. + // Must be no greater than 64 characters in length. + string name = 1; + + // Must not be empty. Sorted in order of increasing "qualifier". + repeated Column columns = 2; +} + +// Specifies (some of) the contents of a single row/column of a table. +message Column { + // The unique key which identifies this column within its family. This is the + // same key that's used to identify the column in, for example, a RowFilter + // which sets its "column_qualifier_regex_filter" field. + // May contain any byte string, including the empty string, up to 16kiB in + // length. + bytes qualifier = 1; + + // Must not be empty. Sorted in order of decreasing "timestamp_micros". + repeated Cell cells = 2; +} + +// Specifies (some of) the contents of a single row/column/timestamp of a table. +message Cell { + // The cell's stored timestamp, which also uniquely identifies it within + // its column. + // Values are always expressed in microseconds, but individual tables may set + // a coarser "granularity" to further restrict the allowed values. For + // example, a table which specifies millisecond granularity will only allow + // values of "timestamp_micros" which are multiples of 1000. + int64 timestamp_micros = 1; + + // The value stored in the cell. + // May contain any byte string, including the empty string, up to 100MiB in + // length. + bytes value = 2; + + // Labels applied to the cell by a [RowFilter][google.bigtable.v1.RowFilter]. + repeated string labels = 3; +} + +// Specifies a contiguous range of rows. +message RowRange { + // Inclusive lower bound. If left empty, interpreted as the empty string. + bytes start_key = 2; + + // Exclusive upper bound. If left empty, interpreted as infinity. + bytes end_key = 3; +} + +// Specifies a non-contiguous set of rows. +message RowSet { + // Single rows included in the set. 
+ repeated bytes row_keys = 1; + + // Contiguous row ranges included in the set. + repeated RowRange row_ranges = 2; +} + +// Specifies a contiguous range of columns within a single column family. +// The range spans from : to +// :, where both bounds can be either inclusive or +// exclusive. +message ColumnRange { + // The name of the column family within which this range falls. + string family_name = 1; + + // The column qualifier at which to start the range (within 'column_family'). + // If neither field is set, interpreted as the empty string, inclusive. + oneof start_qualifier { + // Used when giving an inclusive lower bound for the range. + bytes start_qualifier_inclusive = 2; + + // Used when giving an exclusive lower bound for the range. + bytes start_qualifier_exclusive = 3; + } + + // The column qualifier at which to end the range (within 'column_family'). + // If neither field is set, interpreted as the infinite string, exclusive. + oneof end_qualifier { + // Used when giving an inclusive upper bound for the range. + bytes end_qualifier_inclusive = 4; + + // Used when giving an exclusive upper bound for the range. + bytes end_qualifier_exclusive = 5; + } +} + +// Specified a contiguous range of microsecond timestamps. +message TimestampRange { + // Inclusive lower bound. If left empty, interpreted as 0. + int64 start_timestamp_micros = 1; + + // Exclusive upper bound. If left empty, interpreted as infinity. + int64 end_timestamp_micros = 2; +} + +// Specifies a contiguous range of raw byte values. +message ValueRange { + // The value at which to start the range. + // If neither field is set, interpreted as the empty string, inclusive. + oneof start_value { + // Used when giving an inclusive lower bound for the range. + bytes start_value_inclusive = 1; + + // Used when giving an exclusive lower bound for the range. + bytes start_value_exclusive = 2; + } + + // The value at which to end the range. + // If neither field is set, interpreted as the infinite string, exclusive. + oneof end_value { + // Used when giving an inclusive upper bound for the range. + bytes end_value_inclusive = 3; + + // Used when giving an exclusive upper bound for the range. + bytes end_value_exclusive = 4; + } +} + +// Takes a row as input and produces an alternate view of the row based on +// specified rules. For example, a RowFilter might trim down a row to include +// just the cells from columns matching a given regular expression, or might +// return all the cells of a row but not their values. More complicated filters +// can be composed out of these components to express requests such as, "within +// every column of a particular family, give just the two most recent cells +// which are older than timestamp X." +// +// There are two broad categories of RowFilters (true filters and transformers), +// as well as two ways to compose simple filters into more complex ones +// (chains and interleaves). They work as follows: +// +// * True filters alter the input row by excluding some of its cells wholesale +// from the output row. An example of a true filter is the "value_regex_filter", +// which excludes cells whose values don't match the specified pattern. All +// regex true filters use RE2 syntax (https://github.com/google/re2/wiki/Syntax) +// in raw byte mode (RE2::Latin1), and are evaluated as full matches. An +// important point to keep in mind is that RE2(.) is equivalent by default to +// RE2([^\n]), meaning that it does not match newlines. 
When attempting to match +// an arbitrary byte, you should therefore use the escape sequence '\C', which +// may need to be further escaped as '\\C' in your client language. +// +// * Transformers alter the input row by changing the values of some of its +// cells in the output, without excluding them completely. Currently, the only +// supported transformer is the "strip_value_transformer", which replaces every +// cell's value with the empty string. +// +// * Chains and interleaves are described in more detail in the +// RowFilter.Chain and RowFilter.Interleave documentation. +// +// The total serialized size of a RowFilter message must not +// exceed 4096 bytes, and RowFilters may not be nested within each other +// (in Chains or Interleaves) to a depth of more than 20. +message RowFilter { + // A RowFilter which sends rows through several RowFilters in sequence. + message Chain { + // The elements of "filters" are chained together to process the input row: + // in row -> f(0) -> intermediate row -> f(1) -> ... -> f(N) -> out row + // The full chain is executed atomically. + repeated RowFilter filters = 1; + } + + // A RowFilter which sends each row to each of several component + // RowFilters and interleaves the results. + message Interleave { + // The elements of "filters" all process a copy of the input row, and the + // results are pooled, sorted, and combined into a single output row. + // If multiple cells are produced with the same column and timestamp, + // they will all appear in the output row in an unspecified mutual order. + // Consider the following example, with three filters: + // + // input row + // | + // ----------------------------------------------------- + // | | | + // f(0) f(1) f(2) + // | | | + // 1: foo,bar,10,x foo,bar,10,z far,bar,7,a + // 2: foo,blah,11,z far,blah,5,x far,blah,5,x + // | | | + // ----------------------------------------------------- + // | + // 1: foo,bar,10,z // could have switched with #2 + // 2: foo,bar,10,x // could have switched with #1 + // 3: foo,blah,11,z + // 4: far,bar,7,a + // 5: far,blah,5,x // identical to #6 + // 6: far,blah,5,x // identical to #5 + // All interleaved filters are executed atomically. + repeated RowFilter filters = 1; + } + + // A RowFilter which evaluates one of two possible RowFilters, depending on + // whether or not a predicate RowFilter outputs any cells from the input row. + // + // IMPORTANT NOTE: The predicate filter does not execute atomically with the + // true and false filters, which may lead to inconsistent or unexpected + // results. Additionally, Condition filters have poor performance, especially + // when filters are set for the false condition. + message Condition { + // If "predicate_filter" outputs any cells, then "true_filter" will be + // evaluated on the input row. Otherwise, "false_filter" will be evaluated. + RowFilter predicate_filter = 1; + + // The filter to apply to the input row if "predicate_filter" returns any + // results. If not provided, no results will be returned in the true case. + RowFilter true_filter = 2; + + // The filter to apply to the input row if "predicate_filter" does not + // return any results. If not provided, no results will be returned in the + // false case. + RowFilter false_filter = 3; + } + + // Which of the possible RowFilter types to apply. If none are set, this + // RowFilter returns all cells in the input row. + oneof filter { + // Applies several RowFilters to the data in sequence, progressively + // narrowing the results. 
+ Chain chain = 1; + + // Applies several RowFilters to the data in parallel and combines the + // results. + Interleave interleave = 2; + + // Applies one of two possible RowFilters to the data based on the output of + // a predicate RowFilter. + Condition condition = 3; + + // ADVANCED USE ONLY. + // Hook for introspection into the RowFilter. Outputs all cells directly to + // the output of the read rather than to any parent filter. Consider the + // following example: + // + // Chain( + // FamilyRegex("A"), + // Interleave( + // All(), + // Chain(Label("foo"), Sink()) + // ), + // QualifierRegex("B") + // ) + // + // A,A,1,w + // A,B,2,x + // B,B,4,z + // | + // FamilyRegex("A") + // | + // A,A,1,w + // A,B,2,x + // | + // +------------+-------------+ + // | | + // All() Label(foo) + // | | + // A,A,1,w A,A,1,w,labels:[foo] + // A,B,2,x A,B,2,x,labels:[foo] + // | | + // | Sink() --------------+ + // | | | + // +------------+ x------+ A,A,1,w,labels:[foo] + // | A,B,2,x,labels:[foo] + // A,A,1,w | + // A,B,2,x | + // | | + // QualifierRegex("B") | + // | | + // A,B,2,x | + // | | + // +--------------------------------+ + // | + // A,A,1,w,labels:[foo] + // A,B,2,x,labels:[foo] // could be switched + // A,B,2,x // could be switched + // + // Despite being excluded by the qualifier filter, a copy of every cell + // that reaches the sink is present in the final result. + // + // As with an [Interleave][google.bigtable.v1.RowFilter.Interleave], + // duplicate cells are possible, and appear in an unspecified mutual order. + // In this case we have a duplicate with column "A:B" and timestamp 2, + // because one copy passed through the all filter while the other was + // passed through the label and sink. Note that one copy has label "foo", + // while the other does not. + // + // Cannot be used within the `predicate_filter`, `true_filter`, or + // `false_filter` of a [Condition][google.bigtable.v1.RowFilter.Condition]. + bool sink = 16; + + // Matches all cells, regardless of input. Functionally equivalent to + // leaving `filter` unset, but included for completeness. + bool pass_all_filter = 17; + + // Does not match any cells, regardless of input. Useful for temporarily + // disabling just part of a filter. + bool block_all_filter = 18; + + // Matches only cells from rows whose keys satisfy the given RE2 regex. In + // other words, passes through the entire row when the key matches, and + // otherwise produces an empty row. + // Note that, since row keys can contain arbitrary bytes, the '\C' escape + // sequence must be used if a true wildcard is desired. The '.' character + // will not match the new line character '\n', which may be present in a + // binary key. + bytes row_key_regex_filter = 4; + + // Matches all cells from a row with probability p, and matches no cells + // from the row with probability 1-p. + double row_sample_filter = 14; + + // Matches only cells from columns whose families satisfy the given RE2 + // regex. For technical reasons, the regex must not contain the ':' + // character, even if it is not being used as a literal. + // Note that, since column families cannot contain the new line character + // '\n', it is sufficient to use '.' as a full wildcard when matching + // column family names. + string family_name_regex_filter = 5; + + // Matches only cells from columns whose qualifiers satisfy the given RE2 + // regex. + // Note that, since column qualifiers can contain arbitrary bytes, the '\C' + // escape sequence must be used if a true wildcard is desired. The '.' 
+ // character will not match the new line character '\n', which may be + // present in a binary qualifier. + bytes column_qualifier_regex_filter = 6; + + // Matches only cells from columns within the given range. + ColumnRange column_range_filter = 7; + + // Matches only cells with timestamps within the given range. + TimestampRange timestamp_range_filter = 8; + + // Matches only cells with values that satisfy the given regular expression. + // Note that, since cell values can contain arbitrary bytes, the '\C' escape + // sequence must be used if a true wildcard is desired. The '.' character + // will not match the new line character '\n', which may be present in a + // binary value. + bytes value_regex_filter = 9; + + // Matches only cells with values that fall within the given range. + ValueRange value_range_filter = 15; + + // Skips the first N cells of each row, matching all subsequent cells. + // If duplicate cells are present, as is possible when using an Interleave, + // each copy of the cell is counted separately. + int32 cells_per_row_offset_filter = 10; + + // Matches only the first N cells of each row. + // If duplicate cells are present, as is possible when using an Interleave, + // each copy of the cell is counted separately. + int32 cells_per_row_limit_filter = 11; + + // Matches only the most recent N cells within each column. For example, + // if N=2, this filter would match column "foo:bar" at timestamps 10 and 9, + // skip all earlier cells in "foo:bar", and then begin matching again in + // column "foo:bar2". + // If duplicate cells are present, as is possible when using an Interleave, + // each copy of the cell is counted separately. + int32 cells_per_column_limit_filter = 12; + + // Replaces each cell's value with the empty string. + bool strip_value_transformer = 13; + + // Applies the given label to all cells in the output row. This allows + // the client to determine which results were produced from which part of + // the filter. + // + // Values must be at most 15 characters in length, and match the RE2 + // pattern [a-z0-9\\-]+ + // + // Due to a technical limitation, it is not currently possible to apply + // multiple labels to a cell. As a result, a Chain may have no more than + // one sub-filter which contains a apply_label_transformer. It is okay for + // an Interleave to contain multiple apply_label_transformers, as they will + // be applied to separate copies of the input. This may be relaxed in the + // future. + string apply_label_transformer = 19; + } +} + +// Specifies a particular change to be made to the contents of a row. +message Mutation { + // A Mutation which sets the value of the specified cell. + message SetCell { + // The name of the family into which new data should be written. + // Must match [-_.a-zA-Z0-9]+ + string family_name = 1; + + // The qualifier of the column into which new data should be written. + // Can be any byte string, including the empty string. + bytes column_qualifier = 2; + + // The timestamp of the cell into which new data should be written. + // Use -1 for current Bigtable server time. + // Otherwise, the client should set this value itself, noting that the + // default value is a timestamp of zero if the field is left unspecified. + // Values must match the "granularity" of the table (e.g. micros, millis). + int64 timestamp_micros = 3; + + // The value to be written into the specified cell. 
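To make the RowFilter vocabulary above concrete, the request from the RowFilter introduction ("within every column of a particular family, give just the most recent cells") could be assembled with this package's Python helpers roughly as follows. This is a minimal sketch: the class names from google.cloud.bigtable.row_filters are assumed to match this snapshot of the library, and the family name is illustrative.

    from google.cloud.bigtable import row_filters

    # Chain: keep cells in family "stats", then at most the 2 newest cells per
    # column, then replace every remaining value with the empty string.
    filter_ = row_filters.RowFilterChain(filters=[
        row_filters.FamilyNameRegexFilter('stats'),
        row_filters.CellsColumnLimitFilter(2),
        row_filters.StripValueTransformerFilter(True),
    ])

An Interleave would be built the same way with row_filters.RowFilterUnion, and a Condition with row_filters.ConditionalRowFilter.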
+ bytes value = 4; + } + + // A Mutation which deletes cells from the specified column, optionally + // restricting the deletions to a given timestamp range. + message DeleteFromColumn { + // The name of the family from which cells should be deleted. + // Must match [-_.a-zA-Z0-9]+ + string family_name = 1; + + // The qualifier of the column from which cells should be deleted. + // Can be any byte string, including the empty string. + bytes column_qualifier = 2; + + // The range of timestamps within which cells should be deleted. + TimestampRange time_range = 3; + } + + // A Mutation which deletes all cells from the specified column family. + message DeleteFromFamily { + // The name of the family from which cells should be deleted. + // Must match [-_.a-zA-Z0-9]+ + string family_name = 1; + } + + // A Mutation which deletes all cells from the containing row. + message DeleteFromRow { + + } + + // Which of the possible Mutation types to apply. + oneof mutation { + // Set a cell's value. + SetCell set_cell = 1; + + // Deletes cells from a column. + DeleteFromColumn delete_from_column = 2; + + // Deletes cells from a column family. + DeleteFromFamily delete_from_family = 3; + + // Deletes cells from the entire row. + DeleteFromRow delete_from_row = 4; + } +} + +// Specifies an atomic read/modify/write operation on the latest value of the +// specified column. +message ReadModifyWriteRule { + // The name of the family to which the read/modify/write should be applied. + // Must match [-_.a-zA-Z0-9]+ + string family_name = 1; + + // The qualifier of the column to which the read/modify/write should be + // applied. + // Can be any byte string, including the empty string. + bytes column_qualifier = 2; + + // The rule used to determine the column's new latest value from its current + // latest value. + oneof rule { + // Rule specifying that "append_value" be appended to the existing value. + // If the targeted cell is unset, it will be treated as containing the + // empty string. + bytes append_value = 3; + + // Rule specifying that "increment_amount" be added to the existing value. + // If the targeted cell is unset, it will be treated as containing a zero. + // Otherwise, the targeted cell must contain an 8-byte value (interpreted + // as a 64-bit big-endian signed integer), or the entire request will fail. + int64 increment_amount = 4; + } +} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_instance_admin.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_instance_admin.proto new file mode 100644 index 000000000000..ec992ea0f818 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_instance_admin.proto @@ -0,0 +1,456 @@ +// Copyright 2018 Google LLC. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
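One practical consequence of the ReadModifyWriteRule.increment_amount rule defined at the end of bigtable_data.proto above: a cell that will be incremented must already hold an 8-byte big-endian signed integer, or the entire request fails. A minimal sketch of seeding such a counter with this package's Python client (the row/set_cell/commit helpers are assumed to match this snapshot of the library; the row key and family are illustrative):

    import struct
    # `table` is assumed to be an existing google.cloud.bigtable.table.Table.
    row = table.row(b'counter#page-views')                  # illustrative row key
    row.set_cell('stats', b'hits', struct.pack('>q', 0))    # 64-bit big-endian zero
    row.commit()
    # A later ReadModifyWriteRow with increment_amount=1 can now succeed.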
+// + +syntax = "proto3"; + +package google.bigtable.admin.v2; + +import "google/api/annotations.proto"; +import "google/bigtable/admin/v2/instance.proto"; +import "google/iam/v1/iam_policy.proto"; +import "google/iam/v1/policy.proto"; +import "google/longrunning/operations.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/field_mask.proto"; +import "google/protobuf/timestamp.proto"; + +option csharp_namespace = "Google.Cloud.Bigtable.Admin.V2"; +option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/v2;admin"; +option java_multiple_files = true; +option java_outer_classname = "BigtableInstanceAdminProto"; +option java_package = "com.google.bigtable.admin.v2"; +option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2"; + + +// Service for creating, configuring, and deleting Cloud Bigtable Instances and +// Clusters. Provides access to the Instance and Cluster schemas only, not the +// tables' metadata or data stored in those tables. +service BigtableInstanceAdmin { + // Create an instance within a project. + rpc CreateInstance(CreateInstanceRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v2/{parent=projects/*}/instances" + body: "*" + }; + } + + // Gets information about an instance. + rpc GetInstance(GetInstanceRequest) returns (Instance) { + option (google.api.http) = { + get: "/v2/{name=projects/*/instances/*}" + }; + } + + // Lists information about instances in a project. + rpc ListInstances(ListInstancesRequest) returns (ListInstancesResponse) { + option (google.api.http) = { + get: "/v2/{parent=projects/*}/instances" + }; + } + + // Updates an instance within a project. + rpc UpdateInstance(Instance) returns (Instance) { + option (google.api.http) = { + put: "/v2/{name=projects/*/instances/*}" + body: "*" + }; + } + + // Partially updates an instance within a project. + rpc PartialUpdateInstance(PartialUpdateInstanceRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + patch: "/v2/{instance.name=projects/*/instances/*}" + body: "instance" + }; + } + + // Delete an instance from a project. + rpc DeleteInstance(DeleteInstanceRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v2/{name=projects/*/instances/*}" + }; + } + + // Creates a cluster within an instance. + rpc CreateCluster(CreateClusterRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v2/{parent=projects/*/instances/*}/clusters" + body: "cluster" + }; + } + + // Gets information about a cluster. + rpc GetCluster(GetClusterRequest) returns (Cluster) { + option (google.api.http) = { + get: "/v2/{name=projects/*/instances/*/clusters/*}" + }; + } + + // Lists information about clusters in an instance. + rpc ListClusters(ListClustersRequest) returns (ListClustersResponse) { + option (google.api.http) = { + get: "/v2/{parent=projects/*/instances/*}/clusters" + }; + } + + // Updates a cluster within an instance. + rpc UpdateCluster(Cluster) returns (google.longrunning.Operation) { + option (google.api.http) = { + put: "/v2/{name=projects/*/instances/*/clusters/*}" + body: "*" + }; + } + + // Deletes a cluster from an instance. + rpc DeleteCluster(DeleteClusterRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v2/{name=projects/*/instances/*/clusters/*}" + }; + } + + // Creates an app profile within an instance. 
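The PartialUpdateInstance method above relies on a google.protobuf.FieldMask to say which Instance fields are being replaced; a mask listing only `display_name`, for example, leaves every other field untouched. A minimal sketch of building such a mask with the standard protobuf Python API (the surrounding request construction is elided):

    from google.protobuf import field_mask_pb2

    # Only the fields named in `paths` are replaced by PartialUpdateInstance.
    update_mask = field_mask_pb2.FieldMask(paths=['display_name'])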
+ rpc CreateAppProfile(CreateAppProfileRequest) returns (AppProfile) { + option (google.api.http) = { + post: "/v2/{parent=projects/*/instances/*}/appProfiles" + body: "app_profile" + }; + } + + // Gets information about an app profile. + rpc GetAppProfile(GetAppProfileRequest) returns (AppProfile) { + option (google.api.http) = { + get: "/v2/{name=projects/*/instances/*/appProfiles/*}" + }; + } + + // Lists information about app profiles in an instance. + rpc ListAppProfiles(ListAppProfilesRequest) returns (ListAppProfilesResponse) { + option (google.api.http) = { + get: "/v2/{parent=projects/*/instances/*}/appProfiles" + }; + } + + // Updates an app profile within an instance. + rpc UpdateAppProfile(UpdateAppProfileRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + patch: "/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}" + body: "app_profile" + }; + } + + // Deletes an app profile from an instance. + rpc DeleteAppProfile(DeleteAppProfileRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v2/{name=projects/*/instances/*/appProfiles/*}" + }; + } + + // Gets the access control policy for an instance resource. Returns an empty + // policy if an instance exists but does not have a policy set. + rpc GetIamPolicy(google.iam.v1.GetIamPolicyRequest) returns (google.iam.v1.Policy) { + option (google.api.http) = { + post: "/v2/{resource=projects/*/instances/*}:getIamPolicy" + body: "*" + }; + } + + // Sets the access control policy on an instance resource. Replaces any + // existing policy. + rpc SetIamPolicy(google.iam.v1.SetIamPolicyRequest) returns (google.iam.v1.Policy) { + option (google.api.http) = { + post: "/v2/{resource=projects/*/instances/*}:setIamPolicy" + body: "*" + }; + } + + // Returns permissions that the caller has on the specified instance resource. + rpc TestIamPermissions(google.iam.v1.TestIamPermissionsRequest) returns (google.iam.v1.TestIamPermissionsResponse) { + option (google.api.http) = { + post: "/v2/{resource=projects/*/instances/*}:testIamPermissions" + body: "*" + }; + } +} + +// Request message for BigtableInstanceAdmin.CreateInstance. +message CreateInstanceRequest { + // The unique name of the project in which to create the new instance. + // Values are of the form `projects/`. + string parent = 1; + + // The ID to be used when referring to the new instance within its project, + // e.g., just `myinstance` rather than + // `projects/myproject/instances/myinstance`. + string instance_id = 2; + + // The instance to create. + // Fields marked `OutputOnly` must be left blank. + Instance instance = 3; + + // The clusters to be created within the instance, mapped by desired + // cluster ID, e.g., just `mycluster` rather than + // `projects/myproject/instances/myinstance/clusters/mycluster`. + // Fields marked `OutputOnly` must be left blank. + // Currently, at most two clusters can be specified. + map clusters = 4; +} + +// Request message for BigtableInstanceAdmin.GetInstance. +message GetInstanceRequest { + // The unique name of the requested instance. Values are of the form + // `projects//instances/`. + string name = 1; +} + +// Request message for BigtableInstanceAdmin.ListInstances. +message ListInstancesRequest { + // The unique name of the project for which a list of instances is requested. + // Values are of the form `projects/`. + string parent = 1; + + // DEPRECATED: This field is unused and ignored. 
+ string page_token = 2; +} + +// Response message for BigtableInstanceAdmin.ListInstances. +message ListInstancesResponse { + // The list of requested instances. + repeated Instance instances = 1; + + // Locations from which Instance information could not be retrieved, + // due to an outage or some other transient condition. + // Instances whose Clusters are all in one of the failed locations + // may be missing from `instances`, and Instances with at least one + // Cluster in a failed location may only have partial information returned. + // Values are of the form `projects//locations/` + repeated string failed_locations = 2; + + // DEPRECATED: This field is unused and ignored. + string next_page_token = 3; +} + +// Request message for BigtableInstanceAdmin.PartialUpdateInstance. +message PartialUpdateInstanceRequest { + // The Instance which will (partially) replace the current value. + Instance instance = 1; + + // The subset of Instance fields which should be replaced. + // Must be explicitly set. + google.protobuf.FieldMask update_mask = 2; +} + +// Request message for BigtableInstanceAdmin.DeleteInstance. +message DeleteInstanceRequest { + // The unique name of the instance to be deleted. + // Values are of the form `projects//instances/`. + string name = 1; +} + +// Request message for BigtableInstanceAdmin.CreateCluster. +message CreateClusterRequest { + // The unique name of the instance in which to create the new cluster. + // Values are of the form + // `projects//instances/`. + string parent = 1; + + // The ID to be used when referring to the new cluster within its instance, + // e.g., just `mycluster` rather than + // `projects/myproject/instances/myinstance/clusters/mycluster`. + string cluster_id = 2; + + // The cluster to be created. + // Fields marked `OutputOnly` must be left blank. + Cluster cluster = 3; +} + +// Request message for BigtableInstanceAdmin.GetCluster. +message GetClusterRequest { + // The unique name of the requested cluster. Values are of the form + // `projects//instances//clusters/`. + string name = 1; +} + +// Request message for BigtableInstanceAdmin.ListClusters. +message ListClustersRequest { + // The unique name of the instance for which a list of clusters is requested. + // Values are of the form `projects//instances/`. + // Use ` = '-'` to list Clusters for all Instances in a project, + // e.g., `projects/myproject/instances/-`. + string parent = 1; + + // DEPRECATED: This field is unused and ignored. + string page_token = 2; +} + +// Response message for BigtableInstanceAdmin.ListClusters. +message ListClustersResponse { + // The list of requested clusters. + repeated Cluster clusters = 1; + + // Locations from which Cluster information could not be retrieved, + // due to an outage or some other transient condition. + // Clusters from these locations may be missing from `clusters`, + // or may only have partial information returned. + // Values are of the form `projects//locations/` + repeated string failed_locations = 2; + + // DEPRECATED: This field is unused and ignored. + string next_page_token = 3; +} + +// Request message for BigtableInstanceAdmin.DeleteCluster. +message DeleteClusterRequest { + // The unique name of the cluster to be deleted. Values are of the form + // `projects//instances//clusters/`. + string name = 1; +} + +// The metadata for the Operation returned by CreateInstance. +message CreateInstanceMetadata { + // The request that prompted the initiation of this CreateInstance operation. 
+ CreateInstanceRequest original_request = 1; + + // The time at which the original request was received. + google.protobuf.Timestamp request_time = 2; + + // The time at which the operation failed or was completed successfully. + google.protobuf.Timestamp finish_time = 3; +} + +// The metadata for the Operation returned by UpdateInstance. +message UpdateInstanceMetadata { + // The request that prompted the initiation of this UpdateInstance operation. + PartialUpdateInstanceRequest original_request = 1; + + // The time at which the original request was received. + google.protobuf.Timestamp request_time = 2; + + // The time at which the operation failed or was completed successfully. + google.protobuf.Timestamp finish_time = 3; +} + +// The metadata for the Operation returned by CreateCluster. +message CreateClusterMetadata { + // The request that prompted the initiation of this CreateCluster operation. + CreateClusterRequest original_request = 1; + + // The time at which the original request was received. + google.protobuf.Timestamp request_time = 2; + + // The time at which the operation failed or was completed successfully. + google.protobuf.Timestamp finish_time = 3; +} + +// The metadata for the Operation returned by UpdateCluster. +message UpdateClusterMetadata { + // The request that prompted the initiation of this UpdateCluster operation. + Cluster original_request = 1; + + // The time at which the original request was received. + google.protobuf.Timestamp request_time = 2; + + // The time at which the operation failed or was completed successfully. + google.protobuf.Timestamp finish_time = 3; +} + +// Request message for BigtableInstanceAdmin.CreateAppProfile. +message CreateAppProfileRequest { + // The unique name of the instance in which to create the new app profile. + // Values are of the form + // `projects//instances/`. + string parent = 1; + + // The ID to be used when referring to the new app profile within its + // instance, e.g., just `myprofile` rather than + // `projects/myproject/instances/myinstance/appProfiles/myprofile`. + string app_profile_id = 2; + + // The app profile to be created. + // Fields marked `OutputOnly` will be ignored. + AppProfile app_profile = 3; + + // If true, ignore safety checks when creating the app profile. + bool ignore_warnings = 4; +} + +// Request message for BigtableInstanceAdmin.GetAppProfile. +message GetAppProfileRequest { + // The unique name of the requested app profile. Values are of the form + // `projects//instances//appProfiles/`. + string name = 1; +} + +// Request message for BigtableInstanceAdmin.ListAppProfiles. +message ListAppProfilesRequest { + // The unique name of the instance for which a list of app profiles is + // requested. Values are of the form + // `projects//instances/`. + // Use ` = '-'` to list AppProfiles for all Instances in a project, + // e.g., `projects/myproject/instances/-`. + string parent = 1; + + // Maximum number of results per page. + // CURRENTLY UNIMPLEMENTED AND IGNORED. + int32 page_size = 3; + + // The value of `next_page_token` returned by a previous call. + string page_token = 2; +} + +// Response message for BigtableInstanceAdmin.ListAppProfiles. +message ListAppProfilesResponse { + // The list of requested app profiles. + repeated AppProfile app_profiles = 1; + + // Set if not all app profiles could be returned in a single response. + // Pass this value to `page_token` in another request to get the next + // page of results. 
+ string next_page_token = 2; + + // Locations from which AppProfile information could not be retrieved, + // due to an outage or some other transient condition. + // AppProfiles from these locations may be missing from `app_profiles`. + // Values are of the form `projects//locations/` + repeated string failed_locations = 3; +} + +// Request message for BigtableInstanceAdmin.UpdateAppProfile. +message UpdateAppProfileRequest { + // The app profile which will (partially) replace the current value. + AppProfile app_profile = 1; + + // The subset of app profile fields which should be replaced. + // If unset, all fields will be replaced. + google.protobuf.FieldMask update_mask = 2; + + // If true, ignore safety checks when updating the app profile. + bool ignore_warnings = 3; +} + + +// Request message for BigtableInstanceAdmin.DeleteAppProfile. +message DeleteAppProfileRequest { + // The unique name of the app profile to be deleted. Values are of the form + // `projects//instances//appProfiles/`. + string name = 1; + + // If true, ignore safety checks when deleting the app profile. + bool ignore_warnings = 2; +} + +// The metadata for the Operation returned by UpdateAppProfile. +message UpdateAppProfileMetadata { + +} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_service.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_service.proto new file mode 100644 index 000000000000..b1f729517a47 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_service.proto @@ -0,0 +1,91 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.bigtable.v1; + +import "google/api/annotations.proto"; +import "google/bigtable/v1/bigtable_data.proto"; +import "google/bigtable/v1/bigtable_service_messages.proto"; +import "google/protobuf/empty.proto"; + +option go_package = "google.golang.org/genproto/googleapis/bigtable/v1;bigtable"; +option java_generic_services = true; +option java_multiple_files = true; +option java_outer_classname = "BigtableServicesProto"; +option java_package = "com.google.bigtable.v1"; + + +// Service for reading from and writing to existing Bigtables. +service BigtableService { + // Streams back the contents of all requested rows, optionally applying + // the same Reader filter to each. Depending on their size, rows may be + // broken up across multiple responses, but atomicity of each row will still + // be preserved. + rpc ReadRows(ReadRowsRequest) returns (stream ReadRowsResponse) { + option (google.api.http) = { + post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows:read" + body: "*" + }; + } + + // Returns a sample of row keys in the table. The returned row keys will + // delimit contiguous sections of the table of approximately equal size, + // which can be used to break up the data for distributed tasks like + // mapreduces. 
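The sampled keys returned by the SampleRowKeys call declared just below are what make it practical to shard a full-table scan. A minimal sketch using this package's Python client (Table.sample_row_keys() is assumed to stream SampleRowKeysResponse messages as in this snapshot of the library):

    # `table` is assumed to be an existing google.cloud.bigtable.table.Table.
    keys = [sample.row_key for sample in table.sample_row_keys()]
    # Adjacent keys delimit contiguous, roughly equal-sized key ranges; each
    # (start, end) pair below can be scanned by an independent worker.
    ranges = list(zip([b''] + keys, keys))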
+ rpc SampleRowKeys(SampleRowKeysRequest) returns (stream SampleRowKeysResponse) { + option (google.api.http) = { + get: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows:sampleKeys" + }; + } + + // Mutates a row atomically. Cells already present in the row are left + // unchanged unless explicitly changed by 'mutation'. + rpc MutateRow(MutateRowRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:mutate" + body: "*" + }; + } + + // Mutates multiple rows in a batch. Each individual row is mutated + // atomically as in MutateRow, but the entire batch is not executed + // atomically. + rpc MutateRows(MutateRowsRequest) returns (MutateRowsResponse) { + option (google.api.http) = { + post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}:mutateRows" + body: "*" + }; + } + + // Mutates a row atomically based on the output of a predicate Reader filter. + rpc CheckAndMutateRow(CheckAndMutateRowRequest) returns (CheckAndMutateRowResponse) { + option (google.api.http) = { + post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:checkAndMutate" + body: "*" + }; + } + + // Modifies a row atomically, reading the latest existing timestamp/value from + // the specified columns and writing a new value at + // max(existing timestamp, current server time) based on pre-defined + // read/modify/write rules. Returns the new contents of all modified cells. + rpc ReadModifyWriteRow(ReadModifyWriteRowRequest) returns (Row) { + option (google.api.http) = { + post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:readModifyWrite" + body: "*" + }; + } +} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_service_messages.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_service_messages.proto new file mode 100644 index 000000000000..d734ececaec3 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_service_messages.proto @@ -0,0 +1,218 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.bigtable.v1; + +import "google/bigtable/v1/bigtable_data.proto"; +import "google/rpc/status.proto"; + +option go_package = "google.golang.org/genproto/googleapis/bigtable/v1;bigtable"; +option java_multiple_files = true; +option java_outer_classname = "BigtableServiceMessagesProto"; +option java_package = "com.google.bigtable.v1"; + + +// Request message for BigtableServer.ReadRows. +message ReadRowsRequest { + // The unique name of the table from which to read. + string table_name = 1; + + // If neither row_key nor row_range is set, reads from all rows. + oneof target { + // The key of a single row from which to read. + bytes row_key = 2; + + // A range of rows from which to read. + RowRange row_range = 3; + + // A set of rows from which to read. 
Entries need not be in order, and will + // be deduplicated before reading. + // The total serialized size of the set must not exceed 1MB. + RowSet row_set = 8; + } + + // The filter to apply to the contents of the specified row(s). If unset, + // reads the entire table. + RowFilter filter = 5; + + // By default, rows are read sequentially, producing results which are + // guaranteed to arrive in increasing row order. Setting + // "allow_row_interleaving" to true allows multiple rows to be interleaved in + // the response stream, which increases throughput but breaks this guarantee, + // and may force the client to use more memory to buffer partially-received + // rows. Cannot be set to true when specifying "num_rows_limit". + bool allow_row_interleaving = 6; + + // The read will terminate after committing to N rows' worth of results. The + // default (zero) is to return all results. + // Note that "allow_row_interleaving" cannot be set to true when this is set. + int64 num_rows_limit = 7; +} + +// Response message for BigtableService.ReadRows. +message ReadRowsResponse { + // Specifies a piece of a row's contents returned as part of the read + // response stream. + message Chunk { + oneof chunk { + // A subset of the data from a particular row. As long as no "reset_row" + // is received in between, multiple "row_contents" from the same row are + // from the same atomic view of that row, and will be received in the + // expected family/column/timestamp order. + Family row_contents = 1; + + // Indicates that the client should drop all previous chunks for + // "row_key", as it will be re-read from the beginning. + bool reset_row = 2; + + // Indicates that the client can safely process all previous chunks for + // "row_key", as its data has been fully read. + bool commit_row = 3; + } + } + + // The key of the row for which we're receiving data. + // Results will be received in increasing row key order, unless + // "allow_row_interleaving" was specified in the request. + bytes row_key = 1; + + // One or more chunks of the row specified by "row_key". + repeated Chunk chunks = 2; +} + +// Request message for BigtableService.SampleRowKeys. +message SampleRowKeysRequest { + // The unique name of the table from which to sample row keys. + string table_name = 1; +} + +// Response message for BigtableService.SampleRowKeys. +message SampleRowKeysResponse { + // Sorted streamed sequence of sample row keys in the table. The table might + // have contents before the first row key in the list and after the last one, + // but a key containing the empty string indicates "end of table" and will be + // the last response given, if present. + // Note that row keys in this list may not have ever been written to or read + // from, and users should therefore not make any assumptions about the row key + // structure that are specific to their use case. + bytes row_key = 1; + + // Approximate total storage space used by all rows in the table which precede + // "row_key". Buffering the contents of all rows between two subsequent + // samples would require space roughly equal to the difference in their + // "offset_bytes" fields. + int64 offset_bytes = 2; +} + +// Request message for BigtableService.MutateRow. +message MutateRowRequest { + // The unique name of the table to which the mutation should be applied. + string table_name = 1; + + // The key of the row to which the mutation should be applied. + bytes row_key = 2; + + // Changes to be atomically applied to the specified row. 
Entries are applied + // in order, meaning that earlier mutations can be masked by later ones. + // Must contain at least one entry and at most 100000. + repeated Mutation mutations = 3; +} + +// Request message for BigtableService.MutateRows. +message MutateRowsRequest { + message Entry { + // The key of the row to which the `mutations` should be applied. + bytes row_key = 1; + + // Changes to be atomically applied to the specified row. Mutations are + // applied in order, meaning that earlier mutations can be masked by + // later ones. + // At least one mutation must be specified. + repeated Mutation mutations = 2; + } + + // The unique name of the table to which the mutations should be applied. + string table_name = 1; + + // The row keys/mutations to be applied in bulk. + // Each entry is applied as an atomic mutation, but the entries may be + // applied in arbitrary order (even between entries for the same row). + // At least one entry must be specified, and in total the entries may + // contain at most 100000 mutations. + repeated Entry entries = 2; +} + +// Response message for BigtableService.MutateRows. +message MutateRowsResponse { + // The results for each Entry from the request, presented in the order + // in which the entries were originally given. + // Depending on how requests are batched during execution, it is possible + // for one Entry to fail due to an error with another Entry. In the event + // that this occurs, the same error will be reported for both entries. + repeated google.rpc.Status statuses = 1; +} + +// Request message for BigtableService.CheckAndMutateRowRequest +message CheckAndMutateRowRequest { + // The unique name of the table to which the conditional mutation should be + // applied. + string table_name = 1; + + // The key of the row to which the conditional mutation should be applied. + bytes row_key = 2; + + // The filter to be applied to the contents of the specified row. Depending + // on whether or not any results are yielded, either "true_mutations" or + // "false_mutations" will be executed. If unset, checks that the row contains + // any values at all. + RowFilter predicate_filter = 6; + + // Changes to be atomically applied to the specified row if "predicate_filter" + // yields at least one cell when applied to "row_key". Entries are applied in + // order, meaning that earlier mutations can be masked by later ones. + // Must contain at least one entry if "false_mutations" is empty, and at most + // 100000. + repeated Mutation true_mutations = 4; + + // Changes to be atomically applied to the specified row if "predicate_filter" + // does not yield any cells when applied to "row_key". Entries are applied in + // order, meaning that earlier mutations can be masked by later ones. + // Must contain at least one entry if "true_mutations" is empty, and at most + // 100000. + repeated Mutation false_mutations = 5; +} + +// Response message for BigtableService.CheckAndMutateRowRequest. +message CheckAndMutateRowResponse { + // Whether or not the request's "predicate_filter" yielded any results for + // the specified row. + bool predicate_matched = 1; +} + +// Request message for BigtableService.ReadModifyWriteRowRequest. +message ReadModifyWriteRowRequest { + // The unique name of the table to which the read/modify/write rules should be + // applied. + string table_name = 1; + + // The key of the row to which the read/modify/write rules should be applied. 
+ bytes row_key = 2; + + // Rules specifying how the specified row's contents are to be transformed + // into writes. Entries are applied in order, meaning that earlier rules will + // affect the results of later ones. + repeated ReadModifyWriteRule rules = 3; +} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_table_admin.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_table_admin.proto new file mode 100644 index 000000000000..2d5bddf302aa --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_table_admin.proto @@ -0,0 +1,525 @@ +// Copyright 2018 Google LLC. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +syntax = "proto3"; + +package google.bigtable.admin.v2; + +import "google/api/annotations.proto"; +import "google/bigtable/admin/v2/table.proto"; +import "google/longrunning/operations.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/timestamp.proto"; + +option csharp_namespace = "Google.Cloud.Bigtable.Admin.V2"; +option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/v2;admin"; +option java_multiple_files = true; +option java_outer_classname = "BigtableTableAdminProto"; +option java_package = "com.google.bigtable.admin.v2"; +option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2"; + + +// Service for creating, configuring, and deleting Cloud Bigtable tables. +// +// +// Provides access to the table schemas only, not the data stored within +// the tables. +service BigtableTableAdmin { + // Creates a new table in the specified instance. + // The table can be created with a full set of initial column families, + // specified in the request. + rpc CreateTable(CreateTableRequest) returns (Table) { + option (google.api.http) = { + post: "/v2/{parent=projects/*/instances/*}/tables" + body: "*" + }; + } + + // Creates a new table from the specified snapshot. The target table must + // not exist. The snapshot and the table must be in the same instance. + // + // Note: This is a private alpha release of Cloud Bigtable snapshots. This + // feature is not currently available to most Cloud Bigtable customers. This + // feature might be changed in backward-incompatible ways and is not + // recommended for production use. It is not subject to any SLA or deprecation + // policy. + rpc CreateTableFromSnapshot(CreateTableFromSnapshotRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot" + body: "*" + }; + } + + // Lists all tables served from a specified instance. + rpc ListTables(ListTablesRequest) returns (ListTablesResponse) { + option (google.api.http) = { + get: "/v2/{parent=projects/*/instances/*}/tables" + }; + } + + // Gets metadata information about the specified table. 
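For the CreateTable path described above, this package's Python client typically creates the table and then adds column families, each with a garbage-collection rule. A minimal sketch (the create()/column_family() helpers and MaxVersionsGCRule are assumed to match this snapshot of the library; the table and family names are illustrative):

    from google.cloud.bigtable import column_family

    # `instance` is assumed to be an existing google.cloud.bigtable.instance.Instance.
    table = instance.table('my-table')
    table.create()
    cf = table.column_family('stats', gc_rule=column_family.MaxVersionsGCRule(2))
    cf.create()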
+ rpc GetTable(GetTableRequest) returns (Table) { + option (google.api.http) = { + get: "/v2/{name=projects/*/instances/*/tables/*}" + }; + } + + // Permanently deletes a specified table and all of its data. + rpc DeleteTable(DeleteTableRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v2/{name=projects/*/instances/*/tables/*}" + }; + } + + // Performs a series of column family modifications on the specified table. + // Either all or none of the modifications will occur before this method + // returns, but data requests received prior to that point may see a table + // where only some modifications have taken effect. + rpc ModifyColumnFamilies(ModifyColumnFamiliesRequest) returns (Table) { + option (google.api.http) = { + post: "/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies" + body: "*" + }; + } + + // Permanently drop/delete a row range from a specified table. The request can + // specify whether to delete all rows in a table, or only those that match a + // particular prefix. + rpc DropRowRange(DropRowRangeRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + post: "/v2/{name=projects/*/instances/*/tables/*}:dropRowRange" + body: "*" + }; + } + + // Generates a consistency token for a Table, which can be used in + // CheckConsistency to check whether mutations to the table that finished + // before this call started have been replicated. The tokens will be available + // for 90 days. + rpc GenerateConsistencyToken(GenerateConsistencyTokenRequest) returns (GenerateConsistencyTokenResponse) { + option (google.api.http) = { + post: "/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken" + body: "*" + }; + } + + // Checks replication consistency based on a consistency token, that is, if + // replication has caught up based on the conditions specified in the token + // and the check request. + rpc CheckConsistency(CheckConsistencyRequest) returns (CheckConsistencyResponse) { + option (google.api.http) = { + post: "/v2/{name=projects/*/instances/*/tables/*}:checkConsistency" + body: "*" + }; + } + + // Creates a new snapshot in the specified cluster from the specified + // source table. The cluster and the table must be in the same instance. + // + // Note: This is a private alpha release of Cloud Bigtable snapshots. This + // feature is not currently available to most Cloud Bigtable customers. This + // feature might be changed in backward-incompatible ways and is not + // recommended for production use. It is not subject to any SLA or deprecation + // policy. + rpc SnapshotTable(SnapshotTableRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v2/{name=projects/*/instances/*/tables/*}:snapshot" + body: "*" + }; + } + + // Gets metadata information about the specified snapshot. + // + // Note: This is a private alpha release of Cloud Bigtable snapshots. This + // feature is not currently available to most Cloud Bigtable customers. This + // feature might be changed in backward-incompatible ways and is not + // recommended for production use. It is not subject to any SLA or deprecation + // policy. + rpc GetSnapshot(GetSnapshotRequest) returns (Snapshot) { + option (google.api.http) = { + get: "/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}" + }; + } + + // Lists all snapshots associated with the specified cluster. + // + // Note: This is a private alpha release of Cloud Bigtable snapshots. 
This + // feature is not currently available to most Cloud Bigtable customers. This + // feature might be changed in backward-incompatible ways and is not + // recommended for production use. It is not subject to any SLA or deprecation + // policy. + rpc ListSnapshots(ListSnapshotsRequest) returns (ListSnapshotsResponse) { + option (google.api.http) = { + get: "/v2/{parent=projects/*/instances/*/clusters/*}/snapshots" + }; + } + + // Permanently deletes the specified snapshot. + // + // Note: This is a private alpha release of Cloud Bigtable snapshots. This + // feature is not currently available to most Cloud Bigtable customers. This + // feature might be changed in backward-incompatible ways and is not + // recommended for production use. It is not subject to any SLA or deprecation + // policy. + rpc DeleteSnapshot(DeleteSnapshotRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}" + }; + } +} + +// Request message for +// [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] +message CreateTableRequest { + // An initial split point for a newly created table. + message Split { + // Row key to use as an initial tablet boundary. + bytes key = 1; + } + + // The unique name of the instance in which to create the table. + // Values are of the form `projects//instances/`. + string parent = 1; + + // The name by which the new table should be referred to within the parent + // instance, e.g., `foobar` rather than `/tables/foobar`. + string table_id = 2; + + // The Table to create. + Table table = 3; + + // The optional list of row keys that will be used to initially split the + // table into several tablets (tablets are similar to HBase regions). + // Given two split keys, `s1` and `s2`, three tablets will be created, + // spanning the key ranges: `[, s1), [s1, s2), [s2, )`. + // + // Example: + // + // * Row keys := `["a", "apple", "custom", "customer_1", "customer_2",` + // `"other", "zz"]` + // * initial_split_keys := `["apple", "customer_1", "customer_2", "other"]` + // * Key assignment: + // - Tablet 1 `[, apple) => {"a"}.` + // - Tablet 2 `[apple, customer_1) => {"apple", "custom"}.` + // - Tablet 3 `[customer_1, customer_2) => {"customer_1"}.` + // - Tablet 4 `[customer_2, other) => {"customer_2"}.` + // - Tablet 5 `[other, ) => {"other", "zz"}.` + repeated Split initial_splits = 4; +} + +// Request message for +// [google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot] +// +// Note: This is a private alpha release of Cloud Bigtable snapshots. This +// feature is not currently available to most Cloud Bigtable customers. This +// feature might be changed in backward-incompatible ways and is not recommended +// for production use. It is not subject to any SLA or deprecation policy. +message CreateTableFromSnapshotRequest { + // The unique name of the instance in which to create the table. + // Values are of the form `projects//instances/`. + string parent = 1; + + // The name by which the new table should be referred to within the parent + // instance, e.g., `foobar` rather than `/tables/foobar`. + string table_id = 2; + + // The unique name of the snapshot from which to restore the table. The + // snapshot and the table must be in the same instance. + // Values are of the form + // `projects//instances//clusters//snapshots/`. 
+ string source_snapshot = 3; +} + +// Request message for +// [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] +message DropRowRangeRequest { + // The unique name of the table on which to drop a range of rows. + // Values are of the form + // `projects//instances//tables/
`. + string name = 1; + + // Delete all rows or by prefix. + oneof target { + // Delete all rows that start with this row key prefix. Prefix cannot be + // zero length. + bytes row_key_prefix = 2; + + // Delete all rows in the table. Setting this to false is a no-op. + bool delete_all_data_from_table = 3; + } +} + +// Request message for +// [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] +message ListTablesRequest { + // The unique name of the instance for which tables should be listed. + // Values are of the form `projects//instances/`. + string parent = 1; + + // The view to be applied to the returned tables' fields. + // Defaults to `NAME_ONLY` if unspecified; no others are currently supported. + Table.View view = 2; + + // Maximum number of results per page. + // CURRENTLY UNIMPLEMENTED AND IGNORED. + int32 page_size = 4; + + // The value of `next_page_token` returned by a previous call. + string page_token = 3; +} + +// Response message for +// [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] +message ListTablesResponse { + // The tables present in the requested instance. + repeated Table tables = 1; + + // Set if not all tables could be returned in a single response. + // Pass this value to `page_token` in another request to get the next + // page of results. + string next_page_token = 2; +} + +// Request message for +// [google.bigtable.admin.v2.BigtableTableAdmin.GetTable][google.bigtable.admin.v2.BigtableTableAdmin.GetTable] +message GetTableRequest { + // The unique name of the requested table. + // Values are of the form + // `projects//instances//tables/
`. + string name = 1; + + // The view to be applied to the returned table's fields. + // Defaults to `SCHEMA_VIEW` if unspecified. + Table.View view = 2; +} + +// Request message for +// [google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable] +message DeleteTableRequest { + // The unique name of the table to be deleted. + // Values are of the form + // `projects//instances//tables/
`. + string name = 1; +} + +// Request message for +// [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies] +message ModifyColumnFamiliesRequest { + // A create, update, or delete of a particular column family. + message Modification { + // The ID of the column family to be modified. + string id = 1; + + // Column family modifications. + oneof mod { + // Create a new column family with the specified schema, or fail if + // one already exists with the given ID. + ColumnFamily create = 2; + + // Update an existing column family to the specified schema, or fail + // if no column family exists with the given ID. + ColumnFamily update = 3; + + // Drop (delete) the column family with the given ID, or fail if no such + // family exists. + bool drop = 4; + } + } + + // The unique name of the table whose families should be modified. + // Values are of the form + // `projects//instances//tables/
`. + string name = 1; + + // Modifications to be atomically applied to the specified table's families. + // Entries are applied in order, meaning that earlier modifications can be + // masked by later ones (in the case of repeated updates to the same family, + // for example). + repeated Modification modifications = 2; +} + +// Request message for +// [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] +message GenerateConsistencyTokenRequest { + // The unique name of the Table for which to create a consistency token. + // Values are of the form + // `projects//instances//tables/
`. + string name = 1; +} + +// Response message for +// [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] +message GenerateConsistencyTokenResponse { + // The generated consistency token. + string consistency_token = 1; +} + +// Request message for +// [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] +message CheckConsistencyRequest { + // The unique name of the Table for which to check replication consistency. + // Values are of the form + // `projects//instances//tables/
`. + string name = 1; + + // The token created using GenerateConsistencyToken for the Table. + string consistency_token = 2; +} + +// Response message for +// [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] +message CheckConsistencyResponse { + // True only if the token is consistent. A token is consistent if replication + // has caught up with the restrictions specified in the request. + bool consistent = 1; +} + +// Request message for +// [google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable][google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable] +// +// Note: This is a private alpha release of Cloud Bigtable snapshots. This +// feature is not currently available to most Cloud Bigtable customers. This +// feature might be changed in backward-incompatible ways and is not recommended +// for production use. It is not subject to any SLA or deprecation policy. +message SnapshotTableRequest { + // The unique name of the table to have the snapshot taken. + // Values are of the form + // `projects//instances//tables/
`. + string name = 1; + + // The name of the cluster where the snapshot will be created in. + // Values are of the form + // `projects//instances//clusters/`. + string cluster = 2; + + // The ID by which the new snapshot should be referred to within the parent + // cluster, e.g., `mysnapshot` of the form: `[_a-zA-Z0-9][-_.a-zA-Z0-9]*` + // rather than + // `projects//instances//clusters//snapshots/mysnapshot`. + string snapshot_id = 3; + + // The amount of time that the new snapshot can stay active after it is + // created. Once 'ttl' expires, the snapshot will get deleted. The maximum + // amount of time a snapshot can stay active is 7 days. If 'ttl' is not + // specified, the default value of 24 hours will be used. + google.protobuf.Duration ttl = 4; + + // Description of the snapshot. + string description = 5; +} + +// Request message for +// [google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot] +// +// Note: This is a private alpha release of Cloud Bigtable snapshots. This +// feature is not currently available to most Cloud Bigtable customers. This +// feature might be changed in backward-incompatible ways and is not recommended +// for production use. It is not subject to any SLA or deprecation policy. +message GetSnapshotRequest { + // The unique name of the requested snapshot. + // Values are of the form + // `projects//instances//clusters//snapshots/`. + string name = 1; +} + +// Request message for +// [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] +// +// Note: This is a private alpha release of Cloud Bigtable snapshots. This +// feature is not currently available to most Cloud Bigtable customers. This +// feature might be changed in backward-incompatible ways and is not recommended +// for production use. It is not subject to any SLA or deprecation policy. +message ListSnapshotsRequest { + // The unique name of the cluster for which snapshots should be listed. + // Values are of the form + // `projects//instances//clusters/`. + // Use ` = '-'` to list snapshots for all clusters in an instance, + // e.g., `projects//instances//clusters/-`. + string parent = 1; + + // The maximum number of snapshots to return per page. + // CURRENTLY UNIMPLEMENTED AND IGNORED. + int32 page_size = 2; + + // The value of `next_page_token` returned by a previous call. + string page_token = 3; +} + +// Response message for +// [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] +// +// Note: This is a private alpha release of Cloud Bigtable snapshots. This +// feature is not currently available to most Cloud Bigtable customers. This +// feature might be changed in backward-incompatible ways and is not recommended +// for production use. It is not subject to any SLA or deprecation policy. +message ListSnapshotsResponse { + // The snapshots present in the requested cluster. + repeated Snapshot snapshots = 1; + + // Set if not all snapshots could be returned in a single response. + // Pass this value to `page_token` in another request to get the next + // page of results. + string next_page_token = 2; +} + +// Request message for +// [google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot] +// +// Note: This is a private alpha release of Cloud Bigtable snapshots. This +// feature is not currently available to most Cloud Bigtable customers. 
This +// feature might be changed in backward-incompatible ways and is not recommended +// for production use. It is not subject to any SLA or deprecation policy. +message DeleteSnapshotRequest { + // The unique name of the snapshot to be deleted. + // Values are of the form + // `projects//instances//clusters//snapshots/`. + string name = 1; +} + +// The metadata for the Operation returned by SnapshotTable. +// +// Note: This is a private alpha release of Cloud Bigtable snapshots. This +// feature is not currently available to most Cloud Bigtable customers. This +// feature might be changed in backward-incompatible ways and is not recommended +// for production use. It is not subject to any SLA or deprecation policy. +message SnapshotTableMetadata { + // The request that prompted the initiation of this SnapshotTable operation. + SnapshotTableRequest original_request = 1; + + // The time at which the original request was received. + google.protobuf.Timestamp request_time = 2; + + // The time at which the operation failed or was completed successfully. + google.protobuf.Timestamp finish_time = 3; +} + +// The metadata for the Operation returned by CreateTableFromSnapshot. +// +// Note: This is a private alpha release of Cloud Bigtable snapshots. This +// feature is not currently available to most Cloud Bigtable customers. This +// feature might be changed in backward-incompatible ways and is not recommended +// for production use. It is not subject to any SLA or deprecation policy. +message CreateTableFromSnapshotMetadata { + // The request that prompted the initiation of this CreateTableFromSnapshot + // operation. + CreateTableFromSnapshotRequest original_request = 1; + + // The time at which the original request was received. + google.protobuf.Timestamp request_time = 2; + + // The time at which the operation failed or was completed successfully. + google.protobuf.Timestamp finish_time = 3; +} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_table_data.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_table_data.proto new file mode 100644 index 000000000000..e4efb74f560e --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_table_data.proto @@ -0,0 +1,126 @@ +// Copyright 2017 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.bigtable.admin.table.v1; + +import "google/longrunning/operations.proto"; +import "google/protobuf/duration.proto"; + +option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/table/v1;table"; +option java_multiple_files = true; +option java_outer_classname = "BigtableTableDataProto"; +option java_package = "com.google.bigtable.admin.table.v1"; + + +// A collection of user data indexed by row, column, and timestamp. +// Each table is served using the resources of its parent cluster. 
+message Table { + enum TimestampGranularity { + MILLIS = 0; + } + + // A unique identifier of the form + // /tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]* + string name = 1; + + // If this Table is in the process of being created, the Operation used to + // track its progress. As long as this operation is present, the Table will + // not accept any Table Admin or Read/Write requests. + google.longrunning.Operation current_operation = 2; + + // The column families configured for this table, mapped by column family id. + map column_families = 3; + + // The granularity (e.g. MILLIS, MICROS) at which timestamps are stored in + // this table. Timestamps not matching the granularity will be rejected. + // Cannot be changed once the table is created. + TimestampGranularity granularity = 4; +} + +// A set of columns within a table which share a common configuration. +message ColumnFamily { + // A unique identifier of the form /columnFamilies/[-_.a-zA-Z0-9]+ + // The last segment is the same as the "name" field in + // google.bigtable.v1.Family. + string name = 1; + + // Garbage collection expression specified by the following grammar: + // GC = EXPR + // | "" ; + // EXPR = EXPR, "||", EXPR (* lowest precedence *) + // | EXPR, "&&", EXPR + // | "(", EXPR, ")" (* highest precedence *) + // | PROP ; + // PROP = "version() >", NUM32 + // | "age() >", NUM64, [ UNIT ] ; + // NUM32 = non-zero-digit { digit } ; (* # NUM32 <= 2^32 - 1 *) + // NUM64 = non-zero-digit { digit } ; (* # NUM64 <= 2^63 - 1 *) + // UNIT = "d" | "h" | "m" (* d=days, h=hours, m=minutes, else micros *) + // GC expressions can be up to 500 characters in length + // + // The different types of PROP are defined as follows: + // version() - cell index, counting from most recent and starting at 1 + // age() - age of the cell (current time minus cell timestamp) + // + // Example: "version() > 3 || (age() > 3d && version() > 1)" + // drop cells beyond the most recent three, and drop cells older than three + // days unless they're the most recent cell in the row/column + // + // Garbage collection executes opportunistically in the background, and so + // it's possible for reads to return a cell even if it matches the active GC + // expression for its family. + string gc_expression = 2; + + // Garbage collection rule specified as a protobuf. + // Supersedes `gc_expression`. + // Must serialize to at most 500 bytes. + // + // NOTE: Garbage collection executes opportunistically in the background, and + // so it's possible for reads to return a cell even if it matches the active + // GC expression for its family. + GcRule gc_rule = 3; +} + +// Rule for determining which cells to delete during garbage collection. +message GcRule { + // A GcRule which deletes cells matching all of the given rules. + message Intersection { + // Only delete cells which would be deleted by every element of `rules`. + repeated GcRule rules = 1; + } + + // A GcRule which deletes cells matching any of the given rules. + message Union { + // Delete cells which would be deleted by any element of `rules`. + repeated GcRule rules = 1; + } + + oneof rule { + // Delete all cells in a column except the most recent N. + int32 max_num_versions = 1; + + // Delete cells in a column older than the given age. + // Values must be at least one millisecond, and will be truncated to + // microsecond granularity. + google.protobuf.Duration max_age = 2; + + // Delete cells that would be deleted by every nested rule. 
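The GC expression grammar above maps onto the composable rule classes in google/cloud/bigtable/column_family.py. A sketch of the documented example "version() > 3 || (age() > 3d && version() > 1)", with the class names taken from that module and everything else illustrative:

    import datetime

    from google.cloud.bigtable.column_family import (
        GCRuleIntersection,
        GCRuleUnion,
        MaxAgeGCRule,
        MaxVersionsGCRule,
    )

    # Drop cells beyond the most recent three, and drop cells older than three
    # days unless they are the most recent cell in the row/column.
    rule = GCRuleUnion(rules=[
        MaxVersionsGCRule(3),
        GCRuleIntersection(rules=[
            MaxAgeGCRule(datetime.timedelta(days=3)),
            MaxVersionsGCRule(1),
        ]),
    ])

A rule built this way is normally passed as gc_rule when a column family is created.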
+ Intersection intersection = 3; + + // Delete cells that would be deleted by any nested rule. + Union union = 4; + } +} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_table_service.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_table_service.proto new file mode 100644 index 000000000000..6e968fee17c1 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_table_service.proto @@ -0,0 +1,80 @@ +// Copyright 2017 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.bigtable.admin.table.v1; + +import "google/api/annotations.proto"; +import "google/bigtable/admin/table/v1/bigtable_table_data.proto"; +import "google/bigtable/admin/table/v1/bigtable_table_service_messages.proto"; +import "google/protobuf/empty.proto"; + +option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/table/v1;table"; +option java_multiple_files = true; +option java_outer_classname = "BigtableTableServicesProto"; +option java_package = "com.google.bigtable.admin.table.v1"; + + +// Service for creating, configuring, and deleting Cloud Bigtable tables. +// Provides access to the table schemas only, not the data stored within the tables. +service BigtableTableService { + // Creates a new table, to be served from a specified cluster. + // The table can be created with a full set of initial column families, + // specified in the request. + rpc CreateTable(CreateTableRequest) returns (Table) { + option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*}/tables" body: "*" }; + } + + // Lists the names of all tables served from a specified cluster. + rpc ListTables(ListTablesRequest) returns (ListTablesResponse) { + option (google.api.http) = { get: "/v1/{name=projects/*/zones/*/clusters/*}/tables" }; + } + + // Gets the schema of the specified table, including its column families. + rpc GetTable(GetTableRequest) returns (Table) { + option (google.api.http) = { get: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}" }; + } + + // Permanently deletes a specified table and all of its data. + rpc DeleteTable(DeleteTableRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { delete: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}" }; + } + + // Changes the name of a specified table. + // Cannot be used to move tables between clusters, zones, or projects. + rpc RenameTable(RenameTableRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}:rename" body: "*" }; + } + + // Creates a new column family within a specified table. + rpc CreateColumnFamily(CreateColumnFamilyRequest) returns (ColumnFamily) { + option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}/columnFamilies" body: "*" }; + } + + // Changes the configuration of a specified column family. 
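These column-family RPCs correspond to the create, update, and delete methods on the ColumnFamily helper in this package; an illustrative sketch with placeholder ids (method names assumed from google/cloud/bigtable/column_family.py):

    from google.cloud import bigtable
    from google.cloud.bigtable.column_family import MaxVersionsGCRule

    client = bigtable.Client(project='my-project', admin=True)
    table = client.instance('my-instance').table('my-table')

    cf = table.column_family('cf1', gc_rule=MaxVersionsGCRule(2))
    cf.create()                        # CreateColumnFamily

    cf.gc_rule = MaxVersionsGCRule(5)
    cf.update()                        # UpdateColumnFamily

    cf.delete()                        # DeleteColumnFamily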
+ rpc UpdateColumnFamily(ColumnFamily) returns (ColumnFamily) { + option (google.api.http) = { put: "/v1/{name=projects/*/zones/*/clusters/*/tables/*/columnFamilies/*}" body: "*" }; + } + + // Permanently deletes a specified column family and all of its data. + rpc DeleteColumnFamily(DeleteColumnFamilyRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { delete: "/v1/{name=projects/*/zones/*/clusters/*/tables/*/columnFamilies/*}" }; + } + + // Delete all rows in a table corresponding to a particular prefix + rpc BulkDeleteRows(BulkDeleteRowsRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}:bulkDeleteRows" body: "*" }; + } +} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_table_service_messages.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_table_service_messages.proto new file mode 100644 index 000000000000..617ede65592f --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_table_service_messages.proto @@ -0,0 +1,116 @@ +// Copyright 2017 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.bigtable.admin.table.v1; + +import "google/bigtable/admin/table/v1/bigtable_table_data.proto"; + +option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/table/v1;table"; +option java_multiple_files = true; +option java_outer_classname = "BigtableTableServiceMessagesProto"; +option java_package = "com.google.bigtable.admin.table.v1"; + + +message CreateTableRequest { + // The unique name of the cluster in which to create the new table. + string name = 1; + + // The name by which the new table should be referred to within the cluster, + // e.g. "foobar" rather than "/tables/foobar". + string table_id = 2; + + // The Table to create. The `name` field of the Table and all of its + // ColumnFamilies must be left blank, and will be populated in the response. + Table table = 3; + + // The optional list of row keys that will be used to initially split the + // table into several tablets (Tablets are similar to HBase regions). + // Given two split keys, "s1" and "s2", three tablets will be created, + // spanning the key ranges: [, s1), [s1, s2), [s2, ). + // + // Example: + // * Row keys := ["a", "apple", "custom", "customer_1", "customer_2", + // "other", "zz"] + // * initial_split_keys := ["apple", "customer_1", "customer_2", "other"] + // * Key assignment: + // - Tablet 1 [, apple) => {"a"}. + // - Tablet 2 [apple, customer_1) => {"apple", "custom"}. + // - Tablet 3 [customer_1, customer_2) => {"customer_1"}. + // - Tablet 4 [customer_2, other) => {"customer_2"}. + // - Tablet 5 [other, ) => {"other", "zz"}. + repeated string initial_split_keys = 4; +} + +message ListTablesRequest { + // The unique name of the cluster for which tables should be listed. 
+ string name = 1; +} + +message ListTablesResponse { + // The tables present in the requested cluster. + // At present, only the names of the tables are populated. + repeated Table tables = 1; +} + +message GetTableRequest { + // The unique name of the requested table. + string name = 1; +} + +message DeleteTableRequest { + // The unique name of the table to be deleted. + string name = 1; +} + +message RenameTableRequest { + // The current unique name of the table. + string name = 1; + + // The new name by which the table should be referred to within its containing + // cluster, e.g. "foobar" rather than "/tables/foobar". + string new_id = 2; +} + +message CreateColumnFamilyRequest { + // The unique name of the table in which to create the new column family. + string name = 1; + + // The name by which the new column family should be referred to within the + // table, e.g. "foobar" rather than "/columnFamilies/foobar". + string column_family_id = 2; + + // The column family to create. The `name` field must be left blank. + ColumnFamily column_family = 3; +} + +message DeleteColumnFamilyRequest { + // The unique name of the column family to be deleted. + string name = 1; +} + +message BulkDeleteRowsRequest { + // The unique name of the table on which to perform the bulk delete + string table_name = 1; + + oneof target { + // Delete all rows that start with this row key prefix. Prefix cannot be + // zero length. + bytes row_key_prefix = 2; + + // Delete all rows in the table. Setting this to false is a no-op. + bool delete_all_data_from_table = 3; + } +} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/common.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/common.proto new file mode 100644 index 000000000000..0ece12780eb9 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/common.proto @@ -0,0 +1,41 @@ +// Copyright 2018 Google LLC. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +syntax = "proto3"; + +package google.bigtable.admin.v2; + +import "google/api/annotations.proto"; +import "google/protobuf/timestamp.proto"; + +option csharp_namespace = "Google.Cloud.Bigtable.Admin.V2"; +option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/v2;admin"; +option java_multiple_files = true; +option java_outer_classname = "CommonProto"; +option java_package = "com.google.bigtable.admin.v2"; +option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2"; + + +// Storage media types for persisting Bigtable data. +enum StorageType { + // The user did not specify a storage type. + STORAGE_TYPE_UNSPECIFIED = 0; + + // Flash (SSD) storage should be used. + SSD = 1; + + // Magnetic drive (HDD) storage should be used. 
+ HDD = 2; +} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data.proto new file mode 100644 index 000000000000..7400197e7b17 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data.proto @@ -0,0 +1,535 @@ +// Copyright 2018 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.bigtable.v2; + +option csharp_namespace = "Google.Cloud.Bigtable.V2"; +option go_package = "google.golang.org/genproto/googleapis/bigtable/v2;bigtable"; +option java_multiple_files = true; +option java_outer_classname = "DataProto"; +option java_package = "com.google.bigtable.v2"; +option php_namespace = "Google\\Cloud\\Bigtable\\V2"; + + +// Specifies the complete (requested) contents of a single row of a table. +// Rows which exceed 256MiB in size cannot be read in full. +message Row { + // The unique key which identifies this row within its table. This is the same + // key that's used to identify the row in, for example, a MutateRowRequest. + // May contain any non-empty byte string up to 4KiB in length. + bytes key = 1; + + // May be empty, but only if the entire row is empty. + // The mutual ordering of column families is not specified. + repeated Family families = 2; +} + +// Specifies (some of) the contents of a single row/column family intersection +// of a table. +message Family { + // The unique key which identifies this family within its row. This is the + // same key that's used to identify the family in, for example, a RowFilter + // which sets its "family_name_regex_filter" field. + // Must match `[-_.a-zA-Z0-9]+`, except that AggregatingRowProcessors may + // produce cells in a sentinel family with an empty name. + // Must be no greater than 64 characters in length. + string name = 1; + + // Must not be empty. Sorted in order of increasing "qualifier". + repeated Column columns = 2; +} + +// Specifies (some of) the contents of a single row/column intersection of a +// table. +message Column { + // The unique key which identifies this column within its family. This is the + // same key that's used to identify the column in, for example, a RowFilter + // which sets its `column_qualifier_regex_filter` field. + // May contain any byte string, including the empty string, up to 16kiB in + // length. + bytes qualifier = 1; + + // Must not be empty. Sorted in order of decreasing "timestamp_micros". + repeated Cell cells = 2; +} + +// Specifies (some of) the contents of a single row/column/timestamp of a table. +message Cell { + // The cell's stored timestamp, which also uniquely identifies it within + // its column. + // Values are always expressed in microseconds, but individual tables may set + // a coarser granularity to further restrict the allowed values. For + // example, a table which specifies millisecond granularity will only allow + // values of `timestamp_micros` which are multiples of 1000. 
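One practical consequence: for a table declared with millisecond granularity, a client must truncate its timestamps so the microsecond value is a multiple of 1000. A minimal, client-free sketch of that truncation:

    import datetime

    # Microseconds since the Unix epoch, truncated to a multiple of 1000 so the
    # value is acceptable to a table with MILLIS timestamp granularity.
    _EPOCH = datetime.datetime(1970, 1, 1)
    delta = datetime.datetime.utcnow() - _EPOCH
    timestamp_micros = (delta.days * 86400 + delta.seconds) * 1000000 + delta.microseconds
    timestamp_micros -= timestamp_micros % 1000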
+ int64 timestamp_micros = 1; + + // The value stored in the cell. + // May contain any byte string, including the empty string, up to 100MiB in + // length. + bytes value = 2; + + // Labels applied to the cell by a [RowFilter][google.bigtable.v2.RowFilter]. + repeated string labels = 3; +} + +// Specifies a contiguous range of rows. +message RowRange { + // The row key at which to start the range. + // If neither field is set, interpreted as the empty string, inclusive. + oneof start_key { + // Used when giving an inclusive lower bound for the range. + bytes start_key_closed = 1; + + // Used when giving an exclusive lower bound for the range. + bytes start_key_open = 2; + } + + // The row key at which to end the range. + // If neither field is set, interpreted as the infinite row key, exclusive. + oneof end_key { + // Used when giving an exclusive upper bound for the range. + bytes end_key_open = 3; + + // Used when giving an inclusive upper bound for the range. + bytes end_key_closed = 4; + } +} + +// Specifies a non-contiguous set of rows. +message RowSet { + // Single rows included in the set. + repeated bytes row_keys = 1; + + // Contiguous row ranges included in the set. + repeated RowRange row_ranges = 2; +} + +// Specifies a contiguous range of columns within a single column family. +// The range spans from <column_family>:<start_qualifier> to +// <column_family>:<end_qualifier>, where both bounds can be either +// inclusive or exclusive. +message ColumnRange { + // The name of the column family within which this range falls. + string family_name = 1; + + // The column qualifier at which to start the range (within `column_family`). + // If neither field is set, interpreted as the empty string, inclusive. + oneof start_qualifier { + // Used when giving an inclusive lower bound for the range. + bytes start_qualifier_closed = 2; + + // Used when giving an exclusive lower bound for the range. + bytes start_qualifier_open = 3; + } + + // The column qualifier at which to end the range (within `column_family`). + // If neither field is set, interpreted as the infinite string, exclusive. + oneof end_qualifier { + // Used when giving an inclusive upper bound for the range. + bytes end_qualifier_closed = 4; + + // Used when giving an exclusive upper bound for the range. + bytes end_qualifier_open = 5; + } +} + +// Specified a contiguous range of microsecond timestamps. +message TimestampRange { + // Inclusive lower bound. If left empty, interpreted as 0. + int64 start_timestamp_micros = 1; + + // Exclusive upper bound. If left empty, interpreted as infinity. + int64 end_timestamp_micros = 2; +} + +// Specifies a contiguous range of raw byte values. +message ValueRange { + // The value at which to start the range. + // If neither field is set, interpreted as the empty string, inclusive. + oneof start_value { + // Used when giving an inclusive lower bound for the range. + bytes start_value_closed = 1; + + // Used when giving an exclusive lower bound for the range. + bytes start_value_open = 2; + } + + // The value at which to end the range. + // If neither field is set, interpreted as the infinite string, exclusive. + oneof end_value { + // Used when giving an inclusive upper bound for the range. + bytes end_value_closed = 3; + + // Used when giving an exclusive upper bound for the range. + bytes end_value_open = 4; + } +} + +// Takes a row as input and produces an alternate view of the row based on +// specified rules. 
For example, a RowFilter might trim down a row to include +// just the cells from columns matching a given regular expression, or might +// return all the cells of a row but not their values. More complicated filters +// can be composed out of these components to express requests such as, "within +// every column of a particular family, give just the two most recent cells +// which are older than timestamp X." +// +// There are two broad categories of RowFilters (true filters and transformers), +// as well as two ways to compose simple filters into more complex ones +// (chains and interleaves). They work as follows: +// +// * True filters alter the input row by excluding some of its cells wholesale +// from the output row. An example of a true filter is the `value_regex_filter`, +// which excludes cells whose values don't match the specified pattern. All +// regex true filters use RE2 syntax (https://github.com/google/re2/wiki/Syntax) +// in raw byte mode (RE2::Latin1), and are evaluated as full matches. An +// important point to keep in mind is that `RE2(.)` is equivalent by default to +// `RE2([^\n])`, meaning that it does not match newlines. When attempting to +// match an arbitrary byte, you should therefore use the escape sequence `\C`, +// which may need to be further escaped as `\\C` in your client language. +// +// * Transformers alter the input row by changing the values of some of its +// cells in the output, without excluding them completely. Currently, the only +// supported transformer is the `strip_value_transformer`, which replaces every +// cell's value with the empty string. +// +// * Chains and interleaves are described in more detail in the +// RowFilter.Chain and RowFilter.Interleave documentation. +// +// The total serialized size of a RowFilter message must not +// exceed 4096 bytes, and RowFilters may not be nested within each other +// (in Chains or Interleaves) to a depth of more than 20. +message RowFilter { + // A RowFilter which sends rows through several RowFilters in sequence. + message Chain { + // The elements of "filters" are chained together to process the input row: + // in row -> f(0) -> intermediate row -> f(1) -> ... -> f(N) -> out row + // The full chain is executed atomically. + repeated RowFilter filters = 1; + } + + // A RowFilter which sends each row to each of several component + // RowFilters and interleaves the results. + message Interleave { + // The elements of "filters" all process a copy of the input row, and the + // results are pooled, sorted, and combined into a single output row. + // If multiple cells are produced with the same column and timestamp, + // they will all appear in the output row in an unspecified mutual order. + // Consider the following example, with three filters: + // + // input row + // | + // ----------------------------------------------------- + // | | | + // f(0) f(1) f(2) + // | | | + // 1: foo,bar,10,x foo,bar,10,z far,bar,7,a + // 2: foo,blah,11,z far,blah,5,x far,blah,5,x + // | | | + // ----------------------------------------------------- + // | + // 1: foo,bar,10,z // could have switched with #2 + // 2: foo,bar,10,x // could have switched with #1 + // 3: foo,blah,11,z + // 4: far,bar,7,a + // 5: far,blah,5,x // identical to #6 + // 6: far,blah,5,x // identical to #5 + // + // All interleaved filters are executed atomically. 
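Chain and Interleave are exposed client-side as RowFilterChain and RowFilterUnion in google/cloud/bigtable/row_filters.py. A sketch of composing them (the family, qualifier, and cell-limit values are illustrative):

    from google.cloud.bigtable import row_filters

    # Chain: filters applied in sequence, each narrowing the previous output.
    recent_stats = row_filters.RowFilterChain(filters=[
        row_filters.FamilyNameRegexFilter('stats'),
        row_filters.CellsColumnLimitFilter(2),  # two most recent cells per column
    ])

    # Interleave: filters applied to copies of the row, results pooled.
    combined = row_filters.RowFilterUnion(filters=[
        recent_stats,
        row_filters.ColumnQualifierRegexFilter(b'summary'),
    ])

    # Typically handed to a read, e.g. table.read_row(b'row-key', filter_=combined).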
+ repeated RowFilter filters = 1; + } + + // A RowFilter which evaluates one of two possible RowFilters, depending on + // whether or not a predicate RowFilter outputs any cells from the input row. + // + // IMPORTANT NOTE: The predicate filter does not execute atomically with the + // true and false filters, which may lead to inconsistent or unexpected + // results. Additionally, Condition filters have poor performance, especially + // when filters are set for the false condition. + message Condition { + // If `predicate_filter` outputs any cells, then `true_filter` will be + // evaluated on the input row. Otherwise, `false_filter` will be evaluated. + RowFilter predicate_filter = 1; + + // The filter to apply to the input row if `predicate_filter` returns any + // results. If not provided, no results will be returned in the true case. + RowFilter true_filter = 2; + + // The filter to apply to the input row if `predicate_filter` does not + // return any results. If not provided, no results will be returned in the + // false case. + RowFilter false_filter = 3; + } + + // Which of the possible RowFilter types to apply. If none are set, this + // RowFilter returns all cells in the input row. + oneof filter { + // Applies several RowFilters to the data in sequence, progressively + // narrowing the results. + Chain chain = 1; + + // Applies several RowFilters to the data in parallel and combines the + // results. + Interleave interleave = 2; + + // Applies one of two possible RowFilters to the data based on the output of + // a predicate RowFilter. + Condition condition = 3; + + // ADVANCED USE ONLY. + // Hook for introspection into the RowFilter. Outputs all cells directly to + // the output of the read rather than to any parent filter. Consider the + // following example: + // + // Chain( + // FamilyRegex("A"), + // Interleave( + // All(), + // Chain(Label("foo"), Sink()) + // ), + // QualifierRegex("B") + // ) + // + // A,A,1,w + // A,B,2,x + // B,B,4,z + // | + // FamilyRegex("A") + // | + // A,A,1,w + // A,B,2,x + // | + // +------------+-------------+ + // | | + // All() Label(foo) + // | | + // A,A,1,w A,A,1,w,labels:[foo] + // A,B,2,x A,B,2,x,labels:[foo] + // | | + // | Sink() --------------+ + // | | | + // +------------+ x------+ A,A,1,w,labels:[foo] + // | A,B,2,x,labels:[foo] + // A,A,1,w | + // A,B,2,x | + // | | + // QualifierRegex("B") | + // | | + // A,B,2,x | + // | | + // +--------------------------------+ + // | + // A,A,1,w,labels:[foo] + // A,B,2,x,labels:[foo] // could be switched + // A,B,2,x // could be switched + // + // Despite being excluded by the qualifier filter, a copy of every cell + // that reaches the sink is present in the final result. + // + // As with an [Interleave][google.bigtable.v2.RowFilter.Interleave], + // duplicate cells are possible, and appear in an unspecified mutual order. + // In this case we have a duplicate with column "A:B" and timestamp 2, + // because one copy passed through the all filter while the other was + // passed through the label and sink. Note that one copy has label "foo", + // while the other does not. + // + // Cannot be used within the `predicate_filter`, `true_filter`, or + // `false_filter` of a [Condition][google.bigtable.v2.RowFilter.Condition]. + bool sink = 16; + + // Matches all cells, regardless of input. Functionally equivalent to + // leaving `filter` unset, but included for completeness. + bool pass_all_filter = 17; + + // Does not match any cells, regardless of input. 
Useful for temporarily + // disabling just part of a filter. + bool block_all_filter = 18; + + // Matches only cells from rows whose keys satisfy the given RE2 regex. In + // other words, passes through the entire row when the key matches, and + // otherwise produces an empty row. + // Note that, since row keys can contain arbitrary bytes, the `\C` escape + // sequence must be used if a true wildcard is desired. The `.` character + // will not match the new line character `\n`, which may be present in a + // binary key. + bytes row_key_regex_filter = 4; + + // Matches all cells from a row with probability p, and matches no cells + // from the row with probability 1-p. + double row_sample_filter = 14; + + // Matches only cells from columns whose families satisfy the given RE2 + // regex. For technical reasons, the regex must not contain the `:` + // character, even if it is not being used as a literal. + // Note that, since column families cannot contain the new line character + // `\n`, it is sufficient to use `.` as a full wildcard when matching + // column family names. + string family_name_regex_filter = 5; + + // Matches only cells from columns whose qualifiers satisfy the given RE2 + // regex. + // Note that, since column qualifiers can contain arbitrary bytes, the `\C` + // escape sequence must be used if a true wildcard is desired. The `.` + // character will not match the new line character `\n`, which may be + // present in a binary qualifier. + bytes column_qualifier_regex_filter = 6; + + // Matches only cells from columns within the given range. + ColumnRange column_range_filter = 7; + + // Matches only cells with timestamps within the given range. + TimestampRange timestamp_range_filter = 8; + + // Matches only cells with values that satisfy the given regular expression. + // Note that, since cell values can contain arbitrary bytes, the `\C` escape + // sequence must be used if a true wildcard is desired. The `.` character + // will not match the new line character `\n`, which may be present in a + // binary value. + bytes value_regex_filter = 9; + + // Matches only cells with values that fall within the given range. + ValueRange value_range_filter = 15; + + // Skips the first N cells of each row, matching all subsequent cells. + // If duplicate cells are present, as is possible when using an Interleave, + // each copy of the cell is counted separately. + int32 cells_per_row_offset_filter = 10; + + // Matches only the first N cells of each row. + // If duplicate cells are present, as is possible when using an Interleave, + // each copy of the cell is counted separately. + int32 cells_per_row_limit_filter = 11; + + // Matches only the most recent N cells within each column. For example, + // if N=2, this filter would match column `foo:bar` at timestamps 10 and 9, + // skip all earlier cells in `foo:bar`, and then begin matching again in + // column `foo:bar2`. + // If duplicate cells are present, as is possible when using an Interleave, + // each copy of the cell is counted separately. + int32 cells_per_column_limit_filter = 12; + + // Replaces each cell's value with the empty string. + bool strip_value_transformer = 13; + + // Applies the given label to all cells in the output row. This allows + // the client to determine which results were produced from which part of + // the filter. 
+ // + // Values must be at most 15 characters in length, and match the RE2 + // pattern `[a-z0-9\\-]+` + // + // Due to a technical limitation, it is not currently possible to apply + // multiple labels to a cell. As a result, a Chain may have no more than + // one sub-filter which contains a `apply_label_transformer`. It is okay for + // an Interleave to contain multiple `apply_label_transformers`, as they + // will be applied to separate copies of the input. This may be relaxed in + // the future. + string apply_label_transformer = 19; + } +} + +// Specifies a particular change to be made to the contents of a row. +message Mutation { + // A Mutation which sets the value of the specified cell. + message SetCell { + // The name of the family into which new data should be written. + // Must match `[-_.a-zA-Z0-9]+` + string family_name = 1; + + // The qualifier of the column into which new data should be written. + // Can be any byte string, including the empty string. + bytes column_qualifier = 2; + + // The timestamp of the cell into which new data should be written. + // Use -1 for current Bigtable server time. + // Otherwise, the client should set this value itself, noting that the + // default value is a timestamp of zero if the field is left unspecified. + // Values must match the granularity of the table (e.g. micros, millis). + int64 timestamp_micros = 3; + + // The value to be written into the specified cell. + bytes value = 4; + } + + // A Mutation which deletes cells from the specified column, optionally + // restricting the deletions to a given timestamp range. + message DeleteFromColumn { + // The name of the family from which cells should be deleted. + // Must match `[-_.a-zA-Z0-9]+` + string family_name = 1; + + // The qualifier of the column from which cells should be deleted. + // Can be any byte string, including the empty string. + bytes column_qualifier = 2; + + // The range of timestamps within which cells should be deleted. + TimestampRange time_range = 3; + } + + // A Mutation which deletes all cells from the specified column family. + message DeleteFromFamily { + // The name of the family from which cells should be deleted. + // Must match `[-_.a-zA-Z0-9]+` + string family_name = 1; + } + + // A Mutation which deletes all cells from the containing row. + message DeleteFromRow { + + } + + // Which of the possible Mutation types to apply. + oneof mutation { + // Set a cell's value. + SetCell set_cell = 1; + + // Deletes cells from a column. + DeleteFromColumn delete_from_column = 2; + + // Deletes cells from a column family. + DeleteFromFamily delete_from_family = 3; + + // Deletes cells from the entire row. + DeleteFromRow delete_from_row = 4; + } +} + +// Specifies an atomic read/modify/write operation on the latest value of the +// specified column. +message ReadModifyWriteRule { + // The name of the family to which the read/modify/write should be applied. + // Must match `[-_.a-zA-Z0-9]+` + string family_name = 1; + + // The qualifier of the column to which the read/modify/write should be + // applied. + // Can be any byte string, including the empty string. + bytes column_qualifier = 2; + + // The rule used to determine the column's new latest value from its current + // latest value. + oneof rule { + // Rule specifying that `append_value` be appended to the existing value. + // If the targeted cell is unset, it will be treated as containing the + // empty string. 
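Both rules are driven from the Python client through an append-mode row; a sketch with placeholder ids, assuming the AppendRow surface in google/cloud/bigtable/row.py:

    from google.cloud import bigtable

    client = bigtable.Client(project='my-project')
    table = client.instance('my-instance').table('my-table')

    # An append-mode row accumulates ReadModifyWriteRules and commits them
    # atomically in a single ReadModifyWriteRow call.
    row = table.row(b'row-key', append=True)
    row.append_cell_value('cf1', b'greeting', b', world')  # append_value rule
    row.increment_cell_value('cf1', b'counter', 1)         # increment_amount rule
    result = row.commit()  # the new contents of the modified cells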
+ bytes append_value = 3; + + // Rule specifying that `increment_amount` be added to the existing value. + // If the targeted cell is unset, it will be treated as containing a zero. + // Otherwise, the targeted cell must contain an 8-byte value (interpreted + // as a 64-bit big-endian signed integer), or the entire request will fail. + int64 increment_amount = 4; + } +} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/instance.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/instance.proto new file mode 100644 index 000000000000..bb69b1f66d42 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/instance.proto @@ -0,0 +1,208 @@ +// Copyright 2018 Google LLC. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +syntax = "proto3"; + +package google.bigtable.admin.v2; + +import "google/api/annotations.proto"; +import "google/bigtable/admin/v2/common.proto"; + +option csharp_namespace = "Google.Cloud.Bigtable.Admin.V2"; +option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/v2;admin"; +option java_multiple_files = true; +option java_outer_classname = "InstanceProto"; +option java_package = "com.google.bigtable.admin.v2"; +option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2"; + + +// A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and +// the resources that serve them. +// All tables in an instance are served from a single +// [Cluster][google.bigtable.admin.v2.Cluster]. +message Instance { + // Possible states of an instance. + enum State { + // The state of the instance could not be determined. + STATE_NOT_KNOWN = 0; + + // The instance has been successfully created and can serve requests + // to its tables. + READY = 1; + + // The instance is currently being created, and may be destroyed + // if the creation process encounters an error. + CREATING = 2; + } + + // The type of the instance. + enum Type { + // The type of the instance is unspecified. If set when creating an + // instance, a `PRODUCTION` instance will be created. If set when updating + // an instance, the type will be left unchanged. + TYPE_UNSPECIFIED = 0; + + // An instance meant for production use. `serve_nodes` must be set + // on the cluster. + PRODUCTION = 1; + + // The instance is meant for development and testing purposes only; it has + // no performance or uptime guarantees and is not covered by SLA. + // After a development instance is created, it can be upgraded by + // updating the instance to type `PRODUCTION`. An instance created + // as a production instance cannot be changed to a development instance. + // When creating a development instance, `serve_nodes` on the cluster must + // not be set. + DEVELOPMENT = 2; + } + + // (`OutputOnly`) + // The unique name of the instance. Values are of the form + // `projects//instances/[a-z][a-z0-9\\-]+[a-z0-9]`. + string name = 1; + + // The descriptive name for this instance as it appears in UIs. 
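Client-side, an Instance is typically created through the helper in google/cloud/bigtable/instance.py; a rough sketch with placeholder ids (the keyword arguments are assumptions about that surface):

    from google.cloud import bigtable

    client = bigtable.Client(project='my-project', admin=True)

    # display_name is the human-readable label described above; the instance id
    # becomes the trailing component of the instance's unique name.
    instance = client.instance('my-instance', display_name='My Instance')
    operation = instance.create()
    # Poll the returned long-running operation until it completes before use.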
+ // Can be changed at any time, but should be kept globally unique + // to avoid confusion. + string display_name = 2; + + // (`OutputOnly`) + // The current state of the instance. + State state = 3; + + // The type of the instance. Defaults to `PRODUCTION`. + Type type = 4; + + // Labels are a flexible and lightweight mechanism for organizing cloud + // resources into groups that reflect a customer's organizational needs and + // deployment strategies. They can be used to filter resources and aggregate + // metrics. + // + // * Label keys must be between 1 and 63 characters long and must conform to + // the regular expression: `[\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}`. + // * Label values must be between 0 and 63 characters long and must conform to + // the regular expression: `[\p{Ll}\p{Lo}\p{N}_-]{0,63}`. + // * No more than 64 labels can be associated with a given resource. + // * Keys and values must both be under 128 bytes. + map labels = 5; +} + +// A resizable group of nodes in a particular cloud location, capable +// of serving all [Tables][google.bigtable.admin.v2.Table] in the parent +// [Instance][google.bigtable.admin.v2.Instance]. +message Cluster { + // Possible states of a cluster. + enum State { + // The state of the cluster could not be determined. + STATE_NOT_KNOWN = 0; + + // The cluster has been successfully created and is ready to serve requests. + READY = 1; + + // The cluster is currently being created, and may be destroyed + // if the creation process encounters an error. + // A cluster may not be able to serve requests while being created. + CREATING = 2; + + // The cluster is currently being resized, and may revert to its previous + // node count if the process encounters an error. + // A cluster is still capable of serving requests while being resized, + // but may exhibit performance as if its number of allocated nodes is + // between the starting and requested states. + RESIZING = 3; + + // The cluster has no backing nodes. The data (tables) still + // exist, but no operations can be performed on the cluster. + DISABLED = 4; + } + + // (`OutputOnly`) + // The unique name of the cluster. Values are of the form + // `projects//instances//clusters/[a-z][-a-z0-9]*`. + string name = 1; + + // (`CreationOnly`) + // The location where this cluster's nodes and storage reside. For best + // performance, clients should be located as close as possible to this + // cluster. Currently only zones are supported, so values should be of the + // form `projects//locations/`. + string location = 2; + + // (`OutputOnly`) + // The current state of the cluster. + State state = 3; + + // The number of nodes allocated to this cluster. More nodes enable higher + // throughput and more consistent performance. + int32 serve_nodes = 4; + + // (`CreationOnly`) + // The type of storage used by this cluster to serve its + // parent instance's tables, unless explicitly overridden. + StorageType default_storage_type = 5; +} + +// A configuration object describing how Cloud Bigtable should treat traffic +// from a particular end user application. +message AppProfile { + // Read/write requests may be routed to any cluster in the instance, and will + // fail over to another cluster in the event of transient errors or delays. + // Choosing this option sacrifices read-your-writes consistency to improve + // availability. + message MultiClusterRoutingUseAny { + + } + + // Unconditionally routes all read/write requests to a specific cluster. 
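The two routing policies correspond to the ``RoutingPolicyType`` values exposed by the Python client. A minimal sketch, assuming hypothetical profile and cluster IDs::

    from google.cloud import bigtable
    from google.cloud.bigtable import enums

    client = bigtable.Client(project="my-project", admin=True)
    instance = client.instance("my-instance")

    # Multi-cluster routing: any cluster may serve requests.
    any_profile = instance.app_profile(
        "profile-any", routing_policy_type=enums.RoutingPolicyType.ANY
    )
    any_profile.create(ignore_warnings=True)

    # Single-cluster routing: requests are pinned to one cluster.
    single_profile = instance.app_profile(
        "profile-single",
        routing_policy_type=enums.RoutingPolicyType.SINGLE,
        cluster_id="my-cluster",
        allow_transactional_writes=False,
    )
    single_profile.create(ignore_warnings=True)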
+ // This option preserves read-your-writes consistency, but does not improve + // availability. + message SingleClusterRouting { + // The cluster to which read/write requests should be routed. + string cluster_id = 1; + + // Whether or not `CheckAndMutateRow` and `ReadModifyWriteRow` requests are + // allowed by this app profile. It is unsafe to send these requests to + // the same table/row/column in multiple clusters. + bool allow_transactional_writes = 2; + } + + // (`OutputOnly`) + // The unique name of the app profile. Values are of the form + // `projects//instances//appProfiles/[_a-zA-Z0-9][-_.a-zA-Z0-9]*`. + string name = 1; + + // Strongly validated etag for optimistic concurrency control. Preserve the + // value returned from `GetAppProfile` when calling `UpdateAppProfile` to + // fail the request if there has been a modification in the mean time. The + // `update_mask` of the request need not include `etag` for this protection + // to apply. + // See [Wikipedia](https://en.wikipedia.org/wiki/HTTP_ETag) and + // [RFC 7232](https://tools.ietf.org/html/rfc7232#section-2.3) for more + // details. + string etag = 2; + + // Optional long form description of the use case for this AppProfile. + string description = 3; + + // The routing policy for all read/write requests which use this app profile. + // A value must be explicitly set. + oneof routing_policy { + // Use a multi-cluster routing policy that may pick any cluster. + MultiClusterRoutingUseAny multi_cluster_routing_use_any = 5; + + // Use a single-cluster routing policy. + SingleClusterRouting single_cluster_routing = 6; + } +} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/table.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/table.proto new file mode 100644 index 000000000000..5d4374effc59 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/table.proto @@ -0,0 +1,221 @@ +// Copyright 2018 Google LLC. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +syntax = "proto3"; + +package google.bigtable.admin.v2; + +import "google/api/annotations.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; + +option csharp_namespace = "Google.Cloud.Bigtable.Admin.V2"; +option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/v2;admin"; +option java_multiple_files = true; +option java_outer_classname = "TableProto"; +option java_package = "com.google.bigtable.admin.v2"; +option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2"; + + +// A collection of user data indexed by row, column, and timestamp. +// Each table is served using the resources of its parent cluster. +message Table { + // The state of a table's data in a particular cluster. + message ClusterState { + // Table replication states. + enum ReplicationState { + // The replication state of the table is unknown in this cluster. 
+ STATE_NOT_KNOWN = 0; + + // The cluster was recently created, and the table must finish copying + // over pre-existing data from other clusters before it can begin + // receiving live replication updates and serving Data API requests. + INITIALIZING = 1; + + // The table is temporarily unable to serve Data API requests from this + // cluster due to planned internal maintenance. + PLANNED_MAINTENANCE = 2; + + // The table is temporarily unable to serve Data API requests from this + // cluster due to unplanned or emergency maintenance. + UNPLANNED_MAINTENANCE = 3; + + // The table can serve Data API requests from this cluster. Depending on + // replication delay, reads may not immediately reflect the state of the + // table in other clusters. + READY = 4; + } + + // (`OutputOnly`) + // The state of replication for the table in this cluster. + ReplicationState replication_state = 1; + } + + // Possible timestamp granularities to use when keeping multiple versions + // of data in a table. + enum TimestampGranularity { + // The user did not specify a granularity. Should not be returned. + // When specified during table creation, MILLIS will be used. + TIMESTAMP_GRANULARITY_UNSPECIFIED = 0; + + // The table keeps data versioned at a granularity of 1ms. + MILLIS = 1; + } + + // Defines a view over a table's fields. + enum View { + // Uses the default view for each method as documented in its request. + VIEW_UNSPECIFIED = 0; + + // Only populates `name`. + NAME_ONLY = 1; + + // Only populates `name` and fields related to the table's schema. + SCHEMA_VIEW = 2; + + // Only populates `name` and fields related to the table's + // replication state. + REPLICATION_VIEW = 3; + + // Populates all fields. + FULL = 4; + } + + // (`OutputOnly`) + // The unique name of the table. Values are of the form + // `projects//instances//tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]*`. + // Views: `NAME_ONLY`, `SCHEMA_VIEW`, `REPLICATION_VIEW`, `FULL` + string name = 1; + + // (`OutputOnly`) + // Map from cluster ID to per-cluster table state. + // If it could not be determined whether or not the table has data in a + // particular cluster (for example, if its zone is unavailable), then + // there will be an entry for the cluster with UNKNOWN `replication_status`. + // Views: `REPLICATION_VIEW`, `FULL` + map cluster_states = 2; + + // (`CreationOnly`) + // The column families configured for this table, mapped by column family ID. + // Views: `SCHEMA_VIEW`, `FULL` + map column_families = 3; + + // (`CreationOnly`) + // The granularity (i.e. `MILLIS`) at which timestamps are stored in + // this table. Timestamps not matching the granularity will be rejected. + // If unspecified at creation time, the value will be set to `MILLIS`. + // Views: `SCHEMA_VIEW`, `FULL` + TimestampGranularity granularity = 4; +} + +// A set of columns within a table which share a common configuration. +message ColumnFamily { + // Garbage collection rule specified as a protobuf. + // Must serialize to at most 500 bytes. + // + // NOTE: Garbage collection executes opportunistically in the background, and + // so it's possible for reads to return a cell even if it matches the active + // GC expression for its family. + GcRule gc_rule = 1; +} + +// Rule for determining which cells to delete during garbage collection. +message GcRule { + // A GcRule which deletes cells matching all of the given rules. + message Intersection { + // Only delete cells which would be deleted by every element of `rules`. 
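The Python client composes these rules through the column-family helpers. A minimal sketch, assuming placeholder table and family names, that unions an age limit with a version limit::

    import datetime

    from google.cloud import bigtable
    from google.cloud.bigtable.column_family import (
        GCRuleUnion,
        MaxAgeGCRule,
        MaxVersionsGCRule,
    )

    client = bigtable.Client(project="my-project", admin=True)
    table = client.instance("my-instance").table("my-table")

    # Drop cells older than 7 days, or beyond the 3 most recent versions.
    gc_rule = GCRuleUnion(
        rules=[MaxAgeGCRule(datetime.timedelta(days=7)), MaxVersionsGCRule(3)]
    )
    table.column_family("cf1", gc_rule=gc_rule).create()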
+ repeated GcRule rules = 1; + } + + // A GcRule which deletes cells matching any of the given rules. + message Union { + // Delete cells which would be deleted by any element of `rules`. + repeated GcRule rules = 1; + } + + // Garbage collection rules. + oneof rule { + // Delete all cells in a column except the most recent N. + int32 max_num_versions = 1; + + // Delete cells in a column older than the given age. + // Values must be at least one millisecond, and will be truncated to + // microsecond granularity. + google.protobuf.Duration max_age = 2; + + // Delete cells that would be deleted by every nested rule. + Intersection intersection = 3; + + // Delete cells that would be deleted by any nested rule. + Union union = 4; + } +} + +// A snapshot of a table at a particular time. A snapshot can be used as a +// checkpoint for data restoration or a data source for a new table. +// +// Note: This is a private alpha release of Cloud Bigtable snapshots. This +// feature is not currently available to most Cloud Bigtable customers. This +// feature might be changed in backward-incompatible ways and is not recommended +// for production use. It is not subject to any SLA or deprecation policy. +message Snapshot { + // Possible states of a snapshot. + enum State { + // The state of the snapshot could not be determined. + STATE_NOT_KNOWN = 0; + + // The snapshot has been successfully created and can serve all requests. + READY = 1; + + // The snapshot is currently being created, and may be destroyed if the + // creation process encounters an error. A snapshot may not be restored to a + // table while it is being created. + CREATING = 2; + } + + // (`OutputOnly`) + // The unique name of the snapshot. + // Values are of the form + // `projects//instances//clusters//snapshots/`. + string name = 1; + + // (`OutputOnly`) + // The source table at the time the snapshot was taken. + Table source_table = 2; + + // (`OutputOnly`) + // The size of the data in the source table at the time the snapshot was + // taken. In some cases, this value may be computed asynchronously via a + // background process and a placeholder of 0 will be used in the meantime. + int64 data_size_bytes = 3; + + // (`OutputOnly`) + // The time when the snapshot is created. + google.protobuf.Timestamp create_time = 4; + + // (`OutputOnly`) + // The time when the snapshot will be deleted. The maximum amount of time a + // snapshot can stay active is 365 days. If 'ttl' is not specified, + // the default maximum of 365 days will be used. + google.protobuf.Timestamp delete_time = 5; + + // (`OutputOnly`) + // The current state of the snapshot. + State state = 6; + + // (`OutputOnly`) + // Description of the snapshot. 
+ string description = 7; +} diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index 034dbbb17183..9c5555a83895 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -1,19 +1,19 @@ { - "updateTime": "2019-01-17T13:14:02.679846Z", + "updateTime": "2019-01-23T22:43:21.170639Z", "sources": [ { "generator": { "name": "artman", - "version": "0.16.6", - "dockerImage": "googleapis/artman@sha256:12722f2ca3fbc3b53cc6aa5f0e569d7d221b46bd876a2136497089dec5e3634e" + "version": "0.16.7", + "dockerImage": "googleapis/artman@sha256:d6c8ced606eb49973ca95d2af7c55a681acc042db0f87d135968349e7bf6dd80" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "0ac60e21a1aa86c07c1836865b35308ba8178b05", - "internalRef": "229626798" + "sha": "9aac88a22468b1e291937f55fa1ef237adfdc63e", + "internalRef": "230568136" } }, { diff --git a/packages/google-cloud-bigtable/synth.py b/packages/google-cloud-bigtable/synth.py index f1bec2c6a8e2..edffa43640dd 100644 --- a/packages/google-cloud-bigtable/synth.py +++ b/packages/google-cloud-bigtable/synth.py @@ -28,6 +28,7 @@ "v2", config_path="/google/bigtable/artman_bigtable.yaml", artman_output_name="bigtable-v2", + include_protos=True, ) s.move(library / "google/cloud/bigtable_v2") @@ -39,6 +40,7 @@ "v2", config_path="/google/bigtable/admin/artman_bigtableadmin.yaml", artman_output_name="bigtable-admin-v2", + include_protos=True, ) s.move(library / "google/cloud/bigtable_admin_v2") From 3efc67d1209e87869b027ef6376729b128948fb0 Mon Sep 17 00:00:00 2001 From: Johan Date: Mon, 4 Feb 2019 18:39:17 +0100 Subject: [PATCH 227/892] Fix typos in Table docstrings. (#7261) --- .../google-cloud-bigtable/google/cloud/bigtable/table.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index 40dc25a5a7e3..0ec6bbcf25fc 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -403,7 +403,7 @@ def read_rows( considered inclusive. The default is False (exclusive). :type row_set: :class:`row_set.RowSet` - :param filter_: (Optional) The row set containing multiple row keys and + :param row_set: (Optional) The row set containing multiple row keys and row_ranges. :type retry: :class:`~google.api_core.retry.Retry` @@ -459,7 +459,7 @@ def yield_rows(self, **kwargs): each row. :type row_set: :class:`row_set.RowSet` - :param filter_: (Optional) The row set containing multiple row keys and + :param row_set: (Optional) The row set containing multiple row keys and row_ranges. :rtype: :class:`.PartialRowData` @@ -884,7 +884,7 @@ def _create_row_request( :param app_profile_id: (Optional) The unique name of the AppProfile. :type row_set: :class:`row_set.RowSet` - :param filter_: (Optional) The row set containing multiple row keys and + :param row_set: (Optional) The row set containing multiple row keys and row_ranges. :rtype: :class:`data_messages_v2_pb2.ReadRowsRequest` From cf62d89dd19b73f24a1a5ff98c727164ac29a3e1 Mon Sep 17 00:00:00 2001 From: Pravin Dahal Date: Mon, 11 Feb 2019 19:12:32 +0100 Subject: [PATCH 228/892] Updated client library documentation URLs. 
(#7307) Previously, the URLs would redirect using JavaScript, which would either be slow or not work at all (in case JavaScript is disabled on the browser) --- packages/google-cloud-bigtable/README.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/README.rst b/packages/google-cloud-bigtable/README.rst index 8835e2a3bbcf..472e85855830 100644 --- a/packages/google-cloud-bigtable/README.rst +++ b/packages/google-cloud-bigtable/README.rst @@ -17,7 +17,7 @@ Analytics, Maps, and Gmail. .. |versions| image:: https://img.shields.io/pypi/pyversions/google-cloud-bigtable.svg :target: https://pypi.org/project/google-cloud-bigtable/ .. _Google Cloud Bigtable: https://cloud.google.com/bigtable -.. _Client Library Documentation: https://googlecloudplatform.github.io/google-cloud-python/latest/bigtable/usage.html +.. _Client Library Documentation: https://googleapis.github.io/google-cloud-python/latest/bigtable/usage.html .. _Product Documentation: https://cloud.google.com/bigtable/docs Quick Start @@ -33,7 +33,7 @@ In order to use this library, you first need to go through the following steps: .. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project .. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project .. _Enable the Cloud Bigtable API.: https://cloud.google.com/bigtable -.. _Setup Authentication.: https://googlecloudplatform.github.io/google-cloud-python/latest/core/auth.html +.. _Setup Authentication.: https://googleapis.github.io/google-cloud-python/latest/core/auth.html Installation ~~~~~~~~~~~~ From fd571d3d95479d489100edea05acc3d3e7a42ef8 Mon Sep 17 00:00:00 2001 From: Alex <7764119+AVaksman@users.noreply.github.com> Date: Thu, 14 Feb 2019 15:32:53 -0500 Subject: [PATCH 229/892] Compare 0 using '!=', rather than 'is not'. (#7312) Per flake8 F632. --- packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py b/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py index 1f22af4a534c..120e2bb05b48 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py @@ -144,7 +144,7 @@ def mutate_rows(self, rows): def flush(self): """ Sends the current. batch to Cloud Bigtable. """ - if len(self.rows) is not 0: + if len(self.rows) != 0: self.table.mutate_rows(self.rows) self.total_mutation_count = 0 self.total_size = 0 From 769977e9072fefda119063b4a7a5424204d6f693 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot <44816363+yoshi-automation@users.noreply.github.com> Date: Tue, 19 Feb 2019 12:45:55 -0800 Subject: [PATCH 230/892] Remove unused message exports (via synth). 
(#7264) --- .../google/cloud/bigtable_admin_v2/types.py | 6 ------ .../google/cloud/bigtable_v2/types.py | 4 +--- packages/google-cloud-bigtable/synth.metadata | 10 +++++----- 3 files changed, 6 insertions(+), 14 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types.py index 137785b7107c..4dd4bc032564 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types.py @@ -19,17 +19,14 @@ from google.api_core.protobuf_helpers import get_messages -from google.api import http_pb2 from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2 from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2 from google.cloud.bigtable_admin_v2.proto import instance_pb2 from google.cloud.bigtable_admin_v2.proto import table_pb2 from google.iam.v1 import iam_policy_pb2 from google.iam.v1 import policy_pb2 -from google.iam.v1.logging import audit_data_pb2 from google.longrunning import operations_pb2 from google.protobuf import any_pb2 -from google.protobuf import descriptor_pb2 from google.protobuf import duration_pb2 from google.protobuf import empty_pb2 from google.protobuf import field_mask_pb2 @@ -37,13 +34,10 @@ from google.rpc import status_pb2 _shared_modules = [ - http_pb2, iam_policy_pb2, policy_pb2, - audit_data_pb2, operations_pb2, any_pb2, - descriptor_pb2, duration_pb2, empty_pb2, field_mask_pb2, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types.py index 1baf5910987c..30793e44021f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types.py @@ -19,15 +19,13 @@ from google.api_core.protobuf_helpers import get_messages -from google.api import http_pb2 from google.cloud.bigtable_v2.proto import bigtable_pb2 from google.cloud.bigtable_v2.proto import data_pb2 from google.protobuf import any_pb2 -from google.protobuf import descriptor_pb2 from google.protobuf import wrappers_pb2 from google.rpc import status_pb2 -_shared_modules = [http_pb2, any_pb2, descriptor_pb2, wrappers_pb2, status_pb2] +_shared_modules = [any_pb2, wrappers_pb2, status_pb2] _local_modules = [bigtable_pb2, data_pb2] diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index 9c5555a83895..63417df40e73 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -1,19 +1,19 @@ { - "updateTime": "2019-01-23T22:43:21.170639Z", + "updateTime": "2019-02-02T13:13:43.962780Z", "sources": [ { "generator": { "name": "artman", - "version": "0.16.7", - "dockerImage": "googleapis/artman@sha256:d6c8ced606eb49973ca95d2af7c55a681acc042db0f87d135968349e7bf6dd80" + "version": "0.16.8", + "dockerImage": "googleapis/artman@sha256:75bc07ef34a1de9895c18af54dc503ed3b3f3b52e85062e3360a979d2a0741e7" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "9aac88a22468b1e291937f55fa1ef237adfdc63e", - "internalRef": "230568136" + "sha": "bce093dab3e65c40eb9a37efbdc960f34df6037a", + "internalRef": "231974277" } }, { From 604878df110fef9086b73a8955c0082a64dab96a Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Wed, 20 Feb 2019 13:22:28 -0500 Subject: [PATCH 231/892] Bigtable: improve 'Policy' 
interchange w/ JSON, gRPC payloads. (#7378) Add 'Policy.from_pb' factory and 'Policy.to_pb' method. Use them for interchange with the 'google.iam.v1.policy_pb2.Policy' messages. Override 'Policy.{from_api_repr,to_api_repr}', converting 'etag' attr to / from bytes to maintain JSON compatibility. Closes #7369. --- .../google/cloud/bigtable/instance.py | 21 +--- .../google/cloud/bigtable/policy.py | 76 ++++++++++++ .../tests/unit/test_instance.py | 5 +- .../tests/unit/test_policy.py | 113 ++++++++++++++++++ 4 files changed, 194 insertions(+), 21 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py index 47c8d5f539da..3c5e144ed64a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py @@ -414,7 +414,7 @@ def get_iam_policy(self): """ instance_admin_client = self._client.instance_admin_client resp = instance_admin_client.get_iam_policy(resource=self.name) - return Policy.from_api_repr(self._to_dict_from_policy_pb(resp)) + return Policy.from_pb(resp) def set_iam_policy(self, policy): """Sets the access control policy on an instance resource. Replaces any @@ -438,9 +438,9 @@ class `google.cloud.bigtable.policy.Policy` """ instance_admin_client = self._client.instance_admin_client resp = instance_admin_client.set_iam_policy( - resource=self.name, policy=policy.to_api_repr() + resource=self.name, policy=policy.to_pb() ) - return Policy.from_api_repr(self._to_dict_from_policy_pb(resp)) + return Policy.from_pb(resp) def test_iam_permissions(self, permissions): """Returns permissions that the caller has on the specified instance @@ -470,21 +470,6 @@ def test_iam_permissions(self, permissions): ) return list(resp.permissions) - def _to_dict_from_policy_pb(self, policy): - """Returns a dictionary representation of resource returned from - the getIamPolicy API to use as parameter for - :meth: google.api_core.iam.Policy.from_api_repr - """ - pb_dict = {} - bindings = [ - {"role": binding.role, "members": binding.members} - for binding in policy.bindings - ] - pb_dict["etag"] = policy.etag - pb_dict["version"] = policy.version - pb_dict["bindings"] = bindings - return pb_dict - def cluster( self, cluster_id, location_id=None, serve_nodes=None, default_storage_type=None ): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/policy.py b/packages/google-cloud-bigtable/google/cloud/bigtable/policy.py index 039cea9f3e9e..78c8e3a414b1 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/policy.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/policy.py @@ -12,8 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. +import base64 + from google.api_core.iam import Policy as BasePolicy from google.cloud._helpers import _to_bytes +from google.iam.v1 import policy_pb2 """IAM roles supported by Bigtable Instance resource""" BIGTABLE_ADMIN_ROLE = "roles/bigtable.admin" @@ -107,3 +110,76 @@ def bigtable_viewers(self): for member in self._bindings.get(BIGTABLE_VIEWER_ROLE, ()): result.add(member) return frozenset(result) + + @classmethod + def from_pb(cls, policy_pb): + """Factory: create a policy from a protobuf message. + + Args: + policy_pb (google.iam.policy_pb2.Policy): message returned by + ``get_iam_policy`` gRPC API. 
+ + Returns: + :class:`Policy`: the parsed policy + """ + policy = cls(policy_pb.etag, policy_pb.version) + + for binding in policy_pb.bindings: + policy[binding.role] = sorted(binding.members) + + return policy + + def to_pb(self): + """Render a protobuf message. + + Returns: + google.iam.policy_pb2.Policy: a message to be passed to the + ``set_iam_policy`` gRPC API. + """ + + return policy_pb2.Policy( + etag=self.etag, + version=self.version or 0, + bindings=[ + policy_pb2.Binding(role=role, members=sorted(self[role])) + for role in self + ], + ) + + @classmethod + def from_api_repr(cls, resource): + """Factory: create a policy from a JSON resource. + + Overrides the base class version to store :attr:`etag` as bytes. + + Args: + resource (dict): JSON policy resource returned by the + ``getIamPolicy`` REST API. + + Returns: + :class:`Policy`: the parsed policy + """ + etag = resource.get("etag") + + if etag is not None: + resource = resource.copy() + resource["etag"] = base64.b64decode(etag.encode("ascii")) + + return super(Policy, cls).from_api_repr(resource) + + def to_api_repr(self): + """Render a JSON policy resource. + + Overrides the base class version to convert :attr:`etag` from bytes + to JSON-compatible base64-encoded text. + + Returns: + dict: a JSON resource to be passed to the + ``setIamPolicy`` REST API. + """ + resource = super(Policy, self).to_api_repr() + + if self.etag is not None: + resource["etag"] = base64.b64encode(self.etag).decode("ascii") + + return resource diff --git a/packages/google-cloud-bigtable/tests/unit/test_instance.py b/packages/google-cloud-bigtable/tests/unit/test_instance.py index 6ac1d242c626..5092652a1fce 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_instance.py +++ b/packages/google-cloud-bigtable/tests/unit/test_instance.py @@ -641,7 +641,7 @@ def test_set_iam_policy(self): version = 1 etag = b"etag_v1" members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"] - bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": members}] + bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": sorted(members)}] iam_policy_pb = policy_pb2.Policy(version=version, etag=etag, bindings=bindings) # Patch the stub used by the API method. 
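For callers the round trip stays symmetrical: ``get_iam_policy`` returns a ``Policy`` built via ``from_pb``, and ``set_iam_policy`` serializes it back via ``to_pb``. A minimal sketch, assuming a placeholder member::

    from google.cloud import bigtable
    from google.cloud.bigtable.policy import Policy, BIGTABLE_ADMIN_ROLE

    client = bigtable.Client(project="my-project", admin=True)
    instance = client.instance("my-instance")

    policy = instance.get_iam_policy()
    policy[BIGTABLE_ADMIN_ROLE] = [Policy.user("alice@example.com")]
    updated = instance.set_iam_policy(policy)
    print(updated.bigtable_admins)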
@@ -661,8 +661,7 @@ def test_set_iam_policy(self): result = instance.set_iam_policy(iam_policy) instance_api.set_iam_policy.assert_called_once_with( - resource=instance.name, - policy={"version": version, "etag": etag, "bindings": bindings}, + resource=instance.name, policy=iam_policy_pb ) self.assertEqual(result.version, version) self.assertEqual(result.etag, etag) diff --git a/packages/google-cloud-bigtable/tests/unit/test_policy.py b/packages/google-cloud-bigtable/tests/unit/test_policy.py index 49eb015e078e..74b19e49b29a 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_policy.py +++ b/packages/google-cloud-bigtable/tests/unit/test_policy.py @@ -86,3 +86,116 @@ def test_bigtable_viewers_getter(self): policy = self._make_one() policy[BIGTABLE_VIEWER_ROLE] = [MEMBER] self.assertEqual(policy.bigtable_viewers, expected) + + def test_from_pb_empty(self): + from google.iam.v1 import policy_pb2 + + empty = frozenset() + message = policy_pb2.Policy() + klass = self._get_target_class() + policy = klass.from_pb(message) + self.assertEqual(policy.etag, b"") + self.assertEqual(policy.version, 0) + self.assertEqual(policy.bigtable_admins, empty) + self.assertEqual(policy.bigtable_readers, empty) + self.assertEqual(policy.bigtable_users, empty) + self.assertEqual(policy.bigtable_viewers, empty) + self.assertEqual(len(policy), 0) + self.assertEqual(dict(policy), {}) + + def test_from_pb_non_empty(self): + from google.iam.v1 import policy_pb2 + from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE + + ETAG = b"ETAG" + VERSION = 17 + members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"] + empty = frozenset() + message = policy_pb2.Policy( + etag=ETAG, + version=VERSION, + bindings=[{"role": BIGTABLE_ADMIN_ROLE, "members": members}], + ) + klass = self._get_target_class() + policy = klass.from_pb(message) + self.assertEqual(policy.etag, ETAG) + self.assertEqual(policy.version, VERSION) + self.assertEqual(policy.bigtable_admins, set(members)) + self.assertEqual(policy.bigtable_readers, empty) + self.assertEqual(policy.bigtable_users, empty) + self.assertEqual(policy.bigtable_viewers, empty) + self.assertEqual(len(policy), 1) + self.assertEqual(dict(policy), {BIGTABLE_ADMIN_ROLE: set(members)}) + + def test_to_pb_empty(self): + from google.iam.v1 import policy_pb2 + + policy = self._make_one() + expected = policy_pb2.Policy() + + self.assertEqual(policy.to_pb(), expected) + + def test_to_pb_explicit(self): + from google.iam.v1 import policy_pb2 + from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE + + VERSION = 17 + ETAG = b"ETAG" + members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"] + policy = self._make_one(ETAG, VERSION) + policy[BIGTABLE_ADMIN_ROLE] = members + expected = policy_pb2.Policy( + etag=ETAG, + version=VERSION, + bindings=[ + policy_pb2.Binding(role=BIGTABLE_ADMIN_ROLE, members=sorted(members)) + ], + ) + + self.assertEqual(policy.to_pb(), expected) + + def test_from_api_repr_wo_etag(self): + VERSION = 17 + empty = frozenset() + resource = {"version": VERSION} + klass = self._get_target_class() + policy = klass.from_api_repr(resource) + self.assertIsNone(policy.etag) + self.assertEqual(policy.version, VERSION) + self.assertEqual(policy.bigtable_admins, empty) + self.assertEqual(policy.bigtable_readers, empty) + self.assertEqual(policy.bigtable_users, empty) + self.assertEqual(policy.bigtable_viewers, empty) + self.assertEqual(len(policy), 0) + self.assertEqual(dict(policy), {}) + + def test_from_api_repr_w_etag(self): + 
import base64 + + ETAG = b"ETAG" + empty = frozenset() + resource = {"etag": base64.b64encode(ETAG).decode("ascii")} + klass = self._get_target_class() + policy = klass.from_api_repr(resource) + self.assertEqual(policy.etag, ETAG) + self.assertIsNone(policy.version) + self.assertEqual(policy.bigtable_admins, empty) + self.assertEqual(policy.bigtable_readers, empty) + self.assertEqual(policy.bigtable_users, empty) + self.assertEqual(policy.bigtable_viewers, empty) + self.assertEqual(len(policy), 0) + self.assertEqual(dict(policy), {}) + + def test_to_api_repr_wo_etag(self): + VERSION = 17 + resource = {"version": VERSION} + policy = self._make_one(version=VERSION) + self.assertEqual(policy.to_api_repr(), resource) + + def test_to_api_repr_w_etag(self): + import base64 + + ETAG = b"ETAG" + policy = self._make_one(etag=ETAG) + resource = {"etag": base64.b64encode(ETAG).decode("ascii")} + self.assertEqual(policy.to_api_repr(), resource) From df8956ff7ac1070be55e931de2adcaffce2586e3 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot <44816363+yoshi-automation@users.noreply.github.com> Date: Tue, 26 Feb 2019 09:03:02 -0800 Subject: [PATCH 232/892] Copy lintified proto files (via synth). (#7445) --- .../proto/bigtable_instance_admin.proto | 33 +++++++++++-------- .../proto/bigtable_table_admin.proto | 13 +++++--- .../bigtable_admin_v2/proto/common.proto | 1 - .../bigtable_admin_v2/proto/instance.proto | 5 +-- .../cloud/bigtable_admin_v2/proto/table.proto | 1 - .../cloud/bigtable_v2/proto/bigtable.proto | 14 ++++---- .../google/cloud/bigtable_v2/proto/data.proto | 5 +-- packages/google-cloud-bigtable/synth.metadata | 10 +++--- 8 files changed, 41 insertions(+), 41 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto index ec992ea0f818..80ce42470736 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto @@ -33,13 +33,13 @@ option java_outer_classname = "BigtableInstanceAdminProto"; option java_package = "com.google.bigtable.admin.v2"; option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2"; - // Service for creating, configuring, and deleting Cloud Bigtable Instances and // Clusters. Provides access to the Instance and Cluster schemas only, not the // tables' metadata or data stored in those tables. service BigtableInstanceAdmin { // Create an instance within a project. - rpc CreateInstance(CreateInstanceRequest) returns (google.longrunning.Operation) { + rpc CreateInstance(CreateInstanceRequest) + returns (google.longrunning.Operation) { option (google.api.http) = { post: "/v2/{parent=projects/*}/instances" body: "*" @@ -69,7 +69,8 @@ service BigtableInstanceAdmin { } // Partially updates an instance within a project. - rpc PartialUpdateInstance(PartialUpdateInstanceRequest) returns (google.longrunning.Operation) { + rpc PartialUpdateInstance(PartialUpdateInstanceRequest) + returns (google.longrunning.Operation) { option (google.api.http) = { patch: "/v2/{instance.name=projects/*/instances/*}" body: "instance" @@ -84,7 +85,8 @@ service BigtableInstanceAdmin { } // Creates a cluster within an instance. 
- rpc CreateCluster(CreateClusterRequest) returns (google.longrunning.Operation) { + rpc CreateCluster(CreateClusterRequest) + returns (google.longrunning.Operation) { option (google.api.http) = { post: "/v2/{parent=projects/*/instances/*}/clusters" body: "cluster" @@ -136,14 +138,16 @@ service BigtableInstanceAdmin { } // Lists information about app profiles in an instance. - rpc ListAppProfiles(ListAppProfilesRequest) returns (ListAppProfilesResponse) { + rpc ListAppProfiles(ListAppProfilesRequest) + returns (ListAppProfilesResponse) { option (google.api.http) = { get: "/v2/{parent=projects/*/instances/*}/appProfiles" }; } // Updates an app profile within an instance. - rpc UpdateAppProfile(UpdateAppProfileRequest) returns (google.longrunning.Operation) { + rpc UpdateAppProfile(UpdateAppProfileRequest) + returns (google.longrunning.Operation) { option (google.api.http) = { patch: "/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}" body: "app_profile" @@ -151,7 +155,8 @@ service BigtableInstanceAdmin { } // Deletes an app profile from an instance. - rpc DeleteAppProfile(DeleteAppProfileRequest) returns (google.protobuf.Empty) { + rpc DeleteAppProfile(DeleteAppProfileRequest) + returns (google.protobuf.Empty) { option (google.api.http) = { delete: "/v2/{name=projects/*/instances/*/appProfiles/*}" }; @@ -159,7 +164,8 @@ service BigtableInstanceAdmin { // Gets the access control policy for an instance resource. Returns an empty // policy if an instance exists but does not have a policy set. - rpc GetIamPolicy(google.iam.v1.GetIamPolicyRequest) returns (google.iam.v1.Policy) { + rpc GetIamPolicy(google.iam.v1.GetIamPolicyRequest) + returns (google.iam.v1.Policy) { option (google.api.http) = { post: "/v2/{resource=projects/*/instances/*}:getIamPolicy" body: "*" @@ -168,7 +174,8 @@ service BigtableInstanceAdmin { // Sets the access control policy on an instance resource. Replaces any // existing policy. - rpc SetIamPolicy(google.iam.v1.SetIamPolicyRequest) returns (google.iam.v1.Policy) { + rpc SetIamPolicy(google.iam.v1.SetIamPolicyRequest) + returns (google.iam.v1.Policy) { option (google.api.http) = { post: "/v2/{resource=projects/*/instances/*}:setIamPolicy" body: "*" @@ -176,7 +183,8 @@ service BigtableInstanceAdmin { } // Returns permissions that the caller has on the specified instance resource. - rpc TestIamPermissions(google.iam.v1.TestIamPermissionsRequest) returns (google.iam.v1.TestIamPermissionsResponse) { + rpc TestIamPermissions(google.iam.v1.TestIamPermissionsRequest) + returns (google.iam.v1.TestIamPermissionsResponse) { option (google.api.http) = { post: "/v2/{resource=projects/*/instances/*}:testIamPermissions" body: "*" @@ -439,7 +447,6 @@ message UpdateAppProfileRequest { bool ignore_warnings = 3; } - // Request message for BigtableInstanceAdmin.DeleteAppProfile. message DeleteAppProfileRequest { // The unique name of the app profile to be deleted. Values are of the form @@ -451,6 +458,4 @@ message DeleteAppProfileRequest { } // The metadata for the Operation returned by UpdateAppProfile. 
-message UpdateAppProfileMetadata { - -} +message UpdateAppProfileMetadata {} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto index 2d5bddf302aa..79c461e05e63 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto @@ -31,7 +31,6 @@ option java_outer_classname = "BigtableTableAdminProto"; option java_package = "com.google.bigtable.admin.v2"; option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2"; - // Service for creating, configuring, and deleting Cloud Bigtable tables. // // @@ -56,7 +55,8 @@ service BigtableTableAdmin { // feature might be changed in backward-incompatible ways and is not // recommended for production use. It is not subject to any SLA or deprecation // policy. - rpc CreateTableFromSnapshot(CreateTableFromSnapshotRequest) returns (google.longrunning.Operation) { + rpc CreateTableFromSnapshot(CreateTableFromSnapshotRequest) + returns (google.longrunning.Operation) { option (google.api.http) = { post: "/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot" body: "*" @@ -109,7 +109,8 @@ service BigtableTableAdmin { // CheckConsistency to check whether mutations to the table that finished // before this call started have been replicated. The tokens will be available // for 90 days. - rpc GenerateConsistencyToken(GenerateConsistencyTokenRequest) returns (GenerateConsistencyTokenResponse) { + rpc GenerateConsistencyToken(GenerateConsistencyTokenRequest) + returns (GenerateConsistencyTokenResponse) { option (google.api.http) = { post: "/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken" body: "*" @@ -119,7 +120,8 @@ service BigtableTableAdmin { // Checks replication consistency based on a consistency token, that is, if // replication has caught up based on the conditions specified in the token // and the check request. - rpc CheckConsistency(CheckConsistencyRequest) returns (CheckConsistencyResponse) { + rpc CheckConsistency(CheckConsistencyRequest) + returns (CheckConsistencyResponse) { option (google.api.http) = { post: "/v2/{name=projects/*/instances/*/tables/*}:checkConsistency" body: "*" @@ -134,7 +136,8 @@ service BigtableTableAdmin { // feature might be changed in backward-incompatible ways and is not // recommended for production use. It is not subject to any SLA or deprecation // policy. - rpc SnapshotTable(SnapshotTableRequest) returns (google.longrunning.Operation) { + rpc SnapshotTable(SnapshotTableRequest) + returns (google.longrunning.Operation) { option (google.api.http) = { post: "/v2/{name=projects/*/instances/*/tables/*}:snapshot" body: "*" diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common.proto index 0ece12780eb9..ad4d735994f3 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common.proto +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common.proto @@ -27,7 +27,6 @@ option java_outer_classname = "CommonProto"; option java_package = "com.google.bigtable.admin.v2"; option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2"; - // Storage media types for persisting Bigtable data. enum StorageType { // The user did not specify a storage type. 
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance.proto index bb69b1f66d42..ef8599bfe349 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance.proto +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance.proto @@ -27,7 +27,6 @@ option java_outer_classname = "InstanceProto"; option java_package = "com.google.bigtable.admin.v2"; option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2"; - // A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and // the resources that serve them. // All tables in an instance are served from a single @@ -161,9 +160,7 @@ message AppProfile { // fail over to another cluster in the event of transient errors or delays. // Choosing this option sacrifices read-your-writes consistency to improve // availability. - message MultiClusterRoutingUseAny { - - } + message MultiClusterRoutingUseAny {} // Unconditionally routes all read/write requests to a specific cluster. // This option preserves read-your-writes consistency, but does not improve diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table.proto index 5d4374effc59..5019d8b86448 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table.proto +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table.proto @@ -28,7 +28,6 @@ option java_outer_classname = "TableProto"; option java_package = "com.google.bigtable.admin.v2"; option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2"; - // A collection of user data indexed by row, column, and timestamp. // Each table is served using the resources of its parent cluster. message Table { diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable.proto index d800c2c97ab8..0ab763ba6e62 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable.proto +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable.proto @@ -28,7 +28,6 @@ option java_outer_classname = "BigtableProto"; option java_package = "com.google.bigtable.v2"; option php_namespace = "Google\\Cloud\\Bigtable\\V2"; - // Service for reading from and writing to existing Bigtable tables. service Bigtable { // Streams back the contents of all requested rows in key order, optionally @@ -47,7 +46,8 @@ service Bigtable { // delimit contiguous sections of the table of approximately equal size, // which can be used to break up the data for distributed tasks like // mapreduces. - rpc SampleRowKeys(SampleRowKeysRequest) returns (stream SampleRowKeysResponse) { + rpc SampleRowKeys(SampleRowKeysRequest) + returns (stream SampleRowKeysResponse) { option (google.api.http) = { get: "/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys" }; @@ -73,7 +73,8 @@ service Bigtable { } // Mutates a row atomically based on the output of a predicate Reader filter. 
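The Python client exposes this check-and-mutate path through a conditional row obtained with ``table.row(..., filter_=...)``. A minimal sketch, assuming placeholder names and a simple value-regex predicate::

    from google.cloud import bigtable
    from google.cloud.bigtable.row_filters import ValueRegexFilter

    client = bigtable.Client(project="my-project")
    table = client.instance("my-instance").table("my-table")

    # The predicate decides which of the two mutation sets is applied.
    cond_row = table.row(b"user#123", filter_=ValueRegexFilter(b"active"))
    cond_row.set_cell("cf1", b"status", b"seen", state=True)      # predicate matched
    cond_row.set_cell("cf1", b"status", b"missing", state=False)  # predicate not matched
    predicate_matched = cond_row.commit()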
- rpc CheckAndMutateRow(CheckAndMutateRowRequest) returns (CheckAndMutateRowResponse) { + rpc CheckAndMutateRow(CheckAndMutateRowRequest) + returns (CheckAndMutateRowResponse) { option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow" body: "*" @@ -85,7 +86,8 @@ service Bigtable { // entry based on pre-defined read/modify/write rules. The new value for the // timestamp is the greater of the existing timestamp or the current server // time. The method returns the new contents of all modified cells. - rpc ReadModifyWriteRow(ReadModifyWriteRowRequest) returns (ReadModifyWriteRowResponse) { + rpc ReadModifyWriteRow(ReadModifyWriteRowRequest) + returns (ReadModifyWriteRowResponse) { option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow" body: "*" @@ -244,9 +246,7 @@ message MutateRowRequest { } // Response message for Bigtable.MutateRow. -message MutateRowResponse { - -} +message MutateRowResponse {} // Request message for BigtableService.MutateRows. message MutateRowsRequest { diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data.proto index 7400197e7b17..d0aab0b63f12 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data.proto +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data.proto @@ -23,7 +23,6 @@ option java_outer_classname = "DataProto"; option java_package = "com.google.bigtable.v2"; option php_namespace = "Google\\Cloud\\Bigtable\\V2"; - // Specifies the complete (requested) contents of a single row of a table. // Rows which exceed 256MiB in size cannot be read in full. message Row { @@ -486,9 +485,7 @@ message Mutation { } // A Mutation which deletes all cells from the containing row. - message DeleteFromRow { - - } + message DeleteFromRow {} // Which of the possible Mutation types to apply. oneof mutation { diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index 63417df40e73..cf600d26bf23 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -1,19 +1,19 @@ { - "updateTime": "2019-02-02T13:13:43.962780Z", + "updateTime": "2019-02-26T13:12:12.116139Z", "sources": [ { "generator": { "name": "artman", - "version": "0.16.8", - "dockerImage": "googleapis/artman@sha256:75bc07ef34a1de9895c18af54dc503ed3b3f3b52e85062e3360a979d2a0741e7" + "version": "0.16.14", + "dockerImage": "googleapis/artman@sha256:f3d61ae45abaeefb6be5f228cda22732c2f1b00fb687c79c4bd4f2c42bb1e1a7" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "bce093dab3e65c40eb9a37efbdc960f34df6037a", - "internalRef": "231974277" + "sha": "29f098cb03a9983cc9cb15993de5da64419046f2", + "internalRef": "235621085" } }, { From c033a966b5576c4c63c179dfd1454cdd0f984943 Mon Sep 17 00:00:00 2001 From: juan-rael <44061037+juan-rael@users.noreply.github.com> Date: Wed, 27 Feb 2019 14:38:16 -0500 Subject: [PATCH 233/892] Add 'Table.mutation_timeout', allowing override of config timeouts. 
(#7424) --- .../google/cloud/bigtable/batcher.py | 4 +- .../google/cloud/bigtable/instance.py | 9 +++- .../google/cloud/bigtable/table.py | 18 +++++-- .../tests/unit/test_table.py | 53 ------------------- 4 files changed, 24 insertions(+), 60 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py b/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py index 120e2bb05b48..6afd43f90393 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py @@ -56,7 +56,9 @@ class MutationsBatcher(object): (5 MB). """ - def __init__(self, table, flush_count=FLUSH_COUNT, max_row_bytes=MAX_ROW_BYTES): + def __init__( + self, table, flush_count=FLUSH_COUNT, max_row_bytes=MAX_ROW_BYTES + ): self.rows = [] self.total_mutation_count = 0 self.total_size = 0 diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py index 3c5e144ed64a..32d3666136a5 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py @@ -538,7 +538,7 @@ def list_clusters(self): clusters = [Cluster.from_pb(cluster, self) for cluster in resp.clusters] return clusters, resp.failed_locations - def table(self, table_id, app_profile_id=None): + def table(self, table_id, mutation_timeout=None, app_profile_id=None): """Factory to create a table associated with this instance. For example: @@ -556,7 +556,12 @@ def table(self, table_id, app_profile_id=None): :rtype: :class:`Table ` :returns: The table owned by this instance. """ - return Table(table_id, self, app_profile_id=app_profile_id) + return Table( + table_id, + self, + app_profile_id=app_profile_id, + mutation_timeout=mutation_timeout, + ) def list_tables(self): """List the tables in this instance. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index 0ec6bbcf25fc..a422d335410d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -17,6 +17,7 @@ from grpc import StatusCode +from google.api_core import timeout from google.api_core.exceptions import RetryError from google.api_core.exceptions import NotFound from google.api_core.retry import if_exception_type @@ -100,10 +101,11 @@ class Table(object): :param app_profile_id: (Optional) The unique name of the AppProfile. """ - def __init__(self, table_id, instance, app_profile_id=None): + def __init__(self, table_id, instance, mutation_timeout=None, app_profile_id=None): self.table_id = table_id self._instance = instance self._app_profile_id = app_profile_id + self.mutation_timeout = mutation_timeout @property def name(self): @@ -503,7 +505,11 @@ def mutate_rows(self, rows, retry=DEFAULT_RETRY): sent. These will be in the same order as the `rows`. 
""" retryable_mutate_rows = _RetryableMutateRowsWorker( - self._instance._client, self.name, rows, app_profile_id=self._app_profile_id + self._instance._client, + self.name, + rows, + app_profile_id=self._app_profile_id, + timeout=self.mutation_timeout, ) return retryable_mutate_rows(retry=retry) @@ -658,12 +664,13 @@ class _RetryableMutateRowsWorker(object): ) # pylint: enable=unsubscriptable-object - def __init__(self, client, table_name, rows, app_profile_id=None): + def __init__(self, client, table_name, rows, app_profile_id=None, timeout=None): self.client = client self.table_name = table_name self.rows = rows self.app_profile_id = app_profile_id self.responses_statuses = [None] * len(self.rows) + self.timeout = timeout def __call__(self, retry=DEFAULT_RETRY): """Attempt to mutate all rows and retry rows with transient errors. @@ -729,7 +736,10 @@ def _do_mutate_retryable_rows(self): inner_api_calls = data_client._inner_api_calls if "mutate_rows" not in inner_api_calls: default_retry = (data_client._method_configs["MutateRows"].retry,) - default_timeout = data_client._method_configs["MutateRows"].timeout + if self.timeout is None: + default_timeout = data_client._method_configs["MutateRows"].timeout + else: + default_timeout = timeout.ExponentialTimeout(deadline=self.timeout) data_client._inner_api_calls["mutate_rows"] = wrap_method( data_client.transport.mutate_rows, default_retry=default_retry, diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index 5e737c872144..42933da373b5 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -1205,59 +1205,6 @@ def test_callable_retry(self): ) self.assertEqual(result, expected_result) - def test_callable_retry_timeout(self): - from google.cloud.bigtable.row import DirectRow - from google.cloud.bigtable.table import DEFAULT_RETRY - from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client - - # Setup: - # - Mutate 2 rows. - # Action: - # - Initial attempt will mutate all 2 rows. - # Expectation: - # - Both rows always return retryable errors. - # - google.api_core.Retry should keep retrying. - # - Check MutateRows is called multiple times. - # - By the time deadline is reached, statuses should be - # [retryable, retryable] - - data_api = bigtable_client.BigtableClient(mock.Mock()) - table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - client._table_data_client = data_api - client._table_admin_client = table_api - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_table(self.TABLE_ID, instance) - - row_1 = DirectRow(row_key=b"row_key", table=table) - row_1.set_cell("cf", b"col", b"value1") - row_2 = DirectRow(row_key=b"row_key_2", table=table) - row_2.set_cell("cf", b"col", b"value2") - - response = self._make_responses([self.RETRYABLE_1, self.RETRYABLE_1]) - - # Patch the stub used by the API method. 
- inner_api_calls = client._table_data_client._inner_api_calls - inner_api_calls["mutate_rows"] = mock.Mock(return_value=[response]) - - retry = DEFAULT_RETRY.with_delay( - initial=0.1, maximum=0.2, multiplier=2.0 - ).with_deadline(0.5) - worker = self._make_worker(client, table.name, [row_1, row_2]) - statuses = worker(retry=retry) - - result = [status.code for status in statuses] - expected_result = [self.RETRYABLE_1, self.RETRYABLE_1] - - self.assertTrue( - client._table_data_client._inner_api_calls["mutate_rows"].call_count > 1 - ) - self.assertEqual(result, expected_result) - def test_do_mutate_retryable_rows_empty_rows(self): from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client From 81eb997f76eaa42947eafa25bebb9a3d9481db24 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot <44816363+yoshi-automation@users.noreply.github.com> Date: Fri, 1 Mar 2019 14:09:53 -0800 Subject: [PATCH 234/892] Re-blacken. (#7462) --- .../google/cloud/bigtable/batcher.py | 4 +--- packages/google-cloud-bigtable/synth.metadata | 8 ++++---- 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py b/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py index 6afd43f90393..120e2bb05b48 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py @@ -56,9 +56,7 @@ class MutationsBatcher(object): (5 MB). """ - def __init__( - self, table, flush_count=FLUSH_COUNT, max_row_bytes=MAX_ROW_BYTES - ): + def __init__(self, table, flush_count=FLUSH_COUNT, max_row_bytes=MAX_ROW_BYTES): self.rows = [] self.total_mutation_count = 0 self.total_size = 0 diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index cf600d26bf23..f5ea3da99483 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -1,5 +1,5 @@ { - "updateTime": "2019-02-26T13:12:12.116139Z", + "updateTime": "2019-02-28T13:11:55.008453Z", "sources": [ { "generator": { @@ -12,15 +12,15 @@ "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "29f098cb03a9983cc9cb15993de5da64419046f2", - "internalRef": "235621085" + "sha": "9c769d3a0e67e4df9b9e8eee480124c2700a7e6c", + "internalRef": "235997788" } }, { "template": { "name": "python_library", "origin": "synthtool.gcp", - "version": "2019.1.16" + "version": "2019.2.26" } } ], From e715e649242da577f9636f4a98d6e22320e2763f Mon Sep 17 00:00:00 2001 From: Alex <7764119+AVaksman@users.noreply.github.com> Date: Fri, 8 Mar 2019 13:10:06 -0500 Subject: [PATCH 235/892] Bigtable: Run instance_admin system tests on a separate instance from table_admin and data system tests. 
(#6579) * Add separate instance for table admin and data tests: * instance_data to to type dev --- .../google-cloud-bigtable/tests/system.py | 44 ++++++++++++------- 1 file changed, 29 insertions(+), 15 deletions(-) diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index 4147c9fb32ef..3631cf17e14a 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -41,8 +41,10 @@ LOCATION_ID = "us-central1-c" INSTANCE_ID = "g-c-p" + unique_resource_id("-") +INSTANCE_ID_DATA = "g-c-p-d" + unique_resource_id("-") TABLE_ID = "google-cloud-python-test-table" CLUSTER_ID = INSTANCE_ID + "-cluster" +CLUSTER_ID_DATA = INSTANCE_ID_DATA + "-cluster" SERVE_NODES = 3 COLUMN_FAMILY_ID1 = u"col-fam-id1" COLUMN_FAMILY_ID2 = u"col-fam-id2" @@ -74,7 +76,9 @@ class Config(object): CLIENT = None INSTANCE = None + INSTANCE_DATA = None CLUSTER = None + CLUSTER_DATA = None IN_EMULATOR = False @@ -90,6 +94,7 @@ def _retry_on_unavailable(exc): def setUpModule(): from google.cloud.exceptions import GrpcRendezvous + from google.cloud.bigtable.enums import Instance Config.IN_EMULATOR = os.getenv(BIGTABLE_EMULATOR) is not None @@ -103,6 +108,12 @@ def setUpModule(): Config.CLUSTER = Config.INSTANCE.cluster( CLUSTER_ID, location_id=LOCATION_ID, serve_nodes=SERVE_NODES ) + Config.INSTANCE_DATA = Config.CLIENT.instance( + INSTANCE_ID_DATA, instance_type=Instance.Type.DEVELOPMENT, labels=LABELS + ) + Config.CLUSTER_DATA = Config.INSTANCE_DATA.cluster( + CLUSTER_ID_DATA, location_id=LOCATION_ID + ) if not Config.IN_EMULATOR: retry = RetryErrors(GrpcRendezvous, error_predicate=_retry_on_unavailable) @@ -113,14 +124,17 @@ def setUpModule(): EXISTING_INSTANCES[:] = instances - # After listing, create the test instance. + # After listing, create the test instances. created_op = Config.INSTANCE.create(clusters=[Config.CLUSTER]) created_op.result(timeout=10) + created_op = Config.INSTANCE_DATA.create(clusters=[Config.CLUSTER_DATA]) + created_op.result(timeout=10) def tearDownModule(): if not Config.IN_EMULATOR: retry_429(Config.INSTANCE.delete)() + retry_429(Config.INSTANCE_DATA.delete)() class TestInstanceAdminAPI(unittest.TestCase): @@ -617,7 +631,7 @@ def _test_delete_app_profile_helper(self, app_profile_id, instance): class TestTableAdminAPI(unittest.TestCase): @classmethod def setUpClass(cls): - cls._table = Config.INSTANCE.table(TABLE_ID) + cls._table = Config.INSTANCE_DATA.table(TABLE_ID) cls._table.create() @classmethod @@ -632,16 +646,16 @@ def tearDown(self): table.delete() def test_list_tables(self): - # Since `Config.INSTANCE` is newly created in `setUpModule`, the table - # created in `setUpClass` here will be the only one. - tables = Config.INSTANCE.list_tables() + # Since `Config.INSTANCE_DATA` is newly created in `setUpModule`, the + # table created in `setUpClass` here will be the only one. 
+ tables = Config.INSTANCE_DATA.list_tables() self.assertEqual(tables, [self._table]) def test_exists(self): retry_until_true = RetryResult(lambda result: result) retry_until_false = RetryResult(lambda result: not result) temp_table_id = "test-table_existence" - temp_table = Config.INSTANCE.table(temp_table_id) + temp_table = Config.INSTANCE_DATA.table(temp_table_id) self.assertFalse(temp_table.exists()) temp_table.create() self.assertTrue(retry_until_true(temp_table.exists)()) @@ -650,7 +664,7 @@ def test_exists(self): def test_create_table(self): temp_table_id = "test-create-table" - temp_table = Config.INSTANCE.table(temp_table_id) + temp_table = Config.INSTANCE_DATA.table(temp_table_id) temp_table.create() self.tables_to_delete.append(temp_table) @@ -660,13 +674,13 @@ def test_create_table(self): # Then query for the tables in the instance and sort them by # name as well. - tables = Config.INSTANCE.list_tables() + tables = Config.INSTANCE_DATA.list_tables() sorted_tables = sorted(tables, key=name_attr) self.assertEqual(sorted_tables, expected_tables) def test_create_table_with_families(self): temp_table_id = "test-create-table-with-failies" - temp_table = Config.INSTANCE.table(temp_table_id) + temp_table = Config.INSTANCE_DATA.table(temp_table_id) gc_rule = MaxVersionsGCRule(1) temp_table.create(column_families={COLUMN_FAMILY_ID1: gc_rule}) self.tables_to_delete.append(temp_table) @@ -682,7 +696,7 @@ def test_create_table_with_families(self): def test_create_table_with_split_keys(self): temp_table_id = "foo-bar-baz-split-table" initial_split_keys = [b"split_key_1", b"split_key_10", b"split_key_20"] - temp_table = Config.INSTANCE.table(temp_table_id) + temp_table = Config.INSTANCE_DATA.table(temp_table_id) temp_table.create(initial_split_keys=initial_split_keys) self.tables_to_delete.append(temp_table) @@ -697,7 +711,7 @@ def test_create_table_with_split_keys(self): def test_create_column_family(self): temp_table_id = "test-create-column-family" - temp_table = Config.INSTANCE.table(temp_table_id) + temp_table = Config.INSTANCE_DATA.table(temp_table_id) temp_table.create() self.tables_to_delete.append(temp_table) @@ -718,7 +732,7 @@ def test_create_column_family(self): def test_update_column_family(self): temp_table_id = "test-update-column-family" - temp_table = Config.INSTANCE.table(temp_table_id) + temp_table = Config.INSTANCE_DATA.table(temp_table_id) temp_table.create() self.tables_to_delete.append(temp_table) @@ -740,7 +754,7 @@ def test_update_column_family(self): def test_delete_column_family(self): temp_table_id = "test-delete-column-family" - temp_table = Config.INSTANCE.table(temp_table_id) + temp_table = Config.INSTANCE_DATA.table(temp_table_id) temp_table.create() self.tables_to_delete.append(temp_table) @@ -760,7 +774,7 @@ def test_delete_column_family(self): class TestDataAPI(unittest.TestCase): @classmethod def setUpClass(cls): - cls._table = table = Config.INSTANCE.table("test-data-api") + cls._table = table = Config.INSTANCE_DATA.table("test-data-api") table.create() table.column_family(COLUMN_FAMILY_ID1).create() table.column_family(COLUMN_FAMILY_ID2).create() @@ -1071,6 +1085,6 @@ def test_read_with_label_applied(self): def test_access_with_non_admin_client(self): client = Client(admin=False) - instance = client.instance(INSTANCE_ID) + instance = client.instance(INSTANCE_ID_DATA) table = instance.table(self._table.table_id) self.assertIsNone(table.read_row("nonesuch")) From 49207329e5500729a91580158b83fb8fc3fbc2fe Mon Sep 17 00:00:00 2001 From: sangramql 
<39852271+sangramql@users.noreply.github.com> Date: Sat, 9 Mar 2019 02:03:05 +0530 Subject: [PATCH 236/892] Bigtable Row Set snippets (#7016) * add conditional row snippets * add snippets * Add row set snippets * remove unwanted changes in snippets.py * Rectify snippets file name and tags * blacken --- .../docs/snippets_table.py | 85 ++++++++++++++++++- .../google/cloud/bigtable/row_set.py | 18 ++++ 2 files changed, 102 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/docs/snippets_table.py b/packages/google-cloud-bigtable/docs/snippets_table.py index 78bbc1ffc13f..589b473462ee 100644 --- a/packages/google-cloud-bigtable/docs/snippets_table.py +++ b/packages/google-cloud-bigtable/docs/snippets_table.py @@ -42,7 +42,6 @@ INSTANCE_ID = "snippet-" + unique_resource_id("-") CLUSTER_ID = "clus-1-" + unique_resource_id("-") TABLE_ID = "tabl-1-" + unique_resource_id("-") -COLUMN_FAMILY_ID = "col_fam_id-" + unique_resource_id("-") LOCATION_ID = "us-central1-f" ALT_LOCATION_ID = "us-central1-a" PRODUCTION = enums.Instance.Type.PRODUCTION @@ -55,8 +54,14 @@ .strftime("%Y-%m-%dt%H-%M-%S") ) LABELS = {LABEL_KEY: str(LABEL_STAMP)} +COLUMN_FAMILY_ID = "col_fam_id1" COL_NAME1 = b"col-name1" CELL_VAL1 = b"cell-val" +ROW_KEY1 = b"row_key_id1" +COLUMN_FAMILY_ID2 = "col_fam_id2" +COL_NAME2 = b"col-name2" +CELL_VAL2 = b"cell-val2" +ROW_KEY2 = b"row_key_id2" class Config(object): @@ -90,6 +95,9 @@ def setup_module(): gc_rule = column_family.MaxVersionsGCRule(2) column_family1 = Config.TABLE.column_family(COLUMN_FAMILY_ID, gc_rule=gc_rule) column_family1.create() + gc_rule2 = column_family.MaxVersionsGCRule(4) + column_family2 = Config.TABLE.column_family(COLUMN_FAMILY_ID2, gc_rule=gc_rule2) + column_family2.create() def teardown_module(): @@ -403,5 +411,80 @@ def test_bigtable_table_row(): table.truncate(timeout=300) +def test_bigtable_add_row_add_row_range_add_row_range_from_keys(): + row_keys = [ + b"row_key_1", + b"row_key_2", + b"row_key_3", + b"row_key_4", + b"row_key_5", + b"row_key_6", + b"row_key_7", + b"row_key_8", + b"row_key_9", + ] + + rows = [] + for row_key in row_keys: + row = Config.TABLE.row(row_key) + row.set_cell(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1) + rows.append(row) + Config.TABLE.mutate_rows(rows) + + # [START bigtable_add_row_key] + from google.cloud.bigtable import Client + from google.cloud.bigtable.row_set import RowSet + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + + row_set = RowSet() + row_set.add_row_key(b"row_key_5") + # [END bigtable_add_row_key] + + read_rows = table.read_rows(row_set=row_set) + expected_row_keys = [b"row_key_5"] + found_row_keys = [row.row_key for row in read_rows] + assert found_row_keys == expected_row_keys + + # [START bigtable_add_row_range] + from google.cloud.bigtable import Client + from google.cloud.bigtable.row_set import RowSet + from google.cloud.bigtable.row_set import RowRange + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + + row_set = RowSet() + row_set.add_row_range(RowRange(start_key=b"row_key_3", end_key=b"row_key_7")) + # [END bigtable_add_row_range] + + read_rows = table.read_rows(row_set=row_set) + expected_row_keys = [b"row_key_3", b"row_key_4", b"row_key_5", b"row_key_6"] + found_row_keys = [row.row_key for row in read_rows] + assert found_row_keys == expected_row_keys + + # [START bigtable_row_range_from_keys] + from google.cloud.bigtable import Client + from 
google.cloud.bigtable.row_set import RowSet + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + + row_set = RowSet() + row_set.add_row_range_from_keys(start_key=b"row_key_3", end_key=b"row_key_7") + # [END bigtable_row_range_from_keys] + + read_rows = table.read_rows(row_set=row_set) + expected_row_keys = [b"row_key_3", b"row_key_4", b"row_key_5", b"row_key_6"] + found_row_keys = [row.row_key for row in read_rows] + assert found_row_keys == expected_row_keys + + table.truncate(timeout=200) + + if __name__ == "__main__": pytest.main() diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py index 454194a77a1c..0cb6443b05eb 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py @@ -53,6 +53,12 @@ def __ne__(self, other): def add_row_key(self, row_key): """Add row key to row_keys list. + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_add_row_key] + :end-before: [END bigtable_add_row_key] + :type row_key: bytes :param row_key: The key of a row to read """ @@ -61,6 +67,12 @@ def add_row_key(self, row_key): def add_row_range(self, row_range): """Add row_range to row_ranges list. + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_add_row_range] + :end-before: [END bigtable_add_row_range] + :type row_range: class:`RowRange` :param row_range: The row range object having start and end key """ @@ -71,6 +83,12 @@ def add_row_range_from_keys( ): """Add row range to row_ranges list from the row keys + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_row_range_from_keys] + :end-before: [END bigtable_row_range_from_keys] + :type start_key: bytes :param start_key: (Optional) Start key of the row range. If left empty, will be interpreted as the empty string. 
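The row-set snippets in the patch above each exercise one method in isolation. As a combined illustration — a minimal sketch only, assuming the INSTANCE_ID and TABLE_ID constants defined at the top of snippets_table.py and the b"row_key_1" through b"row_key_9" rows written by the snippet test — a single RowSet can mix explicit keys with ranges; the expected results in the snippets show that range end keys are exclusive by default:

# Sketch only: INSTANCE_ID and TABLE_ID are the module-level constants from
# snippets_table.py, and the table is assumed to still hold the rows
# b"row_key_1" through b"row_key_9" written by the snippet test above.
from google.cloud.bigtable import Client
from google.cloud.bigtable.row_set import RowRange
from google.cloud.bigtable.row_set import RowSet

client = Client(admin=True)
instance = client.instance(INSTANCE_ID)
table = instance.table(TABLE_ID)

row_set = RowSet()
row_set.add_row_key(b"row_key_1")  # a single explicit key
row_set.add_row_range(RowRange(start_key=b"row_key_3", end_key=b"row_key_5"))
row_set.add_row_range_from_keys(start_key=b"row_key_7", end_key=b"row_key_9")

# End keys are exclusive, so this prints row_key_1, row_key_3, row_key_4,
# row_key_7 and row_key_8.
for row in table.read_rows(row_set=row_set):
    print(row.row_key)

Because all of the keys and ranges travel in the same RowSet, they are served by a single read_rows() request rather than one request per range.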
From c64739c110ea1972d82e5b17c8e71b17643de9d2 Mon Sep 17 00:00:00 2001 From: sangramql <39852271+sangramql@users.noreply.github.com> Date: Tue, 12 Mar 2019 21:51:39 +0530 Subject: [PATCH 237/892] Bigtable column family snippets (#7014) * add conditional row snippets --- .../docs/snippets_table.py | 215 +++++++++++++++++- .../google/cloud/bigtable/column_family.py | 58 ++++- 2 files changed, 269 insertions(+), 4 deletions(-) diff --git a/packages/google-cloud-bigtable/docs/snippets_table.py b/packages/google-cloud-bigtable/docs/snippets_table.py index 589b473462ee..4126e3d78d1a 100644 --- a/packages/google-cloud-bigtable/docs/snippets_table.py +++ b/packages/google-cloud-bigtable/docs/snippets_table.py @@ -41,7 +41,8 @@ INSTANCE_ID = "snippet-" + unique_resource_id("-") CLUSTER_ID = "clus-1-" + unique_resource_id("-") -TABLE_ID = "tabl-1-" + unique_resource_id("-") +TABLE_ID = "tabl-1" + unique_resource_id("-") +COLUMN_FAMILY_ID = "col_fam_id-" + unique_resource_id("-") LOCATION_ID = "us-central1-f" ALT_LOCATION_ID = "us-central1-a" PRODUCTION = enums.Instance.Type.PRODUCTION @@ -411,6 +412,82 @@ def test_bigtable_table_row(): table.truncate(timeout=300) +def test_bigtable_column_family_name(): + # [START bigtable_column_family_name] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + + column_families = table.list_column_families() + column_family_obj = column_families[COLUMN_FAMILY_ID] + column_family_name = column_family_obj.name + # [END bigtable_column_family_name] + import re + + _cf_name_re = re.compile( + r"^projects/(?P[^/]+)/" + r"instances/(?P[^/]+)/tables/" + r"(?P
[^/]+)/columnFamilies/" + r"(?P[_a-zA-Z0-9][-_.a-zA-Z0-9]*)$" + ) + assert _cf_name_re.match(column_family_name) + + +def test_bigtable_create_update_delete_column_family(): + # [START bigtable_create_column_family] + from google.cloud.bigtable import Client + from google.cloud.bigtable import column_family + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + + column_family_id = "column_family_id1" + gc_rule = column_family.MaxVersionsGCRule(2) + column_family_obj = table.column_family(column_family_id, gc_rule=gc_rule) + column_family_obj.create() + + # [END bigtable_create_column_family] + column_families = table.list_column_families() + assert column_families[column_family_id].gc_rule == gc_rule + + # [START bigtable_update_column_family] + from google.cloud.bigtable import Client + from google.cloud.bigtable import column_family + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + + # Already existing column family id + column_family_id = "column_family_id1" + # Define the GC rule to retain data with max age of 5 days + max_age_rule = column_family.MaxAgeGCRule(datetime.timedelta(days=5)) + column_family_obj = table.column_family(column_family_id, gc_rule=max_age_rule) + column_family_obj.update() + # [END bigtable_update_column_family] + + updated_families = table.list_column_families() + assert updated_families[column_family_id].gc_rule == max_age_rule + + # [START bigtable_delete_column_family] + from google.cloud.bigtable import Client + from google.cloud.bigtable import column_family + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + + column_family_id = "column_family_id1" + column_family_obj = table.column_family(column_family_id) + column_family_obj.delete() + # [END bigtable_delete_column_family] + column_families = table.list_column_families() + assert column_family_id not in column_families + + def test_bigtable_add_row_add_row_range_add_row_range_from_keys(): row_keys = [ b"row_key_1", @@ -486,5 +563,141 @@ def test_bigtable_add_row_add_row_range_add_row_range_from_keys(): table.truncate(timeout=200) +def test_bigtable_create_family_gc_max_age(): + # [START bigtable_create_family_gc_max_age] + from google.cloud.bigtable import Client + from google.cloud.bigtable import column_family + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + + # Define the GC rule to retain data with max age of 5 days + max_age_rule = column_family.MaxAgeGCRule(datetime.timedelta(days=5)) + + column_family_obj = table.column_family("cf1", max_age_rule) + column_family_obj.create() + + # [END bigtable_create_family_gc_max_age] + rule = str(column_family_obj.to_pb()) + assert "max_age" in rule + assert "seconds: 432000" in rule + column_family_obj.delete() + + +def test_bigtable_create_family_gc_max_versions(): + # [START bigtable_create_family_gc_max_versions] + from google.cloud.bigtable import Client + from google.cloud.bigtable import column_family + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + + # Define the GC policy to retain only the most recent 2 versions + max_versions_rule = column_family.MaxVersionsGCRule(2) + + column_family_obj = table.column_family("cf2", max_versions_rule) + column_family_obj.create() + + # [END bigtable_create_family_gc_max_versions] + rule = str(column_family_obj.to_pb()) 
+ assert "max_num_versions: 2" in rule + column_family_obj.delete() + + +def test_bigtable_create_family_gc_union(): + # [START bigtable_create_family_gc_union] + from google.cloud.bigtable import Client + from google.cloud.bigtable import column_family + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + + max_versions_rule = column_family.MaxVersionsGCRule(2) + max_age_rule = column_family.MaxAgeGCRule(datetime.timedelta(days=5)) + + union_rule = column_family.GCRuleUnion([max_versions_rule, max_age_rule]) + + column_family_obj = table.column_family("cf3", union_rule) + column_family_obj.create() + + # [END bigtable_create_family_gc_union] + rule = str(column_family_obj.to_pb()) + assert "union" in rule + assert "max_age" in rule + assert "seconds: 432000" in rule + assert "max_num_versions: 2" in rule + column_family_obj.delete() + + +def test_bigtable_create_family_gc_intersection(): + # [START bigtable_create_family_gc_intersection] + from google.cloud.bigtable import Client + from google.cloud.bigtable import column_family + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + + max_versions_rule = column_family.MaxVersionsGCRule(2) + max_age_rule = column_family.MaxAgeGCRule(datetime.timedelta(days=5)) + + intersection_rule = column_family.GCRuleIntersection( + [max_versions_rule, max_age_rule] + ) + + column_family_obj = table.column_family("cf4", intersection_rule) + column_family_obj.create() + + # [END bigtable_create_family_gc_intersection] + + rule = str(column_family_obj.to_pb()) + assert "intersection" in rule + assert "max_num_versions: 2" in rule + assert "max_age" in rule + assert "seconds: 432000" in rule + column_family_obj.delete() + + +def test_bigtable_create_family_gc_nested(): + # [START bigtable_create_family_gc_nested] + from google.cloud.bigtable import Client + from google.cloud.bigtable import column_family + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + + # Create a column family with nested GC policies. + # Create a nested GC rule: + # Drop cells that are either older than the 10 recent versions + # OR + # Drop cells that are older than a month AND older than the + # 2 recent versions + rule1 = column_family.MaxVersionsGCRule(10) + rule2 = column_family.GCRuleIntersection( + [ + column_family.MaxAgeGCRule(datetime.timedelta(days=5)), + column_family.MaxVersionsGCRule(2), + ] + ) + + nested_rule = column_family.GCRuleUnion([rule1, rule2]) + + column_family_obj = table.column_family("cf5", nested_rule) + column_family_obj.create() + + # [END bigtable_create_family_gc_nested] + + rule = str(column_family_obj.to_pb()) + assert "intersection" in rule + assert "max_num_versions: 2" in rule + assert "max_age" in rule + assert "seconds: 432000" in rule + column_family_obj.delete() + + if __name__ == "__main__": pytest.main() diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/column_family.py b/packages/google-cloud-bigtable/google/cloud/bigtable/column_family.py index ec5d4a6eadfb..8b536992faa7 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/column_family.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/column_family.py @@ -43,6 +43,12 @@ class GarbageCollectionRule(object): class MaxVersionsGCRule(GarbageCollectionRule): """Garbage collection limiting the number of versions of a cell. + For example: + + .. 
literalinclude:: snippets_table.py + :start-after: [START bigtable_create_family_gc_max_versions] + :end-before: [END bigtable_create_family_gc_max_versions] + :type max_num_versions: int :param max_num_versions: The maximum number of versions """ @@ -70,6 +76,12 @@ def to_pb(self): class MaxAgeGCRule(GarbageCollectionRule): """Garbage collection limiting the age of a cell. + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_create_family_gc_max_age] + :end-before: [END bigtable_create_family_gc_max_age] + :type max_age: :class:`datetime.timedelta` :param max_age: The maximum age allowed for a cell in the table. """ @@ -98,6 +110,12 @@ def to_pb(self): class GCRuleUnion(GarbageCollectionRule): """Union of garbage collection rules. + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_create_family_gc_union] + :end-before: [END bigtable_create_family_gc_union] + :type rules: list :param rules: List of :class:`GarbageCollectionRule`. """ @@ -126,6 +144,12 @@ def to_pb(self): class GCRuleIntersection(GarbageCollectionRule): """Intersection of garbage collection rules. + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_create_family_gc_intersection] + :end-before: [END bigtable_create_family_gc_intersection] + :type rules: list :param rules: List of :class:`GarbageCollectionRule`. """ @@ -183,12 +207,18 @@ def __init__(self, column_family_id, table, gc_rule=None): def name(self): """Column family name used in requests. + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_column_family_name] + :end-before: [END bigtable_column_family_name] + .. note:: This property will not change if ``column_family_id`` does not, but the return value is not cached. - The table name is of the form + The Column family name is of the form ``"projects/../zones/../clusters/../tables/../columnFamilies/.."`` @@ -221,7 +251,15 @@ def to_pb(self): return table_v2_pb2.ColumnFamily(gc_rule=self.gc_rule.to_pb()) def create(self): - """Create this column family.""" + """Create this column family. + + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_create_column_family] + :end-before: [END bigtable_create_column_family] + + """ column_family = self.to_pb() modification = table_admin_v2_pb2.ModifyColumnFamiliesRequest.Modification( id=self.column_family_id, create=column_family @@ -237,6 +275,12 @@ def create(self): def update(self): """Update this column family. + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_update_column_family] + :end-before: [END bigtable_update_column_family] + .. note:: Only the GC rule can be updated. By changing the column family ID, @@ -255,7 +299,15 @@ def update(self): ) def delete(self): - """Delete this column family.""" + """Delete this column family. + + For example: + + .. 
literalinclude:: snippets_table.py + :start-after: [START bigtable_delete_column_family] + :end-before: [END bigtable_delete_column_family] + + """ modification = table_admin_v2_pb2.ModifyColumnFamiliesRequest.Modification( id=self.column_family_id, drop=True ) From 17b3b55569652df99f18230bba4246e3fec8d079 Mon Sep 17 00:00:00 2001 From: sangramql <39852271+sangramql@users.noreply.github.com> Date: Fri, 15 Mar 2019 03:49:36 +0530 Subject: [PATCH 238/892] Bigtable Batcher, RowData, Row Operations, AppendRow snippets (#7019) --- .../google-cloud-bigtable/docs/snippets.py | 203 +++++++ .../docs/snippets_table.py | 565 +++++++++++++++++- .../google/cloud/bigtable/batcher.py | 44 +- .../google/cloud/bigtable/cluster.py | 22 +- .../google/cloud/bigtable/instance.py | 22 +- .../google/cloud/bigtable/policy.py | 36 +- .../google/cloud/bigtable/row.py | 149 ++++- .../google/cloud/bigtable/row_data.py | 24 + 8 files changed, 1008 insertions(+), 57 deletions(-) diff --git a/packages/google-cloud-bigtable/docs/snippets.py b/packages/google-cloud-bigtable/docs/snippets.py index c1e1eb4a8820..eb0cbaf0fcfa 100644 --- a/packages/google-cloud-bigtable/docs/snippets.py +++ b/packages/google-cloud-bigtable/docs/snippets.py @@ -470,5 +470,208 @@ def test_bigtable_instance_admin_client(): assert "BigtableInstanceAdmin" in str(instance_admin_client) +def test_bigtable_admins_policy(): + # [START bigtable_admins_policy] + from google.cloud.bigtable import Client + from google.cloud.bigtable.policy import Policy + from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE + + # [END bigtable_admins_policy] + + service_account_email = Config.CLIENT._credentials.service_account_email + + # [START bigtable_admins_policy] + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + instance.reload() + new_policy = Policy() + new_policy[BIGTABLE_ADMIN_ROLE] = [Policy.service_account(service_account_email)] + + policy_latest = instance.set_iam_policy(new_policy) + policy = policy_latest.bigtable_admins + # [END bigtable_admins_policy] + + assert len(policy) > 0 + + +def test_bigtable_readers_policy(): + # [START bigtable_readers_policy] + from google.cloud.bigtable import Client + from google.cloud.bigtable.policy import Policy + from google.cloud.bigtable.policy import BIGTABLE_READER_ROLE + + # [END bigtable_readers_policy] + + service_account_email = Config.CLIENT._credentials.service_account_email + + # [START bigtable_readers_policy] + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + instance.reload() + new_policy = Policy() + new_policy[BIGTABLE_READER_ROLE] = [Policy.service_account(service_account_email)] + + policy_latest = instance.set_iam_policy(new_policy) + policy = policy_latest.bigtable_readers + # [END bigtable_readers_policy] + + assert len(policy) > 0 + + +def test_bigtable_users_policy(): + # [START bigtable_users_policy] + from google.cloud.bigtable import Client + from google.cloud.bigtable.policy import Policy + from google.cloud.bigtable.policy import BIGTABLE_USER_ROLE + + # [END bigtable_users_policy] + + service_account_email = Config.CLIENT._credentials.service_account_email + + # [START bigtable_users_policy] + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + instance.reload() + new_policy = Policy() + new_policy[BIGTABLE_USER_ROLE] = [Policy.service_account(service_account_email)] + + policy_latest = instance.set_iam_policy(new_policy) + policy = policy_latest.bigtable_users + # [END bigtable_users_policy] + + assert len(policy) > 
0 + + +def test_bigtable_viewers_policy(): + # [START bigtable_viewers_policy] + from google.cloud.bigtable import Client + from google.cloud.bigtable.policy import Policy + from google.cloud.bigtable.policy import BIGTABLE_VIEWER_ROLE + + # [END bigtable_viewers_policy] + + service_account_email = Config.CLIENT._credentials.service_account_email + + # [START bigtable_viewers_policy] + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + instance.reload() + new_policy = Policy() + new_policy[BIGTABLE_VIEWER_ROLE] = [Policy.service_account(service_account_email)] + + policy_latest = instance.set_iam_policy(new_policy) + policy = policy_latest.bigtable_viewers + # [END bigtable_viewers_policy] + + assert len(policy) > 0 + + +def test_bigtable_instance_name(): + import re + + # [START bigtable_instance_name] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + instance_name = instance.name + # [END bigtable_instance_name] + + _instance_name_re = re.compile( + r"^projects/(?P[^/]+)/" + r"instances/(?P" + r"[a-z][-a-z0-9]*)$" + ) + assert _instance_name_re.match(instance_name) + + +def test_bigtable_cluster_name(): + import re + + # [START bigtable_cluster_name] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + cluster = instance.cluster(CLUSTER_ID) + cluster_name = cluster.name + # [END bigtable_cluster_name] + + _cluster_name_re = re.compile( + r"^projects/(?P[^/]+)/" + r"instances/(?P[^/]+)/" + r"clusters/(?P" + r"[_a-zA-Z0-9][-_.a-zA-Z0-9]*)$" + ) + + assert _cluster_name_re.match(cluster_name) + + +def test_bigtable_instance_from_pb(): + # [START bigtable_instance_from_pb] + from google.cloud.bigtable import Client + from google.cloud.bigtable_admin_v2.types import instance_pb2 + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + + name = instance.name + instance_pb = instance_pb2.Instance( + name=name, display_name=INSTANCE_ID, type=PRODUCTION, labels=LABELS + ) + + instance2 = instance.from_pb(instance_pb, client) + # [END bigtable_instance_from_pb] + assert instance2.name == instance.name + + +def test_bigtable_cluster_from_pb(): + # [START bigtable_cluster_from_pb] + from google.cloud.bigtable import Client + from google.cloud.bigtable_admin_v2.types import instance_pb2 + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + cluster = instance.cluster(CLUSTER_ID) + + name = cluster.name + cluster_state = cluster.state + cluster_pb = instance_pb2.Cluster( + name=name, + location=LOCATION_ID, + state=cluster_state, + serve_nodes=SERVER_NODES, + default_storage_type=STORAGE_TYPE, + ) + + cluster2 = cluster.from_pb(cluster_pb, instance) + # [END bigtable_cluster_from_pb] + assert cluster2.name == cluster.name + + +def test_bigtable_instance_state(): + # [START bigtable_instance_state] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + instance_state = instance.state + # [END bigtable_instance_state] + assert not instance_state + + +def test_bigtable_cluster_state(): + # [START bigtable_cluster_state] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + cluster = instance.cluster(CLUSTER_ID) + cluster_state = cluster.state + # [END bigtable_cluster_state] + + assert not cluster_state + + if __name__ == "__main__": pytest.main() diff --git 
a/packages/google-cloud-bigtable/docs/snippets_table.py b/packages/google-cloud-bigtable/docs/snippets_table.py index 4126e3d78d1a..6fe2e36d7a48 100644 --- a/packages/google-cloud-bigtable/docs/snippets_table.py +++ b/packages/google-cloud-bigtable/docs/snippets_table.py @@ -39,8 +39,8 @@ from google.cloud.bigtable import column_family -INSTANCE_ID = "snippet-" + unique_resource_id("-") -CLUSTER_ID = "clus-1-" + unique_resource_id("-") +INSTANCE_ID = "snippet" + unique_resource_id("-") +CLUSTER_ID = "clus-1" + unique_resource_id("-") TABLE_ID = "tabl-1" + unique_resource_id("-") COLUMN_FAMILY_ID = "col_fam_id-" + unique_resource_id("-") LOCATION_ID = "us-central1-f" @@ -488,6 +488,82 @@ def test_bigtable_create_update_delete_column_family(): assert column_family_id not in column_families +def test_bigtable_column_family_name(): + # [START bigtable_column_family_name] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + + column_families = table.list_column_families() + column_family_obj = column_families[COLUMN_FAMILY_ID] + column_family_name = column_family_obj.name + # [END bigtable_column_family_name] + import re + + _cf_name_re = re.compile( + r"^projects/(?P[^/]+)/" + r"instances/(?P[^/]+)/tables/" + r"(?P
[^/]+)/columnFamilies/" + r"(?P[_a-zA-Z0-9][-_.a-zA-Z0-9]*)$" + ) + assert _cf_name_re.match(column_family_name) + + +def test_bigtable_create_update_delete_column_family(): + # [START bigtable_create_column_family] + from google.cloud.bigtable import Client + from google.cloud.bigtable import column_family + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + + column_family_id = "column_family_id1" + gc_rule = column_family.MaxVersionsGCRule(2) + column_family_obj = table.column_family(column_family_id, gc_rule=gc_rule) + column_family_obj.create() + + # [END bigtable_create_column_family] + column_families = table.list_column_families() + assert column_families[column_family_id].gc_rule == gc_rule + + # [START bigtable_update_column_family] + from google.cloud.bigtable import Client + from google.cloud.bigtable import column_family + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + + # Already existing column family id + column_family_id = "column_family_id1" + # Define the GC rule to retain data with max age of 5 days + max_age_rule = column_family.MaxAgeGCRule(datetime.timedelta(days=5)) + column_family_obj = table.column_family(column_family_id, gc_rule=max_age_rule) + column_family_obj.update() + # [END bigtable_update_column_family] + + updated_families = table.list_column_families() + assert updated_families[column_family_id].gc_rule == max_age_rule + + # [START bigtable_delete_column_family] + from google.cloud.bigtable import Client + from google.cloud.bigtable import column_family + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + + column_family_id = "column_family_id1" + column_family_obj = table.column_family(column_family_id) + column_family_obj.delete() + # [END bigtable_delete_column_family] + column_families = table.list_column_families() + assert column_family_id not in column_families + + def test_bigtable_add_row_add_row_range_add_row_range_from_keys(): row_keys = [ b"row_key_1", @@ -563,6 +639,88 @@ def test_bigtable_add_row_add_row_range_add_row_range_from_keys(): table.truncate(timeout=200) +def test_bigtable_batcher_mutate_flush_mutate_rows(): + # [START bigtable_batcher_mutate] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + # Batcher for max row bytes, max_row_bytes=1024 is optional. + batcher = table.mutations_batcher(max_row_bytes=1024) + + # Add a single row + row_key = b"row_key_1" + row = table.row(row_key) + row.set_cell( + COLUMN_FAMILY_ID, COL_NAME1, "value-0", timestamp=datetime.datetime.utcnow() + ) + + # In batcher, mutate will flush current batch if it + # reaches the max_row_bytes + batcher.mutate(row) + batcher.flush() + # [END bigtable_batcher_mutate] + + # [START bigtable_batcher_flush] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + # Batcher for max row bytes, max_row_bytes=1024 is optional. 
+ batcher = table.mutations_batcher(max_row_bytes=1024) + + # Add a single row + row_key = b"row_key" + row = table.row(row_key) + row.set_cell(COLUMN_FAMILY_ID, COL_NAME1, "value-0") + + # In batcher, mutate will flush current batch if it + # reaches the max_row_bytes + batcher.mutate(row) + batcher.flush() + # [END bigtable_batcher_flush] + + rows_on_table = [] + for row in table.read_rows(): + rows_on_table.append(row.row_key) + assert len(rows_on_table) == 2 + table.truncate(timeout=200) + + # [START bigtable_batcher_mutate_rows] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + batcher = table.mutations_batcher() + + row1 = table.row(b"row_key_1") + row2 = table.row(b"row_key_2") + row3 = table.row(b"row_key_3") + row4 = table.row(b"row_key_4") + + row1.set_cell(COLUMN_FAMILY_ID, COL_NAME1, b"cell-val1") + row2.set_cell(COLUMN_FAMILY_ID, COL_NAME1, b"cell-val2") + row3.set_cell(COLUMN_FAMILY_ID, COL_NAME1, b"cell-val3") + row4.set_cell(COLUMN_FAMILY_ID, COL_NAME1, b"cell-val4") + + batcher.mutate_rows([row1, row2, row3, row4]) + + # batcher will flush current batch if it + # reaches the max flush_count + # Manually send the current batch to Cloud Bigtable + batcher.flush() + # [END bigtable_batcher_mutate_rows] + + rows_on_table = [] + for row in table.read_rows(): + rows_on_table.append(row.row_key) + assert len(rows_on_table) == 4 + table.truncate(timeout=200) + + def test_bigtable_create_family_gc_max_age(): # [START bigtable_create_family_gc_max_age] from google.cloud.bigtable import Client @@ -699,5 +857,408 @@ def test_bigtable_create_family_gc_nested(): column_family_obj.delete() +def test_bigtable_row_data_cells_cell_value_cell_values(): + + value = b"value_in_col1" + row = Config.TABLE.row(b"row_key_1") + row.set_cell( + COLUMN_FAMILY_ID, COL_NAME1, value, timestamp=datetime.datetime.utcnow() + ) + row.commit() + + row.set_cell( + COLUMN_FAMILY_ID, COL_NAME1, value, timestamp=datetime.datetime.utcnow() + ) + row.commit() + + # [START bigtable_row_data_cells] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + row_key = "row_key_1" + row_data = table.read_row(row_key) + + cells = row_data.cells + # [END bigtable_row_data_cells] + + actual_cell_value = cells[COLUMN_FAMILY_ID][COL_NAME1][0].value + assert actual_cell_value == value + + # [START bigtable_row_cell_value] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + row_key = "row_key_1" + row_data = table.read_row(row_key) + + cell_value = row_data.cell_value(COLUMN_FAMILY_ID, COL_NAME1) + # [END bigtable_row_cell_value] + assert cell_value == value + + # [START bigtable_row_cell_values] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + row_key = "row_key_1" + row_data = table.read_row(row_key) + + cell_values = row_data.cell_values(COLUMN_FAMILY_ID, COL_NAME1) + # [END bigtable_row_cell_values] + + for actual_value, timestamp in cell_values: + assert actual_value == value + + value2 = b"value_in_col2" + row.set_cell(COLUMN_FAMILY_ID, COL_NAME2, value2) + row.commit() + + # [START bigtable_row_find_cells] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) 
+ table = instance.table(TABLE_ID) + row_key = "row_key_1" + row = table.read_row(row_key) + + cells = row.find_cells(COLUMN_FAMILY_ID, COL_NAME2) + # [END bigtable_row_find_cells] + + assert cells[0].value == value2 + table.truncate(timeout=200) + + +def test_bigtable_row_setcell_rowkey(): + # [START bigtable_row_set_cell] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + row = table.row(ROW_KEY1) + + cell_val = b"cell-val" + row.set_cell( + COLUMN_FAMILY_ID, COL_NAME1, cell_val, timestamp=datetime.datetime.utcnow() + ) + # [END bigtable_row_set_cell] + + response = table.mutate_rows([row]) + # validate that all rows written successfully + for i, status in enumerate(response): + assert status.code == 0 + + # [START bigtable_row_row_key] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + + row = table.row(ROW_KEY1) + row_key = row.row_key + # [END bigtable_row_row_key] + assert row_key == ROW_KEY1 + + # [START bigtable_row_table] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + + row = table.row(ROW_KEY1) + table1 = row.table + # [END bigtable_row_table] + + assert table1 == table + table.truncate(timeout=200) + + +def test_bigtable_row_delete(): + # [START bigtable_row_delete] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + + row_key = b"row_key_1" + row_obj = table.row(row_key) + # [END bigtable_row_delete] + + row_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, b"cell-val") + row_obj.commit() + actual_rows_keys = [] + for row in table.read_rows(): + actual_rows_keys.append(row.row_key) + assert actual_rows_keys == [row_key] + + # [START bigtable_row_delete] + row_obj.delete() + row_obj.commit() + # [END bigtable_row_delete] + + actual_rows_keys = [] + for row in table.read_rows(): + actual_rows_keys.append(row.row_key) + assert len(actual_rows_keys) == 0 + + +def test_bigtable_row_delete_cell(): + # [START bigtable_row_delete_cell] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + + row_key1 = b"row_key_1" + row_obj = table.row(row_key1) + # [END bigtable_row_delete_cell] + + row_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1) + row_obj.commit() + + row_key2 = b"row_key_2" + row_obj = table.row(row_key2) + row_obj.set_cell(COLUMN_FAMILY_ID2, COL_NAME2, CELL_VAL2) + row_obj.commit() + + actual_rows_keys = [] + for row in table.read_rows(): + actual_rows_keys.append(row.row_key) + assert actual_rows_keys == [row_key1, row_key2] + + # [START bigtable_row_delete_cell] + row_obj.delete_cell(COLUMN_FAMILY_ID2, COL_NAME2) + row_obj.commit() + # [END bigtable_row_delete_cell] + + actual_rows_keys = [] + for row in table.read_rows(): + actual_rows_keys.append(row.row_key) + assert actual_rows_keys == [row_key1] + table.truncate(timeout=300) + + +def test_bigtable_row_delete_cells(): + # [START bigtable_row_delete_cells] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + + row_key1 = b"row_key_1" + row_obj = table.row(row_key1) + # [END bigtable_row_delete_cells] + + 
row_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1) + row_obj.commit() + row_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME2, CELL_VAL2) + row_obj.commit() + + actual_rows_keys = [] + for row in table.read_rows(): + actual_rows_keys.append(row.row_key) + assert actual_rows_keys == [row_key1] + + # [START bigtable_row_delete_cells] + row_obj.delete_cells(COLUMN_FAMILY_ID, [COL_NAME1, COL_NAME2]) + row_obj.commit() + # [END bigtable_row_delete_cells] + + actual_rows_keys = [] + for row in table.read_rows(): + actual_rows_keys.append(row.row_key) + assert actual_rows_keys == [] + + +def test_bigtable_row_clear(): + # [START bigtable_row_clear] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + + row_key = b"row_key_1" + row_obj = table.row(row_key) + row_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, b"cell-val") + # [END bigtable_row_clear] + + mutation_size = row_obj.get_mutations_size() + assert mutation_size > 0 + + # [START bigtable_row_clear] + row_obj.clear() + # [END bigtable_row_clear] + + mutation_size = row_obj.get_mutations_size() + assert mutation_size == 0 + + +def test_bigtable_row_clear_get_mutations_size(): + # [START bigtable_row_get_mutations_size] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + + row_key_id = b"row_key_1" + row_obj = table.row(row_key_id) + + mutation_size = row_obj.get_mutations_size() + # [END bigtable_row_get_mutations_size] + row_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, b"cell-val") + mutation_size = row_obj.get_mutations_size() + assert mutation_size > 0 + + row_obj.clear() + mutation_size = row_obj.get_mutations_size() + assert mutation_size == 0 + + +def test_bigtable_row_setcell_commit_rowkey(): + # [START bigtable_row_set_cell] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + + row_key = b"row_key_1" + cell_val = b"cell-val" + row_obj = table.row(row_key) + row_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, cell_val) + # [END bigtable_row_set_cell] + row_obj.commit() + + # [START bigtable_row_commit] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + + row_key = b"row_key_2" + cell_val = b"cell-val" + row_obj = table.row(row_key) + row_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, cell_val) + row_obj.commit() + # [END bigtable_row_commit] + + actual_rows_keys = [] + for row in table.read_rows(): + actual_rows_keys.append(row.row_key) + + assert actual_rows_keys == [b"row_key_1", b"row_key_2"] + + # [START bigtable_row_row_key] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + + row_key_id = b"row_key_2" + row_obj = table.row(row_key_id) + row_key = row_obj.row_key + # [END bigtable_row_row_key] + assert row_key == row_key_id + table.truncate(timeout=300) + + +def test_bigtable_row_append_cell_value(): + row = Config.TABLE.row(ROW_KEY1) + + cell_val1 = b"1" + row.set_cell(COLUMN_FAMILY_ID, COL_NAME1, cell_val1) + row.commit() + + # [START bigtable_row_append_cell_value] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + row = table.row(ROW_KEY1, 
append=True) + + cell_val2 = b"2" + row.append_cell_value(COLUMN_FAMILY_ID, COL_NAME1, cell_val2) + # [END bigtable_row_append_cell_value] + row.commit() + + row_data = table.read_row(ROW_KEY1) + actual_value = row_data.cell_value(COLUMN_FAMILY_ID, COL_NAME1) + assert actual_value == cell_val1 + cell_val2 + + # [START bigtable_row_commit] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + row = Config.TABLE.row(ROW_KEY2) + cell_val = 1 + row.set_cell(COLUMN_FAMILY_ID, COL_NAME1, cell_val) + row.commit() + # [END bigtable_row_commit] + + # [START bigtable_row_increment_cell_value] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + row = table.row(ROW_KEY2, append=True) + + int_val = 3 + row.increment_cell_value(COLUMN_FAMILY_ID, COL_NAME1, int_val) + # [END bigtable_row_increment_cell_value] + row.commit() + + row_data = table.read_row(ROW_KEY2) + actual_value = row_data.cell_value(COLUMN_FAMILY_ID, COL_NAME1) + + import struct + + _PACK_I64 = struct.Struct(">q").pack + assert actual_value == _PACK_I64(cell_val + int_val) + table.truncate(timeout=200) + + +def test_bigtable_row_clear(): + # [START bigtable_row_clear] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + + row_key = b"row_key_1" + row_obj = table.row(row_key) + row_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, b"cell-val") + # [END bigtable_row_clear] + + mutation_size = row_obj.get_mutations_size() + assert mutation_size > 0 + + # [START bigtable_row_clear] + row_obj.clear() + # [END bigtable_row_clear] + + mutation_size = row_obj.get_mutations_size() + assert mutation_size == 0 + + if __name__ == "__main__": pytest.main() diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py b/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py index 120e2bb05b48..3a649049b66d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py @@ -68,17 +68,11 @@ def mutate(self, row): """ Add a row to the batch. If the current batch meets one of the size limits, the batch is sent synchronously. - Example: - >>> # Batcher for max row bytes - >>> batcher = table.mutations_batcher(max_row_bytes=1024) - >>> - >>> row = table.row(b'row_key') - >>> - >>> # In batcher mutate will flush current batch if it - >>> # reaches the max_row_bytes - >>> batcher.mutate(row) - >>> - >>> batcher.flush() + For example: + + .. literalinclude:: snippets.py + :start-after: [START bigtable_batcher_mutate] + :end-before: [END bigtable_batcher_mutate] :type row: class :param row: class:`~google.cloud.bigtable.row.DirectRow`. @@ -113,20 +107,11 @@ def mutate_rows(self, rows): """ Add a row to the batch. If the current batch meets one of the size limits, the batch is sent synchronously. - Example: - >>> # Batcher for flush count - >>> batcher = table.mutations_batcher(flush_count=2) - >>> - >>> row1 = table.row(b'row_key_1') - >>> row2 = table.row(b'row_key_2') - >>> row3 = table.row(b'row_key_3') - >>> row4 = table.row(b'row_key_4') - >>> - >>> # In batcher mutate will flush current batch if it - >>> # reaches the max flush_count - >>> batcher.mutate_rows([row_1, row_2, row_3, row_4]) - >>> - >>> batcher.flush() + For example: + + .. 
literalinclude:: snippets.py + :start-after: [START bigtable_batcher_mutate_rows] + :end-before: [END bigtable_batcher_mutate_rows] :type rows: list:[`~google.cloud.bigtable.row.DirectRow`] :param rows: list:[`~google.cloud.bigtable.row.DirectRow`]. @@ -143,7 +128,14 @@ def mutate_rows(self, rows): self.mutate(row) def flush(self): - """ Sends the current. batch to Cloud Bigtable. """ + """ Sends the current. batch to Cloud Bigtable. + For example: + + .. literalinclude:: snippets.py + :start-after: [START bigtable_batcher_flush] + :end-before: [END bigtable_batcher_flush] + + """ if len(self.rows) != 0: self.table.mutate_rows(self.rows) self.total_mutation_count = 0 diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py index b617e36c02a9..5ff1d0404b94 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py @@ -94,6 +94,12 @@ def __init__( def from_pb(cls, cluster_pb, instance): """Creates an cluster instance from a protobuf. + For example: + + .. literalinclude:: snippets.py + :start-after: [START bigtable_cluster_from_pb] + :end-before: [END bigtable_cluster_from_pb] + :type cluster_pb: :class:`instance_pb2.Cluster` :param cluster_pb: An instance protobuf object. @@ -148,6 +154,12 @@ def name(self): This property will not change if ``_instance`` and ``cluster_id`` do not, but the return value is not cached. + For example: + + .. literalinclude:: snippets.py + :start-after: [START bigtable_cluster_name] + :end-before: [END bigtable_cluster_name] + The cluster name is of the form ``"projects/{project}/instances/{instance}/clusters/{cluster_id}"`` @@ -161,7 +173,15 @@ def name(self): @property def state(self): - """google.cloud.bigtable.enums.Cluster.State: state of cluster.""" + """google.cloud.bigtable.enums.Cluster.State: state of cluster. + + For example: + + .. literalinclude:: snippets.py + :start-after: [START bigtable_cluster_state] + :end-before: [END bigtable_cluster_state] + + """ return self._state def __eq__(self, other): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py index 32d3666136a5..6131b7a7b239 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py @@ -124,6 +124,12 @@ def _update_from_pb(self, instance_pb): def from_pb(cls, instance_pb, client): """Creates an instance instance from a protobuf. + For example: + + .. literalinclude:: snippets.py + :start-after: [START bigtable_instance_from_pb] + :end-before: [END bigtable_instance_from_pb] + :type instance_pb: :class:`instance_pb2.Instance` :param instance_pb: An instance protobuf object. @@ -162,6 +168,12 @@ def name(self): This property will not change if ``instance_id`` does not, but the return value is not cached. + For example: + + .. literalinclude:: snippets.py + :start-after: [START bigtable_instance_name] + :end-before: [END bigtable_instance_name] + The instance name is of the form ``"projects/{project}/instances/{instance_id}"`` @@ -175,7 +187,15 @@ def name(self): @property def state(self): - """google.cloud.bigtable.enums.Instance.State: state of Instance.""" + """google.cloud.bigtable.enums.Instance.State: state of Instance. + + For example: + + .. 
literalinclude:: snippets.py + :start-after: [START bigtable_instance_state] + :end-before: [END bigtable_instance_state] + + """ return self._state def __eq__(self, other): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/policy.py b/packages/google-cloud-bigtable/google/cloud/bigtable/policy.py index 78c8e3a414b1..9fea7bbc5a0e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/policy.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/policy.py @@ -81,7 +81,14 @@ def __init__(self, etag=None, version=None): @property def bigtable_admins(self): - """Access to bigtable.admin role memebers""" + """Access to bigtable.admin role memebers + + For example: + + .. literalinclude:: snippets.py + :start-after: [START bigtable_admins_policy] + :end-before: [END bigtable_admins_policy] + """ result = set() for member in self._bindings.get(BIGTABLE_ADMIN_ROLE, ()): result.add(member) @@ -89,7 +96,14 @@ def bigtable_admins(self): @property def bigtable_readers(self): - """Access to bigtable.reader role memebers""" + """Access to bigtable.reader role memebers + + For example: + + .. literalinclude:: snippets.py + :start-after: [START bigtable_readers_policy] + :end-before: [END bigtable_readers_policy] + """ result = set() for member in self._bindings.get(BIGTABLE_READER_ROLE, ()): result.add(member) @@ -97,7 +111,14 @@ def bigtable_readers(self): @property def bigtable_users(self): - """Access to bigtable.user role memebers""" + """Access to bigtable.user role memebers + + For example: + + .. literalinclude:: snippets.py + :start-after: [START bigtable_users_policy] + :end-before: [END bigtable_users_policy] + """ result = set() for member in self._bindings.get(BIGTABLE_USER_ROLE, ()): result.add(member) @@ -105,7 +126,14 @@ def bigtable_users(self): @property def bigtable_viewers(self): - """Access to bigtable.viewer role memebers""" + """Access to bigtable.viewer role memebers + + For example: + + .. literalinclude:: snippets.py + :start-after: [START bigtable_viewers_policy] + :end-before: [END bigtable_viewers_policy] + """ result = set() for member in self._bindings.get(BIGTABLE_VIEWER_ROLE, ()): result.add(member) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row.py index 358344cefcb6..8d1f91f3296f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row.py @@ -56,6 +56,12 @@ def __init__(self, row_key, table=None): def row_key(self): """Row key. + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_row_row_key] + :end-before: [END bigtable_row_row_key] + :rtype: bytes :returns: The key for the current row. """ @@ -65,6 +71,12 @@ def row_key(self): def table(self): """Row table. + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_row_table] + :end-before: [END bigtable_row_table] + :rtype: table: :class:`Table ` :returns: table: The table that owns the row. """ @@ -281,7 +293,15 @@ def _get_mutations(self, state=None): # pylint: disable=unused-argument return self._pb_mutations def get_mutations_size(self): - """ Gets the total mutations size for current row """ + """ Gets the total mutations size for current row + + For example: + + .. 
literalinclude:: snippets_table.py + :start-after: [START bigtable_row_get_mutations_size] + :end-before: [END bigtable_row_get_mutations_size] + + """ mutation_size = 0 for mutation in self._get_mutations(): @@ -303,6 +323,12 @@ def set_cell(self, column_family_id, column, value, timestamp=None): send an API request (with the mutations) to the Google Cloud Bigtable API, call :meth:`commit`. + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_row_set_cell] + :end-before: [END bigtable_row_set_cell] + :type column_family_id: str :param column_family_id: The column family that contains the column. Must be of the form @@ -331,6 +357,13 @@ def delete(self): row, but does not make an API request. To actually send an API request (with the mutations) to the Google Cloud Bigtable API, call :meth:`commit`. + + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_row_delete] + :end-before: [END bigtable_row_delete] + """ self._delete(state=None) @@ -344,6 +377,12 @@ def delete_cell(self, column_family_id, column, time_range=None): send an API request (with the mutations) to the Google Cloud Bigtable API, call :meth:`commit`. + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_row_delete_cell] + :end-before: [END bigtable_row_delete_cell] + :type column_family_id: str :param column_family_id: The column family that contains the column or columns with cells being deleted. Must be @@ -371,6 +410,12 @@ def delete_cells(self, column_family_id, columns, time_range=None): send an API request (with the mutations) to the Google Cloud Bigtable API, call :meth:`commit`. + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_row_delete_cells] + :end-before: [END bigtable_row_delete_cells] + :type column_family_id: str :param column_family_id: The column family that contains the column or columns with cells being deleted. Must be @@ -400,6 +445,12 @@ def commit(self): After committing the accumulated mutations, resets the local mutations to an empty list. + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_row_commit] + :end-before: [END bigtable_row_commit] + :raises: :exc:`~.table.TooManyMutationsError` if the number of mutations is greater than 100,000. """ @@ -407,7 +458,15 @@ def commit(self): self.clear() def clear(self): - """Removes all currently accumulated mutations on the current row.""" + """Removes all currently accumulated mutations on the current row. + + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_row_clear] + :end-before: [END bigtable_row_clear] + + """ del self._pb_mutations[:] @@ -493,6 +552,12 @@ def commit(self): After committing the accumulated mutations, resets the local mutations. + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_row_commit] + :end-before: [END bigtable_row_commit] + :rtype: bool :returns: Flag indicating if the filter was matched (which also indicates which set of mutations were applied by the server). @@ -539,6 +604,12 @@ def set_cell(self, column_family_id, column, value, timestamp=None, state=True): send an API request (with the mutations) to the Google Cloud Bigtable API, call :meth:`commit`. + For example: + + .. 
literalinclude:: snippets_table.py + :start-after: [START bigtable_row_set_cell] + :end-before: [END bigtable_row_set_cell] + :type column_family_id: str :param column_family_id: The column family that contains the column. Must be of the form @@ -574,6 +645,12 @@ def delete(self, state=True): send an API request (with the mutations) to the Google Cloud Bigtable API, call :meth:`commit`. + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_row_delete] + :end-before: [END bigtable_row_delete] + :type state: bool :param state: (Optional) The state that the mutation should be applied in. Defaults to :data:`True`. @@ -590,6 +667,12 @@ def delete_cell(self, column_family_id, column, time_range=None, state=True): send an API request (with the mutations) to the Google Cloud Bigtable API, call :meth:`commit`. + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_row_delete_cell] + :end-before: [END bigtable_row_delete_cell] + :type column_family_id: str :param column_family_id: The column family that contains the column or columns with cells being deleted. Must be @@ -621,6 +704,12 @@ def delete_cells(self, column_family_id, columns, time_range=None, state=True): send an API request (with the mutations) to the Google Cloud Bigtable API, call :meth:`commit`. + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_row_delete_cells] + :end-before: [END bigtable_row_delete_cells] + :type column_family_id: str :param column_family_id: The column family that contains the column or columns with cells being deleted. Must be @@ -647,7 +736,15 @@ def delete_cells(self, column_family_id, columns, time_range=None, state=True): # pylint: enable=arguments-differ def clear(self): - """Removes all currently accumulated mutations on the current row.""" + """Removes all currently accumulated mutations on the current row. + + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_row_clear] + :end-before: [END bigtable_row_clear] + + """ del self._true_pb_mutations[:] del self._false_pb_mutations[:] @@ -678,7 +775,15 @@ def __init__(self, row_key, table): self._rule_pb_list = [] def clear(self): - """Removes all currently accumulated modifications on current row.""" + """Removes all currently accumulated modifications on current row. + + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_row_clear] + :end-before: [END bigtable_row_clear] + + """ del self._rule_pb_list[:] def append_cell_value(self, column_family_id, column, value): @@ -691,6 +796,12 @@ def append_cell_value(self, column_family_id, column, value): request. To actually send an API request (with the rules) to the Google Cloud Bigtable API, call :meth:`commit`. + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_row_append_cell_value] + :end-before: [END bigtable_row_append_cell_value] + :type column_family_id: str :param column_family_id: The column family that contains the column. Must be of the form @@ -725,6 +836,12 @@ def increment_cell_value(self, column_family_id, column, int_value): request. To actually send an API request (with the rules) to the Google Cloud Bigtable API, call :meth:`commit`. + For example: + + .. 
literalinclude:: snippets_table.py + :start-after: [START bigtable_row_increment_cell_value] + :end-before: [END bigtable_row_increment_cell_value] + :type column_family_id: str :param column_family_id: The column family that contains the column. Must be of the form @@ -765,25 +882,11 @@ def commit(self): After committing the accumulated mutations, resets the local mutations. - .. code:: python - - >>> append_row.commit() - { - u'col-fam-id': { - b'col-name1': [ - (b'cell-val', datetime.datetime(...)), - (b'cell-val-newer', datetime.datetime(...)), - ], - b'col-name2': [ - (b'altcol-cell-val', datetime.datetime(...)), - ], - }, - u'col-fam-id2': { - b'col-name3-but-other-fam': [ - (b'foo', datetime.datetime(...)), - ], - }, - } + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_row_commit] + :end-before: [END bigtable_row_commit] :rtype: dict :returns: The new contents of all modified cells. Returned as a diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py index f9651efd12b7..aeb932243b42 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py @@ -174,6 +174,12 @@ def to_dict(self): def cells(self): """Property returning all the cells accumulated on this partial row. + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_row_data_cells] + :end-before: [END bigtable_row_data_cells] + :rtype: dict :returns: Dictionary of the :class:`Cell` objects accumulated. This dictionary has two-levels of keys (first for column families @@ -194,6 +200,12 @@ def row_key(self): def find_cells(self, column_family_id, column): """Get a time series of cells stored on this instance. + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_row_find_cells] + :end-before: [END bigtable_row_find_cells] + Args: column_family_id (str): The ID of the column family. Must be of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. @@ -225,6 +237,12 @@ def find_cells(self, column_family_id, column): def cell_value(self, column_family_id, column, index=0): """Get a single cell value stored on this instance. + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_row_cell_value] + :end-before: [END bigtable_row_cell_value] + Args: column_family_id (str): The ID of the column family. Must be of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. @@ -260,6 +278,12 @@ def cell_value(self, column_family_id, column, index=0): def cell_values(self, column_family_id, column, max_count=None): """Get a time series of cells stored on this instance. + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_row_cell_values] + :end-before: [END bigtable_row_cell_values] + Args: column_family_id (str): The ID of the column family. Must be of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. From dab01cec2d578b3607e02c76010a0a1f263aa229 Mon Sep 17 00:00:00 2001 From: Seth Troisi Date: Tue, 19 Mar 2019 10:34:03 -0700 Subject: [PATCH 239/892] Remove duplicate snippets. 
(#7528) --- .../docs/snippets_table.py | 100 ------------------ 1 file changed, 100 deletions(-) diff --git a/packages/google-cloud-bigtable/docs/snippets_table.py b/packages/google-cloud-bigtable/docs/snippets_table.py index 6fe2e36d7a48..345301961f4b 100644 --- a/packages/google-cloud-bigtable/docs/snippets_table.py +++ b/packages/google-cloud-bigtable/docs/snippets_table.py @@ -488,82 +488,6 @@ def test_bigtable_create_update_delete_column_family(): assert column_family_id not in column_families -def test_bigtable_column_family_name(): - # [START bigtable_column_family_name] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - - column_families = table.list_column_families() - column_family_obj = column_families[COLUMN_FAMILY_ID] - column_family_name = column_family_obj.name - # [END bigtable_column_family_name] - import re - - _cf_name_re = re.compile( - r"^projects/(?P[^/]+)/" - r"instances/(?P[^/]+)/tables/" - r"(?P
[^/]+)/columnFamilies/" - r"(?P[_a-zA-Z0-9][-_.a-zA-Z0-9]*)$" - ) - assert _cf_name_re.match(column_family_name) - - -def test_bigtable_create_update_delete_column_family(): - # [START bigtable_create_column_family] - from google.cloud.bigtable import Client - from google.cloud.bigtable import column_family - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - - column_family_id = "column_family_id1" - gc_rule = column_family.MaxVersionsGCRule(2) - column_family_obj = table.column_family(column_family_id, gc_rule=gc_rule) - column_family_obj.create() - - # [END bigtable_create_column_family] - column_families = table.list_column_families() - assert column_families[column_family_id].gc_rule == gc_rule - - # [START bigtable_update_column_family] - from google.cloud.bigtable import Client - from google.cloud.bigtable import column_family - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - - # Already existing column family id - column_family_id = "column_family_id1" - # Define the GC rule to retain data with max age of 5 days - max_age_rule = column_family.MaxAgeGCRule(datetime.timedelta(days=5)) - column_family_obj = table.column_family(column_family_id, gc_rule=max_age_rule) - column_family_obj.update() - # [END bigtable_update_column_family] - - updated_families = table.list_column_families() - assert updated_families[column_family_id].gc_rule == max_age_rule - - # [START bigtable_delete_column_family] - from google.cloud.bigtable import Client - from google.cloud.bigtable import column_family - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - - column_family_id = "column_family_id1" - column_family_obj = table.column_family(column_family_id) - column_family_obj.delete() - # [END bigtable_delete_column_family] - column_families = table.list_column_families() - assert column_family_id not in column_families - - def test_bigtable_add_row_add_row_range_add_row_range_from_keys(): row_keys = [ b"row_key_1", @@ -1236,29 +1160,5 @@ def test_bigtable_row_append_cell_value(): table.truncate(timeout=200) -def test_bigtable_row_clear(): - # [START bigtable_row_clear] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - - row_key = b"row_key_1" - row_obj = table.row(row_key) - row_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, b"cell-val") - # [END bigtable_row_clear] - - mutation_size = row_obj.get_mutations_size() - assert mutation_size > 0 - - # [START bigtable_row_clear] - row_obj.clear() - # [END bigtable_row_clear] - - mutation_size = row_obj.get_mutations_size() - assert mutation_size == 0 - - if __name__ == "__main__": pytest.main() From 2f38c948a5dc00f414ef7e29e527a7c0d5d3c556 Mon Sep 17 00:00:00 2001 From: Christopher Wilcox Date: Wed, 20 Mar 2019 13:41:12 -0700 Subject: [PATCH 240/892] Remove classifier for Python 3.4 for end-of-life. (#7535) * Remove classifier for Python 3.4 for end-of-life. 
* Update supported versions in Client README, Contributing Guide --- packages/google-cloud-bigtable/README.rst | 2 +- packages/google-cloud-bigtable/setup.py | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/README.rst b/packages/google-cloud-bigtable/README.rst index 472e85855830..71c007324eae 100644 --- a/packages/google-cloud-bigtable/README.rst +++ b/packages/google-cloud-bigtable/README.rst @@ -51,7 +51,7 @@ dependencies. Supported Python Versions ^^^^^^^^^^^^^^^^^^^^^^^^^ -Python >= 3.4 +Python >= 3.5 Deprecated Python Versions ^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index ea272a24c244..edfeb68755e4 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -74,7 +74,6 @@ 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', From 15b92aed4f2c0aed84d6c0256a69bf8ccd8a15c8 Mon Sep 17 00:00:00 2001 From: Alex <7764119+AVaksman@users.noreply.github.com> Date: Tue, 26 Mar 2019 15:04:03 -0400 Subject: [PATCH 241/892] Add deprecation warnings for to-be-removed features. (#7532) --- .../google/cloud/bigtable/instance.py | 16 +++++++++++++- .../google/cloud/bigtable/table.py | 7 +++++++ .../tests/unit/test_instance.py | 9 +++++++- .../tests/unit/test_table.py | 21 +++++++++++++++---- 4 files changed, 47 insertions(+), 6 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py index 6131b7a7b239..134b744aa720 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py @@ -29,12 +29,18 @@ from google.cloud.bigtable.policy import Policy +import warnings -_EXISTING_INSTANCE_LOCATION_ID = "see-existing-cluster" _INSTANCE_NAME_RE = re.compile( r"^projects/(?P[^/]+)/" r"instances/(?P[a-z][-a-z0-9]*)$" ) +_INSTANCE_CREATE_WARNING = """ +Use of `instance.create({0}, {1}, {2})` will be depricated. +Please replace with +`cluster = instance.cluster({0}, {1}, {2})` +`instance.create(clusters=[cluster])`.""" + class Instance(object): """Representation of a Google Cloud Bigtable Instance. 
@@ -276,6 +282,14 @@ def create( """ if clusters is None: + warnings.warn( + _INSTANCE_CREATE_WARNING.format( + "location_id", "serve_nodes", "default_storage_type" + ), + DeprecationWarning, + stacklevel=2, + ) + cluster_id = "{}-cluster".format(self.instance_id) clusters = [ diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index a422d335410d..0d1fe98756bf 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -42,6 +42,8 @@ bigtable_table_admin_pb2 as table_admin_messages_v2_pb2, ) +import warnings + # Maximum number of mutations in bulk (MutateRowsRequest message): # (https://cloud.google.com/bigtable/docs/reference/data/rpc/ @@ -467,6 +469,11 @@ def yield_rows(self, **kwargs): :rtype: :class:`.PartialRowData` :returns: A :class:`.PartialRowData` for each row returned """ + warnings.warn( + "`yield_rows()` is depricated; use `red_rows()` instead", + DeprecationWarning, + stacklevel=2, + ) return self.read_rows(**kwargs) def mutate_rows(self, rows, retry=DEFAULT_RETRY): diff --git a/packages/google-cloud-bigtable/tests/unit/test_instance.py b/packages/google-cloud-bigtable/tests/unit/test_instance.py index 5092652a1fce..397e54d570d0 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_instance.py +++ b/packages/google-cloud-bigtable/tests/unit/test_instance.py @@ -292,6 +292,7 @@ def _instance_api_response_for_create(self): def test_create(self): from google.cloud.bigtable import enums from google.cloud.bigtable_admin_v2.types import instance_pb2 + import warnings credentials = _make_credentials() client = self._make_client( @@ -308,7 +309,10 @@ def test_create(self): client._instance_admin_client = instance_api serve_nodes = 3 - result = instance.create(location_id=self.LOCATION_ID, serve_nodes=serve_nodes) + with warnings.catch_warnings(record=True) as warned: + result = instance.create( + location_id=self.LOCATION_ID, serve_nodes=serve_nodes + ) cluster_pb = instance_pb2.Cluster( location=instance_api.location_path(self.PROJECT, self.LOCATION_ID), @@ -328,6 +332,9 @@ def test_create(self): clusters={cluster_id: cluster_pb}, ) + self.assertEqual(len(warned), 1) + self.assertIs(warned[0].category, DeprecationWarning) + self.assertIs(result, response) def test_create_w_clusters(self): diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index 42933da373b5..25f468c730fe 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -752,6 +752,7 @@ def test_read_retry_rows(self): def test_yield_retry_rows(self): from google.cloud.bigtable_v2.gapic import bigtable_client from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + import warnings data_api = bigtable_client.BigtableClient(mock.Mock()) table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) @@ -799,8 +800,14 @@ def test_yield_retry_rows(self): ) rows = [] - for row in table.yield_rows(start_key=self.ROW_KEY_1, end_key=self.ROW_KEY_2): - rows.append(row) + with warnings.catch_warnings(record=True) as warned: + for row in table.yield_rows( + start_key=self.ROW_KEY_1, end_key=self.ROW_KEY_2 + ): + rows.append(row) + + self.assertEqual(len(warned), 1) + self.assertIs(warned[0].category, DeprecationWarning) result = rows[1] self.assertEqual(result.row_key, 
self.ROW_KEY_2) @@ -810,6 +817,7 @@ def test_yield_rows_with_row_set(self): from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client from google.cloud.bigtable.row_set import RowSet from google.cloud.bigtable.row_set import RowRange + import warnings data_api = bigtable_client.BigtableClient(mock.Mock()) table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) @@ -866,8 +874,13 @@ def test_yield_rows_with_row_set(self): RowRange(start_key=self.ROW_KEY_1, end_key=self.ROW_KEY_2) ) row_set.add_row_key(self.ROW_KEY_3) - for row in table.yield_rows(row_set=row_set): - rows.append(row) + + with warnings.catch_warnings(record=True) as warned: + for row in table.yield_rows(row_set=row_set): + rows.append(row) + + self.assertEqual(len(warned), 1) + self.assertIs(warned[0].category, DeprecationWarning) self.assertEqual(rows[0].row_key, self.ROW_KEY_1) self.assertEqual(rows[1].row_key, self.ROW_KEY_2) From 9797363b98bcd9614156b1bf859e0bd59eed620e Mon Sep 17 00:00:00 2001 From: Chose McThing Date: Wed, 17 Apr 2019 18:42:59 -0400 Subject: [PATCH 242/892] Fix enum reference in documentation. (#7724) google.cloud.bigtable.enums.InstanceType -> google.cloud.bigtable.enums.Instance.Type --- .../google-cloud-bigtable/google/cloud/bigtable/instance.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py index 134b744aa720..bbe8496853e8 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py @@ -74,10 +74,10 @@ class Instance(object): :param instance_type: (Optional) The type of the instance. Possible values are represented by the following constants: - :data:`google.cloud.bigtable.enums.InstanceType.PRODUCTION`. - :data:`google.cloud.bigtable.enums.InstanceType.DEVELOPMENT`, + :data:`google.cloud.bigtable.enums.Instance.Type.PRODUCTION`. + :data:`google.cloud.bigtable.enums.Instance.Type.DEVELOPMENT`, Defaults to - :data:`google.cloud.bigtable.enums.InstanceType.UNSPECIFIED`. + :data:`google.cloud.bigtable.enums.Instance.Type.UNSPECIFIED`. :type labels: dict :param labels: (Optional) Labels are a flexible and lightweight From 44cd974162751508e616b02e35d2d245157afcf0 Mon Sep 17 00:00:00 2001 From: sangramql <39852271+sangramql@users.noreply.github.com> Date: Tue, 30 Apr 2019 22:15:52 +0530 Subject: [PATCH 243/892] Avoid leaking instances from snippets. 
(#7800) --- .../google-cloud-bigtable/docs/snippets.py | 23 +++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/docs/snippets.py b/packages/google-cloud-bigtable/docs/snippets.py index eb0cbaf0fcfa..b929f813d039 100644 --- a/packages/google-cloud-bigtable/docs/snippets.py +++ b/packages/google-cloud-bigtable/docs/snippets.py @@ -33,12 +33,13 @@ import pytest from test_utils.system import unique_resource_id +from google.api_core.exceptions import NotFound from google.cloud._helpers import UTC from google.cloud.bigtable import Client from google.cloud.bigtable import enums -INSTANCE_ID = "snippet-" + unique_resource_id("-") +INSTANCE_ID = "snippet-tests" + unique_resource_id("-") CLUSTER_ID = "clus-1-" + unique_resource_id("-") LOCATION_ID = "us-central1-f" ALT_LOCATION_ID = "us-central1-a" @@ -52,6 +53,7 @@ .strftime("%Y-%m-%dt%H-%M-%S") ) LABELS = {LABEL_KEY: str(LABEL_STAMP)} +INSTANCES_TO_DELETE = [] class Config(object): @@ -79,10 +81,15 @@ def setup_module(): operation = Config.INSTANCE.create(clusters=[cluster]) # We want to make sure the operation completes. operation.result(timeout=100) + INSTANCES_TO_DELETE.append(Config.INSTANCE) def teardown_module(): - Config.INSTANCE.delete() + for instance in INSTANCES_TO_DELETE: + try: + instance.delete() + except NotFound: + pass def test_bigtable_create_instance(): @@ -107,9 +114,14 @@ def test_bigtable_create_instance(): default_storage_type=storage_type, ) operation = instance.create(clusters=[cluster]) + + # Make sure this instance gets deleted after the test case. + INSTANCES_TO_DELETE.append(instance) + # We want to make sure the operation completes. operation.result(timeout=100) # [END bigtable_create_prod_instance] + assert instance.exists() instance.delete() @@ -281,6 +293,9 @@ def test_bigtable_update_instance(): # [END bigtable_update_instance] assert instance.display_name == display_name + # Make sure this instance gets deleted after the test case. + INSTANCES_TO_DELETE.append(instance) + def test_bigtable_update_cluster(): # [START bigtable_update_cluster] @@ -367,6 +382,10 @@ def test_bigtable_delete_instance(): default_storage_type=STORAGE_TYPE, ) operation = instance.create(clusters=[cluster]) + + # Make sure this instance gets deleted after the test case. + INSTANCES_TO_DELETE.append(instance) + # We want to make sure the operation completes. operation.result(timeout=100) From 12843cf72510475ef25924f5f341f5029949d937 Mon Sep 17 00:00:00 2001 From: sangramql <39852271+sangramql@users.noreply.github.com> Date: Thu, 2 May 2019 03:58:13 +0530 Subject: [PATCH 244/892] Bigtable: fix rendering of instance admin snippets. (#7797) Closes #7522. 
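The fix applies one convention throughout docs/snippets.py and docs/snippets_table.py:
test-only setup and assertions live outside the [START]/[END] region markers, and the
region between the markers is a complete, self-contained example (imports included), so
the ``literalinclude`` directives in the library docstrings render runnable code. A
minimal sketch of that shape, reusing the module-level ``Config`` and ``INSTANCE_ID``
fixtures from these files (the table id and the assertions are illustrative, not the
exact snippet bodies):

    def test_bigtable_delete_table():
        # Test-only setup: create the table that the rendered snippet deletes.
        table_del = Config.INSTANCE.table("table_id_del")
        table_del.create()
        assert table_del.exists()

        # [START bigtable_delete_table]
        from google.cloud.bigtable import Client

        client = Client(admin=True)
        instance = client.instance(INSTANCE_ID)
        table = instance.table("table_id_del")

        table.delete()
        # [END bigtable_delete_table]

        # Test-only verification, kept out of the rendered snippet.
        assert not table.exists()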
--- .../google-cloud-bigtable/docs/snippets.py | 69 ++++------- .../docs/snippets_table.py | 115 +++++++++--------- 2 files changed, 80 insertions(+), 104 deletions(-) diff --git a/packages/google-cloud-bigtable/docs/snippets.py b/packages/google-cloud-bigtable/docs/snippets.py index b929f813d039..9f69e5340b83 100644 --- a/packages/google-cloud-bigtable/docs/snippets.py +++ b/packages/google-cloud-bigtable/docs/snippets.py @@ -216,20 +216,18 @@ def test_bigtable_list_clusters_in_project(): def test_bigtable_list_app_profiles(): - # [START bigtable_list_app_profiles] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - # [END bigtable_list_app_profiles] - - app_profile = instance.app_profile( + app_profile = Config.INSTANCE.app_profile( app_profile_id="app-prof-" + unique_resource_id("-"), routing_policy_type=enums.RoutingPolicyType.ANY, ) app_profile = app_profile.create(ignore_warnings=True) # [START bigtable_list_app_profiles] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + app_profiles_list = instance.list_app_profiles() # [END bigtable_list_app_profiles] assert len(app_profiles_list) > 0 @@ -363,22 +361,15 @@ def test_bigtable_delete_cluster(): def test_bigtable_delete_instance(): - # [START bigtable_delete_instance] from google.cloud.bigtable import Client client = Client(admin=True) - instance_id_to_delete = "inst-my-" + unique_resource_id("-") - # [END bigtable_delete_instance] - cluster_id = "clus-my-" + unique_resource_id("-") - - instance = client.instance( - instance_id_to_delete, instance_type=PRODUCTION, labels=LABELS - ) + instance = client.instance("inst-my-123", instance_type=PRODUCTION, labels=LABELS) cluster = instance.cluster( - cluster_id, + "clus-my-123", location_id=ALT_LOCATION_ID, - serve_nodes=SERVER_NODES, + serve_nodes=1, default_storage_type=STORAGE_TYPE, ) operation = instance.create(clusters=[cluster]) @@ -390,7 +381,12 @@ def test_bigtable_delete_instance(): operation.result(timeout=100) # [START bigtable_delete_instance] - instance_to_delete = client.instance(instance_id_to_delete) + from google.cloud.bigtable import Client + + client = Client(admin=True) + + instance_id = "inst-my-123" + instance_to_delete = client.instance(instance_id) instance_to_delete.delete() # [END bigtable_delete_instance] @@ -412,16 +408,13 @@ def test_bigtable_test_iam_permissions(): def test_bigtable_set_iam_policy_then_get_iam_policy(): + service_account_email = Config.CLIENT._credentials.service_account_email + # [START bigtable_set_iam_policy] from google.cloud.bigtable import Client from google.cloud.bigtable.policy import Policy from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE - # [END bigtable_set_iam_policy] - - service_account_email = Config.CLIENT._credentials.service_account_email - - # [START bigtable_set_iam_policy] client = Client(admin=True) instance = client.instance(INSTANCE_ID) instance.reload() @@ -490,16 +483,13 @@ def test_bigtable_instance_admin_client(): def test_bigtable_admins_policy(): + service_account_email = Config.CLIENT._credentials.service_account_email + # [START bigtable_admins_policy] from google.cloud.bigtable import Client from google.cloud.bigtable.policy import Policy from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE - # [END bigtable_admins_policy] - - service_account_email = Config.CLIENT._credentials.service_account_email - - # [START bigtable_admins_policy] client = 
Client(admin=True) instance = client.instance(INSTANCE_ID) instance.reload() @@ -514,16 +504,13 @@ def test_bigtable_admins_policy(): def test_bigtable_readers_policy(): + service_account_email = Config.CLIENT._credentials.service_account_email + # [START bigtable_readers_policy] from google.cloud.bigtable import Client from google.cloud.bigtable.policy import Policy from google.cloud.bigtable.policy import BIGTABLE_READER_ROLE - # [END bigtable_readers_policy] - - service_account_email = Config.CLIENT._credentials.service_account_email - - # [START bigtable_readers_policy] client = Client(admin=True) instance = client.instance(INSTANCE_ID) instance.reload() @@ -538,16 +525,13 @@ def test_bigtable_readers_policy(): def test_bigtable_users_policy(): + service_account_email = Config.CLIENT._credentials.service_account_email + # [START bigtable_users_policy] from google.cloud.bigtable import Client from google.cloud.bigtable.policy import Policy from google.cloud.bigtable.policy import BIGTABLE_USER_ROLE - # [END bigtable_users_policy] - - service_account_email = Config.CLIENT._credentials.service_account_email - - # [START bigtable_users_policy] client = Client(admin=True) instance = client.instance(INSTANCE_ID) instance.reload() @@ -562,16 +546,13 @@ def test_bigtable_users_policy(): def test_bigtable_viewers_policy(): + service_account_email = Config.CLIENT._credentials.service_account_email + # [START bigtable_viewers_policy] from google.cloud.bigtable import Client from google.cloud.bigtable.policy import Policy from google.cloud.bigtable.policy import BIGTABLE_VIEWER_ROLE - # [END bigtable_viewers_policy] - - service_account_email = Config.CLIENT._credentials.service_account_email - - # [START bigtable_viewers_policy] client = Client(admin=True) instance = client.instance(INSTANCE_ID) instance.reload() diff --git a/packages/google-cloud-bigtable/docs/snippets_table.py b/packages/google-cloud-bigtable/docs/snippets_table.py index 345301961f4b..e52e9469143d 100644 --- a/packages/google-cloud-bigtable/docs/snippets_table.py +++ b/packages/google-cloud-bigtable/docs/snippets_table.py @@ -131,6 +131,11 @@ def test_bigtable_create_table(): def test_bigtable_sample_row_keys(): + table_sample = Config.INSTANCE.table("table_id1_samplerow") + initial_split_keys = [b"split_key_1", b"split_key_10", b"split_key_20"] + table_sample.create(initial_split_keys=initial_split_keys) + assert table_sample.exists() + # [START bigtable_sample_row_keys] from google.cloud.bigtable import Client @@ -138,10 +143,6 @@ def test_bigtable_sample_row_keys(): instance = client.instance(INSTANCE_ID) table = instance.table("table_id1_samplerow") - # [END bigtable_sample_row_keys] - initial_split_keys = [b"split_key_1", b"split_key_10", b"split_key_20"] - table.create(initial_split_keys=initial_split_keys) - # [START bigtable_sample_row_keys] data = table.sample_row_keys() actual_keys, offset = zip(*[(rk.row_key, rk.offset_bytes) for rk in data]) # [END bigtable_sample_row_keys] @@ -178,7 +179,7 @@ def test_bigtable_write_read_drop_truncate(): response = table.mutate_rows(rows) # validate that all rows written successfully for i, status in enumerate(response): - if status.code is not 0: + if status.code != 0: print("Row number {} failed to write".format(i)) # [END bigtable_mutate_rows] assert len(response) == len(rows) @@ -304,7 +305,7 @@ def test_bigtable_list_tables(): instance = client.instance(INSTANCE_ID) tables_list = instance.list_tables() # [END bigtable_list_tables] - assert len(tables_list) is not 0 + assert 
len(tables_list) != 0 def test_bigtable_table_name(): @@ -368,18 +369,17 @@ def test_bigtable_table_exists(): def test_bigtable_delete_table(): + table_del = Config.INSTANCE.table("table_id_del") + table_del.create() + assert table_del.exists() + # [START bigtable_delete_table] from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) table = instance.table("table_id_del") - # [END bigtable_delete_table] - - table.create() - assert table.exists() - # [START bigtable_delete_table] table.delete() # [END bigtable_delete_table] assert not table.exists() @@ -906,6 +906,15 @@ def test_bigtable_row_setcell_rowkey(): def test_bigtable_row_delete(): + table_row_del = Config.INSTANCE.table(TABLE_ID) + row_obj = table_row_del.row(b"row_key_1") + row_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, b"cell-val") + row_obj.commit() + actual_rows_keys = [] + for row in table_row_del.read_rows(): + actual_rows_keys.append(row.row_key) + assert actual_rows_keys == [b"row_key_1"] + # [START bigtable_row_delete] from google.cloud.bigtable import Client @@ -915,16 +924,7 @@ def test_bigtable_row_delete(): row_key = b"row_key_1" row_obj = table.row(row_key) - # [END bigtable_row_delete] - row_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, b"cell-val") - row_obj.commit() - actual_rows_keys = [] - for row in table.read_rows(): - actual_rows_keys.append(row.row_key) - assert actual_rows_keys == [row_key] - - # [START bigtable_row_delete] row_obj.delete() row_obj.commit() # [END bigtable_row_delete] @@ -936,53 +936,39 @@ def test_bigtable_row_delete(): def test_bigtable_row_delete_cell(): - # [START bigtable_row_delete_cell] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - + table_row_del_cell = Config.INSTANCE.table(TABLE_ID) row_key1 = b"row_key_1" - row_obj = table.row(row_key1) - # [END bigtable_row_delete_cell] - + row_obj = table_row_del_cell.row(row_key1) row_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1) row_obj.commit() - row_key2 = b"row_key_2" - row_obj = table.row(row_key2) - row_obj.set_cell(COLUMN_FAMILY_ID2, COL_NAME2, CELL_VAL2) - row_obj.commit() - actual_rows_keys = [] - for row in table.read_rows(): + for row in table_row_del_cell.read_rows(): actual_rows_keys.append(row.row_key) - assert actual_rows_keys == [row_key1, row_key2] + assert actual_rows_keys == [row_key1] # [START bigtable_row_delete_cell] - row_obj.delete_cell(COLUMN_FAMILY_ID2, COL_NAME2) + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + + row_key = b"row_key_1" + row_obj = table.row(row_key) + + row_obj.delete_cell(COLUMN_FAMILY_ID, COL_NAME1) row_obj.commit() # [END bigtable_row_delete_cell] - actual_rows_keys = [] for row in table.read_rows(): - actual_rows_keys.append(row.row_key) - assert actual_rows_keys == [row_key1] - table.truncate(timeout=300) + assert not row.row_key def test_bigtable_row_delete_cells(): - # [START bigtable_row_delete_cells] - from google.cloud.bigtable import Client - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table(TABLE_ID) - + table_row_del_cells = Config.INSTANCE.table(TABLE_ID) row_key1 = b"row_key_1" - row_obj = table.row(row_key1) - # [END bigtable_row_delete_cells] + row_obj = table_row_del_cells.row(row_key1) row_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1) row_obj.commit() @@ -990,22 
+976,36 @@ def test_bigtable_row_delete_cells(): row_obj.commit() actual_rows_keys = [] - for row in table.read_rows(): + for row in table_row_del_cells.read_rows(): actual_rows_keys.append(row.row_key) assert actual_rows_keys == [row_key1] # [START bigtable_row_delete_cells] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + + row_key = b"row_key_1" + row_obj = table.row(row_key) + row_obj.delete_cells(COLUMN_FAMILY_ID, [COL_NAME1, COL_NAME2]) row_obj.commit() # [END bigtable_row_delete_cells] - actual_rows_keys = [] for row in table.read_rows(): - actual_rows_keys.append(row.row_key) - assert actual_rows_keys == [] + assert not row.row_key def test_bigtable_row_clear(): + table_row_clear = Config.INSTANCE.table(TABLE_ID) + row_obj = table_row_clear.row(b"row_key_1") + row_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, b"cell-val") + + mutation_size = row_obj.get_mutations_size() + assert mutation_size > 0 + # [START bigtable_row_clear] from google.cloud.bigtable import Client @@ -1016,12 +1016,7 @@ def test_bigtable_row_clear(): row_key = b"row_key_1" row_obj = table.row(row_key) row_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, b"cell-val") - # [END bigtable_row_clear] - mutation_size = row_obj.get_mutations_size() - assert mutation_size > 0 - - # [START bigtable_row_clear] row_obj.clear() # [END bigtable_row_clear] From 3e5eb2a377126aa6fa3481f35fb46099bd0f1d06 Mon Sep 17 00:00:00 2001 From: Brian McCutchon <8671821+bmccutchon@users.noreply.github.com> Date: Fri, 3 May 2019 11:51:31 -0700 Subject: [PATCH 245/892] Fix typos in deprecation warnings. (#7858) --- .../google-cloud-bigtable/google/cloud/bigtable/instance.py | 2 +- packages/google-cloud-bigtable/google/cloud/bigtable/table.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py index bbe8496853e8..8a664778577a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py @@ -36,7 +36,7 @@ ) _INSTANCE_CREATE_WARNING = """ -Use of `instance.create({0}, {1}, {2})` will be depricated. +Use of `instance.create({0}, {1}, {2})` will be deprecated. Please replace with `cluster = instance.cluster({0}, {1}, {2})` `instance.create(clusters=[cluster])`.""" diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index 0d1fe98756bf..8a58cd8b6632 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -470,7 +470,7 @@ def yield_rows(self, **kwargs): :returns: A :class:`.PartialRowData` for each row returned """ warnings.warn( - "`yield_rows()` is depricated; use `red_rows()` instead", + "`yield_rows()` is deprecated; use `read_rows()` instead", DeprecationWarning, stacklevel=2, ) From 4a119ad4156ac1a211a0a0b103aafceee781abd1 Mon Sep 17 00:00:00 2001 From: sangramql <39852271+sangramql@users.noreply.github.com> Date: Tue, 7 May 2019 20:34:52 +0530 Subject: [PATCH 246/892] Removed duplicate snippet tags for Delete cluster. 
(#7860) --- packages/google-cloud-bigtable/docs/snippets.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/packages/google-cloud-bigtable/docs/snippets.py b/packages/google-cloud-bigtable/docs/snippets.py index 9f69e5340b83..3e80d09c667f 100644 --- a/packages/google-cloud-bigtable/docs/snippets.py +++ b/packages/google-cloud-bigtable/docs/snippets.py @@ -335,14 +335,11 @@ def test_bigtable_list_tables(): def test_bigtable_delete_cluster(): - # [START bigtable_delete_cluster] from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) cluster_id = "clus-my-" + unique_resource_id("-") - # [END bigtable_delete_cluster] - cluster = instance.cluster( cluster_id, location_id=ALT_LOCATION_ID, @@ -354,7 +351,12 @@ def test_bigtable_delete_cluster(): operation.result(timeout=1000) # [START bigtable_delete_cluster] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) cluster_to_delete = instance.cluster(cluster_id) + cluster_to_delete.delete() # [END bigtable_delete_cluster] assert not cluster_to_delete.exists() From 09ef63f163e227c53e5ae70ef5bf28c5c182ad50 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Wed, 8 May 2019 11:51:07 -0400 Subject: [PATCH 247/892] Add 'client_info' support to client. (#7876) Forward when constructing GAPIC API client objects. --- .../google/cloud/bigtable/client.py | 22 +++++- .../tests/unit/test_client.py | 71 +++++++++++++++++-- 2 files changed, 84 insertions(+), 9 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py index 8c5d35b09e4f..dc618e8c5c06 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py @@ -63,10 +63,12 @@ def _create_gapic_client(client_class): def inner(self): if self._emulator_host is None: - return client_class(credentials=self._credentials, client_info=_CLIENT_INFO) + return client_class( + credentials=self._credentials, client_info=self._client_info + ) else: return client_class( - channel=self._emulator_channel, client_info=_CLIENT_INFO + channel=self._emulator_channel, client_info=self._client_info ) return inner @@ -100,6 +102,13 @@ class Client(ClientWithProject): interact with the Instance Admin or Table Admin APIs. This requires the :const:`ADMIN_SCOPE`. Defaults to :data:`False`. + :type: client_info: :class:`google.api_core.client_info.ClientInfo` + :param client_info: + The client info used to send a user-agent string along with API + requests. If ``None``, then default info will be used. Generally, + you only need to set this if you're developing your own library + or partner tool. + :type channel: :instance: grpc.Channel :param channel (grpc.Channel): (Optional) DEPRECATED: A ``Channel`` instance through which to make calls. @@ -115,7 +124,13 @@ class Client(ClientWithProject): _instance_admin_client = None def __init__( - self, project=None, credentials=None, read_only=False, admin=False, channel=None + self, + project=None, + credentials=None, + read_only=False, + admin=False, + client_info=_CLIENT_INFO, + channel=None, ): if read_only and admin: raise ValueError( @@ -126,6 +141,7 @@ def __init__( # It **may** use those scopes in ``with_scopes_if_required``. 
self._read_only = bool(read_only) self._admin = bool(admin) + self._client_info = client_info self._emulator_host = os.getenv(BIGTABLE_EMULATOR) self._emulator_channel = None diff --git a/packages/google-cloud-bigtable/tests/unit/test_client.py b/packages/google-cloud-bigtable/tests/unit/test_client.py index 7bcbbd2b3db3..05a017d898af 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/test_client.py @@ -27,34 +27,32 @@ def _invoke_client_factory(self, client_class): return _create_gapic_client(client_class) def test_without_emulator(self): - from google.cloud.bigtable.client import _CLIENT_INFO - client_class = mock.Mock() credentials = _make_credentials() client = _Client(credentials) + client_info = client._client_info = mock.Mock() result = self._invoke_client_factory(client_class)(client) self.assertIs(result, client_class.return_value) client_class.assert_called_once_with( - credentials=client._credentials, client_info=_CLIENT_INFO + credentials=client._credentials, client_info=client_info ) def test_with_emulator(self): - from google.cloud.bigtable.client import _CLIENT_INFO - client_class = mock.Mock() emulator_host = emulator_channel = object() credentials = _make_credentials() client = _Client( credentials, emulator_host=emulator_host, emulator_channel=emulator_channel ) + client_info = client._client_info = mock.Mock() result = self._invoke_client_factory(client_class)(client) self.assertIs(result, client_class.return_value) client_class.assert_called_once_with( - channel=client._emulator_channel, client_info=_CLIENT_INFO + channel=client._emulator_channel, client_info=client_info ) @@ -82,6 +80,7 @@ def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) def test_constructor_defaults(self): + from google.cloud.bigtable.client import _CLIENT_INFO from google.cloud.bigtable.client import DATA_SCOPE credentials = _make_credentials() @@ -94,6 +93,7 @@ def test_constructor_defaults(self): self.assertIs(client._credentials, credentials.with_scopes.return_value) self.assertFalse(client._read_only) self.assertFalse(client._admin) + self.assertIs(client._client_info, _CLIENT_INFO) self.assertIsNone(client._channel) self.assertIsNone(client._emulator_host) self.assertIsNone(client._emulator_channel) @@ -105,6 +105,7 @@ def test_constructor_explicit(self): from google.cloud.bigtable.client import DATA_SCOPE credentials = _make_credentials() + client_info = mock.Mock() with warnings.catch_warnings(record=True) as warned: client = self._make_one( @@ -112,6 +113,7 @@ def test_constructor_explicit(self): credentials=credentials, read_only=False, admin=True, + client_info=client_info, channel=mock.sentinel.channel, ) @@ -121,6 +123,7 @@ def test_constructor_explicit(self): self.assertIs(client._credentials, credentials.with_scopes.return_value) self.assertFalse(client._read_only) self.assertTrue(client._admin) + self.assertIs(client._client_info, client_info) self.assertIs(client._channel, mock.sentinel.channel) self.assertEqual(client.SCOPE, (DATA_SCOPE, ADMIN_SCOPE)) @@ -182,6 +185,7 @@ def test_project_path_property(self): self.assertEqual(client.project_path, project_name) def test_table_data_client_not_initialized(self): + from google.cloud.bigtable.client import _CLIENT_INFO from google.cloud.bigtable_v2 import BigtableClient credentials = _make_credentials() @@ -189,6 +193,21 @@ def test_table_data_client_not_initialized(self): table_data_client = client.table_data_client 
self.assertIsInstance(table_data_client, BigtableClient) + self.assertIs(table_data_client._client_info, _CLIENT_INFO) + self.assertIs(client._table_data_client, table_data_client) + + def test_table_data_client_not_initialized_w_client_info(self): + from google.cloud.bigtable_v2 import BigtableClient + + credentials = _make_credentials() + client_info = mock.Mock() + client = self._make_one( + project=self.PROJECT, credentials=credentials, client_info=client_info + ) + + table_data_client = client.table_data_client + self.assertIsInstance(table_data_client, BigtableClient) + self.assertIs(table_data_client._client_info, client_info) self.assertIs(client._table_data_client, table_data_client) def test_table_data_client_initialized(self): @@ -208,6 +227,7 @@ def test_table_admin_client_not_initialized_no_admin_flag(self): client.table_admin_client() def test_table_admin_client_not_initialized_w_admin_flag(self): + from google.cloud.bigtable.client import _CLIENT_INFO from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient credentials = _make_credentials() @@ -217,6 +237,25 @@ def test_table_admin_client_not_initialized_w_admin_flag(self): table_admin_client = client.table_admin_client self.assertIsInstance(table_admin_client, BigtableTableAdminClient) + self.assertIs(table_admin_client._client_info, _CLIENT_INFO) + self.assertIs(client._table_admin_client, table_admin_client) + + def test_table_admin_client_not_initialized_w_client_info(self): + from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient + + credentials = _make_credentials() + client_info = mock.Mock() + client = self._make_one( + project=self.PROJECT, + credentials=credentials, + admin=True, + client_info=client_info, + ) + + table_admin_client = client.table_admin_client + self.assertIsInstance(table_admin_client, BigtableTableAdminClient) + self.assertIs(table_admin_client._client_info, client_info) + self.assertIs(client._table_admin_client, table_admin_client) def test_table_admin_client_initialized(self): credentials = _make_credentials() @@ -235,6 +274,7 @@ def test_instance_admin_client_not_initialized_no_admin_flag(self): client.instance_admin_client() def test_instance_admin_client_not_initialized_w_admin_flag(self): + from google.cloud.bigtable.client import _CLIENT_INFO from google.cloud.bigtable_admin_v2 import BigtableInstanceAdminClient credentials = _make_credentials() @@ -244,6 +284,25 @@ def test_instance_admin_client_not_initialized_w_admin_flag(self): instance_admin_client = client.instance_admin_client self.assertIsInstance(instance_admin_client, BigtableInstanceAdminClient) + self.assertIs(instance_admin_client._client_info, _CLIENT_INFO) + self.assertIs(client._instance_admin_client, instance_admin_client) + + def test_instance_admin_client_not_initialized_w_admin_and_client_info(self): + from google.cloud.bigtable_admin_v2 import BigtableInstanceAdminClient + + credentials = _make_credentials() + client_info = mock.Mock() + client = self._make_one( + project=self.PROJECT, + credentials=credentials, + admin=True, + client_info=client_info, + ) + + instance_admin_client = client.instance_admin_client + self.assertIsInstance(instance_admin_client, BigtableInstanceAdminClient) + self.assertIs(instance_admin_client._client_info, client_info) + self.assertIs(client._instance_admin_client, instance_admin_client) def test_instance_admin_client_initialized(self): credentials = _make_credentials() From 647e2b03bc4aff17a571b56844b150f7db61cba8 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: 
Wed, 8 May 2019 14:17:48 -0400 Subject: [PATCH 248/892] Fix client_info type in GAPIC-based client docstrings. (#7898) --- packages/google-cloud-bigtable/google/cloud/bigtable/client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py index dc618e8c5c06..f9a625b15843 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py @@ -102,7 +102,7 @@ class Client(ClientWithProject): interact with the Instance Admin or Table Admin APIs. This requires the :const:`ADMIN_SCOPE`. Defaults to :data:`False`. - :type: client_info: :class:`google.api_core.client_info.ClientInfo` + :type: client_info: :class:`google.api_core.gapic_v1.client_info.ClientInfo` :param client_info: The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. Generally, From 89659269d8f62ead44e5cec2b4ed2cac4eb5c618 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Thu, 16 May 2019 13:22:32 -0400 Subject: [PATCH 249/892] Pin 'google-cloud-core >= 1.0.0, < 2.0dev'. (#7993) --- packages/google-cloud-bigtable/setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index edfeb68755e4..6eacbe6e8e8b 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -30,7 +30,7 @@ release_status = 'Development Status :: 4 - Beta' dependencies = [ 'google-api-core[grpc] >= 1.6.0, < 2.0.0dev', - 'google-cloud-core >= 0.29.0, < 0.30dev', + "google-cloud-core >= 1.0.0, < 2.0dev", 'grpc-google-iam-v1 >= 0.11.4, < 0.12dev', ] extras = { From 014ecac9059fe66da7b61b55381a888f7c24eb3c Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Thu, 16 May 2019 17:09:05 -0400 Subject: [PATCH 250/892] Release bigtable-0.33.0 (#8002) --- packages/google-cloud-bigtable/CHANGELOG.md | 45 +++++++++++++++++++++ packages/google-cloud-bigtable/setup.py | 2 +- 2 files changed, 46 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index d8f70e35165c..3f508045fe57 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,51 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## 0.33.0 + +05-16-2019 11:51 PDT + + +### Implementation Changes +- Fix typos in deprecation warnings. ([#7858](https://github.com/googleapis/google-cloud-python/pull/7858)) +- Add deprecation warnings for to-be-removed features. ([#7532](https://github.com/googleapis/google-cloud-python/pull/7532)) +- Remove classifier for Python 3.4 for end-of-life. ([#7535](https://github.com/googleapis/google-cloud-python/pull/7535)) +- Improve `Policy` interchange w/ JSON, gRPC payloads. ([#7378](https://github.com/googleapis/google-cloud-python/pull/7378)) + +### New Features +- Add support for passing `client_info` to client. ([#7876](https://github.com/googleapis/google-cloud-python/pull/7876)) and ([#7898](https://github.com/googleapis/google-cloud-python/pull/7898)) +- Add `Table.mutation_timeout`, allowing override of config timeouts. ([#7424](https://github.com/googleapis/google-cloud-python/pull/7424)) + +### Dependencies +- Pin `google-cloud-core >= 1.0.0, < 2.0dev`. 
([#7993](https://github.com/googleapis/google-cloud-python/pull/7993)) + +### Documentation +- Remove duplicate snippet tags for Delete cluster. ([#7860](https://github.com/googleapis/google-cloud-python/pull/7860)) +- Fix rendering of instance admin snippets. ([#7797](https://github.com/googleapis/google-cloud-python/pull/7797)) +- Avoid leaking instances from snippets. ([#7800](https://github.com/googleapis/google-cloud-python/pull/7800)) +- Fix enum reference in documentation. ([#7724](https://github.com/googleapis/google-cloud-python/pull/7724)) +- Remove duplicate snippets. ([#7528](https://github.com/googleapis/google-cloud-python/pull/7528)) +- Add snippeds for Batcher, RowData, Row Operations, AppendRow. ([#7019](https://github.com/googleapis/google-cloud-python/pull/7019)) +- Add column family snippets. ([#7014](https://github.com/googleapis/google-cloud-python/pull/7014)) +- Add Row Set snippets. ([#7016](https://github.com/googleapis/google-cloud-python/pull/7016)) +- Update client library documentation URLs. ([#7307](https://github.com/googleapis/google-cloud-python/pull/7307)) +- Fix typos in Table docstrings. ([#7261](https://github.com/googleapis/google-cloud-python/pull/7261)) +- Update copyright headers (via synth). ([#7139](https://github.com/googleapis/google-cloud-python/pull/7139)) +- Fix linked classes in generated docstrings (via synth). ([#7060](https://github.com/googleapis/google-cloud-python/pull/7060)) + +### Internal / Testing Changes +- Run `instance_admin` system tests on a separate instance from `table_admin` and `data` system tests. ([#6579](https://github.com/googleapis/google-cloud-python/pull/6579)) +- Re-blacken. ([#7462](https://github.com/googleapis/google-cloud-python/pull/7462)) +- Copy lintified proto files (via synth). ([#7445](https://github.com/googleapis/google-cloud-python/pull/7445)) +- Remove unused message exports (via synth). ([#7264](https://github.com/googleapis/google-cloud-python/pull/7264)) +- Compare 0 using '!=', rather than 'is not'. ([#7312](https://github.com/googleapis/google-cloud-python/pull/7312)) +- Add protos as an artifact to library ([#7205](https://github.com/googleapis/google-cloud-python/pull/7205)) +- Protoc-generated serialization update. ([#7077](https://github.com/googleapis/google-cloud-python/pull/7077)) +- Blacken snippets. ([#7048](https://github.com/googleapis/google-cloud-python/pull/7048)) +- Bigtable client snippets ([#7020](https://github.com/googleapis/google-cloud-python/pull/7020)) +- Pick up order-of-enum fix from GAPIC generator. ([#6879](https://github.com/googleapis/google-cloud-python/pull/6879)) +- Plug systest instance leaks ([#7004](https://github.com/googleapis/google-cloud-python/pull/7004)) + ## 0.32.1 12-17-2018 16:38 PST diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 6eacbe6e8e8b..028a2030212b 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -22,7 +22,7 @@ name = 'google-cloud-bigtable' description = 'Google Cloud Bigtable API client library' -version = '0.32.1' +version = '0.33.0' # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' From e9842525ed6393155a3263d03022958cf7ceaf2f Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Fri, 17 May 2019 15:21:09 -0400 Subject: [PATCH 251/892] Use alabaster theme everwhere. (#8021) 'sphinx_rtd_theme' is no longer installed by default. 
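With 'sphinx_rtd_theme' no longer installed by default, a project that kept it as its
theme would have to declare the package as an explicit docs dependency and guard the
theme selection in docs/conf.py; switching to the bundled ``alabaster`` theme sidesteps
that. A rough sketch of the guarded form this change makes unnecessary (illustrative
only, not part of this patch):

    # docs/conf.py -- fallback pattern when sphinx_rtd_theme is optional
    try:
        import sphinx_rtd_theme

        html_theme = "sphinx_rtd_theme"
        html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
    except ImportError:
        html_theme = "alabaster"  # bundled with Sphinx, always available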
--- packages/google-cloud-bigtable/docs/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/docs/conf.py b/packages/google-cloud-bigtable/docs/conf.py index 51127e3ff14e..6fd3c053d056 100644 --- a/packages/google-cloud-bigtable/docs/conf.py +++ b/packages/google-cloud-bigtable/docs/conf.py @@ -120,7 +120,7 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. -html_theme = "sphinx_rtd_theme" +html_theme = "alabaster" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the From dfe707c1f363978caa92aac51375f2ab6c715c2c Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Fri, 24 May 2019 12:28:12 -0400 Subject: [PATCH 252/892] Docstring / formatting tweaks (via synth). (#8138) Supersedes #7592. --- .../cloud/bigtable_admin_v2/__init__.py | 1 + .../gapic/bigtable_instance_admin_client.py | 43 +++++++++---------- .../gapic/bigtable_table_admin_client.py | 24 ++++++----- .../cloud/bigtable_admin_v2/gapic/enums.py | 1 + .../bigtable_instance_admin_grpc_transport.py | 1 + .../bigtable_table_admin_grpc_transport.py | 1 + .../google/cloud/bigtable_admin_v2/types.py | 5 +++ .../google/cloud/bigtable_v2/__init__.py | 1 + .../bigtable_v2/gapic/bigtable_client.py | 2 + .../transports/bigtable_grpc_transport.py | 1 + .../google/cloud/bigtable_v2/types.py | 3 ++ packages/google-cloud-bigtable/synth.metadata | 12 +++--- packages/google-cloud-bigtable/synth.py | 12 ++++++ .../unit/gapic/v2/test_bigtable_client_v2.py | 1 + .../test_bigtable_instance_admin_client_v2.py | 1 + .../v2/test_bigtable_table_admin_client_v2.py | 1 + 16 files changed, 71 insertions(+), 39 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py index 68adeb90d471..501d8f24d3e1 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py @@ -14,6 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. + from __future__ import absolute_import from google.cloud.bigtable_admin_v2 import types diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py index 5d09c545cacc..608c50863d0e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py @@ -13,6 +13,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+ """Accesses the google.bigtable.admin.v2 BigtableInstanceAdmin API.""" import functools @@ -45,6 +46,7 @@ from google.protobuf import empty_pb2 from google.protobuf import field_mask_pb2 + _GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-bigtable").version @@ -82,22 +84,6 @@ def from_service_account_file(cls, filename, *args, **kwargs): from_service_account_json = from_service_account_file - @classmethod - def project_path(cls, project): - """Return a fully-qualified project string.""" - return google.api_core.path_template.expand( - "projects/{project}", project=project - ) - - @classmethod - def instance_path(cls, project, instance): - """Return a fully-qualified instance string.""" - return google.api_core.path_template.expand( - "projects/{project}/instances/{instance}", - project=project, - instance=instance, - ) - @classmethod def app_profile_path(cls, project, instance, app_profile): """Return a fully-qualified app_profile string.""" @@ -118,6 +104,15 @@ def cluster_path(cls, project, instance, cluster): cluster=cluster, ) + @classmethod + def instance_path(cls, project, instance): + """Return a fully-qualified instance string.""" + return google.api_core.path_template.expand( + "projects/{project}/instances/{instance}", + project=project, + instance=instance, + ) + @classmethod def location_path(cls, project, location): """Return a fully-qualified location string.""" @@ -127,6 +122,13 @@ def location_path(cls, project, location): location=location, ) + @classmethod + def project_path(cls, project): + """Return a fully-qualified project string.""" + return google.api_core.path_template.expand( + "projects/{project}", project=project + ) + def __init__( self, transport=None, @@ -1643,8 +1645,7 @@ def get_iam_policy( Args: resource (str): REQUIRED: The resource for which the policy is being requested. - ``resource`` is usually specified as a path. For example, a Project - resource is specified as ``projects/{project}``. + See the operation documentation for the appropriate value for this field. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. @@ -1719,8 +1720,7 @@ def set_iam_policy( Args: resource (str): REQUIRED: The resource for which the policy is being specified. - ``resource`` is usually specified as a path. For example, a Project - resource is specified as ``projects/{project}``. + See the operation documentation for the appropriate value for this field. policy (Union[dict, ~google.cloud.bigtable_admin_v2.types.Policy]): REQUIRED: The complete policy to be applied to the ``resource``. The size of the policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Cloud Platform services (such as Projects) @@ -1801,8 +1801,7 @@ def test_iam_permissions( Args: resource (str): REQUIRED: The resource for which the policy detail is being requested. - ``resource`` is usually specified as a path. For example, a Project - resource is specified as ``projects/{project}``. + See the operation documentation for the appropriate value for this field. permissions (list[str]): The set of permissions to check for the ``resource``. Permissions with wildcards (such as '*' or 'storage.*') are not allowed. 
For more information see `IAM diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py index 5e52cea50019..f2a7b983deaa 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py @@ -13,6 +13,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + """Accesses the google.bigtable.admin.v2 BigtableTableAdmin API.""" import functools @@ -50,6 +51,7 @@ from google.protobuf import empty_pb2 from google.protobuf import field_mask_pb2 + _GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-bigtable").version @@ -90,22 +92,22 @@ def from_service_account_file(cls, filename, *args, **kwargs): from_service_account_json = from_service_account_file @classmethod - def instance_path(cls, project, instance): - """Return a fully-qualified instance string.""" + def cluster_path(cls, project, instance, cluster): + """Return a fully-qualified cluster string.""" return google.api_core.path_template.expand( - "projects/{project}/instances/{instance}", + "projects/{project}/instances/{instance}/clusters/{cluster}", project=project, instance=instance, + cluster=cluster, ) @classmethod - def cluster_path(cls, project, instance, cluster): - """Return a fully-qualified cluster string.""" + def instance_path(cls, project, instance): + """Return a fully-qualified instance string.""" return google.api_core.path_template.expand( - "projects/{project}/instances/{instance}/clusters/{cluster}", + "projects/{project}/instances/{instance}", project=project, instance=instance, - cluster=cluster, ) @classmethod @@ -280,11 +282,11 @@ def create_table( ``["apple", "customer_1", "customer_2", "other"]`` - Key assignment: - - Tablet 1 ``[, apple) => {"a"}.`` - - Tablet 2 ``[apple, customer_1) => {"apple", "custom"}.`` + - Tablet 1 ``[, apple) => {"a"}.`` + - Tablet 2 ``[apple, customer_1) => {"apple", "custom"}.`` - Tablet 3 ``[customer_1, customer_2) => {"customer_1"}.`` - - Tablet 4 ``[customer_2, other) => {"customer_2"}.`` - - Tablet 5 ``[other, ) => {"other", "zz"}.`` + - Tablet 4 ``[customer_2, other) => {"customer_2"}.`` + - Tablet 5 ``[other, ) => {"other", "zz"}.`` If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_admin_v2.types.Split` diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py index 65ca75fd4360..ba847ea35974 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py @@ -13,6 +13,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+ """Wrappers for protocol buffer enum types.""" import enum diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py index ff1215750cae..15321e8723dd 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py @@ -14,6 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. + import google.api_core.grpc_helpers import google.api_core.operations_v1 diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py index 04877f5db347..38d075ad66db 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py @@ -14,6 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. + import google.api_core.grpc_helpers import google.api_core.operations_v1 diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types.py index 4dd4bc032564..95c4966d2663 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types.py @@ -14,6 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. + from __future__ import absolute_import import sys @@ -32,6 +33,8 @@ from google.protobuf import field_mask_pb2 from google.protobuf import timestamp_pb2 from google.rpc import status_pb2 +from google.type import expr_pb2 + _shared_modules = [ iam_policy_pb2, @@ -43,6 +46,7 @@ field_mask_pb2, timestamp_pb2, status_pb2, + expr_pb2, ] _local_modules = [ @@ -64,4 +68,5 @@ setattr(sys.modules[__name__], name, message) names.append(name) + __all__ = tuple(sorted(names)) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py index abd6662e60ed..ca18668ce49b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py @@ -14,6 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. + from __future__ import absolute_import from google.cloud.bigtable_v2 import types diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py index 8f59b1643ffb..f588203e565e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py @@ -13,6 +13,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. + """Accesses the google.bigtable.v2 Bigtable API.""" import pkg_resources @@ -33,6 +34,7 @@ from google.cloud.bigtable_v2.proto import bigtable_pb2_grpc from google.cloud.bigtable_v2.proto import data_pb2 + _GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-bigtable").version diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py index d45434f0502a..70dace995fec 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py @@ -14,6 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. + import google.api_core.grpc_helpers from google.cloud.bigtable_v2.proto import bigtable_pb2_grpc diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types.py index 30793e44021f..89fc8d3937cb 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types.py @@ -14,6 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. + from __future__ import absolute_import import sys @@ -25,6 +26,7 @@ from google.protobuf import wrappers_pb2 from google.rpc import status_pb2 + _shared_modules = [any_pb2, wrappers_pb2, status_pb2] _local_modules = [bigtable_pb2, data_pb2] @@ -41,4 +43,5 @@ setattr(sys.modules[__name__], name, message) names.append(name) + __all__ = tuple(sorted(names)) diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index f5ea3da99483..b45d1c78f185 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -1,26 +1,26 @@ { - "updateTime": "2019-02-28T13:11:55.008453Z", + "updateTime": "2019-05-24T15:47:25.801793Z", "sources": [ { "generator": { "name": "artman", - "version": "0.16.14", - "dockerImage": "googleapis/artman@sha256:f3d61ae45abaeefb6be5f228cda22732c2f1b00fb687c79c4bd4f2c42bb1e1a7" + "version": "0.20.0", + "dockerImage": "googleapis/artman@sha256:3246adac900f4bdbd62920e80de2e5877380e44036b3feae13667ec255ebf5ec" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "9c769d3a0e67e4df9b9e8eee480124c2700a7e6c", - "internalRef": "235997788" + "sha": "0537189470f04f24836d6959821c24197a0ed120", + "internalRef": "249742806" } }, { "template": { "name": "python_library", "origin": "synthtool.gcp", - "version": "2019.2.26" + "version": "2019.5.2" } } ], diff --git a/packages/google-cloud-bigtable/synth.py b/packages/google-cloud-bigtable/synth.py index edffa43640dd..0fb2f977b8b9 100644 --- a/packages/google-cloud-bigtable/synth.py +++ b/packages/google-cloud-bigtable/synth.py @@ -83,6 +83,18 @@ "__doc__)\n", ) +s.replace( + ["google/cloud/bigtable_v2/gapic/bigtable_client.py"], + "if ``true_mutations`` is empty, and at most\n\n\s*100000.", + "if ``true_mutations`` is empty, and at most 100000.", +) + +s.replace( + ["google/cloud/bigtable_v2/gapic/bigtable_client.py"], + "if ``false_mutations`` is empty, and at most\n\n\s*100000.", + 
"if ``false_mutations`` is empty, and at most 100000.", +) + # ---------------------------------------------------------------------------- # Add templated files # ---------------------------------------------------------------------------- diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_client_v2.py b/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_client_v2.py index edd5142e424e..c575a83e3cc7 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_client_v2.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_client_v2.py @@ -13,6 +13,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + """Unit tests.""" import mock diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py b/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py index 6e303c88a665..d127d63178a4 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py @@ -13,6 +13,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + """Unit tests.""" import mock diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py b/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py index d697f706b749..786a8357c321 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py @@ -13,6 +13,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + """Unit tests.""" import mock From e58609f05c576a12e9296d39b06e22184cd0f65b Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Sat, 25 May 2019 10:35:05 -0700 Subject: [PATCH 253/892] Fix coverage in 'types.py' (via synth). 
(#8149) --- .../google/cloud/bigtable_admin_v2/types.py | 2 +- .../google/cloud/bigtable_v2/types.py | 2 +- packages/google-cloud-bigtable/synth.metadata | 10 +++++----- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types.py index 95c4966d2663..b50402d9e38f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types.py @@ -58,7 +58,7 @@ names = [] -for module in _shared_modules: +for module in _shared_modules: # pragma: NO COVER for name, message in get_messages(module).items(): setattr(sys.modules[__name__], name, message) names.append(name) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types.py index 89fc8d3937cb..53937c1d1687 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types.py @@ -33,7 +33,7 @@ names = [] -for module in _shared_modules: +for module in _shared_modules: # pragma: NO COVER for name, message in get_messages(module).items(): setattr(sys.modules[__name__], name, message) names.append(name) diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index b45d1c78f185..1d1961b59f4e 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -1,19 +1,19 @@ { - "updateTime": "2019-05-24T15:47:25.801793Z", + "updateTime": "2019-05-25T12:15:38.296290Z", "sources": [ { "generator": { "name": "artman", - "version": "0.20.0", - "dockerImage": "googleapis/artman@sha256:3246adac900f4bdbd62920e80de2e5877380e44036b3feae13667ec255ebf5ec" + "version": "0.21.0", + "dockerImage": "googleapis/artman@sha256:28d4271586772b275cd3bc95cb46bd227a24d3c9048de45dccdb7f3afb0bfba9" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "0537189470f04f24836d6959821c24197a0ed120", - "internalRef": "249742806" + "sha": "7ca19138ccebe219a67be2245200e821b3e32123", + "internalRef": "249916728" } }, { From 96871d0d5a6771908154f768758009a8263d11df Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Fri, 14 Jun 2019 09:48:44 -0700 Subject: [PATCH 254/892] Add disclaimer to auto-generated template files (via synth). (#8308) --- packages/google-cloud-bigtable/.coveragerc | 1 + packages/google-cloud-bigtable/.flake8 | 1 + packages/google-cloud-bigtable/setup.cfg | 1 + packages/google-cloud-bigtable/synth.metadata | 10 +++++----- 4 files changed, 8 insertions(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/.coveragerc b/packages/google-cloud-bigtable/.coveragerc index 6b9ab9da4a1b..b178b094aa1d 100644 --- a/packages/google-cloud-bigtable/.coveragerc +++ b/packages/google-cloud-bigtable/.coveragerc @@ -1,3 +1,4 @@ +# Generated by synthtool. DO NOT EDIT! [run] branch = True diff --git a/packages/google-cloud-bigtable/.flake8 b/packages/google-cloud-bigtable/.flake8 index 61766fa84d02..0268ecc9c55c 100644 --- a/packages/google-cloud-bigtable/.flake8 +++ b/packages/google-cloud-bigtable/.flake8 @@ -1,3 +1,4 @@ +# Generated by synthtool. DO NOT EDIT! 
[flake8] ignore = E203, E266, E501, W503 exclude = diff --git a/packages/google-cloud-bigtable/setup.cfg b/packages/google-cloud-bigtable/setup.cfg index 2a9acf13daa9..3bd555500e37 100644 --- a/packages/google-cloud-bigtable/setup.cfg +++ b/packages/google-cloud-bigtable/setup.cfg @@ -1,2 +1,3 @@ +# Generated by synthtool. DO NOT EDIT! [bdist_wheel] universal = 1 diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index 1d1961b59f4e..2312b7e22f97 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -1,19 +1,19 @@ { - "updateTime": "2019-05-25T12:15:38.296290Z", + "updateTime": "2019-06-14T12:13:52.849217Z", "sources": [ { "generator": { "name": "artman", - "version": "0.21.0", - "dockerImage": "googleapis/artman@sha256:28d4271586772b275cd3bc95cb46bd227a24d3c9048de45dccdb7f3afb0bfba9" + "version": "0.25.0", + "dockerImage": "googleapis/artman@sha256:ef1a98ab1e2b8f05f4d9a56f27d63347aefe14020e5f2d585172b14ca76f1d90" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "7ca19138ccebe219a67be2245200e821b3e32123", - "internalRef": "249916728" + "sha": "c23b68eecb00c4d285a730a49b1d7d943cd56183", + "internalRef": "253113405" } }, { From 565a363b6da5b541c54bbd215356469b5514dcf3 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Mon, 17 Jun 2019 09:34:19 -0700 Subject: [PATCH 255/892] Declare encoding as utf-8 in pb2 files (via synth). (#8346) --- .../proto/bigtable_instance_admin_pb2.py | 1 + .../proto/bigtable_table_admin_pb2.py | 1 + .../google/cloud/bigtable_admin_v2/proto/common_pb2.py | 1 + .../cloud/bigtable_admin_v2/proto/instance_pb2.py | 1 + .../google/cloud/bigtable_admin_v2/proto/table_pb2.py | 1 + .../google/cloud/bigtable_v2/proto/bigtable_pb2.py | 1 + .../google/cloud/bigtable_v2/proto/data_pb2.py | 1 + packages/google-cloud-bigtable/synth.metadata | 10 +++++----- 8 files changed, 12 insertions(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py index 9f486cbb2cbd..01d3fa7e3a4d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/bigtable/admin_v2/proto/bigtable_instance_admin.proto diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py index 4a360a67d508..4bb9d8f1d328 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: google/cloud/bigtable/admin_v2/proto/bigtable_table_admin.proto diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2.py index bd0a50fe17ae..7d40f043d05c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/bigtable/admin_v2/proto/common.proto diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py index be1d54aa8925..49164dfe6693 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/bigtable/admin_v2/proto/instance.proto diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py index a50d828a246b..e15dd2ba5b3f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/bigtable/admin_v2/proto/table.proto diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py index 344ccfe67329..1c2b0f1ae134 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/bigtable_v2/proto/bigtable.proto diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py index 31ee6b4e550c..8e5cff816455 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: google/cloud/bigtable_v2/proto/data.proto diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index 2312b7e22f97..7aba65b27091 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -1,19 +1,19 @@ { - "updateTime": "2019-06-14T12:13:52.849217Z", + "updateTime": "2019-06-15T12:13:26.869854Z", "sources": [ { "generator": { "name": "artman", - "version": "0.25.0", - "dockerImage": "googleapis/artman@sha256:ef1a98ab1e2b8f05f4d9a56f27d63347aefe14020e5f2d585172b14ca76f1d90" + "version": "0.26.0", + "dockerImage": "googleapis/artman@sha256:6db0735b0d3beec5b887153a2a7c7411fc7bb53f73f6f389a822096bd14a3a15" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "c23b68eecb00c4d285a730a49b1d7d943cd56183", - "internalRef": "253113405" + "sha": "7b58b37559f6a5337c4c564518e9573d742df225", + "internalRef": "253322136" } }, { From 04a8fb42494dd447accd1656a2e762fdf3b54cd9 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Wed, 19 Jun 2019 13:28:31 -0400 Subject: [PATCH 256/892] Increase timeout for app profile update operation. (#8417) Poke-and-hope. Closes #7900. --- packages/google-cloud-bigtable/tests/system.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index 3631cf17e14a..37d6bbd395eb 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -610,7 +610,7 @@ def _test_modify_app_profile_helper( ) operation = app_profile.update(ignore_warnings) - operation.result(timeout=10) + operation.result(timeout=30) alt_app_profile = instance.app_profile(app_profile_id) alt_app_profile.reload() From fc89089f488c27b399b8e12dbb446d7069597677 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Thu, 20 Jun 2019 12:33:45 -0400 Subject: [PATCH 257/892] Plug systest / snippet instance leaks. (#8416) - Reuse same unique suffix across all instances in a systest / snippet run. - Increase timeouts for opeartions involving additional clusters. - Add retries for 429 responses to all 'delete' requests in snipped cleanup. - Move most cleanups inside the creating snippet function: leaving them around (particularly instances) to process end can block other jobs from running. - Perform cleanups for tables, clusters, and app profiles. 
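A rough sketch of the cleanup pattern this change applies, based on the snippets / system-test
diff that follows (illustrative only: `test_utils` is the repository's shared test-helper
package, and the resource ids, node count, and timeout below are placeholder values, not part
of this patch):

    from google.api_core.exceptions import TooManyRequests
    from google.cloud.bigtable import Client
    from test_utils.retry import RetryErrors
    from test_utils.system import unique_resource_id

    # One suffix shared by every resource created during a test run.
    UNIQUE_SUFFIX = unique_resource_id("-")

    # Tolerate quota-related 429 responses while tearing resources down.
    retry_429 = RetryErrors(TooManyRequests, max_tries=9)

    def create_instance_and_clean_up():
        client = Client(admin=True)
        instance = client.instance("inst-my-" + UNIQUE_SUFFIX)
        cluster = instance.cluster(
            "clus-my-" + UNIQUE_SUFFIX, location_id="us-central1-f", serve_nodes=3
        )
        operation = instance.create(clusters=[cluster])
        operation.result(timeout=100)  # wait for the long-running create

        try:
            assert instance.exists()
        finally:
            # Clean up inside the snippet itself rather than at process end.
            retry_429(instance.delete)()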
--- .../google-cloud-bigtable/docs/snippets.py | 124 +++++++++++------- .../google-cloud-bigtable/tests/system.py | 31 ++--- 2 files changed, 89 insertions(+), 66 deletions(-) diff --git a/packages/google-cloud-bigtable/docs/snippets.py b/packages/google-cloud-bigtable/docs/snippets.py index 3e80d09c667f..fa67220ad9b7 100644 --- a/packages/google-cloud-bigtable/docs/snippets.py +++ b/packages/google-cloud-bigtable/docs/snippets.py @@ -33,14 +33,17 @@ import pytest from test_utils.system import unique_resource_id +from test_utils.retry import RetryErrors from google.api_core.exceptions import NotFound +from google.api_core.exceptions import TooManyRequests from google.cloud._helpers import UTC from google.cloud.bigtable import Client from google.cloud.bigtable import enums -INSTANCE_ID = "snippet-tests" + unique_resource_id("-") -CLUSTER_ID = "clus-1-" + unique_resource_id("-") +UNIQUE_SUFFIX = unique_resource_id("-") +INSTANCE_ID = "snippet-tests" + UNIQUE_SUFFIX +CLUSTER_ID = "clus-1-" + UNIQUE_SUFFIX LOCATION_ID = "us-central1-f" ALT_LOCATION_ID = "us-central1-a" PRODUCTION = enums.Instance.Type.PRODUCTION @@ -55,6 +58,8 @@ LABELS = {LABEL_KEY: str(LABEL_STAMP)} INSTANCES_TO_DELETE = [] +retry_429 = RetryErrors(TooManyRequests, max_tries=9) + class Config(object): """Run-time configuration to be modified at set-up. @@ -81,13 +86,14 @@ def setup_module(): operation = Config.INSTANCE.create(clusters=[cluster]) # We want to make sure the operation completes. operation.result(timeout=100) - INSTANCES_TO_DELETE.append(Config.INSTANCE) def teardown_module(): + retry_429(Config.INSTANCE.delete)() + for instance in INSTANCES_TO_DELETE: try: - instance.delete() + retry_429(instance.delete)() except NotFound: pass @@ -97,8 +103,8 @@ def test_bigtable_create_instance(): from google.cloud.bigtable import Client from google.cloud.bigtable import enums - my_instance_id = "inst-my-" + unique_resource_id("-") - my_cluster_id = "clus-my-" + unique_resource_id("-") + my_instance_id = "inst-my-" + UNIQUE_SUFFIX + my_cluster_id = "clus-my-" + UNIQUE_SUFFIX location_id = "us-central1-f" serve_nodes = 3 storage_type = enums.StorageType.SSD @@ -115,15 +121,15 @@ def test_bigtable_create_instance(): ) operation = instance.create(clusters=[cluster]) - # Make sure this instance gets deleted after the test case. - INSTANCES_TO_DELETE.append(instance) - # We want to make sure the operation completes. operation.result(timeout=100) + # [END bigtable_create_prod_instance] - assert instance.exists() - instance.delete() + try: + assert instance.exists() + finally: + retry_429(instance.delete)() def test_bigtable_create_additional_cluster(): @@ -139,7 +145,7 @@ def test_bigtable_create_additional_cluster(): client = Client(admin=True) instance = client.instance(INSTANCE_ID) - cluster_id = "clus-my-" + unique_resource_id("-") + cluster_id = "clus-my-" + UNIQUE_SUFFIX location_id = "us-central1-a" serve_nodes = 3 storage_type = enums.StorageType.SSD @@ -154,9 +160,11 @@ def test_bigtable_create_additional_cluster(): # We want to make sure the operation completes. 
operation.result(timeout=100) # [END bigtable_create_cluster] - assert cluster.exists() - cluster.delete() + try: + assert cluster.exists() + finally: + retry_429(cluster.delete)() def test_bigtable_create_app_profile(): @@ -166,7 +174,7 @@ def test_bigtable_create_app_profile(): client = Client(admin=True) instance = client.instance(INSTANCE_ID) - app_profile_id = "app-prof-" + unique_resource_id("-") + app_profile_id = "app-prof-" + UNIQUE_SUFFIX description = "routing policy-multy" routing_policy_type = enums.RoutingPolicyType.ANY @@ -179,9 +187,11 @@ def test_bigtable_create_app_profile(): app_profile = app_profile.create(ignore_warnings=True) # [END bigtable_create_app_profile] - assert app_profile.exists() - app_profile.delete(ignore_warnings=True) + try: + assert app_profile.exists() + finally: + retry_429(app_profile.delete)(ignore_warnings=True) def test_bigtable_list_instances(): @@ -191,6 +201,7 @@ def test_bigtable_list_instances(): client = Client(admin=True) (instances_list, failed_locations_list) = client.list_instances() # [END bigtable_list_instances] + assert len(instances_list) > 0 @@ -202,6 +213,7 @@ def test_bigtable_list_clusters_on_instance(): instance = client.instance(INSTANCE_ID) (clusters_list, failed_locations_list) = instance.list_clusters() # [END bigtable_list_clusters_on_instance] + assert len(clusters_list) > 0 @@ -212,12 +224,13 @@ def test_bigtable_list_clusters_in_project(): client = Client(admin=True) (clusters_list, failed_locations_list) = client.list_clusters() # [END bigtable_list_clusters_in_project] + assert len(clusters_list) > 0 def test_bigtable_list_app_profiles(): app_profile = Config.INSTANCE.app_profile( - app_profile_id="app-prof-" + unique_resource_id("-"), + app_profile_id="app-prof-" + UNIQUE_SUFFIX, routing_policy_type=enums.RoutingPolicyType.ANY, ) app_profile = app_profile.create(ignore_warnings=True) @@ -230,7 +243,11 @@ def test_bigtable_list_app_profiles(): app_profiles_list = instance.list_app_profiles() # [END bigtable_list_app_profiles] - assert len(app_profiles_list) > 0 + + try: + assert len(app_profiles_list) > 0 + finally: + retry_429(app_profile.delete)(ignore_warnings=True) def test_bigtable_instance_exists(): @@ -241,6 +258,7 @@ def test_bigtable_instance_exists(): instance = client.instance(INSTANCE_ID) instance_exists = instance.exists() # [END bigtable_check_instance_exists] + assert instance_exists @@ -253,6 +271,7 @@ def test_bigtable_cluster_exists(): cluster = instance.cluster(CLUSTER_ID) cluster_exists = cluster.exists() # [END bigtable_check_cluster_exists] + assert cluster_exists @@ -264,6 +283,7 @@ def test_bigtable_reload_instance(): instance = client.instance(INSTANCE_ID) instance.reload() # [END bigtable_reload_instance] + assert instance.type_ == PRODUCTION.value @@ -276,6 +296,7 @@ def test_bigtable_reload_cluster(): cluster = instance.cluster(CLUSTER_ID) cluster.reload() # [END bigtable_reload_cluster] + assert cluster.serve_nodes == SERVER_NODES @@ -289,10 +310,8 @@ def test_bigtable_update_instance(): instance.display_name = display_name instance.update() # [END bigtable_update_instance] - assert instance.display_name == display_name - # Make sure this instance gets deleted after the test case. 
- INSTANCES_TO_DELETE.append(instance) + assert instance.display_name == display_name def test_bigtable_update_cluster(): @@ -305,6 +324,7 @@ def test_bigtable_update_cluster(): cluster.serve_nodes = 4 cluster.update() # [END bigtable_update_cluster] + assert cluster.serve_nodes == 4 @@ -320,10 +340,23 @@ def test_bigtable_create_table(): max_versions_rule = column_family.MaxVersionsGCRule(2) table.create(column_families={"cf1": max_versions_rule}) # [END bigtable_create_table] - assert table.exists() + + try: + assert table.exists() + finally: + retry_429(table.delete)() def test_bigtable_list_tables(): + from google.cloud.bigtable import Client + from google.cloud.bigtable import column_family + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table("to_list") + max_versions_rule = column_family.MaxVersionsGCRule(2) + table.create(column_families={"cf1": max_versions_rule}) + # [START bigtable_list_tables] from google.cloud.bigtable import Client @@ -331,7 +364,12 @@ def test_bigtable_list_tables(): instance = client.instance(INSTANCE_ID) tables_list = instance.list_tables() # [END bigtable_list_tables] - assert len(tables_list) > 0 + + table_names = [table.name for table in tables_list] + try: + assert table.name in table_names + finally: + retry_429(table.delete)() def test_bigtable_delete_cluster(): @@ -339,7 +377,7 @@ def test_bigtable_delete_cluster(): client = Client(admin=True) instance = client.instance(INSTANCE_ID) - cluster_id = "clus-my-" + unique_resource_id("-") + cluster_id = "clus-my-" + UNIQUE_SUFFIX cluster = instance.cluster( cluster_id, location_id=ALT_LOCATION_ID, @@ -359,6 +397,7 @@ def test_bigtable_delete_cluster(): cluster_to_delete.delete() # [END bigtable_delete_cluster] + assert not cluster_to_delete.exists() @@ -376,12 +415,13 @@ def test_bigtable_delete_instance(): ) operation = instance.create(clusters=[cluster]) - # Make sure this instance gets deleted after the test case. - INSTANCES_TO_DELETE.append(instance) # We want to make sure the operation completes. operation.result(timeout=100) + # Make sure this instance gets deleted after the test case. + INSTANCES_TO_DELETE.append(instance) + # [START bigtable_delete_instance] from google.cloud.bigtable import Client @@ -394,6 +434,9 @@ def test_bigtable_delete_instance(): assert not instance_to_delete.exists() + # Skip deleting it during module teardown if the assertion succeeds. 
+ INSTANCES_TO_DELETE.remove(instance) + def test_bigtable_test_iam_permissions(): # [START bigtable_test_iam_permissions] @@ -449,10 +492,6 @@ def test_bigtable_project_path(): project_path = client.project_path # [END bigtable_project_path] - _project_path = r"^projects/(?P[_a-zA-Z0-9][-_.a-zA-Z0-9]*)$" - _project_path_re = re.compile(_project_path) - assert _project_path_re.match(project_path) - def test_bigtable_table_data_client(): # [START bigtable_table_data_client] @@ -461,8 +500,6 @@ def test_bigtable_table_data_client(): client = Client(admin=True) table_data_client = client.table_data_client # [END bigtable_table_data_client] - assert "BigtableClient" in str(table_data_client) - def test_bigtable_table_admin_client(): # [START bigtable_table_admin_client] @@ -471,7 +508,6 @@ def test_bigtable_table_admin_client(): client = Client(admin=True) table_admin_client = client.table_admin_client # [END bigtable_table_admin_client] - assert "BigtableTableAdmin" in str(table_admin_client) def test_bigtable_instance_admin_client(): @@ -481,7 +517,6 @@ def test_bigtable_instance_admin_client(): client = Client(admin=True) instance_admin_client = client.instance_admin_client # [END bigtable_instance_admin_client] - assert "BigtableInstanceAdmin" in str(instance_admin_client) def test_bigtable_admins_policy(): @@ -579,13 +614,6 @@ def test_bigtable_instance_name(): instance_name = instance.name # [END bigtable_instance_name] - _instance_name_re = re.compile( - r"^projects/(?P[^/]+)/" - r"instances/(?P" - r"[a-z][-a-z0-9]*)$" - ) - assert _instance_name_re.match(instance_name) - def test_bigtable_cluster_name(): import re @@ -599,15 +627,6 @@ def test_bigtable_cluster_name(): cluster_name = cluster.name # [END bigtable_cluster_name] - _cluster_name_re = re.compile( - r"^projects/(?P[^/]+)/" - r"instances/(?P[^/]+)/" - r"clusters/(?P" - r"[_a-zA-Z0-9][-_.a-zA-Z0-9]*)$" - ) - - assert _cluster_name_re.match(cluster_name) - def test_bigtable_instance_from_pb(): # [START bigtable_instance_from_pb] @@ -624,6 +643,7 @@ def test_bigtable_instance_from_pb(): instance2 = instance.from_pb(instance_pb, client) # [END bigtable_instance_from_pb] + assert instance2.name == instance.name @@ -648,6 +668,7 @@ def test_bigtable_cluster_from_pb(): cluster2 = cluster.from_pb(cluster_pb, instance) # [END bigtable_cluster_from_pb] + assert cluster2.name == cluster.name @@ -659,6 +680,7 @@ def test_bigtable_instance_state(): instance = client.instance(INSTANCE_ID) instance_state = instance.state # [END bigtable_instance_state] + assert not instance_state diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index 37d6bbd395eb..a97996abca2f 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -39,9 +39,10 @@ from test_utils.system import EmulatorCreds from test_utils.system import unique_resource_id +UNIQUE_SUFFIX = unique_resource_id("-") LOCATION_ID = "us-central1-c" -INSTANCE_ID = "g-c-p" + unique_resource_id("-") -INSTANCE_ID_DATA = "g-c-p-d" + unique_resource_id("-") +INSTANCE_ID = "g-c-p" + UNIQUE_SUFFIX +INSTANCE_ID_DATA = "g-c-p-d" + UNIQUE_SUFFIX TABLE_ID = "google-cloud-python-test-table" CLUSTER_ID = INSTANCE_ID + "-cluster" CLUSTER_ID_DATA = INSTANCE_ID_DATA + "-cluster" @@ -125,10 +126,10 @@ def setUpModule(): EXISTING_INSTANCES[:] = instances # After listing, create the test instances. 
- created_op = Config.INSTANCE.create(clusters=[Config.CLUSTER]) - created_op.result(timeout=10) - created_op = Config.INSTANCE_DATA.create(clusters=[Config.CLUSTER_DATA]) - created_op.result(timeout=10) + admin_op = Config.INSTANCE.create(clusters=[Config.CLUSTER]) + admin_op.result(timeout=10) + data_op = Config.INSTANCE_DATA.create(clusters=[Config.CLUSTER_DATA]) + data_op.result(timeout=10) def tearDownModule(): @@ -140,7 +141,7 @@ def tearDownModule(): class TestInstanceAdminAPI(unittest.TestCase): def setUp(self): if Config.IN_EMULATOR: - self.skipTest("Instance Admin API not supported in Bigtable emulator") + self.skipTest("Instance Admin API not supported in emulator") self.instances_to_delete = [] def tearDown(self): @@ -172,7 +173,7 @@ def test_reload(self): def test_create_instance_defaults(self): from google.cloud.bigtable import enums - ALT_INSTANCE_ID = "ndef" + unique_resource_id("-") + ALT_INSTANCE_ID = "ndef" + UNIQUE_SUFFIX instance = Config.CLIENT.instance(ALT_INSTANCE_ID, labels=LABELS) ALT_CLUSTER_ID = ALT_INSTANCE_ID + "-cluster" cluster = instance.cluster( @@ -200,9 +201,8 @@ def test_create_instance(self): from google.cloud.bigtable import enums _DEVELOPMENT = enums.Instance.Type.DEVELOPMENT - _STATE = enums.Instance.State.READY - ALT_INSTANCE_ID = "new" + unique_resource_id("-") + ALT_INSTANCE_ID = "new" + UNIQUE_SUFFIX instance = Config.CLIENT.instance( ALT_INSTANCE_ID, instance_type=_DEVELOPMENT, labels=LABELS ) @@ -224,7 +224,7 @@ def test_create_instance(self): self.assertEqual(instance.display_name, instance_alt.display_name) self.assertEqual(instance.type_, instance_alt.type_) self.assertEqual(instance_alt.labels, LABELS) - self.assertEqual(_STATE, instance_alt.state) + self.assertEqual(instance_alt.state, enums.Instance.State.READY) def test_cluster_exists(self): NONEXISTING_CLUSTER_ID = "cluster-id" @@ -246,7 +246,7 @@ def test_create_instance_w_two_clusters(self): from google.cloud.bigtable.table import ClusterState _PRODUCTION = enums.Instance.Type.PRODUCTION - ALT_INSTANCE_ID = "dif" + unique_resource_id("-") + ALT_INSTANCE_ID = "dif" + UNIQUE_SUFFIX instance = Config.CLIENT.instance( ALT_INSTANCE_ID, instance_type=_PRODUCTION, labels=LABELS ) @@ -273,7 +273,7 @@ def test_create_instance_w_two_clusters(self): self.instances_to_delete.append(instance) # We want to make sure the operation completes. - operation.result(timeout=10) + operation.result(timeout=30) # Create a new instance instance and make sure it is the same. instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID) @@ -466,11 +466,12 @@ def test_update_type(self): _DEVELOPMENT = Instance.Type.DEVELOPMENT _PRODUCTION = Instance.Type.PRODUCTION - ALT_INSTANCE_ID = "ndif" + unique_resource_id("-") + ALT_INSTANCE_ID = "ndif" + UNIQUE_SUFFIX instance = Config.CLIENT.instance( ALT_INSTANCE_ID, instance_type=_DEVELOPMENT, labels=LABELS ) operation = instance.create(location_id=LOCATION_ID, serve_nodes=None) + # Make sure this instance gets deleted after the test case. self.instances_to_delete.append(instance) @@ -530,7 +531,7 @@ def test_create_cluster(self): operation = cluster_2.create() # We want to make sure the operation completes. - operation.result(timeout=10) + operation.result(timeout=30) # Create a new object instance, reload and make sure it is the same. 
alt_cluster = Config.INSTANCE.cluster(ALT_CLUSTER_ID) From 484c8df1aeb827adaf895e1cb51d672cc61b2863 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Thu, 20 Jun 2019 13:27:15 -0400 Subject: [PATCH 258/892] Force timeout for table creation to 90 seconds (in systests). (#8450) Closes #5928. --- .../google-cloud-bigtable/docs/snippets.py | 2 +- .../google-cloud-bigtable/tests/system.py | 23 +++++++++++++------ 2 files changed, 17 insertions(+), 8 deletions(-) diff --git a/packages/google-cloud-bigtable/docs/snippets.py b/packages/google-cloud-bigtable/docs/snippets.py index fa67220ad9b7..458b0411605f 100644 --- a/packages/google-cloud-bigtable/docs/snippets.py +++ b/packages/google-cloud-bigtable/docs/snippets.py @@ -415,7 +415,6 @@ def test_bigtable_delete_instance(): ) operation = instance.create(clusters=[cluster]) - # We want to make sure the operation completes. operation.result(timeout=100) @@ -501,6 +500,7 @@ def test_bigtable_table_data_client(): table_data_client = client.table_data_client # [END bigtable_table_data_client] + def test_bigtable_table_admin_client(): # [START bigtable_table_admin_client] from google.cloud.bigtable import Client diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index a97996abca2f..28d95d985ffe 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -15,10 +15,15 @@ import datetime import operator import os - import unittest from google.api_core.exceptions import TooManyRequests +from google.cloud.environment_vars import BIGTABLE_EMULATOR +from test_utils.retry import RetryErrors +from test_utils.retry import RetryResult +from test_utils.system import EmulatorCreds +from test_utils.system import unique_resource_id + from google.cloud._helpers import _datetime_from_microseconds from google.cloud._helpers import _microseconds_from_datetime from google.cloud._helpers import UTC @@ -30,14 +35,11 @@ from google.cloud.bigtable.row_filters import RowFilterUnion from google.cloud.bigtable.row_data import Cell from google.cloud.bigtable.row_data import PartialRowData -from google.cloud.environment_vars import BIGTABLE_EMULATOR from google.cloud.bigtable.row_set import RowSet from google.cloud.bigtable.row_set import RowRange - -from test_utils.retry import RetryErrors -from test_utils.retry import RetryResult -from test_utils.system import EmulatorCreds -from test_utils.system import unique_resource_id +from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_table_admin_client_config as table_admin_config, +) UNIQUE_SUFFIX = unique_resource_id("-") LOCATION_ID = "us-central1-c" @@ -97,6 +99,13 @@ def setUpModule(): from google.cloud.exceptions import GrpcRendezvous from google.cloud.bigtable.enums import Instance + # See: https://github.com/googleapis/google-cloud-python/issues/5928 + interfaces = table_admin_config.config["interfaces"] + iface_config = interfaces["google.bigtable.admin.v2.BigtableTableAdmin"] + methods = iface_config["methods"] + create_table = methods["CreateTable"] + create_table["timeout_millis"] = 90000 + Config.IN_EMULATOR = os.getenv(BIGTABLE_EMULATOR) is not None if Config.IN_EMULATOR: From eb6fb8331a10f11edf45053848ffde6f6335144d Mon Sep 17 00:00:00 2001 From: Maxim Factourovich <38331387+mf2199@users.noreply.github.com> Date: Thu, 20 Jun 2019 21:05:26 +0300 Subject: [PATCH 259/892] Add 'PartialRowsData.cancel'. (#8176) Closes #7760. 
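A minimal usage sketch for the new cancellation support, matching the row_data.py diff that
follows (illustrative only: `table` is assumed to be an existing
google.cloud.bigtable.table.Table with readable data, and the 10-row limit is arbitrary):

    rows = table.read_rows()  # returns a PartialRowsData stream

    keys = []
    for row in rows:
        keys.append(row.row_key)
        if len(keys) >= 10:
            # Sets the internal cancelled flag and cancels the underlying
            # gRPC response iterator; iteration then stops at the next
            # check instead of consuming the rest of the stream.
            rows.cancel()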
--- .../google/cloud/bigtable/row_data.py | 8 +++++++- .../google-cloud-bigtable/tests/unit/test_row_data.py | 1 + 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py index aeb932243b42..c2335e9173f0 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py @@ -400,6 +400,9 @@ def __init__(self, read_method, request, retry=DEFAULT_RETRY_READ_ROWS): self.rows = {} self._state = self.STATE_NEW_ROW + # Flag to stop iteration, for any reason not related to self.retry() + self._cancelled = False + @property def state(self): """State machine state. @@ -412,6 +415,7 @@ def state(self): def cancel(self): """Cancels the iterator, closing the stream.""" + self._cancelled = True self.response_iterator.cancel() def consume_all(self, max_loops=None): @@ -460,7 +464,7 @@ def __iter__(self): Parse the response and its chunks into a new/existing row in :attr:`_rows`. Rows are returned in order by row key. """ - while True: + while not self._cancelled: try: response = self._read_next_response() except StopIteration: @@ -469,6 +473,8 @@ def __iter__(self): break for chunk in response.chunks: + if self._cancelled: + break self._process_chunk(chunk) if chunk.commit_row: self.last_scanned_row_key = self._previous_row.row_key diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_data.py b/packages/google-cloud-bigtable/tests/unit/test_row_data.py index 4aeb9e7b58da..b787233829b2 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_data.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_data.py @@ -529,6 +529,7 @@ def test_cancel(self): self.assertEqual(response_iterator.cancel_calls, 0) yield_rows_data.cancel() self.assertEqual(response_iterator.cancel_calls, 1) + self.assertEqual(list(yield_rows_data), []) # 'consume_next' tested via 'TestPartialRowsData_JSON_acceptance_tests' From bdeb3126ec5e7dba185eb821d6a918fc87e207e8 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Mon, 24 Jun 2019 08:14:37 -0700 Subject: [PATCH 260/892] Allow kwargs to be passed to create_channel (via synth). (#8458) --- .../bigtable_instance_admin_grpc_transport.py | 6 ++++-- .../transports/bigtable_table_admin_grpc_transport.py | 5 ++++- .../gapic/transports/bigtable_grpc_transport.py | 7 ++++++- packages/google-cloud-bigtable/synth.metadata | 10 +++++----- 4 files changed, 19 insertions(+), 9 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py index 15321e8723dd..f1fa71abc5dc 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py @@ -90,7 +90,7 @@ def __init__( @classmethod def create_channel( - cls, address="bigtableadmin.googleapis.com:443", credentials=None + cls, address="bigtableadmin.googleapis.com:443", credentials=None, **kwargs ): """Create and return a gRPC channel object. @@ -101,12 +101,14 @@ def create_channel( credentials identify this application to the service. 
If none are specified, the client will attempt to ascertain the credentials from the environment. + kwargs (dict): Keyword arguments, which are passed to the + channel creation. Returns: grpc.Channel: A gRPC channel object. """ return google.api_core.grpc_helpers.create_channel( - address, credentials=credentials, scopes=cls._OAUTH_SCOPES + address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs ) @property diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py index 38d075ad66db..6c882d5bea80 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py @@ -90,7 +90,7 @@ def __init__( @classmethod def create_channel( - cls, address="bigtableadmin.googleapis.com:443", credentials=None + cls, address="bigtableadmin.googleapis.com:443", credentials=None, **kwargs ): """Create and return a gRPC channel object. @@ -101,6 +101,8 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. + kwargs (dict): Keyword arguments, which are passed to the + channel creation. Returns: grpc.Channel: A gRPC channel object. @@ -113,6 +115,7 @@ def create_channel( "grpc.max_send_message_length": -1, "grpc.max_receive_message_length": -1, }.items(), + **kwargs ) @property diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py index 70dace995fec..145dd269f938 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py @@ -74,7 +74,9 @@ def __init__( self._stubs = {"bigtable_stub": bigtable_pb2_grpc.BigtableStub(channel)} @classmethod - def create_channel(cls, address="bigtable.googleapis.com:443", credentials=None): + def create_channel( + cls, address="bigtable.googleapis.com:443", credentials=None, **kwargs + ): """Create and return a gRPC channel object. Args: @@ -84,6 +86,8 @@ def create_channel(cls, address="bigtable.googleapis.com:443", credentials=None) credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. + kwargs (dict): Keyword arguments, which are passed to the + channel creation. Returns: grpc.Channel: A gRPC channel object. 
@@ -96,6 +100,7 @@ def create_channel(cls, address="bigtable.googleapis.com:443", credentials=None) "grpc.max_send_message_length": -1, "grpc.max_receive_message_length": -1, }.items(), + **kwargs ) @property diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index 7aba65b27091..4e173e27ec69 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -1,19 +1,19 @@ { - "updateTime": "2019-06-15T12:13:26.869854Z", + "updateTime": "2019-06-21T12:13:56.910269Z", "sources": [ { "generator": { "name": "artman", - "version": "0.26.0", - "dockerImage": "googleapis/artman@sha256:6db0735b0d3beec5b887153a2a7c7411fc7bb53f73f6f389a822096bd14a3a15" + "version": "0.29.0", + "dockerImage": "googleapis/artman@sha256:b79c8c20ee51e5302686c9d1294672d59290df1489be93749ef17d0172cc508d" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "7b58b37559f6a5337c4c564518e9573d742df225", - "internalRef": "253322136" + "sha": "c9546320bb83441a5a49b13a22d5552eef352105", + "internalRef": "254331898" } }, { From 624dce695a7c5db04b24e47adf3c7c56f091ed3b Mon Sep 17 00:00:00 2001 From: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Date: Tue, 25 Jun 2019 12:44:16 -0700 Subject: [PATCH 261/892] All: Add docs job to publish to googleapis.dev. (#8464) --- packages/google-cloud-bigtable/.repo-metadata.json | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 packages/google-cloud-bigtable/.repo-metadata.json diff --git a/packages/google-cloud-bigtable/.repo-metadata.json b/packages/google-cloud-bigtable/.repo-metadata.json new file mode 100644 index 000000000000..956c74b53395 --- /dev/null +++ b/packages/google-cloud-bigtable/.repo-metadata.json @@ -0,0 +1,13 @@ +{ + "name": "bigtable", + "name_pretty": "Cloud Bigtable", + "product_documentation": "https://cloud.google.com/bigtable", + "client_documentation": "https://googleapis.dev/python/bigtable/latest", + "issue_tracker": "https://issuetracker.google.com/savedsearches/559777", + "release_level": "ga", + "language": "python", + "repo": "googleapis/google-cloud-python", + "distribution_name": "google-cloud-bigtable", + "api_id": "bigtable.googleapis.com", + "requires_billing": true +} \ No newline at end of file From fc2de3b7d13c90af2007136ee80c6766a216a7ac Mon Sep 17 00:00:00 2001 From: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Date: Thu, 27 Jun 2019 14:29:17 -0700 Subject: [PATCH 262/892] Add nox session 'docs' to remaining manual clients. 
(#8478) --- .../google-cloud-bigtable/docs/README.rst | 1 + .../docs/client-intro.rst | 2 +- packages/google-cloud-bigtable/docs/conf.py | 47 +++++++++++++++++-- packages/google-cloud-bigtable/docs/index.rst | 2 +- .../google/cloud/bigtable/row_data.py | 2 +- packages/google-cloud-bigtable/noxfile.py | 21 +++++++++ 6 files changed, 67 insertions(+), 8 deletions(-) create mode 120000 packages/google-cloud-bigtable/docs/README.rst diff --git a/packages/google-cloud-bigtable/docs/README.rst b/packages/google-cloud-bigtable/docs/README.rst new file mode 120000 index 000000000000..89a0106941ff --- /dev/null +++ b/packages/google-cloud-bigtable/docs/README.rst @@ -0,0 +1 @@ +../README.rst \ No newline at end of file diff --git a/packages/google-cloud-bigtable/docs/client-intro.rst b/packages/google-cloud-bigtable/docs/client-intro.rst index cb31767f3c26..6a38437790e2 100644 --- a/packages/google-cloud-bigtable/docs/client-intro.rst +++ b/packages/google-cloud-bigtable/docs/client-intro.rst @@ -23,7 +23,7 @@ Configuration ------------- - For an overview of authentication in ``google-cloud-python``, - see :doc:`/core/auth`. + see `Authentication `_. - In addition to any authentication configuration, you can also set the :envvar:`GOOGLE_CLOUD_PROJECT` environment variable for the Google Cloud Console diff --git a/packages/google-cloud-bigtable/docs/conf.py b/packages/google-cloud-bigtable/docs/conf.py index 6fd3c053d056..c4a36d62aaef 100644 --- a/packages/google-cloud-bigtable/docs/conf.py +++ b/packages/google-cloud-bigtable/docs/conf.py @@ -25,7 +25,7 @@ # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. -# needs_sphinx = '1.0' +needs_sphinx = "1.6.3" # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom @@ -36,6 +36,7 @@ "sphinx.ext.intersphinx", "sphinx.ext.coverage", "sphinx.ext.napoleon", + "sphinx.ext.todo", "sphinx.ext.viewcode", ] @@ -44,13 +45,18 @@ autodoc_default_flags = ["members"] autosummary_generate = True + # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] +# Allow markdown includes (so releases.md can include CHANGLEOG.md) +# http://www.sphinx-doc.org/en/master/markdown.html +source_parsers = {".md": "recommonmark.parser.CommonMarkParser"} + # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] -source_suffix = ".rst" +source_suffix = [".rst", ".md"] # The encoding of source files. # source_encoding = 'utf-8-sig' @@ -116,6 +122,7 @@ # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = True + # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for @@ -125,7 +132,15 @@ # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. 
-# html_theme_options = {} +html_theme_options = { + "description": "Google Cloud Client Libraries for Python", + "github_user": "googleapis", + "github_repo": "google-cloud-python", + "github_banner": True, + "font_family": "'Roboto', Georgia, sans", + "head_font_family": "'Roboto', Georgia, serif", + "code_font_family": "'Roboto Mono', 'Consolas', monospace", +} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] @@ -214,6 +229,18 @@ # Output file base name for HTML help builder. htmlhelp_basename = "google-cloud-bigtable-doc" +# -- Options for warnings ------------------------------------------------------ + + +suppress_warnings = [ + # Temporarily suppress this to avoid "more than one target found for + # cross-reference" warning, which are intractable for us to avoid while in + # a mono-repo. + # See https://github.com/sphinx-doc/sphinx/blob + # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843 + "ref.python" +] + # -- Options for LaTeX output --------------------------------------------- latex_elements = { @@ -260,6 +287,7 @@ # If false, no module index is generated. # latex_domain_indices = True + # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples @@ -277,6 +305,7 @@ # If true, show URL addresses after external links. # man_show_urls = False + # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples @@ -289,7 +318,7 @@ u"google-cloud-bigtable Documentation", author, "google-cloud-bigtable", - "GAPIC library for the {metadata.shortName} v2 service", + "GAPIC library for Bigtable", "APIs", ) ] @@ -306,12 +335,20 @@ # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False + # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = { "python": ("http://python.readthedocs.org/en/latest/", None), - "gax": ("https://gax-python.readthedocs.org/en/latest/", None), + "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), + "google.api_core": ( + "https://googleapis.github.io/google-cloud-python/latest", + None, + ), + "grpc": ("https://grpc.io/grpc/python/", None), + "requests": ("http://docs.python-requests.org/en/master/", None), } + # Napoleon settings napoleon_google_docstring = True napoleon_numpy_docstring = True diff --git a/packages/google-cloud-bigtable/docs/index.rst b/packages/google-cloud-bigtable/docs/index.rst index 89277952bf29..8c76f79b80e1 100644 --- a/packages/google-cloud-bigtable/docs/index.rst +++ b/packages/google-cloud-bigtable/docs/index.rst @@ -1,4 +1,4 @@ -.. include:: /../bigtable/README.rst +.. include:: README.rst Using the API diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py index c2335e9173f0..093181cbaac9 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py @@ -458,7 +458,7 @@ def _read_next_response(self): return self.retry(self._read_next, on_error=self._on_error)() def __iter__(self): - """Consume the ``ReadRowsResponse``s from the stream. + """Consume the ``ReadRowsResponse`` s from the stream. 
Read the rows and yield each to the reader Parse the response and its chunks into a new/existing row in diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index a49d614b3e07..00c2b4793529 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -16,6 +16,7 @@ from __future__ import absolute_import import os +import shutil import nox @@ -135,6 +136,26 @@ def cover(session): session.run("coverage", "erase") +@nox.session(python="3.7") +def docs(session): + """Build the docs for this library.""" + + session.install("-e", ".") + session.install("sphinx", "alabaster", "recommonmark") + + shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) + session.run( + "sphinx-build", + "-W", # warnings as errors + "-T", # show full traceback on exception + "-N", # no colors + "-b", + "html", + "-d", + os.path.join("docs", "_build", "doctrees", ""), + os.path.join("docs", ""), + os.path.join("docs", "_build", "html", ""), + ) @nox.session(python=['2.7', '3.7']) def snippets(session): From 22b321441816caa329047fe73476624a455a5f2d Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Fri, 28 Jun 2019 09:03:59 -0700 Subject: [PATCH 263/892] Add 'client_options' support, update list method docstrings (via synth). (#8500) --- .../gapic/bigtable_instance_admin_client.py | 25 +++++++++++--- .../gapic/bigtable_table_admin_client.py | 33 ++++++++++++++----- .../bigtable_v2/gapic/bigtable_client.py | 17 +++++++++- packages/google-cloud-bigtable/synth.metadata | 10 +++--- 4 files changed, 65 insertions(+), 20 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py index 608c50863d0e..b2f84dff38c9 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py @@ -21,6 +21,7 @@ import warnings from google.oauth2 import service_account +import google.api_core.client_options import google.api_core.gapic_v1.client_info import google.api_core.gapic_v1.config import google.api_core.gapic_v1.method @@ -136,6 +137,7 @@ def __init__( credentials=None, client_config=None, client_info=None, + client_options=None, ): """Constructor. @@ -166,6 +168,9 @@ def __init__( API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. + client_options (Union[dict, google.api_core.client_options.ClientOptions]): + Client options used to set user options on the client. API Endpoint + should be set through client_options. """ # Raise deprecation warnings for things we want to go away. if client_config is not None: @@ -184,6 +189,15 @@ def __init__( stacklevel=2, ) + api_endpoint = self.SERVICE_ADDRESS + if client_options: + if type(client_options) == dict: + client_options = google.api_core.client_options.from_dict( + client_options + ) + if client_options.api_endpoint: + api_endpoint = client_options.api_endpoint + # Instantiate the transport. # The transport is responsible for handling serialization and # deserialization and actually sending data to the service. 
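# Editor's aside, not part of the patch above: a minimal sketch of how a
# caller could use the new ``client_options`` argument wired up in this hunk
# to point the client at a different endpoint. The import path and endpoint
# string are illustrative assumptions, not excerpts from this change.
from google.cloud import bigtable_admin_v2

admin_client = bigtable_admin_v2.BigtableInstanceAdminClient(
    client_options={"api_endpoint": "bigtableadmin.googleapis.com:443"}
)
# A google.api_core.client_options.ClientOptions instance works the same way;
# when no endpoint is supplied the client falls back to SERVICE_ADDRESS.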
@@ -192,6 +206,7 @@ def __init__( self.transport = transport( credentials=credentials, default_class=bigtable_instance_admin_grpc_transport.BigtableInstanceAdminGrpcTransport, + address=api_endpoint, ) else: if credentials: @@ -202,7 +217,7 @@ def __init__( self.transport = transport else: self.transport = bigtable_instance_admin_grpc_transport.BigtableInstanceAdminGrpcTransport( - address=self.SERVICE_ADDRESS, channel=channel, credentials=credentials + address=api_endpoint, channel=channel, credentials=credentials ) if client_info is None: @@ -1392,10 +1407,10 @@ def list_app_profiles( that is provided to the method. Returns: - A :class:`~google.gax.PageIterator` instance. By default, this - is an iterable of :class:`~google.cloud.bigtable_admin_v2.types.AppProfile` instances. - This object can also be configured to iterate over the pages - of the response through the `options` parameter. + A :class:`~google.api_core.page_iterator.PageIterator` instance. + An iterable of :class:`~google.cloud.bigtable_admin_v2.types.AppProfile` instances. + You can also iterate over the pages of the response + using its `pages` property. Raises: google.api_core.exceptions.GoogleAPICallError: If the request diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py index f2a7b983deaa..57734d90cef5 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py @@ -21,6 +21,7 @@ import warnings from google.oauth2 import service_account +import google.api_core.client_options import google.api_core.gapic_v1.client_info import google.api_core.gapic_v1.config import google.api_core.gapic_v1.method @@ -138,6 +139,7 @@ def __init__( credentials=None, client_config=None, client_info=None, + client_options=None, ): """Constructor. @@ -168,6 +170,9 @@ def __init__( API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. + client_options (Union[dict, google.api_core.client_options.ClientOptions]): + Client options used to set user options on the client. API Endpoint + should be set through client_options. """ # Raise deprecation warnings for things we want to go away. if client_config is not None: @@ -186,6 +191,15 @@ def __init__( stacklevel=2, ) + api_endpoint = self.SERVICE_ADDRESS + if client_options: + if type(client_options) == dict: + client_options = google.api_core.client_options.from_dict( + client_options + ) + if client_options.api_endpoint: + api_endpoint = client_options.api_endpoint + # Instantiate the transport. # The transport is responsible for handling serialization and # deserialization and actually sending data to the service. 
@@ -194,6 +208,7 @@ def __init__( self.transport = transport( credentials=credentials, default_class=bigtable_table_admin_grpc_transport.BigtableTableAdminGrpcTransport, + address=api_endpoint, ) else: if credentials: @@ -204,7 +219,7 @@ def __init__( self.transport = transport else: self.transport = bigtable_table_admin_grpc_transport.BigtableTableAdminGrpcTransport( - address=self.SERVICE_ADDRESS, channel=channel, credentials=credentials + address=api_endpoint, channel=channel, credentials=credentials ) if client_info is None: @@ -498,10 +513,10 @@ def list_tables( that is provided to the method. Returns: - A :class:`~google.gax.PageIterator` instance. By default, this - is an iterable of :class:`~google.cloud.bigtable_admin_v2.types.Table` instances. - This object can also be configured to iterate over the pages - of the response through the `options` parameter. + A :class:`~google.api_core.page_iterator.PageIterator` instance. + An iterable of :class:`~google.cloud.bigtable_admin_v2.types.Table` instances. + You can also iterate over the pages of the response + using its `pages` property. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -1282,10 +1297,10 @@ def list_snapshots( that is provided to the method. Returns: - A :class:`~google.gax.PageIterator` instance. By default, this - is an iterable of :class:`~google.cloud.bigtable_admin_v2.types.Snapshot` instances. - This object can also be configured to iterate over the pages - of the response through the `options` parameter. + A :class:`~google.api_core.page_iterator.PageIterator` instance. + An iterable of :class:`~google.cloud.bigtable_admin_v2.types.Snapshot` instances. + You can also iterate over the pages of the response + using its `pages` property. Raises: google.api_core.exceptions.GoogleAPICallError: If the request diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py index f588203e565e..8b9a8dedccfb 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py @@ -20,6 +20,7 @@ import warnings from google.oauth2 import service_account +import google.api_core.client_options import google.api_core.gapic_v1.client_info import google.api_core.gapic_v1.config import google.api_core.gapic_v1.method @@ -85,6 +86,7 @@ def __init__( credentials=None, client_config=None, client_info=None, + client_options=None, ): """Constructor. @@ -115,6 +117,9 @@ def __init__( API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. + client_options (Union[dict, google.api_core.client_options.ClientOptions]): + Client options used to set user options on the client. API Endpoint + should be set through client_options. """ # Raise deprecation warnings for things we want to go away. if client_config is not None: @@ -133,6 +138,15 @@ def __init__( stacklevel=2, ) + api_endpoint = self.SERVICE_ADDRESS + if client_options: + if type(client_options) == dict: + client_options = google.api_core.client_options.from_dict( + client_options + ) + if client_options.api_endpoint: + api_endpoint = client_options.api_endpoint + # Instantiate the transport. # The transport is responsible for handling serialization and # deserialization and actually sending data to the service. 
@@ -141,6 +155,7 @@ def __init__( self.transport = transport( credentials=credentials, default_class=bigtable_grpc_transport.BigtableGrpcTransport, + address=api_endpoint, ) else: if credentials: @@ -151,7 +166,7 @@ def __init__( self.transport = transport else: self.transport = bigtable_grpc_transport.BigtableGrpcTransport( - address=self.SERVICE_ADDRESS, channel=channel, credentials=credentials + address=api_endpoint, channel=channel, credentials=credentials ) if client_info is None: diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index 4e173e27ec69..655414469389 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -1,19 +1,19 @@ { - "updateTime": "2019-06-21T12:13:56.910269Z", + "updateTime": "2019-06-28T12:14:28.794480Z", "sources": [ { "generator": { "name": "artman", - "version": "0.29.0", - "dockerImage": "googleapis/artman@sha256:b79c8c20ee51e5302686c9d1294672d59290df1489be93749ef17d0172cc508d" + "version": "0.29.2", + "dockerImage": "googleapis/artman@sha256:45263333b058a4b3c26a8b7680a2710f43eae3d250f791a6cb66423991dcb2df" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "c9546320bb83441a5a49b13a22d5552eef352105", - "internalRef": "254331898" + "sha": "84c8ad4e52f8eec8f08a60636cfa597b86969b5c", + "internalRef": "255474859" } }, { From 6dbc209c31fe5acf4fa45da22fd2a67848222a22 Mon Sep 17 00:00:00 2001 From: sangramql <39852271+sangramql@users.noreply.github.com> Date: Tue, 2 Jul 2019 01:47:14 +0530 Subject: [PATCH 264/892] Bigtable appprofile snippets (#7033) * add conditional row snippets * add snippets * add app profile snippets * Implement pending review comments --- .../google-cloud-bigtable/docs/snippets.py | 81 +++++++++++++++++-- .../google/cloud/bigtable/app_profile.py | 46 ++++++++--- 2 files changed, 108 insertions(+), 19 deletions(-) diff --git a/packages/google-cloud-bigtable/docs/snippets.py b/packages/google-cloud-bigtable/docs/snippets.py index 458b0411605f..7f4071b41fd8 100644 --- a/packages/google-cloud-bigtable/docs/snippets.py +++ b/packages/google-cloud-bigtable/docs/snippets.py @@ -44,6 +44,8 @@ UNIQUE_SUFFIX = unique_resource_id("-") INSTANCE_ID = "snippet-tests" + UNIQUE_SUFFIX CLUSTER_ID = "clus-1-" + UNIQUE_SUFFIX +APP_PROFILE_ID = "app-prof" + UNIQUE_SUFFIX +ROUTING_POLICY_TYPE = enums.RoutingPolicyType.ANY LOCATION_ID = "us-central1-f" ALT_LOCATION_ID = "us-central1-a" PRODUCTION = enums.Instance.Type.PRODUCTION @@ -167,19 +169,22 @@ def test_bigtable_create_additional_cluster(): retry_429(cluster.delete)() -def test_bigtable_create_app_profile(): +def test_bigtable_create_reload_delete_app_profile(): + import re + # [START bigtable_create_app_profile] from google.cloud.bigtable import Client + from google.cloud.bigtable import enums + + routing_policy_type = enums.RoutingPolicyType.ANY client = Client(admin=True) instance = client.instance(INSTANCE_ID) - app_profile_id = "app-prof-" + UNIQUE_SUFFIX description = "routing policy-multy" - routing_policy_type = enums.RoutingPolicyType.ANY app_profile = instance.app_profile( - app_profile_id=app_profile_id, + app_profile_id=APP_PROFILE_ID, routing_policy_type=routing_policy_type, description=description, cluster_id=CLUSTER_ID, @@ -188,10 +193,70 @@ def test_bigtable_create_app_profile(): app_profile = app_profile.create(ignore_warnings=True) # [END bigtable_create_app_profile] - try: - assert app_profile.exists() - finally: - 
retry_429(app_profile.delete)(ignore_warnings=True) + # [START bigtable_app_profile_name] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + app_profile = instance.app_profile(APP_PROFILE_ID) + + app_profile_name = app_profile.name + # [END bigtable_app_profile_name] + _profile_name_re = re.compile( + r"^projects/(?P[^/]+)/" + r"instances/(?P[^/]+)/" + r"appProfiles/(?P" + r"[_a-zA-Z0-9][-_.a-zA-Z0-9]*)$" + ) + assert _profile_name_re.match(app_profile_name) + + # [START bigtable_app_profile_exists] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + app_profile = instance.app_profile(APP_PROFILE_ID) + + app_profile_exists = app_profile.exists() + # [END bigtable_app_profile_exists] + assert app_profile_exists + + # [START bigtable_reload_app_profile] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + app_profile = instance.app_profile(APP_PROFILE_ID) + + app_profile.reload() + # [END bigtable_reload_app_profile] + assert app_profile.routing_policy_type == ROUTING_POLICY_TYPE + + # [START bigtable_update_app_profile] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + app_profile = instance.app_profile(APP_PROFILE_ID) + app_profile.reload() + + description = "My new app profile" + app_profile.description = description + app_profile.update() + # [END bigtable_update_app_profile] + assert app_profile.description == description + + # [START bigtable_delete_app_profile] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + app_profile = instance.app_profile(APP_PROFILE_ID) + app_profile.reload() + + app_profile.delete(ignore_warnings=True) + # [END bigtable_delete_app_profile] + assert not app_profile.exists() def test_bigtable_list_instances(): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/app_profile.py b/packages/google-cloud-bigtable/google/cloud/bigtable/app_profile.py index 44246e829551..cb04ebfc78c7 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/app_profile.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/app_profile.py @@ -90,6 +90,12 @@ def name(self): This property will not change if ``app_profile_id`` does not, but the return value is not cached. + For example: + + .. literalinclude:: snippets.py + :start-after: [START bigtable_app_profile_name] + :end-before: [END bigtable_app_profile_name] + The AppProfile name is of the form ``"projects/../instances/../app_profile/{app_profile_id}"`` @@ -225,7 +231,14 @@ def _to_pb(self): return app_profile_pb def reload(self): - """Reload the metadata for this cluster""" + """Reload the metadata for this cluster + + For example: + + .. literalinclude:: snippets.py + :start-after: [START bigtable_reload_app_profile] + :end-before: [END bigtable_reload_app_profile] + """ app_profile_pb = self.instance_admin_client.get_app_profile(self.name) @@ -236,6 +249,12 @@ def reload(self): def exists(self): """Check whether the AppProfile already exists. + For example: + + .. literalinclude:: snippets.py + :start-after: [START bigtable_app_profile_exists] + :end-before: [END bigtable_app_profile_exists] + :rtype: bool :returns: True if the AppProfile exists, else False. 
""" @@ -256,17 +275,11 @@ def create(self, ignore_warnings=None): ``description``, ``cluster_id`` and ``allow_transactional_writes``. To change them before creating, reset the values via - .. code:: python - - app_profile.app_profile_id = 'i-changed-my-mind' - app_profile.routing_policy_type = ( - google.cloud.bigtable.enums.RoutingPolicyType.SINGLE - ) - app_profile.description = 'new-description' - app-profile.cluster_id = 'other-cluster-id' - app-profile.allow_transactional_writes = True + For example: - before calling :meth:`create`. + .. literalinclude:: snippets.py + :start-after: [START bigtable_create_app_profile] + :end-before: [END bigtable_create_app_profile] :type: ignore_warnings: bool :param: ignore_warnings: (Optional) If true, ignore safety checks when @@ -293,6 +306,11 @@ def update(self, ignore_warnings=None): ``cluster_id`` ``allow_transactional_writes`` + For example: + + .. literalinclude:: snippets.py + :start-after: [START bigtable_update_app_profile] + :end-before: [END bigtable_update_app_profile] """ update_mask_pb = field_mask_pb2.FieldMask() @@ -313,6 +331,12 @@ def update(self, ignore_warnings=None): def delete(self, ignore_warnings=None): """Delete this AppProfile. + For example: + + .. literalinclude:: snippets.py + :start-after: [START bigtable_delete_app_profile] + :end-before: [END bigtable_delete_app_profile] + :type: ignore_warnings: bool :param: ignore_warnings: If true, ignore safety checks when deleting the AppProfile. From 4be70a22336f3af16736d4ced71952cd6de55f15 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Thu, 11 Jul 2019 14:40:30 -0400 Subject: [PATCH 265/892] Update pin for 'grpc-google-iam-v1' to 0.12.3+. (#8647) For pubsub / kms, also update the import of the 'IAMPolicy' stub, which is no longer exported from the same location. Supersedes: #8639 Supersedes: #8640 Closes: #8574 Closes: #8576 Closes: #8577 Closes: #8585 Closes: #8587 Closes: #8591 Closes: #8594 Closes: #8595 Closes: #8598 --- packages/google-cloud-bigtable/setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 028a2030212b..b2dce3ca9767 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -31,7 +31,7 @@ dependencies = [ 'google-api-core[grpc] >= 1.6.0, < 2.0.0dev', "google-cloud-core >= 1.0.0, < 2.0dev", - 'grpc-google-iam-v1 >= 0.11.4, < 0.12dev', + "grpc-google-iam-v1 >= 0.12.3, < 0.13dev", ] extras = { } From e69c32da929895c5290fc069c084f30ffaa153dc Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Fri, 12 Jul 2019 09:51:59 -0700 Subject: [PATCH 266/892] Add 'options_' argument to clients' 'get_iam_policy' (via synth). 
(#8652) --- .../gapic/bigtable_instance_admin_client.py | 11 ++++++++++- .../gapic/bigtable_table_admin_client.py | 1 + .../google/cloud/bigtable_admin_v2/types.py | 2 ++ packages/google-cloud-bigtable/synth.metadata | 10 +++++----- 4 files changed, 18 insertions(+), 6 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py index b2f84dff38c9..56149bead294 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py @@ -42,6 +42,7 @@ from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2_grpc from google.cloud.bigtable_admin_v2.proto import instance_pb2 from google.iam.v1 import iam_policy_pb2 +from google.iam.v1 import options_pb2 from google.iam.v1 import policy_pb2 from google.longrunning import operations_pb2 from google.protobuf import empty_pb2 @@ -1641,6 +1642,7 @@ def delete_app_profile( def get_iam_policy( self, resource, + options_=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, @@ -1661,6 +1663,11 @@ def get_iam_policy( Args: resource (str): REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field. + options_ (Union[dict, ~google.cloud.bigtable_admin_v2.types.GetPolicyOptions]): OPTIONAL: A ``GetPolicyOptions`` object for specifying options to + ``GetIamPolicy``. This field is only used by Cloud IAM. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_admin_v2.types.GetPolicyOptions` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. 
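# Editor's aside, not part of the patch above: a hedged sketch of passing the
# new ``options_`` argument to ``get_iam_policy``. The project/instance names
# and the ``requested_policy_version`` value are assumptions for illustration;
# per the docstring, a GetPolicyOptions message may be passed instead of the
# dict form shown here.
from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableInstanceAdminClient()
resource = client.instance_path("my-project", "my-instance")
policy = client.get_iam_policy(
    resource, options_={"requested_policy_version": 3}
)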
@@ -1691,7 +1698,9 @@ def get_iam_policy( client_info=self._client_info, ) - request = iam_policy_pb2.GetIamPolicyRequest(resource=resource) + request = iam_policy_pb2.GetIamPolicyRequest( + resource=resource, options=options_ + ) if metadata is None: metadata = [] metadata = list(metadata) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py index 57734d90cef5..fe50d5d14bc4 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py @@ -46,6 +46,7 @@ from google.cloud.bigtable_admin_v2.proto import instance_pb2 from google.cloud.bigtable_admin_v2.proto import table_pb2 from google.iam.v1 import iam_policy_pb2 +from google.iam.v1 import options_pb2 from google.iam.v1 import policy_pb2 from google.longrunning import operations_pb2 from google.protobuf import duration_pb2 diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types.py index b50402d9e38f..eb2d919856e8 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types.py @@ -25,6 +25,7 @@ from google.cloud.bigtable_admin_v2.proto import instance_pb2 from google.cloud.bigtable_admin_v2.proto import table_pb2 from google.iam.v1 import iam_policy_pb2 +from google.iam.v1 import options_pb2 from google.iam.v1 import policy_pb2 from google.longrunning import operations_pb2 from google.protobuf import any_pb2 @@ -38,6 +39,7 @@ _shared_modules = [ iam_policy_pb2, + options_pb2, policy_pb2, operations_pb2, any_pb2, diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index 655414469389..d4e7bd8af5aa 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -1,19 +1,19 @@ { - "updateTime": "2019-06-28T12:14:28.794480Z", + "updateTime": "2019-07-12T12:14:23.307924Z", "sources": [ { "generator": { "name": "artman", - "version": "0.29.2", - "dockerImage": "googleapis/artman@sha256:45263333b058a4b3c26a8b7680a2710f43eae3d250f791a6cb66423991dcb2df" + "version": "0.29.4", + "dockerImage": "googleapis/artman@sha256:63f21e83cb92680b7001dc381069e962c9e6dee314fd8365ac554c07c89221fb" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "84c8ad4e52f8eec8f08a60636cfa597b86969b5c", - "internalRef": "255474859" + "sha": "47bd0c2ba33c28dd624a65dad382e02bb61d1618", + "internalRef": "257690259" } }, { From 89a15aa50a00b9217ff230c0563c601bd428a03f Mon Sep 17 00:00:00 2001 From: ylil93 Date: Mon, 15 Jul 2019 12:12:29 -0700 Subject: [PATCH 267/892] Add compatibility check badges to READMEs. 
(#8288) --- packages/google-cloud-bigtable/README.rst | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/README.rst b/packages/google-cloud-bigtable/README.rst index 71c007324eae..f2255b780c84 100644 --- a/packages/google-cloud-bigtable/README.rst +++ b/packages/google-cloud-bigtable/README.rst @@ -1,7 +1,7 @@ Python Client for Google Cloud Bigtable ======================================= -|beta| |pypi| |versions| +|beta| |pypi| |versions| |compat_check_pypi| |compat_check_github| `Google Cloud Bigtable`_ is Google's NoSQL Big Data database service. It's the same database that powers many core Google services, including Search, @@ -16,6 +16,10 @@ Analytics, Maps, and Gmail. :target: https://pypi.org/project/google-cloud-bigtable/ .. |versions| image:: https://img.shields.io/pypi/pyversions/google-cloud-bigtable.svg :target: https://pypi.org/project/google-cloud-bigtable/ +.. |compat_check_pypi| image:: https://python-compatibility-tools.appspot.com/one_badge_image?package=google-cloud-bigtable + :target: https://python-compatibility-tools.appspot.com/one_badge_target?package=google-cloud-bigtable +.. |compat_check_github| image:: https://python-compatibility-tools.appspot.com/one_badge_image?package=git%2Bgit%3A//github.com/googleapis/google-cloud-python.git%23subdirectory%3Dbigtable + :target: https://python-compatibility-tools.appspot.com/one_badge_target?package=git%2Bgit%3A//github.com/googleapis/google-cloud-python.git%23subdirectory%3Dbigtable .. _Google Cloud Bigtable: https://cloud.google.com/bigtable .. _Client Library Documentation: https://googleapis.github.io/google-cloud-python/latest/bigtable/usage.html .. _Product Documentation: https://cloud.google.com/bigtable/docs From 02a197a0ccc256d1739cddf1b98978fdf1dc39c7 Mon Sep 17 00:00:00 2001 From: Joar Wandborg Date: Wed, 17 Jul 2019 19:52:20 +0200 Subject: [PATCH 268/892] Add 'Cell.__repr__'. (#8683) --- .../google-cloud-bigtable/google/cloud/bigtable/row_data.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py index 093181cbaac9..24078b8496d8 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py @@ -86,6 +86,11 @@ def __eq__(self, other): def __ne__(self, other): return not self == other + def __repr__(self): + return "<{name} value={value!r} timestamp={timestamp}>".format( + name=self.__class__.__name__, value=self.value, timestamp=self.timestamp + ) + class PartialCellData(object): """Representation of partial cell in a Google Cloud Bigtable Table. From b0697873d6755c7a4c2004ffeb2aee148e0705dd Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Fri, 19 Jul 2019 13:31:47 -0400 Subject: [PATCH 269/892] Bump minimum version for google-api-core to 1.14.0. 
(#8709) --- packages/google-cloud-bigtable/setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index b2dce3ca9767..ef504ae8aefb 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -29,7 +29,7 @@ # 'Development Status :: 5 - Production/Stable' release_status = 'Development Status :: 4 - Beta' dependencies = [ - 'google-api-core[grpc] >= 1.6.0, < 2.0.0dev', + 'google-api-core[grpc] >= 1.14.0, < 2.0.0dev', "google-cloud-core >= 1.0.0, < 2.0dev", "grpc-google-iam-v1 >= 0.12.3, < 0.13dev", ] From 147a44d19dc4075a0a10cb135cc8cbf9bf595221 Mon Sep 17 00:00:00 2001 From: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Date: Fri, 19 Jul 2019 14:45:47 -0700 Subject: [PATCH 270/892] Link to googleapis.dev documentation in READMEs. (#8705) --- packages/google-cloud-bigtable/README.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/README.rst b/packages/google-cloud-bigtable/README.rst index f2255b780c84..e0d1dc5654e7 100644 --- a/packages/google-cloud-bigtable/README.rst +++ b/packages/google-cloud-bigtable/README.rst @@ -21,7 +21,7 @@ Analytics, Maps, and Gmail. .. |compat_check_github| image:: https://python-compatibility-tools.appspot.com/one_badge_image?package=git%2Bgit%3A//github.com/googleapis/google-cloud-python.git%23subdirectory%3Dbigtable :target: https://python-compatibility-tools.appspot.com/one_badge_target?package=git%2Bgit%3A//github.com/googleapis/google-cloud-python.git%23subdirectory%3Dbigtable .. _Google Cloud Bigtable: https://cloud.google.com/bigtable -.. _Client Library Documentation: https://googleapis.github.io/google-cloud-python/latest/bigtable/usage.html +.. _Client Library Documentation: https://googleapis.dev/python/bigtable/latest .. _Product Documentation: https://cloud.google.com/bigtable/docs Quick Start @@ -37,7 +37,7 @@ In order to use this library, you first need to go through the following steps: .. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project .. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project .. _Enable the Cloud Bigtable API.: https://cloud.google.com/bigtable -.. _Setup Authentication.: https://googleapis.github.io/google-cloud-python/latest/core/auth.html +.. _Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html Installation ~~~~~~~~~~~~ From cc06277f5dc68841cf1e6ab01100bb72d0d13f54 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Mon, 22 Jul 2019 10:39:00 -0700 Subject: [PATCH 271/892] Pick up changes to GAPIC client configuration (via synth). 
(#8724) --- .../bigtable_instance_admin_client_config.py | 64 +++++++++++------- .../bigtable_table_admin_client_config.py | 65 +++++++++++++------ .../gapic/bigtable_client_config.py | 54 ++++++++++----- packages/google-cloud-bigtable/synth.metadata | 10 +-- 4 files changed, 128 insertions(+), 65 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client_config.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client_config.py index 355020d508b3..b2ec35e0146e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client_config.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client_config.py @@ -3,114 +3,132 @@ "google.bigtable.admin.v2.BigtableInstanceAdmin": { "retry_codes": { "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], - "non_idempotent": ["UNAVAILABLE"], + "non_idempotent": [], }, "retry_params": { - "default": { - "initial_retry_delay_millis": 5, + "idempotent_params": { + "initial_retry_delay_millis": 1000, "retry_delay_multiplier": 2.0, "max_retry_delay_millis": 60000, "initial_rpc_timeout_millis": 60000, "rpc_timeout_multiplier": 1.0, "max_rpc_timeout_millis": 60000, "total_timeout_millis": 600000, - } + }, + "non_idempotent_params": { + "initial_retry_delay_millis": 0, + "retry_delay_multiplier": 1.0, + "max_retry_delay_millis": 0, + "initial_rpc_timeout_millis": 60000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 60000, + "total_timeout_millis": 60000, + }, + "non_idempotent_heavy_params": { + "initial_retry_delay_millis": 0, + "retry_delay_multiplier": 1.0, + "max_retry_delay_millis": 0, + "initial_rpc_timeout_millis": 300000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 300000, + "total_timeout_millis": 300000, + }, }, "methods": { "CreateInstance": { "timeout_millis": 60000, "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "retry_params_name": "non_idempotent_heavy_params", }, "GetInstance": { "timeout_millis": 60000, "retry_codes_name": "idempotent", - "retry_params_name": "default", + "retry_params_name": "idempotent_params", }, "ListInstances": { "timeout_millis": 60000, "retry_codes_name": "idempotent", - "retry_params_name": "default", + "retry_params_name": "idempotent_params", }, "UpdateInstance": { "timeout_millis": 60000, "retry_codes_name": "idempotent", - "retry_params_name": "default", + "retry_params_name": "idempotent_params", }, "PartialUpdateInstance": { "timeout_millis": 60000, "retry_codes_name": "idempotent", - "retry_params_name": "default", + "retry_params_name": "idempotent_params", }, "DeleteInstance": { "timeout_millis": 60000, "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "retry_params_name": "non_idempotent_params", }, "CreateCluster": { "timeout_millis": 60000, "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "retry_params_name": "non_idempotent_params", }, "GetCluster": { "timeout_millis": 60000, "retry_codes_name": "idempotent", - "retry_params_name": "default", + "retry_params_name": "idempotent_params", }, "ListClusters": { "timeout_millis": 60000, "retry_codes_name": "idempotent", - "retry_params_name": "default", + "retry_params_name": "idempotent_params", }, "UpdateCluster": { "timeout_millis": 60000, "retry_codes_name": "idempotent", - "retry_params_name": "default", + "retry_params_name": "idempotent_params", }, 
"DeleteCluster": { "timeout_millis": 60000, "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "retry_params_name": "non_idempotent_params", }, "CreateAppProfile": { "timeout_millis": 60000, "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "retry_params_name": "non_idempotent_params", }, "GetAppProfile": { "timeout_millis": 60000, "retry_codes_name": "idempotent", - "retry_params_name": "default", + "retry_params_name": "idempotent_params", }, "ListAppProfiles": { "timeout_millis": 60000, "retry_codes_name": "idempotent", - "retry_params_name": "default", + "retry_params_name": "idempotent_params", }, "UpdateAppProfile": { "timeout_millis": 60000, "retry_codes_name": "idempotent", - "retry_params_name": "default", + "retry_params_name": "idempotent_params", }, "DeleteAppProfile": { "timeout_millis": 60000, "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "retry_params_name": "non_idempotent_params", }, "GetIamPolicy": { "timeout_millis": 60000, "retry_codes_name": "idempotent", - "retry_params_name": "default", + "retry_params_name": "idempotent_params", }, "SetIamPolicy": { "timeout_millis": 60000, "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "retry_params_name": "non_idempotent_params", }, "TestIamPermissions": { "timeout_millis": 60000, "retry_codes_name": "idempotent", - "retry_params_name": "default", + "retry_params_name": "idempotent_params", }, }, } diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py index 4318e93365cb..1a3f59a06023 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py @@ -6,81 +6,108 @@ "non_idempotent": [], }, "retry_params": { - "default": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, + "idempotent_params": { + "initial_retry_delay_millis": 1000, + "retry_delay_multiplier": 2.0, "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 20000, + "initial_rpc_timeout_millis": 60000, "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 20000, + "max_rpc_timeout_millis": 60000, "total_timeout_millis": 600000, - } + }, + "non_idempotent_params": { + "initial_retry_delay_millis": 0, + "retry_delay_multiplier": 1.0, + "max_retry_delay_millis": 0, + "initial_rpc_timeout_millis": 60000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 60000, + "total_timeout_millis": 60000, + }, + "non_idempotent_heavy_params": { + "initial_retry_delay_millis": 0, + "retry_delay_multiplier": 1.0, + "max_retry_delay_millis": 0, + "initial_rpc_timeout_millis": 300000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 300000, + "total_timeout_millis": 300000, + }, + "drop_row_range_params": { + "initial_retry_delay_millis": 0, + "retry_delay_multiplier": 1.0, + "max_retry_delay_millis": 0, + "initial_rpc_timeout_millis": 3600000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 3600000, + "total_timeout_millis": 3600000, + }, }, "methods": { "CreateTable": { "timeout_millis": 130000, "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "retry_params_name": "non_idempotent_heavy_params", }, "CreateTableFromSnapshot": { "timeout_millis": 60000, 
"retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "retry_params_name": "non_idempotent_params", }, "ListTables": { "timeout_millis": 60000, "retry_codes_name": "idempotent", - "retry_params_name": "default", + "retry_params_name": "idempotent_params", }, "GetTable": { "timeout_millis": 60000, "retry_codes_name": "idempotent", - "retry_params_name": "default", + "retry_params_name": "idempotent_params", }, "DeleteTable": { "timeout_millis": 60000, "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "retry_params_name": "non_idempotent_params", }, "ModifyColumnFamilies": { "timeout_millis": 60000, "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "retry_params_name": "non_idempotent_heavy_params", }, "DropRowRange": { "timeout_millis": 900000, "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "retry_params_name": "drop_row_range_params", }, "GenerateConsistencyToken": { "timeout_millis": 60000, "retry_codes_name": "idempotent", - "retry_params_name": "default", + "retry_params_name": "idempotent_params", }, "CheckConsistency": { "timeout_millis": 60000, "retry_codes_name": "idempotent", - "retry_params_name": "default", + "retry_params_name": "idempotent_params", }, "SnapshotTable": { "timeout_millis": 60000, "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "retry_params_name": "non_idempotent_params", }, "GetSnapshot": { "timeout_millis": 60000, "retry_codes_name": "idempotent", - "retry_params_name": "default", + "retry_params_name": "idempotent_params", }, "ListSnapshots": { "timeout_millis": 60000, "retry_codes_name": "idempotent", - "retry_params_name": "default", + "retry_params_name": "idempotent_params", }, "DeleteSnapshot": { "timeout_millis": 60000, "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "retry_params_name": "non_idempotent_params", }, }, } diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client_config.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client_config.py index 04a7a2572e6b..3096f33e0c68 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client_config.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client_config.py @@ -6,55 +6,73 @@ "non_idempotent": [], }, "retry_params": { - "default": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, + "idempotent_params": { + "initial_retry_delay_millis": 10, + "retry_delay_multiplier": 2.0, "max_retry_delay_millis": 60000, "initial_rpc_timeout_millis": 20000, "rpc_timeout_multiplier": 1.0, "max_rpc_timeout_millis": 20000, "total_timeout_millis": 600000, }, - "streaming": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, + "non_idempotent_params": { + "initial_retry_delay_millis": 10, + "retry_delay_multiplier": 2.0, "max_retry_delay_millis": 60000, "initial_rpc_timeout_millis": 20000, "rpc_timeout_multiplier": 1.0, "max_rpc_timeout_millis": 20000, - "total_timeout_millis": 3600000, + "total_timeout_millis": 20000, + }, + "read_rows_params": { + "initial_retry_delay_millis": 10, + "retry_delay_multiplier": 2.0, + "max_retry_delay_millis": 60000, + "initial_rpc_timeout_millis": 300000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 300000, + "total_timeout_millis": 43200000, + }, + "mutate_rows_params": { + "initial_retry_delay_millis": 10, + "retry_delay_multiplier": 2.0, + "max_retry_delay_millis": 
60000, + "initial_rpc_timeout_millis": 60000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 60000, + "total_timeout_millis": 600000, }, }, "methods": { "ReadRows": { - "timeout_millis": 3600000, + "timeout_millis": 43200000, "retry_codes_name": "idempotent", - "retry_params_name": "streaming", + "retry_params_name": "read_rows_params", }, "SampleRowKeys": { - "timeout_millis": 60000, + "timeout_millis": 20000, "retry_codes_name": "idempotent", - "retry_params_name": "default", + "retry_params_name": "idempotent_params", }, "MutateRow": { - "timeout_millis": 60000, + "timeout_millis": 20000, "retry_codes_name": "idempotent", - "retry_params_name": "default", + "retry_params_name": "idempotent_params", }, "MutateRows": { "timeout_millis": 60000, "retry_codes_name": "idempotent", - "retry_params_name": "default", + "retry_params_name": "mutate_rows_params", }, "CheckAndMutateRow": { - "timeout_millis": 60000, + "timeout_millis": 20000, "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "retry_params_name": "non_idempotent_params", }, "ReadModifyWriteRow": { - "timeout_millis": 60000, + "timeout_millis": 20000, "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "retry_params_name": "non_idempotent_params", }, }, } diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index d4e7bd8af5aa..a53a33c7f0af 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -1,19 +1,19 @@ { - "updateTime": "2019-07-12T12:14:23.307924Z", + "updateTime": "2019-07-20T12:14:26.395003Z", "sources": [ { "generator": { "name": "artman", - "version": "0.29.4", - "dockerImage": "googleapis/artman@sha256:63f21e83cb92680b7001dc381069e962c9e6dee314fd8365ac554c07c89221fb" + "version": "0.30.1", + "dockerImage": "googleapis/artman@sha256:f1a2e851e5e012c59e1da4125480bb19878f86a4e7fac4f375f2e819956b5aa3" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "47bd0c2ba33c28dd624a65dad382e02bb61d1618", - "internalRef": "257690259" + "sha": "184ab77f4cee62332f8f9a689c70c9bea441f836", + "internalRef": "259048326" } }, { From a460f66da680996e2933a2a669021245ba8bbc6f Mon Sep 17 00:00:00 2001 From: Gurov Ilya Date: Fri, 26 Jul 2019 19:23:13 +0300 Subject: [PATCH 272/892] Separate row types to remove confusion around return types of 'row.commit'. 
(#8662) --- .../docs/snippets_table.py | 119 +++++++++++++++--- .../google/cloud/bigtable/table.py | 62 +++++++++ .../tests/unit/test_table.py | 69 ++++++---- 3 files changed, 206 insertions(+), 44 deletions(-) diff --git a/packages/google-cloud-bigtable/docs/snippets_table.py b/packages/google-cloud-bigtable/docs/snippets_table.py index e52e9469143d..0fbb16bf74ad 100644 --- a/packages/google-cloud-bigtable/docs/snippets_table.py +++ b/packages/google-cloud-bigtable/docs/snippets_table.py @@ -403,11 +403,94 @@ def test_bigtable_table_row(): row2_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1) row2_obj.commit() - actual_rows_keys = [] + written_row_keys = [] for row in table.read_rows(): - actual_rows_keys.append(row.row_key) + written_row_keys.append(row.row_key) - assert actual_rows_keys == row_keys + assert written_row_keys == row_keys + + table.truncate(timeout=300) + + +def test_bigtable_table_append_row(): + # [START bigtable_table_append_row] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + + row_keys = [b"row_key_1", b"row_key_2"] + row1_obj = table.append_row(row_keys[0]) + row2_obj = table.append_row(row_keys[1]) + # [END bigtable_table_append_row] + + row1_obj.append_cell_value(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1) + row1_obj.commit() + row2_obj.append_cell_value(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1) + row2_obj.commit() + + written_row_keys = [] + for row in table.read_rows(): + written_row_keys.append(row.row_key) + + assert written_row_keys == row_keys + + table.truncate(timeout=300) + + +def test_bigtable_table_direct_row(): + # [START bigtable_table_direct_row] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + + row_keys = [b"row_key_1", b"row_key_2"] + row1_obj = table.direct_row(row_keys[0]) + row2_obj = table.direct_row(row_keys[1]) + # [END bigtable_table_direct_row] + + row1_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1) + row1_obj.commit() + row2_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1) + row2_obj.commit() + + written_row_keys = [] + for row in table.read_rows(): + written_row_keys.append(row.row_key) + + assert written_row_keys == row_keys + + table.truncate(timeout=300) + + +def test_bigtable_table_conditional_row(): + # [START bigtable_table_conditional_row] + from google.cloud.bigtable import Client + from google.cloud.bigtable.row_filters import PassAllFilter + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + + row_keys = [b"row_key_1", b"row_key_2"] + filter_ = PassAllFilter(True) + row1_obj = table.conditional_row(row_keys[0], filter_=filter_) + row2_obj = table.conditional_row(row_keys[1], filter_=filter_) + # [END bigtable_table_conditional_row] + + row1_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1, state=False) + row1_obj.commit() + row2_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1, state=False) + row2_obj.commit() + + written_row_keys = [] + for row in table.read_rows(): + written_row_keys.append(row.row_key) + + assert written_row_keys == row_keys table.truncate(timeout=300) @@ -910,10 +993,10 @@ def test_bigtable_row_delete(): row_obj = table_row_del.row(b"row_key_1") row_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, b"cell-val") row_obj.commit() - actual_rows_keys = [] + written_row_keys = [] for row in table_row_del.read_rows(): - 
actual_rows_keys.append(row.row_key) - assert actual_rows_keys == [b"row_key_1"] + written_row_keys.append(row.row_key) + assert written_row_keys == [b"row_key_1"] # [START bigtable_row_delete] from google.cloud.bigtable import Client @@ -929,10 +1012,10 @@ def test_bigtable_row_delete(): row_obj.commit() # [END bigtable_row_delete] - actual_rows_keys = [] + written_row_keys = [] for row in table.read_rows(): - actual_rows_keys.append(row.row_key) - assert len(actual_rows_keys) == 0 + written_row_keys.append(row.row_key) + assert len(written_row_keys) == 0 def test_bigtable_row_delete_cell(): @@ -942,10 +1025,10 @@ def test_bigtable_row_delete_cell(): row_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1) row_obj.commit() - actual_rows_keys = [] + written_row_keys = [] for row in table_row_del_cell.read_rows(): - actual_rows_keys.append(row.row_key) - assert actual_rows_keys == [row_key1] + written_row_keys.append(row.row_key) + assert written_row_keys == [row_key1] # [START bigtable_row_delete_cell] from google.cloud.bigtable import Client @@ -975,10 +1058,10 @@ def test_bigtable_row_delete_cells(): row_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME2, CELL_VAL2) row_obj.commit() - actual_rows_keys = [] + written_row_keys = [] for row in table_row_del_cells.read_rows(): - actual_rows_keys.append(row.row_key) - assert actual_rows_keys == [row_key1] + written_row_keys.append(row.row_key) + assert written_row_keys == [row_key1] # [START bigtable_row_delete_cells] from google.cloud.bigtable import Client @@ -1075,11 +1158,11 @@ def test_bigtable_row_setcell_commit_rowkey(): row_obj.commit() # [END bigtable_row_commit] - actual_rows_keys = [] + written_row_keys = [] for row in table.read_rows(): - actual_rows_keys.append(row.row_key) + written_row_keys.append(row.row_key) - assert actual_rows_keys == [b"row_key_1", b"row_key_2"] + assert written_row_keys == [b"row_key_1", b"row_key_2"] # [START bigtable_row_row_key] from google.cloud.bigtable import Client diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index 8a58cd8b6632..4ced9fbde0c2 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -190,6 +190,14 @@ def row(self, row_key, filter_=None, append=False): :raises: :class:`ValueError ` if both ``filter_`` and ``append`` are used. """ + warnings.warn( + "This method will be deprecated in future versions. Please " + "use Table.append_row(), Table.conditional_row() " + "and Table.direct_row() methods instead.", + PendingDeprecationWarning, + stacklevel=2, + ) + if append and filter_ is not None: raise ValueError("At most one of filter_ and append can be set") if append: @@ -199,6 +207,60 @@ def row(self, row_key, filter_=None, append=False): else: return DirectRow(row_key, self) + def append_row(self, row_key): + """Create a :class:`~google.cloud.bigtable.row.AppendRow` associated with this table. + + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_table_append_row] + :end-before: [END bigtable_table_append_row] + + Args: + row_key (bytes): The key for the row being created. + + Returns: + A row owned by this table. + """ + return AppendRow(row_key, self) + + def direct_row(self, row_key): + """Create a :class:`~google.cloud.bigtable.row.DirectRow` associated with this table. + + For example: + + .. 
literalinclude:: snippets_table.py + :start-after: [START bigtable_table_direct_row] + :end-before: [END bigtable_table_direct_row] + + Args: + row_key (bytes): The key for the row being created. + + Returns: + A row owned by this table. + """ + return DirectRow(row_key, self) + + def conditional_row(self, row_key, filter_): + """Create a :class:`~google.cloud.bigtable.row.ConditionalRow` associated with this table. + + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_table_conditional_row] + :end-before: [END bigtable_table_conditional_row] + + Args: + row_key (bytes): The key for the row being created. + + filter_ (:class:`.RowFilter`): (Optional) Filter to be used for + conditional mutations. See :class:`.ConditionalRow` for more details. + + Returns: + A row owned by this table. + """ + return ConditionalRow(row_key, self, filter_=filter_) + def __eq__(self, other): if not isinstance(other, self.__class__): return NotImplemented diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index 25f468c730fe..495d8660d1f7 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -180,16 +180,19 @@ def test_constructor_wo_admin(self): self.assertIs(table._instance._client, client) self.assertEqual(table.name, self.TABLE_NAME) - def test_row_factory_direct(self): - from google.cloud.bigtable.row import DirectRow - - credentials = _make_credentials() + def _row_methods_helper(self): client = self._make_client( - project="project-id", credentials=credentials, admin=True + project="project-id", credentials=_make_credentials(), admin=True ) instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_one(self.TABLE_ID, instance) row_key = b"row_key" + return table, row_key + + def test_row_factory_direct(self): + from google.cloud.bigtable.row import DirectRow + + table, row_key = self._row_methods_helper() row = table.row(row_key) self.assertIsInstance(row, DirectRow) @@ -199,13 +202,7 @@ def test_row_factory_direct(self): def test_row_factory_conditional(self): from google.cloud.bigtable.row import ConditionalRow - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_one(self.TABLE_ID, instance) - row_key = b"row_key" + table, row_key = self._row_methods_helper() filter_ = object() row = table.row(row_key, filter_=filter_) @@ -216,28 +213,48 @@ def test_row_factory_conditional(self): def test_row_factory_append(self): from google.cloud.bigtable.row import AppendRow - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_one(self.TABLE_ID, instance) - row_key = b"row_key" + table, row_key = self._row_methods_helper() row = table.row(row_key, append=True) self.assertIsInstance(row, AppendRow) self.assertEqual(row._row_key, row_key) self.assertEqual(row._table, table) + def test_direct_row(self): + from google.cloud.bigtable.row import DirectRow + + table, row_key = self._row_methods_helper() + row = table.direct_row(row_key) + + self.assertIsInstance(row, DirectRow) + self.assertEqual(row._row_key, row_key) + self.assertEqual(row._table, table) + + def test_conditional_row(self): + from 
google.cloud.bigtable.row import ConditionalRow + + table, row_key = self._row_methods_helper() + filter_ = object() + row = table.conditional_row(row_key, filter_=filter_) + + self.assertIsInstance(row, ConditionalRow) + self.assertEqual(row._row_key, row_key) + self.assertEqual(row._table, table) + + def test_append_row(self): + from google.cloud.bigtable.row import AppendRow + + table, row_key = self._row_methods_helper() + row = table.append_row(row_key) + + self.assertIsInstance(row, AppendRow) + self.assertEqual(row._row_key, row_key) + self.assertEqual(row._table, table) + def test_row_factory_failure(self): - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_one(self.TABLE_ID, instance) + table, row_key = self._row_methods_helper() with self.assertRaises(ValueError): - table.row(b"row_key", filter_=object(), append=True) + table.row(row_key, filter_=object(), append=True) def test___eq__(self): credentials = _make_credentials() From 3a8b898642e92d26fe7df834dafca061170f2406 Mon Sep 17 00:00:00 2001 From: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Date: Mon, 29 Jul 2019 12:53:23 -0700 Subject: [PATCH 273/892] Update intersphinx mapping for requests. (#8805) --- packages/google-cloud-bigtable/docs/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/docs/conf.py b/packages/google-cloud-bigtable/docs/conf.py index c4a36d62aaef..938f270a92df 100644 --- a/packages/google-cloud-bigtable/docs/conf.py +++ b/packages/google-cloud-bigtable/docs/conf.py @@ -345,7 +345,7 @@ None, ), "grpc": ("https://grpc.io/grpc/python/", None), - "requests": ("http://docs.python-requests.org/en/master/", None), + "requests": ("https://2.python-requests.org/en/master/", None), } From e03baf2ea9fb6f97da3b1b6dc7f62d7bdcf668a8 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 30 Jul 2019 13:48:46 -0400 Subject: [PATCH 274/892] Release 0.34.0 (#8827) --- packages/google-cloud-bigtable/CHANGELOG.md | 38 +++++++++++++++++++++ packages/google-cloud-bigtable/setup.py | 2 +- 2 files changed, 39 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 3f508045fe57..f6e28c23cd4e 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,44 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## 0.34.0 + +07-30-2019 10:05 PDT + + +### Implementation Changes +- Pick up changes to GAPIC client configuration (via synth). ([#8724](https://github.com/googleapis/google-cloud-python/pull/8724)) +- Add `Cell.__repr__`. ([#8683](https://github.com/googleapis/google-cloud-python/pull/8683)) +- Increase timeout for app profile update operation. ([#8417](https://github.com/googleapis/google-cloud-python/pull/8417)) + +### New Features +- Add methods returning Separate row types to remove confusion around return types of `row.commit`. ([#8662](https://github.com/googleapis/google-cloud-python/pull/8662)) +- Add `options_` argument to clients' `get_iam_policy` (via synth). ([#8652](https://github.com/googleapis/google-cloud-python/pull/8652)) +- Add `client_options` support, update list method docstrings (via synth). ([#8500](https://github.com/googleapis/google-cloud-python/pull/8500)) + +### Dependencies +- Bump minimum version for google-api-core to 1.14.0. 
([#8709](https://github.com/googleapis/google-cloud-python/pull/8709)) +- Update pin for `grpc-google-iam-v1` to 0.12.3+. ([#8647](https://github.com/googleapis/google-cloud-python/pull/8647)) +- Allow kwargs to be passed to `create_channel` (via synth). ([#8458](https://github.com/googleapis/google-cloud-python/pull/8458)) +- Add `PartialRowsData.cancel`. ([#8176](https://github.com/googleapis/google-cloud-python/pull/8176)) + +### Documentation +- Update intersphinx mapping for requests. ([#8805](https://github.com/googleapis/google-cloud-python/pull/8805)) +- Link to googleapis.dev documentation in READMEs. ([#8705](https://github.com/googleapis/google-cloud-python/pull/8705)) +- Add compatibility check badges to READMEs. ([#8288](https://github.com/googleapis/google-cloud-python/pull/8288)) +- Add snppets illustrating use of application profiles. ([#7033](https://github.com/googleapis/google-cloud-python/pull/7033)) + +### Internal / Testing Changes +- Add nox session `docs` to remaining manual clients. ([#8478](https://github.com/googleapis/google-cloud-python/pull/8478)) +- All: Add docs job to publish to googleapis.dev. ([#8464](https://github.com/googleapis/google-cloud-python/pull/8464)) +- Force timeout for table creation to 90 seconds (in systests). ([#8450](https://github.com/googleapis/google-cloud-python/pull/8450)) +- Plug systest / snippet instance leaks. ([#8416](https://github.com/googleapis/google-cloud-python/pull/8416)) +- Declare encoding as utf-8 in pb2 files (via synth). ([#8346](https://github.com/googleapis/google-cloud-python/pull/8346)) +- Add disclaimer to auto-generated template files (via synth). ([#8308](https://github.com/googleapis/google-cloud-python/pull/8308)) +- Fix coverage in `types.py` (via synth). ([#8149](https://github.com/googleapis/google-cloud-python/pull/8149)) +- Integrate docstring / formatting tweaks (via synth). ([#8138](https://github.com/googleapis/google-cloud-python/pull/8138)) +- Use alabaster theme everwhere. ([#8021](https://github.com/googleapis/google-cloud-python/pull/8021)) + ## 0.33.0 05-16-2019 11:51 PDT diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index ef504ae8aefb..29a1de12501f 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -22,7 +22,7 @@ name = 'google-cloud-bigtable' description = 'Google Cloud Bigtable API client library' -version = '0.33.0' +version = '0.34.0' # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' From 7e1e5741b4c5c8671101ec0977cc907499fcfed2 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 6 Aug 2019 14:39:08 -0400 Subject: [PATCH 275/892] Remove send/recv msg size limit (via synth). (#8979) Closes #8946. 
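For reference, a minimal hand-rolled sketch of the behaviour this change bakes into the generated transports: building a channel with both gRPC message-size caps disabled. It relies only on the `grpc_helpers.create_channel` call and the `options={...}.items()` pattern visible in the diff below; the endpoint and scope values are illustrative examples (not taken from this patch), and application default credentials are assumed to be available.

    from google.api_core import grpc_helpers

    # Passing -1 lifts gRPC's default message-size limit in each direction.
    # The endpoint and scope below are example values, not part of this patch.
    channel = grpc_helpers.create_channel(
        "bigtable.googleapis.com:443",
        scopes=["https://www.googleapis.com/auth/bigtable.data"],
        options={
            "grpc.max_send_message_length": -1,
            "grpc.max_receive_message_length": -1,
        }.items(),
    )

A client stub constructed on such a channel can then exchange arbitrarily large messages, which is what the generated transports below now do by default instead of configuring it inside `create_channel`.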
--- .../gapic/bigtable_instance_admin_client.py | 76 +++++++++---------- .../gapic/bigtable_table_admin_client.py | 52 ++++++------- .../bigtable_instance_admin_grpc_transport.py | 9 ++- .../bigtable_table_admin_grpc_transport.py | 18 ++--- .../bigtable_v2/gapic/bigtable_client.py | 24 +++--- .../transports/bigtable_grpc_transport.py | 18 ++--- packages/google-cloud-bigtable/synth.metadata | 9 +-- packages/google-cloud-bigtable/synth.py | 13 ---- 8 files changed, 106 insertions(+), 113 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py index 56149bead294..fed633c8dc6b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py @@ -303,8 +303,8 @@ def create_instance( If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_admin_v2.types.Cluster` retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will not - be retried. + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. @@ -381,8 +381,8 @@ def get_instance( name (str): The unique name of the requested instance. Values are of the form ``projects//instances/``. retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will not - be retried. + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. @@ -453,8 +453,8 @@ def list_instances( requested. Values are of the form ``projects/``. page_token (str): DEPRECATED: This field is unused and ignored. retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will not - be retried. + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. @@ -556,8 +556,8 @@ def update_instance( - Keys and values must both be under 128 bytes. state (~google.cloud.bigtable_admin_v2.types.State): (``OutputOnly``) The current state of the instance. retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will not - be retried. + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. 
@@ -649,8 +649,8 @@ def partial_update_instance( If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_admin_v2.types.FieldMask` retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will not - be retried. + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. @@ -727,8 +727,8 @@ def delete_instance( name (str): The unique name of the instance to be deleted. Values are of the form ``projects//instances/``. retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will not - be retried. + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. @@ -819,8 +819,8 @@ def create_cluster( If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_admin_v2.types.Cluster` retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will not - be retried. + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. @@ -897,8 +897,8 @@ def get_cluster( name (str): The unique name of the requested cluster. Values are of the form ``projects//instances//clusters/``. retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will not - be retried. + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. @@ -972,8 +972,8 @@ def list_clusters( ``projects/myproject/instances/-``. page_token (str): DEPRECATED: This field is unused and ignored. retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will not - be retried. + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. @@ -1069,8 +1069,8 @@ def update_cluster( default_storage_type (~google.cloud.bigtable_admin_v2.types.StorageType): (``CreationOnly``) The type of storage used by this cluster to serve its parent instance's tables, unless explicitly overridden. retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will not - be retried. + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. 
Note that if ``retry`` is specified, the timeout applies to each individual attempt. @@ -1151,8 +1151,8 @@ def delete_cluster( name (str): The unique name of the cluster to be deleted. Values are of the form ``projects//instances//clusters/``. retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will not - be retried. + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. @@ -1236,8 +1236,8 @@ def create_app_profile( message :class:`~google.cloud.bigtable_admin_v2.types.AppProfile` ignore_warnings (bool): If true, ignore safety checks when creating the app profile. retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will not - be retried. + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. @@ -1311,8 +1311,8 @@ def get_app_profile( name (str): The unique name of the requested app profile. Values are of the form ``projects//instances//appProfiles/``. retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will not - be retried. + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. @@ -1399,8 +1399,8 @@ def list_app_profiles( page_size (int): Maximum number of results per page. CURRENTLY UNIMPLEMENTED AND IGNORED. retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will not - be retried. + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. @@ -1508,8 +1508,8 @@ def update_app_profile( message :class:`~google.cloud.bigtable_admin_v2.types.FieldMask` ignore_warnings (bool): If true, ignore safety checks when updating the app profile. retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will not - be retried. + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. @@ -1593,8 +1593,8 @@ def delete_app_profile( ``projects//instances//appProfiles/``. ignore_warnings (bool): If true, ignore safety checks when deleting the app profile. retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will not - be retried. + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. 
timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. @@ -1669,8 +1669,8 @@ def get_iam_policy( If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_admin_v2.types.GetPolicyOptions` retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will not - be retried. + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. @@ -1753,8 +1753,8 @@ def set_iam_policy( If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_admin_v2.types.Policy` retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will not - be retried. + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. @@ -1831,8 +1831,8 @@ def test_iam_permissions( information see `IAM Overview `__. retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will not - be retried. + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py index fe50d5d14bc4..844fd59a7b2a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py @@ -307,8 +307,8 @@ def create_table( If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_admin_v2.types.Split` retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will not - be retried. + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. @@ -409,8 +409,8 @@ def create_table_from_snapshot( form ``projects//instances//clusters//snapshots/``. retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will not - be retried. + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. 
@@ -505,8 +505,8 @@ def list_tables( page_size (int): Maximum number of results per page. CURRENTLY UNIMPLEMENTED AND IGNORED. retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will not - be retried. + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. @@ -594,8 +594,8 @@ def get_table( view (~google.cloud.bigtable_admin_v2.types.View): The view to be applied to the returned table's fields. Defaults to ``SCHEMA_VIEW`` if unspecified. retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will not - be retried. + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. @@ -664,8 +664,8 @@ def delete_table( name (str): The unique name of the table to be deleted. Values are of the form ``projects//instances//tables/
``. retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will not - be retried. + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. @@ -746,8 +746,8 @@ def modify_column_families( If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_admin_v2.types.Modification` retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will not - be retried. + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. @@ -826,8 +826,8 @@ def drop_row_range( zero length. delete_all_data_from_table (bool): Delete all rows in the table. Setting this to false is a no-op. retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will not - be retried. + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. @@ -908,8 +908,8 @@ def generate_consistency_token( Values are of the form ``projects//instances//tables/
``. retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will not - be retried. + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. @@ -988,8 +988,8 @@ def check_consistency( ``projects//instances//tables/
``. consistency_token (str): The token created using GenerateConsistencyToken for the Table. retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will not - be retried. + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. @@ -1104,8 +1104,8 @@ def snapshot_table( If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_admin_v2.types.Duration` retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will not - be retried. + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. @@ -1192,8 +1192,8 @@ def get_snapshot( name (str): The unique name of the requested snapshot. Values are of the form ``projects//instances//clusters//snapshots/``. retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will not - be retried. + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. @@ -1289,8 +1289,8 @@ def list_snapshots( streaming is performed per-page, this determines the maximum number of resources in a page. retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will not - be retried. + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. @@ -1381,8 +1381,8 @@ def delete_snapshot( name (str): The unique name of the snapshot to be deleted. Values are of the form ``projects//instances//clusters//snapshots/``. retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will not - be retried. + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py index f1fa71abc5dc..afb72e0c8ab9 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py @@ -69,7 +69,14 @@ def __init__( # Create the channel. 
if channel is None: - channel = self.create_channel(address=address, credentials=credentials) + channel = self.create_channel( + address=address, + credentials=credentials, + options={ + "grpc.max_send_message_length": -1, + "grpc.max_receive_message_length": -1, + }.items(), + ) self._channel = channel diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py index 6c882d5bea80..7b4432130eae 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py @@ -69,7 +69,14 @@ def __init__( # Create the channel. if channel is None: - channel = self.create_channel(address=address, credentials=credentials) + channel = self.create_channel( + address=address, + credentials=credentials, + options={ + "grpc.max_send_message_length": -1, + "grpc.max_receive_message_length": -1, + }.items(), + ) self._channel = channel @@ -108,14 +115,7 @@ def create_channel( grpc.Channel: A gRPC channel object. """ return google.api_core.grpc_helpers.create_channel( - address, - credentials=credentials, - scopes=cls._OAUTH_SCOPES, - options={ - "grpc.max_send_message_length": -1, - "grpc.max_receive_message_length": -1, - }.items(), - **kwargs + address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs ) @property diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py index 8b9a8dedccfb..36021068dfd8 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py @@ -238,8 +238,8 @@ def read_rows( rows_limit (long): The read will terminate after committing to N rows' worth of results. The default (zero) is to return all results. retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will not - be retried. + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. @@ -322,8 +322,8 @@ def sample_row_keys( app_profile_id (str): This value specifies routing for replication. If not specified, the "default" application profile will be used. retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will not - be retried. + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. @@ -414,8 +414,8 @@ def mutate_row( app_profile_id (str): This value specifies routing for replication. If not specified, the "default" application profile will be used. retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will not - be retried. + to retry requests. 
If ``None`` is specified, requests will + be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. @@ -507,8 +507,8 @@ def mutate_rows( app_profile_id (str): This value specifies routing for replication. If not specified, the "default" application profile will be used. retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will not - be retried. + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. @@ -614,8 +614,8 @@ def check_and_mutate_row( If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_v2.types.Mutation` retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will not - be retried. + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. @@ -714,8 +714,8 @@ def read_modify_write_row( app_profile_id (str): This value specifies routing for replication. If not specified, the "default" application profile will be used. retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will not - be retried. + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py index 145dd269f938..4c34d5fb1b39 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py @@ -65,7 +65,14 @@ def __init__( # Create the channel. if channel is None: - channel = self.create_channel(address=address, credentials=credentials) + channel = self.create_channel( + address=address, + credentials=credentials, + options={ + "grpc.max_send_message_length": -1, + "grpc.max_receive_message_length": -1, + }.items(), + ) self._channel = channel @@ -93,14 +100,7 @@ def create_channel( grpc.Channel: A gRPC channel object. 
""" return google.api_core.grpc_helpers.create_channel( - address, - credentials=credentials, - scopes=cls._OAUTH_SCOPES, - options={ - "grpc.max_send_message_length": -1, - "grpc.max_receive_message_length": -1, - }.items(), - **kwargs + address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs ) @property diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index a53a33c7f0af..0c2408190e20 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -1,19 +1,18 @@ { - "updateTime": "2019-07-20T12:14:26.395003Z", + "updateTime": "2019-08-06T18:10:45.081885Z", "sources": [ { "generator": { "name": "artman", - "version": "0.30.1", - "dockerImage": "googleapis/artman@sha256:f1a2e851e5e012c59e1da4125480bb19878f86a4e7fac4f375f2e819956b5aa3" + "version": "0.32.1", + "dockerImage": "googleapis/artman@sha256:a684d40ba9a4e15946f5f2ca6b4bd9fe301192f522e9de4fff622118775f309b" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "184ab77f4cee62332f8f9a689c70c9bea441f836", - "internalRef": "259048326" + "sha": "53e641721f965a485af64331cfea9e5522294d78" } }, { diff --git a/packages/google-cloud-bigtable/synth.py b/packages/google-cloud-bigtable/synth.py index 0fb2f977b8b9..32ebc4af2eb1 100644 --- a/packages/google-cloud-bigtable/synth.py +++ b/packages/google-cloud-bigtable/synth.py @@ -61,19 +61,6 @@ "from google.cloud.bigtable_admin_v2.proto", ) -s.replace( - [ - "google/cloud/bigtable_admin_v2/gapic/transports/" - "bigtable_table_admin_grpc_transport.py", - "google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py", - ], - "google.api_core.grpc_helpers.create_channel\(\n" - "(\s+)address.*\n\s+credentials.*\n\s+scopes.*\n", - "\g<0>\g<1>options={\n\g<1> 'grpc.max_send_message_length': -1,\n" - "\g<1> 'grpc.max_receive_message_length': -1,\n" - "\g<1>}.items(),\n", -) - s.replace( ["google/cloud/bigtable_admin_v2/__init__.py"], " __doc__ = bigtable_instance_admin_client." From d120d65d62997d5b3dfce138c4d4dfa3e070ffbb Mon Sep 17 00:00:00 2001 From: Maxim Factourovich <38331387+mf2199@users.noreply.github.com> Date: Wed, 7 Aug 2019 12:39:55 -0400 Subject: [PATCH 276/892] Add retry for DeadlineExceeded to 'test_bigtable_create_table' snippet. (#8889) Fixes #8480. --- packages/google-cloud-bigtable/docs/snippets.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/docs/snippets.py b/packages/google-cloud-bigtable/docs/snippets.py index 7f4071b41fd8..1119b3b6bd90 100644 --- a/packages/google-cloud-bigtable/docs/snippets.py +++ b/packages/google-cloud-bigtable/docs/snippets.py @@ -395,6 +395,8 @@ def test_bigtable_update_cluster(): def test_bigtable_create_table(): # [START bigtable_create_table] + from google.api_core import exceptions + from google.api_core import retry from google.cloud.bigtable import Client from google.cloud.bigtable import column_family @@ -403,7 +405,13 @@ def test_bigtable_create_table(): table = instance.table("table_my") # Define the GC policy to retain only the most recent 2 versions. max_versions_rule = column_family.MaxVersionsGCRule(2) - table.create(column_families={"cf1": max_versions_rule}) + + # Could include other retriable exception types + # Could configure deadline, etc. 
+ predicate_504 = retry.if_exception_type(exceptions.DeadlineExceeded) + retry_504 = retry.Retry(predicate_504) + + retry_504(table.create)(column_families={"cf1": max_versions_rule}) # [END bigtable_create_table] try: From 783eed6924ed9deafaf4808bcd92415b59baf6eb Mon Sep 17 00:00:00 2001 From: sangramql <39852271+sangramql@users.noreply.github.com> Date: Fri, 9 Aug 2019 01:26:56 +0530 Subject: [PATCH 277/892] Avoid creating table in 'list_tables' snippet; harden 'delete_instance' snippet. (#8879) Closes #8479. --- .../google-cloud-bigtable/docs/snippets.py | 26 ++++++++----------- 1 file changed, 11 insertions(+), 15 deletions(-) diff --git a/packages/google-cloud-bigtable/docs/snippets.py b/packages/google-cloud-bigtable/docs/snippets.py index 1119b3b6bd90..850362b4a42a 100644 --- a/packages/google-cloud-bigtable/docs/snippets.py +++ b/packages/google-cloud-bigtable/docs/snippets.py @@ -36,6 +36,7 @@ from test_utils.retry import RetryErrors from google.api_core.exceptions import NotFound from google.api_core.exceptions import TooManyRequests +from google.api_core.exceptions import DeadlineExceeded from google.cloud._helpers import UTC from google.cloud.bigtable import Client from google.cloud.bigtable import enums @@ -45,6 +46,7 @@ INSTANCE_ID = "snippet-tests" + UNIQUE_SUFFIX CLUSTER_ID = "clus-1-" + UNIQUE_SUFFIX APP_PROFILE_ID = "app-prof" + UNIQUE_SUFFIX +TABLE_ID = "tabl-1" + UNIQUE_SUFFIX ROUTING_POLICY_TYPE = enums.RoutingPolicyType.ANY LOCATION_ID = "us-central1-f" ALT_LOCATION_ID = "us-central1-a" @@ -61,6 +63,7 @@ INSTANCES_TO_DELETE = [] retry_429 = RetryErrors(TooManyRequests, max_tries=9) +retry_504 = RetryErrors(DeadlineExceeded, max_tries=4) class Config(object): @@ -72,6 +75,7 @@ class Config(object): CLIENT = None INSTANCE = None + TABLE = None def setup_module(): @@ -88,6 +92,8 @@ def setup_module(): operation = Config.INSTANCE.create(clusters=[cluster]) # We want to make sure the operation completes. 
operation.result(timeout=100) + Config.TABLE = Config.INSTANCE.table(TABLE_ID) + retry_504(Config.TABLE.create)() def teardown_module(): @@ -421,14 +427,6 @@ def test_bigtable_create_table(): def test_bigtable_list_tables(): - from google.cloud.bigtable import Client - from google.cloud.bigtable import column_family - - client = Client(admin=True) - instance = client.instance(INSTANCE_ID) - table = instance.table("to_list") - max_versions_rule = column_family.MaxVersionsGCRule(2) - table.create(column_families={"cf1": max_versions_rule}) # [START bigtable_list_tables] from google.cloud.bigtable import Client @@ -438,11 +436,9 @@ def test_bigtable_list_tables(): tables_list = instance.list_tables() # [END bigtable_list_tables] + # Check if returned list has expected table table_names = [table.name for table in tables_list] - try: - assert table.name in table_names - finally: - retry_429(table.delete)() + assert Config.TABLE.name in table_names def test_bigtable_delete_cluster(): @@ -479,9 +475,10 @@ def test_bigtable_delete_instance(): client = Client(admin=True) - instance = client.instance("inst-my-123", instance_type=PRODUCTION, labels=LABELS) + instance_id = "snipt-inst-del" + UNIQUE_SUFFIX + instance = client.instance(instance_id, instance_type=PRODUCTION, labels=LABELS) cluster = instance.cluster( - "clus-my-123", + "clus-to-delete" + UNIQUE_SUFFIX, location_id=ALT_LOCATION_ID, serve_nodes=1, default_storage_type=STORAGE_TYPE, @@ -499,7 +496,6 @@ def test_bigtable_delete_instance(): client = Client(admin=True) - instance_id = "inst-my-123" instance_to_delete = client.instance(instance_id) instance_to_delete.delete() # [END bigtable_delete_instance] From 2a0042fba5deb96298339a5a8c9320d7be17686c Mon Sep 17 00:00:00 2001 From: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Date: Fri, 16 Aug 2019 13:25:32 -0700 Subject: [PATCH 278/892] Remove compatability badges from READMEs. (#9035) --- packages/google-cloud-bigtable/README.rst | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/README.rst b/packages/google-cloud-bigtable/README.rst index e0d1dc5654e7..5660093c0fb6 100644 --- a/packages/google-cloud-bigtable/README.rst +++ b/packages/google-cloud-bigtable/README.rst @@ -1,7 +1,7 @@ Python Client for Google Cloud Bigtable ======================================= -|beta| |pypi| |versions| |compat_check_pypi| |compat_check_github| +|beta| |pypi| |versions| `Google Cloud Bigtable`_ is Google's NoSQL Big Data database service. It's the same database that powers many core Google services, including Search, @@ -16,10 +16,6 @@ Analytics, Maps, and Gmail. :target: https://pypi.org/project/google-cloud-bigtable/ .. |versions| image:: https://img.shields.io/pypi/pyversions/google-cloud-bigtable.svg :target: https://pypi.org/project/google-cloud-bigtable/ -.. |compat_check_pypi| image:: https://python-compatibility-tools.appspot.com/one_badge_image?package=google-cloud-bigtable - :target: https://python-compatibility-tools.appspot.com/one_badge_target?package=google-cloud-bigtable -.. |compat_check_github| image:: https://python-compatibility-tools.appspot.com/one_badge_image?package=git%2Bgit%3A//github.com/googleapis/google-cloud-python.git%23subdirectory%3Dbigtable - :target: https://python-compatibility-tools.appspot.com/one_badge_target?package=git%2Bgit%3A//github.com/googleapis/google-cloud-python.git%23subdirectory%3Dbigtable .. _Google Cloud Bigtable: https://cloud.google.com/bigtable .. 
_Client Library Documentation: https://googleapis.dev/python/bigtable/latest .. _Product Documentation: https://cloud.google.com/bigtable/docs From f993768f0d756e3f23525546fc413271e14a0229 Mon Sep 17 00:00:00 2001 From: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Date: Tue, 27 Aug 2019 16:35:22 -0700 Subject: [PATCH 279/892] Docs: Remove CI for gh-pages, use googleapis.dev for api_core refs. (#9085) --- packages/google-cloud-bigtable/docs/conf.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/packages/google-cloud-bigtable/docs/conf.py b/packages/google-cloud-bigtable/docs/conf.py index 938f270a92df..ebf93a3dbe5c 100644 --- a/packages/google-cloud-bigtable/docs/conf.py +++ b/packages/google-cloud-bigtable/docs/conf.py @@ -340,10 +340,7 @@ intersphinx_mapping = { "python": ("http://python.readthedocs.org/en/latest/", None), "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), - "google.api_core": ( - "https://googleapis.github.io/google-cloud-python/latest", - None, - ), + "google.api_core": ("https://googleapis.dev/python/google-api-core/latest", None), "grpc": ("https://grpc.io/grpc/python/", None), "requests": ("https://2.python-requests.org/en/master/", None), } From 3cd90e16e4052de867cee21c30d781091b6414d0 Mon Sep 17 00:00:00 2001 From: Christopher Wilcox Date: Thu, 29 Aug 2019 09:28:31 -0700 Subject: [PATCH 280/892] Release bigtable 1.0.0 (#9140) * Release bigtable 1.0.0 --- packages/google-cloud-bigtable/CHANGELOG.md | 15 +++++++++++++++ packages/google-cloud-bigtable/README.rst | 6 +++--- packages/google-cloud-bigtable/setup.py | 4 ++-- 3 files changed, 20 insertions(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index f6e28c23cd4e..80eaff1617b3 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,21 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## 1.0.0 + +08-28-2019 12:49 PDT + +### Implementation Changes +- Remove send/recv msg size limit (via synth). ([#8979](https://github.com/googleapis/google-cloud-python/pull/8979)) + +### Documentation +- Avoid creating table in 'list_tables' snippet; harden 'delete_instance' snippet. ([#8879](https://github.com/googleapis/google-cloud-python/pull/8879)) +- Add retry for DeadlineExceeded to 'test_bigtable_create_table' snippet. ([#8889](https://github.com/googleapis/google-cloud-python/pull/8889)) +- Remove compatability badges from READMEs. ([#9035](https://github.com/googleapis/google-cloud-python/pull/9035)) + +### Internal / Testing Changes +- Docs: Remove CI for gh-pages, use googleapis.dev for api_core refs. ([#9085](https://github.com/googleapis/google-cloud-python/pull/9085)) + ## 0.34.0 07-30-2019 10:05 PDT diff --git a/packages/google-cloud-bigtable/README.rst b/packages/google-cloud-bigtable/README.rst index 5660093c0fb6..5330d231688b 100644 --- a/packages/google-cloud-bigtable/README.rst +++ b/packages/google-cloud-bigtable/README.rst @@ -1,7 +1,7 @@ Python Client for Google Cloud Bigtable ======================================= -|beta| |pypi| |versions| +|GA| |pypi| |versions| `Google Cloud Bigtable`_ is Google's NoSQL Big Data database service. It's the same database that powers many core Google services, including Search, @@ -10,8 +10,8 @@ Analytics, Maps, and Gmail. - `Client Library Documentation`_ - `Product Documentation`_ -.. 
|beta| image:: https://img.shields.io/badge/support-beta-silver.svg - :target: https://github.com/googleapis/google-cloud-python/blob/master/README.rst#beta-support +.. |GA| image:: https://img.shields.io/badge/support-GA-gold.svg + :target: https://github.com/googleapis/google-cloud-python/blob/master/README.rst#general-availability .. |pypi| image:: https://img.shields.io/pypi/v/google-cloud-bigtable.svg :target: https://pypi.org/project/google-cloud-bigtable/ .. |versions| image:: https://img.shields.io/pypi/pyversions/google-cloud-bigtable.svg diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 29a1de12501f..26956b393471 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -22,12 +22,12 @@ name = 'google-cloud-bigtable' description = 'Google Cloud Bigtable API client library' -version = '0.34.0' +version = '1.0.0' # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' # 'Development Status :: 5 - Production/Stable' -release_status = 'Development Status :: 4 - Beta' +release_status = 'Development Status :: 5 - Production/Stable' dependencies = [ 'google-api-core[grpc] >= 1.14.0, < 2.0.0dev', "google-cloud-core >= 1.0.0, < 2.0dev", From c4ce3060447a3457f975704876c3e8dba7419d3f Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Wed, 4 Sep 2019 16:35:17 -0700 Subject: [PATCH 281/892] Add IAM Policy methods to table admin client (via synth). (#9172) --- .../gapic/bigtable_table_admin_client.py | 241 ++++++++++++++++++ .../bigtable_table_admin_client_config.py | 15 ++ .../bigtable_table_admin_grpc_transport.py | 41 +++ .../proto/bigtable_table_admin.proto | 28 ++ .../proto/bigtable_table_admin_pb2.py | 127 +++++---- .../proto/bigtable_table_admin_pb2_grpc.py | 55 ++++ packages/google-cloud-bigtable/synth.metadata | 9 +- .../v2/test_bigtable_table_admin_client_v2.py | 127 +++++++++ 8 files changed, 594 insertions(+), 49 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py index 844fd59a7b2a..96026779dbf1 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py @@ -1037,6 +1037,247 @@ def check_consistency( request, retry=retry, timeout=timeout, metadata=metadata ) + def get_iam_policy( + self, + resource, + options_=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Gets the access control policy for an instance resource. Returns an empty + policy if an table exists but does not have a policy set. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableTableAdminClient() + >>> + >>> resource = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + >>> + >>> response = client.get_iam_policy(resource) + + Args: + resource (str): REQUIRED: The resource for which the policy is being requested. + See the operation documentation for the appropriate value for this field. + options_ (Union[dict, ~google.cloud.bigtable_admin_v2.types.GetPolicyOptions]): OPTIONAL: A ``GetPolicyOptions`` object for specifying options to + ``GetIamPolicy``. This field is only used by Cloud IAM. 
+ + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_admin_v2.types.GetPolicyOptions` + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types.Policy` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. + if "get_iam_policy" not in self._inner_api_calls: + self._inner_api_calls[ + "get_iam_policy" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.get_iam_policy, + default_retry=self._method_configs["GetIamPolicy"].retry, + default_timeout=self._method_configs["GetIamPolicy"].timeout, + client_info=self._client_info, + ) + + request = iam_policy_pb2.GetIamPolicyRequest( + resource=resource, options=options_ + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("resource", resource)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + return self._inner_api_calls["get_iam_policy"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + + def set_iam_policy( + self, + resource, + policy, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Sets the access control policy on a table resource. Replaces any existing + policy. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableTableAdminClient() + >>> + >>> resource = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + >>> + >>> # TODO: Initialize `policy`: + >>> policy = {} + >>> + >>> response = client.set_iam_policy(resource, policy) + + Args: + resource (str): REQUIRED: The resource for which the policy is being specified. + See the operation documentation for the appropriate value for this field. + policy (Union[dict, ~google.cloud.bigtable_admin_v2.types.Policy]): REQUIRED: The complete policy to be applied to the ``resource``. The + size of the policy is limited to a few 10s of KB. An empty policy is a + valid policy but certain Cloud Platform services (such as Projects) + might reject them. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_admin_v2.types.Policy` + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. 
+ + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types.Policy` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. + if "set_iam_policy" not in self._inner_api_calls: + self._inner_api_calls[ + "set_iam_policy" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.set_iam_policy, + default_retry=self._method_configs["SetIamPolicy"].retry, + default_timeout=self._method_configs["SetIamPolicy"].timeout, + client_info=self._client_info, + ) + + request = iam_policy_pb2.SetIamPolicyRequest(resource=resource, policy=policy) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("resource", resource)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + return self._inner_api_calls["set_iam_policy"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + + def test_iam_permissions( + self, + resource, + permissions, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Returns permissions that the caller has on the specified table resource. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableTableAdminClient() + >>> + >>> resource = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + >>> + >>> # TODO: Initialize `permissions`: + >>> permissions = [] + >>> + >>> response = client.test_iam_permissions(resource, permissions) + + Args: + resource (str): REQUIRED: The resource for which the policy detail is being requested. + See the operation documentation for the appropriate value for this field. + permissions (list[str]): The set of permissions to check for the ``resource``. Permissions with + wildcards (such as '*' or 'storage.*') are not allowed. For more + information see `IAM + Overview `__. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types.TestIamPermissionsResponse` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. 
+ if "test_iam_permissions" not in self._inner_api_calls: + self._inner_api_calls[ + "test_iam_permissions" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.test_iam_permissions, + default_retry=self._method_configs["TestIamPermissions"].retry, + default_timeout=self._method_configs["TestIamPermissions"].timeout, + client_info=self._client_info, + ) + + request = iam_policy_pb2.TestIamPermissionsRequest( + resource=resource, permissions=permissions + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("resource", resource)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + return self._inner_api_calls["test_iam_permissions"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + def snapshot_table( self, name, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py index 1a3f59a06023..5e63380ae091 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py @@ -89,6 +89,21 @@ "retry_codes_name": "idempotent", "retry_params_name": "idempotent_params", }, + "GetIamPolicy": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "idempotent_params", + }, + "SetIamPolicy": { + "timeout_millis": 60000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "non_idempotent_params", + }, + "TestIamPermissions": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "idempotent_params", + }, "SnapshotTable": { "timeout_millis": 60000, "retry_codes_name": "non_idempotent", diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py index 7b4432130eae..5d93e555b3b9 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py @@ -263,6 +263,47 @@ def check_consistency(self): """ return self._stubs["bigtable_table_admin_stub"].CheckConsistency + @property + def get_iam_policy(self): + """Return the gRPC stub for :meth:`BigtableTableAdminClient.get_iam_policy`. + + Gets the access control policy for an instance resource. Returns an empty + policy if an table exists but does not have a policy set. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["bigtable_table_admin_stub"].GetIamPolicy + + @property + def set_iam_policy(self): + """Return the gRPC stub for :meth:`BigtableTableAdminClient.set_iam_policy`. + + Sets the access control policy on a table resource. Replaces any existing + policy. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. 
+ """ + return self._stubs["bigtable_table_admin_stub"].SetIamPolicy + + @property + def test_iam_permissions(self): + """Return the gRPC stub for :meth:`BigtableTableAdminClient.test_iam_permissions`. + + Returns permissions that the caller has on the specified table resource. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["bigtable_table_admin_stub"].TestIamPermissions + @property def snapshot_table(self): """Return the gRPC stub for :meth:`BigtableTableAdminClient.snapshot_table`. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto index 79c461e05e63..a3e2e7ccacc4 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto @@ -19,6 +19,8 @@ package google.bigtable.admin.v2; import "google/api/annotations.proto"; import "google/bigtable/admin/v2/table.proto"; +import "google/iam/v1/iam_policy.proto"; +import "google/iam/v1/policy.proto"; import "google/longrunning/operations.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/empty.proto"; @@ -182,6 +184,32 @@ service BigtableTableAdmin { delete: "/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}" }; } + + // Gets the access control policy for an instance resource. Returns an empty + // policy if an table exists but does not have a policy set. + rpc GetIamPolicy(google.iam.v1.GetIamPolicyRequest) returns (google.iam.v1.Policy) { + option (google.api.http) = { + post: "/v2/{resource=projects/*/instances/*/tables/*}:getIamPolicy" + body: "*" + }; + } + + // Sets the access control policy on a table resource. Replaces any existing + // policy. + rpc SetIamPolicy(google.iam.v1.SetIamPolicyRequest) returns (google.iam.v1.Policy) { + option (google.api.http) = { + post: "/v2/{resource=projects/*/instances/*/tables/*}:setIamPolicy" + body: "*" + }; + } + + // Returns permissions that the caller has on the specified table resource. 
+ rpc TestIamPermissions(google.iam.v1.TestIamPermissionsRequest) returns (google.iam.v1.TestIamPermissionsResponse) { + option (google.api.http) = { + post: "/v2/{resource=projects/*/instances/*/tables/*}:testIamPermissions" + body: "*" + }; + } } // Request message for diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py index 4bb9d8f1d328..c81637a34f25 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py @@ -19,6 +19,8 @@ from google.cloud.bigtable_admin_v2.proto import ( table_pb2 as google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2, ) +from google.iam.v1 import iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2 +from google.iam.v1 import policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2 from google.longrunning import ( operations_pb2 as google_dot_longrunning_dot_operations__pb2, ) @@ -35,11 +37,13 @@ "\n\034com.google.bigtable.admin.v2B\027BigtableTableAdminProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2" ), serialized_pb=_b( - '\n?google/cloud/bigtable/admin_v2/proto/bigtable_table_admin.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x30google/cloud/bigtable/admin_v2/proto/table.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xc8\x01\n\x12\x43reateTableRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x10\n\x08table_id\x18\x02 \x01(\t\x12.\n\x05table\x18\x03 \x01(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12J\n\x0einitial_splits\x18\x04 \x03(\x0b\x32\x32.google.bigtable.admin.v2.CreateTableRequest.Split\x1a\x14\n\x05Split\x12\x0b\n\x03key\x18\x01 \x01(\x0c"[\n\x1e\x43reateTableFromSnapshotRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x10\n\x08table_id\x18\x02 \x01(\t\x12\x17\n\x0fsource_snapshot\x18\x03 \x01(\t"m\n\x13\x44ropRowRangeRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x18\n\x0erow_key_prefix\x18\x02 \x01(\x0cH\x00\x12$\n\x1a\x64\x65lete_all_data_from_table\x18\x03 \x01(\x08H\x00\x42\x08\n\x06target"~\n\x11ListTablesRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x32\n\x04view\x18\x02 \x01(\x0e\x32$.google.bigtable.admin.v2.Table.View\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t"^\n\x12ListTablesResponse\x12/\n\x06tables\x18\x01 \x03(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"S\n\x0fGetTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x32\n\x04view\x18\x02 \x01(\x0e\x32$.google.bigtable.admin.v2.Table.View""\n\x12\x44\x65leteTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"\xae\x02\n\x1bModifyColumnFamiliesRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12Y\n\rmodifications\x18\x02 \x03(\x0b\x32\x42.google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification\x1a\xa5\x01\n\x0cModification\x12\n\n\x02id\x18\x01 \x01(\t\x12\x38\n\x06\x63reate\x18\x02 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamilyH\x00\x12\x38\n\x06update\x18\x03 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamilyH\x00\x12\x0e\n\x04\x64rop\x18\x04 
\x01(\x08H\x00\x42\x05\n\x03mod"/\n\x1fGenerateConsistencyTokenRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"=\n GenerateConsistencyTokenResponse\x12\x19\n\x11\x63onsistency_token\x18\x01 \x01(\t"B\n\x17\x43heckConsistencyRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x19\n\x11\x63onsistency_token\x18\x02 \x01(\t".\n\x18\x43heckConsistencyResponse\x12\x12\n\nconsistent\x18\x01 \x01(\x08"\x87\x01\n\x14SnapshotTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07\x63luster\x18\x02 \x01(\t\x12\x13\n\x0bsnapshot_id\x18\x03 \x01(\t\x12&\n\x03ttl\x18\x04 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x13\n\x0b\x64\x65scription\x18\x05 \x01(\t""\n\x12GetSnapshotRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"M\n\x14ListSnapshotsRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t"g\n\x15ListSnapshotsResponse\x12\x35\n\tsnapshots\x18\x01 \x03(\x0b\x32".google.bigtable.admin.v2.Snapshot\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"%\n\x15\x44\x65leteSnapshotRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"\xc4\x01\n\x15SnapshotTableMetadata\x12H\n\x10original_request\x18\x01 \x01(\x0b\x32..google.bigtable.admin.v2.SnapshotTableRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xd8\x01\n\x1f\x43reateTableFromSnapshotMetadata\x12R\n\x10original_request\x18\x01 \x01(\x0b\x32\x38.google.bigtable.admin.v2.CreateTableFromSnapshotRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp2\xb7\x11\n\x12\x42igtableTableAdmin\x12\x93\x01\n\x0b\x43reateTable\x12,.google.bigtable.admin.v2.CreateTableRequest\x1a\x1f.google.bigtable.admin.v2.Table"5\x82\xd3\xe4\x93\x02/"*/v2/{parent=projects/*/instances/*}/tables:\x01*\x12\xbc\x01\n\x17\x43reateTableFromSnapshot\x12\x38.google.bigtable.admin.v2.CreateTableFromSnapshotRequest\x1a\x1d.google.longrunning.Operation"H\x82\xd3\xe4\x93\x02\x42"=/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot:\x01*\x12\x9b\x01\n\nListTables\x12+.google.bigtable.admin.v2.ListTablesRequest\x1a,.google.bigtable.admin.v2.ListTablesResponse"2\x82\xd3\xe4\x93\x02,\x12*/v2/{parent=projects/*/instances/*}/tables\x12\x8a\x01\n\x08GetTable\x12).google.bigtable.admin.v2.GetTableRequest\x1a\x1f.google.bigtable.admin.v2.Table"2\x82\xd3\xe4\x93\x02,\x12*/v2/{name=projects/*/instances/*/tables/*}\x12\x87\x01\n\x0b\x44\x65leteTable\x12,.google.bigtable.admin.v2.DeleteTableRequest\x1a\x16.google.protobuf.Empty"2\x82\xd3\xe4\x93\x02,**/v2/{name=projects/*/instances/*/tables/*}\x12\xba\x01\n\x14ModifyColumnFamilies\x12\x35.google.bigtable.admin.v2.ModifyColumnFamiliesRequest\x1a\x1f.google.bigtable.admin.v2.Table"J\x82\xd3\xe4\x93\x02\x44"?/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies:\x01*\x12\x99\x01\n\x0c\x44ropRowRange\x12-.google.bigtable.admin.v2.DropRowRangeRequest\x1a\x16.google.protobuf.Empty"B\x82\xd3\xe4\x93\x02<"7/v2/{name=projects/*/instances/*/tables/*}:dropRowRange:\x01*\x12\xe1\x01\n\x18GenerateConsistencyToken\x12\x39.google.bigtable.admin.v2.GenerateConsistencyTokenRequest\x1a:.google.bigtable.admin.v2.GenerateConsistencyTokenResponse"N\x82\xd3\xe4\x93\x02H"C/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken:\x01*\x12\xc1\x01\n\x10\x43heckConsistency\x12\x31.google.bigtable.admin.v2.CheckConsistencyRequest\x1a\x32.google.bigtable.admin.v2.CheckConsistencyResponse"F\x82\xd
3\xe4\x93\x02@";/v2/{name=projects/*/instances/*/tables/*}:checkConsistency:\x01*\x12\x9e\x01\n\rSnapshotTable\x12..google.bigtable.admin.v2.SnapshotTableRequest\x1a\x1d.google.longrunning.Operation">\x82\xd3\xe4\x93\x02\x38"3/v2/{name=projects/*/instances/*/tables/*}:snapshot:\x01*\x12\xa1\x01\n\x0bGetSnapshot\x12,.google.bigtable.admin.v2.GetSnapshotRequest\x1a".google.bigtable.admin.v2.Snapshot"@\x82\xd3\xe4\x93\x02:\x12\x38/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}\x12\xb2\x01\n\rListSnapshots\x12..google.bigtable.admin.v2.ListSnapshotsRequest\x1a/.google.bigtable.admin.v2.ListSnapshotsResponse"@\x82\xd3\xe4\x93\x02:\x12\x38/v2/{parent=projects/*/instances/*/clusters/*}/snapshots\x12\x9b\x01\n\x0e\x44\x65leteSnapshot\x12/.google.bigtable.admin.v2.DeleteSnapshotRequest\x1a\x16.google.protobuf.Empty"@\x82\xd3\xe4\x93\x02:*8/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}B\xba\x01\n\x1c\x63om.google.bigtable.admin.v2B\x17\x42igtableTableAdminProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2b\x06proto3' + '\n?google/cloud/bigtable/admin_v2/proto/bigtable_table_admin.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x30google/cloud/bigtable/admin_v2/proto/table.proto\x1a\x1egoogle/iam/v1/iam_policy.proto\x1a\x1agoogle/iam/v1/policy.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xc8\x01\n\x12\x43reateTableRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x10\n\x08table_id\x18\x02 \x01(\t\x12.\n\x05table\x18\x03 \x01(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12J\n\x0einitial_splits\x18\x04 \x03(\x0b\x32\x32.google.bigtable.admin.v2.CreateTableRequest.Split\x1a\x14\n\x05Split\x12\x0b\n\x03key\x18\x01 \x01(\x0c"[\n\x1e\x43reateTableFromSnapshotRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x10\n\x08table_id\x18\x02 \x01(\t\x12\x17\n\x0fsource_snapshot\x18\x03 \x01(\t"m\n\x13\x44ropRowRangeRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x18\n\x0erow_key_prefix\x18\x02 \x01(\x0cH\x00\x12$\n\x1a\x64\x65lete_all_data_from_table\x18\x03 \x01(\x08H\x00\x42\x08\n\x06target"~\n\x11ListTablesRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x32\n\x04view\x18\x02 \x01(\x0e\x32$.google.bigtable.admin.v2.Table.View\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t"^\n\x12ListTablesResponse\x12/\n\x06tables\x18\x01 \x03(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"S\n\x0fGetTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x32\n\x04view\x18\x02 \x01(\x0e\x32$.google.bigtable.admin.v2.Table.View""\n\x12\x44\x65leteTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"\xae\x02\n\x1bModifyColumnFamiliesRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12Y\n\rmodifications\x18\x02 \x03(\x0b\x32\x42.google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification\x1a\xa5\x01\n\x0cModification\x12\n\n\x02id\x18\x01 \x01(\t\x12\x38\n\x06\x63reate\x18\x02 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamilyH\x00\x12\x38\n\x06update\x18\x03 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamilyH\x00\x12\x0e\n\x04\x64rop\x18\x04 \x01(\x08H\x00\x42\x05\n\x03mod"/\n\x1fGenerateConsistencyTokenRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"=\n GenerateConsistencyTokenResponse\x12\x19\n\x11\x63onsistency_token\x18\x01 \x01(\t"B\n\x17\x43heckConsistencyRequest\x12\x0c\n\x04name\x18\x01 
\x01(\t\x12\x19\n\x11\x63onsistency_token\x18\x02 \x01(\t".\n\x18\x43heckConsistencyResponse\x12\x12\n\nconsistent\x18\x01 \x01(\x08"\x87\x01\n\x14SnapshotTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07\x63luster\x18\x02 \x01(\t\x12\x13\n\x0bsnapshot_id\x18\x03 \x01(\t\x12&\n\x03ttl\x18\x04 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x13\n\x0b\x64\x65scription\x18\x05 \x01(\t""\n\x12GetSnapshotRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"M\n\x14ListSnapshotsRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t"g\n\x15ListSnapshotsResponse\x12\x35\n\tsnapshots\x18\x01 \x03(\x0b\x32".google.bigtable.admin.v2.Snapshot\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"%\n\x15\x44\x65leteSnapshotRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"\xc4\x01\n\x15SnapshotTableMetadata\x12H\n\x10original_request\x18\x01 \x01(\x0b\x32..google.bigtable.admin.v2.SnapshotTableRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xd8\x01\n\x1f\x43reateTableFromSnapshotMetadata\x12R\n\x10original_request\x18\x01 \x01(\x0b\x32\x38.google.bigtable.admin.v2.CreateTableFromSnapshotRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp2\x99\x15\n\x12\x42igtableTableAdmin\x12\x93\x01\n\x0b\x43reateTable\x12,.google.bigtable.admin.v2.CreateTableRequest\x1a\x1f.google.bigtable.admin.v2.Table"5\x82\xd3\xe4\x93\x02/"*/v2/{parent=projects/*/instances/*}/tables:\x01*\x12\xbc\x01\n\x17\x43reateTableFromSnapshot\x12\x38.google.bigtable.admin.v2.CreateTableFromSnapshotRequest\x1a\x1d.google.longrunning.Operation"H\x82\xd3\xe4\x93\x02\x42"=/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot:\x01*\x12\x9b\x01\n\nListTables\x12+.google.bigtable.admin.v2.ListTablesRequest\x1a,.google.bigtable.admin.v2.ListTablesResponse"2\x82\xd3\xe4\x93\x02,\x12*/v2/{parent=projects/*/instances/*}/tables\x12\x8a\x01\n\x08GetTable\x12).google.bigtable.admin.v2.GetTableRequest\x1a\x1f.google.bigtable.admin.v2.Table"2\x82\xd3\xe4\x93\x02,\x12*/v2/{name=projects/*/instances/*/tables/*}\x12\x87\x01\n\x0b\x44\x65leteTable\x12,.google.bigtable.admin.v2.DeleteTableRequest\x1a\x16.google.protobuf.Empty"2\x82\xd3\xe4\x93\x02,**/v2/{name=projects/*/instances/*/tables/*}\x12\xba\x01\n\x14ModifyColumnFamilies\x12\x35.google.bigtable.admin.v2.ModifyColumnFamiliesRequest\x1a\x1f.google.bigtable.admin.v2.Table"J\x82\xd3\xe4\x93\x02\x44"?/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies:\x01*\x12\x99\x01\n\x0c\x44ropRowRange\x12-.google.bigtable.admin.v2.DropRowRangeRequest\x1a\x16.google.protobuf.Empty"B\x82\xd3\xe4\x93\x02<"7/v2/{name=projects/*/instances/*/tables/*}:dropRowRange:\x01*\x12\xe1\x01\n\x18GenerateConsistencyToken\x12\x39.google.bigtable.admin.v2.GenerateConsistencyTokenRequest\x1a:.google.bigtable.admin.v2.GenerateConsistencyTokenResponse"N\x82\xd3\xe4\x93\x02H"C/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken:\x01*\x12\xc1\x01\n\x10\x43heckConsistency\x12\x31.google.bigtable.admin.v2.CheckConsistencyRequest\x1a\x32.google.bigtable.admin.v2.CheckConsistencyResponse"F\x82\xd3\xe4\x93\x02@";/v2/{name=projects/*/instances/*/tables/*}:checkConsistency:\x01*\x12\x9e\x01\n\rSnapshotTable\x12..google.bigtable.admin.v2.SnapshotTableRequest\x1a\x1d.google.longrunning.Operation">\x82\xd3\xe4\x93\x02\x38"3/v2/{name=projects/*/insta
nces/*/tables/*}:snapshot:\x01*\x12\xa1\x01\n\x0bGetSnapshot\x12,.google.bigtable.admin.v2.GetSnapshotRequest\x1a".google.bigtable.admin.v2.Snapshot"@\x82\xd3\xe4\x93\x02:\x12\x38/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}\x12\xb2\x01\n\rListSnapshots\x12..google.bigtable.admin.v2.ListSnapshotsRequest\x1a/.google.bigtable.admin.v2.ListSnapshotsResponse"@\x82\xd3\xe4\x93\x02:\x12\x38/v2/{parent=projects/*/instances/*/clusters/*}/snapshots\x12\x9b\x01\n\x0e\x44\x65leteSnapshot\x12/.google.bigtable.admin.v2.DeleteSnapshotRequest\x1a\x16.google.protobuf.Empty"@\x82\xd3\xe4\x93\x02:*8/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}\x12\x91\x01\n\x0cGetIamPolicy\x12".google.iam.v1.GetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"F\x82\xd3\xe4\x93\x02@";/v2/{resource=projects/*/instances/*/tables/*}:getIamPolicy:\x01*\x12\x91\x01\n\x0cSetIamPolicy\x12".google.iam.v1.SetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"F\x82\xd3\xe4\x93\x02@";/v2/{resource=projects/*/instances/*/tables/*}:setIamPolicy:\x01*\x12\xb7\x01\n\x12TestIamPermissions\x12(.google.iam.v1.TestIamPermissionsRequest\x1a).google.iam.v1.TestIamPermissionsResponse"L\x82\xd3\xe4\x93\x02\x46"A/v2/{resource=projects/*/instances/*/tables/*}:testIamPermissions:\x01*B\xba\x01\n\x1c\x63om.google.bigtable.admin.v2B\x17\x42igtableTableAdminProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2b\x06proto3' ), dependencies=[ google_dot_api_dot_annotations__pb2.DESCRIPTOR, google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.DESCRIPTOR, + google_dot_iam_dot_v1_dot_iam__policy__pb2.DESCRIPTOR, + google_dot_iam_dot_v1_dot_policy__pb2.DESCRIPTOR, google_dot_longrunning_dot_operations__pb2.DESCRIPTOR, google_dot_protobuf_dot_duration__pb2.DESCRIPTOR, google_dot_protobuf_dot_empty__pb2.DESCRIPTOR, @@ -82,8 +86,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=485, - serialized_end=505, + serialized_start=545, + serialized_end=565, ) _CREATETABLEREQUEST = _descriptor.Descriptor( @@ -174,8 +178,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=305, - serialized_end=505, + serialized_start=365, + serialized_end=565, ) @@ -249,8 +253,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=507, - serialized_end=598, + serialized_start=567, + serialized_end=658, ) @@ -332,8 +336,8 @@ fields=[], ) ], - serialized_start=600, - serialized_end=709, + serialized_start=660, + serialized_end=769, ) @@ -425,8 +429,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=711, - serialized_end=837, + serialized_start=771, + serialized_end=897, ) @@ -482,8 +486,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=839, - serialized_end=933, + serialized_start=899, + serialized_end=993, ) @@ -539,8 +543,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=935, - serialized_end=1018, + serialized_start=995, + serialized_end=1078, ) @@ -578,8 +582,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1020, - serialized_end=1054, + serialized_start=1080, + serialized_end=1114, ) @@ -679,8 +683,8 @@ fields=[], ) ], - serialized_start=1194, - serialized_end=1359, + serialized_start=1254, + serialized_end=1419, ) _MODIFYCOLUMNFAMILIESREQUEST = _descriptor.Descriptor( @@ -735,8 +739,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1057, - serialized_end=1359, + 
serialized_start=1117, + serialized_end=1419, ) @@ -774,8 +778,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1361, - serialized_end=1408, + serialized_start=1421, + serialized_end=1468, ) @@ -813,8 +817,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1410, - serialized_end=1471, + serialized_start=1470, + serialized_end=1531, ) @@ -870,8 +874,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1473, - serialized_end=1539, + serialized_start=1533, + serialized_end=1599, ) @@ -909,8 +913,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1541, - serialized_end=1587, + serialized_start=1601, + serialized_end=1647, ) @@ -1020,8 +1024,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1590, - serialized_end=1725, + serialized_start=1650, + serialized_end=1785, ) @@ -1059,8 +1063,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1727, - serialized_end=1761, + serialized_start=1787, + serialized_end=1821, ) @@ -1134,8 +1138,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1763, - serialized_end=1840, + serialized_start=1823, + serialized_end=1900, ) @@ -1191,8 +1195,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1842, - serialized_end=1945, + serialized_start=1902, + serialized_end=2005, ) @@ -1230,8 +1234,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1947, - serialized_end=1984, + serialized_start=2007, + serialized_end=2044, ) @@ -1305,8 +1309,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1987, - serialized_end=2183, + serialized_start=2047, + serialized_end=2243, ) @@ -1380,8 +1384,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2186, - serialized_end=2402, + serialized_start=2246, + serialized_end=2462, ) _CREATETABLEREQUEST_SPLIT.containing_type = _CREATETABLEREQUEST @@ -2098,8 +2102,8 @@ file=DESCRIPTOR, index=0, serialized_options=None, - serialized_start=2405, - serialized_end=4636, + serialized_start=2465, + serialized_end=5178, methods=[ _descriptor.MethodDescriptor( name="CreateTable", @@ -2244,6 +2248,39 @@ "\202\323\344\223\002:*8/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}" ), ), + _descriptor.MethodDescriptor( + name="GetIamPolicy", + full_name="google.bigtable.admin.v2.BigtableTableAdmin.GetIamPolicy", + index=13, + containing_service=None, + input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._GETIAMPOLICYREQUEST, + output_type=google_dot_iam_dot_v1_dot_policy__pb2._POLICY, + serialized_options=_b( + '\202\323\344\223\002@";/v2/{resource=projects/*/instances/*/tables/*}:getIamPolicy:\001*' + ), + ), + _descriptor.MethodDescriptor( + name="SetIamPolicy", + full_name="google.bigtable.admin.v2.BigtableTableAdmin.SetIamPolicy", + index=14, + containing_service=None, + input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._SETIAMPOLICYREQUEST, + output_type=google_dot_iam_dot_v1_dot_policy__pb2._POLICY, + serialized_options=_b( + '\202\323\344\223\002@";/v2/{resource=projects/*/instances/*/tables/*}:setIamPolicy:\001*' + ), + ), + _descriptor.MethodDescriptor( + name="TestIamPermissions", + full_name="google.bigtable.admin.v2.BigtableTableAdmin.TestIamPermissions", + index=15, + containing_service=None, + input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._TESTIAMPERMISSIONSREQUEST, + output_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._TESTIAMPERMISSIONSRESPONSE, + serialized_options=_b( + 
'\202\323\344\223\002F"A/v2/{resource=projects/*/instances/*/tables/*}:testIamPermissions:\001*' + ), + ), ], ) _sym_db.RegisterServiceDescriptor(_BIGTABLETABLEADMIN) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py index 2c702413b4e4..689133e84425 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py @@ -7,6 +7,8 @@ from google.cloud.bigtable_admin_v2.proto import ( table_pb2 as google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2, ) +from google.iam.v1 import iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2 +from google.iam.v1 import policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2 from google.longrunning import ( operations_pb2 as google_dot_longrunning_dot_operations__pb2, ) @@ -92,6 +94,21 @@ def __init__(self, channel): request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteSnapshotRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) + self.GetIamPolicy = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/GetIamPolicy", + request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, + ) + self.SetIamPolicy = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/SetIamPolicy", + request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, + ) + self.TestIamPermissions = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/TestIamPermissions", + request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.FromString, + ) class BigtableTableAdminServicer(object): @@ -237,6 +254,29 @@ def DeleteSnapshot(self, request, context): context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") + def GetIamPolicy(self, request, context): + """Gets the access control policy for an instance resource. Returns an empty + policy if an table exists but does not have a policy set. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def SetIamPolicy(self, request, context): + """Sets the access control policy on a table resource. Replaces any existing + policy. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def TestIamPermissions(self, request, context): + """Returns permissions that the caller has on the specified table resource. 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + def add_BigtableTableAdminServicer_to_server(servicer, server): rpc_method_handlers = { @@ -305,6 +345,21 @@ def add_BigtableTableAdminServicer_to_server(servicer, server): request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteSnapshotRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), + "GetIamPolicy": grpc.unary_unary_rpc_method_handler( + servicer.GetIamPolicy, + request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.FromString, + response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString, + ), + "SetIamPolicy": grpc.unary_unary_rpc_method_handler( + servicer.SetIamPolicy, + request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.FromString, + response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString, + ), + "TestIamPermissions": grpc.unary_unary_rpc_method_handler( + servicer.TestIamPermissions, + request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.FromString, + response_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( "google.bigtable.admin.v2.BigtableTableAdmin", rpc_method_handlers diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index 0c2408190e20..a7291727fa94 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -1,18 +1,19 @@ { - "updateTime": "2019-08-06T18:10:45.081885Z", + "updateTime": "2019-09-04T12:14:03.458374Z", "sources": [ { "generator": { "name": "artman", - "version": "0.32.1", - "dockerImage": "googleapis/artman@sha256:a684d40ba9a4e15946f5f2ca6b4bd9fe301192f522e9de4fff622118775f309b" + "version": "0.36.2", + "dockerImage": "googleapis/artman@sha256:0e6f3a668cd68afc768ecbe08817cf6e56a0e64fcbdb1c58c3b97492d12418a1" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "53e641721f965a485af64331cfea9e5522294d78" + "sha": "a2158681f6e30c5fd9446eb1fd7b5021a6d48bfa", + "internalRef": "266999433" } }, { diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py b/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py index 786a8357c321..6247cba66ccb 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py @@ -24,6 +24,8 @@ from google.cloud import bigtable_admin_v2 from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2 from google.cloud.bigtable_admin_v2.proto import table_pb2 +from google.iam.v1 import iam_policy_pb2 +from google.iam.v1 import policy_pb2 from google.longrunning import operations_pb2 from google.protobuf import empty_pb2 @@ -439,6 +441,131 @@ def test_check_consistency_exception(self): with pytest.raises(CustomException): client.check_consistency(name, consistency_token) + def test_get_iam_policy(self): + # Setup Expected Response + version = 351608024 + etag = b"etag3123477" + expected_response = {"version": version, "etag": etag} + 
expected_response = policy_pb2.Policy(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + resource = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + + response = client.get_iam_policy(resource) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = iam_policy_pb2.GetIamPolicyRequest(resource=resource) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_get_iam_policy_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + resource = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + + with pytest.raises(CustomException): + client.get_iam_policy(resource) + + def test_set_iam_policy(self): + # Setup Expected Response + version = 351608024 + etag = b"etag3123477" + expected_response = {"version": version, "etag": etag} + expected_response = policy_pb2.Policy(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + resource = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + policy = {} + + response = client.set_iam_policy(resource, policy) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = iam_policy_pb2.SetIamPolicyRequest( + resource=resource, policy=policy + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_set_iam_policy_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + resource = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + policy = {} + + with pytest.raises(CustomException): + client.set_iam_policy(resource, policy) + + def test_test_iam_permissions(self): + # Setup Expected Response + expected_response = {} + expected_response = iam_policy_pb2.TestIamPermissionsResponse( + **expected_response + ) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + resource = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + permissions = [] + + response = client.test_iam_permissions(resource, permissions) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = iam_policy_pb2.TestIamPermissionsRequest( + resource=resource, permissions=permissions + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def 
test_test_iam_permissions_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + resource = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + permissions = [] + + with pytest.raises(CustomException): + client.test_iam_permissions(resource, permissions) + def test_snapshot_table(self): # Setup Expected Response name_2 = "name2-1052831874" From e05d289b4414901cf8a00885800203a3635ed9fb Mon Sep 17 00:00:00 2001 From: Jacob Dick Date: Thu, 5 Sep 2019 13:31:30 -0600 Subject: [PATCH 282/892] Fix misspelling in docs. (#9184) --- packages/google-cloud-bigtable/docs/data-api.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/docs/data-api.rst b/packages/google-cloud-bigtable/docs/data-api.rst index d35b50079426..b50995be7368 100644 --- a/packages/google-cloud-bigtable/docs/data-api.rst +++ b/packages/google-cloud-bigtable/docs/data-api.rst @@ -84,7 +84,7 @@ Building Up Mutations --------------------- In all three cases, a set of mutations (or two sets) are built up -on a row before they are sent of in a batch via +on a row before they are sent off in a batch via .. code:: python From 362a66db55d57a061672f03d3d465a5561dded89 Mon Sep 17 00:00:00 2001 From: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Date: Wed, 25 Sep 2019 12:35:50 -0400 Subject: [PATCH 283/892] docs: fix intersphinx reference to requests (#9294) --- packages/google-cloud-bigtable/docs/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/docs/conf.py b/packages/google-cloud-bigtable/docs/conf.py index ebf93a3dbe5c..af2c90faeb46 100644 --- a/packages/google-cloud-bigtable/docs/conf.py +++ b/packages/google-cloud-bigtable/docs/conf.py @@ -342,7 +342,7 @@ "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), "google.api_core": ("https://googleapis.dev/python/google-api-core/latest", None), "grpc": ("https://grpc.io/grpc/python/", None), - "requests": ("https://2.python-requests.org/en/master/", None), + "requests": ("https://requests.kennethreitz.org/en/stable/", None), } From 875b809da2d207af2bb1b57a2563255795caf789 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 15 Oct 2019 09:36:40 -0400 Subject: [PATCH 284/892] chore: pin 'google-cloud-core >= 1.0.3, < 2.0.0dev' (#9445) --- packages/google-cloud-bigtable/setup.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 26956b393471..95f08e6dc839 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -29,8 +29,8 @@ # 'Development Status :: 5 - Production/Stable' release_status = 'Development Status :: 5 - Production/Stable' dependencies = [ - 'google-api-core[grpc] >= 1.14.0, < 2.0.0dev', - "google-cloud-core >= 1.0.0, < 2.0dev", + "google-api-core[grpc] >= 1.14.0, < 2.0.0dev", + "google-cloud-core >= 1.0.3, < 2.0dev", "grpc-google-iam-v1 >= 0.12.3, < 0.13dev", ] extras = { From fbfd0c02573ab023663d83ddb3b8a357f13f6062 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 15 Oct 2019 13:38:18 -0400 Subject: [PATCH 285/892] chore(bigtable): release 1.1.0 (#9468) --- packages/google-cloud-bigtable/CHANGELOG.md | 15 +++++++++++++++ 
packages/google-cloud-bigtable/setup.py | 2 +- 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 80eaff1617b3..70e61063c488 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,21 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## 1.1.0 + +10-15-2019 06:40 PDT + + +### New Features +- Add IAM Policy methods to table admin client (via synth). ([#9172](https://github.com/googleapis/google-cloud-python/pull/9172)) + +### Dependencies +- Pin 'google-cloud-core >= 1.0.3, < 2.0.0dev'. ([#9445](https://github.com/googleapis/google-cloud-python/pull/9445)) + +### Documentation +- Fix intersphinx reference to requests ([#9294](https://github.com/googleapis/google-cloud-python/pull/9294)) +- Fix misspelling in docs. ([#9184](https://github.com/googleapis/google-cloud-python/pull/9184)) + ## 1.0.0 08-28-2019 12:49 PDT diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 95f08e6dc839..82c3aa499dcd 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -22,7 +22,7 @@ name = 'google-cloud-bigtable' description = 'Google Cloud Bigtable API client library' -version = '1.0.0' +version = '1.1.0' # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' From e10435401a8ffce1d56a6e0e281ec10e46f76861 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Thu, 24 Oct 2019 14:33:48 -0400 Subject: [PATCH 286/892] feat(bigtable): add 'client_options' / 'admin_client_options' to Client (#9517) Toward #8475. --- .../google/cloud/bigtable/client.py | 42 ++++++-- .../tests/unit/test_client.py | 95 +++++++++++++++++-- 2 files changed, 121 insertions(+), 16 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py index f9a625b15843..8a8315623cae 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py @@ -60,11 +60,13 @@ """Scope for reading table data.""" -def _create_gapic_client(client_class): +def _create_gapic_client(client_class, client_options=None): def inner(self): if self._emulator_host is None: return client_class( - credentials=self._credentials, client_info=self._client_info + credentials=self._credentials, + client_info=self._client_info, + client_options=client_options, ) else: return client_class( @@ -109,6 +111,17 @@ class Client(ClientWithProject): you only need to set this if you're developing your own library or partner tool. + :type client_options: :class:`~google.api_core.client_options.ClientOptions` + or :class:`dict` + :param client_options: (Optional) Client options used to set user options + on the client. API Endpoint should be set through client_options. + + :type admin_client_options: + :class:`~google.api_core.client_options.ClientOptions` or :class:`dict` + :param admin_client_options: (Optional) Client options used to set user + options on the client. API Endpoint for admin operations should be set + through admin_client_options. + :type channel: :instance: grpc.Channel :param channel (grpc.Channel): (Optional) DEPRECATED: A ``Channel`` instance through which to make calls. 
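The ``client_options`` / ``admin_client_options`` parameters documented in the hunk above are forwarded to the lazily constructed GAPIC clients. A minimal sketch of how a caller might use them, assuming an illustrative project id and endpoint values:

.. code:: python

    from google.api_core.client_options import ClientOptions
    from google.cloud import bigtable

    # The endpoints and project id below are illustrative placeholders.
    data_options = ClientOptions(api_endpoint="bigtable.googleapis.com")
    admin_options = ClientOptions(api_endpoint="bigtableadmin.googleapis.com")

    client = bigtable.Client(
        project="my-project",
        admin=True,
        client_options=data_options,           # applied to table_data_client
        admin_client_options=admin_options,     # applied to both admin clients
    )

    # The options take effect when the GAPIC clients are first built.
    data_api = client.table_data_client
    table_admin_api = client.table_admin_client

Plain dicts (e.g. ``{"api_endpoint": "..."}``) are accepted as well, per the docstring above.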
@@ -130,6 +143,8 @@ def __init__( read_only=False, admin=False, client_info=_CLIENT_INFO, + client_options=None, + admin_client_options=None, channel=None, ): if read_only and admin: @@ -155,6 +170,8 @@ def __init__( stacklevel=2, ) + self._client_options = client_options + self._admin_client_options = admin_client_options self._channel = channel self.SCOPE = self._get_scopes() super(Client, self).__init__(project=project, credentials=credentials) @@ -213,9 +230,10 @@ def table_data_client(self): :returns: A BigtableClient object. """ if self._table_data_client is None: - self._table_data_client = _create_gapic_client(bigtable_v2.BigtableClient)( - self + klass = _create_gapic_client( + bigtable_v2.BigtableClient, client_options=self._client_options ) + self._table_data_client = klass(self) return self._table_data_client @property @@ -237,9 +255,11 @@ def table_admin_client(self): if self._table_admin_client is None: if not self._admin: raise ValueError("Client is not an admin client.") - self._table_admin_client = _create_gapic_client( - bigtable_admin_v2.BigtableTableAdminClient - )(self) + klass = _create_gapic_client( + bigtable_admin_v2.BigtableTableAdminClient, + client_options=self._admin_client_options, + ) + self._table_admin_client = klass(self) return self._table_admin_client @property @@ -261,9 +281,11 @@ def instance_admin_client(self): if self._instance_admin_client is None: if not self._admin: raise ValueError("Client is not an admin client.") - self._instance_admin_client = _create_gapic_client( - bigtable_admin_v2.BigtableInstanceAdminClient - )(self) + klass = _create_gapic_client( + bigtable_admin_v2.BigtableInstanceAdminClient, + client_options=self._admin_client_options, + ) + self._instance_admin_client = klass(self) return self._instance_admin_client def instance(self, instance_id, display_name=None, instance_type=None, labels=None): diff --git a/packages/google-cloud-bigtable/tests/unit/test_client.py b/packages/google-cloud-bigtable/tests/unit/test_client.py index 05a017d898af..8a2ef3c64b56 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/test_client.py @@ -21,12 +21,12 @@ class Test__create_gapic_client(unittest.TestCase): - def _invoke_client_factory(self, client_class): + def _invoke_client_factory(self, client_class, **kw): from google.cloud.bigtable.client import _create_gapic_client - return _create_gapic_client(client_class) + return _create_gapic_client(client_class, **kw) - def test_without_emulator(self): + def test_wo_emulator(self): client_class = mock.Mock() credentials = _make_credentials() client = _Client(credentials) @@ -36,10 +36,30 @@ def test_without_emulator(self): self.assertIs(result, client_class.return_value) client_class.assert_called_once_with( - credentials=client._credentials, client_info=client_info + credentials=client._credentials, + client_info=client_info, + client_options=None, ) - def test_with_emulator(self): + def test_wo_emulator_w_client_options(self): + client_class = mock.Mock() + credentials = _make_credentials() + client = _Client(credentials) + client_info = client._client_info = mock.Mock() + client_options = mock.Mock() + + result = self._invoke_client_factory( + client_class, client_options=client_options + )(client) + + self.assertIs(result, client_class.return_value) + client_class.assert_called_once_with( + credentials=client._credentials, + client_info=client_info, + client_options=client_options, + ) + + def test_w_emulator(self): client_class = 
mock.Mock() emulator_host = emulator_channel = object() credentials = _make_credentials() @@ -210,6 +230,25 @@ def test_table_data_client_not_initialized_w_client_info(self): self.assertIs(table_data_client._client_info, client_info) self.assertIs(client._table_data_client, table_data_client) + def test_table_data_client_not_initialized_w_client_options(self): + credentials = _make_credentials() + client_options = mock.Mock() + client = self._make_one( + project=self.PROJECT, credentials=credentials, client_options=client_options + ) + + patch = mock.patch("google.cloud.bigtable_v2.BigtableClient") + with patch as mocked: + table_data_client = client.table_data_client + + self.assertIs(table_data_client, mocked.return_value) + self.assertIs(client._table_data_client, table_data_client) + mocked.assert_called_once_with( + client_info=client._client_info, + credentials=mock.ANY, # added scopes + client_options=client_options, + ) + def test_table_data_client_initialized(self): credentials = _make_credentials() client = self._make_one( @@ -257,6 +296,28 @@ def test_table_admin_client_not_initialized_w_client_info(self): self.assertIs(table_admin_client._client_info, client_info) self.assertIs(client._table_admin_client, table_admin_client) + def test_table_admin_client_not_initialized_w_client_options(self): + credentials = _make_credentials() + admin_client_options = mock.Mock() + client = self._make_one( + project=self.PROJECT, + credentials=credentials, + admin=True, + admin_client_options=admin_client_options, + ) + + patch = mock.patch("google.cloud.bigtable_admin_v2.BigtableTableAdminClient") + with patch as mocked: + table_admin_client = client.table_admin_client + + self.assertIs(table_admin_client, mocked.return_value) + self.assertIs(client._table_admin_client, table_admin_client) + mocked.assert_called_once_with( + client_info=client._client_info, + credentials=mock.ANY, # added scopes + client_options=admin_client_options, + ) + def test_table_admin_client_initialized(self): credentials = _make_credentials() client = self._make_one( @@ -287,7 +348,7 @@ def test_instance_admin_client_not_initialized_w_admin_flag(self): self.assertIs(instance_admin_client._client_info, _CLIENT_INFO) self.assertIs(client._instance_admin_client, instance_admin_client) - def test_instance_admin_client_not_initialized_w_admin_and_client_info(self): + def test_instance_admin_client_not_initialized_w_client_info(self): from google.cloud.bigtable_admin_v2 import BigtableInstanceAdminClient credentials = _make_credentials() @@ -304,6 +365,28 @@ def test_instance_admin_client_not_initialized_w_admin_and_client_info(self): self.assertIs(instance_admin_client._client_info, client_info) self.assertIs(client._instance_admin_client, instance_admin_client) + def test_instance_admin_client_not_initialized_w_client_options(self): + credentials = _make_credentials() + admin_client_options = mock.Mock() + client = self._make_one( + project=self.PROJECT, + credentials=credentials, + admin=True, + admin_client_options=admin_client_options, + ) + + patch = mock.patch("google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient") + with patch as mocked: + instance_admin_client = client.instance_admin_client + + self.assertIs(instance_admin_client, mocked.return_value) + self.assertIs(client._instance_admin_client, instance_admin_client) + mocked.assert_called_once_with( + client_info=client._client_info, + credentials=mock.ANY, # added scopes + client_options=admin_client_options, + ) + def 
test_instance_admin_client_initialized(self): credentials = _make_credentials() client = self._make_one( From 6cd88af04ce0c401c0888c33c45ad1bd7fda7da0 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Tue, 29 Oct 2019 11:23:43 -0700 Subject: [PATCH 287/892] chore(bigtable): add trailing commas (via synth) (#9557) --- .../cloud/bigtable_admin_v2/__init__.py | 7 ++- .../gapic/bigtable_instance_admin_client.py | 51 +++++++++++-------- .../gapic/bigtable_table_admin_client.py | 41 ++++++++------- .../bigtable_instance_admin_grpc_transport.py | 4 +- .../bigtable_table_admin_grpc_transport.py | 4 +- .../proto/bigtable_instance_admin_pb2.py | 12 ++--- .../proto/bigtable_table_admin_pb2.py | 22 ++++---- .../bigtable_admin_v2/proto/instance_pb2.py | 8 +-- .../bigtable_admin_v2/proto/table_pb2.py | 18 +++---- .../google/cloud/bigtable_v2/__init__.py | 5 +- .../bigtable_v2/gapic/bigtable_client.py | 14 ++--- .../transports/bigtable_grpc_transport.py | 6 ++- .../cloud/bigtable_v2/proto/bigtable_pb2.py | 14 ++--- .../cloud/bigtable_v2/proto/data_pb2.py | 14 ++--- .../google/cloud/bigtable_v2/types.py | 13 +++-- packages/google-cloud-bigtable/synth.metadata | 12 ++--- .../google-cloud-bigtable/tests/system.py | 4 +- 17 files changed, 141 insertions(+), 108 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py index 501d8f24d3e1..021abe2ce82b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py @@ -35,4 +35,9 @@ class BigtableTableAdminClient(bigtable_table_admin_client.BigtableTableAdminCli enums = enums -__all__ = ("enums", "types", "BigtableInstanceAdminClient", "BigtableTableAdminClient") +__all__ = ( + "enums", + "types", + "BigtableInstanceAdminClient", + "BigtableTableAdminClient", +) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py index fed633c8dc6b..c0bac0768dcf 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py @@ -49,7 +49,9 @@ from google.protobuf import field_mask_pb2 -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-bigtable").version +_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( + "google-cloud-bigtable", +).version class BigtableInstanceAdminClient(object): @@ -128,7 +130,7 @@ def location_path(cls, project, location): def project_path(cls, project): """Return a fully-qualified project string.""" return google.api_core.path_template.expand( - "projects/{project}", project=project + "projects/{project}", project=project, ) def __init__( @@ -218,12 +220,12 @@ def __init__( self.transport = transport else: self.transport = bigtable_instance_admin_grpc_transport.BigtableInstanceAdminGrpcTransport( - address=api_endpoint, channel=channel, credentials=credentials + address=api_endpoint, channel=channel, credentials=credentials, ) if client_info is None: client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION + gapic_version=_GAPIC_LIBRARY_VERSION, ) else: client_info.gapic_version = _GAPIC_LIBRARY_VERSION @@ -234,7 +236,7 @@ def __init__( # 
(Ordinarily, these are the defaults specified in the `*_config.py` # file next to this one.) self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config["interfaces"][self._INTERFACE_NAME] + client_config["interfaces"][self._INTERFACE_NAME], ) # Save a dictionary of cached API call functions. @@ -333,7 +335,10 @@ def create_instance( ) request = bigtable_instance_admin_pb2.CreateInstanceRequest( - parent=parent, instance_id=instance_id, instance=instance, clusters=clusters + parent=parent, + instance_id=instance_id, + instance=instance, + clusters=clusters, ) if metadata is None: metadata = [] @@ -410,7 +415,7 @@ def get_instance( client_info=self._client_info, ) - request = bigtable_instance_admin_pb2.GetInstanceRequest(name=name) + request = bigtable_instance_admin_pb2.GetInstanceRequest(name=name,) if metadata is None: metadata = [] metadata = list(metadata) @@ -483,7 +488,7 @@ def list_instances( ) request = bigtable_instance_admin_pb2.ListInstancesRequest( - parent=parent, page_token=page_token + parent=parent, page_token=page_token, ) if metadata is None: metadata = [] @@ -586,7 +591,11 @@ def update_instance( ) request = instance_pb2.Instance( - name=name, display_name=display_name, type=type_, labels=labels, state=state + name=name, + display_name=display_name, + type=type_, + labels=labels, + state=state, ) if metadata is None: metadata = [] @@ -679,7 +688,7 @@ def partial_update_instance( ) request = bigtable_instance_admin_pb2.PartialUpdateInstanceRequest( - instance=instance, update_mask=update_mask + instance=instance, update_mask=update_mask, ) if metadata is None: metadata = [] @@ -753,7 +762,7 @@ def delete_instance( client_info=self._client_info, ) - request = bigtable_instance_admin_pb2.DeleteInstanceRequest(name=name) + request = bigtable_instance_admin_pb2.DeleteInstanceRequest(name=name,) if metadata is None: metadata = [] metadata = list(metadata) @@ -849,7 +858,7 @@ def create_cluster( ) request = bigtable_instance_admin_pb2.CreateClusterRequest( - parent=parent, cluster_id=cluster_id, cluster=cluster + parent=parent, cluster_id=cluster_id, cluster=cluster, ) if metadata is None: metadata = [] @@ -926,7 +935,7 @@ def get_cluster( client_info=self._client_info, ) - request = bigtable_instance_admin_pb2.GetClusterRequest(name=name) + request = bigtable_instance_admin_pb2.GetClusterRequest(name=name,) if metadata is None: metadata = [] metadata = list(metadata) @@ -1002,7 +1011,7 @@ def list_clusters( ) request = bigtable_instance_admin_pb2.ListClustersRequest( - parent=parent, page_token=page_token + parent=parent, page_token=page_token, ) if metadata is None: metadata = [] @@ -1177,7 +1186,7 @@ def delete_cluster( client_info=self._client_info, ) - request = bigtable_instance_admin_pb2.DeleteClusterRequest(name=name) + request = bigtable_instance_admin_pb2.DeleteClusterRequest(name=name,) if metadata is None: metadata = [] metadata = list(metadata) @@ -1340,7 +1349,7 @@ def get_app_profile( client_info=self._client_info, ) - request = bigtable_instance_admin_pb2.GetAppProfileRequest(name=name) + request = bigtable_instance_admin_pb2.GetAppProfileRequest(name=name,) if metadata is None: metadata = [] metadata = list(metadata) @@ -1432,7 +1441,7 @@ def list_app_profiles( ) request = bigtable_instance_admin_pb2.ListAppProfilesRequest( - parent=parent, page_size=page_size + parent=parent, page_size=page_size, ) if metadata is None: metadata = [] @@ -1620,7 +1629,7 @@ def delete_app_profile( ) request = 
bigtable_instance_admin_pb2.DeleteAppProfileRequest( - name=name, ignore_warnings=ignore_warnings + name=name, ignore_warnings=ignore_warnings, ) if metadata is None: metadata = [] @@ -1699,7 +1708,7 @@ def get_iam_policy( ) request = iam_policy_pb2.GetIamPolicyRequest( - resource=resource, options=options_ + resource=resource, options=options_, ) if metadata is None: metadata = [] @@ -1782,7 +1791,7 @@ def set_iam_policy( client_info=self._client_info, ) - request = iam_policy_pb2.SetIamPolicyRequest(resource=resource, policy=policy) + request = iam_policy_pb2.SetIamPolicyRequest(resource=resource, policy=policy,) if metadata is None: metadata = [] metadata = list(metadata) @@ -1861,7 +1870,7 @@ def test_iam_permissions( ) request = iam_policy_pb2.TestIamPermissionsRequest( - resource=resource, permissions=permissions + resource=resource, permissions=permissions, ) if metadata is None: metadata = [] diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py index 96026779dbf1..bdc3f1a88749 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py @@ -54,7 +54,9 @@ from google.protobuf import field_mask_pb2 -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-bigtable").version +_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( + "google-cloud-bigtable", +).version class BigtableTableAdminClient(object): @@ -220,12 +222,12 @@ def __init__( self.transport = transport else: self.transport = bigtable_table_admin_grpc_transport.BigtableTableAdminGrpcTransport( - address=api_endpoint, channel=channel, credentials=credentials + address=api_endpoint, channel=channel, credentials=credentials, ) if client_info is None: client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION + gapic_version=_GAPIC_LIBRARY_VERSION, ) else: client_info.gapic_version = _GAPIC_LIBRARY_VERSION @@ -236,7 +238,7 @@ def __init__( # (Ordinarily, these are the defaults specified in the `*_config.py` # file next to this one.) self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config["interfaces"][self._INTERFACE_NAME] + client_config["interfaces"][self._INTERFACE_NAME], ) # Save a dictionary of cached API call functions. 
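For context on the class these hunks touch: the IAM methods added to ``BigtableTableAdminClient`` earlier in this series are called directly on the client, as the unit tests added with that change show. A minimal sketch, assuming application default credentials and placeholder resource names (the permission string is likewise illustrative):

.. code:: python

    from google.cloud import bigtable_admin_v2

    client = bigtable_admin_v2.BigtableTableAdminClient()
    resource = client.table_path("my-project", "my-instance", "my-table")

    policy = client.get_iam_policy(resource)          # returns a google.iam.v1 Policy
    policy = client.set_iam_policy(resource, policy)  # replaces any existing policy
    response = client.test_iam_permissions(
        resource, ["bigtable.tables.readRows"]
    )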
@@ -337,7 +339,10 @@ def create_table( ) request = bigtable_table_admin_pb2.CreateTableRequest( - parent=parent, table_id=table_id, table=table, initial_splits=initial_splits + parent=parent, + table_id=table_id, + table=table, + initial_splits=initial_splits, ) if metadata is None: metadata = [] @@ -439,7 +444,7 @@ def create_table_from_snapshot( ) request = bigtable_table_admin_pb2.CreateTableFromSnapshotRequest( - parent=parent, table_id=table_id, source_snapshot=source_snapshot + parent=parent, table_id=table_id, source_snapshot=source_snapshot, ) if metadata is None: metadata = [] @@ -538,7 +543,7 @@ def list_tables( ) request = bigtable_table_admin_pb2.ListTablesRequest( - parent=parent, view=view, page_size=page_size + parent=parent, view=view, page_size=page_size, ) if metadata is None: metadata = [] @@ -623,7 +628,7 @@ def get_table( client_info=self._client_info, ) - request = bigtable_table_admin_pb2.GetTableRequest(name=name, view=view) + request = bigtable_table_admin_pb2.GetTableRequest(name=name, view=view,) if metadata is None: metadata = [] metadata = list(metadata) @@ -690,7 +695,7 @@ def delete_table( client_info=self._client_info, ) - request = bigtable_table_admin_pb2.DeleteTableRequest(name=name) + request = bigtable_table_admin_pb2.DeleteTableRequest(name=name,) if metadata is None: metadata = [] metadata = list(metadata) @@ -776,7 +781,7 @@ def modify_column_families( ) request = bigtable_table_admin_pb2.ModifyColumnFamiliesRequest( - name=name, modifications=modifications + name=name, modifications=modifications, ) if metadata is None: metadata = [] @@ -939,7 +944,7 @@ def generate_consistency_token( client_info=self._client_info, ) - request = bigtable_table_admin_pb2.GenerateConsistencyTokenRequest(name=name) + request = bigtable_table_admin_pb2.GenerateConsistencyTokenRequest(name=name,) if metadata is None: metadata = [] metadata = list(metadata) @@ -1018,7 +1023,7 @@ def check_consistency( ) request = bigtable_table_admin_pb2.CheckConsistencyRequest( - name=name, consistency_token=consistency_token + name=name, consistency_token=consistency_token, ) if metadata is None: metadata = [] @@ -1097,7 +1102,7 @@ def get_iam_policy( ) request = iam_policy_pb2.GetIamPolicyRequest( - resource=resource, options=options_ + resource=resource, options=options_, ) if metadata is None: metadata = [] @@ -1180,7 +1185,7 @@ def set_iam_policy( client_info=self._client_info, ) - request = iam_policy_pb2.SetIamPolicyRequest(resource=resource, policy=policy) + request = iam_policy_pb2.SetIamPolicyRequest(resource=resource, policy=policy,) if metadata is None: metadata = [] metadata = list(metadata) @@ -1259,7 +1264,7 @@ def test_iam_permissions( ) request = iam_policy_pb2.TestIamPermissionsRequest( - resource=resource, permissions=permissions + resource=resource, permissions=permissions, ) if metadata is None: metadata = [] @@ -1462,7 +1467,7 @@ def get_snapshot( client_info=self._client_info, ) - request = bigtable_table_admin_pb2.GetSnapshotRequest(name=name) + request = bigtable_table_admin_pb2.GetSnapshotRequest(name=name,) if metadata is None: metadata = [] metadata = list(metadata) @@ -1563,7 +1568,7 @@ def list_snapshots( ) request = bigtable_table_admin_pb2.ListSnapshotsRequest( - parent=parent, page_size=page_size + parent=parent, page_size=page_size, ) if metadata is None: metadata = [] @@ -1648,7 +1653,7 @@ def delete_snapshot( client_info=self._client_info, ) - request = bigtable_table_admin_pb2.DeleteSnapshotRequest(name=name) + request = 
bigtable_table_admin_pb2.DeleteSnapshotRequest(name=name,) if metadata is None: metadata = [] metadata = list(metadata) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py index afb72e0c8ab9..3482193864b1 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py @@ -64,7 +64,7 @@ def __init__( # exception (channels come with credentials baked in already). if channel is not None and credentials is not None: raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive." + "The `channel` and `credentials` arguments are mutually " "exclusive.", ) # Create the channel. @@ -85,7 +85,7 @@ def __init__( self._stubs = { "bigtable_instance_admin_stub": bigtable_instance_admin_pb2_grpc.BigtableInstanceAdminStub( channel - ) + ), } # Because this API includes a method that returns a diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py index 5d93e555b3b9..08e70e48b31b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py @@ -64,7 +64,7 @@ def __init__( # exception (channels come with credentials baked in already). if channel is not None and credentials is not None: raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive." + "The `channel` and `credentials` arguments are mutually " "exclusive.", ) # Create the channel. 
@@ -85,7 +85,7 @@ def __init__( self._stubs = { "bigtable_table_admin_stub": bigtable_table_admin_pb2_grpc.BigtableTableAdminStub( channel - ) + ), } # Because this API includes a method that returns a diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py index 01d3fa7e3a4d..5f0601ac2026 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py @@ -189,7 +189,7 @@ ), ], extensions=[], - nested_types=[_CREATEINSTANCEREQUEST_CLUSTERSENTRY], + nested_types=[_CREATEINSTANCEREQUEST_CLUSTERSENTRY,], enum_types=[], serialized_options=None, is_extendable=False, @@ -225,7 +225,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], extensions=[], nested_types=[], @@ -453,7 +453,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], extensions=[], nested_types=[], @@ -567,7 +567,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], extensions=[], nested_types=[], @@ -738,7 +738,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], extensions=[], nested_types=[], @@ -1170,7 +1170,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], extensions=[], nested_types=[], diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py index c81637a34f25..f2a95d546ac3 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py @@ -76,7 +76,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], extensions=[], nested_types=[], @@ -171,7 +171,7 @@ ), ], extensions=[], - nested_types=[_CREATETABLEREQUEST_SPLIT], + nested_types=[_CREATETABLEREQUEST_SPLIT,], enum_types=[], serialized_options=None, is_extendable=False, @@ -334,7 +334,7 @@ index=0, containing_type=None, fields=[], - ) + ), ], serialized_start=660, serialized_end=769, @@ -572,7 +572,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], extensions=[], nested_types=[], @@ -681,7 +681,7 @@ index=0, containing_type=None, fields=[], - ) + ), ], serialized_start=1254, serialized_end=1419, @@ -732,7 +732,7 @@ ), ], extensions=[], - nested_types=[_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION], + nested_types=[_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION,], enum_types=[], serialized_options=None, is_extendable=False, @@ -768,7 +768,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], extensions=[], nested_types=[], @@ -807,7 +807,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], extensions=[], nested_types=[], @@ -903,7 +903,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], extensions=[], nested_types=[], @@ -1053,7 +1053,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], extensions=[], nested_types=[], @@ -1224,7 +1224,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], extensions=[], nested_types=[], diff --git 
a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py index 49164dfe6693..ef3a7ce7858b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py @@ -281,8 +281,8 @@ ), ], extensions=[], - nested_types=[_INSTANCE_LABELSENTRY], - enum_types=[_INSTANCE_STATE, _INSTANCE_TYPE], + nested_types=[_INSTANCE_LABELSENTRY,], + enum_types=[_INSTANCE_STATE, _INSTANCE_TYPE,], serialized_options=None, is_extendable=False, syntax="proto3", @@ -393,7 +393,7 @@ ], extensions=[], nested_types=[], - enum_types=[_CLUSTER_STATE], + enum_types=[_CLUSTER_STATE,], serialized_options=None, is_extendable=False, syntax="proto3", @@ -594,7 +594,7 @@ index=0, containing_type=None, fields=[], - ) + ), ], serialized_start=826, serialized_end=1212, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py index e15dd2ba5b3f..c348fe4a280f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py @@ -192,11 +192,11 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], extensions=[], nested_types=[], - enum_types=[_TABLE_CLUSTERSTATE_REPLICATIONSTATE], + enum_types=[_TABLE_CLUSTERSTATE_REPLICATIONSTATE,], serialized_options=None, is_extendable=False, syntax="proto3", @@ -404,7 +404,7 @@ _TABLE_CLUSTERSTATESENTRY, _TABLE_COLUMNFAMILIESENTRY, ], - enum_types=[_TABLE_TIMESTAMPGRANULARITY, _TABLE_VIEW], + enum_types=[_TABLE_TIMESTAMPGRANULARITY, _TABLE_VIEW,], serialized_options=None, is_extendable=False, syntax="proto3", @@ -439,7 +439,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], extensions=[], nested_types=[], @@ -478,7 +478,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], extensions=[], nested_types=[], @@ -516,7 +516,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], extensions=[], nested_types=[], @@ -611,7 +611,7 @@ ), ], extensions=[], - nested_types=[_GCRULE_INTERSECTION, _GCRULE_UNION], + nested_types=[_GCRULE_INTERSECTION, _GCRULE_UNION,], enum_types=[], serialized_options=None, is_extendable=False, @@ -624,7 +624,7 @@ index=0, containing_type=None, fields=[], - ) + ), ], serialized_start=1087, serialized_end=1428, @@ -767,7 +767,7 @@ ], extensions=[], nested_types=[], - enum_types=[_SNAPSHOT_STATE], + enum_types=[_SNAPSHOT_STATE,], serialized_options=None, is_extendable=False, syntax="proto3", diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py index ca18668ce49b..216ef8fb1daa 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py @@ -25,4 +25,7 @@ class BigtableClient(bigtable_client.BigtableClient): __doc__ = bigtable_client.BigtableClient.__doc__ -__all__ = ("types", "BigtableClient") +__all__ = ( + "types", + "BigtableClient", +) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py index 
36021068dfd8..b13faac448c1 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py @@ -36,7 +36,9 @@ from google.cloud.bigtable_v2.proto import data_pb2 -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-bigtable").version +_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( + "google-cloud-bigtable", +).version class BigtableClient(object): @@ -166,12 +168,12 @@ def __init__( self.transport = transport else: self.transport = bigtable_grpc_transport.BigtableGrpcTransport( - address=api_endpoint, channel=channel, credentials=credentials + address=api_endpoint, channel=channel, credentials=credentials, ) if client_info is None: client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION + gapic_version=_GAPIC_LIBRARY_VERSION, ) else: client_info.gapic_version = _GAPIC_LIBRARY_VERSION @@ -182,7 +184,7 @@ def __init__( # (Ordinarily, these are the defaults specified in the `*_config.py` # file next to this one.) self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config["interfaces"][self._INTERFACE_NAME] + client_config["interfaces"][self._INTERFACE_NAME], ) # Save a dictionary of cached API call functions. @@ -352,7 +354,7 @@ def sample_row_keys( ) request = bigtable_pb2.SampleRowKeysRequest( - table_name=table_name, app_profile_id=app_profile_id + table_name=table_name, app_profile_id=app_profile_id, ) if metadata is None: metadata = [] @@ -537,7 +539,7 @@ def mutate_rows( ) request = bigtable_pb2.MutateRowsRequest( - table_name=table_name, entries=entries, app_profile_id=app_profile_id + table_name=table_name, entries=entries, app_profile_id=app_profile_id, ) if metadata is None: metadata = [] diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py index 4c34d5fb1b39..3c30df704a57 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py @@ -60,7 +60,7 @@ def __init__( # exception (channels come with credentials baked in already). if channel is not None and credentials is not None: raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive." + "The `channel` and `credentials` arguments are mutually " "exclusive.", ) # Create the channel. @@ -78,7 +78,9 @@ def __init__( # gRPC uses objects called "stubs" that are bound to the # channel and provide a basic method for each RPC. 
- self._stubs = {"bigtable_stub": bigtable_pb2_grpc.BigtableStub(channel)} + self._stubs = { + "bigtable_stub": bigtable_pb2_grpc.BigtableStub(channel), + } @classmethod def create_channel( diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py index 1c2b0f1ae134..4e4ab84e1cc8 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py @@ -337,7 +337,7 @@ index=0, containing_type=None, fields=[], - ) + ), ], serialized_start=488, serialized_end=749, @@ -388,7 +388,7 @@ ), ], extensions=[], - nested_types=[_READROWSRESPONSE_CELLCHUNK], + nested_types=[_READROWSRESPONSE_CELLCHUNK,], enum_types=[], serialized_options=None, is_extendable=False, @@ -746,7 +746,7 @@ ), ], extensions=[], - nested_types=[_MUTATEROWSREQUEST_ENTRY], + nested_types=[_MUTATEROWSREQUEST_ENTRY,], enum_types=[], serialized_options=None, is_extendable=False, @@ -838,10 +838,10 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], extensions=[], - nested_types=[_MUTATEROWSRESPONSE_ENTRY], + nested_types=[_MUTATEROWSRESPONSE_ENTRY,], enum_types=[], serialized_options=None, is_extendable=False, @@ -1006,7 +1006,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], extensions=[], nested_types=[], @@ -1138,7 +1138,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], extensions=[], nested_types=[], diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py index 8e5cff816455..825a0fa9222f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py @@ -754,7 +754,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], extensions=[], nested_types=[], @@ -792,7 +792,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], extensions=[], nested_types=[], @@ -1231,7 +1231,7 @@ ), ], extensions=[], - nested_types=[_ROWFILTER_CHAIN, _ROWFILTER_INTERLEAVE, _ROWFILTER_CONDITION], + nested_types=[_ROWFILTER_CHAIN, _ROWFILTER_INTERLEAVE, _ROWFILTER_CONDITION,], enum_types=[], serialized_options=None, is_extendable=False, @@ -1244,7 +1244,7 @@ index=0, containing_type=None, fields=[], - ) + ), ], serialized_start=991, serialized_end=2110, @@ -1441,7 +1441,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + ), ], extensions=[], nested_types=[], @@ -1573,7 +1573,7 @@ index=0, containing_type=None, fields=[], - ) + ), ], serialized_start=2113, serialized_end=2698, @@ -1674,7 +1674,7 @@ index=0, containing_type=None, fields=[], - ) + ), ], serialized_start=2701, serialized_end=2829, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types.py index 53937c1d1687..a445eae1cade 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types.py @@ -27,9 +27,16 @@ from google.rpc import status_pb2 -_shared_modules = [any_pb2, wrappers_pb2, status_pb2] - -_local_modules = [bigtable_pb2, data_pb2] +_shared_modules = [ + any_pb2, + wrappers_pb2, + status_pb2, +] + +_local_modules = [ + bigtable_pb2, + 
data_pb2, +] names = [] diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index a7291727fa94..25c44a96331d 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -1,26 +1,26 @@ { - "updateTime": "2019-09-04T12:14:03.458374Z", + "updateTime": "2019-10-29T12:15:54.915199Z", "sources": [ { "generator": { "name": "artman", - "version": "0.36.2", - "dockerImage": "googleapis/artman@sha256:0e6f3a668cd68afc768ecbe08817cf6e56a0e64fcbdb1c58c3b97492d12418a1" + "version": "0.40.3", + "dockerImage": "googleapis/artman@sha256:c805f50525f5f557886c94ab76f56eaa09cb1da58c3ee95111fd34259376621a" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "a2158681f6e30c5fd9446eb1fd7b5021a6d48bfa", - "internalRef": "266999433" + "sha": "532773acbed8d09451dafb3d403ab1823e6a6e1e", + "internalRef": "277177415" } }, { "template": { "name": "python_library", "origin": "synthtool.gcp", - "version": "2019.5.2" + "version": "2019.10.17" } } ], diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index 28d95d985ffe..ae43bb10ecdf 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -1076,8 +1076,8 @@ def test_read_with_label_applied(self): # Make sure COLUMN_FAMILY_ID1 was the only key. self.assertEqual(len(cells_returned), 0) - cell1_new, = col_fam1.pop(COL_NAME1) - cell3_new, = col_fam1.pop(COL_NAME2) + (cell1_new,) = col_fam1.pop(COL_NAME1) + (cell3_new,) = col_fam1.pop(COL_NAME2) # Make sure COL_NAME1 and COL_NAME2 were the only keys. self.assertEqual(len(col_fam1), 0) From d268214ca0bf4e11e7c22096338f5162518fa4e0 Mon Sep 17 00:00:00 2001 From: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Date: Mon, 11 Nov 2019 15:15:32 -0800 Subject: [PATCH 288/892] docs: add python 2 sunset banner to documentation (#9036) --- .../docs/_static/custom.css | 4 ++ .../docs/_templates/layout.html | 49 +++++++++++++++++++ packages/google-cloud-bigtable/docs/conf.py | 2 +- 3 files changed, 54 insertions(+), 1 deletion(-) create mode 100644 packages/google-cloud-bigtable/docs/_static/custom.css create mode 100644 packages/google-cloud-bigtable/docs/_templates/layout.html diff --git a/packages/google-cloud-bigtable/docs/_static/custom.css b/packages/google-cloud-bigtable/docs/_static/custom.css new file mode 100644 index 000000000000..9a6f9f8ddc3a --- /dev/null +++ b/packages/google-cloud-bigtable/docs/_static/custom.css @@ -0,0 +1,4 @@ +div#python2-eol { + border-color: red; + border-width: medium; +} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/docs/_templates/layout.html b/packages/google-cloud-bigtable/docs/_templates/layout.html new file mode 100644 index 000000000000..de457b2c2767 --- /dev/null +++ b/packages/google-cloud-bigtable/docs/_templates/layout.html @@ -0,0 +1,49 @@ +{% extends "!layout.html" %} +{%- block content %} +{%- if theme_fixed_sidebar|lower == 'true' %} +
+ {{ sidebar() }} + {%- block document %} +
+ {%- if render_sidebar %} +
+ {%- endif %} + + {%- block relbar_top %} + {%- if theme_show_relbar_top|tobool %} + + {%- endif %} + {% endblock %} + +
+
+ On January 1, 2020 this library will no longer support Python 2 on the latest released version. + Previously released library versions will continue to be available. For more information please + visit Python 2 support on Google Cloud. +
+ {% block body %} {% endblock %} +
+ + {%- block relbar_bottom %} + {%- if theme_show_relbar_bottom|tobool %} + + {%- endif %} + {% endblock %} + + {%- if render_sidebar %} +
+ {%- endif %} +
+ {%- endblock %} +
+
+{%- else %} +{{ super() }} +{%- endif %} +{%- endblock %} diff --git a/packages/google-cloud-bigtable/docs/conf.py b/packages/google-cloud-bigtable/docs/conf.py index af2c90faeb46..97b890f1a8c9 100644 --- a/packages/google-cloud-bigtable/docs/conf.py +++ b/packages/google-cloud-bigtable/docs/conf.py @@ -164,7 +164,7 @@ # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -# html_static_path = [] +html_static_path = ["_static"] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied From 5588a037c55e0c2f1ee582835ca74b191c654f85 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Tue, 12 Nov 2019 11:43:52 -0800 Subject: [PATCH 289/892] chore(bigtable): change spacing in docs templates (via synth) (#9739) --- packages/google-cloud-bigtable/docs/_static/custom.css | 2 +- .../google-cloud-bigtable/docs/_templates/layout.html | 1 + packages/google-cloud-bigtable/synth.metadata | 10 +++++----- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/packages/google-cloud-bigtable/docs/_static/custom.css b/packages/google-cloud-bigtable/docs/_static/custom.css index 9a6f9f8ddc3a..0abaf229fce3 100644 --- a/packages/google-cloud-bigtable/docs/_static/custom.css +++ b/packages/google-cloud-bigtable/docs/_static/custom.css @@ -1,4 +1,4 @@ div#python2-eol { border-color: red; border-width: medium; -} \ No newline at end of file +} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/docs/_templates/layout.html b/packages/google-cloud-bigtable/docs/_templates/layout.html index de457b2c2767..228529efe2d2 100644 --- a/packages/google-cloud-bigtable/docs/_templates/layout.html +++ b/packages/google-cloud-bigtable/docs/_templates/layout.html @@ -1,3 +1,4 @@ + {% extends "!layout.html" %} {%- block content %} {%- if theme_fixed_sidebar|lower == 'true' %} diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index 25c44a96331d..621f88741fe6 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -1,19 +1,19 @@ { - "updateTime": "2019-10-29T12:15:54.915199Z", + "updateTime": "2019-11-12T13:18:17.600834Z", "sources": [ { "generator": { "name": "artman", - "version": "0.40.3", - "dockerImage": "googleapis/artman@sha256:c805f50525f5f557886c94ab76f56eaa09cb1da58c3ee95111fd34259376621a" + "version": "0.41.1", + "dockerImage": "googleapis/artman@sha256:545c758c76c3f779037aa259023ec3d1ef2d57d2c8cd00a222cb187d63ceac5e" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "532773acbed8d09451dafb3d403ab1823e6a6e1e", - "internalRef": "277177415" + "sha": "f69562be0608904932bdcfbc5ad8b9a22d9dceb8", + "internalRef": "279774957" } }, { From a91fd2ab17a40d3684a59481b43b90e5affee960 Mon Sep 17 00:00:00 2001 From: Leonid Emar-Kar <46078689+Emar-Kar@users.noreply.github.com> Date: Tue, 26 Nov 2019 22:17:38 +0300 Subject: [PATCH 290/892] feat(bigtable): add table level IAM policy controls (#9877) * feat(bigtable): add table level IAM policy controls * remove extra lines * system test * black * fix system test * update system-tests * chg comment lines * comment correction --- .../docs/snippets_table.py | 49 ++++++++ .../google/cloud/bigtable/table.py | 69 ++++++++++++ 
.../google-cloud-bigtable/tests/system.py | 38 +++++++ .../tests/unit/test_table.py | 105 ++++++++++++++++++ 4 files changed, 261 insertions(+) diff --git a/packages/google-cloud-bigtable/docs/snippets_table.py b/packages/google-cloud-bigtable/docs/snippets_table.py index 0fbb16bf74ad..702cf31b1447 100644 --- a/packages/google-cloud-bigtable/docs/snippets_table.py +++ b/packages/google-cloud-bigtable/docs/snippets_table.py @@ -356,6 +356,55 @@ def test_bigtable_get_cluster_states(): assert CLUSTER_ID in get_cluster_states +def test_bigtable_table_test_iam_permissions(): + table_policy = Config.INSTANCE.table("table_id_iam_policy") + table_policy.create() + assert table_policy.exists + + # [START bigtable_table_test_iam_permissions] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table("table_id_iam_policy") + + permissions = ["bigtable.tables.mutateRows", "bigtable.tables.readRows"] + permissions_allowed = table.test_iam_permissions(permissions) + # [END bigtable_table_test_iam_permissions] + assert permissions_allowed == permissions + + +def test_bigtable_table_set_iam_policy_then_get_iam_policy(): + table_policy = Config.INSTANCE.table("table_id_iam_policy") + assert table_policy.exists + service_account_email = Config.CLIENT._credentials.service_account_email + + # [START bigtable_table_set_iam_policy] + from google.cloud.bigtable import Client + from google.cloud.bigtable.policy import Policy + from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table("table_id_iam_policy") + new_policy = Policy() + new_policy[BIGTABLE_ADMIN_ROLE] = [Policy.service_account(service_account_email)] + + policy_latest = table.set_iam_policy(new_policy) + # [END bigtable_table_set_iam_policy] + assert len(policy_latest.bigtable_admins) > 0 + + # [START bigtable_table_get_iam_policy] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table("table_id_iam_policy") + policy = table.get_iam_policy() + # [END bigtable_table_get_iam_policy] + assert len(policy.bigtable_admins) > 0 + + def test_bigtable_table_exists(): # [START bigtable_check_table_exists] from google.cloud.bigtable import Client diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index 4ced9fbde0c2..69379b21d57e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -28,6 +28,7 @@ from google.cloud.bigtable.column_family import ColumnFamily from google.cloud.bigtable.batcher import MutationsBatcher from google.cloud.bigtable.batcher import FLUSH_COUNT, MAX_ROW_BYTES +from google.cloud.bigtable.policy import Policy from google.cloud.bigtable.row import AppendRow from google.cloud.bigtable.row import ConditionalRow from google.cloud.bigtable.row import DirectRow @@ -138,6 +139,74 @@ def name(self): project=project, instance=instance_id, table=self.table_id ) + def get_iam_policy(self): + """Gets the IAM access control policy for this table. + + For example: + + .. 
literalinclude:: snippets_table.py + :start-after: [START bigtable_table_get_iam_policy] + :end-before: [END bigtable_table_get_iam_policy] + + :rtype: :class:`google.cloud.bigtable.policy.Policy` + :returns: The current IAM policy of this table. + """ + table_client = self._instance._client.table_admin_client + resp = table_client.get_iam_policy(resource=self.name) + return Policy.from_pb(resp) + + def set_iam_policy(self, policy): + """Sets the IAM access control policy for this table. Replaces any + existing policy. + + For more information about policy, please see documentation of + class `google.cloud.bigtable.policy.Policy` + + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_table_set_iam_policy] + :end-before: [END bigtable_table_set_iam_policy] + + :type policy: :class:`google.cloud.bigtable.policy.Policy` + :param policy: A new IAM policy to replace the current IAM policy + of this table. + + :rtype: :class:`google.cloud.bigtable.policy.Policy` + :returns: The current IAM policy of this table. + """ + table_client = self._instance._client.table_admin_client + resp = table_client.set_iam_policy(resource=self.name, policy=policy.to_pb()) + return Policy.from_pb(resp) + + def test_iam_permissions(self, permissions): + """Tests whether the caller has the given permissions for this table. + Returns the permissions that the caller has. + + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_table_test_iam_permissions] + :end-before: [END bigtable_table_test_iam_permissions] + + :type permissions: list + :param permissions: The set of permissions to check for + the ``resource``. Permissions with wildcards (such as '*' + or 'storage.*') are not allowed. For more information see + `IAM Overview + `_. + `Bigtable Permissions + `_. + + :rtype: list + :returns: A List(string) of permissions allowed on the table. + """ + table_client = self._instance._client.table_admin_client + resp = table_client.test_iam_permissions( + resource=self.name, permissions=permissions + ) + return list(resp.permissions) + def column_family(self, column_family_id, gc_rule=None): """Factory to create a column family associated with this table. 
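A minimal usage sketch of the new table-level IAM methods, assuming an admin client, an existing instance and table, and a service account; the IDs and the email address below are placeholders:

from google.cloud.bigtable import Client
from google.cloud.bigtable.policy import Policy, BIGTABLE_ADMIN_ROLE

client = Client(admin=True)
table = client.instance("my-instance").table("my-table")  # placeholder IDs

# Grant the Bigtable admin role on this table only, then read the policy back.
new_policy = Policy()
new_policy[BIGTABLE_ADMIN_ROLE] = [Policy.service_account("sa@example.com")]  # placeholder email
table.set_iam_policy(new_policy)
print(table.get_iam_policy().bigtable_admins)

# Ask which of the requested permissions the caller actually holds.
print(table.test_iam_permissions(["bigtable.tables.mutateRows", "bigtable.tables.readRows"]))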
diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index ae43bb10ecdf..e9e3ab79179e 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -29,6 +29,8 @@ from google.cloud._helpers import UTC from google.cloud.bigtable.client import Client from google.cloud.bigtable.column_family import MaxVersionsGCRule +from google.cloud.bigtable.policy import Policy +from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE from google.cloud.bigtable.row_filters import ApplyLabelFilter from google.cloud.bigtable.row_filters import ColumnQualifierRegexFilter from google.cloud.bigtable.row_filters import RowFilterChain @@ -688,6 +690,42 @@ def test_create_table(self): sorted_tables = sorted(tables, key=name_attr) self.assertEqual(sorted_tables, expected_tables) + def test_test_iam_permissions(self): + temp_table_id = "test-test-iam-policy-table" + temp_table = Config.INSTANCE_DATA.table(temp_table_id) + temp_table.create() + self.tables_to_delete.append(temp_table) + + permissions = ["bigtable.tables.mutateRows", "bigtable.tables.readRows"] + permissions_allowed = temp_table.test_iam_permissions(permissions) + self.assertEqual(permissions, permissions_allowed) + + def test_get_iam_policy(self): + temp_table_id = "test-get-iam-policy-table" + temp_table = Config.INSTANCE_DATA.table(temp_table_id) + temp_table.create() + self.tables_to_delete.append(temp_table) + + policy = temp_table.get_iam_policy().to_api_repr() + self.assertEqual(policy["etag"], "ACAB") + self.assertEqual(policy["version"], 0) + + def test_set_iam_policy(self): + temp_table_id = "test-set-iam-policy-table" + temp_table = Config.INSTANCE_DATA.table(temp_table_id) + temp_table.create() + self.tables_to_delete.append(temp_table) + + new_policy = Policy() + service_account_email = Config.CLIENT._credentials.service_account_email + new_policy[BIGTABLE_ADMIN_ROLE] = [ + Policy.service_account(service_account_email) + ] + policy_latest = temp_table.set_iam_policy(new_policy).to_api_repr() + + self.assertEqual(policy_latest["bindings"][0]["role"], "roles/bigtable.admin") + self.assertIn(service_account_email, policy_latest["bindings"][0]["members"][0]) + def test_create_table_with_families(self): temp_table_id = "test-create-table-with-failies" temp_table = Config.INSTANCE_DATA.table(temp_table_id) diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index 495d8660d1f7..d4bb621c28c0 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -1048,6 +1048,111 @@ def test_mutations_batcher_factory(self): self.assertEqual(mutation_batcher.flush_count, flush_count) self.assertEqual(mutation_batcher.max_row_bytes, max_row_bytes) + def test_get_iam_policy(self): + from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.iam.v1 import policy_pb2 + from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE + + credentials = _make_credentials() + client = self._make_client( + project="project-id", credentials=credentials, admin=True + ) + instance = client.instance(instance_id=self.INSTANCE_ID) + table = self._make_one(self.TABLE_ID, instance) + + version = 1 + etag = b"etag_v1" + members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"] + bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": members}] + iam_policy = 
policy_pb2.Policy(version=version, etag=etag, bindings=bindings) + + table_api = mock.create_autospec( + bigtable_table_admin_client.BigtableTableAdminClient + ) + client._table_admin_client = table_api + table_api.get_iam_policy.return_value = iam_policy + + result = table.get_iam_policy() + + table_api.get_iam_policy.assert_called_once_with(resource=table.name) + self.assertEqual(result.version, version) + self.assertEqual(result.etag, etag) + admins = result.bigtable_admins + self.assertEqual(len(admins), len(members)) + for found, expected in zip(sorted(admins), sorted(members)): + self.assertEqual(found, expected) + + def test_set_iam_policy(self): + from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.iam.v1 import policy_pb2 + from google.cloud.bigtable.policy import Policy + from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE + + credentials = _make_credentials() + client = self._make_client( + project="project-id", credentials=credentials, admin=True + ) + instance = client.instance(instance_id=self.INSTANCE_ID) + table = self._make_one(self.TABLE_ID, instance) + + version = 1 + etag = b"etag_v1" + members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"] + bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": sorted(members)}] + iam_policy_pb = policy_pb2.Policy(version=version, etag=etag, bindings=bindings) + + table_api = mock.create_autospec( + bigtable_table_admin_client.BigtableTableAdminClient + ) + client._table_admin_client = table_api + table_api.set_iam_policy.return_value = iam_policy_pb + + iam_policy = Policy(etag=etag, version=version) + iam_policy[BIGTABLE_ADMIN_ROLE] = [ + Policy.user("user1@test.com"), + Policy.service_account("service_acc1@test.com"), + ] + + result = table.set_iam_policy(iam_policy) + + table_api.set_iam_policy.assert_called_once_with( + resource=table.name, policy=iam_policy_pb + ) + self.assertEqual(result.version, version) + self.assertEqual(result.etag, etag) + admins = result.bigtable_admins + self.assertEqual(len(admins), len(members)) + for found, expected in zip(sorted(admins), sorted(members)): + self.assertEqual(found, expected) + + def test_test_iam_permissions(self): + from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.iam.v1 import iam_policy_pb2 + + credentials = _make_credentials() + client = self._make_client( + project="project-id", credentials=credentials, admin=True + ) + instance = client.instance(instance_id=self.INSTANCE_ID) + table = self._make_one(self.TABLE_ID, instance) + + permissions = ["bigtable.tables.mutateRows", "bigtable.tables.readRows"] + + response = iam_policy_pb2.TestIamPermissionsResponse(permissions=permissions) + + table_api = mock.create_autospec( + bigtable_table_admin_client.BigtableTableAdminClient + ) + table_api.test_iam_permissions.return_value = response + client._table_admin_client = table_api + + result = table.test_iam_permissions(permissions) + + self.assertEqual(result, permissions) + table_api.test_iam_permissions.assert_called_once_with( + resource=table.name, permissions=permissions + ) + class Test__RetryableMutateRowsWorker(unittest.TestCase): from grpc import StatusCode From c36e9ebc3ec6c0bbd105ae4ab3a0423e0ac24813 Mon Sep 17 00:00:00 2001 From: Christopher Wilcox Date: Wed, 4 Dec 2019 12:53:32 -0800 Subject: [PATCH 291/892] chore(bigtable): release 1.2.0 (#9916) --- packages/google-cloud-bigtable/CHANGELOG.md | 16 ++++++++++++++++ packages/google-cloud-bigtable/setup.py | 2 +- 2 files 
changed, 17 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 70e61063c488..59c541607f65 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,22 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## 1.2.0 + +12-04-2019 12:21 PST + + +### New Features +- add table level IAM policy controls ([#9877](https://github.com/googleapis/google-cloud-python/pull/9877)) +- add 'client_options' / 'admin_client_options' to Client ([#9517](https://github.com/googleapis/google-cloud-python/pull/9517)) + +### Documentation +- change spacing in docs templates (via synth) ([#9739](https://github.com/googleapis/google-cloud-python/pull/9739)) +- add python 2 sunset banner to documentation ([#9036](https://github.com/googleapis/google-cloud-python/pull/9036)) + +### Internal +- add trailing commas (via synth) ([#9557](https://github.com/googleapis/google-cloud-python/pull/9557)) + ## 1.1.0 10-15-2019 06:40 PDT diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 82c3aa499dcd..995cd650c3d2 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -22,7 +22,7 @@ name = 'google-cloud-bigtable' description = 'Google Cloud Bigtable API client library' -version = '1.1.0' +version = '1.2.0' # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' From e609f674215e0ab3ca93fb71c66ed970f31d56de Mon Sep 17 00:00:00 2001 From: Mackenzie Starr Date: Fri, 3 Jan 2020 13:02:39 -0500 Subject: [PATCH 292/892] fix(bigtable): add ability to use single-row transactions (#10021) * fix: add ability to use single-row transactions fixes #10018 * fix: add unit tests for supporting single-row transactions --- .../google/cloud/bigtable/row.py | 6 +++++- .../google-cloud-bigtable/tests/unit/test_row.py | 14 ++++++++++---- 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row.py index 8d1f91f3296f..079ba6c8f497 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row.py @@ -582,6 +582,7 @@ def commit(self): table_name=self._table.name, row_key=self._row_key, predicate_filter=self._filter.to_pb(), + app_profile_id=self._table._app_profile_id, true_mutations=true_mutations, false_mutations=false_mutations, ) @@ -908,7 +909,10 @@ def commit(self): data_client = self._table._instance._client.table_data_client row_response = data_client.read_modify_write_row( - table_name=self._table.name, row_key=self._row_key, rules=self._rule_pb_list + table_name=self._table.name, + row_key=self._row_key, + rules=self._rule_pb_list, + app_profile_id=self._table._app_profile_id, ) # Reset modifications after commit-ing request. 
diff --git a/packages/google-cloud-bigtable/tests/unit/test_row.py b/packages/google-cloud-bigtable/tests/unit/test_row.py index b4aaefb862f8..47424d910d97 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row.py @@ -409,6 +409,7 @@ def test_commit(self): project_id = "project-id" row_key = b"row_key" table_name = "projects/more-stuff" + app_profile_id = "app_profile_id" column_family_id1 = u"column_family_id1" column_family_id2 = u"column_family_id2" column_family_id3 = u"column_family_id3" @@ -420,7 +421,7 @@ def test_commit(self): client = self._make_client( project=project_id, credentials=credentials, admin=True ) - table = _Table(table_name, client=client) + table = _Table(table_name, client=client, app_profile_id=app_profile_id) row_filter = RowSampleFilter(0.33) row = self._make_one(row_key, table, filter_=row_filter) @@ -444,6 +445,8 @@ def test_commit(self): row.delete_cell(column_family_id2, column2, state=True) row.delete_cells(column_family_id3, row.ALL_COLUMNS, state=True) result = row.commit() + call_args = api.transport.check_and_mutate_row.call_args.args[0] + self.assertEqual(app_profile_id, call_args.app_profile_id) self.assertEqual(result, expected_result) self.assertEqual(row._true_pb_mutations, []) self.assertEqual(row._false_pb_mutations, []) @@ -564,6 +567,7 @@ def test_commit(self): project_id = "project-id" row_key = b"row_key" table_name = "projects/more-stuff" + app_profile_id = "app_profile_id" column_family_id = u"column_family_id" column = b"column" @@ -572,7 +576,7 @@ def test_commit(self): client = self._make_client( project=project_id, credentials=credentials, admin=True ) - table = _Table(table_name, client=client) + table = _Table(table_name, client=client, app_profile_id=app_profile_id) row = self._make_one(row_key, table) # Create request_pb @@ -593,7 +597,8 @@ def mock_parse_rmw_row_response(row_response): with _Monkey(MUT, _parse_rmw_row_response=mock_parse_rmw_row_response): row.append_cell_value(column_family_id, column, value) result = row.commit() - + call_args = api.transport.read_modify_write_row.call_args.args[0] + self.assertEqual(app_profile_id, call_args.app_profile_id) self.assertEqual(result, expected_result) self.assertEqual(row._rule_pb_list, []) @@ -819,9 +824,10 @@ def __init__(self, client=None): class _Table(object): - def __init__(self, name, client=None): + def __init__(self, name, client=None, app_profile_id=None): self.name = name self._instance = _Instance(client) + self._app_profile_id = app_profile_id self.client = client self.mutated_rows = [] From 319cf97d506a278a674846b82879742b4bbc0f08 Mon Sep 17 00:00:00 2001 From: Christopher Wilcox Date: Fri, 3 Jan 2020 10:47:38 -0800 Subject: [PATCH 293/892] chore(bigtable): release 1.2.1 (#10057) --- packages/google-cloud-bigtable/CHANGELOG.md | 8 ++++++++ packages/google-cloud-bigtable/setup.py | 2 +- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 59c541607f65..a0ee45d5b5ff 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,14 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## 1.2.1 + +01-03-2020 10:05 PST + + +### Implementation Changes +- Add ability to use single-row transactions ([#10021](https://github.com/googleapis/google-cloud-python/pull/10021)) + ## 1.2.0 12-04-2019 12:21 PST diff --git 
a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 995cd650c3d2..c5075bbcc61b 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -22,7 +22,7 @@ name = 'google-cloud-bigtable' description = 'Google Cloud Bigtable API client library' -version = '1.2.0' +version = '1.2.1' # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' From 8b63996fa36442d42ee25f460c45ac8dc4832b84 Mon Sep 17 00:00:00 2001 From: Jonathan Lui Date: Thu, 9 Jan 2020 13:01:17 -0800 Subject: [PATCH 294/892] feat(api_core): support version 3 policy bindings (#9869) * feat(api_core): support version 3 policy bindings * fix(doc): fix documenting bindings structure * try fixing docs * fix pytype error * fill test coverage * indent docs * fix docs * improve test coverage * linty * remove unused variable --- .../google/cloud/bigtable/policy.py | 54 ++++++++++-- .../tests/unit/test_policy.py | 83 +++++++++++++++++-- 2 files changed, 124 insertions(+), 13 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/policy.py b/packages/google-cloud-bigtable/google/cloud/bigtable/policy.py index 9fea7bbc5a0e..65be0158a006 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/policy.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/policy.py @@ -72,6 +72,22 @@ class Policy(BasePolicy): If no etag is provided in the call to setIamPolicy, then the existing policy is overwritten blindly. + :type version: int + :param version: The syntax schema version of the policy. + + Note: + Using conditions in bindings requires the policy's version to be set + to `3` or greater, depending on the versions that are currently supported. + + Accessing the policy using dict operations will raise InvalidOperationException + when the policy's version is set to 3. + + Use the policy.bindings getter/setter to retrieve and modify the policy's bindings. + + See: + IAM Policy https://cloud.google.com/iam/reference/rest/v1/Policy + Policy versions https://cloud.google.com/iam/docs/policies#versions + Conditions overview https://cloud.google.com/iam/docs/conditions-overview. """ def __init__(self, etag=None, version=None): @@ -83,6 +99,8 @@ def __init__(self, etag=None, version=None): def bigtable_admins(self): """Access to bigtable.admin role memebers + Raise InvalidOperationException if version is greater than 1 or policy contains conditions. + For example: .. literalinclude:: snippets.py @@ -90,7 +108,7 @@ def bigtable_admins(self): :end-before: [END bigtable_admins_policy] """ result = set() - for member in self._bindings.get(BIGTABLE_ADMIN_ROLE, ()): + for member in self.get(BIGTABLE_ADMIN_ROLE, ()): result.add(member) return frozenset(result) @@ -98,6 +116,8 @@ def bigtable_admins(self): def bigtable_readers(self): """Access to bigtable.reader role memebers + Raise InvalidOperationException if version is greater than 1 or policy contains conditions. + For example: .. literalinclude:: snippets.py @@ -105,7 +125,7 @@ def bigtable_readers(self): :end-before: [END bigtable_readers_policy] """ result = set() - for member in self._bindings.get(BIGTABLE_READER_ROLE, ()): + for member in self.get(BIGTABLE_READER_ROLE, ()): result.add(member) return frozenset(result) @@ -113,6 +133,8 @@ def bigtable_readers(self): def bigtable_users(self): """Access to bigtable.user role memebers + Raise InvalidOperationException if version is greater than 1 or policy contains conditions. + For example: .. 
literalinclude:: snippets.py @@ -120,7 +142,7 @@ def bigtable_users(self): :end-before: [END bigtable_users_policy] """ result = set() - for member in self._bindings.get(BIGTABLE_USER_ROLE, ()): + for member in self.get(BIGTABLE_USER_ROLE, ()): result.add(member) return frozenset(result) @@ -128,6 +150,8 @@ def bigtable_users(self): def bigtable_viewers(self): """Access to bigtable.viewer role memebers + Raise InvalidOperationException if version is greater than 1 or policy contains conditions. + For example: .. literalinclude:: snippets.py @@ -135,7 +159,7 @@ def bigtable_viewers(self): :end-before: [END bigtable_viewers_policy] """ result = set() - for member in self._bindings.get(BIGTABLE_VIEWER_ROLE, ()): + for member in self.get(BIGTABLE_VIEWER_ROLE, ()): result.add(member) return frozenset(result) @@ -152,8 +176,17 @@ def from_pb(cls, policy_pb): """ policy = cls(policy_pb.etag, policy_pb.version) - for binding in policy_pb.bindings: - policy[binding.role] = sorted(binding.members) + policy.bindings = bindings = [] + for binding_pb in policy_pb.bindings: + binding = {"role": binding_pb.role, "members": set(binding_pb.members)} + condition = binding_pb.condition + if condition and condition.expression: + binding["condition"] = { + "title": condition.title, + "description": condition.description, + "expression": condition.expression, + } + bindings.append(binding) return policy @@ -169,8 +202,13 @@ def to_pb(self): etag=self.etag, version=self.version or 0, bindings=[ - policy_pb2.Binding(role=role, members=sorted(self[role])) - for role in self + policy_pb2.Binding( + role=binding["role"], + members=sorted(binding["members"]), + condition=binding.get("condition"), + ) + for binding in self.bindings + if binding["members"] ], ) diff --git a/packages/google-cloud-bigtable/tests/unit/test_policy.py b/packages/google-cloud-bigtable/tests/unit/test_policy.py index 74b19e49b29a..63f9ba03fb23 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_policy.py +++ b/packages/google-cloud-bigtable/tests/unit/test_policy.py @@ -38,7 +38,7 @@ def test_ctor_defaults(self): self.assertEqual(dict(policy), {}) def test_ctor_explicit(self): - VERSION = 17 + VERSION = 1 ETAG = b"ETAG" empty = frozenset() policy = self._make_one(ETAG, VERSION) @@ -108,7 +108,7 @@ def test_from_pb_non_empty(self): from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE ETAG = b"ETAG" - VERSION = 17 + VERSION = 1 members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"] empty = frozenset() message = policy_pb2.Policy( @@ -127,6 +127,45 @@ def test_from_pb_non_empty(self): self.assertEqual(len(policy), 1) self.assertEqual(dict(policy), {BIGTABLE_ADMIN_ROLE: set(members)}) + def test_from_pb_with_condition(self): + import pytest + from google.iam.v1 import policy_pb2 + from google.api_core.iam import InvalidOperationException, _DICT_ACCESS_MSG + from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE + + ETAG = b"ETAG" + VERSION = 3 + members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"] + BINDINGS = [ + { + "role": BIGTABLE_ADMIN_ROLE, + "members": members, + "condition": { + "title": "request_time", + "description": "Requests made before 2021-01-01T00:00:00Z", + "expression": 'request.time < timestamp("2021-01-01T00:00:00Z")', + }, + } + ] + message = policy_pb2.Policy(etag=ETAG, version=VERSION, bindings=BINDINGS,) + klass = self._get_target_class() + policy = klass.from_pb(message) + self.assertEqual(policy.etag, ETAG) + self.assertEqual(policy.version, VERSION) + 
self.assertEqual(policy.bindings[0]["role"], BIGTABLE_ADMIN_ROLE) + self.assertEqual(policy.bindings[0]["members"], set(members)) + self.assertEqual(policy.bindings[0]["condition"], BINDINGS[0]["condition"]) + with pytest.raises(InvalidOperationException, match=_DICT_ACCESS_MSG): + policy.bigtable_admins + with pytest.raises(InvalidOperationException, match=_DICT_ACCESS_MSG): + policy.bigtable_readers + with pytest.raises(InvalidOperationException, match=_DICT_ACCESS_MSG): + policy.bigtable_users + with pytest.raises(InvalidOperationException, match=_DICT_ACCESS_MSG): + policy.bigtable_viewers + with pytest.raises(InvalidOperationException, match=_DICT_ACCESS_MSG): + len(policy) + def test_to_pb_empty(self): from google.iam.v1 import policy_pb2 @@ -139,7 +178,7 @@ def test_to_pb_explicit(self): from google.iam.v1 import policy_pb2 from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE - VERSION = 17 + VERSION = 1 ETAG = b"ETAG" members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"] policy = self._make_one(ETAG, VERSION) @@ -154,8 +193,42 @@ def test_to_pb_explicit(self): self.assertEqual(policy.to_pb(), expected) + def test_to_pb_with_condition(self): + from google.iam.v1 import policy_pb2 + from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE + + VERSION = 3 + ETAG = b"ETAG" + members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"] + condition = { + "title": "request_time", + "description": "Requests made before 2021-01-01T00:00:00Z", + "expression": 'request.time < timestamp("2021-01-01T00:00:00Z")', + } + policy = self._make_one(ETAG, VERSION) + policy.bindings = [ + { + "role": BIGTABLE_ADMIN_ROLE, + "members": set(members), + "condition": condition, + } + ] + expected = policy_pb2.Policy( + etag=ETAG, + version=VERSION, + bindings=[ + policy_pb2.Binding( + role=BIGTABLE_ADMIN_ROLE, + members=sorted(members), + condition=condition, + ) + ], + ) + + self.assertEqual(policy.to_pb(), expected) + def test_from_api_repr_wo_etag(self): - VERSION = 17 + VERSION = 1 empty = frozenset() resource = {"version": VERSION} klass = self._get_target_class() @@ -187,7 +260,7 @@ def test_from_api_repr_w_etag(self): self.assertEqual(dict(policy), {}) def test_to_api_repr_wo_etag(self): - VERSION = 17 + VERSION = 1 resource = {"version": VERSION} policy = self._make_one(version=VERSION) self.assertEqual(policy.to_api_repr(), resource) From afcb554d73974891f1682d79d61b5979f3ef5242 Mon Sep 17 00:00:00 2001 From: Jonathan Lui Date: Mon, 13 Jan 2020 13:06:23 -0800 Subject: [PATCH 295/892] feat(bigtable): support requested_policy_version for Instance IAM (#10001) * iam proposal #3 maintain compatibility with defaultdict remove in place raise KeyError on delete update deprecation for dict-key access and factory methods clean up maintain compatibility - removing duplicate in __setitems__ check for conditions for dict access remove empty binding fix test accessing private var _bindings fix(tests): change version to make existing tests pass tests: add tests for getitem, delitem, setitem on v3 and conditions test policy.bindings property fixlint black sort bindings by role when converting to api repr add deprecation warning for iam factory methods update deprecation message for role methods make Policy#bindings.members a set update policy docs fix docs make docs better fix: Bigtable policy class to use Policy.bindings add from_pb with conditions test add to_pb condition test blacken fix policy __delitem__ add docs on dict access do not modify binding in to_apr_repr 
* feat(bigtable): support requested_policy_version to instance * fix passing requested_policy_version to pb2 * add unit test * add unit test --- .../google/cloud/bigtable/instance.py | 25 ++++++++++-- .../tests/unit/test_instance.py | 38 +++++++++++++++++++ 2 files changed, 60 insertions(+), 3 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py index 8a664778577a..dbdd20640918 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py @@ -23,7 +23,7 @@ from google.protobuf import field_mask_pb2 -from google.cloud.bigtable_admin_v2.types import instance_pb2 +from google.cloud.bigtable_admin_v2.types import instance_pb2, options_pb2 from google.api_core.exceptions import NotFound @@ -434,7 +434,7 @@ def delete(self): """ self._client.instance_admin_client.delete_instance(name=self.name) - def get_iam_policy(self): + def get_iam_policy(self, requested_policy_version=None): """Gets the access control policy for an instance resource. For example: @@ -443,11 +443,30 @@ def get_iam_policy(self): :start-after: [START bigtable_get_iam_policy] :end-before: [END bigtable_get_iam_policy] + :type requested_policy_version: int or ``NoneType`` + :param requested_policy_version: Optional. The version of IAM policies to request. + If a policy with a condition is requested without + setting this, the server will return an error. + This must be set to a value of 3 to retrieve IAM + policies containing conditions. This is to prevent + client code that isn't aware of IAM conditions from + interpreting and modifying policies incorrectly. + The service might return a policy with version lower + than the one that was requested, based on the + feature syntax in the policy fetched. 
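A rough sketch of how the new parameter is meant to be called, assuming an existing instance; the instance ID below is a placeholder, and the bindings shape follows the Policy.bindings structure from the previous commit:

from google.cloud.bigtable import Client

client = Client(admin=True)
instance = client.instance("my-instance")  # placeholder ID

# Policies that may contain IAM conditions must be requested at version 3.
policy = instance.get_iam_policy(requested_policy_version=3)
for binding in policy.bindings:
    print(binding["role"], sorted(binding["members"]), binding.get("condition"))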
+ :rtype: :class:`google.cloud.bigtable.policy.Policy` :returns: The current IAM policy of this instance """ + args = {"resource": self.name} + if requested_policy_version is not None: + args["options_"] = options_pb2.GetPolicyOptions( + requested_policy_version=requested_policy_version + ) + instance_admin_client = self._client.instance_admin_client - resp = instance_admin_client.get_iam_policy(resource=self.name) + + resp = instance_admin_client.get_iam_policy(**args) return Policy.from_pb(resp) def set_iam_policy(self, policy): diff --git a/packages/google-cloud-bigtable/tests/unit/test_instance.py b/packages/google-cloud-bigtable/tests/unit/test_instance.py index 397e54d570d0..b129d4edc825 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_instance.py +++ b/packages/google-cloud-bigtable/tests/unit/test_instance.py @@ -633,6 +633,44 @@ def test_get_iam_policy(self): for found, expected in zip(sorted(admins), sorted(members)): self.assertEqual(found, expected) + def test_get_iam_policy_w_requested_policy_version(self): + from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client + from google.iam.v1 import policy_pb2, options_pb2 + from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE + + credentials = _make_credentials() + client = self._make_client( + project=self.PROJECT, credentials=credentials, admin=True + ) + instance = self._make_one(self.INSTANCE_ID, client) + + version = 1 + etag = b"etag_v1" + members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"] + bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": members}] + iam_policy = policy_pb2.Policy(version=version, etag=etag, bindings=bindings) + + # Patch the stub used by the API method. + instance_api = mock.create_autospec( + bigtable_instance_admin_client.BigtableInstanceAdminClient + ) + client._instance_admin_client = instance_api + instance_api.get_iam_policy.return_value = iam_policy + + # Perform the method and check the result. 
+ result = instance.get_iam_policy(requested_policy_version=3) + + instance_api.get_iam_policy.assert_called_once_with( + resource=instance.name, + options_=options_pb2.GetPolicyOptions(requested_policy_version=3), + ) + self.assertEqual(result.version, version) + self.assertEqual(result.etag, etag) + admins = result.bigtable_admins + self.assertEqual(len(admins), len(members)) + for found, expected in zip(sorted(admins), sorted(members)): + self.assertEqual(found, expected) + def test_set_iam_policy(self): from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client from google.iam.v1 import policy_pb2 From ced8788165774bdc5088dc12d6a2f870fc4953b9 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Wed, 29 Jan 2020 14:11:48 -0800 Subject: [PATCH 296/892] feat(bigtable): add py2 deprecation warnings; standardize use of 'required' in docstrings (via synth) (#10064) --- .../cloud/bigtable_admin_v2/__init__.py | 11 + .../gapic/bigtable_table_admin_client.py | 2 +- .../bigtable_table_admin_grpc_transport.py | 2 +- .../proto/bigtable_table_admin.proto | 2 +- .../proto/bigtable_table_admin_pb2_grpc.py | 2 +- .../google/cloud/bigtable_v2/__init__.py | 11 + .../bigtable_v2/gapic/bigtable_client.py | 46 ++-- .../cloud/bigtable_v2/proto/bigtable.proto | 147 ++++++++--- .../cloud/bigtable_v2/proto/bigtable_pb2.py | 233 +++++++++++------- .../google/cloud/bigtable_v2/proto/data.proto | 7 +- packages/google-cloud-bigtable/synth.metadata | 10 +- 11 files changed, 321 insertions(+), 152 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py index 021abe2ce82b..263c8a37a553 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py @@ -16,6 +16,8 @@ from __future__ import absolute_import +import sys +import warnings from google.cloud.bigtable_admin_v2 import types from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client @@ -23,6 +25,15 @@ from google.cloud.bigtable_admin_v2.gapic import enums +if sys.version_info[:2] == (2, 7): + message = ( + "A future version of this library will drop support for Python 2.7." + "More details about Python 2 support for Google Cloud Client Libraries" + "can be found at https://cloud.google.com/python/docs/python2-sunset/" + ) + warnings.warn(message, DeprecationWarning) + + class BigtableInstanceAdminClient( bigtable_instance_admin_client.BigtableInstanceAdminClient ): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py index bdc3f1a88749..c96304e30832 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py @@ -1051,7 +1051,7 @@ def get_iam_policy( metadata=None, ): """ - Gets the access control policy for an instance resource. Returns an empty + Gets the access control policy for a table resource. Returns an empty policy if an table exists but does not have a policy set. 
Example: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py index 08e70e48b31b..b4fc8e9b0ff8 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py @@ -267,7 +267,7 @@ def check_consistency(self): def get_iam_policy(self): """Return the gRPC stub for :meth:`BigtableTableAdminClient.get_iam_policy`. - Gets the access control policy for an instance resource. Returns an empty + Gets the access control policy for a table resource. Returns an empty policy if an table exists but does not have a policy set. Returns: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto index a3e2e7ccacc4..812022295950 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto @@ -185,7 +185,7 @@ service BigtableTableAdmin { }; } - // Gets the access control policy for an instance resource. Returns an empty + // Gets the access control policy for a table resource. Returns an empty // policy if an table exists but does not have a policy set. rpc GetIamPolicy(google.iam.v1.GetIamPolicyRequest) returns (google.iam.v1.Policy) { option (google.api.http) = { diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py index 689133e84425..f152581fe0a2 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py @@ -255,7 +255,7 @@ def DeleteSnapshot(self, request, context): raise NotImplementedError("Method not implemented!") def GetIamPolicy(self, request, context): - """Gets the access control policy for an instance resource. Returns an empty + """Gets the access control policy for a table resource. Returns an empty policy if an table exists but does not have a policy set. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py index 216ef8fb1daa..cbbc1b2b98db 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py @@ -16,11 +16,22 @@ from __future__ import absolute_import +import sys +import warnings from google.cloud.bigtable_v2 import types from google.cloud.bigtable_v2.gapic import bigtable_client +if sys.version_info[:2] == (2, 7): + message = ( + "A future version of this library will drop support for Python 2.7." 
+ "More details about Python 2 support for Google Cloud Client Libraries" + "can be found at https://cloud.google.com/python/docs/python2-sunset/" + ) + warnings.warn(message, DeprecationWarning) + + class BigtableClient(bigtable_client.BigtableClient): __doc__ = bigtable_client.BigtableClient.__doc__ diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py index b13faac448c1..f0522654cb1d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py @@ -224,8 +224,8 @@ def read_rows( ... pass Args: - table_name (str): The unique name of the table from which to read. Values are of the form - ``projects//instances//tables/
``. + table_name (str): Required. The unique name of the table from which to read. Values are of + the form ``projects//instances//tables/
``. app_profile_id (str): This value specifies routing for replication. If not specified, the "default" application profile will be used. rows (Union[dict, ~google.cloud.bigtable_v2.types.RowSet]): The row keys and/or ranges to read. If not specified, reads from all rows. @@ -319,8 +319,9 @@ def sample_row_keys( ... pass Args: - table_name (str): The unique name of the table from which to sample row keys. Values are - of the form ``projects//instances//tables/
``. + table_name (str): Required. The unique name of the table from which to sample row keys. + Values are of the form + ``projects//instances//tables/
``. app_profile_id (str): This value specifies routing for replication. If not specified, the "default" application profile will be used. retry (Optional[google.api_core.retry.Retry]): A retry object used @@ -403,11 +404,14 @@ def mutate_row( >>> response = client.mutate_row(table_name, row_key, mutations) Args: - table_name (str): The unique name of the table to which the mutation should be applied. - Values are of the form + table_name (str): Required. The unique name of the table to which the mutation should be + applied. Values are of the form ``projects//instances//tables/
``. - row_key (bytes): The key of the row to which the mutation should be applied. - mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Changes to be atomically applied to the specified row. Entries are applied + row_key (bytes): Required. The key of the row to which the mutation should be applied. + + Classified as IDENTIFYING\_ID to provide context around data accesses + for auditing systems. + mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Required. Changes to be atomically applied to the specified row. Entries are applied in order, meaning that earlier mutations can be masked by later ones. Must contain at least one entry and at most 100000. @@ -497,8 +501,8 @@ def mutate_rows( ... pass Args: - table_name (str): The unique name of the table to which the mutations should be applied. - entries (list[Union[dict, ~google.cloud.bigtable_v2.types.Entry]]): The row keys and corresponding mutations to be applied in bulk. + table_name (str): Required. The unique name of the table to which the mutations should be applied. + entries (list[Union[dict, ~google.cloud.bigtable_v2.types.Entry]]): Required. The row keys and corresponding mutations to be applied in bulk. Each entry is applied as an atomic mutation, but the entries may be applied in arbitrary order (even between entries for the same row). At least one entry must be specified, and in total the entries can @@ -586,10 +590,14 @@ def check_and_mutate_row( >>> response = client.check_and_mutate_row(table_name, row_key) Args: - table_name (str): The unique name of the table to which the conditional mutation should be - applied. Values are of the form + table_name (str): Required. The unique name of the table to which the conditional mutation + should be applied. Values are of the form ``projects//instances//tables/
``. - row_key (bytes): The key of the row to which the conditional mutation should be applied. + row_key (bytes): Required. The key of the row to which the conditional mutation should be + applied. + + Classified as IDENTIFYING\_ID to provide context around data accesses + for auditing systems. app_profile_id (str): This value specifies routing for replication. If not specified, the "default" application profile will be used. predicate_filter (Union[dict, ~google.cloud.bigtable_v2.types.RowFilter]): The filter to be applied to the contents of the specified row. Depending @@ -703,11 +711,15 @@ def read_modify_write_row( >>> response = client.read_modify_write_row(table_name, row_key, rules) Args: - table_name (str): The unique name of the table to which the read/modify/write rules should - be applied. Values are of the form + table_name (str): Required. The unique name of the table to which the read/modify/write + rules should be applied. Values are of the form ``projects//instances//tables/
``. - row_key (bytes): The key of the row to which the read/modify/write rules should be applied. - rules (list[Union[dict, ~google.cloud.bigtable_v2.types.ReadModifyWriteRule]]): Rules specifying how the specified row's contents are to be transformed + row_key (bytes): Required. The key of the row to which the read/modify/write rules should + be applied. + + Classified as IDENTIFYING\_ID to provide context around data accesses + for auditing systems. + rules (list[Union[dict, ~google.cloud.bigtable_v2.types.ReadModifyWriteRule]]): Required. Rules specifying how the specified row's contents are to be transformed into writes. Entries are applied in order, meaning that earlier rules will affect the results of later ones. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable.proto index 0ab763ba6e62..ea79b8f094a7 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable.proto +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable.proto @@ -1,4 +1,4 @@ -// Copyright 2018 Google Inc. +// Copyright 2019 Google LLC. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -17,6 +17,9 @@ syntax = "proto3"; package google.bigtable.v2; import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; import "google/bigtable/v2/data.proto"; import "google/protobuf/wrappers.proto"; import "google/rpc/status.proto"; @@ -27,9 +30,22 @@ option java_multiple_files = true; option java_outer_classname = "BigtableProto"; option java_package = "com.google.bigtable.v2"; option php_namespace = "Google\\Cloud\\Bigtable\\V2"; +option (google.api.resource_definition) = { + type: "bigtable.googleapis.com/Table" + pattern: "projects/{project}/instances/{instance}/tables/{table}" +}; // Service for reading from and writing to existing Bigtable tables. service Bigtable { + option (google.api.default_host) = "bigtable.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/bigtable.data," + "https://www.googleapis.com/auth/bigtable.data.readonly," + "https://www.googleapis.com/auth/cloud-bigtable.data," + "https://www.googleapis.com/auth/cloud-bigtable.data.readonly," + "https://www.googleapis.com/auth/cloud-platform," + "https://www.googleapis.com/auth/cloud-platform.read-only"; + // Streams back the contents of all requested rows in key order, optionally // applying the same Reader filter to each. Depending on their size, // rows and cells may be broken up across multiple responses, but @@ -40,17 +56,20 @@ service Bigtable { post: "/v2/{table_name=projects/*/instances/*/tables/*}:readRows" body: "*" }; + option (google.api.method_signature) = "table_name"; + option (google.api.method_signature) = "table_name,app_profile_id"; } // Returns a sample of row keys in the table. The returned row keys will // delimit contiguous sections of the table of approximately equal size, // which can be used to break up the data for distributed tasks like // mapreduces. 
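The ``google.api.method_signature`` options added above declare the flattened argument groupings that generated clients may expose; as a rough sketch of how that surface is used from the low-level ``bigtable_v2`` client (the table path is a placeholder and default credentials are assumed):

    # Illustrative sketch; the table path below is a placeholder.
    from google.cloud.bigtable_v2 import BigtableClient

    client = BigtableClient()
    table_name = "projects/my-project/instances/my-instance/tables/my-table"

    # read_rows is a server-streaming call; each response carries a batch
    # of CellChunk messages.
    for response in client.read_rows(table_name):
        for chunk in response.chunks:
            print(chunk.row_key, chunk.value)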
- rpc SampleRowKeys(SampleRowKeysRequest) - returns (stream SampleRowKeysResponse) { + rpc SampleRowKeys(SampleRowKeysRequest) returns (stream SampleRowKeysResponse) { option (google.api.http) = { get: "/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys" }; + option (google.api.method_signature) = "table_name"; + option (google.api.method_signature) = "table_name,app_profile_id"; } // Mutates a row atomically. Cells already present in the row are left @@ -60,6 +79,8 @@ service Bigtable { post: "/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow" body: "*" }; + option (google.api.method_signature) = "table_name,row_key,mutations"; + option (google.api.method_signature) = "table_name,row_key,mutations,app_profile_id"; } // Mutates multiple rows in a batch. Each individual row is mutated @@ -70,15 +91,18 @@ service Bigtable { post: "/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows" body: "*" }; + option (google.api.method_signature) = "table_name,entries"; + option (google.api.method_signature) = "table_name,entries,app_profile_id"; } // Mutates a row atomically based on the output of a predicate Reader filter. - rpc CheckAndMutateRow(CheckAndMutateRowRequest) - returns (CheckAndMutateRowResponse) { + rpc CheckAndMutateRow(CheckAndMutateRowRequest) returns (CheckAndMutateRowResponse) { option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow" body: "*" }; + option (google.api.method_signature) = "table_name,row_key,predicate_filter,true_mutations,false_mutations"; + option (google.api.method_signature) = "table_name,row_key,predicate_filter,true_mutations,false_mutations,app_profile_id"; } // Modifies a row atomically on the server. The method reads the latest @@ -86,21 +110,27 @@ service Bigtable { // entry based on pre-defined read/modify/write rules. The new value for the // timestamp is the greater of the existing timestamp or the current server // time. The method returns the new contents of all modified cells. - rpc ReadModifyWriteRow(ReadModifyWriteRowRequest) - returns (ReadModifyWriteRowResponse) { + rpc ReadModifyWriteRow(ReadModifyWriteRowRequest) returns (ReadModifyWriteRowResponse) { option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow" body: "*" }; + option (google.api.method_signature) = "table_name,row_key,rules"; + option (google.api.method_signature) = "table_name,row_key,rules,app_profile_id"; } } // Request message for Bigtable.ReadRows. message ReadRowsRequest { - // The unique name of the table from which to read. + // Required. The unique name of the table from which to read. // Values are of the form // `projects//instances//tables/
`. - string table_name = 1; + string table_name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Table" + } + ]; // This value specifies routing for replication. If not specified, the // "default" application profile will be used. @@ -127,6 +157,9 @@ message ReadRowsResponse { // this CellChunk is a continuation of the same row as the previous // CellChunk in the response stream, even if that CellChunk was in a // previous ReadRowsResponse message. + // + // Classified as IDENTIFYING_ID to provide context around data accesses for + // auditing systems. bytes row_key = 1; // The column family name for this chunk of data. If this message @@ -172,6 +205,7 @@ message ReadRowsResponse { // to pre-allocate memory to hold the full cell value. int32 value_size = 7; + // Signals to the client concerning previous CellChunks received. oneof row_status { // Indicates that the client should drop all previous chunks for // `row_key`, as it will be re-read from the beginning. @@ -183,6 +217,7 @@ message ReadRowsResponse { } } + // A collection of a row's contents as part of the read request. repeated CellChunk chunks = 1; // Optionally the server might return the row key of the last row it @@ -197,10 +232,15 @@ message ReadRowsResponse { // Request message for Bigtable.SampleRowKeys. message SampleRowKeysRequest { - // The unique name of the table from which to sample row keys. + // Required. The unique name of the table from which to sample row keys. // Values are of the form // `projects//instances//tables/
`. - string table_name = 1; + string table_name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Table" + } + ]; // This value specifies routing for replication. If not specified, the // "default" application profile will be used. @@ -216,6 +256,9 @@ message SampleRowKeysResponse { // Note that row keys in this list may not have ever been written to or read // from, and users should therefore not make any assumptions about the row key // structure that are specific to their use case. + // + // Classified as IDENTIFYING_ID to provide context around data accesses for + // auditing systems. bytes row_key = 1; // Approximate total storage space used by all rows in the table which precede @@ -227,57 +270,77 @@ message SampleRowKeysResponse { // Request message for Bigtable.MutateRow. message MutateRowRequest { - // The unique name of the table to which the mutation should be applied. + // Required. The unique name of the table to which the mutation should be applied. // Values are of the form // `projects//instances//tables/
`. - string table_name = 1; + string table_name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Table" + } + ]; // This value specifies routing for replication. If not specified, the // "default" application profile will be used. string app_profile_id = 4; - // The key of the row to which the mutation should be applied. - bytes row_key = 2; + // Required. The key of the row to which the mutation should be applied. + // + // Classified as IDENTIFYING_ID to provide context around data accesses for + // auditing systems. + bytes row_key = 2 [(google.api.field_behavior) = REQUIRED]; - // Changes to be atomically applied to the specified row. Entries are applied + // Required. Changes to be atomically applied to the specified row. Entries are applied // in order, meaning that earlier mutations can be masked by later ones. // Must contain at least one entry and at most 100000. - repeated Mutation mutations = 3; + repeated Mutation mutations = 3 [(google.api.field_behavior) = REQUIRED]; } // Response message for Bigtable.MutateRow. -message MutateRowResponse {} +message MutateRowResponse { + +} // Request message for BigtableService.MutateRows. message MutateRowsRequest { + // A mutation for a given row. message Entry { // The key of the row to which the `mutations` should be applied. + // + // Classified as IDENTIFYING_ID to provide context around data accesses for + // auditing systems. bytes row_key = 1; - // Changes to be atomically applied to the specified row. Mutations are + // Required. Changes to be atomically applied to the specified row. Mutations are // applied in order, meaning that earlier mutations can be masked by // later ones. // You must specify at least one mutation. - repeated Mutation mutations = 2; + repeated Mutation mutations = 2 [(google.api.field_behavior) = REQUIRED]; } - // The unique name of the table to which the mutations should be applied. - string table_name = 1; + // Required. The unique name of the table to which the mutations should be applied. + string table_name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Table" + } + ]; // This value specifies routing for replication. If not specified, the // "default" application profile will be used. string app_profile_id = 3; - // The row keys and corresponding mutations to be applied in bulk. + // Required. The row keys and corresponding mutations to be applied in bulk. // Each entry is applied as an atomic mutation, but the entries may be // applied in arbitrary order (even between entries for the same row). // At least one entry must be specified, and in total the entries can // contain at most 100000 mutations. - repeated Entry entries = 2; + repeated Entry entries = 2 [(google.api.field_behavior) = REQUIRED]; } // Response message for BigtableService.MutateRows. message MutateRowsResponse { + // The result of applying a passed mutation in the original request. message Entry { // The index into the original request's `entries` list of the Entry // for which a result is being reported. @@ -296,18 +359,26 @@ message MutateRowsResponse { // Request message for Bigtable.CheckAndMutateRow. message CheckAndMutateRowRequest { - // The unique name of the table to which the conditional mutation should be + // Required. The unique name of the table to which the conditional mutation should be // applied. // Values are of the form // `projects//instances//tables/
`. - string table_name = 1; + string table_name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Table" + } + ]; // This value specifies routing for replication. If not specified, the // "default" application profile will be used. string app_profile_id = 7; - // The key of the row to which the conditional mutation should be applied. - bytes row_key = 2; + // Required. The key of the row to which the conditional mutation should be applied. + // + // Classified as IDENTIFYING_ID to provide context around data accesses for + // auditing systems. + bytes row_key = 2 [(google.api.field_behavior) = REQUIRED]; // The filter to be applied to the contents of the specified row. Depending // on whether or not any results are yielded, either `true_mutations` or @@ -339,23 +410,31 @@ message CheckAndMutateRowResponse { // Request message for Bigtable.ReadModifyWriteRow. message ReadModifyWriteRowRequest { - // The unique name of the table to which the read/modify/write rules should be + // Required. The unique name of the table to which the read/modify/write rules should be // applied. // Values are of the form // `projects//instances//tables/
`. - string table_name = 1; + string table_name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Table" + } + ]; // This value specifies routing for replication. If not specified, the // "default" application profile will be used. string app_profile_id = 4; - // The key of the row to which the read/modify/write rules should be applied. - bytes row_key = 2; + // Required. The key of the row to which the read/modify/write rules should be applied. + // + // Classified as IDENTIFYING_ID to provide context around data accesses for + // auditing systems. + bytes row_key = 2 [(google.api.field_behavior) = REQUIRED]; - // Rules specifying how the specified row's contents are to be transformed + // Required. Rules specifying how the specified row's contents are to be transformed // into writes. Entries are applied in order, meaning that earlier rules will // affect the results of later ones. - repeated ReadModifyWriteRule rules = 3; + repeated ReadModifyWriteRule rules = 3 [(google.api.field_behavior) = REQUIRED]; } // Response message for Bigtable.ReadModifyWriteRow. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py index 4e4ab84e1cc8..d85fdac528c8 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py @@ -16,6 +16,9 @@ from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 +from google.api import client_pb2 as google_dot_api_dot_client__pb2 +from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 +from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 from google.cloud.bigtable_v2.proto import ( data_pb2 as google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2, ) @@ -28,13 +31,16 @@ package="google.bigtable.v2", syntax="proto3", serialized_options=_b( - "\n\026com.google.bigtable.v2B\rBigtableProtoP\001Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2" + "\n\026com.google.bigtable.v2B\rBigtableProtoP\001Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2\352AW\n\035bigtable.googleapis.com/Table\0226projects/{project}/instances/{instance}/tables/{table}" ), serialized_pb=_b( - '\n-google/cloud/bigtable_v2/proto/bigtable.proto\x12\x12google.bigtable.v2\x1a\x1cgoogle/api/annotations.proto\x1a)google/cloud/bigtable_v2/proto/data.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x17google/rpc/status.proto"\xaa\x01\n\x0fReadRowsRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x05 \x01(\t\x12(\n\x04rows\x18\x02 \x01(\x0b\x32\x1a.google.bigtable.v2.RowSet\x12-\n\x06\x66ilter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x12\n\nrows_limit\x18\x04 \x01(\x03"\xf8\x02\n\x10ReadRowsResponse\x12>\n\x06\x63hunks\x18\x01 \x03(\x0b\x32..google.bigtable.v2.ReadRowsResponse.CellChunk\x12\x1c\n\x14last_scanned_row_key\x18\x02 \x01(\x0c\x1a\x85\x02\n\tCellChunk\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x31\n\x0b\x66\x61mily_name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\tqualifier\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.BytesValue\x12\x18\n\x10timestamp_micros\x18\x04 \x01(\x03\x12\x0e\n\x06labels\x18\x05 
\x03(\t\x12\r\n\x05value\x18\x06 \x01(\x0c\x12\x12\n\nvalue_size\x18\x07 \x01(\x05\x12\x13\n\treset_row\x18\x08 \x01(\x08H\x00\x12\x14\n\ncommit_row\x18\t \x01(\x08H\x00\x42\x0c\n\nrow_status"B\n\x14SampleRowKeysRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t">\n\x15SampleRowKeysResponse\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x14\n\x0coffset_bytes\x18\x02 \x01(\x03"\x80\x01\n\x10MutateRowRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x04 \x01(\t\x12\x0f\n\x07row_key\x18\x02 \x01(\x0c\x12/\n\tmutations\x18\x03 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation"\x13\n\x11MutateRowResponse"\xc8\x01\n\x11MutateRowsRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x03 \x01(\t\x12<\n\x07\x65ntries\x18\x02 \x03(\x0b\x32+.google.bigtable.v2.MutateRowsRequest.Entry\x1aI\n\x05\x45ntry\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12/\n\tmutations\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation"\x8f\x01\n\x12MutateRowsResponse\x12=\n\x07\x65ntries\x18\x01 \x03(\x0b\x32,.google.bigtable.v2.MutateRowsResponse.Entry\x1a:\n\x05\x45ntry\x12\r\n\x05index\x18\x01 \x01(\x03\x12"\n\x06status\x18\x02 \x01(\x0b\x32\x12.google.rpc.Status"\xfd\x01\n\x18\x43heckAndMutateRowRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x07 \x01(\t\x12\x0f\n\x07row_key\x18\x02 \x01(\x0c\x12\x37\n\x10predicate_filter\x18\x06 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x34\n\x0etrue_mutations\x18\x04 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\x12\x35\n\x0f\x66\x61lse_mutations\x18\x05 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation"6\n\x19\x43heckAndMutateRowResponse\x12\x19\n\x11predicate_matched\x18\x01 \x01(\x08"\x90\x01\n\x19ReadModifyWriteRowRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x04 \x01(\t\x12\x0f\n\x07row_key\x18\x02 \x01(\x0c\x12\x36\n\x05rules\x18\x03 \x03(\x0b\x32\'.google.bigtable.v2.ReadModifyWriteRule"B\n\x1aReadModifyWriteRowResponse\x12$\n\x03row\x18\x01 
\x01(\x0b\x32\x17.google.bigtable.v2.Row2\xad\x08\n\x08\x42igtable\x12\x9d\x01\n\x08ReadRows\x12#.google.bigtable.v2.ReadRowsRequest\x1a$.google.bigtable.v2.ReadRowsResponse"D\x82\xd3\xe4\x93\x02>"9/v2/{table_name=projects/*/instances/*/tables/*}:readRows:\x01*0\x01\x12\xae\x01\n\rSampleRowKeys\x12(.google.bigtable.v2.SampleRowKeysRequest\x1a).google.bigtable.v2.SampleRowKeysResponse"F\x82\xd3\xe4\x93\x02@\x12>/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys0\x01\x12\x9f\x01\n\tMutateRow\x12$.google.bigtable.v2.MutateRowRequest\x1a%.google.bigtable.v2.MutateRowResponse"E\x82\xd3\xe4\x93\x02?":/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow:\x01*\x12\xa5\x01\n\nMutateRows\x12%.google.bigtable.v2.MutateRowsRequest\x1a&.google.bigtable.v2.MutateRowsResponse"F\x82\xd3\xe4\x93\x02@";/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows:\x01*0\x01\x12\xbf\x01\n\x11\x43heckAndMutateRow\x12,.google.bigtable.v2.CheckAndMutateRowRequest\x1a-.google.bigtable.v2.CheckAndMutateRowResponse"M\x82\xd3\xe4\x93\x02G"B/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow:\x01*\x12\xc3\x01\n\x12ReadModifyWriteRow\x12-.google.bigtable.v2.ReadModifyWriteRowRequest\x1a..google.bigtable.v2.ReadModifyWriteRowResponse"N\x82\xd3\xe4\x93\x02H"C/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow:\x01*B\x9b\x01\n\x16\x63om.google.bigtable.v2B\rBigtableProtoP\x01Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2b\x06proto3' + '\n-google/cloud/bigtable_v2/proto/bigtable.proto\x12\x12google.bigtable.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a)google/cloud/bigtable_v2/proto/data.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x17google/rpc/status.proto"\xd1\x01\n\x0fReadRowsRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x05 \x01(\t\x12(\n\x04rows\x18\x02 \x01(\x0b\x32\x1a.google.bigtable.v2.RowSet\x12-\n\x06\x66ilter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x12\n\nrows_limit\x18\x04 \x01(\x03"\xf8\x02\n\x10ReadRowsResponse\x12>\n\x06\x63hunks\x18\x01 \x03(\x0b\x32..google.bigtable.v2.ReadRowsResponse.CellChunk\x12\x1c\n\x14last_scanned_row_key\x18\x02 \x01(\x0c\x1a\x85\x02\n\tCellChunk\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x31\n\x0b\x66\x61mily_name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\tqualifier\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.BytesValue\x12\x18\n\x10timestamp_micros\x18\x04 \x01(\x03\x12\x0e\n\x06labels\x18\x05 \x03(\t\x12\r\n\x05value\x18\x06 \x01(\x0c\x12\x12\n\nvalue_size\x18\x07 \x01(\x05\x12\x13\n\treset_row\x18\x08 \x01(\x08H\x00\x12\x14\n\ncommit_row\x18\t \x01(\x08H\x00\x42\x0c\n\nrow_status"i\n\x14SampleRowKeysRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t">\n\x15SampleRowKeysResponse\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x14\n\x0coffset_bytes\x18\x02 \x01(\x03"\xb1\x01\n\x10MutateRowRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x04 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12\x34\n\tmutations\x18\x03 
\x03(\x0b\x32\x1c.google.bigtable.v2.MutationB\x03\xe0\x41\x02"\x13\n\x11MutateRowResponse"\xf9\x01\n\x11MutateRowsRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x03 \x01(\t\x12\x41\n\x07\x65ntries\x18\x02 \x03(\x0b\x32+.google.bigtable.v2.MutateRowsRequest.EntryB\x03\xe0\x41\x02\x1aN\n\x05\x45ntry\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x34\n\tmutations\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.MutationB\x03\xe0\x41\x02"\x8f\x01\n\x12MutateRowsResponse\x12=\n\x07\x65ntries\x18\x01 \x03(\x0b\x32,.google.bigtable.v2.MutateRowsResponse.Entry\x1a:\n\x05\x45ntry\x12\r\n\x05index\x18\x01 \x01(\x03\x12"\n\x06status\x18\x02 \x01(\x0b\x32\x12.google.rpc.Status"\xa9\x02\n\x18\x43heckAndMutateRowRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x07 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12\x37\n\x10predicate_filter\x18\x06 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x34\n\x0etrue_mutations\x18\x04 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\x12\x35\n\x0f\x66\x61lse_mutations\x18\x05 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation"6\n\x19\x43heckAndMutateRowResponse\x12\x19\n\x11predicate_matched\x18\x01 \x01(\x08"\xc1\x01\n\x19ReadModifyWriteRowRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x04 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12;\n\x05rules\x18\x03 \x03(\x0b\x32\'.google.bigtable.v2.ReadModifyWriteRuleB\x03\xe0\x41\x02"B\n\x1aReadModifyWriteRowResponse\x12$\n\x03row\x18\x01 \x01(\x0b\x32\x17.google.bigtable.v2.Row2\xc4\x0e\n\x08\x42igtable\x12\xc6\x01\n\x08ReadRows\x12#.google.bigtable.v2.ReadRowsRequest\x1a$.google.bigtable.v2.ReadRowsResponse"m\x82\xd3\xe4\x93\x02>"9/v2/{table_name=projects/*/instances/*/tables/*}:readRows:\x01*\xda\x41\ntable_name\xda\x41\x19table_name,app_profile_id0\x01\x12\xd7\x01\n\rSampleRowKeys\x12(.google.bigtable.v2.SampleRowKeysRequest\x1a).google.bigtable.v2.SampleRowKeysResponse"o\x82\xd3\xe4\x93\x02@\x12>/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys\xda\x41\ntable_name\xda\x41\x19table_name,app_profile_id0\x01\x12\xed\x01\n\tMutateRow\x12$.google.bigtable.v2.MutateRowRequest\x1a%.google.bigtable.v2.MutateRowResponse"\x92\x01\x82\xd3\xe4\x93\x02?":/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow:\x01*\xda\x41\x1ctable_name,row_key,mutations\xda\x41+table_name,row_key,mutations,app_profile_id\x12\xde\x01\n\nMutateRows\x12%.google.bigtable.v2.MutateRowsRequest\x1a&.google.bigtable.v2.MutateRowsResponse"\x7f\x82\xd3\xe4\x93\x02@";/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows:\x01*\xda\x41\x12table_name,entries\xda\x41!table_name,entries,app_profile_id0\x01\x12\xd9\x02\n\x11\x43heckAndMutateRow\x12,.google.bigtable.v2.CheckAndMutateRowRequest\x1a-.google.bigtable.v2.CheckAndMutateRowResponse"\xe6\x01\x82\xd3\xe4\x93\x02G"B/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow:\x01*\xda\x41\x42table_name,row_key,predicate_filter,true_mutations,false_mutations\xda\x41Qtable_name,row_key,predicate_filter,true_mutations,false_mutations,app_profile_id\x12\x89\x02\n\x12ReadModifyWriteRow\x12-.google.bigtable.v2.ReadModifyWriteRowRequest\x1a..google.bigtable.v2.ReadModifyWriteRowResponse"\x93\x01\x82\xd3\xe4\x93\x02H"C/v2/{table_name=projects/*/instances/*/tables/*}:
readModifyWriteRow:\x01*\xda\x41\x18table_name,row_key,rules\xda\x41\'table_name,row_key,rules,app_profile_id\x1a\xdb\x02\xca\x41\x17\x62igtable.googleapis.com\xd2\x41\xbd\x02https://www.googleapis.com/auth/bigtable.data,https://www.googleapis.com/auth/bigtable.data.readonly,https://www.googleapis.com/auth/cloud-bigtable.data,https://www.googleapis.com/auth/cloud-bigtable.data.readonly,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-onlyB\xf5\x01\n\x16\x63om.google.bigtable.v2B\rBigtableProtoP\x01Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2\xea\x41W\n\x1d\x62igtable.googleapis.com/Table\x12\x36projects/{project}/instances/{instance}/tables/{table}b\x06proto3' ), dependencies=[ google_dot_api_dot_annotations__pb2.DESCRIPTOR, + google_dot_api_dot_client__pb2.DESCRIPTOR, + google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, + google_dot_api_dot_resource__pb2.DESCRIPTOR, google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2.DESCRIPTOR, google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR, google_dot_rpc_dot_status__pb2.DESCRIPTOR, @@ -64,7 +70,9 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b( + "\340A\002\372A\037\n\035bigtable.googleapis.com/Table" + ), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -148,8 +156,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=200, - serialized_end=370, + serialized_start=285, + serialized_end=494, ) @@ -339,8 +347,8 @@ fields=[], ), ], - serialized_start=488, - serialized_end=749, + serialized_start=612, + serialized_end=873, ) _READROWSRESPONSE = _descriptor.Descriptor( @@ -395,8 +403,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=373, - serialized_end=749, + serialized_start=497, + serialized_end=873, ) @@ -422,7 +430,9 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b( + "\340A\002\372A\037\n\035bigtable.googleapis.com/Table" + ), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -452,8 +462,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=751, - serialized_end=817, + serialized_start=875, + serialized_end=980, ) @@ -509,8 +519,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=819, - serialized_end=881, + serialized_start=982, + serialized_end=1044, ) @@ -536,7 +546,9 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b( + "\340A\002\372A\037\n\035bigtable.googleapis.com/Table" + ), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -572,7 +584,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\002"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -590,7 +602,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\002"), file=DESCRIPTOR, ), ], @@ -602,8 +614,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=884, - serialized_end=1012, + serialized_start=1047, + serialized_end=1224, ) @@ -622,8 +634,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1014, - serialized_end=1033, + serialized_start=1226, + serialized_end=1245, ) @@ -667,7 +679,7 @@ containing_type=None, is_extension=False, extension_scope=None, - 
serialized_options=None, + serialized_options=_b("\340A\002"), file=DESCRIPTOR, ), ], @@ -679,8 +691,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1163, - serialized_end=1236, + serialized_start=1419, + serialized_end=1497, ) _MUTATEROWSREQUEST = _descriptor.Descriptor( @@ -705,7 +717,9 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b( + "\340A\002\372A\037\n\035bigtable.googleapis.com/Table" + ), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -741,7 +755,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\002"), file=DESCRIPTOR, ), ], @@ -753,8 +767,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1036, - serialized_end=1236, + serialized_start=1248, + serialized_end=1497, ) @@ -810,8 +824,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1324, - serialized_end=1382, + serialized_start=1585, + serialized_end=1643, ) _MUTATEROWSRESPONSE = _descriptor.Descriptor( @@ -848,8 +862,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1239, - serialized_end=1382, + serialized_start=1500, + serialized_end=1643, ) @@ -875,7 +889,9 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b( + "\340A\002\372A\037\n\035bigtable.googleapis.com/Table" + ), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -911,7 +927,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\002"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -977,8 +993,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1385, - serialized_end=1638, + serialized_start=1646, + serialized_end=1943, ) @@ -1016,8 +1032,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1640, - serialized_end=1694, + serialized_start=1945, + serialized_end=1999, ) @@ -1043,7 +1059,9 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b( + "\340A\002\372A\037\n\035bigtable.googleapis.com/Table" + ), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1079,7 +1097,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\002"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -1097,7 +1115,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\002"), file=DESCRIPTOR, ), ], @@ -1109,8 +1127,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1697, - serialized_end=1841, + serialized_start=2002, + serialized_end=2195, ) @@ -1148,8 +1166,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1843, - serialized_end=1909, + serialized_start=2197, + serialized_end=2263, ) _READROWSREQUEST.fields_by_name[ @@ -1239,8 +1257,8 @@ Attributes: table_name: - The unique name of the table from which to read. Values are of - the form + Required. The unique name of the table from which to read. + Values are of the form ``projects//instances//tables/
``. app_profile_id: This value specifies routing for replication. If not @@ -1280,6 +1298,8 @@ this CellChunk is a continuation of the same row as the previous CellChunk in the response stream, even if that CellChunk was in a previous ReadRowsResponse message. + Classified as IDENTIFYING\_ID to provide context around data + accesses for auditing systems. family_name: The column family name for this chunk of data. If this message is not present this CellChunk is a continuation of the same @@ -1317,6 +1337,8 @@ not the final chunk of that cell, value\_size will be set to the total length of the cell value. The client can use this size to pre-allocate memory to hold the full cell value. + row_status: + Signals to the client concerning previous CellChunks received. reset_row: Indicates that the client should drop all previous chunks for ``row_key``, as it will be re-read from the beginning. @@ -1333,6 +1355,8 @@ Attributes: + chunks: + A collection of a row's contents as part of the read request. last_scanned_row_key: Optionally the server might return the row key of the last row it has scanned. The client can use this to construct a more @@ -1359,8 +1383,8 @@ Attributes: table_name: - The unique name of the table from which to sample row keys. - Values are of the form + Required. The unique name of the table from which to sample + row keys. Values are of the form ``projects//instances//tables/
``. app_profile_id: This value specifies routing for replication. If not @@ -1389,7 +1413,8 @@ if present. Note that row keys in this list may not have ever been written to or read from, and users should therefore not make any assumptions about the row key structure that are - specific to their use case. + specific to their use case. Classified as IDENTIFYING\_ID to + provide context around data accesses for auditing systems. offset_bytes: Approximate total storage space used by all rows in the table which precede ``row_key``. Buffering the contents of all rows @@ -1412,19 +1437,21 @@ Attributes: table_name: - The unique name of the table to which the mutation should be - applied. Values are of the form + Required. The unique name of the table to which the mutation + should be applied. Values are of the form ``projects//instances//tables/
``. app_profile_id: This value specifies routing for replication. If not specified, the "default" application profile will be used. row_key: - The key of the row to which the mutation should be applied. + Required. The key of the row to which the mutation should be + applied. Classified as IDENTIFYING\_ID to provide context + around data accesses for auditing systems. mutations: - Changes to be atomically applied to the specified row. Entries - are applied in order, meaning that earlier mutations can be - masked by later ones. Must contain at least one entry and at - most 100000. + Required. Changes to be atomically applied to the specified + row. Entries are applied in order, meaning that earlier + mutations can be masked by later ones. Must contain at least + one entry and at most 100000. """, # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowRequest) ), @@ -1454,15 +1481,19 @@ dict( DESCRIPTOR=_MUTATEROWSREQUEST_ENTRY, __module__="google.cloud.bigtable_v2.proto.bigtable_pb2", - __doc__="""Attributes: + __doc__="""A mutation for a given row. + + + Attributes: row_key: The key of the row to which the ``mutations`` should be - applied. + applied. Classified as IDENTIFYING\_ID to provide context + around data accesses for auditing systems. mutations: - Changes to be atomically applied to the specified row. - Mutations are applied in order, meaning that earlier mutations - can be masked by later ones. You must specify at least one - mutation. + Required. Changes to be atomically applied to the specified + row. Mutations are applied in order, meaning that earlier + mutations can be masked by later ones. You must specify at + least one mutation. """, # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsRequest.Entry) ), @@ -1474,16 +1505,16 @@ Attributes: table_name: - The unique name of the table to which the mutations should be - applied. + Required. The unique name of the table to which the mutations + should be applied. app_profile_id: This value specifies routing for replication. If not specified, the "default" application profile will be used. entries: - The row keys and corresponding mutations to be applied in - bulk. Each entry is applied as an atomic mutation, but the - entries may be applied in arbitrary order (even between - entries for the same row). At least one entry must be + Required. The row keys and corresponding mutations to be + applied in bulk. Each entry is applied as an atomic mutation, + but the entries may be applied in arbitrary order (even + between entries for the same row). At least one entry must be specified, and in total the entries can contain at most 100000 mutations. """, @@ -1503,7 +1534,10 @@ dict( DESCRIPTOR=_MUTATEROWSRESPONSE_ENTRY, __module__="google.cloud.bigtable_v2.proto.bigtable_pb2", - __doc__="""Attributes: + __doc__="""The result of applying a passed mutation in the original request. + + + Attributes: index: The index into the original request's ``entries`` list of the Entry for which a result is being reported. @@ -1543,15 +1577,16 @@ Attributes: table_name: - The unique name of the table to which the conditional mutation - should be applied. Values are of the form + Required. The unique name of the table to which the + conditional mutation should be applied. Values are of the form ``projects//instances//tables/
``. app_profile_id: This value specifies routing for replication. If not specified, the "default" application profile will be used. row_key: - The key of the row to which the conditional mutation should be - applied. + Required. The key of the row to which the conditional mutation + should be applied. Classified as IDENTIFYING\_ID to provide + context around data accesses for auditing systems. predicate_filter: The filter to be applied to the contents of the specified row. Depending on whether or not any results are yielded, either @@ -1607,19 +1642,22 @@ Attributes: table_name: - The unique name of the table to which the read/modify/write - rules should be applied. Values are of the form + Required. The unique name of the table to which the + read/modify/write rules should be applied. Values are of the + form ``projects//instances//tables/
``. app_profile_id: This value specifies routing for replication. If not specified, the "default" application profile will be used. row_key: - The key of the row to which the read/modify/write rules should - be applied. + Required. The key of the row to which the read/modify/write + rules should be applied. Classified as IDENTIFYING\_ID to + provide context around data accesses for auditing systems. rules: - Rules specifying how the specified row's contents are to be - transformed into writes. Entries are applied in order, meaning - that earlier rules will affect the results of later ones. + Required. Rules specifying how the specified row's contents + are to be transformed into writes. Entries are applied in + order, meaning that earlier rules will affect the results of + later ones. """, # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadModifyWriteRowRequest) ), @@ -1647,15 +1685,30 @@ DESCRIPTOR._options = None +_READROWSREQUEST.fields_by_name["table_name"]._options = None +_SAMPLEROWKEYSREQUEST.fields_by_name["table_name"]._options = None +_MUTATEROWREQUEST.fields_by_name["table_name"]._options = None +_MUTATEROWREQUEST.fields_by_name["row_key"]._options = None +_MUTATEROWREQUEST.fields_by_name["mutations"]._options = None +_MUTATEROWSREQUEST_ENTRY.fields_by_name["mutations"]._options = None +_MUTATEROWSREQUEST.fields_by_name["table_name"]._options = None +_MUTATEROWSREQUEST.fields_by_name["entries"]._options = None +_CHECKANDMUTATEROWREQUEST.fields_by_name["table_name"]._options = None +_CHECKANDMUTATEROWREQUEST.fields_by_name["row_key"]._options = None +_READMODIFYWRITEROWREQUEST.fields_by_name["table_name"]._options = None +_READMODIFYWRITEROWREQUEST.fields_by_name["row_key"]._options = None +_READMODIFYWRITEROWREQUEST.fields_by_name["rules"]._options = None _BIGTABLE = _descriptor.ServiceDescriptor( name="Bigtable", full_name="google.bigtable.v2.Bigtable", file=DESCRIPTOR, index=0, - serialized_options=None, - serialized_start=1912, - serialized_end=2981, + serialized_options=_b( + "\312A\027bigtable.googleapis.com\322A\275\002https://www.googleapis.com/auth/bigtable.data,https://www.googleapis.com/auth/bigtable.data.readonly,https://www.googleapis.com/auth/cloud-bigtable.data,https://www.googleapis.com/auth/cloud-bigtable.data.readonly,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-only" + ), + serialized_start=2266, + serialized_end=4126, methods=[ _descriptor.MethodDescriptor( name="ReadRows", @@ -1665,7 +1718,7 @@ input_type=_READROWSREQUEST, output_type=_READROWSRESPONSE, serialized_options=_b( - '\202\323\344\223\002>"9/v2/{table_name=projects/*/instances/*/tables/*}:readRows:\001*' + '\202\323\344\223\002>"9/v2/{table_name=projects/*/instances/*/tables/*}:readRows:\001*\332A\ntable_name\332A\031table_name,app_profile_id' ), ), _descriptor.MethodDescriptor( @@ -1676,7 +1729,7 @@ input_type=_SAMPLEROWKEYSREQUEST, output_type=_SAMPLEROWKEYSRESPONSE, serialized_options=_b( - "\202\323\344\223\002@\022>/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys" + "\202\323\344\223\002@\022>/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys\332A\ntable_name\332A\031table_name,app_profile_id" ), ), _descriptor.MethodDescriptor( @@ -1687,7 +1740,7 @@ input_type=_MUTATEROWREQUEST, output_type=_MUTATEROWRESPONSE, serialized_options=_b( - '\202\323\344\223\002?":/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow:\001*' + 
'\202\323\344\223\002?":/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow:\001*\332A\034table_name,row_key,mutations\332A+table_name,row_key,mutations,app_profile_id' ), ), _descriptor.MethodDescriptor( @@ -1698,7 +1751,7 @@ input_type=_MUTATEROWSREQUEST, output_type=_MUTATEROWSRESPONSE, serialized_options=_b( - '\202\323\344\223\002@";/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows:\001*' + '\202\323\344\223\002@";/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows:\001*\332A\022table_name,entries\332A!table_name,entries,app_profile_id' ), ), _descriptor.MethodDescriptor( @@ -1709,7 +1762,7 @@ input_type=_CHECKANDMUTATEROWREQUEST, output_type=_CHECKANDMUTATEROWRESPONSE, serialized_options=_b( - '\202\323\344\223\002G"B/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow:\001*' + '\202\323\344\223\002G"B/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow:\001*\332ABtable_name,row_key,predicate_filter,true_mutations,false_mutations\332AQtable_name,row_key,predicate_filter,true_mutations,false_mutations,app_profile_id' ), ), _descriptor.MethodDescriptor( @@ -1720,7 +1773,7 @@ input_type=_READMODIFYWRITEROWREQUEST, output_type=_READMODIFYWRITEROWRESPONSE, serialized_options=_b( - '\202\323\344\223\002H"C/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow:\001*' + "\202\323\344\223\002H\"C/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow:\001*\332A\030table_name,row_key,rules\332A'table_name,row_key,rules,app_profile_id" ), ), ], diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data.proto index d0aab0b63f12..8fd0c15cb3e3 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data.proto +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data.proto @@ -1,4 +1,4 @@ -// Copyright 2018 Google Inc. +// Copyright 2019 Google LLC. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,6 +11,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. +// syntax = "proto3"; @@ -485,7 +486,9 @@ message Mutation { } // A Mutation which deletes all cells from the containing row. - message DeleteFromRow {} + message DeleteFromRow { + + } // Which of the possible Mutation types to apply. 
oneof mutation { diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index 621f88741fe6..c316b757f3dc 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -1,19 +1,19 @@ { - "updateTime": "2019-11-12T13:18:17.600834Z", + "updateTime": "2020-01-08T13:14:12.679844Z", "sources": [ { "generator": { "name": "artman", - "version": "0.41.1", - "dockerImage": "googleapis/artman@sha256:545c758c76c3f779037aa259023ec3d1ef2d57d2c8cd00a222cb187d63ceac5e" + "version": "0.43.0", + "dockerImage": "googleapis/artman@sha256:264654a37596a44b0668b8ce6ac41082d713f6ee150b3fc6425fa78cc64e4f20" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "f69562be0608904932bdcfbc5ad8b9a22d9dceb8", - "internalRef": "279774957" + "sha": "08b488e0660c59842a7dee0e3e2b65d9e3a514a9", + "internalRef": "288625007" } }, { From dddafabac95bdba59f4b7306647bf4c1f4969498 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Thu, 30 Jan 2020 09:09:53 -0800 Subject: [PATCH 297/892] chore(bigtable): bump copyright year to 2020; change formatting of docstrings (via synth) (#10254) --- .../cloud/bigtable_admin_v2/__init__.py | 2 +- .../gapic/bigtable_instance_admin_client.py | 2 +- .../gapic/bigtable_table_admin_client.py | 2 +- .../cloud/bigtable_admin_v2/gapic/enums.py | 2 +- .../bigtable_instance_admin_grpc_transport.py | 2 +- .../bigtable_table_admin_grpc_transport.py | 2 +- .../proto/bigtable_instance_admin_pb2.py | 19 +- .../proto/bigtable_table_admin_pb2.py | 3 +- .../bigtable_admin_v2/proto/instance_pb2.py | 54 +-- .../bigtable_admin_v2/proto/table_pb2.py | 54 ++- .../google/cloud/bigtable_admin_v2/types.py | 2 +- .../google/cloud/bigtable_v2/__init__.py | 2 +- .../bigtable_v2/gapic/bigtable_client.py | 17 +- .../transports/bigtable_grpc_transport.py | 2 +- .../cloud/bigtable_v2/proto/bigtable.proto | 18 - .../cloud/bigtable_v2/proto/bigtable_pb2.py | 21 +- .../cloud/bigtable_v2/proto/data_pb2.py | 67 +-- .../google/cloud/bigtable_v2/types.py | 2 +- packages/google-cloud-bigtable/synth.metadata | 409 +++++++++++++++++- .../unit/gapic/v2/test_bigtable_client_v2.py | 2 +- .../test_bigtable_instance_admin_client_v2.py | 2 +- .../v2/test_bigtable_table_admin_client_v2.py | 2 +- 22 files changed, 537 insertions(+), 151 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py index 263c8a37a553..876859fe058e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py index c0bac0768dcf..0724c3822a3d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py index c96304e30832..9ccd58471455 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py index ba847ea35974..68f25f989ba7 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py index 3482193864b1..fa5bf0556a96 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py index b4fc8e9b0ff8..d8a5bfee0d74 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py index 5f0601ac2026..540e8c91b83b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py @@ -1721,7 +1721,8 @@ dict( DESCRIPTOR=_PARTIALUPDATEINSTANCEREQUEST, __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", - __doc__="""Request message for BigtableInstanceAdmin.PartialUpdateInstance. + __doc__="""Request message for + BigtableInstanceAdmin.PartialUpdateInstance. Attributes: @@ -1975,7 +1976,8 @@ dict( DESCRIPTOR=_CREATEAPPPROFILEREQUEST, __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", - __doc__="""Request message for BigtableInstanceAdmin.CreateAppProfile. + __doc__="""Request message for + BigtableInstanceAdmin.CreateAppProfile. Attributes: @@ -2051,7 +2053,8 @@ dict( DESCRIPTOR=_LISTAPPPROFILESRESPONSE, __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", - __doc__="""Response message for BigtableInstanceAdmin.ListAppProfiles. + __doc__="""Response message for + BigtableInstanceAdmin.ListAppProfiles. Attributes: @@ -2079,7 +2082,8 @@ dict( DESCRIPTOR=_UPDATEAPPPROFILEREQUEST, __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", - __doc__="""Request message for BigtableInstanceAdmin.UpdateAppProfile. + __doc__="""Request message for + BigtableInstanceAdmin.UpdateAppProfile. Attributes: @@ -2103,7 +2107,8 @@ dict( DESCRIPTOR=_DELETEAPPPROFILEREQUEST, __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", - __doc__="""Request message for BigtableInstanceAdmin.DeleteAppProfile. + __doc__="""Request message for + BigtableInstanceAdmin.DeleteAppProfile. Attributes: @@ -2125,7 +2130,9 @@ dict( DESCRIPTOR=_UPDATEAPPPROFILEMETADATA, __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", - __doc__="""The metadata for the Operation returned by UpdateAppProfile. + __doc__="""The metadata for the Operation returned by + UpdateAppProfile. 
+ """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.UpdateAppProfileMetadata) ), diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py index f2a95d546ac3..6852607952f3 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py @@ -2069,7 +2069,8 @@ dict( DESCRIPTOR=_CREATETABLEFROMSNAPSHOTMETADATA, __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", - __doc__="""The metadata for the Operation returned by CreateTableFromSnapshot. + __doc__="""The metadata for the Operation returned by + CreateTableFromSnapshot. Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py index ef3a7ce7858b..5f45909fc6a1 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py @@ -653,14 +653,15 @@ ), DESCRIPTOR=_INSTANCE, __module__="google.cloud.bigtable.admin_v2.proto.instance_pb2", - __doc__="""A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and - the resources that serve them. All tables in an instance are served from - a single [Cluster][google.bigtable.admin.v2.Cluster]. + __doc__="""A collection of Bigtable + [Tables][google.bigtable.admin.v2.Table] and the resources that serve + them. All tables in an instance are served from a single + [Cluster][google.bigtable.admin.v2.Cluster]. Attributes: name: - (``OutputOnly``) The unique name of the instance. Values are + (\ ``OutputOnly``) The unique name of the instance. Values are of the form ``projects//instances/[a-z][a-z0-9\\-]+[a-z0-9]``. display_name: @@ -668,7 +669,7 @@ Can be changed at any time, but should be kept globally unique to avoid confusion. state: - (``OutputOnly``) The current state of the instance. + (\ ``OutputOnly``) The current state of the instance. type: The type of the instance. Defaults to ``PRODUCTION``. labels: @@ -696,30 +697,30 @@ dict( DESCRIPTOR=_CLUSTER, __module__="google.cloud.bigtable.admin_v2.proto.instance_pb2", - __doc__="""A resizable group of nodes in a particular cloud location, capable of - serving all [Tables][google.bigtable.admin.v2.Table] in the parent - [Instance][google.bigtable.admin.v2.Instance]. + __doc__="""A resizable group of nodes in a particular cloud location, + capable of serving all [Tables][google.bigtable.admin.v2.Table] in the + parent [Instance][google.bigtable.admin.v2.Instance]. Attributes: name: - (``OutputOnly``) The unique name of the cluster. Values are of - the form ``projects//instances//clusters/[a - -z][-a-z0-9]*``. + (\ ``OutputOnly``) The unique name of the cluster. Values are + of the form ``projects//instances//clusters + /[a-z][-a-z0-9]*``. location: - (``CreationOnly``) The location where this cluster's nodes and - storage reside. For best performance, clients should be + (\ ``CreationOnly``) The location where this cluster's nodes + and storage reside. 
For best performance, clients should be located as close as possible to this cluster. Currently only zones are supported, so values should be of the form ``projects//locations/``. state: - (``OutputOnly``) The current state of the cluster. + (\ ``OutputOnly``) The current state of the cluster. serve_nodes: The number of nodes allocated to this cluster. More nodes enable higher throughput and more consistent performance. default_storage_type: - (``CreationOnly``) The type of storage used by this cluster to - serve its parent instance's tables, unless explicitly + (\ ``CreationOnly``) The type of storage used by this cluster + to serve its parent instance's tables, unless explicitly overridden. """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Cluster) @@ -737,10 +738,11 @@ dict( DESCRIPTOR=_APPPROFILE_MULTICLUSTERROUTINGUSEANY, __module__="google.cloud.bigtable.admin_v2.proto.instance_pb2", - __doc__="""Read/write requests may be routed to any cluster in the instance, and - will fail over to another cluster in the event of transient errors or - delays. Choosing this option sacrifices read-your-writes consistency to - improve availability. + __doc__="""Read/write requests may be routed to any cluster in the + instance, and will fail over to another cluster in the event of + transient errors or delays. Choosing this option sacrifices + read-your-writes consistency to improve availability. + """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny) ), @@ -751,9 +753,9 @@ dict( DESCRIPTOR=_APPPROFILE_SINGLECLUSTERROUTING, __module__="google.cloud.bigtable.admin_v2.proto.instance_pb2", - __doc__="""Unconditionally routes all read/write requests to a specific cluster. - This option preserves read-your-writes consistency, but does not improve - availability. + __doc__="""Unconditionally routes all read/write requests to a + specific cluster. This option preserves read-your-writes consistency, + but does not improve availability. Attributes: @@ -770,13 +772,13 @@ ), DESCRIPTOR=_APPPROFILE, __module__="google.cloud.bigtable.admin_v2.proto.instance_pb2", - __doc__="""A configuration object describing how Cloud Bigtable should treat - traffic from a particular end user application. + __doc__="""A configuration object describing how Cloud Bigtable + should treat traffic from a particular end user application. Attributes: name: - (``OutputOnly``) The unique name of the app profile. Values + (\ ``OutputOnly``) The unique name of the app profile. Values are of the form ``projects//instances//appProfiles/[_a- zA-Z0-9][-_.a-zA-Z0-9]*``. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py index c348fe4a280f..b026dff95f39 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py @@ -841,7 +841,7 @@ Attributes: replication_state: - (``OutputOnly``) The state of replication for the table in + (\ ``OutputOnly``) The state of replication for the table in this cluster. """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table.ClusterState) @@ -867,32 +867,33 @@ ), DESCRIPTOR=_TABLE, __module__="google.cloud.bigtable.admin_v2.proto.table_pb2", - __doc__="""A collection of user data indexed by row, column, and timestamp. Each - table is served using the resources of its parent cluster. 
+ __doc__="""A collection of user data indexed by row, column, and + timestamp. Each table is served using the resources of its parent + cluster. Attributes: name: - (``OutputOnly``) The unique name of the table. Values are of + (\ ``OutputOnly``) The unique name of the table. Values are of the form ``projects//instances//tables/[_a- zA-Z0-9][-_.a-zA-Z0-9]*``. Views: ``NAME_ONLY``, ``SCHEMA_VIEW``, ``REPLICATION_VIEW``, ``FULL`` cluster_states: - (``OutputOnly``) Map from cluster ID to per-cluster table + (\ ``OutputOnly``) Map from cluster ID to per-cluster table state. If it could not be determined whether or not the table has data in a particular cluster (for example, if its zone is unavailable), then there will be an entry for the cluster with UNKNOWN ``replication_status``. Views: ``REPLICATION_VIEW``, ``FULL`` column_families: - (``CreationOnly``) The column families configured for this + (\ ``CreationOnly``) The column families configured for this table, mapped by column family ID. Views: ``SCHEMA_VIEW``, ``FULL`` granularity: - (``CreationOnly``) The granularity (i.e. ``MILLIS``) at which - timestamps are stored in this table. Timestamps not matching - the granularity will be rejected. If unspecified at creation - time, the value will be set to ``MILLIS``. Views: + (\ ``CreationOnly``) The granularity (i.e. ``MILLIS``) at + which timestamps are stored in this table. Timestamps not + matching the granularity will be rejected. If unspecified at + creation time, the value will be set to ``MILLIS``. Views: ``SCHEMA_VIEW``, ``FULL`` """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table) @@ -909,7 +910,8 @@ dict( DESCRIPTOR=_COLUMNFAMILY, __module__="google.cloud.bigtable.admin_v2.proto.table_pb2", - __doc__="""A set of columns within a table which share a common configuration. + __doc__="""A set of columns within a table which share a common + configuration. Attributes: @@ -935,7 +937,8 @@ dict( DESCRIPTOR=_GCRULE_INTERSECTION, __module__="google.cloud.bigtable.admin_v2.proto.table_pb2", - __doc__="""A GcRule which deletes cells matching all of the given rules. + __doc__="""A GcRule which deletes cells matching all of the given + rules. Attributes: @@ -952,7 +955,8 @@ dict( DESCRIPTOR=_GCRULE_UNION, __module__="google.cloud.bigtable.admin_v2.proto.table_pb2", - __doc__="""A GcRule which deletes cells matching any of the given rules. + __doc__="""A GcRule which deletes cells matching any of the given + rules. Attributes: @@ -965,7 +969,8 @@ ), DESCRIPTOR=_GCRULE, __module__="google.cloud.bigtable.admin_v2.proto.table_pb2", - __doc__="""Rule for determining which cells to delete during garbage collection. + __doc__="""Rule for determining which cells to delete during garbage + collection. Attributes: @@ -995,8 +1000,9 @@ dict( DESCRIPTOR=_SNAPSHOT, __module__="google.cloud.bigtable.admin_v2.proto.table_pb2", - __doc__="""A snapshot of a table at a particular time. A snapshot can be used as a - checkpoint for data restoration or a data source for a new table. + __doc__="""A snapshot of a table at a particular time. A snapshot can + be used as a checkpoint for data restoration or a data source for a new + table. Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. @@ -1007,28 +1013,28 @@ Attributes: name: - (``OutputOnly``) The unique name of the snapshot. Values are + (\ ``OutputOnly``) The unique name of the snapshot. 
Values are of the form ``projects//instances//clusters //snapshots/``. source_table: - (``OutputOnly``) The source table at the time the snapshot was - taken. + (\ ``OutputOnly``) The source table at the time the snapshot + was taken. data_size_bytes: - (``OutputOnly``) The size of the data in the source table at + (\ ``OutputOnly``) The size of the data in the source table at the time the snapshot was taken. In some cases, this value may be computed asynchronously via a background process and a placeholder of 0 will be used in the meantime. create_time: - (``OutputOnly``) The time when the snapshot is created. + (\ ``OutputOnly``) The time when the snapshot is created. delete_time: - (``OutputOnly``) The time when the snapshot will be deleted. + (\ ``OutputOnly``) The time when the snapshot will be deleted. The maximum amount of time a snapshot can stay active is 365 days. If 'ttl' is not specified, the default maximum of 365 days will be used. state: - (``OutputOnly``) The current state of the snapshot. + (\ ``OutputOnly``) The current state of the snapshot. description: - (``OutputOnly``) Description of the snapshot. + (\ ``OutputOnly``) Description of the snapshot. """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Snapshot) ), diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types.py index eb2d919856e8..2b149637e634 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py index cbbc1b2b98db..a649c8cf4f59 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py index f0522654cb1d..abe6130df88c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -408,9 +408,6 @@ def mutate_row( applied. Values are of the form ``projects//instances//tables/
``. row_key (bytes): Required. The key of the row to which the mutation should be applied. - - Classified as IDENTIFYING\_ID to provide context around data accesses - for auditing systems. mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Required. Changes to be atomically applied to the specified row. Entries are applied in order, meaning that earlier mutations can be masked by later ones. Must contain at least one entry and at most 100000. @@ -593,11 +590,7 @@ def check_and_mutate_row( table_name (str): Required. The unique name of the table to which the conditional mutation should be applied. Values are of the form ``projects//instances//tables/
``. - row_key (bytes): Required. The key of the row to which the conditional mutation should be - applied. - - Classified as IDENTIFYING\_ID to provide context around data accesses - for auditing systems. + row_key (bytes): Required. The key of the row to which the conditional mutation should be applied. app_profile_id (str): This value specifies routing for replication. If not specified, the "default" application profile will be used. predicate_filter (Union[dict, ~google.cloud.bigtable_v2.types.RowFilter]): The filter to be applied to the contents of the specified row. Depending @@ -714,11 +707,7 @@ def read_modify_write_row( table_name (str): Required. The unique name of the table to which the read/modify/write rules should be applied. Values are of the form ``projects//instances//tables/
``. - row_key (bytes): Required. The key of the row to which the read/modify/write rules should - be applied. - - Classified as IDENTIFYING\_ID to provide context around data accesses - for auditing systems. + row_key (bytes): Required. The key of the row to which the read/modify/write rules should be applied. rules (list[Union[dict, ~google.cloud.bigtable_v2.types.ReadModifyWriteRule]]): Required. Rules specifying how the specified row's contents are to be transformed into writes. Entries are applied in order, meaning that earlier rules will affect the results of later ones. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py index 3c30df704a57..5b2757db2d6d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable.proto index ea79b8f094a7..c54225ed3fd9 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable.proto +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable.proto @@ -157,9 +157,6 @@ message ReadRowsResponse { // this CellChunk is a continuation of the same row as the previous // CellChunk in the response stream, even if that CellChunk was in a // previous ReadRowsResponse message. - // - // Classified as IDENTIFYING_ID to provide context around data accesses for - // auditing systems. bytes row_key = 1; // The column family name for this chunk of data. If this message @@ -256,9 +253,6 @@ message SampleRowKeysResponse { // Note that row keys in this list may not have ever been written to or read // from, and users should therefore not make any assumptions about the row key // structure that are specific to their use case. - // - // Classified as IDENTIFYING_ID to provide context around data accesses for - // auditing systems. bytes row_key = 1; // Approximate total storage space used by all rows in the table which precede @@ -285,9 +279,6 @@ message MutateRowRequest { string app_profile_id = 4; // Required. The key of the row to which the mutation should be applied. - // - // Classified as IDENTIFYING_ID to provide context around data accesses for - // auditing systems. bytes row_key = 2 [(google.api.field_behavior) = REQUIRED]; // Required. Changes to be atomically applied to the specified row. Entries are applied @@ -306,9 +297,6 @@ message MutateRowsRequest { // A mutation for a given row. message Entry { // The key of the row to which the `mutations` should be applied. - // - // Classified as IDENTIFYING_ID to provide context around data accesses for - // auditing systems. bytes row_key = 1; // Required. Changes to be atomically applied to the specified row. Mutations are @@ -375,9 +363,6 @@ message CheckAndMutateRowRequest { string app_profile_id = 7; // Required. The key of the row to which the conditional mutation should be applied. 
- // - // Classified as IDENTIFYING_ID to provide context around data accesses for - // auditing systems. bytes row_key = 2 [(google.api.field_behavior) = REQUIRED]; // The filter to be applied to the contents of the specified row. Depending @@ -426,9 +411,6 @@ message ReadModifyWriteRowRequest { string app_profile_id = 4; // Required. The key of the row to which the read/modify/write rules should be applied. - // - // Classified as IDENTIFYING_ID to provide context around data accesses for - // auditing systems. bytes row_key = 2 [(google.api.field_behavior) = REQUIRED]; // Required. Rules specifying how the specified row's contents are to be transformed diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py index d85fdac528c8..59fb73a65fa7 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py @@ -1298,8 +1298,6 @@ this CellChunk is a continuation of the same row as the previous CellChunk in the response stream, even if that CellChunk was in a previous ReadRowsResponse message. - Classified as IDENTIFYING\_ID to provide context around data - accesses for auditing systems. family_name: The column family name for this chunk of data. If this message is not present this CellChunk is a continuation of the same @@ -1413,8 +1411,7 @@ if present. Note that row keys in this list may not have ever been written to or read from, and users should therefore not make any assumptions about the row key structure that are - specific to their use case. Classified as IDENTIFYING\_ID to - provide context around data accesses for auditing systems. + specific to their use case. offset_bytes: Approximate total storage space used by all rows in the table which precede ``row_key``. Buffering the contents of all rows @@ -1445,8 +1442,7 @@ specified, the "default" application profile will be used. row_key: Required. The key of the row to which the mutation should be - applied. Classified as IDENTIFYING\_ID to provide context - around data accesses for auditing systems. + applied. mutations: Required. Changes to be atomically applied to the specified row. Entries are applied in order, meaning that earlier @@ -1465,6 +1461,7 @@ DESCRIPTOR=_MUTATEROWRESPONSE, __module__="google.cloud.bigtable_v2.proto.bigtable_pb2", __doc__="""Response message for Bigtable.MutateRow. + """, # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowResponse) ), @@ -1487,8 +1484,7 @@ Attributes: row_key: The key of the row to which the ``mutations`` should be - applied. Classified as IDENTIFYING\_ID to provide context - around data accesses for auditing systems. + applied. mutations: Required. Changes to be atomically applied to the specified row. Mutations are applied in order, meaning that earlier @@ -1534,7 +1530,8 @@ dict( DESCRIPTOR=_MUTATEROWSRESPONSE_ENTRY, __module__="google.cloud.bigtable_v2.proto.bigtable_pb2", - __doc__="""The result of applying a passed mutation in the original request. + __doc__="""The result of applying a passed mutation in the original + request. Attributes: @@ -1585,8 +1582,7 @@ specified, the "default" application profile will be used. row_key: Required. The key of the row to which the conditional mutation - should be applied. Classified as IDENTIFYING\_ID to provide - context around data accesses for auditing systems. + should be applied. 
predicate_filter: The filter to be applied to the contents of the specified row. Depending on whether or not any results are yielded, either @@ -1651,8 +1647,7 @@ specified, the "default" application profile will be used. row_key: Required. The key of the row to which the read/modify/write - rules should be applied. Classified as IDENTIFYING\_ID to - provide context around data accesses for auditing systems. + rules should be applied. rules: Required. Rules specifying how the specified row's contents are to be transformed into writes. Entries are applied in diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py index 825a0fa9222f..fb753be1e670 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py @@ -1941,8 +1941,8 @@ dict( DESCRIPTOR=_ROW, __module__="google.cloud.bigtable_v2.proto.data_pb2", - __doc__="""Specifies the complete (requested) contents of a single row of a table. - Rows which exceed 256MiB in size cannot be read in full. + __doc__="""Specifies the complete (requested) contents of a single + row of a table. Rows which exceed 256MiB in size cannot be read in full. Attributes: @@ -1966,8 +1966,8 @@ dict( DESCRIPTOR=_FAMILY, __module__="google.cloud.bigtable_v2.proto.data_pb2", - __doc__="""Specifies (some of) the contents of a single row/column family - intersection of a table. + __doc__="""Specifies (some of) the contents of a single row/column + family intersection of a table. Attributes: @@ -1993,8 +1993,8 @@ dict( DESCRIPTOR=_COLUMN, __module__="google.cloud.bigtable_v2.proto.data_pb2", - __doc__="""Specifies (some of) the contents of a single row/column intersection of - a table. + __doc__="""Specifies (some of) the contents of a single row/column + intersection of a table. Attributes: @@ -2019,8 +2019,8 @@ dict( DESCRIPTOR=_CELL, __module__="google.cloud.bigtable_v2.proto.data_pb2", - __doc__="""Specifies (some of) the contents of a single row/column/timestamp of a - table. + __doc__="""Specifies (some of) the contents of a single + row/column/timestamp of a table. Attributes: @@ -2100,9 +2100,9 @@ dict( DESCRIPTOR=_COLUMNRANGE, __module__="google.cloud.bigtable_v2.proto.data_pb2", - __doc__="""Specifies a contiguous range of columns within a single column family. - The range spans from : to - :, where both bounds can be either + __doc__="""Specifies a contiguous range of columns within a single + column family. The range spans from : + to :, where both bounds can be either inclusive or exclusive. @@ -2191,7 +2191,8 @@ dict( DESCRIPTOR=_ROWFILTER_CHAIN, __module__="google.cloud.bigtable_v2.proto.data_pb2", - __doc__="""A RowFilter which sends rows through several RowFilters in sequence. + __doc__="""A RowFilter which sends rows through several RowFilters in + sequence. Attributes: @@ -2209,8 +2210,8 @@ dict( DESCRIPTOR=_ROWFILTER_INTERLEAVE, __module__="google.cloud.bigtable_v2.proto.data_pb2", - __doc__="""A RowFilter which sends each row to each of several component RowFilters - and interleaves the results. + __doc__="""A RowFilter which sends each row to each of several + component RowFilters and interleaves the results. 
Attributes: @@ -2248,9 +2249,9 @@ dict( DESCRIPTOR=_ROWFILTER_CONDITION, __module__="google.cloud.bigtable_v2.proto.data_pb2", - __doc__="""A RowFilter which evaluates one of two possible RowFilters, depending on - whether or not a predicate RowFilter outputs any cells from the input - row. + __doc__="""A RowFilter which evaluates one of two possible + RowFilters, depending on whether or not a predicate RowFilter outputs + any cells from the input row. IMPORTANT NOTE: The predicate filter does not execute atomically with the true and false filters, which may lead to inconsistent or unexpected @@ -2277,13 +2278,13 @@ ), DESCRIPTOR=_ROWFILTER, __module__="google.cloud.bigtable_v2.proto.data_pb2", - __doc__="""Takes a row as input and produces an alternate view of the row based on - specified rules. For example, a RowFilter might trim down a row to - include just the cells from columns matching a given regular expression, - or might return all the cells of a row but not their values. More - complicated filters can be composed out of these components to express - requests such as, "within every column of a particular family, give just - the two most recent cells which are older than timestamp X." + __doc__="""Takes a row as input and produces an alternate view of the + row based on specified rules. For example, a RowFilter might trim down a + row to include just the cells from columns matching a given regular + expression, or might return all the cells of a row but not their values. + More complicated filters can be composed out of these components to + express requests such as, "within every column of a particular family, + give just the two most recent cells which are older than timestamp X." There are two broad categories of RowFilters (true filters and transformers), as well as two ways to compose simple filters into more @@ -2495,8 +2496,8 @@ dict( DESCRIPTOR=_MUTATION_DELETEFROMCOLUMN, __module__="google.cloud.bigtable_v2.proto.data_pb2", - __doc__="""A Mutation which deletes cells from the specified column, optionally - restricting the deletions to a given timestamp range. + __doc__="""A Mutation which deletes cells from the specified column, + optionally restricting the deletions to a given timestamp range. Attributes: @@ -2518,7 +2519,8 @@ dict( DESCRIPTOR=_MUTATION_DELETEFROMFAMILY, __module__="google.cloud.bigtable_v2.proto.data_pb2", - __doc__="""A Mutation which deletes all cells from the specified column family. + __doc__="""A Mutation which deletes all cells from the specified + column family. Attributes: @@ -2535,14 +2537,17 @@ dict( DESCRIPTOR=_MUTATION_DELETEFROMROW, __module__="google.cloud.bigtable_v2.proto.data_pb2", - __doc__="""A Mutation which deletes all cells from the containing row. + __doc__="""A Mutation which deletes all cells from the containing + row. + """, # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation.DeleteFromRow) ), ), DESCRIPTOR=_MUTATION, __module__="google.cloud.bigtable_v2.proto.data_pb2", - __doc__="""Specifies a particular change to be made to the contents of a row. + __doc__="""Specifies a particular change to be made to the contents + of a row. Attributes: @@ -2572,8 +2577,8 @@ dict( DESCRIPTOR=_READMODIFYWRITERULE, __module__="google.cloud.bigtable_v2.proto.data_pb2", - __doc__="""Specifies an atomic read/modify/write operation on the latest value of - the specified column. + __doc__="""Specifies an atomic read/modify/write operation on the + latest value of the specified column. 
Attributes: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types.py index a445eae1cade..607e1b09c5dd 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index c316b757f3dc..1bedccbac19c 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -1,19 +1,20 @@ { - "updateTime": "2020-01-08T13:14:12.679844Z", + "updateTime": "2020-01-30T13:15:12.607903Z", "sources": [ { "generator": { "name": "artman", - "version": "0.43.0", - "dockerImage": "googleapis/artman@sha256:264654a37596a44b0668b8ce6ac41082d713f6ee150b3fc6425fa78cc64e4f20" + "version": "0.44.4", + "dockerImage": "googleapis/artman@sha256:19e945954fc960a4bdfee6cb34695898ab21a8cf0bac063ee39b91f00a1faec8" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "08b488e0660c59842a7dee0e3e2b65d9e3a514a9", - "internalRef": "288625007" + "sha": "c1246a29e22b0f98e800a536b5b0da2d933a55f2", + "internalRef": "292310790", + "log": "c1246a29e22b0f98e800a536b5b0da2d933a55f2\nUpdating v1 protos with the latest inline documentation (in comments) and config options. Also adding a per-service .yaml file.\n\nPiperOrigin-RevId: 292310790\n\nb491d07cadaae7cde5608321f913e5ca1459b32d\nRevert accidental local_repository change\n\nPiperOrigin-RevId: 292245373\n\naf3400a8cb6110025198b59a0f7d018ae3cda700\nUpdate gapic-generator dependency (prebuilt PHP binary support).\n\nPiperOrigin-RevId: 292243997\n\n341fd5690fae36f36cf626ef048fbcf4bbe7cee6\ngrafeas: v1 add resource_definition for the grafeas.io/Project and change references for Project.\n\nPiperOrigin-RevId: 292221998\n\n42e915ec2ece1cd37a590fbcd10aa2c0fb0e5b06\nUpdate the gapic-generator, protoc-java-resource-name-plugin and protoc-docs-plugin to the latest commit.\n\nPiperOrigin-RevId: 292182368\n\nf035f47250675d31492a09f4a7586cfa395520a7\nFix grafeas build and update build.sh script to include gerafeas.\n\nPiperOrigin-RevId: 292168753\n\n26ccb214b7bc4a716032a6266bcb0a9ca55d6dbb\nasset: v1p1beta1 add client config annotations and retry config\n\nPiperOrigin-RevId: 292154210\n\n974ee5c0b5d03e81a50dafcedf41e0efebb5b749\nasset: v1beta1 add client config annotations\n\nPiperOrigin-RevId: 292152573\n\ncf3b61102ed5f36b827bc82ec39be09525f018c8\n Fix to protos for v1p1beta1 release of Cloud Security Command Center\n\nPiperOrigin-RevId: 292034635\n\n4e1cfaa7c0fede9e65d64213ca3da1b1255816c0\nUpdate the public proto to support UTF-8 encoded id for CatalogService API, increase the ListCatalogItems deadline to 300s and some minor documentation change\n\nPiperOrigin-RevId: 292030970\n\n9c483584f8fd5a1b862ae07973f4cc7bb3e46648\nasset: add annotations to v1p1beta1\n\nPiperOrigin-RevId: 292009868\n\ne19209fac29731d0baf6d9ac23da1164f7bdca24\nAdd the google.rpc.context.AttributeContext message to the open source\ndirectories.\n\nPiperOrigin-RevId: 291999930\n\nae5662960573f279502bf98a108a35ba1175e782\noslogin API: move file level option on top of the file to avoid protobuf.js bug.\n\nPiperOrigin-RevId: 
291990506\n\neba3897fff7c49ed85d3c47fc96fe96e47f6f684\nAdd cc_proto_library and cc_grpc_library targets for Spanner and IAM protos.\n\nPiperOrigin-RevId: 291988651\n\n8e981acfd9b97ea2f312f11bbaa7b6c16e412dea\nBeta launch for PersonDetection and FaceDetection features.\n\nPiperOrigin-RevId: 291821782\n\n994e067fae3b21e195f7da932b08fff806d70b5d\nasset: add annotations to v1p2beta1\n\nPiperOrigin-RevId: 291815259\n\n244e1d2c89346ca2e0701b39e65552330d68545a\nAdd Playable Locations service\n\nPiperOrigin-RevId: 291806349\n\n909f8f67963daf45dd88d020877fb9029b76788d\nasset: add annotations to v1beta2\n\nPiperOrigin-RevId: 291805301\n\n3c39a1d6e23c1ef63c7fba4019c25e76c40dfe19\nKMS: add file-level message for CryptoKeyPath, it is defined in gapic yaml but not\nin proto files.\n\nPiperOrigin-RevId: 291420695\n\nc6f3f350b8387f8d1b85ed4506f30187ebaaddc3\ncontaineranalysis: update v1beta1 and bazel build with annotations\n\nPiperOrigin-RevId: 291401900\n\n92887d74b44e4e636252b7b8477d0d2570cd82db\nfix: fix the location of grpc config file.\n\nPiperOrigin-RevId: 291396015\n\ne26cab8afd19d396b929039dac5d874cf0b5336c\nexpr: add default_host and method_signature annotations to CelService\n\nPiperOrigin-RevId: 291240093\n\n06093ae3952441c34ec176d1f7431b8765cec0be\nirm: fix v1alpha2 bazel build by adding missing proto imports\n\nPiperOrigin-RevId: 291227940\n\na8a2514af326e4673063f9a3c9d0ef1091c87e6c\nAdd proto annotation for cloud/irm API\n\nPiperOrigin-RevId: 291217859\n\n8d16f76de065f530d395a4c7eabbf766d6a120fd\nGenerate Memcache v1beta2 API protos and gRPC ServiceConfig files\n\nPiperOrigin-RevId: 291008516\n\n3af1dabd93df9a9f17bf3624d3b875c11235360b\ngrafeas: Add containeranalysis default_host to Grafeas service\n\nPiperOrigin-RevId: 290965849\n\nbe2663fa95e31cba67d0cd62611a6674db9f74b7\nfix(google/maps/roads): add missing opening bracket\n\nPiperOrigin-RevId: 290964086\n\nfacc26550a0af0696e0534bc9cae9df14275aa7c\nUpdating v2 protos with the latest inline documentation (in comments) and adding a per-service .yaml file.\n\nPiperOrigin-RevId: 290952261\n\ncda99c1f7dc5e4ca9b1caeae1dc330838cbc1461\nChange api_name to 'asset' for v1p1beta1\n\nPiperOrigin-RevId: 290800639\n\n94e9e90c303a820ce40643d9129e7f0d2054e8a1\nAdds Google Maps Road service\n\nPiperOrigin-RevId: 290795667\n\na3b23dcb2eaecce98c600c7d009451bdec52dbda\nrpc: new message ErrorInfo, other comment updates\n\nPiperOrigin-RevId: 290781668\n\n26420ef4e46c37f193c0fbe53d6ebac481de460e\nAdd proto definition for Org Policy v1.\n\nPiperOrigin-RevId: 290771923\n\n7f0dab8177cf371ae019a082e2512de7ac102888\nPublish Routes Preferred API v1 service definitions.\n\nPiperOrigin-RevId: 290326986\n\nad6e508d0728e1d1bca6e3f328cd562718cb772d\nFix: Qualify resource type references with \"jobs.googleapis.com/\"\n\nPiperOrigin-RevId: 290285762\n\n58e770d568a2b78168ddc19a874178fee8265a9d\ncts client library\n\nPiperOrigin-RevId: 290146169\n\naf9daa4c3b4c4a8b7133b81588dd9ffd37270af2\nAdd more programming language options to public proto\n\nPiperOrigin-RevId: 290144091\n\nd9f2bbf2df301ef84641d4cec7c828736a0bd907\ntalent: add missing resource.proto dep to Bazel build target\n\nPiperOrigin-RevId: 290143164\n\n3b3968237451d027b42471cd28884a5a1faed6c7\nAnnotate Talent API.\nAdd gRPC service config for retry.\nUpdate bazel file with google.api.resource dependency.\n\nPiperOrigin-RevId: 290125172\n\n0735b4b096872960568d1f366bfa75b7b0e1f1a3\nWeekly library update.\n\nPiperOrigin-RevId: 289939042\n\n8760d3d9a4543d7f9c0d1c7870aca08b116e4095\nWeekly library 
update.\n\nPiperOrigin-RevId: 289939020\n\n8607df842f782a901805187e02fff598145b0b0e\nChange Talent API timeout to 30s.\n\nPiperOrigin-RevId: 289912621\n\n908155991fe32570653bcb72ecfdcfc896642f41\nAdd Recommendations AI V1Beta1\n\nPiperOrigin-RevId: 289901914\n\n5c9a8c2bebd8b71aa66d1cc473edfaac837a2c78\nAdding no-arg method signatures for ListBillingAccounts and ListServices\n\nPiperOrigin-RevId: 289891136\n\n50b0e8286ac988b0593bd890eb31fef6ea2f5767\nlongrunning: add grpc service config and default_host annotation to operations.proto\n\nPiperOrigin-RevId: 289876944\n\n6cac27dabe51c54807b0401698c32d34998948a9\n Updating default deadline for Cloud Security Command Center's v1 APIs.\n\nPiperOrigin-RevId: 289875412\n\nd99df0d67057a233c711187e0689baa4f8e6333d\nFix: Correct spelling in C# namespace option\n\nPiperOrigin-RevId: 289709813\n\n2fa8d48165cc48e35b0c62e6f7bdade12229326c\nfeat: Publish Recommender v1 to GitHub.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 289619243\n\n9118db63d1ab493a2e44a3b4973fde810a835c49\nfirestore: don't retry reads that fail with Aborted\n\nFor transaction reads that fail with ABORTED, we need to rollback and start a new transaction. Our current configuration makes it so that GAPIC retries ABORTED reads multiple times without making any progress. Instead, we should retry at the transaction level.\n\nPiperOrigin-RevId: 289532382\n\n1dbfd3fe4330790b1e99c0bb20beb692f1e20b8a\nFix bazel build\nAdd other langauges (Java was already there) for bigquery/storage/v1alpha2 api.\n\nPiperOrigin-RevId: 289519766\n\nc06599cdd7d11f8d3fd25f8d3249e5bb1a3d5d73\nInitial commit of google.cloud.policytroubleshooter API, The API helps in troubleshooting GCP policies. Refer https://cloud.google.com/iam/docs/troubleshooting-access for more information\n\nPiperOrigin-RevId: 289491444\n\nfce7d80fa16ea241e87f7bc33d68595422e94ecd\nDo not pass samples option for Artman config of recommender v1 API.\n\nPiperOrigin-RevId: 289477403\n\nef179e8c61436297e6bb124352e47e45c8c80cb1\nfix: Address missing Bazel dependency.\n\nBazel builds stopped working in 06ec6d5 because\nthe google/longrunning/operations.proto file took\nan import from google/api/client.proto, but that\nimport was not added to BUILD.bazel.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 289446074\n\n8841655b242c84fd691d77d7bcf21b61044f01ff\nMigrate Data Labeling v1beta1 to GAPIC v2.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 289446026\n\n06ec6d5d053fff299eaa6eaa38afdd36c5e2fc68\nAdd annotations to google.longrunning.v1\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 289413169\n\n0480cf40be1d3cc231f4268a2fdb36a8dd60e641\nMigrate IAM Admin v1 to GAPIC v2.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 289411084\n\n1017173e9adeb858587639af61889ad970c728b1\nSpecify a C# namespace for BigQuery Connection v1beta1\n\nPiperOrigin-RevId: 289396763\n\nb08714b378e8e5b0c4ecdde73f92c36d6303b4b6\nfix: Integrate latest proto-docs-plugin fix.\nFixes dialogflow v2\n\nPiperOrigin-RevId: 289189004\n\n51217a67e79255ee1f2e70a6a3919df082513327\nCreate BUILD file for recommender v1\n\nPiperOrigin-RevId: 289183234\n\nacacd87263c0a60e458561b8b8ce9f67c760552a\nGenerate recommender v1 API protos and gRPC ServiceConfig files\n\nPiperOrigin-RevId: 289177510\n\n9d2f7133b97720b1fa3601f6dcd30760ba6d8a1e\nFix kokoro build script\n\nPiperOrigin-RevId: 289166315\n\nc43a67530d2a47a0220cad20ca8de39b3fbaf2c5\ncloudtasks: replace missing RPC timeout config for v2beta2 and v2beta3\n\nPiperOrigin-RevId: 
289162391\n\n4cefc229a9197236fc0adf02d69b71c0c5cf59de\nSynchronize new proto/yaml changes.\n\nPiperOrigin-RevId: 289158456\n\n56f263fe959c50786dab42e3c61402d32d1417bd\nCatalog API: Adding config necessary to build client libraries\n\nPiperOrigin-RevId: 289149879\n\n4543762b23a57fc3c53d409efc3a9affd47b6ab3\nFix Bazel build\nbilling/v1 and dialogflow/v2 remain broken (not bazel-related issues).\nBilling has wrong configuration, dialogflow failure is caused by a bug in documentation plugin.\n\nPiperOrigin-RevId: 289140194\n\nc9dce519127b97e866ca133a01157f4ce27dcceb\nUpdate Bigtable docs\n\nPiperOrigin-RevId: 289114419\n\n802c5c5f2bf94c3facb011267d04e71942e0d09f\nMigrate DLP to proto annotations (but not GAPIC v2).\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 289102579\n\n6357f30f2ec3cff1d8239d18b707ff9d438ea5da\nRemove gRPC configuration file that was in the wrong place.\n\nPiperOrigin-RevId: 289096111\n\n360a8792ed62f944109d7e22d613a04a010665b4\n Protos for v1p1beta1 release of Cloud Security Command Center\n\nPiperOrigin-RevId: 289011995\n\na79211c20c4f2807eec524d00123bf7c06ad3d6e\nRoll back containeranalysis v1 to GAPIC v1.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 288999068\n\n9e60345ba603e03484a8aaa33ce5ffa19c1c652b\nPublish Routes Preferred API v1 proto definitions.\n\nPiperOrigin-RevId: 288941399\n\nd52885b642ad2aa1f42b132ee62dbf49a73e1e24\nMigrate the service management API to GAPIC v2.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 288909426\n\n6ace586805c08896fef43e28a261337fcf3f022b\ncloudtasks: replace missing RPC timeout config\n\nPiperOrigin-RevId: 288783603\n\n51d906cabee4876b12497054b15b05d4a50ad027\nImport of Grafeas from Github.\n\nUpdate BUILD.bazel accordingly.\n\nPiperOrigin-RevId: 288783426\n\n5ef42bcd363ba0440f0ee65b3c80b499e9067ede\nMigrate Recommender v1beta1 to GAPIC v2.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 288713066\n\n94f986afd365b7d7e132315ddcd43d7af0e652fb\nMigrate Container Analysis v1 to GAPIC v2.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 288708382\n\n7a751a279184970d3b6ba90e4dd4d22a382a0747\nRemove Container Analysis v1alpha1 (nobody publishes it).\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 288707473\n\n3c0d9c71242e70474b2b640e15bb0a435fd06ff0\nRemove specious annotation from BigQuery Data Transfer before\nanyone accidentally does anything that uses it.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 288701604\n\n1af307a4764bd415ef942ac5187fa1def043006f\nMigrate BigQuery Connection to GAPIC v2.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 288698681\n\n" } }, { @@ -45,5 +46,403 @@ "config": "google/bigtable/admin/artman_bigtableadmin.yaml" } } + ], + "newFiles": [ + { + "path": ".coveragerc" + }, + { + "path": ".flake8" + }, + { + "path": ".repo-metadata.json" + }, + { + "path": "CHANGELOG.md" + }, + { + "path": "LICENSE" + }, + { + "path": "MANIFEST.in" + }, + { + "path": "README.rst" + }, + { + "path": "docs/README.rst" + }, + { + "path": "docs/_static/custom.css" + }, + { + "path": "docs/_templates/layout.html" + }, + { + "path": "docs/changelog.md" + }, + { + "path": "docs/client-intro.rst" + }, + { + "path": "docs/client.rst" + }, + { + "path": "docs/cluster.rst" + }, + { + "path": "docs/column-family.rst" + }, + { + "path": "docs/conf.py" + }, + { + "path": "docs/data-api.rst" + }, + { + "path": "docs/index.rst" + }, + { + "path": "docs/instance-api.rst" + }, + { + "path": "docs/instance.rst" + }, + { + "path": "docs/row-data.rst" + }, + { + "path": "docs/row-filters.rst" + }, + { + "path": "docs/row.rst" 
+ }, + { + "path": "docs/snippets.py" + }, + { + "path": "docs/snippets_table.py" + }, + { + "path": "docs/table-api.rst" + }, + { + "path": "docs/table.rst" + }, + { + "path": "docs/usage.rst" + }, + { + "path": "google/__init__.py" + }, + { + "path": "google/cloud/__init__.py" + }, + { + "path": "google/cloud/bigtable.py" + }, + { + "path": "google/cloud/bigtable/__init__.py" + }, + { + "path": "google/cloud/bigtable/app_profile.py" + }, + { + "path": "google/cloud/bigtable/batcher.py" + }, + { + "path": "google/cloud/bigtable/client.py" + }, + { + "path": "google/cloud/bigtable/cluster.py" + }, + { + "path": "google/cloud/bigtable/column_family.py" + }, + { + "path": "google/cloud/bigtable/enums.py" + }, + { + "path": "google/cloud/bigtable/instance.py" + }, + { + "path": "google/cloud/bigtable/policy.py" + }, + { + "path": "google/cloud/bigtable/row.py" + }, + { + "path": "google/cloud/bigtable/row_data.py" + }, + { + "path": "google/cloud/bigtable/row_filters.py" + }, + { + "path": "google/cloud/bigtable/row_set.py" + }, + { + "path": "google/cloud/bigtable/table.py" + }, + { + "path": "google/cloud/bigtable_admin_v2/__init__.py" + }, + { + "path": "google/cloud/bigtable_admin_v2/gapic/__init__.py" + }, + { + "path": "google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py" + }, + { + "path": "google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client_config.py" + }, + { + "path": "google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py" + }, + { + "path": "google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py" + }, + { + "path": "google/cloud/bigtable_admin_v2/gapic/enums.py" + }, + { + "path": "google/cloud/bigtable_admin_v2/gapic/transports/__init__.py" + }, + { + "path": "google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py" + }, + { + "path": "google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py" + }, + { + "path": "google/cloud/bigtable_admin_v2/proto/__init__.py" + }, + { + "path": "google/cloud/bigtable_admin_v2/proto/bigtable_cluster_data.proto" + }, + { + "path": "google/cloud/bigtable_admin_v2/proto/bigtable_cluster_service.proto" + }, + { + "path": "google/cloud/bigtable_admin_v2/proto/bigtable_cluster_service_messages.proto" + }, + { + "path": "google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto" + }, + { + "path": "google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py" + }, + { + "path": "google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py" + }, + { + "path": "google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto" + }, + { + "path": "google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py" + }, + { + "path": "google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py" + }, + { + "path": "google/cloud/bigtable_admin_v2/proto/bigtable_table_data.proto" + }, + { + "path": "google/cloud/bigtable_admin_v2/proto/bigtable_table_service.proto" + }, + { + "path": "google/cloud/bigtable_admin_v2/proto/bigtable_table_service_messages.proto" + }, + { + "path": "google/cloud/bigtable_admin_v2/proto/common.proto" + }, + { + "path": "google/cloud/bigtable_admin_v2/proto/common_pb2.py" + }, + { + "path": "google/cloud/bigtable_admin_v2/proto/common_pb2_grpc.py" + }, + { + "path": "google/cloud/bigtable_admin_v2/proto/instance.proto" + }, + { + "path": "google/cloud/bigtable_admin_v2/proto/instance_pb2.py" + }, + { + "path": "google/cloud/bigtable_admin_v2/proto/instance_pb2_grpc.py" 
+ }, + { + "path": "google/cloud/bigtable_admin_v2/proto/table.proto" + }, + { + "path": "google/cloud/bigtable_admin_v2/proto/table_pb2.py" + }, + { + "path": "google/cloud/bigtable_admin_v2/proto/table_pb2_grpc.py" + }, + { + "path": "google/cloud/bigtable_admin_v2/types.py" + }, + { + "path": "google/cloud/bigtable_v2/__init__.py" + }, + { + "path": "google/cloud/bigtable_v2/gapic/__init__.py" + }, + { + "path": "google/cloud/bigtable_v2/gapic/bigtable_client.py" + }, + { + "path": "google/cloud/bigtable_v2/gapic/bigtable_client_config.py" + }, + { + "path": "google/cloud/bigtable_v2/gapic/transports/__init__.py" + }, + { + "path": "google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py" + }, + { + "path": "google/cloud/bigtable_v2/proto/__init__.py" + }, + { + "path": "google/cloud/bigtable_v2/proto/bigtable.proto" + }, + { + "path": "google/cloud/bigtable_v2/proto/bigtable_cluster_data.proto" + }, + { + "path": "google/cloud/bigtable_v2/proto/bigtable_cluster_service.proto" + }, + { + "path": "google/cloud/bigtable_v2/proto/bigtable_cluster_service_messages.proto" + }, + { + "path": "google/cloud/bigtable_v2/proto/bigtable_data.proto" + }, + { + "path": "google/cloud/bigtable_v2/proto/bigtable_instance_admin.proto" + }, + { + "path": "google/cloud/bigtable_v2/proto/bigtable_pb2.py" + }, + { + "path": "google/cloud/bigtable_v2/proto/bigtable_pb2_grpc.py" + }, + { + "path": "google/cloud/bigtable_v2/proto/bigtable_service.proto" + }, + { + "path": "google/cloud/bigtable_v2/proto/bigtable_service_messages.proto" + }, + { + "path": "google/cloud/bigtable_v2/proto/bigtable_table_admin.proto" + }, + { + "path": "google/cloud/bigtable_v2/proto/bigtable_table_data.proto" + }, + { + "path": "google/cloud/bigtable_v2/proto/bigtable_table_service.proto" + }, + { + "path": "google/cloud/bigtable_v2/proto/bigtable_table_service_messages.proto" + }, + { + "path": "google/cloud/bigtable_v2/proto/common.proto" + }, + { + "path": "google/cloud/bigtable_v2/proto/data.proto" + }, + { + "path": "google/cloud/bigtable_v2/proto/data_pb2.py" + }, + { + "path": "google/cloud/bigtable_v2/proto/data_pb2_grpc.py" + }, + { + "path": "google/cloud/bigtable_v2/proto/instance.proto" + }, + { + "path": "google/cloud/bigtable_v2/proto/table.proto" + }, + { + "path": "google/cloud/bigtable_v2/types.py" + }, + { + "path": "noxfile.py" + }, + { + "path": "pylint.config.py" + }, + { + "path": "setup.cfg" + }, + { + "path": "setup.py" + }, + { + "path": "synth.metadata" + }, + { + "path": "synth.py" + }, + { + "path": "tests/__init__.py" + }, + { + "path": "tests/system.py" + }, + { + "path": "tests/unit/__init__.py" + }, + { + "path": "tests/unit/_testing.py" + }, + { + "path": "tests/unit/gapic/v2/test_bigtable_client_v2.py" + }, + { + "path": "tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py" + }, + { + "path": "tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py" + }, + { + "path": "tests/unit/read-rows-acceptance-test.json" + }, + { + "path": "tests/unit/test_app_profile.py" + }, + { + "path": "tests/unit/test_batcher.py" + }, + { + "path": "tests/unit/test_client.py" + }, + { + "path": "tests/unit/test_cluster.py" + }, + { + "path": "tests/unit/test_column_family.py" + }, + { + "path": "tests/unit/test_instance.py" + }, + { + "path": "tests/unit/test_policy.py" + }, + { + "path": "tests/unit/test_row.py" + }, + { + "path": "tests/unit/test_row_data.py" + }, + { + "path": "tests/unit/test_row_filters.py" + }, + { + "path": "tests/unit/test_row_set.py" + }, + { + "path": 
"tests/unit/test_table.py" + } ] } \ No newline at end of file diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_client_v2.py b/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_client_v2.py index c575a83e3cc7..84abfecef5a0 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_client_v2.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_client_v2.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py b/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py index d127d63178a4..e1de090542c6 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py b/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py index 6247cba66ccb..d1a843164982 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2019 Google LLC +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
From c0985dffe00c618a0aea0effa98272d03bcf31a0 Mon Sep 17 00:00:00 2001 From: Chris Wilcox Date: Fri, 31 Jan 2020 18:29:09 +0000 Subject: [PATCH 298/892] chore: add split repo templates --- .../.github/CONTRIBUTING.md | 28 ++ .../.github/ISSUE_TEMPLATE/bug_report.md | 44 ++ .../.github/ISSUE_TEMPLATE/feature_request.md | 18 + .../.github/ISSUE_TEMPLATE/support_request.md | 7 + .../.github/PULL_REQUEST_TEMPLATE.md | 7 + .../.github/release-please.yml | 1 + packages/google-cloud-bigtable/.gitignore | 58 +++ .../google-cloud-bigtable/.kokoro/build.sh | 39 ++ .../.kokoro/continuous/common.cfg | 27 ++ .../.kokoro/continuous/continuous.cfg | 1 + .../.kokoro/docs/common.cfg | 48 +++ .../.kokoro/docs/docs.cfg | 1 + .../.kokoro/presubmit/common.cfg | 27 ++ .../.kokoro/presubmit/presubmit.cfg | 1 + .../.kokoro/publish-docs.sh | 57 +++ .../google-cloud-bigtable/.kokoro/release.sh | 34 ++ .../.kokoro/release/common.cfg | 64 +++ .../.kokoro/release/release.cfg | 1 + .../.kokoro/trampoline.sh | 23 + .../google-cloud-bigtable/.repo-metadata.json | 2 +- .../google-cloud-bigtable/CODE_OF_CONDUCT.md | 44 ++ .../google-cloud-bigtable/CONTRIBUTING.rst | 279 ++++++++++++ packages/google-cloud-bigtable/LICENSE | 7 +- packages/google-cloud-bigtable/MANIFEST.in | 1 + packages/google-cloud-bigtable/docs/conf.py | 13 +- packages/google-cloud-bigtable/renovate.json | 5 + packages/google-cloud-bigtable/setup.py | 2 +- packages/google-cloud-bigtable/synth.metadata | 408 +----------------- 28 files changed, 831 insertions(+), 416 deletions(-) create mode 100644 packages/google-cloud-bigtable/.github/CONTRIBUTING.md create mode 100644 packages/google-cloud-bigtable/.github/ISSUE_TEMPLATE/bug_report.md create mode 100644 packages/google-cloud-bigtable/.github/ISSUE_TEMPLATE/feature_request.md create mode 100644 packages/google-cloud-bigtable/.github/ISSUE_TEMPLATE/support_request.md create mode 100644 packages/google-cloud-bigtable/.github/PULL_REQUEST_TEMPLATE.md create mode 100644 packages/google-cloud-bigtable/.github/release-please.yml create mode 100644 packages/google-cloud-bigtable/.gitignore create mode 100755 packages/google-cloud-bigtable/.kokoro/build.sh create mode 100644 packages/google-cloud-bigtable/.kokoro/continuous/common.cfg create mode 100644 packages/google-cloud-bigtable/.kokoro/continuous/continuous.cfg create mode 100644 packages/google-cloud-bigtable/.kokoro/docs/common.cfg create mode 100644 packages/google-cloud-bigtable/.kokoro/docs/docs.cfg create mode 100644 packages/google-cloud-bigtable/.kokoro/presubmit/common.cfg create mode 100644 packages/google-cloud-bigtable/.kokoro/presubmit/presubmit.cfg create mode 100755 packages/google-cloud-bigtable/.kokoro/publish-docs.sh create mode 100755 packages/google-cloud-bigtable/.kokoro/release.sh create mode 100644 packages/google-cloud-bigtable/.kokoro/release/common.cfg create mode 100644 packages/google-cloud-bigtable/.kokoro/release/release.cfg create mode 100755 packages/google-cloud-bigtable/.kokoro/trampoline.sh create mode 100644 packages/google-cloud-bigtable/CODE_OF_CONDUCT.md create mode 100644 packages/google-cloud-bigtable/CONTRIBUTING.rst create mode 100644 packages/google-cloud-bigtable/renovate.json diff --git a/packages/google-cloud-bigtable/.github/CONTRIBUTING.md b/packages/google-cloud-bigtable/.github/CONTRIBUTING.md new file mode 100644 index 000000000000..939e5341e74d --- /dev/null +++ b/packages/google-cloud-bigtable/.github/CONTRIBUTING.md @@ -0,0 +1,28 @@ +# How to Contribute + +We'd love to accept your patches and 
contributions to this project. There are +just a few small guidelines you need to follow. + +## Contributor License Agreement + +Contributions to this project must be accompanied by a Contributor License +Agreement. You (or your employer) retain the copyright to your contribution; +this simply gives us permission to use and redistribute your contributions as +part of the project. Head over to <https://cla.developers.google.com/> to see +your current agreements on file or to sign a new one. + +You generally only need to submit a CLA once, so if you've already submitted one +(even if it was for a different project), you probably don't need to do it +again. + +## Code reviews + +All submissions, including submissions by project members, require review. We +use GitHub pull requests for this purpose. Consult +[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more +information on using pull requests. + +## Community Guidelines + +This project follows [Google's Open Source Community +Guidelines](https://opensource.google.com/conduct/). diff --git a/packages/google-cloud-bigtable/.github/ISSUE_TEMPLATE/bug_report.md b/packages/google-cloud-bigtable/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 000000000000..54b119142fcf --- /dev/null +++ b/packages/google-cloud-bigtable/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,44 @@ +--- +name: Bug report +about: Create a report to help us improve + +--- + +Thanks for stopping by to let us know something could be better! + +**PLEASE READ**: If you have a support contract with Google, please create an issue in the [support console](https://cloud.google.com/support/) instead of filing on GitHub. This will ensure a timely response. + +Please run down the following list and make sure you've tried the usual "quick fixes": + + - Search the issues already opened: https://github.com/googleapis/python-bigtable/issues + - Search the issues on our "catch-all" repository: https://github.com/googleapis/google-cloud-python + - Search StackOverflow: http://stackoverflow.com/questions/tagged/google-cloud-platform+python + +If you are still having issues, please be sure to include as much information as possible: + +#### Environment details + + - OS type and version: + - Python version: `python --version` + - pip version: `pip --version` + - `google-cloud-bigtable` version: `pip show google-cloud-bigtable` + +#### Steps to reproduce + + 1. ? + 2. ? + +#### Code example + +```python +# example +``` + +#### Stack trace +``` +# example +``` + +Making sure to follow these steps will guarantee the quickest resolution possible. + +Thanks! diff --git a/packages/google-cloud-bigtable/.github/ISSUE_TEMPLATE/feature_request.md b/packages/google-cloud-bigtable/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 000000000000..6365857f33c6 --- /dev/null +++ b/packages/google-cloud-bigtable/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,18 @@ +--- +name: Feature request +about: Suggest an idea for this library + +--- + +Thanks for stopping by to let us know something could be better! + +**PLEASE READ**: If you have a support contract with Google, please create an issue in the [support console](https://cloud.google.com/support/) instead of filing on GitHub. This will ensure a timely response. + + **Is your feature request related to a problem? Please describe.** +A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] + **Describe the solution you'd like** +A clear and concise description of what you want to happen.
+ **Describe alternatives you've considered** +A clear and concise description of any alternative solutions or features you've considered. + **Additional context** +Add any other context or screenshots about the feature request here. diff --git a/packages/google-cloud-bigtable/.github/ISSUE_TEMPLATE/support_request.md b/packages/google-cloud-bigtable/.github/ISSUE_TEMPLATE/support_request.md new file mode 100644 index 000000000000..995869032125 --- /dev/null +++ b/packages/google-cloud-bigtable/.github/ISSUE_TEMPLATE/support_request.md @@ -0,0 +1,7 @@ +--- +name: Support request +about: If you have a support contract with Google, please create an issue in the Google Cloud Support console. + +--- + +**PLEASE READ**: If you have a support contract with Google, please create an issue in the [support console](https://cloud.google.com/support/) instead of filing on GitHub. This will ensure a timely response. diff --git a/packages/google-cloud-bigtable/.github/PULL_REQUEST_TEMPLATE.md b/packages/google-cloud-bigtable/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 000000000000..4499ce89ac43 --- /dev/null +++ b/packages/google-cloud-bigtable/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,7 @@ +Thank you for opening a Pull Request! Before submitting your PR, there are a few things you can do to make sure it goes smoothly: +- [ ] Make sure to open an issue as a [bug/issue](https://github.com/googleapis/python-bigtable/issues/new/choose) before writing your code! That way we can discuss the change, evaluate designs, and agree on the general idea +- [ ] Ensure the tests and linter pass +- [ ] Code coverage does not decrease (if any source code was changed) +- [ ] Appropriate docs were updated (if necessary) + +Fixes # 🦕 diff --git a/packages/google-cloud-bigtable/.github/release-please.yml b/packages/google-cloud-bigtable/.github/release-please.yml new file mode 100644 index 000000000000..4507ad0598a5 --- /dev/null +++ b/packages/google-cloud-bigtable/.github/release-please.yml @@ -0,0 +1 @@ +releaseType: python diff --git a/packages/google-cloud-bigtable/.gitignore b/packages/google-cloud-bigtable/.gitignore new file mode 100644 index 000000000000..3fb06e09ce74 --- /dev/null +++ b/packages/google-cloud-bigtable/.gitignore @@ -0,0 +1,58 @@ +*.py[cod] +*.sw[op] + +# C extensions +*.so + +# Packages +*.egg +*.egg-info +dist +build +eggs +parts +bin +var +sdist +develop-eggs +.installed.cfg +lib +lib64 +__pycache__ + +# Installer logs +pip-log.txt + +# Unit test / coverage reports +.coverage +.nox +.cache +.pytest_cache + + +# Mac +.DS_Store + +# JetBrains +.idea + +# VS Code +.vscode + +# emacs +*~ + +# Built documentation +docs/_build +bigquery/docs/generated + +# Virtual environment +env/ +coverage.xml + +# System test environment variables. +system_tests/local_test_setup + +# Make sure a generated file isn't accidentally committed. +pylintrc +pylintrc.test \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/build.sh b/packages/google-cloud-bigtable/.kokoro/build.sh new file mode 100755 index 000000000000..f59d2895420b --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/build.sh @@ -0,0 +1,39 @@ +#!/bin/bash +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eo pipefail + +cd github/python-bigtable + +# Disable buffering, so that the logs stream through. +export PYTHONUNBUFFERED=1 + +# Debug: show build environment +env | grep KOKORO + +# Setup service account credentials. +export GOOGLE_APPLICATION_CREDENTIALS=${KOKORO_GFILE_DIR}/service-account.json + +# Setup project id. +export PROJECT_ID=$(cat "${KOKORO_GFILE_DIR}/project-id.json") + +# Remove old nox +python3.6 -m pip uninstall --yes --quiet nox-automation + +# Install nox +python3.6 -m pip install --upgrade --quiet nox +python3.6 -m nox --version + +python3.6 -m nox diff --git a/packages/google-cloud-bigtable/.kokoro/continuous/common.cfg b/packages/google-cloud-bigtable/.kokoro/continuous/common.cfg new file mode 100644 index 000000000000..69e0570b844b --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/continuous/common.cfg @@ -0,0 +1,27 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Download resources for system tests (service account key, etc.) +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/google-cloud-python" + +# Use the trampoline script to run in docker. +build_file: "python-bigtable/.kokoro/trampoline.sh" + +# Configure the docker image for kokoro-trampoline. +env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/python-multi" +} +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-bigtable/.kokoro/build.sh" +} diff --git a/packages/google-cloud-bigtable/.kokoro/continuous/continuous.cfg b/packages/google-cloud-bigtable/.kokoro/continuous/continuous.cfg new file mode 100644 index 000000000000..8f43917d92fe --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/continuous/continuous.cfg @@ -0,0 +1 @@ +# Format: //devtools/kokoro/config/proto/build.proto \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/docs/common.cfg b/packages/google-cloud-bigtable/.kokoro/docs/common.cfg new file mode 100644 index 000000000000..8769b3116aac --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/docs/common.cfg @@ -0,0 +1,48 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. +build_file: "python-bigtable/.kokoro/trampoline.sh" + +# Configure the docker image for kokoro-trampoline. 
+env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/python-multi" +} +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-bigtable/.kokoro/publish-docs.sh" +} + +env_vars: { + key: "STAGING_BUCKET" + value: "docs-staging" +} + +# Fetch the token needed for reporting release status to GitHub +before_action { + fetch_keystore { + keystore_resource { + keystore_config_id: 73713 + keyname: "yoshi-automation-github-key" + } + } +} + +before_action { + fetch_keystore { + keystore_resource { + keystore_config_id: 73713 + keyname: "docuploader_service_account" + } + } +} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/docs/docs.cfg b/packages/google-cloud-bigtable/.kokoro/docs/docs.cfg new file mode 100644 index 000000000000..8f43917d92fe --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/docs/docs.cfg @@ -0,0 +1 @@ +# Format: //devtools/kokoro/config/proto/build.proto \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/presubmit/common.cfg b/packages/google-cloud-bigtable/.kokoro/presubmit/common.cfg new file mode 100644 index 000000000000..69e0570b844b --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/presubmit/common.cfg @@ -0,0 +1,27 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Download resources for system tests (service account key, etc.) +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/google-cloud-python" + +# Use the trampoline script to run in docker. +build_file: "python-bigtable/.kokoro/trampoline.sh" + +# Configure the docker image for kokoro-trampoline. +env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/python-multi" +} +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-bigtable/.kokoro/build.sh" +} diff --git a/packages/google-cloud-bigtable/.kokoro/presubmit/presubmit.cfg b/packages/google-cloud-bigtable/.kokoro/presubmit/presubmit.cfg new file mode 100644 index 000000000000..8f43917d92fe --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/presubmit/presubmit.cfg @@ -0,0 +1 @@ +# Format: //devtools/kokoro/config/proto/build.proto \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/publish-docs.sh b/packages/google-cloud-bigtable/.kokoro/publish-docs.sh new file mode 100755 index 000000000000..e6047caf8f9b --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/publish-docs.sh @@ -0,0 +1,57 @@ +#!/bin/bash +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +#!/bin/bash + +set -eo pipefail + +# Disable buffering, so that the logs stream through. 
+export PYTHONUNBUFFERED=1 + +cd github/python-bigtable + +# Remove old nox +python3.6 -m pip uninstall --yes --quiet nox-automation + +# Install nox +python3.6 -m pip install --upgrade --quiet nox +python3.6 -m nox --version + +# build docs +nox -s docs + +python3 -m pip install gcp-docuploader + +# install a json parser +sudo apt-get update +sudo apt-get -y install software-properties-common +sudo add-apt-repository universe +sudo apt-get update +sudo apt-get -y install jq + +# create metadata +python3 -m docuploader create-metadata \ + --name=$(jq --raw-output '.name // empty' .repo-metadata.json) \ + --version=$(python3 setup.py --version) \ + --language=$(jq --raw-output '.language // empty' .repo-metadata.json) \ + --distribution-name=$(python3 setup.py --name) \ + --product-page=$(jq --raw-output '.product_documentation // empty' .repo-metadata.json) \ + --github-repository=$(jq --raw-output '.repo // empty' .repo-metadata.json) \ + --issue-tracker=$(jq --raw-output '.issue_tracker // empty' .repo-metadata.json) + +cat docs.metadata + +# upload docs +python3 -m docuploader upload docs/_build/html --metadata-file docs.metadata --staging-bucket docs-staging diff --git a/packages/google-cloud-bigtable/.kokoro/release.sh b/packages/google-cloud-bigtable/.kokoro/release.sh new file mode 100755 index 000000000000..6a911b651b56 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/release.sh @@ -0,0 +1,34 @@ +#!/bin/bash +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +#!/bin/bash + +set -eo pipefail + +# Start the releasetool reporter +python3 -m pip install gcp-releasetool +python3 -m releasetool publish-reporter-script > /tmp/publisher-script; source /tmp/publisher-script + +# Ensure that we have the latest versions of Twine, Wheel, and Setuptools. +python3 -m pip install --upgrade twine wheel setuptools + +# Disable buffering, so that the logs stream through. +export PYTHONUNBUFFERED=1 + +# Move into the package, build the distribution and upload. +TWINE_PASSWORD=$(cat "${KOKORO_KEYSTORE_DIR}/73713_google_cloud_pypi_password") +cd github/python-bigtable +python3 setup.py sdist bdist_wheel +twine upload --username gcloudpypi --password "${TWINE_PASSWORD}" dist/* diff --git a/packages/google-cloud-bigtable/.kokoro/release/common.cfg b/packages/google-cloud-bigtable/.kokoro/release/common.cfg new file mode 100644 index 000000000000..d1edfb69db8c --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/release/common.cfg @@ -0,0 +1,64 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. +build_file: "python-bigtable/.kokoro/trampoline.sh" + +# Configure the docker image for kokoro-trampoline. 
+env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/python-multi" +} +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-bigtable/.kokoro/release.sh" +} + +# Fetch the token needed for reporting release status to GitHub +before_action { + fetch_keystore { + keystore_resource { + keystore_config_id: 73713 + keyname: "yoshi-automation-github-key" + } + } +} + +# Fetch PyPI password +before_action { + fetch_keystore { + keystore_resource { + keystore_config_id: 73713 + keyname: "google_cloud_pypi_password" + } + } +} + +# Fetch magictoken to use with Magic Github Proxy +before_action { + fetch_keystore { + keystore_resource { + keystore_config_id: 73713 + keyname: "releasetool-magictoken" + } + } +} + +# Fetch api key to use with Magic Github Proxy +before_action { + fetch_keystore { + keystore_resource { + keystore_config_id: 73713 + keyname: "magic-github-proxy-api-key" + } + } +} diff --git a/packages/google-cloud-bigtable/.kokoro/release/release.cfg b/packages/google-cloud-bigtable/.kokoro/release/release.cfg new file mode 100644 index 000000000000..8f43917d92fe --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/release/release.cfg @@ -0,0 +1 @@ +# Format: //devtools/kokoro/config/proto/build.proto \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/trampoline.sh b/packages/google-cloud-bigtable/.kokoro/trampoline.sh new file mode 100755 index 000000000000..e8c4251f3ed4 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/trampoline.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# Copyright 2017 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eo pipefail + +python3 "${KOKORO_GFILE_DIR}/trampoline_v1.py" || ret_code=$? 
+ +chmod +x ${KOKORO_GFILE_DIR}/trampoline_cleanup.sh +${KOKORO_GFILE_DIR}/trampoline_cleanup.sh || true + +exit ${ret_code} diff --git a/packages/google-cloud-bigtable/.repo-metadata.json b/packages/google-cloud-bigtable/.repo-metadata.json index 956c74b53395..cfda5a11e0ec 100644 --- a/packages/google-cloud-bigtable/.repo-metadata.json +++ b/packages/google-cloud-bigtable/.repo-metadata.json @@ -6,7 +6,7 @@ "issue_tracker": "https://issuetracker.google.com/savedsearches/559777", "release_level": "ga", "language": "python", - "repo": "googleapis/google-cloud-python", + "repo": "googleapis/python-bigtable", "distribution_name": "google-cloud-bigtable", "api_id": "bigtable.googleapis.com", "requires_billing": true diff --git a/packages/google-cloud-bigtable/CODE_OF_CONDUCT.md b/packages/google-cloud-bigtable/CODE_OF_CONDUCT.md new file mode 100644 index 000000000000..b3d1f6029849 --- /dev/null +++ b/packages/google-cloud-bigtable/CODE_OF_CONDUCT.md @@ -0,0 +1,44 @@ + +# Contributor Code of Conduct + +As contributors and maintainers of this project, +and in the interest of fostering an open and welcoming community, +we pledge to respect all people who contribute through reporting issues, +posting feature requests, updating documentation, +submitting pull requests or patches, and other activities. + +We are committed to making participation in this project +a harassment-free experience for everyone, +regardless of level of experience, gender, gender identity and expression, +sexual orientation, disability, personal appearance, +body size, race, ethnicity, age, religion, or nationality. + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery +* Personal attacks +* Trolling or insulting/derogatory comments +* Public or private harassment +* Publishing other's private information, +such as physical or electronic +addresses, without explicit permission +* Other unethical or unprofessional conduct. + +Project maintainers have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct. +By adopting this Code of Conduct, +project maintainers commit themselves to fairly and consistently +applying these principles to every aspect of managing this project. +Project maintainers who do not follow or enforce the Code of Conduct +may be permanently removed from the project team. + +This code of conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. + +Instances of abusive, harassing, or otherwise unacceptable behavior +may be reported by opening an issue +or contacting one or more of the project maintainers. + +This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0, +available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/) diff --git a/packages/google-cloud-bigtable/CONTRIBUTING.rst b/packages/google-cloud-bigtable/CONTRIBUTING.rst new file mode 100644 index 000000000000..97e69746dc6d --- /dev/null +++ b/packages/google-cloud-bigtable/CONTRIBUTING.rst @@ -0,0 +1,279 @@ +.. Generated by synthtool. DO NOT EDIT! +############ +Contributing +############ + +#. **Please sign one of the contributor license agreements below.** +#. Fork the repo, develop and test your code changes, add docs. +#. Make sure that your commit messages clearly describe the changes. +#. 
Send a pull request. (Please Read: `Faster Pull Request Reviews`_) + +.. _Faster Pull Request Reviews: https://github.com/kubernetes/community/blob/master/contributors/guide/pull-requests.md#best-practices-for-faster-reviews + +.. contents:: Here are some guidelines for hacking on the Google Cloud Client libraries. + +*************** +Adding Features +*************** + +In order to add a feature: + +- The feature must be documented in both the API and narrative + documentation. + +- The feature must work fully on the following CPython versions: 2.7, + 3.5, 3.6, and 3.7 on both UNIX and Windows. + +- The feature must not add unnecessary dependencies (where + "unnecessary" is of course subjective, but new dependencies should + be discussed). + +**************************** +Using a Development Checkout +**************************** + +You'll have to create a development environment using a Git checkout: + +- While logged into your GitHub account, navigate to the + ``python-bigtable`` `repo`_ on GitHub. + +- Fork and clone the ``python-bigtable`` repository to your GitHub account by + clicking the "Fork" button. + +- Clone your fork of ``python-bigtable`` from your GitHub account to your local + computer, substituting your account username and specifying the destination + as ``hack-on-python-bigtable``. E.g.:: + + $ cd ${HOME} + $ git clone git@github.com:USERNAME/python-bigtable.git hack-on-python-bigtable + $ cd hack-on-python-bigtable + # Configure remotes such that you can pull changes from the googleapis/python-bigtable + # repository into your local repository. + $ git remote add upstream git@github.com:googleapis/python-bigtable.git + # fetch and merge changes from upstream into master + $ git fetch upstream + $ git merge upstream/master + +Now your local repo is set up such that you will push changes to your GitHub +repo, from which you can submit a pull request. + +To work on the codebase and run the tests, we recommend using ``nox``, +but you can also use a ``virtualenv`` of your own creation. + +.. _repo: https://github.com/googleapis/python-bigtable + +Using ``nox`` +============= + +We use `nox `__ to instrument our tests. + +- To test your changes, run unit tests with ``nox``:: + + $ nox -s unit-2.7 + $ nox -s unit-3.7 + $ ... + + .. note:: + + The unit tests and system tests are described in the + ``noxfile.py`` files in each directory. + +.. nox: https://pypi.org/project/nox/ + +Note on Editable Installs / Develop Mode +======================================== + +- As mentioned previously, using ``setuptools`` in `develop mode`_ + or a ``pip`` `editable install`_ is not possible with this + library. This is because this library uses `namespace packages`_. + For context see `Issue #2316`_ and the relevant `PyPA issue`_. + + Since ``editable`` / ``develop`` mode can't be used, packages + need to be installed directly. Hence your changes to the source + tree don't get incorporated into the **already installed** + package. + +.. _namespace packages: https://www.python.org/dev/peps/pep-0420/ +.. _Issue #2316: https://github.com/GoogleCloudPlatform/google-cloud-python/issues/2316 +.. _PyPA issue: https://github.com/pypa/packaging-problems/issues/12 +.. _develop mode: https://setuptools.readthedocs.io/en/latest/setuptools.html#development-mode +.. _editable install: https://pip.pypa.io/en/stable/reference/pip_install/#editable-installs + +***************************************** +I'm getting weird errors... Can you help? 
+***************************************** + +If the error mentions ``Python.h`` not being found, +install ``python-dev`` and try again. +On Debian/Ubuntu:: + + $ sudo apt-get install python-dev + +************ +Coding Style +************ + +- PEP8 compliance, with exceptions defined in the linter configuration. + If you have ``nox`` installed, you can test that you have not introduced + any non-compliant code via:: + + $ nox -s lint + +- In order to make ``nox -s lint`` run faster, you can set some environment + variables:: + + export GOOGLE_CLOUD_TESTING_REMOTE="upstream" + export GOOGLE_CLOUD_TESTING_BRANCH="master" + + By doing this, you are specifying the location of the most up-to-date + version of ``python-bigtable``. The the suggested remote name ``upstream`` + should point to the official ``googleapis`` checkout and the + the branch should be the main branch on that remote (``master``). + +Exceptions to PEP8: + +- Many unit tests use a helper method, ``_call_fut`` ("FUT" is short for + "Function-Under-Test"), which is PEP8-incompliant, but more readable. + Some also use a local variable, ``MUT`` (short for "Module-Under-Test"). + +******************** +Running System Tests +******************** + +- To run system tests, you can execute:: + + $ nox -s system-3.7 + $ nox -s system-2.7 + + .. note:: + + System tests are only configured to run under Python 2.7 and + Python 3.7. For expediency, we do not run them in older versions + of Python 3. + + This alone will not run the tests. You'll need to change some local + auth settings and change some configuration in your project to + run all the tests. + +- System tests will be run against an actual project and + so you'll need to provide some environment variables to facilitate + authentication to your project: + + - ``GOOGLE_APPLICATION_CREDENTIALS``: The path to a JSON key file; + Such a file can be downloaded directly from the developer's console by clicking + "Generate new JSON key". See private key + `docs `__ + for more details. + +- Once you have downloaded your json keys, set the environment variable + ``GOOGLE_APPLICATION_CREDENTIALS`` to the absolute path of the json file:: + + $ export GOOGLE_APPLICATION_CREDENTIALS="/Users//path/to/app_credentials.json" + + +************* +Test Coverage +************* + +- The codebase *must* have 100% test statement coverage after each commit. + You can test coverage via ``nox -s cover``. + +****************************************************** +Documentation Coverage and Building HTML Documentation +****************************************************** + +If you fix a bug, and the bug requires an API or behavior modification, all +documentation in this package which references that API or behavior must be +changed to reflect the bug fix, ideally in the same commit that fixes the bug +or adds the feature. + +Build the docs via: + + $ nox -s docs + +******************************************** +Note About ``README`` as it pertains to PyPI +******************************************** + +The `description on PyPI`_ for the project comes directly from the +``README``. Due to the reStructuredText (``rst``) parser used by +PyPI, relative links which will work on GitHub (e.g. ``CONTRIBUTING.rst`` +instead of +``https://github.com/googleapis/python-bigtable/blob/master/CONTRIBUTING.rst``) +may cause problems creating links or rendering the description. + +.. 
_description on PyPI: https://pypi.org/project/google-cloud-bigtable + + +************************* +Supported Python Versions +************************* + +We support: + +- `Python 3.5`_ +- `Python 3.6`_ +- `Python 3.7`_ + +.. _Python 3.5: https://docs.python.org/3.5/ +.. _Python 3.6: https://docs.python.org/3.6/ +.. _Python 3.7: https://docs.python.org/3.7/ + + +Supported versions can be found in our ``noxfile.py`` `config`_. + +.. _config: https://github.com/googleapis/python-bigtable/blob/master/noxfile.py + +We explicitly decided not to support `Python 2.5`_ due to `decreased usage`_ +and lack of continuous integration `support`_. + +.. _Python 2.5: https://docs.python.org/2.5/ +.. _decreased usage: https://caremad.io/2013/10/a-look-at-pypi-downloads/ +.. _support: https://blog.travis-ci.com/2013-11-18-upcoming-build-environment-updates/ + +We have `dropped 2.6`_ as a supported version as well since Python 2.6 is no +longer supported by the core development team. + +Python 2.7 support is deprecated. All code changes should maintain Python 2.7 compatibility until January 1, 2020. + +We also explicitly decided to support Python 3 beginning with version +3.5. Reasons for this include: + +- Encouraging use of newest versions of Python 3 +- Taking the lead of `prominent`_ open-source `projects`_ +- `Unicode literal support`_ which allows for a cleaner codebase that + works in both Python 2 and Python 3 + +.. _prominent: https://docs.djangoproject.com/en/1.9/faq/install/#what-python-version-can-i-use-with-django +.. _projects: http://flask.pocoo.org/docs/0.10/python3/ +.. _Unicode literal support: https://www.python.org/dev/peps/pep-0414/ +.. _dropped 2.6: https://github.com/googleapis/google-cloud-python/issues/995 + +********** +Versioning +********** + +This library follows `Semantic Versioning`_. + +.. _Semantic Versioning: http://semver.org/ + +Some packages are currently in major version zero (``0.y.z``), which means that +anything may change at any time and the public API should not be considered +stable. + +****************************** +Contributor License Agreements +****************************** + +Before we can accept your pull requests you'll need to sign a Contributor +License Agreement (CLA): + +- **If you are an individual writing original source code** and **you own the + intellectual property**, then you'll need to sign an + `individual CLA `__. +- **If you work for a company that wants to allow you to contribute your work**, + then you'll need to sign a + `corporate CLA `__. + +You can sign these electronically (just scroll to the bottom). After that, +we'll be able to accept your pull requests. diff --git a/packages/google-cloud-bigtable/LICENSE b/packages/google-cloud-bigtable/LICENSE index d64569567334..a8ee855de2aa 100644 --- a/packages/google-cloud-bigtable/LICENSE +++ b/packages/google-cloud-bigtable/LICENSE @@ -1,7 +1,6 @@ - - Apache License + Apache License Version 2.0, January 2004 - http://www.apache.org/licenses/ + https://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION @@ -193,7 +192,7 @@ you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/packages/google-cloud-bigtable/MANIFEST.in b/packages/google-cloud-bigtable/MANIFEST.in index 9cbf175afe6b..cd011be27a0e 100644 --- a/packages/google-cloud-bigtable/MANIFEST.in +++ b/packages/google-cloud-bigtable/MANIFEST.in @@ -1,3 +1,4 @@ +# Generated by synthtool. DO NOT EDIT! include README.rst LICENSE recursive-include google *.json *.proto recursive-include tests * diff --git a/packages/google-cloud-bigtable/docs/conf.py b/packages/google-cloud-bigtable/docs/conf.py index 97b890f1a8c9..ce720db110de 100644 --- a/packages/google-cloud-bigtable/docs/conf.py +++ b/packages/google-cloud-bigtable/docs/conf.py @@ -20,7 +20,7 @@ # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath("..")) -__version__ = "0.1.0" +__version__ = "" # -- General configuration ------------------------------------------------ @@ -66,7 +66,7 @@ # General information about the project. project = u"google-cloud-bigtable" -copyright = u"2017, Google" +copyright = u"2019, Google" author = u"Google APIs" # The version info for the project you're documenting, acts as replacement for @@ -133,9 +133,9 @@ # further. For a list of options available for each theme, see the # documentation. html_theme_options = { - "description": "Google Cloud Client Libraries for Python", + "description": "Google Cloud Client Libraries for google-cloud-bigtable", "github_user": "googleapis", - "github_repo": "google-cloud-python", + "github_repo": "python-bigtable", "github_banner": True, "font_family": "'Roboto', Georgia, sans", "head_font_family": "'Roboto', Georgia, serif", @@ -318,7 +318,7 @@ u"google-cloud-bigtable Documentation", author, "google-cloud-bigtable", - "GAPIC library for Bigtable", + "google-cloud-bigtable Library", "APIs", ) ] @@ -340,9 +340,8 @@ intersphinx_mapping = { "python": ("http://python.readthedocs.org/en/latest/", None), "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), - "google.api_core": ("https://googleapis.dev/python/google-api-core/latest", None), + "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None,), "grpc": ("https://grpc.io/grpc/python/", None), - "requests": ("https://requests.kennethreitz.org/en/stable/", None), } diff --git a/packages/google-cloud-bigtable/renovate.json b/packages/google-cloud-bigtable/renovate.json new file mode 100644 index 000000000000..4fa949311b20 --- /dev/null +++ b/packages/google-cloud-bigtable/renovate.json @@ -0,0 +1,5 @@ +{ + "extends": [ + "config:base", ":preserveSemverRanges" + ] +} diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index c5075bbcc61b..43804da73dc3 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -65,7 +65,7 @@ author='Google LLC', author_email='googleapis-packages@google.com', license='Apache 2.0', - url='https://github.com/GoogleCloudPlatform/google-cloud-python', + url='https://github.com/googleapis/python-bigtable', classifiers=[ release_status, 'Intended Audience :: Developers', diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index 1bedccbac19c..422430e963db 100644 --- 
a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -1,5 +1,5 @@ { - "updateTime": "2020-01-30T13:15:12.607903Z", + "updateTime": "2020-01-31T18:24:32.991056Z", "sources": [ { "generator": { @@ -12,14 +12,14 @@ "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "c1246a29e22b0f98e800a536b5b0da2d933a55f2", - "internalRef": "292310790", - "log": "c1246a29e22b0f98e800a536b5b0da2d933a55f2\nUpdating v1 protos with the latest inline documentation (in comments) and config options. Also adding a per-service .yaml file.\n\nPiperOrigin-RevId: 292310790\n\nb491d07cadaae7cde5608321f913e5ca1459b32d\nRevert accidental local_repository change\n\nPiperOrigin-RevId: 292245373\n\naf3400a8cb6110025198b59a0f7d018ae3cda700\nUpdate gapic-generator dependency (prebuilt PHP binary support).\n\nPiperOrigin-RevId: 292243997\n\n341fd5690fae36f36cf626ef048fbcf4bbe7cee6\ngrafeas: v1 add resource_definition for the grafeas.io/Project and change references for Project.\n\nPiperOrigin-RevId: 292221998\n\n42e915ec2ece1cd37a590fbcd10aa2c0fb0e5b06\nUpdate the gapic-generator, protoc-java-resource-name-plugin and protoc-docs-plugin to the latest commit.\n\nPiperOrigin-RevId: 292182368\n\nf035f47250675d31492a09f4a7586cfa395520a7\nFix grafeas build and update build.sh script to include gerafeas.\n\nPiperOrigin-RevId: 292168753\n\n26ccb214b7bc4a716032a6266bcb0a9ca55d6dbb\nasset: v1p1beta1 add client config annotations and retry config\n\nPiperOrigin-RevId: 292154210\n\n974ee5c0b5d03e81a50dafcedf41e0efebb5b749\nasset: v1beta1 add client config annotations\n\nPiperOrigin-RevId: 292152573\n\ncf3b61102ed5f36b827bc82ec39be09525f018c8\n Fix to protos for v1p1beta1 release of Cloud Security Command Center\n\nPiperOrigin-RevId: 292034635\n\n4e1cfaa7c0fede9e65d64213ca3da1b1255816c0\nUpdate the public proto to support UTF-8 encoded id for CatalogService API, increase the ListCatalogItems deadline to 300s and some minor documentation change\n\nPiperOrigin-RevId: 292030970\n\n9c483584f8fd5a1b862ae07973f4cc7bb3e46648\nasset: add annotations to v1p1beta1\n\nPiperOrigin-RevId: 292009868\n\ne19209fac29731d0baf6d9ac23da1164f7bdca24\nAdd the google.rpc.context.AttributeContext message to the open source\ndirectories.\n\nPiperOrigin-RevId: 291999930\n\nae5662960573f279502bf98a108a35ba1175e782\noslogin API: move file level option on top of the file to avoid protobuf.js bug.\n\nPiperOrigin-RevId: 291990506\n\neba3897fff7c49ed85d3c47fc96fe96e47f6f684\nAdd cc_proto_library and cc_grpc_library targets for Spanner and IAM protos.\n\nPiperOrigin-RevId: 291988651\n\n8e981acfd9b97ea2f312f11bbaa7b6c16e412dea\nBeta launch for PersonDetection and FaceDetection features.\n\nPiperOrigin-RevId: 291821782\n\n994e067fae3b21e195f7da932b08fff806d70b5d\nasset: add annotations to v1p2beta1\n\nPiperOrigin-RevId: 291815259\n\n244e1d2c89346ca2e0701b39e65552330d68545a\nAdd Playable Locations service\n\nPiperOrigin-RevId: 291806349\n\n909f8f67963daf45dd88d020877fb9029b76788d\nasset: add annotations to v1beta2\n\nPiperOrigin-RevId: 291805301\n\n3c39a1d6e23c1ef63c7fba4019c25e76c40dfe19\nKMS: add file-level message for CryptoKeyPath, it is defined in gapic yaml but not\nin proto files.\n\nPiperOrigin-RevId: 291420695\n\nc6f3f350b8387f8d1b85ed4506f30187ebaaddc3\ncontaineranalysis: update v1beta1 and bazel build with annotations\n\nPiperOrigin-RevId: 291401900\n\n92887d74b44e4e636252b7b8477d0d2570cd82db\nfix: fix the location of grpc config file.\n\nPiperOrigin-RevId: 
291396015\n\ne26cab8afd19d396b929039dac5d874cf0b5336c\nexpr: add default_host and method_signature annotations to CelService\n\nPiperOrigin-RevId: 291240093\n\n06093ae3952441c34ec176d1f7431b8765cec0be\nirm: fix v1alpha2 bazel build by adding missing proto imports\n\nPiperOrigin-RevId: 291227940\n\na8a2514af326e4673063f9a3c9d0ef1091c87e6c\nAdd proto annotation for cloud/irm API\n\nPiperOrigin-RevId: 291217859\n\n8d16f76de065f530d395a4c7eabbf766d6a120fd\nGenerate Memcache v1beta2 API protos and gRPC ServiceConfig files\n\nPiperOrigin-RevId: 291008516\n\n3af1dabd93df9a9f17bf3624d3b875c11235360b\ngrafeas: Add containeranalysis default_host to Grafeas service\n\nPiperOrigin-RevId: 290965849\n\nbe2663fa95e31cba67d0cd62611a6674db9f74b7\nfix(google/maps/roads): add missing opening bracket\n\nPiperOrigin-RevId: 290964086\n\nfacc26550a0af0696e0534bc9cae9df14275aa7c\nUpdating v2 protos with the latest inline documentation (in comments) and adding a per-service .yaml file.\n\nPiperOrigin-RevId: 290952261\n\ncda99c1f7dc5e4ca9b1caeae1dc330838cbc1461\nChange api_name to 'asset' for v1p1beta1\n\nPiperOrigin-RevId: 290800639\n\n94e9e90c303a820ce40643d9129e7f0d2054e8a1\nAdds Google Maps Road service\n\nPiperOrigin-RevId: 290795667\n\na3b23dcb2eaecce98c600c7d009451bdec52dbda\nrpc: new message ErrorInfo, other comment updates\n\nPiperOrigin-RevId: 290781668\n\n26420ef4e46c37f193c0fbe53d6ebac481de460e\nAdd proto definition for Org Policy v1.\n\nPiperOrigin-RevId: 290771923\n\n7f0dab8177cf371ae019a082e2512de7ac102888\nPublish Routes Preferred API v1 service definitions.\n\nPiperOrigin-RevId: 290326986\n\nad6e508d0728e1d1bca6e3f328cd562718cb772d\nFix: Qualify resource type references with \"jobs.googleapis.com/\"\n\nPiperOrigin-RevId: 290285762\n\n58e770d568a2b78168ddc19a874178fee8265a9d\ncts client library\n\nPiperOrigin-RevId: 290146169\n\naf9daa4c3b4c4a8b7133b81588dd9ffd37270af2\nAdd more programming language options to public proto\n\nPiperOrigin-RevId: 290144091\n\nd9f2bbf2df301ef84641d4cec7c828736a0bd907\ntalent: add missing resource.proto dep to Bazel build target\n\nPiperOrigin-RevId: 290143164\n\n3b3968237451d027b42471cd28884a5a1faed6c7\nAnnotate Talent API.\nAdd gRPC service config for retry.\nUpdate bazel file with google.api.resource dependency.\n\nPiperOrigin-RevId: 290125172\n\n0735b4b096872960568d1f366bfa75b7b0e1f1a3\nWeekly library update.\n\nPiperOrigin-RevId: 289939042\n\n8760d3d9a4543d7f9c0d1c7870aca08b116e4095\nWeekly library update.\n\nPiperOrigin-RevId: 289939020\n\n8607df842f782a901805187e02fff598145b0b0e\nChange Talent API timeout to 30s.\n\nPiperOrigin-RevId: 289912621\n\n908155991fe32570653bcb72ecfdcfc896642f41\nAdd Recommendations AI V1Beta1\n\nPiperOrigin-RevId: 289901914\n\n5c9a8c2bebd8b71aa66d1cc473edfaac837a2c78\nAdding no-arg method signatures for ListBillingAccounts and ListServices\n\nPiperOrigin-RevId: 289891136\n\n50b0e8286ac988b0593bd890eb31fef6ea2f5767\nlongrunning: add grpc service config and default_host annotation to operations.proto\n\nPiperOrigin-RevId: 289876944\n\n6cac27dabe51c54807b0401698c32d34998948a9\n Updating default deadline for Cloud Security Command Center's v1 APIs.\n\nPiperOrigin-RevId: 289875412\n\nd99df0d67057a233c711187e0689baa4f8e6333d\nFix: Correct spelling in C# namespace option\n\nPiperOrigin-RevId: 289709813\n\n2fa8d48165cc48e35b0c62e6f7bdade12229326c\nfeat: Publish Recommender v1 to GitHub.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 289619243\n\n9118db63d1ab493a2e44a3b4973fde810a835c49\nfirestore: don't retry reads that fail with 
Aborted\n\nFor transaction reads that fail with ABORTED, we need to rollback and start a new transaction. Our current configuration makes it so that GAPIC retries ABORTED reads multiple times without making any progress. Instead, we should retry at the transaction level.\n\nPiperOrigin-RevId: 289532382\n\n1dbfd3fe4330790b1e99c0bb20beb692f1e20b8a\nFix bazel build\nAdd other langauges (Java was already there) for bigquery/storage/v1alpha2 api.\n\nPiperOrigin-RevId: 289519766\n\nc06599cdd7d11f8d3fd25f8d3249e5bb1a3d5d73\nInitial commit of google.cloud.policytroubleshooter API, The API helps in troubleshooting GCP policies. Refer https://cloud.google.com/iam/docs/troubleshooting-access for more information\n\nPiperOrigin-RevId: 289491444\n\nfce7d80fa16ea241e87f7bc33d68595422e94ecd\nDo not pass samples option for Artman config of recommender v1 API.\n\nPiperOrigin-RevId: 289477403\n\nef179e8c61436297e6bb124352e47e45c8c80cb1\nfix: Address missing Bazel dependency.\n\nBazel builds stopped working in 06ec6d5 because\nthe google/longrunning/operations.proto file took\nan import from google/api/client.proto, but that\nimport was not added to BUILD.bazel.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 289446074\n\n8841655b242c84fd691d77d7bcf21b61044f01ff\nMigrate Data Labeling v1beta1 to GAPIC v2.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 289446026\n\n06ec6d5d053fff299eaa6eaa38afdd36c5e2fc68\nAdd annotations to google.longrunning.v1\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 289413169\n\n0480cf40be1d3cc231f4268a2fdb36a8dd60e641\nMigrate IAM Admin v1 to GAPIC v2.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 289411084\n\n1017173e9adeb858587639af61889ad970c728b1\nSpecify a C# namespace for BigQuery Connection v1beta1\n\nPiperOrigin-RevId: 289396763\n\nb08714b378e8e5b0c4ecdde73f92c36d6303b4b6\nfix: Integrate latest proto-docs-plugin fix.\nFixes dialogflow v2\n\nPiperOrigin-RevId: 289189004\n\n51217a67e79255ee1f2e70a6a3919df082513327\nCreate BUILD file for recommender v1\n\nPiperOrigin-RevId: 289183234\n\nacacd87263c0a60e458561b8b8ce9f67c760552a\nGenerate recommender v1 API protos and gRPC ServiceConfig files\n\nPiperOrigin-RevId: 289177510\n\n9d2f7133b97720b1fa3601f6dcd30760ba6d8a1e\nFix kokoro build script\n\nPiperOrigin-RevId: 289166315\n\nc43a67530d2a47a0220cad20ca8de39b3fbaf2c5\ncloudtasks: replace missing RPC timeout config for v2beta2 and v2beta3\n\nPiperOrigin-RevId: 289162391\n\n4cefc229a9197236fc0adf02d69b71c0c5cf59de\nSynchronize new proto/yaml changes.\n\nPiperOrigin-RevId: 289158456\n\n56f263fe959c50786dab42e3c61402d32d1417bd\nCatalog API: Adding config necessary to build client libraries\n\nPiperOrigin-RevId: 289149879\n\n4543762b23a57fc3c53d409efc3a9affd47b6ab3\nFix Bazel build\nbilling/v1 and dialogflow/v2 remain broken (not bazel-related issues).\nBilling has wrong configuration, dialogflow failure is caused by a bug in documentation plugin.\n\nPiperOrigin-RevId: 289140194\n\nc9dce519127b97e866ca133a01157f4ce27dcceb\nUpdate Bigtable docs\n\nPiperOrigin-RevId: 289114419\n\n802c5c5f2bf94c3facb011267d04e71942e0d09f\nMigrate DLP to proto annotations (but not GAPIC v2).\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 289102579\n\n6357f30f2ec3cff1d8239d18b707ff9d438ea5da\nRemove gRPC configuration file that was in the wrong place.\n\nPiperOrigin-RevId: 289096111\n\n360a8792ed62f944109d7e22d613a04a010665b4\n Protos for v1p1beta1 release of Cloud Security Command Center\n\nPiperOrigin-RevId: 289011995\n\na79211c20c4f2807eec524d00123bf7c06ad3d6e\nRoll back containeranalysis 
v1 to GAPIC v1.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 288999068\n\n9e60345ba603e03484a8aaa33ce5ffa19c1c652b\nPublish Routes Preferred API v1 proto definitions.\n\nPiperOrigin-RevId: 288941399\n\nd52885b642ad2aa1f42b132ee62dbf49a73e1e24\nMigrate the service management API to GAPIC v2.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 288909426\n\n6ace586805c08896fef43e28a261337fcf3f022b\ncloudtasks: replace missing RPC timeout config\n\nPiperOrigin-RevId: 288783603\n\n51d906cabee4876b12497054b15b05d4a50ad027\nImport of Grafeas from Github.\n\nUpdate BUILD.bazel accordingly.\n\nPiperOrigin-RevId: 288783426\n\n5ef42bcd363ba0440f0ee65b3c80b499e9067ede\nMigrate Recommender v1beta1 to GAPIC v2.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 288713066\n\n94f986afd365b7d7e132315ddcd43d7af0e652fb\nMigrate Container Analysis v1 to GAPIC v2.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 288708382\n\n7a751a279184970d3b6ba90e4dd4d22a382a0747\nRemove Container Analysis v1alpha1 (nobody publishes it).\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 288707473\n\n3c0d9c71242e70474b2b640e15bb0a435fd06ff0\nRemove specious annotation from BigQuery Data Transfer before\nanyone accidentally does anything that uses it.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 288701604\n\n1af307a4764bd415ef942ac5187fa1def043006f\nMigrate BigQuery Connection to GAPIC v2.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 288698681\n\n" + "sha": "2717b8a1c762b26911b45ecc2e4ee01d98401b28", + "internalRef": "292555664", + "log": "2717b8a1c762b26911b45ecc2e4ee01d98401b28\nFix dataproc artman client library generation.\n\nPiperOrigin-RevId: 292555664\n\n7ac66d9be8a7d7de4f13566d8663978c9ee9dcd7\nAdd Dataproc Autoscaling API to V1.\n\nPiperOrigin-RevId: 292450564\n\n5d932b2c1be3a6ef487d094e3cf5c0673d0241dd\n- Improve documentation\n- Add a client_id field to StreamingPullRequest\n\nPiperOrigin-RevId: 292434036\n\neaff9fa8edec3e914995ce832b087039c5417ea7\nmonitoring: v3 publish annotations and client retry config\n\nPiperOrigin-RevId: 292425288\n\n70958bab8c5353870d31a23fb2c40305b050d3fe\nBigQuery Storage Read API v1 clients.\n\nPiperOrigin-RevId: 292407644\n\n7a15e7fe78ff4b6d5c9606a3264559e5bde341d1\nUpdate backend proto for Google Cloud Endpoints\n\nPiperOrigin-RevId: 292391607\n\n3ca2c014e24eb5111c8e7248b1e1eb833977c83d\nbazel: Add --flaky_test_attempts=3 argument to prevent CI failures caused by flaky tests\n\nPiperOrigin-RevId: 292382559\n\n9933347c1f677e81e19a844c2ef95bfceaf694fe\nbazel:Integrate latest protoc-java-resource-names-plugin changes (fix for PyYAML dependency in bazel rules)\n\nPiperOrigin-RevId: 292376626\n\nb835ab9d2f62c88561392aa26074c0b849fb0bd3\nasset: v1p2beta1 add client config annotations\n\n* remove unintentionally exposed RPCs\n* remove messages relevant to removed RPCs\n\nPiperOrigin-RevId: 292369593\n\n" } }, { "template": { - "name": "python_library", + "name": "python_split_library", "origin": "synthtool.gcp", "version": "2019.10.17" } @@ -46,403 +46,5 @@ "config": "google/bigtable/admin/artman_bigtableadmin.yaml" } } - ], - "newFiles": [ - { - "path": ".coveragerc" - }, - { - "path": ".flake8" - }, - { - "path": ".repo-metadata.json" - }, - { - "path": "CHANGELOG.md" - }, - { - "path": "LICENSE" - }, - { - "path": "MANIFEST.in" - }, - { - "path": "README.rst" - }, - { - "path": "docs/README.rst" - }, - { - "path": "docs/_static/custom.css" - }, - { - "path": "docs/_templates/layout.html" - }, - { - "path": "docs/changelog.md" - }, - { - "path": "docs/client-intro.rst" - }, - { 
- "path": "docs/client.rst" - }, - { - "path": "docs/cluster.rst" - }, - { - "path": "docs/column-family.rst" - }, - { - "path": "docs/conf.py" - }, - { - "path": "docs/data-api.rst" - }, - { - "path": "docs/index.rst" - }, - { - "path": "docs/instance-api.rst" - }, - { - "path": "docs/instance.rst" - }, - { - "path": "docs/row-data.rst" - }, - { - "path": "docs/row-filters.rst" - }, - { - "path": "docs/row.rst" - }, - { - "path": "docs/snippets.py" - }, - { - "path": "docs/snippets_table.py" - }, - { - "path": "docs/table-api.rst" - }, - { - "path": "docs/table.rst" - }, - { - "path": "docs/usage.rst" - }, - { - "path": "google/__init__.py" - }, - { - "path": "google/cloud/__init__.py" - }, - { - "path": "google/cloud/bigtable.py" - }, - { - "path": "google/cloud/bigtable/__init__.py" - }, - { - "path": "google/cloud/bigtable/app_profile.py" - }, - { - "path": "google/cloud/bigtable/batcher.py" - }, - { - "path": "google/cloud/bigtable/client.py" - }, - { - "path": "google/cloud/bigtable/cluster.py" - }, - { - "path": "google/cloud/bigtable/column_family.py" - }, - { - "path": "google/cloud/bigtable/enums.py" - }, - { - "path": "google/cloud/bigtable/instance.py" - }, - { - "path": "google/cloud/bigtable/policy.py" - }, - { - "path": "google/cloud/bigtable/row.py" - }, - { - "path": "google/cloud/bigtable/row_data.py" - }, - { - "path": "google/cloud/bigtable/row_filters.py" - }, - { - "path": "google/cloud/bigtable/row_set.py" - }, - { - "path": "google/cloud/bigtable/table.py" - }, - { - "path": "google/cloud/bigtable_admin_v2/__init__.py" - }, - { - "path": "google/cloud/bigtable_admin_v2/gapic/__init__.py" - }, - { - "path": "google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py" - }, - { - "path": "google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client_config.py" - }, - { - "path": "google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py" - }, - { - "path": "google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py" - }, - { - "path": "google/cloud/bigtable_admin_v2/gapic/enums.py" - }, - { - "path": "google/cloud/bigtable_admin_v2/gapic/transports/__init__.py" - }, - { - "path": "google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py" - }, - { - "path": "google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py" - }, - { - "path": "google/cloud/bigtable_admin_v2/proto/__init__.py" - }, - { - "path": "google/cloud/bigtable_admin_v2/proto/bigtable_cluster_data.proto" - }, - { - "path": "google/cloud/bigtable_admin_v2/proto/bigtable_cluster_service.proto" - }, - { - "path": "google/cloud/bigtable_admin_v2/proto/bigtable_cluster_service_messages.proto" - }, - { - "path": "google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto" - }, - { - "path": "google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py" - }, - { - "path": "google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py" - }, - { - "path": "google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto" - }, - { - "path": "google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py" - }, - { - "path": "google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py" - }, - { - "path": "google/cloud/bigtable_admin_v2/proto/bigtable_table_data.proto" - }, - { - "path": "google/cloud/bigtable_admin_v2/proto/bigtable_table_service.proto" - }, - { - "path": "google/cloud/bigtable_admin_v2/proto/bigtable_table_service_messages.proto" - }, - { - "path": 
"google/cloud/bigtable_admin_v2/proto/common.proto" - }, - { - "path": "google/cloud/bigtable_admin_v2/proto/common_pb2.py" - }, - { - "path": "google/cloud/bigtable_admin_v2/proto/common_pb2_grpc.py" - }, - { - "path": "google/cloud/bigtable_admin_v2/proto/instance.proto" - }, - { - "path": "google/cloud/bigtable_admin_v2/proto/instance_pb2.py" - }, - { - "path": "google/cloud/bigtable_admin_v2/proto/instance_pb2_grpc.py" - }, - { - "path": "google/cloud/bigtable_admin_v2/proto/table.proto" - }, - { - "path": "google/cloud/bigtable_admin_v2/proto/table_pb2.py" - }, - { - "path": "google/cloud/bigtable_admin_v2/proto/table_pb2_grpc.py" - }, - { - "path": "google/cloud/bigtable_admin_v2/types.py" - }, - { - "path": "google/cloud/bigtable_v2/__init__.py" - }, - { - "path": "google/cloud/bigtable_v2/gapic/__init__.py" - }, - { - "path": "google/cloud/bigtable_v2/gapic/bigtable_client.py" - }, - { - "path": "google/cloud/bigtable_v2/gapic/bigtable_client_config.py" - }, - { - "path": "google/cloud/bigtable_v2/gapic/transports/__init__.py" - }, - { - "path": "google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py" - }, - { - "path": "google/cloud/bigtable_v2/proto/__init__.py" - }, - { - "path": "google/cloud/bigtable_v2/proto/bigtable.proto" - }, - { - "path": "google/cloud/bigtable_v2/proto/bigtable_cluster_data.proto" - }, - { - "path": "google/cloud/bigtable_v2/proto/bigtable_cluster_service.proto" - }, - { - "path": "google/cloud/bigtable_v2/proto/bigtable_cluster_service_messages.proto" - }, - { - "path": "google/cloud/bigtable_v2/proto/bigtable_data.proto" - }, - { - "path": "google/cloud/bigtable_v2/proto/bigtable_instance_admin.proto" - }, - { - "path": "google/cloud/bigtable_v2/proto/bigtable_pb2.py" - }, - { - "path": "google/cloud/bigtable_v2/proto/bigtable_pb2_grpc.py" - }, - { - "path": "google/cloud/bigtable_v2/proto/bigtable_service.proto" - }, - { - "path": "google/cloud/bigtable_v2/proto/bigtable_service_messages.proto" - }, - { - "path": "google/cloud/bigtable_v2/proto/bigtable_table_admin.proto" - }, - { - "path": "google/cloud/bigtable_v2/proto/bigtable_table_data.proto" - }, - { - "path": "google/cloud/bigtable_v2/proto/bigtable_table_service.proto" - }, - { - "path": "google/cloud/bigtable_v2/proto/bigtable_table_service_messages.proto" - }, - { - "path": "google/cloud/bigtable_v2/proto/common.proto" - }, - { - "path": "google/cloud/bigtable_v2/proto/data.proto" - }, - { - "path": "google/cloud/bigtable_v2/proto/data_pb2.py" - }, - { - "path": "google/cloud/bigtable_v2/proto/data_pb2_grpc.py" - }, - { - "path": "google/cloud/bigtable_v2/proto/instance.proto" - }, - { - "path": "google/cloud/bigtable_v2/proto/table.proto" - }, - { - "path": "google/cloud/bigtable_v2/types.py" - }, - { - "path": "noxfile.py" - }, - { - "path": "pylint.config.py" - }, - { - "path": "setup.cfg" - }, - { - "path": "setup.py" - }, - { - "path": "synth.metadata" - }, - { - "path": "synth.py" - }, - { - "path": "tests/__init__.py" - }, - { - "path": "tests/system.py" - }, - { - "path": "tests/unit/__init__.py" - }, - { - "path": "tests/unit/_testing.py" - }, - { - "path": "tests/unit/gapic/v2/test_bigtable_client_v2.py" - }, - { - "path": "tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py" - }, - { - "path": "tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py" - }, - { - "path": "tests/unit/read-rows-acceptance-test.json" - }, - { - "path": "tests/unit/test_app_profile.py" - }, - { - "path": "tests/unit/test_batcher.py" - }, - { - "path": 
"tests/unit/test_client.py" - }, - { - "path": "tests/unit/test_cluster.py" - }, - { - "path": "tests/unit/test_column_family.py" - }, - { - "path": "tests/unit/test_instance.py" - }, - { - "path": "tests/unit/test_policy.py" - }, - { - "path": "tests/unit/test_row.py" - }, - { - "path": "tests/unit/test_row_data.py" - }, - { - "path": "tests/unit/test_row_filters.py" - }, - { - "path": "tests/unit/test_row_set.py" - }, - { - "path": "tests/unit/test_table.py" - } ] } \ No newline at end of file From 6a36c39b493601f2dfc8d43a446955e38ca75e52 Mon Sep 17 00:00:00 2001 From: Chris Wilcox Date: Fri, 31 Jan 2020 18:58:35 +0000 Subject: [PATCH 299/892] fix: localdeps --- packages/google-cloud-bigtable/noxfile.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index 00c2b4793529..f19ac8a2be2a 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -21,7 +21,7 @@ import nox -LOCAL_DEPS = (os.path.join("..", "api_core"), os.path.join("..", "core")) +LOCAL_DEPS = () @nox.session(python="3.7") def lint(session): From dbff6d9daaeebcd60410a8d74605cee4d6420ceb Mon Sep 17 00:00:00 2001 From: Chris Wilcox Date: Fri, 31 Jan 2020 19:15:32 +0000 Subject: [PATCH 300/892] fix: test_utils --- packages/google-cloud-bigtable/noxfile.py | 4 +- .../test_utils/credentials.json.enc | 49 ++++ .../scripts/circleci/get_tagged_package.py | 64 +++++ .../scripts/circleci/twine_upload.sh | 36 +++ .../test_utils/scripts/get_target_packages.py | 268 ++++++++++++++++++ .../scripts/get_target_packages_kokoro.py | 98 +++++++ .../test_utils/scripts/run_emulator.py | 199 +++++++++++++ .../test_utils/scripts/update_docs.sh | 93 ++++++ .../google-cloud-bigtable/test_utils/setup.py | 64 +++++ .../test_utils/test_utils/__init__.py | 0 .../test_utils/test_utils/imports.py | 38 +++ .../test_utils/test_utils/retry.py | 207 ++++++++++++++ .../test_utils/test_utils/system.py | 81 ++++++ 13 files changed, 1199 insertions(+), 2 deletions(-) create mode 100644 packages/google-cloud-bigtable/test_utils/credentials.json.enc create mode 100644 packages/google-cloud-bigtable/test_utils/scripts/circleci/get_tagged_package.py create mode 100755 packages/google-cloud-bigtable/test_utils/scripts/circleci/twine_upload.sh create mode 100644 packages/google-cloud-bigtable/test_utils/scripts/get_target_packages.py create mode 100644 packages/google-cloud-bigtable/test_utils/scripts/get_target_packages_kokoro.py create mode 100644 packages/google-cloud-bigtable/test_utils/scripts/run_emulator.py create mode 100755 packages/google-cloud-bigtable/test_utils/scripts/update_docs.sh create mode 100644 packages/google-cloud-bigtable/test_utils/setup.py create mode 100644 packages/google-cloud-bigtable/test_utils/test_utils/__init__.py create mode 100644 packages/google-cloud-bigtable/test_utils/test_utils/imports.py create mode 100644 packages/google-cloud-bigtable/test_utils/test_utils/retry.py create mode 100644 packages/google-cloud-bigtable/test_utils/test_utils/system.py diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index f19ac8a2be2a..9155915893ef 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -114,7 +114,7 @@ def system(session): session.install("mock", "pytest") for local_dep in LOCAL_DEPS: session.install("-e", local_dep) - session.install("-e", "../test_utils/") + session.install("-e", 
"test_utils/") session.install("-e", ".") # Run py.test against the system tests. @@ -169,7 +169,7 @@ def snippets(session): session.install('mock', 'pytest') for local_dep in LOCAL_DEPS: session.install('-e', local_dep) - session.install('-e', '../test_utils/') + session.install('-e', 'test_utils/') session.install('-e', '.') session.run( 'py.test', diff --git a/packages/google-cloud-bigtable/test_utils/credentials.json.enc b/packages/google-cloud-bigtable/test_utils/credentials.json.enc new file mode 100644 index 000000000000..f073c7e4f774 --- /dev/null +++ b/packages/google-cloud-bigtable/test_utils/credentials.json.enc @@ -0,0 +1,49 @@ +U2FsdGVkX1/vVm/dOEg1DCACYbdOcL+ey6+64A+DZGZVgF8Z/3skK6rpPocu6GOA +UZAqASsBH9QifDf8cKVXQXVYpYq6HSv2O0w7vOmVorZO9GYPo98s9/8XO+4ty/AU +aB6TD68frBAYv4cT/l5m7aYdzfzMTy0EOXoleZT09JYP3B5FV3KCO114FzMXGwrj +HXsR6E5SyUUlUnWPC3eD3aqmovay0gxOKYO3ZwjFK1nlbN/8q6/8nwBCf/Bg6SHV +V93pNxdolRlJev9kgKz4RN1z4jGCy5PAndhSLE82NFIs9LoAiEOU5YeMlN+Ulqus +J92nh+ptUe9a4pJGbAuveUWO7zdS1QyXvTMUcmmSfXCNm/eIQjNuu5+rHtIjWKh8 +Ilwj2w1aTfSptQEhk/kwRgFz/d11vfwJzvwTmCxO6zyOeL0VUWLqdCBGgG5As9He +/RenF8PZ1O0WbTt7fns5oTlTk/MUo+0xJ1xqvu/y45LaqqcBAnEdrWKmtM3dJHWv +ufQku+kD+83F/VwBnQdvgMHu6KZEs6LRrNo58r4QuK6fS7VCACdzxID1RM2cL7kT +6BFRlyGj1aigmjne9g9M9Jx4R+mZDpPU1WDzzG71J4qCUwaX8Dfwutuv4uiFvzwq +NUF0wLJJPtKWmtW+hnZ/fhHQGCRsOpZzFnqp6Zv7J7k6esqxMgIjfal7Djk5Acy8 +j3iVvm6CYmKMVqzL62JHYS9Ye83tzBCaR8hpnJQKgH3FSOFY8HSwrtQSIsl/hSeF +41sgnz0Y+/gkzNeU18qFk+eCZmvljyu+JK0nPYUgpOCJYVBNQpNHz5PUyiAEKhtM +IOSdjPRW1Y+Xf4RroJnLPoF24Ijwrow5LCm9hBRY6TPPMMmnIXCd23xcLJ1rMj6g +x4ZikElans+cwuc9wtbb7w01DcpTwQ1+eIV1qV+KIgpnLjRGLhZD4etobBsrwYu/ +vnIwy2QHCKENPb8sbdgp7x2mF7VSX0/7tf+9+i70EBiMzpOKBkiZhtLzm6hOBkEy +ODaWrx4lTTwbSw8Rmtf58APhPFMsjHoNsjiUoK249Y8Y2Ff4fMfqYsXu6VC1n/At +CuWYHc3EfBwFcLJS+RQB9kFk/4FygFBWq4Kj0MqoRruLbKmoGeJKH9q35W0f0NCD +j+iHt3014kMGiuyJe1UDQ6fvEihFFdHuDivFpPAXDt4PTY/WtpDhaGMx23kb54pK +jkAuxpznAB1lK3u9bGRXDasGeHIrNtIlPvgkrWHXvoBVqM7zry8TGtoxp3E3I42Z +cUfDWfB9GqVdrOwvrTzyZsl2uShRkAJaZFZj5aMyYxiptp4gM8CwWiNtOd2EwtRO +LxZX4M02PQFIqXV3FSDA0q6EwglUrTZdAlYeOEkopaKCtG31dEPOSQG3NGJAEYso +Cxm99H7970dp0OAgpNSgRbcWDbhVbQXnRzvFGqLeH6a9dQ/a8uD3s8Qm9Du/kB6d +XxTRe2OGxzcD0AgI8GClE4rIZHCLbcwuJRp0EYcN+pgY80O4U98fZ5RYpU6OYbU/ +MEiaBYFKtZtGkV6AQD568V7hHJWqc5DDfVHUQ/aeQwnKi2vnU66u+nnV2rZxXxLP ++dqeLRpul+wKa5b/Z5SfQ14Ff8s7aVyxaogGpyggyPL1vyq4KWZ6Or/wEE5hgNO4 +kBh6ht0QT1Hti8XY2JK1M+Jgbjgcg4jkHBGVqegrG1Rvcc2A4TYKwx+QMSBhyxrU +5qhROjS4lTcC42hQslMUkUwc4U/Y91XdFbOOnaAkwzI36NRYL0pmgZnYxGJZeRvr +E5foOhnOEVSFGdOkLfFh+FkWZQf56Lmn8Gg2wHE3dZTxLHibiUYfkgOr1uEosq29 +D1NstvlJURPQ0Q+8QQNWcl9nEZHMAjOmnL1hbx+QfuC6seucp+sXGzdZByMLZbvT +tG8KNL293CmyQowgf9MXToWYnwRkcvqfTaKyor2Ggze3JtoFW4t0j4DI1XPciZFX +XmfApHrzdB/bZadzxyaZ2NE0CuH9zDelwI6rz38xsN5liYnp5qmNKVCZVOHccXa6 +J8x365m5/VaaA2RrtdPqKxn8VaKy7+T690QgMXVGM4PbzQzQxHuSleklocqlP+sB +jSMXCZY+ng/i4UmRO9noiyW3UThYh0hIdMYs12EmmI9cnF/OuYZpl30fmqwV+VNM +td5B2fYvAvvsjiX60SFCn3DATP1GrPMBlZSmhhP3GYS+xrWt3Xxta9qIX2BEF1Gg +twnZZRjoULSRFUYPfJPEOfEH2UQwm84wxx/GezVE+S/RpBlatPOgCiLnNNaLfdTC +mTG9qY9elJv3GGQO8Lqgf4i8blExs05lSPk1BDhzTB6H9TLz+Ge0/l1QxKf3gPXU +aImK1azieXMXHECkdKxrzmehwu1dZ/oYOLc/OFQCETwSRoLPFOFpYUpizwmVVHR6 +uLSfRptte4ZOU3zHfpd/0+J4tkwHwEkGzsmMdqudlm7qME6upuIplyVBH8JiXzUK +n1RIH/OPmVEluAnexWRLZNdk7MrakIO4XACVbICENiYQgAIErP568An6twWEGDbZ +bEN64E3cVDTDRPRAunIhhsEaapcxpFEPWlHorxv36nMUt0R0h0bJlCu5QdzckfcX +ZrRuu1kl76ZfbSE8T0G4/rBb9gsU4Gn3WyvLIO3MgFBuxR68ZwcR8LpEUd8qp38H +NG4cxPmN1nGKo663Z+xI2Gt5up4gpl+fOt4mXqxY386rB7yHaOfElMG5TUYdrS9w +1xbbCVgeJ6zxX+NFlndG33cSAPprhw+C18eUu6ZU63WZcYFo3GfK6rs3lvYtofvE 
+8DxztdTidQedNVNE+63YCjhxd/cZUI5n/UpgYkr9owp7hNGJiR3tdoNLR2gcoGqL +qWhH928k2aSgF2j97LZ2OqoPCp0tUB7ho4jD2u4Ik3GLVNlCc3dCvWRvpHtDTQDv +tujESMfHUc9I2r4S/PD3bku/ABGwa977Yp1PjzJGr9RajA5is5n6GVpyynwjtKG4 +iyyITpdwpCgr8pueTBLwZnas3slmiMOog/E4PmPgctHzvC+vhQijhUtw5zSsmv0l +bZlw/mVhp5Ta7dTcLBKR8DA3m3vTbaEGkz0xpfQr7GfiSMRbJyvIw88pDK0gyTMD diff --git a/packages/google-cloud-bigtable/test_utils/scripts/circleci/get_tagged_package.py b/packages/google-cloud-bigtable/test_utils/scripts/circleci/get_tagged_package.py new file mode 100644 index 000000000000..c148b9dc2370 --- /dev/null +++ b/packages/google-cloud-bigtable/test_utils/scripts/circleci/get_tagged_package.py @@ -0,0 +1,64 @@ +# Copyright 2016 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Helper to determine package from tag. +Get the current package directory corresponding to the Circle Tag. +""" + +from __future__ import print_function + +import os +import re +import sys + + +TAG_RE = re.compile(r""" + ^ + (?P + (([a-z]+)[_-])*) # pkg-name-with-hyphens-or-underscores (empty allowed) + ([0-9]+)\.([0-9]+)\.([0-9]+) # Version x.y.z (x, y, z all ints) + $ +""", re.VERBOSE) +TAG_ENV = 'CIRCLE_TAG' +ERROR_MSG = '%s env. var. not set' % (TAG_ENV,) +BAD_TAG_MSG = 'Invalid tag name: %s. Expected pkg-name-x.y.z' +CIRCLE_CI_SCRIPTS_DIR = os.path.dirname(__file__) +ROOT_DIR = os.path.realpath( + os.path.join(CIRCLE_CI_SCRIPTS_DIR, '..', '..', '..')) + + +def main(): + """Get the current package directory. + Prints the package directory out so callers can consume it. + """ + if TAG_ENV not in os.environ: + print(ERROR_MSG, file=sys.stderr) + sys.exit(1) + + tag_name = os.environ[TAG_ENV] + match = TAG_RE.match(tag_name) + if match is None: + print(BAD_TAG_MSG % (tag_name,), file=sys.stderr) + sys.exit(1) + + pkg_name = match.group('pkg') + if pkg_name is None: + print(ROOT_DIR) + else: + pkg_dir = pkg_name.rstrip('-').replace('-', '_') + print(os.path.join(ROOT_DIR, pkg_dir)) + + +if __name__ == '__main__': + main() diff --git a/packages/google-cloud-bigtable/test_utils/scripts/circleci/twine_upload.sh b/packages/google-cloud-bigtable/test_utils/scripts/circleci/twine_upload.sh new file mode 100755 index 000000000000..23a4738e90b9 --- /dev/null +++ b/packages/google-cloud-bigtable/test_utils/scripts/circleci/twine_upload.sh @@ -0,0 +1,36 @@ +#!/bin/bash + +# Copyright 2016 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -ev + +# If this is not a CircleCI tag, no-op. 
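For readers skimming the tag-handling scripts above: get_tagged_package.py only prints a directory, and all of the interesting work happens in TAG_RE. A small illustration of how that pattern splits a release tag (the tag names below are made up for the example):

    import re

    # Same pattern as TAG_RE in get_tagged_package.py above.
    TAG_RE = re.compile(r"""
        ^
        (?P<pkg>
        (([a-z]+)[_-])*)             # pkg-name-with-hyphens-or-underscores (empty allowed)
        ([0-9]+)\.([0-9]+)\.([0-9]+) # Version x.y.z (x, y, z all ints)
        $
    """, re.VERBOSE)

    for tag in ("bigtable-1.2.3", "0.32.0", "not-a-release"):
        match = TAG_RE.match(tag)
        if match is None:
            print(tag, "-> rejected (the script prints an error and exits)")
        elif not match.group("pkg"):
            print(tag, "-> umbrella tag, repository root is used")
        else:
            print(tag, "->", match.group("pkg").rstrip("-").replace("-", "_"))

So a CIRCLE_TAG such as bigtable-1.2.3 resolves to the bigtable package directory, while a bare x.y.z tag deploys from the repository root.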
+if [[ -z "$CIRCLE_TAG" ]]; then + echo "This is not a release tag. Doing nothing." + exit 0 +fi + +# H/T: http://stackoverflow.com/a/246128/1068170 +SCRIPT="$(dirname "${BASH_SOURCE[0]}")/get_tagged_package.py" +# Determine the package directory being deploying on this tag. +PKG_DIR="$(python ${SCRIPT})" + +# Ensure that we have the latest versions of Twine, Wheel, and Setuptools. +python3 -m pip install --upgrade twine wheel setuptools + +# Move into the package, build the distribution and upload. +cd ${PKG_DIR} +python3 setup.py sdist bdist_wheel +twine upload dist/* diff --git a/packages/google-cloud-bigtable/test_utils/scripts/get_target_packages.py b/packages/google-cloud-bigtable/test_utils/scripts/get_target_packages.py new file mode 100644 index 000000000000..1d51830cc23a --- /dev/null +++ b/packages/google-cloud-bigtable/test_utils/scripts/get_target_packages.py @@ -0,0 +1,268 @@ +# Copyright 2017 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Print a list of packages which require testing.""" + +import os +import re +import subprocess +import warnings + + +CURRENT_DIR = os.path.realpath(os.path.dirname(__file__)) +BASE_DIR = os.path.realpath(os.path.join(CURRENT_DIR, '..', '..')) +GITHUB_REPO = os.environ.get('GITHUB_REPO', 'google-cloud-python') +CI = os.environ.get('CI', '') +CI_BRANCH = os.environ.get('CIRCLE_BRANCH') +CI_PR = os.environ.get('CIRCLE_PR_NUMBER') +CIRCLE_TAG = os.environ.get('CIRCLE_TAG') +head_hash, head_name = subprocess.check_output(['git', 'show-ref', 'HEAD'] +).strip().decode('ascii').split() +rev_parse = subprocess.check_output( + ['git', 'rev-parse', '--abbrev-ref', 'HEAD'] +).strip().decode('ascii') +MAJOR_DIV = '#' * 78 +MINOR_DIV = '#' + '-' * 77 + +# NOTE: This reg-ex is copied from ``get_tagged_packages``. +TAG_RE = re.compile(r""" + ^ + (?P + (([a-z]+)-)*) # pkg-name-with-hyphens- (empty allowed) + ([0-9]+)\.([0-9]+)\.([0-9]+) # Version x.y.z (x, y, z all ints) + $ +""", re.VERBOSE) + +# This is the current set of dependencies by package. +# As of this writing, the only "real" dependency is that of error_reporting +# (on logging), the rest are just system test dependencies. +PKG_DEPENDENCIES = { + 'logging': {'pubsub'}, +} + + +def get_baseline(): + """Return the baseline commit. + + On a pull request, or on a branch, return the common parent revision + with the master branch. + + Locally, return a value pulled from environment variables, or None if + the environment variables are not set. + + On a push to master, return None. This will effectively cause everything + to be considered to be affected. + """ + + # If this is a pull request or branch, return the tip for master. + # We will test only packages which have changed since that point. 
+ ci_non_master = (CI == 'true') and any([CI_BRANCH != 'master', CI_PR]) + + if ci_non_master: + + repo_url = 'git@github.com:GoogleCloudPlatform/{}'.format(GITHUB_REPO) + subprocess.run(['git', 'remote', 'add', 'baseline', repo_url], + stderr=subprocess.DEVNULL) + subprocess.run(['git', 'pull', 'baseline'], stderr=subprocess.DEVNULL) + + if CI_PR is None and CI_BRANCH is not None: + output = subprocess.check_output([ + 'git', 'merge-base', '--fork-point', + 'baseline/master', CI_BRANCH]) + return output.strip().decode('ascii') + + return 'baseline/master' + + # If environment variables are set identifying what the master tip is, + # use that. + if os.environ.get('GOOGLE_CLOUD_TESTING_REMOTE', ''): + remote = os.environ['GOOGLE_CLOUD_TESTING_REMOTE'] + branch = os.environ.get('GOOGLE_CLOUD_TESTING_BRANCH', 'master') + return '%s/%s' % (remote, branch) + + # If we are not in CI and we got this far, issue a warning. + if not CI: + warnings.warn('No baseline could be determined; this means tests ' + 'will run for every package. If this is local ' + 'development, set the $GOOGLE_CLOUD_TESTING_REMOTE ' + 'environment variable.') + + # That is all we can do; return None. + return None + + +def get_changed_files(): + """Return a list of files that have been changed since the baseline. + + If there is no base, return None. + """ + # Get the baseline, and fail quickly if there is no baseline. + baseline = get_baseline() + print('# Baseline commit: {}'.format(baseline)) + if not baseline: + return None + + # Return a list of altered files. + try: + return subprocess.check_output([ + 'git', 'diff', '--name-only', '{}..HEAD'.format(baseline), + ], stderr=subprocess.DEVNULL).decode('utf8').strip().split('\n') + except subprocess.CalledProcessError: + warnings.warn('Unable to perform git diff; falling back to assuming ' + 'all packages have changed.') + return None + + +def reverse_map(dict_of_sets): + """Reverse a map of one-to-many. + + So the map:: + + { + 'A': {'B', 'C'}, + 'B': {'C'}, + } + + becomes + + { + 'B': {'A'}, + 'C': {'A', 'B'}, + } + + Args: + dict_of_sets (dict[set]): A dictionary of sets, mapping + one value to many. + + Returns: + dict[set]: The reversed map. + """ + result = {} + for key, values in dict_of_sets.items(): + for value in values: + result.setdefault(value, set()).add(key) + + return result + +def get_changed_packages(file_list): + """Return a list of changed packages based on the provided file list. + + If the file list is None, then all packages should be considered to be + altered. + """ + # Determine a complete list of packages. + all_packages = set() + for file_ in os.listdir(BASE_DIR): + abs_file = os.path.realpath(os.path.join(BASE_DIR, file_)) + nox_file = os.path.join(abs_file, 'nox.py') + if os.path.isdir(abs_file) and os.path.isfile(nox_file): + all_packages.add(file_) + + # If ther is no file list, send down the full package set. + if file_list is None: + return all_packages + + # Create a set based on the list of changed files. + answer = set() + reverse_deps = reverse_map(PKG_DEPENDENCIES) + for file_ in file_list: + # Ignore root directory changes (setup.py, .gitignore, etc.). + if os.path.sep not in file_: + continue + + # Ignore changes that are not in a package (usually this will be docs). + package = file_.split(os.path.sep, 1)[0] + if package not in all_packages: + continue + + # If there is a change in core, short-circuit now and return + # everything. 
+ if package in ('core',): + return all_packages + + # Add the package, as well as any dependencies this package has. + # NOTE: For now, dependencies only go down one level. + answer.add(package) + answer = answer.union(reverse_deps.get(package, set())) + + # We got this far without being short-circuited; return the final answer. + return answer + + +def get_tagged_package(): + """Return the package corresponding to the current tag. + + If there is not tag, will return :data:`None`. + """ + if CIRCLE_TAG is None: + return + + match = TAG_RE.match(CIRCLE_TAG) + if match is None: + return + + pkg_name = match.group('pkg') + if pkg_name == '': + # NOTE: This corresponds to the "umbrella" tag. + return + + return pkg_name.rstrip('-').replace('-', '_') + + +def get_target_packages(): + """Return a list of target packages to be run in the current build. + + If in a tag build, will run only the package(s) that are tagged, otherwise + will run the packages that have file changes in them (or packages that + depend on those). + """ + tagged_package = get_tagged_package() + if tagged_package is None: + file_list = get_changed_files() + print(MAJOR_DIV) + print('# Changed files:') + print(MINOR_DIV) + for file_ in file_list or (): + print('# {}'.format(file_)) + for package in sorted(get_changed_packages(file_list)): + yield package + else: + yield tagged_package + + +def main(): + print(MAJOR_DIV) + print('# Environment') + print(MINOR_DIV) + print('# CircleCI: {}'.format(CI)) + print('# CircleCI branch: {}'.format(CI_BRANCH)) + print('# CircleCI pr: {}'.format(CI_PR)) + print('# CircleCI tag: {}'.format(CIRCLE_TAG)) + print('# HEAD ref: {}'.format(head_hash)) + print('# {}'.format(head_name)) + print('# Git branch: {}'.format(rev_parse)) + print(MAJOR_DIV) + + packages = list(get_target_packages()) + + print(MAJOR_DIV) + print('# Target packages:') + print(MINOR_DIV) + for package in packages: + print(package) + print(MAJOR_DIV) + + +if __name__ == '__main__': + main() diff --git a/packages/google-cloud-bigtable/test_utils/scripts/get_target_packages_kokoro.py b/packages/google-cloud-bigtable/test_utils/scripts/get_target_packages_kokoro.py new file mode 100644 index 000000000000..27d3a0c940ea --- /dev/null +++ b/packages/google-cloud-bigtable/test_utils/scripts/get_target_packages_kokoro.py @@ -0,0 +1,98 @@ +# Copyright 2017 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Print a list of packages which require testing.""" + +import pathlib +import subprocess + +import ci_diff_helper +import requests + + +def print_environment(environment): + print("-> CI environment:") + print('Branch', environment.branch) + print('PR', environment.pr) + print('In PR', environment.in_pr) + print('Repo URL', environment.repo_url) + if environment.in_pr: + print('PR Base', environment.base) + + +def get_base(environment): + if environment.in_pr: + return environment.base + else: + # If we're not in a PR, just calculate the changes between this commit + # and its parent. 
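To make the package-selection logic in get_target_packages.py above concrete, here is a small, self-contained sketch of the dependency fan-out (the changed file names and the package set are invented for the example):

    # Illustrative only: mirrors the core of get_changed_packages() above.
    PKG_DEPENDENCIES = {'logging': {'pubsub'}}      # logging system tests depend on pubsub

    def reverse_map(dict_of_sets):
        result = {}
        for key, values in dict_of_sets.items():
            for value in values:
                result.setdefault(value, set()).add(key)
        return result

    reverse_deps = reverse_map(PKG_DEPENDENCIES)       # {'pubsub': {'logging'}}
    all_packages = {'bigtable', 'logging', 'pubsub'}   # normally discovered from nox.py files
    changed_files = ['pubsub/google/cloud/pubsub.py', 'docs/index.rst', 'setup.py']

    answer = set()
    for file_ in changed_files:
        if '/' not in file_:                 # root-level files are ignored
            continue
        package = file_.split('/', 1)[0]
        if package not in all_packages:      # e.g. docs/ changes select nothing
            continue
        answer.add(package)
        answer |= reverse_deps.get(package, set())

    print(sorted(answer))                    # ['logging', 'pubsub']

A change under pubsub/ therefore schedules tests for pubsub itself and for logging, which depends on it; a change under core/ short-circuits and selects every package.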
+ return 'HEAD~1' + + +def get_changed_files_from_base(base): + return subprocess.check_output([ + 'git', 'diff', '--name-only', f'{base}..HEAD', + ], stderr=subprocess.DEVNULL).decode('utf8').strip().split('\n') + + +_URL_TEMPLATE = ( + 'https://api.github.com/repos/googleapis/google-cloud-python/pulls/' + '{}/files' +) + + +def get_changed_files_from_pr(pr): + url = _URL_TEMPLATE.format(pr) + while url is not None: + response = requests.get(url) + for info in response.json(): + yield info['filename'] + url = response.links.get('next', {}).get('url') + + +def determine_changed_packages(changed_files): + packages = [ + path.parent for path in pathlib.Path('.').glob('*/noxfile.py') + ] + + changed_packages = set() + for file in changed_files: + file = pathlib.Path(file) + for package in packages: + if package in file.parents: + changed_packages.add(package) + + return changed_packages + + +def main(): + environment = ci_diff_helper.get_config() + print_environment(environment) + base = get_base(environment) + + if environment.in_pr: + changed_files = list(get_changed_files_from_pr(environment.pr)) + else: + changed_files = get_changed_files_from_base(base) + + packages = determine_changed_packages(changed_files) + + print(f"Comparing against {base}.") + print("-> Changed packages:") + + for package in packages: + print(package) + + +main() diff --git a/packages/google-cloud-bigtable/test_utils/scripts/run_emulator.py b/packages/google-cloud-bigtable/test_utils/scripts/run_emulator.py new file mode 100644 index 000000000000..287b08640691 --- /dev/null +++ b/packages/google-cloud-bigtable/test_utils/scripts/run_emulator.py @@ -0,0 +1,199 @@ +# Copyright 2016 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run system tests locally with the emulator. + +First makes system calls to spawn the emulator and get the local environment +variable needed for it. Then calls the system tests. +""" + + +import argparse +import os +import subprocess + +import psutil + +from google.cloud.environment_vars import BIGTABLE_EMULATOR +from google.cloud.environment_vars import GCD_DATASET +from google.cloud.environment_vars import GCD_HOST +from google.cloud.environment_vars import PUBSUB_EMULATOR +from run_system_test import run_module_tests + + +BIGTABLE = 'bigtable' +DATASTORE = 'datastore' +PUBSUB = 'pubsub' +PACKAGE_INFO = { + BIGTABLE: (BIGTABLE_EMULATOR,), + DATASTORE: (GCD_DATASET, GCD_HOST), + PUBSUB: (PUBSUB_EMULATOR,), +} +EXTRA = { + DATASTORE: ('--no-legacy',), +} +_DS_READY_LINE = '[datastore] Dev App Server is now running.\n' +_PS_READY_LINE_PREFIX = '[pubsub] INFO: Server started, listening on ' +_BT_READY_LINE_PREFIX = '[bigtable] Cloud Bigtable emulator running on ' + + +def get_parser(): + """Get simple ``argparse`` parser to determine package. + + :rtype: :class:`argparse.ArgumentParser` + :returns: The parser for this script. 
+ """ + parser = argparse.ArgumentParser( + description='Run google-cloud system tests against local emulator.') + parser.add_argument('--package', dest='package', + choices=sorted(PACKAGE_INFO.keys()), + default=DATASTORE, help='Package to be tested.') + return parser + + +def get_start_command(package): + """Get command line arguments for starting emulator. + + :type package: str + :param package: The package to start an emulator for. + + :rtype: tuple + :returns: The arguments to be used, in a tuple. + """ + result = ('gcloud', 'beta', 'emulators', package, 'start') + extra = EXTRA.get(package, ()) + return result + extra + + +def get_env_init_command(package): + """Get command line arguments for getting emulator env. info. + + :type package: str + :param package: The package to get environment info for. + + :rtype: tuple + :returns: The arguments to be used, in a tuple. + """ + result = ('gcloud', 'beta', 'emulators', package, 'env-init') + extra = EXTRA.get(package, ()) + return result + extra + + +def datastore_wait_ready(popen): + """Wait until the datastore emulator is ready to use. + + :type popen: :class:`subprocess.Popen` + :param popen: An open subprocess to interact with. + """ + emulator_ready = False + while not emulator_ready: + emulator_ready = popen.stderr.readline() == _DS_READY_LINE + + +def wait_ready_prefix(popen, prefix): + """Wait until the a process encounters a line with matching prefix. + + :type popen: :class:`subprocess.Popen` + :param popen: An open subprocess to interact with. + + :type prefix: str + :param prefix: The prefix to match + """ + emulator_ready = False + while not emulator_ready: + emulator_ready = popen.stderr.readline().startswith(prefix) + + +def wait_ready(package, popen): + """Wait until the emulator is ready to use. + + :type package: str + :param package: The package to check if ready. + + :type popen: :class:`subprocess.Popen` + :param popen: An open subprocess to interact with. + + :raises: :class:`KeyError` if the ``package`` is not among + ``datastore``, ``pubsub`` or ``bigtable``. + """ + if package == DATASTORE: + datastore_wait_ready(popen) + elif package == PUBSUB: + wait_ready_prefix(popen, _PS_READY_LINE_PREFIX) + elif package == BIGTABLE: + wait_ready_prefix(popen, _BT_READY_LINE_PREFIX) + else: + raise KeyError('Package not supported', package) + + +def cleanup(pid): + """Cleanup a process (including all of its children). + + :type pid: int + :param pid: Process ID. + """ + proc = psutil.Process(pid) + for child_proc in proc.children(recursive=True): + try: + child_proc.kill() + child_proc.terminate() + except psutil.NoSuchProcess: + pass + proc.terminate() + proc.kill() + + +def run_tests_in_emulator(package): + """Spawn an emulator instance and run the system tests. + + :type package: str + :param package: The package to run system tests against. + """ + # Make sure this package has environment vars to replace. + env_vars = PACKAGE_INFO[package] + + start_command = get_start_command(package) + # Ignore stdin and stdout, don't pollute the user's output with them. 
+ proc_start = subprocess.Popen(start_command, stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + try: + wait_ready(package, proc_start) + env_init_command = get_env_init_command(package) + proc_env = subprocess.Popen(env_init_command, stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + env_status = proc_env.wait() + if env_status != 0: + raise RuntimeError(env_status, proc_env.stderr.read()) + env_lines = proc_env.stdout.read().strip().split('\n') + # Set environment variables before running the system tests. + for env_var in env_vars: + line_prefix = 'export ' + env_var + '=' + value, = [line.split(line_prefix, 1)[1] for line in env_lines + if line.startswith(line_prefix)] + os.environ[env_var] = value + run_module_tests(package, + ignore_requirements=True) + finally: + cleanup(proc_start.pid) + + +def main(): + """Main method to run this script.""" + parser = get_parser() + args = parser.parse_args() + run_tests_in_emulator(args.package) + + +if __name__ == '__main__': + main() diff --git a/packages/google-cloud-bigtable/test_utils/scripts/update_docs.sh b/packages/google-cloud-bigtable/test_utils/scripts/update_docs.sh new file mode 100755 index 000000000000..8cbab9f0dad0 --- /dev/null +++ b/packages/google-cloud-bigtable/test_utils/scripts/update_docs.sh @@ -0,0 +1,93 @@ +#!/bin/bash + +# Copyright 2016 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -ev + +GH_OWNER='GoogleCloudPlatform' +GH_PROJECT_NAME='google-cloud-python' + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +# Function to build the docs. +function build_docs { + rm -rf docs/_build/ + rm -f docs/bigquery/generated/*.rst + # -W -> warnings as errors + # -T -> show full traceback on exception + # -N -> no color + sphinx-build \ + -W -T -N \ + -b html \ + -d docs/_build/doctrees \ + docs/ \ + docs/_build/html/ + return $? +} + +# Only update docs if we are on CircleCI. +if [[ "${CIRCLE_BRANCH}" == "master" ]] && [[ -z "${CIRCLE_PR_NUMBER}" ]]; then + echo "Building new docs on a merged commit." +elif [[ "$1" == "kokoro" ]]; then + echo "Building and publishing docs on Kokoro." +elif [[ -n "${CIRCLE_TAG}" ]]; then + echo "Building new docs on a tag (but will not deploy)." + build_docs + exit $? +else + echo "Not on master nor a release tag." + echo "Building new docs for testing purposes, but not deploying." + build_docs + exit $? +fi + +# Adding GitHub pages branch. `git submodule add` checks it +# out at HEAD. +GH_PAGES_DIR='ghpages' +git submodule add -q -b gh-pages \ + "git@github.com:${GH_OWNER}/${GH_PROJECT_NAME}" ${GH_PAGES_DIR} + +# Determine if we are building a new tag or are building docs +# for master. Then build new docs in docs/_build from master. +if [[ -n "${CIRCLE_TAG}" ]]; then + # Sphinx will use the package version by default. + build_docs +else + SPHINX_RELEASE=$(git log -1 --pretty=%h) build_docs +fi + +# Update gh-pages with the created docs. +cd ${GH_PAGES_DIR} +git rm -fr latest/ +cp -R ../docs/_build/html/ latest/ + +# Update the files push to gh-pages. +git add . 
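The list comprehension that extracts environment variables in run_tests_in_emulator() above is dense; this is all it does, shown against a sample env-init line (the host and port are only an example):

    # Illustrative: parsing `gcloud beta emulators bigtable env-init` output.
    env_lines = ['export BIGTABLE_EMULATOR_HOST=localhost:8086']   # example output line
    env_vars = ('BIGTABLE_EMULATOR_HOST',)

    for env_var in env_vars:
        line_prefix = 'export ' + env_var + '='
        value, = [line.split(line_prefix, 1)[1] for line in env_lines
                  if line.startswith(line_prefix)]
        print(env_var, '->', value)          # BIGTABLE_EMULATOR_HOST -> localhost:8086

The single-element unpacking (`value, =`) doubles as an assertion that exactly one matching export line was emitted. The script itself is invoked as, for example, `python test_utils/scripts/run_emulator.py --package=bigtable`.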
+git status + +# If there are no changes, just exit cleanly. +if [[ -z "$(git status --porcelain)" ]]; then + echo "Nothing to commit. Exiting without pushing changes." + exit +fi + +# Commit to gh-pages branch to apply changes. +git config --global user.email "dpebot@google.com" +git config --global user.name "dpebot" +git commit -m "Update docs after merge to master." + +# NOTE: This may fail if two docs updates (on merges to master) +# happen in close proximity. +git push -q origin HEAD:gh-pages diff --git a/packages/google-cloud-bigtable/test_utils/setup.py b/packages/google-cloud-bigtable/test_utils/setup.py new file mode 100644 index 000000000000..8e9222a7f862 --- /dev/null +++ b/packages/google-cloud-bigtable/test_utils/setup.py @@ -0,0 +1,64 @@ +# Copyright 2017 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os + +from setuptools import find_packages +from setuptools import setup + + +PACKAGE_ROOT = os.path.abspath(os.path.dirname(__file__)) + + +# NOTE: This is duplicated throughout and we should try to +# consolidate. +SETUP_BASE = { + 'author': 'Google Cloud Platform', + 'author_email': 'googleapis-publisher@google.com', + 'scripts': [], + 'url': 'https://github.com/GoogleCloudPlatform/google-cloud-python', + 'license': 'Apache 2.0', + 'platforms': 'Posix; MacOS X; Windows', + 'include_package_data': True, + 'zip_safe': False, + 'classifiers': [ + 'Development Status :: 4 - Beta', + 'Intended Audience :: Developers', + 'License :: OSI Approved :: Apache Software License', + 'Operating System :: OS Independent', + 'Programming Language :: Python :: 2', + 'Programming Language :: Python :: 2.7', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.5', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Topic :: Internet', + ], +} + + +REQUIREMENTS = [ + 'google-auth >= 0.4.0', + 'six', +] + +setup( + name='google-cloud-testutils', + version='0.24.0', + description='System test utilities for google-cloud-python', + packages=find_packages(), + install_requires=REQUIREMENTS, + python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*', + **SETUP_BASE +) diff --git a/packages/google-cloud-bigtable/test_utils/test_utils/__init__.py b/packages/google-cloud-bigtable/test_utils/test_utils/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/packages/google-cloud-bigtable/test_utils/test_utils/imports.py b/packages/google-cloud-bigtable/test_utils/test_utils/imports.py new file mode 100644 index 000000000000..5991af7fc465 --- /dev/null +++ b/packages/google-cloud-bigtable/test_utils/test_utils/imports.py @@ -0,0 +1,38 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import mock +import six + + +def maybe_fail_import(predicate): + """Create and return a patcher that conditionally makes an import fail. + + Args: + predicate (Callable[[...], bool]): A callable that, if it returns `True`, + triggers an `ImportError`. It must accept the same arguments as the + built-in `__import__` function. + https://docs.python.org/3/library/functions.html#__import__ + + Returns: + A mock patcher object that can be used to enable patched import behavior. + """ + orig_import = six.moves.builtins.__import__ + + def custom_import(name, globals=None, locals=None, fromlist=(), level=0): + if predicate(name, globals, locals, fromlist, level): + raise ImportError + return orig_import(name, globals, locals, fromlist, level) + + return mock.patch.object(six.moves.builtins, "__import__", new=custom_import) diff --git a/packages/google-cloud-bigtable/test_utils/test_utils/retry.py b/packages/google-cloud-bigtable/test_utils/test_utils/retry.py new file mode 100644 index 000000000000..e61c001a03e1 --- /dev/null +++ b/packages/google-cloud-bigtable/test_utils/test_utils/retry.py @@ -0,0 +1,207 @@ +# Copyright 2016 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import time +from functools import wraps + +import six + +MAX_TRIES = 4 +DELAY = 1 +BACKOFF = 2 + + +def _retry_all(_): + """Retry all caught exceptions.""" + return True + + +class BackoffFailed(Exception): + """Retry w/ backoffs did not complete successfully.""" + + +class RetryBase(object): + """Base for retrying calling a decorated function w/ exponential backoff. + + :type max_tries: int + :param max_tries: Number of times to try (not retry) before giving up. + + :type delay: int + :param delay: Initial delay between retries in seconds. + + :type backoff: int + :param backoff: Backoff multiplier e.g. value of 2 will double the + delay each retry. + + :type logger: logging.Logger instance + :param logger: Logger to use. If None, print. + """ + def __init__(self, max_tries=MAX_TRIES, delay=DELAY, backoff=BACKOFF, + logger=None): + self.max_tries = max_tries + self.delay = delay + self.backoff = backoff + self.logger = logger.warning if logger else six.print_ + + +class RetryErrors(RetryBase): + """Decorator for retrying given exceptions in testing. + + :type exception: Exception or tuple of Exceptions + :param exception: The exception to check or may be a tuple of + exceptions to check. + + :type error_predicate: function, takes caught exception, returns bool + :param error_predicate: Predicate evaluating whether to retry after a + caught exception. 
+ + :type max_tries: int + :param max_tries: Number of times to try (not retry) before giving up. + + :type delay: int + :param delay: Initial delay between retries in seconds. + + :type backoff: int + :param backoff: Backoff multiplier e.g. value of 2 will double the + delay each retry. + + :type logger: logging.Logger instance + :param logger: Logger to use. If None, print. + """ + def __init__(self, exception, error_predicate=_retry_all, + max_tries=MAX_TRIES, delay=DELAY, backoff=BACKOFF, + logger=None): + super(RetryErrors, self).__init__(max_tries, delay, backoff, logger) + self.exception = exception + self.error_predicate = error_predicate + + def __call__(self, to_wrap): + @wraps(to_wrap) + def wrapped_function(*args, **kwargs): + tries = 0 + while tries < self.max_tries: + try: + return to_wrap(*args, **kwargs) + except self.exception as caught_exception: + + if not self.error_predicate(caught_exception): + raise + + delay = self.delay * self.backoff**tries + msg = ("%s, Trying again in %d seconds..." % + (caught_exception, delay)) + self.logger(msg) + + time.sleep(delay) + tries += 1 + return to_wrap(*args, **kwargs) + + return wrapped_function + + +class RetryResult(RetryBase): + """Decorator for retrying based on non-error result. + + :type result_predicate: function, takes result, returns bool + :param result_predicate: Predicate evaluating whether to retry after a + result is returned. + + :type max_tries: int + :param max_tries: Number of times to try (not retry) before giving up. + + :type delay: int + :param delay: Initial delay between retries in seconds. + + :type backoff: int + :param backoff: Backoff multiplier e.g. value of 2 will double the + delay each retry. + + :type logger: logging.Logger instance + :param logger: Logger to use. If None, print. + """ + def __init__(self, result_predicate, + max_tries=MAX_TRIES, delay=DELAY, backoff=BACKOFF, + logger=None): + super(RetryResult, self).__init__(max_tries, delay, backoff, logger) + self.result_predicate = result_predicate + + def __call__(self, to_wrap): + @wraps(to_wrap) + def wrapped_function(*args, **kwargs): + tries = 0 + while tries < self.max_tries: + result = to_wrap(*args, **kwargs) + if self.result_predicate(result): + return result + + delay = self.delay * self.backoff**tries + msg = "%s. Trying again in %d seconds..." % ( + self.result_predicate.__name__, delay,) + self.logger(msg) + + time.sleep(delay) + tries += 1 + raise BackoffFailed() + + return wrapped_function + + +class RetryInstanceState(RetryBase): + """Decorator for retrying based on instance state. + + :type instance_predicate: function, takes instance, returns bool + :param instance_predicate: Predicate evaluating whether to retry after an + API-invoking method is called. + + :type max_tries: int + :param max_tries: Number of times to try (not retry) before giving up. + + :type delay: int + :param delay: Initial delay between retries in seconds. + + :type backoff: int + :param backoff: Backoff multiplier e.g. value of 2 will double the + delay each retry. + + :type logger: logging.Logger instance + :param logger: Logger to use. If None, print. 
+ """ + def __init__(self, instance_predicate, + max_tries=MAX_TRIES, delay=DELAY, backoff=BACKOFF, + logger=None): + super(RetryInstanceState, self).__init__( + max_tries, delay, backoff, logger) + self.instance_predicate = instance_predicate + + def __call__(self, to_wrap): + instance = to_wrap.__self__ # only instance methods allowed + + @wraps(to_wrap) + def wrapped_function(*args, **kwargs): + tries = 0 + while tries < self.max_tries: + result = to_wrap(*args, **kwargs) + if self.instance_predicate(instance): + return result + + delay = self.delay * self.backoff**tries + msg = "%s. Trying again in %d seconds..." % ( + self.instance_predicate.__name__, delay,) + self.logger(msg) + + time.sleep(delay) + tries += 1 + raise BackoffFailed() + + return wrapped_function diff --git a/packages/google-cloud-bigtable/test_utils/test_utils/system.py b/packages/google-cloud-bigtable/test_utils/test_utils/system.py new file mode 100644 index 000000000000..590dc62a06e6 --- /dev/null +++ b/packages/google-cloud-bigtable/test_utils/test_utils/system.py @@ -0,0 +1,81 @@ +# Copyright 2014 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function +import os +import sys +import time + +import google.auth.credentials +from google.auth.environment_vars import CREDENTIALS as TEST_CREDENTIALS + + +# From shell environ. May be None. +CREDENTIALS = os.getenv(TEST_CREDENTIALS) + +ENVIRON_ERROR_MSG = """\ +To run the system tests, you need to set some environment variables. +Please check the CONTRIBUTING guide for instructions. +""" + + +class EmulatorCreds(google.auth.credentials.Credentials): + """A mock credential object. + + Used to avoid unnecessary token refreshing or reliance on the network + while an emulator is running. + """ + + def __init__(self): # pylint: disable=super-init-not-called + self.token = b'seekrit' + self.expiry = None + + @property + def valid(self): + """Would-be validity check of the credentials. + + Always is :data:`True`. + """ + return True + + def refresh(self, unused_request): # pylint: disable=unused-argument + """Off-limits implementation for abstract method.""" + raise RuntimeError('Should never be refreshed.') + + +def check_environ(): + err_msg = None + if CREDENTIALS is None: + err_msg = '\nMissing variables: ' + TEST_CREDENTIALS + elif not os.path.isfile(CREDENTIALS): + err_msg = '\nThe %s path %r is not a file.' % (TEST_CREDENTIALS, + CREDENTIALS) + + if err_msg is not None: + msg = ENVIRON_ERROR_MSG + err_msg + print(msg, file=sys.stderr) + sys.exit(1) + + +def unique_resource_id(delimiter='_'): + """A unique identifier for a resource. + + Intended to help locate resources created in particular + testing environments and at particular times. 
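As a usage sketch for the retry helpers above (the exception type and the wrapped function are invented for illustration, not taken from this change), RetryErrors is applied as a decorator and retries with exponential backoff before giving up:

    # Illustrative use of test_utils.retry.RetryErrors in a system test.
    from google.api_core.exceptions import ServiceUnavailable   # assumed transient error
    from test_utils.retry import RetryErrors

    retry_503 = RetryErrors(ServiceUnavailable, max_tries=4, delay=1, backoff=2)

    @retry_503
    def create_test_instance(instance):
        # On ServiceUnavailable, retried with exponentially growing delays (1s, 2s, 4s, ...).
        return instance.create()

RetryResult and RetryInstanceState follow the same shape, but retry on the returned value or on the decorated method's instance state rather than on a raised exception, and raise BackoffFailed once max_tries is exhausted.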
+ """ + build_id = os.getenv('CIRCLE_BUILD_NUM', '') + if build_id == '': + return '%s%d' % (delimiter, 1000 * time.time()) + else: + return '%s%s%s%d' % (delimiter, build_id, delimiter, time.time()) From 7d8dcf1af1919350bd465f41e03cb735f7e7f78c Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 25 Feb 2020 14:27:29 -0500 Subject: [PATCH 301/892] test: drop majyk per-lang-version coverage level (#14) Closes #13. --- packages/google-cloud-bigtable/noxfile.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index 9155915893ef..3bca8a099331 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -79,7 +79,7 @@ def default(session): "--cov-append", "--cov-config=.coveragerc", "--cov-report=", - "--cov-fail-under=97", + "--cov-fail-under=0", os.path.join("tests", "unit"), *session.posargs, ) From f540d8a70ec3db18cbcfddb842d10f0471de884d Mon Sep 17 00:00:00 2001 From: Christopher Wilcox Date: Fri, 1 May 2020 11:00:07 -0700 Subject: [PATCH 302/892] docs: add note about multiprocessing usage (#26) --- packages/google-cloud-bigtable/docs/index.rst | 7 +++++++ packages/google-cloud-bigtable/noxfile.py | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/docs/index.rst b/packages/google-cloud-bigtable/docs/index.rst index 8c76f79b80e1..88d8e09ec31f 100644 --- a/packages/google-cloud-bigtable/docs/index.rst +++ b/packages/google-cloud-bigtable/docs/index.rst @@ -1,5 +1,12 @@ .. include:: README.rst +.. note:: + + Because this client uses :mod:`grpcio` library, it is safe to + share instances across threads. In multiprocessing scenarios, the best + practice is to create client instances *after* the invocation of + :func:`os.fork` by :class:`multiprocessing.Pool` or + :class:`multiprocessing.Process`. 
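A minimal sketch of the pattern this note recommends (project, instance, and table IDs are placeholders): each worker builds its own client after the fork instead of inheriting one from the parent process.

    # Illustrative only: create the Bigtable client inside the worker process,
    # i.e. after multiprocessing has forked, rather than sharing a parent client.
    import multiprocessing

    from google.cloud import bigtable

    def count_rows(table_id):
        client = bigtable.Client(project="my-project")          # created post-fork
        table = client.instance("my-instance").table(table_id)
        return sum(1 for _ in table.read_rows())

    if __name__ == "__main__":
        with multiprocessing.Pool(processes=2) as pool:
            print(pool.map(count_rows, ["table-one", "table-two"]))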
Using the API ------------- diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index 3bca8a099331..1065894e61e4 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -141,7 +141,7 @@ def docs(session): """Build the docs for this library.""" session.install("-e", ".") - session.install("sphinx", "alabaster", "recommonmark") + session.install("sphinx<3.0.0", "alabaster", "recommonmark") shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) session.run( From 3d2d93830aa28ee52cc79b6a8f84160e0974f3c1 Mon Sep 17 00:00:00 2001 From: Vadym Matsishevskyi <25311427+vam-google@users.noreply.github.com> Date: Fri, 1 May 2020 13:48:16 -0700 Subject: [PATCH 303/892] chore: Migrate python-bigtable synth.py from artman to bazel (#21) --- packages/google-cloud-bigtable/synth.py | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/packages/google-cloud-bigtable/synth.py b/packages/google-cloud-bigtable/synth.py index 32ebc4af2eb1..22499ee05040 100644 --- a/packages/google-cloud-bigtable/synth.py +++ b/packages/google-cloud-bigtable/synth.py @@ -17,17 +17,16 @@ import synthtool as s from synthtool import gcp -gapic = gcp.GAPICGenerator() +gapic = gcp.GAPICBazel() common = gcp.CommonTemplates() # ---------------------------------------------------------------------------- # Generate bigtable and bigtable_admin GAPIC layer # ---------------------------------------------------------------------------- library = gapic.py_library( - "bigtable", - "v2", - config_path="/google/bigtable/artman_bigtable.yaml", - artman_output_name="bigtable-v2", + service="bigtable", + version="v2", + bazel_target="//google/bigtable/v2:bigtable-v2-py", include_protos=True, ) @@ -36,10 +35,9 @@ # Generate admin client library = gapic.py_library( - "bigtable_admin", - "v2", - config_path="/google/bigtable/admin/artman_bigtableadmin.yaml", - artman_output_name="bigtable-admin-v2", + service="bigtable_admin", + version="v2", + bazel_target="//google/bigtable/admin/v2:bigtable-admin-v2-py", include_protos=True, ) From 59d2b9c73f9f0d6d27fa20c5ea26af6b5d1783b8 Mon Sep 17 00:00:00 2001 From: Cameron Zahedi Date: Mon, 11 May 2020 16:29:29 -0600 Subject: [PATCH 304/892] feat: Create CODEOWNERS (#27) * Create CODEOWNERS Adding owner team * fix: Point to correct org --- packages/google-cloud-bigtable/.github/CODEOWNERS | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 packages/google-cloud-bigtable/.github/CODEOWNERS diff --git a/packages/google-cloud-bigtable/.github/CODEOWNERS b/packages/google-cloud-bigtable/.github/CODEOWNERS new file mode 100644 index 000000000000..e43d91c0ba58 --- /dev/null +++ b/packages/google-cloud-bigtable/.github/CODEOWNERS @@ -0,0 +1,10 @@ +# Code owners file. +# This file controls who is tagged for review for any given pull request. +# +# For syntax help see: +# https://help.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners#codeowners-syntax + + +# The bigtable-dpe team is the default owner for anything not +# explicitly taken by someone else. 
+* @googleapis/bigtable-dpe From fbdccbe4f597a024fa11f8be2137e0c34b207d89 Mon Sep 17 00:00:00 2001 From: HemangChothani <50404902+HemangChothani@users.noreply.github.com> Date: Wed, 20 May 2020 22:56:27 +0530 Subject: [PATCH 305/892] feat(bigtable): skip system tests failing with emulator (#18) * feat(bigtable): skip system test failing with emulator * feat(bigtable): nit * feat(bigtable): nit --- packages/google-cloud-bigtable/tests/system.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index e9e3ab79179e..dd77dd9362b0 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -657,6 +657,13 @@ def tearDown(self): for table in self.tables_to_delete: table.delete() + def _skip_if_emulated(self, message): + # NOTE: This method is necessary because ``Config.IN_EMULATOR`` + # is set at runtime rather than import time, which means we + # can't use the @unittest.skipIf decorator. + if Config.IN_EMULATOR: + self.skipTest(message) + def test_list_tables(self): # Since `Config.INSTANCE_DATA` is newly created in `setUpModule`, the # table created in `setUpClass` here will be the only one. @@ -691,6 +698,7 @@ def test_create_table(self): self.assertEqual(sorted_tables, expected_tables) def test_test_iam_permissions(self): + self._skip_if_emulated("Method not implemented in bigtable emulator") temp_table_id = "test-test-iam-policy-table" temp_table = Config.INSTANCE_DATA.table(temp_table_id) temp_table.create() @@ -701,6 +709,7 @@ def test_test_iam_permissions(self): self.assertEqual(permissions, permissions_allowed) def test_get_iam_policy(self): + self._skip_if_emulated("Method not implemented in bigtable emulator") temp_table_id = "test-get-iam-policy-table" temp_table = Config.INSTANCE_DATA.table(temp_table_id) temp_table.create() @@ -711,6 +720,7 @@ def test_get_iam_policy(self): self.assertEqual(policy["version"], 0) def test_set_iam_policy(self): + self._skip_if_emulated("Method not implemented in bigtable emulator") temp_table_id = "test-set-iam-policy-table" temp_table = Config.INSTANCE_DATA.table(temp_table_id) temp_table.create() @@ -742,6 +752,7 @@ def test_create_table_with_families(self): self.assertEqual(retrieved_col_fam.gc_rule, gc_rule) def test_create_table_with_split_keys(self): + self._skip_if_emulated("Split keys are not supported by Bigtable emulator") temp_table_id = "foo-bar-baz-split-table" initial_split_keys = [b"split_key_1", b"split_key_10", b"split_key_20"] temp_table = Config.INSTANCE_DATA.table(temp_table_id) @@ -1014,6 +1025,9 @@ def test_yield_rows_with_row_set(self): self.assertEqual(found_row_keys, expected_row_keys) def test_read_large_cell_limit(self): + self._maybe_emulator_skip( + "Maximum gRPC received message size for emulator is 4194304 bytes." 
+ ) row = self._table.row(ROW_KEY) self.rows_to_delete.append(row) From 59c0a07dfae700d071163d640c5c1e813287f5dc Mon Sep 17 00:00:00 2001 From: HemangChothani <50404902+HemangChothani@users.noreply.github.com> Date: Tue, 26 May 2020 19:47:10 +0530 Subject: [PATCH 306/892] docs(bigtable): clean up (#32) --- .../google/cloud/bigtable/cluster.py | 12 ------------ .../google/cloud/bigtable/instance.py | 3 --- .../google/cloud/bigtable/table.py | 9 +++------ 3 files changed, 3 insertions(+), 21 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py index 5ff1d0404b94..edb5d261bfae 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py @@ -286,18 +286,6 @@ def update(self): before calling :meth:`update`. - :type location: :str:``CreationOnly`` - :param location: The location where this cluster's nodes and storage - reside. For best performance, clients should be located as - close as possible to this cluster. Currently only zones are - supported, so values should be of the form - ``projects//locations/``. - - :type serve_nodes: :int - :param serve_nodes: The number of nodes allocated to this cluster. - More nodes enable higher throughput and more consistent - performance. - :rtype: :class:`Operation` :returns: The long-running operation corresponding to the update operation. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py index dbdd20640918..e0a30590bbca 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py @@ -537,9 +537,6 @@ def cluster( :type cluster_id: str :param cluster_id: The ID of the cluster. - :type instance: :class:`~google.cloud.bigtable.instance.Instance` - :param instance: The instance where the cluster resides. - :type location_id: str :param location_id: (Creation Only) The location where this cluster's nodes and storage reside. For best performance, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index 69379b21d57e..4852ff6e1e98 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -359,7 +359,7 @@ def create(self, initial_split_keys=[], column_families={}): into several tablets. :type column_families: dict - :param column_failies: (Optional) A map columns to create. The key is + :param column_families: (Optional) A map columns to create. The key is the column_id str and the value is a :class:`GarbageCollectionRule` """ @@ -734,8 +734,8 @@ def drop_by_prefix(self, row_key_prefix, timeout=None): :start-after: [START bigtable_drop_by_prefix] :end-before: [END bigtable_drop_by_prefix] - :type row_prefix: bytes - :param row_prefix: Delete all rows that start with this row key + :type row_key_prefix: bytes + :param row_key_prefix: Delete all rows that start with this row key prefix. Prefix cannot be zero length. :type timeout: float @@ -768,9 +768,6 @@ def mutations_batcher(self, flush_count=FLUSH_COUNT, max_row_bytes=MAX_ROW_BYTES :start-after: [START bigtable_mutations_batcher] :end-before: [END bigtable_mutations_batcher] - :type table: class - :param table: class:`~google.cloud.bigtable.table.Table`. 
- :type flush_count: int :param flush_count: (Optional) Maximum number of rows per batch. If it reaches the max number of rows it calls finish_batch() to From ab589eb09e3dd66c45cbbb6fddb750ce4607ccf4 Mon Sep 17 00:00:00 2001 From: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Date: Thu, 4 Jun 2020 13:21:10 -0700 Subject: [PATCH 307/892] docs: add samples from bigtable (#38) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add Bigtable hello world sample. [(#371)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/371) * Add Bigtable hello world sample. * Update secrets for Bigtable tests. * Add region tags to bigtable/hello sample. Also, change the sample to use sequential keys (with a disclaimer) to match the Java sample. I had forgotten to add a sample usage to get a specific row, so add that, too. * Close HappyBase connection in bigtable/hello sample. I noticed that the `bigtable/hello` sample was not quitting when I sent a `Ctrl-C` this should fix that problem. * bigtable: Move hello to hello_happybase. [(#383)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/383) * bigtable: Move hello to hello_happybase. [(#383)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/383) * bigtable: add raw gcloud-python hello sample. This sample uses the "raw" [gcloud-python Cloud Bigtable package](https://googlecloudplatform.github.io/gcloud-python/stable/bigtable-usage.html). * bigtable: add raw gcloud-python hello sample. This sample uses the "raw" [gcloud-python Cloud Bigtable package](https://googlecloudplatform.github.io/gcloud-python/stable/bigtable-usage.html). * Update Bigtable samples to v2. Table of Contents generated with: doctoc --title '**Table of Contents**' bigtable Needs to wait for next gcloud-python launch. Tested locally with a previous version of grpcio. * Update Bigtable samples to v2. Table of Contents generated with: doctoc --title '**Table of Contents**' bigtable Needs to wait for next gcloud-python launch. Tested locally with a previous version of grpcio. * Auto-update dependencies. [(#456)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/456) * Auto-update dependencies. [(#456)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/456) * Remove grpc-python3 hackiness Change-Id: I6bf9a8acb9ba7d067b3095b5857094cbc322ff58 * Remove grpc-python3 hackiness Change-Id: I6bf9a8acb9ba7d067b3095b5857094cbc322ff58 * Auto-update dependencies. [(#540)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/540) * Auto-update dependencies. [(#540)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/540) * Auto-update dependencies. [(#542)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/542) * Auto-update dependencies. [(#542)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/542) * Move to google-cloud [(#544)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/544) * Move to google-cloud [(#544)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/544) * Fix link to bigtable happybase package. [(#576)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/576) It moved to a new repo. 
* Generate readmes for most service samples [(#599)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/599)
* Update samples to support latest Google Cloud Python [(#656)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/656)
* Fix bigtable tests Change-Id: I49b68394ccd5133a64e019e91d1ec0529ffd64b3
* Auto-update dependencies. [(#715)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/715)
* Auto-update dependencies. [(#781)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/781)
* Remove cloud config fixture [(#887)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/887) * Remove cloud config fixture * Fix client secrets * Fix bigtable instance
* Auto-update dependencies. [(#914)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/914) * Auto-update dependencies. * xfail the error reporting test * Fix lint
* Re-generate all readmes
* Auto-update dependencies. [(#922)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/922) * Auto-update dependencies. * Fix pubsub iam samples
* Fix README rst links [(#962)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/962) * Fix README rst links * Update all READMEs
* Auto-update dependencies. [(#1004)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1004) * Auto-update dependencies. * Fix natural language samples * Fix pubsub iam samples * Fix language samples * Fix bigquery samples
* Update Bigtable Programmatic Scaling Example [(#1003)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1003) * Update Bigtable Programmatic Scaling Example * Rename "autoscaling" to "metricscaler" and the term "programmatic scaling" * Remove `strategies.py` to simplify example * Fix wrong sleep length bug * Add maximum node count * hegemonic review
* Auto-update dependencies. [(#1005)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1005) * Auto-update dependencies. * Fix bigtable lint * Fix IOT iam interaction
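The "programmatic scaling" / metricscaler work above amounts to nudging a cluster's serve_nodes up or down within a bounded range. A hedged sketch of that scaling step follows; the step size and the minimum/maximum node counts are assumptions, not the sample's actual values.

```python
# Hypothetical sketch of the metricscaler scaling step; bounds are assumptions.
from google.cloud import bigtable

MIN_NODE_COUNT = 3      # production Bigtable clusters need at least 3 nodes
MAX_NODE_COUNT = 30     # the "maximum node count" cap mentioned above
SIZE_CHANGE_STEP = 3    # how many nodes to add or remove per adjustment


def scale_bigtable(instance_id, cluster_id, scale_up):
    """Resize a cluster by one step, staying inside the configured bounds."""
    client = bigtable.Client(admin=True)
    cluster = client.instance(instance_id).cluster(cluster_id)
    cluster.reload()  # fetch the current serve_nodes value

    if scale_up:
        new_count = min(cluster.serve_nodes + SIZE_CHANGE_STEP, MAX_NODE_COUNT)
    else:
        new_count = max(cluster.serve_nodes - SIZE_CHANGE_STEP, MIN_NODE_COUNT)

    if new_count != cluster.serve_nodes:
        cluster.serve_nodes = new_count
        cluster.update()  # long-running resize operation
        print("Scaled {} to {} nodes".format(cluster_id, new_count))
```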
* Auto-update dependencies. [(#1028)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1028)
* Auto-update dependencies. [(#1055)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1055) * Auto-update dependencies. * Explicitly use latest bigtable client Change-Id: Id71e9e768f020730e4ca9514a0d7ebaa794e7d9e * Revert language update for now Change-Id: I8867f154e9a5aae00d0047c9caf880e5e8f50c53 * Remove pdb. smh Change-Id: I5ff905fadc026eebbcd45512d4e76e003e3b2b43
* Auto-update dependencies. [(#1057)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1057)
* Auto-update dependencies. [(#1093)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1093) * Auto-update dependencies. * Fix storage notification poll sample Change-Id: I6afbc79d15e050531555e4c8e51066996717a0f3 * Fix spanner samples Change-Id: I40069222c60d57e8f3d3878167591af9130895cb * Drop coverage because it's not useful Change-Id: Iae399a7083d7866c3c7b9162d0de244fbff8b522 * Try again to fix flaky logging test Change-Id: I6225c074701970c17c426677ef1935bb6d7e36b4
* Auto-update dependencies. [(#1094)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1094) * Auto-update dependencies. * Relax assertions in the ocr_nl sample Change-Id: I6d37e5846a8d6dd52429cb30d501f448c52cbba1 * Drop unused logging apiary samples Change-Id: I545718283773cb729a5e0def8a76ebfa40829d51
* Update all generated readme auth instructions [(#1121)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1121) Change-Id: I03b5eaef8b17ac3dc3c0339fd2c7447bd3e11bd2
* Bigtable autoscaler: use size variable [(#1156)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1156)
* Added Link to Python Setup Guide [(#1158)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1158) * Update Readme.rst to add Python setup guide As requested in b/64770713. This sample is linked in documentation https://cloud.google.com/bigtable/docs/scaling, and it would make more sense to update the guide here than in the documentation. * Update README.rst * Update install_deps.tmpl.rst * Updated readmegen scripts and re-generated related README files * Fixed the lint error
* Auto-update dependencies. [(#1186)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1186)
* Auto-update dependencies. [(#1199)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1199) * Auto-update dependencies. * Fix iot lint Change-Id: I6289e093bdb35e38f9e9bfc3fbc3df3660f9a67e
* Added "Open in Cloud Shell" buttons to README files [(#1254)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1254)
* Auto-update dependencies. [(#1377)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1377) * Auto-update dependencies. * Update requirements.txt
* Regenerate the README files and fix the Open in Cloud Shell link for some samples [(#1441)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1441)
* Update READMEs to fix numbering and add git clone [(#1464)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1464)
* Add Bigtable table admin sample [(#1549)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1549)
* Update tableadmin [(#1562)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1562) Fixes #1555
* Cloud Bigtable Quickstarts [(#1616)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1616) * quickstart * quickstart happybase * linting and making tests work * Tidying up * Trigger * Fixes for Python3 * Showing default values for the quickstart functions * Fix lint issue with indentation
* Bigtable: Update tableadmin sample to point to latest release. [(#1665)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1665) * Update tableadmin sample to point to latest release. * update tableadmin
* Bigtable: update helloworld example [(#1670)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1670) * Update helloworld example * Use iterable PartialRowsData
* Bigtable: Create Instanceadmin sample [(#1664)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1664) * Python instanceadmin sample * Updated instanceadmin.py * modify instanceadmin as per comments * Update instanceadmin.py as per the local review comments. * Update instanceadmin * update instanceadmin, to fix ci failures. * update instanceadmin * Implement review comments
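The instanceadmin sample just mentioned drives instance and cluster administration. A hypothetical sketch of the creation path with this client library is shown below; the project ID, instance ID, cluster ID, zone, and node count are placeholders, not the sample's exact values.

```python
# Hypothetical instance-admin sketch; IDs, zone, and node count are placeholders.
from google.cloud import bigtable
from google.cloud.bigtable import enums

client = bigtable.Client(project="my-project", admin=True)

instance = client.instance(
    "my-instance",
    instance_type=enums.Instance.Type.PRODUCTION,
    labels={"env": "dev"},
)
cluster = instance.cluster(
    "my-instance-c1",
    location_id="us-central1-f",
    serve_nodes=3,
    default_storage_type=enums.StorageType.SSD,
)

if not instance.exists():
    operation = instance.create(clusters=[cluster])
    operation.result(timeout=120)  # wait for the long-running operation

# list_clusters() returns (clusters, failed_locations).
clusters, _ = instance.list_clusters()
print([c.cluster_id for c in clusters])
```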
* Upgrading the metrics query to the latest version [(#1674)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1674) * Upgrading the metrics query to the latest version * fix lint issues * Importing module not class
* Fixed print statements [(#1755)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1755) * Updated trampoline script to match latest version that cleans up files * Added newline to end of trampoline script * A quickstart test was missing requirements.txt * Replaced print statements with print function calls * Missed a print issue last time * Bad indent fixed
* Making bigtable tests run successfully [(#1764)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1764) * Making bigtable tests run successfully * Fixed missing import * Renamed noxfile for new environment * Moving the nox name back
* Added Bu Sun's updates, fixed some lint errors [(#1770)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1770) * Added Bu Sun's updates, fixed some lint errors * Changes to use new nox version * Minor formatting to force a presubmit check to run * Ignore noxfile.py for tests * Clean up layout for lint
* updating to latest happy base client version [(#1794)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1794)
* Auto-update dependencies. [(#1846)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1846) ACK, merging.
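The tableadmin sample referenced a little earlier (#1549, #1665) is mostly about column families and garbage-collection rules. A rough sketch of that kind of table administration follows; the IDs and GC-rule choices are assumptions, not the sample's exact code.

```python
# Rough table-admin sketch; IDs and GC-rule choices are assumptions.
import datetime

from google.cloud import bigtable
from google.cloud.bigtable import column_family

client = bigtable.Client(project="my-project", admin=True)
instance = client.instance("my-instance")
table = instance.table("admin-demo")
table.create()

# Keep at most two versions of a cell, and nothing older than five days.
max_versions = column_family.MaxVersionsGCRule(2)
max_age = column_family.MaxAgeGCRule(datetime.timedelta(days=5))
union_rule = column_family.GCRuleUnion(rules=[max_versions, max_age])

cf = table.column_family("cf1", gc_rule=union_rule)
cf.create()

# List tables in the instance, then clean up.
for t in instance.list_tables():
    print(t.table_id)
table.delete()
```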
* Bigtable: add filter region tag to hello world [(#1878)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1878) * use row.cell rather than row.cell_value in the example add 'filter' and 'dependencies' region tags * move the comment line
* [bigtable] Clean up quickstart comments and vars [(#1890)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1890) Clean up comments and variable names as this quickstart will be sourced directly into our quickstart docs.
* Auto-update dependencies. [(#1980)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/1980) * Auto-update dependencies. * Update requirements.txt * Update requirements.txt
* New library version to address failure. [(#2057)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/2057) * New library version to address failure. * Encoded strings for library call * Give changes a bit longer to finish * fix lint error * Update main.py * Paren was missing
* remove broken test config [(#2054)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/2054)
* Cloud Bigtable Region tag consistency [(#2018)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/2018) * Updating the region tags to be consistent across Cloud Bigtable. Need to figure out filtering for happybase or rename * Remove happybase filter * Linting
* Deflake bigtable and spanner tests. [(#2224)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/2224) * Spanner doesn't actually promise the order of the results, so make the assertion work regardless of ordering. * Bigtable might need some more time to scale, so retry the assertion up to 10 times.
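The filter region tag and the row.cell change above boil down to reading rows through a row filter and walking row.cells. A minimal sketch, assuming the hello-world table layout used earlier (table and family names are assumptions):

```python
# Minimal read-with-filter sketch; table and family names are assumptions.
from google.cloud import bigtable
from google.cloud.bigtable import row_filters

client = bigtable.Client(project="my-project", admin=True)
table = client.instance("my-instance").table("Hello-Bigtable")

# Only return the most recent version of each cell.
row_filter = row_filters.CellsColumnLimitFilter(1)

for row in table.read_rows(filter_=row_filter):
    cell = row.cells["cf1"][b"greeting"][0]
    print(row.row_key.decode("utf-8"), cell.value.decode("utf-8"))
```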
* Cloud Bigtable writes samples [(#2201)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/2201) * Bigtable write samples * Cleaning up test * Fixing lint issues * Fixing imports in test * Cleaning up samples and showing error handling * removing note about the row commit bug * Add fixture to write test * Use test fixtures to create and delete test tables.
* Adds updates including compute [(#2436)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/2436) * Adds updates including compute * Python 2 compat pytest * Fixing weird \r\n issue from GH merge * Put asset tests back in * Re-add pod operator test * Hack parameter for k8s pod operator
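The writes samples cover simple, increment, and conditional mutations. A sketch of those three patterns is shown below; the table layout, row keys, and values are illustrative assumptions rather than the sample's exact code.

```python
# Sketch of simple, increment, and conditional writes; values are illustrative.
import datetime

from google.cloud import bigtable
from google.cloud.bigtable import row_filters

client = bigtable.Client(project="my-project", admin=True)
table = client.instance("my-instance").table("mobile-time-series")

timestamp = datetime.datetime.utcnow()

# Simple write: one DirectRow, committed on its own.
row = table.direct_row("phone#4c410523#20190501")
row.set_cell("stats_summary", "connected_cell", 1, timestamp)
row.set_cell("stats_summary", "os_build", "PQ2A.190405.003", timestamp)
row.commit()

# Increment: atomically adjust a 64-bit big-endian counter cell.
incr_row = table.append_row("phone#4c410523#20190501")
incr_row.increment_cell_value("stats_summary", "connected_cell", -1)
incr_row.commit()

# Conditional write: only set the cell if the predicate filter matches.
predicate = row_filters.RowFilterChain(
    filters=[
        row_filters.FamilyNameRegexFilter("stats_summary"),
        row_filters.ColumnQualifierRegexFilter("os_build"),
        row_filters.ValueRegexFilter("PQ2A\\..*"),
    ]
)
cond_row = table.conditional_row("phone#4c410523#20190501", filter_=predicate)
cond_row.set_cell("stats_summary", "os_name", "android", state=True)
cond_row.commit()
```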
* Auto-update dependencies. [(#2005)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/2005) * Auto-update dependencies. * Revert update of appengine/flexible/datastore. * revert update of appengine/flexible/scipy * revert update of bigquery/bqml * revert update of bigquery/cloud-client * revert update of bigquery/datalab-migration * revert update of bigtable/quickstart * revert update of compute/api * revert update of container_registry/container_analysis * revert update of dataflow/run_template * revert update of datastore/cloud-ndb * revert update of dialogflow/cloud-client * revert update of dlp * revert update of functions/imagemagick * revert update of functions/ocr/app * revert update of healthcare/api-client/fhir * revert update of iam/api-client * revert update of iot/api-client/gcs_file_to_device * revert update of iot/api-client/mqtt_example * revert update of language/automl * revert update of run/image-processing * revert update of vision/automl * revert update testing/requirements.txt * revert update of vision/cloud-client/detect * revert update of vision/cloud-client/product_search * revert update of jobs/v2/api_client * revert update of jobs/v3/api_client * revert update of opencensus * revert update of translate/cloud-client * revert update to speech/cloud-client Co-authored-by: Kurtis Van Gent <31518063+kurtisvg@users.noreply.github.com> Co-authored-by: Doug Mahugh
* Cleanup bigtable python examples [(#2692)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/2692) * Cleanup bigtable python: Use new row types for mutations Update bigtable version in requirements Delete table after tests * Change bigtable cluster variable to bigtable instance for consistency Create and delete quickstart table during test * Fixing step size for metric scaler Create unique tables for quickstart tests * Creating fixtures for quickstart tests Fixing hb quickstart test output * Fix quickstart extra delete table Update happybase to use direct row * Use clearer instance names for tests Create unique instances for metric scaler tests * Linting * remove core dep Co-authored-by: Leah E. Cole <6719667+leahecole@users.noreply.github.com>
* chore(deps): update dependency google-cloud-core to v1.3.0 [(#3066)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3066)
* bigtable: read and filter snippets [(#2707)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/2707) * Bigtable write samples * Cleaning up test * Fixing lint issues * Fixing imports in test * Cleaning up samples and showing error handling * removing note about the row commit bug * Add fixture to write test * Read snippets WIP * Cleanup bigtable python: Use new row types for mutations Update bigtable version in requirements Delete table after tests * Change bigtable cluster variable to bigtable instance for consistency Create and delete quickstart table during test * Fixing step size for metric scaler Create unique tables for quickstart tests * Creating fixtures for quickstart tests Fixing hb quickstart test output * Fix quickstart extra delete table Update happybase to use direct row * Use clearer instance names for tests Create unique instances for metric scaler tests * Linting * get session issue in test sorted out * Read snippets with tests working * Filter snippets with tests working * Lint * Update module import * Fix bigtable instance env var * Change scope to module * Don't print empty parens * sort cols * sort by cfs too * Make requirements more specific to samples. Lint fixes Co-authored-by: Leah E. Cole <6719667+leahecole@users.noreply.github.com> Co-authored-by: Christopher Wilcox
* bigtable/metricscaler: Add Dockerfile [(#3103)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3103) * bigtable/metricscaler: Add Dockerfile. * Add copyright header
* Simplify noxfile setup. [(#2806)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/2806) * chore(deps): update dependency requests to v2.23.0 * Simplify noxfile and add version control. * Configure appengine/standard to only test Python 2.7. * Update Kokoro configs to match noxfile. * Add requirements-test to each folder. * Remove Py2 versions from everything except appengine/standard. * Remove conftest.py. * Remove appengine/standard/conftest.py * Remove 'no-sucess-flaky-report' from pytest.ini. * Add GAE SDK back to appengine/standard tests. * Fix typo. * Roll pytest to python 2 version. * Add a bunch of testing requirements. * Remove typo. * Add appengine lib directory back in. * Add some additional requirements. * Fix issue with flake8 args. * Even more requirements. * Readd appengine conftest.py. * Add a few more requirements. * Even more Appengine requirements. * Add webtest for appengine/standard/mailgun. * Add some additional requirements. * Add workaround for issue with mailjet-rest. * Add responses for appengine/standard/mailjet. Co-authored-by: Renovate Bot
* chore(deps): update dependency google-cloud-monitoring to v0.35.0 [(#3459)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3459) Co-authored-by: Leah E. Cole <6719667+leahecole@users.noreply.github.com>
* [bigtable] fix: wrap sample invocations with retries [(#3494)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3494) fix #3070 Also added `BIGTABLE_INSTANCE` to testing/test-env.tmpl.sh
* bigtable: Handle dev instances and use storage utilization in metric scaler [(#3119)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3119) * WIP handle development instances in metric scaler * use storage utilization and tested * Fix metric queries * remove tests for low storage util * cleanup metric query * EOF new line * use uuid instead of random * lint * fix uuid length * fix uuid length (again) Co-authored-by: Christopher Wilcox Co-authored-by: Takashi Matsuo
* chore: some lint fixes [(#3738)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3738)
* chore: some lint fixes [(#3739)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3739)
* chore: some lint fixes [(#3740)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3740)
* chore(deps): update dependency google-cloud-monitoring to v0.36.0 [(#3783)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3783) Co-authored-by: Leah E.
Cole <6719667+leahecole@users.noreply.github.com> * testing: various cleanups [(#3877)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/3877) * testing: various cleanups * [iap]: only run iap tests on Kokoro * [vision/automl]: use temporary directory for temporary files * [appengine/flexible/scipy]: use temporary directory * [bigtable/snippets/reads]: update pytest snapshot * [texttospeech/cloud-client]: added output.mp3 to .gitignore * [iot/api-client/gcs_file_to_device]: use temporary directory * [iot/api-client/mqtt_example]: use temporary directory * [logging/cloud-client]: use uuid and add backoff * use project directory with Trampoline V1 * chore: update templates * chore: add noxfiles for all sample projects * docs: add multiprocessing Co-authored-by: Tim Swast Co-authored-by: Bill Prin Co-authored-by: DPE bot Co-authored-by: Jon Wayne Parrott Co-authored-by: michaelawyu Co-authored-by: Frank Natividad Co-authored-by: sangramql <39852271+sangramql@users.noreply.github.com> Co-authored-by: Billy Jacobson Co-authored-by: Charles Engelke Co-authored-by: sumit-ql <39561577+sumit-ql@users.noreply.github.com> Co-authored-by: Alex <7764119+AVaksman@users.noreply.github.com> Co-authored-by: Misha Brukman Co-authored-by: Averi Kitsch Co-authored-by: Thea Flowers Co-authored-by: Gus Class Co-authored-by: Kurtis Van Gent <31518063+kurtisvg@users.noreply.github.com> Co-authored-by: Doug Mahugh Co-authored-by: Leah E. Cole <6719667+leahecole@users.noreply.github.com> Co-authored-by: WhiteSource Renovate Co-authored-by: Christopher Wilcox Co-authored-by: Matt Braymer-Hayes Co-authored-by: Takashi Matsuo --- packages/google-cloud-bigtable/.coveragerc | 16 + packages/google-cloud-bigtable/.flake8 | 18 + .../google-cloud-bigtable/.github/CODEOWNERS | 1 + .../.github/ISSUE_TEMPLATE/bug_report.md | 3 +- packages/google-cloud-bigtable/.gitignore | 2 + .../.kokoro/publish-docs.sh | 2 - .../google-cloud-bigtable/.kokoro/release.sh | 2 - .../.kokoro/samples/lint/common.cfg | 34 ++ .../.kokoro/samples/lint/continuous.cfg | 6 + .../.kokoro/samples/lint/periodic.cfg | 6 + .../.kokoro/samples/lint/presubmit.cfg | 6 + .../.kokoro/samples/python3.6/common.cfg | 34 ++ .../.kokoro/samples/python3.6/continuous.cfg | 7 + .../.kokoro/samples/python3.6/periodic.cfg | 6 + .../.kokoro/samples/python3.6/presubmit.cfg | 6 + .../.kokoro/samples/python3.7/common.cfg | 34 ++ .../.kokoro/samples/python3.7/continuous.cfg | 6 + .../.kokoro/samples/python3.7/periodic.cfg | 6 + .../.kokoro/samples/python3.7/presubmit.cfg | 6 + .../.kokoro/samples/python3.8/common.cfg | 34 ++ .../.kokoro/samples/python3.8/continuous.cfg | 6 + .../.kokoro/samples/python3.8/periodic.cfg | 6 + .../.kokoro/samples/python3.8/presubmit.cfg | 6 + .../.kokoro/test-samples.sh | 104 ++++ .../google-cloud-bigtable/CONTRIBUTING.rst | 15 +- packages/google-cloud-bigtable/MANIFEST.in | 19 + packages/google-cloud-bigtable/docs/conf.py | 5 +- packages/google-cloud-bigtable/docs/index.rst | 8 +- .../docs/multiprocessing.rst | 7 + packages/google-cloud-bigtable/noxfile.py | 2 +- .../samples/AUTHORING_GUIDE.md | 1 + .../samples/CONTRIBUTING.md | 1 + .../samples/hello/README.rst | 115 +++++ .../samples/hello/README.rst.in | 23 + .../samples/hello/main.py | 130 +++++ .../samples/hello/main_test.py | 39 ++ .../samples/hello/noxfile.py | 225 ++++++++ .../samples/hello/requirements-test.txt | 1 + .../samples/hello/requirements.txt | 2 + .../samples/hello_happybase/README.rst | 122 +++++ .../samples/hello_happybase/README.rst.in | 32 ++ 
.../samples/hello_happybase/main.py | 118 +++++ .../samples/hello_happybase/main_test.py | 41 ++ .../samples/hello_happybase/noxfile.py | 225 ++++++++ .../hello_happybase/requirements-test.txt | 1 + .../samples/hello_happybase/requirements.txt | 1 + .../samples/instanceadmin/README.rst | 120 +++++ .../samples/instanceadmin/README.rst.in | 23 + .../samples/instanceadmin/instanceadmin.py | 259 ++++++++++ .../samples/instanceadmin/noxfile.py | 225 ++++++++ .../instanceadmin/requirements-test.txt | 1 + .../samples/instanceadmin/requirements.txt | 1 + .../samples/metricscaler/Dockerfile | 24 + .../samples/metricscaler/README.rst | 128 +++++ .../samples/metricscaler/README.rst.in | 29 ++ .../samples/metricscaler/metricscaler.py | 209 ++++++++ .../samples/metricscaler/metricscaler_test.py | 198 ++++++++ .../samples/metricscaler/noxfile.py | 225 ++++++++ .../metricscaler/requirements-test.txt | 2 + .../samples/metricscaler/requirements.txt | 2 + .../samples/quickstart/README.rst | 126 +++++ .../samples/quickstart/README.rst.in | 23 + .../samples/quickstart/main.py | 58 +++ .../samples/quickstart/main_test.py | 55 ++ .../samples/quickstart/noxfile.py | 225 ++++++++ .../samples/quickstart/requirements-test.txt | 1 + .../samples/quickstart/requirements.txt | 1 + .../samples/quickstart_happybase/README.rst | 108 ++++ .../quickstart_happybase/README.rst.in | 23 + .../samples/quickstart_happybase/main.py | 62 +++ .../samples/quickstart_happybase/main_test.py | 55 ++ .../samples/quickstart_happybase/noxfile.py | 225 ++++++++ .../requirements-test.txt | 1 + .../quickstart_happybase/requirements.txt | 1 + .../snippets/filters/filter_snippets.py | 360 +++++++++++++ .../samples/snippets/filters/filters_test.py | 226 +++++++++ .../samples/snippets/filters/noxfile.py | 225 ++++++++ .../snippets/filters/requirements-test.txt | 1 + .../samples/snippets/filters/requirements.txt | 2 + .../snippets/filters/snapshots/__init__.py | 0 .../filters/snapshots/snap_filters_test.py | 480 ++++++++++++++++++ .../samples/snippets/reads/noxfile.py | 225 ++++++++ .../samples/snippets/reads/read_snippets.py | 192 +++++++ .../samples/snippets/reads/reads_test.py | 121 +++++ .../snippets/reads/requirements-test.txt | 1 + .../samples/snippets/reads/requirements.txt | 2 + .../snippets/reads/snapshots/__init__.py | 0 .../reads/snapshots/snap_reads_test.py | 142 ++++++ .../samples/snippets/writes/__init__.py | 0 .../samples/snippets/writes/noxfile.py | 225 ++++++++ .../snippets/writes/requirements-test.txt | 2 + .../samples/snippets/writes/requirements.txt | 1 + .../samples/snippets/writes/write_batch.py | 55 ++ .../snippets/writes/write_conditionally.py | 44 ++ .../snippets/writes/write_increment.py | 34 ++ .../samples/snippets/writes/write_simple.py | 49 ++ .../samples/snippets/writes/writes_test.py | 94 ++++ .../samples/tableadmin/README.rst | 115 +++++ .../samples/tableadmin/README.rst.in | 23 + .../samples/tableadmin/noxfile.py | 225 ++++++++ .../samples/tableadmin/requirements-test.txt | 1 + .../samples/tableadmin/requirements.txt | 1 + .../samples/tableadmin/tableadmin.py | 283 +++++++++++ .../samples/tableadmin/tableadmin_test.py | 66 +++ .../scripts/decrypt-secrets.sh | 33 ++ .../scripts/readme-gen/readme_gen.py | 66 +++ .../readme-gen/templates/README.tmpl.rst | 87 ++++ .../readme-gen/templates/auth.tmpl.rst | 9 + .../templates/auth_api_key.tmpl.rst | 14 + .../templates/install_deps.tmpl.rst | 29 ++ .../templates/install_portaudio.tmpl.rst | 35 ++ packages/google-cloud-bigtable/setup.cfg | 16 + 
packages/google-cloud-bigtable/synth.metadata | 27 +- packages/google-cloud-bigtable/synth.py | 9 +- .../google-cloud-bigtable/testing/.gitignore | 3 + 115 files changed, 7373 insertions(+), 50 deletions(-) create mode 100644 packages/google-cloud-bigtable/.kokoro/samples/lint/common.cfg create mode 100644 packages/google-cloud-bigtable/.kokoro/samples/lint/continuous.cfg create mode 100644 packages/google-cloud-bigtable/.kokoro/samples/lint/periodic.cfg create mode 100644 packages/google-cloud-bigtable/.kokoro/samples/lint/presubmit.cfg create mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.6/common.cfg create mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.6/continuous.cfg create mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.6/periodic.cfg create mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.6/presubmit.cfg create mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.7/common.cfg create mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.7/continuous.cfg create mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.7/periodic.cfg create mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.7/presubmit.cfg create mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.8/common.cfg create mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.8/continuous.cfg create mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.8/periodic.cfg create mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.8/presubmit.cfg create mode 100755 packages/google-cloud-bigtable/.kokoro/test-samples.sh create mode 100644 packages/google-cloud-bigtable/docs/multiprocessing.rst create mode 100644 packages/google-cloud-bigtable/samples/AUTHORING_GUIDE.md create mode 100644 packages/google-cloud-bigtable/samples/CONTRIBUTING.md create mode 100644 packages/google-cloud-bigtable/samples/hello/README.rst create mode 100644 packages/google-cloud-bigtable/samples/hello/README.rst.in create mode 100644 packages/google-cloud-bigtable/samples/hello/main.py create mode 100644 packages/google-cloud-bigtable/samples/hello/main_test.py create mode 100644 packages/google-cloud-bigtable/samples/hello/noxfile.py create mode 100644 packages/google-cloud-bigtable/samples/hello/requirements-test.txt create mode 100644 packages/google-cloud-bigtable/samples/hello/requirements.txt create mode 100644 packages/google-cloud-bigtable/samples/hello_happybase/README.rst create mode 100644 packages/google-cloud-bigtable/samples/hello_happybase/README.rst.in create mode 100644 packages/google-cloud-bigtable/samples/hello_happybase/main.py create mode 100644 packages/google-cloud-bigtable/samples/hello_happybase/main_test.py create mode 100644 packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py create mode 100644 packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt create mode 100644 packages/google-cloud-bigtable/samples/hello_happybase/requirements.txt create mode 100644 packages/google-cloud-bigtable/samples/instanceadmin/README.rst create mode 100644 packages/google-cloud-bigtable/samples/instanceadmin/README.rst.in create mode 100644 packages/google-cloud-bigtable/samples/instanceadmin/instanceadmin.py create mode 100644 packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py create mode 100644 packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt create mode 100755 
packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt create mode 100644 packages/google-cloud-bigtable/samples/metricscaler/Dockerfile create mode 100644 packages/google-cloud-bigtable/samples/metricscaler/README.rst create mode 100644 packages/google-cloud-bigtable/samples/metricscaler/README.rst.in create mode 100644 packages/google-cloud-bigtable/samples/metricscaler/metricscaler.py create mode 100644 packages/google-cloud-bigtable/samples/metricscaler/metricscaler_test.py create mode 100644 packages/google-cloud-bigtable/samples/metricscaler/noxfile.py create mode 100644 packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt create mode 100644 packages/google-cloud-bigtable/samples/metricscaler/requirements.txt create mode 100644 packages/google-cloud-bigtable/samples/quickstart/README.rst create mode 100644 packages/google-cloud-bigtable/samples/quickstart/README.rst.in create mode 100644 packages/google-cloud-bigtable/samples/quickstart/main.py create mode 100644 packages/google-cloud-bigtable/samples/quickstart/main_test.py create mode 100644 packages/google-cloud-bigtable/samples/quickstart/noxfile.py create mode 100644 packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt create mode 100644 packages/google-cloud-bigtable/samples/quickstart/requirements.txt create mode 100644 packages/google-cloud-bigtable/samples/quickstart_happybase/README.rst create mode 100644 packages/google-cloud-bigtable/samples/quickstart_happybase/README.rst.in create mode 100644 packages/google-cloud-bigtable/samples/quickstart_happybase/main.py create mode 100644 packages/google-cloud-bigtable/samples/quickstart_happybase/main_test.py create mode 100644 packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py create mode 100644 packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt create mode 100644 packages/google-cloud-bigtable/samples/quickstart_happybase/requirements.txt create mode 100644 packages/google-cloud-bigtable/samples/snippets/filters/filter_snippets.py create mode 100644 packages/google-cloud-bigtable/samples/snippets/filters/filters_test.py create mode 100644 packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py create mode 100644 packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt create mode 100755 packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt create mode 100644 packages/google-cloud-bigtable/samples/snippets/filters/snapshots/__init__.py create mode 100644 packages/google-cloud-bigtable/samples/snippets/filters/snapshots/snap_filters_test.py create mode 100644 packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py create mode 100644 packages/google-cloud-bigtable/samples/snippets/reads/read_snippets.py create mode 100644 packages/google-cloud-bigtable/samples/snippets/reads/reads_test.py create mode 100644 packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt create mode 100755 packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt create mode 100644 packages/google-cloud-bigtable/samples/snippets/reads/snapshots/__init__.py create mode 100644 packages/google-cloud-bigtable/samples/snippets/reads/snapshots/snap_reads_test.py create mode 100644 packages/google-cloud-bigtable/samples/snippets/writes/__init__.py create mode 100644 packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py create mode 100644 
packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt create mode 100755 packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt create mode 100644 packages/google-cloud-bigtable/samples/snippets/writes/write_batch.py create mode 100644 packages/google-cloud-bigtable/samples/snippets/writes/write_conditionally.py create mode 100644 packages/google-cloud-bigtable/samples/snippets/writes/write_increment.py create mode 100644 packages/google-cloud-bigtable/samples/snippets/writes/write_simple.py create mode 100644 packages/google-cloud-bigtable/samples/snippets/writes/writes_test.py create mode 100644 packages/google-cloud-bigtable/samples/tableadmin/README.rst create mode 100644 packages/google-cloud-bigtable/samples/tableadmin/README.rst.in create mode 100644 packages/google-cloud-bigtable/samples/tableadmin/noxfile.py create mode 100644 packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt create mode 100755 packages/google-cloud-bigtable/samples/tableadmin/requirements.txt create mode 100644 packages/google-cloud-bigtable/samples/tableadmin/tableadmin.py create mode 100755 packages/google-cloud-bigtable/samples/tableadmin/tableadmin_test.py create mode 100755 packages/google-cloud-bigtable/scripts/decrypt-secrets.sh create mode 100644 packages/google-cloud-bigtable/scripts/readme-gen/readme_gen.py create mode 100644 packages/google-cloud-bigtable/scripts/readme-gen/templates/README.tmpl.rst create mode 100644 packages/google-cloud-bigtable/scripts/readme-gen/templates/auth.tmpl.rst create mode 100644 packages/google-cloud-bigtable/scripts/readme-gen/templates/auth_api_key.tmpl.rst create mode 100644 packages/google-cloud-bigtable/scripts/readme-gen/templates/install_deps.tmpl.rst create mode 100644 packages/google-cloud-bigtable/scripts/readme-gen/templates/install_portaudio.tmpl.rst create mode 100644 packages/google-cloud-bigtable/testing/.gitignore diff --git a/packages/google-cloud-bigtable/.coveragerc b/packages/google-cloud-bigtable/.coveragerc index b178b094aa1d..dd39c8546c41 100644 --- a/packages/google-cloud-bigtable/.coveragerc +++ b/packages/google-cloud-bigtable/.coveragerc @@ -1,3 +1,19 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # Generated by synthtool. DO NOT EDIT! [run] branch = True diff --git a/packages/google-cloud-bigtable/.flake8 b/packages/google-cloud-bigtable/.flake8 index 0268ecc9c55c..ed9316381c9c 100644 --- a/packages/google-cloud-bigtable/.flake8 +++ b/packages/google-cloud-bigtable/.flake8 @@ -1,3 +1,19 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # Generated by synthtool. DO NOT EDIT! [flake8] ignore = E203, E266, E501, W503 @@ -5,6 +21,8 @@ exclude = # Exclude generated code. **/proto/** **/gapic/** + **/services/** + **/types/** *_pb2.py # Standard linting exemptions. diff --git a/packages/google-cloud-bigtable/.github/CODEOWNERS b/packages/google-cloud-bigtable/.github/CODEOWNERS index e43d91c0ba58..59302d617ce4 100644 --- a/packages/google-cloud-bigtable/.github/CODEOWNERS +++ b/packages/google-cloud-bigtable/.github/CODEOWNERS @@ -8,3 +8,4 @@ # The bigtable-dpe team is the default owner for anything not # explicitly taken by someone else. * @googleapis/bigtable-dpe +/samples/ @googleapis/bigtable-dpe @googleapis/python-samples-owners \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.github/ISSUE_TEMPLATE/bug_report.md b/packages/google-cloud-bigtable/.github/ISSUE_TEMPLATE/bug_report.md index 54b119142fcf..e372a064e0f0 100644 --- a/packages/google-cloud-bigtable/.github/ISSUE_TEMPLATE/bug_report.md +++ b/packages/google-cloud-bigtable/.github/ISSUE_TEMPLATE/bug_report.md @@ -11,8 +11,7 @@ Thanks for stopping by to let us know something could be better! Please run down the following list and make sure you've tried the usual "quick fixes": - Search the issues already opened: https://github.com/googleapis/python-bigtable/issues - - Search the issues on our "catch-all" repository: https://github.com/googleapis/google-cloud-python - - Search StackOverflow: http://stackoverflow.com/questions/tagged/google-cloud-platform+python + - Search StackOverflow: https://stackoverflow.com/questions/tagged/google-cloud-platform+python If you are still having issues, please be sure to include as much information as possible: diff --git a/packages/google-cloud-bigtable/.gitignore b/packages/google-cloud-bigtable/.gitignore index 3fb06e09ce74..b87e1ed580d9 100644 --- a/packages/google-cloud-bigtable/.gitignore +++ b/packages/google-cloud-bigtable/.gitignore @@ -10,6 +10,7 @@ dist build eggs +.eggs parts bin var @@ -49,6 +50,7 @@ bigquery/docs/generated # Virtual environment env/ coverage.xml +sponge_log.xml # System test environment variables. system_tests/local_test_setup diff --git a/packages/google-cloud-bigtable/.kokoro/publish-docs.sh b/packages/google-cloud-bigtable/.kokoro/publish-docs.sh index e6047caf8f9b..7d51f64afceb 100755 --- a/packages/google-cloud-bigtable/.kokoro/publish-docs.sh +++ b/packages/google-cloud-bigtable/.kokoro/publish-docs.sh @@ -13,8 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -#!/bin/bash - set -eo pipefail # Disable buffering, so that the logs stream through. diff --git a/packages/google-cloud-bigtable/.kokoro/release.sh b/packages/google-cloud-bigtable/.kokoro/release.sh index 6a911b651b56..102d0ba6d06c 100755 --- a/packages/google-cloud-bigtable/.kokoro/release.sh +++ b/packages/google-cloud-bigtable/.kokoro/release.sh @@ -13,8 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-#!/bin/bash - set -eo pipefail # Start the releasetool reporter diff --git a/packages/google-cloud-bigtable/.kokoro/samples/lint/common.cfg b/packages/google-cloud-bigtable/.kokoro/samples/lint/common.cfg new file mode 100644 index 000000000000..b597cb22fee7 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/samples/lint/common.cfg @@ -0,0 +1,34 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Specify which tests to run +env_vars: { + key: "RUN_TESTS_SESSION" + value: "lint" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-bigtable/.kokoro/test-samples.sh" +} + +# Configure the docker image for kokoro-trampoline. +env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker" +} + +# Download secrets for samples +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. +build_file: "python-bigtable/.kokoro/trampoline.sh" \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/lint/continuous.cfg b/packages/google-cloud-bigtable/.kokoro/samples/lint/continuous.cfg new file mode 100644 index 000000000000..a1c8d9759c88 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/samples/lint/continuous.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/lint/periodic.cfg b/packages/google-cloud-bigtable/.kokoro/samples/lint/periodic.cfg new file mode 100644 index 000000000000..50fec9649732 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/samples/lint/periodic.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "False" +} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/lint/presubmit.cfg b/packages/google-cloud-bigtable/.kokoro/samples/lint/presubmit.cfg new file mode 100644 index 000000000000..a1c8d9759c88 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/samples/lint/presubmit.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.6/common.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.6/common.cfg new file mode 100644 index 000000000000..dd662013654a --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.6/common.cfg @@ -0,0 +1,34 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Specify which tests to run +env_vars: { + key: "RUN_TESTS_SESSION" + value: "py-3.6" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-bigtable/.kokoro/test-samples.sh" +} + +# Configure the docker image for kokoro-trampoline. 
+env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker" +} + +# Download secrets for samples +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. +build_file: "python-bigtable/.kokoro/trampoline.sh" \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.6/continuous.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.6/continuous.cfg new file mode 100644 index 000000000000..7218af1499e5 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.6/continuous.cfg @@ -0,0 +1,7 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} + diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.6/periodic.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.6/periodic.cfg new file mode 100644 index 000000000000..50fec9649732 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.6/periodic.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "False" +} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.6/presubmit.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.6/presubmit.cfg new file mode 100644 index 000000000000..a1c8d9759c88 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.6/presubmit.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.7/common.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.7/common.cfg new file mode 100644 index 000000000000..6ee44dbb96cb --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.7/common.cfg @@ -0,0 +1,34 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Specify which tests to run +env_vars: { + key: "RUN_TESTS_SESSION" + value: "py-3.7" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-bigtable/.kokoro/test-samples.sh" +} + +# Configure the docker image for kokoro-trampoline. +env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker" +} + +# Download secrets for samples +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. 
+build_file: "python-bigtable/.kokoro/trampoline.sh" \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.7/continuous.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.7/continuous.cfg new file mode 100644 index 000000000000..a1c8d9759c88 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.7/continuous.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.7/periodic.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.7/periodic.cfg new file mode 100644 index 000000000000..50fec9649732 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.7/periodic.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "False" +} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.7/presubmit.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.7/presubmit.cfg new file mode 100644 index 000000000000..a1c8d9759c88 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.7/presubmit.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.8/common.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.8/common.cfg new file mode 100644 index 000000000000..cc909eb206e1 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.8/common.cfg @@ -0,0 +1,34 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Specify which tests to run +env_vars: { + key: "RUN_TESTS_SESSION" + value: "py-3.8" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-bigtable/.kokoro/test-samples.sh" +} + +# Configure the docker image for kokoro-trampoline. +env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker" +} + +# Download secrets for samples +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. 
+build_file: "python-bigtable/.kokoro/trampoline.sh" \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.8/continuous.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.8/continuous.cfg new file mode 100644 index 000000000000..a1c8d9759c88 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.8/continuous.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.8/periodic.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.8/periodic.cfg new file mode 100644 index 000000000000..50fec9649732 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.8/periodic.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "False" +} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.8/presubmit.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.8/presubmit.cfg new file mode 100644 index 000000000000..a1c8d9759c88 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.8/presubmit.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/test-samples.sh b/packages/google-cloud-bigtable/.kokoro/test-samples.sh new file mode 100755 index 000000000000..6da844235705 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/test-samples.sh @@ -0,0 +1,104 @@ +#!/bin/bash +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# `-e` enables the script to automatically fail when a command fails +# `-o pipefail` sets the exit code to the rightmost comment to exit with a non-zero +set -eo pipefail +# Enables `**` to include files nested inside sub-folders +shopt -s globstar + +cd github/python-bigtable + +# Run periodic samples tests at latest release +if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"periodic"* ]]; then + LATEST_RELEASE=$(git describe --abbrev=0 --tags) + git checkout $LATEST_RELEASE +fi + +# Disable buffering, so that the logs stream through. 
+export PYTHONUNBUFFERED=1 + +# Debug: show build environment +env | grep KOKORO + +# Install nox +python3.6 -m pip install --upgrade --quiet nox + +# Use secrets acessor service account to get secrets +if [[ -f "${KOKORO_GFILE_DIR}/secrets_viewer_service_account.json" ]]; then + gcloud auth activate-service-account \ + --key-file="${KOKORO_GFILE_DIR}/secrets_viewer_service_account.json" \ + --project="cloud-devrel-kokoro-resources" +fi + +# This script will create 3 files: +# - testing/test-env.sh +# - testing/service-account.json +# - testing/client-secrets.json +./scripts/decrypt-secrets.sh + +source ./testing/test-env.sh +export GOOGLE_APPLICATION_CREDENTIALS=$(pwd)/testing/service-account.json + +# For cloud-run session, we activate the service account for gcloud sdk. +gcloud auth activate-service-account \ + --key-file "${GOOGLE_APPLICATION_CREDENTIALS}" + +export GOOGLE_CLIENT_SECRETS=$(pwd)/testing/client-secrets.json + +echo -e "\n******************** TESTING PROJECTS ********************" + +# Switch to 'fail at end' to allow all tests to complete before exiting. +set +e +# Use RTN to return a non-zero value if the test fails. +RTN=0 +ROOT=$(pwd) +# Find all requirements.txt in the samples directory (may break on whitespace). +for file in samples/**/requirements.txt; do + cd "$ROOT" + # Navigate to the project folder. + file=$(dirname "$file") + cd "$file" + + echo "------------------------------------------------------------" + echo "- testing $file" + echo "------------------------------------------------------------" + + # Use nox to execute the tests for the project. + python3.6 -m nox -s "$RUN_TESTS_SESSION" + EXIT=$? + + # If this is a periodic build, send the test log to the Build Cop Bot. + # See https://github.com/googleapis/repo-automation-bots/tree/master/packages/buildcop. + if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"periodic"* ]]; then + chmod +x $KOKORO_GFILE_DIR/linux_amd64/buildcop + $KOKORO_GFILE_DIR/linux_amd64/buildcop + fi + + if [[ $EXIT -ne 0 ]]; then + RTN=1 + echo -e "\n Testing failed: Nox returned a non-zero exit code. \n" + else + echo -e "\n Testing completed.\n" + fi + +done +cd "$ROOT" + +# Workaround for Kokoro permissions issue: delete secrets +rm testing/{test-env.sh,client-secrets.json,service-account.json} + +exit "$RTN" \ No newline at end of file diff --git a/packages/google-cloud-bigtable/CONTRIBUTING.rst b/packages/google-cloud-bigtable/CONTRIBUTING.rst index 97e69746dc6d..5d9a099ac9ad 100644 --- a/packages/google-cloud-bigtable/CONTRIBUTING.rst +++ b/packages/google-cloud-bigtable/CONTRIBUTING.rst @@ -22,7 +22,7 @@ In order to add a feature: documentation. - The feature must work fully on the following CPython versions: 2.7, - 3.5, 3.6, and 3.7 on both UNIX and Windows. + 3.5, 3.6, 3.7 and 3.8 on both UNIX and Windows. - The feature must not add unnecessary dependencies (where "unnecessary" is of course subjective, but new dependencies should @@ -214,26 +214,18 @@ We support: - `Python 3.5`_ - `Python 3.6`_ - `Python 3.7`_ +- `Python 3.8`_ .. _Python 3.5: https://docs.python.org/3.5/ .. _Python 3.6: https://docs.python.org/3.6/ .. _Python 3.7: https://docs.python.org/3.7/ +.. _Python 3.8: https://docs.python.org/3.8/ Supported versions can be found in our ``noxfile.py`` `config`_. .. _config: https://github.com/googleapis/python-bigtable/blob/master/noxfile.py -We explicitly decided not to support `Python 2.5`_ due to `decreased usage`_ -and lack of continuous integration `support`_. - -.. _Python 2.5: https://docs.python.org/2.5/ -.. 
_decreased usage: https://caremad.io/2013/10/a-look-at-pypi-downloads/ -.. _support: https://blog.travis-ci.com/2013-11-18-upcoming-build-environment-updates/ - -We have `dropped 2.6`_ as a supported version as well since Python 2.6 is no -longer supported by the core development team. - Python 2.7 support is deprecated. All code changes should maintain Python 2.7 compatibility until January 1, 2020. We also explicitly decided to support Python 3 beginning with version @@ -247,7 +239,6 @@ We also explicitly decided to support Python 3 beginning with version .. _prominent: https://docs.djangoproject.com/en/1.9/faq/install/#what-python-version-can-i-use-with-django .. _projects: http://flask.pocoo.org/docs/0.10/python3/ .. _Unicode literal support: https://www.python.org/dev/peps/pep-0414/ -.. _dropped 2.6: https://github.com/googleapis/google-cloud-python/issues/995 ********** Versioning diff --git a/packages/google-cloud-bigtable/MANIFEST.in b/packages/google-cloud-bigtable/MANIFEST.in index cd011be27a0e..e9e29d12033d 100644 --- a/packages/google-cloud-bigtable/MANIFEST.in +++ b/packages/google-cloud-bigtable/MANIFEST.in @@ -1,6 +1,25 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # Generated by synthtool. DO NOT EDIT! include README.rst LICENSE recursive-include google *.json *.proto recursive-include tests * global-exclude *.py[co] global-exclude __pycache__ + +# Exclude scripts for samples readmegen +prune scripts/readme-gen \ No newline at end of file diff --git a/packages/google-cloud-bigtable/docs/conf.py b/packages/google-cloud-bigtable/docs/conf.py index ce720db110de..9249013859cc 100644 --- a/packages/google-cloud-bigtable/docs/conf.py +++ b/packages/google-cloud-bigtable/docs/conf.py @@ -38,6 +38,7 @@ "sphinx.ext.napoleon", "sphinx.ext.todo", "sphinx.ext.viewcode", + "recommonmark", ] # autodoc/autosummary flags @@ -49,10 +50,6 @@ # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] -# Allow markdown includes (so releases.md can include CHANGLEOG.md) -# http://www.sphinx-doc.org/en/master/markdown.html -source_parsers = {".md": "recommonmark.parser.CommonMarkParser"} - # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] diff --git a/packages/google-cloud-bigtable/docs/index.rst b/packages/google-cloud-bigtable/docs/index.rst index 88d8e09ec31f..b1c8f0574073 100644 --- a/packages/google-cloud-bigtable/docs/index.rst +++ b/packages/google-cloud-bigtable/docs/index.rst @@ -1,12 +1,6 @@ .. include:: README.rst -.. note:: - - Because this client uses :mod:`grpcio` library, it is safe to - share instances across threads. In multiprocessing scenarios, the best - practice is to create client instances *after* the invocation of - :func:`os.fork` by :class:`multiprocessing.Pool` or - :class:`multiprocessing.Process`. +.. 
include:: multiprocessing.rst Using the API ------------- diff --git a/packages/google-cloud-bigtable/docs/multiprocessing.rst b/packages/google-cloud-bigtable/docs/multiprocessing.rst new file mode 100644 index 000000000000..1cb29d4ca967 --- /dev/null +++ b/packages/google-cloud-bigtable/docs/multiprocessing.rst @@ -0,0 +1,7 @@ +.. note:: + + Because this client uses :mod:`grpcio` library, it is safe to + share instances across threads. In multiprocessing scenarios, the best + practice is to create client instances *after* the invocation of + :func:`os.fork` by :class:`multiprocessing.Pool` or + :class:`multiprocessing.Process`. diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index 1065894e61e4..3bca8a099331 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -141,7 +141,7 @@ def docs(session): """Build the docs for this library.""" session.install("-e", ".") - session.install("sphinx<3.0.0", "alabaster", "recommonmark") + session.install("sphinx", "alabaster", "recommonmark") shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) session.run( diff --git a/packages/google-cloud-bigtable/samples/AUTHORING_GUIDE.md b/packages/google-cloud-bigtable/samples/AUTHORING_GUIDE.md new file mode 100644 index 000000000000..55c97b32f4c1 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/AUTHORING_GUIDE.md @@ -0,0 +1 @@ +See https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/AUTHORING_GUIDE.md \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/CONTRIBUTING.md b/packages/google-cloud-bigtable/samples/CONTRIBUTING.md new file mode 100644 index 000000000000..34c882b6f1a3 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/CONTRIBUTING.md @@ -0,0 +1 @@ +See https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/CONTRIBUTING.md \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/hello/README.rst b/packages/google-cloud-bigtable/samples/hello/README.rst new file mode 100644 index 000000000000..893932ad5e73 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/hello/README.rst @@ -0,0 +1,115 @@ +.. This file is automatically generated. Do not edit this file directly. + +Google Cloud Bigtable Python Samples +=============================================================================== + +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=bigtable/hello/README.rst + + +This directory contains samples for Google Cloud Bigtable. `Google Cloud Bigtable`_ is Google's NoSQL Big Data database service. It's the same database that powers many core Google services, including Search, Analytics, Maps, and Gmail. + + + + +.. _Google Cloud Bigtable: https://cloud.google.com/bigtable/docs + +Setup +------------------------------------------------------------------------------- + + +Authentication +++++++++++++++ + +This sample requires you to have authentication setup. Refer to the +`Authentication Getting Started Guide`_ for instructions on setting up +credentials for applications. + +.. _Authentication Getting Started Guide: + https://cloud.google.com/docs/authentication/getting-started + +Install Dependencies +++++++++++++++++++++ + +#. Clone python-docs-samples and change directory to the sample directory you want to use. + + .. 
code-block:: bash + + $ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git + +#. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. + + .. _Python Development Environment Setup Guide: + https://cloud.google.com/python/setup + +#. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. + + .. code-block:: bash + + $ virtualenv env + $ source env/bin/activate + +#. Install the dependencies needed to run the samples. + + .. code-block:: bash + + $ pip install -r requirements.txt + +.. _pip: https://pip.pypa.io/ +.. _virtualenv: https://virtualenv.pypa.io/ + +Samples +------------------------------------------------------------------------------- + +Basic example ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=bigtable/hello/main.py,bigtable/hello/README.rst + + + + +To run this sample: + +.. code-block:: bash + + $ python main.py + + usage: main.py [-h] [--table TABLE] project_id instance_id + + Demonstrates how to connect to Cloud Bigtable and run some basic operations. + Prerequisites: - Create a Cloud Bigtable cluster. + https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google + Application Default Credentials. + https://developers.google.com/identity/protocols/application-default- + credentials + + positional arguments: + project_id Your Cloud Platform project ID. + instance_id ID of the Cloud Bigtable instance to connect to. + + optional arguments: + -h, --help show this help message and exit + --table TABLE Table to create and destroy. (default: Hello-Bigtable) + + + + + +The client library +------------------------------------------------------------------------------- + +This sample uses the `Google Cloud Client Library for Python`_. +You can read the documentation for more details on API usage and use GitHub +to `browse the source`_ and `report issues`_. + +.. _Google Cloud Client Library for Python: + https://googlecloudplatform.github.io/google-cloud-python/ +.. _browse the source: + https://github.com/GoogleCloudPlatform/google-cloud-python +.. _report issues: + https://github.com/GoogleCloudPlatform/google-cloud-python/issues + + +.. _Google Cloud SDK: https://cloud.google.com/sdk/ \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/hello/README.rst.in b/packages/google-cloud-bigtable/samples/hello/README.rst.in new file mode 100644 index 000000000000..ed9253c115a4 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/hello/README.rst.in @@ -0,0 +1,23 @@ +# This file is used to generate README.rst + +product: + name: Google Cloud Bigtable + short_name: Cloud Bigtable + url: https://cloud.google.com/bigtable/docs + description: > + `Google Cloud Bigtable`_ is Google's NoSQL Big Data database service. It's + the same database that powers many core Google services, including Search, + Analytics, Maps, and Gmail. 
+ +setup: +- auth +- install_deps + +samples: +- name: Basic example + file: main.py + show_help: true + +cloud_client_library: true + +folder: bigtable/hello \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/hello/main.py b/packages/google-cloud-bigtable/samples/hello/main.py new file mode 100644 index 000000000000..073270847232 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/hello/main.py @@ -0,0 +1,130 @@ +#!/usr/bin/env python + +# Copyright 2016 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Demonstrates how to connect to Cloud Bigtable and run some basic operations. + +Prerequisites: + +- Create a Cloud Bigtable cluster. + https://cloud.google.com/bigtable/docs/creating-cluster +- Set your Google Application Default Credentials. + https://developers.google.com/identity/protocols/application-default-credentials +""" + +import argparse +# [START bigtable_hw_imports] +import datetime + +from google.cloud import bigtable +from google.cloud.bigtable import column_family +from google.cloud.bigtable import row_filters +# [END bigtable_hw_imports] + + +def main(project_id, instance_id, table_id): + # [START bigtable_hw_connect] + # The client must be created with admin=True because it will create a + # table. + client = bigtable.Client(project=project_id, admin=True) + instance = client.instance(instance_id) + # [END bigtable_hw_connect] + + # [START bigtable_hw_create_table] + print('Creating the {} table.'.format(table_id)) + table = instance.table(table_id) + + print('Creating column family cf1 with Max Version GC rule...') + # Create a column family with GC policy : most recent N versions + # Define the GC policy to retain only the most recent 2 versions + max_versions_rule = column_family.MaxVersionsGCRule(2) + column_family_id = 'cf1' + column_families = {column_family_id: max_versions_rule} + if not table.exists(): + table.create(column_families=column_families) + else: + print("Table {} already exists.".format(table_id)) + # [END bigtable_hw_create_table] + + # [START bigtable_hw_write_rows] + print('Writing some greetings to the table.') + greetings = ['Hello World!', 'Hello Cloud Bigtable!', 'Hello Python!'] + rows = [] + column = 'greeting'.encode() + for i, value in enumerate(greetings): + # Note: This example uses sequential numeric IDs for simplicity, + # but this can result in poor performance in a production + # application. Since rows are stored in sorted order by key, + # sequential keys can result in poor distribution of operations + # across nodes. 
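+        # (Illustrative aside, hypothetical and not part of this sample: one
+        # common way to spread writes across nodes is to prefix the key with a
+        # hashed or otherwise well-distributed value, for example something like
+        #     row_key = 'greeting#{}'.format(uuid.uuid4().hex[:8]).encode()
+        # though that would give up the simple, predictable keys this demo
+        # relies on when it reads 'greeting0' back below.)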
+ # + # For more information about how to design a Bigtable schema for + # the best performance, see the documentation: + # + # https://cloud.google.com/bigtable/docs/schema-design + row_key = 'greeting{}'.format(i).encode() + row = table.direct_row(row_key) + row.set_cell(column_family_id, + column, + value, + timestamp=datetime.datetime.utcnow()) + rows.append(row) + table.mutate_rows(rows) + # [END bigtable_hw_write_rows] + + # [START bigtable_hw_create_filter] + # Create a filter to only retrieve the most recent version of the cell + # for each column accross entire row. + row_filter = row_filters.CellsColumnLimitFilter(1) + # [END bigtable_hw_create_filter] + + # [START bigtable_hw_get_with_filter] + print('Getting a single greeting by row key.') + key = 'greeting0'.encode() + + row = table.read_row(key, row_filter) + cell = row.cells[column_family_id][column][0] + print(cell.value.decode('utf-8')) + # [END bigtable_hw_get_with_filter] + + # [START bigtable_hw_scan_with_filter] + print('Scanning for all greetings:') + partial_rows = table.read_rows(filter_=row_filter) + + for row in partial_rows: + cell = row.cells[column_family_id][column][0] + print(cell.value.decode('utf-8')) + # [END bigtable_hw_scan_with_filter] + + # [START bigtable_hw_delete_table] + print('Deleting the {} table.'.format(table_id)) + table.delete() + # [END bigtable_hw_delete_table] + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument('project_id', help='Your Cloud Platform project ID.') + parser.add_argument( + 'instance_id', help='ID of the Cloud Bigtable instance to connect to.') + parser.add_argument( + '--table', + help='Table to create and destroy.', + default='Hello-Bigtable') + + args = parser.parse_args() + main(args.project_id, args.instance_id, args.table) diff --git a/packages/google-cloud-bigtable/samples/hello/main_test.py b/packages/google-cloud-bigtable/samples/hello/main_test.py new file mode 100644 index 000000000000..75fe4ff24e7c --- /dev/null +++ b/packages/google-cloud-bigtable/samples/hello/main_test.py @@ -0,0 +1,39 @@ +# Copyright 2016 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import random + +from main import main + +PROJECT = os.environ['GCLOUD_PROJECT'] +BIGTABLE_INSTANCE = os.environ['BIGTABLE_INSTANCE'] +TABLE_NAME_FORMAT = 'hello-world-test-{}' +TABLE_NAME_RANGE = 10000 + + +def test_main(capsys): + table_name = TABLE_NAME_FORMAT.format( + random.randrange(TABLE_NAME_RANGE)) + + main(PROJECT, BIGTABLE_INSTANCE, table_name) + + out, _ = capsys.readouterr() + assert 'Creating the {} table.'.format(table_name) in out + assert 'Writing some greetings to the table.' in out + assert 'Getting a single greeting by row key.' in out + assert 'Hello World!' in out + assert 'Scanning for all greetings' in out + assert 'Hello Cloud Bigtable!' 
in out + assert 'Deleting the {} table.'.format(table_name) in out diff --git a/packages/google-cloud-bigtable/samples/hello/noxfile.py b/packages/google-cloud-bigtable/samples/hello/noxfile.py new file mode 100644 index 000000000000..b23055f14a65 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/hello/noxfile.py @@ -0,0 +1,225 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import os +from pathlib import Path +import sys + +import nox + + +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING +# DO NOT EDIT THIS FILE EVER! +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING + +# Copy `noxfile_config.py` to your directory and modify it instead. + + +# `TEST_CONFIG` dict is a configuration hook that allows users to +# modify the test configurations. The values here should be in sync +# with `noxfile_config.py`. Users will copy `noxfile_config.py` into +# their directory and modify it. + +TEST_CONFIG = { + # You can opt out from the test for specific Python versions. + 'ignored_versions': ["2.7"], + + # An envvar key for determining the project id to use. Change it + # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a + # build specific Cloud project. You can also use your own string + # to use your own Cloud project. + 'gcloud_project_env': 'GCLOUD_PROJECT', + # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + + # A dictionary you want to inject into your test. Don't put any + # secrets here. These values will override predefined values. + 'envs': {}, +} + + +try: + # Ensure we can import noxfile_config in the project's directory. + sys.path.append('.') + from noxfile_config import TEST_CONFIG_OVERRIDE +except ImportError as e: + print("No user noxfile_config found: detail: {}".format(e)) + TEST_CONFIG_OVERRIDE = {} + +# Update the TEST_CONFIG with the user supplied values. +TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) + + +def get_pytest_env_vars(): + """Returns a dict for pytest invocation.""" + ret = {} + + # Override the GCLOUD_PROJECT and the alias. + env_key = TEST_CONFIG['gcloud_project_env'] + # This should error out if not set. + ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] + ret['GCLOUD_PROJECT'] = os.environ[env_key] + + # Apply user supplied envs. + ret.update(TEST_CONFIG['envs']) + return ret + + +# DO NOT EDIT - automatically generated. +# All versions used to tested samples. +ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] + +# Any default versions that should be ignored. +IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] + +TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) + +INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False)) +# +# Style Checks +# + + +def _determine_local_import_names(start_dir): + """Determines all import names that should be considered "local". 
+ + This is used when running the linter to insure that import order is + properly checked. + """ + file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)] + return [ + basename + for basename, extension in file_ext_pairs + if extension == ".py" + or os.path.isdir(os.path.join(start_dir, basename)) + and basename not in ("__pycache__") + ] + + +# Linting with flake8. +# +# We ignore the following rules: +# E203: whitespace before ‘:’ +# E266: too many leading ‘#’ for block comment +# E501: line too long +# I202: Additional newline in a section of imports +# +# We also need to specify the rules which are ignored by default: +# ['E226', 'W504', 'E126', 'E123', 'W503', 'E24', 'E704', 'E121'] +FLAKE8_COMMON_ARGS = [ + "--show-source", + "--builtin=gettext", + "--max-complexity=20", + "--import-order-style=google", + "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py", + "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202", + "--max-line-length=88", +] + + +@nox.session +def lint(session): + session.install("flake8", "flake8-import-order") + + local_names = _determine_local_import_names(".") + args = FLAKE8_COMMON_ARGS + [ + "--application-import-names", + ",".join(local_names), + "." + ] + session.run("flake8", *args) + + +# +# Sample Tests +# + + +PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] + + +def _session_tests(session, post_install=None): + """Runs py.test for a particular project.""" + if os.path.exists("requirements.txt"): + session.install("-r", "requirements.txt") + + if os.path.exists("requirements-test.txt"): + session.install("-r", "requirements-test.txt") + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. + # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars() + ) + + +@nox.session(python=ALL_VERSIONS) +def py(session): + """Runs py.test for a sample using the specified version of Python.""" + if session.python in TESTED_VERSIONS: + _session_tests(session) + else: + session.skip("SKIPPED: {} tests are disabled for this sample.".format( + session.python + )) + + +# +# Readmegen +# + + +def _get_repo_root(): + """ Returns the root folder of the project. """ + # Get root of this repository. Assume we don't have directories nested deeper than 10 items. 
+ p = Path(os.getcwd()) + for i in range(10): + if p is None: + break + if Path(p / ".git").exists(): + return str(p) + p = p.parent + raise Exception("Unable to detect repository root.") + + +GENERATED_READMES = sorted([x for x in Path(".").rglob("*.rst.in")]) + + +@nox.session +@nox.parametrize("path", GENERATED_READMES) +def readmegen(session, path): + """(Re-)generates the readme for a sample.""" + session.install("jinja2", "pyyaml") + dir_ = os.path.dirname(path) + + if os.path.exists(os.path.join(dir_, "requirements.txt")): + session.install("-r", os.path.join(dir_, "requirements.txt")) + + in_file = os.path.join(dir_, "README.rst.in") + session.run( + "python", _get_repo_root() + "/scripts/readme-gen/readme_gen.py", in_file + ) diff --git a/packages/google-cloud-bigtable/samples/hello/requirements-test.txt b/packages/google-cloud-bigtable/samples/hello/requirements-test.txt new file mode 100644 index 000000000000..781d4326c947 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/hello/requirements-test.txt @@ -0,0 +1 @@ +pytest==5.3.2 diff --git a/packages/google-cloud-bigtable/samples/hello/requirements.txt b/packages/google-cloud-bigtable/samples/hello/requirements.txt new file mode 100644 index 000000000000..29ecf15a2b72 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/hello/requirements.txt @@ -0,0 +1,2 @@ +google-cloud-bigtable==1.2.1 +google-cloud-core==1.3.0 diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/README.rst b/packages/google-cloud-bigtable/samples/hello_happybase/README.rst new file mode 100644 index 000000000000..82a376535373 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/hello_happybase/README.rst @@ -0,0 +1,122 @@ +.. This file is automatically generated. Do not edit this file directly. + +Google Cloud Bigtable Python Samples +=============================================================================== + +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=bigtable/hello_happybase/README.rst + + +This directory contains samples for Google Cloud Bigtable. `Google Cloud Bigtable`_ is Google's NoSQL Big Data database service. It's the same database that powers many core Google services, including Search, Analytics, Maps, and Gmail. + + +This sample demonstrates using the `Google Cloud Client Library HappyBase +package`_, an implementation of the `HappyBase API`_ to connect to and +interact with Cloud Bigtable. + +.. _Google Cloud Client Library HappyBase package: + https://github.com/GoogleCloudPlatform/google-cloud-python-happybase +.. _HappyBase API: http://happybase.readthedocs.io/en/stable/ + + +.. _Google Cloud Bigtable: https://cloud.google.com/bigtable/docs + +Setup +------------------------------------------------------------------------------- + + +Authentication +++++++++++++++ + +This sample requires you to have authentication setup. Refer to the +`Authentication Getting Started Guide`_ for instructions on setting up +credentials for applications. + +.. _Authentication Getting Started Guide: + https://cloud.google.com/docs/authentication/getting-started + +Install Dependencies +++++++++++++++++++++ + +#. Clone python-docs-samples and change directory to the sample directory you want to use. + + .. code-block:: bash + + $ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git + +#. 
Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. + + .. _Python Development Environment Setup Guide: + https://cloud.google.com/python/setup + +#. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. + + .. code-block:: bash + + $ virtualenv env + $ source env/bin/activate + +#. Install the dependencies needed to run the samples. + + .. code-block:: bash + + $ pip install -r requirements.txt + +.. _pip: https://pip.pypa.io/ +.. _virtualenv: https://virtualenv.pypa.io/ + +Samples +------------------------------------------------------------------------------- + +Basic example ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=bigtable/hello_happybase/main.py,bigtable/hello_happybase/README.rst + + + + +To run this sample: + +.. code-block:: bash + + $ python main.py + + usage: main.py [-h] [--table TABLE] project_id instance_id + + Demonstrates how to connect to Cloud Bigtable and run some basic operations. + Prerequisites: - Create a Cloud Bigtable cluster. + https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google + Application Default Credentials. + https://developers.google.com/identity/protocols/application-default- + credentials + + positional arguments: + project_id Your Cloud Platform project ID. + instance_id ID of the Cloud Bigtable instance to connect to. + + optional arguments: + -h, --help show this help message and exit + --table TABLE Table to create and destroy. (default: Hello-Bigtable) + + + + + +The client library +------------------------------------------------------------------------------- + +This sample uses the `Google Cloud Client Library for Python`_. +You can read the documentation for more details on API usage and use GitHub +to `browse the source`_ and `report issues`_. + +.. _Google Cloud Client Library for Python: + https://googlecloudplatform.github.io/google-cloud-python/ +.. _browse the source: + https://github.com/GoogleCloudPlatform/google-cloud-python +.. _report issues: + https://github.com/GoogleCloudPlatform/google-cloud-python/issues + + +.. _Google Cloud SDK: https://cloud.google.com/sdk/ \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/README.rst.in b/packages/google-cloud-bigtable/samples/hello_happybase/README.rst.in new file mode 100644 index 000000000000..8ef6a956b5e9 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/hello_happybase/README.rst.in @@ -0,0 +1,32 @@ +# This file is used to generate README.rst + +product: + name: Google Cloud Bigtable + short_name: Cloud Bigtable + url: https://cloud.google.com/bigtable/docs + description: > + `Google Cloud Bigtable`_ is Google's NoSQL Big Data database service. It's + the same database that powers many core Google services, including Search, + Analytics, Maps, and Gmail. + +description: | + This sample demonstrates using the `Google Cloud Client Library HappyBase + package`_, an implementation of the `HappyBase API`_ to connect to and + interact with Cloud Bigtable. + + .. _Google Cloud Client Library HappyBase package: + https://github.com/GoogleCloudPlatform/google-cloud-python-happybase + .. 
_HappyBase API: http://happybase.readthedocs.io/en/stable/ + +setup: +- auth +- install_deps + +samples: +- name: Basic example + file: main.py + show_help: true + +cloud_client_library: true + +folder: bigtable/hello_happybase \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/main.py b/packages/google-cloud-bigtable/samples/hello_happybase/main.py new file mode 100644 index 000000000000..ade4acbf0d84 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/hello_happybase/main.py @@ -0,0 +1,118 @@ +#!/usr/bin/env python + +# Copyright 2016 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Demonstrates how to connect to Cloud Bigtable and run some basic operations. + +Prerequisites: + +- Create a Cloud Bigtable cluster. + https://cloud.google.com/bigtable/docs/creating-cluster +- Set your Google Application Default Credentials. + https://developers.google.com/identity/protocols/application-default-credentials +""" + +import argparse + +# [START bigtable_hw_imports_happybase] +from google.cloud import bigtable +from google.cloud import happybase +# [END bigtable_hw_imports_happybase] + + +def main(project_id, instance_id, table_name): + # [START bigtable_hw_connect_happybase] + # The client must be created with admin=True because it will create a + # table. + client = bigtable.Client(project=project_id, admin=True) + instance = client.instance(instance_id) + connection = happybase.Connection(instance=instance) + # [END bigtable_hw_connect_happybase] + + try: + # [START bigtable_hw_create_table_happybase] + print('Creating the {} table.'.format(table_name)) + column_family_name = 'cf1' + connection.create_table( + table_name, + { + column_family_name: dict() # Use default options. + }) + # [END bigtable_hw_create_table_happybase] + + # [START bigtable_hw_write_rows_happybase] + print('Writing some greetings to the table.') + table = connection.table(table_name) + column_name = '{fam}:greeting'.format(fam=column_family_name) + greetings = [ + 'Hello World!', + 'Hello Cloud Bigtable!', + 'Hello HappyBase!', + ] + + for i, value in enumerate(greetings): + # Note: This example uses sequential numeric IDs for simplicity, + # but this can result in poor performance in a production + # application. Since rows are stored in sorted order by key, + # sequential keys can result in poor distribution of operations + # across nodes. 
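+            # (Illustrative aside, hypothetical and not used by this sample:
+            # assuming this package mirrors the standard HappyBase API, the
+            # per-row writes below could also be grouped to cut round trips, e.g.
+            #     with table.batch() as batch:
+            #         batch.put(row_key, {column_name.encode('utf-8'): value.encode('utf-8')})
+            # the sample keeps one put() per row for clarity.)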
+ # + # For more information about how to design a Bigtable schema for + # the best performance, see the documentation: + # + # https://cloud.google.com/bigtable/docs/schema-design + row_key = 'greeting{}'.format(i) + table.put( + row_key, {column_name.encode('utf-8'): value.encode('utf-8')} + ) + # [END bigtable_hw_write_rows_happybase] + + # [START bigtable_hw_get_by_key_happybase] + print('Getting a single greeting by row key.') + key = 'greeting0'.encode('utf-8') + row = table.row(key) + print('\t{}: {}'.format(key, row[column_name.encode('utf-8')])) + # [END bigtable_hw_get_by_key_happybase] + + # [START bigtable_hw_scan_all_happybase] + print('Scanning for all greetings:') + + for key, row in table.scan(): + print('\t{}: {}'.format(key, row[column_name.encode('utf-8')])) + # [END bigtable_hw_scan_all_happybase] + + # [START bigtable_hw_delete_table_happybase] + print('Deleting the {} table.'.format(table_name)) + connection.delete_table(table_name) + # [END bigtable_hw_delete_table_happybase] + + finally: + connection.close() + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument('project_id', help='Your Cloud Platform project ID.') + parser.add_argument( + 'instance_id', help='ID of the Cloud Bigtable instance to connect to.') + parser.add_argument( + '--table', + help='Table to create and destroy.', + default='Hello-Bigtable') + + args = parser.parse_args() + main(args.project_id, args.instance_id, args.table) diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/main_test.py b/packages/google-cloud-bigtable/samples/hello_happybase/main_test.py new file mode 100644 index 000000000000..d1dfc65c29dd --- /dev/null +++ b/packages/google-cloud-bigtable/samples/hello_happybase/main_test.py @@ -0,0 +1,41 @@ +# Copyright 2016 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import random + +from main import main + +PROJECT = os.environ['GCLOUD_PROJECT'] +BIGTABLE_INSTANCE = os.environ['BIGTABLE_INSTANCE'] +TABLE_NAME_FORMAT = 'hello-world-hb-test-{}' +TABLE_NAME_RANGE = 10000 + + +def test_main(capsys): + table_name = TABLE_NAME_FORMAT.format( + random.randrange(TABLE_NAME_RANGE)) + main( + PROJECT, + BIGTABLE_INSTANCE, + table_name) + + out, _ = capsys.readouterr() + assert 'Creating the {} table.'.format(table_name) in out + assert 'Writing some greetings to the table.' in out + assert 'Getting a single greeting by row key.' in out + assert 'Hello World!' in out + assert 'Scanning for all greetings' in out + assert 'Hello Cloud Bigtable!' 
in out + assert 'Deleting the {} table.'.format(table_name) in out diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py b/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py new file mode 100644 index 000000000000..b23055f14a65 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py @@ -0,0 +1,225 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import os +from pathlib import Path +import sys + +import nox + + +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING +# DO NOT EDIT THIS FILE EVER! +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING + +# Copy `noxfile_config.py` to your directory and modify it instead. + + +# `TEST_CONFIG` dict is a configuration hook that allows users to +# modify the test configurations. The values here should be in sync +# with `noxfile_config.py`. Users will copy `noxfile_config.py` into +# their directory and modify it. + +TEST_CONFIG = { + # You can opt out from the test for specific Python versions. + 'ignored_versions': ["2.7"], + + # An envvar key for determining the project id to use. Change it + # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a + # build specific Cloud project. You can also use your own string + # to use your own Cloud project. + 'gcloud_project_env': 'GCLOUD_PROJECT', + # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + + # A dictionary you want to inject into your test. Don't put any + # secrets here. These values will override predefined values. + 'envs': {}, +} + + +try: + # Ensure we can import noxfile_config in the project's directory. + sys.path.append('.') + from noxfile_config import TEST_CONFIG_OVERRIDE +except ImportError as e: + print("No user noxfile_config found: detail: {}".format(e)) + TEST_CONFIG_OVERRIDE = {} + +# Update the TEST_CONFIG with the user supplied values. +TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) + + +def get_pytest_env_vars(): + """Returns a dict for pytest invocation.""" + ret = {} + + # Override the GCLOUD_PROJECT and the alias. + env_key = TEST_CONFIG['gcloud_project_env'] + # This should error out if not set. + ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] + ret['GCLOUD_PROJECT'] = os.environ[env_key] + + # Apply user supplied envs. + ret.update(TEST_CONFIG['envs']) + return ret + + +# DO NOT EDIT - automatically generated. +# All versions used to tested samples. +ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] + +# Any default versions that should be ignored. +IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] + +TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) + +INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False)) +# +# Style Checks +# + + +def _determine_local_import_names(start_dir): + """Determines all import names that should be considered "local". 
+ + This is used when running the linter to insure that import order is + properly checked. + """ + file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)] + return [ + basename + for basename, extension in file_ext_pairs + if extension == ".py" + or os.path.isdir(os.path.join(start_dir, basename)) + and basename not in ("__pycache__") + ] + + +# Linting with flake8. +# +# We ignore the following rules: +# E203: whitespace before ‘:’ +# E266: too many leading ‘#’ for block comment +# E501: line too long +# I202: Additional newline in a section of imports +# +# We also need to specify the rules which are ignored by default: +# ['E226', 'W504', 'E126', 'E123', 'W503', 'E24', 'E704', 'E121'] +FLAKE8_COMMON_ARGS = [ + "--show-source", + "--builtin=gettext", + "--max-complexity=20", + "--import-order-style=google", + "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py", + "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202", + "--max-line-length=88", +] + + +@nox.session +def lint(session): + session.install("flake8", "flake8-import-order") + + local_names = _determine_local_import_names(".") + args = FLAKE8_COMMON_ARGS + [ + "--application-import-names", + ",".join(local_names), + "." + ] + session.run("flake8", *args) + + +# +# Sample Tests +# + + +PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] + + +def _session_tests(session, post_install=None): + """Runs py.test for a particular project.""" + if os.path.exists("requirements.txt"): + session.install("-r", "requirements.txt") + + if os.path.exists("requirements-test.txt"): + session.install("-r", "requirements-test.txt") + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. + # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars() + ) + + +@nox.session(python=ALL_VERSIONS) +def py(session): + """Runs py.test for a sample using the specified version of Python.""" + if session.python in TESTED_VERSIONS: + _session_tests(session) + else: + session.skip("SKIPPED: {} tests are disabled for this sample.".format( + session.python + )) + + +# +# Readmegen +# + + +def _get_repo_root(): + """ Returns the root folder of the project. """ + # Get root of this repository. Assume we don't have directories nested deeper than 10 items. 
+ p = Path(os.getcwd()) + for i in range(10): + if p is None: + break + if Path(p / ".git").exists(): + return str(p) + p = p.parent + raise Exception("Unable to detect repository root.") + + +GENERATED_READMES = sorted([x for x in Path(".").rglob("*.rst.in")]) + + +@nox.session +@nox.parametrize("path", GENERATED_READMES) +def readmegen(session, path): + """(Re-)generates the readme for a sample.""" + session.install("jinja2", "pyyaml") + dir_ = os.path.dirname(path) + + if os.path.exists(os.path.join(dir_, "requirements.txt")): + session.install("-r", os.path.join(dir_, "requirements.txt")) + + in_file = os.path.join(dir_, "README.rst.in") + session.run( + "python", _get_repo_root() + "/scripts/readme-gen/readme_gen.py", in_file + ) diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt b/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt new file mode 100644 index 000000000000..781d4326c947 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt @@ -0,0 +1 @@ +pytest==5.3.2 diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/requirements.txt b/packages/google-cloud-bigtable/samples/hello_happybase/requirements.txt new file mode 100644 index 000000000000..a144f03e1bc5 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/hello_happybase/requirements.txt @@ -0,0 +1 @@ +google-cloud-happybase==0.33.0 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/README.rst b/packages/google-cloud-bigtable/samples/instanceadmin/README.rst new file mode 100644 index 000000000000..16f176a6099c --- /dev/null +++ b/packages/google-cloud-bigtable/samples/instanceadmin/README.rst @@ -0,0 +1,120 @@ +.. This file is automatically generated. Do not edit this file directly. + + +Google Cloud Bigtable table creation +=============================================================================== + +https://cloud.google.com/bigtable/docs/quickstart-cbt + +This page explains how to use the cbt command to connect to a Cloud Bigtable instance, perform basic administrative tasks, and read and write data in a table. + +Google Cloud Bigtable Python Samples +=============================================================================== + +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=bigtable/hello/README.rst + + +This directory contains samples for Google Cloud Bigtable. `Google Cloud Bigtable`_ is Google's NoSQL Big Data database service. It's the same database that powers many core Google services, including Search, Analytics, Maps, and Gmail. + + + + +.. _Google Cloud Bigtable: https://cloud.google.com/bigtable/docs + +Setup +------------------------------------------------------------------------------- + + +Authentication +++++++++++++++ + +This sample requires you to have authentication setup. Refer to the +`Authentication Getting Started Guide`_ for instructions on setting up +credentials for applications. + +.. _Authentication Getting Started Guide: + https://cloud.google.com/docs/authentication/getting-started + +Install Dependencies +++++++++++++++++++++ + +#. Clone python-docs-samples and change directory to the sample directory you want to use. + + .. code-block:: bash + + $ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git + +#. 
Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. + + .. _Python Development Environment Setup Guide: + https://cloud.google.com/python/setup + +#. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. + + .. code-block:: bash + + $ virtualenv env + $ source env/bin/activate + +#. Install the dependencies needed to run the samples. + + .. code-block:: bash + + $ pip install -r requirements.txt + +.. _pip: https://pip.pypa.io/ +.. _virtualenv: https://virtualenv.pypa.io/ + +Samples +------------------------------------------------------------------------------- + +Basic example ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=bigtable/instanceadmin.py,bigtable/instanceadmin/README.rst + + + + +To run this sample: + +.. code-block:: bash + + $ python instanceadmin.py + + usage: instanceadmin.py [-h] [run] [dev-instance] [del-instance] [add-cluster] [del-cluster] project_id instance_id cluster_id + + Demonstrates how to connect to Cloud Bigtable and run some basic operations + to create instance, create cluster, delete instance and delete cluster. + Prerequisites: - Create a Cloud Bigtable cluster. + https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google + Application Default Credentials. + https://developers.google.com/identity/protocols/application-default- + credentials + + positional arguments: + project_id Your Cloud Platform project ID. + instance_id ID of the Cloud Bigtable instance to connect to. + cluster_id ID of the Cloud Bigtable cluster to connect to. + + optional arguments: + -h, --help show this help message and exit + +The client library +------------------------------------------------------------------------------- + +This sample uses the `Google Cloud Client Library for Python`_. +You can read the documentation for more details on API usage and use GitHub +to `browse the source`_ and `report issues`_. + +.. _Google Cloud Client Library for Python: + https://googlecloudplatform.github.io/google-cloud-python/ +.. _browse the source: + https://github.com/GoogleCloudPlatform/google-cloud-python +.. _report issues: + https://github.com/GoogleCloudPlatform/google-cloud-python/issues + + +.. _Google Cloud SDK: https://cloud.google.com/sdk/ \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/README.rst.in b/packages/google-cloud-bigtable/samples/instanceadmin/README.rst.in new file mode 100644 index 000000000000..c085e40a6278 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/instanceadmin/README.rst.in @@ -0,0 +1,23 @@ +# This file is used to generate README.rst + +product: + name: Google Cloud Bigtable and run some basic operations. + short_name: Cloud Bigtable + url: https://cloud.google.com/bigtable/docs + description: > + `Google Cloud Bigtable`_ is Google's NoSQL Big Data database service. It's + the same database that powers many core Google services, including Search, + Analytics, Maps, and Gmail. + +setup: +- auth +- install_deps + +samples: +- name: Basic example with Bigtable Column family and GC rules. 
+ file: instanceadmin.py + show_help: true + +cloud_client_library: true + +folder: bigtable/instanceadmin \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/instanceadmin.py b/packages/google-cloud-bigtable/samples/instanceadmin/instanceadmin.py new file mode 100644 index 000000000000..32120eb63751 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/instanceadmin/instanceadmin.py @@ -0,0 +1,259 @@ +#!/usr/bin/env python + +# Copyright 2018, Google LLC +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Demonstrates how to connect to Cloud Bigtable and run some basic operations. + +Prerequisites: +- Create a Cloud Bigtable project. + https://cloud.google.com/bigtable/docs/ +- Set your Google Application Default Credentials. + https://developers.google.com/identity/protocols/application-default-credentials + +Operations performed: +- Create a Cloud Bigtable Instance. +- List instances for a Cloud Bigtable project. +- Delete a Cloud Bigtable Instance. +- Create a Cloud Bigtable Cluster. +- List Cloud Bigtable Clusters. +- Delete a Cloud Bigtable Cluster. +""" + +import argparse + +from google.cloud import bigtable +from google.cloud.bigtable import enums + + +def run_instance_operations(project_id, instance_id): + ''' Check whether the instance exists. + Creates a Production instance with default Cluster. + List instances in a project. + List clusters in an instance. + + :type project_id: str + :param project_id: Project id of the client. + + :type instance_id: str + :param instance_id: Instance of the client.
+ ''' + client = bigtable.Client(project=project_id, admin=True) + location_id = 'us-central1-f' + serve_nodes = 3 + storage_type = enums.StorageType.SSD + production = enums.Instance.Type.PRODUCTION + labels = {'prod-label': 'prod-label'} + instance = client.instance(instance_id, instance_type=production, + labels=labels) + + # [START bigtable_check_instance_exists] + if not instance.exists(): + print('Instance {} does not exists.'.format(instance_id)) + else: + print('Instance {} already exists.'.format(instance_id)) + # [END bigtable_check_instance_exists] + + # [START bigtable_create_prod_instance] + cluster = instance.cluster("ssd-cluster1", location_id=location_id, + serve_nodes=serve_nodes, + default_storage_type=storage_type) + if not instance.exists(): + print('\nCreating an Instance') + # Create instance with given options + instance.create(clusters=[cluster]) + print('\nCreated instance: {}'.format(instance_id)) + # [END bigtable_create_prod_instance] + + # [START bigtable_list_instances] + print('\nListing Instances:') + for instance_local in client.list_instances()[0]: + print(instance_local.instance_id) + # [END bigtable_list_instances] + + # [START bigtable_get_instance] + print('\nName of instance:{}\nLabels:{}'.format(instance.display_name, + instance.labels)) + # [END bigtable_get_instance] + + # [START bigtable_get_clusters] + print('\nListing Clusters...') + for cluster in instance.list_clusters()[0]: + print(cluster.cluster_id) + # [END bigtable_get_clusters] + + +def create_dev_instance(project_id, instance_id, cluster_id): + ''' Creates a Development instance with the name "hdd-instance" + location us-central1-f + Cluster nodes should not be set while creating Development + Instance + + :type project_id: str + :param project_id: Project id of the client. + + :type instance_id: str + :param instance_id: Instance of the client. + ''' + + client = bigtable.Client(project=project_id, admin=True) + + # [START bigtable_create_dev_instance] + print('\nCreating a DEVELOPMENT Instance') + # Set options to create an Instance + location_id = 'us-central1-f' + development = enums.Instance.Type.DEVELOPMENT + storage_type = enums.StorageType.HDD + labels = {'dev-label': 'dev-label'} + + # Create instance with given options + instance = client.instance(instance_id, instance_type=development, + labels=labels) + cluster = instance.cluster(cluster_id, location_id=location_id, + default_storage_type=storage_type) + + # Create development instance with given options + if not instance.exists(): + instance.create(clusters=[cluster]) + print('Created development instance: {}'.format(instance_id)) + else: + print('Instance {} already exists.'.format(instance_id)) + + # [END bigtable_create_dev_instance] + + +def delete_instance(project_id, instance_id): + ''' Delete the Instance + + :type project_id: str + :param project_id: Project id of the client. + + :type instance_id: str + :param instance_id: Instance of the client. + ''' + + client = bigtable.Client(project=project_id, admin=True) + instance = client.instance(instance_id) + # [START bigtable_delete_instance] + print('\nDeleting Instance') + if not instance.exists(): + print('Instance {} does not exists.'.format(instance_id)) + else: + instance.delete() + print('Deleted Instance: {}'.format(instance_id)) + # [END bigtable_delete_instance] + + +def add_cluster(project_id, instance_id, cluster_id): + ''' Add Cluster + + :type project_id: str + :param project_id: Project id of the client. 
+ + :type instance_id: str + :param instance_id: Instance of the client. + + :type cluster_id: str + :param cluster_id: Cluster id. + ''' + client = bigtable.Client(project=project_id, admin=True) + instance = client.instance(instance_id) + + location_id = 'us-central1-a' + serve_nodes = 3 + storage_type = enums.StorageType.SSD + + if not instance.exists(): + print('Instance {} does not exists.'.format(instance_id)) + else: + print('\nAdding Cluster to Instance {}'.format(instance_id)) + # [START bigtable_create_cluster] + print('\nListing Clusters...') + for cluster in instance.list_clusters()[0]: + print(cluster.cluster_id) + cluster = instance.cluster(cluster_id, location_id=location_id, + serve_nodes=serve_nodes, + default_storage_type=storage_type) + if cluster.exists(): + print( + '\nCluster not created, as {} already exists.'. + format(cluster_id) + ) + else: + cluster.create() + print('\nCluster created: {}'.format(cluster_id)) + # [END bigtable_create_cluster] + + +def delete_cluster(project_id, instance_id, cluster_id): + ''' Delete the cluster + + :type project_id: str + :param project_id: Project id of the client. + + :type instance_id: str + :param instance_id: Instance of the client. + + :type cluster_id: str + :param cluster_id: Cluster id. + ''' + + client = bigtable.Client(project=project_id, admin=True) + instance = client.instance(instance_id) + cluster = instance.cluster(cluster_id) + + # [START bigtable_delete_cluster] + print('\nDeleting Cluster') + if cluster.exists(): + cluster.delete() + print('Cluster deleted: {}'.format(cluster_id)) + else: + print('\nCluster {} does not exist.'.format(cluster_id)) + + # [END bigtable_delete_cluster] + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + + parser.add_argument('command', + help='run, dev-instance, del-instance, \ + add-cluster or del-cluster. \ + Operation to perform on Instance.') + parser.add_argument('project_id', + help='Your Cloud Platform project ID.') + parser.add_argument('instance_id', + help='ID of the Cloud Bigtable instance to \ + connect to.') + parser.add_argument('cluster_id', + help='ID of the Cloud Bigtable cluster to \ + connect to.') + + args = parser.parse_args() + + if args.command.lower() == 'run': + run_instance_operations(args.project_id, args.instance_id) + elif args.command.lower() == 'dev-instance': + create_dev_instance(args.project_id, args.instance_id, + args.cluster_id) + elif args.command.lower() == 'del-instance': + delete_instance(args.project_id, args.instance_id) + elif args.command.lower() == 'add-cluster': + add_cluster(args.project_id, args.instance_id, args.cluster_id) + elif args.command.lower() == 'del-cluster': + delete_cluster(args.project_id, args.instance_id, args.cluster_id) + else: + print('Command should be either run \n Use argument -h, \ + --help to show help and exit.') diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py b/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py new file mode 100644 index 000000000000..b23055f14a65 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py @@ -0,0 +1,225 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import os +from pathlib import Path +import sys + +import nox + + +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING +# DO NOT EDIT THIS FILE EVER! +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING + +# Copy `noxfile_config.py` to your directory and modify it instead. + + +# `TEST_CONFIG` dict is a configuration hook that allows users to +# modify the test configurations. The values here should be in sync +# with `noxfile_config.py`. Users will copy `noxfile_config.py` into +# their directory and modify it. + +TEST_CONFIG = { + # You can opt out from the test for specific Python versions. + 'ignored_versions': ["2.7"], + + # An envvar key for determining the project id to use. Change it + # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a + # build specific Cloud project. You can also use your own string + # to use your own Cloud project. + 'gcloud_project_env': 'GCLOUD_PROJECT', + # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + + # A dictionary you want to inject into your test. Don't put any + # secrets here. These values will override predefined values. + 'envs': {}, +} + + +try: + # Ensure we can import noxfile_config in the project's directory. + sys.path.append('.') + from noxfile_config import TEST_CONFIG_OVERRIDE +except ImportError as e: + print("No user noxfile_config found: detail: {}".format(e)) + TEST_CONFIG_OVERRIDE = {} + +# Update the TEST_CONFIG with the user supplied values. +TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) + + +def get_pytest_env_vars(): + """Returns a dict for pytest invocation.""" + ret = {} + + # Override the GCLOUD_PROJECT and the alias. + env_key = TEST_CONFIG['gcloud_project_env'] + # This should error out if not set. + ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] + ret['GCLOUD_PROJECT'] = os.environ[env_key] + + # Apply user supplied envs. + ret.update(TEST_CONFIG['envs']) + return ret + + +# DO NOT EDIT - automatically generated. +# All versions used to tested samples. +ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] + +# Any default versions that should be ignored. +IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] + +TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) + +INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False)) +# +# Style Checks +# + + +def _determine_local_import_names(start_dir): + """Determines all import names that should be considered "local". + + This is used when running the linter to insure that import order is + properly checked. + """ + file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)] + return [ + basename + for basename, extension in file_ext_pairs + if extension == ".py" + or os.path.isdir(os.path.join(start_dir, basename)) + and basename not in ("__pycache__") + ] + + +# Linting with flake8. 
+# +# We ignore the following rules: +# E203: whitespace before ‘:’ +# E266: too many leading ‘#’ for block comment +# E501: line too long +# I202: Additional newline in a section of imports +# +# We also need to specify the rules which are ignored by default: +# ['E226', 'W504', 'E126', 'E123', 'W503', 'E24', 'E704', 'E121'] +FLAKE8_COMMON_ARGS = [ + "--show-source", + "--builtin=gettext", + "--max-complexity=20", + "--import-order-style=google", + "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py", + "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202", + "--max-line-length=88", +] + + +@nox.session +def lint(session): + session.install("flake8", "flake8-import-order") + + local_names = _determine_local_import_names(".") + args = FLAKE8_COMMON_ARGS + [ + "--application-import-names", + ",".join(local_names), + "." + ] + session.run("flake8", *args) + + +# +# Sample Tests +# + + +PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] + + +def _session_tests(session, post_install=None): + """Runs py.test for a particular project.""" + if os.path.exists("requirements.txt"): + session.install("-r", "requirements.txt") + + if os.path.exists("requirements-test.txt"): + session.install("-r", "requirements-test.txt") + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. + # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars() + ) + + +@nox.session(python=ALL_VERSIONS) +def py(session): + """Runs py.test for a sample using the specified version of Python.""" + if session.python in TESTED_VERSIONS: + _session_tests(session) + else: + session.skip("SKIPPED: {} tests are disabled for this sample.".format( + session.python + )) + + +# +# Readmegen +# + + +def _get_repo_root(): + """ Returns the root folder of the project. """ + # Get root of this repository. Assume we don't have directories nested deeper than 10 items. 
+ p = Path(os.getcwd()) + for i in range(10): + if p is None: + break + if Path(p / ".git").exists(): + return str(p) + p = p.parent + raise Exception("Unable to detect repository root.") + + +GENERATED_READMES = sorted([x for x in Path(".").rglob("*.rst.in")]) + + +@nox.session +@nox.parametrize("path", GENERATED_READMES) +def readmegen(session, path): + """(Re-)generates the readme for a sample.""" + session.install("jinja2", "pyyaml") + dir_ = os.path.dirname(path) + + if os.path.exists(os.path.join(dir_, "requirements.txt")): + session.install("-r", os.path.join(dir_, "requirements.txt")) + + in_file = os.path.join(dir_, "README.rst.in") + session.run( + "python", _get_repo_root() + "/scripts/readme-gen/readme_gen.py", in_file + ) diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt new file mode 100644 index 000000000000..781d4326c947 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt @@ -0,0 +1 @@ +pytest==5.3.2 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt new file mode 100755 index 000000000000..2771c2e4c4d0 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt @@ -0,0 +1 @@ +google-cloud-bigtable==1.2.1 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/Dockerfile b/packages/google-cloud-bigtable/samples/metricscaler/Dockerfile new file mode 100644 index 000000000000..d8a5ec0c1a9b --- /dev/null +++ b/packages/google-cloud-bigtable/samples/metricscaler/Dockerfile @@ -0,0 +1,24 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +FROM python:3 + +WORKDIR /usr/src/app + +COPY requirements.txt . +RUN pip install --no-cache-dir -r requirements.txt + +COPY . . + +ENTRYPOINT [ "python", "./metricscaler.py"] +CMD ["--help"] diff --git a/packages/google-cloud-bigtable/samples/metricscaler/README.rst b/packages/google-cloud-bigtable/samples/metricscaler/README.rst new file mode 100644 index 000000000000..c64bbff1d8af --- /dev/null +++ b/packages/google-cloud-bigtable/samples/metricscaler/README.rst @@ -0,0 +1,128 @@ +.. This file is automatically generated. Do not edit this file directly. + +Google Cloud Bigtable Python Samples +=============================================================================== + +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=bigtable/metricscaler/README.rst + + +This directory contains samples for Google Cloud Bigtable. `Google Cloud Bigtable`_ is Google's NoSQL Big Data database service. It's the same database that powers many core Google services, including Search, Analytics, Maps, and Gmail. 
+ + +This sample demonstrates how to use `Stackdriver Monitoring`_ +to scale Cloud Bigtable based on CPU usage. + +.. _Stackdriver Monitoring: http://cloud.google.com/monitoring/docs/ + + +.. _Google Cloud Bigtable: https://cloud.google.com/bigtable/docs/ + +Setup +------------------------------------------------------------------------------- + + +Authentication +++++++++++++++ + +This sample requires you to have authentication setup. Refer to the +`Authentication Getting Started Guide`_ for instructions on setting up +credentials for applications. + +.. _Authentication Getting Started Guide: + https://cloud.google.com/docs/authentication/getting-started + +Install Dependencies +++++++++++++++++++++ + +#. Clone python-docs-samples and change directory to the sample directory you want to use. + + .. code-block:: bash + + $ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git + +#. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. + + .. _Python Development Environment Setup Guide: + https://cloud.google.com/python/setup + +#. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. + + .. code-block:: bash + + $ virtualenv env + $ source env/bin/activate + +#. Install the dependencies needed to run the samples. + + .. code-block:: bash + + $ pip install -r requirements.txt + +.. _pip: https://pip.pypa.io/ +.. _virtualenv: https://virtualenv.pypa.io/ + +Samples +------------------------------------------------------------------------------- + +Metricscaling example ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=bigtable/metricscaler/metricscaler.py,bigtable/metricscaler/README.rst + + + + +To run this sample: + +.. code-block:: bash + + $ python metricscaler.py + + usage: metricscaler.py [-h] [--high_cpu_threshold HIGH_CPU_THRESHOLD] + [--low_cpu_threshold LOW_CPU_THRESHOLD] + [--short_sleep SHORT_SLEEP] [--long_sleep LONG_SLEEP] + bigtable_instance bigtable_cluster + + Scales Cloud Bigtable clusters based on CPU usage. + + positional arguments: + bigtable_instance ID of the Cloud Bigtable instance to connect to. + bigtable_cluster ID of the Cloud Bigtable cluster to connect to. + + optional arguments: + -h, --help show this help message and exit + --high_cpu_threshold HIGH_CPU_THRESHOLD + If Cloud Bigtable CPU usage is above this threshold, + scale up + --low_cpu_threshold LOW_CPU_THRESHOLD + If Cloud Bigtable CPU usage is below this threshold, + scale down + --short_sleep SHORT_SLEEP + How long to sleep in seconds between checking metrics + after no scale operation + --long_sleep LONG_SLEEP + How long to sleep in seconds between checking metrics + after a scaling operation + + + + + +The client library +------------------------------------------------------------------------------- + +This sample uses the `Google Cloud Client Library for Python`_. +You can read the documentation for more details on API usage and use GitHub +to `browse the source`_ and `report issues`_. + +.. _Google Cloud Client Library for Python: + https://googlecloudplatform.github.io/google-cloud-python/ +.. _browse the source: + https://github.com/GoogleCloudPlatform/google-cloud-python +.. 
_report issues: + https://github.com/GoogleCloudPlatform/google-cloud-python/issues + + +.. _Google Cloud SDK: https://cloud.google.com/sdk/ \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/metricscaler/README.rst.in b/packages/google-cloud-bigtable/samples/metricscaler/README.rst.in new file mode 100644 index 000000000000..44a548e4c1fb --- /dev/null +++ b/packages/google-cloud-bigtable/samples/metricscaler/README.rst.in @@ -0,0 +1,29 @@ +# This file is used to generate README.rst + +product: + name: Google Cloud Bigtable + short_name: Cloud Bigtable + url: https://cloud.google.com/bigtable/docs/ + description: > + `Google Cloud Bigtable`_ is Google's NoSQL Big Data database service. It's + the same database that powers many core Google services, including Search, + Analytics, Maps, and Gmail. + +description: | + This sample demonstrates how to use `Stackdriver Monitoring`_ + to scale Cloud Bigtable based on CPU usage. + + .. _Stackdriver Monitoring: http://cloud.google.com/monitoring/docs/ + +setup: +- auth +- install_deps + +samples: +- name: Metricscaling example + file: metricscaler.py + show_help: true + +cloud_client_library: true + +folder: bigtable/metricscaler \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/metricscaler/metricscaler.py b/packages/google-cloud-bigtable/samples/metricscaler/metricscaler.py new file mode 100644 index 000000000000..3bfacd4ea81a --- /dev/null +++ b/packages/google-cloud-bigtable/samples/metricscaler/metricscaler.py @@ -0,0 +1,209 @@ +# Copyright 2017 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Sample that demonstrates how to use Stackdriver Monitoring metrics to +programmatically scale a Google Cloud Bigtable cluster.""" + +import argparse +import os +import time + +from google.cloud import bigtable +from google.cloud import monitoring_v3 +from google.cloud.bigtable import enums +from google.cloud.monitoring_v3 import query + +PROJECT = os.environ['GCLOUD_PROJECT'] + + +def get_cpu_load(): + """Returns the most recent Cloud Bigtable CPU load measurement. + + Returns: + float: The most recent Cloud Bigtable CPU usage metric + """ + # [START bigtable_cpu] + client = monitoring_v3.MetricServiceClient() + cpu_query = query.Query(client, + project=PROJECT, + metric_type='bigtable.googleapis.com/' + 'cluster/cpu_load', + minutes=5) + cpu = next(cpu_query.iter()) + return cpu.points[0].value.double_value + # [END bigtable_cpu] + + +def get_storage_utilization(): + """Returns the most recent Cloud Bigtable storage utilization measurement. 
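+
+    The value is read from the 'bigtable.googleapis.com/cluster/storage_utilization'
+    metric, sampled over the most recent five-minute window (see the query below).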
+ + Returns: + float: The most recent Cloud Bigtable storage utilization metric + """ + # [START bigtable_metric_scaler_storage_utilization] + client = monitoring_v3.MetricServiceClient() + utilization_query = query.Query(client, + project=PROJECT, + metric_type='bigtable.googleapis.com/' + 'cluster/storage_utilization', + minutes=5) + utilization = next(utilization_query.iter()) + return utilization.points[0].value.double_value + # [END bigtable_metric_scaler_storage_utilization] + + +def scale_bigtable(bigtable_instance, bigtable_cluster, scale_up): + """Scales the number of Cloud Bigtable nodes up or down. + + Edits the number of nodes in the Cloud Bigtable cluster to be increased + or decreased, depending on the `scale_up` boolean argument. Currently + the `incremental` strategy from `strategies.py` is used. + + + Args: + bigtable_instance (str): Cloud Bigtable instance ID to scale + bigtable_cluster (str): Cloud Bigtable cluster ID to scale + scale_up (bool): If true, scale up, otherwise scale down + """ + + # The minimum number of nodes to use. The default minimum is 3. If you have + # a lot of data, the rule of thumb is to not go below 2.5 TB per node for + # SSD lusters, and 8 TB for HDD. The + # "bigtable.googleapis.com/disk/bytes_used" metric is useful in figuring + # out the minimum number of nodes. + min_node_count = 3 + + # The maximum number of nodes to use. The default maximum is 30 nodes per + # zone. If you need more quota, you can request more by following the + # instructions at https://cloud.google.com/bigtable/quota. + max_node_count = 30 + + # The number of nodes to change the cluster by. + size_change_step = 3 + + # [START bigtable_scale] + bigtable_client = bigtable.Client(admin=True) + instance = bigtable_client.instance(bigtable_instance) + instance.reload() + + if instance.type_ == enums.Instance.Type.DEVELOPMENT: + raise ValueError("Development instances cannot be scaled.") + + cluster = instance.cluster(bigtable_cluster) + cluster.reload() + + current_node_count = cluster.serve_nodes + + if scale_up: + if current_node_count < max_node_count: + new_node_count = min( + current_node_count + size_change_step, max_node_count) + cluster.serve_nodes = new_node_count + cluster.update() + print('Scaled up from {} to {} nodes.'.format( + current_node_count, new_node_count)) + else: + if current_node_count > min_node_count: + new_node_count = max( + current_node_count - size_change_step, min_node_count) + cluster.serve_nodes = new_node_count + cluster.update() + print('Scaled down from {} to {} nodes.'.format( + current_node_count, new_node_count)) + # [END bigtable_scale] + + +def main( + bigtable_instance, + bigtable_cluster, + high_cpu_threshold, + low_cpu_threshold, + high_storage_threshold, + short_sleep, + long_sleep +): + """Main loop runner that autoscales Cloud Bigtable. + + Args: + bigtable_instance (str): Cloud Bigtable instance ID to autoscale + high_cpu_threshold (float): If CPU is higher than this, scale up. + low_cpu_threshold (float): If CPU is lower than this, scale down. + high_storage_threshold (float): If storage is higher than this, + scale up. 
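+        bigtable_cluster (str): Cloud Bigtable cluster ID to autoscale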
+ short_sleep (int): How long to sleep after no operation + long_sleep (int): How long to sleep after the number of nodes is + changed + """ + cluster_cpu = get_cpu_load() + cluster_storage = get_storage_utilization() + print('Detected cpu of {}'.format(cluster_cpu)) + print('Detected storage utilization of {}'.format(cluster_storage)) + try: + if cluster_cpu > high_cpu_threshold or cluster_storage > high_storage_threshold: + scale_bigtable(bigtable_instance, bigtable_cluster, True) + time.sleep(long_sleep) + elif cluster_cpu < low_cpu_threshold: + if cluster_storage < high_storage_threshold: + scale_bigtable(bigtable_instance, bigtable_cluster, False) + time.sleep(long_sleep) + else: + print('CPU within threshold, sleeping.') + time.sleep(short_sleep) + except Exception as e: + print("Error during scaling: %s", e) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description='Scales Cloud Bigtable clusters based on CPU usage.') + parser.add_argument( + 'bigtable_instance', + help='ID of the Cloud Bigtable instance to connect to.') + parser.add_argument( + 'bigtable_cluster', + help='ID of the Cloud Bigtable cluster to connect to.') + parser.add_argument( + '--high_cpu_threshold', + help='If Cloud Bigtable CPU usage is above this threshold, scale up', + default=0.6) + parser.add_argument( + '--low_cpu_threshold', + help='If Cloud Bigtable CPU usage is below this threshold, scale down', + default=0.2) + parser.add_argument( + '--high_storage_threshold', + help='If Cloud Bigtable storage utilization is above this threshold, ' + 'scale up', + default=0.6) + parser.add_argument( + '--short_sleep', + help='How long to sleep in seconds between checking metrics after no ' + 'scale operation', + default=60) + parser.add_argument( + '--long_sleep', + help='How long to sleep in seconds between checking metrics after a ' + 'scaling operation', + default=60 * 10) + args = parser.parse_args() + + while True: + main( + args.bigtable_instance, + args.bigtable_cluster, + float(args.high_cpu_threshold), + float(args.low_cpu_threshold), + float(args.high_storage_threshold), + int(args.short_sleep), + int(args.long_sleep)) diff --git a/packages/google-cloud-bigtable/samples/metricscaler/metricscaler_test.py b/packages/google-cloud-bigtable/samples/metricscaler/metricscaler_test.py new file mode 100644 index 000000000000..6cd70cbffabc --- /dev/null +++ b/packages/google-cloud-bigtable/samples/metricscaler/metricscaler_test.py @@ -0,0 +1,198 @@ +# Copyright 2017 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
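+
+# Most of these tests exercise the real Bigtable and Monitoring APIs; they
+# expect the GCLOUD_PROJECT and BIGTABLE_ZONE environment variables to be set
+# (see the constants below).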
+ +"""Unit and system tests for metricscaler.py""" + +import os +import time +import uuid + +from google.cloud import bigtable +from google.cloud.bigtable import enums +from mock import patch +import pytest + +from metricscaler import get_cpu_load +from metricscaler import get_storage_utilization +from metricscaler import main +from metricscaler import scale_bigtable + +PROJECT = os.environ['GCLOUD_PROJECT'] +BIGTABLE_ZONE = os.environ['BIGTABLE_ZONE'] +SIZE_CHANGE_STEP = 3 +INSTANCE_ID_FORMAT = 'metric-scale-test-{}' +BIGTABLE_INSTANCE = INSTANCE_ID_FORMAT.format(str(uuid.uuid4())[:10]) +BIGTABLE_DEV_INSTANCE = INSTANCE_ID_FORMAT.format(str(uuid.uuid4())[:10]) + + +# System tests to verify API calls succeed + + +def test_get_cpu_load(): + assert float(get_cpu_load()) > 0.0 + + +def test_get_storage_utilization(): + assert float(get_storage_utilization()) > 0.0 + + +@pytest.fixture() +def instance(): + cluster_id = BIGTABLE_INSTANCE + + client = bigtable.Client(project=PROJECT, admin=True) + + serve_nodes = 3 + storage_type = enums.StorageType.SSD + production = enums.Instance.Type.PRODUCTION + labels = {'prod-label': 'prod-label'} + instance = client.instance(BIGTABLE_INSTANCE, instance_type=production, + labels=labels) + + if not instance.exists(): + cluster = instance.cluster(cluster_id, location_id=BIGTABLE_ZONE, + serve_nodes=serve_nodes, + default_storage_type=storage_type) + instance.create(clusters=[cluster]) + + yield + + instance.delete() + + +@pytest.fixture() +def dev_instance(): + cluster_id = BIGTABLE_DEV_INSTANCE + + client = bigtable.Client(project=PROJECT, admin=True) + + storage_type = enums.StorageType.SSD + development = enums.Instance.Type.DEVELOPMENT + labels = {'dev-label': 'dev-label'} + instance = client.instance(BIGTABLE_DEV_INSTANCE, + instance_type=development, + labels=labels) + + if not instance.exists(): + cluster = instance.cluster(cluster_id, location_id=BIGTABLE_ZONE, + default_storage_type=storage_type) + instance.create(clusters=[cluster]) + + yield + + instance.delete() + + +def test_scale_bigtable(instance): + bigtable_client = bigtable.Client(admin=True) + + instance = bigtable_client.instance(BIGTABLE_INSTANCE) + instance.reload() + + cluster = instance.cluster(BIGTABLE_INSTANCE) + cluster.reload() + original_node_count = cluster.serve_nodes + + scale_bigtable(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, True) + + for n in range(10): + time.sleep(10) + cluster.reload() + new_node_count = cluster.serve_nodes + try: + assert (new_node_count == (original_node_count + SIZE_CHANGE_STEP)) + except AssertionError: + if n == 9: + raise + + scale_bigtable(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, False) + + for n in range(10): + time.sleep(10) + cluster.reload() + final_node_count = cluster.serve_nodes + try: + assert final_node_count == original_node_count + except AssertionError: + if n == 9: + raise + + +def test_handle_dev_instance(capsys, dev_instance): + with pytest.raises(ValueError): + scale_bigtable(BIGTABLE_DEV_INSTANCE, BIGTABLE_DEV_INSTANCE, True) + + +@patch('time.sleep') +@patch('metricscaler.get_storage_utilization') +@patch('metricscaler.get_cpu_load') +@patch('metricscaler.scale_bigtable') +def test_main(scale_bigtable, get_cpu_load, get_storage_utilization, sleep): + SHORT_SLEEP = 5 + LONG_SLEEP = 10 + + # Test okay CPU, okay storage utilization + get_cpu_load.return_value = 0.5 + get_storage_utilization.return_value = 0.5 + + main(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, 0.6, 0.3, 0.6, SHORT_SLEEP, + LONG_SLEEP) + scale_bigtable.assert_not_called() + 
scale_bigtable.reset_mock() + + # Test high CPU, okay storage utilization + get_cpu_load.return_value = 0.7 + get_storage_utilization.return_value = 0.5 + main(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, 0.6, 0.3, 0.6, SHORT_SLEEP, + LONG_SLEEP) + scale_bigtable.assert_called_once_with(BIGTABLE_INSTANCE, + BIGTABLE_INSTANCE, True) + scale_bigtable.reset_mock() + + # Test low CPU, okay storage utilization + get_storage_utilization.return_value = 0.5 + get_cpu_load.return_value = 0.2 + main(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, 0.6, 0.3, 0.6, SHORT_SLEEP, + LONG_SLEEP) + scale_bigtable.assert_called_once_with(BIGTABLE_INSTANCE, + BIGTABLE_INSTANCE, False) + scale_bigtable.reset_mock() + + # Test okay CPU, high storage utilization + get_cpu_load.return_value = 0.5 + get_storage_utilization.return_value = 0.7 + + main(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, 0.6, 0.3, 0.6, SHORT_SLEEP, + LONG_SLEEP) + scale_bigtable.assert_called_once_with(BIGTABLE_INSTANCE, + BIGTABLE_INSTANCE, True) + scale_bigtable.reset_mock() + + # Test high CPU, high storage utilization + get_cpu_load.return_value = 0.7 + get_storage_utilization.return_value = 0.7 + main(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, 0.6, 0.3, 0.6, SHORT_SLEEP, + LONG_SLEEP) + scale_bigtable.assert_called_once_with(BIGTABLE_INSTANCE, + BIGTABLE_INSTANCE, True) + scale_bigtable.reset_mock() + + # Test low CPU, high storage utilization + get_cpu_load.return_value = 0.2 + get_storage_utilization.return_value = 0.7 + main(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, 0.6, 0.3, 0.6, SHORT_SLEEP, + LONG_SLEEP) + scale_bigtable.assert_called_once_with(BIGTABLE_INSTANCE, + BIGTABLE_INSTANCE, True) + scale_bigtable.reset_mock() diff --git a/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py b/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py new file mode 100644 index 000000000000..b23055f14a65 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py @@ -0,0 +1,225 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import os +from pathlib import Path +import sys + +import nox + + +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING +# DO NOT EDIT THIS FILE EVER! +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING + +# Copy `noxfile_config.py` to your directory and modify it instead. + + +# `TEST_CONFIG` dict is a configuration hook that allows users to +# modify the test configurations. The values here should be in sync +# with `noxfile_config.py`. Users will copy `noxfile_config.py` into +# their directory and modify it. + +TEST_CONFIG = { + # You can opt out from the test for specific Python versions. + 'ignored_versions': ["2.7"], + + # An envvar key for determining the project id to use. Change it + # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a + # build specific Cloud project. 
You can also use your own string + # to use your own Cloud project. + 'gcloud_project_env': 'GCLOUD_PROJECT', + # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + + # A dictionary you want to inject into your test. Don't put any + # secrets here. These values will override predefined values. + 'envs': {}, +} + + +try: + # Ensure we can import noxfile_config in the project's directory. + sys.path.append('.') + from noxfile_config import TEST_CONFIG_OVERRIDE +except ImportError as e: + print("No user noxfile_config found: detail: {}".format(e)) + TEST_CONFIG_OVERRIDE = {} + +# Update the TEST_CONFIG with the user supplied values. +TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) + + +def get_pytest_env_vars(): + """Returns a dict for pytest invocation.""" + ret = {} + + # Override the GCLOUD_PROJECT and the alias. + env_key = TEST_CONFIG['gcloud_project_env'] + # This should error out if not set. + ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] + ret['GCLOUD_PROJECT'] = os.environ[env_key] + + # Apply user supplied envs. + ret.update(TEST_CONFIG['envs']) + return ret + + +# DO NOT EDIT - automatically generated. +# All versions used to tested samples. +ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] + +# Any default versions that should be ignored. +IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] + +TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) + +INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False)) +# +# Style Checks +# + + +def _determine_local_import_names(start_dir): + """Determines all import names that should be considered "local". + + This is used when running the linter to insure that import order is + properly checked. + """ + file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)] + return [ + basename + for basename, extension in file_ext_pairs + if extension == ".py" + or os.path.isdir(os.path.join(start_dir, basename)) + and basename not in ("__pycache__") + ] + + +# Linting with flake8. +# +# We ignore the following rules: +# E203: whitespace before ‘:’ +# E266: too many leading ‘#’ for block comment +# E501: line too long +# I202: Additional newline in a section of imports +# +# We also need to specify the rules which are ignored by default: +# ['E226', 'W504', 'E126', 'E123', 'W503', 'E24', 'E704', 'E121'] +FLAKE8_COMMON_ARGS = [ + "--show-source", + "--builtin=gettext", + "--max-complexity=20", + "--import-order-style=google", + "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py", + "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202", + "--max-line-length=88", +] + + +@nox.session +def lint(session): + session.install("flake8", "flake8-import-order") + + local_names = _determine_local_import_names(".") + args = FLAKE8_COMMON_ARGS + [ + "--application-import-names", + ",".join(local_names), + "." + ] + session.run("flake8", *args) + + +# +# Sample Tests +# + + +PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] + + +def _session_tests(session, post_install=None): + """Runs py.test for a particular project.""" + if os.path.exists("requirements.txt"): + session.install("-r", "requirements.txt") + + if os.path.exists("requirements-test.txt"): + session.install("-r", "requirements-test.txt") + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs), + # Pytest will return 5 when no tests are collected. 
This can happen + # on travis where slow and flaky tests are excluded. + # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars() + ) + + +@nox.session(python=ALL_VERSIONS) +def py(session): + """Runs py.test for a sample using the specified version of Python.""" + if session.python in TESTED_VERSIONS: + _session_tests(session) + else: + session.skip("SKIPPED: {} tests are disabled for this sample.".format( + session.python + )) + + +# +# Readmegen +# + + +def _get_repo_root(): + """ Returns the root folder of the project. """ + # Get root of this repository. Assume we don't have directories nested deeper than 10 items. + p = Path(os.getcwd()) + for i in range(10): + if p is None: + break + if Path(p / ".git").exists(): + return str(p) + p = p.parent + raise Exception("Unable to detect repository root.") + + +GENERATED_READMES = sorted([x for x in Path(".").rglob("*.rst.in")]) + + +@nox.session +@nox.parametrize("path", GENERATED_READMES) +def readmegen(session, path): + """(Re-)generates the readme for a sample.""" + session.install("jinja2", "pyyaml") + dir_ = os.path.dirname(path) + + if os.path.exists(os.path.join(dir_, "requirements.txt")): + session.install("-r", os.path.join(dir_, "requirements.txt")) + + in_file = os.path.join(dir_, "README.rst.in") + session.run( + "python", _get_repo_root() + "/scripts/readme-gen/readme_gen.py", in_file + ) diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt new file mode 100644 index 000000000000..41c4d5110536 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt @@ -0,0 +1,2 @@ +pytest==5.3.2 +mock==3.0.5 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt new file mode 100644 index 000000000000..4ab4f4eba966 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -0,0 +1,2 @@ +google-cloud-bigtable==1.2.1 +google-cloud-monitoring==0.36.0 diff --git a/packages/google-cloud-bigtable/samples/quickstart/README.rst b/packages/google-cloud-bigtable/samples/quickstart/README.rst new file mode 100644 index 000000000000..c3ff17a3959d --- /dev/null +++ b/packages/google-cloud-bigtable/samples/quickstart/README.rst @@ -0,0 +1,126 @@ + +.. This file is automatically generated. Do not edit this file directly. + +Google Cloud Bigtable Python Samples +=============================================================================== + +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=bigtable/quickstart/README.rst + + +This directory contains samples for Google Cloud Bigtable. `Google Cloud Bigtable`_ is Google's NoSQL Big Data database service. It's the same database that powers many core Google services, including Search, Analytics, Maps, and Gmail. + + + + +.. _Google Cloud Bigtable: https://cloud.google.com/bigtable/docs + + +Setup +------------------------------------------------------------------------------- + + + +Authentication +++++++++++++++ + +This sample requires you to have authentication setup. Refer to the +`Authentication Getting Started Guide`_ for instructions on setting up +credentials for applications. + +.. 
_Authentication Getting Started Guide: + https://cloud.google.com/docs/authentication/getting-started + + + + +Install Dependencies +++++++++++++++++++++ + +#. Clone python-docs-samples and change directory to the sample directory you want to use. + + .. code-block:: bash + + $ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git + +#. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. + + .. _Python Development Environment Setup Guide: + https://cloud.google.com/python/setup + +#. Create a virtualenv. Samples are compatible with Python 3.6+. + + .. code-block:: bash + + $ virtualenv env + $ source env/bin/activate + +#. Install the dependencies needed to run the samples. + + .. code-block:: bash + + $ pip install -r requirements.txt + +.. _pip: https://pip.pypa.io/ +.. _virtualenv: https://virtualenv.pypa.io/ + + + + + + +Samples +------------------------------------------------------------------------------- + + +Quickstart ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=bigtable/quickstart/main.py,bigtable/quickstart/README.rst + + + + +To run this sample: + +.. code-block:: bash + + $ python main.py + + + usage: main.py [-h] [--table TABLE] project_id instance_id + + positional arguments: + project_id Your Cloud Platform project ID. + instance_id ID of the Cloud Bigtable instance to connect to. + + optional arguments: + -h, --help show this help message and exit + --table TABLE Existing table used in the quickstart. (default: my-table) + + + + + + + + + +The client library +------------------------------------------------------------------------------- + +This sample uses the `Google Cloud Client Library for Python`_. +You can read the documentation for more details on API usage and use GitHub +to `browse the source`_ and `report issues`_. + +.. _Google Cloud Client Library for Python: + https://googlecloudplatform.github.io/google-cloud-python/ +.. _browse the source: + https://github.com/GoogleCloudPlatform/google-cloud-python +.. _report issues: + https://github.com/GoogleCloudPlatform/google-cloud-python/issues + + + +.. _Google Cloud SDK: https://cloud.google.com/sdk/ diff --git a/packages/google-cloud-bigtable/samples/quickstart/README.rst.in b/packages/google-cloud-bigtable/samples/quickstart/README.rst.in new file mode 100644 index 000000000000..94f070a7c887 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/quickstart/README.rst.in @@ -0,0 +1,23 @@ +# This file is used to generate README.rst + +product: + name: Google Cloud Bigtable + short_name: Cloud Bigtable + url: https://cloud.google.com/bigtable/docs + description: > + `Google Cloud Bigtable`_ is Google's NoSQL Big Data database service. It's + the same database that powers many core Google services, including Search, + Analytics, Maps, and Gmail. 
+ +setup: +- auth +- install_deps + +samples: +- name: Quickstart + file: main.py + show_help: true + +cloud_client_library: true + +folder: bigtable/quickstart \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/quickstart/main.py b/packages/google-cloud-bigtable/samples/quickstart/main.py new file mode 100644 index 000000000000..3763296f1e4c --- /dev/null +++ b/packages/google-cloud-bigtable/samples/quickstart/main.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python + +# Copyright 2018 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# [START bigtable_quickstart] +import argparse + +from google.cloud import bigtable + + +def main(project_id="project-id", instance_id="instance-id", + table_id="my-table"): + # Create a Cloud Bigtable client. + client = bigtable.Client(project=project_id) + + # Connect to an existing Cloud Bigtable instance. + instance = client.instance(instance_id) + + # Open an existing table. + table = instance.table(table_id) + + row_key = 'r1' + row = table.read_row(row_key.encode('utf-8')) + + column_family_id = 'cf1' + column_id = 'c1'.encode('utf-8') + value = row.cells[column_family_id][column_id][0].value.decode('utf-8') + + print('Row key: {}\nData: {}'.format(row_key, value)) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument('project_id', help='Your Cloud Platform project ID.') + parser.add_argument( + 'instance_id', help='ID of the Cloud Bigtable instance to connect to.') + parser.add_argument( + '--table', + help='Existing table used in the quickstart.', + default='my-table') + + args = parser.parse_args() + main(args.project_id, args.instance_id, args.table) +# [END bigtable_quickstart] diff --git a/packages/google-cloud-bigtable/samples/quickstart/main_test.py b/packages/google-cloud-bigtable/samples/quickstart/main_test.py new file mode 100644 index 000000000000..a61e5dbe8795 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/quickstart/main_test.py @@ -0,0 +1,55 @@ +# Copyright 2018 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
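+
+# Editor's sketch (illustrative only, not part of the original sample): the
+# quickstart above indexes row.cells[family][qualifier][0] because Cloud
+# Bigtable returns the cells of a column newest-first.  The helper below shows
+# the equivalent read with a server-side CellsColumnLimitFilter so that only
+# the latest cell is transferred; the row key, family and qualifier here are
+# placeholder values, not part of this patch.
+from google.cloud import bigtable
+from google.cloud.bigtable import row_filters
+
+
+def _read_latest_cell(project_id, instance_id, table_id,
+                      row_key=b'r1', family='cf1', qualifier=b'c1'):
+    client = bigtable.Client(project=project_id)
+    instance = client.instance(instance_id)
+    table = instance.table(table_id)
+    # Ask the server for at most one (the newest) cell per column.
+    row = table.read_row(row_key,
+                         filter_=row_filters.CellsColumnLimitFilter(1))
+    return row.cells[family][qualifier][0].value.decode('utf-8')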
+ +import os +import random + +from google.cloud import bigtable +import pytest + +from main import main + + +PROJECT = os.environ['GCLOUD_PROJECT'] +BIGTABLE_INSTANCE = os.environ['BIGTABLE_INSTANCE'] +TABLE_ID_FORMAT = 'quickstart-test-{}' +TABLE_ID_RANGE = 10000 + + +@pytest.fixture() +def table(): + table_id = TABLE_ID_FORMAT.format( + random.randrange(TABLE_ID_RANGE)) + client = bigtable.Client(project=PROJECT, admin=True) + instance = client.instance(BIGTABLE_INSTANCE) + table = instance.table(table_id) + column_family_id = 'cf1' + column_families = {column_family_id: None} + table.create(column_families=column_families) + + row = table.direct_row("r1") + row.set_cell(column_family_id, "c1", "test-value") + row.commit() + + yield table_id + + table.delete() + + +def test_main(capsys, table): + table_id = table + main(PROJECT, BIGTABLE_INSTANCE, table_id) + + out, _ = capsys.readouterr() + assert 'Row key: r1\nData: test-value\n' in out diff --git a/packages/google-cloud-bigtable/samples/quickstart/noxfile.py b/packages/google-cloud-bigtable/samples/quickstart/noxfile.py new file mode 100644 index 000000000000..b23055f14a65 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/quickstart/noxfile.py @@ -0,0 +1,225 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import os +from pathlib import Path +import sys + +import nox + + +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING +# DO NOT EDIT THIS FILE EVER! +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING + +# Copy `noxfile_config.py` to your directory and modify it instead. + + +# `TEST_CONFIG` dict is a configuration hook that allows users to +# modify the test configurations. The values here should be in sync +# with `noxfile_config.py`. Users will copy `noxfile_config.py` into +# their directory and modify it. + +TEST_CONFIG = { + # You can opt out from the test for specific Python versions. + 'ignored_versions': ["2.7"], + + # An envvar key for determining the project id to use. Change it + # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a + # build specific Cloud project. You can also use your own string + # to use your own Cloud project. + 'gcloud_project_env': 'GCLOUD_PROJECT', + # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + + # A dictionary you want to inject into your test. Don't put any + # secrets here. These values will override predefined values. + 'envs': {}, +} + + +try: + # Ensure we can import noxfile_config in the project's directory. + sys.path.append('.') + from noxfile_config import TEST_CONFIG_OVERRIDE +except ImportError as e: + print("No user noxfile_config found: detail: {}".format(e)) + TEST_CONFIG_OVERRIDE = {} + +# Update the TEST_CONFIG with the user supplied values. 
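+# For example (illustrative values only, not part of the generated file): a
+# sample that needs a dedicated project could ship a noxfile_config.py with
+#
+#     TEST_CONFIG_OVERRIDE = {
+#         'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT',
+#         'ignored_versions': ['2.7', '3.6'],
+#     }
+#
+# and the dict.update() below merges those keys over the defaults above.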
+TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) + + +def get_pytest_env_vars(): + """Returns a dict for pytest invocation.""" + ret = {} + + # Override the GCLOUD_PROJECT and the alias. + env_key = TEST_CONFIG['gcloud_project_env'] + # This should error out if not set. + ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] + ret['GCLOUD_PROJECT'] = os.environ[env_key] + + # Apply user supplied envs. + ret.update(TEST_CONFIG['envs']) + return ret + + +# DO NOT EDIT - automatically generated. +# All versions used to tested samples. +ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] + +# Any default versions that should be ignored. +IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] + +TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) + +INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False)) +# +# Style Checks +# + + +def _determine_local_import_names(start_dir): + """Determines all import names that should be considered "local". + + This is used when running the linter to insure that import order is + properly checked. + """ + file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)] + return [ + basename + for basename, extension in file_ext_pairs + if extension == ".py" + or os.path.isdir(os.path.join(start_dir, basename)) + and basename not in ("__pycache__") + ] + + +# Linting with flake8. +# +# We ignore the following rules: +# E203: whitespace before ‘:’ +# E266: too many leading ‘#’ for block comment +# E501: line too long +# I202: Additional newline in a section of imports +# +# We also need to specify the rules which are ignored by default: +# ['E226', 'W504', 'E126', 'E123', 'W503', 'E24', 'E704', 'E121'] +FLAKE8_COMMON_ARGS = [ + "--show-source", + "--builtin=gettext", + "--max-complexity=20", + "--import-order-style=google", + "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py", + "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202", + "--max-line-length=88", +] + + +@nox.session +def lint(session): + session.install("flake8", "flake8-import-order") + + local_names = _determine_local_import_names(".") + args = FLAKE8_COMMON_ARGS + [ + "--application-import-names", + ",".join(local_names), + "." + ] + session.run("flake8", *args) + + +# +# Sample Tests +# + + +PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] + + +def _session_tests(session, post_install=None): + """Runs py.test for a particular project.""" + if os.path.exists("requirements.txt"): + session.install("-r", "requirements.txt") + + if os.path.exists("requirements-test.txt"): + session.install("-r", "requirements-test.txt") + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. + # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars() + ) + + +@nox.session(python=ALL_VERSIONS) +def py(session): + """Runs py.test for a sample using the specified version of Python.""" + if session.python in TESTED_VERSIONS: + _session_tests(session) + else: + session.skip("SKIPPED: {} tests are disabled for this sample.".format( + session.python + )) + + +# +# Readmegen +# + + +def _get_repo_root(): + """ Returns the root folder of the project. """ + # Get root of this repository. 
Assume we don't have directories nested deeper than 10 items. + p = Path(os.getcwd()) + for i in range(10): + if p is None: + break + if Path(p / ".git").exists(): + return str(p) + p = p.parent + raise Exception("Unable to detect repository root.") + + +GENERATED_READMES = sorted([x for x in Path(".").rglob("*.rst.in")]) + + +@nox.session +@nox.parametrize("path", GENERATED_READMES) +def readmegen(session, path): + """(Re-)generates the readme for a sample.""" + session.install("jinja2", "pyyaml") + dir_ = os.path.dirname(path) + + if os.path.exists(os.path.join(dir_, "requirements.txt")): + session.install("-r", os.path.join(dir_, "requirements.txt")) + + in_file = os.path.join(dir_, "README.rst.in") + session.run( + "python", _get_repo_root() + "/scripts/readme-gen/readme_gen.py", in_file + ) diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt new file mode 100644 index 000000000000..781d4326c947 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt @@ -0,0 +1 @@ +pytest==5.3.2 diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt new file mode 100644 index 000000000000..2771c2e4c4d0 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt @@ -0,0 +1 @@ +google-cloud-bigtable==1.2.1 diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/README.rst b/packages/google-cloud-bigtable/samples/quickstart_happybase/README.rst new file mode 100644 index 000000000000..e2d1c45a2729 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/quickstart_happybase/README.rst @@ -0,0 +1,108 @@ +.. This file is automatically generated. Do not edit this file directly. + +Google Cloud Bigtable Python Samples +=============================================================================== + +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=bigtable/quickstart_happybase/README.rst + + +This directory contains samples for Google Cloud Bigtable. `Google Cloud Bigtable`_ is Google's NoSQL Big Data database service. It's the same database that powers many core Google services, including Search, Analytics, Maps, and Gmail. + + + + +.. _Google Cloud Bigtable: https://cloud.google.com/bigtable/docs + +Setup +------------------------------------------------------------------------------- + + +Authentication +++++++++++++++ + +This sample requires you to have authentication setup. Refer to the +`Authentication Getting Started Guide`_ for instructions on setting up +credentials for applications. + +.. _Authentication Getting Started Guide: + https://cloud.google.com/docs/authentication/getting-started + +Install Dependencies +++++++++++++++++++++ + +#. Clone python-docs-samples and change directory to the sample directory you want to use. + + .. code-block:: bash + + $ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git + +#. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. + + .. _Python Development Environment Setup Guide: + https://cloud.google.com/python/setup + +#. Create a virtualenv. 
Samples are compatible with Python 2.7 and 3.4+. + + .. code-block:: bash + + $ virtualenv env + $ source env/bin/activate + +#. Install the dependencies needed to run the samples. + + .. code-block:: bash + + $ pip install -r requirements.txt + +.. _pip: https://pip.pypa.io/ +.. _virtualenv: https://virtualenv.pypa.io/ + +Samples +------------------------------------------------------------------------------- + +Quickstart ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=bigtable/quickstart_happybase/main.py,bigtable/quickstart_happybase/README.rst + + + + +To run this sample: + +.. code-block:: bash + + $ python main.py + + usage: main.py [-h] [--table TABLE] project_id instance_id + + positional arguments: + project_id Your Cloud Platform project ID. + instance_id ID of the Cloud Bigtable instance to connect to. + + optional arguments: + -h, --help show this help message and exit + --table TABLE Existing table used in the quickstart. (default: my-table) + + + + + +The client library +------------------------------------------------------------------------------- + +This sample uses the `Google Cloud Client Library for Python`_. +You can read the documentation for more details on API usage and use GitHub +to `browse the source`_ and `report issues`_. + +.. _Google Cloud Client Library for Python: + https://googlecloudplatform.github.io/google-cloud-python/ +.. _browse the source: + https://github.com/GoogleCloudPlatform/google-cloud-python +.. _report issues: + https://github.com/GoogleCloudPlatform/google-cloud-python/issues + + +.. _Google Cloud SDK: https://cloud.google.com/sdk/ \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/README.rst.in b/packages/google-cloud-bigtable/samples/quickstart_happybase/README.rst.in new file mode 100644 index 000000000000..811a0b868fb3 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/quickstart_happybase/README.rst.in @@ -0,0 +1,23 @@ +# This file is used to generate README.rst + +product: + name: Google Cloud Bigtable + short_name: Cloud Bigtable + url: https://cloud.google.com/bigtable/docs + description: > + `Google Cloud Bigtable`_ is Google's NoSQL Big Data database service. It's + the same database that powers many core Google services, including Search, + Analytics, Maps, and Gmail. + +setup: +- auth +- install_deps + +samples: +- name: Quickstart + file: main.py + show_help: true + +cloud_client_library: true + +folder: bigtable/quickstart_happybase \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/main.py b/packages/google-cloud-bigtable/samples/quickstart_happybase/main.py new file mode 100644 index 000000000000..056e3666bb5b --- /dev/null +++ b/packages/google-cloud-bigtable/samples/quickstart_happybase/main.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python + +# Copyright 2018 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# [START bigtable_quickstart_happybase] +import argparse + +from google.cloud import bigtable +from google.cloud import happybase + + +def main(project_id="project-id", instance_id="instance-id", + table_id="my-table"): + # Creates a Bigtable client + client = bigtable.Client(project=project_id) + + # Connect to an existing instance:my-bigtable-instance + instance = client.instance(instance_id) + + connection = happybase.Connection(instance=instance) + + try: + # Connect to an existing table:my-table + table = connection.table(table_id) + + key = 'r1' + row = table.row(key.encode('utf-8')) + + column = 'cf1:c1'.encode('utf-8') + value = row[column].decode('utf-8') + print('Row key: {}\nData: {}'.format(key, value)) + + finally: + connection.close() + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument('project_id', help='Your Cloud Platform project ID.') + parser.add_argument( + 'instance_id', help='ID of the Cloud Bigtable instance to connect to.') + parser.add_argument( + '--table', + help='Existing table used in the quickstart.', + default='my-table') + + args = parser.parse_args() + main(args.project_id, args.instance_id, args.table) +# [END bigtable_quickstart_happybase] diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/main_test.py b/packages/google-cloud-bigtable/samples/quickstart_happybase/main_test.py new file mode 100644 index 000000000000..771026157f65 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/quickstart_happybase/main_test.py @@ -0,0 +1,55 @@ +# Copyright 2018 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
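+
+# Editor's sketch (illustrative only, not part of the original sample): the
+# happybase layer addresses a cell with a single b'family:qualifier' key,
+# unlike the native client used by the fixture below.  Seeding the value the
+# quickstart reads could therefore also be done through the same connection;
+# all names here are placeholders.
+from google.cloud import bigtable
+from google.cloud import happybase
+
+
+def _put_quickstart_row(project_id, instance_id, table_id):
+    client = bigtable.Client(project=project_id, admin=True)
+    instance = client.instance(instance_id)
+    connection = happybase.Connection(instance=instance)
+    try:
+        table = connection.table(table_id)
+        # happybase expects bytes and a combined 'family:qualifier' column.
+        table.put(b'r1', {b'cf1:c1': b'test-value'})
+    finally:
+        connection.close()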
+ +import os +import random + +from google.cloud import bigtable +import pytest + +from main import main + + +PROJECT = os.environ['GCLOUD_PROJECT'] +BIGTABLE_INSTANCE = os.environ['BIGTABLE_INSTANCE'] +TABLE_ID_FORMAT = 'quickstart-hb-test-{}' +TABLE_ID_RANGE = 10000 + + +@pytest.fixture() +def table(): + table_id = TABLE_ID_FORMAT.format( + random.randrange(TABLE_ID_RANGE)) + client = bigtable.Client(project=PROJECT, admin=True) + instance = client.instance(BIGTABLE_INSTANCE) + table = instance.table(table_id) + column_family_id = 'cf1' + column_families = {column_family_id: None} + table.create(column_families=column_families) + + row = table.direct_row("r1") + row.set_cell(column_family_id, "c1", "test-value") + row.commit() + + yield table_id + + table.delete() + + +def test_main(capsys, table): + table_id = table + main(PROJECT, BIGTABLE_INSTANCE, table_id) + + out, _ = capsys.readouterr() + assert 'Row key: r1\nData: test-value\n' in out diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py b/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py new file mode 100644 index 000000000000..b23055f14a65 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py @@ -0,0 +1,225 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import os +from pathlib import Path +import sys + +import nox + + +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING +# DO NOT EDIT THIS FILE EVER! +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING + +# Copy `noxfile_config.py` to your directory and modify it instead. + + +# `TEST_CONFIG` dict is a configuration hook that allows users to +# modify the test configurations. The values here should be in sync +# with `noxfile_config.py`. Users will copy `noxfile_config.py` into +# their directory and modify it. + +TEST_CONFIG = { + # You can opt out from the test for specific Python versions. + 'ignored_versions': ["2.7"], + + # An envvar key for determining the project id to use. Change it + # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a + # build specific Cloud project. You can also use your own string + # to use your own Cloud project. + 'gcloud_project_env': 'GCLOUD_PROJECT', + # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + + # A dictionary you want to inject into your test. Don't put any + # secrets here. These values will override predefined values. + 'envs': {}, +} + + +try: + # Ensure we can import noxfile_config in the project's directory. + sys.path.append('.') + from noxfile_config import TEST_CONFIG_OVERRIDE +except ImportError as e: + print("No user noxfile_config found: detail: {}".format(e)) + TEST_CONFIG_OVERRIDE = {} + +# Update the TEST_CONFIG with the user supplied values. 
+TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) + + +def get_pytest_env_vars(): + """Returns a dict for pytest invocation.""" + ret = {} + + # Override the GCLOUD_PROJECT and the alias. + env_key = TEST_CONFIG['gcloud_project_env'] + # This should error out if not set. + ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] + ret['GCLOUD_PROJECT'] = os.environ[env_key] + + # Apply user supplied envs. + ret.update(TEST_CONFIG['envs']) + return ret + + +# DO NOT EDIT - automatically generated. +# All versions used to tested samples. +ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] + +# Any default versions that should be ignored. +IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] + +TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) + +INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False)) +# +# Style Checks +# + + +def _determine_local_import_names(start_dir): + """Determines all import names that should be considered "local". + + This is used when running the linter to insure that import order is + properly checked. + """ + file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)] + return [ + basename + for basename, extension in file_ext_pairs + if extension == ".py" + or os.path.isdir(os.path.join(start_dir, basename)) + and basename not in ("__pycache__") + ] + + +# Linting with flake8. +# +# We ignore the following rules: +# E203: whitespace before ‘:’ +# E266: too many leading ‘#’ for block comment +# E501: line too long +# I202: Additional newline in a section of imports +# +# We also need to specify the rules which are ignored by default: +# ['E226', 'W504', 'E126', 'E123', 'W503', 'E24', 'E704', 'E121'] +FLAKE8_COMMON_ARGS = [ + "--show-source", + "--builtin=gettext", + "--max-complexity=20", + "--import-order-style=google", + "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py", + "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202", + "--max-line-length=88", +] + + +@nox.session +def lint(session): + session.install("flake8", "flake8-import-order") + + local_names = _determine_local_import_names(".") + args = FLAKE8_COMMON_ARGS + [ + "--application-import-names", + ",".join(local_names), + "." + ] + session.run("flake8", *args) + + +# +# Sample Tests +# + + +PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] + + +def _session_tests(session, post_install=None): + """Runs py.test for a particular project.""" + if os.path.exists("requirements.txt"): + session.install("-r", "requirements.txt") + + if os.path.exists("requirements-test.txt"): + session.install("-r", "requirements-test.txt") + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. + # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars() + ) + + +@nox.session(python=ALL_VERSIONS) +def py(session): + """Runs py.test for a sample using the specified version of Python.""" + if session.python in TESTED_VERSIONS: + _session_tests(session) + else: + session.skip("SKIPPED: {} tests are disabled for this sample.".format( + session.python + )) + + +# +# Readmegen +# + + +def _get_repo_root(): + """ Returns the root folder of the project. """ + # Get root of this repository. 
Assume we don't have directories nested deeper than 10 items. + p = Path(os.getcwd()) + for i in range(10): + if p is None: + break + if Path(p / ".git").exists(): + return str(p) + p = p.parent + raise Exception("Unable to detect repository root.") + + +GENERATED_READMES = sorted([x for x in Path(".").rglob("*.rst.in")]) + + +@nox.session +@nox.parametrize("path", GENERATED_READMES) +def readmegen(session, path): + """(Re-)generates the readme for a sample.""" + session.install("jinja2", "pyyaml") + dir_ = os.path.dirname(path) + + if os.path.exists(os.path.join(dir_, "requirements.txt")): + session.install("-r", os.path.join(dir_, "requirements.txt")) + + in_file = os.path.join(dir_, "README.rst.in") + session.run( + "python", _get_repo_root() + "/scripts/readme-gen/readme_gen.py", in_file + ) diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt b/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt new file mode 100644 index 000000000000..781d4326c947 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt @@ -0,0 +1 @@ +pytest==5.3.2 diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements.txt b/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements.txt new file mode 100644 index 000000000000..a144f03e1bc5 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements.txt @@ -0,0 +1 @@ +google-cloud-happybase==0.33.0 diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/filter_snippets.py b/packages/google-cloud-bigtable/samples/snippets/filters/filter_snippets.py new file mode 100644 index 000000000000..73ade365cff4 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/snippets/filters/filter_snippets.py @@ -0,0 +1,360 @@ +#!/usr/bin/env python + +# Copyright 2020, Google LLC +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
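+
+# Editor's sketch (illustrative only, not one of the documented snippets
+# below): every filter in this module is evaluated server-side by
+# table.read_rows().  A filter can also be combined with a row set so that
+# only a slice of the table is scanned; the key range used here is a
+# placeholder, and print_row is the helper defined at the bottom of this
+# module.
+from google.cloud import bigtable
+import google.cloud.bigtable.row_filters as row_filters
+from google.cloud.bigtable.row_set import RowSet
+
+
+def filter_with_row_set(project_id, instance_id, table_id):
+    client = bigtable.Client(project=project_id, admin=True)
+    instance = client.instance(instance_id)
+    table = instance.table(table_id)
+
+    row_set = RowSet()
+    # Scan only the keys for a single phone, then keep the newest cell of
+    # each column.
+    row_set.add_row_range_from_keys(start_key=b"phone#4c410523#",
+                                    end_key=b"phone#4c410524#")
+    rows = table.read_rows(row_set=row_set,
+                           filter_=row_filters.CellsColumnLimitFilter(1))
+    for row in rows:
+        print_row(row)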
+ +# [START bigtable_filters_limit_timestamp_range] +import datetime + +# [END bigtable_filters_limit_timestamp_range] + +# [START bigtable_filters_limit_row_sample] +# [START bigtable_filters_limit_row_regex] +# [START bigtable_filters_limit_cells_per_col] +# [START bigtable_filters_limit_cells_per_row] +# [START bigtable_filters_limit_cells_per_row_offset] +# [START bigtable_filters_limit_col_family_regex] +# [START bigtable_filters_limit_col_qualifier_regex] +# [START bigtable_filters_limit_col_range] +# [START bigtable_filters_limit_value_range] +# [START bigtable_filters_limit_value_regex] +# [START bigtable_filters_limit_timestamp_range] +# [START bigtable_filters_limit_block_all] +# [START bigtable_filters_limit_pass_all] +# [START bigtable_filters_modify_strip_value] +# [START bigtable_filters_modify_apply_label] +# [START bigtable_filters_composing_chain] +# [START bigtable_filters_composing_interleave] +# [START bigtable_filters_composing_condition] +from google.cloud import bigtable +import google.cloud.bigtable.row_filters as row_filters + +# [END bigtable_filters_limit_row_sample] +# [END bigtable_filters_limit_row_regex] +# [END bigtable_filters_limit_cells_per_col] +# [END bigtable_filters_limit_cells_per_row] +# [END bigtable_filters_limit_cells_per_row_offset] +# [END bigtable_filters_limit_col_family_regex] +# [END bigtable_filters_limit_col_qualifier_regex] +# [END bigtable_filters_limit_col_range] +# [END bigtable_filters_limit_value_range] +# [END bigtable_filters_limit_value_regex] +# [END bigtable_filters_limit_timestamp_range] +# [END bigtable_filters_limit_block_all] +# [END bigtable_filters_limit_pass_all] +# [END bigtable_filters_modify_strip_value] +# [END bigtable_filters_modify_apply_label] +# [END bigtable_filters_composing_chain] +# [END bigtable_filters_composing_interleave] +# [END bigtable_filters_composing_condition] + + +# [START bigtable_filters_limit_row_sample] +def filter_limit_row_sample(project_id, instance_id, table_id): + client = bigtable.Client(project=project_id, admin=True) + instance = client.instance(instance_id) + table = instance.table(table_id) + + rows = table.read_rows(filter_=row_filters.RowSampleFilter(.75)) + for row in rows: + print_row(row) + + +# [END bigtable_filters_limit_row_sample] +# [START bigtable_filters_limit_row_regex] +def filter_limit_row_regex(project_id, instance_id, table_id): + client = bigtable.Client(project=project_id, admin=True) + instance = client.instance(instance_id) + table = instance.table(table_id) + + rows = table.read_rows( + filter_=row_filters.RowKeyRegexFilter(".*#20190501$".encode("utf-8"))) + for row in rows: + print_row(row) + + +# [END bigtable_filters_limit_row_regex] +# [START bigtable_filters_limit_cells_per_col] +def filter_limit_cells_per_col(project_id, instance_id, table_id): + client = bigtable.Client(project=project_id, admin=True) + instance = client.instance(instance_id) + table = instance.table(table_id) + + rows = table.read_rows(filter_=row_filters.CellsColumnLimitFilter(2)) + for row in rows: + print_row(row) + + +# [END bigtable_filters_limit_cells_per_col] +# [START bigtable_filters_limit_cells_per_row] +def filter_limit_cells_per_row(project_id, instance_id, table_id): + client = bigtable.Client(project=project_id, admin=True) + instance = client.instance(instance_id) + table = instance.table(table_id) + + rows = table.read_rows(filter_=row_filters.CellsRowLimitFilter(2)) + for row in rows: + print_row(row) + + +# [END bigtable_filters_limit_cells_per_row] +# [START 
bigtable_filters_limit_cells_per_row_offset] +def filter_limit_cells_per_row_offset(project_id, instance_id, table_id): + client = bigtable.Client(project=project_id, admin=True) + instance = client.instance(instance_id) + table = instance.table(table_id) + + rows = table.read_rows(filter_=row_filters.CellsRowOffsetFilter(2)) + for row in rows: + print_row(row) + + +# [END bigtable_filters_limit_cells_per_row_offset] +# [START bigtable_filters_limit_col_family_regex] +def filter_limit_col_family_regex(project_id, instance_id, table_id): + client = bigtable.Client(project=project_id, admin=True) + instance = client.instance(instance_id) + table = instance.table(table_id) + + rows = table.read_rows( + filter_=row_filters.FamilyNameRegexFilter("stats_.*$".encode("utf-8"))) + for row in rows: + print_row(row) + + +# [END bigtable_filters_limit_col_family_regex] +# [START bigtable_filters_limit_col_qualifier_regex] +def filter_limit_col_qualifier_regex(project_id, instance_id, table_id): + client = bigtable.Client(project=project_id, admin=True) + instance = client.instance(instance_id) + table = instance.table(table_id) + + rows = table.read_rows( + filter_=row_filters.ColumnQualifierRegexFilter( + "connected_.*$".encode("utf-8"))) + for row in rows: + print_row(row) + + +# [END bigtable_filters_limit_col_qualifier_regex] +# [START bigtable_filters_limit_col_range] +def filter_limit_col_range(project_id, instance_id, table_id): + client = bigtable.Client(project=project_id, admin=True) + instance = client.instance(instance_id) + table = instance.table(table_id) + + rows = table.read_rows( + filter_=row_filters.ColumnRangeFilter("cell_plan", + b"data_plan_01gb", + b"data_plan_10gb", + inclusive_end=False)) + for row in rows: + print_row(row) + + +# [END bigtable_filters_limit_col_range] +# [START bigtable_filters_limit_value_range] +def filter_limit_value_range(project_id, instance_id, table_id): + client = bigtable.Client(project=project_id, admin=True) + instance = client.instance(instance_id) + table = instance.table(table_id) + + rows = table.read_rows( + filter_=row_filters.ValueRangeFilter(b"PQ2A.190405", b"PQ2A.190406")) + + for row in rows: + print_row(row) + + +# [END bigtable_filters_limit_value_range] +# [START bigtable_filters_limit_value_regex] + + +def filter_limit_value_regex(project_id, instance_id, table_id): + client = bigtable.Client(project=project_id, admin=True) + instance = client.instance(instance_id) + table = instance.table(table_id) + + rows = table.read_rows( + filter_=row_filters.ValueRegexFilter("PQ2A.*$".encode("utf-8"))) + for row in rows: + print_row(row) + + +# [END bigtable_filters_limit_value_regex] +# [START bigtable_filters_limit_timestamp_range] +def filter_limit_timestamp_range(project_id, instance_id, table_id): + client = bigtable.Client(project=project_id, admin=True) + instance = client.instance(instance_id) + table = instance.table(table_id) + + end = datetime.datetime(2019, 5, 1) + + rows = table.read_rows( + filter_=row_filters.TimestampRangeFilter( + row_filters.TimestampRange(end=end))) + for row in rows: + print_row(row) + + +# [END bigtable_filters_limit_timestamp_range] +# [START bigtable_filters_limit_block_all] +def filter_limit_block_all(project_id, instance_id, table_id): + client = bigtable.Client(project=project_id, admin=True) + instance = client.instance(instance_id) + table = instance.table(table_id) + + rows = table.read_rows(filter_=row_filters.BlockAllFilter(True)) + for row in rows: + print_row(row) + + +# [END 
bigtable_filters_limit_block_all] +# [START bigtable_filters_limit_pass_all] +def filter_limit_pass_all(project_id, instance_id, table_id): + client = bigtable.Client(project=project_id, admin=True) + instance = client.instance(instance_id) + table = instance.table(table_id) + + rows = table.read_rows(filter_=row_filters.PassAllFilter(True)) + for row in rows: + print_row(row) + + +# [END bigtable_filters_limit_pass_all] +# [START bigtable_filters_modify_strip_value] +def filter_modify_strip_value(project_id, instance_id, table_id): + client = bigtable.Client(project=project_id, admin=True) + instance = client.instance(instance_id) + table = instance.table(table_id) + + rows = table.read_rows( + filter_=row_filters.StripValueTransformerFilter(True)) + for row in rows: + print_row(row) + + +# [END bigtable_filters_modify_strip_value] +# [START bigtable_filters_modify_apply_label] +def filter_modify_apply_label(project_id, instance_id, table_id): + client = bigtable.Client(project=project_id, admin=True) + instance = client.instance(instance_id) + table = instance.table(table_id) + + rows = table.read_rows( + filter_=row_filters.ApplyLabelFilter(label="labelled")) + for row in rows: + print_row(row) + + +# [END bigtable_filters_modify_apply_label] +# [START bigtable_filters_composing_chain] +def filter_composing_chain(project_id, instance_id, table_id): + client = bigtable.Client(project=project_id, admin=True) + instance = client.instance(instance_id) + table = instance.table(table_id) + + rows = table.read_rows(filter_=row_filters.RowFilterChain( + filters=[row_filters.CellsColumnLimitFilter(1), + row_filters.FamilyNameRegexFilter("cell_plan")])) + for row in rows: + print_row(row) + + +# [END bigtable_filters_composing_chain] +# [START bigtable_filters_composing_interleave] +def filter_composing_interleave(project_id, instance_id, table_id): + client = bigtable.Client(project=project_id, admin=True) + instance = client.instance(instance_id) + table = instance.table(table_id) + + rows = table.read_rows(filter_=row_filters.RowFilterUnion( + filters=[row_filters.ValueRegexFilter("true"), + row_filters.ColumnQualifierRegexFilter("os_build")])) + for row in rows: + print_row(row) + + +# [END bigtable_filters_composing_interleave] +# [START bigtable_filters_composing_condition] +def filter_composing_condition(project_id, instance_id, table_id): + client = bigtable.Client(project=project_id, admin=True) + instance = client.instance(instance_id) + table = instance.table(table_id) + + rows = table.read_rows(filter_=row_filters.ConditionalRowFilter( + base_filter=row_filters.RowFilterChain(filters=[ + row_filters.ColumnQualifierRegexFilter( + "data_plan_10gb"), + row_filters.ValueRegexFilter( + "true")]), + true_filter=row_filters.ApplyLabelFilter(label="passed-filter"), + false_filter=row_filters.ApplyLabelFilter(label="filtered-out") + + )) + for row in rows: + print_row(row) + + +# [END bigtable_filters_composing_condition] + + +# [START bigtable_filters_limit_row_sample] +# [START bigtable_filters_limit_row_regex] +# [START bigtable_filters_limit_cells_per_col] +# [START bigtable_filters_limit_cells_per_row] +# [START bigtable_filters_limit_cells_per_row_offset] +# [START bigtable_filters_limit_col_family_regex] +# [START bigtable_filters_limit_col_qualifier_regex] +# [START bigtable_filters_limit_col_range] +# [START bigtable_filters_limit_value_range] +# [START bigtable_filters_limit_value_regex] +# [START bigtable_filters_limit_timestamp_range] +# [START bigtable_filters_limit_block_all] +# 
[START bigtable_filters_limit_pass_all] +# [START bigtable_filters_modify_strip_value] +# [START bigtable_filters_modify_apply_label] +# [START bigtable_filters_composing_chain] +# [START bigtable_filters_composing_interleave] +# [START bigtable_filters_composing_condition] +def print_row(row): + print("Reading data for {}:".format(row.row_key.decode('utf-8'))) + for cf, cols in sorted(row.cells.items()): + print("Column Family {}".format(cf)) + for col, cells in sorted(cols.items()): + for cell in cells: + labels = " [{}]".format(",".join(cell.labels)) \ + if len(cell.labels) else "" + print( + "\t{}: {} @{}{}".format(col.decode('utf-8'), + cell.value.decode('utf-8'), + cell.timestamp, labels)) + print("") +# [END bigtable_filters_limit_row_sample] +# [END bigtable_filters_limit_row_regex] +# [END bigtable_filters_limit_cells_per_col] +# [END bigtable_filters_limit_cells_per_row] +# [END bigtable_filters_limit_cells_per_row_offset] +# [END bigtable_filters_limit_col_family_regex] +# [END bigtable_filters_limit_col_qualifier_regex] +# [END bigtable_filters_limit_col_range] +# [END bigtable_filters_limit_value_range] +# [END bigtable_filters_limit_value_regex] +# [END bigtable_filters_limit_timestamp_range] +# [END bigtable_filters_limit_block_all] +# [END bigtable_filters_limit_pass_all] +# [END bigtable_filters_modify_strip_value] +# [END bigtable_filters_modify_apply_label] +# [END bigtable_filters_composing_chain] +# [END bigtable_filters_composing_interleave] +# [END bigtable_filters_composing_condition] diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/filters_test.py b/packages/google-cloud-bigtable/samples/snippets/filters/filters_test.py new file mode 100644 index 000000000000..0d4b265f60c3 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/snippets/filters/filters_test.py @@ -0,0 +1,226 @@ +# Copyright 2020, Google LLC +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
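+
+# Editor's note (illustrative only, not part of the original tests): the
+# fixture below passes plain Python ints to set_cell(), and the client stores
+# them as 8-byte big-endian signed integers; that is why the snapshots render
+# connected_cell as b'\x00\x00\x00\x00\x00\x00\x00\x01'.  A minimal sketch of
+# the equivalent packing:
+import struct
+
+
+def _encode_int_cell(value):
+    # Same 64-bit big-endian representation the client writes for int values.
+    return struct.pack('>q', value)
+
+
+def _decode_int_cell(cell_value):
+    return struct.unpack('>q', cell_value)[0]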
+ + +import datetime +import os +import uuid + +from google.cloud import bigtable +import pytest + +import filter_snippets + + +PROJECT = os.environ['GCLOUD_PROJECT'] +BIGTABLE_INSTANCE = os.environ['BIGTABLE_INSTANCE'] +TABLE_ID_PREFIX = 'mobile-time-series-{}' + + +@pytest.fixture(scope="module", autouse=True) +def table_id(): + client = bigtable.Client(project=PROJECT, admin=True) + instance = client.instance(BIGTABLE_INSTANCE) + + table_id = TABLE_ID_PREFIX.format(str(uuid.uuid4())[:16]) + table = instance.table(table_id) + if table.exists(): + table.delete() + + table.create(column_families={'stats_summary': None, 'cell_plan': None}) + + timestamp = datetime.datetime(2019, 5, 1) + timestamp_minus_hr = datetime.datetime(2019, 5, 1) - datetime.timedelta( + hours=1) + + rows = [ + table.direct_row("phone#4c410523#20190501"), + table.direct_row("phone#4c410523#20190502"), + table.direct_row("phone#4c410523#20190505"), + table.direct_row("phone#5c10102#20190501"), + table.direct_row("phone#5c10102#20190502"), + ] + + rows[0].set_cell("stats_summary", "connected_cell", 1, timestamp) + rows[0].set_cell("stats_summary", "connected_wifi", 1, timestamp) + rows[0].set_cell("stats_summary", "os_build", "PQ2A.190405.003", timestamp) + rows[0].set_cell("cell_plan", "data_plan_01gb", "true", timestamp_minus_hr) + rows[0].set_cell("cell_plan", "data_plan_01gb", "false", timestamp) + rows[0].set_cell("cell_plan", "data_plan_05gb", "true", timestamp) + rows[1].set_cell("stats_summary", "connected_cell", 1, timestamp) + rows[1].set_cell("stats_summary", "connected_wifi", 1, timestamp) + rows[1].set_cell("stats_summary", "os_build", "PQ2A.190405.004", timestamp) + rows[1].set_cell("cell_plan", "data_plan_05gb", "true", timestamp) + rows[2].set_cell("stats_summary", "connected_cell", 0, timestamp) + rows[2].set_cell("stats_summary", "connected_wifi", 1, timestamp) + rows[2].set_cell("stats_summary", "os_build", "PQ2A.190406.000", timestamp) + rows[2].set_cell("cell_plan", "data_plan_05gb", "true", timestamp) + rows[3].set_cell("stats_summary", "connected_cell", 1, timestamp) + rows[3].set_cell("stats_summary", "connected_wifi", 1, timestamp) + rows[3].set_cell("stats_summary", "os_build", "PQ2A.190401.002", timestamp) + rows[3].set_cell("cell_plan", "data_plan_10gb", "true", timestamp) + rows[4].set_cell("stats_summary", "connected_cell", 1, timestamp) + rows[4].set_cell("stats_summary", "connected_wifi", 0, timestamp) + rows[4].set_cell("stats_summary", "os_build", "PQ2A.190406.000", timestamp) + rows[4].set_cell("cell_plan", "data_plan_10gb", "true", timestamp) + + table.mutate_rows(rows) + + yield table_id + + table.delete() + + +def test_filter_limit_row_sample(capsys, snapshot, table_id): + filter_snippets.filter_limit_row_sample(PROJECT, BIGTABLE_INSTANCE, + table_id) + + out, _ = capsys.readouterr() + assert 'Reading data for' in out + + +def test_filter_limit_row_regex(capsys, snapshot, table_id): + filter_snippets.filter_limit_row_regex(PROJECT, BIGTABLE_INSTANCE, + table_id) + + out, _ = capsys.readouterr() + snapshot.assert_match(out) + + +def test_filter_limit_cells_per_col(capsys, snapshot, table_id): + filter_snippets.filter_limit_cells_per_col(PROJECT, BIGTABLE_INSTANCE, + table_id) + + out, _ = capsys.readouterr() + snapshot.assert_match(out) + + +def test_filter_limit_cells_per_row(capsys, snapshot, table_id): + filter_snippets.filter_limit_cells_per_row(PROJECT, BIGTABLE_INSTANCE, + table_id) + + out, _ = capsys.readouterr() + snapshot.assert_match(out) + + +def 
test_filter_limit_cells_per_row_offset(capsys, snapshot, table_id): + filter_snippets.filter_limit_cells_per_row_offset(PROJECT, + BIGTABLE_INSTANCE, + table_id) + + out, _ = capsys.readouterr() + snapshot.assert_match(out) + + +def test_filter_limit_col_family_regex(capsys, snapshot, table_id): + filter_snippets.filter_limit_col_family_regex(PROJECT, BIGTABLE_INSTANCE, + table_id) + + out, _ = capsys.readouterr() + snapshot.assert_match(out) + + +def test_filter_limit_col_qualifier_regex(capsys, snapshot, table_id): + filter_snippets.filter_limit_col_qualifier_regex(PROJECT, + BIGTABLE_INSTANCE, + table_id) + + out, _ = capsys.readouterr() + snapshot.assert_match(out) + + +def test_filter_limit_col_range(capsys, snapshot, table_id): + filter_snippets.filter_limit_col_range(PROJECT, BIGTABLE_INSTANCE, + table_id) + + out, _ = capsys.readouterr() + snapshot.assert_match(out) + + +def test_filter_limit_value_range(capsys, snapshot, table_id): + filter_snippets.filter_limit_value_range(PROJECT, BIGTABLE_INSTANCE, + table_id) + + out, _ = capsys.readouterr() + snapshot.assert_match(out) + + +def test_filter_limit_value_regex(capsys, snapshot, table_id): + filter_snippets.filter_limit_value_regex(PROJECT, BIGTABLE_INSTANCE, + table_id) + + out, _ = capsys.readouterr() + snapshot.assert_match(out) + + +def test_filter_limit_timestamp_range(capsys, snapshot, table_id): + filter_snippets.filter_limit_timestamp_range(PROJECT, BIGTABLE_INSTANCE, + table_id) + + out, _ = capsys.readouterr() + snapshot.assert_match(out) + + +def test_filter_limit_block_all(capsys, snapshot, table_id): + filter_snippets.filter_limit_block_all(PROJECT, BIGTABLE_INSTANCE, + table_id) + + out, _ = capsys.readouterr() + snapshot.assert_match(out) + + +def test_filter_limit_pass_all(capsys, snapshot, table_id): + filter_snippets.filter_limit_pass_all(PROJECT, BIGTABLE_INSTANCE, table_id) + + out, _ = capsys.readouterr() + snapshot.assert_match(out) + + +def test_filter_modify_strip_value(capsys, snapshot, table_id): + filter_snippets.filter_modify_strip_value(PROJECT, BIGTABLE_INSTANCE, + table_id) + + out, _ = capsys.readouterr() + snapshot.assert_match(out) + + +def test_filter_modify_apply_label(capsys, snapshot, table_id): + filter_snippets.filter_modify_apply_label(PROJECT, BIGTABLE_INSTANCE, + table_id) + + out, _ = capsys.readouterr() + snapshot.assert_match(out) + + +def test_filter_composing_chain(capsys, snapshot, table_id): + filter_snippets.filter_composing_chain(PROJECT, BIGTABLE_INSTANCE, + table_id) + + out, _ = capsys.readouterr() + snapshot.assert_match(out) + + +def test_filter_composing_interleave(capsys, snapshot, table_id): + filter_snippets.filter_composing_interleave(PROJECT, BIGTABLE_INSTANCE, + table_id) + + out, _ = capsys.readouterr() + snapshot.assert_match(out) + + +def test_filter_composing_condition(capsys, snapshot, table_id): + filter_snippets.filter_composing_condition(PROJECT, BIGTABLE_INSTANCE, + table_id) + + out, _ = capsys.readouterr() + snapshot.assert_match(out) diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py new file mode 100644 index 000000000000..b23055f14a65 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py @@ -0,0 +1,225 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import os +from pathlib import Path +import sys + +import nox + + +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING +# DO NOT EDIT THIS FILE EVER! +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING + +# Copy `noxfile_config.py` to your directory and modify it instead. + + +# `TEST_CONFIG` dict is a configuration hook that allows users to +# modify the test configurations. The values here should be in sync +# with `noxfile_config.py`. Users will copy `noxfile_config.py` into +# their directory and modify it. + +TEST_CONFIG = { + # You can opt out from the test for specific Python versions. + 'ignored_versions': ["2.7"], + + # An envvar key for determining the project id to use. Change it + # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a + # build specific Cloud project. You can also use your own string + # to use your own Cloud project. + 'gcloud_project_env': 'GCLOUD_PROJECT', + # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + + # A dictionary you want to inject into your test. Don't put any + # secrets here. These values will override predefined values. + 'envs': {}, +} + + +try: + # Ensure we can import noxfile_config in the project's directory. + sys.path.append('.') + from noxfile_config import TEST_CONFIG_OVERRIDE +except ImportError as e: + print("No user noxfile_config found: detail: {}".format(e)) + TEST_CONFIG_OVERRIDE = {} + +# Update the TEST_CONFIG with the user supplied values. +TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) + + +def get_pytest_env_vars(): + """Returns a dict for pytest invocation.""" + ret = {} + + # Override the GCLOUD_PROJECT and the alias. + env_key = TEST_CONFIG['gcloud_project_env'] + # This should error out if not set. + ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] + ret['GCLOUD_PROJECT'] = os.environ[env_key] + + # Apply user supplied envs. + ret.update(TEST_CONFIG['envs']) + return ret + + +# DO NOT EDIT - automatically generated. +# All versions used to tested samples. +ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] + +# Any default versions that should be ignored. +IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] + +TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) + +INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False)) +# +# Style Checks +# + + +def _determine_local_import_names(start_dir): + """Determines all import names that should be considered "local". + + This is used when running the linter to insure that import order is + properly checked. + """ + file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)] + return [ + basename + for basename, extension in file_ext_pairs + if extension == ".py" + or os.path.isdir(os.path.join(start_dir, basename)) + and basename not in ("__pycache__") + ] + + +# Linting with flake8. 
+# +# We ignore the following rules: +# E203: whitespace before ‘:’ +# E266: too many leading ‘#’ for block comment +# E501: line too long +# I202: Additional newline in a section of imports +# +# We also need to specify the rules which are ignored by default: +# ['E226', 'W504', 'E126', 'E123', 'W503', 'E24', 'E704', 'E121'] +FLAKE8_COMMON_ARGS = [ + "--show-source", + "--builtin=gettext", + "--max-complexity=20", + "--import-order-style=google", + "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py", + "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202", + "--max-line-length=88", +] + + +@nox.session +def lint(session): + session.install("flake8", "flake8-import-order") + + local_names = _determine_local_import_names(".") + args = FLAKE8_COMMON_ARGS + [ + "--application-import-names", + ",".join(local_names), + "." + ] + session.run("flake8", *args) + + +# +# Sample Tests +# + + +PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] + + +def _session_tests(session, post_install=None): + """Runs py.test for a particular project.""" + if os.path.exists("requirements.txt"): + session.install("-r", "requirements.txt") + + if os.path.exists("requirements-test.txt"): + session.install("-r", "requirements-test.txt") + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. + # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars() + ) + + +@nox.session(python=ALL_VERSIONS) +def py(session): + """Runs py.test for a sample using the specified version of Python.""" + if session.python in TESTED_VERSIONS: + _session_tests(session) + else: + session.skip("SKIPPED: {} tests are disabled for this sample.".format( + session.python + )) + + +# +# Readmegen +# + + +def _get_repo_root(): + """ Returns the root folder of the project. """ + # Get root of this repository. Assume we don't have directories nested deeper than 10 items. 
+ p = Path(os.getcwd()) + for i in range(10): + if p is None: + break + if Path(p / ".git").exists(): + return str(p) + p = p.parent + raise Exception("Unable to detect repository root.") + + +GENERATED_READMES = sorted([x for x in Path(".").rglob("*.rst.in")]) + + +@nox.session +@nox.parametrize("path", GENERATED_READMES) +def readmegen(session, path): + """(Re-)generates the readme for a sample.""" + session.install("jinja2", "pyyaml") + dir_ = os.path.dirname(path) + + if os.path.exists(os.path.join(dir_, "requirements.txt")): + session.install("-r", os.path.join(dir_, "requirements.txt")) + + in_file = os.path.join(dir_, "README.rst.in") + session.run( + "python", _get_repo_root() + "/scripts/readme-gen/readme_gen.py", in_file + ) diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt new file mode 100644 index 000000000000..781d4326c947 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt @@ -0,0 +1 @@ +pytest==5.3.2 diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt new file mode 100755 index 000000000000..a64e924f1be3 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt @@ -0,0 +1,2 @@ +google-cloud-bigtable==1.2.1 +snapshottest==0.5.1 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/snapshots/__init__.py b/packages/google-cloud-bigtable/samples/snippets/filters/snapshots/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/snapshots/snap_filters_test.py b/packages/google-cloud-bigtable/samples/snippets/filters/snapshots/snap_filters_test.py new file mode 100644 index 000000000000..a0580f565990 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/snippets/filters/snapshots/snap_filters_test.py @@ -0,0 +1,480 @@ +# -*- coding: utf-8 -*- +# snapshottest: v1 - https://goo.gl/zC4yUc +# flake8: noqa +from __future__ import unicode_literals + +from snapshottest import Snapshot + +snapshots = Snapshot() + +snapshots['test_filter_limit_row_regex 1'] = '''Reading data for phone#4c410523#20190501: +Column Family cell_plan +\tdata_plan_01gb: false @2019-05-01 00:00:00+00:00 +\tdata_plan_01gb: true @2019-04-30 23:00:00+00:00 +\tdata_plan_05gb: true @2019-05-01 00:00:00+00:00 +Column Family stats_summary +\tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tos_build: PQ2A.190405.003 @2019-05-01 00:00:00+00:00 + +Reading data for phone#5c10102#20190501: +Column Family cell_plan +\tdata_plan_10gb: true @2019-05-01 00:00:00+00:00 +Column Family stats_summary +\tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tos_build: PQ2A.190401.002 @2019-05-01 00:00:00+00:00 + +''' + +snapshots['test_filter_limit_cells_per_col 1'] = '''Reading data for phone#4c410523#20190501: +Column Family cell_plan +\tdata_plan_01gb: false @2019-05-01 00:00:00+00:00 +\tdata_plan_01gb: true @2019-04-30 23:00:00+00:00 +\tdata_plan_05gb: true @2019-05-01 00:00:00+00:00 +Column Family stats_summary +\tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 
+\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tos_build: PQ2A.190405.003 @2019-05-01 00:00:00+00:00 + +Reading data for phone#4c410523#20190502: +Column Family cell_plan +\tdata_plan_05gb: true @2019-05-01 00:00:00+00:00 +Column Family stats_summary +\tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tos_build: PQ2A.190405.004 @2019-05-01 00:00:00+00:00 + +Reading data for phone#4c410523#20190505: +Column Family cell_plan +\tdata_plan_05gb: true @2019-05-01 00:00:00+00:00 +Column Family stats_summary +\tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x00 @2019-05-01 00:00:00+00:00 +\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tos_build: PQ2A.190406.000 @2019-05-01 00:00:00+00:00 + +Reading data for phone#5c10102#20190501: +Column Family cell_plan +\tdata_plan_10gb: true @2019-05-01 00:00:00+00:00 +Column Family stats_summary +\tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tos_build: PQ2A.190401.002 @2019-05-01 00:00:00+00:00 + +Reading data for phone#5c10102#20190502: +Column Family cell_plan +\tdata_plan_10gb: true @2019-05-01 00:00:00+00:00 +Column Family stats_summary +\tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x00 @2019-05-01 00:00:00+00:00 +\tos_build: PQ2A.190406.000 @2019-05-01 00:00:00+00:00 + +''' + +snapshots['test_filter_limit_cells_per_row 1'] = '''Reading data for phone#4c410523#20190501: +Column Family cell_plan +\tdata_plan_01gb: false @2019-05-01 00:00:00+00:00 +\tdata_plan_01gb: true @2019-04-30 23:00:00+00:00 + +Reading data for phone#4c410523#20190502: +Column Family cell_plan +\tdata_plan_05gb: true @2019-05-01 00:00:00+00:00 +Column Family stats_summary +\tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 + +Reading data for phone#4c410523#20190505: +Column Family cell_plan +\tdata_plan_05gb: true @2019-05-01 00:00:00+00:00 +Column Family stats_summary +\tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x00 @2019-05-01 00:00:00+00:00 + +Reading data for phone#5c10102#20190501: +Column Family cell_plan +\tdata_plan_10gb: true @2019-05-01 00:00:00+00:00 +Column Family stats_summary +\tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 + +Reading data for phone#5c10102#20190502: +Column Family cell_plan +\tdata_plan_10gb: true @2019-05-01 00:00:00+00:00 +Column Family stats_summary +\tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 + +''' + +snapshots['test_filter_limit_cells_per_row_offset 1'] = '''Reading data for phone#4c410523#20190501: +Column Family cell_plan +\tdata_plan_05gb: true @2019-05-01 00:00:00+00:00 +Column Family stats_summary +\tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tos_build: PQ2A.190405.003 @2019-05-01 00:00:00+00:00 + +Reading data for phone#4c410523#20190502: +Column Family stats_summary +\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tos_build: PQ2A.190405.004 @2019-05-01 00:00:00+00:00 + +Reading data for phone#4c410523#20190505: +Column Family stats_summary +\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tos_build: 
PQ2A.190406.000 @2019-05-01 00:00:00+00:00 + +Reading data for phone#5c10102#20190501: +Column Family stats_summary +\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tos_build: PQ2A.190401.002 @2019-05-01 00:00:00+00:00 + +Reading data for phone#5c10102#20190502: +Column Family stats_summary +\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x00 @2019-05-01 00:00:00+00:00 +\tos_build: PQ2A.190406.000 @2019-05-01 00:00:00+00:00 + +''' + +snapshots['test_filter_limit_col_family_regex 1'] = '''Reading data for phone#4c410523#20190501: +Column Family stats_summary +\tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tos_build: PQ2A.190405.003 @2019-05-01 00:00:00+00:00 + +Reading data for phone#4c410523#20190502: +Column Family stats_summary +\tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tos_build: PQ2A.190405.004 @2019-05-01 00:00:00+00:00 + +Reading data for phone#4c410523#20190505: +Column Family stats_summary +\tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x00 @2019-05-01 00:00:00+00:00 +\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tos_build: PQ2A.190406.000 @2019-05-01 00:00:00+00:00 + +Reading data for phone#5c10102#20190501: +Column Family stats_summary +\tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tos_build: PQ2A.190401.002 @2019-05-01 00:00:00+00:00 + +Reading data for phone#5c10102#20190502: +Column Family stats_summary +\tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x00 @2019-05-01 00:00:00+00:00 +\tos_build: PQ2A.190406.000 @2019-05-01 00:00:00+00:00 + +''' + +snapshots['test_filter_limit_col_qualifier_regex 1'] = '''Reading data for phone#4c410523#20190501: +Column Family stats_summary +\tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 + +Reading data for phone#4c410523#20190502: +Column Family stats_summary +\tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 + +Reading data for phone#4c410523#20190505: +Column Family stats_summary +\tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x00 @2019-05-01 00:00:00+00:00 +\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 + +Reading data for phone#5c10102#20190501: +Column Family stats_summary +\tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 + +Reading data for phone#5c10102#20190502: +Column Family stats_summary +\tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x00 @2019-05-01 00:00:00+00:00 + +''' + +snapshots['test_filter_limit_col_range 1'] = '''Reading data for phone#4c410523#20190501: +Column Family cell_plan +\tdata_plan_01gb: false @2019-05-01 00:00:00+00:00 +\tdata_plan_01gb: true @2019-04-30 23:00:00+00:00 +\tdata_plan_05gb: true @2019-05-01 00:00:00+00:00 + +Reading data for phone#4c410523#20190502: +Column Family cell_plan +\tdata_plan_05gb: true 
@2019-05-01 00:00:00+00:00 + +Reading data for phone#4c410523#20190505: +Column Family cell_plan +\tdata_plan_05gb: true @2019-05-01 00:00:00+00:00 + +''' + +snapshots['test_filter_limit_value_range 1'] = '''Reading data for phone#4c410523#20190501: +Column Family stats_summary +\tos_build: PQ2A.190405.003 @2019-05-01 00:00:00+00:00 + +Reading data for phone#4c410523#20190502: +Column Family stats_summary +\tos_build: PQ2A.190405.004 @2019-05-01 00:00:00+00:00 + +''' + +snapshots['test_filter_limit_value_regex 1'] = '''Reading data for phone#4c410523#20190501: +Column Family stats_summary +\tos_build: PQ2A.190405.003 @2019-05-01 00:00:00+00:00 + +Reading data for phone#4c410523#20190502: +Column Family stats_summary +\tos_build: PQ2A.190405.004 @2019-05-01 00:00:00+00:00 + +Reading data for phone#4c410523#20190505: +Column Family stats_summary +\tos_build: PQ2A.190406.000 @2019-05-01 00:00:00+00:00 + +Reading data for phone#5c10102#20190501: +Column Family stats_summary +\tos_build: PQ2A.190401.002 @2019-05-01 00:00:00+00:00 + +Reading data for phone#5c10102#20190502: +Column Family stats_summary +\tos_build: PQ2A.190406.000 @2019-05-01 00:00:00+00:00 + +''' + +snapshots['test_filter_limit_timestamp_range 1'] = '''Reading data for phone#4c410523#20190501: +Column Family cell_plan +\tdata_plan_01gb: true @2019-04-30 23:00:00+00:00 + +''' + +snapshots['test_filter_limit_block_all 1'] = '' + +snapshots['test_filter_limit_pass_all 1'] = '''Reading data for phone#4c410523#20190501: +Column Family cell_plan +\tdata_plan_01gb: false @2019-05-01 00:00:00+00:00 +\tdata_plan_01gb: true @2019-04-30 23:00:00+00:00 +\tdata_plan_05gb: true @2019-05-01 00:00:00+00:00 +Column Family stats_summary +\tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tos_build: PQ2A.190405.003 @2019-05-01 00:00:00+00:00 + +Reading data for phone#4c410523#20190502: +Column Family cell_plan +\tdata_plan_05gb: true @2019-05-01 00:00:00+00:00 +Column Family stats_summary +\tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tos_build: PQ2A.190405.004 @2019-05-01 00:00:00+00:00 + +Reading data for phone#4c410523#20190505: +Column Family cell_plan +\tdata_plan_05gb: true @2019-05-01 00:00:00+00:00 +Column Family stats_summary +\tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x00 @2019-05-01 00:00:00+00:00 +\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tos_build: PQ2A.190406.000 @2019-05-01 00:00:00+00:00 + +Reading data for phone#5c10102#20190501: +Column Family cell_plan +\tdata_plan_10gb: true @2019-05-01 00:00:00+00:00 +Column Family stats_summary +\tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tos_build: PQ2A.190401.002 @2019-05-01 00:00:00+00:00 + +Reading data for phone#5c10102#20190502: +Column Family cell_plan +\tdata_plan_10gb: true @2019-05-01 00:00:00+00:00 +Column Family stats_summary +\tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x00 @2019-05-01 00:00:00+00:00 +\tos_build: PQ2A.190406.000 @2019-05-01 00:00:00+00:00 + +''' + +snapshots['test_filter_modify_strip_value 1'] = '''Reading data for phone#4c410523#20190501: +Column Family cell_plan +\tdata_plan_01gb: @2019-05-01 00:00:00+00:00 
+\tdata_plan_01gb: @2019-04-30 23:00:00+00:00 +\tdata_plan_05gb: @2019-05-01 00:00:00+00:00 +Column Family stats_summary +\tconnected_cell: @2019-05-01 00:00:00+00:00 +\tconnected_wifi: @2019-05-01 00:00:00+00:00 +\tos_build: @2019-05-01 00:00:00+00:00 + +Reading data for phone#4c410523#20190502: +Column Family cell_plan +\tdata_plan_05gb: @2019-05-01 00:00:00+00:00 +Column Family stats_summary +\tconnected_cell: @2019-05-01 00:00:00+00:00 +\tconnected_wifi: @2019-05-01 00:00:00+00:00 +\tos_build: @2019-05-01 00:00:00+00:00 + +Reading data for phone#4c410523#20190505: +Column Family cell_plan +\tdata_plan_05gb: @2019-05-01 00:00:00+00:00 +Column Family stats_summary +\tconnected_cell: @2019-05-01 00:00:00+00:00 +\tconnected_wifi: @2019-05-01 00:00:00+00:00 +\tos_build: @2019-05-01 00:00:00+00:00 + +Reading data for phone#5c10102#20190501: +Column Family cell_plan +\tdata_plan_10gb: @2019-05-01 00:00:00+00:00 +Column Family stats_summary +\tconnected_cell: @2019-05-01 00:00:00+00:00 +\tconnected_wifi: @2019-05-01 00:00:00+00:00 +\tos_build: @2019-05-01 00:00:00+00:00 + +Reading data for phone#5c10102#20190502: +Column Family cell_plan +\tdata_plan_10gb: @2019-05-01 00:00:00+00:00 +Column Family stats_summary +\tconnected_cell: @2019-05-01 00:00:00+00:00 +\tconnected_wifi: @2019-05-01 00:00:00+00:00 +\tos_build: @2019-05-01 00:00:00+00:00 + +''' + +snapshots['test_filter_modify_apply_label 1'] = '''Reading data for phone#4c410523#20190501: +Column Family cell_plan +\tdata_plan_01gb: false @2019-05-01 00:00:00+00:00 [labelled] +\tdata_plan_01gb: true @2019-04-30 23:00:00+00:00 [labelled] +\tdata_plan_05gb: true @2019-05-01 00:00:00+00:00 [labelled] +Column Family stats_summary +\tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 [labelled] +\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 [labelled] +\tos_build: PQ2A.190405.003 @2019-05-01 00:00:00+00:00 [labelled] + +Reading data for phone#4c410523#20190502: +Column Family cell_plan +\tdata_plan_05gb: true @2019-05-01 00:00:00+00:00 [labelled] +Column Family stats_summary +\tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 [labelled] +\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 [labelled] +\tos_build: PQ2A.190405.004 @2019-05-01 00:00:00+00:00 [labelled] + +Reading data for phone#4c410523#20190505: +Column Family cell_plan +\tdata_plan_05gb: true @2019-05-01 00:00:00+00:00 [labelled] +Column Family stats_summary +\tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x00 @2019-05-01 00:00:00+00:00 [labelled] +\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 [labelled] +\tos_build: PQ2A.190406.000 @2019-05-01 00:00:00+00:00 [labelled] + +Reading data for phone#5c10102#20190501: +Column Family cell_plan +\tdata_plan_10gb: true @2019-05-01 00:00:00+00:00 [labelled] +Column Family stats_summary +\tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 [labelled] +\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 [labelled] +\tos_build: PQ2A.190401.002 @2019-05-01 00:00:00+00:00 [labelled] + +Reading data for phone#5c10102#20190502: +Column Family cell_plan +\tdata_plan_10gb: true @2019-05-01 00:00:00+00:00 [labelled] +Column Family stats_summary +\tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 [labelled] +\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x00 @2019-05-01 00:00:00+00:00 [labelled] +\tos_build: PQ2A.190406.000 @2019-05-01 
00:00:00+00:00 [labelled] + +''' + +snapshots['test_filter_composing_chain 1'] = '''Reading data for phone#4c410523#20190501: +Column Family cell_plan +\tdata_plan_01gb: false @2019-05-01 00:00:00+00:00 +\tdata_plan_05gb: true @2019-05-01 00:00:00+00:00 + +Reading data for phone#4c410523#20190502: +Column Family cell_plan +\tdata_plan_05gb: true @2019-05-01 00:00:00+00:00 + +Reading data for phone#4c410523#20190505: +Column Family cell_plan +\tdata_plan_05gb: true @2019-05-01 00:00:00+00:00 + +Reading data for phone#5c10102#20190501: +Column Family cell_plan +\tdata_plan_10gb: true @2019-05-01 00:00:00+00:00 + +Reading data for phone#5c10102#20190502: +Column Family cell_plan +\tdata_plan_10gb: true @2019-05-01 00:00:00+00:00 + +''' + +snapshots['test_filter_composing_interleave 1'] = '''Reading data for phone#4c410523#20190501: +Column Family cell_plan +\tdata_plan_01gb: true @2019-04-30 23:00:00+00:00 +\tdata_plan_05gb: true @2019-05-01 00:00:00+00:00 +Column Family stats_summary +\tos_build: PQ2A.190405.003 @2019-05-01 00:00:00+00:00 + +Reading data for phone#4c410523#20190502: +Column Family cell_plan +\tdata_plan_05gb: true @2019-05-01 00:00:00+00:00 +Column Family stats_summary +\tos_build: PQ2A.190405.004 @2019-05-01 00:00:00+00:00 + +Reading data for phone#4c410523#20190505: +Column Family cell_plan +\tdata_plan_05gb: true @2019-05-01 00:00:00+00:00 +Column Family stats_summary +\tos_build: PQ2A.190406.000 @2019-05-01 00:00:00+00:00 + +Reading data for phone#5c10102#20190501: +Column Family cell_plan +\tdata_plan_10gb: true @2019-05-01 00:00:00+00:00 +Column Family stats_summary +\tos_build: PQ2A.190401.002 @2019-05-01 00:00:00+00:00 + +Reading data for phone#5c10102#20190502: +Column Family cell_plan +\tdata_plan_10gb: true @2019-05-01 00:00:00+00:00 +Column Family stats_summary +\tos_build: PQ2A.190406.000 @2019-05-01 00:00:00+00:00 + +''' + +snapshots['test_filter_composing_condition 1'] = '''Reading data for phone#4c410523#20190501: +Column Family cell_plan +\tdata_plan_01gb: false @2019-05-01 00:00:00+00:00 [filtered-out] +\tdata_plan_01gb: true @2019-04-30 23:00:00+00:00 [filtered-out] +\tdata_plan_05gb: true @2019-05-01 00:00:00+00:00 [filtered-out] +Column Family stats_summary +\tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 [filtered-out] +\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 [filtered-out] +\tos_build: PQ2A.190405.003 @2019-05-01 00:00:00+00:00 [filtered-out] + +Reading data for phone#4c410523#20190502: +Column Family cell_plan +\tdata_plan_05gb: true @2019-05-01 00:00:00+00:00 [filtered-out] +Column Family stats_summary +\tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 [filtered-out] +\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 [filtered-out] +\tos_build: PQ2A.190405.004 @2019-05-01 00:00:00+00:00 [filtered-out] + +Reading data for phone#4c410523#20190505: +Column Family cell_plan +\tdata_plan_05gb: true @2019-05-01 00:00:00+00:00 [filtered-out] +Column Family stats_summary +\tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x00 @2019-05-01 00:00:00+00:00 [filtered-out] +\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 [filtered-out] +\tos_build: PQ2A.190406.000 @2019-05-01 00:00:00+00:00 [filtered-out] + +Reading data for phone#5c10102#20190501: +Column Family cell_plan +\tdata_plan_10gb: true @2019-05-01 00:00:00+00:00 [passed-filter] +Column Family stats_summary +\tconnected_cell: 
\x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 [passed-filter] +\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 [passed-filter] +\tos_build: PQ2A.190401.002 @2019-05-01 00:00:00+00:00 [passed-filter] + +Reading data for phone#5c10102#20190502: +Column Family cell_plan +\tdata_plan_10gb: true @2019-05-01 00:00:00+00:00 [passed-filter] +Column Family stats_summary +\tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 [passed-filter] +\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x00 @2019-05-01 00:00:00+00:00 [passed-filter] +\tos_build: PQ2A.190406.000 @2019-05-01 00:00:00+00:00 [passed-filter] + +''' diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py new file mode 100644 index 000000000000..b23055f14a65 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py @@ -0,0 +1,225 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import os +from pathlib import Path +import sys + +import nox + + +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING +# DO NOT EDIT THIS FILE EVER! +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING + +# Copy `noxfile_config.py` to your directory and modify it instead. + + +# `TEST_CONFIG` dict is a configuration hook that allows users to +# modify the test configurations. The values here should be in sync +# with `noxfile_config.py`. Users will copy `noxfile_config.py` into +# their directory and modify it. + +TEST_CONFIG = { + # You can opt out from the test for specific Python versions. + 'ignored_versions': ["2.7"], + + # An envvar key for determining the project id to use. Change it + # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a + # build specific Cloud project. You can also use your own string + # to use your own Cloud project. + 'gcloud_project_env': 'GCLOUD_PROJECT', + # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + + # A dictionary you want to inject into your test. Don't put any + # secrets here. These values will override predefined values. + 'envs': {}, +} + + +try: + # Ensure we can import noxfile_config in the project's directory. + sys.path.append('.') + from noxfile_config import TEST_CONFIG_OVERRIDE +except ImportError as e: + print("No user noxfile_config found: detail: {}".format(e)) + TEST_CONFIG_OVERRIDE = {} + +# Update the TEST_CONFIG with the user supplied values. +TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) + + +def get_pytest_env_vars(): + """Returns a dict for pytest invocation.""" + ret = {} + + # Override the GCLOUD_PROJECT and the alias. + env_key = TEST_CONFIG['gcloud_project_env'] + # This should error out if not set. + ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] + ret['GCLOUD_PROJECT'] = os.environ[env_key] + + # Apply user supplied envs. 
+    ret.update(TEST_CONFIG['envs'])
+    return ret
+
+
+# DO NOT EDIT - automatically generated.
+# All versions used to test samples.
+ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"]
+
+# Any default versions that should be ignored.
+IGNORED_VERSIONS = TEST_CONFIG['ignored_versions']
+
+TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS])
+
+INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False))
+#
+# Style Checks
+#
+
+
+def _determine_local_import_names(start_dir):
+    """Determines all import names that should be considered "local".
+
+    This is used when running the linter to ensure that import order is
+    properly checked.
+    """
+    file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)]
+    return [
+        basename
+        for basename, extension in file_ext_pairs
+        if extension == ".py"
+        or os.path.isdir(os.path.join(start_dir, basename))
+        and basename not in ("__pycache__",)
+    ]
+
+
+# Linting with flake8.
+#
+# We ignore the following rules:
+#   E203: whitespace before ‘:’
+#   E266: too many leading ‘#’ for block comment
+#   E501: line too long
+#   I202: Additional newline in a section of imports
+#
+# We also need to specify the rules which are ignored by default:
+# ['E226', 'W504', 'E126', 'E123', 'W503', 'E24', 'E704', 'E121']
+FLAKE8_COMMON_ARGS = [
+    "--show-source",
+    "--builtin=gettext",
+    "--max-complexity=20",
+    "--import-order-style=google",
+    "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py",
+    "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202",
+    "--max-line-length=88",
+]
+
+
+@nox.session
+def lint(session):
+    session.install("flake8", "flake8-import-order")
+
+    local_names = _determine_local_import_names(".")
+    args = FLAKE8_COMMON_ARGS + [
+        "--application-import-names",
+        ",".join(local_names),
+        "."
+    ]
+    session.run("flake8", *args)
+
+
+#
+# Sample Tests
+#
+
+
+PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"]
+
+
+def _session_tests(session, post_install=None):
+    """Runs py.test for a particular project."""
+    if os.path.exists("requirements.txt"):
+        session.install("-r", "requirements.txt")
+
+    if os.path.exists("requirements-test.txt"):
+        session.install("-r", "requirements-test.txt")
+
+    if INSTALL_LIBRARY_FROM_SOURCE:
+        session.install("-e", _get_repo_root())
+
+    if post_install:
+        post_install(session)
+
+    session.run(
+        "pytest",
+        *(PYTEST_COMMON_ARGS + session.posargs),
+        # Pytest will return 5 when no tests are collected. This can happen
+        # on travis where slow and flaky tests are excluded.
+        # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html
+        success_codes=[0, 5],
+        env=get_pytest_env_vars()
+    )
+
+
+@nox.session(python=ALL_VERSIONS)
+def py(session):
+    """Runs py.test for a sample using the specified version of Python."""
+    if session.python in TESTED_VERSIONS:
+        _session_tests(session)
+    else:
+        session.skip("SKIPPED: {} tests are disabled for this sample.".format(
+            session.python
+        ))
+
+
+#
+# Readmegen
+#
+
+
+def _get_repo_root():
+    """ Returns the root folder of the project. """
+    # Get root of this repository. Assume we don't have directories nested deeper than 10 items.
+ p = Path(os.getcwd()) + for i in range(10): + if p is None: + break + if Path(p / ".git").exists(): + return str(p) + p = p.parent + raise Exception("Unable to detect repository root.") + + +GENERATED_READMES = sorted([x for x in Path(".").rglob("*.rst.in")]) + + +@nox.session +@nox.parametrize("path", GENERATED_READMES) +def readmegen(session, path): + """(Re-)generates the readme for a sample.""" + session.install("jinja2", "pyyaml") + dir_ = os.path.dirname(path) + + if os.path.exists(os.path.join(dir_, "requirements.txt")): + session.install("-r", os.path.join(dir_, "requirements.txt")) + + in_file = os.path.join(dir_, "README.rst.in") + session.run( + "python", _get_repo_root() + "/scripts/readme-gen/readme_gen.py", in_file + ) diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/read_snippets.py b/packages/google-cloud-bigtable/samples/snippets/reads/read_snippets.py new file mode 100644 index 000000000000..aceef7cd14e6 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/snippets/reads/read_snippets.py @@ -0,0 +1,192 @@ +#!/usr/bin/env python + +# Copyright 2020, Google LLC +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# [START bigtable_reads_row] +# [START bigtable_reads_row_partial] +# [START bigtable_reads_rows] +# [START bigtable_reads_row_range] +# [START bigtable_reads_row_ranges] +# [START bigtable_reads_prefix] +# [START bigtable_reads_filter] +from google.cloud import bigtable + +# [END bigtable_reads_row] +# [END bigtable_reads_row_partial] +# [END bigtable_reads_rows] +# [END bigtable_reads_row_range] +# [END bigtable_reads_row_ranges] +# [END bigtable_reads_prefix] +# [END bigtable_reads_filter] + +# [START bigtable_reads_row_partial] +# [START bigtable_reads_filter] +import google.cloud.bigtable.row_filters as row_filters +# [END bigtable_reads_row_partial] +# [END bigtable_reads_filter] + + +# [START bigtable_reads_rows] +# [START bigtable_reads_row_range] +# [START bigtable_reads_row_ranges] +# [START bigtable_reads_prefix] +from google.cloud.bigtable.row_set import RowSet + + +# [END bigtable_reads_rows] +# [END bigtable_reads_row_range] +# [END bigtable_reads_row_ranges] +# [END bigtable_reads_prefix] + + +# [START bigtable_reads_row] +def read_row(project_id, instance_id, table_id): + client = bigtable.Client(project=project_id, admin=True) + instance = client.instance(instance_id) + table = instance.table(table_id) + + row_key = "phone#4c410523#20190501" + + row = table.read_row(row_key) + print_row(row) + + +# [END bigtable_reads_row] + +# [START bigtable_reads_row_partial] +def read_row_partial(project_id, instance_id, table_id): + client = bigtable.Client(project=project_id, admin=True) + instance = client.instance(instance_id) + table = instance.table(table_id) + + row_key = "phone#4c410523#20190501" + col_filter = row_filters.ColumnQualifierRegexFilter(b'os_build') + + row = table.read_row(row_key, filter_=col_filter) + print_row(row) + + +# [END bigtable_reads_row_partial] +# [START bigtable_reads_rows] +def read_rows(project_id, 
instance_id, table_id): + client = bigtable.Client(project=project_id, admin=True) + instance = client.instance(instance_id) + table = instance.table(table_id) + + row_set = RowSet() + row_set.add_row_key(b"phone#4c410523#20190501") + row_set.add_row_key(b"phone#4c410523#20190502") + + rows = table.read_rows(row_set=row_set) + for row in rows: + print_row(row) + + +# [END bigtable_reads_rows] +# [START bigtable_reads_row_range] +def read_row_range(project_id, instance_id, table_id): + client = bigtable.Client(project=project_id, admin=True) + instance = client.instance(instance_id) + table = instance.table(table_id) + + row_set = RowSet() + row_set.add_row_range_from_keys( + start_key=b"phone#4c410523#20190501", + end_key=b"phone#4c410523#201906201") + + rows = table.read_rows(row_set=row_set) + for row in rows: + print_row(row) + + +# [END bigtable_reads_row_range] +# [START bigtable_reads_row_ranges] +def read_row_ranges(project_id, instance_id, table_id): + client = bigtable.Client(project=project_id, admin=True) + instance = client.instance(instance_id) + table = instance.table(table_id) + + row_set = RowSet() + row_set.add_row_range_from_keys( + start_key=b"phone#4c410523#20190501", + end_key=b"phone#4c410523#201906201") + row_set.add_row_range_from_keys( + start_key=b"phone#5c10102#20190501", + end_key=b"phone#5c10102#201906201") + + rows = table.read_rows(row_set=row_set) + for row in rows: + print_row(row) + + +# [END bigtable_reads_row_ranges] +# [START bigtable_reads_prefix] +def read_prefix(project_id, instance_id, table_id): + client = bigtable.Client(project=project_id, admin=True) + instance = client.instance(instance_id) + table = instance.table(table_id) + prefix = "phone#" + end_key = prefix[:-1] + chr(ord(prefix[-1]) + 1) + + row_set = RowSet() + row_set.add_row_range_from_keys(prefix.encode("utf-8"), + end_key.encode("utf-8")) + + rows = table.read_rows(row_set=row_set) + for row in rows: + print_row(row) + + +# [END bigtable_reads_prefix] +# [START bigtable_reads_filter] +def read_filter(project_id, instance_id, table_id): + client = bigtable.Client(project=project_id, admin=True) + instance = client.instance(instance_id) + table = instance.table(table_id) + + rows = table.read_rows(filter_=row_filters.ValueRegexFilter(b"PQ2A.*$")) + for row in rows: + print_row(row) + + +# [END bigtable_reads_filter] + + +# [START bigtable_reads_row] +# [START bigtable_reads_row_partial] +# [START bigtable_reads_rows] +# [START bigtable_reads_row_range] +# [START bigtable_reads_row_ranges] +# [START bigtable_reads_prefix] +# [START bigtable_reads_filter] +def print_row(row): + print("Reading data for {}:".format(row.row_key.decode('utf-8'))) + for cf, cols in sorted(row.cells.items()): + print("Column Family {}".format(cf)) + for col, cells in sorted(cols.items()): + for cell in cells: + labels = " [{}]".format(",".join(cell.labels)) \ + if len(cell.labels) else "" + print( + "\t{}: {} @{}{}".format(col.decode('utf-8'), + cell.value.decode('utf-8'), + cell.timestamp, labels)) + print("") +# [END bigtable_reads_row] +# [END bigtable_reads_row_partial] +# [END bigtable_reads_rows] +# [END bigtable_reads_row_range] +# [END bigtable_reads_row_ranges] +# [END bigtable_reads_prefix] +# [END bigtable_reads_filter] diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/reads_test.py b/packages/google-cloud-bigtable/samples/snippets/reads/reads_test.py new file mode 100644 index 000000000000..63fb3f2f3bad --- /dev/null +++ 
b/packages/google-cloud-bigtable/samples/snippets/reads/reads_test.py @@ -0,0 +1,121 @@ +# Copyright 2020, Google LLC +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import datetime +import os +import uuid + +from google.cloud import bigtable +import pytest + +import read_snippets + + +PROJECT = os.environ['GCLOUD_PROJECT'] +BIGTABLE_INSTANCE = os.environ['BIGTABLE_INSTANCE'] +TABLE_ID_PREFIX = 'mobile-time-series-{}' + + +@pytest.fixture(scope="module", autouse=True) +def table_id(): + client = bigtable.Client(project=PROJECT, admin=True) + instance = client.instance(BIGTABLE_INSTANCE) + + table_id = TABLE_ID_PREFIX.format(str(uuid.uuid4())[:16]) + table = instance.table(table_id) + if table.exists(): + table.delete() + + table.create(column_families={'stats_summary': None}) + + # table = instance.table(table_id) + + timestamp = datetime.datetime(2019, 5, 1) + rows = [ + table.direct_row("phone#4c410523#20190501"), + table.direct_row("phone#4c410523#20190502"), + table.direct_row("phone#4c410523#20190505"), + table.direct_row("phone#5c10102#20190501"), + table.direct_row("phone#5c10102#20190502"), + ] + + rows[0].set_cell("stats_summary", "connected_cell", 1, timestamp) + rows[0].set_cell("stats_summary", "connected_wifi", 1, timestamp) + rows[0].set_cell("stats_summary", "os_build", "PQ2A.190405.003", timestamp) + rows[1].set_cell("stats_summary", "connected_cell", 1, timestamp) + rows[1].set_cell("stats_summary", "connected_wifi", 1, timestamp) + rows[1].set_cell("stats_summary", "os_build", "PQ2A.190405.004", timestamp) + rows[2].set_cell("stats_summary", "connected_cell", 0, timestamp) + rows[2].set_cell("stats_summary", "connected_wifi", 1, timestamp) + rows[2].set_cell("stats_summary", "os_build", "PQ2A.190406.000", timestamp) + rows[3].set_cell("stats_summary", "connected_cell", 1, timestamp) + rows[3].set_cell("stats_summary", "connected_wifi", 1, timestamp) + rows[3].set_cell("stats_summary", "os_build", "PQ2A.190401.002", timestamp) + rows[4].set_cell("stats_summary", "connected_cell", 1, timestamp) + rows[4].set_cell("stats_summary", "connected_wifi", 0, timestamp) + rows[4].set_cell("stats_summary", "os_build", "PQ2A.190406.000", timestamp) + + table.mutate_rows(rows) + + yield table_id + + table.delete() + + +def test_read_row(capsys, snapshot, table_id): + read_snippets.read_row(PROJECT, BIGTABLE_INSTANCE, table_id) + + out, _ = capsys.readouterr() + snapshot.assert_match(out) + + +def test_read_row_partial(capsys, snapshot, table_id): + read_snippets.read_row_partial(PROJECT, BIGTABLE_INSTANCE, table_id) + + out, _ = capsys.readouterr() + snapshot.assert_match(out) + + +def test_read_rows(capsys, snapshot, table_id): + read_snippets.read_rows(PROJECT, BIGTABLE_INSTANCE, table_id) + + out, _ = capsys.readouterr() + snapshot.assert_match(out) + + +def test_read_row_range(capsys, snapshot, table_id): + read_snippets.read_row_range(PROJECT, BIGTABLE_INSTANCE, table_id) + + out, _ = capsys.readouterr() + snapshot.assert_match(out) + + +def test_read_row_ranges(capsys, snapshot, 
table_id): + read_snippets.read_row_ranges(PROJECT, BIGTABLE_INSTANCE, table_id) + + out, _ = capsys.readouterr() + snapshot.assert_match(out) + + +def test_read_prefix(capsys, snapshot, table_id): + read_snippets.read_prefix(PROJECT, BIGTABLE_INSTANCE, table_id) + + out, _ = capsys.readouterr() + snapshot.assert_match(out) + + +def test_read_filter(capsys, snapshot, table_id): + read_snippets.read_filter(PROJECT, BIGTABLE_INSTANCE, table_id) + + out, _ = capsys.readouterr() + snapshot.assert_match(out) diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt new file mode 100644 index 000000000000..781d4326c947 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt @@ -0,0 +1 @@ +pytest==5.3.2 diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt new file mode 100755 index 000000000000..a64e924f1be3 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt @@ -0,0 +1,2 @@ +google-cloud-bigtable==1.2.1 +snapshottest==0.5.1 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/snapshots/__init__.py b/packages/google-cloud-bigtable/samples/snippets/reads/snapshots/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/snapshots/snap_reads_test.py b/packages/google-cloud-bigtable/samples/snippets/reads/snapshots/snap_reads_test.py new file mode 100644 index 000000000000..f45e98f2e57c --- /dev/null +++ b/packages/google-cloud-bigtable/samples/snippets/reads/snapshots/snap_reads_test.py @@ -0,0 +1,142 @@ +# -*- coding: utf-8 -*- +# snapshottest: v1 - https://goo.gl/zC4yUc +from __future__ import unicode_literals + +from snapshottest import Snapshot + + +snapshots = Snapshot() + +snapshots['test_read_row_partial 1'] = '''Reading data for phone#4c410523#20190501: +Column Family stats_summary +\tos_build: PQ2A.190405.003 @2019-05-01 00:00:00+00:00 + +''' + +snapshots['test_read_rows 1'] = '''Reading data for phone#4c410523#20190501: +Column Family stats_summary +\tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tos_build: PQ2A.190405.003 @2019-05-01 00:00:00+00:00 + +Reading data for phone#4c410523#20190502: +Column Family stats_summary +\tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tos_build: PQ2A.190405.004 @2019-05-01 00:00:00+00:00 + +''' + +snapshots['test_read_row_range 1'] = '''Reading data for phone#4c410523#20190501: +Column Family stats_summary +\tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tos_build: PQ2A.190405.003 @2019-05-01 00:00:00+00:00 + +Reading data for phone#4c410523#20190502: +Column Family stats_summary +\tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tos_build: PQ2A.190405.004 @2019-05-01 00:00:00+00:00 + +Reading data for phone#4c410523#20190505: +Column Family stats_summary +\tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x00 @2019-05-01 
00:00:00+00:00 +\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tos_build: PQ2A.190406.000 @2019-05-01 00:00:00+00:00 + +''' + +snapshots['test_read_row_ranges 1'] = '''Reading data for phone#4c410523#20190501: +Column Family stats_summary +\tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tos_build: PQ2A.190405.003 @2019-05-01 00:00:00+00:00 + +Reading data for phone#4c410523#20190502: +Column Family stats_summary +\tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tos_build: PQ2A.190405.004 @2019-05-01 00:00:00+00:00 + +Reading data for phone#4c410523#20190505: +Column Family stats_summary +\tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x00 @2019-05-01 00:00:00+00:00 +\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tos_build: PQ2A.190406.000 @2019-05-01 00:00:00+00:00 + +Reading data for phone#5c10102#20190501: +Column Family stats_summary +\tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tos_build: PQ2A.190401.002 @2019-05-01 00:00:00+00:00 + +Reading data for phone#5c10102#20190502: +Column Family stats_summary +\tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x00 @2019-05-01 00:00:00+00:00 +\tos_build: PQ2A.190406.000 @2019-05-01 00:00:00+00:00 + +''' + +snapshots['test_read_prefix 1'] = '''Reading data for phone#4c410523#20190501: +Column Family stats_summary +\tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tos_build: PQ2A.190405.003 @2019-05-01 00:00:00+00:00 + +Reading data for phone#4c410523#20190502: +Column Family stats_summary +\tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tos_build: PQ2A.190405.004 @2019-05-01 00:00:00+00:00 + +Reading data for phone#4c410523#20190505: +Column Family stats_summary +\tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x00 @2019-05-01 00:00:00+00:00 +\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tos_build: PQ2A.190406.000 @2019-05-01 00:00:00+00:00 + +Reading data for phone#5c10102#20190501: +Column Family stats_summary +\tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tos_build: PQ2A.190401.002 @2019-05-01 00:00:00+00:00 + +Reading data for phone#5c10102#20190502: +Column Family stats_summary +\tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x00 @2019-05-01 00:00:00+00:00 +\tos_build: PQ2A.190406.000 @2019-05-01 00:00:00+00:00 + +''' + +snapshots['test_read_filter 1'] = '''Reading data for phone#4c410523#20190501: +Column Family stats_summary +\tos_build: PQ2A.190405.003 @2019-05-01 00:00:00+00:00 + +Reading data for phone#4c410523#20190502: +Column Family stats_summary +\tos_build: PQ2A.190405.004 @2019-05-01 00:00:00+00:00 + +Reading data for phone#4c410523#20190505: +Column Family stats_summary +\tos_build: PQ2A.190406.000 @2019-05-01 00:00:00+00:00 + 
+Reading data for phone#5c10102#20190501: +Column Family stats_summary +\tos_build: PQ2A.190401.002 @2019-05-01 00:00:00+00:00 + +Reading data for phone#5c10102#20190502: +Column Family stats_summary +\tos_build: PQ2A.190406.000 @2019-05-01 00:00:00+00:00 + +''' + +snapshots['test_read_row 1'] = '''Reading data for phone#4c410523#20190501: +Column Family stats_summary +\tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 +\tos_build: PQ2A.190405.003 @2019-05-01 00:00:00+00:00 + +''' diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/__init__.py b/packages/google-cloud-bigtable/samples/snippets/writes/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py new file mode 100644 index 000000000000..b23055f14a65 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py @@ -0,0 +1,225 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import os +from pathlib import Path +import sys + +import nox + + +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING +# DO NOT EDIT THIS FILE EVER! +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING + +# Copy `noxfile_config.py` to your directory and modify it instead. + + +# `TEST_CONFIG` dict is a configuration hook that allows users to +# modify the test configurations. The values here should be in sync +# with `noxfile_config.py`. Users will copy `noxfile_config.py` into +# their directory and modify it. + +TEST_CONFIG = { + # You can opt out from the test for specific Python versions. + 'ignored_versions': ["2.7"], + + # An envvar key for determining the project id to use. Change it + # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a + # build specific Cloud project. You can also use your own string + # to use your own Cloud project. + 'gcloud_project_env': 'GCLOUD_PROJECT', + # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + + # A dictionary you want to inject into your test. Don't put any + # secrets here. These values will override predefined values. + 'envs': {}, +} + + +try: + # Ensure we can import noxfile_config in the project's directory. + sys.path.append('.') + from noxfile_config import TEST_CONFIG_OVERRIDE +except ImportError as e: + print("No user noxfile_config found: detail: {}".format(e)) + TEST_CONFIG_OVERRIDE = {} + +# Update the TEST_CONFIG with the user supplied values. +TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) + + +def get_pytest_env_vars(): + """Returns a dict for pytest invocation.""" + ret = {} + + # Override the GCLOUD_PROJECT and the alias. + env_key = TEST_CONFIG['gcloud_project_env'] + # This should error out if not set. 
+    ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key]
+    ret['GCLOUD_PROJECT'] = os.environ[env_key]
+
+    # Apply user supplied envs.
+    ret.update(TEST_CONFIG['envs'])
+    return ret
+
+
+# DO NOT EDIT - automatically generated.
+# All versions used to test samples.
+ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"]
+
+# Any default versions that should be ignored.
+IGNORED_VERSIONS = TEST_CONFIG['ignored_versions']
+
+TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS])
+
+INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False))
+#
+# Style Checks
+#
+
+
+def _determine_local_import_names(start_dir):
+    """Determines all import names that should be considered "local".
+
+    This is used when running the linter to ensure that import order is
+    properly checked.
+    """
+    file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)]
+    return [
+        basename
+        for basename, extension in file_ext_pairs
+        if extension == ".py"
+        or os.path.isdir(os.path.join(start_dir, basename))
+        and basename not in ("__pycache__",)
+    ]
+
+
+# Linting with flake8.
+#
+# We ignore the following rules:
+#   E203: whitespace before ‘:’
+#   E266: too many leading ‘#’ for block comment
+#   E501: line too long
+#   I202: Additional newline in a section of imports
+#
+# We also need to specify the rules which are ignored by default:
+# ['E226', 'W504', 'E126', 'E123', 'W503', 'E24', 'E704', 'E121']
+FLAKE8_COMMON_ARGS = [
+    "--show-source",
+    "--builtin=gettext",
+    "--max-complexity=20",
+    "--import-order-style=google",
+    "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py",
+    "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202",
+    "--max-line-length=88",
+]
+
+
+@nox.session
+def lint(session):
+    session.install("flake8", "flake8-import-order")
+
+    local_names = _determine_local_import_names(".")
+    args = FLAKE8_COMMON_ARGS + [
+        "--application-import-names",
+        ",".join(local_names),
+        "."
+    ]
+    session.run("flake8", *args)
+
+
+#
+# Sample Tests
+#
+
+
+PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"]
+
+
+def _session_tests(session, post_install=None):
+    """Runs py.test for a particular project."""
+    if os.path.exists("requirements.txt"):
+        session.install("-r", "requirements.txt")
+
+    if os.path.exists("requirements-test.txt"):
+        session.install("-r", "requirements-test.txt")
+
+    if INSTALL_LIBRARY_FROM_SOURCE:
+        session.install("-e", _get_repo_root())
+
+    if post_install:
+        post_install(session)
+
+    session.run(
+        "pytest",
+        *(PYTEST_COMMON_ARGS + session.posargs),
+        # Pytest will return 5 when no tests are collected. This can happen
+        # on travis where slow and flaky tests are excluded.
+        # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html
+        success_codes=[0, 5],
+        env=get_pytest_env_vars()
+    )
+
+
+@nox.session(python=ALL_VERSIONS)
+def py(session):
+    """Runs py.test for a sample using the specified version of Python."""
+    if session.python in TESTED_VERSIONS:
+        _session_tests(session)
+    else:
+        session.skip("SKIPPED: {} tests are disabled for this sample.".format(
+            session.python
+        ))
+
+
+#
+# Readmegen
+#
+
+
+def _get_repo_root():
+    """ Returns the root folder of the project. """
+    # Get root of this repository. Assume we don't have directories nested deeper than 10 items.
+ p = Path(os.getcwd()) + for i in range(10): + if p is None: + break + if Path(p / ".git").exists(): + return str(p) + p = p.parent + raise Exception("Unable to detect repository root.") + + +GENERATED_READMES = sorted([x for x in Path(".").rglob("*.rst.in")]) + + +@nox.session +@nox.parametrize("path", GENERATED_READMES) +def readmegen(session, path): + """(Re-)generates the readme for a sample.""" + session.install("jinja2", "pyyaml") + dir_ = os.path.dirname(path) + + if os.path.exists(os.path.join(dir_, "requirements.txt")): + session.install("-r", os.path.join(dir_, "requirements.txt")) + + in_file = os.path.join(dir_, "README.rst.in") + session.run( + "python", _get_repo_root() + "/scripts/readme-gen/readme_gen.py", in_file + ) diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt new file mode 100644 index 000000000000..8855f3cf1f88 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt @@ -0,0 +1,2 @@ +backoff==1.10.0 +pytest==5.3.2 diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt new file mode 100755 index 000000000000..618a0d90714d --- /dev/null +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt @@ -0,0 +1 @@ +google-cloud-bigtable==1.2.1 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/write_batch.py b/packages/google-cloud-bigtable/samples/snippets/writes/write_batch.py new file mode 100644 index 000000000000..ecc8f273b0a6 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/snippets/writes/write_batch.py @@ -0,0 +1,55 @@ +#!/usr/bin/env python + +# Copyright 2019, Google LLC +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# [START bigtable_writes_batch] +import datetime + +from google.cloud import bigtable + + +def write_batch(project_id, instance_id, table_id): + client = bigtable.Client(project=project_id, admin=True) + instance = client.instance(instance_id) + table = instance.table(table_id) + + timestamp = datetime.datetime.utcnow() + column_family_id = "stats_summary" + + rows = [table.direct_row("tablet#a0b81f74#20190501"), + table.direct_row("tablet#a0b81f74#20190502")] + + rows[0].set_cell(column_family_id, + "connected_wifi", + 1, + timestamp) + rows[0].set_cell(column_family_id, + "os_build", + "12155.0.0-rc1", + timestamp) + rows[1].set_cell(column_family_id, + "connected_wifi", + 1, + timestamp) + rows[1].set_cell(column_family_id, + "os_build", + "12145.0.0-rc6", + timestamp) + + response = table.mutate_rows(rows) + for i, status in enumerate(response): + if status.code != 0: + print("Error writing row: {}".format(status.message)) + + print('Successfully wrote 2 rows.') +# [END bigtable_writes_batch] diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/write_conditionally.py b/packages/google-cloud-bigtable/samples/snippets/writes/write_conditionally.py new file mode 100644 index 000000000000..5f3d4d607dc8 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/snippets/writes/write_conditionally.py @@ -0,0 +1,44 @@ +#!/usr/bin/env python + +# Copyright 2019, Google LLC +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# [START bigtable_writes_conditional] +import datetime + +from google.cloud import bigtable +from google.cloud.bigtable import row_filters + + +def write_conditional(project_id, instance_id, table_id): + client = bigtable.Client(project=project_id, admin=True) + instance = client.instance(instance_id) + table = instance.table(table_id) + + timestamp = datetime.datetime.utcnow() + column_family_id = "stats_summary" + + row_key = "phone#4c410523#20190501" + + row_filter = row_filters.RowFilterChain( + filters=[row_filters.FamilyNameRegexFilter(column_family_id), + row_filters.ColumnQualifierRegexFilter('os_build'), + row_filters.ValueRegexFilter("PQ2A\\..*")]) + row = table.conditional_row(row_key, filter_=row_filter) + row.set_cell(column_family_id, + "os_name", + "android", + timestamp) + row.commit() + + print('Successfully updated row\'s os_name.') +# [END bigtable_writes_conditional] diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/write_increment.py b/packages/google-cloud-bigtable/samples/snippets/writes/write_increment.py new file mode 100644 index 000000000000..73ce52c2f6d2 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/snippets/writes/write_increment.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python + +# Copyright 2019, Google LLC +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# [START bigtable_writes_increment] +from google.cloud import bigtable + + +def write_increment(project_id, instance_id, table_id): + client = bigtable.Client(project=project_id, admin=True) + instance = client.instance(instance_id) + table = instance.table(table_id) + + column_family_id = "stats_summary" + + row_key = "phone#4c410523#20190501" + row = table.append_row(row_key) + + # Decrement the connected_wifi value by 1. + row.increment_cell_value(column_family_id, "connected_wifi", -1) + row.commit() + + print('Successfully updated row {}.'.format(row_key)) +# [END bigtable_writes_increment] diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/write_simple.py b/packages/google-cloud-bigtable/samples/snippets/writes/write_simple.py new file mode 100644 index 000000000000..b4222d234798 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/snippets/writes/write_simple.py @@ -0,0 +1,49 @@ +#!/usr/bin/env python + +# Copyright 2019, Google LLC +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# [START bigtable_writes_simple] +import datetime + +from google.cloud import bigtable + + +def write_simple(project_id, instance_id, table_id): + client = bigtable.Client(project=project_id, admin=True) + instance = client.instance(instance_id) + table = instance.table(table_id) + + timestamp = datetime.datetime.utcnow() + column_family_id = "stats_summary" + + row_key = "phone#4c410523#20190501" + + row = table.direct_row(row_key) + row.set_cell(column_family_id, + "connected_cell", + 1, + timestamp) + row.set_cell(column_family_id, + "connected_wifi", + 1, + timestamp) + row.set_cell(column_family_id, + "os_build", + "PQ2A.190405.003", + timestamp) + + row.commit() + + print('Successfully wrote row {}.'.format(row_key)) +# [END bigtable_writes_simple] diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/writes_test.py b/packages/google-cloud-bigtable/samples/snippets/writes/writes_test.py new file mode 100644 index 000000000000..8420a3eebd7e --- /dev/null +++ b/packages/google-cloud-bigtable/samples/snippets/writes/writes_test.py @@ -0,0 +1,94 @@ +# Copyright 2018 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import uuid + +import backoff +from google.api_core.exceptions import DeadlineExceeded +from google.cloud import bigtable +import pytest + +from .write_batch import write_batch +from .write_conditionally import write_conditional +from .write_increment import write_increment +from .write_simple import write_simple + + +PROJECT = os.environ['GCLOUD_PROJECT'] +BIGTABLE_INSTANCE = os.environ['BIGTABLE_INSTANCE'] +TABLE_ID_PREFIX = 'mobile-time-series-{}' + + +@pytest.fixture +def bigtable_client(): + return bigtable.Client(project=PROJECT, admin=True) + + +@pytest.fixture +def bigtable_instance(bigtable_client): + return bigtable_client.instance(BIGTABLE_INSTANCE) + + +@pytest.fixture +def table_id(bigtable_instance): + table_id = TABLE_ID_PREFIX.format(str(uuid.uuid4())[:16]) + table = bigtable_instance.table(table_id) + if table.exists(): + table.delete() + + column_family_id = 'stats_summary' + column_families = {column_family_id: None} + table.create(column_families=column_families) + + yield table_id + + table.delete() + + +def test_writes(capsys, table_id): + + # `row.commit()` sometimes ends up with DeadlineExceeded, so now + # we put retries with a hard deadline. + @backoff.on_exception(backoff.expo, DeadlineExceeded, max_time=60) + def _write_simple(): + write_simple(PROJECT, BIGTABLE_INSTANCE, table_id) + + _write_simple() + out, _ = capsys.readouterr() + assert 'Successfully wrote row' in out + + @backoff.on_exception(backoff.expo, DeadlineExceeded, max_time=60) + def _write_increment(): + write_increment(PROJECT, BIGTABLE_INSTANCE, table_id) + + _write_increment() + out, _ = capsys.readouterr() + assert 'Successfully updated row' in out + + @backoff.on_exception(backoff.expo, DeadlineExceeded, max_time=60) + def _write_conditional(): + write_conditional(PROJECT, BIGTABLE_INSTANCE, table_id) + + _write_conditional() + out, _ = capsys.readouterr() + assert 'Successfully updated row\'s os_name' in out + + @backoff.on_exception(backoff.expo, DeadlineExceeded, max_time=60) + def _write_batch(): + write_batch(PROJECT, BIGTABLE_INSTANCE, table_id) + + _write_batch() + out, _ = capsys.readouterr() + assert 'Successfully wrote 2 rows' in out diff --git a/packages/google-cloud-bigtable/samples/tableadmin/README.rst b/packages/google-cloud-bigtable/samples/tableadmin/README.rst new file mode 100644 index 000000000000..f7f83d6d2a1b --- /dev/null +++ b/packages/google-cloud-bigtable/samples/tableadmin/README.rst @@ -0,0 +1,115 @@ +.. This file is automatically generated. Do not edit this file directly. + +Google Cloud Bigtable Python Samples +=============================================================================== + +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=bigtable/hello/README.rst + + +This directory contains samples for Google Cloud Bigtable. `Google Cloud Bigtable`_ is Google's NoSQL Big Data database service. It's the same database that powers many core Google services, including Search, Analytics, Maps, and Gmail. + + + + +.. _Google Cloud Bigtable: https://cloud.google.com/bigtable/docs + +Setup +------------------------------------------------------------------------------- + + +Authentication +++++++++++++++ + +This sample requires you to have authentication setup. 
Refer to the +`Authentication Getting Started Guide`_ for instructions on setting up +credentials for applications. + +.. _Authentication Getting Started Guide: + https://cloud.google.com/docs/authentication/getting-started + +Install Dependencies +++++++++++++++++++++ + +#. Clone python-docs-samples and change directory to the sample directory you want to use. + + .. code-block:: bash + + $ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git + +#. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. + + .. _Python Development Environment Setup Guide: + https://cloud.google.com/python/setup + +#. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. + + .. code-block:: bash + + $ virtualenv env + $ source env/bin/activate + +#. Install the dependencies needed to run the samples. + + .. code-block:: bash + + $ pip install -r requirements.txt + +.. _pip: https://pip.pypa.io/ +.. _virtualenv: https://virtualenv.pypa.io/ + +Samples +------------------------------------------------------------------------------- + +Basic example ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=bigtable/hello/tableadmin.py,bigtable/hello/README.rst + + + + +To run this sample: + +.. code-block:: bash + + $ python tableadmin.py + + usage: tableadmin.py [-h] [run] [delete] [--table TABLE] project_id instance_id + + Demonstrates how to connect to Cloud Bigtable and run some basic operations. + Prerequisites: - Create a Cloud Bigtable cluster. + https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google + Application Default Credentials. + https://developers.google.com/identity/protocols/application-default- + credentials + + positional arguments: + project_id Your Cloud Platform project ID. + instance_id ID of the Cloud Bigtable instance to connect to. + + optional arguments: + -h, --help show this help message and exit + --table TABLE Table to create and destroy. (default: Hello-Bigtable) + + + + + +The client library +------------------------------------------------------------------------------- + +This sample uses the `Google Cloud Client Library for Python`_. +You can read the documentation for more details on API usage and use GitHub +to `browse the source`_ and `report issues`_. + +.. _Google Cloud Client Library for Python: + https://googlecloudplatform.github.io/google-cloud-python/ +.. _browse the source: + https://github.com/GoogleCloudPlatform/google-cloud-python +.. _report issues: + https://github.com/GoogleCloudPlatform/google-cloud-python/issues + + +.. _Google Cloud SDK: https://cloud.google.com/sdk/ \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/tableadmin/README.rst.in b/packages/google-cloud-bigtable/samples/tableadmin/README.rst.in new file mode 100644 index 000000000000..7fd37641969a --- /dev/null +++ b/packages/google-cloud-bigtable/samples/tableadmin/README.rst.in @@ -0,0 +1,23 @@ +# This file is used to generate README.rst + +product: + name: Google Cloud Bigtable and run some basic operations. 
+ short_name: Cloud Bigtable + url: https://cloud.google.com/bigtable/docs + description: > + `Google Cloud Bigtable`_ is Google's NoSQL Big Data database service. It's + the same database that powers many core Google services, including Search, + Analytics, Maps, and Gmail. + +setup: +- auth +- install_deps + +samples: +- name: Basic example with Bigtable Column family and GC rules. + file: tableadmin.py + show_help: true + +cloud_client_library: true + +folder: bigtable/tableadmin \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py b/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py new file mode 100644 index 000000000000..b23055f14a65 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py @@ -0,0 +1,225 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import os +from pathlib import Path +import sys + +import nox + + +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING +# DO NOT EDIT THIS FILE EVER! +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING + +# Copy `noxfile_config.py` to your directory and modify it instead. + + +# `TEST_CONFIG` dict is a configuration hook that allows users to +# modify the test configurations. The values here should be in sync +# with `noxfile_config.py`. Users will copy `noxfile_config.py` into +# their directory and modify it. + +TEST_CONFIG = { + # You can opt out from the test for specific Python versions. + 'ignored_versions': ["2.7"], + + # An envvar key for determining the project id to use. Change it + # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a + # build specific Cloud project. You can also use your own string + # to use your own Cloud project. + 'gcloud_project_env': 'GCLOUD_PROJECT', + # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + + # A dictionary you want to inject into your test. Don't put any + # secrets here. These values will override predefined values. + 'envs': {}, +} + + +try: + # Ensure we can import noxfile_config in the project's directory. + sys.path.append('.') + from noxfile_config import TEST_CONFIG_OVERRIDE +except ImportError as e: + print("No user noxfile_config found: detail: {}".format(e)) + TEST_CONFIG_OVERRIDE = {} + +# Update the TEST_CONFIG with the user supplied values. +TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) + + +def get_pytest_env_vars(): + """Returns a dict for pytest invocation.""" + ret = {} + + # Override the GCLOUD_PROJECT and the alias. + env_key = TEST_CONFIG['gcloud_project_env'] + # This should error out if not set. + ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] + ret['GCLOUD_PROJECT'] = os.environ[env_key] + + # Apply user supplied envs. + ret.update(TEST_CONFIG['envs']) + return ret + + +# DO NOT EDIT - automatically generated. +# All versions used to tested samples. 
+ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] + +# Any default versions that should be ignored. +IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] + +TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) + +INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False)) +# +# Style Checks +# + + +def _determine_local_import_names(start_dir): + """Determines all import names that should be considered "local". + + This is used when running the linter to insure that import order is + properly checked. + """ + file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)] + return [ + basename + for basename, extension in file_ext_pairs + if extension == ".py" + or os.path.isdir(os.path.join(start_dir, basename)) + and basename not in ("__pycache__") + ] + + +# Linting with flake8. +# +# We ignore the following rules: +# E203: whitespace before ‘:’ +# E266: too many leading ‘#’ for block comment +# E501: line too long +# I202: Additional newline in a section of imports +# +# We also need to specify the rules which are ignored by default: +# ['E226', 'W504', 'E126', 'E123', 'W503', 'E24', 'E704', 'E121'] +FLAKE8_COMMON_ARGS = [ + "--show-source", + "--builtin=gettext", + "--max-complexity=20", + "--import-order-style=google", + "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py", + "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202", + "--max-line-length=88", +] + + +@nox.session +def lint(session): + session.install("flake8", "flake8-import-order") + + local_names = _determine_local_import_names(".") + args = FLAKE8_COMMON_ARGS + [ + "--application-import-names", + ",".join(local_names), + "." + ] + session.run("flake8", *args) + + +# +# Sample Tests +# + + +PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] + + +def _session_tests(session, post_install=None): + """Runs py.test for a particular project.""" + if os.path.exists("requirements.txt"): + session.install("-r", "requirements.txt") + + if os.path.exists("requirements-test.txt"): + session.install("-r", "requirements-test.txt") + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. + # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars() + ) + + +@nox.session(python=ALL_VERSIONS) +def py(session): + """Runs py.test for a sample using the specified version of Python.""" + if session.python in TESTED_VERSIONS: + _session_tests(session) + else: + session.skip("SKIPPED: {} tests are disabled for this sample.".format( + session.python + )) + + +# +# Readmegen +# + + +def _get_repo_root(): + """ Returns the root folder of the project. """ + # Get root of this repository. Assume we don't have directories nested deeper than 10 items. 
+ p = Path(os.getcwd()) + for i in range(10): + if p is None: + break + if Path(p / ".git").exists(): + return str(p) + p = p.parent + raise Exception("Unable to detect repository root.") + + +GENERATED_READMES = sorted([x for x in Path(".").rglob("*.rst.in")]) + + +@nox.session +@nox.parametrize("path", GENERATED_READMES) +def readmegen(session, path): + """(Re-)generates the readme for a sample.""" + session.install("jinja2", "pyyaml") + dir_ = os.path.dirname(path) + + if os.path.exists(os.path.join(dir_, "requirements.txt")): + session.install("-r", os.path.join(dir_, "requirements.txt")) + + in_file = os.path.join(dir_, "README.rst.in") + session.run( + "python", _get_repo_root() + "/scripts/readme-gen/readme_gen.py", in_file + ) diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt new file mode 100644 index 000000000000..781d4326c947 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt @@ -0,0 +1 @@ +pytest==5.3.2 diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt new file mode 100755 index 000000000000..2771c2e4c4d0 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt @@ -0,0 +1 @@ +google-cloud-bigtable==1.2.1 diff --git a/packages/google-cloud-bigtable/samples/tableadmin/tableadmin.py b/packages/google-cloud-bigtable/samples/tableadmin/tableadmin.py new file mode 100644 index 000000000000..29551a7f390c --- /dev/null +++ b/packages/google-cloud-bigtable/samples/tableadmin/tableadmin.py @@ -0,0 +1,283 @@ +#!/usr/bin/env python + +# Copyright 2018, Google LLC +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Demonstrates how to connect to Cloud Bigtable and run some basic operations. +# http://www.apache.org/licenses/LICENSE-2.0 +Prerequisites: +- Create a Cloud Bigtable cluster. + https://cloud.google.com/bigtable/docs/creating-cluster +- Set your Google Application Default Credentials. + https://developers.google.com/identity/protocols/application-default-credentials + +Operations performed: +- Create a Cloud Bigtable table. +- List tables for a Cloud Bigtable instance. +- Print metadata of the newly created table. +- Create Column Families with different GC rules. + - GC Rules like: MaxAge, MaxVersions, Union, Intersection and Nested. +- Delete a Bigtable table. +""" + +import argparse +import datetime + +from google.cloud import bigtable +from google.cloud.bigtable import column_family + + +def create_table(project_id, instance_id, table_id): + ''' Create a Bigtable table + + :type project_id: str + :param project_id: Project id of the client. + + :type instance_id: str + :param instance_id: Instance of the client. + + :type table_id: str + :param table_id: Table id to create table. 
+ ''' + + client = bigtable.Client(project=project_id, admin=True) + instance = client.instance(instance_id) + table = instance.table(table_id) + + # Check whether table exists in an instance. + # Create table if it does not exists. + print('Checking if table {} exists...'.format(table_id)) + if table.exists(): + print('Table {} already exists.'.format(table_id)) + else: + print('Creating the {} table.'.format(table_id)) + table.create() + print('Created table {}.'.format(table_id)) + + return client, instance, table + + +def run_table_operations(project_id, instance_id, table_id): + ''' Create a Bigtable table and perform basic operations on it + + :type project_id: str + :param project_id: Project id of the client. + + :type instance_id: str + :param instance_id: Instance of the client. + + :type table_id: str + :param table_id: Table id to create table. + ''' + + client, instance, table = create_table(project_id, instance_id, table_id) + + # [START bigtable_list_tables] + tables = instance.list_tables() + print('Listing tables in current project...') + if tables != []: + for tbl in tables: + print(tbl.table_id) + else: + print('No table exists in current project...') + # [END bigtable_list_tables] + + # [START bigtable_create_family_gc_max_age] + print('Creating column family cf1 with with MaxAge GC Rule...') + # Create a column family with GC policy : maximum age + # where age = current time minus cell timestamp + + # Define the GC rule to retain data with max age of 5 days + max_age_rule = column_family.MaxAgeGCRule(datetime.timedelta(days=5)) + + column_family1 = table.column_family('cf1', max_age_rule) + column_family1.create() + print('Created column family cf1 with MaxAge GC Rule.') + # [END bigtable_create_family_gc_max_age] + + # [START bigtable_create_family_gc_max_versions] + print('Creating column family cf2 with max versions GC rule...') + # Create a column family with GC policy : most recent N versions + # where 1 = most recent version + + # Define the GC policy to retain only the most recent 2 versions + max_versions_rule = column_family.MaxVersionsGCRule(2) + + column_family2 = table.column_family('cf2', max_versions_rule) + column_family2.create() + print('Created column family cf2 with Max Versions GC Rule.') + # [END bigtable_create_family_gc_max_versions] + + # [START bigtable_create_family_gc_union] + print('Creating column family cf3 with union GC rule...') + # Create a column family with GC policy to drop data that matches + # at least one condition. 
+ # Define a GC rule to drop cells older than 5 days or not the + # most recent version + union_rule = column_family.GCRuleUnion([ + column_family.MaxAgeGCRule(datetime.timedelta(days=5)), + column_family.MaxVersionsGCRule(2)]) + + column_family3 = table.column_family('cf3', union_rule) + column_family3.create() + print('Created column family cf3 with Union GC rule') + # [END bigtable_create_family_gc_union] + + # [START bigtable_create_family_gc_intersection] + print('Creating column family cf4 with Intersection GC rule...') + # Create a column family with GC policy to drop data that matches + # all conditions + # GC rule: Drop cells older than 5 days AND older than the most + # recent 2 versions + intersection_rule = column_family.GCRuleIntersection([ + column_family.MaxAgeGCRule(datetime.timedelta(days=5)), + column_family.MaxVersionsGCRule(2)]) + + column_family4 = table.column_family('cf4', intersection_rule) + column_family4.create() + print('Created column family cf4 with Intersection GC rule.') + # [END bigtable_create_family_gc_intersection] + + # [START bigtable_create_family_gc_nested] + print('Creating column family cf5 with a Nested GC rule...') + # Create a column family with nested GC policies. + # Create a nested GC rule: + # Drop cells that are either older than the 10 recent versions + # OR + # Drop cells that are older than a month AND older than the + # 2 recent versions + rule1 = column_family.MaxVersionsGCRule(10) + rule2 = column_family.GCRuleIntersection([ + column_family.MaxAgeGCRule(datetime.timedelta(days=30)), + column_family.MaxVersionsGCRule(2)]) + + nested_rule = column_family.GCRuleUnion([rule1, rule2]) + + column_family5 = table.column_family('cf5', nested_rule) + column_family5.create() + print('Created column family cf5 with a Nested GC rule.') + # [END bigtable_create_family_gc_nested] + + # [START bigtable_list_column_families] + print('Printing Column Family and GC Rule for all column families...') + column_families = table.list_column_families() + for column_family_name, gc_rule in sorted(column_families.items()): + print('Column Family:', column_family_name) + print('GC Rule:') + print(gc_rule.to_pb()) + # Sample output: + # Column Family: cf4 + # GC Rule: + # gc_rule { + # intersection { + # rules { + # max_age { + # seconds: 432000 + # } + # } + # rules { + # max_num_versions: 2 + # } + # } + # } + # [END bigtable_list_column_families] + + print('Print column family cf1 GC rule before update...') + print('Column Family: cf1') + print(column_family1.to_pb()) + + # [START bigtable_update_gc_rule] + print('Updating column family cf1 GC rule...') + # Update the column family cf1 to update the GC rule + column_family1 = table.column_family( + 'cf1', + column_family.MaxVersionsGCRule(1)) + column_family1.update() + print('Updated column family cf1 GC rule\n') + # [END bigtable_update_gc_rule] + + print('Print column family cf1 GC rule after update...') + print('Column Family: cf1') + print(column_family1.to_pb()) + + # [START bigtable_delete_family] + print('Delete a column family cf2...') + # Delete a column family + column_family2.delete() + print('Column family cf2 deleted successfully.') + # [END bigtable_delete_family] + + print('execute command "python tableadmin.py delete [project_id] \ + [instance_id] --table [tableName]" to delete the table.') + + +def delete_table(project_id, instance_id, table_id): + ''' Delete bigtable. + + :type project_id: str + :param project_id: Project id of the client. 
+ + :type instance_id: str + :param instance_id: Instance of the client. + + :type table_id: str + :param table_id: Table id to create table. + ''' + + client = bigtable.Client(project=project_id, admin=True) + instance = client.instance(instance_id) + table = instance.table(table_id) + + # [START bigtable_delete_table] + # Delete the entire table + + print('Checking if table {} exists...'.format(table_id)) + if table.exists(): + print('Table {} exists.'.format(table_id)) + print('Deleting {} table.'.format(table_id)) + table.delete() + print('Deleted {} table.'.format(table_id)) + else: + print('Table {} does not exists.'.format(table_id)) + # [END bigtable_delete_table] + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + + parser.add_argument('command', + help='run or delete. \ + Operation to perform on table.') + parser.add_argument( + '--table', + help='Cloud Bigtable Table name.', + default='Hello-Bigtable') + + parser.add_argument('project_id', + help='Your Cloud Platform project ID.') + parser.add_argument( + 'instance_id', + help='ID of the Cloud Bigtable instance to connect to.') + + args = parser.parse_args() + + if args.command.lower() == 'run': + run_table_operations(args.project_id, args.instance_id, + args.table) + elif args.command.lower() == 'delete': + delete_table(args.project_id, args.instance_id, args.table) + else: + print('Command should be either run or delete.\n Use argument -h,\ + --help to show help and exit.') diff --git a/packages/google-cloud-bigtable/samples/tableadmin/tableadmin_test.py b/packages/google-cloud-bigtable/samples/tableadmin/tableadmin_test.py new file mode 100755 index 000000000000..d6d3835a0bd1 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/tableadmin/tableadmin_test.py @@ -0,0 +1,66 @@ +#!/usr/bin/env python + +# Copyright 2018, Google LLC +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import random + +from tableadmin import create_table +from tableadmin import delete_table +from tableadmin import run_table_operations + +PROJECT = os.environ['GCLOUD_PROJECT'] +BIGTABLE_INSTANCE = os.environ['BIGTABLE_INSTANCE'] +TABLE_NAME_FORMAT = 'tableadmin-test-{}' +TABLE_NAME_RANGE = 10000 + + +def test_run_table_operations(capsys): + table_name = TABLE_NAME_FORMAT.format( + random.randrange(TABLE_NAME_RANGE)) + + run_table_operations(PROJECT, BIGTABLE_INSTANCE, table_name) + out, _ = capsys.readouterr() + + assert 'Creating the ' + table_name + ' table.' in out + assert 'Listing tables in current project.' in out + assert 'Creating column family cf1 with with MaxAge GC Rule' in out + assert 'Created column family cf1 with MaxAge GC Rule.' in out + assert 'Created column family cf2 with Max Versions GC Rule.' in out + assert 'Created column family cf3 with Union GC rule' in out + assert 'Created column family cf4 with Intersection GC rule.' in out + assert 'Created column family cf5 with a Nested GC rule.' 
in out + assert 'Printing Column Family and GC Rule for all column families.' in out + assert 'Updating column family cf1 GC rule...' in out + assert 'Updated column family cf1 GC rule' in out + assert 'Print column family cf1 GC rule after update...' in out + assert 'Column Family: cf1' in out + assert 'max_num_versions: 1' in out + assert 'Delete a column family cf2...' in out + assert 'Column family cf2 deleted successfully.' in out + + delete_table(PROJECT, BIGTABLE_INSTANCE, table_name) + + +def test_delete_table(capsys): + table_name = TABLE_NAME_FORMAT.format( + random.randrange(TABLE_NAME_RANGE)) + create_table(PROJECT, BIGTABLE_INSTANCE, table_name) + + delete_table(PROJECT, BIGTABLE_INSTANCE, table_name) + out, _ = capsys.readouterr() + + assert 'Table ' + table_name + ' exists.' in out + assert 'Deleting ' + table_name + ' table.' in out + assert 'Deleted ' + table_name + ' table.' in out diff --git a/packages/google-cloud-bigtable/scripts/decrypt-secrets.sh b/packages/google-cloud-bigtable/scripts/decrypt-secrets.sh new file mode 100755 index 000000000000..ff599eb2af25 --- /dev/null +++ b/packages/google-cloud-bigtable/scripts/decrypt-secrets.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +# Copyright 2015 Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +ROOT=$( dirname "$DIR" ) + +# Work from the project root. +cd $ROOT + +# Use SECRET_MANAGER_PROJECT if set, fallback to cloud-devrel-kokoro-resources. +PROJECT_ID="${SECRET_MANAGER_PROJECT:-cloud-devrel-kokoro-resources}" + +gcloud secrets versions access latest --secret="python-docs-samples-test-env" \ + > testing/test-env.sh +gcloud secrets versions access latest \ + --secret="python-docs-samples-service-account" \ + > testing/service-account.json +gcloud secrets versions access latest \ + --secret="python-docs-samples-client-secrets" \ + > testing/client-secrets.json \ No newline at end of file diff --git a/packages/google-cloud-bigtable/scripts/readme-gen/readme_gen.py b/packages/google-cloud-bigtable/scripts/readme-gen/readme_gen.py new file mode 100644 index 000000000000..d309d6e97518 --- /dev/null +++ b/packages/google-cloud-bigtable/scripts/readme-gen/readme_gen.py @@ -0,0 +1,66 @@ +#!/usr/bin/env python + +# Copyright 2016 Google Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
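# A usage note for the generator defined below: it loads one of the *.rst.in
# YAML configs added earlier in this patch and renders
# templates/README.tmpl.rst with it. An illustrative invocation (paths are
# examples only):
#
#     python scripts/readme-gen/readme_gen.py samples/tableadmin/README.rst.in
#
# The rendered README is written next to the config as README.rst unless
# --destination overrides the file name.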
+ +"""Generates READMEs using configuration defined in yaml.""" + +import argparse +import io +import os +import subprocess + +import jinja2 +import yaml + + +jinja_env = jinja2.Environment( + trim_blocks=True, + loader=jinja2.FileSystemLoader( + os.path.abspath(os.path.join(os.path.dirname(__file__), 'templates')))) + +README_TMPL = jinja_env.get_template('README.tmpl.rst') + + +def get_help(file): + return subprocess.check_output(['python', file, '--help']).decode() + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument('source') + parser.add_argument('--destination', default='README.rst') + + args = parser.parse_args() + + source = os.path.abspath(args.source) + root = os.path.dirname(source) + destination = os.path.join(root, args.destination) + + jinja_env.globals['get_help'] = get_help + + with io.open(source, 'r') as f: + config = yaml.load(f) + + # This allows get_help to execute in the right directory. + os.chdir(root) + + output = README_TMPL.render(config) + + with io.open(destination, 'w') as f: + f.write(output) + + +if __name__ == '__main__': + main() diff --git a/packages/google-cloud-bigtable/scripts/readme-gen/templates/README.tmpl.rst b/packages/google-cloud-bigtable/scripts/readme-gen/templates/README.tmpl.rst new file mode 100644 index 000000000000..4fd239765b0a --- /dev/null +++ b/packages/google-cloud-bigtable/scripts/readme-gen/templates/README.tmpl.rst @@ -0,0 +1,87 @@ +{# The following line is a lie. BUT! Once jinja2 is done with it, it will + become truth! #} +.. This file is automatically generated. Do not edit this file directly. + +{{product.name}} Python Samples +=============================================================================== + +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor={{folder}}/README.rst + + +This directory contains samples for {{product.name}}. {{product.description}} + +{{description}} + +.. _{{product.name}}: {{product.url}} + +{% if required_api_url %} +To run the sample, you need to enable the API at: {{required_api_url}} +{% endif %} + +{% if required_role %} +To run the sample, you need to have `{{required_role}}` role. +{% endif %} + +{{other_required_steps}} + +{% if setup %} +Setup +------------------------------------------------------------------------------- + +{% for section in setup %} + +{% include section + '.tmpl.rst' %} + +{% endfor %} +{% endif %} + +{% if samples %} +Samples +------------------------------------------------------------------------------- + +{% for sample in samples %} +{{sample.name}} ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +{% if not sample.hide_cloudshell_button %} +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor={{folder}}/{{sample.file}},{{folder}}/README.rst +{% endif %} + + +{{sample.description}} + +To run this sample: + +.. code-block:: bash + + $ python {{sample.file}} +{% if sample.show_help %} + + {{get_help(sample.file)|indent}} +{% endif %} + + +{% endfor %} +{% endif %} + +{% if cloud_client_library %} + +The client library +------------------------------------------------------------------------------- + +This sample uses the `Google Cloud Client Library for Python`_. 
+You can read the documentation for more details on API usage and use GitHub +to `browse the source`_ and `report issues`_. + +.. _Google Cloud Client Library for Python: + https://googlecloudplatform.github.io/google-cloud-python/ +.. _browse the source: + https://github.com/GoogleCloudPlatform/google-cloud-python +.. _report issues: + https://github.com/GoogleCloudPlatform/google-cloud-python/issues + +{% endif %} + +.. _Google Cloud SDK: https://cloud.google.com/sdk/ \ No newline at end of file diff --git a/packages/google-cloud-bigtable/scripts/readme-gen/templates/auth.tmpl.rst b/packages/google-cloud-bigtable/scripts/readme-gen/templates/auth.tmpl.rst new file mode 100644 index 000000000000..1446b94a5e3a --- /dev/null +++ b/packages/google-cloud-bigtable/scripts/readme-gen/templates/auth.tmpl.rst @@ -0,0 +1,9 @@ +Authentication +++++++++++++++ + +This sample requires you to have authentication setup. Refer to the +`Authentication Getting Started Guide`_ for instructions on setting up +credentials for applications. + +.. _Authentication Getting Started Guide: + https://cloud.google.com/docs/authentication/getting-started diff --git a/packages/google-cloud-bigtable/scripts/readme-gen/templates/auth_api_key.tmpl.rst b/packages/google-cloud-bigtable/scripts/readme-gen/templates/auth_api_key.tmpl.rst new file mode 100644 index 000000000000..11957ce2714a --- /dev/null +++ b/packages/google-cloud-bigtable/scripts/readme-gen/templates/auth_api_key.tmpl.rst @@ -0,0 +1,14 @@ +Authentication +++++++++++++++ + +Authentication for this service is done via an `API Key`_. To obtain an API +Key: + +1. Open the `Cloud Platform Console`_ +2. Make sure that billing is enabled for your project. +3. From the **Credentials** page, create a new **API Key** or use an existing + one for your project. + +.. _API Key: + https://developers.google.com/api-client-library/python/guide/aaa_apikeys +.. _Cloud Console: https://console.cloud.google.com/project?_ diff --git a/packages/google-cloud-bigtable/scripts/readme-gen/templates/install_deps.tmpl.rst b/packages/google-cloud-bigtable/scripts/readme-gen/templates/install_deps.tmpl.rst new file mode 100644 index 000000000000..a0406dba8c84 --- /dev/null +++ b/packages/google-cloud-bigtable/scripts/readme-gen/templates/install_deps.tmpl.rst @@ -0,0 +1,29 @@ +Install Dependencies +++++++++++++++++++++ + +#. Clone python-docs-samples and change directory to the sample directory you want to use. + + .. code-block:: bash + + $ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git + +#. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. + + .. _Python Development Environment Setup Guide: + https://cloud.google.com/python/setup + +#. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. + + .. code-block:: bash + + $ virtualenv env + $ source env/bin/activate + +#. Install the dependencies needed to run the samples. + + .. code-block:: bash + + $ pip install -r requirements.txt + +.. _pip: https://pip.pypa.io/ +.. 
_virtualenv: https://virtualenv.pypa.io/ diff --git a/packages/google-cloud-bigtable/scripts/readme-gen/templates/install_portaudio.tmpl.rst b/packages/google-cloud-bigtable/scripts/readme-gen/templates/install_portaudio.tmpl.rst new file mode 100644 index 000000000000..5ea33d18c00c --- /dev/null +++ b/packages/google-cloud-bigtable/scripts/readme-gen/templates/install_portaudio.tmpl.rst @@ -0,0 +1,35 @@ +Install PortAudio ++++++++++++++++++ + +Install `PortAudio`_. This is required by the `PyAudio`_ library to stream +audio from your computer's microphone. PyAudio depends on PortAudio for cross-platform compatibility, and is installed differently depending on the +platform. + +* For Mac OS X, you can use `Homebrew`_:: + + brew install portaudio + + **Note**: if you encounter an error when running `pip install` that indicates + it can't find `portaudio.h`, try running `pip install` with the following + flags:: + + pip install --global-option='build_ext' \ + --global-option='-I/usr/local/include' \ + --global-option='-L/usr/local/lib' \ + pyaudio + +* For Debian / Ubuntu Linux:: + + apt-get install portaudio19-dev python-all-dev + +* Windows may work without having to install PortAudio explicitly (it will get + installed with PyAudio). + +For more details, see the `PyAudio installation`_ page. + + +.. _PyAudio: https://people.csail.mit.edu/hubert/pyaudio/ +.. _PortAudio: http://www.portaudio.com/ +.. _PyAudio installation: + https://people.csail.mit.edu/hubert/pyaudio/#downloads +.. _Homebrew: http://brew.sh diff --git a/packages/google-cloud-bigtable/setup.cfg b/packages/google-cloud-bigtable/setup.cfg index 3bd555500e37..c3a2b39f6528 100644 --- a/packages/google-cloud-bigtable/setup.cfg +++ b/packages/google-cloud-bigtable/setup.cfg @@ -1,3 +1,19 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # Generated by synthtool. DO NOT EDIT! 
[bdist_wheel] universal = 1 diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index 422430e963db..27cac675ce9a 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -1,27 +1,18 @@ { - "updateTime": "2020-01-31T18:24:32.991056Z", "sources": [ { - "generator": { - "name": "artman", - "version": "0.44.4", - "dockerImage": "googleapis/artman@sha256:19e945954fc960a4bdfee6cb34695898ab21a8cf0bac063ee39b91f00a1faec8" + "git": { + "name": ".", + "remote": "git@github.com:googleapis/python-bigtable.git", + "sha": "e12ffc55933cfd6b40bd2fc6cef899ce78c543b5" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "2717b8a1c762b26911b45ecc2e4ee01d98401b28", - "internalRef": "292555664", - "log": "2717b8a1c762b26911b45ecc2e4ee01d98401b28\nFix dataproc artman client library generation.\n\nPiperOrigin-RevId: 292555664\n\n7ac66d9be8a7d7de4f13566d8663978c9ee9dcd7\nAdd Dataproc Autoscaling API to V1.\n\nPiperOrigin-RevId: 292450564\n\n5d932b2c1be3a6ef487d094e3cf5c0673d0241dd\n- Improve documentation\n- Add a client_id field to StreamingPullRequest\n\nPiperOrigin-RevId: 292434036\n\neaff9fa8edec3e914995ce832b087039c5417ea7\nmonitoring: v3 publish annotations and client retry config\n\nPiperOrigin-RevId: 292425288\n\n70958bab8c5353870d31a23fb2c40305b050d3fe\nBigQuery Storage Read API v1 clients.\n\nPiperOrigin-RevId: 292407644\n\n7a15e7fe78ff4b6d5c9606a3264559e5bde341d1\nUpdate backend proto for Google Cloud Endpoints\n\nPiperOrigin-RevId: 292391607\n\n3ca2c014e24eb5111c8e7248b1e1eb833977c83d\nbazel: Add --flaky_test_attempts=3 argument to prevent CI failures caused by flaky tests\n\nPiperOrigin-RevId: 292382559\n\n9933347c1f677e81e19a844c2ef95bfceaf694fe\nbazel:Integrate latest protoc-java-resource-names-plugin changes (fix for PyYAML dependency in bazel rules)\n\nPiperOrigin-RevId: 292376626\n\nb835ab9d2f62c88561392aa26074c0b849fb0bd3\nasset: v1p2beta1 add client config annotations\n\n* remove unintentionally exposed RPCs\n* remove messages relevant to removed RPCs\n\nPiperOrigin-RevId: 292369593\n\n" - } - }, - { - "template": { - "name": "python_split_library", - "origin": "synthtool.gcp", - "version": "2019.10.17" + "sha": "eafa840ceec23b44a5c21670288107c661252711", + "internalRef": "313488995" } } ], @@ -32,8 +23,7 @@ "apiName": "bigtable", "apiVersion": "v2", "language": "python", - "generator": "gapic", - "config": "google/bigtable/artman_bigtable.yaml" + "generator": "bazel" } }, { @@ -42,8 +32,7 @@ "apiName": "bigtable_admin", "apiVersion": "v2", "language": "python", - "generator": "gapic", - "config": "google/bigtable/admin/artman_bigtableadmin.yaml" + "generator": "bazel" } } ] diff --git a/packages/google-cloud-bigtable/synth.py b/packages/google-cloud-bigtable/synth.py index 22499ee05040..141d93dd3cd6 100644 --- a/packages/google-cloud-bigtable/synth.py +++ b/packages/google-cloud-bigtable/synth.py @@ -16,6 +16,7 @@ import synthtool as s from synthtool import gcp +from synthtool.languages import python gapic = gcp.GAPICBazel() common = gcp.CommonTemplates() @@ -83,7 +84,13 @@ # ---------------------------------------------------------------------------- # Add templated files # ---------------------------------------------------------------------------- -templated_files = common.py_library(unit_cov_level=97, cov_level=99) +templated_files = common.py_library(unit_cov_level=97, cov_level=99, samples=True) s.move(templated_files, 
excludes=['noxfile.py']) +# ---------------------------------------------------------------------------- +# Samples templates +# ---------------------------------------------------------------------------- + +python.py_samples(skip_readmes=True) + s.shell.run(["nox", "-s", "blacken"], hide_output=False) diff --git a/packages/google-cloud-bigtable/testing/.gitignore b/packages/google-cloud-bigtable/testing/.gitignore new file mode 100644 index 000000000000..b05fbd630881 --- /dev/null +++ b/packages/google-cloud-bigtable/testing/.gitignore @@ -0,0 +1,3 @@ +test-env.sh +service-account.json +client-secrets.json \ No newline at end of file From 645f57ce938f32127bab3e00401a1f09791a2078 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Fri, 5 Jun 2020 18:35:11 +0200 Subject: [PATCH 308/892] chore(deps): update dependency google-cloud-monitoring to v1 (#51) --- .../google-cloud-bigtable/samples/metricscaler/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index 4ab4f4eba966..ab135bd21a06 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ google-cloud-bigtable==1.2.1 -google-cloud-monitoring==0.36.0 +google-cloud-monitoring==1.0.0 From 9d275ba3c0e5b89b35559710b0048f1efdf0c3e7 Mon Sep 17 00:00:00 2001 From: Raphael Long Date: Mon, 8 Jun 2020 10:56:26 -0500 Subject: [PATCH 309/892] docs(bigtable): fix incorrect display_name update (#46) make consistent with the rest of the docs --- packages/google-cloud-bigtable/docs/instance-api.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/docs/instance-api.rst b/packages/google-cloud-bigtable/docs/instance-api.rst index bc338d7c7ca9..b61f78bbf66e 100644 --- a/packages/google-cloud-bigtable/docs/instance-api.rst +++ b/packages/google-cloud-bigtable/docs/instance-api.rst @@ -103,7 +103,7 @@ with :meth:`update() `: .. code:: python - client.display_name = 'New display_name' + instance.display_name = 'New display_name' instance.update() Delete an existing Instance From fb6b702164a3e363edc287da7a74864b9134901c Mon Sep 17 00:00:00 2001 From: Raphael Long Date: Tue, 9 Jun 2020 11:34:11 -0500 Subject: [PATCH 310/892] docs(bigtable): remove missing argument from instance declaration (#47) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes #42 🦕 --- packages/google-cloud-bigtable/docs/instance-api.rst | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/packages/google-cloud-bigtable/docs/instance-api.rst b/packages/google-cloud-bigtable/docs/instance-api.rst index b61f78bbf66e..ab884a605b73 100644 --- a/packages/google-cloud-bigtable/docs/instance-api.rst +++ b/packages/google-cloud-bigtable/docs/instance-api.rst @@ -22,12 +22,7 @@ To create an :class:`Instance ` object: .. code:: python - instance = client.instance(instance_id, location_id, - display_name=display_name) - -- ``location_id`` is the ID of the location in which the instance's cluster - will be hosted, e.g. ``'us-central1-c'``. ``location_id`` is required for - instances which do not already exist. + instance = client.instance(instance_id, display_name=display_name) - ``display_name`` is optional. When not provided, ``display_name`` defaults to the ``instance_id`` value. 
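Read together, the two documentation fixes above leave the instance workflow in
``docs/instance-api.rst`` reading roughly as follows (a minimal end-to-end
sketch with placeholder project and instance IDs, shown only to connect the two
hunks):

.. code:: python

    from google.cloud import bigtable

    client = bigtable.Client(project='my-project', admin=True)

    # location_id is no longer passed to client.instance(); it is a property
    # of the cluster rather than the instance.
    instance = client.instance('my-instance', display_name='My Instance')

    # The display name is updated on the Instance object, not on the client.
    instance.display_name = 'New display_name'
    instance.update()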
From 581092f14a274d42c8c3d59d29eecaa34785779f Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Mon, 15 Jun 2020 10:54:14 -0700 Subject: [PATCH 311/892] feat: update gapic-generator and go microgen, backups generated api (#55) * changes without context autosynth cannot find the source of changes triggered by earlier changes in this repository, or by version upgrades to tools such as linters. * chore: update gapic-generator and go microgen changes include: - build_gen: go lro gapic used as dep - go_gapic_library: fixes shading of go_library importpath PiperOrigin-RevId: 314363155 Source-Author: Google APIs Source-Date: Tue Jun 2 10:56:09 2020 -0700 Source-Repo: googleapis/googleapis Source-Sha: 3a4894c4f0da3e763aca2c67bd280ae915177450 Source-Link: https://github.com/googleapis/googleapis/commit/3a4894c4f0da3e763aca2c67bd280ae915177450 * fix failing test * lint Co-authored-by: Kristen O'Leary Co-authored-by: kolea2 <45548808+kolea2@users.noreply.github.com> --- .../google/cloud/bigtable/cluster.py | 4 +- .../cloud/bigtable_admin_v2/__init__.py | 4 +- .../gapic/bigtable_instance_admin_client.py | 187 +- .../gapic/bigtable_table_admin_client.py | 786 +++++- .../bigtable_table_admin_client_config.py | 30 + .../cloud/bigtable_admin_v2/gapic/enums.py | 48 +- .../bigtable_instance_admin_grpc_transport.py | 7 +- .../bigtable_table_admin_grpc_transport.py | 97 +- .../proto/bigtable_instance_admin.proto | 283 +- .../proto/bigtable_instance_admin_pb2.py | 855 +++--- .../proto/bigtable_instance_admin_pb2_grpc.py | 107 +- .../proto/bigtable_table_admin.proto | 611 +++- .../proto/bigtable_table_admin_pb2.py | 2508 ++++++++++++----- .../proto/bigtable_table_admin_pb2_grpc.py | 214 +- .../bigtable_admin_v2/proto/common.proto | 19 +- .../bigtable_admin_v2/proto/common_pb2.py | 150 +- .../bigtable_admin_v2/proto/instance.proto | 68 +- .../bigtable_admin_v2/proto/instance_pb2.py | 336 ++- .../cloud/bigtable_admin_v2/proto/table.proto | 177 +- .../bigtable_admin_v2/proto/table_pb2.py | 938 ++++-- .../google/cloud/bigtable_admin_v2/types.py | 2 + .../google/cloud/bigtable_v2/__init__.py | 4 +- .../bigtable_v2/gapic/bigtable_client.py | 29 +- .../gapic/bigtable_client_config.py | 14 +- .../cloud/bigtable_v2/proto/bigtable_pb2.py | 400 +-- .../cloud/bigtable_v2/proto/data_pb2.py | 613 ++-- packages/google-cloud-bigtable/synth.metadata | 22 +- .../test_bigtable_instance_admin_client_v2.py | 51 +- .../v2/test_bigtable_table_admin_client_v2.py | 313 +- 29 files changed, 6444 insertions(+), 2433 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py index edb5d261bfae..b573705c0696 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py @@ -291,11 +291,11 @@ def update(self): update operation. """ client = self._instance._client - # We are passing `None` for second argument location. + # We are passing `None` for third argument location. # Location is set only at the time of creation of a cluster # and can not be changed after cluster has been created. 
return client.instance_admin_client.update_cluster( - self.name, self.serve_nodes, None + name=self.name, serve_nodes=self.serve_nodes, location=None ) def delete(self): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py index 876859fe058e..9f72d4f53222 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py @@ -27,8 +27,8 @@ if sys.version_info[:2] == (2, 7): message = ( - "A future version of this library will drop support for Python 2.7." - "More details about Python 2 support for Google Cloud Client Libraries" + "A future version of this library will drop support for Python 2.7. " + "More details about Python 2 support for Google Cloud Client Libraries " "can be found at https://cloud.google.com/python/docs/python2-sunset/" ) warnings.warn(message, DeprecationWarning) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py index 0724c3822a3d..8edb3c168d4e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py @@ -287,19 +287,20 @@ def create_instance( >>> metadata = response.metadata() Args: - parent (str): The unique name of the project in which to create the new instance. - Values are of the form ``projects/``. - instance_id (str): The ID to be used when referring to the new instance within its project, - e.g., just ``myinstance`` rather than + parent (str): Required. The unique name of the project in which to create the new + instance. Values are of the form ``projects/{project}``. + instance_id (str): Required. The ID to be used when referring to the new instance + within its project, e.g., just ``myinstance`` rather than ``projects/myproject/instances/myinstance``. - instance (Union[dict, ~google.cloud.bigtable_admin_v2.types.Instance]): The instance to create. Fields marked ``OutputOnly`` must be left blank. + instance (Union[dict, ~google.cloud.bigtable_admin_v2.types.Instance]): Required. The instance to create. Fields marked ``OutputOnly`` must + be left blank. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_admin_v2.types.Instance` - clusters (dict[str -> Union[dict, ~google.cloud.bigtable_admin_v2.types.Cluster]]): The clusters to be created within the instance, mapped by desired - cluster ID, e.g., just ``mycluster`` rather than + clusters (dict[str -> Union[dict, ~google.cloud.bigtable_admin_v2.types.Cluster]]): Required. The clusters to be created within the instance, mapped by + desired cluster ID, e.g., just ``mycluster`` rather than ``projects/myproject/instances/myinstance/clusters/mycluster``. Fields - marked ``OutputOnly`` must be left blank. Currently, at most two + marked ``OutputOnly`` must be left blank. Currently, at most four clusters can be specified. If a dict is provided, it must be of the same form as the protobuf @@ -383,8 +384,8 @@ def get_instance( >>> response = client.get_instance(name) Args: - name (str): The unique name of the requested instance. Values are of the form - ``projects//instances/``. + name (str): Required. 
The unique name of the requested instance. Values are of + the form ``projects/{project}/instances/{instance}``. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -454,8 +455,8 @@ def list_instances( >>> response = client.list_instances(parent) Args: - parent (str): The unique name of the project for which a list of instances is - requested. Values are of the form ``projects/``. + parent (str): Required. The unique name of the project for which a list of + instances is requested. Values are of the form ``projects/{project}``. page_token (str): DEPRECATED: This field is unused and ignored. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will @@ -509,43 +510,37 @@ def list_instances( def update_instance( self, - name, display_name, - type_, - labels, + name=None, state=None, + type_=None, + labels=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Updates an instance within a project. + Updates an instance within a project. This method updates only the display + name and type for an Instance. To update other Instance properties, such as + labels, use PartialUpdateInstance. Example: >>> from google.cloud import bigtable_admin_v2 - >>> from google.cloud.bigtable_admin_v2 import enums >>> >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() >>> - >>> name = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> >>> # TODO: Initialize `display_name`: >>> display_name = '' >>> - >>> # TODO: Initialize `type_`: - >>> type_ = enums.Instance.Type.TYPE_UNSPECIFIED - >>> - >>> # TODO: Initialize `labels`: - >>> labels = {} - >>> - >>> response = client.update_instance(name, display_name, type_, labels) + >>> response = client.update_instance(display_name) Args: - name (str): (``OutputOnly``) The unique name of the instance. Values are of the form - ``projects//instances/[a-z][a-z0-9\\-]+[a-z0-9]``. - display_name (str): The descriptive name for this instance as it appears in UIs. + display_name (str): Required. The descriptive name for this instance as it appears in UIs. Can be changed at any time, but should be kept globally unique to avoid confusion. + name (str): The unique name of the instance. Values are of the form + ``projects/{project}/instances/[a-z][a-z0-9\\-]+[a-z0-9]``. + state (~google.cloud.bigtable_admin_v2.types.State): (``OutputOnly``) The current state of the instance. type_ (~google.cloud.bigtable_admin_v2.types.Type): The type of the instance. Defaults to ``PRODUCTION``. labels (dict[str -> str]): Labels are a flexible and lightweight mechanism for organizing cloud resources into groups that reflect a customer's organizational needs and @@ -559,7 +554,6 @@ def update_instance( conform to the regular expression: ``[\p{Ll}\p{Lo}\p{N}_-]{0,63}``. - No more than 64 labels can be associated with a given resource. - Keys and values must both be under 128 bytes. - state (~google.cloud.bigtable_admin_v2.types.State): (``OutputOnly``) The current state of the instance. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. 
@@ -591,11 +585,11 @@ def update_instance( ) request = instance_pb2.Instance( - name=name, display_name=display_name, + name=name, + state=state, type=type_, labels=labels, - state=state, ) if metadata is None: metadata = [] @@ -623,7 +617,8 @@ def partial_update_instance( metadata=None, ): """ - Partially updates an instance within a project. + Partially updates an instance within a project. This method can modify all + fields of an Instance and is the preferred way to update an Instance. Example: >>> from google.cloud import bigtable_admin_v2 @@ -648,11 +643,11 @@ def partial_update_instance( >>> metadata = response.metadata() Args: - instance (Union[dict, ~google.cloud.bigtable_admin_v2.types.Instance]): The Instance which will (partially) replace the current value. + instance (Union[dict, ~google.cloud.bigtable_admin_v2.types.Instance]): Required. The Instance which will (partially) replace the current value. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_admin_v2.types.Instance` - update_mask (Union[dict, ~google.cloud.bigtable_admin_v2.types.FieldMask]): The subset of Instance fields which should be replaced. + update_mask (Union[dict, ~google.cloud.bigtable_admin_v2.types.FieldMask]): Required. The subset of Instance fields which should be replaced. Must be explicitly set. If a dict is provided, it must be of the same form as the protobuf @@ -733,8 +728,8 @@ def delete_instance( >>> client.delete_instance(name) Args: - name (str): The unique name of the instance to be deleted. Values are of the form - ``projects//instances/``. + name (str): Required. The unique name of the instance to be deleted. Values are + of the form ``projects/{project}/instances/{instance}``. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -817,13 +812,14 @@ def create_cluster( >>> metadata = response.metadata() Args: - parent (str): The unique name of the instance in which to create the new cluster. - Values are of the form ``projects//instances/``. - cluster_id (str): The ID to be used when referring to the new cluster within its instance, - e.g., just ``mycluster`` rather than + parent (str): Required. The unique name of the instance in which to create the new + cluster. Values are of the form + ``projects/{project}/instances/{instance}``. + cluster_id (str): Required. The ID to be used when referring to the new cluster within + its instance, e.g., just ``mycluster`` rather than ``projects/myproject/instances/myinstance/clusters/mycluster``. - cluster (Union[dict, ~google.cloud.bigtable_admin_v2.types.Cluster]): The cluster to be created. Fields marked ``OutputOnly`` must be left - blank. + cluster (Union[dict, ~google.cloud.bigtable_admin_v2.types.Cluster]): Required. The cluster to be created. Fields marked ``OutputOnly`` + must be left blank. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_admin_v2.types.Cluster` @@ -903,8 +899,8 @@ def get_cluster( >>> response = client.get_cluster(name) Args: - name (str): The unique name of the requested cluster. Values are of the form - ``projects//instances//clusters/``. + name (str): Required. The unique name of the requested cluster. Values are of + the form ``projects/{project}/instances/{instance}/clusters/{cluster}``. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. 
If ``None`` is specified, requests will be retried using a default configuration. @@ -974,9 +970,9 @@ def list_clusters( >>> response = client.list_clusters(parent) Args: - parent (str): The unique name of the instance for which a list of clusters is - requested. Values are of the form - ``projects//instances/``. Use `` = '-'`` to + parent (str): Required. The unique name of the instance for which a list of + clusters is requested. Values are of the form + ``projects/{project}/instances/{instance}``. Use ``{instance} = '-'`` to list Clusters for all Instances in a project, e.g., ``projects/myproject/instances/-``. page_token (str): DEPRECATED: This field is unused and ignored. @@ -1032,8 +1028,8 @@ def list_clusters( def update_cluster( self, - name, serve_nodes, + name=None, location=None, state=None, default_storage_type=None, @@ -1049,12 +1045,10 @@ def update_cluster( >>> >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() >>> - >>> name = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') - >>> >>> # TODO: Initialize `serve_nodes`: >>> serve_nodes = 0 >>> - >>> response = client.update_cluster(name, serve_nodes) + >>> response = client.update_cluster(serve_nodes) >>> >>> def callback(operation_future): ... # Handle result. @@ -1066,17 +1060,17 @@ def update_cluster( >>> metadata = response.metadata() Args: - name (str): (``OutputOnly``) The unique name of the cluster. Values are of the form - ``projects//instances//clusters/[a-z][-a-z0-9]*``. - serve_nodes (int): The number of nodes allocated to this cluster. More nodes enable higher - throughput and more consistent performance. - location (str): (``CreationOnly``) The location where this cluster's nodes and storage - reside. For best performance, clients should be located as close as - possible to this cluster. Currently only zones are supported, so values - should be of the form ``projects//locations/``. - state (~google.cloud.bigtable_admin_v2.types.State): (``OutputOnly``) The current state of the cluster. - default_storage_type (~google.cloud.bigtable_admin_v2.types.StorageType): (``CreationOnly``) The type of storage used by this cluster to serve its - parent instance's tables, unless explicitly overridden. + serve_nodes (int): Required. The number of nodes allocated to this cluster. More nodes enable + higher throughput and more consistent performance. + name (str): The unique name of the cluster. Values are of the form + ``projects/{project}/instances/{instance}/clusters/[a-z][-a-z0-9]*``. + location (str): (``CreationOnly``) The location where this cluster's nodes and + storage reside. For best performance, clients should be located as close + as possible to this cluster. Currently only zones are supported, so + values should be of the form ``projects/{project}/locations/{zone}``. + state (~google.cloud.bigtable_admin_v2.types.State): The current state of the cluster. + default_storage_type (~google.cloud.bigtable_admin_v2.types.StorageType): (``CreationOnly``) The type of storage used by this cluster to serve + its parent instance's tables, unless explicitly overridden. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. 
@@ -1108,8 +1102,8 @@ def update_cluster( ) request = instance_pb2.Cluster( - name=name, serve_nodes=serve_nodes, + name=name, location=location, state=state, default_storage_type=default_storage_type, @@ -1157,8 +1151,9 @@ def delete_cluster( >>> client.delete_cluster(name) Args: - name (str): The unique name of the cluster to be deleted. Values are of the form - ``projects//instances//clusters/``. + name (str): Required. The unique name of the cluster to be deleted. Values are + of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -1233,13 +1228,14 @@ def create_app_profile( >>> response = client.create_app_profile(parent, app_profile_id, app_profile) Args: - parent (str): The unique name of the instance in which to create the new app profile. - Values are of the form ``projects//instances/``. - app_profile_id (str): The ID to be used when referring to the new app profile within its - instance, e.g., just ``myprofile`` rather than + parent (str): Required. The unique name of the instance in which to create the new + app profile. Values are of the form + ``projects/{project}/instances/{instance}``. + app_profile_id (str): Required. The ID to be used when referring to the new app profile + within its instance, e.g., just ``myprofile`` rather than ``projects/myproject/instances/myinstance/appProfiles/myprofile``. - app_profile (Union[dict, ~google.cloud.bigtable_admin_v2.types.AppProfile]): The app profile to be created. Fields marked ``OutputOnly`` will be - ignored. + app_profile (Union[dict, ~google.cloud.bigtable_admin_v2.types.AppProfile]): Required. The app profile to be created. Fields marked + ``OutputOnly`` will be ignored. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_admin_v2.types.AppProfile` @@ -1317,8 +1313,9 @@ def get_app_profile( >>> response = client.get_app_profile(name) Args: - name (str): The unique name of the requested app profile. Values are of the form - ``projects//instances//appProfiles/``. + name (str): Required. The unique name of the requested app profile. Values are + of the form + ``projects/{project}/instances/{instance}/appProfiles/{app_profile}``. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -1400,13 +1397,16 @@ def list_app_profiles( ... pass Args: - parent (str): The unique name of the instance for which a list of app profiles is - requested. Values are of the form - ``projects//instances/``. Use `` = '-'`` to + parent (str): Required. The unique name of the instance for which a list of app + profiles is requested. Values are of the form + ``projects/{project}/instances/{instance}``. Use ``{instance} = '-'`` to list AppProfiles for all Instances in a project, e.g., ``projects/myproject/instances/-``. - page_size (int): Maximum number of results per page. - CURRENTLY UNIMPLEMENTED AND IGNORED. + page_size (int): The maximum number of resources contained in the + underlying API response. If page streaming is performed per- + resource, this parameter does not affect the return value. If page + streaming is performed per-page, this determines the maximum number + of resources in a page. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. 
If ``None`` is specified, requests will be retried using a default configuration. @@ -1506,11 +1506,11 @@ def update_app_profile( >>> metadata = response.metadata() Args: - app_profile (Union[dict, ~google.cloud.bigtable_admin_v2.types.AppProfile]): The app profile which will (partially) replace the current value. + app_profile (Union[dict, ~google.cloud.bigtable_admin_v2.types.AppProfile]): Required. The app profile which will (partially) replace the current value. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_admin_v2.types.AppProfile` - update_mask (Union[dict, ~google.cloud.bigtable_admin_v2.types.FieldMask]): The subset of app profile fields which should be replaced. + update_mask (Union[dict, ~google.cloud.bigtable_admin_v2.types.FieldMask]): Required. The subset of app profile fields which should be replaced. If unset, all fields will be replaced. If a dict is provided, it must be of the same form as the protobuf @@ -1577,7 +1577,7 @@ def update_app_profile( def delete_app_profile( self, name, - ignore_warnings, + ignore_warnings=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, @@ -1592,14 +1592,12 @@ def delete_app_profile( >>> >>> name = client.app_profile_path('[PROJECT]', '[INSTANCE]', '[APP_PROFILE]') >>> - >>> # TODO: Initialize `ignore_warnings`: - >>> ignore_warnings = False - >>> - >>> client.delete_app_profile(name, ignore_warnings) + >>> client.delete_app_profile(name) Args: - name (str): The unique name of the app profile to be deleted. Values are of the form - ``projects//instances//appProfiles/``. + name (str): Required. The unique name of the app profile to be deleted. Values + are of the form + ``projects/{project}/instances/{instance}/appProfiles/{app_profile}``. ignore_warnings (bool): If true, ignore safety checks when deleting the app profile. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will @@ -1665,7 +1663,8 @@ def get_iam_policy( >>> >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() >>> - >>> resource = client.instance_path('[PROJECT]', '[INSTANCE]') + >>> # TODO: Initialize `resource`: + >>> resource = '' >>> >>> response = client.get_iam_policy(resource) @@ -1744,7 +1743,8 @@ def set_iam_policy( >>> >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() >>> - >>> resource = client.instance_path('[PROJECT]', '[INSTANCE]') + >>> # TODO: Initialize `resource`: + >>> resource = '' >>> >>> # TODO: Initialize `policy`: >>> policy = {} @@ -1825,7 +1825,8 @@ def test_iam_permissions( >>> >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() >>> - >>> resource = client.instance_path('[PROJECT]', '[INSTANCE]') + >>> # TODO: Initialize `resource`: + >>> resource = '' >>> >>> # TODO: Initialize `permissions`: >>> permissions = [] @@ -1835,8 +1836,8 @@ def test_iam_permissions( Args: resource (str): REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field. - permissions (list[str]): The set of permissions to check for the ``resource``. Permissions with - wildcards (such as '*' or 'storage.*') are not allowed. For more + permissions (list[str]): The set of permissions to check for the ``resource``. Permissions + with wildcards (such as '*' or 'storage.*') are not allowed. For more information see `IAM Overview `__. 
retry (Optional[google.api_core.retry.Retry]): A retry object used diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py index 9ccd58471455..cac517314bc6 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py @@ -95,6 +95,17 @@ def from_service_account_file(cls, filename, *args, **kwargs): from_service_account_json = from_service_account_file + @classmethod + def backup_path(cls, project, instance, cluster, backup): + """Return a fully-qualified backup string.""" + return google.api_core.path_template.expand( + "projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}", + project=project, + instance=instance, + cluster=cluster, + backup=backup, + ) + @classmethod def cluster_path(cls, project, instance, cluster): """Return a fully-qualified cluster string.""" @@ -279,24 +290,26 @@ def create_table( >>> response = client.create_table(parent, table_id, table) Args: - parent (str): The unique name of the instance in which to create the table. Values are - of the form ``projects//instances/``. - table_id (str): The name by which the new table should be referred to within the parent - instance, e.g., ``foobar`` rather than ``/tables/foobar``. - table (Union[dict, ~google.cloud.bigtable_admin_v2.types.Table]): The Table to create. + parent (str): Required. The unique name of the instance in which to create the + table. Values are of the form + ``projects/{project}/instances/{instance}``. + table_id (str): Required. The name by which the new table should be referred to + within the parent instance, e.g., ``foobar`` rather than + ``{parent}/tables/foobar``. Maximum 50 characters. + table (Union[dict, ~google.cloud.bigtable_admin_v2.types.Table]): Required. The Table to create. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_admin_v2.types.Table` - initial_splits (list[Union[dict, ~google.cloud.bigtable_admin_v2.types.Split]]): The optional list of row keys that will be used to initially split the - table into several tablets (tablets are similar to HBase regions). Given - two split keys, ``s1`` and ``s2``, three tablets will be created, + initial_splits (list[Union[dict, ~google.cloud.bigtable_admin_v2.types.Split]]): The optional list of row keys that will be used to initially split + the table into several tablets (tablets are similar to HBase regions). + Given two split keys, ``s1`` and ``s2``, three tablets will be created, spanning the key ranges: ``[, s1), [s1, s2), [s2, )``. 
Example: - Row keys := ``["a", "apple", "custom", "customer_1", "customer_2",`` ``"other", "zz"]`` - - initial\_split\_keys := + - initial_split_keys := ``["apple", "customer_1", "customer_2", "other"]`` - Key assignment: @@ -389,9 +402,7 @@ def create_table_from_snapshot( >>> >>> # TODO: Initialize `table_id`: >>> table_id = '' - >>> - >>> # TODO: Initialize `source_snapshot`: - >>> source_snapshot = '' + >>> source_snapshot = client.snapshot_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]', '[SNAPSHOT]') >>> >>> response = client.create_table_from_snapshot(parent, table_id, source_snapshot) >>> @@ -405,14 +416,16 @@ def create_table_from_snapshot( >>> metadata = response.metadata() Args: - parent (str): The unique name of the instance in which to create the table. Values are - of the form ``projects//instances/``. - table_id (str): The name by which the new table should be referred to within the parent - instance, e.g., ``foobar`` rather than ``/tables/foobar``. - source_snapshot (str): The unique name of the snapshot from which to restore the table. The - snapshot and the table must be in the same instance. Values are of the - form - ``projects//instances//clusters//snapshots/``. + parent (str): Required. The unique name of the instance in which to create the + table. Values are of the form + ``projects/{project}/instances/{instance}``. + table_id (str): Required. The name by which the new table should be referred to + within the parent instance, e.g., ``foobar`` rather than + ``{parent}/tables/foobar``. + source_snapshot (str): Required. The unique name of the snapshot from which to restore the + table. The snapshot and the table must be in the same instance. Values + are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -503,12 +516,16 @@ def list_tables( ... pass Args: - parent (str): The unique name of the instance for which tables should be listed. - Values are of the form ``projects//instances/``. - view (~google.cloud.bigtable_admin_v2.types.View): The view to be applied to the returned tables' fields. Defaults to - ``NAME_ONLY`` if unspecified; no others are currently supported. - page_size (int): Maximum number of results per page. - CURRENTLY UNIMPLEMENTED AND IGNORED. + parent (str): Required. The unique name of the instance for which tables should be + listed. Values are of the form + ``projects/{project}/instances/{instance}``. + view (~google.cloud.bigtable_admin_v2.types.View): The view to be applied to the returned tables' fields. Only + NAME_ONLY view (default) and REPLICATION_VIEW are supported. + page_size (int): The maximum number of resources contained in the + underlying API response. If page streaming is performed per- + resource, this parameter does not affect the return value. If page + streaming is performed per-page, this determines the maximum number + of resources in a page. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -594,8 +611,8 @@ def get_table( >>> response = client.get_table(name) Args: - name (str): The unique name of the requested table. Values are of the form - ``projects//instances//tables/
``. + name (str): Required. The unique name of the requested table. Values are of the + form ``projects/{project}/instances/{instance}/tables/{table}``. view (~google.cloud.bigtable_admin_v2.types.View): The view to be applied to the returned table's fields. Defaults to ``SCHEMA_VIEW`` if unspecified. retry (Optional[google.api_core.retry.Retry]): A retry object used @@ -666,8 +683,8 @@ def delete_table( >>> client.delete_table(name) Args: - name (str): The unique name of the table to be deleted. Values are of the form - ``projects//instances//tables/
``. + name (str): Required. The unique name of the table to be deleted. Values are of + the form ``projects/{project}/instances/{instance}/tables/{table}``. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -740,13 +757,13 @@ def modify_column_families( >>> response = client.modify_column_families(name, modifications) Args: - name (str): The unique name of the table whose families should be modified. Values - are of the form - ``projects//instances//tables/
``. - modifications (list[Union[dict, ~google.cloud.bigtable_admin_v2.types.Modification]]): Modifications to be atomically applied to the specified table's families. - Entries are applied in order, meaning that earlier modifications can be - masked by later ones (in the case of repeated updates to the same family, - for example). + name (str): Required. The unique name of the table whose families should be + modified. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + modifications (list[Union[dict, ~google.cloud.bigtable_admin_v2.types.Modification]]): Required. Modifications to be atomically applied to the specified table's + families. Entries are applied in order, meaning that earlier modifications + can be masked by later ones (in the case of repeated updates to the same + family, for example). If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_admin_v2.types.Modification` @@ -824,9 +841,9 @@ def drop_row_range( >>> client.drop_row_range(name) Args: - name (str): The unique name of the table on which to drop a range of rows. Values - are of the form - ``projects//instances//tables/
``. + name (str): Required. The unique name of the table on which to drop a range of + rows. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. row_key_prefix (bytes): Delete all rows that start with this row key prefix. Prefix cannot be zero length. delete_all_data_from_table (bool): Delete all rows in the table. Setting this to false is a no-op. @@ -909,9 +926,9 @@ def generate_consistency_token( >>> response = client.generate_consistency_token(name) Args: - name (str): The unique name of the Table for which to create a consistency token. - Values are of the form - ``projects//instances//tables/
``. + name (str): Required. The unique name of the Table for which to create a + consistency token. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -988,10 +1005,10 @@ def check_consistency( >>> response = client.check_consistency(name, consistency_token) Args: - name (str): The unique name of the Table for which to check replication consistency. - Values are of the form - ``projects//instances//tables/
``. - consistency_token (str): The token created using GenerateConsistencyToken for the Table. + name (str): Required. The unique name of the Table for which to check + replication consistency. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + consistency_token (str): Required. The token created using GenerateConsistencyToken for the Table. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -1051,15 +1068,17 @@ def get_iam_policy( metadata=None, ): """ - Gets the access control policy for a table resource. Returns an empty - policy if an table exists but does not have a policy set. + Gets the access control policy for a resource. + Returns an empty policy if the resource exists but does not have a policy + set. Example: >>> from google.cloud import bigtable_admin_v2 >>> >>> client = bigtable_admin_v2.BigtableTableAdminClient() >>> - >>> resource = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + >>> # TODO: Initialize `resource`: + >>> resource = '' >>> >>> response = client.get_iam_policy(resource) @@ -1130,15 +1149,16 @@ def set_iam_policy( metadata=None, ): """ - Sets the access control policy on a table resource. Replaces any existing - policy. + Sets the access control policy on a Table or Backup resource. + Replaces any existing policy. Example: >>> from google.cloud import bigtable_admin_v2 >>> >>> client = bigtable_admin_v2.BigtableTableAdminClient() >>> - >>> resource = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + >>> # TODO: Initialize `resource`: + >>> resource = '' >>> >>> # TODO: Initialize `policy`: >>> policy = {} @@ -1219,7 +1239,8 @@ def test_iam_permissions( >>> >>> client = bigtable_admin_v2.BigtableTableAdminClient() >>> - >>> resource = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') + >>> # TODO: Initialize `resource`: + >>> resource = '' >>> >>> # TODO: Initialize `permissions`: >>> permissions = [] @@ -1229,8 +1250,8 @@ def test_iam_permissions( Args: resource (str): REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field. - permissions (list[str]): The set of permissions to check for the ``resource``. Permissions with - wildcards (such as '*' or 'storage.*') are not allowed. For more + permissions (list[str]): The set of permissions to check for the ``resource``. Permissions + with wildcards (such as '*' or 'storage.*') are not allowed. For more information see `IAM Overview `__. retry (Optional[google.api_core.retry.Retry]): A retry object used @@ -1288,8 +1309,8 @@ def snapshot_table( name, cluster, snapshot_id, - description, ttl=None, + description=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, @@ -1310,17 +1331,12 @@ def snapshot_table( >>> client = bigtable_admin_v2.BigtableTableAdminClient() >>> >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> # TODO: Initialize `cluster`: - >>> cluster = '' + >>> cluster = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') >>> >>> # TODO: Initialize `snapshot_id`: >>> snapshot_id = '' >>> - >>> # TODO: Initialize `description`: - >>> description = '' - >>> - >>> response = client.snapshot_table(name, cluster, snapshot_id, description) + >>> response = client.snapshot_table(name, cluster, snapshot_id) >>> >>> def callback(operation_future): ... 
# Handle result. @@ -1332,16 +1348,16 @@ def snapshot_table( >>> metadata = response.metadata() Args: - name (str): The unique name of the table to have the snapshot taken. Values are of - the form ``projects//instances//tables/
``. - cluster (str): The name of the cluster where the snapshot will be created in. Values - are of the form - ``projects//instances//clusters/``. - snapshot_id (str): The ID by which the new snapshot should be referred to within the parent - cluster, e.g., ``mysnapshot`` of the form: + name (str): Required. The unique name of the table to have the snapshot taken. + Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + cluster (str): Required. The name of the cluster where the snapshot will be created + in. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + snapshot_id (str): Required. The ID by which the new snapshot should be referred to + within the parent cluster, e.g., ``mysnapshot`` of the form: ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*`` rather than - ``projects//instances//clusters//snapshots/mysnapshot``. - description (str): Description of the snapshot. + ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/mysnapshot``. ttl (Union[dict, ~google.cloud.bigtable_admin_v2.types.Duration]): The amount of time that the new snapshot can stay active after it is created. Once 'ttl' expires, the snapshot will get deleted. The maximum amount of time a snapshot can stay active is 7 days. If 'ttl' is not @@ -1349,6 +1365,7 @@ def snapshot_table( If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_admin_v2.types.Duration` + description (str): Description of the snapshot. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -1383,8 +1400,8 @@ def snapshot_table( name=name, cluster=cluster, snapshot_id=snapshot_id, - description=description, ttl=ttl, + description=description, ) if metadata is None: metadata = [] @@ -1435,8 +1452,9 @@ def get_snapshot( >>> response = client.get_snapshot(name) Args: - name (str): The unique name of the requested snapshot. Values are of the form - ``projects//instances//clusters//snapshots/``. + name (str): Required. The unique name of the requested snapshot. Values are of + the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -1524,11 +1542,11 @@ def list_snapshots( ... pass Args: - parent (str): The unique name of the cluster for which snapshots should be listed. - Values are of the form - ``projects//instances//clusters/``. Use - `` = '-'`` to list snapshots for all clusters in an instance, - e.g., ``projects//instances//clusters/-``. + parent (str): Required. The unique name of the cluster for which snapshots should + be listed. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. Use + ``{cluster} = '-'`` to list snapshots for all clusters in an instance, + e.g., ``projects/{project}/instances/{instance}/clusters/-``. page_size (int): The maximum number of resources contained in the underlying API response. If page streaming is performed per- resource, this parameter does not affect the return value. If page @@ -1624,8 +1642,9 @@ def delete_snapshot( >>> client.delete_snapshot(name) Args: - name (str): The unique name of the snapshot to be deleted. Values are of the form - ``projects//instances//clusters//snapshots/``. + name (str): Required. 
The unique name of the snapshot to be deleted. Values are + of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -1670,3 +1689,604 @@ def delete_snapshot( self._inner_api_calls["delete_snapshot"]( request, retry=retry, timeout=timeout, metadata=metadata ) + + def create_backup( + self, + parent, + backup_id, + backup, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Starts creating a new Cloud Bigtable Backup. The returned backup + ``long-running operation`` can be used to track creation of the backup. + The ``metadata`` field type is ``CreateBackupMetadata``. The + ``response`` field type is ``Backup``, if successful. Cancelling the + returned operation will stop the creation and delete the backup. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableTableAdminClient() + >>> + >>> parent = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') + >>> + >>> # TODO: Initialize `backup_id`: + >>> backup_id = '' + >>> + >>> # TODO: Initialize `backup`: + >>> backup = {} + >>> + >>> response = client.create_backup(parent, backup_id, backup) + >>> + >>> def callback(operation_future): + ... # Handle result. + ... result = operation_future.result() + >>> + >>> response.add_done_callback(callback) + >>> + >>> # Handle metadata. + >>> metadata = response.metadata() + + Args: + parent (str): Required. This must be one of the clusters in the instance in which + this table is located. The backup will be stored in this cluster. Values + are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + backup_id (str): Required. The id of the backup to be created. The ``backup_id`` + along with the parent ``parent`` are combined as + {parent}/backups/{backup_id} to create the full backup name, of the + form: + ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``. + This string must be between 1 and 50 characters in length and match the + regex [*a-zA-Z0-9][-*.a-zA-Z0-9]*. + backup (Union[dict, ~google.cloud.bigtable_admin_v2.types.Backup]): Required. The backup to create. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_admin_v2.types.Backup` + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. 
+ if "create_backup" not in self._inner_api_calls: + self._inner_api_calls[ + "create_backup" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.create_backup, + default_retry=self._method_configs["CreateBackup"].retry, + default_timeout=self._method_configs["CreateBackup"].timeout, + client_info=self._client_info, + ) + + request = bigtable_table_admin_pb2.CreateBackupRequest( + parent=parent, backup_id=backup_id, backup=backup, + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("parent", parent)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + operation = self._inner_api_calls["create_backup"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + return google.api_core.operation.from_gapic( + operation, + self.transport._operations_client, + table_pb2.Backup, + metadata_type=bigtable_table_admin_pb2.CreateBackupMetadata, + ) + + def get_backup( + self, + name, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Gets metadata on a pending or completed Cloud Bigtable Backup. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableTableAdminClient() + >>> + >>> name = client.backup_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]', '[BACKUP]') + >>> + >>> response = client.get_backup(name) + + Args: + name (str): Required. Name of the backup. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types.Backup` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. 
+ if "get_backup" not in self._inner_api_calls: + self._inner_api_calls[ + "get_backup" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.get_backup, + default_retry=self._method_configs["GetBackup"].retry, + default_timeout=self._method_configs["GetBackup"].timeout, + client_info=self._client_info, + ) + + request = bigtable_table_admin_pb2.GetBackupRequest(name=name,) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("name", name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + return self._inner_api_calls["get_backup"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + + def list_backups( + self, + parent, + filter_=None, + order_by=None, + page_size=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Lists Cloud Bigtable backups. Returns both completed and pending + backups. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableTableAdminClient() + >>> + >>> parent = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') + >>> + >>> # Iterate over all results + >>> for element in client.list_backups(parent): + ... # process element + ... pass + >>> + >>> + >>> # Alternatively: + >>> + >>> # Iterate over results one page at a time + >>> for page in client.list_backups(parent).pages: + ... for element in page: + ... # process element + ... pass + + Args: + parent (str): Required. The cluster to list backups from. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. Use + ``{cluster} = '-'`` to list backups for all clusters in an instance, + e.g., ``projects/{project}/instances/{instance}/clusters/-``. + filter_ (str): A filter expression that filters backups listed in the response. The + expression must specify the field name, a comparison operator, and the + value that you want to use for filtering. The value must be a string, a + number, or a boolean. The comparison operator must be <, >, <=, >=, !=, + =, or :. Colon ‘:’ represents a HAS operator which is roughly synonymous + with equality. Filter rules are case insensitive. + + The fields eligible for filtering are: + + - ``name`` + - ``source_table`` + - ``state`` + - ``start_time`` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) + - ``end_time`` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) + - ``expire_time`` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) + - ``size_bytes`` + + To filter on multiple expressions, provide each separate expression + within parentheses. By default, each expression is an AND expression. + However, you can include AND, OR, and NOT expressions explicitly. + + Some examples of using filters are: + + - ``name:"exact"`` --> The backup's name is the string "exact". + - ``name:howl`` --> The backup's name contains the string "howl". + - ``source_table:prod`` --> The source_table's name contains the string + "prod". + - ``state:CREATING`` --> The backup is pending creation. + - ``state:READY`` --> The backup is fully created and ready for use. + - ``(name:howl) AND (start_time < \"2018-03-28T14:50:00Z\")`` --> The + backup name contains the string "howl" and start_time of the backup + is before 2018-03-28T14:50:00Z. 
+ - ``size_bytes > 10000000000`` --> The backup's size is greater than + 10GB + order_by (str): An expression for specifying the sort order of the results of the + request. The string value should specify one or more fields in + ``Backup``. The full syntax is described at + https://aip.dev/132#ordering. + + Fields supported are: \* name \* source_table \* expire_time \* + start_time \* end_time \* size_bytes \* state + + For example, "start_time". The default sorting order is ascending. To + specify descending order for the field, a suffix " desc" should be + appended to the field name. For example, "start_time desc". Redundant + space characters in the syntax are insigificant. + + If order_by is empty, results will be sorted by ``start_time`` in + descending order starting from the most recently created backup. + page_size (int): The maximum number of resources contained in the + underlying API response. If page streaming is performed per- + resource, this parameter does not affect the return value. If page + streaming is performed per-page, this determines the maximum number + of resources in a page. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.api_core.page_iterator.PageIterator` instance. + An iterable of :class:`~google.cloud.bigtable_admin_v2.types.Backup` instances. + You can also iterate over the pages of the response + using its `pages` property. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. + if "list_backups" not in self._inner_api_calls: + self._inner_api_calls[ + "list_backups" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.list_backups, + default_retry=self._method_configs["ListBackups"].retry, + default_timeout=self._method_configs["ListBackups"].timeout, + client_info=self._client_info, + ) + + request = bigtable_table_admin_pb2.ListBackupsRequest( + parent=parent, filter=filter_, order_by=order_by, page_size=page_size, + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("parent", parent)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + iterator = google.api_core.page_iterator.GRPCIterator( + client=None, + method=functools.partial( + self._inner_api_calls["list_backups"], + retry=retry, + timeout=timeout, + metadata=metadata, + ), + request=request, + items_field="backups", + request_token_field="page_token", + response_token_field="next_page_token", + ) + return iterator + + def update_backup( + self, + backup, + update_mask, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Updates a pending or completed Cloud Bigtable Backup. 
+ + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableTableAdminClient() + >>> + >>> # TODO: Initialize `backup`: + >>> backup = {} + >>> + >>> # TODO: Initialize `update_mask`: + >>> update_mask = {} + >>> + >>> response = client.update_backup(backup, update_mask) + + Args: + backup (Union[dict, ~google.cloud.bigtable_admin_v2.types.Backup]): Required. The backup to update. ``backup.name``, and the fields to + be updated as specified by ``update_mask`` are required. Other fields + are ignored. Update is only supported for the following fields: + + - ``backup.expire_time``. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_admin_v2.types.Backup` + update_mask (Union[dict, ~google.cloud.bigtable_admin_v2.types.FieldMask]): Required. A mask specifying which fields (e.g. ``expire_time``) in + the Backup resource should be updated. This mask is relative to the + Backup resource, not to the request message. The field mask must always + be specified; this prevents any future fields from being erased + accidentally by clients that do not know about them. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.bigtable_admin_v2.types.FieldMask` + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types.Backup` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. + if "update_backup" not in self._inner_api_calls: + self._inner_api_calls[ + "update_backup" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.update_backup, + default_retry=self._method_configs["UpdateBackup"].retry, + default_timeout=self._method_configs["UpdateBackup"].timeout, + client_info=self._client_info, + ) + + request = bigtable_table_admin_pb2.UpdateBackupRequest( + backup=backup, update_mask=update_mask, + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("backup.name", backup.name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + return self._inner_api_calls["update_backup"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + + def delete_backup( + self, + name, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Deletes a pending or completed Cloud Bigtable backup. 
+ + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableTableAdminClient() + >>> + >>> name = client.backup_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]', '[BACKUP]') + >>> + >>> client.delete_backup(name) + + Args: + name (str): Required. Name of the backup to delete. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. + if "delete_backup" not in self._inner_api_calls: + self._inner_api_calls[ + "delete_backup" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.delete_backup, + default_retry=self._method_configs["DeleteBackup"].retry, + default_timeout=self._method_configs["DeleteBackup"].timeout, + client_info=self._client_info, + ) + + request = bigtable_table_admin_pb2.DeleteBackupRequest(name=name,) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("name", name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + self._inner_api_calls["delete_backup"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + + def restore_table( + self, + parent=None, + table_id=None, + backup=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Create a new table by restoring from a completed backup. The new + table must be in the same instance as the instance containing the + backup. The returned table ``long-running operation`` can be used to + track the progress of the operation, and to cancel it. The ``metadata`` + field type is ``RestoreTableMetadata``. The ``response`` type is + ``Table``, if successful. + + Example: + >>> from google.cloud import bigtable_admin_v2 + >>> + >>> client = bigtable_admin_v2.BigtableTableAdminClient() + >>> + >>> response = client.restore_table() + >>> + >>> def callback(operation_future): + ... # Handle result. + ... result = operation_future.result() + >>> + >>> response.add_done_callback(callback) + >>> + >>> # Handle metadata. + >>> metadata = response.metadata() + + Args: + parent (str): Required. The name of the instance in which to create the restored + table. This instance must be the parent of the source backup. Values are + of the form ``projects//instances/``. + table_id (str): Required. The id of the table to create and restore to. This table + must not already exist. The ``table_id`` appended to ``parent`` forms + the full table name of the form + ``projects//instances//tables/``. + backup (str): Name of the backup from which to restore. 
Values are of the form + ``projects//instances//clusters//backups/``. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. + if "restore_table" not in self._inner_api_calls: + self._inner_api_calls[ + "restore_table" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.restore_table, + default_retry=self._method_configs["RestoreTable"].retry, + default_timeout=self._method_configs["RestoreTable"].timeout, + client_info=self._client_info, + ) + + # Sanity check: We have some fields which are mutually exclusive; + # raise ValueError if more than one is sent. + google.api_core.protobuf_helpers.check_oneof(backup=backup,) + + request = bigtable_table_admin_pb2.RestoreTableRequest( + parent=parent, table_id=table_id, backup=backup, + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("parent", parent)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + operation = self._inner_api_calls["restore_table"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + return google.api_core.operation.from_gapic( + operation, + self.transport._operations_client, + table_pb2.Table, + metadata_type=bigtable_table_admin_pb2.RestoreTableMetadata, + ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py index 5e63380ae091..db60047bd5a4 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py @@ -124,6 +124,36 @@ "retry_codes_name": "non_idempotent", "retry_params_name": "non_idempotent_params", }, + "CreateBackup": { + "timeout_millis": 60000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "non_idempotent_params", + }, + "GetBackup": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "idempotent_params", + }, + "ListBackups": { + "timeout_millis": 60000, + "retry_codes_name": "idempotent", + "retry_params_name": "idempotent_params", + }, + "UpdateBackup": { + "timeout_millis": 60000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "non_idempotent_params", + }, + "DeleteBackup": { + "timeout_millis": 60000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "non_idempotent_params", + }, + "RestoreTable": { + "timeout_millis": 60000, + "retry_codes_name": "non_idempotent", + 
"retry_params_name": "non_idempotent_params", + }, }, } } diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py index 68f25f989ba7..c71bee34bdde 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py @@ -19,6 +19,19 @@ import enum +class RestoreSourceType(enum.IntEnum): + """ + Indicates the type of the restore source. + + Attributes: + RESTORE_SOURCE_TYPE_UNSPECIFIED (int): No restore associated. + BACKUP (int): A backup was used as the source of the restore. + """ + + RESTORE_SOURCE_TYPE_UNSPECIFIED = 0 + BACKUP = 1 + + class StorageType(enum.IntEnum): """ Storage media types for persisting Bigtable data. @@ -34,6 +47,23 @@ class StorageType(enum.IntEnum): HDD = 2 +class Backup(object): + class State(enum.IntEnum): + """ + Indicates the current state of the backup. + + Attributes: + STATE_UNSPECIFIED (int): Not specified. + CREATING (int): The pending backup is still being created. Operations on the backup + may fail with ``FAILED_PRECONDITION`` in this state. + READY (int): The backup is complete and ready for use. + """ + + STATE_UNSPECIFIED = 0 + CREATING = 1 + READY = 2 + + class Cluster(object): class State(enum.IntEnum): """ @@ -86,11 +116,11 @@ class Type(enum.IntEnum): TYPE_UNSPECIFIED (int): The type of the instance is unspecified. If set when creating an instance, a ``PRODUCTION`` instance will be created. If set when updating an instance, the type will be left unchanged. - PRODUCTION (int): An instance meant for production use. ``serve_nodes`` must be set on the - cluster. - DEVELOPMENT (int): The instance is meant for development and testing purposes only; it has - no performance or uptime guarantees and is not covered by SLA. After a - development instance is created, it can be upgraded by updating the + PRODUCTION (int): An instance meant for production use. ``serve_nodes`` must be set on + the cluster. + DEVELOPMENT (int): The instance is meant for development and testing purposes only; it + has no performance or uptime guarantees and is not covered by SLA. After + a development instance is created, it can be upgraded by updating the instance to type ``PRODUCTION``. An instance created as a production instance cannot be changed to a development instance. When creating a development instance, ``serve_nodes`` on the cluster must not be set. @@ -142,8 +172,8 @@ class View(enum.IntEnum): VIEW_UNSPECIFIED (int): Uses the default view for each method as documented in its request. NAME_ONLY (int): Only populates ``name``. SCHEMA_VIEW (int): Only populates ``name`` and fields related to the table's schema. - REPLICATION_VIEW (int): Only populates ``name`` and fields related to the table's replication - state. + REPLICATION_VIEW (int): Only populates ``name`` and fields related to the table's + replication state. FULL (int): Populates all fields. """ @@ -170,6 +200,9 @@ class ReplicationState(enum.IntEnum): READY (int): The table can serve Data API requests from this cluster. Depending on replication delay, reads may not immediately reflect the state of the table in other clusters. + READY_OPTIMIZING (int): The table is fully created and ready for use after a restore, and is + being optimized for performance. When optimizations are complete, the + table will transition to ``READY`` state. 
""" STATE_NOT_KNOWN = 0 @@ -177,3 +210,4 @@ class ReplicationState(enum.IntEnum): PLANNED_MAINTENANCE = 2 UNPLANNED_MAINTENANCE = 3 READY = 4 + READY_OPTIMIZING = 5 diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py index fa5bf0556a96..536629604260 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py @@ -170,7 +170,9 @@ def list_instances(self): def update_instance(self): """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.update_instance`. - Updates an instance within a project. + Updates an instance within a project. This method updates only the display + name and type for an Instance. To update other Instance properties, such as + labels, use PartialUpdateInstance. Returns: Callable: A callable which accepts the appropriate @@ -183,7 +185,8 @@ def update_instance(self): def partial_update_instance(self): """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.partial_update_instance`. - Partially updates an instance within a project. + Partially updates an instance within a project. This method can modify all + fields of an Instance and is the preferred way to update an Instance. Returns: Callable: A callable which accepts the appropriate diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py index d8a5bfee0d74..281bad20a253 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py @@ -267,8 +267,9 @@ def check_consistency(self): def get_iam_policy(self): """Return the gRPC stub for :meth:`BigtableTableAdminClient.get_iam_policy`. - Gets the access control policy for a table resource. Returns an empty - policy if an table exists but does not have a policy set. + Gets the access control policy for a resource. + Returns an empty policy if the resource exists but does not have a policy + set. Returns: Callable: A callable which accepts the appropriate @@ -281,8 +282,8 @@ def get_iam_policy(self): def set_iam_policy(self): """Return the gRPC stub for :meth:`BigtableTableAdminClient.set_iam_policy`. - Sets the access control policy on a table resource. Replaces any existing - policy. + Sets the access control policy on a Table or Backup resource. + Replaces any existing policy. Returns: Callable: A callable which accepts the appropriate @@ -380,3 +381,91 @@ def delete_snapshot(self): deserialized response object. """ return self._stubs["bigtable_table_admin_stub"].DeleteSnapshot + + @property + def create_backup(self): + """Return the gRPC stub for :meth:`BigtableTableAdminClient.create_backup`. + + Starts creating a new Cloud Bigtable Backup. The returned backup + ``long-running operation`` can be used to track creation of the backup. + The ``metadata`` field type is ``CreateBackupMetadata``. The + ``response`` field type is ``Backup``, if successful. 
Cancelling the + returned operation will stop the creation and delete the backup. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["bigtable_table_admin_stub"].CreateBackup + + @property + def get_backup(self): + """Return the gRPC stub for :meth:`BigtableTableAdminClient.get_backup`. + + Gets metadata on a pending or completed Cloud Bigtable Backup. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["bigtable_table_admin_stub"].GetBackup + + @property + def list_backups(self): + """Return the gRPC stub for :meth:`BigtableTableAdminClient.list_backups`. + + Lists Cloud Bigtable backups. Returns both completed and pending + backups. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["bigtable_table_admin_stub"].ListBackups + + @property + def update_backup(self): + """Return the gRPC stub for :meth:`BigtableTableAdminClient.update_backup`. + + Updates a pending or completed Cloud Bigtable Backup. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["bigtable_table_admin_stub"].UpdateBackup + + @property + def delete_backup(self): + """Return the gRPC stub for :meth:`BigtableTableAdminClient.delete_backup`. + + Deletes a pending or completed Cloud Bigtable backup. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["bigtable_table_admin_stub"].DeleteBackup + + @property + def restore_table(self): + """Return the gRPC stub for :meth:`BigtableTableAdminClient.restore_table`. + + Create a new table by restoring from a completed backup. The new + table must be in the same instance as the instance containing the + backup. The returned table ``long-running operation`` can be used to + track the progress of the operation, and to cancel it. The ``metadata`` + field type is ``RestoreTableMetadata``. The ``response`` type is + ``Table``, if successful. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["bigtable_table_admin_stub"].RestoreTable diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto index 80ce42470736..8e05bfd0fbae 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto @@ -1,4 +1,4 @@ -// Copyright 2018 Google LLC. +// Copyright 2019 Google LLC. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
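A hedged lifecycle sketch tying together the backup stubs and retry configuration added above (CreateBackup, GetBackup, DeleteBackup): the resource IDs are placeholders, and the Backup message fields used here (source_table, expire_time) come from the admin API's Backup message rather than from this hunk, so treat them as assumptions.

    import time

    from google.cloud import bigtable_admin_v2
    from google.cloud.bigtable_admin_v2.gapic import enums

    client = bigtable_admin_v2.BigtableTableAdminClient()
    parent = client.cluster_path("my-project", "my-instance", "my-cluster")
    source = client.table_path("my-project", "my-instance", "my-table")

    # Backups must carry an expiration; a protobuf Timestamp field accepts
    # {"seconds": ...} when the message is built from a dict.
    expire_seconds = int(time.time()) + 7 * 24 * 3600
    backup = {"source_table": source, "expire_time": {"seconds": expire_seconds}}

    # CreateBackup is a long-running operation; result() waits for completion.
    operation = client.create_backup(parent, "my-backup", backup)
    operation.result()

    # GetBackup reports progress via the Backup.State enum added in this change.
    name = client.backup_path("my-project", "my-instance", "my-cluster", "my-backup")
    fetched = client.get_backup(name)
    if fetched.state == enums.Backup.State.READY:
        client.delete_backup(fetched.name)
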
@@ -18,6 +18,9 @@ syntax = "proto3"; package google.bigtable.admin.v2; import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; import "google/bigtable/admin/v2/instance.proto"; import "google/iam/v1/iam_policy.proto"; import "google/iam/v1/policy.proto"; @@ -37,13 +40,27 @@ option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2"; // Clusters. Provides access to the Instance and Cluster schemas only, not the // tables' metadata or data stored in those tables. service BigtableInstanceAdmin { + option (google.api.default_host) = "bigtableadmin.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/bigtable.admin," + "https://www.googleapis.com/auth/bigtable.admin.cluster," + "https://www.googleapis.com/auth/bigtable.admin.instance," + "https://www.googleapis.com/auth/cloud-bigtable.admin," + "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster," + "https://www.googleapis.com/auth/cloud-platform," + "https://www.googleapis.com/auth/cloud-platform.read-only"; + // Create an instance within a project. - rpc CreateInstance(CreateInstanceRequest) - returns (google.longrunning.Operation) { + rpc CreateInstance(CreateInstanceRequest) returns (google.longrunning.Operation) { option (google.api.http) = { post: "/v2/{parent=projects/*}/instances" body: "*" }; + option (google.api.method_signature) = "parent,instance_id,instance,clusters"; + option (google.longrunning.operation_info) = { + response_type: "Instance" + metadata_type: "CreateInstanceMetadata" + }; } // Gets information about an instance. @@ -51,6 +68,7 @@ service BigtableInstanceAdmin { option (google.api.http) = { get: "/v2/{name=projects/*/instances/*}" }; + option (google.api.method_signature) = "name"; } // Lists information about instances in a project. @@ -58,9 +76,12 @@ service BigtableInstanceAdmin { option (google.api.http) = { get: "/v2/{parent=projects/*}/instances" }; + option (google.api.method_signature) = "parent"; } - // Updates an instance within a project. + // Updates an instance within a project. This method updates only the display + // name and type for an Instance. To update other Instance properties, such as + // labels, use PartialUpdateInstance. rpc UpdateInstance(Instance) returns (Instance) { option (google.api.http) = { put: "/v2/{name=projects/*/instances/*}" @@ -68,13 +89,18 @@ service BigtableInstanceAdmin { }; } - // Partially updates an instance within a project. - rpc PartialUpdateInstance(PartialUpdateInstanceRequest) - returns (google.longrunning.Operation) { + // Partially updates an instance within a project. This method can modify all + // fields of an Instance and is the preferred way to update an Instance. + rpc PartialUpdateInstance(PartialUpdateInstanceRequest) returns (google.longrunning.Operation) { option (google.api.http) = { patch: "/v2/{instance.name=projects/*/instances/*}" body: "instance" }; + option (google.api.method_signature) = "instance,update_mask"; + option (google.longrunning.operation_info) = { + response_type: "Instance" + metadata_type: "UpdateInstanceMetadata" + }; } // Delete an instance from a project. @@ -82,15 +108,20 @@ service BigtableInstanceAdmin { option (google.api.http) = { delete: "/v2/{name=projects/*/instances/*}" }; + option (google.api.method_signature) = "name"; } // Creates a cluster within an instance. 
- rpc CreateCluster(CreateClusterRequest) - returns (google.longrunning.Operation) { + rpc CreateCluster(CreateClusterRequest) returns (google.longrunning.Operation) { option (google.api.http) = { post: "/v2/{parent=projects/*/instances/*}/clusters" body: "cluster" }; + option (google.api.method_signature) = "parent,cluster_id,cluster"; + option (google.longrunning.operation_info) = { + response_type: "Cluster" + metadata_type: "CreateClusterMetadata" + }; } // Gets information about a cluster. @@ -98,6 +129,7 @@ service BigtableInstanceAdmin { option (google.api.http) = { get: "/v2/{name=projects/*/instances/*/clusters/*}" }; + option (google.api.method_signature) = "name"; } // Lists information about clusters in an instance. @@ -105,6 +137,7 @@ service BigtableInstanceAdmin { option (google.api.http) = { get: "/v2/{parent=projects/*/instances/*}/clusters" }; + option (google.api.method_signature) = "parent"; } // Updates a cluster within an instance. @@ -113,6 +146,10 @@ service BigtableInstanceAdmin { put: "/v2/{name=projects/*/instances/*/clusters/*}" body: "*" }; + option (google.longrunning.operation_info) = { + response_type: "Cluster" + metadata_type: "UpdateClusterMetadata" + }; } // Deletes a cluster from an instance. @@ -120,6 +157,7 @@ service BigtableInstanceAdmin { option (google.api.http) = { delete: "/v2/{name=projects/*/instances/*/clusters/*}" }; + option (google.api.method_signature) = "name"; } // Creates an app profile within an instance. @@ -128,6 +166,7 @@ service BigtableInstanceAdmin { post: "/v2/{parent=projects/*/instances/*}/appProfiles" body: "app_profile" }; + option (google.api.method_signature) = "parent,app_profile_id,app_profile"; } // Gets information about an app profile. @@ -135,98 +174,118 @@ service BigtableInstanceAdmin { option (google.api.http) = { get: "/v2/{name=projects/*/instances/*/appProfiles/*}" }; + option (google.api.method_signature) = "name"; } // Lists information about app profiles in an instance. - rpc ListAppProfiles(ListAppProfilesRequest) - returns (ListAppProfilesResponse) { + rpc ListAppProfiles(ListAppProfilesRequest) returns (ListAppProfilesResponse) { option (google.api.http) = { get: "/v2/{parent=projects/*/instances/*}/appProfiles" }; + option (google.api.method_signature) = "parent"; } // Updates an app profile within an instance. - rpc UpdateAppProfile(UpdateAppProfileRequest) - returns (google.longrunning.Operation) { + rpc UpdateAppProfile(UpdateAppProfileRequest) returns (google.longrunning.Operation) { option (google.api.http) = { patch: "/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}" body: "app_profile" }; + option (google.api.method_signature) = "app_profile,update_mask"; + option (google.longrunning.operation_info) = { + response_type: "AppProfile" + metadata_type: "UpdateAppProfileMetadata" + }; } // Deletes an app profile from an instance. - rpc DeleteAppProfile(DeleteAppProfileRequest) - returns (google.protobuf.Empty) { + rpc DeleteAppProfile(DeleteAppProfileRequest) returns (google.protobuf.Empty) { option (google.api.http) = { delete: "/v2/{name=projects/*/instances/*/appProfiles/*}" }; + option (google.api.method_signature) = "name"; } // Gets the access control policy for an instance resource. Returns an empty // policy if an instance exists but does not have a policy set. 
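Because this hunk also touches the instance-level IAM RPCs, a short hedged sketch of exercising them from the Python instance-admin client follows; the resource names and permission strings are illustrative assumptions, not values taken from the patch.

    from google.cloud import bigtable_admin_v2

    client = bigtable_admin_v2.BigtableInstanceAdminClient()
    resource = client.instance_path("my-project", "my-instance")

    # GetIamPolicy returns an empty policy when none has been set explicitly.
    policy = client.get_iam_policy(resource)
    print(policy.bindings)

    # TestIamPermissions reports which of the supplied permissions the caller holds.
    response = client.test_iam_permissions(
        resource, ["bigtable.instances.get", "bigtable.clusters.list"]
    )
    print(response.permissions)
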
- rpc GetIamPolicy(google.iam.v1.GetIamPolicyRequest) - returns (google.iam.v1.Policy) { + rpc GetIamPolicy(google.iam.v1.GetIamPolicyRequest) returns (google.iam.v1.Policy) { option (google.api.http) = { post: "/v2/{resource=projects/*/instances/*}:getIamPolicy" body: "*" }; + option (google.api.method_signature) = "resource"; } // Sets the access control policy on an instance resource. Replaces any // existing policy. - rpc SetIamPolicy(google.iam.v1.SetIamPolicyRequest) - returns (google.iam.v1.Policy) { + rpc SetIamPolicy(google.iam.v1.SetIamPolicyRequest) returns (google.iam.v1.Policy) { option (google.api.http) = { post: "/v2/{resource=projects/*/instances/*}:setIamPolicy" body: "*" }; + option (google.api.method_signature) = "resource,policy"; } // Returns permissions that the caller has on the specified instance resource. - rpc TestIamPermissions(google.iam.v1.TestIamPermissionsRequest) - returns (google.iam.v1.TestIamPermissionsResponse) { + rpc TestIamPermissions(google.iam.v1.TestIamPermissionsRequest) returns (google.iam.v1.TestIamPermissionsResponse) { option (google.api.http) = { post: "/v2/{resource=projects/*/instances/*}:testIamPermissions" body: "*" }; + option (google.api.method_signature) = "resource,permissions"; } } // Request message for BigtableInstanceAdmin.CreateInstance. message CreateInstanceRequest { - // The unique name of the project in which to create the new instance. - // Values are of the form `projects/`. - string parent = 1; - - // The ID to be used when referring to the new instance within its project, + // Required. The unique name of the project in which to create the new instance. + // Values are of the form `projects/{project}`. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "cloudresourcemanager.googleapis.com/Project" + } + ]; + + // Required. The ID to be used when referring to the new instance within its project, // e.g., just `myinstance` rather than // `projects/myproject/instances/myinstance`. - string instance_id = 2; + string instance_id = 2 [(google.api.field_behavior) = REQUIRED]; - // The instance to create. + // Required. The instance to create. // Fields marked `OutputOnly` must be left blank. - Instance instance = 3; + Instance instance = 3 [(google.api.field_behavior) = REQUIRED]; - // The clusters to be created within the instance, mapped by desired + // Required. The clusters to be created within the instance, mapped by desired // cluster ID, e.g., just `mycluster` rather than // `projects/myproject/instances/myinstance/clusters/mycluster`. // Fields marked `OutputOnly` must be left blank. - // Currently, at most two clusters can be specified. - map clusters = 4; + // Currently, at most four clusters can be specified. + map clusters = 4 [(google.api.field_behavior) = REQUIRED]; } // Request message for BigtableInstanceAdmin.GetInstance. message GetInstanceRequest { - // The unique name of the requested instance. Values are of the form - // `projects//instances/`. - string name = 1; + // Required. The unique name of the requested instance. Values are of the form + // `projects/{project}/instances/{instance}`. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Instance" + } + ]; } // Request message for BigtableInstanceAdmin.ListInstances. message ListInstancesRequest { - // The unique name of the project for which a list of instances is requested. - // Values are of the form `projects/`. 
- string parent = 1; + // Required. The unique name of the project for which a list of instances is requested. + // Values are of the form `projects/{project}`. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "cloudresourcemanager.googleapis.com/Project" + } + ]; // DEPRECATED: This field is unused and ignored. string page_token = 2; @@ -251,52 +310,72 @@ message ListInstancesResponse { // Request message for BigtableInstanceAdmin.PartialUpdateInstance. message PartialUpdateInstanceRequest { - // The Instance which will (partially) replace the current value. - Instance instance = 1; + // Required. The Instance which will (partially) replace the current value. + Instance instance = 1 [(google.api.field_behavior) = REQUIRED]; - // The subset of Instance fields which should be replaced. + // Required. The subset of Instance fields which should be replaced. // Must be explicitly set. - google.protobuf.FieldMask update_mask = 2; + google.protobuf.FieldMask update_mask = 2 [(google.api.field_behavior) = REQUIRED]; } // Request message for BigtableInstanceAdmin.DeleteInstance. message DeleteInstanceRequest { - // The unique name of the instance to be deleted. - // Values are of the form `projects//instances/`. - string name = 1; + // Required. The unique name of the instance to be deleted. + // Values are of the form `projects/{project}/instances/{instance}`. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Instance" + } + ]; } // Request message for BigtableInstanceAdmin.CreateCluster. message CreateClusterRequest { - // The unique name of the instance in which to create the new cluster. + // Required. The unique name of the instance in which to create the new cluster. // Values are of the form - // `projects//instances/`. - string parent = 1; - - // The ID to be used when referring to the new cluster within its instance, + // `projects/{project}/instances/{instance}`. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Instance" + } + ]; + + // Required. The ID to be used when referring to the new cluster within its instance, // e.g., just `mycluster` rather than // `projects/myproject/instances/myinstance/clusters/mycluster`. - string cluster_id = 2; + string cluster_id = 2 [(google.api.field_behavior) = REQUIRED]; - // The cluster to be created. + // Required. The cluster to be created. // Fields marked `OutputOnly` must be left blank. - Cluster cluster = 3; + Cluster cluster = 3 [(google.api.field_behavior) = REQUIRED]; } // Request message for BigtableInstanceAdmin.GetCluster. message GetClusterRequest { - // The unique name of the requested cluster. Values are of the form - // `projects//instances//clusters/`. - string name = 1; + // Required. The unique name of the requested cluster. Values are of the form + // `projects/{project}/instances/{instance}/clusters/{cluster}`. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Cluster" + } + ]; } // Request message for BigtableInstanceAdmin.ListClusters. message ListClustersRequest { - // The unique name of the instance for which a list of clusters is requested. - // Values are of the form `projects//instances/`. - // Use ` = '-'` to list Clusters for all Instances in a project, + // Required. 
The unique name of the instance for which a list of clusters is requested. + // Values are of the form `projects/{project}/instances/{instance}`. + // Use `{instance} = '-'` to list Clusters for all Instances in a project, // e.g., `projects/myproject/instances/-`. - string parent = 1; + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Instance" + } + ]; // DEPRECATED: This field is unused and ignored. string page_token = 2; @@ -320,9 +399,14 @@ message ListClustersResponse { // Request message for BigtableInstanceAdmin.DeleteCluster. message DeleteClusterRequest { - // The unique name of the cluster to be deleted. Values are of the form - // `projects//instances//clusters/`. - string name = 1; + // Required. The unique name of the cluster to be deleted. Values are of the form + // `projects/{project}/instances/{instance}/clusters/{cluster}`. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Cluster" + } + ]; } // The metadata for the Operation returned by CreateInstance. @@ -375,19 +459,24 @@ message UpdateClusterMetadata { // Request message for BigtableInstanceAdmin.CreateAppProfile. message CreateAppProfileRequest { - // The unique name of the instance in which to create the new app profile. + // Required. The unique name of the instance in which to create the new app profile. // Values are of the form - // `projects//instances/`. - string parent = 1; - - // The ID to be used when referring to the new app profile within its + // `projects/{project}/instances/{instance}`. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Instance" + } + ]; + + // Required. The ID to be used when referring to the new app profile within its // instance, e.g., just `myprofile` rather than // `projects/myproject/instances/myinstance/appProfiles/myprofile`. - string app_profile_id = 2; + string app_profile_id = 2 [(google.api.field_behavior) = REQUIRED]; - // The app profile to be created. + // Required. The app profile to be created. // Fields marked `OutputOnly` will be ignored. - AppProfile app_profile = 3; + AppProfile app_profile = 3 [(google.api.field_behavior) = REQUIRED]; // If true, ignore safety checks when creating the app profile. bool ignore_warnings = 4; @@ -395,22 +484,39 @@ message CreateAppProfileRequest { // Request message for BigtableInstanceAdmin.GetAppProfile. message GetAppProfileRequest { - // The unique name of the requested app profile. Values are of the form - // `projects//instances//appProfiles/`. - string name = 1; + // Required. The unique name of the requested app profile. Values are of the form + // `projects/{project}/instances/{instance}/appProfiles/{app_profile}`. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/AppProfile" + } + ]; } // Request message for BigtableInstanceAdmin.ListAppProfiles. message ListAppProfilesRequest { - // The unique name of the instance for which a list of app profiles is + // Required. The unique name of the instance for which a list of app profiles is // requested. Values are of the form - // `projects//instances/`. - // Use ` = '-'` to list AppProfiles for all Instances in a project, + // `projects/{project}/instances/{instance}`. 
+ // Use `{instance} = '-'` to list AppProfiles for all Instances in a project, // e.g., `projects/myproject/instances/-`. - string parent = 1; + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Instance" + } + ]; // Maximum number of results per page. - // CURRENTLY UNIMPLEMENTED AND IGNORED. + // + // A page_size of zero lets the server choose the number of items to return. + // A page_size which is strictly positive will return at most that many items. + // A negative page_size will cause an error. + // + // Following the first request, subsequent paginated calls are not required + // to pass a page_size. If a page_size is set in subsequent calls, it must + // match the page_size given in the first request. int32 page_size = 3; // The value of `next_page_token` returned by a previous call. @@ -436,12 +542,12 @@ message ListAppProfilesResponse { // Request message for BigtableInstanceAdmin.UpdateAppProfile. message UpdateAppProfileRequest { - // The app profile which will (partially) replace the current value. - AppProfile app_profile = 1; + // Required. The app profile which will (partially) replace the current value. + AppProfile app_profile = 1 [(google.api.field_behavior) = REQUIRED]; - // The subset of app profile fields which should be replaced. + // Required. The subset of app profile fields which should be replaced. // If unset, all fields will be replaced. - google.protobuf.FieldMask update_mask = 2; + google.protobuf.FieldMask update_mask = 2 [(google.api.field_behavior) = REQUIRED]; // If true, ignore safety checks when updating the app profile. bool ignore_warnings = 3; @@ -449,9 +555,14 @@ message UpdateAppProfileRequest { // Request message for BigtableInstanceAdmin.DeleteAppProfile. message DeleteAppProfileRequest { - // The unique name of the app profile to be deleted. Values are of the form - // `projects//instances//appProfiles/`. - string name = 1; + // Required. The unique name of the app profile to be deleted. Values are of the form + // `projects/{project}/instances/{instance}/appProfiles/{app_profile}`. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/AppProfile" + } + ]; // If true, ignore safety checks when deleting the app profile. bool ignore_warnings = 2; diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py index 540e8c91b83b..dbe5a8fd81c3 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py @@ -1,10 +1,7 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: google/cloud/bigtable/admin_v2/proto/bigtable_instance_admin.proto +# source: google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -16,8 +13,11 @@ from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 +from google.api import client_pb2 as google_dot_api_dot_client__pb2 +from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 +from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 from google.cloud.bigtable_admin_v2.proto import ( - instance_pb2 as google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2, + instance_pb2 as google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2, ) from google.iam.v1 import iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2 from google.iam.v1 import policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2 @@ -30,18 +30,18 @@ DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/bigtable/admin_v2/proto/bigtable_instance_admin.proto", + name="google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto", package="google.bigtable.admin.v2", syntax="proto3", - serialized_options=_b( - "\n\034com.google.bigtable.admin.v2B\032BigtableInstanceAdminProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2" - ), - serialized_pb=_b( - '\nBgoogle/cloud/bigtable/admin_v2/proto/bigtable_instance_admin.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x33google/cloud/bigtable/admin_v2/proto/instance.proto\x1a\x1egoogle/iam/v1/iam_policy.proto\x1a\x1agoogle/iam/v1/policy.proto\x1a#google/longrunning/operations.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\x97\x02\n\x15\x43reateInstanceRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x13\n\x0binstance_id\x18\x02 \x01(\t\x12\x34\n\x08instance\x18\x03 \x01(\x0b\x32".google.bigtable.admin.v2.Instance\x12O\n\x08\x63lusters\x18\x04 \x03(\x0b\x32=.google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry\x1aR\n\rClustersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x30\n\x05value\x18\x02 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster:\x02\x38\x01""\n\x12GetInstanceRequest\x12\x0c\n\x04name\x18\x01 \x01(\t":\n\x14ListInstancesRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x12\n\npage_token\x18\x02 \x01(\t"\x81\x01\n\x15ListInstancesResponse\x12\x35\n\tinstances\x18\x01 \x03(\x0b\x32".google.bigtable.admin.v2.Instance\x12\x18\n\x10\x66\x61iled_locations\x18\x02 \x03(\t\x12\x17\n\x0fnext_page_token\x18\x03 \x01(\t"\x85\x01\n\x1cPartialUpdateInstanceRequest\x12\x34\n\x08instance\x18\x01 \x01(\x0b\x32".google.bigtable.admin.v2.Instance\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask"%\n\x15\x44\x65leteInstanceRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"n\n\x14\x43reateClusterRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x12\n\ncluster_id\x18\x02 \x01(\t\x12\x32\n\x07\x63luster\x18\x03 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster"!\n\x11GetClusterRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"9\n\x13ListClustersRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x12\n\npage_token\x18\x02 
\x01(\t"~\n\x14ListClustersResponse\x12\x33\n\x08\x63lusters\x18\x01 \x03(\x0b\x32!.google.bigtable.admin.v2.Cluster\x12\x18\n\x10\x66\x61iled_locations\x18\x02 \x03(\t\x12\x17\n\x0fnext_page_token\x18\x03 \x01(\t"$\n\x14\x44\x65leteClusterRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"\xc6\x01\n\x16\x43reateInstanceMetadata\x12I\n\x10original_request\x18\x01 \x01(\x0b\x32/.google.bigtable.admin.v2.CreateInstanceRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xcd\x01\n\x16UpdateInstanceMetadata\x12P\n\x10original_request\x18\x01 \x01(\x0b\x32\x36.google.bigtable.admin.v2.PartialUpdateInstanceRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xc4\x01\n\x15\x43reateClusterMetadata\x12H\n\x10original_request\x18\x01 \x01(\x0b\x32..google.bigtable.admin.v2.CreateClusterRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xb7\x01\n\x15UpdateClusterMetadata\x12;\n\x10original_request\x18\x01 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\x95\x01\n\x17\x43reateAppProfileRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t\x12\x39\n\x0b\x61pp_profile\x18\x03 \x01(\x0b\x32$.google.bigtable.admin.v2.AppProfile\x12\x17\n\x0fignore_warnings\x18\x04 \x01(\x08"$\n\x14GetAppProfileRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"O\n\x16ListAppProfilesRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x11\n\tpage_size\x18\x03 \x01(\x05\x12\x12\n\npage_token\x18\x02 \x01(\t"\x88\x01\n\x17ListAppProfilesResponse\x12:\n\x0c\x61pp_profiles\x18\x01 \x03(\x0b\x32$.google.bigtable.admin.v2.AppProfile\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\x12\x18\n\x10\x66\x61iled_locations\x18\x03 \x03(\t"\x9e\x01\n\x17UpdateAppProfileRequest\x12\x39\n\x0b\x61pp_profile\x18\x01 \x01(\x0b\x32$.google.bigtable.admin.v2.AppProfile\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\x12\x17\n\x0fignore_warnings\x18\x03 \x01(\x08"@\n\x17\x44\x65leteAppProfileRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x17\n\x0fignore_warnings\x18\x02 
\x01(\x08"\x1a\n\x18UpdateAppProfileMetadata2\xaa\x17\n\x15\x42igtableInstanceAdmin\x12\x8e\x01\n\x0e\x43reateInstance\x12/.google.bigtable.admin.v2.CreateInstanceRequest\x1a\x1d.google.longrunning.Operation",\x82\xd3\xe4\x93\x02&"!/v2/{parent=projects/*}/instances:\x01*\x12\x8a\x01\n\x0bGetInstance\x12,.google.bigtable.admin.v2.GetInstanceRequest\x1a".google.bigtable.admin.v2.Instance")\x82\xd3\xe4\x93\x02#\x12!/v2/{name=projects/*/instances/*}\x12\x9b\x01\n\rListInstances\x12..google.bigtable.admin.v2.ListInstancesRequest\x1a/.google.bigtable.admin.v2.ListInstancesResponse")\x82\xd3\xe4\x93\x02#\x12!/v2/{parent=projects/*}/instances\x12\x86\x01\n\x0eUpdateInstance\x12".google.bigtable.admin.v2.Instance\x1a".google.bigtable.admin.v2.Instance",\x82\xd3\xe4\x93\x02&\x1a!/v2/{name=projects/*/instances/*}:\x01*\x12\xac\x01\n\x15PartialUpdateInstance\x12\x36.google.bigtable.admin.v2.PartialUpdateInstanceRequest\x1a\x1d.google.longrunning.Operation"<\x82\xd3\xe4\x93\x02\x36\x32*/v2/{instance.name=projects/*/instances/*}:\x08instance\x12\x84\x01\n\x0e\x44\x65leteInstance\x12/.google.bigtable.admin.v2.DeleteInstanceRequest\x1a\x16.google.protobuf.Empty")\x82\xd3\xe4\x93\x02#*!/v2/{name=projects/*/instances/*}\x12\x9d\x01\n\rCreateCluster\x12..google.bigtable.admin.v2.CreateClusterRequest\x1a\x1d.google.longrunning.Operation"=\x82\xd3\xe4\x93\x02\x37",/v2/{parent=projects/*/instances/*}/clusters:\x07\x63luster\x12\x92\x01\n\nGetCluster\x12+.google.bigtable.admin.v2.GetClusterRequest\x1a!.google.bigtable.admin.v2.Cluster"4\x82\xd3\xe4\x93\x02.\x12,/v2/{name=projects/*/instances/*/clusters/*}\x12\xa3\x01\n\x0cListClusters\x12-.google.bigtable.admin.v2.ListClustersRequest\x1a..google.bigtable.admin.v2.ListClustersResponse"4\x82\xd3\xe4\x93\x02.\x12,/v2/{parent=projects/*/instances/*}/clusters\x12\x8a\x01\n\rUpdateCluster\x12!.google.bigtable.admin.v2.Cluster\x1a\x1d.google.longrunning.Operation"7\x82\xd3\xe4\x93\x02\x31\x1a,/v2/{name=projects/*/instances/*/clusters/*}:\x01*\x12\x8d\x01\n\rDeleteCluster\x12..google.bigtable.admin.v2.DeleteClusterRequest\x1a\x16.google.protobuf.Empty"4\x82\xd3\xe4\x93\x02.*,/v2/{name=projects/*/instances/*/clusters/*}\x12\xb1\x01\n\x10\x43reateAppProfile\x12\x31.google.bigtable.admin.v2.CreateAppProfileRequest\x1a$.google.bigtable.admin.v2.AppProfile"D\x82\xd3\xe4\x93\x02>"//v2/{parent=projects/*/instances/*}/appProfiles:\x0b\x61pp_profile\x12\x9e\x01\n\rGetAppProfile\x12..google.bigtable.admin.v2.GetAppProfileRequest\x1a$.google.bigtable.admin.v2.AppProfile"7\x82\xd3\xe4\x93\x02\x31\x12//v2/{name=projects/*/instances/*/appProfiles/*}\x12\xaf\x01\n\x0fListAppProfiles\x12\x30.google.bigtable.admin.v2.ListAppProfilesRequest\x1a\x31.google.bigtable.admin.v2.ListAppProfilesResponse"7\x82\xd3\xe4\x93\x02\x31\x12//v2/{parent=projects/*/instances/*}/appProfiles\x12\xb6\x01\n\x10UpdateAppProfile\x12\x31.google.bigtable.admin.v2.UpdateAppProfileRequest\x1a\x1d.google.longrunning.Operation"P\x82\xd3\xe4\x93\x02J2;/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}:\x0b\x61pp_profile\x12\x96\x01\n\x10\x44\x65leteAppProfile\x12\x31.google.bigtable.admin.v2.DeleteAppProfileRequest\x1a\x16.google.protobuf.Empty"7\x82\xd3\xe4\x93\x02\x31*//v2/{name=projects/*/instances/*/appProfiles/*}\x12\x88\x01\n\x0cGetIamPolicy\x12".google.iam.v1.GetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"=\x82\xd3\xe4\x93\x02\x37"2/v2/{resource=projects/*/instances/*}:getIamPolicy:\x01*\x12\x88\x01\n\x0cSetIamPolicy\x12".google.iam.v1.SetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"=\x82\xd3\xe4\x9
3\x02\x37"2/v2/{resource=projects/*/instances/*}:setIamPolicy:\x01*\x12\xae\x01\n\x12TestIamPermissions\x12(.google.iam.v1.TestIamPermissionsRequest\x1a).google.iam.v1.TestIamPermissionsResponse"C\x82\xd3\xe4\x93\x02="8/v2/{resource=projects/*/instances/*}:testIamPermissions:\x01*B\xbd\x01\n\x1c\x63om.google.bigtable.admin.v2B\x1a\x42igtableInstanceAdminProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2b\x06proto3' - ), + serialized_options=b"\n\034com.google.bigtable.admin.v2B\032BigtableInstanceAdminProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2", + create_key=_descriptor._internal_create_key, + serialized_pb=b'\nBgoogle/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x33google/cloud/bigtable_admin_v2/proto/instance.proto\x1a\x1egoogle/iam/v1/iam_policy.proto\x1a\x1agoogle/iam/v1/policy.proto\x1a#google/longrunning/operations.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xdb\x02\n\x15\x43reateInstanceRequest\x12\x43\n\x06parent\x18\x01 \x01(\tB3\xe0\x41\x02\xfa\x41-\n+cloudresourcemanager.googleapis.com/Project\x12\x18\n\x0binstance_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x39\n\x08instance\x18\x03 \x01(\x0b\x32".google.bigtable.admin.v2.InstanceB\x03\xe0\x41\x02\x12T\n\x08\x63lusters\x18\x04 \x03(\x0b\x32=.google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntryB\x03\xe0\x41\x02\x1aR\n\rClustersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x30\n\x05value\x18\x02 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster:\x02\x38\x01"L\n\x12GetInstanceRequest\x12\x36\n\x04name\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance"o\n\x14ListInstancesRequest\x12\x43\n\x06parent\x18\x01 \x01(\tB3\xe0\x41\x02\xfa\x41-\n+cloudresourcemanager.googleapis.com/Project\x12\x12\n\npage_token\x18\x02 \x01(\t"\x81\x01\n\x15ListInstancesResponse\x12\x35\n\tinstances\x18\x01 \x03(\x0b\x32".google.bigtable.admin.v2.Instance\x12\x18\n\x10\x66\x61iled_locations\x18\x02 \x03(\t\x12\x17\n\x0fnext_page_token\x18\x03 \x01(\t"\x8f\x01\n\x1cPartialUpdateInstanceRequest\x12\x39\n\x08instance\x18\x01 \x01(\x0b\x32".google.bigtable.admin.v2.InstanceB\x03\xe0\x41\x02\x12\x34\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMaskB\x03\xe0\x41\x02"O\n\x15\x44\x65leteInstanceRequest\x12\x36\n\x04name\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance"\xa2\x01\n\x14\x43reateClusterRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x17\n\ncluster_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x37\n\x07\x63luster\x18\x03 \x01(\x0b\x32!.google.bigtable.admin.v2.ClusterB\x03\xe0\x41\x02"J\n\x11GetClusterRequest\x12\x35\n\x04name\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster"c\n\x13ListClustersRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x12\n\npage_token\x18\x02 \x01(\t"~\n\x14ListClustersResponse\x12\x33\n\x08\x63lusters\x18\x01 \x03(\x0b\x32!.google.bigtable.admin.v2.Cluster\x12\x18\n\x10\x66\x61iled_locations\x18\x02 
\x03(\t\x12\x17\n\x0fnext_page_token\x18\x03 \x01(\t"M\n\x14\x44\x65leteClusterRequest\x12\x35\n\x04name\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster"\xc6\x01\n\x16\x43reateInstanceMetadata\x12I\n\x10original_request\x18\x01 \x01(\x0b\x32/.google.bigtable.admin.v2.CreateInstanceRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xcd\x01\n\x16UpdateInstanceMetadata\x12P\n\x10original_request\x18\x01 \x01(\x0b\x32\x36.google.bigtable.admin.v2.PartialUpdateInstanceRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xc4\x01\n\x15\x43reateClusterMetadata\x12H\n\x10original_request\x18\x01 \x01(\x0b\x32..google.bigtable.admin.v2.CreateClusterRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xb7\x01\n\x15UpdateClusterMetadata\x12;\n\x10original_request\x18\x01 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xc9\x01\n\x17\x43reateAppProfileRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x1b\n\x0e\x61pp_profile_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12>\n\x0b\x61pp_profile\x18\x03 \x01(\x0b\x32$.google.bigtable.admin.v2.AppProfileB\x03\xe0\x41\x02\x12\x17\n\x0fignore_warnings\x18\x04 \x01(\x08"P\n\x14GetAppProfileRequest\x12\x38\n\x04name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n"bigtable.googleapis.com/AppProfile"y\n\x16ListAppProfilesRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x11\n\tpage_size\x18\x03 \x01(\x05\x12\x12\n\npage_token\x18\x02 \x01(\t"\x88\x01\n\x17ListAppProfilesResponse\x12:\n\x0c\x61pp_profiles\x18\x01 \x03(\x0b\x32$.google.bigtable.admin.v2.AppProfile\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\x12\x18\n\x10\x66\x61iled_locations\x18\x03 \x03(\t"\xa8\x01\n\x17UpdateAppProfileRequest\x12>\n\x0b\x61pp_profile\x18\x01 \x01(\x0b\x32$.google.bigtable.admin.v2.AppProfileB\x03\xe0\x41\x02\x12\x34\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMaskB\x03\xe0\x41\x02\x12\x17\n\x0fignore_warnings\x18\x03 \x01(\x08"l\n\x17\x44\x65leteAppProfileRequest\x12\x38\n\x04name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n"bigtable.googleapis.com/AppProfile\x12\x17\n\x0fignore_warnings\x18\x02 
\x01(\x08"\x1a\n\x18UpdateAppProfileMetadata2\x92\x1e\n\x15\x42igtableInstanceAdmin\x12\xda\x01\n\x0e\x43reateInstance\x12/.google.bigtable.admin.v2.CreateInstanceRequest\x1a\x1d.google.longrunning.Operation"x\x82\xd3\xe4\x93\x02&"!/v2/{parent=projects/*}/instances:\x01*\xda\x41$parent,instance_id,instance,clusters\xca\x41"\n\x08Instance\x12\x16\x43reateInstanceMetadata\x12\x91\x01\n\x0bGetInstance\x12,.google.bigtable.admin.v2.GetInstanceRequest\x1a".google.bigtable.admin.v2.Instance"0\x82\xd3\xe4\x93\x02#\x12!/v2/{name=projects/*/instances/*}\xda\x41\x04name\x12\xa4\x01\n\rListInstances\x12..google.bigtable.admin.v2.ListInstancesRequest\x1a/.google.bigtable.admin.v2.ListInstancesResponse"2\x82\xd3\xe4\x93\x02#\x12!/v2/{parent=projects/*}/instances\xda\x41\x06parent\x12\x86\x01\n\x0eUpdateInstance\x12".google.bigtable.admin.v2.Instance\x1a".google.bigtable.admin.v2.Instance",\x82\xd3\xe4\x93\x02&\x1a!/v2/{name=projects/*/instances/*}:\x01*\x12\xe8\x01\n\x15PartialUpdateInstance\x12\x36.google.bigtable.admin.v2.PartialUpdateInstanceRequest\x1a\x1d.google.longrunning.Operation"x\x82\xd3\xe4\x93\x02\x36\x32*/v2/{instance.name=projects/*/instances/*}:\x08instance\xda\x41\x14instance,update_mask\xca\x41"\n\x08Instance\x12\x16UpdateInstanceMetadata\x12\x8b\x01\n\x0e\x44\x65leteInstance\x12/.google.bigtable.admin.v2.DeleteInstanceRequest\x1a\x16.google.protobuf.Empty"0\x82\xd3\xe4\x93\x02#*!/v2/{name=projects/*/instances/*}\xda\x41\x04name\x12\xdc\x01\n\rCreateCluster\x12..google.bigtable.admin.v2.CreateClusterRequest\x1a\x1d.google.longrunning.Operation"|\x82\xd3\xe4\x93\x02\x37",/v2/{parent=projects/*/instances/*}/clusters:\x07\x63luster\xda\x41\x19parent,cluster_id,cluster\xca\x41 \n\x07\x43luster\x12\x15\x43reateClusterMetadata\x12\x99\x01\n\nGetCluster\x12+.google.bigtable.admin.v2.GetClusterRequest\x1a!.google.bigtable.admin.v2.Cluster";\x82\xd3\xe4\x93\x02.\x12,/v2/{name=projects/*/instances/*/clusters/*}\xda\x41\x04name\x12\xac\x01\n\x0cListClusters\x12-.google.bigtable.admin.v2.ListClustersRequest\x1a..google.bigtable.admin.v2.ListClustersResponse"=\x82\xd3\xe4\x93\x02.\x12,/v2/{parent=projects/*/instances/*}/clusters\xda\x41\x06parent\x12\xad\x01\n\rUpdateCluster\x12!.google.bigtable.admin.v2.Cluster\x1a\x1d.google.longrunning.Operation"Z\x82\xd3\xe4\x93\x02\x31\x1a,/v2/{name=projects/*/instances/*/clusters/*}:\x01*\xca\x41 
\n\x07\x43luster\x12\x15UpdateClusterMetadata\x12\x94\x01\n\rDeleteCluster\x12..google.bigtable.admin.v2.DeleteClusterRequest\x1a\x16.google.protobuf.Empty";\x82\xd3\xe4\x93\x02.*,/v2/{name=projects/*/instances/*/clusters/*}\xda\x41\x04name\x12\xd5\x01\n\x10\x43reateAppProfile\x12\x31.google.bigtable.admin.v2.CreateAppProfileRequest\x1a$.google.bigtable.admin.v2.AppProfile"h\x82\xd3\xe4\x93\x02>"//v2/{parent=projects/*/instances/*}/appProfiles:\x0b\x61pp_profile\xda\x41!parent,app_profile_id,app_profile\x12\xa5\x01\n\rGetAppProfile\x12..google.bigtable.admin.v2.GetAppProfileRequest\x1a$.google.bigtable.admin.v2.AppProfile">\x82\xd3\xe4\x93\x02\x31\x12//v2/{name=projects/*/instances/*/appProfiles/*}\xda\x41\x04name\x12\xb8\x01\n\x0fListAppProfiles\x12\x30.google.bigtable.admin.v2.ListAppProfilesRequest\x1a\x31.google.bigtable.admin.v2.ListAppProfilesResponse"@\x82\xd3\xe4\x93\x02\x31\x12//v2/{parent=projects/*/instances/*}/appProfiles\xda\x41\x06parent\x12\xfa\x01\n\x10UpdateAppProfile\x12\x31.google.bigtable.admin.v2.UpdateAppProfileRequest\x1a\x1d.google.longrunning.Operation"\x93\x01\x82\xd3\xe4\x93\x02J2;/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}:\x0b\x61pp_profile\xda\x41\x17\x61pp_profile,update_mask\xca\x41&\n\nAppProfile\x12\x18UpdateAppProfileMetadata\x12\x9d\x01\n\x10\x44\x65leteAppProfile\x12\x31.google.bigtable.admin.v2.DeleteAppProfileRequest\x1a\x16.google.protobuf.Empty">\x82\xd3\xe4\x93\x02\x31*//v2/{name=projects/*/instances/*/appProfiles/*}\xda\x41\x04name\x12\x93\x01\n\x0cGetIamPolicy\x12".google.iam.v1.GetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"H\x82\xd3\xe4\x93\x02\x37"2/v2/{resource=projects/*/instances/*}:getIamPolicy:\x01*\xda\x41\x08resource\x12\x9a\x01\n\x0cSetIamPolicy\x12".google.iam.v1.SetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"O\x82\xd3\xe4\x93\x02\x37"2/v2/{resource=projects/*/instances/*}:setIamPolicy:\x01*\xda\x41\x0fresource,policy\x12\xc5\x01\n\x12TestIamPermissions\x12(.google.iam.v1.TestIamPermissionsRequest\x1a).google.iam.v1.TestIamPermissionsResponse"Z\x82\xd3\xe4\x93\x02="8/v2/{resource=projects/*/instances/*}:testIamPermissions:\x01*\xda\x41\x14resource,permissions\x1a\x9a\x03\xca\x41\x1c\x62igtableadmin.googleapis.com\xd2\x41\xf7\x02https://www.googleapis.com/auth/bigtable.admin,https://www.googleapis.com/auth/bigtable.admin.cluster,https://www.googleapis.com/auth/bigtable.admin.instance,https://www.googleapis.com/auth/cloud-bigtable.admin,https://www.googleapis.com/auth/cloud-bigtable.admin.cluster,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-onlyB\xbd\x01\n\x1c\x63om.google.bigtable.admin.v2B\x1a\x42igtableInstanceAdminProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2b\x06proto3', dependencies=[ google_dot_api_dot_annotations__pb2.DESCRIPTOR, - google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.DESCRIPTOR, + google_dot_api_dot_client__pb2.DESCRIPTOR, + google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, + google_dot_api_dot_resource__pb2.DESCRIPTOR, + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.DESCRIPTOR, google_dot_iam_dot_v1_dot_iam__policy__pb2.DESCRIPTOR, google_dot_iam_dot_v1_dot_policy__pb2.DESCRIPTOR, google_dot_longrunning_dot_operations__pb2.DESCRIPTOR, @@ -58,6 +58,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ 
_descriptor.FieldDescriptor( name="key", @@ -68,7 +69,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -76,6 +77,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="value", @@ -94,18 +96,19 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], nested_types=[], enum_types=[], - serialized_options=_b("8\001"), + serialized_options=b"8\001", is_extendable=False, syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=570, - serialized_end=652, + serialized_start=723, + serialized_end=805, ) _CREATEINSTANCEREQUEST = _descriptor.Descriptor( @@ -114,6 +117,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="parent", @@ -124,14 +128,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A-\n+cloudresourcemanager.googleapis.com/Project", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="instance_id", @@ -142,14 +147,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="instance", @@ -166,8 +172,9 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="clusters", @@ -184,8 +191,9 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -196,8 +204,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=373, - serialized_end=652, + serialized_start=458, + serialized_end=805, ) @@ -207,6 +215,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -217,14 +226,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Instance', file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -235,8 +245,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=654, - serialized_end=688, + serialized_start=807, + serialized_end=883, ) @@ -246,6 +256,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="parent", @@ -256,14 
+267,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A-\n+cloudresourcemanager.googleapis.com/Project", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="page_token", @@ -274,7 +286,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -282,6 +294,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -292,8 +305,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=690, - serialized_end=748, + serialized_start=885, + serialized_end=996, ) @@ -303,6 +316,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="instances", @@ -321,6 +335,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="failed_locations", @@ -339,6 +354,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="next_page_token", @@ -349,7 +365,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -357,6 +373,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -367,8 +384,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=751, - serialized_end=880, + serialized_start=999, + serialized_end=1128, ) @@ -378,6 +395,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="instance", @@ -394,8 +412,9 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="update_mask", @@ -412,8 +431,9 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -424,8 +444,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=883, - serialized_end=1016, + serialized_start=1131, + serialized_end=1274, ) @@ -435,6 +455,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -445,14 +466,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Instance', file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -463,8 +485,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - 
serialized_start=1018, - serialized_end=1055, + serialized_start=1276, + serialized_end=1355, ) @@ -474,6 +496,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="parent", @@ -484,14 +507,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Instance', file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="cluster_id", @@ -502,14 +526,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="cluster", @@ -526,8 +551,9 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -538,8 +564,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1057, - serialized_end=1167, + serialized_start=1358, + serialized_end=1520, ) @@ -549,6 +575,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -559,14 +586,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A!\n\037bigtable.googleapis.com/Cluster", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -577,8 +605,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1169, - serialized_end=1202, + serialized_start=1522, + serialized_end=1596, ) @@ -588,6 +616,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="parent", @@ -598,14 +627,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Instance', file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="page_token", @@ -616,7 +646,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -624,6 +654,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -634,8 +665,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1204, - serialized_end=1261, + serialized_start=1598, + serialized_end=1697, ) @@ -645,6 +676,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + 
create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="clusters", @@ -663,6 +695,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="failed_locations", @@ -681,6 +714,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="next_page_token", @@ -691,7 +725,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -699,6 +733,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -709,8 +744,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1263, - serialized_end=1389, + serialized_start=1699, + serialized_end=1825, ) @@ -720,6 +755,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -730,14 +766,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A!\n\037bigtable.googleapis.com/Cluster", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -748,8 +785,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1391, - serialized_end=1427, + serialized_start=1827, + serialized_end=1904, ) @@ -759,6 +796,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="original_request", @@ -777,6 +815,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="request_time", @@ -795,6 +834,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="finish_time", @@ -813,6 +853,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -823,8 +864,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1430, - serialized_end=1628, + serialized_start=1907, + serialized_end=2105, ) @@ -834,6 +875,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="original_request", @@ -852,6 +894,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="request_time", @@ -870,6 +913,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="finish_time", @@ -888,6 +932,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -898,8 +943,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1631, - serialized_end=1836, + serialized_start=2108, + serialized_end=2313, ) @@ -909,6 +954,7 @@ filename=None, 
file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="original_request", @@ -927,6 +973,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="request_time", @@ -945,6 +992,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="finish_time", @@ -963,6 +1011,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -973,8 +1022,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1839, - serialized_end=2035, + serialized_start=2316, + serialized_end=2512, ) @@ -984,6 +1033,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="original_request", @@ -1002,6 +1052,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="request_time", @@ -1020,6 +1071,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="finish_time", @@ -1038,6 +1090,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1048,8 +1101,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2038, - serialized_end=2221, + serialized_start=2515, + serialized_end=2698, ) @@ -1059,6 +1112,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="parent", @@ -1069,14 +1123,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Instance', file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="app_profile_id", @@ -1087,14 +1142,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="app_profile", @@ -1111,8 +1167,9 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="ignore_warnings", @@ -1131,6 +1188,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1141,8 +1199,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2224, - serialized_end=2373, + serialized_start=2701, + serialized_end=2902, ) @@ -1152,6 +1210,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ 
-1162,14 +1221,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b'\340A\002\372A$\n"bigtable.googleapis.com/AppProfile', file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1180,8 +1240,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2375, - serialized_end=2411, + serialized_start=2904, + serialized_end=2984, ) @@ -1191,6 +1251,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="parent", @@ -1201,14 +1262,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Instance', file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="page_size", @@ -1227,6 +1289,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="page_token", @@ -1237,7 +1300,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1245,6 +1308,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1255,8 +1319,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2413, - serialized_end=2492, + serialized_start=2986, + serialized_end=3107, ) @@ -1266,6 +1330,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="app_profiles", @@ -1284,6 +1349,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="next_page_token", @@ -1294,7 +1360,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1302,6 +1368,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="failed_locations", @@ -1320,6 +1387,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1330,8 +1398,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2495, - serialized_end=2631, + serialized_start=3110, + serialized_end=3246, ) @@ -1341,6 +1409,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="app_profile", @@ -1357,8 +1426,9 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="update_mask", @@ -1375,8 +1445,9 @@ containing_type=None, is_extension=False, 
extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="ignore_warnings", @@ -1395,6 +1466,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1405,8 +1477,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2634, - serialized_end=2792, + serialized_start=3249, + serialized_end=3417, ) @@ -1416,6 +1488,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -1426,14 +1499,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b'\340A\002\372A$\n"bigtable.googleapis.com/AppProfile', file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="ignore_warnings", @@ -1452,6 +1526,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1462,8 +1537,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2794, - serialized_end=2858, + serialized_start=3419, + serialized_end=3527, ) @@ -1473,6 +1548,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], extensions=[], nested_types=[], @@ -1482,20 +1558,20 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2860, - serialized_end=2886, + serialized_start=3529, + serialized_end=3555, ) _CREATEINSTANCEREQUEST_CLUSTERSENTRY.fields_by_name[ "value" ].message_type = ( - google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._CLUSTER + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._CLUSTER ) _CREATEINSTANCEREQUEST_CLUSTERSENTRY.containing_type = _CREATEINSTANCEREQUEST _CREATEINSTANCEREQUEST.fields_by_name[ "instance" ].message_type = ( - google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._INSTANCE + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._INSTANCE ) _CREATEINSTANCEREQUEST.fields_by_name[ "clusters" @@ -1503,12 +1579,12 @@ _LISTINSTANCESRESPONSE.fields_by_name[ "instances" ].message_type = ( - google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._INSTANCE + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._INSTANCE ) _PARTIALUPDATEINSTANCEREQUEST.fields_by_name[ "instance" ].message_type = ( - google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._INSTANCE + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._INSTANCE ) _PARTIALUPDATEINSTANCEREQUEST.fields_by_name[ "update_mask" @@ -1516,12 +1592,12 @@ _CREATECLUSTERREQUEST.fields_by_name[ "cluster" ].message_type = ( - google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._CLUSTER + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._CLUSTER ) _LISTCLUSTERSRESPONSE.fields_by_name[ "clusters" ].message_type = ( - google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._CLUSTER + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._CLUSTER ) _CREATEINSTANCEMETADATA.fields_by_name[ "original_request" 
@@ -1553,7 +1629,7 @@ _UPDATECLUSTERMETADATA.fields_by_name[ "original_request" ].message_type = ( - google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._CLUSTER + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._CLUSTER ) _UPDATECLUSTERMETADATA.fields_by_name[ "request_time" @@ -1564,17 +1640,17 @@ _CREATEAPPPROFILEREQUEST.fields_by_name[ "app_profile" ].message_type = ( - google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._APPPROFILE + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._APPPROFILE ) _LISTAPPPROFILESRESPONSE.fields_by_name[ "app_profiles" ].message_type = ( - google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._APPPROFILE + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._APPPROFILE ) _UPDATEAPPPROFILEREQUEST.fields_by_name[ "app_profile" ].message_type = ( - google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._APPPROFILE + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._APPPROFILE ) _UPDATEAPPPROFILEREQUEST.fields_by_name[ "update_mask" @@ -1608,41 +1684,40 @@ CreateInstanceRequest = _reflection.GeneratedProtocolMessageType( "CreateInstanceRequest", (_message.Message,), - dict( - ClustersEntry=_reflection.GeneratedProtocolMessageType( + { + "ClustersEntry": _reflection.GeneratedProtocolMessageType( "ClustersEntry", (_message.Message,), - dict( - DESCRIPTOR=_CREATEINSTANCEREQUEST_CLUSTERSENTRY, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2" + { + "DESCRIPTOR": _CREATEINSTANCEREQUEST_CLUSTERSENTRY, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2" # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry) - ), + }, ), - DESCRIPTOR=_CREATEINSTANCEREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", - __doc__="""Request message for BigtableInstanceAdmin.CreateInstance. - - + "DESCRIPTOR": _CREATEINSTANCEREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", + "__doc__": """Request message for BigtableInstanceAdmin.CreateInstance. Attributes: parent: - The unique name of the project in which to create the new - instance. Values are of the form ``projects/``. + Required. The unique name of the project in which to create + the new instance. Values are of the form + ``projects/{project}``. instance_id: - The ID to be used when referring to the new instance within - its project, e.g., just ``myinstance`` rather than + Required. The ID to be used when referring to the new instance + within its project, e.g., just ``myinstance`` rather than ``projects/myproject/instances/myinstance``. instance: - The instance to create. Fields marked ``OutputOnly`` must be - left blank. + Required. The instance to create. Fields marked ``OutputOnly`` + must be left blank. clusters: - The clusters to be created within the instance, mapped by - desired cluster ID, e.g., just ``mycluster`` rather than ``pro - jects/myproject/instances/myinstance/clusters/mycluster``. - Fields marked ``OutputOnly`` must be left blank. Currently, at - most two clusters can be specified. + Required. The clusters to be created within the instance, + mapped by desired cluster ID, e.g., just ``mycluster`` rather + than ``projects/myproject/instances/myinstance/clusters/myclus + ter``. Fields marked ``OutputOnly`` must be left blank. + Currently, at most four clusters can be specified. 
""", # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateInstanceRequest) - ), + }, ) _sym_db.RegisterMessage(CreateInstanceRequest) _sym_db.RegisterMessage(CreateInstanceRequest.ClustersEntry) @@ -1650,52 +1725,47 @@ GetInstanceRequest = _reflection.GeneratedProtocolMessageType( "GetInstanceRequest", (_message.Message,), - dict( - DESCRIPTOR=_GETINSTANCEREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", - __doc__="""Request message for BigtableInstanceAdmin.GetInstance. - - + { + "DESCRIPTOR": _GETINSTANCEREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", + "__doc__": """Request message for BigtableInstanceAdmin.GetInstance. Attributes: name: - The unique name of the requested instance. Values are of the - form ``projects//instances/``. + Required. The unique name of the requested instance. Values + are of the form ``projects/{project}/instances/{instance}``. """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetInstanceRequest) - ), + }, ) _sym_db.RegisterMessage(GetInstanceRequest) ListInstancesRequest = _reflection.GeneratedProtocolMessageType( "ListInstancesRequest", (_message.Message,), - dict( - DESCRIPTOR=_LISTINSTANCESREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", - __doc__="""Request message for BigtableInstanceAdmin.ListInstances. - - + { + "DESCRIPTOR": _LISTINSTANCESREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", + "__doc__": """Request message for BigtableInstanceAdmin.ListInstances. Attributes: parent: - The unique name of the project for which a list of instances - is requested. Values are of the form ``projects/``. + Required. The unique name of the project for which a list of + instances is requested. Values are of the form + ``projects/{project}``. page_token: DEPRECATED: This field is unused and ignored. """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListInstancesRequest) - ), + }, ) _sym_db.RegisterMessage(ListInstancesRequest) ListInstancesResponse = _reflection.GeneratedProtocolMessageType( "ListInstancesResponse", (_message.Message,), - dict( - DESCRIPTOR=_LISTINSTANCESRESPONSE, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", - __doc__="""Response message for BigtableInstanceAdmin.ListInstances. - - + { + "DESCRIPTOR": _LISTINSTANCESRESPONSE, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", + "__doc__": """Response message for BigtableInstanceAdmin.ListInstances. Attributes: instances: The list of requested instances. @@ -1711,131 +1781,120 @@ DEPRECATED: This field is unused and ignored. """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListInstancesResponse) - ), + }, ) _sym_db.RegisterMessage(ListInstancesResponse) PartialUpdateInstanceRequest = _reflection.GeneratedProtocolMessageType( "PartialUpdateInstanceRequest", (_message.Message,), - dict( - DESCRIPTOR=_PARTIALUPDATEINSTANCEREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", - __doc__="""Request message for - BigtableInstanceAdmin.PartialUpdateInstance. - - + { + "DESCRIPTOR": _PARTIALUPDATEINSTANCEREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", + "__doc__": """Request message for BigtableInstanceAdmin.PartialUpdateInstance. Attributes: instance: - The Instance which will (partially) replace the current value. 
+ Required. The Instance which will (partially) replace the + current value. update_mask: - The subset of Instance fields which should be replaced. Must - be explicitly set. + Required. The subset of Instance fields which should be + replaced. Must be explicitly set. """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.PartialUpdateInstanceRequest) - ), + }, ) _sym_db.RegisterMessage(PartialUpdateInstanceRequest) DeleteInstanceRequest = _reflection.GeneratedProtocolMessageType( "DeleteInstanceRequest", (_message.Message,), - dict( - DESCRIPTOR=_DELETEINSTANCEREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", - __doc__="""Request message for BigtableInstanceAdmin.DeleteInstance. - - + { + "DESCRIPTOR": _DELETEINSTANCEREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", + "__doc__": """Request message for BigtableInstanceAdmin.DeleteInstance. Attributes: name: - The unique name of the instance to be deleted. Values are of - the form ``projects//instances/``. + Required. The unique name of the instance to be deleted. + Values are of the form + ``projects/{project}/instances/{instance}``. """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteInstanceRequest) - ), + }, ) _sym_db.RegisterMessage(DeleteInstanceRequest) CreateClusterRequest = _reflection.GeneratedProtocolMessageType( "CreateClusterRequest", (_message.Message,), - dict( - DESCRIPTOR=_CREATECLUSTERREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", - __doc__="""Request message for BigtableInstanceAdmin.CreateCluster. - - + { + "DESCRIPTOR": _CREATECLUSTERREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", + "__doc__": """Request message for BigtableInstanceAdmin.CreateCluster. Attributes: parent: - The unique name of the instance in which to create the new - cluster. Values are of the form - ``projects//instances/``. + Required. The unique name of the instance in which to create + the new cluster. Values are of the form + ``projects/{project}/instances/{instance}``. cluster_id: - The ID to be used when referring to the new cluster within its - instance, e.g., just ``mycluster`` rather than ``projects/mypr - oject/instances/myinstance/clusters/mycluster``. + Required. The ID to be used when referring to the new cluster + within its instance, e.g., just ``mycluster`` rather than ``pr + ojects/myproject/instances/myinstance/clusters/mycluster``. cluster: - The cluster to be created. Fields marked ``OutputOnly`` must - be left blank. + Required. The cluster to be created. Fields marked + ``OutputOnly`` must be left blank. """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateClusterRequest) - ), + }, ) _sym_db.RegisterMessage(CreateClusterRequest) GetClusterRequest = _reflection.GeneratedProtocolMessageType( "GetClusterRequest", (_message.Message,), - dict( - DESCRIPTOR=_GETCLUSTERREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", - __doc__="""Request message for BigtableInstanceAdmin.GetCluster. - - + { + "DESCRIPTOR": _GETCLUSTERREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", + "__doc__": """Request message for BigtableInstanceAdmin.GetCluster. Attributes: name: - The unique name of the requested cluster. Values are of the - form ``projects//instances//clusters/``. + Required. The unique name of the requested cluster. 
Values are + of the form ``projects/{project}/instances/{instance}/clusters + /{cluster}``. """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetClusterRequest) - ), + }, ) _sym_db.RegisterMessage(GetClusterRequest) ListClustersRequest = _reflection.GeneratedProtocolMessageType( "ListClustersRequest", (_message.Message,), - dict( - DESCRIPTOR=_LISTCLUSTERSREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", - __doc__="""Request message for BigtableInstanceAdmin.ListClusters. - - + { + "DESCRIPTOR": _LISTCLUSTERSREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", + "__doc__": """Request message for BigtableInstanceAdmin.ListClusters. Attributes: parent: - The unique name of the instance for which a list of clusters - is requested. Values are of the form - ``projects//instances/``. Use `` + Required. The unique name of the instance for which a list of + clusters is requested. Values are of the form + ``projects/{project}/instances/{instance}``. Use ``{instance} = '-'`` to list Clusters for all Instances in a project, e.g., ``projects/myproject/instances/-``. page_token: DEPRECATED: This field is unused and ignored. """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListClustersRequest) - ), + }, ) _sym_db.RegisterMessage(ListClustersRequest) ListClustersResponse = _reflection.GeneratedProtocolMessageType( "ListClustersResponse", (_message.Message,), - dict( - DESCRIPTOR=_LISTCLUSTERSRESPONSE, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", - __doc__="""Response message for BigtableInstanceAdmin.ListClusters. - - + { + "DESCRIPTOR": _LISTCLUSTERSRESPONSE, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", + "__doc__": """Response message for BigtableInstanceAdmin.ListClusters. Attributes: clusters: The list of requested clusters. @@ -1850,39 +1909,35 @@ DEPRECATED: This field is unused and ignored. """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListClustersResponse) - ), + }, ) _sym_db.RegisterMessage(ListClustersResponse) DeleteClusterRequest = _reflection.GeneratedProtocolMessageType( "DeleteClusterRequest", (_message.Message,), - dict( - DESCRIPTOR=_DELETECLUSTERREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", - __doc__="""Request message for BigtableInstanceAdmin.DeleteCluster. - - + { + "DESCRIPTOR": _DELETECLUSTERREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", + "__doc__": """Request message for BigtableInstanceAdmin.DeleteCluster. Attributes: name: - The unique name of the cluster to be deleted. Values are of - the form ``projects//instances//clusters/``. + Required. The unique name of the cluster to be deleted. Values + are of the form ``projects/{project}/instances/{instance}/clus + ters/{cluster}``. """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteClusterRequest) - ), + }, ) _sym_db.RegisterMessage(DeleteClusterRequest) CreateInstanceMetadata = _reflection.GeneratedProtocolMessageType( "CreateInstanceMetadata", (_message.Message,), - dict( - DESCRIPTOR=_CREATEINSTANCEMETADATA, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", - __doc__="""The metadata for the Operation returned by CreateInstance. 
- - + { + "DESCRIPTOR": _CREATEINSTANCEMETADATA, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", + "__doc__": """The metadata for the Operation returned by CreateInstance. Attributes: original_request: The request that prompted the initiation of this @@ -1894,19 +1949,17 @@ successfully. """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateInstanceMetadata) - ), + }, ) _sym_db.RegisterMessage(CreateInstanceMetadata) UpdateInstanceMetadata = _reflection.GeneratedProtocolMessageType( "UpdateInstanceMetadata", (_message.Message,), - dict( - DESCRIPTOR=_UPDATEINSTANCEMETADATA, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", - __doc__="""The metadata for the Operation returned by UpdateInstance. - - + { + "DESCRIPTOR": _UPDATEINSTANCEMETADATA, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", + "__doc__": """The metadata for the Operation returned by UpdateInstance. Attributes: original_request: The request that prompted the initiation of this @@ -1918,19 +1971,17 @@ successfully. """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.UpdateInstanceMetadata) - ), + }, ) _sym_db.RegisterMessage(UpdateInstanceMetadata) CreateClusterMetadata = _reflection.GeneratedProtocolMessageType( "CreateClusterMetadata", (_message.Message,), - dict( - DESCRIPTOR=_CREATECLUSTERMETADATA, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", - __doc__="""The metadata for the Operation returned by CreateCluster. - - + { + "DESCRIPTOR": _CREATECLUSTERMETADATA, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", + "__doc__": """The metadata for the Operation returned by CreateCluster. Attributes: original_request: The request that prompted the initiation of this CreateCluster @@ -1942,19 +1993,17 @@ successfully. """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateClusterMetadata) - ), + }, ) _sym_db.RegisterMessage(CreateClusterMetadata) UpdateClusterMetadata = _reflection.GeneratedProtocolMessageType( "UpdateClusterMetadata", (_message.Message,), - dict( - DESCRIPTOR=_UPDATECLUSTERMETADATA, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", - __doc__="""The metadata for the Operation returned by UpdateCluster. - - + { + "DESCRIPTOR": _UPDATECLUSTERMETADATA, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", + "__doc__": """The metadata for the Operation returned by UpdateCluster. Attributes: original_request: The request that prompted the initiation of this UpdateCluster @@ -1966,97 +2015,93 @@ successfully. """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.UpdateClusterMetadata) - ), + }, ) _sym_db.RegisterMessage(UpdateClusterMetadata) CreateAppProfileRequest = _reflection.GeneratedProtocolMessageType( "CreateAppProfileRequest", (_message.Message,), - dict( - DESCRIPTOR=_CREATEAPPPROFILEREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", - __doc__="""Request message for - BigtableInstanceAdmin.CreateAppProfile. - - + { + "DESCRIPTOR": _CREATEAPPPROFILEREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", + "__doc__": """Request message for BigtableInstanceAdmin.CreateAppProfile. Attributes: parent: - The unique name of the instance in which to create the new app - profile. Values are of the form - ``projects//instances/``. + Required. 
The unique name of the instance in which to create + the new app profile. Values are of the form + ``projects/{project}/instances/{instance}``. app_profile_id: - The ID to be used when referring to the new app profile within - its instance, e.g., just ``myprofile`` rather than ``projects/ - myproject/instances/myinstance/appProfiles/myprofile``. + Required. The ID to be used when referring to the new app + profile within its instance, e.g., just ``myprofile`` rather + than ``projects/myproject/instances/myinstance/appProfiles/myp + rofile``. app_profile: - The app profile to be created. Fields marked ``OutputOnly`` - will be ignored. + Required. The app profile to be created. Fields marked + ``OutputOnly`` will be ignored. ignore_warnings: If true, ignore safety checks when creating the app profile. """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateAppProfileRequest) - ), + }, ) _sym_db.RegisterMessage(CreateAppProfileRequest) GetAppProfileRequest = _reflection.GeneratedProtocolMessageType( "GetAppProfileRequest", (_message.Message,), - dict( - DESCRIPTOR=_GETAPPPROFILEREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", - __doc__="""Request message for BigtableInstanceAdmin.GetAppProfile. - - + { + "DESCRIPTOR": _GETAPPPROFILEREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", + "__doc__": """Request message for BigtableInstanceAdmin.GetAppProfile. Attributes: name: - The unique name of the requested app profile. Values are of - the form ``projects//instances//appProfiles - /``. + Required. The unique name of the requested app profile. Values + are of the form ``projects/{project}/instances/{instance}/appP + rofiles/{app_profile}``. """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetAppProfileRequest) - ), + }, ) _sym_db.RegisterMessage(GetAppProfileRequest) ListAppProfilesRequest = _reflection.GeneratedProtocolMessageType( "ListAppProfilesRequest", (_message.Message,), - dict( - DESCRIPTOR=_LISTAPPPROFILESREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", - __doc__="""Request message for BigtableInstanceAdmin.ListAppProfiles. - - + { + "DESCRIPTOR": _LISTAPPPROFILESREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", + "__doc__": """Request message for BigtableInstanceAdmin.ListAppProfiles. Attributes: parent: - The unique name of the instance for which a list of app - profiles is requested. Values are of the form - ``projects//instances/``. Use `` + Required. The unique name of the instance for which a list of + app profiles is requested. Values are of the form + ``projects/{project}/instances/{instance}``. Use ``{instance} = '-'`` to list AppProfiles for all Instances in a project, e.g., ``projects/myproject/instances/-``. page_size: - Maximum number of results per page. CURRENTLY UNIMPLEMENTED - AND IGNORED. + Maximum number of results per page. A page_size of zero lets + the server choose the number of items to return. A page_size + which is strictly positive will return at most that many + items. A negative page_size will cause an error. Following + the first request, subsequent paginated calls are not required + to pass a page_size. If a page_size is set in subsequent + calls, it must match the page_size given in the first request. page_token: The value of ``next_page_token`` returned by a previous call. 
""", # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListAppProfilesRequest) - ), + }, ) _sym_db.RegisterMessage(ListAppProfilesRequest) ListAppProfilesResponse = _reflection.GeneratedProtocolMessageType( "ListAppProfilesResponse", (_message.Message,), - dict( - DESCRIPTOR=_LISTAPPPROFILESRESPONSE, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", - __doc__="""Response message for - BigtableInstanceAdmin.ListAppProfiles. - - + { + "DESCRIPTOR": _LISTAPPPROFILESRESPONSE, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", + "__doc__": """Response message for BigtableInstanceAdmin.ListAppProfiles. Attributes: app_profiles: The list of requested app profiles. @@ -2072,85 +2117,100 @@ ``projects//locations/`` """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListAppProfilesResponse) - ), + }, ) _sym_db.RegisterMessage(ListAppProfilesResponse) UpdateAppProfileRequest = _reflection.GeneratedProtocolMessageType( "UpdateAppProfileRequest", (_message.Message,), - dict( - DESCRIPTOR=_UPDATEAPPPROFILEREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", - __doc__="""Request message for - BigtableInstanceAdmin.UpdateAppProfile. - - + { + "DESCRIPTOR": _UPDATEAPPPROFILEREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", + "__doc__": """Request message for BigtableInstanceAdmin.UpdateAppProfile. Attributes: app_profile: - The app profile which will (partially) replace the current - value. + Required. The app profile which will (partially) replace the + current value. update_mask: - The subset of app profile fields which should be replaced. If - unset, all fields will be replaced. + Required. The subset of app profile fields which should be + replaced. If unset, all fields will be replaced. ignore_warnings: If true, ignore safety checks when updating the app profile. """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.UpdateAppProfileRequest) - ), + }, ) _sym_db.RegisterMessage(UpdateAppProfileRequest) DeleteAppProfileRequest = _reflection.GeneratedProtocolMessageType( "DeleteAppProfileRequest", (_message.Message,), - dict( - DESCRIPTOR=_DELETEAPPPROFILEREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", - __doc__="""Request message for - BigtableInstanceAdmin.DeleteAppProfile. - - + { + "DESCRIPTOR": _DELETEAPPPROFILEREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", + "__doc__": """Request message for BigtableInstanceAdmin.DeleteAppProfile. Attributes: name: - The unique name of the app profile to be deleted. Values are - of the form ``projects//instances//appProfi - les/``. + Required. The unique name of the app profile to be deleted. + Values are of the form ``projects/{project}/instances/{instanc + e}/appProfiles/{app_profile}``. ignore_warnings: If true, ignore safety checks when deleting the app profile. """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteAppProfileRequest) - ), + }, ) _sym_db.RegisterMessage(DeleteAppProfileRequest) UpdateAppProfileMetadata = _reflection.GeneratedProtocolMessageType( "UpdateAppProfileMetadata", (_message.Message,), - dict( - DESCRIPTOR=_UPDATEAPPPROFILEMETADATA, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_instance_admin_pb2", - __doc__="""The metadata for the Operation returned by - UpdateAppProfile. 
- - """, + { + "DESCRIPTOR": _UPDATEAPPPROFILEMETADATA, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", + "__doc__": """The metadata for the Operation returned by UpdateAppProfile.""", # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.UpdateAppProfileMetadata) - ), + }, ) _sym_db.RegisterMessage(UpdateAppProfileMetadata) DESCRIPTOR._options = None _CREATEINSTANCEREQUEST_CLUSTERSENTRY._options = None +_CREATEINSTANCEREQUEST.fields_by_name["parent"]._options = None +_CREATEINSTANCEREQUEST.fields_by_name["instance_id"]._options = None +_CREATEINSTANCEREQUEST.fields_by_name["instance"]._options = None +_CREATEINSTANCEREQUEST.fields_by_name["clusters"]._options = None +_GETINSTANCEREQUEST.fields_by_name["name"]._options = None +_LISTINSTANCESREQUEST.fields_by_name["parent"]._options = None +_PARTIALUPDATEINSTANCEREQUEST.fields_by_name["instance"]._options = None +_PARTIALUPDATEINSTANCEREQUEST.fields_by_name["update_mask"]._options = None +_DELETEINSTANCEREQUEST.fields_by_name["name"]._options = None +_CREATECLUSTERREQUEST.fields_by_name["parent"]._options = None +_CREATECLUSTERREQUEST.fields_by_name["cluster_id"]._options = None +_CREATECLUSTERREQUEST.fields_by_name["cluster"]._options = None +_GETCLUSTERREQUEST.fields_by_name["name"]._options = None +_LISTCLUSTERSREQUEST.fields_by_name["parent"]._options = None +_DELETECLUSTERREQUEST.fields_by_name["name"]._options = None +_CREATEAPPPROFILEREQUEST.fields_by_name["parent"]._options = None +_CREATEAPPPROFILEREQUEST.fields_by_name["app_profile_id"]._options = None +_CREATEAPPPROFILEREQUEST.fields_by_name["app_profile"]._options = None +_GETAPPPROFILEREQUEST.fields_by_name["name"]._options = None +_LISTAPPPROFILESREQUEST.fields_by_name["parent"]._options = None +_UPDATEAPPPROFILEREQUEST.fields_by_name["app_profile"]._options = None +_UPDATEAPPPROFILEREQUEST.fields_by_name["update_mask"]._options = None +_DELETEAPPPROFILEREQUEST.fields_by_name["name"]._options = None _BIGTABLEINSTANCEADMIN = _descriptor.ServiceDescriptor( name="BigtableInstanceAdmin", full_name="google.bigtable.admin.v2.BigtableInstanceAdmin", file=DESCRIPTOR, index=0, - serialized_options=None, - serialized_start=2889, - serialized_end=5875, + serialized_options=b"\312A\034bigtableadmin.googleapis.com\322A\367\002https://www.googleapis.com/auth/bigtable.admin,https://www.googleapis.com/auth/bigtable.admin.cluster,https://www.googleapis.com/auth/bigtable.admin.instance,https://www.googleapis.com/auth/cloud-bigtable.admin,https://www.googleapis.com/auth/cloud-bigtable.admin.cluster,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-only", + create_key=_descriptor._internal_create_key, + serialized_start=3558, + serialized_end=7416, methods=[ _descriptor.MethodDescriptor( name="CreateInstance", @@ -2159,9 +2219,8 @@ containing_service=None, input_type=_CREATEINSTANCEREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - '\202\323\344\223\002&"!/v2/{parent=projects/*}/instances:\001*' - ), + serialized_options=b'\202\323\344\223\002&"!/v2/{parent=projects/*}/instances:\001*\332A$parent,instance_id,instance,clusters\312A"\n\010Instance\022\026CreateInstanceMetadata', + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="GetInstance", @@ -2169,10 +2228,9 @@ index=1, containing_service=None, input_type=_GETINSTANCEREQUEST, - 
output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._INSTANCE, - serialized_options=_b( - "\202\323\344\223\002#\022!/v2/{name=projects/*/instances/*}" - ), + output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._INSTANCE, + serialized_options=b"\202\323\344\223\002#\022!/v2/{name=projects/*/instances/*}\332A\004name", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="ListInstances", @@ -2181,20 +2239,18 @@ containing_service=None, input_type=_LISTINSTANCESREQUEST, output_type=_LISTINSTANCESRESPONSE, - serialized_options=_b( - "\202\323\344\223\002#\022!/v2/{parent=projects/*}/instances" - ), + serialized_options=b"\202\323\344\223\002#\022!/v2/{parent=projects/*}/instances\332A\006parent", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="UpdateInstance", full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateInstance", index=3, containing_service=None, - input_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._INSTANCE, - output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._INSTANCE, - serialized_options=_b( - "\202\323\344\223\002&\032!/v2/{name=projects/*/instances/*}:\001*" - ), + input_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._INSTANCE, + output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._INSTANCE, + serialized_options=b"\202\323\344\223\002&\032!/v2/{name=projects/*/instances/*}:\001*", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="PartialUpdateInstance", @@ -2203,9 +2259,8 @@ containing_service=None, input_type=_PARTIALUPDATEINSTANCEREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - "\202\323\344\223\00262*/v2/{instance.name=projects/*/instances/*}:\010instance" - ), + serialized_options=b'\202\323\344\223\00262*/v2/{instance.name=projects/*/instances/*}:\010instance\332A\024instance,update_mask\312A"\n\010Instance\022\026UpdateInstanceMetadata', + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="DeleteInstance", @@ -2214,9 +2269,8 @@ containing_service=None, input_type=_DELETEINSTANCEREQUEST, output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=_b( - "\202\323\344\223\002#*!/v2/{name=projects/*/instances/*}" - ), + serialized_options=b"\202\323\344\223\002#*!/v2/{name=projects/*/instances/*}\332A\004name", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="CreateCluster", @@ -2225,9 +2279,8 @@ containing_service=None, input_type=_CREATECLUSTERREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - '\202\323\344\223\0027",/v2/{parent=projects/*/instances/*}/clusters:\007cluster' - ), + serialized_options=b'\202\323\344\223\0027",/v2/{parent=projects/*/instances/*}/clusters:\007cluster\332A\031parent,cluster_id,cluster\312A \n\007Cluster\022\025CreateClusterMetadata', + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="GetCluster", @@ -2235,10 +2288,9 @@ index=7, containing_service=None, input_type=_GETCLUSTERREQUEST, - output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._CLUSTER, - serialized_options=_b( - "\202\323\344\223\002.\022,/v2/{name=projects/*/instances/*/clusters/*}" - ), + 
output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._CLUSTER, + serialized_options=b"\202\323\344\223\002.\022,/v2/{name=projects/*/instances/*/clusters/*}\332A\004name", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="ListClusters", @@ -2247,20 +2299,18 @@ containing_service=None, input_type=_LISTCLUSTERSREQUEST, output_type=_LISTCLUSTERSRESPONSE, - serialized_options=_b( - "\202\323\344\223\002.\022,/v2/{parent=projects/*/instances/*}/clusters" - ), + serialized_options=b"\202\323\344\223\002.\022,/v2/{parent=projects/*/instances/*}/clusters\332A\006parent", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="UpdateCluster", full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateCluster", index=9, containing_service=None, - input_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._CLUSTER, + input_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._CLUSTER, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - "\202\323\344\223\0021\032,/v2/{name=projects/*/instances/*/clusters/*}:\001*" - ), + serialized_options=b"\202\323\344\223\0021\032,/v2/{name=projects/*/instances/*/clusters/*}:\001*\312A \n\007Cluster\022\025UpdateClusterMetadata", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="DeleteCluster", @@ -2269,9 +2319,8 @@ containing_service=None, input_type=_DELETECLUSTERREQUEST, output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=_b( - "\202\323\344\223\002.*,/v2/{name=projects/*/instances/*/clusters/*}" - ), + serialized_options=b"\202\323\344\223\002.*,/v2/{name=projects/*/instances/*/clusters/*}\332A\004name", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="CreateAppProfile", @@ -2279,10 +2328,9 @@ index=11, containing_service=None, input_type=_CREATEAPPPROFILEREQUEST, - output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._APPPROFILE, - serialized_options=_b( - '\202\323\344\223\002>"//v2/{parent=projects/*/instances/*}/appProfiles:\013app_profile' - ), + output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._APPPROFILE, + serialized_options=b'\202\323\344\223\002>"//v2/{parent=projects/*/instances/*}/appProfiles:\013app_profile\332A!parent,app_profile_id,app_profile', + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="GetAppProfile", @@ -2290,10 +2338,9 @@ index=12, containing_service=None, input_type=_GETAPPPROFILEREQUEST, - output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2._APPPROFILE, - serialized_options=_b( - "\202\323\344\223\0021\022//v2/{name=projects/*/instances/*/appProfiles/*}" - ), + output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._APPPROFILE, + serialized_options=b"\202\323\344\223\0021\022//v2/{name=projects/*/instances/*/appProfiles/*}\332A\004name", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="ListAppProfiles", @@ -2302,9 +2349,8 @@ containing_service=None, input_type=_LISTAPPPROFILESREQUEST, output_type=_LISTAPPPROFILESRESPONSE, - serialized_options=_b( - "\202\323\344\223\0021\022//v2/{parent=projects/*/instances/*}/appProfiles" - ), + serialized_options=b"\202\323\344\223\0021\022//v2/{parent=projects/*/instances/*}/appProfiles\332A\006parent", + 
create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="UpdateAppProfile", @@ -2313,9 +2359,8 @@ containing_service=None, input_type=_UPDATEAPPPROFILEREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - "\202\323\344\223\002J2;/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}:\013app_profile" - ), + serialized_options=b"\202\323\344\223\002J2;/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}:\013app_profile\332A\027app_profile,update_mask\312A&\n\nAppProfile\022\030UpdateAppProfileMetadata", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="DeleteAppProfile", @@ -2324,9 +2369,8 @@ containing_service=None, input_type=_DELETEAPPPROFILEREQUEST, output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=_b( - "\202\323\344\223\0021*//v2/{name=projects/*/instances/*/appProfiles/*}" - ), + serialized_options=b"\202\323\344\223\0021*//v2/{name=projects/*/instances/*/appProfiles/*}\332A\004name", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="GetIamPolicy", @@ -2335,9 +2379,8 @@ containing_service=None, input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._GETIAMPOLICYREQUEST, output_type=google_dot_iam_dot_v1_dot_policy__pb2._POLICY, - serialized_options=_b( - '\202\323\344\223\0027"2/v2/{resource=projects/*/instances/*}:getIamPolicy:\001*' - ), + serialized_options=b'\202\323\344\223\0027"2/v2/{resource=projects/*/instances/*}:getIamPolicy:\001*\332A\010resource', + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="SetIamPolicy", @@ -2346,9 +2389,8 @@ containing_service=None, input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._SETIAMPOLICYREQUEST, output_type=google_dot_iam_dot_v1_dot_policy__pb2._POLICY, - serialized_options=_b( - '\202\323\344\223\0027"2/v2/{resource=projects/*/instances/*}:setIamPolicy:\001*' - ), + serialized_options=b'\202\323\344\223\0027"2/v2/{resource=projects/*/instances/*}:setIamPolicy:\001*\332A\017resource,policy', + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="TestIamPermissions", @@ -2357,9 +2399,8 @@ containing_service=None, input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._TESTIAMPERMISSIONSREQUEST, output_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._TESTIAMPERMISSIONSRESPONSE, - serialized_options=_b( - '\202\323\344\223\002="8/v2/{resource=projects/*/instances/*}:testIamPermissions:\001*' - ), + serialized_options=b'\202\323\344\223\002="8/v2/{resource=projects/*/instances/*}:testIamPermissions:\001*\332A\024resource,permissions', + create_key=_descriptor._internal_create_key, ), ], ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py index 0ca0445e22db..0580b1871335 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py @@ -2,10 +2,10 @@ import grpc from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2, + bigtable_instance_admin_pb2 as google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2, ) from 
google.cloud.bigtable_admin_v2.proto import ( - instance_pb2 as google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2, + instance_pb2 as google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2, ) from google.iam.v1 import iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2 from google.iam.v1 import policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2 @@ -29,82 +29,82 @@ def __init__(self, channel): """ self.CreateInstance = channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateInstance", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateInstanceRequest.SerializeToString, + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateInstanceRequest.SerializeToString, response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, ) self.GetInstance = channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetInstance", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetInstanceRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.Instance.FromString, + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetInstanceRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Instance.FromString, ) self.ListInstances = channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListInstances", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListInstancesRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListInstancesResponse.FromString, + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListInstancesRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListInstancesResponse.FromString, ) self.UpdateInstance = channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateInstance", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.Instance.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.Instance.FromString, + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Instance.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Instance.FromString, ) self.PartialUpdateInstance = channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/PartialUpdateInstance", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.PartialUpdateInstanceRequest.SerializeToString, + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.PartialUpdateInstanceRequest.SerializeToString, response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, ) self.DeleteInstance = channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteInstance", - 
request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteInstanceRequest.SerializeToString, + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteInstanceRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.CreateCluster = channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateCluster", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateClusterRequest.SerializeToString, + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateClusterRequest.SerializeToString, response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, ) self.GetCluster = channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetCluster", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetClusterRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.Cluster.FromString, + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetClusterRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Cluster.FromString, ) self.ListClusters = channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListClusters", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListClustersRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListClustersResponse.FromString, + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListClustersRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListClustersResponse.FromString, ) self.UpdateCluster = channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateCluster", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.Cluster.SerializeToString, + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Cluster.SerializeToString, response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, ) self.DeleteCluster = channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteCluster", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteClusterRequest.SerializeToString, + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteClusterRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.CreateAppProfile = channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateAppProfile", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateAppProfileRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.AppProfile.FromString, + 
request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateAppProfileRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.AppProfile.FromString, ) self.GetAppProfile = channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetAppProfile", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetAppProfileRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.AppProfile.FromString, + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetAppProfileRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.AppProfile.FromString, ) self.ListAppProfiles = channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListAppProfiles", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListAppProfilesRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListAppProfilesResponse.FromString, + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListAppProfilesRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListAppProfilesResponse.FromString, ) self.UpdateAppProfile = channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateAppProfile", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.UpdateAppProfileRequest.SerializeToString, + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.UpdateAppProfileRequest.SerializeToString, response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, ) self.DeleteAppProfile = channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteAppProfile", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteAppProfileRequest.SerializeToString, + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteAppProfileRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.GetIamPolicy = channel.unary_unary( @@ -152,14 +152,17 @@ def ListInstances(self, request, context): raise NotImplementedError("Method not implemented!") def UpdateInstance(self, request, context): - """Updates an instance within a project. + """Updates an instance within a project. This method updates only the display + name and type for an Instance. To update other Instance properties, such as + labels, use PartialUpdateInstance. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def PartialUpdateInstance(self, request, context): - """Partially updates an instance within a project. + """Partially updates an instance within a project. This method can modify all + fields of an Instance and is the preferred way to update an Instance. 
""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") @@ -270,82 +273,82 @@ def add_BigtableInstanceAdminServicer_to_server(servicer, server): rpc_method_handlers = { "CreateInstance": grpc.unary_unary_rpc_method_handler( servicer.CreateInstance, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateInstanceRequest.FromString, + request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateInstanceRequest.FromString, response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, ), "GetInstance": grpc.unary_unary_rpc_method_handler( servicer.GetInstance, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetInstanceRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.Instance.SerializeToString, + request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetInstanceRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Instance.SerializeToString, ), "ListInstances": grpc.unary_unary_rpc_method_handler( servicer.ListInstances, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListInstancesRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListInstancesResponse.SerializeToString, + request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListInstancesRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListInstancesResponse.SerializeToString, ), "UpdateInstance": grpc.unary_unary_rpc_method_handler( servicer.UpdateInstance, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.Instance.FromString, - response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.Instance.SerializeToString, + request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Instance.FromString, + response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Instance.SerializeToString, ), "PartialUpdateInstance": grpc.unary_unary_rpc_method_handler( servicer.PartialUpdateInstance, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.PartialUpdateInstanceRequest.FromString, + request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.PartialUpdateInstanceRequest.FromString, response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, ), "DeleteInstance": grpc.unary_unary_rpc_method_handler( servicer.DeleteInstance, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteInstanceRequest.FromString, + request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteInstanceRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), "CreateCluster": grpc.unary_unary_rpc_method_handler( servicer.CreateCluster, - 
request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateClusterRequest.FromString, + request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateClusterRequest.FromString, response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, ), "GetCluster": grpc.unary_unary_rpc_method_handler( servicer.GetCluster, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetClusterRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.Cluster.SerializeToString, + request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetClusterRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Cluster.SerializeToString, ), "ListClusters": grpc.unary_unary_rpc_method_handler( servicer.ListClusters, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListClustersRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListClustersResponse.SerializeToString, + request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListClustersRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListClustersResponse.SerializeToString, ), "UpdateCluster": grpc.unary_unary_rpc_method_handler( servicer.UpdateCluster, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.Cluster.FromString, + request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Cluster.FromString, response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, ), "DeleteCluster": grpc.unary_unary_rpc_method_handler( servicer.DeleteCluster, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteClusterRequest.FromString, + request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteClusterRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), "CreateAppProfile": grpc.unary_unary_rpc_method_handler( servicer.CreateAppProfile, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateAppProfileRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.AppProfile.SerializeToString, + request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateAppProfileRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.AppProfile.SerializeToString, ), "GetAppProfile": grpc.unary_unary_rpc_method_handler( servicer.GetAppProfile, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetAppProfileRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_instance__pb2.AppProfile.SerializeToString, + 
request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetAppProfileRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.AppProfile.SerializeToString, ), "ListAppProfiles": grpc.unary_unary_rpc_method_handler( servicer.ListAppProfiles, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListAppProfilesRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListAppProfilesResponse.SerializeToString, + request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListAppProfilesRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListAppProfilesResponse.SerializeToString, ), "UpdateAppProfile": grpc.unary_unary_rpc_method_handler( servicer.UpdateAppProfile, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.UpdateAppProfileRequest.FromString, + request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.UpdateAppProfileRequest.FromString, response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, ), "DeleteAppProfile": grpc.unary_unary_rpc_method_handler( servicer.DeleteAppProfile, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteAppProfileRequest.FromString, + request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteAppProfileRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), "GetIamPolicy": grpc.unary_unary_rpc_method_handler( diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto index 812022295950..119ef73a4db9 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto @@ -1,4 +1,4 @@ -// Copyright 2018 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,19 +11,23 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-// syntax = "proto3"; package google.bigtable.admin.v2; import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; +import "google/bigtable/admin/v2/common.proto"; import "google/bigtable/admin/v2/table.proto"; import "google/iam/v1/iam_policy.proto"; import "google/iam/v1/policy.proto"; import "google/longrunning/operations.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/empty.proto"; +import "google/protobuf/field_mask.proto"; import "google/protobuf/timestamp.proto"; option csharp_namespace = "Google.Cloud.Bigtable.Admin.V2"; @@ -39,6 +43,15 @@ option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2"; // Provides access to the table schemas only, not the data stored within // the tables. service BigtableTableAdmin { + option (google.api.default_host) = "bigtableadmin.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/bigtable.admin," + "https://www.googleapis.com/auth/bigtable.admin.table," + "https://www.googleapis.com/auth/cloud-bigtable.admin," + "https://www.googleapis.com/auth/cloud-bigtable.admin.table," + "https://www.googleapis.com/auth/cloud-platform," + "https://www.googleapis.com/auth/cloud-platform.read-only"; + // Creates a new table in the specified instance. // The table can be created with a full set of initial column families, // specified in the request. @@ -47,6 +60,7 @@ service BigtableTableAdmin { post: "/v2/{parent=projects/*/instances/*}/tables" body: "*" }; + option (google.api.method_signature) = "parent,table_id,table"; } // Creates a new table from the specified snapshot. The target table must @@ -63,6 +77,11 @@ service BigtableTableAdmin { post: "/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot" body: "*" }; + option (google.api.method_signature) = "parent,table_id,source_snapshot"; + option (google.longrunning.operation_info) = { + response_type: "Table" + metadata_type: "CreateTableFromSnapshotMetadata" + }; } // Lists all tables served from a specified instance. @@ -70,6 +89,7 @@ service BigtableTableAdmin { option (google.api.http) = { get: "/v2/{parent=projects/*/instances/*}/tables" }; + option (google.api.method_signature) = "parent"; } // Gets metadata information about the specified table. @@ -77,6 +97,7 @@ service BigtableTableAdmin { option (google.api.http) = { get: "/v2/{name=projects/*/instances/*/tables/*}" }; + option (google.api.method_signature) = "name"; } // Permanently deletes a specified table and all of its data. @@ -84,6 +105,7 @@ service BigtableTableAdmin { option (google.api.http) = { delete: "/v2/{name=projects/*/instances/*/tables/*}" }; + option (google.api.method_signature) = "name"; } // Performs a series of column family modifications on the specified table. @@ -95,6 +117,7 @@ service BigtableTableAdmin { post: "/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies" body: "*" }; + option (google.api.method_signature) = "name,modifications"; } // Permanently drop/delete a row range from a specified table. 
The request can @@ -117,6 +140,7 @@ service BigtableTableAdmin { post: "/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken" body: "*" }; + option (google.api.method_signature) = "name"; } // Checks replication consistency based on a consistency token, that is, if @@ -128,6 +152,7 @@ service BigtableTableAdmin { post: "/v2/{name=projects/*/instances/*/tables/*}:checkConsistency" body: "*" }; + option (google.api.method_signature) = "name,consistency_token"; } // Creates a new snapshot in the specified cluster from the specified @@ -144,6 +169,12 @@ service BigtableTableAdmin { post: "/v2/{name=projects/*/instances/*/tables/*}:snapshot" body: "*" }; + option (google.api.method_signature) = + "name,cluster,snapshot_id,description"; + option (google.longrunning.operation_info) = { + response_type: "Snapshot" + metadata_type: "SnapshotTableMetadata" + }; } // Gets metadata information about the specified snapshot. @@ -157,6 +188,7 @@ service BigtableTableAdmin { option (google.api.http) = { get: "/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}" }; + option (google.api.method_signature) = "name"; } // Lists all snapshots associated with the specified cluster. @@ -170,6 +202,7 @@ service BigtableTableAdmin { option (google.api.http) = { get: "/v2/{parent=projects/*/instances/*/clusters/*}/snapshots" }; + option (google.api.method_signature) = "parent"; } // Permanently deletes the specified snapshot. @@ -183,32 +216,121 @@ service BigtableTableAdmin { option (google.api.http) = { delete: "/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}" }; + option (google.api.method_signature) = "name"; + } + + // Starts creating a new Cloud Bigtable Backup. The returned backup + // [long-running operation][google.longrunning.Operation] can be used to + // track creation of the backup. The + // [metadata][google.longrunning.Operation.metadata] field type is + // [CreateBackupMetadata][google.bigtable.admin.v2.CreateBackupMetadata]. The + // [response][google.longrunning.Operation.response] field type is + // [Backup][google.bigtable.admin.v2.Backup], if successful. Cancelling the + // returned operation will stop the creation and delete the backup. + rpc CreateBackup(CreateBackupRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v2/{parent=projects/*/instances/*/clusters/*}/backups" + body: "backup" + }; + option (google.longrunning.operation_info) = { + response_type: "Backup" + metadata_type: "CreateBackupMetadata" + }; + option (google.api.method_signature) = "parent,backup_id,backup"; + } + + // Gets metadata on a pending or completed Cloud Bigtable Backup. + rpc GetBackup(GetBackupRequest) returns (Backup) { + option (google.api.http) = { + get: "/v2/{name=projects/*/instances/*/clusters/*/backups/*}" + }; + option (google.api.method_signature) = "name"; + } + + // Updates a pending or completed Cloud Bigtable Backup. + rpc UpdateBackup(UpdateBackupRequest) returns (Backup) { + option (google.api.http) = { + patch: "/v2/{backup.name=projects/*/instances/*/clusters/*/backups/*}" + body: "backup" + }; + option (google.api.method_signature) = "backup,update_mask"; + } + + // Deletes a pending or completed Cloud Bigtable backup. + rpc DeleteBackup(DeleteBackupRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v2/{name=projects/*/instances/*/clusters/*/backups/*}" + }; + option (google.api.method_signature) = "name"; + } + + // Lists Cloud Bigtable backups. 
Returns both completed and pending + // backups. + rpc ListBackups(ListBackupsRequest) returns (ListBackupsResponse) { + option (google.api.http) = { + get: "/v2/{parent=projects/*/instances/*/clusters/*}/backups" + }; + option (google.api.method_signature) = "parent"; + } + + // Create a new table by restoring from a completed backup. The new table + // must be in the same instance as the instance containing the backup. The + // returned table [long-running operation][google.longrunning.Operation] can + // be used to track the progress of the operation, and to cancel it. The + // [metadata][google.longrunning.Operation.metadata] field type is + // [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. The + // [response][google.longrunning.Operation.response] type is + // [Table][google.bigtable.admin.v2.Table], if successful. + rpc RestoreTable(RestoreTableRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v2/{parent=projects/*/instances/*}/tables:restore" + body: "*" + }; + option (google.longrunning.operation_info) = { + response_type: "Table" + metadata_type: "RestoreTableMetadata" + }; } - // Gets the access control policy for a table resource. Returns an empty - // policy if an table exists but does not have a policy set. - rpc GetIamPolicy(google.iam.v1.GetIamPolicyRequest) returns (google.iam.v1.Policy) { + // Gets the access control policy for a resource. + // Returns an empty policy if the resource exists but does not have a policy + // set. + rpc GetIamPolicy(google.iam.v1.GetIamPolicyRequest) + returns (google.iam.v1.Policy) { option (google.api.http) = { post: "/v2/{resource=projects/*/instances/*/tables/*}:getIamPolicy" body: "*" }; + option (google.api.method_signature) = "resource"; } - // Sets the access control policy on a table resource. Replaces any existing - // policy. - rpc SetIamPolicy(google.iam.v1.SetIamPolicyRequest) returns (google.iam.v1.Policy) { + // Sets the access control policy on a Table or Backup resource. + // Replaces any existing policy. + rpc SetIamPolicy(google.iam.v1.SetIamPolicyRequest) + returns (google.iam.v1.Policy) { option (google.api.http) = { post: "/v2/{resource=projects/*/instances/*/tables/*}:setIamPolicy" body: "*" + additional_bindings { + post: "/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:setIamPolicy" + body: "*" + } }; + option (google.api.method_signature) = "resource,policy"; } // Returns permissions that the caller has on the specified table resource. - rpc TestIamPermissions(google.iam.v1.TestIamPermissionsRequest) returns (google.iam.v1.TestIamPermissionsResponse) { + rpc TestIamPermissions(google.iam.v1.TestIamPermissionsRequest) + returns (google.iam.v1.TestIamPermissionsResponse) { option (google.api.http) = { post: "/v2/{resource=projects/*/instances/*/tables/*}:testIamPermissions" body: "*" + additional_bindings { + post: "/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:testIamPermissions" + body: "*" + } }; + option (google.api.method_signature) = "resource,permissions"; } } @@ -221,16 +343,22 @@ message CreateTableRequest { bytes key = 1; } - // The unique name of the instance in which to create the table. - // Values are of the form `projects//instances/`. - string parent = 1; + // Required. The unique name of the instance in which to create the table. + // Values are of the form `projects/{project}/instances/{instance}`. 
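[Illustrative sketch, not taken from this patch: the annotations above mark `parent`, `table_id`, and `table` of `CreateTableRequest` as REQUIRED. Assuming the generated modules import under the `google.cloud.bigtable_admin_v2.proto` paths used elsewhere in this diff, and using placeholder project/instance names, building such a request might look like:

    from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2, table_pb2

    request = bigtable_table_admin_pb2.CreateTableRequest(
        # Placeholder names of the form `projects/{project}/instances/{instance}`.
        parent="projects/my-project/instances/my-instance",
        table_id="my-table",
        # An empty Table is acceptable here; column families can be added
        # later via ModifyColumnFamilies.
        table=table_pb2.Table(),
        # Optional pre-split point; `key` is bytes, mirroring the nested Split message.
        initial_splits=[
            bigtable_table_admin_pb2.CreateTableRequest.Split(key=b"user#5000")
        ],
    )
]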
+ string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Instance" + } + ]; - // The name by which the new table should be referred to within the parent - // instance, e.g., `foobar` rather than `/tables/foobar`. - string table_id = 2; + // Required. The name by which the new table should be referred to within the + // parent instance, e.g., `foobar` rather than `{parent}/tables/foobar`. + // Maximum 50 characters. + string table_id = 2 [(google.api.field_behavior) = REQUIRED]; - // The Table to create. - Table table = 3; + // Required. The Table to create. + Table table = 3 [(google.api.field_behavior) = REQUIRED]; // The optional list of row keys that will be used to initially split the // table into several tablets (tablets are similar to HBase regions). @@ -259,28 +387,41 @@ message CreateTableRequest { // feature might be changed in backward-incompatible ways and is not recommended // for production use. It is not subject to any SLA or deprecation policy. message CreateTableFromSnapshotRequest { - // The unique name of the instance in which to create the table. - // Values are of the form `projects//instances/`. - string parent = 1; - - // The name by which the new table should be referred to within the parent - // instance, e.g., `foobar` rather than `/tables/foobar`. - string table_id = 2; - - // The unique name of the snapshot from which to restore the table. The - // snapshot and the table must be in the same instance. - // Values are of the form - // `projects//instances//clusters//snapshots/`. - string source_snapshot = 3; + // Required. The unique name of the instance in which to create the table. + // Values are of the form `projects/{project}/instances/{instance}`. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Instance" + } + ]; + + // Required. The name by which the new table should be referred to within the + // parent instance, e.g., `foobar` rather than `{parent}/tables/foobar`. + string table_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The unique name of the snapshot from which to restore the table. + // The snapshot and the table must be in the same instance. Values are of the + // form + // `projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}`. + string source_snapshot = 3 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Snapshot" + } + ]; } // Request message for // [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] message DropRowRangeRequest { - // The unique name of the table on which to drop a range of rows. + // Required. The unique name of the table on which to drop a range of rows. // Values are of the form - // `projects//instances//tables/
`. - string name = 1; + // `projects/{project}/instances/{instance}/tables/{table}`. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "bigtable.googleapis.com/Table" } + ]; // Delete all rows or by prefix. oneof target { @@ -296,16 +437,28 @@ message DropRowRangeRequest { // Request message for // [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] message ListTablesRequest { - // The unique name of the instance for which tables should be listed. - // Values are of the form `projects//instances/`. - string parent = 1; + // Required. The unique name of the instance for which tables should be + // listed. Values are of the form `projects/{project}/instances/{instance}`. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Instance" + } + ]; // The view to be applied to the returned tables' fields. - // Defaults to `NAME_ONLY` if unspecified; no others are currently supported. + // Only NAME_ONLY view (default) and REPLICATION_VIEW are supported. Table.View view = 2; // Maximum number of results per page. - // CURRENTLY UNIMPLEMENTED AND IGNORED. + // + // A page_size of zero lets the server choose the number of items to return. + // A page_size which is strictly positive will return at most that many items. + // A negative page_size will cause an error. + // + // Following the first request, subsequent paginated calls are not required + // to pass a page_size. If a page_size is set in subsequent calls, it must + // match the page_size given in the first request. int32 page_size = 4; // The value of `next_page_token` returned by a previous call. @@ -327,10 +480,13 @@ message ListTablesResponse { // Request message for // [google.bigtable.admin.v2.BigtableTableAdmin.GetTable][google.bigtable.admin.v2.BigtableTableAdmin.GetTable] message GetTableRequest { - // The unique name of the requested table. + // Required. The unique name of the requested table. // Values are of the form - // `projects//instances//tables/
`. - string name = 1; + // `projects/{project}/instances/{instance}/tables/{table}`. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "bigtable.googleapis.com/Table" } + ]; // The view to be applied to the returned table's fields. // Defaults to `SCHEMA_VIEW` if unspecified. @@ -340,10 +496,13 @@ message GetTableRequest { // Request message for // [google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable] message DeleteTableRequest { - // The unique name of the table to be deleted. + // Required. The unique name of the table to be deleted. // Values are of the form - // `projects//instances//tables/
`. - string name = 1; + // `projects/{project}/instances/{instance}/tables/{table}`. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "bigtable.googleapis.com/Table" } + ]; } // Request message for @@ -370,25 +529,32 @@ message ModifyColumnFamiliesRequest { } } - // The unique name of the table whose families should be modified. + // Required. The unique name of the table whose families should be modified. // Values are of the form - // `projects//instances//tables/
`. - string name = 1; - - // Modifications to be atomically applied to the specified table's families. - // Entries are applied in order, meaning that earlier modifications can be - // masked by later ones (in the case of repeated updates to the same family, - // for example). - repeated Modification modifications = 2; + // `projects/{project}/instances/{instance}/tables/{table}`. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "bigtable.googleapis.com/Table" } + ]; + + // Required. Modifications to be atomically applied to the specified table's + // families. Entries are applied in order, meaning that earlier modifications + // can be masked by later ones (in the case of repeated updates to the same + // family, for example). + repeated Modification modifications = 2 + [(google.api.field_behavior) = REQUIRED]; } // Request message for // [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] message GenerateConsistencyTokenRequest { - // The unique name of the Table for which to create a consistency token. - // Values are of the form - // `projects//instances//tables/
`. - string name = 1; + // Required. The unique name of the Table for which to create a consistency + // token. Values are of the form + // `projects/{project}/instances/{instance}/tables/{table}`. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "bigtable.googleapis.com/Table" } + ]; } // Response message for @@ -401,13 +567,16 @@ message GenerateConsistencyTokenResponse { // Request message for // [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] message CheckConsistencyRequest { - // The unique name of the Table for which to check replication consistency. - // Values are of the form - // `projects//instances//tables/
`. - string name = 1; - - // The token created using GenerateConsistencyToken for the Table. - string consistency_token = 2; + // Required. The unique name of the Table for which to check replication + // consistency. Values are of the form + // `projects/{project}/instances/{instance}/tables/{table}`. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "bigtable.googleapis.com/Table" } + ]; + + // Required. The token created using GenerateConsistencyToken for the Table. + string consistency_token = 2 [(google.api.field_behavior) = REQUIRED]; } // Response message for @@ -426,21 +595,29 @@ message CheckConsistencyResponse { // feature might be changed in backward-incompatible ways and is not recommended // for production use. It is not subject to any SLA or deprecation policy. message SnapshotTableRequest { - // The unique name of the table to have the snapshot taken. + // Required. The unique name of the table to have the snapshot taken. // Values are of the form - // `projects//instances//tables/
`. - string name = 1; + // `projects/{project}/instances/{instance}/tables/{table}`. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "bigtable.googleapis.com/Table" } + ]; - // The name of the cluster where the snapshot will be created in. + // Required. The name of the cluster where the snapshot will be created in. // Values are of the form - // `projects//instances//clusters/`. - string cluster = 2; + // `projects/{project}/instances/{instance}/clusters/{cluster}`. + string cluster = 2 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Cluster" + } + ]; - // The ID by which the new snapshot should be referred to within the parent - // cluster, e.g., `mysnapshot` of the form: `[_a-zA-Z0-9][-_.a-zA-Z0-9]*` - // rather than - // `projects//instances//clusters//snapshots/mysnapshot`. - string snapshot_id = 3; + // Required. The ID by which the new snapshot should be referred to within the + // parent cluster, e.g., `mysnapshot` of the form: + // `[_a-zA-Z0-9][-_.a-zA-Z0-9]*` rather than + // `projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/mysnapshot`. + string snapshot_id = 3 [(google.api.field_behavior) = REQUIRED]; // The amount of time that the new snapshot can stay active after it is // created. Once 'ttl' expires, the snapshot will get deleted. The maximum @@ -460,10 +637,15 @@ message SnapshotTableRequest { // feature might be changed in backward-incompatible ways and is not recommended // for production use. It is not subject to any SLA or deprecation policy. message GetSnapshotRequest { - // The unique name of the requested snapshot. + // Required. The unique name of the requested snapshot. // Values are of the form - // `projects//instances//clusters//snapshots/`. - string name = 1; + // `projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}`. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Snapshot" + } + ]; } // Request message for @@ -474,12 +656,17 @@ message GetSnapshotRequest { // feature might be changed in backward-incompatible ways and is not recommended // for production use. It is not subject to any SLA or deprecation policy. message ListSnapshotsRequest { - // The unique name of the cluster for which snapshots should be listed. - // Values are of the form - // `projects//instances//clusters/`. - // Use ` = '-'` to list snapshots for all clusters in an instance, - // e.g., `projects//instances//clusters/-`. - string parent = 1; + // Required. The unique name of the cluster for which snapshots should be + // listed. Values are of the form + // `projects/{project}/instances/{instance}/clusters/{cluster}`. + // Use `{cluster} = '-'` to list snapshots for all clusters in an instance, + // e.g., `projects/{project}/instances/{instance}/clusters/-`. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Cluster" + } + ]; // The maximum number of snapshots to return per page. // CURRENTLY UNIMPLEMENTED AND IGNORED. @@ -514,10 +701,15 @@ message ListSnapshotsResponse { // feature might be changed in backward-incompatible ways and is not recommended // for production use. It is not subject to any SLA or deprecation policy. message DeleteSnapshotRequest { - // The unique name of the snapshot to be deleted. + // Required. 
The unique name of the snapshot to be deleted. // Values are of the form - // `projects//instances//clusters//snapshots/`. - string name = 1; + // `projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}`. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Snapshot" + } + ]; } // The metadata for the Operation returned by SnapshotTable. @@ -554,3 +746,248 @@ message CreateTableFromSnapshotMetadata { // The time at which the operation failed or was completed successfully. google.protobuf.Timestamp finish_time = 3; } + +// The request for +// [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup]. +message CreateBackupRequest { + // Required. This must be one of the clusters in the instance in which this + // table is located. The backup will be stored in this cluster. Values are + // of the form `projects/{project}/instances/{instance}/clusters/{cluster}`. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Cluster" + } + ]; + + // Required. The id of the backup to be created. The `backup_id` along with + // the parent `parent` are combined as {parent}/backups/{backup_id} to create + // the full backup name, of the form: + // `projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}`. + // This string must be between 1 and 50 characters in length and match the + // regex [_a-zA-Z0-9][-_.a-zA-Z0-9]*. + string backup_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The backup to create. + Backup backup = 3 [(google.api.field_behavior) = REQUIRED]; +} + +// Metadata type for the operation returned by +// [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup]. +message CreateBackupMetadata { + // The name of the backup being created. + string name = 1; + + // The name of the table the backup is created from. + string source_table = 2; + + // The time at which this operation started. + google.protobuf.Timestamp start_time = 3; + + // If set, the time at which this operation finished or was cancelled. + google.protobuf.Timestamp end_time = 4; +} + +// The request for +// [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup]. +message GetBackupRequest { + // Required. Name of the backup. + // Values are of the form + // `projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}`. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "bigtable.googleapis.com/Backup" } + ]; +} + +// The request for +// [UpdateBackup][google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup]. +message UpdateBackupRequest { + // Required. The backup to update. `backup.name`, and the fields to be updated + // as specified by `update_mask` are required. Other fields are ignored. + // Update is only supported for the following fields: + // * `backup.expire_time`. + Backup backup = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. A mask specifying which fields (e.g. `expire_time`) in the + // Backup resource should be updated. This mask is relative to the Backup + // resource, not to the request message. The field mask must always be + // specified; this prevents any future fields from being erased accidentally + // by clients that do not know about them. 
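[Illustrative sketch, not taken from this patch: per the comment above, UpdateBackup currently supports changing only `backup.expire_time`, and the field mask is mandatory. Assuming the generated modules import as in this diff and using placeholder resource names, the request might be built as:

    from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2, table_pb2
    from google.protobuf import field_mask_pb2, timestamp_pb2

    request = bigtable_table_admin_pb2.UpdateBackupRequest(
        backup=table_pb2.Backup(
            # Placeholder backup name, form: projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}
            name="projects/my-project/instances/my-instance/clusters/my-cluster/backups/my-backup",
            # New expiration: 2030-01-01T00:00:00Z expressed as epoch seconds.
            expire_time=timestamp_pb2.Timestamp(seconds=1893456000),
        ),
        # Only the listed path is updated; other Backup fields are ignored.
        update_mask=field_mask_pb2.FieldMask(paths=["expire_time"]),
    )
]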
+ google.protobuf.FieldMask update_mask = 2 + [(google.api.field_behavior) = REQUIRED]; +} + +// The request for +// [DeleteBackup][google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup]. +message DeleteBackupRequest { + // Required. Name of the backup to delete. + // Values are of the form + // `projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}`. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "bigtable.googleapis.com/Backup" } + ]; +} + +// The request for +// [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. +message ListBackupsRequest { + // Required. The cluster to list backups from. Values are of the + // form `projects/{project}/instances/{instance}/clusters/{cluster}`. + // Use `{cluster} = '-'` to list backups for all clusters in an instance, + // e.g., `projects/{project}/instances/{instance}/clusters/-`. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Cluster" + } + ]; + + // A filter expression that filters backups listed in the response. + // The expression must specify the field name, a comparison operator, + // and the value that you want to use for filtering. The value must be a + // string, a number, or a boolean. The comparison operator must be + // <, >, <=, >=, !=, =, or :. Colon ‘:’ represents a HAS operator which is + // roughly synonymous with equality. Filter rules are case insensitive. + // + // The fields eligible for filtering are: + // * `name` + // * `source_table` + // * `state` + // * `start_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) + // * `end_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) + // * `expire_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) + // * `size_bytes` + // + // To filter on multiple expressions, provide each separate expression within + // parentheses. By default, each expression is an AND expression. However, + // you can include AND, OR, and NOT expressions explicitly. + // + // Some examples of using filters are: + // + // * `name:"exact"` --> The backup's name is the string "exact". + // * `name:howl` --> The backup's name contains the string "howl". + // * `source_table:prod` + // --> The source_table's name contains the string "prod". + // * `state:CREATING` --> The backup is pending creation. + // * `state:READY` --> The backup is fully created and ready for use. + // * `(name:howl) AND (start_time < \"2018-03-28T14:50:00Z\")` + // --> The backup name contains the string "howl" and start_time + // of the backup is before 2018-03-28T14:50:00Z. + // * `size_bytes > 10000000000` --> The backup's size is greater than 10GB + string filter = 2; + + // An expression for specifying the sort order of the results of the request. + // The string value should specify one or more fields in + // [Backup][google.bigtable.admin.v2.Backup]. The full syntax is described at + // https://aip.dev/132#ordering. + // + // Fields supported are: + // * name + // * source_table + // * expire_time + // * start_time + // * end_time + // * size_bytes + // * state + // + // For example, "start_time". The default sorting order is ascending. + // To specify descending order for the field, a suffix " desc" should + // be appended to the field name. For example, "start_time desc". + // Redundant space characters in the syntax are insigificant. 
+ // + // If order_by is empty, results will be sorted by `start_time` in descending + // order starting from the most recently created backup. + string order_by = 3; + + // Number of backups to be returned in the response. If 0 or + // less, defaults to the server's maximum allowed page size. + int32 page_size = 4; + + // If non-empty, `page_token` should contain a + // [next_page_token][google.bigtable.admin.v2.ListBackupsResponse.next_page_token] + // from a previous + // [ListBackupsResponse][google.bigtable.admin.v2.ListBackupsResponse] to the + // same `parent` and with the same `filter`. + string page_token = 5; +} + +// The response for +// [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. +message ListBackupsResponse { + // The list of matching backups. + repeated Backup backups = 1; + + // `next_page_token` can be sent in a subsequent + // [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups] call + // to fetch more of the matching backups. + string next_page_token = 2; +} + +// The request for +// [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. +message RestoreTableRequest { + // Required. The name of the instance in which to create the restored + // table. This instance must be the parent of the source backup. Values are + // of the form `projects//instances/`. + string parent = 1; + + // Required. The id of the table to create and restore to. This + // table must not already exist. The `table_id` appended to + // `parent` forms the full table name of the form + // `projects//instances//tables/`. + string table_id = 2; + + // Required. The source from which to restore. + oneof source { + // Name of the backup from which to restore. Values are of the form + // `projects//instances//clusters//backups/`. + string backup = 3; + } +} + +// Metadata type for the long-running operation returned by +// [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. +message RestoreTableMetadata { + // Name of the table being created and restored to. + string name = 1; + + // The type of the restore source. + RestoreSourceType source_type = 2; + + // Information about the source used to restore the table, as specified by + // `source` in + // [RestoreTableRequest][google.bigtable.admin.v2.RestoreTableRequest]. + oneof source_info { + BackupInfo backup_info = 3; + } + + // If exists, the name of the long-running operation that will be used to + // track the post-restore optimization process to optimize the performance of + // the restored table. The metadata type of the long-running operation is + // [OptimizeRestoreTableMetadata][]. The response type is + // [Empty][google.protobuf.Empty]. This long-running operation may be + // automatically created by the system if applicable after the + // RestoreTable long-running operation completes successfully. This operation + // may not be created if the table is already optimized or the restore was + // not successful. + string optimize_table_operation_name = 4; + + // The progress of the + // [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable] + // operation. + OperationProgress progress = 5; +} + +// Metadata type for the long-running operation used to track the progress +// of optimizations performed on a newly restored table. This long-running +// operation is automatically created by the system after the successful +// completion of a table restore, and cannot be cancelled. 
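[Illustrative sketch, not taken from this patch: the filter and order_by syntax documented above for ListBackups, combined with the new RestoreTable request, could be exercised as follows, assuming the generated modules import as in this diff and using placeholder resource names:

    from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2

    # List READY backups across all clusters of an instance, newest first.
    list_request = bigtable_table_admin_pb2.ListBackupsRequest(
        parent="projects/my-project/instances/my-instance/clusters/-",
        filter="state:READY",
        order_by="start_time desc",
        page_size=100,
    )

    # Restore one backup into a new table in the same instance.
    restore_request = bigtable_table_admin_pb2.RestoreTableRequest(
        parent="projects/my-project/instances/my-instance",
        table_id="restored-table",
        backup="projects/my-project/instances/my-instance/clusters/my-cluster/backups/my-backup",
    )
]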
+message OptimizeRestoredTableMetadata { + // Name of the restored table being optimized. + string name = 1; + + // The progress of the post-restore optimizations. + OperationProgress progress = 2; +} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py index 6852607952f3..7c3317ab09a4 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py @@ -1,10 +1,7 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/bigtable/admin_v2/proto/bigtable_table_admin.proto +# source: google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -16,8 +13,14 @@ from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 +from google.api import client_pb2 as google_dot_api_dot_client__pb2 +from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 +from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 +from google.cloud.bigtable_admin_v2.proto import ( + common_pb2 as google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_common__pb2, +) from google.cloud.bigtable_admin_v2.proto import ( - table_pb2 as google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2, + table_pb2 as google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2, ) from google.iam.v1 import iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2 from google.iam.v1 import policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2 @@ -26,27 +29,30 @@ ) from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 +from google.protobuf import field_mask_pb2 as google_dot_protobuf_dot_field__mask__pb2 from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/bigtable/admin_v2/proto/bigtable_table_admin.proto", + name="google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto", package="google.bigtable.admin.v2", syntax="proto3", - serialized_options=_b( - "\n\034com.google.bigtable.admin.v2B\027BigtableTableAdminProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2" - ), - serialized_pb=_b( - '\n?google/cloud/bigtable/admin_v2/proto/bigtable_table_admin.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x30google/cloud/bigtable/admin_v2/proto/table.proto\x1a\x1egoogle/iam/v1/iam_policy.proto\x1a\x1agoogle/iam/v1/policy.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xc8\x01\n\x12\x43reateTableRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x10\n\x08table_id\x18\x02 \x01(\t\x12.\n\x05table\x18\x03 \x01(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12J\n\x0einitial_splits\x18\x04 
\x03(\x0b\x32\x32.google.bigtable.admin.v2.CreateTableRequest.Split\x1a\x14\n\x05Split\x12\x0b\n\x03key\x18\x01 \x01(\x0c"[\n\x1e\x43reateTableFromSnapshotRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x10\n\x08table_id\x18\x02 \x01(\t\x12\x17\n\x0fsource_snapshot\x18\x03 \x01(\t"m\n\x13\x44ropRowRangeRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x18\n\x0erow_key_prefix\x18\x02 \x01(\x0cH\x00\x12$\n\x1a\x64\x65lete_all_data_from_table\x18\x03 \x01(\x08H\x00\x42\x08\n\x06target"~\n\x11ListTablesRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x32\n\x04view\x18\x02 \x01(\x0e\x32$.google.bigtable.admin.v2.Table.View\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t"^\n\x12ListTablesResponse\x12/\n\x06tables\x18\x01 \x03(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"S\n\x0fGetTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x32\n\x04view\x18\x02 \x01(\x0e\x32$.google.bigtable.admin.v2.Table.View""\n\x12\x44\x65leteTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"\xae\x02\n\x1bModifyColumnFamiliesRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12Y\n\rmodifications\x18\x02 \x03(\x0b\x32\x42.google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification\x1a\xa5\x01\n\x0cModification\x12\n\n\x02id\x18\x01 \x01(\t\x12\x38\n\x06\x63reate\x18\x02 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamilyH\x00\x12\x38\n\x06update\x18\x03 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamilyH\x00\x12\x0e\n\x04\x64rop\x18\x04 \x01(\x08H\x00\x42\x05\n\x03mod"/\n\x1fGenerateConsistencyTokenRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"=\n GenerateConsistencyTokenResponse\x12\x19\n\x11\x63onsistency_token\x18\x01 \x01(\t"B\n\x17\x43heckConsistencyRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x19\n\x11\x63onsistency_token\x18\x02 \x01(\t".\n\x18\x43heckConsistencyResponse\x12\x12\n\nconsistent\x18\x01 \x01(\x08"\x87\x01\n\x14SnapshotTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07\x63luster\x18\x02 \x01(\t\x12\x13\n\x0bsnapshot_id\x18\x03 \x01(\t\x12&\n\x03ttl\x18\x04 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x13\n\x0b\x64\x65scription\x18\x05 \x01(\t""\n\x12GetSnapshotRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"M\n\x14ListSnapshotsRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t"g\n\x15ListSnapshotsResponse\x12\x35\n\tsnapshots\x18\x01 \x03(\x0b\x32".google.bigtable.admin.v2.Snapshot\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"%\n\x15\x44\x65leteSnapshotRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"\xc4\x01\n\x15SnapshotTableMetadata\x12H\n\x10original_request\x18\x01 \x01(\x0b\x32..google.bigtable.admin.v2.SnapshotTableRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xd8\x01\n\x1f\x43reateTableFromSnapshotMetadata\x12R\n\x10original_request\x18\x01 \x01(\x0b\x32\x38.google.bigtable.admin.v2.CreateTableFromSnapshotRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp2\x99\x15\n\x12\x42igtableTableAdmin\x12\x93\x01\n\x0b\x43reateTable\x12,.google.bigtable.admin.v2.CreateTableRequest\x1a\x1f.google.bigtable.admin.v2.Table"5\x82\xd3\xe4\x93\x02/"*/v2/{parent=projects/*/instances/*}/tables:\x01*\x12\xbc\x01\n\x17\x43reateTableFromSnapshot\x12\x38.google.bigtable.admin.v2.CreateTableFromSnapshotRequest\x1a\x1d.google.longrunning.Operation"H\x82\xd3\xe4\x93\x02\x42"=/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot:\x01*\x12\x9b\x01\n\nListTables\x12+.google.bigtable.admin.v2.ListTablesRequest\x1a,.google.bigtable.admin.v2.ListTablesResponse"2\x82\xd3\xe4\x93\x02,\x12*/v2/{parent=projects/*/instances/*}/tables\x12\x8a\x01\n\x08GetTable\x12).google.bigtable.admin.v2.GetTableRequest\x1a\x1f.google.bigtable.admin.v2.Table"2\x82\xd3\xe4\x93\x02,\x12*/v2/{name=projects/*/instances/*/tables/*}\x12\x87\x01\n\x0b\x44\x65leteTable\x12,.google.bigtable.admin.v2.DeleteTableRequest\x1a\x16.google.protobuf.Empty"2\x82\xd3\xe4\x93\x02,**/v2/{name=projects/*/instances/*/tables/*}\x12\xba\x01\n\x14ModifyColumnFamilies\x12\x35.google.bigtable.admin.v2.ModifyColumnFamiliesRequest\x1a\x1f.google.bigtable.admin.v2.Table"J\x82\xd3\xe4\x93\x02\x44"?/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies:\x01*\x12\x99\x01\n\x0c\x44ropRowRange\x12-.google.bigtable.admin.v2.DropRowRangeRequest\x1a\x16.google.protobuf.Empty"B\x82\xd3\xe4\x93\x02<"7/v2/{name=projects/*/instances/*/tables/*}:dropRowRange:\x01*\x12\xe1\x01\n\x18GenerateConsistencyToken\x12\x39.google.bigtable.admin.v2.GenerateConsistencyTokenRequest\x1a:.google.bigtable.admin.v2.GenerateConsistencyTokenResponse"N\x82\xd3\xe4\x93\x02H"C/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken:\x01*\x12\xc1\x01\n\x10\x43heckConsistency\x12\x31.google.bigtable.admin.v2.CheckConsistencyRequest\x1a\x32.google.bigtable.admin.v2.CheckConsistencyResponse"F\x82\xd3\xe4\x93\x02@";/v2/{name=projects/*/instances/*/tables/*}:checkConsistency:\x01*\x12\x9e\x01\n\rSnapshotTable\x12..google.bigtable.admin.v2.SnapshotTableRequest\x1a\x1d.google.longrunning.Operation">\x82\xd3\xe4\x93\x02\x38"3/v2/{name=projects/*/instances/*/tables/*}:snapshot:\x01*\x12\xa1\x01\n\x0bGetSnapshot\x12,.google.bigtable.admin.v2.GetSnapshotRequest\x1a".google.bigtable.admin.v2.Snapshot"@\x82\xd3\xe4\x93\x02:\x12\x38/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}\x12\xb2\x01\n\rListSnapshots\x12..google.bigtable.admin.v2.ListSnapshotsRequest\x1a/.google.bigtable.admin.v2.ListSnapshotsResponse"@\x82\xd3\xe4\x93\x02:\x12\x38/v2/{parent=projects/*/instances/*/clusters/*}/snapshots\x12\x9b\x01\n\x0e\x44\x65leteSnapshot\x12/.google.bigtable.admin.v2.DeleteSnapshotRequest\x1a\x16.google.protobuf.Empty"@\x82\xd3\xe4\x93\x02:*8/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}\x12\x91\x01\n\x0cGetIamPolicy\x12".google.iam.v1.GetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"F\x82\xd3\xe4\x93\x02@";/v2/{resource=projects/*/instances/*/tables/*}:getIamPolicy:\x01*\x12\x91\x01\n\x0cSetIamPolicy\x12".google.iam.v1.SetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"F\x82\xd3\xe4\x93\x02@";/v2/{resource=projects/*/instances/*/tables/*}:setIamPolicy:\x01*\x12\xb7\x01\n\x12TestIamPermissions\x12(.google.iam.v1.TestIamPermissionsRequest\x1a).google.iam.v1.TestIamPermissionsResponse"L\x82\xd3\xe4\x93\x02\x46"A/v2/{resource=projects/*/instances/*/tables/*}:testIamPermissions:\x01*B\xba\x01\n\x1c\x63om.google.bigtable.admin.v2B\x17\x42igtableTableAdminProtoP\x01Z=google.golang.org/genproto/googl
eapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2b\x06proto3' - ), + serialized_options=b"\n\034com.google.bigtable.admin.v2B\027BigtableTableAdminProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2", + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n?google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x31google/cloud/bigtable_admin_v2/proto/common.proto\x1a\x30google/cloud/bigtable_admin_v2/proto/table.proto\x1a\x1egoogle/iam/v1/iam_policy.proto\x1a\x1agoogle/iam/v1/policy.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xfc\x01\n\x12\x43reateTableRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x15\n\x08table_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x33\n\x05table\x18\x03 \x01(\x0b\x32\x1f.google.bigtable.admin.v2.TableB\x03\xe0\x41\x02\x12J\n\x0einitial_splits\x18\x04 \x03(\x0b\x32\x32.google.bigtable.admin.v2.CreateTableRequest.Split\x1a\x14\n\x05Split\x12\x0b\n\x03key\x18\x01 \x01(\x0c"\xb4\x01\n\x1e\x43reateTableFromSnapshotRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x15\n\x08table_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x41\n\x0fsource_snapshot\x18\x03 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Snapshot"\x94\x01\n\x13\x44ropRowRangeRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x18\n\x0erow_key_prefix\x18\x02 \x01(\x0cH\x00\x12$\n\x1a\x64\x65lete_all_data_from_table\x18\x03 \x01(\x08H\x00\x42\x08\n\x06target"\xa8\x01\n\x11ListTablesRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x32\n\x04view\x18\x02 \x01(\x0e\x32$.google.bigtable.admin.v2.Table.View\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t"^\n\x12ListTablesResponse\x12/\n\x06tables\x18\x01 \x03(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"z\n\x0fGetTableRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x32\n\x04view\x18\x02 \x01(\x0e\x32$.google.bigtable.admin.v2.Table.View"I\n\x12\x44\x65leteTableRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table"\xda\x02\n\x1bModifyColumnFamiliesRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12^\n\rmodifications\x18\x02 \x03(\x0b\x32\x42.google.bigtable.admin.v2.ModifyColumnFamiliesRequest.ModificationB\x03\xe0\x41\x02\x1a\xa5\x01\n\x0cModification\x12\n\n\x02id\x18\x01 \x01(\t\x12\x38\n\x06\x63reate\x18\x02 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamilyH\x00\x12\x38\n\x06update\x18\x03 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamilyH\x00\x12\x0e\n\x04\x64rop\x18\x04 \x01(\x08H\x00\x42\x05\n\x03mod"V\n\x1fGenerateConsistencyTokenRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table"=\n 
GenerateConsistencyTokenResponse\x12\x19\n\x11\x63onsistency_token\x18\x01 \x01(\t"n\n\x17\x43heckConsistencyRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x1e\n\x11\x63onsistency_token\x18\x02 \x01(\tB\x03\xe0\x41\x02".\n\x18\x43heckConsistencyResponse\x12\x12\n\nconsistent\x18\x01 \x01(\x08"\xdc\x01\n\x14SnapshotTableRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x38\n\x07\x63luster\x18\x02 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster\x12\x18\n\x0bsnapshot_id\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12&\n\x03ttl\x18\x04 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x13\n\x0b\x64\x65scription\x18\x05 \x01(\t"L\n\x12GetSnapshotRequest\x12\x36\n\x04name\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Snapshot"v\n\x14ListSnapshotsRequest\x12\x37\n\x06parent\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t"g\n\x15ListSnapshotsResponse\x12\x35\n\tsnapshots\x18\x01 \x03(\x0b\x32".google.bigtable.admin.v2.Snapshot\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"O\n\x15\x44\x65leteSnapshotRequest\x12\x36\n\x04name\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Snapshot"\xc4\x01\n\x15SnapshotTableMetadata\x12H\n\x10original_request\x18\x01 \x01(\x0b\x32..google.bigtable.admin.v2.SnapshotTableRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xd8\x01\n\x1f\x43reateTableFromSnapshotMetadata\x12R\n\x10original_request\x18\x01 \x01(\x0b\x32\x38.google.bigtable.admin.v2.CreateTableFromSnapshotRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\x9d\x01\n\x13\x43reateBackupRequest\x12\x37\n\x06parent\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster\x12\x16\n\tbackup_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x35\n\x06\x62\x61\x63kup\x18\x03 \x01(\x0b\x32 .google.bigtable.admin.v2.BackupB\x03\xe0\x41\x02"\x98\x01\n\x14\x43reateBackupMetadata\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0csource_table\x18\x02 \x01(\t\x12.\n\nstart_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"H\n\x10GetBackupRequest\x12\x34\n\x04name\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1e\x62igtable.googleapis.com/Backup"\x82\x01\n\x13UpdateBackupRequest\x12\x35\n\x06\x62\x61\x63kup\x18\x01 \x01(\x0b\x32 .google.bigtable.admin.v2.BackupB\x03\xe0\x41\x02\x12\x34\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMaskB\x03\xe0\x41\x02"K\n\x13\x44\x65leteBackupRequest\x12\x34\n\x04name\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1e\x62igtable.googleapis.com/Backup"\x96\x01\n\x12ListBackupsRequest\x12\x37\n\x06parent\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster\x12\x0e\n\x06\x66ilter\x18\x02 \x01(\t\x12\x10\n\x08order_by\x18\x03 \x01(\t\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x05 \x01(\t"a\n\x13ListBackupsResponse\x12\x31\n\x07\x62\x61\x63kups\x18\x01 \x03(\x0b\x32 .google.bigtable.admin.v2.Backup\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"S\n\x13RestoreTableRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x10\n\x08table_id\x18\x02 
\x01(\t\x12\x10\n\x06\x62\x61\x63kup\x18\x03 \x01(\tH\x00\x42\x08\n\x06source"\x98\x02\n\x14RestoreTableMetadata\x12\x0c\n\x04name\x18\x01 \x01(\t\x12@\n\x0bsource_type\x18\x02 \x01(\x0e\x32+.google.bigtable.admin.v2.RestoreSourceType\x12;\n\x0b\x62\x61\x63kup_info\x18\x03 \x01(\x0b\x32$.google.bigtable.admin.v2.BackupInfoH\x00\x12%\n\x1doptimize_table_operation_name\x18\x04 \x01(\t\x12=\n\x08progress\x18\x05 \x01(\x0b\x32+.google.bigtable.admin.v2.OperationProgressB\r\n\x0bsource_info"l\n\x1dOptimizeRestoredTableMetadata\x12\x0c\n\x04name\x18\x01 \x01(\t\x12=\n\x08progress\x18\x02 \x01(\x0b\x32+.google.bigtable.admin.v2.OperationProgress2\xc8$\n\x12\x42igtableTableAdmin\x12\xab\x01\n\x0b\x43reateTable\x12,.google.bigtable.admin.v2.CreateTableRequest\x1a\x1f.google.bigtable.admin.v2.Table"M\x82\xd3\xe4\x93\x02/"*/v2/{parent=projects/*/instances/*}/tables:\x01*\xda\x41\x15parent,table_id,table\x12\x8a\x02\n\x17\x43reateTableFromSnapshot\x12\x38.google.bigtable.admin.v2.CreateTableFromSnapshotRequest\x1a\x1d.google.longrunning.Operation"\x95\x01\x82\xd3\xe4\x93\x02\x42"=/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot:\x01*\xda\x41\x1fparent,table_id,source_snapshot\xca\x41(\n\x05Table\x12\x1f\x43reateTableFromSnapshotMetadata\x12\xa4\x01\n\nListTables\x12+.google.bigtable.admin.v2.ListTablesRequest\x1a,.google.bigtable.admin.v2.ListTablesResponse";\x82\xd3\xe4\x93\x02,\x12*/v2/{parent=projects/*/instances/*}/tables\xda\x41\x06parent\x12\x91\x01\n\x08GetTable\x12).google.bigtable.admin.v2.GetTableRequest\x1a\x1f.google.bigtable.admin.v2.Table"9\x82\xd3\xe4\x93\x02,\x12*/v2/{name=projects/*/instances/*/tables/*}\xda\x41\x04name\x12\x8e\x01\n\x0b\x44\x65leteTable\x12,.google.bigtable.admin.v2.DeleteTableRequest\x1a\x16.google.protobuf.Empty"9\x82\xd3\xe4\x93\x02,**/v2/{name=projects/*/instances/*/tables/*}\xda\x41\x04name\x12\xcf\x01\n\x14ModifyColumnFamilies\x12\x35.google.bigtable.admin.v2.ModifyColumnFamiliesRequest\x1a\x1f.google.bigtable.admin.v2.Table"_\x82\xd3\xe4\x93\x02\x44"?/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies:\x01*\xda\x41\x12name,modifications\x12\x99\x01\n\x0c\x44ropRowRange\x12-.google.bigtable.admin.v2.DropRowRangeRequest\x1a\x16.google.protobuf.Empty"B\x82\xd3\xe4\x93\x02<"7/v2/{name=projects/*/instances/*/tables/*}:dropRowRange:\x01*\x12\xe8\x01\n\x18GenerateConsistencyToken\x12\x39.google.bigtable.admin.v2.GenerateConsistencyTokenRequest\x1a:.google.bigtable.admin.v2.GenerateConsistencyTokenResponse"U\x82\xd3\xe4\x93\x02H"C/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken:\x01*\xda\x41\x04name\x12\xda\x01\n\x10\x43heckConsistency\x12\x31.google.bigtable.admin.v2.CheckConsistencyRequest\x1a\x32.google.bigtable.admin.v2.CheckConsistencyResponse"_\x82\xd3\xe4\x93\x02@";/v2/{name=projects/*/instances/*/tables/*}:checkConsistency:\x01*\xda\x41\x16name,consistency_token\x12\xea\x01\n\rSnapshotTable\x12..google.bigtable.admin.v2.SnapshotTableRequest\x1a\x1d.google.longrunning.Operation"\x89\x01\x82\xd3\xe4\x93\x02\x38"3/v2/{name=projects/*/instances/*/tables/*}:snapshot:\x01*\xda\x41$name,cluster,snapshot_id,description\xca\x41!\n\x08Snapshot\x12\x15SnapshotTableMetadata\x12\xa8\x01\n\x0bGetSnapshot\x12,.google.bigtable.admin.v2.GetSnapshotRequest\x1a".google.bigtable.admin.v2.Snapshot"G\x82\xd3\xe4\x93\x02:\x12\x38/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}\xda\x41\x04name\x12\xbb\x01\n\rListSnapshots\x12..google.bigtable.admin.v2.ListSnapshotsRequest\x1a/.google.bigtable.admin.v2.ListSnapshotsResponse"I\x82\xd3\
xe4\x93\x02:\x12\x38/v2/{parent=projects/*/instances/*/clusters/*}/snapshots\xda\x41\x06parent\x12\xa2\x01\n\x0e\x44\x65leteSnapshot\x12/.google.bigtable.admin.v2.DeleteSnapshotRequest\x1a\x16.google.protobuf.Empty"G\x82\xd3\xe4\x93\x02:*8/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}\xda\x41\x04name\x12\xe0\x01\n\x0c\x43reateBackup\x12-.google.bigtable.admin.v2.CreateBackupRequest\x1a\x1d.google.longrunning.Operation"\x81\x01\x82\xd3\xe4\x93\x02@"6/v2/{parent=projects/*/instances/*/clusters/*}/backups:\x06\x62\x61\x63kup\xca\x41\x1e\n\x06\x42\x61\x63kup\x12\x14\x43reateBackupMetadata\xda\x41\x17parent,backup_id,backup\x12\xa0\x01\n\tGetBackup\x12*.google.bigtable.admin.v2.GetBackupRequest\x1a .google.bigtable.admin.v2.Backup"E\x82\xd3\xe4\x93\x02\x38\x12\x36/v2/{name=projects/*/instances/*/clusters/*/backups/*}\xda\x41\x04name\x12\xc3\x01\n\x0cUpdateBackup\x12-.google.bigtable.admin.v2.UpdateBackupRequest\x1a .google.bigtable.admin.v2.Backup"b\x82\xd3\xe4\x93\x02G2=/v2/{backup.name=projects/*/instances/*/clusters/*/backups/*}:\x06\x62\x61\x63kup\xda\x41\x12\x62\x61\x63kup,update_mask\x12\x9c\x01\n\x0c\x44\x65leteBackup\x12-.google.bigtable.admin.v2.DeleteBackupRequest\x1a\x16.google.protobuf.Empty"E\x82\xd3\xe4\x93\x02\x38*6/v2/{name=projects/*/instances/*/clusters/*/backups/*}\xda\x41\x04name\x12\xb3\x01\n\x0bListBackups\x12,.google.bigtable.admin.v2.ListBackupsRequest\x1a-.google.bigtable.admin.v2.ListBackupsResponse"G\x82\xd3\xe4\x93\x02\x38\x12\x36/v2/{parent=projects/*/instances/*/clusters/*}/backups\xda\x41\x06parent\x12\xbb\x01\n\x0cRestoreTable\x12-.google.bigtable.admin.v2.RestoreTableRequest\x1a\x1d.google.longrunning.Operation"]\x82\xd3\xe4\x93\x02\x37"2/v2/{parent=projects/*/instances/*}/tables:restore:\x01*\xca\x41\x1d\n\x05Table\x12\x14RestoreTableMetadata\x12\x9c\x01\n\x0cGetIamPolicy\x12".google.iam.v1.GetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"Q\x82\xd3\xe4\x93\x02@";/v2/{resource=projects/*/instances/*/tables/*}:getIamPolicy:\x01*\xda\x41\x08resource\x12\xf3\x01\n\x0cSetIamPolicy\x12".google.iam.v1.SetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"\xa7\x01\x82\xd3\xe4\x93\x02\x8e\x01";/v2/{resource=projects/*/instances/*/tables/*}:setIamPolicy:\x01*ZL"G/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:setIamPolicy:\x01*\xda\x41\x0fresource,policy\x12\xa4\x02\n\x12TestIamPermissions\x12(.google.iam.v1.TestIamPermissionsRequest\x1a).google.iam.v1.TestIamPermissionsResponse"\xb8\x01\x82\xd3\xe4\x93\x02\x9a\x01"A/v2/{resource=projects/*/instances/*/tables/*}:testIamPermissions:\x01*ZR"M/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:testIamPermissions:\x01*\xda\x41\x14resource,permissions\x1a\xde\x02\xca\x41\x1c\x62igtableadmin.googleapis.com\xd2\x41\xbb\x02https://www.googleapis.com/auth/bigtable.admin,https://www.googleapis.com/auth/bigtable.admin.table,https://www.googleapis.com/auth/cloud-bigtable.admin,https://www.googleapis.com/auth/cloud-bigtable.admin.table,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-onlyB\xba\x01\n\x1c\x63om.google.bigtable.admin.v2B\x17\x42igtableTableAdminProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2b\x06proto3', dependencies=[ google_dot_api_dot_annotations__pb2.DESCRIPTOR, - google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.DESCRIPTOR, + google_dot_api_dot_client__pb2.DESCRIPTOR, + 
google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, + google_dot_api_dot_resource__pb2.DESCRIPTOR, + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_common__pb2.DESCRIPTOR, + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.DESCRIPTOR, google_dot_iam_dot_v1_dot_iam__policy__pb2.DESCRIPTOR, google_dot_iam_dot_v1_dot_policy__pb2.DESCRIPTOR, google_dot_longrunning_dot_operations__pb2.DESCRIPTOR, google_dot_protobuf_dot_duration__pb2.DESCRIPTOR, google_dot_protobuf_dot_empty__pb2.DESCRIPTOR, + google_dot_protobuf_dot_field__mask__pb2.DESCRIPTOR, google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, ], ) @@ -58,6 +64,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="key", @@ -68,7 +75,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b(""), + default_value=b"", message_type=None, enum_type=None, containing_type=None, @@ -76,6 +83,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -86,8 +94,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=545, - serialized_end=565, + serialized_start=767, + serialized_end=787, ) _CREATETABLEREQUEST = _descriptor.Descriptor( @@ -96,6 +104,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="parent", @@ -106,14 +115,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Instance', file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="table_id", @@ -124,14 +134,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="table", @@ -148,8 +159,9 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="initial_splits", @@ -168,6 +180,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -178,8 +191,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=365, - serialized_end=565, + serialized_start=535, + serialized_end=787, ) @@ -189,6 +202,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="parent", @@ -199,14 +213,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Instance', file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), 
_descriptor.FieldDescriptor( name="table_id", @@ -217,14 +232,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="source_snapshot", @@ -235,14 +251,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Snapshot', file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -253,8 +270,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=567, - serialized_end=658, + serialized_start=790, + serialized_end=970, ) @@ -264,6 +281,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -274,14 +292,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="row_key_prefix", @@ -292,7 +311,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b(""), + default_value=b"", message_type=None, enum_type=None, containing_type=None, @@ -300,6 +319,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="delete_all_data_from_table", @@ -318,6 +338,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -333,11 +354,12 @@ full_name="google.bigtable.admin.v2.DropRowRangeRequest.target", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], ), ], - serialized_start=660, - serialized_end=769, + serialized_start=973, + serialized_end=1121, ) @@ -347,6 +369,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="parent", @@ -357,14 +380,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Instance', file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="view", @@ -383,6 +407,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="page_size", @@ -401,6 +426,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="page_token", @@ -411,7 +437,7 @@ cpp_type=9, label=1, 
has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -419,6 +445,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -429,8 +456,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=771, - serialized_end=897, + serialized_start=1124, + serialized_end=1292, ) @@ -440,6 +467,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="tables", @@ -458,6 +486,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="next_page_token", @@ -468,7 +497,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -476,6 +505,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -486,8 +516,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=899, - serialized_end=993, + serialized_start=1294, + serialized_end=1388, ) @@ -497,6 +527,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -507,14 +538,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="view", @@ -533,6 +565,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -543,8 +576,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=995, - serialized_end=1078, + serialized_start=1390, + serialized_end=1512, ) @@ -554,6 +587,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -564,14 +598,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -582,8 +617,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1080, - serialized_end=1114, + serialized_start=1514, + serialized_end=1587, ) @@ -593,6 +628,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="id", @@ -603,7 +639,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -611,6 +647,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + 
create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="create", @@ -629,6 +666,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="update", @@ -647,6 +685,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="drop", @@ -665,6 +704,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -680,11 +720,12 @@ full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.mod", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], ), ], - serialized_start=1254, - serialized_end=1419, + serialized_start=1771, + serialized_end=1936, ) _MODIFYCOLUMNFAMILIESREQUEST = _descriptor.Descriptor( @@ -693,6 +734,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -703,14 +745,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="modifications", @@ -727,8 +770,9 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -739,8 +783,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1117, - serialized_end=1419, + serialized_start=1590, + serialized_end=1936, ) @@ -750,6 +794,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -760,14 +805,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -778,8 +824,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1421, - serialized_end=1468, + serialized_start=1938, + serialized_end=2024, ) @@ -789,6 +835,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="consistency_token", @@ -799,7 +846,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -807,6 +854,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -817,8 +865,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1470, - serialized_end=1531, + serialized_start=2026, + serialized_end=2087, ) @@ -828,6 +876,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + 
create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -838,14 +887,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="consistency_token", @@ -856,14 +906,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -874,8 +925,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1533, - serialized_end=1599, + serialized_start=2089, + serialized_end=2199, ) @@ -885,6 +936,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="consistent", @@ -903,6 +955,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -913,8 +966,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1601, - serialized_end=1647, + serialized_start=2201, + serialized_end=2247, ) @@ -924,6 +977,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -934,14 +988,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="cluster", @@ -952,14 +1007,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A!\n\037bigtable.googleapis.com/Cluster", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="snapshot_id", @@ -970,14 +1026,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="ttl", @@ -996,6 +1053,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="description", @@ -1006,7 +1064,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1014,6 +1072,7 @@ 
extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1024,8 +1083,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1650, - serialized_end=1785, + serialized_start=2250, + serialized_end=2470, ) @@ -1035,6 +1094,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -1045,14 +1105,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Snapshot', file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1063,8 +1124,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1787, - serialized_end=1821, + serialized_start=2472, + serialized_end=2548, ) @@ -1074,6 +1135,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="parent", @@ -1084,14 +1146,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A!\n\037bigtable.googleapis.com/Cluster", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="page_size", @@ -1110,6 +1173,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="page_token", @@ -1120,7 +1184,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1128,6 +1192,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1138,8 +1203,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1823, - serialized_end=1900, + serialized_start=2550, + serialized_end=2668, ) @@ -1149,6 +1214,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="snapshots", @@ -1167,6 +1233,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="next_page_token", @@ -1177,7 +1244,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1185,6 +1252,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1195,8 +1263,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1902, - serialized_end=2005, + serialized_start=2670, + serialized_end=2773, ) @@ -1206,6 +1274,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -1216,14 +1285,15 @@ cpp_type=9, label=1, 
has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Snapshot', file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1234,8 +1304,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2007, - serialized_end=2044, + serialized_start=2775, + serialized_end=2854, ) @@ -1245,6 +1315,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="original_request", @@ -1263,6 +1334,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="request_time", @@ -1281,6 +1353,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="finish_time", @@ -1299,6 +1372,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1309,8 +1383,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2047, - serialized_end=2243, + serialized_start=2857, + serialized_end=3053, ) @@ -1320,6 +1394,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="original_request", @@ -1338,6 +1413,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="request_time", @@ -1356,6 +1432,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="finish_time", @@ -1374,6 +1451,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1384,298 +1462,1127 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2246, - serialized_end=2462, + serialized_start=3056, + serialized_end=3272, ) -_CREATETABLEREQUEST_SPLIT.containing_type = _CREATETABLEREQUEST -_CREATETABLEREQUEST.fields_by_name[ - "table" -].message_type = ( - google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._TABLE -) -_CREATETABLEREQUEST.fields_by_name[ - "initial_splits" -].message_type = _CREATETABLEREQUEST_SPLIT -_DROPROWRANGEREQUEST.oneofs_by_name["target"].fields.append( - _DROPROWRANGEREQUEST.fields_by_name["row_key_prefix"] -) -_DROPROWRANGEREQUEST.fields_by_name[ - "row_key_prefix" -].containing_oneof = _DROPROWRANGEREQUEST.oneofs_by_name["target"] -_DROPROWRANGEREQUEST.oneofs_by_name["target"].fields.append( - _DROPROWRANGEREQUEST.fields_by_name["delete_all_data_from_table"] -) -_DROPROWRANGEREQUEST.fields_by_name[ - "delete_all_data_from_table" -].containing_oneof = _DROPROWRANGEREQUEST.oneofs_by_name["target"] -_LISTTABLESREQUEST.fields_by_name[ - "view" -].enum_type = ( - google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._TABLE_VIEW -) -_LISTTABLESRESPONSE.fields_by_name[ - "tables" -].message_type = ( - google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._TABLE -) -_GETTABLEREQUEST.fields_by_name[ - "view" -].enum_type = ( - 
google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._TABLE_VIEW -) -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name[ - "create" -].message_type = ( - google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._COLUMNFAMILY -) -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name[ - "update" -].message_type = ( - google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._COLUMNFAMILY -) -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.containing_type = _MODIFYCOLUMNFAMILIESREQUEST -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name["mod"].fields.append( - _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name["create"] -) -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name[ - "create" -].containing_oneof = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name["mod"] -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name["mod"].fields.append( - _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name["update"] -) -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name[ - "update" -].containing_oneof = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name["mod"] -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name["mod"].fields.append( - _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name["drop"] -) -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name[ - "drop" -].containing_oneof = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name["mod"] -_MODIFYCOLUMNFAMILIESREQUEST.fields_by_name[ - "modifications" -].message_type = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION -_SNAPSHOTTABLEREQUEST.fields_by_name[ - "ttl" -].message_type = google_dot_protobuf_dot_duration__pb2._DURATION -_LISTSNAPSHOTSRESPONSE.fields_by_name[ - "snapshots" -].message_type = ( - google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._SNAPSHOT -) -_SNAPSHOTTABLEMETADATA.fields_by_name[ - "original_request" -].message_type = _SNAPSHOTTABLEREQUEST -_SNAPSHOTTABLEMETADATA.fields_by_name[ - "request_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_SNAPSHOTTABLEMETADATA.fields_by_name[ - "finish_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_CREATETABLEFROMSNAPSHOTMETADATA.fields_by_name[ - "original_request" -].message_type = _CREATETABLEFROMSNAPSHOTREQUEST -_CREATETABLEFROMSNAPSHOTMETADATA.fields_by_name[ - "request_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_CREATETABLEFROMSNAPSHOTMETADATA.fields_by_name[ - "finish_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -DESCRIPTOR.message_types_by_name["CreateTableRequest"] = _CREATETABLEREQUEST -DESCRIPTOR.message_types_by_name[ - "CreateTableFromSnapshotRequest" -] = _CREATETABLEFROMSNAPSHOTREQUEST -DESCRIPTOR.message_types_by_name["DropRowRangeRequest"] = _DROPROWRANGEREQUEST -DESCRIPTOR.message_types_by_name["ListTablesRequest"] = _LISTTABLESREQUEST -DESCRIPTOR.message_types_by_name["ListTablesResponse"] = _LISTTABLESRESPONSE -DESCRIPTOR.message_types_by_name["GetTableRequest"] = _GETTABLEREQUEST -DESCRIPTOR.message_types_by_name["DeleteTableRequest"] = _DELETETABLEREQUEST -DESCRIPTOR.message_types_by_name[ - "ModifyColumnFamiliesRequest" -] = _MODIFYCOLUMNFAMILIESREQUEST -DESCRIPTOR.message_types_by_name[ - "GenerateConsistencyTokenRequest" -] = _GENERATECONSISTENCYTOKENREQUEST -DESCRIPTOR.message_types_by_name[ - "GenerateConsistencyTokenResponse" -] = _GENERATECONSISTENCYTOKENRESPONSE -DESCRIPTOR.message_types_by_name["CheckConsistencyRequest"] = 
_CHECKCONSISTENCYREQUEST -DESCRIPTOR.message_types_by_name["CheckConsistencyResponse"] = _CHECKCONSISTENCYRESPONSE -DESCRIPTOR.message_types_by_name["SnapshotTableRequest"] = _SNAPSHOTTABLEREQUEST -DESCRIPTOR.message_types_by_name["GetSnapshotRequest"] = _GETSNAPSHOTREQUEST -DESCRIPTOR.message_types_by_name["ListSnapshotsRequest"] = _LISTSNAPSHOTSREQUEST -DESCRIPTOR.message_types_by_name["ListSnapshotsResponse"] = _LISTSNAPSHOTSRESPONSE -DESCRIPTOR.message_types_by_name["DeleteSnapshotRequest"] = _DELETESNAPSHOTREQUEST -DESCRIPTOR.message_types_by_name["SnapshotTableMetadata"] = _SNAPSHOTTABLEMETADATA -DESCRIPTOR.message_types_by_name[ - "CreateTableFromSnapshotMetadata" -] = _CREATETABLEFROMSNAPSHOTMETADATA -_sym_db.RegisterFileDescriptor(DESCRIPTOR) -CreateTableRequest = _reflection.GeneratedProtocolMessageType( - "CreateTableRequest", - (_message.Message,), - dict( - Split=_reflection.GeneratedProtocolMessageType( - "Split", - (_message.Message,), - dict( - DESCRIPTOR=_CREATETABLEREQUEST_SPLIT, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", - __doc__="""An initial split point for a newly created table. - - - Attributes: - key: - Row key to use as an initial tablet boundary. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateTableRequest.Split) - ), - ), - DESCRIPTOR=_CREATETABLEREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", - __doc__="""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] - - - Attributes: - parent: - The unique name of the instance in which to create the table. - Values are of the form - ``projects//instances/``. - table_id: - The name by which the new table should be referred to within - the parent instance, e.g., ``foobar`` rather than - ``/tables/foobar``. - table: - The Table to create. - initial_splits: - The optional list of row keys that will be used to initially - split the table into several tablets (tablets are similar to - HBase regions). Given two split keys, ``s1`` and ``s2``, three - tablets will be created, spanning the key ranges: ``[, s1), - [s1, s2), [s2, )``. 
Example: - Row keys := ``["a", "apple", - "custom", "customer_1", "customer_2",`` ``"other", "zz"]`` - - initial\_split\_keys := ``["apple", "customer_1", - "customer_2", "other"]`` - Key assignment: - Tablet 1 - ``[, apple) => {"a"}.`` - Tablet 2 - ``[apple, customer_1) => {"apple", "custom"}.`` - - Tablet 3 ``[customer_1, customer_2) => {"customer_1"}.`` - - Tablet 4 ``[customer_2, other) => {"customer_2"}.`` - - Tablet 5 ``[other, ) => {"other", "zz"}.`` - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateTableRequest) - ), +_CREATEBACKUPREQUEST = _descriptor.Descriptor( + name="CreateBackupRequest", + full_name="google.bigtable.admin.v2.CreateBackupRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="parent", + full_name="google.bigtable.admin.v2.CreateBackupRequest.parent", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002\372A!\n\037bigtable.googleapis.com/Cluster", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="backup_id", + full_name="google.bigtable.admin.v2.CreateBackupRequest.backup_id", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="backup", + full_name="google.bigtable.admin.v2.CreateBackupRequest.backup", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=3275, + serialized_end=3432, ) -_sym_db.RegisterMessage(CreateTableRequest) -_sym_db.RegisterMessage(CreateTableRequest.Split) -CreateTableFromSnapshotRequest = _reflection.GeneratedProtocolMessageType( - "CreateTableFromSnapshotRequest", - (_message.Message,), - dict( - DESCRIPTOR=_CREATETABLEFROMSNAPSHOTREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", - __doc__="""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot] - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. - This feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or - deprecation policy. - - - Attributes: - parent: - The unique name of the instance in which to create the table. - Values are of the form - ``projects//instances/``. 
- table_id: - The name by which the new table should be referred to within - the parent instance, e.g., ``foobar`` rather than - ``/tables/foobar``. - source_snapshot: - The unique name of the snapshot from which to restore the - table. The snapshot and the table must be in the same - instance. Values are of the form ``projects//instance - s//clusters//snapshots/``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateTableFromSnapshotRequest) - ), -) -_sym_db.RegisterMessage(CreateTableFromSnapshotRequest) -DropRowRangeRequest = _reflection.GeneratedProtocolMessageType( - "DropRowRangeRequest", - (_message.Message,), - dict( - DESCRIPTOR=_DROPROWRANGEREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", - __doc__="""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] - - - Attributes: - name: - The unique name of the table on which to drop a range of rows. - Values are of the form - ``projects//instances//tables/
``. - target: - Delete all rows or by prefix. - row_key_prefix: - Delete all rows that start with this row key prefix. Prefix - cannot be zero length. - delete_all_data_from_table: - Delete all rows in the table. Setting this to false is a no- +_CREATEBACKUPMETADATA = _descriptor.Descriptor( + name="CreateBackupMetadata", + full_name="google.bigtable.admin.v2.CreateBackupMetadata", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.admin.v2.CreateBackupMetadata.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="source_table", + full_name="google.bigtable.admin.v2.CreateBackupMetadata.source_table", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="start_time", + full_name="google.bigtable.admin.v2.CreateBackupMetadata.start_time", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="end_time", + full_name="google.bigtable.admin.v2.CreateBackupMetadata.end_time", + index=3, + number=4, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=3435, + serialized_end=3587, +) + + +_GETBACKUPREQUEST = _descriptor.Descriptor( + name="GetBackupRequest", + full_name="google.bigtable.admin.v2.GetBackupRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.admin.v2.GetBackupRequest.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002\372A \n\036bigtable.googleapis.com/Backup", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=3589, + serialized_end=3661, +) + + +_UPDATEBACKUPREQUEST = _descriptor.Descriptor( + name="UpdateBackupRequest", + 
full_name="google.bigtable.admin.v2.UpdateBackupRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="backup", + full_name="google.bigtable.admin.v2.UpdateBackupRequest.backup", + index=0, + number=1, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="update_mask", + full_name="google.bigtable.admin.v2.UpdateBackupRequest.update_mask", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=3664, + serialized_end=3794, +) + + +_DELETEBACKUPREQUEST = _descriptor.Descriptor( + name="DeleteBackupRequest", + full_name="google.bigtable.admin.v2.DeleteBackupRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.admin.v2.DeleteBackupRequest.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002\372A \n\036bigtable.googleapis.com/Backup", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=3796, + serialized_end=3871, +) + + +_LISTBACKUPSREQUEST = _descriptor.Descriptor( + name="ListBackupsRequest", + full_name="google.bigtable.admin.v2.ListBackupsRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="parent", + full_name="google.bigtable.admin.v2.ListBackupsRequest.parent", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002\372A!\n\037bigtable.googleapis.com/Cluster", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="filter", + full_name="google.bigtable.admin.v2.ListBackupsRequest.filter", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="order_by", + full_name="google.bigtable.admin.v2.ListBackupsRequest.order_by", 
+ index=2, + number=3, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="page_size", + full_name="google.bigtable.admin.v2.ListBackupsRequest.page_size", + index=3, + number=4, + type=5, + cpp_type=1, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="page_token", + full_name="google.bigtable.admin.v2.ListBackupsRequest.page_token", + index=4, + number=5, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=3874, + serialized_end=4024, +) + + +_LISTBACKUPSRESPONSE = _descriptor.Descriptor( + name="ListBackupsResponse", + full_name="google.bigtable.admin.v2.ListBackupsResponse", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="backups", + full_name="google.bigtable.admin.v2.ListBackupsResponse.backups", + index=0, + number=1, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="next_page_token", + full_name="google.bigtable.admin.v2.ListBackupsResponse.next_page_token", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=4026, + serialized_end=4123, +) + + +_RESTORETABLEREQUEST = _descriptor.Descriptor( + name="RestoreTableRequest", + full_name="google.bigtable.admin.v2.RestoreTableRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="parent", + full_name="google.bigtable.admin.v2.RestoreTableRequest.parent", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="table_id", + 
full_name="google.bigtable.admin.v2.RestoreTableRequest.table_id", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="backup", + full_name="google.bigtable.admin.v2.RestoreTableRequest.backup", + index=2, + number=3, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name="source", + full_name="google.bigtable.admin.v2.RestoreTableRequest.source", + index=0, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[], + ), + ], + serialized_start=4125, + serialized_end=4208, +) + + +_RESTORETABLEMETADATA = _descriptor.Descriptor( + name="RestoreTableMetadata", + full_name="google.bigtable.admin.v2.RestoreTableMetadata", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.admin.v2.RestoreTableMetadata.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="source_type", + full_name="google.bigtable.admin.v2.RestoreTableMetadata.source_type", + index=1, + number=2, + type=14, + cpp_type=8, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="backup_info", + full_name="google.bigtable.admin.v2.RestoreTableMetadata.backup_info", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="optimize_table_operation_name", + full_name="google.bigtable.admin.v2.RestoreTableMetadata.optimize_table_operation_name", + index=3, + number=4, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="progress", + full_name="google.bigtable.admin.v2.RestoreTableMetadata.progress", + index=4, + number=5, + type=11, + cpp_type=10, + label=1, + has_default_value=False, 
+ default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name="source_info", + full_name="google.bigtable.admin.v2.RestoreTableMetadata.source_info", + index=0, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[], + ), + ], + serialized_start=4211, + serialized_end=4491, +) + + +_OPTIMIZERESTOREDTABLEMETADATA = _descriptor.Descriptor( + name="OptimizeRestoredTableMetadata", + full_name="google.bigtable.admin.v2.OptimizeRestoredTableMetadata", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.admin.v2.OptimizeRestoredTableMetadata.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="progress", + full_name="google.bigtable.admin.v2.OptimizeRestoredTableMetadata.progress", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=4493, + serialized_end=4601, +) + +_CREATETABLEREQUEST_SPLIT.containing_type = _CREATETABLEREQUEST +_CREATETABLEREQUEST.fields_by_name[ + "table" +].message_type = ( + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._TABLE +) +_CREATETABLEREQUEST.fields_by_name[ + "initial_splits" +].message_type = _CREATETABLEREQUEST_SPLIT +_DROPROWRANGEREQUEST.oneofs_by_name["target"].fields.append( + _DROPROWRANGEREQUEST.fields_by_name["row_key_prefix"] +) +_DROPROWRANGEREQUEST.fields_by_name[ + "row_key_prefix" +].containing_oneof = _DROPROWRANGEREQUEST.oneofs_by_name["target"] +_DROPROWRANGEREQUEST.oneofs_by_name["target"].fields.append( + _DROPROWRANGEREQUEST.fields_by_name["delete_all_data_from_table"] +) +_DROPROWRANGEREQUEST.fields_by_name[ + "delete_all_data_from_table" +].containing_oneof = _DROPROWRANGEREQUEST.oneofs_by_name["target"] +_LISTTABLESREQUEST.fields_by_name[ + "view" +].enum_type = ( + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._TABLE_VIEW +) +_LISTTABLESRESPONSE.fields_by_name[ + "tables" +].message_type = ( + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._TABLE +) +_GETTABLEREQUEST.fields_by_name[ + "view" +].enum_type = ( + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._TABLE_VIEW +) +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name[ + "create" +].message_type = ( + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._COLUMNFAMILY +) 
+_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name[ + "update" +].message_type = ( + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._COLUMNFAMILY +) +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.containing_type = _MODIFYCOLUMNFAMILIESREQUEST +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name["mod"].fields.append( + _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name["create"] +) +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name[ + "create" +].containing_oneof = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name["mod"] +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name["mod"].fields.append( + _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name["update"] +) +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name[ + "update" +].containing_oneof = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name["mod"] +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name["mod"].fields.append( + _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name["drop"] +) +_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name[ + "drop" +].containing_oneof = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name["mod"] +_MODIFYCOLUMNFAMILIESREQUEST.fields_by_name[ + "modifications" +].message_type = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION +_SNAPSHOTTABLEREQUEST.fields_by_name[ + "ttl" +].message_type = google_dot_protobuf_dot_duration__pb2._DURATION +_LISTSNAPSHOTSRESPONSE.fields_by_name[ + "snapshots" +].message_type = ( + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._SNAPSHOT +) +_SNAPSHOTTABLEMETADATA.fields_by_name[ + "original_request" +].message_type = _SNAPSHOTTABLEREQUEST +_SNAPSHOTTABLEMETADATA.fields_by_name[ + "request_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_SNAPSHOTTABLEMETADATA.fields_by_name[ + "finish_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_CREATETABLEFROMSNAPSHOTMETADATA.fields_by_name[ + "original_request" +].message_type = _CREATETABLEFROMSNAPSHOTREQUEST +_CREATETABLEFROMSNAPSHOTMETADATA.fields_by_name[ + "request_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_CREATETABLEFROMSNAPSHOTMETADATA.fields_by_name[ + "finish_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_CREATEBACKUPREQUEST.fields_by_name[ + "backup" +].message_type = ( + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._BACKUP +) +_CREATEBACKUPMETADATA.fields_by_name[ + "start_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_CREATEBACKUPMETADATA.fields_by_name[ + "end_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_UPDATEBACKUPREQUEST.fields_by_name[ + "backup" +].message_type = ( + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._BACKUP +) +_UPDATEBACKUPREQUEST.fields_by_name[ + "update_mask" +].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK +_LISTBACKUPSRESPONSE.fields_by_name[ + "backups" +].message_type = ( + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._BACKUP +) +_RESTORETABLEREQUEST.oneofs_by_name["source"].fields.append( + _RESTORETABLEREQUEST.fields_by_name["backup"] +) +_RESTORETABLEREQUEST.fields_by_name[ + "backup" +].containing_oneof = _RESTORETABLEREQUEST.oneofs_by_name["source"] +_RESTORETABLEMETADATA.fields_by_name[ + "source_type" +].enum_type = ( + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._RESTORESOURCETYPE +) 
+_RESTORETABLEMETADATA.fields_by_name[ + "backup_info" +].message_type = ( + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._BACKUPINFO +) +_RESTORETABLEMETADATA.fields_by_name[ + "progress" +].message_type = ( + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_common__pb2._OPERATIONPROGRESS +) +_RESTORETABLEMETADATA.oneofs_by_name["source_info"].fields.append( + _RESTORETABLEMETADATA.fields_by_name["backup_info"] +) +_RESTORETABLEMETADATA.fields_by_name[ + "backup_info" +].containing_oneof = _RESTORETABLEMETADATA.oneofs_by_name["source_info"] +_OPTIMIZERESTOREDTABLEMETADATA.fields_by_name[ + "progress" +].message_type = ( + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_common__pb2._OPERATIONPROGRESS +) +DESCRIPTOR.message_types_by_name["CreateTableRequest"] = _CREATETABLEREQUEST +DESCRIPTOR.message_types_by_name[ + "CreateTableFromSnapshotRequest" +] = _CREATETABLEFROMSNAPSHOTREQUEST +DESCRIPTOR.message_types_by_name["DropRowRangeRequest"] = _DROPROWRANGEREQUEST +DESCRIPTOR.message_types_by_name["ListTablesRequest"] = _LISTTABLESREQUEST +DESCRIPTOR.message_types_by_name["ListTablesResponse"] = _LISTTABLESRESPONSE +DESCRIPTOR.message_types_by_name["GetTableRequest"] = _GETTABLEREQUEST +DESCRIPTOR.message_types_by_name["DeleteTableRequest"] = _DELETETABLEREQUEST +DESCRIPTOR.message_types_by_name[ + "ModifyColumnFamiliesRequest" +] = _MODIFYCOLUMNFAMILIESREQUEST +DESCRIPTOR.message_types_by_name[ + "GenerateConsistencyTokenRequest" +] = _GENERATECONSISTENCYTOKENREQUEST +DESCRIPTOR.message_types_by_name[ + "GenerateConsistencyTokenResponse" +] = _GENERATECONSISTENCYTOKENRESPONSE +DESCRIPTOR.message_types_by_name["CheckConsistencyRequest"] = _CHECKCONSISTENCYREQUEST +DESCRIPTOR.message_types_by_name["CheckConsistencyResponse"] = _CHECKCONSISTENCYRESPONSE +DESCRIPTOR.message_types_by_name["SnapshotTableRequest"] = _SNAPSHOTTABLEREQUEST +DESCRIPTOR.message_types_by_name["GetSnapshotRequest"] = _GETSNAPSHOTREQUEST +DESCRIPTOR.message_types_by_name["ListSnapshotsRequest"] = _LISTSNAPSHOTSREQUEST +DESCRIPTOR.message_types_by_name["ListSnapshotsResponse"] = _LISTSNAPSHOTSRESPONSE +DESCRIPTOR.message_types_by_name["DeleteSnapshotRequest"] = _DELETESNAPSHOTREQUEST +DESCRIPTOR.message_types_by_name["SnapshotTableMetadata"] = _SNAPSHOTTABLEMETADATA +DESCRIPTOR.message_types_by_name[ + "CreateTableFromSnapshotMetadata" +] = _CREATETABLEFROMSNAPSHOTMETADATA +DESCRIPTOR.message_types_by_name["CreateBackupRequest"] = _CREATEBACKUPREQUEST +DESCRIPTOR.message_types_by_name["CreateBackupMetadata"] = _CREATEBACKUPMETADATA +DESCRIPTOR.message_types_by_name["GetBackupRequest"] = _GETBACKUPREQUEST +DESCRIPTOR.message_types_by_name["UpdateBackupRequest"] = _UPDATEBACKUPREQUEST +DESCRIPTOR.message_types_by_name["DeleteBackupRequest"] = _DELETEBACKUPREQUEST +DESCRIPTOR.message_types_by_name["ListBackupsRequest"] = _LISTBACKUPSREQUEST +DESCRIPTOR.message_types_by_name["ListBackupsResponse"] = _LISTBACKUPSRESPONSE +DESCRIPTOR.message_types_by_name["RestoreTableRequest"] = _RESTORETABLEREQUEST +DESCRIPTOR.message_types_by_name["RestoreTableMetadata"] = _RESTORETABLEMETADATA +DESCRIPTOR.message_types_by_name[ + "OptimizeRestoredTableMetadata" +] = _OPTIMIZERESTOREDTABLEMETADATA +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +CreateTableRequest = _reflection.GeneratedProtocolMessageType( + "CreateTableRequest", + (_message.Message,), + { + "Split": _reflection.GeneratedProtocolMessageType( + "Split", + (_message.Message,), + { + "DESCRIPTOR": _CREATETABLEREQUEST_SPLIT, + "__module__": 
"google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """An initial split point for a newly created table. + Attributes: + key: + Row key to use as an initial tablet boundary. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateTableRequest.Split) + }, + ), + "DESCRIPTOR": _CREATETABLEREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.Creat + eTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] + Attributes: + parent: + Required. The unique name of the instance in which to create + the table. Values are of the form + ``projects/{project}/instances/{instance}``. + table_id: + Required. The name by which the new table should be referred + to within the parent instance, e.g., ``foobar`` rather than + ``{parent}/tables/foobar``. Maximum 50 characters. + table: + Required. The Table to create. + initial_splits: + The optional list of row keys that will be used to initially + split the table into several tablets (tablets are similar to + HBase regions). Given two split keys, ``s1`` and ``s2``, three + tablets will be created, spanning the key ranges: ``[, s1), + [s1, s2), [s2, )``. Example: - Row keys := ``["a", "apple", + "custom", "customer_1", "customer_2",`` ``"other", "zz"]`` + - initial_split_keys := ``["apple", "customer_1", + "customer_2", "other"]`` - Key assignment: - Tablet 1 + ``[, apple) => {"a"}.`` - Tablet 2 + ``[apple, customer_1) => {"apple", "custom"}.`` - + Tablet 3 ``[customer_1, customer_2) => {"customer_1"}.`` - + Tablet 4 ``[customer_2, other) => {"customer_2"}.`` - + Tablet 5 ``[other, ) => {"other", "zz"}.`` + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateTableRequest) + }, +) +_sym_db.RegisterMessage(CreateTableRequest) +_sym_db.RegisterMessage(CreateTableRequest.Split) + +CreateTableFromSnapshotRequest = _reflection.GeneratedProtocolMessageType( + "CreateTableFromSnapshotRequest", + (_message.Message,), + { + "DESCRIPTOR": _CREATETABLEFROMSNAPSHOTREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.Creat + eTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.Create + TableFromSnapshot] Note: This is a private alpha release of Cloud + Bigtable snapshots. This feature is not currently available to most + Cloud Bigtable customers. This feature might be changed in backward- + incompatible ways and is not recommended for production use. It is not + subject to any SLA or deprecation policy. + Attributes: + parent: + Required. The unique name of the instance in which to create + the table. Values are of the form + ``projects/{project}/instances/{instance}``. + table_id: + Required. The name by which the new table should be referred + to within the parent instance, e.g., ``foobar`` rather than + ``{parent}/tables/foobar``. + source_snapshot: + Required. The unique name of the snapshot from which to + restore the table. The snapshot and the table must be in the + same instance. Values are of the form ``projects/{project}/ins + tances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. 
+ """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateTableFromSnapshotRequest) + }, +) +_sym_db.RegisterMessage(CreateTableFromSnapshotRequest) + +DropRowRangeRequest = _reflection.GeneratedProtocolMessageType( + "DropRowRangeRequest", + (_message.Message,), + { + "DESCRIPTOR": _DROPROWRANGEREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.DropR + owRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] + Attributes: + name: + Required. The unique name of the table on which to drop a + range of rows. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + target: + Delete all rows or by prefix. + row_key_prefix: + Delete all rows that start with this row key prefix. Prefix + cannot be zero length. + delete_all_data_from_table: + Delete all rows in the table. Setting this to false is a no- op. """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DropRowRangeRequest) - ), + }, ) _sym_db.RegisterMessage(DropRowRangeRequest) ListTablesRequest = _reflection.GeneratedProtocolMessageType( "ListTablesRequest", (_message.Message,), - dict( - DESCRIPTOR=_LISTTABLESREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", - __doc__="""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] - - + { + "DESCRIPTOR": _LISTTABLESREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.ListT + ables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] Attributes: parent: - The unique name of the instance for which tables should be - listed. Values are of the form - ``projects//instances/``. + Required. The unique name of the instance for which tables + should be listed. Values are of the form + ``projects/{project}/instances/{instance}``. view: - The view to be applied to the returned tables' fields. - Defaults to ``NAME_ONLY`` if unspecified; no others are - currently supported. + The view to be applied to the returned tables’ fields. Only + NAME_ONLY view (default) and REPLICATION_VIEW are supported. page_size: - Maximum number of results per page. CURRENTLY UNIMPLEMENTED - AND IGNORED. + Maximum number of results per page. A page_size of zero lets + the server choose the number of items to return. A page_size + which is strictly positive will return at most that many + items. A negative page_size will cause an error. Following + the first request, subsequent paginated calls are not required + to pass a page_size. If a page_size is set in subsequent + calls, it must match the page_size given in the first request. page_token: The value of ``next_page_token`` returned by a previous call. 
""", # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListTablesRequest) - ), + }, ) _sym_db.RegisterMessage(ListTablesRequest) ListTablesResponse = _reflection.GeneratedProtocolMessageType( "ListTablesResponse", (_message.Message,), - dict( - DESCRIPTOR=_LISTTABLESRESPONSE, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", - __doc__="""Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] - - + { + "DESCRIPTOR": _LISTTABLESRESPONSE, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """Response message for [google.bigtable.admin.v2.BigtableTableAdmin.List + Tables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] Attributes: tables: The tables present in the requested instance. @@ -1685,67 +2592,62 @@ the next page of results. """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListTablesResponse) - ), + }, ) _sym_db.RegisterMessage(ListTablesResponse) GetTableRequest = _reflection.GeneratedProtocolMessageType( "GetTableRequest", (_message.Message,), - dict( - DESCRIPTOR=_GETTABLEREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", - __doc__="""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.GetTable][google.bigtable.admin.v2.BigtableTableAdmin.GetTable] - - + { + "DESCRIPTOR": _GETTABLEREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.GetTa + ble][google.bigtable.admin.v2.BigtableTableAdmin.GetTable] Attributes: name: - The unique name of the requested table. Values are of the form - ``projects//instances//tables/
``. + Required. The unique name of the requested table. Values are + of the form + ``projects/{project}/instances/{instance}/tables/{table}``. view: - The view to be applied to the returned table's fields. + The view to be applied to the returned table’s fields. Defaults to ``SCHEMA_VIEW`` if unspecified. """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetTableRequest) - ), + }, ) _sym_db.RegisterMessage(GetTableRequest) DeleteTableRequest = _reflection.GeneratedProtocolMessageType( "DeleteTableRequest", (_message.Message,), - dict( - DESCRIPTOR=_DELETETABLEREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", - __doc__="""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable] - - + { + "DESCRIPTOR": _DELETETABLEREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.Delet + eTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable] Attributes: name: - The unique name of the table to be deleted. Values are of the - form - ``projects//instances//tables/
``. + Required. The unique name of the table to be deleted. Values + are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteTableRequest) - ), + }, ) _sym_db.RegisterMessage(DeleteTableRequest) ModifyColumnFamiliesRequest = _reflection.GeneratedProtocolMessageType( "ModifyColumnFamiliesRequest", (_message.Message,), - dict( - Modification=_reflection.GeneratedProtocolMessageType( + { + "Modification": _reflection.GeneratedProtocolMessageType( "Modification", (_message.Message,), - dict( - DESCRIPTOR=_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", - __doc__="""A create, update, or delete of a particular column family. - - + { + "DESCRIPTOR": _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """A create, update, or delete of a particular column family. Attributes: id: The ID of the column family to be modified. @@ -1762,27 +2664,27 @@ no such family exists. """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification) - ), - ), - DESCRIPTOR=_MODIFYCOLUMNFAMILIESREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", - __doc__="""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies] - - + }, + ), + "DESCRIPTOR": _MODIFYCOLUMNFAMILIESREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.Modif + yColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyCol + umnFamilies] Attributes: name: - The unique name of the table whose families should be - modified. Values are of the form - ``projects//instances//tables/
``. + Required. The unique name of the table whose families should + be modified. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. modifications: - Modifications to be atomically applied to the specified - table's families. Entries are applied in order, meaning that - earlier modifications can be masked by later ones (in the case - of repeated updates to the same family, for example). + Required. Modifications to be atomically applied to the + specified table’s families. Entries are applied in order, + meaning that earlier modifications can be masked by later ones + (in the case of repeated updates to the same family, for + example). """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ModifyColumnFamiliesRequest) - ), + }, ) _sym_db.RegisterMessage(ModifyColumnFamiliesRequest) _sym_db.RegisterMessage(ModifyColumnFamiliesRequest.Modification) @@ -1790,77 +2692,73 @@ GenerateConsistencyTokenRequest = _reflection.GeneratedProtocolMessageType( "GenerateConsistencyTokenRequest", (_message.Message,), - dict( - DESCRIPTOR=_GENERATECONSISTENCYTOKENREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", - __doc__="""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] - - + { + "DESCRIPTOR": _GENERATECONSISTENCYTOKENREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.Gener + ateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.Gener + ateConsistencyToken] Attributes: name: - The unique name of the Table for which to create a consistency - token. Values are of the form - ``projects//instances//tables/
``. + Required. The unique name of the Table for which to create a + consistency token. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GenerateConsistencyTokenRequest) - ), + }, ) _sym_db.RegisterMessage(GenerateConsistencyTokenRequest) GenerateConsistencyTokenResponse = _reflection.GeneratedProtocolMessageType( "GenerateConsistencyTokenResponse", (_message.Message,), - dict( - DESCRIPTOR=_GENERATECONSISTENCYTOKENRESPONSE, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", - __doc__="""Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] - - + { + "DESCRIPTOR": _GENERATECONSISTENCYTOKENRESPONSE, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """Response message for [google.bigtable.admin.v2.BigtableTableAdmin.Gene + rateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.Gene + rateConsistencyToken] Attributes: consistency_token: The generated consistency token. """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GenerateConsistencyTokenResponse) - ), + }, ) _sym_db.RegisterMessage(GenerateConsistencyTokenResponse) CheckConsistencyRequest = _reflection.GeneratedProtocolMessageType( "CheckConsistencyRequest", (_message.Message,), - dict( - DESCRIPTOR=_CHECKCONSISTENCYREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", - __doc__="""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] - - + { + "DESCRIPTOR": _CHECKCONSISTENCYREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.Check + Consistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsiste + ncy] Attributes: name: - The unique name of the Table for which to check replication - consistency. Values are of the form - ``projects//instances//tables/
``. + Required. The unique name of the Table for which to check + replication consistency. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. consistency_token: - The token created using GenerateConsistencyToken for the - Table. + Required. The token created using GenerateConsistencyToken for + the Table. """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CheckConsistencyRequest) - ), + }, ) _sym_db.RegisterMessage(CheckConsistencyRequest) CheckConsistencyResponse = _reflection.GeneratedProtocolMessageType( "CheckConsistencyResponse", (_message.Message,), - dict( - DESCRIPTOR=_CHECKCONSISTENCYRESPONSE, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", - __doc__="""Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] - - + { + "DESCRIPTOR": _CHECKCONSISTENCYRESPONSE, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """Response message for [google.bigtable.admin.v2.BigtableTableAdmin.Chec + kConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsist + ency] Attributes: consistent: True only if the token is consistent. A token is consistent if @@ -1868,104 +2766,96 @@ the request. """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CheckConsistencyResponse) - ), + }, ) _sym_db.RegisterMessage(CheckConsistencyResponse) SnapshotTableRequest = _reflection.GeneratedProtocolMessageType( "SnapshotTableRequest", (_message.Message,), - dict( - DESCRIPTOR=_SNAPSHOTTABLEREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", - __doc__="""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable][google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable] - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. - This feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or - deprecation policy. - - + { + "DESCRIPTOR": _SNAPSHOTTABLEREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.Snaps + hotTable][google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable] + Note: This is a private alpha release of Cloud Bigtable snapshots. + This feature is not currently available to most Cloud Bigtable + customers. This feature might be changed in backward-incompatible ways + and is not recommended for production use. It is not subject to any + SLA or deprecation policy. Attributes: name: - The unique name of the table to have the snapshot taken. - Values are of the form - ``projects//instances//tables/
``. + Required. The unique name of the table to have the snapshot + taken. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. cluster: - The name of the cluster where the snapshot will be created in. - Values are of the form ``projects//instances//clusters/``. + Required. The name of the cluster where the snapshot will be + created in. Values are of the form ``projects/{project}/instan + ces/{instance}/clusters/{cluster}``. snapshot_id: - The ID by which the new snapshot should be referred to within - the parent cluster, e.g., ``mysnapshot`` of the form: ``[_a- - zA-Z0-9][-_.a-zA-Z0-9]*`` rather than ``projects//ins - tances//clusters//snapshots/mysnapshot``. + Required. The ID by which the new snapshot should be referred + to within the parent cluster, e.g., ``mysnapshot`` of the + form: ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*`` rather than ``projects/{ + project}/instances/{instance}/clusters/{cluster}/snapshots/mys + napshot``. ttl: The amount of time that the new snapshot can stay active after - it is created. Once 'ttl' expires, the snapshot will get + it is created. Once ‘ttl’ expires, the snapshot will get deleted. The maximum amount of time a snapshot can stay active - is 7 days. If 'ttl' is not specified, the default value of 24 + is 7 days. If ‘ttl’ is not specified, the default value of 24 hours will be used. description: Description of the snapshot. """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.SnapshotTableRequest) - ), + }, ) _sym_db.RegisterMessage(SnapshotTableRequest) GetSnapshotRequest = _reflection.GeneratedProtocolMessageType( "GetSnapshotRequest", (_message.Message,), - dict( - DESCRIPTOR=_GETSNAPSHOTREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", - __doc__="""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot] - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. - This feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or - deprecation policy. - - + { + "DESCRIPTOR": _GETSNAPSHOTREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.GetSn + apshot][google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot] + Note: This is a private alpha release of Cloud Bigtable snapshots. + This feature is not currently available to most Cloud Bigtable + customers. This feature might be changed in backward-incompatible ways + and is not recommended for production use. It is not subject to any + SLA or deprecation policy. Attributes: name: - The unique name of the requested snapshot. Values are of the - form ``projects//instances//clusters//snapshots/``. + Required. The unique name of the requested snapshot. Values + are of the form ``projects/{project}/instances/{instance}/clus + ters/{cluster}/snapshots/{snapshot}``. 
""", # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetSnapshotRequest) - ), + }, ) _sym_db.RegisterMessage(GetSnapshotRequest) ListSnapshotsRequest = _reflection.GeneratedProtocolMessageType( "ListSnapshotsRequest", (_message.Message,), - dict( - DESCRIPTOR=_LISTSNAPSHOTSREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", - __doc__="""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. - This feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or - deprecation policy. - - + { + "DESCRIPTOR": _LISTSNAPSHOTSREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.ListS + napshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] + Note: This is a private alpha release of Cloud Bigtable snapshots. + This feature is not currently available to most Cloud Bigtable + customers. This feature might be changed in backward-incompatible ways + and is not recommended for production use. It is not subject to any + SLA or deprecation policy. Attributes: parent: - The unique name of the cluster for which snapshots should be - listed. Values are of the form ``projects//instances/ - /clusters/``. Use `` = '-'`` to - list snapshots for all clusters in an instance, e.g., - ``projects//instances//clusters/-``. + Required. The unique name of the cluster for which snapshots + should be listed. Values are of the form ``projects/{project}/ + instances/{instance}/clusters/{cluster}``. Use ``{cluster} = + '-'`` to list snapshots for all clusters in an instance, e.g., + ``projects/{project}/instances/{instance}/clusters/-``. page_size: The maximum number of snapshots to return per page. CURRENTLY UNIMPLEMENTED AND IGNORED. @@ -1973,26 +2863,23 @@ The value of ``next_page_token`` returned by a previous call. """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListSnapshotsRequest) - ), + }, ) _sym_db.RegisterMessage(ListSnapshotsRequest) ListSnapshotsResponse = _reflection.GeneratedProtocolMessageType( "ListSnapshotsResponse", (_message.Message,), - dict( - DESCRIPTOR=_LISTSNAPSHOTSRESPONSE, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", - __doc__="""Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. - This feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or - deprecation policy. - - + { + "DESCRIPTOR": _LISTSNAPSHOTSRESPONSE, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """Response message for [google.bigtable.admin.v2.BigtableTableAdmin.List + Snapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] + Note: This is a private alpha release of Cloud Bigtable snapshots. + This feature is not currently available to most Cloud Bigtable + customers. 
This feature might be changed in backward-incompatible ways + and is not recommended for production use. It is not subject to any + SLA or deprecation policy. Attributes: snapshots: The snapshots present in the requested cluster. @@ -2002,52 +2889,46 @@ to get the next page of results. """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListSnapshotsResponse) - ), + }, ) _sym_db.RegisterMessage(ListSnapshotsResponse) DeleteSnapshotRequest = _reflection.GeneratedProtocolMessageType( "DeleteSnapshotRequest", (_message.Message,), - dict( - DESCRIPTOR=_DELETESNAPSHOTREQUEST, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", - __doc__="""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot] - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. - This feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or - deprecation policy. - - + { + "DESCRIPTOR": _DELETESNAPSHOTREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.Delet + eSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot] + Note: This is a private alpha release of Cloud Bigtable snapshots. + This feature is not currently available to most Cloud Bigtable + customers. This feature might be changed in backward-incompatible ways + and is not recommended for production use. It is not subject to any + SLA or deprecation policy. Attributes: name: - The unique name of the snapshot to be deleted. Values are of - the form ``projects//instances//clusters//snapshots/``. + Required. The unique name of the snapshot to be deleted. + Values are of the form ``projects/{project}/instances/{instanc + e}/clusters/{cluster}/snapshots/{snapshot}``. """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteSnapshotRequest) - ), + }, ) _sym_db.RegisterMessage(DeleteSnapshotRequest) SnapshotTableMetadata = _reflection.GeneratedProtocolMessageType( "SnapshotTableMetadata", (_message.Message,), - dict( - DESCRIPTOR=_SNAPSHOTTABLEMETADATA, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", - __doc__="""The metadata for the Operation returned by SnapshotTable. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. - This feature might be changed in backward-incompatible ways and is not + { + "DESCRIPTOR": _SNAPSHOTTABLEMETADATA, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """The metadata for the Operation returned by SnapshotTable. Note: This + is a private alpha release of Cloud Bigtable snapshots. This feature + is not currently available to most Cloud Bigtable customers. This + feature might be changed in backward-incompatible ways and is not recommended for production use. It is not subject to any SLA or deprecation policy. - - Attributes: original_request: The request that prompted the initiation of this SnapshotTable @@ -2059,26 +2940,22 @@ successfully. 
""", # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.SnapshotTableMetadata) - ), + }, ) _sym_db.RegisterMessage(SnapshotTableMetadata) CreateTableFromSnapshotMetadata = _reflection.GeneratedProtocolMessageType( "CreateTableFromSnapshotMetadata", (_message.Message,), - dict( - DESCRIPTOR=_CREATETABLEFROMSNAPSHOTMETADATA, - __module__="google.cloud.bigtable.admin_v2.proto.bigtable_table_admin_pb2", - __doc__="""The metadata for the Operation returned by - CreateTableFromSnapshot. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. - This feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or - deprecation policy. - - + { + "DESCRIPTOR": _CREATETABLEFROMSNAPSHOTMETADATA, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """The metadata for the Operation returned by CreateTableFromSnapshot. + Note: This is a private alpha release of Cloud Bigtable snapshots. + This feature is not currently available to most Cloud Bigtable + customers. This feature might be changed in backward-incompatible ways + and is not recommended for production use. It is not subject to any + SLA or deprecation policy. Attributes: original_request: The request that prompted the initiation of this @@ -2090,21 +2967,352 @@ successfully. """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateTableFromSnapshotMetadata) - ), + }, ) _sym_db.RegisterMessage(CreateTableFromSnapshotMetadata) +CreateBackupRequest = _reflection.GeneratedProtocolMessageType( + "CreateBackupRequest", + (_message.Message,), + { + "DESCRIPTOR": _CREATEBACKUPREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """The request for [CreateBackup][google.bigtable.admin.v2.BigtableTableA + dmin.CreateBackup]. + Attributes: + parent: + Required. This must be one of the clusters in the instance in + which this table is located. The backup will be stored in this + cluster. Values are of the form ``projects/{project}/instances + /{instance}/clusters/{cluster}``. + backup_id: + Required. The id of the backup to be created. The + ``backup_id`` along with the parent ``parent`` are combined as + {parent}/backups/{backup_id} to create the full backup name, + of the form: ``projects/{project}/instances/{instance}/cluster + s/{cluster}/backups/{backup_id}``. This string must be between + 1 and 50 characters in length and match the regex [_a- + zA-Z0-9][-_.a-zA-Z0-9]*. + backup: + Required. The backup to create. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateBackupRequest) + }, +) +_sym_db.RegisterMessage(CreateBackupRequest) + +CreateBackupMetadata = _reflection.GeneratedProtocolMessageType( + "CreateBackupMetadata", + (_message.Message,), + { + "DESCRIPTOR": _CREATEBACKUPMETADATA, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """Metadata type for the operation returned by [CreateBackup][google.bigt + able.admin.v2.BigtableTableAdmin.CreateBackup]. + Attributes: + name: + The name of the backup being created. + source_table: + The name of the table the backup is created from. + start_time: + The time at which this operation started. + end_time: + If set, the time at which this operation finished or was + cancelled. 
+ """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateBackupMetadata) + }, +) +_sym_db.RegisterMessage(CreateBackupMetadata) + +GetBackupRequest = _reflection.GeneratedProtocolMessageType( + "GetBackupRequest", + (_message.Message,), + { + "DESCRIPTOR": _GETBACKUPREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """The request for + [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup]. + Attributes: + name: + Required. Name of the backup. Values are of the form ``project + s/{project}/instances/{instance}/clusters/{cluster}/backups/{b + ackup}``. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetBackupRequest) + }, +) +_sym_db.RegisterMessage(GetBackupRequest) + +UpdateBackupRequest = _reflection.GeneratedProtocolMessageType( + "UpdateBackupRequest", + (_message.Message,), + { + "DESCRIPTOR": _UPDATEBACKUPREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """The request for [UpdateBackup][google.bigtable.admin.v2.BigtableTableA + dmin.UpdateBackup]. + Attributes: + backup: + Required. The backup to update. ``backup.name``, and the + fields to be updated as specified by ``update_mask`` are + required. Other fields are ignored. Update is only supported + for the following fields: \* ``backup.expire_time``. + update_mask: + Required. A mask specifying which fields (e.g. + ``expire_time``) in the Backup resource should be updated. + This mask is relative to the Backup resource, not to the + request message. The field mask must always be specified; this + prevents any future fields from being erased accidentally by + clients that do not know about them. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.UpdateBackupRequest) + }, +) +_sym_db.RegisterMessage(UpdateBackupRequest) + +DeleteBackupRequest = _reflection.GeneratedProtocolMessageType( + "DeleteBackupRequest", + (_message.Message,), + { + "DESCRIPTOR": _DELETEBACKUPREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """The request for [DeleteBackup][google.bigtable.admin.v2.BigtableTableA + dmin.DeleteBackup]. + Attributes: + name: + Required. Name of the backup to delete. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/b + ackups/{backup}``. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteBackupRequest) + }, +) +_sym_db.RegisterMessage(DeleteBackupRequest) + +ListBackupsRequest = _reflection.GeneratedProtocolMessageType( + "ListBackupsRequest", + (_message.Message,), + { + "DESCRIPTOR": _LISTBACKUPSREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """The request for [ListBackups][google.bigtable.admin.v2.BigtableTableAd + min.ListBackups]. + Attributes: + parent: + Required. The cluster to list backups from. Values are of the + form ``projects/{project}/instances/{instance}/clusters/{clust + er}``. Use ``{cluster} = '-'`` to list backups for all + clusters in an instance, e.g., + ``projects/{project}/instances/{instance}/clusters/-``. + filter: + A filter expression that filters backups listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a boolean. + The comparison operator must be <, >, <=, >=, !=, =, or :. 
+ Colon ‘:’ represents a HAS operator which is roughly + synonymous with equality. Filter rules are case insensitive. + The fields eligible for filtering are: \* ``name`` \* + ``source_table`` \* ``state`` \* ``start_time`` (and values + are of the format YYYY-MM-DDTHH:MM:SSZ) \* ``end_time`` (and + values are of the format YYYY-MM-DDTHH:MM:SSZ) \* + ``expire_time`` (and values are of the format YYYY-MM- + DDTHH:MM:SSZ) \* ``size_bytes`` To filter on multiple + expressions, provide each separate expression within + parentheses. By default, each expression is an AND expression. + However, you can include AND, OR, and NOT expressions + explicitly. Some examples of using filters are: - + ``name:"exact"`` –> The backup’s name is the string “exact”. - + ``name:howl`` –> The backup’s name contains the string “howl”. + - ``source_table:prod`` –> The source_table’s name contains + the string “prod”. - ``state:CREATING`` –> The backup is + pending creation. - ``state:READY`` –> The backup is fully + created and ready for use. - ``(name:howl) AND (start_time < + \"2018-03-28T14:50:00Z\")`` –> The backup name contains the + string “howl” and start_time of the backup is before + 2018-03-28T14:50:00Z. - ``size_bytes > 10000000000`` –> The + backup’s size is greater than 10GB + order_by: + An expression for specifying the sort order of the results of + the request. The string value should specify one or more + fields in [Backup][google.bigtable.admin.v2.Backup]. The full + syntax is described at https://aip.dev/132#ordering. Fields + supported are: \* name \* source_table \* expire_time \* + start_time \* end_time \* size_bytes \* state For example, + “start_time”. The default sorting order is ascending. To + specify descending order for the field, a suffix " desc" + should be appended to the field name. For example, “start_time + desc”. Redundant space characters in the syntax are + insigificant. If order_by is empty, results will be sorted by + ``start_time`` in descending order starting from the most + recently created backup. + page_size: + Number of backups to be returned in the response. If 0 or + less, defaults to the server’s maximum allowed page size. + page_token: + If non-empty, ``page_token`` should contain a [next_page_token + ][google.bigtable.admin.v2.ListBackupsResponse.next_page_token + ] from a previous [ListBackupsResponse][google.bigtable.admin. + v2.ListBackupsResponse] to the same ``parent`` and with the + same ``filter``. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListBackupsRequest) + }, +) +_sym_db.RegisterMessage(ListBackupsRequest) + +ListBackupsResponse = _reflection.GeneratedProtocolMessageType( + "ListBackupsResponse", + (_message.Message,), + { + "DESCRIPTOR": _LISTBACKUPSRESPONSE, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """The response for [ListBackups][google.bigtable.admin.v2.BigtableTableA + dmin.ListBackups]. + Attributes: + backups: + The list of matching backups. + next_page_token: + \ ``next_page_token`` can be sent in a subsequent [ListBackups + ][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups] + call to fetch more of the matching backups. 
+ """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListBackupsResponse) + }, +) +_sym_db.RegisterMessage(ListBackupsResponse) + +RestoreTableRequest = _reflection.GeneratedProtocolMessageType( + "RestoreTableRequest", + (_message.Message,), + { + "DESCRIPTOR": _RESTORETABLEREQUEST, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """The request for [RestoreTable][google.bigtable.admin.v2.BigtableTableA + dmin.RestoreTable]. + Attributes: + parent: + Required. The name of the instance in which to create the + restored table. This instance must be the parent of the source + backup. Values are of the form + ``projects//instances/``. + table_id: + Required. The id of the table to create and restore to. This + table must not already exist. The ``table_id`` appended to + ``parent`` forms the full table name of the form + ``projects//instances//tables/``. + source: + Required. The source from which to restore. + backup: + Name of the backup from which to restore. Values are of the + form ``projects//instances//clusters//backups/``. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.RestoreTableRequest) + }, +) +_sym_db.RegisterMessage(RestoreTableRequest) + +RestoreTableMetadata = _reflection.GeneratedProtocolMessageType( + "RestoreTableMetadata", + (_message.Message,), + { + "DESCRIPTOR": _RESTORETABLEMETADATA, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """Metadata type for the long-running operation returned by [RestoreTable + ][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. + Attributes: + name: + Name of the table being created and restored to. + source_type: + The type of the restore source. + source_info: + Information about the source used to restore the table, as + specified by ``source`` in [RestoreTableRequest][google.bigtab + le.admin.v2.RestoreTableRequest]. + optimize_table_operation_name: + If exists, the name of the long-running operation that will be + used to track the post-restore optimization process to + optimize the performance of the restored table. The metadata + type of the long-running operation is + [OptimizeRestoreTableMetadata][]. The response type is + [Empty][google.protobuf.Empty]. This long-running operation + may be automatically created by the system if applicable after + the RestoreTable long-running operation completes + successfully. This operation may not be created if the table + is already optimized or the restore was not successful. + progress: + The progress of the [RestoreTable][google.bigtable.admin.v2.Bi + gtableTableAdmin.RestoreTable] operation. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.RestoreTableMetadata) + }, +) +_sym_db.RegisterMessage(RestoreTableMetadata) + +OptimizeRestoredTableMetadata = _reflection.GeneratedProtocolMessageType( + "OptimizeRestoredTableMetadata", + (_message.Message,), + { + "DESCRIPTOR": _OPTIMIZERESTOREDTABLEMETADATA, + "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", + "__doc__": """Metadata type for the long-running operation used to track the + progress of optimizations performed on a newly restored table. This + long-running operation is automatically created by the system after + the successful completion of a table restore, and cannot be cancelled. + Attributes: + name: + Name of the restored table being optimized. + progress: + The progress of the post-restore optimizations. 
+ """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.OptimizeRestoredTableMetadata) + }, +) +_sym_db.RegisterMessage(OptimizeRestoredTableMetadata) + DESCRIPTOR._options = None +_CREATETABLEREQUEST.fields_by_name["parent"]._options = None +_CREATETABLEREQUEST.fields_by_name["table_id"]._options = None +_CREATETABLEREQUEST.fields_by_name["table"]._options = None +_CREATETABLEFROMSNAPSHOTREQUEST.fields_by_name["parent"]._options = None +_CREATETABLEFROMSNAPSHOTREQUEST.fields_by_name["table_id"]._options = None +_CREATETABLEFROMSNAPSHOTREQUEST.fields_by_name["source_snapshot"]._options = None +_DROPROWRANGEREQUEST.fields_by_name["name"]._options = None +_LISTTABLESREQUEST.fields_by_name["parent"]._options = None +_GETTABLEREQUEST.fields_by_name["name"]._options = None +_DELETETABLEREQUEST.fields_by_name["name"]._options = None +_MODIFYCOLUMNFAMILIESREQUEST.fields_by_name["name"]._options = None +_MODIFYCOLUMNFAMILIESREQUEST.fields_by_name["modifications"]._options = None +_GENERATECONSISTENCYTOKENREQUEST.fields_by_name["name"]._options = None +_CHECKCONSISTENCYREQUEST.fields_by_name["name"]._options = None +_CHECKCONSISTENCYREQUEST.fields_by_name["consistency_token"]._options = None +_SNAPSHOTTABLEREQUEST.fields_by_name["name"]._options = None +_SNAPSHOTTABLEREQUEST.fields_by_name["cluster"]._options = None +_SNAPSHOTTABLEREQUEST.fields_by_name["snapshot_id"]._options = None +_GETSNAPSHOTREQUEST.fields_by_name["name"]._options = None +_LISTSNAPSHOTSREQUEST.fields_by_name["parent"]._options = None +_DELETESNAPSHOTREQUEST.fields_by_name["name"]._options = None +_CREATEBACKUPREQUEST.fields_by_name["parent"]._options = None +_CREATEBACKUPREQUEST.fields_by_name["backup_id"]._options = None +_CREATEBACKUPREQUEST.fields_by_name["backup"]._options = None +_GETBACKUPREQUEST.fields_by_name["name"]._options = None +_UPDATEBACKUPREQUEST.fields_by_name["backup"]._options = None +_UPDATEBACKUPREQUEST.fields_by_name["update_mask"]._options = None +_DELETEBACKUPREQUEST.fields_by_name["name"]._options = None +_LISTBACKUPSREQUEST.fields_by_name["parent"]._options = None _BIGTABLETABLEADMIN = _descriptor.ServiceDescriptor( name="BigtableTableAdmin", full_name="google.bigtable.admin.v2.BigtableTableAdmin", file=DESCRIPTOR, index=0, - serialized_options=None, - serialized_start=2465, - serialized_end=5178, + serialized_options=b"\312A\034bigtableadmin.googleapis.com\322A\273\002https://www.googleapis.com/auth/bigtable.admin,https://www.googleapis.com/auth/bigtable.admin.table,https://www.googleapis.com/auth/cloud-bigtable.admin,https://www.googleapis.com/auth/cloud-bigtable.admin.table,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-only", + create_key=_descriptor._internal_create_key, + serialized_start=4604, + serialized_end=9284, methods=[ _descriptor.MethodDescriptor( name="CreateTable", @@ -2112,10 +3320,9 @@ index=0, containing_service=None, input_type=_CREATETABLEREQUEST, - output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._TABLE, - serialized_options=_b( - '\202\323\344\223\002/"*/v2/{parent=projects/*/instances/*}/tables:\001*' - ), + output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._TABLE, + serialized_options=b'\202\323\344\223\002/"*/v2/{parent=projects/*/instances/*}/tables:\001*\332A\025parent,table_id,table', + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="CreateTableFromSnapshot", @@ -2124,9 +3331,8 @@ containing_service=None, 
input_type=_CREATETABLEFROMSNAPSHOTREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - '\202\323\344\223\002B"=/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot:\001*' - ), + serialized_options=b'\202\323\344\223\002B"=/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot:\001*\332A\037parent,table_id,source_snapshot\312A(\n\005Table\022\037CreateTableFromSnapshotMetadata', + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="ListTables", @@ -2135,9 +3341,8 @@ containing_service=None, input_type=_LISTTABLESREQUEST, output_type=_LISTTABLESRESPONSE, - serialized_options=_b( - "\202\323\344\223\002,\022*/v2/{parent=projects/*/instances/*}/tables" - ), + serialized_options=b"\202\323\344\223\002,\022*/v2/{parent=projects/*/instances/*}/tables\332A\006parent", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="GetTable", @@ -2145,10 +3350,9 @@ index=3, containing_service=None, input_type=_GETTABLEREQUEST, - output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._TABLE, - serialized_options=_b( - "\202\323\344\223\002,\022*/v2/{name=projects/*/instances/*/tables/*}" - ), + output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._TABLE, + serialized_options=b"\202\323\344\223\002,\022*/v2/{name=projects/*/instances/*/tables/*}\332A\004name", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="DeleteTable", @@ -2157,9 +3361,8 @@ containing_service=None, input_type=_DELETETABLEREQUEST, output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=_b( - "\202\323\344\223\002,**/v2/{name=projects/*/instances/*/tables/*}" - ), + serialized_options=b"\202\323\344\223\002,**/v2/{name=projects/*/instances/*/tables/*}\332A\004name", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="ModifyColumnFamilies", @@ -2167,10 +3370,9 @@ index=5, containing_service=None, input_type=_MODIFYCOLUMNFAMILIESREQUEST, - output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._TABLE, - serialized_options=_b( - '\202\323\344\223\002D"?/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies:\001*' - ), + output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._TABLE, + serialized_options=b'\202\323\344\223\002D"?/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies:\001*\332A\022name,modifications', + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="DropRowRange", @@ -2179,9 +3381,8 @@ containing_service=None, input_type=_DROPROWRANGEREQUEST, output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=_b( - '\202\323\344\223\002<"7/v2/{name=projects/*/instances/*/tables/*}:dropRowRange:\001*' - ), + serialized_options=b'\202\323\344\223\002<"7/v2/{name=projects/*/instances/*/tables/*}:dropRowRange:\001*', + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="GenerateConsistencyToken", @@ -2190,9 +3391,8 @@ containing_service=None, input_type=_GENERATECONSISTENCYTOKENREQUEST, output_type=_GENERATECONSISTENCYTOKENRESPONSE, - serialized_options=_b( - '\202\323\344\223\002H"C/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken:\001*' - ), + serialized_options=b'\202\323\344\223\002H"C/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken:\001*\332A\004name', + 
create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="CheckConsistency", @@ -2201,9 +3401,8 @@ containing_service=None, input_type=_CHECKCONSISTENCYREQUEST, output_type=_CHECKCONSISTENCYRESPONSE, - serialized_options=_b( - '\202\323\344\223\002@";/v2/{name=projects/*/instances/*/tables/*}:checkConsistency:\001*' - ), + serialized_options=b'\202\323\344\223\002@";/v2/{name=projects/*/instances/*/tables/*}:checkConsistency:\001*\332A\026name,consistency_token', + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="SnapshotTable", @@ -2212,9 +3411,8 @@ containing_service=None, input_type=_SNAPSHOTTABLEREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - '\202\323\344\223\0028"3/v2/{name=projects/*/instances/*/tables/*}:snapshot:\001*' - ), + serialized_options=b'\202\323\344\223\0028"3/v2/{name=projects/*/instances/*/tables/*}:snapshot:\001*\332A$name,cluster,snapshot_id,description\312A!\n\010Snapshot\022\025SnapshotTableMetadata', + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="GetSnapshot", @@ -2222,10 +3420,9 @@ index=10, containing_service=None, input_type=_GETSNAPSHOTREQUEST, - output_type=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2._SNAPSHOT, - serialized_options=_b( - "\202\323\344\223\002:\0228/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}" - ), + output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._SNAPSHOT, + serialized_options=b"\202\323\344\223\002:\0228/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}\332A\004name", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="ListSnapshots", @@ -2234,9 +3431,8 @@ containing_service=None, input_type=_LISTSNAPSHOTSREQUEST, output_type=_LISTSNAPSHOTSRESPONSE, - serialized_options=_b( - "\202\323\344\223\002:\0228/v2/{parent=projects/*/instances/*/clusters/*}/snapshots" - ), + serialized_options=b"\202\323\344\223\002:\0228/v2/{parent=projects/*/instances/*/clusters/*}/snapshots\332A\006parent", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="DeleteSnapshot", @@ -2245,42 +3441,98 @@ containing_service=None, input_type=_DELETESNAPSHOTREQUEST, output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=_b( - "\202\323\344\223\002:*8/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}" - ), + serialized_options=b"\202\323\344\223\002:*8/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}\332A\004name", + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name="CreateBackup", + full_name="google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup", + index=13, + containing_service=None, + input_type=_CREATEBACKUPREQUEST, + output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, + serialized_options=b'\202\323\344\223\002@"6/v2/{parent=projects/*/instances/*/clusters/*}/backups:\006backup\312A\036\n\006Backup\022\024CreateBackupMetadata\332A\027parent,backup_id,backup', + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name="GetBackup", + full_name="google.bigtable.admin.v2.BigtableTableAdmin.GetBackup", + index=14, + containing_service=None, + input_type=_GETBACKUPREQUEST, + output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._BACKUP, + 
serialized_options=b"\202\323\344\223\0028\0226/v2/{name=projects/*/instances/*/clusters/*/backups/*}\332A\004name", + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name="UpdateBackup", + full_name="google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup", + index=15, + containing_service=None, + input_type=_UPDATEBACKUPREQUEST, + output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._BACKUP, + serialized_options=b"\202\323\344\223\002G2=/v2/{backup.name=projects/*/instances/*/clusters/*/backups/*}:\006backup\332A\022backup,update_mask", + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name="DeleteBackup", + full_name="google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup", + index=16, + containing_service=None, + input_type=_DELETEBACKUPREQUEST, + output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, + serialized_options=b"\202\323\344\223\0028*6/v2/{name=projects/*/instances/*/clusters/*/backups/*}\332A\004name", + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name="ListBackups", + full_name="google.bigtable.admin.v2.BigtableTableAdmin.ListBackups", + index=17, + containing_service=None, + input_type=_LISTBACKUPSREQUEST, + output_type=_LISTBACKUPSRESPONSE, + serialized_options=b"\202\323\344\223\0028\0226/v2/{parent=projects/*/instances/*/clusters/*}/backups\332A\006parent", + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name="RestoreTable", + full_name="google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable", + index=18, + containing_service=None, + input_type=_RESTORETABLEREQUEST, + output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, + serialized_options=b'\202\323\344\223\0027"2/v2/{parent=projects/*/instances/*}/tables:restore:\001*\312A\035\n\005Table\022\024RestoreTableMetadata', + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="GetIamPolicy", full_name="google.bigtable.admin.v2.BigtableTableAdmin.GetIamPolicy", - index=13, + index=19, containing_service=None, input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._GETIAMPOLICYREQUEST, output_type=google_dot_iam_dot_v1_dot_policy__pb2._POLICY, - serialized_options=_b( - '\202\323\344\223\002@";/v2/{resource=projects/*/instances/*/tables/*}:getIamPolicy:\001*' - ), + serialized_options=b'\202\323\344\223\002@";/v2/{resource=projects/*/instances/*/tables/*}:getIamPolicy:\001*\332A\010resource', + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="SetIamPolicy", full_name="google.bigtable.admin.v2.BigtableTableAdmin.SetIamPolicy", - index=14, + index=20, containing_service=None, input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._SETIAMPOLICYREQUEST, output_type=google_dot_iam_dot_v1_dot_policy__pb2._POLICY, - serialized_options=_b( - '\202\323\344\223\002@";/v2/{resource=projects/*/instances/*/tables/*}:setIamPolicy:\001*' - ), + serialized_options=b'\202\323\344\223\002\216\001";/v2/{resource=projects/*/instances/*/tables/*}:setIamPolicy:\001*ZL"G/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:setIamPolicy:\001*\332A\017resource,policy', + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="TestIamPermissions", full_name="google.bigtable.admin.v2.BigtableTableAdmin.TestIamPermissions", - index=15, + index=21, containing_service=None, input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._TESTIAMPERMISSIONSREQUEST, 
output_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._TESTIAMPERMISSIONSRESPONSE, - serialized_options=_b( - '\202\323\344\223\002F"A/v2/{resource=projects/*/instances/*/tables/*}:testIamPermissions:\001*' - ), + serialized_options=b'\202\323\344\223\002\232\001"A/v2/{resource=projects/*/instances/*/tables/*}:testIamPermissions:\001*ZR"M/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:testIamPermissions:\001*\332A\024resource,permissions', + create_key=_descriptor._internal_create_key, ), ], ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py index f152581fe0a2..54d6ac9cc5ec 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py @@ -2,10 +2,10 @@ import grpc from google.cloud.bigtable_admin_v2.proto import ( - bigtable_table_admin_pb2 as google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2, + bigtable_table_admin_pb2 as google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2, ) from google.cloud.bigtable_admin_v2.proto import ( - table_pb2 as google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2, + table_pb2 as google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2, ) from google.iam.v1 import iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2 from google.iam.v1 import policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2 @@ -31,69 +31,99 @@ def __init__(self, channel): """ self.CreateTable = channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTable", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Table.FromString, + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.FromString, ) self.CreateTableFromSnapshot = channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTableFromSnapshot", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableFromSnapshotRequest.SerializeToString, + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableFromSnapshotRequest.SerializeToString, response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, ) self.ListTables = channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/ListTables", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesResponse.FromString, + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesResponse.FromString, ) self.GetTable = 
channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/GetTable", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetTableRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Table.FromString, + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetTableRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.FromString, ) self.DeleteTable = channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteTableRequest.SerializeToString, + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteTableRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.ModifyColumnFamilies = channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/ModifyColumnFamilies", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ModifyColumnFamiliesRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Table.FromString, + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ModifyColumnFamiliesRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.FromString, ) self.DropRowRange = channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/DropRowRange", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DropRowRangeRequest.SerializeToString, + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DropRowRangeRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.GenerateConsistencyToken = channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/GenerateConsistencyToken", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenResponse.FromString, + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenResponse.FromString, ) self.CheckConsistency = channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/CheckConsistency", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyResponse.FromString, + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyRequest.SerializeToString, + 
response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyResponse.FromString, ) self.SnapshotTable = channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/SnapshotTable", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.SnapshotTableRequest.SerializeToString, + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.SnapshotTableRequest.SerializeToString, response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, ) self.GetSnapshot = channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/GetSnapshot", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetSnapshotRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Snapshot.FromString, + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetSnapshotRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Snapshot.FromString, ) self.ListSnapshots = channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/ListSnapshots", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsResponse.FromString, + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsResponse.FromString, ) self.DeleteSnapshot = channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteSnapshot", - request_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteSnapshotRequest.SerializeToString, + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteSnapshotRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) + self.CreateBackup = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/CreateBackup", + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateBackupRequest.SerializeToString, + response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, + ) + self.GetBackup = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/GetBackup", + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetBackupRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Backup.FromString, + ) + self.UpdateBackup = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateBackup", + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.UpdateBackupRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Backup.FromString, + ) + self.DeleteBackup = channel.unary_unary( + 
"/google.bigtable.admin.v2.BigtableTableAdmin/DeleteBackup", + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteBackupRequest.SerializeToString, + response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ) + self.ListBackups = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/ListBackups", + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListBackupsRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListBackupsResponse.FromString, + ) + self.RestoreTable = channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/RestoreTable", + request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.RestoreTableRequest.SerializeToString, + response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, + ) self.GetIamPolicy = channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/GetIamPolicy", request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.SerializeToString, @@ -254,17 +284,75 @@ def DeleteSnapshot(self, request, context): context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") + def CreateBackup(self, request, context): + """Starts creating a new Cloud Bigtable Backup. The returned backup + [long-running operation][google.longrunning.Operation] can be used to + track creation of the backup. The + [metadata][google.longrunning.Operation.metadata] field type is + [CreateBackupMetadata][google.bigtable.admin.v2.CreateBackupMetadata]. The + [response][google.longrunning.Operation.response] field type is + [Backup][google.bigtable.admin.v2.Backup], if successful. Cancelling the + returned operation will stop the creation and delete the backup. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def GetBackup(self, request, context): + """Gets metadata on a pending or completed Cloud Bigtable Backup. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def UpdateBackup(self, request, context): + """Updates a pending or completed Cloud Bigtable Backup. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def DeleteBackup(self, request, context): + """Deletes a pending or completed Cloud Bigtable backup. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def ListBackups(self, request, context): + """Lists Cloud Bigtable backups. Returns both completed and pending + backups. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def RestoreTable(self, request, context): + """Create a new table by restoring from a completed backup. The new table + must be in the same instance as the instance containing the backup. The + returned table [long-running operation][google.longrunning.Operation] can + be used to track the progress of the operation, and to cancel it. 
The + [metadata][google.longrunning.Operation.metadata] field type is + [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. The + [response][google.longrunning.Operation.response] type is + [Table][google.bigtable.admin.v2.Table], if successful. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + def GetIamPolicy(self, request, context): - """Gets the access control policy for a table resource. Returns an empty - policy if an table exists but does not have a policy set. + """Gets the access control policy for a resource. + Returns an empty policy if the resource exists but does not have a policy + set. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def SetIamPolicy(self, request, context): - """Sets the access control policy on a table resource. Replaces any existing - policy. + """Sets the access control policy on a Table or Backup resource. + Replaces any existing policy. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") @@ -282,69 +370,99 @@ def add_BigtableTableAdminServicer_to_server(servicer, server): rpc_method_handlers = { "CreateTable": grpc.unary_unary_rpc_method_handler( servicer.CreateTable, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Table.SerializeToString, + request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.SerializeToString, ), "CreateTableFromSnapshot": grpc.unary_unary_rpc_method_handler( servicer.CreateTableFromSnapshot, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableFromSnapshotRequest.FromString, + request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableFromSnapshotRequest.FromString, response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, ), "ListTables": grpc.unary_unary_rpc_method_handler( servicer.ListTables, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesResponse.SerializeToString, + request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesResponse.SerializeToString, ), "GetTable": grpc.unary_unary_rpc_method_handler( servicer.GetTable, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetTableRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Table.SerializeToString, + request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetTableRequest.FromString, + 
response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.SerializeToString, ), "DeleteTable": grpc.unary_unary_rpc_method_handler( servicer.DeleteTable, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteTableRequest.FromString, + request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteTableRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), "ModifyColumnFamilies": grpc.unary_unary_rpc_method_handler( servicer.ModifyColumnFamilies, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ModifyColumnFamiliesRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Table.SerializeToString, + request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ModifyColumnFamiliesRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.SerializeToString, ), "DropRowRange": grpc.unary_unary_rpc_method_handler( servicer.DropRowRange, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DropRowRangeRequest.FromString, + request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DropRowRangeRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), "GenerateConsistencyToken": grpc.unary_unary_rpc_method_handler( servicer.GenerateConsistencyToken, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenResponse.SerializeToString, + request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenResponse.SerializeToString, ), "CheckConsistency": grpc.unary_unary_rpc_method_handler( servicer.CheckConsistency, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyResponse.SerializeToString, + request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyResponse.SerializeToString, ), "SnapshotTable": grpc.unary_unary_rpc_method_handler( servicer.SnapshotTable, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.SnapshotTableRequest.FromString, + request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.SnapshotTableRequest.FromString, response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, ), "GetSnapshot": grpc.unary_unary_rpc_method_handler( servicer.GetSnapshot, - 
request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetSnapshotRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_table__pb2.Snapshot.SerializeToString, + request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetSnapshotRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Snapshot.SerializeToString, ), "ListSnapshots": grpc.unary_unary_rpc_method_handler( servicer.ListSnapshots, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsResponse.SerializeToString, + request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsResponse.SerializeToString, ), "DeleteSnapshot": grpc.unary_unary_rpc_method_handler( servicer.DeleteSnapshot, - request_deserializer=google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteSnapshotRequest.FromString, + request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteSnapshotRequest.FromString, + response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ), + "CreateBackup": grpc.unary_unary_rpc_method_handler( + servicer.CreateBackup, + request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateBackupRequest.FromString, + response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, + ), + "GetBackup": grpc.unary_unary_rpc_method_handler( + servicer.GetBackup, + request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetBackupRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Backup.SerializeToString, + ), + "UpdateBackup": grpc.unary_unary_rpc_method_handler( + servicer.UpdateBackup, + request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.UpdateBackupRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Backup.SerializeToString, + ), + "DeleteBackup": grpc.unary_unary_rpc_method_handler( + servicer.DeleteBackup, + request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteBackupRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), + "ListBackups": grpc.unary_unary_rpc_method_handler( + servicer.ListBackups, + request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListBackupsRequest.FromString, + response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListBackupsResponse.SerializeToString, + ), + "RestoreTable": grpc.unary_unary_rpc_method_handler( + servicer.RestoreTable, + request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.RestoreTableRequest.FromString, + 
response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, + ), "GetIamPolicy": grpc.unary_unary_rpc_method_handler( servicer.GetIamPolicy, request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.FromString, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common.proto index ad4d735994f3..89d24ea97112 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common.proto +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common.proto @@ -1,4 +1,4 @@ -// Copyright 2018 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,13 +11,11 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; package google.bigtable.admin.v2; -import "google/api/annotations.proto"; import "google/protobuf/timestamp.proto"; option csharp_namespace = "Google.Cloud.Bigtable.Admin.V2"; @@ -38,3 +36,18 @@ enum StorageType { // Magnetic drive (HDD) storage should be used. HDD = 2; } + +// Encapsulates progress related information for a Cloud Bigtable long +// running operation. +message OperationProgress { + // Percent completion of the operation. + // Values are between 0 and 100 inclusive. + int32 progress_percent = 1; + + // Time the request was received. + google.protobuf.Timestamp start_time = 2; + + // If set, the time at which this operation failed or was completed + // successfully. + google.protobuf.Timestamp end_time = 3; +} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2.py index 7d40f043d05c..f0f5ff399e04 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2.py @@ -1,10 +1,7 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! 
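To make the new backup surface concrete, here is a minimal client-side sketch of the RPCs this patch adds. It is illustrative only, not part of the patch: the stub class name (BigtableTableAdminStub) follows the usual gRPC Python codegen convention, auth call credentials are omitted, and every project/instance/cluster/backup identifier is a placeholder. The endpoint is the default host declared in the service descriptor options above.

import grpc

from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2
from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2_grpc
from google.cloud.bigtable_admin_v2.proto import table_pb2

# Channel against the service's default host; per-call auth is omitted here.
channel = grpc.secure_channel(
    "bigtableadmin.googleapis.com:443", grpc.ssl_channel_credentials()
)
stub = bigtable_table_admin_pb2_grpc.BigtableTableAdminStub(channel)

# CreateBackup returns a google.longrunning.Operation whose metadata type is
# CreateBackupMetadata and whose response type, on success, is Backup. The
# Backup message itself (source table, expiry, ...) is left empty in this sketch.
create_op = stub.CreateBackup(
    bigtable_table_admin_pb2.CreateBackupRequest(
        parent="projects/my-project/instances/my-instance/clusters/my-cluster",
        backup_id="my-backup",
        backup=table_pb2.Backup(),
    )
)

# RestoreTable also returns an Operation; its metadata type is
# RestoreTableMetadata (which embeds the OperationProgress message added to
# common.proto above) and its response type is Table.
restore_op = stub.RestoreTable(
    bigtable_table_admin_pb2.RestoreTableRequest(
        parent="projects/my-project/instances/my-instance",
        table_id="restored-table",
        backup="projects/my-project/instances/my-instance/clusters/my-cluster"
        "/backups/my-backup",
    )
)

Both calls hand back long-running operations, so a real client would poll them (or use the operations service) and, for RestoreTable, watch for the optional post-restore optimization operation named in RestoreTableMetadata.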
-# source: google/cloud/bigtable/admin_v2/proto/common.proto +# source: google/cloud/bigtable_admin_v2/proto/common.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf.internal import enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message @@ -16,24 +13,17 @@ _sym_db = _symbol_database.Default() -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/bigtable/admin_v2/proto/common.proto", + name="google/cloud/bigtable_admin_v2/proto/common.proto", package="google.bigtable.admin.v2", syntax="proto3", - serialized_options=_b( - "\n\034com.google.bigtable.admin.v2B\013CommonProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2" - ), - serialized_pb=_b( - "\n1google/cloud/bigtable/admin_v2/proto/common.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x1fgoogle/protobuf/timestamp.proto*=\n\x0bStorageType\x12\x1c\n\x18STORAGE_TYPE_UNSPECIFIED\x10\x00\x12\x07\n\x03SSD\x10\x01\x12\x07\n\x03HDD\x10\x02\x42\xae\x01\n\x1c\x63om.google.bigtable.admin.v2B\x0b\x43ommonProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2b\x06proto3" - ), - dependencies=[ - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, - ], + serialized_options=b"\n\034com.google.bigtable.admin.v2B\013CommonProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2", + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n1google/cloud/bigtable_admin_v2/proto/common.proto\x12\x18google.bigtable.admin.v2\x1a\x1fgoogle/protobuf/timestamp.proto"\x8b\x01\n\x11OperationProgress\x12\x18\n\x10progress_percent\x18\x01 \x01(\x05\x12.\n\nstart_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp*=\n\x0bStorageType\x12\x1c\n\x18STORAGE_TYPE_UNSPECIFIED\x10\x00\x12\x07\n\x03SSD\x10\x01\x12\x07\n\x03HDD\x10\x02\x42\xae\x01\n\x1c\x63om.google.bigtable.admin.v2B\x0b\x43ommonProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2b\x06proto3', + dependencies=[google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,], ) _STORAGETYPE = _descriptor.EnumDescriptor( @@ -41,6 +31,7 @@ full_name="google.bigtable.admin.v2.StorageType", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name="STORAGE_TYPE_UNSPECIFIED", @@ -48,18 +39,29 @@ number=0, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="SSD", index=1, number=1, serialized_options=None, type=None + name="SSD", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="HDD", index=2, number=2, serialized_options=None, type=None + name="HDD", + index=2, + number=2, + 
serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, serialized_options=None, - serialized_start=142, - serialized_end=203, + serialized_start=254, + serialized_end=315, ) _sym_db.RegisterEnumDescriptor(_STORAGETYPE) @@ -69,9 +71,117 @@ HDD = 2 +_OPERATIONPROGRESS = _descriptor.Descriptor( + name="OperationProgress", + full_name="google.bigtable.admin.v2.OperationProgress", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="progress_percent", + full_name="google.bigtable.admin.v2.OperationProgress.progress_percent", + index=0, + number=1, + type=5, + cpp_type=1, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="start_time", + full_name="google.bigtable.admin.v2.OperationProgress.start_time", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="end_time", + full_name="google.bigtable.admin.v2.OperationProgress.end_time", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=113, + serialized_end=252, +) + +_OPERATIONPROGRESS.fields_by_name[ + "start_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_OPERATIONPROGRESS.fields_by_name[ + "end_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +DESCRIPTOR.message_types_by_name["OperationProgress"] = _OPERATIONPROGRESS DESCRIPTOR.enum_types_by_name["StorageType"] = _STORAGETYPE _sym_db.RegisterFileDescriptor(DESCRIPTOR) +OperationProgress = _reflection.GeneratedProtocolMessageType( + "OperationProgress", + (_message.Message,), + { + "DESCRIPTOR": _OPERATIONPROGRESS, + "__module__": "google.cloud.bigtable_admin_v2.proto.common_pb2", + "__doc__": """Encapsulates progress related information for a Cloud Bigtable long + running operation. + Attributes: + progress_percent: + Percent completion of the operation. Values are between 0 and + 100 inclusive. + start_time: + Time the request was received. + end_time: + If set, the time at which this operation failed or was + completed successfully. 
+ """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.OperationProgress) + }, +) +_sym_db.RegisterMessage(OperationProgress) + DESCRIPTOR._options = None # @@protoc_insertion_point(module_scope) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance.proto index ef8599bfe349..e15f63ac0309 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance.proto +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance.proto @@ -1,4 +1,4 @@ -// Copyright 2018 Google LLC. +// Copyright 2019 Google LLC. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -17,7 +17,8 @@ syntax = "proto3"; package google.bigtable.admin.v2; -import "google/api/annotations.proto"; +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; import "google/bigtable/admin/v2/common.proto"; option csharp_namespace = "Google.Cloud.Bigtable.Admin.V2"; @@ -29,9 +30,14 @@ option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2"; // A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and // the resources that serve them. -// All tables in an instance are served from a single -// [Cluster][google.bigtable.admin.v2.Cluster]. +// All tables in an instance are served from all +// [Clusters][google.bigtable.admin.v2.Cluster] in the instance. message Instance { + option (google.api.resource) = { + type: "bigtable.googleapis.com/Instance" + pattern: "projects/{project}/instances/{instance}" + }; + // Possible states of an instance. enum State { // The state of the instance could not be determined. @@ -67,15 +73,14 @@ message Instance { DEVELOPMENT = 2; } - // (`OutputOnly`) // The unique name of the instance. Values are of the form - // `projects//instances/[a-z][a-z0-9\\-]+[a-z0-9]`. - string name = 1; + // `projects/{project}/instances/[a-z][a-z0-9\\-]+[a-z0-9]`. + string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; - // The descriptive name for this instance as it appears in UIs. + // Required. The descriptive name for this instance as it appears in UIs. // Can be changed at any time, but should be kept globally unique // to avoid confusion. - string display_name = 2; + string display_name = 2 [(google.api.field_behavior) = REQUIRED]; // (`OutputOnly`) // The current state of the instance. @@ -102,6 +107,11 @@ message Instance { // of serving all [Tables][google.bigtable.admin.v2.Table] in the parent // [Instance][google.bigtable.admin.v2.Instance]. message Cluster { + option (google.api.resource) = { + type: "bigtable.googleapis.com/Cluster" + pattern: "projects/{project}/instances/{instance}/clusters/{cluster}" + }; + // Possible states of a cluster. enum State { // The state of the cluster could not be determined. @@ -127,25 +137,25 @@ message Cluster { DISABLED = 4; } - // (`OutputOnly`) // The unique name of the cluster. Values are of the form - // `projects//instances//clusters/[a-z][-a-z0-9]*`. - string name = 1; + // `projects/{project}/instances/{instance}/clusters/[a-z][-a-z0-9]*`. + string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; // (`CreationOnly`) // The location where this cluster's nodes and storage reside. For best // performance, clients should be located as close as possible to this // cluster. 
Currently only zones are supported, so values should be of the - // form `projects//locations/`. - string location = 2; + // form `projects/{project}/locations/{zone}`. + string location = 2 [(google.api.resource_reference) = { + type: "locations.googleapis.com/Location" + }]; - // (`OutputOnly`) // The current state of the cluster. - State state = 3; + State state = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; - // The number of nodes allocated to this cluster. More nodes enable higher - // throughput and more consistent performance. - int32 serve_nodes = 4; + // Required. The number of nodes allocated to this cluster. More nodes enable + // higher throughput and more consistent performance. + int32 serve_nodes = 4 [(google.api.field_behavior) = REQUIRED]; // (`CreationOnly`) // The type of storage used by this cluster to serve its @@ -156,14 +166,20 @@ message Cluster { // A configuration object describing how Cloud Bigtable should treat traffic // from a particular end user application. message AppProfile { - // Read/write requests may be routed to any cluster in the instance, and will - // fail over to another cluster in the event of transient errors or delays. - // Choosing this option sacrifices read-your-writes consistency to improve - // availability. + option (google.api.resource) = { + type: "bigtable.googleapis.com/AppProfile" + pattern: "projects/{project}/instances/{instance}/appProfiles/{app_profile}" + }; + + // Read/write requests are routed to the nearest cluster in the instance, and + // will fail over to the nearest cluster that is available in the event of + // transient errors or delays. Clusters in a region are considered + // equidistant. Choosing this option sacrifices read-your-writes consistency + // to improve availability. message MultiClusterRoutingUseAny {} // Unconditionally routes all read/write requests to a specific cluster. - // This option preserves read-your-writes consistency, but does not improve + // This option preserves read-your-writes consistency but does not improve // availability. message SingleClusterRouting { // The cluster to which read/write requests should be routed. @@ -193,10 +209,10 @@ message AppProfile { // Optional long form description of the use case for this AppProfile. string description = 3; - // The routing policy for all read/write requests which use this app profile. + // The routing policy for all read/write requests that use this app profile. // A value must be explicitly set. oneof routing_policy { - // Use a multi-cluster routing policy that may pick any cluster. + // Use a multi-cluster routing policy. MultiClusterRoutingUseAny multi_cluster_routing_use_any = 5; // Use a single-cluster routing policy. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py index 5f45909fc6a1..58d5a036cd2e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py @@ -1,10 +1,7 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! 
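The routing_policy oneof added to AppProfile above is easiest to see with the generated messages. A minimal sketch, assuming the regenerated instance_pb2 module below and placeholder cluster and description values:

from google.cloud.bigtable_admin_v2.proto import instance_pb2

# Multi-cluster routing: requests go to the nearest available cluster, trading
# read-your-writes consistency for availability.
any_cluster = instance_pb2.AppProfile(
    description="route to any available cluster",
    multi_cluster_routing_use_any=instance_pb2.AppProfile.MultiClusterRoutingUseAny(),
)

# Single-cluster routing: requests are pinned to one cluster, preserving
# read-your-writes consistency; transactional writes stay disabled here.
pinned = instance_pb2.AppProfile(
    description="pin traffic to cluster-a",
    single_cluster_routing=instance_pb2.AppProfile.SingleClusterRouting(
        cluster_id="cluster-a",
        allow_transactional_writes=False,
    ),
)

# Only one member of the routing_policy oneof can be set at a time.
assert any_cluster.WhichOneof("routing_policy") == "multi_cluster_routing_use_any"
assert pinned.WhichOneof("routing_policy") == "single_cluster_routing"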
-# source: google/cloud/bigtable/admin_v2/proto/instance.proto +# source: google/cloud/bigtable_admin_v2/proto/instance.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -15,25 +12,24 @@ _sym_db = _symbol_database.Default() -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 +from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 +from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 from google.cloud.bigtable_admin_v2.proto import ( - common_pb2 as google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_common__pb2, + common_pb2 as google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_common__pb2, ) DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/bigtable/admin_v2/proto/instance.proto", + name="google/cloud/bigtable_admin_v2/proto/instance.proto", package="google.bigtable.admin.v2", syntax="proto3", - serialized_options=_b( - "\n\034com.google.bigtable.admin.v2B\rInstanceProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2" - ), - serialized_pb=_b( - '\n3google/cloud/bigtable/admin_v2/proto/instance.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x31google/cloud/bigtable/admin_v2/proto/common.proto"\x83\x03\n\x08Instance\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12\x37\n\x05state\x18\x03 \x01(\x0e\x32(.google.bigtable.admin.v2.Instance.State\x12\x35\n\x04type\x18\x04 \x01(\x0e\x32\'.google.bigtable.admin.v2.Instance.Type\x12>\n\x06labels\x18\x05 \x03(\x0b\x32..google.bigtable.admin.v2.Instance.LabelsEntry\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"5\n\x05State\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\t\n\x05READY\x10\x01\x12\x0c\n\x08\x43REATING\x10\x02"=\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x0e\n\nPRODUCTION\x10\x01\x12\x0f\n\x0b\x44\x45VELOPMENT\x10\x02"\x8e\x02\n\x07\x43luster\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x10\n\x08location\x18\x02 \x01(\t\x12\x36\n\x05state\x18\x03 \x01(\x0e\x32\'.google.bigtable.admin.v2.Cluster.State\x12\x13\n\x0bserve_nodes\x18\x04 \x01(\x05\x12\x43\n\x14\x64\x65\x66\x61ult_storage_type\x18\x05 \x01(\x0e\x32%.google.bigtable.admin.v2.StorageType"Q\n\x05State\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\t\n\x05READY\x10\x01\x12\x0c\n\x08\x43REATING\x10\x02\x12\x0c\n\x08RESIZING\x10\x03\x12\x0c\n\x08\x44ISABLED\x10\x04"\x82\x03\n\nAppProfile\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04\x65tag\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t\x12g\n\x1dmulti_cluster_routing_use_any\x18\x05 \x01(\x0b\x32>.google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAnyH\x00\x12[\n\x16single_cluster_routing\x18\x06 \x01(\x0b\x32\x39.google.bigtable.admin.v2.AppProfile.SingleClusterRoutingH\x00\x1a\x1b\n\x19MultiClusterRoutingUseAny\x1aN\n\x14SingleClusterRouting\x12\x12\n\ncluster_id\x18\x01 \x01(\t\x12"\n\x1a\x61llow_transactional_writes\x18\x02 \x01(\x08\x42\x10\n\x0erouting_policyB\xb0\x01\n\x1c\x63om.google.bigtable.admin.v2B\rInstanceProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2b\x06proto3' - 
), + serialized_options=b"\n\034com.google.bigtable.admin.v2B\rInstanceProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2", + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n3google/cloud/bigtable_admin_v2/proto/instance.proto\x12\x18google.bigtable.admin.v2\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x31google/cloud/bigtable_admin_v2/proto/common.proto"\xdd\x03\n\x08Instance\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12\x19\n\x0c\x64isplay_name\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x37\n\x05state\x18\x03 \x01(\x0e\x32(.google.bigtable.admin.v2.Instance.State\x12\x35\n\x04type\x18\x04 \x01(\x0e\x32\'.google.bigtable.admin.v2.Instance.Type\x12>\n\x06labels\x18\x05 \x03(\x0b\x32..google.bigtable.admin.v2.Instance.LabelsEntry\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"5\n\x05State\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\t\n\x05READY\x10\x01\x12\x0c\n\x08\x43REATING\x10\x02"=\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x0e\n\nPRODUCTION\x10\x01\x12\x0f\n\x0b\x44\x45VELOPMENT\x10\x02:N\xea\x41K\n bigtable.googleapis.com/Instance\x12\'projects/{project}/instances/{instance}"\xa7\x03\n\x07\x43luster\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12\x38\n\x08location\x18\x02 \x01(\tB&\xfa\x41#\n!locations.googleapis.com/Location\x12;\n\x05state\x18\x03 \x01(\x0e\x32\'.google.bigtable.admin.v2.Cluster.StateB\x03\xe0\x41\x03\x12\x18\n\x0bserve_nodes\x18\x04 \x01(\x05\x42\x03\xe0\x41\x02\x12\x43\n\x14\x64\x65\x66\x61ult_storage_type\x18\x05 \x01(\x0e\x32%.google.bigtable.admin.v2.StorageType"Q\n\x05State\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\t\n\x05READY\x10\x01\x12\x0c\n\x08\x43REATING\x10\x02\x12\x0c\n\x08RESIZING\x10\x03\x12\x0c\n\x08\x44ISABLED\x10\x04:`\xea\x41]\n\x1f\x62igtable.googleapis.com/Cluster\x12:projects/{project}/instances/{instance}/clusters/{cluster}"\xee\x03\n\nAppProfile\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04\x65tag\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t\x12g\n\x1dmulti_cluster_routing_use_any\x18\x05 \x01(\x0b\x32>.google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAnyH\x00\x12[\n\x16single_cluster_routing\x18\x06 \x01(\x0b\x32\x39.google.bigtable.admin.v2.AppProfile.SingleClusterRoutingH\x00\x1a\x1b\n\x19MultiClusterRoutingUseAny\x1aN\n\x14SingleClusterRouting\x12\x12\n\ncluster_id\x18\x01 \x01(\t\x12"\n\x1a\x61llow_transactional_writes\x18\x02 \x01(\x08:j\xea\x41g\n"bigtable.googleapis.com/AppProfile\x12\x41projects/{project}/instances/{instance}/appProfiles/{app_profile}B\x10\n\x0erouting_policyB\xb0\x01\n\x1c\x63om.google.bigtable.admin.v2B\rInstanceProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2b\x06proto3', dependencies=[ - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_common__pb2.DESCRIPTOR, + google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, + google_dot_api_dot_resource__pb2.DESCRIPTOR, + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_common__pb2.DESCRIPTOR, ], ) @@ -43,6 +39,7 @@ full_name="google.bigtable.admin.v2.Instance.State", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name="STATE_NOT_KNOWN", @@ -50,18 +47,29 @@ number=0, 
serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="READY", index=1, number=1, serialized_options=None, type=None + name="READY", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="CREATING", index=2, number=2, serialized_options=None, type=None + name="CREATING", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, serialized_options=None, - serialized_start=434, - serialized_end=487, + serialized_start=474, + serialized_end=527, ) _sym_db.RegisterEnumDescriptor(_INSTANCE_STATE) @@ -70,6 +78,7 @@ full_name="google.bigtable.admin.v2.Instance.Type", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name="TYPE_UNSPECIFIED", @@ -77,18 +86,29 @@ number=0, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PRODUCTION", index=1, number=1, serialized_options=None, type=None + name="PRODUCTION", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="DEVELOPMENT", index=2, number=2, serialized_options=None, type=None + name="DEVELOPMENT", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, serialized_options=None, - serialized_start=489, - serialized_end=550, + serialized_start=529, + serialized_end=590, ) _sym_db.RegisterEnumDescriptor(_INSTANCE_TYPE) @@ -97,6 +117,7 @@ full_name="google.bigtable.admin.v2.Cluster.State", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name="STATE_NOT_KNOWN", @@ -104,24 +125,45 @@ number=0, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="READY", index=1, number=1, serialized_options=None, type=None + name="READY", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="CREATING", index=2, number=2, serialized_options=None, type=None + name="CREATING", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="RESIZING", index=3, number=3, serialized_options=None, type=None + name="RESIZING", + index=3, + number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="DISABLED", index=4, number=4, serialized_options=None, type=None + name="DISABLED", + index=4, + number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, serialized_options=None, - serialized_start=742, - serialized_end=823, + serialized_start=917, + serialized_end=998, ) _sym_db.RegisterEnumDescriptor(_CLUSTER_STATE) @@ -132,6 +174,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="key", @@ -142,7 +185,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), 
message_type=None, enum_type=None, containing_type=None, @@ -150,6 +193,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="value", @@ -160,7 +204,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -168,18 +212,19 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], nested_types=[], enum_types=[], - serialized_options=_b("8\001"), + serialized_options=b"8\001", is_extendable=False, syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=387, - serialized_end=432, + serialized_start=427, + serialized_end=472, ) _INSTANCE = _descriptor.Descriptor( @@ -188,6 +233,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -198,14 +244,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\003", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="display_name", @@ -216,14 +263,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="state", @@ -242,6 +290,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="type", @@ -260,6 +309,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="labels", @@ -278,18 +328,19 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], nested_types=[_INSTANCE_LABELSENTRY,], enum_types=[_INSTANCE_STATE, _INSTANCE_TYPE,], - serialized_options=None, + serialized_options=b"\352AK\n bigtable.googleapis.com/Instance\022'projects/{project}/instances/{instance}", is_extendable=False, syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=163, - serialized_end=550, + serialized_start=193, + serialized_end=670, ) @@ -299,6 +350,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -309,14 +361,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\003", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="location", @@ -327,14 +380,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, 
enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\372A#\n!locations.googleapis.com/Location", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="state", @@ -351,8 +405,9 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\003", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="serve_nodes", @@ -369,8 +424,9 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="default_storage_type", @@ -389,18 +445,19 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], nested_types=[], enum_types=[_CLUSTER_STATE,], - serialized_options=None, + serialized_options=b"\352A]\n\037bigtable.googleapis.com/Cluster\022:projects/{project}/instances/{instance}/clusters/{cluster}", is_extendable=False, syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=553, - serialized_end=823, + serialized_start=673, + serialized_end=1096, ) @@ -410,6 +467,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], extensions=[], nested_types=[], @@ -419,8 +477,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1087, - serialized_end=1114, + serialized_start=1360, + serialized_end=1387, ) _APPPROFILE_SINGLECLUSTERROUTING = _descriptor.Descriptor( @@ -429,6 +487,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="cluster_id", @@ -439,7 +498,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -447,6 +506,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="allow_transactional_writes", @@ -465,6 +525,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -475,8 +536,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1116, - serialized_end=1194, + serialized_start=1389, + serialized_end=1467, ) _APPPROFILE = _descriptor.Descriptor( @@ -485,6 +546,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -495,7 +557,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -503,6 +565,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="etag", @@ -513,7 +576,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -521,6 +584,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), 
_descriptor.FieldDescriptor( name="description", @@ -531,7 +595,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -539,6 +603,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="multi_cluster_routing_use_any", @@ -557,6 +622,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="single_cluster_routing", @@ -575,6 +641,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -583,7 +650,7 @@ _APPPROFILE_SINGLECLUSTERROUTING, ], enum_types=[], - serialized_options=None, + serialized_options=b'\352Ag\n"bigtable.googleapis.com/AppProfile\022Aprojects/{project}/instances/{instance}/appProfiles/{app_profile}', is_extendable=False, syntax="proto3", extension_ranges=[], @@ -593,11 +660,12 @@ full_name="google.bigtable.admin.v2.AppProfile.routing_policy", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], ), ], - serialized_start=826, - serialized_end=1212, + serialized_start=1099, + serialized_end=1593, ) _INSTANCE_LABELSENTRY.containing_type = _INSTANCE @@ -610,7 +678,7 @@ _CLUSTER.fields_by_name[ "default_storage_type" ].enum_type = ( - google_dot_cloud_dot_bigtable_dot_admin__v2_dot_proto_dot_common__pb2._STORAGETYPE + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_common__pb2._STORAGETYPE ) _CLUSTER_STATE.containing_type = _CLUSTER _APPPROFILE_MULTICLUSTERROUTINGUSEANY.containing_type = _APPPROFILE @@ -641,40 +709,36 @@ Instance = _reflection.GeneratedProtocolMessageType( "Instance", (_message.Message,), - dict( - LabelsEntry=_reflection.GeneratedProtocolMessageType( + { + "LabelsEntry": _reflection.GeneratedProtocolMessageType( "LabelsEntry", (_message.Message,), - dict( - DESCRIPTOR=_INSTANCE_LABELSENTRY, - __module__="google.cloud.bigtable.admin_v2.proto.instance_pb2" + { + "DESCRIPTOR": _INSTANCE_LABELSENTRY, + "__module__": "google.cloud.bigtable_admin_v2.proto.instance_pb2" # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Instance.LabelsEntry) - ), - ), - DESCRIPTOR=_INSTANCE, - __module__="google.cloud.bigtable.admin_v2.proto.instance_pb2", - __doc__="""A collection of Bigtable - [Tables][google.bigtable.admin.v2.Table] and the resources that serve - them. All tables in an instance are served from a single - [Cluster][google.bigtable.admin.v2.Cluster]. - - + }, + ), + "DESCRIPTOR": _INSTANCE, + "__module__": "google.cloud.bigtable_admin_v2.proto.instance_pb2", + "__doc__": """A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and + the resources that serve them. All tables in an instance are served + from all [Clusters][google.bigtable.admin.v2.Cluster] in the instance. Attributes: name: - (\ ``OutputOnly``) The unique name of the instance. Values are - of the form - ``projects//instances/[a-z][a-z0-9\\-]+[a-z0-9]``. + The unique name of the instance. Values are of the form + ``projects/{project}/instances/[a-z][a-z0-9\\-]+[a-z0-9]``. display_name: - The descriptive name for this instance as it appears in UIs. - Can be changed at any time, but should be kept globally unique - to avoid confusion. + Required. The descriptive name for this instance as it appears + in UIs. 
Can be changed at any time, but should be kept + globally unique to avoid confusion. state: (\ ``OutputOnly``) The current state of the instance. type: The type of the instance. Defaults to ``PRODUCTION``. labels: Labels are a flexible and lightweight mechanism for organizing - cloud resources into groups that reflect a customer's + cloud resources into groups that reflect a customer’s organizational needs and deployment strategies. They can be used to filter resources and aggregate metrics. - Label keys must be between 1 and 63 characters long and must conform @@ -686,7 +750,7 @@ resource. - Keys and values must both be under 128 bytes. """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Instance) - ), + }, ) _sym_db.RegisterMessage(Instance) _sym_db.RegisterMessage(Instance.LabelsEntry) @@ -694,70 +758,65 @@ Cluster = _reflection.GeneratedProtocolMessageType( "Cluster", (_message.Message,), - dict( - DESCRIPTOR=_CLUSTER, - __module__="google.cloud.bigtable.admin_v2.proto.instance_pb2", - __doc__="""A resizable group of nodes in a particular cloud location, - capable of serving all [Tables][google.bigtable.admin.v2.Table] in the - parent [Instance][google.bigtable.admin.v2.Instance]. - - + { + "DESCRIPTOR": _CLUSTER, + "__module__": "google.cloud.bigtable_admin_v2.proto.instance_pb2", + "__doc__": """A resizable group of nodes in a particular cloud location, capable of + serving all [Tables][google.bigtable.admin.v2.Table] in the parent + [Instance][google.bigtable.admin.v2.Instance]. Attributes: name: - (\ ``OutputOnly``) The unique name of the cluster. Values are - of the form ``projects//instances//clusters - /[a-z][-a-z0-9]*``. + The unique name of the cluster. Values are of the form ``proje + cts/{project}/instances/{instance}/clusters/[a-z][-a-z0-9]*``. location: - (\ ``CreationOnly``) The location where this cluster's nodes + (\ ``CreationOnly``) The location where this cluster’s nodes and storage reside. For best performance, clients should be located as close as possible to this cluster. Currently only zones are supported, so values should be of the form - ``projects//locations/``. + ``projects/{project}/locations/{zone}``. state: - (\ ``OutputOnly``) The current state of the cluster. + The current state of the cluster. serve_nodes: - The number of nodes allocated to this cluster. More nodes - enable higher throughput and more consistent performance. + Required. The number of nodes allocated to this cluster. More + nodes enable higher throughput and more consistent + performance. default_storage_type: (\ ``CreationOnly``) The type of storage used by this cluster - to serve its parent instance's tables, unless explicitly + to serve its parent instance’s tables, unless explicitly overridden. """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Cluster) - ), + }, ) _sym_db.RegisterMessage(Cluster) AppProfile = _reflection.GeneratedProtocolMessageType( "AppProfile", (_message.Message,), - dict( - MultiClusterRoutingUseAny=_reflection.GeneratedProtocolMessageType( + { + "MultiClusterRoutingUseAny": _reflection.GeneratedProtocolMessageType( "MultiClusterRoutingUseAny", (_message.Message,), - dict( - DESCRIPTOR=_APPPROFILE_MULTICLUSTERROUTINGUSEANY, - __module__="google.cloud.bigtable.admin_v2.proto.instance_pb2", - __doc__="""Read/write requests may be routed to any cluster in the - instance, and will fail over to another cluster in the event of - transient errors or delays. 
Choosing this option sacrifices - read-your-writes consistency to improve availability. - - """, + { + "DESCRIPTOR": _APPPROFILE_MULTICLUSTERROUTINGUSEANY, + "__module__": "google.cloud.bigtable_admin_v2.proto.instance_pb2", + "__doc__": """Read/write requests are routed to the nearest cluster in the instance, + and will fail over to the nearest cluster that is available in the + event of transient errors or delays. Clusters in a region are + considered equidistant. Choosing this option sacrifices read-your- + writes consistency to improve availability.""", # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny) - ), + }, ), - SingleClusterRouting=_reflection.GeneratedProtocolMessageType( + "SingleClusterRouting": _reflection.GeneratedProtocolMessageType( "SingleClusterRouting", (_message.Message,), - dict( - DESCRIPTOR=_APPPROFILE_SINGLECLUSTERROUTING, - __module__="google.cloud.bigtable.admin_v2.proto.instance_pb2", - __doc__="""Unconditionally routes all read/write requests to a - specific cluster. This option preserves read-your-writes consistency, - but does not improve availability. - - + { + "DESCRIPTOR": _APPPROFILE_SINGLECLUSTERROUTING, + "__module__": "google.cloud.bigtable_admin_v2.proto.instance_pb2", + "__doc__": """Unconditionally routes all read/write requests to a specific cluster. + This option preserves read-your-writes consistency but does not + improve availability. Attributes: cluster_id: The cluster to which read/write requests should be routed. @@ -768,14 +827,12 @@ table/row/column in multiple clusters. """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.AppProfile.SingleClusterRouting) - ), - ), - DESCRIPTOR=_APPPROFILE, - __module__="google.cloud.bigtable.admin_v2.proto.instance_pb2", - __doc__="""A configuration object describing how Cloud Bigtable - should treat traffic from a particular end user application. - - + }, + ), + "DESCRIPTOR": _APPPROFILE, + "__module__": "google.cloud.bigtable_admin_v2.proto.instance_pb2", + "__doc__": """A configuration object describing how Cloud Bigtable should treat + traffic from a particular end user application. Attributes: name: (\ ``OutputOnly``) The unique name of the app profile. Values @@ -796,15 +853,15 @@ Optional long form description of the use case for this AppProfile. routing_policy: - The routing policy for all read/write requests which use this + The routing policy for all read/write requests that use this app profile. A value must be explicitly set. multi_cluster_routing_use_any: - Use a multi-cluster routing policy that may pick any cluster. + Use a multi-cluster routing policy. single_cluster_routing: Use a single-cluster routing policy. 
""", # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.AppProfile) - ), + }, ) _sym_db.RegisterMessage(AppProfile) _sym_db.RegisterMessage(AppProfile.MultiClusterRoutingUseAny) @@ -813,4 +870,13 @@ DESCRIPTOR._options = None _INSTANCE_LABELSENTRY._options = None +_INSTANCE.fields_by_name["name"]._options = None +_INSTANCE.fields_by_name["display_name"]._options = None +_INSTANCE._options = None +_CLUSTER.fields_by_name["name"]._options = None +_CLUSTER.fields_by_name["location"]._options = None +_CLUSTER.fields_by_name["state"]._options = None +_CLUSTER.fields_by_name["serve_nodes"]._options = None +_CLUSTER._options = None +_APPPROFILE._options = None # @@protoc_insertion_point(module_scope) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table.proto index 5019d8b86448..535378989124 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table.proto +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table.proto @@ -1,4 +1,4 @@ -// Copyright 2018 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,13 +11,13 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; package google.bigtable.admin.v2; -import "google/api/annotations.proto"; +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/timestamp.proto"; @@ -28,9 +28,36 @@ option java_outer_classname = "TableProto"; option java_package = "com.google.bigtable.admin.v2"; option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2"; +// Indicates the type of the restore source. +enum RestoreSourceType { + // No restore associated. + RESTORE_SOURCE_TYPE_UNSPECIFIED = 0; + + // A backup was used as the source of the restore. + BACKUP = 1; +} + +// Information about a table restore. +message RestoreInfo { + // The type of the restore source. + RestoreSourceType source_type = 1; + + // Information about the source used to restore the table. + oneof source_info { + // Information about the backup used to restore the table. The backup + // may no longer exist. + BackupInfo backup_info = 2; + } +} + // A collection of user data indexed by row, column, and timestamp. // Each table is served using the resources of its parent cluster. message Table { + option (google.api.resource) = { + type: "bigtable.googleapis.com/Table" + pattern: "projects/{project}/instances/{instance}/tables/{table}" + }; + // The state of a table's data in a particular cluster. message ClusterState { // Table replication states. @@ -55,10 +82,14 @@ message Table { // replication delay, reads may not immediately reflect the state of the // table in other clusters. READY = 4; + + // The table is fully created and ready for use after a restore, and is + // being optimized for performance. When optimizations are complete, the + // table will transition to `READY` state. + READY_OPTIMIZING = 5; } - // (`OutputOnly`) - // The state of replication for the table in this cluster. + // Output only. The state of replication for the table in this cluster. 
ReplicationState replication_state = 1; } @@ -84,22 +115,20 @@ message Table { // Only populates `name` and fields related to the table's schema. SCHEMA_VIEW = 2; - // Only populates `name` and fields related to the table's - // replication state. + // Only populates `name` and fields related to the table's replication + // state. REPLICATION_VIEW = 3; // Populates all fields. FULL = 4; } - // (`OutputOnly`) - // The unique name of the table. Values are of the form + // Output only. The unique name of the table. Values are of the form // `projects//instances//tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]*`. // Views: `NAME_ONLY`, `SCHEMA_VIEW`, `REPLICATION_VIEW`, `FULL` string name = 1; - // (`OutputOnly`) - // Map from cluster ID to per-cluster table state. + // Output only. Map from cluster ID to per-cluster table state. // If it could not be determined whether or not the table has data in a // particular cluster (for example, if its zone is unavailable), then // there will be an entry for the cluster with UNKNOWN `replication_status`. @@ -115,8 +144,12 @@ message Table { // The granularity (i.e. `MILLIS`) at which timestamps are stored in // this table. Timestamps not matching the granularity will be rejected. // If unspecified at creation time, the value will be set to `MILLIS`. - // Views: `SCHEMA_VIEW`, `FULL` + // Views: `SCHEMA_VIEW`, `FULL`. TimestampGranularity granularity = 4; + + // Output only. If this table was restored from another data source (e.g. a + // backup), this field will be populated with information about the restore. + RestoreInfo restore_info = 6; } // A set of columns within a table which share a common configuration. @@ -170,6 +203,11 @@ message GcRule { // feature might be changed in backward-incompatible ways and is not recommended // for production use. It is not subject to any SLA or deprecation policy. message Snapshot { + option (google.api.resource) = { + type: "bigtable.googleapis.com/Snapshot" + pattern: "projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}" + }; + // Possible states of a snapshot. enum State { // The state of the snapshot could not be determined. @@ -184,37 +222,118 @@ message Snapshot { CREATING = 2; } - // (`OutputOnly`) - // The unique name of the snapshot. + // Output only. The unique name of the snapshot. // Values are of the form // `projects//instances//clusters//snapshots/`. string name = 1; - // (`OutputOnly`) - // The source table at the time the snapshot was taken. + // Output only. The source table at the time the snapshot was taken. Table source_table = 2; - // (`OutputOnly`) - // The size of the data in the source table at the time the snapshot was - // taken. In some cases, this value may be computed asynchronously via a - // background process and a placeholder of 0 will be used in the meantime. + // Output only. The size of the data in the source table at the time the + // snapshot was taken. In some cases, this value may be computed + // asynchronously via a background process and a placeholder of 0 will be used + // in the meantime. int64 data_size_bytes = 3; - // (`OutputOnly`) - // The time when the snapshot is created. + // Output only. The time when the snapshot is created. google.protobuf.Timestamp create_time = 4; - // (`OutputOnly`) - // The time when the snapshot will be deleted. The maximum amount of time a - // snapshot can stay active is 365 days. If 'ttl' is not specified, + // Output only. The time when the snapshot will be deleted. 
The maximum amount + // of time a snapshot can stay active is 365 days. If 'ttl' is not specified, // the default maximum of 365 days will be used. google.protobuf.Timestamp delete_time = 5; - // (`OutputOnly`) - // The current state of the snapshot. + // Output only. The current state of the snapshot. State state = 6; - // (`OutputOnly`) - // Description of the snapshot. + // Output only. Description of the snapshot. string description = 7; } + +// A backup of a Cloud Bigtable table. +message Backup { + option (google.api.resource) = { + type: "bigtable.googleapis.com/Backup" + pattern: "projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}" + }; + + // Indicates the current state of the backup. + enum State { + // Not specified. + STATE_UNSPECIFIED = 0; + + // The pending backup is still being created. Operations on the + // backup may fail with `FAILED_PRECONDITION` in this state. + CREATING = 1; + + // The backup is complete and ready for use. + READY = 2; + } + + // Output only. A globally unique identifier for the backup which cannot be + // changed. Values are of the form + // `projects/{project}/instances/{instance}/clusters/{cluster}/ + // backups/[_a-zA-Z0-9][-_.a-zA-Z0-9]*` + // The final segment of the name must be between 1 and 50 characters + // in length. + // + // The backup is stored in the cluster identified by the prefix of the backup + // name of the form + // `projects/{project}/instances/{instance}/clusters/{cluster}`. + string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Required. Immutable. Name of the table from which this backup was created. + // This needs to be in the same instance as the backup. Values are of the form + // `projects/{project}/instances/{instance}/tables/{source_table}`. + string source_table = 2 [ + (google.api.field_behavior) = IMMUTABLE, + (google.api.field_behavior) = REQUIRED + ]; + + // Required. The expiration time of the backup, with microseconds + // granularity that must be at least 6 hours and at most 30 days + // from the time the request is received. Once the `expire_time` + // has passed, Cloud Bigtable will delete the backup and free the + // resources used by the backup. + google.protobuf.Timestamp expire_time = 3 + [(google.api.field_behavior) = REQUIRED]; + + // Output only. `start_time` is the time that the backup was started + // (i.e. approximately the time the + // [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup] + // request is received). The row data in this backup will be no older than + // this timestamp. + google.protobuf.Timestamp start_time = 4 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. `end_time` is the time that the backup was finished. The row + // data in the backup will be no newer than this timestamp. + google.protobuf.Timestamp end_time = 5 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Size of the backup in bytes. + int64 size_bytes = 6 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The current state of the backup. + State state = 7 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Information about a backup. +message BackupInfo { + // Output only. Name of the backup. + string backup = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The time that the backup was started. Row data in the backup + // will be no older than this timestamp. + google.protobuf.Timestamp start_time = 2 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. 
This time that the backup was finished. Row data in the + // backup will be no newer than this timestamp. + google.protobuf.Timestamp end_time = 3 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Name of the table the backup was created from. + string source_table = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; +} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py index b026dff95f39..a52b2c29796b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py @@ -1,10 +1,8 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/bigtable/admin_v2/proto/table.proto +# source: google/cloud/bigtable_admin_v2/proto/table.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) +from google.protobuf.internal import enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -15,34 +13,69 @@ _sym_db = _symbol_database.Default() -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 +from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 +from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/bigtable/admin_v2/proto/table.proto", + name="google/cloud/bigtable_admin_v2/proto/table.proto", package="google.bigtable.admin.v2", syntax="proto3", - serialized_options=_b( - "\n\034com.google.bigtable.admin.v2B\nTableProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2" - ), - serialized_pb=_b( - '\n0google/cloud/bigtable/admin_v2/proto/table.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xcb\x06\n\x05Table\x12\x0c\n\x04name\x18\x01 \x01(\t\x12J\n\x0e\x63luster_states\x18\x02 \x03(\x0b\x32\x32.google.bigtable.admin.v2.Table.ClusterStatesEntry\x12L\n\x0f\x63olumn_families\x18\x03 \x03(\x0b\x32\x33.google.bigtable.admin.v2.Table.ColumnFamiliesEntry\x12I\n\x0bgranularity\x18\x04 \x01(\x0e\x32\x34.google.bigtable.admin.v2.Table.TimestampGranularity\x1a\xe2\x01\n\x0c\x43lusterState\x12X\n\x11replication_state\x18\x01 \x01(\x0e\x32=.google.bigtable.admin.v2.Table.ClusterState.ReplicationState"x\n\x10ReplicationState\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\x10\n\x0cINITIALIZING\x10\x01\x12\x17\n\x13PLANNED_MAINTENANCE\x10\x02\x12\x19\n\x15UNPLANNED_MAINTENANCE\x10\x03\x12\t\n\x05READY\x10\x04\x1a\x62\n\x12\x43lusterStatesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12;\n\x05value\x18\x02 \x01(\x0b\x32,.google.bigtable.admin.v2.Table.ClusterState:\x02\x38\x01\x1a]\n\x13\x43olumnFamiliesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x35\n\x05value\x18\x02 
\x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamily:\x02\x38\x01"I\n\x14TimestampGranularity\x12%\n!TIMESTAMP_GRANULARITY_UNSPECIFIED\x10\x00\x12\n\n\x06MILLIS\x10\x01"\\\n\x04View\x12\x14\n\x10VIEW_UNSPECIFIED\x10\x00\x12\r\n\tNAME_ONLY\x10\x01\x12\x0f\n\x0bSCHEMA_VIEW\x10\x02\x12\x14\n\x10REPLICATION_VIEW\x10\x03\x12\x08\n\x04\x46ULL\x10\x04"A\n\x0c\x43olumnFamily\x12\x31\n\x07gc_rule\x18\x01 \x01(\x0b\x32 .google.bigtable.admin.v2.GcRule"\xd5\x02\n\x06GcRule\x12\x1a\n\x10max_num_versions\x18\x01 \x01(\x05H\x00\x12,\n\x07max_age\x18\x02 \x01(\x0b\x32\x19.google.protobuf.DurationH\x00\x12\x45\n\x0cintersection\x18\x03 \x01(\x0b\x32-.google.bigtable.admin.v2.GcRule.IntersectionH\x00\x12\x37\n\x05union\x18\x04 \x01(\x0b\x32&.google.bigtable.admin.v2.GcRule.UnionH\x00\x1a?\n\x0cIntersection\x12/\n\x05rules\x18\x01 \x03(\x0b\x32 .google.bigtable.admin.v2.GcRule\x1a\x38\n\x05Union\x12/\n\x05rules\x18\x01 \x03(\x0b\x32 .google.bigtable.admin.v2.GcRuleB\x06\n\x04rule"\xcf\x02\n\x08Snapshot\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x35\n\x0csource_table\x18\x02 \x01(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12\x17\n\x0f\x64\x61ta_size_bytes\x18\x03 \x01(\x03\x12/\n\x0b\x63reate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x64\x65lete_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x37\n\x05state\x18\x06 \x01(\x0e\x32(.google.bigtable.admin.v2.Snapshot.State\x12\x13\n\x0b\x64\x65scription\x18\x07 \x01(\t"5\n\x05State\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\t\n\x05READY\x10\x01\x12\x0c\n\x08\x43REATING\x10\x02\x42\xad\x01\n\x1c\x63om.google.bigtable.admin.v2B\nTableProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2b\x06proto3' - ), + serialized_options=b"\n\034com.google.bigtable.admin.v2B\nTableProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2", + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n0google/cloud/bigtable_admin_v2/proto/table.proto\x12\x18google.bigtable.admin.v2\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\x9b\x01\n\x0bRestoreInfo\x12@\n\x0bsource_type\x18\x01 \x01(\x0e\x32+.google.bigtable.admin.v2.RestoreSourceType\x12;\n\x0b\x62\x61\x63kup_info\x18\x02 \x01(\x0b\x32$.google.bigtable.admin.v2.BackupInfoH\x00\x42\r\n\x0bsource_info"\xfb\x07\n\x05Table\x12\x0c\n\x04name\x18\x01 \x01(\t\x12J\n\x0e\x63luster_states\x18\x02 \x03(\x0b\x32\x32.google.bigtable.admin.v2.Table.ClusterStatesEntry\x12L\n\x0f\x63olumn_families\x18\x03 \x03(\x0b\x32\x33.google.bigtable.admin.v2.Table.ColumnFamiliesEntry\x12I\n\x0bgranularity\x18\x04 \x01(\x0e\x32\x34.google.bigtable.admin.v2.Table.TimestampGranularity\x12;\n\x0crestore_info\x18\x06 \x01(\x0b\x32%.google.bigtable.admin.v2.RestoreInfo\x1a\xf9\x01\n\x0c\x43lusterState\x12X\n\x11replication_state\x18\x01 \x01(\x0e\x32=.google.bigtable.admin.v2.Table.ClusterState.ReplicationState"\x8e\x01\n\x10ReplicationState\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\x10\n\x0cINITIALIZING\x10\x01\x12\x17\n\x13PLANNED_MAINTENANCE\x10\x02\x12\x19\n\x15UNPLANNED_MAINTENANCE\x10\x03\x12\t\n\x05READY\x10\x04\x12\x14\n\x10READY_OPTIMIZING\x10\x05\x1a\x62\n\x12\x43lusterStatesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12;\n\x05value\x18\x02 
\x01(\x0b\x32,.google.bigtable.admin.v2.Table.ClusterState:\x02\x38\x01\x1a]\n\x13\x43olumnFamiliesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x35\n\x05value\x18\x02 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamily:\x02\x38\x01"I\n\x14TimestampGranularity\x12%\n!TIMESTAMP_GRANULARITY_UNSPECIFIED\x10\x00\x12\n\n\x06MILLIS\x10\x01"\\\n\x04View\x12\x14\n\x10VIEW_UNSPECIFIED\x10\x00\x12\r\n\tNAME_ONLY\x10\x01\x12\x0f\n\x0bSCHEMA_VIEW\x10\x02\x12\x14\n\x10REPLICATION_VIEW\x10\x03\x12\x08\n\x04\x46ULL\x10\x04:Z\xea\x41W\n\x1d\x62igtable.googleapis.com/Table\x12\x36projects/{project}/instances/{instance}/tables/{table}"A\n\x0c\x43olumnFamily\x12\x31\n\x07gc_rule\x18\x01 \x01(\x0b\x32 .google.bigtable.admin.v2.GcRule"\xd5\x02\n\x06GcRule\x12\x1a\n\x10max_num_versions\x18\x01 \x01(\x05H\x00\x12,\n\x07max_age\x18\x02 \x01(\x0b\x32\x19.google.protobuf.DurationH\x00\x12\x45\n\x0cintersection\x18\x03 \x01(\x0b\x32-.google.bigtable.admin.v2.GcRule.IntersectionH\x00\x12\x37\n\x05union\x18\x04 \x01(\x0b\x32&.google.bigtable.admin.v2.GcRule.UnionH\x00\x1a?\n\x0cIntersection\x12/\n\x05rules\x18\x01 \x03(\x0b\x32 .google.bigtable.admin.v2.GcRule\x1a\x38\n\x05Union\x12/\n\x05rules\x18\x01 \x03(\x0b\x32 .google.bigtable.admin.v2.GcRuleB\x06\n\x04rule"\xc7\x03\n\x08Snapshot\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x35\n\x0csource_table\x18\x02 \x01(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12\x17\n\x0f\x64\x61ta_size_bytes\x18\x03 \x01(\x03\x12/\n\x0b\x63reate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x64\x65lete_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x37\n\x05state\x18\x06 \x01(\x0e\x32(.google.bigtable.admin.v2.Snapshot.State\x12\x13\n\x0b\x64\x65scription\x18\x07 \x01(\t"5\n\x05State\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\t\n\x05READY\x10\x01\x12\x0c\n\x08\x43REATING\x10\x02:v\xea\x41s\n bigtable.googleapis.com/Snapshot\x12Oprojects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}"\xd7\x03\n\x06\x42\x61\x63kup\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12\x1c\n\x0csource_table\x18\x02 \x01(\tB\x06\xe0\x41\x05\xe0\x41\x02\x12\x34\n\x0b\x65xpire_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x02\x12\x33\n\nstart_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x31\n\x08\x65nd_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x17\n\nsize_bytes\x18\x06 \x01(\x03\x42\x03\xe0\x41\x03\x12:\n\x05state\x18\x07 \x01(\x0e\x32&.google.bigtable.admin.v2.Backup.StateB\x03\xe0\x41\x03"7\n\x05State\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\x0c\n\x08\x43REATING\x10\x01\x12\t\n\x05READY\x10\x02:p\xea\x41m\n\x1e\x62igtable.googleapis.com/Backup\x12Kprojects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}"\xa4\x01\n\nBackupInfo\x12\x13\n\x06\x62\x61\x63kup\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12\x33\n\nstart_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x31\n\x08\x65nd_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x19\n\x0csource_table\x18\x04 \x01(\tB\x03\xe0\x41\x03*D\n\x11RestoreSourceType\x12#\n\x1fRESTORE_SOURCE_TYPE_UNSPECIFIED\x10\x00\x12\n\n\x06\x42\x41\x43KUP\x10\x01\x42\xad\x01\n\x1c\x63om.google.bigtable.admin.v2B\nTableProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2b\x06proto3', dependencies=[ - google_dot_api_dot_annotations__pb2.DESCRIPTOR, + 
google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, + google_dot_api_dot_resource__pb2.DESCRIPTOR, google_dot_protobuf_dot_duration__pb2.DESCRIPTOR, google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, ], ) +_RESTORESOURCETYPE = _descriptor.EnumDescriptor( + name="RestoreSourceType", + full_name="google.bigtable.admin.v2.RestoreSourceType", + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name="RESTORE_SOURCE_TYPE_UNSPECIFIED", + index=0, + number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.EnumValueDescriptor( + name="BACKUP", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, + ), + ], + containing_type=None, + serialized_options=None, + serialized_start=2893, + serialized_end=2961, +) +_sym_db.RegisterEnumDescriptor(_RESTORESOURCETYPE) + +RestoreSourceType = enum_type_wrapper.EnumTypeWrapper(_RESTORESOURCETYPE) +RESTORE_SOURCE_TYPE_UNSPECIFIED = 0 +BACKUP = 1 + _TABLE_CLUSTERSTATE_REPLICATIONSTATE = _descriptor.EnumDescriptor( name="ReplicationState", full_name="google.bigtable.admin.v2.Table.ClusterState.ReplicationState", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name="STATE_NOT_KNOWN", @@ -50,9 +83,15 @@ number=0, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="INITIALIZING", index=1, number=1, serialized_options=None, type=None + name="INITIALIZING", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="PLANNED_MAINTENANCE", @@ -60,6 +99,7 @@ number=2, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="UNPLANNED_MAINTENANCE", @@ -67,15 +107,29 @@ number=3, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.EnumValueDescriptor( + name="READY", + index=4, + number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="READY", index=4, number=4, serialized_options=None, type=None + name="READY_OPTIMIZING", + index=5, + number=5, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, serialized_options=None, - serialized_start=533, - serialized_end=653, + serialized_start=783, + serialized_end=925, ) _sym_db.RegisterEnumDescriptor(_TABLE_CLUSTERSTATE_REPLICATIONSTATE) @@ -84,6 +138,7 @@ full_name="google.bigtable.admin.v2.Table.TimestampGranularity", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name="TIMESTAMP_GRANULARITY_UNSPECIFIED", @@ -91,15 +146,21 @@ number=0, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="MILLIS", index=1, number=1, serialized_options=None, type=None + name="MILLIS", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, serialized_options=None, - serialized_start=850, - serialized_end=923, + serialized_start=1122, + serialized_end=1195, ) _sym_db.RegisterEnumDescriptor(_TABLE_TIMESTAMPGRANULARITY) @@ -108,6 +169,7 @@ 
full_name="google.bigtable.admin.v2.Table.View", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name="VIEW_UNSPECIFIED", @@ -115,12 +177,23 @@ number=0, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="NAME_ONLY", index=1, number=1, serialized_options=None, type=None + name="NAME_ONLY", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="SCHEMA_VIEW", index=2, number=2, serialized_options=None, type=None + name="SCHEMA_VIEW", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="REPLICATION_VIEW", @@ -128,15 +201,21 @@ number=3, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="FULL", index=4, number=4, serialized_options=None, type=None + name="FULL", + index=4, + number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, serialized_options=None, - serialized_start=925, - serialized_end=1017, + serialized_start=1197, + serialized_end=1289, ) _sym_db.RegisterEnumDescriptor(_TABLE_VIEW) @@ -145,6 +224,7 @@ full_name="google.bigtable.admin.v2.Snapshot.State", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name="STATE_NOT_KNOWN", @@ -152,21 +232,140 @@ number=0, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="READY", index=1, number=1, serialized_options=None, type=None + name="READY", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="CREATING", index=2, number=2, serialized_options=None, type=None + name="CREATING", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, serialized_options=None, - serialized_start=1713, - serialized_end=1766, + serialized_start=2077, + serialized_end=2130, ) _sym_db.RegisterEnumDescriptor(_SNAPSHOT_STATE) +_BACKUP_STATE = _descriptor.EnumDescriptor( + name="State", + full_name="google.bigtable.admin.v2.Backup.State", + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name="STATE_UNSPECIFIED", + index=0, + number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.EnumValueDescriptor( + name="CREATING", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.EnumValueDescriptor( + name="READY", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, + ), + ], + containing_type=None, + serialized_options=None, + serialized_start=2555, + serialized_end=2610, +) +_sym_db.RegisterEnumDescriptor(_BACKUP_STATE) + + +_RESTOREINFO = _descriptor.Descriptor( + name="RestoreInfo", + full_name="google.bigtable.admin.v2.RestoreInfo", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + 
name="source_type", + full_name="google.bigtable.admin.v2.RestoreInfo.source_type", + index=0, + number=1, + type=14, + cpp_type=8, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="backup_info", + full_name="google.bigtable.admin.v2.RestoreInfo.backup_info", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name="source_info", + full_name="google.bigtable.admin.v2.RestoreInfo.source_info", + index=0, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[], + ), + ], + serialized_start=204, + serialized_end=359, +) + _TABLE_CLUSTERSTATE = _descriptor.Descriptor( name="ClusterState", @@ -174,6 +373,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="replication_state", @@ -192,6 +392,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -202,8 +403,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=427, - serialized_end=653, + serialized_start=676, + serialized_end=925, ) _TABLE_CLUSTERSTATESENTRY = _descriptor.Descriptor( @@ -212,6 +413,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="key", @@ -222,7 +424,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -230,6 +432,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="value", @@ -248,18 +451,19 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], nested_types=[], enum_types=[], - serialized_options=_b("8\001"), + serialized_options=b"8\001", is_extendable=False, syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=655, - serialized_end=753, + serialized_start=927, + serialized_end=1025, ) _TABLE_COLUMNFAMILIESENTRY = _descriptor.Descriptor( @@ -268,6 +472,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="key", @@ -278,7 +483,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -286,6 +491,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="value", @@ -304,18 +510,19 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + 
create_key=_descriptor._internal_create_key, ), ], extensions=[], nested_types=[], enum_types=[], - serialized_options=_b("8\001"), + serialized_options=b"8\001", is_extendable=False, syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=755, - serialized_end=848, + serialized_start=1027, + serialized_end=1120, ) _TABLE = _descriptor.Descriptor( @@ -324,6 +531,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -334,7 +542,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -342,6 +550,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="cluster_states", @@ -360,6 +569,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="column_families", @@ -378,6 +588,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="granularity", @@ -396,6 +607,26 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="restore_info", + full_name="google.bigtable.admin.v2.Table.restore_info", + index=4, + number=6, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -405,13 +636,13 @@ _TABLE_COLUMNFAMILIESENTRY, ], enum_types=[_TABLE_TIMESTAMPGRANULARITY, _TABLE_VIEW,], - serialized_options=None, + serialized_options=b"\352AW\n\035bigtable.googleapis.com/Table\0226projects/{project}/instances/{instance}/tables/{table}", is_extendable=False, syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=174, - serialized_end=1017, + serialized_start=362, + serialized_end=1381, ) @@ -421,6 +652,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="gc_rule", @@ -439,6 +671,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -449,8 +682,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1019, - serialized_end=1084, + serialized_start=1383, + serialized_end=1448, ) @@ -460,6 +693,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="rules", @@ -478,6 +712,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -488,8 +723,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1299, - serialized_end=1362, + serialized_start=1663, + serialized_end=1726, ) _GCRULE_UNION = _descriptor.Descriptor( @@ -498,6 +733,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="rules", @@ -516,6 +752,7 @@ extension_scope=None, 
serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -526,8 +763,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1364, - serialized_end=1420, + serialized_start=1728, + serialized_end=1784, ) _GCRULE = _descriptor.Descriptor( @@ -536,6 +773,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="max_num_versions", @@ -554,6 +792,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="max_age", @@ -572,6 +811,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="intersection", @@ -590,6 +830,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="union", @@ -608,6 +849,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -623,11 +865,12 @@ full_name="google.bigtable.admin.v2.GcRule.rule", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], ), ], - serialized_start=1087, - serialized_end=1428, + serialized_start=1451, + serialized_end=1792, ) @@ -637,6 +880,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -647,7 +891,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -655,6 +899,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="source_table", @@ -673,6 +918,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="data_size_bytes", @@ -691,6 +937,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="create_time", @@ -709,6 +956,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="delete_time", @@ -727,6 +975,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="state", @@ -745,6 +994,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="description", @@ -755,7 +1005,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -763,20 +1013,282 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], nested_types=[], enum_types=[_SNAPSHOT_STATE,], + serialized_options=b"\352As\n bigtable.googleapis.com/Snapshot\022Oprojects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}", + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], 
+ serialized_start=1795, + serialized_end=2250, +) + + +_BACKUP = _descriptor.Descriptor( + name="Backup", + full_name="google.bigtable.admin.v2.Backup", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.bigtable.admin.v2.Backup.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\003", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="source_table", + full_name="google.bigtable.admin.v2.Backup.source_table", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\005\340A\002", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="expire_time", + full_name="google.bigtable.admin.v2.Backup.expire_time", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\002", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="start_time", + full_name="google.bigtable.admin.v2.Backup.start_time", + index=3, + number=4, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\003", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="end_time", + full_name="google.bigtable.admin.v2.Backup.end_time", + index=4, + number=5, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\003", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="size_bytes", + full_name="google.bigtable.admin.v2.Backup.size_bytes", + index=5, + number=6, + type=3, + cpp_type=2, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\003", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="state", + full_name="google.bigtable.admin.v2.Backup.state", + index=6, + number=7, + type=14, + cpp_type=8, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\003", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[_BACKUP_STATE,], + serialized_options=b"\352Am\n\036bigtable.googleapis.com/Backup\022Kprojects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}", + 
is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=2253, + serialized_end=2724, +) + + +_BACKUPINFO = _descriptor.Descriptor( + name="BackupInfo", + full_name="google.bigtable.admin.v2.BackupInfo", + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name="backup", + full_name="google.bigtable.admin.v2.BackupInfo.backup", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\003", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="start_time", + full_name="google.bigtable.admin.v2.BackupInfo.start_time", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\003", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="end_time", + full_name="google.bigtable.admin.v2.BackupInfo.end_time", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\003", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="source_table", + full_name="google.bigtable.admin.v2.BackupInfo.source_table", + index=3, + number=4, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=b"".decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=b"\340A\003", + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1431, - serialized_end=1766, + serialized_start=2727, + serialized_end=2891, ) +_RESTOREINFO.fields_by_name["source_type"].enum_type = _RESTORESOURCETYPE +_RESTOREINFO.fields_by_name["backup_info"].message_type = _BACKUPINFO +_RESTOREINFO.oneofs_by_name["source_info"].fields.append( + _RESTOREINFO.fields_by_name["backup_info"] +) +_RESTOREINFO.fields_by_name[ + "backup_info" +].containing_oneof = _RESTOREINFO.oneofs_by_name["source_info"] _TABLE_CLUSTERSTATE.fields_by_name[ "replication_state" ].enum_type = _TABLE_CLUSTERSTATE_REPLICATIONSTATE @@ -789,6 +1301,7 @@ _TABLE.fields_by_name["cluster_states"].message_type = _TABLE_CLUSTERSTATESENTRY _TABLE.fields_by_name["column_families"].message_type = _TABLE_COLUMNFAMILIESENTRY _TABLE.fields_by_name["granularity"].enum_type = _TABLE_TIMESTAMPGRANULARITY +_TABLE.fields_by_name["restore_info"].message_type = _RESTOREINFO _TABLE_TIMESTAMPGRANULARITY.containing_type = _TABLE _TABLE_VIEW.containing_type = _TABLE _COLUMNFAMILY.fields_by_name["gc_rule"].message_type = _GCRULE @@ -820,68 +1333,105 @@ ].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP _SNAPSHOT.fields_by_name["state"].enum_type = _SNAPSHOT_STATE _SNAPSHOT_STATE.containing_type = _SNAPSHOT 
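A minimal usage sketch of the Backup message whose descriptor is added above — not part of the patch, shown only for orientation. It assumes the google.cloud.bigtable_admin_v2.proto.table_pb2 module path used elsewhere in this diff, and the resource names are placeholders:

# Illustrative only; not part of the generated file.
from google.protobuf import timestamp_pb2
from google.cloud.bigtable_admin_v2.proto import table_pb2  # assumed path, per this patch

expire = timestamp_pb2.Timestamp()
expire.GetCurrentTime()  # placeholder; a real expire_time must be 6 hours to 30 days out

backup = table_pb2.Backup(
    source_table="projects/{project}/instances/{instance}/tables/{table}",  # placeholder name
    expire_time=expire,
)
# name, start_time, end_time, size_bytes and state are output-only and are
# populated by the service, not by the caller.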
+_BACKUP.fields_by_name[ + "expire_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_BACKUP.fields_by_name[ + "start_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_BACKUP.fields_by_name[ + "end_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_BACKUP.fields_by_name["state"].enum_type = _BACKUP_STATE +_BACKUP_STATE.containing_type = _BACKUP +_BACKUPINFO.fields_by_name[ + "start_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_BACKUPINFO.fields_by_name[ + "end_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +DESCRIPTOR.message_types_by_name["RestoreInfo"] = _RESTOREINFO DESCRIPTOR.message_types_by_name["Table"] = _TABLE DESCRIPTOR.message_types_by_name["ColumnFamily"] = _COLUMNFAMILY DESCRIPTOR.message_types_by_name["GcRule"] = _GCRULE DESCRIPTOR.message_types_by_name["Snapshot"] = _SNAPSHOT +DESCRIPTOR.message_types_by_name["Backup"] = _BACKUP +DESCRIPTOR.message_types_by_name["BackupInfo"] = _BACKUPINFO +DESCRIPTOR.enum_types_by_name["RestoreSourceType"] = _RESTORESOURCETYPE _sym_db.RegisterFileDescriptor(DESCRIPTOR) +RestoreInfo = _reflection.GeneratedProtocolMessageType( + "RestoreInfo", + (_message.Message,), + { + "DESCRIPTOR": _RESTOREINFO, + "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", + "__doc__": """Information about a table restore. + Attributes: + source_type: + The type of the restore source. + source_info: + Information about the source used to restore the table. + backup_info: + Information about the backup used to restore the table. The + backup may no longer exist. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.RestoreInfo) + }, +) +_sym_db.RegisterMessage(RestoreInfo) + Table = _reflection.GeneratedProtocolMessageType( "Table", (_message.Message,), - dict( - ClusterState=_reflection.GeneratedProtocolMessageType( + { + "ClusterState": _reflection.GeneratedProtocolMessageType( "ClusterState", (_message.Message,), - dict( - DESCRIPTOR=_TABLE_CLUSTERSTATE, - __module__="google.cloud.bigtable.admin_v2.proto.table_pb2", - __doc__="""The state of a table's data in a particular cluster. - - + { + "DESCRIPTOR": _TABLE_CLUSTERSTATE, + "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", + "__doc__": """The state of a table’s data in a particular cluster. Attributes: replication_state: - (\ ``OutputOnly``) The state of replication for the table in - this cluster. + Output only. The state of replication for the table in this + cluster. 
""", # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table.ClusterState) - ), + }, ), - ClusterStatesEntry=_reflection.GeneratedProtocolMessageType( + "ClusterStatesEntry": _reflection.GeneratedProtocolMessageType( "ClusterStatesEntry", (_message.Message,), - dict( - DESCRIPTOR=_TABLE_CLUSTERSTATESENTRY, - __module__="google.cloud.bigtable.admin_v2.proto.table_pb2" + { + "DESCRIPTOR": _TABLE_CLUSTERSTATESENTRY, + "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2" # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table.ClusterStatesEntry) - ), + }, ), - ColumnFamiliesEntry=_reflection.GeneratedProtocolMessageType( + "ColumnFamiliesEntry": _reflection.GeneratedProtocolMessageType( "ColumnFamiliesEntry", (_message.Message,), - dict( - DESCRIPTOR=_TABLE_COLUMNFAMILIESENTRY, - __module__="google.cloud.bigtable.admin_v2.proto.table_pb2" + { + "DESCRIPTOR": _TABLE_COLUMNFAMILIESENTRY, + "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2" # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table.ColumnFamiliesEntry) - ), - ), - DESCRIPTOR=_TABLE, - __module__="google.cloud.bigtable.admin_v2.proto.table_pb2", - __doc__="""A collection of user data indexed by row, column, and - timestamp. Each table is served using the resources of its parent - cluster. - - + }, + ), + "DESCRIPTOR": _TABLE, + "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", + "__doc__": """A collection of user data indexed by row, column, and timestamp. Each + table is served using the resources of its parent cluster. Attributes: name: - (\ ``OutputOnly``) The unique name of the table. Values are of - the form ``projects//instances//tables/[_a- + Output only. The unique name of the table. Values are of the + form ``projects//instances//tables/[_a- zA-Z0-9][-_.a-zA-Z0-9]*``. Views: ``NAME_ONLY``, ``SCHEMA_VIEW``, ``REPLICATION_VIEW``, ``FULL`` cluster_states: - (\ ``OutputOnly``) Map from cluster ID to per-cluster table - state. If it could not be determined whether or not the table - has data in a particular cluster (for example, if its zone is + Output only. Map from cluster ID to per-cluster table state. + If it could not be determined whether or not the table has + data in a particular cluster (for example, if its zone is unavailable), then there will be an entry for the cluster with UNKNOWN ``replication_status``. Views: ``REPLICATION_VIEW``, ``FULL`` @@ -894,10 +1444,14 @@ which timestamps are stored in this table. Timestamps not matching the granularity will be rejected. If unspecified at creation time, the value will be set to ``MILLIS``. Views: - ``SCHEMA_VIEW``, ``FULL`` + ``SCHEMA_VIEW``, ``FULL``. + restore_info: + Output only. If this table was restored from another data + source (e.g. a backup), this field will be populated with + information about the restore. """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table) - ), + }, ) _sym_db.RegisterMessage(Table) _sym_db.RegisterMessage(Table.ClusterState) @@ -907,72 +1461,60 @@ ColumnFamily = _reflection.GeneratedProtocolMessageType( "ColumnFamily", (_message.Message,), - dict( - DESCRIPTOR=_COLUMNFAMILY, - __module__="google.cloud.bigtable.admin_v2.proto.table_pb2", - __doc__="""A set of columns within a table which share a common - configuration. - - + { + "DESCRIPTOR": _COLUMNFAMILY, + "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", + "__doc__": """A set of columns within a table which share a common configuration. 
Attributes: gc_rule: Garbage collection rule specified as a protobuf. Must serialize to at most 500 bytes. NOTE: Garbage collection - executes opportunistically in the background, and so it's + executes opportunistically in the background, and so it’s possible for reads to return a cell even if it matches the active GC expression for its family. """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ColumnFamily) - ), + }, ) _sym_db.RegisterMessage(ColumnFamily) GcRule = _reflection.GeneratedProtocolMessageType( "GcRule", (_message.Message,), - dict( - Intersection=_reflection.GeneratedProtocolMessageType( + { + "Intersection": _reflection.GeneratedProtocolMessageType( "Intersection", (_message.Message,), - dict( - DESCRIPTOR=_GCRULE_INTERSECTION, - __module__="google.cloud.bigtable.admin_v2.proto.table_pb2", - __doc__="""A GcRule which deletes cells matching all of the given - rules. - - + { + "DESCRIPTOR": _GCRULE_INTERSECTION, + "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", + "__doc__": """A GcRule which deletes cells matching all of the given rules. Attributes: rules: Only delete cells which would be deleted by every element of ``rules``. """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GcRule.Intersection) - ), + }, ), - Union=_reflection.GeneratedProtocolMessageType( + "Union": _reflection.GeneratedProtocolMessageType( "Union", (_message.Message,), - dict( - DESCRIPTOR=_GCRULE_UNION, - __module__="google.cloud.bigtable.admin_v2.proto.table_pb2", - __doc__="""A GcRule which deletes cells matching any of the given - rules. - - + { + "DESCRIPTOR": _GCRULE_UNION, + "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", + "__doc__": """A GcRule which deletes cells matching any of the given rules. Attributes: rules: Delete cells which would be deleted by any element of ``rules``. """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GcRule.Union) - ), - ), - DESCRIPTOR=_GCRULE, - __module__="google.cloud.bigtable.admin_v2.proto.table_pb2", - __doc__="""Rule for determining which cells to delete during garbage - collection. - - + }, + ), + "DESCRIPTOR": _GCRULE, + "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", + "__doc__": """Rule for determining which cells to delete during garbage collection. Attributes: rule: Garbage collection rules. @@ -988,7 +1530,7 @@ Delete cells that would be deleted by any nested rule. """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GcRule) - ), + }, ) _sym_db.RegisterMessage(GcRule) _sym_db.RegisterMessage(GcRule.Intersection) @@ -997,52 +1539,134 @@ Snapshot = _reflection.GeneratedProtocolMessageType( "Snapshot", (_message.Message,), - dict( - DESCRIPTOR=_SNAPSHOT, - __module__="google.cloud.bigtable.admin_v2.proto.table_pb2", - __doc__="""A snapshot of a table at a particular time. A snapshot can - be used as a checkpoint for data restoration or a data source for a new - table. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. - This feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or - deprecation policy. - - + { + "DESCRIPTOR": _SNAPSHOT, + "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", + "__doc__": """A snapshot of a table at a particular time. A snapshot can be used as + a checkpoint for data restoration or a data source for a new table. 
+ Note: This is a private alpha release of Cloud Bigtable snapshots. + This feature is not currently available to most Cloud Bigtable + customers. This feature might be changed in backward-incompatible ways + and is not recommended for production use. It is not subject to any + SLA or deprecation policy. Attributes: name: - (\ ``OutputOnly``) The unique name of the snapshot. Values are - of the form ``projects//instances//clusters - //snapshots/``. + Output only. The unique name of the snapshot. Values are of + the form ``projects//instances//clusters//snapshots/``. source_table: - (\ ``OutputOnly``) The source table at the time the snapshot - was taken. + Output only. The source table at the time the snapshot was + taken. data_size_bytes: - (\ ``OutputOnly``) The size of the data in the source table at - the time the snapshot was taken. In some cases, this value may - be computed asynchronously via a background process and a + Output only. The size of the data in the source table at the + time the snapshot was taken. In some cases, this value may be + computed asynchronously via a background process and a placeholder of 0 will be used in the meantime. create_time: - (\ ``OutputOnly``) The time when the snapshot is created. + Output only. The time when the snapshot is created. delete_time: - (\ ``OutputOnly``) The time when the snapshot will be deleted. - The maximum amount of time a snapshot can stay active is 365 - days. If 'ttl' is not specified, the default maximum of 365 - days will be used. + Output only. The time when the snapshot will be deleted. The + maximum amount of time a snapshot can stay active is 365 days. + If ‘ttl’ is not specified, the default maximum of 365 days + will be used. state: - (\ ``OutputOnly``) The current state of the snapshot. + Output only. The current state of the snapshot. description: - (\ ``OutputOnly``) Description of the snapshot. + Output only. Description of the snapshot. """, # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Snapshot) - ), + }, ) _sym_db.RegisterMessage(Snapshot) +Backup = _reflection.GeneratedProtocolMessageType( + "Backup", + (_message.Message,), + { + "DESCRIPTOR": _BACKUP, + "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", + "__doc__": """A backup of a Cloud Bigtable table. + Attributes: + name: + Output only. A globally unique identifier for the backup which + cannot be changed. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/ + backups/[_a-zA-Z0-9][-_.a-zA-Z0-9]*`` The final segment of the + name must be between 1 and 50 characters in length. The + backup is stored in the cluster identified by the prefix of + the backup name of the form ``projects/{project}/instances/{in + stance}/clusters/{cluster}``. + source_table: + Required. Immutable. Name of the table from which this backup + was created. This needs to be in the same instance as the + backup. Values are of the form ``projects/{project}/instances/ + {instance}/tables/{source_table}``. + expire_time: + Required. The expiration time of the backup, with microseconds + granularity that must be at least 6 hours and at most 30 days + from the time the request is received. Once the + ``expire_time`` has passed, Cloud Bigtable will delete the + backup and free the resources used by the backup. + start_time: + Output only. ``start_time`` is the time that the backup was + started (i.e. approximately the time the [CreateBackup][google + .bigtable.admin.v2.BigtableTableAdmin.CreateBackup] request is + received). 
The row data in this backup will be no older than + this timestamp. + end_time: + Output only. ``end_time`` is the time that the backup was + finished. The row data in the backup will be no newer than + this timestamp. + size_bytes: + Output only. Size of the backup in bytes. + state: + Output only. The current state of the backup. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Backup) + }, +) +_sym_db.RegisterMessage(Backup) + +BackupInfo = _reflection.GeneratedProtocolMessageType( + "BackupInfo", + (_message.Message,), + { + "DESCRIPTOR": _BACKUPINFO, + "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", + "__doc__": """Information about a backup. + Attributes: + backup: + Output only. Name of the backup. + start_time: + Output only. The time that the backup was started. Row data in + the backup will be no older than this timestamp. + end_time: + Output only. This time that the backup was finished. Row data + in the backup will be no newer than this timestamp. + source_table: + Output only. Name of the table the backup was created from. + """, + # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.BackupInfo) + }, +) +_sym_db.RegisterMessage(BackupInfo) + DESCRIPTOR._options = None _TABLE_CLUSTERSTATESENTRY._options = None _TABLE_COLUMNFAMILIESENTRY._options = None +_TABLE._options = None +_SNAPSHOT._options = None +_BACKUP.fields_by_name["name"]._options = None +_BACKUP.fields_by_name["source_table"]._options = None +_BACKUP.fields_by_name["expire_time"]._options = None +_BACKUP.fields_by_name["start_time"]._options = None +_BACKUP.fields_by_name["end_time"]._options = None +_BACKUP.fields_by_name["size_bytes"]._options = None +_BACKUP.fields_by_name["state"]._options = None +_BACKUP._options = None +_BACKUPINFO.fields_by_name["backup"]._options = None +_BACKUPINFO.fields_by_name["start_time"]._options = None +_BACKUPINFO.fields_by_name["end_time"]._options = None +_BACKUPINFO.fields_by_name["source_table"]._options = None # @@protoc_insertion_point(module_scope) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types.py index 2b149637e634..7dbb939d1639 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types.py @@ -22,6 +22,7 @@ from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2 from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2 +from google.cloud.bigtable_admin_v2.proto import common_pb2 from google.cloud.bigtable_admin_v2.proto import instance_pb2 from google.cloud.bigtable_admin_v2.proto import table_pb2 from google.iam.v1 import iam_policy_pb2 @@ -54,6 +55,7 @@ _local_modules = [ bigtable_instance_admin_pb2, bigtable_table_admin_pb2, + common_pb2, instance_pb2, table_pb2, ] diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py index a649c8cf4f59..8c31017cc47f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py @@ -25,8 +25,8 @@ if sys.version_info[:2] == (2, 7): message = ( - "A future version of this library will drop support for Python 2.7." - "More details about Python 2 support for Google Cloud Client Libraries" + "A future version of this library will drop support for Python 2.7. 
" + "More details about Python 2 support for Google Cloud Client Libraries " "can be found at https://cloud.google.com/python/docs/python2-sunset/" ) warnings.warn(message, DeprecationWarning) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py index abe6130df88c..f02e0048f5bb 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py @@ -224,8 +224,9 @@ def read_rows( ... pass Args: - table_name (str): Required. The unique name of the table from which to read. Values are of - the form ``projects//instances//tables/
``. + table_name (str): Required. The unique name of the table from which to read. Values + are of the form + ``projects//instances//tables/
``. app_profile_id (str): This value specifies routing for replication. If not specified, the "default" application profile will be used. rows (Union[dict, ~google.cloud.bigtable_v2.types.RowSet]): The row keys and/or ranges to read. If not specified, reads from all rows. @@ -319,8 +320,8 @@ def sample_row_keys( ... pass Args: - table_name (str): Required. The unique name of the table from which to sample row keys. - Values are of the form + table_name (str): Required. The unique name of the table from which to sample row + keys. Values are of the form ``projects//instances//tables/
``. app_profile_id (str): This value specifies routing for replication. If not specified, the "default" application profile will be used. @@ -404,8 +405,8 @@ def mutate_row( >>> response = client.mutate_row(table_name, row_key, mutations) Args: - table_name (str): Required. The unique name of the table to which the mutation should be - applied. Values are of the form + table_name (str): Required. The unique name of the table to which the mutation should + be applied. Values are of the form ``projects//instances//tables/
``. row_key (bytes): Required. The key of the row to which the mutation should be applied. mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Required. Changes to be atomically applied to the specified row. Entries are applied @@ -587,16 +588,16 @@ def check_and_mutate_row( >>> response = client.check_and_mutate_row(table_name, row_key) Args: - table_name (str): Required. The unique name of the table to which the conditional mutation - should be applied. Values are of the form + table_name (str): Required. The unique name of the table to which the conditional + mutation should be applied. Values are of the form ``projects//instances//tables/
``. row_key (bytes): Required. The key of the row to which the conditional mutation should be applied. app_profile_id (str): This value specifies routing for replication. If not specified, the "default" application profile will be used. - predicate_filter (Union[dict, ~google.cloud.bigtable_v2.types.RowFilter]): The filter to be applied to the contents of the specified row. Depending - on whether or not any results are yielded, either ``true_mutations`` or - ``false_mutations`` will be executed. If unset, checks that the row - contains any values at all. + predicate_filter (Union[dict, ~google.cloud.bigtable_v2.types.RowFilter]): The filter to be applied to the contents of the specified row. + Depending on whether or not any results are yielded, either + ``true_mutations`` or ``false_mutations`` will be executed. If unset, + checks that the row contains any values at all. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.bigtable_v2.types.RowFilter` @@ -704,8 +705,8 @@ def read_modify_write_row( >>> response = client.read_modify_write_row(table_name, row_key, rules) Args: - table_name (str): Required. The unique name of the table to which the read/modify/write - rules should be applied. Values are of the form + table_name (str): Required. The unique name of the table to which the + read/modify/write rules should be applied. Values are of the form ``projects//instances//tables/
``. row_key (bytes): Required. The key of the row to which the read/modify/write rules should be applied. rules (list[Union[dict, ~google.cloud.bigtable_v2.types.ReadModifyWriteRule]]): Required. Rules specifying how the specified row's contents are to be transformed diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client_config.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client_config.py index 3096f33e0c68..8a57847bf8d9 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client_config.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client_config.py @@ -46,22 +46,22 @@ "methods": { "ReadRows": { "timeout_millis": 43200000, - "retry_codes_name": "idempotent", + "retry_codes_name": "non_idempotent", "retry_params_name": "read_rows_params", }, "SampleRowKeys": { - "timeout_millis": 20000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", + "timeout_millis": 60000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "non_idempotent_params", }, "MutateRow": { - "timeout_millis": 20000, + "timeout_millis": 60000, "retry_codes_name": "idempotent", "retry_params_name": "idempotent_params", }, "MutateRows": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", + "timeout_millis": 600000, + "retry_codes_name": "non_idempotent", "retry_params_name": "mutate_rows_params", }, "CheckAndMutateRow": { diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py index 59fb73a65fa7..9da778ae74e5 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: google/cloud/bigtable_v2/proto/bigtable.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -30,12 +27,9 @@ name="google/cloud/bigtable_v2/proto/bigtable.proto", package="google.bigtable.v2", syntax="proto3", - serialized_options=_b( - "\n\026com.google.bigtable.v2B\rBigtableProtoP\001Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2\352AW\n\035bigtable.googleapis.com/Table\0226projects/{project}/instances/{instance}/tables/{table}" - ), - serialized_pb=_b( - '\n-google/cloud/bigtable_v2/proto/bigtable.proto\x12\x12google.bigtable.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a)google/cloud/bigtable_v2/proto/data.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x17google/rpc/status.proto"\xd1\x01\n\x0fReadRowsRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x05 \x01(\t\x12(\n\x04rows\x18\x02 \x01(\x0b\x32\x1a.google.bigtable.v2.RowSet\x12-\n\x06\x66ilter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x12\n\nrows_limit\x18\x04 \x01(\x03"\xf8\x02\n\x10ReadRowsResponse\x12>\n\x06\x63hunks\x18\x01 \x03(\x0b\x32..google.bigtable.v2.ReadRowsResponse.CellChunk\x12\x1c\n\x14last_scanned_row_key\x18\x02 \x01(\x0c\x1a\x85\x02\n\tCellChunk\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x31\n\x0b\x66\x61mily_name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\tqualifier\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.BytesValue\x12\x18\n\x10timestamp_micros\x18\x04 \x01(\x03\x12\x0e\n\x06labels\x18\x05 \x03(\t\x12\r\n\x05value\x18\x06 \x01(\x0c\x12\x12\n\nvalue_size\x18\x07 \x01(\x05\x12\x13\n\treset_row\x18\x08 \x01(\x08H\x00\x12\x14\n\ncommit_row\x18\t \x01(\x08H\x00\x42\x0c\n\nrow_status"i\n\x14SampleRowKeysRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t">\n\x15SampleRowKeysResponse\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x14\n\x0coffset_bytes\x18\x02 \x01(\x03"\xb1\x01\n\x10MutateRowRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x04 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12\x34\n\tmutations\x18\x03 \x03(\x0b\x32\x1c.google.bigtable.v2.MutationB\x03\xe0\x41\x02"\x13\n\x11MutateRowResponse"\xf9\x01\n\x11MutateRowsRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x03 \x01(\t\x12\x41\n\x07\x65ntries\x18\x02 \x03(\x0b\x32+.google.bigtable.v2.MutateRowsRequest.EntryB\x03\xe0\x41\x02\x1aN\n\x05\x45ntry\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x34\n\tmutations\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.MutationB\x03\xe0\x41\x02"\x8f\x01\n\x12MutateRowsResponse\x12=\n\x07\x65ntries\x18\x01 \x03(\x0b\x32,.google.bigtable.v2.MutateRowsResponse.Entry\x1a:\n\x05\x45ntry\x12\r\n\x05index\x18\x01 \x01(\x03\x12"\n\x06status\x18\x02 \x01(\x0b\x32\x12.google.rpc.Status"\xa9\x02\n\x18\x43heckAndMutateRowRequest\x12\x39\n\ntable_name\x18\x01 
\x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x07 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12\x37\n\x10predicate_filter\x18\x06 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x34\n\x0etrue_mutations\x18\x04 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\x12\x35\n\x0f\x66\x61lse_mutations\x18\x05 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation"6\n\x19\x43heckAndMutateRowResponse\x12\x19\n\x11predicate_matched\x18\x01 \x01(\x08"\xc1\x01\n\x19ReadModifyWriteRowRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x04 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12;\n\x05rules\x18\x03 \x03(\x0b\x32\'.google.bigtable.v2.ReadModifyWriteRuleB\x03\xe0\x41\x02"B\n\x1aReadModifyWriteRowResponse\x12$\n\x03row\x18\x01 \x01(\x0b\x32\x17.google.bigtable.v2.Row2\xc4\x0e\n\x08\x42igtable\x12\xc6\x01\n\x08ReadRows\x12#.google.bigtable.v2.ReadRowsRequest\x1a$.google.bigtable.v2.ReadRowsResponse"m\x82\xd3\xe4\x93\x02>"9/v2/{table_name=projects/*/instances/*/tables/*}:readRows:\x01*\xda\x41\ntable_name\xda\x41\x19table_name,app_profile_id0\x01\x12\xd7\x01\n\rSampleRowKeys\x12(.google.bigtable.v2.SampleRowKeysRequest\x1a).google.bigtable.v2.SampleRowKeysResponse"o\x82\xd3\xe4\x93\x02@\x12>/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys\xda\x41\ntable_name\xda\x41\x19table_name,app_profile_id0\x01\x12\xed\x01\n\tMutateRow\x12$.google.bigtable.v2.MutateRowRequest\x1a%.google.bigtable.v2.MutateRowResponse"\x92\x01\x82\xd3\xe4\x93\x02?":/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow:\x01*\xda\x41\x1ctable_name,row_key,mutations\xda\x41+table_name,row_key,mutations,app_profile_id\x12\xde\x01\n\nMutateRows\x12%.google.bigtable.v2.MutateRowsRequest\x1a&.google.bigtable.v2.MutateRowsResponse"\x7f\x82\xd3\xe4\x93\x02@";/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows:\x01*\xda\x41\x12table_name,entries\xda\x41!table_name,entries,app_profile_id0\x01\x12\xd9\x02\n\x11\x43heckAndMutateRow\x12,.google.bigtable.v2.CheckAndMutateRowRequest\x1a-.google.bigtable.v2.CheckAndMutateRowResponse"\xe6\x01\x82\xd3\xe4\x93\x02G"B/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow:\x01*\xda\x41\x42table_name,row_key,predicate_filter,true_mutations,false_mutations\xda\x41Qtable_name,row_key,predicate_filter,true_mutations,false_mutations,app_profile_id\x12\x89\x02\n\x12ReadModifyWriteRow\x12-.google.bigtable.v2.ReadModifyWriteRowRequest\x1a..google.bigtable.v2.ReadModifyWriteRowResponse"\x93\x01\x82\xd3\xe4\x93\x02H"C/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow:\x01*\xda\x41\x18table_name,row_key,rules\xda\x41\'table_name,row_key,rules,app_profile_id\x1a\xdb\x02\xca\x41\x17\x62igtable.googleapis.com\xd2\x41\xbd\x02https://www.googleapis.com/auth/bigtable.data,https://www.googleapis.com/auth/bigtable.data.readonly,https://www.googleapis.com/auth/cloud-bigtable.data,https://www.googleapis.com/auth/cloud-bigtable.data.readonly,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-onlyB\xf5\x01\n\x16\x63om.google.bigtable.v2B\rBigtableProtoP\x01Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2\xea\x41W\n\x1d\x62igtable.googleapis.com/Table\x12\x36projects/{project}/instances/{instance}/tables/{table}b\x06proto3' - ), + 
serialized_options=b"\n\026com.google.bigtable.v2B\rBigtableProtoP\001Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2\352AW\n\035bigtable.googleapis.com/Table\0226projects/{project}/instances/{instance}/tables/{table}", + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n-google/cloud/bigtable_v2/proto/bigtable.proto\x12\x12google.bigtable.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a)google/cloud/bigtable_v2/proto/data.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x17google/rpc/status.proto"\xd1\x01\n\x0fReadRowsRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x05 \x01(\t\x12(\n\x04rows\x18\x02 \x01(\x0b\x32\x1a.google.bigtable.v2.RowSet\x12-\n\x06\x66ilter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x12\n\nrows_limit\x18\x04 \x01(\x03"\xf8\x02\n\x10ReadRowsResponse\x12>\n\x06\x63hunks\x18\x01 \x03(\x0b\x32..google.bigtable.v2.ReadRowsResponse.CellChunk\x12\x1c\n\x14last_scanned_row_key\x18\x02 \x01(\x0c\x1a\x85\x02\n\tCellChunk\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x31\n\x0b\x66\x61mily_name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\tqualifier\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.BytesValue\x12\x18\n\x10timestamp_micros\x18\x04 \x01(\x03\x12\x0e\n\x06labels\x18\x05 \x03(\t\x12\r\n\x05value\x18\x06 \x01(\x0c\x12\x12\n\nvalue_size\x18\x07 \x01(\x05\x12\x13\n\treset_row\x18\x08 \x01(\x08H\x00\x12\x14\n\ncommit_row\x18\t \x01(\x08H\x00\x42\x0c\n\nrow_status"i\n\x14SampleRowKeysRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t">\n\x15SampleRowKeysResponse\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x14\n\x0coffset_bytes\x18\x02 \x01(\x03"\xb1\x01\n\x10MutateRowRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x04 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12\x34\n\tmutations\x18\x03 \x03(\x0b\x32\x1c.google.bigtable.v2.MutationB\x03\xe0\x41\x02"\x13\n\x11MutateRowResponse"\xf9\x01\n\x11MutateRowsRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x03 \x01(\t\x12\x41\n\x07\x65ntries\x18\x02 \x03(\x0b\x32+.google.bigtable.v2.MutateRowsRequest.EntryB\x03\xe0\x41\x02\x1aN\n\x05\x45ntry\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x34\n\tmutations\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.MutationB\x03\xe0\x41\x02"\x8f\x01\n\x12MutateRowsResponse\x12=\n\x07\x65ntries\x18\x01 \x03(\x0b\x32,.google.bigtable.v2.MutateRowsResponse.Entry\x1a:\n\x05\x45ntry\x12\r\n\x05index\x18\x01 \x01(\x03\x12"\n\x06status\x18\x02 \x01(\x0b\x32\x12.google.rpc.Status"\xa9\x02\n\x18\x43heckAndMutateRowRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x07 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12\x37\n\x10predicate_filter\x18\x06 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x34\n\x0etrue_mutations\x18\x04 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\x12\x35\n\x0f\x66\x61lse_mutations\x18\x05 
\x03(\x0b\x32\x1c.google.bigtable.v2.Mutation"6\n\x19\x43heckAndMutateRowResponse\x12\x19\n\x11predicate_matched\x18\x01 \x01(\x08"\xc1\x01\n\x19ReadModifyWriteRowRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x04 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12;\n\x05rules\x18\x03 \x03(\x0b\x32\'.google.bigtable.v2.ReadModifyWriteRuleB\x03\xe0\x41\x02"B\n\x1aReadModifyWriteRowResponse\x12$\n\x03row\x18\x01 \x01(\x0b\x32\x17.google.bigtable.v2.Row2\xc4\x0e\n\x08\x42igtable\x12\xc6\x01\n\x08ReadRows\x12#.google.bigtable.v2.ReadRowsRequest\x1a$.google.bigtable.v2.ReadRowsResponse"m\x82\xd3\xe4\x93\x02>"9/v2/{table_name=projects/*/instances/*/tables/*}:readRows:\x01*\xda\x41\ntable_name\xda\x41\x19table_name,app_profile_id0\x01\x12\xd7\x01\n\rSampleRowKeys\x12(.google.bigtable.v2.SampleRowKeysRequest\x1a).google.bigtable.v2.SampleRowKeysResponse"o\x82\xd3\xe4\x93\x02@\x12>/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys\xda\x41\ntable_name\xda\x41\x19table_name,app_profile_id0\x01\x12\xed\x01\n\tMutateRow\x12$.google.bigtable.v2.MutateRowRequest\x1a%.google.bigtable.v2.MutateRowResponse"\x92\x01\x82\xd3\xe4\x93\x02?":/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow:\x01*\xda\x41\x1ctable_name,row_key,mutations\xda\x41+table_name,row_key,mutations,app_profile_id\x12\xde\x01\n\nMutateRows\x12%.google.bigtable.v2.MutateRowsRequest\x1a&.google.bigtable.v2.MutateRowsResponse"\x7f\x82\xd3\xe4\x93\x02@";/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows:\x01*\xda\x41\x12table_name,entries\xda\x41!table_name,entries,app_profile_id0\x01\x12\xd9\x02\n\x11\x43heckAndMutateRow\x12,.google.bigtable.v2.CheckAndMutateRowRequest\x1a-.google.bigtable.v2.CheckAndMutateRowResponse"\xe6\x01\x82\xd3\xe4\x93\x02G"B/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow:\x01*\xda\x41\x42table_name,row_key,predicate_filter,true_mutations,false_mutations\xda\x41Qtable_name,row_key,predicate_filter,true_mutations,false_mutations,app_profile_id\x12\x89\x02\n\x12ReadModifyWriteRow\x12-.google.bigtable.v2.ReadModifyWriteRowRequest\x1a..google.bigtable.v2.ReadModifyWriteRowResponse"\x93\x01\x82\xd3\xe4\x93\x02H"C/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow:\x01*\xda\x41\x18table_name,row_key,rules\xda\x41\'table_name,row_key,rules,app_profile_id\x1a\xdb\x02\xca\x41\x17\x62igtable.googleapis.com\xd2\x41\xbd\x02https://www.googleapis.com/auth/bigtable.data,https://www.googleapis.com/auth/bigtable.data.readonly,https://www.googleapis.com/auth/cloud-bigtable.data,https://www.googleapis.com/auth/cloud-bigtable.data.readonly,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-onlyB\xf5\x01\n\x16\x63om.google.bigtable.v2B\rBigtableProtoP\x01Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2\xea\x41W\n\x1d\x62igtable.googleapis.com/Table\x12\x36projects/{project}/instances/{instance}/tables/{table}b\x06proto3', dependencies=[ google_dot_api_dot_annotations__pb2.DESCRIPTOR, google_dot_api_dot_client__pb2.DESCRIPTOR, @@ -54,6 +48,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="table_name", @@ -64,16 +59,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + 
default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b( - "\340A\002\372A\037\n\035bigtable.googleapis.com/Table" - ), + serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="app_profile_id", @@ -84,7 +78,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -92,6 +86,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="rows", @@ -110,6 +105,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="filter", @@ -128,6 +124,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="rows_limit", @@ -146,6 +143,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -167,6 +165,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="row_key", @@ -177,7 +176,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b(""), + default_value=b"", message_type=None, enum_type=None, containing_type=None, @@ -185,6 +184,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="family_name", @@ -203,6 +203,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="qualifier", @@ -221,6 +222,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="timestamp_micros", @@ -239,6 +241,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="labels", @@ -257,6 +260,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="value", @@ -267,7 +271,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b(""), + default_value=b"", message_type=None, enum_type=None, containing_type=None, @@ -275,6 +279,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="value_size", @@ -293,6 +298,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="reset_row", @@ -311,6 +317,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="commit_row", @@ -329,6 +336,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -344,6 +352,7 @@ full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.row_status", index=0, containing_type=None, + 
create_key=_descriptor._internal_create_key, fields=[], ), ], @@ -357,6 +366,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="chunks", @@ -375,6 +385,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="last_scanned_row_key", @@ -385,7 +396,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b(""), + default_value=b"", message_type=None, enum_type=None, containing_type=None, @@ -393,6 +404,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -414,6 +426,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="table_name", @@ -424,16 +437,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b( - "\340A\002\372A\037\n\035bigtable.googleapis.com/Table" - ), + serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="app_profile_id", @@ -444,7 +456,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -452,6 +464,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -473,6 +486,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="row_key", @@ -483,7 +497,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b(""), + default_value=b"", message_type=None, enum_type=None, containing_type=None, @@ -491,6 +505,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="offset_bytes", @@ -509,6 +524,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -530,6 +546,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="table_name", @@ -540,16 +557,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b( - "\340A\002\372A\037\n\035bigtable.googleapis.com/Table" - ), + serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="app_profile_id", @@ -560,7 +576,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -568,6 +584,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), 
_descriptor.FieldDescriptor( name="row_key", @@ -578,14 +595,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b(""), + default_value=b"", message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\002"), + serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="mutations", @@ -602,8 +620,9 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\002"), + serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -625,6 +644,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], extensions=[], nested_types=[], @@ -645,6 +665,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="row_key", @@ -655,7 +676,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b(""), + default_value=b"", message_type=None, enum_type=None, containing_type=None, @@ -663,6 +684,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="mutations", @@ -679,8 +701,9 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\002"), + serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -701,6 +724,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="table_name", @@ -711,16 +735,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b( - "\340A\002\372A\037\n\035bigtable.googleapis.com/Table" - ), + serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="app_profile_id", @@ -731,7 +754,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -739,6 +762,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="entries", @@ -755,8 +779,9 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\002"), + serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -778,6 +803,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="index", @@ -796,6 +822,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="status", @@ -814,6 +841,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -834,6 +862,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + 
create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="entries", @@ -852,6 +881,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -873,6 +903,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="table_name", @@ -883,16 +914,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b( - "\340A\002\372A\037\n\035bigtable.googleapis.com/Table" - ), + serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="app_profile_id", @@ -903,7 +933,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -911,6 +941,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="row_key", @@ -921,14 +952,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b(""), + default_value=b"", message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\002"), + serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="predicate_filter", @@ -947,6 +979,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="true_mutations", @@ -965,6 +998,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="false_mutations", @@ -983,6 +1017,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1004,6 +1039,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="predicate_matched", @@ -1022,6 +1058,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1043,6 +1080,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="table_name", @@ -1053,16 +1091,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b( - "\340A\002\372A\037\n\035bigtable.googleapis.com/Table" - ), + serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="app_profile_id", @@ -1073,7 +1110,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1081,6 +1118,7 @@ 
extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="row_key", @@ -1091,14 +1129,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b(""), + default_value=b"", message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\002"), + serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="rules", @@ -1115,8 +1154,9 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\340A\002"), + serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1138,6 +1178,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="row", @@ -1156,6 +1197,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1249,12 +1291,10 @@ ReadRowsRequest = _reflection.GeneratedProtocolMessageType( "ReadRowsRequest", (_message.Message,), - dict( - DESCRIPTOR=_READROWSREQUEST, - __module__="google.cloud.bigtable_v2.proto.bigtable_pb2", - __doc__="""Request message for Bigtable.ReadRows. - - + { + "DESCRIPTOR": _READROWSREQUEST, + "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", + "__doc__": """Request message for Bigtable.ReadRows. Attributes: table_name: Required. The unique name of the table from which to read. @@ -1262,7 +1302,7 @@ ``projects//instances//tables/
``. app_profile_id: This value specifies routing for replication. If not - specified, the "default" application profile will be used. + specified, the “default” application profile will be used. rows: The row keys and/or ranges to read. If not specified, reads from all rows. @@ -1270,28 +1310,26 @@ The filter to apply to the contents of the specified row(s). If unset, reads the entirety of each row. rows_limit: - The read will terminate after committing to N rows' worth of + The read will terminate after committing to N rows’ worth of results. The default (zero) is to return all results. """, # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadRowsRequest) - ), + }, ) _sym_db.RegisterMessage(ReadRowsRequest) ReadRowsResponse = _reflection.GeneratedProtocolMessageType( "ReadRowsResponse", (_message.Message,), - dict( - CellChunk=_reflection.GeneratedProtocolMessageType( + { + "CellChunk": _reflection.GeneratedProtocolMessageType( "CellChunk", (_message.Message,), - dict( - DESCRIPTOR=_READROWSRESPONSE_CELLCHUNK, - __module__="google.cloud.bigtable_v2.proto.bigtable_pb2", - __doc__="""Specifies a piece of a row's contents returned as part of the read + { + "DESCRIPTOR": _READROWSRESPONSE_CELLCHUNK, + "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", + "__doc__": """Specifies a piece of a row’s contents returned as part of the read response stream. - - Attributes: row_key: The row key for this chunk of data. If the row key is empty, @@ -1312,7 +1350,7 @@ empty so clients must check for the presence of this message, not just for ``qualifier.value`` being non-empty. timestamp_micros: - The cell's stored timestamp, which also uniquely identifies it + The cell’s stored timestamp, which also uniquely identifies it within its column. Values are always expressed in microseconds, but individual tables may set a coarser granularity to further restrict the allowed values. For @@ -1332,7 +1370,7 @@ CellChunk came in a previous ReadRowsResponse. value_size: If this CellChunk is part of a chunked cell value and this is - not the final chunk of that cell, value\_size will be set to + not the final chunk of that cell, value_size will be set to the total length of the cell value. The client can use this size to pre-allocate memory to hold the full cell value. row_status: @@ -1345,16 +1383,14 @@ chunks for ``row_key``, as its data has been fully read. """, # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadRowsResponse.CellChunk) - ), + }, ), - DESCRIPTOR=_READROWSRESPONSE, - __module__="google.cloud.bigtable_v2.proto.bigtable_pb2", - __doc__="""Response message for Bigtable.ReadRows. - - + "DESCRIPTOR": _READROWSRESPONSE, + "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", + "__doc__": """Response message for Bigtable.ReadRows. Attributes: chunks: - A collection of a row's contents as part of the read request. + A collection of a row’s contents as part of the read request. last_scanned_row_key: Optionally the server might return the row key of the last row it has scanned. The client can use this to construct a more @@ -1365,7 +1401,7 @@ key, allowing the client to skip that work on a retry. 
""", # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadRowsResponse) - ), + }, ) _sym_db.RegisterMessage(ReadRowsResponse) _sym_db.RegisterMessage(ReadRowsResponse.CellChunk) @@ -1373,12 +1409,10 @@ SampleRowKeysRequest = _reflection.GeneratedProtocolMessageType( "SampleRowKeysRequest", (_message.Message,), - dict( - DESCRIPTOR=_SAMPLEROWKEYSREQUEST, - __module__="google.cloud.bigtable_v2.proto.bigtable_pb2", - __doc__="""Request message for Bigtable.SampleRowKeys. - - + { + "DESCRIPTOR": _SAMPLEROWKEYSREQUEST, + "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", + "__doc__": """Request message for Bigtable.SampleRowKeys. Attributes: table_name: Required. The unique name of the table from which to sample @@ -1386,28 +1420,26 @@ ``projects//instances//tables/
``. app_profile_id: This value specifies routing for replication. If not - specified, the "default" application profile will be used. + specified, the “default” application profile will be used. """, # @@protoc_insertion_point(class_scope:google.bigtable.v2.SampleRowKeysRequest) - ), + }, ) _sym_db.RegisterMessage(SampleRowKeysRequest) SampleRowKeysResponse = _reflection.GeneratedProtocolMessageType( "SampleRowKeysResponse", (_message.Message,), - dict( - DESCRIPTOR=_SAMPLEROWKEYSRESPONSE, - __module__="google.cloud.bigtable_v2.proto.bigtable_pb2", - __doc__="""Response message for Bigtable.SampleRowKeys. - - + { + "DESCRIPTOR": _SAMPLEROWKEYSRESPONSE, + "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", + "__doc__": """Response message for Bigtable.SampleRowKeys. Attributes: row_key: Sorted streamed sequence of sample row keys in the table. The table might have contents before the first row key in the list and after the last one, but a key containing the empty string - indicates "end of table" and will be the last response given, + indicates “end of table” and will be the last response given, if present. Note that row keys in this list may not have ever been written to or read from, and users should therefore not make any assumptions about the row key structure that are @@ -1419,19 +1451,17 @@ equal to the difference in their ``offset_bytes`` fields. """, # @@protoc_insertion_point(class_scope:google.bigtable.v2.SampleRowKeysResponse) - ), + }, ) _sym_db.RegisterMessage(SampleRowKeysResponse) MutateRowRequest = _reflection.GeneratedProtocolMessageType( "MutateRowRequest", (_message.Message,), - dict( - DESCRIPTOR=_MUTATEROWREQUEST, - __module__="google.cloud.bigtable_v2.proto.bigtable_pb2", - __doc__="""Request message for Bigtable.MutateRow. - - + { + "DESCRIPTOR": _MUTATEROWREQUEST, + "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", + "__doc__": """Request message for Bigtable.MutateRow. Attributes: table_name: Required. The unique name of the table to which the mutation @@ -1439,7 +1469,7 @@ ``projects//instances//tables/
``. app_profile_id: This value specifies routing for replication. If not - specified, the "default" application profile will be used. + specified, the “default” application profile will be used. row_key: Required. The key of the row to which the mutation should be applied. @@ -1450,37 +1480,33 @@ one entry and at most 100000. """, # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowRequest) - ), + }, ) _sym_db.RegisterMessage(MutateRowRequest) MutateRowResponse = _reflection.GeneratedProtocolMessageType( "MutateRowResponse", (_message.Message,), - dict( - DESCRIPTOR=_MUTATEROWRESPONSE, - __module__="google.cloud.bigtable_v2.proto.bigtable_pb2", - __doc__="""Response message for Bigtable.MutateRow. - - """, + { + "DESCRIPTOR": _MUTATEROWRESPONSE, + "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", + "__doc__": """Response message for Bigtable.MutateRow.""", # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowResponse) - ), + }, ) _sym_db.RegisterMessage(MutateRowResponse) MutateRowsRequest = _reflection.GeneratedProtocolMessageType( "MutateRowsRequest", (_message.Message,), - dict( - Entry=_reflection.GeneratedProtocolMessageType( + { + "Entry": _reflection.GeneratedProtocolMessageType( "Entry", (_message.Message,), - dict( - DESCRIPTOR=_MUTATEROWSREQUEST_ENTRY, - __module__="google.cloud.bigtable_v2.proto.bigtable_pb2", - __doc__="""A mutation for a given row. - - + { + "DESCRIPTOR": _MUTATEROWSREQUEST_ENTRY, + "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", + "__doc__": """A mutation for a given row. Attributes: row_key: The key of the row to which the ``mutations`` should be @@ -1492,20 +1518,18 @@ least one mutation. """, # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsRequest.Entry) - ), + }, ), - DESCRIPTOR=_MUTATEROWSREQUEST, - __module__="google.cloud.bigtable_v2.proto.bigtable_pb2", - __doc__="""Request message for BigtableService.MutateRows. - - + "DESCRIPTOR": _MUTATEROWSREQUEST, + "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", + "__doc__": """Request message for BigtableService.MutateRows. Attributes: table_name: Required. The unique name of the table to which the mutations should be applied. app_profile_id: This value specifies routing for replication. If not - specified, the "default" application profile will be used. + specified, the “default” application profile will be used. entries: Required. The row keys and corresponding mutations to be applied in bulk. Each entry is applied as an atomic mutation, @@ -1515,7 +1539,7 @@ mutations. """, # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsRequest) - ), + }, ) _sym_db.RegisterMessage(MutateRowsRequest) _sym_db.RegisterMessage(MutateRowsRequest.Entry) @@ -1523,20 +1547,17 @@ MutateRowsResponse = _reflection.GeneratedProtocolMessageType( "MutateRowsResponse", (_message.Message,), - dict( - Entry=_reflection.GeneratedProtocolMessageType( + { + "Entry": _reflection.GeneratedProtocolMessageType( "Entry", (_message.Message,), - dict( - DESCRIPTOR=_MUTATEROWSRESPONSE_ENTRY, - __module__="google.cloud.bigtable_v2.proto.bigtable_pb2", - __doc__="""The result of applying a passed mutation in the original - request. - - + { + "DESCRIPTOR": _MUTATEROWSRESPONSE_ENTRY, + "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", + "__doc__": """The result of applying a passed mutation in the original request. 
Attributes: index: - The index into the original request's ``entries`` list of the + The index into the original request’s ``entries`` list of the Entry for which a result is being reported. status: The result of the request Entry identified by ``index``. @@ -1546,19 +1567,17 @@ reported for both entries. """, # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsResponse.Entry) - ), + }, ), - DESCRIPTOR=_MUTATEROWSRESPONSE, - __module__="google.cloud.bigtable_v2.proto.bigtable_pb2", - __doc__="""Response message for BigtableService.MutateRows. - - + "DESCRIPTOR": _MUTATEROWSRESPONSE, + "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", + "__doc__": """Response message for BigtableService.MutateRows. Attributes: entries: One or more results for Entries from the batch request. """, # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsResponse) - ), + }, ) _sym_db.RegisterMessage(MutateRowsResponse) _sym_db.RegisterMessage(MutateRowsResponse.Entry) @@ -1566,12 +1585,10 @@ CheckAndMutateRowRequest = _reflection.GeneratedProtocolMessageType( "CheckAndMutateRowRequest", (_message.Message,), - dict( - DESCRIPTOR=_CHECKANDMUTATEROWREQUEST, - __module__="google.cloud.bigtable_v2.proto.bigtable_pb2", - __doc__="""Request message for Bigtable.CheckAndMutateRow. - - + { + "DESCRIPTOR": _CHECKANDMUTATEROWREQUEST, + "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", + "__doc__": """Request message for Bigtable.CheckAndMutateRow. Attributes: table_name: Required. The unique name of the table to which the @@ -1579,7 +1596,7 @@ ``projects//instances//tables/
``. app_profile_id: This value specifies routing for replication. If not - specified, the "default" application profile will be used. + specified, the “default” application profile will be used. row_key: Required. The key of the row to which the conditional mutation should be applied. @@ -1604,38 +1621,34 @@ 100000. """, # @@protoc_insertion_point(class_scope:google.bigtable.v2.CheckAndMutateRowRequest) - ), + }, ) _sym_db.RegisterMessage(CheckAndMutateRowRequest) CheckAndMutateRowResponse = _reflection.GeneratedProtocolMessageType( "CheckAndMutateRowResponse", (_message.Message,), - dict( - DESCRIPTOR=_CHECKANDMUTATEROWRESPONSE, - __module__="google.cloud.bigtable_v2.proto.bigtable_pb2", - __doc__="""Response message for Bigtable.CheckAndMutateRow. - - + { + "DESCRIPTOR": _CHECKANDMUTATEROWRESPONSE, + "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", + "__doc__": """Response message for Bigtable.CheckAndMutateRow. Attributes: predicate_matched: - Whether or not the request's ``predicate_filter`` yielded any + Whether or not the request’s ``predicate_filter`` yielded any results for the specified row. """, # @@protoc_insertion_point(class_scope:google.bigtable.v2.CheckAndMutateRowResponse) - ), + }, ) _sym_db.RegisterMessage(CheckAndMutateRowResponse) ReadModifyWriteRowRequest = _reflection.GeneratedProtocolMessageType( "ReadModifyWriteRowRequest", (_message.Message,), - dict( - DESCRIPTOR=_READMODIFYWRITEROWREQUEST, - __module__="google.cloud.bigtable_v2.proto.bigtable_pb2", - __doc__="""Request message for Bigtable.ReadModifyWriteRow. - - + { + "DESCRIPTOR": _READMODIFYWRITEROWREQUEST, + "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", + "__doc__": """Request message for Bigtable.ReadModifyWriteRow. Attributes: table_name: Required. The unique name of the table to which the @@ -1644,37 +1657,35 @@ ``projects//instances//tables/
``. app_profile_id: This value specifies routing for replication. If not - specified, the "default" application profile will be used. + specified, the “default” application profile will be used. row_key: Required. The key of the row to which the read/modify/write rules should be applied. rules: - Required. Rules specifying how the specified row's contents + Required. Rules specifying how the specified row’s contents are to be transformed into writes. Entries are applied in order, meaning that earlier rules will affect the results of later ones. """, # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadModifyWriteRowRequest) - ), + }, ) _sym_db.RegisterMessage(ReadModifyWriteRowRequest) ReadModifyWriteRowResponse = _reflection.GeneratedProtocolMessageType( "ReadModifyWriteRowResponse", (_message.Message,), - dict( - DESCRIPTOR=_READMODIFYWRITEROWRESPONSE, - __module__="google.cloud.bigtable_v2.proto.bigtable_pb2", - __doc__="""Response message for Bigtable.ReadModifyWriteRow. - - + { + "DESCRIPTOR": _READMODIFYWRITEROWRESPONSE, + "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", + "__doc__": """Response message for Bigtable.ReadModifyWriteRow. Attributes: row: A Row containing the new contents of all cells modified by the request. """, # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadModifyWriteRowResponse) - ), + }, ) _sym_db.RegisterMessage(ReadModifyWriteRowResponse) @@ -1699,9 +1710,8 @@ full_name="google.bigtable.v2.Bigtable", file=DESCRIPTOR, index=0, - serialized_options=_b( - "\312A\027bigtable.googleapis.com\322A\275\002https://www.googleapis.com/auth/bigtable.data,https://www.googleapis.com/auth/bigtable.data.readonly,https://www.googleapis.com/auth/cloud-bigtable.data,https://www.googleapis.com/auth/cloud-bigtable.data.readonly,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-only" - ), + serialized_options=b"\312A\027bigtable.googleapis.com\322A\275\002https://www.googleapis.com/auth/bigtable.data,https://www.googleapis.com/auth/bigtable.data.readonly,https://www.googleapis.com/auth/cloud-bigtable.data,https://www.googleapis.com/auth/cloud-bigtable.data.readonly,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-only", + create_key=_descriptor._internal_create_key, serialized_start=2266, serialized_end=4126, methods=[ @@ -1712,9 +1722,8 @@ containing_service=None, input_type=_READROWSREQUEST, output_type=_READROWSRESPONSE, - serialized_options=_b( - '\202\323\344\223\002>"9/v2/{table_name=projects/*/instances/*/tables/*}:readRows:\001*\332A\ntable_name\332A\031table_name,app_profile_id' - ), + serialized_options=b'\202\323\344\223\002>"9/v2/{table_name=projects/*/instances/*/tables/*}:readRows:\001*\332A\ntable_name\332A\031table_name,app_profile_id', + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="SampleRowKeys", @@ -1723,9 +1732,8 @@ containing_service=None, input_type=_SAMPLEROWKEYSREQUEST, output_type=_SAMPLEROWKEYSRESPONSE, - serialized_options=_b( - "\202\323\344\223\002@\022>/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys\332A\ntable_name\332A\031table_name,app_profile_id" - ), + serialized_options=b"\202\323\344\223\002@\022>/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys\332A\ntable_name\332A\031table_name,app_profile_id", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="MutateRow", @@ -1734,9 +1742,8 @@ containing_service=None, 
input_type=_MUTATEROWREQUEST, output_type=_MUTATEROWRESPONSE, - serialized_options=_b( - '\202\323\344\223\002?":/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow:\001*\332A\034table_name,row_key,mutations\332A+table_name,row_key,mutations,app_profile_id' - ), + serialized_options=b'\202\323\344\223\002?":/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow:\001*\332A\034table_name,row_key,mutations\332A+table_name,row_key,mutations,app_profile_id', + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="MutateRows", @@ -1745,9 +1752,8 @@ containing_service=None, input_type=_MUTATEROWSREQUEST, output_type=_MUTATEROWSRESPONSE, - serialized_options=_b( - '\202\323\344\223\002@";/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows:\001*\332A\022table_name,entries\332A!table_name,entries,app_profile_id' - ), + serialized_options=b'\202\323\344\223\002@";/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows:\001*\332A\022table_name,entries\332A!table_name,entries,app_profile_id', + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="CheckAndMutateRow", @@ -1756,9 +1762,8 @@ containing_service=None, input_type=_CHECKANDMUTATEROWREQUEST, output_type=_CHECKANDMUTATEROWRESPONSE, - serialized_options=_b( - '\202\323\344\223\002G"B/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow:\001*\332ABtable_name,row_key,predicate_filter,true_mutations,false_mutations\332AQtable_name,row_key,predicate_filter,true_mutations,false_mutations,app_profile_id' - ), + serialized_options=b'\202\323\344\223\002G"B/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow:\001*\332ABtable_name,row_key,predicate_filter,true_mutations,false_mutations\332AQtable_name,row_key,predicate_filter,true_mutations,false_mutations,app_profile_id', + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="ReadModifyWriteRow", @@ -1767,9 +1772,8 @@ containing_service=None, input_type=_READMODIFYWRITEROWREQUEST, output_type=_READMODIFYWRITEROWRESPONSE, - serialized_options=_b( - "\202\323\344\223\002H\"C/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow:\001*\332A\030table_name,row_key,rules\332A'table_name,row_key,rules,app_profile_id" - ), + serialized_options=b"\202\323\344\223\002H\"C/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow:\001*\332A\030table_name,row_key,rules\332A'table_name,row_key,rules,app_profile_id", + create_key=_descriptor._internal_create_key, ), ], ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py index fb753be1e670..419e147e4fd9 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: google/cloud/bigtable_v2/proto/data.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -19,12 +16,9 @@ name="google/cloud/bigtable_v2/proto/data.proto", package="google.bigtable.v2", syntax="proto3", - serialized_options=_b( - "\n\026com.google.bigtable.v2B\tDataProtoP\001Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2" - ), - serialized_pb=_b( - '\n)google/cloud/bigtable_v2/proto/data.proto\x12\x12google.bigtable.v2"@\n\x03Row\x12\x0b\n\x03key\x18\x01 \x01(\x0c\x12,\n\x08\x66\x61milies\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v2.Family"C\n\x06\x46\x61mily\x12\x0c\n\x04name\x18\x01 \x01(\t\x12+\n\x07\x63olumns\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v2.Column"D\n\x06\x43olumn\x12\x11\n\tqualifier\x18\x01 \x01(\x0c\x12\'\n\x05\x63\x65lls\x18\x02 \x03(\x0b\x32\x18.google.bigtable.v2.Cell"?\n\x04\x43\x65ll\x12\x18\n\x10timestamp_micros\x18\x01 \x01(\x03\x12\r\n\x05value\x18\x02 \x01(\x0c\x12\x0e\n\x06labels\x18\x03 \x03(\t"\x8a\x01\n\x08RowRange\x12\x1a\n\x10start_key_closed\x18\x01 \x01(\x0cH\x00\x12\x18\n\x0estart_key_open\x18\x02 \x01(\x0cH\x00\x12\x16\n\x0c\x65nd_key_open\x18\x03 \x01(\x0cH\x01\x12\x18\n\x0e\x65nd_key_closed\x18\x04 \x01(\x0cH\x01\x42\x0b\n\tstart_keyB\t\n\x07\x65nd_key"L\n\x06RowSet\x12\x10\n\x08row_keys\x18\x01 \x03(\x0c\x12\x30\n\nrow_ranges\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.RowRange"\xc6\x01\n\x0b\x43olumnRange\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12 \n\x16start_qualifier_closed\x18\x02 \x01(\x0cH\x00\x12\x1e\n\x14start_qualifier_open\x18\x03 \x01(\x0cH\x00\x12\x1e\n\x14\x65nd_qualifier_closed\x18\x04 \x01(\x0cH\x01\x12\x1c\n\x12\x65nd_qualifier_open\x18\x05 \x01(\x0cH\x01\x42\x11\n\x0fstart_qualifierB\x0f\n\rend_qualifier"N\n\x0eTimestampRange\x12\x1e\n\x16start_timestamp_micros\x18\x01 \x01(\x03\x12\x1c\n\x14\x65nd_timestamp_micros\x18\x02 \x01(\x03"\x98\x01\n\nValueRange\x12\x1c\n\x12start_value_closed\x18\x01 \x01(\x0cH\x00\x12\x1a\n\x10start_value_open\x18\x02 \x01(\x0cH\x00\x12\x1a\n\x10\x65nd_value_closed\x18\x03 \x01(\x0cH\x01\x12\x18\n\x0e\x65nd_value_open\x18\x04 \x01(\x0cH\x01\x42\r\n\x0bstart_valueB\x0b\n\tend_value"\xdf\x08\n\tRowFilter\x12\x34\n\x05\x63hain\x18\x01 \x01(\x0b\x32#.google.bigtable.v2.RowFilter.ChainH\x00\x12>\n\ninterleave\x18\x02 \x01(\x0b\x32(.google.bigtable.v2.RowFilter.InterleaveH\x00\x12<\n\tcondition\x18\x03 \x01(\x0b\x32\'.google.bigtable.v2.RowFilter.ConditionH\x00\x12\x0e\n\x04sink\x18\x10 \x01(\x08H\x00\x12\x19\n\x0fpass_all_filter\x18\x11 \x01(\x08H\x00\x12\x1a\n\x10\x62lock_all_filter\x18\x12 \x01(\x08H\x00\x12\x1e\n\x14row_key_regex_filter\x18\x04 \x01(\x0cH\x00\x12\x1b\n\x11row_sample_filter\x18\x0e \x01(\x01H\x00\x12"\n\x18\x66\x61mily_name_regex_filter\x18\x05 \x01(\tH\x00\x12\'\n\x1d\x63olumn_qualifier_regex_filter\x18\x06 \x01(\x0cH\x00\x12>\n\x13\x63olumn_range_filter\x18\x07 \x01(\x0b\x32\x1f.google.bigtable.v2.ColumnRangeH\x00\x12\x44\n\x16timestamp_range_filter\x18\x08 \x01(\x0b\x32".google.bigtable.v2.TimestampRangeH\x00\x12\x1c\n\x12value_regex_filter\x18\t \x01(\x0cH\x00\x12<\n\x12value_range_filter\x18\x0f \x01(\x0b\x32\x1e.google.bigtable.v2.ValueRangeH\x00\x12%\n\x1b\x63\x65lls_per_row_offset_filter\x18\n \x01(\x05H\x00\x12$\n\x1a\x63\x65lls_per_row_limit_filter\x18\x0b 
\x01(\x05H\x00\x12\'\n\x1d\x63\x65lls_per_column_limit_filter\x18\x0c \x01(\x05H\x00\x12!\n\x17strip_value_transformer\x18\r \x01(\x08H\x00\x12!\n\x17\x61pply_label_transformer\x18\x13 \x01(\tH\x00\x1a\x37\n\x05\x43hain\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x1a<\n\nInterleave\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x1a\xad\x01\n\tCondition\x12\x37\n\x10predicate_filter\x18\x01 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x32\n\x0btrue_filter\x18\x02 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x33\n\x0c\x66\x61lse_filter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilterB\x08\n\x06\x66ilter"\xc9\x04\n\x08Mutation\x12\x38\n\x08set_cell\x18\x01 \x01(\x0b\x32$.google.bigtable.v2.Mutation.SetCellH\x00\x12K\n\x12\x64\x65lete_from_column\x18\x02 \x01(\x0b\x32-.google.bigtable.v2.Mutation.DeleteFromColumnH\x00\x12K\n\x12\x64\x65lete_from_family\x18\x03 \x01(\x0b\x32-.google.bigtable.v2.Mutation.DeleteFromFamilyH\x00\x12\x45\n\x0f\x64\x65lete_from_row\x18\x04 \x01(\x0b\x32*.google.bigtable.v2.Mutation.DeleteFromRowH\x00\x1a\x61\n\x07SetCell\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x18\n\x10timestamp_micros\x18\x03 \x01(\x03\x12\r\n\x05value\x18\x04 \x01(\x0c\x1ay\n\x10\x44\x65leteFromColumn\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x36\n\ntime_range\x18\x03 \x01(\x0b\x32".google.bigtable.v2.TimestampRange\x1a\'\n\x10\x44\x65leteFromFamily\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x1a\x0f\n\rDeleteFromRowB\n\n\x08mutation"\x80\x01\n\x13ReadModifyWriteRule\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x16\n\x0c\x61ppend_value\x18\x03 \x01(\x0cH\x00\x12\x1a\n\x10increment_amount\x18\x04 \x01(\x03H\x00\x42\x06\n\x04ruleB\x97\x01\n\x16\x63om.google.bigtable.v2B\tDataProtoP\x01Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2b\x06proto3' - ), + serialized_options=b"\n\026com.google.bigtable.v2B\tDataProtoP\001Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2", + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n)google/cloud/bigtable_v2/proto/data.proto\x12\x12google.bigtable.v2"@\n\x03Row\x12\x0b\n\x03key\x18\x01 \x01(\x0c\x12,\n\x08\x66\x61milies\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v2.Family"C\n\x06\x46\x61mily\x12\x0c\n\x04name\x18\x01 \x01(\t\x12+\n\x07\x63olumns\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v2.Column"D\n\x06\x43olumn\x12\x11\n\tqualifier\x18\x01 \x01(\x0c\x12\'\n\x05\x63\x65lls\x18\x02 \x03(\x0b\x32\x18.google.bigtable.v2.Cell"?\n\x04\x43\x65ll\x12\x18\n\x10timestamp_micros\x18\x01 \x01(\x03\x12\r\n\x05value\x18\x02 \x01(\x0c\x12\x0e\n\x06labels\x18\x03 \x03(\t"\x8a\x01\n\x08RowRange\x12\x1a\n\x10start_key_closed\x18\x01 \x01(\x0cH\x00\x12\x18\n\x0estart_key_open\x18\x02 \x01(\x0cH\x00\x12\x16\n\x0c\x65nd_key_open\x18\x03 \x01(\x0cH\x01\x12\x18\n\x0e\x65nd_key_closed\x18\x04 \x01(\x0cH\x01\x42\x0b\n\tstart_keyB\t\n\x07\x65nd_key"L\n\x06RowSet\x12\x10\n\x08row_keys\x18\x01 \x03(\x0c\x12\x30\n\nrow_ranges\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.RowRange"\xc6\x01\n\x0b\x43olumnRange\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12 \n\x16start_qualifier_closed\x18\x02 \x01(\x0cH\x00\x12\x1e\n\x14start_qualifier_open\x18\x03 
\x01(\x0cH\x00\x12\x1e\n\x14\x65nd_qualifier_closed\x18\x04 \x01(\x0cH\x01\x12\x1c\n\x12\x65nd_qualifier_open\x18\x05 \x01(\x0cH\x01\x42\x11\n\x0fstart_qualifierB\x0f\n\rend_qualifier"N\n\x0eTimestampRange\x12\x1e\n\x16start_timestamp_micros\x18\x01 \x01(\x03\x12\x1c\n\x14\x65nd_timestamp_micros\x18\x02 \x01(\x03"\x98\x01\n\nValueRange\x12\x1c\n\x12start_value_closed\x18\x01 \x01(\x0cH\x00\x12\x1a\n\x10start_value_open\x18\x02 \x01(\x0cH\x00\x12\x1a\n\x10\x65nd_value_closed\x18\x03 \x01(\x0cH\x01\x12\x18\n\x0e\x65nd_value_open\x18\x04 \x01(\x0cH\x01\x42\r\n\x0bstart_valueB\x0b\n\tend_value"\xdf\x08\n\tRowFilter\x12\x34\n\x05\x63hain\x18\x01 \x01(\x0b\x32#.google.bigtable.v2.RowFilter.ChainH\x00\x12>\n\ninterleave\x18\x02 \x01(\x0b\x32(.google.bigtable.v2.RowFilter.InterleaveH\x00\x12<\n\tcondition\x18\x03 \x01(\x0b\x32\'.google.bigtable.v2.RowFilter.ConditionH\x00\x12\x0e\n\x04sink\x18\x10 \x01(\x08H\x00\x12\x19\n\x0fpass_all_filter\x18\x11 \x01(\x08H\x00\x12\x1a\n\x10\x62lock_all_filter\x18\x12 \x01(\x08H\x00\x12\x1e\n\x14row_key_regex_filter\x18\x04 \x01(\x0cH\x00\x12\x1b\n\x11row_sample_filter\x18\x0e \x01(\x01H\x00\x12"\n\x18\x66\x61mily_name_regex_filter\x18\x05 \x01(\tH\x00\x12\'\n\x1d\x63olumn_qualifier_regex_filter\x18\x06 \x01(\x0cH\x00\x12>\n\x13\x63olumn_range_filter\x18\x07 \x01(\x0b\x32\x1f.google.bigtable.v2.ColumnRangeH\x00\x12\x44\n\x16timestamp_range_filter\x18\x08 \x01(\x0b\x32".google.bigtable.v2.TimestampRangeH\x00\x12\x1c\n\x12value_regex_filter\x18\t \x01(\x0cH\x00\x12<\n\x12value_range_filter\x18\x0f \x01(\x0b\x32\x1e.google.bigtable.v2.ValueRangeH\x00\x12%\n\x1b\x63\x65lls_per_row_offset_filter\x18\n \x01(\x05H\x00\x12$\n\x1a\x63\x65lls_per_row_limit_filter\x18\x0b \x01(\x05H\x00\x12\'\n\x1d\x63\x65lls_per_column_limit_filter\x18\x0c \x01(\x05H\x00\x12!\n\x17strip_value_transformer\x18\r \x01(\x08H\x00\x12!\n\x17\x61pply_label_transformer\x18\x13 \x01(\tH\x00\x1a\x37\n\x05\x43hain\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x1a<\n\nInterleave\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x1a\xad\x01\n\tCondition\x12\x37\n\x10predicate_filter\x18\x01 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x32\n\x0btrue_filter\x18\x02 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x33\n\x0c\x66\x61lse_filter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilterB\x08\n\x06\x66ilter"\xc9\x04\n\x08Mutation\x12\x38\n\x08set_cell\x18\x01 \x01(\x0b\x32$.google.bigtable.v2.Mutation.SetCellH\x00\x12K\n\x12\x64\x65lete_from_column\x18\x02 \x01(\x0b\x32-.google.bigtable.v2.Mutation.DeleteFromColumnH\x00\x12K\n\x12\x64\x65lete_from_family\x18\x03 \x01(\x0b\x32-.google.bigtable.v2.Mutation.DeleteFromFamilyH\x00\x12\x45\n\x0f\x64\x65lete_from_row\x18\x04 \x01(\x0b\x32*.google.bigtable.v2.Mutation.DeleteFromRowH\x00\x1a\x61\n\x07SetCell\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x18\n\x10timestamp_micros\x18\x03 \x01(\x03\x12\r\n\x05value\x18\x04 \x01(\x0c\x1ay\n\x10\x44\x65leteFromColumn\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x36\n\ntime_range\x18\x03 \x01(\x0b\x32".google.bigtable.v2.TimestampRange\x1a\'\n\x10\x44\x65leteFromFamily\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x1a\x0f\n\rDeleteFromRowB\n\n\x08mutation"\x80\x01\n\x13ReadModifyWriteRule\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x16\n\x0c\x61ppend_value\x18\x03 
\x01(\x0cH\x00\x12\x1a\n\x10increment_amount\x18\x04 \x01(\x03H\x00\x42\x06\n\x04ruleB\x97\x01\n\x16\x63om.google.bigtable.v2B\tDataProtoP\x01Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2b\x06proto3', ) @@ -34,6 +28,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="key", @@ -44,7 +39,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b(""), + default_value=b"", message_type=None, enum_type=None, containing_type=None, @@ -52,6 +47,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="families", @@ -70,6 +66,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -91,6 +88,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -101,7 +99,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -109,6 +107,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="columns", @@ -127,6 +126,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -148,6 +148,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="qualifier", @@ -158,7 +159,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b(""), + default_value=b"", message_type=None, enum_type=None, containing_type=None, @@ -166,6 +167,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="cells", @@ -184,6 +186,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -205,6 +208,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="timestamp_micros", @@ -223,6 +227,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="value", @@ -233,7 +238,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b(""), + default_value=b"", message_type=None, enum_type=None, containing_type=None, @@ -241,6 +246,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="labels", @@ -259,6 +265,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -280,6 +287,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="start_key_closed", @@ -290,7 +298,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b(""), + default_value=b"", message_type=None, enum_type=None, containing_type=None, @@ -298,6 
+306,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="start_key_open", @@ -308,7 +317,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b(""), + default_value=b"", message_type=None, enum_type=None, containing_type=None, @@ -316,6 +325,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="end_key_open", @@ -326,7 +336,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b(""), + default_value=b"", message_type=None, enum_type=None, containing_type=None, @@ -334,6 +344,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="end_key_closed", @@ -344,7 +355,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b(""), + default_value=b"", message_type=None, enum_type=None, containing_type=None, @@ -352,6 +363,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -367,6 +379,7 @@ full_name="google.bigtable.v2.RowRange.start_key", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], ), _descriptor.OneofDescriptor( @@ -374,6 +387,7 @@ full_name="google.bigtable.v2.RowRange.end_key", index=1, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], ), ], @@ -388,6 +402,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="row_keys", @@ -406,6 +421,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="row_ranges", @@ -424,6 +440,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -445,6 +462,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="family_name", @@ -455,7 +473,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -463,6 +481,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="start_qualifier_closed", @@ -473,7 +492,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b(""), + default_value=b"", message_type=None, enum_type=None, containing_type=None, @@ -481,6 +500,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="start_qualifier_open", @@ -491,7 +511,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b(""), + default_value=b"", message_type=None, enum_type=None, containing_type=None, @@ -499,6 +519,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="end_qualifier_closed", @@ -509,7 +530,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b(""), + default_value=b"", message_type=None, enum_type=None, containing_type=None, @@ -517,6 +538,7 @@ 
extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="end_qualifier_open", @@ -527,7 +549,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b(""), + default_value=b"", message_type=None, enum_type=None, containing_type=None, @@ -535,6 +557,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -550,6 +573,7 @@ full_name="google.bigtable.v2.ColumnRange.start_qualifier", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], ), _descriptor.OneofDescriptor( @@ -557,6 +581,7 @@ full_name="google.bigtable.v2.ColumnRange.end_qualifier", index=1, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], ), ], @@ -571,6 +596,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="start_timestamp_micros", @@ -589,6 +615,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="end_timestamp_micros", @@ -607,6 +634,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -628,6 +656,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="start_value_closed", @@ -638,7 +667,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b(""), + default_value=b"", message_type=None, enum_type=None, containing_type=None, @@ -646,6 +675,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="start_value_open", @@ -656,7 +686,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b(""), + default_value=b"", message_type=None, enum_type=None, containing_type=None, @@ -664,6 +694,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="end_value_closed", @@ -674,7 +705,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b(""), + default_value=b"", message_type=None, enum_type=None, containing_type=None, @@ -682,6 +713,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="end_value_open", @@ -692,7 +724,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b(""), + default_value=b"", message_type=None, enum_type=None, containing_type=None, @@ -700,6 +732,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -715,6 +748,7 @@ full_name="google.bigtable.v2.ValueRange.start_value", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], ), _descriptor.OneofDescriptor( @@ -722,6 +756,7 @@ full_name="google.bigtable.v2.ValueRange.end_value", index=1, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], ), ], @@ -736,6 +771,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="filters", @@ -754,6 +790,7 @@ extension_scope=None, 
serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -774,6 +811,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="filters", @@ -792,6 +830,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -812,6 +851,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="predicate_filter", @@ -830,6 +870,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="true_filter", @@ -848,6 +889,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="false_filter", @@ -866,6 +908,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -886,6 +929,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="chain", @@ -904,6 +948,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="interleave", @@ -922,6 +967,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="condition", @@ -940,6 +986,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="sink", @@ -958,6 +1005,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="pass_all_filter", @@ -976,6 +1024,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="block_all_filter", @@ -994,6 +1043,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="row_key_regex_filter", @@ -1004,7 +1054,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b(""), + default_value=b"", message_type=None, enum_type=None, containing_type=None, @@ -1012,6 +1062,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="row_sample_filter", @@ -1030,6 +1081,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="family_name_regex_filter", @@ -1040,7 +1092,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1048,6 +1100,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="column_qualifier_regex_filter", @@ -1058,7 +1111,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b(""), + default_value=b"", message_type=None, enum_type=None, 
containing_type=None, @@ -1066,6 +1119,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="column_range_filter", @@ -1084,6 +1138,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="timestamp_range_filter", @@ -1102,6 +1157,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="value_regex_filter", @@ -1112,7 +1168,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b(""), + default_value=b"", message_type=None, enum_type=None, containing_type=None, @@ -1120,6 +1176,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="value_range_filter", @@ -1138,6 +1195,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="cells_per_row_offset_filter", @@ -1156,6 +1214,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="cells_per_row_limit_filter", @@ -1174,6 +1233,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="cells_per_column_limit_filter", @@ -1192,6 +1252,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="strip_value_transformer", @@ -1210,6 +1271,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="apply_label_transformer", @@ -1220,7 +1282,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1228,6 +1290,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1243,6 +1306,7 @@ full_name="google.bigtable.v2.RowFilter.filter", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], ), ], @@ -1257,6 +1321,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="family_name", @@ -1267,7 +1332,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1275,6 +1340,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="column_qualifier", @@ -1285,7 +1351,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b(""), + default_value=b"", message_type=None, enum_type=None, containing_type=None, @@ -1293,6 +1359,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="timestamp_micros", @@ -1311,6 +1378,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), 
_descriptor.FieldDescriptor( name="value", @@ -1321,7 +1389,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b(""), + default_value=b"", message_type=None, enum_type=None, containing_type=None, @@ -1329,6 +1397,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1349,6 +1418,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="family_name", @@ -1359,7 +1429,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1367,6 +1437,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="column_qualifier", @@ -1377,7 +1448,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b(""), + default_value=b"", message_type=None, enum_type=None, containing_type=None, @@ -1385,6 +1456,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="time_range", @@ -1403,6 +1475,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1423,6 +1496,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="family_name", @@ -1433,7 +1507,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1441,6 +1515,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1461,6 +1536,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], extensions=[], nested_types=[], @@ -1480,6 +1556,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="set_cell", @@ -1498,6 +1575,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="delete_from_column", @@ -1516,6 +1594,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="delete_from_family", @@ -1534,6 +1613,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="delete_from_row", @@ -1552,6 +1632,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1572,6 +1653,7 @@ full_name="google.bigtable.v2.Mutation.mutation", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], ), ], @@ -1586,6 +1668,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="family_name", @@ -1596,7 +1679,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), 
message_type=None, enum_type=None, containing_type=None, @@ -1604,6 +1687,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="column_qualifier", @@ -1614,7 +1698,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b(""), + default_value=b"", message_type=None, enum_type=None, containing_type=None, @@ -1622,6 +1706,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="append_value", @@ -1632,7 +1717,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b(""), + default_value=b"", message_type=None, enum_type=None, containing_type=None, @@ -1640,6 +1725,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="increment_amount", @@ -1658,6 +1744,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1673,6 +1760,7 @@ full_name="google.bigtable.v2.ReadModifyWriteRule.rule", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], ), ], @@ -1938,17 +2026,15 @@ Row = _reflection.GeneratedProtocolMessageType( "Row", (_message.Message,), - dict( - DESCRIPTOR=_ROW, - __module__="google.cloud.bigtable_v2.proto.data_pb2", - __doc__="""Specifies the complete (requested) contents of a single - row of a table. Rows which exceed 256MiB in size cannot be read in full. - - + { + "DESCRIPTOR": _ROW, + "__module__": "google.cloud.bigtable_v2.proto.data_pb2", + "__doc__": """Specifies the complete (requested) contents of a single row of a + table. Rows which exceed 256MiB in size cannot be read in full. Attributes: key: The unique key which identifies this row within its table. - This is the same key that's used to identify the row in, for + This is the same key that’s used to identify the row in, for example, a MutateRowRequest. May contain any non-empty byte string up to 4KiB in length. families: @@ -1956,76 +2042,70 @@ ordering of column families is not specified. """, # @@protoc_insertion_point(class_scope:google.bigtable.v2.Row) - ), + }, ) _sym_db.RegisterMessage(Row) Family = _reflection.GeneratedProtocolMessageType( "Family", (_message.Message,), - dict( - DESCRIPTOR=_FAMILY, - __module__="google.cloud.bigtable_v2.proto.data_pb2", - __doc__="""Specifies (some of) the contents of a single row/column - family intersection of a table. - - + { + "DESCRIPTOR": _FAMILY, + "__module__": "google.cloud.bigtable_v2.proto.data_pb2", + "__doc__": """Specifies (some of) the contents of a single row/column family + intersection of a table. Attributes: name: The unique key which identifies this family within its row. - This is the same key that's used to identify the family in, + This is the same key that’s used to identify the family in, for example, a RowFilter which sets its - "family\_name\_regex\_filter" field. Must match + “family_name_regex_filter” field. Must match ``[-_.a-zA-Z0-9]+``, except that AggregatingRowProcessors may produce cells in a sentinel family with an empty name. Must be no greater than 64 characters in length. columns: - Must not be empty. Sorted in order of increasing "qualifier". + Must not be empty. Sorted in order of increasing “qualifier”. 
""", # @@protoc_insertion_point(class_scope:google.bigtable.v2.Family) - ), + }, ) _sym_db.RegisterMessage(Family) Column = _reflection.GeneratedProtocolMessageType( "Column", (_message.Message,), - dict( - DESCRIPTOR=_COLUMN, - __module__="google.cloud.bigtable_v2.proto.data_pb2", - __doc__="""Specifies (some of) the contents of a single row/column - intersection of a table. - - + { + "DESCRIPTOR": _COLUMN, + "__module__": "google.cloud.bigtable_v2.proto.data_pb2", + "__doc__": """Specifies (some of) the contents of a single row/column intersection + of a table. Attributes: qualifier: The unique key which identifies this column within its family. - This is the same key that's used to identify the column in, + This is the same key that’s used to identify the column in, for example, a RowFilter which sets its ``column_qualifier_regex_filter`` field. May contain any byte string, including the empty string, up to 16kiB in length. cells: Must not be empty. Sorted in order of decreasing - "timestamp\_micros". + “timestamp_micros”. """, # @@protoc_insertion_point(class_scope:google.bigtable.v2.Column) - ), + }, ) _sym_db.RegisterMessage(Column) Cell = _reflection.GeneratedProtocolMessageType( "Cell", (_message.Message,), - dict( - DESCRIPTOR=_CELL, - __module__="google.cloud.bigtable_v2.proto.data_pb2", - __doc__="""Specifies (some of) the contents of a single - row/column/timestamp of a table. - - + { + "DESCRIPTOR": _CELL, + "__module__": "google.cloud.bigtable_v2.proto.data_pb2", + "__doc__": """Specifies (some of) the contents of a single row/column/timestamp of a + table. Attributes: timestamp_micros: - The cell's stored timestamp, which also uniquely identifies it + The cell’s stored timestamp, which also uniquely identifies it within its column. Values are always expressed in microseconds, but individual tables may set a coarser granularity to further restrict the allowed values. For @@ -2040,19 +2120,17 @@ [RowFilter][google.bigtable.v2.RowFilter]. """, # @@protoc_insertion_point(class_scope:google.bigtable.v2.Cell) - ), + }, ) _sym_db.RegisterMessage(Cell) RowRange = _reflection.GeneratedProtocolMessageType( "RowRange", (_message.Message,), - dict( - DESCRIPTOR=_ROWRANGE, - __module__="google.cloud.bigtable_v2.proto.data_pb2", - __doc__="""Specifies a contiguous range of rows. - - + { + "DESCRIPTOR": _ROWRANGE, + "__module__": "google.cloud.bigtable_v2.proto.data_pb2", + "__doc__": """Specifies a contiguous range of rows. Attributes: start_key: The row key at which to start the range. If neither field is @@ -2070,19 +2148,17 @@ Used when giving an inclusive upper bound for the range. """, # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowRange) - ), + }, ) _sym_db.RegisterMessage(RowRange) RowSet = _reflection.GeneratedProtocolMessageType( "RowSet", (_message.Message,), - dict( - DESCRIPTOR=_ROWSET, - __module__="google.cloud.bigtable_v2.proto.data_pb2", - __doc__="""Specifies a non-contiguous set of rows. - - + { + "DESCRIPTOR": _ROWSET, + "__module__": "google.cloud.bigtable_v2.proto.data_pb2", + "__doc__": """Specifies a non-contiguous set of rows. Attributes: row_keys: Single rows included in the set. @@ -2090,22 +2166,20 @@ Contiguous row ranges included in the set. 
""", # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowSet) - ), + }, ) _sym_db.RegisterMessage(RowSet) ColumnRange = _reflection.GeneratedProtocolMessageType( "ColumnRange", (_message.Message,), - dict( - DESCRIPTOR=_COLUMNRANGE, - __module__="google.cloud.bigtable_v2.proto.data_pb2", - __doc__="""Specifies a contiguous range of columns within a single - column family. The range spans from : - to :, where both bounds can be either + { + "DESCRIPTOR": _COLUMNRANGE, + "__module__": "google.cloud.bigtable_v2.proto.data_pb2", + "__doc__": """Specifies a contiguous range of columns within a single column family. + The range spans from : to + :, where both bounds can be either inclusive or exclusive. - - Attributes: family_name: The name of the column family within which this range falls. @@ -2127,19 +2201,17 @@ Used when giving an exclusive upper bound for the range. """, # @@protoc_insertion_point(class_scope:google.bigtable.v2.ColumnRange) - ), + }, ) _sym_db.RegisterMessage(ColumnRange) TimestampRange = _reflection.GeneratedProtocolMessageType( "TimestampRange", (_message.Message,), - dict( - DESCRIPTOR=_TIMESTAMPRANGE, - __module__="google.cloud.bigtable_v2.proto.data_pb2", - __doc__="""Specified a contiguous range of microsecond timestamps. - - + { + "DESCRIPTOR": _TIMESTAMPRANGE, + "__module__": "google.cloud.bigtable_v2.proto.data_pb2", + "__doc__": """Specified a contiguous range of microsecond timestamps. Attributes: start_timestamp_micros: Inclusive lower bound. If left empty, interpreted as 0. @@ -2147,19 +2219,17 @@ Exclusive upper bound. If left empty, interpreted as infinity. """, # @@protoc_insertion_point(class_scope:google.bigtable.v2.TimestampRange) - ), + }, ) _sym_db.RegisterMessage(TimestampRange) ValueRange = _reflection.GeneratedProtocolMessageType( "ValueRange", (_message.Message,), - dict( - DESCRIPTOR=_VALUERANGE, - __module__="google.cloud.bigtable_v2.proto.data_pb2", - __doc__="""Specifies a contiguous range of raw byte values. - - + { + "DESCRIPTOR": _VALUERANGE, + "__module__": "google.cloud.bigtable_v2.proto.data_pb2", + "__doc__": """Specifies a contiguous range of raw byte values. Attributes: start_value: The value at which to start the range. If neither field is @@ -2177,88 +2247,80 @@ Used when giving an exclusive upper bound for the range. """, # @@protoc_insertion_point(class_scope:google.bigtable.v2.ValueRange) - ), + }, ) _sym_db.RegisterMessage(ValueRange) RowFilter = _reflection.GeneratedProtocolMessageType( "RowFilter", (_message.Message,), - dict( - Chain=_reflection.GeneratedProtocolMessageType( + { + "Chain": _reflection.GeneratedProtocolMessageType( "Chain", (_message.Message,), - dict( - DESCRIPTOR=_ROWFILTER_CHAIN, - __module__="google.cloud.bigtable_v2.proto.data_pb2", - __doc__="""A RowFilter which sends rows through several RowFilters in - sequence. - - + { + "DESCRIPTOR": _ROWFILTER_CHAIN, + "__module__": "google.cloud.bigtable_v2.proto.data_pb2", + "__doc__": """A RowFilter which sends rows through several RowFilters in sequence. Attributes: filters: - The elements of "filters" are chained together to process the - input row: in row -> f(0) -> intermediate row -> f(1) -> ... - -> f(N) -> out row The full chain is executed atomically. + The elements of “filters” are chained together to process the + input row: in row -> f(0) -> intermediate row -> f(1) -> … -> + f(N) -> out row The full chain is executed atomically. 
""", # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowFilter.Chain) - ), + }, ), - Interleave=_reflection.GeneratedProtocolMessageType( + "Interleave": _reflection.GeneratedProtocolMessageType( "Interleave", (_message.Message,), - dict( - DESCRIPTOR=_ROWFILTER_INTERLEAVE, - __module__="google.cloud.bigtable_v2.proto.data_pb2", - __doc__="""A RowFilter which sends each row to each of several - component RowFilters and interleaves the results. - - + { + "DESCRIPTOR": _ROWFILTER_INTERLEAVE, + "__module__": "google.cloud.bigtable_v2.proto.data_pb2", + "__doc__": """A RowFilter which sends each row to each of several component + RowFilters and interleaves the results. Attributes: filters: - The elements of "filters" all process a copy of the input row, + The elements of “filters” all process a copy of the input row, and the results are pooled, sorted, and combined into a single output row. If multiple cells are produced with the same column and timestamp, they will all appear in the output row in an unspecified mutual order. Consider the following example, with three filters: :: - input row | + input row | ----------------------------------------------------- | | | f(0) f(1) f(2) - | | | 1: + | | | 1: foo,bar,10,x foo,bar,10,z far,bar,7,a 2: foo,blah,11,z far,blah,5,x - far,blah,5,x | | + far,blah,5,x | | | ----------------------------------------------------- - | 1: foo,bar,10,z // could have - switched with #2 2: foo,bar,10,x // - could have switched with #1 3: - foo,blah,11,z 4: far,bar,7,a 5: - far,blah,5,x // identical to #6 6: + | 1: foo,bar,10,z // could have + switched with #2 2: foo,bar,10,x // + could have switched with #1 3: + foo,blah,11,z 4: far,bar,7,a 5: + far,blah,5,x // identical to #6 6: far,blah,5,x // identical to #5 All interleaved filters are executed atomically. """, # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowFilter.Interleave) - ), + }, ), - Condition=_reflection.GeneratedProtocolMessageType( + "Condition": _reflection.GeneratedProtocolMessageType( "Condition", (_message.Message,), - dict( - DESCRIPTOR=_ROWFILTER_CONDITION, - __module__="google.cloud.bigtable_v2.proto.data_pb2", - __doc__="""A RowFilter which evaluates one of two possible - RowFilters, depending on whether or not a predicate RowFilter outputs - any cells from the input row. - - IMPORTANT NOTE: The predicate filter does not execute atomically with - the true and false filters, which may lead to inconsistent or unexpected - results. Additionally, Condition filters have poor performance, - especially when filters are set for the false condition. - - + { + "DESCRIPTOR": _ROWFILTER_CONDITION, + "__module__": "google.cloud.bigtable_v2.proto.data_pb2", + "__doc__": """A RowFilter which evaluates one of two possible RowFilters, depending + on whether or not a predicate RowFilter outputs any cells from the + input row. IMPORTANT NOTE: The predicate filter does not execute + atomically with the true and false filters, which may lead to + inconsistent or unexpected results. Additionally, Condition filters + have poor performance, especially when filters are set for the false + condition. Attributes: predicate_filter: If ``predicate_filter`` outputs any cells, then @@ -2274,48 +2336,40 @@ be returned in the false case. 
""", # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowFilter.Condition) - ), - ), - DESCRIPTOR=_ROWFILTER, - __module__="google.cloud.bigtable_v2.proto.data_pb2", - __doc__="""Takes a row as input and produces an alternate view of the - row based on specified rules. For example, a RowFilter might trim down a - row to include just the cells from columns matching a given regular - expression, or might return all the cells of a row but not their values. - More complicated filters can be composed out of these components to - express requests such as, "within every column of a particular family, - give just the two most recent cells which are older than timestamp X." - - There are two broad categories of RowFilters (true filters and - transformers), as well as two ways to compose simple filters into more - complex ones (chains and interleaves). They work as follows: - - - True filters alter the input row by excluding some of its cells - wholesale from the output row. An example of a true filter is the - ``value_regex_filter``, which excludes cells whose values don't match - the specified pattern. All regex true filters use RE2 syntax - (https://github.com/google/re2/wiki/Syntax) in raw byte mode - (RE2::Latin1), and are evaluated as full matches. An important point - to keep in mind is that ``RE2(.)`` is equivalent by default to - ``RE2([^\n])``, meaning that it does not match newlines. When - attempting to match an arbitrary byte, you should therefore use the - escape sequence ``\C``, which may need to be further escaped as - ``\\C`` in your client language. - - - Transformers alter the input row by changing the values of some of - its cells in the output, without excluding them completely. - Currently, the only supported transformer is the - ``strip_value_transformer``, which replaces every cell's value with - the empty string. - - - Chains and interleaves are described in more detail in the - RowFilter.Chain and RowFilter.Interleave documentation. - - The total serialized size of a RowFilter message must not exceed 4096 - bytes, and RowFilters may not be nested within each other (in Chains or - Interleaves) to a depth of more than 20. - - + }, + ), + "DESCRIPTOR": _ROWFILTER, + "__module__": "google.cloud.bigtable_v2.proto.data_pb2", + "__doc__": """Takes a row as input and produces an alternate view of the row based + on specified rules. For example, a RowFilter might trim down a row to + include just the cells from columns matching a given regular + expression, or might return all the cells of a row but not their + values. More complicated filters can be composed out of these + components to express requests such as, “within every column of a + particular family, give just the two most recent cells which are older + than timestamp X.” There are two broad categories of RowFilters (true + filters and transformers), as well as two ways to compose simple + filters into more complex ones (chains and interleaves). They work as + follows: - True filters alter the input row by excluding some of its + cells wholesale from the output row. An example of a true filter is + the ``value_regex_filter``, which excludes cells whose values don’t + match the specified pattern. All regex true filters use RE2 syntax + (https://github.com/google/re2/wiki/Syntax) in raw byte mode + (RE2::Latin1), and are evaluated as full matches. An important point + to keep in mind is that ``RE2(.)`` is equivalent by default to + ``RE2([^\n])``, meaning that it does not match newlines. 
When + attempting to match an arbitrary byte, you should therefore use the + escape sequence ``\C``, which may need to be further escaped as + ``\\C`` in your client language. - Transformers alter the input row + by changing the values of some of its cells in the output, without + excluding them completely. Currently, the only supported + transformer is the ``strip_value_transformer``, which replaces + every cell’s value with the empty string. - Chains and + interleaves are described in more detail in the RowFilter.Chain and + RowFilter.Interleave documentation. The total serialized size of a + RowFilter message must not exceed 4096 bytes, and RowFilters may not + be nested within each other (in Chains or Interleaves) to a depth of + more than 20. Attributes: filter: Which of the possible RowFilter types to apply. If none are @@ -2333,32 +2387,30 @@ ADVANCED USE ONLY. Hook for introspection into the RowFilter. Outputs all cells directly to the output of the read rather than to any parent filter. Consider the following example: :: - Chain( FamilyRegex("A"), Interleave( - All(), Chain(Label("foo"), Sink()) ), - QualifierRegex("B") ) A,A,1,w - A,B,2,x B,B,4,z - | FamilyRegex("A") - | A,A,1,w - A,B,2,x | - +------------+-------------+ | - | All() Label(foo) - | | A,A,1,w - A,A,1,w,labels:[foo] A,B,2,x - A,B,2,x,labels:[foo] | - | | Sink() - --------------+ | | - | +------------+ x------+ - A,A,1,w,labels:[foo] | - A,B,2,x,labels:[foo] A,A,1,w - | A,B,2,x - | | - | QualifierRegex("B") - | | - | A,B,2,x - | | - | - +--------------------------------+ - | A,A,1,w,labels:[foo] + Chain( FamilyRegex("A"), Interleave( All(), + Chain(Label("foo"), Sink()) ), QualifierRegex("B") + ) A,A,1,w + A,B,2,x B,B,4,z + | FamilyRegex("A") + | A,A,1,w + A,B,2,x | + +------------+-------------+ | + | All() Label(foo) + | | A,A,1,w + A,A,1,w,labels:[foo] A,B,2,x + A,B,2,x,labels:[foo] | | + | Sink() --------------+ | + | | +------------+ x------+ + A,A,1,w,labels:[foo] | + A,B,2,x,labels:[foo] A,A,1,w + | A,B,2,x | + | | + QualifierRegex("B") | + | | + A,B,2,x | + | | + +--------------------------------+ | + A,A,1,w,labels:[foo] A,B,2,x,labels:[foo] // could be switched A,B,2,x // could be switched Despite being excluded by the qualifier filter, a copy of every cell that @@ -2366,9 +2418,9 @@ [Interleave][google.bigtable.v2.RowFilter.Interleave], duplicate cells are possible, and appear in an unspecified mutual order. In this case we have a duplicate with column - "A:B" and timestamp 2, because one copy passed through the all + “A:B” and timestamp 2, because one copy passed through the all filter while the other was passed through the label and sink. - Note that one copy has label "foo", while the other does not. + Note that one copy has label “foo”, while the other does not. Cannot be used within the ``predicate_filter``, ``true_filter``, or ``false_filter`` of a [Condition][google.bigtable.v2.RowFilter.Condition]. @@ -2435,7 +2487,7 @@ duplicate cells are present, as is possible when using an Interleave, each copy of the cell is counted separately. strip_value_transformer: - Replaces each cell's value with the empty string. + Replaces each cell’s value with the empty string. apply_label_transformer: Applies the given label to all cells in the output row. This allows the client to determine which results were produced @@ -2450,7 +2502,7 @@ may be relaxed in the future. 
""", # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowFilter) - ), + }, ) _sym_db.RegisterMessage(RowFilter) _sym_db.RegisterMessage(RowFilter.Chain) @@ -2460,16 +2512,14 @@ Mutation = _reflection.GeneratedProtocolMessageType( "Mutation", (_message.Message,), - dict( - SetCell=_reflection.GeneratedProtocolMessageType( + { + "SetCell": _reflection.GeneratedProtocolMessageType( "SetCell", (_message.Message,), - dict( - DESCRIPTOR=_MUTATION_SETCELL, - __module__="google.cloud.bigtable_v2.proto.data_pb2", - __doc__="""A Mutation which sets the value of the specified cell. - - + { + "DESCRIPTOR": _MUTATION_SETCELL, + "__module__": "google.cloud.bigtable_v2.proto.data_pb2", + "__doc__": """A Mutation which sets the value of the specified cell. Attributes: family_name: The name of the family into which new data should be written. @@ -2483,23 +2533,21 @@ the client should set this value itself, noting that the default value is a timestamp of zero if the field is left unspecified. Values must match the granularity of the table - (e.g. micros, millis). + (e.g. micros, millis). value: The value to be written into the specified cell. """, # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation.SetCell) - ), + }, ), - DeleteFromColumn=_reflection.GeneratedProtocolMessageType( + "DeleteFromColumn": _reflection.GeneratedProtocolMessageType( "DeleteFromColumn", (_message.Message,), - dict( - DESCRIPTOR=_MUTATION_DELETEFROMCOLUMN, - __module__="google.cloud.bigtable_v2.proto.data_pb2", - __doc__="""A Mutation which deletes cells from the specified column, - optionally restricting the deletions to a given timestamp range. - - + { + "DESCRIPTOR": _MUTATION_DELETEFROMCOLUMN, + "__module__": "google.cloud.bigtable_v2.proto.data_pb2", + "__doc__": """A Mutation which deletes cells from the specified column, optionally + restricting the deletions to a given timestamp range. Attributes: family_name: The name of the family from which cells should be deleted. @@ -2511,50 +2559,41 @@ The range of timestamps within which cells should be deleted. """, # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation.DeleteFromColumn) - ), + }, ), - DeleteFromFamily=_reflection.GeneratedProtocolMessageType( + "DeleteFromFamily": _reflection.GeneratedProtocolMessageType( "DeleteFromFamily", (_message.Message,), - dict( - DESCRIPTOR=_MUTATION_DELETEFROMFAMILY, - __module__="google.cloud.bigtable_v2.proto.data_pb2", - __doc__="""A Mutation which deletes all cells from the specified - column family. - - + { + "DESCRIPTOR": _MUTATION_DELETEFROMFAMILY, + "__module__": "google.cloud.bigtable_v2.proto.data_pb2", + "__doc__": """A Mutation which deletes all cells from the specified column family. Attributes: family_name: The name of the family from which cells should be deleted. Must match ``[-_.a-zA-Z0-9]+`` """, # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation.DeleteFromFamily) - ), + }, ), - DeleteFromRow=_reflection.GeneratedProtocolMessageType( + "DeleteFromRow": _reflection.GeneratedProtocolMessageType( "DeleteFromRow", (_message.Message,), - dict( - DESCRIPTOR=_MUTATION_DELETEFROMROW, - __module__="google.cloud.bigtable_v2.proto.data_pb2", - __doc__="""A Mutation which deletes all cells from the containing - row. 
- - """, + { + "DESCRIPTOR": _MUTATION_DELETEFROMROW, + "__module__": "google.cloud.bigtable_v2.proto.data_pb2", + "__doc__": """A Mutation which deletes all cells from the containing row.""", # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation.DeleteFromRow) - ), - ), - DESCRIPTOR=_MUTATION, - __module__="google.cloud.bigtable_v2.proto.data_pb2", - __doc__="""Specifies a particular change to be made to the contents - of a row. - - + }, + ), + "DESCRIPTOR": _MUTATION, + "__module__": "google.cloud.bigtable_v2.proto.data_pb2", + "__doc__": """Specifies a particular change to be made to the contents of a row. Attributes: mutation: Which of the possible Mutation types to apply. set_cell: - Set a cell's value. + Set a cell’s value. delete_from_column: Deletes cells from a column. delete_from_family: @@ -2563,7 +2602,7 @@ Deletes cells from the entire row. """, # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation) - ), + }, ) _sym_db.RegisterMessage(Mutation) _sym_db.RegisterMessage(Mutation.SetCell) @@ -2574,13 +2613,11 @@ ReadModifyWriteRule = _reflection.GeneratedProtocolMessageType( "ReadModifyWriteRule", (_message.Message,), - dict( - DESCRIPTOR=_READMODIFYWRITERULE, - __module__="google.cloud.bigtable_v2.proto.data_pb2", - __doc__="""Specifies an atomic read/modify/write operation on the - latest value of the specified column. - - + { + "DESCRIPTOR": _READMODIFYWRITERULE, + "__module__": "google.cloud.bigtable_v2.proto.data_pb2", + "__doc__": """Specifies an atomic read/modify/write operation on the latest value of + the specified column. Attributes: family_name: The name of the family to which the read/modify/write should @@ -2590,7 +2627,7 @@ should be applied. Can be any byte string, including the empty string. rule: - The rule used to determine the column's new latest value from + The rule used to determine the column’s new latest value from its current latest value. append_value: Rule specifying that ``append_value`` be appended to the @@ -2604,7 +2641,7 @@ endian signed integer), or the entire request will fail. 
""", # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadModifyWriteRule) - ), + }, ) _sym_db.RegisterMessage(ReadModifyWriteRule) diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index 27cac675ce9a..1e2f874a94f0 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -3,16 +3,30 @@ { "git": { "name": ".", - "remote": "git@github.com:googleapis/python-bigtable.git", - "sha": "e12ffc55933cfd6b40bd2fc6cef899ce78c543b5" + "remote": "https://github.com/googleapis/python-bigtable.git", + "sha": "1ac60be05521b69c924118d40f88e07728a2f75e" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "eafa840ceec23b44a5c21670288107c661252711", - "internalRef": "313488995" + "sha": "3a4894c4f0da3e763aca2c67bd280ae915177450", + "internalRef": "314363155" + } + }, + { + "git": { + "name": "synthtool", + "remote": "https://github.com/googleapis/synthtool.git", + "sha": "f13864cd532f98a4682cec48105580fa9a5c9978" + } + }, + { + "git": { + "name": "synthtool", + "remote": "https://github.com/googleapis/synthtool.git", + "sha": "f13864cd532f98a4682cec48105580fa9a5c9978" } } ], diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py b/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py index e1de090542c6..df083406b4e2 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py @@ -22,7 +22,6 @@ from google.rpc import status_pb2 from google.cloud import bigtable_admin_v2 -from google.cloud.bigtable_admin_v2 import enums from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2 from google.cloud.bigtable_admin_v2.proto import instance_pb2 from google.iam.v1 import iam_policy_pb2 @@ -212,9 +211,9 @@ def test_list_instances_exception(self): def test_update_instance(self): # Setup Expected Response - name_2 = "name2-1052831874" + name = "name3373707" display_name_2 = "displayName21615000987" - expected_response = {"name": name_2, "display_name": display_name_2} + expected_response = {"name": name, "display_name": display_name_2} expected_response = instance_pb2.Instance(**expected_response) # Mock the API response @@ -225,18 +224,13 @@ def test_update_instance(self): client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request - name = client.instance_path("[PROJECT]", "[INSTANCE]") display_name = "displayName1615086568" - type_ = enums.Instance.Type.TYPE_UNSPECIFIED - labels = {} - response = client.update_instance(name, display_name, type_, labels) + response = client.update_instance(display_name) assert expected_response == response assert len(channel.requests) == 1 - expected_request = instance_pb2.Instance( - name=name, display_name=display_name, type=type_, labels=labels - ) + expected_request = instance_pb2.Instance(display_name=display_name) actual_request = channel.requests[0][1] assert expected_request == actual_request @@ -249,13 +243,10 @@ def test_update_instance_exception(self): client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup request - name = client.instance_path("[PROJECT]", "[INSTANCE]") display_name = "displayName1615086568" - type_ = enums.Instance.Type.TYPE_UNSPECIFIED - labels = {} with pytest.raises(CustomException): - 
client.update_instance(name, display_name, type_, labels) + client.update_instance(display_name) def test_partial_update_instance(self): # Setup Expected Response @@ -495,11 +486,11 @@ def test_list_clusters_exception(self): def test_update_cluster(self): # Setup Expected Response - name_2 = "name2-1052831874" + name = "name3373707" location = "location1901043637" serve_nodes_2 = 1623486220 expected_response = { - "name": name_2, + "name": name, "location": location, "serve_nodes": serve_nodes_2, } @@ -517,15 +508,14 @@ def test_update_cluster(self): client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request - name = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") serve_nodes = 1288838783 - response = client.update_cluster(name, serve_nodes) + response = client.update_cluster(serve_nodes) result = response.result() assert expected_response == result assert len(channel.requests) == 1 - expected_request = instance_pb2.Cluster(name=name, serve_nodes=serve_nodes) + expected_request = instance_pb2.Cluster(serve_nodes=serve_nodes) actual_request = channel.requests[0][1] assert expected_request == actual_request @@ -545,10 +535,9 @@ def test_update_cluster_exception(self): client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request - name = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") serve_nodes = 1288838783 - response = client.update_cluster(name, serve_nodes) + response = client.update_cluster(serve_nodes) exception = response.exception() assert exception.errors[0] == error @@ -785,13 +774,12 @@ def test_delete_app_profile(self): # Setup Request name = client.app_profile_path("[PROJECT]", "[INSTANCE]", "[APP_PROFILE]") - ignore_warnings = True - client.delete_app_profile(name, ignore_warnings) + client.delete_app_profile(name) assert len(channel.requests) == 1 expected_request = bigtable_instance_admin_pb2.DeleteAppProfileRequest( - name=name, ignore_warnings=ignore_warnings + name=name ) actual_request = channel.requests[0][1] assert expected_request == actual_request @@ -806,10 +794,9 @@ def test_delete_app_profile_exception(self): # Setup request name = client.app_profile_path("[PROJECT]", "[INSTANCE]", "[APP_PROFILE]") - ignore_warnings = True with pytest.raises(CustomException): - client.delete_app_profile(name, ignore_warnings) + client.delete_app_profile(name) def test_get_iam_policy(self): # Setup Expected Response @@ -826,7 +813,7 @@ def test_get_iam_policy(self): client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request - resource = client.instance_path("[PROJECT]", "[INSTANCE]") + resource = "resource-341064690" response = client.get_iam_policy(resource) assert expected_response == response @@ -845,7 +832,7 @@ def test_get_iam_policy_exception(self): client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup request - resource = client.instance_path("[PROJECT]", "[INSTANCE]") + resource = "resource-341064690" with pytest.raises(CustomException): client.get_iam_policy(resource) @@ -865,7 +852,7 @@ def test_set_iam_policy(self): client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request - resource = client.instance_path("[PROJECT]", "[INSTANCE]") + resource = "resource-341064690" policy = {} response = client.set_iam_policy(resource, policy) @@ -887,7 +874,7 @@ def test_set_iam_policy_exception(self): client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup request - resource = client.instance_path("[PROJECT]", "[INSTANCE]") + resource = "resource-341064690" policy = {} with 
pytest.raises(CustomException): @@ -908,7 +895,7 @@ def test_test_iam_permissions(self): client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup Request - resource = client.instance_path("[PROJECT]", "[INSTANCE]") + resource = "resource-341064690" permissions = [] response = client.test_iam_permissions(resource, permissions) @@ -930,7 +917,7 @@ def test_test_iam_permissions_exception(self): client = bigtable_admin_v2.BigtableInstanceAdminClient() # Setup request - resource = client.instance_path("[PROJECT]", "[INSTANCE]") + resource = "resource-341064690" permissions = [] with pytest.raises(CustomException): diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py b/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py index d1a843164982..42db08579f9d 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py @@ -28,6 +28,7 @@ from google.iam.v1 import policy_pb2 from google.longrunning import operations_pb2 from google.protobuf import empty_pb2 +from google.protobuf import field_mask_pb2 class MultiCallableStub(object): @@ -131,7 +132,9 @@ def test_create_table_from_snapshot(self): # Setup Request parent = client.instance_path("[PROJECT]", "[INSTANCE]") table_id = "tableId-895419604" - source_snapshot = "sourceSnapshot-947679896" + source_snapshot = client.snapshot_path( + "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" + ) response = client.create_table_from_snapshot(parent, table_id, source_snapshot) result = response.result() @@ -162,7 +165,9 @@ def test_create_table_from_snapshot_exception(self): # Setup Request parent = client.instance_path("[PROJECT]", "[INSTANCE]") table_id = "tableId-895419604" - source_snapshot = "sourceSnapshot-947679896" + source_snapshot = client.snapshot_path( + "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" + ) response = client.create_table_from_snapshot(parent, table_id, source_snapshot) exception = response.exception() @@ -456,7 +461,7 @@ def test_get_iam_policy(self): client = bigtable_admin_v2.BigtableTableAdminClient() # Setup Request - resource = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + resource = "resource-341064690" response = client.get_iam_policy(resource) assert expected_response == response @@ -475,7 +480,7 @@ def test_get_iam_policy_exception(self): client = bigtable_admin_v2.BigtableTableAdminClient() # Setup request - resource = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + resource = "resource-341064690" with pytest.raises(CustomException): client.get_iam_policy(resource) @@ -495,7 +500,7 @@ def test_set_iam_policy(self): client = bigtable_admin_v2.BigtableTableAdminClient() # Setup Request - resource = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + resource = "resource-341064690" policy = {} response = client.set_iam_policy(resource, policy) @@ -517,7 +522,7 @@ def test_set_iam_policy_exception(self): client = bigtable_admin_v2.BigtableTableAdminClient() # Setup request - resource = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + resource = "resource-341064690" policy = {} with pytest.raises(CustomException): @@ -538,7 +543,7 @@ def test_test_iam_permissions(self): client = bigtable_admin_v2.BigtableTableAdminClient() # Setup Request - resource = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + resource = "resource-341064690" permissions = [] 
response = client.test_iam_permissions(resource, permissions) @@ -560,7 +565,7 @@ def test_test_iam_permissions_exception(self): client = bigtable_admin_v2.BigtableTableAdminClient() # Setup request - resource = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") + resource = "resource-341064690" permissions = [] with pytest.raises(CustomException): @@ -570,11 +575,11 @@ def test_snapshot_table(self): # Setup Expected Response name_2 = "name2-1052831874" data_size_bytes = 2110122398 - description_2 = "description2568623279" + description = "description-1724546052" expected_response = { "name": name_2, "data_size_bytes": data_size_bytes, - "description": description_2, + "description": description, } expected_response = table_pb2.Snapshot(**expected_response) operation = operations_pb2.Operation( @@ -591,17 +596,16 @@ def test_snapshot_table(self): # Setup Request name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - cluster = "cluster872092154" + cluster = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") snapshot_id = "snapshotId-168585866" - description = "description-1724546052" - response = client.snapshot_table(name, cluster, snapshot_id, description) + response = client.snapshot_table(name, cluster, snapshot_id) result = response.result() assert expected_response == result assert len(channel.requests) == 1 expected_request = bigtable_table_admin_pb2.SnapshotTableRequest( - name=name, cluster=cluster, snapshot_id=snapshot_id, description=description + name=name, cluster=cluster, snapshot_id=snapshot_id ) actual_request = channel.requests[0][1] assert expected_request == actual_request @@ -623,11 +627,10 @@ def test_snapshot_table_exception(self): # Setup Request name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - cluster = "cluster872092154" + cluster = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") snapshot_id = "snapshotId-168585866" - description = "description-1724546052" - response = client.snapshot_table(name, cluster, snapshot_id, description) + response = client.snapshot_table(name, cluster, snapshot_id) exception = response.exception() assert exception.errors[0] == error @@ -758,3 +761,279 @@ def test_delete_snapshot_exception(self): with pytest.raises(CustomException): client.delete_snapshot(name) + + def test_create_backup(self): + # Setup Expected Response + name = "name3373707" + source_table = "sourceTable1670858410" + size_bytes = 1796325715 + expected_response = { + "name": name, + "source_table": source_table, + "size_bytes": size_bytes, + } + expected_response = table_pb2.Backup(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_create_backup", done=True + ) + operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") + backup_id = "backupId1355353272" + backup = {} + + response = client.create_backup(parent, backup_id, backup) + result = response.result() + assert expected_response == result + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.CreateBackupRequest( + parent=parent, backup_id=backup_id, backup=backup + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def 
test_create_backup_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name="operations/test_create_backup_exception", done=True + ) + operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") + backup_id = "backupId1355353272" + backup = {} + + response = client.create_backup(parent, backup_id, backup) + exception = response.exception() + assert exception.errors[0] == error + + def test_get_backup(self): + # Setup Expected Response + name_2 = "name2-1052831874" + source_table = "sourceTable1670858410" + size_bytes = 1796325715 + expected_response = { + "name": name_2, + "source_table": source_table, + "size_bytes": size_bytes, + } + expected_response = table_pb2.Backup(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + name = client.backup_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[BACKUP]") + + response = client.get_backup(name) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.GetBackupRequest(name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_get_backup_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + name = client.backup_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[BACKUP]") + + with pytest.raises(CustomException): + client.get_backup(name) + + def test_list_backups(self): + # Setup Expected Response + next_page_token = "" + backups_element = {} + backups = [backups_element] + expected_response = {"next_page_token": next_page_token, "backups": backups} + expected_response = bigtable_table_admin_pb2.ListBackupsResponse( + **expected_response + ) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") + + paged_list_response = client.list_backups(parent) + resources = list(paged_list_response) + assert len(resources) == 1 + + assert expected_response.backups[0] == resources[0] + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.ListBackupsRequest(parent=parent) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_list_backups_exception(self): + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = 
bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") + + paged_list_response = client.list_backups(parent) + with pytest.raises(CustomException): + list(paged_list_response) + + def test_update_backup(self): + # Setup Expected Response + name = "name3373707" + source_table = "sourceTable1670858410" + size_bytes = 1796325715 + expected_response = { + "name": name, + "source_table": source_table, + "size_bytes": size_bytes, + } + expected_response = table_pb2.Backup(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + backup = {} + update_mask = {} + + response = client.update_backup(backup, update_mask) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.UpdateBackupRequest( + backup=backup, update_mask=update_mask + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_update_backup_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + backup = {} + update_mask = {} + + with pytest.raises(CustomException): + client.update_backup(backup, update_mask) + + def test_delete_backup(self): + channel = ChannelStub() + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup Request + name = client.backup_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[BACKUP]") + + client.delete_backup(name) + + assert len(channel.requests) == 1 + expected_request = bigtable_table_admin_pb2.DeleteBackupRequest(name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_delete_backup_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Setup request + name = client.backup_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[BACKUP]") + + with pytest.raises(CustomException): + client.delete_backup(name) + + def test_restore_table(self): + # Setup Expected Response + name = "name3373707" + expected_response = {"name": name} + expected_response = table_pb2.Table(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_restore_table", done=True + ) + operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + response = client.restore_table() + result = response.result() + assert expected_response == result + + assert len(channel.requests) == 1 + expected_request = 
bigtable_table_admin_pb2.RestoreTableRequest() + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_restore_table_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name="operations/test_restore_table_exception", done=True + ) + operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = bigtable_admin_v2.BigtableTableAdminClient() + + response = client.restore_table() + exception = response.exception() + assert exception.errors[0] == error From 3ce3fb460abbab996a611e01ef4504ce8d31318c Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Tue, 16 Jun 2020 08:27:09 -0700 Subject: [PATCH 312/892] chore: set Ruby namespace in proto options, use protoc-docs-plugin 0.8.0 (#56) * chore: set Ruby namespace in proto options PiperOrigin-RevId: 316039767 Source-Author: Google APIs Source-Date: Thu Jun 11 21:23:34 2020 -0700 Source-Repo: googleapis/googleapis Source-Sha: 94fe3637559a257634d7b47a15bb8d976daff788 Source-Link: https://github.com/googleapis/googleapis/commit/94fe3637559a257634d7b47a15bb8d976daff788 * fix: use protoc-docs-plugin 0.8.0 Fixes issue with missing newline before 'Attributes' in Python docstrings. PiperOrigin-RevId: 316182409 Source-Author: Google APIs Source-Date: Fri Jun 12 14:52:11 2020 -0700 Source-Repo: googleapis/googleapis Source-Sha: 184661793fbe3b89f2b485c303e7466cef9d21a1 Source-Link: https://github.com/googleapis/googleapis/commit/184661793fbe3b89f2b485c303e7466cef9d21a1 --- .../proto/bigtable_instance_admin.proto | 1 + .../proto/bigtable_instance_admin_pb2.py | 25 +++++++++++-- .../proto/bigtable_table_admin.proto | 1 + .../proto/bigtable_table_admin_pb2.py | 35 +++++++++++++++++-- .../bigtable_admin_v2/proto/common.proto | 1 + .../bigtable_admin_v2/proto/common_pb2.py | 5 +-- .../bigtable_admin_v2/proto/instance.proto | 1 + .../bigtable_admin_v2/proto/instance_pb2.py | 8 +++-- .../cloud/bigtable_admin_v2/proto/table.proto | 1 + .../bigtable_admin_v2/proto/table_pb2.py | 14 ++++++-- .../cloud/bigtable_v2/proto/bigtable.proto | 1 + .../cloud/bigtable_v2/proto/bigtable_pb2.py | 18 ++++++++-- .../google/cloud/bigtable_v2/proto/data.proto | 1 + .../cloud/bigtable_v2/proto/data_pb2.py | 22 ++++++++++-- packages/google-cloud-bigtable/synth.metadata | 6 ++-- 15 files changed, 123 insertions(+), 17 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto index 8e05bfd0fbae..8b19b5582248 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto @@ -35,6 +35,7 @@ option java_multiple_files = true; option java_outer_classname = "BigtableInstanceAdminProto"; option java_package = "com.google.bigtable.admin.v2"; option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2"; +option ruby_package = "Google::Cloud::Bigtable::Admin::V2"; // Service for creating, configuring, and deleting Cloud Bigtable Instances and // Clusters. 
Provides access to the Instance and Cluster schemas only, not the diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py index dbe5a8fd81c3..bd4d621d66ec 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py @@ -33,9 +33,9 @@ name="google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto", package="google.bigtable.admin.v2", syntax="proto3", - serialized_options=b"\n\034com.google.bigtable.admin.v2B\032BigtableInstanceAdminProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2", + serialized_options=b'\n\034com.google.bigtable.admin.v2B\032BigtableInstanceAdminProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2\352\002"Google::Cloud::Bigtable::Admin::V2', create_key=_descriptor._internal_create_key, - serialized_pb=b'\nBgoogle/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x33google/cloud/bigtable_admin_v2/proto/instance.proto\x1a\x1egoogle/iam/v1/iam_policy.proto\x1a\x1agoogle/iam/v1/policy.proto\x1a#google/longrunning/operations.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xdb\x02\n\x15\x43reateInstanceRequest\x12\x43\n\x06parent\x18\x01 \x01(\tB3\xe0\x41\x02\xfa\x41-\n+cloudresourcemanager.googleapis.com/Project\x12\x18\n\x0binstance_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x39\n\x08instance\x18\x03 \x01(\x0b\x32".google.bigtable.admin.v2.InstanceB\x03\xe0\x41\x02\x12T\n\x08\x63lusters\x18\x04 \x03(\x0b\x32=.google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntryB\x03\xe0\x41\x02\x1aR\n\rClustersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x30\n\x05value\x18\x02 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster:\x02\x38\x01"L\n\x12GetInstanceRequest\x12\x36\n\x04name\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance"o\n\x14ListInstancesRequest\x12\x43\n\x06parent\x18\x01 \x01(\tB3\xe0\x41\x02\xfa\x41-\n+cloudresourcemanager.googleapis.com/Project\x12\x12\n\npage_token\x18\x02 \x01(\t"\x81\x01\n\x15ListInstancesResponse\x12\x35\n\tinstances\x18\x01 \x03(\x0b\x32".google.bigtable.admin.v2.Instance\x12\x18\n\x10\x66\x61iled_locations\x18\x02 \x03(\t\x12\x17\n\x0fnext_page_token\x18\x03 \x01(\t"\x8f\x01\n\x1cPartialUpdateInstanceRequest\x12\x39\n\x08instance\x18\x01 \x01(\x0b\x32".google.bigtable.admin.v2.InstanceB\x03\xe0\x41\x02\x12\x34\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMaskB\x03\xe0\x41\x02"O\n\x15\x44\x65leteInstanceRequest\x12\x36\n\x04name\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance"\xa2\x01\n\x14\x43reateClusterRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x17\n\ncluster_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x37\n\x07\x63luster\x18\x03 
\x01(\x0b\x32!.google.bigtable.admin.v2.ClusterB\x03\xe0\x41\x02"J\n\x11GetClusterRequest\x12\x35\n\x04name\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster"c\n\x13ListClustersRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x12\n\npage_token\x18\x02 \x01(\t"~\n\x14ListClustersResponse\x12\x33\n\x08\x63lusters\x18\x01 \x03(\x0b\x32!.google.bigtable.admin.v2.Cluster\x12\x18\n\x10\x66\x61iled_locations\x18\x02 \x03(\t\x12\x17\n\x0fnext_page_token\x18\x03 \x01(\t"M\n\x14\x44\x65leteClusterRequest\x12\x35\n\x04name\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster"\xc6\x01\n\x16\x43reateInstanceMetadata\x12I\n\x10original_request\x18\x01 \x01(\x0b\x32/.google.bigtable.admin.v2.CreateInstanceRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xcd\x01\n\x16UpdateInstanceMetadata\x12P\n\x10original_request\x18\x01 \x01(\x0b\x32\x36.google.bigtable.admin.v2.PartialUpdateInstanceRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xc4\x01\n\x15\x43reateClusterMetadata\x12H\n\x10original_request\x18\x01 \x01(\x0b\x32..google.bigtable.admin.v2.CreateClusterRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xb7\x01\n\x15UpdateClusterMetadata\x12;\n\x10original_request\x18\x01 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xc9\x01\n\x17\x43reateAppProfileRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x1b\n\x0e\x61pp_profile_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12>\n\x0b\x61pp_profile\x18\x03 \x01(\x0b\x32$.google.bigtable.admin.v2.AppProfileB\x03\xe0\x41\x02\x12\x17\n\x0fignore_warnings\x18\x04 \x01(\x08"P\n\x14GetAppProfileRequest\x12\x38\n\x04name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n"bigtable.googleapis.com/AppProfile"y\n\x16ListAppProfilesRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x11\n\tpage_size\x18\x03 \x01(\x05\x12\x12\n\npage_token\x18\x02 \x01(\t"\x88\x01\n\x17ListAppProfilesResponse\x12:\n\x0c\x61pp_profiles\x18\x01 \x03(\x0b\x32$.google.bigtable.admin.v2.AppProfile\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\x12\x18\n\x10\x66\x61iled_locations\x18\x03 \x03(\t"\xa8\x01\n\x17UpdateAppProfileRequest\x12>\n\x0b\x61pp_profile\x18\x01 \x01(\x0b\x32$.google.bigtable.admin.v2.AppProfileB\x03\xe0\x41\x02\x12\x34\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMaskB\x03\xe0\x41\x02\x12\x17\n\x0fignore_warnings\x18\x03 \x01(\x08"l\n\x17\x44\x65leteAppProfileRequest\x12\x38\n\x04name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n"bigtable.googleapis.com/AppProfile\x12\x17\n\x0fignore_warnings\x18\x02 
\x01(\x08"\x1a\n\x18UpdateAppProfileMetadata2\x92\x1e\n\x15\x42igtableInstanceAdmin\x12\xda\x01\n\x0e\x43reateInstance\x12/.google.bigtable.admin.v2.CreateInstanceRequest\x1a\x1d.google.longrunning.Operation"x\x82\xd3\xe4\x93\x02&"!/v2/{parent=projects/*}/instances:\x01*\xda\x41$parent,instance_id,instance,clusters\xca\x41"\n\x08Instance\x12\x16\x43reateInstanceMetadata\x12\x91\x01\n\x0bGetInstance\x12,.google.bigtable.admin.v2.GetInstanceRequest\x1a".google.bigtable.admin.v2.Instance"0\x82\xd3\xe4\x93\x02#\x12!/v2/{name=projects/*/instances/*}\xda\x41\x04name\x12\xa4\x01\n\rListInstances\x12..google.bigtable.admin.v2.ListInstancesRequest\x1a/.google.bigtable.admin.v2.ListInstancesResponse"2\x82\xd3\xe4\x93\x02#\x12!/v2/{parent=projects/*}/instances\xda\x41\x06parent\x12\x86\x01\n\x0eUpdateInstance\x12".google.bigtable.admin.v2.Instance\x1a".google.bigtable.admin.v2.Instance",\x82\xd3\xe4\x93\x02&\x1a!/v2/{name=projects/*/instances/*}:\x01*\x12\xe8\x01\n\x15PartialUpdateInstance\x12\x36.google.bigtable.admin.v2.PartialUpdateInstanceRequest\x1a\x1d.google.longrunning.Operation"x\x82\xd3\xe4\x93\x02\x36\x32*/v2/{instance.name=projects/*/instances/*}:\x08instance\xda\x41\x14instance,update_mask\xca\x41"\n\x08Instance\x12\x16UpdateInstanceMetadata\x12\x8b\x01\n\x0e\x44\x65leteInstance\x12/.google.bigtable.admin.v2.DeleteInstanceRequest\x1a\x16.google.protobuf.Empty"0\x82\xd3\xe4\x93\x02#*!/v2/{name=projects/*/instances/*}\xda\x41\x04name\x12\xdc\x01\n\rCreateCluster\x12..google.bigtable.admin.v2.CreateClusterRequest\x1a\x1d.google.longrunning.Operation"|\x82\xd3\xe4\x93\x02\x37",/v2/{parent=projects/*/instances/*}/clusters:\x07\x63luster\xda\x41\x19parent,cluster_id,cluster\xca\x41 \n\x07\x43luster\x12\x15\x43reateClusterMetadata\x12\x99\x01\n\nGetCluster\x12+.google.bigtable.admin.v2.GetClusterRequest\x1a!.google.bigtable.admin.v2.Cluster";\x82\xd3\xe4\x93\x02.\x12,/v2/{name=projects/*/instances/*/clusters/*}\xda\x41\x04name\x12\xac\x01\n\x0cListClusters\x12-.google.bigtable.admin.v2.ListClustersRequest\x1a..google.bigtable.admin.v2.ListClustersResponse"=\x82\xd3\xe4\x93\x02.\x12,/v2/{parent=projects/*/instances/*}/clusters\xda\x41\x06parent\x12\xad\x01\n\rUpdateCluster\x12!.google.bigtable.admin.v2.Cluster\x1a\x1d.google.longrunning.Operation"Z\x82\xd3\xe4\x93\x02\x31\x1a,/v2/{name=projects/*/instances/*/clusters/*}:\x01*\xca\x41 
\n\x07\x43luster\x12\x15UpdateClusterMetadata\x12\x94\x01\n\rDeleteCluster\x12..google.bigtable.admin.v2.DeleteClusterRequest\x1a\x16.google.protobuf.Empty";\x82\xd3\xe4\x93\x02.*,/v2/{name=projects/*/instances/*/clusters/*}\xda\x41\x04name\x12\xd5\x01\n\x10\x43reateAppProfile\x12\x31.google.bigtable.admin.v2.CreateAppProfileRequest\x1a$.google.bigtable.admin.v2.AppProfile"h\x82\xd3\xe4\x93\x02>"//v2/{parent=projects/*/instances/*}/appProfiles:\x0b\x61pp_profile\xda\x41!parent,app_profile_id,app_profile\x12\xa5\x01\n\rGetAppProfile\x12..google.bigtable.admin.v2.GetAppProfileRequest\x1a$.google.bigtable.admin.v2.AppProfile">\x82\xd3\xe4\x93\x02\x31\x12//v2/{name=projects/*/instances/*/appProfiles/*}\xda\x41\x04name\x12\xb8\x01\n\x0fListAppProfiles\x12\x30.google.bigtable.admin.v2.ListAppProfilesRequest\x1a\x31.google.bigtable.admin.v2.ListAppProfilesResponse"@\x82\xd3\xe4\x93\x02\x31\x12//v2/{parent=projects/*/instances/*}/appProfiles\xda\x41\x06parent\x12\xfa\x01\n\x10UpdateAppProfile\x12\x31.google.bigtable.admin.v2.UpdateAppProfileRequest\x1a\x1d.google.longrunning.Operation"\x93\x01\x82\xd3\xe4\x93\x02J2;/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}:\x0b\x61pp_profile\xda\x41\x17\x61pp_profile,update_mask\xca\x41&\n\nAppProfile\x12\x18UpdateAppProfileMetadata\x12\x9d\x01\n\x10\x44\x65leteAppProfile\x12\x31.google.bigtable.admin.v2.DeleteAppProfileRequest\x1a\x16.google.protobuf.Empty">\x82\xd3\xe4\x93\x02\x31*//v2/{name=projects/*/instances/*/appProfiles/*}\xda\x41\x04name\x12\x93\x01\n\x0cGetIamPolicy\x12".google.iam.v1.GetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"H\x82\xd3\xe4\x93\x02\x37"2/v2/{resource=projects/*/instances/*}:getIamPolicy:\x01*\xda\x41\x08resource\x12\x9a\x01\n\x0cSetIamPolicy\x12".google.iam.v1.SetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"O\x82\xd3\xe4\x93\x02\x37"2/v2/{resource=projects/*/instances/*}:setIamPolicy:\x01*\xda\x41\x0fresource,policy\x12\xc5\x01\n\x12TestIamPermissions\x12(.google.iam.v1.TestIamPermissionsRequest\x1a).google.iam.v1.TestIamPermissionsResponse"Z\x82\xd3\xe4\x93\x02="8/v2/{resource=projects/*/instances/*}:testIamPermissions:\x01*\xda\x41\x14resource,permissions\x1a\x9a\x03\xca\x41\x1c\x62igtableadmin.googleapis.com\xd2\x41\xf7\x02https://www.googleapis.com/auth/bigtable.admin,https://www.googleapis.com/auth/bigtable.admin.cluster,https://www.googleapis.com/auth/bigtable.admin.instance,https://www.googleapis.com/auth/cloud-bigtable.admin,https://www.googleapis.com/auth/cloud-bigtable.admin.cluster,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-onlyB\xbd\x01\n\x1c\x63om.google.bigtable.admin.v2B\x1a\x42igtableInstanceAdminProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2b\x06proto3', + serialized_pb=b'\nBgoogle/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x33google/cloud/bigtable_admin_v2/proto/instance.proto\x1a\x1egoogle/iam/v1/iam_policy.proto\x1a\x1agoogle/iam/v1/policy.proto\x1a#google/longrunning/operations.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xdb\x02\n\x15\x43reateInstanceRequest\x12\x43\n\x06parent\x18\x01 
\x01(\tB3\xe0\x41\x02\xfa\x41-\n+cloudresourcemanager.googleapis.com/Project\x12\x18\n\x0binstance_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x39\n\x08instance\x18\x03 \x01(\x0b\x32".google.bigtable.admin.v2.InstanceB\x03\xe0\x41\x02\x12T\n\x08\x63lusters\x18\x04 \x03(\x0b\x32=.google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntryB\x03\xe0\x41\x02\x1aR\n\rClustersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x30\n\x05value\x18\x02 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster:\x02\x38\x01"L\n\x12GetInstanceRequest\x12\x36\n\x04name\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance"o\n\x14ListInstancesRequest\x12\x43\n\x06parent\x18\x01 \x01(\tB3\xe0\x41\x02\xfa\x41-\n+cloudresourcemanager.googleapis.com/Project\x12\x12\n\npage_token\x18\x02 \x01(\t"\x81\x01\n\x15ListInstancesResponse\x12\x35\n\tinstances\x18\x01 \x03(\x0b\x32".google.bigtable.admin.v2.Instance\x12\x18\n\x10\x66\x61iled_locations\x18\x02 \x03(\t\x12\x17\n\x0fnext_page_token\x18\x03 \x01(\t"\x8f\x01\n\x1cPartialUpdateInstanceRequest\x12\x39\n\x08instance\x18\x01 \x01(\x0b\x32".google.bigtable.admin.v2.InstanceB\x03\xe0\x41\x02\x12\x34\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMaskB\x03\xe0\x41\x02"O\n\x15\x44\x65leteInstanceRequest\x12\x36\n\x04name\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance"\xa2\x01\n\x14\x43reateClusterRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x17\n\ncluster_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x37\n\x07\x63luster\x18\x03 \x01(\x0b\x32!.google.bigtable.admin.v2.ClusterB\x03\xe0\x41\x02"J\n\x11GetClusterRequest\x12\x35\n\x04name\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster"c\n\x13ListClustersRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x12\n\npage_token\x18\x02 \x01(\t"~\n\x14ListClustersResponse\x12\x33\n\x08\x63lusters\x18\x01 \x03(\x0b\x32!.google.bigtable.admin.v2.Cluster\x12\x18\n\x10\x66\x61iled_locations\x18\x02 \x03(\t\x12\x17\n\x0fnext_page_token\x18\x03 \x01(\t"M\n\x14\x44\x65leteClusterRequest\x12\x35\n\x04name\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster"\xc6\x01\n\x16\x43reateInstanceMetadata\x12I\n\x10original_request\x18\x01 \x01(\x0b\x32/.google.bigtable.admin.v2.CreateInstanceRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xcd\x01\n\x16UpdateInstanceMetadata\x12P\n\x10original_request\x18\x01 \x01(\x0b\x32\x36.google.bigtable.admin.v2.PartialUpdateInstanceRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xc4\x01\n\x15\x43reateClusterMetadata\x12H\n\x10original_request\x18\x01 \x01(\x0b\x32..google.bigtable.admin.v2.CreateClusterRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xb7\x01\n\x15UpdateClusterMetadata\x12;\n\x10original_request\x18\x01 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xc9\x01\n\x17\x43reateAppProfileRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n 
bigtable.googleapis.com/Instance\x12\x1b\n\x0e\x61pp_profile_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12>\n\x0b\x61pp_profile\x18\x03 \x01(\x0b\x32$.google.bigtable.admin.v2.AppProfileB\x03\xe0\x41\x02\x12\x17\n\x0fignore_warnings\x18\x04 \x01(\x08"P\n\x14GetAppProfileRequest\x12\x38\n\x04name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n"bigtable.googleapis.com/AppProfile"y\n\x16ListAppProfilesRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x11\n\tpage_size\x18\x03 \x01(\x05\x12\x12\n\npage_token\x18\x02 \x01(\t"\x88\x01\n\x17ListAppProfilesResponse\x12:\n\x0c\x61pp_profiles\x18\x01 \x03(\x0b\x32$.google.bigtable.admin.v2.AppProfile\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\x12\x18\n\x10\x66\x61iled_locations\x18\x03 \x03(\t"\xa8\x01\n\x17UpdateAppProfileRequest\x12>\n\x0b\x61pp_profile\x18\x01 \x01(\x0b\x32$.google.bigtable.admin.v2.AppProfileB\x03\xe0\x41\x02\x12\x34\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMaskB\x03\xe0\x41\x02\x12\x17\n\x0fignore_warnings\x18\x03 \x01(\x08"l\n\x17\x44\x65leteAppProfileRequest\x12\x38\n\x04name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n"bigtable.googleapis.com/AppProfile\x12\x17\n\x0fignore_warnings\x18\x02 \x01(\x08"\x1a\n\x18UpdateAppProfileMetadata2\x92\x1e\n\x15\x42igtableInstanceAdmin\x12\xda\x01\n\x0e\x43reateInstance\x12/.google.bigtable.admin.v2.CreateInstanceRequest\x1a\x1d.google.longrunning.Operation"x\x82\xd3\xe4\x93\x02&"!/v2/{parent=projects/*}/instances:\x01*\xda\x41$parent,instance_id,instance,clusters\xca\x41"\n\x08Instance\x12\x16\x43reateInstanceMetadata\x12\x91\x01\n\x0bGetInstance\x12,.google.bigtable.admin.v2.GetInstanceRequest\x1a".google.bigtable.admin.v2.Instance"0\x82\xd3\xe4\x93\x02#\x12!/v2/{name=projects/*/instances/*}\xda\x41\x04name\x12\xa4\x01\n\rListInstances\x12..google.bigtable.admin.v2.ListInstancesRequest\x1a/.google.bigtable.admin.v2.ListInstancesResponse"2\x82\xd3\xe4\x93\x02#\x12!/v2/{parent=projects/*}/instances\xda\x41\x06parent\x12\x86\x01\n\x0eUpdateInstance\x12".google.bigtable.admin.v2.Instance\x1a".google.bigtable.admin.v2.Instance",\x82\xd3\xe4\x93\x02&\x1a!/v2/{name=projects/*/instances/*}:\x01*\x12\xe8\x01\n\x15PartialUpdateInstance\x12\x36.google.bigtable.admin.v2.PartialUpdateInstanceRequest\x1a\x1d.google.longrunning.Operation"x\x82\xd3\xe4\x93\x02\x36\x32*/v2/{instance.name=projects/*/instances/*}:\x08instance\xda\x41\x14instance,update_mask\xca\x41"\n\x08Instance\x12\x16UpdateInstanceMetadata\x12\x8b\x01\n\x0e\x44\x65leteInstance\x12/.google.bigtable.admin.v2.DeleteInstanceRequest\x1a\x16.google.protobuf.Empty"0\x82\xd3\xe4\x93\x02#*!/v2/{name=projects/*/instances/*}\xda\x41\x04name\x12\xdc\x01\n\rCreateCluster\x12..google.bigtable.admin.v2.CreateClusterRequest\x1a\x1d.google.longrunning.Operation"|\x82\xd3\xe4\x93\x02\x37",/v2/{parent=projects/*/instances/*}/clusters:\x07\x63luster\xda\x41\x19parent,cluster_id,cluster\xca\x41 
\n\x07\x43luster\x12\x15\x43reateClusterMetadata\x12\x99\x01\n\nGetCluster\x12+.google.bigtable.admin.v2.GetClusterRequest\x1a!.google.bigtable.admin.v2.Cluster";\x82\xd3\xe4\x93\x02.\x12,/v2/{name=projects/*/instances/*/clusters/*}\xda\x41\x04name\x12\xac\x01\n\x0cListClusters\x12-.google.bigtable.admin.v2.ListClustersRequest\x1a..google.bigtable.admin.v2.ListClustersResponse"=\x82\xd3\xe4\x93\x02.\x12,/v2/{parent=projects/*/instances/*}/clusters\xda\x41\x06parent\x12\xad\x01\n\rUpdateCluster\x12!.google.bigtable.admin.v2.Cluster\x1a\x1d.google.longrunning.Operation"Z\x82\xd3\xe4\x93\x02\x31\x1a,/v2/{name=projects/*/instances/*/clusters/*}:\x01*\xca\x41 \n\x07\x43luster\x12\x15UpdateClusterMetadata\x12\x94\x01\n\rDeleteCluster\x12..google.bigtable.admin.v2.DeleteClusterRequest\x1a\x16.google.protobuf.Empty";\x82\xd3\xe4\x93\x02.*,/v2/{name=projects/*/instances/*/clusters/*}\xda\x41\x04name\x12\xd5\x01\n\x10\x43reateAppProfile\x12\x31.google.bigtable.admin.v2.CreateAppProfileRequest\x1a$.google.bigtable.admin.v2.AppProfile"h\x82\xd3\xe4\x93\x02>"//v2/{parent=projects/*/instances/*}/appProfiles:\x0b\x61pp_profile\xda\x41!parent,app_profile_id,app_profile\x12\xa5\x01\n\rGetAppProfile\x12..google.bigtable.admin.v2.GetAppProfileRequest\x1a$.google.bigtable.admin.v2.AppProfile">\x82\xd3\xe4\x93\x02\x31\x12//v2/{name=projects/*/instances/*/appProfiles/*}\xda\x41\x04name\x12\xb8\x01\n\x0fListAppProfiles\x12\x30.google.bigtable.admin.v2.ListAppProfilesRequest\x1a\x31.google.bigtable.admin.v2.ListAppProfilesResponse"@\x82\xd3\xe4\x93\x02\x31\x12//v2/{parent=projects/*/instances/*}/appProfiles\xda\x41\x06parent\x12\xfa\x01\n\x10UpdateAppProfile\x12\x31.google.bigtable.admin.v2.UpdateAppProfileRequest\x1a\x1d.google.longrunning.Operation"\x93\x01\x82\xd3\xe4\x93\x02J2;/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}:\x0b\x61pp_profile\xda\x41\x17\x61pp_profile,update_mask\xca\x41&\n\nAppProfile\x12\x18UpdateAppProfileMetadata\x12\x9d\x01\n\x10\x44\x65leteAppProfile\x12\x31.google.bigtable.admin.v2.DeleteAppProfileRequest\x1a\x16.google.protobuf.Empty">\x82\xd3\xe4\x93\x02\x31*//v2/{name=projects/*/instances/*/appProfiles/*}\xda\x41\x04name\x12\x93\x01\n\x0cGetIamPolicy\x12".google.iam.v1.GetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"H\x82\xd3\xe4\x93\x02\x37"2/v2/{resource=projects/*/instances/*}:getIamPolicy:\x01*\xda\x41\x08resource\x12\x9a\x01\n\x0cSetIamPolicy\x12".google.iam.v1.SetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"O\x82\xd3\xe4\x93\x02\x37"2/v2/{resource=projects/*/instances/*}:setIamPolicy:\x01*\xda\x41\x0fresource,policy\x12\xc5\x01\n\x12TestIamPermissions\x12(.google.iam.v1.TestIamPermissionsRequest\x1a).google.iam.v1.TestIamPermissionsResponse"Z\x82\xd3\xe4\x93\x02="8/v2/{resource=projects/*/instances/*}:testIamPermissions:\x01*\xda\x41\x14resource,permissions\x1a\x9a\x03\xca\x41\x1c\x62igtableadmin.googleapis.com\xd2\x41\xf7\x02https://www.googleapis.com/auth/bigtable.admin,https://www.googleapis.com/auth/bigtable.admin.cluster,https://www.googleapis.com/auth/bigtable.admin.instance,https://www.googleapis.com/auth/cloud-bigtable.admin,https://www.googleapis.com/auth/cloud-bigtable.admin.cluster,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-onlyB\xe2\x01\n\x1c\x63om.google.bigtable.admin.v2B\x1a\x42igtableInstanceAdminProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2\xea\x02"Google::Cloud::Bigtable::Admin
::V2b\x06proto3', dependencies=[ google_dot_api_dot_annotations__pb2.DESCRIPTOR, google_dot_api_dot_client__pb2.DESCRIPTOR, @@ -1697,6 +1697,7 @@ "DESCRIPTOR": _CREATEINSTANCEREQUEST, "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", "__doc__": """Request message for BigtableInstanceAdmin.CreateInstance. + Attributes: parent: Required. The unique name of the project in which to create @@ -1729,6 +1730,7 @@ "DESCRIPTOR": _GETINSTANCEREQUEST, "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", "__doc__": """Request message for BigtableInstanceAdmin.GetInstance. + Attributes: name: Required. The unique name of the requested instance. Values @@ -1746,6 +1748,7 @@ "DESCRIPTOR": _LISTINSTANCESREQUEST, "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", "__doc__": """Request message for BigtableInstanceAdmin.ListInstances. + Attributes: parent: Required. The unique name of the project for which a list of @@ -1766,6 +1769,7 @@ "DESCRIPTOR": _LISTINSTANCESRESPONSE, "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", "__doc__": """Response message for BigtableInstanceAdmin.ListInstances. + Attributes: instances: The list of requested instances. @@ -1792,6 +1796,7 @@ "DESCRIPTOR": _PARTIALUPDATEINSTANCEREQUEST, "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", "__doc__": """Request message for BigtableInstanceAdmin.PartialUpdateInstance. + Attributes: instance: Required. The Instance which will (partially) replace the @@ -1812,6 +1817,7 @@ "DESCRIPTOR": _DELETEINSTANCEREQUEST, "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", "__doc__": """Request message for BigtableInstanceAdmin.DeleteInstance. + Attributes: name: Required. The unique name of the instance to be deleted. @@ -1830,6 +1836,7 @@ "DESCRIPTOR": _CREATECLUSTERREQUEST, "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", "__doc__": """Request message for BigtableInstanceAdmin.CreateCluster. + Attributes: parent: Required. The unique name of the instance in which to create @@ -1855,6 +1862,7 @@ "DESCRIPTOR": _GETCLUSTERREQUEST, "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", "__doc__": """Request message for BigtableInstanceAdmin.GetCluster. + Attributes: name: Required. The unique name of the requested cluster. Values are @@ -1873,6 +1881,7 @@ "DESCRIPTOR": _LISTCLUSTERSREQUEST, "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", "__doc__": """Request message for BigtableInstanceAdmin.ListClusters. + Attributes: parent: Required. The unique name of the instance for which a list of @@ -1895,6 +1904,7 @@ "DESCRIPTOR": _LISTCLUSTERSRESPONSE, "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", "__doc__": """Response message for BigtableInstanceAdmin.ListClusters. + Attributes: clusters: The list of requested clusters. @@ -1920,6 +1930,7 @@ "DESCRIPTOR": _DELETECLUSTERREQUEST, "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", "__doc__": """Request message for BigtableInstanceAdmin.DeleteCluster. + Attributes: name: Required. The unique name of the cluster to be deleted. Values @@ -1938,6 +1949,7 @@ "DESCRIPTOR": _CREATEINSTANCEMETADATA, "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", "__doc__": """The metadata for the Operation returned by CreateInstance. 
+ Attributes: original_request: The request that prompted the initiation of this @@ -1960,6 +1972,7 @@ "DESCRIPTOR": _UPDATEINSTANCEMETADATA, "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", "__doc__": """The metadata for the Operation returned by UpdateInstance. + Attributes: original_request: The request that prompted the initiation of this @@ -1982,6 +1995,7 @@ "DESCRIPTOR": _CREATECLUSTERMETADATA, "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", "__doc__": """The metadata for the Operation returned by CreateCluster. + Attributes: original_request: The request that prompted the initiation of this CreateCluster @@ -2004,6 +2018,7 @@ "DESCRIPTOR": _UPDATECLUSTERMETADATA, "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", "__doc__": """The metadata for the Operation returned by UpdateCluster. + Attributes: original_request: The request that prompted the initiation of this UpdateCluster @@ -2026,6 +2041,7 @@ "DESCRIPTOR": _CREATEAPPPROFILEREQUEST, "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", "__doc__": """Request message for BigtableInstanceAdmin.CreateAppProfile. + Attributes: parent: Required. The unique name of the instance in which to create @@ -2054,6 +2070,7 @@ "DESCRIPTOR": _GETAPPPROFILEREQUEST, "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", "__doc__": """Request message for BigtableInstanceAdmin.GetAppProfile. + Attributes: name: Required. The unique name of the requested app profile. Values @@ -2072,6 +2089,7 @@ "DESCRIPTOR": _LISTAPPPROFILESREQUEST, "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", "__doc__": """Request message for BigtableInstanceAdmin.ListAppProfiles. + Attributes: parent: Required. The unique name of the instance for which a list of @@ -2102,6 +2120,7 @@ "DESCRIPTOR": _LISTAPPPROFILESRESPONSE, "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", "__doc__": """Response message for BigtableInstanceAdmin.ListAppProfiles. + Attributes: app_profiles: The list of requested app profiles. @@ -2128,6 +2147,7 @@ "DESCRIPTOR": _UPDATEAPPPROFILEREQUEST, "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", "__doc__": """Request message for BigtableInstanceAdmin.UpdateAppProfile. + Attributes: app_profile: Required. The app profile which will (partially) replace the @@ -2150,6 +2170,7 @@ "DESCRIPTOR": _DELETEAPPPROFILEREQUEST, "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", "__doc__": """Request message for BigtableInstanceAdmin.DeleteAppProfile. + Attributes: name: Required. The unique name of the app profile to be deleted. 
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto index 119ef73a4db9..6f434a473557 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto @@ -36,6 +36,7 @@ option java_multiple_files = true; option java_outer_classname = "BigtableTableAdminProto"; option java_package = "com.google.bigtable.admin.v2"; option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2"; +option ruby_package = "Google::Cloud::Bigtable::Admin::V2"; // Service for creating, configuring, and deleting Cloud Bigtable tables. // diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py index 7c3317ab09a4..aef2bfdcdb32 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py @@ -37,9 +37,9 @@ name="google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto", package="google.bigtable.admin.v2", syntax="proto3", - serialized_options=b"\n\034com.google.bigtable.admin.v2B\027BigtableTableAdminProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2", + serialized_options=b'\n\034com.google.bigtable.admin.v2B\027BigtableTableAdminProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2\352\002"Google::Cloud::Bigtable::Admin::V2', create_key=_descriptor._internal_create_key, - serialized_pb=b'\n?google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x31google/cloud/bigtable_admin_v2/proto/common.proto\x1a\x30google/cloud/bigtable_admin_v2/proto/table.proto\x1a\x1egoogle/iam/v1/iam_policy.proto\x1a\x1agoogle/iam/v1/policy.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xfc\x01\n\x12\x43reateTableRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x15\n\x08table_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x33\n\x05table\x18\x03 \x01(\x0b\x32\x1f.google.bigtable.admin.v2.TableB\x03\xe0\x41\x02\x12J\n\x0einitial_splits\x18\x04 \x03(\x0b\x32\x32.google.bigtable.admin.v2.CreateTableRequest.Split\x1a\x14\n\x05Split\x12\x0b\n\x03key\x18\x01 \x01(\x0c"\xb4\x01\n\x1e\x43reateTableFromSnapshotRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x15\n\x08table_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x41\n\x0fsource_snapshot\x18\x03 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Snapshot"\x94\x01\n\x13\x44ropRowRangeRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x18\n\x0erow_key_prefix\x18\x02 
\x01(\x0cH\x00\x12$\n\x1a\x64\x65lete_all_data_from_table\x18\x03 \x01(\x08H\x00\x42\x08\n\x06target"\xa8\x01\n\x11ListTablesRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x32\n\x04view\x18\x02 \x01(\x0e\x32$.google.bigtable.admin.v2.Table.View\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t"^\n\x12ListTablesResponse\x12/\n\x06tables\x18\x01 \x03(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"z\n\x0fGetTableRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x32\n\x04view\x18\x02 \x01(\x0e\x32$.google.bigtable.admin.v2.Table.View"I\n\x12\x44\x65leteTableRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table"\xda\x02\n\x1bModifyColumnFamiliesRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12^\n\rmodifications\x18\x02 \x03(\x0b\x32\x42.google.bigtable.admin.v2.ModifyColumnFamiliesRequest.ModificationB\x03\xe0\x41\x02\x1a\xa5\x01\n\x0cModification\x12\n\n\x02id\x18\x01 \x01(\t\x12\x38\n\x06\x63reate\x18\x02 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamilyH\x00\x12\x38\n\x06update\x18\x03 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamilyH\x00\x12\x0e\n\x04\x64rop\x18\x04 \x01(\x08H\x00\x42\x05\n\x03mod"V\n\x1fGenerateConsistencyTokenRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table"=\n GenerateConsistencyTokenResponse\x12\x19\n\x11\x63onsistency_token\x18\x01 \x01(\t"n\n\x17\x43heckConsistencyRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x1e\n\x11\x63onsistency_token\x18\x02 \x01(\tB\x03\xe0\x41\x02".\n\x18\x43heckConsistencyResponse\x12\x12\n\nconsistent\x18\x01 \x01(\x08"\xdc\x01\n\x14SnapshotTableRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x38\n\x07\x63luster\x18\x02 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster\x12\x18\n\x0bsnapshot_id\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12&\n\x03ttl\x18\x04 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x13\n\x0b\x64\x65scription\x18\x05 \x01(\t"L\n\x12GetSnapshotRequest\x12\x36\n\x04name\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Snapshot"v\n\x14ListSnapshotsRequest\x12\x37\n\x06parent\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t"g\n\x15ListSnapshotsResponse\x12\x35\n\tsnapshots\x18\x01 \x03(\x0b\x32".google.bigtable.admin.v2.Snapshot\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"O\n\x15\x44\x65leteSnapshotRequest\x12\x36\n\x04name\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Snapshot"\xc4\x01\n\x15SnapshotTableMetadata\x12H\n\x10original_request\x18\x01 \x01(\x0b\x32..google.bigtable.admin.v2.SnapshotTableRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xd8\x01\n\x1f\x43reateTableFromSnapshotMetadata\x12R\n\x10original_request\x18\x01 \x01(\x0b\x32\x38.google.bigtable.admin.v2.CreateTableFromSnapshotRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp"\x9d\x01\n\x13\x43reateBackupRequest\x12\x37\n\x06parent\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster\x12\x16\n\tbackup_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x35\n\x06\x62\x61\x63kup\x18\x03 \x01(\x0b\x32 .google.bigtable.admin.v2.BackupB\x03\xe0\x41\x02"\x98\x01\n\x14\x43reateBackupMetadata\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0csource_table\x18\x02 \x01(\t\x12.\n\nstart_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"H\n\x10GetBackupRequest\x12\x34\n\x04name\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1e\x62igtable.googleapis.com/Backup"\x82\x01\n\x13UpdateBackupRequest\x12\x35\n\x06\x62\x61\x63kup\x18\x01 \x01(\x0b\x32 .google.bigtable.admin.v2.BackupB\x03\xe0\x41\x02\x12\x34\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMaskB\x03\xe0\x41\x02"K\n\x13\x44\x65leteBackupRequest\x12\x34\n\x04name\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1e\x62igtable.googleapis.com/Backup"\x96\x01\n\x12ListBackupsRequest\x12\x37\n\x06parent\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster\x12\x0e\n\x06\x66ilter\x18\x02 \x01(\t\x12\x10\n\x08order_by\x18\x03 \x01(\t\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x05 \x01(\t"a\n\x13ListBackupsResponse\x12\x31\n\x07\x62\x61\x63kups\x18\x01 \x03(\x0b\x32 .google.bigtable.admin.v2.Backup\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"S\n\x13RestoreTableRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x10\n\x08table_id\x18\x02 \x01(\t\x12\x10\n\x06\x62\x61\x63kup\x18\x03 \x01(\tH\x00\x42\x08\n\x06source"\x98\x02\n\x14RestoreTableMetadata\x12\x0c\n\x04name\x18\x01 \x01(\t\x12@\n\x0bsource_type\x18\x02 \x01(\x0e\x32+.google.bigtable.admin.v2.RestoreSourceType\x12;\n\x0b\x62\x61\x63kup_info\x18\x03 \x01(\x0b\x32$.google.bigtable.admin.v2.BackupInfoH\x00\x12%\n\x1doptimize_table_operation_name\x18\x04 \x01(\t\x12=\n\x08progress\x18\x05 \x01(\x0b\x32+.google.bigtable.admin.v2.OperationProgressB\r\n\x0bsource_info"l\n\x1dOptimizeRestoredTableMetadata\x12\x0c\n\x04name\x18\x01 \x01(\t\x12=\n\x08progress\x18\x02 
\x01(\x0b\x32+.google.bigtable.admin.v2.OperationProgress2\xc8$\n\x12\x42igtableTableAdmin\x12\xab\x01\n\x0b\x43reateTable\x12,.google.bigtable.admin.v2.CreateTableRequest\x1a\x1f.google.bigtable.admin.v2.Table"M\x82\xd3\xe4\x93\x02/"*/v2/{parent=projects/*/instances/*}/tables:\x01*\xda\x41\x15parent,table_id,table\x12\x8a\x02\n\x17\x43reateTableFromSnapshot\x12\x38.google.bigtable.admin.v2.CreateTableFromSnapshotRequest\x1a\x1d.google.longrunning.Operation"\x95\x01\x82\xd3\xe4\x93\x02\x42"=/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot:\x01*\xda\x41\x1fparent,table_id,source_snapshot\xca\x41(\n\x05Table\x12\x1f\x43reateTableFromSnapshotMetadata\x12\xa4\x01\n\nListTables\x12+.google.bigtable.admin.v2.ListTablesRequest\x1a,.google.bigtable.admin.v2.ListTablesResponse";\x82\xd3\xe4\x93\x02,\x12*/v2/{parent=projects/*/instances/*}/tables\xda\x41\x06parent\x12\x91\x01\n\x08GetTable\x12).google.bigtable.admin.v2.GetTableRequest\x1a\x1f.google.bigtable.admin.v2.Table"9\x82\xd3\xe4\x93\x02,\x12*/v2/{name=projects/*/instances/*/tables/*}\xda\x41\x04name\x12\x8e\x01\n\x0b\x44\x65leteTable\x12,.google.bigtable.admin.v2.DeleteTableRequest\x1a\x16.google.protobuf.Empty"9\x82\xd3\xe4\x93\x02,**/v2/{name=projects/*/instances/*/tables/*}\xda\x41\x04name\x12\xcf\x01\n\x14ModifyColumnFamilies\x12\x35.google.bigtable.admin.v2.ModifyColumnFamiliesRequest\x1a\x1f.google.bigtable.admin.v2.Table"_\x82\xd3\xe4\x93\x02\x44"?/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies:\x01*\xda\x41\x12name,modifications\x12\x99\x01\n\x0c\x44ropRowRange\x12-.google.bigtable.admin.v2.DropRowRangeRequest\x1a\x16.google.protobuf.Empty"B\x82\xd3\xe4\x93\x02<"7/v2/{name=projects/*/instances/*/tables/*}:dropRowRange:\x01*\x12\xe8\x01\n\x18GenerateConsistencyToken\x12\x39.google.bigtable.admin.v2.GenerateConsistencyTokenRequest\x1a:.google.bigtable.admin.v2.GenerateConsistencyTokenResponse"U\x82\xd3\xe4\x93\x02H"C/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken:\x01*\xda\x41\x04name\x12\xda\x01\n\x10\x43heckConsistency\x12\x31.google.bigtable.admin.v2.CheckConsistencyRequest\x1a\x32.google.bigtable.admin.v2.CheckConsistencyResponse"_\x82\xd3\xe4\x93\x02@";/v2/{name=projects/*/instances/*/tables/*}:checkConsistency:\x01*\xda\x41\x16name,consistency_token\x12\xea\x01\n\rSnapshotTable\x12..google.bigtable.admin.v2.SnapshotTableRequest\x1a\x1d.google.longrunning.Operation"\x89\x01\x82\xd3\xe4\x93\x02\x38"3/v2/{name=projects/*/instances/*/tables/*}:snapshot:\x01*\xda\x41$name,cluster,snapshot_id,description\xca\x41!\n\x08Snapshot\x12\x15SnapshotTableMetadata\x12\xa8\x01\n\x0bGetSnapshot\x12,.google.bigtable.admin.v2.GetSnapshotRequest\x1a".google.bigtable.admin.v2.Snapshot"G\x82\xd3\xe4\x93\x02:\x12\x38/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}\xda\x41\x04name\x12\xbb\x01\n\rListSnapshots\x12..google.bigtable.admin.v2.ListSnapshotsRequest\x1a/.google.bigtable.admin.v2.ListSnapshotsResponse"I\x82\xd3\xe4\x93\x02:\x12\x38/v2/{parent=projects/*/instances/*/clusters/*}/snapshots\xda\x41\x06parent\x12\xa2\x01\n\x0e\x44\x65leteSnapshot\x12/.google.bigtable.admin.v2.DeleteSnapshotRequest\x1a\x16.google.protobuf.Empty"G\x82\xd3\xe4\x93\x02:*8/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}\xda\x41\x04name\x12\xe0\x01\n\x0c\x43reateBackup\x12-.google.bigtable.admin.v2.CreateBackupRequest\x1a\x1d.google.longrunning.Operation"\x81\x01\x82\xd3\xe4\x93\x02@"6/v2/{parent=projects/*/instances/*/clusters/*}/backups:\x06\x62\x61\x63kup\xca\x41\x1e\n\x06\x42\x61\x63kup\x12\x14\x43reateBa
ckupMetadata\xda\x41\x17parent,backup_id,backup\x12\xa0\x01\n\tGetBackup\x12*.google.bigtable.admin.v2.GetBackupRequest\x1a .google.bigtable.admin.v2.Backup"E\x82\xd3\xe4\x93\x02\x38\x12\x36/v2/{name=projects/*/instances/*/clusters/*/backups/*}\xda\x41\x04name\x12\xc3\x01\n\x0cUpdateBackup\x12-.google.bigtable.admin.v2.UpdateBackupRequest\x1a .google.bigtable.admin.v2.Backup"b\x82\xd3\xe4\x93\x02G2=/v2/{backup.name=projects/*/instances/*/clusters/*/backups/*}:\x06\x62\x61\x63kup\xda\x41\x12\x62\x61\x63kup,update_mask\x12\x9c\x01\n\x0c\x44\x65leteBackup\x12-.google.bigtable.admin.v2.DeleteBackupRequest\x1a\x16.google.protobuf.Empty"E\x82\xd3\xe4\x93\x02\x38*6/v2/{name=projects/*/instances/*/clusters/*/backups/*}\xda\x41\x04name\x12\xb3\x01\n\x0bListBackups\x12,.google.bigtable.admin.v2.ListBackupsRequest\x1a-.google.bigtable.admin.v2.ListBackupsResponse"G\x82\xd3\xe4\x93\x02\x38\x12\x36/v2/{parent=projects/*/instances/*/clusters/*}/backups\xda\x41\x06parent\x12\xbb\x01\n\x0cRestoreTable\x12-.google.bigtable.admin.v2.RestoreTableRequest\x1a\x1d.google.longrunning.Operation"]\x82\xd3\xe4\x93\x02\x37"2/v2/{parent=projects/*/instances/*}/tables:restore:\x01*\xca\x41\x1d\n\x05Table\x12\x14RestoreTableMetadata\x12\x9c\x01\n\x0cGetIamPolicy\x12".google.iam.v1.GetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"Q\x82\xd3\xe4\x93\x02@";/v2/{resource=projects/*/instances/*/tables/*}:getIamPolicy:\x01*\xda\x41\x08resource\x12\xf3\x01\n\x0cSetIamPolicy\x12".google.iam.v1.SetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"\xa7\x01\x82\xd3\xe4\x93\x02\x8e\x01";/v2/{resource=projects/*/instances/*/tables/*}:setIamPolicy:\x01*ZL"G/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:setIamPolicy:\x01*\xda\x41\x0fresource,policy\x12\xa4\x02\n\x12TestIamPermissions\x12(.google.iam.v1.TestIamPermissionsRequest\x1a).google.iam.v1.TestIamPermissionsResponse"\xb8\x01\x82\xd3\xe4\x93\x02\x9a\x01"A/v2/{resource=projects/*/instances/*/tables/*}:testIamPermissions:\x01*ZR"M/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:testIamPermissions:\x01*\xda\x41\x14resource,permissions\x1a\xde\x02\xca\x41\x1c\x62igtableadmin.googleapis.com\xd2\x41\xbb\x02https://www.googleapis.com/auth/bigtable.admin,https://www.googleapis.com/auth/bigtable.admin.table,https://www.googleapis.com/auth/cloud-bigtable.admin,https://www.googleapis.com/auth/cloud-bigtable.admin.table,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-onlyB\xba\x01\n\x1c\x63om.google.bigtable.admin.v2B\x17\x42igtableTableAdminProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2b\x06proto3', + serialized_pb=b'\n?google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x31google/cloud/bigtable_admin_v2/proto/common.proto\x1a\x30google/cloud/bigtable_admin_v2/proto/table.proto\x1a\x1egoogle/iam/v1/iam_policy.proto\x1a\x1agoogle/iam/v1/policy.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xfc\x01\n\x12\x43reateTableRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x15\n\x08table_id\x18\x02 
\x01(\tB\x03\xe0\x41\x02\x12\x33\n\x05table\x18\x03 \x01(\x0b\x32\x1f.google.bigtable.admin.v2.TableB\x03\xe0\x41\x02\x12J\n\x0einitial_splits\x18\x04 \x03(\x0b\x32\x32.google.bigtable.admin.v2.CreateTableRequest.Split\x1a\x14\n\x05Split\x12\x0b\n\x03key\x18\x01 \x01(\x0c"\xb4\x01\n\x1e\x43reateTableFromSnapshotRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x15\n\x08table_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x41\n\x0fsource_snapshot\x18\x03 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Snapshot"\x94\x01\n\x13\x44ropRowRangeRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x18\n\x0erow_key_prefix\x18\x02 \x01(\x0cH\x00\x12$\n\x1a\x64\x65lete_all_data_from_table\x18\x03 \x01(\x08H\x00\x42\x08\n\x06target"\xa8\x01\n\x11ListTablesRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x32\n\x04view\x18\x02 \x01(\x0e\x32$.google.bigtable.admin.v2.Table.View\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t"^\n\x12ListTablesResponse\x12/\n\x06tables\x18\x01 \x03(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"z\n\x0fGetTableRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x32\n\x04view\x18\x02 \x01(\x0e\x32$.google.bigtable.admin.v2.Table.View"I\n\x12\x44\x65leteTableRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table"\xda\x02\n\x1bModifyColumnFamiliesRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12^\n\rmodifications\x18\x02 \x03(\x0b\x32\x42.google.bigtable.admin.v2.ModifyColumnFamiliesRequest.ModificationB\x03\xe0\x41\x02\x1a\xa5\x01\n\x0cModification\x12\n\n\x02id\x18\x01 \x01(\t\x12\x38\n\x06\x63reate\x18\x02 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamilyH\x00\x12\x38\n\x06update\x18\x03 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamilyH\x00\x12\x0e\n\x04\x64rop\x18\x04 \x01(\x08H\x00\x42\x05\n\x03mod"V\n\x1fGenerateConsistencyTokenRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table"=\n GenerateConsistencyTokenResponse\x12\x19\n\x11\x63onsistency_token\x18\x01 \x01(\t"n\n\x17\x43heckConsistencyRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x1e\n\x11\x63onsistency_token\x18\x02 \x01(\tB\x03\xe0\x41\x02".\n\x18\x43heckConsistencyResponse\x12\x12\n\nconsistent\x18\x01 \x01(\x08"\xdc\x01\n\x14SnapshotTableRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x38\n\x07\x63luster\x18\x02 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster\x12\x18\n\x0bsnapshot_id\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12&\n\x03ttl\x18\x04 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x13\n\x0b\x64\x65scription\x18\x05 \x01(\t"L\n\x12GetSnapshotRequest\x12\x36\n\x04name\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Snapshot"v\n\x14ListSnapshotsRequest\x12\x37\n\x06parent\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t"g\n\x15ListSnapshotsResponse\x12\x35\n\tsnapshots\x18\x01 \x03(\x0b\x32".google.bigtable.admin.v2.Snapshot\x12\x17\n\x0fnext_page_token\x18\x02 
\x01(\t"O\n\x15\x44\x65leteSnapshotRequest\x12\x36\n\x04name\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Snapshot"\xc4\x01\n\x15SnapshotTableMetadata\x12H\n\x10original_request\x18\x01 \x01(\x0b\x32..google.bigtable.admin.v2.SnapshotTableRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xd8\x01\n\x1f\x43reateTableFromSnapshotMetadata\x12R\n\x10original_request\x18\x01 \x01(\x0b\x32\x38.google.bigtable.admin.v2.CreateTableFromSnapshotRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\x9d\x01\n\x13\x43reateBackupRequest\x12\x37\n\x06parent\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster\x12\x16\n\tbackup_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x35\n\x06\x62\x61\x63kup\x18\x03 \x01(\x0b\x32 .google.bigtable.admin.v2.BackupB\x03\xe0\x41\x02"\x98\x01\n\x14\x43reateBackupMetadata\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0csource_table\x18\x02 \x01(\t\x12.\n\nstart_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"H\n\x10GetBackupRequest\x12\x34\n\x04name\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1e\x62igtable.googleapis.com/Backup"\x82\x01\n\x13UpdateBackupRequest\x12\x35\n\x06\x62\x61\x63kup\x18\x01 \x01(\x0b\x32 .google.bigtable.admin.v2.BackupB\x03\xe0\x41\x02\x12\x34\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMaskB\x03\xe0\x41\x02"K\n\x13\x44\x65leteBackupRequest\x12\x34\n\x04name\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1e\x62igtable.googleapis.com/Backup"\x96\x01\n\x12ListBackupsRequest\x12\x37\n\x06parent\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster\x12\x0e\n\x06\x66ilter\x18\x02 \x01(\t\x12\x10\n\x08order_by\x18\x03 \x01(\t\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x05 \x01(\t"a\n\x13ListBackupsResponse\x12\x31\n\x07\x62\x61\x63kups\x18\x01 \x03(\x0b\x32 .google.bigtable.admin.v2.Backup\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"S\n\x13RestoreTableRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x10\n\x08table_id\x18\x02 \x01(\t\x12\x10\n\x06\x62\x61\x63kup\x18\x03 \x01(\tH\x00\x42\x08\n\x06source"\x98\x02\n\x14RestoreTableMetadata\x12\x0c\n\x04name\x18\x01 \x01(\t\x12@\n\x0bsource_type\x18\x02 \x01(\x0e\x32+.google.bigtable.admin.v2.RestoreSourceType\x12;\n\x0b\x62\x61\x63kup_info\x18\x03 \x01(\x0b\x32$.google.bigtable.admin.v2.BackupInfoH\x00\x12%\n\x1doptimize_table_operation_name\x18\x04 \x01(\t\x12=\n\x08progress\x18\x05 \x01(\x0b\x32+.google.bigtable.admin.v2.OperationProgressB\r\n\x0bsource_info"l\n\x1dOptimizeRestoredTableMetadata\x12\x0c\n\x04name\x18\x01 \x01(\t\x12=\n\x08progress\x18\x02 
\x01(\x0b\x32+.google.bigtable.admin.v2.OperationProgress2\xc8$\n\x12\x42igtableTableAdmin\x12\xab\x01\n\x0b\x43reateTable\x12,.google.bigtable.admin.v2.CreateTableRequest\x1a\x1f.google.bigtable.admin.v2.Table"M\x82\xd3\xe4\x93\x02/"*/v2/{parent=projects/*/instances/*}/tables:\x01*\xda\x41\x15parent,table_id,table\x12\x8a\x02\n\x17\x43reateTableFromSnapshot\x12\x38.google.bigtable.admin.v2.CreateTableFromSnapshotRequest\x1a\x1d.google.longrunning.Operation"\x95\x01\x82\xd3\xe4\x93\x02\x42"=/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot:\x01*\xda\x41\x1fparent,table_id,source_snapshot\xca\x41(\n\x05Table\x12\x1f\x43reateTableFromSnapshotMetadata\x12\xa4\x01\n\nListTables\x12+.google.bigtable.admin.v2.ListTablesRequest\x1a,.google.bigtable.admin.v2.ListTablesResponse";\x82\xd3\xe4\x93\x02,\x12*/v2/{parent=projects/*/instances/*}/tables\xda\x41\x06parent\x12\x91\x01\n\x08GetTable\x12).google.bigtable.admin.v2.GetTableRequest\x1a\x1f.google.bigtable.admin.v2.Table"9\x82\xd3\xe4\x93\x02,\x12*/v2/{name=projects/*/instances/*/tables/*}\xda\x41\x04name\x12\x8e\x01\n\x0b\x44\x65leteTable\x12,.google.bigtable.admin.v2.DeleteTableRequest\x1a\x16.google.protobuf.Empty"9\x82\xd3\xe4\x93\x02,**/v2/{name=projects/*/instances/*/tables/*}\xda\x41\x04name\x12\xcf\x01\n\x14ModifyColumnFamilies\x12\x35.google.bigtable.admin.v2.ModifyColumnFamiliesRequest\x1a\x1f.google.bigtable.admin.v2.Table"_\x82\xd3\xe4\x93\x02\x44"?/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies:\x01*\xda\x41\x12name,modifications\x12\x99\x01\n\x0c\x44ropRowRange\x12-.google.bigtable.admin.v2.DropRowRangeRequest\x1a\x16.google.protobuf.Empty"B\x82\xd3\xe4\x93\x02<"7/v2/{name=projects/*/instances/*/tables/*}:dropRowRange:\x01*\x12\xe8\x01\n\x18GenerateConsistencyToken\x12\x39.google.bigtable.admin.v2.GenerateConsistencyTokenRequest\x1a:.google.bigtable.admin.v2.GenerateConsistencyTokenResponse"U\x82\xd3\xe4\x93\x02H"C/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken:\x01*\xda\x41\x04name\x12\xda\x01\n\x10\x43heckConsistency\x12\x31.google.bigtable.admin.v2.CheckConsistencyRequest\x1a\x32.google.bigtable.admin.v2.CheckConsistencyResponse"_\x82\xd3\xe4\x93\x02@";/v2/{name=projects/*/instances/*/tables/*}:checkConsistency:\x01*\xda\x41\x16name,consistency_token\x12\xea\x01\n\rSnapshotTable\x12..google.bigtable.admin.v2.SnapshotTableRequest\x1a\x1d.google.longrunning.Operation"\x89\x01\x82\xd3\xe4\x93\x02\x38"3/v2/{name=projects/*/instances/*/tables/*}:snapshot:\x01*\xda\x41$name,cluster,snapshot_id,description\xca\x41!\n\x08Snapshot\x12\x15SnapshotTableMetadata\x12\xa8\x01\n\x0bGetSnapshot\x12,.google.bigtable.admin.v2.GetSnapshotRequest\x1a".google.bigtable.admin.v2.Snapshot"G\x82\xd3\xe4\x93\x02:\x12\x38/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}\xda\x41\x04name\x12\xbb\x01\n\rListSnapshots\x12..google.bigtable.admin.v2.ListSnapshotsRequest\x1a/.google.bigtable.admin.v2.ListSnapshotsResponse"I\x82\xd3\xe4\x93\x02:\x12\x38/v2/{parent=projects/*/instances/*/clusters/*}/snapshots\xda\x41\x06parent\x12\xa2\x01\n\x0e\x44\x65leteSnapshot\x12/.google.bigtable.admin.v2.DeleteSnapshotRequest\x1a\x16.google.protobuf.Empty"G\x82\xd3\xe4\x93\x02:*8/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}\xda\x41\x04name\x12\xe0\x01\n\x0c\x43reateBackup\x12-.google.bigtable.admin.v2.CreateBackupRequest\x1a\x1d.google.longrunning.Operation"\x81\x01\x82\xd3\xe4\x93\x02@"6/v2/{parent=projects/*/instances/*/clusters/*}/backups:\x06\x62\x61\x63kup\xca\x41\x1e\n\x06\x42\x61\x63kup\x12\x14\x43reateBa
ckupMetadata\xda\x41\x17parent,backup_id,backup\x12\xa0\x01\n\tGetBackup\x12*.google.bigtable.admin.v2.GetBackupRequest\x1a .google.bigtable.admin.v2.Backup"E\x82\xd3\xe4\x93\x02\x38\x12\x36/v2/{name=projects/*/instances/*/clusters/*/backups/*}\xda\x41\x04name\x12\xc3\x01\n\x0cUpdateBackup\x12-.google.bigtable.admin.v2.UpdateBackupRequest\x1a .google.bigtable.admin.v2.Backup"b\x82\xd3\xe4\x93\x02G2=/v2/{backup.name=projects/*/instances/*/clusters/*/backups/*}:\x06\x62\x61\x63kup\xda\x41\x12\x62\x61\x63kup,update_mask\x12\x9c\x01\n\x0c\x44\x65leteBackup\x12-.google.bigtable.admin.v2.DeleteBackupRequest\x1a\x16.google.protobuf.Empty"E\x82\xd3\xe4\x93\x02\x38*6/v2/{name=projects/*/instances/*/clusters/*/backups/*}\xda\x41\x04name\x12\xb3\x01\n\x0bListBackups\x12,.google.bigtable.admin.v2.ListBackupsRequest\x1a-.google.bigtable.admin.v2.ListBackupsResponse"G\x82\xd3\xe4\x93\x02\x38\x12\x36/v2/{parent=projects/*/instances/*/clusters/*}/backups\xda\x41\x06parent\x12\xbb\x01\n\x0cRestoreTable\x12-.google.bigtable.admin.v2.RestoreTableRequest\x1a\x1d.google.longrunning.Operation"]\x82\xd3\xe4\x93\x02\x37"2/v2/{parent=projects/*/instances/*}/tables:restore:\x01*\xca\x41\x1d\n\x05Table\x12\x14RestoreTableMetadata\x12\x9c\x01\n\x0cGetIamPolicy\x12".google.iam.v1.GetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"Q\x82\xd3\xe4\x93\x02@";/v2/{resource=projects/*/instances/*/tables/*}:getIamPolicy:\x01*\xda\x41\x08resource\x12\xf3\x01\n\x0cSetIamPolicy\x12".google.iam.v1.SetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"\xa7\x01\x82\xd3\xe4\x93\x02\x8e\x01";/v2/{resource=projects/*/instances/*/tables/*}:setIamPolicy:\x01*ZL"G/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:setIamPolicy:\x01*\xda\x41\x0fresource,policy\x12\xa4\x02\n\x12TestIamPermissions\x12(.google.iam.v1.TestIamPermissionsRequest\x1a).google.iam.v1.TestIamPermissionsResponse"\xb8\x01\x82\xd3\xe4\x93\x02\x9a\x01"A/v2/{resource=projects/*/instances/*/tables/*}:testIamPermissions:\x01*ZR"M/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:testIamPermissions:\x01*\xda\x41\x14resource,permissions\x1a\xde\x02\xca\x41\x1c\x62igtableadmin.googleapis.com\xd2\x41\xbb\x02https://www.googleapis.com/auth/bigtable.admin,https://www.googleapis.com/auth/bigtable.admin.table,https://www.googleapis.com/auth/cloud-bigtable.admin,https://www.googleapis.com/auth/cloud-bigtable.admin.table,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-onlyB\xdf\x01\n\x1c\x63om.google.bigtable.admin.v2B\x17\x42igtableTableAdminProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2\xea\x02"Google::Cloud::Bigtable::Admin::V2b\x06proto3', dependencies=[ google_dot_api_dot_annotations__pb2.DESCRIPTOR, google_dot_api_dot_client__pb2.DESCRIPTOR, @@ -2440,6 +2440,7 @@ "DESCRIPTOR": _CREATETABLEREQUEST_SPLIT, "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", "__doc__": """An initial split point for a newly created table. + Attributes: key: Row key to use as an initial tablet boundary. @@ -2451,6 +2452,7 @@ "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.Creat eTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] + Attributes: parent: Required. The unique name of the instance in which to create @@ -2496,6 +2498,7 @@ Cloud Bigtable customers. 
This feature might be changed in backward- incompatible ways and is not recommended for production use. It is not subject to any SLA or deprecation policy. + Attributes: parent: Required. The unique name of the instance in which to create @@ -2524,6 +2527,7 @@ "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.DropR owRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] + Attributes: name: Required. The unique name of the table on which to drop a @@ -2551,6 +2555,7 @@ "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.ListT ables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] + Attributes: parent: Required. The unique name of the instance for which tables @@ -2583,6 +2588,7 @@ "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", "__doc__": """Response message for [google.bigtable.admin.v2.BigtableTableAdmin.List Tables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] + Attributes: tables: The tables present in the requested instance. @@ -2604,6 +2610,7 @@ "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.GetTa ble][google.bigtable.admin.v2.BigtableTableAdmin.GetTable] + Attributes: name: Required. The unique name of the requested table. Values are @@ -2626,6 +2633,7 @@ "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.Delet eTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable] + Attributes: name: Required. The unique name of the table to be deleted. Values @@ -2648,6 +2656,7 @@ "DESCRIPTOR": _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION, "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", "__doc__": """A create, update, or delete of a particular column family. + Attributes: id: The ID of the column family to be modified. @@ -2671,6 +2680,7 @@ "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.Modif yColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyCol umnFamilies] + Attributes: name: Required. The unique name of the table whose families should @@ -2698,6 +2708,7 @@ "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.Gener ateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.Gener ateConsistencyToken] + Attributes: name: Required. The unique name of the Table for which to create a @@ -2718,6 +2729,7 @@ "__doc__": """Response message for [google.bigtable.admin.v2.BigtableTableAdmin.Gene rateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.Gene rateConsistencyToken] + Attributes: consistency_token: The generated consistency token. @@ -2736,6 +2748,7 @@ "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.Check Consistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsiste ncy] + Attributes: name: Required. The unique name of the Table for which to check @@ -2759,6 +2772,7 @@ "__doc__": """Response message for [google.bigtable.admin.v2.BigtableTableAdmin.Chec kConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsist ency] + Attributes: consistent: True only if the token is consistent. A token is consistent if @@ -2783,6 +2797,7 @@ customers. 
This feature might be changed in backward-incompatible ways and is not recommended for production use. It is not subject to any SLA or deprecation policy. + Attributes: name: Required. The unique name of the table to have the snapshot @@ -2825,6 +2840,7 @@ customers. This feature might be changed in backward-incompatible ways and is not recommended for production use. It is not subject to any SLA or deprecation policy. + Attributes: name: Required. The unique name of the requested snapshot. Values @@ -2849,6 +2865,7 @@ customers. This feature might be changed in backward-incompatible ways and is not recommended for production use. It is not subject to any SLA or deprecation policy. + Attributes: parent: Required. The unique name of the cluster for which snapshots @@ -2880,6 +2897,7 @@ customers. This feature might be changed in backward-incompatible ways and is not recommended for production use. It is not subject to any SLA or deprecation policy. + Attributes: snapshots: The snapshots present in the requested cluster. @@ -2906,6 +2924,7 @@ customers. This feature might be changed in backward-incompatible ways and is not recommended for production use. It is not subject to any SLA or deprecation policy. + Attributes: name: Required. The unique name of the snapshot to be deleted. @@ -2929,6 +2948,7 @@ feature might be changed in backward-incompatible ways and is not recommended for production use. It is not subject to any SLA or deprecation policy. + Attributes: original_request: The request that prompted the initiation of this SnapshotTable @@ -2956,6 +2976,7 @@ customers. This feature might be changed in backward-incompatible ways and is not recommended for production use. It is not subject to any SLA or deprecation policy. + Attributes: original_request: The request that prompted the initiation of this @@ -2979,6 +3000,7 @@ "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", "__doc__": """The request for [CreateBackup][google.bigtable.admin.v2.BigtableTableA dmin.CreateBackup]. + Attributes: parent: Required. This must be one of the clusters in the instance in @@ -3009,6 +3031,7 @@ "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", "__doc__": """Metadata type for the operation returned by [CreateBackup][google.bigt able.admin.v2.BigtableTableAdmin.CreateBackup]. + Attributes: name: The name of the backup being created. @@ -3033,6 +3056,7 @@ "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", "__doc__": """The request for [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup]. + Attributes: name: Required. Name of the backup. Values are of the form ``project @@ -3052,6 +3076,7 @@ "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", "__doc__": """The request for [UpdateBackup][google.bigtable.admin.v2.BigtableTableA dmin.UpdateBackup]. + Attributes: backup: Required. The backup to update. ``backup.name``, and the @@ -3079,6 +3104,7 @@ "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", "__doc__": """The request for [DeleteBackup][google.bigtable.admin.v2.BigtableTableA dmin.DeleteBackup]. + Attributes: name: Required. Name of the backup to delete. Values are of the form @@ -3098,6 +3124,7 @@ "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", "__doc__": """The request for [ListBackups][google.bigtable.admin.v2.BigtableTableAd min.ListBackups]. + Attributes: parent: Required. The cluster to list backups from. 
Values are of the @@ -3170,6 +3197,7 @@ "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", "__doc__": """The response for [ListBackups][google.bigtable.admin.v2.BigtableTableA dmin.ListBackups]. + Attributes: backups: The list of matching backups. @@ -3191,6 +3219,7 @@ "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", "__doc__": """The request for [RestoreTable][google.bigtable.admin.v2.BigtableTableA dmin.RestoreTable]. + Attributes: parent: Required. The name of the instance in which to create the @@ -3222,6 +3251,7 @@ "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", "__doc__": """Metadata type for the long-running operation returned by [RestoreTable ][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. + Attributes: name: Name of the table being created and restored to. @@ -3261,6 +3291,7 @@ progress of optimizations performed on a newly restored table. This long-running operation is automatically created by the system after the successful completion of a table restore, and cannot be cancelled. + Attributes: name: Name of the restored table being optimized. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common.proto index 89d24ea97112..17c69d469a0c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common.proto +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common.proto @@ -24,6 +24,7 @@ option java_multiple_files = true; option java_outer_classname = "CommonProto"; option java_package = "com.google.bigtable.admin.v2"; option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2"; +option ruby_package = "Google::Cloud::Bigtable::Admin::V2"; // Storage media types for persisting Bigtable data. 
enum StorageType { diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2.py index f0f5ff399e04..dd668ef3cb2b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2.py @@ -20,9 +20,9 @@ name="google/cloud/bigtable_admin_v2/proto/common.proto", package="google.bigtable.admin.v2", syntax="proto3", - serialized_options=b"\n\034com.google.bigtable.admin.v2B\013CommonProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2", + serialized_options=b'\n\034com.google.bigtable.admin.v2B\013CommonProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2\352\002"Google::Cloud::Bigtable::Admin::V2', create_key=_descriptor._internal_create_key, - serialized_pb=b'\n1google/cloud/bigtable_admin_v2/proto/common.proto\x12\x18google.bigtable.admin.v2\x1a\x1fgoogle/protobuf/timestamp.proto"\x8b\x01\n\x11OperationProgress\x12\x18\n\x10progress_percent\x18\x01 \x01(\x05\x12.\n\nstart_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp*=\n\x0bStorageType\x12\x1c\n\x18STORAGE_TYPE_UNSPECIFIED\x10\x00\x12\x07\n\x03SSD\x10\x01\x12\x07\n\x03HDD\x10\x02\x42\xae\x01\n\x1c\x63om.google.bigtable.admin.v2B\x0b\x43ommonProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2b\x06proto3', + serialized_pb=b'\n1google/cloud/bigtable_admin_v2/proto/common.proto\x12\x18google.bigtable.admin.v2\x1a\x1fgoogle/protobuf/timestamp.proto"\x8b\x01\n\x11OperationProgress\x12\x18\n\x10progress_percent\x18\x01 \x01(\x05\x12.\n\nstart_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp*=\n\x0bStorageType\x12\x1c\n\x18STORAGE_TYPE_UNSPECIFIED\x10\x00\x12\x07\n\x03SSD\x10\x01\x12\x07\n\x03HDD\x10\x02\x42\xd3\x01\n\x1c\x63om.google.bigtable.admin.v2B\x0b\x43ommonProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2\xea\x02"Google::Cloud::Bigtable::Admin::V2b\x06proto3', dependencies=[google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,], ) @@ -167,6 +167,7 @@ "__module__": "google.cloud.bigtable_admin_v2.proto.common_pb2", "__doc__": """Encapsulates progress related information for a Cloud Bigtable long running operation. + Attributes: progress_percent: Percent completion of the operation. 
Values are between 0 and diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance.proto index e15f63ac0309..2086f9707c8b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance.proto +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance.proto @@ -27,6 +27,7 @@ option java_multiple_files = true; option java_outer_classname = "InstanceProto"; option java_package = "com.google.bigtable.admin.v2"; option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2"; +option ruby_package = "Google::Cloud::Bigtable::Admin::V2"; // A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and // the resources that serve them. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py index 58d5a036cd2e..7c6e05fa5152 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py @@ -23,9 +23,9 @@ name="google/cloud/bigtable_admin_v2/proto/instance.proto", package="google.bigtable.admin.v2", syntax="proto3", - serialized_options=b"\n\034com.google.bigtable.admin.v2B\rInstanceProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2", + serialized_options=b'\n\034com.google.bigtable.admin.v2B\rInstanceProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2\352\002"Google::Cloud::Bigtable::Admin::V2', create_key=_descriptor._internal_create_key, - serialized_pb=b'\n3google/cloud/bigtable_admin_v2/proto/instance.proto\x12\x18google.bigtable.admin.v2\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x31google/cloud/bigtable_admin_v2/proto/common.proto"\xdd\x03\n\x08Instance\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12\x19\n\x0c\x64isplay_name\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x37\n\x05state\x18\x03 \x01(\x0e\x32(.google.bigtable.admin.v2.Instance.State\x12\x35\n\x04type\x18\x04 \x01(\x0e\x32\'.google.bigtable.admin.v2.Instance.Type\x12>\n\x06labels\x18\x05 \x03(\x0b\x32..google.bigtable.admin.v2.Instance.LabelsEntry\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"5\n\x05State\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\t\n\x05READY\x10\x01\x12\x0c\n\x08\x43REATING\x10\x02"=\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x0e\n\nPRODUCTION\x10\x01\x12\x0f\n\x0b\x44\x45VELOPMENT\x10\x02:N\xea\x41K\n bigtable.googleapis.com/Instance\x12\'projects/{project}/instances/{instance}"\xa7\x03\n\x07\x43luster\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12\x38\n\x08location\x18\x02 \x01(\tB&\xfa\x41#\n!locations.googleapis.com/Location\x12;\n\x05state\x18\x03 \x01(\x0e\x32\'.google.bigtable.admin.v2.Cluster.StateB\x03\xe0\x41\x03\x12\x18\n\x0bserve_nodes\x18\x04 \x01(\x05\x42\x03\xe0\x41\x02\x12\x43\n\x14\x64\x65\x66\x61ult_storage_type\x18\x05 
\x01(\x0e\x32%.google.bigtable.admin.v2.StorageType"Q\n\x05State\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\t\n\x05READY\x10\x01\x12\x0c\n\x08\x43REATING\x10\x02\x12\x0c\n\x08RESIZING\x10\x03\x12\x0c\n\x08\x44ISABLED\x10\x04:`\xea\x41]\n\x1f\x62igtable.googleapis.com/Cluster\x12:projects/{project}/instances/{instance}/clusters/{cluster}"\xee\x03\n\nAppProfile\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04\x65tag\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t\x12g\n\x1dmulti_cluster_routing_use_any\x18\x05 \x01(\x0b\x32>.google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAnyH\x00\x12[\n\x16single_cluster_routing\x18\x06 \x01(\x0b\x32\x39.google.bigtable.admin.v2.AppProfile.SingleClusterRoutingH\x00\x1a\x1b\n\x19MultiClusterRoutingUseAny\x1aN\n\x14SingleClusterRouting\x12\x12\n\ncluster_id\x18\x01 \x01(\t\x12"\n\x1a\x61llow_transactional_writes\x18\x02 \x01(\x08:j\xea\x41g\n"bigtable.googleapis.com/AppProfile\x12\x41projects/{project}/instances/{instance}/appProfiles/{app_profile}B\x10\n\x0erouting_policyB\xb0\x01\n\x1c\x63om.google.bigtable.admin.v2B\rInstanceProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2b\x06proto3', + serialized_pb=b'\n3google/cloud/bigtable_admin_v2/proto/instance.proto\x12\x18google.bigtable.admin.v2\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x31google/cloud/bigtable_admin_v2/proto/common.proto"\xdd\x03\n\x08Instance\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12\x19\n\x0c\x64isplay_name\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x37\n\x05state\x18\x03 \x01(\x0e\x32(.google.bigtable.admin.v2.Instance.State\x12\x35\n\x04type\x18\x04 \x01(\x0e\x32\'.google.bigtable.admin.v2.Instance.Type\x12>\n\x06labels\x18\x05 \x03(\x0b\x32..google.bigtable.admin.v2.Instance.LabelsEntry\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"5\n\x05State\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\t\n\x05READY\x10\x01\x12\x0c\n\x08\x43REATING\x10\x02"=\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x0e\n\nPRODUCTION\x10\x01\x12\x0f\n\x0b\x44\x45VELOPMENT\x10\x02:N\xea\x41K\n bigtable.googleapis.com/Instance\x12\'projects/{project}/instances/{instance}"\xa7\x03\n\x07\x43luster\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12\x38\n\x08location\x18\x02 \x01(\tB&\xfa\x41#\n!locations.googleapis.com/Location\x12;\n\x05state\x18\x03 \x01(\x0e\x32\'.google.bigtable.admin.v2.Cluster.StateB\x03\xe0\x41\x03\x12\x18\n\x0bserve_nodes\x18\x04 \x01(\x05\x42\x03\xe0\x41\x02\x12\x43\n\x14\x64\x65\x66\x61ult_storage_type\x18\x05 \x01(\x0e\x32%.google.bigtable.admin.v2.StorageType"Q\n\x05State\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\t\n\x05READY\x10\x01\x12\x0c\n\x08\x43REATING\x10\x02\x12\x0c\n\x08RESIZING\x10\x03\x12\x0c\n\x08\x44ISABLED\x10\x04:`\xea\x41]\n\x1f\x62igtable.googleapis.com/Cluster\x12:projects/{project}/instances/{instance}/clusters/{cluster}"\xee\x03\n\nAppProfile\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04\x65tag\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t\x12g\n\x1dmulti_cluster_routing_use_any\x18\x05 \x01(\x0b\x32>.google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAnyH\x00\x12[\n\x16single_cluster_routing\x18\x06 \x01(\x0b\x32\x39.google.bigtable.admin.v2.AppProfile.SingleClusterRoutingH\x00\x1a\x1b\n\x19MultiClusterRoutingUseAny\x1aN\n\x14SingleClusterRouting\x12\x12\n\ncluster_id\x18\x01 
\x01(\t\x12"\n\x1a\x61llow_transactional_writes\x18\x02 \x01(\x08:j\xea\x41g\n"bigtable.googleapis.com/AppProfile\x12\x41projects/{project}/instances/{instance}/appProfiles/{app_profile}B\x10\n\x0erouting_policyB\xd5\x01\n\x1c\x63om.google.bigtable.admin.v2B\rInstanceProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2\xea\x02"Google::Cloud::Bigtable::Admin::V2b\x06proto3', dependencies=[ google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, google_dot_api_dot_resource__pb2.DESCRIPTOR, @@ -724,6 +724,7 @@ "__doc__": """A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and the resources that serve them. All tables in an instance are served from all [Clusters][google.bigtable.admin.v2.Cluster] in the instance. + Attributes: name: The unique name of the instance. Values are of the form @@ -764,6 +765,7 @@ "__doc__": """A resizable group of nodes in a particular cloud location, capable of serving all [Tables][google.bigtable.admin.v2.Table] in the parent [Instance][google.bigtable.admin.v2.Instance]. + Attributes: name: The unique name of the cluster. Values are of the form ``proje @@ -817,6 +819,7 @@ "__doc__": """Unconditionally routes all read/write requests to a specific cluster. This option preserves read-your-writes consistency but does not improve availability. + Attributes: cluster_id: The cluster to which read/write requests should be routed. @@ -833,6 +836,7 @@ "__module__": "google.cloud.bigtable_admin_v2.proto.instance_pb2", "__doc__": """A configuration object describing how Cloud Bigtable should treat traffic from a particular end user application. + Attributes: name: (\ ``OutputOnly``) The unique name of the app profile. Values diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table.proto index 535378989124..e85ca8ca9745 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table.proto +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table.proto @@ -27,6 +27,7 @@ option java_multiple_files = true; option java_outer_classname = "TableProto"; option java_package = "com.google.bigtable.admin.v2"; option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2"; +option ruby_package = "Google::Cloud::Bigtable::Admin::V2"; // Indicates the type of the restore source. 
enum RestoreSourceType { diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py index a52b2c29796b..bd0f478fa09c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py @@ -23,9 +23,9 @@ name="google/cloud/bigtable_admin_v2/proto/table.proto", package="google.bigtable.admin.v2", syntax="proto3", - serialized_options=b"\n\034com.google.bigtable.admin.v2B\nTableProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2", + serialized_options=b'\n\034com.google.bigtable.admin.v2B\nTableProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2\352\002"Google::Cloud::Bigtable::Admin::V2', create_key=_descriptor._internal_create_key, - serialized_pb=b'\n0google/cloud/bigtable_admin_v2/proto/table.proto\x12\x18google.bigtable.admin.v2\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\x9b\x01\n\x0bRestoreInfo\x12@\n\x0bsource_type\x18\x01 \x01(\x0e\x32+.google.bigtable.admin.v2.RestoreSourceType\x12;\n\x0b\x62\x61\x63kup_info\x18\x02 \x01(\x0b\x32$.google.bigtable.admin.v2.BackupInfoH\x00\x42\r\n\x0bsource_info"\xfb\x07\n\x05Table\x12\x0c\n\x04name\x18\x01 \x01(\t\x12J\n\x0e\x63luster_states\x18\x02 \x03(\x0b\x32\x32.google.bigtable.admin.v2.Table.ClusterStatesEntry\x12L\n\x0f\x63olumn_families\x18\x03 \x03(\x0b\x32\x33.google.bigtable.admin.v2.Table.ColumnFamiliesEntry\x12I\n\x0bgranularity\x18\x04 \x01(\x0e\x32\x34.google.bigtable.admin.v2.Table.TimestampGranularity\x12;\n\x0crestore_info\x18\x06 \x01(\x0b\x32%.google.bigtable.admin.v2.RestoreInfo\x1a\xf9\x01\n\x0c\x43lusterState\x12X\n\x11replication_state\x18\x01 \x01(\x0e\x32=.google.bigtable.admin.v2.Table.ClusterState.ReplicationState"\x8e\x01\n\x10ReplicationState\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\x10\n\x0cINITIALIZING\x10\x01\x12\x17\n\x13PLANNED_MAINTENANCE\x10\x02\x12\x19\n\x15UNPLANNED_MAINTENANCE\x10\x03\x12\t\n\x05READY\x10\x04\x12\x14\n\x10READY_OPTIMIZING\x10\x05\x1a\x62\n\x12\x43lusterStatesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12;\n\x05value\x18\x02 \x01(\x0b\x32,.google.bigtable.admin.v2.Table.ClusterState:\x02\x38\x01\x1a]\n\x13\x43olumnFamiliesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x35\n\x05value\x18\x02 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamily:\x02\x38\x01"I\n\x14TimestampGranularity\x12%\n!TIMESTAMP_GRANULARITY_UNSPECIFIED\x10\x00\x12\n\n\x06MILLIS\x10\x01"\\\n\x04View\x12\x14\n\x10VIEW_UNSPECIFIED\x10\x00\x12\r\n\tNAME_ONLY\x10\x01\x12\x0f\n\x0bSCHEMA_VIEW\x10\x02\x12\x14\n\x10REPLICATION_VIEW\x10\x03\x12\x08\n\x04\x46ULL\x10\x04:Z\xea\x41W\n\x1d\x62igtable.googleapis.com/Table\x12\x36projects/{project}/instances/{instance}/tables/{table}"A\n\x0c\x43olumnFamily\x12\x31\n\x07gc_rule\x18\x01 \x01(\x0b\x32 .google.bigtable.admin.v2.GcRule"\xd5\x02\n\x06GcRule\x12\x1a\n\x10max_num_versions\x18\x01 \x01(\x05H\x00\x12,\n\x07max_age\x18\x02 \x01(\x0b\x32\x19.google.protobuf.DurationH\x00\x12\x45\n\x0cintersection\x18\x03 \x01(\x0b\x32-.google.bigtable.admin.v2.GcRule.IntersectionH\x00\x12\x37\n\x05union\x18\x04 
\x01(\x0b\x32&.google.bigtable.admin.v2.GcRule.UnionH\x00\x1a?\n\x0cIntersection\x12/\n\x05rules\x18\x01 \x03(\x0b\x32 .google.bigtable.admin.v2.GcRule\x1a\x38\n\x05Union\x12/\n\x05rules\x18\x01 \x03(\x0b\x32 .google.bigtable.admin.v2.GcRuleB\x06\n\x04rule"\xc7\x03\n\x08Snapshot\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x35\n\x0csource_table\x18\x02 \x01(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12\x17\n\x0f\x64\x61ta_size_bytes\x18\x03 \x01(\x03\x12/\n\x0b\x63reate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x64\x65lete_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x37\n\x05state\x18\x06 \x01(\x0e\x32(.google.bigtable.admin.v2.Snapshot.State\x12\x13\n\x0b\x64\x65scription\x18\x07 \x01(\t"5\n\x05State\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\t\n\x05READY\x10\x01\x12\x0c\n\x08\x43REATING\x10\x02:v\xea\x41s\n bigtable.googleapis.com/Snapshot\x12Oprojects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}"\xd7\x03\n\x06\x42\x61\x63kup\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12\x1c\n\x0csource_table\x18\x02 \x01(\tB\x06\xe0\x41\x05\xe0\x41\x02\x12\x34\n\x0b\x65xpire_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x02\x12\x33\n\nstart_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x31\n\x08\x65nd_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x17\n\nsize_bytes\x18\x06 \x01(\x03\x42\x03\xe0\x41\x03\x12:\n\x05state\x18\x07 \x01(\x0e\x32&.google.bigtable.admin.v2.Backup.StateB\x03\xe0\x41\x03"7\n\x05State\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\x0c\n\x08\x43REATING\x10\x01\x12\t\n\x05READY\x10\x02:p\xea\x41m\n\x1e\x62igtable.googleapis.com/Backup\x12Kprojects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}"\xa4\x01\n\nBackupInfo\x12\x13\n\x06\x62\x61\x63kup\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12\x33\n\nstart_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x31\n\x08\x65nd_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x19\n\x0csource_table\x18\x04 \x01(\tB\x03\xe0\x41\x03*D\n\x11RestoreSourceType\x12#\n\x1fRESTORE_SOURCE_TYPE_UNSPECIFIED\x10\x00\x12\n\n\x06\x42\x41\x43KUP\x10\x01\x42\xad\x01\n\x1c\x63om.google.bigtable.admin.v2B\nTableProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2b\x06proto3', + serialized_pb=b'\n0google/cloud/bigtable_admin_v2/proto/table.proto\x12\x18google.bigtable.admin.v2\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\x9b\x01\n\x0bRestoreInfo\x12@\n\x0bsource_type\x18\x01 \x01(\x0e\x32+.google.bigtable.admin.v2.RestoreSourceType\x12;\n\x0b\x62\x61\x63kup_info\x18\x02 \x01(\x0b\x32$.google.bigtable.admin.v2.BackupInfoH\x00\x42\r\n\x0bsource_info"\xfb\x07\n\x05Table\x12\x0c\n\x04name\x18\x01 \x01(\t\x12J\n\x0e\x63luster_states\x18\x02 \x03(\x0b\x32\x32.google.bigtable.admin.v2.Table.ClusterStatesEntry\x12L\n\x0f\x63olumn_families\x18\x03 \x03(\x0b\x32\x33.google.bigtable.admin.v2.Table.ColumnFamiliesEntry\x12I\n\x0bgranularity\x18\x04 \x01(\x0e\x32\x34.google.bigtable.admin.v2.Table.TimestampGranularity\x12;\n\x0crestore_info\x18\x06 \x01(\x0b\x32%.google.bigtable.admin.v2.RestoreInfo\x1a\xf9\x01\n\x0c\x43lusterState\x12X\n\x11replication_state\x18\x01 
\x01(\x0e\x32=.google.bigtable.admin.v2.Table.ClusterState.ReplicationState"\x8e\x01\n\x10ReplicationState\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\x10\n\x0cINITIALIZING\x10\x01\x12\x17\n\x13PLANNED_MAINTENANCE\x10\x02\x12\x19\n\x15UNPLANNED_MAINTENANCE\x10\x03\x12\t\n\x05READY\x10\x04\x12\x14\n\x10READY_OPTIMIZING\x10\x05\x1a\x62\n\x12\x43lusterStatesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12;\n\x05value\x18\x02 \x01(\x0b\x32,.google.bigtable.admin.v2.Table.ClusterState:\x02\x38\x01\x1a]\n\x13\x43olumnFamiliesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x35\n\x05value\x18\x02 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamily:\x02\x38\x01"I\n\x14TimestampGranularity\x12%\n!TIMESTAMP_GRANULARITY_UNSPECIFIED\x10\x00\x12\n\n\x06MILLIS\x10\x01"\\\n\x04View\x12\x14\n\x10VIEW_UNSPECIFIED\x10\x00\x12\r\n\tNAME_ONLY\x10\x01\x12\x0f\n\x0bSCHEMA_VIEW\x10\x02\x12\x14\n\x10REPLICATION_VIEW\x10\x03\x12\x08\n\x04\x46ULL\x10\x04:Z\xea\x41W\n\x1d\x62igtable.googleapis.com/Table\x12\x36projects/{project}/instances/{instance}/tables/{table}"A\n\x0c\x43olumnFamily\x12\x31\n\x07gc_rule\x18\x01 \x01(\x0b\x32 .google.bigtable.admin.v2.GcRule"\xd5\x02\n\x06GcRule\x12\x1a\n\x10max_num_versions\x18\x01 \x01(\x05H\x00\x12,\n\x07max_age\x18\x02 \x01(\x0b\x32\x19.google.protobuf.DurationH\x00\x12\x45\n\x0cintersection\x18\x03 \x01(\x0b\x32-.google.bigtable.admin.v2.GcRule.IntersectionH\x00\x12\x37\n\x05union\x18\x04 \x01(\x0b\x32&.google.bigtable.admin.v2.GcRule.UnionH\x00\x1a?\n\x0cIntersection\x12/\n\x05rules\x18\x01 \x03(\x0b\x32 .google.bigtable.admin.v2.GcRule\x1a\x38\n\x05Union\x12/\n\x05rules\x18\x01 \x03(\x0b\x32 .google.bigtable.admin.v2.GcRuleB\x06\n\x04rule"\xc7\x03\n\x08Snapshot\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x35\n\x0csource_table\x18\x02 \x01(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12\x17\n\x0f\x64\x61ta_size_bytes\x18\x03 \x01(\x03\x12/\n\x0b\x63reate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x64\x65lete_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x37\n\x05state\x18\x06 \x01(\x0e\x32(.google.bigtable.admin.v2.Snapshot.State\x12\x13\n\x0b\x64\x65scription\x18\x07 \x01(\t"5\n\x05State\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\t\n\x05READY\x10\x01\x12\x0c\n\x08\x43REATING\x10\x02:v\xea\x41s\n bigtable.googleapis.com/Snapshot\x12Oprojects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}"\xd7\x03\n\x06\x42\x61\x63kup\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12\x1c\n\x0csource_table\x18\x02 \x01(\tB\x06\xe0\x41\x05\xe0\x41\x02\x12\x34\n\x0b\x65xpire_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x02\x12\x33\n\nstart_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x31\n\x08\x65nd_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x17\n\nsize_bytes\x18\x06 \x01(\x03\x42\x03\xe0\x41\x03\x12:\n\x05state\x18\x07 \x01(\x0e\x32&.google.bigtable.admin.v2.Backup.StateB\x03\xe0\x41\x03"7\n\x05State\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\x0c\n\x08\x43REATING\x10\x01\x12\t\n\x05READY\x10\x02:p\xea\x41m\n\x1e\x62igtable.googleapis.com/Backup\x12Kprojects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}"\xa4\x01\n\nBackupInfo\x12\x13\n\x06\x62\x61\x63kup\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12\x33\n\nstart_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x31\n\x08\x65nd_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x19\n\x0csource_table\x18\x04 
\x01(\tB\x03\xe0\x41\x03*D\n\x11RestoreSourceType\x12#\n\x1fRESTORE_SOURCE_TYPE_UNSPECIFIED\x10\x00\x12\n\n\x06\x42\x41\x43KUP\x10\x01\x42\xd2\x01\n\x1c\x63om.google.bigtable.admin.v2B\nTableProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2\xea\x02"Google::Cloud::Bigtable::Admin::V2b\x06proto3', dependencies=[ google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, google_dot_api_dot_resource__pb2.DESCRIPTOR, @@ -1367,6 +1367,7 @@ "DESCRIPTOR": _RESTOREINFO, "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", "__doc__": """Information about a table restore. + Attributes: source_type: The type of the restore source. @@ -1392,6 +1393,7 @@ "DESCRIPTOR": _TABLE_CLUSTERSTATE, "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", "__doc__": """The state of a table’s data in a particular cluster. + Attributes: replication_state: Output only. The state of replication for the table in this @@ -1422,6 +1424,7 @@ "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", "__doc__": """A collection of user data indexed by row, column, and timestamp. Each table is served using the resources of its parent cluster. + Attributes: name: Output only. The unique name of the table. Values are of the @@ -1465,6 +1468,7 @@ "DESCRIPTOR": _COLUMNFAMILY, "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", "__doc__": """A set of columns within a table which share a common configuration. + Attributes: gc_rule: Garbage collection rule specified as a protobuf. Must @@ -1489,6 +1493,7 @@ "DESCRIPTOR": _GCRULE_INTERSECTION, "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", "__doc__": """A GcRule which deletes cells matching all of the given rules. + Attributes: rules: Only delete cells which would be deleted by every element of @@ -1504,6 +1509,7 @@ "DESCRIPTOR": _GCRULE_UNION, "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", "__doc__": """A GcRule which deletes cells matching any of the given rules. + Attributes: rules: Delete cells which would be deleted by any element of @@ -1515,6 +1521,7 @@ "DESCRIPTOR": _GCRULE, "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", "__doc__": """Rule for determining which cells to delete during garbage collection. + Attributes: rule: Garbage collection rules. @@ -1549,6 +1556,7 @@ customers. This feature might be changed in backward-incompatible ways and is not recommended for production use. It is not subject to any SLA or deprecation policy. + Attributes: name: Output only. The unique name of the snapshot. Values are of @@ -1586,6 +1594,7 @@ "DESCRIPTOR": _BACKUP, "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", "__doc__": """A backup of a Cloud Bigtable table. + Attributes: name: Output only. A globally unique identifier for the backup which @@ -1634,6 +1643,7 @@ "DESCRIPTOR": _BACKUPINFO, "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", "__doc__": """Information about a backup. + Attributes: backup: Output only. Name of the backup. 
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable.proto index c54225ed3fd9..32aaba21d05e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable.proto +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable.proto @@ -30,6 +30,7 @@ option java_multiple_files = true; option java_outer_classname = "BigtableProto"; option java_package = "com.google.bigtable.v2"; option php_namespace = "Google\\Cloud\\Bigtable\\V2"; +option ruby_package = "Google::Cloud::Bigtable::V2"; option (google.api.resource_definition) = { type: "bigtable.googleapis.com/Table" pattern: "projects/{project}/instances/{instance}/tables/{table}" diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py index 9da778ae74e5..b0f13cbba51e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py @@ -27,9 +27,9 @@ name="google/cloud/bigtable_v2/proto/bigtable.proto", package="google.bigtable.v2", syntax="proto3", - serialized_options=b"\n\026com.google.bigtable.v2B\rBigtableProtoP\001Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2\352AW\n\035bigtable.googleapis.com/Table\0226projects/{project}/instances/{instance}/tables/{table}", + serialized_options=b"\n\026com.google.bigtable.v2B\rBigtableProtoP\001Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2\352\002\033Google::Cloud::Bigtable::V2\352AW\n\035bigtable.googleapis.com/Table\0226projects/{project}/instances/{instance}/tables/{table}", create_key=_descriptor._internal_create_key, - serialized_pb=b'\n-google/cloud/bigtable_v2/proto/bigtable.proto\x12\x12google.bigtable.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a)google/cloud/bigtable_v2/proto/data.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x17google/rpc/status.proto"\xd1\x01\n\x0fReadRowsRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x05 \x01(\t\x12(\n\x04rows\x18\x02 \x01(\x0b\x32\x1a.google.bigtable.v2.RowSet\x12-\n\x06\x66ilter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x12\n\nrows_limit\x18\x04 \x01(\x03"\xf8\x02\n\x10ReadRowsResponse\x12>\n\x06\x63hunks\x18\x01 \x03(\x0b\x32..google.bigtable.v2.ReadRowsResponse.CellChunk\x12\x1c\n\x14last_scanned_row_key\x18\x02 \x01(\x0c\x1a\x85\x02\n\tCellChunk\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x31\n\x0b\x66\x61mily_name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\tqualifier\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.BytesValue\x12\x18\n\x10timestamp_micros\x18\x04 \x01(\x03\x12\x0e\n\x06labels\x18\x05 \x03(\t\x12\r\n\x05value\x18\x06 \x01(\x0c\x12\x12\n\nvalue_size\x18\x07 \x01(\x05\x12\x13\n\treset_row\x18\x08 \x01(\x08H\x00\x12\x14\n\ncommit_row\x18\t \x01(\x08H\x00\x42\x0c\n\nrow_status"i\n\x14SampleRowKeysRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x02 
\x01(\t">\n\x15SampleRowKeysResponse\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x14\n\x0coffset_bytes\x18\x02 \x01(\x03"\xb1\x01\n\x10MutateRowRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x04 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12\x34\n\tmutations\x18\x03 \x03(\x0b\x32\x1c.google.bigtable.v2.MutationB\x03\xe0\x41\x02"\x13\n\x11MutateRowResponse"\xf9\x01\n\x11MutateRowsRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x03 \x01(\t\x12\x41\n\x07\x65ntries\x18\x02 \x03(\x0b\x32+.google.bigtable.v2.MutateRowsRequest.EntryB\x03\xe0\x41\x02\x1aN\n\x05\x45ntry\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x34\n\tmutations\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.MutationB\x03\xe0\x41\x02"\x8f\x01\n\x12MutateRowsResponse\x12=\n\x07\x65ntries\x18\x01 \x03(\x0b\x32,.google.bigtable.v2.MutateRowsResponse.Entry\x1a:\n\x05\x45ntry\x12\r\n\x05index\x18\x01 \x01(\x03\x12"\n\x06status\x18\x02 \x01(\x0b\x32\x12.google.rpc.Status"\xa9\x02\n\x18\x43heckAndMutateRowRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x07 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12\x37\n\x10predicate_filter\x18\x06 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x34\n\x0etrue_mutations\x18\x04 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\x12\x35\n\x0f\x66\x61lse_mutations\x18\x05 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation"6\n\x19\x43heckAndMutateRowResponse\x12\x19\n\x11predicate_matched\x18\x01 \x01(\x08"\xc1\x01\n\x19ReadModifyWriteRowRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x04 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12;\n\x05rules\x18\x03 \x03(\x0b\x32\'.google.bigtable.v2.ReadModifyWriteRuleB\x03\xe0\x41\x02"B\n\x1aReadModifyWriteRowResponse\x12$\n\x03row\x18\x01 
\x01(\x0b\x32\x17.google.bigtable.v2.Row2\xc4\x0e\n\x08\x42igtable\x12\xc6\x01\n\x08ReadRows\x12#.google.bigtable.v2.ReadRowsRequest\x1a$.google.bigtable.v2.ReadRowsResponse"m\x82\xd3\xe4\x93\x02>"9/v2/{table_name=projects/*/instances/*/tables/*}:readRows:\x01*\xda\x41\ntable_name\xda\x41\x19table_name,app_profile_id0\x01\x12\xd7\x01\n\rSampleRowKeys\x12(.google.bigtable.v2.SampleRowKeysRequest\x1a).google.bigtable.v2.SampleRowKeysResponse"o\x82\xd3\xe4\x93\x02@\x12>/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys\xda\x41\ntable_name\xda\x41\x19table_name,app_profile_id0\x01\x12\xed\x01\n\tMutateRow\x12$.google.bigtable.v2.MutateRowRequest\x1a%.google.bigtable.v2.MutateRowResponse"\x92\x01\x82\xd3\xe4\x93\x02?":/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow:\x01*\xda\x41\x1ctable_name,row_key,mutations\xda\x41+table_name,row_key,mutations,app_profile_id\x12\xde\x01\n\nMutateRows\x12%.google.bigtable.v2.MutateRowsRequest\x1a&.google.bigtable.v2.MutateRowsResponse"\x7f\x82\xd3\xe4\x93\x02@";/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows:\x01*\xda\x41\x12table_name,entries\xda\x41!table_name,entries,app_profile_id0\x01\x12\xd9\x02\n\x11\x43heckAndMutateRow\x12,.google.bigtable.v2.CheckAndMutateRowRequest\x1a-.google.bigtable.v2.CheckAndMutateRowResponse"\xe6\x01\x82\xd3\xe4\x93\x02G"B/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow:\x01*\xda\x41\x42table_name,row_key,predicate_filter,true_mutations,false_mutations\xda\x41Qtable_name,row_key,predicate_filter,true_mutations,false_mutations,app_profile_id\x12\x89\x02\n\x12ReadModifyWriteRow\x12-.google.bigtable.v2.ReadModifyWriteRowRequest\x1a..google.bigtable.v2.ReadModifyWriteRowResponse"\x93\x01\x82\xd3\xe4\x93\x02H"C/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow:\x01*\xda\x41\x18table_name,row_key,rules\xda\x41\'table_name,row_key,rules,app_profile_id\x1a\xdb\x02\xca\x41\x17\x62igtable.googleapis.com\xd2\x41\xbd\x02https://www.googleapis.com/auth/bigtable.data,https://www.googleapis.com/auth/bigtable.data.readonly,https://www.googleapis.com/auth/cloud-bigtable.data,https://www.googleapis.com/auth/cloud-bigtable.data.readonly,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-onlyB\xf5\x01\n\x16\x63om.google.bigtable.v2B\rBigtableProtoP\x01Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2\xea\x41W\n\x1d\x62igtable.googleapis.com/Table\x12\x36projects/{project}/instances/{instance}/tables/{table}b\x06proto3', + serialized_pb=b'\n-google/cloud/bigtable_v2/proto/bigtable.proto\x12\x12google.bigtable.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a)google/cloud/bigtable_v2/proto/data.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x17google/rpc/status.proto"\xd1\x01\n\x0fReadRowsRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x05 \x01(\t\x12(\n\x04rows\x18\x02 \x01(\x0b\x32\x1a.google.bigtable.v2.RowSet\x12-\n\x06\x66ilter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x12\n\nrows_limit\x18\x04 \x01(\x03"\xf8\x02\n\x10ReadRowsResponse\x12>\n\x06\x63hunks\x18\x01 \x03(\x0b\x32..google.bigtable.v2.ReadRowsResponse.CellChunk\x12\x1c\n\x14last_scanned_row_key\x18\x02 \x01(\x0c\x1a\x85\x02\n\tCellChunk\x12\x0f\n\x07row_key\x18\x01 
\x01(\x0c\x12\x31\n\x0b\x66\x61mily_name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\tqualifier\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.BytesValue\x12\x18\n\x10timestamp_micros\x18\x04 \x01(\x03\x12\x0e\n\x06labels\x18\x05 \x03(\t\x12\r\n\x05value\x18\x06 \x01(\x0c\x12\x12\n\nvalue_size\x18\x07 \x01(\x05\x12\x13\n\treset_row\x18\x08 \x01(\x08H\x00\x12\x14\n\ncommit_row\x18\t \x01(\x08H\x00\x42\x0c\n\nrow_status"i\n\x14SampleRowKeysRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t">\n\x15SampleRowKeysResponse\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x14\n\x0coffset_bytes\x18\x02 \x01(\x03"\xb1\x01\n\x10MutateRowRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x04 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12\x34\n\tmutations\x18\x03 \x03(\x0b\x32\x1c.google.bigtable.v2.MutationB\x03\xe0\x41\x02"\x13\n\x11MutateRowResponse"\xf9\x01\n\x11MutateRowsRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x03 \x01(\t\x12\x41\n\x07\x65ntries\x18\x02 \x03(\x0b\x32+.google.bigtable.v2.MutateRowsRequest.EntryB\x03\xe0\x41\x02\x1aN\n\x05\x45ntry\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x34\n\tmutations\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.MutationB\x03\xe0\x41\x02"\x8f\x01\n\x12MutateRowsResponse\x12=\n\x07\x65ntries\x18\x01 \x03(\x0b\x32,.google.bigtable.v2.MutateRowsResponse.Entry\x1a:\n\x05\x45ntry\x12\r\n\x05index\x18\x01 \x01(\x03\x12"\n\x06status\x18\x02 \x01(\x0b\x32\x12.google.rpc.Status"\xa9\x02\n\x18\x43heckAndMutateRowRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x07 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12\x37\n\x10predicate_filter\x18\x06 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x34\n\x0etrue_mutations\x18\x04 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\x12\x35\n\x0f\x66\x61lse_mutations\x18\x05 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation"6\n\x19\x43heckAndMutateRowResponse\x12\x19\n\x11predicate_matched\x18\x01 \x01(\x08"\xc1\x01\n\x19ReadModifyWriteRowRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x04 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12;\n\x05rules\x18\x03 \x03(\x0b\x32\'.google.bigtable.v2.ReadModifyWriteRuleB\x03\xe0\x41\x02"B\n\x1aReadModifyWriteRowResponse\x12$\n\x03row\x18\x01 
\x01(\x0b\x32\x17.google.bigtable.v2.Row2\xc4\x0e\n\x08\x42igtable\x12\xc6\x01\n\x08ReadRows\x12#.google.bigtable.v2.ReadRowsRequest\x1a$.google.bigtable.v2.ReadRowsResponse"m\x82\xd3\xe4\x93\x02>"9/v2/{table_name=projects/*/instances/*/tables/*}:readRows:\x01*\xda\x41\ntable_name\xda\x41\x19table_name,app_profile_id0\x01\x12\xd7\x01\n\rSampleRowKeys\x12(.google.bigtable.v2.SampleRowKeysRequest\x1a).google.bigtable.v2.SampleRowKeysResponse"o\x82\xd3\xe4\x93\x02@\x12>/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys\xda\x41\ntable_name\xda\x41\x19table_name,app_profile_id0\x01\x12\xed\x01\n\tMutateRow\x12$.google.bigtable.v2.MutateRowRequest\x1a%.google.bigtable.v2.MutateRowResponse"\x92\x01\x82\xd3\xe4\x93\x02?":/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow:\x01*\xda\x41\x1ctable_name,row_key,mutations\xda\x41+table_name,row_key,mutations,app_profile_id\x12\xde\x01\n\nMutateRows\x12%.google.bigtable.v2.MutateRowsRequest\x1a&.google.bigtable.v2.MutateRowsResponse"\x7f\x82\xd3\xe4\x93\x02@";/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows:\x01*\xda\x41\x12table_name,entries\xda\x41!table_name,entries,app_profile_id0\x01\x12\xd9\x02\n\x11\x43heckAndMutateRow\x12,.google.bigtable.v2.CheckAndMutateRowRequest\x1a-.google.bigtable.v2.CheckAndMutateRowResponse"\xe6\x01\x82\xd3\xe4\x93\x02G"B/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow:\x01*\xda\x41\x42table_name,row_key,predicate_filter,true_mutations,false_mutations\xda\x41Qtable_name,row_key,predicate_filter,true_mutations,false_mutations,app_profile_id\x12\x89\x02\n\x12ReadModifyWriteRow\x12-.google.bigtable.v2.ReadModifyWriteRowRequest\x1a..google.bigtable.v2.ReadModifyWriteRowResponse"\x93\x01\x82\xd3\xe4\x93\x02H"C/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow:\x01*\xda\x41\x18table_name,row_key,rules\xda\x41\'table_name,row_key,rules,app_profile_id\x1a\xdb\x02\xca\x41\x17\x62igtable.googleapis.com\xd2\x41\xbd\x02https://www.googleapis.com/auth/bigtable.data,https://www.googleapis.com/auth/bigtable.data.readonly,https://www.googleapis.com/auth/cloud-bigtable.data,https://www.googleapis.com/auth/cloud-bigtable.data.readonly,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-onlyB\x93\x02\n\x16\x63om.google.bigtable.v2B\rBigtableProtoP\x01Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2\xea\x02\x1bGoogle::Cloud::Bigtable::V2\xea\x41W\n\x1d\x62igtable.googleapis.com/Table\x12\x36projects/{project}/instances/{instance}/tables/{table}b\x06proto3', dependencies=[ google_dot_api_dot_annotations__pb2.DESCRIPTOR, google_dot_api_dot_client__pb2.DESCRIPTOR, @@ -1295,6 +1295,7 @@ "DESCRIPTOR": _READROWSREQUEST, "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", "__doc__": """Request message for Bigtable.ReadRows. + Attributes: table_name: Required. The unique name of the table from which to read. @@ -1330,6 +1331,7 @@ "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", "__doc__": """Specifies a piece of a row’s contents returned as part of the read response stream. + Attributes: row_key: The row key for this chunk of data. If the row key is empty, @@ -1388,6 +1390,7 @@ "DESCRIPTOR": _READROWSRESPONSE, "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", "__doc__": """Response message for Bigtable.ReadRows. + Attributes: chunks: A collection of a row’s contents as part of the read request. 
@@ -1413,6 +1416,7 @@ "DESCRIPTOR": _SAMPLEROWKEYSREQUEST, "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", "__doc__": """Request message for Bigtable.SampleRowKeys. + Attributes: table_name: Required. The unique name of the table from which to sample @@ -1434,6 +1438,7 @@ "DESCRIPTOR": _SAMPLEROWKEYSRESPONSE, "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", "__doc__": """Response message for Bigtable.SampleRowKeys. + Attributes: row_key: Sorted streamed sequence of sample row keys in the table. The @@ -1462,6 +1467,7 @@ "DESCRIPTOR": _MUTATEROWREQUEST, "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", "__doc__": """Request message for Bigtable.MutateRow. + Attributes: table_name: Required. The unique name of the table to which the mutation @@ -1507,6 +1513,7 @@ "DESCRIPTOR": _MUTATEROWSREQUEST_ENTRY, "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", "__doc__": """A mutation for a given row. + Attributes: row_key: The key of the row to which the ``mutations`` should be @@ -1523,6 +1530,7 @@ "DESCRIPTOR": _MUTATEROWSREQUEST, "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", "__doc__": """Request message for BigtableService.MutateRows. + Attributes: table_name: Required. The unique name of the table to which the mutations @@ -1555,6 +1563,7 @@ "DESCRIPTOR": _MUTATEROWSRESPONSE_ENTRY, "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", "__doc__": """The result of applying a passed mutation in the original request. + Attributes: index: The index into the original request’s ``entries`` list of the @@ -1572,6 +1581,7 @@ "DESCRIPTOR": _MUTATEROWSRESPONSE, "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", "__doc__": """Response message for BigtableService.MutateRows. + Attributes: entries: One or more results for Entries from the batch request. @@ -1589,6 +1599,7 @@ "DESCRIPTOR": _CHECKANDMUTATEROWREQUEST, "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", "__doc__": """Request message for Bigtable.CheckAndMutateRow. + Attributes: table_name: Required. The unique name of the table to which the @@ -1632,6 +1643,7 @@ "DESCRIPTOR": _CHECKANDMUTATEROWRESPONSE, "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", "__doc__": """Response message for Bigtable.CheckAndMutateRow. + Attributes: predicate_matched: Whether or not the request’s ``predicate_filter`` yielded any @@ -1649,6 +1661,7 @@ "DESCRIPTOR": _READMODIFYWRITEROWREQUEST, "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", "__doc__": """Request message for Bigtable.ReadModifyWriteRow. + Attributes: table_name: Required. The unique name of the table to which the @@ -1679,6 +1692,7 @@ "DESCRIPTOR": _READMODIFYWRITEROWRESPONSE, "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", "__doc__": """Response message for Bigtable.ReadModifyWriteRow. 
+ Attributes: row: A Row containing the new contents of all cells modified by the diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data.proto index 8fd0c15cb3e3..2cc916454b81 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data.proto +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data.proto @@ -23,6 +23,7 @@ option java_multiple_files = true; option java_outer_classname = "DataProto"; option java_package = "com.google.bigtable.v2"; option php_namespace = "Google\\Cloud\\Bigtable\\V2"; +option ruby_package = "Google::Cloud::Bigtable::V2"; // Specifies the complete (requested) contents of a single row of a table. // Rows which exceed 256MiB in size cannot be read in full. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py index 419e147e4fd9..aa6f6737b9d0 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py @@ -16,9 +16,9 @@ name="google/cloud/bigtable_v2/proto/data.proto", package="google.bigtable.v2", syntax="proto3", - serialized_options=b"\n\026com.google.bigtable.v2B\tDataProtoP\001Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2", + serialized_options=b"\n\026com.google.bigtable.v2B\tDataProtoP\001Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2\352\002\033Google::Cloud::Bigtable::V2", create_key=_descriptor._internal_create_key, - serialized_pb=b'\n)google/cloud/bigtable_v2/proto/data.proto\x12\x12google.bigtable.v2"@\n\x03Row\x12\x0b\n\x03key\x18\x01 \x01(\x0c\x12,\n\x08\x66\x61milies\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v2.Family"C\n\x06\x46\x61mily\x12\x0c\n\x04name\x18\x01 \x01(\t\x12+\n\x07\x63olumns\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v2.Column"D\n\x06\x43olumn\x12\x11\n\tqualifier\x18\x01 \x01(\x0c\x12\'\n\x05\x63\x65lls\x18\x02 \x03(\x0b\x32\x18.google.bigtable.v2.Cell"?\n\x04\x43\x65ll\x12\x18\n\x10timestamp_micros\x18\x01 \x01(\x03\x12\r\n\x05value\x18\x02 \x01(\x0c\x12\x0e\n\x06labels\x18\x03 \x03(\t"\x8a\x01\n\x08RowRange\x12\x1a\n\x10start_key_closed\x18\x01 \x01(\x0cH\x00\x12\x18\n\x0estart_key_open\x18\x02 \x01(\x0cH\x00\x12\x16\n\x0c\x65nd_key_open\x18\x03 \x01(\x0cH\x01\x12\x18\n\x0e\x65nd_key_closed\x18\x04 \x01(\x0cH\x01\x42\x0b\n\tstart_keyB\t\n\x07\x65nd_key"L\n\x06RowSet\x12\x10\n\x08row_keys\x18\x01 \x03(\x0c\x12\x30\n\nrow_ranges\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.RowRange"\xc6\x01\n\x0b\x43olumnRange\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12 \n\x16start_qualifier_closed\x18\x02 \x01(\x0cH\x00\x12\x1e\n\x14start_qualifier_open\x18\x03 \x01(\x0cH\x00\x12\x1e\n\x14\x65nd_qualifier_closed\x18\x04 \x01(\x0cH\x01\x12\x1c\n\x12\x65nd_qualifier_open\x18\x05 \x01(\x0cH\x01\x42\x11\n\x0fstart_qualifierB\x0f\n\rend_qualifier"N\n\x0eTimestampRange\x12\x1e\n\x16start_timestamp_micros\x18\x01 \x01(\x03\x12\x1c\n\x14\x65nd_timestamp_micros\x18\x02 \x01(\x03"\x98\x01\n\nValueRange\x12\x1c\n\x12start_value_closed\x18\x01 \x01(\x0cH\x00\x12\x1a\n\x10start_value_open\x18\x02 \x01(\x0cH\x00\x12\x1a\n\x10\x65nd_value_closed\x18\x03 \x01(\x0cH\x01\x12\x18\n\x0e\x65nd_value_open\x18\x04 
\x01(\x0cH\x01\x42\r\n\x0bstart_valueB\x0b\n\tend_value"\xdf\x08\n\tRowFilter\x12\x34\n\x05\x63hain\x18\x01 \x01(\x0b\x32#.google.bigtable.v2.RowFilter.ChainH\x00\x12>\n\ninterleave\x18\x02 \x01(\x0b\x32(.google.bigtable.v2.RowFilter.InterleaveH\x00\x12<\n\tcondition\x18\x03 \x01(\x0b\x32\'.google.bigtable.v2.RowFilter.ConditionH\x00\x12\x0e\n\x04sink\x18\x10 \x01(\x08H\x00\x12\x19\n\x0fpass_all_filter\x18\x11 \x01(\x08H\x00\x12\x1a\n\x10\x62lock_all_filter\x18\x12 \x01(\x08H\x00\x12\x1e\n\x14row_key_regex_filter\x18\x04 \x01(\x0cH\x00\x12\x1b\n\x11row_sample_filter\x18\x0e \x01(\x01H\x00\x12"\n\x18\x66\x61mily_name_regex_filter\x18\x05 \x01(\tH\x00\x12\'\n\x1d\x63olumn_qualifier_regex_filter\x18\x06 \x01(\x0cH\x00\x12>\n\x13\x63olumn_range_filter\x18\x07 \x01(\x0b\x32\x1f.google.bigtable.v2.ColumnRangeH\x00\x12\x44\n\x16timestamp_range_filter\x18\x08 \x01(\x0b\x32".google.bigtable.v2.TimestampRangeH\x00\x12\x1c\n\x12value_regex_filter\x18\t \x01(\x0cH\x00\x12<\n\x12value_range_filter\x18\x0f \x01(\x0b\x32\x1e.google.bigtable.v2.ValueRangeH\x00\x12%\n\x1b\x63\x65lls_per_row_offset_filter\x18\n \x01(\x05H\x00\x12$\n\x1a\x63\x65lls_per_row_limit_filter\x18\x0b \x01(\x05H\x00\x12\'\n\x1d\x63\x65lls_per_column_limit_filter\x18\x0c \x01(\x05H\x00\x12!\n\x17strip_value_transformer\x18\r \x01(\x08H\x00\x12!\n\x17\x61pply_label_transformer\x18\x13 \x01(\tH\x00\x1a\x37\n\x05\x43hain\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x1a<\n\nInterleave\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x1a\xad\x01\n\tCondition\x12\x37\n\x10predicate_filter\x18\x01 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x32\n\x0btrue_filter\x18\x02 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x33\n\x0c\x66\x61lse_filter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilterB\x08\n\x06\x66ilter"\xc9\x04\n\x08Mutation\x12\x38\n\x08set_cell\x18\x01 \x01(\x0b\x32$.google.bigtable.v2.Mutation.SetCellH\x00\x12K\n\x12\x64\x65lete_from_column\x18\x02 \x01(\x0b\x32-.google.bigtable.v2.Mutation.DeleteFromColumnH\x00\x12K\n\x12\x64\x65lete_from_family\x18\x03 \x01(\x0b\x32-.google.bigtable.v2.Mutation.DeleteFromFamilyH\x00\x12\x45\n\x0f\x64\x65lete_from_row\x18\x04 \x01(\x0b\x32*.google.bigtable.v2.Mutation.DeleteFromRowH\x00\x1a\x61\n\x07SetCell\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x18\n\x10timestamp_micros\x18\x03 \x01(\x03\x12\r\n\x05value\x18\x04 \x01(\x0c\x1ay\n\x10\x44\x65leteFromColumn\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x36\n\ntime_range\x18\x03 \x01(\x0b\x32".google.bigtable.v2.TimestampRange\x1a\'\n\x10\x44\x65leteFromFamily\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x1a\x0f\n\rDeleteFromRowB\n\n\x08mutation"\x80\x01\n\x13ReadModifyWriteRule\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x16\n\x0c\x61ppend_value\x18\x03 \x01(\x0cH\x00\x12\x1a\n\x10increment_amount\x18\x04 \x01(\x03H\x00\x42\x06\n\x04ruleB\x97\x01\n\x16\x63om.google.bigtable.v2B\tDataProtoP\x01Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2b\x06proto3', + serialized_pb=b'\n)google/cloud/bigtable_v2/proto/data.proto\x12\x12google.bigtable.v2"@\n\x03Row\x12\x0b\n\x03key\x18\x01 \x01(\x0c\x12,\n\x08\x66\x61milies\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v2.Family"C\n\x06\x46\x61mily\x12\x0c\n\x04name\x18\x01 
\x01(\t\x12+\n\x07\x63olumns\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v2.Column"D\n\x06\x43olumn\x12\x11\n\tqualifier\x18\x01 \x01(\x0c\x12\'\n\x05\x63\x65lls\x18\x02 \x03(\x0b\x32\x18.google.bigtable.v2.Cell"?\n\x04\x43\x65ll\x12\x18\n\x10timestamp_micros\x18\x01 \x01(\x03\x12\r\n\x05value\x18\x02 \x01(\x0c\x12\x0e\n\x06labels\x18\x03 \x03(\t"\x8a\x01\n\x08RowRange\x12\x1a\n\x10start_key_closed\x18\x01 \x01(\x0cH\x00\x12\x18\n\x0estart_key_open\x18\x02 \x01(\x0cH\x00\x12\x16\n\x0c\x65nd_key_open\x18\x03 \x01(\x0cH\x01\x12\x18\n\x0e\x65nd_key_closed\x18\x04 \x01(\x0cH\x01\x42\x0b\n\tstart_keyB\t\n\x07\x65nd_key"L\n\x06RowSet\x12\x10\n\x08row_keys\x18\x01 \x03(\x0c\x12\x30\n\nrow_ranges\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.RowRange"\xc6\x01\n\x0b\x43olumnRange\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12 \n\x16start_qualifier_closed\x18\x02 \x01(\x0cH\x00\x12\x1e\n\x14start_qualifier_open\x18\x03 \x01(\x0cH\x00\x12\x1e\n\x14\x65nd_qualifier_closed\x18\x04 \x01(\x0cH\x01\x12\x1c\n\x12\x65nd_qualifier_open\x18\x05 \x01(\x0cH\x01\x42\x11\n\x0fstart_qualifierB\x0f\n\rend_qualifier"N\n\x0eTimestampRange\x12\x1e\n\x16start_timestamp_micros\x18\x01 \x01(\x03\x12\x1c\n\x14\x65nd_timestamp_micros\x18\x02 \x01(\x03"\x98\x01\n\nValueRange\x12\x1c\n\x12start_value_closed\x18\x01 \x01(\x0cH\x00\x12\x1a\n\x10start_value_open\x18\x02 \x01(\x0cH\x00\x12\x1a\n\x10\x65nd_value_closed\x18\x03 \x01(\x0cH\x01\x12\x18\n\x0e\x65nd_value_open\x18\x04 \x01(\x0cH\x01\x42\r\n\x0bstart_valueB\x0b\n\tend_value"\xdf\x08\n\tRowFilter\x12\x34\n\x05\x63hain\x18\x01 \x01(\x0b\x32#.google.bigtable.v2.RowFilter.ChainH\x00\x12>\n\ninterleave\x18\x02 \x01(\x0b\x32(.google.bigtable.v2.RowFilter.InterleaveH\x00\x12<\n\tcondition\x18\x03 \x01(\x0b\x32\'.google.bigtable.v2.RowFilter.ConditionH\x00\x12\x0e\n\x04sink\x18\x10 \x01(\x08H\x00\x12\x19\n\x0fpass_all_filter\x18\x11 \x01(\x08H\x00\x12\x1a\n\x10\x62lock_all_filter\x18\x12 \x01(\x08H\x00\x12\x1e\n\x14row_key_regex_filter\x18\x04 \x01(\x0cH\x00\x12\x1b\n\x11row_sample_filter\x18\x0e \x01(\x01H\x00\x12"\n\x18\x66\x61mily_name_regex_filter\x18\x05 \x01(\tH\x00\x12\'\n\x1d\x63olumn_qualifier_regex_filter\x18\x06 \x01(\x0cH\x00\x12>\n\x13\x63olumn_range_filter\x18\x07 \x01(\x0b\x32\x1f.google.bigtable.v2.ColumnRangeH\x00\x12\x44\n\x16timestamp_range_filter\x18\x08 \x01(\x0b\x32".google.bigtable.v2.TimestampRangeH\x00\x12\x1c\n\x12value_regex_filter\x18\t \x01(\x0cH\x00\x12<\n\x12value_range_filter\x18\x0f \x01(\x0b\x32\x1e.google.bigtable.v2.ValueRangeH\x00\x12%\n\x1b\x63\x65lls_per_row_offset_filter\x18\n \x01(\x05H\x00\x12$\n\x1a\x63\x65lls_per_row_limit_filter\x18\x0b \x01(\x05H\x00\x12\'\n\x1d\x63\x65lls_per_column_limit_filter\x18\x0c \x01(\x05H\x00\x12!\n\x17strip_value_transformer\x18\r \x01(\x08H\x00\x12!\n\x17\x61pply_label_transformer\x18\x13 \x01(\tH\x00\x1a\x37\n\x05\x43hain\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x1a<\n\nInterleave\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x1a\xad\x01\n\tCondition\x12\x37\n\x10predicate_filter\x18\x01 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x32\n\x0btrue_filter\x18\x02 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x33\n\x0c\x66\x61lse_filter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilterB\x08\n\x06\x66ilter"\xc9\x04\n\x08Mutation\x12\x38\n\x08set_cell\x18\x01 \x01(\x0b\x32$.google.bigtable.v2.Mutation.SetCellH\x00\x12K\n\x12\x64\x65lete_from_column\x18\x02 
\x01(\x0b\x32-.google.bigtable.v2.Mutation.DeleteFromColumnH\x00\x12K\n\x12\x64\x65lete_from_family\x18\x03 \x01(\x0b\x32-.google.bigtable.v2.Mutation.DeleteFromFamilyH\x00\x12\x45\n\x0f\x64\x65lete_from_row\x18\x04 \x01(\x0b\x32*.google.bigtable.v2.Mutation.DeleteFromRowH\x00\x1a\x61\n\x07SetCell\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x18\n\x10timestamp_micros\x18\x03 \x01(\x03\x12\r\n\x05value\x18\x04 \x01(\x0c\x1ay\n\x10\x44\x65leteFromColumn\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x36\n\ntime_range\x18\x03 \x01(\x0b\x32".google.bigtable.v2.TimestampRange\x1a\'\n\x10\x44\x65leteFromFamily\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x1a\x0f\n\rDeleteFromRowB\n\n\x08mutation"\x80\x01\n\x13ReadModifyWriteRule\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x16\n\x0c\x61ppend_value\x18\x03 \x01(\x0cH\x00\x12\x1a\n\x10increment_amount\x18\x04 \x01(\x03H\x00\x42\x06\n\x04ruleB\xb5\x01\n\x16\x63om.google.bigtable.v2B\tDataProtoP\x01Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2\xea\x02\x1bGoogle::Cloud::Bigtable::V2b\x06proto3', ) @@ -2031,6 +2031,7 @@ "__module__": "google.cloud.bigtable_v2.proto.data_pb2", "__doc__": """Specifies the complete (requested) contents of a single row of a table. Rows which exceed 256MiB in size cannot be read in full. + Attributes: key: The unique key which identifies this row within its table. @@ -2054,6 +2055,7 @@ "__module__": "google.cloud.bigtable_v2.proto.data_pb2", "__doc__": """Specifies (some of) the contents of a single row/column family intersection of a table. + Attributes: name: The unique key which identifies this family within its row. @@ -2079,6 +2081,7 @@ "__module__": "google.cloud.bigtable_v2.proto.data_pb2", "__doc__": """Specifies (some of) the contents of a single row/column intersection of a table. + Attributes: qualifier: The unique key which identifies this column within its family. @@ -2103,6 +2106,7 @@ "__module__": "google.cloud.bigtable_v2.proto.data_pb2", "__doc__": """Specifies (some of) the contents of a single row/column/timestamp of a table. + Attributes: timestamp_micros: The cell’s stored timestamp, which also uniquely identifies it @@ -2131,6 +2135,7 @@ "DESCRIPTOR": _ROWRANGE, "__module__": "google.cloud.bigtable_v2.proto.data_pb2", "__doc__": """Specifies a contiguous range of rows. + Attributes: start_key: The row key at which to start the range. If neither field is @@ -2159,6 +2164,7 @@ "DESCRIPTOR": _ROWSET, "__module__": "google.cloud.bigtable_v2.proto.data_pb2", "__doc__": """Specifies a non-contiguous set of rows. + Attributes: row_keys: Single rows included in the set. @@ -2180,6 +2186,7 @@ The range spans from : to :, where both bounds can be either inclusive or exclusive. + Attributes: family_name: The name of the column family within which this range falls. @@ -2212,6 +2219,7 @@ "DESCRIPTOR": _TIMESTAMPRANGE, "__module__": "google.cloud.bigtable_v2.proto.data_pb2", "__doc__": """Specified a contiguous range of microsecond timestamps. + Attributes: start_timestamp_micros: Inclusive lower bound. If left empty, interpreted as 0. @@ -2230,6 +2238,7 @@ "DESCRIPTOR": _VALUERANGE, "__module__": "google.cloud.bigtable_v2.proto.data_pb2", "__doc__": """Specifies a contiguous range of raw byte values. 
+ Attributes: start_value: The value at which to start the range. If neither field is @@ -2262,6 +2271,7 @@ "DESCRIPTOR": _ROWFILTER_CHAIN, "__module__": "google.cloud.bigtable_v2.proto.data_pb2", "__doc__": """A RowFilter which sends rows through several RowFilters in sequence. + Attributes: filters: The elements of “filters” are chained together to process the @@ -2279,6 +2289,7 @@ "__module__": "google.cloud.bigtable_v2.proto.data_pb2", "__doc__": """A RowFilter which sends each row to each of several component RowFilters and interleaves the results. + Attributes: filters: The elements of “filters” all process a copy of the input row, @@ -2321,6 +2332,7 @@ inconsistent or unexpected results. Additionally, Condition filters have poor performance, especially when filters are set for the false condition. + Attributes: predicate_filter: If ``predicate_filter`` outputs any cells, then @@ -2370,6 +2382,7 @@ RowFilter message must not exceed 4096 bytes, and RowFilters may not be nested within each other (in Chains or Interleaves) to a depth of more than 20. + Attributes: filter: Which of the possible RowFilter types to apply. If none are @@ -2520,6 +2533,7 @@ "DESCRIPTOR": _MUTATION_SETCELL, "__module__": "google.cloud.bigtable_v2.proto.data_pb2", "__doc__": """A Mutation which sets the value of the specified cell. + Attributes: family_name: The name of the family into which new data should be written. @@ -2548,6 +2562,7 @@ "__module__": "google.cloud.bigtable_v2.proto.data_pb2", "__doc__": """A Mutation which deletes cells from the specified column, optionally restricting the deletions to a given timestamp range. + Attributes: family_name: The name of the family from which cells should be deleted. @@ -2568,6 +2583,7 @@ "DESCRIPTOR": _MUTATION_DELETEFROMFAMILY, "__module__": "google.cloud.bigtable_v2.proto.data_pb2", "__doc__": """A Mutation which deletes all cells from the specified column family. + Attributes: family_name: The name of the family from which cells should be deleted. @@ -2589,6 +2605,7 @@ "DESCRIPTOR": _MUTATION, "__module__": "google.cloud.bigtable_v2.proto.data_pb2", "__doc__": """Specifies a particular change to be made to the contents of a row. + Attributes: mutation: Which of the possible Mutation types to apply. @@ -2618,6 +2635,7 @@ "__module__": "google.cloud.bigtable_v2.proto.data_pb2", "__doc__": """Specifies an atomic read/modify/write operation on the latest value of the specified column. 
+ Attributes: family_name: The name of the family to which the read/modify/write should diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index 1e2f874a94f0..b1b1ff1cbe73 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -4,15 +4,15 @@ "git": { "name": ".", "remote": "https://github.com/googleapis/python-bigtable.git", - "sha": "1ac60be05521b69c924118d40f88e07728a2f75e" + "sha": "c38888de3d0b1c49c438a7d350f42bc1805809f2" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "3a4894c4f0da3e763aca2c67bd280ae915177450", - "internalRef": "314363155" + "sha": "184661793fbe3b89f2b485c303e7466cef9d21a1", + "internalRef": "316182409" } }, { From 94fcf0ed136a52560a1a32e7f66350dfd7d357fb Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Fri, 19 Jun 2020 10:30:52 -0700 Subject: [PATCH 313/892] fix(python): change autodoc_default_flags to autodoc_default_options (#58) Source-Author: HemangChothani <50404902+HemangChothani@users.noreply.github.com> Source-Date: Thu Jun 18 22:50:02 2020 +0530 Source-Repo: googleapis/synthtool Source-Sha: cd522c3b4dde821766d95c80ae5aeb43d7a41170 Source-Link: https://github.com/googleapis/synthtool/commit/cd522c3b4dde821766d95c80ae5aeb43d7a41170 --- packages/google-cloud-bigtable/docs/conf.py | 2 +- packages/google-cloud-bigtable/synth.metadata | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/google-cloud-bigtable/docs/conf.py b/packages/google-cloud-bigtable/docs/conf.py index 9249013859cc..a33e54fc2490 100644 --- a/packages/google-cloud-bigtable/docs/conf.py +++ b/packages/google-cloud-bigtable/docs/conf.py @@ -43,7 +43,7 @@ # autodoc/autosummary flags autoclass_content = "both" -autodoc_default_flags = ["members"] +autodoc_default_options = {"members": True} autosummary_generate = True diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index b1b1ff1cbe73..9c90f521370e 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -4,7 +4,7 @@ "git": { "name": ".", "remote": "https://github.com/googleapis/python-bigtable.git", - "sha": "c38888de3d0b1c49c438a7d350f42bc1805809f2" + "sha": "d257a9375df90a1e1d9f2dc0a7a6ecc19896b021" } }, { @@ -19,14 +19,14 @@ "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "f13864cd532f98a4682cec48105580fa9a5c9978" + "sha": "cd522c3b4dde821766d95c80ae5aeb43d7a41170" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "f13864cd532f98a4682cec48105580fa9a5c9978" + "sha": "cd522c3b4dde821766d95c80ae5aeb43d7a41170" } } ], From 3a60dd53b8dd407fddf1d6807bbb3001d7b330f9 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Wed, 24 Jun 2020 19:19:17 -0700 Subject: [PATCH 314/892] chore: update generated _pb2 files (#59) * chore: update grpc to v1.30.0 PiperOrigin-RevId: 317949519 Source-Author: Google APIs Source-Date: Tue Jun 23 15:22:22 2020 -0700 Source-Repo: googleapis/googleapis Source-Sha: 7157f9552747421572cf1ab3aec1105c05ebd4f9 Source-Link: https://github.com/googleapis/googleapis/commit/7157f9552747421572cf1ab3aec1105c05ebd4f9 --- .../proto/bigtable_instance_admin_pb2.py | 2 +- .../proto/bigtable_instance_admin_pb2_grpc.py | 588 ++++++++++++- .../proto/bigtable_table_admin_pb2.py | 2 +- 
.../proto/bigtable_table_admin_pb2_grpc.py | 781 ++++++++++++++++-- .../bigtable_admin_v2/proto/common_pb2.py | 2 +- .../proto/common_pb2_grpc.py | 1 + .../bigtable_admin_v2/proto/instance_pb2.py | 2 +- .../proto/instance_pb2_grpc.py | 1 + .../bigtable_admin_v2/proto/table_pb2.py | 2 +- .../bigtable_admin_v2/proto/table_pb2_grpc.py | 1 + .../cloud/bigtable_v2/proto/bigtable_pb2.py | 2 +- .../bigtable_v2/proto/bigtable_pb2_grpc.py | 219 ++++- .../cloud/bigtable_v2/proto/data_pb2.py | 2 +- .../cloud/bigtable_v2/proto/data_pb2_grpc.py | 1 + packages/google-cloud-bigtable/synth.metadata | 6 +- 15 files changed, 1456 insertions(+), 156 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py index bd4d621d66ec..63590907a22c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto - +"""Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py index 0580b1871335..8b1395579e5c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py @@ -1,4 +1,5 @@ # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" import grpc from google.cloud.bigtable_admin_v2.proto import ( @@ -17,16 +18,16 @@ class BigtableInstanceAdminStub(object): """Service for creating, configuring, and deleting Cloud Bigtable Instances and - Clusters. Provides access to the Instance and Cluster schemas only, not the - tables' metadata or data stored in those tables. - """ + Clusters. Provides access to the Instance and Cluster schemas only, not the + tables' metadata or data stored in those tables. + """ def __init__(self, channel): """Constructor. - Args: - channel: A grpc.Channel. - """ + Args: + channel: A grpc.Channel. + """ self.CreateInstance = channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateInstance", request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateInstanceRequest.SerializeToString, @@ -126,144 +127,144 @@ def __init__(self, channel): class BigtableInstanceAdminServicer(object): """Service for creating, configuring, and deleting Cloud Bigtable Instances and - Clusters. Provides access to the Instance and Cluster schemas only, not the - tables' metadata or data stored in those tables. - """ + Clusters. Provides access to the Instance and Cluster schemas only, not the + tables' metadata or data stored in those tables. + """ def CreateInstance(self, request, context): """Create an instance within a project. 
- """ + """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def GetInstance(self, request, context): """Gets information about an instance. - """ + """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def ListInstances(self, request, context): """Lists information about instances in a project. - """ + """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def UpdateInstance(self, request, context): """Updates an instance within a project. This method updates only the display - name and type for an Instance. To update other Instance properties, such as - labels, use PartialUpdateInstance. - """ + name and type for an Instance. To update other Instance properties, such as + labels, use PartialUpdateInstance. + """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def PartialUpdateInstance(self, request, context): """Partially updates an instance within a project. This method can modify all - fields of an Instance and is the preferred way to update an Instance. - """ + fields of an Instance and is the preferred way to update an Instance. + """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def DeleteInstance(self, request, context): """Delete an instance from a project. - """ + """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def CreateCluster(self, request, context): """Creates a cluster within an instance. - """ + """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def GetCluster(self, request, context): """Gets information about a cluster. - """ + """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def ListClusters(self, request, context): """Lists information about clusters in an instance. - """ + """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def UpdateCluster(self, request, context): """Updates a cluster within an instance. - """ + """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def DeleteCluster(self, request, context): """Deletes a cluster from an instance. - """ + """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def CreateAppProfile(self, request, context): """Creates an app profile within an instance. - """ + """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def GetAppProfile(self, request, context): """Gets information about an app profile. 
- """ + """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def ListAppProfiles(self, request, context): """Lists information about app profiles in an instance. - """ + """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def UpdateAppProfile(self, request, context): """Updates an app profile within an instance. - """ + """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def DeleteAppProfile(self, request, context): """Deletes an app profile from an instance. - """ + """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def GetIamPolicy(self, request, context): """Gets the access control policy for an instance resource. Returns an empty - policy if an instance exists but does not have a policy set. - """ + policy if an instance exists but does not have a policy set. + """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def SetIamPolicy(self, request, context): """Sets the access control policy on an instance resource. Replaces any - existing policy. - """ + existing policy. + """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def TestIamPermissions(self, request, context): """Returns permissions that the caller has on the specified instance resource. - """ + """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") @@ -371,3 +372,524 @@ def add_BigtableInstanceAdminServicer_to_server(servicer, server): "google.bigtable.admin.v2.BigtableInstanceAdmin", rpc_method_handlers ) server.add_generic_rpc_handlers((generic_handler,)) + + +# This class is part of an EXPERIMENTAL API. +class BigtableInstanceAdmin(object): + """Service for creating, configuring, and deleting Cloud Bigtable Instances and + Clusters. Provides access to the Instance and Cluster schemas only, not the + tables' metadata or data stored in those tables. 
+ """ + + @staticmethod + def CreateInstance( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateInstance", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateInstanceRequest.SerializeToString, + google_dot_longrunning_dot_operations__pb2.Operation.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def GetInstance( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetInstance", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetInstanceRequest.SerializeToString, + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Instance.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def ListInstances( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListInstances", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListInstancesRequest.SerializeToString, + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListInstancesResponse.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def UpdateInstance( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateInstance", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Instance.SerializeToString, + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Instance.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def PartialUpdateInstance( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableInstanceAdmin/PartialUpdateInstance", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.PartialUpdateInstanceRequest.SerializeToString, + google_dot_longrunning_dot_operations__pb2.Operation.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def DeleteInstance( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return 
grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteInstance", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteInstanceRequest.SerializeToString, + google_dot_protobuf_dot_empty__pb2.Empty.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def CreateCluster( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateCluster", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateClusterRequest.SerializeToString, + google_dot_longrunning_dot_operations__pb2.Operation.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def GetCluster( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetCluster", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetClusterRequest.SerializeToString, + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Cluster.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def ListClusters( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListClusters", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListClustersRequest.SerializeToString, + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListClustersResponse.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def UpdateCluster( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateCluster", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Cluster.SerializeToString, + google_dot_longrunning_dot_operations__pb2.Operation.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def DeleteCluster( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteCluster", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteClusterRequest.SerializeToString, + google_dot_protobuf_dot_empty__pb2.Empty.FromString, + 
options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def CreateAppProfile( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateAppProfile", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateAppProfileRequest.SerializeToString, + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.AppProfile.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def GetAppProfile( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetAppProfile", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetAppProfileRequest.SerializeToString, + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.AppProfile.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def ListAppProfiles( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListAppProfiles", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListAppProfilesRequest.SerializeToString, + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListAppProfilesResponse.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def UpdateAppProfile( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateAppProfile", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.UpdateAppProfileRequest.SerializeToString, + google_dot_longrunning_dot_operations__pb2.Operation.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def DeleteAppProfile( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteAppProfile", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteAppProfileRequest.SerializeToString, + google_dot_protobuf_dot_empty__pb2.Empty.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def GetIamPolicy( + request, + target, + options=(), + 
channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetIamPolicy", + google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.SerializeToString, + google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def SetIamPolicy( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableInstanceAdmin/SetIamPolicy", + google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.SerializeToString, + google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def TestIamPermissions( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableInstanceAdmin/TestIamPermissions", + google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.SerializeToString, + google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py index aef2bfdcdb32..5ca167d87877 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto - +"""Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py index 54d6ac9cc5ec..2b8d46e20478 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py @@ -1,4 +1,5 @@ # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" import grpc from google.cloud.bigtable_admin_v2.proto import ( @@ -19,16 +20,16 @@ class BigtableTableAdminStub(object): """Service for creating, configuring, and deleting Cloud Bigtable tables. - Provides access to the table schemas only, not the data stored within - the tables. 
- """ + Provides access to the table schemas only, not the data stored within + the tables. + """ def __init__(self, channel): """Constructor. - Args: - channel: A grpc.Channel. - """ + Args: + channel: A grpc.Channel. + """ self.CreateTable = channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTable", request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableRequest.SerializeToString, @@ -145,102 +146,102 @@ class BigtableTableAdminServicer(object): """Service for creating, configuring, and deleting Cloud Bigtable tables. - Provides access to the table schemas only, not the data stored within - the tables. - """ + Provides access to the table schemas only, not the data stored within + the tables. + """ def CreateTable(self, request, context): """Creates a new table in the specified instance. - The table can be created with a full set of initial column families, - specified in the request. - """ + The table can be created with a full set of initial column families, + specified in the request. + """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def CreateTableFromSnapshot(self, request, context): """Creates a new table from the specified snapshot. The target table must - not exist. The snapshot and the table must be in the same instance. + not exist. The snapshot and the table must be in the same instance. - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - """ + Note: This is a private alpha release of Cloud Bigtable snapshots. This + feature is not currently available to most Cloud Bigtable customers. This + feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or deprecation + policy. + """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def ListTables(self, request, context): """Lists all tables served from a specified instance. - """ + """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def GetTable(self, request, context): """Gets metadata information about the specified table. - """ + """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def DeleteTable(self, request, context): """Permanently deletes a specified table and all of its data. - """ + """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def ModifyColumnFamilies(self, request, context): """Performs a series of column family modifications on the specified table. - Either all or none of the modifications will occur before this method - returns, but data requests received prior to that point may see a table - where only some modifications have taken effect. 
- """ + Either all or none of the modifications will occur before this method + returns, but data requests received prior to that point may see a table + where only some modifications have taken effect. + """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def DropRowRange(self, request, context): """Permanently drop/delete a row range from a specified table. The request can - specify whether to delete all rows in a table, or only those that match a - particular prefix. - """ + specify whether to delete all rows in a table, or only those that match a + particular prefix. + """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def GenerateConsistencyToken(self, request, context): """Generates a consistency token for a Table, which can be used in - CheckConsistency to check whether mutations to the table that finished - before this call started have been replicated. The tokens will be available - for 90 days. - """ + CheckConsistency to check whether mutations to the table that finished + before this call started have been replicated. The tokens will be available + for 90 days. + """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def CheckConsistency(self, request, context): """Checks replication consistency based on a consistency token, that is, if - replication has caught up based on the conditions specified in the token - and the check request. - """ + replication has caught up based on the conditions specified in the token + and the check request. + """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def SnapshotTable(self, request, context): """Creates a new snapshot in the specified cluster from the specified - source table. The cluster and the table must be in the same instance. + source table. The cluster and the table must be in the same instance. - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - """ + Note: This is a private alpha release of Cloud Bigtable snapshots. This + feature is not currently available to most Cloud Bigtable customers. This + feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or deprecation + policy. + """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") @@ -248,12 +249,12 @@ def SnapshotTable(self, request, context): def GetSnapshot(self, request, context): """Gets metadata information about the specified snapshot. - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - """ + Note: This is a private alpha release of Cloud Bigtable snapshots. 
This + feature is not currently available to most Cloud Bigtable customers. This + feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or deprecation + policy. + """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") @@ -261,12 +262,12 @@ def GetSnapshot(self, request, context): def ListSnapshots(self, request, context): """Lists all snapshots associated with the specified cluster. - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - """ + Note: This is a private alpha release of Cloud Bigtable snapshots. This + feature is not currently available to most Cloud Bigtable customers. This + feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or deprecation + policy. + """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") @@ -274,93 +275,93 @@ def ListSnapshots(self, request, context): def DeleteSnapshot(self, request, context): """Permanently deletes the specified snapshot. - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - """ + Note: This is a private alpha release of Cloud Bigtable snapshots. This + feature is not currently available to most Cloud Bigtable customers. This + feature might be changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any SLA or deprecation + policy. + """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def CreateBackup(self, request, context): """Starts creating a new Cloud Bigtable Backup. The returned backup - [long-running operation][google.longrunning.Operation] can be used to - track creation of the backup. The - [metadata][google.longrunning.Operation.metadata] field type is - [CreateBackupMetadata][google.bigtable.admin.v2.CreateBackupMetadata]. The - [response][google.longrunning.Operation.response] field type is - [Backup][google.bigtable.admin.v2.Backup], if successful. Cancelling the - returned operation will stop the creation and delete the backup. - """ + [long-running operation][google.longrunning.Operation] can be used to + track creation of the backup. The + [metadata][google.longrunning.Operation.metadata] field type is + [CreateBackupMetadata][google.bigtable.admin.v2.CreateBackupMetadata]. The + [response][google.longrunning.Operation.response] field type is + [Backup][google.bigtable.admin.v2.Backup], if successful. Cancelling the + returned operation will stop the creation and delete the backup. + """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def GetBackup(self, request, context): """Gets metadata on a pending or completed Cloud Bigtable Backup. 
- """ + """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def UpdateBackup(self, request, context): """Updates a pending or completed Cloud Bigtable Backup. - """ + """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def DeleteBackup(self, request, context): """Deletes a pending or completed Cloud Bigtable backup. - """ + """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def ListBackups(self, request, context): """Lists Cloud Bigtable backups. Returns both completed and pending - backups. - """ + backups. + """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def RestoreTable(self, request, context): """Create a new table by restoring from a completed backup. The new table - must be in the same instance as the instance containing the backup. The - returned table [long-running operation][google.longrunning.Operation] can - be used to track the progress of the operation, and to cancel it. The - [metadata][google.longrunning.Operation.metadata] field type is - [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. The - [response][google.longrunning.Operation.response] type is - [Table][google.bigtable.admin.v2.Table], if successful. - """ + must be in the same instance as the instance containing the backup. The + returned table [long-running operation][google.longrunning.Operation] can + be used to track the progress of the operation, and to cancel it. The + [metadata][google.longrunning.Operation.metadata] field type is + [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. The + [response][google.longrunning.Operation.response] type is + [Table][google.bigtable.admin.v2.Table], if successful. + """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def GetIamPolicy(self, request, context): """Gets the access control policy for a resource. - Returns an empty policy if the resource exists but does not have a policy - set. - """ + Returns an empty policy if the resource exists but does not have a policy + set. + """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def SetIamPolicy(self, request, context): """Sets the access control policy on a Table or Backup resource. - Replaces any existing policy. - """ + Replaces any existing policy. + """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def TestIamPermissions(self, request, context): """Returns permissions that the caller has on the specified table resource. - """ + """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") @@ -483,3 +484,607 @@ def add_BigtableTableAdminServicer_to_server(servicer, server): "google.bigtable.admin.v2.BigtableTableAdmin", rpc_method_handlers ) server.add_generic_rpc_handlers((generic_handler,)) + + +# This class is part of an EXPERIMENTAL API. 
+class BigtableTableAdmin(object): + """Service for creating, configuring, and deleting Cloud Bigtable tables. + + + Provides access to the table schemas only, not the data stored within + the tables. + """ + + @staticmethod + def CreateTable( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTable", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableRequest.SerializeToString, + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def CreateTableFromSnapshot( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTableFromSnapshot", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableFromSnapshotRequest.SerializeToString, + google_dot_longrunning_dot_operations__pb2.Operation.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def ListTables( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableTableAdmin/ListTables", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesRequest.SerializeToString, + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesResponse.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def GetTable( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableTableAdmin/GetTable", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetTableRequest.SerializeToString, + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def DeleteTable( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteTableRequest.SerializeToString, + google_dot_protobuf_dot_empty__pb2.Empty.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def ModifyColumnFamilies( + request, + target, + options=(), + 
channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableTableAdmin/ModifyColumnFamilies", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ModifyColumnFamiliesRequest.SerializeToString, + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def DropRowRange( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableTableAdmin/DropRowRange", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DropRowRangeRequest.SerializeToString, + google_dot_protobuf_dot_empty__pb2.Empty.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def GenerateConsistencyToken( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableTableAdmin/GenerateConsistencyToken", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenRequest.SerializeToString, + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenResponse.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def CheckConsistency( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableTableAdmin/CheckConsistency", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyRequest.SerializeToString, + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyResponse.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def SnapshotTable( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableTableAdmin/SnapshotTable", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.SnapshotTableRequest.SerializeToString, + google_dot_longrunning_dot_operations__pb2.Operation.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def GetSnapshot( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, 
+ target, + "/google.bigtable.admin.v2.BigtableTableAdmin/GetSnapshot", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetSnapshotRequest.SerializeToString, + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Snapshot.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def ListSnapshots( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableTableAdmin/ListSnapshots", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsRequest.SerializeToString, + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsResponse.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def DeleteSnapshot( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteSnapshot", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteSnapshotRequest.SerializeToString, + google_dot_protobuf_dot_empty__pb2.Empty.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def CreateBackup( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableTableAdmin/CreateBackup", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateBackupRequest.SerializeToString, + google_dot_longrunning_dot_operations__pb2.Operation.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def GetBackup( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableTableAdmin/GetBackup", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetBackupRequest.SerializeToString, + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Backup.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def UpdateBackup( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateBackup", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.UpdateBackupRequest.SerializeToString, + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Backup.FromString, + options, 
+ channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def DeleteBackup( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteBackup", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteBackupRequest.SerializeToString, + google_dot_protobuf_dot_empty__pb2.Empty.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def ListBackups( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableTableAdmin/ListBackups", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListBackupsRequest.SerializeToString, + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListBackupsResponse.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def RestoreTable( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableTableAdmin/RestoreTable", + google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.RestoreTableRequest.SerializeToString, + google_dot_longrunning_dot_operations__pb2.Operation.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def GetIamPolicy( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableTableAdmin/GetIamPolicy", + google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.SerializeToString, + google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def SetIamPolicy( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.admin.v2.BigtableTableAdmin/SetIamPolicy", + google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.SerializeToString, + google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def TestIamPermissions( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + 
"/google.bigtable.admin.v2.BigtableTableAdmin/TestIamPermissions", + google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.SerializeToString, + google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2.py index dd668ef3cb2b..09233cff5a02 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/bigtable_admin_v2/proto/common.proto - +"""Generated protocol buffer code.""" from google.protobuf.internal import enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2_grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2_grpc.py index 07cb78fe03a9..8a9393943bdf 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2_grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2_grpc.py @@ -1,2 +1,3 @@ # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" import grpc diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py index 7c6e05fa5152..e0138e0fb0f7 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/bigtable_admin_v2/proto/instance.proto - +"""Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2_grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2_grpc.py index 07cb78fe03a9..8a9393943bdf 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2_grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2_grpc.py @@ -1,2 +1,3 @@ # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" import grpc diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py index bd0f478fa09c..67238a81e909 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: google/cloud/bigtable_admin_v2/proto/table.proto - +"""Generated protocol buffer code.""" from google.protobuf.internal import enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2_grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2_grpc.py index 07cb78fe03a9..8a9393943bdf 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2_grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2_grpc.py @@ -1,2 +1,3 @@ # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" import grpc diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py index b0f13cbba51e..ba711b20ca71 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/bigtable_v2/proto/bigtable.proto - +"""Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2_grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2_grpc.py index 4dd6cded9bc4..db4ee99f3554 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2_grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2_grpc.py @@ -1,4 +1,5 @@ # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" import grpc from google.cloud.bigtable_v2.proto import ( @@ -8,14 +9,14 @@ class BigtableStub(object): """Service for reading from and writing to existing Bigtable tables. - """ + """ def __init__(self, channel): """Constructor. - Args: - channel: A grpc.Channel. - """ + Args: + channel: A grpc.Channel. + """ self.ReadRows = channel.unary_stream( "/google.bigtable.v2.Bigtable/ReadRows", request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadRowsRequest.SerializeToString, @@ -50,60 +51,60 @@ def __init__(self, channel): class BigtableServicer(object): """Service for reading from and writing to existing Bigtable tables. - """ + """ def ReadRows(self, request, context): """Streams back the contents of all requested rows in key order, optionally - applying the same Reader filter to each. Depending on their size, - rows and cells may be broken up across multiple responses, but - atomicity of each row will still be preserved. See the - ReadRowsResponse documentation for details. - """ + applying the same Reader filter to each. Depending on their size, + rows and cells may be broken up across multiple responses, but + atomicity of each row will still be preserved. See the + ReadRowsResponse documentation for details. 
+ """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def SampleRowKeys(self, request, context): """Returns a sample of row keys in the table. The returned row keys will - delimit contiguous sections of the table of approximately equal size, - which can be used to break up the data for distributed tasks like - mapreduces. - """ + delimit contiguous sections of the table of approximately equal size, + which can be used to break up the data for distributed tasks like + mapreduces. + """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def MutateRow(self, request, context): """Mutates a row atomically. Cells already present in the row are left - unchanged unless explicitly changed by `mutation`. - """ + unchanged unless explicitly changed by `mutation`. + """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def MutateRows(self, request, context): """Mutates multiple rows in a batch. Each individual row is mutated - atomically as in MutateRow, but the entire batch is not executed - atomically. - """ + atomically as in MutateRow, but the entire batch is not executed + atomically. + """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def CheckAndMutateRow(self, request, context): """Mutates a row atomically based on the output of a predicate Reader filter. - """ + """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def ReadModifyWriteRow(self, request, context): """Modifies a row atomically on the server. The method reads the latest - existing timestamp and value from the specified columns and writes a new - entry based on pre-defined read/modify/write rules. The new value for the - timestamp is the greater of the existing timestamp or the current server - time. The method returns the new contents of all modified cells. - """ + existing timestamp and value from the specified columns and writes a new + entry based on pre-defined read/modify/write rules. The new value for the + timestamp is the greater of the existing timestamp or the current server + time. The method returns the new contents of all modified cells. + """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") @@ -146,3 +147,171 @@ def add_BigtableServicer_to_server(servicer, server): "google.bigtable.v2.Bigtable", rpc_method_handlers ) server.add_generic_rpc_handlers((generic_handler,)) + + +# This class is part of an EXPERIMENTAL API. +class Bigtable(object): + """Service for reading from and writing to existing Bigtable tables. 
+ """ + + @staticmethod + def ReadRows( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_stream( + request, + target, + "/google.bigtable.v2.Bigtable/ReadRows", + google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadRowsRequest.SerializeToString, + google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadRowsResponse.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def SampleRowKeys( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_stream( + request, + target, + "/google.bigtable.v2.Bigtable/SampleRowKeys", + google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.SampleRowKeysRequest.SerializeToString, + google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.SampleRowKeysResponse.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def MutateRow( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.v2.Bigtable/MutateRow", + google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowRequest.SerializeToString, + google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowResponse.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def MutateRows( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_stream( + request, + target, + "/google.bigtable.v2.Bigtable/MutateRows", + google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowsRequest.SerializeToString, + google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowsResponse.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def CheckAndMutateRow( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.v2.Bigtable/CheckAndMutateRow", + google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.CheckAndMutateRowRequest.SerializeToString, + google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.CheckAndMutateRowResponse.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) + + @staticmethod + def ReadModifyWriteRow( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.bigtable.v2.Bigtable/ReadModifyWriteRow", + 
google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadModifyWriteRowRequest.SerializeToString, + google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadModifyWriteRowResponse.FromString, + options, + channel_credentials, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py index aa6f6737b9d0..a64f9b10e2df 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/bigtable_v2/proto/data.proto - +"""Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2_grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2_grpc.py index 07cb78fe03a9..8a9393943bdf 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2_grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2_grpc.py @@ -1,2 +1,3 @@ # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" import grpc diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index 9c90f521370e..cd653f163a6f 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -4,15 +4,15 @@ "git": { "name": ".", "remote": "https://github.com/googleapis/python-bigtable.git", - "sha": "d257a9375df90a1e1d9f2dc0a7a6ecc19896b021" + "sha": "5c1d61827618d254c453b3871c0022a8d35bfbb2" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "184661793fbe3b89f2b485c303e7466cef9d21a1", - "internalRef": "316182409" + "sha": "b882b8e6bfcd708042ff00f7adc67ce750817dd0", + "internalRef": "318028816" } }, { From 00e3f9bed860b493e6e405fef407b570a4d730b1 Mon Sep 17 00:00:00 2001 From: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Date: Fri, 26 Jun 2020 07:33:54 -0700 Subject: [PATCH 315/892] chore: use GOOGLE_CLOUD_PROJECT and update reads/filters region tags (#60) * chore: use GOOGLE_CLOUD_PROJECT and update region tags * chore: add newlines to appease lint * fix: remove dulicate env var --- .../samples/hello/main_test.py | 2 +- .../samples/hello/noxfile.py | 3 +- .../samples/hello_happybase/main_test.py | 2 +- .../samples/hello_happybase/noxfile.py | 3 +- .../samples/instanceadmin/noxfile.py | 3 +- .../samples/metricscaler/metricscaler.py | 2 +- .../samples/metricscaler/metricscaler_test.py | 4 +- .../samples/metricscaler/noxfile.py | 3 +- .../samples/quickstart/main_test.py | 2 +- .../samples/quickstart/noxfile.py | 3 +- .../samples/quickstart_happybase/main_test.py | 2 +- .../samples/quickstart_happybase/noxfile.py | 3 +- .../snippets/filters/filter_snippets.py | 80 ++----------------- .../samples/snippets/filters/filters_test.py | 2 +- .../samples/snippets/filters/noxfile.py | 3 +- .../samples/snippets/reads/noxfile.py | 3 +- .../samples/snippets/reads/read_snippets.py | 50 ++---------- 
.../samples/snippets/reads/reads_test.py | 2 +- .../samples/snippets/writes/noxfile.py | 3 +- .../samples/snippets/writes/writes_test.py | 2 +- .../samples/tableadmin/noxfile.py | 3 +- .../samples/tableadmin/tableadmin_test.py | 2 +- 22 files changed, 32 insertions(+), 150 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/hello/main_test.py b/packages/google-cloud-bigtable/samples/hello/main_test.py index 75fe4ff24e7c..49b8098fcd7e 100644 --- a/packages/google-cloud-bigtable/samples/hello/main_test.py +++ b/packages/google-cloud-bigtable/samples/hello/main_test.py @@ -17,7 +17,7 @@ from main import main -PROJECT = os.environ['GCLOUD_PROJECT'] +PROJECT = os.environ['GOOGLE_CLOUD_PROJECT'] BIGTABLE_INSTANCE = os.environ['BIGTABLE_INSTANCE'] TABLE_NAME_FORMAT = 'hello-world-test-{}' TABLE_NAME_RANGE = 10000 diff --git a/packages/google-cloud-bigtable/samples/hello/noxfile.py b/packages/google-cloud-bigtable/samples/hello/noxfile.py index b23055f14a65..ba55d7ce53ca 100644 --- a/packages/google-cloud-bigtable/samples/hello/noxfile.py +++ b/packages/google-cloud-bigtable/samples/hello/noxfile.py @@ -43,7 +43,7 @@ # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. You can also use your own string # to use your own Cloud project. - 'gcloud_project_env': 'GCLOUD_PROJECT', + 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', # A dictionary you want to inject into your test. Don't put any @@ -72,7 +72,6 @@ def get_pytest_env_vars(): env_key = TEST_CONFIG['gcloud_project_env'] # This should error out if not set. ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] - ret['GCLOUD_PROJECT'] = os.environ[env_key] # Apply user supplied envs. ret.update(TEST_CONFIG['envs']) diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/main_test.py b/packages/google-cloud-bigtable/samples/hello_happybase/main_test.py index d1dfc65c29dd..f72fc0b2e52b 100644 --- a/packages/google-cloud-bigtable/samples/hello_happybase/main_test.py +++ b/packages/google-cloud-bigtable/samples/hello_happybase/main_test.py @@ -17,7 +17,7 @@ from main import main -PROJECT = os.environ['GCLOUD_PROJECT'] +PROJECT = os.environ['GOOGLE_CLOUD_PROJECT'] BIGTABLE_INSTANCE = os.environ['BIGTABLE_INSTANCE'] TABLE_NAME_FORMAT = 'hello-world-hb-test-{}' TABLE_NAME_RANGE = 10000 diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py b/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py index b23055f14a65..ba55d7ce53ca 100644 --- a/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py +++ b/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py @@ -43,7 +43,7 @@ # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. You can also use your own string # to use your own Cloud project. - 'gcloud_project_env': 'GCLOUD_PROJECT', + 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', # A dictionary you want to inject into your test. Don't put any @@ -72,7 +72,6 @@ def get_pytest_env_vars(): env_key = TEST_CONFIG['gcloud_project_env'] # This should error out if not set. ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] - ret['GCLOUD_PROJECT'] = os.environ[env_key] # Apply user supplied envs. 
ret.update(TEST_CONFIG['envs']) diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py b/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py index b23055f14a65..ba55d7ce53ca 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py +++ b/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py @@ -43,7 +43,7 @@ # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. You can also use your own string # to use your own Cloud project. - 'gcloud_project_env': 'GCLOUD_PROJECT', + 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', # A dictionary you want to inject into your test. Don't put any @@ -72,7 +72,6 @@ def get_pytest_env_vars(): env_key = TEST_CONFIG['gcloud_project_env'] # This should error out if not set. ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] - ret['GCLOUD_PROJECT'] = os.environ[env_key] # Apply user supplied envs. ret.update(TEST_CONFIG['envs']) diff --git a/packages/google-cloud-bigtable/samples/metricscaler/metricscaler.py b/packages/google-cloud-bigtable/samples/metricscaler/metricscaler.py index 3bfacd4ea81a..1957a81f1aa9 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/metricscaler.py +++ b/packages/google-cloud-bigtable/samples/metricscaler/metricscaler.py @@ -24,7 +24,7 @@ from google.cloud.bigtable import enums from google.cloud.monitoring_v3 import query -PROJECT = os.environ['GCLOUD_PROJECT'] +PROJECT = os.environ['GOOGLE_CLOUD_PROJECT'] def get_cpu_load(): diff --git a/packages/google-cloud-bigtable/samples/metricscaler/metricscaler_test.py b/packages/google-cloud-bigtable/samples/metricscaler/metricscaler_test.py index 6cd70cbffabc..1d4a3a3d11ec 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/metricscaler_test.py +++ b/packages/google-cloud-bigtable/samples/metricscaler/metricscaler_test.py @@ -21,6 +21,7 @@ from google.cloud import bigtable from google.cloud.bigtable import enums from mock import patch + import pytest from metricscaler import get_cpu_load @@ -28,7 +29,8 @@ from metricscaler import main from metricscaler import scale_bigtable -PROJECT = os.environ['GCLOUD_PROJECT'] + +PROJECT = os.environ['GOOGLE_CLOUD_PROJECT'] BIGTABLE_ZONE = os.environ['BIGTABLE_ZONE'] SIZE_CHANGE_STEP = 3 INSTANCE_ID_FORMAT = 'metric-scale-test-{}' diff --git a/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py b/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py index b23055f14a65..ba55d7ce53ca 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py +++ b/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py @@ -43,7 +43,7 @@ # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. You can also use your own string # to use your own Cloud project. - 'gcloud_project_env': 'GCLOUD_PROJECT', + 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', # A dictionary you want to inject into your test. Don't put any @@ -72,7 +72,6 @@ def get_pytest_env_vars(): env_key = TEST_CONFIG['gcloud_project_env'] # This should error out if not set. ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] - ret['GCLOUD_PROJECT'] = os.environ[env_key] # Apply user supplied envs. 
ret.update(TEST_CONFIG['envs']) diff --git a/packages/google-cloud-bigtable/samples/quickstart/main_test.py b/packages/google-cloud-bigtable/samples/quickstart/main_test.py index a61e5dbe8795..55c06f413df9 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/main_test.py +++ b/packages/google-cloud-bigtable/samples/quickstart/main_test.py @@ -21,7 +21,7 @@ from main import main -PROJECT = os.environ['GCLOUD_PROJECT'] +PROJECT = os.environ['GOOGLE_CLOUD_PROJECT'] BIGTABLE_INSTANCE = os.environ['BIGTABLE_INSTANCE'] TABLE_ID_FORMAT = 'quickstart-test-{}' TABLE_ID_RANGE = 10000 diff --git a/packages/google-cloud-bigtable/samples/quickstart/noxfile.py b/packages/google-cloud-bigtable/samples/quickstart/noxfile.py index b23055f14a65..ba55d7ce53ca 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/noxfile.py +++ b/packages/google-cloud-bigtable/samples/quickstart/noxfile.py @@ -43,7 +43,7 @@ # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. You can also use your own string # to use your own Cloud project. - 'gcloud_project_env': 'GCLOUD_PROJECT', + 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', # A dictionary you want to inject into your test. Don't put any @@ -72,7 +72,6 @@ def get_pytest_env_vars(): env_key = TEST_CONFIG['gcloud_project_env'] # This should error out if not set. ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] - ret['GCLOUD_PROJECT'] = os.environ[env_key] # Apply user supplied envs. ret.update(TEST_CONFIG['envs']) diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/main_test.py b/packages/google-cloud-bigtable/samples/quickstart_happybase/main_test.py index 771026157f65..5d4ae1e7acd7 100644 --- a/packages/google-cloud-bigtable/samples/quickstart_happybase/main_test.py +++ b/packages/google-cloud-bigtable/samples/quickstart_happybase/main_test.py @@ -21,7 +21,7 @@ from main import main -PROJECT = os.environ['GCLOUD_PROJECT'] +PROJECT = os.environ['GOOGLE_CLOUD_PROJECT'] BIGTABLE_INSTANCE = os.environ['BIGTABLE_INSTANCE'] TABLE_ID_FORMAT = 'quickstart-hb-test-{}' TABLE_ID_RANGE = 10000 diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py b/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py index b23055f14a65..ba55d7ce53ca 100644 --- a/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py +++ b/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py @@ -43,7 +43,7 @@ # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. You can also use your own string # to use your own Cloud project. - 'gcloud_project_env': 'GCLOUD_PROJECT', + 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', # A dictionary you want to inject into your test. Don't put any @@ -72,7 +72,6 @@ def get_pytest_env_vars(): env_key = TEST_CONFIG['gcloud_project_env'] # This should error out if not set. ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] - ret['GCLOUD_PROJECT'] = os.environ[env_key] # Apply user supplied envs. 
ret.update(TEST_CONFIG['envs']) diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/filter_snippets.py b/packages/google-cloud-bigtable/samples/snippets/filters/filter_snippets.py index 73ade365cff4..c815eae99b8e 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/filter_snippets.py +++ b/packages/google-cloud-bigtable/samples/snippets/filters/filter_snippets.py @@ -13,50 +13,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -# [START bigtable_filters_limit_timestamp_range] +# [START bigtable_filters_print] import datetime -# [END bigtable_filters_limit_timestamp_range] - -# [START bigtable_filters_limit_row_sample] -# [START bigtable_filters_limit_row_regex] -# [START bigtable_filters_limit_cells_per_col] -# [START bigtable_filters_limit_cells_per_row] -# [START bigtable_filters_limit_cells_per_row_offset] -# [START bigtable_filters_limit_col_family_regex] -# [START bigtable_filters_limit_col_qualifier_regex] -# [START bigtable_filters_limit_col_range] -# [START bigtable_filters_limit_value_range] -# [START bigtable_filters_limit_value_regex] -# [START bigtable_filters_limit_timestamp_range] -# [START bigtable_filters_limit_block_all] -# [START bigtable_filters_limit_pass_all] -# [START bigtable_filters_modify_strip_value] -# [START bigtable_filters_modify_apply_label] -# [START bigtable_filters_composing_chain] -# [START bigtable_filters_composing_interleave] -# [START bigtable_filters_composing_condition] from google.cloud import bigtable import google.cloud.bigtable.row_filters as row_filters -# [END bigtable_filters_limit_row_sample] -# [END bigtable_filters_limit_row_regex] -# [END bigtable_filters_limit_cells_per_col] -# [END bigtable_filters_limit_cells_per_row] -# [END bigtable_filters_limit_cells_per_row_offset] -# [END bigtable_filters_limit_col_family_regex] -# [END bigtable_filters_limit_col_qualifier_regex] -# [END bigtable_filters_limit_col_range] -# [END bigtable_filters_limit_value_range] -# [END bigtable_filters_limit_value_regex] -# [END bigtable_filters_limit_timestamp_range] -# [END bigtable_filters_limit_block_all] -# [END bigtable_filters_limit_pass_all] -# [END bigtable_filters_modify_strip_value] -# [END bigtable_filters_modify_apply_label] -# [END bigtable_filters_composing_chain] -# [END bigtable_filters_composing_interleave] -# [END bigtable_filters_composing_condition] +# Write your code here. 
+# [START_EXCLUDE] # [START bigtable_filters_limit_row_sample] @@ -307,26 +271,9 @@ def filter_composing_condition(project_id, instance_id, table_id): # [END bigtable_filters_composing_condition] +# [END_EXCLUDE] -# [START bigtable_filters_limit_row_sample] -# [START bigtable_filters_limit_row_regex] -# [START bigtable_filters_limit_cells_per_col] -# [START bigtable_filters_limit_cells_per_row] -# [START bigtable_filters_limit_cells_per_row_offset] -# [START bigtable_filters_limit_col_family_regex] -# [START bigtable_filters_limit_col_qualifier_regex] -# [START bigtable_filters_limit_col_range] -# [START bigtable_filters_limit_value_range] -# [START bigtable_filters_limit_value_regex] -# [START bigtable_filters_limit_timestamp_range] -# [START bigtable_filters_limit_block_all] -# [START bigtable_filters_limit_pass_all] -# [START bigtable_filters_modify_strip_value] -# [START bigtable_filters_modify_apply_label] -# [START bigtable_filters_composing_chain] -# [START bigtable_filters_composing_interleave] -# [START bigtable_filters_composing_condition] def print_row(row): print("Reading data for {}:".format(row.row_key.decode('utf-8'))) for cf, cols in sorted(row.cells.items()): @@ -340,21 +287,4 @@ def print_row(row): cell.value.decode('utf-8'), cell.timestamp, labels)) print("") -# [END bigtable_filters_limit_row_sample] -# [END bigtable_filters_limit_row_regex] -# [END bigtable_filters_limit_cells_per_col] -# [END bigtable_filters_limit_cells_per_row] -# [END bigtable_filters_limit_cells_per_row_offset] -# [END bigtable_filters_limit_col_family_regex] -# [END bigtable_filters_limit_col_qualifier_regex] -# [END bigtable_filters_limit_col_range] -# [END bigtable_filters_limit_value_range] -# [END bigtable_filters_limit_value_regex] -# [END bigtable_filters_limit_timestamp_range] -# [END bigtable_filters_limit_block_all] -# [END bigtable_filters_limit_pass_all] -# [END bigtable_filters_modify_strip_value] -# [END bigtable_filters_modify_apply_label] -# [END bigtable_filters_composing_chain] -# [END bigtable_filters_composing_interleave] -# [END bigtable_filters_composing_condition] +# [END bigtable_filters_print] diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/filters_test.py b/packages/google-cloud-bigtable/samples/snippets/filters/filters_test.py index 0d4b265f60c3..f46541bffd35 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/filters_test.py +++ b/packages/google-cloud-bigtable/samples/snippets/filters/filters_test.py @@ -22,7 +22,7 @@ import filter_snippets -PROJECT = os.environ['GCLOUD_PROJECT'] +PROJECT = os.environ['GOOGLE_CLOUD_PROJECT'] BIGTABLE_INSTANCE = os.environ['BIGTABLE_INSTANCE'] TABLE_ID_PREFIX = 'mobile-time-series-{}' diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py index b23055f14a65..ba55d7ce53ca 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py @@ -43,7 +43,7 @@ # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. You can also use your own string # to use your own Cloud project. - 'gcloud_project_env': 'GCLOUD_PROJECT', + 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', # A dictionary you want to inject into your test. 
Don't put any @@ -72,7 +72,6 @@ def get_pytest_env_vars(): env_key = TEST_CONFIG['gcloud_project_env'] # This should error out if not set. ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] - ret['GCLOUD_PROJECT'] = os.environ[env_key] # Apply user supplied envs. ret.update(TEST_CONFIG['envs']) diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py index b23055f14a65..ba55d7ce53ca 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py @@ -43,7 +43,7 @@ # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. You can also use your own string # to use your own Cloud project. - 'gcloud_project_env': 'GCLOUD_PROJECT', + 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', # A dictionary you want to inject into your test. Don't put any @@ -72,7 +72,6 @@ def get_pytest_env_vars(): env_key = TEST_CONFIG['gcloud_project_env'] # This should error out if not set. ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] - ret['GCLOUD_PROJECT'] = os.environ[env_key] # Apply user supplied envs. ret.update(TEST_CONFIG['envs']) diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/read_snippets.py b/packages/google-cloud-bigtable/samples/snippets/reads/read_snippets.py index aceef7cd14e6..6936b4c64c8b 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/read_snippets.py +++ b/packages/google-cloud-bigtable/samples/snippets/reads/read_snippets.py @@ -13,41 +13,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -# [START bigtable_reads_row] -# [START bigtable_reads_row_partial] -# [START bigtable_reads_rows] -# [START bigtable_reads_row_range] -# [START bigtable_reads_row_ranges] -# [START bigtable_reads_prefix] -# [START bigtable_reads_filter] +# [START bigtable_reads_print] from google.cloud import bigtable - -# [END bigtable_reads_row] -# [END bigtable_reads_row_partial] -# [END bigtable_reads_rows] -# [END bigtable_reads_row_range] -# [END bigtable_reads_row_ranges] -# [END bigtable_reads_prefix] -# [END bigtable_reads_filter] - -# [START bigtable_reads_row_partial] -# [START bigtable_reads_filter] import google.cloud.bigtable.row_filters as row_filters -# [END bigtable_reads_row_partial] -# [END bigtable_reads_filter] - - -# [START bigtable_reads_rows] -# [START bigtable_reads_row_range] -# [START bigtable_reads_row_ranges] -# [START bigtable_reads_prefix] from google.cloud.bigtable.row_set import RowSet - -# [END bigtable_reads_rows] -# [END bigtable_reads_row_range] -# [END bigtable_reads_row_ranges] -# [END bigtable_reads_prefix] +# Write your code here. 
+# [START_EXCLUDE] # [START bigtable_reads_row] @@ -161,15 +133,9 @@ def read_filter(project_id, instance_id, table_id): # [END bigtable_reads_filter] +# [END_EXCLUDE] -# [START bigtable_reads_row] -# [START bigtable_reads_row_partial] -# [START bigtable_reads_rows] -# [START bigtable_reads_row_range] -# [START bigtable_reads_row_ranges] -# [START bigtable_reads_prefix] -# [START bigtable_reads_filter] def print_row(row): print("Reading data for {}:".format(row.row_key.decode('utf-8'))) for cf, cols in sorted(row.cells.items()): @@ -183,10 +149,4 @@ def print_row(row): cell.value.decode('utf-8'), cell.timestamp, labels)) print("") -# [END bigtable_reads_row] -# [END bigtable_reads_row_partial] -# [END bigtable_reads_rows] -# [END bigtable_reads_row_range] -# [END bigtable_reads_row_ranges] -# [END bigtable_reads_prefix] -# [END bigtable_reads_filter] +# [END bigtable_reads_print] diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/reads_test.py b/packages/google-cloud-bigtable/samples/snippets/reads/reads_test.py index 63fb3f2f3bad..fc3421000229 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/reads_test.py +++ b/packages/google-cloud-bigtable/samples/snippets/reads/reads_test.py @@ -21,7 +21,7 @@ import read_snippets -PROJECT = os.environ['GCLOUD_PROJECT'] +PROJECT = os.environ['GOOGLE_CLOUD_PROJECT'] BIGTABLE_INSTANCE = os.environ['BIGTABLE_INSTANCE'] TABLE_ID_PREFIX = 'mobile-time-series-{}' diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py index b23055f14a65..ba55d7ce53ca 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py @@ -43,7 +43,7 @@ # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. You can also use your own string # to use your own Cloud project. - 'gcloud_project_env': 'GCLOUD_PROJECT', + 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', # A dictionary you want to inject into your test. Don't put any @@ -72,7 +72,6 @@ def get_pytest_env_vars(): env_key = TEST_CONFIG['gcloud_project_env'] # This should error out if not set. ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] - ret['GCLOUD_PROJECT'] = os.environ[env_key] # Apply user supplied envs. ret.update(TEST_CONFIG['envs']) diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/writes_test.py b/packages/google-cloud-bigtable/samples/snippets/writes/writes_test.py index 8420a3eebd7e..abe3000959ec 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/writes_test.py +++ b/packages/google-cloud-bigtable/samples/snippets/writes/writes_test.py @@ -26,7 +26,7 @@ from .write_simple import write_simple -PROJECT = os.environ['GCLOUD_PROJECT'] +PROJECT = os.environ['GOOGLE_CLOUD_PROJECT'] BIGTABLE_INSTANCE = os.environ['BIGTABLE_INSTANCE'] TABLE_ID_PREFIX = 'mobile-time-series-{}' diff --git a/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py b/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py index b23055f14a65..ba55d7ce53ca 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py +++ b/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py @@ -43,7 +43,7 @@ # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. You can also use your own string # to use your own Cloud project. 
- 'gcloud_project_env': 'GCLOUD_PROJECT', + 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', # A dictionary you want to inject into your test. Don't put any @@ -72,7 +72,6 @@ def get_pytest_env_vars(): env_key = TEST_CONFIG['gcloud_project_env'] # This should error out if not set. ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] - ret['GCLOUD_PROJECT'] = os.environ[env_key] # Apply user supplied envs. ret.update(TEST_CONFIG['envs']) diff --git a/packages/google-cloud-bigtable/samples/tableadmin/tableadmin_test.py b/packages/google-cloud-bigtable/samples/tableadmin/tableadmin_test.py index d6d3835a0bd1..782f6b621677 100755 --- a/packages/google-cloud-bigtable/samples/tableadmin/tableadmin_test.py +++ b/packages/google-cloud-bigtable/samples/tableadmin/tableadmin_test.py @@ -20,7 +20,7 @@ from tableadmin import delete_table from tableadmin import run_table_operations -PROJECT = os.environ['GCLOUD_PROJECT'] +PROJECT = os.environ['GOOGLE_CLOUD_PROJECT'] BIGTABLE_INSTANCE = os.environ['BIGTABLE_INSTANCE'] TABLE_NAME_FORMAT = 'tableadmin-test-{}' TABLE_NAME_RANGE = 10000 From 6d61b373d50ce40b9663274f7b4f3c374249082a Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Tue, 30 Jun 2020 11:40:02 -0700 Subject: [PATCH 316/892] chore: template updates (#63) This PR was generated using Autosynth. :rainbow: Synth log will be available here: https://source.cloud.google.com/results/invocations/7a96502d-330f-40ee-8fb9-6d283c5c132a/targets - [ ] To automatically regenerate this PR, check this box. Source-Link: https://github.com/googleapis/synthtool/commit/303271797a360f8a439203413f13a160f2f5b3b4 Source-Link: https://github.com/googleapis/synthtool/commit/652d446edabb0ea07de0ce542c6b37ab7dad3a19 --- packages/google-cloud-bigtable/docs/_templates/layout.html | 4 ++-- packages/google-cloud-bigtable/synth.metadata | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/docs/_templates/layout.html b/packages/google-cloud-bigtable/docs/_templates/layout.html index 228529efe2d2..6316a537f72b 100644 --- a/packages/google-cloud-bigtable/docs/_templates/layout.html +++ b/packages/google-cloud-bigtable/docs/_templates/layout.html @@ -21,8 +21,8 @@
- On January 1, 2020 this library will no longer support Python 2 on the latest released version. - Previously released library versions will continue to be available. For more information please + As of January 1, 2020 this library no longer supports Python 2 on the latest released version. + Library versions released prior to that date will continue to be available. For more information please visit Python 2 support on Google Cloud.
{% block body %} {% endblock %} diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index cd653f163a6f..e6c514ad2be0 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -4,7 +4,7 @@ "git": { "name": ".", "remote": "https://github.com/googleapis/python-bigtable.git", - "sha": "5c1d61827618d254c453b3871c0022a8d35bfbb2" + "sha": "7befdd396d1ef3d76d125785180f4295ba9e0247" } }, { @@ -19,14 +19,14 @@ "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "cd522c3b4dde821766d95c80ae5aeb43d7a41170" + "sha": "303271797a360f8a439203413f13a160f2f5b3b4" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "cd522c3b4dde821766d95c80ae5aeb43d7a41170" + "sha": "303271797a360f8a439203413f13a160f2f5b3b4" } } ], From 280efb3296890a0298bf3ff06c69d28e5a5cb5a5 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Thu, 16 Jul 2020 11:16:00 -0700 Subject: [PATCH 317/892] chore: Update protobuf and gapic-generator-csharp versions. (#64) PiperOrigin-RevId: 320411362 Source-Author: Google APIs Source-Date: Thu Jul 9 09:26:49 2020 -0700 Source-Repo: googleapis/googleapis Source-Sha: db69b46790b55a82ab7cfa473d031da787bc7591 Source-Link: https://github.com/googleapis/googleapis/commit/db69b46790b55a82ab7cfa473d031da787bc7591 --- .../bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py | 2 +- .../bigtable_admin_v2/proto/bigtable_table_admin_pb2.py | 2 +- .../google/cloud/bigtable_admin_v2/proto/common_pb2.py | 2 +- .../google/cloud/bigtable_admin_v2/proto/instance_pb2.py | 2 +- .../google/cloud/bigtable_admin_v2/proto/table_pb2.py | 2 +- .../google/cloud/bigtable_v2/proto/bigtable_pb2.py | 2 +- .../google/cloud/bigtable_v2/proto/data_pb2.py | 2 +- packages/google-cloud-bigtable/synth.metadata | 6 +++--- 8 files changed, 10 insertions(+), 10 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py index 63590907a22c..bd4d621d66ec 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto -"""Generated protocol buffer code.""" + from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py index 5ca167d87877..aef2bfdcdb32 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto -"""Generated protocol buffer code.""" + from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2.py index 09233cff5a02..dd668ef3cb2b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/bigtable_admin_v2/proto/common.proto -"""Generated protocol buffer code.""" + from google.protobuf.internal import enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py index e0138e0fb0f7..7c6e05fa5152 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/bigtable_admin_v2/proto/instance.proto -"""Generated protocol buffer code.""" + from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py index 67238a81e909..bd0f478fa09c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/bigtable_admin_v2/proto/table.proto -"""Generated protocol buffer code.""" + from google.protobuf.internal import enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py index ba711b20ca71..b0f13cbba51e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: google/cloud/bigtable_v2/proto/bigtable.proto -"""Generated protocol buffer code.""" + from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py index a64f9b10e2df..aa6f6737b9d0 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/bigtable_v2/proto/data.proto -"""Generated protocol buffer code.""" + from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index e6c514ad2be0..b1f195d58313 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -4,15 +4,15 @@ "git": { "name": ".", "remote": "https://github.com/googleapis/python-bigtable.git", - "sha": "7befdd396d1ef3d76d125785180f4295ba9e0247" + "sha": "adedea9daee0231e37a8848a8050b81ea217c6a8" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "b882b8e6bfcd708042ff00f7adc67ce750817dd0", - "internalRef": "318028816" + "sha": "db69b46790b55a82ab7cfa473d031da787bc7591", + "internalRef": "320411362" } }, { From 4af84f35e408271367c4420b0eb57e958f3d9a9a Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Tue, 21 Jul 2020 13:04:19 -0400 Subject: [PATCH 318/892] chore: release 1.3.0 (#2) * updated CHANGELOG.md [ci skip] * updated setup.cfg [ci skip] * updated setup.py Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- packages/google-cloud-bigtable/CHANGELOG.md | 28 +++++++++++++++++++++ packages/google-cloud-bigtable/setup.py | 2 +- 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index a0ee45d5b5ff..1eeb76023449 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,34 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [1.3.0](https://www.github.com/googleapis/python-bigtable/compare/v1.2.1...v1.3.0) (2020-07-16) + + +### Features + +* **api_core:** support version 3 policy bindings ([#9869](https://www.github.com/googleapis/python-bigtable/issues/9869)) ([a9dee32](https://www.github.com/googleapis/python-bigtable/commit/a9dee327ab39e22a014b3c4126f1c9d1beebe2d1)) +* **bigtable:** add py2 deprecation warnings; standardize use of 'required' in docstrings (via synth) ([#10064](https://www.github.com/googleapis/python-bigtable/issues/10064)) ([5460de0](https://www.github.com/googleapis/python-bigtable/commit/5460de0f7e0d936a23289f679c2b1a3040a21247)) +* Create CODEOWNERS ([#27](https://www.github.com/googleapis/python-bigtable/issues/27)) ([2b63746](https://www.github.com/googleapis/python-bigtable/commit/2b6374600d911b3dfd567eafd964260eb00a2bc0)) +* **bigtable:** skip system tests failing with emulator 
([#18](https://www.github.com/googleapis/python-bigtable/issues/18)) ([399d3d3](https://www.github.com/googleapis/python-bigtable/commit/399d3d3f960786f616ab6085f142a9703b0391e0)) +* **bigtable:** support requested_policy_version for Instance IAM ([#10001](https://www.github.com/googleapis/python-bigtable/issues/10001)) ([7e5d963](https://www.github.com/googleapis/python-bigtable/commit/7e5d963857fd8f7547778d5247b53c24de7a43f6)), closes [#3](https://www.github.com/googleapis/python-bigtable/issues/3) +* update gapic-generator and go microgen, backups generated api ([#55](https://www.github.com/googleapis/python-bigtable/issues/55)) ([c38888d](https://www.github.com/googleapis/python-bigtable/commit/c38888de3d0b1c49c438a7d350f42bc1805809f2)) + + +### Bug Fixes + +* localdeps ([5d799b2](https://www.github.com/googleapis/python-bigtable/commit/5d799b2d99e79ee9d20ae6cf2663d670493a8db3)) +* test_utils ([43481a9](https://www.github.com/googleapis/python-bigtable/commit/43481a91275e93fadd22eaa7cba3891a00cb97f8)) +* **python:** change autodoc_default_flags to autodoc_default_options ([#58](https://www.github.com/googleapis/python-bigtable/issues/58)) ([5c1d618](https://www.github.com/googleapis/python-bigtable/commit/5c1d61827618d254c453b3871c0022a8d35bfbb2)) + + +### Documentation + +* add note about multiprocessing usage ([#26](https://www.github.com/googleapis/python-bigtable/issues/26)) ([1449589](https://www.github.com/googleapis/python-bigtable/commit/1449589e8b5b9037dae4e9b071ff7e7662992e18)) +* **bigtable:** clean up ([#32](https://www.github.com/googleapis/python-bigtable/issues/32)) ([9f4068c](https://www.github.com/googleapis/python-bigtable/commit/9f4068cf8eb4351c02a4862380547ecf2564d838)) +* add samples from bigtable ([#38](https://www.github.com/googleapis/python-bigtable/issues/38)) ([1121f0d](https://www.github.com/googleapis/python-bigtable/commit/1121f0d647dbfc6c70a459b0979465803fdfad7b)), closes [#371](https://www.github.com/googleapis/python-bigtable/issues/371) [#383](https://www.github.com/googleapis/python-bigtable/issues/383) [#383](https://www.github.com/googleapis/python-bigtable/issues/383) [#456](https://www.github.com/googleapis/python-bigtable/issues/456) [#456](https://www.github.com/googleapis/python-bigtable/issues/456) [#540](https://www.github.com/googleapis/python-bigtable/issues/540) [#540](https://www.github.com/googleapis/python-bigtable/issues/540) [#542](https://www.github.com/googleapis/python-bigtable/issues/542) [#542](https://www.github.com/googleapis/python-bigtable/issues/542) [#544](https://www.github.com/googleapis/python-bigtable/issues/544) [#544](https://www.github.com/googleapis/python-bigtable/issues/544) [#576](https://www.github.com/googleapis/python-bigtable/issues/576) [#599](https://www.github.com/googleapis/python-bigtable/issues/599) [#599](https://www.github.com/googleapis/python-bigtable/issues/599) [#656](https://www.github.com/googleapis/python-bigtable/issues/656) [#715](https://www.github.com/googleapis/python-bigtable/issues/715) [#715](https://www.github.com/googleapis/python-bigtable/issues/715) [#781](https://www.github.com/googleapis/python-bigtable/issues/781) [#781](https://www.github.com/googleapis/python-bigtable/issues/781) [#887](https://www.github.com/googleapis/python-bigtable/issues/887) [#887](https://www.github.com/googleapis/python-bigtable/issues/887) [#914](https://www.github.com/googleapis/python-bigtable/issues/914) [#914](https://www.github.com/googleapis/python-bigtable/issues/914) 
[#922](https://www.github.com/googleapis/python-bigtable/issues/922) [#922](https://www.github.com/googleapis/python-bigtable/issues/922) [#962](https://www.github.com/googleapis/python-bigtable/issues/962) [#962](https://www.github.com/googleapis/python-bigtable/issues/962) [#1004](https://www.github.com/googleapis/python-bigtable/issues/1004) [#1004](https://www.github.com/googleapis/python-bigtable/issues/1004) [#1003](https://www.github.com/googleapis/python-bigtable/issues/1003) [#1005](https://www.github.com/googleapis/python-bigtable/issues/1005) [#1005](https://www.github.com/googleapis/python-bigtable/issues/1005) [#1028](https://www.github.com/googleapis/python-bigtable/issues/1028) [#1055](https://www.github.com/googleapis/python-bigtable/issues/1055) [#1055](https://www.github.com/googleapis/python-bigtable/issues/1055) [#1055](https://www.github.com/googleapis/python-bigtable/issues/1055) [#1057](https://www.github.com/googleapis/python-bigtable/issues/1057) [#1093](https://www.github.com/googleapis/python-bigtable/issues/1093) [#1093](https://www.github.com/googleapis/python-bigtable/issues/1093) [#1093](https://www.github.com/googleapis/python-bigtable/issues/1093) [#1094](https://www.github.com/googleapis/python-bigtable/issues/1094) [#1094](https://www.github.com/googleapis/python-bigtable/issues/1094) [#1121](https://www.github.com/googleapis/python-bigtable/issues/1121) [#1121](https://www.github.com/googleapis/python-bigtable/issues/1121) [#1121](https://www.github.com/googleapis/python-bigtable/issues/1121) [#1156](https://www.github.com/googleapis/python-bigtable/issues/1156) [#1158](https://www.github.com/googleapis/python-bigtable/issues/1158) [#1158](https://www.github.com/googleapis/python-bigtable/issues/1158) [#1158](https://www.github.com/googleapis/python-bigtable/issues/1158) [#1186](https://www.github.com/googleapis/python-bigtable/issues/1186) [#1186](https://www.github.com/googleapis/python-bigtable/issues/1186) [#1186](https://www.github.com/googleapis/python-bigtable/issues/1186) [#1199](https://www.github.com/googleapis/python-bigtable/issues/1199) [#1199](https://www.github.com/googleapis/python-bigtable/issues/1199) [#1199](https://www.github.com/googleapis/python-bigtable/issues/1199) [#1254](https://www.github.com/googleapis/python-bigtable/issues/1254) [#1254](https://www.github.com/googleapis/python-bigtable/issues/1254) [#1254](https://www.github.com/googleapis/python-bigtable/issues/1254) [#1377](https://www.github.com/googleapis/python-bigtable/issues/1377) [#1377](https://www.github.com/googleapis/python-bigtable/issues/1377) [#1377](https://www.github.com/googleapis/python-bigtable/issues/1377) [#1441](https://www.github.com/googleapis/python-bigtable/issues/1441) [#1441](https://www.github.com/googleapis/python-bigtable/issues/1441) [#1441](https://www.github.com/googleapis/python-bigtable/issues/1441) [#1464](https://www.github.com/googleapis/python-bigtable/issues/1464) [#1464](https://www.github.com/googleapis/python-bigtable/issues/1464) [#1464](https://www.github.com/googleapis/python-bigtable/issues/1464) [#1549](https://www.github.com/googleapis/python-bigtable/issues/1549) [#1562](https://www.github.com/googleapis/python-bigtable/issues/1562) [#1555](https://www.github.com/googleapis/python-bigtable/issues/1555) [#1616](https://www.github.com/googleapis/python-bigtable/issues/1616) [#1616](https://www.github.com/googleapis/python-bigtable/issues/1616) [#1665](https://www.github.com/googleapis/python-bigtable/issues/1665) 
[#1670](https://www.github.com/googleapis/python-bigtable/issues/1670) [#1664](https://www.github.com/googleapis/python-bigtable/issues/1664) [#1674](https://www.github.com/googleapis/python-bigtable/issues/1674) [#1755](https://www.github.com/googleapis/python-bigtable/issues/1755) [#1755](https://www.github.com/googleapis/python-bigtable/issues/1755) [#1755](https://www.github.com/googleapis/python-bigtable/issues/1755) [#1764](https://www.github.com/googleapis/python-bigtable/issues/1764) [#1764](https://www.github.com/googleapis/python-bigtable/issues/1764) [#1770](https://www.github.com/googleapis/python-bigtable/issues/1770) [#1794](https://www.github.com/googleapis/python-bigtable/issues/1794) [#1846](https://www.github.com/googleapis/python-bigtable/issues/1846) [#1846](https://www.github.com/googleapis/python-bigtable/issues/1846) [#1846](https://www.github.com/googleapis/python-bigtable/issues/1846) [#1846](https://www.github.com/googleapis/python-bigtable/issues/1846) [#1846](https://www.github.com/googleapis/python-bigtable/issues/1846) [#1846](https://www.github.com/googleapis/python-bigtable/issues/1846) [#1878](https://www.github.com/googleapis/python-bigtable/issues/1878) [#1890](https://www.github.com/googleapis/python-bigtable/issues/1890) [#1980](https://www.github.com/googleapis/python-bigtable/issues/1980) [#1980](https://www.github.com/googleapis/python-bigtable/issues/1980) [#1980](https://www.github.com/googleapis/python-bigtable/issues/1980) [#1980](https://www.github.com/googleapis/python-bigtable/issues/1980) [#1980](https://www.github.com/googleapis/python-bigtable/issues/1980) [#1980](https://www.github.com/googleapis/python-bigtable/issues/1980) [#1980](https://www.github.com/googleapis/python-bigtable/issues/1980) [#2057](https://www.github.com/googleapis/python-bigtable/issues/2057) [#2057](https://www.github.com/googleapis/python-bigtable/issues/2057) [#2054](https://www.github.com/googleapis/python-bigtable/issues/2054) [#2054](https://www.github.com/googleapis/python-bigtable/issues/2054) [#2018](https://www.github.com/googleapis/python-bigtable/issues/2018) [#2018](https://www.github.com/googleapis/python-bigtable/issues/2018) [#2224](https://www.github.com/googleapis/python-bigtable/issues/2224) [#2201](https://www.github.com/googleapis/python-bigtable/issues/2201) [#2436](https://www.github.com/googleapis/python-bigtable/issues/2436) [#2436](https://www.github.com/googleapis/python-bigtable/issues/2436) [#2436](https://www.github.com/googleapis/python-bigtable/issues/2436) [#2436](https://www.github.com/googleapis/python-bigtable/issues/2436) [#2436](https://www.github.com/googleapis/python-bigtable/issues/2436) [#2436](https://www.github.com/googleapis/python-bigtable/issues/2436) [#2436](https://www.github.com/googleapis/python-bigtable/issues/2436) [#2005](https://www.github.com/googleapis/python-bigtable/issues/2005) [#2005](https://www.github.com/googleapis/python-bigtable/issues/2005) [#2005](https://www.github.com/googleapis/python-bigtable/issues/2005) [#2005](https://www.github.com/googleapis/python-bigtable/issues/2005) [#2005](https://www.github.com/googleapis/python-bigtable/issues/2005) [#2692](https://www.github.com/googleapis/python-bigtable/issues/2692) [#2692](https://www.github.com/googleapis/python-bigtable/issues/2692) [#2692](https://www.github.com/googleapis/python-bigtable/issues/2692) [#2692](https://www.github.com/googleapis/python-bigtable/issues/2692) [#2692](https://www.github.com/googleapis/python-bigtable/issues/2692) 
[#2692](https://www.github.com/googleapis/python-bigtable/issues/2692) [#2692](https://www.github.com/googleapis/python-bigtable/issues/2692) [#2692](https://www.github.com/googleapis/python-bigtable/issues/2692) [#3066](https://www.github.com/googleapis/python-bigtable/issues/3066) [#2707](https://www.github.com/googleapis/python-bigtable/issues/2707) [#3103](https://www.github.com/googleapis/python-bigtable/issues/3103) [#2806](https://www.github.com/googleapis/python-bigtable/issues/2806) [#2806](https://www.github.com/googleapis/python-bigtable/issues/2806) [#2806](https://www.github.com/googleapis/python-bigtable/issues/2806) [#2806](https://www.github.com/googleapis/python-bigtable/issues/2806) [#2806](https://www.github.com/googleapis/python-bigtable/issues/2806) [#2806](https://www.github.com/googleapis/python-bigtable/issues/2806) [#2806](https://www.github.com/googleapis/python-bigtable/issues/2806) [#2806](https://www.github.com/googleapis/python-bigtable/issues/2806) [#3459](https://www.github.com/googleapis/python-bigtable/issues/3459) [#3494](https://www.github.com/googleapis/python-bigtable/issues/3494) [#3070](https://www.github.com/googleapis/python-bigtable/issues/3070) [#3119](https://www.github.com/googleapis/python-bigtable/issues/3119) [#3738](https://www.github.com/googleapis/python-bigtable/issues/3738) [#3738](https://www.github.com/googleapis/python-bigtable/issues/3738) [#3738](https://www.github.com/googleapis/python-bigtable/issues/3738) [#3739](https://www.github.com/googleapis/python-bigtable/issues/3739) [#3739](https://www.github.com/googleapis/python-bigtable/issues/3739) [#3740](https://www.github.com/googleapis/python-bigtable/issues/3740) [#3783](https://www.github.com/googleapis/python-bigtable/issues/3783) [#3877](https://www.github.com/googleapis/python-bigtable/issues/3877) +* **bigtable:** fix incorrect display_name update ([#46](https://www.github.com/googleapis/python-bigtable/issues/46)) ([1ac60be](https://www.github.com/googleapis/python-bigtable/commit/1ac60be05521b69c924118d40f88e07728a2f75e)) +* **bigtable:** remove missing argument from instance declaration ([#47](https://www.github.com/googleapis/python-bigtable/issues/47)) ([c966647](https://www.github.com/googleapis/python-bigtable/commit/c9666475dc31d581fdac0fc1c65e75ee9e27d832)), closes [#42](https://www.github.com/googleapis/python-bigtable/issues/42) + ## 1.2.1 01-03-2020 10:05 PST diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 43804da73dc3..73efe3540d4f 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -22,7 +22,7 @@ name = 'google-cloud-bigtable' description = 'Google Cloud Bigtable API client library' -version = '1.2.1' +version = "1.3.0" # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' From f41d6d9a464153d0920ceb3501b3471d3b09ce0a Mon Sep 17 00:00:00 2001 From: MF2199 <38331387+mf2199@users.noreply.github.com> Date: Tue, 21 Jul 2020 14:04:02 -0400 Subject: [PATCH 319/892] feat(bigtable): Managed Backups wrappers (#57) * [new] managed backup wrappers + unit tests * feat: managed backups wrappers * fix: docstrings + blacken * fix: cleanup * refactor: ``backup``, ``list_backups`` and ``retore_table`` methods moved to the ``Table`` class. 
* feat: `reaload` and `is_ready` methods removed * refactor: `re` parser made local * feat: integration test * refactor: cleanup * fix: format * refactor: `name`, `cluster` property getters & `table_list_backups` feat: new `__eq__` and `__ne__` convenience methods * refactor: using `BigtableTableAdminClient.table_path` in lieu of `format` * fix: `from_pb2` method to include all `backup_pb` fields * refactor: cleanup * format: blacken * feat: reinstated `Backup.reload` + test method * fix: docstring typos * cleanup: minor cleanup * cleanup: minor cleanup * fix: ASCII encoding * fix: Python 2 compatibility issue * fix: SphinxWarning [possible cause] * fix: lint errors Co-authored-by: kolea2 <45548808+kolea2@users.noreply.github.com> --- .../google/cloud/bigtable/backup.py | 393 ++++++++++ .../google/cloud/bigtable/instance.py | 5 +- .../google/cloud/bigtable/table.py | 180 ++++- .../google-cloud-bigtable/tests/system.py | 59 ++ .../tests/unit/test_backup.py | 725 ++++++++++++++++++ .../tests/unit/test_instance.py | 4 + .../tests/unit/test_table.py | 151 ++++ 7 files changed, 1512 insertions(+), 5 deletions(-) create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/backup.py create mode 100644 packages/google-cloud-bigtable/tests/unit/test_backup.py diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/backup.py b/packages/google-cloud-bigtable/google/cloud/bigtable/backup.py new file mode 100644 index 000000000000..c6a2826dd56d --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/backup.py @@ -0,0 +1,393 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""A user-friendly wrapper for a Google Cloud Bigtable Backup.""" + +import re + +from google.cloud._helpers import _datetime_to_pb_timestamp +from google.cloud.bigtable_admin_v2.gapic.bigtable_table_admin_client import ( + BigtableTableAdminClient, +) +from google.cloud.bigtable_admin_v2.types import table_pb2 +from google.cloud.exceptions import NotFound +from google.protobuf import field_mask_pb2 + +_BACKUP_NAME_RE = re.compile( + r"^projects/(?P[^/]+)/" + r"instances/(?P[a-z][-a-z0-9]*)/" + r"clusters/(?P[a-z][-a-z0-9]*)/" + r"backups/(?P[a-z][a-z0-9_\-]*[a-z0-9])$" +) + +_TABLE_NAME_RE = re.compile( + r"^projects/(?P[^/]+)/" + r"instances/(?P[a-z][-a-z0-9]*)/" + r"tables/(?P[_a-zA-Z0-9][-_.a-zA-Z0-9]*)$" +) + + +class Backup(object): + """Representation of a Google Cloud Bigtable Backup. + + A :class: `Backup` can be used to: + + * :meth:`create` the backup + * :meth:`update` the backup + * :meth:`delete` the backup + + :type backup_id: str + :param backup_id: The ID of the backup. + + :type instance: :class:`~google.cloud.bigtable.instance.Instance` + :param instance: The Instance that owns this Backup. + + :type cluster_id: str + :param cluster_id: (Optional) The ID of the Cluster that contains this Backup. + Required for calling 'delete', 'exists' etc. methods. + + :type table_id: str + :param table_id: (Optional) The ID of the Table that the Backup is for. 
+ Required if the 'create' method will be called. + + :type expire_time: :class:`datetime.datetime` + :param expire_time: (Optional) The expiration time after which the Backup + will be automatically deleted. Required if the `create` + method will be called. + """ + + def __init__( + self, backup_id, instance, cluster_id=None, table_id=None, expire_time=None + ): + self.backup_id = backup_id + self._instance = instance + self._cluster = cluster_id + self.table_id = table_id + self._expire_time = expire_time + + self._parent = None + self._source_table = None + self._start_time = None + self._end_time = None + self._size_bytes = None + self._state = None + + @property + def name(self): + """Backup name used in requests. + + The Backup name is of the form + + ``"projects/../instances/../clusters/../backups/{backup_id}"`` + + :rtype: str + :returns: The Backup name. + + :raises: ValueError: If the 'cluster' has not been set. + """ + if not self._cluster: + raise ValueError('"cluster" parameter must be set') + + return BigtableTableAdminClient.backup_path( + project=self._instance._client.project, + instance=self._instance.instance_id, + cluster=self._cluster, + backup=self.backup_id, + ) + + @property + def cluster(self): + """The ID of the [parent] cluster used in requests. + + :rtype: str + :returns: The ID of the cluster containing the Backup. + """ + return self._cluster + + @cluster.setter + def cluster(self, cluster_id): + self._cluster = cluster_id + + @property + def parent(self): + """Name of the parent cluster used in requests. + + .. note:: + This property will return None if ``cluster`` is not set. + + The parent name is of the form + + ``"projects/{project}/instances/{instance_id}/clusters/{cluster}"`` + + :rtype: str + :returns: A full path to the parent cluster. + """ + if not self._parent and self._cluster: + self._parent = BigtableTableAdminClient.cluster_path( + project=self._instance._client.project, + instance=self._instance.instance_id, + cluster=self._cluster, + ) + return self._parent + + @property + def source_table(self): + """The full name of the Table from which this Backup is created. + + .. note:: + This property will return None if ``table_id`` is not set. + + The table name is of the form + + ``"projects/../instances/../tables/{source_table}"`` + + :rtype: str + :returns: The Table name. + """ + if not self._source_table and self.table_id: + self._source_table = BigtableTableAdminClient.table_path( + project=self._instance._client.project, + instance=self._instance.instance_id, + table=self.table_id, + ) + return self._source_table + + @property + def expire_time(self): + """Expiration time used in the creation requests. + + :rtype: :class:`datetime.datetime` + :returns: A 'datetime' object representing the expiration time of + this Backup. + """ + return self._expire_time + + @expire_time.setter + def expire_time(self, new_expire_time): + self._expire_time = new_expire_time + + @property + def start_time(self): + """The time this Backup was started. + + :rtype: :class:`datetime.datetime` + :returns: A 'datetime' object representing the time when the creation + of this Backup had started. + """ + return self._start_time + + @property + def end_time(self): + """The time this Backup was finished. + + :rtype: :class:`datetime.datetime` + :returns: A 'datetime' object representing the time when the creation + of this Backup was finished. + """ + return self._end_time + + @property + def size_bytes(self): + """The size of this Backup, in bytes. 
+ + :rtype: int + :returns: The size of this Backup, in bytes. + """ + return self._size_bytes + + @property + def state(self): + """ The current state of this Backup. + + :rtype: :class:`~google.cloud.bigtable_admin_v2.gapic.enums.Backup.State` + :returns: The current state of this Backup. + """ + return self._state + + @classmethod + def from_pb(cls, backup_pb, instance): + """Creates a Backup instance from a protobuf message. + + :type backup_pb: :class:`table_pb2.Backup` + :param backup_pb: A Backup protobuf object. + + :type instance: :class:`Instance ` + :param instance: The Instance that owns the Backup. + + :rtype: :class:`~google.cloud.bigtable.backup.Backup` + :returns: The backup parsed from the protobuf response. + :raises: ValueError: If the backup name does not match the expected + format or the parsed project ID does not match the + project ID on the Instance's client, or if the + parsed instance ID does not match the Instance ID. + """ + match = _BACKUP_NAME_RE.match(backup_pb.name) + if match is None: + raise ValueError( + "Backup protobuf name was not in the expected format.", backup_pb.name + ) + if match.group("project") != instance._client.project: + raise ValueError( + "Project ID of the Backup does not match the Project ID " + "of the instance's client" + ) + + instance_id = match.group("instance_id") + if instance_id != instance.instance_id: + raise ValueError( + "Instance ID of the Backup does not match the Instance ID " + "of the instance" + ) + backup_id = match.group("backup_id") + cluster_id = match.group("cluster_id") + + match = _TABLE_NAME_RE.match(backup_pb.source_table) + table_id = match.group("table_id") if match else None + + expire_time = backup_pb.expire_time + + backup = cls( + backup_id, + instance, + cluster_id=cluster_id, + table_id=table_id, + expire_time=expire_time, + ) + backup._start_time = backup_pb.start_time + backup._end_time = backup_pb.end_time + backup._size_bytes = backup_pb.size_bytes + backup._state = backup_pb.state + + return backup + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return other.backup_id == self.backup_id and other._instance == self._instance + + def __ne__(self, other): + return not self == other + + def create(self, cluster_id=None): + """Creates this backup within its instance. + + :type cluster_id: str + :param cluster_id: (Optional) The ID of the Cluster for the newly + created Backup. + + :rtype: :class:`~google.api_core.operation.Operation` + :returns: :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` + instance, to be used to poll the status of the 'create' request + :raises Conflict: if the Backup already exists + :raises NotFound: if the Instance owning the Backup does not exist + :raises BadRequest: if the `table` or `expire_time` values are invalid, + or `expire_time` is not set + """ + if not self._expire_time: + raise ValueError('"expire_time" parameter must be set') + # TODO: Consider implementing a method that sets a default value of + # `expire_time`, e.g. 1 week from the creation of the Backup. 
+ if not self.table_id: + raise ValueError('"table" parameter must be set') + + if cluster_id: + self._cluster = cluster_id + + if not self._cluster: + raise ValueError('"cluster" parameter must be set') + + backup = table_pb2.Backup( + source_table=self.source_table, + expire_time=_datetime_to_pb_timestamp(self.expire_time), + ) + + api = self._instance._client.table_admin_client + return api.create_backup(self.parent, self.backup_id, backup) + + def get(self): + """Retrieves metadata of a pending or completed Backup. + + :returns: An instance of + :class:`~google.cloud.bigtable_admin_v2.types.Backup` + + :raises google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + :raises google.api_core.exceptions.RetryError: If the request failed + due to a retryable error and retry attempts failed. + :raises ValueError: If the parameters are invalid. + """ + api = self._instance._client.table_admin_client + try: + return api.get_backup(self.name) + except NotFound: + return None + + def reload(self): + """Refreshes the stored backup properties.""" + backup = self.get() + self._source_table = backup.source_table + self._expire_time = backup.expire_time + self._start_time = backup.start_time + self._end_time = backup.end_time + self._size_bytes = backup.size_bytes + self._state = backup.state + + def exists(self): + """Tests whether this Backup exists. + + :rtype: bool + :returns: True if the Backup exists, else False. + """ + return self.get() is not None + + def update_expire_time(self, new_expire_time): + """Update the expire time of this Backup. + + :type new_expire_time: :class:`datetime.datetime` + :param new_expire_time: the new expiration time timestamp + """ + backup_update = table_pb2.Backup( + name=self.name, expire_time=_datetime_to_pb_timestamp(new_expire_time), + ) + update_mask = field_mask_pb2.FieldMask(paths=["expire_time"]) + api = self._instance._client.table_admin_client + api.update_backup(backup_update, update_mask) + self._expire_time = new_expire_time + + def delete(self): + """Delete this Backup.""" + self._instance._client.table_admin_client.delete_backup(self.name) + + def restore(self, table_id): + """Creates a new Table by restoring from this Backup. The new Table + must be in the same Instance as the Instance containing the Backup. + The returned Table ``long-running operation`` can be used to track the + progress of the operation and to cancel it. The ``response`` type is + ``Table``, if successful. + + :param table_id: The ID of the Table to create and restore to. + This Table must not already exist. + :returns: An instance of + :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture`. + + :raises: google.api_core.exceptions.AlreadyExists: If the table + already exists. + :raises: google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + :raises: google.api_core.exceptions.RetryError: If the request failed + due to a retryable error and retry attempts failed. + :raises: ValueError: If the parameters are invalid. 
+ """ + api = self._instance._client.table_admin_client + return api.restore_table(self._instance.name, table_id, self.name) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py index e0a30590bbca..0c8b81fa3f2f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py @@ -14,12 +14,11 @@ """User-friendly container for Google Cloud Bigtable Instance.""" - import re -from google.cloud.bigtable.table import Table -from google.cloud.bigtable.cluster import Cluster from google.cloud.bigtable.app_profile import AppProfile +from google.cloud.bigtable.cluster import Cluster +from google.cloud.bigtable.table import Table from google.protobuf import field_mask_pb2 diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index 4852ff6e1e98..983bfcc14d70 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -14,7 +14,6 @@ """User-friendly container for Google Cloud Bigtable Table.""" - from grpc import StatusCode from google.api_core import timeout @@ -24,6 +23,7 @@ from google.api_core.retry import Retry from google.api_core.gapic_v1.method import wrap_method from google.cloud._helpers import _to_bytes +from google.cloud.bigtable.backup import Backup from google.cloud.bigtable.column_family import _gc_rule_from_pb from google.cloud.bigtable.column_family import ColumnFamily from google.cloud.bigtable.batcher import MutationsBatcher @@ -38,6 +38,9 @@ from google.cloud.bigtable.row_set import RowRange from google.cloud.bigtable import enums from google.cloud.bigtable_v2.proto import bigtable_pb2 as data_messages_v2_pb2 +from google.cloud.bigtable_admin_v2.gapic.bigtable_table_admin_client import ( + BigtableTableAdminClient, +) from google.cloud.bigtable_admin_v2.proto import table_pb2 as admin_messages_v2_pb2 from google.cloud.bigtable_admin_v2.proto import ( bigtable_table_admin_pb2 as table_admin_messages_v2_pb2, @@ -45,7 +48,6 @@ import warnings - # Maximum number of mutations in bulk (MutateRowsRequest message): # (https://cloud.google.com/bigtable/docs/reference/data/rpc/ # google.bigtable.v2#google.bigtable.v2.MutateRowRequest) @@ -782,6 +784,179 @@ def mutations_batcher(self, flush_count=FLUSH_COUNT, max_row_bytes=MAX_ROW_BYTES """ return MutationsBatcher(self, flush_count, max_row_bytes) + def backup(self, backup_id, cluster_id=None, expire_time=None): + """Factory to create a Backup linked to this Table. + + :type backup_id: str + :param backup_id: The ID of the Backup to be created. + + :type cluster_id: str + :param cluster_id: (Optional) The ID of the Cluster. Required for + calling 'delete', 'exists' etc. methods. + + :type expire_time: :class:`datetime.datetime` + :param expire_time: (Optional) The expiration time of this new Backup. + Required, if the `create` method needs to be called. + """ + return Backup( + backup_id, + self._instance, + cluster_id=cluster_id, + table_id=self.table_id, + expire_time=expire_time, + ) + + def list_backups(self, cluster_id=None, filter_=None, order_by=None, page_size=0): + """List Backups for this Table. + + :type cluster_id: str + :param cluster_id: (Optional) Specifies a single cluster to list + Backups from. If none is specified, the returned list + contains all the Backups in this Instance. 
+ + :type filter_: str + :param filter_: (Optional) A filter expression that filters backups + listed in the response. The expression must specify + the field name, a comparison operator, and the value + that you want to use for filtering. The value must be + a string, a number, or a boolean. The comparison + operator must be <, >, <=, >=, !=, =, or :. Colon ':' + represents a HAS operator which is roughly synonymous + with equality. Filter rules are case insensitive. + + The fields eligible for filtering are: + + - ``name`` + - ``source_table`` + - ``state`` + - ``start_time`` (values of the format YYYY-MM-DDTHH:MM:SSZ) + - ``end_time`` (values of the format YYYY-MM-DDTHH:MM:SSZ) + - ``expire_time`` (values of the format YYYY-MM-DDTHH:MM:SSZ) + - ``size_bytes`` + + To filter on multiple expressions, provide each + separate expression within parentheses. By default, + each expression is an AND expression. However, you can + include AND, OR, and NOT expressions explicitly. + + Some examples of using filters are: + + - ``name:"exact"`` --> The Backup name is the string "exact". + - ``name:howl`` --> The Backup name contains the string "howl" + - ``source_table:prod`` --> The source table's name contains + the string "prod". + - ``state:CREATING`` --> The Backup is pending creation. + - ``state:READY`` --> The Backup is created and ready for use. + - ``(name:howl) AND (start_time < \"2020-05-28T14:50:00Z\")`` + --> The Backup name contains the string "howl" and + the Backup start time is before 2020-05-28T14:50:00Z. + - ``size_bytes > 10000000000`` --> The Backup size is greater + than 10GB + + :type order_by: str + :param order_by: (Optional) An expression for specifying the sort order + of the results of the request. The string value should + specify one or more fields in ``Backup``. The full + syntax is described at https://aip.dev/132#ordering. + + Fields supported are: \\* name \\* source_table \\* + expire_time \\* start_time \\* end_time \\* + size_bytes \\* state + + For example, "start_time". The default sorting order + is ascending. To specify descending order for the + field, a suffix " desc" should be appended to the + field name. For example, "start_time desc". Redundant + space characters in the syntax are insigificant. If + order_by is empty, results will be sorted by + ``start_time`` in descending order starting from + the most recently created backup. + + :type page_size: int + :param page_size: (Optional) The maximum number of resources contained + in the underlying API response. If page streaming is + performed per-resource, this parameter does not + affect the return value. If page streaming is + performed per-page, this determines the maximum + number of resources in a page. + + :rtype: :class:`~google.api_core.page_iterator.Iterator` + :returns: Iterator of :class:`~google.cloud.bigtable.backup.Backup` + resources within the current Instance. + :raises: :class:`ValueError ` if one of the + returned Backups' name is not of the expected format. 
+ """ + cluster_id = cluster_id or "-" + + backups_filter = "source_table:{}".format(self.name) + if filter_: + backups_filter = "({}) AND ({})".format(backups_filter, filter_) + + parent = BigtableTableAdminClient.cluster_path( + project=self._instance._client.project, + instance=self._instance.instance_id, + cluster=cluster_id, + ) + client = self._instance._client.table_admin_client + backup_list_pb = client.list_backups( + parent=parent, + filter_=backups_filter, + order_by=order_by, + page_size=page_size, + ) + + result = [] + for backup_pb in backup_list_pb: + result.append(Backup.from_pb(backup_pb, self._instance)) + + return result + + def restore(self, new_table_id, cluster_id=None, backup_id=None, backup_name=None): + """Creates a new Table by restoring from the Backup specified by either + `backup_id` or `backup_name`. The returned ``long-running operation`` + can be used to track the progress of the operation and to cancel it. + The ``response`` type is ``Table``, if successful. + + :type new_table_id: str + :param new_table_id: The ID of the Table to create and restore to. + This Table must not already exist. + + :type cluster_id: str + :param cluster_id: The ID of the Cluster containing the Backup. + This parameter gets overriden by `backup_name`, if + the latter is provided. + + :type backup_id: str + :param backup_id: The ID of the Backup to restore the Table from. + This parameter gets overriden by `backup_name`, if + the latter is provided. + + :type backup_name: str + :param backup_name: (Optional) The full name of the Backup to restore + from. If specified, it overrides the `cluster_id` + and `backup_id` parameters even of such specified. + + :return: An instance of + :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture`. + + :raises: google.api_core.exceptions.AlreadyExists: If the table + already exists. + :raises: google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + :raises: google.api_core.exceptions.RetryError: If the request failed + due to a retryable error and retry attempts failed. + :raises: ValueError: If the parameters are invalid. + """ + api = self._instance._client.table_admin_client + if not backup_name: + backup_name = BigtableTableAdminClient.backup_path( + project=self._instance._client.project, + instance=self._instance.instance_id, + cluster=cluster_id, + backup=backup_id, + ) + return api.restore_table(self._instance.name, new_table_id, backup_name) + class _RetryableMutateRowsWorker(object): """A callable worker that can retry to mutate rows with transient errors. 
@@ -797,6 +972,7 @@ class _RetryableMutateRowsWorker(object): StatusCode.ABORTED.value[0], StatusCode.UNAVAILABLE.value[0], ) + # pylint: enable=unsubscriptable-object def __init__(self, client, table_name, rows, app_profile_id=None, timeout=None): diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index dd77dd9362b0..c41c90a6a664 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -15,6 +15,7 @@ import datetime import operator import os +import time import unittest from google.api_core.exceptions import TooManyRequests @@ -652,10 +653,13 @@ def tearDownClass(cls): def setUp(self): self.tables_to_delete = [] + self.backups_to_delete = [] def tearDown(self): for table in self.tables_to_delete: table.delete() + for backup in self.backups_to_delete: + backup.delete() def _skip_if_emulated(self, message): # NOTE: This method is necessary because ``Config.IN_EMULATOR`` @@ -829,6 +833,61 @@ def test_delete_column_family(self): # Make sure we have successfully deleted it. self.assertEqual(temp_table.list_column_families(), {}) + def test_backup(self): + temp_table_id = "test-backup-table" + temp_table = Config.INSTANCE_DATA.table(temp_table_id) + temp_table.create() + self.tables_to_delete.append(temp_table) + + temp_backup_id = "test-backup" + + # TODO: consider using `datetime.datetime.now().timestamp()` + # when support for Python 2 is fully dropped + expire = int(time.mktime(datetime.datetime.now().timetuple())) + 604800 + + # Testing `Table.backup()` factory + temp_backup = temp_table.backup( + temp_backup_id, + cluster_id=CLUSTER_ID_DATA, + expire_time=datetime.datetime.utcfromtimestamp(expire), + ) + + # Sanity check for `Backup.exists()` method + self.assertFalse(temp_backup.exists()) + + # Testing `Backup.create()` method + temp_backup.create().result() + + # Implicit testing of `Backup.delete()` method + self.backups_to_delete.append(temp_backup) + + # Testing `Backup.exists()` method + self.assertTrue(temp_backup.exists()) + + # Testing `Table.list_backups()` method + temp_table_backup = temp_table.list_backups()[0] + self.assertEqual(temp_backup_id, temp_table_backup.backup_id) + self.assertEqual(CLUSTER_ID_DATA, temp_table_backup.cluster) + self.assertEqual(expire, temp_table_backup.expire_time.seconds) + + # Testing `Backup.update_expire_time()` method + expire += 3600 # A one-hour change in the `expire_time` parameter + temp_backup.update_expire_time(datetime.datetime.utcfromtimestamp(expire)) + + # Testing `Backup.get()` method + temp_table_backup = temp_backup.get() + self.assertEqual(expire, temp_table_backup.expire_time.seconds) + + # Testing `Table.restore()` and `Backup.retore()` methods + restored_table_id = "test-backup-table-restored" + restored_table = Config.INSTANCE_DATA.table(restored_table_id) + temp_table.restore( + restored_table_id, cluster_id=CLUSTER_ID_DATA, backup_id=temp_backup_id + ).result() + tables = Config.INSTANCE_DATA.list_tables() + self.assertIn(restored_table, tables) + restored_table.delete() + class TestDataAPI(unittest.TestCase): @classmethod diff --git a/packages/google-cloud-bigtable/tests/unit/test_backup.py b/packages/google-cloud-bigtable/tests/unit/test_backup.py new file mode 100644 index 000000000000..587202a840e0 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/test_backup.py @@ -0,0 +1,725 @@ +# Copyright 2020 Google LLC All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import datetime +import mock +import unittest + +from ._testing import _make_credentials +from google.cloud._helpers import UTC + + +class TestBackup(unittest.TestCase): + PROJECT_ID = "project-id" + INSTANCE_ID = "instance-id" + INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID + CLUSTER_ID = "cluster-id" + CLUSTER_NAME = INSTANCE_NAME + "/clusters/" + CLUSTER_ID + TABLE_ID = "table-id" + TABLE_NAME = INSTANCE_NAME + "/tables/" + TABLE_ID + BACKUP_ID = "backup-id" + BACKUP_NAME = CLUSTER_NAME + "/backups/" + BACKUP_ID + + @staticmethod + def _get_target_class(): + from google.cloud.bigtable.backup import Backup + + return Backup + + @staticmethod + def _make_table_admin_client(): + from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient + + return mock.create_autospec(BigtableTableAdminClient, instance=True) + + def _make_one(self, *args, **kwargs): + return self._get_target_class()(*args, **kwargs) + + def _make_timestamp(self): + return datetime.datetime.utcnow().replace(tzinfo=UTC) + + def test_constructor_defaults(self): + instance = _Instance(self.INSTANCE_NAME) + backup = self._make_one(self.BACKUP_ID, instance) + + self.assertEqual(backup.backup_id, self.BACKUP_ID) + self.assertIs(backup._instance, instance) + self.assertIsNone(backup._cluster) + self.assertIsNone(backup.table_id) + self.assertIsNone(backup._expire_time) + + self.assertIsNone(backup._parent) + self.assertIsNone(backup._source_table) + self.assertIsNone(backup._start_time) + self.assertIsNone(backup._end_time) + self.assertIsNone(backup._size_bytes) + self.assertIsNone(backup._state) + + def test_constructor_non_defaults(self): + instance = _Instance(self.INSTANCE_NAME) + expire_time = self._make_timestamp() + + backup = self._make_one( + self.BACKUP_ID, + instance, + cluster_id=self.CLUSTER_ID, + table_id=self.TABLE_ID, + expire_time=expire_time, + ) + + self.assertEqual(backup.backup_id, self.BACKUP_ID) + self.assertIs(backup._instance, instance) + self.assertIs(backup._cluster, self.CLUSTER_ID) + self.assertEqual(backup.table_id, self.TABLE_ID) + self.assertEqual(backup._expire_time, expire_time) + + self.assertIsNone(backup._parent) + self.assertIsNone(backup._source_table) + self.assertIsNone(backup._start_time) + self.assertIsNone(backup._end_time) + self.assertIsNone(backup._size_bytes) + self.assertIsNone(backup._state) + + def test_from_pb_project_mismatch(self): + from google.cloud.bigtable_admin_v2.proto import table_pb2 + + alt_project_id = "alt-project-id" + client = _Client(project=alt_project_id) + instance = _Instance(self.INSTANCE_NAME, client) + backup_pb = table_pb2.Backup(name=self.BACKUP_NAME) + klasse = self._get_target_class() + + with self.assertRaises(ValueError): + klasse.from_pb(backup_pb, instance) + + def test_from_pb_instance_mismatch(self): + from google.cloud.bigtable_admin_v2.proto import table_pb2 + + alt_instance = "/projects/%s/instances/alt-instance" % self.PROJECT_ID + client = _Client() + instance 
= _Instance(alt_instance, client) + backup_pb = table_pb2.Backup(name=self.BACKUP_NAME) + klasse = self._get_target_class() + + with self.assertRaises(ValueError): + klasse.from_pb(backup_pb, instance) + + def test_from_pb_bad_name(self): + from google.cloud.bigtable_admin_v2.proto import table_pb2 + + client = _Client() + instance = _Instance(self.INSTANCE_NAME, client) + backup_pb = table_pb2.Backup(name="invalid_name") + klasse = self._get_target_class() + + with self.assertRaises(ValueError): + klasse.from_pb(backup_pb, instance) + + def test_from_pb_success(self): + from google.cloud.bigtable_admin_v2.gapic import enums + from google.cloud.bigtable_admin_v2.proto import table_pb2 + from google.cloud._helpers import _datetime_to_pb_timestamp + + client = _Client() + instance = _Instance(self.INSTANCE_NAME, client) + timestamp = _datetime_to_pb_timestamp(self._make_timestamp()) + size_bytes = 1234 + state = enums.Backup.State.READY + backup_pb = table_pb2.Backup( + name=self.BACKUP_NAME, + source_table=self.TABLE_NAME, + expire_time=timestamp, + start_time=timestamp, + end_time=timestamp, + size_bytes=size_bytes, + state=state, + ) + klasse = self._get_target_class() + + backup = klasse.from_pb(backup_pb, instance) + + self.assertTrue(isinstance(backup, klasse)) + self.assertEqual(backup._instance, instance) + self.assertEqual(backup.backup_id, self.BACKUP_ID) + self.assertEqual(backup.cluster, self.CLUSTER_ID) + self.assertEqual(backup.table_id, self.TABLE_ID) + self.assertEqual(backup._expire_time, timestamp) + self.assertEqual(backup._start_time, timestamp) + self.assertEqual(backup._end_time, timestamp) + self.assertEqual(backup._size_bytes, size_bytes) + self.assertEqual(backup._state, state) + + def test_property_name(self): + from google.cloud.bigtable.client import Client + from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + + api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) + credentials = _make_credentials() + client = Client(project=self.PROJECT_ID, credentials=credentials, admin=True) + client._table_admin_client = api + instance = _Instance(self.INSTANCE_NAME, client) + + backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) + self.assertEqual(backup.name, self.BACKUP_NAME) + + def test_property_cluster(self): + backup = self._make_one( + self.BACKUP_ID, _Instance(self.INSTANCE_NAME), cluster_id=self.CLUSTER_ID + ) + self.assertEqual(backup.cluster, self.CLUSTER_ID) + + def test_property_cluster_setter(self): + backup = self._make_one(self.BACKUP_ID, _Instance(self.INSTANCE_NAME)) + backup.cluster = self.CLUSTER_ID + self.assertEqual(backup.cluster, self.CLUSTER_ID) + + def test_property_parent_none(self): + backup = self._make_one(self.BACKUP_ID, _Instance(self.INSTANCE_NAME),) + self.assertIsNone(backup.parent) + + def test_property_parent_w_cluster(self): + from google.cloud.bigtable.client import Client + from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + + api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) + credentials = _make_credentials() + client = Client(project=self.PROJECT_ID, credentials=credentials, admin=True) + client._table_admin_client = api + instance = _Instance(self.INSTANCE_NAME, client) + + backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) + self.assertEqual(backup._cluster, self.CLUSTER_ID) + self.assertEqual(backup.parent, self.CLUSTER_NAME) + + def test_property_source_table_none(self): + from 
google.cloud.bigtable.client import Client + from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + + api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) + credentials = _make_credentials() + client = Client(project=self.PROJECT_ID, credentials=credentials, admin=True) + client._table_admin_client = api + instance = _Instance(self.INSTANCE_NAME, client) + + backup = self._make_one(self.BACKUP_ID, instance) + self.assertIsNone(backup.source_table) + + def test_property_source_table_valid(self): + from google.cloud.bigtable.client import Client + from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + + api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) + credentials = _make_credentials() + client = Client(project=self.PROJECT_ID, credentials=credentials, admin=True) + client._table_admin_client = api + instance = _Instance(self.INSTANCE_NAME, client) + + backup = self._make_one(self.BACKUP_ID, instance, table_id=self.TABLE_ID) + self.assertEqual(backup.source_table, self.TABLE_NAME) + + def test_property_expire_time(self): + instance = _Instance(self.INSTANCE_NAME) + expire_time = self._make_timestamp() + backup = self._make_one(self.BACKUP_ID, instance, expire_time=expire_time) + self.assertEqual(backup.expire_time, expire_time) + + def test_property_expire_time_setter(self): + instance = _Instance(self.INSTANCE_NAME) + expire_time = self._make_timestamp() + backup = self._make_one(self.BACKUP_ID, instance) + backup.expire_time = expire_time + self.assertEqual(backup.expire_time, expire_time) + + def test_property_start_time(self): + instance = _Instance(self.INSTANCE_NAME) + backup = self._make_one(self.BACKUP_ID, instance) + expected = backup._start_time = self._make_timestamp() + self.assertEqual(backup.start_time, expected) + + def test_property_end_time(self): + instance = _Instance(self.INSTANCE_NAME) + backup = self._make_one(self.BACKUP_ID, instance) + expected = backup._end_time = self._make_timestamp() + self.assertEqual(backup.end_time, expected) + + def test_property_size(self): + instance = _Instance(self.INSTANCE_NAME) + backup = self._make_one(self.BACKUP_ID, instance) + expected = backup._size_bytes = 10 + self.assertEqual(backup.size_bytes, expected) + + def test_property_state(self): + from google.cloud.bigtable_admin_v2.gapic import enums + + instance = _Instance(self.INSTANCE_NAME) + backup = self._make_one(self.BACKUP_ID, instance) + expected = backup._state = enums.Backup.State.READY + self.assertEqual(backup.state, expected) + + def test___eq__(self): + instance = object() + backup1 = self._make_one(self.BACKUP_ID, instance) + backup2 = self._make_one(self.BACKUP_ID, instance) + self.assertTrue(backup1 == backup2) + + def test___eq__different_types(self): + instance = object() + backup1 = self._make_one(self.BACKUP_ID, instance) + backup2 = object() + self.assertFalse(backup1 == backup2) + + def test___ne__same_value(self): + instance = object() + backup1 = self._make_one(self.BACKUP_ID, instance) + backup2 = self._make_one(self.BACKUP_ID, instance) + self.assertFalse(backup1 != backup2) + + def test___ne__(self): + backup1 = self._make_one("backup_1", "instance1") + backup2 = self._make_one("backup_2", "instance2") + self.assertTrue(backup1 != backup2) + + def test_create_grpc_error(self): + from google.api_core.exceptions import GoogleAPICallError + from google.api_core.exceptions import Unknown + from google.cloud._helpers import _datetime_to_pb_timestamp + from 
google.cloud.bigtable_admin_v2.types import table_pb2 + + client = _Client() + api = client.table_admin_client = self._make_table_admin_client() + api.create_backup.side_effect = Unknown("testing") + + timestamp = self._make_timestamp() + backup = self._make_one( + self.BACKUP_ID, + _Instance(self.INSTANCE_NAME, client=client), + table_id=self.TABLE_ID, + expire_time=timestamp, + ) + + backup_pb = table_pb2.Backup( + source_table=self.TABLE_NAME, + expire_time=_datetime_to_pb_timestamp(timestamp), + ) + + with self.assertRaises(GoogleAPICallError): + backup.create(self.CLUSTER_ID) + + api.create_backup.assert_called_once_with( + parent=self.CLUSTER_NAME, backup_id=self.BACKUP_ID, backup=backup_pb, + ) + + def test_create_already_exists(self): + from google.cloud._helpers import _datetime_to_pb_timestamp + from google.cloud.bigtable_admin_v2.types import table_pb2 + from google.cloud.exceptions import Conflict + + client = _Client() + api = client.table_admin_client = self._make_table_admin_client() + api.create_backup.side_effect = Conflict("testing") + + timestamp = self._make_timestamp() + backup = self._make_one( + self.BACKUP_ID, + _Instance(self.INSTANCE_NAME, client=client), + table_id=self.TABLE_ID, + expire_time=timestamp, + ) + + backup_pb = table_pb2.Backup( + source_table=self.TABLE_NAME, + expire_time=_datetime_to_pb_timestamp(timestamp), + ) + + with self.assertRaises(Conflict): + backup.create(self.CLUSTER_ID) + + api.create_backup.assert_called_once_with( + parent=self.CLUSTER_NAME, backup_id=self.BACKUP_ID, backup=backup_pb, + ) + + def test_create_instance_not_found(self): + from google.cloud._helpers import _datetime_to_pb_timestamp + from google.cloud.bigtable_admin_v2.types import table_pb2 + from google.cloud.exceptions import NotFound + + client = _Client() + api = client.table_admin_client = self._make_table_admin_client() + api.create_backup.side_effect = NotFound("testing") + + timestamp = self._make_timestamp() + backup = self._make_one( + self.BACKUP_ID, + _Instance(self.INSTANCE_NAME, client=client), + table_id=self.TABLE_ID, + expire_time=timestamp, + ) + + backup_pb = table_pb2.Backup( + source_table=self.TABLE_NAME, + expire_time=_datetime_to_pb_timestamp(timestamp), + ) + + with self.assertRaises(NotFound): + backup.create(self.CLUSTER_ID) + + api.create_backup.assert_called_once_with( + parent=self.CLUSTER_NAME, backup_id=self.BACKUP_ID, backup=backup_pb, + ) + + def test_create_cluster_not_set(self): + backup = self._make_one( + self.BACKUP_ID, + _Instance(self.INSTANCE_NAME), + table_id=self.TABLE_ID, + expire_time=self._make_timestamp(), + ) + + with self.assertRaises(ValueError): + backup.create() + + def test_create_table_not_set(self): + backup = self._make_one( + self.BACKUP_ID, + _Instance(self.INSTANCE_NAME), + expire_time=self._make_timestamp(), + ) + + with self.assertRaises(ValueError): + backup.create(self.CLUSTER_ID) + + def test_create_expire_time_not_set(self): + backup = self._make_one( + self.BACKUP_ID, _Instance(self.INSTANCE_NAME), table_id=self.TABLE_ID, + ) + + with self.assertRaises(ValueError): + backup.create(self.CLUSTER_ID) + + def test_create_success(self): + from google.cloud._helpers import _datetime_to_pb_timestamp + from google.cloud.bigtable_admin_v2.types import table_pb2 + + op_future = object() + client = _Client() + api = client.table_admin_client = self._make_table_admin_client() + api.create_backup.return_value = op_future + + timestamp = self._make_timestamp() + backup = self._make_one( + self.BACKUP_ID, + 
_Instance(self.INSTANCE_NAME, client=client), + table_id=self.TABLE_ID, + expire_time=timestamp, + ) + + backup_pb = table_pb2.Backup( + source_table=self.TABLE_NAME, + expire_time=_datetime_to_pb_timestamp(timestamp), + ) + + future = backup.create(self.CLUSTER_ID) + self.assertEqual(backup._cluster, self.CLUSTER_ID) + self.assertIs(future, op_future) + + api.create_backup.assert_called_once_with( + parent=self.CLUSTER_NAME, backup_id=self.BACKUP_ID, backup=backup_pb, + ) + + def test_exists_grpc_error(self): + from google.api_core.exceptions import Unknown + + client = _Client() + api = client.table_admin_client = self._make_table_admin_client() + api.get_backup.side_effect = Unknown("testing") + + instance = _Instance(self.INSTANCE_NAME, client=client) + backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) + + with self.assertRaises(Unknown): + backup.exists() + + api.get_backup.assert_called_once_with(self.BACKUP_NAME) + + def test_exists_not_found(self): + from google.api_core.exceptions import NotFound + + client = _Client() + api = client.table_admin_client = self._make_table_admin_client() + api.get_backup.side_effect = NotFound("testing") + + instance = _Instance(self.INSTANCE_NAME, client=client) + backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) + + self.assertFalse(backup.exists()) + + api.get_backup.assert_called_once_with(self.BACKUP_NAME) + + def test_get(self): + from google.cloud.bigtable_admin_v2.gapic import enums + from google.cloud.bigtable_admin_v2.proto import table_pb2 + from google.cloud._helpers import _datetime_to_pb_timestamp + + timestamp = _datetime_to_pb_timestamp(self._make_timestamp()) + state = enums.Backup.State.READY + + client = _Client() + backup_pb = table_pb2.Backup( + name=self.BACKUP_NAME, + source_table=self.TABLE_NAME, + expire_time=timestamp, + start_time=timestamp, + end_time=timestamp, + size_bytes=0, + state=state, + ) + api = client.table_admin_client = self._make_table_admin_client() + api.get_backup.return_value = backup_pb + + instance = _Instance(self.INSTANCE_NAME, client=client) + backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) + + self.assertEqual(backup.get(), backup_pb) + + def test_reload(self): + from google.cloud.bigtable_admin_v2.gapic import enums + from google.cloud.bigtable_admin_v2.proto import table_pb2 + from google.cloud._helpers import _datetime_to_pb_timestamp + + timestamp = _datetime_to_pb_timestamp(self._make_timestamp()) + state = enums.Backup.State.READY + + client = _Client() + backup_pb = table_pb2.Backup( + name=self.BACKUP_NAME, + source_table=self.TABLE_NAME, + expire_time=timestamp, + start_time=timestamp, + end_time=timestamp, + size_bytes=0, + state=state, + ) + api = client.table_admin_client = self._make_table_admin_client() + api.get_backup.return_value = backup_pb + + instance = _Instance(self.INSTANCE_NAME, client=client) + backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) + + backup.reload() + self.assertEqual(backup._source_table, self.TABLE_NAME) + self.assertEqual(backup._expire_time, timestamp) + self.assertEqual(backup._start_time, timestamp) + self.assertEqual(backup._end_time, timestamp) + self.assertEqual(backup._size_bytes, 0) + self.assertEqual(backup._state, state) + + def test_exists_success(self): + from google.cloud.bigtable_admin_v2.proto import table_pb2 + + client = _Client() + backup_pb = table_pb2.Backup(name=self.BACKUP_NAME) + api = client.table_admin_client = 
self._make_table_admin_client() + api.get_backup.return_value = backup_pb + + instance = _Instance(self.INSTANCE_NAME, client=client) + backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) + + self.assertTrue(backup.exists()) + + api.get_backup.assert_called_once_with(self.BACKUP_NAME) + + def test_delete_grpc_error(self): + from google.api_core.exceptions import Unknown + + client = _Client() + api = client.table_admin_client = self._make_table_admin_client() + api.delete_backup.side_effect = Unknown("testing") + instance = _Instance(self.INSTANCE_NAME, client=client) + backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) + + with self.assertRaises(Unknown): + backup.delete() + + api.delete_backup.assert_called_once_with(self.BACKUP_NAME) + + def test_delete_not_found(self): + from google.api_core.exceptions import NotFound + + client = _Client() + api = client.table_admin_client = self._make_table_admin_client() + api.delete_backup.side_effect = NotFound("testing") + instance = _Instance(self.INSTANCE_NAME, client=client) + backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) + + with self.assertRaises(NotFound): + backup.delete() + + api.delete_backup.assert_called_once_with(self.BACKUP_NAME) + + def test_delete_success(self): + from google.protobuf.empty_pb2 import Empty + + client = _Client() + api = client.table_admin_client = self._make_table_admin_client() + api.delete_backup.return_value = Empty() + instance = _Instance(self.INSTANCE_NAME, client=client) + backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) + + backup.delete() + + api.delete_backup.assert_called_once_with(self.BACKUP_NAME) + + def test_update_expire_time_grpc_error(self): + from google.api_core.exceptions import Unknown + from google.cloud._helpers import _datetime_to_pb_timestamp + from google.cloud.bigtable_admin_v2.types import table_pb2 + from google.protobuf import field_mask_pb2 + + client = _Client() + api = client.table_admin_client = self._make_table_admin_client() + api.update_backup.side_effect = Unknown("testing") + instance = _Instance(self.INSTANCE_NAME, client=client) + backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) + expire_time = self._make_timestamp() + + with self.assertRaises(Unknown): + backup.update_expire_time(expire_time) + + backup_update = table_pb2.Backup( + name=self.BACKUP_NAME, expire_time=_datetime_to_pb_timestamp(expire_time), + ) + update_mask = field_mask_pb2.FieldMask(paths=["expire_time"]) + api.update_backup.assert_called_once_with( + backup_update, update_mask, + ) + + def test_update_expire_time_not_found(self): + from google.api_core.exceptions import NotFound + from google.cloud._helpers import _datetime_to_pb_timestamp + from google.cloud.bigtable_admin_v2.types import table_pb2 + from google.protobuf import field_mask_pb2 + + client = _Client() + api = client.table_admin_client = self._make_table_admin_client() + api.update_backup.side_effect = NotFound("testing") + instance = _Instance(self.INSTANCE_NAME, client=client) + backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) + expire_time = self._make_timestamp() + + with self.assertRaises(NotFound): + backup.update_expire_time(expire_time) + + backup_update = table_pb2.Backup( + name=self.BACKUP_NAME, expire_time=_datetime_to_pb_timestamp(expire_time), + ) + update_mask = field_mask_pb2.FieldMask(paths=["expire_time"]) + api.update_backup.assert_called_once_with( + 
backup_update, update_mask, + ) + + def test_update_expire_time_success(self): + from google.cloud._helpers import _datetime_to_pb_timestamp + from google.cloud.bigtable_admin_v2.proto import table_pb2 + from google.protobuf import field_mask_pb2 + + client = _Client() + api = client.table_admin_client = self._make_table_admin_client() + api.update_backup.return_type = table_pb2.Backup(name=self.BACKUP_NAME) + instance = _Instance(self.INSTANCE_NAME, client=client) + backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) + expire_time = self._make_timestamp() + + backup.update_expire_time(expire_time) + + backup_update = table_pb2.Backup( + name=self.BACKUP_NAME, expire_time=_datetime_to_pb_timestamp(expire_time), + ) + update_mask = field_mask_pb2.FieldMask(paths=["expire_time"]) + api.update_backup.assert_called_once_with( + backup_update, update_mask, + ) + + def test_restore_grpc_error(self): + from google.api_core.exceptions import GoogleAPICallError + from google.api_core.exceptions import Unknown + + client = _Client() + api = client.table_admin_client = self._make_table_admin_client() + api.restore_table.side_effect = Unknown("testing") + + timestamp = self._make_timestamp() + backup = self._make_one( + self.BACKUP_ID, + _Instance(self.INSTANCE_NAME, client=client), + cluster_id=self.CLUSTER_ID, + table_id=self.TABLE_NAME, + expire_time=timestamp, + ) + + with self.assertRaises(GoogleAPICallError): + backup.restore(self.TABLE_ID) + + api.restore_table.assert_called_once_with( + parent=self.INSTANCE_NAME, table_id=self.TABLE_ID, backup=self.BACKUP_NAME, + ) + + def test_restore_cluster_not_set(self): + client = _Client() + client.table_admin_client = self._make_table_admin_client() + backup = self._make_one( + self.BACKUP_ID, + _Instance(self.INSTANCE_NAME, client=client), + table_id=self.TABLE_ID, + expire_time=self._make_timestamp(), + ) + + with self.assertRaises(ValueError): + backup.restore(self.TABLE_ID) + + def test_restore_success(self): + op_future = object() + client = _Client() + api = client.table_admin_client = self._make_table_admin_client() + api.restore_table.return_value = op_future + + timestamp = self._make_timestamp() + backup = self._make_one( + self.BACKUP_ID, + _Instance(self.INSTANCE_NAME, client=client), + cluster_id=self.CLUSTER_ID, + table_id=self.TABLE_NAME, + expire_time=timestamp, + ) + + future = backup.restore(self.TABLE_ID) + self.assertEqual(backup._cluster, self.CLUSTER_ID) + self.assertIs(future, op_future) + + api.restore_table.assert_called_once_with( + parent=self.INSTANCE_NAME, table_id=self.TABLE_ID, backup=self.BACKUP_NAME, + ) + + +class _Client(object): + def __init__(self, project=TestBackup.PROJECT_ID): + self.project = project + self.project_name = "projects/" + self.project + + +class _Instance(object): + def __init__(self, name, client=None): + self.name = name + self.instance_id = name.rsplit("/", 1)[1] + self._client = client diff --git a/packages/google-cloud-bigtable/tests/unit/test_instance.py b/packages/google-cloud-bigtable/tests/unit/test_instance.py index b129d4edc825..14dd0bf5872d 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_instance.py +++ b/packages/google-cloud-bigtable/tests/unit/test_instance.py @@ -39,6 +39,10 @@ class TestInstance(unittest.TestCase): ) TABLE_ID = "table_id" TABLE_NAME = INSTANCE_NAME + "/tables/" + TABLE_ID + CLUSTER_ID = "cluster-id" + CLUSTER_NAME = INSTANCE_NAME + "/clusters/" + CLUSTER_ID + BACKUP_ID = "backup-id" + BACKUP_NAME = CLUSTER_NAME + "/backups/" + 
BACKUP_ID @staticmethod def _get_target_class(): diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index d4bb621c28c0..f7377bc760f7 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -128,8 +128,12 @@ class TestTable(unittest.TestCase): PROJECT_ID = "project-id" INSTANCE_ID = "instance-id" INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID + CLUSTER_ID = "cluster-id" + CLUSTER_NAME = INSTANCE_NAME + "/clusters/" + CLUSTER_ID TABLE_ID = "table-id" TABLE_NAME = INSTANCE_NAME + "/tables/" + TABLE_ID + BACKUP_ID = "backup-id" + BACKUP_NAME = CLUSTER_NAME + "/backups/" + BACKUP_ID ROW_KEY = b"row-key" ROW_KEY_1 = b"row-key-1" ROW_KEY_2 = b"row-key-2" @@ -1153,6 +1157,153 @@ def test_test_iam_permissions(self): resource=table.name, permissions=permissions ) + def test_backup_factory_defaults(self): + from google.cloud.bigtable.backup import Backup + + instance = self._make_one(self.INSTANCE_ID, None) + table = self._make_one(self.TABLE_ID, instance) + backup = table.backup(self.BACKUP_ID) + + self.assertIsInstance(backup, Backup) + self.assertEqual(backup.backup_id, self.BACKUP_ID) + self.assertIs(backup._instance, instance) + self.assertIsNone(backup._cluster) + self.assertEqual(backup.table_id, self.TABLE_ID) + self.assertIsNone(backup._expire_time) + + self.assertIsNone(backup._parent) + self.assertIsNone(backup._source_table) + self.assertIsNone(backup._start_time) + self.assertIsNone(backup._end_time) + self.assertIsNone(backup._size_bytes) + self.assertIsNone(backup._state) + + def test_backup_factory_non_defaults(self): + import datetime + from google.cloud._helpers import UTC + from google.cloud.bigtable.backup import Backup + + instance = self._make_one(self.INSTANCE_ID, None) + table = self._make_one(self.TABLE_ID, instance) + timestamp = datetime.datetime.utcnow().replace(tzinfo=UTC) + backup = table.backup( + self.BACKUP_ID, cluster_id=self.CLUSTER_ID, expire_time=timestamp, + ) + + self.assertIsInstance(backup, Backup) + self.assertEqual(backup.backup_id, self.BACKUP_ID) + self.assertIs(backup._instance, instance) + + self.assertEqual(backup.backup_id, self.BACKUP_ID) + self.assertIs(backup._cluster, self.CLUSTER_ID) + self.assertEqual(backup.table_id, self.TABLE_ID) + self.assertEqual(backup._expire_time, timestamp) + self.assertIsNone(backup._start_time) + self.assertIsNone(backup._end_time) + self.assertIsNone(backup._size_bytes) + self.assertIsNone(backup._state) + + def _list_backups_helper(self, cluster_id=None, filter_=None, **kwargs): + from google.cloud.bigtable_admin_v2.gapic import ( + bigtable_instance_admin_client, + bigtable_table_admin_client, + ) + from google.cloud.bigtable_admin_v2.proto import ( + bigtable_table_admin_pb2, + table_pb2, + ) + from google.cloud.bigtable.backup import Backup + + instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient + table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) + client = self._make_client( + project=self.PROJECT_ID, credentials=_make_credentials(), admin=True + ) + instance = client.instance(instance_id=self.INSTANCE_ID) + table = self._make_one(self.TABLE_ID, instance) + + client._instance_admin_client = instance_api + client._table_admin_client = table_api + + parent = self.INSTANCE_NAME + "/clusters/cluster" + backups_pb = bigtable_table_admin_pb2.ListBackupsResponse( + backups=[ + 
table_pb2.Backup(name=parent + "/backups/op1"), + table_pb2.Backup(name=parent + "/backups/op2"), + table_pb2.Backup(name=parent + "/backups/op3"), + ] + ) + + api = table_api._inner_api_calls["list_backups"] = mock.Mock( + return_value=backups_pb + ) + + backups_filter = "source_table:{}".format(self.TABLE_NAME) + if filter_: + backups_filter = "({}) AND ({})".format(backups_filter, filter_) + + backups = table.list_backups(cluster_id=cluster_id, filter_=filter_, **kwargs) + + for backup in backups: + self.assertIsInstance(backup, Backup) + + if not cluster_id: + cluster_id = "-" + parent = "{}/clusters/{}".format(self.INSTANCE_NAME, cluster_id) + + expected_metadata = [ + ("x-goog-request-params", "parent={}".format(parent)), + ] + api.assert_called_once_with( + bigtable_table_admin_pb2.ListBackupsRequest( + parent=parent, filter=backups_filter, **kwargs + ), + retry=mock.ANY, + timeout=mock.ANY, + metadata=expected_metadata, + ) + + def test_list_backups_defaults(self): + self._list_backups_helper() + + def test_list_backups_w_options(self): + self._list_backups_helper( + cluster_id="cluster", filter_="filter", order_by="order_by", page_size=10 + ) + + def _restore_helper(self, backup_name=None): + from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient + from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client + from google.cloud.bigtable.instance import Instance + + op_future = object() + instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient + + client = mock.Mock(project=self.PROJECT_ID, instance_admin_client=instance_api) + instance = Instance(self.INSTANCE_ID, client=client) + table = self._make_one(self.TABLE_ID, instance) + + api = client.table_admin_client = mock.create_autospec( + BigtableTableAdminClient, instance=True + ) + api.restore_table.return_value = op_future + + if backup_name: + future = table.restore(self.TABLE_ID, backup_name=self.BACKUP_NAME) + else: + future = table.restore(self.TABLE_ID, self.CLUSTER_ID, self.BACKUP_ID) + self.assertIs(future, op_future) + + api.restore_table.assert_called_once_with( + parent=self.INSTANCE_NAME, table_id=self.TABLE_ID, backup=self.BACKUP_NAME, + ) + + def test_restore_table_w_backup_id(self): + self._restore_helper() + + def test_restore_table_w_backup_name(self): + self._restore_helper(backup_name=self.BACKUP_NAME) + class Test__RetryableMutateRowsWorker(unittest.TestCase): from grpc import StatusCode From 2315f3b11e547ed24cdec5b5bfae757189871189 Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Tue, 21 Jul 2020 14:34:01 -0400 Subject: [PATCH 320/892] chore: release 1.4.0 (#65) * updated CHANGELOG.md [ci skip] * updated setup.cfg [ci skip] * updated setup.py Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- packages/google-cloud-bigtable/CHANGELOG.md | 7 +++++++ packages/google-cloud-bigtable/setup.py | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 1eeb76023449..56a43a7429e4 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,13 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [1.4.0](https://www.github.com/googleapis/python-bigtable/compare/v1.3.0...v1.4.0) (2020-07-21) + + +### Features + +* **bigtable:** Managed Backups wrappers 
([#57](https://www.github.com/googleapis/python-bigtable/issues/57)) ([a351734](https://www.github.com/googleapis/python-bigtable/commit/a351734ae16b4a689b89e6a42f63ea3ea5ad84ca)) + ## [1.3.0](https://www.github.com/googleapis/python-bigtable/compare/v1.2.1...v1.3.0) (2020-07-16) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 73efe3540d4f..a8f544560605 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -22,7 +22,7 @@ name = 'google-cloud-bigtable' description = 'Google Cloud Bigtable API client library' -version = "1.3.0" +version = "1.4.0" # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' From c0927817f3908eb3f259c44ddaa4ca7e7459c191 Mon Sep 17 00:00:00 2001 From: HemangChothani <50404902+HemangChothani@users.noreply.github.com> Date: Mon, 3 Aug 2020 23:37:51 +0530 Subject: [PATCH 321/892] docs: document 'row_set' module explicitly (#29) --- packages/google-cloud-bigtable/docs/row-set.rst | 6 ++++++ packages/google-cloud-bigtable/docs/usage.rst | 1 + .../google-cloud-bigtable/google/cloud/bigtable/table.py | 6 +++--- 3 files changed, 10 insertions(+), 3 deletions(-) create mode 100644 packages/google-cloud-bigtable/docs/row-set.rst diff --git a/packages/google-cloud-bigtable/docs/row-set.rst b/packages/google-cloud-bigtable/docs/row-set.rst new file mode 100644 index 000000000000..5f7a16a029ed --- /dev/null +++ b/packages/google-cloud-bigtable/docs/row-set.rst @@ -0,0 +1,6 @@ +Row Set +~~~~~~~~ + +.. automodule:: google.cloud.bigtable.row_set + :members: + :show-inheritance: diff --git a/packages/google-cloud-bigtable/docs/usage.rst b/packages/google-cloud-bigtable/docs/usage.rst index aa8d899d58cb..4e27768053e4 100644 --- a/packages/google-cloud-bigtable/docs/usage.rst +++ b/packages/google-cloud-bigtable/docs/usage.rst @@ -13,6 +13,7 @@ Using the API row row-data row-filters + row-set In the hierarchy of API concepts diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index 983bfcc14d70..9d2dc1acc3a0 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -539,7 +539,7 @@ def read_rows( :param end_inclusive: (Optional) Whether the ``end_key`` should be considered inclusive. The default is False (exclusive). - :type row_set: :class:`row_set.RowSet` + :type row_set: :class:`.RowSet` :param row_set: (Optional) The row set containing multiple row keys and row_ranges. @@ -595,7 +595,7 @@ def yield_rows(self, **kwargs): specified row(s). If unset, reads every column in each row. - :type row_set: :class:`row_set.RowSet` + :type row_set: :class:`.RowSet` :param row_set: (Optional) The row set containing multiple row keys and row_ranges. @@ -1204,7 +1204,7 @@ def _create_row_request( :type: app_profile_id: str :param app_profile_id: (Optional) The unique name of the AppProfile. - :type row_set: :class:`row_set.RowSet` + :type row_set: :class:`.RowSet` :param row_set: (Optional) The row set containing multiple row keys and row_ranges. 
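The `row_set` module documented in the commit above is what the `row_set=` parameter of `Table.read_rows` and `Table.yield_rows` expects. A minimal usage sketch, assuming an existing instance and table — the "my-instance" / "my-table" IDs below are placeholders, not names taken from this change set:

    # Sketch: read a specific key plus a key range with a RowSet.
    # "my-instance" and "my-table" are hypothetical IDs; substitute your own.
    from google.cloud.bigtable import Client
    from google.cloud.bigtable.row_set import RowSet

    client = Client(admin=True)
    table = client.instance("my-instance").table("my-table")

    row_set = RowSet()
    row_set.add_row_key(b"row_key_1")                             # one exact row key
    row_set.add_row_range_from_keys(b"row_key_3", b"row_key_7")   # start inclusive, end exclusive

    # read_rows streams only the rows selected by the RowSet.
    for row in table.read_rows(row_set=row_set):
        print(row.row_key)

A later commit in this series adds `RowSet.add_row_range_with_prefix` as a convenience wrapper over `add_row_range_from_keys` for prefix scans.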
From 86172f459612c9fd3876a5581b9cdaed3709a50e Mon Sep 17 00:00:00 2001 From: HemangChothani <50404902+HemangChothani@users.noreply.github.com> Date: Tue, 4 Aug 2020 00:23:09 +0530 Subject: [PATCH 322/892] docs: update links to reflect new Github org (#48) Co-authored-by: Tres Seaver --- .../google-cloud-bigtable/docs/client-intro.rst | 4 ++-- packages/google-cloud-bigtable/docs/data-api.rst | 10 +++++----- .../google-cloud-bigtable/docs/instance-api.rst | 14 +++++++------- .../google-cloud-bigtable/docs/row-filters.rst | 2 +- packages/google-cloud-bigtable/docs/table-api.rst | 15 +++++++-------- .../tests/unit/test_row_data.py | 6 ++---- 6 files changed, 24 insertions(+), 27 deletions(-) diff --git a/packages/google-cloud-bigtable/docs/client-intro.rst b/packages/google-cloud-bigtable/docs/client-intro.rst index 6a38437790e2..36b2677d0325 100644 --- a/packages/google-cloud-bigtable/docs/client-intro.rst +++ b/packages/google-cloud-bigtable/docs/client-intro.rst @@ -86,5 +86,5 @@ one before you can interact with tables or data. Head next to learn about the :doc:`instance-api`. -.. _Instance Admin: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/tree/master/bigtable-protos/src/main/proto/google/bigtable/admin/instance/v1 -.. _Table Admin: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/tree/master/bigtable-protos/src/main/proto/google/bigtable/admin/table/v1 +.. _Instance Admin: https://github.com/googleapis/python-bigtable/blob/master/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto +.. _Table Admin: https://github.com/googleapis/python-bigtable/blob/master/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto diff --git a/packages/google-cloud-bigtable/docs/data-api.rst b/packages/google-cloud-bigtable/docs/data-api.rst index b50995be7368..d9269cddb934 100644 --- a/packages/google-cloud-bigtable/docs/data-api.rst +++ b/packages/google-cloud-bigtable/docs/data-api.rst @@ -337,8 +337,8 @@ Just as with reading, the stream can be canceled: keys_iterator.cancel() -.. _ReadRows: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/v1/bigtable_service.proto#L36-L38 -.. _SampleRowKeys: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/v1/bigtable_service.proto#L44-L46 -.. _MutateRow: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/v1/bigtable_service.proto#L50-L52 -.. _CheckAndMutateRow: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/v1/bigtable_service.proto#L62-L64 -.. _ReadModifyWriteRow: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/v1/bigtable_service.proto#L70-L72 +.. _ReadRows: https://github.com/googleapis/python-bigtable/blob/master/google/cloud/bigtable_v2/proto/bigtable.proto#L54-L61 +.. _SampleRowKeys: https://github.com/googleapis/python-bigtable/blob/master/google/cloud/bigtable_v2/proto/bigtable.proto#L67-L73 +.. _MutateRow: https://github.com/googleapis/python-bigtable/blob/master/google/cloud/bigtable_v2/proto/bigtable.proto#L77-L84 +.. 
_CheckAndMutateRow: https://github.com/googleapis/python-bigtable/blob/master/google/cloud/bigtable_v2/proto/bigtable.proto#L99-L106 +.. _ReadModifyWriteRow: https://github.com/googleapis/python-bigtable/blob/master/google/cloud/bigtable_v2/proto/bigtable.proto#L113-L121 diff --git a/packages/google-cloud-bigtable/docs/instance-api.rst b/packages/google-cloud-bigtable/docs/instance-api.rst index ab884a605b73..65994dd15520 100644 --- a/packages/google-cloud-bigtable/docs/instance-api.rst +++ b/packages/google-cloud-bigtable/docs/instance-api.rst @@ -121,10 +121,10 @@ Now we go down the hierarchy from Head next to learn about the :doc:`table-api`. .. _Instance Admin API: https://cloud.google.com/bigtable/docs/creating-instance -.. _CreateInstance: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/instance/v1/bigtable_instance_service.proto#L66-L68 -.. _GetInstance: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/instance/v1/bigtable_instance_service.proto#L38-L40 -.. _UpdateInstance: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/instance/v1/bigtable_instance_service.proto#L93-L95 -.. _DeleteInstance: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/instance/v1/bigtable_instance_service.proto#L109-L111 -.. _ListInstances: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/instance/v1/bigtable_instance_service.proto#L44-L46 -.. _GetOperation: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/longrunning/operations.proto#L43-L45 -.. _long-running operation: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/longrunning/operations.proto#L73-L102 +.. _CreateInstance: https://github.com/googleapis/python-bigtable/blob/master/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto#L41-L47 +.. _GetInstance: https://github.com/googleapis/python-bigtable/blob/master/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto#L50-L54 +.. _UpdateInstance: https://github.com/googleapis/python-bigtable/blob/master/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto#L64-L69 +.. _DeleteInstance: https://github.com/googleapis/python-bigtable/blob/master/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto#L81-L85 +.. _ListInstances: https://github.com/googleapis/python-bigtable/blob/master/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto#L57-L61 +.. _GetOperation: https://github.com/googleapis/googleapis/blob/master/google/longrunning/operations.proto#L77-L82 +.. 
_long-running operation: https://github.com/googleapis/googleapis/blob/master/google/longrunning/operations.proto#L128-L162 diff --git a/packages/google-cloud-bigtable/docs/row-filters.rst b/packages/google-cloud-bigtable/docs/row-filters.rst index 292ae9dfb6aa..ba5b725905e8 100644 --- a/packages/google-cloud-bigtable/docs/row-filters.rst +++ b/packages/google-cloud-bigtable/docs/row-filters.rst @@ -64,4 +64,4 @@ level. For example: :members: :show-inheritance: -.. _RowFilter definition: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/1ff247c2e3b7cd0a2dd49071b2d95beaf6563092/bigtable-protos/src/main/proto/google/bigtable/v1/bigtable_data.proto#L195 +.. _RowFilter definition: https://github.com/googleapis/python-bigtable/blob/master/google/cloud/bigtable_v2/proto/bigtable_data.proto#L196 diff --git a/packages/google-cloud-bigtable/docs/table-api.rst b/packages/google-cloud-bigtable/docs/table-api.rst index 5168aad49ff7..00beb4ffd809 100644 --- a/packages/google-cloud-bigtable/docs/table-api.rst +++ b/packages/google-cloud-bigtable/docs/table-api.rst @@ -143,12 +143,11 @@ data directly via a :class:`Table `. Head next to learn about the :doc:`data-api`. -.. _ListTables: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/table/v1/bigtable_table_service.proto#L40-L42 -.. _CreateTable: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/table/v1/bigtable_table_service.proto#L35-L37 -.. _DeleteTable: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/table/v1/bigtable_table_service.proto#L50-L52 -.. _RenameTable: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/table/v1/bigtable_table_service.proto#L56-L58 -.. _GetTable: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/table/v1/bigtable_table_service.proto#L45-L47 -.. _CreateColumnFamily: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/table/v1/bigtable_table_service.proto#L61-L63 -.. _UpdateColumnFamily: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/table/v1/bigtable_table_service.proto#L66-L68 -.. _DeleteColumnFamily: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/table/v1/bigtable_table_service.proto#L71-L73 +.. _ListTables: https://github.com/googleapis/python-bigtable/blob/master/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto#L69-L73 +.. _CreateTable: https://github.com/googleapis/python-bigtable/blob/master/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto#L45-L50 +.. _DeleteTable: https://github.com/googleapis/python-bigtable/blob/master/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto#L83-L87 +.. 
_GetTable: https://github.com/googleapis/python-bigtable/blob/master/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto#L76-L80 +.. _CreateColumnFamily: https://github.com/googleapis/python-bigtable/blob/master/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto#L93-L98 +.. _UpdateColumnFamily: https://github.com/googleapis/python-bigtable/blob/master/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto#L93-L98 +.. _DeleteColumnFamily: https://github.com/googleapis/python-bigtable/blob/master/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto#L93-L98 .. _column families: https://cloud.google.com/bigtable/docs/schema-design#column_families_and_column_qualifiers diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_data.py b/packages/google-cloud-bigtable/tests/unit/test_row_data.py index b787233829b2..40b2ffe30483 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_data.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_data.py @@ -1261,10 +1261,8 @@ def _parse_readrows_acceptance_tests(filename): """Parse acceptance tests from JSON See - https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/\ - 4d3185662ca61bc9fa1bdf1ec0166f6e5ecf86c6/bigtable-client-core/src/\ - test/resources/com/google/cloud/bigtable/grpc/scanner/v2/ - read-rows-acceptance-test.json + https://github.com/googleapis/python-bigtable/blob/master/\ + tests/unit/read-rows-acceptance-test.json """ import json From 92e73c4e451a8fd07e2bf2c34434a620d9c7f65f Mon Sep 17 00:00:00 2001 From: HemangChothani <50404902+HemangChothani@users.noreply.github.com> Date: Tue, 4 Aug 2020 01:44:50 +0530 Subject: [PATCH 323/892] docs: remove indent from snippet code blocks (#49) Co-authored-by: Tres Seaver --- .../google/cloud/bigtable/app_profile.py | 6 +++++ .../google/cloud/bigtable/batcher.py | 3 +++ .../google/cloud/bigtable/client.py | 7 ++++++ .../google/cloud/bigtable/cluster.py | 8 +++++++ .../google/cloud/bigtable/column_family.py | 8 +++++++ .../google/cloud/bigtable/instance.py | 17 +++++++++++++ .../google/cloud/bigtable/policy.py | 4 ++++ .../google/cloud/bigtable/row.py | 24 +++++++++++++++---- .../google/cloud/bigtable/row_data.py | 4 ++++ .../google/cloud/bigtable/row_set.py | 3 +++ .../google/cloud/bigtable/table.py | 22 ++++++++++++++++- 11 files changed, 100 insertions(+), 6 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/app_profile.py b/packages/google-cloud-bigtable/google/cloud/bigtable/app_profile.py index cb04ebfc78c7..8b36eaede4d2 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/app_profile.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/app_profile.py @@ -95,6 +95,7 @@ def name(self): .. literalinclude:: snippets.py :start-after: [START bigtable_app_profile_name] :end-before: [END bigtable_app_profile_name] + :dedent: 4 The AppProfile name is of the form ``"projects/../instances/../app_profile/{app_profile_id}"`` @@ -238,6 +239,7 @@ def reload(self): .. literalinclude:: snippets.py :start-after: [START bigtable_reload_app_profile] :end-before: [END bigtable_reload_app_profile] + :dedent: 4 """ app_profile_pb = self.instance_admin_client.get_app_profile(self.name) @@ -254,6 +256,7 @@ def exists(self): .. literalinclude:: snippets.py :start-after: [START bigtable_app_profile_exists] :end-before: [END bigtable_app_profile_exists] + :dedent: 4 :rtype: bool :returns: True if the AppProfile exists, else False. @@ -280,6 +283,7 @@ def create(self, ignore_warnings=None): .. 
literalinclude:: snippets.py :start-after: [START bigtable_create_app_profile] :end-before: [END bigtable_create_app_profile] + :dedent: 4 :type: ignore_warnings: bool :param: ignore_warnings: (Optional) If true, ignore safety checks when @@ -311,6 +315,7 @@ def update(self, ignore_warnings=None): .. literalinclude:: snippets.py :start-after: [START bigtable_update_app_profile] :end-before: [END bigtable_update_app_profile] + :dedent: 4 """ update_mask_pb = field_mask_pb2.FieldMask() @@ -336,6 +341,7 @@ def delete(self, ignore_warnings=None): .. literalinclude:: snippets.py :start-after: [START bigtable_delete_app_profile] :end-before: [END bigtable_delete_app_profile] + :dedent: 4 :type: ignore_warnings: bool :param: ignore_warnings: If true, ignore safety checks when deleting diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py b/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py index 3a649049b66d..0994e289d112 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py @@ -73,6 +73,7 @@ def mutate(self, row): .. literalinclude:: snippets.py :start-after: [START bigtable_batcher_mutate] :end-before: [END bigtable_batcher_mutate] + :dedent: 4 :type row: class :param row: class:`~google.cloud.bigtable.row.DirectRow`. @@ -112,6 +113,7 @@ def mutate_rows(self, rows): .. literalinclude:: snippets.py :start-after: [START bigtable_batcher_mutate_rows] :end-before: [END bigtable_batcher_mutate_rows] + :dedent: 4 :type rows: list:[`~google.cloud.bigtable.row.DirectRow`] :param rows: list:[`~google.cloud.bigtable.row.DirectRow`]. @@ -134,6 +136,7 @@ def flush(self): .. literalinclude:: snippets.py :start-after: [START bigtable_batcher_flush] :end-before: [END bigtable_batcher_flush] + :dedent: 4 """ if len(self.rows) != 0: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py index 8a8315623cae..935a0a3b6dd8 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py @@ -206,6 +206,7 @@ def project_path(self): .. literalinclude:: snippets.py :start-after: [START bigtable_project_path] :end-before: [END bigtable_project_path] + :dedent: 4 The project name is of the form @@ -225,6 +226,7 @@ def table_data_client(self): .. literalinclude:: snippets.py :start-after: [START bigtable_table_data_client] :end-before: [END bigtable_table_data_client] + :dedent: 4 :rtype: :class:`.bigtable_v2.BigtableClient` :returns: A BigtableClient object. @@ -245,6 +247,7 @@ def table_admin_client(self): .. literalinclude:: snippets.py :start-after: [START bigtable_table_admin_client] :end-before: [END bigtable_table_admin_client] + :dedent: 4 :rtype: :class:`.bigtable_admin_pb2.BigtableTableAdmin` :returns: A BigtableTableAdmin instance. @@ -271,6 +274,7 @@ def instance_admin_client(self): .. literalinclude:: snippets.py :start-after: [START bigtable_instance_admin_client] :end-before: [END bigtable_instance_admin_client] + :dedent: 4 :rtype: :class:`.bigtable_admin_pb2.BigtableInstanceAdmin` :returns: A BigtableInstanceAdmin instance. @@ -296,6 +300,7 @@ def instance(self, instance_id, display_name=None, instance_type=None, labels=No .. 
literalinclude:: snippets.py :start-after: [START bigtable_create_prod_instance] :end-before: [END bigtable_create_prod_instance] + :dedent: 4 :type instance_id: str :param instance_id: The ID of the instance. @@ -345,6 +350,7 @@ def list_instances(self): .. literalinclude:: snippets.py :start-after: [START bigtable_list_instances] :end-before: [END bigtable_list_instances] + :dedent: 4 :rtype: tuple :returns: @@ -365,6 +371,7 @@ def list_clusters(self): .. literalinclude:: snippets.py :start-after: [START bigtable_list_clusters_in_project] :end-before: [END bigtable_list_clusters_in_project] + :dedent: 4 :rtype: tuple :returns: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py index b573705c0696..b2957bc2a999 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py @@ -99,6 +99,7 @@ def from_pb(cls, cluster_pb, instance): .. literalinclude:: snippets.py :start-after: [START bigtable_cluster_from_pb] :end-before: [END bigtable_cluster_from_pb] + :dedent: 4 :type cluster_pb: :class:`instance_pb2.Cluster` :param cluster_pb: An instance protobuf object. @@ -159,6 +160,7 @@ def name(self): .. literalinclude:: snippets.py :start-after: [START bigtable_cluster_name] :end-before: [END bigtable_cluster_name] + :dedent: 4 The cluster name is of the form @@ -180,6 +182,7 @@ def state(self): .. literalinclude:: snippets.py :start-after: [START bigtable_cluster_state] :end-before: [END bigtable_cluster_state] + :dedent: 4 """ return self._state @@ -206,6 +209,7 @@ def reload(self): .. literalinclude:: snippets.py :start-after: [START bigtable_reload_cluster] :end-before: [END bigtable_reload_cluster] + :dedent: 4 """ cluster_pb = self._instance._client.instance_admin_client.get_cluster(self.name) @@ -221,6 +225,7 @@ def exists(self): .. literalinclude:: snippets.py :start-after: [START bigtable_check_cluster_exists] :end-before: [END bigtable_check_cluster_exists] + :dedent: 4 :rtype: bool :returns: True if the table exists, else False. @@ -241,6 +246,7 @@ def create(self): .. literalinclude:: snippets.py :start-after: [START bigtable_create_cluster] :end-before: [END bigtable_create_cluster] + :dedent: 4 .. note:: @@ -274,6 +280,7 @@ def update(self): .. literalinclude:: snippets.py :start-after: [START bigtable_update_cluster] :end-before: [END bigtable_update_cluster] + :dedent: 4 .. note:: @@ -306,6 +313,7 @@ def delete(self): .. literalinclude:: snippets.py :start-after: [START bigtable_delete_cluster] :end-before: [END bigtable_delete_cluster] + :dedent: 4 Marks a cluster and all of its tables for permanent deletion in 7 days. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/column_family.py b/packages/google-cloud-bigtable/google/cloud/bigtable/column_family.py index 8b536992faa7..0e884fa8919e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/column_family.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/column_family.py @@ -48,6 +48,7 @@ class MaxVersionsGCRule(GarbageCollectionRule): .. literalinclude:: snippets_table.py :start-after: [START bigtable_create_family_gc_max_versions] :end-before: [END bigtable_create_family_gc_max_versions] + :dedent: 4 :type max_num_versions: int :param max_num_versions: The maximum number of versions @@ -81,6 +82,7 @@ class MaxAgeGCRule(GarbageCollectionRule): .. 
literalinclude:: snippets_table.py :start-after: [START bigtable_create_family_gc_max_age] :end-before: [END bigtable_create_family_gc_max_age] + :dedent: 4 :type max_age: :class:`datetime.timedelta` :param max_age: The maximum age allowed for a cell in the table. @@ -115,6 +117,7 @@ class GCRuleUnion(GarbageCollectionRule): .. literalinclude:: snippets_table.py :start-after: [START bigtable_create_family_gc_union] :end-before: [END bigtable_create_family_gc_union] + :dedent: 4 :type rules: list :param rules: List of :class:`GarbageCollectionRule`. @@ -149,6 +152,7 @@ class GCRuleIntersection(GarbageCollectionRule): .. literalinclude:: snippets_table.py :start-after: [START bigtable_create_family_gc_intersection] :end-before: [END bigtable_create_family_gc_intersection] + :dedent: 4 :type rules: list :param rules: List of :class:`GarbageCollectionRule`. @@ -212,6 +216,7 @@ def name(self): .. literalinclude:: snippets_table.py :start-after: [START bigtable_column_family_name] :end-before: [END bigtable_column_family_name] + :dedent: 4 .. note:: @@ -258,6 +263,7 @@ def create(self): .. literalinclude:: snippets_table.py :start-after: [START bigtable_create_column_family] :end-before: [END bigtable_create_column_family] + :dedent: 4 """ column_family = self.to_pb() @@ -280,6 +286,7 @@ def update(self): .. literalinclude:: snippets_table.py :start-after: [START bigtable_update_column_family] :end-before: [END bigtable_update_column_family] + :dedent: 4 .. note:: @@ -306,6 +313,7 @@ def delete(self): .. literalinclude:: snippets_table.py :start-after: [START bigtable_delete_column_family] :end-before: [END bigtable_delete_column_family] + :dedent: 4 """ modification = table_admin_v2_pb2.ModifyColumnFamiliesRequest.Modification( diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py index 0c8b81fa3f2f..c5d3ce8b688d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py @@ -134,6 +134,7 @@ def from_pb(cls, instance_pb, client): .. literalinclude:: snippets.py :start-after: [START bigtable_instance_from_pb] :end-before: [END bigtable_instance_from_pb] + :dedent: 4 :type instance_pb: :class:`instance_pb2.Instance` :param instance_pb: An instance protobuf object. @@ -178,6 +179,7 @@ def name(self): .. literalinclude:: snippets.py :start-after: [START bigtable_instance_name] :end-before: [END bigtable_instance_name] + :dedent: 4 The instance name is of the form @@ -199,6 +201,7 @@ def state(self): .. literalinclude:: snippets.py :start-after: [START bigtable_instance_state] :end-before: [END bigtable_instance_state] + :dedent: 4 """ return self._state @@ -231,6 +234,7 @@ def create( .. literalinclude:: snippets.py :start-after: [START bigtable_create_prod_instance] :end-before: [END bigtable_create_prod_instance] + :dedent: 4 .. note:: @@ -331,6 +335,7 @@ def exists(self): .. literalinclude:: snippets.py :start-after: [START bigtable_check_instance_exists] :end-before: [END bigtable_check_instance_exists] + :dedent: 4 :rtype: bool :returns: True if the table exists, else False. @@ -350,6 +355,7 @@ def reload(self): .. literalinclude:: snippets.py :start-after: [START bigtable_reload_instance] :end-before: [END bigtable_reload_instance] + :dedent: 4 """ instance_pb = self._client.instance_admin_client.get_instance(self.name) @@ -365,6 +371,7 @@ def update(self): .. 
literalinclude:: snippets.py :start-after: [START bigtable_update_instance] :end-before: [END bigtable_update_instance] + :dedent: 4 .. note:: @@ -411,6 +418,7 @@ def delete(self): .. literalinclude:: snippets.py :start-after: [START bigtable_delete_instance] :end-before: [END bigtable_delete_instance] + :dedent: 4 Marks an instance and all of its tables for permanent deletion in 7 days. @@ -441,6 +449,7 @@ def get_iam_policy(self, requested_policy_version=None): .. literalinclude:: snippets.py :start-after: [START bigtable_get_iam_policy] :end-before: [END bigtable_get_iam_policy] + :dedent: 4 :type requested_policy_version: int or ``NoneType`` :param requested_policy_version: Optional. The version of IAM policies to request. @@ -480,6 +489,7 @@ class `google.cloud.bigtable.policy.Policy` .. literalinclude:: snippets.py :start-after: [START bigtable_set_iam_policy] :end-before: [END bigtable_set_iam_policy] + :dedent: 4 :type policy: :class:`google.cloud.bigtable.policy.Policy` :param policy: A new IAM policy to replace the current IAM policy @@ -503,6 +513,7 @@ def test_iam_permissions(self, permissions): .. literalinclude:: snippets.py :start-after: [START bigtable_test_iam_permissions] :end-before: [END bigtable_test_iam_permissions] + :dedent: 4 :type permissions: list :param permissions: The set of permissions to check for @@ -532,6 +543,7 @@ def cluster( .. literalinclude:: snippets.py :start-after: [START bigtable_create_cluster] :end-before: [END bigtable_create_cluster] + :dedent: 4 :type cluster_id: str :param cluster_id: The ID of the cluster. @@ -575,6 +587,7 @@ def list_clusters(self): .. literalinclude:: snippets.py :start-after: [START bigtable_list_clusters_on_instance] :end-before: [END bigtable_list_clusters_on_instance] + :dedent: 4 :rtype: tuple :returns: @@ -595,6 +608,7 @@ def table(self, table_id, mutation_timeout=None, app_profile_id=None): .. literalinclude:: snippets.py :start-after: [START bigtable_create_table] :end-before: [END bigtable_create_table] + :dedent: 4 :type table_id: str :param table_id: The ID of the table. @@ -620,6 +634,7 @@ def list_tables(self): .. literalinclude:: snippets.py :start-after: [START bigtable_list_tables] :end-before: [END bigtable_list_tables] + :dedent: 4 :rtype: list of :class:`Table ` :returns: The list of tables owned by the instance. @@ -655,6 +670,7 @@ def app_profile( .. literalinclude:: snippets.py :start-after: [START bigtable_create_app_profile] :end-before: [END bigtable_create_app_profile] + :dedent: 4 :type app_profile_id: str :param app_profile_id: The ID of the AppProfile. Must be of the form @@ -701,6 +717,7 @@ def list_app_profiles(self): .. literalinclude:: snippets.py :start-after: [START bigtable_list_app_profiles] :end-before: [END bigtable_list_app_profiles] + :dedent: 4 :rtype: :list:[`~google.cloud.bigtable.app_profile.AppProfile`] :returns: A :list:[`~google.cloud.bigtable.app_profile.AppProfile`]. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/policy.py b/packages/google-cloud-bigtable/google/cloud/bigtable/policy.py index 65be0158a006..1fd7494247d7 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/policy.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/policy.py @@ -106,6 +106,7 @@ def bigtable_admins(self): .. literalinclude:: snippets.py :start-after: [START bigtable_admins_policy] :end-before: [END bigtable_admins_policy] + :dedent: 4 """ result = set() for member in self.get(BIGTABLE_ADMIN_ROLE, ()): @@ -123,6 +124,7 @@ def bigtable_readers(self): .. 
literalinclude:: snippets.py :start-after: [START bigtable_readers_policy] :end-before: [END bigtable_readers_policy] + :dedent: 4 """ result = set() for member in self.get(BIGTABLE_READER_ROLE, ()): @@ -140,6 +142,7 @@ def bigtable_users(self): .. literalinclude:: snippets.py :start-after: [START bigtable_users_policy] :end-before: [END bigtable_users_policy] + :dedent: 4 """ result = set() for member in self.get(BIGTABLE_USER_ROLE, ()): @@ -157,6 +160,7 @@ def bigtable_viewers(self): .. literalinclude:: snippets.py :start-after: [START bigtable_viewers_policy] :end-before: [END bigtable_viewers_policy] + :dedent: 4 """ result = set() for member in self.get(BIGTABLE_VIEWER_ROLE, ()): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row.py index 079ba6c8f497..92f5b818b08e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row.py @@ -61,6 +61,7 @@ def row_key(self): .. literalinclude:: snippets_table.py :start-after: [START bigtable_row_row_key] :end-before: [END bigtable_row_row_key] + :dedent: 4 :rtype: bytes :returns: The key for the current row. @@ -76,6 +77,7 @@ def table(self): .. literalinclude:: snippets_table.py :start-after: [START bigtable_row_table] :end-before: [END bigtable_row_table] + :dedent: 4 :rtype: table: :class:`Table ` :returns: table: The table that owns the row. @@ -300,7 +302,7 @@ def get_mutations_size(self): .. literalinclude:: snippets_table.py :start-after: [START bigtable_row_get_mutations_size] :end-before: [END bigtable_row_get_mutations_size] - + :dedent: 4 """ mutation_size = 0 @@ -328,6 +330,7 @@ def set_cell(self, column_family_id, column, value, timestamp=None): .. literalinclude:: snippets_table.py :start-after: [START bigtable_row_set_cell] :end-before: [END bigtable_row_set_cell] + :dedent: 4 :type column_family_id: str :param column_family_id: The column family that contains the column. @@ -363,7 +366,7 @@ def delete(self): .. literalinclude:: snippets_table.py :start-after: [START bigtable_row_delete] :end-before: [END bigtable_row_delete] - + :dedent: 4 """ self._delete(state=None) @@ -382,6 +385,7 @@ def delete_cell(self, column_family_id, column, time_range=None): .. literalinclude:: snippets_table.py :start-after: [START bigtable_row_delete_cell] :end-before: [END bigtable_row_delete_cell] + :dedent: 4 :type column_family_id: str :param column_family_id: The column family that contains the column @@ -415,6 +419,7 @@ def delete_cells(self, column_family_id, columns, time_range=None): .. literalinclude:: snippets_table.py :start-after: [START bigtable_row_delete_cells] :end-before: [END bigtable_row_delete_cells] + :dedent: 4 :type column_family_id: str :param column_family_id: The column family that contains the column @@ -450,6 +455,7 @@ def commit(self): .. literalinclude:: snippets_table.py :start-after: [START bigtable_row_commit] :end-before: [END bigtable_row_commit] + :dedent: 4 :raises: :exc:`~.table.TooManyMutationsError` if the number of mutations is greater than 100,000. @@ -465,7 +471,7 @@ def clear(self): .. literalinclude:: snippets_table.py :start-after: [START bigtable_row_clear] :end-before: [END bigtable_row_clear] - + :dedent: 4 """ del self._pb_mutations[:] @@ -557,6 +563,7 @@ def commit(self): .. 
literalinclude:: snippets_table.py :start-after: [START bigtable_row_commit] :end-before: [END bigtable_row_commit] + :dedent: 4 :rtype: bool :returns: Flag indicating if the filter was matched (which also @@ -610,6 +617,7 @@ def set_cell(self, column_family_id, column, value, timestamp=None, state=True): .. literalinclude:: snippets_table.py :start-after: [START bigtable_row_set_cell] :end-before: [END bigtable_row_set_cell] + :dedent: 4 :type column_family_id: str :param column_family_id: The column family that contains the column. @@ -651,6 +659,7 @@ def delete(self, state=True): .. literalinclude:: snippets_table.py :start-after: [START bigtable_row_delete] :end-before: [END bigtable_row_delete] + :dedent: 4 :type state: bool :param state: (Optional) The state that the mutation should be @@ -673,6 +682,7 @@ def delete_cell(self, column_family_id, column, time_range=None, state=True): .. literalinclude:: snippets_table.py :start-after: [START bigtable_row_delete_cell] :end-before: [END bigtable_row_delete_cell] + :dedent: 4 :type column_family_id: str :param column_family_id: The column family that contains the column @@ -710,6 +720,7 @@ def delete_cells(self, column_family_id, columns, time_range=None, state=True): .. literalinclude:: snippets_table.py :start-after: [START bigtable_row_delete_cells] :end-before: [END bigtable_row_delete_cells] + :dedent: 4 :type column_family_id: str :param column_family_id: The column family that contains the column @@ -744,7 +755,7 @@ def clear(self): .. literalinclude:: snippets_table.py :start-after: [START bigtable_row_clear] :end-before: [END bigtable_row_clear] - + :dedent: 4 """ del self._true_pb_mutations[:] del self._false_pb_mutations[:] @@ -783,7 +794,7 @@ def clear(self): .. literalinclude:: snippets_table.py :start-after: [START bigtable_row_clear] :end-before: [END bigtable_row_clear] - + :dedent: 4 """ del self._rule_pb_list[:] @@ -802,6 +813,7 @@ def append_cell_value(self, column_family_id, column, value): .. literalinclude:: snippets_table.py :start-after: [START bigtable_row_append_cell_value] :end-before: [END bigtable_row_append_cell_value] + :dedent: 4 :type column_family_id: str :param column_family_id: The column family that contains the column. @@ -842,6 +854,7 @@ def increment_cell_value(self, column_family_id, column, int_value): .. literalinclude:: snippets_table.py :start-after: [START bigtable_row_increment_cell_value] :end-before: [END bigtable_row_increment_cell_value] + :dedent: 4 :type column_family_id: str :param column_family_id: The column family that contains the column. @@ -888,6 +901,7 @@ def commit(self): .. literalinclude:: snippets_table.py :start-after: [START bigtable_row_commit] :end-before: [END bigtable_row_commit] + :dedent: 4 :rtype: dict :returns: The new contents of all modified cells. Returned as a diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py index 24078b8496d8..38bf859563fa 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py @@ -184,6 +184,7 @@ def cells(self): .. literalinclude:: snippets_table.py :start-after: [START bigtable_row_data_cells] :end-before: [END bigtable_row_data_cells] + :dedent: 4 :rtype: dict :returns: Dictionary of the :class:`Cell` objects accumulated. This @@ -210,6 +211,7 @@ def find_cells(self, column_family_id, column): .. 
literalinclude:: snippets_table.py :start-after: [START bigtable_row_find_cells] :end-before: [END bigtable_row_find_cells] + :dedent: 4 Args: column_family_id (str): The ID of the column family. Must be of the @@ -247,6 +249,7 @@ def cell_value(self, column_family_id, column, index=0): .. literalinclude:: snippets_table.py :start-after: [START bigtable_row_cell_value] :end-before: [END bigtable_row_cell_value] + :dedent: 4 Args: column_family_id (str): The ID of the column family. Must be of the @@ -288,6 +291,7 @@ def cell_values(self, column_family_id, column, max_count=None): .. literalinclude:: snippets_table.py :start-after: [START bigtable_row_cell_values] :end-before: [END bigtable_row_cell_values] + :dedent: 4 Args: column_family_id (str): The ID of the column family. Must be of the diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py index 0cb6443b05eb..aa3e5eb922b3 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py @@ -58,6 +58,7 @@ def add_row_key(self, row_key): .. literalinclude:: snippets_table.py :start-after: [START bigtable_add_row_key] :end-before: [END bigtable_add_row_key] + :dedent: 4 :type row_key: bytes :param row_key: The key of a row to read @@ -72,6 +73,7 @@ def add_row_range(self, row_range): .. literalinclude:: snippets_table.py :start-after: [START bigtable_add_row_range] :end-before: [END bigtable_add_row_range] + :dedent: 4 :type row_range: class:`RowRange` :param row_range: The row range object having start and end key @@ -88,6 +90,7 @@ def add_row_range_from_keys( .. literalinclude:: snippets_table.py :start-after: [START bigtable_row_range_from_keys] :end-before: [END bigtable_row_range_from_keys] + :dedent: 4 :type start_key: bytes :param start_key: (Optional) Start key of the row range. If left empty, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index 9d2dc1acc3a0..199269013074 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -121,6 +121,7 @@ def name(self): .. literalinclude:: snippets_table.py :start-after: [START bigtable_table_name] :end-before: [END bigtable_table_name] + :dedent: 4 .. note:: @@ -149,6 +150,7 @@ def get_iam_policy(self): .. literalinclude:: snippets_table.py :start-after: [START bigtable_table_get_iam_policy] :end-before: [END bigtable_table_get_iam_policy] + :dedent: 4 :rtype: :class:`google.cloud.bigtable.policy.Policy` :returns: The current IAM policy of this table. @@ -169,6 +171,7 @@ class `google.cloud.bigtable.policy.Policy` .. literalinclude:: snippets_table.py :start-after: [START bigtable_table_set_iam_policy] :end-before: [END bigtable_table_set_iam_policy] + :dedent: 4 :type policy: :class:`google.cloud.bigtable.policy.Policy` :param policy: A new IAM policy to replace the current IAM policy @@ -190,6 +193,7 @@ def test_iam_permissions(self, permissions): .. literalinclude:: snippets_table.py :start-after: [START bigtable_table_test_iam_permissions] :end-before: [END bigtable_table_test_iam_permissions] + :dedent: 4 :type permissions: list :param permissions: The set of permissions to check for @@ -217,6 +221,7 @@ def column_family(self, column_family_id, gc_rule=None): .. 
literalinclude:: snippets_table.py :start-after: [START bigtable_table_column_family] :end-before: [END bigtable_table_column_family] + :dedent: 4 :type column_family_id: str :param column_family_id: The ID of the column family. Must be of the @@ -239,6 +244,7 @@ def row(self, row_key, filter_=None, append=False): .. literalinclude:: snippets_table.py :start-after: [START bigtable_table_row] :end-before: [END bigtable_table_row] + :dedent: 4 .. warning:: @@ -286,6 +292,7 @@ def append_row(self, row_key): .. literalinclude:: snippets_table.py :start-after: [START bigtable_table_append_row] :end-before: [END bigtable_table_append_row] + :dedent: 4 Args: row_key (bytes): The key for the row being created. @@ -303,6 +310,7 @@ def direct_row(self, row_key): .. literalinclude:: snippets_table.py :start-after: [START bigtable_table_direct_row] :end-before: [END bigtable_table_direct_row] + :dedent: 4 Args: row_key (bytes): The key for the row being created. @@ -320,6 +328,7 @@ def conditional_row(self, row_key, filter_): .. literalinclude:: snippets_table.py :start-after: [START bigtable_table_conditional_row] :end-before: [END bigtable_table_conditional_row] + :dedent: 4 Args: row_key (bytes): The key for the row being created. @@ -348,6 +357,7 @@ def create(self, initial_split_keys=[], column_families={}): .. literalinclude:: snippets_table.py :start-after: [START bigtable_create_table] :end-before: [END bigtable_create_table] + :dedent: 4 .. note:: @@ -392,6 +402,7 @@ def exists(self): .. literalinclude:: snippets_table.py :start-after: [START bigtable_check_table_exists] :end-before: [END bigtable_check_table_exists] + :dedent: 4 :rtype: bool :returns: True if the table exists, else False. @@ -411,7 +422,7 @@ def delete(self): .. literalinclude:: snippets_table.py :start-after: [START bigtable_delete_table] :end-before: [END bigtable_delete_table] - + :dedent: 4 """ table_client = self._instance._client.table_admin_client table_client.delete_table(name=self.name) @@ -424,6 +435,7 @@ def list_column_families(self): .. literalinclude:: snippets_table.py :start-after: [START bigtable_list_column_families] :end-before: [END bigtable_list_column_families] + :dedent: 4 :rtype: dict :returns: Dictionary of column families attached to this table. Keys @@ -451,6 +463,7 @@ def get_cluster_states(self): .. literalinclude:: snippets_table.py :start-after: [START bigtable_get_cluster_states] :end-before: [END bigtable_get_cluster_states] + :dedent: 4 :rtype: dict :returns: Dictionary of cluster states for this table. @@ -475,6 +488,7 @@ def read_row(self, row_key, filter_=None): .. literalinclude:: snippets_table.py :start-after: [START bigtable_read_row] :end-before: [END bigtable_read_row] + :dedent: 4 :type row_key: bytes :param row_key: The key of the row to read from. @@ -514,6 +528,7 @@ def read_rows( .. literalinclude:: snippets_table.py :start-after: [START bigtable_read_rows] :end-before: [END bigtable_read_rows] + :dedent: 4 :type start_key: bytes :param start_key: (Optional) The beginning of a range of row keys to @@ -617,6 +632,7 @@ def mutate_rows(self, rows, retry=DEFAULT_RETRY): .. literalinclude:: snippets_table.py :start-after: [START bigtable_mutate_rows] :end-before: [END bigtable_mutate_rows] + :dedent: 4 The method tries to update all specified rows. If some of the rows weren't updated, it would not remove mutations. @@ -661,6 +677,7 @@ def sample_row_keys(self): .. 
literalinclude:: snippets_table.py :start-after: [START bigtable_sample_row_keys] :end-before: [END bigtable_sample_row_keys] + :dedent: 4 The returned row keys will delimit contiguous sections of the table of approximately equal size, which can be used to break up the data for @@ -705,6 +722,7 @@ def truncate(self, timeout=None): .. literalinclude:: snippets_table.py :start-after: [START bigtable_truncate_table] :end-before: [END bigtable_truncate_table] + :dedent: 4 :type timeout: float :param timeout: (Optional) The amount of time, in seconds, to wait @@ -735,6 +753,7 @@ def drop_by_prefix(self, row_key_prefix, timeout=None): .. literalinclude:: snippets_table.py :start-after: [START bigtable_drop_by_prefix] :end-before: [END bigtable_drop_by_prefix] + :dedent: 4 :type row_key_prefix: bytes :param row_key_prefix: Delete all rows that start with this row key @@ -769,6 +788,7 @@ def mutations_batcher(self, flush_count=FLUSH_COUNT, max_row_bytes=MAX_ROW_BYTES .. literalinclude:: snippets_table.py :start-after: [START bigtable_mutations_batcher] :end-before: [END bigtable_mutations_batcher] + :dedent: 4 :type flush_count: int :param flush_count: (Optional) Maximum number of rows per batch. If it From 3364c2947f666c0aa80cf92c364fe3b19a56ca75 Mon Sep 17 00:00:00 2001 From: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Date: Mon, 3 Aug 2020 22:54:16 -0700 Subject: [PATCH 324/892] docs(samples): filter cpu query to get metrics for the correct resources [(#4238)](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4238) (#81) --- .../samples/metricscaler/metricscaler.py | 27 ++++++++++++------- .../samples/metricscaler/metricscaler_test.py | 20 ++++++++++---- 2 files changed, 32 insertions(+), 15 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/metricscaler/metricscaler.py b/packages/google-cloud-bigtable/samples/metricscaler/metricscaler.py index 1957a81f1aa9..93aba8c811ad 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/metricscaler.py +++ b/packages/google-cloud-bigtable/samples/metricscaler/metricscaler.py @@ -16,6 +16,7 @@ programmatically scale a Google Cloud Bigtable cluster.""" import argparse +import logging import os import time @@ -26,8 +27,12 @@ PROJECT = os.environ['GOOGLE_CLOUD_PROJECT'] +logger = logging.getLogger('bigtable.metricscaler') +logger.addHandler(logging.StreamHandler()) +logger.setLevel(logging.INFO) -def get_cpu_load(): + +def get_cpu_load(bigtable_instance, bigtable_cluster): """Returns the most recent Cloud Bigtable CPU load measurement. Returns: @@ -40,12 +45,13 @@ def get_cpu_load(): metric_type='bigtable.googleapis.com/' 'cluster/cpu_load', minutes=5) + cpu_query = cpu_query.select_resources(instance=bigtable_instance, cluster=bigtable_cluster) cpu = next(cpu_query.iter()) return cpu.points[0].value.double_value # [END bigtable_cpu] -def get_storage_utilization(): +def get_storage_utilization(bigtable_instance, bigtable_cluster): """Returns the most recent Cloud Bigtable storage utilization measurement. 
Returns: @@ -58,6 +64,7 @@ def get_storage_utilization(): metric_type='bigtable.googleapis.com/' 'cluster/storage_utilization', minutes=5) + utilization_query = utilization_query.select_resources(instance=bigtable_instance, cluster=bigtable_cluster) utilization = next(utilization_query.iter()) return utilization.points[0].value.double_value # [END bigtable_metric_scaler_storage_utilization] @@ -111,7 +118,7 @@ def scale_bigtable(bigtable_instance, bigtable_cluster, scale_up): current_node_count + size_change_step, max_node_count) cluster.serve_nodes = new_node_count cluster.update() - print('Scaled up from {} to {} nodes.'.format( + logger.info('Scaled up from {} to {} nodes.'.format( current_node_count, new_node_count)) else: if current_node_count > min_node_count: @@ -119,7 +126,7 @@ def scale_bigtable(bigtable_instance, bigtable_cluster, scale_up): current_node_count - size_change_step, min_node_count) cluster.serve_nodes = new_node_count cluster.update() - print('Scaled down from {} to {} nodes.'.format( + logger.info('Scaled down from {} to {} nodes.'.format( current_node_count, new_node_count)) # [END bigtable_scale] @@ -145,10 +152,10 @@ def main( long_sleep (int): How long to sleep after the number of nodes is changed """ - cluster_cpu = get_cpu_load() - cluster_storage = get_storage_utilization() - print('Detected cpu of {}'.format(cluster_cpu)) - print('Detected storage utilization of {}'.format(cluster_storage)) + cluster_cpu = get_cpu_load(bigtable_instance, bigtable_cluster) + cluster_storage = get_storage_utilization(bigtable_instance, bigtable_cluster) + logger.info('Detected cpu of {}'.format(cluster_cpu)) + logger.info('Detected storage utilization of {}'.format(cluster_storage)) try: if cluster_cpu > high_cpu_threshold or cluster_storage > high_storage_threshold: scale_bigtable(bigtable_instance, bigtable_cluster, True) @@ -158,10 +165,10 @@ def main( scale_bigtable(bigtable_instance, bigtable_cluster, False) time.sleep(long_sleep) else: - print('CPU within threshold, sleeping.') + logger.info('CPU within threshold, sleeping.') time.sleep(short_sleep) except Exception as e: - print("Error during scaling: %s", e) + logger.error("Error during scaling: %s", e) if __name__ == '__main__': diff --git a/packages/google-cloud-bigtable/samples/metricscaler/metricscaler_test.py b/packages/google-cloud-bigtable/samples/metricscaler/metricscaler_test.py index 1d4a3a3d11ec..3c8efaae3c45 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/metricscaler_test.py +++ b/packages/google-cloud-bigtable/samples/metricscaler/metricscaler_test.py @@ -20,7 +20,7 @@ from google.cloud import bigtable from google.cloud.bigtable import enums -from mock import patch +from mock import Mock, patch import pytest @@ -41,12 +41,18 @@ # System tests to verify API calls succeed -def test_get_cpu_load(): - assert float(get_cpu_load()) > 0.0 +@patch('metricscaler.query') +def test_get_cpu_load(monitoring_v3_query): + iter_mock = monitoring_v3_query.Query().select_resources().iter + iter_mock.return_value = iter([Mock(points=[Mock(value=Mock(double_value=1.0))])]) + assert float(get_cpu_load(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE)) > 0.0 -def test_get_storage_utilization(): - assert float(get_storage_utilization()) > 0.0 +@patch('metricscaler.query') +def test_get_storage_utilization(monitoring_v3_query): + iter_mock = monitoring_v3_query.Query().select_resources().iter + iter_mock.return_value = iter([Mock(points=[Mock(value=Mock(double_value=1.0))])]) + assert 
float(get_storage_utilization(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE)) > 0.0 @pytest.fixture() @@ -198,3 +204,7 @@ def test_main(scale_bigtable, get_cpu_load, get_storage_utilization, sleep): scale_bigtable.assert_called_once_with(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, True) scale_bigtable.reset_mock() + + +if __name__ == '__main__': + test_get_cpu_load() From 21de7af53e1028dd84c4c11a88d24be5382397df Mon Sep 17 00:00:00 2001 From: HemangChothani <50404902+HemangChothani@users.noreply.github.com> Date: Tue, 4 Aug 2020 22:44:38 +0530 Subject: [PATCH 325/892] feat: add 'Rowset.add_row_range_with_prefix' (#30) Co-authored-by: Tres Seaver --- .../docs/snippets_table.py | 38 +++++++++++++++++++ .../google/cloud/bigtable/row_set.py | 18 +++++++++ .../google-cloud-bigtable/tests/system.py | 32 ++++++++++++++++ .../tests/unit/test_row_set.py | 5 +++ 4 files changed, 93 insertions(+) diff --git a/packages/google-cloud-bigtable/docs/snippets_table.py b/packages/google-cloud-bigtable/docs/snippets_table.py index 702cf31b1447..a1d8d37fc6a0 100644 --- a/packages/google-cloud-bigtable/docs/snippets_table.py +++ b/packages/google-cloud-bigtable/docs/snippets_table.py @@ -691,7 +691,45 @@ def test_bigtable_add_row_add_row_range_add_row_range_from_keys(): expected_row_keys = [b"row_key_3", b"row_key_4", b"row_key_5", b"row_key_6"] found_row_keys = [row.row_key for row in read_rows] assert found_row_keys == expected_row_keys + table.truncate(timeout=200) + + +def test_bigtable_add_row_range_with_prefix(): + row_keys = [ + b"row_key_1", + b"row_key_2", + b"row_key_3", + b"sample_row_key_1", + b"sample_row_key_2", + ] + rows = [] + for row_key in row_keys: + row = Config.TABLE.row(row_key) + row.set_cell(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1) + rows.append(row) + Config.TABLE.mutate_rows(rows) + + # [START bigtable_add_row_range_with_prefix] + from google.cloud.bigtable import Client + from google.cloud.bigtable.row_set import RowSet + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + table = instance.table(TABLE_ID) + + row_set = RowSet() + row_set.add_row_range_with_prefix("row") + # [END bigtable_add_row_range_with_prefix] + + read_rows = table.read_rows(row_set=row_set) + expected_row_keys = [ + b"row_key_1", + b"row_key_2", + b"row_key_3", + ] + found_row_keys = [row.row_key for row in read_rows] + assert found_row_keys == expected_row_keys table.truncate(timeout=200) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py index aa3e5eb922b3..5de7dabffbf3 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py @@ -112,6 +112,24 @@ def add_row_range_from_keys( row_range = RowRange(start_key, end_key, start_inclusive, end_inclusive) self.row_ranges.append(row_range) + def add_row_range_with_prefix(self, row_key_prefix): + """Add row range to row_ranges list that start with the row_key_prefix from the row keys + + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_add_row_range_with_prefix] + :end-before: [END bigtable_add_row_range_with_prefix] + + :type row_key_prefix: str + :param row_key_prefix: To retrieve all rows that start with this row key prefix. 
+ Prefix cannot be zero length.""" + + end_key = row_key_prefix[:-1] + chr(ord(row_key_prefix[-1]) + 1) + self.add_row_range_from_keys( + row_key_prefix.encode("utf-8"), end_key.encode("utf-8") + ) + def _update_message_request(self, message): """Add row keys and row range to given request message diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index c41c90a6a664..7dae9862e3a7 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -1083,6 +1083,38 @@ def test_yield_rows_with_row_set(self): found_row_keys = [row.row_key for row in read_rows] self.assertEqual(found_row_keys, expected_row_keys) + def test_add_row_range_by_prefix_from_keys(self): + row_keys = [ + b"row_key_1", + b"row_key_2", + b"row_key_3", + b"row_key_4", + b"sample_row_key_1", + b"sample_row_key_2", + ] + + rows = [] + for row_key in row_keys: + row = self._table.row(row_key) + row.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1) + rows.append(row) + self.rows_to_delete.append(row) + self._table.mutate_rows(rows) + + row_set = RowSet() + row_set.add_row_range_with_prefix("row") + + read_rows = self._table.yield_rows(row_set=row_set) + + expected_row_keys = [ + b"row_key_1", + b"row_key_2", + b"row_key_3", + b"row_key_4", + ] + found_row_keys = [row.row_key for row in read_rows] + self.assertEqual(found_row_keys, expected_row_keys) + def test_read_large_cell_limit(self): self._maybe_emulator_skip( "Maximum gRPC received message size for emulator is 4194304 bytes." diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_set.py b/packages/google-cloud-bigtable/tests/unit/test_row_set.py index c66341b84ec6..a855099a1fb3 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_set.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_set.py @@ -167,6 +167,11 @@ def test_add_row_range_from_keys(self): ) self.assertEqual(row_set.row_ranges[0].end_key, b"row_key9") + def test_add_row_range_with_prefix(self): + row_set = self._make_one() + row_set.add_row_range_with_prefix("row") + self.assertEqual(row_set.row_ranges[0].end_key, b"rox") + def test__update_message_request(self): row_set = self._make_one() table_name = "table_name" From a8df2e531bcc053dc64529cd12350f4d8d88e630 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Wed, 5 Aug 2020 09:16:03 -0400 Subject: [PATCH 326/892] tests: drop instance / cluster node counts for quota (#89) Toward #87 --- packages/google-cloud-bigtable/docs/snippets.py | 13 ++++++++----- .../samples/instanceadmin/instanceadmin.py | 4 ++-- .../samples/metricscaler/metricscaler.py | 2 +- .../samples/metricscaler/metricscaler_test.py | 2 +- packages/google-cloud-bigtable/tests/system.py | 12 +++++++----- 5 files changed, 19 insertions(+), 14 deletions(-) diff --git a/packages/google-cloud-bigtable/docs/snippets.py b/packages/google-cloud-bigtable/docs/snippets.py index 850362b4a42a..36dbe5e73e21 100644 --- a/packages/google-cloud-bigtable/docs/snippets.py +++ b/packages/google-cloud-bigtable/docs/snippets.py @@ -114,7 +114,7 @@ def test_bigtable_create_instance(): my_instance_id = "inst-my-" + UNIQUE_SUFFIX my_cluster_id = "clus-my-" + UNIQUE_SUFFIX location_id = "us-central1-f" - serve_nodes = 3 + serve_nodes = 1 storage_type = enums.StorageType.SSD production = enums.Instance.Type.PRODUCTION labels = {"prod-label": "prod-label"} @@ -155,7 +155,7 @@ def test_bigtable_create_additional_cluster(): cluster_id = "clus-my-" + UNIQUE_SUFFIX location_id = "us-central1-a" 
- serve_nodes = 3 + serve_nodes = 1 storage_type = enums.StorageType.SSD cluster = instance.cluster( @@ -447,10 +447,11 @@ def test_bigtable_delete_cluster(): client = Client(admin=True) instance = client.instance(INSTANCE_ID) cluster_id = "clus-my-" + UNIQUE_SUFFIX + serve_nodes = 1 cluster = instance.cluster( cluster_id, location_id=ALT_LOCATION_ID, - serve_nodes=SERVER_NODES, + serve_nodes=serve_nodes, default_storage_type=STORAGE_TYPE, ) operation = cluster.create() @@ -477,10 +478,11 @@ def test_bigtable_delete_instance(): instance_id = "snipt-inst-del" + UNIQUE_SUFFIX instance = client.instance(instance_id, instance_type=PRODUCTION, labels=LABELS) + serve_nodes = 1 cluster = instance.cluster( "clus-to-delete" + UNIQUE_SUFFIX, location_id=ALT_LOCATION_ID, - serve_nodes=1, + serve_nodes=serve_nodes, default_storage_type=STORAGE_TYPE, ) operation = instance.create(clusters=[cluster]) @@ -727,11 +729,12 @@ def test_bigtable_cluster_from_pb(): name = cluster.name cluster_state = cluster.state + serve_nodes = 1 cluster_pb = instance_pb2.Cluster( name=name, location=LOCATION_ID, state=cluster_state, - serve_nodes=SERVER_NODES, + serve_nodes=serve_nodes, default_storage_type=STORAGE_TYPE, ) diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/instanceadmin.py b/packages/google-cloud-bigtable/samples/instanceadmin/instanceadmin.py index 32120eb63751..482806ac989a 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/instanceadmin.py +++ b/packages/google-cloud-bigtable/samples/instanceadmin/instanceadmin.py @@ -50,7 +50,7 @@ def run_instance_operations(project_id, instance_id): ''' client = bigtable.Client(project=project_id, admin=True) location_id = 'us-central1-f' - serve_nodes = 3 + serve_nodes = 1 storage_type = enums.StorageType.SSD production = enums.Instance.Type.PRODUCTION labels = {'prod-label': 'prod-label'} @@ -170,7 +170,7 @@ def add_cluster(project_id, instance_id, cluster_id): instance = client.instance(instance_id) location_id = 'us-central1-a' - serve_nodes = 3 + serve_nodes = 1 storage_type = enums.StorageType.SSD if not instance.exists(): diff --git a/packages/google-cloud-bigtable/samples/metricscaler/metricscaler.py b/packages/google-cloud-bigtable/samples/metricscaler/metricscaler.py index 93aba8c811ad..43b430859a69 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/metricscaler.py +++ b/packages/google-cloud-bigtable/samples/metricscaler/metricscaler.py @@ -89,7 +89,7 @@ def scale_bigtable(bigtable_instance, bigtable_cluster, scale_up): # SSD lusters, and 8 TB for HDD. The # "bigtable.googleapis.com/disk/bytes_used" metric is useful in figuring # out the minimum number of nodes. - min_node_count = 3 + min_node_count = 1 # The maximum number of nodes to use. The default maximum is 30 nodes per # zone. 
If you need more quota, you can request more by following the diff --git a/packages/google-cloud-bigtable/samples/metricscaler/metricscaler_test.py b/packages/google-cloud-bigtable/samples/metricscaler/metricscaler_test.py index 3c8efaae3c45..06e1e27ca348 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/metricscaler_test.py +++ b/packages/google-cloud-bigtable/samples/metricscaler/metricscaler_test.py @@ -61,7 +61,7 @@ def instance(): client = bigtable.Client(project=PROJECT, admin=True) - serve_nodes = 3 + serve_nodes = 1 storage_type = enums.StorageType.SSD production = enums.Instance.Type.PRODUCTION labels = {'prod-label': 'prod-label'} diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index 7dae9862e3a7..e3823177ebe0 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -188,8 +188,9 @@ def test_create_instance_defaults(self): ALT_INSTANCE_ID = "ndef" + UNIQUE_SUFFIX instance = Config.CLIENT.instance(ALT_INSTANCE_ID, labels=LABELS) ALT_CLUSTER_ID = ALT_INSTANCE_ID + "-cluster" + serve_nodes = 1 cluster = instance.cluster( - ALT_CLUSTER_ID, location_id=LOCATION_ID, serve_nodes=SERVE_NODES + ALT_CLUSTER_ID, location_id=LOCATION_ID, serve_nodes=serve_nodes ) operation = instance.create(clusters=[cluster]) @@ -267,16 +268,17 @@ def test_create_instance_w_two_clusters(self): ALT_CLUSTER_ID_2 = ALT_INSTANCE_ID + "-c2" LOCATION_ID_2 = "us-central1-f" STORAGE_TYPE = enums.StorageType.HDD + serve_nodes = 1 cluster_1 = instance.cluster( ALT_CLUSTER_ID_1, location_id=LOCATION_ID, - serve_nodes=SERVE_NODES, + serve_nodes=serve_nodes, default_storage_type=STORAGE_TYPE, ) cluster_2 = instance.cluster( ALT_CLUSTER_ID_2, location_id=LOCATION_ID_2, - serve_nodes=SERVE_NODES, + serve_nodes=serve_nodes, default_storage_type=STORAGE_TYPE, ) operation = instance.create(clusters=[cluster_1, cluster_2]) @@ -482,7 +484,7 @@ def test_update_type(self): instance = Config.CLIENT.instance( ALT_INSTANCE_ID, instance_type=_DEVELOPMENT, labels=LABELS ) - operation = instance.create(location_id=LOCATION_ID, serve_nodes=None) + operation = instance.create(location_id=LOCATION_ID) # Make sure this instance gets deleted after the test case. 
self.instances_to_delete.append(instance) @@ -532,7 +534,7 @@ def test_create_cluster(self): ALT_CLUSTER_ID = INSTANCE_ID + "-c2" ALT_LOCATION_ID = "us-central1-f" - ALT_SERVE_NODES = 4 + ALT_SERVE_NODES = 2 cluster_2 = Config.INSTANCE.cluster( ALT_CLUSTER_ID, From 39537dd20964e91c7e8650f217b8ddf67e30e94e Mon Sep 17 00:00:00 2001 From: HemangChothani <50404902+HemangChothani@users.noreply.github.com> Date: Wed, 5 Aug 2020 23:38:58 +0530 Subject: [PATCH 327/892] docs: clarify 'Table.read_rows' snippet (#50) Co-authored-by: kolea2 <45548808+kolea2@users.noreply.github.com> Co-authored-by: Tres Seaver --- packages/google-cloud-bigtable/docs/snippets_table.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/docs/snippets_table.py b/packages/google-cloud-bigtable/docs/snippets_table.py index a1d8d37fc6a0..01b16cde07ce 100644 --- a/packages/google-cloud-bigtable/docs/snippets_table.py +++ b/packages/google-cloud-bigtable/docs/snippets_table.py @@ -202,9 +202,15 @@ def test_bigtable_write_read_drop_truncate(): # Read full table partial_rows = table.read_rows() - read_rows = [row for row in partial_rows] + + # Read row's value + total_rows = [] + for row in partial_rows: + cell = row.cells[COLUMN_FAMILY_ID][col_name][0] + print(cell.value.decode("utf-8")) + total_rows.append(cell) # [END bigtable_read_rows] - assert len(read_rows) == len(rows) + assert len(total_rows) == len(rows) # [START bigtable_drop_by_prefix] from google.cloud.bigtable import Client From 490e9c7de980d60c950b2b61ea3e303aaf88939b Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Wed, 5 Aug 2020 11:42:03 -0700 Subject: [PATCH 328/892] docs: update docs build (via synth) (#99) * feat(python-library): changes to docs job * feat(python-library): changes to docs job * migrate to Trampoline V2 * add docs-presubmit job * create docfx yaml files and upload them to another bucket * remove redundant envvars Source-Author: Takashi Matsuo Source-Date: Wed Jul 29 16:15:18 2020 -0700 Source-Repo: googleapis/synthtool Source-Sha: f07cb4446192952f19be3056957f56d180586055 Source-Link: https://github.com/googleapis/synthtool/commit/f07cb4446192952f19be3056957f56d180586055 * Revert "feat(python-library): changes to docs job (#700)" This reverts commit f07cb4446192952f19be3056957f56d180586055. 
Source-Author: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Source-Date: Wed Jul 29 17:33:57 2020 -0700 Source-Repo: googleapis/synthtool Source-Sha: ee7506d15daa3873accfff9430eff7e3953f0248 Source-Link: https://github.com/googleapis/synthtool/commit/ee7506d15daa3873accfff9430eff7e3953f0248 * feat(python-library): changes to docs job * feat(python-library): changes to docs job * migrate to Trampoline V2 * add docs-presubmit job * create docfx yaml files and upload them to another bucket * remove redundant envvars * add a failing test first * fix TemplateSyntaxError: Missing end of comment tag * serving_path is not needed any more * use `raw` to make jinja happy Source-Author: Takashi Matsuo Source-Date: Thu Jul 30 12:44:02 2020 -0700 Source-Repo: googleapis/synthtool Source-Sha: 5dfda5621df45b71b6e88544ebbb53b1a8c90214 Source-Link: https://github.com/googleapis/synthtool/commit/5dfda5621df45b71b6e88544ebbb53b1a8c90214 * fix(python-library): add missing changes Source-Author: Takashi Matsuo Source-Date: Thu Jul 30 18:26:35 2020 -0700 Source-Repo: googleapis/synthtool Source-Sha: 39b527a39f5cd56d4882b3874fc08eed4756cebe Source-Link: https://github.com/googleapis/synthtool/commit/39b527a39f5cd56d4882b3874fc08eed4756cebe * chore(py_library): add split_system_tests Source-Author: Takashi Matsuo Source-Date: Fri Jul 31 16:17:13 2020 -0700 Source-Repo: googleapis/synthtool Source-Sha: bfcdbe0da977b2de6c1c0471bb6dc2f1e13bf669 Source-Link: https://github.com/googleapis/synthtool/commit/bfcdbe0da977b2de6c1c0471bb6dc2f1e13bf669 * chore(py_library): add some excludes in docs/conf.py This should fix build failures in python-bigquery. Example: https://github.com/googleapis/python-bigquery/pull/205 * also add a comment Source-Author: Takashi Matsuo Source-Date: Mon Aug 3 15:08:00 2020 -0700 Source-Repo: googleapis/synthtool Source-Sha: 4f8f5dc24af79694887385015294e4dbb214c352 Source-Link: https://github.com/googleapis/synthtool/commit/4f8f5dc24af79694887385015294e4dbb214c352 Co-authored-by: Tres Seaver --- packages/google-cloud-bigtable/.gitignore | 3 +- .../google-cloud-bigtable/.kokoro/build.sh | 8 +- .../.kokoro/docker/docs/Dockerfile | 98 ++++ .../.kokoro/docker/docs/fetch_gpg_keys.sh | 45 ++ .../.kokoro/docs/common.cfg | 21 +- .../.kokoro/docs/docs-presubmit.cfg | 17 + .../.kokoro/publish-docs.sh | 39 +- .../.kokoro/trampoline_v2.sh | 487 ++++++++++++++++++ packages/google-cloud-bigtable/.trampolinerc | 51 ++ packages/google-cloud-bigtable/docs/conf.py | 11 +- packages/google-cloud-bigtable/synth.metadata | 6 +- 11 files changed, 763 insertions(+), 23 deletions(-) create mode 100644 packages/google-cloud-bigtable/.kokoro/docker/docs/Dockerfile create mode 100755 packages/google-cloud-bigtable/.kokoro/docker/docs/fetch_gpg_keys.sh create mode 100644 packages/google-cloud-bigtable/.kokoro/docs/docs-presubmit.cfg create mode 100755 packages/google-cloud-bigtable/.kokoro/trampoline_v2.sh create mode 100644 packages/google-cloud-bigtable/.trampolinerc diff --git a/packages/google-cloud-bigtable/.gitignore b/packages/google-cloud-bigtable/.gitignore index b87e1ed580d9..b9daa52f118d 100644 --- a/packages/google-cloud-bigtable/.gitignore +++ b/packages/google-cloud-bigtable/.gitignore @@ -46,6 +46,7 @@ pip-log.txt # Built documentation docs/_build bigquery/docs/generated +docs.metadata # Virtual environment env/ @@ -57,4 +58,4 @@ system_tests/local_test_setup # Make sure a generated file isn't accidentally committed. 
pylintrc -pylintrc.test \ No newline at end of file +pylintrc.test diff --git a/packages/google-cloud-bigtable/.kokoro/build.sh b/packages/google-cloud-bigtable/.kokoro/build.sh index f59d2895420b..124fd6ce9fac 100755 --- a/packages/google-cloud-bigtable/.kokoro/build.sh +++ b/packages/google-cloud-bigtable/.kokoro/build.sh @@ -36,4 +36,10 @@ python3.6 -m pip uninstall --yes --quiet nox-automation python3.6 -m pip install --upgrade --quiet nox python3.6 -m nox --version -python3.6 -m nox +# If NOX_SESSION is set, it only runs the specified session, +# otherwise run all the sessions. +if [[ -n "${NOX_SESSION:-}" ]]; then + python3.6 -m nox -s "${NOX_SESSION:-}" +else + python3.6 -m nox +fi diff --git a/packages/google-cloud-bigtable/.kokoro/docker/docs/Dockerfile b/packages/google-cloud-bigtable/.kokoro/docker/docs/Dockerfile new file mode 100644 index 000000000000..412b0b56a921 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/docker/docs/Dockerfile @@ -0,0 +1,98 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from ubuntu:20.04 + +ENV DEBIAN_FRONTEND noninteractive + +# Ensure local Python is preferred over distribution Python. +ENV PATH /usr/local/bin:$PATH + +# Install dependencies. +RUN apt-get update \ + && apt-get install -y --no-install-recommends \ + apt-transport-https \ + build-essential \ + ca-certificates \ + curl \ + dirmngr \ + git \ + gpg-agent \ + graphviz \ + libbz2-dev \ + libdb5.3-dev \ + libexpat1-dev \ + libffi-dev \ + liblzma-dev \ + libreadline-dev \ + libsnappy-dev \ + libssl-dev \ + libsqlite3-dev \ + portaudio19-dev \ + redis-server \ + software-properties-common \ + ssh \ + sudo \ + tcl \ + tcl-dev \ + tk \ + tk-dev \ + uuid-dev \ + wget \ + zlib1g-dev \ + && add-apt-repository universe \ + && apt-get update \ + && apt-get -y install jq \ + && apt-get clean autoclean \ + && apt-get autoremove -y \ + && rm -rf /var/lib/apt/lists/* \ + && rm -f /var/cache/apt/archives/*.deb + + +COPY fetch_gpg_keys.sh /tmp +# Install the desired versions of Python. 
+RUN set -ex \ + && export GNUPGHOME="$(mktemp -d)" \ + && echo "disable-ipv6" >> "${GNUPGHOME}/dirmngr.conf" \ + && /tmp/fetch_gpg_keys.sh \ + && for PYTHON_VERSION in 3.7.8 3.8.5; do \ + wget --no-check-certificate -O python-${PYTHON_VERSION}.tar.xz "https://www.python.org/ftp/python/${PYTHON_VERSION%%[a-z]*}/Python-$PYTHON_VERSION.tar.xz" \ + && wget --no-check-certificate -O python-${PYTHON_VERSION}.tar.xz.asc "https://www.python.org/ftp/python/${PYTHON_VERSION%%[a-z]*}/Python-$PYTHON_VERSION.tar.xz.asc" \ + && gpg --batch --verify python-${PYTHON_VERSION}.tar.xz.asc python-${PYTHON_VERSION}.tar.xz \ + && rm -r python-${PYTHON_VERSION}.tar.xz.asc \ + && mkdir -p /usr/src/python-${PYTHON_VERSION} \ + && tar -xJC /usr/src/python-${PYTHON_VERSION} --strip-components=1 -f python-${PYTHON_VERSION}.tar.xz \ + && rm python-${PYTHON_VERSION}.tar.xz \ + && cd /usr/src/python-${PYTHON_VERSION} \ + && ./configure \ + --enable-shared \ + # This works only on Python 2.7 and throws a warning on every other + # version, but seems otherwise harmless. + --enable-unicode=ucs4 \ + --with-system-ffi \ + --without-ensurepip \ + && make -j$(nproc) \ + && make install \ + && ldconfig \ + ; done \ + && rm -rf "${GNUPGHOME}" \ + && rm -rf /usr/src/python* \ + && rm -rf ~/.cache/ + +RUN wget -O /tmp/get-pip.py 'https://bootstrap.pypa.io/get-pip.py' \ + && python3.7 /tmp/get-pip.py \ + && python3.8 /tmp/get-pip.py \ + && rm /tmp/get-pip.py + +CMD ["python3.7"] diff --git a/packages/google-cloud-bigtable/.kokoro/docker/docs/fetch_gpg_keys.sh b/packages/google-cloud-bigtable/.kokoro/docker/docs/fetch_gpg_keys.sh new file mode 100755 index 000000000000..d653dd868e4b --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/docker/docs/fetch_gpg_keys.sh @@ -0,0 +1,45 @@ +#!/bin/bash +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# A script to fetch gpg keys with retry. +# Avoid jinja parsing the file. +# + +function retry { + if [[ "${#}" -le 1 ]]; then + echo "Usage: ${0} retry_count commands.." + exit 1 + fi + local retries=${1} + local command="${@:2}" + until [[ "${retries}" -le 0 ]]; do + $command && return 0 + if [[ $? -ne 0 ]]; then + echo "command failed, retrying" + ((retries--)) + fi + done + return 1 +} + +# 3.6.9, 3.7.5 (Ned Deily) +retry 3 gpg --keyserver ha.pool.sks-keyservers.net --recv-keys \ + 0D96DF4D4110E5C43FBFB17F2D347EA6AA65421D + +# 3.8.0 (Łukasz Langa) +retry 3 gpg --keyserver ha.pool.sks-keyservers.net --recv-keys \ + E3FF2839C048B25C084DEBE9B26995E310250568 + +# diff --git a/packages/google-cloud-bigtable/.kokoro/docs/common.cfg b/packages/google-cloud-bigtable/.kokoro/docs/common.cfg index 8769b3116aac..1831bf9d21a5 100644 --- a/packages/google-cloud-bigtable/.kokoro/docs/common.cfg +++ b/packages/google-cloud-bigtable/.kokoro/docs/common.cfg @@ -11,12 +11,12 @@ action { gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" # Use the trampoline script to run in docker. 
-build_file: "python-bigtable/.kokoro/trampoline.sh" +build_file: "python-bigtable/.kokoro/trampoline_v2.sh" # Configure the docker image for kokoro-trampoline. env_vars: { key: "TRAMPOLINE_IMAGE" - value: "gcr.io/cloud-devrel-kokoro-resources/python-multi" + value: "gcr.io/cloud-devrel-kokoro-resources/python-lib-docs" } env_vars: { key: "TRAMPOLINE_BUILD_FILE" @@ -28,6 +28,23 @@ env_vars: { value: "docs-staging" } +env_vars: { + key: "V2_STAGING_BUCKET" + value: "docs-staging-v2-staging" +} + +# It will upload the docker image after successful builds. +env_vars: { + key: "TRAMPOLINE_IMAGE_UPLOAD" + value: "true" +} + +# It will always build the docker image. +env_vars: { + key: "TRAMPOLINE_DOCKERFILE" + value: ".kokoro/docker/docs/Dockerfile" +} + # Fetch the token needed for reporting release status to GitHub before_action { fetch_keystore { diff --git a/packages/google-cloud-bigtable/.kokoro/docs/docs-presubmit.cfg b/packages/google-cloud-bigtable/.kokoro/docs/docs-presubmit.cfg new file mode 100644 index 000000000000..1118107829b7 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/docs/docs-presubmit.cfg @@ -0,0 +1,17 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "STAGING_BUCKET" + value: "gcloud-python-test" +} + +env_vars: { + key: "V2_STAGING_BUCKET" + value: "gcloud-python-test" +} + +# We only upload the image in the main `docs` build. +env_vars: { + key: "TRAMPOLINE_IMAGE_UPLOAD" + value: "false" +} diff --git a/packages/google-cloud-bigtable/.kokoro/publish-docs.sh b/packages/google-cloud-bigtable/.kokoro/publish-docs.sh index 7d51f64afceb..8acb14e802b0 100755 --- a/packages/google-cloud-bigtable/.kokoro/publish-docs.sh +++ b/packages/google-cloud-bigtable/.kokoro/publish-docs.sh @@ -18,26 +18,16 @@ set -eo pipefail # Disable buffering, so that the logs stream through. export PYTHONUNBUFFERED=1 -cd github/python-bigtable - -# Remove old nox -python3.6 -m pip uninstall --yes --quiet nox-automation +export PATH="${HOME}/.local/bin:${PATH}" # Install nox -python3.6 -m pip install --upgrade --quiet nox -python3.6 -m nox --version +python3 -m pip install --user --upgrade --quiet nox +python3 -m nox --version # build docs nox -s docs -python3 -m pip install gcp-docuploader - -# install a json parser -sudo apt-get update -sudo apt-get -y install software-properties-common -sudo add-apt-repository universe -sudo apt-get update -sudo apt-get -y install jq +python3 -m pip install --user gcp-docuploader # create metadata python3 -m docuploader create-metadata \ @@ -52,4 +42,23 @@ python3 -m docuploader create-metadata \ cat docs.metadata # upload docs -python3 -m docuploader upload docs/_build/html --metadata-file docs.metadata --staging-bucket docs-staging +python3 -m docuploader upload docs/_build/html --metadata-file docs.metadata --staging-bucket "${STAGING_BUCKET}" + + +# docfx yaml files +nox -s docfx + +# create metadata. 
+python3 -m docuploader create-metadata \ + --name=$(jq --raw-output '.name // empty' .repo-metadata.json) \ + --version=$(python3 setup.py --version) \ + --language=$(jq --raw-output '.language // empty' .repo-metadata.json) \ + --distribution-name=$(python3 setup.py --name) \ + --product-page=$(jq --raw-output '.product_documentation // empty' .repo-metadata.json) \ + --github-repository=$(jq --raw-output '.repo // empty' .repo-metadata.json) \ + --issue-tracker=$(jq --raw-output '.issue_tracker // empty' .repo-metadata.json) + +cat docs.metadata + +# upload docs +python3 -m docuploader upload docs/_build/html/docfx_yaml --metadata-file docs.metadata --destination-prefix docfx --staging-bucket "${V2_STAGING_BUCKET}" diff --git a/packages/google-cloud-bigtable/.kokoro/trampoline_v2.sh b/packages/google-cloud-bigtable/.kokoro/trampoline_v2.sh new file mode 100755 index 000000000000..719bcd5ba84d --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/trampoline_v2.sh @@ -0,0 +1,487 @@ +#!/usr/bin/env bash +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# trampoline_v2.sh +# +# This script does 3 things. +# +# 1. Prepare the Docker image for the test +# 2. Run the Docker with appropriate flags to run the test +# 3. Upload the newly built Docker image +# +# in a way that is somewhat compatible with trampoline_v1. +# +# To run this script, first download few files from gcs to /dev/shm. +# (/dev/shm is passed into the container as KOKORO_GFILE_DIR). +# +# gsutil cp gs://cloud-devrel-kokoro-resources/python-docs-samples/secrets_viewer_service_account.json /dev/shm +# gsutil cp gs://cloud-devrel-kokoro-resources/python-docs-samples/automl_secrets.txt /dev/shm +# +# Then run the script. +# .kokoro/trampoline_v2.sh +# +# These environment variables are required: +# TRAMPOLINE_IMAGE: The docker image to use. +# TRAMPOLINE_DOCKERFILE: The location of the Dockerfile. +# +# You can optionally change these environment variables: +# TRAMPOLINE_IMAGE_UPLOAD: +# (true|false): Whether to upload the Docker image after the +# successful builds. +# TRAMPOLINE_BUILD_FILE: The script to run in the docker container. +# TRAMPOLINE_WORKSPACE: The workspace path in the docker container. +# Defaults to /workspace. +# Potentially there are some repo specific envvars in .trampolinerc in +# the project root. + + +set -euo pipefail + +TRAMPOLINE_VERSION="2.0.5" + +if command -v tput >/dev/null && [[ -n "${TERM:-}" ]]; then + readonly IO_COLOR_RED="$(tput setaf 1)" + readonly IO_COLOR_GREEN="$(tput setaf 2)" + readonly IO_COLOR_YELLOW="$(tput setaf 3)" + readonly IO_COLOR_RESET="$(tput sgr0)" +else + readonly IO_COLOR_RED="" + readonly IO_COLOR_GREEN="" + readonly IO_COLOR_YELLOW="" + readonly IO_COLOR_RESET="" +fi + +function function_exists { + [ $(LC_ALL=C type -t $1)"" == "function" ] +} + +# Logs a message using the given color. The first argument must be one +# of the IO_COLOR_* variables defined above, such as +# "${IO_COLOR_YELLOW}". 
The remaining arguments will be logged in the +# given color. The log message will also have an RFC-3339 timestamp +# prepended (in UTC). You can disable the color output by setting +# TERM=vt100. +function log_impl() { + local color="$1" + shift + local timestamp="$(date -u "+%Y-%m-%dT%H:%M:%SZ")" + echo "================================================================" + echo "${color}${timestamp}:" "$@" "${IO_COLOR_RESET}" + echo "================================================================" +} + +# Logs the given message with normal coloring and a timestamp. +function log() { + log_impl "${IO_COLOR_RESET}" "$@" +} + +# Logs the given message in green with a timestamp. +function log_green() { + log_impl "${IO_COLOR_GREEN}" "$@" +} + +# Logs the given message in yellow with a timestamp. +function log_yellow() { + log_impl "${IO_COLOR_YELLOW}" "$@" +} + +# Logs the given message in red with a timestamp. +function log_red() { + log_impl "${IO_COLOR_RED}" "$@" +} + +readonly tmpdir=$(mktemp -d -t ci-XXXXXXXX) +readonly tmphome="${tmpdir}/h" +mkdir -p "${tmphome}" + +function cleanup() { + rm -rf "${tmpdir}" +} +trap cleanup EXIT + +RUNNING_IN_CI="${RUNNING_IN_CI:-false}" + +# The workspace in the container, defaults to /workspace. +TRAMPOLINE_WORKSPACE="${TRAMPOLINE_WORKSPACE:-/workspace}" + +pass_down_envvars=( + # TRAMPOLINE_V2 variables. + # Tells scripts whether they are running as part of CI or not. + "RUNNING_IN_CI" + # Indicates which CI system we're in. + "TRAMPOLINE_CI" + # Indicates the version of the script. + "TRAMPOLINE_VERSION" +) + +log_yellow "Building with Trampoline ${TRAMPOLINE_VERSION}" + +# Detect which CI systems we're in. If we're in any of the CI systems +# we support, `RUNNING_IN_CI` will be true and `TRAMPOLINE_CI` will be +# the name of the CI system. Both envvars will be passing down to the +# container for telling which CI system we're in. +if [[ -n "${KOKORO_BUILD_ID:-}" ]]; then + # descriptive env var for indicating it's on CI. + RUNNING_IN_CI="true" + TRAMPOLINE_CI="kokoro" + if [[ "${TRAMPOLINE_USE_LEGACY_SERVICE_ACCOUNT:-}" == "true" ]]; then + if [[ ! -f "${KOKORO_GFILE_DIR}/kokoro-trampoline.service-account.json" ]]; then + log_red "${KOKORO_GFILE_DIR}/kokoro-trampoline.service-account.json does not exist. Did you forget to mount cloud-devrel-kokoro-resources/trampoline? Aborting." + exit 1 + fi + # This service account will be activated later. + TRAMPOLINE_SERVICE_ACCOUNT="${KOKORO_GFILE_DIR}/kokoro-trampoline.service-account.json" + else + if [[ "${TRAMPOLINE_VERBOSE:-}" == "true" ]]; then + gcloud auth list + fi + log_yellow "Configuring Container Registry access" + gcloud auth configure-docker --quiet + fi + pass_down_envvars+=( + # KOKORO dynamic variables. 
+ "KOKORO_BUILD_NUMBER" + "KOKORO_BUILD_ID" + "KOKORO_JOB_NAME" + "KOKORO_GIT_COMMIT" + "KOKORO_GITHUB_COMMIT" + "KOKORO_GITHUB_PULL_REQUEST_NUMBER" + "KOKORO_GITHUB_PULL_REQUEST_COMMIT" + # For Build Cop Bot + "KOKORO_GITHUB_COMMIT_URL" + "KOKORO_GITHUB_PULL_REQUEST_URL" + ) +elif [[ "${TRAVIS:-}" == "true" ]]; then + RUNNING_IN_CI="true" + TRAMPOLINE_CI="travis" + pass_down_envvars+=( + "TRAVIS_BRANCH" + "TRAVIS_BUILD_ID" + "TRAVIS_BUILD_NUMBER" + "TRAVIS_BUILD_WEB_URL" + "TRAVIS_COMMIT" + "TRAVIS_COMMIT_MESSAGE" + "TRAVIS_COMMIT_RANGE" + "TRAVIS_JOB_NAME" + "TRAVIS_JOB_NUMBER" + "TRAVIS_JOB_WEB_URL" + "TRAVIS_PULL_REQUEST" + "TRAVIS_PULL_REQUEST_BRANCH" + "TRAVIS_PULL_REQUEST_SHA" + "TRAVIS_PULL_REQUEST_SLUG" + "TRAVIS_REPO_SLUG" + "TRAVIS_SECURE_ENV_VARS" + "TRAVIS_TAG" + ) +elif [[ -n "${GITHUB_RUN_ID:-}" ]]; then + RUNNING_IN_CI="true" + TRAMPOLINE_CI="github-workflow" + pass_down_envvars+=( + "GITHUB_WORKFLOW" + "GITHUB_RUN_ID" + "GITHUB_RUN_NUMBER" + "GITHUB_ACTION" + "GITHUB_ACTIONS" + "GITHUB_ACTOR" + "GITHUB_REPOSITORY" + "GITHUB_EVENT_NAME" + "GITHUB_EVENT_PATH" + "GITHUB_SHA" + "GITHUB_REF" + "GITHUB_HEAD_REF" + "GITHUB_BASE_REF" + ) +elif [[ "${CIRCLECI:-}" == "true" ]]; then + RUNNING_IN_CI="true" + TRAMPOLINE_CI="circleci" + pass_down_envvars+=( + "CIRCLE_BRANCH" + "CIRCLE_BUILD_NUM" + "CIRCLE_BUILD_URL" + "CIRCLE_COMPARE_URL" + "CIRCLE_JOB" + "CIRCLE_NODE_INDEX" + "CIRCLE_NODE_TOTAL" + "CIRCLE_PREVIOUS_BUILD_NUM" + "CIRCLE_PROJECT_REPONAME" + "CIRCLE_PROJECT_USERNAME" + "CIRCLE_REPOSITORY_URL" + "CIRCLE_SHA1" + "CIRCLE_STAGE" + "CIRCLE_USERNAME" + "CIRCLE_WORKFLOW_ID" + "CIRCLE_WORKFLOW_JOB_ID" + "CIRCLE_WORKFLOW_UPSTREAM_JOB_IDS" + "CIRCLE_WORKFLOW_WORKSPACE_ID" + ) +fi + +# Configure the service account for pulling the docker image. +function repo_root() { + local dir="$1" + while [[ ! -d "${dir}/.git" ]]; do + dir="$(dirname "$dir")" + done + echo "${dir}" +} + +# Detect the project root. In CI builds, we assume the script is in +# the git tree and traverse from there, otherwise, traverse from `pwd` +# to find `.git` directory. +if [[ "${RUNNING_IN_CI:-}" == "true" ]]; then + PROGRAM_PATH="$(realpath "$0")" + PROGRAM_DIR="$(dirname "${PROGRAM_PATH}")" + PROJECT_ROOT="$(repo_root "${PROGRAM_DIR}")" +else + PROJECT_ROOT="$(repo_root $(pwd))" +fi + +log_yellow "Changing to the project root: ${PROJECT_ROOT}." +cd "${PROJECT_ROOT}" + +# To support relative path for `TRAMPOLINE_SERVICE_ACCOUNT`, we need +# to use this environment variable in `PROJECT_ROOT`. +if [[ -n "${TRAMPOLINE_SERVICE_ACCOUNT:-}" ]]; then + + mkdir -p "${tmpdir}/gcloud" + gcloud_config_dir="${tmpdir}/gcloud" + + log_yellow "Using isolated gcloud config: ${gcloud_config_dir}." + export CLOUDSDK_CONFIG="${gcloud_config_dir}" + + log_yellow "Using ${TRAMPOLINE_SERVICE_ACCOUNT} for authentication." + gcloud auth activate-service-account \ + --key-file "${TRAMPOLINE_SERVICE_ACCOUNT}" + log_yellow "Configuring Container Registry access" + gcloud auth configure-docker --quiet +fi + +required_envvars=( + # The basic trampoline configurations. + "TRAMPOLINE_IMAGE" + "TRAMPOLINE_BUILD_FILE" +) + +if [[ -f "${PROJECT_ROOT}/.trampolinerc" ]]; then + source "${PROJECT_ROOT}/.trampolinerc" +fi + +log_yellow "Checking environment variables." +for e in "${required_envvars[@]}" +do + if [[ -z "${!e:-}" ]]; then + log "Missing ${e} env var. Aborting." + exit 1 + fi +done + +# We want to support legacy style TRAMPOLINE_BUILD_FILE used with V1 +# script: e.g. 
"github/repo-name/.kokoro/run_tests.sh" +TRAMPOLINE_BUILD_FILE="${TRAMPOLINE_BUILD_FILE#github/*/}" +log_yellow "Using TRAMPOLINE_BUILD_FILE: ${TRAMPOLINE_BUILD_FILE}" + +# ignore error on docker operations and test execution +set +e + +log_yellow "Preparing Docker image." +# We only download the docker image in CI builds. +if [[ "${RUNNING_IN_CI:-}" == "true" ]]; then + # Download the docker image specified by `TRAMPOLINE_IMAGE` + + # We may want to add --max-concurrent-downloads flag. + + log_yellow "Start pulling the Docker image: ${TRAMPOLINE_IMAGE}." + if docker pull "${TRAMPOLINE_IMAGE}"; then + log_green "Finished pulling the Docker image: ${TRAMPOLINE_IMAGE}." + has_image="true" + else + log_red "Failed pulling the Docker image: ${TRAMPOLINE_IMAGE}." + has_image="false" + fi +else + # For local run, check if we have the image. + if docker images "${TRAMPOLINE_IMAGE}:latest" | grep "${TRAMPOLINE_IMAGE}"; then + has_image="true" + else + has_image="false" + fi +fi + + +# The default user for a Docker container has uid 0 (root). To avoid +# creating root-owned files in the build directory we tell docker to +# use the current user ID. +user_uid="$(id -u)" +user_gid="$(id -g)" +user_name="$(id -un)" + +# To allow docker in docker, we add the user to the docker group in +# the host os. +docker_gid=$(cut -d: -f3 < <(getent group docker)) + +update_cache="false" +if [[ "${TRAMPOLINE_DOCKERFILE:-none}" != "none" ]]; then + # Build the Docker image from the source. + context_dir=$(dirname "${TRAMPOLINE_DOCKERFILE}") + docker_build_flags=( + "-f" "${TRAMPOLINE_DOCKERFILE}" + "-t" "${TRAMPOLINE_IMAGE}" + "--build-arg" "UID=${user_uid}" + "--build-arg" "USERNAME=${user_name}" + ) + if [[ "${has_image}" == "true" ]]; then + docker_build_flags+=("--cache-from" "${TRAMPOLINE_IMAGE}") + fi + + log_yellow "Start building the docker image." + if [[ "${TRAMPOLINE_VERBOSE:-false}" == "true" ]]; then + echo "docker build" "${docker_build_flags[@]}" "${context_dir}" + fi + + # ON CI systems, we want to suppress docker build logs, only + # output the logs when it fails. + if [[ "${RUNNING_IN_CI:-}" == "true" ]]; then + if docker build "${docker_build_flags[@]}" "${context_dir}" \ + > "${tmpdir}/docker_build.log" 2>&1; then + if [[ "${TRAMPOLINE_VERBOSE:-}" == "true" ]]; then + cat "${tmpdir}/docker_build.log" + fi + + log_green "Finished building the docker image." + update_cache="true" + else + log_red "Failed to build the Docker image, aborting." + log_yellow "Dumping the build logs:" + cat "${tmpdir}/docker_build.log" + exit 1 + fi + else + if docker build "${docker_build_flags[@]}" "${context_dir}"; then + log_green "Finished building the docker image." + update_cache="true" + else + log_red "Failed to build the Docker image, aborting." + exit 1 + fi + fi +else + if [[ "${has_image}" != "true" ]]; then + log_red "We do not have ${TRAMPOLINE_IMAGE} locally, aborting." + exit 1 + fi +fi + +# We use an array for the flags so they are easier to document. +docker_flags=( + # Remove the container after it exists. + "--rm" + + # Use the host network. + "--network=host" + + # Run in priviledged mode. We are not using docker for sandboxing or + # isolation, just for packaging our dev tools. + "--privileged" + + # Run the docker script with the user id. Because the docker image gets to + # write in ${PWD} you typically want this to be your user id. + # To allow docker in docker, we need to use docker gid on the host. + "--user" "${user_uid}:${docker_gid}" + + # Pass down the USER. 
+ "--env" "USER=${user_name}" + + # Mount the project directory inside the Docker container. + "--volume" "${PROJECT_ROOT}:${TRAMPOLINE_WORKSPACE}" + "--workdir" "${TRAMPOLINE_WORKSPACE}" + "--env" "PROJECT_ROOT=${TRAMPOLINE_WORKSPACE}" + + # Mount the temporary home directory. + "--volume" "${tmphome}:/h" + "--env" "HOME=/h" + + # Allow docker in docker. + "--volume" "/var/run/docker.sock:/var/run/docker.sock" + + # Mount the /tmp so that docker in docker can mount the files + # there correctly. + "--volume" "/tmp:/tmp" + # Pass down the KOKORO_GFILE_DIR and KOKORO_KEYSTORE_DIR + # TODO(tmatsuo): This part is not portable. + "--env" "TRAMPOLINE_SECRET_DIR=/secrets" + "--volume" "${KOKORO_GFILE_DIR:-/dev/shm}:/secrets/gfile" + "--env" "KOKORO_GFILE_DIR=/secrets/gfile" + "--volume" "${KOKORO_KEYSTORE_DIR:-/dev/shm}:/secrets/keystore" + "--env" "KOKORO_KEYSTORE_DIR=/secrets/keystore" +) + +# Add an option for nicer output if the build gets a tty. +if [[ -t 0 ]]; then + docker_flags+=("-it") +fi + +# Passing down env vars +for e in "${pass_down_envvars[@]}" +do + if [[ -n "${!e:-}" ]]; then + docker_flags+=("--env" "${e}=${!e}") + fi +done + +# If arguments are given, all arguments will become the commands run +# in the container, otherwise run TRAMPOLINE_BUILD_FILE. +if [[ $# -ge 1 ]]; then + log_yellow "Running the given commands '" "${@:1}" "' in the container." + readonly commands=("${@:1}") + if [[ "${TRAMPOLINE_VERBOSE:-}" == "true" ]]; then + echo docker run "${docker_flags[@]}" "${TRAMPOLINE_IMAGE}" "${commands[@]}" + fi + docker run "${docker_flags[@]}" "${TRAMPOLINE_IMAGE}" "${commands[@]}" +else + log_yellow "Running the tests in a Docker container." + docker_flags+=("--entrypoint=${TRAMPOLINE_BUILD_FILE}") + if [[ "${TRAMPOLINE_VERBOSE:-}" == "true" ]]; then + echo docker run "${docker_flags[@]}" "${TRAMPOLINE_IMAGE}" + fi + docker run "${docker_flags[@]}" "${TRAMPOLINE_IMAGE}" +fi + + +test_retval=$? + +if [[ ${test_retval} -eq 0 ]]; then + log_green "Build finished with ${test_retval}" +else + log_red "Build finished with ${test_retval}" +fi + +# Only upload it when the test passes. +if [[ "${update_cache}" == "true" ]] && \ + [[ $test_retval == 0 ]] && \ + [[ "${TRAMPOLINE_IMAGE_UPLOAD:-false}" == "true" ]]; then + log_yellow "Uploading the Docker image." + if docker push "${TRAMPOLINE_IMAGE}"; then + log_green "Finished uploading the Docker image." + else + log_red "Failed uploading the Docker image." + fi + # Call trampoline_after_upload_hook if it's defined. + if function_exists trampoline_after_upload_hook; then + trampoline_after_upload_hook + fi + +fi + +exit "${test_retval}" diff --git a/packages/google-cloud-bigtable/.trampolinerc b/packages/google-cloud-bigtable/.trampolinerc new file mode 100644 index 000000000000..995ee29111e1 --- /dev/null +++ b/packages/google-cloud-bigtable/.trampolinerc @@ -0,0 +1,51 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Template for .trampolinerc + +# Add required env vars here. 
+required_envvars+=( + "STAGING_BUCKET" + "V2_STAGING_BUCKET" +) + +# Add env vars which are passed down into the container here. +pass_down_envvars+=( + "STAGING_BUCKET" + "V2_STAGING_BUCKET" +) + +# Prevent unintentional override on the default image. +if [[ "${TRAMPOLINE_IMAGE_UPLOAD:-false}" == "true" ]] && \ + [[ -z "${TRAMPOLINE_IMAGE:-}" ]]; then + echo "Please set TRAMPOLINE_IMAGE if you want to upload the Docker image." + exit 1 +fi + +# Define the default value if it makes sense. +if [[ -z "${TRAMPOLINE_IMAGE_UPLOAD:-}" ]]; then + TRAMPOLINE_IMAGE_UPLOAD="" +fi + +if [[ -z "${TRAMPOLINE_IMAGE:-}" ]]; then + TRAMPOLINE_IMAGE="" +fi + +if [[ -z "${TRAMPOLINE_DOCKERFILE:-}" ]]; then + TRAMPOLINE_DOCKERFILE="" +fi + +if [[ -z "${TRAMPOLINE_BUILD_FILE:-}" ]]; then + TRAMPOLINE_BUILD_FILE="" +fi diff --git a/packages/google-cloud-bigtable/docs/conf.py b/packages/google-cloud-bigtable/docs/conf.py index a33e54fc2490..a2df032739e9 100644 --- a/packages/google-cloud-bigtable/docs/conf.py +++ b/packages/google-cloud-bigtable/docs/conf.py @@ -20,6 +20,10 @@ # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath("..")) +# For plugins that can not read conf.py. +# See also: https://github.com/docascode/sphinx-docfx-yaml/issues/85 +sys.path.insert(0, os.path.abspath(".")) + __version__ = "" # -- General configuration ------------------------------------------------ @@ -90,7 +94,12 @@ # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. -exclude_patterns = ["_build"] +exclude_patterns = [ + "_build", + "samples/AUTHORING_GUIDE.md", + "samples/CONTRIBUTING.md", + "samples/snippets/README.rst", +] # The reST default role (used for this markup: `text`) to use for all # documents. diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index b1f195d58313..b541fe17f804 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -4,7 +4,7 @@ "git": { "name": ".", "remote": "https://github.com/googleapis/python-bigtable.git", - "sha": "adedea9daee0231e37a8848a8050b81ea217c6a8" + "sha": "4796ac85c877d75ed596cde7628dae31918ef726" } }, { @@ -19,14 +19,14 @@ "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "303271797a360f8a439203413f13a160f2f5b3b4" + "sha": "4f8f5dc24af79694887385015294e4dbb214c352" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "303271797a360f8a439203413f13a160f2f5b3b4" + "sha": "4f8f5dc24af79694887385015294e4dbb214c352" } } ], From 49fc7339225f7d5345f4c4c5a32eb57d16889616 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Wed, 5 Aug 2020 16:04:02 -0400 Subject: [PATCH 329/892] chore: add docfx session to noxfile, normalize (#100) Unbreaking the `docs-presubmit` build. 
--- packages/google-cloud-bigtable/noxfile.py | 111 ++++++++++++++-------- 1 file changed, 74 insertions(+), 37 deletions(-) diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index 3bca8a099331..7947441c6f92 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -21,9 +21,12 @@ import nox +DEFAULT_PYTHON_VERSION = "3.8" +SYSTEM_TEST_PYTHON_VERSIONS = ["2.7", "3.8"] +UNIT_TEST_PYTHON_VERSIONS = ["2.7", "3.5", "3.6", "3.7", "3.8"] LOCAL_DEPS = () -@nox.session(python="3.7") +@nox.session(python=DEFAULT_PYTHON_VERSION) def lint(session): """Run linters. @@ -56,7 +59,7 @@ def blacken(session): ) -@nox.session(python="3.7") +@nox.session(python=DEFAULT_PYTHON_VERSION) def lint_setup_py(session): """Verify that setup.py is valid (including RST check).""" session.install("docutils", "pygments") @@ -85,13 +88,26 @@ def default(session): ) -@nox.session(python=["2.7", "3.5", "3.6", "3.7"]) +@nox.session(python=UNIT_TEST_PYTHON_VERSIONS) def unit(session): """Run the unit test suite.""" default(session) -@nox.session(python=["2.7", "3.7"]) +@nox.session(python=DEFAULT_PYTHON_VERSION) +def cover(session): + """Run the final coverage report. + + This outputs the coverage report aggregating coverage from the unit + test runs (not system test runs), and then erases coverage data. + """ + session.install("coverage", "pytest-cov") + session.run("coverage", "report", "--show-missing", "--fail-under=99") + + session.run("coverage", "erase") + + +@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS) def system(session): """Run the system test suite.""" system_test_path = os.path.join("tests", "system.py") @@ -124,19 +140,35 @@ def system(session): session.run("py.test", "--quiet", system_test_folder_path, *session.posargs) -@nox.session(python="3.7") -def cover(session): - """Run the final coverage report. +@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS) +def snippets(session): + """Run the documentation example snippets.""" + # Sanity check: Only run snippets system tests if the environment variable + # is set. + if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""): + session.skip("Credentials must be set via environment variable.") - This outputs the coverage report aggregating coverage from the unit - test runs (not system test runs), and then erases coverage data. - """ - session.install("coverage", "pytest-cov") - session.run("coverage", "report", "--show-missing", "--fail-under=99") + # Install all test dependencies, then install local packages in place. + session.install("mock", "pytest") + for local_dep in LOCAL_DEPS: + session.install("-e", local_dep) + session.install("-e", "test_utils/") + session.install("-e", ".") + session.run( + "py.test", + "--quiet", + os.path.join("docs", "snippets.py"), + *session.posargs + ) + session.run( + "py.test", + "--quiet", + os.path.join("docs", "snippets_table.py"), + *session.posargs + ) - session.run("coverage", "erase") -@nox.session(python="3.7") +@nox.session(python=DEFAULT_PYTHON_VERSION) def docs(session): """Build the docs for this library.""" @@ -157,29 +189,34 @@ def docs(session): os.path.join("docs", "_build", "html", ""), ) -@nox.session(python=['2.7', '3.7']) -def snippets(session): - """Run the documentation example snippets.""" - # Sanity check: Only run snippets system tests if the environment variable - # is set. 
- if not os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', ''): - session.skip('Credentials must be set via environment variable.') +@nox.session(python=DEFAULT_PYTHON_VERSION) +def docfx(session): + """Build the docfx yaml files for this library.""" - # Install all test dependencies, then install local packages in place. - session.install('mock', 'pytest') - for local_dep in LOCAL_DEPS: - session.install('-e', local_dep) - session.install('-e', 'test_utils/') - session.install('-e', '.') - session.run( - 'py.test', - '--quiet', - os.path.join('docs', 'snippets.py'), - *session.posargs - ) + session.install("-e", ".") + session.install("sphinx", "alabaster", "recommonmark", "sphinx-docfx-yaml") + + shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) session.run( - 'py.test', - '--quiet', - os.path.join('docs', 'snippets_table.py'), - *session.posargs + "sphinx-build", + "-T", # show full traceback on exception + "-N", # no colors + "-D", + ( + "extensions=sphinx.ext.autodoc," + "sphinx.ext.autosummary," + "docfx_yaml.extension," + "sphinx.ext.intersphinx," + "sphinx.ext.coverage," + "sphinx.ext.napoleon," + "sphinx.ext.todo," + "sphinx.ext.viewcode," + "recommonmark" + ), + "-b", + "html", + "-d", + os.path.join("docs", "_build", "doctrees", ""), + os.path.join("docs", ""), + os.path.join("docs", "_build", "html", ""), ) From 9f7fcd3770bc4d9aadeb658dd7643b0fdbdcbe4c Mon Sep 17 00:00:00 2001 From: kolea2 <45548808+kolea2@users.noreply.github.com> Date: Wed, 5 Aug 2020 17:08:07 -0400 Subject: [PATCH 330/892] docs: switch links to client documentation (#93) When we add new API, the line numbers for these docs in github can change. Switching to the client doc links so we don't have to update if line numbers change --- .../google-cloud-bigtable/docs/instance-api.rst | 12 ++++++------ .../google-cloud-bigtable/docs/row-filters.rst | 2 +- packages/google-cloud-bigtable/docs/table-api.rst | 14 +++++++------- 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/packages/google-cloud-bigtable/docs/instance-api.rst b/packages/google-cloud-bigtable/docs/instance-api.rst index 65994dd15520..52a2fb0a5869 100644 --- a/packages/google-cloud-bigtable/docs/instance-api.rst +++ b/packages/google-cloud-bigtable/docs/instance-api.rst @@ -121,10 +121,10 @@ Now we go down the hierarchy from Head next to learn about the :doc:`table-api`. .. _Instance Admin API: https://cloud.google.com/bigtable/docs/creating-instance -.. _CreateInstance: https://github.com/googleapis/python-bigtable/blob/master/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto#L41-L47 -.. _GetInstance: https://github.com/googleapis/python-bigtable/blob/master/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto#L50-L54 -.. _UpdateInstance: https://github.com/googleapis/python-bigtable/blob/master/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto#L64-L69 -.. _DeleteInstance: https://github.com/googleapis/python-bigtable/blob/master/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto#L81-L85 -.. _ListInstances: https://github.com/googleapis/python-bigtable/blob/master/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto#L57-L61 -.. _GetOperation: https://github.com/googleapis/googleapis/blob/master/google/longrunning/operations.proto#L77-L82 +.. _CreateInstance: https://googleapis.dev/python/bigtable/latest/instance-api.html#create-a-new-instance +.. 
_GetInstance: https://googleapis.dev/python/bigtable/latest/instance-api.html#get-metadata-for-an-existing-instance +.. _UpdateInstance: https://googleapis.dev/python/bigtable/latest/instance-api.html#update-an-existing-instance +.. _DeleteInstance: https://googleapis.dev/python/bigtable/latest/instance-api.html#delete-an-existing-instance +.. _ListInstances: https://googleapis.dev/python/bigtable/latest/instance-api.html#list-instances +.. _GetOperation: https://googleapis.dev/python/bigtable/latest/instance-api.html#check-on-current-operation .. _long-running operation: https://github.com/googleapis/googleapis/blob/master/google/longrunning/operations.proto#L128-L162 diff --git a/packages/google-cloud-bigtable/docs/row-filters.rst b/packages/google-cloud-bigtable/docs/row-filters.rst index ba5b725905e8..9884ce400d52 100644 --- a/packages/google-cloud-bigtable/docs/row-filters.rst +++ b/packages/google-cloud-bigtable/docs/row-filters.rst @@ -64,4 +64,4 @@ level. For example: :members: :show-inheritance: -.. _RowFilter definition: https://github.com/googleapis/python-bigtable/blob/master/google/cloud/bigtable_v2/proto/bigtable_data.proto#L196 +.. _RowFilter definition: https://googleapis.dev/python/bigtable/latest/row-filters.html?highlight=rowfilter#google.cloud.bigtable.row_filters.RowFilter diff --git a/packages/google-cloud-bigtable/docs/table-api.rst b/packages/google-cloud-bigtable/docs/table-api.rst index 00beb4ffd809..20d70e990a35 100644 --- a/packages/google-cloud-bigtable/docs/table-api.rst +++ b/packages/google-cloud-bigtable/docs/table-api.rst @@ -143,11 +143,11 @@ data directly via a :class:`Table `. Head next to learn about the :doc:`data-api`. -.. _ListTables: https://github.com/googleapis/python-bigtable/blob/master/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto#L69-L73 -.. _CreateTable: https://github.com/googleapis/python-bigtable/blob/master/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto#L45-L50 -.. _DeleteTable: https://github.com/googleapis/python-bigtable/blob/master/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto#L83-L87 -.. _GetTable: https://github.com/googleapis/python-bigtable/blob/master/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto#L76-L80 -.. _CreateColumnFamily: https://github.com/googleapis/python-bigtable/blob/master/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto#L93-L98 -.. _UpdateColumnFamily: https://github.com/googleapis/python-bigtable/blob/master/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto#L93-L98 -.. _DeleteColumnFamily: https://github.com/googleapis/python-bigtable/blob/master/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto#L93-L98 +.. _ListTables: https://googleapis.dev/python/bigtable/latest/table-api.html#list-tables +.. _CreateTable: https://googleapis.dev/python/bigtable/latest/table-api.html#create-a-new-table +.. _DeleteTable: https://googleapis.dev/python/bigtable/latest/table-api.html#delete-an-existing-table +.. _GetTable: https://github.com/googleapis/python-bigtable/blob/master/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto#L97-L102 +.. _CreateColumnFamily: https://googleapis.dev/python/bigtable/latest/table-api.html?highlight=gettable#create-a-new-column-family +.. _UpdateColumnFamily: https://googleapis.dev/python/bigtable/latest/table-api.html?highlight=gettable#update-an-existing-column-family +.. 
_DeleteColumnFamily: https://googleapis.dev/python/bigtable/latest/table-api.html?highlight=gettable#delete-an-existing-column-family .. _column families: https://cloud.google.com/bigtable/docs/schema-design#column_families_and_column_qualifiers From c802716324101349d53f5534c3bf85470391f166 Mon Sep 17 00:00:00 2001 From: Billy Jacobson Date: Thu, 6 Aug 2020 11:39:20 -0400 Subject: [PATCH 331/892] docs: add sample for writing data with Beam (#80) Co-authored-by: Tres Seaver --- .../samples/beam/hello_world_write.py | 64 +++++ .../samples/beam/hello_world_write_test.py | 57 +++++ .../samples/beam/noxfile.py | 224 ++++++++++++++++++ .../samples/beam/requirements-test.txt | 1 + .../samples/beam/requirements.txt | 3 + 5 files changed, 349 insertions(+) create mode 100644 packages/google-cloud-bigtable/samples/beam/hello_world_write.py create mode 100644 packages/google-cloud-bigtable/samples/beam/hello_world_write_test.py create mode 100644 packages/google-cloud-bigtable/samples/beam/noxfile.py create mode 100644 packages/google-cloud-bigtable/samples/beam/requirements-test.txt create mode 100644 packages/google-cloud-bigtable/samples/beam/requirements.txt diff --git a/packages/google-cloud-bigtable/samples/beam/hello_world_write.py b/packages/google-cloud-bigtable/samples/beam/hello_world_write.py new file mode 100644 index 000000000000..894edc46fb73 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/beam/hello_world_write.py @@ -0,0 +1,64 @@ +# Copyright 2020 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import datetime + +import apache_beam as beam +from apache_beam.io.gcp.bigtableio import WriteToBigTable +from apache_beam.options.pipeline_options import PipelineOptions +from google.cloud.bigtable import row + + +class BigtableOptions(PipelineOptions): + @classmethod + def _add_argparse_args(cls, parser): + parser.add_argument( + '--bigtable-project', + help='The Bigtable project ID, this can be different than your ' + 'Dataflow project', + default='bigtable-project') + parser.add_argument( + '--bigtable-instance', + help='The Bigtable instance ID', + default='bigtable-instance') + parser.add_argument( + '--bigtable-table', + help='The Bigtable table ID in the instance.', + default='bigtable-table') + + +class CreateRowFn(beam.DoFn): + def process(self, key): + direct_row = row.DirectRow(row_key=key) + direct_row.set_cell( + "stats_summary", + b"os_build", + b"android", + datetime.datetime.now()) + return [direct_row] + + +def run(argv=None): + """Build and run the pipeline.""" + options = BigtableOptions(argv) + with beam.Pipeline(options=options) as p: + p | beam.Create(["phone#4c410523#20190501", + "phone#4c410523#20190502"]) | beam.ParDo( + CreateRowFn()) | WriteToBigTable( + project_id=options.bigtable_project, + instance_id=options.bigtable_instance, + table_id=options.bigtable_table) + + +if __name__ == '__main__': + run() diff --git a/packages/google-cloud-bigtable/samples/beam/hello_world_write_test.py b/packages/google-cloud-bigtable/samples/beam/hello_world_write_test.py new file mode 100644 index 000000000000..cdbecc661e3c --- /dev/null +++ b/packages/google-cloud-bigtable/samples/beam/hello_world_write_test.py @@ -0,0 +1,57 @@ +# Copyright 2020 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
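As a usage note for the Beam sample above: the pipeline can be driven from the command line or programmatically, as the test below does. A minimal sketch, assuming placeholder project/instance/table IDs and Beam's standard `--runner` pipeline option:

```python
# Minimal sketch of invoking the hello_world_write pipeline defined above.
# The project/instance/table IDs are placeholders; --runner=DirectRunner is
# the standard Beam option for running the pipeline locally.
import hello_world_write

hello_world_write.run([
    "--runner=DirectRunner",
    "--bigtable-project=my-project",
    "--bigtable-instance=my-instance",
    "--bigtable-table=mobile-time-series-demo",
])
```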
+import os +import uuid + +from google.cloud import bigtable +import pytest + +import hello_world_write + +PROJECT = os.environ['GOOGLE_CLOUD_PROJECT'] +BIGTABLE_INSTANCE = os.environ['BIGTABLE_INSTANCE'] +TABLE_ID_PREFIX = 'mobile-time-series-{}' + + +@pytest.fixture(scope="module", autouse=True) +def table_id(): + client = bigtable.Client(project=PROJECT, admin=True) + instance = client.instance(BIGTABLE_INSTANCE) + + table_id = TABLE_ID_PREFIX.format(str(uuid.uuid4())[:16]) + table = instance.table(table_id) + if table.exists(): + table.delete() + + table.create(column_families={'stats_summary': None}) + yield table_id + + table.delete() + + +def test_hello_world_write(table_id): + hello_world_write.run([ + '--bigtable-project=%s' % PROJECT, + '--bigtable-instance=%s' % BIGTABLE_INSTANCE, + '--bigtable-table=%s' % table_id]) + + client = bigtable.Client(project=PROJECT, admin=True) + instance = client.instance(BIGTABLE_INSTANCE) + table = instance.table(table_id) + + rows = table.read_rows() + count = 0 + for _ in rows: + count += 1 + assert count == 2 diff --git a/packages/google-cloud-bigtable/samples/beam/noxfile.py b/packages/google-cloud-bigtable/samples/beam/noxfile.py new file mode 100644 index 000000000000..ba55d7ce53ca --- /dev/null +++ b/packages/google-cloud-bigtable/samples/beam/noxfile.py @@ -0,0 +1,224 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import os +from pathlib import Path +import sys + +import nox + + +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING +# DO NOT EDIT THIS FILE EVER! +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING + +# Copy `noxfile_config.py` to your directory and modify it instead. + + +# `TEST_CONFIG` dict is a configuration hook that allows users to +# modify the test configurations. The values here should be in sync +# with `noxfile_config.py`. Users will copy `noxfile_config.py` into +# their directory and modify it. + +TEST_CONFIG = { + # You can opt out from the test for specific Python versions. + 'ignored_versions': ["2.7"], + + # An envvar key for determining the project id to use. Change it + # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a + # build specific Cloud project. You can also use your own string + # to use your own Cloud project. + 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', + # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + + # A dictionary you want to inject into your test. Don't put any + # secrets here. These values will override predefined values. + 'envs': {}, +} + + +try: + # Ensure we can import noxfile_config in the project's directory. + sys.path.append('.') + from noxfile_config import TEST_CONFIG_OVERRIDE +except ImportError as e: + print("No user noxfile_config found: detail: {}".format(e)) + TEST_CONFIG_OVERRIDE = {} + +# Update the TEST_CONFIG with the user supplied values. 
+TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) + + +def get_pytest_env_vars(): + """Returns a dict for pytest invocation.""" + ret = {} + + # Override the GCLOUD_PROJECT and the alias. + env_key = TEST_CONFIG['gcloud_project_env'] + # This should error out if not set. + ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] + + # Apply user supplied envs. + ret.update(TEST_CONFIG['envs']) + return ret + + +# DO NOT EDIT - automatically generated. +# All versions used to tested samples. +ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] + +# Any default versions that should be ignored. +IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] + +TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) + +INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False)) +# +# Style Checks +# + + +def _determine_local_import_names(start_dir): + """Determines all import names that should be considered "local". + + This is used when running the linter to insure that import order is + properly checked. + """ + file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)] + return [ + basename + for basename, extension in file_ext_pairs + if extension == ".py" + or os.path.isdir(os.path.join(start_dir, basename)) + and basename not in ("__pycache__") + ] + + +# Linting with flake8. +# +# We ignore the following rules: +# E203: whitespace before ‘:’ +# E266: too many leading ‘#’ for block comment +# E501: line too long +# I202: Additional newline in a section of imports +# +# We also need to specify the rules which are ignored by default: +# ['E226', 'W504', 'E126', 'E123', 'W503', 'E24', 'E704', 'E121'] +FLAKE8_COMMON_ARGS = [ + "--show-source", + "--builtin=gettext", + "--max-complexity=20", + "--import-order-style=google", + "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py", + "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202", + "--max-line-length=88", +] + + +@nox.session +def lint(session): + session.install("flake8", "flake8-import-order") + + local_names = _determine_local_import_names(".") + args = FLAKE8_COMMON_ARGS + [ + "--application-import-names", + ",".join(local_names), + "." + ] + session.run("flake8", *args) + + +# +# Sample Tests +# + + +PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] + + +def _session_tests(session, post_install=None): + """Runs py.test for a particular project.""" + if os.path.exists("requirements.txt"): + session.install("-r", "requirements.txt") + + if os.path.exists("requirements-test.txt"): + session.install("-r", "requirements-test.txt") + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. + # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars() + ) + + +@nox.session(python=ALL_VERSIONS) +def py(session): + """Runs py.test for a sample using the specified version of Python.""" + if session.python in TESTED_VERSIONS: + _session_tests(session) + else: + session.skip("SKIPPED: {} tests are disabled for this sample.".format( + session.python + )) + + +# +# Readmegen +# + + +def _get_repo_root(): + """ Returns the root folder of the project. """ + # Get root of this repository. Assume we don't have directories nested deeper than 10 items. 
+ p = Path(os.getcwd()) + for i in range(10): + if p is None: + break + if Path(p / ".git").exists(): + return str(p) + p = p.parent + raise Exception("Unable to detect repository root.") + + +GENERATED_READMES = sorted([x for x in Path(".").rglob("*.rst.in")]) + + +@nox.session +@nox.parametrize("path", GENERATED_READMES) +def readmegen(session, path): + """(Re-)generates the readme for a sample.""" + session.install("jinja2", "pyyaml") + dir_ = os.path.dirname(path) + + if os.path.exists(os.path.join(dir_, "requirements.txt")): + session.install("-r", os.path.join(dir_, "requirements.txt")) + + in_file = os.path.join(dir_, "README.rst.in") + session.run( + "python", _get_repo_root() + "/scripts/readme-gen/readme_gen.py", in_file + ) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements-test.txt b/packages/google-cloud-bigtable/samples/beam/requirements-test.txt new file mode 100644 index 000000000000..7e460c8c866e --- /dev/null +++ b/packages/google-cloud-bigtable/samples/beam/requirements-test.txt @@ -0,0 +1 @@ +pytest==6.0.1 diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt new file mode 100644 index 000000000000..2d7898e429fb --- /dev/null +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -0,0 +1,3 @@ +apache-beam==2.23.0 +google-cloud-bigtable==1.4.0 +google-cloud-core==1.3.0 \ No newline at end of file From 0cd205acda39e23250de81a80faa244de52c8501 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Sat, 8 Aug 2020 00:00:40 +0200 Subject: [PATCH 332/892] chore(deps): update dependency google-cloud-core to v1.4.1 (#102) Co-authored-by: Tres Seaver --- packages/google-cloud-bigtable/samples/hello/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/samples/hello/requirements.txt b/packages/google-cloud-bigtable/samples/hello/requirements.txt index 29ecf15a2b72..e1e7508caf87 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements.txt @@ -1,2 +1,2 @@ google-cloud-bigtable==1.2.1 -google-cloud-core==1.3.0 +google-cloud-core==1.4.1 From bb012790f359372be64192bae754330698068885 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Mon, 10 Aug 2020 17:03:14 +0200 Subject: [PATCH 333/892] chore(deps): update dependency google-cloud-bigtable to v1.4.0 (#101) --- packages/google-cloud-bigtable/samples/hello/requirements.txt | 2 +- .../samples/instanceadmin/requirements.txt | 2 +- .../google-cloud-bigtable/samples/metricscaler/requirements.txt | 2 +- .../google-cloud-bigtable/samples/quickstart/requirements.txt | 2 +- .../samples/snippets/filters/requirements.txt | 2 +- .../samples/snippets/reads/requirements.txt | 2 +- .../samples/snippets/writes/requirements.txt | 2 +- .../google-cloud-bigtable/samples/tableadmin/requirements.txt | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/hello/requirements.txt b/packages/google-cloud-bigtable/samples/hello/requirements.txt index e1e7508caf87..89c7140a1159 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==1.2.1 +google-cloud-bigtable==1.4.0 google-cloud-core==1.4.1 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt 
b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt index 2771c2e4c4d0..1f3adb6bf996 100755 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==1.2.1 +google-cloud-bigtable==1.4.0 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index ab135bd21a06..cb9f6488ee45 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==1.2.1 +google-cloud-bigtable==1.4.0 google-cloud-monitoring==1.0.0 diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt index 2771c2e4c4d0..1f3adb6bf996 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==1.2.1 +google-cloud-bigtable==1.4.0 diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt index a64e924f1be3..15192cadd2b6 100755 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==1.2.1 +google-cloud-bigtable==1.4.0 snapshottest==0.5.1 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt index a64e924f1be3..15192cadd2b6 100755 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==1.2.1 +google-cloud-bigtable==1.4.0 snapshottest==0.5.1 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt index 618a0d90714d..07d1f44fd5bb 100755 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==1.2.1 \ No newline at end of file +google-cloud-bigtable==1.4.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt index 2771c2e4c4d0..1f3adb6bf996 100755 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==1.2.1 +google-cloud-bigtable==1.4.0 From d0b166370d927b0fe292dd4a85228033a22d4c26 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Mon, 10 Aug 2020 13:18:32 -0400 Subject: [PATCH 334/892] tests: harden 'snippets_table.py' teardown against 429 (#106) Toward #87. 
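Context for the diff below: teardown_module() previously called Config.INSTANCE.delete() directly, so a single 429 (TooManyRequests) from the admin API failed the whole module; the patch wraps the call with test_utils.retry.RetryErrors. A roughly equivalent guard can be written with google.api_core.retry — shown here only as an illustrative sketch, not the code this commit adds:

    from google.api_core import exceptions, retry

    # Retry the instance delete on 429 responses for up to two minutes.
    retry_429 = retry.Retry(
        predicate=retry.if_exception_type(exceptions.TooManyRequests),
        deadline=120.0,
    )

    def teardown_module():
        # Config is the module-level holder defined in snippets_table.py.
        retry_429(Config.INSTANCE.delete)()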
--- packages/google-cloud-bigtable/docs/snippets_table.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/docs/snippets_table.py b/packages/google-cloud-bigtable/docs/snippets_table.py index 01b16cde07ce..767e3697555e 100644 --- a/packages/google-cloud-bigtable/docs/snippets_table.py +++ b/packages/google-cloud-bigtable/docs/snippets_table.py @@ -33,6 +33,8 @@ import pytest from test_utils.system import unique_resource_id +from test_utils.retry import RetryErrors +from google.api_core.exceptions import TooManyRequests from google.cloud._helpers import UTC from google.cloud.bigtable import Client from google.cloud.bigtable import enums @@ -64,6 +66,8 @@ CELL_VAL2 = b"cell-val2" ROW_KEY2 = b"row_key_id2" +retry_429 = RetryErrors(TooManyRequests, max_tries=9) + class Config(object): """Run-time configuration to be modified at set-up. @@ -102,7 +106,7 @@ def setup_module(): def teardown_module(): - Config.INSTANCE.delete() + retry_429(Config.INSTANCE.delete)() def test_bigtable_create_table(): From 7b78877ce95443d2e2f8272cfd7fad6a928dfe9e Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Wed, 12 Aug 2020 16:00:07 -0400 Subject: [PATCH 335/892] feat: pass 'client_options' to base class ctor (#104) Closes #69. --- .../google-cloud-bigtable/google/cloud/bigtable/client.py | 4 +++- packages/google-cloud-bigtable/setup.py | 2 +- packages/google-cloud-bigtable/tests/unit/test_client.py | 4 +++- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py index 935a0a3b6dd8..bbb830519e59 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py @@ -174,7 +174,9 @@ def __init__( self._admin_client_options = admin_client_options self._channel = channel self.SCOPE = self._get_scopes() - super(Client, self).__init__(project=project, credentials=credentials) + super(Client, self).__init__( + project=project, credentials=credentials, client_options=client_options, + ) def _get_scopes(self): """Get the scopes corresponding to admin / read-only state. 
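With the constructor change above, whatever is carried in client_options (the updated unit test below exercises quota_project_id) is now forwarded to the google-cloud-core base client as well, which lines up with the google-cloud-core minimum being raised in setup.py below. A minimal usage sketch, with placeholder project IDs:

    from google.api_core.client_options import ClientOptions
    from google.cloud import bigtable

    # The options passed here now reach the base class constructor too.
    options = ClientOptions(quota_project_id="my-quota-project")
    client = bigtable.Client(
        project="my-project", admin=True, client_options=options
    )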
diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index a8f544560605..ece5050d5ad5 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -30,7 +30,7 @@ release_status = 'Development Status :: 5 - Production/Stable' dependencies = [ "google-api-core[grpc] >= 1.14.0, < 2.0.0dev", - "google-cloud-core >= 1.0.3, < 2.0dev", + "google-cloud-core >= 1.4.1, < 2.0dev", "grpc-google-iam-v1 >= 0.12.3, < 0.13dev", ] extras = { diff --git a/packages/google-cloud-bigtable/tests/unit/test_client.py b/packages/google-cloud-bigtable/tests/unit/test_client.py index 8a2ef3c64b56..204e1a5c151b 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/test_client.py @@ -231,8 +231,10 @@ def test_table_data_client_not_initialized_w_client_info(self): self.assertIs(client._table_data_client, table_data_client) def test_table_data_client_not_initialized_w_client_options(self): + from google.api_core.client_options import ClientOptions + credentials = _make_credentials() - client_options = mock.Mock() + client_options = ClientOptions(quota_project_id="QUOTA-PROJECT") client = self._make_one( project=self.PROJECT, credentials=credentials, client_options=client_options ) From 006bda22f1696f9283a662020b20390b686ff1be Mon Sep 17 00:00:00 2001 From: Ryan Yuan Date: Wed, 19 Aug 2020 04:28:04 +1000 Subject: [PATCH 336/892] docs: use correct storage type constant in docstrings (#110) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Thank you for opening a Pull Request! Before submitting your PR, there are a few things you can do to make sure it goes smoothly: - [ ] Make sure to open an issue as a [bug/issue](https://github.com/googleapis/python-bigtable/issues/new/choose) before writing your code! That way we can discuss the change, evaluate designs, and agree on the general idea - [ ] Ensure the tests and linter pass - [ ] Code coverage does not decrease (if any source code was changed) - [ ] Appropriate docs were updated (if necessary) Fixes # 🦕 --- .../google-cloud-bigtable/google/cloud/bigtable/cluster.py | 2 +- .../google-cloud-bigtable/google/cloud/bigtable/instance.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py index b2957bc2a999..9048c94f4ab0 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py @@ -59,7 +59,7 @@ class Cluster(object): Possible values are represented by the following constants: :data:`google.cloud.bigtable.enums.StorageType.SSD`. - :data:`google.cloud.bigtable.enums.StorageType.SHD`, + :data:`google.cloud.bigtable.enums.StorageType.HDD`, Defaults to :data:`google.cloud.bigtable.enums.StorageType.UNSPECIFIED`. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py index c5d3ce8b688d..3656f40a4936 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py @@ -268,7 +268,7 @@ def create( Possible values are represented by the following constants: :data:`google.cloud.bigtable.enums.StorageType.SSD`. 
- :data:`google.cloud.bigtable.enums.StorageType.SHD`, + :data:`google.cloud.bigtable.enums.StorageType.HDD`, Defaults to :data:`google.cloud.bigtable.enums.StorageType.UNSPECIFIED`. @@ -564,7 +564,7 @@ def cluster( Possible values are represented by the following constants: :data:`google.cloud.bigtable.enums.StorageType.SSD`. - :data:`google.cloud.bigtable.enums.StorageType.SHD`, + :data:`google.cloud.bigtable.enums.StorageType.HDD`, Defaults to :data:`google.cloud.bigtable.enums.StorageType.UNSPECIFIED`. From 8d2bb6413a4960113dd750397845f6b07e27b0d7 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Fri, 21 Aug 2020 08:15:49 -0700 Subject: [PATCH 337/892] chore: Update protobuf workspace dependency to v3.13.0. (#111) PiperOrigin-RevId: 327026955 Source-Author: Google APIs Source-Date: Mon Aug 17 08:40:22 2020 -0700 Source-Repo: googleapis/googleapis Source-Sha: 0dc0a6c0f1a9f979bc0690f0caa5fbafa3000c2c Source-Link: https://github.com/googleapis/googleapis/commit/0dc0a6c0f1a9f979bc0690f0caa5fbafa3000c2c --- .../bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py | 2 +- .../bigtable_admin_v2/proto/bigtable_table_admin_pb2.py | 2 +- .../google/cloud/bigtable_admin_v2/proto/common_pb2.py | 2 +- .../google/cloud/bigtable_admin_v2/proto/instance_pb2.py | 2 +- .../google/cloud/bigtable_admin_v2/proto/table_pb2.py | 2 +- .../google/cloud/bigtable_v2/proto/bigtable_pb2.py | 2 +- .../google/cloud/bigtable_v2/proto/data_pb2.py | 2 +- packages/google-cloud-bigtable/synth.metadata | 6 +++--- 8 files changed, 10 insertions(+), 10 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py index bd4d621d66ec..63590907a22c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto - +"""Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py index aef2bfdcdb32..5ca167d87877 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto - +"""Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2.py index dd668ef3cb2b..09233cff5a02 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/bigtable_admin_v2/proto/common.proto - +"""Generated protocol buffer code.""" from google.protobuf.internal import enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py index 7c6e05fa5152..e0138e0fb0f7 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/bigtable_admin_v2/proto/instance.proto - +"""Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py index bd0f478fa09c..67238a81e909 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/bigtable_admin_v2/proto/table.proto - +"""Generated protocol buffer code.""" from google.protobuf.internal import enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py index b0f13cbba51e..ba711b20ca71 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: google/cloud/bigtable_v2/proto/bigtable.proto - +"""Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py index aa6f6737b9d0..a64f9b10e2df 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/bigtable_v2/proto/data.proto - +"""Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index b541fe17f804..a9453a02b0d1 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -4,15 +4,15 @@ "git": { "name": ".", "remote": "https://github.com/googleapis/python-bigtable.git", - "sha": "4796ac85c877d75ed596cde7628dae31918ef726" + "sha": "e55ca07561f9c946276f3bde599e69947769f560" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "db69b46790b55a82ab7cfa473d031da787bc7591", - "internalRef": "320411362" + "sha": "0dc0a6c0f1a9f979bc0690f0caa5fbafa3000c2c", + "internalRef": "327026955" } }, { From 9e227ae040227544fe9d44f1b6b60c82876cfc73 Mon Sep 17 00:00:00 2001 From: Alexa B Date: Fri, 21 Aug 2020 18:44:02 -0400 Subject: [PATCH 338/892] docs: Pysamples new readme gen (#112) This PR adds some new generation code to the synth.py to handle generation of the Python sample docs! 
Content of the sample descriptions/custom content is predominantly pulled from the existing docs --- .../google-cloud-bigtable/.repo-metadata.json | 56 +++++++- .../google-cloud-bigtable/samples/README.md | 25 ++++ .../samples/hello/README.md | 52 +++++++ .../samples/hello/README.rst | 115 ---------------- .../samples/hello/README.rst.in | 23 ---- .../samples/hello_happybase/README.md | 52 +++++++ .../samples/hello_happybase/README.rst | 122 ----------------- .../samples/hello_happybase/README.rst.in | 32 ----- .../samples/instanceadmin/README.md | 52 +++++++ .../samples/instanceadmin/README.rst | 120 ---------------- .../samples/instanceadmin/README.rst.in | 23 ---- .../samples/metricscaler/README.md | 52 +++++++ .../samples/metricscaler/README.rst | 128 ------------------ .../samples/metricscaler/README.rst.in | 29 ---- .../samples/quickstart/README.md | 52 +++++++ .../samples/quickstart/README.rst | 126 ----------------- .../samples/quickstart/README.rst.in | 23 ---- .../samples/quickstart_happybase/README.md | 52 +++++++ .../samples/quickstart_happybase/README.rst | 108 --------------- .../quickstart_happybase/README.rst.in | 23 ---- .../samples/snippets/README.md | 33 +++++ .../samples/tableadmin/README.md | 52 +++++++ .../samples/tableadmin/README.rst | 115 ---------------- .../samples/tableadmin/README.rst.in | 23 ---- packages/google-cloud-bigtable/synth.py | 5 +- 25 files changed, 481 insertions(+), 1012 deletions(-) create mode 100644 packages/google-cloud-bigtable/samples/README.md create mode 100644 packages/google-cloud-bigtable/samples/hello/README.md delete mode 100644 packages/google-cloud-bigtable/samples/hello/README.rst delete mode 100644 packages/google-cloud-bigtable/samples/hello/README.rst.in create mode 100644 packages/google-cloud-bigtable/samples/hello_happybase/README.md delete mode 100644 packages/google-cloud-bigtable/samples/hello_happybase/README.rst delete mode 100644 packages/google-cloud-bigtable/samples/hello_happybase/README.rst.in create mode 100644 packages/google-cloud-bigtable/samples/instanceadmin/README.md delete mode 100644 packages/google-cloud-bigtable/samples/instanceadmin/README.rst delete mode 100644 packages/google-cloud-bigtable/samples/instanceadmin/README.rst.in create mode 100644 packages/google-cloud-bigtable/samples/metricscaler/README.md delete mode 100644 packages/google-cloud-bigtable/samples/metricscaler/README.rst delete mode 100644 packages/google-cloud-bigtable/samples/metricscaler/README.rst.in create mode 100644 packages/google-cloud-bigtable/samples/quickstart/README.md delete mode 100644 packages/google-cloud-bigtable/samples/quickstart/README.rst delete mode 100644 packages/google-cloud-bigtable/samples/quickstart/README.rst.in create mode 100644 packages/google-cloud-bigtable/samples/quickstart_happybase/README.md delete mode 100644 packages/google-cloud-bigtable/samples/quickstart_happybase/README.rst delete mode 100644 packages/google-cloud-bigtable/samples/quickstart_happybase/README.rst.in create mode 100644 packages/google-cloud-bigtable/samples/snippets/README.md create mode 100644 packages/google-cloud-bigtable/samples/tableadmin/README.md delete mode 100644 packages/google-cloud-bigtable/samples/tableadmin/README.rst delete mode 100644 packages/google-cloud-bigtable/samples/tableadmin/README.rst.in diff --git a/packages/google-cloud-bigtable/.repo-metadata.json b/packages/google-cloud-bigtable/.repo-metadata.json index cfda5a11e0ec..7c1f86991b0d 100644 --- a/packages/google-cloud-bigtable/.repo-metadata.json +++ 
b/packages/google-cloud-bigtable/.repo-metadata.json @@ -9,5 +9,59 @@ "repo": "googleapis/python-bigtable", "distribution_name": "google-cloud-bigtable", "api_id": "bigtable.googleapis.com", - "requires_billing": true + "requires_billing": true, + "samples": [ + {"name": "Hello World in Cloud Bigtable", + "description": "Demonstrates how to connect to Cloud Bigtable and run some basic operations. More information available at: https://cloud.google.com/bigtable/docs/samples-python-hello", + "file": "main.py", + "runnable": true, + "custom_content": "
usage: main.py [-h] [--table TABLE] project_id instance_id
Demonstrates how to connect to Cloud Bigtable and run some basic operations.
Prerequisites: - Create a Cloud Bigtable cluster.
https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google
Application Default Credentials.
https://developers.google.com/identity/protocols/application-default-
credentials


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Table to create and destroy. (default: Hello-Bigtable)
", + "override_path": "hello"}, + + {"name": "Hello World using HappyBase", + "description": "This sample demonstrates using the Google Cloud Client Library HappyBase package, an implementation of the HappyBase API to connect to and interact with Cloud Bigtable. More information available at: https://cloud.google.com/bigtable/docs/samples-python-hello-happybase", + "file": "main.py", + "runnable": true, + "custom_content": "
usage: main.py [-h] [--table TABLE] project_id instance_id
Demonstrates how to connect to Cloud Bigtable and run some basic operations.
Prerequisites: - Create a Cloud Bigtable cluster.
https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google
Application Default Credentials.
https://developers.google.com/identity/protocols/application-default-
credentials


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Table to create and destroy. (default: Hello-Bigtable)
", + "override_path": "hello_happybase"}, + + {"name": "cbt Command Demonstration", + "description": "This page explains how to use the cbt command to connect to a Cloud Bigtable instance, perform basic administrative tasks, and read and write data in a table. More information about this quickstart is available at https://cloud.google.com/bigtable/docs/quickstart-cbt", + "file": "instanceadmin.py", + "runnable": true, + "custom_content" : "
usage: instanceadmin.py [-h] [run] [dev-instance] [del-instance] [add-cluster] [del-cluster] project_id instance_id cluster_id
Demonstrates how to connect to Cloud Bigtable and run some basic operations.
Prerequisites: - Create a Cloud Bigtable cluster.
https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google
Application Default Credentials.
https://developers.google.com/identity/protocols/application-default-
credentials


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Table to create and destroy. (default: Hello-Bigtable)
", + "override_path": "instanceadmin"}, + + {"name": "Metric Scaler", + "description": "This sample demonstrates how to use Stackdriver Monitoring to scale Cloud Bigtable based on CPU usage.", + "file": "metricscaler.py", + "runnable": true, + "custom_content": "
usage: metricscaler.py [-h] [--high_cpu_threshold HIGH_CPU_THRESHOLD] [--low_cpu_threshold LOW_CPU_THRESHOLD] [--short_sleep SHORT_SLEEP] [--long_sleep LONG_SLEEP] bigtable_instance bigtable_cluster
usage: metricscaler.py [-h] [--high_cpu_threshold HIGH_CPU_THRESHOLD]
                       [--low_cpu_threshold LOW_CPU_THRESHOLD]
                       [--short_sleep SHORT_SLEEP] [--long_sleep LONG_SLEEP]
                       bigtable_instance bigtable_cluster


Scales Cloud Bigtable clusters based on CPU usage.


positional arguments:
  bigtable_instance     ID of the Cloud Bigtable instance to connect to.
  bigtable_cluster      ID of the Cloud Bigtable cluster to connect to.


optional arguments:
  -h, --help            show this help message and exit
  --high_cpu_threshold HIGH_CPU_THRESHOLD
                        If Cloud Bigtable CPU usage is above this threshold,
                        scale up
  --low_cpu_threshold LOW_CPU_THRESHOLD
                        If Cloud Bigtable CPU usage is below this threshold,
                        scale down
  --short_sleep SHORT_SLEEP
                        How long to sleep in seconds between checking metrics
                        after no scale operation
  --long_sleep LONG_SLEEP
                        How long to sleep in seconds between checking metrics
                        after a scaling operation
", + "override_path": "metricscaler"}, + + {"name": "Quickstart", + "description": "Demonstrates of Cloud Bigtable. This sample creates a Bigtable client, connects to an instance and then to a table, then closes the connection.", + "file": "main.py", + "runnable": true, + "custom_content": "
usage: main.py [-h] [--table TABLE] project_id instance_id
usage: main.py [-h] [--table TABLE] project_id instance_id


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Existing table used in the quickstart. (default: my-table)
", + "override_path": "quickstart"}, + + {"name": "Quickstart using HappyBase", + "description": "Demonstrates of Cloud Bigtable using HappyBase. This sample creates a Bigtable client, connects to an instance and then to a table, then closes the connection.", + "file": "main.py", + "runnable": true, + "custom_content": "
usage: main.py [-h] [--table TABLE] project_id instance_id
usage: main.py [-h] [--table TABLE] project_id instance_id


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Existing table used in the quickstart. (default: my-table)
usage: tableadmin.py [-h] [run] [delete] [--table TABLE] project_id instance_id
usage: tableadmin.py [-h] [run] [delete] [--table TABLE] project_id instance_id


Demonstrates how to connect to Cloud Bigtable and run some basic operations.
Prerequisites: - Create a Cloud Bigtable cluster.
https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google
Application Default Credentials.
https://developers.google.com/identity/protocols/application-default-
credentials


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Table to create and destroy. (default: Hello-Bigtable)
", + "override_path": "tableadmin"} + ] } \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/README.md b/packages/google-cloud-bigtable/samples/README.md new file mode 100644 index 000000000000..ae3123e7ff13 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/README.md @@ -0,0 +1,25 @@ +[//]: # "This README.md file is auto-generated, all changes to this file will be lost." +[//]: # "To regenerate it, use `python -m synthtool`." + +## Python Samples for Cloud Bigtable + +This directory contains samples for Cloud Bigtable, which may be used as a refererence for how to use Cloud Bigtable. + +## Additional Information + +You can read the documentation for more details on API usage and use GitHub +to [browse the source][source] and [report issues][issues]. + +### Contributing +For [contributing guidelines][contrib_guide], the [Python style guide][py_style], and more information on prerequisite steps to contribute, view the source code at googleapis/python-bigtable. + +[authentication]: https://cloud.google.com/docs/authentication/getting-started +[enable_billing]:https://cloud.google.com/apis/docs/getting-started#enabling_billing +[client_library_python]: https://googlecloudplatform.github.io/google-cloud-python/ +[source]: https://github.com/GoogleCloudPlatform/google-cloud-python +[issues]: https://github.com/GoogleCloudPlatform/google-cloud-python/issues +[contrib_guide]: https://github.com/googleapis/google-cloud-python/blob/master/CONTRIBUTING.rst +[py_style]: http://google.github.io/styleguide/pyguide.html +[cloud_sdk]: https://cloud.google.com/sdk/docs +[gcloud_shell]: https://cloud.google.com/shell/docs +[gcloud_shell]: https://cloud.google.com/shell/docs diff --git a/packages/google-cloud-bigtable/samples/hello/README.md b/packages/google-cloud-bigtable/samples/hello/README.md new file mode 100644 index 000000000000..0fa601de7378 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/hello/README.md @@ -0,0 +1,52 @@ +[//]: # "This README.md file is auto-generated, all changes to this file will be lost." +[//]: # "To regenerate it, use `python -m synthtool`." + +## Python Samples for Cloud Bigtable + +This directory contains samples for Cloud Bigtable, which may be used as a refererence for how to use Cloud Bigtable. +Samples, quickstarts, and other documentation are available at cloud.google.com. + + +### Hello World in Cloud Bigtable + +Demonstrates how to connect to Cloud Bigtable and run some basic operations. More information available at: https://cloud.google.com/bigtable/docs/samples-python-hello + + +Open in Cloud Shell + + +To run this sample: + +1. If this is your first time working with GCP products, you will need to set up [the Cloud SDK][cloud_sdk] or utilize [Google Cloud Shell][gcloud_shell]. This sample may [require authetication][authentication] and you will need to [enable billing][enable_billing]. + +1. Make a fork of this repo and clone the branch locally, then navigate to the sample directory you want to use. + +1. Install the dependencies needed to run the samples. + + pip install -r requirements.txt + +1. Run the sample using + + python main.py + + +
usage: main.py [-h] [--table TABLE] project_id instance_id
Demonstrates how to connect to Cloud Bigtable and run some basic operations.
Prerequisites: - Create a Cloud Bigtable cluster.
https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google
Application Default Credentials.
https://developers.google.com/identity/protocols/application-default-
credentials


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Table to create and destroy. (default: Hello-Bigtable)
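For orientation, the core of such a hello world with the google-cloud-bigtable client looks roughly like the hand-written sketch below; it is not the contents of main.py, and the table name, row key, and value are placeholders:

    from google.cloud import bigtable
    from google.cloud.bigtable import column_family

    client = bigtable.Client(project="my-project", admin=True)
    instance = client.instance("my-instance")

    table = instance.table("Hello-Bigtable")
    # One column family that keeps only the latest cell version.
    table.create(column_families={"cf1": column_family.MaxVersionsGCRule(1)})

    row = table.direct_row(b"greeting0")
    row.set_cell("cf1", b"greeting", b"Hello World!")
    row.commit()

    print(table.read_row(b"greeting0").cells["cf1"][b"greeting"][0].value)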
+ +## Additional Information + +You can read the documentation for more details on API usage and use GitHub +to [browse the source][source] and [report issues][issues]. + +### Contributing +For [contributing guidelines][contrib_guide], the [Python style guide][py_style], and more information on prerequisite steps to contribute, view the source code at googleapis/python-bigtable. + +[authentication]: https://cloud.google.com/docs/authentication/getting-started +[enable_billing]:https://cloud.google.com/apis/docs/getting-started#enabling_billing +[client_library_python]: https://googlecloudplatform.github.io/google-cloud-python/ +[source]: https://github.com/GoogleCloudPlatform/google-cloud-python +[issues]: https://github.com/GoogleCloudPlatform/google-cloud-python/issues +[contrib_guide]: https://github.com/googleapis/google-cloud-python/blob/master/CONTRIBUTING.rst +[py_style]: http://google.github.io/styleguide/pyguide.html +[cloud_sdk]: https://cloud.google.com/sdk/docs +[gcloud_shell]: https://cloud.google.com/shell/docs +[gcloud_shell]: https://cloud.google.com/shell/docs diff --git a/packages/google-cloud-bigtable/samples/hello/README.rst b/packages/google-cloud-bigtable/samples/hello/README.rst deleted file mode 100644 index 893932ad5e73..000000000000 --- a/packages/google-cloud-bigtable/samples/hello/README.rst +++ /dev/null @@ -1,115 +0,0 @@ -.. This file is automatically generated. Do not edit this file directly. - -Google Cloud Bigtable Python Samples -=============================================================================== - -.. image:: https://gstatic.com/cloudssh/images/open-btn.png - :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=bigtable/hello/README.rst - - -This directory contains samples for Google Cloud Bigtable. `Google Cloud Bigtable`_ is Google's NoSQL Big Data database service. It's the same database that powers many core Google services, including Search, Analytics, Maps, and Gmail. - - - - -.. _Google Cloud Bigtable: https://cloud.google.com/bigtable/docs - -Setup -------------------------------------------------------------------------------- - - -Authentication -++++++++++++++ - -This sample requires you to have authentication setup. Refer to the -`Authentication Getting Started Guide`_ for instructions on setting up -credentials for applications. - -.. _Authentication Getting Started Guide: - https://cloud.google.com/docs/authentication/getting-started - -Install Dependencies -++++++++++++++++++++ - -#. Clone python-docs-samples and change directory to the sample directory you want to use. - - .. code-block:: bash - - $ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git - -#. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. - - .. _Python Development Environment Setup Guide: - https://cloud.google.com/python/setup - -#. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. - - .. code-block:: bash - - $ virtualenv env - $ source env/bin/activate - -#. Install the dependencies needed to run the samples. - - .. code-block:: bash - - $ pip install -r requirements.txt - -.. _pip: https://pip.pypa.io/ -.. 
_virtualenv: https://virtualenv.pypa.io/ - -Samples -------------------------------------------------------------------------------- - -Basic example -+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - -.. image:: https://gstatic.com/cloudssh/images/open-btn.png - :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=bigtable/hello/main.py,bigtable/hello/README.rst - - - - -To run this sample: - -.. code-block:: bash - - $ python main.py - - usage: main.py [-h] [--table TABLE] project_id instance_id - - Demonstrates how to connect to Cloud Bigtable and run some basic operations. - Prerequisites: - Create a Cloud Bigtable cluster. - https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google - Application Default Credentials. - https://developers.google.com/identity/protocols/application-default- - credentials - - positional arguments: - project_id Your Cloud Platform project ID. - instance_id ID of the Cloud Bigtable instance to connect to. - - optional arguments: - -h, --help show this help message and exit - --table TABLE Table to create and destroy. (default: Hello-Bigtable) - - - - - -The client library -------------------------------------------------------------------------------- - -This sample uses the `Google Cloud Client Library for Python`_. -You can read the documentation for more details on API usage and use GitHub -to `browse the source`_ and `report issues`_. - -.. _Google Cloud Client Library for Python: - https://googlecloudplatform.github.io/google-cloud-python/ -.. _browse the source: - https://github.com/GoogleCloudPlatform/google-cloud-python -.. _report issues: - https://github.com/GoogleCloudPlatform/google-cloud-python/issues - - -.. _Google Cloud SDK: https://cloud.google.com/sdk/ \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/hello/README.rst.in b/packages/google-cloud-bigtable/samples/hello/README.rst.in deleted file mode 100644 index ed9253c115a4..000000000000 --- a/packages/google-cloud-bigtable/samples/hello/README.rst.in +++ /dev/null @@ -1,23 +0,0 @@ -# This file is used to generate README.rst - -product: - name: Google Cloud Bigtable - short_name: Cloud Bigtable - url: https://cloud.google.com/bigtable/docs - description: > - `Google Cloud Bigtable`_ is Google's NoSQL Big Data database service. It's - the same database that powers many core Google services, including Search, - Analytics, Maps, and Gmail. - -setup: -- auth -- install_deps - -samples: -- name: Basic example - file: main.py - show_help: true - -cloud_client_library: true - -folder: bigtable/hello \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/README.md b/packages/google-cloud-bigtable/samples/hello_happybase/README.md new file mode 100644 index 000000000000..c56e68e4b324 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/hello_happybase/README.md @@ -0,0 +1,52 @@ +[//]: # "This README.md file is auto-generated, all changes to this file will be lost." +[//]: # "To regenerate it, use `python -m synthtool`." + +## Python Samples for Cloud Bigtable + +This directory contains samples for Cloud Bigtable, which may be used as a refererence for how to use Cloud Bigtable. +Samples, quickstarts, and other documentation are available at cloud.google.com. 
+ + +### Hello World using HappyBase + +This sample demonstrates using the Google Cloud Client Library HappyBase package, an implementation of the HappyBase API to connect to and interact with Cloud Bigtable. More information available at: https://cloud.google.com/bigtable/docs/samples-python-hello-happybase + + +Open in Cloud Shell + + +To run this sample: + +1. If this is your first time working with GCP products, you will need to set up [the Cloud SDK][cloud_sdk] or utilize [Google Cloud Shell][gcloud_shell]. This sample may [require authetication][authentication] and you will need to [enable billing][enable_billing]. + +1. Make a fork of this repo and clone the branch locally, then navigate to the sample directory you want to use. + +1. Install the dependencies needed to run the samples. + + pip install -r requirements.txt + +1. Run the sample using + + python main.py + + +
usage: main.py [-h] [--table TABLE] project_id instance_id
Demonstrates how to connect to Cloud Bigtable and run some basic operations.
Prerequisites: - Create a Cloud Bigtable cluster.
https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google
Application Default Credentials.
https://developers.google.com/identity/protocols/application-default-
credentials


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Table to create and destroy. (default: Hello-Bigtable)
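For orientation, the HappyBase flavor goes through the google-cloud-happybase package, which puts the familiar HappyBase table API in front of Bigtable. The sketch below is hand-written and assumes that package is installed; it is not the sample's actual main.py, and all names are placeholders:

    from google.cloud import bigtable
    from google.cloud import happybase

    client = bigtable.Client(project="my-project", admin=True)
    instance = client.instance("my-instance")

    connection = happybase.Connection(instance=instance)
    try:
        connection.create_table("Hello-Bigtable", {"cf1": dict()})
        table = connection.table("Hello-Bigtable")
        table.put(b"greeting0", {b"cf1:greeting": b"Hello World!"})
        print(table.row(b"greeting0")[b"cf1:greeting"])
    finally:
        connection.close()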
+ +## Additional Information + +You can read the documentation for more details on API usage and use GitHub +to [browse the source][source] and [report issues][issues]. + +### Contributing +For [contributing guidelines][contrib_guide], the [Python style guide][py_style], and more information on prerequisite steps to contribute, view the source code at googleapis/python-bigtable. + +[authentication]: https://cloud.google.com/docs/authentication/getting-started +[enable_billing]:https://cloud.google.com/apis/docs/getting-started#enabling_billing +[client_library_python]: https://googlecloudplatform.github.io/google-cloud-python/ +[source]: https://github.com/GoogleCloudPlatform/google-cloud-python +[issues]: https://github.com/GoogleCloudPlatform/google-cloud-python/issues +[contrib_guide]: https://github.com/googleapis/google-cloud-python/blob/master/CONTRIBUTING.rst +[py_style]: http://google.github.io/styleguide/pyguide.html +[cloud_sdk]: https://cloud.google.com/sdk/docs +[gcloud_shell]: https://cloud.google.com/shell/docs +[gcloud_shell]: https://cloud.google.com/shell/docs diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/README.rst b/packages/google-cloud-bigtable/samples/hello_happybase/README.rst deleted file mode 100644 index 82a376535373..000000000000 --- a/packages/google-cloud-bigtable/samples/hello_happybase/README.rst +++ /dev/null @@ -1,122 +0,0 @@ -.. This file is automatically generated. Do not edit this file directly. - -Google Cloud Bigtable Python Samples -=============================================================================== - -.. image:: https://gstatic.com/cloudssh/images/open-btn.png - :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=bigtable/hello_happybase/README.rst - - -This directory contains samples for Google Cloud Bigtable. `Google Cloud Bigtable`_ is Google's NoSQL Big Data database service. It's the same database that powers many core Google services, including Search, Analytics, Maps, and Gmail. - - -This sample demonstrates using the `Google Cloud Client Library HappyBase -package`_, an implementation of the `HappyBase API`_ to connect to and -interact with Cloud Bigtable. - -.. _Google Cloud Client Library HappyBase package: - https://github.com/GoogleCloudPlatform/google-cloud-python-happybase -.. _HappyBase API: http://happybase.readthedocs.io/en/stable/ - - -.. _Google Cloud Bigtable: https://cloud.google.com/bigtable/docs - -Setup -------------------------------------------------------------------------------- - - -Authentication -++++++++++++++ - -This sample requires you to have authentication setup. Refer to the -`Authentication Getting Started Guide`_ for instructions on setting up -credentials for applications. - -.. _Authentication Getting Started Guide: - https://cloud.google.com/docs/authentication/getting-started - -Install Dependencies -++++++++++++++++++++ - -#. Clone python-docs-samples and change directory to the sample directory you want to use. - - .. code-block:: bash - - $ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git - -#. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. - - .. _Python Development Environment Setup Guide: - https://cloud.google.com/python/setup - -#. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. - - .. 
code-block:: bash - - $ virtualenv env - $ source env/bin/activate - -#. Install the dependencies needed to run the samples. - - .. code-block:: bash - - $ pip install -r requirements.txt - -.. _pip: https://pip.pypa.io/ -.. _virtualenv: https://virtualenv.pypa.io/ - -Samples -------------------------------------------------------------------------------- - -Basic example -+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - -.. image:: https://gstatic.com/cloudssh/images/open-btn.png - :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=bigtable/hello_happybase/main.py,bigtable/hello_happybase/README.rst - - - - -To run this sample: - -.. code-block:: bash - - $ python main.py - - usage: main.py [-h] [--table TABLE] project_id instance_id - - Demonstrates how to connect to Cloud Bigtable and run some basic operations. - Prerequisites: - Create a Cloud Bigtable cluster. - https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google - Application Default Credentials. - https://developers.google.com/identity/protocols/application-default- - credentials - - positional arguments: - project_id Your Cloud Platform project ID. - instance_id ID of the Cloud Bigtable instance to connect to. - - optional arguments: - -h, --help show this help message and exit - --table TABLE Table to create and destroy. (default: Hello-Bigtable) - - - - - -The client library -------------------------------------------------------------------------------- - -This sample uses the `Google Cloud Client Library for Python`_. -You can read the documentation for more details on API usage and use GitHub -to `browse the source`_ and `report issues`_. - -.. _Google Cloud Client Library for Python: - https://googlecloudplatform.github.io/google-cloud-python/ -.. _browse the source: - https://github.com/GoogleCloudPlatform/google-cloud-python -.. _report issues: - https://github.com/GoogleCloudPlatform/google-cloud-python/issues - - -.. _Google Cloud SDK: https://cloud.google.com/sdk/ \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/README.rst.in b/packages/google-cloud-bigtable/samples/hello_happybase/README.rst.in deleted file mode 100644 index 8ef6a956b5e9..000000000000 --- a/packages/google-cloud-bigtable/samples/hello_happybase/README.rst.in +++ /dev/null @@ -1,32 +0,0 @@ -# This file is used to generate README.rst - -product: - name: Google Cloud Bigtable - short_name: Cloud Bigtable - url: https://cloud.google.com/bigtable/docs - description: > - `Google Cloud Bigtable`_ is Google's NoSQL Big Data database service. It's - the same database that powers many core Google services, including Search, - Analytics, Maps, and Gmail. - -description: | - This sample demonstrates using the `Google Cloud Client Library HappyBase - package`_, an implementation of the `HappyBase API`_ to connect to and - interact with Cloud Bigtable. - - .. _Google Cloud Client Library HappyBase package: - https://github.com/GoogleCloudPlatform/google-cloud-python-happybase - .. 
_HappyBase API: http://happybase.readthedocs.io/en/stable/ - -setup: -- auth -- install_deps - -samples: -- name: Basic example - file: main.py - show_help: true - -cloud_client_library: true - -folder: bigtable/hello_happybase \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/README.md b/packages/google-cloud-bigtable/samples/instanceadmin/README.md new file mode 100644 index 000000000000..e6ee71ad56db --- /dev/null +++ b/packages/google-cloud-bigtable/samples/instanceadmin/README.md @@ -0,0 +1,52 @@ +[//]: # "This README.md file is auto-generated, all changes to this file will be lost." +[//]: # "To regenerate it, use `python -m synthtool`." + +## Python Samples for Cloud Bigtable + +This directory contains samples for Cloud Bigtable, which may be used as a refererence for how to use Cloud Bigtable. +Samples, quickstarts, and other documentation are available at cloud.google.com. + + +### cbt Command Demonstration + +This page explains how to use the cbt command to connect to a Cloud Bigtable instance, perform basic administrative tasks, and read and write data in a table. More information about this quickstart is available at https://cloud.google.com/bigtable/docs/quickstart-cbt + + +Open in Cloud Shell + + +To run this sample: + +1. If this is your first time working with GCP products, you will need to set up [the Cloud SDK][cloud_sdk] or utilize [Google Cloud Shell][gcloud_shell]. This sample may [require authetication][authentication] and you will need to [enable billing][enable_billing]. + +1. Make a fork of this repo and clone the branch locally, then navigate to the sample directory you want to use. + +1. Install the dependencies needed to run the samples. + + pip install -r requirements.txt + +1. Run the sample using + + python instanceadmin.py + + +
usage: instanceadmin.py [-h] [run] [dev-instance] [del-instance] [add-cluster] [del-cluster] project_id instance_id cluster_id
Demonstrates how to connect to Cloud Bigtable and run some basic operations.
Prerequisites: - Create a Cloud Bigtable cluster.
https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google
Application Default Credentials.
https://developers.google.com/identity/protocols/application-default-
credentials


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Table to create and destroy. (default: Hello-Bigtable)
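For orientation, the instance and cluster creation that instanceadmin.py automates looks roughly like the sketch below with the google-cloud-bigtable client; the IDs, location, label, and node count are placeholders, and this is not the sample's actual code:

    from google.cloud import bigtable
    from google.cloud.bigtable import enums

    client = bigtable.Client(project="my-project", admin=True)

    instance = client.instance(
        "my-instance",
        instance_type=enums.Instance.Type.PRODUCTION,
        labels={"env": "dev"},
    )
    cluster = instance.cluster(
        "my-cluster",
        location_id="us-central1-f",
        serve_nodes=3,
        default_storage_type=enums.StorageType.SSD,
    )
    # create() starts a long-running operation; block until it completes.
    operation = instance.create(clusters=[cluster])
    operation.result(timeout=120)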
+ +## Additional Information + +You can read the documentation for more details on API usage and use GitHub +to [browse the source][source] and [report issues][issues]. + +### Contributing +For [contributing guidelines][contrib_guide], the [Python style guide][py_style], and more information on prerequisite steps to contribute, view the source code at googleapis/python-bigtable. + +[authentication]: https://cloud.google.com/docs/authentication/getting-started +[enable_billing]:https://cloud.google.com/apis/docs/getting-started#enabling_billing +[client_library_python]: https://googlecloudplatform.github.io/google-cloud-python/ +[source]: https://github.com/GoogleCloudPlatform/google-cloud-python +[issues]: https://github.com/GoogleCloudPlatform/google-cloud-python/issues +[contrib_guide]: https://github.com/googleapis/google-cloud-python/blob/master/CONTRIBUTING.rst +[py_style]: http://google.github.io/styleguide/pyguide.html +[cloud_sdk]: https://cloud.google.com/sdk/docs +[gcloud_shell]: https://cloud.google.com/shell/docs +[gcloud_shell]: https://cloud.google.com/shell/docs diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/README.rst b/packages/google-cloud-bigtable/samples/instanceadmin/README.rst deleted file mode 100644 index 16f176a6099c..000000000000 --- a/packages/google-cloud-bigtable/samples/instanceadmin/README.rst +++ /dev/null @@ -1,120 +0,0 @@ -.. This file is automatically generated. Do not edit this file directly. - - -Google Cloud Bigtable table creation -=============================================================================== - -https://cloud.google.com/bigtable/docs/quickstart-cbt - -This page explains how to use the cbt command to connect to a Cloud Bigtable instance, perform basic administrative tasks, and read and write data in a table. - -Google Cloud Bigtable Python Samples -=============================================================================== - -.. image:: https://gstatic.com/cloudssh/images/open-btn.png - :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=bigtable/hello/README.rst - - -This directory contains samples for Google Cloud Bigtable. `Google Cloud Bigtable`_ is Google's NoSQL Big Data database service. It's the same database that powers many core Google services, including Search, Analytics, Maps, and Gmail. - - - - -.. _Google Cloud Bigtable: https://cloud.google.com/bigtable/docs - -Setup -------------------------------------------------------------------------------- - - -Authentication -++++++++++++++ - -This sample requires you to have authentication setup. Refer to the -`Authentication Getting Started Guide`_ for instructions on setting up -credentials for applications. - -.. _Authentication Getting Started Guide: - https://cloud.google.com/docs/authentication/getting-started - -Install Dependencies -++++++++++++++++++++ - -#. Clone python-docs-samples and change directory to the sample directory you want to use. - - .. code-block:: bash - - $ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git - -#. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. - - .. _Python Development Environment Setup Guide: - https://cloud.google.com/python/setup - -#. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. - - .. 
code-block:: bash - - $ virtualenv env - $ source env/bin/activate - -#. Install the dependencies needed to run the samples. - - .. code-block:: bash - - $ pip install -r requirements.txt - -.. _pip: https://pip.pypa.io/ -.. _virtualenv: https://virtualenv.pypa.io/ - -Samples -------------------------------------------------------------------------------- - -Basic example -+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - -.. image:: https://gstatic.com/cloudssh/images/open-btn.png - :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=bigtable/instanceadmin.py,bigtable/instanceadmin/README.rst - - - - -To run this sample: - -.. code-block:: bash - - $ python instanceadmin.py - - usage: instanceadmin.py [-h] [run] [dev-instance] [del-instance] [add-cluster] [del-cluster] project_id instance_id cluster_id - - Demonstrates how to connect to Cloud Bigtable and run some basic operations - to create instance, create cluster, delete instance and delete cluster. - Prerequisites: - Create a Cloud Bigtable cluster. - https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google - Application Default Credentials. - https://developers.google.com/identity/protocols/application-default- - credentials - - positional arguments: - project_id Your Cloud Platform project ID. - instance_id ID of the Cloud Bigtable instance to connect to. - cluster_id ID of the Cloud Bigtable cluster to connect to. - - optional arguments: - -h, --help show this help message and exit - -The client library -------------------------------------------------------------------------------- - -This sample uses the `Google Cloud Client Library for Python`_. -You can read the documentation for more details on API usage and use GitHub -to `browse the source`_ and `report issues`_. - -.. _Google Cloud Client Library for Python: - https://googlecloudplatform.github.io/google-cloud-python/ -.. _browse the source: - https://github.com/GoogleCloudPlatform/google-cloud-python -.. _report issues: - https://github.com/GoogleCloudPlatform/google-cloud-python/issues - - -.. _Google Cloud SDK: https://cloud.google.com/sdk/ \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/README.rst.in b/packages/google-cloud-bigtable/samples/instanceadmin/README.rst.in deleted file mode 100644 index c085e40a6278..000000000000 --- a/packages/google-cloud-bigtable/samples/instanceadmin/README.rst.in +++ /dev/null @@ -1,23 +0,0 @@ -# This file is used to generate README.rst - -product: - name: Google Cloud Bigtable and run some basic operations. - short_name: Cloud Bigtable - url: https://cloud.google.com/bigtable/docs - description: > - `Google Cloud Bigtable`_ is Google's NoSQL Big Data database service. It's - the same database that powers many core Google services, including Search, - Analytics, Maps, and Gmail. - -setup: -- auth -- install_deps - -samples: -- name: Basic example with Bigtable Column family and GC rules. 
- file: instanceadmin.py - show_help: true - -cloud_client_library: true - -folder: bigtable/instanceadmin \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/metricscaler/README.md b/packages/google-cloud-bigtable/samples/metricscaler/README.md new file mode 100644 index 000000000000..d41a89a85afc --- /dev/null +++ b/packages/google-cloud-bigtable/samples/metricscaler/README.md @@ -0,0 +1,52 @@ +[//]: # "This README.md file is auto-generated, all changes to this file will be lost." +[//]: # "To regenerate it, use `python -m synthtool`." + +## Python Samples for Cloud Bigtable + +This directory contains samples for Cloud Bigtable, which may be used as a refererence for how to use Cloud Bigtable. +Samples, quickstarts, and other documentation are available at cloud.google.com. + + +### Metric Scaler + +This sample demonstrates how to use Stackdriver Monitoring to scale Cloud Bigtable based on CPU usage. + + +Open in Cloud Shell + + +To run this sample: + +1. If this is your first time working with GCP products, you will need to set up [the Cloud SDK][cloud_sdk] or utilize [Google Cloud Shell][gcloud_shell]. This sample may [require authetication][authentication] and you will need to [enable billing][enable_billing]. + +1. Make a fork of this repo and clone the branch locally, then navigate to the sample directory you want to use. + +1. Install the dependencies needed to run the samples. + + pip install -r requirements.txt + +1. Run the sample using + + python metricscaler.py + + +
usage: metricscaler.py [-h] [--high_cpu_threshold HIGH_CPU_THRESHOLD]
                       [--low_cpu_threshold LOW_CPU_THRESHOLD]
                       [--short_sleep SHORT_SLEEP] [--long_sleep LONG_SLEEP]
                       bigtable_instance bigtable_cluster


Scales Cloud Bigtable clusters based on CPU usage.


positional arguments:
  bigtable_instance     ID of the Cloud Bigtable instance to connect to.
  bigtable_cluster      ID of the Cloud Bigtable cluster to connect to.


optional arguments:
  -h, --help            show this help message and exit
  --high_cpu_threshold HIGH_CPU_THRESHOLD
                        If Cloud Bigtable CPU usage is above this threshold,
                        scale up
  --low_cpu_threshold LOW_CPU_THRESHOLD
                        If Cloud Bigtable CPU usage is below this threshold,
                        scale down
  --short_sleep SHORT_SLEEP
                        How long to sleep in seconds between checking metrics
                        after no scale operation
  --long_sleep LONG_SLEEP
                        How long to sleep in seconds between checking metrics
                        after a scaling operation
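The flags above drive a simple polling loop. The following is a rough sketch only, assuming the google-cloud-bigtable client; the Cloud Monitoring (Stackdriver) query that the real sample performs is reduced to a placeholder get_cpu_load() stub, and the node-count bounds are illustrative assumptions.

    # Sketch of the scaling loop; get_cpu_load() is a placeholder stub.
    import time

    from google.cloud import bigtable


    def get_cpu_load():
        # Placeholder: return recent cluster CPU load as a float in [0, 1],
        # e.g. from a Cloud Monitoring time-series query.
        raise NotImplementedError("query Cloud Monitoring here")


    def scale_bigtable(instance_id, cluster_id, scale_up,
                       size_change=2, min_nodes=3, max_nodes=30):
        client = bigtable.Client(admin=True)
        cluster = client.instance(instance_id).cluster(cluster_id)
        cluster.reload()  # fetch the current serve_nodes count

        if scale_up:
            new_size = min(cluster.serve_nodes + size_change, max_nodes)
        else:
            new_size = max(cluster.serve_nodes - size_change, min_nodes)

        if new_size != cluster.serve_nodes:
            cluster.serve_nodes = new_size
            cluster.update()  # apply the resize


    def run(instance_id, cluster_id, high, low, short_sleep, long_sleep):
        while True:
            cpu = get_cpu_load()
            if cpu > high:
                scale_bigtable(instance_id, cluster_id, scale_up=True)
                time.sleep(long_sleep)
            elif cpu < low:
                scale_bigtable(instance_id, cluster_id, scale_up=False)
                time.sleep(long_sleep)
            else:
                time.sleep(short_sleep)

Resizing here follows the usual client pattern of setting serve_nodes on a Cluster and calling update(); the real sample adds the monitoring query and error handling omitted above.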
+ +## Additional Information + +You can read the documentation for more details on API usage and use GitHub +to [browse the source][source] and [report issues][issues]. + +### Contributing +For [contributing guidelines][contrib_guide], the [Python style guide][py_style], and more information on prerequisite steps to contribute, view the source code at googleapis/python-bigtable. + +[authentication]: https://cloud.google.com/docs/authentication/getting-started +[enable_billing]:https://cloud.google.com/apis/docs/getting-started#enabling_billing +[client_library_python]: https://googlecloudplatform.github.io/google-cloud-python/ +[source]: https://github.com/GoogleCloudPlatform/google-cloud-python +[issues]: https://github.com/GoogleCloudPlatform/google-cloud-python/issues +[contrib_guide]: https://github.com/googleapis/google-cloud-python/blob/master/CONTRIBUTING.rst +[py_style]: http://google.github.io/styleguide/pyguide.html +[cloud_sdk]: https://cloud.google.com/sdk/docs +[gcloud_shell]: https://cloud.google.com/shell/docs +[gcloud_shell]: https://cloud.google.com/shell/docs diff --git a/packages/google-cloud-bigtable/samples/metricscaler/README.rst b/packages/google-cloud-bigtable/samples/metricscaler/README.rst deleted file mode 100644 index c64bbff1d8af..000000000000 --- a/packages/google-cloud-bigtable/samples/metricscaler/README.rst +++ /dev/null @@ -1,128 +0,0 @@ -.. This file is automatically generated. Do not edit this file directly. - -Google Cloud Bigtable Python Samples -=============================================================================== - -.. image:: https://gstatic.com/cloudssh/images/open-btn.png - :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=bigtable/metricscaler/README.rst - - -This directory contains samples for Google Cloud Bigtable. `Google Cloud Bigtable`_ is Google's NoSQL Big Data database service. It's the same database that powers many core Google services, including Search, Analytics, Maps, and Gmail. - - -This sample demonstrates how to use `Stackdriver Monitoring`_ -to scale Cloud Bigtable based on CPU usage. - -.. _Stackdriver Monitoring: http://cloud.google.com/monitoring/docs/ - - -.. _Google Cloud Bigtable: https://cloud.google.com/bigtable/docs/ - -Setup -------------------------------------------------------------------------------- - - -Authentication -++++++++++++++ - -This sample requires you to have authentication setup. Refer to the -`Authentication Getting Started Guide`_ for instructions on setting up -credentials for applications. - -.. _Authentication Getting Started Guide: - https://cloud.google.com/docs/authentication/getting-started - -Install Dependencies -++++++++++++++++++++ - -#. Clone python-docs-samples and change directory to the sample directory you want to use. - - .. code-block:: bash - - $ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git - -#. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. - - .. _Python Development Environment Setup Guide: - https://cloud.google.com/python/setup - -#. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. - - .. code-block:: bash - - $ virtualenv env - $ source env/bin/activate - -#. Install the dependencies needed to run the samples. - - .. code-block:: bash - - $ pip install -r requirements.txt - -.. 
_pip: https://pip.pypa.io/ -.. _virtualenv: https://virtualenv.pypa.io/ - -Samples -------------------------------------------------------------------------------- - -Metricscaling example -+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - -.. image:: https://gstatic.com/cloudssh/images/open-btn.png - :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=bigtable/metricscaler/metricscaler.py,bigtable/metricscaler/README.rst - - - - -To run this sample: - -.. code-block:: bash - - $ python metricscaler.py - - usage: metricscaler.py [-h] [--high_cpu_threshold HIGH_CPU_THRESHOLD] - [--low_cpu_threshold LOW_CPU_THRESHOLD] - [--short_sleep SHORT_SLEEP] [--long_sleep LONG_SLEEP] - bigtable_instance bigtable_cluster - - Scales Cloud Bigtable clusters based on CPU usage. - - positional arguments: - bigtable_instance ID of the Cloud Bigtable instance to connect to. - bigtable_cluster ID of the Cloud Bigtable cluster to connect to. - - optional arguments: - -h, --help show this help message and exit - --high_cpu_threshold HIGH_CPU_THRESHOLD - If Cloud Bigtable CPU usage is above this threshold, - scale up - --low_cpu_threshold LOW_CPU_THRESHOLD - If Cloud Bigtable CPU usage is below this threshold, - scale down - --short_sleep SHORT_SLEEP - How long to sleep in seconds between checking metrics - after no scale operation - --long_sleep LONG_SLEEP - How long to sleep in seconds between checking metrics - after a scaling operation - - - - - -The client library -------------------------------------------------------------------------------- - -This sample uses the `Google Cloud Client Library for Python`_. -You can read the documentation for more details on API usage and use GitHub -to `browse the source`_ and `report issues`_. - -.. _Google Cloud Client Library for Python: - https://googlecloudplatform.github.io/google-cloud-python/ -.. _browse the source: - https://github.com/GoogleCloudPlatform/google-cloud-python -.. _report issues: - https://github.com/GoogleCloudPlatform/google-cloud-python/issues - - -.. _Google Cloud SDK: https://cloud.google.com/sdk/ \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/metricscaler/README.rst.in b/packages/google-cloud-bigtable/samples/metricscaler/README.rst.in deleted file mode 100644 index 44a548e4c1fb..000000000000 --- a/packages/google-cloud-bigtable/samples/metricscaler/README.rst.in +++ /dev/null @@ -1,29 +0,0 @@ -# This file is used to generate README.rst - -product: - name: Google Cloud Bigtable - short_name: Cloud Bigtable - url: https://cloud.google.com/bigtable/docs/ - description: > - `Google Cloud Bigtable`_ is Google's NoSQL Big Data database service. It's - the same database that powers many core Google services, including Search, - Analytics, Maps, and Gmail. - -description: | - This sample demonstrates how to use `Stackdriver Monitoring`_ - to scale Cloud Bigtable based on CPU usage. - - .. 
_Stackdriver Monitoring: http://cloud.google.com/monitoring/docs/ - -setup: -- auth -- install_deps - -samples: -- name: Metricscaling example - file: metricscaler.py - show_help: true - -cloud_client_library: true - -folder: bigtable/metricscaler \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/quickstart/README.md b/packages/google-cloud-bigtable/samples/quickstart/README.md new file mode 100644 index 000000000000..ae4f2e773bee --- /dev/null +++ b/packages/google-cloud-bigtable/samples/quickstart/README.md @@ -0,0 +1,52 @@ +[//]: # "This README.md file is auto-generated, all changes to this file will be lost." +[//]: # "To regenerate it, use `python -m synthtool`." + +## Python Samples for Cloud Bigtable + +This directory contains samples for Cloud Bigtable, which may be used as a refererence for how to use Cloud Bigtable. +Samples, quickstarts, and other documentation are available at cloud.google.com. + + +### Quickstart + +Demonstrates of Cloud Bigtable. This sample creates a Bigtable client, connects to an instance and then to a table, then closes the connection. + + +Open in Cloud Shell + + +To run this sample: + +1. If this is your first time working with GCP products, you will need to set up [the Cloud SDK][cloud_sdk] or utilize [Google Cloud Shell][gcloud_shell]. This sample may [require authetication][authentication] and you will need to [enable billing][enable_billing]. + +1. Make a fork of this repo and clone the branch locally, then navigate to the sample directory you want to use. + +1. Install the dependencies needed to run the samples. + + pip install -r requirements.txt + +1. Run the sample using + + python main.py + + +
usage: main.py [-h] [--table TABLE] project_id instance_id


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Existing table used in the quickstart. (default: my-table)
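As a quick orientation, a minimal sketch of what such a quickstart does with the client library is shown below. It is an illustration, not the sample's code: the row key "r1" and the "cf1"/"c1" column family and qualifier are assumptions about pre-existing data.

    # Hypothetical quickstart sketch; the row key and column names are assumptions.
    from google.cloud import bigtable


    def quickstart(project_id, instance_id, table_id="my-table"):
        client = bigtable.Client(project=project_id, admin=True)
        instance = client.instance(instance_id)
        table = instance.table(table_id)

        # Read a single row that is assumed to exist in the table.
        row = table.read_row(b"r1")
        if row is not None:
            cell = row.cells["cf1"][b"c1"][0]
            print(cell.value.decode("utf-8"))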
+ +## Additional Information + +You can read the documentation for more details on API usage and use GitHub +to [browse the source][source] and [report issues][issues]. + +### Contributing +For [contributing guidelines][contrib_guide], the [Python style guide][py_style], and more information on prerequisite steps to contribute, view the source code at googleapis/python-bigtable. + +[authentication]: https://cloud.google.com/docs/authentication/getting-started +[enable_billing]:https://cloud.google.com/apis/docs/getting-started#enabling_billing +[client_library_python]: https://googlecloudplatform.github.io/google-cloud-python/ +[source]: https://github.com/GoogleCloudPlatform/google-cloud-python +[issues]: https://github.com/GoogleCloudPlatform/google-cloud-python/issues +[contrib_guide]: https://github.com/googleapis/google-cloud-python/blob/master/CONTRIBUTING.rst +[py_style]: http://google.github.io/styleguide/pyguide.html +[cloud_sdk]: https://cloud.google.com/sdk/docs +[gcloud_shell]: https://cloud.google.com/shell/docs +[gcloud_shell]: https://cloud.google.com/shell/docs diff --git a/packages/google-cloud-bigtable/samples/quickstart/README.rst b/packages/google-cloud-bigtable/samples/quickstart/README.rst deleted file mode 100644 index c3ff17a3959d..000000000000 --- a/packages/google-cloud-bigtable/samples/quickstart/README.rst +++ /dev/null @@ -1,126 +0,0 @@ - -.. This file is automatically generated. Do not edit this file directly. - -Google Cloud Bigtable Python Samples -=============================================================================== - -.. image:: https://gstatic.com/cloudssh/images/open-btn.png - :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=bigtable/quickstart/README.rst - - -This directory contains samples for Google Cloud Bigtable. `Google Cloud Bigtable`_ is Google's NoSQL Big Data database service. It's the same database that powers many core Google services, including Search, Analytics, Maps, and Gmail. - - - - -.. _Google Cloud Bigtable: https://cloud.google.com/bigtable/docs - - -Setup -------------------------------------------------------------------------------- - - - -Authentication -++++++++++++++ - -This sample requires you to have authentication setup. Refer to the -`Authentication Getting Started Guide`_ for instructions on setting up -credentials for applications. - -.. _Authentication Getting Started Guide: - https://cloud.google.com/docs/authentication/getting-started - - - - -Install Dependencies -++++++++++++++++++++ - -#. Clone python-docs-samples and change directory to the sample directory you want to use. - - .. code-block:: bash - - $ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git - -#. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. - - .. _Python Development Environment Setup Guide: - https://cloud.google.com/python/setup - -#. Create a virtualenv. Samples are compatible with Python 3.6+. - - .. code-block:: bash - - $ virtualenv env - $ source env/bin/activate - -#. Install the dependencies needed to run the samples. - - .. code-block:: bash - - $ pip install -r requirements.txt - -.. _pip: https://pip.pypa.io/ -.. 
_virtualenv: https://virtualenv.pypa.io/ - - - - - - -Samples -------------------------------------------------------------------------------- - - -Quickstart -+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - -.. image:: https://gstatic.com/cloudssh/images/open-btn.png - :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=bigtable/quickstart/main.py,bigtable/quickstart/README.rst - - - - -To run this sample: - -.. code-block:: bash - - $ python main.py - - - usage: main.py [-h] [--table TABLE] project_id instance_id - - positional arguments: - project_id Your Cloud Platform project ID. - instance_id ID of the Cloud Bigtable instance to connect to. - - optional arguments: - -h, --help show this help message and exit - --table TABLE Existing table used in the quickstart. (default: my-table) - - - - - - - - - -The client library -------------------------------------------------------------------------------- - -This sample uses the `Google Cloud Client Library for Python`_. -You can read the documentation for more details on API usage and use GitHub -to `browse the source`_ and `report issues`_. - -.. _Google Cloud Client Library for Python: - https://googlecloudplatform.github.io/google-cloud-python/ -.. _browse the source: - https://github.com/GoogleCloudPlatform/google-cloud-python -.. _report issues: - https://github.com/GoogleCloudPlatform/google-cloud-python/issues - - - -.. _Google Cloud SDK: https://cloud.google.com/sdk/ diff --git a/packages/google-cloud-bigtable/samples/quickstart/README.rst.in b/packages/google-cloud-bigtable/samples/quickstart/README.rst.in deleted file mode 100644 index 94f070a7c887..000000000000 --- a/packages/google-cloud-bigtable/samples/quickstart/README.rst.in +++ /dev/null @@ -1,23 +0,0 @@ -# This file is used to generate README.rst - -product: - name: Google Cloud Bigtable - short_name: Cloud Bigtable - url: https://cloud.google.com/bigtable/docs - description: > - `Google Cloud Bigtable`_ is Google's NoSQL Big Data database service. It's - the same database that powers many core Google services, including Search, - Analytics, Maps, and Gmail. - -setup: -- auth -- install_deps - -samples: -- name: Quickstart - file: main.py - show_help: true - -cloud_client_library: true - -folder: bigtable/quickstart \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/README.md b/packages/google-cloud-bigtable/samples/quickstart_happybase/README.md new file mode 100644 index 000000000000..edbd0bfcb3bc --- /dev/null +++ b/packages/google-cloud-bigtable/samples/quickstart_happybase/README.md @@ -0,0 +1,52 @@ +[//]: # "This README.md file is auto-generated, all changes to this file will be lost." +[//]: # "To regenerate it, use `python -m synthtool`." + +## Python Samples for Cloud Bigtable + +This directory contains samples for Cloud Bigtable, which may be used as a refererence for how to use Cloud Bigtable. +Samples, quickstarts, and other documentation are available at cloud.google.com. + + +### Quickstart using HappyBase + +Demonstrates of Cloud Bigtable using HappyBase. This sample creates a Bigtable client, connects to an instance and then to a table, then closes the connection. + + +Open in Cloud Shell + + +To run this sample: + +1. If this is your first time working with GCP products, you will need to set up [the Cloud SDK][cloud_sdk] or utilize [Google Cloud Shell][gcloud_shell]. 
This sample may [require authentication][authentication] and you will need to [enable billing][enable_billing]. + +1. Make a fork of this repo and clone the branch locally, then navigate to the sample directory you want to use. + +1. Install the dependencies needed to run the samples. + + pip install -r requirements.txt + +1. Run the sample using + + python main.py + + +
usage: main.py [-h] [--table TABLE] project_id instance_id


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Existing table used in the quickstart. (default: my-table)googleapis/python-bigtable. + +[authentication]: https://cloud.google.com/docs/authentication/getting-started +[enable_billing]:https://cloud.google.com/apis/docs/getting-started#enabling_billing +[client_library_python]: https://googlecloudplatform.github.io/google-cloud-python/ +[source]: https://github.com/GoogleCloudPlatform/google-cloud-python +[issues]: https://github.com/GoogleCloudPlatform/google-cloud-python/issues +[contrib_guide]: https://github.com/googleapis/google-cloud-python/blob/master/CONTRIBUTING.rst +[py_style]: http://google.github.io/styleguide/pyguide.html +[cloud_sdk]: https://cloud.google.com/sdk/docs +[gcloud_shell]: https://cloud.google.com/shell/docs +[gcloud_shell]: https://cloud.google.com/shell/docs diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/README.rst b/packages/google-cloud-bigtable/samples/quickstart_happybase/README.rst deleted file mode 100644 index e2d1c45a2729..000000000000 --- a/packages/google-cloud-bigtable/samples/quickstart_happybase/README.rst +++ /dev/null @@ -1,108 +0,0 @@ -.. This file is automatically generated. Do not edit this file directly. - -Google Cloud Bigtable Python Samples -=============================================================================== - -.. image:: https://gstatic.com/cloudssh/images/open-btn.png - :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=bigtable/quickstart_happybase/README.rst - - -This directory contains samples for Google Cloud Bigtable. `Google Cloud Bigtable`_ is Google's NoSQL Big Data database service. It's the same database that powers many core Google services, including Search, Analytics, Maps, and Gmail. - - - - -.. _Google Cloud Bigtable: https://cloud.google.com/bigtable/docs - -Setup -------------------------------------------------------------------------------- - - -Authentication -++++++++++++++ - -This sample requires you to have authentication setup. Refer to the -`Authentication Getting Started Guide`_ for instructions on setting up -credentials for applications. - -.. _Authentication Getting Started Guide: - https://cloud.google.com/docs/authentication/getting-started - -Install Dependencies -++++++++++++++++++++ - -#. Clone python-docs-samples and change directory to the sample directory you want to use. - - .. code-block:: bash - - $ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git - -#. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. - - .. _Python Development Environment Setup Guide: - https://cloud.google.com/python/setup - -#. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. - - .. code-block:: bash - - $ virtualenv env - $ source env/bin/activate - -#. Install the dependencies needed to run the samples. - - .. code-block:: bash - - $ pip install -r requirements.txt - -.. _pip: https://pip.pypa.io/ -.. _virtualenv: https://virtualenv.pypa.io/ - -Samples -------------------------------------------------------------------------------- - -Quickstart -+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - -.. 
image:: https://gstatic.com/cloudssh/images/open-btn.png - :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=bigtable/quickstart_happybase/main.py,bigtable/quickstart_happybase/README.rst - - - - -To run this sample: - -.. code-block:: bash - - $ python main.py - - usage: main.py [-h] [--table TABLE] project_id instance_id - - positional arguments: - project_id Your Cloud Platform project ID. - instance_id ID of the Cloud Bigtable instance to connect to. - - optional arguments: - -h, --help show this help message and exit - --table TABLE Existing table used in the quickstart. (default: my-table) - - - - - -The client library -------------------------------------------------------------------------------- - -This sample uses the `Google Cloud Client Library for Python`_. -You can read the documentation for more details on API usage and use GitHub -to `browse the source`_ and `report issues`_. - -.. _Google Cloud Client Library for Python: - https://googlecloudplatform.github.io/google-cloud-python/ -.. _browse the source: - https://github.com/GoogleCloudPlatform/google-cloud-python -.. _report issues: - https://github.com/GoogleCloudPlatform/google-cloud-python/issues - - -.. _Google Cloud SDK: https://cloud.google.com/sdk/ \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/README.rst.in b/packages/google-cloud-bigtable/samples/quickstart_happybase/README.rst.in deleted file mode 100644 index 811a0b868fb3..000000000000 --- a/packages/google-cloud-bigtable/samples/quickstart_happybase/README.rst.in +++ /dev/null @@ -1,23 +0,0 @@ -# This file is used to generate README.rst - -product: - name: Google Cloud Bigtable - short_name: Cloud Bigtable - url: https://cloud.google.com/bigtable/docs - description: > - `Google Cloud Bigtable`_ is Google's NoSQL Big Data database service. It's - the same database that powers many core Google services, including Search, - Analytics, Maps, and Gmail. - -setup: -- auth -- install_deps - -samples: -- name: Quickstart - file: main.py - show_help: true - -cloud_client_library: true - -folder: bigtable/quickstart_happybase \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/README.md b/packages/google-cloud-bigtable/samples/snippets/README.md new file mode 100644 index 000000000000..0d6f32edb08b --- /dev/null +++ b/packages/google-cloud-bigtable/samples/snippets/README.md @@ -0,0 +1,33 @@ +[//]: # "This README.md file is auto-generated, all changes to this file will be lost." +[//]: # "To regenerate it, use `python -m synthtool`." + +## Python Samples for Cloud Bigtable + +This directory contains samples for Cloud Bigtable, which may be used as a refererence for how to use Cloud Bigtable. +Samples, quickstarts, and other documentation are available at cloud.google.com. + + +### Snippets + +This folder contains snippets for Python Cloud Bigtable. + + + +## Additional Information + +You can read the documentation for more details on API usage and use GitHub +to [browse the source][source] and [report issues][issues]. + +### Contributing +For [contributing guidelines][contrib_guide], the [Python style guide][py_style], and more information on prerequisite steps to contribute, view the source code at googleapis/python-bigtable. 
+ +[authentication]: https://cloud.google.com/docs/authentication/getting-started +[enable_billing]:https://cloud.google.com/apis/docs/getting-started#enabling_billing +[client_library_python]: https://googlecloudplatform.github.io/google-cloud-python/ +[source]: https://github.com/GoogleCloudPlatform/google-cloud-python +[issues]: https://github.com/GoogleCloudPlatform/google-cloud-python/issues +[contrib_guide]: https://github.com/googleapis/google-cloud-python/blob/master/CONTRIBUTING.rst +[py_style]: http://google.github.io/styleguide/pyguide.html +[cloud_sdk]: https://cloud.google.com/sdk/docs +[gcloud_shell]: https://cloud.google.com/shell/docs +[gcloud_shell]: https://cloud.google.com/shell/docs diff --git a/packages/google-cloud-bigtable/samples/tableadmin/README.md b/packages/google-cloud-bigtable/samples/tableadmin/README.md new file mode 100644 index 000000000000..d3aa038a4d3f --- /dev/null +++ b/packages/google-cloud-bigtable/samples/tableadmin/README.md @@ -0,0 +1,52 @@ +[//]: # "This README.md file is auto-generated, all changes to this file will be lost." +[//]: # "To regenerate it, use `python -m synthtool`." + +## Python Samples for Cloud Bigtable + +This directory contains samples for Cloud Bigtable, which may be used as a refererence for how to use Cloud Bigtable. +Samples, quickstarts, and other documentation are available at cloud.google.com. + + +### Table Admin + +Demonstrates how to connect to Cloud Bigtable and run some basic operations. + + +Open in Cloud Shell + + +To run this sample: + +1. If this is your first time working with GCP products, you will need to set up [the Cloud SDK][cloud_sdk] or utilize [Google Cloud Shell][gcloud_shell]. This sample may [require authetication][authentication] and you will need to [enable billing][enable_billing]. + +1. Make a fork of this repo and clone the branch locally, then navigate to the sample directory you want to use. + +1. Install the dependencies needed to run the samples. + + pip install -r requirements.txt + +1. Run the sample using + + python tableadmin.py + + +
usage: tableadmin.py [-h] [run] [delete] [--table TABLE] project_id instance_id


Demonstrates how to connect to Cloud Bigtable and run some basic operations.
Prerequisites:
- Create a Cloud Bigtable cluster.
  https://cloud.google.com/bigtable/docs/creating-cluster
- Set your Google Application Default Credentials.
  https://developers.google.com/identity/protocols/application-default-credentials


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Table to create and destroy. (default: Hello-Bigtable)
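The operations listed above correspond to a handful of admin calls in the Python client. As a rough, hypothetical sketch (not the sample's actual code), creating and tearing down a table with a simple garbage-collection rule might look like the following; all identifiers are placeholders.

    # Hypothetical table-admin sketch; IDs and the GC policy are placeholders.
    from google.cloud import bigtable
    from google.cloud.bigtable import column_family


    def create_demo_table(project_id, instance_id, table_id="Hello-Bigtable"):
        client = bigtable.Client(project=project_id, admin=True)
        instance = client.instance(instance_id)
        table = instance.table(table_id)

        if not table.exists():
            # Keep at most two versions of each cell in column family "cf1".
            rule = column_family.MaxVersionsGCRule(2)
            table.create(column_families={"cf1": rule})

        # List the instance's tables to confirm the new table is present.
        print([t.table_id for t in instance.list_tables()])


    def delete_demo_table(project_id, instance_id, table_id="Hello-Bigtable"):
        client = bigtable.Client(project=project_id, admin=True)
        table = client.instance(instance_id).table(table_id)
        if table.exists():
            table.delete()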
+ +## Additional Information + +You can read the documentation for more details on API usage and use GitHub +to [browse the source][source] and [report issues][issues]. + +### Contributing +For [contributing guidelines][contrib_guide], the [Python style guide][py_style], and more information on prerequisite steps to contribute, view the source code at googleapis/python-bigtable. + +[authentication]: https://cloud.google.com/docs/authentication/getting-started +[enable_billing]:https://cloud.google.com/apis/docs/getting-started#enabling_billing +[client_library_python]: https://googlecloudplatform.github.io/google-cloud-python/ +[source]: https://github.com/GoogleCloudPlatform/google-cloud-python +[issues]: https://github.com/GoogleCloudPlatform/google-cloud-python/issues +[contrib_guide]: https://github.com/googleapis/google-cloud-python/blob/master/CONTRIBUTING.rst +[py_style]: http://google.github.io/styleguide/pyguide.html +[cloud_sdk]: https://cloud.google.com/sdk/docs +[gcloud_shell]: https://cloud.google.com/shell/docs +[gcloud_shell]: https://cloud.google.com/shell/docs diff --git a/packages/google-cloud-bigtable/samples/tableadmin/README.rst b/packages/google-cloud-bigtable/samples/tableadmin/README.rst deleted file mode 100644 index f7f83d6d2a1b..000000000000 --- a/packages/google-cloud-bigtable/samples/tableadmin/README.rst +++ /dev/null @@ -1,115 +0,0 @@ -.. This file is automatically generated. Do not edit this file directly. - -Google Cloud Bigtable Python Samples -=============================================================================== - -.. image:: https://gstatic.com/cloudssh/images/open-btn.png - :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=bigtable/hello/README.rst - - -This directory contains samples for Google Cloud Bigtable. `Google Cloud Bigtable`_ is Google's NoSQL Big Data database service. It's the same database that powers many core Google services, including Search, Analytics, Maps, and Gmail. - - - - -.. _Google Cloud Bigtable: https://cloud.google.com/bigtable/docs - -Setup -------------------------------------------------------------------------------- - - -Authentication -++++++++++++++ - -This sample requires you to have authentication setup. Refer to the -`Authentication Getting Started Guide`_ for instructions on setting up -credentials for applications. - -.. _Authentication Getting Started Guide: - https://cloud.google.com/docs/authentication/getting-started - -Install Dependencies -++++++++++++++++++++ - -#. Clone python-docs-samples and change directory to the sample directory you want to use. - - .. code-block:: bash - - $ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git - -#. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. - - .. _Python Development Environment Setup Guide: - https://cloud.google.com/python/setup - -#. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. - - .. code-block:: bash - - $ virtualenv env - $ source env/bin/activate - -#. Install the dependencies needed to run the samples. - - .. code-block:: bash - - $ pip install -r requirements.txt - -.. _pip: https://pip.pypa.io/ -.. 
_virtualenv: https://virtualenv.pypa.io/ - -Samples -------------------------------------------------------------------------------- - -Basic example -+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - -.. image:: https://gstatic.com/cloudssh/images/open-btn.png - :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=bigtable/hello/tableadmin.py,bigtable/hello/README.rst - - - - -To run this sample: - -.. code-block:: bash - - $ python tableadmin.py - - usage: tableadmin.py [-h] [run] [delete] [--table TABLE] project_id instance_id - - Demonstrates how to connect to Cloud Bigtable and run some basic operations. - Prerequisites: - Create a Cloud Bigtable cluster. - https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google - Application Default Credentials. - https://developers.google.com/identity/protocols/application-default- - credentials - - positional arguments: - project_id Your Cloud Platform project ID. - instance_id ID of the Cloud Bigtable instance to connect to. - - optional arguments: - -h, --help show this help message and exit - --table TABLE Table to create and destroy. (default: Hello-Bigtable) - - - - - -The client library -------------------------------------------------------------------------------- - -This sample uses the `Google Cloud Client Library for Python`_. -You can read the documentation for more details on API usage and use GitHub -to `browse the source`_ and `report issues`_. - -.. _Google Cloud Client Library for Python: - https://googlecloudplatform.github.io/google-cloud-python/ -.. _browse the source: - https://github.com/GoogleCloudPlatform/google-cloud-python -.. _report issues: - https://github.com/GoogleCloudPlatform/google-cloud-python/issues - - -.. _Google Cloud SDK: https://cloud.google.com/sdk/ \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/tableadmin/README.rst.in b/packages/google-cloud-bigtable/samples/tableadmin/README.rst.in deleted file mode 100644 index 7fd37641969a..000000000000 --- a/packages/google-cloud-bigtable/samples/tableadmin/README.rst.in +++ /dev/null @@ -1,23 +0,0 @@ -# This file is used to generate README.rst - -product: - name: Google Cloud Bigtable and run some basic operations. - short_name: Cloud Bigtable - url: https://cloud.google.com/bigtable/docs - description: > - `Google Cloud Bigtable`_ is Google's NoSQL Big Data database service. It's - the same database that powers many core Google services, including Search, - Analytics, Maps, and Gmail. - -setup: -- auth -- install_deps - -samples: -- name: Basic example with Bigtable Column family and GC rules. 
- file: tableadmin.py - show_help: true - -cloud_client_library: true - -folder: bigtable/tableadmin \ No newline at end of file diff --git a/packages/google-cloud-bigtable/synth.py b/packages/google-cloud-bigtable/synth.py index 141d93dd3cd6..8a2fed1c722e 100644 --- a/packages/google-cloud-bigtable/synth.py +++ b/packages/google-cloud-bigtable/synth.py @@ -91,6 +91,9 @@ # Samples templates # ---------------------------------------------------------------------------- -python.py_samples(skip_readmes=True) +sample_files = common.py_samples(samples=True) +for path in sample_files: + s.move(path, excludes=['noxfile.py']) + s.shell.run(["nox", "-s", "blacken"], hide_output=False) From 58a94bb0bad0029e2d8078550f906f4d858e663e Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Mon, 24 Aug 2020 14:02:03 -0700 Subject: [PATCH 339/892] fix(python_samples): README link fix, enforce samples=True (#114) This PR was generated using Autosynth. :rainbow: Synth log will be available here: https://source.cloud.google.com/results/invocations/261f920a-ceda-4754-a611-184f29b241a9/targets - [ ] To automatically regenerate this PR, check this box. Source-Link: https://github.com/googleapis/synthtool/commit/2e85c10b5153defd9d654c34b57e7e9263361959 --- packages/google-cloud-bigtable/samples/README.md | 7 +++---- packages/google-cloud-bigtable/samples/hello/README.md | 7 +++---- .../samples/hello_happybase/README.md | 7 +++---- .../google-cloud-bigtable/samples/instanceadmin/README.md | 7 +++---- .../google-cloud-bigtable/samples/metricscaler/README.md | 7 +++---- .../google-cloud-bigtable/samples/quickstart/README.md | 7 +++---- .../samples/quickstart_happybase/README.md | 7 +++---- packages/google-cloud-bigtable/samples/snippets/README.md | 7 +++---- .../google-cloud-bigtable/samples/tableadmin/README.md | 7 +++---- packages/google-cloud-bigtable/synth.metadata | 6 +++--- 10 files changed, 30 insertions(+), 39 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/README.md b/packages/google-cloud-bigtable/samples/README.md index ae3123e7ff13..70d852d4f3f0 100644 --- a/packages/google-cloud-bigtable/samples/README.md +++ b/packages/google-cloud-bigtable/samples/README.md @@ -3,20 +3,19 @@ ## Python Samples for Cloud Bigtable -This directory contains samples for Cloud Bigtable, which may be used as a refererence for how to use Cloud Bigtable. +This directory contains samples for Cloud Bigtable, which may be used as a refererence for how to use this product. ## Additional Information You can read the documentation for more details on API usage and use GitHub -to [browse the source][source] and [report issues][issues]. +to browse the source and [report issues][issues]. ### Contributing -For [contributing guidelines][contrib_guide], the [Python style guide][py_style], and more information on prerequisite steps to contribute, view the source code at googleapis/python-bigtable. +View the [contributing guidelines][contrib_guide], the [Python style guide][py_style] for more information. 
[authentication]: https://cloud.google.com/docs/authentication/getting-started [enable_billing]:https://cloud.google.com/apis/docs/getting-started#enabling_billing [client_library_python]: https://googlecloudplatform.github.io/google-cloud-python/ -[source]: https://github.com/GoogleCloudPlatform/google-cloud-python [issues]: https://github.com/GoogleCloudPlatform/google-cloud-python/issues [contrib_guide]: https://github.com/googleapis/google-cloud-python/blob/master/CONTRIBUTING.rst [py_style]: http://google.github.io/styleguide/pyguide.html diff --git a/packages/google-cloud-bigtable/samples/hello/README.md b/packages/google-cloud-bigtable/samples/hello/README.md index 0fa601de7378..cb869f5290ff 100644 --- a/packages/google-cloud-bigtable/samples/hello/README.md +++ b/packages/google-cloud-bigtable/samples/hello/README.md @@ -3,7 +3,7 @@ ## Python Samples for Cloud Bigtable -This directory contains samples for Cloud Bigtable, which may be used as a refererence for how to use Cloud Bigtable. +This directory contains samples for Cloud Bigtable, which may be used as a refererence for how to use this product. Samples, quickstarts, and other documentation are available at cloud.google.com. @@ -35,15 +35,14 @@ To run this sample: ## Additional Information You can read the documentation for more details on API usage and use GitHub -to [browse the source][source] and [report issues][issues]. +to browse the source and [report issues][issues]. ### Contributing -For [contributing guidelines][contrib_guide], the [Python style guide][py_style], and more information on prerequisite steps to contribute, view the source code at googleapis/python-bigtable. +View the [contributing guidelines][contrib_guide], the [Python style guide][py_style] for more information. [authentication]: https://cloud.google.com/docs/authentication/getting-started [enable_billing]:https://cloud.google.com/apis/docs/getting-started#enabling_billing [client_library_python]: https://googlecloudplatform.github.io/google-cloud-python/ -[source]: https://github.com/GoogleCloudPlatform/google-cloud-python [issues]: https://github.com/GoogleCloudPlatform/google-cloud-python/issues [contrib_guide]: https://github.com/googleapis/google-cloud-python/blob/master/CONTRIBUTING.rst [py_style]: http://google.github.io/styleguide/pyguide.html diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/README.md b/packages/google-cloud-bigtable/samples/hello_happybase/README.md index c56e68e4b324..e8a57f2f075b 100644 --- a/packages/google-cloud-bigtable/samples/hello_happybase/README.md +++ b/packages/google-cloud-bigtable/samples/hello_happybase/README.md @@ -3,7 +3,7 @@ ## Python Samples for Cloud Bigtable -This directory contains samples for Cloud Bigtable, which may be used as a refererence for how to use Cloud Bigtable. +This directory contains samples for Cloud Bigtable, which may be used as a refererence for how to use this product. Samples, quickstarts, and other documentation are available at cloud.google.com. @@ -35,15 +35,14 @@ To run this sample: ## Additional Information You can read the documentation for more details on API usage and use GitHub -to [browse the source][source] and [report issues][issues]. +to browse the source and [report issues][issues]. ### Contributing -For [contributing guidelines][contrib_guide], the [Python style guide][py_style], and more information on prerequisite steps to contribute, view the source code at googleapis/python-bigtable. 
+View the [contributing guidelines][contrib_guide], the [Python style guide][py_style] for more information. [authentication]: https://cloud.google.com/docs/authentication/getting-started [enable_billing]:https://cloud.google.com/apis/docs/getting-started#enabling_billing [client_library_python]: https://googlecloudplatform.github.io/google-cloud-python/ -[source]: https://github.com/GoogleCloudPlatform/google-cloud-python [issues]: https://github.com/GoogleCloudPlatform/google-cloud-python/issues [contrib_guide]: https://github.com/googleapis/google-cloud-python/blob/master/CONTRIBUTING.rst [py_style]: http://google.github.io/styleguide/pyguide.html diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/README.md b/packages/google-cloud-bigtable/samples/instanceadmin/README.md index e6ee71ad56db..1ff520909d87 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/README.md +++ b/packages/google-cloud-bigtable/samples/instanceadmin/README.md @@ -3,7 +3,7 @@ ## Python Samples for Cloud Bigtable -This directory contains samples for Cloud Bigtable, which may be used as a refererence for how to use Cloud Bigtable. +This directory contains samples for Cloud Bigtable, which may be used as a refererence for how to use this product. Samples, quickstarts, and other documentation are available at cloud.google.com. @@ -35,15 +35,14 @@ To run this sample: ## Additional Information You can read the documentation for more details on API usage and use GitHub -to [browse the source][source] and [report issues][issues]. +to browse the source and [report issues][issues]. ### Contributing -For [contributing guidelines][contrib_guide], the [Python style guide][py_style], and more information on prerequisite steps to contribute, view the source code at googleapis/python-bigtable. +View the [contributing guidelines][contrib_guide], the [Python style guide][py_style] for more information. [authentication]: https://cloud.google.com/docs/authentication/getting-started [enable_billing]:https://cloud.google.com/apis/docs/getting-started#enabling_billing [client_library_python]: https://googlecloudplatform.github.io/google-cloud-python/ -[source]: https://github.com/GoogleCloudPlatform/google-cloud-python [issues]: https://github.com/GoogleCloudPlatform/google-cloud-python/issues [contrib_guide]: https://github.com/googleapis/google-cloud-python/blob/master/CONTRIBUTING.rst [py_style]: http://google.github.io/styleguide/pyguide.html diff --git a/packages/google-cloud-bigtable/samples/metricscaler/README.md b/packages/google-cloud-bigtable/samples/metricscaler/README.md index d41a89a85afc..29819f88be91 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/README.md +++ b/packages/google-cloud-bigtable/samples/metricscaler/README.md @@ -3,7 +3,7 @@ ## Python Samples for Cloud Bigtable -This directory contains samples for Cloud Bigtable, which may be used as a refererence for how to use Cloud Bigtable. +This directory contains samples for Cloud Bigtable, which may be used as a refererence for how to use this product. Samples, quickstarts, and other documentation are available at cloud.google.com. @@ -35,15 +35,14 @@ To run this sample: ## Additional Information You can read the documentation for more details on API usage and use GitHub -to [browse the source][source] and [report issues][issues]. +to browse the source and [report issues][issues]. 
### Contributing -For [contributing guidelines][contrib_guide], the [Python style guide][py_style], and more information on prerequisite steps to contribute, view the source code at googleapis/python-bigtable. +View the [contributing guidelines][contrib_guide], the [Python style guide][py_style] for more information. [authentication]: https://cloud.google.com/docs/authentication/getting-started [enable_billing]:https://cloud.google.com/apis/docs/getting-started#enabling_billing [client_library_python]: https://googlecloudplatform.github.io/google-cloud-python/ -[source]: https://github.com/GoogleCloudPlatform/google-cloud-python [issues]: https://github.com/GoogleCloudPlatform/google-cloud-python/issues [contrib_guide]: https://github.com/googleapis/google-cloud-python/blob/master/CONTRIBUTING.rst [py_style]: http://google.github.io/styleguide/pyguide.html diff --git a/packages/google-cloud-bigtable/samples/quickstart/README.md b/packages/google-cloud-bigtable/samples/quickstart/README.md index ae4f2e773bee..8eed6e201207 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/README.md +++ b/packages/google-cloud-bigtable/samples/quickstart/README.md @@ -3,7 +3,7 @@ ## Python Samples for Cloud Bigtable -This directory contains samples for Cloud Bigtable, which may be used as a refererence for how to use Cloud Bigtable. +This directory contains samples for Cloud Bigtable, which may be used as a refererence for how to use this product. Samples, quickstarts, and other documentation are available at cloud.google.com. @@ -35,15 +35,14 @@ To run this sample: ## Additional Information You can read the documentation for more details on API usage and use GitHub -to [browse the source][source] and [report issues][issues]. +to browse the source and [report issues][issues]. ### Contributing -For [contributing guidelines][contrib_guide], the [Python style guide][py_style], and more information on prerequisite steps to contribute, view the source code at googleapis/python-bigtable. +View the [contributing guidelines][contrib_guide], the [Python style guide][py_style] for more information. [authentication]: https://cloud.google.com/docs/authentication/getting-started [enable_billing]:https://cloud.google.com/apis/docs/getting-started#enabling_billing [client_library_python]: https://googlecloudplatform.github.io/google-cloud-python/ -[source]: https://github.com/GoogleCloudPlatform/google-cloud-python [issues]: https://github.com/GoogleCloudPlatform/google-cloud-python/issues [contrib_guide]: https://github.com/googleapis/google-cloud-python/blob/master/CONTRIBUTING.rst [py_style]: http://google.github.io/styleguide/pyguide.html diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/README.md b/packages/google-cloud-bigtable/samples/quickstart_happybase/README.md index edbd0bfcb3bc..b74689ddc1e1 100644 --- a/packages/google-cloud-bigtable/samples/quickstart_happybase/README.md +++ b/packages/google-cloud-bigtable/samples/quickstart_happybase/README.md @@ -3,7 +3,7 @@ ## Python Samples for Cloud Bigtable -This directory contains samples for Cloud Bigtable, which may be used as a refererence for how to use Cloud Bigtable. +This directory contains samples for Cloud Bigtable, which may be used as a refererence for how to use this product. Samples, quickstarts, and other documentation are available at cloud.google.com. 
@@ -35,15 +35,14 @@ To run this sample: ## Additional Information You can read the documentation for more details on API usage and use GitHub -to [browse the source][source] and [report issues][issues]. +to browse the source and [report issues][issues]. ### Contributing -For [contributing guidelines][contrib_guide], the [Python style guide][py_style], and more information on prerequisite steps to contribute, view the source code at googleapis/python-bigtable. +View the [contributing guidelines][contrib_guide], the [Python style guide][py_style] for more information. [authentication]: https://cloud.google.com/docs/authentication/getting-started [enable_billing]:https://cloud.google.com/apis/docs/getting-started#enabling_billing [client_library_python]: https://googlecloudplatform.github.io/google-cloud-python/ -[source]: https://github.com/GoogleCloudPlatform/google-cloud-python [issues]: https://github.com/GoogleCloudPlatform/google-cloud-python/issues [contrib_guide]: https://github.com/googleapis/google-cloud-python/blob/master/CONTRIBUTING.rst [py_style]: http://google.github.io/styleguide/pyguide.html diff --git a/packages/google-cloud-bigtable/samples/snippets/README.md b/packages/google-cloud-bigtable/samples/snippets/README.md index 0d6f32edb08b..f23681ca9f80 100644 --- a/packages/google-cloud-bigtable/samples/snippets/README.md +++ b/packages/google-cloud-bigtable/samples/snippets/README.md @@ -3,7 +3,7 @@ ## Python Samples for Cloud Bigtable -This directory contains samples for Cloud Bigtable, which may be used as a refererence for how to use Cloud Bigtable. +This directory contains samples for Cloud Bigtable, which may be used as a refererence for how to use this product. Samples, quickstarts, and other documentation are available at cloud.google.com. @@ -16,15 +16,14 @@ This folder contains snippets for Python Cloud Bigtable. ## Additional Information You can read the documentation for more details on API usage and use GitHub -to [browse the source][source] and [report issues][issues]. +to browse the source and [report issues][issues]. ### Contributing -For [contributing guidelines][contrib_guide], the [Python style guide][py_style], and more information on prerequisite steps to contribute, view the source code at googleapis/python-bigtable. +View the [contributing guidelines][contrib_guide], the [Python style guide][py_style] for more information. [authentication]: https://cloud.google.com/docs/authentication/getting-started [enable_billing]:https://cloud.google.com/apis/docs/getting-started#enabling_billing [client_library_python]: https://googlecloudplatform.github.io/google-cloud-python/ -[source]: https://github.com/GoogleCloudPlatform/google-cloud-python [issues]: https://github.com/GoogleCloudPlatform/google-cloud-python/issues [contrib_guide]: https://github.com/googleapis/google-cloud-python/blob/master/CONTRIBUTING.rst [py_style]: http://google.github.io/styleguide/pyguide.html diff --git a/packages/google-cloud-bigtable/samples/tableadmin/README.md b/packages/google-cloud-bigtable/samples/tableadmin/README.md index d3aa038a4d3f..9bc9e38b78b9 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/README.md +++ b/packages/google-cloud-bigtable/samples/tableadmin/README.md @@ -3,7 +3,7 @@ ## Python Samples for Cloud Bigtable -This directory contains samples for Cloud Bigtable, which may be used as a refererence for how to use Cloud Bigtable. +This directory contains samples for Cloud Bigtable, which may be used as a refererence for how to use this product. 
Samples, quickstarts, and other documentation are available at cloud.google.com. @@ -35,15 +35,14 @@ To run this sample: ## Additional Information You can read the documentation for more details on API usage and use GitHub -to [browse the source][source] and [report issues][issues]. +to browse the source and [report issues][issues]. ### Contributing -For [contributing guidelines][contrib_guide], the [Python style guide][py_style], and more information on prerequisite steps to contribute, view the source code at googleapis/python-bigtable. +View the [contributing guidelines][contrib_guide], the [Python style guide][py_style] for more information. [authentication]: https://cloud.google.com/docs/authentication/getting-started [enable_billing]:https://cloud.google.com/apis/docs/getting-started#enabling_billing [client_library_python]: https://googlecloudplatform.github.io/google-cloud-python/ -[source]: https://github.com/GoogleCloudPlatform/google-cloud-python [issues]: https://github.com/GoogleCloudPlatform/google-cloud-python/issues [contrib_guide]: https://github.com/googleapis/google-cloud-python/blob/master/CONTRIBUTING.rst [py_style]: http://google.github.io/styleguide/pyguide.html diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index a9453a02b0d1..a4d60cb48878 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -4,7 +4,7 @@ "git": { "name": ".", "remote": "https://github.com/googleapis/python-bigtable.git", - "sha": "e55ca07561f9c946276f3bde599e69947769f560" + "sha": "3ecca7a7b52b0f4fc38db5c5016622b994c1a8aa" } }, { @@ -19,14 +19,14 @@ "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "4f8f5dc24af79694887385015294e4dbb214c352" + "sha": "2e85c10b5153defd9d654c34b57e7e9263361959" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "4f8f5dc24af79694887385015294e4dbb214c352" + "sha": "2e85c10b5153defd9d654c34b57e7e9263361959" } } ], From ec720efc16bbe9245e9f39d8d56c8a5676f416e7 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Mon, 31 Aug 2020 07:20:16 -0700 Subject: [PATCH 340/892] chore: code formatting (#117) autosynth cannot find the source of changes triggered by earlier changes in this repository, or by version upgrades to tools such as linters. 
--- packages/google-cloud-bigtable/docs/conf.py | 5 +- .../google/cloud/bigtable/backup.py | 5 +- .../google/cloud/bigtable/batcher.py | 8 +- .../google/cloud/bigtable/client.py | 4 +- .../google/cloud/bigtable/row.py | 2 +- .../google/cloud/bigtable/row_data.py | 5 +- .../google/cloud/bigtable/row_set.py | 10 +-- .../gapic/bigtable_instance_admin_client.py | 57 +++++++++---- .../gapic/bigtable_table_admin_client.py | 85 ++++++++++++++----- .../proto/bigtable_instance_admin_pb2.py | 4 +- .../proto/bigtable_instance_admin_pb2_grpc.py | 45 ++++------ .../proto/bigtable_table_admin_pb2.py | 8 +- .../proto/bigtable_table_admin_pb2_grpc.py | 21 ++--- .../bigtable_admin_v2/proto/common_pb2.py | 4 +- .../bigtable_admin_v2/proto/instance_pb2.py | 13 ++- .../bigtable_admin_v2/proto/table_pb2.py | 22 +++-- .../bigtable_v2/gapic/bigtable_client.py | 11 ++- .../cloud/bigtable_v2/proto/bigtable_pb2.py | 12 ++- .../bigtable_v2/proto/bigtable_pb2_grpc.py | 12 +-- .../cloud/bigtable_v2/proto/data_pb2.py | 6 +- packages/google-cloud-bigtable/synth.metadata | 2 +- .../tests/unit/test_backup.py | 51 ++++++++--- .../tests/unit/test_policy.py | 6 +- .../tests/unit/test_table.py | 8 +- 24 files changed, 262 insertions(+), 144 deletions(-) diff --git a/packages/google-cloud-bigtable/docs/conf.py b/packages/google-cloud-bigtable/docs/conf.py index a2df032739e9..e6a3d0d1a3d0 100644 --- a/packages/google-cloud-bigtable/docs/conf.py +++ b/packages/google-cloud-bigtable/docs/conf.py @@ -346,7 +346,10 @@ intersphinx_mapping = { "python": ("http://python.readthedocs.org/en/latest/", None), "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), - "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None,), + "google.api_core": ( + "https://googleapis.dev/python/google-api-core/latest/", + None, + ), "grpc": ("https://grpc.io/grpc/python/", None), } diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/backup.py b/packages/google-cloud-bigtable/google/cloud/bigtable/backup.py index c6a2826dd56d..03a1c894edd4 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/backup.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/backup.py @@ -208,7 +208,7 @@ def size_bytes(self): @property def state(self): - """ The current state of this Backup. + """The current state of this Backup. :rtype: :class:`~google.cloud.bigtable_admin_v2.gapic.enums.Backup.State` :returns: The current state of this Backup. @@ -358,7 +358,8 @@ def update_expire_time(self, new_expire_time): :param new_expire_time: the new expiration time timestamp """ backup_update = table_pb2.Backup( - name=self.name, expire_time=_datetime_to_pb_timestamp(new_expire_time), + name=self.name, + expire_time=_datetime_to_pb_timestamp(new_expire_time), ) update_mask = field_mask_pb2.FieldMask(paths=["expire_time"]) api = self._instance._client.table_admin_client diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py b/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py index 0994e289d112..782cb979ab47 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py @@ -25,7 +25,7 @@ class MaxMutationsError(ValueError): class MutationsBatcher(object): - """ A MutationsBatcher is used in batch cases where the number of mutations + """A MutationsBatcher is used in batch cases where the number of mutations is large or unknown. 
It will store DirectRows in memory until one of the size limits is reached, or an explicit call to flush() is performed. When a flush event occurs, the DirectRows in memory will be sent to Cloud @@ -65,7 +65,7 @@ def __init__(self, table, flush_count=FLUSH_COUNT, max_row_bytes=MAX_ROW_BYTES): self.max_row_bytes = max_row_bytes def mutate(self, row): - """ Add a row to the batch. If the current batch meets one of the size + """Add a row to the batch. If the current batch meets one of the size limits, the batch is sent synchronously. For example: @@ -105,7 +105,7 @@ def mutate(self, row): self.flush() def mutate_rows(self, rows): - """ Add a row to the batch. If the current batch meets one of the size + """Add a row to the batch. If the current batch meets one of the size limits, the batch is sent synchronously. For example: @@ -130,7 +130,7 @@ def mutate_rows(self, rows): self.mutate(row) def flush(self): - """ Sends the current. batch to Cloud Bigtable. + """Sends the current. batch to Cloud Bigtable. For example: .. literalinclude:: snippets.py diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py index bbb830519e59..2ee6e7c77926 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py @@ -175,7 +175,9 @@ def __init__( self._channel = channel self.SCOPE = self._get_scopes() super(Client, self).__init__( - project=project, credentials=credentials, client_options=client_options, + project=project, + credentials=credentials, + client_options=client_options, ) def _get_scopes(self): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row.py index 92f5b818b08e..b28b86aa2cf5 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row.py @@ -295,7 +295,7 @@ def _get_mutations(self, state=None): # pylint: disable=unused-argument return self._pb_mutations def get_mutations_size(self): - """ Gets the total mutations size for current row + """Gets the total mutations size for current row For example: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py index 38bf859563fa..04824e1bedda 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py @@ -595,7 +595,7 @@ def _copy_from_previous(self, cell): class _ReadRowsRequestManager(object): - """ Update the ReadRowsRequest message in case of failures by + """Update the ReadRowsRequest message in case of failures by filtering the already read keys. 
:type message: class:`data_messages_v2_pb2.ReadRowsRequest` @@ -617,8 +617,7 @@ def __init__(self, message, last_scanned_key, rows_read_so_far): self.rows_read_so_far = rows_read_so_far def build_updated_request(self): - """ Updates the given message request as per last scanned key - """ + """Updates the given message request as per last scanned key""" r_kwargs = { "table_name": self.message.table_name, "filter": self.message.filter, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py index 5de7dabffbf3..e229c805a05e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py @@ -19,10 +19,10 @@ class RowSet(object): - """ Convenience wrapper of google.bigtable.v2.RowSet + """Convenience wrapper of google.bigtable.v2.RowSet - Useful for creating a set of row keys and row ranges, which can - be passed to yield_rows method of class:`.Table.yield_rows`. + Useful for creating a set of row keys and row ranges, which can + be passed to yield_rows method of class:`.Table.yield_rows`. """ def __init__(self): @@ -145,7 +145,7 @@ def _update_message_request(self, message): class RowRange(object): - """ Convenience wrapper of google.bigtable.v2.RowRange + """Convenience wrapper of google.bigtable.v2.RowRange :type start_key: bytes :param start_key: (Optional) Start key of the row range. If left empty, @@ -195,7 +195,7 @@ def __ne__(self, other): return not self == other def get_range_kwargs(self): - """ Convert row range object to dict which can be passed to + """Convert row range object to dict which can be passed to google.bigtable.v2.RowRange add method. """ range_kwargs = {} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py index 8edb3c168d4e..d27154d5304c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py @@ -130,7 +130,8 @@ def location_path(cls, project, location): def project_path(cls, project): """Return a fully-qualified project string.""" return google.api_core.path_template.expand( - "projects/{project}", project=project, + "projects/{project}", + project=project, ) def __init__( @@ -220,7 +221,9 @@ def __init__( self.transport = transport else: self.transport = bigtable_instance_admin_grpc_transport.BigtableInstanceAdminGrpcTransport( - address=api_endpoint, channel=channel, credentials=credentials, + address=api_endpoint, + channel=channel, + credentials=credentials, ) if client_info is None: @@ -416,7 +419,9 @@ def get_instance( client_info=self._client_info, ) - request = bigtable_instance_admin_pb2.GetInstanceRequest(name=name,) + request = bigtable_instance_admin_pb2.GetInstanceRequest( + name=name, + ) if metadata is None: metadata = [] metadata = list(metadata) @@ -489,7 +494,8 @@ def list_instances( ) request = bigtable_instance_admin_pb2.ListInstancesRequest( - parent=parent, page_token=page_token, + parent=parent, + page_token=page_token, ) if metadata is None: metadata = [] @@ -683,7 +689,8 @@ def partial_update_instance( ) request = bigtable_instance_admin_pb2.PartialUpdateInstanceRequest( - instance=instance, update_mask=update_mask, + instance=instance, + 
update_mask=update_mask, ) if metadata is None: metadata = [] @@ -757,7 +764,9 @@ def delete_instance( client_info=self._client_info, ) - request = bigtable_instance_admin_pb2.DeleteInstanceRequest(name=name,) + request = bigtable_instance_admin_pb2.DeleteInstanceRequest( + name=name, + ) if metadata is None: metadata = [] metadata = list(metadata) @@ -854,7 +863,9 @@ def create_cluster( ) request = bigtable_instance_admin_pb2.CreateClusterRequest( - parent=parent, cluster_id=cluster_id, cluster=cluster, + parent=parent, + cluster_id=cluster_id, + cluster=cluster, ) if metadata is None: metadata = [] @@ -931,7 +942,9 @@ def get_cluster( client_info=self._client_info, ) - request = bigtable_instance_admin_pb2.GetClusterRequest(name=name,) + request = bigtable_instance_admin_pb2.GetClusterRequest( + name=name, + ) if metadata is None: metadata = [] metadata = list(metadata) @@ -1007,7 +1020,8 @@ def list_clusters( ) request = bigtable_instance_admin_pb2.ListClustersRequest( - parent=parent, page_token=page_token, + parent=parent, + page_token=page_token, ) if metadata is None: metadata = [] @@ -1181,7 +1195,9 @@ def delete_cluster( client_info=self._client_info, ) - request = bigtable_instance_admin_pb2.DeleteClusterRequest(name=name,) + request = bigtable_instance_admin_pb2.DeleteClusterRequest( + name=name, + ) if metadata is None: metadata = [] metadata = list(metadata) @@ -1346,7 +1362,9 @@ def get_app_profile( client_info=self._client_info, ) - request = bigtable_instance_admin_pb2.GetAppProfileRequest(name=name,) + request = bigtable_instance_admin_pb2.GetAppProfileRequest( + name=name, + ) if metadata is None: metadata = [] metadata = list(metadata) @@ -1441,7 +1459,8 @@ def list_app_profiles( ) request = bigtable_instance_admin_pb2.ListAppProfilesRequest( - parent=parent, page_size=page_size, + parent=parent, + page_size=page_size, ) if metadata is None: metadata = [] @@ -1627,7 +1646,8 @@ def delete_app_profile( ) request = bigtable_instance_admin_pb2.DeleteAppProfileRequest( - name=name, ignore_warnings=ignore_warnings, + name=name, + ignore_warnings=ignore_warnings, ) if metadata is None: metadata = [] @@ -1707,7 +1727,8 @@ def get_iam_policy( ) request = iam_policy_pb2.GetIamPolicyRequest( - resource=resource, options=options_, + resource=resource, + options=options_, ) if metadata is None: metadata = [] @@ -1791,7 +1812,10 @@ def set_iam_policy( client_info=self._client_info, ) - request = iam_policy_pb2.SetIamPolicyRequest(resource=resource, policy=policy,) + request = iam_policy_pb2.SetIamPolicyRequest( + resource=resource, + policy=policy, + ) if metadata is None: metadata = [] metadata = list(metadata) @@ -1871,7 +1895,8 @@ def test_iam_permissions( ) request = iam_policy_pb2.TestIamPermissionsRequest( - resource=resource, permissions=permissions, + resource=resource, + permissions=permissions, ) if metadata is None: metadata = [] diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py index cac517314bc6..acbc4b26f385 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py @@ -232,8 +232,12 @@ def __init__( ) self.transport = transport else: - self.transport = bigtable_table_admin_grpc_transport.BigtableTableAdminGrpcTransport( - address=api_endpoint, 
channel=channel, credentials=credentials, + self.transport = ( + bigtable_table_admin_grpc_transport.BigtableTableAdminGrpcTransport( + address=api_endpoint, + channel=channel, + credentials=credentials, + ) ) if client_info is None: @@ -457,7 +461,9 @@ def create_table_from_snapshot( ) request = bigtable_table_admin_pb2.CreateTableFromSnapshotRequest( - parent=parent, table_id=table_id, source_snapshot=source_snapshot, + parent=parent, + table_id=table_id, + source_snapshot=source_snapshot, ) if metadata is None: metadata = [] @@ -560,7 +566,9 @@ def list_tables( ) request = bigtable_table_admin_pb2.ListTablesRequest( - parent=parent, view=view, page_size=page_size, + parent=parent, + view=view, + page_size=page_size, ) if metadata is None: metadata = [] @@ -645,7 +653,10 @@ def get_table( client_info=self._client_info, ) - request = bigtable_table_admin_pb2.GetTableRequest(name=name, view=view,) + request = bigtable_table_admin_pb2.GetTableRequest( + name=name, + view=view, + ) if metadata is None: metadata = [] metadata = list(metadata) @@ -712,7 +723,9 @@ def delete_table( client_info=self._client_info, ) - request = bigtable_table_admin_pb2.DeleteTableRequest(name=name,) + request = bigtable_table_admin_pb2.DeleteTableRequest( + name=name, + ) if metadata is None: metadata = [] metadata = list(metadata) @@ -798,7 +811,8 @@ def modify_column_families( ) request = bigtable_table_admin_pb2.ModifyColumnFamiliesRequest( - name=name, modifications=modifications, + name=name, + modifications=modifications, ) if metadata is None: metadata = [] @@ -961,7 +975,9 @@ def generate_consistency_token( client_info=self._client_info, ) - request = bigtable_table_admin_pb2.GenerateConsistencyTokenRequest(name=name,) + request = bigtable_table_admin_pb2.GenerateConsistencyTokenRequest( + name=name, + ) if metadata is None: metadata = [] metadata = list(metadata) @@ -1040,7 +1056,8 @@ def check_consistency( ) request = bigtable_table_admin_pb2.CheckConsistencyRequest( - name=name, consistency_token=consistency_token, + name=name, + consistency_token=consistency_token, ) if metadata is None: metadata = [] @@ -1121,7 +1138,8 @@ def get_iam_policy( ) request = iam_policy_pb2.GetIamPolicyRequest( - resource=resource, options=options_, + resource=resource, + options=options_, ) if metadata is None: metadata = [] @@ -1205,7 +1223,10 @@ def set_iam_policy( client_info=self._client_info, ) - request = iam_policy_pb2.SetIamPolicyRequest(resource=resource, policy=policy,) + request = iam_policy_pb2.SetIamPolicyRequest( + resource=resource, + policy=policy, + ) if metadata is None: metadata = [] metadata = list(metadata) @@ -1285,7 +1306,8 @@ def test_iam_permissions( ) request = iam_policy_pb2.TestIamPermissionsRequest( - resource=resource, permissions=permissions, + resource=resource, + permissions=permissions, ) if metadata is None: metadata = [] @@ -1485,7 +1507,9 @@ def get_snapshot( client_info=self._client_info, ) - request = bigtable_table_admin_pb2.GetSnapshotRequest(name=name,) + request = bigtable_table_admin_pb2.GetSnapshotRequest( + name=name, + ) if metadata is None: metadata = [] metadata = list(metadata) @@ -1586,7 +1610,8 @@ def list_snapshots( ) request = bigtable_table_admin_pb2.ListSnapshotsRequest( - parent=parent, page_size=page_size, + parent=parent, + page_size=page_size, ) if metadata is None: metadata = [] @@ -1672,7 +1697,9 @@ def delete_snapshot( client_info=self._client_info, ) - request = bigtable_table_admin_pb2.DeleteSnapshotRequest(name=name,) + request = 
bigtable_table_admin_pb2.DeleteSnapshotRequest( + name=name, + ) if metadata is None: metadata = [] metadata = list(metadata) @@ -1777,7 +1804,9 @@ def create_backup( ) request = bigtable_table_admin_pb2.CreateBackupRequest( - parent=parent, backup_id=backup_id, backup=backup, + parent=parent, + backup_id=backup_id, + backup=backup, ) if metadata is None: metadata = [] @@ -1854,7 +1883,9 @@ def get_backup( client_info=self._client_info, ) - request = bigtable_table_admin_pb2.GetBackupRequest(name=name,) + request = bigtable_table_admin_pb2.GetBackupRequest( + name=name, + ) if metadata is None: metadata = [] metadata = list(metadata) @@ -2000,7 +2031,10 @@ def list_backups( ) request = bigtable_table_admin_pb2.ListBackupsRequest( - parent=parent, filter=filter_, order_by=order_by, page_size=page_size, + parent=parent, + filter=filter_, + order_by=order_by, + page_size=page_size, ) if metadata is None: metadata = [] @@ -2102,7 +2136,8 @@ def update_backup( ) request = bigtable_table_admin_pb2.UpdateBackupRequest( - backup=backup, update_mask=update_mask, + backup=backup, + update_mask=update_mask, ) if metadata is None: metadata = [] @@ -2170,7 +2205,9 @@ def delete_backup( client_info=self._client_info, ) - request = bigtable_table_admin_pb2.DeleteBackupRequest(name=name,) + request = bigtable_table_admin_pb2.DeleteBackupRequest( + name=name, + ) if metadata is None: metadata = [] metadata = list(metadata) @@ -2263,10 +2300,14 @@ def restore_table( # Sanity check: We have some fields which are mutually exclusive; # raise ValueError if more than one is sent. - google.api_core.protobuf_helpers.check_oneof(backup=backup,) + google.api_core.protobuf_helpers.check_oneof( + backup=backup, + ) request = bigtable_table_admin_pb2.RestoreTableRequest( - parent=parent, table_id=table_id, backup=backup, + parent=parent, + table_id=table_id, + backup=backup, ) if metadata is None: metadata = [] diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py index 63590907a22c..38fe53f88c19 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py @@ -197,7 +197,9 @@ ), ], extensions=[], - nested_types=[_CREATEINSTANCEREQUEST_CLUSTERSENTRY,], + nested_types=[ + _CREATEINSTANCEREQUEST_CLUSTERSENTRY, + ], enum_types=[], serialized_options=None, is_extendable=False, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py index 8b1395579e5c..0337e5d4fcc7 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py @@ -132,22 +132,19 @@ class BigtableInstanceAdminServicer(object): """ def CreateInstance(self, request, context): - """Create an instance within a project. - """ + """Create an instance within a project.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def GetInstance(self, request, context): - """Gets information about an instance. 
- """ + """Gets information about an instance.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def ListInstances(self, request, context): - """Lists information about instances in a project. - """ + """Lists information about instances in a project.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") @@ -170,78 +167,67 @@ def PartialUpdateInstance(self, request, context): raise NotImplementedError("Method not implemented!") def DeleteInstance(self, request, context): - """Delete an instance from a project. - """ + """Delete an instance from a project.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def CreateCluster(self, request, context): - """Creates a cluster within an instance. - """ + """Creates a cluster within an instance.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def GetCluster(self, request, context): - """Gets information about a cluster. - """ + """Gets information about a cluster.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def ListClusters(self, request, context): - """Lists information about clusters in an instance. - """ + """Lists information about clusters in an instance.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def UpdateCluster(self, request, context): - """Updates a cluster within an instance. - """ + """Updates a cluster within an instance.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def DeleteCluster(self, request, context): - """Deletes a cluster from an instance. - """ + """Deletes a cluster from an instance.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def CreateAppProfile(self, request, context): - """Creates an app profile within an instance. - """ + """Creates an app profile within an instance.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def GetAppProfile(self, request, context): - """Gets information about an app profile. - """ + """Gets information about an app profile.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def ListAppProfiles(self, request, context): - """Lists information about app profiles in an instance. - """ + """Lists information about app profiles in an instance.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def UpdateAppProfile(self, request, context): - """Updates an app profile within an instance. 
- """ + """Updates an app profile within an instance.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def DeleteAppProfile(self, request, context): - """Deletes an app profile from an instance. - """ + """Deletes an app profile from an instance.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") @@ -263,8 +249,7 @@ def SetIamPolicy(self, request, context): raise NotImplementedError("Method not implemented!") def TestIamPermissions(self, request, context): - """Returns permissions that the caller has on the specified instance resource. - """ + """Returns permissions that the caller has on the specified instance resource.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py index 5ca167d87877..c7094eac20a3 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py @@ -184,7 +184,9 @@ ), ], extensions=[], - nested_types=[_CREATETABLEREQUEST_SPLIT,], + nested_types=[ + _CREATETABLEREQUEST_SPLIT, + ], enum_types=[], serialized_options=None, is_extendable=False, @@ -776,7 +778,9 @@ ), ], extensions=[], - nested_types=[_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION,], + nested_types=[ + _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION, + ], enum_types=[], serialized_options=None, is_extendable=False, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py index 2b8d46e20478..949de429e0de 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py @@ -174,22 +174,19 @@ def CreateTableFromSnapshot(self, request, context): raise NotImplementedError("Method not implemented!") def ListTables(self, request, context): - """Lists all tables served from a specified instance. - """ + """Lists all tables served from a specified instance.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def GetTable(self, request, context): - """Gets metadata information about the specified table. - """ + """Gets metadata information about the specified table.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def DeleteTable(self, request, context): - """Permanently deletes a specified table and all of its data. 
- """ + """Permanently deletes a specified table and all of its data.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") @@ -300,22 +297,19 @@ def CreateBackup(self, request, context): raise NotImplementedError("Method not implemented!") def GetBackup(self, request, context): - """Gets metadata on a pending or completed Cloud Bigtable Backup. - """ + """Gets metadata on a pending or completed Cloud Bigtable Backup.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def UpdateBackup(self, request, context): - """Updates a pending or completed Cloud Bigtable Backup. - """ + """Updates a pending or completed Cloud Bigtable Backup.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def DeleteBackup(self, request, context): - """Deletes a pending or completed Cloud Bigtable backup. - """ + """Deletes a pending or completed Cloud Bigtable backup.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") @@ -360,8 +354,7 @@ def SetIamPolicy(self, request, context): raise NotImplementedError("Method not implemented!") def TestIamPermissions(self, request, context): - """Returns permissions that the caller has on the specified table resource. - """ + """Returns permissions that the caller has on the specified table resource.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2.py index 09233cff5a02..e07dea1d1506 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2.py @@ -23,7 +23,9 @@ serialized_options=b'\n\034com.google.bigtable.admin.v2B\013CommonProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2\352\002"Google::Cloud::Bigtable::Admin::V2', create_key=_descriptor._internal_create_key, serialized_pb=b'\n1google/cloud/bigtable_admin_v2/proto/common.proto\x12\x18google.bigtable.admin.v2\x1a\x1fgoogle/protobuf/timestamp.proto"\x8b\x01\n\x11OperationProgress\x12\x18\n\x10progress_percent\x18\x01 \x01(\x05\x12.\n\nstart_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp*=\n\x0bStorageType\x12\x1c\n\x18STORAGE_TYPE_UNSPECIFIED\x10\x00\x12\x07\n\x03SSD\x10\x01\x12\x07\n\x03HDD\x10\x02\x42\xd3\x01\n\x1c\x63om.google.bigtable.admin.v2B\x0b\x43ommonProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2\xea\x02"Google::Cloud::Bigtable::Admin::V2b\x06proto3', - dependencies=[google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,], + dependencies=[ + google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, + ], ) _STORAGETYPE = _descriptor.EnumDescriptor( diff --git 
a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py index e0138e0fb0f7..4f3ce0a5b254 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py @@ -332,8 +332,13 @@ ), ], extensions=[], - nested_types=[_INSTANCE_LABELSENTRY,], - enum_types=[_INSTANCE_STATE, _INSTANCE_TYPE,], + nested_types=[ + _INSTANCE_LABELSENTRY, + ], + enum_types=[ + _INSTANCE_STATE, + _INSTANCE_TYPE, + ], serialized_options=b"\352AK\n bigtable.googleapis.com/Instance\022'projects/{project}/instances/{instance}", is_extendable=False, syntax="proto3", @@ -450,7 +455,9 @@ ], extensions=[], nested_types=[], - enum_types=[_CLUSTER_STATE,], + enum_types=[ + _CLUSTER_STATE, + ], serialized_options=b"\352A]\n\037bigtable.googleapis.com/Cluster\022:projects/{project}/instances/{instance}/clusters/{cluster}", is_extendable=False, syntax="proto3", diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py index 67238a81e909..71191acbabb1 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py @@ -397,7 +397,9 @@ ], extensions=[], nested_types=[], - enum_types=[_TABLE_CLUSTERSTATE_REPLICATIONSTATE,], + enum_types=[ + _TABLE_CLUSTERSTATE_REPLICATIONSTATE, + ], serialized_options=None, is_extendable=False, syntax="proto3", @@ -635,7 +637,10 @@ _TABLE_CLUSTERSTATESENTRY, _TABLE_COLUMNFAMILIESENTRY, ], - enum_types=[_TABLE_TIMESTAMPGRANULARITY, _TABLE_VIEW,], + enum_types=[ + _TABLE_TIMESTAMPGRANULARITY, + _TABLE_VIEW, + ], serialized_options=b"\352AW\n\035bigtable.googleapis.com/Table\0226projects/{project}/instances/{instance}/tables/{table}", is_extendable=False, syntax="proto3", @@ -853,7 +858,10 @@ ), ], extensions=[], - nested_types=[_GCRULE_INTERSECTION, _GCRULE_UNION,], + nested_types=[ + _GCRULE_INTERSECTION, + _GCRULE_UNION, + ], enum_types=[], serialized_options=None, is_extendable=False, @@ -1018,7 +1026,9 @@ ], extensions=[], nested_types=[], - enum_types=[_SNAPSHOT_STATE,], + enum_types=[ + _SNAPSHOT_STATE, + ], serialized_options=b"\352As\n bigtable.googleapis.com/Snapshot\022Oprojects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}", is_extendable=False, syntax="proto3", @@ -1173,7 +1183,9 @@ ], extensions=[], nested_types=[], - enum_types=[_BACKUP_STATE,], + enum_types=[ + _BACKUP_STATE, + ], serialized_options=b"\352Am\n\036bigtable.googleapis.com/Backup\022Kprojects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}", is_extendable=False, syntax="proto3", diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py index f02e0048f5bb..7e544c99e3ce 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py @@ -168,7 +168,9 @@ def __init__( self.transport = transport else: self.transport = bigtable_grpc_transport.BigtableGrpcTransport( - address=api_endpoint, channel=channel, credentials=credentials, + address=api_endpoint, + channel=channel, + 
credentials=credentials, ) if client_info is None: @@ -356,7 +358,8 @@ def sample_row_keys( ) request = bigtable_pb2.SampleRowKeysRequest( - table_name=table_name, app_profile_id=app_profile_id, + table_name=table_name, + app_profile_id=app_profile_id, ) if metadata is None: metadata = [] @@ -541,7 +544,9 @@ def mutate_rows( ) request = bigtable_pb2.MutateRowsRequest( - table_name=table_name, entries=entries, app_profile_id=app_profile_id, + table_name=table_name, + entries=entries, + app_profile_id=app_profile_id, ) if metadata is None: metadata = [] diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py index ba711b20ca71..f6d825d89ebf 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py @@ -408,7 +408,9 @@ ), ], extensions=[], - nested_types=[_READROWSRESPONSE_CELLCHUNK,], + nested_types=[ + _READROWSRESPONSE_CELLCHUNK, + ], enum_types=[], serialized_options=None, is_extendable=False, @@ -785,7 +787,9 @@ ), ], extensions=[], - nested_types=[_MUTATEROWSREQUEST_ENTRY,], + nested_types=[ + _MUTATEROWSREQUEST_ENTRY, + ], enum_types=[], serialized_options=None, is_extendable=False, @@ -885,7 +889,9 @@ ), ], extensions=[], - nested_types=[_MUTATEROWSRESPONSE_ENTRY,], + nested_types=[ + _MUTATEROWSRESPONSE_ENTRY, + ], enum_types=[], serialized_options=None, is_extendable=False, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2_grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2_grpc.py index db4ee99f3554..2a094a7f9d48 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2_grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2_grpc.py @@ -8,8 +8,7 @@ class BigtableStub(object): - """Service for reading from and writing to existing Bigtable tables. - """ + """Service for reading from and writing to existing Bigtable tables.""" def __init__(self, channel): """Constructor. @@ -50,8 +49,7 @@ def __init__(self, channel): class BigtableServicer(object): - """Service for reading from and writing to existing Bigtable tables. - """ + """Service for reading from and writing to existing Bigtable tables.""" def ReadRows(self, request, context): """Streams back the contents of all requested rows in key order, optionally @@ -92,8 +90,7 @@ def MutateRows(self, request, context): raise NotImplementedError("Method not implemented!") def CheckAndMutateRow(self, request, context): - """Mutates a row atomically based on the output of a predicate Reader filter. - """ + """Mutates a row atomically based on the output of a predicate Reader filter.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") @@ -151,8 +148,7 @@ def add_BigtableServicer_to_server(servicer, server): # This class is part of an EXPERIMENTAL API. class Bigtable(object): - """Service for reading from and writing to existing Bigtable tables. 
- """ + """Service for reading from and writing to existing Bigtable tables.""" @staticmethod def ReadRows( diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py index a64f9b10e2df..5f62756a88d4 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py @@ -1294,7 +1294,11 @@ ), ], extensions=[], - nested_types=[_ROWFILTER_CHAIN, _ROWFILTER_INTERLEAVE, _ROWFILTER_CONDITION,], + nested_types=[ + _ROWFILTER_CHAIN, + _ROWFILTER_INTERLEAVE, + _ROWFILTER_CONDITION, + ], enum_types=[], serialized_options=None, is_extendable=False, diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index a4d60cb48878..79b395fba6e2 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -4,7 +4,7 @@ "git": { "name": ".", "remote": "https://github.com/googleapis/python-bigtable.git", - "sha": "3ecca7a7b52b0f4fc38db5c5016622b994c1a8aa" + "sha": "dfe658a2b1270eda7a8a084aca28d65b3297a04f" } }, { diff --git a/packages/google-cloud-bigtable/tests/unit/test_backup.py b/packages/google-cloud-bigtable/tests/unit/test_backup.py index 587202a840e0..2f263dffdc8a 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_backup.py +++ b/packages/google-cloud-bigtable/tests/unit/test_backup.py @@ -186,7 +186,10 @@ def test_property_cluster_setter(self): self.assertEqual(backup.cluster, self.CLUSTER_ID) def test_property_parent_none(self): - backup = self._make_one(self.BACKUP_ID, _Instance(self.INSTANCE_NAME),) + backup = self._make_one( + self.BACKUP_ID, + _Instance(self.INSTANCE_NAME), + ) self.assertIsNone(backup.parent) def test_property_parent_w_cluster(self): @@ -318,7 +321,9 @@ def test_create_grpc_error(self): backup.create(self.CLUSTER_ID) api.create_backup.assert_called_once_with( - parent=self.CLUSTER_NAME, backup_id=self.BACKUP_ID, backup=backup_pb, + parent=self.CLUSTER_NAME, + backup_id=self.BACKUP_ID, + backup=backup_pb, ) def test_create_already_exists(self): @@ -347,7 +352,9 @@ def test_create_already_exists(self): backup.create(self.CLUSTER_ID) api.create_backup.assert_called_once_with( - parent=self.CLUSTER_NAME, backup_id=self.BACKUP_ID, backup=backup_pb, + parent=self.CLUSTER_NAME, + backup_id=self.BACKUP_ID, + backup=backup_pb, ) def test_create_instance_not_found(self): @@ -376,7 +383,9 @@ def test_create_instance_not_found(self): backup.create(self.CLUSTER_ID) api.create_backup.assert_called_once_with( - parent=self.CLUSTER_NAME, backup_id=self.BACKUP_ID, backup=backup_pb, + parent=self.CLUSTER_NAME, + backup_id=self.BACKUP_ID, + backup=backup_pb, ) def test_create_cluster_not_set(self): @@ -402,7 +411,9 @@ def test_create_table_not_set(self): def test_create_expire_time_not_set(self): backup = self._make_one( - self.BACKUP_ID, _Instance(self.INSTANCE_NAME), table_id=self.TABLE_ID, + self.BACKUP_ID, + _Instance(self.INSTANCE_NAME), + table_id=self.TABLE_ID, ) with self.assertRaises(ValueError): @@ -435,7 +446,9 @@ def test_create_success(self): self.assertIs(future, op_future) api.create_backup.assert_called_once_with( - parent=self.CLUSTER_NAME, backup_id=self.BACKUP_ID, backup=backup_pb, + parent=self.CLUSTER_NAME, + backup_id=self.BACKUP_ID, + backup=backup_pb, ) def test_exists_grpc_error(self): @@ -598,11 +611,13 @@ def test_update_expire_time_grpc_error(self): 
backup.update_expire_time(expire_time) backup_update = table_pb2.Backup( - name=self.BACKUP_NAME, expire_time=_datetime_to_pb_timestamp(expire_time), + name=self.BACKUP_NAME, + expire_time=_datetime_to_pb_timestamp(expire_time), ) update_mask = field_mask_pb2.FieldMask(paths=["expire_time"]) api.update_backup.assert_called_once_with( - backup_update, update_mask, + backup_update, + update_mask, ) def test_update_expire_time_not_found(self): @@ -622,11 +637,13 @@ def test_update_expire_time_not_found(self): backup.update_expire_time(expire_time) backup_update = table_pb2.Backup( - name=self.BACKUP_NAME, expire_time=_datetime_to_pb_timestamp(expire_time), + name=self.BACKUP_NAME, + expire_time=_datetime_to_pb_timestamp(expire_time), ) update_mask = field_mask_pb2.FieldMask(paths=["expire_time"]) api.update_backup.assert_called_once_with( - backup_update, update_mask, + backup_update, + update_mask, ) def test_update_expire_time_success(self): @@ -644,11 +661,13 @@ def test_update_expire_time_success(self): backup.update_expire_time(expire_time) backup_update = table_pb2.Backup( - name=self.BACKUP_NAME, expire_time=_datetime_to_pb_timestamp(expire_time), + name=self.BACKUP_NAME, + expire_time=_datetime_to_pb_timestamp(expire_time), ) update_mask = field_mask_pb2.FieldMask(paths=["expire_time"]) api.update_backup.assert_called_once_with( - backup_update, update_mask, + backup_update, + update_mask, ) def test_restore_grpc_error(self): @@ -672,7 +691,9 @@ def test_restore_grpc_error(self): backup.restore(self.TABLE_ID) api.restore_table.assert_called_once_with( - parent=self.INSTANCE_NAME, table_id=self.TABLE_ID, backup=self.BACKUP_NAME, + parent=self.INSTANCE_NAME, + table_id=self.TABLE_ID, + backup=self.BACKUP_NAME, ) def test_restore_cluster_not_set(self): @@ -708,7 +729,9 @@ def test_restore_success(self): self.assertIs(future, op_future) api.restore_table.assert_called_once_with( - parent=self.INSTANCE_NAME, table_id=self.TABLE_ID, backup=self.BACKUP_NAME, + parent=self.INSTANCE_NAME, + table_id=self.TABLE_ID, + backup=self.BACKUP_NAME, ) diff --git a/packages/google-cloud-bigtable/tests/unit/test_policy.py b/packages/google-cloud-bigtable/tests/unit/test_policy.py index 63f9ba03fb23..939e02a9d742 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_policy.py +++ b/packages/google-cloud-bigtable/tests/unit/test_policy.py @@ -147,7 +147,11 @@ def test_from_pb_with_condition(self): }, } ] - message = policy_pb2.Policy(etag=ETAG, version=VERSION, bindings=BINDINGS,) + message = policy_pb2.Policy( + etag=ETAG, + version=VERSION, + bindings=BINDINGS, + ) klass = self._get_target_class() policy = klass.from_pb(message) self.assertEqual(policy.etag, ETAG) diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index f7377bc760f7..0ea45927dfa5 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -1187,7 +1187,9 @@ def test_backup_factory_non_defaults(self): table = self._make_one(self.TABLE_ID, instance) timestamp = datetime.datetime.utcnow().replace(tzinfo=UTC) backup = table.backup( - self.BACKUP_ID, cluster_id=self.CLUSTER_ID, expire_time=timestamp, + self.BACKUP_ID, + cluster_id=self.CLUSTER_ID, + expire_time=timestamp, ) self.assertIsInstance(backup, Backup) @@ -1295,7 +1297,9 @@ def _restore_helper(self, backup_name=None): self.assertIs(future, op_future) api.restore_table.assert_called_once_with( - parent=self.INSTANCE_NAME, 
table_id=self.TABLE_ID, backup=self.BACKUP_NAME, + parent=self.INSTANCE_NAME, + table_id=self.TABLE_ID, + backup=self.BACKUP_NAME, ) def test_restore_table_w_backup_id(self): From 040f097ba1bdc0796de8fc10a89b602bcafa9286 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Mon, 31 Aug 2020 18:52:03 +0200 Subject: [PATCH 341/892] chore(deps): update dependency google-cloud-monitoring to v1.1.0 (#113) This PR contains the following updates: | Package | Update | Change | |---|---|---| | [google-cloud-monitoring](https://togithub.com/googleapis/python-monitoring) | minor | `==1.0.0` -> `==1.1.0` | --- ### Release Notes
googleapis/python-monitoring ### [`v1.1.0`](https://togithub.com/googleapis/python-monitoring/blob/master/CHANGELOG.md#​110-httpswwwgithubcomgoogleapispython-monitoringcomparev100v110-2020-08-20) [Compare Source](https://togithub.com/googleapis/python-monitoring/compare/v1.0.0...v1.1.0) ##### Features - add "not equal" support to the query filter ([#​11](https://www.github.com/googleapis/python-monitoring/issues/11)) ([e293f7f](https://www.github.com/googleapis/python-monitoring/commit/e293f7f90b0d1ccb285c16a32251e442fda06a8e))
--- ### Renovate configuration :date: **Schedule**: At any time (no schedule defined). :vertical_traffic_light: **Automerge**: Disabled by config. Please merge this manually once you are satisfied. :recycle: **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. :no_bell: **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [WhiteSource Renovate](https://renovate.whitesourcesoftware.com). View repository job log [here](https://app.renovatebot.com/dashboard#github/googleapis/python-bigtable). --- .../google-cloud-bigtable/samples/metricscaler/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index cb9f6488ee45..b76a5af38d43 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ google-cloud-bigtable==1.4.0 -google-cloud-monitoring==1.0.0 +google-cloud-monitoring==1.1.0 From 9ee2f2175c3dcb2ac18b8461f2ad5df2f97863c3 Mon Sep 17 00:00:00 2001 From: kolea2 <45548808+kolea2@users.noreply.github.com> Date: Wed, 2 Sep 2020 19:52:43 +0000 Subject: [PATCH 342/892] chore: readme cleanup (#120) --- .../google-cloud-bigtable/.repo-metadata.json | 113 ++++++++++-------- .../samples/quickstart/README.md | 3 +- .../samples/tableadmin/README.md | 3 +- 3 files changed, 65 insertions(+), 54 deletions(-) diff --git a/packages/google-cloud-bigtable/.repo-metadata.json b/packages/google-cloud-bigtable/.repo-metadata.json index 7c1f86991b0d..33b5c73a3fae 100644 --- a/packages/google-cloud-bigtable/.repo-metadata.json +++ b/packages/google-cloud-bigtable/.repo-metadata.json @@ -11,57 +11,66 @@ "api_id": "bigtable.googleapis.com", "requires_billing": true, "samples": [ - {"name": "Hello World in Cloud Bigtable", - "description": "Demonstrates how to connect to Cloud Bigtable and run some basic operations. More information available at: https://cloud.google.com/bigtable/docs/samples-python-hello", - "file": "main.py", - "runnable": true, - "custom_content": "
usage: main.py [-h] [--table TABLE] project_id instance_id
Demonstrates how to connect to Cloud Bigtable and run some basic operations.
Prerequisites: - Create a Cloud Bigtable cluster.
https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google
Application Default Credentials.
https://developers.google.com/identity/protocols/application-default-
credentials


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Table to create and destroy. (default: Hello-Bigtable)
", - "override_path": "hello"}, - - {"name": "Hello World using HappyBase", - "description": "This sample demonstrates using the Google Cloud Client Library HappyBase package, an implementation of the HappyBase API to connect to and interact with Cloud Bigtable. More information available at: https://cloud.google.com/bigtable/docs/samples-python-hello-happybase", - "file": "main.py", - "runnable": true, - "custom_content": "
usage: main.py [-h] [--table TABLE] project_id instance_id
Demonstrates how to connect to Cloud Bigtable and run some basic operations.
Prerequisites: - Create a Cloud Bigtable cluster.
https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google
Application Default Credentials.
https://developers.google.com/identity/protocols/application-default-
credentials


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Table to create and destroy. (default: Hello-Bigtable)
", - "override_path": "hello_happybase"}, - - {"name": "cbt Command Demonstration", - "description": "This page explains how to use the cbt command to connect to a Cloud Bigtable instance, perform basic administrative tasks, and read and write data in a table. More information about this quickstart is available at https://cloud.google.com/bigtable/docs/quickstart-cbt", - "file": "instanceadmin.py", - "runnable": true, - "custom_content" : "
usage: instanceadmin.py [-h] [run] [dev-instance] [del-instance] [add-cluster] [del-cluster] project_id instance_id cluster_id
Demonstrates how to connect to Cloud Bigtable and run some basic operations.
Prerequisites: - Create a Cloud Bigtable cluster.
https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google
Application Default Credentials.
https://developers.google.com/identity/protocols/application-default-
credentials


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Table to create and destroy. (default: Hello-Bigtable)
", - "override_path": "instanceadmin"}, - - {"name": "Metric Scaler", - "description": "This sample demonstrates how to use Stackdriver Monitoring to scale Cloud Bigtable based on CPU usage.", - "file": "metricscaler.py", - "runnable": true, - "custom_content": "
usage: metricscaler.py [-h] [--high_cpu_threshold HIGH_CPU_THRESHOLD] [--low_cpu_threshold LOW_CPU_THRESHOLD] [--short_sleep SHORT_SLEEP] [--long_sleep LONG_SLEEP] bigtable_instance bigtable_cluster
usage: metricscaler.py [-h] [--high_cpu_threshold HIGH_CPU_THRESHOLD]
                       [--low_cpu_threshold LOW_CPU_THRESHOLD]
                       [--short_sleep SHORT_SLEEP] [--long_sleep LONG_SLEEP]
                       bigtable_instance bigtable_cluster


Scales Cloud Bigtable clusters based on CPU usage.


positional arguments:
  bigtable_instance     ID of the Cloud Bigtable instance to connect to.
  bigtable_cluster      ID of the Cloud Bigtable cluster to connect to.


optional arguments:
  -h, --help            show this help message and exit
  --high_cpu_threshold HIGH_CPU_THRESHOLD
                        If Cloud Bigtable CPU usage is above this threshold,
                        scale up
  --low_cpu_threshold LOW_CPU_THRESHOLD
                        If Cloud Bigtable CPU usage is below this threshold,
                        scale down
  --short_sleep SHORT_SLEEP
                        How long to sleep in seconds between checking metrics
                        after no scale operation
  --long_sleep LONG_SLEEP
                        How long to sleep in seconds between checking metrics
                        after a scaling operation
", - "override_path": "metricscaler"}, - - {"name": "Quickstart", - "description": "Demonstrates of Cloud Bigtable. This sample creates a Bigtable client, connects to an instance and then to a table, then closes the connection.", - "file": "main.py", - "runnable": true, - "custom_content": "
usage: main.py [-h] [--table TABLE] project_id instance_id
usage: main.py [-h] [--table TABLE] project_id instance_id


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Existing table used in the quickstart. (default: my-table)
", - "override_path": "quickstart"}, - - {"name": "Quickstart using HappyBase", - "description": "Demonstrates of Cloud Bigtable using HappyBase. This sample creates a Bigtable client, connects to an instance and then to a table, then closes the connection.", - "file": "main.py", - "runnable": true, - "custom_content": "
usage: main.py [-h] [--table TABLE] project_id instance_id
usage: main.py [-h] [--table TABLE] project_id instance_id


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Existing table used in the quickstart. (default: my-table)usage: tableadmin.py [-h] [run] [delete] [--table TABLE] project_id instance_id
usage: tableadmin.py [-h] [run] [delete] [--table TABLE] project_id instance_id


Demonstrates how to connect to Cloud Bigtable and run some basic operations.
Prerequisites: - Create a Cloud Bigtable cluster.
https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google
Application Default Credentials.
https://developers.google.com/identity/protocols/application-default-
credentials


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Table to create and destroy. (default: Hello-Bigtable)
", - "override_path": "tableadmin"} + { + "name": "Hello World in Cloud Bigtable", + "description": "Demonstrates how to connect to Cloud Bigtable and run some basic operations. More information available at: https://cloud.google.com/bigtable/docs/samples-python-hello", + "file": "main.py", + "runnable": true, + "custom_content": "
usage: main.py [-h] [--table TABLE] project_id instance_id
Demonstrates how to connect to Cloud Bigtable and run some basic operations.
Prerequisites: - Create a Cloud Bigtable cluster.
https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google
Application Default Credentials.
https://developers.google.com/identity/protocols/application-default-
credentials


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Table to create and destroy. (default: Hello-Bigtable)
", + "override_path": "hello" + }, + { + "name": "Hello World using HappyBase", + "description": "This sample demonstrates using the Google Cloud Client Library HappyBase package, an implementation of the HappyBase API to connect to and interact with Cloud Bigtable. More information available at: https://cloud.google.com/bigtable/docs/samples-python-hello-happybase", + "file": "main.py", + "runnable": true, + "custom_content": "
usage: main.py [-h] [--table TABLE] project_id instance_id
Demonstrates how to connect to Cloud Bigtable and run some basic operations.
Prerequisites: - Create a Cloud Bigtable cluster.
https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google
Application Default Credentials.
https://developers.google.com/identity/protocols/application-default-
credentials


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Table to create and destroy. (default: Hello-Bigtable)
", + "override_path": "hello_happybase" + }, + { + "name": "cbt Command Demonstration", + "description": "This page explains how to use the cbt command to connect to a Cloud Bigtable instance, perform basic administrative tasks, and read and write data in a table. More information about this quickstart is available at https://cloud.google.com/bigtable/docs/quickstart-cbt", + "file": "instanceadmin.py", + "runnable": true, + "custom_content": "
usage: instanceadmin.py [-h] [run] [dev-instance] [del-instance] [add-cluster] [del-cluster] project_id instance_id cluster_id
Demonstrates how to connect to Cloud Bigtable and run some basic operations.
Prerequisites: - Create a Cloud Bigtable cluster.
https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google
Application Default Credentials.
https://developers.google.com/identity/protocols/application-default-
credentials


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Table to create and destroy. (default: Hello-Bigtable)
", + "override_path": "instanceadmin" + }, + { + "name": "Metric Scaler", + "description": "This sample demonstrates how to use Stackdriver Monitoring to scale Cloud Bigtable based on CPU usage.", + "file": "metricscaler.py", + "runnable": true, + "custom_content": "
usage: metricscaler.py [-h] [--high_cpu_threshold HIGH_CPU_THRESHOLD]
                       [--low_cpu_threshold LOW_CPU_THRESHOLD]
                       [--short_sleep SHORT_SLEEP] [--long_sleep LONG_SLEEP]
                       bigtable_instance bigtable_cluster


Scales Cloud Bigtable clusters based on CPU usage.


positional arguments:
  bigtable_instance     ID of the Cloud Bigtable instance to connect to.
  bigtable_cluster      ID of the Cloud Bigtable cluster to connect to.


optional arguments:
  -h, --help            show this help message and exit
  --high_cpu_threshold HIGH_CPU_THRESHOLD
                        If Cloud Bigtable CPU usage is above this threshold,
                        scale up
  --low_cpu_threshold LOW_CPU_THRESHOLD
                        If Cloud Bigtable CPU usage is below this threshold,
                        scale down
  --short_sleep SHORT_SLEEP
                        How long to sleep in seconds between checking metrics
                        after no scale operation
  --long_sleep LONG_SLEEP
                        How long to sleep in seconds between checking metrics
                        after a scaling operation
", + "override_path": "metricscaler" + }, + { + "name": "Quickstart", + "description": "Demonstrates of Cloud Bigtable. This sample creates a Bigtable client, connects to an instance and then to a table, then closes the connection.", + "file": "main.py", + "runnable": true, + "custom_content": "
usage: main.py [-h] [--table TABLE] project_id instance_id 


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Existing table used in the quickstart. (default: my-table)
", + "override_path": "quickstart" + }, + { + "name": "Quickstart using HappyBase", + "description": "Demonstrates of Cloud Bigtable using HappyBase. This sample creates a Bigtable client, connects to an instance and then to a table, then closes the connection.", + "file": "main.py", + "runnable": true, + "custom_content": "
usage: main.py [-h] [--table TABLE] project_id instance_id


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Existing table used in the quickstart. (default: my-table)
usage: tableadmin.py [-h] [run] [delete] [--table TABLE] project_id instance_id


Demonstrates how to connect to Cloud Bigtable and run some basic operations.
Prerequisites: - Create a Cloud Bigtable cluster.
https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google
Application Default Credentials.
https://developers.google.com/identity/protocols/application-default-
credentials


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Table to create and destroy. (default: Hello-Bigtable)
", + "override_path": "tableadmin" + } ] } \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/quickstart/README.md b/packages/google-cloud-bigtable/samples/quickstart/README.md index 8eed6e201207..455a412f2bb5 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/README.md +++ b/packages/google-cloud-bigtable/samples/quickstart/README.md @@ -30,7 +30,8 @@ To run this sample: python main.py -
usage: main.py [-h] [--table TABLE] project_id instance_id


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Existing table used in the quickstart. (default: my-table)
+ +
usage: main.py [-h] [--table TABLE] project_id instance_id 


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Existing table used in the quickstart. (default: my-table)
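
For orientation, the quickstart itself reduces to a handful of client calls. The following is a minimal sketch rather than the sample's exact code; it assumes the public `google-cloud-bigtable` API and a pre-populated table containing a row keyed `r1` with a `cf1:c1` cell (those names are placeholders):

```python
from google.cloud import bigtable


def quickstart(project_id, instance_id, table_id="my-table"):
    # Create a Bigtable client and open the target instance and table.
    client = bigtable.Client(project=project_id, admin=True)
    instance = client.instance(instance_id)
    table = instance.table(table_id)

    # Read a single row by key and print one cell value.
    row = table.read_row(b"r1")
    if row is not None:
        print(row.cell_value("cf1", b"c1"))
```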
## Additional Information diff --git a/packages/google-cloud-bigtable/samples/tableadmin/README.md b/packages/google-cloud-bigtable/samples/tableadmin/README.md index 9bc9e38b78b9..1dee1ff2384a 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/README.md +++ b/packages/google-cloud-bigtable/samples/tableadmin/README.md @@ -30,7 +30,8 @@ To run this sample: python tableadmin.py -
usage: tableadmin.py [-h] [run] [delete] [--table TABLE] project_id instance_id


Demonstrates how to connect to Cloud Bigtable and run some basic operations.
Prerequisites: - Create a Cloud Bigtable cluster.
https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google
Application Default Credentials.
https://developers.google.com/identity/protocols/application-default-
credentials


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Table to create and destroy. (default: Hello-Bigtable)
+ +
usage: tableadmin.py [-h] [run] [delete] [--table TABLE] project_id instance_id 


Demonstrates how to connect to Cloud Bigtable and run some basic operations.
Prerequisites: - Create a Cloud Bigtable cluster.
https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google
Application Default Credentials.
https://developers.google.com/identity/protocols/application-default-
credentials


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Table to create and destroy. (default: Hello-Bigtable)
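
As a rough guide to the operations exercised above, here is a minimal sketch of a create/inspect/delete cycle. It assumes the public `google-cloud-bigtable` admin API; the family name `cf1` and the two-version GC rule are illustrative placeholders, not values taken from the sample:

```python
from google.cloud import bigtable
from google.cloud.bigtable import column_family


def run_table_admin(project_id, instance_id, table_id="Hello-Bigtable"):
    client = bigtable.Client(project=project_id, admin=True)
    table = client.instance(instance_id).table(table_id)

    # Create the table, then add a column family keeping at most two versions.
    table.create()
    cf1 = table.column_family("cf1", gc_rule=column_family.MaxVersionsGCRule(2))
    cf1.create()

    # Inspect the schema, then tear the table down again.
    print(sorted(table.list_column_families()))
    table.delete()
```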
## Additional Information From 79dcdea8cc0fcf376af4f1fa7f68332129d94df9 Mon Sep 17 00:00:00 2001 From: MF2199 <38331387+mf2199@users.noreply.github.com> Date: Wed, 2 Sep 2020 16:19:32 -0400 Subject: [PATCH 343/892] fix: pass timeout to 'PartialRowsData.response_iterator' (#16) --- .../google/cloud/bigtable/row_data.py | 9 ++++++++- .../google-cloud-bigtable/tests/unit/test_row_data.py | 9 ++++++++- packages/google-cloud-bigtable/tests/unit/test_table.py | 4 +++- 3 files changed, 19 insertions(+), 3 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py index 04824e1bedda..8760d77b0b6d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py @@ -404,7 +404,14 @@ def __init__(self, read_method, request, retry=DEFAULT_RETRY_READ_ROWS): self.read_method = read_method self.request = request self.retry = retry - self.response_iterator = read_method(request) + + # The `timeout` parameter must be somewhat greater than the value + # contained in `self.retry`, in order to avoid race-like condition and + # allow registering the first deadline error before invoking the retry. + # Otherwise there is a risk of entering an infinite loop that resets + # the timeout counter just before it being triggered. The increment + # by 1 second here is customary but should not be much less than that. + self.response_iterator = read_method(request, timeout=self.retry._deadline + 1) self.rows = {} self._state = self.STATE_NEW_ROW diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_data.py b/packages/google-cloud-bigtable/tests/unit/test_row_data.py index 40b2ffe30483..c59da844b8d9 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_data.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_data.py @@ -385,10 +385,16 @@ def test_constructor(self): self.assertEqual(partial_rows_data.retry, DEFAULT_RETRY_READ_ROWS) def test_constructor_with_retry(self): + from google.cloud.bigtable.row_data import DEFAULT_RETRY_READ_ROWS + client = _Client() client._data_stub = mock.MagicMock() - request = retry = object() + request = object() + retry = DEFAULT_RETRY_READ_ROWS partial_rows_data = self._make_one(client._data_stub.ReadRows, request, retry) + partial_rows_data.read_method.assert_called_once_with( + request, timeout=DEFAULT_RETRY_READ_ROWS.deadline + 1 + ) self.assertIs(partial_rows_data.request, request) self.assertEqual(partial_rows_data.rows, {}) self.assertEqual(partial_rows_data.retry, retry) @@ -471,6 +477,7 @@ def test_state_new_row_w_row(self): request = object() yrd = self._make_one(client._table_data_client.transport.read_rows, request) + self.assertEqual(yrd.retry._deadline, 60.0) yrd._response_iterator = iterator rows = [row for row in yrd] diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index 0ea45927dfa5..c99cd65913fc 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -654,6 +654,7 @@ def test_read_rows(self): from google.cloud.bigtable import table as MUT from google.cloud.bigtable_v2.gapic import bigtable_client from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.cloud.bigtable.row_data import DEFAULT_RETRY_READ_ROWS data_api = bigtable_client.BigtableClient(mock.Mock()) table_api = 
mock.create_autospec( @@ -670,7 +671,8 @@ def test_read_rows(self): table = self._make_one(self.TABLE_ID, instance, app_profile_id=app_profile_id) # Create request_pb - request = retry = object() # Returned by our mock. + request = object() # Returned by our mock. + retry = DEFAULT_RETRY_READ_ROWS mock_created = [] def mock_create_row_request(table_name, **kwargs): From 1f4dc9be3813ee1697e13e41cc39041e40be6189 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Mon, 14 Sep 2020 13:48:22 -0700 Subject: [PATCH 344/892] chore: auto generated readme updates (#121) autosynth cannot find the source of changes triggered by earlier changes in this repository, or by version upgrades to tools such as linters. --- .../samples/quickstart/README.md | 1 - .../samples/tableadmin/README.md | 1 - packages/google-cloud-bigtable/synth.metadata | 120 +++++++++++++++++- 3 files changed, 119 insertions(+), 3 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/quickstart/README.md b/packages/google-cloud-bigtable/samples/quickstart/README.md index 455a412f2bb5..c7e7aff29ae6 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/README.md +++ b/packages/google-cloud-bigtable/samples/quickstart/README.md @@ -30,7 +30,6 @@ To run this sample: python main.py -
usage: main.py [-h] [--table TABLE] project_id instance_id 


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Existing table used in the quickstart. (default: my-table)
## Additional Information diff --git a/packages/google-cloud-bigtable/samples/tableadmin/README.md b/packages/google-cloud-bigtable/samples/tableadmin/README.md index 1dee1ff2384a..8fc5b83eaeb4 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/README.md +++ b/packages/google-cloud-bigtable/samples/tableadmin/README.md @@ -30,7 +30,6 @@ To run this sample: python tableadmin.py -
usage: tableadmin.py [-h] [run] [delete] [--table TABLE] project_id instance_id 


Demonstrates how to connect to Cloud Bigtable and run some basic operations.
Prerequisites: - Create a Cloud Bigtable cluster.
https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google
Application Default Credentials.
https://developers.google.com/identity/protocols/application-default-
credentials


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Table to create and destroy. (default: Hello-Bigtable)
## Additional Information diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index 79b395fba6e2..bb3f70111362 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -4,7 +4,7 @@ "git": { "name": ".", "remote": "https://github.com/googleapis/python-bigtable.git", - "sha": "dfe658a2b1270eda7a8a084aca28d65b3297a04f" + "sha": "8f764343e01d50ad880363f5a4e5630122cbdb25" } }, { @@ -49,5 +49,123 @@ "generator": "bazel" } } + ], + "generatedFiles": [ + ".coveragerc", + ".flake8", + ".github/CONTRIBUTING.md", + ".github/ISSUE_TEMPLATE/bug_report.md", + ".github/ISSUE_TEMPLATE/feature_request.md", + ".github/ISSUE_TEMPLATE/support_request.md", + ".github/PULL_REQUEST_TEMPLATE.md", + ".github/release-please.yml", + ".gitignore", + ".kokoro/build.sh", + ".kokoro/continuous/common.cfg", + ".kokoro/continuous/continuous.cfg", + ".kokoro/docker/docs/Dockerfile", + ".kokoro/docker/docs/fetch_gpg_keys.sh", + ".kokoro/docs/common.cfg", + ".kokoro/docs/docs-presubmit.cfg", + ".kokoro/docs/docs.cfg", + ".kokoro/presubmit/common.cfg", + ".kokoro/presubmit/presubmit.cfg", + ".kokoro/publish-docs.sh", + ".kokoro/release.sh", + ".kokoro/release/common.cfg", + ".kokoro/release/release.cfg", + ".kokoro/samples/lint/common.cfg", + ".kokoro/samples/lint/continuous.cfg", + ".kokoro/samples/lint/periodic.cfg", + ".kokoro/samples/lint/presubmit.cfg", + ".kokoro/samples/python3.6/common.cfg", + ".kokoro/samples/python3.6/continuous.cfg", + ".kokoro/samples/python3.6/periodic.cfg", + ".kokoro/samples/python3.6/presubmit.cfg", + ".kokoro/samples/python3.7/common.cfg", + ".kokoro/samples/python3.7/continuous.cfg", + ".kokoro/samples/python3.7/periodic.cfg", + ".kokoro/samples/python3.7/presubmit.cfg", + ".kokoro/samples/python3.8/common.cfg", + ".kokoro/samples/python3.8/continuous.cfg", + ".kokoro/samples/python3.8/periodic.cfg", + ".kokoro/samples/python3.8/presubmit.cfg", + ".kokoro/test-samples.sh", + ".kokoro/trampoline.sh", + ".kokoro/trampoline_v2.sh", + ".trampolinerc", + "CODE_OF_CONDUCT.md", + "CONTRIBUTING.rst", + "LICENSE", + "MANIFEST.in", + "docs/_static/custom.css", + "docs/_templates/layout.html", + "docs/conf.py", + "docs/multiprocessing.rst", + "google/cloud/bigtable_admin_v2/__init__.py", + "google/cloud/bigtable_admin_v2/gapic/__init__.py", + "google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py", + "google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client_config.py", + "google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py", + "google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py", + "google/cloud/bigtable_admin_v2/gapic/enums.py", + "google/cloud/bigtable_admin_v2/gapic/transports/__init__.py", + "google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py", + "google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py", + "google/cloud/bigtable_admin_v2/proto/__init__.py", + "google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto", + "google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py", + "google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py", + "google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto", + "google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py", + "google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py", + "google/cloud/bigtable_admin_v2/proto/common.proto", + 
"google/cloud/bigtable_admin_v2/proto/common_pb2.py", + "google/cloud/bigtable_admin_v2/proto/common_pb2_grpc.py", + "google/cloud/bigtable_admin_v2/proto/instance.proto", + "google/cloud/bigtable_admin_v2/proto/instance_pb2.py", + "google/cloud/bigtable_admin_v2/proto/instance_pb2_grpc.py", + "google/cloud/bigtable_admin_v2/proto/table.proto", + "google/cloud/bigtable_admin_v2/proto/table_pb2.py", + "google/cloud/bigtable_admin_v2/proto/table_pb2_grpc.py", + "google/cloud/bigtable_admin_v2/types.py", + "google/cloud/bigtable_v2/__init__.py", + "google/cloud/bigtable_v2/gapic/__init__.py", + "google/cloud/bigtable_v2/gapic/bigtable_client.py", + "google/cloud/bigtable_v2/gapic/bigtable_client_config.py", + "google/cloud/bigtable_v2/gapic/transports/__init__.py", + "google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py", + "google/cloud/bigtable_v2/proto/__init__.py", + "google/cloud/bigtable_v2/proto/bigtable.proto", + "google/cloud/bigtable_v2/proto/bigtable_pb2.py", + "google/cloud/bigtable_v2/proto/bigtable_pb2_grpc.py", + "google/cloud/bigtable_v2/proto/data.proto", + "google/cloud/bigtable_v2/proto/data_pb2.py", + "google/cloud/bigtable_v2/proto/data_pb2_grpc.py", + "google/cloud/bigtable_v2/types.py", + "renovate.json", + "samples/AUTHORING_GUIDE.md", + "samples/CONTRIBUTING.md", + "samples/README.md", + "samples/hello/README.md", + "samples/hello_happybase/README.md", + "samples/instanceadmin/README.md", + "samples/metricscaler/README.md", + "samples/quickstart/README.md", + "samples/quickstart_happybase/README.md", + "samples/snippets/README.md", + "samples/tableadmin/README.md", + "scripts/decrypt-secrets.sh", + "scripts/readme-gen/readme_gen.py", + "scripts/readme-gen/templates/README.tmpl.rst", + "scripts/readme-gen/templates/auth.tmpl.rst", + "scripts/readme-gen/templates/auth_api_key.tmpl.rst", + "scripts/readme-gen/templates/install_deps.tmpl.rst", + "scripts/readme-gen/templates/install_portaudio.tmpl.rst", + "setup.cfg", + "testing/.gitignore", + "tests/unit/gapic/v2/test_bigtable_client_v2.py", + "tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py", + "tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py" ] } \ No newline at end of file From b83b66b40cfd2e4ada8c850ca41c5405bd31544e Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Mon, 21 Sep 2020 15:52:02 -0400 Subject: [PATCH 345/892] chore: harden snippet teardowns against ServiceUnavalable (#133) Closes: #87 --- .../google-cloud-bigtable/docs/snippets.py | 22 ++++++++++--------- .../docs/snippets_table.py | 8 ++++--- 2 files changed, 17 insertions(+), 13 deletions(-) diff --git a/packages/google-cloud-bigtable/docs/snippets.py b/packages/google-cloud-bigtable/docs/snippets.py index 36dbe5e73e21..32fdfcb24ce0 100644 --- a/packages/google-cloud-bigtable/docs/snippets.py +++ b/packages/google-cloud-bigtable/docs/snippets.py @@ -32,11 +32,13 @@ import datetime import pytest -from test_utils.system import unique_resource_id -from test_utils.retry import RetryErrors +from google.api_core.exceptions import DeadlineExceeded from google.api_core.exceptions import NotFound from google.api_core.exceptions import TooManyRequests -from google.api_core.exceptions import DeadlineExceeded +from google.api_core.exceptions import ServiceUnavailable +from test_utils.system import unique_resource_id +from test_utils.retry import RetryErrors + from google.cloud._helpers import UTC from google.cloud.bigtable import Client from google.cloud.bigtable import enums @@ -62,7 +64,7 @@ LABELS = {LABEL_KEY: 
str(LABEL_STAMP)} INSTANCES_TO_DELETE = [] -retry_429 = RetryErrors(TooManyRequests, max_tries=9) +retry_429_503 = RetryErrors((ServiceUnavailable, TooManyRequests), max_tries=9) retry_504 = RetryErrors(DeadlineExceeded, max_tries=4) @@ -97,11 +99,11 @@ def setup_module(): def teardown_module(): - retry_429(Config.INSTANCE.delete)() + retry_429_503(Config.INSTANCE.delete)() for instance in INSTANCES_TO_DELETE: try: - retry_429(instance.delete)() + retry_429_503(instance.delete)() except NotFound: pass @@ -137,7 +139,7 @@ def test_bigtable_create_instance(): try: assert instance.exists() finally: - retry_429(instance.delete)() + retry_429_503(instance.delete)() def test_bigtable_create_additional_cluster(): @@ -172,7 +174,7 @@ def test_bigtable_create_additional_cluster(): try: assert cluster.exists() finally: - retry_429(cluster.delete)() + retry_429_503(cluster.delete)() def test_bigtable_create_reload_delete_app_profile(): @@ -318,7 +320,7 @@ def test_bigtable_list_app_profiles(): try: assert len(app_profiles_list) > 0 finally: - retry_429(app_profile.delete)(ignore_warnings=True) + retry_429_503(app_profile.delete)(ignore_warnings=True) def test_bigtable_instance_exists(): @@ -423,7 +425,7 @@ def test_bigtable_create_table(): try: assert table.exists() finally: - retry_429(table.delete)() + retry_429_503(table.delete)() def test_bigtable_list_tables(): diff --git a/packages/google-cloud-bigtable/docs/snippets_table.py b/packages/google-cloud-bigtable/docs/snippets_table.py index 767e3697555e..a20918c036a9 100644 --- a/packages/google-cloud-bigtable/docs/snippets_table.py +++ b/packages/google-cloud-bigtable/docs/snippets_table.py @@ -32,9 +32,11 @@ import datetime import pytest +from google.api_core.exceptions import TooManyRequests +from google.api_core.exceptions import ServiceUnavailable from test_utils.system import unique_resource_id from test_utils.retry import RetryErrors -from google.api_core.exceptions import TooManyRequests + from google.cloud._helpers import UTC from google.cloud.bigtable import Client from google.cloud.bigtable import enums @@ -66,7 +68,7 @@ CELL_VAL2 = b"cell-val2" ROW_KEY2 = b"row_key_id2" -retry_429 = RetryErrors(TooManyRequests, max_tries=9) +retry_429_503 = RetryErrors((ServiceUnavailable, TooManyRequests), max_tries=9) class Config(object): @@ -106,7 +108,7 @@ def setup_module(): def teardown_module(): - retry_429(Config.INSTANCE.delete)() + retry_429_503(Config.INSTANCE.delete)() def test_bigtable_create_table(): From 889d0cc3a1ac23e8f634fa53dc1a57a15c8c039c Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Mon, 21 Sep 2020 22:45:26 +0200 Subject: [PATCH 346/892] chore(samples): update dependency apache-beam to v2.24.0 (#126) --- packages/google-cloud-bigtable/samples/beam/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index 2d7898e429fb..363428adfa77 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ -apache-beam==2.23.0 +apache-beam==2.24.0 google-cloud-bigtable==1.4.0 google-cloud-core==1.3.0 \ No newline at end of file From 206d773c4bd8bd463aa3adc4008f45f3d3ded837 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Mon, 21 Sep 2020 16:46:18 -0400 Subject: [PATCH 347/892] chore: double timeouts for flaky systests (#132) Closes: #130 Closes: #131 --- 
packages/google-cloud-bigtable/tests/system.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index e3823177ebe0..50dc5c31dac9 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -287,7 +287,7 @@ def test_create_instance_w_two_clusters(self): self.instances_to_delete.append(instance) # We want to make sure the operation completes. - operation.result(timeout=30) + operation.result(timeout=60) # Create a new instance instance and make sure it is the same. instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID) @@ -526,7 +526,7 @@ def test_update_cluster(self): # other test cases. Config.CLUSTER.serve_nodes = SERVE_NODES operation = Config.CLUSTER.update() - operation.result(timeout=10) + operation.result(timeout=20) def test_create_cluster(self): from google.cloud.bigtable.enums import StorageType From 38f695691547e7cc3a0f6a920cec6f6c11a6f961 Mon Sep 17 00:00:00 2001 From: kolea2 <45548808+kolea2@users.noreply.github.com> Date: Mon, 21 Sep 2020 20:49:30 +0000 Subject: [PATCH 348/892] chore: update CODEOWNERS (#124) --- packages/google-cloud-bigtable/.github/CODEOWNERS | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/CODEOWNERS b/packages/google-cloud-bigtable/.github/CODEOWNERS index 59302d617ce4..76c8b03b9d03 100644 --- a/packages/google-cloud-bigtable/.github/CODEOWNERS +++ b/packages/google-cloud-bigtable/.github/CODEOWNERS @@ -5,7 +5,7 @@ # https://help.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners#codeowners-syntax -# The bigtable-dpe team is the default owner for anything not +# The api-bigtable team is the default owner for anything not # explicitly taken by someone else. 
-* @googleapis/bigtable-dpe -/samples/ @googleapis/bigtable-dpe @googleapis/python-samples-owners \ No newline at end of file +* @googleapis/api-bigtable +/samples/ @googleapis/api-bigtable @googleapis/python-samples-owners \ No newline at end of file From fdc8ca79da181b8ac003689852a178370dbb1b6d Mon Sep 17 00:00:00 2001 From: Christopher Wilcox Date: Mon, 21 Sep 2020 15:02:08 -0700 Subject: [PATCH 349/892] fix: retry if failure occurs on initial call in MutateRows (#123) * fix: Retry if failure occurs on initial call in MutateRows * fix: use exception clases to DRY RETRY_CODES Co-authored-by: Tres Seaver --- .../google/cloud/bigtable/table.py | 28 +++++++++++-------- 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index 199269013074..950a8c3fe827 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -14,11 +14,12 @@ """User-friendly container for Google Cloud Bigtable Table.""" -from grpc import StatusCode - from google.api_core import timeout -from google.api_core.exceptions import RetryError +from google.api_core.exceptions import Aborted +from google.api_core.exceptions import DeadlineExceeded from google.api_core.exceptions import NotFound +from google.api_core.exceptions import RetryError +from google.api_core.exceptions import ServiceUnavailable from google.api_core.retry import if_exception_type from google.api_core.retry import Retry from google.api_core.gapic_v1.method import wrap_method @@ -986,15 +987,12 @@ class _RetryableMutateRowsWorker(object): are retryable, any subsequent call on this callable will be a no-op. """ - # pylint: disable=unsubscriptable-object RETRY_CODES = ( - StatusCode.DEADLINE_EXCEEDED.value[0], - StatusCode.ABORTED.value[0], - StatusCode.UNAVAILABLE.value[0], + Aborted.grpc_status_code.value[0], + DeadlineExceeded.grpc_status_code.value[0], + ServiceUnavailable.grpc_status_code.value[0], ) - # pylint: enable=unsubscriptable-object - def __init__(self, client, table_name, rows, app_profile_id=None, timeout=None): self.client = client self.table_name = table_name @@ -1078,9 +1076,15 @@ def _do_mutate_retryable_rows(self): client_info=data_client._client_info, ) - responses = data_client._inner_api_calls["mutate_rows"]( - mutate_rows_request, retry=None - ) + try: + responses = data_client._inner_api_calls["mutate_rows"]( + mutate_rows_request, retry=None + ) + except (ServiceUnavailable, DeadlineExceeded, Aborted): + # If an exception, considered retryable by `RETRY_CODES`, is + # returned from the initial call, consider + # it to be retryable. Wrap as a Bigtable Retryable Error. 
+ raise _BigtableRetryableError num_responses = 0 num_retryable_responses = 0 From 081f7babbe5faee23ab82d419ed9d808d66f37f9 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 22 Sep 2020 17:34:43 +0200 Subject: [PATCH 350/892] chore(deps): update dependency google-cloud-core to v1.4.1 (#135) --- packages/google-cloud-bigtable/samples/beam/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index 363428adfa77..416b381ae3d4 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ apache-beam==2.24.0 google-cloud-bigtable==1.4.0 -google-cloud-core==1.3.0 \ No newline at end of file +google-cloud-core==1.4.1 \ No newline at end of file From df03b056802e998add5a7f4eb38c382a76521e6f Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 22 Sep 2020 12:14:35 -0400 Subject: [PATCH 351/892] chore(CI): add snippet bot support (via synth) (#134) Closes #125 --- .../.github/snippet-bot.yml | 0 .../.kokoro/populate-secrets.sh | 43 ++++++ .../.kokoro/release/common.cfg | 50 ++----- .../.kokoro/trampoline.sh | 15 +- packages/google-cloud-bigtable/docs/conf.py | 3 +- .../samples/hello/README.md | 1 + .../samples/hello_happybase/README.md | 1 + .../samples/instanceadmin/README.md | 1 + .../samples/metricscaler/README.md | 1 + .../samples/quickstart/README.md | 1 + .../samples/quickstart_happybase/README.md | 1 + .../samples/snippets/README.md | 1 + .../samples/tableadmin/README.md | 1 + .../scripts/decrypt-secrets.sh | 15 +- packages/google-cloud-bigtable/synth.metadata | 130 +----------------- 15 files changed, 96 insertions(+), 168 deletions(-) create mode 100644 packages/google-cloud-bigtable/.github/snippet-bot.yml create mode 100755 packages/google-cloud-bigtable/.kokoro/populate-secrets.sh diff --git a/packages/google-cloud-bigtable/.github/snippet-bot.yml b/packages/google-cloud-bigtable/.github/snippet-bot.yml new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/packages/google-cloud-bigtable/.kokoro/populate-secrets.sh b/packages/google-cloud-bigtable/.kokoro/populate-secrets.sh new file mode 100755 index 000000000000..f52514257ef0 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/populate-secrets.sh @@ -0,0 +1,43 @@ +#!/bin/bash +# Copyright 2020 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +set -eo pipefail + +function now { date +"%Y-%m-%d %H:%M:%S" | tr -d '\n' ;} +function msg { println "$*" >&2 ;} +function println { printf '%s\n' "$(now) $*" ;} + + +# Populates requested secrets set in SECRET_MANAGER_KEYS from service account: +# kokoro-trampoline@cloud-devrel-kokoro-resources.iam.gserviceaccount.com +SECRET_LOCATION="${KOKORO_GFILE_DIR}/secret_manager" +msg "Creating folder on disk for secrets: ${SECRET_LOCATION}" +mkdir -p ${SECRET_LOCATION} +for key in $(echo ${SECRET_MANAGER_KEYS} | sed "s/,/ /g") +do + msg "Retrieving secret ${key}" + docker run --entrypoint=gcloud \ + --volume=${KOKORO_GFILE_DIR}:${KOKORO_GFILE_DIR} \ + gcr.io/google.com/cloudsdktool/cloud-sdk \ + secrets versions access latest \ + --project cloud-devrel-kokoro-resources \ + --secret ${key} > \ + "${SECRET_LOCATION}/${key}" + if [[ $? == 0 ]]; then + msg "Secret written to ${SECRET_LOCATION}/${key}" + else + msg "Error retrieving secret ${key}" + fi +done diff --git a/packages/google-cloud-bigtable/.kokoro/release/common.cfg b/packages/google-cloud-bigtable/.kokoro/release/common.cfg index d1edfb69db8c..ceb054317811 100644 --- a/packages/google-cloud-bigtable/.kokoro/release/common.cfg +++ b/packages/google-cloud-bigtable/.kokoro/release/common.cfg @@ -23,42 +23,18 @@ env_vars: { value: "github/python-bigtable/.kokoro/release.sh" } -# Fetch the token needed for reporting release status to GitHub -before_action { - fetch_keystore { - keystore_resource { - keystore_config_id: 73713 - keyname: "yoshi-automation-github-key" - } - } -} - -# Fetch PyPI password -before_action { - fetch_keystore { - keystore_resource { - keystore_config_id: 73713 - keyname: "google_cloud_pypi_password" - } - } -} - -# Fetch magictoken to use with Magic Github Proxy -before_action { - fetch_keystore { - keystore_resource { - keystore_config_id: 73713 - keyname: "releasetool-magictoken" - } - } +# Fetch PyPI password +before_action { + fetch_keystore { + keystore_resource { + keystore_config_id: 73713 + keyname: "google_cloud_pypi_password" + } + } } -# Fetch api key to use with Magic Github Proxy -before_action { - fetch_keystore { - keystore_resource { - keystore_config_id: 73713 - keyname: "magic-github-proxy-api-key" - } - } -} +# Tokens needed to report release status back to GitHub +env_vars: { + key: "SECRET_MANAGER_KEYS" + value: "releasetool-publish-reporter-app,releasetool-publish-reporter-googleapis-installation,releasetool-publish-reporter-pem" +} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/trampoline.sh b/packages/google-cloud-bigtable/.kokoro/trampoline.sh index e8c4251f3ed4..f39236e943a8 100755 --- a/packages/google-cloud-bigtable/.kokoro/trampoline.sh +++ b/packages/google-cloud-bigtable/.kokoro/trampoline.sh @@ -15,9 +15,14 @@ set -eo pipefail -python3 "${KOKORO_GFILE_DIR}/trampoline_v1.py" || ret_code=$? +# Always run the cleanup script, regardless of the success of bouncing into +# the container. +function cleanup() { + chmod +x ${KOKORO_GFILE_DIR}/trampoline_cleanup.sh + ${KOKORO_GFILE_DIR}/trampoline_cleanup.sh + echo "cleanup"; +} +trap cleanup EXIT -chmod +x ${KOKORO_GFILE_DIR}/trampoline_cleanup.sh -${KOKORO_GFILE_DIR}/trampoline_cleanup.sh || true - -exit ${ret_code} +$(dirname $0)/populate-secrets.sh # Secret Manager secrets. 
+python3 "${KOKORO_GFILE_DIR}/trampoline_v1.py" \ No newline at end of file diff --git a/packages/google-cloud-bigtable/docs/conf.py b/packages/google-cloud-bigtable/docs/conf.py index e6a3d0d1a3d0..c0b3a25a4639 100644 --- a/packages/google-cloud-bigtable/docs/conf.py +++ b/packages/google-cloud-bigtable/docs/conf.py @@ -29,7 +29,7 @@ # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. -needs_sphinx = "1.6.3" +needs_sphinx = "1.5.5" # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom @@ -39,6 +39,7 @@ "sphinx.ext.autosummary", "sphinx.ext.intersphinx", "sphinx.ext.coverage", + "sphinx.ext.doctest", "sphinx.ext.napoleon", "sphinx.ext.todo", "sphinx.ext.viewcode", diff --git a/packages/google-cloud-bigtable/samples/hello/README.md b/packages/google-cloud-bigtable/samples/hello/README.md index cb869f5290ff..1ffd9b86eda9 100644 --- a/packages/google-cloud-bigtable/samples/hello/README.md +++ b/packages/google-cloud-bigtable/samples/hello/README.md @@ -30,6 +30,7 @@ To run this sample: python main.py +
usage: main.py [-h] [--table TABLE] project_id instance_id
Demonstrates how to connect to Cloud Bigtable and run some basic operations.
Prerequisites: - Create a Cloud Bigtable cluster.
https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google
Application Default Credentials.
https://developers.google.com/identity/protocols/application-default-
credentials


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Table to create and destroy. (default: Hello-Bigtable)
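
The help text above only covers invocation; the body of the sample is a short write-then-scan loop. A minimal sketch, assuming the public `google-cloud-bigtable` API and an existing table with a `cf1` column family (placeholder names):

```python
from google.cloud import bigtable


def hello_world(project_id, instance_id, table_id="Hello-Bigtable"):
    client = bigtable.Client(project=project_id, admin=True)
    table = client.instance(instance_id).table(table_id)

    # Write a few greeting rows using direct (unconditional) mutations.
    rows = []
    for i, greeting in enumerate(["Hello World!", "Hello Cloud Bigtable!"]):
        row = table.direct_row("greeting{}".format(i).encode())
        row.set_cell("cf1", b"greeting", greeting.encode())
        rows.append(row)
    table.mutate_rows(rows)

    # Scan the table back and print each stored greeting.
    for row in table.read_rows():
        print(row.row_key, row.cell_value("cf1", b"greeting"))
```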
## Additional Information diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/README.md b/packages/google-cloud-bigtable/samples/hello_happybase/README.md index e8a57f2f075b..a37d4fd5e51d 100644 --- a/packages/google-cloud-bigtable/samples/hello_happybase/README.md +++ b/packages/google-cloud-bigtable/samples/hello_happybase/README.md @@ -30,6 +30,7 @@ To run this sample: python main.py +
usage: main.py [-h] [--table TABLE] project_id instance_id
Demonstrates how to connect to Cloud Bigtable and run some basic operations.
Prerequisites: - Create a Cloud Bigtable cluster.
https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google
Application Default Credentials.
https://developers.google.com/identity/protocols/application-default-
credentials


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Table to create and destroy. (default: Hello-Bigtable)
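
For comparison with the plain client sample, a minimal sketch of the HappyBase-flavored flow is shown below. It assumes the `google-cloud-happybase` package's `Connection` wrapper and an existing table with a `cf1` family (placeholder names):

```python
from google.cloud import bigtable
from google.cloud import happybase


def hello_happybase(project_id, instance_id, table_name="Hello-Bigtable"):
    client = bigtable.Client(project=project_id, admin=True)
    instance = client.instance(instance_id)

    # The HappyBase layer exposes an HBase-style API over a Bigtable instance.
    connection = happybase.Connection(instance=instance)
    try:
        table = connection.table(table_name)
        table.put(b"greeting0", {b"cf1:greeting": b"Hello World!"})
        for key, data in table.scan():
            print(key, data.get(b"cf1:greeting"))
    finally:
        connection.close()
```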
## Additional Information diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/README.md b/packages/google-cloud-bigtable/samples/instanceadmin/README.md index 1ff520909d87..59c51c5bde40 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/README.md +++ b/packages/google-cloud-bigtable/samples/instanceadmin/README.md @@ -30,6 +30,7 @@ To run this sample: python instanceadmin.py +
usage: instanceadmin.py [-h] [run] [dev-instance] [del-instance] [add-cluster] [del-cluster] project_id instance_id cluster_id
Demonstrates how to connect to Cloud Bigtable and run some basic operations.
Prerequisites: - Create a Cloud Bigtable cluster.
https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google
Application Default Credentials.
https://developers.google.com/identity/protocols/application-default-
credentials


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Table to create and destroy. (default: Hello-Bigtable)
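
A minimal sketch of the create-and-delete flow the sample walks through, assuming the public `google-cloud-bigtable` admin API; the location, node count, and storage type below are placeholder values:

```python
from google.cloud import bigtable
from google.cloud.bigtable import enums


def create_and_delete_instance(project_id, instance_id, cluster_id):
    client = bigtable.Client(project=project_id, admin=True)

    # Define a production instance backed by a single three-node SSD cluster.
    instance = client.instance(instance_id, instance_type=enums.Instance.Type.PRODUCTION)
    cluster = instance.cluster(
        cluster_id,
        location_id="us-central1-f",
        serve_nodes=3,
        default_storage_type=enums.StorageType.SSD,
    )
    operation = instance.create(clusters=[cluster])
    operation.result(timeout=120)  # Instance creation is a long-running operation.

    print("Instance exists:", instance.exists())
    instance.delete()
```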
## Additional Information diff --git a/packages/google-cloud-bigtable/samples/metricscaler/README.md b/packages/google-cloud-bigtable/samples/metricscaler/README.md index 29819f88be91..cf88eb8bf3d9 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/README.md +++ b/packages/google-cloud-bigtable/samples/metricscaler/README.md @@ -30,6 +30,7 @@ To run this sample: python metricscaler.py +
usage: metricscaler.py [-h] [--high_cpu_threshold HIGH_CPU_THRESHOLD]
                       [--low_cpu_threshold LOW_CPU_THRESHOLD]
                       [--short_sleep SHORT_SLEEP] [--long_sleep LONG_SLEEP]
                       bigtable_instance bigtable_cluster


Scales Cloud Bigtable clusters based on CPU usage.


positional arguments:
  bigtable_instance     ID of the Cloud Bigtable instance to connect to.
  bigtable_cluster      ID of the Cloud Bigtable cluster to connect to.


optional arguments:
  -h, --help            show this help message and exit
  --high_cpu_threshold HIGH_CPU_THRESHOLD
                        If Cloud Bigtable CPU usage is above this threshold,
                        scale up
  --low_cpu_threshold LOW_CPU_THRESHOLD
                        If Cloud Bigtable CPU usage is below this threshold,
                        scale down
  --short_sleep SHORT_SLEEP
                        How long to sleep in seconds between checking metrics
                        after no scale operation
  --long_sleep LONG_SLEEP
                        How long to sleep in seconds between checking metrics
                        after a scaling operation
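
The resize step itself is small compared to the monitoring plumbing. Below is a minimal sketch of just that step, assuming the public `google-cloud-bigtable` API; querying Stackdriver Monitoring for the CPU metric is left out, and the three-node floor and step size are placeholder choices:

```python
from google.cloud import bigtable


def scale_bigtable(project_id, instance_id, cluster_id, scale_up, step=3):
    """Resize a cluster by `step` nodes in the requested direction."""
    client = bigtable.Client(project=project_id, admin=True)
    cluster = client.instance(instance_id).cluster(cluster_id)
    cluster.reload()  # Fetch the current serve_nodes value.

    if scale_up:
        cluster.serve_nodes += step
    else:
        # Never drop below the minimum production cluster size of three nodes.
        cluster.serve_nodes = max(3, cluster.serve_nodes - step)

    operation = cluster.update()
    operation.result(timeout=120)  # Wait for the resize to complete.
```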
## Additional Information diff --git a/packages/google-cloud-bigtable/samples/quickstart/README.md b/packages/google-cloud-bigtable/samples/quickstart/README.md index c7e7aff29ae6..455a412f2bb5 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/README.md +++ b/packages/google-cloud-bigtable/samples/quickstart/README.md @@ -30,6 +30,7 @@ To run this sample: python main.py +
usage: main.py [-h] [--table TABLE] project_id instance_id 


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Existing table used in the quickstart. (default: my-table)
## Additional Information diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/README.md b/packages/google-cloud-bigtable/samples/quickstart_happybase/README.md index b74689ddc1e1..c97cbc675e7d 100644 --- a/packages/google-cloud-bigtable/samples/quickstart_happybase/README.md +++ b/packages/google-cloud-bigtable/samples/quickstart_happybase/README.md @@ -30,6 +30,7 @@ To run this sample: python main.py +
usage: main.py [-h] [--table TABLE] project_id instance_id


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Existing table used in the quickstart. (default: my-table)
usage: tableadmin.py [-h] [run] [delete] [--table TABLE] project_id instance_id


Demonstrates how to connect to Cloud Bigtable and run some basic operations.
Prerequisites: - Create a Cloud Bigtable cluster.
https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google
Application Default Credentials.
https://developers.google.com/identity/protocols/application-default-
credentials


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Table to create and destroy. (default: Hello-Bigtable)
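
Setting aside the generated help text above, the HappyBase quickstart reduces to opening a connection, reading one row, and closing the connection. A minimal sketch, assuming the `google-cloud-happybase` package; the row key and column are placeholders:

```python
from google.cloud import bigtable
from google.cloud import happybase


def quickstart(project_id, instance_id, table_name="my-table"):
    client = bigtable.Client(project=project_id, admin=True)
    connection = happybase.Connection(instance=client.instance(instance_id))
    try:
        table = connection.table(table_name)
        # Fetch a single row by key and print one cell value.
        row = table.row(b"r1")
        print(row.get(b"cf1:c1"))
    finally:
        connection.close()
```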
## Additional Information diff --git a/packages/google-cloud-bigtable/scripts/decrypt-secrets.sh b/packages/google-cloud-bigtable/scripts/decrypt-secrets.sh index ff599eb2af25..21f6d2a26d90 100755 --- a/packages/google-cloud-bigtable/scripts/decrypt-secrets.sh +++ b/packages/google-cloud-bigtable/scripts/decrypt-secrets.sh @@ -20,14 +20,27 @@ ROOT=$( dirname "$DIR" ) # Work from the project root. cd $ROOT +# Prevent it from overriding files. +# We recommend that sample authors use their own service account files and cloud project. +# In that case, they are supposed to prepare these files by themselves. +if [[ -f "testing/test-env.sh" ]] || \ + [[ -f "testing/service-account.json" ]] || \ + [[ -f "testing/client-secrets.json" ]]; then + echo "One or more target files exist, aborting." + exit 1 +fi + # Use SECRET_MANAGER_PROJECT if set, fallback to cloud-devrel-kokoro-resources. PROJECT_ID="${SECRET_MANAGER_PROJECT:-cloud-devrel-kokoro-resources}" gcloud secrets versions access latest --secret="python-docs-samples-test-env" \ + --project="${PROJECT_ID}" \ > testing/test-env.sh gcloud secrets versions access latest \ --secret="python-docs-samples-service-account" \ + --project="${PROJECT_ID}" \ > testing/service-account.json gcloud secrets versions access latest \ --secret="python-docs-samples-client-secrets" \ - > testing/client-secrets.json \ No newline at end of file + --project="${PROJECT_ID}" \ + > testing/client-secrets.json diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index bb3f70111362..73e14bc495bf 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -3,30 +3,30 @@ { "git": { "name": ".", - "remote": "https://github.com/googleapis/python-bigtable.git", - "sha": "8f764343e01d50ad880363f5a4e5630122cbdb25" + "remote": "git@github.com:googleapis/python-bigtable", + "sha": "474b9f3408efa102533da5b9066cba0cd1ff3a9a" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "0dc0a6c0f1a9f979bc0690f0caa5fbafa3000c2c", - "internalRef": "327026955" + "sha": "8d73f9486fc193a150f6c907dfb9f49431aff3ff", + "internalRef": "332497859" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "2e85c10b5153defd9d654c34b57e7e9263361959" + "sha": "9d216d21544b5c9f2c85a6380ffcf20b67e1e459" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "2e85c10b5153defd9d654c34b57e7e9263361959" + "sha": "9d216d21544b5c9f2c85a6380ffcf20b67e1e459" } } ], @@ -49,123 +49,5 @@ "generator": "bazel" } } - ], - "generatedFiles": [ - ".coveragerc", - ".flake8", - ".github/CONTRIBUTING.md", - ".github/ISSUE_TEMPLATE/bug_report.md", - ".github/ISSUE_TEMPLATE/feature_request.md", - ".github/ISSUE_TEMPLATE/support_request.md", - ".github/PULL_REQUEST_TEMPLATE.md", - ".github/release-please.yml", - ".gitignore", - ".kokoro/build.sh", - ".kokoro/continuous/common.cfg", - ".kokoro/continuous/continuous.cfg", - ".kokoro/docker/docs/Dockerfile", - ".kokoro/docker/docs/fetch_gpg_keys.sh", - ".kokoro/docs/common.cfg", - ".kokoro/docs/docs-presubmit.cfg", - ".kokoro/docs/docs.cfg", - ".kokoro/presubmit/common.cfg", - ".kokoro/presubmit/presubmit.cfg", - ".kokoro/publish-docs.sh", - ".kokoro/release.sh", - ".kokoro/release/common.cfg", - ".kokoro/release/release.cfg", - ".kokoro/samples/lint/common.cfg", - ".kokoro/samples/lint/continuous.cfg", - 
".kokoro/samples/lint/periodic.cfg", - ".kokoro/samples/lint/presubmit.cfg", - ".kokoro/samples/python3.6/common.cfg", - ".kokoro/samples/python3.6/continuous.cfg", - ".kokoro/samples/python3.6/periodic.cfg", - ".kokoro/samples/python3.6/presubmit.cfg", - ".kokoro/samples/python3.7/common.cfg", - ".kokoro/samples/python3.7/continuous.cfg", - ".kokoro/samples/python3.7/periodic.cfg", - ".kokoro/samples/python3.7/presubmit.cfg", - ".kokoro/samples/python3.8/common.cfg", - ".kokoro/samples/python3.8/continuous.cfg", - ".kokoro/samples/python3.8/periodic.cfg", - ".kokoro/samples/python3.8/presubmit.cfg", - ".kokoro/test-samples.sh", - ".kokoro/trampoline.sh", - ".kokoro/trampoline_v2.sh", - ".trampolinerc", - "CODE_OF_CONDUCT.md", - "CONTRIBUTING.rst", - "LICENSE", - "MANIFEST.in", - "docs/_static/custom.css", - "docs/_templates/layout.html", - "docs/conf.py", - "docs/multiprocessing.rst", - "google/cloud/bigtable_admin_v2/__init__.py", - "google/cloud/bigtable_admin_v2/gapic/__init__.py", - "google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py", - "google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client_config.py", - "google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py", - "google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py", - "google/cloud/bigtable_admin_v2/gapic/enums.py", - "google/cloud/bigtable_admin_v2/gapic/transports/__init__.py", - "google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py", - "google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py", - "google/cloud/bigtable_admin_v2/proto/__init__.py", - "google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto", - "google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py", - "google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py", - "google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto", - "google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py", - "google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py", - "google/cloud/bigtable_admin_v2/proto/common.proto", - "google/cloud/bigtable_admin_v2/proto/common_pb2.py", - "google/cloud/bigtable_admin_v2/proto/common_pb2_grpc.py", - "google/cloud/bigtable_admin_v2/proto/instance.proto", - "google/cloud/bigtable_admin_v2/proto/instance_pb2.py", - "google/cloud/bigtable_admin_v2/proto/instance_pb2_grpc.py", - "google/cloud/bigtable_admin_v2/proto/table.proto", - "google/cloud/bigtable_admin_v2/proto/table_pb2.py", - "google/cloud/bigtable_admin_v2/proto/table_pb2_grpc.py", - "google/cloud/bigtable_admin_v2/types.py", - "google/cloud/bigtable_v2/__init__.py", - "google/cloud/bigtable_v2/gapic/__init__.py", - "google/cloud/bigtable_v2/gapic/bigtable_client.py", - "google/cloud/bigtable_v2/gapic/bigtable_client_config.py", - "google/cloud/bigtable_v2/gapic/transports/__init__.py", - "google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py", - "google/cloud/bigtable_v2/proto/__init__.py", - "google/cloud/bigtable_v2/proto/bigtable.proto", - "google/cloud/bigtable_v2/proto/bigtable_pb2.py", - "google/cloud/bigtable_v2/proto/bigtable_pb2_grpc.py", - "google/cloud/bigtable_v2/proto/data.proto", - "google/cloud/bigtable_v2/proto/data_pb2.py", - "google/cloud/bigtable_v2/proto/data_pb2_grpc.py", - "google/cloud/bigtable_v2/types.py", - "renovate.json", - "samples/AUTHORING_GUIDE.md", - "samples/CONTRIBUTING.md", - "samples/README.md", - "samples/hello/README.md", - 
"samples/hello_happybase/README.md", - "samples/instanceadmin/README.md", - "samples/metricscaler/README.md", - "samples/quickstart/README.md", - "samples/quickstart_happybase/README.md", - "samples/snippets/README.md", - "samples/tableadmin/README.md", - "scripts/decrypt-secrets.sh", - "scripts/readme-gen/readme_gen.py", - "scripts/readme-gen/templates/README.tmpl.rst", - "scripts/readme-gen/templates/auth.tmpl.rst", - "scripts/readme-gen/templates/auth_api_key.tmpl.rst", - "scripts/readme-gen/templates/install_deps.tmpl.rst", - "scripts/readme-gen/templates/install_portaudio.tmpl.rst", - "setup.cfg", - "testing/.gitignore", - "tests/unit/gapic/v2/test_bigtable_client_v2.py", - "tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py", - "tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py" ] } \ No newline at end of file From d61f7f599b64f801d6b6249d68640fae6862f5ec Mon Sep 17 00:00:00 2001 From: Ryan Yuan Date: Wed, 23 Sep 2020 05:10:07 +1000 Subject: [PATCH 352/892] feat: add response status to DirectRow.commit() (#128) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Thank you for opening a Pull Request! Before submitting your PR, there are a few things you can do to make sure it goes smoothly: - [x] Make sure to open an issue as a [bug/issue](https://github.com/googleapis/python-bigtable/issues/new/choose) before writing your code! That way we can discuss the change, evaluate designs, and agree on the general idea - [x] Ensure the tests and linter pass - [x] Code coverage does not decrease (if any source code was changed) - [x] Appropriate docs were updated (if necessary) Fixes #127 🦕 --- .../google/cloud/bigtable/row.py | 8 +++++- .../tests/unit/test_row.py | 26 +++++++++++++++++++ 2 files changed, 33 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row.py index b28b86aa2cf5..87a2680568de 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row.py @@ -457,12 +457,18 @@ def commit(self): :end-before: [END bigtable_row_commit] :dedent: 4 + :rtype: :class:`~google.rpc.status_pb2.Status` + :returns: A response status (`google.rpc.status_pb2.Status`) + representing success or failure of the row committed. :raises: :exc:`~.table.TooManyMutationsError` if the number of mutations is greater than 100,000. """ - self._table.mutate_rows([self]) + response = self._table.mutate_rows([self]) + self.clear() + return response[0] + def clear(self): """Removes all currently accumulated mutations on the current row. 
diff --git a/packages/google-cloud-bigtable/tests/unit/test_row.py b/packages/google-cloud-bigtable/tests/unit/test_row.py index 47424d910d97..16a8232ec5bd 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row.py @@ -359,6 +359,29 @@ def test_commit(self): row.commit() self.assertEqual(table.mutated_rows, [row]) + def test_commit_with_exception(self): + from google.rpc import status_pb2 + + project_id = "project-id" + row_key = b"row_key" + table_name = "projects/more-stuff" + column_family_id = u"column_family_id" + column = b"column" + + credentials = _make_credentials() + client = self._make_client( + project=project_id, credentials=credentials, admin=True + ) + table = _Table(table_name, client=client) + row = self._make_one(row_key, table) + value = b"bytes-value" + + # Perform the method and check the result. + row.set_cell(column_family_id, column, value) + result = row.commit() + expected = status_pb2.Status(code=0) + self.assertEqual(result, expected) + class TestConditionalRow(unittest.TestCase): @staticmethod @@ -832,4 +855,7 @@ def __init__(self, name, client=None, app_profile_id=None): self.mutated_rows = [] def mutate_rows(self, rows): + from google.rpc import status_pb2 + self.mutated_rows.extend(rows) + return [status_pb2.Status(code=0)] From 262aca5dd7b81c020d77ce6bf0381d10f9df21bb Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Tue, 22 Sep 2020 16:05:01 -0400 Subject: [PATCH 353/892] chore: release 1.5.0 (#96) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- packages/google-cloud-bigtable/CHANGELOG.md | 30 +++++++++++++++++++++ packages/google-cloud-bigtable/setup.py | 2 +- 2 files changed, 31 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 56a43a7429e4..6c269cfd6a1f 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,36 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [1.5.0](https://www.github.com/googleapis/python-bigtable/compare/v1.4.0...v1.5.0) (2020-09-22) + + +### Features + +* add 'Rowset.add_row_range_with_prefix' ([#30](https://www.github.com/googleapis/python-bigtable/issues/30)) ([4796ac8](https://www.github.com/googleapis/python-bigtable/commit/4796ac85c877d75ed596cde7628dae31918ef726)) +* add response status to DirectRow.commit() ([#128](https://www.github.com/googleapis/python-bigtable/issues/128)) ([2478bb8](https://www.github.com/googleapis/python-bigtable/commit/2478bb864adbc71ef606e2b10b3bdfe3a7d44717)), closes [#127](https://www.github.com/googleapis/python-bigtable/issues/127) +* pass 'client_options' to base class ctor ([#104](https://www.github.com/googleapis/python-bigtable/issues/104)) ([e55ca07](https://www.github.com/googleapis/python-bigtable/commit/e55ca07561f9c946276f3bde599e69947769f560)), closes [#69](https://www.github.com/googleapis/python-bigtable/issues/69) + + +### Bug Fixes + +* pass timeout to 'PartialRowsData.response_iterator' ([#16](https://www.github.com/googleapis/python-bigtable/issues/16)) ([8f76434](https://www.github.com/googleapis/python-bigtable/commit/8f764343e01d50ad880363f5a4e5630122cbdb25)) +* retry if failure occurs on initial call in MutateRows ([#123](https://www.github.com/googleapis/python-bigtable/issues/123)) 
([0c9cde8](https://www.github.com/googleapis/python-bigtable/commit/0c9cde8ade0e4f50d06bbbd1b4169ae5c545b2c0)) +* **python_samples:** README link fix, enforce samples=True ([#114](https://www.github.com/googleapis/python-bigtable/issues/114)) ([dfe658a](https://www.github.com/googleapis/python-bigtable/commit/dfe658a2b1270eda7a8a084aca28d65b3297a04f)) + + +### Documentation + +* add sample for writing data with Beam ([#80](https://www.github.com/googleapis/python-bigtable/issues/80)) ([6900290](https://www.github.com/googleapis/python-bigtable/commit/6900290e00daf04ca545284b3f0a591a2de11136)) +* clarify 'Table.read_rows' snippet ([#50](https://www.github.com/googleapis/python-bigtable/issues/50)) ([5ca8bbd](https://www.github.com/googleapis/python-bigtable/commit/5ca8bbd0fb9c4a7cef7b4cbb67d1ba9f2382f2d8)) +* document 'row_set' module explicitly ([#29](https://www.github.com/googleapis/python-bigtable/issues/29)) ([0e0291e](https://www.github.com/googleapis/python-bigtable/commit/0e0291e56cbaeec00ede5275e17af2968a12251c)) +* Pysamples new readme gen ([#112](https://www.github.com/googleapis/python-bigtable/issues/112)) ([3ecca7a](https://www.github.com/googleapis/python-bigtable/commit/3ecca7a7b52b0f4fc38db5c5016622b994c1a8aa)) +* remove indent from snippet code blocks ([#49](https://www.github.com/googleapis/python-bigtable/issues/49)) ([1fbadf9](https://www.github.com/googleapis/python-bigtable/commit/1fbadf906204c622b9cff3fa073d8fc43d3597f7)) +* switch links to client documentation ([#93](https://www.github.com/googleapis/python-bigtable/issues/93)) ([2c973e6](https://www.github.com/googleapis/python-bigtable/commit/2c973e6cce969e7003be0b3d7a164bdc61b91ef1)) +* update docs build (via synth) ([#99](https://www.github.com/googleapis/python-bigtable/issues/99)) ([c301b53](https://www.github.com/googleapis/python-bigtable/commit/c301b53db4f7d48fd76548a5cd3a01cc46ff1522)), closes [#700](https://www.github.com/googleapis/python-bigtable/issues/700) +* update links to reflect new Github org ([#48](https://www.github.com/googleapis/python-bigtable/issues/48)) ([9bb11ed](https://www.github.com/googleapis/python-bigtable/commit/9bb11edc885958286b5b31fa18cfd0db95338cb4)) +* use correct storage type constant in docstrings ([#110](https://www.github.com/googleapis/python-bigtable/issues/110)) ([bc6db77](https://www.github.com/googleapis/python-bigtable/commit/bc6db77809a89fd6f3b2095cfe9b84d2da1bf304)) +* **samples:** filter cpu query to get metrics for the correct resources [([#4238](https://www.github.com/googleapis/python-bigtable/issues/4238))](https://github.com/GoogleCloudPlatform/python-docs-samples/issues/4238) ([#81](https://www.github.com/googleapis/python-bigtable/issues/81)) ([2c8c386](https://www.github.com/googleapis/python-bigtable/commit/2c8c3864c43a7ac9c85a0cd7c9cd4eec7434b42d)) + ## [1.4.0](https://www.github.com/googleapis/python-bigtable/compare/v1.3.0...v1.4.0) (2020-07-21) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index ece5050d5ad5..1cdd52e028c2 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -22,7 +22,7 @@ name = 'google-cloud-bigtable' description = 'Google Cloud Bigtable API client library' -version = "1.4.0" +version = "1.5.0" # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' From b85c20ae022a13704e9e014d8005440722e7d960 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Wed, 23 Sep 2020 10:45:51 -0400 Subject: [PATCH 354/892] 
tests: harden 'test_delete_column_family' against 504 (#141) Closes #140 --- packages/google-cloud-bigtable/tests/system.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index 50dc5c31dac9..92bd582a3138 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -18,6 +18,7 @@ import time import unittest +from google.api_core.exceptions import DeadlineExceeded from google.api_core.exceptions import TooManyRequests from google.cloud.environment_vars import BIGTABLE_EMULATOR from test_utils.retry import RetryErrors @@ -831,7 +832,8 @@ def test_delete_column_family(self): col_fams = temp_table.list_column_families() self.assertEqual(list(col_fams.keys()), [COLUMN_FAMILY_ID1]) - column_family.delete() + retry_504 = RetryErrors(DeadlineExceeded) + retry_504(column_family.delete)() # Make sure we have successfully deleted it. self.assertEqual(temp_table.list_column_families(), {}) From c30be9c9449d28d71e91be078d2847fb9cc34e25 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Wed, 23 Sep 2020 07:55:01 -0700 Subject: [PATCH 355/892] chore: remove note about editable installs (#144) `pip install -e .` is supported and is how we install the library for tests. Source-Author: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Source-Date: Tue Sep 22 12:06:12 2020 -0600 Source-Repo: googleapis/synthtool Source-Sha: a651c5fb763c69a921aecdd3e1d8dc51dbf20f8d Source-Link: https://github.com/googleapis/synthtool/commit/a651c5fb763c69a921aecdd3e1d8dc51dbf20f8d --- .../google-cloud-bigtable/CONTRIBUTING.rst | 19 --- packages/google-cloud-bigtable/synth.metadata | 128 +++++++++++++++++- 2 files changed, 124 insertions(+), 23 deletions(-) diff --git a/packages/google-cloud-bigtable/CONTRIBUTING.rst b/packages/google-cloud-bigtable/CONTRIBUTING.rst index 5d9a099ac9ad..6d9432272aab 100644 --- a/packages/google-cloud-bigtable/CONTRIBUTING.rst +++ b/packages/google-cloud-bigtable/CONTRIBUTING.rst @@ -80,25 +80,6 @@ We use `nox `__ to instrument our tests. .. nox: https://pypi.org/project/nox/ -Note on Editable Installs / Develop Mode -======================================== - -- As mentioned previously, using ``setuptools`` in `develop mode`_ - or a ``pip`` `editable install`_ is not possible with this - library. This is because this library uses `namespace packages`_. - For context see `Issue #2316`_ and the relevant `PyPA issue`_. - - Since ``editable`` / ``develop`` mode can't be used, packages - need to be installed directly. Hence your changes to the source - tree don't get incorporated into the **already installed** - package. - -.. _namespace packages: https://www.python.org/dev/peps/pep-0420/ -.. _Issue #2316: https://github.com/GoogleCloudPlatform/google-cloud-python/issues/2316 -.. _PyPA issue: https://github.com/pypa/packaging-problems/issues/12 -.. _develop mode: https://setuptools.readthedocs.io/en/latest/setuptools.html#development-mode -.. _editable install: https://pip.pypa.io/en/stable/reference/pip_install/#editable-installs - ***************************************** I'm getting weird errors... Can you help? 
***************************************** diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index 73e14bc495bf..7fd12e9c7e50 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -3,8 +3,8 @@ { "git": { "name": ".", - "remote": "git@github.com:googleapis/python-bigtable", - "sha": "474b9f3408efa102533da5b9066cba0cd1ff3a9a" + "remote": "https://github.com/googleapis/python-bigtable.git", + "sha": "617f60b40d82460014333ebdd0cc39155ef77bee" } }, { @@ -19,14 +19,14 @@ "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "9d216d21544b5c9f2c85a6380ffcf20b67e1e459" + "sha": "a651c5fb763c69a921aecdd3e1d8dc51dbf20f8d" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "9d216d21544b5c9f2c85a6380ffcf20b67e1e459" + "sha": "a651c5fb763c69a921aecdd3e1d8dc51dbf20f8d" } } ], @@ -49,5 +49,125 @@ "generator": "bazel" } } + ], + "generatedFiles": [ + ".coveragerc", + ".flake8", + ".github/CONTRIBUTING.md", + ".github/ISSUE_TEMPLATE/bug_report.md", + ".github/ISSUE_TEMPLATE/feature_request.md", + ".github/ISSUE_TEMPLATE/support_request.md", + ".github/PULL_REQUEST_TEMPLATE.md", + ".github/release-please.yml", + ".github/snippet-bot.yml", + ".gitignore", + ".kokoro/build.sh", + ".kokoro/continuous/common.cfg", + ".kokoro/continuous/continuous.cfg", + ".kokoro/docker/docs/Dockerfile", + ".kokoro/docker/docs/fetch_gpg_keys.sh", + ".kokoro/docs/common.cfg", + ".kokoro/docs/docs-presubmit.cfg", + ".kokoro/docs/docs.cfg", + ".kokoro/populate-secrets.sh", + ".kokoro/presubmit/common.cfg", + ".kokoro/presubmit/presubmit.cfg", + ".kokoro/publish-docs.sh", + ".kokoro/release.sh", + ".kokoro/release/common.cfg", + ".kokoro/release/release.cfg", + ".kokoro/samples/lint/common.cfg", + ".kokoro/samples/lint/continuous.cfg", + ".kokoro/samples/lint/periodic.cfg", + ".kokoro/samples/lint/presubmit.cfg", + ".kokoro/samples/python3.6/common.cfg", + ".kokoro/samples/python3.6/continuous.cfg", + ".kokoro/samples/python3.6/periodic.cfg", + ".kokoro/samples/python3.6/presubmit.cfg", + ".kokoro/samples/python3.7/common.cfg", + ".kokoro/samples/python3.7/continuous.cfg", + ".kokoro/samples/python3.7/periodic.cfg", + ".kokoro/samples/python3.7/presubmit.cfg", + ".kokoro/samples/python3.8/common.cfg", + ".kokoro/samples/python3.8/continuous.cfg", + ".kokoro/samples/python3.8/periodic.cfg", + ".kokoro/samples/python3.8/presubmit.cfg", + ".kokoro/test-samples.sh", + ".kokoro/trampoline.sh", + ".kokoro/trampoline_v2.sh", + ".trampolinerc", + "CODE_OF_CONDUCT.md", + "CONTRIBUTING.rst", + "LICENSE", + "MANIFEST.in", + "docs/_static/custom.css", + "docs/_templates/layout.html", + "docs/conf.py", + "docs/multiprocessing.rst", + "google/cloud/bigtable_admin_v2/__init__.py", + "google/cloud/bigtable_admin_v2/gapic/__init__.py", + "google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py", + "google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client_config.py", + "google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py", + "google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py", + "google/cloud/bigtable_admin_v2/gapic/enums.py", + "google/cloud/bigtable_admin_v2/gapic/transports/__init__.py", + "google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py", + "google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py", + 
"google/cloud/bigtable_admin_v2/proto/__init__.py", + "google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto", + "google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py", + "google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py", + "google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto", + "google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py", + "google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py", + "google/cloud/bigtable_admin_v2/proto/common.proto", + "google/cloud/bigtable_admin_v2/proto/common_pb2.py", + "google/cloud/bigtable_admin_v2/proto/common_pb2_grpc.py", + "google/cloud/bigtable_admin_v2/proto/instance.proto", + "google/cloud/bigtable_admin_v2/proto/instance_pb2.py", + "google/cloud/bigtable_admin_v2/proto/instance_pb2_grpc.py", + "google/cloud/bigtable_admin_v2/proto/table.proto", + "google/cloud/bigtable_admin_v2/proto/table_pb2.py", + "google/cloud/bigtable_admin_v2/proto/table_pb2_grpc.py", + "google/cloud/bigtable_admin_v2/types.py", + "google/cloud/bigtable_v2/__init__.py", + "google/cloud/bigtable_v2/gapic/__init__.py", + "google/cloud/bigtable_v2/gapic/bigtable_client.py", + "google/cloud/bigtable_v2/gapic/bigtable_client_config.py", + "google/cloud/bigtable_v2/gapic/transports/__init__.py", + "google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py", + "google/cloud/bigtable_v2/proto/__init__.py", + "google/cloud/bigtable_v2/proto/bigtable.proto", + "google/cloud/bigtable_v2/proto/bigtable_pb2.py", + "google/cloud/bigtable_v2/proto/bigtable_pb2_grpc.py", + "google/cloud/bigtable_v2/proto/data.proto", + "google/cloud/bigtable_v2/proto/data_pb2.py", + "google/cloud/bigtable_v2/proto/data_pb2_grpc.py", + "google/cloud/bigtable_v2/types.py", + "renovate.json", + "samples/AUTHORING_GUIDE.md", + "samples/CONTRIBUTING.md", + "samples/README.md", + "samples/hello/README.md", + "samples/hello_happybase/README.md", + "samples/instanceadmin/README.md", + "samples/metricscaler/README.md", + "samples/quickstart/README.md", + "samples/quickstart_happybase/README.md", + "samples/snippets/README.md", + "samples/tableadmin/README.md", + "scripts/decrypt-secrets.sh", + "scripts/readme-gen/readme_gen.py", + "scripts/readme-gen/templates/README.tmpl.rst", + "scripts/readme-gen/templates/auth.tmpl.rst", + "scripts/readme-gen/templates/auth_api_key.tmpl.rst", + "scripts/readme-gen/templates/install_deps.tmpl.rst", + "scripts/readme-gen/templates/install_portaudio.tmpl.rst", + "setup.cfg", + "testing/.gitignore", + "tests/unit/gapic/v2/test_bigtable_client_v2.py", + "tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py", + "tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py" ] } \ No newline at end of file From 1c859fc4e14837410a0131873c2cd18a5ddf574d Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Fri, 25 Sep 2020 16:49:48 +0200 Subject: [PATCH 356/892] chore(deps): update dependency google-cloud-bigtable to v1.5.0 (#142) --- packages/google-cloud-bigtable/samples/beam/requirements.txt | 2 +- packages/google-cloud-bigtable/samples/hello/requirements.txt | 2 +- .../samples/instanceadmin/requirements.txt | 2 +- .../google-cloud-bigtable/samples/metricscaler/requirements.txt | 2 +- .../google-cloud-bigtable/samples/quickstart/requirements.txt | 2 +- .../samples/snippets/filters/requirements.txt | 2 +- .../samples/snippets/reads/requirements.txt | 2 +- .../samples/snippets/writes/requirements.txt | 2 +- .../google-cloud-bigtable/samples/tableadmin/requirements.txt | 2 
+- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index 416b381ae3d4..4b690c76a020 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ apache-beam==2.24.0 -google-cloud-bigtable==1.4.0 +google-cloud-bigtable==1.5.0 google-cloud-core==1.4.1 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/hello/requirements.txt b/packages/google-cloud-bigtable/samples/hello/requirements.txt index 89c7140a1159..1d23bf24306e 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==1.4.0 +google-cloud-bigtable==1.5.0 google-cloud-core==1.4.1 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt index 1f3adb6bf996..f9c658397dcf 100755 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==1.4.0 +google-cloud-bigtable==1.5.0 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index b76a5af38d43..471385ad02d5 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==1.4.0 +google-cloud-bigtable==1.5.0 google-cloud-monitoring==1.1.0 diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt index 1f3adb6bf996..f9c658397dcf 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==1.4.0 +google-cloud-bigtable==1.5.0 diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt index 15192cadd2b6..f6a6a0172597 100755 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==1.4.0 +google-cloud-bigtable==1.5.0 snapshottest==0.5.1 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt index 15192cadd2b6..f6a6a0172597 100755 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==1.4.0 +google-cloud-bigtable==1.5.0 snapshottest==0.5.1 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt index 07d1f44fd5bb..67be6a0ef1a2 100755 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt @@ -1 +1 @@ 
-google-cloud-bigtable==1.4.0 \ No newline at end of file +google-cloud-bigtable==1.5.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt index 1f3adb6bf996..f9c658397dcf 100755 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==1.4.0 +google-cloud-bigtable==1.5.0 From a2bcab83e09a5c9fddcb453bec3e0eb0231ef056 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Fri, 25 Sep 2020 10:42:41 -0700 Subject: [PATCH 357/892] chore(python): skip reporting coverage for namespace package (#146) Source-Author: Tres Seaver Source-Date: Wed Sep 23 10:58:13 2020 -0400 Source-Repo: googleapis/synthtool Source-Sha: f3c04883d6c43261ff13db1f52d03a283be06871 Source-Link: https://github.com/googleapis/synthtool/commit/f3c04883d6c43261ff13db1f52d03a283be06871 --- packages/google-cloud-bigtable/.coveragerc | 5 ++++- packages/google-cloud-bigtable/synth.metadata | 6 +++--- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/packages/google-cloud-bigtable/.coveragerc b/packages/google-cloud-bigtable/.coveragerc index dd39c8546c41..0d8e6297dc9c 100644 --- a/packages/google-cloud-bigtable/.coveragerc +++ b/packages/google-cloud-bigtable/.coveragerc @@ -17,6 +17,8 @@ # Generated by synthtool. DO NOT EDIT! [run] branch = True +omit = + google/cloud/__init__.py [report] fail_under = 100 @@ -32,4 +34,5 @@ omit = */gapic/*.py */proto/*.py */core/*.py - */site-packages/*.py \ No newline at end of file + */site-packages/*.py + google/cloud/__init__.py diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index 7fd12e9c7e50..7206c46f8619 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -4,7 +4,7 @@ "git": { "name": ".", "remote": "https://github.com/googleapis/python-bigtable.git", - "sha": "617f60b40d82460014333ebdd0cc39155ef77bee" + "sha": "3861f6b0552e431a1fc7aa872c4d293ca129c28c" } }, { @@ -19,14 +19,14 @@ "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "a651c5fb763c69a921aecdd3e1d8dc51dbf20f8d" + "sha": "f3c04883d6c43261ff13db1f52d03a283be06871" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "a651c5fb763c69a921aecdd3e1d8dc51dbf20f8d" + "sha": "f3c04883d6c43261ff13db1f52d03a283be06871" } } ], From c79e0928b7a4fd7d8268fac3c76c24065d832beb Mon Sep 17 00:00:00 2001 From: Justin Beckwith Date: Tue, 29 Sep 2020 15:39:29 -0700 Subject: [PATCH 358/892] chore: add sync repo settings config (#143) --- .../.github/sync-repo-settings.yaml | 46 +++++++++++++++++++ 1 file changed, 46 insertions(+) create mode 100644 packages/google-cloud-bigtable/.github/sync-repo-settings.yaml diff --git a/packages/google-cloud-bigtable/.github/sync-repo-settings.yaml b/packages/google-cloud-bigtable/.github/sync-repo-settings.yaml new file mode 100644 index 000000000000..97245042dc30 --- /dev/null +++ b/packages/google-cloud-bigtable/.github/sync-repo-settings.yaml @@ -0,0 +1,46 @@ +# Whether or not rebase-merging is enabled on this repository. +# Defaults to `true` +rebaseMergeAllowed: true + +# Whether or not squash-merging is enabled on this repository. 
+# Defaults to `true` +squashMergeAllowed: true + +# Whether or not PRs are merged with a merge commit on this repository. +# Defaults to `false` +mergeCommitAllowed: false + +# Rules for master branch protection +branchProtectionRules: +# Identifies the protection rule pattern. Name of the branch to be protected. +# Defaults to `master` +- pattern: master + # Can admins overwrite branch protection. + # Defaults to `true` + isAdminEnforced: true + # Number of approving reviews required to update matching branches. + # Defaults to `1` + requiredApprovingReviewCount: 1 + # Are reviews from code owners required to update matching branches. + # Defaults to `false` + requiresCodeOwnerReviews: true + # Require up to date branches + requiresStrictStatusChecks: false + # List of required status check contexts that must pass for commits to be accepted to matching branches. + requiredStatusCheckContexts: + - 'Kokoro' + - 'cla/google' +# List of explicit permissions to add (additive only) +permissionRules: + # Team slug to add to repository permissions + - team: yoshi-admins + # Access level required, one of push|pull|admin|maintain|triage + permission: admin + # Team slug to add to repository permissions + - team: yoshi-python-admins + # Access level required, one of push|pull|admin|maintain|triage + permission: admin + # Team slug to add to repository permissions + - team: yoshi-python + # Access level required, one of push|pull|admin|maintain|triage + permission: push From 2d5a812c98265742154514af8e3fb943a9904575 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Fri, 2 Oct 2020 18:45:22 +0200 Subject: [PATCH 359/892] chore(deps): update dependency snapshottest to v0.6.0 (#147) --- .../samples/snippets/filters/requirements.txt | 2 +- .../samples/snippets/reads/requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt index f6a6a0172597..0e902be53b95 100755 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt @@ -1,2 +1,2 @@ google-cloud-bigtable==1.5.0 -snapshottest==0.5.1 \ No newline at end of file +snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt index f6a6a0172597..0e902be53b95 100755 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt @@ -1,2 +1,2 @@ google-cloud-bigtable==1.5.0 -snapshottest==0.5.1 \ No newline at end of file +snapshottest==0.6.0 \ No newline at end of file From a12ceff4fcf2ee3ba6f4ee74ad3bcd5895ffd391 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Mon, 5 Oct 2020 16:15:11 +0200 Subject: [PATCH 360/892] chore(deps): update dependency google-cloud-core to v1.4.2 (#149) --- packages/google-cloud-bigtable/samples/beam/requirements.txt | 2 +- packages/google-cloud-bigtable/samples/hello/requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index 4b690c76a020..3628e62e585a 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ 
b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ apache-beam==2.24.0 google-cloud-bigtable==1.5.0 -google-cloud-core==1.4.1 \ No newline at end of file +google-cloud-core==1.4.2 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/hello/requirements.txt b/packages/google-cloud-bigtable/samples/hello/requirements.txt index 1d23bf24306e..185b56b7e7c2 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements.txt @@ -1,2 +1,2 @@ google-cloud-bigtable==1.5.0 -google-cloud-core==1.4.1 +google-cloud-core==1.4.2 From d437b7fa38f91f24148dccf8ea6d338fcd02efa3 Mon Sep 17 00:00:00 2001 From: Christopher Wilcox Date: Tue, 6 Oct 2020 13:34:21 -0700 Subject: [PATCH 361/892] fix: harden version data gathering against DistributionNotFound (#150) --- .../google/cloud/bigtable/__init__.py | 8 ++++++-- .../gapic/bigtable_instance_admin_client.py | 9 ++++++--- .../gapic/bigtable_table_admin_client.py | 11 ++++++++--- .../google/cloud/bigtable_v2/gapic/bigtable_client.py | 9 ++++++--- 4 files changed, 26 insertions(+), 11 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable/__init__.py index 75b765a8a0da..f2c5a24bd5a2 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/__init__.py @@ -15,9 +15,13 @@ """Google Cloud Bigtable API package.""" -from pkg_resources import get_distribution +import pkg_resources + +try: + __version__ = pkg_resources.get_distribution("google-cloud-bigtable").version +except pkg_resources.DistributionNotFound: + __version__ = None -__version__ = get_distribution("google-cloud-bigtable").version from google.cloud.bigtable.client import Client diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py index d27154d5304c..8b1795249269 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py @@ -49,9 +49,12 @@ from google.protobuf import field_mask_pb2 -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( - "google-cloud-bigtable", -).version +try: + _GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( + "google-cloud-bigtable" + ).version +except pkg_resources.DistributionNotFound: + _GAPIC_LIBRARY_VERSION = None class BigtableInstanceAdminClient(object): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py index acbc4b26f385..2f19a880a33c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py @@ -54,9 +54,14 @@ from google.protobuf import field_mask_pb2 -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( - "google-cloud-bigtable", -).version +import pkg_resources + +try: + _GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( + "google-cloud-bigtable" + ).version +except pkg_resources.DistributionNotFound: + 
_GAPIC_LIBRARY_VERSION = None class BigtableTableAdminClient(object): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py index 7e544c99e3ce..43ff81029fee 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py @@ -36,9 +36,12 @@ from google.cloud.bigtable_v2.proto import data_pb2 -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( - "google-cloud-bigtable", -).version +try: + _GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( + "google-cloud-bigtable" + ).version +except pkg_resources.DistributionNotFound: + _GAPIC_LIBRARY_VERSION = None class BigtableClient(object): From 962a1e7db23f30021b8c8c7c9855a02f35355ee8 Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Tue, 6 Oct 2020 14:11:56 -0700 Subject: [PATCH 362/892] chore: release 1.5.1 (#151) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- packages/google-cloud-bigtable/CHANGELOG.md | 7 +++++++ packages/google-cloud-bigtable/setup.py | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 6c269cfd6a1f..f46bcc42f3ae 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,13 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +### [1.5.1](https://www.github.com/googleapis/python-bigtable/compare/v1.5.0...v1.5.1) (2020-10-06) + + +### Bug Fixes + +* harden version data gathering against DistributionNotFound ([#150](https://www.github.com/googleapis/python-bigtable/issues/150)) ([c815421](https://www.github.com/googleapis/python-bigtable/commit/c815421422f1c845983e174651a5292767cfe2e7)) + ## [1.5.0](https://www.github.com/googleapis/python-bigtable/compare/v1.4.0...v1.5.0) (2020-09-22) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 1cdd52e028c2..0f3c9cd82887 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -22,7 +22,7 @@ name = 'google-cloud-bigtable' description = 'Google Cloud Bigtable API client library' -version = "1.5.0" +version = "1.5.1" # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' From 27fb95bd8c72cabce750ab844fd47bc71c69512c Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Wed, 7 Oct 2020 18:36:54 +0200 Subject: [PATCH 363/892] chore(deps): update dependency google-cloud-bigtable to v1.5.1 (#152) --- packages/google-cloud-bigtable/samples/beam/requirements.txt | 2 +- packages/google-cloud-bigtable/samples/hello/requirements.txt | 2 +- .../samples/instanceadmin/requirements.txt | 2 +- .../google-cloud-bigtable/samples/metricscaler/requirements.txt | 2 +- .../google-cloud-bigtable/samples/quickstart/requirements.txt | 2 +- .../samples/snippets/filters/requirements.txt | 2 +- .../samples/snippets/reads/requirements.txt | 2 +- .../samples/snippets/writes/requirements.txt | 2 +- .../google-cloud-bigtable/samples/tableadmin/requirements.txt | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt 
index 3628e62e585a..a5ff4714ce3e 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ apache-beam==2.24.0 -google-cloud-bigtable==1.5.0 +google-cloud-bigtable==1.5.1 google-cloud-core==1.4.2 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/hello/requirements.txt b/packages/google-cloud-bigtable/samples/hello/requirements.txt index 185b56b7e7c2..770185a0574f 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==1.5.0 +google-cloud-bigtable==1.5.1 google-cloud-core==1.4.2 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt index f9c658397dcf..23b27ea709e0 100755 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==1.5.0 +google-cloud-bigtable==1.5.1 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index 471385ad02d5..f0ff9c764956 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==1.5.0 +google-cloud-bigtable==1.5.1 google-cloud-monitoring==1.1.0 diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt index f9c658397dcf..23b27ea709e0 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==1.5.0 +google-cloud-bigtable==1.5.1 diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt index 0e902be53b95..ac34bb1f7ef1 100755 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==1.5.0 +google-cloud-bigtable==1.5.1 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt index 0e902be53b95..ac34bb1f7ef1 100755 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==1.5.0 +google-cloud-bigtable==1.5.1 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt index 67be6a0ef1a2..168f72e9180c 100755 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==1.5.0 \ No newline at end of file +google-cloud-bigtable==1.5.1 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt 
b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt index f9c658397dcf..23b27ea709e0 100755 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==1.5.0 +google-cloud-bigtable==1.5.1 From 6d69f0a4256bbad62914d146750827bd3916670f Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Wed, 7 Oct 2020 19:06:04 +0200 Subject: [PATCH 364/892] chore(deps): update dependency google-cloud-core to v1.4.3 (#153) This PR contains the following updates: | Package | Update | Change | |---|---|---| | [google-cloud-core](https://togithub.com/googleapis/python-cloud-core) | patch | `==1.4.2` -> `==1.4.3` | --- ### Release Notes
googleapis/python-cloud-core

### [`v1.4.3`](https://togithub.com/googleapis/python-cloud-core/blob/master/CHANGELOG.md#​143-httpswwwgithubcomgoogleapispython-cloud-corecomparev142v143-2020-10-06)

[Compare Source](https://togithub.com/googleapis/python-cloud-core/compare/v1.4.2...v1.4.3)
--- ### Renovate configuration :date: **Schedule**: At any time (no schedule defined). :vertical_traffic_light: **Automerge**: Disabled by config. Please merge this manually once you are satisfied. :recycle: **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. :no_bell: **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [WhiteSource Renovate](https://renovate.whitesourcesoftware.com). View repository job log [here](https://app.renovatebot.com/dashboard#github/googleapis/python-bigtable). --- packages/google-cloud-bigtable/samples/beam/requirements.txt | 2 +- packages/google-cloud-bigtable/samples/hello/requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index a5ff4714ce3e..5ec8ddb2cace 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ apache-beam==2.24.0 google-cloud-bigtable==1.5.1 -google-cloud-core==1.4.2 \ No newline at end of file +google-cloud-core==1.4.3 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/hello/requirements.txt b/packages/google-cloud-bigtable/samples/hello/requirements.txt index 770185a0574f..3360cf133306 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements.txt @@ -1,2 +1,2 @@ google-cloud-bigtable==1.5.1 -google-cloud-core==1.4.2 +google-cloud-core==1.4.3 From 8177aaeabf4d2634b038f288af68cfed37ce08bd Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Thu, 8 Oct 2020 18:10:26 +0200 Subject: [PATCH 365/892] chore(deps): update dependency google-cloud-monitoring to v2 (#154) --- .../google-cloud-bigtable/samples/metricscaler/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index f0ff9c764956..aa1a6e220ce2 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ google-cloud-bigtable==1.5.1 -google-cloud-monitoring==1.1.0 +google-cloud-monitoring==2.0.0 From e7ebf0fc97ef0b2045a427511e0c5d926e6b31bb Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Wed, 11 Nov 2020 17:11:20 -0500 Subject: [PATCH 366/892] chore: clean up synth replacements (#161) - Remove those which no longer match. - Apply '_GAPIC_LIBRARY_VERSION' tweak from PR #150. Closes #155. Closes #156. 
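For readability, here is the version-lookup pattern that the updated synth replacement in the diff below writes into the generated admin clients, restated as plain Python instead of an escaped replacement string; it mirrors the replacement text in the patch and adds no behavior of its own:

    # Sketch of the hardened version lookup injected by the synth replacement
    # below; this only restates the replacement string from the diff.
    import pkg_resources

    try:
        _GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(
            "google-cloud-bigtable"
        ).version
    except pkg_resources.DistributionNotFound:  # pragma: NO COVER
        _GAPIC_LIBRARY_VERSION = None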
--- packages/google-cloud-bigtable/synth.py | 50 ++++++++++--------------- 1 file changed, 19 insertions(+), 31 deletions(-) diff --git a/packages/google-cloud-bigtable/synth.py b/packages/google-cloud-bigtable/synth.py index 8a2fed1c722e..21100c749a5c 100644 --- a/packages/google-cloud-bigtable/synth.py +++ b/packages/google-cloud-bigtable/synth.py @@ -45,40 +45,28 @@ s.move(library / "google/cloud/bigtable_admin_v2") s.move(library / "tests") -s.replace( - [ - "google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py", - "google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py", - ], - "'google-cloud-bigtable-admin'", - "'google-cloud-bigtable'", -) - -s.replace( - "google/**/*.py", - "from google\.cloud\.bigtable\.admin_v2.proto", - "from google.cloud.bigtable_admin_v2.proto", -) +# Work around non-standard installations -s.replace( - ["google/cloud/bigtable_admin_v2/__init__.py"], - " __doc__ = bigtable_instance_admin_client." - "BigtableInstanceAdminClient.__doc__\n", - " __doc__ = (\n" - " bigtable_instance_admin_client.BigtableInstanceAdminClient." - "__doc__)\n", -) - -s.replace( - ["google/cloud/bigtable_v2/gapic/bigtable_client.py"], - "if ``true_mutations`` is empty, and at most\n\n\s*100000.", - "if ``true_mutations`` is empty, and at most 100000.", -) +admin_clients = [ + "google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py", + "google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py", +] s.replace( - ["google/cloud/bigtable_v2/gapic/bigtable_client.py"], - "if ``false_mutations`` is empty, and at most\n\n\s*100000.", - "if ``false_mutations`` is empty, and at most 100000.", + admin_clients, + """\ +_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution\( + 'google-cloud-bigtable-admin', +\).version +""", + """\ +try: + _GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( + "google-cloud-bigtable" + ).version +except pkg_resources.DistributionNotFound: # pragma: NO COVER + _GAPIC_LIBRARY_VERSION = None +""" ) # ---------------------------------------------------------------------------- From 4d40b66a5ab1bf93384cced17f980a21b969a1a6 Mon Sep 17 00:00:00 2001 From: MF2199 <38331387+mf2199@users.noreply.github.com> Date: Thu, 12 Nov 2020 12:08:21 -0500 Subject: [PATCH 367/892] feat: Backup Level IAM (#160) --- .../google/cloud/bigtable/backup.py | 52 +++++++++ .../tests/unit/test_backup.py | 107 ++++++++++++++++++ 2 files changed, 159 insertions(+) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/backup.py b/packages/google-cloud-bigtable/google/cloud/bigtable/backup.py index 03a1c894edd4..291ac783ad41 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/backup.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/backup.py @@ -21,6 +21,7 @@ BigtableTableAdminClient, ) from google.cloud.bigtable_admin_v2.types import table_pb2 +from google.cloud.bigtable.policy import Policy from google.cloud.exceptions import NotFound from google.protobuf import field_mask_pb2 @@ -392,3 +393,54 @@ def restore(self, table_id): """ api = self._instance._client.table_admin_client return api.restore_table(self._instance.name, table_id, self.name) + + def get_iam_policy(self): + """Gets the IAM access control policy for this backup. + + :rtype: :class:`google.cloud.bigtable.policy.Policy` + :returns: The current IAM policy of this backup. 
+ """ + table_api = self._instance._client.table_admin_client + args = {"resource": self.name} + response = table_api.get_iam_policy(**args) + return Policy.from_pb(response) + + def set_iam_policy(self, policy): + """Sets the IAM access control policy for this backup. Replaces any + existing policy. + + For more information about policy, please see documentation of + class `google.cloud.bigtable.policy.Policy` + + :type policy: :class:`google.cloud.bigtable.policy.Policy` + :param policy: A new IAM policy to replace the current IAM policy + of this backup. + + :rtype: :class:`google.cloud.bigtable.policy.Policy` + :returns: The current IAM policy of this backup. + """ + table_api = self._instance._client.table_admin_client + response = table_api.set_iam_policy(resource=self.name, policy=policy.to_pb()) + return Policy.from_pb(response) + + def test_iam_permissions(self, permissions): + """Tests whether the caller has the given permissions for this backup. + Returns the permissions that the caller has. + + :type permissions: list + :param permissions: The set of permissions to check for + the ``resource``. Permissions with wildcards (such as '*' + or 'storage.*') are not allowed. For more information see + `IAM Overview + `_. + `Bigtable Permissions + `_. + + :rtype: list + :returns: A List(string) of permissions allowed on the backup. + """ + table_api = self._instance._client.table_admin_client + response = table_api.test_iam_permissions( + resource=self.name, permissions=permissions + ) + return list(response.permissions) diff --git a/packages/google-cloud-bigtable/tests/unit/test_backup.py b/packages/google-cloud-bigtable/tests/unit/test_backup.py index 2f263dffdc8a..0285d668bf74 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_backup.py +++ b/packages/google-cloud-bigtable/tests/unit/test_backup.py @@ -734,6 +734,113 @@ def test_restore_success(self): backup=self.BACKUP_NAME, ) + def test_get_iam_policy(self): + from google.cloud.bigtable.client import Client + from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.iam.v1 import policy_pb2 + from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE + + credentials = _make_credentials() + client = Client(project=self.PROJECT_ID, credentials=credentials, admin=True) + + instance = client.instance(instance_id=self.INSTANCE_ID) + backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) + + version = 1 + etag = b"etag_v1" + members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"] + bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": members}] + iam_policy = policy_pb2.Policy(version=version, etag=etag, bindings=bindings) + + table_api = mock.create_autospec( + bigtable_table_admin_client.BigtableTableAdminClient + ) + client._table_admin_client = table_api + table_api.get_iam_policy.return_value = iam_policy + + result = backup.get_iam_policy() + + table_api.get_iam_policy.assert_called_once_with(resource=backup.name) + self.assertEqual(result.version, version) + self.assertEqual(result.etag, etag) + + admins = result.bigtable_admins + self.assertEqual(len(admins), len(members)) + for found, expected in zip(sorted(admins), sorted(members)): + self.assertEqual(found, expected) + + def test_set_iam_policy(self): + from google.cloud.bigtable.client import Client + from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.iam.v1 import policy_pb2 + from google.cloud.bigtable.policy import Policy + from 
google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE + + credentials = _make_credentials() + client = Client(project=self.PROJECT_ID, credentials=credentials, admin=True) + + instance = client.instance(instance_id=self.INSTANCE_ID) + backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) + + version = 1 + etag = b"etag_v1" + members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"] + bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": sorted(members)}] + iam_policy_pb = policy_pb2.Policy(version=version, etag=etag, bindings=bindings) + + table_api = mock.create_autospec( + bigtable_table_admin_client.BigtableTableAdminClient + ) + client._table_admin_client = table_api + table_api.set_iam_policy.return_value = iam_policy_pb + + iam_policy = Policy(etag=etag, version=version) + iam_policy[BIGTABLE_ADMIN_ROLE] = [ + Policy.user("user1@test.com"), + Policy.service_account("service_acc1@test.com"), + ] + + result = backup.set_iam_policy(iam_policy) + + table_api.set_iam_policy.assert_called_once_with( + resource=backup.name, policy=iam_policy_pb + ) + self.assertEqual(result.version, version) + self.assertEqual(result.etag, etag) + + admins = result.bigtable_admins + self.assertEqual(len(admins), len(members)) + for found, expected in zip(sorted(admins), sorted(members)): + self.assertEqual(found, expected) + + def test_test_iam_permissions(self): + from google.cloud.bigtable.client import Client + from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.iam.v1 import iam_policy_pb2 + + credentials = _make_credentials() + client = Client(project=self.PROJECT_ID, credentials=credentials, admin=True) + + instance = client.instance(instance_id=self.INSTANCE_ID) + backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) + + permissions = ["bigtable.backups.create", "bigtable.backups.list"] + + response = iam_policy_pb2.TestIamPermissionsResponse(permissions=permissions) + + table_api = mock.create_autospec( + bigtable_table_admin_client.BigtableTableAdminClient + ) + table_api.test_iam_permissions.return_value = response + client._table_admin_client = table_api + + result = backup.test_iam_permissions(permissions) + + self.assertEqual(result, permissions) + table_api.test_iam_permissions.assert_called_once_with( + resource=backup.name, permissions=permissions + ) + class _Client(object): def __init__(self, project=TestBackup.PROJECT_ID): From f18b50981613e6eaac8095137e211283b611a7a3 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Thu, 12 Nov 2020 15:58:51 -0500 Subject: [PATCH 368/892] feat: add 'timeout' arg to 'Table.mutate_rows' (#157) Also, call data client's 'mutate_rows' directly -- do *not* scribble on its internal API wrappers. 
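A minimal usage sketch of the new argument follows; the 'table' and 'rows' names and the 30.0-second value are illustrative assumptions, not part of this patch:

    # Hypothetical illustration: 'table' is assumed to be an existing
    # google.cloud.bigtable Table and 'rows' a list of DirectRow objects
    # with staged mutations; 'timeout' bounds retries for the call, in
    # seconds, per the new keyword argument added below.
    statuses = table.mutate_rows(rows, timeout=30.0)
    # Each entry is a google.rpc.status_pb2.Status; code 0 means success.
    failed = [i for i, status in enumerate(statuses) if status.code != 0]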
See: https://github.com/googleapis/python-bigtable/issues/7#issuecomment-715538708 Closes #7 --- .../google/cloud/bigtable/table.py | 67 +++--- .../tests/unit/test_table.py | 196 +++++++++++++----- 2 files changed, 172 insertions(+), 91 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index 950a8c3fe827..35ca43d2460b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -20,9 +20,9 @@ from google.api_core.exceptions import NotFound from google.api_core.exceptions import RetryError from google.api_core.exceptions import ServiceUnavailable +from google.api_core.gapic_v1.method import DEFAULT from google.api_core.retry import if_exception_type from google.api_core.retry import Retry -from google.api_core.gapic_v1.method import wrap_method from google.cloud._helpers import _to_bytes from google.cloud.bigtable.backup import Backup from google.cloud.bigtable.column_family import _gc_rule_from_pb @@ -625,7 +625,7 @@ def yield_rows(self, **kwargs): ) return self.read_rows(**kwargs) - def mutate_rows(self, rows, retry=DEFAULT_RETRY): + def mutate_rows(self, rows, retry=DEFAULT_RETRY, timeout=DEFAULT): """Mutates multiple rows in bulk. For example: @@ -656,17 +656,23 @@ def mutate_rows(self, rows, retry=DEFAULT_RETRY): the :meth:`~google.api_core.retry.Retry.with_delay` method or the :meth:`~google.api_core.retry.Retry.with_deadline` method. + :type timeout: float + :param timeout: number of seconds bounding retries for the call + :rtype: list :returns: A list of response statuses (`google.rpc.status_pb2.Status`) corresponding to success or failure of each row mutation sent. These will be in the same order as the `rows`. """ + if timeout is DEFAULT: + timeout = self.mutation_timeout + retryable_mutate_rows = _RetryableMutateRowsWorker( self._instance._client, self.name, rows, app_profile_id=self._app_profile_id, - timeout=self.mutation_timeout, + timeout=timeout, ) return retryable_mutate_rows(retry=retry) @@ -1058,27 +1064,20 @@ def _do_mutate_retryable_rows(self): # All mutations are either successful or non-retryable now. 
return self.responses_statuses - mutate_rows_request = _mutate_rows_request( - self.table_name, retryable_rows, app_profile_id=self.app_profile_id - ) + entries = _compile_mutation_entries(self.table_name, retryable_rows) data_client = self.client.table_data_client - inner_api_calls = data_client._inner_api_calls - if "mutate_rows" not in inner_api_calls: - default_retry = (data_client._method_configs["MutateRows"].retry,) - if self.timeout is None: - default_timeout = data_client._method_configs["MutateRows"].timeout - else: - default_timeout = timeout.ExponentialTimeout(deadline=self.timeout) - data_client._inner_api_calls["mutate_rows"] = wrap_method( - data_client.transport.mutate_rows, - default_retry=default_retry, - default_timeout=default_timeout, - client_info=data_client._client_info, - ) + + kwargs = {} + if self.timeout is not None: + kwargs["timeout"] = timeout.ExponentialTimeout(deadline=self.timeout) try: - responses = data_client._inner_api_calls["mutate_rows"]( - mutate_rows_request, retry=None + responses = data_client.mutate_rows( + self.table_name, + entries, + app_profile_id=self.app_profile_id, + retry=None, + **kwargs ) except (ServiceUnavailable, DeadlineExceeded, Aborted): # If an exception, considered retryable by `RETRY_CODES`, is @@ -1260,8 +1259,8 @@ def _create_row_request( return message -def _mutate_rows_request(table_name, rows, app_profile_id=None): - """Creates a request to mutate rows in a table. +def _compile_mutation_entries(table_name, rows): + """Create list of mutation entries :type table_name: str :param table_name: The name of the table to write to. @@ -1269,29 +1268,29 @@ def _mutate_rows_request(table_name, rows, app_profile_id=None): :type rows: list :param rows: List or other iterable of :class:`.DirectRow` instances. - :type: app_profile_id: str - :param app_profile_id: (Optional) The unique name of the AppProfile. - - :rtype: :class:`data_messages_v2_pb2.MutateRowsRequest` - :returns: The ``MutateRowsRequest`` protobuf corresponding to the inputs. + :rtype: List[:class:`data_messages_v2_pb2.MutateRowsRequest.Entry`] + :returns: entries corresponding to the inputs. 
:raises: :exc:`~.table.TooManyMutationsError` if the number of mutations is - greater than 100,000 - """ - request_pb = data_messages_v2_pb2.MutateRowsRequest( - table_name=table_name, app_profile_id=app_profile_id + greater than the max ({}) + """.format( + _MAX_BULK_MUTATIONS ) + entries = [] mutations_count = 0 + entry_klass = data_messages_v2_pb2.MutateRowsRequest.Entry + for row in rows: _check_row_table_name(table_name, row) _check_row_type(row) mutations = row._get_mutations() - request_pb.entries.add(row_key=row.row_key, mutations=mutations) + entries.append(entry_klass(row_key=row.row_key, mutations=mutations)) mutations_count += len(mutations) + if mutations_count > _MAX_BULK_MUTATIONS: raise TooManyMutationsError( "Maximum number of mutations is %s" % (_MAX_BULK_MUTATIONS,) ) - return request_pb + return entries def _check_row_table_name(table_name, row): diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index c99cd65913fc..4469846b12d1 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -20,14 +20,14 @@ from google.api_core.exceptions import DeadlineExceeded -class Test___mutate_rows_request(unittest.TestCase): +class Test__compile_mutation_entries(unittest.TestCase): def _call_fut(self, table_name, rows): - from google.cloud.bigtable.table import _mutate_rows_request + from google.cloud.bigtable.table import _compile_mutation_entries - return _mutate_rows_request(table_name, rows) + return _compile_mutation_entries(table_name, rows) @mock.patch("google.cloud.bigtable.table._MAX_BULK_MUTATIONS", new=3) - def test__mutate_rows_too_many_mutations(self): + def test_w_too_many_mutations(self): from google.cloud.bigtable.row import DirectRow from google.cloud.bigtable.table import TooManyMutationsError @@ -41,13 +41,15 @@ def test__mutate_rows_too_many_mutations(self): rows[0].set_cell("cf1", b"c1", 2) rows[1].set_cell("cf1", b"c1", 3) rows[1].set_cell("cf1", b"c1", 4) + with self.assertRaises(TooManyMutationsError): self._call_fut("table", rows) - def test__mutate_rows_request(self): + def test_normal(self): from google.cloud.bigtable.row import DirectRow + from google.cloud.bigtable_v2.proto import bigtable_pb2 - table = mock.Mock(name="table", spec=["name"]) + table = mock.Mock(spec=["name"]) table.name = "table" rows = [ DirectRow(row_key=b"row_key", table=table), @@ -55,25 +57,26 @@ def test__mutate_rows_request(self): ] rows[0].set_cell("cf1", b"c1", b"1") rows[1].set_cell("cf1", b"c1", b"2") + result = self._call_fut("table", rows) - expected_result = _mutate_rows_request_pb(table_name="table") - entry1 = expected_result.entries.add() - entry1.row_key = b"row_key" - mutations1 = entry1.mutations.add() - mutations1.set_cell.family_name = "cf1" - mutations1.set_cell.column_qualifier = b"c1" - mutations1.set_cell.timestamp_micros = -1 - mutations1.set_cell.value = b"1" - entry2 = expected_result.entries.add() - entry2.row_key = b"row_key_2" - mutations2 = entry2.mutations.add() - mutations2.set_cell.family_name = "cf1" - mutations2.set_cell.column_qualifier = b"c1" - mutations2.set_cell.timestamp_micros = -1 - mutations2.set_cell.value = b"2" + Entry = bigtable_pb2.MutateRowsRequest.Entry - self.assertEqual(result, expected_result) + entry_1 = Entry(row_key=b"row_key") + mutations_1 = entry_1.mutations.add() + mutations_1.set_cell.family_name = "cf1" + mutations_1.set_cell.column_qualifier = b"c1" + 
mutations_1.set_cell.timestamp_micros = -1 + mutations_1.set_cell.value = b"1" + + entry_2 = Entry(row_key=b"row_key_2") + mutations_2 = entry_2.mutations.add() + mutations_2.set_cell.family_name = "cf1" + mutations_2.set_cell.column_qualifier = b"c1" + mutations_2.set_cell.timestamp_micros = -1 + mutations_2.set_cell.value = b"2" + + self.assertEqual(result, [entry_1, entry_2]) class Test__check_row_table_name(unittest.TestCase): @@ -162,27 +165,49 @@ def _get_target_client_class(): def _make_client(self, *args, **kwargs): return self._get_target_client_class()(*args, **kwargs) - def test_constructor_w_admin(self): - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT_ID, credentials=credentials, admin=True - ) - instance = client.instance(instance_id=self.INSTANCE_ID) + def test_constructor_defaults(self): + instance = mock.Mock(spec=[]) + table = self._make_one(self.TABLE_ID, instance) + + self.assertEqual(table.table_id, self.TABLE_ID) + self.assertIs(table._instance, instance) + self.assertIsNone(table.mutation_timeout) + self.assertIsNone(table._app_profile_id) + + def test_constructor_explicit(self): + instance = mock.Mock(spec=[]) + mutation_timeout = 123 + app_profile_id = "profile-123" + + table = self._make_one( + self.TABLE_ID, + instance, + mutation_timeout=mutation_timeout, + app_profile_id=app_profile_id, + ) + self.assertEqual(table.table_id, self.TABLE_ID) - self.assertIs(table._instance._client, client) - self.assertEqual(table.name, self.TABLE_NAME) + self.assertIs(table._instance, instance) + self.assertEqual(table.mutation_timeout, mutation_timeout) + self.assertEqual(table._app_profile_id, app_profile_id) - def test_constructor_wo_admin(self): - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT_ID, credentials=credentials, admin=False + def test_name(self): + table_data_client = mock.Mock(spec=["table_path"]) + client = mock.Mock( + project=self.PROJECT_ID, + table_data_client=table_data_client, + spec=["project", "table_data_client"], ) - instance = client.instance(instance_id=self.INSTANCE_ID) + instance = mock.Mock( + _client=client, + instance_id=self.INSTANCE_ID, + spec=["_client", "instance_id"], + ) + table = self._make_one(self.TABLE_ID, instance) - self.assertEqual(table.table_id, self.TABLE_ID) - self.assertIs(table._instance._client, client) - self.assertEqual(table.name, self.TABLE_NAME) + + self.assertEqual(table.name, table_data_client.table_path.return_value) def _row_methods_helper(self): client = self._make_client( @@ -620,8 +645,11 @@ def test_read_row_still_partial(self): with self.assertRaises(ValueError): self._read_row_helper(chunks, None) - def test_mutate_rows(self): + def _mutate_rows_helper( + self, mutation_timeout=None, app_profile_id=None, retry=None, timeout=None + ): from google.rpc.status_pb2 import Status + from google.cloud.bigtable.table import DEFAULT_RETRY from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client table_api = mock.create_autospec( @@ -633,21 +661,78 @@ def test_mutate_rows(self): ) instance = client.instance(instance_id=self.INSTANCE_ID) client._table_admin_client = table_api - table = self._make_one(self.TABLE_ID, instance) + ctor_kwargs = {} - response = [Status(code=0), Status(code=1)] + if mutation_timeout is not None: + ctor_kwargs["mutation_timeout"] = mutation_timeout + + if app_profile_id is not None: + ctor_kwargs["app_profile_id"] = app_profile_id - mock_worker = mock.Mock(return_value=response) - with mock.patch( + 
table = self._make_one(self.TABLE_ID, instance, **ctor_kwargs) + + rows = [mock.MagicMock(), mock.MagicMock()] + response = [Status(code=0), Status(code=1)] + instance_mock = mock.Mock(return_value=response) + klass_mock = mock.patch( "google.cloud.bigtable.table._RetryableMutateRowsWorker", - new=mock.MagicMock(return_value=mock_worker), - ): - statuses = table.mutate_rows([mock.MagicMock(), mock.MagicMock()]) + new=mock.MagicMock(return_value=instance_mock), + ) + + call_kwargs = {} + + if retry is not None: + call_kwargs["retry"] = retry + + if timeout is not None: + expected_timeout = call_kwargs["timeout"] = timeout + else: + expected_timeout = mutation_timeout + + with klass_mock: + statuses = table.mutate_rows(rows, **call_kwargs) + result = [status.code for status in statuses] expected_result = [0, 1] - self.assertEqual(result, expected_result) + klass_mock.new.assert_called_once_with( + client, + self.TABLE_NAME, + rows, + app_profile_id=app_profile_id, + timeout=expected_timeout, + ) + + if retry is not None: + instance_mock.assert_called_once_with(retry=retry) + else: + instance_mock.assert_called_once_with(retry=DEFAULT_RETRY) + + def test_mutate_rows_w_default_mutation_timeout_app_profile_id(self): + self._mutate_rows_helper() + + def test_mutate_rows_w_mutation_timeout(self): + mutation_timeout = 123 + self._mutate_rows_helper(mutation_timeout=mutation_timeout) + + def test_mutate_rows_w_app_profile_id(self): + app_profile_id = "profile-123" + self._mutate_rows_helper(app_profile_id=app_profile_id) + + def test_mutate_rows_w_retry(self): + retry = mock.Mock() + self._mutate_rows_helper(retry=retry) + + def test_mutate_rows_w_timeout_arg(self): + timeout = 123 + self._mutate_rows_helper(timeout=timeout) + + def test_mutate_rows_w_mutation_timeout_and_timeout_arg(self): + mutation_timeout = 123 + timeout = 456 + self._mutate_rows_helper(mutation_timeout=mutation_timeout, timeout=timeout) + def test_read_rows(self): from google.cloud._testing import _Monkey from google.cloud.bigtable.row_data import PartialRowsData @@ -1424,21 +1509,18 @@ def test_callable_no_retry_strategy(self): row_3 = DirectRow(row_key=b"row_key_3", table=table) row_3.set_cell("cf", b"col", b"value3") - response = self._make_responses( - [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE] - ) + worker = self._make_worker(client, table.name, [row_1, row_2, row_3]) - with mock.patch("google.cloud.bigtable.table.wrap_method") as patched: - patched.return_value = mock.Mock(return_value=[response]) + response_codes = [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE] + response = self._make_responses(response_codes) + data_api.mutate_rows = mock.MagicMock(return_value=[response]) - worker = self._make_worker(client, table.name, [row_1, row_2, row_3]) - statuses = worker(retry=None) + statuses = worker(retry=None) result = [status.code for status in statuses] - expected_result = [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE] + self.assertEqual(result, response_codes) - client._table_data_client._inner_api_calls["mutate_rows"].assert_called_once() - self.assertEqual(result, expected_result) + data_api.mutate_rows.assert_called_once() def test_callable_retry(self): from google.cloud.bigtable.row import DirectRow From 41074d4082449369576a88eb019fdf64eba77561 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Mon, 16 Nov 2020 19:49:38 +0100 Subject: [PATCH 369/892] chore(deps): update dependency apache-beam to v2.25.0 (#158) --- packages/google-cloud-bigtable/samples/beam/requirements.txt | 2 +- 1 file 
changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index 5ec8ddb2cace..565c39c3920d 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ -apache-beam==2.24.0 +apache-beam==2.25.0 google-cloud-bigtable==1.5.1 google-cloud-core==1.4.3 \ No newline at end of file From 09381e16ced9d19568f58a4652171e9257ad89a4 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Mon, 16 Nov 2020 10:50:17 -0800 Subject: [PATCH 370/892] chore(python): use BUILD_SPECIFIC_GCLOUD_PROJECT for samples (#148) https://github.com/googleapis/python-talent/blob/ef045e8eb348db36d7a2a611e6f26b11530d273b/samples/snippets/noxfile_config.py#L27-L32 `BUILD_SPECIFIC_GCLOUD_PROJECT` is an alternate project used for sample tests that do poorly with concurrent runs on the same project. Source-Author: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Source-Date: Wed Sep 30 13:06:03 2020 -0600 Source-Repo: googleapis/synthtool Source-Sha: 9b0da5204ab90bcc36f8cd4e5689eff1a54cc3e4 Source-Link: https://github.com/googleapis/synthtool/commit/9b0da5204ab90bcc36f8cd4e5689eff1a54cc3e4 --- .../.kokoro/samples/python3.6/common.cfg | 6 ++++++ .../.kokoro/samples/python3.7/common.cfg | 6 ++++++ .../.kokoro/samples/python3.8/common.cfg | 6 ++++++ packages/google-cloud-bigtable/synth.metadata | 6 +++--- 4 files changed, 21 insertions(+), 3 deletions(-) diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.6/common.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.6/common.cfg index dd662013654a..f71693fca0bc 100644 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.6/common.cfg +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.6/common.cfg @@ -13,6 +13,12 @@ env_vars: { value: "py-3.6" } +# Declare build specific Cloud project. +env_vars: { + key: "BUILD_SPECIFIC_GCLOUD_PROJECT" + value: "python-docs-samples-tests-py36" +} + env_vars: { key: "TRAMPOLINE_BUILD_FILE" value: "github/python-bigtable/.kokoro/test-samples.sh" diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.7/common.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.7/common.cfg index 6ee44dbb96cb..5fa465fda5f5 100644 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.7/common.cfg +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.7/common.cfg @@ -13,6 +13,12 @@ env_vars: { value: "py-3.7" } +# Declare build specific Cloud project. +env_vars: { + key: "BUILD_SPECIFIC_GCLOUD_PROJECT" + value: "python-docs-samples-tests-py37" +} + env_vars: { key: "TRAMPOLINE_BUILD_FILE" value: "github/python-bigtable/.kokoro/test-samples.sh" diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.8/common.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.8/common.cfg index cc909eb206e1..f3a6fa7ec10e 100644 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.8/common.cfg +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.8/common.cfg @@ -13,6 +13,12 @@ env_vars: { value: "py-3.8" } +# Declare build specific Cloud project. 
+env_vars: { + key: "BUILD_SPECIFIC_GCLOUD_PROJECT" + value: "python-docs-samples-tests-py38" +} + env_vars: { key: "TRAMPOLINE_BUILD_FILE" value: "github/python-bigtable/.kokoro/test-samples.sh" diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index 7206c46f8619..a87cbb4075c7 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -4,7 +4,7 @@ "git": { "name": ".", "remote": "https://github.com/googleapis/python-bigtable.git", - "sha": "3861f6b0552e431a1fc7aa872c4d293ca129c28c" + "sha": "9a61c57a73d8b55d59799e2b78ced03b07660fb8" } }, { @@ -19,14 +19,14 @@ "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "f3c04883d6c43261ff13db1f52d03a283be06871" + "sha": "9b0da5204ab90bcc36f8cd4e5689eff1a54cc3e4" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "f3c04883d6c43261ff13db1f52d03a283be06871" + "sha": "9b0da5204ab90bcc36f8cd4e5689eff1a54cc3e4" } } ], From aec5c9d8ca66cf8654b99eecadf45f4b35c47f2c Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Mon, 16 Nov 2020 14:25:22 -0500 Subject: [PATCH 371/892] chore: release 1.6.0 (#164) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- packages/google-cloud-bigtable/CHANGELOG.md | 8 ++++++++ packages/google-cloud-bigtable/setup.py | 2 +- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index f46bcc42f3ae..94bdbdf535c5 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,14 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [1.6.0](https://www.github.com/googleapis/python-bigtable/compare/v1.5.1...v1.6.0) (2020-11-16) + + +### Features + +* add 'timeout' arg to 'Table.mutate_rows' ([#157](https://www.github.com/googleapis/python-bigtable/issues/157)) ([6d597a1](https://www.github.com/googleapis/python-bigtable/commit/6d597a1e5be05c993c9f86beca4c1486342caf94)), closes [/github.com/googleapis/python-bigtable/issues/7#issuecomment-715538708](https://www.github.com/googleapis//github.com/googleapis/python-bigtable/issues/7/issues/issuecomment-715538708) [#7](https://www.github.com/googleapis/python-bigtable/issues/7) +* Backup Level IAM ([#160](https://www.github.com/googleapis/python-bigtable/issues/160)) ([44932cb](https://www.github.com/googleapis/python-bigtable/commit/44932cb8710e12279dbd4e9271577f8bee238980)) + ### [1.5.1](https://www.github.com/googleapis/python-bigtable/compare/v1.5.0...v1.5.1) (2020-10-06) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 0f3c9cd82887..0c98fee990a6 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -22,7 +22,7 @@ name = 'google-cloud-bigtable' description = 'Google Cloud Bigtable API client library' -version = "1.5.1" +version = "1.6.0" # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' From b393e03d4cc2e10f846fe6b782514435c6e37c26 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Mon, 16 Nov 2020 22:22:04 +0100 Subject: [PATCH 372/892] chore(deps): update dependency google-cloud-bigtable to v1.6.0 (#166) This PR contains the following updates: | Package | Update | 
Change | |---|---|---| | [google-cloud-bigtable](https://togithub.com/googleapis/python-bigtable) | minor | `==1.5.1` -> `==1.6.0` | --- ### Release Notes
googleapis/python-bigtable ### [`v1.6.0`](https://togithub.com/googleapis/python-bigtable/blob/master/CHANGELOG.md#​160-httpswwwgithubcomgoogleapispython-bigtablecomparev151v160-2020-11-16) [Compare Source](https://togithub.com/googleapis/python-bigtable/compare/v1.5.1...v1.6.0) ##### Features - add 'timeout' arg to 'Table.mutate_rows' ([#​157](https://www.github.com/googleapis/python-bigtable/issues/157)) ([6d597a1](https://www.github.com/googleapis/python-bigtable/commit/6d597a1e5be05c993c9f86beca4c1486342caf94)), closes [/github.com/googleapis/python-bigtable/issues/7#issuecomment-715538708](https://www.github.com/googleapis//github.com/googleapis/python-bigtable/issues/7/issues/issuecomment-715538708) [#​7](https://www.github.com/googleapis/python-bigtable/issues/7) - Backup Level IAM ([#​160](https://www.github.com/googleapis/python-bigtable/issues/160)) ([44932cb](https://www.github.com/googleapis/python-bigtable/commit/44932cb8710e12279dbd4e9271577f8bee238980)) ##### [1.5.1](https://www.github.com/googleapis/python-bigtable/compare/v1.5.0...v1.5.1) (2020-10-06) ##### Bug Fixes - harden version data gathering against DistributionNotFound ([#​150](https://www.github.com/googleapis/python-bigtable/issues/150)) ([c815421](https://www.github.com/googleapis/python-bigtable/commit/c815421422f1c845983e174651a5292767cfe2e7))
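As an editorial aside to the `timeout` feature noted in the release notes above (not part of this Renovate update): a minimal sketch of passing the new per-call `timeout` to `Table.mutate_rows`, assuming placeholder names for the instance (`my-instance`), table (`my-table`), and column family (`cf`).

```python
# Minimal sketch of the 'timeout' argument added to Table.mutate_rows in 1.6.0.
# The instance, table, and column-family names here are placeholders.
from google.cloud.bigtable import Client

client = Client(admin=True)
table = client.instance("my-instance").table("my-table")

row = table.direct_row(b"row-key-1")
row.set_cell("cf", b"col", b"value")

# 'timeout' (seconds) bounds the underlying MutateRows calls;
# a 'retry' strategy may also be passed explicitly.
statuses = table.mutate_rows([row], timeout=30)
print([status.code for status in statuses])  # 0 means the mutation succeeded
```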
--- ### Renovate configuration :date: **Schedule**: At any time (no schedule defined). :vertical_traffic_light: **Automerge**: Disabled by config. Please merge this manually once you are satisfied. :recycle: **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. :no_bell: **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [WhiteSource Renovate](https://renovate.whitesourcesoftware.com). View repository job log [here](https://app.renovatebot.com/dashboard#github/googleapis/python-bigtable). --- packages/google-cloud-bigtable/samples/beam/requirements.txt | 2 +- packages/google-cloud-bigtable/samples/hello/requirements.txt | 2 +- .../samples/instanceadmin/requirements.txt | 2 +- .../google-cloud-bigtable/samples/metricscaler/requirements.txt | 2 +- .../google-cloud-bigtable/samples/quickstart/requirements.txt | 2 +- .../samples/snippets/filters/requirements.txt | 2 +- .../samples/snippets/reads/requirements.txt | 2 +- .../samples/snippets/writes/requirements.txt | 2 +- .../google-cloud-bigtable/samples/tableadmin/requirements.txt | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index 565c39c3920d..44c2eb0a17bd 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ apache-beam==2.25.0 -google-cloud-bigtable==1.5.1 +google-cloud-bigtable==1.6.0 google-cloud-core==1.4.3 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/hello/requirements.txt b/packages/google-cloud-bigtable/samples/hello/requirements.txt index 3360cf133306..526c8458572a 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==1.5.1 +google-cloud-bigtable==1.6.0 google-cloud-core==1.4.3 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt index 23b27ea709e0..d51198faad68 100755 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==1.5.1 +google-cloud-bigtable==1.6.0 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index aa1a6e220ce2..a4dd52df1503 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==1.5.1 +google-cloud-bigtable==1.6.0 google-cloud-monitoring==2.0.0 diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt index 23b27ea709e0..d51198faad68 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==1.5.1 +google-cloud-bigtable==1.6.0 diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt 
b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt index ac34bb1f7ef1..862df6f5f989 100755 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==1.5.1 +google-cloud-bigtable==1.6.0 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt index ac34bb1f7ef1..862df6f5f989 100755 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==1.5.1 +google-cloud-bigtable==1.6.0 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt index 168f72e9180c..734386965087 100755 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==1.5.1 \ No newline at end of file +google-cloud-bigtable==1.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt index 23b27ea709e0..d51198faad68 100755 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==1.5.1 +google-cloud-bigtable==1.6.0 From bde2247b178e26a1ab686a0368810907bee93bd6 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 17 Nov 2020 12:50:27 -0500 Subject: [PATCH 373/892] chore: tweak version lookup for non-admin client (#165) Closes #162 Closes #163 Closes #167 --- packages/google-cloud-bigtable/synth.py | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/synth.py b/packages/google-cloud-bigtable/synth.py index 21100c749a5c..07c2933395bc 100644 --- a/packages/google-cloud-bigtable/synth.py +++ b/packages/google-cloud-bigtable/synth.py @@ -45,7 +45,12 @@ s.move(library / "google/cloud/bigtable_admin_v2") s.move(library / "tests") -# Work around non-standard installations +# ---------------------------------------------------------------------------- +# Work around non-standard installations (missing setuptools). +# +# These replacements can be removed after migrating to the microgenerator, +# which will generate them directly. 
+# ---------------------------------------------------------------------------- admin_clients = [ "google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py", @@ -69,6 +74,23 @@ """ ) +s.replace( + "google/cloud/bigtable_v2/gapic/bigtable_client.py", + """\ +_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution\( + 'google-cloud-bigtable', +\).version +""", + """\ +try: + _GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( + "google-cloud-bigtable" + ).version +except pkg_resources.DistributionNotFound: # pragma: NO COVER + _GAPIC_LIBRARY_VERSION = None +""" +) + # ---------------------------------------------------------------------------- # Add templated files # ---------------------------------------------------------------------------- From a4f7aa8bf96a4dce498b016c8c79d0d6845f4b53 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 24 Nov 2020 19:21:13 -0500 Subject: [PATCH 374/892] tests: 'test_scale_bigtable', backoff when testing cluster node counts (#168) Closes #108 Closes #159 --- .../samples/metricscaler/metricscaler_test.py | 39 +++++++++---------- .../metricscaler/requirements-test.txt | 1 + 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/metricscaler/metricscaler_test.py b/packages/google-cloud-bigtable/samples/metricscaler/metricscaler_test.py index 06e1e27ca348..219ec535e25a 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/metricscaler_test.py +++ b/packages/google-cloud-bigtable/samples/metricscaler/metricscaler_test.py @@ -15,7 +15,6 @@ """Unit and system tests for metricscaler.py""" import os -import time import uuid from google.cloud import bigtable @@ -23,6 +22,7 @@ from mock import Mock, patch import pytest +from test_utils.retry import RetryInstanceState from metricscaler import get_cpu_load from metricscaler import get_storage_utilization @@ -109,32 +109,31 @@ def test_scale_bigtable(instance): instance.reload() cluster = instance.cluster(BIGTABLE_INSTANCE) - cluster.reload() + + _nonzero_node_count = RetryInstanceState( + instance_predicate=lambda c: c.serve_nodes > 0, + max_tries=10, + ) + _nonzero_node_count(cluster.reload)() + original_node_count = cluster.serve_nodes scale_bigtable(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, True) - for n in range(10): - time.sleep(10) - cluster.reload() - new_node_count = cluster.serve_nodes - try: - assert (new_node_count == (original_node_count + SIZE_CHANGE_STEP)) - except AssertionError: - if n == 9: - raise + expected_count = original_node_count + SIZE_CHANGE_STEP + _scaled_node_count = RetryInstanceState( + instance_predicate=lambda c: c.serve_nodes == expected_count, + max_tries=10, + ) + _scaled_node_count(cluster.reload)() scale_bigtable(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, False) - for n in range(10): - time.sleep(10) - cluster.reload() - final_node_count = cluster.serve_nodes - try: - assert final_node_count == original_node_count - except AssertionError: - if n == 9: - raise + _restored_node_count = RetryInstanceState( + instance_predicate=lambda c: c.serve_nodes == original_node_count, + max_tries=10, + ) + _restored_node_count(cluster.reload)() def test_handle_dev_instance(capsys, dev_instance): diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt index 41c4d5110536..470dbe7c16d9 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt +++ 
b/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt @@ -1,2 +1,3 @@ pytest==5.3.2 mock==3.0.5 +google-cloud-testutils From 32ee760783beb599f472faec7efe94c834cd4551 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Tue, 24 Nov 2020 17:21:28 -0800 Subject: [PATCH 375/892] chore: update code of conduct (via synth) (#171) * build(python): samples tests should pass if no samples exist Source-Author: Daniel Sanche Source-Date: Wed Oct 14 08:00:06 2020 -0700 Source-Repo: googleapis/synthtool Source-Sha: 477764cc4ee6db346d3febef2bb1ea0abf27de52 Source-Link: https://github.com/googleapis/synthtool/commit/477764cc4ee6db346d3febef2bb1ea0abf27de52 * chore(python_library): change the docs bucket name Source-Author: Takashi Matsuo Source-Date: Fri Oct 16 09:58:05 2020 -0700 Source-Repo: googleapis/synthtool Source-Sha: da5c6050d13b4950c82666a81d8acd25157664ae Source-Link: https://github.com/googleapis/synthtool/commit/da5c6050d13b4950c82666a81d8acd25157664ae * chore(docs): update code of conduct of synthtool and templates Source-Author: Christopher Wilcox Source-Date: Thu Oct 22 14:22:01 2020 -0700 Source-Repo: googleapis/synthtool Source-Sha: 5f6ef0ec5501d33c4667885b37a7685a30d41a76 Source-Link: https://github.com/googleapis/synthtool/commit/5f6ef0ec5501d33c4667885b37a7685a30d41a76 * docs: add proto-plus to intersphinx mapping Source-Author: Tim Swast Source-Date: Tue Oct 27 12:01:14 2020 -0500 Source-Repo: googleapis/synthtool Source-Sha: ea52b8a0bd560f72f376efcf45197fb7c8869120 Source-Link: https://github.com/googleapis/synthtool/commit/ea52b8a0bd560f72f376efcf45197fb7c8869120 --- .../.kokoro/docs/common.cfg | 2 +- .../.kokoro/test-samples.sh | 8 +- .../google-cloud-bigtable/CODE_OF_CONDUCT.md | 123 +++++++++++++----- packages/google-cloud-bigtable/docs/conf.py | 1 + .../gapic/bigtable_instance_admin_client.py | 2 +- .../gapic/bigtable_table_admin_client.py | 4 +- .../bigtable_v2/gapic/bigtable_client.py | 2 +- packages/google-cloud-bigtable/synth.metadata | 6 +- 8 files changed, 102 insertions(+), 46 deletions(-) diff --git a/packages/google-cloud-bigtable/.kokoro/docs/common.cfg b/packages/google-cloud-bigtable/.kokoro/docs/common.cfg index 1831bf9d21a5..08aac45ad5ad 100644 --- a/packages/google-cloud-bigtable/.kokoro/docs/common.cfg +++ b/packages/google-cloud-bigtable/.kokoro/docs/common.cfg @@ -30,7 +30,7 @@ env_vars: { env_vars: { key: "V2_STAGING_BUCKET" - value: "docs-staging-v2-staging" + value: "docs-staging-v2" } # It will upload the docker image after successful builds. diff --git a/packages/google-cloud-bigtable/.kokoro/test-samples.sh b/packages/google-cloud-bigtable/.kokoro/test-samples.sh index 6da844235705..639efd458ff8 100755 --- a/packages/google-cloud-bigtable/.kokoro/test-samples.sh +++ b/packages/google-cloud-bigtable/.kokoro/test-samples.sh @@ -28,6 +28,12 @@ if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"periodic"* ]]; then git checkout $LATEST_RELEASE fi +# Exit early if samples directory doesn't exist +if [ ! -d "./samples" ]; then + echo "No tests run. `./samples` not found" + exit 0 +fi + # Disable buffering, so that the logs stream through. 
export PYTHONUNBUFFERED=1 @@ -101,4 +107,4 @@ cd "$ROOT" # Workaround for Kokoro permissions issue: delete secrets rm testing/{test-env.sh,client-secrets.json,service-account.json} -exit "$RTN" \ No newline at end of file +exit "$RTN" diff --git a/packages/google-cloud-bigtable/CODE_OF_CONDUCT.md b/packages/google-cloud-bigtable/CODE_OF_CONDUCT.md index b3d1f6029849..039f43681204 100644 --- a/packages/google-cloud-bigtable/CODE_OF_CONDUCT.md +++ b/packages/google-cloud-bigtable/CODE_OF_CONDUCT.md @@ -1,44 +1,95 @@ -# Contributor Code of Conduct +# Code of Conduct -As contributors and maintainers of this project, -and in the interest of fostering an open and welcoming community, -we pledge to respect all people who contribute through reporting issues, -posting feature requests, updating documentation, -submitting pull requests or patches, and other activities. +## Our Pledge -We are committed to making participation in this project -a harassment-free experience for everyone, -regardless of level of experience, gender, gender identity and expression, -sexual orientation, disability, personal appearance, -body size, race, ethnicity, age, religion, or nationality. +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of +experience, education, socio-economic status, nationality, personal appearance, +race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members Examples of unacceptable behavior by participants include: -* The use of sexualized language or imagery -* Personal attacks -* Trolling or insulting/derogatory comments -* Public or private harassment -* Publishing other's private information, -such as physical or electronic -addresses, without explicit permission -* Other unethical or unprofessional conduct. +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. Project maintainers have the right and responsibility to remove, edit, or reject -comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct. -By adopting this Code of Conduct, -project maintainers commit themselves to fairly and consistently -applying these principles to every aspect of managing this project. -Project maintainers who do not follow or enforce the Code of Conduct -may be permanently removed from the project team. 
- -This code of conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. - -Instances of abusive, harassing, or otherwise unacceptable behavior -may be reported by opening an issue -or contacting one or more of the project maintainers. - -This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0, -available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/) +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, or to ban temporarily or permanently any +contributor for other behaviors that they deem inappropriate, threatening, +offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +This Code of Conduct also applies outside the project spaces when the Project +Steward has a reasonable belief that an individual's behavior may have a +negative impact on the project or its community. + +## Conflict Resolution + +We do not believe that all conflict is bad; healthy debate and disagreement +often yield positive results. However, it is never okay to be disrespectful or +to engage in behavior that violates the project’s code of conduct. + +If you see someone violating the code of conduct, you are encouraged to address +the behavior directly with those involved. Many issues can be resolved quickly +and easily, and this gives people more control over the outcome of their +dispute. If you are unable to resolve the matter for any reason, or if the +behavior is threatening or harassing, report it. We are dedicated to providing +an environment where participants feel welcome and safe. + + +Reports should be directed to *googleapis-stewards@google.com*, the +Project Steward(s) for *Google Cloud Client Libraries*. It is the Project Steward’s duty to +receive and address reported violations of the code of conduct. They will then +work with a committee consisting of representatives from the Open Source +Programs Office and the Google Open Source Strategy team. If for any reason you +are uncomfortable reaching out to the Project Steward, please email +opensource@google.com. + +We will investigate every complaint, but you may not receive a direct response. +We will use our discretion in determining when and how to follow up on reported +incidents, which may range from not taking action to permanent expulsion from +the project and project-sponsored spaces. We will notify the accused of the +report and provide them an opportunity to discuss it before any action is taken. +The identity of the reporter will be omitted from the details of the report +supplied to the accused. In potentially harmful situations, such as ongoing +harassment or threats to anyone's safety, we may take action without notice. 
+ +## Attribution + +This Code of Conduct is adapted from the Contributor Covenant, version 1.4, +available at +https://www.contributor-covenant.org/version/1/4/code-of-conduct.html \ No newline at end of file diff --git a/packages/google-cloud-bigtable/docs/conf.py b/packages/google-cloud-bigtable/docs/conf.py index c0b3a25a4639..4064617bbc8e 100644 --- a/packages/google-cloud-bigtable/docs/conf.py +++ b/packages/google-cloud-bigtable/docs/conf.py @@ -352,6 +352,7 @@ None, ), "grpc": ("https://grpc.io/grpc/python/", None), + "proto-plus": ("https://proto-plus-python.readthedocs.io/en/latest/", None), } diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py index 8b1795249269..4e8a0d0badd3 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py @@ -53,7 +53,7 @@ _GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( "google-cloud-bigtable" ).version -except pkg_resources.DistributionNotFound: +except pkg_resources.DistributionNotFound: # pragma: NO COVER _GAPIC_LIBRARY_VERSION = None diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py index 2f19a880a33c..d507a3c0b73f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py @@ -54,13 +54,11 @@ from google.protobuf import field_mask_pb2 -import pkg_resources - try: _GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( "google-cloud-bigtable" ).version -except pkg_resources.DistributionNotFound: +except pkg_resources.DistributionNotFound: # pragma: NO COVER _GAPIC_LIBRARY_VERSION = None diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py index 43ff81029fee..a9ddfad8ab8a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py @@ -40,7 +40,7 @@ _GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( "google-cloud-bigtable" ).version -except pkg_resources.DistributionNotFound: +except pkg_resources.DistributionNotFound: # pragma: NO COVER _GAPIC_LIBRARY_VERSION = None diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index a87cbb4075c7..be90c87ccc69 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -4,7 +4,7 @@ "git": { "name": ".", "remote": "https://github.com/googleapis/python-bigtable.git", - "sha": "9a61c57a73d8b55d59799e2b78ced03b07660fb8" + "sha": "c9dafa922111eebd2efaa144403a6b1c48b6f9c1" } }, { @@ -19,14 +19,14 @@ "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "9b0da5204ab90bcc36f8cd4e5689eff1a54cc3e4" + "sha": "ea52b8a0bd560f72f376efcf45197fb7c8869120" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": 
"9b0da5204ab90bcc36f8cd4e5689eff1a54cc3e4" + "sha": "ea52b8a0bd560f72f376efcf45197fb7c8869120" } } ], From c3492877f1353b4a0617def9fda29edbc875fa48 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Wed, 25 Nov 2020 09:59:35 -0800 Subject: [PATCH 376/892] docs: update intersphinx mappings (#172) * docs(python): update intersphinx for grpc and auth * docs(python): update intersphinx for grpc and auth * use https for python intersphinx Co-authored-by: Tim Swast Source-Author: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Source-Date: Wed Nov 18 14:37:25 2020 -0700 Source-Repo: googleapis/synthtool Source-Sha: 9a7d9fbb7045c34c9d3d22c1ff766eeae51f04c9 Source-Link: https://github.com/googleapis/synthtool/commit/9a7d9fbb7045c34c9d3d22c1ff766eeae51f04c9 * docs(python): fix intersphinx link for google-auth Source-Author: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Source-Date: Thu Nov 19 10:16:05 2020 -0700 Source-Repo: googleapis/synthtool Source-Sha: a073c873f3928c561bdf87fdfbf1d081d1998984 Source-Link: https://github.com/googleapis/synthtool/commit/a073c873f3928c561bdf87fdfbf1d081d1998984 --- packages/google-cloud-bigtable/docs/conf.py | 6 +++--- packages/google-cloud-bigtable/synth.metadata | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/packages/google-cloud-bigtable/docs/conf.py b/packages/google-cloud-bigtable/docs/conf.py index 4064617bbc8e..ef2392b38c3c 100644 --- a/packages/google-cloud-bigtable/docs/conf.py +++ b/packages/google-cloud-bigtable/docs/conf.py @@ -345,13 +345,13 @@ # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = { - "python": ("http://python.readthedocs.org/en/latest/", None), - "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), + "python": ("https://python.readthedocs.org/en/latest/", None), + "google-auth": ("https://googleapis.dev/python/google-auth/latest/", None), "google.api_core": ( "https://googleapis.dev/python/google-api-core/latest/", None, ), - "grpc": ("https://grpc.io/grpc/python/", None), + "grpc": ("https://grpc.github.io/grpc/python/", None), "proto-plus": ("https://proto-plus-python.readthedocs.io/en/latest/", None), } diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index be90c87ccc69..1d3b579adb81 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -4,7 +4,7 @@ "git": { "name": ".", "remote": "https://github.com/googleapis/python-bigtable.git", - "sha": "c9dafa922111eebd2efaa144403a6b1c48b6f9c1" + "sha": "39d7c153ffab3f1bc86a6c1b6802d53254df9c52" } }, { @@ -19,14 +19,14 @@ "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "ea52b8a0bd560f72f376efcf45197fb7c8869120" + "sha": "a073c873f3928c561bdf87fdfbf1d081d1998984" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "ea52b8a0bd560f72f376efcf45197fb7c8869120" + "sha": "a073c873f3928c561bdf87fdfbf1d081d1998984" } } ], From 9e0be58f2199f3eef572270593de7227afdad60a Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 1 Dec 2020 16:09:51 -0500 Subject: [PATCH 377/892] tests: unflake 'filter_limit_row_sample' snippet (#175) Enforce that mutated table rows are readable before running snippet tests. Closes #75. 
--- .../samples/snippets/filters/filters_test.py | 29 +++++++++++++++---- 1 file changed, 23 insertions(+), 6 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/filters_test.py b/packages/google-cloud-bigtable/samples/snippets/filters/filters_test.py index f46541bffd35..36dc4a5b1bba 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/filters_test.py +++ b/packages/google-cloud-bigtable/samples/snippets/filters/filters_test.py @@ -14,6 +14,7 @@ import datetime import os +import time import uuid from google.cloud import bigtable @@ -29,6 +30,8 @@ @pytest.fixture(scope="module", autouse=True) def table_id(): + from google.cloud.bigtable.row_set import RowSet + client = bigtable.Client(project=PROJECT, admin=True) instance = client.instance(BIGTABLE_INSTANCE) @@ -43,14 +46,16 @@ def table_id(): timestamp_minus_hr = datetime.datetime(2019, 5, 1) - datetime.timedelta( hours=1) - rows = [ - table.direct_row("phone#4c410523#20190501"), - table.direct_row("phone#4c410523#20190502"), - table.direct_row("phone#4c410523#20190505"), - table.direct_row("phone#5c10102#20190501"), - table.direct_row("phone#5c10102#20190502"), + row_keys = [ + "phone#4c410523#20190501", + "phone#4c410523#20190502", + "phone#4c410523#20190505", + "phone#5c10102#20190501", + "phone#5c10102#20190502", ] + rows = [table.direct_row(row_key) for row_key in row_keys] + rows[0].set_cell("stats_summary", "connected_cell", 1, timestamp) rows[0].set_cell("stats_summary", "connected_wifi", 1, timestamp) rows[0].set_cell("stats_summary", "os_build", "PQ2A.190405.003", timestamp) @@ -76,6 +81,18 @@ def table_id(): table.mutate_rows(rows) + # Ensure mutations have propagated. + row_set = RowSet() + + for row_key in row_keys: + row_set.add_row_key(row_key) + + fetched = list(table.read_rows(row_set=row_set)) + + while len(fetched) < len(rows): + time.sleep(5) + fetched = list(table.read_rows(row_set=row_set)) + yield table_id table.delete() From dd3605f9a5b211cf8f1982bf8c276c527b1abea6 Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Wed, 2 Dec 2020 10:17:19 -0800 Subject: [PATCH 378/892] chore: release 1.6.1 (#173) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- packages/google-cloud-bigtable/CHANGELOG.md | 7 +++++++ packages/google-cloud-bigtable/setup.py | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 94bdbdf535c5..f4d6a8815494 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,13 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +### [1.6.1](https://www.github.com/googleapis/python-bigtable/compare/v1.6.0...v1.6.1) (2020-12-01) + + +### Documentation + +* update intersphinx mappings ([#172](https://www.github.com/googleapis/python-bigtable/issues/172)) ([7b09368](https://www.github.com/googleapis/python-bigtable/commit/7b09368d5121782c7f271b3575c838e8a2284c05)) + ## [1.6.0](https://www.github.com/googleapis/python-bigtable/compare/v1.5.1...v1.6.0) (2020-11-16) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 0c98fee990a6..48ef1c70df00 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -22,7 +22,7 @@ name = 'google-cloud-bigtable' description = 'Google Cloud Bigtable API client 
library' -version = "1.6.0" +version = "1.6.1" # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' From 1edb1c905ac19c32950e8a4a775eb0c874a865d8 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 29 Dec 2020 21:16:28 +0100 Subject: [PATCH 379/892] chore(deps): update dependency apache-beam to v2.26.0 (#185) --- packages/google-cloud-bigtable/samples/beam/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index 44c2eb0a17bd..18a5023b7ad4 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ -apache-beam==2.25.0 +apache-beam==2.26.0 google-cloud-bigtable==1.6.0 google-cloud-core==1.4.3 \ No newline at end of file From aa13829ed384d71f6ea70d38dbfdb8bcf719d278 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 29 Dec 2020 23:24:09 +0100 Subject: [PATCH 380/892] chore(deps): update dependency google-cloud-bigtable to v1.6.1 (#180) --- packages/google-cloud-bigtable/samples/beam/requirements.txt | 2 +- packages/google-cloud-bigtable/samples/hello/requirements.txt | 2 +- .../samples/instanceadmin/requirements.txt | 2 +- .../google-cloud-bigtable/samples/metricscaler/requirements.txt | 2 +- .../google-cloud-bigtable/samples/quickstart/requirements.txt | 2 +- .../samples/snippets/filters/requirements.txt | 2 +- .../samples/snippets/reads/requirements.txt | 2 +- .../samples/snippets/writes/requirements.txt | 2 +- .../google-cloud-bigtable/samples/tableadmin/requirements.txt | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index 18a5023b7ad4..af4407777352 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ apache-beam==2.26.0 -google-cloud-bigtable==1.6.0 +google-cloud-bigtable==1.6.1 google-cloud-core==1.4.3 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/hello/requirements.txt b/packages/google-cloud-bigtable/samples/hello/requirements.txt index 526c8458572a..465ddbcc6331 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==1.6.0 +google-cloud-bigtable==1.6.1 google-cloud-core==1.4.3 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt index d51198faad68..9f839250f0d9 100755 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==1.6.0 +google-cloud-bigtable==1.6.1 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index a4dd52df1503..d84efda8a4ab 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==1.6.0 +google-cloud-bigtable==1.6.1 google-cloud-monitoring==2.0.0 diff --git 
a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt index d51198faad68..9f839250f0d9 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==1.6.0 +google-cloud-bigtable==1.6.1 diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt index 862df6f5f989..8119baad2324 100755 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==1.6.0 +google-cloud-bigtable==1.6.1 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt index 862df6f5f989..8119baad2324 100755 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==1.6.0 +google-cloud-bigtable==1.6.1 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt index 734386965087..661aba7c1a15 100755 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==1.6.0 \ No newline at end of file +google-cloud-bigtable==1.6.1 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt index d51198faad68..9f839250f0d9 100755 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==1.6.0 +google-cloud-bigtable==1.6.1 From cba1bf216c6a848da729ee9aa8856b19fa4aca5a Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Wed, 30 Dec 2020 16:48:22 +0100 Subject: [PATCH 381/892] chore(deps): update dependency google-cloud-core to v1.5.0 (#179) --- packages/google-cloud-bigtable/samples/beam/requirements.txt | 2 +- packages/google-cloud-bigtable/samples/hello/requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index af4407777352..6f1bd3ee9aed 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ apache-beam==2.26.0 google-cloud-bigtable==1.6.1 -google-cloud-core==1.4.3 \ No newline at end of file +google-cloud-core==1.5.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/hello/requirements.txt b/packages/google-cloud-bigtable/samples/hello/requirements.txt index 465ddbcc6331..d1e7037cfc44 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements.txt @@ -1,2 +1,2 @@ google-cloud-bigtable==1.6.1 -google-cloud-core==1.4.3 
+google-cloud-core==1.5.0 From 578b03364644eae418e0f48cfb7f64bdb370f156 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Tue, 5 Jan 2021 08:46:57 -0800 Subject: [PATCH 382/892] chore: add config / docs for 'pre-commit' support (#176) Source-Author: Tres Seaver Source-Date: Tue Dec 1 16:01:20 2020 -0500 Source-Repo: googleapis/synthtool Source-Sha: 32af6da519a6b042e3da62008e2a75e991efb6b4 Source-Link: https://github.com/googleapis/synthtool/commit/32af6da519a6b042e3da62008e2a75e991efb6b4 --- .../.pre-commit-config.yaml | 17 +++++++++++++++++ packages/google-cloud-bigtable/CONTRIBUTING.rst | 10 ++++++++++ packages/google-cloud-bigtable/synth.metadata | 7 ++++--- 3 files changed, 31 insertions(+), 3 deletions(-) create mode 100644 packages/google-cloud-bigtable/.pre-commit-config.yaml diff --git a/packages/google-cloud-bigtable/.pre-commit-config.yaml b/packages/google-cloud-bigtable/.pre-commit-config.yaml new file mode 100644 index 000000000000..fb80a95e88bb --- /dev/null +++ b/packages/google-cloud-bigtable/.pre-commit-config.yaml @@ -0,0 +1,17 @@ +# See https://pre-commit.com for more information +# See https://pre-commit.com/hooks.html for more hooks +repos: +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: v3.2.0 + hooks: + - id: trailing-whitespace + - id: end-of-file-fixer + - id: check-yaml +- repo: https://github.com/psf/black + rev: 19.10b0 + hooks: + - id: black +- repo: https://gitlab.com/pycqa/flake8 + rev: 3.8.4 + hooks: + - id: flake8 diff --git a/packages/google-cloud-bigtable/CONTRIBUTING.rst b/packages/google-cloud-bigtable/CONTRIBUTING.rst index 6d9432272aab..7af53a86549a 100644 --- a/packages/google-cloud-bigtable/CONTRIBUTING.rst +++ b/packages/google-cloud-bigtable/CONTRIBUTING.rst @@ -111,6 +111,16 @@ Coding Style should point to the official ``googleapis`` checkout and the the branch should be the main branch on that remote (``master``). +- This repository contains configuration for the + `pre-commit `__ tool, which automates checking + our linters during a commit. If you have it installed on your ``$PATH``, + you can enable enforcing those checks via: + +.. 
code-block:: bash + + $ pre-commit install + pre-commit installed at .git/hooks/pre-commit + Exceptions to PEP8: - Many unit tests use a helper method, ``_call_fut`` ("FUT" is short for diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index 1d3b579adb81..55bec6877777 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -4,7 +4,7 @@ "git": { "name": ".", "remote": "https://github.com/googleapis/python-bigtable.git", - "sha": "39d7c153ffab3f1bc86a6c1b6802d53254df9c52" + "sha": "eb0c04682098fe6bfb7298c74e74bcc62c09eb79" } }, { @@ -19,14 +19,14 @@ "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "a073c873f3928c561bdf87fdfbf1d081d1998984" + "sha": "32af6da519a6b042e3da62008e2a75e991efb6b4" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "a073c873f3928c561bdf87fdfbf1d081d1998984" + "sha": "32af6da519a6b042e3da62008e2a75e991efb6b4" } } ], @@ -95,6 +95,7 @@ ".kokoro/test-samples.sh", ".kokoro/trampoline.sh", ".kokoro/trampoline_v2.sh", + ".pre-commit-config.yaml", ".trampolinerc", "CODE_OF_CONDUCT.md", "CONTRIBUTING.rst", From 80fd2e8362125e86e54212c6a8a6cac2aabd4919 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Wed, 6 Jan 2021 07:13:39 -0800 Subject: [PATCH 383/892] chore: Re-generated to pick up changes from synthtool. (#189) * chore(deps): update precommit hook pre-commit/pre-commit-hooks to v3.3.0 Source-Author: WhiteSource Renovate Source-Date: Wed Dec 2 17:18:24 2020 +0100 Source-Repo: googleapis/synthtool Source-Sha: 69629b64b83c6421d616be2b8e11795738ec8a6c Source-Link: https://github.com/googleapis/synthtool/commit/69629b64b83c6421d616be2b8e11795738ec8a6c * chore(deps): update precommit hook pre-commit/pre-commit-hooks to v3.4.0 Co-authored-by: Tres Seaver Source-Author: WhiteSource Renovate Source-Date: Wed Dec 16 18:13:24 2020 +0100 Source-Repo: googleapis/synthtool Source-Sha: aa255b15d52b6d8950cca48cfdf58f7d27a60c8a Source-Link: https://github.com/googleapis/synthtool/commit/aa255b15d52b6d8950cca48cfdf58f7d27a60c8a * docs(python): document adding Python 3.9 support, dropping 3.5 support Closes #787 Source-Author: Tres Seaver Source-Date: Thu Dec 17 16:08:02 2020 -0500 Source-Repo: googleapis/synthtool Source-Sha: b670a77a454f415d247907908e8ee7943e06d718 Source-Link: https://github.com/googleapis/synthtool/commit/b670a77a454f415d247907908e8ee7943e06d718 * chore: exclude `.nox` directories from linting The samples tests create `.nox` directories with all dependencies installed. These directories should be excluded from linting. I've tested this change locally, and it significantly speeds up linting on my machine. 
Source-Author: Tim Swast Source-Date: Tue Dec 22 13:04:04 2020 -0600 Source-Repo: googleapis/synthtool Source-Sha: 373861061648b5fe5e0ac4f8a38b32d639ee93e4 Source-Link: https://github.com/googleapis/synthtool/commit/373861061648b5fe5e0ac4f8a38b32d639ee93e4 --- packages/google-cloud-bigtable/.flake8 | 1 + .../google-cloud-bigtable/.pre-commit-config.yaml | 2 +- packages/google-cloud-bigtable/CONTRIBUTING.rst | 11 +++++------ packages/google-cloud-bigtable/synth.metadata | 6 +++--- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/packages/google-cloud-bigtable/.flake8 b/packages/google-cloud-bigtable/.flake8 index ed9316381c9c..29227d4cf419 100644 --- a/packages/google-cloud-bigtable/.flake8 +++ b/packages/google-cloud-bigtable/.flake8 @@ -26,6 +26,7 @@ exclude = *_pb2.py # Standard linting exemptions. + **/.nox/** __pycache__, .git, *.pyc, diff --git a/packages/google-cloud-bigtable/.pre-commit-config.yaml b/packages/google-cloud-bigtable/.pre-commit-config.yaml index fb80a95e88bb..a9024b15d725 100644 --- a/packages/google-cloud-bigtable/.pre-commit-config.yaml +++ b/packages/google-cloud-bigtable/.pre-commit-config.yaml @@ -2,7 +2,7 @@ # See https://pre-commit.com/hooks.html for more hooks repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v3.2.0 + rev: v3.4.0 hooks: - id: trailing-whitespace - id: end-of-file-fixer diff --git a/packages/google-cloud-bigtable/CONTRIBUTING.rst b/packages/google-cloud-bigtable/CONTRIBUTING.rst index 7af53a86549a..c1edbeca0afb 100644 --- a/packages/google-cloud-bigtable/CONTRIBUTING.rst +++ b/packages/google-cloud-bigtable/CONTRIBUTING.rst @@ -21,8 +21,8 @@ In order to add a feature: - The feature must be documented in both the API and narrative documentation. -- The feature must work fully on the following CPython versions: 2.7, - 3.5, 3.6, 3.7 and 3.8 on both UNIX and Windows. +- The feature must work fully on the following CPython versions: + 3.6, 3.7, 3.8 and 3.9 on both UNIX and Windows. - The feature must not add unnecessary dependencies (where "unnecessary" is of course subjective, but new dependencies should @@ -202,25 +202,24 @@ Supported Python Versions We support: -- `Python 3.5`_ - `Python 3.6`_ - `Python 3.7`_ - `Python 3.8`_ +- `Python 3.9`_ -.. _Python 3.5: https://docs.python.org/3.5/ .. _Python 3.6: https://docs.python.org/3.6/ .. _Python 3.7: https://docs.python.org/3.7/ .. _Python 3.8: https://docs.python.org/3.8/ +.. _Python 3.9: https://docs.python.org/3.9/ Supported versions can be found in our ``noxfile.py`` `config`_. .. _config: https://github.com/googleapis/python-bigtable/blob/master/noxfile.py -Python 2.7 support is deprecated. All code changes should maintain Python 2.7 compatibility until January 1, 2020. We also explicitly decided to support Python 3 beginning with version -3.5. Reasons for this include: +3.6. 
Reasons for this include: - Encouraging use of newest versions of Python 3 - Taking the lead of `prominent`_ open-source `projects`_ diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index 55bec6877777..ff1adf69eee2 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -4,7 +4,7 @@ "git": { "name": ".", "remote": "https://github.com/googleapis/python-bigtable.git", - "sha": "eb0c04682098fe6bfb7298c74e74bcc62c09eb79" + "sha": "58485dd159de1b6f78937740bb9ed6b57d3000ee" } }, { @@ -19,14 +19,14 @@ "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "32af6da519a6b042e3da62008e2a75e991efb6b4" + "sha": "373861061648b5fe5e0ac4f8a38b32d639ee93e4" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "32af6da519a6b042e3da62008e2a75e991efb6b4" + "sha": "373861061648b5fe5e0ac4f8a38b32d639ee93e4" } } ], From a33052a690fb51d5bcf5b02c24fac5fa26a75b96 Mon Sep 17 00:00:00 2001 From: Billy Jacobson Date: Fri, 8 Jan 2021 10:03:06 -0500 Subject: [PATCH 384/892] fix: Renaming region tags to not conflict with documentation snippets (#190) --- .../google-cloud-bigtable/docs/snippets.py | 156 ++++++------ .../docs/snippets_table.py | 224 +++++++++--------- .../google/cloud/bigtable/app_profile.py | 24 +- .../google/cloud/bigtable/batcher.py | 12 +- .../google/cloud/bigtable/client.py | 28 +-- .../google/cloud/bigtable/cluster.py | 32 +-- .../google/cloud/bigtable/column_family.py | 32 +-- .../google/cloud/bigtable/instance.py | 68 +++--- .../google/cloud/bigtable/policy.py | 16 +- .../google/cloud/bigtable/row.py | 76 +++--- .../google/cloud/bigtable/row_data.py | 16 +- .../google/cloud/bigtable/row_set.py | 16 +- .../google/cloud/bigtable/table.py | 84 +++---- 13 files changed, 392 insertions(+), 392 deletions(-) diff --git a/packages/google-cloud-bigtable/docs/snippets.py b/packages/google-cloud-bigtable/docs/snippets.py index 32fdfcb24ce0..dda59079d511 100644 --- a/packages/google-cloud-bigtable/docs/snippets.py +++ b/packages/google-cloud-bigtable/docs/snippets.py @@ -109,7 +109,7 @@ def teardown_module(): def test_bigtable_create_instance(): - # [START bigtable_create_prod_instance] + # [START bigtable_api_create_prod_instance] from google.cloud.bigtable import Client from google.cloud.bigtable import enums @@ -134,7 +134,7 @@ def test_bigtable_create_instance(): # We want to make sure the operation completes. operation.result(timeout=100) - # [END bigtable_create_prod_instance] + # [END bigtable_api_create_prod_instance] try: assert instance.exists() @@ -143,7 +143,7 @@ def test_bigtable_create_instance(): def test_bigtable_create_additional_cluster(): - # [START bigtable_create_cluster] + # [START bigtable_api_create_cluster] from google.cloud.bigtable import Client from google.cloud.bigtable import enums @@ -169,7 +169,7 @@ def test_bigtable_create_additional_cluster(): operation = cluster.create() # We want to make sure the operation completes. 
operation.result(timeout=100) - # [END bigtable_create_cluster] + # [END bigtable_api_create_cluster] try: assert cluster.exists() @@ -180,7 +180,7 @@ def test_bigtable_create_additional_cluster(): def test_bigtable_create_reload_delete_app_profile(): import re - # [START bigtable_create_app_profile] + # [START bigtable_api_create_app_profile] from google.cloud.bigtable import Client from google.cloud.bigtable import enums @@ -199,9 +199,9 @@ def test_bigtable_create_reload_delete_app_profile(): ) app_profile = app_profile.create(ignore_warnings=True) - # [END bigtable_create_app_profile] + # [END bigtable_api_create_app_profile] - # [START bigtable_app_profile_name] + # [START bigtable_api_app_profile_name] from google.cloud.bigtable import Client client = Client(admin=True) @@ -209,7 +209,7 @@ def test_bigtable_create_reload_delete_app_profile(): app_profile = instance.app_profile(APP_PROFILE_ID) app_profile_name = app_profile.name - # [END bigtable_app_profile_name] + # [END bigtable_api_app_profile_name] _profile_name_re = re.compile( r"^projects/(?P[^/]+)/" r"instances/(?P[^/]+)/" @@ -218,7 +218,7 @@ def test_bigtable_create_reload_delete_app_profile(): ) assert _profile_name_re.match(app_profile_name) - # [START bigtable_app_profile_exists] + # [START bigtable_api_app_profile_exists] from google.cloud.bigtable import Client client = Client(admin=True) @@ -226,10 +226,10 @@ def test_bigtable_create_reload_delete_app_profile(): app_profile = instance.app_profile(APP_PROFILE_ID) app_profile_exists = app_profile.exists() - # [END bigtable_app_profile_exists] + # [END bigtable_api_app_profile_exists] assert app_profile_exists - # [START bigtable_reload_app_profile] + # [START bigtable_api_reload_app_profile] from google.cloud.bigtable import Client client = Client(admin=True) @@ -237,10 +237,10 @@ def test_bigtable_create_reload_delete_app_profile(): app_profile = instance.app_profile(APP_PROFILE_ID) app_profile.reload() - # [END bigtable_reload_app_profile] + # [END bigtable_api_reload_app_profile] assert app_profile.routing_policy_type == ROUTING_POLICY_TYPE - # [START bigtable_update_app_profile] + # [START bigtable_api_update_app_profile] from google.cloud.bigtable import Client client = Client(admin=True) @@ -251,10 +251,10 @@ def test_bigtable_create_reload_delete_app_profile(): description = "My new app profile" app_profile.description = description app_profile.update() - # [END bigtable_update_app_profile] + # [END bigtable_api_update_app_profile] assert app_profile.description == description - # [START bigtable_delete_app_profile] + # [START bigtable_api_delete_app_profile] from google.cloud.bigtable import Client client = Client(admin=True) @@ -263,40 +263,40 @@ def test_bigtable_create_reload_delete_app_profile(): app_profile.reload() app_profile.delete(ignore_warnings=True) - # [END bigtable_delete_app_profile] + # [END bigtable_api_delete_app_profile] assert not app_profile.exists() def test_bigtable_list_instances(): - # [START bigtable_list_instances] + # [START bigtable_api_list_instances] from google.cloud.bigtable import Client client = Client(admin=True) (instances_list, failed_locations_list) = client.list_instances() - # [END bigtable_list_instances] + # [END bigtable_api_list_instances] assert len(instances_list) > 0 def test_bigtable_list_clusters_on_instance(): - # [START bigtable_list_clusters_on_instance] + # [START bigtable_api_list_clusters_on_instance] from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) 
(clusters_list, failed_locations_list) = instance.list_clusters() - # [END bigtable_list_clusters_on_instance] + # [END bigtable_api_list_clusters_on_instance] assert len(clusters_list) > 0 def test_bigtable_list_clusters_in_project(): - # [START bigtable_list_clusters_in_project] + # [START bigtable_api_list_clusters_in_project] from google.cloud.bigtable import Client client = Client(admin=True) (clusters_list, failed_locations_list) = client.list_clusters() - # [END bigtable_list_clusters_in_project] + # [END bigtable_api_list_clusters_in_project] assert len(clusters_list) > 0 @@ -308,14 +308,14 @@ def test_bigtable_list_app_profiles(): ) app_profile = app_profile.create(ignore_warnings=True) - # [START bigtable_list_app_profiles] + # [START bigtable_api_list_app_profiles] from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) app_profiles_list = instance.list_app_profiles() - # [END bigtable_list_app_profiles] + # [END bigtable_api_list_app_profiles] try: assert len(app_profiles_list) > 0 @@ -324,57 +324,57 @@ def test_bigtable_list_app_profiles(): def test_bigtable_instance_exists(): - # [START bigtable_check_instance_exists] + # [START bigtable_api_check_instance_exists] from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) instance_exists = instance.exists() - # [END bigtable_check_instance_exists] + # [END bigtable_api_check_instance_exists] assert instance_exists def test_bigtable_cluster_exists(): - # [START bigtable_check_cluster_exists] + # [START bigtable_api_check_cluster_exists] from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) cluster = instance.cluster(CLUSTER_ID) cluster_exists = cluster.exists() - # [END bigtable_check_cluster_exists] + # [END bigtable_api_check_cluster_exists] assert cluster_exists def test_bigtable_reload_instance(): - # [START bigtable_reload_instance] + # [START bigtable_api_reload_instance] from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) instance.reload() - # [END bigtable_reload_instance] + # [END bigtable_api_reload_instance] assert instance.type_ == PRODUCTION.value def test_bigtable_reload_cluster(): - # [START bigtable_reload_cluster] + # [START bigtable_api_reload_cluster] from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) cluster = instance.cluster(CLUSTER_ID) cluster.reload() - # [END bigtable_reload_cluster] + # [END bigtable_api_reload_cluster] assert cluster.serve_nodes == SERVER_NODES def test_bigtable_update_instance(): - # [START bigtable_update_instance] + # [START bigtable_api_update_instance] from google.cloud.bigtable import Client client = Client(admin=True) @@ -382,13 +382,13 @@ def test_bigtable_update_instance(): display_name = "My new instance" instance.display_name = display_name instance.update() - # [END bigtable_update_instance] + # [END bigtable_api_update_instance] assert instance.display_name == display_name def test_bigtable_update_cluster(): - # [START bigtable_update_cluster] + # [START bigtable_api_update_cluster] from google.cloud.bigtable import Client client = Client(admin=True) @@ -396,13 +396,13 @@ def test_bigtable_update_cluster(): cluster = instance.cluster(CLUSTER_ID) cluster.serve_nodes = 4 cluster.update() - # [END bigtable_update_cluster] + # [END bigtable_api_update_cluster] assert cluster.serve_nodes == 4 def 
test_bigtable_create_table(): - # [START bigtable_create_table] + # [START bigtable_api_create_table] from google.api_core import exceptions from google.api_core import retry from google.cloud.bigtable import Client @@ -420,7 +420,7 @@ def test_bigtable_create_table(): retry_504 = retry.Retry(predicate_504) retry_504(table.create)(column_families={"cf1": max_versions_rule}) - # [END bigtable_create_table] + # [END bigtable_api_create_table] try: assert table.exists() @@ -430,13 +430,13 @@ def test_bigtable_create_table(): def test_bigtable_list_tables(): - # [START bigtable_list_tables] + # [START bigtable_api_list_tables] from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) tables_list = instance.list_tables() - # [END bigtable_list_tables] + # [END bigtable_api_list_tables] # Check if returned list has expected table table_names = [table.name for table in tables_list] @@ -460,7 +460,7 @@ def test_bigtable_delete_cluster(): # We want to make sure the operation completes. operation.result(timeout=1000) - # [START bigtable_delete_cluster] + # [START bigtable_api_delete_cluster] from google.cloud.bigtable import Client client = Client(admin=True) @@ -468,7 +468,7 @@ def test_bigtable_delete_cluster(): cluster_to_delete = instance.cluster(cluster_id) cluster_to_delete.delete() - # [END bigtable_delete_cluster] + # [END bigtable_api_delete_cluster] assert not cluster_to_delete.exists() @@ -495,14 +495,14 @@ def test_bigtable_delete_instance(): # Make sure this instance gets deleted after the test case. INSTANCES_TO_DELETE.append(instance) - # [START bigtable_delete_instance] + # [START bigtable_api_delete_instance] from google.cloud.bigtable import Client client = Client(admin=True) instance_to_delete = client.instance(instance_id) instance_to_delete.delete() - # [END bigtable_delete_instance] + # [END bigtable_api_delete_instance] assert not instance_to_delete.exists() @@ -511,7 +511,7 @@ def test_bigtable_delete_instance(): def test_bigtable_test_iam_permissions(): - # [START bigtable_test_iam_permissions] + # [START bigtable_api_test_iam_permissions] from google.cloud.bigtable import Client client = Client(admin=True) @@ -519,7 +519,7 @@ def test_bigtable_test_iam_permissions(): instance.reload() permissions = ["bigtable.clusters.create", "bigtable.tables.create"] permissions_allowed = instance.test_iam_permissions(permissions) - # [END bigtable_test_iam_permissions] + # [END bigtable_api_test_iam_permissions] assert permissions_allowed == permissions @@ -527,7 +527,7 @@ def test_bigtable_test_iam_permissions(): def test_bigtable_set_iam_policy_then_get_iam_policy(): service_account_email = Config.CLIENT._credentials.service_account_email - # [START bigtable_set_iam_policy] + # [START bigtable_api_set_iam_policy] from google.cloud.bigtable import Client from google.cloud.bigtable.policy import Policy from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE @@ -539,17 +539,17 @@ def test_bigtable_set_iam_policy_then_get_iam_policy(): new_policy[BIGTABLE_ADMIN_ROLE] = [Policy.service_account(service_account_email)] policy_latest = instance.set_iam_policy(new_policy) - # [END bigtable_set_iam_policy] + # [END bigtable_api_set_iam_policy] assert len(policy_latest.bigtable_admins) > 0 - # [START bigtable_get_iam_policy] + # [START bigtable_api_get_iam_policy] from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) policy = instance.get_iam_policy() - # [END bigtable_get_iam_policy] + 
# [END bigtable_api_get_iam_policy] assert len(policy.bigtable_admins) > 0 @@ -557,45 +557,45 @@ def test_bigtable_set_iam_policy_then_get_iam_policy(): def test_bigtable_project_path(): import re - # [START bigtable_project_path] + # [START bigtable_api_project_path] from google.cloud.bigtable import Client client = Client(admin=True) project_path = client.project_path - # [END bigtable_project_path] + # [END bigtable_api_project_path] def test_bigtable_table_data_client(): - # [START bigtable_table_data_client] + # [START bigtable_api_table_data_client] from google.cloud.bigtable import Client client = Client(admin=True) table_data_client = client.table_data_client - # [END bigtable_table_data_client] + # [END bigtable_api_table_data_client] def test_bigtable_table_admin_client(): - # [START bigtable_table_admin_client] + # [START bigtable_api_table_admin_client] from google.cloud.bigtable import Client client = Client(admin=True) table_admin_client = client.table_admin_client - # [END bigtable_table_admin_client] + # [END bigtable_api_table_admin_client] def test_bigtable_instance_admin_client(): - # [START bigtable_instance_admin_client] + # [START bigtable_api_instance_admin_client] from google.cloud.bigtable import Client client = Client(admin=True) instance_admin_client = client.instance_admin_client - # [END bigtable_instance_admin_client] + # [END bigtable_api_instance_admin_client] def test_bigtable_admins_policy(): service_account_email = Config.CLIENT._credentials.service_account_email - # [START bigtable_admins_policy] + # [START bigtable_api_admins_policy] from google.cloud.bigtable import Client from google.cloud.bigtable.policy import Policy from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE @@ -608,7 +608,7 @@ def test_bigtable_admins_policy(): policy_latest = instance.set_iam_policy(new_policy) policy = policy_latest.bigtable_admins - # [END bigtable_admins_policy] + # [END bigtable_api_admins_policy] assert len(policy) > 0 @@ -616,7 +616,7 @@ def test_bigtable_admins_policy(): def test_bigtable_readers_policy(): service_account_email = Config.CLIENT._credentials.service_account_email - # [START bigtable_readers_policy] + # [START bigtable_api_readers_policy] from google.cloud.bigtable import Client from google.cloud.bigtable.policy import Policy from google.cloud.bigtable.policy import BIGTABLE_READER_ROLE @@ -629,7 +629,7 @@ def test_bigtable_readers_policy(): policy_latest = instance.set_iam_policy(new_policy) policy = policy_latest.bigtable_readers - # [END bigtable_readers_policy] + # [END bigtable_api_readers_policy] assert len(policy) > 0 @@ -637,7 +637,7 @@ def test_bigtable_readers_policy(): def test_bigtable_users_policy(): service_account_email = Config.CLIENT._credentials.service_account_email - # [START bigtable_users_policy] + # [START bigtable_api_users_policy] from google.cloud.bigtable import Client from google.cloud.bigtable.policy import Policy from google.cloud.bigtable.policy import BIGTABLE_USER_ROLE @@ -650,7 +650,7 @@ def test_bigtable_users_policy(): policy_latest = instance.set_iam_policy(new_policy) policy = policy_latest.bigtable_users - # [END bigtable_users_policy] + # [END bigtable_api_users_policy] assert len(policy) > 0 @@ -658,7 +658,7 @@ def test_bigtable_users_policy(): def test_bigtable_viewers_policy(): service_account_email = Config.CLIENT._credentials.service_account_email - # [START bigtable_viewers_policy] + # [START bigtable_api_viewers_policy] from google.cloud.bigtable import Client from google.cloud.bigtable.policy 
import Policy from google.cloud.bigtable.policy import BIGTABLE_VIEWER_ROLE @@ -671,7 +671,7 @@ def test_bigtable_viewers_policy(): policy_latest = instance.set_iam_policy(new_policy) policy = policy_latest.bigtable_viewers - # [END bigtable_viewers_policy] + # [END bigtable_api_viewers_policy] assert len(policy) > 0 @@ -679,30 +679,30 @@ def test_bigtable_viewers_policy(): def test_bigtable_instance_name(): import re - # [START bigtable_instance_name] + # [START bigtable_api_instance_name] from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) instance_name = instance.name - # [END bigtable_instance_name] + # [END bigtable_api_instance_name] def test_bigtable_cluster_name(): import re - # [START bigtable_cluster_name] + # [START bigtable_api_cluster_name] from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) cluster = instance.cluster(CLUSTER_ID) cluster_name = cluster.name - # [END bigtable_cluster_name] + # [END bigtable_api_cluster_name] def test_bigtable_instance_from_pb(): - # [START bigtable_instance_from_pb] + # [START bigtable_api_instance_from_pb] from google.cloud.bigtable import Client from google.cloud.bigtable_admin_v2.types import instance_pb2 @@ -715,13 +715,13 @@ def test_bigtable_instance_from_pb(): ) instance2 = instance.from_pb(instance_pb, client) - # [END bigtable_instance_from_pb] + # [END bigtable_api_instance_from_pb] assert instance2.name == instance.name def test_bigtable_cluster_from_pb(): - # [START bigtable_cluster_from_pb] + # [START bigtable_api_cluster_from_pb] from google.cloud.bigtable import Client from google.cloud.bigtable_admin_v2.types import instance_pb2 @@ -741,32 +741,32 @@ def test_bigtable_cluster_from_pb(): ) cluster2 = cluster.from_pb(cluster_pb, instance) - # [END bigtable_cluster_from_pb] + # [END bigtable_api_cluster_from_pb] assert cluster2.name == cluster.name def test_bigtable_instance_state(): - # [START bigtable_instance_state] + # [START bigtable_api_instance_state] from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) instance_state = instance.state - # [END bigtable_instance_state] + # [END bigtable_api_instance_state] assert not instance_state def test_bigtable_cluster_state(): - # [START bigtable_cluster_state] + # [START bigtable_api_cluster_state] from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) cluster = instance.cluster(CLUSTER_ID) cluster_state = cluster.state - # [END bigtable_cluster_state] + # [END bigtable_api_cluster_state] assert not cluster_state diff --git a/packages/google-cloud-bigtable/docs/snippets_table.py b/packages/google-cloud-bigtable/docs/snippets_table.py index a20918c036a9..4c3304fd0d73 100644 --- a/packages/google-cloud-bigtable/docs/snippets_table.py +++ b/packages/google-cloud-bigtable/docs/snippets_table.py @@ -112,7 +112,7 @@ def teardown_module(): def test_bigtable_create_table(): - # [START bigtable_create_table] + # [START bigtable_api_create_table] from google.cloud.bigtable import Client from google.cloud.bigtable import column_family @@ -129,7 +129,7 @@ def test_bigtable_create_table(): max_versions_rule = column_family.MaxVersionsGCRule(2) table2.create(column_families={"cf1": max_versions_rule}) - # [END bigtable_create_table] + # [END bigtable_api_create_table] assert table1.exists() assert table2.exists() table1.delete() @@ -142,7 +142,7 @@ def 
test_bigtable_sample_row_keys(): table_sample.create(initial_split_keys=initial_split_keys) assert table_sample.exists() - # [START bigtable_sample_row_keys] + # [START bigtable_api_sample_row_keys] from google.cloud.bigtable import Client client = Client(admin=True) @@ -151,14 +151,14 @@ def test_bigtable_sample_row_keys(): table = instance.table("table_id1_samplerow") data = table.sample_row_keys() actual_keys, offset = zip(*[(rk.row_key, rk.offset_bytes) for rk in data]) - # [END bigtable_sample_row_keys] + # [END bigtable_api_sample_row_keys] initial_split_keys.append(b"") assert list(actual_keys) == initial_split_keys table.delete() def test_bigtable_write_read_drop_truncate(): - # [START bigtable_mutate_rows] + # [START bigtable_api_mutate_rows] from google.cloud.bigtable import Client client = Client(admin=True) @@ -187,9 +187,9 @@ def test_bigtable_write_read_drop_truncate(): for i, status in enumerate(response): if status.code != 0: print("Row number {} failed to write".format(i)) - # [END bigtable_mutate_rows] + # [END bigtable_api_mutate_rows] assert len(response) == len(rows) - # [START bigtable_read_row] + # [START bigtable_api_read_row] from google.cloud.bigtable import Client client = Client(admin=True) @@ -197,9 +197,9 @@ def test_bigtable_write_read_drop_truncate(): table = instance.table(TABLE_ID) row_key = "row_key_1" row = table.read_row(row_key) - # [END bigtable_read_row] + # [END bigtable_api_read_row] assert row.row_key.decode("utf-8") == row_key - # [START bigtable_read_rows] + # [START bigtable_api_read_rows] from google.cloud.bigtable import Client client = Client(admin=True) @@ -215,9 +215,9 @@ def test_bigtable_write_read_drop_truncate(): cell = row.cells[COLUMN_FAMILY_ID][col_name][0] print(cell.value.decode("utf-8")) total_rows.append(cell) - # [END bigtable_read_rows] + # [END bigtable_api_read_rows] assert len(total_rows) == len(rows) - # [START bigtable_drop_by_prefix] + # [START bigtable_api_drop_by_prefix] from google.cloud.bigtable import Client client = Client(admin=True) @@ -225,19 +225,19 @@ def test_bigtable_write_read_drop_truncate(): table = instance.table(TABLE_ID) row_key_prefix = b"row_key_2" table.drop_by_prefix(row_key_prefix, timeout=200) - # [END bigtable_drop_by_prefix] + # [END bigtable_api_drop_by_prefix] dropped_row_keys = [b"row_key_2", b"row_key_20", b"row_key_22", b"row_key_200"] for row in table.read_rows(): assert row.row_key.decode("utf-8") not in dropped_row_keys - # [START bigtable_truncate_table] + # [START bigtable_api_truncate_table] from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) table = instance.table(TABLE_ID) table.truncate(timeout=200) - # [END bigtable_truncate_table] + # [END bigtable_api_truncate_table] rows_data_after_truncate = [] for row in table.read_rows(): rows_data_after_truncate.append(row.row_key) @@ -245,14 +245,14 @@ def test_bigtable_write_read_drop_truncate(): def test_bigtable_mutations_batcher(): - # [START bigtable_mutations_batcher] + # [START bigtable_api_mutations_batcher] from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) table = instance.table(TABLE_ID) batcher = table.mutations_batcher() - # [END bigtable_mutations_batcher] + # [END bigtable_api_mutations_batcher] # Below code will be used while creating batcher.py snippets. # So not removing this code as of now. 
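
The batcher snippet above only constructs the mutations batcher; the comment notes the remaining code is reserved for future batcher.py snippets. For readers following along, a minimal sketch of how such a batcher is typically driven, using hypothetical instance, table, and column-family names rather than the test constants, and assuming the column family already exists on the table:

from google.cloud.bigtable import Client

client = Client(admin=True)
instance = client.instance("my-instance")  # hypothetical IDs
table = instance.table("my-table")

batcher = table.mutations_batcher()
row = table.direct_row(b"row_key_1")
row.set_cell("cf1", b"field1", b"value1")  # "cf1" assumed to exist
# Mutations are queued and sent in batches once the batcher's thresholds
# are reached (see the bigtable_api_batcher_mutate snippets further down);
# flush() sends any pending mutations explicitly.
batcher.mutate(row)
batcher.flush()
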
@@ -296,7 +296,7 @@ def test_bigtable_mutations_batcher(): def test_bigtable_table_column_family(): - # [START bigtable_table_column_family] + # [START bigtable_api_table_column_family] from google.cloud.bigtable import Client client = Client(admin=True) @@ -304,26 +304,26 @@ def test_bigtable_table_column_family(): table = instance.table(TABLE_ID) column_family_obj = table.column_family(COLUMN_FAMILY_ID) - # [END bigtable_table_column_family] + # [END bigtable_api_table_column_family] assert column_family_obj.column_family_id == COLUMN_FAMILY_ID def test_bigtable_list_tables(): - # [START bigtable_list_tables] + # [START bigtable_api_list_tables] from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) tables_list = instance.list_tables() - # [END bigtable_list_tables] + # [END bigtable_api_list_tables] assert len(tables_list) != 0 def test_bigtable_table_name(): import re - # [START bigtable_table_name] + # [START bigtable_api_table_name] from google.cloud.bigtable import Client client = Client(admin=True) @@ -331,7 +331,7 @@ def test_bigtable_table_name(): table = instance.table(TABLE_ID) table_name = table.name - # [END bigtable_table_name] + # [END bigtable_api_table_name] _table_name_re = re.compile( r"^projects/(?P[^/]+)/" r"instances/(?P[^/]+)/tables/" @@ -341,7 +341,7 @@ def test_bigtable_table_name(): def test_bigtable_list_column_families(): - # [START bigtable_list_column_families] + # [START bigtable_api_list_column_families] from google.cloud.bigtable import Client client = Client(admin=True) @@ -349,13 +349,13 @@ def test_bigtable_list_column_families(): table = instance.table(TABLE_ID) column_family_list = table.list_column_families() - # [END bigtable_list_column_families] + # [END bigtable_api_list_column_families] assert len(column_family_list) > 0 def test_bigtable_get_cluster_states(): - # [START bigtable_get_cluster_states] + # [START bigtable_api_get_cluster_states] from google.cloud.bigtable import Client client = Client(admin=True) @@ -363,7 +363,7 @@ def test_bigtable_get_cluster_states(): table = instance.table(TABLE_ID) get_cluster_states = table.get_cluster_states() - # [END bigtable_get_cluster_states] + # [END bigtable_api_get_cluster_states] assert CLUSTER_ID in get_cluster_states @@ -373,7 +373,7 @@ def test_bigtable_table_test_iam_permissions(): table_policy.create() assert table_policy.exists - # [START bigtable_table_test_iam_permissions] + # [START bigtable_api_table_test_iam_permissions] from google.cloud.bigtable import Client client = Client(admin=True) @@ -382,7 +382,7 @@ def test_bigtable_table_test_iam_permissions(): permissions = ["bigtable.tables.mutateRows", "bigtable.tables.readRows"] permissions_allowed = table.test_iam_permissions(permissions) - # [END bigtable_table_test_iam_permissions] + # [END bigtable_api_table_test_iam_permissions] assert permissions_allowed == permissions @@ -391,7 +391,7 @@ def test_bigtable_table_set_iam_policy_then_get_iam_policy(): assert table_policy.exists service_account_email = Config.CLIENT._credentials.service_account_email - # [START bigtable_table_set_iam_policy] + # [START bigtable_api_table_set_iam_policy] from google.cloud.bigtable import Client from google.cloud.bigtable.policy import Policy from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE @@ -403,29 +403,29 @@ def test_bigtable_table_set_iam_policy_then_get_iam_policy(): new_policy[BIGTABLE_ADMIN_ROLE] = [Policy.service_account(service_account_email)] policy_latest = 
table.set_iam_policy(new_policy) - # [END bigtable_table_set_iam_policy] + # [END bigtable_api_table_set_iam_policy] assert len(policy_latest.bigtable_admins) > 0 - # [START bigtable_table_get_iam_policy] + # [START bigtable_api_table_get_iam_policy] from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) table = instance.table("table_id_iam_policy") policy = table.get_iam_policy() - # [END bigtable_table_get_iam_policy] + # [END bigtable_api_table_get_iam_policy] assert len(policy.bigtable_admins) > 0 def test_bigtable_table_exists(): - # [START bigtable_check_table_exists] + # [START bigtable_api_check_table_exists] from google.cloud.bigtable import Client client = Client(admin=True) instance = client.instance(INSTANCE_ID) table = instance.table(TABLE_ID) table_exists = table.exists() - # [END bigtable_check_table_exists] + # [END bigtable_api_check_table_exists] assert table_exists @@ -434,7 +434,7 @@ def test_bigtable_delete_table(): table_del.create() assert table_del.exists() - # [START bigtable_delete_table] + # [START bigtable_api_delete_table] from google.cloud.bigtable import Client client = Client(admin=True) @@ -442,12 +442,12 @@ def test_bigtable_delete_table(): table = instance.table("table_id_del") table.delete() - # [END bigtable_delete_table] + # [END bigtable_api_delete_table] assert not table.exists() def test_bigtable_table_row(): - # [START bigtable_table_row] + # [START bigtable_api_table_row] from google.cloud.bigtable import Client client = Client(admin=True) @@ -457,7 +457,7 @@ def test_bigtable_table_row(): row_keys = [b"row_key_1", b"row_key_2"] row1_obj = table.row(row_keys[0]) row2_obj = table.row(row_keys[1]) - # [END bigtable_table_row] + # [END bigtable_api_table_row] row1_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1) row1_obj.commit() @@ -474,7 +474,7 @@ def test_bigtable_table_row(): def test_bigtable_table_append_row(): - # [START bigtable_table_append_row] + # [START bigtable_api_table_append_row] from google.cloud.bigtable import Client client = Client(admin=True) @@ -484,7 +484,7 @@ def test_bigtable_table_append_row(): row_keys = [b"row_key_1", b"row_key_2"] row1_obj = table.append_row(row_keys[0]) row2_obj = table.append_row(row_keys[1]) - # [END bigtable_table_append_row] + # [END bigtable_api_table_append_row] row1_obj.append_cell_value(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1) row1_obj.commit() @@ -501,7 +501,7 @@ def test_bigtable_table_append_row(): def test_bigtable_table_direct_row(): - # [START bigtable_table_direct_row] + # [START bigtable_api_table_direct_row] from google.cloud.bigtable import Client client = Client(admin=True) @@ -511,7 +511,7 @@ def test_bigtable_table_direct_row(): row_keys = [b"row_key_1", b"row_key_2"] row1_obj = table.direct_row(row_keys[0]) row2_obj = table.direct_row(row_keys[1]) - # [END bigtable_table_direct_row] + # [END bigtable_api_table_direct_row] row1_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1) row1_obj.commit() @@ -528,7 +528,7 @@ def test_bigtable_table_direct_row(): def test_bigtable_table_conditional_row(): - # [START bigtable_table_conditional_row] + # [START bigtable_api_table_conditional_row] from google.cloud.bigtable import Client from google.cloud.bigtable.row_filters import PassAllFilter @@ -540,7 +540,7 @@ def test_bigtable_table_conditional_row(): filter_ = PassAllFilter(True) row1_obj = table.conditional_row(row_keys[0], filter_=filter_) row2_obj = table.conditional_row(row_keys[1], filter_=filter_) - # [END 
bigtable_table_conditional_row] + # [END bigtable_api_table_conditional_row] row1_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, CELL_VAL1, state=False) row1_obj.commit() @@ -557,7 +557,7 @@ def test_bigtable_table_conditional_row(): def test_bigtable_column_family_name(): - # [START bigtable_column_family_name] + # [START bigtable_api_column_family_name] from google.cloud.bigtable import Client client = Client(admin=True) @@ -567,7 +567,7 @@ def test_bigtable_column_family_name(): column_families = table.list_column_families() column_family_obj = column_families[COLUMN_FAMILY_ID] column_family_name = column_family_obj.name - # [END bigtable_column_family_name] + # [END bigtable_api_column_family_name] import re _cf_name_re = re.compile( @@ -580,7 +580,7 @@ def test_bigtable_column_family_name(): def test_bigtable_create_update_delete_column_family(): - # [START bigtable_create_column_family] + # [START bigtable_api_create_column_family] from google.cloud.bigtable import Client from google.cloud.bigtable import column_family @@ -593,11 +593,11 @@ def test_bigtable_create_update_delete_column_family(): column_family_obj = table.column_family(column_family_id, gc_rule=gc_rule) column_family_obj.create() - # [END bigtable_create_column_family] + # [END bigtable_api_create_column_family] column_families = table.list_column_families() assert column_families[column_family_id].gc_rule == gc_rule - # [START bigtable_update_column_family] + # [START bigtable_api_update_column_family] from google.cloud.bigtable import Client from google.cloud.bigtable import column_family @@ -611,12 +611,12 @@ def test_bigtable_create_update_delete_column_family(): max_age_rule = column_family.MaxAgeGCRule(datetime.timedelta(days=5)) column_family_obj = table.column_family(column_family_id, gc_rule=max_age_rule) column_family_obj.update() - # [END bigtable_update_column_family] + # [END bigtable_api_update_column_family] updated_families = table.list_column_families() assert updated_families[column_family_id].gc_rule == max_age_rule - # [START bigtable_delete_column_family] + # [START bigtable_api_delete_column_family] from google.cloud.bigtable import Client from google.cloud.bigtable import column_family @@ -627,7 +627,7 @@ def test_bigtable_create_update_delete_column_family(): column_family_id = "column_family_id1" column_family_obj = table.column_family(column_family_id) column_family_obj.delete() - # [END bigtable_delete_column_family] + # [END bigtable_api_delete_column_family] column_families = table.list_column_families() assert column_family_id not in column_families @@ -652,7 +652,7 @@ def test_bigtable_add_row_add_row_range_add_row_range_from_keys(): rows.append(row) Config.TABLE.mutate_rows(rows) - # [START bigtable_add_row_key] + # [START bigtable_api_add_row_key] from google.cloud.bigtable import Client from google.cloud.bigtable.row_set import RowSet @@ -662,14 +662,14 @@ def test_bigtable_add_row_add_row_range_add_row_range_from_keys(): row_set = RowSet() row_set.add_row_key(b"row_key_5") - # [END bigtable_add_row_key] + # [END bigtable_api_add_row_key] read_rows = table.read_rows(row_set=row_set) expected_row_keys = [b"row_key_5"] found_row_keys = [row.row_key for row in read_rows] assert found_row_keys == expected_row_keys - # [START bigtable_add_row_range] + # [START bigtable_api_add_row_range] from google.cloud.bigtable import Client from google.cloud.bigtable.row_set import RowSet from google.cloud.bigtable.row_set import RowRange @@ -680,14 +680,14 @@ def 
test_bigtable_add_row_add_row_range_add_row_range_from_keys(): row_set = RowSet() row_set.add_row_range(RowRange(start_key=b"row_key_3", end_key=b"row_key_7")) - # [END bigtable_add_row_range] + # [END bigtable_api_add_row_range] read_rows = table.read_rows(row_set=row_set) expected_row_keys = [b"row_key_3", b"row_key_4", b"row_key_5", b"row_key_6"] found_row_keys = [row.row_key for row in read_rows] assert found_row_keys == expected_row_keys - # [START bigtable_row_range_from_keys] + # [START bigtable_api_row_range_from_keys] from google.cloud.bigtable import Client from google.cloud.bigtable.row_set import RowSet @@ -697,7 +697,7 @@ def test_bigtable_add_row_add_row_range_add_row_range_from_keys(): row_set = RowSet() row_set.add_row_range_from_keys(start_key=b"row_key_3", end_key=b"row_key_7") - # [END bigtable_row_range_from_keys] + # [END bigtable_api_row_range_from_keys] read_rows = table.read_rows(row_set=row_set) expected_row_keys = [b"row_key_3", b"row_key_4", b"row_key_5", b"row_key_6"] @@ -722,7 +722,7 @@ def test_bigtable_add_row_range_with_prefix(): rows.append(row) Config.TABLE.mutate_rows(rows) - # [START bigtable_add_row_range_with_prefix] + # [START bigtable_api_add_row_range_with_prefix] from google.cloud.bigtable import Client from google.cloud.bigtable.row_set import RowSet @@ -732,7 +732,7 @@ def test_bigtable_add_row_range_with_prefix(): row_set = RowSet() row_set.add_row_range_with_prefix("row") - # [END bigtable_add_row_range_with_prefix] + # [END bigtable_api_add_row_range_with_prefix] read_rows = table.read_rows(row_set=row_set) expected_row_keys = [ @@ -746,7 +746,7 @@ def test_bigtable_add_row_range_with_prefix(): def test_bigtable_batcher_mutate_flush_mutate_rows(): - # [START bigtable_batcher_mutate] + # [START bigtable_api_batcher_mutate] from google.cloud.bigtable import Client client = Client(admin=True) @@ -766,9 +766,9 @@ def test_bigtable_batcher_mutate_flush_mutate_rows(): # reaches the max_row_bytes batcher.mutate(row) batcher.flush() - # [END bigtable_batcher_mutate] + # [END bigtable_api_batcher_mutate] - # [START bigtable_batcher_flush] + # [START bigtable_api_batcher_flush] from google.cloud.bigtable import Client client = Client(admin=True) @@ -786,7 +786,7 @@ def test_bigtable_batcher_mutate_flush_mutate_rows(): # reaches the max_row_bytes batcher.mutate(row) batcher.flush() - # [END bigtable_batcher_flush] + # [END bigtable_api_batcher_flush] rows_on_table = [] for row in table.read_rows(): @@ -794,7 +794,7 @@ def test_bigtable_batcher_mutate_flush_mutate_rows(): assert len(rows_on_table) == 2 table.truncate(timeout=200) - # [START bigtable_batcher_mutate_rows] + # [START bigtable_api_batcher_mutate_rows] from google.cloud.bigtable import Client client = Client(admin=True) @@ -818,7 +818,7 @@ def test_bigtable_batcher_mutate_flush_mutate_rows(): # reaches the max flush_count # Manually send the current batch to Cloud Bigtable batcher.flush() - # [END bigtable_batcher_mutate_rows] + # [END bigtable_api_batcher_mutate_rows] rows_on_table = [] for row in table.read_rows(): @@ -828,7 +828,7 @@ def test_bigtable_batcher_mutate_flush_mutate_rows(): def test_bigtable_create_family_gc_max_age(): - # [START bigtable_create_family_gc_max_age] + # [START bigtable_api_create_family_gc_max_age] from google.cloud.bigtable import Client from google.cloud.bigtable import column_family @@ -842,7 +842,7 @@ def test_bigtable_create_family_gc_max_age(): column_family_obj = table.column_family("cf1", max_age_rule) column_family_obj.create() - # [END 
bigtable_create_family_gc_max_age] + # [END bigtable_api_create_family_gc_max_age] rule = str(column_family_obj.to_pb()) assert "max_age" in rule assert "seconds: 432000" in rule @@ -850,7 +850,7 @@ def test_bigtable_create_family_gc_max_age(): def test_bigtable_create_family_gc_max_versions(): - # [START bigtable_create_family_gc_max_versions] + # [START bigtable_api_create_family_gc_max_versions] from google.cloud.bigtable import Client from google.cloud.bigtable import column_family @@ -864,14 +864,14 @@ def test_bigtable_create_family_gc_max_versions(): column_family_obj = table.column_family("cf2", max_versions_rule) column_family_obj.create() - # [END bigtable_create_family_gc_max_versions] + # [END bigtable_api_create_family_gc_max_versions] rule = str(column_family_obj.to_pb()) assert "max_num_versions: 2" in rule column_family_obj.delete() def test_bigtable_create_family_gc_union(): - # [START bigtable_create_family_gc_union] + # [START bigtable_api_create_family_gc_union] from google.cloud.bigtable import Client from google.cloud.bigtable import column_family @@ -887,7 +887,7 @@ def test_bigtable_create_family_gc_union(): column_family_obj = table.column_family("cf3", union_rule) column_family_obj.create() - # [END bigtable_create_family_gc_union] + # [END bigtable_api_create_family_gc_union] rule = str(column_family_obj.to_pb()) assert "union" in rule assert "max_age" in rule @@ -897,7 +897,7 @@ def test_bigtable_create_family_gc_union(): def test_bigtable_create_family_gc_intersection(): - # [START bigtable_create_family_gc_intersection] + # [START bigtable_api_create_family_gc_intersection] from google.cloud.bigtable import Client from google.cloud.bigtable import column_family @@ -915,7 +915,7 @@ def test_bigtable_create_family_gc_intersection(): column_family_obj = table.column_family("cf4", intersection_rule) column_family_obj.create() - # [END bigtable_create_family_gc_intersection] + # [END bigtable_api_create_family_gc_intersection] rule = str(column_family_obj.to_pb()) assert "intersection" in rule @@ -926,7 +926,7 @@ def test_bigtable_create_family_gc_intersection(): def test_bigtable_create_family_gc_nested(): - # [START bigtable_create_family_gc_nested] + # [START bigtable_api_create_family_gc_nested] from google.cloud.bigtable import Client from google.cloud.bigtable import column_family @@ -953,7 +953,7 @@ def test_bigtable_create_family_gc_nested(): column_family_obj = table.column_family("cf5", nested_rule) column_family_obj.create() - # [END bigtable_create_family_gc_nested] + # [END bigtable_api_create_family_gc_nested] rule = str(column_family_obj.to_pb()) assert "intersection" in rule @@ -977,7 +977,7 @@ def test_bigtable_row_data_cells_cell_value_cell_values(): ) row.commit() - # [START bigtable_row_data_cells] + # [START bigtable_api_row_data_cells] from google.cloud.bigtable import Client client = Client(admin=True) @@ -987,12 +987,12 @@ def test_bigtable_row_data_cells_cell_value_cell_values(): row_data = table.read_row(row_key) cells = row_data.cells - # [END bigtable_row_data_cells] + # [END bigtable_api_row_data_cells] actual_cell_value = cells[COLUMN_FAMILY_ID][COL_NAME1][0].value assert actual_cell_value == value - # [START bigtable_row_cell_value] + # [START bigtable_api_row_cell_value] from google.cloud.bigtable import Client client = Client(admin=True) @@ -1002,10 +1002,10 @@ def test_bigtable_row_data_cells_cell_value_cell_values(): row_data = table.read_row(row_key) cell_value = row_data.cell_value(COLUMN_FAMILY_ID, COL_NAME1) - # [END 
bigtable_row_cell_value] + # [END bigtable_api_row_cell_value] assert cell_value == value - # [START bigtable_row_cell_values] + # [START bigtable_api_row_cell_values] from google.cloud.bigtable import Client client = Client(admin=True) @@ -1015,7 +1015,7 @@ def test_bigtable_row_data_cells_cell_value_cell_values(): row_data = table.read_row(row_key) cell_values = row_data.cell_values(COLUMN_FAMILY_ID, COL_NAME1) - # [END bigtable_row_cell_values] + # [END bigtable_api_row_cell_values] for actual_value, timestamp in cell_values: assert actual_value == value @@ -1024,7 +1024,7 @@ def test_bigtable_row_data_cells_cell_value_cell_values(): row.set_cell(COLUMN_FAMILY_ID, COL_NAME2, value2) row.commit() - # [START bigtable_row_find_cells] + # [START bigtable_api_row_find_cells] from google.cloud.bigtable import Client client = Client(admin=True) @@ -1034,14 +1034,14 @@ def test_bigtable_row_data_cells_cell_value_cell_values(): row = table.read_row(row_key) cells = row.find_cells(COLUMN_FAMILY_ID, COL_NAME2) - # [END bigtable_row_find_cells] + # [END bigtable_api_row_find_cells] assert cells[0].value == value2 table.truncate(timeout=200) def test_bigtable_row_setcell_rowkey(): - # [START bigtable_row_set_cell] + # [START bigtable_api_row_set_cell] from google.cloud.bigtable import Client client = Client(admin=True) @@ -1053,14 +1053,14 @@ def test_bigtable_row_setcell_rowkey(): row.set_cell( COLUMN_FAMILY_ID, COL_NAME1, cell_val, timestamp=datetime.datetime.utcnow() ) - # [END bigtable_row_set_cell] + # [END bigtable_api_row_set_cell] response = table.mutate_rows([row]) # validate that all rows written successfully for i, status in enumerate(response): assert status.code == 0 - # [START bigtable_row_row_key] + # [START bigtable_api_row_row_key] from google.cloud.bigtable import Client client = Client(admin=True) @@ -1069,10 +1069,10 @@ def test_bigtable_row_setcell_rowkey(): row = table.row(ROW_KEY1) row_key = row.row_key - # [END bigtable_row_row_key] + # [END bigtable_api_row_row_key] assert row_key == ROW_KEY1 - # [START bigtable_row_table] + # [START bigtable_api_row_table] from google.cloud.bigtable import Client client = Client(admin=True) @@ -1081,7 +1081,7 @@ def test_bigtable_row_setcell_rowkey(): row = table.row(ROW_KEY1) table1 = row.table - # [END bigtable_row_table] + # [END bigtable_api_row_table] assert table1 == table table.truncate(timeout=200) @@ -1097,7 +1097,7 @@ def test_bigtable_row_delete(): written_row_keys.append(row.row_key) assert written_row_keys == [b"row_key_1"] - # [START bigtable_row_delete] + # [START bigtable_api_row_delete] from google.cloud.bigtable import Client client = Client(admin=True) @@ -1109,7 +1109,7 @@ def test_bigtable_row_delete(): row_obj.delete() row_obj.commit() - # [END bigtable_row_delete] + # [END bigtable_api_row_delete] written_row_keys = [] for row in table.read_rows(): @@ -1129,7 +1129,7 @@ def test_bigtable_row_delete_cell(): written_row_keys.append(row.row_key) assert written_row_keys == [row_key1] - # [START bigtable_row_delete_cell] + # [START bigtable_api_row_delete_cell] from google.cloud.bigtable import Client client = Client(admin=True) @@ -1141,7 +1141,7 @@ def test_bigtable_row_delete_cell(): row_obj.delete_cell(COLUMN_FAMILY_ID, COL_NAME1) row_obj.commit() - # [END bigtable_row_delete_cell] + # [END bigtable_api_row_delete_cell] for row in table.read_rows(): assert not row.row_key @@ -1162,7 +1162,7 @@ def test_bigtable_row_delete_cells(): written_row_keys.append(row.row_key) assert written_row_keys == [row_key1] - # [START 
bigtable_row_delete_cells] + # [START bigtable_api_row_delete_cells] from google.cloud.bigtable import Client client = Client(admin=True) @@ -1174,7 +1174,7 @@ def test_bigtable_row_delete_cells(): row_obj.delete_cells(COLUMN_FAMILY_ID, [COL_NAME1, COL_NAME2]) row_obj.commit() - # [END bigtable_row_delete_cells] + # [END bigtable_api_row_delete_cells] for row in table.read_rows(): assert not row.row_key @@ -1188,7 +1188,7 @@ def test_bigtable_row_clear(): mutation_size = row_obj.get_mutations_size() assert mutation_size > 0 - # [START bigtable_row_clear] + # [START bigtable_api_row_clear] from google.cloud.bigtable import Client client = Client(admin=True) @@ -1200,14 +1200,14 @@ def test_bigtable_row_clear(): row_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, b"cell-val") row_obj.clear() - # [END bigtable_row_clear] + # [END bigtable_api_row_clear] mutation_size = row_obj.get_mutations_size() assert mutation_size == 0 def test_bigtable_row_clear_get_mutations_size(): - # [START bigtable_row_get_mutations_size] + # [START bigtable_api_row_get_mutations_size] from google.cloud.bigtable import Client client = Client(admin=True) @@ -1218,7 +1218,7 @@ def test_bigtable_row_clear_get_mutations_size(): row_obj = table.row(row_key_id) mutation_size = row_obj.get_mutations_size() - # [END bigtable_row_get_mutations_size] + # [END bigtable_api_row_get_mutations_size] row_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, b"cell-val") mutation_size = row_obj.get_mutations_size() assert mutation_size > 0 @@ -1229,7 +1229,7 @@ def test_bigtable_row_clear_get_mutations_size(): def test_bigtable_row_setcell_commit_rowkey(): - # [START bigtable_row_set_cell] + # [START bigtable_api_row_set_cell] from google.cloud.bigtable import Client client = Client(admin=True) @@ -1240,10 +1240,10 @@ def test_bigtable_row_setcell_commit_rowkey(): cell_val = b"cell-val" row_obj = table.row(row_key) row_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, cell_val) - # [END bigtable_row_set_cell] + # [END bigtable_api_row_set_cell] row_obj.commit() - # [START bigtable_row_commit] + # [START bigtable_api_row_commit] from google.cloud.bigtable import Client client = Client(admin=True) @@ -1255,7 +1255,7 @@ def test_bigtable_row_setcell_commit_rowkey(): row_obj = table.row(row_key) row_obj.set_cell(COLUMN_FAMILY_ID, COL_NAME1, cell_val) row_obj.commit() - # [END bigtable_row_commit] + # [END bigtable_api_row_commit] written_row_keys = [] for row in table.read_rows(): @@ -1263,7 +1263,7 @@ def test_bigtable_row_setcell_commit_rowkey(): assert written_row_keys == [b"row_key_1", b"row_key_2"] - # [START bigtable_row_row_key] + # [START bigtable_api_row_row_key] from google.cloud.bigtable import Client client = Client(admin=True) @@ -1273,7 +1273,7 @@ def test_bigtable_row_setcell_commit_rowkey(): row_key_id = b"row_key_2" row_obj = table.row(row_key_id) row_key = row_obj.row_key - # [END bigtable_row_row_key] + # [END bigtable_api_row_row_key] assert row_key == row_key_id table.truncate(timeout=300) @@ -1285,7 +1285,7 @@ def test_bigtable_row_append_cell_value(): row.set_cell(COLUMN_FAMILY_ID, COL_NAME1, cell_val1) row.commit() - # [START bigtable_row_append_cell_value] + # [START bigtable_api_row_append_cell_value] from google.cloud.bigtable import Client client = Client(admin=True) @@ -1295,14 +1295,14 @@ def test_bigtable_row_append_cell_value(): cell_val2 = b"2" row.append_cell_value(COLUMN_FAMILY_ID, COL_NAME1, cell_val2) - # [END bigtable_row_append_cell_value] + # [END bigtable_api_row_append_cell_value] row.commit() row_data = 
table.read_row(ROW_KEY1) actual_value = row_data.cell_value(COLUMN_FAMILY_ID, COL_NAME1) assert actual_value == cell_val1 + cell_val2 - # [START bigtable_row_commit] + # [START bigtable_api_row_commit] from google.cloud.bigtable import Client client = Client(admin=True) @@ -1312,9 +1312,9 @@ def test_bigtable_row_append_cell_value(): cell_val = 1 row.set_cell(COLUMN_FAMILY_ID, COL_NAME1, cell_val) row.commit() - # [END bigtable_row_commit] + # [END bigtable_api_row_commit] - # [START bigtable_row_increment_cell_value] + # [START bigtable_api_row_increment_cell_value] from google.cloud.bigtable import Client client = Client(admin=True) @@ -1324,7 +1324,7 @@ def test_bigtable_row_append_cell_value(): int_val = 3 row.increment_cell_value(COLUMN_FAMILY_ID, COL_NAME1, int_val) - # [END bigtable_row_increment_cell_value] + # [END bigtable_api_row_increment_cell_value] row.commit() row_data = table.read_row(ROW_KEY2) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/app_profile.py b/packages/google-cloud-bigtable/google/cloud/bigtable/app_profile.py index 8b36eaede4d2..ebf817c4ede0 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/app_profile.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/app_profile.py @@ -93,8 +93,8 @@ def name(self): For example: .. literalinclude:: snippets.py - :start-after: [START bigtable_app_profile_name] - :end-before: [END bigtable_app_profile_name] + :start-after: [START bigtable_api_app_profile_name] + :end-before: [END bigtable_api_app_profile_name] :dedent: 4 The AppProfile name is of the form @@ -237,8 +237,8 @@ def reload(self): For example: .. literalinclude:: snippets.py - :start-after: [START bigtable_reload_app_profile] - :end-before: [END bigtable_reload_app_profile] + :start-after: [START bigtable_api_reload_app_profile] + :end-before: [END bigtable_api_reload_app_profile] :dedent: 4 """ @@ -254,8 +254,8 @@ def exists(self): For example: .. literalinclude:: snippets.py - :start-after: [START bigtable_app_profile_exists] - :end-before: [END bigtable_app_profile_exists] + :start-after: [START bigtable_api_app_profile_exists] + :end-before: [END bigtable_api_app_profile_exists] :dedent: 4 :rtype: bool @@ -281,8 +281,8 @@ def create(self, ignore_warnings=None): For example: .. literalinclude:: snippets.py - :start-after: [START bigtable_create_app_profile] - :end-before: [END bigtable_create_app_profile] + :start-after: [START bigtable_api_create_app_profile] + :end-before: [END bigtable_api_create_app_profile] :dedent: 4 :type: ignore_warnings: bool @@ -313,8 +313,8 @@ def update(self, ignore_warnings=None): For example: .. literalinclude:: snippets.py - :start-after: [START bigtable_update_app_profile] - :end-before: [END bigtable_update_app_profile] + :start-after: [START bigtable_api_update_app_profile] + :end-before: [END bigtable_api_update_app_profile] :dedent: 4 """ update_mask_pb = field_mask_pb2.FieldMask() @@ -339,8 +339,8 @@ def delete(self, ignore_warnings=None): For example: .. 
literalinclude:: snippets.py - :start-after: [START bigtable_delete_app_profile] - :end-before: [END bigtable_delete_app_profile] + :start-after: [START bigtable_api_delete_app_profile] + :end-before: [END bigtable_api_delete_app_profile] :dedent: 4 :type: ignore_warnings: bool diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py b/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py index 782cb979ab47..950a198ef182 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py @@ -71,8 +71,8 @@ def mutate(self, row): For example: .. literalinclude:: snippets.py - :start-after: [START bigtable_batcher_mutate] - :end-before: [END bigtable_batcher_mutate] + :start-after: [START bigtable_api_batcher_mutate] + :end-before: [END bigtable_api_batcher_mutate] :dedent: 4 :type row: class @@ -111,8 +111,8 @@ def mutate_rows(self, rows): For example: .. literalinclude:: snippets.py - :start-after: [START bigtable_batcher_mutate_rows] - :end-before: [END bigtable_batcher_mutate_rows] + :start-after: [START bigtable_api_batcher_mutate_rows] + :end-before: [END bigtable_api_batcher_mutate_rows] :dedent: 4 :type rows: list:[`~google.cloud.bigtable.row.DirectRow`] @@ -134,8 +134,8 @@ def flush(self): For example: .. literalinclude:: snippets.py - :start-after: [START bigtable_batcher_flush] - :end-before: [END bigtable_batcher_flush] + :start-after: [START bigtable_api_batcher_flush] + :end-before: [END bigtable_api_batcher_flush] :dedent: 4 """ diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py index 2ee6e7c77926..05f3b7761c44 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py @@ -208,8 +208,8 @@ def project_path(self): For example: .. literalinclude:: snippets.py - :start-after: [START bigtable_project_path] - :end-before: [END bigtable_project_path] + :start-after: [START bigtable_api_project_path] + :end-before: [END bigtable_api_project_path] :dedent: 4 The project name is of the form @@ -228,8 +228,8 @@ def table_data_client(self): For example: .. literalinclude:: snippets.py - :start-after: [START bigtable_table_data_client] - :end-before: [END bigtable_table_data_client] + :start-after: [START bigtable_api_table_data_client] + :end-before: [END bigtable_api_table_data_client] :dedent: 4 :rtype: :class:`.bigtable_v2.BigtableClient` @@ -249,8 +249,8 @@ def table_admin_client(self): For example: .. literalinclude:: snippets.py - :start-after: [START bigtable_table_admin_client] - :end-before: [END bigtable_table_admin_client] + :start-after: [START bigtable_api_table_admin_client] + :end-before: [END bigtable_api_table_admin_client] :dedent: 4 :rtype: :class:`.bigtable_admin_pb2.BigtableTableAdmin` @@ -276,8 +276,8 @@ def instance_admin_client(self): For example: .. literalinclude:: snippets.py - :start-after: [START bigtable_instance_admin_client] - :end-before: [END bigtable_instance_admin_client] + :start-after: [START bigtable_api_instance_admin_client] + :end-before: [END bigtable_api_instance_admin_client] :dedent: 4 :rtype: :class:`.bigtable_admin_pb2.BigtableInstanceAdmin` @@ -302,8 +302,8 @@ def instance(self, instance_id, display_name=None, instance_type=None, labels=No For example: .. 
literalinclude:: snippets.py - :start-after: [START bigtable_create_prod_instance] - :end-before: [END bigtable_create_prod_instance] + :start-after: [START bigtable_api_create_prod_instance] + :end-before: [END bigtable_api_create_prod_instance] :dedent: 4 :type instance_id: str @@ -352,8 +352,8 @@ def list_instances(self): For example: .. literalinclude:: snippets.py - :start-after: [START bigtable_list_instances] - :end-before: [END bigtable_list_instances] + :start-after: [START bigtable_api_list_instances] + :end-before: [END bigtable_api_list_instances] :dedent: 4 :rtype: tuple @@ -373,8 +373,8 @@ def list_clusters(self): For example: .. literalinclude:: snippets.py - :start-after: [START bigtable_list_clusters_in_project] - :end-before: [END bigtable_list_clusters_in_project] + :start-after: [START bigtable_api_list_clusters_in_project] + :end-before: [END bigtable_api_list_clusters_in_project] :dedent: 4 :rtype: tuple diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py index 9048c94f4ab0..1cf66f86bc55 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py @@ -97,8 +97,8 @@ def from_pb(cls, cluster_pb, instance): For example: .. literalinclude:: snippets.py - :start-after: [START bigtable_cluster_from_pb] - :end-before: [END bigtable_cluster_from_pb] + :start-after: [START bigtable_api_cluster_from_pb] + :end-before: [END bigtable_api_cluster_from_pb] :dedent: 4 :type cluster_pb: :class:`instance_pb2.Cluster` @@ -158,8 +158,8 @@ def name(self): For example: .. literalinclude:: snippets.py - :start-after: [START bigtable_cluster_name] - :end-before: [END bigtable_cluster_name] + :start-after: [START bigtable_api_cluster_name] + :end-before: [END bigtable_api_cluster_name] :dedent: 4 The cluster name is of the form @@ -180,8 +180,8 @@ def state(self): For example: .. literalinclude:: snippets.py - :start-after: [START bigtable_cluster_state] - :end-before: [END bigtable_cluster_state] + :start-after: [START bigtable_api_cluster_state] + :end-before: [END bigtable_api_cluster_state] :dedent: 4 """ @@ -207,8 +207,8 @@ def reload(self): For example: .. literalinclude:: snippets.py - :start-after: [START bigtable_reload_cluster] - :end-before: [END bigtable_reload_cluster] + :start-after: [START bigtable_api_reload_cluster] + :end-before: [END bigtable_api_reload_cluster] :dedent: 4 """ cluster_pb = self._instance._client.instance_admin_client.get_cluster(self.name) @@ -223,8 +223,8 @@ def exists(self): For example: .. literalinclude:: snippets.py - :start-after: [START bigtable_check_cluster_exists] - :end-before: [END bigtable_check_cluster_exists] + :start-after: [START bigtable_api_check_cluster_exists] + :end-before: [END bigtable_api_check_cluster_exists] :dedent: 4 :rtype: bool @@ -244,8 +244,8 @@ def create(self): For example: .. literalinclude:: snippets.py - :start-after: [START bigtable_create_cluster] - :end-before: [END bigtable_create_cluster] + :start-after: [START bigtable_api_create_cluster] + :end-before: [END bigtable_api_create_cluster] :dedent: 4 .. note:: @@ -278,8 +278,8 @@ def update(self): For example: .. literalinclude:: snippets.py - :start-after: [START bigtable_update_cluster] - :end-before: [END bigtable_update_cluster] + :start-after: [START bigtable_api_update_cluster] + :end-before: [END bigtable_api_update_cluster] :dedent: 4 .. 
note:: @@ -311,8 +311,8 @@ def delete(self): For example: .. literalinclude:: snippets.py - :start-after: [START bigtable_delete_cluster] - :end-before: [END bigtable_delete_cluster] + :start-after: [START bigtable_api_delete_cluster] + :end-before: [END bigtable_api_delete_cluster] :dedent: 4 Marks a cluster and all of its tables for permanent deletion in 7 days. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/column_family.py b/packages/google-cloud-bigtable/google/cloud/bigtable/column_family.py index 0e884fa8919e..eb854cb8b085 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/column_family.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/column_family.py @@ -46,8 +46,8 @@ class MaxVersionsGCRule(GarbageCollectionRule): For example: .. literalinclude:: snippets_table.py - :start-after: [START bigtable_create_family_gc_max_versions] - :end-before: [END bigtable_create_family_gc_max_versions] + :start-after: [START bigtable_api_create_family_gc_max_versions] + :end-before: [END bigtable_api_create_family_gc_max_versions] :dedent: 4 :type max_num_versions: int @@ -80,8 +80,8 @@ class MaxAgeGCRule(GarbageCollectionRule): For example: .. literalinclude:: snippets_table.py - :start-after: [START bigtable_create_family_gc_max_age] - :end-before: [END bigtable_create_family_gc_max_age] + :start-after: [START bigtable_api_create_family_gc_max_age] + :end-before: [END bigtable_api_create_family_gc_max_age] :dedent: 4 :type max_age: :class:`datetime.timedelta` @@ -115,8 +115,8 @@ class GCRuleUnion(GarbageCollectionRule): For example: .. literalinclude:: snippets_table.py - :start-after: [START bigtable_create_family_gc_union] - :end-before: [END bigtable_create_family_gc_union] + :start-after: [START bigtable_api_create_family_gc_union] + :end-before: [END bigtable_api_create_family_gc_union] :dedent: 4 :type rules: list @@ -150,8 +150,8 @@ class GCRuleIntersection(GarbageCollectionRule): For example: .. literalinclude:: snippets_table.py - :start-after: [START bigtable_create_family_gc_intersection] - :end-before: [END bigtable_create_family_gc_intersection] + :start-after: [START bigtable_api_create_family_gc_intersection] + :end-before: [END bigtable_api_create_family_gc_intersection] :dedent: 4 :type rules: list @@ -214,8 +214,8 @@ def name(self): For example: .. literalinclude:: snippets_table.py - :start-after: [START bigtable_column_family_name] - :end-before: [END bigtable_column_family_name] + :start-after: [START bigtable_api_column_family_name] + :end-before: [END bigtable_api_column_family_name] :dedent: 4 .. note:: @@ -261,8 +261,8 @@ def create(self): For example: .. literalinclude:: snippets_table.py - :start-after: [START bigtable_create_column_family] - :end-before: [END bigtable_create_column_family] + :start-after: [START bigtable_api_create_column_family] + :end-before: [END bigtable_api_create_column_family] :dedent: 4 """ @@ -284,8 +284,8 @@ def update(self): For example: .. literalinclude:: snippets_table.py - :start-after: [START bigtable_update_column_family] - :end-before: [END bigtable_update_column_family] + :start-after: [START bigtable_api_update_column_family] + :end-before: [END bigtable_api_update_column_family] :dedent: 4 .. note:: @@ -311,8 +311,8 @@ def delete(self): For example: .. 
literalinclude:: snippets_table.py - :start-after: [START bigtable_delete_column_family] - :end-before: [END bigtable_delete_column_family] + :start-after: [START bigtable_api_delete_column_family] + :end-before: [END bigtable_api_delete_column_family] :dedent: 4 """ diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py index 3656f40a4936..a126ee27a67a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py @@ -132,8 +132,8 @@ def from_pb(cls, instance_pb, client): For example: .. literalinclude:: snippets.py - :start-after: [START bigtable_instance_from_pb] - :end-before: [END bigtable_instance_from_pb] + :start-after: [START bigtable_api_instance_from_pb] + :end-before: [END bigtable_api_instance_from_pb] :dedent: 4 :type instance_pb: :class:`instance_pb2.Instance` @@ -177,8 +177,8 @@ def name(self): For example: .. literalinclude:: snippets.py - :start-after: [START bigtable_instance_name] - :end-before: [END bigtable_instance_name] + :start-after: [START bigtable_api_instance_name] + :end-before: [END bigtable_api_instance_name] :dedent: 4 The instance name is of the form @@ -199,8 +199,8 @@ def state(self): For example: .. literalinclude:: snippets.py - :start-after: [START bigtable_instance_state] - :end-before: [END bigtable_instance_state] + :start-after: [START bigtable_api_instance_state] + :end-before: [END bigtable_api_instance_state] :dedent: 4 """ @@ -232,8 +232,8 @@ def create( For example: .. literalinclude:: snippets.py - :start-after: [START bigtable_create_prod_instance] - :end-before: [END bigtable_create_prod_instance] + :start-after: [START bigtable_api_create_prod_instance] + :end-before: [END bigtable_api_create_prod_instance] :dedent: 4 .. note:: @@ -333,8 +333,8 @@ def exists(self): For example: .. literalinclude:: snippets.py - :start-after: [START bigtable_check_instance_exists] - :end-before: [END bigtable_check_instance_exists] + :start-after: [START bigtable_api_check_instance_exists] + :end-before: [END bigtable_api_check_instance_exists] :dedent: 4 :rtype: bool @@ -353,8 +353,8 @@ def reload(self): For example: .. literalinclude:: snippets.py - :start-after: [START bigtable_reload_instance] - :end-before: [END bigtable_reload_instance] + :start-after: [START bigtable_api_reload_instance] + :end-before: [END bigtable_api_reload_instance] :dedent: 4 """ instance_pb = self._client.instance_admin_client.get_instance(self.name) @@ -369,8 +369,8 @@ def update(self): For example: .. literalinclude:: snippets.py - :start-after: [START bigtable_update_instance] - :end-before: [END bigtable_update_instance] + :start-after: [START bigtable_api_update_instance] + :end-before: [END bigtable_api_update_instance] :dedent: 4 .. note:: @@ -416,8 +416,8 @@ def delete(self): For example: .. literalinclude:: snippets.py - :start-after: [START bigtable_delete_instance] - :end-before: [END bigtable_delete_instance] + :start-after: [START bigtable_api_delete_instance] + :end-before: [END bigtable_api_delete_instance] :dedent: 4 Marks an instance and all of its tables for permanent deletion @@ -447,8 +447,8 @@ def get_iam_policy(self, requested_policy_version=None): For example: .. 
literalinclude:: snippets.py - :start-after: [START bigtable_get_iam_policy] - :end-before: [END bigtable_get_iam_policy] + :start-after: [START bigtable_api_get_iam_policy] + :end-before: [END bigtable_api_get_iam_policy] :dedent: 4 :type requested_policy_version: int or ``NoneType`` @@ -487,8 +487,8 @@ class `google.cloud.bigtable.policy.Policy` For example: .. literalinclude:: snippets.py - :start-after: [START bigtable_set_iam_policy] - :end-before: [END bigtable_set_iam_policy] + :start-after: [START bigtable_api_set_iam_policy] + :end-before: [END bigtable_api_set_iam_policy] :dedent: 4 :type policy: :class:`google.cloud.bigtable.policy.Policy` @@ -511,8 +511,8 @@ def test_iam_permissions(self, permissions): For example: .. literalinclude:: snippets.py - :start-after: [START bigtable_test_iam_permissions] - :end-before: [END bigtable_test_iam_permissions] + :start-after: [START bigtable_api_test_iam_permissions] + :end-before: [END bigtable_api_test_iam_permissions] :dedent: 4 :type permissions: list @@ -541,8 +541,8 @@ def cluster( For example: .. literalinclude:: snippets.py - :start-after: [START bigtable_create_cluster] - :end-before: [END bigtable_create_cluster] + :start-after: [START bigtable_api_create_cluster] + :end-before: [END bigtable_api_create_cluster] :dedent: 4 :type cluster_id: str @@ -585,8 +585,8 @@ def list_clusters(self): For example: .. literalinclude:: snippets.py - :start-after: [START bigtable_list_clusters_on_instance] - :end-before: [END bigtable_list_clusters_on_instance] + :start-after: [START bigtable_api_list_clusters_on_instance] + :end-before: [END bigtable_api_list_clusters_on_instance] :dedent: 4 :rtype: tuple @@ -606,8 +606,8 @@ def table(self, table_id, mutation_timeout=None, app_profile_id=None): For example: .. literalinclude:: snippets.py - :start-after: [START bigtable_create_table] - :end-before: [END bigtable_create_table] + :start-after: [START bigtable_api_create_table] + :end-before: [END bigtable_api_create_table] :dedent: 4 :type table_id: str @@ -632,8 +632,8 @@ def list_tables(self): For example: .. literalinclude:: snippets.py - :start-after: [START bigtable_list_tables] - :end-before: [END bigtable_list_tables] + :start-after: [START bigtable_api_list_tables] + :end-before: [END bigtable_api_list_tables] :dedent: 4 :rtype: list of :class:`Table ` @@ -668,8 +668,8 @@ def app_profile( For example: .. literalinclude:: snippets.py - :start-after: [START bigtable_create_app_profile] - :end-before: [END bigtable_create_app_profile] + :start-after: [START bigtable_api_create_app_profile] + :end-before: [END bigtable_api_create_app_profile] :dedent: 4 :type app_profile_id: str @@ -715,8 +715,8 @@ def list_app_profiles(self): For example: .. literalinclude:: snippets.py - :start-after: [START bigtable_list_app_profiles] - :end-before: [END bigtable_list_app_profiles] + :start-after: [START bigtable_api_list_app_profiles] + :end-before: [END bigtable_api_list_app_profiles] :dedent: 4 :rtype: :list:[`~google.cloud.bigtable.app_profile.AppProfile`] diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/policy.py b/packages/google-cloud-bigtable/google/cloud/bigtable/policy.py index 1fd7494247d7..f5558b6f0d49 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/policy.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/policy.py @@ -104,8 +104,8 @@ def bigtable_admins(self): For example: .. 
literalinclude:: snippets.py - :start-after: [START bigtable_admins_policy] - :end-before: [END bigtable_admins_policy] + :start-after: [START bigtable_api_admins_policy] + :end-before: [END bigtable_api_admins_policy] :dedent: 4 """ result = set() @@ -122,8 +122,8 @@ def bigtable_readers(self): For example: .. literalinclude:: snippets.py - :start-after: [START bigtable_readers_policy] - :end-before: [END bigtable_readers_policy] + :start-after: [START bigtable_api_readers_policy] + :end-before: [END bigtable_api_readers_policy] :dedent: 4 """ result = set() @@ -140,8 +140,8 @@ def bigtable_users(self): For example: .. literalinclude:: snippets.py - :start-after: [START bigtable_users_policy] - :end-before: [END bigtable_users_policy] + :start-after: [START bigtable_api_users_policy] + :end-before: [END bigtable_api_users_policy] :dedent: 4 """ result = set() @@ -158,8 +158,8 @@ def bigtable_viewers(self): For example: .. literalinclude:: snippets.py - :start-after: [START bigtable_viewers_policy] - :end-before: [END bigtable_viewers_policy] + :start-after: [START bigtable_api_viewers_policy] + :end-before: [END bigtable_api_viewers_policy] :dedent: 4 """ result = set() diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row.py index 87a2680568de..f3e4231e1fc4 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row.py @@ -59,8 +59,8 @@ def row_key(self): For example: .. literalinclude:: snippets_table.py - :start-after: [START bigtable_row_row_key] - :end-before: [END bigtable_row_row_key] + :start-after: [START bigtable_api_row_row_key] + :end-before: [END bigtable_api_row_row_key] :dedent: 4 :rtype: bytes @@ -75,8 +75,8 @@ def table(self): For example: .. literalinclude:: snippets_table.py - :start-after: [START bigtable_row_table] - :end-before: [END bigtable_row_table] + :start-after: [START bigtable_api_row_table] + :end-before: [END bigtable_api_row_table] :dedent: 4 :rtype: table: :class:`Table ` @@ -300,8 +300,8 @@ def get_mutations_size(self): For example: .. literalinclude:: snippets_table.py - :start-after: [START bigtable_row_get_mutations_size] - :end-before: [END bigtable_row_get_mutations_size] + :start-after: [START bigtable_api_row_get_mutations_size] + :end-before: [END bigtable_api_row_get_mutations_size] :dedent: 4 """ @@ -328,8 +328,8 @@ def set_cell(self, column_family_id, column, value, timestamp=None): For example: .. literalinclude:: snippets_table.py - :start-after: [START bigtable_row_set_cell] - :end-before: [END bigtable_row_set_cell] + :start-after: [START bigtable_api_row_set_cell] + :end-before: [END bigtable_api_row_set_cell] :dedent: 4 :type column_family_id: str @@ -364,8 +364,8 @@ def delete(self): For example: .. literalinclude:: snippets_table.py - :start-after: [START bigtable_row_delete] - :end-before: [END bigtable_row_delete] + :start-after: [START bigtable_api_row_delete] + :end-before: [END bigtable_api_row_delete] :dedent: 4 """ self._delete(state=None) @@ -383,8 +383,8 @@ def delete_cell(self, column_family_id, column, time_range=None): For example: .. 
literalinclude:: snippets_table.py - :start-after: [START bigtable_row_delete_cell] - :end-before: [END bigtable_row_delete_cell] + :start-after: [START bigtable_api_row_delete_cell] + :end-before: [END bigtable_api_row_delete_cell] :dedent: 4 :type column_family_id: str @@ -417,8 +417,8 @@ def delete_cells(self, column_family_id, columns, time_range=None): For example: .. literalinclude:: snippets_table.py - :start-after: [START bigtable_row_delete_cells] - :end-before: [END bigtable_row_delete_cells] + :start-after: [START bigtable_api_row_delete_cells] + :end-before: [END bigtable_api_row_delete_cells] :dedent: 4 :type column_family_id: str @@ -453,8 +453,8 @@ def commit(self): For example: .. literalinclude:: snippets_table.py - :start-after: [START bigtable_row_commit] - :end-before: [END bigtable_row_commit] + :start-after: [START bigtable_api_row_commit] + :end-before: [END bigtable_api_row_commit] :dedent: 4 :rtype: :class:`~google.rpc.status_pb2.Status` @@ -475,8 +475,8 @@ def clear(self): For example: .. literalinclude:: snippets_table.py - :start-after: [START bigtable_row_clear] - :end-before: [END bigtable_row_clear] + :start-after: [START bigtable_api_row_clear] + :end-before: [END bigtable_api_row_clear] :dedent: 4 """ del self._pb_mutations[:] @@ -567,8 +567,8 @@ def commit(self): For example: .. literalinclude:: snippets_table.py - :start-after: [START bigtable_row_commit] - :end-before: [END bigtable_row_commit] + :start-after: [START bigtable_api_row_commit] + :end-before: [END bigtable_api_row_commit] :dedent: 4 :rtype: bool @@ -621,8 +621,8 @@ def set_cell(self, column_family_id, column, value, timestamp=None, state=True): For example: .. literalinclude:: snippets_table.py - :start-after: [START bigtable_row_set_cell] - :end-before: [END bigtable_row_set_cell] + :start-after: [START bigtable_api_row_set_cell] + :end-before: [END bigtable_api_row_set_cell] :dedent: 4 :type column_family_id: str @@ -663,8 +663,8 @@ def delete(self, state=True): For example: .. literalinclude:: snippets_table.py - :start-after: [START bigtable_row_delete] - :end-before: [END bigtable_row_delete] + :start-after: [START bigtable_api_row_delete] + :end-before: [END bigtable_api_row_delete] :dedent: 4 :type state: bool @@ -686,8 +686,8 @@ def delete_cell(self, column_family_id, column, time_range=None, state=True): For example: .. literalinclude:: snippets_table.py - :start-after: [START bigtable_row_delete_cell] - :end-before: [END bigtable_row_delete_cell] + :start-after: [START bigtable_api_row_delete_cell] + :end-before: [END bigtable_api_row_delete_cell] :dedent: 4 :type column_family_id: str @@ -724,8 +724,8 @@ def delete_cells(self, column_family_id, columns, time_range=None, state=True): For example: .. literalinclude:: snippets_table.py - :start-after: [START bigtable_row_delete_cells] - :end-before: [END bigtable_row_delete_cells] + :start-after: [START bigtable_api_row_delete_cells] + :end-before: [END bigtable_api_row_delete_cells] :dedent: 4 :type column_family_id: str @@ -759,8 +759,8 @@ def clear(self): For example: .. literalinclude:: snippets_table.py - :start-after: [START bigtable_row_clear] - :end-before: [END bigtable_row_clear] + :start-after: [START bigtable_api_row_clear] + :end-before: [END bigtable_api_row_clear] :dedent: 4 """ del self._true_pb_mutations[:] @@ -798,8 +798,8 @@ def clear(self): For example: .. 
literalinclude:: snippets_table.py - :start-after: [START bigtable_row_clear] - :end-before: [END bigtable_row_clear] + :start-after: [START bigtable_api_row_clear] + :end-before: [END bigtable_api_row_clear] :dedent: 4 """ del self._rule_pb_list[:] @@ -817,8 +817,8 @@ def append_cell_value(self, column_family_id, column, value): For example: .. literalinclude:: snippets_table.py - :start-after: [START bigtable_row_append_cell_value] - :end-before: [END bigtable_row_append_cell_value] + :start-after: [START bigtable_api_row_append_cell_value] + :end-before: [END bigtable_api_row_append_cell_value] :dedent: 4 :type column_family_id: str @@ -858,8 +858,8 @@ def increment_cell_value(self, column_family_id, column, int_value): For example: .. literalinclude:: snippets_table.py - :start-after: [START bigtable_row_increment_cell_value] - :end-before: [END bigtable_row_increment_cell_value] + :start-after: [START bigtable_api_row_increment_cell_value] + :end-before: [END bigtable_api_row_increment_cell_value] :dedent: 4 :type column_family_id: str @@ -905,8 +905,8 @@ def commit(self): For example: .. literalinclude:: snippets_table.py - :start-after: [START bigtable_row_commit] - :end-before: [END bigtable_row_commit] + :start-after: [START bigtable_api_row_commit] + :end-before: [END bigtable_api_row_commit] :dedent: 4 :rtype: dict diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py index 8760d77b0b6d..1cc442f2cb89 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py @@ -182,8 +182,8 @@ def cells(self): For example: .. literalinclude:: snippets_table.py - :start-after: [START bigtable_row_data_cells] - :end-before: [END bigtable_row_data_cells] + :start-after: [START bigtable_api_row_data_cells] + :end-before: [END bigtable_api_row_data_cells] :dedent: 4 :rtype: dict @@ -209,8 +209,8 @@ def find_cells(self, column_family_id, column): For example: .. literalinclude:: snippets_table.py - :start-after: [START bigtable_row_find_cells] - :end-before: [END bigtable_row_find_cells] + :start-after: [START bigtable_api_row_find_cells] + :end-before: [END bigtable_api_row_find_cells] :dedent: 4 Args: @@ -247,8 +247,8 @@ def cell_value(self, column_family_id, column, index=0): For example: .. literalinclude:: snippets_table.py - :start-after: [START bigtable_row_cell_value] - :end-before: [END bigtable_row_cell_value] + :start-after: [START bigtable_api_row_cell_value] + :end-before: [END bigtable_api_row_cell_value] :dedent: 4 Args: @@ -289,8 +289,8 @@ def cell_values(self, column_family_id, column, max_count=None): For example: .. literalinclude:: snippets_table.py - :start-after: [START bigtable_row_cell_values] - :end-before: [END bigtable_row_cell_values] + :start-after: [START bigtable_api_row_cell_values] + :end-before: [END bigtable_api_row_cell_values] :dedent: 4 Args: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py index e229c805a05e..7697af4f776a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py @@ -56,8 +56,8 @@ def add_row_key(self, row_key): For example: .. 
literalinclude:: snippets_table.py - :start-after: [START bigtable_add_row_key] - :end-before: [END bigtable_add_row_key] + :start-after: [START bigtable_api_add_row_key] + :end-before: [END bigtable_api_add_row_key] :dedent: 4 :type row_key: bytes @@ -71,8 +71,8 @@ def add_row_range(self, row_range): For example: .. literalinclude:: snippets_table.py - :start-after: [START bigtable_add_row_range] - :end-before: [END bigtable_add_row_range] + :start-after: [START bigtable_api_add_row_range] + :end-before: [END bigtable_api_add_row_range] :dedent: 4 :type row_range: class:`RowRange` @@ -88,8 +88,8 @@ def add_row_range_from_keys( For example: .. literalinclude:: snippets_table.py - :start-after: [START bigtable_row_range_from_keys] - :end-before: [END bigtable_row_range_from_keys] + :start-after: [START bigtable_api_row_range_from_keys] + :end-before: [END bigtable_api_row_range_from_keys] :dedent: 4 :type start_key: bytes @@ -118,8 +118,8 @@ def add_row_range_with_prefix(self, row_key_prefix): For example: .. literalinclude:: snippets_table.py - :start-after: [START bigtable_add_row_range_with_prefix] - :end-before: [END bigtable_add_row_range_with_prefix] + :start-after: [START bigtable_api_add_row_range_with_prefix] + :end-before: [END bigtable_api_add_row_range_with_prefix] :type row_key_prefix: str :param row_key_prefix: To retrieve all rows that start with this row key prefix. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index 35ca43d2460b..887b74b0251f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -120,8 +120,8 @@ def name(self): For example: .. literalinclude:: snippets_table.py - :start-after: [START bigtable_table_name] - :end-before: [END bigtable_table_name] + :start-after: [START bigtable_api_table_name] + :end-before: [END bigtable_api_table_name] :dedent: 4 .. note:: @@ -149,8 +149,8 @@ def get_iam_policy(self): For example: .. literalinclude:: snippets_table.py - :start-after: [START bigtable_table_get_iam_policy] - :end-before: [END bigtable_table_get_iam_policy] + :start-after: [START bigtable_api_table_get_iam_policy] + :end-before: [END bigtable_api_table_get_iam_policy] :dedent: 4 :rtype: :class:`google.cloud.bigtable.policy.Policy` @@ -170,8 +170,8 @@ class `google.cloud.bigtable.policy.Policy` For example: .. literalinclude:: snippets_table.py - :start-after: [START bigtable_table_set_iam_policy] - :end-before: [END bigtable_table_set_iam_policy] + :start-after: [START bigtable_api_table_set_iam_policy] + :end-before: [END bigtable_api_table_set_iam_policy] :dedent: 4 :type policy: :class:`google.cloud.bigtable.policy.Policy` @@ -192,8 +192,8 @@ def test_iam_permissions(self, permissions): For example: .. literalinclude:: snippets_table.py - :start-after: [START bigtable_table_test_iam_permissions] - :end-before: [END bigtable_table_test_iam_permissions] + :start-after: [START bigtable_api_table_test_iam_permissions] + :end-before: [END bigtable_api_table_test_iam_permissions] :dedent: 4 :type permissions: list @@ -220,8 +220,8 @@ def column_family(self, column_family_id, gc_rule=None): For example: .. 
literalinclude:: snippets_table.py - :start-after: [START bigtable_table_column_family] - :end-before: [END bigtable_table_column_family] + :start-after: [START bigtable_api_table_column_family] + :end-before: [END bigtable_api_table_column_family] :dedent: 4 :type column_family_id: str @@ -243,8 +243,8 @@ def row(self, row_key, filter_=None, append=False): For example: .. literalinclude:: snippets_table.py - :start-after: [START bigtable_table_row] - :end-before: [END bigtable_table_row] + :start-after: [START bigtable_api_table_row] + :end-before: [END bigtable_api_table_row] :dedent: 4 .. warning:: @@ -291,8 +291,8 @@ def append_row(self, row_key): For example: .. literalinclude:: snippets_table.py - :start-after: [START bigtable_table_append_row] - :end-before: [END bigtable_table_append_row] + :start-after: [START bigtable_api_table_append_row] + :end-before: [END bigtable_api_table_append_row] :dedent: 4 Args: @@ -309,8 +309,8 @@ def direct_row(self, row_key): For example: .. literalinclude:: snippets_table.py - :start-after: [START bigtable_table_direct_row] - :end-before: [END bigtable_table_direct_row] + :start-after: [START bigtable_api_table_direct_row] + :end-before: [END bigtable_api_table_direct_row] :dedent: 4 Args: @@ -327,8 +327,8 @@ def conditional_row(self, row_key, filter_): For example: .. literalinclude:: snippets_table.py - :start-after: [START bigtable_table_conditional_row] - :end-before: [END bigtable_table_conditional_row] + :start-after: [START bigtable_api_table_conditional_row] + :end-before: [END bigtable_api_table_conditional_row] :dedent: 4 Args: @@ -356,8 +356,8 @@ def create(self, initial_split_keys=[], column_families={}): For example: .. literalinclude:: snippets_table.py - :start-after: [START bigtable_create_table] - :end-before: [END bigtable_create_table] + :start-after: [START bigtable_api_create_table] + :end-before: [END bigtable_api_create_table] :dedent: 4 .. note:: @@ -401,8 +401,8 @@ def exists(self): For example: .. literalinclude:: snippets_table.py - :start-after: [START bigtable_check_table_exists] - :end-before: [END bigtable_check_table_exists] + :start-after: [START bigtable_api_check_table_exists] + :end-before: [END bigtable_api_check_table_exists] :dedent: 4 :rtype: bool @@ -421,8 +421,8 @@ def delete(self): For example: .. literalinclude:: snippets_table.py - :start-after: [START bigtable_delete_table] - :end-before: [END bigtable_delete_table] + :start-after: [START bigtable_api_delete_table] + :end-before: [END bigtable_api_delete_table] :dedent: 4 """ table_client = self._instance._client.table_admin_client @@ -434,8 +434,8 @@ def list_column_families(self): For example: .. literalinclude:: snippets_table.py - :start-after: [START bigtable_list_column_families] - :end-before: [END bigtable_list_column_families] + :start-after: [START bigtable_api_list_column_families] + :end-before: [END bigtable_api_list_column_families] :dedent: 4 :rtype: dict @@ -462,8 +462,8 @@ def get_cluster_states(self): For example: .. literalinclude:: snippets_table.py - :start-after: [START bigtable_get_cluster_states] - :end-before: [END bigtable_get_cluster_states] + :start-after: [START bigtable_api_get_cluster_states] + :end-before: [END bigtable_api_get_cluster_states] :dedent: 4 :rtype: dict @@ -487,8 +487,8 @@ def read_row(self, row_key, filter_=None): For example: .. 
literalinclude:: snippets_table.py - :start-after: [START bigtable_read_row] - :end-before: [END bigtable_read_row] + :start-after: [START bigtable_api_read_row] + :end-before: [END bigtable_api_read_row] :dedent: 4 :type row_key: bytes @@ -527,8 +527,8 @@ def read_rows( For example: .. literalinclude:: snippets_table.py - :start-after: [START bigtable_read_rows] - :end-before: [END bigtable_read_rows] + :start-after: [START bigtable_api_read_rows] + :end-before: [END bigtable_api_read_rows] :dedent: 4 :type start_key: bytes @@ -631,8 +631,8 @@ def mutate_rows(self, rows, retry=DEFAULT_RETRY, timeout=DEFAULT): For example: .. literalinclude:: snippets_table.py - :start-after: [START bigtable_mutate_rows] - :end-before: [END bigtable_mutate_rows] + :start-after: [START bigtable_api_mutate_rows] + :end-before: [END bigtable_api_mutate_rows] :dedent: 4 The method tries to update all specified rows. @@ -682,8 +682,8 @@ def sample_row_keys(self): For example: .. literalinclude:: snippets_table.py - :start-after: [START bigtable_sample_row_keys] - :end-before: [END bigtable_sample_row_keys] + :start-after: [START bigtable_api_sample_row_keys] + :end-before: [END bigtable_api_sample_row_keys] :dedent: 4 The returned row keys will delimit contiguous sections of the table of @@ -727,8 +727,8 @@ def truncate(self, timeout=None): For example: .. literalinclude:: snippets_table.py - :start-after: [START bigtable_truncate_table] - :end-before: [END bigtable_truncate_table] + :start-after: [START bigtable_api_truncate_table] + :end-before: [END bigtable_api_truncate_table] :dedent: 4 :type timeout: float @@ -758,8 +758,8 @@ def drop_by_prefix(self, row_key_prefix, timeout=None): For example: .. literalinclude:: snippets_table.py - :start-after: [START bigtable_drop_by_prefix] - :end-before: [END bigtable_drop_by_prefix] + :start-after: [START bigtable_api_drop_by_prefix] + :end-before: [END bigtable_api_drop_by_prefix] :dedent: 4 :type row_key_prefix: bytes @@ -793,8 +793,8 @@ def mutations_batcher(self, flush_count=FLUSH_COUNT, max_row_bytes=MAX_ROW_BYTES For example: .. literalinclude:: snippets_table.py - :start-after: [START bigtable_mutations_batcher] - :end-before: [END bigtable_mutations_batcher] + :start-after: [START bigtable_api_mutations_batcher] + :end-before: [END bigtable_api_mutations_batcher] :dedent: 4 :type flush_count: int From 486199f455353a1febb8ea036855c48c7e011875 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Mon, 11 Jan 2021 21:15:39 +0100 Subject: [PATCH 385/892] chore(deps): update dependency apache-beam to v2.27.0 (#193) --- packages/google-cloud-bigtable/samples/beam/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index 6f1bd3ee9aed..481d8fdfb60f 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ -apache-beam==2.26.0 +apache-beam==2.27.0 google-cloud-bigtable==1.6.1 google-cloud-core==1.5.0 \ No newline at end of file From a999931e60d0a3fb6f4c83d49a662f32ecb86c46 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Thu, 14 Jan 2021 08:22:02 -0800 Subject: [PATCH 386/892] chore: Re-generated to pick up changes from synthtool. 
(#191) * chore(python): fix column sizing issue in docs Source-Author: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Source-Date: Thu Jan 7 11:58:32 2021 -0700 Source-Repo: googleapis/synthtool Source-Sha: f15b57ccfd71106c2299e9b89835fe6e55015662 Source-Link: https://github.com/googleapis/synthtool/commit/f15b57ccfd71106c2299e9b89835fe6e55015662 * chore(python): use 'http' in LICENSE Co-authored-by: Tim Swast Source-Author: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Source-Date: Thu Jan 7 13:05:12 2021 -0700 Source-Repo: googleapis/synthtool Source-Sha: 41a4e56982620d3edcf110d76f4fcdfdec471ac8 Source-Link: https://github.com/googleapis/synthtool/commit/41a4e56982620d3edcf110d76f4fcdfdec471ac8 --- packages/google-cloud-bigtable/LICENSE | 7 ++++--- packages/google-cloud-bigtable/docs/_static/custom.css | 7 ++++++- packages/google-cloud-bigtable/synth.metadata | 6 +++--- 3 files changed, 13 insertions(+), 7 deletions(-) diff --git a/packages/google-cloud-bigtable/LICENSE b/packages/google-cloud-bigtable/LICENSE index a8ee855de2aa..d64569567334 100644 --- a/packages/google-cloud-bigtable/LICENSE +++ b/packages/google-cloud-bigtable/LICENSE @@ -1,6 +1,7 @@ - Apache License + + Apache License Version 2.0, January 2004 - https://www.apache.org/licenses/ + http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION @@ -192,7 +193,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/packages/google-cloud-bigtable/docs/_static/custom.css b/packages/google-cloud-bigtable/docs/_static/custom.css index 0abaf229fce3..bcd37bbd3c4a 100644 --- a/packages/google-cloud-bigtable/docs/_static/custom.css +++ b/packages/google-cloud-bigtable/docs/_static/custom.css @@ -1,4 +1,9 @@ div#python2-eol { border-color: red; border-width: medium; -} \ No newline at end of file +} + +/* Ensure minimum width for 'Parameters' / 'Returns' column */ +dl.field-list > dt { + min-width: 100px +} diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index ff1adf69eee2..24be7e4da03d 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -4,7 +4,7 @@ "git": { "name": ".", "remote": "https://github.com/googleapis/python-bigtable.git", - "sha": "58485dd159de1b6f78937740bb9ed6b57d3000ee" + "sha": "a6a88838659a728edaadb369a8d10d7992a5679f" } }, { @@ -19,14 +19,14 @@ "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "373861061648b5fe5e0ac4f8a38b32d639ee93e4" + "sha": "41a4e56982620d3edcf110d76f4fcdfdec471ac8" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "373861061648b5fe5e0ac4f8a38b32d639ee93e4" + "sha": "41a4e56982620d3edcf110d76f4fcdfdec471ac8" } } ], From 900dc7d4e337e8afbd51707fbcdc44cf55d881f9 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Thu, 21 Jan 2021 12:28:58 -0800 Subject: [PATCH 387/892] chore: tweak kokoro config (via synth) (#194) * chore(python): skip docfx in main presubmit * chore(python): skip docfx in main presubmit * fix: properly template the repo name Source-Author: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Source-Date: Fri Jan 8 
10:32:13 2021 -0700 Source-Repo: googleapis/synthtool Source-Sha: fb53b6fb373b7c3edf4e55f3e8036bc6d73fa483 Source-Link: https://github.com/googleapis/synthtool/commit/fb53b6fb373b7c3edf4e55f3e8036bc6d73fa483 * chore: add missing quotation mark Source-Author: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Source-Date: Mon Jan 11 09:43:06 2021 -0700 Source-Repo: googleapis/synthtool Source-Sha: 16ec872dd898d7de6e1822badfac32484b5d9031 Source-Link: https://github.com/googleapis/synthtool/commit/16ec872dd898d7de6e1822badfac32484b5d9031 --- packages/google-cloud-bigtable/.kokoro/build.sh | 16 ++++++++++------ .../.kokoro/docs/docs-presubmit.cfg | 11 +++++++++++ packages/google-cloud-bigtable/.trampolinerc | 2 ++ packages/google-cloud-bigtable/synth.metadata | 6 +++--- 4 files changed, 26 insertions(+), 9 deletions(-) diff --git a/packages/google-cloud-bigtable/.kokoro/build.sh b/packages/google-cloud-bigtable/.kokoro/build.sh index 124fd6ce9fac..76d9329bad4d 100755 --- a/packages/google-cloud-bigtable/.kokoro/build.sh +++ b/packages/google-cloud-bigtable/.kokoro/build.sh @@ -15,7 +15,11 @@ set -eo pipefail -cd github/python-bigtable +if [[ -z "${PROJECT_ROOT:-}" ]]; then + PROJECT_ROOT="github/python-bigtable" +fi + +cd "${PROJECT_ROOT}" # Disable buffering, so that the logs stream through. export PYTHONUNBUFFERED=1 @@ -30,16 +34,16 @@ export GOOGLE_APPLICATION_CREDENTIALS=${KOKORO_GFILE_DIR}/service-account.json export PROJECT_ID=$(cat "${KOKORO_GFILE_DIR}/project-id.json") # Remove old nox -python3.6 -m pip uninstall --yes --quiet nox-automation +python3 -m pip uninstall --yes --quiet nox-automation # Install nox -python3.6 -m pip install --upgrade --quiet nox -python3.6 -m nox --version +python3 -m pip install --upgrade --quiet nox +python3 -m nox --version # If NOX_SESSION is set, it only runs the specified session, # otherwise run all the sessions. if [[ -n "${NOX_SESSION:-}" ]]; then - python3.6 -m nox -s "${NOX_SESSION:-}" + python3 -m nox -s ${NOX_SESSION:-} else - python3.6 -m nox + python3 -m nox fi diff --git a/packages/google-cloud-bigtable/.kokoro/docs/docs-presubmit.cfg b/packages/google-cloud-bigtable/.kokoro/docs/docs-presubmit.cfg index 1118107829b7..001770ea6f12 100644 --- a/packages/google-cloud-bigtable/.kokoro/docs/docs-presubmit.cfg +++ b/packages/google-cloud-bigtable/.kokoro/docs/docs-presubmit.cfg @@ -15,3 +15,14 @@ env_vars: { key: "TRAMPOLINE_IMAGE_UPLOAD" value: "false" } + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-bigtable/.kokoro/build.sh" +} + +# Only run this nox session. +env_vars: { + key: "NOX_SESSION" + value: "docs docfx" +} diff --git a/packages/google-cloud-bigtable/.trampolinerc b/packages/google-cloud-bigtable/.trampolinerc index 995ee29111e1..c7d663ae9c57 100644 --- a/packages/google-cloud-bigtable/.trampolinerc +++ b/packages/google-cloud-bigtable/.trampolinerc @@ -18,12 +18,14 @@ required_envvars+=( "STAGING_BUCKET" "V2_STAGING_BUCKET" + "NOX_SESSION" ) # Add env vars which are passed down into the container here. pass_down_envvars+=( "STAGING_BUCKET" "V2_STAGING_BUCKET" + "NOX_SESSION" ) # Prevent unintentional override on the default image. 
diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index 24be7e4da03d..7cf1fe83af32 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -4,7 +4,7 @@ "git": { "name": ".", "remote": "https://github.com/googleapis/python-bigtable.git", - "sha": "a6a88838659a728edaadb369a8d10d7992a5679f" + "sha": "604dde3aa4aa292d7ec9598917ade7acf1c93f5f" } }, { @@ -19,14 +19,14 @@ "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "41a4e56982620d3edcf110d76f4fcdfdec471ac8" + "sha": "16ec872dd898d7de6e1822badfac32484b5d9031" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "41a4e56982620d3edcf110d76f4fcdfdec471ac8" + "sha": "16ec872dd898d7de6e1822badfac32484b5d9031" } } ], From a85575328a80ef309fa866c0232ca6e8f7082c05 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Wed, 27 Jan 2021 08:32:24 -0800 Subject: [PATCH 388/892] build(python): make `NOX_SESSION` optional (#197) I added this accidentally in #889. `NOX_SESSION` should be passed down if it is set but not marked required. Source-Author: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Source-Date: Tue Jan 19 09:38:04 2021 -0700 Source-Repo: googleapis/synthtool Source-Sha: ba960d730416fe05c50547e975ce79fcee52c671 Source-Link: https://github.com/googleapis/synthtool/commit/ba960d730416fe05c50547e975ce79fcee52c671 --- packages/google-cloud-bigtable/.trampolinerc | 1 - packages/google-cloud-bigtable/synth.metadata | 6 +++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/packages/google-cloud-bigtable/.trampolinerc b/packages/google-cloud-bigtable/.trampolinerc index c7d663ae9c57..383b6ec89fbc 100644 --- a/packages/google-cloud-bigtable/.trampolinerc +++ b/packages/google-cloud-bigtable/.trampolinerc @@ -18,7 +18,6 @@ required_envvars+=( "STAGING_BUCKET" "V2_STAGING_BUCKET" - "NOX_SESSION" ) # Add env vars which are passed down into the container here. diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index 7cf1fe83af32..d148b4e01972 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -4,7 +4,7 @@ "git": { "name": ".", "remote": "https://github.com/googleapis/python-bigtable.git", - "sha": "604dde3aa4aa292d7ec9598917ade7acf1c93f5f" + "sha": "227482f8fd5ea500b3677348ee3fa25e486a2163" } }, { @@ -19,14 +19,14 @@ "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "16ec872dd898d7de6e1822badfac32484b5d9031" + "sha": "ba960d730416fe05c50547e975ce79fcee52c671" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "16ec872dd898d7de6e1822badfac32484b5d9031" + "sha": "ba960d730416fe05c50547e975ce79fcee52c671" } } ], From 4bf4e698aaa710e0282ac2ed5e90506fb84af7bc Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Wed, 27 Jan 2021 13:48:39 -0500 Subject: [PATCH 389/892] feat: support filtering on incrementable values (#178) Document that 'regex' may be either bytes or text, and add an explicit test for that. Add 'ExactValueFilter' shortcut, wrapping 'ValueRegexFilter', but also convertin integer values to the equivalent packed 8-octet bytes. Allow integer values for 'ValueRangeFilter', converting them to the equivalent packed 8-octet bytes values. Closes #177. 
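A minimal usage sketch (not part of the patch) of the behavior described above, mirroring the packing rules exercised by the new unit tests and assuming the public import path google.cloud.bigtable.row_filters:

    import struct

    from google.cloud.bigtable.row_filters import ExactValueFilter, ValueRangeFilter

    # Integer values are packed into 8 big-endian bytes (struct ">q"), matching
    # the _PACK_I64 helper added in this commit.
    exact = ExactValueFilter(1)
    assert exact.regex == struct.Struct(">q").pack(1)

    # Integer bounds on ValueRangeFilter are packed the same way; bytes or
    # ASCII-encodable str values continue to be accepted unchanged.
    value_range = ValueRangeFilter(start_value=1, end_value=10)
    assert value_range.start_value == struct.Struct(">q").pack(1)
    assert value_range.end_value == struct.Struct(">q").pack(10)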
--- .../google/cloud/bigtable/row_filters.py | 32 +++++++- .../tests/unit/test_row_filters.py | 75 +++++++++++++++++-- 2 files changed, 99 insertions(+), 8 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_filters.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_filters.py index e8a70a9f4add..973ba9565437 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_filters.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_filters.py @@ -14,11 +14,15 @@ """Filters for Google Cloud Bigtable Row classes.""" +import struct + from google.cloud._helpers import _microseconds_from_datetime from google.cloud._helpers import _to_bytes from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 +_PACK_I64 = struct.Struct(">q").pack + class RowFilter(object): """Basic filter to apply to cells in a row. @@ -115,7 +119,9 @@ class _RegexFilter(RowFilter): .. _RE2 reference: https://github.com/google/re2/wiki/Syntax :type regex: bytes or str - :param regex: A regular expression (RE2) for some row filter. + :param regex: + A regular expression (RE2) for some row filter. String values + will be encoded as ASCII. """ def __init__(self, regex): @@ -439,9 +445,9 @@ class ValueRegexFilter(_RegexFilter): character will not match the new line character ``\\n``, which may be present in a binary value. - :type regex: bytes + :type regex: bytes or str :param regex: A regular expression (RE2) to match cells with values that - match this regex. + match this regex. String values will be encoded as ASCII. """ def to_pb(self): @@ -453,6 +459,22 @@ def to_pb(self): return data_v2_pb2.RowFilter(value_regex_filter=self.regex) +class ExactValueFilter(ValueRegexFilter): + """Row filter for an exact value. + + + :type value: bytes or str or int + :param value: + a literal string encodable as ASCII, or the + equivalent bytes, or an integer (which will be packed into 8-bytes). + """ + + def __init__(self, value): + if isinstance(value, int): + value = _PACK_I64(value) + super(ExactValueFilter, self).__init__(value) + + class ValueRangeFilter(RowFilter): """A range of values to restrict to in a row filter. @@ -496,6 +518,8 @@ def __init__( raise ValueError( "Inclusive start was specified but no " "start value was given." ) + if isinstance(start_value, int): + start_value = _PACK_I64(start_value) self.start_value = start_value self.inclusive_start = inclusive_start @@ -505,6 +529,8 @@ def __init__( raise ValueError( "Inclusive end was specified but no " "end value was given." 
) + if isinstance(end_value, int): + end_value = _PACK_I64(end_value) self.end_value = end_value self.inclusive_end = inclusive_end diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_filters.py b/packages/google-cloud-bigtable/tests/unit/test_row_filters.py index 1c51651d8c44..02a9123188a0 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_filters.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_filters.py @@ -498,9 +498,53 @@ def _get_target_class(): def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) - def test_to_pb(self): - regex = b"value-regex" - row_filter = self._make_one(regex) + def test_to_pb_w_bytes(self): + value = regex = b"value-regex" + row_filter = self._make_one(value) + pb_val = row_filter.to_pb() + expected_pb = _RowFilterPB(value_regex_filter=regex) + self.assertEqual(pb_val, expected_pb) + + def test_to_pb_w_str(self): + value = u"value-regex" + regex = value.encode("ascii") + row_filter = self._make_one(value) + pb_val = row_filter.to_pb() + expected_pb = _RowFilterPB(value_regex_filter=regex) + self.assertEqual(pb_val, expected_pb) + + +class TestExactValueFilter(unittest.TestCase): + @staticmethod + def _get_target_class(): + from google.cloud.bigtable.row_filters import ExactValueFilter + + return ExactValueFilter + + def _make_one(self, *args, **kwargs): + return self._get_target_class()(*args, **kwargs) + + def test_to_pb_w_bytes(self): + value = regex = b"value-regex" + row_filter = self._make_one(value) + pb_val = row_filter.to_pb() + expected_pb = _RowFilterPB(value_regex_filter=regex) + self.assertEqual(pb_val, expected_pb) + + def test_to_pb_w_str(self): + value = u"value-regex" + regex = value.encode("ascii") + row_filter = self._make_one(value) + pb_val = row_filter.to_pb() + expected_pb = _RowFilterPB(value_regex_filter=regex) + self.assertEqual(pb_val, expected_pb) + + def test_to_pb_w_int(self): + import struct + + value = 1 + regex = struct.Struct(">q").pack(value) + row_filter = self._make_one(value) pb_val = row_filter.to_pb() expected_pb = _RowFilterPB(value_regex_filter=regex) self.assertEqual(pb_val, expected_pb) @@ -518,6 +562,7 @@ def _make_one(self, *args, **kwargs): def test_constructor_defaults(self): row_filter = self._make_one() + self.assertIsNone(row_filter.start_value) self.assertIsNone(row_filter.end_value) self.assertTrue(row_filter.inclusive_start) @@ -528,22 +573,42 @@ def test_constructor_explicit(self): end_value = object() inclusive_start = object() inclusive_end = object() + row_filter = self._make_one( start_value=start_value, end_value=end_value, inclusive_start=inclusive_start, inclusive_end=inclusive_end, ) + self.assertIs(row_filter.start_value, start_value) self.assertIs(row_filter.end_value, end_value) self.assertIs(row_filter.inclusive_start, inclusive_start) self.assertIs(row_filter.inclusive_end, inclusive_end) + def test_constructor_w_int_values(self): + import struct + + start_value = 1 + end_value = 10 + + row_filter = self._make_one(start_value=start_value, end_value=end_value) + + expected_start_value = struct.Struct(">q").pack(start_value) + expected_end_value = struct.Struct(">q").pack(end_value) + + self.assertEqual(row_filter.start_value, expected_start_value) + self.assertEqual(row_filter.end_value, expected_end_value) + self.assertTrue(row_filter.inclusive_start) + self.assertTrue(row_filter.inclusive_end) + def test_constructor_bad_start(self): - self.assertRaises(ValueError, self._make_one, inclusive_start=True) + with 
self.assertRaises(ValueError): + self._make_one(inclusive_start=True) def test_constructor_bad_end(self): - self.assertRaises(ValueError, self._make_one, inclusive_end=True) + with self.assertRaises(ValueError): + self._make_one(inclusive_end=True) def test___eq__(self): start_value = object() From ff1f11881f516947e3b9944d7db8d703d97a3bb6 Mon Sep 17 00:00:00 2001 From: Justin Beckwith Date: Wed, 3 Feb 2021 14:06:54 -0800 Subject: [PATCH 390/892] build: migrate to flakybot (#200) --- packages/google-cloud-bigtable/.kokoro/test-samples.sh | 8 ++++---- packages/google-cloud-bigtable/.kokoro/trampoline_v2.sh | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/.kokoro/test-samples.sh b/packages/google-cloud-bigtable/.kokoro/test-samples.sh index 639efd458ff8..4dc285283546 100755 --- a/packages/google-cloud-bigtable/.kokoro/test-samples.sh +++ b/packages/google-cloud-bigtable/.kokoro/test-samples.sh @@ -87,11 +87,11 @@ for file in samples/**/requirements.txt; do python3.6 -m nox -s "$RUN_TESTS_SESSION" EXIT=$? - # If this is a periodic build, send the test log to the Build Cop Bot. - # See https://github.com/googleapis/repo-automation-bots/tree/master/packages/buildcop. + # If this is a periodic build, send the test log to the FlakyBot. + # See https://github.com/googleapis/repo-automation-bots/tree/master/packages/flakybot. if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"periodic"* ]]; then - chmod +x $KOKORO_GFILE_DIR/linux_amd64/buildcop - $KOKORO_GFILE_DIR/linux_amd64/buildcop + chmod +x $KOKORO_GFILE_DIR/linux_amd64/flakybot + $KOKORO_GFILE_DIR/linux_amd64/flakybot fi if [[ $EXIT -ne 0 ]]; then diff --git a/packages/google-cloud-bigtable/.kokoro/trampoline_v2.sh b/packages/google-cloud-bigtable/.kokoro/trampoline_v2.sh index 719bcd5ba84d..4af6cdc26dbc 100755 --- a/packages/google-cloud-bigtable/.kokoro/trampoline_v2.sh +++ b/packages/google-cloud-bigtable/.kokoro/trampoline_v2.sh @@ -159,7 +159,7 @@ if [[ -n "${KOKORO_BUILD_ID:-}" ]]; then "KOKORO_GITHUB_COMMIT" "KOKORO_GITHUB_PULL_REQUEST_NUMBER" "KOKORO_GITHUB_PULL_REQUEST_COMMIT" - # For Build Cop Bot + # For FlakyBot "KOKORO_GITHUB_COMMIT_URL" "KOKORO_GITHUB_PULL_REQUEST_URL" ) From ef79c5b9f1519177a1034c32965994427f08e8e5 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Wed, 3 Feb 2021 14:07:16 -0800 Subject: [PATCH 391/892] chore: Add header checker config to python library synth (#199) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Now that we have it working in [python-docs-samples](https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/.github/header-checker-lint.yml) we should consider adding it to the 🐍 libraries :) Source-Author: Leah E. 
Cole <6719667+leahecole@users.noreply.github.com> Source-Date: Mon Jan 25 13:24:08 2021 -0800 Source-Repo: googleapis/synthtool Source-Sha: 573f7655311b553a937f9123bee17bf78497db95 Source-Link: https://github.com/googleapis/synthtool/commit/573f7655311b553a937f9123bee17bf78497db95 --- .../.github/header-checker-lint.yml | 15 +++++++++++++++ packages/google-cloud-bigtable/synth.metadata | 7 ++++--- 2 files changed, 19 insertions(+), 3 deletions(-) create mode 100644 packages/google-cloud-bigtable/.github/header-checker-lint.yml diff --git a/packages/google-cloud-bigtable/.github/header-checker-lint.yml b/packages/google-cloud-bigtable/.github/header-checker-lint.yml new file mode 100644 index 000000000000..fc281c05bd55 --- /dev/null +++ b/packages/google-cloud-bigtable/.github/header-checker-lint.yml @@ -0,0 +1,15 @@ +{"allowedCopyrightHolders": ["Google LLC"], + "allowedLicenses": ["Apache-2.0", "MIT", "BSD-3"], + "ignoreFiles": ["**/requirements.txt", "**/requirements-test.txt"], + "sourceFileExtensions": [ + "ts", + "js", + "java", + "sh", + "Dockerfile", + "yaml", + "py", + "html", + "txt" + ] +} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index d148b4e01972..5ac347b21c0c 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -4,7 +4,7 @@ "git": { "name": ".", "remote": "https://github.com/googleapis/python-bigtable.git", - "sha": "227482f8fd5ea500b3677348ee3fa25e486a2163" + "sha": "e2213520951d3da97019a1d784e5bf31d94e3353" } }, { @@ -19,14 +19,14 @@ "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "ba960d730416fe05c50547e975ce79fcee52c671" + "sha": "573f7655311b553a937f9123bee17bf78497db95" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "ba960d730416fe05c50547e975ce79fcee52c671" + "sha": "573f7655311b553a937f9123bee17bf78497db95" } } ], @@ -58,6 +58,7 @@ ".github/ISSUE_TEMPLATE/feature_request.md", ".github/ISSUE_TEMPLATE/support_request.md", ".github/PULL_REQUEST_TEMPLATE.md", + ".github/header-checker-lint.yml", ".github/release-please.yml", ".github/snippet-bot.yml", ".gitignore", From 6f6301133caac39bae13aae97a2bb1c3018ce5a9 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Mon, 8 Feb 2021 17:27:07 +0100 Subject: [PATCH 392/892] chore(deps): update dependency google-cloud-core to v1.6.0 (#204) --- packages/google-cloud-bigtable/samples/beam/requirements.txt | 2 +- packages/google-cloud-bigtable/samples/hello/requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index 481d8fdfb60f..cb0825c6f0bf 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ apache-beam==2.27.0 google-cloud-bigtable==1.6.1 -google-cloud-core==1.5.0 \ No newline at end of file +google-cloud-core==1.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/hello/requirements.txt b/packages/google-cloud-bigtable/samples/hello/requirements.txt index d1e7037cfc44..fa1ec85d7c68 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements.txt @@ -1,2 +1,2 @@ google-cloud-bigtable==1.6.1 
-google-cloud-core==1.5.0 +google-cloud-core==1.6.0 From cfee00554a00dea8dd1f1071ae415e60041ee2c6 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Mon, 8 Feb 2021 08:27:55 -0800 Subject: [PATCH 393/892] chore: upstream synth template changes (#205) * changes without context autosynth cannot find the source of changes triggered by earlier changes in this repository, or by version upgrades to tools such as linters. * build: migrate to flakybot Source-Author: Justin Beckwith Source-Date: Thu Jan 28 22:22:38 2021 -0800 Source-Repo: googleapis/synthtool Source-Sha: d1bb9173100f62c0cfc8f3138b62241e7f47ca6a Source-Link: https://github.com/googleapis/synthtool/commit/d1bb9173100f62c0cfc8f3138b62241e7f47ca6a * chore(python): include py.typed files in release A py.typed file must be included in the released package for it to be considered typed by type checkers. https://www.python.org/dev/peps/pep-0561/#packaging-type-information. See https://github.com/googleapis/python-secret-manager/issues/79 Source-Author: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Source-Date: Fri Feb 5 17:32:06 2021 -0700 Source-Repo: googleapis/synthtool Source-Sha: 33366574ffb9e11737b3547eb6f020ecae0536e8 Source-Link: https://github.com/googleapis/synthtool/commit/33366574ffb9e11737b3547eb6f020ecae0536e8 --- packages/google-cloud-bigtable/MANIFEST.in | 4 ++-- packages/google-cloud-bigtable/synth.metadata | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/MANIFEST.in b/packages/google-cloud-bigtable/MANIFEST.in index e9e29d12033d..e783f4c6209b 100644 --- a/packages/google-cloud-bigtable/MANIFEST.in +++ b/packages/google-cloud-bigtable/MANIFEST.in @@ -16,10 +16,10 @@ # Generated by synthtool. DO NOT EDIT! include README.rst LICENSE -recursive-include google *.json *.proto +recursive-include google *.json *.proto py.typed recursive-include tests * global-exclude *.py[co] global-exclude __pycache__ # Exclude scripts for samples readmegen -prune scripts/readme-gen \ No newline at end of file +prune scripts/readme-gen diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index 5ac347b21c0c..5a4e4dabb9a3 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -4,7 +4,7 @@ "git": { "name": ".", "remote": "https://github.com/googleapis/python-bigtable.git", - "sha": "e2213520951d3da97019a1d784e5bf31d94e3353" + "sha": "d5624621647feb3a71a2509f5d29b68ac94cf17a" } }, { @@ -19,14 +19,14 @@ "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "573f7655311b553a937f9123bee17bf78497db95" + "sha": "33366574ffb9e11737b3547eb6f020ecae0536e8" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "573f7655311b553a937f9123bee17bf78497db95" + "sha": "33366574ffb9e11737b3547eb6f020ecae0536e8" } } ], From c5363bdeb52f6e3f58aa7ac37fde0003ca0b14e6 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Tue, 9 Feb 2021 07:23:14 -0800 Subject: [PATCH 394/892] docs: update python contributing guide (#206) Adds details about blacken, updates version for system tests, and shows how to pass through pytest arguments. 
Source-Author: Chris Cotter Source-Date: Mon Feb 8 17:13:36 2021 -0500 Source-Repo: googleapis/synthtool Source-Sha: 4679e7e415221f03ff2a71e3ffad75b9ec41d87e Source-Link: https://github.com/googleapis/synthtool/commit/4679e7e415221f03ff2a71e3ffad75b9ec41d87e --- .../google-cloud-bigtable/CONTRIBUTING.rst | 22 +++++++++++++++---- packages/google-cloud-bigtable/synth.metadata | 6 ++--- 2 files changed, 21 insertions(+), 7 deletions(-) diff --git a/packages/google-cloud-bigtable/CONTRIBUTING.rst b/packages/google-cloud-bigtable/CONTRIBUTING.rst index c1edbeca0afb..d68622f60b2a 100644 --- a/packages/google-cloud-bigtable/CONTRIBUTING.rst +++ b/packages/google-cloud-bigtable/CONTRIBUTING.rst @@ -70,9 +70,14 @@ We use `nox `__ to instrument our tests. - To test your changes, run unit tests with ``nox``:: $ nox -s unit-2.7 - $ nox -s unit-3.7 + $ nox -s unit-3.8 $ ... +- Args to pytest can be passed through the nox command separated by a `--`. For + example, to run a single test:: + + $ nox -s unit-3.8 -- -k + .. note:: The unit tests and system tests are described in the @@ -93,8 +98,12 @@ On Debian/Ubuntu:: ************ Coding Style ************ +- We use the automatic code formatter ``black``. You can run it using + the nox session ``blacken``. This will eliminate many lint errors. Run via:: + + $ nox -s blacken -- PEP8 compliance, with exceptions defined in the linter configuration. +- PEP8 compliance is required, with exceptions defined in the linter configuration. If you have ``nox`` installed, you can test that you have not introduced any non-compliant code via:: @@ -133,13 +142,18 @@ Running System Tests - To run system tests, you can execute:: - $ nox -s system-3.7 + # Run all system tests + $ nox -s system-3.8 $ nox -s system-2.7 + # Run a single system test + $ nox -s system-3.8 -- -k + + .. note:: System tests are only configured to run under Python 2.7 and - Python 3.7. For expediency, we do not run them in older versions + Python 3.8. For expediency, we do not run them in older versions of Python 3. This alone will not run the tests. You'll need to change some local diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index 5a4e4dabb9a3..4416e5d4efa5 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -4,7 +4,7 @@ "git": { "name": ".", "remote": "https://github.com/googleapis/python-bigtable.git", - "sha": "d5624621647feb3a71a2509f5d29b68ac94cf17a" + "sha": "6fe87016a159bbdc6bf29856b1cf6e633e16216a" } }, { @@ -19,14 +19,14 @@ "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "33366574ffb9e11737b3547eb6f020ecae0536e8" + "sha": "4679e7e415221f03ff2a71e3ffad75b9ec41d87e" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "33366574ffb9e11737b3547eb6f020ecae0536e8" + "sha": "4679e7e415221f03ff2a71e3ffad75b9ec41d87e" } } ], From fd184de3f4d207f31fb26b88fa41b9da975196c3 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 9 Feb 2021 15:58:05 -0500 Subject: [PATCH 395/892] chore: add yoshi-python to CODEWONERS (#207) Per discussion in today's Python libraries call. 
--- packages/google-cloud-bigtable/.github/CODEOWNERS | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/CODEOWNERS b/packages/google-cloud-bigtable/.github/CODEOWNERS index 76c8b03b9d03..dc38a1e1d1d0 100644 --- a/packages/google-cloud-bigtable/.github/CODEOWNERS +++ b/packages/google-cloud-bigtable/.github/CODEOWNERS @@ -7,5 +7,5 @@ # The api-bigtable team is the default owner for anything not # explicitly taken by someone else. -* @googleapis/api-bigtable -/samples/ @googleapis/api-bigtable @googleapis/python-samples-owners \ No newline at end of file +* @googleapis/api-bigtable @googleapis/yoshi-python +/samples/ @googleapis/api-bigtable @googleapis/python-samples-owners From 2eb0c70a4e230e3bf2a295f639d7c189fa011c3e Mon Sep 17 00:00:00 2001 From: HemangChothani <50404902+HemangChothani@users.noreply.github.com> Date: Tue, 9 Feb 2021 16:02:38 -0500 Subject: [PATCH 396/892] feat: add keep alive timeout (#182) * feat: add keep alive timeout * feat: override channel settings --- .../google/cloud/bigtable/client.py | 61 +++++++++++++++++-- .../tests/unit/test_client.py | 36 ++++++++--- 2 files changed, 84 insertions(+), 13 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py index 05f3b7761c44..703a1bd60c83 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py @@ -35,6 +35,11 @@ from google.cloud import bigtable_v2 from google.cloud import bigtable_admin_v2 +from google.cloud.bigtable_v2.gapic.transports import bigtable_grpc_transport +from google.cloud.bigtable_admin_v2.gapic.transports import ( + bigtable_table_admin_grpc_transport, + bigtable_instance_admin_grpc_transport, +) from google.cloud.bigtable import __version__ from google.cloud.bigtable.instance import Instance @@ -60,13 +65,14 @@ """Scope for reading table data.""" -def _create_gapic_client(client_class, client_options=None): +def _create_gapic_client(client_class, client_options=None, transport=None): def inner(self): if self._emulator_host is None: return client_class( - credentials=self._credentials, + credentials=None, client_info=self._client_info, client_options=client_options, + transport=transport, ) else: return client_class( @@ -161,7 +167,13 @@ def __init__( self._emulator_channel = None if self._emulator_host is not None: - self._emulator_channel = grpc.insecure_channel(self._emulator_host) + self._emulator_channel = grpc.insecure_channel( + target=self._emulator_host, + options={ + "grpc.keepalive_time_ms": 30000, + "grpc.keepalive_timeout_ms": 10000, + }.items(), + ) if channel is not None: warnings.warn( @@ -196,6 +208,29 @@ def _get_scopes(self): return scopes + def _create_gapic_client_channel(self, client_class, grpc_transport): + if self._client_options and self._client_options.api_endpoint: + api_endpoint = self._client_options.api_endpoint + else: + api_endpoint = client_class.SERVICE_ADDRESS + + channel = grpc_transport.create_channel( + api_endpoint, + self._credentials, + options={ + "grpc.max_send_message_length": -1, + "grpc.max_receive_message_length": -1, + "grpc.keepalive_time_ms": 30000, + "grpc.keepalive_timeout_ms": 10000, + }.items(), + ) + transport = grpc_transport( + address=api_endpoint, + channel=channel, + credentials=None, + ) + return transport + @property def project_path(self): """Project name to be used with Instance Admin API. 
@@ -236,8 +271,14 @@ def table_data_client(self): :returns: A BigtableClient object. """ if self._table_data_client is None: + transport = self._create_gapic_client_channel( + bigtable_v2.BigtableClient, + bigtable_grpc_transport.BigtableGrpcTransport, + ) klass = _create_gapic_client( - bigtable_v2.BigtableClient, client_options=self._client_options + bigtable_v2.BigtableClient, + client_options=self._client_options, + transport=transport, ) self._table_data_client = klass(self) return self._table_data_client @@ -262,9 +303,15 @@ def table_admin_client(self): if self._table_admin_client is None: if not self._admin: raise ValueError("Client is not an admin client.") + + transport = self._create_gapic_client_channel( + bigtable_admin_v2.BigtableTableAdminClient, + bigtable_table_admin_grpc_transport.BigtableTableAdminGrpcTransport, + ) klass = _create_gapic_client( bigtable_admin_v2.BigtableTableAdminClient, client_options=self._admin_client_options, + transport=transport, ) self._table_admin_client = klass(self) return self._table_admin_client @@ -289,9 +336,15 @@ def instance_admin_client(self): if self._instance_admin_client is None: if not self._admin: raise ValueError("Client is not an admin client.") + + transport = self._create_gapic_client_channel( + bigtable_admin_v2.BigtableInstanceAdminClient, + bigtable_instance_admin_grpc_transport.BigtableInstanceAdminGrpcTransport, + ) klass = _create_gapic_client( bigtable_admin_v2.BigtableInstanceAdminClient, client_options=self._admin_client_options, + transport=transport, ) self._instance_admin_client = klass(self) return self._instance_admin_client diff --git a/packages/google-cloud-bigtable/tests/unit/test_client.py b/packages/google-cloud-bigtable/tests/unit/test_client.py index 204e1a5c151b..21ec479d0799 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/test_client.py @@ -31,14 +31,16 @@ def test_wo_emulator(self): credentials = _make_credentials() client = _Client(credentials) client_info = client._client_info = mock.Mock() + transport = mock.Mock() - result = self._invoke_client_factory(client_class)(client) + result = self._invoke_client_factory(client_class, transport=transport)(client) self.assertIs(result, client_class.return_value) client_class.assert_called_once_with( - credentials=client._credentials, + credentials=None, client_info=client_info, client_options=None, + transport=transport, ) def test_wo_emulator_w_client_options(self): @@ -47,16 +49,18 @@ def test_wo_emulator_w_client_options(self): client = _Client(credentials) client_info = client._client_info = mock.Mock() client_options = mock.Mock() + transport = mock.Mock() result = self._invoke_client_factory( - client_class, client_options=client_options + client_class, client_options=client_options, transport=transport )(client) self.assertIs(result, client_class.return_value) client_class.assert_called_once_with( - credentials=client._credentials, + credentials=None, client_info=client_info, client_options=client_options, + transport=transport, ) def test_w_emulator(self): @@ -170,7 +174,13 @@ def test_constructor_with_emulator_host(self): self.assertEqual(client._emulator_host, emulator_host) self.assertIs(client._emulator_channel, factory.return_value) - factory.assert_called_once_with(emulator_host) + factory.assert_called_once_with( + target=emulator_host, + options={ + "grpc.keepalive_time_ms": 30000, + "grpc.keepalive_timeout_ms": 10000, + }.items(), + ) 
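# Illustrative sketch (not part of this patch): the keepalive settings asserted
# above are plain gRPC channel arguments. Assuming the standalone grpc package
# and a hypothetical local endpoint, an equivalent channel could be built as:
import grpc

keepalive_options = [
    ("grpc.keepalive_time_ms", 30000),  # send a keepalive ping every 30 seconds
    ("grpc.keepalive_timeout_ms", 10000),  # close the connection if the ping is not acknowledged within 10 seconds
]
channel = grpc.insecure_channel("localhost:8086", options=keepalive_options)  # endpoint shown is hypothetical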
getenv.assert_called_once_with(BIGTABLE_EMULATOR) def test__get_scopes_default(self): @@ -234,7 +244,9 @@ def test_table_data_client_not_initialized_w_client_options(self): from google.api_core.client_options import ClientOptions credentials = _make_credentials() - client_options = ClientOptions(quota_project_id="QUOTA-PROJECT") + client_options = ClientOptions( + quota_project_id="QUOTA-PROJECT", api_endpoint="xyz" + ) client = self._make_one( project=self.PROJECT, credentials=credentials, client_options=client_options ) @@ -245,9 +257,11 @@ def test_table_data_client_not_initialized_w_client_options(self): self.assertIs(table_data_client, mocked.return_value) self.assertIs(client._table_data_client, table_data_client) + mocked.assert_called_once_with( client_info=client._client_info, - credentials=mock.ANY, # added scopes + credentials=None, + transport=mock.ANY, client_options=client_options, ) @@ -308,6 +322,7 @@ def test_table_admin_client_not_initialized_w_client_options(self): admin_client_options=admin_client_options, ) + client._create_gapic_client_channel = mock.Mock() patch = mock.patch("google.cloud.bigtable_admin_v2.BigtableTableAdminClient") with patch as mocked: table_admin_client = client.table_admin_client @@ -316,7 +331,8 @@ def test_table_admin_client_not_initialized_w_client_options(self): self.assertIs(client._table_admin_client, table_admin_client) mocked.assert_called_once_with( client_info=client._client_info, - credentials=mock.ANY, # added scopes + credentials=None, + transport=mock.ANY, client_options=admin_client_options, ) @@ -377,6 +393,7 @@ def test_instance_admin_client_not_initialized_w_client_options(self): admin_client_options=admin_client_options, ) + client._create_gapic_client_channel = mock.Mock() patch = mock.patch("google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient") with patch as mocked: instance_admin_client = client.instance_admin_client @@ -385,7 +402,8 @@ def test_instance_admin_client_not_initialized_w_client_options(self): self.assertIs(client._instance_admin_client, instance_admin_client) mocked.assert_called_once_with( client_info=client._client_info, - credentials=mock.ANY, # added scopes + credentials=None, + transport=mock.ANY, client_options=admin_client_options, ) From e97b610f8a11358d0825574d9f686154dd75c959 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 9 Feb 2021 16:05:26 -0500 Subject: [PATCH 397/892] tests: harden systest further against instance creation timeout (#209) Closes #208 --- packages/google-cloud-bigtable/tests/system.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index 92bd582a3138..daf644ea28a4 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -288,7 +288,7 @@ def test_create_instance_w_two_clusters(self): self.instances_to_delete.append(instance) # We want to make sure the operation completes. - operation.result(timeout=60) + operation.result(timeout=120) # Create a new instance instance and make sure it is the same. 
instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID) From 9dfec58c801ec774b08c71c68a43d4ffae674ece Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Tue, 9 Feb 2021 14:07:02 -0800 Subject: [PATCH 398/892] chore: release 1.7.0 (#198) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- packages/google-cloud-bigtable/CHANGELOG.md | 18 ++++++++++++++++++ packages/google-cloud-bigtable/setup.py | 2 +- 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index f4d6a8815494..91c791aff011 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,24 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [1.7.0](https://www.github.com/googleapis/python-bigtable/compare/v1.6.1...v1.7.0) (2021-02-09) + + +### Features + +* add keep alive timeout ([#182](https://www.github.com/googleapis/python-bigtable/issues/182)) ([e9637cb](https://www.github.com/googleapis/python-bigtable/commit/e9637cbd4461dcca509dca43ef116d6ff41b80c7)) +* support filtering on incrementable values ([#178](https://www.github.com/googleapis/python-bigtable/issues/178)) ([e221352](https://www.github.com/googleapis/python-bigtable/commit/e2213520951d3da97019a1d784e5bf31d94e3353)) + + +### Bug Fixes + +* Renaming region tags to not conflict with documentation snippets ([#190](https://www.github.com/googleapis/python-bigtable/issues/190)) ([dd0cdc5](https://www.github.com/googleapis/python-bigtable/commit/dd0cdc5bcfd92e18ab9a7255684a9f5b21198867)) + + +### Documentation + +* update python contributing guide ([#206](https://www.github.com/googleapis/python-bigtable/issues/206)) ([e301ac3](https://www.github.com/googleapis/python-bigtable/commit/e301ac3b61364d779fdb50a57ae8e2cb9952df9e)) + ### [1.6.1](https://www.github.com/googleapis/python-bigtable/compare/v1.6.0...v1.6.1) (2020-12-01) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 48ef1c70df00..bfb6240f5143 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -22,7 +22,7 @@ name = 'google-cloud-bigtable' description = 'Google Cloud Bigtable API client library' -version = "1.6.1" +version = "1.7.0" # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' From d00667a54723c639f3790818277bbf01a031b634 Mon Sep 17 00:00:00 2001 From: kolea2 <45548808+kolea2@users.noreply.github.com> Date: Thu, 11 Feb 2021 10:12:46 -0500 Subject: [PATCH 399/892] feat!: microgenerator changes (#203) Release-As: v2.0.0-dev1 * wip microgenerator changes * fix unit tests * fix system tests * lint * fixup after update * fix test * regen * run fixup script * run fixup script admin * add scripts to build * regenerate * update synth * fix tests * more test cleanup * fix mutate rows * fix backups test * fix tests * fix docs and lint * fix docs and lint * temporarily put coverage at 99 * code feedback * move coverage to 99 * pin beam bigtable deps to 1.0.0 * pin beam bigtable deps to 1.0.0 * fix imports * fixup keepalive config * lint * cleanup * cleanup * cleanup --- packages/google-cloud-bigtable/.coveragerc | 4 +- packages/google-cloud-bigtable/docs/conf.py | 5 +- .../google-cloud-bigtable/docs/snippets.py | 8 +- .../google/cloud/bigtable/app_profile.py | 42 +- .../google/cloud/bigtable/backup.py | 76 
+- .../google/cloud/bigtable/client.py | 57 +- .../google/cloud/bigtable/cluster.py | 28 +- .../google/cloud/bigtable/column_family.py | 17 +- .../google/cloud/bigtable/enums.py | 52 +- .../google/cloud/bigtable/instance.py | 56 +- .../google/cloud/bigtable/row.py | 4 +- .../google/cloud/bigtable/row_data.py | 18 +- .../google/cloud/bigtable/row_filters.py | 2 +- .../google/cloud/bigtable/row_set.py | 2 +- .../google/cloud/bigtable/table.py | 84 +- .../cloud/bigtable_admin_v2/__init__.py | 167 +- .../cloud/bigtable_admin_v2/gapic/__init__.py | 0 .../gapic/bigtable_instance_admin_client.py | 1919 ------ .../bigtable_instance_admin_client_config.py | 136 - .../gapic/bigtable_table_admin_client.py | 2336 ------- .../bigtable_table_admin_client_config.py | 160 - .../gapic/transports/__init__.py | 0 .../bigtable_instance_admin_grpc_transport.py | 380 -- .../bigtable_table_admin_grpc_transport.py | 471 -- .../cloud/bigtable_admin_v2/proto/__init__.py | 0 .../proto/bigtable_cluster_data.proto | 94 - .../proto/bigtable_cluster_service.proto | 130 - .../bigtable_cluster_service_messages.proto | 141 - .../proto/bigtable_instance_admin.proto | 11 +- .../proto/bigtable_instance_admin_pb2.py | 2434 ------- .../proto/bigtable_instance_admin_pb2_grpc.py | 880 --- .../proto/bigtable_table_admin.proto | 327 +- .../proto/bigtable_table_admin_pb2.py | 3578 ---------- .../proto/bigtable_table_admin_pb2_grpc.py | 1083 --- .../proto/bigtable_table_data.proto | 126 - .../proto/bigtable_table_service.proto | 80 - .../bigtable_table_service_messages.proto | 116 - .../bigtable_admin_v2/proto/common_pb2.py | 190 - .../proto/common_pb2_grpc.py | 3 - .../bigtable_admin_v2/proto/instance_pb2.py | 893 --- .../proto/instance_pb2_grpc.py | 3 - .../bigtable_admin_v2/proto/table_pb2.py | 1694 ----- .../bigtable_admin_v2/proto/table_pb2_grpc.py | 3 - .../google/cloud/bigtable_admin_v2/py.typed | 2 + .../bigtable_admin_v2/services/__init__.py | 16 + .../bigtable_instance_admin/__init__.py | 24 + .../bigtable_instance_admin/async_client.py | 1935 ++++++ .../bigtable_instance_admin/client.py | 2069 ++++++ .../bigtable_instance_admin/pagers.py | 153 + .../transports/__init__.py | 37 + .../transports/base.py | 491 ++ .../transports/grpc.py | 794 +++ .../transports/grpc_asyncio.py | 822 +++ .../bigtable_table_admin}/__init__.py | 21 +- .../bigtable_table_admin/async_client.py | 2284 +++++++ .../services/bigtable_table_admin/client.py | 2473 +++++++ .../services/bigtable_table_admin/pagers.py | 405 ++ .../transports/__init__.py | 37 + .../bigtable_table_admin/transports/base.py | 517 ++ .../bigtable_table_admin/transports/grpc.py | 944 +++ .../transports/grpc_asyncio.py | 962 +++ .../google/cloud/bigtable_admin_v2/types.py | 76 - .../cloud/bigtable_admin_v2/types/__init__.py | 158 + .../types/bigtable_instance_admin.py | 530 ++ .../types/bigtable_table_admin.py | 912 +++ .../cloud/bigtable_admin_v2/types/common.py | 58 + .../cloud/bigtable_admin_v2/types/instance.py | 209 + .../cloud/bigtable_admin_v2/types/table.py | 376 + .../google/cloud/bigtable_v2/__init__.py | 75 +- .../cloud/bigtable_v2/gapic/__init__.py | 0 .../bigtable_v2/gapic/bigtable_client.py | 779 --- .../gapic/bigtable_client_config.py | 80 - .../bigtable_v2/gapic/transports/__init__.py | 0 .../transports/bigtable_grpc_transport.py | 207 - .../cloud/bigtable_v2/proto/__init__.py | 0 .../proto/bigtable_cluster_data.proto | 94 - .../proto/bigtable_cluster_service.proto | 130 - .../bigtable_cluster_service_messages.proto | 141 - .../bigtable_v2/proto/bigtable_data.proto 
| 516 -- .../proto/bigtable_instance_admin.proto | 456 -- .../cloud/bigtable_v2/proto/bigtable_pb2.py | 1804 ----- .../bigtable_v2/proto/bigtable_pb2_grpc.py | 313 - .../bigtable_v2/proto/bigtable_service.proto | 91 - .../proto/bigtable_service_messages.proto | 218 - .../proto/bigtable_table_admin.proto | 525 -- .../proto/bigtable_table_data.proto | 126 - .../proto/bigtable_table_service.proto | 80 - .../bigtable_table_service_messages.proto | 116 - .../cloud/bigtable_v2/proto/common.proto | 41 - .../cloud/bigtable_v2/proto/data_pb2.py | 2672 -------- .../cloud/bigtable_v2/proto/data_pb2_grpc.py | 3 - .../cloud/bigtable_v2/proto/instance.proto | 208 - .../cloud/bigtable_v2/proto/table.proto | 221 - .../google/cloud/bigtable_v2/py.typed | 2 + .../cloud/bigtable_v2/services/__init__.py | 16 + .../bigtable_v2/services/bigtable/__init__.py | 24 + .../services/bigtable/async_client.py | 865 +++ .../bigtable_v2/services/bigtable/client.py | 1041 +++ .../services/bigtable/transports/__init__.py | 35 + .../services/bigtable/transports/base.py | 254 + .../services/bigtable/transports/grpc.py | 432 ++ .../bigtable/transports/grpc_asyncio.py | 440 ++ .../google/cloud/bigtable_v2/types.py | 54 - .../cloud/bigtable_v2/types/__init__.py | 72 + .../cloud/bigtable_v2/types/bigtable.py | 463 ++ .../google/cloud/bigtable_v2/types/data.py | 728 ++ packages/google-cloud-bigtable/noxfile.py | 118 +- .../samples/beam/noxfile.py | 3 +- .../samples/beam/requirements.txt | 2 +- .../fixup_bigtable_admin_v2_keywords.py | 216 + .../scripts/fixup_bigtable_v2_keywords.py | 184 + packages/google-cloud-bigtable/setup.py | 63 +- packages/google-cloud-bigtable/synth.py | 55 +- .../google-cloud-bigtable/tests/system.py | 29 +- .../unit/gapic/bigtable_admin_v2/__init__.py | 1 + .../test_bigtable_instance_admin.py | 5316 +++++++++++++++ .../test_bigtable_table_admin.py | 6067 +++++++++++++++++ .../tests/unit/gapic/bigtable_v2/__init__.py | 1 + .../unit/gapic/bigtable_v2/test_bigtable.py | 2372 +++++++ .../unit/gapic/v2/test_bigtable_client_v2.py | 316 - .../test_bigtable_instance_admin_client_v2.py | 924 --- .../v2/test_bigtable_table_admin_client_v2.py | 1039 --- .../tests/unit/test_app_profile.py | 210 +- .../tests/unit/test_backup.py | 246 +- .../tests/unit/test_client.py | 58 +- .../tests/unit/test_cluster.py | 143 +- .../tests/unit/test_column_family.py | 58 +- .../tests/unit/test_instance.py | 261 +- .../tests/unit/test_policy.py | 6 +- .../tests/unit/test_row.py | 50 +- .../tests/unit/test_row_data.py | 129 +- .../tests/unit/test_row_filters.py | 14 +- .../tests/unit/test_row_set.py | 4 +- .../tests/unit/test_table.py | 591 +- 134 files changed, 36477 insertions(+), 29436 deletions(-) delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/__init__.py delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client_config.py delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/__init__.py delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py delete 
mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/__init__.py delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_cluster_data.proto delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_cluster_service.proto delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_cluster_service_messages.proto delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_data.proto delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_service.proto delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_service_messages.proto delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2.py delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2_grpc.py delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2_grpc.py delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2_grpc.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/py.typed create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/__init__.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py rename packages/google-cloud-bigtable/google/{ => cloud/bigtable_admin_v2/services/bigtable_table_admin}/__init__.py (69%) create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py create mode 100644 
packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/common.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/__init__.py delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client_config.py delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/transports/__init__.py delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/__init__.py delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_cluster_data.proto delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_cluster_service.proto delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_cluster_service_messages.proto delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_data.proto delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_instance_admin.proto delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2_grpc.py delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_service.proto delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_service_messages.proto delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_table_admin.proto delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_table_data.proto delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_table_service.proto delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_table_service_messages.proto delete mode 100644 
packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/common.proto delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2_grpc.py delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/instance.proto delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/table.proto create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/py.typed create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/__init__.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/__init__.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/types.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py create mode 100644 packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py create mode 100644 packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py create mode 100644 packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/__init__.py create mode 100644 packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py create mode 100644 packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py create mode 100644 packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/__init__.py create mode 100644 packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py delete mode 100644 packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_client_v2.py delete mode 100644 packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py delete mode 100644 packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py diff --git a/packages/google-cloud-bigtable/.coveragerc b/packages/google-cloud-bigtable/.coveragerc index 0d8e6297dc9c..b11c3eaa34ed 100644 --- a/packages/google-cloud-bigtable/.coveragerc +++ b/packages/google-cloud-bigtable/.coveragerc @@ -21,7 +21,7 @@ omit = google/cloud/__init__.py [report] -fail_under = 100 +fail_under = 99 show_missing = True exclude_lines = # Re-enable the standard pragma @@ -30,6 +30,8 @@ exclude_lines = def __repr__ # Ignore abstract methods raise NotImplementedError + # Ignore setuptools-less fallback + except pkg_resources.DistributionNotFound: omit = */gapic/*.py */proto/*.py diff --git a/packages/google-cloud-bigtable/docs/conf.py b/packages/google-cloud-bigtable/docs/conf.py index ef2392b38c3c..dc4b4d822f8c 100644 --- 
a/packages/google-cloud-bigtable/docs/conf.py +++ b/packages/google-cloud-bigtable/docs/conf.py @@ -347,10 +347,7 @@ intersphinx_mapping = { "python": ("https://python.readthedocs.org/en/latest/", None), "google-auth": ("https://googleapis.dev/python/google-auth/latest/", None), - "google.api_core": ( - "https://googleapis.dev/python/google-api-core/latest/", - None, - ), + "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None,), "grpc": ("https://grpc.github.io/grpc/python/", None), "proto-plus": ("https://proto-plus-python.readthedocs.io/en/latest/", None), } diff --git a/packages/google-cloud-bigtable/docs/snippets.py b/packages/google-cloud-bigtable/docs/snippets.py index dda59079d511..eeb39c3bb32c 100644 --- a/packages/google-cloud-bigtable/docs/snippets.py +++ b/packages/google-cloud-bigtable/docs/snippets.py @@ -704,13 +704,13 @@ def test_bigtable_cluster_name(): def test_bigtable_instance_from_pb(): # [START bigtable_api_instance_from_pb] from google.cloud.bigtable import Client - from google.cloud.bigtable_admin_v2.types import instance_pb2 + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 client = Client(admin=True) instance = client.instance(INSTANCE_ID) name = instance.name - instance_pb = instance_pb2.Instance( + instance_pb = data_v2_pb2.Instance( name=name, display_name=INSTANCE_ID, type=PRODUCTION, labels=LABELS ) @@ -723,7 +723,7 @@ def test_bigtable_instance_from_pb(): def test_bigtable_cluster_from_pb(): # [START bigtable_api_cluster_from_pb] from google.cloud.bigtable import Client - from google.cloud.bigtable_admin_v2.types import instance_pb2 + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 client = Client(admin=True) instance = client.instance(INSTANCE_ID) @@ -732,7 +732,7 @@ def test_bigtable_cluster_from_pb(): name = cluster.name cluster_state = cluster.state serve_nodes = 1 - cluster_pb = instance_pb2.Cluster( + cluster_pb = data_v2_pb2.Cluster( name=name, location=LOCATION_ID, state=cluster_state, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/app_profile.py b/packages/google-cloud-bigtable/google/cloud/bigtable/app_profile.py index ebf817c4ede0..5d6dbdb81a1e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/app_profile.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/app_profile.py @@ -18,7 +18,7 @@ import re from google.cloud.bigtable.enums import RoutingPolicyType -from google.cloud.bigtable_admin_v2.types import instance_pb2 +from google.cloud.bigtable_admin_v2.types import instance from google.protobuf import field_mask_pb2 from google.api_core.exceptions import NotFound @@ -138,7 +138,7 @@ def __ne__(self, other): def from_pb(cls, app_profile_pb, instance): """Creates an instance app_profile from a protobuf. - :type app_profile_pb: :class:`instance_pb2.app_profile_pb` + :type app_profile_pb: :class:`instance.app_profile_pb` :param app_profile_pb: An instance protobuf object. 
:type instance: :class:`google.cloud.bigtable.instance.Instance` @@ -188,7 +188,7 @@ def _update_from_pb(self, app_profile_pb): self.description = app_profile_pb.description routing_policy_type = None - if app_profile_pb.HasField("multi_cluster_routing_use_any"): + if app_profile_pb._pb.HasField("multi_cluster_routing_use_any"): routing_policy_type = RoutingPolicyType.ANY self.allow_transactional_writes = False else: @@ -201,7 +201,7 @@ def _update_from_pb(self, app_profile_pb): def _to_pb(self): """Create an AppProfile proto buff message for API calls - :rtype: :class:`.instance_pb2.AppProfile` + :rtype: :class:`.instance.AppProfile` :returns: The converted current object. :raises: :class:`ValueError ` if the AppProfile @@ -215,15 +215,15 @@ def _to_pb(self): if self.routing_policy_type == RoutingPolicyType.ANY: multi_cluster_routing_use_any = ( - instance_pb2.AppProfile.MultiClusterRoutingUseAny() + instance.AppProfile.MultiClusterRoutingUseAny() ) else: - single_cluster_routing = instance_pb2.AppProfile.SingleClusterRouting( + single_cluster_routing = instance.AppProfile.SingleClusterRouting( cluster_id=self.cluster_id, allow_transactional_writes=self.allow_transactional_writes, ) - app_profile_pb = instance_pb2.AppProfile( + app_profile_pb = instance.AppProfile( name=self.name, description=self.description, multi_cluster_routing_use_any=multi_cluster_routing_use_any, @@ -242,7 +242,9 @@ def reload(self): :dedent: 4 """ - app_profile_pb = self.instance_admin_client.get_app_profile(self.name) + app_profile_pb = self.instance_admin_client.get_app_profile( + request={"name": self.name} + ) # NOTE: _update_from_pb does not check that the project and # app_profile ID on the response match the request. @@ -262,7 +264,7 @@ def exists(self): :returns: True if the AppProfile exists, else False. """ try: - self.instance_admin_client.get_app_profile(self.name) + self.instance_admin_client.get_app_profile(request={"name": self.name}) return True # NOTE: There could be other exceptions that are returned to the user. except NotFound: @@ -291,10 +293,12 @@ def create(self, ignore_warnings=None): """ return self.from_pb( self.instance_admin_client.create_app_profile( - parent=self._instance.name, - app_profile_id=self.app_profile_id, - app_profile=self._to_pb(), - ignore_warnings=ignore_warnings, + request={ + "parent": self._instance.name, + "app_profile_id": self.app_profile_id, + "app_profile": self._to_pb(), + "ignore_warnings": ignore_warnings, + } ), self._instance, ) @@ -328,9 +332,11 @@ def update(self, ignore_warnings=None): update_mask_pb.paths.append("single_cluster_routing") return self.instance_admin_client.update_app_profile( - app_profile=self._to_pb(), - update_mask=update_mask_pb, - ignore_warnings=ignore_warnings, + request={ + "app_profile": self._to_pb(), + "update_mask": update_mask_pb, + "ignore_warnings": ignore_warnings, + } ) def delete(self, ignore_warnings=None): @@ -352,4 +358,6 @@ def delete(self, ignore_warnings=None): If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. 
""" - self.instance_admin_client.delete_app_profile(self.name, ignore_warnings) + self.instance_admin_client.delete_app_profile( + request={"name": self.name, "ignore_warnings": ignore_warnings} + ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/backup.py b/packages/google-cloud-bigtable/google/cloud/bigtable/backup.py index 291ac783ad41..6dead1f74c64 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/backup.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/backup.py @@ -17,10 +17,8 @@ import re from google.cloud._helpers import _datetime_to_pb_timestamp -from google.cloud.bigtable_admin_v2.gapic.bigtable_table_admin_client import ( - BigtableTableAdminClient, -) -from google.cloud.bigtable_admin_v2.types import table_pb2 +from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient +from google.cloud.bigtable_admin_v2.types import table from google.cloud.bigtable.policy import Policy from google.cloud.exceptions import NotFound from google.protobuf import field_mask_pb2 @@ -220,7 +218,7 @@ def state(self): def from_pb(cls, backup_pb, instance): """Creates a Backup instance from a protobuf message. - :type backup_pb: :class:`table_pb2.Backup` + :type backup_pb: :class:`table.Backup` :param backup_pb: A Backup protobuf object. :type instance: :class:`Instance ` @@ -256,7 +254,7 @@ def from_pb(cls, backup_pb, instance): match = _TABLE_NAME_RE.match(backup_pb.source_table) table_id = match.group("table_id") if match else None - expire_time = backup_pb.expire_time + expire_time = backup_pb._pb.expire_time backup = cls( backup_id, @@ -265,10 +263,10 @@ def from_pb(cls, backup_pb, instance): table_id=table_id, expire_time=expire_time, ) - backup._start_time = backup_pb.start_time - backup._end_time = backup_pb.end_time - backup._size_bytes = backup_pb.size_bytes - backup._state = backup_pb.state + backup._start_time = backup_pb._pb.start_time + backup._end_time = backup_pb._pb.end_time + backup._size_bytes = backup_pb._pb.size_bytes + backup._state = backup_pb._pb.state return backup @@ -308,13 +306,19 @@ def create(self, cluster_id=None): if not self._cluster: raise ValueError('"cluster" parameter must be set') - backup = table_pb2.Backup( + backup = table.Backup( source_table=self.source_table, expire_time=_datetime_to_pb_timestamp(self.expire_time), ) - api = self._instance._client.table_admin_client - return api.create_backup(self.parent, self.backup_id, backup) + api = self._instance._client._table_admin_client + return api.create_backup( + request={ + "parent": self.parent, + "backup_id": self.backup_id, + "backup": backup, + } + ) def get(self): """Retrieves metadata of a pending or completed Backup. @@ -328,9 +332,9 @@ def get(self): due to a retryable error and retry attempts failed. :raises ValueError: If the parameters are invalid. 
""" - api = self._instance._client.table_admin_client + api = self._instance._client._table_admin_client try: - return api.get_backup(self.name) + return api.get_backup(request={"name": self.name}) except NotFound: return None @@ -338,11 +342,11 @@ def reload(self): """Refreshes the stored backup properties.""" backup = self.get() self._source_table = backup.source_table - self._expire_time = backup.expire_time - self._start_time = backup.start_time - self._end_time = backup.end_time - self._size_bytes = backup.size_bytes - self._state = backup.state + self._expire_time = backup._pb.expire_time + self._start_time = backup._pb.start_time + self._end_time = backup._pb.end_time + self._size_bytes = backup._pb.size_bytes + self._state = backup._pb.state def exists(self): """Tests whether this Backup exists. @@ -358,18 +362,19 @@ def update_expire_time(self, new_expire_time): :type new_expire_time: :class:`datetime.datetime` :param new_expire_time: the new expiration time timestamp """ - backup_update = table_pb2.Backup( - name=self.name, - expire_time=_datetime_to_pb_timestamp(new_expire_time), + backup_update = table.Backup( + name=self.name, expire_time=_datetime_to_pb_timestamp(new_expire_time), ) update_mask = field_mask_pb2.FieldMask(paths=["expire_time"]) - api = self._instance._client.table_admin_client - api.update_backup(backup_update, update_mask) + api = self._instance._client._table_admin_client + api.update_backup(request={"backup": backup_update, "update_mask": update_mask}) self._expire_time = new_expire_time def delete(self): """Delete this Backup.""" - self._instance._client.table_admin_client.delete_backup(self.name) + self._instance._client._table_admin_client.delete_backup( + request={"name": self.name} + ) def restore(self, table_id): """Creates a new Table by restoring from this Backup. The new Table @@ -391,8 +396,14 @@ def restore(self, table_id): due to a retryable error and retry attempts failed. :raises: ValueError: If the parameters are invalid. """ - api = self._instance._client.table_admin_client - return api.restore_table(self._instance.name, table_id, self.name) + api = self._instance._client._table_admin_client + return api.restore_table( + request={ + "parent": self._instance.name, + "table_id": table_id, + "backup": self.name, + } + ) def get_iam_policy(self): """Gets the IAM access control policy for this backup. @@ -401,8 +412,7 @@ def get_iam_policy(self): :returns: The current IAM policy of this backup. """ table_api = self._instance._client.table_admin_client - args = {"resource": self.name} - response = table_api.get_iam_policy(**args) + response = table_api.get_iam_policy(request={"resource": self.name}) return Policy.from_pb(response) def set_iam_policy(self, policy): @@ -420,7 +430,9 @@ class `google.cloud.bigtable.policy.Policy` :returns: The current IAM policy of this backup. 
""" table_api = self._instance._client.table_admin_client - response = table_api.set_iam_policy(resource=self.name, policy=policy.to_pb()) + response = table_api.set_iam_policy( + request={"resource": self.name, "policy": policy.to_pb()} + ) return Policy.from_pb(response) def test_iam_permissions(self, permissions): @@ -441,6 +453,6 @@ def test_iam_permissions(self, permissions): """ table_api = self._instance._client.table_admin_client response = table_api.test_iam_permissions( - resource=self.name, permissions=permissions + request={"resource": self.name, "permissions": permissions} ) return list(response.permissions) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py index 703a1bd60c83..5e49934d0625 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py @@ -35,10 +35,12 @@ from google.cloud import bigtable_v2 from google.cloud import bigtable_admin_v2 -from google.cloud.bigtable_v2.gapic.transports import bigtable_grpc_transport -from google.cloud.bigtable_admin_v2.gapic.transports import ( - bigtable_table_admin_grpc_transport, - bigtable_instance_admin_grpc_transport, +from google.cloud.bigtable_v2.services.bigtable.transports import BigtableGrpcTransport +from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports import ( + BigtableInstanceAdminGrpcTransport, +) +from google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports import ( + BigtableTableAdminGrpcTransport, ) from google.cloud.bigtable import __version__ @@ -47,14 +49,14 @@ from google.cloud.client import ClientWithProject -from google.cloud.bigtable_admin_v2 import enums +from google.cloud.bigtable_admin_v2.types import instance from google.cloud.bigtable.cluster import _CLUSTER_NAME_RE from google.cloud.environment_vars import BIGTABLE_EMULATOR -INSTANCE_TYPE_PRODUCTION = enums.Instance.Type.PRODUCTION -INSTANCE_TYPE_DEVELOPMENT = enums.Instance.Type.DEVELOPMENT -INSTANCE_TYPE_UNSPECIFIED = enums.Instance.Type.TYPE_UNSPECIFIED +INSTANCE_TYPE_PRODUCTION = instance.Instance.Type.PRODUCTION +INSTANCE_TYPE_DEVELOPMENT = instance.Instance.Type.DEVELOPMENT +INSTANCE_TYPE_UNSPECIFIED = instance.Instance.Type.TYPE_UNSPECIFIED _CLIENT_INFO = client_info.ClientInfo(client_library_version=__version__) SPANNER_ADMIN_SCOPE = "https://www.googleapis.com/auth/spanner.admin" ADMIN_SCOPE = "https://www.googleapis.com/auth/bigtable.admin" @@ -187,9 +189,7 @@ def __init__( self._channel = channel self.SCOPE = self._get_scopes() super(Client, self).__init__( - project=project, - credentials=credentials, - client_options=client_options, + project=project, credentials=credentials, client_options=client_options, ) def _get_scopes(self): @@ -212,11 +212,11 @@ def _create_gapic_client_channel(self, client_class, grpc_transport): if self._client_options and self._client_options.api_endpoint: api_endpoint = self._client_options.api_endpoint else: - api_endpoint = client_class.SERVICE_ADDRESS + api_endpoint = client_class.DEFAULT_ENDPOINT channel = grpc_transport.create_channel( - api_endpoint, - self._credentials, + host=api_endpoint, + credentials=self._credentials, options={ "grpc.max_send_message_length": -1, "grpc.max_receive_message_length": -1, @@ -224,11 +224,7 @@ def _create_gapic_client_channel(self, client_class, grpc_transport): "grpc.keepalive_timeout_ms": 10000, }.items(), ) - transport = grpc_transport( - 
address=api_endpoint, - channel=channel, - credentials=None, - ) + transport = grpc_transport(channel=channel, host=api_endpoint) return transport @property @@ -254,7 +250,7 @@ def project_path(self): :rtype: str :returns: Return a fully-qualified project string. """ - return self.instance_admin_client.project_path(self.project) + return self.instance_admin_client.common_project_path(self.project) @property def table_data_client(self): @@ -272,8 +268,7 @@ def table_data_client(self): """ if self._table_data_client is None: transport = self._create_gapic_client_channel( - bigtable_v2.BigtableClient, - bigtable_grpc_transport.BigtableGrpcTransport, + bigtable_v2.BigtableClient, BigtableGrpcTransport, ) klass = _create_gapic_client( bigtable_v2.BigtableClient, @@ -306,7 +301,7 @@ def table_admin_client(self): transport = self._create_gapic_client_channel( bigtable_admin_v2.BigtableTableAdminClient, - bigtable_table_admin_grpc_transport.BigtableTableAdminGrpcTransport, + BigtableTableAdminGrpcTransport, ) klass = _create_gapic_client( bigtable_admin_v2.BigtableTableAdminClient, @@ -339,7 +334,7 @@ def instance_admin_client(self): transport = self._create_gapic_client_channel( bigtable_admin_v2.BigtableInstanceAdminClient, - bigtable_instance_admin_grpc_transport.BigtableInstanceAdminGrpcTransport, + BigtableInstanceAdminGrpcTransport, ) klass = _create_gapic_client( bigtable_admin_v2.BigtableInstanceAdminClient, @@ -372,10 +367,10 @@ def instance(self, instance_id, display_name=None, instance_type=None, labels=No :param instance_type: (Optional) The type of the instance. Possible values are represented by the following constants: - :data:`google.cloud.bigtable.enums.InstanceType.PRODUCTION`. - :data:`google.cloud.bigtable.enums.InstanceType.DEVELOPMENT`, + :data:`google.cloud.bigtable.instance.InstanceType.PRODUCTION`. + :data:`google.cloud.bigtable.instance.InstanceType.DEVELOPMENT`, Defaults to - :data:`google.cloud.bigtable.enums.InstanceType.UNSPECIFIED`. + :data:`google.cloud.bigtable.instance.InstanceType.UNSPECIFIED`. :type labels: dict :param labels: (Optional) Labels are a flexible and lightweight @@ -416,7 +411,9 @@ def list_instances(self): 'failed_locations' is a list of locations which could not be resolved. """ - resp = self.instance_admin_client.list_instances(self.project_path) + resp = self.instance_admin_client.list_instances( + request={"parent": self.project_path} + ) instances = [Instance.from_pb(instance, self) for instance in resp.instances] return instances, resp.failed_locations @@ -438,7 +435,9 @@ def list_clusters(self): locations which could not be resolved. 
""" resp = self.instance_admin_client.list_clusters( - self.instance_admin_client.instance_path(self.project, "-") + request={ + "parent": self.instance_admin_client.instance_path(self.project, "-") + } ) clusters = [] instances = {} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py index 1cf66f86bc55..5c4c355ffdf1 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py @@ -16,7 +16,7 @@ import re -from google.cloud.bigtable_admin_v2.types import instance_pb2 +from google.cloud.bigtable_admin_v2.types import instance from google.api_core.exceptions import NotFound @@ -101,7 +101,7 @@ def from_pb(cls, cluster_pb, instance): :end-before: [END bigtable_api_cluster_from_pb] :dedent: 4 - :type cluster_pb: :class:`instance_pb2.Cluster` + :type cluster_pb: :class:`instance.Cluster` :param cluster_pb: An instance protobuf object. :type instance: :class:`google.cloud.bigtable.instance.Instance` @@ -211,7 +211,9 @@ def reload(self): :end-before: [END bigtable_api_reload_cluster] :dedent: 4 """ - cluster_pb = self._instance._client.instance_admin_client.get_cluster(self.name) + cluster_pb = self._instance._client.instance_admin_client.get_cluster( + request={"name": self.name} + ) # NOTE: _update_from_pb does not check that the project and # cluster ID on the response match the request. @@ -232,7 +234,7 @@ def exists(self): """ client = self._instance._client try: - client.instance_admin_client.get_cluster(name=self.name) + client.instance_admin_client.get_cluster(request={"name": self.name}) return True # NOTE: There could be other exceptions that are returned to the user. except NotFound: @@ -269,7 +271,11 @@ def create(self): cluster_pb = self._to_pb() return client.instance_admin_client.create_cluster( - self._instance.name, self.cluster_id, cluster_pb + request={ + "parent": self._instance.name, + "cluster_id": self.cluster_id, + "cluster": cluster_pb, + } ) def update(self): @@ -302,7 +308,11 @@ def update(self): # Location is set only at the time of creation of a cluster # and can not be changed after cluster has been created. return client.instance_admin_client.update_cluster( - name=self.name, serve_nodes=self.serve_nodes, location=None + request={ + "serve_nodes": self.serve_nodes, + "name": self.name, + "location": None, + } ) def delete(self): @@ -333,15 +343,15 @@ def delete(self): permanently deleted. 
""" client = self._instance._client - client.instance_admin_client.delete_cluster(self.name) + client.instance_admin_client.delete_cluster(request={"name": self.name}) def _to_pb(self): """ Create cluster proto buff message for API calls """ client = self._instance._client - location = client.instance_admin_client.location_path( + location = client.instance_admin_client.common_location_path( client.project, self.location_id ) - cluster_pb = instance_pb2.Cluster( + cluster_pb = instance.Cluster( location=location, serve_nodes=self.serve_nodes, default_storage_type=self.default_storage_type, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/column_family.py b/packages/google-cloud-bigtable/google/cloud/bigtable/column_family.py index eb854cb8b085..4660119231b5 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/column_family.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/column_family.py @@ -16,9 +16,9 @@ from google.cloud import _helpers -from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_v2_pb2 -from google.cloud.bigtable_admin_v2.proto import ( - bigtable_table_admin_pb2 as table_admin_v2_pb2, +from google.cloud.bigtable_admin_v2.types import table as table_v2_pb2 +from google.cloud.bigtable_admin_v2.types import ( + bigtable_table_admin as table_admin_v2_pb2, ) @@ -275,7 +275,7 @@ def create(self): # data it contains are the GC rule and the column family ID already # stored on this instance. client.table_admin_client.modify_column_families( - self._table.name, [modification] + request={"name": self._table.name, "modifications": [modification]} ) def update(self): @@ -302,7 +302,7 @@ def update(self): # data it contains are the GC rule and the column family ID already # stored on this instance. client.table_admin_client.modify_column_families( - self._table.name, [modification] + request={"name": self._table.name, "modifications": [modification]} ) def delete(self): @@ -324,7 +324,7 @@ def delete(self): # data it contains are the GC rule and the column family ID already # stored on this instance. client.table_admin_client.modify_column_families( - self._table.name, [modification] + request={"name": self._table.name, "modifications": [modification]} ) @@ -341,15 +341,14 @@ def _gc_rule_from_pb(gc_rule_pb): :raises: :class:`ValueError ` if the rule name is unexpected. """ - rule_name = gc_rule_pb.WhichOneof("rule") + rule_name = gc_rule_pb._pb.WhichOneof("rule") if rule_name is None: return None if rule_name == "max_num_versions": return MaxVersionsGCRule(gc_rule_pb.max_num_versions) elif rule_name == "max_age": - max_age = _helpers._duration_pb_to_timedelta(gc_rule_pb.max_age) - return MaxAgeGCRule(max_age) + return MaxAgeGCRule(gc_rule_pb.max_age) elif rule_name == "union": return GCRuleUnion([_gc_rule_from_pb(rule) for rule in gc_rule_pb.union.rules]) elif rule_name == "intersection": diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/enums.py b/packages/google-cloud-bigtable/google/cloud/bigtable/enums.py index f0965779fc8b..50c7f2e6061f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/enums.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/enums.py @@ -13,7 +13,9 @@ # limitations under the License. 
"""Wrappers for gapic enum types.""" -from google.cloud.bigtable_admin_v2 import enums +from google.cloud.bigtable_admin_v2.types import common +from google.cloud.bigtable_admin_v2.types import instance +from google.cloud.bigtable_admin_v2.types import table class StorageType(object): @@ -26,9 +28,9 @@ class StorageType(object): HDD (int): Magnetic drive (HDD) storage should be used. """ - UNSPECIFIED = enums.StorageType.STORAGE_TYPE_UNSPECIFIED - SSD = enums.StorageType.SSD - HDD = enums.StorageType.HDD + UNSPECIFIED = common.StorageType.STORAGE_TYPE_UNSPECIFIED + SSD = common.StorageType.SSD + HDD = common.StorageType.HDD class Instance(object): @@ -45,9 +47,9 @@ class State(object): destroyed if the creation process encounters an error. """ - NOT_KNOWN = enums.Instance.State.STATE_NOT_KNOWN - READY = enums.Instance.State.READY - CREATING = enums.Instance.State.CREATING + NOT_KNOWN = instance.Instance.State.STATE_NOT_KNOWN + READY = instance.Instance.State.READY + CREATING = instance.Instance.State.CREATING class Type(object): """ @@ -70,9 +72,9 @@ class Type(object): must not be set. """ - UNSPECIFIED = enums.Instance.Type.TYPE_UNSPECIFIED - PRODUCTION = enums.Instance.Type.PRODUCTION - DEVELOPMENT = enums.Instance.Type.DEVELOPMENT + UNSPECIFIED = instance.Instance.Type.TYPE_UNSPECIFIED + PRODUCTION = instance.Instance.Type.PRODUCTION + DEVELOPMENT = instance.Instance.Type.DEVELOPMENT class Cluster(object): @@ -96,11 +98,11 @@ class State(object): still exist, but no operations can be performed on the cluster. """ - NOT_KNOWN = enums.Cluster.State.STATE_NOT_KNOWN - READY = enums.Cluster.State.READY - CREATING = enums.Cluster.State.CREATING - RESIZING = enums.Cluster.State.RESIZING - DISABLED = enums.Cluster.State.DISABLED + NOT_KNOWN = instance.Cluster.State.STATE_NOT_KNOWN + READY = instance.Cluster.State.READY + CREATING = instance.Cluster.State.CREATING + RESIZING = instance.Cluster.State.RESIZING + DISABLED = instance.Cluster.State.DISABLED class RoutingPolicyType(object): @@ -150,11 +152,11 @@ class View(object): FULL (int): Populates all fields. """ - VIEW_UNSPECIFIED = enums.Table.View.VIEW_UNSPECIFIED - NAME_ONLY = enums.Table.View.NAME_ONLY - SCHEMA_VIEW = enums.Table.View.SCHEMA_VIEW - REPLICATION_VIEW = enums.Table.View.REPLICATION_VIEW - FULL = enums.Table.View.FULL + VIEW_UNSPECIFIED = table.Table.View.VIEW_UNSPECIFIED + NAME_ONLY = table.Table.View.NAME_ONLY + SCHEMA_VIEW = table.Table.View.SCHEMA_VIEW + REPLICATION_VIEW = table.Table.View.REPLICATION_VIEW + FULL = table.Table.View.FULL class ReplicationState(object): """ @@ -180,12 +182,12 @@ class ReplicationState(object): reflect the state of the table in other clusters. 
""" - STATE_NOT_KNOWN = enums.Table.ClusterState.ReplicationState.STATE_NOT_KNOWN - INITIALIZING = enums.Table.ClusterState.ReplicationState.INITIALIZING + STATE_NOT_KNOWN = table.Table.ClusterState.ReplicationState.STATE_NOT_KNOWN + INITIALIZING = table.Table.ClusterState.ReplicationState.INITIALIZING PLANNED_MAINTENANCE = ( - enums.Table.ClusterState.ReplicationState.PLANNED_MAINTENANCE + table.Table.ClusterState.ReplicationState.PLANNED_MAINTENANCE ) UNPLANNED_MAINTENANCE = ( - enums.Table.ClusterState.ReplicationState.UNPLANNED_MAINTENANCE + table.Table.ClusterState.ReplicationState.UNPLANNED_MAINTENANCE ) - READY = enums.Table.ClusterState.ReplicationState.READY + READY = table.Table.ClusterState.ReplicationState.READY diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py index a126ee27a67a..d2fb5db072e9 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py @@ -22,7 +22,9 @@ from google.protobuf import field_mask_pb2 -from google.cloud.bigtable_admin_v2.types import instance_pb2, options_pb2 +from google.cloud.bigtable_admin_v2.types import instance + +from google.iam.v1 import options_pb2 from google.api_core.exceptions import NotFound @@ -121,7 +123,7 @@ def _update_from_pb(self, instance_pb): if not instance_pb.display_name: # Simple field (string) raise ValueError("Instance protobuf does not contain display_name") self.display_name = instance_pb.display_name - self.type_ = instance_pb.type + self.type_ = instance_pb.type_ self.labels = dict(instance_pb.labels) self._state = instance_pb.state @@ -136,7 +138,7 @@ def from_pb(cls, instance_pb, client): :end-before: [END bigtable_api_instance_from_pb] :dedent: 4 - :type instance_pb: :class:`instance_pb2.Instance` + :type instance_pb: :class:`instance.Instance` :param instance_pb: An instance protobuf object. :type client: :class:`Client ` @@ -314,17 +316,19 @@ def create( simultaneously." ) - instance_pb = instance_pb2.Instance( - display_name=self.display_name, type=self.type_, labels=self.labels + instance_pb = instance.Instance( + display_name=self.display_name, type_=self.type_, labels=self.labels ) parent = self._client.project_path return self._client.instance_admin_client.create_instance( - parent=parent, - instance_id=self.instance_id, - instance=instance_pb, - clusters={c.cluster_id: c._to_pb() for c in clusters}, + request={ + "parent": parent, + "instance_id": self.instance_id, + "instance": instance_pb, + "clusters": {c.cluster_id: c._to_pb() for c in clusters}, + } ) def exists(self): @@ -341,7 +345,7 @@ def exists(self): :returns: True if the table exists, else False. """ try: - self._client.instance_admin_client.get_instance(name=self.name) + self._client.instance_admin_client.get_instance(request={"name": self.name}) return True # NOTE: There could be other exceptions that are returned to the user. except NotFound: @@ -357,7 +361,9 @@ def reload(self): :end-before: [END bigtable_api_reload_instance] :dedent: 4 """ - instance_pb = self._client.instance_admin_client.get_instance(self.name) + instance_pb = self._client.instance_admin_client.get_instance( + request={"name": self.name} + ) # NOTE: _update_from_pb does not check that the project and # instance ID on the response match the request. 
@@ -399,15 +405,15 @@ def update(self): update_mask_pb.paths.append("type") if self.labels is not None: update_mask_pb.paths.append("labels") - instance_pb = instance_pb2.Instance( + instance_pb = instance.Instance( name=self.name, display_name=self.display_name, - type=self.type_, + type_=self.type_, labels=self.labels, ) return self._client.instance_admin_client.partial_update_instance( - instance=instance_pb, update_mask=update_mask_pb + request={"instance": instance_pb, "update_mask": update_mask_pb} ) def delete(self): @@ -439,7 +445,7 @@ def delete(self): irrevocably disappear from the API, and their data will be permanently deleted. """ - self._client.instance_admin_client.delete_instance(name=self.name) + self._client.instance_admin_client.delete_instance(request={"name": self.name}) def get_iam_policy(self, requested_policy_version=None): """Gets the access control policy for an instance resource. @@ -474,7 +480,7 @@ def get_iam_policy(self, requested_policy_version=None): instance_admin_client = self._client.instance_admin_client - resp = instance_admin_client.get_iam_policy(**args) + resp = instance_admin_client.get_iam_policy(request=args) return Policy.from_pb(resp) def set_iam_policy(self, policy): @@ -500,7 +506,7 @@ class `google.cloud.bigtable.policy.Policy` """ instance_admin_client = self._client.instance_admin_client resp = instance_admin_client.set_iam_policy( - resource=self.name, policy=policy.to_pb() + request={"resource": self.name, "policy": policy.to_pb()} ) return Policy.from_pb(resp) @@ -529,7 +535,7 @@ def test_iam_permissions(self, permissions): """ instance_admin_client = self._client.instance_admin_client resp = instance_admin_client.test_iam_permissions( - resource=self.name, permissions=permissions + request={"resource": self.name, "permissions": permissions} ) return list(resp.permissions) @@ -596,7 +602,9 @@ def list_clusters(self): 'failed_locations' is a list of locations which could not be resolved. """ - resp = self._client.instance_admin_client.list_clusters(self.name) + resp = self._client.instance_admin_client.list_clusters( + request={"parent": self.name} + ) clusters = [Cluster.from_pb(cluster, self) for cluster in resp.clusters] return clusters, resp.failed_locations @@ -641,10 +649,12 @@ def list_tables(self): :raises: :class:`ValueError ` if one of the returned tables has a name that is not of the expected format. """ - table_list_pb = self._client.table_admin_client.list_tables(self.name) + table_list_pb = self._client.table_admin_client.list_tables( + request={"parent": self.name} + ) result = [] - for table_pb in table_list_pb: + for table_pb in table_list_pb.tables: table_prefix = self.name + "/tables/" if not table_pb.name.startswith(table_prefix): raise ValueError( @@ -725,5 +735,7 @@ def list_app_profiles(self): :class:`~google.cloud.bigtable.app_profile.AppProfile` instances. 
""" - resp = self._client.instance_admin_client.list_app_profiles(self.name) + resp = self._client.instance_admin_client.list_app_profiles( + request={"parent": self.name} + ) return [AppProfile.from_pb(app_profile, self) for app_profile in resp] diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row.py index f3e4231e1fc4..1898ea772c2d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row.py @@ -22,7 +22,7 @@ from google.cloud._helpers import _datetime_from_microseconds from google.cloud._helpers import _microseconds_from_datetime from google.cloud._helpers import _to_bytes -from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 +from google.cloud.bigtable_v2.types import data as data_v2_pb2 _PACK_I64 = struct.Struct(">q").pack @@ -307,7 +307,7 @@ def get_mutations_size(self): mutation_size = 0 for mutation in self._get_mutations(): - mutation_size += mutation.ByteSize() + mutation_size += mutation._pb.ByteSize() return mutation_size diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py index 1cc442f2cb89..0d22e2fc66cb 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py @@ -24,8 +24,8 @@ from google.api_core import retry from google.cloud._helpers import _datetime_from_microseconds from google.cloud._helpers import _to_bytes -from google.cloud.bigtable_v2.proto import bigtable_pb2 as data_messages_v2_pb2 -from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 +from google.cloud.bigtable_v2.types import bigtable as data_messages_v2_pb2 +from google.cloud.bigtable_v2.types import data as data_v2_pb2 _MISSING_COLUMN_FAMILY = "Column family {} is not among the cells stored in this row." 
_MISSING_COLUMN = ( @@ -537,11 +537,11 @@ def _process_chunk(self, chunk): def _update_cell(self, chunk): if self._cell is None: qualifier = None - if chunk.HasField("qualifier"): - qualifier = chunk.qualifier.value + if "qualifier" in chunk: + qualifier = chunk.qualifier family = None - if chunk.HasField("family_name"): - family = chunk.family_name.value + if "family_name" in chunk: + family = chunk.family_name self._cell = PartialCellData( chunk.row_key, @@ -571,8 +571,8 @@ def _validate_chunk_reset_row(self, chunk): # No reset with other keys _raise_if(chunk.row_key) - _raise_if(chunk.HasField("family_name")) - _raise_if(chunk.HasField("qualifier")) + _raise_if("family_name" in chunk) + _raise_if("qualifier" in chunk) _raise_if(chunk.timestamp_micros) _raise_if(chunk.labels) _raise_if(chunk.value_size) @@ -638,7 +638,7 @@ def build_updated_request(self): # if neither RowSet.row_keys nor RowSet.row_ranges currently exist, # add row_range that starts with last_scanned_key as start_key_open # to request only rows that have not been returned yet - if not self.message.HasField("rows"): + if "rows" not in self.message: row_range = data_v2_pb2.RowRange(start_key_open=self.last_scanned_key) r_kwargs["rows"] = data_v2_pb2.RowSet(row_ranges=[row_range]) else: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_filters.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_filters.py index 973ba9565437..b495fb6463c9 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_filters.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_filters.py @@ -19,7 +19,7 @@ from google.cloud._helpers import _microseconds_from_datetime from google.cloud._helpers import _to_bytes -from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 +from google.cloud.bigtable_v2.types import data as data_v2_pb2 _PACK_I64 = struct.Struct(">q").pack diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py index 7697af4f776a..0269d8761c2f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py @@ -141,7 +141,7 @@ def _update_message_request(self, message): for each in self.row_ranges: r_kwrags = each.get_range_kwargs() - message.rows.row_ranges.add(**r_kwrags) + message.rows.row_ranges.append(r_kwrags) class RowRange(object): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index 887b74b0251f..740a65ae64d8 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -13,7 +13,6 @@ # limitations under the License. 
"""User-friendly container for Google Cloud Bigtable Table.""" - from google.api_core import timeout from google.api_core.exceptions import Aborted from google.api_core.exceptions import DeadlineExceeded @@ -38,13 +37,11 @@ from google.cloud.bigtable.row_set import RowSet from google.cloud.bigtable.row_set import RowRange from google.cloud.bigtable import enums -from google.cloud.bigtable_v2.proto import bigtable_pb2 as data_messages_v2_pb2 -from google.cloud.bigtable_admin_v2.gapic.bigtable_table_admin_client import ( - BigtableTableAdminClient, -) -from google.cloud.bigtable_admin_v2.proto import table_pb2 as admin_messages_v2_pb2 -from google.cloud.bigtable_admin_v2.proto import ( - bigtable_table_admin_pb2 as table_admin_messages_v2_pb2, +from google.cloud.bigtable_v2.types import bigtable as data_messages_v2_pb2 +from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient +from google.cloud.bigtable_admin_v2.types import table as admin_messages_v2_pb2 +from google.cloud.bigtable_admin_v2.types import ( + bigtable_table_admin as table_admin_messages_v2_pb2, ) import warnings @@ -157,7 +154,7 @@ def get_iam_policy(self): :returns: The current IAM policy of this table. """ table_client = self._instance._client.table_admin_client - resp = table_client.get_iam_policy(resource=self.name) + resp = table_client.get_iam_policy(request={"resource": self.name}) return Policy.from_pb(resp) def set_iam_policy(self, policy): @@ -182,7 +179,9 @@ class `google.cloud.bigtable.policy.Policy` :returns: The current IAM policy of this table. """ table_client = self._instance._client.table_admin_client - resp = table_client.set_iam_policy(resource=self.name, policy=policy.to_pb()) + resp = table_client.set_iam_policy( + request={"resource": self.name, "policy": policy.to_pb()} + ) return Policy.from_pb(resp) def test_iam_permissions(self, permissions): @@ -210,7 +209,7 @@ def test_iam_permissions(self, permissions): """ table_client = self._instance._client.table_admin_client resp = table_client.test_iam_permissions( - resource=self.name, permissions=permissions + request={"resource": self.name, "permissions": permissions} ) return list(resp.permissions) @@ -363,7 +362,7 @@ def create(self, initial_split_keys=[], column_families={}): .. note:: A create request returns a - :class:`._generated.table_pb2.Table` but we don't use + :class:`._generated.table.Table` but we don't use this response. :type initial_split_keys: list @@ -389,10 +388,12 @@ def create(self, initial_split_keys=[], column_families={}): splits = [split(key=_to_bytes(key)) for key in initial_split_keys] table_client.create_table( - parent=instance_name, - table_id=self.table_id, - table=table, - initial_splits=splits, + request={ + "parent": instance_name, + "table_id": self.table_id, + "table": table, + "initial_splits": splits, + } ) def exists(self): @@ -410,7 +411,7 @@ def exists(self): """ table_client = self._instance._client.table_admin_client try: - table_client.get_table(name=self.name, view=VIEW_NAME_ONLY) + table_client.get_table(request={"name": self.name, "view": VIEW_NAME_ONLY}) return True except NotFound: return False @@ -426,7 +427,7 @@ def delete(self): :dedent: 4 """ table_client = self._instance._client.table_admin_client - table_client.delete_table(name=self.name) + table_client.delete_table(request={"name": self.name}) def list_column_families(self): """List the column families owned by this table. @@ -447,7 +448,7 @@ def list_column_families(self): name from the column family ID. 
""" table_client = self._instance._client.table_admin_client - table_pb = table_client.get_table(self.name) + table_pb = table_client.get_table(request={"name": self.name}) result = {} for column_family_id, value_pb in table_pb.column_families.items(): @@ -474,7 +475,9 @@ def get_cluster_states(self): REPLICATION_VIEW = enums.Table.View.REPLICATION_VIEW table_client = self._instance._client.table_admin_client - table_pb = table_client.get_table(self.name, view=REPLICATION_VIEW) + table_pb = table_client.get_table( + request={"name": self.name, "view": REPLICATION_VIEW} + ) return { cluster_id: ClusterState(value_pb.replication_state) @@ -582,7 +585,7 @@ def read_rows( row_set=row_set, ) data_client = self._instance._client.table_data_client - return PartialRowsData(data_client.transport.read_rows, request_pb, retry) + return PartialRowsData(data_client.read_rows, request_pb, retry) def yield_rows(self, **kwargs): """Read rows from this table. @@ -716,7 +719,7 @@ def sample_row_keys(self): """ data_client = self._instance._client.table_data_client response_iterator = data_client.sample_row_keys( - self.name, app_profile_id=self._app_profile_id + request={"table_name": self.name, "app_profile_id": self._app_profile_id} ) return response_iterator @@ -745,11 +748,12 @@ def truncate(self, timeout=None): table_admin_client = client.table_admin_client if timeout: table_admin_client.drop_row_range( - self.name, delete_all_data_from_table=True, timeout=timeout + request={"name": self.name, "delete_all_data_from_table": True}, + timeout=timeout, ) else: table_admin_client.drop_row_range( - self.name, delete_all_data_from_table=True + request={"name": self.name, "delete_all_data_from_table": True} ) def drop_by_prefix(self, row_key_prefix, timeout=None): @@ -780,11 +784,15 @@ def drop_by_prefix(self, row_key_prefix, timeout=None): table_admin_client = client.table_admin_client if timeout: table_admin_client.drop_row_range( - self.name, row_key_prefix=_to_bytes(row_key_prefix), timeout=timeout + request={ + "name": self.name, + "row_key_prefix": _to_bytes(row_key_prefix), + }, + timeout=timeout, ) else: table_admin_client.drop_row_range( - self.name, row_key_prefix=_to_bytes(row_key_prefix) + request={"name": self.name, "row_key_prefix": _to_bytes(row_key_prefix)} ) def mutations_batcher(self, flush_count=FLUSH_COUNT, max_row_bytes=MAX_ROW_BYTES): @@ -926,14 +934,16 @@ def list_backups(self, cluster_id=None, filter_=None, order_by=None, page_size=0 ) client = self._instance._client.table_admin_client backup_list_pb = client.list_backups( - parent=parent, - filter_=backups_filter, - order_by=order_by, - page_size=page_size, + request={ + "parent": parent, + "filter": backups_filter, + "order_by": order_by, + "page_size": page_size, + } ) result = [] - for backup_pb in backup_list_pb: + for backup_pb in backup_list_pb.backups: result.append(Backup.from_pb(backup_pb, self._instance)) return result @@ -982,7 +992,13 @@ def restore(self, new_table_id, cluster_id=None, backup_id=None, backup_name=Non cluster=cluster_id, backup=backup_id, ) - return api.restore_table(self._instance.name, new_table_id, backup_name) + return api.restore_table( + request={ + "parent": self._instance.name, + "table_id": new_table_id, + "backup": backup_name, + } + ) class _RetryableMutateRowsWorker(object): @@ -1073,8 +1089,8 @@ def _do_mutate_retryable_rows(self): try: responses = data_client.mutate_rows( - self.table_name, - entries, + table_name=self.table_name, + entries=entries, app_profile_id=self.app_profile_id, 
retry=None, **kwargs diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py index 9f72d4f53222..423742502ede 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py @@ -1,54 +1,153 @@ # -*- coding: utf-8 -*- -# + # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# https://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +# - -from __future__ import absolute_import -import sys -import warnings - -from google.cloud.bigtable_admin_v2 import types -from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client -from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client -from google.cloud.bigtable_admin_v2.gapic import enums - - -if sys.version_info[:2] == (2, 7): - message = ( - "A future version of this library will drop support for Python 2.7. " - "More details about Python 2 support for Google Cloud Client Libraries " - "can be found at https://cloud.google.com/python/docs/python2-sunset/" - ) - warnings.warn(message, DeprecationWarning) - - -class BigtableInstanceAdminClient( - bigtable_instance_admin_client.BigtableInstanceAdminClient -): - __doc__ = bigtable_instance_admin_client.BigtableInstanceAdminClient.__doc__ - enums = enums - - -class BigtableTableAdminClient(bigtable_table_admin_client.BigtableTableAdminClient): - __doc__ = bigtable_table_admin_client.BigtableTableAdminClient.__doc__ - enums = enums +from .services.bigtable_instance_admin import BigtableInstanceAdminClient +from .services.bigtable_table_admin import BigtableTableAdminClient +from .types.bigtable_instance_admin import CreateAppProfileRequest +from .types.bigtable_instance_admin import CreateClusterMetadata +from .types.bigtable_instance_admin import CreateClusterRequest +from .types.bigtable_instance_admin import CreateInstanceMetadata +from .types.bigtable_instance_admin import CreateInstanceRequest +from .types.bigtable_instance_admin import DeleteAppProfileRequest +from .types.bigtable_instance_admin import DeleteClusterRequest +from .types.bigtable_instance_admin import DeleteInstanceRequest +from .types.bigtable_instance_admin import GetAppProfileRequest +from .types.bigtable_instance_admin import GetClusterRequest +from .types.bigtable_instance_admin import GetInstanceRequest +from .types.bigtable_instance_admin import ListAppProfilesRequest +from .types.bigtable_instance_admin import ListAppProfilesResponse +from .types.bigtable_instance_admin import ListClustersRequest +from .types.bigtable_instance_admin import ListClustersResponse +from .types.bigtable_instance_admin import ListInstancesRequest +from .types.bigtable_instance_admin import ListInstancesResponse +from .types.bigtable_instance_admin import PartialUpdateInstanceRequest +from .types.bigtable_instance_admin import UpdateAppProfileMetadata +from .types.bigtable_instance_admin import 
UpdateAppProfileRequest +from .types.bigtable_instance_admin import UpdateClusterMetadata +from .types.bigtable_instance_admin import UpdateInstanceMetadata +from .types.bigtable_table_admin import CheckConsistencyRequest +from .types.bigtable_table_admin import CheckConsistencyResponse +from .types.bigtable_table_admin import CreateBackupMetadata +from .types.bigtable_table_admin import CreateBackupRequest +from .types.bigtable_table_admin import CreateTableFromSnapshotMetadata +from .types.bigtable_table_admin import CreateTableFromSnapshotRequest +from .types.bigtable_table_admin import CreateTableRequest +from .types.bigtable_table_admin import DeleteBackupRequest +from .types.bigtable_table_admin import DeleteSnapshotRequest +from .types.bigtable_table_admin import DeleteTableRequest +from .types.bigtable_table_admin import DropRowRangeRequest +from .types.bigtable_table_admin import GenerateConsistencyTokenRequest +from .types.bigtable_table_admin import GenerateConsistencyTokenResponse +from .types.bigtable_table_admin import GetBackupRequest +from .types.bigtable_table_admin import GetSnapshotRequest +from .types.bigtable_table_admin import GetTableRequest +from .types.bigtable_table_admin import ListBackupsRequest +from .types.bigtable_table_admin import ListBackupsResponse +from .types.bigtable_table_admin import ListSnapshotsRequest +from .types.bigtable_table_admin import ListSnapshotsResponse +from .types.bigtable_table_admin import ListTablesRequest +from .types.bigtable_table_admin import ListTablesResponse +from .types.bigtable_table_admin import ModifyColumnFamiliesRequest +from .types.bigtable_table_admin import OptimizeRestoredTableMetadata +from .types.bigtable_table_admin import RestoreTableMetadata +from .types.bigtable_table_admin import RestoreTableRequest +from .types.bigtable_table_admin import SnapshotTableMetadata +from .types.bigtable_table_admin import SnapshotTableRequest +from .types.bigtable_table_admin import UpdateBackupRequest +from .types.common import OperationProgress +from .types.common import StorageType +from .types.instance import AppProfile +from .types.instance import Cluster +from .types.instance import Instance +from .types.table import Backup +from .types.table import BackupInfo +from .types.table import ColumnFamily +from .types.table import GcRule +from .types.table import RestoreInfo +from .types.table import RestoreSourceType +from .types.table import Snapshot +from .types.table import Table __all__ = ( - "enums", - "types", + "AppProfile", + "Backup", + "BackupInfo", "BigtableInstanceAdminClient", + "CheckConsistencyRequest", + "CheckConsistencyResponse", + "Cluster", + "ColumnFamily", + "CreateAppProfileRequest", + "CreateBackupMetadata", + "CreateBackupRequest", + "CreateClusterMetadata", + "CreateClusterRequest", + "CreateInstanceMetadata", + "CreateInstanceRequest", + "CreateTableFromSnapshotMetadata", + "CreateTableFromSnapshotRequest", + "CreateTableRequest", + "DeleteAppProfileRequest", + "DeleteBackupRequest", + "DeleteClusterRequest", + "DeleteInstanceRequest", + "DeleteSnapshotRequest", + "DeleteTableRequest", + "DropRowRangeRequest", + "GcRule", + "GenerateConsistencyTokenRequest", + "GenerateConsistencyTokenResponse", + "GetAppProfileRequest", + "GetBackupRequest", + "GetClusterRequest", + "GetInstanceRequest", + "GetSnapshotRequest", + "GetTableRequest", + "Instance", + "ListAppProfilesRequest", + "ListAppProfilesResponse", + "ListBackupsRequest", + "ListBackupsResponse", + "ListClustersRequest", + "ListClustersResponse", + 
"ListInstancesRequest", + "ListInstancesResponse", + "ListSnapshotsRequest", + "ListSnapshotsResponse", + "ListTablesRequest", + "ListTablesResponse", + "ModifyColumnFamiliesRequest", + "OperationProgress", + "OptimizeRestoredTableMetadata", + "PartialUpdateInstanceRequest", + "RestoreInfo", + "RestoreSourceType", + "RestoreTableMetadata", + "RestoreTableRequest", + "Snapshot", + "SnapshotTableMetadata", + "SnapshotTableRequest", + "StorageType", + "Table", + "UpdateAppProfileMetadata", + "UpdateAppProfileRequest", + "UpdateBackupRequest", + "UpdateClusterMetadata", + "UpdateInstanceMetadata", "BigtableTableAdminClient", ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py deleted file mode 100644 index 4e8a0d0badd3..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py +++ /dev/null @@ -1,1919 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Accesses the google.bigtable.admin.v2 BigtableInstanceAdmin API.""" - -import functools -import pkg_resources -import warnings - -from google.oauth2 import service_account -import google.api_core.client_options -import google.api_core.gapic_v1.client_info -import google.api_core.gapic_v1.config -import google.api_core.gapic_v1.method -import google.api_core.gapic_v1.routing_header -import google.api_core.grpc_helpers -import google.api_core.operation -import google.api_core.operations_v1 -import google.api_core.page_iterator -import google.api_core.path_template -import grpc - -from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client_config -from google.cloud.bigtable_admin_v2.gapic import enums -from google.cloud.bigtable_admin_v2.gapic.transports import ( - bigtable_instance_admin_grpc_transport, -) -from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2 -from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2_grpc -from google.cloud.bigtable_admin_v2.proto import instance_pb2 -from google.iam.v1 import iam_policy_pb2 -from google.iam.v1 import options_pb2 -from google.iam.v1 import policy_pb2 -from google.longrunning import operations_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 - - -try: - _GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( - "google-cloud-bigtable" - ).version -except pkg_resources.DistributionNotFound: # pragma: NO COVER - _GAPIC_LIBRARY_VERSION = None - - -class BigtableInstanceAdminClient(object): - """ - Service for creating, configuring, and deleting Cloud Bigtable Instances and - Clusters. Provides access to the Instance and Cluster schemas only, not the - tables' metadata or data stored in those tables. - """ - - SERVICE_ADDRESS = "bigtableadmin.googleapis.com:443" - """The default address of the service.""" - - # The name of the interface for this client. This is the key used to - # find the method configuration in the client_config dictionary. - _INTERFACE_NAME = "google.bigtable.admin.v2.BigtableInstanceAdmin" - - @classmethod - def from_service_account_file(cls, filename, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - BigtableInstanceAdminClient: The constructed client. 
- """ - credentials = service_account.Credentials.from_service_account_file(filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @classmethod - def app_profile_path(cls, project, instance, app_profile): - """Return a fully-qualified app_profile string.""" - return google.api_core.path_template.expand( - "projects/{project}/instances/{instance}/appProfiles/{app_profile}", - project=project, - instance=instance, - app_profile=app_profile, - ) - - @classmethod - def cluster_path(cls, project, instance, cluster): - """Return a fully-qualified cluster string.""" - return google.api_core.path_template.expand( - "projects/{project}/instances/{instance}/clusters/{cluster}", - project=project, - instance=instance, - cluster=cluster, - ) - - @classmethod - def instance_path(cls, project, instance): - """Return a fully-qualified instance string.""" - return google.api_core.path_template.expand( - "projects/{project}/instances/{instance}", - project=project, - instance=instance, - ) - - @classmethod - def location_path(cls, project, location): - """Return a fully-qualified location string.""" - return google.api_core.path_template.expand( - "projects/{project}/locations/{location}", - project=project, - location=location, - ) - - @classmethod - def project_path(cls, project): - """Return a fully-qualified project string.""" - return google.api_core.path_template.expand( - "projects/{project}", - project=project, - ) - - def __init__( - self, - transport=None, - channel=None, - credentials=None, - client_config=None, - client_info=None, - client_options=None, - ): - """Constructor. - - Args: - transport (Union[~.BigtableInstanceAdminGrpcTransport, - Callable[[~.Credentials, type], ~.BigtableInstanceAdminGrpcTransport]): A transport - instance, responsible for actually making the API calls. - The default transport uses the gRPC protocol. - This argument may also be a callable which returns a - transport instance. Callables will be sent the credentials - as the first argument and the default transport class as - the second argument. - channel (grpc.Channel): DEPRECATED. A ``Channel`` instance - through which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is mutually exclusive with providing a - transport instance to ``transport``; doing so will raise - an exception. - client_config (dict): DEPRECATED. A dictionary of call options for - each method. If not specified, the default configuration is used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - client_options (Union[dict, google.api_core.client_options.ClientOptions]): - Client options used to set user options on the client. API Endpoint - should be set through client_options. - """ - # Raise deprecation warnings for things we want to go away. 
- if client_config is not None: - warnings.warn( - "The `client_config` argument is deprecated.", - PendingDeprecationWarning, - stacklevel=2, - ) - else: - client_config = bigtable_instance_admin_client_config.config - - if channel: - warnings.warn( - "The `channel` argument is deprecated; use " "`transport` instead.", - PendingDeprecationWarning, - stacklevel=2, - ) - - api_endpoint = self.SERVICE_ADDRESS - if client_options: - if type(client_options) == dict: - client_options = google.api_core.client_options.from_dict( - client_options - ) - if client_options.api_endpoint: - api_endpoint = client_options.api_endpoint - - # Instantiate the transport. - # The transport is responsible for handling serialization and - # deserialization and actually sending data to the service. - if transport: - if callable(transport): - self.transport = transport( - credentials=credentials, - default_class=bigtable_instance_admin_grpc_transport.BigtableInstanceAdminGrpcTransport, - address=api_endpoint, - ) - else: - if credentials: - raise ValueError( - "Received both a transport instance and " - "credentials; these are mutually exclusive." - ) - self.transport = transport - else: - self.transport = bigtable_instance_admin_grpc_transport.BigtableInstanceAdminGrpcTransport( - address=api_endpoint, - channel=channel, - credentials=credentials, - ) - - if client_info is None: - client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION, - ) - else: - client_info.gapic_version = _GAPIC_LIBRARY_VERSION - self._client_info = client_info - - # Parse out the default settings for retry and timeout for each RPC - # from the client configuration. - # (Ordinarily, these are the defaults specified in the `*_config.py` - # file next to this one.) - self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config["interfaces"][self._INTERFACE_NAME], - ) - - # Save a dictionary of cached API call functions. - # These are the actual callables which invoke the proper - # transport methods, wrapped with `wrap_method` to add retry, - # timeout, and the like. - self._inner_api_calls = {} - - # Service calls - def create_instance( - self, - parent, - instance_id, - instance, - clusters, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Create an instance within a project. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> parent = client.project_path('[PROJECT]') - >>> - >>> # TODO: Initialize `instance_id`: - >>> instance_id = '' - >>> - >>> # TODO: Initialize `instance`: - >>> instance = {} - >>> - >>> # TODO: Initialize `clusters`: - >>> clusters = {} - >>> - >>> response = client.create_instance(parent, instance_id, instance, clusters) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - parent (str): Required. The unique name of the project in which to create the new - instance. Values are of the form ``projects/{project}``. - instance_id (str): Required. The ID to be used when referring to the new instance - within its project, e.g., just ``myinstance`` rather than - ``projects/myproject/instances/myinstance``. - instance (Union[dict, ~google.cloud.bigtable_admin_v2.types.Instance]): Required. 
The instance to create. Fields marked ``OutputOnly`` must - be left blank. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Instance` - clusters (dict[str -> Union[dict, ~google.cloud.bigtable_admin_v2.types.Cluster]]): Required. The clusters to be created within the instance, mapped by - desired cluster ID, e.g., just ``mycluster`` rather than - ``projects/myproject/instances/myinstance/clusters/mycluster``. Fields - marked ``OutputOnly`` must be left blank. Currently, at most four - clusters can be specified. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Cluster` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "create_instance" not in self._inner_api_calls: - self._inner_api_calls[ - "create_instance" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_instance, - default_retry=self._method_configs["CreateInstance"].retry, - default_timeout=self._method_configs["CreateInstance"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.CreateInstanceRequest( - parent=parent, - instance_id=instance_id, - instance=instance, - clusters=clusters, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["create_instance"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - instance_pb2.Instance, - metadata_type=bigtable_instance_admin_pb2.CreateInstanceMetadata, - ) - - def get_instance( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets information about an instance. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> name = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> response = client.get_instance(name) - - Args: - name (str): Required. The unique name of the requested instance. Values are of - the form ``projects/{project}/instances/{instance}``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. 
- timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Instance` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "get_instance" not in self._inner_api_calls: - self._inner_api_calls[ - "get_instance" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_instance, - default_retry=self._method_configs["GetInstance"].retry, - default_timeout=self._method_configs["GetInstance"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.GetInstanceRequest( - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_instance"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_instances( - self, - parent, - page_token=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists information about instances in a project. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> parent = client.project_path('[PROJECT]') - >>> - >>> response = client.list_instances(parent) - - Args: - parent (str): Required. The unique name of the project for which a list of - instances is requested. Values are of the form ``projects/{project}``. - page_token (str): DEPRECATED: This field is unused and ignored. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.ListInstancesResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "list_instances" not in self._inner_api_calls: - self._inner_api_calls[ - "list_instances" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_instances, - default_retry=self._method_configs["ListInstances"].retry, - default_timeout=self._method_configs["ListInstances"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.ListInstancesRequest( - parent=parent, - page_token=page_token, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["list_instances"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def update_instance( - self, - display_name, - name=None, - state=None, - type_=None, - labels=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Updates an instance within a project. This method updates only the display - name and type for an Instance. To update other Instance properties, such as - labels, use PartialUpdateInstance. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> # TODO: Initialize `display_name`: - >>> display_name = '' - >>> - >>> response = client.update_instance(display_name) - - Args: - display_name (str): Required. The descriptive name for this instance as it appears in UIs. - Can be changed at any time, but should be kept globally unique - to avoid confusion. - name (str): The unique name of the instance. Values are of the form - ``projects/{project}/instances/[a-z][a-z0-9\\-]+[a-z0-9]``. - state (~google.cloud.bigtable_admin_v2.types.State): (``OutputOnly``) The current state of the instance. - type_ (~google.cloud.bigtable_admin_v2.types.Type): The type of the instance. Defaults to ``PRODUCTION``. - labels (dict[str -> str]): Labels are a flexible and lightweight mechanism for organizing cloud - resources into groups that reflect a customer's organizational needs and - deployment strategies. They can be used to filter resources and - aggregate metrics. - - - Label keys must be between 1 and 63 characters long and must conform - to the regular expression: - ``[\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}``. - - Label values must be between 0 and 63 characters long and must - conform to the regular expression: ``[\p{Ll}\p{Lo}\p{N}_-]{0,63}``. - - No more than 64 labels can be associated with a given resource. - - Keys and values must both be under 128 bytes. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Instance` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. 
- ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "update_instance" not in self._inner_api_calls: - self._inner_api_calls[ - "update_instance" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_instance, - default_retry=self._method_configs["UpdateInstance"].retry, - default_timeout=self._method_configs["UpdateInstance"].timeout, - client_info=self._client_info, - ) - - request = instance_pb2.Instance( - display_name=display_name, - name=name, - state=state, - type=type_, - labels=labels, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["update_instance"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def partial_update_instance( - self, - instance, - update_mask, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Partially updates an instance within a project. This method can modify all - fields of an Instance and is the preferred way to update an Instance. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> # TODO: Initialize `instance`: - >>> instance = {} - >>> - >>> # TODO: Initialize `update_mask`: - >>> update_mask = {} - >>> - >>> response = client.partial_update_instance(instance, update_mask) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - instance (Union[dict, ~google.cloud.bigtable_admin_v2.types.Instance]): Required. The Instance which will (partially) replace the current value. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Instance` - update_mask (Union[dict, ~google.cloud.bigtable_admin_v2.types.FieldMask]): Required. The subset of Instance fields which should be replaced. - Must be explicitly set. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.FieldMask` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "partial_update_instance" not in self._inner_api_calls: - self._inner_api_calls[ - "partial_update_instance" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.partial_update_instance, - default_retry=self._method_configs["PartialUpdateInstance"].retry, - default_timeout=self._method_configs["PartialUpdateInstance"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.PartialUpdateInstanceRequest( - instance=instance, - update_mask=update_mask, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("instance.name", instance.name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["partial_update_instance"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - instance_pb2.Instance, - metadata_type=bigtable_instance_admin_pb2.UpdateInstanceMetadata, - ) - - def delete_instance( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Delete an instance from a project. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> name = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> client.delete_instance(name) - - Args: - name (str): Required. The unique name of the instance to be deleted. Values are - of the form ``projects/{project}/instances/{instance}``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "delete_instance" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_instance" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_instance, - default_retry=self._method_configs["DeleteInstance"].retry, - default_timeout=self._method_configs["DeleteInstance"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.DeleteInstanceRequest( - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["delete_instance"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def create_cluster( - self, - parent, - cluster_id, - cluster, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates a cluster within an instance. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> # TODO: Initialize `cluster_id`: - >>> cluster_id = '' - >>> - >>> # TODO: Initialize `cluster`: - >>> cluster = {} - >>> - >>> response = client.create_cluster(parent, cluster_id, cluster) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - parent (str): Required. The unique name of the instance in which to create the new - cluster. Values are of the form - ``projects/{project}/instances/{instance}``. - cluster_id (str): Required. The ID to be used when referring to the new cluster within - its instance, e.g., just ``mycluster`` rather than - ``projects/myproject/instances/myinstance/clusters/mycluster``. - cluster (Union[dict, ~google.cloud.bigtable_admin_v2.types.Cluster]): Required. The cluster to be created. Fields marked ``OutputOnly`` - must be left blank. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Cluster` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "create_cluster" not in self._inner_api_calls: - self._inner_api_calls[ - "create_cluster" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_cluster, - default_retry=self._method_configs["CreateCluster"].retry, - default_timeout=self._method_configs["CreateCluster"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.CreateClusterRequest( - parent=parent, - cluster_id=cluster_id, - cluster=cluster, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["create_cluster"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - instance_pb2.Cluster, - metadata_type=bigtable_instance_admin_pb2.CreateClusterMetadata, - ) - - def get_cluster( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets information about a cluster. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> name = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') - >>> - >>> response = client.get_cluster(name) - - Args: - name (str): Required. The unique name of the requested cluster. Values are of - the form ``projects/{project}/instances/{instance}/clusters/{cluster}``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Cluster` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "get_cluster" not in self._inner_api_calls: - self._inner_api_calls[ - "get_cluster" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_cluster, - default_retry=self._method_configs["GetCluster"].retry, - default_timeout=self._method_configs["GetCluster"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.GetClusterRequest( - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_cluster"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_clusters( - self, - parent, - page_token=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists information about clusters in an instance. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> response = client.list_clusters(parent) - - Args: - parent (str): Required. The unique name of the instance for which a list of - clusters is requested. Values are of the form - ``projects/{project}/instances/{instance}``. Use ``{instance} = '-'`` to - list Clusters for all Instances in a project, e.g., - ``projects/myproject/instances/-``. - page_token (str): DEPRECATED: This field is unused and ignored. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.ListClustersResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "list_clusters" not in self._inner_api_calls: - self._inner_api_calls[ - "list_clusters" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_clusters, - default_retry=self._method_configs["ListClusters"].retry, - default_timeout=self._method_configs["ListClusters"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.ListClustersRequest( - parent=parent, - page_token=page_token, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["list_clusters"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def update_cluster( - self, - serve_nodes, - name=None, - location=None, - state=None, - default_storage_type=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Updates a cluster within an instance. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> # TODO: Initialize `serve_nodes`: - >>> serve_nodes = 0 - >>> - >>> response = client.update_cluster(serve_nodes) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - serve_nodes (int): Required. The number of nodes allocated to this cluster. More nodes enable - higher throughput and more consistent performance. - name (str): The unique name of the cluster. Values are of the form - ``projects/{project}/instances/{instance}/clusters/[a-z][-a-z0-9]*``. - location (str): (``CreationOnly``) The location where this cluster's nodes and - storage reside. For best performance, clients should be located as close - as possible to this cluster. Currently only zones are supported, so - values should be of the form ``projects/{project}/locations/{zone}``. - state (~google.cloud.bigtable_admin_v2.types.State): The current state of the cluster. - default_storage_type (~google.cloud.bigtable_admin_v2.types.StorageType): (``CreationOnly``) The type of storage used by this cluster to serve - its parent instance's tables, unless explicitly overridden. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "update_cluster" not in self._inner_api_calls: - self._inner_api_calls[ - "update_cluster" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_cluster, - default_retry=self._method_configs["UpdateCluster"].retry, - default_timeout=self._method_configs["UpdateCluster"].timeout, - client_info=self._client_info, - ) - - request = instance_pb2.Cluster( - serve_nodes=serve_nodes, - name=name, - location=location, - state=state, - default_storage_type=default_storage_type, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["update_cluster"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - instance_pb2.Cluster, - metadata_type=bigtable_instance_admin_pb2.UpdateClusterMetadata, - ) - - def delete_cluster( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Deletes a cluster from an instance. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> name = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') - >>> - >>> client.delete_cluster(name) - - Args: - name (str): Required. The unique name of the cluster to be deleted. Values are - of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "delete_cluster" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_cluster" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_cluster, - default_retry=self._method_configs["DeleteCluster"].retry, - default_timeout=self._method_configs["DeleteCluster"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.DeleteClusterRequest( - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["delete_cluster"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def create_app_profile( - self, - parent, - app_profile_id, - app_profile, - ignore_warnings=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates an app profile within an instance. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> # TODO: Initialize `app_profile_id`: - >>> app_profile_id = '' - >>> - >>> # TODO: Initialize `app_profile`: - >>> app_profile = {} - >>> - >>> response = client.create_app_profile(parent, app_profile_id, app_profile) - - Args: - parent (str): Required. The unique name of the instance in which to create the new - app profile. Values are of the form - ``projects/{project}/instances/{instance}``. - app_profile_id (str): Required. The ID to be used when referring to the new app profile - within its instance, e.g., just ``myprofile`` rather than - ``projects/myproject/instances/myinstance/appProfiles/myprofile``. - app_profile (Union[dict, ~google.cloud.bigtable_admin_v2.types.AppProfile]): Required. The app profile to be created. Fields marked - ``OutputOnly`` will be ignored. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.AppProfile` - ignore_warnings (bool): If true, ignore safety checks when creating the app profile. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.AppProfile` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "create_app_profile" not in self._inner_api_calls: - self._inner_api_calls[ - "create_app_profile" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_app_profile, - default_retry=self._method_configs["CreateAppProfile"].retry, - default_timeout=self._method_configs["CreateAppProfile"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.CreateAppProfileRequest( - parent=parent, - app_profile_id=app_profile_id, - app_profile=app_profile, - ignore_warnings=ignore_warnings, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["create_app_profile"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def get_app_profile( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets information about an app profile. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> name = client.app_profile_path('[PROJECT]', '[INSTANCE]', '[APP_PROFILE]') - >>> - >>> response = client.get_app_profile(name) - - Args: - name (str): Required. The unique name of the requested app profile. Values are - of the form - ``projects/{project}/instances/{instance}/appProfiles/{app_profile}``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.AppProfile` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "get_app_profile" not in self._inner_api_calls: - self._inner_api_calls[ - "get_app_profile" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_app_profile, - default_retry=self._method_configs["GetAppProfile"].retry, - default_timeout=self._method_configs["GetAppProfile"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.GetAppProfileRequest( - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_app_profile"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_app_profiles( - self, - parent, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists information about app profiles in an instance. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> # Iterate over all results - >>> for element in client.list_app_profiles(parent): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_app_profiles(parent).pages: - ... for element in page: - ... # process element - ... pass - - Args: - parent (str): Required. The unique name of the instance for which a list of app - profiles is requested. Values are of the form - ``projects/{project}/instances/{instance}``. Use ``{instance} = '-'`` to - list AppProfiles for all Instances in a project, e.g., - ``projects/myproject/instances/-``. - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.bigtable_admin_v2.types.AppProfile` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "list_app_profiles" not in self._inner_api_calls: - self._inner_api_calls[ - "list_app_profiles" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_app_profiles, - default_retry=self._method_configs["ListAppProfiles"].retry, - default_timeout=self._method_configs["ListAppProfiles"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.ListAppProfilesRequest( - parent=parent, - page_size=page_size, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_app_profiles"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="app_profiles", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator - - def update_app_profile( - self, - app_profile, - update_mask, - ignore_warnings=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Updates an app profile within an instance. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> # TODO: Initialize `app_profile`: - >>> app_profile = {} - >>> - >>> # TODO: Initialize `update_mask`: - >>> update_mask = {} - >>> - >>> response = client.update_app_profile(app_profile, update_mask) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - app_profile (Union[dict, ~google.cloud.bigtable_admin_v2.types.AppProfile]): Required. The app profile which will (partially) replace the current value. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.AppProfile` - update_mask (Union[dict, ~google.cloud.bigtable_admin_v2.types.FieldMask]): Required. The subset of app profile fields which should be replaced. - If unset, all fields will be replaced. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.FieldMask` - ignore_warnings (bool): If true, ignore safety checks when updating the app profile. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "update_app_profile" not in self._inner_api_calls: - self._inner_api_calls[ - "update_app_profile" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_app_profile, - default_retry=self._method_configs["UpdateAppProfile"].retry, - default_timeout=self._method_configs["UpdateAppProfile"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.UpdateAppProfileRequest( - app_profile=app_profile, - update_mask=update_mask, - ignore_warnings=ignore_warnings, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("app_profile.name", app_profile.name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["update_app_profile"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - instance_pb2.AppProfile, - metadata_type=bigtable_instance_admin_pb2.UpdateAppProfileMetadata, - ) - - def delete_app_profile( - self, - name, - ignore_warnings=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Deletes an app profile from an instance. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> name = client.app_profile_path('[PROJECT]', '[INSTANCE]', '[APP_PROFILE]') - >>> - >>> client.delete_app_profile(name) - - Args: - name (str): Required. The unique name of the app profile to be deleted. Values - are of the form - ``projects/{project}/instances/{instance}/appProfiles/{app_profile}``. - ignore_warnings (bool): If true, ignore safety checks when deleting the app profile. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "delete_app_profile" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_app_profile" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_app_profile, - default_retry=self._method_configs["DeleteAppProfile"].retry, - default_timeout=self._method_configs["DeleteAppProfile"].timeout, - client_info=self._client_info, - ) - - request = bigtable_instance_admin_pb2.DeleteAppProfileRequest( - name=name, - ignore_warnings=ignore_warnings, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["delete_app_profile"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def get_iam_policy( - self, - resource, - options_=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets the access control policy for an instance resource. Returns an empty - policy if an instance exists but does not have a policy set. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> # TODO: Initialize `resource`: - >>> resource = '' - >>> - >>> response = client.get_iam_policy(resource) - - Args: - resource (str): REQUIRED: The resource for which the policy is being requested. - See the operation documentation for the appropriate value for this field. - options_ (Union[dict, ~google.cloud.bigtable_admin_v2.types.GetPolicyOptions]): OPTIONAL: A ``GetPolicyOptions`` object for specifying options to - ``GetIamPolicy``. This field is only used by Cloud IAM. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.GetPolicyOptions` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Policy` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "get_iam_policy" not in self._inner_api_calls: - self._inner_api_calls[ - "get_iam_policy" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_iam_policy, - default_retry=self._method_configs["GetIamPolicy"].retry, - default_timeout=self._method_configs["GetIamPolicy"].timeout, - client_info=self._client_info, - ) - - request = iam_policy_pb2.GetIamPolicyRequest( - resource=resource, - options=options_, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("resource", resource)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_iam_policy"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def set_iam_policy( - self, - resource, - policy, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Sets the access control policy on an instance resource. Replaces any - existing policy. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> # TODO: Initialize `resource`: - >>> resource = '' - >>> - >>> # TODO: Initialize `policy`: - >>> policy = {} - >>> - >>> response = client.set_iam_policy(resource, policy) - - Args: - resource (str): REQUIRED: The resource for which the policy is being specified. - See the operation documentation for the appropriate value for this field. - policy (Union[dict, ~google.cloud.bigtable_admin_v2.types.Policy]): REQUIRED: The complete policy to be applied to the ``resource``. The - size of the policy is limited to a few 10s of KB. An empty policy is a - valid policy but certain Cloud Platform services (such as Projects) - might reject them. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Policy` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Policy` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "set_iam_policy" not in self._inner_api_calls: - self._inner_api_calls[ - "set_iam_policy" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.set_iam_policy, - default_retry=self._method_configs["SetIamPolicy"].retry, - default_timeout=self._method_configs["SetIamPolicy"].timeout, - client_info=self._client_info, - ) - - request = iam_policy_pb2.SetIamPolicyRequest( - resource=resource, - policy=policy, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("resource", resource)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["set_iam_policy"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def test_iam_permissions( - self, - resource, - permissions, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Returns permissions that the caller has on the specified instance resource. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableInstanceAdminClient() - >>> - >>> # TODO: Initialize `resource`: - >>> resource = '' - >>> - >>> # TODO: Initialize `permissions`: - >>> permissions = [] - >>> - >>> response = client.test_iam_permissions(resource, permissions) - - Args: - resource (str): REQUIRED: The resource for which the policy detail is being requested. - See the operation documentation for the appropriate value for this field. - permissions (list[str]): The set of permissions to check for the ``resource``. Permissions - with wildcards (such as '*' or 'storage.*') are not allowed. For more - information see `IAM - Overview `__. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.TestIamPermissionsResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "test_iam_permissions" not in self._inner_api_calls: - self._inner_api_calls[ - "test_iam_permissions" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.test_iam_permissions, - default_retry=self._method_configs["TestIamPermissions"].retry, - default_timeout=self._method_configs["TestIamPermissions"].timeout, - client_info=self._client_info, - ) - - request = iam_policy_pb2.TestIamPermissionsRequest( - resource=resource, - permissions=permissions, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("resource", resource)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["test_iam_permissions"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client_config.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client_config.py deleted file mode 100644 index b2ec35e0146e..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client_config.py +++ /dev/null @@ -1,136 +0,0 @@ -config = { - "interfaces": { - "google.bigtable.admin.v2.BigtableInstanceAdmin": { - "retry_codes": { - "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], - "non_idempotent": [], - }, - "retry_params": { - "idempotent_params": { - "initial_retry_delay_millis": 1000, - "retry_delay_multiplier": 2.0, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 60000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 60000, - "total_timeout_millis": 600000, - }, - "non_idempotent_params": { - "initial_retry_delay_millis": 0, - "retry_delay_multiplier": 1.0, - "max_retry_delay_millis": 0, - "initial_rpc_timeout_millis": 60000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 60000, - "total_timeout_millis": 60000, - }, - "non_idempotent_heavy_params": { - "initial_retry_delay_millis": 0, - "retry_delay_multiplier": 1.0, - "max_retry_delay_millis": 0, - "initial_rpc_timeout_millis": 300000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 300000, - "total_timeout_millis": 300000, - }, - }, - "methods": { - "CreateInstance": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_heavy_params", - }, - "GetInstance": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "ListInstances": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "UpdateInstance": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "PartialUpdateInstance": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "DeleteInstance": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", - }, - "CreateCluster": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", - }, - "GetCluster": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "ListClusters": { - "timeout_millis": 60000, - 
"retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "UpdateCluster": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "DeleteCluster": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", - }, - "CreateAppProfile": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", - }, - "GetAppProfile": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "ListAppProfiles": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "UpdateAppProfile": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "DeleteAppProfile": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", - }, - "GetIamPolicy": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "SetIamPolicy": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", - }, - "TestIamPermissions": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - }, - } - } -} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py deleted file mode 100644 index d507a3c0b73f..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py +++ /dev/null @@ -1,2336 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Accesses the google.bigtable.admin.v2 BigtableTableAdmin API.""" - -import functools -import pkg_resources -import warnings - -from google.oauth2 import service_account -import google.api_core.client_options -import google.api_core.gapic_v1.client_info -import google.api_core.gapic_v1.config -import google.api_core.gapic_v1.method -import google.api_core.gapic_v1.routing_header -import google.api_core.grpc_helpers -import google.api_core.operation -import google.api_core.operations_v1 -import google.api_core.page_iterator -import google.api_core.path_template -import google.api_core.protobuf_helpers -import grpc - -from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client_config -from google.cloud.bigtable_admin_v2.gapic import enums -from google.cloud.bigtable_admin_v2.gapic.transports import ( - bigtable_table_admin_grpc_transport, -) -from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2 -from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2_grpc -from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2 -from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2_grpc -from google.cloud.bigtable_admin_v2.proto import instance_pb2 -from google.cloud.bigtable_admin_v2.proto import table_pb2 -from google.iam.v1 import iam_policy_pb2 -from google.iam.v1 import options_pb2 -from google.iam.v1 import policy_pb2 -from google.longrunning import operations_pb2 -from google.protobuf import duration_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 - - -try: - _GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( - "google-cloud-bigtable" - ).version -except pkg_resources.DistributionNotFound: # pragma: NO COVER - _GAPIC_LIBRARY_VERSION = None - - -class BigtableTableAdminClient(object): - """ - Service for creating, configuring, and deleting Cloud Bigtable tables. - - - Provides access to the table schemas only, not the data stored within - the tables. - """ - - SERVICE_ADDRESS = "bigtableadmin.googleapis.com:443" - """The default address of the service.""" - - # The name of the interface for this client. This is the key used to - # find the method configuration in the client_config dictionary. - _INTERFACE_NAME = "google.bigtable.admin.v2.BigtableTableAdmin" - - @classmethod - def from_service_account_file(cls, filename, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - BigtableTableAdminClient: The constructed client. 
- """ - credentials = service_account.Credentials.from_service_account_file(filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @classmethod - def backup_path(cls, project, instance, cluster, backup): - """Return a fully-qualified backup string.""" - return google.api_core.path_template.expand( - "projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}", - project=project, - instance=instance, - cluster=cluster, - backup=backup, - ) - - @classmethod - def cluster_path(cls, project, instance, cluster): - """Return a fully-qualified cluster string.""" - return google.api_core.path_template.expand( - "projects/{project}/instances/{instance}/clusters/{cluster}", - project=project, - instance=instance, - cluster=cluster, - ) - - @classmethod - def instance_path(cls, project, instance): - """Return a fully-qualified instance string.""" - return google.api_core.path_template.expand( - "projects/{project}/instances/{instance}", - project=project, - instance=instance, - ) - - @classmethod - def snapshot_path(cls, project, instance, cluster, snapshot): - """Return a fully-qualified snapshot string.""" - return google.api_core.path_template.expand( - "projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}", - project=project, - instance=instance, - cluster=cluster, - snapshot=snapshot, - ) - - @classmethod - def table_path(cls, project, instance, table): - """Return a fully-qualified table string.""" - return google.api_core.path_template.expand( - "projects/{project}/instances/{instance}/tables/{table}", - project=project, - instance=instance, - table=table, - ) - - def __init__( - self, - transport=None, - channel=None, - credentials=None, - client_config=None, - client_info=None, - client_options=None, - ): - """Constructor. - - Args: - transport (Union[~.BigtableTableAdminGrpcTransport, - Callable[[~.Credentials, type], ~.BigtableTableAdminGrpcTransport]): A transport - instance, responsible for actually making the API calls. - The default transport uses the gRPC protocol. - This argument may also be a callable which returns a - transport instance. Callables will be sent the credentials - as the first argument and the default transport class as - the second argument. - channel (grpc.Channel): DEPRECATED. A ``Channel`` instance - through which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is mutually exclusive with providing a - transport instance to ``transport``; doing so will raise - an exception. - client_config (dict): DEPRECATED. A dictionary of call options for - each method. If not specified, the default configuration is used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - client_options (Union[dict, google.api_core.client_options.ClientOptions]): - Client options used to set user options on the client. API Endpoint - should be set through client_options. 
- """ - # Raise deprecation warnings for things we want to go away. - if client_config is not None: - warnings.warn( - "The `client_config` argument is deprecated.", - PendingDeprecationWarning, - stacklevel=2, - ) - else: - client_config = bigtable_table_admin_client_config.config - - if channel: - warnings.warn( - "The `channel` argument is deprecated; use " "`transport` instead.", - PendingDeprecationWarning, - stacklevel=2, - ) - - api_endpoint = self.SERVICE_ADDRESS - if client_options: - if type(client_options) == dict: - client_options = google.api_core.client_options.from_dict( - client_options - ) - if client_options.api_endpoint: - api_endpoint = client_options.api_endpoint - - # Instantiate the transport. - # The transport is responsible for handling serialization and - # deserialization and actually sending data to the service. - if transport: - if callable(transport): - self.transport = transport( - credentials=credentials, - default_class=bigtable_table_admin_grpc_transport.BigtableTableAdminGrpcTransport, - address=api_endpoint, - ) - else: - if credentials: - raise ValueError( - "Received both a transport instance and " - "credentials; these are mutually exclusive." - ) - self.transport = transport - else: - self.transport = ( - bigtable_table_admin_grpc_transport.BigtableTableAdminGrpcTransport( - address=api_endpoint, - channel=channel, - credentials=credentials, - ) - ) - - if client_info is None: - client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION, - ) - else: - client_info.gapic_version = _GAPIC_LIBRARY_VERSION - self._client_info = client_info - - # Parse out the default settings for retry and timeout for each RPC - # from the client configuration. - # (Ordinarily, these are the defaults specified in the `*_config.py` - # file next to this one.) - self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config["interfaces"][self._INTERFACE_NAME], - ) - - # Save a dictionary of cached API call functions. - # These are the actual callables which invoke the proper - # transport methods, wrapped with `wrap_method` to add retry, - # timeout, and the like. - self._inner_api_calls = {} - - # Service calls - def create_table( - self, - parent, - table_id, - table, - initial_splits=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates a new table in the specified instance. - The table can be created with a full set of initial column families, - specified in the request. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> # TODO: Initialize `table_id`: - >>> table_id = '' - >>> - >>> # TODO: Initialize `table`: - >>> table = {} - >>> - >>> response = client.create_table(parent, table_id, table) - - Args: - parent (str): Required. The unique name of the instance in which to create the - table. Values are of the form - ``projects/{project}/instances/{instance}``. - table_id (str): Required. The name by which the new table should be referred to - within the parent instance, e.g., ``foobar`` rather than - ``{parent}/tables/foobar``. Maximum 50 characters. - table (Union[dict, ~google.cloud.bigtable_admin_v2.types.Table]): Required. The Table to create. 
- - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Table` - initial_splits (list[Union[dict, ~google.cloud.bigtable_admin_v2.types.Split]]): The optional list of row keys that will be used to initially split - the table into several tablets (tablets are similar to HBase regions). - Given two split keys, ``s1`` and ``s2``, three tablets will be created, - spanning the key ranges: ``[, s1), [s1, s2), [s2, )``. - - Example: - - - Row keys := ``["a", "apple", "custom", "customer_1", "customer_2",`` - ``"other", "zz"]`` - - initial_split_keys := - ``["apple", "customer_1", "customer_2", "other"]`` - - Key assignment: - - - Tablet 1 ``[, apple) => {"a"}.`` - - Tablet 2 ``[apple, customer_1) => {"apple", "custom"}.`` - - Tablet 3 ``[customer_1, customer_2) => {"customer_1"}.`` - - Tablet 4 ``[customer_2, other) => {"customer_2"}.`` - - Tablet 5 ``[other, ) => {"other", "zz"}.`` - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Split` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Table` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "create_table" not in self._inner_api_calls: - self._inner_api_calls[ - "create_table" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_table, - default_retry=self._method_configs["CreateTable"].retry, - default_timeout=self._method_configs["CreateTable"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.CreateTableRequest( - parent=parent, - table_id=table_id, - table=table, - initial_splits=initial_splits, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["create_table"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def create_table_from_snapshot( - self, - parent, - table_id, - source_snapshot, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates a new table from the specified snapshot. The target table must - not exist. The snapshot and the table must be in the same instance. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. 
- - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> # TODO: Initialize `table_id`: - >>> table_id = '' - >>> source_snapshot = client.snapshot_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]', '[SNAPSHOT]') - >>> - >>> response = client.create_table_from_snapshot(parent, table_id, source_snapshot) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - parent (str): Required. The unique name of the instance in which to create the - table. Values are of the form - ``projects/{project}/instances/{instance}``. - table_id (str): Required. The name by which the new table should be referred to - within the parent instance, e.g., ``foobar`` rather than - ``{parent}/tables/foobar``. - source_snapshot (str): Required. The unique name of the snapshot from which to restore the - table. The snapshot and the table must be in the same instance. Values - are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "create_table_from_snapshot" not in self._inner_api_calls: - self._inner_api_calls[ - "create_table_from_snapshot" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_table_from_snapshot, - default_retry=self._method_configs["CreateTableFromSnapshot"].retry, - default_timeout=self._method_configs["CreateTableFromSnapshot"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.CreateTableFromSnapshotRequest( - parent=parent, - table_id=table_id, - source_snapshot=source_snapshot, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["create_table_from_snapshot"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - table_pb2.Table, - metadata_type=bigtable_table_admin_pb2.CreateTableFromSnapshotMetadata, - ) - - def list_tables( - self, - parent, - view=None, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists all tables served from a specified instance. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') - >>> - >>> # Iterate over all results - >>> for element in client.list_tables(parent): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_tables(parent).pages: - ... for element in page: - ... # process element - ... pass - - Args: - parent (str): Required. The unique name of the instance for which tables should be - listed. Values are of the form - ``projects/{project}/instances/{instance}``. - view (~google.cloud.bigtable_admin_v2.types.View): The view to be applied to the returned tables' fields. Only - NAME_ONLY view (default) and REPLICATION_VIEW are supported. - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.bigtable_admin_v2.types.Table` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. 
- ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "list_tables" not in self._inner_api_calls: - self._inner_api_calls[ - "list_tables" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_tables, - default_retry=self._method_configs["ListTables"].retry, - default_timeout=self._method_configs["ListTables"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.ListTablesRequest( - parent=parent, - view=view, - page_size=page_size, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_tables"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="tables", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator - - def get_table( - self, - name, - view=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets metadata information about the specified table. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> response = client.get_table(name) - - Args: - name (str): Required. The unique name of the requested table. Values are of the - form ``projects/{project}/instances/{instance}/tables/{table}``. - view (~google.cloud.bigtable_admin_v2.types.View): The view to be applied to the returned table's fields. Defaults to - ``SCHEMA_VIEW`` if unspecified. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Table` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "get_table" not in self._inner_api_calls: - self._inner_api_calls[ - "get_table" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_table, - default_retry=self._method_configs["GetTable"].retry, - default_timeout=self._method_configs["GetTable"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.GetTableRequest( - name=name, - view=view, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_table"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def delete_table( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Permanently deletes a specified table and all of its data. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> client.delete_table(name) - - Args: - name (str): Required. The unique name of the table to be deleted. Values are of - the form ``projects/{project}/instances/{instance}/tables/{table}``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "delete_table" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_table" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_table, - default_retry=self._method_configs["DeleteTable"].retry, - default_timeout=self._method_configs["DeleteTable"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.DeleteTableRequest( - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["delete_table"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def modify_column_families( - self, - name, - modifications, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Performs a series of column family modifications on the specified table. - Either all or none of the modifications will occur before this method - returns, but data requests received prior to that point may see a table - where only some modifications have taken effect. 
- - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> # TODO: Initialize `modifications`: - >>> modifications = [] - >>> - >>> response = client.modify_column_families(name, modifications) - - Args: - name (str): Required. The unique name of the table whose families should be - modified. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - modifications (list[Union[dict, ~google.cloud.bigtable_admin_v2.types.Modification]]): Required. Modifications to be atomically applied to the specified table's - families. Entries are applied in order, meaning that earlier modifications - can be masked by later ones (in the case of repeated updates to the same - family, for example). - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Modification` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Table` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "modify_column_families" not in self._inner_api_calls: - self._inner_api_calls[ - "modify_column_families" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.modify_column_families, - default_retry=self._method_configs["ModifyColumnFamilies"].retry, - default_timeout=self._method_configs["ModifyColumnFamilies"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.ModifyColumnFamiliesRequest( - name=name, - modifications=modifications, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["modify_column_families"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def drop_row_range( - self, - name, - row_key_prefix=None, - delete_all_data_from_table=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Permanently drop/delete a row range from a specified table. The request can - specify whether to delete all rows in a table, or only those that match a - particular prefix. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> client.drop_row_range(name) - - Args: - name (str): Required. The unique name of the table on which to drop a range of - rows. 
Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - row_key_prefix (bytes): Delete all rows that start with this row key prefix. Prefix cannot be - zero length. - delete_all_data_from_table (bool): Delete all rows in the table. Setting this to false is a no-op. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "drop_row_range" not in self._inner_api_calls: - self._inner_api_calls[ - "drop_row_range" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.drop_row_range, - default_retry=self._method_configs["DropRowRange"].retry, - default_timeout=self._method_configs["DropRowRange"].timeout, - client_info=self._client_info, - ) - - # Sanity check: We have some fields which are mutually exclusive; - # raise ValueError if more than one is sent. - google.api_core.protobuf_helpers.check_oneof( - row_key_prefix=row_key_prefix, - delete_all_data_from_table=delete_all_data_from_table, - ) - - request = bigtable_table_admin_pb2.DropRowRangeRequest( - name=name, - row_key_prefix=row_key_prefix, - delete_all_data_from_table=delete_all_data_from_table, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["drop_row_range"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def generate_consistency_token( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Generates a consistency token for a Table, which can be used in - CheckConsistency to check whether mutations to the table that finished - before this call started have been replicated. The tokens will be available - for 90 days. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> response = client.generate_consistency_token(name) - - Args: - name (str): Required. The unique name of the Table for which to create a - consistency token. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. 
- metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "generate_consistency_token" not in self._inner_api_calls: - self._inner_api_calls[ - "generate_consistency_token" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.generate_consistency_token, - default_retry=self._method_configs["GenerateConsistencyToken"].retry, - default_timeout=self._method_configs[ - "GenerateConsistencyToken" - ].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.GenerateConsistencyTokenRequest( - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["generate_consistency_token"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def check_consistency( - self, - name, - consistency_token, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Checks replication consistency based on a consistency token, that is, if - replication has caught up based on the conditions specified in the token - and the check request. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> # TODO: Initialize `consistency_token`: - >>> consistency_token = '' - >>> - >>> response = client.check_consistency(name, consistency_token) - - Args: - name (str): Required. The unique name of the Table for which to check - replication consistency. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - consistency_token (str): Required. The token created using GenerateConsistencyToken for the Table. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.CheckConsistencyResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
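Taken together with generate_consistency_token above, this method forms a simple replication barrier: create a token after the writes of interest, then poll check_consistency until it reports consistent. A sketch of that loop, assuming the response fields are named consistency_token and consistent (the polling interval is arbitrary):

import time

from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableTableAdminClient()
name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]')

token = client.generate_consistency_token(name).consistency_token
while not client.check_consistency(name, token).consistent:
    time.sleep(5)  # arbitrary back-off between checks
# Mutations issued before the token was generated are now replicated.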
- if "check_consistency" not in self._inner_api_calls: - self._inner_api_calls[ - "check_consistency" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.check_consistency, - default_retry=self._method_configs["CheckConsistency"].retry, - default_timeout=self._method_configs["CheckConsistency"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.CheckConsistencyRequest( - name=name, - consistency_token=consistency_token, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["check_consistency"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def get_iam_policy( - self, - resource, - options_=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets the access control policy for a resource. - Returns an empty policy if the resource exists but does not have a policy - set. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> # TODO: Initialize `resource`: - >>> resource = '' - >>> - >>> response = client.get_iam_policy(resource) - - Args: - resource (str): REQUIRED: The resource for which the policy is being requested. - See the operation documentation for the appropriate value for this field. - options_ (Union[dict, ~google.cloud.bigtable_admin_v2.types.GetPolicyOptions]): OPTIONAL: A ``GetPolicyOptions`` object for specifying options to - ``GetIamPolicy``. This field is only used by Cloud IAM. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.GetPolicyOptions` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Policy` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "get_iam_policy" not in self._inner_api_calls: - self._inner_api_calls[ - "get_iam_policy" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_iam_policy, - default_retry=self._method_configs["GetIamPolicy"].retry, - default_timeout=self._method_configs["GetIamPolicy"].timeout, - client_info=self._client_info, - ) - - request = iam_policy_pb2.GetIamPolicyRequest( - resource=resource, - options=options_, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("resource", resource)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_iam_policy"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def set_iam_policy( - self, - resource, - policy, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Sets the access control policy on a Table or Backup resource. - Replaces any existing policy. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> # TODO: Initialize `resource`: - >>> resource = '' - >>> - >>> # TODO: Initialize `policy`: - >>> policy = {} - >>> - >>> response = client.set_iam_policy(resource, policy) - - Args: - resource (str): REQUIRED: The resource for which the policy is being specified. - See the operation documentation for the appropriate value for this field. - policy (Union[dict, ~google.cloud.bigtable_admin_v2.types.Policy]): REQUIRED: The complete policy to be applied to the ``resource``. The - size of the policy is limited to a few 10s of KB. An empty policy is a - valid policy but certain Cloud Platform services (such as Projects) - might reject them. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Policy` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Policy` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "set_iam_policy" not in self._inner_api_calls: - self._inner_api_calls[ - "set_iam_policy" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.set_iam_policy, - default_retry=self._method_configs["SetIamPolicy"].retry, - default_timeout=self._method_configs["SetIamPolicy"].timeout, - client_info=self._client_info, - ) - - request = iam_policy_pb2.SetIamPolicyRequest( - resource=resource, - policy=policy, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("resource", resource)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["set_iam_policy"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def test_iam_permissions( - self, - resource, - permissions, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Returns permissions that the caller has on the specified table resource. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> # TODO: Initialize `resource`: - >>> resource = '' - >>> - >>> # TODO: Initialize `permissions`: - >>> permissions = [] - >>> - >>> response = client.test_iam_permissions(resource, permissions) - - Args: - resource (str): REQUIRED: The resource for which the policy detail is being requested. - See the operation documentation for the appropriate value for this field. - permissions (list[str]): The set of permissions to check for the ``resource``. Permissions - with wildcards (such as '*' or 'storage.*') are not allowed. For more - information see `IAM - Overview `__. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.TestIamPermissionsResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "test_iam_permissions" not in self._inner_api_calls: - self._inner_api_calls[ - "test_iam_permissions" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.test_iam_permissions, - default_retry=self._method_configs["TestIamPermissions"].retry, - default_timeout=self._method_configs["TestIamPermissions"].timeout, - client_info=self._client_info, - ) - - request = iam_policy_pb2.TestIamPermissionsRequest( - resource=resource, - permissions=permissions, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("resource", resource)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["test_iam_permissions"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def snapshot_table( - self, - name, - cluster, - snapshot_id, - ttl=None, - description=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates a new snapshot in the specified cluster from the specified - source table. The cluster and the table must be in the same instance. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> cluster = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') - >>> - >>> # TODO: Initialize `snapshot_id`: - >>> snapshot_id = '' - >>> - >>> response = client.snapshot_table(name, cluster, snapshot_id) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - name (str): Required. The unique name of the table to have the snapshot taken. - Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - cluster (str): Required. The name of the cluster where the snapshot will be created - in. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - snapshot_id (str): Required. The ID by which the new snapshot should be referred to - within the parent cluster, e.g., ``mysnapshot`` of the form: - ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*`` rather than - ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/mysnapshot``. - ttl (Union[dict, ~google.cloud.bigtable_admin_v2.types.Duration]): The amount of time that the new snapshot can stay active after it is - created. Once 'ttl' expires, the snapshot will get deleted. The maximum - amount of time a snapshot can stay active is 7 days. If 'ttl' is not - specified, the default value of 24 hours will be used. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Duration` - description (str): Description of the snapshot. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. 
If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "snapshot_table" not in self._inner_api_calls: - self._inner_api_calls[ - "snapshot_table" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.snapshot_table, - default_retry=self._method_configs["SnapshotTable"].retry, - default_timeout=self._method_configs["SnapshotTable"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.SnapshotTableRequest( - name=name, - cluster=cluster, - snapshot_id=snapshot_id, - ttl=ttl, - description=description, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["snapshot_table"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - table_pb2.Snapshot, - metadata_type=bigtable_table_admin_pb2.SnapshotTableMetadata, - ) - - def get_snapshot( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets metadata information about the specified snapshot. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> name = client.snapshot_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]', '[SNAPSHOT]') - >>> - >>> response = client.get_snapshot(name) - - Args: - name (str): Required. The unique name of the requested snapshot. Values are of - the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Snapshot` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. 
- google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "get_snapshot" not in self._inner_api_calls: - self._inner_api_calls[ - "get_snapshot" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_snapshot, - default_retry=self._method_configs["GetSnapshot"].retry, - default_timeout=self._method_configs["GetSnapshot"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.GetSnapshotRequest( - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_snapshot"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_snapshots( - self, - parent, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists all snapshots associated with the specified cluster. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> parent = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') - >>> - >>> # Iterate over all results - >>> for element in client.list_snapshots(parent): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_snapshots(parent).pages: - ... for element in page: - ... # process element - ... pass - - Args: - parent (str): Required. The unique name of the cluster for which snapshots should - be listed. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. Use - ``{cluster} = '-'`` to list snapshots for all clusters in an instance, - e.g., ``projects/{project}/instances/{instance}/clusters/-``. - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.bigtable_admin_v2.types.Snapshot` instances. - You can also iterate over the pages of the response - using its `pages` property. 
- - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "list_snapshots" not in self._inner_api_calls: - self._inner_api_calls[ - "list_snapshots" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_snapshots, - default_retry=self._method_configs["ListSnapshots"].retry, - default_timeout=self._method_configs["ListSnapshots"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.ListSnapshotsRequest( - parent=parent, - page_size=page_size, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_snapshots"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="snapshots", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator - - def delete_snapshot( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Permanently deletes the specified snapshot. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> name = client.snapshot_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]', '[SNAPSHOT]') - >>> - >>> client.delete_snapshot(name) - - Args: - name (str): Required. The unique name of the snapshot to be deleted. Values are - of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
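End to end, the snapshot methods compose into a create/inspect/clean-up flow; a condensed sketch, assuming a dict-form Duration for ttl and that result() blocks until the long-running operation finishes (the snapshot id and one-day TTL are illustrative):

from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableTableAdminClient()
name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]')
cluster = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]')

operation = client.snapshot_table(name, cluster, 'nightly-snapshot',
                                  ttl={'seconds': 86400})
snapshot = operation.result()  # blocks until the snapshot exists

for snap in client.list_snapshots(cluster):
    print(snap.name, snap.state)

client.delete_snapshot(snapshot.name)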
- if "delete_snapshot" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_snapshot" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_snapshot, - default_retry=self._method_configs["DeleteSnapshot"].retry, - default_timeout=self._method_configs["DeleteSnapshot"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.DeleteSnapshotRequest( - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["delete_snapshot"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def create_backup( - self, - parent, - backup_id, - backup, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Starts creating a new Cloud Bigtable Backup. The returned backup - ``long-running operation`` can be used to track creation of the backup. - The ``metadata`` field type is ``CreateBackupMetadata``. The - ``response`` field type is ``Backup``, if successful. Cancelling the - returned operation will stop the creation and delete the backup. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> parent = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') - >>> - >>> # TODO: Initialize `backup_id`: - >>> backup_id = '' - >>> - >>> # TODO: Initialize `backup`: - >>> backup = {} - >>> - >>> response = client.create_backup(parent, backup_id, backup) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - parent (str): Required. This must be one of the clusters in the instance in which - this table is located. The backup will be stored in this cluster. Values - are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - backup_id (str): Required. The id of the backup to be created. The ``backup_id`` - along with the parent ``parent`` are combined as - {parent}/backups/{backup_id} to create the full backup name, of the - form: - ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``. - This string must be between 1 and 50 characters in length and match the - regex [*a-zA-Z0-9][-*.a-zA-Z0-9]*. - backup (Union[dict, ~google.cloud.bigtable_admin_v2.types.Backup]): Required. The backup to create. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Backup` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. 
- google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "create_backup" not in self._inner_api_calls: - self._inner_api_calls[ - "create_backup" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_backup, - default_retry=self._method_configs["CreateBackup"].retry, - default_timeout=self._method_configs["CreateBackup"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.CreateBackupRequest( - parent=parent, - backup_id=backup_id, - backup=backup, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["create_backup"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - table_pb2.Backup, - metadata_type=bigtable_table_admin_pb2.CreateBackupMetadata, - ) - - def get_backup( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets metadata on a pending or completed Cloud Bigtable Backup. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> name = client.backup_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]', '[BACKUP]') - >>> - >>> response = client.get_backup(name) - - Args: - name (str): Required. Name of the backup. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Backup` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
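Since create_backup returns a long-running operation, the usual flow is to block on result() and then read the finished resource back with get_backup; a sketch assuming a dict-form Backup whose required fields are source_table and expire_time (the backup id and seven-day expiry are illustrative):

import time

from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableTableAdminClient()
cluster = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]')
table = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]')

backup = {
    'source_table': table,
    # Expire roughly seven days from now (Timestamp takes whole seconds).
    'expire_time': {'seconds': int(time.time()) + 7 * 24 * 3600},
}
operation = client.create_backup(cluster, 'weekly-backup', backup)
finished = operation.result()  # blocks until the backup is created
print(client.get_backup(finished.name).state)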
- if "get_backup" not in self._inner_api_calls: - self._inner_api_calls[ - "get_backup" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_backup, - default_retry=self._method_configs["GetBackup"].retry, - default_timeout=self._method_configs["GetBackup"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.GetBackupRequest( - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_backup"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_backups( - self, - parent, - filter_=None, - order_by=None, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists Cloud Bigtable backups. Returns both completed and pending - backups. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> parent = client.cluster_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]') - >>> - >>> # Iterate over all results - >>> for element in client.list_backups(parent): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_backups(parent).pages: - ... for element in page: - ... # process element - ... pass - - Args: - parent (str): Required. The cluster to list backups from. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. Use - ``{cluster} = '-'`` to list backups for all clusters in an instance, - e.g., ``projects/{project}/instances/{instance}/clusters/-``. - filter_ (str): A filter expression that filters backups listed in the response. The - expression must specify the field name, a comparison operator, and the - value that you want to use for filtering. The value must be a string, a - number, or a boolean. The comparison operator must be <, >, <=, >=, !=, - =, or :. Colon ‘:’ represents a HAS operator which is roughly synonymous - with equality. Filter rules are case insensitive. - - The fields eligible for filtering are: - - - ``name`` - - ``source_table`` - - ``state`` - - ``start_time`` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) - - ``end_time`` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) - - ``expire_time`` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) - - ``size_bytes`` - - To filter on multiple expressions, provide each separate expression - within parentheses. By default, each expression is an AND expression. - However, you can include AND, OR, and NOT expressions explicitly. - - Some examples of using filters are: - - - ``name:"exact"`` --> The backup's name is the string "exact". - - ``name:howl`` --> The backup's name contains the string "howl". - - ``source_table:prod`` --> The source_table's name contains the string - "prod". - - ``state:CREATING`` --> The backup is pending creation. - - ``state:READY`` --> The backup is fully created and ready for use. - - ``(name:howl) AND (start_time < \"2018-03-28T14:50:00Z\")`` --> The - backup name contains the string "howl" and start_time of the backup - is before 2018-03-28T14:50:00Z. 
- - ``size_bytes > 10000000000`` --> The backup's size is greater than - 10GB - order_by (str): An expression for specifying the sort order of the results of the - request. The string value should specify one or more fields in - ``Backup``. The full syntax is described at - https://aip.dev/132#ordering. - - Fields supported are: \* name \* source_table \* expire_time \* - start_time \* end_time \* size_bytes \* state - - For example, "start_time". The default sorting order is ascending. To - specify descending order for the field, a suffix " desc" should be - appended to the field name. For example, "start_time desc". Redundant - space characters in the syntax are insigificant. - - If order_by is empty, results will be sorted by ``start_time`` in - descending order starting from the most recently created backup. - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.bigtable_admin_v2.types.Backup` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "list_backups" not in self._inner_api_calls: - self._inner_api_calls[ - "list_backups" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_backups, - default_retry=self._method_configs["ListBackups"].retry, - default_timeout=self._method_configs["ListBackups"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.ListBackupsRequest( - parent=parent, - filter=filter_, - order_by=order_by, - page_size=page_size, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_backups"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="backups", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator - - def update_backup( - self, - backup, - update_mask, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Updates a pending or completed Cloud Bigtable Backup. 
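As the argument description below notes, expire_time is the only updatable field, so the field mask is effectively always the same; a minimal sketch extending a backup's lifetime, assuming the dict forms of Backup and FieldMask (the backup name and new expiry are placeholders):

import time

from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableTableAdminClient()

backup = {
    'name': client.backup_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]', '[BACKUP]'),
    'expire_time': {'seconds': int(time.time()) + 14 * 24 * 3600},
}
# The mask names only the mutable field.
updated = client.update_backup(backup, {'paths': ['expire_time']})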
- - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> # TODO: Initialize `backup`: - >>> backup = {} - >>> - >>> # TODO: Initialize `update_mask`: - >>> update_mask = {} - >>> - >>> response = client.update_backup(backup, update_mask) - - Args: - backup (Union[dict, ~google.cloud.bigtable_admin_v2.types.Backup]): Required. The backup to update. ``backup.name``, and the fields to - be updated as specified by ``update_mask`` are required. Other fields - are ignored. Update is only supported for the following fields: - - - ``backup.expire_time``. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.Backup` - update_mask (Union[dict, ~google.cloud.bigtable_admin_v2.types.FieldMask]): Required. A mask specifying which fields (e.g. ``expire_time``) in - the Backup resource should be updated. This mask is relative to the - Backup resource, not to the request message. The field mask must always - be specified; this prevents any future fields from being erased - accidentally by clients that do not know about them. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_admin_v2.types.FieldMask` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types.Backup` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "update_backup" not in self._inner_api_calls: - self._inner_api_calls[ - "update_backup" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_backup, - default_retry=self._method_configs["UpdateBackup"].retry, - default_timeout=self._method_configs["UpdateBackup"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.UpdateBackupRequest( - backup=backup, - update_mask=update_mask, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("backup.name", backup.name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["update_backup"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def delete_backup( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Deletes a pending or completed Cloud Bigtable backup. 
- - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> name = client.backup_path('[PROJECT]', '[INSTANCE]', '[CLUSTER]', '[BACKUP]') - >>> - >>> client.delete_backup(name) - - Args: - name (str): Required. Name of the backup to delete. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "delete_backup" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_backup" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_backup, - default_retry=self._method_configs["DeleteBackup"].retry, - default_timeout=self._method_configs["DeleteBackup"].timeout, - client_info=self._client_info, - ) - - request = bigtable_table_admin_pb2.DeleteBackupRequest( - name=name, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["delete_backup"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def restore_table( - self, - parent=None, - table_id=None, - backup=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Create a new table by restoring from a completed backup. The new - table must be in the same instance as the instance containing the - backup. The returned table ``long-running operation`` can be used to - track the progress of the operation, and to cancel it. The ``metadata`` - field type is ``RestoreTableMetadata``. The ``response`` type is - ``Table``, if successful. - - Example: - >>> from google.cloud import bigtable_admin_v2 - >>> - >>> client = bigtable_admin_v2.BigtableTableAdminClient() - >>> - >>> response = client.restore_table() - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - parent (str): Required. The name of the instance in which to create the restored - table. This instance must be the parent of the source backup. Values are - of the form ``projects//instances/``. - table_id (str): Required. The id of the table to create and restore to. This table - must not already exist. The ``table_id`` appended to ``parent`` forms - the full table name of the form - ``projects//instances//tables/``. - backup (str): Name of the backup from which to restore. 
Values are of the form - ``projects//instances//clusters//backups/``. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "restore_table" not in self._inner_api_calls: - self._inner_api_calls[ - "restore_table" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.restore_table, - default_retry=self._method_configs["RestoreTable"].retry, - default_timeout=self._method_configs["RestoreTable"].timeout, - client_info=self._client_info, - ) - - # Sanity check: We have some fields which are mutually exclusive; - # raise ValueError if more than one is sent. - google.api_core.protobuf_helpers.check_oneof( - backup=backup, - ) - - request = bigtable_table_admin_pb2.RestoreTableRequest( - parent=parent, - table_id=table_id, - backup=backup, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["restore_table"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - table_pb2.Table, - metadata_type=bigtable_table_admin_pb2.RestoreTableMetadata, - ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py deleted file mode 100644 index db60047bd5a4..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py +++ /dev/null @@ -1,160 +0,0 @@ -config = { - "interfaces": { - "google.bigtable.admin.v2.BigtableTableAdmin": { - "retry_codes": { - "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], - "non_idempotent": [], - }, - "retry_params": { - "idempotent_params": { - "initial_retry_delay_millis": 1000, - "retry_delay_multiplier": 2.0, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 60000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 60000, - "total_timeout_millis": 600000, - }, - "non_idempotent_params": { - "initial_retry_delay_millis": 0, - "retry_delay_multiplier": 1.0, - "max_retry_delay_millis": 0, - "initial_rpc_timeout_millis": 60000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 60000, - "total_timeout_millis": 60000, - }, - "non_idempotent_heavy_params": { - "initial_retry_delay_millis": 0, - "retry_delay_multiplier": 1.0, - "max_retry_delay_millis": 0, - "initial_rpc_timeout_millis": 300000, - 
"rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 300000, - "total_timeout_millis": 300000, - }, - "drop_row_range_params": { - "initial_retry_delay_millis": 0, - "retry_delay_multiplier": 1.0, - "max_retry_delay_millis": 0, - "initial_rpc_timeout_millis": 3600000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 3600000, - "total_timeout_millis": 3600000, - }, - }, - "methods": { - "CreateTable": { - "timeout_millis": 130000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_heavy_params", - }, - "CreateTableFromSnapshot": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", - }, - "ListTables": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "GetTable": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "DeleteTable": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", - }, - "ModifyColumnFamilies": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_heavy_params", - }, - "DropRowRange": { - "timeout_millis": 900000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "drop_row_range_params", - }, - "GenerateConsistencyToken": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "CheckConsistency": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "GetIamPolicy": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "SetIamPolicy": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", - }, - "TestIamPermissions": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "SnapshotTable": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", - }, - "GetSnapshot": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "ListSnapshots": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "DeleteSnapshot": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", - }, - "CreateBackup": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", - }, - "GetBackup": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "ListBackups": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "UpdateBackup": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", - }, - "DeleteBackup": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", - }, - "RestoreTable": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", - }, - }, - } - } -} diff --git 
a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py deleted file mode 100644 index 536629604260..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py +++ /dev/null @@ -1,380 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import google.api_core.grpc_helpers -import google.api_core.operations_v1 - -from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2_grpc - - -class BigtableInstanceAdminGrpcTransport(object): - """gRPC transport class providing stubs for - google.bigtable.admin.v2 BigtableInstanceAdmin API. - - The transport provides access to the raw gRPC stubs, - which can be used to take advantage of advanced - features of gRPC. - """ - - # The scopes needed to make gRPC calls to all of the methods defined - # in this service. - _OAUTH_SCOPES = ( - "https://www.googleapis.com/auth/bigtable.admin", - "https://www.googleapis.com/auth/bigtable.admin.cluster", - "https://www.googleapis.com/auth/bigtable.admin.instance", - "https://www.googleapis.com/auth/bigtable.admin.table", - "https://www.googleapis.com/auth/cloud-bigtable.admin", - "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster", - "https://www.googleapis.com/auth/cloud-bigtable.admin.table", - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - ) - - def __init__( - self, channel=None, credentials=None, address="bigtableadmin.googleapis.com:443" - ): - """Instantiate the transport class. - - Args: - channel (grpc.Channel): A ``Channel`` instance through - which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - address (str): The address where the service is hosted. - """ - # If both `channel` and `credentials` are specified, raise an - # exception (channels come with credentials baked in already). - if channel is not None and credentials is not None: - raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive.", - ) - - # Create the channel. 
- if channel is None: - channel = self.create_channel( - address=address, - credentials=credentials, - options={ - "grpc.max_send_message_length": -1, - "grpc.max_receive_message_length": -1, - }.items(), - ) - - self._channel = channel - - # gRPC uses objects called "stubs" that are bound to the - # channel and provide a basic method for each RPC. - self._stubs = { - "bigtable_instance_admin_stub": bigtable_instance_admin_pb2_grpc.BigtableInstanceAdminStub( - channel - ), - } - - # Because this API includes a method that returns a - # long-running operation (proto: google.longrunning.Operation), - # instantiate an LRO client. - self._operations_client = google.api_core.operations_v1.OperationsClient( - channel - ) - - @classmethod - def create_channel( - cls, address="bigtableadmin.googleapis.com:443", credentials=None, **kwargs - ): - """Create and return a gRPC channel object. - - Args: - address (str): The host for the channel to use. - credentials (~.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - kwargs (dict): Keyword arguments, which are passed to the - channel creation. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return google.api_core.grpc_helpers.create_channel( - address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs - ) - - @property - def channel(self): - """The gRPC channel used by the transport. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return self._channel - - @property - def create_instance(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.create_instance`. - - Create an instance within a project. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].CreateInstance - - @property - def get_instance(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.get_instance`. - - Gets information about an instance. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].GetInstance - - @property - def list_instances(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.list_instances`. - - Lists information about instances in a project. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].ListInstances - - @property - def update_instance(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.update_instance`. - - Updates an instance within a project. This method updates only the display - name and type for an Instance. To update other Instance properties, such as - labels, use PartialUpdateInstance. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].UpdateInstance - - @property - def partial_update_instance(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.partial_update_instance`. - - Partially updates an instance within a project. 
This method can modify all - fields of an Instance and is the preferred way to update an Instance. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].PartialUpdateInstance - - @property - def delete_instance(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.delete_instance`. - - Delete an instance from a project. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].DeleteInstance - - @property - def create_cluster(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.create_cluster`. - - Creates a cluster within an instance. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].CreateCluster - - @property - def get_cluster(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.get_cluster`. - - Gets information about a cluster. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].GetCluster - - @property - def list_clusters(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.list_clusters`. - - Lists information about clusters in an instance. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].ListClusters - - @property - def update_cluster(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.update_cluster`. - - Updates a cluster within an instance. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].UpdateCluster - - @property - def delete_cluster(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.delete_cluster`. - - Deletes a cluster from an instance. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].DeleteCluster - - @property - def create_app_profile(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.create_app_profile`. - - Creates an app profile within an instance. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].CreateAppProfile - - @property - def get_app_profile(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.get_app_profile`. - - Gets information about an app profile. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].GetAppProfile - - @property - def list_app_profiles(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.list_app_profiles`. 
- - Lists information about app profiles in an instance. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].ListAppProfiles - - @property - def update_app_profile(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.update_app_profile`. - - Updates an app profile within an instance. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].UpdateAppProfile - - @property - def delete_app_profile(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.delete_app_profile`. - - Deletes an app profile from an instance. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].DeleteAppProfile - - @property - def get_iam_policy(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.get_iam_policy`. - - Gets the access control policy for an instance resource. Returns an empty - policy if an instance exists but does not have a policy set. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].GetIamPolicy - - @property - def set_iam_policy(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.set_iam_policy`. - - Sets the access control policy on an instance resource. Replaces any - existing policy. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].SetIamPolicy - - @property - def test_iam_permissions(self): - """Return the gRPC stub for :meth:`BigtableInstanceAdminClient.test_iam_permissions`. - - Returns permissions that the caller has on the specified instance resource. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_instance_admin_stub"].TestIamPermissions diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py deleted file mode 100644 index 281bad20a253..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py +++ /dev/null @@ -1,471 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
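As context for the class removed above: BigtableInstanceAdminGrpcTransport exposed the raw gRPC stubs (plus an OperationsClient for long-running calls), and callers could either hand it a pre-built channel or let create_channel() derive one from the environment. A minimal sketch of how that deleted surface was typically driven, assuming Application Default Credentials are available; the project name is a placeholder:

    from google.cloud.bigtable_admin_v2.gapic.transports import (
        bigtable_instance_admin_grpc_transport,
    )
    from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2

    transport_cls = (
        bigtable_instance_admin_grpc_transport.BigtableInstanceAdminGrpcTransport
    )

    # create_channel() applies the transport's OAuth scopes and, with no
    # explicit credentials, falls back to Application Default Credentials.
    channel = transport_cls.create_channel()
    transport = transport_cls(channel=channel)

    # Each stub property is a bare callable that takes a request proto.
    request = bigtable_instance_admin_pb2.ListInstancesRequest(
        parent="projects/my-project"  # placeholder project
    )
    response = transport.list_instances(request)
    for instance in response.instances:
        print(instance.name)
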
- - -import google.api_core.grpc_helpers -import google.api_core.operations_v1 - -from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2_grpc - - -class BigtableTableAdminGrpcTransport(object): - """gRPC transport class providing stubs for - google.bigtable.admin.v2 BigtableTableAdmin API. - - The transport provides access to the raw gRPC stubs, - which can be used to take advantage of advanced - features of gRPC. - """ - - # The scopes needed to make gRPC calls to all of the methods defined - # in this service. - _OAUTH_SCOPES = ( - "https://www.googleapis.com/auth/bigtable.admin", - "https://www.googleapis.com/auth/bigtable.admin.cluster", - "https://www.googleapis.com/auth/bigtable.admin.instance", - "https://www.googleapis.com/auth/bigtable.admin.table", - "https://www.googleapis.com/auth/cloud-bigtable.admin", - "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster", - "https://www.googleapis.com/auth/cloud-bigtable.admin.table", - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - ) - - def __init__( - self, channel=None, credentials=None, address="bigtableadmin.googleapis.com:443" - ): - """Instantiate the transport class. - - Args: - channel (grpc.Channel): A ``Channel`` instance through - which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - address (str): The address where the service is hosted. - """ - # If both `channel` and `credentials` are specified, raise an - # exception (channels come with credentials baked in already). - if channel is not None and credentials is not None: - raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive.", - ) - - # Create the channel. - if channel is None: - channel = self.create_channel( - address=address, - credentials=credentials, - options={ - "grpc.max_send_message_length": -1, - "grpc.max_receive_message_length": -1, - }.items(), - ) - - self._channel = channel - - # gRPC uses objects called "stubs" that are bound to the - # channel and provide a basic method for each RPC. - self._stubs = { - "bigtable_table_admin_stub": bigtable_table_admin_pb2_grpc.BigtableTableAdminStub( - channel - ), - } - - # Because this API includes a method that returns a - # long-running operation (proto: google.longrunning.Operation), - # instantiate an LRO client. - self._operations_client = google.api_core.operations_v1.OperationsClient( - channel - ) - - @classmethod - def create_channel( - cls, address="bigtableadmin.googleapis.com:443", credentials=None, **kwargs - ): - """Create and return a gRPC channel object. - - Args: - address (str): The host for the channel to use. - credentials (~.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - kwargs (dict): Keyword arguments, which are passed to the - channel creation. - - Returns: - grpc.Channel: A gRPC channel object. 
- """ - return google.api_core.grpc_helpers.create_channel( - address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs - ) - - @property - def channel(self): - """The gRPC channel used by the transport. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return self._channel - - @property - def create_table(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.create_table`. - - Creates a new table in the specified instance. - The table can be created with a full set of initial column families, - specified in the request. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].CreateTable - - @property - def create_table_from_snapshot(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.create_table_from_snapshot`. - - Creates a new table from the specified snapshot. The target table must - not exist. The snapshot and the table must be in the same instance. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].CreateTableFromSnapshot - - @property - def list_tables(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.list_tables`. - - Lists all tables served from a specified instance. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].ListTables - - @property - def get_table(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.get_table`. - - Gets metadata information about the specified table. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].GetTable - - @property - def delete_table(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.delete_table`. - - Permanently deletes a specified table and all of its data. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].DeleteTable - - @property - def modify_column_families(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.modify_column_families`. - - Performs a series of column family modifications on the specified table. - Either all or none of the modifications will occur before this method - returns, but data requests received prior to that point may see a table - where only some modifications have taken effect. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].ModifyColumnFamilies - - @property - def drop_row_range(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.drop_row_range`. - - Permanently drop/delete a row range from a specified table. 
The request can - specify whether to delete all rows in a table, or only those that match a - particular prefix. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].DropRowRange - - @property - def generate_consistency_token(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.generate_consistency_token`. - - Generates a consistency token for a Table, which can be used in - CheckConsistency to check whether mutations to the table that finished - before this call started have been replicated. The tokens will be available - for 90 days. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].GenerateConsistencyToken - - @property - def check_consistency(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.check_consistency`. - - Checks replication consistency based on a consistency token, that is, if - replication has caught up based on the conditions specified in the token - and the check request. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].CheckConsistency - - @property - def get_iam_policy(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.get_iam_policy`. - - Gets the access control policy for a resource. - Returns an empty policy if the resource exists but does not have a policy - set. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].GetIamPolicy - - @property - def set_iam_policy(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.set_iam_policy`. - - Sets the access control policy on a Table or Backup resource. - Replaces any existing policy. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].SetIamPolicy - - @property - def test_iam_permissions(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.test_iam_permissions`. - - Returns permissions that the caller has on the specified table resource. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].TestIamPermissions - - @property - def snapshot_table(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.snapshot_table`. - - Creates a new snapshot in the specified cluster from the specified - source table. The cluster and the table must be in the same instance. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. 
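The GenerateConsistencyToken / CheckConsistency pair described above is a polling protocol: request a token after your mutations have finished, then poll CheckConsistency with that token until replication reports it consistent (tokens stay valid for 90 days). A rough stub-level sketch; the request/response message names are assumed from the v2 bigtable_table_admin_pb2 module and the table name is a placeholder:

    import time

    from google.cloud.bigtable_admin_v2.gapic.transports import (
        bigtable_table_admin_grpc_transport,
    )
    from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2

    transport = (
        bigtable_table_admin_grpc_transport.BigtableTableAdminGrpcTransport()
    )
    table_name = "projects/my-project/instances/my-instance/tables/my-table"

    # Token covering every mutation that finished before this call started.
    token = transport.generate_consistency_token(
        bigtable_table_admin_pb2.GenerateConsistencyTokenRequest(name=table_name)
    )

    # Poll until replication has caught up for that token.
    while True:
        check = transport.check_consistency(
            bigtable_table_admin_pb2.CheckConsistencyRequest(
                name=table_name,
                consistency_token=token.consistency_token,
            )
        )
        if check.consistent:
            break
        time.sleep(5)
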
- """ - return self._stubs["bigtable_table_admin_stub"].SnapshotTable - - @property - def get_snapshot(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.get_snapshot`. - - Gets metadata information about the specified snapshot. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].GetSnapshot - - @property - def list_snapshots(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.list_snapshots`. - - Lists all snapshots associated with the specified cluster. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].ListSnapshots - - @property - def delete_snapshot(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.delete_snapshot`. - - Permanently deletes the specified snapshot. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].DeleteSnapshot - - @property - def create_backup(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.create_backup`. - - Starts creating a new Cloud Bigtable Backup. The returned backup - ``long-running operation`` can be used to track creation of the backup. - The ``metadata`` field type is ``CreateBackupMetadata``. The - ``response`` field type is ``Backup``, if successful. Cancelling the - returned operation will stop the creation and delete the backup. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].CreateBackup - - @property - def get_backup(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.get_backup`. - - Gets metadata on a pending or completed Cloud Bigtable Backup. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].GetBackup - - @property - def list_backups(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.list_backups`. - - Lists Cloud Bigtable backups. Returns both completed and pending - backups. 
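CreateBackup above and RestoreTable (whose wrapper appears earlier in this patch) both return long-running operations: the operation's result() resolves to the finished Backup or restored Table, and cancelling the CreateBackup operation stops creation and deletes the partial backup. A hedged sketch at the generated-client level; the create_backup argument order and the table_pb2.Backup import mirror the rest of this GAPIC surface but are assumptions here, and every resource name is a placeholder:

    from google.cloud import bigtable_admin_v2
    from google.cloud.bigtable_admin_v2.proto import table_pb2

    client = bigtable_admin_v2.BigtableTableAdminClient()

    cluster = "projects/my-project/instances/my-instance/clusters/my-cluster"
    backup = table_pb2.Backup(
        source_table="projects/my-project/instances/my-instance/tables/my-table",
        # The service also requires an expire_time; omitted in this sketch.
    )

    # CreateBackup is a long-running operation; result() blocks until done.
    create_op = client.create_backup(cluster, "my-backup", backup)
    finished_backup = create_op.result()

    # RestoreTable is likewise an LRO whose result() is the restored Table.
    restore_op = client.restore_table(
        parent="projects/my-project/instances/my-instance",
        table_id="restored-table",
        backup=finished_backup.name,
    )
    restored_table = restore_op.result()
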
- - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].ListBackups - - @property - def update_backup(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.update_backup`. - - Updates a pending or completed Cloud Bigtable Backup. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].UpdateBackup - - @property - def delete_backup(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.delete_backup`. - - Deletes a pending or completed Cloud Bigtable backup. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].DeleteBackup - - @property - def restore_table(self): - """Return the gRPC stub for :meth:`BigtableTableAdminClient.restore_table`. - - Create a new table by restoring from a completed backup. The new - table must be in the same instance as the instance containing the - backup. The returned table ``long-running operation`` can be used to - track the progress of the operation, and to cancel it. The ``metadata`` - field type is ``RestoreTableMetadata``. The ``response`` type is - ``Table``, if successful. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_table_admin_stub"].RestoreTable diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_cluster_data.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_cluster_data.proto deleted file mode 100644 index ca3b663d8661..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_cluster_data.proto +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2017 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.cluster.v1; - -import "google/api/annotations.proto"; -import "google/longrunning/operations.proto"; -import "google/protobuf/timestamp.proto"; - -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/cluster/v1;cluster"; -option java_multiple_files = true; -option java_outer_classname = "BigtableClusterDataProto"; -option java_package = "com.google.bigtable.admin.cluster.v1"; - - -// A physical location in which a particular project can allocate Cloud BigTable -// resources. -message Zone { - // Possible states of a zone. 
- enum Status { - // The state of the zone is unknown or unspecified. - UNKNOWN = 0; - - // The zone is in a good state. - OK = 1; - - // The zone is down for planned maintenance. - PLANNED_MAINTENANCE = 2; - - // The zone is down for emergency or unplanned maintenance. - EMERGENCY_MAINENANCE = 3; - } - - // A permanent unique identifier for the zone. - // Values are of the form projects//zones/[a-z][-a-z0-9]* - string name = 1; - - // The name of this zone as it appears in UIs. - string display_name = 2; - - // The current state of this zone. - Status status = 3; -} - -// An isolated set of Cloud BigTable resources on which tables can be hosted. -message Cluster { - // A permanent unique identifier for the cluster. For technical reasons, the - // zone in which the cluster resides is included here. - // Values are of the form - // projects//zones//clusters/[a-z][-a-z0-9]* - string name = 1; - - // The operation currently running on the cluster, if any. - // This cannot be set directly, only through CreateCluster, UpdateCluster, - // or UndeleteCluster. Calls to these methods will be rejected if - // "current_operation" is already set. - google.longrunning.Operation current_operation = 3; - - // The descriptive name for this cluster as it appears in UIs. - // Must be unique per zone. - string display_name = 4; - - // The number of serve nodes allocated to this cluster. - int32 serve_nodes = 5; - - // What storage type to use for tables in this cluster. Only configurable at - // cluster creation time. If unspecified, STORAGE_SSD will be used. - StorageType default_storage_type = 8; -} - -enum StorageType { - // The storage type used is unspecified. - STORAGE_UNSPECIFIED = 0; - - // Data will be stored in SSD, providing low and consistent latencies. - STORAGE_SSD = 1; - - // Data will be stored in HDD, providing high and less predictable - // latencies. - STORAGE_HDD = 2; -} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_cluster_service.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_cluster_service.proto deleted file mode 100644 index 038fcc46397f..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_cluster_service.proto +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright 2017 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -syntax = "proto3"; - -package google.bigtable.admin.cluster.v1; - -import "google/api/annotations.proto"; -import "google/bigtable/admin/cluster/v1/bigtable_cluster_data.proto"; -import "google/bigtable/admin/cluster/v1/bigtable_cluster_service_messages.proto"; -import "google/longrunning/operations.proto"; -import "google/protobuf/empty.proto"; - -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/cluster/v1;cluster"; -option java_multiple_files = true; -option java_outer_classname = "BigtableClusterServicesProto"; -option java_package = "com.google.bigtable.admin.cluster.v1"; - - -// Service for managing zonal Cloud Bigtable resources. -service BigtableClusterService { - // Lists the supported zones for the given project. - rpc ListZones(ListZonesRequest) returns (ListZonesResponse) { - option (google.api.http) = { get: "/v1/{name=projects/*}/zones" }; - } - - // Gets information about a particular cluster. - rpc GetCluster(GetClusterRequest) returns (Cluster) { - option (google.api.http) = { get: "/v1/{name=projects/*/zones/*/clusters/*}" }; - } - - // Lists all clusters in the given project, along with any zones for which - // cluster information could not be retrieved. - rpc ListClusters(ListClustersRequest) returns (ListClustersResponse) { - option (google.api.http) = { get: "/v1/{name=projects/*}/aggregated/clusters" }; - } - - // Creates a cluster and begins preparing it to begin serving. The returned - // cluster embeds as its "current_operation" a long-running operation which - // can be used to track the progress of turning up the new cluster. - // Immediately upon completion of this request: - // * The cluster will be readable via the API, with all requested attributes - // but no allocated resources. - // Until completion of the embedded operation: - // * Cancelling the operation will render the cluster immediately unreadable - // via the API. - // * All other attempts to modify or delete the cluster will be rejected. - // Upon completion of the embedded operation: - // * Billing for all successfully-allocated resources will begin (some types - // may have lower than the requested levels). - // * New tables can be created in the cluster. - // * The cluster's allocated resource levels will be readable via the API. - // The embedded operation's "metadata" field type is - // [CreateClusterMetadata][google.bigtable.admin.cluster.v1.CreateClusterMetadata] The embedded operation's "response" field type is - // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful. - rpc CreateCluster(CreateClusterRequest) returns (Cluster) { - option (google.api.http) = { post: "/v1/{name=projects/*/zones/*}/clusters" body: "*" }; - } - - // Updates a cluster, and begins allocating or releasing resources as - // requested. The returned cluster embeds as its "current_operation" a - // long-running operation which can be used to track the progress of updating - // the cluster. - // Immediately upon completion of this request: - // * For resource types where a decrease in the cluster's allocation has been - // requested, billing will be based on the newly-requested level. - // Until completion of the embedded operation: - // * Cancelling the operation will set its metadata's "cancelled_at_time", - // and begin restoring resources to their pre-request values. The operation - // is guaranteed to succeed at undoing all resource changes, after which - // point it will terminate with a CANCELLED status. 
- // * All other attempts to modify or delete the cluster will be rejected. - // * Reading the cluster via the API will continue to give the pre-request - // resource levels. - // Upon completion of the embedded operation: - // * Billing will begin for all successfully-allocated resources (some types - // may have lower than the requested levels). - // * All newly-reserved resources will be available for serving the cluster's - // tables. - // * The cluster's new resource levels will be readable via the API. - // [UpdateClusterMetadata][google.bigtable.admin.cluster.v1.UpdateClusterMetadata] The embedded operation's "response" field type is - // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful. - rpc UpdateCluster(Cluster) returns (Cluster) { - option (google.api.http) = { put: "/v1/{name=projects/*/zones/*/clusters/*}" body: "*" }; - } - - // Marks a cluster and all of its tables for permanent deletion in 7 days. - // Immediately upon completion of the request: - // * Billing will cease for all of the cluster's reserved resources. - // * The cluster's "delete_time" field will be set 7 days in the future. - // Soon afterward: - // * All tables within the cluster will become unavailable. - // Prior to the cluster's "delete_time": - // * The cluster can be recovered with a call to UndeleteCluster. - // * All other attempts to modify or delete the cluster will be rejected. - // At the cluster's "delete_time": - // * The cluster and *all of its tables* will immediately and irrevocably - // disappear from the API, and their data will be permanently deleted. - rpc DeleteCluster(DeleteClusterRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { delete: "/v1/{name=projects/*/zones/*/clusters/*}" }; - } - - // Cancels the scheduled deletion of an cluster and begins preparing it to - // resume serving. The returned operation will also be embedded as the - // cluster's "current_operation". - // Immediately upon completion of this request: - // * The cluster's "delete_time" field will be unset, protecting it from - // automatic deletion. - // Until completion of the returned operation: - // * The operation cannot be cancelled. - // Upon completion of the returned operation: - // * Billing for the cluster's resources will resume. - // * All tables within the cluster will be available. - // [UndeleteClusterMetadata][google.bigtable.admin.cluster.v1.UndeleteClusterMetadata] The embedded operation's "response" field type is - // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful. - rpc UndeleteCluster(UndeleteClusterRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*}:undelete" body: "" }; - } -} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_cluster_service_messages.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_cluster_service_messages.proto deleted file mode 100644 index 518d14dac8e0..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_cluster_service_messages.proto +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright 2017 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.cluster.v1; - -import "google/bigtable/admin/cluster/v1/bigtable_cluster_data.proto"; -import "google/protobuf/timestamp.proto"; - -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/cluster/v1;cluster"; -option java_multiple_files = true; -option java_outer_classname = "BigtableClusterServiceMessagesProto"; -option java_package = "com.google.bigtable.admin.cluster.v1"; - - -// Request message for BigtableClusterService.ListZones. -message ListZonesRequest { - // The unique name of the project for which a list of supported zones is - // requested. - // Values are of the form projects/ - string name = 1; -} - -// Response message for BigtableClusterService.ListZones. -message ListZonesResponse { - // The list of requested zones. - repeated Zone zones = 1; -} - -// Request message for BigtableClusterService.GetCluster. -message GetClusterRequest { - // The unique name of the requested cluster. - // Values are of the form projects//zones//clusters/ - string name = 1; -} - -// Request message for BigtableClusterService.ListClusters. -message ListClustersRequest { - // The unique name of the project for which a list of clusters is requested. - // Values are of the form projects/ - string name = 1; -} - -// Response message for BigtableClusterService.ListClusters. -message ListClustersResponse { - // The list of requested Clusters. - repeated Cluster clusters = 1; - - // The zones for which clusters could not be retrieved. - repeated Zone failed_zones = 2; -} - -// Request message for BigtableClusterService.CreateCluster. -message CreateClusterRequest { - // The unique name of the zone in which to create the cluster. - // Values are of the form projects//zones/ - string name = 1; - - // The id to be used when referring to the new cluster within its zone, - // e.g. just the "test-cluster" section of the full name - // "projects//zones//clusters/test-cluster". - string cluster_id = 2; - - // The cluster to create. - // The "name", "delete_time", and "current_operation" fields must be left - // blank. - Cluster cluster = 3; -} - -// Metadata type for the operation returned by -// BigtableClusterService.CreateCluster. -message CreateClusterMetadata { - // The request which prompted the creation of this operation. - CreateClusterRequest original_request = 1; - - // The time at which original_request was received. - google.protobuf.Timestamp request_time = 2; - - // The time at which this operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 3; -} - -// Metadata type for the operation returned by -// BigtableClusterService.UpdateCluster. -message UpdateClusterMetadata { - // The request which prompted the creation of this operation. - Cluster original_request = 1; - - // The time at which original_request was received. - google.protobuf.Timestamp request_time = 2; - - // The time at which this operation was cancelled. If set, this operation is - // in the process of undoing itself (which is guaranteed to succeed) and - // cannot be cancelled again. 
- google.protobuf.Timestamp cancel_time = 3; - - // The time at which this operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 4; -} - -// Request message for BigtableClusterService.DeleteCluster. -message DeleteClusterRequest { - // The unique name of the cluster to be deleted. - // Values are of the form projects//zones//clusters/ - string name = 1; -} - -// Request message for BigtableClusterService.UndeleteCluster. -message UndeleteClusterRequest { - // The unique name of the cluster to be un-deleted. - // Values are of the form projects//zones//clusters/ - string name = 1; -} - -// Metadata type for the operation returned by -// BigtableClusterService.UndeleteCluster. -message UndeleteClusterMetadata { - // The time at which the original request was received. - google.protobuf.Timestamp request_time = 1; - - // The time at which this operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 2; -} - -// Metadata type for operations initiated by the V2 BigtableAdmin service. -// More complete information for such operations is available via the V2 API. -message V2OperationMetadata { - -} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto index 8b19b5582248..ca3aaed7a1ef 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; @@ -565,9 +564,11 @@ message DeleteAppProfileRequest { } ]; - // If true, ignore safety checks when deleting the app profile. - bool ignore_warnings = 2; + // Required. If true, ignore safety checks when deleting the app profile. + bool ignore_warnings = 2 [(google.api.field_behavior) = REQUIRED]; } // The metadata for the Operation returned by UpdateAppProfile. -message UpdateAppProfileMetadata {} +message UpdateAppProfileMetadata { + +} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py deleted file mode 100644 index 38fe53f88c19..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py +++ /dev/null @@ -1,2434 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto -"""Generated protocol buffer code.""" -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.api import client_pb2 as google_dot_api_dot_client__pb2 -from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 -from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 -from google.cloud.bigtable_admin_v2.proto import ( - instance_pb2 as google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2, -) -from google.iam.v1 import iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2 -from google.iam.v1 import policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2 -from google.longrunning import ( - operations_pb2 as google_dot_longrunning_dot_operations__pb2, -) -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 -from google.protobuf import field_mask_pb2 as google_dot_protobuf_dot_field__mask__pb2 -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto", - package="google.bigtable.admin.v2", - syntax="proto3", - serialized_options=b'\n\034com.google.bigtable.admin.v2B\032BigtableInstanceAdminProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2\352\002"Google::Cloud::Bigtable::Admin::V2', - create_key=_descriptor._internal_create_key, - serialized_pb=b'\nBgoogle/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x33google/cloud/bigtable_admin_v2/proto/instance.proto\x1a\x1egoogle/iam/v1/iam_policy.proto\x1a\x1agoogle/iam/v1/policy.proto\x1a#google/longrunning/operations.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xdb\x02\n\x15\x43reateInstanceRequest\x12\x43\n\x06parent\x18\x01 \x01(\tB3\xe0\x41\x02\xfa\x41-\n+cloudresourcemanager.googleapis.com/Project\x12\x18\n\x0binstance_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x39\n\x08instance\x18\x03 \x01(\x0b\x32".google.bigtable.admin.v2.InstanceB\x03\xe0\x41\x02\x12T\n\x08\x63lusters\x18\x04 \x03(\x0b\x32=.google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntryB\x03\xe0\x41\x02\x1aR\n\rClustersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x30\n\x05value\x18\x02 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster:\x02\x38\x01"L\n\x12GetInstanceRequest\x12\x36\n\x04name\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance"o\n\x14ListInstancesRequest\x12\x43\n\x06parent\x18\x01 \x01(\tB3\xe0\x41\x02\xfa\x41-\n+cloudresourcemanager.googleapis.com/Project\x12\x12\n\npage_token\x18\x02 \x01(\t"\x81\x01\n\x15ListInstancesResponse\x12\x35\n\tinstances\x18\x01 \x03(\x0b\x32".google.bigtable.admin.v2.Instance\x12\x18\n\x10\x66\x61iled_locations\x18\x02 \x03(\t\x12\x17\n\x0fnext_page_token\x18\x03 
\x01(\t"\x8f\x01\n\x1cPartialUpdateInstanceRequest\x12\x39\n\x08instance\x18\x01 \x01(\x0b\x32".google.bigtable.admin.v2.InstanceB\x03\xe0\x41\x02\x12\x34\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMaskB\x03\xe0\x41\x02"O\n\x15\x44\x65leteInstanceRequest\x12\x36\n\x04name\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance"\xa2\x01\n\x14\x43reateClusterRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x17\n\ncluster_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x37\n\x07\x63luster\x18\x03 \x01(\x0b\x32!.google.bigtable.admin.v2.ClusterB\x03\xe0\x41\x02"J\n\x11GetClusterRequest\x12\x35\n\x04name\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster"c\n\x13ListClustersRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x12\n\npage_token\x18\x02 \x01(\t"~\n\x14ListClustersResponse\x12\x33\n\x08\x63lusters\x18\x01 \x03(\x0b\x32!.google.bigtable.admin.v2.Cluster\x12\x18\n\x10\x66\x61iled_locations\x18\x02 \x03(\t\x12\x17\n\x0fnext_page_token\x18\x03 \x01(\t"M\n\x14\x44\x65leteClusterRequest\x12\x35\n\x04name\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster"\xc6\x01\n\x16\x43reateInstanceMetadata\x12I\n\x10original_request\x18\x01 \x01(\x0b\x32/.google.bigtable.admin.v2.CreateInstanceRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xcd\x01\n\x16UpdateInstanceMetadata\x12P\n\x10original_request\x18\x01 \x01(\x0b\x32\x36.google.bigtable.admin.v2.PartialUpdateInstanceRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xc4\x01\n\x15\x43reateClusterMetadata\x12H\n\x10original_request\x18\x01 \x01(\x0b\x32..google.bigtable.admin.v2.CreateClusterRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xb7\x01\n\x15UpdateClusterMetadata\x12;\n\x10original_request\x18\x01 \x01(\x0b\x32!.google.bigtable.admin.v2.Cluster\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xc9\x01\n\x17\x43reateAppProfileRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x1b\n\x0e\x61pp_profile_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12>\n\x0b\x61pp_profile\x18\x03 \x01(\x0b\x32$.google.bigtable.admin.v2.AppProfileB\x03\xe0\x41\x02\x12\x17\n\x0fignore_warnings\x18\x04 \x01(\x08"P\n\x14GetAppProfileRequest\x12\x38\n\x04name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n"bigtable.googleapis.com/AppProfile"y\n\x16ListAppProfilesRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x11\n\tpage_size\x18\x03 \x01(\x05\x12\x12\n\npage_token\x18\x02 \x01(\t"\x88\x01\n\x17ListAppProfilesResponse\x12:\n\x0c\x61pp_profiles\x18\x01 \x03(\x0b\x32$.google.bigtable.admin.v2.AppProfile\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\x12\x18\n\x10\x66\x61iled_locations\x18\x03 \x03(\t"\xa8\x01\n\x17UpdateAppProfileRequest\x12>\n\x0b\x61pp_profile\x18\x01 \x01(\x0b\x32$.google.bigtable.admin.v2.AppProfileB\x03\xe0\x41\x02\x12\x34\n\x0bupdate_mask\x18\x02 
\x01(\x0b\x32\x1a.google.protobuf.FieldMaskB\x03\xe0\x41\x02\x12\x17\n\x0fignore_warnings\x18\x03 \x01(\x08"l\n\x17\x44\x65leteAppProfileRequest\x12\x38\n\x04name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n"bigtable.googleapis.com/AppProfile\x12\x17\n\x0fignore_warnings\x18\x02 \x01(\x08"\x1a\n\x18UpdateAppProfileMetadata2\x92\x1e\n\x15\x42igtableInstanceAdmin\x12\xda\x01\n\x0e\x43reateInstance\x12/.google.bigtable.admin.v2.CreateInstanceRequest\x1a\x1d.google.longrunning.Operation"x\x82\xd3\xe4\x93\x02&"!/v2/{parent=projects/*}/instances:\x01*\xda\x41$parent,instance_id,instance,clusters\xca\x41"\n\x08Instance\x12\x16\x43reateInstanceMetadata\x12\x91\x01\n\x0bGetInstance\x12,.google.bigtable.admin.v2.GetInstanceRequest\x1a".google.bigtable.admin.v2.Instance"0\x82\xd3\xe4\x93\x02#\x12!/v2/{name=projects/*/instances/*}\xda\x41\x04name\x12\xa4\x01\n\rListInstances\x12..google.bigtable.admin.v2.ListInstancesRequest\x1a/.google.bigtable.admin.v2.ListInstancesResponse"2\x82\xd3\xe4\x93\x02#\x12!/v2/{parent=projects/*}/instances\xda\x41\x06parent\x12\x86\x01\n\x0eUpdateInstance\x12".google.bigtable.admin.v2.Instance\x1a".google.bigtable.admin.v2.Instance",\x82\xd3\xe4\x93\x02&\x1a!/v2/{name=projects/*/instances/*}:\x01*\x12\xe8\x01\n\x15PartialUpdateInstance\x12\x36.google.bigtable.admin.v2.PartialUpdateInstanceRequest\x1a\x1d.google.longrunning.Operation"x\x82\xd3\xe4\x93\x02\x36\x32*/v2/{instance.name=projects/*/instances/*}:\x08instance\xda\x41\x14instance,update_mask\xca\x41"\n\x08Instance\x12\x16UpdateInstanceMetadata\x12\x8b\x01\n\x0e\x44\x65leteInstance\x12/.google.bigtable.admin.v2.DeleteInstanceRequest\x1a\x16.google.protobuf.Empty"0\x82\xd3\xe4\x93\x02#*!/v2/{name=projects/*/instances/*}\xda\x41\x04name\x12\xdc\x01\n\rCreateCluster\x12..google.bigtable.admin.v2.CreateClusterRequest\x1a\x1d.google.longrunning.Operation"|\x82\xd3\xe4\x93\x02\x37",/v2/{parent=projects/*/instances/*}/clusters:\x07\x63luster\xda\x41\x19parent,cluster_id,cluster\xca\x41 \n\x07\x43luster\x12\x15\x43reateClusterMetadata\x12\x99\x01\n\nGetCluster\x12+.google.bigtable.admin.v2.GetClusterRequest\x1a!.google.bigtable.admin.v2.Cluster";\x82\xd3\xe4\x93\x02.\x12,/v2/{name=projects/*/instances/*/clusters/*}\xda\x41\x04name\x12\xac\x01\n\x0cListClusters\x12-.google.bigtable.admin.v2.ListClustersRequest\x1a..google.bigtable.admin.v2.ListClustersResponse"=\x82\xd3\xe4\x93\x02.\x12,/v2/{parent=projects/*/instances/*}/clusters\xda\x41\x06parent\x12\xad\x01\n\rUpdateCluster\x12!.google.bigtable.admin.v2.Cluster\x1a\x1d.google.longrunning.Operation"Z\x82\xd3\xe4\x93\x02\x31\x1a,/v2/{name=projects/*/instances/*/clusters/*}:\x01*\xca\x41 
\n\x07\x43luster\x12\x15UpdateClusterMetadata\x12\x94\x01\n\rDeleteCluster\x12..google.bigtable.admin.v2.DeleteClusterRequest\x1a\x16.google.protobuf.Empty";\x82\xd3\xe4\x93\x02.*,/v2/{name=projects/*/instances/*/clusters/*}\xda\x41\x04name\x12\xd5\x01\n\x10\x43reateAppProfile\x12\x31.google.bigtable.admin.v2.CreateAppProfileRequest\x1a$.google.bigtable.admin.v2.AppProfile"h\x82\xd3\xe4\x93\x02>"//v2/{parent=projects/*/instances/*}/appProfiles:\x0b\x61pp_profile\xda\x41!parent,app_profile_id,app_profile\x12\xa5\x01\n\rGetAppProfile\x12..google.bigtable.admin.v2.GetAppProfileRequest\x1a$.google.bigtable.admin.v2.AppProfile">\x82\xd3\xe4\x93\x02\x31\x12//v2/{name=projects/*/instances/*/appProfiles/*}\xda\x41\x04name\x12\xb8\x01\n\x0fListAppProfiles\x12\x30.google.bigtable.admin.v2.ListAppProfilesRequest\x1a\x31.google.bigtable.admin.v2.ListAppProfilesResponse"@\x82\xd3\xe4\x93\x02\x31\x12//v2/{parent=projects/*/instances/*}/appProfiles\xda\x41\x06parent\x12\xfa\x01\n\x10UpdateAppProfile\x12\x31.google.bigtable.admin.v2.UpdateAppProfileRequest\x1a\x1d.google.longrunning.Operation"\x93\x01\x82\xd3\xe4\x93\x02J2;/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}:\x0b\x61pp_profile\xda\x41\x17\x61pp_profile,update_mask\xca\x41&\n\nAppProfile\x12\x18UpdateAppProfileMetadata\x12\x9d\x01\n\x10\x44\x65leteAppProfile\x12\x31.google.bigtable.admin.v2.DeleteAppProfileRequest\x1a\x16.google.protobuf.Empty">\x82\xd3\xe4\x93\x02\x31*//v2/{name=projects/*/instances/*/appProfiles/*}\xda\x41\x04name\x12\x93\x01\n\x0cGetIamPolicy\x12".google.iam.v1.GetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"H\x82\xd3\xe4\x93\x02\x37"2/v2/{resource=projects/*/instances/*}:getIamPolicy:\x01*\xda\x41\x08resource\x12\x9a\x01\n\x0cSetIamPolicy\x12".google.iam.v1.SetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"O\x82\xd3\xe4\x93\x02\x37"2/v2/{resource=projects/*/instances/*}:setIamPolicy:\x01*\xda\x41\x0fresource,policy\x12\xc5\x01\n\x12TestIamPermissions\x12(.google.iam.v1.TestIamPermissionsRequest\x1a).google.iam.v1.TestIamPermissionsResponse"Z\x82\xd3\xe4\x93\x02="8/v2/{resource=projects/*/instances/*}:testIamPermissions:\x01*\xda\x41\x14resource,permissions\x1a\x9a\x03\xca\x41\x1c\x62igtableadmin.googleapis.com\xd2\x41\xf7\x02https://www.googleapis.com/auth/bigtable.admin,https://www.googleapis.com/auth/bigtable.admin.cluster,https://www.googleapis.com/auth/bigtable.admin.instance,https://www.googleapis.com/auth/cloud-bigtable.admin,https://www.googleapis.com/auth/cloud-bigtable.admin.cluster,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-onlyB\xe2\x01\n\x1c\x63om.google.bigtable.admin.v2B\x1a\x42igtableInstanceAdminProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2\xea\x02"Google::Cloud::Bigtable::Admin::V2b\x06proto3', - dependencies=[ - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - google_dot_api_dot_client__pb2.DESCRIPTOR, - google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, - google_dot_api_dot_resource__pb2.DESCRIPTOR, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.DESCRIPTOR, - google_dot_iam_dot_v1_dot_iam__policy__pb2.DESCRIPTOR, - google_dot_iam_dot_v1_dot_policy__pb2.DESCRIPTOR, - google_dot_longrunning_dot_operations__pb2.DESCRIPTOR, - google_dot_protobuf_dot_empty__pb2.DESCRIPTOR, - google_dot_protobuf_dot_field__mask__pb2.DESCRIPTOR, - google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, - ], -) - - 
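The serialized FileDescriptor above and the hand-built message descriptors removed below define the admin request/response types such as CreateInstanceRequest, whose clusters field is the map entry ClustersEntry declared next. A minimal, illustrative sketch of how these generated classes are typically constructed follows; it assumes the import paths this module itself names (google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2 and its instance_pb2 sibling) and assumes the Instance.display_name, Cluster.location, and Cluster.serve_nodes fields from instance.proto, none of which appear in this hunk:

# Illustrative sketch only; the import paths and the Instance/Cluster field
# names used here are assumptions noted above, not taken from this hunk.
from google.cloud.bigtable_admin_v2.proto import (
    bigtable_instance_admin_pb2,
    instance_pb2,
)

request = bigtable_instance_admin_pb2.CreateInstanceRequest(
    parent="projects/my-project",
    instance_id="my-instance",
    instance=instance_pb2.Instance(display_name="My Instance"),
)
# "clusters" is a map<string, Cluster> field (the ClustersEntry descriptor
# below), so entries are added through the map API rather than the constructor.
request.clusters["my-cluster"].CopyFrom(
    instance_pb2.Cluster(
        location="projects/my-project/locations/us-central1-b",
        serve_nodes=3,
    )
)
print(request)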
-_CREATEINSTANCEREQUEST_CLUSTERSENTRY = _descriptor.Descriptor( - name="ClustersEntry", - full_name="google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry.value", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=723, - serialized_end=805, -) - -_CREATEINSTANCEREQUEST = _descriptor.Descriptor( - name="CreateInstanceRequest", - full_name="google.bigtable.admin.v2.CreateInstanceRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.bigtable.admin.v2.CreateInstanceRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A-\n+cloudresourcemanager.googleapis.com/Project", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="instance_id", - full_name="google.bigtable.admin.v2.CreateInstanceRequest.instance_id", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="instance", - full_name="google.bigtable.admin.v2.CreateInstanceRequest.instance", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="clusters", - full_name="google.bigtable.admin.v2.CreateInstanceRequest.clusters", - index=3, - number=4, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - 
nested_types=[ - _CREATEINSTANCEREQUEST_CLUSTERSENTRY, - ], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=458, - serialized_end=805, -) - - -_GETINSTANCEREQUEST = _descriptor.Descriptor( - name="GetInstanceRequest", - full_name="google.bigtable.admin.v2.GetInstanceRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.GetInstanceRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Instance', - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=807, - serialized_end=883, -) - - -_LISTINSTANCESREQUEST = _descriptor.Descriptor( - name="ListInstancesRequest", - full_name="google.bigtable.admin.v2.ListInstancesRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.bigtable.admin.v2.ListInstancesRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A-\n+cloudresourcemanager.googleapis.com/Project", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.bigtable.admin.v2.ListInstancesRequest.page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=885, - serialized_end=996, -) - - -_LISTINSTANCESRESPONSE = _descriptor.Descriptor( - name="ListInstancesResponse", - full_name="google.bigtable.admin.v2.ListInstancesResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="instances", - full_name="google.bigtable.admin.v2.ListInstancesResponse.instances", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="failed_locations", - full_name="google.bigtable.admin.v2.ListInstancesResponse.failed_locations", - index=1, - number=2, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - 
default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.bigtable.admin.v2.ListInstancesResponse.next_page_token", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=999, - serialized_end=1128, -) - - -_PARTIALUPDATEINSTANCEREQUEST = _descriptor.Descriptor( - name="PartialUpdateInstanceRequest", - full_name="google.bigtable.admin.v2.PartialUpdateInstanceRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="instance", - full_name="google.bigtable.admin.v2.PartialUpdateInstanceRequest.instance", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="update_mask", - full_name="google.bigtable.admin.v2.PartialUpdateInstanceRequest.update_mask", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1131, - serialized_end=1274, -) - - -_DELETEINSTANCEREQUEST = _descriptor.Descriptor( - name="DeleteInstanceRequest", - full_name="google.bigtable.admin.v2.DeleteInstanceRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.DeleteInstanceRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Instance', - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1276, - serialized_end=1355, -) - - -_CREATECLUSTERREQUEST = _descriptor.Descriptor( - name="CreateClusterRequest", - full_name="google.bigtable.admin.v2.CreateClusterRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - 
_descriptor.FieldDescriptor( - name="parent", - full_name="google.bigtable.admin.v2.CreateClusterRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Instance', - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cluster_id", - full_name="google.bigtable.admin.v2.CreateClusterRequest.cluster_id", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cluster", - full_name="google.bigtable.admin.v2.CreateClusterRequest.cluster", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1358, - serialized_end=1520, -) - - -_GETCLUSTERREQUEST = _descriptor.Descriptor( - name="GetClusterRequest", - full_name="google.bigtable.admin.v2.GetClusterRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.GetClusterRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A!\n\037bigtable.googleapis.com/Cluster", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1522, - serialized_end=1596, -) - - -_LISTCLUSTERSREQUEST = _descriptor.Descriptor( - name="ListClustersRequest", - full_name="google.bigtable.admin.v2.ListClustersRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.bigtable.admin.v2.ListClustersRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Instance', - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.bigtable.admin.v2.ListClustersRequest.page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - 
default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1598, - serialized_end=1697, -) - - -_LISTCLUSTERSRESPONSE = _descriptor.Descriptor( - name="ListClustersResponse", - full_name="google.bigtable.admin.v2.ListClustersResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="clusters", - full_name="google.bigtable.admin.v2.ListClustersResponse.clusters", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="failed_locations", - full_name="google.bigtable.admin.v2.ListClustersResponse.failed_locations", - index=1, - number=2, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.bigtable.admin.v2.ListClustersResponse.next_page_token", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1699, - serialized_end=1825, -) - - -_DELETECLUSTERREQUEST = _descriptor.Descriptor( - name="DeleteClusterRequest", - full_name="google.bigtable.admin.v2.DeleteClusterRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.DeleteClusterRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A!\n\037bigtable.googleapis.com/Cluster", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1827, - serialized_end=1904, -) - - -_CREATEINSTANCEMETADATA = _descriptor.Descriptor( - name="CreateInstanceMetadata", - full_name="google.bigtable.admin.v2.CreateInstanceMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - 
name="original_request", - full_name="google.bigtable.admin.v2.CreateInstanceMetadata.original_request", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="request_time", - full_name="google.bigtable.admin.v2.CreateInstanceMetadata.request_time", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="finish_time", - full_name="google.bigtable.admin.v2.CreateInstanceMetadata.finish_time", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1907, - serialized_end=2105, -) - - -_UPDATEINSTANCEMETADATA = _descriptor.Descriptor( - name="UpdateInstanceMetadata", - full_name="google.bigtable.admin.v2.UpdateInstanceMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="original_request", - full_name="google.bigtable.admin.v2.UpdateInstanceMetadata.original_request", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="request_time", - full_name="google.bigtable.admin.v2.UpdateInstanceMetadata.request_time", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="finish_time", - full_name="google.bigtable.admin.v2.UpdateInstanceMetadata.finish_time", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2108, - serialized_end=2313, -) - - -_CREATECLUSTERMETADATA = _descriptor.Descriptor( - name="CreateClusterMetadata", - full_name="google.bigtable.admin.v2.CreateClusterMetadata", - filename=None, - file=DESCRIPTOR, - 
containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="original_request", - full_name="google.bigtable.admin.v2.CreateClusterMetadata.original_request", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="request_time", - full_name="google.bigtable.admin.v2.CreateClusterMetadata.request_time", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="finish_time", - full_name="google.bigtable.admin.v2.CreateClusterMetadata.finish_time", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2316, - serialized_end=2512, -) - - -_UPDATECLUSTERMETADATA = _descriptor.Descriptor( - name="UpdateClusterMetadata", - full_name="google.bigtable.admin.v2.UpdateClusterMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="original_request", - full_name="google.bigtable.admin.v2.UpdateClusterMetadata.original_request", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="request_time", - full_name="google.bigtable.admin.v2.UpdateClusterMetadata.request_time", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="finish_time", - full_name="google.bigtable.admin.v2.UpdateClusterMetadata.finish_time", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2515, - serialized_end=2698, -) - - -_CREATEAPPPROFILEREQUEST = _descriptor.Descriptor( - name="CreateAppProfileRequest", - 
full_name="google.bigtable.admin.v2.CreateAppProfileRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.bigtable.admin.v2.CreateAppProfileRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Instance', - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="app_profile_id", - full_name="google.bigtable.admin.v2.CreateAppProfileRequest.app_profile_id", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="app_profile", - full_name="google.bigtable.admin.v2.CreateAppProfileRequest.app_profile", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="ignore_warnings", - full_name="google.bigtable.admin.v2.CreateAppProfileRequest.ignore_warnings", - index=3, - number=4, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2701, - serialized_end=2902, -) - - -_GETAPPPROFILEREQUEST = _descriptor.Descriptor( - name="GetAppProfileRequest", - full_name="google.bigtable.admin.v2.GetAppProfileRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.GetAppProfileRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b'\340A\002\372A$\n"bigtable.googleapis.com/AppProfile', - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2904, - serialized_end=2984, -) - - -_LISTAPPPROFILESREQUEST = _descriptor.Descriptor( - name="ListAppProfilesRequest", - full_name="google.bigtable.admin.v2.ListAppProfilesRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", 
- full_name="google.bigtable.admin.v2.ListAppProfilesRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Instance', - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_size", - full_name="google.bigtable.admin.v2.ListAppProfilesRequest.page_size", - index=1, - number=3, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.bigtable.admin.v2.ListAppProfilesRequest.page_token", - index=2, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2986, - serialized_end=3107, -) - - -_LISTAPPPROFILESRESPONSE = _descriptor.Descriptor( - name="ListAppProfilesResponse", - full_name="google.bigtable.admin.v2.ListAppProfilesResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="app_profiles", - full_name="google.bigtable.admin.v2.ListAppProfilesResponse.app_profiles", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.bigtable.admin.v2.ListAppProfilesResponse.next_page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="failed_locations", - full_name="google.bigtable.admin.v2.ListAppProfilesResponse.failed_locations", - index=2, - number=3, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3110, - serialized_end=3246, -) - - -_UPDATEAPPPROFILEREQUEST = _descriptor.Descriptor( - name="UpdateAppProfileRequest", - full_name="google.bigtable.admin.v2.UpdateAppProfileRequest", 
- filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="app_profile", - full_name="google.bigtable.admin.v2.UpdateAppProfileRequest.app_profile", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="update_mask", - full_name="google.bigtable.admin.v2.UpdateAppProfileRequest.update_mask", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="ignore_warnings", - full_name="google.bigtable.admin.v2.UpdateAppProfileRequest.ignore_warnings", - index=2, - number=3, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3249, - serialized_end=3417, -) - - -_DELETEAPPPROFILEREQUEST = _descriptor.Descriptor( - name="DeleteAppProfileRequest", - full_name="google.bigtable.admin.v2.DeleteAppProfileRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.DeleteAppProfileRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b'\340A\002\372A$\n"bigtable.googleapis.com/AppProfile', - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="ignore_warnings", - full_name="google.bigtable.admin.v2.DeleteAppProfileRequest.ignore_warnings", - index=1, - number=2, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3419, - serialized_end=3527, -) - - -_UPDATEAPPPROFILEMETADATA = _descriptor.Descriptor( - name="UpdateAppProfileMetadata", - full_name="google.bigtable.admin.v2.UpdateAppProfileMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - 
oneofs=[], - serialized_start=3529, - serialized_end=3555, -) - -_CREATEINSTANCEREQUEST_CLUSTERSENTRY.fields_by_name[ - "value" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._CLUSTER -) -_CREATEINSTANCEREQUEST_CLUSTERSENTRY.containing_type = _CREATEINSTANCEREQUEST -_CREATEINSTANCEREQUEST.fields_by_name[ - "instance" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._INSTANCE -) -_CREATEINSTANCEREQUEST.fields_by_name[ - "clusters" -].message_type = _CREATEINSTANCEREQUEST_CLUSTERSENTRY -_LISTINSTANCESRESPONSE.fields_by_name[ - "instances" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._INSTANCE -) -_PARTIALUPDATEINSTANCEREQUEST.fields_by_name[ - "instance" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._INSTANCE -) -_PARTIALUPDATEINSTANCEREQUEST.fields_by_name[ - "update_mask" -].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK -_CREATECLUSTERREQUEST.fields_by_name[ - "cluster" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._CLUSTER -) -_LISTCLUSTERSRESPONSE.fields_by_name[ - "clusters" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._CLUSTER -) -_CREATEINSTANCEMETADATA.fields_by_name[ - "original_request" -].message_type = _CREATEINSTANCEREQUEST -_CREATEINSTANCEMETADATA.fields_by_name[ - "request_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_CREATEINSTANCEMETADATA.fields_by_name[ - "finish_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_UPDATEINSTANCEMETADATA.fields_by_name[ - "original_request" -].message_type = _PARTIALUPDATEINSTANCEREQUEST -_UPDATEINSTANCEMETADATA.fields_by_name[ - "request_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_UPDATEINSTANCEMETADATA.fields_by_name[ - "finish_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_CREATECLUSTERMETADATA.fields_by_name[ - "original_request" -].message_type = _CREATECLUSTERREQUEST -_CREATECLUSTERMETADATA.fields_by_name[ - "request_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_CREATECLUSTERMETADATA.fields_by_name[ - "finish_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_UPDATECLUSTERMETADATA.fields_by_name[ - "original_request" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._CLUSTER -) -_UPDATECLUSTERMETADATA.fields_by_name[ - "request_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_UPDATECLUSTERMETADATA.fields_by_name[ - "finish_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_CREATEAPPPROFILEREQUEST.fields_by_name[ - "app_profile" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._APPPROFILE -) -_LISTAPPPROFILESRESPONSE.fields_by_name[ - "app_profiles" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._APPPROFILE -) -_UPDATEAPPPROFILEREQUEST.fields_by_name[ - "app_profile" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._APPPROFILE -) -_UPDATEAPPPROFILEREQUEST.fields_by_name[ - "update_mask" -].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK -DESCRIPTOR.message_types_by_name["CreateInstanceRequest"] = _CREATEINSTANCEREQUEST 
-DESCRIPTOR.message_types_by_name["GetInstanceRequest"] = _GETINSTANCEREQUEST -DESCRIPTOR.message_types_by_name["ListInstancesRequest"] = _LISTINSTANCESREQUEST -DESCRIPTOR.message_types_by_name["ListInstancesResponse"] = _LISTINSTANCESRESPONSE -DESCRIPTOR.message_types_by_name[ - "PartialUpdateInstanceRequest" -] = _PARTIALUPDATEINSTANCEREQUEST -DESCRIPTOR.message_types_by_name["DeleteInstanceRequest"] = _DELETEINSTANCEREQUEST -DESCRIPTOR.message_types_by_name["CreateClusterRequest"] = _CREATECLUSTERREQUEST -DESCRIPTOR.message_types_by_name["GetClusterRequest"] = _GETCLUSTERREQUEST -DESCRIPTOR.message_types_by_name["ListClustersRequest"] = _LISTCLUSTERSREQUEST -DESCRIPTOR.message_types_by_name["ListClustersResponse"] = _LISTCLUSTERSRESPONSE -DESCRIPTOR.message_types_by_name["DeleteClusterRequest"] = _DELETECLUSTERREQUEST -DESCRIPTOR.message_types_by_name["CreateInstanceMetadata"] = _CREATEINSTANCEMETADATA -DESCRIPTOR.message_types_by_name["UpdateInstanceMetadata"] = _UPDATEINSTANCEMETADATA -DESCRIPTOR.message_types_by_name["CreateClusterMetadata"] = _CREATECLUSTERMETADATA -DESCRIPTOR.message_types_by_name["UpdateClusterMetadata"] = _UPDATECLUSTERMETADATA -DESCRIPTOR.message_types_by_name["CreateAppProfileRequest"] = _CREATEAPPPROFILEREQUEST -DESCRIPTOR.message_types_by_name["GetAppProfileRequest"] = _GETAPPPROFILEREQUEST -DESCRIPTOR.message_types_by_name["ListAppProfilesRequest"] = _LISTAPPPROFILESREQUEST -DESCRIPTOR.message_types_by_name["ListAppProfilesResponse"] = _LISTAPPPROFILESRESPONSE -DESCRIPTOR.message_types_by_name["UpdateAppProfileRequest"] = _UPDATEAPPPROFILEREQUEST -DESCRIPTOR.message_types_by_name["DeleteAppProfileRequest"] = _DELETEAPPPROFILEREQUEST -DESCRIPTOR.message_types_by_name["UpdateAppProfileMetadata"] = _UPDATEAPPPROFILEMETADATA -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -CreateInstanceRequest = _reflection.GeneratedProtocolMessageType( - "CreateInstanceRequest", - (_message.Message,), - { - "ClustersEntry": _reflection.GeneratedProtocolMessageType( - "ClustersEntry", - (_message.Message,), - { - "DESCRIPTOR": _CREATEINSTANCEREQUEST_CLUSTERSENTRY, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2" - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateInstanceRequest.ClustersEntry) - }, - ), - "DESCRIPTOR": _CREATEINSTANCEREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """Request message for BigtableInstanceAdmin.CreateInstance. - - Attributes: - parent: - Required. The unique name of the project in which to create - the new instance. Values are of the form - ``projects/{project}``. - instance_id: - Required. The ID to be used when referring to the new instance - within its project, e.g., just ``myinstance`` rather than - ``projects/myproject/instances/myinstance``. - instance: - Required. The instance to create. Fields marked ``OutputOnly`` - must be left blank. - clusters: - Required. The clusters to be created within the instance, - mapped by desired cluster ID, e.g., just ``mycluster`` rather - than ``projects/myproject/instances/myinstance/clusters/myclus - ter``. Fields marked ``OutputOnly`` must be left blank. - Currently, at most four clusters can be specified. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateInstanceRequest) - }, -) -_sym_db.RegisterMessage(CreateInstanceRequest) -_sym_db.RegisterMessage(CreateInstanceRequest.ClustersEntry) - -GetInstanceRequest = _reflection.GeneratedProtocolMessageType( - "GetInstanceRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETINSTANCEREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """Request message for BigtableInstanceAdmin.GetInstance. - - Attributes: - name: - Required. The unique name of the requested instance. Values - are of the form ``projects/{project}/instances/{instance}``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetInstanceRequest) - }, -) -_sym_db.RegisterMessage(GetInstanceRequest) - -ListInstancesRequest = _reflection.GeneratedProtocolMessageType( - "ListInstancesRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTINSTANCESREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """Request message for BigtableInstanceAdmin.ListInstances. - - Attributes: - parent: - Required. The unique name of the project for which a list of - instances is requested. Values are of the form - ``projects/{project}``. - page_token: - DEPRECATED: This field is unused and ignored. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListInstancesRequest) - }, -) -_sym_db.RegisterMessage(ListInstancesRequest) - -ListInstancesResponse = _reflection.GeneratedProtocolMessageType( - "ListInstancesResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTINSTANCESRESPONSE, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """Response message for BigtableInstanceAdmin.ListInstances. - - Attributes: - instances: - The list of requested instances. - failed_locations: - Locations from which Instance information could not be - retrieved, due to an outage or some other transient condition. - Instances whose Clusters are all in one of the failed - locations may be missing from ``instances``, and Instances - with at least one Cluster in a failed location may only have - partial information returned. Values are of the form - ``projects//locations/`` - next_page_token: - DEPRECATED: This field is unused and ignored. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListInstancesResponse) - }, -) -_sym_db.RegisterMessage(ListInstancesResponse) - -PartialUpdateInstanceRequest = _reflection.GeneratedProtocolMessageType( - "PartialUpdateInstanceRequest", - (_message.Message,), - { - "DESCRIPTOR": _PARTIALUPDATEINSTANCEREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """Request message for BigtableInstanceAdmin.PartialUpdateInstance. - - Attributes: - instance: - Required. The Instance which will (partially) replace the - current value. - update_mask: - Required. The subset of Instance fields which should be - replaced. Must be explicitly set. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.PartialUpdateInstanceRequest) - }, -) -_sym_db.RegisterMessage(PartialUpdateInstanceRequest) - -DeleteInstanceRequest = _reflection.GeneratedProtocolMessageType( - "DeleteInstanceRequest", - (_message.Message,), - { - "DESCRIPTOR": _DELETEINSTANCEREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """Request message for BigtableInstanceAdmin.DeleteInstance. - - Attributes: - name: - Required. The unique name of the instance to be deleted. - Values are of the form - ``projects/{project}/instances/{instance}``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteInstanceRequest) - }, -) -_sym_db.RegisterMessage(DeleteInstanceRequest) - -CreateClusterRequest = _reflection.GeneratedProtocolMessageType( - "CreateClusterRequest", - (_message.Message,), - { - "DESCRIPTOR": _CREATECLUSTERREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """Request message for BigtableInstanceAdmin.CreateCluster. - - Attributes: - parent: - Required. The unique name of the instance in which to create - the new cluster. Values are of the form - ``projects/{project}/instances/{instance}``. - cluster_id: - Required. The ID to be used when referring to the new cluster - within its instance, e.g., just ``mycluster`` rather than ``pr - ojects/myproject/instances/myinstance/clusters/mycluster``. - cluster: - Required. The cluster to be created. Fields marked - ``OutputOnly`` must be left blank. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateClusterRequest) - }, -) -_sym_db.RegisterMessage(CreateClusterRequest) - -GetClusterRequest = _reflection.GeneratedProtocolMessageType( - "GetClusterRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETCLUSTERREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """Request message for BigtableInstanceAdmin.GetCluster. - - Attributes: - name: - Required. The unique name of the requested cluster. Values are - of the form ``projects/{project}/instances/{instance}/clusters - /{cluster}``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetClusterRequest) - }, -) -_sym_db.RegisterMessage(GetClusterRequest) - -ListClustersRequest = _reflection.GeneratedProtocolMessageType( - "ListClustersRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTCLUSTERSREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """Request message for BigtableInstanceAdmin.ListClusters. - - Attributes: - parent: - Required. The unique name of the instance for which a list of - clusters is requested. Values are of the form - ``projects/{project}/instances/{instance}``. Use ``{instance} - = '-'`` to list Clusters for all Instances in a project, e.g., - ``projects/myproject/instances/-``. - page_token: - DEPRECATED: This field is unused and ignored. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListClustersRequest) - }, -) -_sym_db.RegisterMessage(ListClustersRequest) - -ListClustersResponse = _reflection.GeneratedProtocolMessageType( - "ListClustersResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTCLUSTERSRESPONSE, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """Response message for BigtableInstanceAdmin.ListClusters. 
- - Attributes: - clusters: - The list of requested clusters. - failed_locations: - Locations from which Cluster information could not be - retrieved, due to an outage or some other transient condition. - Clusters from these locations may be missing from - ``clusters``, or may only have partial information returned. - Values are of the form - ``projects//locations/`` - next_page_token: - DEPRECATED: This field is unused and ignored. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListClustersResponse) - }, -) -_sym_db.RegisterMessage(ListClustersResponse) - -DeleteClusterRequest = _reflection.GeneratedProtocolMessageType( - "DeleteClusterRequest", - (_message.Message,), - { - "DESCRIPTOR": _DELETECLUSTERREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """Request message for BigtableInstanceAdmin.DeleteCluster. - - Attributes: - name: - Required. The unique name of the cluster to be deleted. Values - are of the form ``projects/{project}/instances/{instance}/clus - ters/{cluster}``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteClusterRequest) - }, -) -_sym_db.RegisterMessage(DeleteClusterRequest) - -CreateInstanceMetadata = _reflection.GeneratedProtocolMessageType( - "CreateInstanceMetadata", - (_message.Message,), - { - "DESCRIPTOR": _CREATEINSTANCEMETADATA, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """The metadata for the Operation returned by CreateInstance. - - Attributes: - original_request: - The request that prompted the initiation of this - CreateInstance operation. - request_time: - The time at which the original request was received. - finish_time: - The time at which the operation failed or was completed - successfully. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateInstanceMetadata) - }, -) -_sym_db.RegisterMessage(CreateInstanceMetadata) - -UpdateInstanceMetadata = _reflection.GeneratedProtocolMessageType( - "UpdateInstanceMetadata", - (_message.Message,), - { - "DESCRIPTOR": _UPDATEINSTANCEMETADATA, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """The metadata for the Operation returned by UpdateInstance. - - Attributes: - original_request: - The request that prompted the initiation of this - UpdateInstance operation. - request_time: - The time at which the original request was received. - finish_time: - The time at which the operation failed or was completed - successfully. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.UpdateInstanceMetadata) - }, -) -_sym_db.RegisterMessage(UpdateInstanceMetadata) - -CreateClusterMetadata = _reflection.GeneratedProtocolMessageType( - "CreateClusterMetadata", - (_message.Message,), - { - "DESCRIPTOR": _CREATECLUSTERMETADATA, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """The metadata for the Operation returned by CreateCluster. - - Attributes: - original_request: - The request that prompted the initiation of this CreateCluster - operation. - request_time: - The time at which the original request was received. - finish_time: - The time at which the operation failed or was completed - successfully. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateClusterMetadata) - }, -) -_sym_db.RegisterMessage(CreateClusterMetadata) - -UpdateClusterMetadata = _reflection.GeneratedProtocolMessageType( - "UpdateClusterMetadata", - (_message.Message,), - { - "DESCRIPTOR": _UPDATECLUSTERMETADATA, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """The metadata for the Operation returned by UpdateCluster. - - Attributes: - original_request: - The request that prompted the initiation of this UpdateCluster - operation. - request_time: - The time at which the original request was received. - finish_time: - The time at which the operation failed or was completed - successfully. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.UpdateClusterMetadata) - }, -) -_sym_db.RegisterMessage(UpdateClusterMetadata) - -CreateAppProfileRequest = _reflection.GeneratedProtocolMessageType( - "CreateAppProfileRequest", - (_message.Message,), - { - "DESCRIPTOR": _CREATEAPPPROFILEREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """Request message for BigtableInstanceAdmin.CreateAppProfile. - - Attributes: - parent: - Required. The unique name of the instance in which to create - the new app profile. Values are of the form - ``projects/{project}/instances/{instance}``. - app_profile_id: - Required. The ID to be used when referring to the new app - profile within its instance, e.g., just ``myprofile`` rather - than ``projects/myproject/instances/myinstance/appProfiles/myp - rofile``. - app_profile: - Required. The app profile to be created. Fields marked - ``OutputOnly`` will be ignored. - ignore_warnings: - If true, ignore safety checks when creating the app profile. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateAppProfileRequest) - }, -) -_sym_db.RegisterMessage(CreateAppProfileRequest) - -GetAppProfileRequest = _reflection.GeneratedProtocolMessageType( - "GetAppProfileRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETAPPPROFILEREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """Request message for BigtableInstanceAdmin.GetAppProfile. - - Attributes: - name: - Required. The unique name of the requested app profile. Values - are of the form ``projects/{project}/instances/{instance}/appP - rofiles/{app_profile}``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetAppProfileRequest) - }, -) -_sym_db.RegisterMessage(GetAppProfileRequest) - -ListAppProfilesRequest = _reflection.GeneratedProtocolMessageType( - "ListAppProfilesRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTAPPPROFILESREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """Request message for BigtableInstanceAdmin.ListAppProfiles. - - Attributes: - parent: - Required. The unique name of the instance for which a list of - app profiles is requested. Values are of the form - ``projects/{project}/instances/{instance}``. Use ``{instance} - = '-'`` to list AppProfiles for all Instances in a project, - e.g., ``projects/myproject/instances/-``. - page_size: - Maximum number of results per page. A page_size of zero lets - the server choose the number of items to return. A page_size - which is strictly positive will return at most that many - items. A negative page_size will cause an error. 
Following - the first request, subsequent paginated calls are not required - to pass a page_size. If a page_size is set in subsequent - calls, it must match the page_size given in the first request. - page_token: - The value of ``next_page_token`` returned by a previous call. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListAppProfilesRequest) - }, -) -_sym_db.RegisterMessage(ListAppProfilesRequest) - -ListAppProfilesResponse = _reflection.GeneratedProtocolMessageType( - "ListAppProfilesResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTAPPPROFILESRESPONSE, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """Response message for BigtableInstanceAdmin.ListAppProfiles. - - Attributes: - app_profiles: - The list of requested app profiles. - next_page_token: - Set if not all app profiles could be returned in a single - response. Pass this value to ``page_token`` in another request - to get the next page of results. - failed_locations: - Locations from which AppProfile information could not be - retrieved, due to an outage or some other transient condition. - AppProfiles from these locations may be missing from - ``app_profiles``. Values are of the form - ``projects//locations/`` - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListAppProfilesResponse) - }, -) -_sym_db.RegisterMessage(ListAppProfilesResponse) - -UpdateAppProfileRequest = _reflection.GeneratedProtocolMessageType( - "UpdateAppProfileRequest", - (_message.Message,), - { - "DESCRIPTOR": _UPDATEAPPPROFILEREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """Request message for BigtableInstanceAdmin.UpdateAppProfile. - - Attributes: - app_profile: - Required. The app profile which will (partially) replace the - current value. - update_mask: - Required. The subset of app profile fields which should be - replaced. If unset, all fields will be replaced. - ignore_warnings: - If true, ignore safety checks when updating the app profile. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.UpdateAppProfileRequest) - }, -) -_sym_db.RegisterMessage(UpdateAppProfileRequest) - -DeleteAppProfileRequest = _reflection.GeneratedProtocolMessageType( - "DeleteAppProfileRequest", - (_message.Message,), - { - "DESCRIPTOR": _DELETEAPPPROFILEREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """Request message for BigtableInstanceAdmin.DeleteAppProfile. - - Attributes: - name: - Required. The unique name of the app profile to be deleted. - Values are of the form ``projects/{project}/instances/{instanc - e}/appProfiles/{app_profile}``. - ignore_warnings: - If true, ignore safety checks when deleting the app profile. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteAppProfileRequest) - }, -) -_sym_db.RegisterMessage(DeleteAppProfileRequest) - -UpdateAppProfileMetadata = _reflection.GeneratedProtocolMessageType( - "UpdateAppProfileMetadata", - (_message.Message,), - { - "DESCRIPTOR": _UPDATEAPPPROFILEMETADATA, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_instance_admin_pb2", - "__doc__": """The metadata for the Operation returned by UpdateAppProfile.""", - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.UpdateAppProfileMetadata) - }, -) -_sym_db.RegisterMessage(UpdateAppProfileMetadata) - - -DESCRIPTOR._options = None -_CREATEINSTANCEREQUEST_CLUSTERSENTRY._options = None -_CREATEINSTANCEREQUEST.fields_by_name["parent"]._options = None -_CREATEINSTANCEREQUEST.fields_by_name["instance_id"]._options = None -_CREATEINSTANCEREQUEST.fields_by_name["instance"]._options = None -_CREATEINSTANCEREQUEST.fields_by_name["clusters"]._options = None -_GETINSTANCEREQUEST.fields_by_name["name"]._options = None -_LISTINSTANCESREQUEST.fields_by_name["parent"]._options = None -_PARTIALUPDATEINSTANCEREQUEST.fields_by_name["instance"]._options = None -_PARTIALUPDATEINSTANCEREQUEST.fields_by_name["update_mask"]._options = None -_DELETEINSTANCEREQUEST.fields_by_name["name"]._options = None -_CREATECLUSTERREQUEST.fields_by_name["parent"]._options = None -_CREATECLUSTERREQUEST.fields_by_name["cluster_id"]._options = None -_CREATECLUSTERREQUEST.fields_by_name["cluster"]._options = None -_GETCLUSTERREQUEST.fields_by_name["name"]._options = None -_LISTCLUSTERSREQUEST.fields_by_name["parent"]._options = None -_DELETECLUSTERREQUEST.fields_by_name["name"]._options = None -_CREATEAPPPROFILEREQUEST.fields_by_name["parent"]._options = None -_CREATEAPPPROFILEREQUEST.fields_by_name["app_profile_id"]._options = None -_CREATEAPPPROFILEREQUEST.fields_by_name["app_profile"]._options = None -_GETAPPPROFILEREQUEST.fields_by_name["name"]._options = None -_LISTAPPPROFILESREQUEST.fields_by_name["parent"]._options = None -_UPDATEAPPPROFILEREQUEST.fields_by_name["app_profile"]._options = None -_UPDATEAPPPROFILEREQUEST.fields_by_name["update_mask"]._options = None -_DELETEAPPPROFILEREQUEST.fields_by_name["name"]._options = None - -_BIGTABLEINSTANCEADMIN = _descriptor.ServiceDescriptor( - name="BigtableInstanceAdmin", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin", - file=DESCRIPTOR, - index=0, - serialized_options=b"\312A\034bigtableadmin.googleapis.com\322A\367\002https://www.googleapis.com/auth/bigtable.admin,https://www.googleapis.com/auth/bigtable.admin.cluster,https://www.googleapis.com/auth/bigtable.admin.instance,https://www.googleapis.com/auth/cloud-bigtable.admin,https://www.googleapis.com/auth/cloud-bigtable.admin.cluster,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-only", - create_key=_descriptor._internal_create_key, - serialized_start=3558, - serialized_end=7416, - methods=[ - _descriptor.MethodDescriptor( - name="CreateInstance", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.CreateInstance", - index=0, - containing_service=None, - input_type=_CREATEINSTANCEREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b'\202\323\344\223\002&"!/v2/{parent=projects/*}/instances:\001*\332A$parent,instance_id,instance,clusters\312A"\n\010Instance\022\026CreateInstanceMetadata', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - 
name="GetInstance", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.GetInstance", - index=1, - containing_service=None, - input_type=_GETINSTANCEREQUEST, - output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._INSTANCE, - serialized_options=b"\202\323\344\223\002#\022!/v2/{name=projects/*/instances/*}\332A\004name", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="ListInstances", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.ListInstances", - index=2, - containing_service=None, - input_type=_LISTINSTANCESREQUEST, - output_type=_LISTINSTANCESRESPONSE, - serialized_options=b"\202\323\344\223\002#\022!/v2/{parent=projects/*}/instances\332A\006parent", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="UpdateInstance", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateInstance", - index=3, - containing_service=None, - input_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._INSTANCE, - output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._INSTANCE, - serialized_options=b"\202\323\344\223\002&\032!/v2/{name=projects/*/instances/*}:\001*", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="PartialUpdateInstance", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.PartialUpdateInstance", - index=4, - containing_service=None, - input_type=_PARTIALUPDATEINSTANCEREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b'\202\323\344\223\00262*/v2/{instance.name=projects/*/instances/*}:\010instance\332A\024instance,update_mask\312A"\n\010Instance\022\026UpdateInstanceMetadata', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="DeleteInstance", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteInstance", - index=5, - containing_service=None, - input_type=_DELETEINSTANCEREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=b"\202\323\344\223\002#*!/v2/{name=projects/*/instances/*}\332A\004name", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="CreateCluster", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.CreateCluster", - index=6, - containing_service=None, - input_type=_CREATECLUSTERREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b'\202\323\344\223\0027",/v2/{parent=projects/*/instances/*}/clusters:\007cluster\332A\031parent,cluster_id,cluster\312A \n\007Cluster\022\025CreateClusterMetadata', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="GetCluster", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.GetCluster", - index=7, - containing_service=None, - input_type=_GETCLUSTERREQUEST, - output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._CLUSTER, - serialized_options=b"\202\323\344\223\002.\022,/v2/{name=projects/*/instances/*/clusters/*}\332A\004name", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="ListClusters", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.ListClusters", - index=8, - containing_service=None, - input_type=_LISTCLUSTERSREQUEST, - output_type=_LISTCLUSTERSRESPONSE, - 
serialized_options=b"\202\323\344\223\002.\022,/v2/{parent=projects/*/instances/*}/clusters\332A\006parent", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="UpdateCluster", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateCluster", - index=9, - containing_service=None, - input_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._CLUSTER, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b"\202\323\344\223\0021\032,/v2/{name=projects/*/instances/*/clusters/*}:\001*\312A \n\007Cluster\022\025UpdateClusterMetadata", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="DeleteCluster", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteCluster", - index=10, - containing_service=None, - input_type=_DELETECLUSTERREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=b"\202\323\344\223\002.*,/v2/{name=projects/*/instances/*/clusters/*}\332A\004name", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="CreateAppProfile", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.CreateAppProfile", - index=11, - containing_service=None, - input_type=_CREATEAPPPROFILEREQUEST, - output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._APPPROFILE, - serialized_options=b'\202\323\344\223\002>"//v2/{parent=projects/*/instances/*}/appProfiles:\013app_profile\332A!parent,app_profile_id,app_profile', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="GetAppProfile", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.GetAppProfile", - index=12, - containing_service=None, - input_type=_GETAPPPROFILEREQUEST, - output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2._APPPROFILE, - serialized_options=b"\202\323\344\223\0021\022//v2/{name=projects/*/instances/*/appProfiles/*}\332A\004name", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="ListAppProfiles", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.ListAppProfiles", - index=13, - containing_service=None, - input_type=_LISTAPPPROFILESREQUEST, - output_type=_LISTAPPPROFILESRESPONSE, - serialized_options=b"\202\323\344\223\0021\022//v2/{parent=projects/*/instances/*}/appProfiles\332A\006parent", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="UpdateAppProfile", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateAppProfile", - index=14, - containing_service=None, - input_type=_UPDATEAPPPROFILEREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b"\202\323\344\223\002J2;/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}:\013app_profile\332A\027app_profile,update_mask\312A&\n\nAppProfile\022\030UpdateAppProfileMetadata", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="DeleteAppProfile", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteAppProfile", - index=15, - containing_service=None, - input_type=_DELETEAPPPROFILEREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=b"\202\323\344\223\0021*//v2/{name=projects/*/instances/*/appProfiles/*}\332A\004name", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="GetIamPolicy", - 
full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.GetIamPolicy", - index=16, - containing_service=None, - input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._GETIAMPOLICYREQUEST, - output_type=google_dot_iam_dot_v1_dot_policy__pb2._POLICY, - serialized_options=b'\202\323\344\223\0027"2/v2/{resource=projects/*/instances/*}:getIamPolicy:\001*\332A\010resource', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="SetIamPolicy", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.SetIamPolicy", - index=17, - containing_service=None, - input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._SETIAMPOLICYREQUEST, - output_type=google_dot_iam_dot_v1_dot_policy__pb2._POLICY, - serialized_options=b'\202\323\344\223\0027"2/v2/{resource=projects/*/instances/*}:setIamPolicy:\001*\332A\017resource,policy', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="TestIamPermissions", - full_name="google.bigtable.admin.v2.BigtableInstanceAdmin.TestIamPermissions", - index=18, - containing_service=None, - input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._TESTIAMPERMISSIONSREQUEST, - output_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._TESTIAMPERMISSIONSRESPONSE, - serialized_options=b'\202\323\344\223\002="8/v2/{resource=projects/*/instances/*}:testIamPermissions:\001*\332A\024resource,permissions', - create_key=_descriptor._internal_create_key, - ), - ], -) -_sym_db.RegisterServiceDescriptor(_BIGTABLEINSTANCEADMIN) - -DESCRIPTOR.services_by_name["BigtableInstanceAdmin"] = _BIGTABLEINSTANCEADMIN - -# @@protoc_insertion_point(module_scope) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py deleted file mode 100644 index 0337e5d4fcc7..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py +++ /dev/null @@ -1,880 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc - -from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2, -) -from google.cloud.bigtable_admin_v2.proto import ( - instance_pb2 as google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2, -) -from google.iam.v1 import iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2 -from google.iam.v1 import policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2 -from google.longrunning import ( - operations_pb2 as google_dot_longrunning_dot_operations__pb2, -) -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 - - -class BigtableInstanceAdminStub(object): - """Service for creating, configuring, and deleting Cloud Bigtable Instances and - Clusters. Provides access to the Instance and Cluster schemas only, not the - tables' metadata or data stored in those tables. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. 
- """ - self.CreateInstance = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateInstance", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateInstanceRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.GetInstance = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetInstance", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetInstanceRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Instance.FromString, - ) - self.ListInstances = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListInstances", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListInstancesRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListInstancesResponse.FromString, - ) - self.UpdateInstance = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateInstance", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Instance.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Instance.FromString, - ) - self.PartialUpdateInstance = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/PartialUpdateInstance", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.PartialUpdateInstanceRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.DeleteInstance = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteInstance", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteInstanceRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - self.CreateCluster = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateCluster", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateClusterRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.GetCluster = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetCluster", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetClusterRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Cluster.FromString, - ) - self.ListClusters = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListClusters", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListClustersRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListClustersResponse.FromString, - ) - self.UpdateCluster = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateCluster", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Cluster.SerializeToString, - 
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.DeleteCluster = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteCluster", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteClusterRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - self.CreateAppProfile = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateAppProfile", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateAppProfileRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.AppProfile.FromString, - ) - self.GetAppProfile = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetAppProfile", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetAppProfileRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.AppProfile.FromString, - ) - self.ListAppProfiles = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListAppProfiles", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListAppProfilesRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListAppProfilesResponse.FromString, - ) - self.UpdateAppProfile = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateAppProfile", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.UpdateAppProfileRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.DeleteAppProfile = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteAppProfile", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteAppProfileRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - self.GetIamPolicy = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetIamPolicy", - request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.SerializeToString, - response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, - ) - self.SetIamPolicy = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/SetIamPolicy", - request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.SerializeToString, - response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, - ) - self.TestIamPermissions = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableInstanceAdmin/TestIamPermissions", - request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.SerializeToString, - response_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.FromString, - ) - - -class BigtableInstanceAdminServicer(object): - """Service for creating, configuring, and deleting Cloud Bigtable Instances and - Clusters. Provides access to the Instance and Cluster schemas only, not the - tables' metadata or data stored in those tables. 
- """ - - def CreateInstance(self, request, context): - """Create an instance within a project.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetInstance(self, request, context): - """Gets information about an instance.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListInstances(self, request, context): - """Lists information about instances in a project.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def UpdateInstance(self, request, context): - """Updates an instance within a project. This method updates only the display - name and type for an Instance. To update other Instance properties, such as - labels, use PartialUpdateInstance. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def PartialUpdateInstance(self, request, context): - """Partially updates an instance within a project. This method can modify all - fields of an Instance and is the preferred way to update an Instance. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DeleteInstance(self, request, context): - """Delete an instance from a project.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def CreateCluster(self, request, context): - """Creates a cluster within an instance.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetCluster(self, request, context): - """Gets information about a cluster.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListClusters(self, request, context): - """Lists information about clusters in an instance.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def UpdateCluster(self, request, context): - """Updates a cluster within an instance.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DeleteCluster(self, request, context): - """Deletes a cluster from an instance.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def CreateAppProfile(self, request, context): - """Creates an app profile within an instance.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetAppProfile(self, request, context): - """Gets information about an app profile.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListAppProfiles(self, request, 
context): - """Lists information about app profiles in an instance.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def UpdateAppProfile(self, request, context): - """Updates an app profile within an instance.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DeleteAppProfile(self, request, context): - """Deletes an app profile from an instance.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetIamPolicy(self, request, context): - """Gets the access control policy for an instance resource. Returns an empty - policy if an instance exists but does not have a policy set. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def SetIamPolicy(self, request, context): - """Sets the access control policy on an instance resource. Replaces any - existing policy. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def TestIamPermissions(self, request, context): - """Returns permissions that the caller has on the specified instance resource.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - -def add_BigtableInstanceAdminServicer_to_server(servicer, server): - rpc_method_handlers = { - "CreateInstance": grpc.unary_unary_rpc_method_handler( - servicer.CreateInstance, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateInstanceRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "GetInstance": grpc.unary_unary_rpc_method_handler( - servicer.GetInstance, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetInstanceRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Instance.SerializeToString, - ), - "ListInstances": grpc.unary_unary_rpc_method_handler( - servicer.ListInstances, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListInstancesRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListInstancesResponse.SerializeToString, - ), - "UpdateInstance": grpc.unary_unary_rpc_method_handler( - servicer.UpdateInstance, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Instance.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Instance.SerializeToString, - ), - "PartialUpdateInstance": grpc.unary_unary_rpc_method_handler( - servicer.PartialUpdateInstance, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.PartialUpdateInstanceRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "DeleteInstance": grpc.unary_unary_rpc_method_handler( - 
servicer.DeleteInstance, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteInstanceRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - "CreateCluster": grpc.unary_unary_rpc_method_handler( - servicer.CreateCluster, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateClusterRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "GetCluster": grpc.unary_unary_rpc_method_handler( - servicer.GetCluster, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetClusterRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Cluster.SerializeToString, - ), - "ListClusters": grpc.unary_unary_rpc_method_handler( - servicer.ListClusters, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListClustersRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListClustersResponse.SerializeToString, - ), - "UpdateCluster": grpc.unary_unary_rpc_method_handler( - servicer.UpdateCluster, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Cluster.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "DeleteCluster": grpc.unary_unary_rpc_method_handler( - servicer.DeleteCluster, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteClusterRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - "CreateAppProfile": grpc.unary_unary_rpc_method_handler( - servicer.CreateAppProfile, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateAppProfileRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.AppProfile.SerializeToString, - ), - "GetAppProfile": grpc.unary_unary_rpc_method_handler( - servicer.GetAppProfile, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetAppProfileRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.AppProfile.SerializeToString, - ), - "ListAppProfiles": grpc.unary_unary_rpc_method_handler( - servicer.ListAppProfiles, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListAppProfilesRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListAppProfilesResponse.SerializeToString, - ), - "UpdateAppProfile": grpc.unary_unary_rpc_method_handler( - servicer.UpdateAppProfile, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.UpdateAppProfileRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "DeleteAppProfile": grpc.unary_unary_rpc_method_handler( - servicer.DeleteAppProfile, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteAppProfileRequest.FromString, 
- response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - "GetIamPolicy": grpc.unary_unary_rpc_method_handler( - servicer.GetIamPolicy, - request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.FromString, - response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString, - ), - "SetIamPolicy": grpc.unary_unary_rpc_method_handler( - servicer.SetIamPolicy, - request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.FromString, - response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString, - ), - "TestIamPermissions": grpc.unary_unary_rpc_method_handler( - servicer.TestIamPermissions, - request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.FromString, - response_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - "google.bigtable.admin.v2.BigtableInstanceAdmin", rpc_method_handlers - ) - server.add_generic_rpc_handlers((generic_handler,)) - - -# This class is part of an EXPERIMENTAL API. -class BigtableInstanceAdmin(object): - """Service for creating, configuring, and deleting Cloud Bigtable Instances and - Clusters. Provides access to the Instance and Cluster schemas only, not the - tables' metadata or data stored in those tables. - """ - - @staticmethod - def CreateInstance( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateInstance", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateInstanceRequest.SerializeToString, - google_dot_longrunning_dot_operations__pb2.Operation.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def GetInstance( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetInstance", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetInstanceRequest.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Instance.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def ListInstances( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListInstances", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListInstancesRequest.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListInstancesResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def UpdateInstance( - request, - target, - options=(), - channel_credentials=None, - 
call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateInstance", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Instance.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Instance.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def PartialUpdateInstance( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/PartialUpdateInstance", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.PartialUpdateInstanceRequest.SerializeToString, - google_dot_longrunning_dot_operations__pb2.Operation.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def DeleteInstance( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteInstance", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteInstanceRequest.SerializeToString, - google_dot_protobuf_dot_empty__pb2.Empty.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def CreateCluster( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateCluster", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateClusterRequest.SerializeToString, - google_dot_longrunning_dot_operations__pb2.Operation.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def GetCluster( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetCluster", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetClusterRequest.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Cluster.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def ListClusters( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListClusters", - 
google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListClustersRequest.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListClustersResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def UpdateCluster( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateCluster", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.Cluster.SerializeToString, - google_dot_longrunning_dot_operations__pb2.Operation.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def DeleteCluster( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteCluster", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteClusterRequest.SerializeToString, - google_dot_protobuf_dot_empty__pb2.Empty.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def CreateAppProfile( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateAppProfile", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.CreateAppProfileRequest.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.AppProfile.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def GetAppProfile( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetAppProfile", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.GetAppProfileRequest.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_instance__pb2.AppProfile.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def ListAppProfiles( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListAppProfiles", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListAppProfilesRequest.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.ListAppProfilesResponse.FromString, 
- options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def UpdateAppProfile( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateAppProfile", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.UpdateAppProfileRequest.SerializeToString, - google_dot_longrunning_dot_operations__pb2.Operation.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def DeleteAppProfile( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteAppProfile", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__instance__admin__pb2.DeleteAppProfileRequest.SerializeToString, - google_dot_protobuf_dot_empty__pb2.Empty.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def GetIamPolicy( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetIamPolicy", - google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.SerializeToString, - google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def SetIamPolicy( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/SetIamPolicy", - google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.SerializeToString, - google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def TestIamPermissions( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableInstanceAdmin/TestIamPermissions", - google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.SerializeToString, - google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto index 6f434a473557..d979dba597e6 100644 --- 
a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto @@ -72,8 +72,7 @@ service BigtableTableAdmin { // feature might be changed in backward-incompatible ways and is not // recommended for production use. It is not subject to any SLA or deprecation // policy. - rpc CreateTableFromSnapshot(CreateTableFromSnapshotRequest) - returns (google.longrunning.Operation) { + rpc CreateTableFromSnapshot(CreateTableFromSnapshotRequest) returns (google.longrunning.Operation) { option (google.api.http) = { post: "/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot" body: "*" @@ -135,8 +134,7 @@ service BigtableTableAdmin { // CheckConsistency to check whether mutations to the table that finished // before this call started have been replicated. The tokens will be available // for 90 days. - rpc GenerateConsistencyToken(GenerateConsistencyTokenRequest) - returns (GenerateConsistencyTokenResponse) { + rpc GenerateConsistencyToken(GenerateConsistencyTokenRequest) returns (GenerateConsistencyTokenResponse) { option (google.api.http) = { post: "/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken" body: "*" @@ -147,8 +145,7 @@ service BigtableTableAdmin { // Checks replication consistency based on a consistency token, that is, if // replication has caught up based on the conditions specified in the token // and the check request. - rpc CheckConsistency(CheckConsistencyRequest) - returns (CheckConsistencyResponse) { + rpc CheckConsistency(CheckConsistencyRequest) returns (CheckConsistencyResponse) { option (google.api.http) = { post: "/v2/{name=projects/*/instances/*/tables/*}:checkConsistency" body: "*" @@ -164,14 +161,12 @@ service BigtableTableAdmin { // feature might be changed in backward-incompatible ways and is not // recommended for production use. It is not subject to any SLA or deprecation // policy. - rpc SnapshotTable(SnapshotTableRequest) - returns (google.longrunning.Operation) { + rpc SnapshotTable(SnapshotTableRequest) returns (google.longrunning.Operation) { option (google.api.http) = { post: "/v2/{name=projects/*/instances/*/tables/*}:snapshot" body: "*" }; - option (google.api.method_signature) = - "name,cluster,snapshot_id,description"; + option (google.api.method_signature) = "name,cluster,snapshot_id,description"; option (google.longrunning.operation_info) = { response_type: "Snapshot" metadata_type: "SnapshotTableMetadata" @@ -220,24 +215,24 @@ service BigtableTableAdmin { option (google.api.method_signature) = "name"; } - // Starts creating a new Cloud Bigtable Backup. The returned backup + // Starts creating a new Cloud Bigtable Backup. The returned backup // [long-running operation][google.longrunning.Operation] can be used to // track creation of the backup. The // [metadata][google.longrunning.Operation.metadata] field type is // [CreateBackupMetadata][google.bigtable.admin.v2.CreateBackupMetadata]. The // [response][google.longrunning.Operation.response] field type is - // [Backup][google.bigtable.admin.v2.Backup], if successful. Cancelling the - // returned operation will stop the creation and delete the backup. + // [Backup][google.bigtable.admin.v2.Backup], if successful. Cancelling the returned operation will stop the + // creation and delete the backup. 
rpc CreateBackup(CreateBackupRequest) returns (google.longrunning.Operation) { option (google.api.http) = { post: "/v2/{parent=projects/*/instances/*/clusters/*}/backups" body: "backup" }; + option (google.api.method_signature) = "parent,backup_id,backup"; option (google.longrunning.operation_info) = { response_type: "Backup" metadata_type: "CreateBackupMetadata" }; - option (google.api.method_signature) = "parent,backup_id,backup"; } // Gets metadata on a pending or completed Cloud Bigtable Backup. @@ -275,11 +270,11 @@ service BigtableTableAdmin { } // Create a new table by restoring from a completed backup. The new table - // must be in the same instance as the instance containing the backup. The + // must be in the same instance as the instance containing the backup. The // returned table [long-running operation][google.longrunning.Operation] can - // be used to track the progress of the operation, and to cancel it. The + // be used to track the progress of the operation, and to cancel it. The // [metadata][google.longrunning.Operation.metadata] field type is - // [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. The + // [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. The // [response][google.longrunning.Operation.response] type is // [Table][google.bigtable.admin.v2.Table], if successful. rpc RestoreTable(RestoreTableRequest) returns (google.longrunning.Operation) { @@ -293,22 +288,24 @@ service BigtableTableAdmin { }; } - // Gets the access control policy for a resource. + // Gets the access control policy for a Table or Backup resource. // Returns an empty policy if the resource exists but does not have a policy // set. - rpc GetIamPolicy(google.iam.v1.GetIamPolicyRequest) - returns (google.iam.v1.Policy) { + rpc GetIamPolicy(google.iam.v1.GetIamPolicyRequest) returns (google.iam.v1.Policy) { option (google.api.http) = { post: "/v2/{resource=projects/*/instances/*/tables/*}:getIamPolicy" body: "*" + additional_bindings { + post: "/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:getIamPolicy" + body: "*" + } }; option (google.api.method_signature) = "resource"; } // Sets the access control policy on a Table or Backup resource. // Replaces any existing policy. - rpc SetIamPolicy(google.iam.v1.SetIamPolicyRequest) - returns (google.iam.v1.Policy) { + rpc SetIamPolicy(google.iam.v1.SetIamPolicyRequest) returns (google.iam.v1.Policy) { option (google.api.http) = { post: "/v2/{resource=projects/*/instances/*/tables/*}:setIamPolicy" body: "*" @@ -320,9 +317,8 @@ service BigtableTableAdmin { option (google.api.method_signature) = "resource,policy"; } - // Returns permissions that the caller has on the specified table resource. - rpc TestIamPermissions(google.iam.v1.TestIamPermissionsRequest) - returns (google.iam.v1.TestIamPermissionsResponse) { + // Returns permissions that the caller has on the specified Table or Backup resource. + rpc TestIamPermissions(google.iam.v1.TestIamPermissionsRequest) returns (google.iam.v1.TestIamPermissionsResponse) { option (google.api.http) = { post: "/v2/{resource=projects/*/instances/*/tables/*}:testIamPermissions" body: "*" @@ -335,6 +331,78 @@ service BigtableTableAdmin { } } +// The request for +// [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. +message RestoreTableRequest { + // Required. The name of the instance in which to create the restored + // table. This instance must be the parent of the source backup. Values are + // of the form `projects//instances/`. 
+ string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Instance" + } + ]; + + // Required. The id of the table to create and restore to. This + // table must not already exist. The `table_id` appended to + // `parent` forms the full table name of the form + // `projects//instances//tables/`. + string table_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The source from which to restore. + oneof source { + // Name of the backup from which to restore. Values are of the form + // `projects//instances//clusters//backups/`. + string backup = 3 [(google.api.resource_reference) = { + type: "bigtable.googleapis.com/Backup" + }]; + } +} + +// Metadata type for the long-running operation returned by +// [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. +message RestoreTableMetadata { + // Name of the table being created and restored to. + string name = 1; + + // The type of the restore source. + RestoreSourceType source_type = 2; + + // Information about the source used to restore the table, as specified by + // `source` in [RestoreTableRequest][google.bigtable.admin.v2.RestoreTableRequest]. + oneof source_info { + BackupInfo backup_info = 3; + } + + // If exists, the name of the long-running operation that will be used to + // track the post-restore optimization process to optimize the performance of + // the restored table. The metadata type of the long-running operation is + // [OptimizeRestoreTableMetadata][]. The response type is + // [Empty][google.protobuf.Empty]. This long-running operation may be + // automatically created by the system if applicable after the + // RestoreTable long-running operation completes successfully. This operation + // may not be created if the table is already optimized or the restore was + // not successful. + string optimize_table_operation_name = 4; + + // The progress of the [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable] + // operation. + OperationProgress progress = 5; +} + +// Metadata type for the long-running operation used to track the progress +// of optimizations performed on a newly restored table. This long-running +// operation is automatically created by the system after the successful +// completion of a table restore, and cannot be cancelled. +message OptimizeRestoredTableMetadata { + // Name of the restored table being optimized. + string name = 1; + + // The progress of the post-restore optimizations. + OperationProgress progress = 2; +} + // Request message for // [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] message CreateTableRequest { @@ -353,8 +421,8 @@ message CreateTableRequest { } ]; - // Required. The name by which the new table should be referred to within the - // parent instance, e.g., `foobar` rather than `{parent}/tables/foobar`. + // Required. The name by which the new table should be referred to within the parent + // instance, e.g., `foobar` rather than `{parent}/tables/foobar`. // Maximum 50 characters. string table_id = 2 [(google.api.field_behavior) = REQUIRED]; @@ -397,13 +465,13 @@ message CreateTableFromSnapshotRequest { } ]; - // Required. The name by which the new table should be referred to within the - // parent instance, e.g., `foobar` rather than `{parent}/tables/foobar`. + // Required. 
The name by which the new table should be referred to within the parent + // instance, e.g., `foobar` rather than `{parent}/tables/foobar`. string table_id = 2 [(google.api.field_behavior) = REQUIRED]; - // Required. The unique name of the snapshot from which to restore the table. - // The snapshot and the table must be in the same instance. Values are of the - // form + // Required. The unique name of the snapshot from which to restore the table. The + // snapshot and the table must be in the same instance. + // Values are of the form // `projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}`. string source_snapshot = 3 [ (google.api.field_behavior) = REQUIRED, @@ -421,7 +489,9 @@ message DropRowRangeRequest { // `projects/{project}/instances/{instance}/tables/{table}`. string name = 1 [ (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "bigtable.googleapis.com/Table" } + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Table" + } ]; // Delete all rows or by prefix. @@ -438,8 +508,8 @@ message DropRowRangeRequest { // Request message for // [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] message ListTablesRequest { - // Required. The unique name of the instance for which tables should be - // listed. Values are of the form `projects/{project}/instances/{instance}`. + // Required. The unique name of the instance for which tables should be listed. + // Values are of the form `projects/{project}/instances/{instance}`. string parent = 1 [ (google.api.field_behavior) = REQUIRED, (google.api.resource_reference) = { @@ -486,7 +556,9 @@ message GetTableRequest { // `projects/{project}/instances/{instance}/tables/{table}`. string name = 1 [ (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "bigtable.googleapis.com/Table" } + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Table" + } ]; // The view to be applied to the returned table's fields. @@ -502,7 +574,9 @@ message DeleteTableRequest { // `projects/{project}/instances/{instance}/tables/{table}`. string name = 1 [ (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "bigtable.googleapis.com/Table" } + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Table" + } ]; } @@ -535,26 +609,29 @@ message ModifyColumnFamiliesRequest { // `projects/{project}/instances/{instance}/tables/{table}`. string name = 1 [ (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "bigtable.googleapis.com/Table" } + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Table" + } ]; - // Required. Modifications to be atomically applied to the specified table's - // families. Entries are applied in order, meaning that earlier modifications - // can be masked by later ones (in the case of repeated updates to the same - // family, for example). - repeated Modification modifications = 2 - [(google.api.field_behavior) = REQUIRED]; + // Required. Modifications to be atomically applied to the specified table's families. + // Entries are applied in order, meaning that earlier modifications can be + // masked by later ones (in the case of repeated updates to the same family, + // for example). 
+ repeated Modification modifications = 2 [(google.api.field_behavior) = REQUIRED]; } // Request message for // [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] message GenerateConsistencyTokenRequest { - // Required. The unique name of the Table for which to create a consistency - // token. Values are of the form + // Required. The unique name of the Table for which to create a consistency token. + // Values are of the form // `projects/{project}/instances/{instance}/tables/{table}`. string name = 1 [ (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "bigtable.googleapis.com/Table" } + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Table" + } ]; } @@ -568,12 +645,14 @@ message GenerateConsistencyTokenResponse { // Request message for // [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] message CheckConsistencyRequest { - // Required. The unique name of the Table for which to check replication - // consistency. Values are of the form + // Required. The unique name of the Table for which to check replication consistency. + // Values are of the form // `projects/{project}/instances/{instance}/tables/{table}`. string name = 1 [ (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "bigtable.googleapis.com/Table" } + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Table" + } ]; // Required. The token created using GenerateConsistencyToken for the Table. @@ -601,7 +680,9 @@ message SnapshotTableRequest { // `projects/{project}/instances/{instance}/tables/{table}`. string name = 1 [ (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "bigtable.googleapis.com/Table" } + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Table" + } ]; // Required. The name of the cluster where the snapshot will be created in. @@ -614,9 +695,9 @@ message SnapshotTableRequest { } ]; - // Required. The ID by which the new snapshot should be referred to within the - // parent cluster, e.g., `mysnapshot` of the form: - // `[_a-zA-Z0-9][-_.a-zA-Z0-9]*` rather than + // Required. The ID by which the new snapshot should be referred to within the parent + // cluster, e.g., `mysnapshot` of the form: `[_a-zA-Z0-9][-_.a-zA-Z0-9]*` + // rather than // `projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/mysnapshot`. string snapshot_id = 3 [(google.api.field_behavior) = REQUIRED]; @@ -657,8 +738,8 @@ message GetSnapshotRequest { // feature might be changed in backward-incompatible ways and is not recommended // for production use. It is not subject to any SLA or deprecation policy. message ListSnapshotsRequest { - // Required. The unique name of the cluster for which snapshots should be - // listed. Values are of the form + // Required. The unique name of the cluster for which snapshots should be listed. + // Values are of the form // `projects/{project}/instances/{instance}/clusters/{cluster}`. // Use `{cluster} = '-'` to list snapshots for all clusters in an instance, // e.g., `projects/{project}/instances/{instance}/clusters/-`. @@ -748,8 +829,7 @@ message CreateTableFromSnapshotMetadata { google.protobuf.Timestamp finish_time = 3; } -// The request for -// [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup]. 
+// The request for [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup]. message CreateBackupRequest { // Required. This must be one of the clusters in the instance in which this // table is located. The backup will be stored in this cluster. Values are @@ -789,20 +869,7 @@ message CreateBackupMetadata { google.protobuf.Timestamp end_time = 4; } -// The request for -// [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup]. -message GetBackupRequest { - // Required. Name of the backup. - // Values are of the form - // `projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}`. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "bigtable.googleapis.com/Backup" } - ]; -} - -// The request for -// [UpdateBackup][google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup]. +// The request for [UpdateBackup][google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup]. message UpdateBackupRequest { // Required. The backup to update. `backup.name`, and the fields to be updated // as specified by `update_mask` are required. Other fields are ignored. @@ -815,26 +882,38 @@ message UpdateBackupRequest { // resource, not to the request message. The field mask must always be // specified; this prevents any future fields from being erased accidentally // by clients that do not know about them. - google.protobuf.FieldMask update_mask = 2 - [(google.api.field_behavior) = REQUIRED]; + google.protobuf.FieldMask update_mask = 2 [(google.api.field_behavior) = REQUIRED]; } -// The request for -// [DeleteBackup][google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup]. +// The request for [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup]. +message GetBackupRequest { + // Required. Name of the backup. + // Values are of the form + // `projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}`. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Backup" + } + ]; +} + +// The request for [DeleteBackup][google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup]. message DeleteBackupRequest { // Required. Name of the backup to delete. // Values are of the form // `projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}`. string name = 1 [ (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "bigtable.googleapis.com/Backup" } + (google.api.resource_reference) = { + type: "bigtable.googleapis.com/Backup" + } ]; } -// The request for -// [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. +// The request for [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. message ListBackupsRequest { - // Required. The cluster to list backups from. Values are of the + // Required. The cluster to list backups from. Values are of the // form `projects/{project}/instances/{instance}/clusters/{cluster}`. // Use `{cluster} = '-'` to list backups for all clusters in an instance, // e.g., `projects/{project}/instances/{instance}/clusters/-`. @@ -849,7 +928,7 @@ message ListBackupsRequest { // The expression must specify the field name, a comparison operator, // and the value that you want to use for filtering. The value must be a // string, a number, or a boolean. The comparison operator must be - // <, >, <=, >=, !=, =, or :. Colon ‘:’ represents a HAS operator which is + // <, >, <=, >=, !=, =, or :. 
Colon ':' represents a HAS operator which is // roughly synonymous with equality. Filter rules are case insensitive. // // The fields eligible for filtering are: @@ -880,9 +959,8 @@ message ListBackupsRequest { string filter = 2; // An expression for specifying the sort order of the results of the request. - // The string value should specify one or more fields in - // [Backup][google.bigtable.admin.v2.Backup]. The full syntax is described at - // https://aip.dev/132#ordering. + // The string value should specify one or more fields in [Backup][google.bigtable.admin.v2.Backup]. The full + // syntax is described at https://aip.dev/132#ordering. // // Fields supported are: // * name @@ -907,88 +985,19 @@ message ListBackupsRequest { int32 page_size = 4; // If non-empty, `page_token` should contain a - // [next_page_token][google.bigtable.admin.v2.ListBackupsResponse.next_page_token] - // from a previous - // [ListBackupsResponse][google.bigtable.admin.v2.ListBackupsResponse] to the - // same `parent` and with the same `filter`. + // [next_page_token][google.bigtable.admin.v2.ListBackupsResponse.next_page_token] from a + // previous [ListBackupsResponse][google.bigtable.admin.v2.ListBackupsResponse] to the same `parent` and with the same + // `filter`. string page_token = 5; } -// The response for -// [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. +// The response for [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. message ListBackupsResponse { // The list of matching backups. repeated Backup backups = 1; // `next_page_token` can be sent in a subsequent - // [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups] call - // to fetch more of the matching backups. + // [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups] call to fetch more + // of the matching backups. string next_page_token = 2; } - -// The request for -// [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. -message RestoreTableRequest { - // Required. The name of the instance in which to create the restored - // table. This instance must be the parent of the source backup. Values are - // of the form `projects//instances/`. - string parent = 1; - - // Required. The id of the table to create and restore to. This - // table must not already exist. The `table_id` appended to - // `parent` forms the full table name of the form - // `projects//instances//tables/`. - string table_id = 2; - - // Required. The source from which to restore. - oneof source { - // Name of the backup from which to restore. Values are of the form - // `projects//instances//clusters//backups/`. - string backup = 3; - } -} - -// Metadata type for the long-running operation returned by -// [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. -message RestoreTableMetadata { - // Name of the table being created and restored to. - string name = 1; - - // The type of the restore source. - RestoreSourceType source_type = 2; - - // Information about the source used to restore the table, as specified by - // `source` in - // [RestoreTableRequest][google.bigtable.admin.v2.RestoreTableRequest]. - oneof source_info { - BackupInfo backup_info = 3; - } - - // If exists, the name of the long-running operation that will be used to - // track the post-restore optimization process to optimize the performance of - // the restored table. The metadata type of the long-running operation is - // [OptimizeRestoreTableMetadata][]. 
The response type is - // [Empty][google.protobuf.Empty]. This long-running operation may be - // automatically created by the system if applicable after the - // RestoreTable long-running operation completes successfully. This operation - // may not be created if the table is already optimized or the restore was - // not successful. - string optimize_table_operation_name = 4; - - // The progress of the - // [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable] - // operation. - OperationProgress progress = 5; -} - -// Metadata type for the long-running operation used to track the progress -// of optimizations performed on a newly restored table. This long-running -// operation is automatically created by the system after the successful -// completion of a table restore, and cannot be cancelled. -message OptimizeRestoredTableMetadata { - // Name of the restored table being optimized. - string name = 1; - - // The progress of the post-restore optimizations. - OperationProgress progress = 2; -} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py deleted file mode 100644 index c7094eac20a3..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py +++ /dev/null @@ -1,3578 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto -"""Generated protocol buffer code.""" -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.api import client_pb2 as google_dot_api_dot_client__pb2 -from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 -from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 -from google.cloud.bigtable_admin_v2.proto import ( - common_pb2 as google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_common__pb2, -) -from google.cloud.bigtable_admin_v2.proto import ( - table_pb2 as google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2, -) -from google.iam.v1 import iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2 -from google.iam.v1 import policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2 -from google.longrunning import ( - operations_pb2 as google_dot_longrunning_dot_operations__pb2, -) -from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 -from google.protobuf import field_mask_pb2 as google_dot_protobuf_dot_field__mask__pb2 -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto", - package="google.bigtable.admin.v2", - syntax="proto3", - 
serialized_options=b'\n\034com.google.bigtable.admin.v2B\027BigtableTableAdminProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2\352\002"Google::Cloud::Bigtable::Admin::V2', - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n?google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto\x12\x18google.bigtable.admin.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x31google/cloud/bigtable_admin_v2/proto/common.proto\x1a\x30google/cloud/bigtable_admin_v2/proto/table.proto\x1a\x1egoogle/iam/v1/iam_policy.proto\x1a\x1agoogle/iam/v1/policy.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xfc\x01\n\x12\x43reateTableRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x15\n\x08table_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x33\n\x05table\x18\x03 \x01(\x0b\x32\x1f.google.bigtable.admin.v2.TableB\x03\xe0\x41\x02\x12J\n\x0einitial_splits\x18\x04 \x03(\x0b\x32\x32.google.bigtable.admin.v2.CreateTableRequest.Split\x1a\x14\n\x05Split\x12\x0b\n\x03key\x18\x01 \x01(\x0c"\xb4\x01\n\x1e\x43reateTableFromSnapshotRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x15\n\x08table_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x41\n\x0fsource_snapshot\x18\x03 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Snapshot"\x94\x01\n\x13\x44ropRowRangeRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x18\n\x0erow_key_prefix\x18\x02 \x01(\x0cH\x00\x12$\n\x1a\x64\x65lete_all_data_from_table\x18\x03 \x01(\x08H\x00\x42\x08\n\x06target"\xa8\x01\n\x11ListTablesRequest\x12\x38\n\x06parent\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Instance\x12\x32\n\x04view\x18\x02 \x01(\x0e\x32$.google.bigtable.admin.v2.Table.View\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t"^\n\x12ListTablesResponse\x12/\n\x06tables\x18\x01 \x03(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"z\n\x0fGetTableRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x32\n\x04view\x18\x02 \x01(\x0e\x32$.google.bigtable.admin.v2.Table.View"I\n\x12\x44\x65leteTableRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table"\xda\x02\n\x1bModifyColumnFamiliesRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12^\n\rmodifications\x18\x02 \x03(\x0b\x32\x42.google.bigtable.admin.v2.ModifyColumnFamiliesRequest.ModificationB\x03\xe0\x41\x02\x1a\xa5\x01\n\x0cModification\x12\n\n\x02id\x18\x01 \x01(\t\x12\x38\n\x06\x63reate\x18\x02 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamilyH\x00\x12\x38\n\x06update\x18\x03 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamilyH\x00\x12\x0e\n\x04\x64rop\x18\x04 \x01(\x08H\x00\x42\x05\n\x03mod"V\n\x1fGenerateConsistencyTokenRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table"=\n GenerateConsistencyTokenResponse\x12\x19\n\x11\x63onsistency_token\x18\x01 
\x01(\t"n\n\x17\x43heckConsistencyRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x1e\n\x11\x63onsistency_token\x18\x02 \x01(\tB\x03\xe0\x41\x02".\n\x18\x43heckConsistencyResponse\x12\x12\n\nconsistent\x18\x01 \x01(\x08"\xdc\x01\n\x14SnapshotTableRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x38\n\x07\x63luster\x18\x02 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster\x12\x18\n\x0bsnapshot_id\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12&\n\x03ttl\x18\x04 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x13\n\x0b\x64\x65scription\x18\x05 \x01(\t"L\n\x12GetSnapshotRequest\x12\x36\n\x04name\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Snapshot"v\n\x14ListSnapshotsRequest\x12\x37\n\x06parent\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t"g\n\x15ListSnapshotsResponse\x12\x35\n\tsnapshots\x18\x01 \x03(\x0b\x32".google.bigtable.admin.v2.Snapshot\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"O\n\x15\x44\x65leteSnapshotRequest\x12\x36\n\x04name\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n bigtable.googleapis.com/Snapshot"\xc4\x01\n\x15SnapshotTableMetadata\x12H\n\x10original_request\x18\x01 \x01(\x0b\x32..google.bigtable.admin.v2.SnapshotTableRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\xd8\x01\n\x1f\x43reateTableFromSnapshotMetadata\x12R\n\x10original_request\x18\x01 \x01(\x0b\x32\x38.google.bigtable.admin.v2.CreateTableFromSnapshotRequest\x12\x30\n\x0crequest_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x66inish_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"\x9d\x01\n\x13\x43reateBackupRequest\x12\x37\n\x06parent\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster\x12\x16\n\tbackup_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x35\n\x06\x62\x61\x63kup\x18\x03 \x01(\x0b\x32 .google.bigtable.admin.v2.BackupB\x03\xe0\x41\x02"\x98\x01\n\x14\x43reateBackupMetadata\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0csource_table\x18\x02 \x01(\t\x12.\n\nstart_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"H\n\x10GetBackupRequest\x12\x34\n\x04name\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1e\x62igtable.googleapis.com/Backup"\x82\x01\n\x13UpdateBackupRequest\x12\x35\n\x06\x62\x61\x63kup\x18\x01 \x01(\x0b\x32 .google.bigtable.admin.v2.BackupB\x03\xe0\x41\x02\x12\x34\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMaskB\x03\xe0\x41\x02"K\n\x13\x44\x65leteBackupRequest\x12\x34\n\x04name\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1e\x62igtable.googleapis.com/Backup"\x96\x01\n\x12ListBackupsRequest\x12\x37\n\x06parent\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x62igtable.googleapis.com/Cluster\x12\x0e\n\x06\x66ilter\x18\x02 \x01(\t\x12\x10\n\x08order_by\x18\x03 \x01(\t\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x05 \x01(\t"a\n\x13ListBackupsResponse\x12\x31\n\x07\x62\x61\x63kups\x18\x01 \x03(\x0b\x32 .google.bigtable.admin.v2.Backup\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"S\n\x13RestoreTableRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x10\n\x08table_id\x18\x02 \x01(\t\x12\x10\n\x06\x62\x61\x63kup\x18\x03 
\x01(\tH\x00\x42\x08\n\x06source"\x98\x02\n\x14RestoreTableMetadata\x12\x0c\n\x04name\x18\x01 \x01(\t\x12@\n\x0bsource_type\x18\x02 \x01(\x0e\x32+.google.bigtable.admin.v2.RestoreSourceType\x12;\n\x0b\x62\x61\x63kup_info\x18\x03 \x01(\x0b\x32$.google.bigtable.admin.v2.BackupInfoH\x00\x12%\n\x1doptimize_table_operation_name\x18\x04 \x01(\t\x12=\n\x08progress\x18\x05 \x01(\x0b\x32+.google.bigtable.admin.v2.OperationProgressB\r\n\x0bsource_info"l\n\x1dOptimizeRestoredTableMetadata\x12\x0c\n\x04name\x18\x01 \x01(\t\x12=\n\x08progress\x18\x02 \x01(\x0b\x32+.google.bigtable.admin.v2.OperationProgress2\xc8$\n\x12\x42igtableTableAdmin\x12\xab\x01\n\x0b\x43reateTable\x12,.google.bigtable.admin.v2.CreateTableRequest\x1a\x1f.google.bigtable.admin.v2.Table"M\x82\xd3\xe4\x93\x02/"*/v2/{parent=projects/*/instances/*}/tables:\x01*\xda\x41\x15parent,table_id,table\x12\x8a\x02\n\x17\x43reateTableFromSnapshot\x12\x38.google.bigtable.admin.v2.CreateTableFromSnapshotRequest\x1a\x1d.google.longrunning.Operation"\x95\x01\x82\xd3\xe4\x93\x02\x42"=/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot:\x01*\xda\x41\x1fparent,table_id,source_snapshot\xca\x41(\n\x05Table\x12\x1f\x43reateTableFromSnapshotMetadata\x12\xa4\x01\n\nListTables\x12+.google.bigtable.admin.v2.ListTablesRequest\x1a,.google.bigtable.admin.v2.ListTablesResponse";\x82\xd3\xe4\x93\x02,\x12*/v2/{parent=projects/*/instances/*}/tables\xda\x41\x06parent\x12\x91\x01\n\x08GetTable\x12).google.bigtable.admin.v2.GetTableRequest\x1a\x1f.google.bigtable.admin.v2.Table"9\x82\xd3\xe4\x93\x02,\x12*/v2/{name=projects/*/instances/*/tables/*}\xda\x41\x04name\x12\x8e\x01\n\x0b\x44\x65leteTable\x12,.google.bigtable.admin.v2.DeleteTableRequest\x1a\x16.google.protobuf.Empty"9\x82\xd3\xe4\x93\x02,**/v2/{name=projects/*/instances/*/tables/*}\xda\x41\x04name\x12\xcf\x01\n\x14ModifyColumnFamilies\x12\x35.google.bigtable.admin.v2.ModifyColumnFamiliesRequest\x1a\x1f.google.bigtable.admin.v2.Table"_\x82\xd3\xe4\x93\x02\x44"?/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies:\x01*\xda\x41\x12name,modifications\x12\x99\x01\n\x0c\x44ropRowRange\x12-.google.bigtable.admin.v2.DropRowRangeRequest\x1a\x16.google.protobuf.Empty"B\x82\xd3\xe4\x93\x02<"7/v2/{name=projects/*/instances/*/tables/*}:dropRowRange:\x01*\x12\xe8\x01\n\x18GenerateConsistencyToken\x12\x39.google.bigtable.admin.v2.GenerateConsistencyTokenRequest\x1a:.google.bigtable.admin.v2.GenerateConsistencyTokenResponse"U\x82\xd3\xe4\x93\x02H"C/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken:\x01*\xda\x41\x04name\x12\xda\x01\n\x10\x43heckConsistency\x12\x31.google.bigtable.admin.v2.CheckConsistencyRequest\x1a\x32.google.bigtable.admin.v2.CheckConsistencyResponse"_\x82\xd3\xe4\x93\x02@";/v2/{name=projects/*/instances/*/tables/*}:checkConsistency:\x01*\xda\x41\x16name,consistency_token\x12\xea\x01\n\rSnapshotTable\x12..google.bigtable.admin.v2.SnapshotTableRequest\x1a\x1d.google.longrunning.Operation"\x89\x01\x82\xd3\xe4\x93\x02\x38"3/v2/{name=projects/*/instances/*/tables/*}:snapshot:\x01*\xda\x41$name,cluster,snapshot_id,description\xca\x41!\n\x08Snapshot\x12\x15SnapshotTableMetadata\x12\xa8\x01\n\x0bGetSnapshot\x12,.google.bigtable.admin.v2.GetSnapshotRequest\x1a".google.bigtable.admin.v2.Snapshot"G\x82\xd3\xe4\x93\x02:\x12\x38/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}\xda\x41\x04name\x12\xbb\x01\n\rListSnapshots\x12..google.bigtable.admin.v2.ListSnapshotsRequest\x1a/.google.bigtable.admin.v2.ListSnapshotsResponse"I\x82\xd3\xe4\x93\x02:\x12\x38/v2/{parent=projects/*/in
stances/*/clusters/*}/snapshots\xda\x41\x06parent\x12\xa2\x01\n\x0e\x44\x65leteSnapshot\x12/.google.bigtable.admin.v2.DeleteSnapshotRequest\x1a\x16.google.protobuf.Empty"G\x82\xd3\xe4\x93\x02:*8/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}\xda\x41\x04name\x12\xe0\x01\n\x0c\x43reateBackup\x12-.google.bigtable.admin.v2.CreateBackupRequest\x1a\x1d.google.longrunning.Operation"\x81\x01\x82\xd3\xe4\x93\x02@"6/v2/{parent=projects/*/instances/*/clusters/*}/backups:\x06\x62\x61\x63kup\xca\x41\x1e\n\x06\x42\x61\x63kup\x12\x14\x43reateBackupMetadata\xda\x41\x17parent,backup_id,backup\x12\xa0\x01\n\tGetBackup\x12*.google.bigtable.admin.v2.GetBackupRequest\x1a .google.bigtable.admin.v2.Backup"E\x82\xd3\xe4\x93\x02\x38\x12\x36/v2/{name=projects/*/instances/*/clusters/*/backups/*}\xda\x41\x04name\x12\xc3\x01\n\x0cUpdateBackup\x12-.google.bigtable.admin.v2.UpdateBackupRequest\x1a .google.bigtable.admin.v2.Backup"b\x82\xd3\xe4\x93\x02G2=/v2/{backup.name=projects/*/instances/*/clusters/*/backups/*}:\x06\x62\x61\x63kup\xda\x41\x12\x62\x61\x63kup,update_mask\x12\x9c\x01\n\x0c\x44\x65leteBackup\x12-.google.bigtable.admin.v2.DeleteBackupRequest\x1a\x16.google.protobuf.Empty"E\x82\xd3\xe4\x93\x02\x38*6/v2/{name=projects/*/instances/*/clusters/*/backups/*}\xda\x41\x04name\x12\xb3\x01\n\x0bListBackups\x12,.google.bigtable.admin.v2.ListBackupsRequest\x1a-.google.bigtable.admin.v2.ListBackupsResponse"G\x82\xd3\xe4\x93\x02\x38\x12\x36/v2/{parent=projects/*/instances/*/clusters/*}/backups\xda\x41\x06parent\x12\xbb\x01\n\x0cRestoreTable\x12-.google.bigtable.admin.v2.RestoreTableRequest\x1a\x1d.google.longrunning.Operation"]\x82\xd3\xe4\x93\x02\x37"2/v2/{parent=projects/*/instances/*}/tables:restore:\x01*\xca\x41\x1d\n\x05Table\x12\x14RestoreTableMetadata\x12\x9c\x01\n\x0cGetIamPolicy\x12".google.iam.v1.GetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"Q\x82\xd3\xe4\x93\x02@";/v2/{resource=projects/*/instances/*/tables/*}:getIamPolicy:\x01*\xda\x41\x08resource\x12\xf3\x01\n\x0cSetIamPolicy\x12".google.iam.v1.SetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"\xa7\x01\x82\xd3\xe4\x93\x02\x8e\x01";/v2/{resource=projects/*/instances/*/tables/*}:setIamPolicy:\x01*ZL"G/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:setIamPolicy:\x01*\xda\x41\x0fresource,policy\x12\xa4\x02\n\x12TestIamPermissions\x12(.google.iam.v1.TestIamPermissionsRequest\x1a).google.iam.v1.TestIamPermissionsResponse"\xb8\x01\x82\xd3\xe4\x93\x02\x9a\x01"A/v2/{resource=projects/*/instances/*/tables/*}:testIamPermissions:\x01*ZR"M/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:testIamPermissions:\x01*\xda\x41\x14resource,permissions\x1a\xde\x02\xca\x41\x1c\x62igtableadmin.googleapis.com\xd2\x41\xbb\x02https://www.googleapis.com/auth/bigtable.admin,https://www.googleapis.com/auth/bigtable.admin.table,https://www.googleapis.com/auth/cloud-bigtable.admin,https://www.googleapis.com/auth/cloud-bigtable.admin.table,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-onlyB\xdf\x01\n\x1c\x63om.google.bigtable.admin.v2B\x17\x42igtableTableAdminProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2\xea\x02"Google::Cloud::Bigtable::Admin::V2b\x06proto3', - dependencies=[ - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - google_dot_api_dot_client__pb2.DESCRIPTOR, - google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, - google_dot_api_dot_resource__pb2.DESCRIPTOR, - 
google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_common__pb2.DESCRIPTOR, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.DESCRIPTOR, - google_dot_iam_dot_v1_dot_iam__policy__pb2.DESCRIPTOR, - google_dot_iam_dot_v1_dot_policy__pb2.DESCRIPTOR, - google_dot_longrunning_dot_operations__pb2.DESCRIPTOR, - google_dot_protobuf_dot_duration__pb2.DESCRIPTOR, - google_dot_protobuf_dot_empty__pb2.DESCRIPTOR, - google_dot_protobuf_dot_field__mask__pb2.DESCRIPTOR, - google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, - ], -) - - -_CREATETABLEREQUEST_SPLIT = _descriptor.Descriptor( - name="Split", - full_name="google.bigtable.admin.v2.CreateTableRequest.Split", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.bigtable.admin.v2.CreateTableRequest.Split.key", - index=0, - number=1, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=767, - serialized_end=787, -) - -_CREATETABLEREQUEST = _descriptor.Descriptor( - name="CreateTableRequest", - full_name="google.bigtable.admin.v2.CreateTableRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.bigtable.admin.v2.CreateTableRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Instance', - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="table_id", - full_name="google.bigtable.admin.v2.CreateTableRequest.table_id", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="table", - full_name="google.bigtable.admin.v2.CreateTableRequest.table", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="initial_splits", - full_name="google.bigtable.admin.v2.CreateTableRequest.initial_splits", - index=3, - number=4, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - 
nested_types=[ - _CREATETABLEREQUEST_SPLIT, - ], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=535, - serialized_end=787, -) - - -_CREATETABLEFROMSNAPSHOTREQUEST = _descriptor.Descriptor( - name="CreateTableFromSnapshotRequest", - full_name="google.bigtable.admin.v2.CreateTableFromSnapshotRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.bigtable.admin.v2.CreateTableFromSnapshotRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Instance', - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="table_id", - full_name="google.bigtable.admin.v2.CreateTableFromSnapshotRequest.table_id", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="source_snapshot", - full_name="google.bigtable.admin.v2.CreateTableFromSnapshotRequest.source_snapshot", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Snapshot', - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=790, - serialized_end=970, -) - - -_DROPROWRANGEREQUEST = _descriptor.Descriptor( - name="DropRowRangeRequest", - full_name="google.bigtable.admin.v2.DropRowRangeRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.DropRowRangeRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="row_key_prefix", - full_name="google.bigtable.admin.v2.DropRowRangeRequest.row_key_prefix", - index=1, - number=2, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="delete_all_data_from_table", - 
full_name="google.bigtable.admin.v2.DropRowRangeRequest.delete_all_data_from_table", - index=2, - number=3, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="target", - full_name="google.bigtable.admin.v2.DropRowRangeRequest.target", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=973, - serialized_end=1121, -) - - -_LISTTABLESREQUEST = _descriptor.Descriptor( - name="ListTablesRequest", - full_name="google.bigtable.admin.v2.ListTablesRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.bigtable.admin.v2.ListTablesRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Instance', - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="view", - full_name="google.bigtable.admin.v2.ListTablesRequest.view", - index=1, - number=2, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_size", - full_name="google.bigtable.admin.v2.ListTablesRequest.page_size", - index=2, - number=4, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.bigtable.admin.v2.ListTablesRequest.page_token", - index=3, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1124, - serialized_end=1292, -) - - -_LISTTABLESRESPONSE = _descriptor.Descriptor( - name="ListTablesResponse", - full_name="google.bigtable.admin.v2.ListTablesResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="tables", - full_name="google.bigtable.admin.v2.ListTablesResponse.tables", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - 
default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.bigtable.admin.v2.ListTablesResponse.next_page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1294, - serialized_end=1388, -) - - -_GETTABLEREQUEST = _descriptor.Descriptor( - name="GetTableRequest", - full_name="google.bigtable.admin.v2.GetTableRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.GetTableRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="view", - full_name="google.bigtable.admin.v2.GetTableRequest.view", - index=1, - number=2, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1390, - serialized_end=1512, -) - - -_DELETETABLEREQUEST = _descriptor.Descriptor( - name="DeleteTableRequest", - full_name="google.bigtable.admin.v2.DeleteTableRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.DeleteTableRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1514, - serialized_end=1587, -) - - -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION = _descriptor.Descriptor( - name="Modification", - full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - 
_descriptor.FieldDescriptor( - name="id", - full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.id", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="create", - full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.create", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="update", - full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.update", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="drop", - full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.drop", - index=3, - number=4, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="mod", - full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.mod", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=1771, - serialized_end=1936, -) - -_MODIFYCOLUMNFAMILIESREQUEST = _descriptor.Descriptor( - name="ModifyColumnFamiliesRequest", - full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="modifications", - full_name="google.bigtable.admin.v2.ModifyColumnFamiliesRequest.modifications", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[ - 
_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION, - ], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1590, - serialized_end=1936, -) - - -_GENERATECONSISTENCYTOKENREQUEST = _descriptor.Descriptor( - name="GenerateConsistencyTokenRequest", - full_name="google.bigtable.admin.v2.GenerateConsistencyTokenRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.GenerateConsistencyTokenRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1938, - serialized_end=2024, -) - - -_GENERATECONSISTENCYTOKENRESPONSE = _descriptor.Descriptor( - name="GenerateConsistencyTokenResponse", - full_name="google.bigtable.admin.v2.GenerateConsistencyTokenResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="consistency_token", - full_name="google.bigtable.admin.v2.GenerateConsistencyTokenResponse.consistency_token", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2026, - serialized_end=2087, -) - - -_CHECKCONSISTENCYREQUEST = _descriptor.Descriptor( - name="CheckConsistencyRequest", - full_name="google.bigtable.admin.v2.CheckConsistencyRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.CheckConsistencyRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="consistency_token", - full_name="google.bigtable.admin.v2.CheckConsistencyRequest.consistency_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - 
serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2089, - serialized_end=2199, -) - - -_CHECKCONSISTENCYRESPONSE = _descriptor.Descriptor( - name="CheckConsistencyResponse", - full_name="google.bigtable.admin.v2.CheckConsistencyResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="consistent", - full_name="google.bigtable.admin.v2.CheckConsistencyResponse.consistent", - index=0, - number=1, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2201, - serialized_end=2247, -) - - -_SNAPSHOTTABLEREQUEST = _descriptor.Descriptor( - name="SnapshotTableRequest", - full_name="google.bigtable.admin.v2.SnapshotTableRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.SnapshotTableRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cluster", - full_name="google.bigtable.admin.v2.SnapshotTableRequest.cluster", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A!\n\037bigtable.googleapis.com/Cluster", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="snapshot_id", - full_name="google.bigtable.admin.v2.SnapshotTableRequest.snapshot_id", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="ttl", - full_name="google.bigtable.admin.v2.SnapshotTableRequest.ttl", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="description", - full_name="google.bigtable.admin.v2.SnapshotTableRequest.description", - index=4, - number=5, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - 
is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2250, - serialized_end=2470, -) - - -_GETSNAPSHOTREQUEST = _descriptor.Descriptor( - name="GetSnapshotRequest", - full_name="google.bigtable.admin.v2.GetSnapshotRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.GetSnapshotRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Snapshot', - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2472, - serialized_end=2548, -) - - -_LISTSNAPSHOTSREQUEST = _descriptor.Descriptor( - name="ListSnapshotsRequest", - full_name="google.bigtable.admin.v2.ListSnapshotsRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.bigtable.admin.v2.ListSnapshotsRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A!\n\037bigtable.googleapis.com/Cluster", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_size", - full_name="google.bigtable.admin.v2.ListSnapshotsRequest.page_size", - index=1, - number=2, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.bigtable.admin.v2.ListSnapshotsRequest.page_token", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2550, - serialized_end=2668, -) - - -_LISTSNAPSHOTSRESPONSE = _descriptor.Descriptor( - name="ListSnapshotsResponse", - full_name="google.bigtable.admin.v2.ListSnapshotsResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="snapshots", - 
full_name="google.bigtable.admin.v2.ListSnapshotsResponse.snapshots", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.bigtable.admin.v2.ListSnapshotsResponse.next_page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2670, - serialized_end=2773, -) - - -_DELETESNAPSHOTREQUEST = _descriptor.Descriptor( - name="DeleteSnapshotRequest", - full_name="google.bigtable.admin.v2.DeleteSnapshotRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.DeleteSnapshotRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b'\340A\002\372A"\n bigtable.googleapis.com/Snapshot', - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2775, - serialized_end=2854, -) - - -_SNAPSHOTTABLEMETADATA = _descriptor.Descriptor( - name="SnapshotTableMetadata", - full_name="google.bigtable.admin.v2.SnapshotTableMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="original_request", - full_name="google.bigtable.admin.v2.SnapshotTableMetadata.original_request", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="request_time", - full_name="google.bigtable.admin.v2.SnapshotTableMetadata.request_time", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="finish_time", - full_name="google.bigtable.admin.v2.SnapshotTableMetadata.finish_time", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - 
serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2857, - serialized_end=3053, -) - - -_CREATETABLEFROMSNAPSHOTMETADATA = _descriptor.Descriptor( - name="CreateTableFromSnapshotMetadata", - full_name="google.bigtable.admin.v2.CreateTableFromSnapshotMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="original_request", - full_name="google.bigtable.admin.v2.CreateTableFromSnapshotMetadata.original_request", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="request_time", - full_name="google.bigtable.admin.v2.CreateTableFromSnapshotMetadata.request_time", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="finish_time", - full_name="google.bigtable.admin.v2.CreateTableFromSnapshotMetadata.finish_time", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3056, - serialized_end=3272, -) - - -_CREATEBACKUPREQUEST = _descriptor.Descriptor( - name="CreateBackupRequest", - full_name="google.bigtable.admin.v2.CreateBackupRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.bigtable.admin.v2.CreateBackupRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A!\n\037bigtable.googleapis.com/Cluster", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="backup_id", - full_name="google.bigtable.admin.v2.CreateBackupRequest.backup_id", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="backup", - full_name="google.bigtable.admin.v2.CreateBackupRequest.backup", - index=2, - number=3, - type=11, - 
cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3275, - serialized_end=3432, -) - - -_CREATEBACKUPMETADATA = _descriptor.Descriptor( - name="CreateBackupMetadata", - full_name="google.bigtable.admin.v2.CreateBackupMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.CreateBackupMetadata.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="source_table", - full_name="google.bigtable.admin.v2.CreateBackupMetadata.source_table", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="start_time", - full_name="google.bigtable.admin.v2.CreateBackupMetadata.start_time", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="end_time", - full_name="google.bigtable.admin.v2.CreateBackupMetadata.end_time", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3435, - serialized_end=3587, -) - - -_GETBACKUPREQUEST = _descriptor.Descriptor( - name="GetBackupRequest", - full_name="google.bigtable.admin.v2.GetBackupRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.GetBackupRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A \n\036bigtable.googleapis.com/Backup", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - 
serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3589, - serialized_end=3661, -) - - -_UPDATEBACKUPREQUEST = _descriptor.Descriptor( - name="UpdateBackupRequest", - full_name="google.bigtable.admin.v2.UpdateBackupRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="backup", - full_name="google.bigtable.admin.v2.UpdateBackupRequest.backup", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="update_mask", - full_name="google.bigtable.admin.v2.UpdateBackupRequest.update_mask", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3664, - serialized_end=3794, -) - - -_DELETEBACKUPREQUEST = _descriptor.Descriptor( - name="DeleteBackupRequest", - full_name="google.bigtable.admin.v2.DeleteBackupRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.DeleteBackupRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A \n\036bigtable.googleapis.com/Backup", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3796, - serialized_end=3871, -) - - -_LISTBACKUPSREQUEST = _descriptor.Descriptor( - name="ListBackupsRequest", - full_name="google.bigtable.admin.v2.ListBackupsRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.bigtable.admin.v2.ListBackupsRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A!\n\037bigtable.googleapis.com/Cluster", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="filter", - full_name="google.bigtable.admin.v2.ListBackupsRequest.filter", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - 
extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="order_by", - full_name="google.bigtable.admin.v2.ListBackupsRequest.order_by", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_size", - full_name="google.bigtable.admin.v2.ListBackupsRequest.page_size", - index=3, - number=4, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.bigtable.admin.v2.ListBackupsRequest.page_token", - index=4, - number=5, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3874, - serialized_end=4024, -) - - -_LISTBACKUPSRESPONSE = _descriptor.Descriptor( - name="ListBackupsResponse", - full_name="google.bigtable.admin.v2.ListBackupsResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="backups", - full_name="google.bigtable.admin.v2.ListBackupsResponse.backups", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.bigtable.admin.v2.ListBackupsResponse.next_page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=4026, - serialized_end=4123, -) - - -_RESTORETABLEREQUEST = _descriptor.Descriptor( - name="RestoreTableRequest", - full_name="google.bigtable.admin.v2.RestoreTableRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.bigtable.admin.v2.RestoreTableRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - 
enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="table_id", - full_name="google.bigtable.admin.v2.RestoreTableRequest.table_id", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="backup", - full_name="google.bigtable.admin.v2.RestoreTableRequest.backup", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="source", - full_name="google.bigtable.admin.v2.RestoreTableRequest.source", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=4125, - serialized_end=4208, -) - - -_RESTORETABLEMETADATA = _descriptor.Descriptor( - name="RestoreTableMetadata", - full_name="google.bigtable.admin.v2.RestoreTableMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.RestoreTableMetadata.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="source_type", - full_name="google.bigtable.admin.v2.RestoreTableMetadata.source_type", - index=1, - number=2, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="backup_info", - full_name="google.bigtable.admin.v2.RestoreTableMetadata.backup_info", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="optimize_table_operation_name", - full_name="google.bigtable.admin.v2.RestoreTableMetadata.optimize_table_operation_name", - index=3, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - 
create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="progress", - full_name="google.bigtable.admin.v2.RestoreTableMetadata.progress", - index=4, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="source_info", - full_name="google.bigtable.admin.v2.RestoreTableMetadata.source_info", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=4211, - serialized_end=4491, -) - - -_OPTIMIZERESTOREDTABLEMETADATA = _descriptor.Descriptor( - name="OptimizeRestoredTableMetadata", - full_name="google.bigtable.admin.v2.OptimizeRestoredTableMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.OptimizeRestoredTableMetadata.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="progress", - full_name="google.bigtable.admin.v2.OptimizeRestoredTableMetadata.progress", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=4493, - serialized_end=4601, -) - -_CREATETABLEREQUEST_SPLIT.containing_type = _CREATETABLEREQUEST -_CREATETABLEREQUEST.fields_by_name[ - "table" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._TABLE -) -_CREATETABLEREQUEST.fields_by_name[ - "initial_splits" -].message_type = _CREATETABLEREQUEST_SPLIT -_DROPROWRANGEREQUEST.oneofs_by_name["target"].fields.append( - _DROPROWRANGEREQUEST.fields_by_name["row_key_prefix"] -) -_DROPROWRANGEREQUEST.fields_by_name[ - "row_key_prefix" -].containing_oneof = _DROPROWRANGEREQUEST.oneofs_by_name["target"] -_DROPROWRANGEREQUEST.oneofs_by_name["target"].fields.append( - _DROPROWRANGEREQUEST.fields_by_name["delete_all_data_from_table"] -) -_DROPROWRANGEREQUEST.fields_by_name[ - "delete_all_data_from_table" -].containing_oneof = _DROPROWRANGEREQUEST.oneofs_by_name["target"] -_LISTTABLESREQUEST.fields_by_name[ - "view" -].enum_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._TABLE_VIEW -) -_LISTTABLESRESPONSE.fields_by_name[ - "tables" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._TABLE -) -_GETTABLEREQUEST.fields_by_name[ - "view" -].enum_type = ( - 
google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._TABLE_VIEW -) -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name[ - "create" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._COLUMNFAMILY -) -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name[ - "update" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._COLUMNFAMILY -) -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.containing_type = _MODIFYCOLUMNFAMILIESREQUEST -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name["mod"].fields.append( - _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name["create"] -) -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name[ - "create" -].containing_oneof = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name["mod"] -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name["mod"].fields.append( - _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name["update"] -) -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name[ - "update" -].containing_oneof = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name["mod"] -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name["mod"].fields.append( - _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name["drop"] -) -_MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.fields_by_name[ - "drop" -].containing_oneof = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION.oneofs_by_name["mod"] -_MODIFYCOLUMNFAMILIESREQUEST.fields_by_name[ - "modifications" -].message_type = _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION -_SNAPSHOTTABLEREQUEST.fields_by_name[ - "ttl" -].message_type = google_dot_protobuf_dot_duration__pb2._DURATION -_LISTSNAPSHOTSRESPONSE.fields_by_name[ - "snapshots" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._SNAPSHOT -) -_SNAPSHOTTABLEMETADATA.fields_by_name[ - "original_request" -].message_type = _SNAPSHOTTABLEREQUEST -_SNAPSHOTTABLEMETADATA.fields_by_name[ - "request_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_SNAPSHOTTABLEMETADATA.fields_by_name[ - "finish_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_CREATETABLEFROMSNAPSHOTMETADATA.fields_by_name[ - "original_request" -].message_type = _CREATETABLEFROMSNAPSHOTREQUEST -_CREATETABLEFROMSNAPSHOTMETADATA.fields_by_name[ - "request_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_CREATETABLEFROMSNAPSHOTMETADATA.fields_by_name[ - "finish_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_CREATEBACKUPREQUEST.fields_by_name[ - "backup" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._BACKUP -) -_CREATEBACKUPMETADATA.fields_by_name[ - "start_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_CREATEBACKUPMETADATA.fields_by_name[ - "end_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_UPDATEBACKUPREQUEST.fields_by_name[ - "backup" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._BACKUP -) -_UPDATEBACKUPREQUEST.fields_by_name[ - "update_mask" -].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK -_LISTBACKUPSRESPONSE.fields_by_name[ - "backups" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._BACKUP -) -_RESTORETABLEREQUEST.oneofs_by_name["source"].fields.append( - _RESTORETABLEREQUEST.fields_by_name["backup"] -) -_RESTORETABLEREQUEST.fields_by_name[ - "backup" 
-].containing_oneof = _RESTORETABLEREQUEST.oneofs_by_name["source"] -_RESTORETABLEMETADATA.fields_by_name[ - "source_type" -].enum_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._RESTORESOURCETYPE -) -_RESTORETABLEMETADATA.fields_by_name[ - "backup_info" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._BACKUPINFO -) -_RESTORETABLEMETADATA.fields_by_name[ - "progress" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_common__pb2._OPERATIONPROGRESS -) -_RESTORETABLEMETADATA.oneofs_by_name["source_info"].fields.append( - _RESTORETABLEMETADATA.fields_by_name["backup_info"] -) -_RESTORETABLEMETADATA.fields_by_name[ - "backup_info" -].containing_oneof = _RESTORETABLEMETADATA.oneofs_by_name["source_info"] -_OPTIMIZERESTOREDTABLEMETADATA.fields_by_name[ - "progress" -].message_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_common__pb2._OPERATIONPROGRESS -) -DESCRIPTOR.message_types_by_name["CreateTableRequest"] = _CREATETABLEREQUEST -DESCRIPTOR.message_types_by_name[ - "CreateTableFromSnapshotRequest" -] = _CREATETABLEFROMSNAPSHOTREQUEST -DESCRIPTOR.message_types_by_name["DropRowRangeRequest"] = _DROPROWRANGEREQUEST -DESCRIPTOR.message_types_by_name["ListTablesRequest"] = _LISTTABLESREQUEST -DESCRIPTOR.message_types_by_name["ListTablesResponse"] = _LISTTABLESRESPONSE -DESCRIPTOR.message_types_by_name["GetTableRequest"] = _GETTABLEREQUEST -DESCRIPTOR.message_types_by_name["DeleteTableRequest"] = _DELETETABLEREQUEST -DESCRIPTOR.message_types_by_name[ - "ModifyColumnFamiliesRequest" -] = _MODIFYCOLUMNFAMILIESREQUEST -DESCRIPTOR.message_types_by_name[ - "GenerateConsistencyTokenRequest" -] = _GENERATECONSISTENCYTOKENREQUEST -DESCRIPTOR.message_types_by_name[ - "GenerateConsistencyTokenResponse" -] = _GENERATECONSISTENCYTOKENRESPONSE -DESCRIPTOR.message_types_by_name["CheckConsistencyRequest"] = _CHECKCONSISTENCYREQUEST -DESCRIPTOR.message_types_by_name["CheckConsistencyResponse"] = _CHECKCONSISTENCYRESPONSE -DESCRIPTOR.message_types_by_name["SnapshotTableRequest"] = _SNAPSHOTTABLEREQUEST -DESCRIPTOR.message_types_by_name["GetSnapshotRequest"] = _GETSNAPSHOTREQUEST -DESCRIPTOR.message_types_by_name["ListSnapshotsRequest"] = _LISTSNAPSHOTSREQUEST -DESCRIPTOR.message_types_by_name["ListSnapshotsResponse"] = _LISTSNAPSHOTSRESPONSE -DESCRIPTOR.message_types_by_name["DeleteSnapshotRequest"] = _DELETESNAPSHOTREQUEST -DESCRIPTOR.message_types_by_name["SnapshotTableMetadata"] = _SNAPSHOTTABLEMETADATA -DESCRIPTOR.message_types_by_name[ - "CreateTableFromSnapshotMetadata" -] = _CREATETABLEFROMSNAPSHOTMETADATA -DESCRIPTOR.message_types_by_name["CreateBackupRequest"] = _CREATEBACKUPREQUEST -DESCRIPTOR.message_types_by_name["CreateBackupMetadata"] = _CREATEBACKUPMETADATA -DESCRIPTOR.message_types_by_name["GetBackupRequest"] = _GETBACKUPREQUEST -DESCRIPTOR.message_types_by_name["UpdateBackupRequest"] = _UPDATEBACKUPREQUEST -DESCRIPTOR.message_types_by_name["DeleteBackupRequest"] = _DELETEBACKUPREQUEST -DESCRIPTOR.message_types_by_name["ListBackupsRequest"] = _LISTBACKUPSREQUEST -DESCRIPTOR.message_types_by_name["ListBackupsResponse"] = _LISTBACKUPSRESPONSE -DESCRIPTOR.message_types_by_name["RestoreTableRequest"] = _RESTORETABLEREQUEST -DESCRIPTOR.message_types_by_name["RestoreTableMetadata"] = _RESTORETABLEMETADATA -DESCRIPTOR.message_types_by_name[ - "OptimizeRestoredTableMetadata" -] = _OPTIMIZERESTOREDTABLEMETADATA -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -CreateTableRequest = 
_reflection.GeneratedProtocolMessageType( - "CreateTableRequest", - (_message.Message,), - { - "Split": _reflection.GeneratedProtocolMessageType( - "Split", - (_message.Message,), - { - "DESCRIPTOR": _CREATETABLEREQUEST_SPLIT, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """An initial split point for a newly created table. - - Attributes: - key: - Row key to use as an initial tablet boundary. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateTableRequest.Split) - }, - ), - "DESCRIPTOR": _CREATETABLEREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.Creat - eTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] - - Attributes: - parent: - Required. The unique name of the instance in which to create - the table. Values are of the form - ``projects/{project}/instances/{instance}``. - table_id: - Required. The name by which the new table should be referred - to within the parent instance, e.g., ``foobar`` rather than - ``{parent}/tables/foobar``. Maximum 50 characters. - table: - Required. The Table to create. - initial_splits: - The optional list of row keys that will be used to initially - split the table into several tablets (tablets are similar to - HBase regions). Given two split keys, ``s1`` and ``s2``, three - tablets will be created, spanning the key ranges: ``[, s1), - [s1, s2), [s2, )``. Example: - Row keys := ``["a", "apple", - "custom", "customer_1", "customer_2",`` ``"other", "zz"]`` - - initial_split_keys := ``["apple", "customer_1", - "customer_2", "other"]`` - Key assignment: - Tablet 1 - ``[, apple) => {"a"}.`` - Tablet 2 - ``[apple, customer_1) => {"apple", "custom"}.`` - - Tablet 3 ``[customer_1, customer_2) => {"customer_1"}.`` - - Tablet 4 ``[customer_2, other) => {"customer_2"}.`` - - Tablet 5 ``[other, ) => {"other", "zz"}.`` - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateTableRequest) - }, -) -_sym_db.RegisterMessage(CreateTableRequest) -_sym_db.RegisterMessage(CreateTableRequest.Split) - -CreateTableFromSnapshotRequest = _reflection.GeneratedProtocolMessageType( - "CreateTableFromSnapshotRequest", - (_message.Message,), - { - "DESCRIPTOR": _CREATETABLEFROMSNAPSHOTREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.Creat - eTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.Create - TableFromSnapshot] Note: This is a private alpha release of Cloud - Bigtable snapshots. This feature is not currently available to most - Cloud Bigtable customers. This feature might be changed in backward- - incompatible ways and is not recommended for production use. It is not - subject to any SLA or deprecation policy. - - Attributes: - parent: - Required. The unique name of the instance in which to create - the table. Values are of the form - ``projects/{project}/instances/{instance}``. - table_id: - Required. The name by which the new table should be referred - to within the parent instance, e.g., ``foobar`` rather than - ``{parent}/tables/foobar``. - source_snapshot: - Required. The unique name of the snapshot from which to - restore the table. The snapshot and the table must be in the - same instance. Values are of the form ``projects/{project}/ins - tances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateTableFromSnapshotRequest) - }, -) -_sym_db.RegisterMessage(CreateTableFromSnapshotRequest) - -DropRowRangeRequest = _reflection.GeneratedProtocolMessageType( - "DropRowRangeRequest", - (_message.Message,), - { - "DESCRIPTOR": _DROPROWRANGEREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.DropR - owRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] - - Attributes: - name: - Required. The unique name of the table on which to drop a - range of rows. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - target: - Delete all rows or by prefix. - row_key_prefix: - Delete all rows that start with this row key prefix. Prefix - cannot be zero length. - delete_all_data_from_table: - Delete all rows in the table. Setting this to false is a no- - op. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DropRowRangeRequest) - }, -) -_sym_db.RegisterMessage(DropRowRangeRequest) - -ListTablesRequest = _reflection.GeneratedProtocolMessageType( - "ListTablesRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTTABLESREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.ListT - ables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] - - Attributes: - parent: - Required. The unique name of the instance for which tables - should be listed. Values are of the form - ``projects/{project}/instances/{instance}``. - view: - The view to be applied to the returned tables’ fields. Only - NAME_ONLY view (default) and REPLICATION_VIEW are supported. - page_size: - Maximum number of results per page. A page_size of zero lets - the server choose the number of items to return. A page_size - which is strictly positive will return at most that many - items. A negative page_size will cause an error. Following - the first request, subsequent paginated calls are not required - to pass a page_size. If a page_size is set in subsequent - calls, it must match the page_size given in the first request. - page_token: - The value of ``next_page_token`` returned by a previous call. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListTablesRequest) - }, -) -_sym_db.RegisterMessage(ListTablesRequest) - -ListTablesResponse = _reflection.GeneratedProtocolMessageType( - "ListTablesResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTTABLESRESPONSE, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Response message for [google.bigtable.admin.v2.BigtableTableAdmin.List - Tables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] - - Attributes: - tables: - The tables present in the requested instance. - next_page_token: - Set if not all tables could be returned in a single response. - Pass this value to ``page_token`` in another request to get - the next page of results. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListTablesResponse) - }, -) -_sym_db.RegisterMessage(ListTablesResponse) - -GetTableRequest = _reflection.GeneratedProtocolMessageType( - "GetTableRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETTABLEREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.GetTa - ble][google.bigtable.admin.v2.BigtableTableAdmin.GetTable] - - Attributes: - name: - Required. The unique name of the requested table. Values are - of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - view: - The view to be applied to the returned table’s fields. - Defaults to ``SCHEMA_VIEW`` if unspecified. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetTableRequest) - }, -) -_sym_db.RegisterMessage(GetTableRequest) - -DeleteTableRequest = _reflection.GeneratedProtocolMessageType( - "DeleteTableRequest", - (_message.Message,), - { - "DESCRIPTOR": _DELETETABLEREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.Delet - eTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable] - - Attributes: - name: - Required. The unique name of the table to be deleted. Values - are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteTableRequest) - }, -) -_sym_db.RegisterMessage(DeleteTableRequest) - -ModifyColumnFamiliesRequest = _reflection.GeneratedProtocolMessageType( - "ModifyColumnFamiliesRequest", - (_message.Message,), - { - "Modification": _reflection.GeneratedProtocolMessageType( - "Modification", - (_message.Message,), - { - "DESCRIPTOR": _MODIFYCOLUMNFAMILIESREQUEST_MODIFICATION, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """A create, update, or delete of a particular column family. - - Attributes: - id: - The ID of the column family to be modified. - mod: - Column familiy modifications. - create: - Create a new column family with the specified schema, or fail - if one already exists with the given ID. - update: - Update an existing column family to the specified schema, or - fail if no column family exists with the given ID. - drop: - Drop (delete) the column family with the given ID, or fail if - no such family exists. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification) - }, - ), - "DESCRIPTOR": _MODIFYCOLUMNFAMILIESREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.Modif - yColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyCol - umnFamilies] - - Attributes: - name: - Required. The unique name of the table whose families should - be modified. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - modifications: - Required. Modifications to be atomically applied to the - specified table’s families. Entries are applied in order, - meaning that earlier modifications can be masked by later ones - (in the case of repeated updates to the same family, for - example). 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ModifyColumnFamiliesRequest) - }, -) -_sym_db.RegisterMessage(ModifyColumnFamiliesRequest) -_sym_db.RegisterMessage(ModifyColumnFamiliesRequest.Modification) - -GenerateConsistencyTokenRequest = _reflection.GeneratedProtocolMessageType( - "GenerateConsistencyTokenRequest", - (_message.Message,), - { - "DESCRIPTOR": _GENERATECONSISTENCYTOKENREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.Gener - ateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.Gener - ateConsistencyToken] - - Attributes: - name: - Required. The unique name of the Table for which to create a - consistency token. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GenerateConsistencyTokenRequest) - }, -) -_sym_db.RegisterMessage(GenerateConsistencyTokenRequest) - -GenerateConsistencyTokenResponse = _reflection.GeneratedProtocolMessageType( - "GenerateConsistencyTokenResponse", - (_message.Message,), - { - "DESCRIPTOR": _GENERATECONSISTENCYTOKENRESPONSE, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Response message for [google.bigtable.admin.v2.BigtableTableAdmin.Gene - rateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.Gene - rateConsistencyToken] - - Attributes: - consistency_token: - The generated consistency token. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GenerateConsistencyTokenResponse) - }, -) -_sym_db.RegisterMessage(GenerateConsistencyTokenResponse) - -CheckConsistencyRequest = _reflection.GeneratedProtocolMessageType( - "CheckConsistencyRequest", - (_message.Message,), - { - "DESCRIPTOR": _CHECKCONSISTENCYREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.Check - Consistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsiste - ncy] - - Attributes: - name: - Required. The unique name of the Table for which to check - replication consistency. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - consistency_token: - Required. The token created using GenerateConsistencyToken for - the Table. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CheckConsistencyRequest) - }, -) -_sym_db.RegisterMessage(CheckConsistencyRequest) - -CheckConsistencyResponse = _reflection.GeneratedProtocolMessageType( - "CheckConsistencyResponse", - (_message.Message,), - { - "DESCRIPTOR": _CHECKCONSISTENCYRESPONSE, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Response message for [google.bigtable.admin.v2.BigtableTableAdmin.Chec - kConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsist - ency] - - Attributes: - consistent: - True only if the token is consistent. A token is consistent if - replication has caught up with the restrictions specified in - the request. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CheckConsistencyResponse) - }, -) -_sym_db.RegisterMessage(CheckConsistencyResponse) - -SnapshotTableRequest = _reflection.GeneratedProtocolMessageType( - "SnapshotTableRequest", - (_message.Message,), - { - "DESCRIPTOR": _SNAPSHOTTABLEREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.Snaps - hotTable][google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable] - Note: This is a private alpha release of Cloud Bigtable snapshots. - This feature is not currently available to most Cloud Bigtable - customers. This feature might be changed in backward-incompatible ways - and is not recommended for production use. It is not subject to any - SLA or deprecation policy. - - Attributes: - name: - Required. The unique name of the table to have the snapshot - taken. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - cluster: - Required. The name of the cluster where the snapshot will be - created in. Values are of the form ``projects/{project}/instan - ces/{instance}/clusters/{cluster}``. - snapshot_id: - Required. The ID by which the new snapshot should be referred - to within the parent cluster, e.g., ``mysnapshot`` of the - form: ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*`` rather than ``projects/{ - project}/instances/{instance}/clusters/{cluster}/snapshots/mys - napshot``. - ttl: - The amount of time that the new snapshot can stay active after - it is created. Once ‘ttl’ expires, the snapshot will get - deleted. The maximum amount of time a snapshot can stay active - is 7 days. If ‘ttl’ is not specified, the default value of 24 - hours will be used. - description: - Description of the snapshot. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.SnapshotTableRequest) - }, -) -_sym_db.RegisterMessage(SnapshotTableRequest) - -GetSnapshotRequest = _reflection.GeneratedProtocolMessageType( - "GetSnapshotRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETSNAPSHOTREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.GetSn - apshot][google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot] - Note: This is a private alpha release of Cloud Bigtable snapshots. - This feature is not currently available to most Cloud Bigtable - customers. This feature might be changed in backward-incompatible ways - and is not recommended for production use. It is not subject to any - SLA or deprecation policy. - - Attributes: - name: - Required. The unique name of the requested snapshot. Values - are of the form ``projects/{project}/instances/{instance}/clus - ters/{cluster}/snapshots/{snapshot}``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetSnapshotRequest) - }, -) -_sym_db.RegisterMessage(GetSnapshotRequest) - -ListSnapshotsRequest = _reflection.GeneratedProtocolMessageType( - "ListSnapshotsRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTSNAPSHOTSREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.ListS - napshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] - Note: This is a private alpha release of Cloud Bigtable snapshots. 
- This feature is not currently available to most Cloud Bigtable - customers. This feature might be changed in backward-incompatible ways - and is not recommended for production use. It is not subject to any - SLA or deprecation policy. - - Attributes: - parent: - Required. The unique name of the cluster for which snapshots - should be listed. Values are of the form ``projects/{project}/ - instances/{instance}/clusters/{cluster}``. Use ``{cluster} = - '-'`` to list snapshots for all clusters in an instance, e.g., - ``projects/{project}/instances/{instance}/clusters/-``. - page_size: - The maximum number of snapshots to return per page. CURRENTLY - UNIMPLEMENTED AND IGNORED. - page_token: - The value of ``next_page_token`` returned by a previous call. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListSnapshotsRequest) - }, -) -_sym_db.RegisterMessage(ListSnapshotsRequest) - -ListSnapshotsResponse = _reflection.GeneratedProtocolMessageType( - "ListSnapshotsResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTSNAPSHOTSRESPONSE, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Response message for [google.bigtable.admin.v2.BigtableTableAdmin.List - Snapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] - Note: This is a private alpha release of Cloud Bigtable snapshots. - This feature is not currently available to most Cloud Bigtable - customers. This feature might be changed in backward-incompatible ways - and is not recommended for production use. It is not subject to any - SLA or deprecation policy. - - Attributes: - snapshots: - The snapshots present in the requested cluster. - next_page_token: - Set if not all snapshots could be returned in a single - response. Pass this value to ``page_token`` in another request - to get the next page of results. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListSnapshotsResponse) - }, -) -_sym_db.RegisterMessage(ListSnapshotsResponse) - -DeleteSnapshotRequest = _reflection.GeneratedProtocolMessageType( - "DeleteSnapshotRequest", - (_message.Message,), - { - "DESCRIPTOR": _DELETESNAPSHOTREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Request message for [google.bigtable.admin.v2.BigtableTableAdmin.Delet - eSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot] - Note: This is a private alpha release of Cloud Bigtable snapshots. - This feature is not currently available to most Cloud Bigtable - customers. This feature might be changed in backward-incompatible ways - and is not recommended for production use. It is not subject to any - SLA or deprecation policy. - - Attributes: - name: - Required. The unique name of the snapshot to be deleted. - Values are of the form ``projects/{project}/instances/{instanc - e}/clusters/{cluster}/snapshots/{snapshot}``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteSnapshotRequest) - }, -) -_sym_db.RegisterMessage(DeleteSnapshotRequest) - -SnapshotTableMetadata = _reflection.GeneratedProtocolMessageType( - "SnapshotTableMetadata", - (_message.Message,), - { - "DESCRIPTOR": _SNAPSHOTTABLEMETADATA, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """The metadata for the Operation returned by SnapshotTable. Note: This - is a private alpha release of Cloud Bigtable snapshots. This feature - is not currently available to most Cloud Bigtable customers. 
This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or - deprecation policy. - - Attributes: - original_request: - The request that prompted the initiation of this SnapshotTable - operation. - request_time: - The time at which the original request was received. - finish_time: - The time at which the operation failed or was completed - successfully. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.SnapshotTableMetadata) - }, -) -_sym_db.RegisterMessage(SnapshotTableMetadata) - -CreateTableFromSnapshotMetadata = _reflection.GeneratedProtocolMessageType( - "CreateTableFromSnapshotMetadata", - (_message.Message,), - { - "DESCRIPTOR": _CREATETABLEFROMSNAPSHOTMETADATA, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """The metadata for the Operation returned by CreateTableFromSnapshot. - Note: This is a private alpha release of Cloud Bigtable snapshots. - This feature is not currently available to most Cloud Bigtable - customers. This feature might be changed in backward-incompatible ways - and is not recommended for production use. It is not subject to any - SLA or deprecation policy. - - Attributes: - original_request: - The request that prompted the initiation of this - CreateTableFromSnapshot operation. - request_time: - The time at which the original request was received. - finish_time: - The time at which the operation failed or was completed - successfully. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateTableFromSnapshotMetadata) - }, -) -_sym_db.RegisterMessage(CreateTableFromSnapshotMetadata) - -CreateBackupRequest = _reflection.GeneratedProtocolMessageType( - "CreateBackupRequest", - (_message.Message,), - { - "DESCRIPTOR": _CREATEBACKUPREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """The request for [CreateBackup][google.bigtable.admin.v2.BigtableTableA - dmin.CreateBackup]. - - Attributes: - parent: - Required. This must be one of the clusters in the instance in - which this table is located. The backup will be stored in this - cluster. Values are of the form ``projects/{project}/instances - /{instance}/clusters/{cluster}``. - backup_id: - Required. The id of the backup to be created. The - ``backup_id`` along with the parent ``parent`` are combined as - {parent}/backups/{backup_id} to create the full backup name, - of the form: ``projects/{project}/instances/{instance}/cluster - s/{cluster}/backups/{backup_id}``. This string must be between - 1 and 50 characters in length and match the regex [_a- - zA-Z0-9][-_.a-zA-Z0-9]*. - backup: - Required. The backup to create. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateBackupRequest) - }, -) -_sym_db.RegisterMessage(CreateBackupRequest) - -CreateBackupMetadata = _reflection.GeneratedProtocolMessageType( - "CreateBackupMetadata", - (_message.Message,), - { - "DESCRIPTOR": _CREATEBACKUPMETADATA, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Metadata type for the operation returned by [CreateBackup][google.bigt - able.admin.v2.BigtableTableAdmin.CreateBackup]. - - Attributes: - name: - The name of the backup being created. - source_table: - The name of the table the backup is created from. - start_time: - The time at which this operation started. 
- end_time: - If set, the time at which this operation finished or was - cancelled. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.CreateBackupMetadata) - }, -) -_sym_db.RegisterMessage(CreateBackupMetadata) - -GetBackupRequest = _reflection.GeneratedProtocolMessageType( - "GetBackupRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETBACKUPREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """The request for - [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup]. - - Attributes: - name: - Required. Name of the backup. Values are of the form ``project - s/{project}/instances/{instance}/clusters/{cluster}/backups/{b - ackup}``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GetBackupRequest) - }, -) -_sym_db.RegisterMessage(GetBackupRequest) - -UpdateBackupRequest = _reflection.GeneratedProtocolMessageType( - "UpdateBackupRequest", - (_message.Message,), - { - "DESCRIPTOR": _UPDATEBACKUPREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """The request for [UpdateBackup][google.bigtable.admin.v2.BigtableTableA - dmin.UpdateBackup]. - - Attributes: - backup: - Required. The backup to update. ``backup.name``, and the - fields to be updated as specified by ``update_mask`` are - required. Other fields are ignored. Update is only supported - for the following fields: \* ``backup.expire_time``. - update_mask: - Required. A mask specifying which fields (e.g. - ``expire_time``) in the Backup resource should be updated. - This mask is relative to the Backup resource, not to the - request message. The field mask must always be specified; this - prevents any future fields from being erased accidentally by - clients that do not know about them. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.UpdateBackupRequest) - }, -) -_sym_db.RegisterMessage(UpdateBackupRequest) - -DeleteBackupRequest = _reflection.GeneratedProtocolMessageType( - "DeleteBackupRequest", - (_message.Message,), - { - "DESCRIPTOR": _DELETEBACKUPREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """The request for [DeleteBackup][google.bigtable.admin.v2.BigtableTableA - dmin.DeleteBackup]. - - Attributes: - name: - Required. Name of the backup to delete. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/b - ackups/{backup}``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DeleteBackupRequest) - }, -) -_sym_db.RegisterMessage(DeleteBackupRequest) - -ListBackupsRequest = _reflection.GeneratedProtocolMessageType( - "ListBackupsRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTBACKUPSREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """The request for [ListBackups][google.bigtable.admin.v2.BigtableTableAd - min.ListBackups]. - - Attributes: - parent: - Required. The cluster to list backups from. Values are of the - form ``projects/{project}/instances/{instance}/clusters/{clust - er}``. Use ``{cluster} = '-'`` to list backups for all - clusters in an instance, e.g., - ``projects/{project}/instances/{instance}/clusters/-``. - filter: - A filter expression that filters backups listed in the - response. The expression must specify the field name, a - comparison operator, and the value that you want to use for - filtering. The value must be a string, a number, or a boolean. 
- The comparison operator must be <, >, <=, >=, !=, =, or :. - Colon ‘:’ represents a HAS operator which is roughly - synonymous with equality. Filter rules are case insensitive. - The fields eligible for filtering are: \* ``name`` \* - ``source_table`` \* ``state`` \* ``start_time`` (and values - are of the format YYYY-MM-DDTHH:MM:SSZ) \* ``end_time`` (and - values are of the format YYYY-MM-DDTHH:MM:SSZ) \* - ``expire_time`` (and values are of the format YYYY-MM- - DDTHH:MM:SSZ) \* ``size_bytes`` To filter on multiple - expressions, provide each separate expression within - parentheses. By default, each expression is an AND expression. - However, you can include AND, OR, and NOT expressions - explicitly. Some examples of using filters are: - - ``name:"exact"`` –> The backup’s name is the string “exact”. - - ``name:howl`` –> The backup’s name contains the string “howl”. - - ``source_table:prod`` –> The source_table’s name contains - the string “prod”. - ``state:CREATING`` –> The backup is - pending creation. - ``state:READY`` –> The backup is fully - created and ready for use. - ``(name:howl) AND (start_time < - \"2018-03-28T14:50:00Z\")`` –> The backup name contains the - string “howl” and start_time of the backup is before - 2018-03-28T14:50:00Z. - ``size_bytes > 10000000000`` –> The - backup’s size is greater than 10GB - order_by: - An expression for specifying the sort order of the results of - the request. The string value should specify one or more - fields in [Backup][google.bigtable.admin.v2.Backup]. The full - syntax is described at https://aip.dev/132#ordering. Fields - supported are: \* name \* source_table \* expire_time \* - start_time \* end_time \* size_bytes \* state For example, - “start_time”. The default sorting order is ascending. To - specify descending order for the field, a suffix " desc" - should be appended to the field name. For example, “start_time - desc”. Redundant space characters in the syntax are - insigificant. If order_by is empty, results will be sorted by - ``start_time`` in descending order starting from the most - recently created backup. - page_size: - Number of backups to be returned in the response. If 0 or - less, defaults to the server’s maximum allowed page size. - page_token: - If non-empty, ``page_token`` should contain a [next_page_token - ][google.bigtable.admin.v2.ListBackupsResponse.next_page_token - ] from a previous [ListBackupsResponse][google.bigtable.admin. - v2.ListBackupsResponse] to the same ``parent`` and with the - same ``filter``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListBackupsRequest) - }, -) -_sym_db.RegisterMessage(ListBackupsRequest) - -ListBackupsResponse = _reflection.GeneratedProtocolMessageType( - "ListBackupsResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTBACKUPSRESPONSE, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """The response for [ListBackups][google.bigtable.admin.v2.BigtableTableA - dmin.ListBackups]. - - Attributes: - backups: - The list of matching backups. - next_page_token: - \ ``next_page_token`` can be sent in a subsequent [ListBackups - ][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups] - call to fetch more of the matching backups. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ListBackupsResponse) - }, -) -_sym_db.RegisterMessage(ListBackupsResponse) - -RestoreTableRequest = _reflection.GeneratedProtocolMessageType( - "RestoreTableRequest", - (_message.Message,), - { - "DESCRIPTOR": _RESTORETABLEREQUEST, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """The request for [RestoreTable][google.bigtable.admin.v2.BigtableTableA - dmin.RestoreTable]. - - Attributes: - parent: - Required. The name of the instance in which to create the - restored table. This instance must be the parent of the source - backup. Values are of the form - ``projects//instances/``. - table_id: - Required. The id of the table to create and restore to. This - table must not already exist. The ``table_id`` appended to - ``parent`` forms the full table name of the form - ``projects//instances//tables/``. - source: - Required. The source from which to restore. - backup: - Name of the backup from which to restore. Values are of the - form ``projects//instances//clusters//backups/``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.RestoreTableRequest) - }, -) -_sym_db.RegisterMessage(RestoreTableRequest) - -RestoreTableMetadata = _reflection.GeneratedProtocolMessageType( - "RestoreTableMetadata", - (_message.Message,), - { - "DESCRIPTOR": _RESTORETABLEMETADATA, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Metadata type for the long-running operation returned by [RestoreTable - ][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. - - Attributes: - name: - Name of the table being created and restored to. - source_type: - The type of the restore source. - source_info: - Information about the source used to restore the table, as - specified by ``source`` in [RestoreTableRequest][google.bigtab - le.admin.v2.RestoreTableRequest]. - optimize_table_operation_name: - If exists, the name of the long-running operation that will be - used to track the post-restore optimization process to - optimize the performance of the restored table. The metadata - type of the long-running operation is - [OptimizeRestoreTableMetadata][]. The response type is - [Empty][google.protobuf.Empty]. This long-running operation - may be automatically created by the system if applicable after - the RestoreTable long-running operation completes - successfully. This operation may not be created if the table - is already optimized or the restore was not successful. - progress: - The progress of the [RestoreTable][google.bigtable.admin.v2.Bi - gtableTableAdmin.RestoreTable] operation. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.RestoreTableMetadata) - }, -) -_sym_db.RegisterMessage(RestoreTableMetadata) - -OptimizeRestoredTableMetadata = _reflection.GeneratedProtocolMessageType( - "OptimizeRestoredTableMetadata", - (_message.Message,), - { - "DESCRIPTOR": _OPTIMIZERESTOREDTABLEMETADATA, - "__module__": "google.cloud.bigtable_admin_v2.proto.bigtable_table_admin_pb2", - "__doc__": """Metadata type for the long-running operation used to track the - progress of optimizations performed on a newly restored table. This - long-running operation is automatically created by the system after - the successful completion of a table restore, and cannot be cancelled. - - Attributes: - name: - Name of the restored table being optimized. - progress: - The progress of the post-restore optimizations. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.OptimizeRestoredTableMetadata) - }, -) -_sym_db.RegisterMessage(OptimizeRestoredTableMetadata) - - -DESCRIPTOR._options = None -_CREATETABLEREQUEST.fields_by_name["parent"]._options = None -_CREATETABLEREQUEST.fields_by_name["table_id"]._options = None -_CREATETABLEREQUEST.fields_by_name["table"]._options = None -_CREATETABLEFROMSNAPSHOTREQUEST.fields_by_name["parent"]._options = None -_CREATETABLEFROMSNAPSHOTREQUEST.fields_by_name["table_id"]._options = None -_CREATETABLEFROMSNAPSHOTREQUEST.fields_by_name["source_snapshot"]._options = None -_DROPROWRANGEREQUEST.fields_by_name["name"]._options = None -_LISTTABLESREQUEST.fields_by_name["parent"]._options = None -_GETTABLEREQUEST.fields_by_name["name"]._options = None -_DELETETABLEREQUEST.fields_by_name["name"]._options = None -_MODIFYCOLUMNFAMILIESREQUEST.fields_by_name["name"]._options = None -_MODIFYCOLUMNFAMILIESREQUEST.fields_by_name["modifications"]._options = None -_GENERATECONSISTENCYTOKENREQUEST.fields_by_name["name"]._options = None -_CHECKCONSISTENCYREQUEST.fields_by_name["name"]._options = None -_CHECKCONSISTENCYREQUEST.fields_by_name["consistency_token"]._options = None -_SNAPSHOTTABLEREQUEST.fields_by_name["name"]._options = None -_SNAPSHOTTABLEREQUEST.fields_by_name["cluster"]._options = None -_SNAPSHOTTABLEREQUEST.fields_by_name["snapshot_id"]._options = None -_GETSNAPSHOTREQUEST.fields_by_name["name"]._options = None -_LISTSNAPSHOTSREQUEST.fields_by_name["parent"]._options = None -_DELETESNAPSHOTREQUEST.fields_by_name["name"]._options = None -_CREATEBACKUPREQUEST.fields_by_name["parent"]._options = None -_CREATEBACKUPREQUEST.fields_by_name["backup_id"]._options = None -_CREATEBACKUPREQUEST.fields_by_name["backup"]._options = None -_GETBACKUPREQUEST.fields_by_name["name"]._options = None -_UPDATEBACKUPREQUEST.fields_by_name["backup"]._options = None -_UPDATEBACKUPREQUEST.fields_by_name["update_mask"]._options = None -_DELETEBACKUPREQUEST.fields_by_name["name"]._options = None -_LISTBACKUPSREQUEST.fields_by_name["parent"]._options = None - -_BIGTABLETABLEADMIN = _descriptor.ServiceDescriptor( - name="BigtableTableAdmin", - full_name="google.bigtable.admin.v2.BigtableTableAdmin", - file=DESCRIPTOR, - index=0, - serialized_options=b"\312A\034bigtableadmin.googleapis.com\322A\273\002https://www.googleapis.com/auth/bigtable.admin,https://www.googleapis.com/auth/bigtable.admin.table,https://www.googleapis.com/auth/cloud-bigtable.admin,https://www.googleapis.com/auth/cloud-bigtable.admin.table,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-only", - create_key=_descriptor._internal_create_key, - serialized_start=4604, - serialized_end=9284, - methods=[ - _descriptor.MethodDescriptor( - name="CreateTable", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.CreateTable", - index=0, - containing_service=None, - input_type=_CREATETABLEREQUEST, - output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._TABLE, - serialized_options=b'\202\323\344\223\002/"*/v2/{parent=projects/*/instances/*}/tables:\001*\332A\025parent,table_id,table', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="CreateTableFromSnapshot", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot", - index=1, - containing_service=None, - input_type=_CREATETABLEFROMSNAPSHOTREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - 
serialized_options=b'\202\323\344\223\002B"=/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot:\001*\332A\037parent,table_id,source_snapshot\312A(\n\005Table\022\037CreateTableFromSnapshotMetadata', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="ListTables", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.ListTables", - index=2, - containing_service=None, - input_type=_LISTTABLESREQUEST, - output_type=_LISTTABLESRESPONSE, - serialized_options=b"\202\323\344\223\002,\022*/v2/{parent=projects/*/instances/*}/tables\332A\006parent", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="GetTable", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.GetTable", - index=3, - containing_service=None, - input_type=_GETTABLEREQUEST, - output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._TABLE, - serialized_options=b"\202\323\344\223\002,\022*/v2/{name=projects/*/instances/*/tables/*}\332A\004name", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="DeleteTable", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable", - index=4, - containing_service=None, - input_type=_DELETETABLEREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=b"\202\323\344\223\002,**/v2/{name=projects/*/instances/*/tables/*}\332A\004name", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="ModifyColumnFamilies", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies", - index=5, - containing_service=None, - input_type=_MODIFYCOLUMNFAMILIESREQUEST, - output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._TABLE, - serialized_options=b'\202\323\344\223\002D"?/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies:\001*\332A\022name,modifications', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="DropRowRange", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange", - index=6, - containing_service=None, - input_type=_DROPROWRANGEREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=b'\202\323\344\223\002<"7/v2/{name=projects/*/instances/*/tables/*}:dropRowRange:\001*', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="GenerateConsistencyToken", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken", - index=7, - containing_service=None, - input_type=_GENERATECONSISTENCYTOKENREQUEST, - output_type=_GENERATECONSISTENCYTOKENRESPONSE, - serialized_options=b'\202\323\344\223\002H"C/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken:\001*\332A\004name', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="CheckConsistency", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency", - index=8, - containing_service=None, - input_type=_CHECKCONSISTENCYREQUEST, - output_type=_CHECKCONSISTENCYRESPONSE, - serialized_options=b'\202\323\344\223\002@";/v2/{name=projects/*/instances/*/tables/*}:checkConsistency:\001*\332A\026name,consistency_token', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="SnapshotTable", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable", - index=9, - containing_service=None, - input_type=_SNAPSHOTTABLEREQUEST, - 
output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b'\202\323\344\223\0028"3/v2/{name=projects/*/instances/*/tables/*}:snapshot:\001*\332A$name,cluster,snapshot_id,description\312A!\n\010Snapshot\022\025SnapshotTableMetadata', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="GetSnapshot", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot", - index=10, - containing_service=None, - input_type=_GETSNAPSHOTREQUEST, - output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._SNAPSHOT, - serialized_options=b"\202\323\344\223\002:\0228/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}\332A\004name", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="ListSnapshots", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots", - index=11, - containing_service=None, - input_type=_LISTSNAPSHOTSREQUEST, - output_type=_LISTSNAPSHOTSRESPONSE, - serialized_options=b"\202\323\344\223\002:\0228/v2/{parent=projects/*/instances/*/clusters/*}/snapshots\332A\006parent", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="DeleteSnapshot", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot", - index=12, - containing_service=None, - input_type=_DELETESNAPSHOTREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=b"\202\323\344\223\002:*8/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}\332A\004name", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="CreateBackup", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup", - index=13, - containing_service=None, - input_type=_CREATEBACKUPREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b'\202\323\344\223\002@"6/v2/{parent=projects/*/instances/*/clusters/*}/backups:\006backup\312A\036\n\006Backup\022\024CreateBackupMetadata\332A\027parent,backup_id,backup', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="GetBackup", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.GetBackup", - index=14, - containing_service=None, - input_type=_GETBACKUPREQUEST, - output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._BACKUP, - serialized_options=b"\202\323\344\223\0028\0226/v2/{name=projects/*/instances/*/clusters/*/backups/*}\332A\004name", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="UpdateBackup", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup", - index=15, - containing_service=None, - input_type=_UPDATEBACKUPREQUEST, - output_type=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2._BACKUP, - serialized_options=b"\202\323\344\223\002G2=/v2/{backup.name=projects/*/instances/*/clusters/*/backups/*}:\006backup\332A\022backup,update_mask", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="DeleteBackup", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup", - index=16, - containing_service=None, - input_type=_DELETEBACKUPREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=b"\202\323\344\223\0028*6/v2/{name=projects/*/instances/*/clusters/*/backups/*}\332A\004name", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - 
name="ListBackups", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.ListBackups", - index=17, - containing_service=None, - input_type=_LISTBACKUPSREQUEST, - output_type=_LISTBACKUPSRESPONSE, - serialized_options=b"\202\323\344\223\0028\0226/v2/{parent=projects/*/instances/*/clusters/*}/backups\332A\006parent", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="RestoreTable", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable", - index=18, - containing_service=None, - input_type=_RESTORETABLEREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b'\202\323\344\223\0027"2/v2/{parent=projects/*/instances/*}/tables:restore:\001*\312A\035\n\005Table\022\024RestoreTableMetadata', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="GetIamPolicy", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.GetIamPolicy", - index=19, - containing_service=None, - input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._GETIAMPOLICYREQUEST, - output_type=google_dot_iam_dot_v1_dot_policy__pb2._POLICY, - serialized_options=b'\202\323\344\223\002@";/v2/{resource=projects/*/instances/*/tables/*}:getIamPolicy:\001*\332A\010resource', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="SetIamPolicy", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.SetIamPolicy", - index=20, - containing_service=None, - input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._SETIAMPOLICYREQUEST, - output_type=google_dot_iam_dot_v1_dot_policy__pb2._POLICY, - serialized_options=b'\202\323\344\223\002\216\001";/v2/{resource=projects/*/instances/*/tables/*}:setIamPolicy:\001*ZL"G/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:setIamPolicy:\001*\332A\017resource,policy', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="TestIamPermissions", - full_name="google.bigtable.admin.v2.BigtableTableAdmin.TestIamPermissions", - index=21, - containing_service=None, - input_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._TESTIAMPERMISSIONSREQUEST, - output_type=google_dot_iam_dot_v1_dot_iam__policy__pb2._TESTIAMPERMISSIONSRESPONSE, - serialized_options=b'\202\323\344\223\002\232\001"A/v2/{resource=projects/*/instances/*/tables/*}:testIamPermissions:\001*ZR"M/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:testIamPermissions:\001*\332A\024resource,permissions', - create_key=_descriptor._internal_create_key, - ), - ], -) -_sym_db.RegisterServiceDescriptor(_BIGTABLETABLEADMIN) - -DESCRIPTOR.services_by_name["BigtableTableAdmin"] = _BIGTABLETABLEADMIN - -# @@protoc_insertion_point(module_scope) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py deleted file mode 100644 index 949de429e0de..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py +++ /dev/null @@ -1,1083 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
-"""Client and server classes corresponding to protobuf-defined services.""" -import grpc - -from google.cloud.bigtable_admin_v2.proto import ( - bigtable_table_admin_pb2 as google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2, -) -from google.cloud.bigtable_admin_v2.proto import ( - table_pb2 as google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2, -) -from google.iam.v1 import iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2 -from google.iam.v1 import policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2 -from google.longrunning import ( - operations_pb2 as google_dot_longrunning_dot_operations__pb2, -) -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 - - -class BigtableTableAdminStub(object): - """Service for creating, configuring, and deleting Cloud Bigtable tables. - - - Provides access to the table schemas only, not the data stored within - the tables. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. - """ - self.CreateTable = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTable", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.FromString, - ) - self.CreateTableFromSnapshot = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTableFromSnapshot", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableFromSnapshotRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.ListTables = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/ListTables", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesResponse.FromString, - ) - self.GetTable = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/GetTable", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetTableRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.FromString, - ) - self.DeleteTable = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteTableRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - self.ModifyColumnFamilies = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/ModifyColumnFamilies", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ModifyColumnFamiliesRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.FromString, - ) - self.DropRowRange = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/DropRowRange", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DropRowRangeRequest.SerializeToString, - 
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - self.GenerateConsistencyToken = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/GenerateConsistencyToken", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenResponse.FromString, - ) - self.CheckConsistency = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/CheckConsistency", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyResponse.FromString, - ) - self.SnapshotTable = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/SnapshotTable", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.SnapshotTableRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.GetSnapshot = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/GetSnapshot", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetSnapshotRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Snapshot.FromString, - ) - self.ListSnapshots = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/ListSnapshots", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsResponse.FromString, - ) - self.DeleteSnapshot = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteSnapshot", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteSnapshotRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - self.CreateBackup = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/CreateBackup", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateBackupRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.GetBackup = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/GetBackup", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetBackupRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Backup.FromString, - ) - self.UpdateBackup = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateBackup", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.UpdateBackupRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Backup.FromString, - ) - self.DeleteBackup = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteBackup", - 
request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteBackupRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - self.ListBackups = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/ListBackups", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListBackupsRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListBackupsResponse.FromString, - ) - self.RestoreTable = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/RestoreTable", - request_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.RestoreTableRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.GetIamPolicy = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/GetIamPolicy", - request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.SerializeToString, - response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, - ) - self.SetIamPolicy = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/SetIamPolicy", - request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.SerializeToString, - response_deserializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, - ) - self.TestIamPermissions = channel.unary_unary( - "/google.bigtable.admin.v2.BigtableTableAdmin/TestIamPermissions", - request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.SerializeToString, - response_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.FromString, - ) - - -class BigtableTableAdminServicer(object): - """Service for creating, configuring, and deleting Cloud Bigtable tables. - - - Provides access to the table schemas only, not the data stored within - the tables. - """ - - def CreateTable(self, request, context): - """Creates a new table in the specified instance. - The table can be created with a full set of initial column families, - specified in the request. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def CreateTableFromSnapshot(self, request, context): - """Creates a new table from the specified snapshot. The target table must - not exist. The snapshot and the table must be in the same instance. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. 
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListTables(self, request, context): - """Lists all tables served from a specified instance.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetTable(self, request, context): - """Gets metadata information about the specified table.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DeleteTable(self, request, context): - """Permanently deletes a specified table and all of its data.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ModifyColumnFamilies(self, request, context): - """Performs a series of column family modifications on the specified table. - Either all or none of the modifications will occur before this method - returns, but data requests received prior to that point may see a table - where only some modifications have taken effect. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DropRowRange(self, request, context): - """Permanently drop/delete a row range from a specified table. The request can - specify whether to delete all rows in a table, or only those that match a - particular prefix. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GenerateConsistencyToken(self, request, context): - """Generates a consistency token for a Table, which can be used in - CheckConsistency to check whether mutations to the table that finished - before this call started have been replicated. The tokens will be available - for 90 days. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def CheckConsistency(self, request, context): - """Checks replication consistency based on a consistency token, that is, if - replication has caught up based on the conditions specified in the token - and the check request. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def SnapshotTable(self, request, context): - """Creates a new snapshot in the specified cluster from the specified - source table. The cluster and the table must be in the same instance. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetSnapshot(self, request, context): - """Gets metadata information about the specified snapshot. - - Note: This is a private alpha release of Cloud Bigtable snapshots. 
This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListSnapshots(self, request, context): - """Lists all snapshots associated with the specified cluster. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DeleteSnapshot(self, request, context): - """Permanently deletes the specified snapshot. - - Note: This is a private alpha release of Cloud Bigtable snapshots. This - feature is not currently available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any SLA or deprecation - policy. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def CreateBackup(self, request, context): - """Starts creating a new Cloud Bigtable Backup. The returned backup - [long-running operation][google.longrunning.Operation] can be used to - track creation of the backup. The - [metadata][google.longrunning.Operation.metadata] field type is - [CreateBackupMetadata][google.bigtable.admin.v2.CreateBackupMetadata]. The - [response][google.longrunning.Operation.response] field type is - [Backup][google.bigtable.admin.v2.Backup], if successful. Cancelling the - returned operation will stop the creation and delete the backup. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetBackup(self, request, context): - """Gets metadata on a pending or completed Cloud Bigtable Backup.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def UpdateBackup(self, request, context): - """Updates a pending or completed Cloud Bigtable Backup.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DeleteBackup(self, request, context): - """Deletes a pending or completed Cloud Bigtable backup.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListBackups(self, request, context): - """Lists Cloud Bigtable backups. Returns both completed and pending - backups. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def RestoreTable(self, request, context): - """Create a new table by restoring from a completed backup. The new table - must be in the same instance as the instance containing the backup. 
The - returned table [long-running operation][google.longrunning.Operation] can - be used to track the progress of the operation, and to cancel it. The - [metadata][google.longrunning.Operation.metadata] field type is - [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. The - [response][google.longrunning.Operation.response] type is - [Table][google.bigtable.admin.v2.Table], if successful. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetIamPolicy(self, request, context): - """Gets the access control policy for a resource. - Returns an empty policy if the resource exists but does not have a policy - set. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def SetIamPolicy(self, request, context): - """Sets the access control policy on a Table or Backup resource. - Replaces any existing policy. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def TestIamPermissions(self, request, context): - """Returns permissions that the caller has on the specified table resource.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - -def add_BigtableTableAdminServicer_to_server(servicer, server): - rpc_method_handlers = { - "CreateTable": grpc.unary_unary_rpc_method_handler( - servicer.CreateTable, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.SerializeToString, - ), - "CreateTableFromSnapshot": grpc.unary_unary_rpc_method_handler( - servicer.CreateTableFromSnapshot, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableFromSnapshotRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "ListTables": grpc.unary_unary_rpc_method_handler( - servicer.ListTables, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesResponse.SerializeToString, - ), - "GetTable": grpc.unary_unary_rpc_method_handler( - servicer.GetTable, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetTableRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.SerializeToString, - ), - "DeleteTable": grpc.unary_unary_rpc_method_handler( - servicer.DeleteTable, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteTableRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - "ModifyColumnFamilies": grpc.unary_unary_rpc_method_handler( - servicer.ModifyColumnFamilies, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ModifyColumnFamiliesRequest.FromString, - 
response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.SerializeToString, - ), - "DropRowRange": grpc.unary_unary_rpc_method_handler( - servicer.DropRowRange, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DropRowRangeRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - "GenerateConsistencyToken": grpc.unary_unary_rpc_method_handler( - servicer.GenerateConsistencyToken, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenResponse.SerializeToString, - ), - "CheckConsistency": grpc.unary_unary_rpc_method_handler( - servicer.CheckConsistency, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyResponse.SerializeToString, - ), - "SnapshotTable": grpc.unary_unary_rpc_method_handler( - servicer.SnapshotTable, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.SnapshotTableRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "GetSnapshot": grpc.unary_unary_rpc_method_handler( - servicer.GetSnapshot, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetSnapshotRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Snapshot.SerializeToString, - ), - "ListSnapshots": grpc.unary_unary_rpc_method_handler( - servicer.ListSnapshots, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsResponse.SerializeToString, - ), - "DeleteSnapshot": grpc.unary_unary_rpc_method_handler( - servicer.DeleteSnapshot, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteSnapshotRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - "CreateBackup": grpc.unary_unary_rpc_method_handler( - servicer.CreateBackup, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateBackupRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "GetBackup": grpc.unary_unary_rpc_method_handler( - servicer.GetBackup, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetBackupRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Backup.SerializeToString, - ), - "UpdateBackup": grpc.unary_unary_rpc_method_handler( - servicer.UpdateBackup, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.UpdateBackupRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Backup.SerializeToString, - ), - 
"DeleteBackup": grpc.unary_unary_rpc_method_handler( - servicer.DeleteBackup, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteBackupRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - "ListBackups": grpc.unary_unary_rpc_method_handler( - servicer.ListBackups, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListBackupsRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListBackupsResponse.SerializeToString, - ), - "RestoreTable": grpc.unary_unary_rpc_method_handler( - servicer.RestoreTable, - request_deserializer=google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.RestoreTableRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "GetIamPolicy": grpc.unary_unary_rpc_method_handler( - servicer.GetIamPolicy, - request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.FromString, - response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString, - ), - "SetIamPolicy": grpc.unary_unary_rpc_method_handler( - servicer.SetIamPolicy, - request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.FromString, - response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString, - ), - "TestIamPermissions": grpc.unary_unary_rpc_method_handler( - servicer.TestIamPermissions, - request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.FromString, - response_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - "google.bigtable.admin.v2.BigtableTableAdmin", rpc_method_handlers - ) - server.add_generic_rpc_handlers((generic_handler,)) - - -# This class is part of an EXPERIMENTAL API. -class BigtableTableAdmin(object): - """Service for creating, configuring, and deleting Cloud Bigtable tables. - - - Provides access to the table schemas only, not the data stored within - the tables. 
- """ - - @staticmethod - def CreateTable( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTable", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableRequest.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def CreateTableFromSnapshot( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTableFromSnapshot", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateTableFromSnapshotRequest.SerializeToString, - google_dot_longrunning_dot_operations__pb2.Operation.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def ListTables( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/ListTables", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesRequest.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListTablesResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def GetTable( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/GetTable", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetTableRequest.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def DeleteTable( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteTableRequest.SerializeToString, - google_dot_protobuf_dot_empty__pb2.Empty.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def ModifyColumnFamilies( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - 
"/google.bigtable.admin.v2.BigtableTableAdmin/ModifyColumnFamilies", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ModifyColumnFamiliesRequest.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Table.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def DropRowRange( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/DropRowRange", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DropRowRangeRequest.SerializeToString, - google_dot_protobuf_dot_empty__pb2.Empty.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def GenerateConsistencyToken( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/GenerateConsistencyToken", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenRequest.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def CheckConsistency( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/CheckConsistency", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyRequest.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CheckConsistencyResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def SnapshotTable( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/SnapshotTable", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.SnapshotTableRequest.SerializeToString, - google_dot_longrunning_dot_operations__pb2.Operation.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def GetSnapshot( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/GetSnapshot", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetSnapshotRequest.SerializeToString, 
- google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Snapshot.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def ListSnapshots( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/ListSnapshots", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsRequest.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListSnapshotsResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def DeleteSnapshot( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteSnapshot", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteSnapshotRequest.SerializeToString, - google_dot_protobuf_dot_empty__pb2.Empty.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def CreateBackup( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/CreateBackup", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.CreateBackupRequest.SerializeToString, - google_dot_longrunning_dot_operations__pb2.Operation.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def GetBackup( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/GetBackup", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.GetBackupRequest.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Backup.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def UpdateBackup( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateBackup", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.UpdateBackupRequest.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_table__pb2.Backup.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def DeleteBackup( - request, - target, - options=(), - 
channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteBackup", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.DeleteBackupRequest.SerializeToString, - google_dot_protobuf_dot_empty__pb2.Empty.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def ListBackups( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/ListBackups", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListBackupsRequest.SerializeToString, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.ListBackupsResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def RestoreTable( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/RestoreTable", - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_bigtable__table__admin__pb2.RestoreTableRequest.SerializeToString, - google_dot_longrunning_dot_operations__pb2.Operation.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def GetIamPolicy( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/GetIamPolicy", - google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.SerializeToString, - google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def SetIamPolicy( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/SetIamPolicy", - google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.SerializeToString, - google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def TestIamPermissions( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.admin.v2.BigtableTableAdmin/TestIamPermissions", - google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.SerializeToString, - 
google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_data.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_data.proto deleted file mode 100644 index e4efb74f560e..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_data.proto +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2017 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.table.v1; - -import "google/longrunning/operations.proto"; -import "google/protobuf/duration.proto"; - -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/table/v1;table"; -option java_multiple_files = true; -option java_outer_classname = "BigtableTableDataProto"; -option java_package = "com.google.bigtable.admin.table.v1"; - - -// A collection of user data indexed by row, column, and timestamp. -// Each table is served using the resources of its parent cluster. -message Table { - enum TimestampGranularity { - MILLIS = 0; - } - - // A unique identifier of the form - // /tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]* - string name = 1; - - // If this Table is in the process of being created, the Operation used to - // track its progress. As long as this operation is present, the Table will - // not accept any Table Admin or Read/Write requests. - google.longrunning.Operation current_operation = 2; - - // The column families configured for this table, mapped by column family id. - map column_families = 3; - - // The granularity (e.g. MILLIS, MICROS) at which timestamps are stored in - // this table. Timestamps not matching the granularity will be rejected. - // Cannot be changed once the table is created. - TimestampGranularity granularity = 4; -} - -// A set of columns within a table which share a common configuration. -message ColumnFamily { - // A unique identifier of the form /columnFamilies/[-_.a-zA-Z0-9]+ - // The last segment is the same as the "name" field in - // google.bigtable.v1.Family. 
- string name = 1; - - // Garbage collection expression specified by the following grammar: - // GC = EXPR - // | "" ; - // EXPR = EXPR, "||", EXPR (* lowest precedence *) - // | EXPR, "&&", EXPR - // | "(", EXPR, ")" (* highest precedence *) - // | PROP ; - // PROP = "version() >", NUM32 - // | "age() >", NUM64, [ UNIT ] ; - // NUM32 = non-zero-digit { digit } ; (* # NUM32 <= 2^32 - 1 *) - // NUM64 = non-zero-digit { digit } ; (* # NUM64 <= 2^63 - 1 *) - // UNIT = "d" | "h" | "m" (* d=days, h=hours, m=minutes, else micros *) - // GC expressions can be up to 500 characters in length - // - // The different types of PROP are defined as follows: - // version() - cell index, counting from most recent and starting at 1 - // age() - age of the cell (current time minus cell timestamp) - // - // Example: "version() > 3 || (age() > 3d && version() > 1)" - // drop cells beyond the most recent three, and drop cells older than three - // days unless they're the most recent cell in the row/column - // - // Garbage collection executes opportunistically in the background, and so - // it's possible for reads to return a cell even if it matches the active GC - // expression for its family. - string gc_expression = 2; - - // Garbage collection rule specified as a protobuf. - // Supersedes `gc_expression`. - // Must serialize to at most 500 bytes. - // - // NOTE: Garbage collection executes opportunistically in the background, and - // so it's possible for reads to return a cell even if it matches the active - // GC expression for its family. - GcRule gc_rule = 3; -} - -// Rule for determining which cells to delete during garbage collection. -message GcRule { - // A GcRule which deletes cells matching all of the given rules. - message Intersection { - // Only delete cells which would be deleted by every element of `rules`. - repeated GcRule rules = 1; - } - - // A GcRule which deletes cells matching any of the given rules. - message Union { - // Delete cells which would be deleted by any element of `rules`. - repeated GcRule rules = 1; - } - - oneof rule { - // Delete all cells in a column except the most recent N. - int32 max_num_versions = 1; - - // Delete cells in a column older than the given age. - // Values must be at least one millisecond, and will be truncated to - // microsecond granularity. - google.protobuf.Duration max_age = 2; - - // Delete cells that would be deleted by every nested rule. - Intersection intersection = 3; - - // Delete cells that would be deleted by any nested rule. - Union union = 4; - } -} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_service.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_service.proto deleted file mode 100644 index 6e968fee17c1..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_service.proto +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2017 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
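The deleted v1 data proto above defines garbage collection twice: as a textual gc_expression grammar and as a structured GcRule message (max_num_versions, max_age, intersection, union). As a rough illustration only, not part of the patch, the grammar's own example "version() > 3 || (age() > 3d && version() > 1)" could be expressed as a structured rule with the GC-rule helpers shipped in this package's google.cloud.bigtable.column_family module; the project, instance, table, and column-family IDs below are placeholders.

    import datetime

    from google.cloud.bigtable import Client
    from google.cloud.bigtable import column_family

    # "version() > 3": delete cells beyond the most recent three ...
    beyond_three = column_family.MaxVersionsGCRule(3)
    # ... OR "age() > 3d && version() > 1": cells older than three days
    # that are not the most recent cell in the column.
    stale_and_not_newest = column_family.GCRuleIntersection(
        rules=[
            column_family.MaxAgeGCRule(datetime.timedelta(days=3)),
            column_family.MaxVersionsGCRule(1),
        ]
    )
    gc_rule = column_family.GCRuleUnion(rules=[beyond_three, stale_and_not_newest])

    # Apply the rule when creating a column family (placeholder IDs).
    client = Client(project="my-project", admin=True)
    table = client.instance("my-instance").table("my-table")
    table.column_family("cf1", gc_rule=gc_rule).create()

Garbage collection remains opportunistic either way: a read may still return a cell that already matches the active rule for its family.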
- -syntax = "proto3"; - -package google.bigtable.admin.table.v1; - -import "google/api/annotations.proto"; -import "google/bigtable/admin/table/v1/bigtable_table_data.proto"; -import "google/bigtable/admin/table/v1/bigtable_table_service_messages.proto"; -import "google/protobuf/empty.proto"; - -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/table/v1;table"; -option java_multiple_files = true; -option java_outer_classname = "BigtableTableServicesProto"; -option java_package = "com.google.bigtable.admin.table.v1"; - - -// Service for creating, configuring, and deleting Cloud Bigtable tables. -// Provides access to the table schemas only, not the data stored within the tables. -service BigtableTableService { - // Creates a new table, to be served from a specified cluster. - // The table can be created with a full set of initial column families, - // specified in the request. - rpc CreateTable(CreateTableRequest) returns (Table) { - option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*}/tables" body: "*" }; - } - - // Lists the names of all tables served from a specified cluster. - rpc ListTables(ListTablesRequest) returns (ListTablesResponse) { - option (google.api.http) = { get: "/v1/{name=projects/*/zones/*/clusters/*}/tables" }; - } - - // Gets the schema of the specified table, including its column families. - rpc GetTable(GetTableRequest) returns (Table) { - option (google.api.http) = { get: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}" }; - } - - // Permanently deletes a specified table and all of its data. - rpc DeleteTable(DeleteTableRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { delete: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}" }; - } - - // Changes the name of a specified table. - // Cannot be used to move tables between clusters, zones, or projects. - rpc RenameTable(RenameTableRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}:rename" body: "*" }; - } - - // Creates a new column family within a specified table. - rpc CreateColumnFamily(CreateColumnFamilyRequest) returns (ColumnFamily) { - option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}/columnFamilies" body: "*" }; - } - - // Changes the configuration of a specified column family. - rpc UpdateColumnFamily(ColumnFamily) returns (ColumnFamily) { - option (google.api.http) = { put: "/v1/{name=projects/*/zones/*/clusters/*/tables/*/columnFamilies/*}" body: "*" }; - } - - // Permanently deletes a specified column family and all of its data. 
- rpc DeleteColumnFamily(DeleteColumnFamilyRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { delete: "/v1/{name=projects/*/zones/*/clusters/*/tables/*/columnFamilies/*}" }; - } - - // Delete all rows in a table corresponding to a particular prefix - rpc BulkDeleteRows(BulkDeleteRowsRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}:bulkDeleteRows" body: "*" }; - } -} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_service_messages.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_service_messages.proto deleted file mode 100644 index 617ede65592f..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_service_messages.proto +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2017 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.table.v1; - -import "google/bigtable/admin/table/v1/bigtable_table_data.proto"; - -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/table/v1;table"; -option java_multiple_files = true; -option java_outer_classname = "BigtableTableServiceMessagesProto"; -option java_package = "com.google.bigtable.admin.table.v1"; - - -message CreateTableRequest { - // The unique name of the cluster in which to create the new table. - string name = 1; - - // The name by which the new table should be referred to within the cluster, - // e.g. "foobar" rather than "/tables/foobar". - string table_id = 2; - - // The Table to create. The `name` field of the Table and all of its - // ColumnFamilies must be left blank, and will be populated in the response. - Table table = 3; - - // The optional list of row keys that will be used to initially split the - // table into several tablets (Tablets are similar to HBase regions). - // Given two split keys, "s1" and "s2", three tablets will be created, - // spanning the key ranges: [, s1), [s1, s2), [s2, ). - // - // Example: - // * Row keys := ["a", "apple", "custom", "customer_1", "customer_2", - // "other", "zz"] - // * initial_split_keys := ["apple", "customer_1", "customer_2", "other"] - // * Key assignment: - // - Tablet 1 [, apple) => {"a"}. - // - Tablet 2 [apple, customer_1) => {"apple", "custom"}. - // - Tablet 3 [customer_1, customer_2) => {"customer_1"}. - // - Tablet 4 [customer_2, other) => {"customer_2"}. - // - Tablet 5 [other, ) => {"other", "zz"}. - repeated string initial_split_keys = 4; -} - -message ListTablesRequest { - // The unique name of the cluster for which tables should be listed. - string name = 1; -} - -message ListTablesResponse { - // The tables present in the requested cluster. - // At present, only the names of the tables are populated. - repeated Table tables = 1; -} - -message GetTableRequest { - // The unique name of the requested table. 
- string name = 1; -} - -message DeleteTableRequest { - // The unique name of the table to be deleted. - string name = 1; -} - -message RenameTableRequest { - // The current unique name of the table. - string name = 1; - - // The new name by which the table should be referred to within its containing - // cluster, e.g. "foobar" rather than "/tables/foobar". - string new_id = 2; -} - -message CreateColumnFamilyRequest { - // The unique name of the table in which to create the new column family. - string name = 1; - - // The name by which the new column family should be referred to within the - // table, e.g. "foobar" rather than "/columnFamilies/foobar". - string column_family_id = 2; - - // The column family to create. The `name` field must be left blank. - ColumnFamily column_family = 3; -} - -message DeleteColumnFamilyRequest { - // The unique name of the column family to be deleted. - string name = 1; -} - -message BulkDeleteRowsRequest { - // The unique name of the table on which to perform the bulk delete - string table_name = 1; - - oneof target { - // Delete all rows that start with this row key prefix. Prefix cannot be - // zero length. - bytes row_key_prefix = 2; - - // Delete all rows in the table. Setting this to false is a no-op. - bool delete_all_data_from_table = 3; - } -} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2.py deleted file mode 100644 index e07dea1d1506..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2.py +++ /dev/null @@ -1,190 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/bigtable_admin_v2/proto/common.proto -"""Generated protocol buffer code.""" -from google.protobuf.internal import enum_type_wrapper -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/bigtable_admin_v2/proto/common.proto", - package="google.bigtable.admin.v2", - syntax="proto3", - serialized_options=b'\n\034com.google.bigtable.admin.v2B\013CommonProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2\352\002"Google::Cloud::Bigtable::Admin::V2', - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n1google/cloud/bigtable_admin_v2/proto/common.proto\x12\x18google.bigtable.admin.v2\x1a\x1fgoogle/protobuf/timestamp.proto"\x8b\x01\n\x11OperationProgress\x12\x18\n\x10progress_percent\x18\x01 \x01(\x05\x12.\n\nstart_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp*=\n\x0bStorageType\x12\x1c\n\x18STORAGE_TYPE_UNSPECIFIED\x10\x00\x12\x07\n\x03SSD\x10\x01\x12\x07\n\x03HDD\x10\x02\x42\xd3\x01\n\x1c\x63om.google.bigtable.admin.v2B\x0b\x43ommonProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2\xea\x02"Google::Cloud::Bigtable::Admin::V2b\x06proto3', - 
dependencies=[ - google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, - ], -) - -_STORAGETYPE = _descriptor.EnumDescriptor( - name="StorageType", - full_name="google.bigtable.admin.v2.StorageType", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="STORAGE_TYPE_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="SSD", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="HDD", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=254, - serialized_end=315, -) -_sym_db.RegisterEnumDescriptor(_STORAGETYPE) - -StorageType = enum_type_wrapper.EnumTypeWrapper(_STORAGETYPE) -STORAGE_TYPE_UNSPECIFIED = 0 -SSD = 1 -HDD = 2 - - -_OPERATIONPROGRESS = _descriptor.Descriptor( - name="OperationProgress", - full_name="google.bigtable.admin.v2.OperationProgress", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="progress_percent", - full_name="google.bigtable.admin.v2.OperationProgress.progress_percent", - index=0, - number=1, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="start_time", - full_name="google.bigtable.admin.v2.OperationProgress.start_time", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="end_time", - full_name="google.bigtable.admin.v2.OperationProgress.end_time", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=113, - serialized_end=252, -) - -_OPERATIONPROGRESS.fields_by_name[ - "start_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_OPERATIONPROGRESS.fields_by_name[ - "end_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -DESCRIPTOR.message_types_by_name["OperationProgress"] = _OPERATIONPROGRESS -DESCRIPTOR.enum_types_by_name["StorageType"] = _STORAGETYPE -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -OperationProgress = _reflection.GeneratedProtocolMessageType( - "OperationProgress", - (_message.Message,), - { - "DESCRIPTOR": _OPERATIONPROGRESS, - "__module__": "google.cloud.bigtable_admin_v2.proto.common_pb2", - "__doc__": """Encapsulates progress related information for a Cloud 
Bigtable long - running operation. - - Attributes: - progress_percent: - Percent completion of the operation. Values are between 0 and - 100 inclusive. - start_time: - Time the request was received. - end_time: - If set, the time at which this operation failed or was - completed successfully. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.OperationProgress) - }, -) -_sym_db.RegisterMessage(OperationProgress) - - -DESCRIPTOR._options = None -# @@protoc_insertion_point(module_scope) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2_grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2_grpc.py deleted file mode 100644 index 8a9393943bdf..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common_pb2_grpc.py +++ /dev/null @@ -1,3 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py deleted file mode 100644 index 4f3ce0a5b254..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2.py +++ /dev/null @@ -1,893 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/bigtable_admin_v2/proto/instance.proto -"""Generated protocol buffer code.""" -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 -from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 -from google.cloud.bigtable_admin_v2.proto import ( - common_pb2 as google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_common__pb2, -) - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/bigtable_admin_v2/proto/instance.proto", - package="google.bigtable.admin.v2", - syntax="proto3", - serialized_options=b'\n\034com.google.bigtable.admin.v2B\rInstanceProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2\352\002"Google::Cloud::Bigtable::Admin::V2', - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n3google/cloud/bigtable_admin_v2/proto/instance.proto\x12\x18google.bigtable.admin.v2\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x31google/cloud/bigtable_admin_v2/proto/common.proto"\xdd\x03\n\x08Instance\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12\x19\n\x0c\x64isplay_name\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x37\n\x05state\x18\x03 \x01(\x0e\x32(.google.bigtable.admin.v2.Instance.State\x12\x35\n\x04type\x18\x04 \x01(\x0e\x32\'.google.bigtable.admin.v2.Instance.Type\x12>\n\x06labels\x18\x05 \x03(\x0b\x32..google.bigtable.admin.v2.Instance.LabelsEntry\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 
\x01(\t:\x02\x38\x01"5\n\x05State\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\t\n\x05READY\x10\x01\x12\x0c\n\x08\x43REATING\x10\x02"=\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x0e\n\nPRODUCTION\x10\x01\x12\x0f\n\x0b\x44\x45VELOPMENT\x10\x02:N\xea\x41K\n bigtable.googleapis.com/Instance\x12\'projects/{project}/instances/{instance}"\xa7\x03\n\x07\x43luster\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12\x38\n\x08location\x18\x02 \x01(\tB&\xfa\x41#\n!locations.googleapis.com/Location\x12;\n\x05state\x18\x03 \x01(\x0e\x32\'.google.bigtable.admin.v2.Cluster.StateB\x03\xe0\x41\x03\x12\x18\n\x0bserve_nodes\x18\x04 \x01(\x05\x42\x03\xe0\x41\x02\x12\x43\n\x14\x64\x65\x66\x61ult_storage_type\x18\x05 \x01(\x0e\x32%.google.bigtable.admin.v2.StorageType"Q\n\x05State\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\t\n\x05READY\x10\x01\x12\x0c\n\x08\x43REATING\x10\x02\x12\x0c\n\x08RESIZING\x10\x03\x12\x0c\n\x08\x44ISABLED\x10\x04:`\xea\x41]\n\x1f\x62igtable.googleapis.com/Cluster\x12:projects/{project}/instances/{instance}/clusters/{cluster}"\xee\x03\n\nAppProfile\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04\x65tag\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t\x12g\n\x1dmulti_cluster_routing_use_any\x18\x05 \x01(\x0b\x32>.google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAnyH\x00\x12[\n\x16single_cluster_routing\x18\x06 \x01(\x0b\x32\x39.google.bigtable.admin.v2.AppProfile.SingleClusterRoutingH\x00\x1a\x1b\n\x19MultiClusterRoutingUseAny\x1aN\n\x14SingleClusterRouting\x12\x12\n\ncluster_id\x18\x01 \x01(\t\x12"\n\x1a\x61llow_transactional_writes\x18\x02 \x01(\x08:j\xea\x41g\n"bigtable.googleapis.com/AppProfile\x12\x41projects/{project}/instances/{instance}/appProfiles/{app_profile}B\x10\n\x0erouting_policyB\xd5\x01\n\x1c\x63om.google.bigtable.admin.v2B\rInstanceProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2\xea\x02"Google::Cloud::Bigtable::Admin::V2b\x06proto3', - dependencies=[ - google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, - google_dot_api_dot_resource__pb2.DESCRIPTOR, - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_common__pb2.DESCRIPTOR, - ], -) - - -_INSTANCE_STATE = _descriptor.EnumDescriptor( - name="State", - full_name="google.bigtable.admin.v2.Instance.State", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="STATE_NOT_KNOWN", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="READY", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="CREATING", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=474, - serialized_end=527, -) -_sym_db.RegisterEnumDescriptor(_INSTANCE_STATE) - -_INSTANCE_TYPE = _descriptor.EnumDescriptor( - name="Type", - full_name="google.bigtable.admin.v2.Instance.Type", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="TYPE_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - 
name="PRODUCTION", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="DEVELOPMENT", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=529, - serialized_end=590, -) -_sym_db.RegisterEnumDescriptor(_INSTANCE_TYPE) - -_CLUSTER_STATE = _descriptor.EnumDescriptor( - name="State", - full_name="google.bigtable.admin.v2.Cluster.State", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="STATE_NOT_KNOWN", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="READY", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="CREATING", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="RESIZING", - index=3, - number=3, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="DISABLED", - index=4, - number=4, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=917, - serialized_end=998, -) -_sym_db.RegisterEnumDescriptor(_CLUSTER_STATE) - - -_INSTANCE_LABELSENTRY = _descriptor.Descriptor( - name="LabelsEntry", - full_name="google.bigtable.admin.v2.Instance.LabelsEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.bigtable.admin.v2.Instance.LabelsEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.bigtable.admin.v2.Instance.LabelsEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=427, - serialized_end=472, -) - -_INSTANCE = _descriptor.Descriptor( - name="Instance", - full_name="google.bigtable.admin.v2.Instance", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.Instance.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - 
is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="display_name", - full_name="google.bigtable.admin.v2.Instance.display_name", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="state", - full_name="google.bigtable.admin.v2.Instance.state", - index=2, - number=3, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="type", - full_name="google.bigtable.admin.v2.Instance.type", - index=3, - number=4, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="labels", - full_name="google.bigtable.admin.v2.Instance.labels", - index=4, - number=5, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[ - _INSTANCE_LABELSENTRY, - ], - enum_types=[ - _INSTANCE_STATE, - _INSTANCE_TYPE, - ], - serialized_options=b"\352AK\n bigtable.googleapis.com/Instance\022'projects/{project}/instances/{instance}", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=193, - serialized_end=670, -) - - -_CLUSTER = _descriptor.Descriptor( - name="Cluster", - full_name="google.bigtable.admin.v2.Cluster", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.Cluster.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="location", - full_name="google.bigtable.admin.v2.Cluster.location", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\372A#\n!locations.googleapis.com/Location", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="state", - full_name="google.bigtable.admin.v2.Cluster.state", - index=2, - number=3, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, 
- message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="serve_nodes", - full_name="google.bigtable.admin.v2.Cluster.serve_nodes", - index=3, - number=4, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="default_storage_type", - full_name="google.bigtable.admin.v2.Cluster.default_storage_type", - index=4, - number=5, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[ - _CLUSTER_STATE, - ], - serialized_options=b"\352A]\n\037bigtable.googleapis.com/Cluster\022:projects/{project}/instances/{instance}/clusters/{cluster}", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=673, - serialized_end=1096, -) - - -_APPPROFILE_MULTICLUSTERROUTINGUSEANY = _descriptor.Descriptor( - name="MultiClusterRoutingUseAny", - full_name="google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1360, - serialized_end=1387, -) - -_APPPROFILE_SINGLECLUSTERROUTING = _descriptor.Descriptor( - name="SingleClusterRouting", - full_name="google.bigtable.admin.v2.AppProfile.SingleClusterRouting", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="cluster_id", - full_name="google.bigtable.admin.v2.AppProfile.SingleClusterRouting.cluster_id", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="allow_transactional_writes", - full_name="google.bigtable.admin.v2.AppProfile.SingleClusterRouting.allow_transactional_writes", - index=1, - number=2, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1389, - serialized_end=1467, -) - -_APPPROFILE = _descriptor.Descriptor( - name="AppProfile", - full_name="google.bigtable.admin.v2.AppProfile", - filename=None, - file=DESCRIPTOR, - 
containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.AppProfile.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="etag", - full_name="google.bigtable.admin.v2.AppProfile.etag", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="description", - full_name="google.bigtable.admin.v2.AppProfile.description", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="multi_cluster_routing_use_any", - full_name="google.bigtable.admin.v2.AppProfile.multi_cluster_routing_use_any", - index=3, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="single_cluster_routing", - full_name="google.bigtable.admin.v2.AppProfile.single_cluster_routing", - index=4, - number=6, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[ - _APPPROFILE_MULTICLUSTERROUTINGUSEANY, - _APPPROFILE_SINGLECLUSTERROUTING, - ], - enum_types=[], - serialized_options=b'\352Ag\n"bigtable.googleapis.com/AppProfile\022Aprojects/{project}/instances/{instance}/appProfiles/{app_profile}', - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="routing_policy", - full_name="google.bigtable.admin.v2.AppProfile.routing_policy", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=1099, - serialized_end=1593, -) - -_INSTANCE_LABELSENTRY.containing_type = _INSTANCE -_INSTANCE.fields_by_name["state"].enum_type = _INSTANCE_STATE -_INSTANCE.fields_by_name["type"].enum_type = _INSTANCE_TYPE -_INSTANCE.fields_by_name["labels"].message_type = _INSTANCE_LABELSENTRY -_INSTANCE_STATE.containing_type = _INSTANCE -_INSTANCE_TYPE.containing_type = _INSTANCE -_CLUSTER.fields_by_name["state"].enum_type = _CLUSTER_STATE -_CLUSTER.fields_by_name[ - "default_storage_type" -].enum_type = ( - google_dot_cloud_dot_bigtable__admin__v2_dot_proto_dot_common__pb2._STORAGETYPE -) -_CLUSTER_STATE.containing_type = _CLUSTER 
-_APPPROFILE_MULTICLUSTERROUTINGUSEANY.containing_type = _APPPROFILE -_APPPROFILE_SINGLECLUSTERROUTING.containing_type = _APPPROFILE -_APPPROFILE.fields_by_name[ - "multi_cluster_routing_use_any" -].message_type = _APPPROFILE_MULTICLUSTERROUTINGUSEANY -_APPPROFILE.fields_by_name[ - "single_cluster_routing" -].message_type = _APPPROFILE_SINGLECLUSTERROUTING -_APPPROFILE.oneofs_by_name["routing_policy"].fields.append( - _APPPROFILE.fields_by_name["multi_cluster_routing_use_any"] -) -_APPPROFILE.fields_by_name[ - "multi_cluster_routing_use_any" -].containing_oneof = _APPPROFILE.oneofs_by_name["routing_policy"] -_APPPROFILE.oneofs_by_name["routing_policy"].fields.append( - _APPPROFILE.fields_by_name["single_cluster_routing"] -) -_APPPROFILE.fields_by_name[ - "single_cluster_routing" -].containing_oneof = _APPPROFILE.oneofs_by_name["routing_policy"] -DESCRIPTOR.message_types_by_name["Instance"] = _INSTANCE -DESCRIPTOR.message_types_by_name["Cluster"] = _CLUSTER -DESCRIPTOR.message_types_by_name["AppProfile"] = _APPPROFILE -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -Instance = _reflection.GeneratedProtocolMessageType( - "Instance", - (_message.Message,), - { - "LabelsEntry": _reflection.GeneratedProtocolMessageType( - "LabelsEntry", - (_message.Message,), - { - "DESCRIPTOR": _INSTANCE_LABELSENTRY, - "__module__": "google.cloud.bigtable_admin_v2.proto.instance_pb2" - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Instance.LabelsEntry) - }, - ), - "DESCRIPTOR": _INSTANCE, - "__module__": "google.cloud.bigtable_admin_v2.proto.instance_pb2", - "__doc__": """A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and - the resources that serve them. All tables in an instance are served - from all [Clusters][google.bigtable.admin.v2.Cluster] in the instance. - - Attributes: - name: - The unique name of the instance. Values are of the form - ``projects/{project}/instances/[a-z][a-z0-9\\-]+[a-z0-9]``. - display_name: - Required. The descriptive name for this instance as it appears - in UIs. Can be changed at any time, but should be kept - globally unique to avoid confusion. - state: - (\ ``OutputOnly``) The current state of the instance. - type: - The type of the instance. Defaults to ``PRODUCTION``. - labels: - Labels are a flexible and lightweight mechanism for organizing - cloud resources into groups that reflect a customer’s - organizational needs and deployment strategies. They can be - used to filter resources and aggregate metrics. - Label keys - must be between 1 and 63 characters long and must conform - to the regular expression: - ``[\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}``. - Label values - must be between 0 and 63 characters long and must conform - to the regular expression: ``[\p{Ll}\p{Lo}\p{N}_-]{0,63}``. - - No more than 64 labels can be associated with a given - resource. - Keys and values must both be under 128 bytes. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Instance) - }, -) -_sym_db.RegisterMessage(Instance) -_sym_db.RegisterMessage(Instance.LabelsEntry) - -Cluster = _reflection.GeneratedProtocolMessageType( - "Cluster", - (_message.Message,), - { - "DESCRIPTOR": _CLUSTER, - "__module__": "google.cloud.bigtable_admin_v2.proto.instance_pb2", - "__doc__": """A resizable group of nodes in a particular cloud location, capable of - serving all [Tables][google.bigtable.admin.v2.Table] in the parent - [Instance][google.bigtable.admin.v2.Instance]. - - Attributes: - name: - The unique name of the cluster. 
Values are of the form ``proje - cts/{project}/instances/{instance}/clusters/[a-z][-a-z0-9]*``. - location: - (\ ``CreationOnly``) The location where this cluster’s nodes - and storage reside. For best performance, clients should be - located as close as possible to this cluster. Currently only - zones are supported, so values should be of the form - ``projects/{project}/locations/{zone}``. - state: - The current state of the cluster. - serve_nodes: - Required. The number of nodes allocated to this cluster. More - nodes enable higher throughput and more consistent - performance. - default_storage_type: - (\ ``CreationOnly``) The type of storage used by this cluster - to serve its parent instance’s tables, unless explicitly - overridden. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Cluster) - }, -) -_sym_db.RegisterMessage(Cluster) - -AppProfile = _reflection.GeneratedProtocolMessageType( - "AppProfile", - (_message.Message,), - { - "MultiClusterRoutingUseAny": _reflection.GeneratedProtocolMessageType( - "MultiClusterRoutingUseAny", - (_message.Message,), - { - "DESCRIPTOR": _APPPROFILE_MULTICLUSTERROUTINGUSEANY, - "__module__": "google.cloud.bigtable_admin_v2.proto.instance_pb2", - "__doc__": """Read/write requests are routed to the nearest cluster in the instance, - and will fail over to the nearest cluster that is available in the - event of transient errors or delays. Clusters in a region are - considered equidistant. Choosing this option sacrifices read-your- - writes consistency to improve availability.""", - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny) - }, - ), - "SingleClusterRouting": _reflection.GeneratedProtocolMessageType( - "SingleClusterRouting", - (_message.Message,), - { - "DESCRIPTOR": _APPPROFILE_SINGLECLUSTERROUTING, - "__module__": "google.cloud.bigtable_admin_v2.proto.instance_pb2", - "__doc__": """Unconditionally routes all read/write requests to a specific cluster. - This option preserves read-your-writes consistency but does not - improve availability. - - Attributes: - cluster_id: - The cluster to which read/write requests should be routed. - allow_transactional_writes: - Whether or not ``CheckAndMutateRow`` and - ``ReadModifyWriteRow`` requests are allowed by this app - profile. It is unsafe to send these requests to the same - table/row/column in multiple clusters. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.AppProfile.SingleClusterRouting) - }, - ), - "DESCRIPTOR": _APPPROFILE, - "__module__": "google.cloud.bigtable_admin_v2.proto.instance_pb2", - "__doc__": """A configuration object describing how Cloud Bigtable should treat - traffic from a particular end user application. - - Attributes: - name: - (\ ``OutputOnly``) The unique name of the app profile. Values - are of the form - ``projects//instances//appProfiles/[_a- - zA-Z0-9][-_.a-zA-Z0-9]*``. - etag: - Strongly validated etag for optimistic concurrency control. - Preserve the value returned from ``GetAppProfile`` when - calling ``UpdateAppProfile`` to fail the request if there has - been a modification in the mean time. The ``update_mask`` of - the request need not include ``etag`` for this protection to - apply. See `Wikipedia - `__ and `RFC 7232 - `__ for more - details. - description: - Optional long form description of the use case for this - AppProfile. - routing_policy: - The routing policy for all read/write requests that use this - app profile. A value must be explicitly set. 
- multi_cluster_routing_use_any: - Use a multi-cluster routing policy. - single_cluster_routing: - Use a single-cluster routing policy. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.AppProfile) - }, -) -_sym_db.RegisterMessage(AppProfile) -_sym_db.RegisterMessage(AppProfile.MultiClusterRoutingUseAny) -_sym_db.RegisterMessage(AppProfile.SingleClusterRouting) - - -DESCRIPTOR._options = None -_INSTANCE_LABELSENTRY._options = None -_INSTANCE.fields_by_name["name"]._options = None -_INSTANCE.fields_by_name["display_name"]._options = None -_INSTANCE._options = None -_CLUSTER.fields_by_name["name"]._options = None -_CLUSTER.fields_by_name["location"]._options = None -_CLUSTER.fields_by_name["state"]._options = None -_CLUSTER.fields_by_name["serve_nodes"]._options = None -_CLUSTER._options = None -_APPPROFILE._options = None -# @@protoc_insertion_point(module_scope) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2_grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2_grpc.py deleted file mode 100644 index 8a9393943bdf..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance_pb2_grpc.py +++ /dev/null @@ -1,3 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py deleted file mode 100644 index 71191acbabb1..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2.py +++ /dev/null @@ -1,1694 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: google/cloud/bigtable_admin_v2/proto/table.proto -"""Generated protocol buffer code.""" -from google.protobuf.internal import enum_type_wrapper -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 -from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 -from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/bigtable_admin_v2/proto/table.proto", - package="google.bigtable.admin.v2", - syntax="proto3", - serialized_options=b'\n\034com.google.bigtable.admin.v2B\nTableProtoP\001Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2\352\002"Google::Cloud::Bigtable::Admin::V2', - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n0google/cloud/bigtable_admin_v2/proto/table.proto\x12\x18google.bigtable.admin.v2\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\x9b\x01\n\x0bRestoreInfo\x12@\n\x0bsource_type\x18\x01 \x01(\x0e\x32+.google.bigtable.admin.v2.RestoreSourceType\x12;\n\x0b\x62\x61\x63kup_info\x18\x02 \x01(\x0b\x32$.google.bigtable.admin.v2.BackupInfoH\x00\x42\r\n\x0bsource_info"\xfb\x07\n\x05Table\x12\x0c\n\x04name\x18\x01 \x01(\t\x12J\n\x0e\x63luster_states\x18\x02 \x03(\x0b\x32\x32.google.bigtable.admin.v2.Table.ClusterStatesEntry\x12L\n\x0f\x63olumn_families\x18\x03 \x03(\x0b\x32\x33.google.bigtable.admin.v2.Table.ColumnFamiliesEntry\x12I\n\x0bgranularity\x18\x04 \x01(\x0e\x32\x34.google.bigtable.admin.v2.Table.TimestampGranularity\x12;\n\x0crestore_info\x18\x06 \x01(\x0b\x32%.google.bigtable.admin.v2.RestoreInfo\x1a\xf9\x01\n\x0c\x43lusterState\x12X\n\x11replication_state\x18\x01 \x01(\x0e\x32=.google.bigtable.admin.v2.Table.ClusterState.ReplicationState"\x8e\x01\n\x10ReplicationState\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\x10\n\x0cINITIALIZING\x10\x01\x12\x17\n\x13PLANNED_MAINTENANCE\x10\x02\x12\x19\n\x15UNPLANNED_MAINTENANCE\x10\x03\x12\t\n\x05READY\x10\x04\x12\x14\n\x10READY_OPTIMIZING\x10\x05\x1a\x62\n\x12\x43lusterStatesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12;\n\x05value\x18\x02 \x01(\x0b\x32,.google.bigtable.admin.v2.Table.ClusterState:\x02\x38\x01\x1a]\n\x13\x43olumnFamiliesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x35\n\x05value\x18\x02 \x01(\x0b\x32&.google.bigtable.admin.v2.ColumnFamily:\x02\x38\x01"I\n\x14TimestampGranularity\x12%\n!TIMESTAMP_GRANULARITY_UNSPECIFIED\x10\x00\x12\n\n\x06MILLIS\x10\x01"\\\n\x04View\x12\x14\n\x10VIEW_UNSPECIFIED\x10\x00\x12\r\n\tNAME_ONLY\x10\x01\x12\x0f\n\x0bSCHEMA_VIEW\x10\x02\x12\x14\n\x10REPLICATION_VIEW\x10\x03\x12\x08\n\x04\x46ULL\x10\x04:Z\xea\x41W\n\x1d\x62igtable.googleapis.com/Table\x12\x36projects/{project}/instances/{instance}/tables/{table}"A\n\x0c\x43olumnFamily\x12\x31\n\x07gc_rule\x18\x01 \x01(\x0b\x32 .google.bigtable.admin.v2.GcRule"\xd5\x02\n\x06GcRule\x12\x1a\n\x10max_num_versions\x18\x01 \x01(\x05H\x00\x12,\n\x07max_age\x18\x02 
\x01(\x0b\x32\x19.google.protobuf.DurationH\x00\x12\x45\n\x0cintersection\x18\x03 \x01(\x0b\x32-.google.bigtable.admin.v2.GcRule.IntersectionH\x00\x12\x37\n\x05union\x18\x04 \x01(\x0b\x32&.google.bigtable.admin.v2.GcRule.UnionH\x00\x1a?\n\x0cIntersection\x12/\n\x05rules\x18\x01 \x03(\x0b\x32 .google.bigtable.admin.v2.GcRule\x1a\x38\n\x05Union\x12/\n\x05rules\x18\x01 \x03(\x0b\x32 .google.bigtable.admin.v2.GcRuleB\x06\n\x04rule"\xc7\x03\n\x08Snapshot\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x35\n\x0csource_table\x18\x02 \x01(\x0b\x32\x1f.google.bigtable.admin.v2.Table\x12\x17\n\x0f\x64\x61ta_size_bytes\x18\x03 \x01(\x03\x12/\n\x0b\x63reate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0b\x64\x65lete_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x37\n\x05state\x18\x06 \x01(\x0e\x32(.google.bigtable.admin.v2.Snapshot.State\x12\x13\n\x0b\x64\x65scription\x18\x07 \x01(\t"5\n\x05State\x12\x13\n\x0fSTATE_NOT_KNOWN\x10\x00\x12\t\n\x05READY\x10\x01\x12\x0c\n\x08\x43REATING\x10\x02:v\xea\x41s\n bigtable.googleapis.com/Snapshot\x12Oprojects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}"\xd7\x03\n\x06\x42\x61\x63kup\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12\x1c\n\x0csource_table\x18\x02 \x01(\tB\x06\xe0\x41\x05\xe0\x41\x02\x12\x34\n\x0b\x65xpire_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x02\x12\x33\n\nstart_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x31\n\x08\x65nd_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x17\n\nsize_bytes\x18\x06 \x01(\x03\x42\x03\xe0\x41\x03\x12:\n\x05state\x18\x07 \x01(\x0e\x32&.google.bigtable.admin.v2.Backup.StateB\x03\xe0\x41\x03"7\n\x05State\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\x0c\n\x08\x43REATING\x10\x01\x12\t\n\x05READY\x10\x02:p\xea\x41m\n\x1e\x62igtable.googleapis.com/Backup\x12Kprojects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}"\xa4\x01\n\nBackupInfo\x12\x13\n\x06\x62\x61\x63kup\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12\x33\n\nstart_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x31\n\x08\x65nd_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x19\n\x0csource_table\x18\x04 \x01(\tB\x03\xe0\x41\x03*D\n\x11RestoreSourceType\x12#\n\x1fRESTORE_SOURCE_TYPE_UNSPECIFIED\x10\x00\x12\n\n\x06\x42\x41\x43KUP\x10\x01\x42\xd2\x01\n\x1c\x63om.google.bigtable.admin.v2B\nTableProtoP\x01Z=google.golang.org/genproto/googleapis/bigtable/admin/v2;admin\xaa\x02\x1eGoogle.Cloud.Bigtable.Admin.V2\xca\x02\x1eGoogle\\Cloud\\Bigtable\\Admin\\V2\xea\x02"Google::Cloud::Bigtable::Admin::V2b\x06proto3', - dependencies=[ - google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, - google_dot_api_dot_resource__pb2.DESCRIPTOR, - google_dot_protobuf_dot_duration__pb2.DESCRIPTOR, - google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, - ], -) - -_RESTORESOURCETYPE = _descriptor.EnumDescriptor( - name="RestoreSourceType", - full_name="google.bigtable.admin.v2.RestoreSourceType", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="RESTORE_SOURCE_TYPE_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="BACKUP", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - 
serialized_options=None, - serialized_start=2893, - serialized_end=2961, -) -_sym_db.RegisterEnumDescriptor(_RESTORESOURCETYPE) - -RestoreSourceType = enum_type_wrapper.EnumTypeWrapper(_RESTORESOURCETYPE) -RESTORE_SOURCE_TYPE_UNSPECIFIED = 0 -BACKUP = 1 - - -_TABLE_CLUSTERSTATE_REPLICATIONSTATE = _descriptor.EnumDescriptor( - name="ReplicationState", - full_name="google.bigtable.admin.v2.Table.ClusterState.ReplicationState", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="STATE_NOT_KNOWN", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="INITIALIZING", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="PLANNED_MAINTENANCE", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="UNPLANNED_MAINTENANCE", - index=3, - number=3, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="READY", - index=4, - number=4, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="READY_OPTIMIZING", - index=5, - number=5, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=783, - serialized_end=925, -) -_sym_db.RegisterEnumDescriptor(_TABLE_CLUSTERSTATE_REPLICATIONSTATE) - -_TABLE_TIMESTAMPGRANULARITY = _descriptor.EnumDescriptor( - name="TimestampGranularity", - full_name="google.bigtable.admin.v2.Table.TimestampGranularity", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="TIMESTAMP_GRANULARITY_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="MILLIS", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=1122, - serialized_end=1195, -) -_sym_db.RegisterEnumDescriptor(_TABLE_TIMESTAMPGRANULARITY) - -_TABLE_VIEW = _descriptor.EnumDescriptor( - name="View", - full_name="google.bigtable.admin.v2.Table.View", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="VIEW_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="NAME_ONLY", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="SCHEMA_VIEW", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="REPLICATION_VIEW", - index=3, - number=3, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="FULL", - index=4, - number=4, - serialized_options=None, - type=None, - 
create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=1197, - serialized_end=1289, -) -_sym_db.RegisterEnumDescriptor(_TABLE_VIEW) - -_SNAPSHOT_STATE = _descriptor.EnumDescriptor( - name="State", - full_name="google.bigtable.admin.v2.Snapshot.State", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="STATE_NOT_KNOWN", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="READY", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="CREATING", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=2077, - serialized_end=2130, -) -_sym_db.RegisterEnumDescriptor(_SNAPSHOT_STATE) - -_BACKUP_STATE = _descriptor.EnumDescriptor( - name="State", - full_name="google.bigtable.admin.v2.Backup.State", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="STATE_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="CREATING", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="READY", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=2555, - serialized_end=2610, -) -_sym_db.RegisterEnumDescriptor(_BACKUP_STATE) - - -_RESTOREINFO = _descriptor.Descriptor( - name="RestoreInfo", - full_name="google.bigtable.admin.v2.RestoreInfo", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="source_type", - full_name="google.bigtable.admin.v2.RestoreInfo.source_type", - index=0, - number=1, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="backup_info", - full_name="google.bigtable.admin.v2.RestoreInfo.backup_info", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="source_info", - full_name="google.bigtable.admin.v2.RestoreInfo.source_info", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=204, - serialized_end=359, -) - - -_TABLE_CLUSTERSTATE = _descriptor.Descriptor( - 
name="ClusterState", - full_name="google.bigtable.admin.v2.Table.ClusterState", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="replication_state", - full_name="google.bigtable.admin.v2.Table.ClusterState.replication_state", - index=0, - number=1, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[ - _TABLE_CLUSTERSTATE_REPLICATIONSTATE, - ], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=676, - serialized_end=925, -) - -_TABLE_CLUSTERSTATESENTRY = _descriptor.Descriptor( - name="ClusterStatesEntry", - full_name="google.bigtable.admin.v2.Table.ClusterStatesEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.bigtable.admin.v2.Table.ClusterStatesEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.bigtable.admin.v2.Table.ClusterStatesEntry.value", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=927, - serialized_end=1025, -) - -_TABLE_COLUMNFAMILIESENTRY = _descriptor.Descriptor( - name="ColumnFamiliesEntry", - full_name="google.bigtable.admin.v2.Table.ColumnFamiliesEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.bigtable.admin.v2.Table.ColumnFamiliesEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.bigtable.admin.v2.Table.ColumnFamiliesEntry.value", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - 
extension_ranges=[], - oneofs=[], - serialized_start=1027, - serialized_end=1120, -) - -_TABLE = _descriptor.Descriptor( - name="Table", - full_name="google.bigtable.admin.v2.Table", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.Table.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cluster_states", - full_name="google.bigtable.admin.v2.Table.cluster_states", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="column_families", - full_name="google.bigtable.admin.v2.Table.column_families", - index=2, - number=3, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="granularity", - full_name="google.bigtable.admin.v2.Table.granularity", - index=3, - number=4, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="restore_info", - full_name="google.bigtable.admin.v2.Table.restore_info", - index=4, - number=6, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[ - _TABLE_CLUSTERSTATE, - _TABLE_CLUSTERSTATESENTRY, - _TABLE_COLUMNFAMILIESENTRY, - ], - enum_types=[ - _TABLE_TIMESTAMPGRANULARITY, - _TABLE_VIEW, - ], - serialized_options=b"\352AW\n\035bigtable.googleapis.com/Table\0226projects/{project}/instances/{instance}/tables/{table}", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=362, - serialized_end=1381, -) - - -_COLUMNFAMILY = _descriptor.Descriptor( - name="ColumnFamily", - full_name="google.bigtable.admin.v2.ColumnFamily", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="gc_rule", - full_name="google.bigtable.admin.v2.ColumnFamily.gc_rule", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), 
- ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1383, - serialized_end=1448, -) - - -_GCRULE_INTERSECTION = _descriptor.Descriptor( - name="Intersection", - full_name="google.bigtable.admin.v2.GcRule.Intersection", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="rules", - full_name="google.bigtable.admin.v2.GcRule.Intersection.rules", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1663, - serialized_end=1726, -) - -_GCRULE_UNION = _descriptor.Descriptor( - name="Union", - full_name="google.bigtable.admin.v2.GcRule.Union", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="rules", - full_name="google.bigtable.admin.v2.GcRule.Union.rules", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1728, - serialized_end=1784, -) - -_GCRULE = _descriptor.Descriptor( - name="GcRule", - full_name="google.bigtable.admin.v2.GcRule", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="max_num_versions", - full_name="google.bigtable.admin.v2.GcRule.max_num_versions", - index=0, - number=1, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="max_age", - full_name="google.bigtable.admin.v2.GcRule.max_age", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="intersection", - full_name="google.bigtable.admin.v2.GcRule.intersection", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="union", - 
full_name="google.bigtable.admin.v2.GcRule.union", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[ - _GCRULE_INTERSECTION, - _GCRULE_UNION, - ], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="rule", - full_name="google.bigtable.admin.v2.GcRule.rule", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=1451, - serialized_end=1792, -) - - -_SNAPSHOT = _descriptor.Descriptor( - name="Snapshot", - full_name="google.bigtable.admin.v2.Snapshot", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.Snapshot.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="source_table", - full_name="google.bigtable.admin.v2.Snapshot.source_table", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="data_size_bytes", - full_name="google.bigtable.admin.v2.Snapshot.data_size_bytes", - index=2, - number=3, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="create_time", - full_name="google.bigtable.admin.v2.Snapshot.create_time", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="delete_time", - full_name="google.bigtable.admin.v2.Snapshot.delete_time", - index=4, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="state", - full_name="google.bigtable.admin.v2.Snapshot.state", - index=5, - number=6, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - 
file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="description", - full_name="google.bigtable.admin.v2.Snapshot.description", - index=6, - number=7, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[ - _SNAPSHOT_STATE, - ], - serialized_options=b"\352As\n bigtable.googleapis.com/Snapshot\022Oprojects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1795, - serialized_end=2250, -) - - -_BACKUP = _descriptor.Descriptor( - name="Backup", - full_name="google.bigtable.admin.v2.Backup", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.admin.v2.Backup.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="source_table", - full_name="google.bigtable.admin.v2.Backup.source_table", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\005\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="expire_time", - full_name="google.bigtable.admin.v2.Backup.expire_time", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="start_time", - full_name="google.bigtable.admin.v2.Backup.start_time", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="end_time", - full_name="google.bigtable.admin.v2.Backup.end_time", - index=4, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="size_bytes", - full_name="google.bigtable.admin.v2.Backup.size_bytes", - index=5, - number=6, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - 
containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="state", - full_name="google.bigtable.admin.v2.Backup.state", - index=6, - number=7, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[ - _BACKUP_STATE, - ], - serialized_options=b"\352Am\n\036bigtable.googleapis.com/Backup\022Kprojects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2253, - serialized_end=2724, -) - - -_BACKUPINFO = _descriptor.Descriptor( - name="BackupInfo", - full_name="google.bigtable.admin.v2.BackupInfo", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="backup", - full_name="google.bigtable.admin.v2.BackupInfo.backup", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="start_time", - full_name="google.bigtable.admin.v2.BackupInfo.start_time", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="end_time", - full_name="google.bigtable.admin.v2.BackupInfo.end_time", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="source_table", - full_name="google.bigtable.admin.v2.BackupInfo.source_table", - index=3, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2727, - serialized_end=2891, -) - -_RESTOREINFO.fields_by_name["source_type"].enum_type = _RESTORESOURCETYPE -_RESTOREINFO.fields_by_name["backup_info"].message_type = _BACKUPINFO -_RESTOREINFO.oneofs_by_name["source_info"].fields.append( - _RESTOREINFO.fields_by_name["backup_info"] -) -_RESTOREINFO.fields_by_name[ - "backup_info" -].containing_oneof = _RESTOREINFO.oneofs_by_name["source_info"] 
-_TABLE_CLUSTERSTATE.fields_by_name[ - "replication_state" -].enum_type = _TABLE_CLUSTERSTATE_REPLICATIONSTATE -_TABLE_CLUSTERSTATE.containing_type = _TABLE -_TABLE_CLUSTERSTATE_REPLICATIONSTATE.containing_type = _TABLE_CLUSTERSTATE -_TABLE_CLUSTERSTATESENTRY.fields_by_name["value"].message_type = _TABLE_CLUSTERSTATE -_TABLE_CLUSTERSTATESENTRY.containing_type = _TABLE -_TABLE_COLUMNFAMILIESENTRY.fields_by_name["value"].message_type = _COLUMNFAMILY -_TABLE_COLUMNFAMILIESENTRY.containing_type = _TABLE -_TABLE.fields_by_name["cluster_states"].message_type = _TABLE_CLUSTERSTATESENTRY -_TABLE.fields_by_name["column_families"].message_type = _TABLE_COLUMNFAMILIESENTRY -_TABLE.fields_by_name["granularity"].enum_type = _TABLE_TIMESTAMPGRANULARITY -_TABLE.fields_by_name["restore_info"].message_type = _RESTOREINFO -_TABLE_TIMESTAMPGRANULARITY.containing_type = _TABLE -_TABLE_VIEW.containing_type = _TABLE -_COLUMNFAMILY.fields_by_name["gc_rule"].message_type = _GCRULE -_GCRULE_INTERSECTION.fields_by_name["rules"].message_type = _GCRULE -_GCRULE_INTERSECTION.containing_type = _GCRULE -_GCRULE_UNION.fields_by_name["rules"].message_type = _GCRULE -_GCRULE_UNION.containing_type = _GCRULE -_GCRULE.fields_by_name[ - "max_age" -].message_type = google_dot_protobuf_dot_duration__pb2._DURATION -_GCRULE.fields_by_name["intersection"].message_type = _GCRULE_INTERSECTION -_GCRULE.fields_by_name["union"].message_type = _GCRULE_UNION -_GCRULE.oneofs_by_name["rule"].fields.append(_GCRULE.fields_by_name["max_num_versions"]) -_GCRULE.fields_by_name["max_num_versions"].containing_oneof = _GCRULE.oneofs_by_name[ - "rule" -] -_GCRULE.oneofs_by_name["rule"].fields.append(_GCRULE.fields_by_name["max_age"]) -_GCRULE.fields_by_name["max_age"].containing_oneof = _GCRULE.oneofs_by_name["rule"] -_GCRULE.oneofs_by_name["rule"].fields.append(_GCRULE.fields_by_name["intersection"]) -_GCRULE.fields_by_name["intersection"].containing_oneof = _GCRULE.oneofs_by_name["rule"] -_GCRULE.oneofs_by_name["rule"].fields.append(_GCRULE.fields_by_name["union"]) -_GCRULE.fields_by_name["union"].containing_oneof = _GCRULE.oneofs_by_name["rule"] -_SNAPSHOT.fields_by_name["source_table"].message_type = _TABLE -_SNAPSHOT.fields_by_name[ - "create_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_SNAPSHOT.fields_by_name[ - "delete_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_SNAPSHOT.fields_by_name["state"].enum_type = _SNAPSHOT_STATE -_SNAPSHOT_STATE.containing_type = _SNAPSHOT -_BACKUP.fields_by_name[ - "expire_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_BACKUP.fields_by_name[ - "start_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_BACKUP.fields_by_name[ - "end_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_BACKUP.fields_by_name["state"].enum_type = _BACKUP_STATE -_BACKUP_STATE.containing_type = _BACKUP -_BACKUPINFO.fields_by_name[ - "start_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_BACKUPINFO.fields_by_name[ - "end_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -DESCRIPTOR.message_types_by_name["RestoreInfo"] = _RESTOREINFO -DESCRIPTOR.message_types_by_name["Table"] = _TABLE -DESCRIPTOR.message_types_by_name["ColumnFamily"] = _COLUMNFAMILY -DESCRIPTOR.message_types_by_name["GcRule"] = _GCRULE -DESCRIPTOR.message_types_by_name["Snapshot"] = _SNAPSHOT -DESCRIPTOR.message_types_by_name["Backup"] = _BACKUP 
-DESCRIPTOR.message_types_by_name["BackupInfo"] = _BACKUPINFO -DESCRIPTOR.enum_types_by_name["RestoreSourceType"] = _RESTORESOURCETYPE -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -RestoreInfo = _reflection.GeneratedProtocolMessageType( - "RestoreInfo", - (_message.Message,), - { - "DESCRIPTOR": _RESTOREINFO, - "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", - "__doc__": """Information about a table restore. - - Attributes: - source_type: - The type of the restore source. - source_info: - Information about the source used to restore the table. - backup_info: - Information about the backup used to restore the table. The - backup may no longer exist. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.RestoreInfo) - }, -) -_sym_db.RegisterMessage(RestoreInfo) - -Table = _reflection.GeneratedProtocolMessageType( - "Table", - (_message.Message,), - { - "ClusterState": _reflection.GeneratedProtocolMessageType( - "ClusterState", - (_message.Message,), - { - "DESCRIPTOR": _TABLE_CLUSTERSTATE, - "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", - "__doc__": """The state of a table’s data in a particular cluster. - - Attributes: - replication_state: - Output only. The state of replication for the table in this - cluster. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table.ClusterState) - }, - ), - "ClusterStatesEntry": _reflection.GeneratedProtocolMessageType( - "ClusterStatesEntry", - (_message.Message,), - { - "DESCRIPTOR": _TABLE_CLUSTERSTATESENTRY, - "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2" - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table.ClusterStatesEntry) - }, - ), - "ColumnFamiliesEntry": _reflection.GeneratedProtocolMessageType( - "ColumnFamiliesEntry", - (_message.Message,), - { - "DESCRIPTOR": _TABLE_COLUMNFAMILIESENTRY, - "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2" - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table.ColumnFamiliesEntry) - }, - ), - "DESCRIPTOR": _TABLE, - "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", - "__doc__": """A collection of user data indexed by row, column, and timestamp. Each - table is served using the resources of its parent cluster. - - Attributes: - name: - Output only. The unique name of the table. Values are of the - form ``projects//instances//tables/[_a- - zA-Z0-9][-_.a-zA-Z0-9]*``. Views: ``NAME_ONLY``, - ``SCHEMA_VIEW``, ``REPLICATION_VIEW``, ``FULL`` - cluster_states: - Output only. Map from cluster ID to per-cluster table state. - If it could not be determined whether or not the table has - data in a particular cluster (for example, if its zone is - unavailable), then there will be an entry for the cluster with - UNKNOWN ``replication_status``. Views: ``REPLICATION_VIEW``, - ``FULL`` - column_families: - (\ ``CreationOnly``) The column families configured for this - table, mapped by column family ID. Views: ``SCHEMA_VIEW``, - ``FULL`` - granularity: - (\ ``CreationOnly``) The granularity (i.e. ``MILLIS``) at - which timestamps are stored in this table. Timestamps not - matching the granularity will be rejected. If unspecified at - creation time, the value will be set to ``MILLIS``. Views: - ``SCHEMA_VIEW``, ``FULL``. - restore_info: - Output only. If this table was restored from another data - source (e.g. a backup), this field will be populated with - information about the restore. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Table) - }, -) -_sym_db.RegisterMessage(Table) -_sym_db.RegisterMessage(Table.ClusterState) -_sym_db.RegisterMessage(Table.ClusterStatesEntry) -_sym_db.RegisterMessage(Table.ColumnFamiliesEntry) - -ColumnFamily = _reflection.GeneratedProtocolMessageType( - "ColumnFamily", - (_message.Message,), - { - "DESCRIPTOR": _COLUMNFAMILY, - "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", - "__doc__": """A set of columns within a table which share a common configuration. - - Attributes: - gc_rule: - Garbage collection rule specified as a protobuf. Must - serialize to at most 500 bytes. NOTE: Garbage collection - executes opportunistically in the background, and so it’s - possible for reads to return a cell even if it matches the - active GC expression for its family. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.ColumnFamily) - }, -) -_sym_db.RegisterMessage(ColumnFamily) - -GcRule = _reflection.GeneratedProtocolMessageType( - "GcRule", - (_message.Message,), - { - "Intersection": _reflection.GeneratedProtocolMessageType( - "Intersection", - (_message.Message,), - { - "DESCRIPTOR": _GCRULE_INTERSECTION, - "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", - "__doc__": """A GcRule which deletes cells matching all of the given rules. - - Attributes: - rules: - Only delete cells which would be deleted by every element of - ``rules``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GcRule.Intersection) - }, - ), - "Union": _reflection.GeneratedProtocolMessageType( - "Union", - (_message.Message,), - { - "DESCRIPTOR": _GCRULE_UNION, - "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", - "__doc__": """A GcRule which deletes cells matching any of the given rules. - - Attributes: - rules: - Delete cells which would be deleted by any element of - ``rules``. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GcRule.Union) - }, - ), - "DESCRIPTOR": _GCRULE, - "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", - "__doc__": """Rule for determining which cells to delete during garbage collection. - - Attributes: - rule: - Garbage collection rules. - max_num_versions: - Delete all cells in a column except the most recent N. - max_age: - Delete cells in a column older than the given age. Values must - be at least one millisecond, and will be truncated to - microsecond granularity. - intersection: - Delete cells that would be deleted by every nested rule. - union: - Delete cells that would be deleted by any nested rule. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.GcRule) - }, -) -_sym_db.RegisterMessage(GcRule) -_sym_db.RegisterMessage(GcRule.Intersection) -_sym_db.RegisterMessage(GcRule.Union) - -Snapshot = _reflection.GeneratedProtocolMessageType( - "Snapshot", - (_message.Message,), - { - "DESCRIPTOR": _SNAPSHOT, - "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", - "__doc__": """A snapshot of a table at a particular time. A snapshot can be used as - a checkpoint for data restoration or a data source for a new table. - Note: This is a private alpha release of Cloud Bigtable snapshots. - This feature is not currently available to most Cloud Bigtable - customers. This feature might be changed in backward-incompatible ways - and is not recommended for production use. It is not subject to any - SLA or deprecation policy. - - Attributes: - name: - Output only. 
The unique name of the snapshot. Values are of - the form ``projects//instances//clusters//snapshots/``. - source_table: - Output only. The source table at the time the snapshot was - taken. - data_size_bytes: - Output only. The size of the data in the source table at the - time the snapshot was taken. In some cases, this value may be - computed asynchronously via a background process and a - placeholder of 0 will be used in the meantime. - create_time: - Output only. The time when the snapshot is created. - delete_time: - Output only. The time when the snapshot will be deleted. The - maximum amount of time a snapshot can stay active is 365 days. - If ‘ttl’ is not specified, the default maximum of 365 days - will be used. - state: - Output only. The current state of the snapshot. - description: - Output only. Description of the snapshot. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Snapshot) - }, -) -_sym_db.RegisterMessage(Snapshot) - -Backup = _reflection.GeneratedProtocolMessageType( - "Backup", - (_message.Message,), - { - "DESCRIPTOR": _BACKUP, - "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", - "__doc__": """A backup of a Cloud Bigtable table. - - Attributes: - name: - Output only. A globally unique identifier for the backup which - cannot be changed. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/ - backups/[_a-zA-Z0-9][-_.a-zA-Z0-9]*`` The final segment of the - name must be between 1 and 50 characters in length. The - backup is stored in the cluster identified by the prefix of - the backup name of the form ``projects/{project}/instances/{in - stance}/clusters/{cluster}``. - source_table: - Required. Immutable. Name of the table from which this backup - was created. This needs to be in the same instance as the - backup. Values are of the form ``projects/{project}/instances/ - {instance}/tables/{source_table}``. - expire_time: - Required. The expiration time of the backup, with microseconds - granularity that must be at least 6 hours and at most 30 days - from the time the request is received. Once the - ``expire_time`` has passed, Cloud Bigtable will delete the - backup and free the resources used by the backup. - start_time: - Output only. ``start_time`` is the time that the backup was - started (i.e. approximately the time the [CreateBackup][google - .bigtable.admin.v2.BigtableTableAdmin.CreateBackup] request is - received). The row data in this backup will be no older than - this timestamp. - end_time: - Output only. ``end_time`` is the time that the backup was - finished. The row data in the backup will be no newer than - this timestamp. - size_bytes: - Output only. Size of the backup in bytes. - state: - Output only. The current state of the backup. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.Backup) - }, -) -_sym_db.RegisterMessage(Backup) - -BackupInfo = _reflection.GeneratedProtocolMessageType( - "BackupInfo", - (_message.Message,), - { - "DESCRIPTOR": _BACKUPINFO, - "__module__": "google.cloud.bigtable_admin_v2.proto.table_pb2", - "__doc__": """Information about a backup. - - Attributes: - backup: - Output only. Name of the backup. - start_time: - Output only. The time that the backup was started. Row data in - the backup will be no older than this timestamp. - end_time: - Output only. This time that the backup was finished. Row data - in the backup will be no newer than this timestamp. - source_table: - Output only. Name of the table the backup was created from. 
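For readers mapping the GcRule ``rule`` oneof described in the docstrings above onto the hand-written layer, the helpers in google.cloud.bigtable.column_family build the same shapes. A minimal sketch, assuming the version count and age below are placeholders chosen for illustration:

import datetime

from google.cloud.bigtable.column_family import (
    GCRuleUnion,
    MaxAgeGCRule,
    MaxVersionsGCRule,
)

# Delete a cell if it is not among the 2 most recent versions OR if it is
# older than 7 days; a union rule deletes cells matched by any nested rule,
# which serializes to a GcRule carrying the `union` variant of the oneof.
gc_rule = GCRuleUnion(rules=[
    MaxVersionsGCRule(2),
    MaxAgeGCRule(datetime.timedelta(days=7)),
])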
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.BackupInfo) - }, -) -_sym_db.RegisterMessage(BackupInfo) - - -DESCRIPTOR._options = None -_TABLE_CLUSTERSTATESENTRY._options = None -_TABLE_COLUMNFAMILIESENTRY._options = None -_TABLE._options = None -_SNAPSHOT._options = None -_BACKUP.fields_by_name["name"]._options = None -_BACKUP.fields_by_name["source_table"]._options = None -_BACKUP.fields_by_name["expire_time"]._options = None -_BACKUP.fields_by_name["start_time"]._options = None -_BACKUP.fields_by_name["end_time"]._options = None -_BACKUP.fields_by_name["size_bytes"]._options = None -_BACKUP.fields_by_name["state"]._options = None -_BACKUP._options = None -_BACKUPINFO.fields_by_name["backup"]._options = None -_BACKUPINFO.fields_by_name["start_time"]._options = None -_BACKUPINFO.fields_by_name["end_time"]._options = None -_BACKUPINFO.fields_by_name["source_table"]._options = None -# @@protoc_insertion_point(module_scope) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2_grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2_grpc.py deleted file mode 100644 index 8a9393943bdf..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table_pb2_grpc.py +++ /dev/null @@ -1,3 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/py.typed b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/py.typed new file mode 100644 index 000000000000..bc26f20697c2 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-bigtable-admin package uses inline types. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/__init__.py new file mode 100644 index 000000000000..42ffdf2bc43d --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/__init__.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py new file mode 100644 index 000000000000..5606dd4ffa9e --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from .client import BigtableInstanceAdminClient +from .async_client import BigtableInstanceAdminAsyncClient + +__all__ = ( + "BigtableInstanceAdminClient", + "BigtableInstanceAdminAsyncClient", +) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py new file mode 100644 index 000000000000..4df47ff4a7a2 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ -0,0 +1,1935 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import pagers +from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin +from google.cloud.bigtable_admin_v2.types import common +from google.cloud.bigtable_admin_v2.types import instance +from google.cloud.bigtable_admin_v2.types import instance as gba_instance +from google.iam.v1 import iam_policy_pb2 as iam_policy # type: ignore +from google.iam.v1 import policy_pb2 as policy # type: ignore +from google.protobuf import field_mask_pb2 as field_mask # type: ignore + +from .transports.base import BigtableInstanceAdminTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import BigtableInstanceAdminGrpcAsyncIOTransport +from .client import BigtableInstanceAdminClient + + +class BigtableInstanceAdminAsyncClient: + """Service for creating, configuring, and deleting Cloud + Bigtable Instances and Clusters. Provides access to the Instance + and Cluster schemas only, not the tables' metadata or data + stored in those tables. 
+ """ + + _client: BigtableInstanceAdminClient + + DEFAULT_ENDPOINT = BigtableInstanceAdminClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = BigtableInstanceAdminClient.DEFAULT_MTLS_ENDPOINT + + app_profile_path = staticmethod(BigtableInstanceAdminClient.app_profile_path) + parse_app_profile_path = staticmethod( + BigtableInstanceAdminClient.parse_app_profile_path + ) + cluster_path = staticmethod(BigtableInstanceAdminClient.cluster_path) + parse_cluster_path = staticmethod(BigtableInstanceAdminClient.parse_cluster_path) + instance_path = staticmethod(BigtableInstanceAdminClient.instance_path) + parse_instance_path = staticmethod(BigtableInstanceAdminClient.parse_instance_path) + + common_billing_account_path = staticmethod( + BigtableInstanceAdminClient.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + BigtableInstanceAdminClient.parse_common_billing_account_path + ) + + common_folder_path = staticmethod(BigtableInstanceAdminClient.common_folder_path) + parse_common_folder_path = staticmethod( + BigtableInstanceAdminClient.parse_common_folder_path + ) + + common_organization_path = staticmethod( + BigtableInstanceAdminClient.common_organization_path + ) + parse_common_organization_path = staticmethod( + BigtableInstanceAdminClient.parse_common_organization_path + ) + + common_project_path = staticmethod(BigtableInstanceAdminClient.common_project_path) + parse_common_project_path = staticmethod( + BigtableInstanceAdminClient.parse_common_project_path + ) + + common_location_path = staticmethod( + BigtableInstanceAdminClient.common_location_path + ) + parse_common_location_path = staticmethod( + BigtableInstanceAdminClient.parse_common_location_path + ) + + from_service_account_info = BigtableInstanceAdminClient.from_service_account_info + from_service_account_file = BigtableInstanceAdminClient.from_service_account_file + from_service_account_json = from_service_account_file + + @property + def transport(self) -> BigtableInstanceAdminTransport: + """Return the transport used by the client instance. + + Returns: + BigtableInstanceAdminTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial( + type(BigtableInstanceAdminClient).get_transport_class, + type(BigtableInstanceAdminClient), + ) + + def __init__( + self, + *, + credentials: credentials.Credentials = None, + transport: Union[str, BigtableInstanceAdminTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the bigtable instance admin client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.BigtableInstanceAdminTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. 
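The ``client_options`` behavior described here can be exercised with google.api_core.client_options. A minimal sketch, assuming ambient application default credentials; the endpoint value is only the public default repeated for illustration:

from google.api_core.client_options import ClientOptions
from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
    BigtableInstanceAdminAsyncClient,
)

# api_endpoint overrides the default endpoint and takes precedence over the
# GOOGLE_API_USE_MTLS_ENDPOINT environment variable.
options = ClientOptions(api_endpoint="bigtableadmin.googleapis.com")
client = BigtableInstanceAdminAsyncClient(client_options=options)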
GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + + self._client = BigtableInstanceAdminClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def create_instance( + self, + request: bigtable_instance_admin.CreateInstanceRequest = None, + *, + parent: str = None, + instance_id: str = None, + instance: gba_instance.Instance = None, + clusters: Sequence[ + bigtable_instance_admin.CreateInstanceRequest.ClustersEntry + ] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Create an instance within a project. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.CreateInstanceRequest`): + The request object. Request message for + BigtableInstanceAdmin.CreateInstance. + parent (:class:`str`): + Required. The unique name of the project in which to + create the new instance. Values are of the form + ``projects/{project}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_id (:class:`str`): + Required. The ID to be used when referring to the new + instance within its project, e.g., just ``myinstance`` + rather than ``projects/myproject/instances/myinstance``. + + This corresponds to the ``instance_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance (:class:`google.cloud.bigtable_admin_v2.types.Instance`): + Required. The instance to create. Fields marked + ``OutputOnly`` must be left blank. + + This corresponds to the ``instance`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + clusters (:class:`Sequence[google.cloud.bigtable_admin_v2.types.CreateInstanceRequest.ClustersEntry]`): + Required. The clusters to be created within the + instance, mapped by desired cluster ID, e.g., just + ``mycluster`` rather than + ``projects/myproject/instances/myinstance/clusters/mycluster``. + Fields marked ``OutputOnly`` must be left blank. + Currently, at most four clusters can be specified. + + This corresponds to the ``clusters`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. 
+ + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Instance` A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and + the resources that serve them. All tables in an + instance are served from all + [Clusters][google.bigtable.admin.v2.Cluster] in the + instance. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, instance_id, instance, clusters]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_instance_admin.CreateInstanceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if instance_id is not None: + request.instance_id = instance_id + if instance is not None: + request.instance = instance + + if clusters: + request.clusters.update(clusters) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_instance, + default_timeout=300.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gba_instance.Instance, + metadata_type=bigtable_instance_admin.CreateInstanceMetadata, + ) + + # Done; return the response. + return response + + async def get_instance( + self, + request: bigtable_instance_admin.GetInstanceRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> instance.Instance: + r"""Gets information about an instance. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.GetInstanceRequest`): + The request object. Request message for + BigtableInstanceAdmin.GetInstance. + name (:class:`str`): + Required. The unique name of the requested instance. + Values are of the form + ``projects/{project}/instances/{instance}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.Instance: + A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and + the resources that serve them. All tables in an + instance are served from all + [Clusters][google.bigtable.admin.v2.Cluster] in the + instance. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_instance_admin.GetInstanceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_instance, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_instances( + self, + request: bigtable_instance_admin.ListInstancesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> bigtable_instance_admin.ListInstancesResponse: + r"""Lists information about instances in a project. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.ListInstancesRequest`): + The request object. Request message for + BigtableInstanceAdmin.ListInstances. + parent (:class:`str`): + Required. The unique name of the project for which a + list of instances is requested. Values are of the form + ``projects/{project}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.ListInstancesResponse: + Response message for + BigtableInstanceAdmin.ListInstances. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_instance_admin.ListInstancesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_instances, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
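+        # (Illustration, assuming parent="projects/my-project": the routing header below
+        # becomes an ("x-goog-request-params", ...) metadata entry carrying the
+        # URL-encoded parent, which the service uses to route the request.)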
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def update_instance( + self, + request: instance.Instance = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> instance.Instance: + r"""Updates an instance within a project. This method + updates only the display name and type for an Instance. + To update other Instance properties, such as labels, use + PartialUpdateInstance. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.Instance`): + The request object. A collection of Bigtable + [Tables][google.bigtable.admin.v2.Table] and the + resources that serve them. All tables in an instance are + served from all + [Clusters][google.bigtable.admin.v2.Cluster] in the + instance. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.Instance: + A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and + the resources that serve them. All tables in an + instance are served from all + [Clusters][google.bigtable.admin.v2.Cluster] in the + instance. + + """ + # Create or coerce a protobuf request object. + + request = instance.Instance(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_instance, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def partial_update_instance( + self, + request: bigtable_instance_admin.PartialUpdateInstanceRequest = None, + *, + instance: gba_instance.Instance = None, + update_mask: field_mask.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Partially updates an instance within a project. This + method can modify all fields of an Instance and is the + preferred way to update an Instance. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.PartialUpdateInstanceRequest`): + The request object. Request message for + BigtableInstanceAdmin.PartialUpdateInstance. + instance (:class:`google.cloud.bigtable_admin_v2.types.Instance`): + Required. The Instance which will + (partially) replace the current value. + + This corresponds to the ``instance`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. 
The subset of Instance + fields which should be replaced. Must be + explicitly set. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Instance` A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and + the resources that serve them. All tables in an + instance are served from all + [Clusters][google.bigtable.admin.v2.Cluster] in the + instance. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([instance, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_instance_admin.PartialUpdateInstanceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if instance is not None: + request.instance = instance + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.partial_update_instance, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("instance.name", request.instance.name),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gba_instance.Instance, + metadata_type=bigtable_instance_admin.UpdateInstanceMetadata, + ) + + # Done; return the response. + return response + + async def delete_instance( + self, + request: bigtable_instance_admin.DeleteInstanceRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Delete an instance from a project. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.DeleteInstanceRequest`): + The request object. Request message for + BigtableInstanceAdmin.DeleteInstance. + name (:class:`str`): + Required. The unique name of the instance to be deleted. + Values are of the form + ``projects/{project}/instances/{instance}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_instance_admin.DeleteInstanceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_instance, + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + await rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + async def create_cluster( + self, + request: bigtable_instance_admin.CreateClusterRequest = None, + *, + parent: str = None, + cluster_id: str = None, + cluster: instance.Cluster = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a cluster within an instance. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.CreateClusterRequest`): + The request object. Request message for + BigtableInstanceAdmin.CreateCluster. + parent (:class:`str`): + Required. The unique name of the instance in which to + create the new cluster. Values are of the form + ``projects/{project}/instances/{instance}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. The ID to be used when referring to the new + cluster within its instance, e.g., just ``mycluster`` + rather than + ``projects/myproject/instances/myinstance/clusters/mycluster``. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster (:class:`google.cloud.bigtable_admin_v2.types.Cluster`): + Required. The cluster to be created. Fields marked + ``OutputOnly`` must be left blank. + + This corresponds to the ``cluster`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Cluster` A resizable group of nodes in a particular cloud location, capable + of serving all + [Tables][google.bigtable.admin.v2.Table] in the + parent [Instance][google.bigtable.admin.v2.Instance]. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, cluster_id, cluster]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_instance_admin.CreateClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if cluster_id is not None: + request.cluster_id = cluster_id + if cluster is not None: + request.cluster = cluster + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_cluster, + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + instance.Cluster, + metadata_type=bigtable_instance_admin.CreateClusterMetadata, + ) + + # Done; return the response. + return response + + async def get_cluster( + self, + request: bigtable_instance_admin.GetClusterRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> instance.Cluster: + r"""Gets information about a cluster. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.GetClusterRequest`): + The request object. Request message for + BigtableInstanceAdmin.GetCluster. + name (:class:`str`): + Required. The unique name of the requested cluster. + Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.Cluster: + A resizable group of nodes in a particular cloud location, capable + of serving all + [Tables][google.bigtable.admin.v2.Table] in the + parent [Instance][google.bigtable.admin.v2.Instance]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_instance_admin.GetClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
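+        # (As with the other read-only calls in this client, the default retry below
+        # re-issues the request only on DeadlineExceeded or ServiceUnavailable,
+        # backing off exponentially from 1s up to a 60s delay between attempts.)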
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_cluster, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_clusters( + self, + request: bigtable_instance_admin.ListClustersRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> bigtable_instance_admin.ListClustersResponse: + r"""Lists information about clusters in an instance. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.ListClustersRequest`): + The request object. Request message for + BigtableInstanceAdmin.ListClusters. + parent (:class:`str`): + Required. The unique name of the instance for which a + list of clusters is requested. Values are of the form + ``projects/{project}/instances/{instance}``. Use + ``{instance} = '-'`` to list Clusters for all Instances + in a project, e.g., ``projects/myproject/instances/-``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.ListClustersResponse: + Response message for + BigtableInstanceAdmin.ListClusters. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_instance_admin.ListClustersRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_clusters, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def update_cluster( + self, + request: instance.Cluster = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Updates a cluster within an instance. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.Cluster`): + The request object. A resizable group of nodes in a + particular cloud location, capable of serving all + [Tables][google.bigtable.admin.v2.Table] in the parent + [Instance][google.bigtable.admin.v2.Instance]. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Cluster` A resizable group of nodes in a particular cloud location, capable + of serving all + [Tables][google.bigtable.admin.v2.Table] in the + parent [Instance][google.bigtable.admin.v2.Instance]. + + """ + # Create or coerce a protobuf request object. + + request = instance.Cluster(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_cluster, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + instance.Cluster, + metadata_type=bigtable_instance_admin.UpdateClusterMetadata, + ) + + # Done; return the response. + return response + + async def delete_cluster( + self, + request: bigtable_instance_admin.DeleteClusterRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a cluster from an instance. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.DeleteClusterRequest`): + The request object. Request message for + BigtableInstanceAdmin.DeleteCluster. + name (:class:`str`): + Required. The unique name of the cluster to be deleted. + Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_instance_admin.DeleteClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_cluster, + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + await rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + async def create_app_profile( + self, + request: bigtable_instance_admin.CreateAppProfileRequest = None, + *, + parent: str = None, + app_profile_id: str = None, + app_profile: instance.AppProfile = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> instance.AppProfile: + r"""Creates an app profile within an instance. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.CreateAppProfileRequest`): + The request object. Request message for + BigtableInstanceAdmin.CreateAppProfile. + parent (:class:`str`): + Required. The unique name of the instance in which to + create the new app profile. Values are of the form + ``projects/{project}/instances/{instance}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile_id (:class:`str`): + Required. The ID to be used when referring to the new + app profile within its instance, e.g., just + ``myprofile`` rather than + ``projects/myproject/instances/myinstance/appProfiles/myprofile``. + + This corresponds to the ``app_profile_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile (:class:`google.cloud.bigtable_admin_v2.types.AppProfile`): + Required. The app profile to be created. Fields marked + ``OutputOnly`` will be ignored. + + This corresponds to the ``app_profile`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.AppProfile: + A configuration object describing how + Cloud Bigtable should treat traffic from + a particular end user application. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, app_profile_id, app_profile]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + request = bigtable_instance_admin.CreateAppProfileRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if app_profile_id is not None: + request.app_profile_id = app_profile_id + if app_profile is not None: + request.app_profile = app_profile + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_app_profile, + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_app_profile( + self, + request: bigtable_instance_admin.GetAppProfileRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> instance.AppProfile: + r"""Gets information about an app profile. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.GetAppProfileRequest`): + The request object. Request message for + BigtableInstanceAdmin.GetAppProfile. + name (:class:`str`): + Required. The unique name of the requested app profile. + Values are of the form + ``projects/{project}/instances/{instance}/appProfiles/{app_profile}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.AppProfile: + A configuration object describing how + Cloud Bigtable should treat traffic from + a particular end user application. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_instance_admin.GetAppProfileRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_app_profile, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. 
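+        # (On this async client the wrapped method returns a coroutine; awaiting it
+        # below yields the resolved instance.AppProfile message rather than a future.)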
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_app_profiles( + self, + request: bigtable_instance_admin.ListAppProfilesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListAppProfilesAsyncPager: + r"""Lists information about app profiles in an instance. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.ListAppProfilesRequest`): + The request object. Request message for + BigtableInstanceAdmin.ListAppProfiles. + parent (:class:`str`): + Required. The unique name of the instance for which a + list of app profiles is requested. Values are of the + form ``projects/{project}/instances/{instance}``. Use + ``{instance} = '-'`` to list AppProfiles for all + Instances in a project, e.g., + ``projects/myproject/instances/-``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListAppProfilesAsyncPager: + Response message for + BigtableInstanceAdmin.ListAppProfiles. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_instance_admin.ListAppProfilesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_app_profiles, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListAppProfilesAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def update_app_profile( + self, + request: bigtable_instance_admin.UpdateAppProfileRequest = None, + *, + app_profile: instance.AppProfile = None, + update_mask: field_mask.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Updates an app profile within an instance. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.UpdateAppProfileRequest`): + The request object. Request message for + BigtableInstanceAdmin.UpdateAppProfile. + app_profile (:class:`google.cloud.bigtable_admin_v2.types.AppProfile`): + Required. The app profile which will + (partially) replace the current value. + + This corresponds to the ``app_profile`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. The subset of app profile + fields which should be replaced. If + unset, all fields will be replaced. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.AppProfile` A configuration object describing how Cloud Bigtable should treat traffic + from a particular end user application. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([app_profile, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_instance_admin.UpdateAppProfileRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if app_profile is not None: + request.app_profile = app_profile + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_app_profile, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("app_profile.name", request.app_profile.name),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + instance.AppProfile, + metadata_type=bigtable_instance_admin.UpdateAppProfileMetadata, + ) + + # Done; return the response. 
+ return response + + async def delete_app_profile( + self, + request: bigtable_instance_admin.DeleteAppProfileRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes an app profile from an instance. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.DeleteAppProfileRequest`): + The request object. Request message for + BigtableInstanceAdmin.DeleteAppProfile. + name (:class:`str`): + Required. The unique name of the app profile to be + deleted. Values are of the form + ``projects/{project}/instances/{instance}/appProfiles/{app_profile}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_instance_admin.DeleteAppProfileRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_app_profile, + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + await rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + async def get_iam_policy( + self, + request: iam_policy.GetIamPolicyRequest = None, + *, + resource: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy.Policy: + r"""Gets the access control policy for an instance + resource. Returns an empty policy if an instance exists + but does not have a policy set. + + Args: + request (:class:`google.iam.v1.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + resource (:class:`str`): + REQUIRED: The resource for which the + policy is being requested. See the + operation documentation for the + appropriate value for this field. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.iam.v1.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. It is used to + specify access control policies for Cloud Platform + resources. 
+ + A Policy is a collection of bindings. A binding binds + one or more members to a single role. Members can be + user accounts, service accounts, Google groups, and + domains (such as G Suite). A role is a named list of + permissions (defined by IAM or configured by users). + A binding can optionally specify a condition, which + is a logic expression that further constrains the + role binding based on attributes about the request + and/or target resource. + + **JSON Example** + + { + "bindings": [ + { + "role": + "roles/resourcemanager.organizationAdmin", + "members": [ "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + + }, { "role": + "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { "title": "expirable access", + "description": "Does not grant access after + Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } + + ] + + } + + **YAML Example** + + bindings: - members: - user:\ mike@example.com - + group:\ admins@example.com - domain:google.com - + serviceAccount:\ my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - + members: - user:\ eve@example.com role: + roles/resourcemanager.organizationViewer + condition: title: expirable access description: + Does not grant access after Sep 2020 expression: + request.time < + timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the + [IAM developer's + guide](\ https://cloud.google.com/iam/docs). + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([resource]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy.GetIamPolicyRequest(**request) + + elif not request: + request = iam_policy.GetIamPolicyRequest(resource=resource,) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_iam_policy, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_iam_policy( + self, + request: iam_policy.SetIamPolicyRequest = None, + *, + resource: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy.Policy: + r"""Sets the access control policy on an instance + resource. Replaces any existing policy. + + Args: + request (:class:`google.iam.v1.iam_policy_pb2.SetIamPolicyRequest`): + The request object. 
Request message for `SetIamPolicy` + method. + resource (:class:`str`): + REQUIRED: The resource for which the + policy is being specified. See the + operation documentation for the + appropriate value for this field. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.iam.v1.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. It is used to + specify access control policies for Cloud Platform + resources. + + A Policy is a collection of bindings. A binding binds + one or more members to a single role. Members can be + user accounts, service accounts, Google groups, and + domains (such as G Suite). A role is a named list of + permissions (defined by IAM or configured by users). + A binding can optionally specify a condition, which + is a logic expression that further constrains the + role binding based on attributes about the request + and/or target resource. + + **JSON Example** + + { + "bindings": [ + { + "role": + "roles/resourcemanager.organizationAdmin", + "members": [ "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + + }, { "role": + "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { "title": "expirable access", + "description": "Does not grant access after + Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } + + ] + + } + + **YAML Example** + + bindings: - members: - user:\ mike@example.com - + group:\ admins@example.com - domain:google.com - + serviceAccount:\ my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - + members: - user:\ eve@example.com role: + roles/resourcemanager.organizationViewer + condition: title: expirable access description: + Does not grant access after Sep 2020 expression: + request.time < + timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the + [IAM developer's + guide](\ https://cloud.google.com/iam/docs). + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([resource]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy.SetIamPolicyRequest(**request) + + elif not request: + request = iam_policy.SetIamPolicyRequest(resource=resource,) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_iam_policy, + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def test_iam_permissions( + self, + request: iam_policy.TestIamPermissionsRequest = None, + *, + resource: str = None, + permissions: Sequence[str] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy.TestIamPermissionsResponse: + r"""Returns permissions that the caller has on the + specified instance resource. + + Args: + request (:class:`google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + resource (:class:`str`): + REQUIRED: The resource for which the + policy detail is being requested. See + the operation documentation for the + appropriate value for this field. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + permissions (:class:`Sequence[str]`): + The set of permissions to check for the ``resource``. + Permissions with wildcards (such as '*' or 'storage.*') + are not allowed. For more information see `IAM + Overview `__. + + This corresponds to the ``permissions`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse: + Response message for TestIamPermissions method. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([resource, permissions]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy.TestIamPermissionsRequest(**request) + + elif not request: + request = iam_policy.TestIamPermissionsRequest( + resource=resource, permissions=permissions, + ) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.test_iam_permissions, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-bigtable-admin", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("BigtableInstanceAdminAsyncClient",) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py new file mode 100644 index 000000000000..8e6f504da4e9 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -0,0 +1,2069 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +from distutils import util +import os +import re +from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import pagers +from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin +from google.cloud.bigtable_admin_v2.types import common +from google.cloud.bigtable_admin_v2.types import instance +from google.cloud.bigtable_admin_v2.types import instance as gba_instance +from google.iam.v1 import iam_policy_pb2 as iam_policy # type: ignore +from google.iam.v1 import policy_pb2 as policy # type: ignore +from google.protobuf import field_mask_pb2 as field_mask # type: ignore + +from .transports.base import BigtableInstanceAdminTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import BigtableInstanceAdminGrpcTransport +from .transports.grpc_asyncio import BigtableInstanceAdminGrpcAsyncIOTransport + + +class BigtableInstanceAdminClientMeta(type): + """Metaclass for the BigtableInstanceAdmin client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. 
+    """
+
+    _transport_registry = (
+        OrderedDict()
+    )  # type: Dict[str, Type[BigtableInstanceAdminTransport]]
+    _transport_registry["grpc"] = BigtableInstanceAdminGrpcTransport
+    _transport_registry["grpc_asyncio"] = BigtableInstanceAdminGrpcAsyncIOTransport
+
+    def get_transport_class(
+        cls, label: str = None,
+    ) -> Type[BigtableInstanceAdminTransport]:
+        """Return an appropriate transport class.
+
+        Args:
+            label: The name of the desired transport. If none is
+                provided, then the first transport in the registry is used.
+
+        Returns:
+            The transport class to use.
+        """
+        # If a specific transport is requested, return that one.
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class BigtableInstanceAdminClient(metaclass=BigtableInstanceAdminClientMeta):
+    """Service for creating, configuring, and deleting Cloud
+    Bigtable Instances and Clusters. Provides access to the Instance
+    and Cluster schemas only, not the tables' metadata or data
+    stored in those tables.
+    """
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Convert api endpoint to mTLS endpoint.
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "bigtableadmin.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            BigtableInstanceAdminClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_info(info)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+        file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            BigtableInstanceAdminClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_file(filename)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    from_service_account_json = from_service_account_file
+
+    @property
+    def transport(self) -> BigtableInstanceAdminTransport:
+        """Return the transport used by the client instance.
+
+        Returns:
+            BigtableInstanceAdminTransport: The transport used by the client instance.
+        """
+        return self._transport
+
+    @staticmethod
+    def app_profile_path(project: str, instance: str, app_profile: str,) -> str:
+        """Return a fully-qualified app_profile string."""
+        return "projects/{project}/instances/{instance}/appProfiles/{app_profile}".format(
+            project=project, instance=instance, app_profile=app_profile,
+        )
+
+    @staticmethod
+    def parse_app_profile_path(path: str) -> Dict[str, str]:
+        """Parse a app_profile path into its component segments."""
+        m = re.match(
+            r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)/appProfiles/(?P<app_profile>.+?)$",
+            path,
+        )
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def cluster_path(project: str, instance: str, cluster: str,) -> str:
+        """Return a fully-qualified cluster string."""
+        return "projects/{project}/instances/{instance}/clusters/{cluster}".format(
+            project=project, instance=instance, cluster=cluster,
+        )
+
+    @staticmethod
+    def parse_cluster_path(path: str) -> Dict[str, str]:
+        """Parse a cluster path into its component segments."""
+        m = re.match(
+            r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)/clusters/(?P<cluster>.+?)$",
+            path,
+        )
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def instance_path(project: str, instance: str,) -> str:
+        """Return a fully-qualified instance string."""
+        return "projects/{project}/instances/{instance}".format(
+            project=project, instance=instance,
+        )
+
+    @staticmethod
+    def parse_instance_path(path: str) -> Dict[str, str]:
+        """Parse a instance path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_billing_account_path(billing_account: str,) -> str:
+        """Return a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(
+            billing_account=billing_account,
+        )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> Dict[str, str]:
+        """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(folder: str,) -> str:
+        """Return a fully-qualified folder string."""
+        return "folders/{folder}".format(folder=folder,)
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str, str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(organization: str,) -> str:
+        """Return a fully-qualified organization string."""
+        return "organizations/{organization}".format(organization=organization,)
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str, str]:
+        """Parse a organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(project: str,) -> str:
+        """Return a fully-qualified project string."""
+        return "projects/{project}".format(project=project,)
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str, str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(project: str, location: str,) -> str:
+        """Return a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(
+            project=project, location=location,
+        )
+
+    @staticmethod
+    def parse_common_location_path(path: str) -> Dict[str, str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    def __init__(
+        self,
+        *,
+        credentials: Optional[credentials.Credentials] = None,
+        transport: Union[str, BigtableInstanceAdminTransport, None] = None,
+        client_options: Optional[client_options_lib.ClientOptions] = None,
+        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+    ) -> None:
+        """Instantiate the bigtable instance admin client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            transport (Union[str, BigtableInstanceAdminTransport]): The
+                transport to use. If set to None, a transport is chosen
+                automatically.
+            client_options (google.api_core.client_options.ClientOptions): Custom options for the
+                client. It won't take effect if a ``transport`` instance is provided.
+                (1) The ``api_endpoint`` property can be used to override the
+                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
+                environment variable can also be used to override the endpoint:
+                "always" (always use the default mTLS endpoint), "never" (always
+                use the default regular endpoint) and "auto" (auto switch to the
+                default mTLS endpoint if client certificate is present, this is
+                the default value). However, the ``api_endpoint`` property takes
+                precedence if provided.
+                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+                is "true", then the ``client_cert_source`` property can be used
+                to provide client certificate for mutual TLS transport. If
+                not provided, the default SSL client certificate will be used if
+                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+                set, no client certificate will be used.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+        """
+        if isinstance(client_options, dict):
+            client_options = client_options_lib.from_dict(client_options)
+        if client_options is None:
+            client_options = client_options_lib.ClientOptions()
+
+        # Create SSL credentials for mutual TLS if needed.
+        use_client_cert = bool(
+            util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
+        )
+
+        client_cert_source_func = None
+        is_mtls = False
+        if use_client_cert:
+            if client_options.client_cert_source:
+                is_mtls = True
+                client_cert_source_func = client_options.client_cert_source
+            else:
+                is_mtls = mtls.has_default_client_cert_source()
+                client_cert_source_func = (
+                    mtls.default_client_cert_source() if is_mtls else None
+                )
+
+        # Figure out which api endpoint to use.
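+        # (Editorial sketch, not part of the generated code: an illustration of
+        # how the resolution below behaves under stated assumptions. With
+        # GOOGLE_API_USE_MTLS_ENDPOINT left at its default "auto" and no
+        # explicit api_endpoint, the mTLS endpoint is used only when a client
+        # certificate is configured; passing an endpoint pins it, e.g.
+        #
+        #     client = BigtableInstanceAdminClient(
+        #         client_options={"api_endpoint": "bigtableadmin.googleapis.com"}
+        #     )
+        #
+        # always uses the regular endpoint regardless of the environment
+        # variable.)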
+ if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + api_endpoint = ( + self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT + ) + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, BigtableInstanceAdminTransport): + # transport is a BigtableInstanceAdminTransport instance. + if credentials or client_options.credentials_file: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, " + "provide its scopes directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def create_instance( + self, + request: bigtable_instance_admin.CreateInstanceRequest = None, + *, + parent: str = None, + instance_id: str = None, + instance: gba_instance.Instance = None, + clusters: Sequence[ + bigtable_instance_admin.CreateInstanceRequest.ClustersEntry + ] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Create an instance within a project. + + Args: + request (google.cloud.bigtable_admin_v2.types.CreateInstanceRequest): + The request object. Request message for + BigtableInstanceAdmin.CreateInstance. + parent (str): + Required. The unique name of the project in which to + create the new instance. Values are of the form + ``projects/{project}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_id (str): + Required. The ID to be used when referring to the new + instance within its project, e.g., just ``myinstance`` + rather than ``projects/myproject/instances/myinstance``. + + This corresponds to the ``instance_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance (google.cloud.bigtable_admin_v2.types.Instance): + Required. The instance to create. Fields marked + ``OutputOnly`` must be left blank. + + This corresponds to the ``instance`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + clusters (Sequence[google.cloud.bigtable_admin_v2.types.CreateInstanceRequest.ClustersEntry]): + Required. The clusters to be created within the + instance, mapped by desired cluster ID, e.g., just + ``mycluster`` rather than + ``projects/myproject/instances/myinstance/clusters/mycluster``. + Fields marked ``OutputOnly`` must be left blank. + Currently, at most four clusters can be specified. 
+ + This corresponds to the ``clusters`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Instance` A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and + the resources that serve them. All tables in an + instance are served from all + [Clusters][google.bigtable.admin.v2.Cluster] in the + instance. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, instance_id, instance, clusters]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_instance_admin.CreateInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_instance_admin.CreateInstanceRequest): + request = bigtable_instance_admin.CreateInstanceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if instance_id is not None: + request.instance_id = instance_id + if instance is not None: + request.instance = instance + + if clusters: + request.clusters.update(clusters) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_instance] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + gba_instance.Instance, + metadata_type=bigtable_instance_admin.CreateInstanceMetadata, + ) + + # Done; return the response. + return response + + def get_instance( + self, + request: bigtable_instance_admin.GetInstanceRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> instance.Instance: + r"""Gets information about an instance. + + Args: + request (google.cloud.bigtable_admin_v2.types.GetInstanceRequest): + The request object. Request message for + BigtableInstanceAdmin.GetInstance. + name (str): + Required. The unique name of the requested instance. + Values are of the form + ``projects/{project}/instances/{instance}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.Instance: + A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and + the resources that serve them. All tables in an + instance are served from all + [Clusters][google.bigtable.admin.v2.Cluster] in the + instance. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_instance_admin.GetInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_instance_admin.GetInstanceRequest): + request = bigtable_instance_admin.GetInstanceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_instance] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_instances( + self, + request: bigtable_instance_admin.ListInstancesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> bigtable_instance_admin.ListInstancesResponse: + r"""Lists information about instances in a project. + + Args: + request (google.cloud.bigtable_admin_v2.types.ListInstancesRequest): + The request object. Request message for + BigtableInstanceAdmin.ListInstances. + parent (str): + Required. The unique name of the project for which a + list of instances is requested. Values are of the form + ``projects/{project}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.ListInstancesResponse: + Response message for + BigtableInstanceAdmin.ListInstances. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_instance_admin.ListInstancesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_instance_admin.ListInstancesRequest): + request = bigtable_instance_admin.ListInstancesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_instances] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def update_instance( + self, + request: instance.Instance = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> instance.Instance: + r"""Updates an instance within a project. This method + updates only the display name and type for an Instance. + To update other Instance properties, such as labels, use + PartialUpdateInstance. + + Args: + request (google.cloud.bigtable_admin_v2.types.Instance): + The request object. A collection of Bigtable + [Tables][google.bigtable.admin.v2.Table] and the + resources that serve them. All tables in an instance are + served from all + [Clusters][google.bigtable.admin.v2.Cluster] in the + instance. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.Instance: + A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and + the resources that serve them. All tables in an + instance are served from all + [Clusters][google.bigtable.admin.v2.Cluster] in the + instance. + + """ + # Create or coerce a protobuf request object. + + # Minor optimization to avoid making a copy if the user passes + # in a instance.Instance. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, instance.Instance): + request = instance.Instance(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_instance] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def partial_update_instance( + self, + request: bigtable_instance_admin.PartialUpdateInstanceRequest = None, + *, + instance: gba_instance.Instance = None, + update_mask: field_mask.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Partially updates an instance within a project. This + method can modify all fields of an Instance and is the + preferred way to update an Instance. + + Args: + request (google.cloud.bigtable_admin_v2.types.PartialUpdateInstanceRequest): + The request object. Request message for + BigtableInstanceAdmin.PartialUpdateInstance. + instance (google.cloud.bigtable_admin_v2.types.Instance): + Required. The Instance which will + (partially) replace the current value. + + This corresponds to the ``instance`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The subset of Instance + fields which should be replaced. Must be + explicitly set. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Instance` A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and + the resources that serve them. All tables in an + instance are served from all + [Clusters][google.bigtable.admin.v2.Cluster] in the + instance. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([instance, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_instance_admin.PartialUpdateInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, bigtable_instance_admin.PartialUpdateInstanceRequest + ): + request = bigtable_instance_admin.PartialUpdateInstanceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if instance is not None: + request.instance = instance + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.partial_update_instance] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("instance.name", request.instance.name),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. 
+ response = operation.from_gapic( + response, + self._transport.operations_client, + gba_instance.Instance, + metadata_type=bigtable_instance_admin.UpdateInstanceMetadata, + ) + + # Done; return the response. + return response + + def delete_instance( + self, + request: bigtable_instance_admin.DeleteInstanceRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Delete an instance from a project. + + Args: + request (google.cloud.bigtable_admin_v2.types.DeleteInstanceRequest): + The request object. Request message for + BigtableInstanceAdmin.DeleteInstance. + name (str): + Required. The unique name of the instance to be deleted. + Values are of the form + ``projects/{project}/instances/{instance}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_instance_admin.DeleteInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_instance_admin.DeleteInstanceRequest): + request = bigtable_instance_admin.DeleteInstanceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_instance] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + def create_cluster( + self, + request: bigtable_instance_admin.CreateClusterRequest = None, + *, + parent: str = None, + cluster_id: str = None, + cluster: instance.Cluster = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Creates a cluster within an instance. + + Args: + request (google.cloud.bigtable_admin_v2.types.CreateClusterRequest): + The request object. Request message for + BigtableInstanceAdmin.CreateCluster. + parent (str): + Required. The unique name of the instance in which to + create the new cluster. Values are of the form + ``projects/{project}/instances/{instance}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (str): + Required. 
The ID to be used when referring to the new + cluster within its instance, e.g., just ``mycluster`` + rather than + ``projects/myproject/instances/myinstance/clusters/mycluster``. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster (google.cloud.bigtable_admin_v2.types.Cluster): + Required. The cluster to be created. Fields marked + ``OutputOnly`` must be left blank. + + This corresponds to the ``cluster`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Cluster` A resizable group of nodes in a particular cloud location, capable + of serving all + [Tables][google.bigtable.admin.v2.Table] in the + parent [Instance][google.bigtable.admin.v2.Instance]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, cluster_id, cluster]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_instance_admin.CreateClusterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_instance_admin.CreateClusterRequest): + request = bigtable_instance_admin.CreateClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if cluster_id is not None: + request.cluster_id = cluster_id + if cluster is not None: + request.cluster = cluster + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_cluster] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + instance.Cluster, + metadata_type=bigtable_instance_admin.CreateClusterMetadata, + ) + + # Done; return the response. + return response + + def get_cluster( + self, + request: bigtable_instance_admin.GetClusterRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> instance.Cluster: + r"""Gets information about a cluster. + + Args: + request (google.cloud.bigtable_admin_v2.types.GetClusterRequest): + The request object. Request message for + BigtableInstanceAdmin.GetCluster. + name (str): + Required. The unique name of the requested cluster. 
+ Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.Cluster: + A resizable group of nodes in a particular cloud location, capable + of serving all + [Tables][google.bigtable.admin.v2.Table] in the + parent [Instance][google.bigtable.admin.v2.Instance]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_instance_admin.GetClusterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_instance_admin.GetClusterRequest): + request = bigtable_instance_admin.GetClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_cluster] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_clusters( + self, + request: bigtable_instance_admin.ListClustersRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> bigtable_instance_admin.ListClustersResponse: + r"""Lists information about clusters in an instance. + + Args: + request (google.cloud.bigtable_admin_v2.types.ListClustersRequest): + The request object. Request message for + BigtableInstanceAdmin.ListClusters. + parent (str): + Required. The unique name of the instance for which a + list of clusters is requested. Values are of the form + ``projects/{project}/instances/{instance}``. Use + ``{instance} = '-'`` to list Clusters for all Instances + in a project, e.g., ``projects/myproject/instances/-``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.ListClustersResponse: + Response message for + BigtableInstanceAdmin.ListClusters. + + """ + # Create or coerce a protobuf request object. 
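+        # (Editorial sketch, not part of the generated code: a hypothetical
+        # caller-side illustration of the '-' wildcard described in the
+        # docstring above; the project ID is a placeholder.
+        #
+        #     client = BigtableInstanceAdminClient()
+        #     response = client.list_clusters(
+        #         parent="projects/my-project/instances/-"
+        #     )
+        #     for cluster in response.clusters:
+        #         print(cluster.name, cluster.serve_nodes)
+        # )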
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_instance_admin.ListClustersRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_instance_admin.ListClustersRequest): + request = bigtable_instance_admin.ListClustersRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_clusters] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def update_cluster( + self, + request: instance.Cluster = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Updates a cluster within an instance. + + Args: + request (google.cloud.bigtable_admin_v2.types.Cluster): + The request object. A resizable group of nodes in a + particular cloud location, capable of serving all + [Tables][google.bigtable.admin.v2.Table] in the parent + [Instance][google.bigtable.admin.v2.Instance]. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Cluster` A resizable group of nodes in a particular cloud location, capable + of serving all + [Tables][google.bigtable.admin.v2.Table] in the + parent [Instance][google.bigtable.admin.v2.Instance]. + + """ + # Create or coerce a protobuf request object. + + # Minor optimization to avoid making a copy if the user passes + # in a instance.Cluster. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, instance.Cluster): + request = instance.Cluster(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_cluster] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. 
+ response = operation.from_gapic( + response, + self._transport.operations_client, + instance.Cluster, + metadata_type=bigtable_instance_admin.UpdateClusterMetadata, + ) + + # Done; return the response. + return response + + def delete_cluster( + self, + request: bigtable_instance_admin.DeleteClusterRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a cluster from an instance. + + Args: + request (google.cloud.bigtable_admin_v2.types.DeleteClusterRequest): + The request object. Request message for + BigtableInstanceAdmin.DeleteCluster. + name (str): + Required. The unique name of the cluster to be deleted. + Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_instance_admin.DeleteClusterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_instance_admin.DeleteClusterRequest): + request = bigtable_instance_admin.DeleteClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_cluster] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + def create_app_profile( + self, + request: bigtable_instance_admin.CreateAppProfileRequest = None, + *, + parent: str = None, + app_profile_id: str = None, + app_profile: instance.AppProfile = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> instance.AppProfile: + r"""Creates an app profile within an instance. + + Args: + request (google.cloud.bigtable_admin_v2.types.CreateAppProfileRequest): + The request object. Request message for + BigtableInstanceAdmin.CreateAppProfile. + parent (str): + Required. The unique name of the instance in which to + create the new app profile. Values are of the form + ``projects/{project}/instances/{instance}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile_id (str): + Required. 
The ID to be used when referring to the new + app profile within its instance, e.g., just + ``myprofile`` rather than + ``projects/myproject/instances/myinstance/appProfiles/myprofile``. + + This corresponds to the ``app_profile_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile (google.cloud.bigtable_admin_v2.types.AppProfile): + Required. The app profile to be created. Fields marked + ``OutputOnly`` will be ignored. + + This corresponds to the ``app_profile`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.AppProfile: + A configuration object describing how + Cloud Bigtable should treat traffic from + a particular end user application. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, app_profile_id, app_profile]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_instance_admin.CreateAppProfileRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_instance_admin.CreateAppProfileRequest): + request = bigtable_instance_admin.CreateAppProfileRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if app_profile_id is not None: + request.app_profile_id = app_profile_id + if app_profile is not None: + request.app_profile = app_profile + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_app_profile] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_app_profile( + self, + request: bigtable_instance_admin.GetAppProfileRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> instance.AppProfile: + r"""Gets information about an app profile. + + Args: + request (google.cloud.bigtable_admin_v2.types.GetAppProfileRequest): + The request object. Request message for + BigtableInstanceAdmin.GetAppProfile. + name (str): + Required. The unique name of the requested app profile. + Values are of the form + ``projects/{project}/instances/{instance}/appProfiles/{app_profile}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.AppProfile: + A configuration object describing how + Cloud Bigtable should treat traffic from + a particular end user application. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_instance_admin.GetAppProfileRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_instance_admin.GetAppProfileRequest): + request = bigtable_instance_admin.GetAppProfileRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_app_profile] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_app_profiles( + self, + request: bigtable_instance_admin.ListAppProfilesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListAppProfilesPager: + r"""Lists information about app profiles in an instance. + + Args: + request (google.cloud.bigtable_admin_v2.types.ListAppProfilesRequest): + The request object. Request message for + BigtableInstanceAdmin.ListAppProfiles. + parent (str): + Required. The unique name of the instance for which a + list of app profiles is requested. Values are of the + form ``projects/{project}/instances/{instance}``. Use + ``{instance} = '-'`` to list AppProfiles for all + Instances in a project, e.g., + ``projects/myproject/instances/-``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListAppProfilesPager: + Response message for + BigtableInstanceAdmin.ListAppProfiles. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_instance_admin.ListAppProfilesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_instance_admin.ListAppProfilesRequest): + request = bigtable_instance_admin.ListAppProfilesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_app_profiles] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListAppProfilesPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def update_app_profile( + self, + request: bigtable_instance_admin.UpdateAppProfileRequest = None, + *, + app_profile: instance.AppProfile = None, + update_mask: field_mask.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Updates an app profile within an instance. + + Args: + request (google.cloud.bigtable_admin_v2.types.UpdateAppProfileRequest): + The request object. Request message for + BigtableInstanceAdmin.UpdateAppProfile. + app_profile (google.cloud.bigtable_admin_v2.types.AppProfile): + Required. The app profile which will + (partially) replace the current value. + + This corresponds to the ``app_profile`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The subset of app profile + fields which should be replaced. If + unset, all fields will be replaced. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.AppProfile` A configuration object describing how Cloud Bigtable should treat traffic + from a particular end user application. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([app_profile, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_instance_admin.UpdateAppProfileRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_instance_admin.UpdateAppProfileRequest): + request = bigtable_instance_admin.UpdateAppProfileRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if app_profile is not None: + request.app_profile = app_profile + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_app_profile] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("app_profile.name", request.app_profile.name),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + instance.AppProfile, + metadata_type=bigtable_instance_admin.UpdateAppProfileMetadata, + ) + + # Done; return the response. + return response + + def delete_app_profile( + self, + request: bigtable_instance_admin.DeleteAppProfileRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes an app profile from an instance. + + Args: + request (google.cloud.bigtable_admin_v2.types.DeleteAppProfileRequest): + The request object. Request message for + BigtableInstanceAdmin.DeleteAppProfile. + name (str): + Required. The unique name of the app profile to be + deleted. Values are of the form + ``projects/{project}/instances/{instance}/appProfiles/{app_profile}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_instance_admin.DeleteAppProfileRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_instance_admin.DeleteAppProfileRequest): + request = bigtable_instance_admin.DeleteAppProfileRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_app_profile] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + def get_iam_policy( + self, + request: iam_policy.GetIamPolicyRequest = None, + *, + resource: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy.Policy: + r"""Gets the access control policy for an instance + resource. Returns an empty policy if an instance exists + but does not have a policy set. + + Args: + request (google.iam.v1.iam_policy_pb2.GetIamPolicyRequest): + The request object. Request message for `GetIamPolicy` + method. + resource (str): + REQUIRED: The resource for which the + policy is being requested. See the + operation documentation for the + appropriate value for this field. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.iam.v1.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. It is used to + specify access control policies for Cloud Platform + resources. + + A Policy is a collection of bindings. A binding binds + one or more members to a single role. Members can be + user accounts, service accounts, Google groups, and + domains (such as G Suite). A role is a named list of + permissions (defined by IAM or configured by users). + A binding can optionally specify a condition, which + is a logic expression that further constrains the + role binding based on attributes about the request + and/or target resource. + + **JSON Example** + + { + "bindings": [ + { + "role": + "roles/resourcemanager.organizationAdmin", + "members": [ "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + + }, { "role": + "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { "title": "expirable access", + "description": "Does not grant access after + Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } + + ] + + } + + **YAML Example** + + bindings: - members: - user:\ mike@example.com - + group:\ admins@example.com - domain:google.com - + serviceAccount:\ my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - + members: - user:\ eve@example.com role: + roles/resourcemanager.organizationViewer + condition: title: expirable access description: + Does not grant access after Sep 2020 expression: + request.time < + timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the + [IAM developer's + guide](\ https://cloud.google.com/iam/docs). + + """ + # Create or coerce a protobuf request object. 
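+        # (Editorial sketch, not part of the generated code: a hypothetical
+        # caller-side illustration of fetching and inspecting an instance's
+        # IAM policy; the project and instance IDs are placeholders.
+        #
+        #     client = BigtableInstanceAdminClient()
+        #     policy = client.get_iam_policy(
+        #         resource="projects/my-project/instances/my-instance"
+        #     )
+        #     for binding in policy.bindings:
+        #         print(binding.role, list(binding.members))
+        # )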
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([resource]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy.GetIamPolicyRequest(**request) + + elif not request: + request = iam_policy.GetIamPolicyRequest(resource=resource,) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_iam_policy] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def set_iam_policy( + self, + request: iam_policy.SetIamPolicyRequest = None, + *, + resource: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy.Policy: + r"""Sets the access control policy on an instance + resource. Replaces any existing policy. + + Args: + request (google.iam.v1.iam_policy_pb2.SetIamPolicyRequest): + The request object. Request message for `SetIamPolicy` + method. + resource (str): + REQUIRED: The resource for which the + policy is being specified. See the + operation documentation for the + appropriate value for this field. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.iam.v1.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. It is used to + specify access control policies for Cloud Platform + resources. + + A Policy is a collection of bindings. A binding binds + one or more members to a single role. Members can be + user accounts, service accounts, Google groups, and + domains (such as G Suite). A role is a named list of + permissions (defined by IAM or configured by users). + A binding can optionally specify a condition, which + is a logic expression that further constrains the + role binding based on attributes about the request + and/or target resource. 
+ + **JSON Example** + + { + "bindings": [ + { + "role": + "roles/resourcemanager.organizationAdmin", + "members": [ "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + + }, { "role": + "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { "title": "expirable access", + "description": "Does not grant access after + Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } + + ] + + } + + **YAML Example** + + bindings: - members: - user:\ mike@example.com - + group:\ admins@example.com - domain:google.com - + serviceAccount:\ my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - + members: - user:\ eve@example.com role: + roles/resourcemanager.organizationViewer + condition: title: expirable access description: + Does not grant access after Sep 2020 expression: + request.time < + timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the + [IAM developer's + guide](\ https://cloud.google.com/iam/docs). + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([resource]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy.SetIamPolicyRequest(**request) + + elif not request: + request = iam_policy.SetIamPolicyRequest(resource=resource,) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_iam_policy] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def test_iam_permissions( + self, + request: iam_policy.TestIamPermissionsRequest = None, + *, + resource: str = None, + permissions: Sequence[str] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy.TestIamPermissionsResponse: + r"""Returns permissions that the caller has on the + specified instance resource. + + Args: + request (google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest): + The request object. Request message for + `TestIamPermissions` method. + resource (str): + REQUIRED: The resource for which the + policy detail is being requested. See + the operation documentation for the + appropriate value for this field. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + permissions (Sequence[str]): + The set of permissions to check for the ``resource``. + Permissions with wildcards (such as '*' or 'storage.*') + are not allowed. For more information see `IAM + Overview `__. 
+ + This corresponds to the ``permissions`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse: + Response message for TestIamPermissions method. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([resource, permissions]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy.TestIamPermissionsRequest(**request) + + elif not request: + request = iam_policy.TestIamPermissionsRequest( + resource=resource, permissions=permissions, + ) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.test_iam_permissions] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-bigtable-admin", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("BigtableInstanceAdminClient",) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py new file mode 100644 index 000000000000..f70936b5b458 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py @@ -0,0 +1,153 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple + +from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin +from google.cloud.bigtable_admin_v2.types import instance + + +class ListAppProfilesPager: + """A pager for iterating through ``list_app_profiles`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.bigtable_admin_v2.types.ListAppProfilesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``app_profiles`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListAppProfiles`` requests and continue to iterate + through the ``app_profiles`` field on the + corresponding responses. + + All the usual :class:`google.cloud.bigtable_admin_v2.types.ListAppProfilesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., bigtable_instance_admin.ListAppProfilesResponse], + request: bigtable_instance_admin.ListAppProfilesRequest, + response: bigtable_instance_admin.ListAppProfilesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.bigtable_admin_v2.types.ListAppProfilesRequest): + The initial request object. + response (google.cloud.bigtable_admin_v2.types.ListAppProfilesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = bigtable_instance_admin.ListAppProfilesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[bigtable_instance_admin.ListAppProfilesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[instance.AppProfile]: + for page in self.pages: + yield from page.app_profiles + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListAppProfilesAsyncPager: + """A pager for iterating through ``list_app_profiles`` requests. + + This class thinly wraps an initial + :class:`google.cloud.bigtable_admin_v2.types.ListAppProfilesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``app_profiles`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListAppProfiles`` requests and continue to iterate + through the ``app_profiles`` field on the + corresponding responses. + + All the usual :class:`google.cloud.bigtable_admin_v2.types.ListAppProfilesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[ + ..., Awaitable[bigtable_instance_admin.ListAppProfilesResponse] + ], + request: bigtable_instance_admin.ListAppProfilesRequest, + response: bigtable_instance_admin.ListAppProfilesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.bigtable_admin_v2.types.ListAppProfilesRequest): + The initial request object. + response (google.cloud.bigtable_admin_v2.types.ListAppProfilesResponse): + The initial response object. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = bigtable_instance_admin.ListAppProfilesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterable[bigtable_instance_admin.ListAppProfilesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[instance.AppProfile]: + async def async_generator(): + async for page in self.pages: + for response in page.app_profiles: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py new file mode 100644 index 000000000000..23b5107110b4 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +from typing import Dict, Type + +from .base import BigtableInstanceAdminTransport +from .grpc import BigtableInstanceAdminGrpcTransport +from .grpc_asyncio import BigtableInstanceAdminGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = ( + OrderedDict() +) # type: Dict[str, Type[BigtableInstanceAdminTransport]] +_transport_registry["grpc"] = BigtableInstanceAdminGrpcTransport +_transport_registry["grpc_asyncio"] = BigtableInstanceAdminGrpcAsyncIOTransport + +__all__ = ( + "BigtableInstanceAdminTransport", + "BigtableInstanceAdminGrpcTransport", + "BigtableInstanceAdminGrpcAsyncIOTransport", +) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py new file mode 100644 index 000000000000..004424c28d97 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py @@ -0,0 +1,491 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
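(For context on the pager classes above: a minimal usage sketch, assuming the BigtableInstanceAdminClient generated earlier in this patch, default credentials in the environment, and placeholder project/instance names; list_app_profiles is expected to return the ListAppProfilesPager defined above.)

    from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
        BigtableInstanceAdminClient,
    )

    client = BigtableInstanceAdminClient()  # application default credentials
    pager = client.list_app_profiles(
        parent="projects/my-project/instances/my-instance"  # placeholder resource name
    )

    # Additional pages are fetched lazily as iteration proceeds.
    for app_profile in pager:
        print(app_profile.name)
    # Whole-page iteration is also available via the pager's ``pages`` property.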
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import abc +import typing +import pkg_resources + +from google import auth # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials # type: ignore + +from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin +from google.cloud.bigtable_admin_v2.types import instance +from google.iam.v1 import iam_policy_pb2 as iam_policy # type: ignore +from google.iam.v1 import policy_pb2 as policy # type: ignore +from google.longrunning import operations_pb2 as operations # type: ignore +from google.protobuf import empty_pb2 as empty # type: ignore + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-bigtable-admin", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class BigtableInstanceAdminTransport(abc.ABC): + """Abstract transport class for BigtableInstanceAdmin.""" + + AUTH_SCOPES = ( + "https://www.googleapis.com/auth/bigtable.admin", + "https://www.googleapis.com/auth/bigtable.admin.cluster", + "https://www.googleapis.com/auth/bigtable.admin.instance", + "https://www.googleapis.com/auth/cloud-bigtable.admin", + "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ) + + def __init__( + self, + *, + host: str = "bigtableadmin.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: typing.Optional[str] = None, + scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES, + quota_project_id: typing.Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scope (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. 
+ if ":" not in host: + host += ":443" + self._host = host + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = auth.load_credentials_from_file( + credentials_file, scopes=scopes, quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = auth.default( + scopes=scopes, quota_project_id=quota_project_id + ) + + # Save the credentials. + self._credentials = credentials + + # Lifted into its own function so it can be stubbed out during tests. + self._prep_wrapped_messages(client_info) + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.create_instance: gapic_v1.method.wrap_method( + self.create_instance, default_timeout=300.0, client_info=client_info, + ), + self.get_instance: gapic_v1.method.wrap_method( + self.get_instance, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_instances: gapic_v1.method.wrap_method( + self.list_instances, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.update_instance: gapic_v1.method.wrap_method( + self.update_instance, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.partial_update_instance: gapic_v1.method.wrap_method( + self.partial_update_instance, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.delete_instance: gapic_v1.method.wrap_method( + self.delete_instance, default_timeout=60.0, client_info=client_info, + ), + self.create_cluster: gapic_v1.method.wrap_method( + self.create_cluster, default_timeout=60.0, client_info=client_info, + ), + self.get_cluster: gapic_v1.method.wrap_method( + self.get_cluster, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_clusters: gapic_v1.method.wrap_method( + self.list_clusters, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.update_cluster: gapic_v1.method.wrap_method( + self.update_cluster, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.delete_cluster: gapic_v1.method.wrap_method( + 
self.delete_cluster, default_timeout=60.0, client_info=client_info, + ), + self.create_app_profile: gapic_v1.method.wrap_method( + self.create_app_profile, default_timeout=60.0, client_info=client_info, + ), + self.get_app_profile: gapic_v1.method.wrap_method( + self.get_app_profile, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_app_profiles: gapic_v1.method.wrap_method( + self.list_app_profiles, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.update_app_profile: gapic_v1.method.wrap_method( + self.update_app_profile, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.delete_app_profile: gapic_v1.method.wrap_method( + self.delete_app_profile, default_timeout=60.0, client_info=client_info, + ), + self.get_iam_policy: gapic_v1.method.wrap_method( + self.get_iam_policy, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.set_iam_policy: gapic_v1.method.wrap_method( + self.set_iam_policy, default_timeout=60.0, client_info=client_info, + ), + self.test_iam_permissions: gapic_v1.method.wrap_method( + self.test_iam_permissions, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + } + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_instance( + self, + ) -> typing.Callable[ + [bigtable_instance_admin.CreateInstanceRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def get_instance( + self, + ) -> typing.Callable[ + [bigtable_instance_admin.GetInstanceRequest], + typing.Union[instance.Instance, typing.Awaitable[instance.Instance]], + ]: + raise NotImplementedError() + + @property + def list_instances( + self, + ) -> typing.Callable[ + [bigtable_instance_admin.ListInstancesRequest], + typing.Union[ + bigtable_instance_admin.ListInstancesResponse, + typing.Awaitable[bigtable_instance_admin.ListInstancesResponse], + ], + ]: + raise NotImplementedError() + + @property + def update_instance( + self, + ) -> typing.Callable[ + [instance.Instance], + typing.Union[instance.Instance, typing.Awaitable[instance.Instance]], + ]: + raise NotImplementedError() + + @property + def partial_update_instance( + self, + ) -> typing.Callable[ + [bigtable_instance_admin.PartialUpdateInstanceRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def delete_instance( + self, + ) -> typing.Callable[ + 
[bigtable_instance_admin.DeleteInstanceRequest], + typing.Union[empty.Empty, typing.Awaitable[empty.Empty]], + ]: + raise NotImplementedError() + + @property + def create_cluster( + self, + ) -> typing.Callable[ + [bigtable_instance_admin.CreateClusterRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def get_cluster( + self, + ) -> typing.Callable[ + [bigtable_instance_admin.GetClusterRequest], + typing.Union[instance.Cluster, typing.Awaitable[instance.Cluster]], + ]: + raise NotImplementedError() + + @property + def list_clusters( + self, + ) -> typing.Callable[ + [bigtable_instance_admin.ListClustersRequest], + typing.Union[ + bigtable_instance_admin.ListClustersResponse, + typing.Awaitable[bigtable_instance_admin.ListClustersResponse], + ], + ]: + raise NotImplementedError() + + @property + def update_cluster( + self, + ) -> typing.Callable[ + [instance.Cluster], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def delete_cluster( + self, + ) -> typing.Callable[ + [bigtable_instance_admin.DeleteClusterRequest], + typing.Union[empty.Empty, typing.Awaitable[empty.Empty]], + ]: + raise NotImplementedError() + + @property + def create_app_profile( + self, + ) -> typing.Callable[ + [bigtable_instance_admin.CreateAppProfileRequest], + typing.Union[instance.AppProfile, typing.Awaitable[instance.AppProfile]], + ]: + raise NotImplementedError() + + @property + def get_app_profile( + self, + ) -> typing.Callable[ + [bigtable_instance_admin.GetAppProfileRequest], + typing.Union[instance.AppProfile, typing.Awaitable[instance.AppProfile]], + ]: + raise NotImplementedError() + + @property + def list_app_profiles( + self, + ) -> typing.Callable[ + [bigtable_instance_admin.ListAppProfilesRequest], + typing.Union[ + bigtable_instance_admin.ListAppProfilesResponse, + typing.Awaitable[bigtable_instance_admin.ListAppProfilesResponse], + ], + ]: + raise NotImplementedError() + + @property + def update_app_profile( + self, + ) -> typing.Callable[ + [bigtable_instance_admin.UpdateAppProfileRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def delete_app_profile( + self, + ) -> typing.Callable[ + [bigtable_instance_admin.DeleteAppProfileRequest], + typing.Union[empty.Empty, typing.Awaitable[empty.Empty]], + ]: + raise NotImplementedError() + + @property + def get_iam_policy( + self, + ) -> typing.Callable[ + [iam_policy.GetIamPolicyRequest], + typing.Union[policy.Policy, typing.Awaitable[policy.Policy]], + ]: + raise NotImplementedError() + + @property + def set_iam_policy( + self, + ) -> typing.Callable[ + [iam_policy.SetIamPolicyRequest], + typing.Union[policy.Policy, typing.Awaitable[policy.Policy]], + ]: + raise NotImplementedError() + + @property + def test_iam_permissions( + self, + ) -> typing.Callable[ + [iam_policy.TestIamPermissionsRequest], + typing.Union[ + iam_policy.TestIamPermissionsResponse, + typing.Awaitable[iam_policy.TestIamPermissionsResponse], + ], + ]: + raise NotImplementedError() + + +__all__ = ("BigtableInstanceAdminTransport",) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py new file mode 100644 index 000000000000..0cbca1c6762a --- 
/dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py @@ -0,0 +1,794 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin +from google.cloud.bigtable_admin_v2.types import instance +from google.iam.v1 import iam_policy_pb2 as iam_policy # type: ignore +from google.iam.v1 import policy_pb2 as policy # type: ignore +from google.longrunning import operations_pb2 as operations # type: ignore +from google.protobuf import empty_pb2 as empty # type: ignore + +from .base import BigtableInstanceAdminTransport, DEFAULT_CLIENT_INFO + + +class BigtableInstanceAdminGrpcTransport(BigtableInstanceAdminTransport): + """gRPC backend transport for BigtableInstanceAdmin. + + Service for creating, configuring, and deleting Cloud + Bigtable Instances and Clusters. Provides access to the Instance + and Cluster schemas only, not the tables' metadata or data + stored in those tables. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "bigtableadmin.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. 
+ scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._ssl_channel_credentials = ssl_channel_credentials + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Sanity check: Ensure that channel and credentials are not both + # provided. + credentials = False + + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + elif api_mtls_endpoint: + host = ( + api_mtls_endpoint + if ":" in api_mtls_endpoint + else api_mtls_endpoint + ":443" + ) + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + ssl_credentials = SslCredentials().ssl_credentials + + # create a new channel. The provided one is ignored. 
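+            # (Passing -1 for the message length options below lifts gRPC's
+            # default message size limits.)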
+ self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + self._ssl_channel_credentials = ssl_credentials + else: + host = host if ":" in host else host + ":443" + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=self._ssl_channel_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + self._stubs = {} # type: Dict[str, Callable] + self._operations_client = None + + # Run the base constructor. + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + client_info=client_info, + ) + + @classmethod + def create_channel( + cls, + host: str = "bigtableadmin.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + address (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs, + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. + """ + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. 
+ """ + # Sanity check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient(self.grpc_channel) + + # Return the client from cache. + return self._operations_client + + @property + def create_instance( + self, + ) -> Callable[ + [bigtable_instance_admin.CreateInstanceRequest], operations.Operation + ]: + r"""Return a callable for the create instance method over gRPC. + + Create an instance within a project. + + Returns: + Callable[[~.CreateInstanceRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_instance" not in self._stubs: + self._stubs["create_instance"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateInstance", + request_serializer=bigtable_instance_admin.CreateInstanceRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["create_instance"] + + @property + def get_instance( + self, + ) -> Callable[[bigtable_instance_admin.GetInstanceRequest], instance.Instance]: + r"""Return a callable for the get instance method over gRPC. + + Gets information about an instance. + + Returns: + Callable[[~.GetInstanceRequest], + ~.Instance]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_instance" not in self._stubs: + self._stubs["get_instance"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetInstance", + request_serializer=bigtable_instance_admin.GetInstanceRequest.serialize, + response_deserializer=instance.Instance.deserialize, + ) + return self._stubs["get_instance"] + + @property + def list_instances( + self, + ) -> Callable[ + [bigtable_instance_admin.ListInstancesRequest], + bigtable_instance_admin.ListInstancesResponse, + ]: + r"""Return a callable for the list instances method over gRPC. + + Lists information about instances in a project. + + Returns: + Callable[[~.ListInstancesRequest], + ~.ListInstancesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_instances" not in self._stubs: + self._stubs["list_instances"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListInstances", + request_serializer=bigtable_instance_admin.ListInstancesRequest.serialize, + response_deserializer=bigtable_instance_admin.ListInstancesResponse.deserialize, + ) + return self._stubs["list_instances"] + + @property + def update_instance(self) -> Callable[[instance.Instance], instance.Instance]: + r"""Return a callable for the update instance method over gRPC. + + Updates an instance within a project. This method + updates only the display name and type for an Instance. + To update other Instance properties, such as labels, use + PartialUpdateInstance. 
+ + Returns: + Callable[[~.Instance], + ~.Instance]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_instance" not in self._stubs: + self._stubs["update_instance"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateInstance", + request_serializer=instance.Instance.serialize, + response_deserializer=instance.Instance.deserialize, + ) + return self._stubs["update_instance"] + + @property + def partial_update_instance( + self, + ) -> Callable[ + [bigtable_instance_admin.PartialUpdateInstanceRequest], operations.Operation + ]: + r"""Return a callable for the partial update instance method over gRPC. + + Partially updates an instance within a project. This + method can modify all fields of an Instance and is the + preferred way to update an Instance. + + Returns: + Callable[[~.PartialUpdateInstanceRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "partial_update_instance" not in self._stubs: + self._stubs["partial_update_instance"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/PartialUpdateInstance", + request_serializer=bigtable_instance_admin.PartialUpdateInstanceRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["partial_update_instance"] + + @property + def delete_instance( + self, + ) -> Callable[[bigtable_instance_admin.DeleteInstanceRequest], empty.Empty]: + r"""Return a callable for the delete instance method over gRPC. + + Delete an instance from a project. + + Returns: + Callable[[~.DeleteInstanceRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_instance" not in self._stubs: + self._stubs["delete_instance"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteInstance", + request_serializer=bigtable_instance_admin.DeleteInstanceRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_instance"] + + @property + def create_cluster( + self, + ) -> Callable[[bigtable_instance_admin.CreateClusterRequest], operations.Operation]: + r"""Return a callable for the create cluster method over gRPC. + + Creates a cluster within an instance. + + Returns: + Callable[[~.CreateClusterRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
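+        # The stub is created once and cached in self._stubs, so later
+        # property accesses reuse the same callable.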
+ if "create_cluster" not in self._stubs: + self._stubs["create_cluster"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateCluster", + request_serializer=bigtable_instance_admin.CreateClusterRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["create_cluster"] + + @property + def get_cluster( + self, + ) -> Callable[[bigtable_instance_admin.GetClusterRequest], instance.Cluster]: + r"""Return a callable for the get cluster method over gRPC. + + Gets information about a cluster. + + Returns: + Callable[[~.GetClusterRequest], + ~.Cluster]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_cluster" not in self._stubs: + self._stubs["get_cluster"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetCluster", + request_serializer=bigtable_instance_admin.GetClusterRequest.serialize, + response_deserializer=instance.Cluster.deserialize, + ) + return self._stubs["get_cluster"] + + @property + def list_clusters( + self, + ) -> Callable[ + [bigtable_instance_admin.ListClustersRequest], + bigtable_instance_admin.ListClustersResponse, + ]: + r"""Return a callable for the list clusters method over gRPC. + + Lists information about clusters in an instance. + + Returns: + Callable[[~.ListClustersRequest], + ~.ListClustersResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_clusters" not in self._stubs: + self._stubs["list_clusters"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListClusters", + request_serializer=bigtable_instance_admin.ListClustersRequest.serialize, + response_deserializer=bigtable_instance_admin.ListClustersResponse.deserialize, + ) + return self._stubs["list_clusters"] + + @property + def update_cluster(self) -> Callable[[instance.Cluster], operations.Operation]: + r"""Return a callable for the update cluster method over gRPC. + + Updates a cluster within an instance. + + Returns: + Callable[[~.Cluster], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_cluster" not in self._stubs: + self._stubs["update_cluster"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateCluster", + request_serializer=instance.Cluster.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["update_cluster"] + + @property + def delete_cluster( + self, + ) -> Callable[[bigtable_instance_admin.DeleteClusterRequest], empty.Empty]: + r"""Return a callable for the delete cluster method over gRPC. + + Deletes a cluster from an instance. + + Returns: + Callable[[~.DeleteClusterRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_cluster" not in self._stubs: + self._stubs["delete_cluster"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteCluster", + request_serializer=bigtable_instance_admin.DeleteClusterRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_cluster"] + + @property + def create_app_profile( + self, + ) -> Callable[ + [bigtable_instance_admin.CreateAppProfileRequest], instance.AppProfile + ]: + r"""Return a callable for the create app profile method over gRPC. + + Creates an app profile within an instance. + + Returns: + Callable[[~.CreateAppProfileRequest], + ~.AppProfile]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_app_profile" not in self._stubs: + self._stubs["create_app_profile"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateAppProfile", + request_serializer=bigtable_instance_admin.CreateAppProfileRequest.serialize, + response_deserializer=instance.AppProfile.deserialize, + ) + return self._stubs["create_app_profile"] + + @property + def get_app_profile( + self, + ) -> Callable[[bigtable_instance_admin.GetAppProfileRequest], instance.AppProfile]: + r"""Return a callable for the get app profile method over gRPC. + + Gets information about an app profile. + + Returns: + Callable[[~.GetAppProfileRequest], + ~.AppProfile]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_app_profile" not in self._stubs: + self._stubs["get_app_profile"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetAppProfile", + request_serializer=bigtable_instance_admin.GetAppProfileRequest.serialize, + response_deserializer=instance.AppProfile.deserialize, + ) + return self._stubs["get_app_profile"] + + @property + def list_app_profiles( + self, + ) -> Callable[ + [bigtable_instance_admin.ListAppProfilesRequest], + bigtable_instance_admin.ListAppProfilesResponse, + ]: + r"""Return a callable for the list app profiles method over gRPC. + + Lists information about app profiles in an instance. + + Returns: + Callable[[~.ListAppProfilesRequest], + ~.ListAppProfilesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_app_profiles" not in self._stubs: + self._stubs["list_app_profiles"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListAppProfiles", + request_serializer=bigtable_instance_admin.ListAppProfilesRequest.serialize, + response_deserializer=bigtable_instance_admin.ListAppProfilesResponse.deserialize, + ) + return self._stubs["list_app_profiles"] + + @property + def update_app_profile( + self, + ) -> Callable[ + [bigtable_instance_admin.UpdateAppProfileRequest], operations.Operation + ]: + r"""Return a callable for the update app profile method over gRPC. + + Updates an app profile within an instance. + + Returns: + Callable[[~.UpdateAppProfileRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_app_profile" not in self._stubs: + self._stubs["update_app_profile"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateAppProfile", + request_serializer=bigtable_instance_admin.UpdateAppProfileRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["update_app_profile"] + + @property + def delete_app_profile( + self, + ) -> Callable[[bigtable_instance_admin.DeleteAppProfileRequest], empty.Empty]: + r"""Return a callable for the delete app profile method over gRPC. + + Deletes an app profile from an instance. + + Returns: + Callable[[~.DeleteAppProfileRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_app_profile" not in self._stubs: + self._stubs["delete_app_profile"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteAppProfile", + request_serializer=bigtable_instance_admin.DeleteAppProfileRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_app_profile"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy.GetIamPolicyRequest], policy.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + + Gets the access control policy for an instance + resource. Returns an empty policy if an instance exists + but does not have a policy set. + + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetIamPolicy", + request_serializer=iam_policy.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy.SetIamPolicyRequest], policy.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + + Sets the access control policy on an instance + resource. 
Replaces any existing policy. + + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/SetIamPolicy", + request_serializer=iam_policy.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy.TestIamPermissionsRequest], iam_policy.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + + Returns permissions that the caller has on the + specified instance resource. + + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/TestIamPermissions", + request_serializer=iam_policy.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ("BigtableInstanceAdminGrpcTransport",) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py new file mode 100644 index 000000000000..e5fbf6a4c7e7 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py @@ -0,0 +1,822 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+
+import warnings
+from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple
+
+from google.api_core import gapic_v1  # type: ignore
+from google.api_core import grpc_helpers_async  # type: ignore
+from google.api_core import operations_v1  # type: ignore
+from google import auth  # type: ignore
+from google.auth import credentials  # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+
+import grpc  # type: ignore
+from grpc.experimental import aio  # type: ignore
+
+from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin
+from google.cloud.bigtable_admin_v2.types import instance
+from google.iam.v1 import iam_policy_pb2 as iam_policy  # type: ignore
+from google.iam.v1 import policy_pb2 as policy  # type: ignore
+from google.longrunning import operations_pb2 as operations  # type: ignore
+from google.protobuf import empty_pb2 as empty  # type: ignore
+
+from .base import BigtableInstanceAdminTransport, DEFAULT_CLIENT_INFO
+from .grpc import BigtableInstanceAdminGrpcTransport
+
+
+class BigtableInstanceAdminGrpcAsyncIOTransport(BigtableInstanceAdminTransport):
+    """gRPC AsyncIO backend transport for BigtableInstanceAdmin.
+
+    Service for creating, configuring, and deleting Cloud
+    Bigtable Instances and Clusters. Provides access to the Instance
+    and Cluster schemas only, not the tables' metadata or data
+    stored in those tables.
+
+    This class defines the same methods as the primary client, so the
+    primary client can load the underlying transport implementation
+    and call it.
+
+    It sends protocol buffers over the wire using gRPC (which is built on
+    top of HTTP/2); the ``grpcio`` package must be installed.
+    """
+
+    _grpc_channel: aio.Channel
+    _stubs: Dict[str, Callable] = {}
+
+    @classmethod
+    def create_channel(
+        cls,
+        host: str = "bigtableadmin.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: Optional[str] = None,
+        scopes: Optional[Sequence[str]] = None,
+        quota_project_id: Optional[str] = None,
+        **kwargs,
+    ) -> aio.Channel:
+        """Create and return a gRPC AsyncIO channel object.
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            aio.Channel: A gRPC AsyncIO channel object.
+        """
+        scopes = scopes or cls.AUTH_SCOPES
+        return grpc_helpers_async.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes,
+            quota_project_id=quota_project_id,
+            **kwargs,
+        )
+
+    def __init__(
+        self,
+        *,
+        host: str = "bigtableadmin.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: Optional[str] = None,
+        scopes: Optional[Sequence[str]] = None,
+        channel: aio.Channel = None,
+        api_mtls_endpoint: str = None,
+        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+        ssl_channel_credentials: grpc.ChannelCredentials = None,
+        client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+        quota_project_id=None,
+        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+    ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]): The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            channel (Optional[aio.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for grpc channel. It is ignored if ``channel`` is provided.
+            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                A callback to provide client certificate bytes and private key bytes,
+                both in PEM format. It is used to configure mutual TLS channel. It is
+                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+
+        Raises:
+            google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
+                creation failed for any reason.
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+ """ + self._ssl_channel_credentials = ssl_channel_credentials + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Sanity check: Ensure that channel and credentials are not both + # provided. + credentials = False + + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + elif api_mtls_endpoint: + host = ( + api_mtls_endpoint + if ":" in api_mtls_endpoint + else api_mtls_endpoint + ":443" + ) + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + ssl_credentials = SslCredentials().ssl_credentials + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + self._ssl_channel_credentials = ssl_credentials + else: + host = host if ":" in host else host + ":443" + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=self._ssl_channel_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Run the base constructor. + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + client_info=client_info, + ) + + self._stubs = {} + self._operations_client = None + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. 
+ return self._operations_client + + @property + def create_instance( + self, + ) -> Callable[ + [bigtable_instance_admin.CreateInstanceRequest], Awaitable[operations.Operation] + ]: + r"""Return a callable for the create instance method over gRPC. + + Create an instance within a project. + + Returns: + Callable[[~.CreateInstanceRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_instance" not in self._stubs: + self._stubs["create_instance"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateInstance", + request_serializer=bigtable_instance_admin.CreateInstanceRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["create_instance"] + + @property + def get_instance( + self, + ) -> Callable[ + [bigtable_instance_admin.GetInstanceRequest], Awaitable[instance.Instance] + ]: + r"""Return a callable for the get instance method over gRPC. + + Gets information about an instance. + + Returns: + Callable[[~.GetInstanceRequest], + Awaitable[~.Instance]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_instance" not in self._stubs: + self._stubs["get_instance"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetInstance", + request_serializer=bigtable_instance_admin.GetInstanceRequest.serialize, + response_deserializer=instance.Instance.deserialize, + ) + return self._stubs["get_instance"] + + @property + def list_instances( + self, + ) -> Callable[ + [bigtable_instance_admin.ListInstancesRequest], + Awaitable[bigtable_instance_admin.ListInstancesResponse], + ]: + r"""Return a callable for the list instances method over gRPC. + + Lists information about instances in a project. + + Returns: + Callable[[~.ListInstancesRequest], + Awaitable[~.ListInstancesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_instances" not in self._stubs: + self._stubs["list_instances"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListInstances", + request_serializer=bigtable_instance_admin.ListInstancesRequest.serialize, + response_deserializer=bigtable_instance_admin.ListInstancesResponse.deserialize, + ) + return self._stubs["list_instances"] + + @property + def update_instance( + self, + ) -> Callable[[instance.Instance], Awaitable[instance.Instance]]: + r"""Return a callable for the update instance method over gRPC. + + Updates an instance within a project. This method + updates only the display name and type for an Instance. + To update other Instance properties, such as labels, use + PartialUpdateInstance. + + Returns: + Callable[[~.Instance], + Awaitable[~.Instance]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_instance" not in self._stubs: + self._stubs["update_instance"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateInstance", + request_serializer=instance.Instance.serialize, + response_deserializer=instance.Instance.deserialize, + ) + return self._stubs["update_instance"] + + @property + def partial_update_instance( + self, + ) -> Callable[ + [bigtable_instance_admin.PartialUpdateInstanceRequest], + Awaitable[operations.Operation], + ]: + r"""Return a callable for the partial update instance method over gRPC. + + Partially updates an instance within a project. This + method can modify all fields of an Instance and is the + preferred way to update an Instance. + + Returns: + Callable[[~.PartialUpdateInstanceRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "partial_update_instance" not in self._stubs: + self._stubs["partial_update_instance"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/PartialUpdateInstance", + request_serializer=bigtable_instance_admin.PartialUpdateInstanceRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["partial_update_instance"] + + @property + def delete_instance( + self, + ) -> Callable[ + [bigtable_instance_admin.DeleteInstanceRequest], Awaitable[empty.Empty] + ]: + r"""Return a callable for the delete instance method over gRPC. + + Delete an instance from a project. + + Returns: + Callable[[~.DeleteInstanceRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_instance" not in self._stubs: + self._stubs["delete_instance"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteInstance", + request_serializer=bigtable_instance_admin.DeleteInstanceRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_instance"] + + @property + def create_cluster( + self, + ) -> Callable[ + [bigtable_instance_admin.CreateClusterRequest], Awaitable[operations.Operation] + ]: + r"""Return a callable for the create cluster method over gRPC. + + Creates a cluster within an instance. + + Returns: + Callable[[~.CreateClusterRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "create_cluster" not in self._stubs: + self._stubs["create_cluster"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateCluster", + request_serializer=bigtable_instance_admin.CreateClusterRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["create_cluster"] + + @property + def get_cluster( + self, + ) -> Callable[ + [bigtable_instance_admin.GetClusterRequest], Awaitable[instance.Cluster] + ]: + r"""Return a callable for the get cluster method over gRPC. + + Gets information about a cluster. + + Returns: + Callable[[~.GetClusterRequest], + Awaitable[~.Cluster]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_cluster" not in self._stubs: + self._stubs["get_cluster"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetCluster", + request_serializer=bigtable_instance_admin.GetClusterRequest.serialize, + response_deserializer=instance.Cluster.deserialize, + ) + return self._stubs["get_cluster"] + + @property + def list_clusters( + self, + ) -> Callable[ + [bigtable_instance_admin.ListClustersRequest], + Awaitable[bigtable_instance_admin.ListClustersResponse], + ]: + r"""Return a callable for the list clusters method over gRPC. + + Lists information about clusters in an instance. + + Returns: + Callable[[~.ListClustersRequest], + Awaitable[~.ListClustersResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_clusters" not in self._stubs: + self._stubs["list_clusters"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListClusters", + request_serializer=bigtable_instance_admin.ListClustersRequest.serialize, + response_deserializer=bigtable_instance_admin.ListClustersResponse.deserialize, + ) + return self._stubs["list_clusters"] + + @property + def update_cluster( + self, + ) -> Callable[[instance.Cluster], Awaitable[operations.Operation]]: + r"""Return a callable for the update cluster method over gRPC. + + Updates a cluster within an instance. + + Returns: + Callable[[~.Cluster], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_cluster" not in self._stubs: + self._stubs["update_cluster"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateCluster", + request_serializer=instance.Cluster.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["update_cluster"] + + @property + def delete_cluster( + self, + ) -> Callable[ + [bigtable_instance_admin.DeleteClusterRequest], Awaitable[empty.Empty] + ]: + r"""Return a callable for the delete cluster method over gRPC. + + Deletes a cluster from an instance. 
+ + Returns: + Callable[[~.DeleteClusterRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_cluster" not in self._stubs: + self._stubs["delete_cluster"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteCluster", + request_serializer=bigtable_instance_admin.DeleteClusterRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_cluster"] + + @property + def create_app_profile( + self, + ) -> Callable[ + [bigtable_instance_admin.CreateAppProfileRequest], + Awaitable[instance.AppProfile], + ]: + r"""Return a callable for the create app profile method over gRPC. + + Creates an app profile within an instance. + + Returns: + Callable[[~.CreateAppProfileRequest], + Awaitable[~.AppProfile]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_app_profile" not in self._stubs: + self._stubs["create_app_profile"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateAppProfile", + request_serializer=bigtable_instance_admin.CreateAppProfileRequest.serialize, + response_deserializer=instance.AppProfile.deserialize, + ) + return self._stubs["create_app_profile"] + + @property + def get_app_profile( + self, + ) -> Callable[ + [bigtable_instance_admin.GetAppProfileRequest], Awaitable[instance.AppProfile] + ]: + r"""Return a callable for the get app profile method over gRPC. + + Gets information about an app profile. + + Returns: + Callable[[~.GetAppProfileRequest], + Awaitable[~.AppProfile]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_app_profile" not in self._stubs: + self._stubs["get_app_profile"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetAppProfile", + request_serializer=bigtable_instance_admin.GetAppProfileRequest.serialize, + response_deserializer=instance.AppProfile.deserialize, + ) + return self._stubs["get_app_profile"] + + @property + def list_app_profiles( + self, + ) -> Callable[ + [bigtable_instance_admin.ListAppProfilesRequest], + Awaitable[bigtable_instance_admin.ListAppProfilesResponse], + ]: + r"""Return a callable for the list app profiles method over gRPC. + + Lists information about app profiles in an instance. + + Returns: + Callable[[~.ListAppProfilesRequest], + Awaitable[~.ListAppProfilesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_app_profiles" not in self._stubs: + self._stubs["list_app_profiles"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListAppProfiles", + request_serializer=bigtable_instance_admin.ListAppProfilesRequest.serialize, + response_deserializer=bigtable_instance_admin.ListAppProfilesResponse.deserialize, + ) + return self._stubs["list_app_profiles"] + + @property + def update_app_profile( + self, + ) -> Callable[ + [bigtable_instance_admin.UpdateAppProfileRequest], + Awaitable[operations.Operation], + ]: + r"""Return a callable for the update app profile method over gRPC. + + Updates an app profile within an instance. + + Returns: + Callable[[~.UpdateAppProfileRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_app_profile" not in self._stubs: + self._stubs["update_app_profile"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateAppProfile", + request_serializer=bigtable_instance_admin.UpdateAppProfileRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["update_app_profile"] + + @property + def delete_app_profile( + self, + ) -> Callable[ + [bigtable_instance_admin.DeleteAppProfileRequest], Awaitable[empty.Empty] + ]: + r"""Return a callable for the delete app profile method over gRPC. + + Deletes an app profile from an instance. + + Returns: + Callable[[~.DeleteAppProfileRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_app_profile" not in self._stubs: + self._stubs["delete_app_profile"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteAppProfile", + request_serializer=bigtable_instance_admin.DeleteAppProfileRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_app_profile"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy.GetIamPolicyRequest], Awaitable[policy.Policy]]: + r"""Return a callable for the get iam policy method over gRPC. + + Gets the access control policy for an instance + resource. Returns an empty policy if an instance exists + but does not have a policy set. + + Returns: + Callable[[~.GetIamPolicyRequest], + Awaitable[~.Policy]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetIamPolicy", + request_serializer=iam_policy.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy.SetIamPolicyRequest], Awaitable[policy.Policy]]: + r"""Return a callable for the set iam policy method over gRPC. + + Sets the access control policy on an instance + resource. Replaces any existing policy. + + Returns: + Callable[[~.SetIamPolicyRequest], + Awaitable[~.Policy]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/SetIamPolicy", + request_serializer=iam_policy.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy.TestIamPermissionsRequest], + Awaitable[iam_policy.TestIamPermissionsResponse], + ]: + r"""Return a callable for the test iam permissions method over gRPC. + + Returns permissions that the caller has on the + specified instance resource. + + Returns: + Callable[[~.TestIamPermissionsRequest], + Awaitable[~.TestIamPermissionsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/TestIamPermissions", + request_serializer=iam_policy.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ("BigtableInstanceAdminGrpcAsyncIOTransport",) diff --git a/packages/google-cloud-bigtable/google/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py similarity index 69% rename from packages/google-cloud-bigtable/google/__init__.py rename to packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py index abc37089339e..76c35f3bb880 100644 --- a/packages/google-cloud-bigtable/google/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py @@ -1,4 +1,6 @@ -# Copyright 2015 Google LLC +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,15 +13,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+# -"""Google Cloud Bigtable API package.""" - - -try: - import pkg_resources - - pkg_resources.declare_namespace(__name__) -except ImportError: - import pkgutil +from .client import BigtableTableAdminClient +from .async_client import BigtableTableAdminAsyncClient - __path__ = pkgutil.extend_path(__path__, __name__) +__all__ = ( + "BigtableTableAdminClient", + "BigtableTableAdminAsyncClient", +) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py new file mode 100644 index 000000000000..19e9ee8278dd --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -0,0 +1,2284 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import pagers +from google.cloud.bigtable_admin_v2.types import bigtable_table_admin +from google.cloud.bigtable_admin_v2.types import table +from google.cloud.bigtable_admin_v2.types import table as gba_table +from google.iam.v1 import iam_policy_pb2 as iam_policy # type: ignore +from google.iam.v1 import policy_pb2 as policy # type: ignore +from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + +from .transports.base import BigtableTableAdminTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import BigtableTableAdminGrpcAsyncIOTransport +from .client import BigtableTableAdminClient + + +class BigtableTableAdminAsyncClient: + """Service for creating, configuring, and deleting Cloud + Bigtable tables. + + Provides access to the table schemas only, not the data stored + within the tables. 
+ """ + + _client: BigtableTableAdminClient + + DEFAULT_ENDPOINT = BigtableTableAdminClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = BigtableTableAdminClient.DEFAULT_MTLS_ENDPOINT + + backup_path = staticmethod(BigtableTableAdminClient.backup_path) + parse_backup_path = staticmethod(BigtableTableAdminClient.parse_backup_path) + cluster_path = staticmethod(BigtableTableAdminClient.cluster_path) + parse_cluster_path = staticmethod(BigtableTableAdminClient.parse_cluster_path) + instance_path = staticmethod(BigtableTableAdminClient.instance_path) + parse_instance_path = staticmethod(BigtableTableAdminClient.parse_instance_path) + snapshot_path = staticmethod(BigtableTableAdminClient.snapshot_path) + parse_snapshot_path = staticmethod(BigtableTableAdminClient.parse_snapshot_path) + table_path = staticmethod(BigtableTableAdminClient.table_path) + parse_table_path = staticmethod(BigtableTableAdminClient.parse_table_path) + + common_billing_account_path = staticmethod( + BigtableTableAdminClient.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + BigtableTableAdminClient.parse_common_billing_account_path + ) + + common_folder_path = staticmethod(BigtableTableAdminClient.common_folder_path) + parse_common_folder_path = staticmethod( + BigtableTableAdminClient.parse_common_folder_path + ) + + common_organization_path = staticmethod( + BigtableTableAdminClient.common_organization_path + ) + parse_common_organization_path = staticmethod( + BigtableTableAdminClient.parse_common_organization_path + ) + + common_project_path = staticmethod(BigtableTableAdminClient.common_project_path) + parse_common_project_path = staticmethod( + BigtableTableAdminClient.parse_common_project_path + ) + + common_location_path = staticmethod(BigtableTableAdminClient.common_location_path) + parse_common_location_path = staticmethod( + BigtableTableAdminClient.parse_common_location_path + ) + + from_service_account_info = BigtableTableAdminClient.from_service_account_info + from_service_account_file = BigtableTableAdminClient.from_service_account_file + from_service_account_json = from_service_account_file + + @property + def transport(self) -> BigtableTableAdminTransport: + """Return the transport used by the client instance. + + Returns: + BigtableTableAdminTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial( + type(BigtableTableAdminClient).get_transport_class, + type(BigtableTableAdminClient), + ) + + def __init__( + self, + *, + credentials: credentials.Credentials = None, + transport: Union[str, BigtableTableAdminTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the bigtable table admin client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.BigtableTableAdminTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. 
GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + + self._client = BigtableTableAdminClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def create_table( + self, + request: bigtable_table_admin.CreateTableRequest = None, + *, + parent: str = None, + table_id: str = None, + table: gba_table.Table = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gba_table.Table: + r"""Creates a new table in the specified instance. + The table can be created with a full set of initial + column families, specified in the request. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.CreateTableRequest`): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] + parent (:class:`str`): + Required. The unique name of the instance in which to + create the table. Values are of the form + ``projects/{project}/instances/{instance}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + table_id (:class:`str`): + Required. The name by which the new table should be + referred to within the parent instance, e.g., ``foobar`` + rather than ``{parent}/tables/foobar``. Maximum 50 + characters. + + This corresponds to the ``table_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + table (:class:`google.cloud.bigtable_admin_v2.types.Table`): + Required. The Table to create. + This corresponds to the ``table`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.Table: + A collection of user data indexed by + row, column, and timestamp. Each table + is served using the resources of its + parent cluster. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, table_id, table]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + request = bigtable_table_admin.CreateTableRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if table_id is not None: + request.table_id = table_id + if table is not None: + request.table = table + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_table, + default_timeout=300.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def create_table_from_snapshot( + self, + request: bigtable_table_admin.CreateTableFromSnapshotRequest = None, + *, + parent: str = None, + table_id: str = None, + source_snapshot: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a new table from the specified snapshot. The + target table must not exist. The snapshot and the table + must be in the same instance. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.CreateTableFromSnapshotRequest`): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot] + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + parent (:class:`str`): + Required. The unique name of the instance in which to + create the table. Values are of the form + ``projects/{project}/instances/{instance}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + table_id (:class:`str`): + Required. The name by which the new table should be + referred to within the parent instance, e.g., ``foobar`` + rather than ``{parent}/tables/foobar``. + + This corresponds to the ``table_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + source_snapshot (:class:`str`): + Required. The unique name of the snapshot from which to + restore the table. The snapshot and the table must be in + the same instance. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. + + This corresponds to the ``source_snapshot`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Table` A collection of user data indexed by row, column, and timestamp. + Each table is served using the resources of its + parent cluster. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, table_id, source_snapshot]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_table_admin.CreateTableFromSnapshotRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if table_id is not None: + request.table_id = table_id + if source_snapshot is not None: + request.source_snapshot = source_snapshot + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_table_from_snapshot, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + table.Table, + metadata_type=bigtable_table_admin.CreateTableFromSnapshotMetadata, + ) + + # Done; return the response. + return response + + async def list_tables( + self, + request: bigtable_table_admin.ListTablesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTablesAsyncPager: + r"""Lists all tables served from a specified instance. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.ListTablesRequest`): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] + parent (:class:`str`): + Required. The unique name of the instance for which + tables should be listed. Values are of the form + ``projects/{project}/instances/{instance}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListTablesAsyncPager: + Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_table_admin.ListTablesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_tables, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListTablesAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_table( + self, + request: bigtable_table_admin.GetTableRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> table.Table: + r"""Gets metadata information about the specified table. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.GetTableRequest`): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.GetTable][google.bigtable.admin.v2.BigtableTableAdmin.GetTable] + name (:class:`str`): + Required. The unique name of the requested table. Values + are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.Table: + A collection of user data indexed by + row, column, and timestamp. Each table + is served using the resources of its + parent cluster. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_table_admin.GetTableRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_table, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_table( + self, + request: bigtable_table_admin.DeleteTableRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Permanently deletes a specified table and all of its + data. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.DeleteTableRequest`): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable] + name (:class:`str`): + Required. The unique name of the table to be deleted. + Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_table_admin.DeleteTableRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_table, + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. 
+ await rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + async def modify_column_families( + self, + request: bigtable_table_admin.ModifyColumnFamiliesRequest = None, + *, + name: str = None, + modifications: Sequence[ + bigtable_table_admin.ModifyColumnFamiliesRequest.Modification + ] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> table.Table: + r"""Performs a series of column family modifications on + the specified table. Either all or none of the + modifications will occur before this method returns, but + data requests received prior to that point may see a + table where only some modifications have taken effect. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest`): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies] + name (:class:`str`): + Required. The unique name of the table whose families + should be modified. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + modifications (:class:`Sequence[google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest.Modification]`): + Required. Modifications to be + atomically applied to the specified + table's families. Entries are applied in + order, meaning that earlier + modifications can be masked by later + ones (in the case of repeated updates to + the same family, for example). + + This corresponds to the ``modifications`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.Table: + A collection of user data indexed by + row, column, and timestamp. Each table + is served using the resources of its + parent cluster. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, modifications]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_table_admin.ModifyColumnFamiliesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + if modifications: + request.modifications.extend(modifications) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.modify_column_families, + default_timeout=300.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. 
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def drop_row_range( + self, + request: bigtable_table_admin.DropRowRangeRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Permanently drop/delete a row range from a specified + table. The request can specify whether to delete all + rows in a table, or only those that match a particular + prefix. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.DropRowRangeRequest`): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + + request = bigtable_table_admin.DropRowRangeRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.drop_row_range, + default_timeout=3600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + await rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + async def generate_consistency_token( + self, + request: bigtable_table_admin.GenerateConsistencyTokenRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> bigtable_table_admin.GenerateConsistencyTokenResponse: + r"""Generates a consistency token for a Table, which can + be used in CheckConsistency to check whether mutations + to the table that finished before this call started have + been replicated. The tokens will be available for 90 + days. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenRequest`): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] + name (:class:`str`): + Required. The unique name of the Table for which to + create a consistency token. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenResponse: + Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_table_admin.GenerateConsistencyTokenRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.generate_consistency_token, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def check_consistency( + self, + request: bigtable_table_admin.CheckConsistencyRequest = None, + *, + name: str = None, + consistency_token: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> bigtable_table_admin.CheckConsistencyResponse: + r"""Checks replication consistency based on a consistency + token, that is, if replication has caught up based on + the conditions specified in the token and the check + request. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.CheckConsistencyRequest`): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] + name (:class:`str`): + Required. The unique name of the Table for which to + check replication consistency. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + consistency_token (:class:`str`): + Required. The token created using + GenerateConsistencyToken for the Table. + + This corresponds to the ``consistency_token`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.CheckConsistencyResponse: + Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name, consistency_token]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_table_admin.CheckConsistencyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + if consistency_token is not None: + request.consistency_token = consistency_token + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.check_consistency, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def snapshot_table( + self, + request: bigtable_table_admin.SnapshotTableRequest = None, + *, + name: str = None, + cluster: str = None, + snapshot_id: str = None, + description: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a new snapshot in the specified cluster from + the specified source table. The cluster and the table + must be in the same instance. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.SnapshotTableRequest`): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable][google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable] + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + name (:class:`str`): + Required. The unique name of the table to have the + snapshot taken. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster (:class:`str`): + Required. The name of the cluster where the snapshot + will be created in. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + + This corresponds to the ``cluster`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + snapshot_id (:class:`str`): + Required. 
The ID by which the new snapshot should be + referred to within the parent cluster, e.g., + ``mysnapshot`` of the form: + ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*`` rather than + ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/mysnapshot``. + + This corresponds to the ``snapshot_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + description (:class:`str`): + Description of the snapshot. + This corresponds to the ``description`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Snapshot` A snapshot of a table at a particular time. A snapshot can be used as a + checkpoint for data restoration or a data source for + a new table. + + Note: This is a private alpha release of Cloud + Bigtable snapshots. This feature is not currently + available to most Cloud Bigtable customers. This + feature might be changed in backward-incompatible + ways and is not recommended for production use. It is + not subject to any SLA or deprecation policy. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, cluster, snapshot_id, description]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_table_admin.SnapshotTableRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + if cluster is not None: + request.cluster = cluster + if snapshot_id is not None: + request.snapshot_id = snapshot_id + if description is not None: + request.description = description + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.snapshot_table, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + table.Snapshot, + metadata_type=bigtable_table_admin.SnapshotTableMetadata, + ) + + # Done; return the response. + return response + + async def get_snapshot( + self, + request: bigtable_table_admin.GetSnapshotRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> table.Snapshot: + r"""Gets metadata information about the specified + snapshot. + Note: This is a private alpha release of Cloud Bigtable + snapshots. 
This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.GetSnapshotRequest`): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot] + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + name (:class:`str`): + Required. The unique name of the requested snapshot. + Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.Snapshot: + A snapshot of a table at a particular + time. A snapshot can be used as a + checkpoint for data restoration or a + data source for a new table. + Note: This is a private alpha release of + Cloud Bigtable snapshots. This feature + is not currently available to most Cloud + Bigtable customers. This feature might + be changed in backward-incompatible ways + and is not recommended for production + use. It is not subject to any SLA or + deprecation policy. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_table_admin.GetSnapshotRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_snapshot, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def list_snapshots( + self, + request: bigtable_table_admin.ListSnapshotsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListSnapshotsAsyncPager: + r"""Lists all snapshots associated with the specified + cluster. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.ListSnapshotsRequest`): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + parent (:class:`str`): + Required. The unique name of the cluster for which + snapshots should be listed. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + Use ``{cluster} = '-'`` to list snapshots for all + clusters in an instance, e.g., + ``projects/{project}/instances/{instance}/clusters/-``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListSnapshotsAsyncPager: + Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] + + Note: This is a private alpha release of Cloud + Bigtable snapshots. This feature is not currently + available to most Cloud Bigtable customers. This + feature might be changed in backward-incompatible + ways and is not recommended for production use. It is + not subject to any SLA or deprecation policy. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_table_admin.ListSnapshotsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_snapshots, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListSnapshotsAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_snapshot( + self, + request: bigtable_table_admin.DeleteSnapshotRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Permanently deletes the specified snapshot. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.DeleteSnapshotRequest`): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot] + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + name (:class:`str`): + Required. The unique name of the snapshot to be deleted. + Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_table_admin.DeleteSnapshotRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_snapshot, + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + await rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + async def create_backup( + self, + request: bigtable_table_admin.CreateBackupRequest = None, + *, + parent: str = None, + backup_id: str = None, + backup: table.Backup = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Starts creating a new Cloud Bigtable Backup. The returned backup + [long-running operation][google.longrunning.Operation] can be + used to track creation of the backup. The + [metadata][google.longrunning.Operation.metadata] field type is + [CreateBackupMetadata][google.bigtable.admin.v2.CreateBackupMetadata]. + The [response][google.longrunning.Operation.response] field type + is [Backup][google.bigtable.admin.v2.Backup], if successful. + Cancelling the returned operation will stop the creation and + delete the backup. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.CreateBackupRequest`): + The request object. The request for + [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup]. + parent (:class:`str`): + Required. This must be one of the clusters in the + instance in which this table is located. The backup will + be stored in this cluster. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + backup_id (:class:`str`): + Required. The id of the backup to be created. The + ``backup_id`` along with the parent ``parent`` are + combined as {parent}/backups/{backup_id} to create the + full backup name, of the form: + ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``. + This string must be between 1 and 50 characters in + length and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]*. + + This corresponds to the ``backup_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + backup (:class:`google.cloud.bigtable_admin_v2.types.Backup`): + Required. The backup to create. + This corresponds to the ``backup`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.bigtable_admin_v2.types.Backup` A + backup of a Cloud Bigtable table. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent, backup_id, backup]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_table_admin.CreateBackupRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if backup_id is not None: + request.backup_id = backup_id + if backup is not None: + request.backup = backup + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_backup, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + table.Backup, + metadata_type=bigtable_table_admin.CreateBackupMetadata, + ) + + # Done; return the response. + return response + + async def get_backup( + self, + request: bigtable_table_admin.GetBackupRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> table.Backup: + r"""Gets metadata on a pending or completed Cloud + Bigtable Backup. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.GetBackupRequest`): + The request object. The request for + [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup]. + name (:class:`str`): + Required. Name of the backup. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.Backup: + A backup of a Cloud Bigtable table. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_table_admin.GetBackupRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_backup, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def update_backup( + self, + request: bigtable_table_admin.UpdateBackupRequest = None, + *, + backup: table.Backup = None, + update_mask: field_mask.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> table.Backup: + r"""Updates a pending or completed Cloud Bigtable Backup. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.UpdateBackupRequest`): + The request object. The request for + [UpdateBackup][google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup]. + backup (:class:`google.cloud.bigtable_admin_v2.types.Backup`): + Required. The backup to update. ``backup.name``, and the + fields to be updated as specified by ``update_mask`` are + required. Other fields are ignored. Update is only + supported for the following fields: + + - ``backup.expire_time``. + + This corresponds to the ``backup`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. A mask specifying which fields (e.g. + ``expire_time``) in the Backup resource should be + updated. This mask is relative to the Backup resource, + not to the request message. The field mask must always + be specified; this prevents any future fields from being + erased accidentally by clients that do not know about + them. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.Backup: + A backup of a Cloud Bigtable table. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([backup, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_table_admin.UpdateBackupRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if backup is not None: + request.backup = backup + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_backup, + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("backup.name", request.backup.name),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_backup( + self, + request: bigtable_table_admin.DeleteBackupRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a pending or completed Cloud Bigtable backup. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.DeleteBackupRequest`): + The request object. The request for + [DeleteBackup][google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup]. + name (:class:`str`): + Required. Name of the backup to delete. Values are of + the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_table_admin.DeleteBackupRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_backup, + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + await rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + async def list_backups( + self, + request: bigtable_table_admin.ListBackupsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListBackupsAsyncPager: + r"""Lists Cloud Bigtable backups. Returns both completed + and pending backups. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.ListBackupsRequest`): + The request object. The request for + [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. + parent (:class:`str`): + Required. The cluster to list backups from. Values are + of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. 
+ Use ``{cluster} = '-'`` to list backups for all clusters + in an instance, e.g., + ``projects/{project}/instances/{instance}/clusters/-``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListBackupsAsyncPager: + The response for + [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_table_admin.ListBackupsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_backups, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListBackupsAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def restore_table( + self, + request: bigtable_table_admin.RestoreTableRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Create a new table by restoring from a completed backup. The new + table must be in the same instance as the instance containing + the backup. The returned table [long-running + operation][google.longrunning.Operation] can be used to track + the progress of the operation, and to cancel it. The + [metadata][google.longrunning.Operation.metadata] field type is + [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. + The [response][google.longrunning.Operation.response] type is + [Table][google.bigtable.admin.v2.Table], if successful. + + Args: + request (:class:`google.cloud.bigtable_admin_v2.types.RestoreTableRequest`): + The request object. The request for + [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. 
+ + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Table` A collection of user data indexed by row, column, and timestamp. + Each table is served using the resources of its + parent cluster. + + """ + # Create or coerce a protobuf request object. + + request = bigtable_table_admin.RestoreTableRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.restore_table, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + table.Table, + metadata_type=bigtable_table_admin.RestoreTableMetadata, + ) + + # Done; return the response. + return response + + async def get_iam_policy( + self, + request: iam_policy.GetIamPolicyRequest = None, + *, + resource: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy.Policy: + r"""Gets the access control policy for a Table or Backup + resource. Returns an empty policy if the resource exists + but does not have a policy set. + + Args: + request (:class:`google.iam.v1.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + resource (:class:`str`): + REQUIRED: The resource for which the + policy is being requested. See the + operation documentation for the + appropriate value for this field. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.iam.v1.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. It is used to + specify access control policies for Cloud Platform + resources. + + A Policy is a collection of bindings. A binding binds + one or more members to a single role. Members can be + user accounts, service accounts, Google groups, and + domains (such as G Suite). A role is a named list of + permissions (defined by IAM or configured by users). + A binding can optionally specify a condition, which + is a logic expression that further constrains the + role binding based on attributes about the request + and/or target resource. 
+ + **JSON Example** + + { + "bindings": [ + { + "role": + "roles/resourcemanager.organizationAdmin", + "members": [ "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + + }, { "role": + "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { "title": "expirable access", + "description": "Does not grant access after + Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } + + ] + + } + + **YAML Example** + + bindings: - members: - user:\ mike@example.com - + group:\ admins@example.com - domain:google.com - + serviceAccount:\ my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - + members: - user:\ eve@example.com role: + roles/resourcemanager.organizationViewer + condition: title: expirable access description: + Does not grant access after Sep 2020 expression: + request.time < + timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the + [IAM developer's + guide](\ https://cloud.google.com/iam/docs). + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([resource]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy.GetIamPolicyRequest(**request) + + elif not request: + request = iam_policy.GetIamPolicyRequest(resource=resource,) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_iam_policy, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def set_iam_policy( + self, + request: iam_policy.SetIamPolicyRequest = None, + *, + resource: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy.Policy: + r"""Sets the access control policy on a Table or Backup + resource. Replaces any existing policy. + + Args: + request (:class:`google.iam.v1.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + resource (:class:`str`): + REQUIRED: The resource for which the + policy is being specified. See the + operation documentation for the + appropriate value for this field. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.iam.v1.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. It is used to + specify access control policies for Cloud Platform + resources. + + A Policy is a collection of bindings. A binding binds + one or more members to a single role. Members can be + user accounts, service accounts, Google groups, and + domains (such as G Suite). A role is a named list of + permissions (defined by IAM or configured by users). + A binding can optionally specify a condition, which + is a logic expression that further constrains the + role binding based on attributes about the request + and/or target resource. + + **JSON Example** + + { + "bindings": [ + { + "role": + "roles/resourcemanager.organizationAdmin", + "members": [ "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + + }, { "role": + "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { "title": "expirable access", + "description": "Does not grant access after + Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } + + ] + + } + + **YAML Example** + + bindings: - members: - user:\ mike@example.com - + group:\ admins@example.com - domain:google.com - + serviceAccount:\ my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - + members: - user:\ eve@example.com role: + roles/resourcemanager.organizationViewer + condition: title: expirable access description: + Does not grant access after Sep 2020 expression: + request.time < + timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the + [IAM developer's + guide](\ https://cloud.google.com/iam/docs). + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([resource]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy.SetIamPolicyRequest(**request) + + elif not request: + request = iam_policy.SetIamPolicyRequest(resource=resource,) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_iam_policy, + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def test_iam_permissions( + self, + request: iam_policy.TestIamPermissionsRequest = None, + *, + resource: str = None, + permissions: Sequence[str] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy.TestIamPermissionsResponse: + r"""Returns permissions that the caller has on the + specified Table or Backup resource. + + Args: + request (:class:`google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + resource (:class:`str`): + REQUIRED: The resource for which the + policy detail is being requested. See + the operation documentation for the + appropriate value for this field. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + permissions (:class:`Sequence[str]`): + The set of permissions to check for the ``resource``. + Permissions with wildcards (such as '*' or 'storage.*') + are not allowed. For more information see `IAM + Overview `__. + + This corresponds to the ``permissions`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse: + Response message for TestIamPermissions method. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([resource, permissions]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy.TestIamPermissionsRequest(**request) + + elif not request: + request = iam_policy.TestIamPermissionsRequest( + resource=resource, permissions=permissions, + ) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.test_iam_permissions, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-bigtable-admin", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("BigtableTableAdminAsyncClient",) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py new file mode 100644 index 000000000000..58eb4a9cdbf8 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -0,0 +1,2473 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +from distutils import util +import os +import re +from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import pagers +from google.cloud.bigtable_admin_v2.types import bigtable_table_admin +from google.cloud.bigtable_admin_v2.types import table +from google.cloud.bigtable_admin_v2.types import table as gba_table +from google.iam.v1 import iam_policy_pb2 as iam_policy # type: ignore +from google.iam.v1 import policy_pb2 as policy # type: ignore +from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + +from .transports.base import BigtableTableAdminTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import BigtableTableAdminGrpcTransport +from .transports.grpc_asyncio import BigtableTableAdminGrpcAsyncIOTransport + + +class BigtableTableAdminClientMeta(type): + """Metaclass for the BigtableTableAdmin client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. 
+ """ + + _transport_registry = ( + OrderedDict() + ) # type: Dict[str, Type[BigtableTableAdminTransport]] + _transport_registry["grpc"] = BigtableTableAdminGrpcTransport + _transport_registry["grpc_asyncio"] = BigtableTableAdminGrpcAsyncIOTransport + + def get_transport_class( + cls, label: str = None, + ) -> Type[BigtableTableAdminTransport]: + """Return an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class BigtableTableAdminClient(metaclass=BigtableTableAdminClientMeta): + """Service for creating, configuring, and deleting Cloud + Bigtable tables. + + Provides access to the table schemas only, not the data stored + within the tables. + """ + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Convert api endpoint to mTLS endpoint. + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "bigtableadmin.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + BigtableTableAdminClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + BigtableTableAdminClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> BigtableTableAdminTransport: + """Return the transport used by the client instance. + + Returns: + BigtableTableAdminTransport: The transport used by the client instance.
+ """ + return self._transport + + @staticmethod + def backup_path(project: str, instance: str, cluster: str, backup: str,) -> str: + """Return a fully-qualified backup string.""" + return "projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}".format( + project=project, instance=instance, cluster=cluster, backup=backup, + ) + + @staticmethod + def parse_backup_path(path: str) -> Dict[str, str]: + """Parse a backup path into its component segments.""" + m = re.match( + r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)/clusters/(?P<cluster>.+?)/backups/(?P<backup>.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def cluster_path(project: str, instance: str, cluster: str,) -> str: + """Return a fully-qualified cluster string.""" + return "projects/{project}/instances/{instance}/clusters/{cluster}".format( + project=project, instance=instance, cluster=cluster, + ) + + @staticmethod + def parse_cluster_path(path: str) -> Dict[str, str]: + """Parse a cluster path into its component segments.""" + m = re.match( + r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)/clusters/(?P<cluster>.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def instance_path(project: str, instance: str,) -> str: + """Return a fully-qualified instance string.""" + return "projects/{project}/instances/{instance}".format( + project=project, instance=instance, + ) + + @staticmethod + def parse_instance_path(path: str) -> Dict[str, str]: + """Parse a instance path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def snapshot_path(project: str, instance: str, cluster: str, snapshot: str,) -> str: + """Return a fully-qualified snapshot string.""" + return "projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}".format( + project=project, instance=instance, cluster=cluster, snapshot=snapshot, + ) + + @staticmethod + def parse_snapshot_path(path: str) -> Dict[str, str]: + """Parse a snapshot path into its component segments.""" + m = re.match( + r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)/clusters/(?P<cluster>.+?)/snapshots/(?P<snapshot>.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def table_path(project: str, instance: str, table: str,) -> str: + """Return a fully-qualified table string.""" + return "projects/{project}/instances/{instance}/tables/{table}".format( + project=project, instance=instance, table=table, + ) + + @staticmethod + def parse_table_path(path: str) -> Dict[str, str]: + """Parse a table path into its component segments.""" + m = re.match( + r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)/tables/(?P<table>.+?)$",
+ path, + ) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str,) -> str: + """Return a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str, str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str,) -> str: + """Return a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder,) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str, str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P<folder>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str,) -> str: + """Return a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization,) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str, str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P<organization>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str,) -> str: + """Return a fully-qualified project string.""" + return "projects/{project}".format(project=project,) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str, str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str,) -> str: + """Return a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format( + project=project, location=location, + ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str, str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path) + return m.groupdict() if m else {} + + def __init__( + self, + *, + credentials: Optional[credentials.Credentials] = None, + transport: Union[str, BigtableTableAdminTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the bigtable table admin client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, BigtableTableAdminTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client.
GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + use_client_cert = bool( + util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) + ) + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + client_cert_source_func = ( + mtls.default_client_cert_source() if is_mtls else None + ) + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + api_endpoint = ( + self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT + ) + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, BigtableTableAdminTransport): + # transport is a BigtableTableAdminTransport instance. + if credentials or client_options.credentials_file: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, " + "provide its scopes directly." 
+ ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def create_table( + self, + request: bigtable_table_admin.CreateTableRequest = None, + *, + parent: str = None, + table_id: str = None, + table: gba_table.Table = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gba_table.Table: + r"""Creates a new table in the specified instance. + The table can be created with a full set of initial + column families, specified in the request. + + Args: + request (google.cloud.bigtable_admin_v2.types.CreateTableRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] + parent (str): + Required. The unique name of the instance in which to + create the table. Values are of the form + ``projects/{project}/instances/{instance}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + table_id (str): + Required. The name by which the new table should be + referred to within the parent instance, e.g., ``foobar`` + rather than ``{parent}/tables/foobar``. Maximum 50 + characters. + + This corresponds to the ``table_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + table (google.cloud.bigtable_admin_v2.types.Table): + Required. The Table to create. + This corresponds to the ``table`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.Table: + A collection of user data indexed by + row, column, and timestamp. Each table + is served using the resources of its + parent cluster. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, table_id, table]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.CreateTableRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.CreateTableRequest): + request = bigtable_table_admin.CreateTableRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if table_id is not None: + request.table_id = table_id + if table is not None: + request.table = table + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.create_table] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def create_table_from_snapshot( + self, + request: bigtable_table_admin.CreateTableFromSnapshotRequest = None, + *, + parent: str = None, + table_id: str = None, + source_snapshot: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Creates a new table from the specified snapshot. The + target table must not exist. The snapshot and the table + must be in the same instance. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + Args: + request (google.cloud.bigtable_admin_v2.types.CreateTableFromSnapshotRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot] + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + parent (str): + Required. The unique name of the instance in which to + create the table. Values are of the form + ``projects/{project}/instances/{instance}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + table_id (str): + Required. The name by which the new table should be + referred to within the parent instance, e.g., ``foobar`` + rather than ``{parent}/tables/foobar``. + + This corresponds to the ``table_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + source_snapshot (str): + Required. The unique name of the snapshot from which to + restore the table. The snapshot and the table must be in + the same instance. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. + + This corresponds to the ``source_snapshot`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Table` A collection of user data indexed by row, column, and timestamp. + Each table is served using the resources of its + parent cluster. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, table_id, source_snapshot]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.CreateTableFromSnapshotRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.CreateTableFromSnapshotRequest): + request = bigtable_table_admin.CreateTableFromSnapshotRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if table_id is not None: + request.table_id = table_id + if source_snapshot is not None: + request.source_snapshot = source_snapshot + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.create_table_from_snapshot + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + table.Table, + metadata_type=bigtable_table_admin.CreateTableFromSnapshotMetadata, + ) + + # Done; return the response. + return response + + def list_tables( + self, + request: bigtable_table_admin.ListTablesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTablesPager: + r"""Lists all tables served from a specified instance. + + Args: + request (google.cloud.bigtable_admin_v2.types.ListTablesRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] + parent (str): + Required. The unique name of the instance for which + tables should be listed. Values are of the form + ``projects/{project}/instances/{instance}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListTablesPager: + Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.ListTablesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.ListTablesRequest): + request = bigtable_table_admin.ListTablesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_tables] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListTablesPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def get_table( + self, + request: bigtable_table_admin.GetTableRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> table.Table: + r"""Gets metadata information about the specified table. + + Args: + request (google.cloud.bigtable_admin_v2.types.GetTableRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.GetTable][google.bigtable.admin.v2.BigtableTableAdmin.GetTable] + name (str): + Required. The unique name of the requested table. Values + are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.Table: + A collection of user data indexed by + row, column, and timestamp. Each table + is served using the resources of its + parent cluster. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.GetTableRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.GetTableRequest): + request = bigtable_table_admin.GetTableRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_table] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def delete_table( + self, + request: bigtable_table_admin.DeleteTableRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Permanently deletes a specified table and all of its + data. + + Args: + request (google.cloud.bigtable_admin_v2.types.DeleteTableRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable] + name (str): + Required. The unique name of the table to be deleted. + Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.DeleteTableRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.DeleteTableRequest): + request = bigtable_table_admin.DeleteTableRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_table] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + def modify_column_families( + self, + request: bigtable_table_admin.ModifyColumnFamiliesRequest = None, + *, + name: str = None, + modifications: Sequence[ + bigtable_table_admin.ModifyColumnFamiliesRequest.Modification + ] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> table.Table: + r"""Performs a series of column family modifications on + the specified table. 
Either all or none of the + modifications will occur before this method returns, but + data requests received prior to that point may see a + table where only some modifications have taken effect. + + Args: + request (google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies] + name (str): + Required. The unique name of the table whose families + should be modified. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + modifications (Sequence[google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest.Modification]): + Required. Modifications to be + atomically applied to the specified + table's families. Entries are applied in + order, meaning that earlier + modifications can be masked by later + ones (in the case of repeated updates to + the same family, for example). + + This corresponds to the ``modifications`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.Table: + A collection of user data indexed by + row, column, and timestamp. Each table + is served using the resources of its + parent cluster. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, modifications]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.ModifyColumnFamiliesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.ModifyColumnFamiliesRequest): + request = bigtable_table_admin.ModifyColumnFamiliesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + if modifications: + request.modifications.extend(modifications) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.modify_column_families] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def drop_row_range( + self, + request: bigtable_table_admin.DropRowRangeRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Permanently drop/delete a row range from a specified + table. 
The request can specify whether to delete all + rows in a table, or only those that match a particular + prefix. + + Args: + request (google.cloud.bigtable_admin_v2.types.DropRowRangeRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.DropRowRangeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.DropRowRangeRequest): + request = bigtable_table_admin.DropRowRangeRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.drop_row_range] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + def generate_consistency_token( + self, + request: bigtable_table_admin.GenerateConsistencyTokenRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> bigtable_table_admin.GenerateConsistencyTokenResponse: + r"""Generates a consistency token for a Table, which can + be used in CheckConsistency to check whether mutations + to the table that finished before this call started have + been replicated. The tokens will be available for 90 + days. + + Args: + request (google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] + name (str): + Required. The unique name of the Table for which to + create a consistency token. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenResponse: + Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.GenerateConsistencyTokenRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, bigtable_table_admin.GenerateConsistencyTokenRequest + ): + request = bigtable_table_admin.GenerateConsistencyTokenRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.generate_consistency_token + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def check_consistency( + self, + request: bigtable_table_admin.CheckConsistencyRequest = None, + *, + name: str = None, + consistency_token: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> bigtable_table_admin.CheckConsistencyResponse: + r"""Checks replication consistency based on a consistency + token, that is, if replication has caught up based on + the conditions specified in the token and the check + request. + + Args: + request (google.cloud.bigtable_admin_v2.types.CheckConsistencyRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] + name (str): + Required. The unique name of the Table for which to + check replication consistency. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + consistency_token (str): + Required. The token created using + GenerateConsistencyToken for the Table. + + This corresponds to the ``consistency_token`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.CheckConsistencyResponse: + Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, consistency_token]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.CheckConsistencyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, bigtable_table_admin.CheckConsistencyRequest): + request = bigtable_table_admin.CheckConsistencyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + if consistency_token is not None: + request.consistency_token = consistency_token + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.check_consistency] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def snapshot_table( + self, + request: bigtable_table_admin.SnapshotTableRequest = None, + *, + name: str = None, + cluster: str = None, + snapshot_id: str = None, + description: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Creates a new snapshot in the specified cluster from + the specified source table. The cluster and the table + must be in the same instance. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + Args: + request (google.cloud.bigtable_admin_v2.types.SnapshotTableRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable][google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable] + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + name (str): + Required. The unique name of the table to have the + snapshot taken. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster (str): + Required. The name of the cluster where the snapshot + will be created in. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + + This corresponds to the ``cluster`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + snapshot_id (str): + Required. The ID by which the new snapshot should be + referred to within the parent cluster, e.g., + ``mysnapshot`` of the form: + ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*`` rather than + ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/mysnapshot``. + + This corresponds to the ``snapshot_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + description (str): + Description of the snapshot. + This corresponds to the ``description`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Snapshot` A snapshot of a table at a particular time. A snapshot can be used as a + checkpoint for data restoration or a data source for + a new table. + + Note: This is a private alpha release of Cloud + Bigtable snapshots. This feature is not currently + available to most Cloud Bigtable customers. This + feature might be changed in backward-incompatible + ways and is not recommended for production use. It is + not subject to any SLA or deprecation policy. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, cluster, snapshot_id, description]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.SnapshotTableRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.SnapshotTableRequest): + request = bigtable_table_admin.SnapshotTableRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + if cluster is not None: + request.cluster = cluster + if snapshot_id is not None: + request.snapshot_id = snapshot_id + if description is not None: + request.description = description + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.snapshot_table] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + table.Snapshot, + metadata_type=bigtable_table_admin.SnapshotTableMetadata, + ) + + # Done; return the response. + return response + + def get_snapshot( + self, + request: bigtable_table_admin.GetSnapshotRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> table.Snapshot: + r"""Gets metadata information about the specified + snapshot. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + Args: + request (google.cloud.bigtable_admin_v2.types.GetSnapshotRequest): + The request object. 
Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot] + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + name (str): + Required. The unique name of the requested snapshot. + Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.Snapshot: + A snapshot of a table at a particular + time. A snapshot can be used as a + checkpoint for data restoration or a + data source for a new table. + Note: This is a private alpha release of + Cloud Bigtable snapshots. This feature + is not currently available to most Cloud + Bigtable customers. This feature might + be changed in backward-incompatible ways + and is not recommended for production + use. It is not subject to any SLA or + deprecation policy. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.GetSnapshotRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.GetSnapshotRequest): + request = bigtable_table_admin.GetSnapshotRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_snapshot] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_snapshots( + self, + request: bigtable_table_admin.ListSnapshotsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListSnapshotsPager: + r"""Lists all snapshots associated with the specified + cluster. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. 
It is not subject to any + SLA or deprecation policy. + + Args: + request (google.cloud.bigtable_admin_v2.types.ListSnapshotsRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + parent (str): + Required. The unique name of the cluster for which + snapshots should be listed. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + Use ``{cluster} = '-'`` to list snapshots for all + clusters in an instance, e.g., + ``projects/{project}/instances/{instance}/clusters/-``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListSnapshotsPager: + Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] + + Note: This is a private alpha release of Cloud + Bigtable snapshots. This feature is not currently + available to most Cloud Bigtable customers. This + feature might be changed in backward-incompatible + ways and is not recommended for production use. It is + not subject to any SLA or deprecation policy. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.ListSnapshotsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.ListSnapshotsRequest): + request = bigtable_table_admin.ListSnapshotsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_snapshots] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. 
+ response = pagers.ListSnapshotsPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_snapshot( + self, + request: bigtable_table_admin.DeleteSnapshotRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Permanently deletes the specified snapshot. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + Args: + request (google.cloud.bigtable_admin_v2.types.DeleteSnapshotRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot] + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + name (str): + Required. The unique name of the snapshot to be deleted. + Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.DeleteSnapshotRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.DeleteSnapshotRequest): + request = bigtable_table_admin.DeleteSnapshotRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_snapshot] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. 
+ rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + def create_backup( + self, + request: bigtable_table_admin.CreateBackupRequest = None, + *, + parent: str = None, + backup_id: str = None, + backup: table.Backup = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Starts creating a new Cloud Bigtable Backup. The returned backup + [long-running operation][google.longrunning.Operation] can be + used to track creation of the backup. The + [metadata][google.longrunning.Operation.metadata] field type is + [CreateBackupMetadata][google.bigtable.admin.v2.CreateBackupMetadata]. + The [response][google.longrunning.Operation.response] field type + is [Backup][google.bigtable.admin.v2.Backup], if successful. + Cancelling the returned operation will stop the creation and + delete the backup. + + Args: + request (google.cloud.bigtable_admin_v2.types.CreateBackupRequest): + The request object. The request for + [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup]. + parent (str): + Required. This must be one of the clusters in the + instance in which this table is located. The backup will + be stored in this cluster. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + backup_id (str): + Required. The id of the backup to be created. The + ``backup_id`` along with the parent ``parent`` are + combined as {parent}/backups/{backup_id} to create the + full backup name, of the form: + ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``. + This string must be between 1 and 50 characters in + length and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]*. + + This corresponds to the ``backup_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + backup (google.cloud.bigtable_admin_v2.types.Backup): + Required. The backup to create. + This corresponds to the ``backup`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.bigtable_admin_v2.types.Backup` A + backup of a Cloud Bigtable table. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, backup_id, backup]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.CreateBackupRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, bigtable_table_admin.CreateBackupRequest): + request = bigtable_table_admin.CreateBackupRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if backup_id is not None: + request.backup_id = backup_id + if backup is not None: + request.backup = backup + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_backup] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + table.Backup, + metadata_type=bigtable_table_admin.CreateBackupMetadata, + ) + + # Done; return the response. + return response + + def get_backup( + self, + request: bigtable_table_admin.GetBackupRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> table.Backup: + r"""Gets metadata on a pending or completed Cloud + Bigtable Backup. + + Args: + request (google.cloud.bigtable_admin_v2.types.GetBackupRequest): + The request object. The request for + [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup]. + name (str): + Required. Name of the backup. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.Backup: + A backup of a Cloud Bigtable table. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.GetBackupRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.GetBackupRequest): + request = bigtable_table_admin.GetBackupRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_backup] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. 
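+        # Unlike ``create_backup``, this is not a long-running operation: the RPC
+        # returns the ``Backup`` resource directly, so no operation wrapping is needed.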
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def update_backup( + self, + request: bigtable_table_admin.UpdateBackupRequest = None, + *, + backup: table.Backup = None, + update_mask: field_mask.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> table.Backup: + r"""Updates a pending or completed Cloud Bigtable Backup. + + Args: + request (google.cloud.bigtable_admin_v2.types.UpdateBackupRequest): + The request object. The request for + [UpdateBackup][google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup]. + backup (google.cloud.bigtable_admin_v2.types.Backup): + Required. The backup to update. ``backup.name``, and the + fields to be updated as specified by ``update_mask`` are + required. Other fields are ignored. Update is only + supported for the following fields: + + - ``backup.expire_time``. + + This corresponds to the ``backup`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. A mask specifying which fields (e.g. + ``expire_time``) in the Backup resource should be + updated. This mask is relative to the Backup resource, + not to the request message. The field mask must always + be specified; this prevents any future fields from being + erased accidentally by clients that do not know about + them. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.Backup: + A backup of a Cloud Bigtable table. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([backup, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.UpdateBackupRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.UpdateBackupRequest): + request = bigtable_table_admin.UpdateBackupRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if backup is not None: + request.backup = backup + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_backup] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("backup.name", request.backup.name),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def delete_backup( + self, + request: bigtable_table_admin.DeleteBackupRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a pending or completed Cloud Bigtable backup. + + Args: + request (google.cloud.bigtable_admin_v2.types.DeleteBackupRequest): + The request object. The request for + [DeleteBackup][google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup]. + name (str): + Required. Name of the backup to delete. Values are of + the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.DeleteBackupRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.DeleteBackupRequest): + request = bigtable_table_admin.DeleteBackupRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_backup] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + def list_backups( + self, + request: bigtable_table_admin.ListBackupsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListBackupsPager: + r"""Lists Cloud Bigtable backups. Returns both completed + and pending backups. + + Args: + request (google.cloud.bigtable_admin_v2.types.ListBackupsRequest): + The request object. The request for + [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. + parent (str): + Required. The cluster to list backups from. Values are + of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + Use ``{cluster} = '-'`` to list backups for all clusters + in an instance, e.g., + ``projects/{project}/instances/{instance}/clusters/-``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListBackupsPager: + The response for + [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.ListBackupsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.ListBackupsRequest): + request = bigtable_table_admin.ListBackupsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_backups] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListBackupsPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def restore_table( + self, + request: bigtable_table_admin.RestoreTableRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Create a new table by restoring from a completed backup. The new + table must be in the same instance as the instance containing + the backup. The returned table [long-running + operation][google.longrunning.Operation] can be used to track + the progress of the operation, and to cancel it. The + [metadata][google.longrunning.Operation.metadata] field type is + [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. + The [response][google.longrunning.Operation.response] type is + [Table][google.bigtable.admin.v2.Table], if successful. + + Args: + request (google.cloud.bigtable_admin_v2.types.RestoreTableRequest): + The request object. The request for + [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Table` A collection of user data indexed by row, column, and timestamp. 
+ Each table is served using the resources of its + parent cluster. + + """ + # Create or coerce a protobuf request object. + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.RestoreTableRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.RestoreTableRequest): + request = bigtable_table_admin.RestoreTableRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.restore_table] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + table.Table, + metadata_type=bigtable_table_admin.RestoreTableMetadata, + ) + + # Done; return the response. + return response + + def get_iam_policy( + self, + request: iam_policy.GetIamPolicyRequest = None, + *, + resource: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy.Policy: + r"""Gets the access control policy for a Table or Backup + resource. Returns an empty policy if the resource exists + but does not have a policy set. + + Args: + request (google.iam.v1.iam_policy_pb2.GetIamPolicyRequest): + The request object. Request message for `GetIamPolicy` + method. + resource (str): + REQUIRED: The resource for which the + policy is being requested. See the + operation documentation for the + appropriate value for this field. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.iam.v1.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. It is used to + specify access control policies for Cloud Platform + resources. + + A Policy is a collection of bindings. A binding binds + one or more members to a single role. Members can be + user accounts, service accounts, Google groups, and + domains (such as G Suite). A role is a named list of + permissions (defined by IAM or configured by users). + A binding can optionally specify a condition, which + is a logic expression that further constrains the + role binding based on attributes about the request + and/or target resource. 
+ + **JSON Example** + + { + "bindings": [ + { + "role": + "roles/resourcemanager.organizationAdmin", + "members": [ "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + + }, { "role": + "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { "title": "expirable access", + "description": "Does not grant access after + Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } + + ] + + } + + **YAML Example** + + bindings: - members: - user:\ mike@example.com - + group:\ admins@example.com - domain:google.com - + serviceAccount:\ my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - + members: - user:\ eve@example.com role: + roles/resourcemanager.organizationViewer + condition: title: expirable access description: + Does not grant access after Sep 2020 expression: + request.time < + timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the + [IAM developer's + guide](\ https://cloud.google.com/iam/docs). + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([resource]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy.GetIamPolicyRequest(**request) + + elif not request: + request = iam_policy.GetIamPolicyRequest(resource=resource,) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_iam_policy] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def set_iam_policy( + self, + request: iam_policy.SetIamPolicyRequest = None, + *, + resource: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy.Policy: + r"""Sets the access control policy on a Table or Backup + resource. Replaces any existing policy. + + Args: + request (google.iam.v1.iam_policy_pb2.SetIamPolicyRequest): + The request object. Request message for `SetIamPolicy` + method. + resource (str): + REQUIRED: The resource for which the + policy is being specified. See the + operation documentation for the + appropriate value for this field. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.iam.v1.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. 
It is used to + specify access control policies for Cloud Platform + resources. + + A Policy is a collection of bindings. A binding binds + one or more members to a single role. Members can be + user accounts, service accounts, Google groups, and + domains (such as G Suite). A role is a named list of + permissions (defined by IAM or configured by users). + A binding can optionally specify a condition, which + is a logic expression that further constrains the + role binding based on attributes about the request + and/or target resource. + + **JSON Example** + + { + "bindings": [ + { + "role": + "roles/resourcemanager.organizationAdmin", + "members": [ "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + + }, { "role": + "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { "title": "expirable access", + "description": "Does not grant access after + Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } + + ] + + } + + **YAML Example** + + bindings: - members: - user:\ mike@example.com - + group:\ admins@example.com - domain:google.com - + serviceAccount:\ my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - + members: - user:\ eve@example.com role: + roles/resourcemanager.organizationViewer + condition: title: expirable access description: + Does not grant access after Sep 2020 expression: + request.time < + timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the + [IAM developer's + guide](\ https://cloud.google.com/iam/docs). + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([resource]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy.SetIamPolicyRequest(**request) + + elif not request: + request = iam_policy.SetIamPolicyRequest(resource=resource,) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_iam_policy] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def test_iam_permissions( + self, + request: iam_policy.TestIamPermissionsRequest = None, + *, + resource: str = None, + permissions: Sequence[str] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy.TestIamPermissionsResponse: + r"""Returns permissions that the caller has on the + specified Table or Backup resource. + + Args: + request (google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest): + The request object. Request message for + `TestIamPermissions` method. 
+ resource (str): + REQUIRED: The resource for which the + policy detail is being requested. See + the operation documentation for the + appropriate value for this field. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + permissions (Sequence[str]): + The set of permissions to check for the ``resource``. + Permissions with wildcards (such as '*' or 'storage.*') + are not allowed. For more information see `IAM + Overview `__. + + This corresponds to the ``permissions`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse: + Response message for TestIamPermissions method. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([resource, permissions]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy.TestIamPermissionsRequest(**request) + + elif not request: + request = iam_policy.TestIamPermissionsRequest( + resource=resource, permissions=permissions, + ) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.test_iam_permissions] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-bigtable-admin", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("BigtableTableAdminClient",) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py new file mode 100644 index 000000000000..be7c121d74cb --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py @@ -0,0 +1,405 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple + +from google.cloud.bigtable_admin_v2.types import bigtable_table_admin +from google.cloud.bigtable_admin_v2.types import table + + +class ListTablesPager: + """A pager for iterating through ``list_tables`` requests. + + This class thinly wraps an initial + :class:`google.cloud.bigtable_admin_v2.types.ListTablesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``tables`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListTables`` requests and continue to iterate + through the ``tables`` field on the + corresponding responses. + + All the usual :class:`google.cloud.bigtable_admin_v2.types.ListTablesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., bigtable_table_admin.ListTablesResponse], + request: bigtable_table_admin.ListTablesRequest, + response: bigtable_table_admin.ListTablesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.bigtable_admin_v2.types.ListTablesRequest): + The initial request object. + response (google.cloud.bigtable_admin_v2.types.ListTablesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = bigtable_table_admin.ListTablesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[bigtable_table_admin.ListTablesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[table.Table]: + for page in self.pages: + yield from page.tables + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListTablesAsyncPager: + """A pager for iterating through ``list_tables`` requests. + + This class thinly wraps an initial + :class:`google.cloud.bigtable_admin_v2.types.ListTablesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``tables`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListTables`` requests and continue to iterate + through the ``tables`` field on the + corresponding responses. + + All the usual :class:`google.cloud.bigtable_admin_v2.types.ListTablesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[bigtable_table_admin.ListTablesResponse]], + request: bigtable_table_admin.ListTablesRequest, + response: bigtable_table_admin.ListTablesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. 
+ + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.bigtable_admin_v2.types.ListTablesRequest): + The initial request object. + response (google.cloud.bigtable_admin_v2.types.ListTablesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = bigtable_table_admin.ListTablesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[bigtable_table_admin.ListTablesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[table.Table]: + async def async_generator(): + async for page in self.pages: + for response in page.tables: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListSnapshotsPager: + """A pager for iterating through ``list_snapshots`` requests. + + This class thinly wraps an initial + :class:`google.cloud.bigtable_admin_v2.types.ListSnapshotsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``snapshots`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListSnapshots`` requests and continue to iterate + through the ``snapshots`` field on the + corresponding responses. + + All the usual :class:`google.cloud.bigtable_admin_v2.types.ListSnapshotsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., bigtable_table_admin.ListSnapshotsResponse], + request: bigtable_table_admin.ListSnapshotsRequest, + response: bigtable_table_admin.ListSnapshotsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.bigtable_admin_v2.types.ListSnapshotsRequest): + The initial request object. + response (google.cloud.bigtable_admin_v2.types.ListSnapshotsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = bigtable_table_admin.ListSnapshotsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[bigtable_table_admin.ListSnapshotsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[table.Snapshot]: + for page in self.pages: + yield from page.snapshots + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListSnapshotsAsyncPager: + """A pager for iterating through ``list_snapshots`` requests. + + This class thinly wraps an initial + :class:`google.cloud.bigtable_admin_v2.types.ListSnapshotsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``snapshots`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListSnapshots`` requests and continue to iterate + through the ``snapshots`` field on the + corresponding responses. + + All the usual :class:`google.cloud.bigtable_admin_v2.types.ListSnapshotsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[bigtable_table_admin.ListSnapshotsResponse]], + request: bigtable_table_admin.ListSnapshotsRequest, + response: bigtable_table_admin.ListSnapshotsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.bigtable_admin_v2.types.ListSnapshotsRequest): + The initial request object. + response (google.cloud.bigtable_admin_v2.types.ListSnapshotsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = bigtable_table_admin.ListSnapshotsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[bigtable_table_admin.ListSnapshotsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[table.Snapshot]: + async def async_generator(): + async for page in self.pages: + for response in page.snapshots: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListBackupsPager: + """A pager for iterating through ``list_backups`` requests. + + This class thinly wraps an initial + :class:`google.cloud.bigtable_admin_v2.types.ListBackupsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``backups`` field. 
+ + If there are more pages, the ``__iter__`` method will make additional + ``ListBackups`` requests and continue to iterate + through the ``backups`` field on the + corresponding responses. + + All the usual :class:`google.cloud.bigtable_admin_v2.types.ListBackupsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., bigtable_table_admin.ListBackupsResponse], + request: bigtable_table_admin.ListBackupsRequest, + response: bigtable_table_admin.ListBackupsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.bigtable_admin_v2.types.ListBackupsRequest): + The initial request object. + response (google.cloud.bigtable_admin_v2.types.ListBackupsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = bigtable_table_admin.ListBackupsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[bigtable_table_admin.ListBackupsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[table.Backup]: + for page in self.pages: + yield from page.backups + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListBackupsAsyncPager: + """A pager for iterating through ``list_backups`` requests. + + This class thinly wraps an initial + :class:`google.cloud.bigtable_admin_v2.types.ListBackupsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``backups`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListBackups`` requests and continue to iterate + through the ``backups`` field on the + corresponding responses. + + All the usual :class:`google.cloud.bigtable_admin_v2.types.ListBackupsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[bigtable_table_admin.ListBackupsResponse]], + request: bigtable_table_admin.ListBackupsRequest, + response: bigtable_table_admin.ListBackupsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.bigtable_admin_v2.types.ListBackupsRequest): + The initial request object. + response (google.cloud.bigtable_admin_v2.types.ListBackupsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = bigtable_table_admin.ListBackupsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[bigtable_table_admin.ListBackupsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[table.Backup]: + async def async_generator(): + async for page in self.pages: + for response in page.backups: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py new file mode 100644 index 000000000000..8e9ae114dd57 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +from typing import Dict, Type + +from .base import BigtableTableAdminTransport +from .grpc import BigtableTableAdminGrpcTransport +from .grpc_asyncio import BigtableTableAdminGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = ( + OrderedDict() +) # type: Dict[str, Type[BigtableTableAdminTransport]] +_transport_registry["grpc"] = BigtableTableAdminGrpcTransport +_transport_registry["grpc_asyncio"] = BigtableTableAdminGrpcAsyncIOTransport + +__all__ = ( + "BigtableTableAdminTransport", + "BigtableTableAdminGrpcTransport", + "BigtableTableAdminGrpcAsyncIOTransport", +) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py new file mode 100644 index 000000000000..b54025c94e57 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py @@ -0,0 +1,517 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +import abc +import typing +import pkg_resources + +from google import auth # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials # type: ignore + +from google.cloud.bigtable_admin_v2.types import bigtable_table_admin +from google.cloud.bigtable_admin_v2.types import table +from google.cloud.bigtable_admin_v2.types import table as gba_table +from google.iam.v1 import iam_policy_pb2 as iam_policy # type: ignore +from google.iam.v1 import policy_pb2 as policy # type: ignore +from google.longrunning import operations_pb2 as operations # type: ignore +from google.protobuf import empty_pb2 as empty # type: ignore + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-bigtable-admin", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class BigtableTableAdminTransport(abc.ABC): + """Abstract transport class for BigtableTableAdmin.""" + + AUTH_SCOPES = ( + "https://www.googleapis.com/auth/bigtable.admin", + "https://www.googleapis.com/auth/bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-bigtable.admin", + "https://www.googleapis.com/auth/cloud-bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ) + + def __init__( + self, + *, + host: str = "bigtableadmin.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: typing.Optional[str] = None, + scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES, + quota_project_id: typing.Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scope (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + # If no credentials are provided, then determine the appropriate + # defaults. 
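+        # Resolution order: explicit ``credentials`` are used as-is; otherwise a
+        # ``credentials_file`` is loaded via ``google.auth``; failing that,
+        # Application Default Credentials are obtained from the environment.
+        # Passing both ``credentials`` and ``credentials_file`` is an error.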
+ if credentials and credentials_file: + raise exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = auth.load_credentials_from_file( + credentials_file, scopes=scopes, quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = auth.default( + scopes=scopes, quota_project_id=quota_project_id + ) + + # Save the credentials. + self._credentials = credentials + + # Lifted into its own function so it can be stubbed out during tests. + self._prep_wrapped_messages(client_info) + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.create_table: gapic_v1.method.wrap_method( + self.create_table, default_timeout=300.0, client_info=client_info, + ), + self.create_table_from_snapshot: gapic_v1.method.wrap_method( + self.create_table_from_snapshot, + default_timeout=None, + client_info=client_info, + ), + self.list_tables: gapic_v1.method.wrap_method( + self.list_tables, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.get_table: gapic_v1.method.wrap_method( + self.get_table, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.delete_table: gapic_v1.method.wrap_method( + self.delete_table, default_timeout=60.0, client_info=client_info, + ), + self.modify_column_families: gapic_v1.method.wrap_method( + self.modify_column_families, + default_timeout=300.0, + client_info=client_info, + ), + self.drop_row_range: gapic_v1.method.wrap_method( + self.drop_row_range, default_timeout=3600.0, client_info=client_info, + ), + self.generate_consistency_token: gapic_v1.method.wrap_method( + self.generate_consistency_token, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.check_consistency: gapic_v1.method.wrap_method( + self.check_consistency, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.snapshot_table: gapic_v1.method.wrap_method( + self.snapshot_table, default_timeout=None, client_info=client_info, + ), + self.get_snapshot: gapic_v1.method.wrap_method( + self.get_snapshot, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_snapshots: gapic_v1.method.wrap_method( + self.list_snapshots, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.delete_snapshot: gapic_v1.method.wrap_method( + self.delete_snapshot, 
default_timeout=60.0, client_info=client_info, + ), + self.create_backup: gapic_v1.method.wrap_method( + self.create_backup, default_timeout=None, client_info=client_info, + ), + self.get_backup: gapic_v1.method.wrap_method( + self.get_backup, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.update_backup: gapic_v1.method.wrap_method( + self.update_backup, default_timeout=60.0, client_info=client_info, + ), + self.delete_backup: gapic_v1.method.wrap_method( + self.delete_backup, default_timeout=60.0, client_info=client_info, + ), + self.list_backups: gapic_v1.method.wrap_method( + self.list_backups, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.restore_table: gapic_v1.method.wrap_method( + self.restore_table, default_timeout=None, client_info=client_info, + ), + self.get_iam_policy: gapic_v1.method.wrap_method( + self.get_iam_policy, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.set_iam_policy: gapic_v1.method.wrap_method( + self.set_iam_policy, default_timeout=60.0, client_info=client_info, + ), + self.test_iam_permissions: gapic_v1.method.wrap_method( + self.test_iam_permissions, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + } + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_table( + self, + ) -> typing.Callable[ + [bigtable_table_admin.CreateTableRequest], + typing.Union[gba_table.Table, typing.Awaitable[gba_table.Table]], + ]: + raise NotImplementedError() + + @property + def create_table_from_snapshot( + self, + ) -> typing.Callable[ + [bigtable_table_admin.CreateTableFromSnapshotRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def list_tables( + self, + ) -> typing.Callable[ + [bigtable_table_admin.ListTablesRequest], + typing.Union[ + bigtable_table_admin.ListTablesResponse, + typing.Awaitable[bigtable_table_admin.ListTablesResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_table( + self, + ) -> typing.Callable[ + [bigtable_table_admin.GetTableRequest], + typing.Union[table.Table, typing.Awaitable[table.Table]], + ]: + raise NotImplementedError() + + @property + def delete_table( + self, + ) -> typing.Callable[ + [bigtable_table_admin.DeleteTableRequest], + typing.Union[empty.Empty, typing.Awaitable[empty.Empty]], + ]: + raise NotImplementedError() + + @property + def modify_column_families( + self, + ) -> typing.Callable[ + [bigtable_table_admin.ModifyColumnFamiliesRequest], + typing.Union[table.Table, typing.Awaitable[table.Table]], + ]: + raise NotImplementedError() + + @property + def drop_row_range( + self, + ) 
-> typing.Callable[ + [bigtable_table_admin.DropRowRangeRequest], + typing.Union[empty.Empty, typing.Awaitable[empty.Empty]], + ]: + raise NotImplementedError() + + @property + def generate_consistency_token( + self, + ) -> typing.Callable[ + [bigtable_table_admin.GenerateConsistencyTokenRequest], + typing.Union[ + bigtable_table_admin.GenerateConsistencyTokenResponse, + typing.Awaitable[bigtable_table_admin.GenerateConsistencyTokenResponse], + ], + ]: + raise NotImplementedError() + + @property + def check_consistency( + self, + ) -> typing.Callable[ + [bigtable_table_admin.CheckConsistencyRequest], + typing.Union[ + bigtable_table_admin.CheckConsistencyResponse, + typing.Awaitable[bigtable_table_admin.CheckConsistencyResponse], + ], + ]: + raise NotImplementedError() + + @property + def snapshot_table( + self, + ) -> typing.Callable[ + [bigtable_table_admin.SnapshotTableRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def get_snapshot( + self, + ) -> typing.Callable[ + [bigtable_table_admin.GetSnapshotRequest], + typing.Union[table.Snapshot, typing.Awaitable[table.Snapshot]], + ]: + raise NotImplementedError() + + @property + def list_snapshots( + self, + ) -> typing.Callable[ + [bigtable_table_admin.ListSnapshotsRequest], + typing.Union[ + bigtable_table_admin.ListSnapshotsResponse, + typing.Awaitable[bigtable_table_admin.ListSnapshotsResponse], + ], + ]: + raise NotImplementedError() + + @property + def delete_snapshot( + self, + ) -> typing.Callable[ + [bigtable_table_admin.DeleteSnapshotRequest], + typing.Union[empty.Empty, typing.Awaitable[empty.Empty]], + ]: + raise NotImplementedError() + + @property + def create_backup( + self, + ) -> typing.Callable[ + [bigtable_table_admin.CreateBackupRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def get_backup( + self, + ) -> typing.Callable[ + [bigtable_table_admin.GetBackupRequest], + typing.Union[table.Backup, typing.Awaitable[table.Backup]], + ]: + raise NotImplementedError() + + @property + def update_backup( + self, + ) -> typing.Callable[ + [bigtable_table_admin.UpdateBackupRequest], + typing.Union[table.Backup, typing.Awaitable[table.Backup]], + ]: + raise NotImplementedError() + + @property + def delete_backup( + self, + ) -> typing.Callable[ + [bigtable_table_admin.DeleteBackupRequest], + typing.Union[empty.Empty, typing.Awaitable[empty.Empty]], + ]: + raise NotImplementedError() + + @property + def list_backups( + self, + ) -> typing.Callable[ + [bigtable_table_admin.ListBackupsRequest], + typing.Union[ + bigtable_table_admin.ListBackupsResponse, + typing.Awaitable[bigtable_table_admin.ListBackupsResponse], + ], + ]: + raise NotImplementedError() + + @property + def restore_table( + self, + ) -> typing.Callable[ + [bigtable_table_admin.RestoreTableRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def get_iam_policy( + self, + ) -> typing.Callable[ + [iam_policy.GetIamPolicyRequest], + typing.Union[policy.Policy, typing.Awaitable[policy.Policy]], + ]: + raise NotImplementedError() + + @property + def set_iam_policy( + self, + ) -> typing.Callable[ + [iam_policy.SetIamPolicyRequest], + typing.Union[policy.Policy, typing.Awaitable[policy.Policy]], + ]: + raise NotImplementedError() + + @property + def test_iam_permissions( + self, + ) -> typing.Callable[ + 
[iam_policy.TestIamPermissionsRequest], + typing.Union[ + iam_policy.TestIamPermissionsResponse, + typing.Awaitable[iam_policy.TestIamPermissionsResponse], + ], + ]: + raise NotImplementedError() + + +__all__ = ("BigtableTableAdminTransport",) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py new file mode 100644 index 000000000000..4f54f3a7ee70 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py @@ -0,0 +1,944 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.bigtable_admin_v2.types import bigtable_table_admin +from google.cloud.bigtable_admin_v2.types import table +from google.cloud.bigtable_admin_v2.types import table as gba_table +from google.iam.v1 import iam_policy_pb2 as iam_policy # type: ignore +from google.iam.v1 import policy_pb2 as policy # type: ignore +from google.longrunning import operations_pb2 as operations # type: ignore +from google.protobuf import empty_pb2 as empty # type: ignore + +from .base import BigtableTableAdminTransport, DEFAULT_CLIENT_INFO + + +class BigtableTableAdminGrpcTransport(BigtableTableAdminTransport): + """gRPC backend transport for BigtableTableAdmin. + + Service for creating, configuring, and deleting Cloud + Bigtable tables. + + Provides access to the table schemas only, not the data stored + within the tables. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "bigtableadmin.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the transport. 
+
+        Args:
+            host (Optional[str]): The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
+                ignored if ``channel`` is provided.
+            channel (Optional[grpc.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for grpc channel. It is ignored if ``channel`` is provided.
+            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                A callback to provide client certificate bytes and private key bytes,
+                both in PEM format. It is used to configure mutual TLS channel. It is
+                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+        self._ssl_channel_credentials = ssl_channel_credentials
+
+        if api_mtls_endpoint:
+            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+        if client_cert_source:
+            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+        if channel:
+            # Sanity check: Ensure that channel and credentials are not both
+            # provided.
+            credentials = False
+
+            # If a channel was explicitly provided, set it.
+            self._grpc_channel = channel
+            self._ssl_channel_credentials = None
+        elif api_mtls_endpoint:
+            host = (
+                api_mtls_endpoint
+                if ":" in api_mtls_endpoint
+                else api_mtls_endpoint + ":443"
+            )
+
+            if credentials is None:
+                credentials, _ = auth.default(
+                    scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+                )
+
+            # Create SSL credentials with client_cert_source or application
+            # default SSL credentials.
+            if client_cert_source:
+                cert, key = client_cert_source()
+                ssl_credentials = grpc.ssl_channel_credentials(
+                    certificate_chain=cert, private_key=key
+                )
+            else:
+                ssl_credentials = SslCredentials().ssl_credentials
+
+            # create a new channel. The provided one is ignored.
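+            # NOTE: an illustrative sketch with assumed names (``creds``), not
+            # part of the generated surface: callers who want full control over
+            # the channel can bypass this branch by building one up front and
+            # passing it in, in which case the credentials and mTLS arguments
+            # handled here are ignored:
+            #
+            #     channel = BigtableTableAdminGrpcTransport.create_channel(
+            #         "bigtableadmin.googleapis.com:443", credentials=creds
+            #     )
+            #     transport = BigtableTableAdminGrpcTransport(channel=channel)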
+            self._grpc_channel = type(self).create_channel(
+                host,
+                credentials=credentials,
+                credentials_file=credentials_file,
+                ssl_credentials=ssl_credentials,
+                scopes=scopes or self.AUTH_SCOPES,
+                quota_project_id=quota_project_id,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+            self._ssl_channel_credentials = ssl_credentials
+        else:
+            host = host if ":" in host else host + ":443"
+
+            if credentials is None:
+                credentials, _ = auth.default(
+                    scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+                )
+
+            if client_cert_source_for_mtls and not ssl_channel_credentials:
+                cert, key = client_cert_source_for_mtls()
+                self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                    certificate_chain=cert, private_key=key
+                )
+
+            # create a new channel. The provided one is ignored.
+            self._grpc_channel = type(self).create_channel(
+                host,
+                credentials=credentials,
+                credentials_file=credentials_file,
+                ssl_credentials=self._ssl_channel_credentials,
+                scopes=scopes or self.AUTH_SCOPES,
+                quota_project_id=quota_project_id,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+
+        self._stubs = {}  # type: Dict[str, Callable]
+        self._operations_client = None
+
+        # Run the base constructor.
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes or self.AUTH_SCOPES,
+            quota_project_id=quota_project_id,
+            client_info=client_info,
+        )
+
+    @classmethod
+    def create_channel(
+        cls,
+        host: str = "bigtableadmin.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: str = None,
+        scopes: Optional[Sequence[str]] = None,
+        quota_project_id: Optional[str] = None,
+        **kwargs,
+    ) -> grpc.Channel:
+        """Create and return a gRPC channel object.
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            grpc.Channel: A gRPC channel object.
+
+        Raises:
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+        scopes = scopes or cls.AUTH_SCOPES
+        return grpc_helpers.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes,
+            quota_project_id=quota_project_id,
+            **kwargs,
+        )
+
+    @property
+    def grpc_channel(self) -> grpc.Channel:
+        """Return the channel designed to connect to this service.
+        """
+        return self._grpc_channel
+
+    @property
+    def operations_client(self) -> operations_v1.OperationsClient:
+        """Create the client designed to process long-running operations.
+
+        This property caches on the instance; repeated calls return the same
+        client.
+ """ + # Sanity check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient(self.grpc_channel) + + # Return the client from cache. + return self._operations_client + + @property + def create_table( + self, + ) -> Callable[[bigtable_table_admin.CreateTableRequest], gba_table.Table]: + r"""Return a callable for the create table method over gRPC. + + Creates a new table in the specified instance. + The table can be created with a full set of initial + column families, specified in the request. + + Returns: + Callable[[~.CreateTableRequest], + ~.Table]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_table" not in self._stubs: + self._stubs["create_table"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTable", + request_serializer=bigtable_table_admin.CreateTableRequest.serialize, + response_deserializer=gba_table.Table.deserialize, + ) + return self._stubs["create_table"] + + @property + def create_table_from_snapshot( + self, + ) -> Callable[ + [bigtable_table_admin.CreateTableFromSnapshotRequest], operations.Operation + ]: + r"""Return a callable for the create table from snapshot method over gRPC. + + Creates a new table from the specified snapshot. The + target table must not exist. The snapshot and the table + must be in the same instance. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + Returns: + Callable[[~.CreateTableFromSnapshotRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_table_from_snapshot" not in self._stubs: + self._stubs["create_table_from_snapshot"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTableFromSnapshot", + request_serializer=bigtable_table_admin.CreateTableFromSnapshotRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["create_table_from_snapshot"] + + @property + def list_tables( + self, + ) -> Callable[ + [bigtable_table_admin.ListTablesRequest], + bigtable_table_admin.ListTablesResponse, + ]: + r"""Return a callable for the list tables method over gRPC. + + Lists all tables served from a specified instance. + + Returns: + Callable[[~.ListTablesRequest], + ~.ListTablesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
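+        # NOTE: a hedged usage sketch (``transport`` and ``parent`` are assumed
+        # to exist); each of these properties returns the cached gRPC
+        # multicallable, so at this layer an RPC can be issued directly with a
+        # request proto:
+        #
+        #     request = bigtable_table_admin.ListTablesRequest(parent=parent)
+        #     response = transport.list_tables(request)
+        #
+        # The generated client normally invokes the same callables wrapped with
+        # the retry/timeout defaults configured in the transport base class.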
+ if "list_tables" not in self._stubs: + self._stubs["list_tables"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/ListTables", + request_serializer=bigtable_table_admin.ListTablesRequest.serialize, + response_deserializer=bigtable_table_admin.ListTablesResponse.deserialize, + ) + return self._stubs["list_tables"] + + @property + def get_table( + self, + ) -> Callable[[bigtable_table_admin.GetTableRequest], table.Table]: + r"""Return a callable for the get table method over gRPC. + + Gets metadata information about the specified table. + + Returns: + Callable[[~.GetTableRequest], + ~.Table]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_table" not in self._stubs: + self._stubs["get_table"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/GetTable", + request_serializer=bigtable_table_admin.GetTableRequest.serialize, + response_deserializer=table.Table.deserialize, + ) + return self._stubs["get_table"] + + @property + def delete_table( + self, + ) -> Callable[[bigtable_table_admin.DeleteTableRequest], empty.Empty]: + r"""Return a callable for the delete table method over gRPC. + + Permanently deletes a specified table and all of its + data. + + Returns: + Callable[[~.DeleteTableRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_table" not in self._stubs: + self._stubs["delete_table"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable", + request_serializer=bigtable_table_admin.DeleteTableRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_table"] + + @property + def modify_column_families( + self, + ) -> Callable[[bigtable_table_admin.ModifyColumnFamiliesRequest], table.Table]: + r"""Return a callable for the modify column families method over gRPC. + + Performs a series of column family modifications on + the specified table. Either all or none of the + modifications will occur before this method returns, but + data requests received prior to that point may see a + table where only some modifications have taken effect. + + Returns: + Callable[[~.ModifyColumnFamiliesRequest], + ~.Table]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "modify_column_families" not in self._stubs: + self._stubs["modify_column_families"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/ModifyColumnFamilies", + request_serializer=bigtable_table_admin.ModifyColumnFamiliesRequest.serialize, + response_deserializer=table.Table.deserialize, + ) + return self._stubs["modify_column_families"] + + @property + def drop_row_range( + self, + ) -> Callable[[bigtable_table_admin.DropRowRangeRequest], empty.Empty]: + r"""Return a callable for the drop row range method over gRPC. 
+ + Permanently drop/delete a row range from a specified + table. The request can specify whether to delete all + rows in a table, or only those that match a particular + prefix. + + Returns: + Callable[[~.DropRowRangeRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "drop_row_range" not in self._stubs: + self._stubs["drop_row_range"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/DropRowRange", + request_serializer=bigtable_table_admin.DropRowRangeRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["drop_row_range"] + + @property + def generate_consistency_token( + self, + ) -> Callable[ + [bigtable_table_admin.GenerateConsistencyTokenRequest], + bigtable_table_admin.GenerateConsistencyTokenResponse, + ]: + r"""Return a callable for the generate consistency token method over gRPC. + + Generates a consistency token for a Table, which can + be used in CheckConsistency to check whether mutations + to the table that finished before this call started have + been replicated. The tokens will be available for 90 + days. + + Returns: + Callable[[~.GenerateConsistencyTokenRequest], + ~.GenerateConsistencyTokenResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "generate_consistency_token" not in self._stubs: + self._stubs["generate_consistency_token"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/GenerateConsistencyToken", + request_serializer=bigtable_table_admin.GenerateConsistencyTokenRequest.serialize, + response_deserializer=bigtable_table_admin.GenerateConsistencyTokenResponse.deserialize, + ) + return self._stubs["generate_consistency_token"] + + @property + def check_consistency( + self, + ) -> Callable[ + [bigtable_table_admin.CheckConsistencyRequest], + bigtable_table_admin.CheckConsistencyResponse, + ]: + r"""Return a callable for the check consistency method over gRPC. + + Checks replication consistency based on a consistency + token, that is, if replication has caught up based on + the conditions specified in the token and the check + request. + + Returns: + Callable[[~.CheckConsistencyRequest], + ~.CheckConsistencyResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "check_consistency" not in self._stubs: + self._stubs["check_consistency"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/CheckConsistency", + request_serializer=bigtable_table_admin.CheckConsistencyRequest.serialize, + response_deserializer=bigtable_table_admin.CheckConsistencyResponse.deserialize, + ) + return self._stubs["check_consistency"] + + @property + def snapshot_table( + self, + ) -> Callable[[bigtable_table_admin.SnapshotTableRequest], operations.Operation]: + r"""Return a callable for the snapshot table method over gRPC. 
+ + Creates a new snapshot in the specified cluster from + the specified source table. The cluster and the table + must be in the same instance. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + Returns: + Callable[[~.SnapshotTableRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "snapshot_table" not in self._stubs: + self._stubs["snapshot_table"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/SnapshotTable", + request_serializer=bigtable_table_admin.SnapshotTableRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["snapshot_table"] + + @property + def get_snapshot( + self, + ) -> Callable[[bigtable_table_admin.GetSnapshotRequest], table.Snapshot]: + r"""Return a callable for the get snapshot method over gRPC. + + Gets metadata information about the specified + snapshot. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + Returns: + Callable[[~.GetSnapshotRequest], + ~.Snapshot]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_snapshot" not in self._stubs: + self._stubs["get_snapshot"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/GetSnapshot", + request_serializer=bigtable_table_admin.GetSnapshotRequest.serialize, + response_deserializer=table.Snapshot.deserialize, + ) + return self._stubs["get_snapshot"] + + @property + def list_snapshots( + self, + ) -> Callable[ + [bigtable_table_admin.ListSnapshotsRequest], + bigtable_table_admin.ListSnapshotsResponse, + ]: + r"""Return a callable for the list snapshots method over gRPC. + + Lists all snapshots associated with the specified + cluster. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + Returns: + Callable[[~.ListSnapshotsRequest], + ~.ListSnapshotsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
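+        # NOTE: illustrative only, with assumed names (``snapshot_request``);
+        # RPCs such as ``snapshot_table`` and ``create_table_from_snapshot``
+        # return a raw ``google.longrunning`` Operation proto at this layer,
+        # which can be polled through the transport's operations client:
+        #
+        #     operation = transport.snapshot_table(snapshot_request)
+        #     latest = transport.operations_client.get_operation(operation.name)
+        #
+        # The generated client instead returns a future-like Operation wrapper
+        # that handles this polling.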
+ if "list_snapshots" not in self._stubs: + self._stubs["list_snapshots"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/ListSnapshots", + request_serializer=bigtable_table_admin.ListSnapshotsRequest.serialize, + response_deserializer=bigtable_table_admin.ListSnapshotsResponse.deserialize, + ) + return self._stubs["list_snapshots"] + + @property + def delete_snapshot( + self, + ) -> Callable[[bigtable_table_admin.DeleteSnapshotRequest], empty.Empty]: + r"""Return a callable for the delete snapshot method over gRPC. + + Permanently deletes the specified snapshot. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + Returns: + Callable[[~.DeleteSnapshotRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_snapshot" not in self._stubs: + self._stubs["delete_snapshot"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteSnapshot", + request_serializer=bigtable_table_admin.DeleteSnapshotRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_snapshot"] + + @property + def create_backup( + self, + ) -> Callable[[bigtable_table_admin.CreateBackupRequest], operations.Operation]: + r"""Return a callable for the create backup method over gRPC. + + Starts creating a new Cloud Bigtable Backup. The returned backup + [long-running operation][google.longrunning.Operation] can be + used to track creation of the backup. The + [metadata][google.longrunning.Operation.metadata] field type is + [CreateBackupMetadata][google.bigtable.admin.v2.CreateBackupMetadata]. + The [response][google.longrunning.Operation.response] field type + is [Backup][google.bigtable.admin.v2.Backup], if successful. + Cancelling the returned operation will stop the creation and + delete the backup. + + Returns: + Callable[[~.CreateBackupRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_backup" not in self._stubs: + self._stubs["create_backup"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/CreateBackup", + request_serializer=bigtable_table_admin.CreateBackupRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["create_backup"] + + @property + def get_backup( + self, + ) -> Callable[[bigtable_table_admin.GetBackupRequest], table.Backup]: + r"""Return a callable for the get backup method over gRPC. + + Gets metadata on a pending or completed Cloud + Bigtable Backup. + + Returns: + Callable[[~.GetBackupRequest], + ~.Backup]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_backup" not in self._stubs: + self._stubs["get_backup"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/GetBackup", + request_serializer=bigtable_table_admin.GetBackupRequest.serialize, + response_deserializer=table.Backup.deserialize, + ) + return self._stubs["get_backup"] + + @property + def update_backup( + self, + ) -> Callable[[bigtable_table_admin.UpdateBackupRequest], table.Backup]: + r"""Return a callable for the update backup method over gRPC. + + Updates a pending or completed Cloud Bigtable Backup. + + Returns: + Callable[[~.UpdateBackupRequest], + ~.Backup]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_backup" not in self._stubs: + self._stubs["update_backup"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateBackup", + request_serializer=bigtable_table_admin.UpdateBackupRequest.serialize, + response_deserializer=table.Backup.deserialize, + ) + return self._stubs["update_backup"] + + @property + def delete_backup( + self, + ) -> Callable[[bigtable_table_admin.DeleteBackupRequest], empty.Empty]: + r"""Return a callable for the delete backup method over gRPC. + + Deletes a pending or completed Cloud Bigtable backup. + + Returns: + Callable[[~.DeleteBackupRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_backup" not in self._stubs: + self._stubs["delete_backup"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteBackup", + request_serializer=bigtable_table_admin.DeleteBackupRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_backup"] + + @property + def list_backups( + self, + ) -> Callable[ + [bigtable_table_admin.ListBackupsRequest], + bigtable_table_admin.ListBackupsResponse, + ]: + r"""Return a callable for the list backups method over gRPC. + + Lists Cloud Bigtable backups. Returns both completed + and pending backups. + + Returns: + Callable[[~.ListBackupsRequest], + ~.ListBackupsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_backups" not in self._stubs: + self._stubs["list_backups"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/ListBackups", + request_serializer=bigtable_table_admin.ListBackupsRequest.serialize, + response_deserializer=bigtable_table_admin.ListBackupsResponse.deserialize, + ) + return self._stubs["list_backups"] + + @property + def restore_table( + self, + ) -> Callable[[bigtable_table_admin.RestoreTableRequest], operations.Operation]: + r"""Return a callable for the restore table method over gRPC. + + Create a new table by restoring from a completed backup. 
The new + table must be in the same instance as the instance containing + the backup. The returned table [long-running + operation][google.longrunning.Operation] can be used to track + the progress of the operation, and to cancel it. The + [metadata][google.longrunning.Operation.metadata] field type is + [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. + The [response][google.longrunning.Operation.response] type is + [Table][google.bigtable.admin.v2.Table], if successful. + + Returns: + Callable[[~.RestoreTableRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "restore_table" not in self._stubs: + self._stubs["restore_table"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/RestoreTable", + request_serializer=bigtable_table_admin.RestoreTableRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["restore_table"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy.GetIamPolicyRequest], policy.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + + Gets the access control policy for a Table or Backup + resource. Returns an empty policy if the resource exists + but does not have a policy set. + + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/GetIamPolicy", + request_serializer=iam_policy.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy.SetIamPolicyRequest], policy.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + + Sets the access control policy on a Table or Backup + resource. Replaces any existing policy. + + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/SetIamPolicy", + request_serializer=iam_policy.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy.TestIamPermissionsRequest], iam_policy.TestIamPermissionsResponse + ]: + r"""Return a callable for the test iam permissions method over gRPC. + + Returns permissions that the caller has on the + specified Table or Backup resource. 
+ + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/TestIamPermissions", + request_serializer=iam_policy.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ("BigtableTableAdminGrpcTransport",) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py new file mode 100644 index 000000000000..8e9197468ae3 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py @@ -0,0 +1,962 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import gapic_v1 # type: ignore +from google.api_core import grpc_helpers_async # type: ignore +from google.api_core import operations_v1 # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.bigtable_admin_v2.types import bigtable_table_admin +from google.cloud.bigtable_admin_v2.types import table +from google.cloud.bigtable_admin_v2.types import table as gba_table +from google.iam.v1 import iam_policy_pb2 as iam_policy # type: ignore +from google.iam.v1 import policy_pb2 as policy # type: ignore +from google.longrunning import operations_pb2 as operations # type: ignore +from google.protobuf import empty_pb2 as empty # type: ignore + +from .base import BigtableTableAdminTransport, DEFAULT_CLIENT_INFO +from .grpc import BigtableTableAdminGrpcTransport + + +class BigtableTableAdminGrpcAsyncIOTransport(BigtableTableAdminTransport): + """gRPC AsyncIO backend transport for BigtableTableAdmin. + + Service for creating, configuring, and deleting Cloud + Bigtable tables. + + Provides access to the table schemas only, not the data stored + within the tables. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. 
+
+    It sends protocol buffers over the wire using gRPC (which is built on
+    top of HTTP/2); the ``grpcio`` package must be installed.
+    """
+
+    _grpc_channel: aio.Channel
+    _stubs: Dict[str, Callable] = {}
+
+    @classmethod
+    def create_channel(
+        cls,
+        host: str = "bigtableadmin.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: Optional[str] = None,
+        scopes: Optional[Sequence[str]] = None,
+        quota_project_id: Optional[str] = None,
+        **kwargs,
+    ) -> aio.Channel:
+        """Create and return a gRPC AsyncIO channel object.
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            aio.Channel: A gRPC AsyncIO channel object.
+        """
+        scopes = scopes or cls.AUTH_SCOPES
+        return grpc_helpers_async.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes,
+            quota_project_id=quota_project_id,
+            **kwargs,
+        )
+
+    def __init__(
+        self,
+        *,
+        host: str = "bigtableadmin.googleapis.com",
+        credentials: credentials.Credentials = None,
+        credentials_file: Optional[str] = None,
+        scopes: Optional[Sequence[str]] = None,
+        channel: aio.Channel = None,
+        api_mtls_endpoint: str = None,
+        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+        ssl_channel_credentials: grpc.ChannelCredentials = None,
+        client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+        quota_project_id=None,
+        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+    ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]): The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            channel (Optional[aio.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated.
+                A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for grpc channel. It is ignored if ``channel`` is provided.
+            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                A callback to provide client certificate bytes and private key bytes,
+                both in PEM format. It is used to configure mutual TLS channel. It is
+                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+                and ``credentials_file`` are passed.
+        """
+        self._ssl_channel_credentials = ssl_channel_credentials
+
+        if api_mtls_endpoint:
+            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+        if client_cert_source:
+            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+        if channel:
+            # Sanity check: Ensure that channel and credentials are not both
+            # provided.
+            credentials = False
+
+            # If a channel was explicitly provided, set it.
+            self._grpc_channel = channel
+            self._ssl_channel_credentials = None
+        elif api_mtls_endpoint:
+            host = (
+                api_mtls_endpoint
+                if ":" in api_mtls_endpoint
+                else api_mtls_endpoint + ":443"
+            )
+
+            if credentials is None:
+                credentials, _ = auth.default(
+                    scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+                )
+
+            # Create SSL credentials with client_cert_source or application
+            # default SSL credentials.
+            if client_cert_source:
+                cert, key = client_cert_source()
+                ssl_credentials = grpc.ssl_channel_credentials(
+                    certificate_chain=cert, private_key=key
+                )
+            else:
+                ssl_credentials = SslCredentials().ssl_credentials
+
+            # create a new channel. The provided one is ignored.
+            self._grpc_channel = type(self).create_channel(
+                host,
+                credentials=credentials,
+                credentials_file=credentials_file,
+                ssl_credentials=ssl_credentials,
+                scopes=scopes or self.AUTH_SCOPES,
+                quota_project_id=quota_project_id,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+            self._ssl_channel_credentials = ssl_credentials
+        else:
+            host = host if ":" in host else host + ":443"
+
+            if credentials is None:
+                credentials, _ = auth.default(
+                    scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+                )
+
+            if client_cert_source_for_mtls and not ssl_channel_credentials:
+                cert, key = client_cert_source_for_mtls()
+                self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                    certificate_chain=cert, private_key=key
+                )
+
+            # create a new channel. The provided one is ignored.
+            self._grpc_channel = type(self).create_channel(
+                host,
+                credentials=credentials,
+                credentials_file=credentials_file,
+                ssl_credentials=self._ssl_channel_credentials,
+                scopes=scopes or self.AUTH_SCOPES,
+                quota_project_id=quota_project_id,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+
+        # Run the base constructor.
+ super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + client_info=client_info, + ) + + self._stubs = {} + self._operations_client = None + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_table( + self, + ) -> Callable[ + [bigtable_table_admin.CreateTableRequest], Awaitable[gba_table.Table] + ]: + r"""Return a callable for the create table method over gRPC. + + Creates a new table in the specified instance. + The table can be created with a full set of initial + column families, specified in the request. + + Returns: + Callable[[~.CreateTableRequest], + Awaitable[~.Table]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_table" not in self._stubs: + self._stubs["create_table"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTable", + request_serializer=bigtable_table_admin.CreateTableRequest.serialize, + response_deserializer=gba_table.Table.deserialize, + ) + return self._stubs["create_table"] + + @property + def create_table_from_snapshot( + self, + ) -> Callable[ + [bigtable_table_admin.CreateTableFromSnapshotRequest], + Awaitable[operations.Operation], + ]: + r"""Return a callable for the create table from snapshot method over gRPC. + + Creates a new table from the specified snapshot. The + target table must not exist. The snapshot and the table + must be in the same instance. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + Returns: + Callable[[~.CreateTableFromSnapshotRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
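+        # NOTE: a hedged sketch with assumed names (``create_table_request``);
+        # on this AsyncIO transport the properties return awaitable
+        # multicallables, so calls must run inside an event loop, e.g.:
+        #
+        #     table = await transport.create_table(create_table_request)
+        #
+        # Typically this happens through the async generated client rather
+        # than against the transport directly.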
+ if "create_table_from_snapshot" not in self._stubs: + self._stubs["create_table_from_snapshot"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTableFromSnapshot", + request_serializer=bigtable_table_admin.CreateTableFromSnapshotRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["create_table_from_snapshot"] + + @property + def list_tables( + self, + ) -> Callable[ + [bigtable_table_admin.ListTablesRequest], + Awaitable[bigtable_table_admin.ListTablesResponse], + ]: + r"""Return a callable for the list tables method over gRPC. + + Lists all tables served from a specified instance. + + Returns: + Callable[[~.ListTablesRequest], + Awaitable[~.ListTablesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_tables" not in self._stubs: + self._stubs["list_tables"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/ListTables", + request_serializer=bigtable_table_admin.ListTablesRequest.serialize, + response_deserializer=bigtable_table_admin.ListTablesResponse.deserialize, + ) + return self._stubs["list_tables"] + + @property + def get_table( + self, + ) -> Callable[[bigtable_table_admin.GetTableRequest], Awaitable[table.Table]]: + r"""Return a callable for the get table method over gRPC. + + Gets metadata information about the specified table. + + Returns: + Callable[[~.GetTableRequest], + Awaitable[~.Table]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_table" not in self._stubs: + self._stubs["get_table"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/GetTable", + request_serializer=bigtable_table_admin.GetTableRequest.serialize, + response_deserializer=table.Table.deserialize, + ) + return self._stubs["get_table"] + + @property + def delete_table( + self, + ) -> Callable[[bigtable_table_admin.DeleteTableRequest], Awaitable[empty.Empty]]: + r"""Return a callable for the delete table method over gRPC. + + Permanently deletes a specified table and all of its + data. + + Returns: + Callable[[~.DeleteTableRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_table" not in self._stubs: + self._stubs["delete_table"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable", + request_serializer=bigtable_table_admin.DeleteTableRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_table"] + + @property + def modify_column_families( + self, + ) -> Callable[ + [bigtable_table_admin.ModifyColumnFamiliesRequest], Awaitable[table.Table] + ]: + r"""Return a callable for the modify column families method over gRPC. + + Performs a series of column family modifications on + the specified table. 
Either all or none of the + modifications will occur before this method returns, but + data requests received prior to that point may see a + table where only some modifications have taken effect. + + Returns: + Callable[[~.ModifyColumnFamiliesRequest], + Awaitable[~.Table]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "modify_column_families" not in self._stubs: + self._stubs["modify_column_families"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/ModifyColumnFamilies", + request_serializer=bigtable_table_admin.ModifyColumnFamiliesRequest.serialize, + response_deserializer=table.Table.deserialize, + ) + return self._stubs["modify_column_families"] + + @property + def drop_row_range( + self, + ) -> Callable[[bigtable_table_admin.DropRowRangeRequest], Awaitable[empty.Empty]]: + r"""Return a callable for the drop row range method over gRPC. + + Permanently drop/delete a row range from a specified + table. The request can specify whether to delete all + rows in a table, or only those that match a particular + prefix. + + Returns: + Callable[[~.DropRowRangeRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "drop_row_range" not in self._stubs: + self._stubs["drop_row_range"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/DropRowRange", + request_serializer=bigtable_table_admin.DropRowRangeRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["drop_row_range"] + + @property + def generate_consistency_token( + self, + ) -> Callable[ + [bigtable_table_admin.GenerateConsistencyTokenRequest], + Awaitable[bigtable_table_admin.GenerateConsistencyTokenResponse], + ]: + r"""Return a callable for the generate consistency token method over gRPC. + + Generates a consistency token for a Table, which can + be used in CheckConsistency to check whether mutations + to the table that finished before this call started have + been replicated. The tokens will be available for 90 + days. + + Returns: + Callable[[~.GenerateConsistencyTokenRequest], + Awaitable[~.GenerateConsistencyTokenResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
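+        # NOTE: an illustrative sketch with assumed names (``gen_request``),
+        # assuming ``asyncio`` is imported; the intended workflow pairs this RPC
+        # with ``check_consistency``: generate a token after the writes of
+        # interest, then poll until the table reports consistent:
+        #
+        #     token_resp = await transport.generate_consistency_token(gen_request)
+        #     check_request = bigtable_table_admin.CheckConsistencyRequest(
+        #         name=gen_request.name,
+        #         consistency_token=token_resp.consistency_token,
+        #     )
+        #     while not (await transport.check_consistency(check_request)).consistent:
+        #         await asyncio.sleep(1)  # back off between polls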
+ if "generate_consistency_token" not in self._stubs: + self._stubs["generate_consistency_token"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/GenerateConsistencyToken", + request_serializer=bigtable_table_admin.GenerateConsistencyTokenRequest.serialize, + response_deserializer=bigtable_table_admin.GenerateConsistencyTokenResponse.deserialize, + ) + return self._stubs["generate_consistency_token"] + + @property + def check_consistency( + self, + ) -> Callable[ + [bigtable_table_admin.CheckConsistencyRequest], + Awaitable[bigtable_table_admin.CheckConsistencyResponse], + ]: + r"""Return a callable for the check consistency method over gRPC. + + Checks replication consistency based on a consistency + token, that is, if replication has caught up based on + the conditions specified in the token and the check + request. + + Returns: + Callable[[~.CheckConsistencyRequest], + Awaitable[~.CheckConsistencyResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "check_consistency" not in self._stubs: + self._stubs["check_consistency"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/CheckConsistency", + request_serializer=bigtable_table_admin.CheckConsistencyRequest.serialize, + response_deserializer=bigtable_table_admin.CheckConsistencyResponse.deserialize, + ) + return self._stubs["check_consistency"] + + @property + def snapshot_table( + self, + ) -> Callable[ + [bigtable_table_admin.SnapshotTableRequest], Awaitable[operations.Operation] + ]: + r"""Return a callable for the snapshot table method over gRPC. + + Creates a new snapshot in the specified cluster from + the specified source table. The cluster and the table + must be in the same instance. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + Returns: + Callable[[~.SnapshotTableRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "snapshot_table" not in self._stubs: + self._stubs["snapshot_table"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/SnapshotTable", + request_serializer=bigtable_table_admin.SnapshotTableRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["snapshot_table"] + + @property + def get_snapshot( + self, + ) -> Callable[[bigtable_table_admin.GetSnapshotRequest], Awaitable[table.Snapshot]]: + r"""Return a callable for the get snapshot method over gRPC. + + Gets metadata information about the specified + snapshot. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. 
+ + Returns: + Callable[[~.GetSnapshotRequest], + Awaitable[~.Snapshot]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_snapshot" not in self._stubs: + self._stubs["get_snapshot"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/GetSnapshot", + request_serializer=bigtable_table_admin.GetSnapshotRequest.serialize, + response_deserializer=table.Snapshot.deserialize, + ) + return self._stubs["get_snapshot"] + + @property + def list_snapshots( + self, + ) -> Callable[ + [bigtable_table_admin.ListSnapshotsRequest], + Awaitable[bigtable_table_admin.ListSnapshotsResponse], + ]: + r"""Return a callable for the list snapshots method over gRPC. + + Lists all snapshots associated with the specified + cluster. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + Returns: + Callable[[~.ListSnapshotsRequest], + Awaitable[~.ListSnapshotsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_snapshots" not in self._stubs: + self._stubs["list_snapshots"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/ListSnapshots", + request_serializer=bigtable_table_admin.ListSnapshotsRequest.serialize, + response_deserializer=bigtable_table_admin.ListSnapshotsResponse.deserialize, + ) + return self._stubs["list_snapshots"] + + @property + def delete_snapshot( + self, + ) -> Callable[[bigtable_table_admin.DeleteSnapshotRequest], Awaitable[empty.Empty]]: + r"""Return a callable for the delete snapshot method over gRPC. + + Permanently deletes the specified snapshot. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + Returns: + Callable[[~.DeleteSnapshotRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_snapshot" not in self._stubs: + self._stubs["delete_snapshot"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteSnapshot", + request_serializer=bigtable_table_admin.DeleteSnapshotRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_snapshot"] + + @property + def create_backup( + self, + ) -> Callable[ + [bigtable_table_admin.CreateBackupRequest], Awaitable[operations.Operation] + ]: + r"""Return a callable for the create backup method over gRPC. + + Starts creating a new Cloud Bigtable Backup. 
The returned backup + [long-running operation][google.longrunning.Operation] can be + used to track creation of the backup. The + [metadata][google.longrunning.Operation.metadata] field type is + [CreateBackupMetadata][google.bigtable.admin.v2.CreateBackupMetadata]. + The [response][google.longrunning.Operation.response] field type + is [Backup][google.bigtable.admin.v2.Backup], if successful. + Cancelling the returned operation will stop the creation and + delete the backup. + + Returns: + Callable[[~.CreateBackupRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_backup" not in self._stubs: + self._stubs["create_backup"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/CreateBackup", + request_serializer=bigtable_table_admin.CreateBackupRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["create_backup"] + + @property + def get_backup( + self, + ) -> Callable[[bigtable_table_admin.GetBackupRequest], Awaitable[table.Backup]]: + r"""Return a callable for the get backup method over gRPC. + + Gets metadata on a pending or completed Cloud + Bigtable Backup. + + Returns: + Callable[[~.GetBackupRequest], + Awaitable[~.Backup]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_backup" not in self._stubs: + self._stubs["get_backup"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/GetBackup", + request_serializer=bigtable_table_admin.GetBackupRequest.serialize, + response_deserializer=table.Backup.deserialize, + ) + return self._stubs["get_backup"] + + @property + def update_backup( + self, + ) -> Callable[[bigtable_table_admin.UpdateBackupRequest], Awaitable[table.Backup]]: + r"""Return a callable for the update backup method over gRPC. + + Updates a pending or completed Cloud Bigtable Backup. + + Returns: + Callable[[~.UpdateBackupRequest], + Awaitable[~.Backup]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_backup" not in self._stubs: + self._stubs["update_backup"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateBackup", + request_serializer=bigtable_table_admin.UpdateBackupRequest.serialize, + response_deserializer=table.Backup.deserialize, + ) + return self._stubs["update_backup"] + + @property + def delete_backup( + self, + ) -> Callable[[bigtable_table_admin.DeleteBackupRequest], Awaitable[empty.Empty]]: + r"""Return a callable for the delete backup method over gRPC. + + Deletes a pending or completed Cloud Bigtable backup. + + Returns: + Callable[[~.DeleteBackupRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_backup" not in self._stubs: + self._stubs["delete_backup"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteBackup", + request_serializer=bigtable_table_admin.DeleteBackupRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_backup"] + + @property + def list_backups( + self, + ) -> Callable[ + [bigtable_table_admin.ListBackupsRequest], + Awaitable[bigtable_table_admin.ListBackupsResponse], + ]: + r"""Return a callable for the list backups method over gRPC. + + Lists Cloud Bigtable backups. Returns both completed + and pending backups. + + Returns: + Callable[[~.ListBackupsRequest], + Awaitable[~.ListBackupsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_backups" not in self._stubs: + self._stubs["list_backups"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/ListBackups", + request_serializer=bigtable_table_admin.ListBackupsRequest.serialize, + response_deserializer=bigtable_table_admin.ListBackupsResponse.deserialize, + ) + return self._stubs["list_backups"] + + @property + def restore_table( + self, + ) -> Callable[ + [bigtable_table_admin.RestoreTableRequest], Awaitable[operations.Operation] + ]: + r"""Return a callable for the restore table method over gRPC. + + Create a new table by restoring from a completed backup. The new + table must be in the same instance as the instance containing + the backup. The returned table [long-running + operation][google.longrunning.Operation] can be used to track + the progress of the operation, and to cancel it. The + [metadata][google.longrunning.Operation.metadata] field type is + [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. + The [response][google.longrunning.Operation.response] type is + [Table][google.bigtable.admin.v2.Table], if successful. + + Returns: + Callable[[~.RestoreTableRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "restore_table" not in self._stubs: + self._stubs["restore_table"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/RestoreTable", + request_serializer=bigtable_table_admin.RestoreTableRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["restore_table"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy.GetIamPolicyRequest], Awaitable[policy.Policy]]: + r"""Return a callable for the get iam policy method over gRPC. + + Gets the access control policy for a Table or Backup + resource. Returns an empty policy if the resource exists + but does not have a policy set. + + Returns: + Callable[[~.GetIamPolicyRequest], + Awaitable[~.Policy]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/GetIamPolicy", + request_serializer=iam_policy.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy.SetIamPolicyRequest], Awaitable[policy.Policy]]: + r"""Return a callable for the set iam policy method over gRPC. + + Sets the access control policy on a Table or Backup + resource. Replaces any existing policy. + + Returns: + Callable[[~.SetIamPolicyRequest], + Awaitable[~.Policy]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/SetIamPolicy", + request_serializer=iam_policy.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy.TestIamPermissionsRequest], + Awaitable[iam_policy.TestIamPermissionsResponse], + ]: + r"""Return a callable for the test iam permissions method over gRPC. + + Returns permissions that the caller has on the + specified Table or Backup resource. + + Returns: + Callable[[~.TestIamPermissionsRequest], + Awaitable[~.TestIamPermissionsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/TestIamPermissions", + request_serializer=iam_policy.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ("BigtableTableAdminGrpcAsyncIOTransport",) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types.py deleted file mode 100644 index 7dbb939d1639..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types.py +++ /dev/null @@ -1,76 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -from __future__ import absolute_import -import sys - -from google.api_core.protobuf_helpers import get_messages - -from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2 -from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2 -from google.cloud.bigtable_admin_v2.proto import common_pb2 -from google.cloud.bigtable_admin_v2.proto import instance_pb2 -from google.cloud.bigtable_admin_v2.proto import table_pb2 -from google.iam.v1 import iam_policy_pb2 -from google.iam.v1 import options_pb2 -from google.iam.v1 import policy_pb2 -from google.longrunning import operations_pb2 -from google.protobuf import any_pb2 -from google.protobuf import duration_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 -from google.protobuf import timestamp_pb2 -from google.rpc import status_pb2 -from google.type import expr_pb2 - - -_shared_modules = [ - iam_policy_pb2, - options_pb2, - policy_pb2, - operations_pb2, - any_pb2, - duration_pb2, - empty_pb2, - field_mask_pb2, - timestamp_pb2, - status_pb2, - expr_pb2, -] - -_local_modules = [ - bigtable_instance_admin_pb2, - bigtable_table_admin_pb2, - common_pb2, - instance_pb2, - table_pb2, -] - -names = [] - -for module in _shared_modules: # pragma: NO COVER - for name, message in get_messages(module).items(): - setattr(sys.modules[__name__], name, message) - names.append(name) -for module in _local_modules: - for name, message in get_messages(module).items(): - message.__module__ = "google.cloud.bigtable_admin_v2.types" - setattr(sys.modules[__name__], name, message) - names.append(name) - - -__all__ = tuple(sorted(names)) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py new file mode 100644 index 000000000000..26c4b40c9dc2 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py @@ -0,0 +1,158 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from .common import ( + OperationProgress, + StorageType, +) +from .instance import ( + Instance, + Cluster, + AppProfile, +) +from .bigtable_instance_admin import ( + CreateInstanceRequest, + GetInstanceRequest, + ListInstancesRequest, + ListInstancesResponse, + PartialUpdateInstanceRequest, + DeleteInstanceRequest, + CreateClusterRequest, + GetClusterRequest, + ListClustersRequest, + ListClustersResponse, + DeleteClusterRequest, + CreateInstanceMetadata, + UpdateInstanceMetadata, + CreateClusterMetadata, + UpdateClusterMetadata, + CreateAppProfileRequest, + GetAppProfileRequest, + ListAppProfilesRequest, + ListAppProfilesResponse, + UpdateAppProfileRequest, + DeleteAppProfileRequest, + UpdateAppProfileMetadata, +) +from .table import ( + RestoreInfo, + Table, + ColumnFamily, + GcRule, + Snapshot, + Backup, + BackupInfo, + RestoreSourceType, +) +from .bigtable_table_admin import ( + RestoreTableRequest, + RestoreTableMetadata, + OptimizeRestoredTableMetadata, + CreateTableRequest, + CreateTableFromSnapshotRequest, + DropRowRangeRequest, + ListTablesRequest, + ListTablesResponse, + GetTableRequest, + DeleteTableRequest, + ModifyColumnFamiliesRequest, + GenerateConsistencyTokenRequest, + GenerateConsistencyTokenResponse, + CheckConsistencyRequest, + CheckConsistencyResponse, + SnapshotTableRequest, + GetSnapshotRequest, + ListSnapshotsRequest, + ListSnapshotsResponse, + DeleteSnapshotRequest, + SnapshotTableMetadata, + CreateTableFromSnapshotMetadata, + CreateBackupRequest, + CreateBackupMetadata, + UpdateBackupRequest, + GetBackupRequest, + DeleteBackupRequest, + ListBackupsRequest, + ListBackupsResponse, +) + +__all__ = ( + "OperationProgress", + "StorageType", + "Instance", + "Cluster", + "AppProfile", + "CreateInstanceRequest", + "GetInstanceRequest", + "ListInstancesRequest", + "ListInstancesResponse", + "PartialUpdateInstanceRequest", + "DeleteInstanceRequest", + "CreateClusterRequest", + "GetClusterRequest", + "ListClustersRequest", + "ListClustersResponse", + "DeleteClusterRequest", + "CreateInstanceMetadata", + "UpdateInstanceMetadata", + "CreateClusterMetadata", + "UpdateClusterMetadata", + "CreateAppProfileRequest", + "GetAppProfileRequest", + "ListAppProfilesRequest", + "ListAppProfilesResponse", + "UpdateAppProfileRequest", + "DeleteAppProfileRequest", + "UpdateAppProfileMetadata", + "RestoreInfo", + "Table", + "ColumnFamily", + "GcRule", + "Snapshot", + "Backup", + "BackupInfo", + "RestoreSourceType", + "RestoreTableRequest", + "RestoreTableMetadata", + "OptimizeRestoredTableMetadata", + "CreateTableRequest", + "CreateTableFromSnapshotRequest", + "DropRowRangeRequest", + "ListTablesRequest", + "ListTablesResponse", + "GetTableRequest", + "DeleteTableRequest", + "ModifyColumnFamiliesRequest", + "GenerateConsistencyTokenRequest", + "GenerateConsistencyTokenResponse", + "CheckConsistencyRequest", + "CheckConsistencyResponse", + "SnapshotTableRequest", + "GetSnapshotRequest", + "ListSnapshotsRequest", + "ListSnapshotsResponse", + "DeleteSnapshotRequest", + "SnapshotTableMetadata", + "CreateTableFromSnapshotMetadata", + "CreateBackupRequest", + "CreateBackupMetadata", + "UpdateBackupRequest", + "GetBackupRequest", + "DeleteBackupRequest", + "ListBackupsRequest", + "ListBackupsResponse", +) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py new file mode 100644 index 000000000000..38ae3eab6af1 --- /dev/null +++ 
b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py @@ -0,0 +1,530 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.bigtable_admin_v2.types import instance as gba_instance +from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +__protobuf__ = proto.module( + package="google.bigtable.admin.v2", + manifest={ + "CreateInstanceRequest", + "GetInstanceRequest", + "ListInstancesRequest", + "ListInstancesResponse", + "PartialUpdateInstanceRequest", + "DeleteInstanceRequest", + "CreateClusterRequest", + "GetClusterRequest", + "ListClustersRequest", + "ListClustersResponse", + "DeleteClusterRequest", + "CreateInstanceMetadata", + "UpdateInstanceMetadata", + "CreateClusterMetadata", + "UpdateClusterMetadata", + "CreateAppProfileRequest", + "GetAppProfileRequest", + "ListAppProfilesRequest", + "ListAppProfilesResponse", + "UpdateAppProfileRequest", + "DeleteAppProfileRequest", + "UpdateAppProfileMetadata", + }, +) + + +class CreateInstanceRequest(proto.Message): + r"""Request message for BigtableInstanceAdmin.CreateInstance. + + Attributes: + parent (str): + Required. The unique name of the project in which to create + the new instance. Values are of the form + ``projects/{project}``. + instance_id (str): + Required. The ID to be used when referring to the new + instance within its project, e.g., just ``myinstance`` + rather than ``projects/myproject/instances/myinstance``. + instance (google.cloud.bigtable_admin_v2.types.Instance): + Required. The instance to create. Fields marked + ``OutputOnly`` must be left blank. + clusters (Sequence[google.cloud.bigtable_admin_v2.types.CreateInstanceRequest.ClustersEntry]): + Required. The clusters to be created within the instance, + mapped by desired cluster ID, e.g., just ``mycluster`` + rather than + ``projects/myproject/instances/myinstance/clusters/mycluster``. + Fields marked ``OutputOnly`` must be left blank. Currently, + at most four clusters can be specified. + """ + + parent = proto.Field(proto.STRING, number=1) + + instance_id = proto.Field(proto.STRING, number=2) + + instance = proto.Field(proto.MESSAGE, number=3, message=gba_instance.Instance,) + + clusters = proto.MapField( + proto.STRING, proto.MESSAGE, number=4, message=gba_instance.Cluster, + ) + + +class GetInstanceRequest(proto.Message): + r"""Request message for BigtableInstanceAdmin.GetInstance. + + Attributes: + name (str): + Required. The unique name of the requested instance. Values + are of the form ``projects/{project}/instances/{instance}``. + """ + + name = proto.Field(proto.STRING, number=1) + + +class ListInstancesRequest(proto.Message): + r"""Request message for BigtableInstanceAdmin.ListInstances. + + Attributes: + parent (str): + Required. The unique name of the project for which a list of + instances is requested. 
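As an aside on the CreateInstanceRequest message defined above, a minimal proto-plus construction sketch might look as follows. The Instance and Cluster field names used here (display_name, location, serve_nodes) are assumptions drawn from the Admin API surface rather than from this module, and the resource names are placeholders.

    from google.cloud.bigtable_admin_v2 import types

    # Clusters are keyed by the desired cluster ID, as the docstring above notes.
    request = types.CreateInstanceRequest(
        parent="projects/myproject",
        instance_id="myinstance",
        instance=types.Instance(display_name="My Instance"),   # assumed field name
        clusters={
            "mycluster": types.Cluster(                        # assumed field names
                location="projects/myproject/locations/us-central1-b",
                serve_nodes=3,
            ),
        },
    )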
Values are of the form + ``projects/{project}``. + page_token (str): + DEPRECATED: This field is unused and ignored. + """ + + parent = proto.Field(proto.STRING, number=1) + + page_token = proto.Field(proto.STRING, number=2) + + +class ListInstancesResponse(proto.Message): + r"""Response message for BigtableInstanceAdmin.ListInstances. + + Attributes: + instances (Sequence[google.cloud.bigtable_admin_v2.types.Instance]): + The list of requested instances. + failed_locations (Sequence[str]): + Locations from which Instance information could not be + retrieved, due to an outage or some other transient + condition. Instances whose Clusters are all in one of the + failed locations may be missing from ``instances``, and + Instances with at least one Cluster in a failed location may + only have partial information returned. Values are of the + form ``projects//locations/`` + next_page_token (str): + DEPRECATED: This field is unused and ignored. + """ + + @property + def raw_page(self): + return self + + instances = proto.RepeatedField( + proto.MESSAGE, number=1, message=gba_instance.Instance, + ) + + failed_locations = proto.RepeatedField(proto.STRING, number=2) + + next_page_token = proto.Field(proto.STRING, number=3) + + +class PartialUpdateInstanceRequest(proto.Message): + r"""Request message for + BigtableInstanceAdmin.PartialUpdateInstance. + + Attributes: + instance (google.cloud.bigtable_admin_v2.types.Instance): + Required. The Instance which will (partially) + replace the current value. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The subset of Instance fields which + should be replaced. Must be explicitly set. + """ + + instance = proto.Field(proto.MESSAGE, number=1, message=gba_instance.Instance,) + + update_mask = proto.Field(proto.MESSAGE, number=2, message=field_mask.FieldMask,) + + +class DeleteInstanceRequest(proto.Message): + r"""Request message for BigtableInstanceAdmin.DeleteInstance. + + Attributes: + name (str): + Required. The unique name of the instance to be deleted. + Values are of the form + ``projects/{project}/instances/{instance}``. + """ + + name = proto.Field(proto.STRING, number=1) + + +class CreateClusterRequest(proto.Message): + r"""Request message for BigtableInstanceAdmin.CreateCluster. + + Attributes: + parent (str): + Required. The unique name of the instance in which to create + the new cluster. Values are of the form + ``projects/{project}/instances/{instance}``. + cluster_id (str): + Required. The ID to be used when referring to the new + cluster within its instance, e.g., just ``mycluster`` rather + than + ``projects/myproject/instances/myinstance/clusters/mycluster``. + cluster (google.cloud.bigtable_admin_v2.types.Cluster): + Required. The cluster to be created. Fields marked + ``OutputOnly`` must be left blank. + """ + + parent = proto.Field(proto.STRING, number=1) + + cluster_id = proto.Field(proto.STRING, number=2) + + cluster = proto.Field(proto.MESSAGE, number=3, message=gba_instance.Cluster,) + + +class GetClusterRequest(proto.Message): + r"""Request message for BigtableInstanceAdmin.GetCluster. + + Attributes: + name (str): + Required. The unique name of the requested cluster. Values + are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + """ + + name = proto.Field(proto.STRING, number=1) + + +class ListClustersRequest(proto.Message): + r"""Request message for BigtableInstanceAdmin.ListClusters. + + Attributes: + parent (str): + Required. 
The unique name of the instance for which a list + of clusters is requested. Values are of the form + ``projects/{project}/instances/{instance}``. Use + ``{instance} = '-'`` to list Clusters for all Instances in a + project, e.g., ``projects/myproject/instances/-``. + page_token (str): + DEPRECATED: This field is unused and ignored. + """ + + parent = proto.Field(proto.STRING, number=1) + + page_token = proto.Field(proto.STRING, number=2) + + +class ListClustersResponse(proto.Message): + r"""Response message for BigtableInstanceAdmin.ListClusters. + + Attributes: + clusters (Sequence[google.cloud.bigtable_admin_v2.types.Cluster]): + The list of requested clusters. + failed_locations (Sequence[str]): + Locations from which Cluster information could not be + retrieved, due to an outage or some other transient + condition. Clusters from these locations may be missing from + ``clusters``, or may only have partial information returned. + Values are of the form + ``projects//locations/`` + next_page_token (str): + DEPRECATED: This field is unused and ignored. + """ + + @property + def raw_page(self): + return self + + clusters = proto.RepeatedField( + proto.MESSAGE, number=1, message=gba_instance.Cluster, + ) + + failed_locations = proto.RepeatedField(proto.STRING, number=2) + + next_page_token = proto.Field(proto.STRING, number=3) + + +class DeleteClusterRequest(proto.Message): + r"""Request message for BigtableInstanceAdmin.DeleteCluster. + + Attributes: + name (str): + Required. The unique name of the cluster to be deleted. + Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + """ + + name = proto.Field(proto.STRING, number=1) + + +class CreateInstanceMetadata(proto.Message): + r"""The metadata for the Operation returned by CreateInstance. + + Attributes: + original_request (google.cloud.bigtable_admin_v2.types.CreateInstanceRequest): + The request that prompted the initiation of + this CreateInstance operation. + request_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the original request was + received. + finish_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the operation failed or was + completed successfully. + """ + + original_request = proto.Field( + proto.MESSAGE, number=1, message="CreateInstanceRequest", + ) + + request_time = proto.Field(proto.MESSAGE, number=2, message=timestamp.Timestamp,) + + finish_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,) + + +class UpdateInstanceMetadata(proto.Message): + r"""The metadata for the Operation returned by UpdateInstance. + + Attributes: + original_request (google.cloud.bigtable_admin_v2.types.PartialUpdateInstanceRequest): + The request that prompted the initiation of + this UpdateInstance operation. + request_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the original request was + received. + finish_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the operation failed or was + completed successfully. + """ + + original_request = proto.Field( + proto.MESSAGE, number=1, message="PartialUpdateInstanceRequest", + ) + + request_time = proto.Field(proto.MESSAGE, number=2, message=timestamp.Timestamp,) + + finish_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,) + + +class CreateClusterMetadata(proto.Message): + r"""The metadata for the Operation returned by CreateCluster. 
+ + Attributes: + original_request (google.cloud.bigtable_admin_v2.types.CreateClusterRequest): + The request that prompted the initiation of + this CreateCluster operation. + request_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the original request was + received. + finish_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the operation failed or was + completed successfully. + """ + + original_request = proto.Field( + proto.MESSAGE, number=1, message="CreateClusterRequest", + ) + + request_time = proto.Field(proto.MESSAGE, number=2, message=timestamp.Timestamp,) + + finish_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,) + + +class UpdateClusterMetadata(proto.Message): + r"""The metadata for the Operation returned by UpdateCluster. + + Attributes: + original_request (google.cloud.bigtable_admin_v2.types.Cluster): + The request that prompted the initiation of + this UpdateCluster operation. + request_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the original request was + received. + finish_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the operation failed or was + completed successfully. + """ + + original_request = proto.Field( + proto.MESSAGE, number=1, message=gba_instance.Cluster, + ) + + request_time = proto.Field(proto.MESSAGE, number=2, message=timestamp.Timestamp,) + + finish_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,) + + +class CreateAppProfileRequest(proto.Message): + r"""Request message for BigtableInstanceAdmin.CreateAppProfile. + + Attributes: + parent (str): + Required. The unique name of the instance in which to create + the new app profile. Values are of the form + ``projects/{project}/instances/{instance}``. + app_profile_id (str): + Required. The ID to be used when referring to the new app + profile within its instance, e.g., just ``myprofile`` rather + than + ``projects/myproject/instances/myinstance/appProfiles/myprofile``. + app_profile (google.cloud.bigtable_admin_v2.types.AppProfile): + Required. The app profile to be created. Fields marked + ``OutputOnly`` will be ignored. + ignore_warnings (bool): + If true, ignore safety checks when creating + the app profile. + """ + + parent = proto.Field(proto.STRING, number=1) + + app_profile_id = proto.Field(proto.STRING, number=2) + + app_profile = proto.Field(proto.MESSAGE, number=3, message=gba_instance.AppProfile,) + + ignore_warnings = proto.Field(proto.BOOL, number=4) + + +class GetAppProfileRequest(proto.Message): + r"""Request message for BigtableInstanceAdmin.GetAppProfile. + + Attributes: + name (str): + Required. The unique name of the requested app profile. + Values are of the form + ``projects/{project}/instances/{instance}/appProfiles/{app_profile}``. + """ + + name = proto.Field(proto.STRING, number=1) + + +class ListAppProfilesRequest(proto.Message): + r"""Request message for BigtableInstanceAdmin.ListAppProfiles. + + Attributes: + parent (str): + Required. The unique name of the instance for which a list + of app profiles is requested. Values are of the form + ``projects/{project}/instances/{instance}``. Use + ``{instance} = '-'`` to list AppProfiles for all Instances + in a project, e.g., ``projects/myproject/instances/-``. + page_size (int): + Maximum number of results per page. + + A page_size of zero lets the server choose the number of + items to return. A page_size which is strictly positive will + return at most that many items. 
A negative page_size will + cause an error. + + Following the first request, subsequent paginated calls are + not required to pass a page_size. If a page_size is set in + subsequent calls, it must match the page_size given in the + first request. + page_token (str): + The value of ``next_page_token`` returned by a previous + call. + """ + + parent = proto.Field(proto.STRING, number=1) + + page_size = proto.Field(proto.INT32, number=3) + + page_token = proto.Field(proto.STRING, number=2) + + +class ListAppProfilesResponse(proto.Message): + r"""Response message for BigtableInstanceAdmin.ListAppProfiles. + + Attributes: + app_profiles (Sequence[google.cloud.bigtable_admin_v2.types.AppProfile]): + The list of requested app profiles. + next_page_token (str): + Set if not all app profiles could be returned in a single + response. Pass this value to ``page_token`` in another + request to get the next page of results. + failed_locations (Sequence[str]): + Locations from which AppProfile information could not be + retrieved, due to an outage or some other transient + condition. AppProfiles from these locations may be missing + from ``app_profiles``. Values are of the form + ``projects//locations/`` + """ + + @property + def raw_page(self): + return self + + app_profiles = proto.RepeatedField( + proto.MESSAGE, number=1, message=gba_instance.AppProfile, + ) + + next_page_token = proto.Field(proto.STRING, number=2) + + failed_locations = proto.RepeatedField(proto.STRING, number=3) + + +class UpdateAppProfileRequest(proto.Message): + r"""Request message for BigtableInstanceAdmin.UpdateAppProfile. + + Attributes: + app_profile (google.cloud.bigtable_admin_v2.types.AppProfile): + Required. The app profile which will + (partially) replace the current value. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The subset of app profile fields + which should be replaced. If unset, all fields + will be replaced. + ignore_warnings (bool): + If true, ignore safety checks when updating + the app profile. + """ + + app_profile = proto.Field(proto.MESSAGE, number=1, message=gba_instance.AppProfile,) + + update_mask = proto.Field(proto.MESSAGE, number=2, message=field_mask.FieldMask,) + + ignore_warnings = proto.Field(proto.BOOL, number=3) + + +class DeleteAppProfileRequest(proto.Message): + r"""Request message for BigtableInstanceAdmin.DeleteAppProfile. + + Attributes: + name (str): + Required. The unique name of the app profile to be deleted. + Values are of the form + ``projects/{project}/instances/{instance}/appProfiles/{app_profile}``. + ignore_warnings (bool): + Required. If true, ignore safety checks when + deleting the app profile. + """ + + name = proto.Field(proto.STRING, number=1) + + ignore_warnings = proto.Field(proto.BOOL, number=2) + + +class UpdateAppProfileMetadata(proto.Message): + r"""The metadata for the Operation returned by UpdateAppProfile.""" + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py new file mode 100644 index 000000000000..ac146b798b21 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py @@ -0,0 +1,912 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.bigtable_admin_v2.types import common +from google.cloud.bigtable_admin_v2.types import table as gba_table +from google.protobuf import duration_pb2 as duration # type: ignore +from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +__protobuf__ = proto.module( + package="google.bigtable.admin.v2", + manifest={ + "RestoreTableRequest", + "RestoreTableMetadata", + "OptimizeRestoredTableMetadata", + "CreateTableRequest", + "CreateTableFromSnapshotRequest", + "DropRowRangeRequest", + "ListTablesRequest", + "ListTablesResponse", + "GetTableRequest", + "DeleteTableRequest", + "ModifyColumnFamiliesRequest", + "GenerateConsistencyTokenRequest", + "GenerateConsistencyTokenResponse", + "CheckConsistencyRequest", + "CheckConsistencyResponse", + "SnapshotTableRequest", + "GetSnapshotRequest", + "ListSnapshotsRequest", + "ListSnapshotsResponse", + "DeleteSnapshotRequest", + "SnapshotTableMetadata", + "CreateTableFromSnapshotMetadata", + "CreateBackupRequest", + "CreateBackupMetadata", + "UpdateBackupRequest", + "GetBackupRequest", + "DeleteBackupRequest", + "ListBackupsRequest", + "ListBackupsResponse", + }, +) + + +class RestoreTableRequest(proto.Message): + r"""The request for + [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. + + Attributes: + parent (str): + Required. The name of the instance in which to create the + restored table. This instance must be the parent of the + source backup. Values are of the form + ``projects//instances/``. + table_id (str): + Required. The id of the table to create and restore to. This + table must not already exist. The ``table_id`` appended to + ``parent`` forms the full table name of the form + ``projects//instances//tables/``. + backup (str): + Name of the backup from which to restore. Values are of the + form + ``projects//instances//clusters//backups/``. + """ + + parent = proto.Field(proto.STRING, number=1) + + table_id = proto.Field(proto.STRING, number=2) + + backup = proto.Field(proto.STRING, number=3, oneof="source") + + +class RestoreTableMetadata(proto.Message): + r"""Metadata type for the long-running operation returned by + [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. + + Attributes: + name (str): + Name of the table being created and restored + to. + source_type (google.cloud.bigtable_admin_v2.types.RestoreSourceType): + The type of the restore source. + backup_info (google.cloud.bigtable_admin_v2.types.BackupInfo): + + optimize_table_operation_name (str): + If exists, the name of the long-running operation that will + be used to track the post-restore optimization process to + optimize the performance of the restored table. The metadata + type of the long-running operation is + [OptimizeRestoreTableMetadata][]. The response type is + [Empty][google.protobuf.Empty]. This long-running operation + may be automatically created by the system if applicable + after the RestoreTable long-running operation completes + successfully. 
This operation may not be created if the table + is already optimized or the restore was not successful. + progress (google.cloud.bigtable_admin_v2.types.OperationProgress): + The progress of the + [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable] + operation. + """ + + name = proto.Field(proto.STRING, number=1) + + source_type = proto.Field(proto.ENUM, number=2, enum=gba_table.RestoreSourceType,) + + backup_info = proto.Field( + proto.MESSAGE, number=3, oneof="source_info", message=gba_table.BackupInfo, + ) + + optimize_table_operation_name = proto.Field(proto.STRING, number=4) + + progress = proto.Field(proto.MESSAGE, number=5, message=common.OperationProgress,) + + +class OptimizeRestoredTableMetadata(proto.Message): + r"""Metadata type for the long-running operation used to track + the progress of optimizations performed on a newly restored + table. This long-running operation is automatically created by + the system after the successful completion of a table restore, + and cannot be cancelled. + + Attributes: + name (str): + Name of the restored table being optimized. + progress (google.cloud.bigtable_admin_v2.types.OperationProgress): + The progress of the post-restore + optimizations. + """ + + name = proto.Field(proto.STRING, number=1) + + progress = proto.Field(proto.MESSAGE, number=2, message=common.OperationProgress,) + + +class CreateTableRequest(proto.Message): + r"""Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] + + Attributes: + parent (str): + Required. The unique name of the instance in which to create + the table. Values are of the form + ``projects/{project}/instances/{instance}``. + table_id (str): + Required. The name by which the new table should be referred + to within the parent instance, e.g., ``foobar`` rather than + ``{parent}/tables/foobar``. Maximum 50 characters. + table (google.cloud.bigtable_admin_v2.types.Table): + Required. The Table to create. + initial_splits (Sequence[google.cloud.bigtable_admin_v2.types.CreateTableRequest.Split]): + The optional list of row keys that will be used to initially + split the table into several tablets (tablets are similar to + HBase regions). Given two split keys, ``s1`` and ``s2``, + three tablets will be created, spanning the key ranges: + ``[, s1), [s1, s2), [s2, )``. + + Example: + + - Row keys := + ``["a", "apple", "custom", "customer_1", "customer_2",`` + ``"other", "zz"]`` + - initial_split_keys := + ``["apple", "customer_1", "customer_2", "other"]`` + - Key assignment: + + - Tablet 1 ``[, apple) => {"a"}.`` + - Tablet 2 + ``[apple, customer_1) => {"apple", "custom"}.`` + - Tablet 3 + ``[customer_1, customer_2) => {"customer_1"}.`` + - Tablet 4 ``[customer_2, other) => {"customer_2"}.`` + - Tablet 5 ``[other, ) => {"other", "zz"}.`` + """ + + class Split(proto.Message): + r"""An initial split point for a newly created table. + + Attributes: + key (bytes): + Row key to use as an initial tablet boundary. 
+ """ + + key = proto.Field(proto.BYTES, number=1) + + parent = proto.Field(proto.STRING, number=1) + + table_id = proto.Field(proto.STRING, number=2) + + table = proto.Field(proto.MESSAGE, number=3, message=gba_table.Table,) + + initial_splits = proto.RepeatedField(proto.MESSAGE, number=4, message=Split,) + + +class CreateTableFromSnapshotRequest(proto.Message): + r"""Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot] + + Note: This is a private alpha release of Cloud Bigtable snapshots. + This feature is not currently available to most Cloud Bigtable + customers. This feature might be changed in backward-incompatible + ways and is not recommended for production use. It is not subject to + any SLA or deprecation policy. + + Attributes: + parent (str): + Required. The unique name of the instance in which to create + the table. Values are of the form + ``projects/{project}/instances/{instance}``. + table_id (str): + Required. The name by which the new table should be referred + to within the parent instance, e.g., ``foobar`` rather than + ``{parent}/tables/foobar``. + source_snapshot (str): + Required. The unique name of the snapshot from which to + restore the table. The snapshot and the table must be in the + same instance. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. + """ + + parent = proto.Field(proto.STRING, number=1) + + table_id = proto.Field(proto.STRING, number=2) + + source_snapshot = proto.Field(proto.STRING, number=3) + + +class DropRowRangeRequest(proto.Message): + r"""Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] + + Attributes: + name (str): + Required. The unique name of the table on which to drop a + range of rows. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + row_key_prefix (bytes): + Delete all rows that start with this row key + prefix. Prefix cannot be zero length. + delete_all_data_from_table (bool): + Delete all rows in the table. Setting this to + false is a no-op. + """ + + name = proto.Field(proto.STRING, number=1) + + row_key_prefix = proto.Field(proto.BYTES, number=2, oneof="target") + + delete_all_data_from_table = proto.Field(proto.BOOL, number=3, oneof="target") + + +class ListTablesRequest(proto.Message): + r"""Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] + + Attributes: + parent (str): + Required. The unique name of the instance for which tables + should be listed. Values are of the form + ``projects/{project}/instances/{instance}``. + view (google.cloud.bigtable_admin_v2.types.Table.View): + The view to be applied to the returned tables' fields. Only + NAME_ONLY view (default) and REPLICATION_VIEW are supported. + page_size (int): + Maximum number of results per page. + + A page_size of zero lets the server choose the number of + items to return. A page_size which is strictly positive will + return at most that many items. A negative page_size will + cause an error. + + Following the first request, subsequent paginated calls are + not required to pass a page_size. If a page_size is set in + subsequent calls, it must match the page_size given in the + first request. + page_token (str): + The value of ``next_page_token`` returned by a previous + call. 
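The initial_splits example in the CreateTableRequest docstring above can be checked with plain Python. This is a standalone sketch, not library code: bisect_right reproduces the half-open tablet ranges ``[, s1), [s1, s2), ..., [sN, )``.

    from bisect import bisect_right

    splits = [b"apple", b"customer_1", b"customer_2", b"other"]
    row_keys = [b"a", b"apple", b"custom", b"customer_1",
                b"customer_2", b"other", b"zz"]

    # Tablet i holds keys in [splits[i-1], splits[i]); tablet 0 is [, splits[0]).
    tablets = {i: [] for i in range(len(splits) + 1)}
    for key in row_keys:
        tablets[bisect_right(splits, key)].append(key)

    assert tablets[0] == [b"a"]                     # [, apple)
    assert tablets[1] == [b"apple", b"custom"]      # [apple, customer_1)
    assert tablets[2] == [b"customer_1"]            # [customer_1, customer_2)
    assert tablets[3] == [b"customer_2"]            # [customer_2, other)
    assert tablets[4] == [b"other", b"zz"]          # [other, )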
+ """ + + parent = proto.Field(proto.STRING, number=1) + + view = proto.Field(proto.ENUM, number=2, enum=gba_table.Table.View,) + + page_size = proto.Field(proto.INT32, number=4) + + page_token = proto.Field(proto.STRING, number=3) + + +class ListTablesResponse(proto.Message): + r"""Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] + + Attributes: + tables (Sequence[google.cloud.bigtable_admin_v2.types.Table]): + The tables present in the requested instance. + next_page_token (str): + Set if not all tables could be returned in a single + response. Pass this value to ``page_token`` in another + request to get the next page of results. + """ + + @property + def raw_page(self): + return self + + tables = proto.RepeatedField(proto.MESSAGE, number=1, message=gba_table.Table,) + + next_page_token = proto.Field(proto.STRING, number=2) + + +class GetTableRequest(proto.Message): + r"""Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.GetTable][google.bigtable.admin.v2.BigtableTableAdmin.GetTable] + + Attributes: + name (str): + Required. The unique name of the requested table. Values are + of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + view (google.cloud.bigtable_admin_v2.types.Table.View): + The view to be applied to the returned table's fields. + Defaults to ``SCHEMA_VIEW`` if unspecified. + """ + + name = proto.Field(proto.STRING, number=1) + + view = proto.Field(proto.ENUM, number=2, enum=gba_table.Table.View,) + + +class DeleteTableRequest(proto.Message): + r"""Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable] + + Attributes: + name (str): + Required. The unique name of the table to be deleted. Values + are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + """ + + name = proto.Field(proto.STRING, number=1) + + +class ModifyColumnFamiliesRequest(proto.Message): + r"""Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies] + + Attributes: + name (str): + Required. The unique name of the table whose families should + be modified. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + modifications (Sequence[google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest.Modification]): + Required. Modifications to be atomically + applied to the specified table's families. + Entries are applied in order, meaning that + earlier modifications can be masked by later + ones (in the case of repeated updates to the + same family, for example). + """ + + class Modification(proto.Message): + r"""A create, update, or delete of a particular column family. + + Attributes: + id (str): + The ID of the column family to be modified. + create (google.cloud.bigtable_admin_v2.types.ColumnFamily): + Create a new column family with the specified + schema, or fail if one already exists with the + given ID. + update (google.cloud.bigtable_admin_v2.types.ColumnFamily): + Update an existing column family to the + specified schema, or fail if no column family + exists with the given ID. + drop (bool): + Drop (delete) the column family with the + given ID, or fail if no such family exists. 
+ """ + + id = proto.Field(proto.STRING, number=1) + + create = proto.Field( + proto.MESSAGE, number=2, oneof="mod", message=gba_table.ColumnFamily, + ) + + update = proto.Field( + proto.MESSAGE, number=3, oneof="mod", message=gba_table.ColumnFamily, + ) + + drop = proto.Field(proto.BOOL, number=4, oneof="mod") + + name = proto.Field(proto.STRING, number=1) + + modifications = proto.RepeatedField(proto.MESSAGE, number=2, message=Modification,) + + +class GenerateConsistencyTokenRequest(proto.Message): + r"""Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] + + Attributes: + name (str): + Required. The unique name of the Table for which to create a + consistency token. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + """ + + name = proto.Field(proto.STRING, number=1) + + +class GenerateConsistencyTokenResponse(proto.Message): + r"""Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] + + Attributes: + consistency_token (str): + The generated consistency token. + """ + + consistency_token = proto.Field(proto.STRING, number=1) + + +class CheckConsistencyRequest(proto.Message): + r"""Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] + + Attributes: + name (str): + Required. The unique name of the Table for which to check + replication consistency. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + consistency_token (str): + Required. The token created using + GenerateConsistencyToken for the Table. + """ + + name = proto.Field(proto.STRING, number=1) + + consistency_token = proto.Field(proto.STRING, number=2) + + +class CheckConsistencyResponse(proto.Message): + r"""Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] + + Attributes: + consistent (bool): + True only if the token is consistent. A token + is consistent if replication has caught up with + the restrictions specified in the request. + """ + + consistent = proto.Field(proto.BOOL, number=1) + + +class SnapshotTableRequest(proto.Message): + r"""Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable][google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable] + + Note: This is a private alpha release of Cloud Bigtable snapshots. + This feature is not currently available to most Cloud Bigtable + customers. This feature might be changed in backward-incompatible + ways and is not recommended for production use. It is not subject to + any SLA or deprecation policy. + + Attributes: + name (str): + Required. The unique name of the table to have the snapshot + taken. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + cluster (str): + Required. The name of the cluster where the snapshot will be + created in. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + snapshot_id (str): + Required. The ID by which the new snapshot should be + referred to within the parent cluster, e.g., ``mysnapshot`` + of the form: ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*`` rather than + ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/mysnapshot``. 
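Taken together with the GenerateConsistencyToken and CheckConsistency stubs on the async transport earlier in this patch, the two message pairs above support a generate-then-poll workflow. A hedged sketch, assuming an already-initialized BigtableTableAdminGrpcAsyncIOTransport (applications would normally go through the generated client rather than the raw transport):

    import asyncio

    from google.cloud.bigtable_admin_v2.types import bigtable_table_admin


    async def wait_for_replication(transport, table_name, poll_seconds=5.0):
        # Create a token covering all mutations that finished before this call.
        token = await transport.generate_consistency_token(
            bigtable_table_admin.GenerateConsistencyTokenRequest(name=table_name)
        )
        # Poll until replication has caught up with that token.
        while True:
            result = await transport.check_consistency(
                bigtable_table_admin.CheckConsistencyRequest(
                    name=table_name,
                    consistency_token=token.consistency_token,
                )
            )
            if result.consistent:
                return
            await asyncio.sleep(poll_seconds)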
+ ttl (google.protobuf.duration_pb2.Duration): + The amount of time that the new snapshot can + stay active after it is created. Once 'ttl' + expires, the snapshot will get deleted. The + maximum amount of time a snapshot can stay + active is 7 days. If 'ttl' is not specified, the + default value of 24 hours will be used. + description (str): + Description of the snapshot. + """ + + name = proto.Field(proto.STRING, number=1) + + cluster = proto.Field(proto.STRING, number=2) + + snapshot_id = proto.Field(proto.STRING, number=3) + + ttl = proto.Field(proto.MESSAGE, number=4, message=duration.Duration,) + + description = proto.Field(proto.STRING, number=5) + + +class GetSnapshotRequest(proto.Message): + r"""Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot] + + Note: This is a private alpha release of Cloud Bigtable snapshots. + This feature is not currently available to most Cloud Bigtable + customers. This feature might be changed in backward-incompatible + ways and is not recommended for production use. It is not subject to + any SLA or deprecation policy. + + Attributes: + name (str): + Required. The unique name of the requested snapshot. Values + are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. + """ + + name = proto.Field(proto.STRING, number=1) + + +class ListSnapshotsRequest(proto.Message): + r"""Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] + + Note: This is a private alpha release of Cloud Bigtable snapshots. + This feature is not currently available to most Cloud Bigtable + customers. This feature might be changed in backward-incompatible + ways and is not recommended for production use. It is not subject to + any SLA or deprecation policy. + + Attributes: + parent (str): + Required. The unique name of the cluster for which snapshots + should be listed. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + Use ``{cluster} = '-'`` to list snapshots for all clusters + in an instance, e.g., + ``projects/{project}/instances/{instance}/clusters/-``. + page_size (int): + The maximum number of snapshots to return per + page. CURRENTLY UNIMPLEMENTED AND IGNORED. + page_token (str): + The value of ``next_page_token`` returned by a previous + call. + """ + + parent = proto.Field(proto.STRING, number=1) + + page_size = proto.Field(proto.INT32, number=2) + + page_token = proto.Field(proto.STRING, number=3) + + +class ListSnapshotsResponse(proto.Message): + r"""Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] + + Note: This is a private alpha release of Cloud Bigtable snapshots. + This feature is not currently available to most Cloud Bigtable + customers. This feature might be changed in backward-incompatible + ways and is not recommended for production use. It is not subject to + any SLA or deprecation policy. + + Attributes: + snapshots (Sequence[google.cloud.bigtable_admin_v2.types.Snapshot]): + The snapshots present in the requested + cluster. + next_page_token (str): + Set if not all snapshots could be returned in a single + response. Pass this value to ``page_token`` in another + request to get the next page of results. 
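A hedged sketch of a snapshot request built from the fields above; the resource names and description are placeholders, and the ``ttl`` uses the documented 24-hour default expressed as a protobuf ``Duration``.

from google.protobuf import duration_pb2
from google.cloud.bigtable_admin_v2 import types

snapshot_req = types.SnapshotTableRequest(
    name="projects/my-project/instances/my-instance/tables/my-table",
    cluster="projects/my-project/instances/my-instance/clusters/my-cluster",
    snapshot_id="mysnapshot",
    # 24 hours; the documented maximum a snapshot can stay active is 7 days.
    ttl=duration_pb2.Duration(seconds=24 * 60 * 60),
    description="pre-migration checkpoint",
)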
+ """ + + @property + def raw_page(self): + return self + + snapshots = proto.RepeatedField( + proto.MESSAGE, number=1, message=gba_table.Snapshot, + ) + + next_page_token = proto.Field(proto.STRING, number=2) + + +class DeleteSnapshotRequest(proto.Message): + r"""Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot] + + Note: This is a private alpha release of Cloud Bigtable snapshots. + This feature is not currently available to most Cloud Bigtable + customers. This feature might be changed in backward-incompatible + ways and is not recommended for production use. It is not subject to + any SLA or deprecation policy. + + Attributes: + name (str): + Required. The unique name of the snapshot to be deleted. + Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. + """ + + name = proto.Field(proto.STRING, number=1) + + +class SnapshotTableMetadata(proto.Message): + r"""The metadata for the Operation returned by SnapshotTable. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to most Cloud + Bigtable customers. This feature might be changed in backward- + incompatible ways and is not recommended for production use. It + is not subject to any SLA or deprecation policy. + + Attributes: + original_request (google.cloud.bigtable_admin_v2.types.SnapshotTableRequest): + The request that prompted the initiation of + this SnapshotTable operation. + request_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the original request was + received. + finish_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the operation failed or was + completed successfully. + """ + + original_request = proto.Field( + proto.MESSAGE, number=1, message="SnapshotTableRequest", + ) + + request_time = proto.Field(proto.MESSAGE, number=2, message=timestamp.Timestamp,) + + finish_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,) + + +class CreateTableFromSnapshotMetadata(proto.Message): + r"""The metadata for the Operation returned by + CreateTableFromSnapshot. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to most Cloud + Bigtable customers. This feature might be changed in backward- + incompatible ways and is not recommended for production use. It + is not subject to any SLA or deprecation policy. + + Attributes: + original_request (google.cloud.bigtable_admin_v2.types.CreateTableFromSnapshotRequest): + The request that prompted the initiation of + this CreateTableFromSnapshot operation. + request_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the original request was + received. + finish_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the operation failed or was + completed successfully. + """ + + original_request = proto.Field( + proto.MESSAGE, number=1, message="CreateTableFromSnapshotRequest", + ) + + request_time = proto.Field(proto.MESSAGE, number=2, message=timestamp.Timestamp,) + + finish_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,) + + +class CreateBackupRequest(proto.Message): + r"""The request for + [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup]. + + Attributes: + parent (str): + Required. This must be one of the clusters in the instance + in which this table is located. 
The backup will be stored in + this cluster. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + backup_id (str): + Required. The id of the backup to be created. The + ``backup_id`` along with the parent ``parent`` are combined + as {parent}/backups/{backup_id} to create the full backup + name, of the form: + ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``. + This string must be between 1 and 50 characters in length + and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]*. + backup (google.cloud.bigtable_admin_v2.types.Backup): + Required. The backup to create. + """ + + parent = proto.Field(proto.STRING, number=1) + + backup_id = proto.Field(proto.STRING, number=2) + + backup = proto.Field(proto.MESSAGE, number=3, message=gba_table.Backup,) + + +class CreateBackupMetadata(proto.Message): + r"""Metadata type for the operation returned by + [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup]. + + Attributes: + name (str): + The name of the backup being created. + source_table (str): + The name of the table the backup is created + from. + start_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which this operation started. + end_time (google.protobuf.timestamp_pb2.Timestamp): + If set, the time at which this operation + finished or was cancelled. + """ + + name = proto.Field(proto.STRING, number=1) + + source_table = proto.Field(proto.STRING, number=2) + + start_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,) + + end_time = proto.Field(proto.MESSAGE, number=4, message=timestamp.Timestamp,) + + +class UpdateBackupRequest(proto.Message): + r"""The request for + [UpdateBackup][google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup]. + + Attributes: + backup (google.cloud.bigtable_admin_v2.types.Backup): + Required. The backup to update. ``backup.name``, and the + fields to be updated as specified by ``update_mask`` are + required. Other fields are ignored. Update is only supported + for the following fields: + + - ``backup.expire_time``. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. A mask specifying which fields (e.g. + ``expire_time``) in the Backup resource should be updated. + This mask is relative to the Backup resource, not to the + request message. The field mask must always be specified; + this prevents any future fields from being erased + accidentally by clients that do not know about them. + """ + + backup = proto.Field(proto.MESSAGE, number=1, message=gba_table.Backup,) + + update_mask = proto.Field(proto.MESSAGE, number=2, message=field_mask.FieldMask,) + + +class GetBackupRequest(proto.Message): + r"""The request for + [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup]. + + Attributes: + name (str): + Required. Name of the backup. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. + """ + + name = proto.Field(proto.STRING, number=1) + + +class DeleteBackupRequest(proto.Message): + r"""The request for + [DeleteBackup][google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup]. + + Attributes: + name (str): + Required. Name of the backup to delete. Values are of the + form + ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. + """ + + name = proto.Field(proto.STRING, number=1) + + +class ListBackupsRequest(proto.Message): + r"""The request for + [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. 
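For the backup messages above, a rough sketch (placeholder names, one-week expiry) of a create followed by an expiry-only update, which is the sole field ``UpdateBackup`` documents as mutable:

import datetime

from google.protobuf import field_mask_pb2, timestamp_pb2
from google.cloud.bigtable_admin_v2 import types

expire = timestamp_pb2.Timestamp()
expire.FromDatetime(datetime.datetime.utcnow() + datetime.timedelta(days=7))

create_req = types.CreateBackupRequest(
    parent="projects/my-project/instances/my-instance/clusters/my-cluster",
    backup_id="my-backup",
    backup=types.Backup(
        source_table="projects/my-project/instances/my-instance/tables/my-table",
        expire_time=expire,
    ),
)

# The field mask names only expire_time, so other Backup fields are ignored.
update_req = types.UpdateBackupRequest(
    backup=types.Backup(
        name=create_req.parent + "/backups/" + create_req.backup_id,
        expire_time=expire,
    ),
    update_mask=field_mask_pb2.FieldMask(paths=["expire_time"]),
)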
+ + Attributes: + parent (str): + Required. The cluster to list backups from. Values are of + the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + Use ``{cluster} = '-'`` to list backups for all clusters in + an instance, e.g., + ``projects/{project}/instances/{instance}/clusters/-``. + filter (str): + A filter expression that filters backups listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be <, >, <=, >=, !=, + =, or :. Colon ':' represents a HAS operator which is + roughly synonymous with equality. Filter rules are case + insensitive. + + The fields eligible for filtering are: + + - ``name`` + - ``source_table`` + - ``state`` + - ``start_time`` (and values are of the format + YYYY-MM-DDTHH:MM:SSZ) + - ``end_time`` (and values are of the format + YYYY-MM-DDTHH:MM:SSZ) + - ``expire_time`` (and values are of the format + YYYY-MM-DDTHH:MM:SSZ) + - ``size_bytes`` + + To filter on multiple expressions, provide each separate + expression within parentheses. By default, each expression + is an AND expression. However, you can include AND, OR, and + NOT expressions explicitly. + + Some examples of using filters are: + + - ``name:"exact"`` --> The backup's name is the string + "exact". + - ``name:howl`` --> The backup's name contains the string + "howl". + - ``source_table:prod`` --> The source_table's name + contains the string "prod". + - ``state:CREATING`` --> The backup is pending creation. + - ``state:READY`` --> The backup is fully created and ready + for use. + - ``(name:howl) AND (start_time < \"2018-03-28T14:50:00Z\")`` + --> The backup name contains the string "howl" and + start_time of the backup is before 2018-03-28T14:50:00Z. + - ``size_bytes > 10000000000`` --> The backup's size is + greater than 10GB + order_by (str): + An expression for specifying the sort order of the results + of the request. The string value should specify one or more + fields in [Backup][google.bigtable.admin.v2.Backup]. The + full syntax is described at https://aip.dev/132#ordering. + + Fields supported are: \* name \* source_table \* expire_time + \* start_time \* end_time \* size_bytes \* state + + For example, "start_time". The default sorting order is + ascending. To specify descending order for the field, a + suffix " desc" should be appended to the field name. For + example, "start_time desc". Redundant space characters in + the syntax are insigificant. + + If order_by is empty, results will be sorted by + ``start_time`` in descending order starting from the most + recently created backup. + page_size (int): + Number of backups to be returned in the + response. If 0 or less, defaults to the server's + maximum allowed page size. + page_token (str): + If non-empty, ``page_token`` should contain a + [next_page_token][google.bigtable.admin.v2.ListBackupsResponse.next_page_token] + from a previous + [ListBackupsResponse][google.bigtable.admin.v2.ListBackupsResponse] + to the same ``parent`` and with the same ``filter``. 
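Since the ``filter`` and ``order_by`` grammar described above is easy to get wrong, here is a small hedged example of a request that lists ready backups across all clusters, newest first (resource names and the cutoff date are placeholders):

from google.cloud.bigtable_admin_v2 import types

list_backups_req = types.ListBackupsRequest(
    # '-' lists backups for every cluster in the instance.
    parent="projects/my-project/instances/my-instance/clusters/-",
    filter='(state:READY) AND (start_time > "2020-01-01T00:00:00Z")',
    order_by="start_time desc",
    page_size=50,
)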
+ """ + + parent = proto.Field(proto.STRING, number=1) + + filter = proto.Field(proto.STRING, number=2) + + order_by = proto.Field(proto.STRING, number=3) + + page_size = proto.Field(proto.INT32, number=4) + + page_token = proto.Field(proto.STRING, number=5) + + +class ListBackupsResponse(proto.Message): + r"""The response for + [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. + + Attributes: + backups (Sequence[google.cloud.bigtable_admin_v2.types.Backup]): + The list of matching backups. + next_page_token (str): + ``next_page_token`` can be sent in a subsequent + [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups] + call to fetch more of the matching backups. + """ + + @property + def raw_page(self): + return self + + backups = proto.RepeatedField(proto.MESSAGE, number=1, message=gba_table.Backup,) + + next_page_token = proto.Field(proto.STRING, number=2) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/common.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/common.py new file mode 100644 index 000000000000..43d500dc0a14 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/common.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +__protobuf__ = proto.module( + package="google.bigtable.admin.v2", manifest={"StorageType", "OperationProgress",}, +) + + +class StorageType(proto.Enum): + r"""Storage media types for persisting Bigtable data.""" + STORAGE_TYPE_UNSPECIFIED = 0 + SSD = 1 + HDD = 2 + + +class OperationProgress(proto.Message): + r"""Encapsulates progress related information for a Cloud + Bigtable long running operation. + + Attributes: + progress_percent (int): + Percent completion of the operation. + Values are between 0 and 100 inclusive. + start_time (google.protobuf.timestamp_pb2.Timestamp): + Time the request was received. + end_time (google.protobuf.timestamp_pb2.Timestamp): + If set, the time at which this operation + failed or was completed successfully. 
+ """ + + progress_percent = proto.Field(proto.INT32, number=1) + + start_time = proto.Field(proto.MESSAGE, number=2, message=timestamp.Timestamp,) + + end_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py new file mode 100644 index 000000000000..ddef8a0d180c --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py @@ -0,0 +1,209 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.bigtable_admin_v2.types import common + + +__protobuf__ = proto.module( + package="google.bigtable.admin.v2", manifest={"Instance", "Cluster", "AppProfile",}, +) + + +class Instance(proto.Message): + r"""A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] + and the resources that serve them. All tables in an instance are + served from all [Clusters][google.bigtable.admin.v2.Cluster] in the + instance. + + Attributes: + name (str): + The unique name of the instance. Values are of the form + ``projects/{project}/instances/[a-z][a-z0-9\\-]+[a-z0-9]``. + display_name (str): + Required. The descriptive name for this + instance as it appears in UIs. Can be changed at + any time, but should be kept globally unique to + avoid confusion. + state (google.cloud.bigtable_admin_v2.types.Instance.State): + (``OutputOnly``) The current state of the instance. + type_ (google.cloud.bigtable_admin_v2.types.Instance.Type): + The type of the instance. Defaults to ``PRODUCTION``. + labels (Sequence[google.cloud.bigtable_admin_v2.types.Instance.LabelsEntry]): + Labels are a flexible and lightweight mechanism for + organizing cloud resources into groups that reflect a + customer's organizational needs and deployment strategies. + They can be used to filter resources and aggregate metrics. + + - Label keys must be between 1 and 63 characters long and + must conform to the regular expression: + ``[\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}``. + - Label values must be between 0 and 63 characters long and + must conform to the regular expression: + ``[\p{Ll}\p{Lo}\p{N}_-]{0,63}``. + - No more than 64 labels can be associated with a given + resource. + - Keys and values must both be under 128 bytes. 
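To make the label and type constraints concrete, a minimal, assumed example of an ``Instance`` message (the display name and labels are placeholders; ``type`` is exposed as ``type_`` by proto-plus):

from google.cloud.bigtable_admin_v2 import types

instance = types.Instance(
    display_name="My dev instance",
    type_=types.Instance.Type.DEVELOPMENT,
    # Keys and values must match the documented regexes and stay under 128 bytes.
    labels={"env": "dev", "team": "storage"},
)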
+ """ + + class State(proto.Enum): + r"""Possible states of an instance.""" + STATE_NOT_KNOWN = 0 + READY = 1 + CREATING = 2 + + class Type(proto.Enum): + r"""The type of the instance.""" + TYPE_UNSPECIFIED = 0 + PRODUCTION = 1 + DEVELOPMENT = 2 + + name = proto.Field(proto.STRING, number=1) + + display_name = proto.Field(proto.STRING, number=2) + + state = proto.Field(proto.ENUM, number=3, enum=State,) + + type_ = proto.Field(proto.ENUM, number=4, enum=Type,) + + labels = proto.MapField(proto.STRING, proto.STRING, number=5) + + +class Cluster(proto.Message): + r"""A resizable group of nodes in a particular cloud location, capable + of serving all [Tables][google.bigtable.admin.v2.Table] in the + parent [Instance][google.bigtable.admin.v2.Instance]. + + Attributes: + name (str): + The unique name of the cluster. Values are of the form + ``projects/{project}/instances/{instance}/clusters/[a-z][-a-z0-9]*``. + location (str): + (``CreationOnly``) The location where this cluster's nodes + and storage reside. For best performance, clients should be + located as close as possible to this cluster. Currently only + zones are supported, so values should be of the form + ``projects/{project}/locations/{zone}``. + state (google.cloud.bigtable_admin_v2.types.Cluster.State): + The current state of the cluster. + serve_nodes (int): + Required. The number of nodes allocated to + this cluster. More nodes enable higher + throughput and more consistent performance. + default_storage_type (google.cloud.bigtable_admin_v2.types.StorageType): + (``CreationOnly``) The type of storage used by this cluster + to serve its parent instance's tables, unless explicitly + overridden. + """ + + class State(proto.Enum): + r"""Possible states of a cluster.""" + STATE_NOT_KNOWN = 0 + READY = 1 + CREATING = 2 + RESIZING = 3 + DISABLED = 4 + + name = proto.Field(proto.STRING, number=1) + + location = proto.Field(proto.STRING, number=2) + + state = proto.Field(proto.ENUM, number=3, enum=State,) + + serve_nodes = proto.Field(proto.INT32, number=4) + + default_storage_type = proto.Field(proto.ENUM, number=5, enum=common.StorageType,) + + +class AppProfile(proto.Message): + r"""A configuration object describing how Cloud Bigtable should + treat traffic from a particular end user application. + + Attributes: + name (str): + (``OutputOnly``) The unique name of the app profile. Values + are of the form + ``projects//instances//appProfiles/[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + etag (str): + Strongly validated etag for optimistic concurrency control. + Preserve the value returned from ``GetAppProfile`` when + calling ``UpdateAppProfile`` to fail the request if there + has been a modification in the mean time. The + ``update_mask`` of the request need not include ``etag`` for + this protection to apply. See + `Wikipedia `__ and + `RFC + 7232 `__ + for more details. + description (str): + Optional long form description of the use + case for this AppProfile. + multi_cluster_routing_use_any (google.cloud.bigtable_admin_v2.types.AppProfile.MultiClusterRoutingUseAny): + Use a multi-cluster routing policy. + single_cluster_routing (google.cloud.bigtable_admin_v2.types.AppProfile.SingleClusterRouting): + Use a single-cluster routing policy. + """ + + class MultiClusterRoutingUseAny(proto.Message): + r"""Read/write requests are routed to the nearest cluster in the + instance, and will fail over to the nearest cluster that is + available in the event of transient errors or delays. Clusters + in a region are considered equidistant. 
Choosing this option + sacrifices read-your-writes consistency to improve availability. + """ + + class SingleClusterRouting(proto.Message): + r"""Unconditionally routes all read/write requests to a specific + cluster. This option preserves read-your-writes consistency but + does not improve availability. + + Attributes: + cluster_id (str): + The cluster to which read/write requests + should be routed. + allow_transactional_writes (bool): + Whether or not ``CheckAndMutateRow`` and + ``ReadModifyWriteRow`` requests are allowed by this app + profile. It is unsafe to send these requests to the same + table/row/column in multiple clusters. + """ + + cluster_id = proto.Field(proto.STRING, number=1) + + allow_transactional_writes = proto.Field(proto.BOOL, number=2) + + name = proto.Field(proto.STRING, number=1) + + etag = proto.Field(proto.STRING, number=2) + + description = proto.Field(proto.STRING, number=3) + + multi_cluster_routing_use_any = proto.Field( + proto.MESSAGE, + number=5, + oneof="routing_policy", + message=MultiClusterRoutingUseAny, + ) + + single_cluster_routing = proto.Field( + proto.MESSAGE, number=6, oneof="routing_policy", message=SingleClusterRouting, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py new file mode 100644 index 000000000000..96d7750f718f --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py @@ -0,0 +1,376 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.protobuf import duration_pb2 as duration # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +__protobuf__ = proto.module( + package="google.bigtable.admin.v2", + manifest={ + "RestoreSourceType", + "RestoreInfo", + "Table", + "ColumnFamily", + "GcRule", + "Snapshot", + "Backup", + "BackupInfo", + }, +) + + +class RestoreSourceType(proto.Enum): + r"""Indicates the type of the restore source.""" + RESTORE_SOURCE_TYPE_UNSPECIFIED = 0 + BACKUP = 1 + + +class RestoreInfo(proto.Message): + r"""Information about a table restore. + + Attributes: + source_type (google.cloud.bigtable_admin_v2.types.RestoreSourceType): + The type of the restore source. + backup_info (google.cloud.bigtable_admin_v2.types.BackupInfo): + Information about the backup used to restore + the table. The backup may no longer exist. + """ + + source_type = proto.Field(proto.ENUM, number=1, enum="RestoreSourceType",) + + backup_info = proto.Field( + proto.MESSAGE, number=2, oneof="source_info", message="BackupInfo", + ) + + +class Table(proto.Message): + r"""A collection of user data indexed by row, column, and + timestamp. Each table is served using the resources of its + parent cluster. + + Attributes: + name (str): + Output only. The unique name of the table. 
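The two routing policies above form a oneof, so an ``AppProfile`` carries exactly one of them; a speculative sketch with placeholder cluster IDs and descriptions:

from google.cloud.bigtable_admin_v2 import types

# Pin traffic to one cluster and keep read-your-writes consistency.
pinned = types.AppProfile(
    description="batch writer",
    single_cluster_routing=types.AppProfile.SingleClusterRouting(
        cluster_id="my-cluster",
        allow_transactional_writes=False,
    ),
)

# Or let Bigtable route to the nearest available cluster.
nearest = types.AppProfile(
    description="latency-sensitive reads",
    multi_cluster_routing_use_any=types.AppProfile.MultiClusterRoutingUseAny(),
)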
Values are of the + form + ``projects//instances//tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + Views: ``NAME_ONLY``, ``SCHEMA_VIEW``, ``REPLICATION_VIEW``, + ``FULL`` + cluster_states (Sequence[google.cloud.bigtable_admin_v2.types.Table.ClusterStatesEntry]): + Output only. Map from cluster ID to per-cluster table state. + If it could not be determined whether or not the table has + data in a particular cluster (for example, if its zone is + unavailable), then there will be an entry for the cluster + with UNKNOWN ``replication_status``. Views: + ``REPLICATION_VIEW``, ``FULL`` + column_families (Sequence[google.cloud.bigtable_admin_v2.types.Table.ColumnFamiliesEntry]): + (``CreationOnly``) The column families configured for this + table, mapped by column family ID. Views: ``SCHEMA_VIEW``, + ``FULL`` + granularity (google.cloud.bigtable_admin_v2.types.Table.TimestampGranularity): + (``CreationOnly``) The granularity (i.e. ``MILLIS``) at + which timestamps are stored in this table. Timestamps not + matching the granularity will be rejected. If unspecified at + creation time, the value will be set to ``MILLIS``. Views: + ``SCHEMA_VIEW``, ``FULL``. + restore_info (google.cloud.bigtable_admin_v2.types.RestoreInfo): + Output only. If this table was restored from + another data source (e.g. a backup), this field + will be populated with information about the + restore. + """ + + class TimestampGranularity(proto.Enum): + r"""Possible timestamp granularities to use when keeping multiple + versions of data in a table. + """ + TIMESTAMP_GRANULARITY_UNSPECIFIED = 0 + MILLIS = 1 + + class View(proto.Enum): + r"""Defines a view over a table's fields.""" + VIEW_UNSPECIFIED = 0 + NAME_ONLY = 1 + SCHEMA_VIEW = 2 + REPLICATION_VIEW = 3 + FULL = 4 + + class ClusterState(proto.Message): + r"""The state of a table's data in a particular cluster. + + Attributes: + replication_state (google.cloud.bigtable_admin_v2.types.Table.ClusterState.ReplicationState): + Output only. The state of replication for the + table in this cluster. + """ + + class ReplicationState(proto.Enum): + r"""Table replication states.""" + STATE_NOT_KNOWN = 0 + INITIALIZING = 1 + PLANNED_MAINTENANCE = 2 + UNPLANNED_MAINTENANCE = 3 + READY = 4 + READY_OPTIMIZING = 5 + + replication_state = proto.Field( + proto.ENUM, number=1, enum="Table.ClusterState.ReplicationState", + ) + + name = proto.Field(proto.STRING, number=1) + + cluster_states = proto.MapField( + proto.STRING, proto.MESSAGE, number=2, message=ClusterState, + ) + + column_families = proto.MapField( + proto.STRING, proto.MESSAGE, number=3, message="ColumnFamily", + ) + + granularity = proto.Field(proto.ENUM, number=4, enum=TimestampGranularity,) + + restore_info = proto.Field(proto.MESSAGE, number=6, message="RestoreInfo",) + + +class ColumnFamily(proto.Message): + r"""A set of columns within a table which share a common + configuration. + + Attributes: + gc_rule (google.cloud.bigtable_admin_v2.types.GcRule): + Garbage collection rule specified as a + protobuf. Must serialize to at most 500 bytes. + NOTE: Garbage collection executes + opportunistically in the background, and so it's + possible for reads to return a cell even if it + matches the active GC expression for its family. + """ + + gc_rule = proto.Field(proto.MESSAGE, number=1, message="GcRule",) + + +class GcRule(proto.Message): + r"""Rule for determining which cells to delete during garbage + collection. + + Attributes: + max_num_versions (int): + Delete all cells in a column except the most + recent N. 
+ max_age (google.protobuf.duration_pb2.Duration): + Delete cells in a column older than the given + age. Values must be at least one millisecond, + and will be truncated to microsecond + granularity. + intersection (google.cloud.bigtable_admin_v2.types.GcRule.Intersection): + Delete cells that would be deleted by every + nested rule. + union (google.cloud.bigtable_admin_v2.types.GcRule.Union): + Delete cells that would be deleted by any + nested rule. + """ + + class Intersection(proto.Message): + r"""A GcRule which deletes cells matching all of the given rules. + + Attributes: + rules (Sequence[google.cloud.bigtable_admin_v2.types.GcRule]): + Only delete cells which would be deleted by every element of + ``rules``. + """ + + rules = proto.RepeatedField(proto.MESSAGE, number=1, message="GcRule",) + + class Union(proto.Message): + r"""A GcRule which deletes cells matching any of the given rules. + + Attributes: + rules (Sequence[google.cloud.bigtable_admin_v2.types.GcRule]): + Delete cells which would be deleted by any element of + ``rules``. + """ + + rules = proto.RepeatedField(proto.MESSAGE, number=1, message="GcRule",) + + max_num_versions = proto.Field(proto.INT32, number=1, oneof="rule") + + max_age = proto.Field( + proto.MESSAGE, number=2, oneof="rule", message=duration.Duration, + ) + + intersection = proto.Field( + proto.MESSAGE, number=3, oneof="rule", message=Intersection, + ) + + union = proto.Field(proto.MESSAGE, number=4, oneof="rule", message=Union,) + + +class Snapshot(proto.Message): + r"""A snapshot of a table at a particular time. A snapshot can be + used as a checkpoint for data restoration or a data source for a + new table. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to most Cloud + Bigtable customers. This feature might be changed in backward- + incompatible ways and is not recommended for production use. It + is not subject to any SLA or deprecation policy. + + Attributes: + name (str): + Output only. The unique name of the snapshot. Values are of + the form + ``projects//instances//clusters//snapshots/``. + source_table (google.cloud.bigtable_admin_v2.types.Table): + Output only. The source table at the time the + snapshot was taken. + data_size_bytes (int): + Output only. The size of the data in the + source table at the time the snapshot was taken. + In some cases, this value may be computed + asynchronously via a background process and a + placeholder of 0 will be used in the meantime. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The time when the snapshot is + created. + delete_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The time when the snapshot will + be deleted. The maximum amount of time a + snapshot can stay active is 365 days. If 'ttl' + is not specified, the default maximum of 365 + days will be used. + state (google.cloud.bigtable_admin_v2.types.Snapshot.State): + Output only. The current state of the + snapshot. + description (str): + Output only. Description of the snapshot. 
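Because ``GcRule`` nests through its ``rule`` oneof, composite policies are built from smaller ``GcRule`` messages; a minimal sketch of a union rule attached to a column family (the seven-day horizon is an arbitrary example value):

from google.protobuf import duration_pb2
from google.cloud.bigtable_admin_v2 import types

# A union deletes any cell matched by either nested rule: cells beyond the
# two most recent versions, or cells older than seven days.
gc_rule = types.GcRule(
    union=types.GcRule.Union(
        rules=[
            types.GcRule(max_num_versions=2),
            types.GcRule(max_age=duration_pb2.Duration(seconds=7 * 24 * 60 * 60)),
        ]
    )
)
family = types.ColumnFamily(gc_rule=gc_rule)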
+ """ + + class State(proto.Enum): + r"""Possible states of a snapshot.""" + STATE_NOT_KNOWN = 0 + READY = 1 + CREATING = 2 + + name = proto.Field(proto.STRING, number=1) + + source_table = proto.Field(proto.MESSAGE, number=2, message="Table",) + + data_size_bytes = proto.Field(proto.INT64, number=3) + + create_time = proto.Field(proto.MESSAGE, number=4, message=timestamp.Timestamp,) + + delete_time = proto.Field(proto.MESSAGE, number=5, message=timestamp.Timestamp,) + + state = proto.Field(proto.ENUM, number=6, enum=State,) + + description = proto.Field(proto.STRING, number=7) + + +class Backup(proto.Message): + r"""A backup of a Cloud Bigtable table. + + Attributes: + name (str): + Output only. A globally unique identifier for the backup + which cannot be changed. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/ backups/[_a-zA-Z0-9][-_.a-zA-Z0-9]*`` + The final segment of the name must be between 1 and 50 + characters in length. + + The backup is stored in the cluster identified by the prefix + of the backup name of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + source_table (str): + Required. Immutable. Name of the table from which this + backup was created. This needs to be in the same instance as + the backup. Values are of the form + ``projects/{project}/instances/{instance}/tables/{source_table}``. + expire_time (google.protobuf.timestamp_pb2.Timestamp): + Required. The expiration time of the backup, with + microseconds granularity that must be at least 6 hours and + at most 30 days from the time the request is received. Once + the ``expire_time`` has passed, Cloud Bigtable will delete + the backup and free the resources used by the backup. + start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. ``start_time`` is the time that the backup was + started (i.e. approximately the time the + [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup] + request is received). The row data in this backup will be no + older than this timestamp. + end_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. ``end_time`` is the time that the backup was + finished. The row data in the backup will be no newer than + this timestamp. + size_bytes (int): + Output only. Size of the backup in bytes. + state (google.cloud.bigtable_admin_v2.types.Backup.State): + Output only. The current state of the backup. + """ + + class State(proto.Enum): + r"""Indicates the current state of the backup.""" + STATE_UNSPECIFIED = 0 + CREATING = 1 + READY = 2 + + name = proto.Field(proto.STRING, number=1) + + source_table = proto.Field(proto.STRING, number=2) + + expire_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,) + + start_time = proto.Field(proto.MESSAGE, number=4, message=timestamp.Timestamp,) + + end_time = proto.Field(proto.MESSAGE, number=5, message=timestamp.Timestamp,) + + size_bytes = proto.Field(proto.INT64, number=6) + + state = proto.Field(proto.ENUM, number=7, enum=State,) + + +class BackupInfo(proto.Message): + r"""Information about a backup. + + Attributes: + backup (str): + Output only. Name of the backup. + start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The time that the backup was + started. Row data in the backup will be no older + than this timestamp. + end_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. This time that the backup was + finished. Row data in the backup will be no + newer than this timestamp. 
+ source_table (str): + Output only. Name of the table the backup was + created from. + """ + + backup = proto.Field(proto.STRING, number=1) + + start_time = proto.Field(proto.MESSAGE, number=2, message=timestamp.Timestamp,) + + end_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,) + + source_table = proto.Field(proto.STRING, number=4) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py index 8c31017cc47f..0ab15791b864 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py @@ -1,42 +1,71 @@ # -*- coding: utf-8 -*- -# + # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# https://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +# - -from __future__ import absolute_import -import sys -import warnings - -from google.cloud.bigtable_v2 import types -from google.cloud.bigtable_v2.gapic import bigtable_client - - -if sys.version_info[:2] == (2, 7): - message = ( - "A future version of this library will drop support for Python 2.7. " - "More details about Python 2 support for Google Cloud Client Libraries " - "can be found at https://cloud.google.com/python/docs/python2-sunset/" - ) - warnings.warn(message, DeprecationWarning) - - -class BigtableClient(bigtable_client.BigtableClient): - __doc__ = bigtable_client.BigtableClient.__doc__ +from .services.bigtable import BigtableClient +from .types.bigtable import CheckAndMutateRowRequest +from .types.bigtable import CheckAndMutateRowResponse +from .types.bigtable import MutateRowRequest +from .types.bigtable import MutateRowResponse +from .types.bigtable import MutateRowsRequest +from .types.bigtable import MutateRowsResponse +from .types.bigtable import ReadModifyWriteRowRequest +from .types.bigtable import ReadModifyWriteRowResponse +from .types.bigtable import ReadRowsRequest +from .types.bigtable import ReadRowsResponse +from .types.bigtable import SampleRowKeysRequest +from .types.bigtable import SampleRowKeysResponse +from .types.data import Cell +from .types.data import Column +from .types.data import ColumnRange +from .types.data import Family +from .types.data import Mutation +from .types.data import ReadModifyWriteRule +from .types.data import Row +from .types.data import RowFilter +from .types.data import RowRange +from .types.data import RowSet +from .types.data import TimestampRange +from .types.data import ValueRange __all__ = ( - "types", + "Cell", + "CheckAndMutateRowRequest", + "CheckAndMutateRowResponse", + "Column", + "ColumnRange", + "Family", + "MutateRowRequest", + "MutateRowResponse", + "MutateRowsRequest", + "MutateRowsResponse", + "Mutation", + "ReadModifyWriteRowRequest", + "ReadModifyWriteRowResponse", + "ReadModifyWriteRule", + "ReadRowsRequest", + "ReadRowsResponse", + "Row", + "RowFilter", + "RowRange", + "RowSet", + "SampleRowKeysRequest", + 
"SampleRowKeysResponse", + "TimestampRange", + "ValueRange", "BigtableClient", ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py deleted file mode 100644 index a9ddfad8ab8a..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client.py +++ /dev/null @@ -1,779 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Accesses the google.bigtable.v2 Bigtable API.""" - -import pkg_resources -import warnings - -from google.oauth2 import service_account -import google.api_core.client_options -import google.api_core.gapic_v1.client_info -import google.api_core.gapic_v1.config -import google.api_core.gapic_v1.method -import google.api_core.gapic_v1.routing_header -import google.api_core.grpc_helpers -import google.api_core.path_template -import grpc - -from google.cloud.bigtable_v2.gapic import bigtable_client_config -from google.cloud.bigtable_v2.gapic.transports import bigtable_grpc_transport -from google.cloud.bigtable_v2.proto import bigtable_pb2 -from google.cloud.bigtable_v2.proto import bigtable_pb2_grpc -from google.cloud.bigtable_v2.proto import data_pb2 - - -try: - _GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( - "google-cloud-bigtable" - ).version -except pkg_resources.DistributionNotFound: # pragma: NO COVER - _GAPIC_LIBRARY_VERSION = None - - -class BigtableClient(object): - """Service for reading from and writing to existing Bigtable tables.""" - - SERVICE_ADDRESS = "bigtable.googleapis.com:443" - """The default address of the service.""" - - # The name of the interface for this client. This is the key used to - # find the method configuration in the client_config dictionary. - _INTERFACE_NAME = "google.bigtable.v2.Bigtable" - - @classmethod - def from_service_account_file(cls, filename, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - BigtableClient: The constructed client. 
- """ - credentials = service_account.Credentials.from_service_account_file(filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @classmethod - def table_path(cls, project, instance, table): - """Return a fully-qualified table string.""" - return google.api_core.path_template.expand( - "projects/{project}/instances/{instance}/tables/{table}", - project=project, - instance=instance, - table=table, - ) - - def __init__( - self, - transport=None, - channel=None, - credentials=None, - client_config=None, - client_info=None, - client_options=None, - ): - """Constructor. - - Args: - transport (Union[~.BigtableGrpcTransport, - Callable[[~.Credentials, type], ~.BigtableGrpcTransport]): A transport - instance, responsible for actually making the API calls. - The default transport uses the gRPC protocol. - This argument may also be a callable which returns a - transport instance. Callables will be sent the credentials - as the first argument and the default transport class as - the second argument. - channel (grpc.Channel): DEPRECATED. A ``Channel`` instance - through which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is mutually exclusive with providing a - transport instance to ``transport``; doing so will raise - an exception. - client_config (dict): DEPRECATED. A dictionary of call options for - each method. If not specified, the default configuration is used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - client_options (Union[dict, google.api_core.client_options.ClientOptions]): - Client options used to set user options on the client. API Endpoint - should be set through client_options. - """ - # Raise deprecation warnings for things we want to go away. - if client_config is not None: - warnings.warn( - "The `client_config` argument is deprecated.", - PendingDeprecationWarning, - stacklevel=2, - ) - else: - client_config = bigtable_client_config.config - - if channel: - warnings.warn( - "The `channel` argument is deprecated; use " "`transport` instead.", - PendingDeprecationWarning, - stacklevel=2, - ) - - api_endpoint = self.SERVICE_ADDRESS - if client_options: - if type(client_options) == dict: - client_options = google.api_core.client_options.from_dict( - client_options - ) - if client_options.api_endpoint: - api_endpoint = client_options.api_endpoint - - # Instantiate the transport. - # The transport is responsible for handling serialization and - # deserialization and actually sending data to the service. - if transport: - if callable(transport): - self.transport = transport( - credentials=credentials, - default_class=bigtable_grpc_transport.BigtableGrpcTransport, - address=api_endpoint, - ) - else: - if credentials: - raise ValueError( - "Received both a transport instance and " - "credentials; these are mutually exclusive." 
- ) - self.transport = transport - else: - self.transport = bigtable_grpc_transport.BigtableGrpcTransport( - address=api_endpoint, - channel=channel, - credentials=credentials, - ) - - if client_info is None: - client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION, - ) - else: - client_info.gapic_version = _GAPIC_LIBRARY_VERSION - self._client_info = client_info - - # Parse out the default settings for retry and timeout for each RPC - # from the client configuration. - # (Ordinarily, these are the defaults specified in the `*_config.py` - # file next to this one.) - self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config["interfaces"][self._INTERFACE_NAME], - ) - - # Save a dictionary of cached API call functions. - # These are the actual callables which invoke the proper - # transport methods, wrapped with `wrap_method` to add retry, - # timeout, and the like. - self._inner_api_calls = {} - - # Service calls - def read_rows( - self, - table_name, - app_profile_id=None, - rows=None, - filter_=None, - rows_limit=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Streams back the contents of all requested rows in key order, optionally - applying the same Reader filter to each. Depending on their size, - rows and cells may be broken up across multiple responses, but - atomicity of each row will still be preserved. See the - ReadRowsResponse documentation for details. - - Example: - >>> from google.cloud import bigtable_v2 - >>> - >>> client = bigtable_v2.BigtableClient() - >>> - >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> for element in client.read_rows(table_name): - ... # process element - ... pass - - Args: - table_name (str): Required. The unique name of the table from which to read. Values - are of the form - ``projects//instances//tables/
``. - app_profile_id (str): This value specifies routing for replication. If not specified, the - "default" application profile will be used. - rows (Union[dict, ~google.cloud.bigtable_v2.types.RowSet]): The row keys and/or ranges to read. If not specified, reads from all rows. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_v2.types.RowSet` - filter_ (Union[dict, ~google.cloud.bigtable_v2.types.RowFilter]): The filter to apply to the contents of the specified row(s). If unset, - reads the entirety of each row. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_v2.types.RowFilter` - rows_limit (long): The read will terminate after committing to N rows' worth of results. The - default (zero) is to return all results. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - Iterable[~google.cloud.bigtable_v2.types.ReadRowsResponse]. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "read_rows" not in self._inner_api_calls: - self._inner_api_calls[ - "read_rows" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.read_rows, - default_retry=self._method_configs["ReadRows"].retry, - default_timeout=self._method_configs["ReadRows"].timeout, - client_info=self._client_info, - ) - - request = bigtable_pb2.ReadRowsRequest( - table_name=table_name, - app_profile_id=app_profile_id, - rows=rows, - filter=filter_, - rows_limit=rows_limit, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("table_name", table_name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["read_rows"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def sample_row_keys( - self, - table_name, - app_profile_id=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Returns a sample of row keys in the table. The returned row keys will - delimit contiguous sections of the table of approximately equal size, - which can be used to break up the data for distributed tasks like - mapreduces. - - Example: - >>> from google.cloud import bigtable_v2 - >>> - >>> client = bigtable_v2.BigtableClient() - >>> - >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> for element in client.sample_row_keys(table_name): - ... # process element - ... pass - - Args: - table_name (str): Required. The unique name of the table from which to sample row - keys. Values are of the form - ``projects//instances//tables/
``. - app_profile_id (str): This value specifies routing for replication. If not specified, the - "default" application profile will be used. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - Iterable[~google.cloud.bigtable_v2.types.SampleRowKeysResponse]. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "sample_row_keys" not in self._inner_api_calls: - self._inner_api_calls[ - "sample_row_keys" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.sample_row_keys, - default_retry=self._method_configs["SampleRowKeys"].retry, - default_timeout=self._method_configs["SampleRowKeys"].timeout, - client_info=self._client_info, - ) - - request = bigtable_pb2.SampleRowKeysRequest( - table_name=table_name, - app_profile_id=app_profile_id, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("table_name", table_name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["sample_row_keys"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def mutate_row( - self, - table_name, - row_key, - mutations, - app_profile_id=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Mutates a row atomically. Cells already present in the row are left - unchanged unless explicitly changed by ``mutation``. - - Example: - >>> from google.cloud import bigtable_v2 - >>> - >>> client = bigtable_v2.BigtableClient() - >>> - >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> # TODO: Initialize `row_key`: - >>> row_key = b'' - >>> - >>> # TODO: Initialize `mutations`: - >>> mutations = [] - >>> - >>> response = client.mutate_row(table_name, row_key, mutations) - - Args: - table_name (str): Required. The unique name of the table to which the mutation should - be applied. Values are of the form - ``projects//instances//tables/
``. - row_key (bytes): Required. The key of the row to which the mutation should be applied. - mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Required. Changes to be atomically applied to the specified row. Entries are applied - in order, meaning that earlier mutations can be masked by later ones. - Must contain at least one entry and at most 100000. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_v2.types.Mutation` - app_profile_id (str): This value specifies routing for replication. If not specified, the - "default" application profile will be used. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_v2.types.MutateRowResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "mutate_row" not in self._inner_api_calls: - self._inner_api_calls[ - "mutate_row" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.mutate_row, - default_retry=self._method_configs["MutateRow"].retry, - default_timeout=self._method_configs["MutateRow"].timeout, - client_info=self._client_info, - ) - - request = bigtable_pb2.MutateRowRequest( - table_name=table_name, - row_key=row_key, - mutations=mutations, - app_profile_id=app_profile_id, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("table_name", table_name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["mutate_row"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def mutate_rows( - self, - table_name, - entries, - app_profile_id=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Mutates multiple rows in a batch. Each individual row is mutated - atomically as in MutateRow, but the entire batch is not executed - atomically. - - Example: - >>> from google.cloud import bigtable_v2 - >>> - >>> client = bigtable_v2.BigtableClient() - >>> - >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> # TODO: Initialize `entries`: - >>> entries = [] - >>> - >>> for element in client.mutate_rows(table_name, entries): - ... # process element - ... pass - - Args: - table_name (str): Required. The unique name of the table to which the mutations should be applied. - entries (list[Union[dict, ~google.cloud.bigtable_v2.types.Entry]]): Required. The row keys and corresponding mutations to be applied in bulk. - Each entry is applied as an atomic mutation, but the entries may be - applied in arbitrary order (even between entries for the same row). 
- At least one entry must be specified, and in total the entries can - contain at most 100000 mutations. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_v2.types.Entry` - app_profile_id (str): This value specifies routing for replication. If not specified, the - "default" application profile will be used. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - Iterable[~google.cloud.bigtable_v2.types.MutateRowsResponse]. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "mutate_rows" not in self._inner_api_calls: - self._inner_api_calls[ - "mutate_rows" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.mutate_rows, - default_retry=self._method_configs["MutateRows"].retry, - default_timeout=self._method_configs["MutateRows"].timeout, - client_info=self._client_info, - ) - - request = bigtable_pb2.MutateRowsRequest( - table_name=table_name, - entries=entries, - app_profile_id=app_profile_id, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("table_name", table_name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["mutate_rows"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def check_and_mutate_row( - self, - table_name, - row_key, - app_profile_id=None, - predicate_filter=None, - true_mutations=None, - false_mutations=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Mutates a row atomically based on the output of a predicate Reader filter. - - Example: - >>> from google.cloud import bigtable_v2 - >>> - >>> client = bigtable_v2.BigtableClient() - >>> - >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> # TODO: Initialize `row_key`: - >>> row_key = b'' - >>> - >>> response = client.check_and_mutate_row(table_name, row_key) - - Args: - table_name (str): Required. The unique name of the table to which the conditional - mutation should be applied. Values are of the form - ``projects//instances//tables/
``. - row_key (bytes): Required. The key of the row to which the conditional mutation should be applied. - app_profile_id (str): This value specifies routing for replication. If not specified, the - "default" application profile will be used. - predicate_filter (Union[dict, ~google.cloud.bigtable_v2.types.RowFilter]): The filter to be applied to the contents of the specified row. - Depending on whether or not any results are yielded, either - ``true_mutations`` or ``false_mutations`` will be executed. If unset, - checks that the row contains any values at all. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_v2.types.RowFilter` - true_mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Changes to be atomically applied to the specified row if - ``predicate_filter`` yields at least one cell when applied to - ``row_key``. Entries are applied in order, meaning that earlier - mutations can be masked by later ones. Must contain at least one entry - if ``false_mutations`` is empty, and at most 100000. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_v2.types.Mutation` - false_mutations (list[Union[dict, ~google.cloud.bigtable_v2.types.Mutation]]): Changes to be atomically applied to the specified row if - ``predicate_filter`` does not yield any cells when applied to - ``row_key``. Entries are applied in order, meaning that earlier - mutations can be masked by later ones. Must contain at least one entry - if ``true_mutations`` is empty, and at most 100000. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_v2.types.Mutation` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_v2.types.CheckAndMutateRowResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
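A hedged illustration of the conditional flow check_and_mutate_row describes: apply true_mutations only when the (hypothetical) column cf1:status already contains a cell, otherwise apply false_mutations. The filter and mutation dicts follow the RowFilter and Mutation message shapes the docstring references; all names and values are invented for illustration:

    from google.cloud import bigtable_v2

    client = bigtable_v2.BigtableClient()
    table_name = client.table_path('my-project', 'my-instance', 'my-table')  # hypothetical

    # Predicate: does the row hold any cell in column cf1:status?
    predicate_filter = {
        'chain': {
            'filters': [
                {'family_name_regex_filter': 'cf1'},
                {'column_qualifier_regex_filter': b'status'},
            ]
        }
    }

    # Applied only if the predicate yields at least one cell.
    true_mutations = [
        {'set_cell': {'family_name': 'cf1', 'column_qualifier': b'status',
                      'timestamp_micros': -1, 'value': b'seen'}},
    ]
    # Applied only if the predicate yields nothing.
    false_mutations = [
        {'set_cell': {'family_name': 'cf1', 'column_qualifier': b'status',
                      'timestamp_micros': -1, 'value': b'new'}},
    ]

    response = client.check_and_mutate_row(
        table_name,
        b'row-key-1',
        predicate_filter=predicate_filter,
        true_mutations=true_mutations,
        false_mutations=false_mutations,
    )
    # response.predicate_matched reports which branch of mutations was applied.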
- if "check_and_mutate_row" not in self._inner_api_calls: - self._inner_api_calls[ - "check_and_mutate_row" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.check_and_mutate_row, - default_retry=self._method_configs["CheckAndMutateRow"].retry, - default_timeout=self._method_configs["CheckAndMutateRow"].timeout, - client_info=self._client_info, - ) - - request = bigtable_pb2.CheckAndMutateRowRequest( - table_name=table_name, - row_key=row_key, - app_profile_id=app_profile_id, - predicate_filter=predicate_filter, - true_mutations=true_mutations, - false_mutations=false_mutations, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("table_name", table_name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["check_and_mutate_row"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def read_modify_write_row( - self, - table_name, - row_key, - rules, - app_profile_id=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Modifies a row atomically on the server. The method reads the latest - existing timestamp and value from the specified columns and writes a new - entry based on pre-defined read/modify/write rules. The new value for the - timestamp is the greater of the existing timestamp or the current server - time. The method returns the new contents of all modified cells. - - Example: - >>> from google.cloud import bigtable_v2 - >>> - >>> client = bigtable_v2.BigtableClient() - >>> - >>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]') - >>> - >>> # TODO: Initialize `row_key`: - >>> row_key = b'' - >>> - >>> # TODO: Initialize `rules`: - >>> rules = [] - >>> - >>> response = client.read_modify_write_row(table_name, row_key, rules) - - Args: - table_name (str): Required. The unique name of the table to which the - read/modify/write rules should be applied. Values are of the form - ``projects//instances//tables/
``. - row_key (bytes): Required. The key of the row to which the read/modify/write rules should be applied. - rules (list[Union[dict, ~google.cloud.bigtable_v2.types.ReadModifyWriteRule]]): Required. Rules specifying how the specified row's contents are to be transformed - into writes. Entries are applied in order, meaning that earlier rules will - affect the results of later ones. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.bigtable_v2.types.ReadModifyWriteRule` - app_profile_id (str): This value specifies routing for replication. If not specified, the - "default" application profile will be used. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.bigtable_v2.types.ReadModifyWriteRowResponse` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "read_modify_write_row" not in self._inner_api_calls: - self._inner_api_calls[ - "read_modify_write_row" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.read_modify_write_row, - default_retry=self._method_configs["ReadModifyWriteRow"].retry, - default_timeout=self._method_configs["ReadModifyWriteRow"].timeout, - client_info=self._client_info, - ) - - request = bigtable_pb2.ReadModifyWriteRowRequest( - table_name=table_name, - row_key=row_key, - rules=rules, - app_profile_id=app_profile_id, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("table_name", table_name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["read_modify_write_row"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client_config.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client_config.py deleted file mode 100644 index 8a57847bf8d9..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/bigtable_client_config.py +++ /dev/null @@ -1,80 +0,0 @@ -config = { - "interfaces": { - "google.bigtable.v2.Bigtable": { - "retry_codes": { - "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], - "non_idempotent": [], - }, - "retry_params": { - "idempotent_params": { - "initial_retry_delay_millis": 10, - "retry_delay_multiplier": 2.0, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 20000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 20000, - "total_timeout_millis": 600000, - }, - "non_idempotent_params": { - "initial_retry_delay_millis": 10, - "retry_delay_multiplier": 2.0, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 20000, - "rpc_timeout_multiplier": 1.0, - 
"max_rpc_timeout_millis": 20000, - "total_timeout_millis": 20000, - }, - "read_rows_params": { - "initial_retry_delay_millis": 10, - "retry_delay_multiplier": 2.0, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 300000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 300000, - "total_timeout_millis": 43200000, - }, - "mutate_rows_params": { - "initial_retry_delay_millis": 10, - "retry_delay_multiplier": 2.0, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 60000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 60000, - "total_timeout_millis": 600000, - }, - }, - "methods": { - "ReadRows": { - "timeout_millis": 43200000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "read_rows_params", - }, - "SampleRowKeys": { - "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", - }, - "MutateRow": { - "timeout_millis": 60000, - "retry_codes_name": "idempotent", - "retry_params_name": "idempotent_params", - }, - "MutateRows": { - "timeout_millis": 600000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "mutate_rows_params", - }, - "CheckAndMutateRow": { - "timeout_millis": 20000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", - }, - "ReadModifyWriteRow": { - "timeout_millis": 20000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "non_idempotent_params", - }, - }, - } - } -} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/transports/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/transports/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py deleted file mode 100644 index 5b2757db2d6d..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py +++ /dev/null @@ -1,207 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import google.api_core.grpc_helpers - -from google.cloud.bigtable_v2.proto import bigtable_pb2_grpc - - -class BigtableGrpcTransport(object): - """gRPC transport class providing stubs for - google.bigtable.v2 Bigtable API. - - The transport provides access to the raw gRPC stubs, - which can be used to take advantage of advanced - features of gRPC. - """ - - # The scopes needed to make gRPC calls to all of the methods defined - # in this service. 
- _OAUTH_SCOPES = ( - "https://www.googleapis.com/auth/bigtable.data", - "https://www.googleapis.com/auth/bigtable.data.readonly", - "https://www.googleapis.com/auth/cloud-bigtable.data", - "https://www.googleapis.com/auth/cloud-bigtable.data.readonly", - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - ) - - def __init__( - self, channel=None, credentials=None, address="bigtable.googleapis.com:443" - ): - """Instantiate the transport class. - - Args: - channel (grpc.Channel): A ``Channel`` instance through - which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - address (str): The address where the service is hosted. - """ - # If both `channel` and `credentials` are specified, raise an - # exception (channels come with credentials baked in already). - if channel is not None and credentials is not None: - raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive.", - ) - - # Create the channel. - if channel is None: - channel = self.create_channel( - address=address, - credentials=credentials, - options={ - "grpc.max_send_message_length": -1, - "grpc.max_receive_message_length": -1, - }.items(), - ) - - self._channel = channel - - # gRPC uses objects called "stubs" that are bound to the - # channel and provide a basic method for each RPC. - self._stubs = { - "bigtable_stub": bigtable_pb2_grpc.BigtableStub(channel), - } - - @classmethod - def create_channel( - cls, address="bigtable.googleapis.com:443", credentials=None, **kwargs - ): - """Create and return a gRPC channel object. - - Args: - address (str): The host for the channel to use. - credentials (~.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - kwargs (dict): Keyword arguments, which are passed to the - channel creation. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return google.api_core.grpc_helpers.create_channel( - address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs - ) - - @property - def channel(self): - """The gRPC channel used by the transport. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return self._channel - - @property - def read_rows(self): - """Return the gRPC stub for :meth:`BigtableClient.read_rows`. - - Streams back the contents of all requested rows in key order, optionally - applying the same Reader filter to each. Depending on their size, - rows and cells may be broken up across multiple responses, but - atomicity of each row will still be preserved. See the - ReadRowsResponse documentation for details. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_stub"].ReadRows - - @property - def sample_row_keys(self): - """Return the gRPC stub for :meth:`BigtableClient.sample_row_keys`. - - Returns a sample of row keys in the table. 
The returned row keys will - delimit contiguous sections of the table of approximately equal size, - which can be used to break up the data for distributed tasks like - mapreduces. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_stub"].SampleRowKeys - - @property - def mutate_row(self): - """Return the gRPC stub for :meth:`BigtableClient.mutate_row`. - - Mutates a row atomically. Cells already present in the row are left - unchanged unless explicitly changed by ``mutation``. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_stub"].MutateRow - - @property - def mutate_rows(self): - """Return the gRPC stub for :meth:`BigtableClient.mutate_rows`. - - Mutates multiple rows in a batch. Each individual row is mutated - atomically as in MutateRow, but the entire batch is not executed - atomically. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_stub"].MutateRows - - @property - def check_and_mutate_row(self): - """Return the gRPC stub for :meth:`BigtableClient.check_and_mutate_row`. - - Mutates a row atomically based on the output of a predicate Reader filter. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_stub"].CheckAndMutateRow - - @property - def read_modify_write_row(self): - """Return the gRPC stub for :meth:`BigtableClient.read_modify_write_row`. - - Modifies a row atomically on the server. The method reads the latest - existing timestamp and value from the specified columns and writes a new - entry based on pre-defined read/modify/write rules. The new value for the - timestamp is the greater of the existing timestamp or the current server - time. The method returns the new contents of all modified cells. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["bigtable_stub"].ReadModifyWriteRow diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_cluster_data.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_cluster_data.proto deleted file mode 100644 index ca3b663d8661..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_cluster_data.proto +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2017 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.cluster.v1; - -import "google/api/annotations.proto"; -import "google/longrunning/operations.proto"; -import "google/protobuf/timestamp.proto"; - -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/cluster/v1;cluster"; -option java_multiple_files = true; -option java_outer_classname = "BigtableClusterDataProto"; -option java_package = "com.google.bigtable.admin.cluster.v1"; - - -// A physical location in which a particular project can allocate Cloud BigTable -// resources. -message Zone { - // Possible states of a zone. - enum Status { - // The state of the zone is unknown or unspecified. - UNKNOWN = 0; - - // The zone is in a good state. - OK = 1; - - // The zone is down for planned maintenance. - PLANNED_MAINTENANCE = 2; - - // The zone is down for emergency or unplanned maintenance. - EMERGENCY_MAINENANCE = 3; - } - - // A permanent unique identifier for the zone. - // Values are of the form projects//zones/[a-z][-a-z0-9]* - string name = 1; - - // The name of this zone as it appears in UIs. - string display_name = 2; - - // The current state of this zone. - Status status = 3; -} - -// An isolated set of Cloud BigTable resources on which tables can be hosted. -message Cluster { - // A permanent unique identifier for the cluster. For technical reasons, the - // zone in which the cluster resides is included here. - // Values are of the form - // projects//zones//clusters/[a-z][-a-z0-9]* - string name = 1; - - // The operation currently running on the cluster, if any. - // This cannot be set directly, only through CreateCluster, UpdateCluster, - // or UndeleteCluster. Calls to these methods will be rejected if - // "current_operation" is already set. - google.longrunning.Operation current_operation = 3; - - // The descriptive name for this cluster as it appears in UIs. - // Must be unique per zone. - string display_name = 4; - - // The number of serve nodes allocated to this cluster. - int32 serve_nodes = 5; - - // What storage type to use for tables in this cluster. Only configurable at - // cluster creation time. If unspecified, STORAGE_SSD will be used. - StorageType default_storage_type = 8; -} - -enum StorageType { - // The storage type used is unspecified. - STORAGE_UNSPECIFIED = 0; - - // Data will be stored in SSD, providing low and consistent latencies. - STORAGE_SSD = 1; - - // Data will be stored in HDD, providing high and less predictable - // latencies. - STORAGE_HDD = 2; -} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_cluster_service.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_cluster_service.proto deleted file mode 100644 index 038fcc46397f..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_cluster_service.proto +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright 2017 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.cluster.v1; - -import "google/api/annotations.proto"; -import "google/bigtable/admin/cluster/v1/bigtable_cluster_data.proto"; -import "google/bigtable/admin/cluster/v1/bigtable_cluster_service_messages.proto"; -import "google/longrunning/operations.proto"; -import "google/protobuf/empty.proto"; - -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/cluster/v1;cluster"; -option java_multiple_files = true; -option java_outer_classname = "BigtableClusterServicesProto"; -option java_package = "com.google.bigtable.admin.cluster.v1"; - - -// Service for managing zonal Cloud Bigtable resources. -service BigtableClusterService { - // Lists the supported zones for the given project. - rpc ListZones(ListZonesRequest) returns (ListZonesResponse) { - option (google.api.http) = { get: "/v1/{name=projects/*}/zones" }; - } - - // Gets information about a particular cluster. - rpc GetCluster(GetClusterRequest) returns (Cluster) { - option (google.api.http) = { get: "/v1/{name=projects/*/zones/*/clusters/*}" }; - } - - // Lists all clusters in the given project, along with any zones for which - // cluster information could not be retrieved. - rpc ListClusters(ListClustersRequest) returns (ListClustersResponse) { - option (google.api.http) = { get: "/v1/{name=projects/*}/aggregated/clusters" }; - } - - // Creates a cluster and begins preparing it to begin serving. The returned - // cluster embeds as its "current_operation" a long-running operation which - // can be used to track the progress of turning up the new cluster. - // Immediately upon completion of this request: - // * The cluster will be readable via the API, with all requested attributes - // but no allocated resources. - // Until completion of the embedded operation: - // * Cancelling the operation will render the cluster immediately unreadable - // via the API. - // * All other attempts to modify or delete the cluster will be rejected. - // Upon completion of the embedded operation: - // * Billing for all successfully-allocated resources will begin (some types - // may have lower than the requested levels). - // * New tables can be created in the cluster. - // * The cluster's allocated resource levels will be readable via the API. - // The embedded operation's "metadata" field type is - // [CreateClusterMetadata][google.bigtable.admin.cluster.v1.CreateClusterMetadata] The embedded operation's "response" field type is - // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful. - rpc CreateCluster(CreateClusterRequest) returns (Cluster) { - option (google.api.http) = { post: "/v1/{name=projects/*/zones/*}/clusters" body: "*" }; - } - - // Updates a cluster, and begins allocating or releasing resources as - // requested. The returned cluster embeds as its "current_operation" a - // long-running operation which can be used to track the progress of updating - // the cluster. - // Immediately upon completion of this request: - // * For resource types where a decrease in the cluster's allocation has been - // requested, billing will be based on the newly-requested level. - // Until completion of the embedded operation: - // * Cancelling the operation will set its metadata's "cancelled_at_time", - // and begin restoring resources to their pre-request values. 
The operation - // is guaranteed to succeed at undoing all resource changes, after which - // point it will terminate with a CANCELLED status. - // * All other attempts to modify or delete the cluster will be rejected. - // * Reading the cluster via the API will continue to give the pre-request - // resource levels. - // Upon completion of the embedded operation: - // * Billing will begin for all successfully-allocated resources (some types - // may have lower than the requested levels). - // * All newly-reserved resources will be available for serving the cluster's - // tables. - // * The cluster's new resource levels will be readable via the API. - // [UpdateClusterMetadata][google.bigtable.admin.cluster.v1.UpdateClusterMetadata] The embedded operation's "response" field type is - // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful. - rpc UpdateCluster(Cluster) returns (Cluster) { - option (google.api.http) = { put: "/v1/{name=projects/*/zones/*/clusters/*}" body: "*" }; - } - - // Marks a cluster and all of its tables for permanent deletion in 7 days. - // Immediately upon completion of the request: - // * Billing will cease for all of the cluster's reserved resources. - // * The cluster's "delete_time" field will be set 7 days in the future. - // Soon afterward: - // * All tables within the cluster will become unavailable. - // Prior to the cluster's "delete_time": - // * The cluster can be recovered with a call to UndeleteCluster. - // * All other attempts to modify or delete the cluster will be rejected. - // At the cluster's "delete_time": - // * The cluster and *all of its tables* will immediately and irrevocably - // disappear from the API, and their data will be permanently deleted. - rpc DeleteCluster(DeleteClusterRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { delete: "/v1/{name=projects/*/zones/*/clusters/*}" }; - } - - // Cancels the scheduled deletion of an cluster and begins preparing it to - // resume serving. The returned operation will also be embedded as the - // cluster's "current_operation". - // Immediately upon completion of this request: - // * The cluster's "delete_time" field will be unset, protecting it from - // automatic deletion. - // Until completion of the returned operation: - // * The operation cannot be cancelled. - // Upon completion of the returned operation: - // * Billing for the cluster's resources will resume. - // * All tables within the cluster will be available. - // [UndeleteClusterMetadata][google.bigtable.admin.cluster.v1.UndeleteClusterMetadata] The embedded operation's "response" field type is - // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful. - rpc UndeleteCluster(UndeleteClusterRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*}:undelete" body: "" }; - } -} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_cluster_service_messages.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_cluster_service_messages.proto deleted file mode 100644 index 518d14dac8e0..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_cluster_service_messages.proto +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright 2017 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.cluster.v1; - -import "google/bigtable/admin/cluster/v1/bigtable_cluster_data.proto"; -import "google/protobuf/timestamp.proto"; - -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/cluster/v1;cluster"; -option java_multiple_files = true; -option java_outer_classname = "BigtableClusterServiceMessagesProto"; -option java_package = "com.google.bigtable.admin.cluster.v1"; - - -// Request message for BigtableClusterService.ListZones. -message ListZonesRequest { - // The unique name of the project for which a list of supported zones is - // requested. - // Values are of the form projects/ - string name = 1; -} - -// Response message for BigtableClusterService.ListZones. -message ListZonesResponse { - // The list of requested zones. - repeated Zone zones = 1; -} - -// Request message for BigtableClusterService.GetCluster. -message GetClusterRequest { - // The unique name of the requested cluster. - // Values are of the form projects//zones//clusters/ - string name = 1; -} - -// Request message for BigtableClusterService.ListClusters. -message ListClustersRequest { - // The unique name of the project for which a list of clusters is requested. - // Values are of the form projects/ - string name = 1; -} - -// Response message for BigtableClusterService.ListClusters. -message ListClustersResponse { - // The list of requested Clusters. - repeated Cluster clusters = 1; - - // The zones for which clusters could not be retrieved. - repeated Zone failed_zones = 2; -} - -// Request message for BigtableClusterService.CreateCluster. -message CreateClusterRequest { - // The unique name of the zone in which to create the cluster. - // Values are of the form projects//zones/ - string name = 1; - - // The id to be used when referring to the new cluster within its zone, - // e.g. just the "test-cluster" section of the full name - // "projects//zones//clusters/test-cluster". - string cluster_id = 2; - - // The cluster to create. - // The "name", "delete_time", and "current_operation" fields must be left - // blank. - Cluster cluster = 3; -} - -// Metadata type for the operation returned by -// BigtableClusterService.CreateCluster. -message CreateClusterMetadata { - // The request which prompted the creation of this operation. - CreateClusterRequest original_request = 1; - - // The time at which original_request was received. - google.protobuf.Timestamp request_time = 2; - - // The time at which this operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 3; -} - -// Metadata type for the operation returned by -// BigtableClusterService.UpdateCluster. -message UpdateClusterMetadata { - // The request which prompted the creation of this operation. - Cluster original_request = 1; - - // The time at which original_request was received. - google.protobuf.Timestamp request_time = 2; - - // The time at which this operation was cancelled. If set, this operation is - // in the process of undoing itself (which is guaranteed to succeed) and - // cannot be cancelled again. 
- google.protobuf.Timestamp cancel_time = 3; - - // The time at which this operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 4; -} - -// Request message for BigtableClusterService.DeleteCluster. -message DeleteClusterRequest { - // The unique name of the cluster to be deleted. - // Values are of the form projects//zones//clusters/ - string name = 1; -} - -// Request message for BigtableClusterService.UndeleteCluster. -message UndeleteClusterRequest { - // The unique name of the cluster to be un-deleted. - // Values are of the form projects//zones//clusters/ - string name = 1; -} - -// Metadata type for the operation returned by -// BigtableClusterService.UndeleteCluster. -message UndeleteClusterMetadata { - // The time at which the original request was received. - google.protobuf.Timestamp request_time = 1; - - // The time at which this operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 2; -} - -// Metadata type for operations initiated by the V2 BigtableAdmin service. -// More complete information for such operations is available via the V2 API. -message V2OperationMetadata { - -} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_data.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_data.proto deleted file mode 100644 index bd063a925f45..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_data.proto +++ /dev/null @@ -1,516 +0,0 @@ -// Copyright 2018 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.v1; - -option go_package = "google.golang.org/genproto/googleapis/bigtable/v1;bigtable"; -option java_multiple_files = true; -option java_outer_classname = "BigtableDataProto"; -option java_package = "com.google.bigtable.v1"; - - -// Specifies the complete (requested) contents of a single row of a table. -// Rows which exceed 256MiB in size cannot be read in full. -message Row { - // The unique key which identifies this row within its table. This is the same - // key that's used to identify the row in, for example, a MutateRowRequest. - // May contain any non-empty byte string up to 4KiB in length. - bytes key = 1; - - // May be empty, but only if the entire row is empty. - // The mutual ordering of column families is not specified. - repeated Family families = 2; -} - -// Specifies (some of) the contents of a single row/column family of a table. -message Family { - // The unique key which identifies this family within its row. This is the - // same key that's used to identify the family in, for example, a RowFilter - // which sets its "family_name_regex_filter" field. - // Must match [-_.a-zA-Z0-9]+, except that AggregatingRowProcessors may - // produce cells in a sentinel family with an empty name. - // Must be no greater than 64 characters in length. - string name = 1; - - // Must not be empty. Sorted in order of increasing "qualifier". 
- repeated Column columns = 2; -} - -// Specifies (some of) the contents of a single row/column of a table. -message Column { - // The unique key which identifies this column within its family. This is the - // same key that's used to identify the column in, for example, a RowFilter - // which sets its "column_qualifier_regex_filter" field. - // May contain any byte string, including the empty string, up to 16kiB in - // length. - bytes qualifier = 1; - - // Must not be empty. Sorted in order of decreasing "timestamp_micros". - repeated Cell cells = 2; -} - -// Specifies (some of) the contents of a single row/column/timestamp of a table. -message Cell { - // The cell's stored timestamp, which also uniquely identifies it within - // its column. - // Values are always expressed in microseconds, but individual tables may set - // a coarser "granularity" to further restrict the allowed values. For - // example, a table which specifies millisecond granularity will only allow - // values of "timestamp_micros" which are multiples of 1000. - int64 timestamp_micros = 1; - - // The value stored in the cell. - // May contain any byte string, including the empty string, up to 100MiB in - // length. - bytes value = 2; - - // Labels applied to the cell by a [RowFilter][google.bigtable.v1.RowFilter]. - repeated string labels = 3; -} - -// Specifies a contiguous range of rows. -message RowRange { - // Inclusive lower bound. If left empty, interpreted as the empty string. - bytes start_key = 2; - - // Exclusive upper bound. If left empty, interpreted as infinity. - bytes end_key = 3; -} - -// Specifies a non-contiguous set of rows. -message RowSet { - // Single rows included in the set. - repeated bytes row_keys = 1; - - // Contiguous row ranges included in the set. - repeated RowRange row_ranges = 2; -} - -// Specifies a contiguous range of columns within a single column family. -// The range spans from : to -// :, where both bounds can be either inclusive or -// exclusive. -message ColumnRange { - // The name of the column family within which this range falls. - string family_name = 1; - - // The column qualifier at which to start the range (within 'column_family'). - // If neither field is set, interpreted as the empty string, inclusive. - oneof start_qualifier { - // Used when giving an inclusive lower bound for the range. - bytes start_qualifier_inclusive = 2; - - // Used when giving an exclusive lower bound for the range. - bytes start_qualifier_exclusive = 3; - } - - // The column qualifier at which to end the range (within 'column_family'). - // If neither field is set, interpreted as the infinite string, exclusive. - oneof end_qualifier { - // Used when giving an inclusive upper bound for the range. - bytes end_qualifier_inclusive = 4; - - // Used when giving an exclusive upper bound for the range. - bytes end_qualifier_exclusive = 5; - } -} - -// Specified a contiguous range of microsecond timestamps. -message TimestampRange { - // Inclusive lower bound. If left empty, interpreted as 0. - int64 start_timestamp_micros = 1; - - // Exclusive upper bound. If left empty, interpreted as infinity. - int64 end_timestamp_micros = 2; -} - -// Specifies a contiguous range of raw byte values. -message ValueRange { - // The value at which to start the range. - // If neither field is set, interpreted as the empty string, inclusive. - oneof start_value { - // Used when giving an inclusive lower bound for the range. 
- bytes start_value_inclusive = 1; - - // Used when giving an exclusive lower bound for the range. - bytes start_value_exclusive = 2; - } - - // The value at which to end the range. - // If neither field is set, interpreted as the infinite string, exclusive. - oneof end_value { - // Used when giving an inclusive upper bound for the range. - bytes end_value_inclusive = 3; - - // Used when giving an exclusive upper bound for the range. - bytes end_value_exclusive = 4; - } -} - -// Takes a row as input and produces an alternate view of the row based on -// specified rules. For example, a RowFilter might trim down a row to include -// just the cells from columns matching a given regular expression, or might -// return all the cells of a row but not their values. More complicated filters -// can be composed out of these components to express requests such as, "within -// every column of a particular family, give just the two most recent cells -// which are older than timestamp X." -// -// There are two broad categories of RowFilters (true filters and transformers), -// as well as two ways to compose simple filters into more complex ones -// (chains and interleaves). They work as follows: -// -// * True filters alter the input row by excluding some of its cells wholesale -// from the output row. An example of a true filter is the "value_regex_filter", -// which excludes cells whose values don't match the specified pattern. All -// regex true filters use RE2 syntax (https://github.com/google/re2/wiki/Syntax) -// in raw byte mode (RE2::Latin1), and are evaluated as full matches. An -// important point to keep in mind is that RE2(.) is equivalent by default to -// RE2([^\n]), meaning that it does not match newlines. When attempting to match -// an arbitrary byte, you should therefore use the escape sequence '\C', which -// may need to be further escaped as '\\C' in your client language. -// -// * Transformers alter the input row by changing the values of some of its -// cells in the output, without excluding them completely. Currently, the only -// supported transformer is the "strip_value_transformer", which replaces every -// cell's value with the empty string. -// -// * Chains and interleaves are described in more detail in the -// RowFilter.Chain and RowFilter.Interleave documentation. -// -// The total serialized size of a RowFilter message must not -// exceed 4096 bytes, and RowFilters may not be nested within each other -// (in Chains or Interleaves) to a depth of more than 20. -message RowFilter { - // A RowFilter which sends rows through several RowFilters in sequence. - message Chain { - // The elements of "filters" are chained together to process the input row: - // in row -> f(0) -> intermediate row -> f(1) -> ... -> f(N) -> out row - // The full chain is executed atomically. - repeated RowFilter filters = 1; - } - - // A RowFilter which sends each row to each of several component - // RowFilters and interleaves the results. - message Interleave { - // The elements of "filters" all process a copy of the input row, and the - // results are pooled, sorted, and combined into a single output row. - // If multiple cells are produced with the same column and timestamp, - // they will all appear in the output row in an unspecified mutual order. 
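A quick sketch of the kind of composition described above: the request quoted earlier ("within every column of a particular family, give just the two most recent cells which are older than timestamp X") expressed as a Chain in the dict form the Python clients accept. The family name and timestamp cutoff are hypothetical:

    # Chain of three filters, applied in sequence to each row:
    #   1. keep only cells in column family "cf1",
    #   2. keep only cells strictly older than the chosen timestamp
    #      (end_timestamp_micros is an exclusive upper bound),
    #   3. within each remaining column, keep the two most recent cells.
    timestamp_x_micros = 1_600_000_000_000_000  # hypothetical cutoff

    row_filter = {
        'chain': {
            'filters': [
                {'family_name_regex_filter': 'cf1'},
                {'timestamp_range_filter': {'end_timestamp_micros': timestamp_x_micros}},
                {'cells_per_column_limit_filter': 2},
            ]
        }
    }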
- // Consider the following example, with three filters: - // - // input row - // | - // ----------------------------------------------------- - // | | | - // f(0) f(1) f(2) - // | | | - // 1: foo,bar,10,x foo,bar,10,z far,bar,7,a - // 2: foo,blah,11,z far,blah,5,x far,blah,5,x - // | | | - // ----------------------------------------------------- - // | - // 1: foo,bar,10,z // could have switched with #2 - // 2: foo,bar,10,x // could have switched with #1 - // 3: foo,blah,11,z - // 4: far,bar,7,a - // 5: far,blah,5,x // identical to #6 - // 6: far,blah,5,x // identical to #5 - // All interleaved filters are executed atomically. - repeated RowFilter filters = 1; - } - - // A RowFilter which evaluates one of two possible RowFilters, depending on - // whether or not a predicate RowFilter outputs any cells from the input row. - // - // IMPORTANT NOTE: The predicate filter does not execute atomically with the - // true and false filters, which may lead to inconsistent or unexpected - // results. Additionally, Condition filters have poor performance, especially - // when filters are set for the false condition. - message Condition { - // If "predicate_filter" outputs any cells, then "true_filter" will be - // evaluated on the input row. Otherwise, "false_filter" will be evaluated. - RowFilter predicate_filter = 1; - - // The filter to apply to the input row if "predicate_filter" returns any - // results. If not provided, no results will be returned in the true case. - RowFilter true_filter = 2; - - // The filter to apply to the input row if "predicate_filter" does not - // return any results. If not provided, no results will be returned in the - // false case. - RowFilter false_filter = 3; - } - - // Which of the possible RowFilter types to apply. If none are set, this - // RowFilter returns all cells in the input row. - oneof filter { - // Applies several RowFilters to the data in sequence, progressively - // narrowing the results. - Chain chain = 1; - - // Applies several RowFilters to the data in parallel and combines the - // results. - Interleave interleave = 2; - - // Applies one of two possible RowFilters to the data based on the output of - // a predicate RowFilter. - Condition condition = 3; - - // ADVANCED USE ONLY. - // Hook for introspection into the RowFilter. Outputs all cells directly to - // the output of the read rather than to any parent filter. Consider the - // following example: - // - // Chain( - // FamilyRegex("A"), - // Interleave( - // All(), - // Chain(Label("foo"), Sink()) - // ), - // QualifierRegex("B") - // ) - // - // A,A,1,w - // A,B,2,x - // B,B,4,z - // | - // FamilyRegex("A") - // | - // A,A,1,w - // A,B,2,x - // | - // +------------+-------------+ - // | | - // All() Label(foo) - // | | - // A,A,1,w A,A,1,w,labels:[foo] - // A,B,2,x A,B,2,x,labels:[foo] - // | | - // | Sink() --------------+ - // | | | - // +------------+ x------+ A,A,1,w,labels:[foo] - // | A,B,2,x,labels:[foo] - // A,A,1,w | - // A,B,2,x | - // | | - // QualifierRegex("B") | - // | | - // A,B,2,x | - // | | - // +--------------------------------+ - // | - // A,A,1,w,labels:[foo] - // A,B,2,x,labels:[foo] // could be switched - // A,B,2,x // could be switched - // - // Despite being excluded by the qualifier filter, a copy of every cell - // that reaches the sink is present in the final result. - // - // As with an [Interleave][google.bigtable.v1.RowFilter.Interleave], - // duplicate cells are possible, and appear in an unspecified mutual order. 
- // In this case we have a duplicate with column "A:B" and timestamp 2, - // because one copy passed through the all filter while the other was - // passed through the label and sink. Note that one copy has label "foo", - // while the other does not. - // - // Cannot be used within the `predicate_filter`, `true_filter`, or - // `false_filter` of a [Condition][google.bigtable.v1.RowFilter.Condition]. - bool sink = 16; - - // Matches all cells, regardless of input. Functionally equivalent to - // leaving `filter` unset, but included for completeness. - bool pass_all_filter = 17; - - // Does not match any cells, regardless of input. Useful for temporarily - // disabling just part of a filter. - bool block_all_filter = 18; - - // Matches only cells from rows whose keys satisfy the given RE2 regex. In - // other words, passes through the entire row when the key matches, and - // otherwise produces an empty row. - // Note that, since row keys can contain arbitrary bytes, the '\C' escape - // sequence must be used if a true wildcard is desired. The '.' character - // will not match the new line character '\n', which may be present in a - // binary key. - bytes row_key_regex_filter = 4; - - // Matches all cells from a row with probability p, and matches no cells - // from the row with probability 1-p. - double row_sample_filter = 14; - - // Matches only cells from columns whose families satisfy the given RE2 - // regex. For technical reasons, the regex must not contain the ':' - // character, even if it is not being used as a literal. - // Note that, since column families cannot contain the new line character - // '\n', it is sufficient to use '.' as a full wildcard when matching - // column family names. - string family_name_regex_filter = 5; - - // Matches only cells from columns whose qualifiers satisfy the given RE2 - // regex. - // Note that, since column qualifiers can contain arbitrary bytes, the '\C' - // escape sequence must be used if a true wildcard is desired. The '.' - // character will not match the new line character '\n', which may be - // present in a binary qualifier. - bytes column_qualifier_regex_filter = 6; - - // Matches only cells from columns within the given range. - ColumnRange column_range_filter = 7; - - // Matches only cells with timestamps within the given range. - TimestampRange timestamp_range_filter = 8; - - // Matches only cells with values that satisfy the given regular expression. - // Note that, since cell values can contain arbitrary bytes, the '\C' escape - // sequence must be used if a true wildcard is desired. The '.' character - // will not match the new line character '\n', which may be present in a - // binary value. - bytes value_regex_filter = 9; - - // Matches only cells with values that fall within the given range. - ValueRange value_range_filter = 15; - - // Skips the first N cells of each row, matching all subsequent cells. - // If duplicate cells are present, as is possible when using an Interleave, - // each copy of the cell is counted separately. - int32 cells_per_row_offset_filter = 10; - - // Matches only the first N cells of each row. - // If duplicate cells are present, as is possible when using an Interleave, - // each copy of the cell is counted separately. - int32 cells_per_row_limit_filter = 11; - - // Matches only the most recent N cells within each column. 
For example, - // if N=2, this filter would match column "foo:bar" at timestamps 10 and 9, - // skip all earlier cells in "foo:bar", and then begin matching again in - // column "foo:bar2". - // If duplicate cells are present, as is possible when using an Interleave, - // each copy of the cell is counted separately. - int32 cells_per_column_limit_filter = 12; - - // Replaces each cell's value with the empty string. - bool strip_value_transformer = 13; - - // Applies the given label to all cells in the output row. This allows - // the client to determine which results were produced from which part of - // the filter. - // - // Values must be at most 15 characters in length, and match the RE2 - // pattern [a-z0-9\\-]+ - // - // Due to a technical limitation, it is not currently possible to apply - // multiple labels to a cell. As a result, a Chain may have no more than - // one sub-filter which contains a apply_label_transformer. It is okay for - // an Interleave to contain multiple apply_label_transformers, as they will - // be applied to separate copies of the input. This may be relaxed in the - // future. - string apply_label_transformer = 19; - } -} - -// Specifies a particular change to be made to the contents of a row. -message Mutation { - // A Mutation which sets the value of the specified cell. - message SetCell { - // The name of the family into which new data should be written. - // Must match [-_.a-zA-Z0-9]+ - string family_name = 1; - - // The qualifier of the column into which new data should be written. - // Can be any byte string, including the empty string. - bytes column_qualifier = 2; - - // The timestamp of the cell into which new data should be written. - // Use -1 for current Bigtable server time. - // Otherwise, the client should set this value itself, noting that the - // default value is a timestamp of zero if the field is left unspecified. - // Values must match the "granularity" of the table (e.g. micros, millis). - int64 timestamp_micros = 3; - - // The value to be written into the specified cell. - bytes value = 4; - } - - // A Mutation which deletes cells from the specified column, optionally - // restricting the deletions to a given timestamp range. - message DeleteFromColumn { - // The name of the family from which cells should be deleted. - // Must match [-_.a-zA-Z0-9]+ - string family_name = 1; - - // The qualifier of the column from which cells should be deleted. - // Can be any byte string, including the empty string. - bytes column_qualifier = 2; - - // The range of timestamps within which cells should be deleted. - TimestampRange time_range = 3; - } - - // A Mutation which deletes all cells from the specified column family. - message DeleteFromFamily { - // The name of the family from which cells should be deleted. - // Must match [-_.a-zA-Z0-9]+ - string family_name = 1; - } - - // A Mutation which deletes all cells from the containing row. - message DeleteFromRow { - - } - - // Which of the possible Mutation types to apply. - oneof mutation { - // Set a cell's value. - SetCell set_cell = 1; - - // Deletes cells from a column. - DeleteFromColumn delete_from_column = 2; - - // Deletes cells from a column family. - DeleteFromFamily delete_from_family = 3; - - // Deletes cells from the entire row. - DeleteFromRow delete_from_row = 4; - } -} - -// Specifies an atomic read/modify/write operation on the latest value of the -// specified column. -message ReadModifyWriteRule { - // The name of the family to which the read/modify/write should be applied. 
- // Must match [-_.a-zA-Z0-9]+ - string family_name = 1; - - // The qualifier of the column to which the read/modify/write should be - // applied. - // Can be any byte string, including the empty string. - bytes column_qualifier = 2; - - // The rule used to determine the column's new latest value from its current - // latest value. - oneof rule { - // Rule specifying that "append_value" be appended to the existing value. - // If the targeted cell is unset, it will be treated as containing the - // empty string. - bytes append_value = 3; - - // Rule specifying that "increment_amount" be added to the existing value. - // If the targeted cell is unset, it will be treated as containing a zero. - // Otherwise, the targeted cell must contain an 8-byte value (interpreted - // as a 64-bit big-endian signed integer), or the entire request will fail. - int64 increment_amount = 4; - } -} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_instance_admin.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_instance_admin.proto deleted file mode 100644 index ec992ea0f818..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_instance_admin.proto +++ /dev/null @@ -1,456 +0,0 @@ -// Copyright 2018 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.bigtable.admin.v2; - -import "google/api/annotations.proto"; -import "google/bigtable/admin/v2/instance.proto"; -import "google/iam/v1/iam_policy.proto"; -import "google/iam/v1/policy.proto"; -import "google/longrunning/operations.proto"; -import "google/protobuf/empty.proto"; -import "google/protobuf/field_mask.proto"; -import "google/protobuf/timestamp.proto"; - -option csharp_namespace = "Google.Cloud.Bigtable.Admin.V2"; -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/v2;admin"; -option java_multiple_files = true; -option java_outer_classname = "BigtableInstanceAdminProto"; -option java_package = "com.google.bigtable.admin.v2"; -option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2"; - - -// Service for creating, configuring, and deleting Cloud Bigtable Instances and -// Clusters. Provides access to the Instance and Cluster schemas only, not the -// tables' metadata or data stored in those tables. -service BigtableInstanceAdmin { - // Create an instance within a project. - rpc CreateInstance(CreateInstanceRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v2/{parent=projects/*}/instances" - body: "*" - }; - } - - // Gets information about an instance. - rpc GetInstance(GetInstanceRequest) returns (Instance) { - option (google.api.http) = { - get: "/v2/{name=projects/*/instances/*}" - }; - } - - // Lists information about instances in a project. 
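Looping back to the ReadModifyWriteRule message deleted above, a hypothetical sketch of its two rule kinds (append_value and increment_amount) in the dict form accepted by the read_modify_write_row method shown earlier in this patch; the column names are invented:

    import struct

    # Rules are applied in order to the latest cell of each targeted column.
    rules = [
        # Append b',visited' to whatever is currently in cf1:trail
        # (an unset cell is treated as the empty string).
        {'family_name': 'cf1', 'column_qualifier': b'trail', 'append_value': b',visited'},
        # Add 1 to the 64-bit big-endian signed integer stored in cf1:counter
        # (an unset cell is treated as zero).
        {'family_name': 'cf1', 'column_qualifier': b'counter', 'increment_amount': 1},
    ]

    # A pre-existing counter cell must hold exactly 8 bytes, e.g.:
    initial_counter_value = struct.pack('>q', 0)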
- rpc ListInstances(ListInstancesRequest) returns (ListInstancesResponse) { - option (google.api.http) = { - get: "/v2/{parent=projects/*}/instances" - }; - } - - // Updates an instance within a project. - rpc UpdateInstance(Instance) returns (Instance) { - option (google.api.http) = { - put: "/v2/{name=projects/*/instances/*}" - body: "*" - }; - } - - // Partially updates an instance within a project. - rpc PartialUpdateInstance(PartialUpdateInstanceRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - patch: "/v2/{instance.name=projects/*/instances/*}" - body: "instance" - }; - } - - // Delete an instance from a project. - rpc DeleteInstance(DeleteInstanceRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v2/{name=projects/*/instances/*}" - }; - } - - // Creates a cluster within an instance. - rpc CreateCluster(CreateClusterRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v2/{parent=projects/*/instances/*}/clusters" - body: "cluster" - }; - } - - // Gets information about a cluster. - rpc GetCluster(GetClusterRequest) returns (Cluster) { - option (google.api.http) = { - get: "/v2/{name=projects/*/instances/*/clusters/*}" - }; - } - - // Lists information about clusters in an instance. - rpc ListClusters(ListClustersRequest) returns (ListClustersResponse) { - option (google.api.http) = { - get: "/v2/{parent=projects/*/instances/*}/clusters" - }; - } - - // Updates a cluster within an instance. - rpc UpdateCluster(Cluster) returns (google.longrunning.Operation) { - option (google.api.http) = { - put: "/v2/{name=projects/*/instances/*/clusters/*}" - body: "*" - }; - } - - // Deletes a cluster from an instance. - rpc DeleteCluster(DeleteClusterRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v2/{name=projects/*/instances/*/clusters/*}" - }; - } - - // Creates an app profile within an instance. - rpc CreateAppProfile(CreateAppProfileRequest) returns (AppProfile) { - option (google.api.http) = { - post: "/v2/{parent=projects/*/instances/*}/appProfiles" - body: "app_profile" - }; - } - - // Gets information about an app profile. - rpc GetAppProfile(GetAppProfileRequest) returns (AppProfile) { - option (google.api.http) = { - get: "/v2/{name=projects/*/instances/*/appProfiles/*}" - }; - } - - // Lists information about app profiles in an instance. - rpc ListAppProfiles(ListAppProfilesRequest) returns (ListAppProfilesResponse) { - option (google.api.http) = { - get: "/v2/{parent=projects/*/instances/*}/appProfiles" - }; - } - - // Updates an app profile within an instance. - rpc UpdateAppProfile(UpdateAppProfileRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - patch: "/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}" - body: "app_profile" - }; - } - - // Deletes an app profile from an instance. - rpc DeleteAppProfile(DeleteAppProfileRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v2/{name=projects/*/instances/*/appProfiles/*}" - }; - } - - // Gets the access control policy for an instance resource. Returns an empty - // policy if an instance exists but does not have a policy set. - rpc GetIamPolicy(google.iam.v1.GetIamPolicyRequest) returns (google.iam.v1.Policy) { - option (google.api.http) = { - post: "/v2/{resource=projects/*/instances/*}:getIamPolicy" - body: "*" - }; - } - - // Sets the access control policy on an instance resource. Replaces any - // existing policy. 
- rpc SetIamPolicy(google.iam.v1.SetIamPolicyRequest) returns (google.iam.v1.Policy) { - option (google.api.http) = { - post: "/v2/{resource=projects/*/instances/*}:setIamPolicy" - body: "*" - }; - } - - // Returns permissions that the caller has on the specified instance resource. - rpc TestIamPermissions(google.iam.v1.TestIamPermissionsRequest) returns (google.iam.v1.TestIamPermissionsResponse) { - option (google.api.http) = { - post: "/v2/{resource=projects/*/instances/*}:testIamPermissions" - body: "*" - }; - } -} - -// Request message for BigtableInstanceAdmin.CreateInstance. -message CreateInstanceRequest { - // The unique name of the project in which to create the new instance. - // Values are of the form `projects/`. - string parent = 1; - - // The ID to be used when referring to the new instance within its project, - // e.g., just `myinstance` rather than - // `projects/myproject/instances/myinstance`. - string instance_id = 2; - - // The instance to create. - // Fields marked `OutputOnly` must be left blank. - Instance instance = 3; - - // The clusters to be created within the instance, mapped by desired - // cluster ID, e.g., just `mycluster` rather than - // `projects/myproject/instances/myinstance/clusters/mycluster`. - // Fields marked `OutputOnly` must be left blank. - // Currently, at most two clusters can be specified. - map clusters = 4; -} - -// Request message for BigtableInstanceAdmin.GetInstance. -message GetInstanceRequest { - // The unique name of the requested instance. Values are of the form - // `projects//instances/`. - string name = 1; -} - -// Request message for BigtableInstanceAdmin.ListInstances. -message ListInstancesRequest { - // The unique name of the project for which a list of instances is requested. - // Values are of the form `projects/`. - string parent = 1; - - // DEPRECATED: This field is unused and ignored. - string page_token = 2; -} - -// Response message for BigtableInstanceAdmin.ListInstances. -message ListInstancesResponse { - // The list of requested instances. - repeated Instance instances = 1; - - // Locations from which Instance information could not be retrieved, - // due to an outage or some other transient condition. - // Instances whose Clusters are all in one of the failed locations - // may be missing from `instances`, and Instances with at least one - // Cluster in a failed location may only have partial information returned. - // Values are of the form `projects//locations/` - repeated string failed_locations = 2; - - // DEPRECATED: This field is unused and ignored. - string next_page_token = 3; -} - -// Request message for BigtableInstanceAdmin.PartialUpdateInstance. -message PartialUpdateInstanceRequest { - // The Instance which will (partially) replace the current value. - Instance instance = 1; - - // The subset of Instance fields which should be replaced. - // Must be explicitly set. - google.protobuf.FieldMask update_mask = 2; -} - -// Request message for BigtableInstanceAdmin.DeleteInstance. -message DeleteInstanceRequest { - // The unique name of the instance to be deleted. - // Values are of the form `projects//instances/`. - string name = 1; -} - -// Request message for BigtableInstanceAdmin.CreateCluster. -message CreateClusterRequest { - // The unique name of the instance in which to create the new cluster. - // Values are of the form - // `projects//instances/`. 
- string parent = 1; - - // The ID to be used when referring to the new cluster within its instance, - // e.g., just `mycluster` rather than - // `projects/myproject/instances/myinstance/clusters/mycluster`. - string cluster_id = 2; - - // The cluster to be created. - // Fields marked `OutputOnly` must be left blank. - Cluster cluster = 3; -} - -// Request message for BigtableInstanceAdmin.GetCluster. -message GetClusterRequest { - // The unique name of the requested cluster. Values are of the form - // `projects//instances//clusters/`. - string name = 1; -} - -// Request message for BigtableInstanceAdmin.ListClusters. -message ListClustersRequest { - // The unique name of the instance for which a list of clusters is requested. - // Values are of the form `projects//instances/`. - // Use ` = '-'` to list Clusters for all Instances in a project, - // e.g., `projects/myproject/instances/-`. - string parent = 1; - - // DEPRECATED: This field is unused and ignored. - string page_token = 2; -} - -// Response message for BigtableInstanceAdmin.ListClusters. -message ListClustersResponse { - // The list of requested clusters. - repeated Cluster clusters = 1; - - // Locations from which Cluster information could not be retrieved, - // due to an outage or some other transient condition. - // Clusters from these locations may be missing from `clusters`, - // or may only have partial information returned. - // Values are of the form `projects//locations/` - repeated string failed_locations = 2; - - // DEPRECATED: This field is unused and ignored. - string next_page_token = 3; -} - -// Request message for BigtableInstanceAdmin.DeleteCluster. -message DeleteClusterRequest { - // The unique name of the cluster to be deleted. Values are of the form - // `projects//instances//clusters/`. - string name = 1; -} - -// The metadata for the Operation returned by CreateInstance. -message CreateInstanceMetadata { - // The request that prompted the initiation of this CreateInstance operation. - CreateInstanceRequest original_request = 1; - - // The time at which the original request was received. - google.protobuf.Timestamp request_time = 2; - - // The time at which the operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 3; -} - -// The metadata for the Operation returned by UpdateInstance. -message UpdateInstanceMetadata { - // The request that prompted the initiation of this UpdateInstance operation. - PartialUpdateInstanceRequest original_request = 1; - - // The time at which the original request was received. - google.protobuf.Timestamp request_time = 2; - - // The time at which the operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 3; -} - -// The metadata for the Operation returned by CreateCluster. -message CreateClusterMetadata { - // The request that prompted the initiation of this CreateCluster operation. - CreateClusterRequest original_request = 1; - - // The time at which the original request was received. - google.protobuf.Timestamp request_time = 2; - - // The time at which the operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 3; -} - -// The metadata for the Operation returned by UpdateCluster. -message UpdateClusterMetadata { - // The request that prompted the initiation of this UpdateCluster operation. - Cluster original_request = 1; - - // The time at which the original request was received. 
- google.protobuf.Timestamp request_time = 2; - - // The time at which the operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 3; -} - -// Request message for BigtableInstanceAdmin.CreateAppProfile. -message CreateAppProfileRequest { - // The unique name of the instance in which to create the new app profile. - // Values are of the form - // `projects//instances/`. - string parent = 1; - - // The ID to be used when referring to the new app profile within its - // instance, e.g., just `myprofile` rather than - // `projects/myproject/instances/myinstance/appProfiles/myprofile`. - string app_profile_id = 2; - - // The app profile to be created. - // Fields marked `OutputOnly` will be ignored. - AppProfile app_profile = 3; - - // If true, ignore safety checks when creating the app profile. - bool ignore_warnings = 4; -} - -// Request message for BigtableInstanceAdmin.GetAppProfile. -message GetAppProfileRequest { - // The unique name of the requested app profile. Values are of the form - // `projects//instances//appProfiles/`. - string name = 1; -} - -// Request message for BigtableInstanceAdmin.ListAppProfiles. -message ListAppProfilesRequest { - // The unique name of the instance for which a list of app profiles is - // requested. Values are of the form - // `projects//instances/`. - // Use ` = '-'` to list AppProfiles for all Instances in a project, - // e.g., `projects/myproject/instances/-`. - string parent = 1; - - // Maximum number of results per page. - // CURRENTLY UNIMPLEMENTED AND IGNORED. - int32 page_size = 3; - - // The value of `next_page_token` returned by a previous call. - string page_token = 2; -} - -// Response message for BigtableInstanceAdmin.ListAppProfiles. -message ListAppProfilesResponse { - // The list of requested app profiles. - repeated AppProfile app_profiles = 1; - - // Set if not all app profiles could be returned in a single response. - // Pass this value to `page_token` in another request to get the next - // page of results. - string next_page_token = 2; - - // Locations from which AppProfile information could not be retrieved, - // due to an outage or some other transient condition. - // AppProfiles from these locations may be missing from `app_profiles`. - // Values are of the form `projects//locations/` - repeated string failed_locations = 3; -} - -// Request message for BigtableInstanceAdmin.UpdateAppProfile. -message UpdateAppProfileRequest { - // The app profile which will (partially) replace the current value. - AppProfile app_profile = 1; - - // The subset of app profile fields which should be replaced. - // If unset, all fields will be replaced. - google.protobuf.FieldMask update_mask = 2; - - // If true, ignore safety checks when updating the app profile. - bool ignore_warnings = 3; -} - - -// Request message for BigtableInstanceAdmin.DeleteAppProfile. -message DeleteAppProfileRequest { - // The unique name of the app profile to be deleted. Values are of the form - // `projects//instances//appProfiles/`. - string name = 1; - - // If true, ignore safety checks when deleting the app profile. - bool ignore_warnings = 2; -} - -// The metadata for the Operation returned by UpdateAppProfile. 
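The instance-admin requests above (CreateInstance, CreateCluster, and the app-profile calls) are wrapped by the admin-enabled client in this package. A minimal sketch follows, assuming admin credentials and hypothetical "my-project"/"my-instance"/"my-cluster" names; the exact keyword arguments reflect this package's Instance/Cluster helpers and are assumptions, not part of the proto.

    from google.cloud import bigtable

    client = bigtable.Client(project="my-project", admin=True)  # hypothetical project id
    instance = client.instance("my-instance", display_name="My Instance")
    cluster = instance.cluster("my-cluster", location_id="us-central1-f", serve_nodes=3)

    # CreateInstance is a long-running operation; result() waits for it to
    # finish (CreateInstanceMetadata records request_time/finish_time).
    operation = instance.create(clusters=[cluster])
    operation.result(timeout=300)

    # ListInstances has no pagination; failed_locations reports locations
    # that could not be reached, as described above.
    instances, failed_locations = client.list_instances()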
-message UpdateAppProfileMetadata { - -} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py deleted file mode 100644 index f6d825d89ebf..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2.py +++ /dev/null @@ -1,1804 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/bigtable_v2/proto/bigtable.proto -"""Generated protocol buffer code.""" -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.api import client_pb2 as google_dot_api_dot_client__pb2 -from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 -from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 -from google.cloud.bigtable_v2.proto import ( - data_pb2 as google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2, -) -from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2 -from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/bigtable_v2/proto/bigtable.proto", - package="google.bigtable.v2", - syntax="proto3", - serialized_options=b"\n\026com.google.bigtable.v2B\rBigtableProtoP\001Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2\352\002\033Google::Cloud::Bigtable::V2\352AW\n\035bigtable.googleapis.com/Table\0226projects/{project}/instances/{instance}/tables/{table}", - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n-google/cloud/bigtable_v2/proto/bigtable.proto\x12\x12google.bigtable.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a)google/cloud/bigtable_v2/proto/data.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x17google/rpc/status.proto"\xd1\x01\n\x0fReadRowsRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x05 \x01(\t\x12(\n\x04rows\x18\x02 \x01(\x0b\x32\x1a.google.bigtable.v2.RowSet\x12-\n\x06\x66ilter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x12\n\nrows_limit\x18\x04 \x01(\x03"\xf8\x02\n\x10ReadRowsResponse\x12>\n\x06\x63hunks\x18\x01 \x03(\x0b\x32..google.bigtable.v2.ReadRowsResponse.CellChunk\x12\x1c\n\x14last_scanned_row_key\x18\x02 \x01(\x0c\x1a\x85\x02\n\tCellChunk\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x31\n\x0b\x66\x61mily_name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\tqualifier\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.BytesValue\x12\x18\n\x10timestamp_micros\x18\x04 \x01(\x03\x12\x0e\n\x06labels\x18\x05 \x03(\t\x12\r\n\x05value\x18\x06 \x01(\x0c\x12\x12\n\nvalue_size\x18\x07 \x01(\x05\x12\x13\n\treset_row\x18\x08 \x01(\x08H\x00\x12\x14\n\ncommit_row\x18\t \x01(\x08H\x00\x42\x0c\n\nrow_status"i\n\x14SampleRowKeysRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x02 
\x01(\t">\n\x15SampleRowKeysResponse\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x14\n\x0coffset_bytes\x18\x02 \x01(\x03"\xb1\x01\n\x10MutateRowRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x04 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12\x34\n\tmutations\x18\x03 \x03(\x0b\x32\x1c.google.bigtable.v2.MutationB\x03\xe0\x41\x02"\x13\n\x11MutateRowResponse"\xf9\x01\n\x11MutateRowsRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x03 \x01(\t\x12\x41\n\x07\x65ntries\x18\x02 \x03(\x0b\x32+.google.bigtable.v2.MutateRowsRequest.EntryB\x03\xe0\x41\x02\x1aN\n\x05\x45ntry\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x34\n\tmutations\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.MutationB\x03\xe0\x41\x02"\x8f\x01\n\x12MutateRowsResponse\x12=\n\x07\x65ntries\x18\x01 \x03(\x0b\x32,.google.bigtable.v2.MutateRowsResponse.Entry\x1a:\n\x05\x45ntry\x12\r\n\x05index\x18\x01 \x01(\x03\x12"\n\x06status\x18\x02 \x01(\x0b\x32\x12.google.rpc.Status"\xa9\x02\n\x18\x43heckAndMutateRowRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x07 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12\x37\n\x10predicate_filter\x18\x06 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x34\n\x0etrue_mutations\x18\x04 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\x12\x35\n\x0f\x66\x61lse_mutations\x18\x05 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation"6\n\x19\x43heckAndMutateRowResponse\x12\x19\n\x11predicate_matched\x18\x01 \x01(\x08"\xc1\x01\n\x19ReadModifyWriteRowRequest\x12\x39\n\ntable_name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x62igtable.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x04 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12;\n\x05rules\x18\x03 \x03(\x0b\x32\'.google.bigtable.v2.ReadModifyWriteRuleB\x03\xe0\x41\x02"B\n\x1aReadModifyWriteRowResponse\x12$\n\x03row\x18\x01 
\x01(\x0b\x32\x17.google.bigtable.v2.Row2\xc4\x0e\n\x08\x42igtable\x12\xc6\x01\n\x08ReadRows\x12#.google.bigtable.v2.ReadRowsRequest\x1a$.google.bigtable.v2.ReadRowsResponse"m\x82\xd3\xe4\x93\x02>"9/v2/{table_name=projects/*/instances/*/tables/*}:readRows:\x01*\xda\x41\ntable_name\xda\x41\x19table_name,app_profile_id0\x01\x12\xd7\x01\n\rSampleRowKeys\x12(.google.bigtable.v2.SampleRowKeysRequest\x1a).google.bigtable.v2.SampleRowKeysResponse"o\x82\xd3\xe4\x93\x02@\x12>/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys\xda\x41\ntable_name\xda\x41\x19table_name,app_profile_id0\x01\x12\xed\x01\n\tMutateRow\x12$.google.bigtable.v2.MutateRowRequest\x1a%.google.bigtable.v2.MutateRowResponse"\x92\x01\x82\xd3\xe4\x93\x02?":/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow:\x01*\xda\x41\x1ctable_name,row_key,mutations\xda\x41+table_name,row_key,mutations,app_profile_id\x12\xde\x01\n\nMutateRows\x12%.google.bigtable.v2.MutateRowsRequest\x1a&.google.bigtable.v2.MutateRowsResponse"\x7f\x82\xd3\xe4\x93\x02@";/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows:\x01*\xda\x41\x12table_name,entries\xda\x41!table_name,entries,app_profile_id0\x01\x12\xd9\x02\n\x11\x43heckAndMutateRow\x12,.google.bigtable.v2.CheckAndMutateRowRequest\x1a-.google.bigtable.v2.CheckAndMutateRowResponse"\xe6\x01\x82\xd3\xe4\x93\x02G"B/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow:\x01*\xda\x41\x42table_name,row_key,predicate_filter,true_mutations,false_mutations\xda\x41Qtable_name,row_key,predicate_filter,true_mutations,false_mutations,app_profile_id\x12\x89\x02\n\x12ReadModifyWriteRow\x12-.google.bigtable.v2.ReadModifyWriteRowRequest\x1a..google.bigtable.v2.ReadModifyWriteRowResponse"\x93\x01\x82\xd3\xe4\x93\x02H"C/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow:\x01*\xda\x41\x18table_name,row_key,rules\xda\x41\'table_name,row_key,rules,app_profile_id\x1a\xdb\x02\xca\x41\x17\x62igtable.googleapis.com\xd2\x41\xbd\x02https://www.googleapis.com/auth/bigtable.data,https://www.googleapis.com/auth/bigtable.data.readonly,https://www.googleapis.com/auth/cloud-bigtable.data,https://www.googleapis.com/auth/cloud-bigtable.data.readonly,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-onlyB\x93\x02\n\x16\x63om.google.bigtable.v2B\rBigtableProtoP\x01Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2\xea\x02\x1bGoogle::Cloud::Bigtable::V2\xea\x41W\n\x1d\x62igtable.googleapis.com/Table\x12\x36projects/{project}/instances/{instance}/tables/{table}b\x06proto3', - dependencies=[ - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - google_dot_api_dot_client__pb2.DESCRIPTOR, - google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, - google_dot_api_dot_resource__pb2.DESCRIPTOR, - google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2.DESCRIPTOR, - google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR, - google_dot_rpc_dot_status__pb2.DESCRIPTOR, - ], -) - - -_READROWSREQUEST = _descriptor.Descriptor( - name="ReadRowsRequest", - full_name="google.bigtable.v2.ReadRowsRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="table_name", - full_name="google.bigtable.v2.ReadRowsRequest.table_name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, 
- containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="app_profile_id", - full_name="google.bigtable.v2.ReadRowsRequest.app_profile_id", - index=1, - number=5, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="rows", - full_name="google.bigtable.v2.ReadRowsRequest.rows", - index=2, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="filter", - full_name="google.bigtable.v2.ReadRowsRequest.filter", - index=3, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="rows_limit", - full_name="google.bigtable.v2.ReadRowsRequest.rows_limit", - index=4, - number=4, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=285, - serialized_end=494, -) - - -_READROWSRESPONSE_CELLCHUNK = _descriptor.Descriptor( - name="CellChunk", - full_name="google.bigtable.v2.ReadRowsResponse.CellChunk", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="row_key", - full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.row_key", - index=0, - number=1, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="family_name", - full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.family_name", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="qualifier", - full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.qualifier", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - 
containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="timestamp_micros", - full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.timestamp_micros", - index=3, - number=4, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="labels", - full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.labels", - index=4, - number=5, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.value", - index=5, - number=6, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value_size", - full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.value_size", - index=6, - number=7, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="reset_row", - full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.reset_row", - index=7, - number=8, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="commit_row", - full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.commit_row", - index=8, - number=9, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="row_status", - full_name="google.bigtable.v2.ReadRowsResponse.CellChunk.row_status", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=612, - serialized_end=873, -) - -_READROWSRESPONSE = _descriptor.Descriptor( - name="ReadRowsResponse", - full_name="google.bigtable.v2.ReadRowsResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="chunks", - 
full_name="google.bigtable.v2.ReadRowsResponse.chunks", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="last_scanned_row_key", - full_name="google.bigtable.v2.ReadRowsResponse.last_scanned_row_key", - index=1, - number=2, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[ - _READROWSRESPONSE_CELLCHUNK, - ], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=497, - serialized_end=873, -) - - -_SAMPLEROWKEYSREQUEST = _descriptor.Descriptor( - name="SampleRowKeysRequest", - full_name="google.bigtable.v2.SampleRowKeysRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="table_name", - full_name="google.bigtable.v2.SampleRowKeysRequest.table_name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="app_profile_id", - full_name="google.bigtable.v2.SampleRowKeysRequest.app_profile_id", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=875, - serialized_end=980, -) - - -_SAMPLEROWKEYSRESPONSE = _descriptor.Descriptor( - name="SampleRowKeysResponse", - full_name="google.bigtable.v2.SampleRowKeysResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="row_key", - full_name="google.bigtable.v2.SampleRowKeysResponse.row_key", - index=0, - number=1, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="offset_bytes", - full_name="google.bigtable.v2.SampleRowKeysResponse.offset_bytes", - index=1, - number=2, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - 
file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=982, - serialized_end=1044, -) - - -_MUTATEROWREQUEST = _descriptor.Descriptor( - name="MutateRowRequest", - full_name="google.bigtable.v2.MutateRowRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="table_name", - full_name="google.bigtable.v2.MutateRowRequest.table_name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="app_profile_id", - full_name="google.bigtable.v2.MutateRowRequest.app_profile_id", - index=1, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="row_key", - full_name="google.bigtable.v2.MutateRowRequest.row_key", - index=2, - number=2, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="mutations", - full_name="google.bigtable.v2.MutateRowRequest.mutations", - index=3, - number=3, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1047, - serialized_end=1224, -) - - -_MUTATEROWRESPONSE = _descriptor.Descriptor( - name="MutateRowResponse", - full_name="google.bigtable.v2.MutateRowResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1226, - serialized_end=1245, -) - - -_MUTATEROWSREQUEST_ENTRY = _descriptor.Descriptor( - name="Entry", - full_name="google.bigtable.v2.MutateRowsRequest.Entry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="row_key", - full_name="google.bigtable.v2.MutateRowsRequest.Entry.row_key", - index=0, - number=1, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - 
is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="mutations", - full_name="google.bigtable.v2.MutateRowsRequest.Entry.mutations", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1419, - serialized_end=1497, -) - -_MUTATEROWSREQUEST = _descriptor.Descriptor( - name="MutateRowsRequest", - full_name="google.bigtable.v2.MutateRowsRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="table_name", - full_name="google.bigtable.v2.MutateRowsRequest.table_name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="app_profile_id", - full_name="google.bigtable.v2.MutateRowsRequest.app_profile_id", - index=1, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="entries", - full_name="google.bigtable.v2.MutateRowsRequest.entries", - index=2, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[ - _MUTATEROWSREQUEST_ENTRY, - ], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1248, - serialized_end=1497, -) - - -_MUTATEROWSRESPONSE_ENTRY = _descriptor.Descriptor( - name="Entry", - full_name="google.bigtable.v2.MutateRowsResponse.Entry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="index", - full_name="google.bigtable.v2.MutateRowsResponse.Entry.index", - index=0, - number=1, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="status", - full_name="google.bigtable.v2.MutateRowsResponse.Entry.status", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - 
default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1585, - serialized_end=1643, -) - -_MUTATEROWSRESPONSE = _descriptor.Descriptor( - name="MutateRowsResponse", - full_name="google.bigtable.v2.MutateRowsResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="entries", - full_name="google.bigtable.v2.MutateRowsResponse.entries", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[ - _MUTATEROWSRESPONSE_ENTRY, - ], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1500, - serialized_end=1643, -) - - -_CHECKANDMUTATEROWREQUEST = _descriptor.Descriptor( - name="CheckAndMutateRowRequest", - full_name="google.bigtable.v2.CheckAndMutateRowRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="table_name", - full_name="google.bigtable.v2.CheckAndMutateRowRequest.table_name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="app_profile_id", - full_name="google.bigtable.v2.CheckAndMutateRowRequest.app_profile_id", - index=1, - number=7, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="row_key", - full_name="google.bigtable.v2.CheckAndMutateRowRequest.row_key", - index=2, - number=2, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="predicate_filter", - full_name="google.bigtable.v2.CheckAndMutateRowRequest.predicate_filter", - index=3, - number=6, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="true_mutations", - 
full_name="google.bigtable.v2.CheckAndMutateRowRequest.true_mutations", - index=4, - number=4, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="false_mutations", - full_name="google.bigtable.v2.CheckAndMutateRowRequest.false_mutations", - index=5, - number=5, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1646, - serialized_end=1943, -) - - -_CHECKANDMUTATEROWRESPONSE = _descriptor.Descriptor( - name="CheckAndMutateRowResponse", - full_name="google.bigtable.v2.CheckAndMutateRowResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="predicate_matched", - full_name="google.bigtable.v2.CheckAndMutateRowResponse.predicate_matched", - index=0, - number=1, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1945, - serialized_end=1999, -) - - -_READMODIFYWRITEROWREQUEST = _descriptor.Descriptor( - name="ReadModifyWriteRowRequest", - full_name="google.bigtable.v2.ReadModifyWriteRowRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="table_name", - full_name="google.bigtable.v2.ReadModifyWriteRowRequest.table_name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A\037\n\035bigtable.googleapis.com/Table", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="app_profile_id", - full_name="google.bigtable.v2.ReadModifyWriteRowRequest.app_profile_id", - index=1, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="row_key", - full_name="google.bigtable.v2.ReadModifyWriteRowRequest.row_key", - index=2, - number=2, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - 
serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="rules", - full_name="google.bigtable.v2.ReadModifyWriteRowRequest.rules", - index=3, - number=3, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2002, - serialized_end=2195, -) - - -_READMODIFYWRITEROWRESPONSE = _descriptor.Descriptor( - name="ReadModifyWriteRowResponse", - full_name="google.bigtable.v2.ReadModifyWriteRowResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="row", - full_name="google.bigtable.v2.ReadModifyWriteRowResponse.row", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2197, - serialized_end=2263, -) - -_READROWSREQUEST.fields_by_name[ - "rows" -].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._ROWSET -_READROWSREQUEST.fields_by_name[ - "filter" -].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._ROWFILTER -_READROWSRESPONSE_CELLCHUNK.fields_by_name[ - "family_name" -].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE -_READROWSRESPONSE_CELLCHUNK.fields_by_name[ - "qualifier" -].message_type = google_dot_protobuf_dot_wrappers__pb2._BYTESVALUE -_READROWSRESPONSE_CELLCHUNK.containing_type = _READROWSRESPONSE -_READROWSRESPONSE_CELLCHUNK.oneofs_by_name["row_status"].fields.append( - _READROWSRESPONSE_CELLCHUNK.fields_by_name["reset_row"] -) -_READROWSRESPONSE_CELLCHUNK.fields_by_name[ - "reset_row" -].containing_oneof = _READROWSRESPONSE_CELLCHUNK.oneofs_by_name["row_status"] -_READROWSRESPONSE_CELLCHUNK.oneofs_by_name["row_status"].fields.append( - _READROWSRESPONSE_CELLCHUNK.fields_by_name["commit_row"] -) -_READROWSRESPONSE_CELLCHUNK.fields_by_name[ - "commit_row" -].containing_oneof = _READROWSRESPONSE_CELLCHUNK.oneofs_by_name["row_status"] -_READROWSRESPONSE.fields_by_name["chunks"].message_type = _READROWSRESPONSE_CELLCHUNK -_MUTATEROWREQUEST.fields_by_name[ - "mutations" -].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._MUTATION -_MUTATEROWSREQUEST_ENTRY.fields_by_name[ - "mutations" -].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._MUTATION -_MUTATEROWSREQUEST_ENTRY.containing_type = _MUTATEROWSREQUEST -_MUTATEROWSREQUEST.fields_by_name["entries"].message_type = _MUTATEROWSREQUEST_ENTRY -_MUTATEROWSRESPONSE_ENTRY.fields_by_name[ - "status" -].message_type = google_dot_rpc_dot_status__pb2._STATUS -_MUTATEROWSRESPONSE_ENTRY.containing_type = _MUTATEROWSRESPONSE -_MUTATEROWSRESPONSE.fields_by_name["entries"].message_type = 
_MUTATEROWSRESPONSE_ENTRY -_CHECKANDMUTATEROWREQUEST.fields_by_name[ - "predicate_filter" -].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._ROWFILTER -_CHECKANDMUTATEROWREQUEST.fields_by_name[ - "true_mutations" -].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._MUTATION -_CHECKANDMUTATEROWREQUEST.fields_by_name[ - "false_mutations" -].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._MUTATION -_READMODIFYWRITEROWREQUEST.fields_by_name[ - "rules" -].message_type = ( - google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._READMODIFYWRITERULE -) -_READMODIFYWRITEROWRESPONSE.fields_by_name[ - "row" -].message_type = google_dot_cloud_dot_bigtable__v2_dot_proto_dot_data__pb2._ROW -DESCRIPTOR.message_types_by_name["ReadRowsRequest"] = _READROWSREQUEST -DESCRIPTOR.message_types_by_name["ReadRowsResponse"] = _READROWSRESPONSE -DESCRIPTOR.message_types_by_name["SampleRowKeysRequest"] = _SAMPLEROWKEYSREQUEST -DESCRIPTOR.message_types_by_name["SampleRowKeysResponse"] = _SAMPLEROWKEYSRESPONSE -DESCRIPTOR.message_types_by_name["MutateRowRequest"] = _MUTATEROWREQUEST -DESCRIPTOR.message_types_by_name["MutateRowResponse"] = _MUTATEROWRESPONSE -DESCRIPTOR.message_types_by_name["MutateRowsRequest"] = _MUTATEROWSREQUEST -DESCRIPTOR.message_types_by_name["MutateRowsResponse"] = _MUTATEROWSRESPONSE -DESCRIPTOR.message_types_by_name["CheckAndMutateRowRequest"] = _CHECKANDMUTATEROWREQUEST -DESCRIPTOR.message_types_by_name[ - "CheckAndMutateRowResponse" -] = _CHECKANDMUTATEROWRESPONSE -DESCRIPTOR.message_types_by_name[ - "ReadModifyWriteRowRequest" -] = _READMODIFYWRITEROWREQUEST -DESCRIPTOR.message_types_by_name[ - "ReadModifyWriteRowResponse" -] = _READMODIFYWRITEROWRESPONSE -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -ReadRowsRequest = _reflection.GeneratedProtocolMessageType( - "ReadRowsRequest", - (_message.Message,), - { - "DESCRIPTOR": _READROWSREQUEST, - "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", - "__doc__": """Request message for Bigtable.ReadRows. - - Attributes: - table_name: - Required. The unique name of the table from which to read. - Values are of the form - ``projects//instances//tables/
``. - app_profile_id: - This value specifies routing for replication. If not - specified, the “default” application profile will be used. - rows: - The row keys and/or ranges to read. If not specified, reads - from all rows. - filter: - The filter to apply to the contents of the specified row(s). - If unset, reads the entirety of each row. - rows_limit: - The read will terminate after committing to N rows’ worth of - results. The default (zero) is to return all results. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadRowsRequest) - }, -) -_sym_db.RegisterMessage(ReadRowsRequest) - -ReadRowsResponse = _reflection.GeneratedProtocolMessageType( - "ReadRowsResponse", - (_message.Message,), - { - "CellChunk": _reflection.GeneratedProtocolMessageType( - "CellChunk", - (_message.Message,), - { - "DESCRIPTOR": _READROWSRESPONSE_CELLCHUNK, - "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", - "__doc__": """Specifies a piece of a row’s contents returned as part of the read - response stream. - - Attributes: - row_key: - The row key for this chunk of data. If the row key is empty, - this CellChunk is a continuation of the same row as the - previous CellChunk in the response stream, even if that - CellChunk was in a previous ReadRowsResponse message. - family_name: - The column family name for this chunk of data. If this message - is not present this CellChunk is a continuation of the same - column family as the previous CellChunk. The empty string can - occur as a column family name in a response so clients must - check explicitly for the presence of this message, not just - for ``family_name.value`` being non-empty. - qualifier: - The column qualifier for this chunk of data. If this message - is not present, this CellChunk is a continuation of the same - column as the previous CellChunk. Column qualifiers may be - empty so clients must check for the presence of this message, - not just for ``qualifier.value`` being non-empty. - timestamp_micros: - The cell’s stored timestamp, which also uniquely identifies it - within its column. Values are always expressed in - microseconds, but individual tables may set a coarser - granularity to further restrict the allowed values. For - example, a table which specifies millisecond granularity will - only allow values of ``timestamp_micros`` which are multiples - of 1000. Timestamps are only set in the first CellChunk per - cell (for cells split into multiple chunks). - labels: - Labels applied to the cell by a - [RowFilter][google.bigtable.v2.RowFilter]. Labels are only set - on the first CellChunk per cell. - value: - The value stored in the cell. Cell values can be split across - multiple CellChunks. In that case only the value field will be - set in CellChunks after the first: the timestamp and labels - will only be present in the first CellChunk, even if the first - CellChunk came in a previous ReadRowsResponse. - value_size: - If this CellChunk is part of a chunked cell value and this is - not the final chunk of that cell, value_size will be set to - the total length of the cell value. The client can use this - size to pre-allocate memory to hold the full cell value. - row_status: - Signals to the client concerning previous CellChunks received. - reset_row: - Indicates that the client should drop all previous chunks for - ``row_key``, as it will be re-read from the beginning. - commit_row: - Indicates that the client can safely process all previous - chunks for ``row_key``, as its data has been fully read. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadRowsResponse.CellChunk) - }, - ), - "DESCRIPTOR": _READROWSRESPONSE, - "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", - "__doc__": """Response message for Bigtable.ReadRows. - - Attributes: - chunks: - A collection of a row’s contents as part of the read request. - last_scanned_row_key: - Optionally the server might return the row key of the last row - it has scanned. The client can use this to construct a more - efficient retry request if needed: any row keys or portions of - ranges less than this row key can be dropped from the request. - This is primarily useful for cases where the server has read a - lot of data that was filtered out since the last committed row - key, allowing the client to skip that work on a retry. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadRowsResponse) - }, -) -_sym_db.RegisterMessage(ReadRowsResponse) -_sym_db.RegisterMessage(ReadRowsResponse.CellChunk) - -SampleRowKeysRequest = _reflection.GeneratedProtocolMessageType( - "SampleRowKeysRequest", - (_message.Message,), - { - "DESCRIPTOR": _SAMPLEROWKEYSREQUEST, - "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", - "__doc__": """Request message for Bigtable.SampleRowKeys. - - Attributes: - table_name: - Required. The unique name of the table from which to sample - row keys. Values are of the form - ``projects//instances//tables/
``. - app_profile_id: - This value specifies routing for replication. If not - specified, the “default” application profile will be used. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.SampleRowKeysRequest) - }, -) -_sym_db.RegisterMessage(SampleRowKeysRequest) - -SampleRowKeysResponse = _reflection.GeneratedProtocolMessageType( - "SampleRowKeysResponse", - (_message.Message,), - { - "DESCRIPTOR": _SAMPLEROWKEYSRESPONSE, - "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", - "__doc__": """Response message for Bigtable.SampleRowKeys. - - Attributes: - row_key: - Sorted streamed sequence of sample row keys in the table. The - table might have contents before the first row key in the list - and after the last one, but a key containing the empty string - indicates “end of table” and will be the last response given, - if present. Note that row keys in this list may not have ever - been written to or read from, and users should therefore not - make any assumptions about the row key structure that are - specific to their use case. - offset_bytes: - Approximate total storage space used by all rows in the table - which precede ``row_key``. Buffering the contents of all rows - between two subsequent samples would require space roughly - equal to the difference in their ``offset_bytes`` fields. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.SampleRowKeysResponse) - }, -) -_sym_db.RegisterMessage(SampleRowKeysResponse) - -MutateRowRequest = _reflection.GeneratedProtocolMessageType( - "MutateRowRequest", - (_message.Message,), - { - "DESCRIPTOR": _MUTATEROWREQUEST, - "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", - "__doc__": """Request message for Bigtable.MutateRow. - - Attributes: - table_name: - Required. The unique name of the table to which the mutation - should be applied. Values are of the form - ``projects//instances//tables/
``. - app_profile_id: - This value specifies routing for replication. If not - specified, the “default” application profile will be used. - row_key: - Required. The key of the row to which the mutation should be - applied. - mutations: - Required. Changes to be atomically applied to the specified - row. Entries are applied in order, meaning that earlier - mutations can be masked by later ones. Must contain at least - one entry and at most 100000. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowRequest) - }, -) -_sym_db.RegisterMessage(MutateRowRequest) - -MutateRowResponse = _reflection.GeneratedProtocolMessageType( - "MutateRowResponse", - (_message.Message,), - { - "DESCRIPTOR": _MUTATEROWRESPONSE, - "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", - "__doc__": """Response message for Bigtable.MutateRow.""", - # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowResponse) - }, -) -_sym_db.RegisterMessage(MutateRowResponse) - -MutateRowsRequest = _reflection.GeneratedProtocolMessageType( - "MutateRowsRequest", - (_message.Message,), - { - "Entry": _reflection.GeneratedProtocolMessageType( - "Entry", - (_message.Message,), - { - "DESCRIPTOR": _MUTATEROWSREQUEST_ENTRY, - "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", - "__doc__": """A mutation for a given row. - - Attributes: - row_key: - The key of the row to which the ``mutations`` should be - applied. - mutations: - Required. Changes to be atomically applied to the specified - row. Mutations are applied in order, meaning that earlier - mutations can be masked by later ones. You must specify at - least one mutation. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsRequest.Entry) - }, - ), - "DESCRIPTOR": _MUTATEROWSREQUEST, - "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", - "__doc__": """Request message for BigtableService.MutateRows. - - Attributes: - table_name: - Required. The unique name of the table to which the mutations - should be applied. - app_profile_id: - This value specifies routing for replication. If not - specified, the “default” application profile will be used. - entries: - Required. The row keys and corresponding mutations to be - applied in bulk. Each entry is applied as an atomic mutation, - but the entries may be applied in arbitrary order (even - between entries for the same row). At least one entry must be - specified, and in total the entries can contain at most 100000 - mutations. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsRequest) - }, -) -_sym_db.RegisterMessage(MutateRowsRequest) -_sym_db.RegisterMessage(MutateRowsRequest.Entry) - -MutateRowsResponse = _reflection.GeneratedProtocolMessageType( - "MutateRowsResponse", - (_message.Message,), - { - "Entry": _reflection.GeneratedProtocolMessageType( - "Entry", - (_message.Message,), - { - "DESCRIPTOR": _MUTATEROWSRESPONSE_ENTRY, - "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", - "__doc__": """The result of applying a passed mutation in the original request. - - Attributes: - index: - The index into the original request’s ``entries`` list of the - Entry for which a result is being reported. - status: - The result of the request Entry identified by ``index``. - Depending on how requests are batched during execution, it is - possible for one Entry to fail due to an error with another - Entry. In the event that this occurs, the same error will be - reported for both entries. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsResponse.Entry) - }, - ), - "DESCRIPTOR": _MUTATEROWSRESPONSE, - "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", - "__doc__": """Response message for BigtableService.MutateRows. - - Attributes: - entries: - One or more results for Entries from the batch request. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.MutateRowsResponse) - }, -) -_sym_db.RegisterMessage(MutateRowsResponse) -_sym_db.RegisterMessage(MutateRowsResponse.Entry) - -CheckAndMutateRowRequest = _reflection.GeneratedProtocolMessageType( - "CheckAndMutateRowRequest", - (_message.Message,), - { - "DESCRIPTOR": _CHECKANDMUTATEROWREQUEST, - "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", - "__doc__": """Request message for Bigtable.CheckAndMutateRow. - - Attributes: - table_name: - Required. The unique name of the table to which the - conditional mutation should be applied. Values are of the form - ``projects//instances//tables/
``. - app_profile_id: - This value specifies routing for replication. If not - specified, the “default” application profile will be used. - row_key: - Required. The key of the row to which the conditional mutation - should be applied. - predicate_filter: - The filter to be applied to the contents of the specified row. - Depending on whether or not any results are yielded, either - ``true_mutations`` or ``false_mutations`` will be executed. If - unset, checks that the row contains any values at all. - true_mutations: - Changes to be atomically applied to the specified row if - ``predicate_filter`` yields at least one cell when applied to - ``row_key``. Entries are applied in order, meaning that - earlier mutations can be masked by later ones. Must contain at - least one entry if ``false_mutations`` is empty, and at most - 100000. - false_mutations: - Changes to be atomically applied to the specified row if - ``predicate_filter`` does not yield any cells when applied to - ``row_key``. Entries are applied in order, meaning that - earlier mutations can be masked by later ones. Must contain at - least one entry if ``true_mutations`` is empty, and at most - 100000. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.CheckAndMutateRowRequest) - }, -) -_sym_db.RegisterMessage(CheckAndMutateRowRequest) - -CheckAndMutateRowResponse = _reflection.GeneratedProtocolMessageType( - "CheckAndMutateRowResponse", - (_message.Message,), - { - "DESCRIPTOR": _CHECKANDMUTATEROWRESPONSE, - "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", - "__doc__": """Response message for Bigtable.CheckAndMutateRow. - - Attributes: - predicate_matched: - Whether or not the request’s ``predicate_filter`` yielded any - results for the specified row. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.CheckAndMutateRowResponse) - }, -) -_sym_db.RegisterMessage(CheckAndMutateRowResponse) - -ReadModifyWriteRowRequest = _reflection.GeneratedProtocolMessageType( - "ReadModifyWriteRowRequest", - (_message.Message,), - { - "DESCRIPTOR": _READMODIFYWRITEROWREQUEST, - "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", - "__doc__": """Request message for Bigtable.ReadModifyWriteRow. - - Attributes: - table_name: - Required. The unique name of the table to which the - read/modify/write rules should be applied. Values are of the - form - ``projects//instances//tables/
``. - app_profile_id: - This value specifies routing for replication. If not - specified, the “default” application profile will be used. - row_key: - Required. The key of the row to which the read/modify/write - rules should be applied. - rules: - Required. Rules specifying how the specified row’s contents - are to be transformed into writes. Entries are applied in - order, meaning that earlier rules will affect the results of - later ones. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadModifyWriteRowRequest) - }, -) -_sym_db.RegisterMessage(ReadModifyWriteRowRequest) - -ReadModifyWriteRowResponse = _reflection.GeneratedProtocolMessageType( - "ReadModifyWriteRowResponse", - (_message.Message,), - { - "DESCRIPTOR": _READMODIFYWRITEROWRESPONSE, - "__module__": "google.cloud.bigtable_v2.proto.bigtable_pb2", - "__doc__": """Response message for Bigtable.ReadModifyWriteRow. - - Attributes: - row: - A Row containing the new contents of all cells modified by the - request. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadModifyWriteRowResponse) - }, -) -_sym_db.RegisterMessage(ReadModifyWriteRowResponse) - - -DESCRIPTOR._options = None -_READROWSREQUEST.fields_by_name["table_name"]._options = None -_SAMPLEROWKEYSREQUEST.fields_by_name["table_name"]._options = None -_MUTATEROWREQUEST.fields_by_name["table_name"]._options = None -_MUTATEROWREQUEST.fields_by_name["row_key"]._options = None -_MUTATEROWREQUEST.fields_by_name["mutations"]._options = None -_MUTATEROWSREQUEST_ENTRY.fields_by_name["mutations"]._options = None -_MUTATEROWSREQUEST.fields_by_name["table_name"]._options = None -_MUTATEROWSREQUEST.fields_by_name["entries"]._options = None -_CHECKANDMUTATEROWREQUEST.fields_by_name["table_name"]._options = None -_CHECKANDMUTATEROWREQUEST.fields_by_name["row_key"]._options = None -_READMODIFYWRITEROWREQUEST.fields_by_name["table_name"]._options = None -_READMODIFYWRITEROWREQUEST.fields_by_name["row_key"]._options = None -_READMODIFYWRITEROWREQUEST.fields_by_name["rules"]._options = None - -_BIGTABLE = _descriptor.ServiceDescriptor( - name="Bigtable", - full_name="google.bigtable.v2.Bigtable", - file=DESCRIPTOR, - index=0, - serialized_options=b"\312A\027bigtable.googleapis.com\322A\275\002https://www.googleapis.com/auth/bigtable.data,https://www.googleapis.com/auth/bigtable.data.readonly,https://www.googleapis.com/auth/cloud-bigtable.data,https://www.googleapis.com/auth/cloud-bigtable.data.readonly,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-only", - create_key=_descriptor._internal_create_key, - serialized_start=2266, - serialized_end=4126, - methods=[ - _descriptor.MethodDescriptor( - name="ReadRows", - full_name="google.bigtable.v2.Bigtable.ReadRows", - index=0, - containing_service=None, - input_type=_READROWSREQUEST, - output_type=_READROWSRESPONSE, - serialized_options=b'\202\323\344\223\002>"9/v2/{table_name=projects/*/instances/*/tables/*}:readRows:\001*\332A\ntable_name\332A\031table_name,app_profile_id', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="SampleRowKeys", - full_name="google.bigtable.v2.Bigtable.SampleRowKeys", - index=1, - containing_service=None, - input_type=_SAMPLEROWKEYSREQUEST, - output_type=_SAMPLEROWKEYSRESPONSE, - serialized_options=b"\202\323\344\223\002@\022>/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys\332A\ntable_name\332A\031table_name,app_profile_id", - create_key=_descriptor._internal_create_key, - 
), - _descriptor.MethodDescriptor( - name="MutateRow", - full_name="google.bigtable.v2.Bigtable.MutateRow", - index=2, - containing_service=None, - input_type=_MUTATEROWREQUEST, - output_type=_MUTATEROWRESPONSE, - serialized_options=b'\202\323\344\223\002?":/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow:\001*\332A\034table_name,row_key,mutations\332A+table_name,row_key,mutations,app_profile_id', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="MutateRows", - full_name="google.bigtable.v2.Bigtable.MutateRows", - index=3, - containing_service=None, - input_type=_MUTATEROWSREQUEST, - output_type=_MUTATEROWSRESPONSE, - serialized_options=b'\202\323\344\223\002@";/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows:\001*\332A\022table_name,entries\332A!table_name,entries,app_profile_id', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="CheckAndMutateRow", - full_name="google.bigtable.v2.Bigtable.CheckAndMutateRow", - index=4, - containing_service=None, - input_type=_CHECKANDMUTATEROWREQUEST, - output_type=_CHECKANDMUTATEROWRESPONSE, - serialized_options=b'\202\323\344\223\002G"B/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow:\001*\332ABtable_name,row_key,predicate_filter,true_mutations,false_mutations\332AQtable_name,row_key,predicate_filter,true_mutations,false_mutations,app_profile_id', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="ReadModifyWriteRow", - full_name="google.bigtable.v2.Bigtable.ReadModifyWriteRow", - index=5, - containing_service=None, - input_type=_READMODIFYWRITEROWREQUEST, - output_type=_READMODIFYWRITEROWRESPONSE, - serialized_options=b"\202\323\344\223\002H\"C/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow:\001*\332A\030table_name,row_key,rules\332A'table_name,row_key,rules,app_profile_id", - create_key=_descriptor._internal_create_key, - ), - ], -) -_sym_db.RegisterServiceDescriptor(_BIGTABLE) - -DESCRIPTOR.services_by_name["Bigtable"] = _BIGTABLE - -# @@protoc_insertion_point(module_scope) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2_grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2_grpc.py deleted file mode 100644 index 2a094a7f9d48..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_pb2_grpc.py +++ /dev/null @@ -1,313 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc - -from google.cloud.bigtable_v2.proto import ( - bigtable_pb2 as google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2, -) - - -class BigtableStub(object): - """Service for reading from and writing to existing Bigtable tables.""" - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. 
- """ - self.ReadRows = channel.unary_stream( - "/google.bigtable.v2.Bigtable/ReadRows", - request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadRowsRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadRowsResponse.FromString, - ) - self.SampleRowKeys = channel.unary_stream( - "/google.bigtable.v2.Bigtable/SampleRowKeys", - request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.SampleRowKeysRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.SampleRowKeysResponse.FromString, - ) - self.MutateRow = channel.unary_unary( - "/google.bigtable.v2.Bigtable/MutateRow", - request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowResponse.FromString, - ) - self.MutateRows = channel.unary_stream( - "/google.bigtable.v2.Bigtable/MutateRows", - request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowsRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowsResponse.FromString, - ) - self.CheckAndMutateRow = channel.unary_unary( - "/google.bigtable.v2.Bigtable/CheckAndMutateRow", - request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.CheckAndMutateRowRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.CheckAndMutateRowResponse.FromString, - ) - self.ReadModifyWriteRow = channel.unary_unary( - "/google.bigtable.v2.Bigtable/ReadModifyWriteRow", - request_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadModifyWriteRowRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadModifyWriteRowResponse.FromString, - ) - - -class BigtableServicer(object): - """Service for reading from and writing to existing Bigtable tables.""" - - def ReadRows(self, request, context): - """Streams back the contents of all requested rows in key order, optionally - applying the same Reader filter to each. Depending on their size, - rows and cells may be broken up across multiple responses, but - atomicity of each row will still be preserved. See the - ReadRowsResponse documentation for details. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def SampleRowKeys(self, request, context): - """Returns a sample of row keys in the table. The returned row keys will - delimit contiguous sections of the table of approximately equal size, - which can be used to break up the data for distributed tasks like - mapreduces. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def MutateRow(self, request, context): - """Mutates a row atomically. Cells already present in the row are left - unchanged unless explicitly changed by `mutation`. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def MutateRows(self, request, context): - """Mutates multiple rows in a batch. 
Each individual row is mutated - atomically as in MutateRow, but the entire batch is not executed - atomically. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def CheckAndMutateRow(self, request, context): - """Mutates a row atomically based on the output of a predicate Reader filter.""" - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ReadModifyWriteRow(self, request, context): - """Modifies a row atomically on the server. The method reads the latest - existing timestamp and value from the specified columns and writes a new - entry based on pre-defined read/modify/write rules. The new value for the - timestamp is the greater of the existing timestamp or the current server - time. The method returns the new contents of all modified cells. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - -def add_BigtableServicer_to_server(servicer, server): - rpc_method_handlers = { - "ReadRows": grpc.unary_stream_rpc_method_handler( - servicer.ReadRows, - request_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadRowsRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadRowsResponse.SerializeToString, - ), - "SampleRowKeys": grpc.unary_stream_rpc_method_handler( - servicer.SampleRowKeys, - request_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.SampleRowKeysRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.SampleRowKeysResponse.SerializeToString, - ), - "MutateRow": grpc.unary_unary_rpc_method_handler( - servicer.MutateRow, - request_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowResponse.SerializeToString, - ), - "MutateRows": grpc.unary_stream_rpc_method_handler( - servicer.MutateRows, - request_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowsRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowsResponse.SerializeToString, - ), - "CheckAndMutateRow": grpc.unary_unary_rpc_method_handler( - servicer.CheckAndMutateRow, - request_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.CheckAndMutateRowRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.CheckAndMutateRowResponse.SerializeToString, - ), - "ReadModifyWriteRow": grpc.unary_unary_rpc_method_handler( - servicer.ReadModifyWriteRow, - request_deserializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadModifyWriteRowRequest.FromString, - response_serializer=google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadModifyWriteRowResponse.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - "google.bigtable.v2.Bigtable", rpc_method_handlers - ) - server.add_generic_rpc_handlers((generic_handler,)) - - -# This class is part of an EXPERIMENTAL API. 
-class Bigtable(object): - """Service for reading from and writing to existing Bigtable tables.""" - - @staticmethod - def ReadRows( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_stream( - request, - target, - "/google.bigtable.v2.Bigtable/ReadRows", - google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadRowsRequest.SerializeToString, - google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadRowsResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def SampleRowKeys( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_stream( - request, - target, - "/google.bigtable.v2.Bigtable/SampleRowKeys", - google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.SampleRowKeysRequest.SerializeToString, - google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.SampleRowKeysResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def MutateRow( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.v2.Bigtable/MutateRow", - google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowRequest.SerializeToString, - google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def MutateRows( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_stream( - request, - target, - "/google.bigtable.v2.Bigtable/MutateRows", - google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowsRequest.SerializeToString, - google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.MutateRowsResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def CheckAndMutateRow( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.bigtable.v2.Bigtable/CheckAndMutateRow", - google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.CheckAndMutateRowRequest.SerializeToString, - google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.CheckAndMutateRowResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def ReadModifyWriteRow( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - 
"/google.bigtable.v2.Bigtable/ReadModifyWriteRow", - google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadModifyWriteRowRequest.SerializeToString, - google_dot_cloud_dot_bigtable__v2_dot_proto_dot_bigtable__pb2.ReadModifyWriteRowResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_service.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_service.proto deleted file mode 100644 index b1f729517a47..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_service.proto +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2018 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.v1; - -import "google/api/annotations.proto"; -import "google/bigtable/v1/bigtable_data.proto"; -import "google/bigtable/v1/bigtable_service_messages.proto"; -import "google/protobuf/empty.proto"; - -option go_package = "google.golang.org/genproto/googleapis/bigtable/v1;bigtable"; -option java_generic_services = true; -option java_multiple_files = true; -option java_outer_classname = "BigtableServicesProto"; -option java_package = "com.google.bigtable.v1"; - - -// Service for reading from and writing to existing Bigtables. -service BigtableService { - // Streams back the contents of all requested rows, optionally applying - // the same Reader filter to each. Depending on their size, rows may be - // broken up across multiple responses, but atomicity of each row will still - // be preserved. - rpc ReadRows(ReadRowsRequest) returns (stream ReadRowsResponse) { - option (google.api.http) = { - post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows:read" - body: "*" - }; - } - - // Returns a sample of row keys in the table. The returned row keys will - // delimit contiguous sections of the table of approximately equal size, - // which can be used to break up the data for distributed tasks like - // mapreduces. - rpc SampleRowKeys(SampleRowKeysRequest) returns (stream SampleRowKeysResponse) { - option (google.api.http) = { - get: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows:sampleKeys" - }; - } - - // Mutates a row atomically. Cells already present in the row are left - // unchanged unless explicitly changed by 'mutation'. - rpc MutateRow(MutateRowRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:mutate" - body: "*" - }; - } - - // Mutates multiple rows in a batch. Each individual row is mutated - // atomically as in MutateRow, but the entire batch is not executed - // atomically. 
- rpc MutateRows(MutateRowsRequest) returns (MutateRowsResponse) { - option (google.api.http) = { - post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}:mutateRows" - body: "*" - }; - } - - // Mutates a row atomically based on the output of a predicate Reader filter. - rpc CheckAndMutateRow(CheckAndMutateRowRequest) returns (CheckAndMutateRowResponse) { - option (google.api.http) = { - post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:checkAndMutate" - body: "*" - }; - } - - // Modifies a row atomically, reading the latest existing timestamp/value from - // the specified columns and writing a new value at - // max(existing timestamp, current server time) based on pre-defined - // read/modify/write rules. Returns the new contents of all modified cells. - rpc ReadModifyWriteRow(ReadModifyWriteRowRequest) returns (Row) { - option (google.api.http) = { - post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:readModifyWrite" - body: "*" - }; - } -} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_service_messages.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_service_messages.proto deleted file mode 100644 index d734ececaec3..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_service_messages.proto +++ /dev/null @@ -1,218 +0,0 @@ -// Copyright 2018 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.v1; - -import "google/bigtable/v1/bigtable_data.proto"; -import "google/rpc/status.proto"; - -option go_package = "google.golang.org/genproto/googleapis/bigtable/v1;bigtable"; -option java_multiple_files = true; -option java_outer_classname = "BigtableServiceMessagesProto"; -option java_package = "com.google.bigtable.v1"; - - -// Request message for BigtableServer.ReadRows. -message ReadRowsRequest { - // The unique name of the table from which to read. - string table_name = 1; - - // If neither row_key nor row_range is set, reads from all rows. - oneof target { - // The key of a single row from which to read. - bytes row_key = 2; - - // A range of rows from which to read. - RowRange row_range = 3; - - // A set of rows from which to read. Entries need not be in order, and will - // be deduplicated before reading. - // The total serialized size of the set must not exceed 1MB. - RowSet row_set = 8; - } - - // The filter to apply to the contents of the specified row(s). If unset, - // reads the entire table. - RowFilter filter = 5; - - // By default, rows are read sequentially, producing results which are - // guaranteed to arrive in increasing row order. Setting - // "allow_row_interleaving" to true allows multiple rows to be interleaved in - // the response stream, which increases throughput but breaks this guarantee, - // and may force the client to use more memory to buffer partially-received - // rows. Cannot be set to true when specifying "num_rows_limit". 
- bool allow_row_interleaving = 6; - - // The read will terminate after committing to N rows' worth of results. The - // default (zero) is to return all results. - // Note that "allow_row_interleaving" cannot be set to true when this is set. - int64 num_rows_limit = 7; -} - -// Response message for BigtableService.ReadRows. -message ReadRowsResponse { - // Specifies a piece of a row's contents returned as part of the read - // response stream. - message Chunk { - oneof chunk { - // A subset of the data from a particular row. As long as no "reset_row" - // is received in between, multiple "row_contents" from the same row are - // from the same atomic view of that row, and will be received in the - // expected family/column/timestamp order. - Family row_contents = 1; - - // Indicates that the client should drop all previous chunks for - // "row_key", as it will be re-read from the beginning. - bool reset_row = 2; - - // Indicates that the client can safely process all previous chunks for - // "row_key", as its data has been fully read. - bool commit_row = 3; - } - } - - // The key of the row for which we're receiving data. - // Results will be received in increasing row key order, unless - // "allow_row_interleaving" was specified in the request. - bytes row_key = 1; - - // One or more chunks of the row specified by "row_key". - repeated Chunk chunks = 2; -} - -// Request message for BigtableService.SampleRowKeys. -message SampleRowKeysRequest { - // The unique name of the table from which to sample row keys. - string table_name = 1; -} - -// Response message for BigtableService.SampleRowKeys. -message SampleRowKeysResponse { - // Sorted streamed sequence of sample row keys in the table. The table might - // have contents before the first row key in the list and after the last one, - // but a key containing the empty string indicates "end of table" and will be - // the last response given, if present. - // Note that row keys in this list may not have ever been written to or read - // from, and users should therefore not make any assumptions about the row key - // structure that are specific to their use case. - bytes row_key = 1; - - // Approximate total storage space used by all rows in the table which precede - // "row_key". Buffering the contents of all rows between two subsequent - // samples would require space roughly equal to the difference in their - // "offset_bytes" fields. - int64 offset_bytes = 2; -} - -// Request message for BigtableService.MutateRow. -message MutateRowRequest { - // The unique name of the table to which the mutation should be applied. - string table_name = 1; - - // The key of the row to which the mutation should be applied. - bytes row_key = 2; - - // Changes to be atomically applied to the specified row. Entries are applied - // in order, meaning that earlier mutations can be masked by later ones. - // Must contain at least one entry and at most 100000. - repeated Mutation mutations = 3; -} - -// Request message for BigtableService.MutateRows. -message MutateRowsRequest { - message Entry { - // The key of the row to which the `mutations` should be applied. - bytes row_key = 1; - - // Changes to be atomically applied to the specified row. Mutations are - // applied in order, meaning that earlier mutations can be masked by - // later ones. - // At least one mutation must be specified. - repeated Mutation mutations = 2; - } - - // The unique name of the table to which the mutations should be applied. 
- string table_name = 1; - - // The row keys/mutations to be applied in bulk. - // Each entry is applied as an atomic mutation, but the entries may be - // applied in arbitrary order (even between entries for the same row). - // At least one entry must be specified, and in total the entries may - // contain at most 100000 mutations. - repeated Entry entries = 2; -} - -// Response message for BigtableService.MutateRows. -message MutateRowsResponse { - // The results for each Entry from the request, presented in the order - // in which the entries were originally given. - // Depending on how requests are batched during execution, it is possible - // for one Entry to fail due to an error with another Entry. In the event - // that this occurs, the same error will be reported for both entries. - repeated google.rpc.Status statuses = 1; -} - -// Request message for BigtableService.CheckAndMutateRowRequest -message CheckAndMutateRowRequest { - // The unique name of the table to which the conditional mutation should be - // applied. - string table_name = 1; - - // The key of the row to which the conditional mutation should be applied. - bytes row_key = 2; - - // The filter to be applied to the contents of the specified row. Depending - // on whether or not any results are yielded, either "true_mutations" or - // "false_mutations" will be executed. If unset, checks that the row contains - // any values at all. - RowFilter predicate_filter = 6; - - // Changes to be atomically applied to the specified row if "predicate_filter" - // yields at least one cell when applied to "row_key". Entries are applied in - // order, meaning that earlier mutations can be masked by later ones. - // Must contain at least one entry if "false_mutations" is empty, and at most - // 100000. - repeated Mutation true_mutations = 4; - - // Changes to be atomically applied to the specified row if "predicate_filter" - // does not yield any cells when applied to "row_key". Entries are applied in - // order, meaning that earlier mutations can be masked by later ones. - // Must contain at least one entry if "true_mutations" is empty, and at most - // 100000. - repeated Mutation false_mutations = 5; -} - -// Response message for BigtableService.CheckAndMutateRowRequest. -message CheckAndMutateRowResponse { - // Whether or not the request's "predicate_filter" yielded any results for - // the specified row. - bool predicate_matched = 1; -} - -// Request message for BigtableService.ReadModifyWriteRowRequest. -message ReadModifyWriteRowRequest { - // The unique name of the table to which the read/modify/write rules should be - // applied. - string table_name = 1; - - // The key of the row to which the read/modify/write rules should be applied. - bytes row_key = 2; - - // Rules specifying how the specified row's contents are to be transformed - // into writes. Entries are applied in order, meaning that earlier rules will - // affect the results of later ones. - repeated ReadModifyWriteRule rules = 3; -} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_table_admin.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_table_admin.proto deleted file mode 100644 index 2d5bddf302aa..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_table_admin.proto +++ /dev/null @@ -1,525 +0,0 @@ -// Copyright 2018 Google LLC. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.bigtable.admin.v2; - -import "google/api/annotations.proto"; -import "google/bigtable/admin/v2/table.proto"; -import "google/longrunning/operations.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/empty.proto"; -import "google/protobuf/timestamp.proto"; - -option csharp_namespace = "Google.Cloud.Bigtable.Admin.V2"; -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/v2;admin"; -option java_multiple_files = true; -option java_outer_classname = "BigtableTableAdminProto"; -option java_package = "com.google.bigtable.admin.v2"; -option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2"; - - -// Service for creating, configuring, and deleting Cloud Bigtable tables. -// -// -// Provides access to the table schemas only, not the data stored within -// the tables. -service BigtableTableAdmin { - // Creates a new table in the specified instance. - // The table can be created with a full set of initial column families, - // specified in the request. - rpc CreateTable(CreateTableRequest) returns (Table) { - option (google.api.http) = { - post: "/v2/{parent=projects/*/instances/*}/tables" - body: "*" - }; - } - - // Creates a new table from the specified snapshot. The target table must - // not exist. The snapshot and the table must be in the same instance. - // - // Note: This is a private alpha release of Cloud Bigtable snapshots. This - // feature is not currently available to most Cloud Bigtable customers. This - // feature might be changed in backward-incompatible ways and is not - // recommended for production use. It is not subject to any SLA or deprecation - // policy. - rpc CreateTableFromSnapshot(CreateTableFromSnapshotRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot" - body: "*" - }; - } - - // Lists all tables served from a specified instance. - rpc ListTables(ListTablesRequest) returns (ListTablesResponse) { - option (google.api.http) = { - get: "/v2/{parent=projects/*/instances/*}/tables" - }; - } - - // Gets metadata information about the specified table. - rpc GetTable(GetTableRequest) returns (Table) { - option (google.api.http) = { - get: "/v2/{name=projects/*/instances/*/tables/*}" - }; - } - - // Permanently deletes a specified table and all of its data. - rpc DeleteTable(DeleteTableRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v2/{name=projects/*/instances/*/tables/*}" - }; - } - - // Performs a series of column family modifications on the specified table. - // Either all or none of the modifications will occur before this method - // returns, but data requests received prior to that point may see a table - // where only some modifications have taken effect. 
- rpc ModifyColumnFamilies(ModifyColumnFamiliesRequest) returns (Table) { - option (google.api.http) = { - post: "/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies" - body: "*" - }; - } - - // Permanently drop/delete a row range from a specified table. The request can - // specify whether to delete all rows in a table, or only those that match a - // particular prefix. - rpc DropRowRange(DropRowRangeRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - post: "/v2/{name=projects/*/instances/*/tables/*}:dropRowRange" - body: "*" - }; - } - - // Generates a consistency token for a Table, which can be used in - // CheckConsistency to check whether mutations to the table that finished - // before this call started have been replicated. The tokens will be available - // for 90 days. - rpc GenerateConsistencyToken(GenerateConsistencyTokenRequest) returns (GenerateConsistencyTokenResponse) { - option (google.api.http) = { - post: "/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken" - body: "*" - }; - } - - // Checks replication consistency based on a consistency token, that is, if - // replication has caught up based on the conditions specified in the token - // and the check request. - rpc CheckConsistency(CheckConsistencyRequest) returns (CheckConsistencyResponse) { - option (google.api.http) = { - post: "/v2/{name=projects/*/instances/*/tables/*}:checkConsistency" - body: "*" - }; - } - - // Creates a new snapshot in the specified cluster from the specified - // source table. The cluster and the table must be in the same instance. - // - // Note: This is a private alpha release of Cloud Bigtable snapshots. This - // feature is not currently available to most Cloud Bigtable customers. This - // feature might be changed in backward-incompatible ways and is not - // recommended for production use. It is not subject to any SLA or deprecation - // policy. - rpc SnapshotTable(SnapshotTableRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v2/{name=projects/*/instances/*/tables/*}:snapshot" - body: "*" - }; - } - - // Gets metadata information about the specified snapshot. - // - // Note: This is a private alpha release of Cloud Bigtable snapshots. This - // feature is not currently available to most Cloud Bigtable customers. This - // feature might be changed in backward-incompatible ways and is not - // recommended for production use. It is not subject to any SLA or deprecation - // policy. - rpc GetSnapshot(GetSnapshotRequest) returns (Snapshot) { - option (google.api.http) = { - get: "/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}" - }; - } - - // Lists all snapshots associated with the specified cluster. - // - // Note: This is a private alpha release of Cloud Bigtable snapshots. This - // feature is not currently available to most Cloud Bigtable customers. This - // feature might be changed in backward-incompatible ways and is not - // recommended for production use. It is not subject to any SLA or deprecation - // policy. - rpc ListSnapshots(ListSnapshotsRequest) returns (ListSnapshotsResponse) { - option (google.api.http) = { - get: "/v2/{parent=projects/*/instances/*/clusters/*}/snapshots" - }; - } - - // Permanently deletes the specified snapshot. - // - // Note: This is a private alpha release of Cloud Bigtable snapshots. This - // feature is not currently available to most Cloud Bigtable customers. 
This - // feature might be changed in backward-incompatible ways and is not - // recommended for production use. It is not subject to any SLA or deprecation - // policy. - rpc DeleteSnapshot(DeleteSnapshotRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}" - }; - } -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] -message CreateTableRequest { - // An initial split point for a newly created table. - message Split { - // Row key to use as an initial tablet boundary. - bytes key = 1; - } - - // The unique name of the instance in which to create the table. - // Values are of the form `projects//instances/`. - string parent = 1; - - // The name by which the new table should be referred to within the parent - // instance, e.g., `foobar` rather than `/tables/foobar`. - string table_id = 2; - - // The Table to create. - Table table = 3; - - // The optional list of row keys that will be used to initially split the - // table into several tablets (tablets are similar to HBase regions). - // Given two split keys, `s1` and `s2`, three tablets will be created, - // spanning the key ranges: `[, s1), [s1, s2), [s2, )`. - // - // Example: - // - // * Row keys := `["a", "apple", "custom", "customer_1", "customer_2",` - // `"other", "zz"]` - // * initial_split_keys := `["apple", "customer_1", "customer_2", "other"]` - // * Key assignment: - // - Tablet 1 `[, apple) => {"a"}.` - // - Tablet 2 `[apple, customer_1) => {"apple", "custom"}.` - // - Tablet 3 `[customer_1, customer_2) => {"customer_1"}.` - // - Tablet 4 `[customer_2, other) => {"customer_2"}.` - // - Tablet 5 `[other, ) => {"other", "zz"}.` - repeated Split initial_splits = 4; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot] -// -// Note: This is a private alpha release of Cloud Bigtable snapshots. This -// feature is not currently available to most Cloud Bigtable customers. This -// feature might be changed in backward-incompatible ways and is not recommended -// for production use. It is not subject to any SLA or deprecation policy. -message CreateTableFromSnapshotRequest { - // The unique name of the instance in which to create the table. - // Values are of the form `projects//instances/`. - string parent = 1; - - // The name by which the new table should be referred to within the parent - // instance, e.g., `foobar` rather than `/tables/foobar`. - string table_id = 2; - - // The unique name of the snapshot from which to restore the table. The - // snapshot and the table must be in the same instance. - // Values are of the form - // `projects//instances//clusters//snapshots/`. - string source_snapshot = 3; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] -message DropRowRangeRequest { - // The unique name of the table on which to drop a range of rows. - // Values are of the form - // `projects//instances//tables/
`. - string name = 1; - - // Delete all rows or by prefix. - oneof target { - // Delete all rows that start with this row key prefix. Prefix cannot be - // zero length. - bytes row_key_prefix = 2; - - // Delete all rows in the table. Setting this to false is a no-op. - bool delete_all_data_from_table = 3; - } -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] -message ListTablesRequest { - // The unique name of the instance for which tables should be listed. - // Values are of the form `projects//instances/`. - string parent = 1; - - // The view to be applied to the returned tables' fields. - // Defaults to `NAME_ONLY` if unspecified; no others are currently supported. - Table.View view = 2; - - // Maximum number of results per page. - // CURRENTLY UNIMPLEMENTED AND IGNORED. - int32 page_size = 4; - - // The value of `next_page_token` returned by a previous call. - string page_token = 3; -} - -// Response message for -// [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] -message ListTablesResponse { - // The tables present in the requested instance. - repeated Table tables = 1; - - // Set if not all tables could be returned in a single response. - // Pass this value to `page_token` in another request to get the next - // page of results. - string next_page_token = 2; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.GetTable][google.bigtable.admin.v2.BigtableTableAdmin.GetTable] -message GetTableRequest { - // The unique name of the requested table. - // Values are of the form - // `projects//instances//tables/
`. - string name = 1; - - // The view to be applied to the returned table's fields. - // Defaults to `SCHEMA_VIEW` if unspecified. - Table.View view = 2; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable] -message DeleteTableRequest { - // The unique name of the table to be deleted. - // Values are of the form - // `projects//instances//tables/
`. - string name = 1; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies] -message ModifyColumnFamiliesRequest { - // A create, update, or delete of a particular column family. - message Modification { - // The ID of the column family to be modified. - string id = 1; - - // Column familiy modifications. - oneof mod { - // Create a new column family with the specified schema, or fail if - // one already exists with the given ID. - ColumnFamily create = 2; - - // Update an existing column family to the specified schema, or fail - // if no column family exists with the given ID. - ColumnFamily update = 3; - - // Drop (delete) the column family with the given ID, or fail if no such - // family exists. - bool drop = 4; - } - } - - // The unique name of the table whose families should be modified. - // Values are of the form - // `projects//instances//tables/
`. - string name = 1; - - // Modifications to be atomically applied to the specified table's families. - // Entries are applied in order, meaning that earlier modifications can be - // masked by later ones (in the case of repeated updates to the same family, - // for example). - repeated Modification modifications = 2; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] -message GenerateConsistencyTokenRequest { - // The unique name of the Table for which to create a consistency token. - // Values are of the form - // `projects//instances//tables/
`. - string name = 1; -} - -// Response message for -// [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] -message GenerateConsistencyTokenResponse { - // The generated consistency token. - string consistency_token = 1; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] -message CheckConsistencyRequest { - // The unique name of the Table for which to check replication consistency. - // Values are of the form - // `projects//instances//tables/
`. - string name = 1; - - // The token created using GenerateConsistencyToken for the Table. - string consistency_token = 2; -} - -// Response message for -// [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] -message CheckConsistencyResponse { - // True only if the token is consistent. A token is consistent if replication - // has caught up with the restrictions specified in the request. - bool consistent = 1; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable][google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable] -// -// Note: This is a private alpha release of Cloud Bigtable snapshots. This -// feature is not currently available to most Cloud Bigtable customers. This -// feature might be changed in backward-incompatible ways and is not recommended -// for production use. It is not subject to any SLA or deprecation policy. -message SnapshotTableRequest { - // The unique name of the table to have the snapshot taken. - // Values are of the form - // `projects//instances//tables/
`. - string name = 1; - - // The name of the cluster where the snapshot will be created in. - // Values are of the form - // `projects//instances//clusters/`. - string cluster = 2; - - // The ID by which the new snapshot should be referred to within the parent - // cluster, e.g., `mysnapshot` of the form: `[_a-zA-Z0-9][-_.a-zA-Z0-9]*` - // rather than - // `projects//instances//clusters//snapshots/mysnapshot`. - string snapshot_id = 3; - - // The amount of time that the new snapshot can stay active after it is - // created. Once 'ttl' expires, the snapshot will get deleted. The maximum - // amount of time a snapshot can stay active is 7 days. If 'ttl' is not - // specified, the default value of 24 hours will be used. - google.protobuf.Duration ttl = 4; - - // Description of the snapshot. - string description = 5; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot] -// -// Note: This is a private alpha release of Cloud Bigtable snapshots. This -// feature is not currently available to most Cloud Bigtable customers. This -// feature might be changed in backward-incompatible ways and is not recommended -// for production use. It is not subject to any SLA or deprecation policy. -message GetSnapshotRequest { - // The unique name of the requested snapshot. - // Values are of the form - // `projects//instances//clusters//snapshots/`. - string name = 1; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] -// -// Note: This is a private alpha release of Cloud Bigtable snapshots. This -// feature is not currently available to most Cloud Bigtable customers. This -// feature might be changed in backward-incompatible ways and is not recommended -// for production use. It is not subject to any SLA or deprecation policy. -message ListSnapshotsRequest { - // The unique name of the cluster for which snapshots should be listed. - // Values are of the form - // `projects//instances//clusters/`. - // Use ` = '-'` to list snapshots for all clusters in an instance, - // e.g., `projects//instances//clusters/-`. - string parent = 1; - - // The maximum number of snapshots to return per page. - // CURRENTLY UNIMPLEMENTED AND IGNORED. - int32 page_size = 2; - - // The value of `next_page_token` returned by a previous call. - string page_token = 3; -} - -// Response message for -// [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] -// -// Note: This is a private alpha release of Cloud Bigtable snapshots. This -// feature is not currently available to most Cloud Bigtable customers. This -// feature might be changed in backward-incompatible ways and is not recommended -// for production use. It is not subject to any SLA or deprecation policy. -message ListSnapshotsResponse { - // The snapshots present in the requested cluster. - repeated Snapshot snapshots = 1; - - // Set if not all snapshots could be returned in a single response. - // Pass this value to `page_token` in another request to get the next - // page of results. - string next_page_token = 2; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot] -// -// Note: This is a private alpha release of Cloud Bigtable snapshots. This -// feature is not currently available to most Cloud Bigtable customers. 
This -// feature might be changed in backward-incompatible ways and is not recommended -// for production use. It is not subject to any SLA or deprecation policy. -message DeleteSnapshotRequest { - // The unique name of the snapshot to be deleted. - // Values are of the form - // `projects//instances//clusters//snapshots/`. - string name = 1; -} - -// The metadata for the Operation returned by SnapshotTable. -// -// Note: This is a private alpha release of Cloud Bigtable snapshots. This -// feature is not currently available to most Cloud Bigtable customers. This -// feature might be changed in backward-incompatible ways and is not recommended -// for production use. It is not subject to any SLA or deprecation policy. -message SnapshotTableMetadata { - // The request that prompted the initiation of this SnapshotTable operation. - SnapshotTableRequest original_request = 1; - - // The time at which the original request was received. - google.protobuf.Timestamp request_time = 2; - - // The time at which the operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 3; -} - -// The metadata for the Operation returned by CreateTableFromSnapshot. -// -// Note: This is a private alpha release of Cloud Bigtable snapshots. This -// feature is not currently available to most Cloud Bigtable customers. This -// feature might be changed in backward-incompatible ways and is not recommended -// for production use. It is not subject to any SLA or deprecation policy. -message CreateTableFromSnapshotMetadata { - // The request that prompted the initiation of this CreateTableFromSnapshot - // operation. - CreateTableFromSnapshotRequest original_request = 1; - - // The time at which the original request was received. - google.protobuf.Timestamp request_time = 2; - - // The time at which the operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 3; -} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_table_data.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_table_data.proto deleted file mode 100644 index e4efb74f560e..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_table_data.proto +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2017 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.table.v1; - -import "google/longrunning/operations.proto"; -import "google/protobuf/duration.proto"; - -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/table/v1;table"; -option java_multiple_files = true; -option java_outer_classname = "BigtableTableDataProto"; -option java_package = "com.google.bigtable.admin.table.v1"; - - -// A collection of user data indexed by row, column, and timestamp. -// Each table is served using the resources of its parent cluster. 
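The SnapshotTableMetadata and CreateTableFromSnapshotMetadata messages shown above travel inside the `metadata` Any field of the long-running Operation returned by the corresponding RPCs. A hedged sketch of reading one back out (the pb2 module path is assumed from this package's layout):

    # Sketch: unpack SnapshotTableMetadata from a google.longrunning Operation.
    from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2

    def snapshot_elapsed_seconds(operation):
        """Return how long a SnapshotTable operation ran, or None if unfinished."""
        metadata = bigtable_table_admin_pb2.SnapshotTableMetadata()
        # Operation.metadata is a protobuf Any; Unpack returns False when the
        # payload is a different message type.
        if not operation.metadata.Unpack(metadata):
            raise ValueError("operation does not carry SnapshotTableMetadata")
        if not metadata.HasField("finish_time"):
            return None
        started = metadata.request_time.ToDatetime()
        finished = metadata.finish_time.ToDatetime()
        return (finished - started).total_seconds()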
-message Table { - enum TimestampGranularity { - MILLIS = 0; - } - - // A unique identifier of the form - // /tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]* - string name = 1; - - // If this Table is in the process of being created, the Operation used to - // track its progress. As long as this operation is present, the Table will - // not accept any Table Admin or Read/Write requests. - google.longrunning.Operation current_operation = 2; - - // The column families configured for this table, mapped by column family id. - map column_families = 3; - - // The granularity (e.g. MILLIS, MICROS) at which timestamps are stored in - // this table. Timestamps not matching the granularity will be rejected. - // Cannot be changed once the table is created. - TimestampGranularity granularity = 4; -} - -// A set of columns within a table which share a common configuration. -message ColumnFamily { - // A unique identifier of the form /columnFamilies/[-_.a-zA-Z0-9]+ - // The last segment is the same as the "name" field in - // google.bigtable.v1.Family. - string name = 1; - - // Garbage collection expression specified by the following grammar: - // GC = EXPR - // | "" ; - // EXPR = EXPR, "||", EXPR (* lowest precedence *) - // | EXPR, "&&", EXPR - // | "(", EXPR, ")" (* highest precedence *) - // | PROP ; - // PROP = "version() >", NUM32 - // | "age() >", NUM64, [ UNIT ] ; - // NUM32 = non-zero-digit { digit } ; (* # NUM32 <= 2^32 - 1 *) - // NUM64 = non-zero-digit { digit } ; (* # NUM64 <= 2^63 - 1 *) - // UNIT = "d" | "h" | "m" (* d=days, h=hours, m=minutes, else micros *) - // GC expressions can be up to 500 characters in length - // - // The different types of PROP are defined as follows: - // version() - cell index, counting from most recent and starting at 1 - // age() - age of the cell (current time minus cell timestamp) - // - // Example: "version() > 3 || (age() > 3d && version() > 1)" - // drop cells beyond the most recent three, and drop cells older than three - // days unless they're the most recent cell in the row/column - // - // Garbage collection executes opportunistically in the background, and so - // it's possible for reads to return a cell even if it matches the active GC - // expression for its family. - string gc_expression = 2; - - // Garbage collection rule specified as a protobuf. - // Supersedes `gc_expression`. - // Must serialize to at most 500 bytes. - // - // NOTE: Garbage collection executes opportunistically in the background, and - // so it's possible for reads to return a cell even if it matches the active - // GC expression for its family. - GcRule gc_rule = 3; -} - -// Rule for determining which cells to delete during garbage collection. -message GcRule { - // A GcRule which deletes cells matching all of the given rules. - message Intersection { - // Only delete cells which would be deleted by every element of `rules`. - repeated GcRule rules = 1; - } - - // A GcRule which deletes cells matching any of the given rules. - message Union { - // Delete cells which would be deleted by any element of `rules`. - repeated GcRule rules = 1; - } - - oneof rule { - // Delete all cells in a column except the most recent N. - int32 max_num_versions = 1; - - // Delete cells in a column older than the given age. - // Values must be at least one millisecond, and will be truncated to - // microsecond granularity. - google.protobuf.Duration max_age = 2; - - // Delete cells that would be deleted by every nested rule. 
- Intersection intersection = 3; - - // Delete cells that would be deleted by any nested rule. - Union union = 4; - } -} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_table_service.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_table_service.proto deleted file mode 100644 index 6e968fee17c1..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_table_service.proto +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2017 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.table.v1; - -import "google/api/annotations.proto"; -import "google/bigtable/admin/table/v1/bigtable_table_data.proto"; -import "google/bigtable/admin/table/v1/bigtable_table_service_messages.proto"; -import "google/protobuf/empty.proto"; - -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/table/v1;table"; -option java_multiple_files = true; -option java_outer_classname = "BigtableTableServicesProto"; -option java_package = "com.google.bigtable.admin.table.v1"; - - -// Service for creating, configuring, and deleting Cloud Bigtable tables. -// Provides access to the table schemas only, not the data stored within the tables. -service BigtableTableService { - // Creates a new table, to be served from a specified cluster. - // The table can be created with a full set of initial column families, - // specified in the request. - rpc CreateTable(CreateTableRequest) returns (Table) { - option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*}/tables" body: "*" }; - } - - // Lists the names of all tables served from a specified cluster. - rpc ListTables(ListTablesRequest) returns (ListTablesResponse) { - option (google.api.http) = { get: "/v1/{name=projects/*/zones/*/clusters/*}/tables" }; - } - - // Gets the schema of the specified table, including its column families. - rpc GetTable(GetTableRequest) returns (Table) { - option (google.api.http) = { get: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}" }; - } - - // Permanently deletes a specified table and all of its data. - rpc DeleteTable(DeleteTableRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { delete: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}" }; - } - - // Changes the name of a specified table. - // Cannot be used to move tables between clusters, zones, or projects. - rpc RenameTable(RenameTableRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}:rename" body: "*" }; - } - - // Creates a new column family within a specified table. - rpc CreateColumnFamily(CreateColumnFamilyRequest) returns (ColumnFamily) { - option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}/columnFamilies" body: "*" }; - } - - // Changes the configuration of a specified column family. 
- rpc UpdateColumnFamily(ColumnFamily) returns (ColumnFamily) { - option (google.api.http) = { put: "/v1/{name=projects/*/zones/*/clusters/*/tables/*/columnFamilies/*}" body: "*" }; - } - - // Permanently deletes a specified column family and all of its data. - rpc DeleteColumnFamily(DeleteColumnFamilyRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { delete: "/v1/{name=projects/*/zones/*/clusters/*/tables/*/columnFamilies/*}" }; - } - - // Delete all rows in a table corresponding to a particular prefix - rpc BulkDeleteRows(BulkDeleteRowsRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}:bulkDeleteRows" body: "*" }; - } -} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_table_service_messages.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_table_service_messages.proto deleted file mode 100644 index 617ede65592f..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable_table_service_messages.proto +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2017 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.table.v1; - -import "google/bigtable/admin/table/v1/bigtable_table_data.proto"; - -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/table/v1;table"; -option java_multiple_files = true; -option java_outer_classname = "BigtableTableServiceMessagesProto"; -option java_package = "com.google.bigtable.admin.table.v1"; - - -message CreateTableRequest { - // The unique name of the cluster in which to create the new table. - string name = 1; - - // The name by which the new table should be referred to within the cluster, - // e.g. "foobar" rather than "/tables/foobar". - string table_id = 2; - - // The Table to create. The `name` field of the Table and all of its - // ColumnFamilies must be left blank, and will be populated in the response. - Table table = 3; - - // The optional list of row keys that will be used to initially split the - // table into several tablets (Tablets are similar to HBase regions). - // Given two split keys, "s1" and "s2", three tablets will be created, - // spanning the key ranges: [, s1), [s1, s2), [s2, ). - // - // Example: - // * Row keys := ["a", "apple", "custom", "customer_1", "customer_2", - // "other", "zz"] - // * initial_split_keys := ["apple", "customer_1", "customer_2", "other"] - // * Key assignment: - // - Tablet 1 [, apple) => {"a"}. - // - Tablet 2 [apple, customer_1) => {"apple", "custom"}. - // - Tablet 3 [customer_1, customer_2) => {"customer_1"}. - // - Tablet 4 [customer_2, other) => {"customer_2"}. - // - Tablet 5 [other, ) => {"other", "zz"}. - repeated string initial_split_keys = 4; -} - -message ListTablesRequest { - // The unique name of the cluster for which tables should be listed. 
- string name = 1; -} - -message ListTablesResponse { - // The tables present in the requested cluster. - // At present, only the names of the tables are populated. - repeated Table tables = 1; -} - -message GetTableRequest { - // The unique name of the requested table. - string name = 1; -} - -message DeleteTableRequest { - // The unique name of the table to be deleted. - string name = 1; -} - -message RenameTableRequest { - // The current unique name of the table. - string name = 1; - - // The new name by which the table should be referred to within its containing - // cluster, e.g. "foobar" rather than "/tables/foobar". - string new_id = 2; -} - -message CreateColumnFamilyRequest { - // The unique name of the table in which to create the new column family. - string name = 1; - - // The name by which the new column family should be referred to within the - // table, e.g. "foobar" rather than "/columnFamilies/foobar". - string column_family_id = 2; - - // The column family to create. The `name` field must be left blank. - ColumnFamily column_family = 3; -} - -message DeleteColumnFamilyRequest { - // The unique name of the column family to be deleted. - string name = 1; -} - -message BulkDeleteRowsRequest { - // The unique name of the table on which to perform the bulk delete - string table_name = 1; - - oneof target { - // Delete all rows that start with this row key prefix. Prefix cannot be - // zero length. - bytes row_key_prefix = 2; - - // Delete all rows in the table. Setting this to false is a no-op. - bool delete_all_data_from_table = 3; - } -} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/common.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/common.proto deleted file mode 100644 index 0ece12780eb9..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/common.proto +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2018 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.bigtable.admin.v2; - -import "google/api/annotations.proto"; -import "google/protobuf/timestamp.proto"; - -option csharp_namespace = "Google.Cloud.Bigtable.Admin.V2"; -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/v2;admin"; -option java_multiple_files = true; -option java_outer_classname = "CommonProto"; -option java_package = "com.google.bigtable.admin.v2"; -option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2"; - - -// Storage media types for persisting Bigtable data. -enum StorageType { - // The user did not specify a storage type. - STORAGE_TYPE_UNSPECIFIED = 0; - - // Flash (SSD) storage should be used. - SSD = 1; - - // Magnetic drive (HDD) storage should be used. 
- HDD = 2; -} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py deleted file mode 100644 index 5f62756a88d4..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2.py +++ /dev/null @@ -1,2672 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/bigtable_v2/proto/data.proto -"""Generated protocol buffer code.""" -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/bigtable_v2/proto/data.proto", - package="google.bigtable.v2", - syntax="proto3", - serialized_options=b"\n\026com.google.bigtable.v2B\tDataProtoP\001Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2\352\002\033Google::Cloud::Bigtable::V2", - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n)google/cloud/bigtable_v2/proto/data.proto\x12\x12google.bigtable.v2"@\n\x03Row\x12\x0b\n\x03key\x18\x01 \x01(\x0c\x12,\n\x08\x66\x61milies\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v2.Family"C\n\x06\x46\x61mily\x12\x0c\n\x04name\x18\x01 \x01(\t\x12+\n\x07\x63olumns\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v2.Column"D\n\x06\x43olumn\x12\x11\n\tqualifier\x18\x01 \x01(\x0c\x12\'\n\x05\x63\x65lls\x18\x02 \x03(\x0b\x32\x18.google.bigtable.v2.Cell"?\n\x04\x43\x65ll\x12\x18\n\x10timestamp_micros\x18\x01 \x01(\x03\x12\r\n\x05value\x18\x02 \x01(\x0c\x12\x0e\n\x06labels\x18\x03 \x03(\t"\x8a\x01\n\x08RowRange\x12\x1a\n\x10start_key_closed\x18\x01 \x01(\x0cH\x00\x12\x18\n\x0estart_key_open\x18\x02 \x01(\x0cH\x00\x12\x16\n\x0c\x65nd_key_open\x18\x03 \x01(\x0cH\x01\x12\x18\n\x0e\x65nd_key_closed\x18\x04 \x01(\x0cH\x01\x42\x0b\n\tstart_keyB\t\n\x07\x65nd_key"L\n\x06RowSet\x12\x10\n\x08row_keys\x18\x01 \x03(\x0c\x12\x30\n\nrow_ranges\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.RowRange"\xc6\x01\n\x0b\x43olumnRange\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12 \n\x16start_qualifier_closed\x18\x02 \x01(\x0cH\x00\x12\x1e\n\x14start_qualifier_open\x18\x03 \x01(\x0cH\x00\x12\x1e\n\x14\x65nd_qualifier_closed\x18\x04 \x01(\x0cH\x01\x12\x1c\n\x12\x65nd_qualifier_open\x18\x05 \x01(\x0cH\x01\x42\x11\n\x0fstart_qualifierB\x0f\n\rend_qualifier"N\n\x0eTimestampRange\x12\x1e\n\x16start_timestamp_micros\x18\x01 \x01(\x03\x12\x1c\n\x14\x65nd_timestamp_micros\x18\x02 \x01(\x03"\x98\x01\n\nValueRange\x12\x1c\n\x12start_value_closed\x18\x01 \x01(\x0cH\x00\x12\x1a\n\x10start_value_open\x18\x02 \x01(\x0cH\x00\x12\x1a\n\x10\x65nd_value_closed\x18\x03 \x01(\x0cH\x01\x12\x18\n\x0e\x65nd_value_open\x18\x04 \x01(\x0cH\x01\x42\r\n\x0bstart_valueB\x0b\n\tend_value"\xdf\x08\n\tRowFilter\x12\x34\n\x05\x63hain\x18\x01 \x01(\x0b\x32#.google.bigtable.v2.RowFilter.ChainH\x00\x12>\n\ninterleave\x18\x02 \x01(\x0b\x32(.google.bigtable.v2.RowFilter.InterleaveH\x00\x12<\n\tcondition\x18\x03 \x01(\x0b\x32\'.google.bigtable.v2.RowFilter.ConditionH\x00\x12\x0e\n\x04sink\x18\x10 \x01(\x08H\x00\x12\x19\n\x0fpass_all_filter\x18\x11 \x01(\x08H\x00\x12\x1a\n\x10\x62lock_all_filter\x18\x12 \x01(\x08H\x00\x12\x1e\n\x14row_key_regex_filter\x18\x04 
\x01(\x0cH\x00\x12\x1b\n\x11row_sample_filter\x18\x0e \x01(\x01H\x00\x12"\n\x18\x66\x61mily_name_regex_filter\x18\x05 \x01(\tH\x00\x12\'\n\x1d\x63olumn_qualifier_regex_filter\x18\x06 \x01(\x0cH\x00\x12>\n\x13\x63olumn_range_filter\x18\x07 \x01(\x0b\x32\x1f.google.bigtable.v2.ColumnRangeH\x00\x12\x44\n\x16timestamp_range_filter\x18\x08 \x01(\x0b\x32".google.bigtable.v2.TimestampRangeH\x00\x12\x1c\n\x12value_regex_filter\x18\t \x01(\x0cH\x00\x12<\n\x12value_range_filter\x18\x0f \x01(\x0b\x32\x1e.google.bigtable.v2.ValueRangeH\x00\x12%\n\x1b\x63\x65lls_per_row_offset_filter\x18\n \x01(\x05H\x00\x12$\n\x1a\x63\x65lls_per_row_limit_filter\x18\x0b \x01(\x05H\x00\x12\'\n\x1d\x63\x65lls_per_column_limit_filter\x18\x0c \x01(\x05H\x00\x12!\n\x17strip_value_transformer\x18\r \x01(\x08H\x00\x12!\n\x17\x61pply_label_transformer\x18\x13 \x01(\tH\x00\x1a\x37\n\x05\x43hain\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x1a<\n\nInterleave\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x1a\xad\x01\n\tCondition\x12\x37\n\x10predicate_filter\x18\x01 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x32\n\x0btrue_filter\x18\x02 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x33\n\x0c\x66\x61lse_filter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilterB\x08\n\x06\x66ilter"\xc9\x04\n\x08Mutation\x12\x38\n\x08set_cell\x18\x01 \x01(\x0b\x32$.google.bigtable.v2.Mutation.SetCellH\x00\x12K\n\x12\x64\x65lete_from_column\x18\x02 \x01(\x0b\x32-.google.bigtable.v2.Mutation.DeleteFromColumnH\x00\x12K\n\x12\x64\x65lete_from_family\x18\x03 \x01(\x0b\x32-.google.bigtable.v2.Mutation.DeleteFromFamilyH\x00\x12\x45\n\x0f\x64\x65lete_from_row\x18\x04 \x01(\x0b\x32*.google.bigtable.v2.Mutation.DeleteFromRowH\x00\x1a\x61\n\x07SetCell\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x18\n\x10timestamp_micros\x18\x03 \x01(\x03\x12\r\n\x05value\x18\x04 \x01(\x0c\x1ay\n\x10\x44\x65leteFromColumn\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x36\n\ntime_range\x18\x03 \x01(\x0b\x32".google.bigtable.v2.TimestampRange\x1a\'\n\x10\x44\x65leteFromFamily\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x1a\x0f\n\rDeleteFromRowB\n\n\x08mutation"\x80\x01\n\x13ReadModifyWriteRule\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x16\n\x0c\x61ppend_value\x18\x03 \x01(\x0cH\x00\x12\x1a\n\x10increment_amount\x18\x04 \x01(\x03H\x00\x42\x06\n\x04ruleB\xb5\x01\n\x16\x63om.google.bigtable.v2B\tDataProtoP\x01Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2\xea\x02\x1bGoogle::Cloud::Bigtable::V2b\x06proto3', -) - - -_ROW = _descriptor.Descriptor( - name="Row", - full_name="google.bigtable.v2.Row", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.bigtable.v2.Row.key", - index=0, - number=1, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="families", - full_name="google.bigtable.v2.Row.families", - index=1, - number=2, - type=11, - 
cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=65, - serialized_end=129, -) - - -_FAMILY = _descriptor.Descriptor( - name="Family", - full_name="google.bigtable.v2.Family", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.bigtable.v2.Family.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="columns", - full_name="google.bigtable.v2.Family.columns", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=131, - serialized_end=198, -) - - -_COLUMN = _descriptor.Descriptor( - name="Column", - full_name="google.bigtable.v2.Column", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="qualifier", - full_name="google.bigtable.v2.Column.qualifier", - index=0, - number=1, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cells", - full_name="google.bigtable.v2.Column.cells", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=200, - serialized_end=268, -) - - -_CELL = _descriptor.Descriptor( - name="Cell", - full_name="google.bigtable.v2.Cell", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="timestamp_micros", - full_name="google.bigtable.v2.Cell.timestamp_micros", - index=0, - number=1, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - 
serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.bigtable.v2.Cell.value", - index=1, - number=2, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="labels", - full_name="google.bigtable.v2.Cell.labels", - index=2, - number=3, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=270, - serialized_end=333, -) - - -_ROWRANGE = _descriptor.Descriptor( - name="RowRange", - full_name="google.bigtable.v2.RowRange", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="start_key_closed", - full_name="google.bigtable.v2.RowRange.start_key_closed", - index=0, - number=1, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="start_key_open", - full_name="google.bigtable.v2.RowRange.start_key_open", - index=1, - number=2, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="end_key_open", - full_name="google.bigtable.v2.RowRange.end_key_open", - index=2, - number=3, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="end_key_closed", - full_name="google.bigtable.v2.RowRange.end_key_closed", - index=3, - number=4, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="start_key", - full_name="google.bigtable.v2.RowRange.start_key", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - _descriptor.OneofDescriptor( - name="end_key", - full_name="google.bigtable.v2.RowRange.end_key", - index=1, - containing_type=None, - 
create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=336, - serialized_end=474, -) - - -_ROWSET = _descriptor.Descriptor( - name="RowSet", - full_name="google.bigtable.v2.RowSet", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="row_keys", - full_name="google.bigtable.v2.RowSet.row_keys", - index=0, - number=1, - type=12, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="row_ranges", - full_name="google.bigtable.v2.RowSet.row_ranges", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=476, - serialized_end=552, -) - - -_COLUMNRANGE = _descriptor.Descriptor( - name="ColumnRange", - full_name="google.bigtable.v2.ColumnRange", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="family_name", - full_name="google.bigtable.v2.ColumnRange.family_name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="start_qualifier_closed", - full_name="google.bigtable.v2.ColumnRange.start_qualifier_closed", - index=1, - number=2, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="start_qualifier_open", - full_name="google.bigtable.v2.ColumnRange.start_qualifier_open", - index=2, - number=3, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="end_qualifier_closed", - full_name="google.bigtable.v2.ColumnRange.end_qualifier_closed", - index=3, - number=4, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="end_qualifier_open", - full_name="google.bigtable.v2.ColumnRange.end_qualifier_open", - index=4, - number=5, - type=12, - cpp_type=9, - 
label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="start_qualifier", - full_name="google.bigtable.v2.ColumnRange.start_qualifier", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - _descriptor.OneofDescriptor( - name="end_qualifier", - full_name="google.bigtable.v2.ColumnRange.end_qualifier", - index=1, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=555, - serialized_end=753, -) - - -_TIMESTAMPRANGE = _descriptor.Descriptor( - name="TimestampRange", - full_name="google.bigtable.v2.TimestampRange", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="start_timestamp_micros", - full_name="google.bigtable.v2.TimestampRange.start_timestamp_micros", - index=0, - number=1, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="end_timestamp_micros", - full_name="google.bigtable.v2.TimestampRange.end_timestamp_micros", - index=1, - number=2, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=755, - serialized_end=833, -) - - -_VALUERANGE = _descriptor.Descriptor( - name="ValueRange", - full_name="google.bigtable.v2.ValueRange", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="start_value_closed", - full_name="google.bigtable.v2.ValueRange.start_value_closed", - index=0, - number=1, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="start_value_open", - full_name="google.bigtable.v2.ValueRange.start_value_open", - index=1, - number=2, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="end_value_closed", - full_name="google.bigtable.v2.ValueRange.end_value_closed", - index=2, - number=3, - type=12, - cpp_type=9, - label=1, - 
has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="end_value_open", - full_name="google.bigtable.v2.ValueRange.end_value_open", - index=3, - number=4, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="start_value", - full_name="google.bigtable.v2.ValueRange.start_value", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - _descriptor.OneofDescriptor( - name="end_value", - full_name="google.bigtable.v2.ValueRange.end_value", - index=1, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=836, - serialized_end=988, -) - - -_ROWFILTER_CHAIN = _descriptor.Descriptor( - name="Chain", - full_name="google.bigtable.v2.RowFilter.Chain", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="filters", - full_name="google.bigtable.v2.RowFilter.Chain.filters", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1807, - serialized_end=1862, -) - -_ROWFILTER_INTERLEAVE = _descriptor.Descriptor( - name="Interleave", - full_name="google.bigtable.v2.RowFilter.Interleave", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="filters", - full_name="google.bigtable.v2.RowFilter.Interleave.filters", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1864, - serialized_end=1924, -) - -_ROWFILTER_CONDITION = _descriptor.Descriptor( - name="Condition", - full_name="google.bigtable.v2.RowFilter.Condition", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="predicate_filter", - full_name="google.bigtable.v2.RowFilter.Condition.predicate_filter", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - 
message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="true_filter", - full_name="google.bigtable.v2.RowFilter.Condition.true_filter", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="false_filter", - full_name="google.bigtable.v2.RowFilter.Condition.false_filter", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1927, - serialized_end=2100, -) - -_ROWFILTER = _descriptor.Descriptor( - name="RowFilter", - full_name="google.bigtable.v2.RowFilter", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="chain", - full_name="google.bigtable.v2.RowFilter.chain", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="interleave", - full_name="google.bigtable.v2.RowFilter.interleave", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="condition", - full_name="google.bigtable.v2.RowFilter.condition", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="sink", - full_name="google.bigtable.v2.RowFilter.sink", - index=3, - number=16, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="pass_all_filter", - full_name="google.bigtable.v2.RowFilter.pass_all_filter", - index=4, - number=17, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - 
file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="block_all_filter", - full_name="google.bigtable.v2.RowFilter.block_all_filter", - index=5, - number=18, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="row_key_regex_filter", - full_name="google.bigtable.v2.RowFilter.row_key_regex_filter", - index=6, - number=4, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="row_sample_filter", - full_name="google.bigtable.v2.RowFilter.row_sample_filter", - index=7, - number=14, - type=1, - cpp_type=5, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="family_name_regex_filter", - full_name="google.bigtable.v2.RowFilter.family_name_regex_filter", - index=8, - number=5, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="column_qualifier_regex_filter", - full_name="google.bigtable.v2.RowFilter.column_qualifier_regex_filter", - index=9, - number=6, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="column_range_filter", - full_name="google.bigtable.v2.RowFilter.column_range_filter", - index=10, - number=7, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="timestamp_range_filter", - full_name="google.bigtable.v2.RowFilter.timestamp_range_filter", - index=11, - number=8, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value_regex_filter", - full_name="google.bigtable.v2.RowFilter.value_regex_filter", - index=12, - number=9, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - 
serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value_range_filter", - full_name="google.bigtable.v2.RowFilter.value_range_filter", - index=13, - number=15, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cells_per_row_offset_filter", - full_name="google.bigtable.v2.RowFilter.cells_per_row_offset_filter", - index=14, - number=10, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cells_per_row_limit_filter", - full_name="google.bigtable.v2.RowFilter.cells_per_row_limit_filter", - index=15, - number=11, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cells_per_column_limit_filter", - full_name="google.bigtable.v2.RowFilter.cells_per_column_limit_filter", - index=16, - number=12, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="strip_value_transformer", - full_name="google.bigtable.v2.RowFilter.strip_value_transformer", - index=17, - number=13, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="apply_label_transformer", - full_name="google.bigtable.v2.RowFilter.apply_label_transformer", - index=18, - number=19, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[ - _ROWFILTER_CHAIN, - _ROWFILTER_INTERLEAVE, - _ROWFILTER_CONDITION, - ], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="filter", - full_name="google.bigtable.v2.RowFilter.filter", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=991, - serialized_end=2110, -) - - -_MUTATION_SETCELL = _descriptor.Descriptor( - name="SetCell", - full_name="google.bigtable.v2.Mutation.SetCell", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - 
_descriptor.FieldDescriptor( - name="family_name", - full_name="google.bigtable.v2.Mutation.SetCell.family_name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="column_qualifier", - full_name="google.bigtable.v2.Mutation.SetCell.column_qualifier", - index=1, - number=2, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="timestamp_micros", - full_name="google.bigtable.v2.Mutation.SetCell.timestamp_micros", - index=2, - number=3, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.bigtable.v2.Mutation.SetCell.value", - index=3, - number=4, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2408, - serialized_end=2505, -) - -_MUTATION_DELETEFROMCOLUMN = _descriptor.Descriptor( - name="DeleteFromColumn", - full_name="google.bigtable.v2.Mutation.DeleteFromColumn", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="family_name", - full_name="google.bigtable.v2.Mutation.DeleteFromColumn.family_name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="column_qualifier", - full_name="google.bigtable.v2.Mutation.DeleteFromColumn.column_qualifier", - index=1, - number=2, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="time_range", - full_name="google.bigtable.v2.Mutation.DeleteFromColumn.time_range", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), 
- ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2507, - serialized_end=2628, -) - -_MUTATION_DELETEFROMFAMILY = _descriptor.Descriptor( - name="DeleteFromFamily", - full_name="google.bigtable.v2.Mutation.DeleteFromFamily", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="family_name", - full_name="google.bigtable.v2.Mutation.DeleteFromFamily.family_name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2630, - serialized_end=2669, -) - -_MUTATION_DELETEFROMROW = _descriptor.Descriptor( - name="DeleteFromRow", - full_name="google.bigtable.v2.Mutation.DeleteFromRow", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2671, - serialized_end=2686, -) - -_MUTATION = _descriptor.Descriptor( - name="Mutation", - full_name="google.bigtable.v2.Mutation", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="set_cell", - full_name="google.bigtable.v2.Mutation.set_cell", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="delete_from_column", - full_name="google.bigtable.v2.Mutation.delete_from_column", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="delete_from_family", - full_name="google.bigtable.v2.Mutation.delete_from_family", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="delete_from_row", - full_name="google.bigtable.v2.Mutation.delete_from_row", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - 
], - extensions=[], - nested_types=[ - _MUTATION_SETCELL, - _MUTATION_DELETEFROMCOLUMN, - _MUTATION_DELETEFROMFAMILY, - _MUTATION_DELETEFROMROW, - ], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="mutation", - full_name="google.bigtable.v2.Mutation.mutation", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=2113, - serialized_end=2698, -) - - -_READMODIFYWRITERULE = _descriptor.Descriptor( - name="ReadModifyWriteRule", - full_name="google.bigtable.v2.ReadModifyWriteRule", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="family_name", - full_name="google.bigtable.v2.ReadModifyWriteRule.family_name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="column_qualifier", - full_name="google.bigtable.v2.ReadModifyWriteRule.column_qualifier", - index=1, - number=2, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="append_value", - full_name="google.bigtable.v2.ReadModifyWriteRule.append_value", - index=2, - number=3, - type=12, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"", - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="increment_amount", - full_name="google.bigtable.v2.ReadModifyWriteRule.increment_amount", - index=3, - number=4, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="rule", - full_name="google.bigtable.v2.ReadModifyWriteRule.rule", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=2701, - serialized_end=2829, -) - -_ROW.fields_by_name["families"].message_type = _FAMILY -_FAMILY.fields_by_name["columns"].message_type = _COLUMN -_COLUMN.fields_by_name["cells"].message_type = _CELL -_ROWRANGE.oneofs_by_name["start_key"].fields.append( - _ROWRANGE.fields_by_name["start_key_closed"] -) -_ROWRANGE.fields_by_name[ - "start_key_closed" -].containing_oneof = _ROWRANGE.oneofs_by_name["start_key"] -_ROWRANGE.oneofs_by_name["start_key"].fields.append( - _ROWRANGE.fields_by_name["start_key_open"] -) -_ROWRANGE.fields_by_name["start_key_open"].containing_oneof = _ROWRANGE.oneofs_by_name[ - 
"start_key" -] -_ROWRANGE.oneofs_by_name["end_key"].fields.append( - _ROWRANGE.fields_by_name["end_key_open"] -) -_ROWRANGE.fields_by_name["end_key_open"].containing_oneof = _ROWRANGE.oneofs_by_name[ - "end_key" -] -_ROWRANGE.oneofs_by_name["end_key"].fields.append( - _ROWRANGE.fields_by_name["end_key_closed"] -) -_ROWRANGE.fields_by_name["end_key_closed"].containing_oneof = _ROWRANGE.oneofs_by_name[ - "end_key" -] -_ROWSET.fields_by_name["row_ranges"].message_type = _ROWRANGE -_COLUMNRANGE.oneofs_by_name["start_qualifier"].fields.append( - _COLUMNRANGE.fields_by_name["start_qualifier_closed"] -) -_COLUMNRANGE.fields_by_name[ - "start_qualifier_closed" -].containing_oneof = _COLUMNRANGE.oneofs_by_name["start_qualifier"] -_COLUMNRANGE.oneofs_by_name["start_qualifier"].fields.append( - _COLUMNRANGE.fields_by_name["start_qualifier_open"] -) -_COLUMNRANGE.fields_by_name[ - "start_qualifier_open" -].containing_oneof = _COLUMNRANGE.oneofs_by_name["start_qualifier"] -_COLUMNRANGE.oneofs_by_name["end_qualifier"].fields.append( - _COLUMNRANGE.fields_by_name["end_qualifier_closed"] -) -_COLUMNRANGE.fields_by_name[ - "end_qualifier_closed" -].containing_oneof = _COLUMNRANGE.oneofs_by_name["end_qualifier"] -_COLUMNRANGE.oneofs_by_name["end_qualifier"].fields.append( - _COLUMNRANGE.fields_by_name["end_qualifier_open"] -) -_COLUMNRANGE.fields_by_name[ - "end_qualifier_open" -].containing_oneof = _COLUMNRANGE.oneofs_by_name["end_qualifier"] -_VALUERANGE.oneofs_by_name["start_value"].fields.append( - _VALUERANGE.fields_by_name["start_value_closed"] -) -_VALUERANGE.fields_by_name[ - "start_value_closed" -].containing_oneof = _VALUERANGE.oneofs_by_name["start_value"] -_VALUERANGE.oneofs_by_name["start_value"].fields.append( - _VALUERANGE.fields_by_name["start_value_open"] -) -_VALUERANGE.fields_by_name[ - "start_value_open" -].containing_oneof = _VALUERANGE.oneofs_by_name["start_value"] -_VALUERANGE.oneofs_by_name["end_value"].fields.append( - _VALUERANGE.fields_by_name["end_value_closed"] -) -_VALUERANGE.fields_by_name[ - "end_value_closed" -].containing_oneof = _VALUERANGE.oneofs_by_name["end_value"] -_VALUERANGE.oneofs_by_name["end_value"].fields.append( - _VALUERANGE.fields_by_name["end_value_open"] -) -_VALUERANGE.fields_by_name[ - "end_value_open" -].containing_oneof = _VALUERANGE.oneofs_by_name["end_value"] -_ROWFILTER_CHAIN.fields_by_name["filters"].message_type = _ROWFILTER -_ROWFILTER_CHAIN.containing_type = _ROWFILTER -_ROWFILTER_INTERLEAVE.fields_by_name["filters"].message_type = _ROWFILTER -_ROWFILTER_INTERLEAVE.containing_type = _ROWFILTER -_ROWFILTER_CONDITION.fields_by_name["predicate_filter"].message_type = _ROWFILTER -_ROWFILTER_CONDITION.fields_by_name["true_filter"].message_type = _ROWFILTER -_ROWFILTER_CONDITION.fields_by_name["false_filter"].message_type = _ROWFILTER -_ROWFILTER_CONDITION.containing_type = _ROWFILTER -_ROWFILTER.fields_by_name["chain"].message_type = _ROWFILTER_CHAIN -_ROWFILTER.fields_by_name["interleave"].message_type = _ROWFILTER_INTERLEAVE -_ROWFILTER.fields_by_name["condition"].message_type = _ROWFILTER_CONDITION -_ROWFILTER.fields_by_name["column_range_filter"].message_type = _COLUMNRANGE -_ROWFILTER.fields_by_name["timestamp_range_filter"].message_type = _TIMESTAMPRANGE -_ROWFILTER.fields_by_name["value_range_filter"].message_type = _VALUERANGE -_ROWFILTER.oneofs_by_name["filter"].fields.append(_ROWFILTER.fields_by_name["chain"]) -_ROWFILTER.fields_by_name["chain"].containing_oneof = _ROWFILTER.oneofs_by_name[ - "filter" -] 
-_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["interleave"] -) -_ROWFILTER.fields_by_name["interleave"].containing_oneof = _ROWFILTER.oneofs_by_name[ - "filter" -] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["condition"] -) -_ROWFILTER.fields_by_name["condition"].containing_oneof = _ROWFILTER.oneofs_by_name[ - "filter" -] -_ROWFILTER.oneofs_by_name["filter"].fields.append(_ROWFILTER.fields_by_name["sink"]) -_ROWFILTER.fields_by_name["sink"].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["pass_all_filter"] -) -_ROWFILTER.fields_by_name[ - "pass_all_filter" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["block_all_filter"] -) -_ROWFILTER.fields_by_name[ - "block_all_filter" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["row_key_regex_filter"] -) -_ROWFILTER.fields_by_name[ - "row_key_regex_filter" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["row_sample_filter"] -) -_ROWFILTER.fields_by_name[ - "row_sample_filter" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["family_name_regex_filter"] -) -_ROWFILTER.fields_by_name[ - "family_name_regex_filter" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["column_qualifier_regex_filter"] -) -_ROWFILTER.fields_by_name[ - "column_qualifier_regex_filter" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["column_range_filter"] -) -_ROWFILTER.fields_by_name[ - "column_range_filter" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["timestamp_range_filter"] -) -_ROWFILTER.fields_by_name[ - "timestamp_range_filter" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["value_regex_filter"] -) -_ROWFILTER.fields_by_name[ - "value_regex_filter" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["value_range_filter"] -) -_ROWFILTER.fields_by_name[ - "value_range_filter" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["cells_per_row_offset_filter"] -) -_ROWFILTER.fields_by_name[ - "cells_per_row_offset_filter" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["cells_per_row_limit_filter"] -) -_ROWFILTER.fields_by_name[ - "cells_per_row_limit_filter" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["cells_per_column_limit_filter"] -) -_ROWFILTER.fields_by_name[ - "cells_per_column_limit_filter" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["strip_value_transformer"] -) 
-_ROWFILTER.fields_by_name[ - "strip_value_transformer" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_ROWFILTER.oneofs_by_name["filter"].fields.append( - _ROWFILTER.fields_by_name["apply_label_transformer"] -) -_ROWFILTER.fields_by_name[ - "apply_label_transformer" -].containing_oneof = _ROWFILTER.oneofs_by_name["filter"] -_MUTATION_SETCELL.containing_type = _MUTATION -_MUTATION_DELETEFROMCOLUMN.fields_by_name["time_range"].message_type = _TIMESTAMPRANGE -_MUTATION_DELETEFROMCOLUMN.containing_type = _MUTATION -_MUTATION_DELETEFROMFAMILY.containing_type = _MUTATION -_MUTATION_DELETEFROMROW.containing_type = _MUTATION -_MUTATION.fields_by_name["set_cell"].message_type = _MUTATION_SETCELL -_MUTATION.fields_by_name["delete_from_column"].message_type = _MUTATION_DELETEFROMCOLUMN -_MUTATION.fields_by_name["delete_from_family"].message_type = _MUTATION_DELETEFROMFAMILY -_MUTATION.fields_by_name["delete_from_row"].message_type = _MUTATION_DELETEFROMROW -_MUTATION.oneofs_by_name["mutation"].fields.append(_MUTATION.fields_by_name["set_cell"]) -_MUTATION.fields_by_name["set_cell"].containing_oneof = _MUTATION.oneofs_by_name[ - "mutation" -] -_MUTATION.oneofs_by_name["mutation"].fields.append( - _MUTATION.fields_by_name["delete_from_column"] -) -_MUTATION.fields_by_name[ - "delete_from_column" -].containing_oneof = _MUTATION.oneofs_by_name["mutation"] -_MUTATION.oneofs_by_name["mutation"].fields.append( - _MUTATION.fields_by_name["delete_from_family"] -) -_MUTATION.fields_by_name[ - "delete_from_family" -].containing_oneof = _MUTATION.oneofs_by_name["mutation"] -_MUTATION.oneofs_by_name["mutation"].fields.append( - _MUTATION.fields_by_name["delete_from_row"] -) -_MUTATION.fields_by_name["delete_from_row"].containing_oneof = _MUTATION.oneofs_by_name[ - "mutation" -] -_READMODIFYWRITERULE.oneofs_by_name["rule"].fields.append( - _READMODIFYWRITERULE.fields_by_name["append_value"] -) -_READMODIFYWRITERULE.fields_by_name[ - "append_value" -].containing_oneof = _READMODIFYWRITERULE.oneofs_by_name["rule"] -_READMODIFYWRITERULE.oneofs_by_name["rule"].fields.append( - _READMODIFYWRITERULE.fields_by_name["increment_amount"] -) -_READMODIFYWRITERULE.fields_by_name[ - "increment_amount" -].containing_oneof = _READMODIFYWRITERULE.oneofs_by_name["rule"] -DESCRIPTOR.message_types_by_name["Row"] = _ROW -DESCRIPTOR.message_types_by_name["Family"] = _FAMILY -DESCRIPTOR.message_types_by_name["Column"] = _COLUMN -DESCRIPTOR.message_types_by_name["Cell"] = _CELL -DESCRIPTOR.message_types_by_name["RowRange"] = _ROWRANGE -DESCRIPTOR.message_types_by_name["RowSet"] = _ROWSET -DESCRIPTOR.message_types_by_name["ColumnRange"] = _COLUMNRANGE -DESCRIPTOR.message_types_by_name["TimestampRange"] = _TIMESTAMPRANGE -DESCRIPTOR.message_types_by_name["ValueRange"] = _VALUERANGE -DESCRIPTOR.message_types_by_name["RowFilter"] = _ROWFILTER -DESCRIPTOR.message_types_by_name["Mutation"] = _MUTATION -DESCRIPTOR.message_types_by_name["ReadModifyWriteRule"] = _READMODIFYWRITERULE -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -Row = _reflection.GeneratedProtocolMessageType( - "Row", - (_message.Message,), - { - "DESCRIPTOR": _ROW, - "__module__": "google.cloud.bigtable_v2.proto.data_pb2", - "__doc__": """Specifies the complete (requested) contents of a single row of a - table. Rows which exceed 256MiB in size cannot be read in full. - - Attributes: - key: - The unique key which identifies this row within its table. - This is the same key that’s used to identify the row in, for - example, a MutateRowRequest. 
May contain any non-empty byte - string up to 4KiB in length. - families: - May be empty, but only if the entire row is empty. The mutual - ordering of column families is not specified. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.Row) - }, -) -_sym_db.RegisterMessage(Row) - -Family = _reflection.GeneratedProtocolMessageType( - "Family", - (_message.Message,), - { - "DESCRIPTOR": _FAMILY, - "__module__": "google.cloud.bigtable_v2.proto.data_pb2", - "__doc__": """Specifies (some of) the contents of a single row/column family - intersection of a table. - - Attributes: - name: - The unique key which identifies this family within its row. - This is the same key that’s used to identify the family in, - for example, a RowFilter which sets its - “family_name_regex_filter” field. Must match - ``[-_.a-zA-Z0-9]+``, except that AggregatingRowProcessors may - produce cells in a sentinel family with an empty name. Must be - no greater than 64 characters in length. - columns: - Must not be empty. Sorted in order of increasing “qualifier”. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.Family) - }, -) -_sym_db.RegisterMessage(Family) - -Column = _reflection.GeneratedProtocolMessageType( - "Column", - (_message.Message,), - { - "DESCRIPTOR": _COLUMN, - "__module__": "google.cloud.bigtable_v2.proto.data_pb2", - "__doc__": """Specifies (some of) the contents of a single row/column intersection - of a table. - - Attributes: - qualifier: - The unique key which identifies this column within its family. - This is the same key that’s used to identify the column in, - for example, a RowFilter which sets its - ``column_qualifier_regex_filter`` field. May contain any byte - string, including the empty string, up to 16kiB in length. - cells: - Must not be empty. Sorted in order of decreasing - “timestamp_micros”. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.Column) - }, -) -_sym_db.RegisterMessage(Column) - -Cell = _reflection.GeneratedProtocolMessageType( - "Cell", - (_message.Message,), - { - "DESCRIPTOR": _CELL, - "__module__": "google.cloud.bigtable_v2.proto.data_pb2", - "__doc__": """Specifies (some of) the contents of a single row/column/timestamp of a - table. - - Attributes: - timestamp_micros: - The cell’s stored timestamp, which also uniquely identifies it - within its column. Values are always expressed in - microseconds, but individual tables may set a coarser - granularity to further restrict the allowed values. For - example, a table which specifies millisecond granularity will - only allow values of ``timestamp_micros`` which are multiples - of 1000. - value: - The value stored in the cell. May contain any byte string, - including the empty string, up to 100MiB in length. - labels: - Labels applied to the cell by a - [RowFilter][google.bigtable.v2.RowFilter]. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.Cell) - }, -) -_sym_db.RegisterMessage(Cell) - -RowRange = _reflection.GeneratedProtocolMessageType( - "RowRange", - (_message.Message,), - { - "DESCRIPTOR": _ROWRANGE, - "__module__": "google.cloud.bigtable_v2.proto.data_pb2", - "__doc__": """Specifies a contiguous range of rows. - - Attributes: - start_key: - The row key at which to start the range. If neither field is - set, interpreted as the empty string, inclusive. - start_key_closed: - Used when giving an inclusive lower bound for the range. - start_key_open: - Used when giving an exclusive lower bound for the range. 
- end_key: - The row key at which to end the range. If neither field is - set, interpreted as the infinite row key, exclusive. - end_key_open: - Used when giving an exclusive upper bound for the range. - end_key_closed: - Used when giving an inclusive upper bound for the range. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowRange) - }, -) -_sym_db.RegisterMessage(RowRange) - -RowSet = _reflection.GeneratedProtocolMessageType( - "RowSet", - (_message.Message,), - { - "DESCRIPTOR": _ROWSET, - "__module__": "google.cloud.bigtable_v2.proto.data_pb2", - "__doc__": """Specifies a non-contiguous set of rows. - - Attributes: - row_keys: - Single rows included in the set. - row_ranges: - Contiguous row ranges included in the set. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowSet) - }, -) -_sym_db.RegisterMessage(RowSet) - -ColumnRange = _reflection.GeneratedProtocolMessageType( - "ColumnRange", - (_message.Message,), - { - "DESCRIPTOR": _COLUMNRANGE, - "__module__": "google.cloud.bigtable_v2.proto.data_pb2", - "__doc__": """Specifies a contiguous range of columns within a single column family. - The range spans from : to - :, where both bounds can be either - inclusive or exclusive. - - Attributes: - family_name: - The name of the column family within which this range falls. - start_qualifier: - The column qualifier at which to start the range (within - ``column_family``). If neither field is set, interpreted as - the empty string, inclusive. - start_qualifier_closed: - Used when giving an inclusive lower bound for the range. - start_qualifier_open: - Used when giving an exclusive lower bound for the range. - end_qualifier: - The column qualifier at which to end the range (within - ``column_family``). If neither field is set, interpreted as - the infinite string, exclusive. - end_qualifier_closed: - Used when giving an inclusive upper bound for the range. - end_qualifier_open: - Used when giving an exclusive upper bound for the range. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.ColumnRange) - }, -) -_sym_db.RegisterMessage(ColumnRange) - -TimestampRange = _reflection.GeneratedProtocolMessageType( - "TimestampRange", - (_message.Message,), - { - "DESCRIPTOR": _TIMESTAMPRANGE, - "__module__": "google.cloud.bigtable_v2.proto.data_pb2", - "__doc__": """Specified a contiguous range of microsecond timestamps. - - Attributes: - start_timestamp_micros: - Inclusive lower bound. If left empty, interpreted as 0. - end_timestamp_micros: - Exclusive upper bound. If left empty, interpreted as infinity. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.TimestampRange) - }, -) -_sym_db.RegisterMessage(TimestampRange) - -ValueRange = _reflection.GeneratedProtocolMessageType( - "ValueRange", - (_message.Message,), - { - "DESCRIPTOR": _VALUERANGE, - "__module__": "google.cloud.bigtable_v2.proto.data_pb2", - "__doc__": """Specifies a contiguous range of raw byte values. - - Attributes: - start_value: - The value at which to start the range. If neither field is - set, interpreted as the empty string, inclusive. - start_value_closed: - Used when giving an inclusive lower bound for the range. - start_value_open: - Used when giving an exclusive lower bound for the range. - end_value: - The value at which to end the range. If neither field is set, - interpreted as the infinite string, exclusive. - end_value_closed: - Used when giving an inclusive upper bound for the range. 
- end_value_open: - Used when giving an exclusive upper bound for the range. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.ValueRange) - }, -) -_sym_db.RegisterMessage(ValueRange) - -RowFilter = _reflection.GeneratedProtocolMessageType( - "RowFilter", - (_message.Message,), - { - "Chain": _reflection.GeneratedProtocolMessageType( - "Chain", - (_message.Message,), - { - "DESCRIPTOR": _ROWFILTER_CHAIN, - "__module__": "google.cloud.bigtable_v2.proto.data_pb2", - "__doc__": """A RowFilter which sends rows through several RowFilters in sequence. - - Attributes: - filters: - The elements of “filters” are chained together to process the - input row: in row -> f(0) -> intermediate row -> f(1) -> … -> - f(N) -> out row The full chain is executed atomically. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowFilter.Chain) - }, - ), - "Interleave": _reflection.GeneratedProtocolMessageType( - "Interleave", - (_message.Message,), - { - "DESCRIPTOR": _ROWFILTER_INTERLEAVE, - "__module__": "google.cloud.bigtable_v2.proto.data_pb2", - "__doc__": """A RowFilter which sends each row to each of several component - RowFilters and interleaves the results. - - Attributes: - filters: - The elements of “filters” all process a copy of the input row, - and the results are pooled, sorted, and combined into a single - output row. If multiple cells are produced with the same - column and timestamp, they will all appear in the output row - in an unspecified mutual order. Consider the following - example, with three filters: :: - input row | - ----------------------------------------------------- - | | | - f(0) f(1) f(2) - | | | 1: - foo,bar,10,x foo,bar,10,z far,bar,7,a - 2: foo,blah,11,z far,blah,5,x - far,blah,5,x | | - | - ----------------------------------------------------- - | 1: foo,bar,10,z // could have - switched with #2 2: foo,bar,10,x // - could have switched with #1 3: - foo,blah,11,z 4: far,bar,7,a 5: - far,blah,5,x // identical to #6 6: - far,blah,5,x // identical to #5 All interleaved filters are - executed atomically. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowFilter.Interleave) - }, - ), - "Condition": _reflection.GeneratedProtocolMessageType( - "Condition", - (_message.Message,), - { - "DESCRIPTOR": _ROWFILTER_CONDITION, - "__module__": "google.cloud.bigtable_v2.proto.data_pb2", - "__doc__": """A RowFilter which evaluates one of two possible RowFilters, depending - on whether or not a predicate RowFilter outputs any cells from the - input row. IMPORTANT NOTE: The predicate filter does not execute - atomically with the true and false filters, which may lead to - inconsistent or unexpected results. Additionally, Condition filters - have poor performance, especially when filters are set for the false - condition. - - Attributes: - predicate_filter: - If ``predicate_filter`` outputs any cells, then - ``true_filter`` will be evaluated on the input row. Otherwise, - ``false_filter`` will be evaluated. - true_filter: - The filter to apply to the input row if ``predicate_filter`` - returns any results. If not provided, no results will be - returned in the true case. - false_filter: - The filter to apply to the input row if ``predicate_filter`` - does not return any results. If not provided, no results will - be returned in the false case. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowFilter.Condition) - }, - ), - "DESCRIPTOR": _ROWFILTER, - "__module__": "google.cloud.bigtable_v2.proto.data_pb2", - "__doc__": """Takes a row as input and produces an alternate view of the row based - on specified rules. For example, a RowFilter might trim down a row to - include just the cells from columns matching a given regular - expression, or might return all the cells of a row but not their - values. More complicated filters can be composed out of these - components to express requests such as, “within every column of a - particular family, give just the two most recent cells which are older - than timestamp X.” There are two broad categories of RowFilters (true - filters and transformers), as well as two ways to compose simple - filters into more complex ones (chains and interleaves). They work as - follows: - True filters alter the input row by excluding some of its - cells wholesale from the output row. An example of a true filter is - the ``value_regex_filter``, which excludes cells whose values don’t - match the specified pattern. All regex true filters use RE2 syntax - (https://github.com/google/re2/wiki/Syntax) in raw byte mode - (RE2::Latin1), and are evaluated as full matches. An important point - to keep in mind is that ``RE2(.)`` is equivalent by default to - ``RE2([^\n])``, meaning that it does not match newlines. When - attempting to match an arbitrary byte, you should therefore use the - escape sequence ``\C``, which may need to be further escaped as - ``\\C`` in your client language. - Transformers alter the input row - by changing the values of some of its cells in the output, without - excluding them completely. Currently, the only supported - transformer is the ``strip_value_transformer``, which replaces - every cell’s value with the empty string. - Chains and - interleaves are described in more detail in the RowFilter.Chain and - RowFilter.Interleave documentation. The total serialized size of a - RowFilter message must not exceed 4096 bytes, and RowFilters may not - be nested within each other (in Chains or Interleaves) to a depth of - more than 20. - - Attributes: - filter: - Which of the possible RowFilter types to apply. If none are - set, this RowFilter returns all cells in the input row. - chain: - Applies several RowFilters to the data in sequence, - progressively narrowing the results. - interleave: - Applies several RowFilters to the data in parallel and - combines the results. - condition: - Applies one of two possible RowFilters to the data based on - the output of a predicate RowFilter. - sink: - ADVANCED USE ONLY. Hook for introspection into the RowFilter. - Outputs all cells directly to the output of the read rather - than to any parent filter. 
Consider the following example: :: - Chain( FamilyRegex("A"), Interleave( All(), - Chain(Label("foo"), Sink()) ), QualifierRegex("B") - ) A,A,1,w - A,B,2,x B,B,4,z - | FamilyRegex("A") - | A,A,1,w - A,B,2,x | - +------------+-------------+ | - | All() Label(foo) - | | A,A,1,w - A,A,1,w,labels:[foo] A,B,2,x - A,B,2,x,labels:[foo] | | - | Sink() --------------+ | - | | +------------+ x------+ - A,A,1,w,labels:[foo] | - A,B,2,x,labels:[foo] A,A,1,w - | A,B,2,x | - | | - QualifierRegex("B") | - | | - A,B,2,x | - | | - +--------------------------------+ | - A,A,1,w,labels:[foo] - A,B,2,x,labels:[foo] // could be switched - A,B,2,x // could be switched Despite being - excluded by the qualifier filter, a copy of every cell that - reaches the sink is present in the final result. As with an - [Interleave][google.bigtable.v2.RowFilter.Interleave], - duplicate cells are possible, and appear in an unspecified - mutual order. In this case we have a duplicate with column - “A:B” and timestamp 2, because one copy passed through the all - filter while the other was passed through the label and sink. - Note that one copy has label “foo”, while the other does not. - Cannot be used within the ``predicate_filter``, - ``true_filter``, or ``false_filter`` of a - [Condition][google.bigtable.v2.RowFilter.Condition]. - pass_all_filter: - Matches all cells, regardless of input. Functionally - equivalent to leaving ``filter`` unset, but included for - completeness. - block_all_filter: - Does not match any cells, regardless of input. Useful for - temporarily disabling just part of a filter. - row_key_regex_filter: - Matches only cells from rows whose keys satisfy the given RE2 - regex. In other words, passes through the entire row when the - key matches, and otherwise produces an empty row. Note that, - since row keys can contain arbitrary bytes, the ``\C`` escape - sequence must be used if a true wildcard is desired. The ``.`` - character will not match the new line character ``\n``, which - may be present in a binary key. - row_sample_filter: - Matches all cells from a row with probability p, and matches - no cells from the row with probability 1-p. - family_name_regex_filter: - Matches only cells from columns whose families satisfy the - given RE2 regex. For technical reasons, the regex must not - contain the ``:`` character, even if it is not being used as a - literal. Note that, since column families cannot contain the - new line character ``\n``, it is sufficient to use ``.`` as a - full wildcard when matching column family names. - column_qualifier_regex_filter: - Matches only cells from columns whose qualifiers satisfy the - given RE2 regex. Note that, since column qualifiers can - contain arbitrary bytes, the ``\C`` escape sequence must be - used if a true wildcard is desired. The ``.`` character will - not match the new line character ``\n``, which may be present - in a binary qualifier. - column_range_filter: - Matches only cells from columns within the given range. - timestamp_range_filter: - Matches only cells with timestamps within the given range. - value_regex_filter: - Matches only cells with values that satisfy the given regular - expression. Note that, since cell values can contain arbitrary - bytes, the ``\C`` escape sequence must be used if a true - wildcard is desired. The ``.`` character will not match the - new line character ``\n``, which may be present in a binary - value. - value_range_filter: - Matches only cells with values that fall within the given - range. 
- cells_per_row_offset_filter: - Skips the first N cells of each row, matching all subsequent - cells. If duplicate cells are present, as is possible when - using an Interleave, each copy of the cell is counted - separately. - cells_per_row_limit_filter: - Matches only the first N cells of each row. If duplicate cells - are present, as is possible when using an Interleave, each - copy of the cell is counted separately. - cells_per_column_limit_filter: - Matches only the most recent N cells within each column. For - example, if N=2, this filter would match column ``foo:bar`` at - timestamps 10 and 9, skip all earlier cells in ``foo:bar``, - and then begin matching again in column ``foo:bar2``. If - duplicate cells are present, as is possible when using an - Interleave, each copy of the cell is counted separately. - strip_value_transformer: - Replaces each cell’s value with the empty string. - apply_label_transformer: - Applies the given label to all cells in the output row. This - allows the client to determine which results were produced - from which part of the filter. Values must be at most 15 - characters in length, and match the RE2 pattern - ``[a-z0-9\\-]+`` Due to a technical limitation, it is not - currently possible to apply multiple labels to a cell. As a - result, a Chain may have no more than one sub-filter which - contains a ``apply_label_transformer``. It is okay for an - Interleave to contain multiple ``apply_label_transformers``, - as they will be applied to separate copies of the input. This - may be relaxed in the future. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.RowFilter) - }, -) -_sym_db.RegisterMessage(RowFilter) -_sym_db.RegisterMessage(RowFilter.Chain) -_sym_db.RegisterMessage(RowFilter.Interleave) -_sym_db.RegisterMessage(RowFilter.Condition) - -Mutation = _reflection.GeneratedProtocolMessageType( - "Mutation", - (_message.Message,), - { - "SetCell": _reflection.GeneratedProtocolMessageType( - "SetCell", - (_message.Message,), - { - "DESCRIPTOR": _MUTATION_SETCELL, - "__module__": "google.cloud.bigtable_v2.proto.data_pb2", - "__doc__": """A Mutation which sets the value of the specified cell. - - Attributes: - family_name: - The name of the family into which new data should be written. - Must match ``[-_.a-zA-Z0-9]+`` - column_qualifier: - The qualifier of the column into which new data should be - written. Can be any byte string, including the empty string. - timestamp_micros: - The timestamp of the cell into which new data should be - written. Use -1 for current Bigtable server time. Otherwise, - the client should set this value itself, noting that the - default value is a timestamp of zero if the field is left - unspecified. Values must match the granularity of the table - (e.g. micros, millis). - value: - The value to be written into the specified cell. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation.SetCell) - }, - ), - "DeleteFromColumn": _reflection.GeneratedProtocolMessageType( - "DeleteFromColumn", - (_message.Message,), - { - "DESCRIPTOR": _MUTATION_DELETEFROMCOLUMN, - "__module__": "google.cloud.bigtable_v2.proto.data_pb2", - "__doc__": """A Mutation which deletes cells from the specified column, optionally - restricting the deletions to a given timestamp range. - - Attributes: - family_name: - The name of the family from which cells should be deleted. - Must match ``[-_.a-zA-Z0-9]+`` - column_qualifier: - The qualifier of the column from which cells should be - deleted. 
Can be any byte string, including the empty string. - time_range: - The range of timestamps within which cells should be deleted. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation.DeleteFromColumn) - }, - ), - "DeleteFromFamily": _reflection.GeneratedProtocolMessageType( - "DeleteFromFamily", - (_message.Message,), - { - "DESCRIPTOR": _MUTATION_DELETEFROMFAMILY, - "__module__": "google.cloud.bigtable_v2.proto.data_pb2", - "__doc__": """A Mutation which deletes all cells from the specified column family. - - Attributes: - family_name: - The name of the family from which cells should be deleted. - Must match ``[-_.a-zA-Z0-9]+`` - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation.DeleteFromFamily) - }, - ), - "DeleteFromRow": _reflection.GeneratedProtocolMessageType( - "DeleteFromRow", - (_message.Message,), - { - "DESCRIPTOR": _MUTATION_DELETEFROMROW, - "__module__": "google.cloud.bigtable_v2.proto.data_pb2", - "__doc__": """A Mutation which deletes all cells from the containing row.""", - # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation.DeleteFromRow) - }, - ), - "DESCRIPTOR": _MUTATION, - "__module__": "google.cloud.bigtable_v2.proto.data_pb2", - "__doc__": """Specifies a particular change to be made to the contents of a row. - - Attributes: - mutation: - Which of the possible Mutation types to apply. - set_cell: - Set a cell’s value. - delete_from_column: - Deletes cells from a column. - delete_from_family: - Deletes cells from a column family. - delete_from_row: - Deletes cells from the entire row. - """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.Mutation) - }, -) -_sym_db.RegisterMessage(Mutation) -_sym_db.RegisterMessage(Mutation.SetCell) -_sym_db.RegisterMessage(Mutation.DeleteFromColumn) -_sym_db.RegisterMessage(Mutation.DeleteFromFamily) -_sym_db.RegisterMessage(Mutation.DeleteFromRow) - -ReadModifyWriteRule = _reflection.GeneratedProtocolMessageType( - "ReadModifyWriteRule", - (_message.Message,), - { - "DESCRIPTOR": _READMODIFYWRITERULE, - "__module__": "google.cloud.bigtable_v2.proto.data_pb2", - "__doc__": """Specifies an atomic read/modify/write operation on the latest value of - the specified column. - - Attributes: - family_name: - The name of the family to which the read/modify/write should - be applied. Must match ``[-_.a-zA-Z0-9]+`` - column_qualifier: - The qualifier of the column to which the read/modify/write - should be applied. Can be any byte string, including the empty - string. - rule: - The rule used to determine the column’s new latest value from - its current latest value. - append_value: - Rule specifying that ``append_value`` be appended to the - existing value. If the targeted cell is unset, it will be - treated as containing the empty string. - increment_amount: - Rule specifying that ``increment_amount`` be added to the - existing value. If the targeted cell is unset, it will be - treated as containing a zero. Otherwise, the targeted cell - must contain an 8-byte value (interpreted as a 64-bit big- - endian signed integer), or the entire request will fail. 
- """, - # @@protoc_insertion_point(class_scope:google.bigtable.v2.ReadModifyWriteRule) - }, -) -_sym_db.RegisterMessage(ReadModifyWriteRule) - - -DESCRIPTOR._options = None -# @@protoc_insertion_point(module_scope) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2_grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2_grpc.py deleted file mode 100644 index 8a9393943bdf..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data_pb2_grpc.py +++ /dev/null @@ -1,3 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/instance.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/instance.proto deleted file mode 100644 index bb69b1f66d42..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/instance.proto +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright 2018 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.bigtable.admin.v2; - -import "google/api/annotations.proto"; -import "google/bigtable/admin/v2/common.proto"; - -option csharp_namespace = "Google.Cloud.Bigtable.Admin.V2"; -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/v2;admin"; -option java_multiple_files = true; -option java_outer_classname = "InstanceProto"; -option java_package = "com.google.bigtable.admin.v2"; -option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2"; - - -// A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and -// the resources that serve them. -// All tables in an instance are served from a single -// [Cluster][google.bigtable.admin.v2.Cluster]. -message Instance { - // Possible states of an instance. - enum State { - // The state of the instance could not be determined. - STATE_NOT_KNOWN = 0; - - // The instance has been successfully created and can serve requests - // to its tables. - READY = 1; - - // The instance is currently being created, and may be destroyed - // if the creation process encounters an error. - CREATING = 2; - } - - // The type of the instance. - enum Type { - // The type of the instance is unspecified. If set when creating an - // instance, a `PRODUCTION` instance will be created. If set when updating - // an instance, the type will be left unchanged. - TYPE_UNSPECIFIED = 0; - - // An instance meant for production use. `serve_nodes` must be set - // on the cluster. - PRODUCTION = 1; - - // The instance is meant for development and testing purposes only; it has - // no performance or uptime guarantees and is not covered by SLA. - // After a development instance is created, it can be upgraded by - // updating the instance to type `PRODUCTION`. An instance created - // as a production instance cannot be changed to a development instance. 
- // When creating a development instance, `serve_nodes` on the cluster must - // not be set. - DEVELOPMENT = 2; - } - - // (`OutputOnly`) - // The unique name of the instance. Values are of the form - // `projects//instances/[a-z][a-z0-9\\-]+[a-z0-9]`. - string name = 1; - - // The descriptive name for this instance as it appears in UIs. - // Can be changed at any time, but should be kept globally unique - // to avoid confusion. - string display_name = 2; - - // (`OutputOnly`) - // The current state of the instance. - State state = 3; - - // The type of the instance. Defaults to `PRODUCTION`. - Type type = 4; - - // Labels are a flexible and lightweight mechanism for organizing cloud - // resources into groups that reflect a customer's organizational needs and - // deployment strategies. They can be used to filter resources and aggregate - // metrics. - // - // * Label keys must be between 1 and 63 characters long and must conform to - // the regular expression: `[\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}`. - // * Label values must be between 0 and 63 characters long and must conform to - // the regular expression: `[\p{Ll}\p{Lo}\p{N}_-]{0,63}`. - // * No more than 64 labels can be associated with a given resource. - // * Keys and values must both be under 128 bytes. - map labels = 5; -} - -// A resizable group of nodes in a particular cloud location, capable -// of serving all [Tables][google.bigtable.admin.v2.Table] in the parent -// [Instance][google.bigtable.admin.v2.Instance]. -message Cluster { - // Possible states of a cluster. - enum State { - // The state of the cluster could not be determined. - STATE_NOT_KNOWN = 0; - - // The cluster has been successfully created and is ready to serve requests. - READY = 1; - - // The cluster is currently being created, and may be destroyed - // if the creation process encounters an error. - // A cluster may not be able to serve requests while being created. - CREATING = 2; - - // The cluster is currently being resized, and may revert to its previous - // node count if the process encounters an error. - // A cluster is still capable of serving requests while being resized, - // but may exhibit performance as if its number of allocated nodes is - // between the starting and requested states. - RESIZING = 3; - - // The cluster has no backing nodes. The data (tables) still - // exist, but no operations can be performed on the cluster. - DISABLED = 4; - } - - // (`OutputOnly`) - // The unique name of the cluster. Values are of the form - // `projects//instances//clusters/[a-z][-a-z0-9]*`. - string name = 1; - - // (`CreationOnly`) - // The location where this cluster's nodes and storage reside. For best - // performance, clients should be located as close as possible to this - // cluster. Currently only zones are supported, so values should be of the - // form `projects//locations/`. - string location = 2; - - // (`OutputOnly`) - // The current state of the cluster. - State state = 3; - - // The number of nodes allocated to this cluster. More nodes enable higher - // throughput and more consistent performance. - int32 serve_nodes = 4; - - // (`CreationOnly`) - // The type of storage used by this cluster to serve its - // parent instance's tables, unless explicitly overridden. - StorageType default_storage_type = 5; -} - -// A configuration object describing how Cloud Bigtable should treat traffic -// from a particular end user application. 
-message AppProfile { - // Read/write requests may be routed to any cluster in the instance, and will - // fail over to another cluster in the event of transient errors or delays. - // Choosing this option sacrifices read-your-writes consistency to improve - // availability. - message MultiClusterRoutingUseAny { - - } - - // Unconditionally routes all read/write requests to a specific cluster. - // This option preserves read-your-writes consistency, but does not improve - // availability. - message SingleClusterRouting { - // The cluster to which read/write requests should be routed. - string cluster_id = 1; - - // Whether or not `CheckAndMutateRow` and `ReadModifyWriteRow` requests are - // allowed by this app profile. It is unsafe to send these requests to - // the same table/row/column in multiple clusters. - bool allow_transactional_writes = 2; - } - - // (`OutputOnly`) - // The unique name of the app profile. Values are of the form - // `projects//instances//appProfiles/[_a-zA-Z0-9][-_.a-zA-Z0-9]*`. - string name = 1; - - // Strongly validated etag for optimistic concurrency control. Preserve the - // value returned from `GetAppProfile` when calling `UpdateAppProfile` to - // fail the request if there has been a modification in the mean time. The - // `update_mask` of the request need not include `etag` for this protection - // to apply. - // See [Wikipedia](https://en.wikipedia.org/wiki/HTTP_ETag) and - // [RFC 7232](https://tools.ietf.org/html/rfc7232#section-2.3) for more - // details. - string etag = 2; - - // Optional long form description of the use case for this AppProfile. - string description = 3; - - // The routing policy for all read/write requests which use this app profile. - // A value must be explicitly set. - oneof routing_policy { - // Use a multi-cluster routing policy that may pick any cluster. - MultiClusterRoutingUseAny multi_cluster_routing_use_any = 5; - - // Use a single-cluster routing policy. - SingleClusterRouting single_cluster_routing = 6; - } -} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/table.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/table.proto deleted file mode 100644 index 5d4374effc59..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/table.proto +++ /dev/null @@ -1,221 +0,0 @@ -// Copyright 2018 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.bigtable.admin.v2; - -import "google/api/annotations.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/timestamp.proto"; - -option csharp_namespace = "Google.Cloud.Bigtable.Admin.V2"; -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/v2;admin"; -option java_multiple_files = true; -option java_outer_classname = "TableProto"; -option java_package = "com.google.bigtable.admin.v2"; -option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2"; - - -// A collection of user data indexed by row, column, and timestamp. 
-// Each table is served using the resources of its parent cluster. -message Table { - // The state of a table's data in a particular cluster. - message ClusterState { - // Table replication states. - enum ReplicationState { - // The replication state of the table is unknown in this cluster. - STATE_NOT_KNOWN = 0; - - // The cluster was recently created, and the table must finish copying - // over pre-existing data from other clusters before it can begin - // receiving live replication updates and serving Data API requests. - INITIALIZING = 1; - - // The table is temporarily unable to serve Data API requests from this - // cluster due to planned internal maintenance. - PLANNED_MAINTENANCE = 2; - - // The table is temporarily unable to serve Data API requests from this - // cluster due to unplanned or emergency maintenance. - UNPLANNED_MAINTENANCE = 3; - - // The table can serve Data API requests from this cluster. Depending on - // replication delay, reads may not immediately reflect the state of the - // table in other clusters. - READY = 4; - } - - // (`OutputOnly`) - // The state of replication for the table in this cluster. - ReplicationState replication_state = 1; - } - - // Possible timestamp granularities to use when keeping multiple versions - // of data in a table. - enum TimestampGranularity { - // The user did not specify a granularity. Should not be returned. - // When specified during table creation, MILLIS will be used. - TIMESTAMP_GRANULARITY_UNSPECIFIED = 0; - - // The table keeps data versioned at a granularity of 1ms. - MILLIS = 1; - } - - // Defines a view over a table's fields. - enum View { - // Uses the default view for each method as documented in its request. - VIEW_UNSPECIFIED = 0; - - // Only populates `name`. - NAME_ONLY = 1; - - // Only populates `name` and fields related to the table's schema. - SCHEMA_VIEW = 2; - - // Only populates `name` and fields related to the table's - // replication state. - REPLICATION_VIEW = 3; - - // Populates all fields. - FULL = 4; - } - - // (`OutputOnly`) - // The unique name of the table. Values are of the form - // `projects//instances//tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]*`. - // Views: `NAME_ONLY`, `SCHEMA_VIEW`, `REPLICATION_VIEW`, `FULL` - string name = 1; - - // (`OutputOnly`) - // Map from cluster ID to per-cluster table state. - // If it could not be determined whether or not the table has data in a - // particular cluster (for example, if its zone is unavailable), then - // there will be an entry for the cluster with UNKNOWN `replication_status`. - // Views: `REPLICATION_VIEW`, `FULL` - map cluster_states = 2; - - // (`CreationOnly`) - // The column families configured for this table, mapped by column family ID. - // Views: `SCHEMA_VIEW`, `FULL` - map column_families = 3; - - // (`CreationOnly`) - // The granularity (i.e. `MILLIS`) at which timestamps are stored in - // this table. Timestamps not matching the granularity will be rejected. - // If unspecified at creation time, the value will be set to `MILLIS`. - // Views: `SCHEMA_VIEW`, `FULL` - TimestampGranularity granularity = 4; -} - -// A set of columns within a table which share a common configuration. -message ColumnFamily { - // Garbage collection rule specified as a protobuf. - // Must serialize to at most 500 bytes. - // - // NOTE: Garbage collection executes opportunistically in the background, and - // so it's possible for reads to return a cell even if it matches the active - // GC expression for its family. 
- GcRule gc_rule = 1; -} - -// Rule for determining which cells to delete during garbage collection. -message GcRule { - // A GcRule which deletes cells matching all of the given rules. - message Intersection { - // Only delete cells which would be deleted by every element of `rules`. - repeated GcRule rules = 1; - } - - // A GcRule which deletes cells matching any of the given rules. - message Union { - // Delete cells which would be deleted by any element of `rules`. - repeated GcRule rules = 1; - } - - // Garbage collection rules. - oneof rule { - // Delete all cells in a column except the most recent N. - int32 max_num_versions = 1; - - // Delete cells in a column older than the given age. - // Values must be at least one millisecond, and will be truncated to - // microsecond granularity. - google.protobuf.Duration max_age = 2; - - // Delete cells that would be deleted by every nested rule. - Intersection intersection = 3; - - // Delete cells that would be deleted by any nested rule. - Union union = 4; - } -} - -// A snapshot of a table at a particular time. A snapshot can be used as a -// checkpoint for data restoration or a data source for a new table. -// -// Note: This is a private alpha release of Cloud Bigtable snapshots. This -// feature is not currently available to most Cloud Bigtable customers. This -// feature might be changed in backward-incompatible ways and is not recommended -// for production use. It is not subject to any SLA or deprecation policy. -message Snapshot { - // Possible states of a snapshot. - enum State { - // The state of the snapshot could not be determined. - STATE_NOT_KNOWN = 0; - - // The snapshot has been successfully created and can serve all requests. - READY = 1; - - // The snapshot is currently being created, and may be destroyed if the - // creation process encounters an error. A snapshot may not be restored to a - // table while it is being created. - CREATING = 2; - } - - // (`OutputOnly`) - // The unique name of the snapshot. - // Values are of the form - // `projects//instances//clusters//snapshots/`. - string name = 1; - - // (`OutputOnly`) - // The source table at the time the snapshot was taken. - Table source_table = 2; - - // (`OutputOnly`) - // The size of the data in the source table at the time the snapshot was - // taken. In some cases, this value may be computed asynchronously via a - // background process and a placeholder of 0 will be used in the meantime. - int64 data_size_bytes = 3; - - // (`OutputOnly`) - // The time when the snapshot is created. - google.protobuf.Timestamp create_time = 4; - - // (`OutputOnly`) - // The time when the snapshot will be deleted. The maximum amount of time a - // snapshot can stay active is 365 days. If 'ttl' is not specified, - // the default maximum of 365 days will be used. - google.protobuf.Timestamp delete_time = 5; - - // (`OutputOnly`) - // The current state of the snapshot. - State state = 6; - - // (`OutputOnly`) - // Description of the snapshot. - string description = 7; -} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/py.typed b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/py.typed new file mode 100644 index 000000000000..889d34043118 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-bigtable package uses inline types. 
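For orientation, the GcRule variants defined in the table.proto shown above (max_num_versions, max_age, intersection, union) correspond to helpers in the handwritten google.cloud.bigtable.column_family module that this package ships. Below is a minimal, illustrative sketch of composing such a garbage-collection policy; it is not part of the patch, and the identifiers "my-project", "my-instance", "my-table", and "cf1" are placeholders assumed for the example.

from datetime import timedelta

from google.cloud import bigtable
from google.cloud.bigtable import column_family

# Placeholder identifiers -- substitute real project/instance/table IDs.
client = bigtable.Client(project="my-project", admin=True)
table = client.instance("my-instance").table("my-table")

# GcRule.max_num_versions: keep at most the two most recent cells per column.
versions_rule = column_family.MaxVersionsGCRule(2)

# GcRule.max_age: delete cells older than seven days (truncated to
# microsecond granularity by the service, per the proto comment above).
age_rule = column_family.MaxAgeGCRule(timedelta(days=7))

# GcRule.intersection: delete only cells that would be deleted by *every*
# nested rule, i.e. older than seven days AND beyond the two newest versions.
combined_rule = column_family.GCRuleIntersection(rules=[versions_rule, age_rule])

# Create the column family with the composed garbage-collection policy.
table.column_family("cf1", gc_rule=combined_rule).create()

GCRuleUnion composes nested rules the same way but with any-match semantics, mirroring the Union message in the proto.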
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/__init__.py new file mode 100644 index 000000000000..42ffdf2bc43d --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/__init__.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/__init__.py new file mode 100644 index 000000000000..622941c65d1b --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/__init__.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from .client import BigtableClient +from .async_client import BigtableAsyncClient + +__all__ = ( + "BigtableClient", + "BigtableAsyncClient", +) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py new file mode 100644 index 000000000000..6e170e791ef4 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -0,0 +1,865 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from collections import OrderedDict +import functools +import re +from typing import Dict, AsyncIterable, Awaitable, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.bigtable_v2.types import bigtable +from google.cloud.bigtable_v2.types import data + +from .transports.base import BigtableTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import BigtableGrpcAsyncIOTransport +from .client import BigtableClient + + +class BigtableAsyncClient: + """Service for reading from and writing to existing Bigtable + tables. + """ + + _client: BigtableClient + + DEFAULT_ENDPOINT = BigtableClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = BigtableClient.DEFAULT_MTLS_ENDPOINT + + table_path = staticmethod(BigtableClient.table_path) + parse_table_path = staticmethod(BigtableClient.parse_table_path) + + common_billing_account_path = staticmethod( + BigtableClient.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + BigtableClient.parse_common_billing_account_path + ) + + common_folder_path = staticmethod(BigtableClient.common_folder_path) + parse_common_folder_path = staticmethod(BigtableClient.parse_common_folder_path) + + common_organization_path = staticmethod(BigtableClient.common_organization_path) + parse_common_organization_path = staticmethod( + BigtableClient.parse_common_organization_path + ) + + common_project_path = staticmethod(BigtableClient.common_project_path) + parse_common_project_path = staticmethod(BigtableClient.parse_common_project_path) + + common_location_path = staticmethod(BigtableClient.common_location_path) + parse_common_location_path = staticmethod(BigtableClient.parse_common_location_path) + + from_service_account_info = BigtableClient.from_service_account_info + from_service_account_file = BigtableClient.from_service_account_file + from_service_account_json = from_service_account_file + + @property + def transport(self) -> BigtableTransport: + """Return the transport used by the client instance. + + Returns: + BigtableTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial( + type(BigtableClient).get_transport_class, type(BigtableClient) + ) + + def __init__( + self, + *, + credentials: credentials.Credentials = None, + transport: Union[str, BigtableTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the bigtable client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.BigtableTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. 
GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + + self._client = BigtableClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + def read_rows( + self, + request: bigtable.ReadRowsRequest = None, + *, + table_name: str = None, + app_profile_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Awaitable[AsyncIterable[bigtable.ReadRowsResponse]]: + r"""Streams back the contents of all requested rows in + key order, optionally applying the same Reader filter to + each. Depending on their size, rows and cells may be + broken up across multiple responses, but atomicity of + each row will still be preserved. See the + ReadRowsResponse documentation for details. + + Args: + request (:class:`google.cloud.bigtable_v2.types.ReadRowsRequest`): + The request object. Request message for + Bigtable.ReadRows. + table_name (:class:`str`): + Required. The unique name of the table from which to + read. Values are of the form + ``projects//instances//tables/
``. + + This corresponds to the ``table_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile_id (:class:`str`): + This value specifies routing for + replication. If not specified, the + "default" application profile will be + used. + + This corresponds to the ``app_profile_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + AsyncIterable[google.cloud.bigtable_v2.types.ReadRowsResponse]: + Response message for + Bigtable.ReadRows. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([table_name, app_profile_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable.ReadRowsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if table_name is not None: + request.table_name = table_name + if app_profile_id is not None: + request.app_profile_id = app_profile_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.read_rows, + default_retry=retries.Retry( + initial=0.01, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type(), + ), + default_timeout=43200.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def sample_row_keys( + self, + request: bigtable.SampleRowKeysRequest = None, + *, + table_name: str = None, + app_profile_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Awaitable[AsyncIterable[bigtable.SampleRowKeysResponse]]: + r"""Returns a sample of row keys in the table. The + returned row keys will delimit contiguous sections of + the table of approximately equal size, which can be used + to break up the data for distributed tasks like + mapreduces. + + Args: + request (:class:`google.cloud.bigtable_v2.types.SampleRowKeysRequest`): + The request object. Request message for + Bigtable.SampleRowKeys. + table_name (:class:`str`): + Required. The unique name of the table from which to + sample row keys. Values are of the form + ``projects//instances//tables/
``. + + This corresponds to the ``table_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile_id (:class:`str`): + This value specifies routing for + replication. If not specified, the + "default" application profile will be + used. + + This corresponds to the ``app_profile_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + AsyncIterable[google.cloud.bigtable_v2.types.SampleRowKeysResponse]: + Response message for + Bigtable.SampleRowKeys. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([table_name, app_profile_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable.SampleRowKeysRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if table_name is not None: + request.table_name = table_name + if app_profile_id is not None: + request.app_profile_id = app_profile_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.sample_row_keys, + default_retry=retries.Retry( + initial=0.01, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type(), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def mutate_row( + self, + request: bigtable.MutateRowRequest = None, + *, + table_name: str = None, + row_key: bytes = None, + mutations: Sequence[data.Mutation] = None, + app_profile_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> bigtable.MutateRowResponse: + r"""Mutates a row atomically. Cells already present in the row are + left unchanged unless explicitly changed by ``mutation``. + + Args: + request (:class:`google.cloud.bigtable_v2.types.MutateRowRequest`): + The request object. Request message for + Bigtable.MutateRow. + table_name (:class:`str`): + Required. The unique name of the table to which the + mutation should be applied. Values are of the form + ``projects//instances//tables/
``. + + This corresponds to the ``table_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + row_key (:class:`bytes`): + Required. The key of the row to which + the mutation should be applied. + + This corresponds to the ``row_key`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + mutations (:class:`Sequence[google.cloud.bigtable_v2.types.Mutation]`): + Required. Changes to be atomically + applied to the specified row. Entries + are applied in order, meaning that + earlier mutations can be masked by later + ones. Must contain at least one entry + and at most 100000. + + This corresponds to the ``mutations`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile_id (:class:`str`): + This value specifies routing for + replication. If not specified, the + "default" application profile will be + used. + + This corresponds to the ``app_profile_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_v2.types.MutateRowResponse: + Response message for + Bigtable.MutateRow. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([table_name, row_key, mutations, app_profile_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable.MutateRowRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if table_name is not None: + request.table_name = table_name + if row_key is not None: + request.row_key = row_key + if app_profile_id is not None: + request.app_profile_id = app_profile_id + + if mutations: + request.mutations.extend(mutations) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.mutate_row, + default_retry=retries.Retry( + initial=0.01, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def mutate_rows( + self, + request: bigtable.MutateRowsRequest = None, + *, + table_name: str = None, + entries: Sequence[bigtable.MutateRowsRequest.Entry] = None, + app_profile_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Awaitable[AsyncIterable[bigtable.MutateRowsResponse]]: + r"""Mutates multiple rows in a batch. 
Each individual row + is mutated atomically as in MutateRow, but the entire + batch is not executed atomically. + + Args: + request (:class:`google.cloud.bigtable_v2.types.MutateRowsRequest`): + The request object. Request message for + BigtableService.MutateRows. + table_name (:class:`str`): + Required. The unique name of the + table to which the mutations should be + applied. + + This corresponds to the ``table_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + entries (:class:`Sequence[google.cloud.bigtable_v2.types.MutateRowsRequest.Entry]`): + Required. The row keys and + corresponding mutations to be applied in + bulk. Each entry is applied as an atomic + mutation, but the entries may be applied + in arbitrary order (even between entries + for the same row). At least one entry + must be specified, and in total the + entries can contain at most 100000 + mutations. + + This corresponds to the ``entries`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile_id (:class:`str`): + This value specifies routing for + replication. If not specified, the + "default" application profile will be + used. + + This corresponds to the ``app_profile_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + AsyncIterable[google.cloud.bigtable_v2.types.MutateRowsResponse]: + Response message for + BigtableService.MutateRows. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([table_name, entries, app_profile_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable.MutateRowsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if table_name is not None: + request.table_name = table_name + if app_profile_id is not None: + request.app_profile_id = app_profile_id + + if entries: + request.entries.extend(entries) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.mutate_rows, + default_retry=retries.Retry( + initial=0.01, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type(), + ), + default_timeout=600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
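+        # Each streamed MutateRowsResponse carries ``entries``; each entry has
+        # an ``index`` into the request and its own ``status``, so callers
+        # should inspect those statuses, since the stream finishing cleanly
+        # does not by itself mean every mutation was applied.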
+ return response + + async def check_and_mutate_row( + self, + request: bigtable.CheckAndMutateRowRequest = None, + *, + table_name: str = None, + row_key: bytes = None, + predicate_filter: data.RowFilter = None, + true_mutations: Sequence[data.Mutation] = None, + false_mutations: Sequence[data.Mutation] = None, + app_profile_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> bigtable.CheckAndMutateRowResponse: + r"""Mutates a row atomically based on the output of a + predicate Reader filter. + + Args: + request (:class:`google.cloud.bigtable_v2.types.CheckAndMutateRowRequest`): + The request object. Request message for + Bigtable.CheckAndMutateRow. + table_name (:class:`str`): + Required. The unique name of the table to which the + conditional mutation should be applied. Values are of + the form + ``projects//instances//tables/
``. + + This corresponds to the ``table_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + row_key (:class:`bytes`): + Required. The key of the row to which + the conditional mutation should be + applied. + + This corresponds to the ``row_key`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + predicate_filter (:class:`google.cloud.bigtable_v2.types.RowFilter`): + The filter to be applied to the contents of the + specified row. Depending on whether or not any results + are yielded, either ``true_mutations`` or + ``false_mutations`` will be executed. If unset, checks + that the row contains any values at all. + + This corresponds to the ``predicate_filter`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + true_mutations (:class:`Sequence[google.cloud.bigtable_v2.types.Mutation]`): + Changes to be atomically applied to the specified row if + ``predicate_filter`` yields at least one cell when + applied to ``row_key``. Entries are applied in order, + meaning that earlier mutations can be masked by later + ones. Must contain at least one entry if + ``false_mutations`` is empty, and at most 100000. + + This corresponds to the ``true_mutations`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + false_mutations (:class:`Sequence[google.cloud.bigtable_v2.types.Mutation]`): + Changes to be atomically applied to the specified row if + ``predicate_filter`` does not yield any cells when + applied to ``row_key``. Entries are applied in order, + meaning that earlier mutations can be masked by later + ones. Must contain at least one entry if + ``true_mutations`` is empty, and at most 100000. + + This corresponds to the ``false_mutations`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile_id (:class:`str`): + This value specifies routing for + replication. If not specified, the + "default" application profile will be + used. + + This corresponds to the ``app_profile_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_v2.types.CheckAndMutateRowResponse: + Response message for + Bigtable.CheckAndMutateRow. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any( + [ + table_name, + row_key, + predicate_filter, + true_mutations, + false_mutations, + app_profile_id, + ] + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable.CheckAndMutateRowRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
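+        # Scalar fields are copied onto ``request`` one by one below, while
+        # the repeated ``true_mutations``/``false_mutations`` fields are
+        # merged in via ``extend``.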
+ + if table_name is not None: + request.table_name = table_name + if row_key is not None: + request.row_key = row_key + if predicate_filter is not None: + request.predicate_filter = predicate_filter + if app_profile_id is not None: + request.app_profile_id = app_profile_id + + if true_mutations: + request.true_mutations.extend(true_mutations) + if false_mutations: + request.false_mutations.extend(false_mutations) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.check_and_mutate_row, + default_retry=retries.Retry( + initial=0.01, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type(), + ), + default_timeout=20.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def read_modify_write_row( + self, + request: bigtable.ReadModifyWriteRowRequest = None, + *, + table_name: str = None, + row_key: bytes = None, + rules: Sequence[data.ReadModifyWriteRule] = None, + app_profile_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> bigtable.ReadModifyWriteRowResponse: + r"""Modifies a row atomically on the server. The method + reads the latest existing timestamp and value from the + specified columns and writes a new entry based on pre- + defined read/modify/write rules. The new value for the + timestamp is the greater of the existing timestamp or + the current server time. The method returns the new + contents of all modified cells. + + Args: + request (:class:`google.cloud.bigtable_v2.types.ReadModifyWriteRowRequest`): + The request object. Request message for + Bigtable.ReadModifyWriteRow. + table_name (:class:`str`): + Required. The unique name of the table to which the + read/modify/write rules should be applied. Values are of + the form + ``projects//instances//tables/
``. + + This corresponds to the ``table_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + row_key (:class:`bytes`): + Required. The key of the row to which + the read/modify/write rules should be + applied. + + This corresponds to the ``row_key`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + rules (:class:`Sequence[google.cloud.bigtable_v2.types.ReadModifyWriteRule]`): + Required. Rules specifying how the + specified row's contents are to be + transformed into writes. Entries are + applied in order, meaning that earlier + rules will affect the results of later + ones. + + This corresponds to the ``rules`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile_id (:class:`str`): + This value specifies routing for + replication. If not specified, the + "default" application profile will be + used. + + This corresponds to the ``app_profile_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_v2.types.ReadModifyWriteRowResponse: + Response message for + Bigtable.ReadModifyWriteRow. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([table_name, row_key, rules, app_profile_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable.ReadModifyWriteRowRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if table_name is not None: + request.table_name = table_name + if row_key is not None: + request.row_key = row_key + if app_profile_id is not None: + request.app_profile_id = app_profile_id + + if rules: + request.rules.extend(rules) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.read_modify_write_row, + default_retry=retries.Retry( + initial=0.01, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type(), + ), + default_timeout=20.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
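+        # The ``row`` field of the returned ReadModifyWriteRowResponse holds
+        # the post-transform contents of every cell modified by ``rules``.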
+ return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-bigtable",).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("BigtableAsyncClient",) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py new file mode 100644 index 000000000000..8ae8110541c7 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py @@ -0,0 +1,1041 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +from distutils import util +import os +import re +from typing import Callable, Dict, Optional, Iterable, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.bigtable_v2.types import bigtable +from google.cloud.bigtable_v2.types import data + +from .transports.base import BigtableTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import BigtableGrpcTransport +from .transports.grpc_asyncio import BigtableGrpcAsyncIOTransport + + +class BigtableClientMeta(type): + """Metaclass for the Bigtable client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + + _transport_registry = OrderedDict() # type: Dict[str, Type[BigtableTransport]] + _transport_registry["grpc"] = BigtableGrpcTransport + _transport_registry["grpc_asyncio"] = BigtableGrpcAsyncIOTransport + + def get_transport_class(cls, label: str = None,) -> Type[BigtableTransport]: + """Return an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class BigtableClient(metaclass=BigtableClientMeta): + """Service for reading from and writing to existing Bigtable + tables. 
+ """ + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Convert api endpoint to mTLS endpoint. + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "bigtable.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + BigtableClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + BigtableClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> BigtableTransport: + """Return the transport used by the client instance. + + Returns: + BigtableTransport: The transport used by the client instance. + """ + return self._transport + + @staticmethod + def table_path(project: str, instance: str, table: str,) -> str: + """Return a fully-qualified table string.""" + return "projects/{project}/instances/{instance}/tables/{table}".format( + project=project, instance=instance, table=table, + ) + + @staticmethod + def parse_table_path(path: str) -> Dict[str, str]: + """Parse a table path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/instances/(?P.+?)/tables/(?P
.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str,) -> str: + """Return a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str, str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str,) -> str: + """Return a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder,) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str, str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str,) -> str: + """Return a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization,) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str, str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str,) -> str: + """Return a fully-qualified project string.""" + return "projects/{project}".format(project=project,) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str, str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str,) -> str: + """Return a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format( + project=project, location=location, + ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str, str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__( + self, + *, + credentials: Optional[credentials.Credentials] = None, + transport: Union[str, BigtableTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the bigtable client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, BigtableTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). 
However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + use_client_cert = bool( + util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) + ) + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + client_cert_source_func = ( + mtls.default_client_cert_source() if is_mtls else None + ) + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + api_endpoint = ( + self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT + ) + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, BigtableTransport): + # transport is a BigtableTransport instance. + if credentials or client_options.credentials_file: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, " + "provide its scopes directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + ) + + def read_rows( + self, + request: bigtable.ReadRowsRequest = None, + *, + table_name: str = None, + app_profile_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Iterable[bigtable.ReadRowsResponse]: + r"""Streams back the contents of all requested rows in + key order, optionally applying the same Reader filter to + each. 
Depending on their size, rows and cells may be + broken up across multiple responses, but atomicity of + each row will still be preserved. See the + ReadRowsResponse documentation for details. + + Args: + request (google.cloud.bigtable_v2.types.ReadRowsRequest): + The request object. Request message for + Bigtable.ReadRows. + table_name (str): + Required. The unique name of the table from which to + read. Values are of the form + ``projects//instances//tables/
``. + + This corresponds to the ``table_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile_id (str): + This value specifies routing for + replication. If not specified, the + "default" application profile will be + used. + + This corresponds to the ``app_profile_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + Iterable[google.cloud.bigtable_v2.types.ReadRowsResponse]: + Response message for + Bigtable.ReadRows. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([table_name, app_profile_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable.ReadRowsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable.ReadRowsRequest): + request = bigtable.ReadRowsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if table_name is not None: + request.table_name = table_name + if app_profile_id is not None: + request.app_profile_id = app_profile_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.read_rows] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def sample_row_keys( + self, + request: bigtable.SampleRowKeysRequest = None, + *, + table_name: str = None, + app_profile_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Iterable[bigtable.SampleRowKeysResponse]: + r"""Returns a sample of row keys in the table. The + returned row keys will delimit contiguous sections of + the table of approximately equal size, which can be used + to break up the data for distributed tasks like + mapreduces. + + Args: + request (google.cloud.bigtable_v2.types.SampleRowKeysRequest): + The request object. Request message for + Bigtable.SampleRowKeys. + table_name (str): + Required. The unique name of the table from which to + sample row keys. Values are of the form + ``projects//instances//tables/
``. + + This corresponds to the ``table_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile_id (str): + This value specifies routing for + replication. If not specified, the + "default" application profile will be + used. + + This corresponds to the ``app_profile_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + Iterable[google.cloud.bigtable_v2.types.SampleRowKeysResponse]: + Response message for + Bigtable.SampleRowKeys. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([table_name, app_profile_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable.SampleRowKeysRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable.SampleRowKeysRequest): + request = bigtable.SampleRowKeysRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if table_name is not None: + request.table_name = table_name + if app_profile_id is not None: + request.app_profile_id = app_profile_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.sample_row_keys] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def mutate_row( + self, + request: bigtable.MutateRowRequest = None, + *, + table_name: str = None, + row_key: bytes = None, + mutations: Sequence[data.Mutation] = None, + app_profile_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> bigtable.MutateRowResponse: + r"""Mutates a row atomically. Cells already present in the row are + left unchanged unless explicitly changed by ``mutation``. + + Args: + request (google.cloud.bigtable_v2.types.MutateRowRequest): + The request object. Request message for + Bigtable.MutateRow. + table_name (str): + Required. The unique name of the table to which the + mutation should be applied. Values are of the form + ``projects//instances//tables/
``. + + This corresponds to the ``table_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + row_key (bytes): + Required. The key of the row to which + the mutation should be applied. + + This corresponds to the ``row_key`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + mutations (Sequence[google.cloud.bigtable_v2.types.Mutation]): + Required. Changes to be atomically + applied to the specified row. Entries + are applied in order, meaning that + earlier mutations can be masked by later + ones. Must contain at least one entry + and at most 100000. + + This corresponds to the ``mutations`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile_id (str): + This value specifies routing for + replication. If not specified, the + "default" application profile will be + used. + + This corresponds to the ``app_profile_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_v2.types.MutateRowResponse: + Response message for + Bigtable.MutateRow. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([table_name, row_key, mutations, app_profile_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable.MutateRowRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable.MutateRowRequest): + request = bigtable.MutateRowRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if table_name is not None: + request.table_name = table_name + if row_key is not None: + request.row_key = row_key + if app_profile_id is not None: + request.app_profile_id = app_profile_id + + if mutations: + request.mutations.extend(mutations) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.mutate_row] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def mutate_rows( + self, + request: bigtable.MutateRowsRequest = None, + *, + table_name: str = None, + entries: Sequence[bigtable.MutateRowsRequest.Entry] = None, + app_profile_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Iterable[bigtable.MutateRowsResponse]: + r"""Mutates multiple rows in a batch. 
Each individual row + is mutated atomically as in MutateRow, but the entire + batch is not executed atomically. + + Args: + request (google.cloud.bigtable_v2.types.MutateRowsRequest): + The request object. Request message for + BigtableService.MutateRows. + table_name (str): + Required. The unique name of the + table to which the mutations should be + applied. + + This corresponds to the ``table_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + entries (Sequence[google.cloud.bigtable_v2.types.MutateRowsRequest.Entry]): + Required. The row keys and + corresponding mutations to be applied in + bulk. Each entry is applied as an atomic + mutation, but the entries may be applied + in arbitrary order (even between entries + for the same row). At least one entry + must be specified, and in total the + entries can contain at most 100000 + mutations. + + This corresponds to the ``entries`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile_id (str): + This value specifies routing for + replication. If not specified, the + "default" application profile will be + used. + + This corresponds to the ``app_profile_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + Iterable[google.cloud.bigtable_v2.types.MutateRowsResponse]: + Response message for + BigtableService.MutateRows. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([table_name, entries, app_profile_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable.MutateRowsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable.MutateRowsRequest): + request = bigtable.MutateRowsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if table_name is not None: + request.table_name = table_name + if app_profile_id is not None: + request.app_profile_id = app_profile_id + + if entries: + request.entries.extend(entries) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.mutate_rows] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
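+        # This is a server-streaming call: the returned iterable yields
+        # MutateRowsResponse messages as the server reports progress, so it
+        # should be consumed fully to observe the outcome of every entry.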
+ return response + + def check_and_mutate_row( + self, + request: bigtable.CheckAndMutateRowRequest = None, + *, + table_name: str = None, + row_key: bytes = None, + predicate_filter: data.RowFilter = None, + true_mutations: Sequence[data.Mutation] = None, + false_mutations: Sequence[data.Mutation] = None, + app_profile_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> bigtable.CheckAndMutateRowResponse: + r"""Mutates a row atomically based on the output of a + predicate Reader filter. + + Args: + request (google.cloud.bigtable_v2.types.CheckAndMutateRowRequest): + The request object. Request message for + Bigtable.CheckAndMutateRow. + table_name (str): + Required. The unique name of the table to which the + conditional mutation should be applied. Values are of + the form + ``projects//instances//tables/
``. + + This corresponds to the ``table_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + row_key (bytes): + Required. The key of the row to which + the conditional mutation should be + applied. + + This corresponds to the ``row_key`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + predicate_filter (google.cloud.bigtable_v2.types.RowFilter): + The filter to be applied to the contents of the + specified row. Depending on whether or not any results + are yielded, either ``true_mutations`` or + ``false_mutations`` will be executed. If unset, checks + that the row contains any values at all. + + This corresponds to the ``predicate_filter`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + true_mutations (Sequence[google.cloud.bigtable_v2.types.Mutation]): + Changes to be atomically applied to the specified row if + ``predicate_filter`` yields at least one cell when + applied to ``row_key``. Entries are applied in order, + meaning that earlier mutations can be masked by later + ones. Must contain at least one entry if + ``false_mutations`` is empty, and at most 100000. + + This corresponds to the ``true_mutations`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + false_mutations (Sequence[google.cloud.bigtable_v2.types.Mutation]): + Changes to be atomically applied to the specified row if + ``predicate_filter`` does not yield any cells when + applied to ``row_key``. Entries are applied in order, + meaning that earlier mutations can be masked by later + ones. Must contain at least one entry if + ``true_mutations`` is empty, and at most 100000. + + This corresponds to the ``false_mutations`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile_id (str): + This value specifies routing for + replication. If not specified, the + "default" application profile will be + used. + + This corresponds to the ``app_profile_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_v2.types.CheckAndMutateRowResponse: + Response message for + Bigtable.CheckAndMutateRow. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any( + [ + table_name, + row_key, + predicate_filter, + true_mutations, + false_mutations, + app_profile_id, + ] + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable.CheckAndMutateRowRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable.CheckAndMutateRowRequest): + request = bigtable.CheckAndMutateRowRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if table_name is not None: + request.table_name = table_name + if row_key is not None: + request.row_key = row_key + if predicate_filter is not None: + request.predicate_filter = predicate_filter + if app_profile_id is not None: + request.app_profile_id = app_profile_id + + if true_mutations: + request.true_mutations.extend(true_mutations) + if false_mutations: + request.false_mutations.extend(false_mutations) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.check_and_mutate_row] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def read_modify_write_row( + self, + request: bigtable.ReadModifyWriteRowRequest = None, + *, + table_name: str = None, + row_key: bytes = None, + rules: Sequence[data.ReadModifyWriteRule] = None, + app_profile_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> bigtable.ReadModifyWriteRowResponse: + r"""Modifies a row atomically on the server. The method + reads the latest existing timestamp and value from the + specified columns and writes a new entry based on pre- + defined read/modify/write rules. The new value for the + timestamp is the greater of the existing timestamp or + the current server time. The method returns the new + contents of all modified cells. + + Args: + request (google.cloud.bigtable_v2.types.ReadModifyWriteRowRequest): + The request object. Request message for + Bigtable.ReadModifyWriteRow. + table_name (str): + Required. The unique name of the table to which the + read/modify/write rules should be applied. Values are of + the form + ``projects//instances//tables/
``. + + This corresponds to the ``table_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + row_key (bytes): + Required. The key of the row to which + the read/modify/write rules should be + applied. + + This corresponds to the ``row_key`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + rules (Sequence[google.cloud.bigtable_v2.types.ReadModifyWriteRule]): + Required. Rules specifying how the + specified row's contents are to be + transformed into writes. Entries are + applied in order, meaning that earlier + rules will affect the results of later + ones. + + This corresponds to the ``rules`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile_id (str): + This value specifies routing for + replication. If not specified, the + "default" application profile will be + used. + + This corresponds to the ``app_profile_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_v2.types.ReadModifyWriteRowResponse: + Response message for + Bigtable.ReadModifyWriteRow. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([table_name, row_key, rules, app_profile_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable.ReadModifyWriteRowRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable.ReadModifyWriteRowRequest): + request = bigtable.ReadModifyWriteRowRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if table_name is not None: + request.table_name = table_name + if row_key is not None: + request.row_key = row_key + if app_profile_id is not None: + request.app_profile_id = app_profile_id + + if rules: + request.rules.extend(rules) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.read_modify_write_row] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-bigtable",).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ("BigtableClient",) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py new file mode 100644 index 000000000000..e18b4592419b --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py @@ -0,0 +1,35 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +from typing import Dict, Type + +from .base import BigtableTransport +from .grpc import BigtableGrpcTransport +from .grpc_asyncio import BigtableGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[BigtableTransport]] +_transport_registry["grpc"] = BigtableGrpcTransport +_transport_registry["grpc_asyncio"] = BigtableGrpcAsyncIOTransport + +__all__ = ( + "BigtableTransport", + "BigtableGrpcTransport", + "BigtableGrpcAsyncIOTransport", +) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py new file mode 100644 index 000000000000..8f3d81687203 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py @@ -0,0 +1,254 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import abc +import typing +import pkg_resources + +from google import auth # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore + +from google.cloud.bigtable_v2.types import bigtable + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-bigtable",).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class BigtableTransport(abc.ABC): + """Abstract transport class for Bigtable.""" + + AUTH_SCOPES = ( + "https://www.googleapis.com/auth/bigtable.data", + "https://www.googleapis.com/auth/bigtable.data.readonly", + "https://www.googleapis.com/auth/cloud-bigtable.data", + "https://www.googleapis.com/auth/cloud-bigtable.data.readonly", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ) + + def __init__( + self, + *, + host: str = "bigtable.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: typing.Optional[str] = None, + scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES, + quota_project_id: typing.Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scope (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = auth.load_credentials_from_file( + credentials_file, scopes=scopes, quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = auth.default( + scopes=scopes, quota_project_id=quota_project_id + ) + + # Save the credentials. + self._credentials = credentials + + # Lifted into its own function so it can be stubbed out during tests. + self._prep_wrapped_messages(client_info) + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
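+        # Each RPC below is wrapped with its default retry and timeout policy.
+        # Note that an empty ``if_exception_type()`` predicate means the
+        # method is never retried by default; only ``mutate_row`` retries (on
+        # DeadlineExceeded / ServiceUnavailable), and the streaming
+        # ``read_rows`` method gets a long 43200 s (12 hour) default timeout.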
+ self._wrapped_methods = { + self.read_rows: gapic_v1.method.wrap_method( + self.read_rows, + default_retry=retries.Retry( + initial=0.01, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type(), + ), + default_timeout=43200.0, + client_info=client_info, + ), + self.sample_row_keys: gapic_v1.method.wrap_method( + self.sample_row_keys, + default_retry=retries.Retry( + initial=0.01, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type(), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.mutate_row: gapic_v1.method.wrap_method( + self.mutate_row, + default_retry=retries.Retry( + initial=0.01, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=60.0, + client_info=client_info, + ), + self.mutate_rows: gapic_v1.method.wrap_method( + self.mutate_rows, + default_retry=retries.Retry( + initial=0.01, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type(), + ), + default_timeout=600.0, + client_info=client_info, + ), + self.check_and_mutate_row: gapic_v1.method.wrap_method( + self.check_and_mutate_row, + default_retry=retries.Retry( + initial=0.01, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type(), + ), + default_timeout=20.0, + client_info=client_info, + ), + self.read_modify_write_row: gapic_v1.method.wrap_method( + self.read_modify_write_row, + default_retry=retries.Retry( + initial=0.01, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type(), + ), + default_timeout=20.0, + client_info=client_info, + ), + } + + @property + def read_rows( + self, + ) -> typing.Callable[ + [bigtable.ReadRowsRequest], + typing.Union[ + bigtable.ReadRowsResponse, typing.Awaitable[bigtable.ReadRowsResponse] + ], + ]: + raise NotImplementedError() + + @property + def sample_row_keys( + self, + ) -> typing.Callable[ + [bigtable.SampleRowKeysRequest], + typing.Union[ + bigtable.SampleRowKeysResponse, + typing.Awaitable[bigtable.SampleRowKeysResponse], + ], + ]: + raise NotImplementedError() + + @property + def mutate_row( + self, + ) -> typing.Callable[ + [bigtable.MutateRowRequest], + typing.Union[ + bigtable.MutateRowResponse, typing.Awaitable[bigtable.MutateRowResponse] + ], + ]: + raise NotImplementedError() + + @property + def mutate_rows( + self, + ) -> typing.Callable[ + [bigtable.MutateRowsRequest], + typing.Union[ + bigtable.MutateRowsResponse, typing.Awaitable[bigtable.MutateRowsResponse] + ], + ]: + raise NotImplementedError() + + @property + def check_and_mutate_row( + self, + ) -> typing.Callable[ + [bigtable.CheckAndMutateRowRequest], + typing.Union[ + bigtable.CheckAndMutateRowResponse, + typing.Awaitable[bigtable.CheckAndMutateRowResponse], + ], + ]: + raise NotImplementedError() + + @property + def read_modify_write_row( + self, + ) -> typing.Callable[ + [bigtable.ReadModifyWriteRowRequest], + typing.Union[ + bigtable.ReadModifyWriteRowResponse, + typing.Awaitable[bigtable.ReadModifyWriteRowResponse], + ], + ]: + raise NotImplementedError() + + +__all__ = ("BigtableTransport",) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py new file mode 100644 index 000000000000..6b34e8ab0039 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py @@ -0,0 +1,432 @@ +# -*- coding: utf-8 -*- + +# 
Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.bigtable_v2.types import bigtable + +from .base import BigtableTransport, DEFAULT_CLIENT_INFO + + +class BigtableGrpcTransport(BigtableTransport): + """gRPC backend transport for Bigtable. + + Service for reading from and writing to existing Bigtable + tables. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "bigtable.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. 
It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._ssl_channel_credentials = ssl_channel_credentials + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Sanity check: Ensure that channel and credentials are not both + # provided. + credentials = False + + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + elif api_mtls_endpoint: + host = ( + api_mtls_endpoint + if ":" in api_mtls_endpoint + else api_mtls_endpoint + ":443" + ) + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + ssl_credentials = SslCredentials().ssl_credentials + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + self._ssl_channel_credentials = ssl_credentials + else: + host = host if ":" in host else host + ":443" + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=self._ssl_channel_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + self._stubs = {} # type: Dict[str, Callable] + + # Run the base constructor. 
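+        # The base constructor resolves credentials (from ``credentials_file``
+        # or application default credentials when none were supplied) and
+        # precomputes the retry/timeout-wrapped methods via
+        # ``_prep_wrapped_messages``.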
+ super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + client_info=client_info, + ) + + @classmethod + def create_channel( + cls, + host: str = "bigtable.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + address (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs, + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. + """ + return self._grpc_channel + + @property + def read_rows( + self, + ) -> Callable[[bigtable.ReadRowsRequest], bigtable.ReadRowsResponse]: + r"""Return a callable for the read rows method over gRPC. + + Streams back the contents of all requested rows in + key order, optionally applying the same Reader filter to + each. Depending on their size, rows and cells may be + broken up across multiple responses, but atomicity of + each row will still be preserved. See the + ReadRowsResponse documentation for details. + + Returns: + Callable[[~.ReadRowsRequest], + ~.ReadRowsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "read_rows" not in self._stubs: + self._stubs["read_rows"] = self.grpc_channel.unary_stream( + "/google.bigtable.v2.Bigtable/ReadRows", + request_serializer=bigtable.ReadRowsRequest.serialize, + response_deserializer=bigtable.ReadRowsResponse.deserialize, + ) + return self._stubs["read_rows"] + + @property + def sample_row_keys( + self, + ) -> Callable[[bigtable.SampleRowKeysRequest], bigtable.SampleRowKeysResponse]: + r"""Return a callable for the sample row keys method over gRPC. + + Returns a sample of row keys in the table. The + returned row keys will delimit contiguous sections of + the table of approximately equal size, which can be used + to break up the data for distributed tasks like + mapreduces. 
+ + Returns: + Callable[[~.SampleRowKeysRequest], + ~.SampleRowKeysResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "sample_row_keys" not in self._stubs: + self._stubs["sample_row_keys"] = self.grpc_channel.unary_stream( + "/google.bigtable.v2.Bigtable/SampleRowKeys", + request_serializer=bigtable.SampleRowKeysRequest.serialize, + response_deserializer=bigtable.SampleRowKeysResponse.deserialize, + ) + return self._stubs["sample_row_keys"] + + @property + def mutate_row( + self, + ) -> Callable[[bigtable.MutateRowRequest], bigtable.MutateRowResponse]: + r"""Return a callable for the mutate row method over gRPC. + + Mutates a row atomically. Cells already present in the row are + left unchanged unless explicitly changed by ``mutation``. + + Returns: + Callable[[~.MutateRowRequest], + ~.MutateRowResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "mutate_row" not in self._stubs: + self._stubs["mutate_row"] = self.grpc_channel.unary_unary( + "/google.bigtable.v2.Bigtable/MutateRow", + request_serializer=bigtable.MutateRowRequest.serialize, + response_deserializer=bigtable.MutateRowResponse.deserialize, + ) + return self._stubs["mutate_row"] + + @property + def mutate_rows( + self, + ) -> Callable[[bigtable.MutateRowsRequest], bigtable.MutateRowsResponse]: + r"""Return a callable for the mutate rows method over gRPC. + + Mutates multiple rows in a batch. Each individual row + is mutated atomically as in MutateRow, but the entire + batch is not executed atomically. + + Returns: + Callable[[~.MutateRowsRequest], + ~.MutateRowsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "mutate_rows" not in self._stubs: + self._stubs["mutate_rows"] = self.grpc_channel.unary_stream( + "/google.bigtable.v2.Bigtable/MutateRows", + request_serializer=bigtable.MutateRowsRequest.serialize, + response_deserializer=bigtable.MutateRowsResponse.deserialize, + ) + return self._stubs["mutate_rows"] + + @property + def check_and_mutate_row( + self, + ) -> Callable[ + [bigtable.CheckAndMutateRowRequest], bigtable.CheckAndMutateRowResponse + ]: + r"""Return a callable for the check and mutate row method over gRPC. + + Mutates a row atomically based on the output of a + predicate Reader filter. + + Returns: + Callable[[~.CheckAndMutateRowRequest], + ~.CheckAndMutateRowResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
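+        # Stubs are created lazily and cached in ``self._stubs``, so repeated
+        # property access reuses the same callable.  CheckAndMutateRow is a
+        # unary-unary RPC, unlike ReadRows, SampleRowKeys and MutateRows
+        # above, which are server-streaming and use ``unary_stream``.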
+ if "check_and_mutate_row" not in self._stubs: + self._stubs["check_and_mutate_row"] = self.grpc_channel.unary_unary( + "/google.bigtable.v2.Bigtable/CheckAndMutateRow", + request_serializer=bigtable.CheckAndMutateRowRequest.serialize, + response_deserializer=bigtable.CheckAndMutateRowResponse.deserialize, + ) + return self._stubs["check_and_mutate_row"] + + @property + def read_modify_write_row( + self, + ) -> Callable[ + [bigtable.ReadModifyWriteRowRequest], bigtable.ReadModifyWriteRowResponse + ]: + r"""Return a callable for the read modify write row method over gRPC. + + Modifies a row atomically on the server. The method + reads the latest existing timestamp and value from the + specified columns and writes a new entry based on pre- + defined read/modify/write rules. The new value for the + timestamp is the greater of the existing timestamp or + the current server time. The method returns the new + contents of all modified cells. + + Returns: + Callable[[~.ReadModifyWriteRowRequest], + ~.ReadModifyWriteRowResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "read_modify_write_row" not in self._stubs: + self._stubs["read_modify_write_row"] = self.grpc_channel.unary_unary( + "/google.bigtable.v2.Bigtable/ReadModifyWriteRow", + request_serializer=bigtable.ReadModifyWriteRowRequest.serialize, + response_deserializer=bigtable.ReadModifyWriteRowResponse.deserialize, + ) + return self._stubs["read_modify_write_row"] + + +__all__ = ("BigtableGrpcTransport",) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py new file mode 100644 index 000000000000..aa7ff2ecc7d6 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py @@ -0,0 +1,440 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import gapic_v1 # type: ignore +from google.api_core import grpc_helpers_async # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.bigtable_v2.types import bigtable + +from .base import BigtableTransport, DEFAULT_CLIENT_INFO +from .grpc import BigtableGrpcTransport + + +class BigtableGrpcAsyncIOTransport(BigtableTransport): + """gRPC AsyncIO backend transport for Bigtable. + + Service for reading from and writing to existing Bigtable + tables. 
+ + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "bigtable.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + address (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "bigtable.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
+ If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._ssl_channel_credentials = ssl_channel_credentials + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Sanity check: Ensure that channel and credentials are not both + # provided. + credentials = False + + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + elif api_mtls_endpoint: + host = ( + api_mtls_endpoint + if ":" in api_mtls_endpoint + else api_mtls_endpoint + ":443" + ) + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + ssl_credentials = SslCredentials().ssl_credentials + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + self._ssl_channel_credentials = ssl_credentials + else: + host = host if ":" in host else host + ":443" + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # create a new channel. The provided one is ignored. 
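+            # The ``options`` passed below set both gRPC message-size limits
+            # to ``-1`` (unlimited), lifting the default ~4 MB receive cap so
+            # that large rows and cells can be streamed back.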
+ self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=self._ssl_channel_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Run the base constructor. + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + client_info=client_info, + ) + + self._stubs = {} + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def read_rows( + self, + ) -> Callable[[bigtable.ReadRowsRequest], Awaitable[bigtable.ReadRowsResponse]]: + r"""Return a callable for the read rows method over gRPC. + + Streams back the contents of all requested rows in + key order, optionally applying the same Reader filter to + each. Depending on their size, rows and cells may be + broken up across multiple responses, but atomicity of + each row will still be preserved. See the + ReadRowsResponse documentation for details. + + Returns: + Callable[[~.ReadRowsRequest], + Awaitable[~.ReadRowsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "read_rows" not in self._stubs: + self._stubs["read_rows"] = self.grpc_channel.unary_stream( + "/google.bigtable.v2.Bigtable/ReadRows", + request_serializer=bigtable.ReadRowsRequest.serialize, + response_deserializer=bigtable.ReadRowsResponse.deserialize, + ) + return self._stubs["read_rows"] + + @property + def sample_row_keys( + self, + ) -> Callable[ + [bigtable.SampleRowKeysRequest], Awaitable[bigtable.SampleRowKeysResponse] + ]: + r"""Return a callable for the sample row keys method over gRPC. + + Returns a sample of row keys in the table. The + returned row keys will delimit contiguous sections of + the table of approximately equal size, which can be used + to break up the data for distributed tasks like + mapreduces. + + Returns: + Callable[[~.SampleRowKeysRequest], + Awaitable[~.SampleRowKeysResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "sample_row_keys" not in self._stubs: + self._stubs["sample_row_keys"] = self.grpc_channel.unary_stream( + "/google.bigtable.v2.Bigtable/SampleRowKeys", + request_serializer=bigtable.SampleRowKeysRequest.serialize, + response_deserializer=bigtable.SampleRowKeysResponse.deserialize, + ) + return self._stubs["sample_row_keys"] + + @property + def mutate_row( + self, + ) -> Callable[[bigtable.MutateRowRequest], Awaitable[bigtable.MutateRowResponse]]: + r"""Return a callable for the mutate row method over gRPC. + + Mutates a row atomically. Cells already present in the row are + left unchanged unless explicitly changed by ``mutation``. 
+ + Returns: + Callable[[~.MutateRowRequest], + Awaitable[~.MutateRowResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "mutate_row" not in self._stubs: + self._stubs["mutate_row"] = self.grpc_channel.unary_unary( + "/google.bigtable.v2.Bigtable/MutateRow", + request_serializer=bigtable.MutateRowRequest.serialize, + response_deserializer=bigtable.MutateRowResponse.deserialize, + ) + return self._stubs["mutate_row"] + + @property + def mutate_rows( + self, + ) -> Callable[[bigtable.MutateRowsRequest], Awaitable[bigtable.MutateRowsResponse]]: + r"""Return a callable for the mutate rows method over gRPC. + + Mutates multiple rows in a batch. Each individual row + is mutated atomically as in MutateRow, but the entire + batch is not executed atomically. + + Returns: + Callable[[~.MutateRowsRequest], + Awaitable[~.MutateRowsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "mutate_rows" not in self._stubs: + self._stubs["mutate_rows"] = self.grpc_channel.unary_stream( + "/google.bigtable.v2.Bigtable/MutateRows", + request_serializer=bigtable.MutateRowsRequest.serialize, + response_deserializer=bigtable.MutateRowsResponse.deserialize, + ) + return self._stubs["mutate_rows"] + + @property + def check_and_mutate_row( + self, + ) -> Callable[ + [bigtable.CheckAndMutateRowRequest], + Awaitable[bigtable.CheckAndMutateRowResponse], + ]: + r"""Return a callable for the check and mutate row method over gRPC. + + Mutates a row atomically based on the output of a + predicate Reader filter. + + Returns: + Callable[[~.CheckAndMutateRowRequest], + Awaitable[~.CheckAndMutateRowResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "check_and_mutate_row" not in self._stubs: + self._stubs["check_and_mutate_row"] = self.grpc_channel.unary_unary( + "/google.bigtable.v2.Bigtable/CheckAndMutateRow", + request_serializer=bigtable.CheckAndMutateRowRequest.serialize, + response_deserializer=bigtable.CheckAndMutateRowResponse.deserialize, + ) + return self._stubs["check_and_mutate_row"] + + @property + def read_modify_write_row( + self, + ) -> Callable[ + [bigtable.ReadModifyWriteRowRequest], + Awaitable[bigtable.ReadModifyWriteRowResponse], + ]: + r"""Return a callable for the read modify write row method over gRPC. + + Modifies a row atomically on the server. The method + reads the latest existing timestamp and value from the + specified columns and writes a new entry based on pre- + defined read/modify/write rules. The new value for the + timestamp is the greater of the existing timestamp or + the current server time. The method returns the new + contents of all modified cells. + + Returns: + Callable[[~.ReadModifyWriteRowRequest], + Awaitable[~.ReadModifyWriteRowResponse]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "read_modify_write_row" not in self._stubs: + self._stubs["read_modify_write_row"] = self.grpc_channel.unary_unary( + "/google.bigtable.v2.Bigtable/ReadModifyWriteRow", + request_serializer=bigtable.ReadModifyWriteRowRequest.serialize, + response_deserializer=bigtable.ReadModifyWriteRowResponse.deserialize, + ) + return self._stubs["read_modify_write_row"] + + +__all__ = ("BigtableGrpcAsyncIOTransport",) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types.py deleted file mode 100644 index 607e1b09c5dd..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types.py +++ /dev/null @@ -1,54 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from __future__ import absolute_import -import sys - -from google.api_core.protobuf_helpers import get_messages - -from google.cloud.bigtable_v2.proto import bigtable_pb2 -from google.cloud.bigtable_v2.proto import data_pb2 -from google.protobuf import any_pb2 -from google.protobuf import wrappers_pb2 -from google.rpc import status_pb2 - - -_shared_modules = [ - any_pb2, - wrappers_pb2, - status_pb2, -] - -_local_modules = [ - bigtable_pb2, - data_pb2, -] - -names = [] - -for module in _shared_modules: # pragma: NO COVER - for name, message in get_messages(module).items(): - setattr(sys.modules[__name__], name, message) - names.append(name) -for module in _local_modules: - for name, message in get_messages(module).items(): - message.__module__ = "google.cloud.bigtable_v2.types" - setattr(sys.modules[__name__], name, message) - names.append(name) - - -__all__ = tuple(sorted(names)) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py new file mode 100644 index 000000000000..0aa74d208985 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from .data import ( + Row, + Family, + Column, + Cell, + RowRange, + RowSet, + ColumnRange, + TimestampRange, + ValueRange, + RowFilter, + Mutation, + ReadModifyWriteRule, +) +from .bigtable import ( + ReadRowsRequest, + ReadRowsResponse, + SampleRowKeysRequest, + SampleRowKeysResponse, + MutateRowRequest, + MutateRowResponse, + MutateRowsRequest, + MutateRowsResponse, + CheckAndMutateRowRequest, + CheckAndMutateRowResponse, + ReadModifyWriteRowRequest, + ReadModifyWriteRowResponse, +) + +__all__ = ( + "Row", + "Family", + "Column", + "Cell", + "RowRange", + "RowSet", + "ColumnRange", + "TimestampRange", + "ValueRange", + "RowFilter", + "Mutation", + "ReadModifyWriteRule", + "ReadRowsRequest", + "ReadRowsResponse", + "SampleRowKeysRequest", + "SampleRowKeysResponse", + "MutateRowRequest", + "MutateRowResponse", + "MutateRowsRequest", + "MutateRowsResponse", + "CheckAndMutateRowRequest", + "CheckAndMutateRowResponse", + "ReadModifyWriteRowRequest", + "ReadModifyWriteRowResponse", +) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py new file mode 100644 index 000000000000..83def634e9a9 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py @@ -0,0 +1,463 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.bigtable_v2.types import data +from google.protobuf import wrappers_pb2 as wrappers # type: ignore +from google.rpc import status_pb2 as gr_status # type: ignore + + +__protobuf__ = proto.module( + package="google.bigtable.v2", + manifest={ + "ReadRowsRequest", + "ReadRowsResponse", + "SampleRowKeysRequest", + "SampleRowKeysResponse", + "MutateRowRequest", + "MutateRowResponse", + "MutateRowsRequest", + "MutateRowsResponse", + "CheckAndMutateRowRequest", + "CheckAndMutateRowResponse", + "ReadModifyWriteRowRequest", + "ReadModifyWriteRowResponse", + }, +) + + +class ReadRowsRequest(proto.Message): + r"""Request message for Bigtable.ReadRows. + + Attributes: + table_name (str): + Required. The unique name of the table from which to read. + Values are of the form + ``projects//instances//tables/
``. + app_profile_id (str): + This value specifies routing for replication. + If not specified, the "default" application + profile will be used. + rows (google.cloud.bigtable_v2.types.RowSet): + The row keys and/or ranges to read. If not + specified, reads from all rows. + filter (google.cloud.bigtable_v2.types.RowFilter): + The filter to apply to the contents of the + specified row(s). If unset, reads the entirety + of each row. + rows_limit (int): + The read will terminate after committing to N + rows' worth of results. The default (zero) is to + return all results. + """ + + table_name = proto.Field(proto.STRING, number=1) + + app_profile_id = proto.Field(proto.STRING, number=5) + + rows = proto.Field(proto.MESSAGE, number=2, message=data.RowSet,) + + filter = proto.Field(proto.MESSAGE, number=3, message=data.RowFilter,) + + rows_limit = proto.Field(proto.INT64, number=4) + + +class ReadRowsResponse(proto.Message): + r"""Response message for Bigtable.ReadRows. + + Attributes: + chunks (Sequence[google.cloud.bigtable_v2.types.ReadRowsResponse.CellChunk]): + A collection of a row's contents as part of + the read request. + last_scanned_row_key (bytes): + Optionally the server might return the row + key of the last row it has scanned. The client + can use this to construct a more efficient retry + request if needed: any row keys or portions of + ranges less than this row key can be dropped + from the request. This is primarily useful for + cases where the server has read a lot of data + that was filtered out since the last committed + row key, allowing the client to skip that work + on a retry. + """ + + class CellChunk(proto.Message): + r"""Specifies a piece of a row's contents returned as part of the + read response stream. + + Attributes: + row_key (bytes): + The row key for this chunk of data. If the + row key is empty, this CellChunk is a + continuation of the same row as the previous + CellChunk in the response stream, even if that + CellChunk was in a previous ReadRowsResponse + message. + family_name (google.protobuf.wrappers_pb2.StringValue): + The column family name for this chunk of data. If this + message is not present this CellChunk is a continuation of + the same column family as the previous CellChunk. The empty + string can occur as a column family name in a response so + clients must check explicitly for the presence of this + message, not just for ``family_name.value`` being non-empty. + qualifier (google.protobuf.wrappers_pb2.BytesValue): + The column qualifier for this chunk of data. If this message + is not present, this CellChunk is a continuation of the same + column as the previous CellChunk. Column qualifiers may be + empty so clients must check for the presence of this + message, not just for ``qualifier.value`` being non-empty. + timestamp_micros (int): + The cell's stored timestamp, which also uniquely identifies + it within its column. Values are always expressed in + microseconds, but individual tables may set a coarser + granularity to further restrict the allowed values. For + example, a table which specifies millisecond granularity + will only allow values of ``timestamp_micros`` which are + multiples of 1000. Timestamps are only set in the first + CellChunk per cell (for cells split into multiple chunks). + labels (Sequence[str]): + Labels applied to the cell by a + [RowFilter][google.bigtable.v2.RowFilter]. Labels are only + set on the first CellChunk per cell. + value (bytes): + The value stored in the cell. 
Cell values + can be split across multiple CellChunks. In + that case only the value field will be set in + CellChunks after the first: the timestamp and + labels will only be present in the first + CellChunk, even if the first CellChunk came in a + previous ReadRowsResponse. + value_size (int): + If this CellChunk is part of a chunked cell value and this + is not the final chunk of that cell, value_size will be set + to the total length of the cell value. The client can use + this size to pre-allocate memory to hold the full cell + value. + reset_row (bool): + Indicates that the client should drop all previous chunks + for ``row_key``, as it will be re-read from the beginning. + commit_row (bool): + Indicates that the client can safely process all previous + chunks for ``row_key``, as its data has been fully read. + """ + + row_key = proto.Field(proto.BYTES, number=1) + + family_name = proto.Field( + proto.MESSAGE, number=2, message=wrappers.StringValue, + ) + + qualifier = proto.Field(proto.MESSAGE, number=3, message=wrappers.BytesValue,) + + timestamp_micros = proto.Field(proto.INT64, number=4) + + labels = proto.RepeatedField(proto.STRING, number=5) + + value = proto.Field(proto.BYTES, number=6) + + value_size = proto.Field(proto.INT32, number=7) + + reset_row = proto.Field(proto.BOOL, number=8, oneof="row_status") + + commit_row = proto.Field(proto.BOOL, number=9, oneof="row_status") + + chunks = proto.RepeatedField(proto.MESSAGE, number=1, message=CellChunk,) + + last_scanned_row_key = proto.Field(proto.BYTES, number=2) + + +class SampleRowKeysRequest(proto.Message): + r"""Request message for Bigtable.SampleRowKeys. + + Attributes: + table_name (str): + Required. The unique name of the table from which to sample + row keys. Values are of the form + ``projects//instances//tables/
``. + app_profile_id (str): + This value specifies routing for replication. + If not specified, the "default" application + profile will be used. + """ + + table_name = proto.Field(proto.STRING, number=1) + + app_profile_id = proto.Field(proto.STRING, number=2) + + +class SampleRowKeysResponse(proto.Message): + r"""Response message for Bigtable.SampleRowKeys. + + Attributes: + row_key (bytes): + Sorted streamed sequence of sample row keys + in the table. The table might have contents + before the first row key in the list and after + the last one, but a key containing the empty + string indicates "end of table" and will be the + last response given, if present. + Note that row keys in this list may not have + ever been written to or read from, and users + should therefore not make any assumptions about + the row key structure that are specific to their + use case. + offset_bytes (int): + Approximate total storage space used by all rows in the + table which precede ``row_key``. Buffering the contents of + all rows between two subsequent samples would require space + roughly equal to the difference in their ``offset_bytes`` + fields. + """ + + row_key = proto.Field(proto.BYTES, number=1) + + offset_bytes = proto.Field(proto.INT64, number=2) + + +class MutateRowRequest(proto.Message): + r"""Request message for Bigtable.MutateRow. + + Attributes: + table_name (str): + Required. The unique name of the table to which the mutation + should be applied. Values are of the form + ``projects//instances//tables/
``. + app_profile_id (str): + This value specifies routing for replication. + If not specified, the "default" application + profile will be used. + row_key (bytes): + Required. The key of the row to which the + mutation should be applied. + mutations (Sequence[google.cloud.bigtable_v2.types.Mutation]): + Required. Changes to be atomically applied to + the specified row. Entries are applied in order, + meaning that earlier mutations can be masked by + later ones. Must contain at least one entry and + at most 100000. + """ + + table_name = proto.Field(proto.STRING, number=1) + + app_profile_id = proto.Field(proto.STRING, number=4) + + row_key = proto.Field(proto.BYTES, number=2) + + mutations = proto.RepeatedField(proto.MESSAGE, number=3, message=data.Mutation,) + + +class MutateRowResponse(proto.Message): + r"""Response message for Bigtable.MutateRow.""" + + +class MutateRowsRequest(proto.Message): + r"""Request message for BigtableService.MutateRows. + + Attributes: + table_name (str): + Required. The unique name of the table to + which the mutations should be applied. + app_profile_id (str): + This value specifies routing for replication. + If not specified, the "default" application + profile will be used. + entries (Sequence[google.cloud.bigtable_v2.types.MutateRowsRequest.Entry]): + Required. The row keys and corresponding + mutations to be applied in bulk. Each entry is + applied as an atomic mutation, but the entries + may be applied in arbitrary order (even between + entries for the same row). At least one entry + must be specified, and in total the entries can + contain at most 100000 mutations. + """ + + class Entry(proto.Message): + r"""A mutation for a given row. + + Attributes: + row_key (bytes): + The key of the row to which the ``mutations`` should be + applied. + mutations (Sequence[google.cloud.bigtable_v2.types.Mutation]): + Required. Changes to be atomically applied to + the specified row. Mutations are applied in + order, meaning that earlier mutations can be + masked by later ones. + You must specify at least one mutation. + """ + + row_key = proto.Field(proto.BYTES, number=1) + + mutations = proto.RepeatedField(proto.MESSAGE, number=2, message=data.Mutation,) + + table_name = proto.Field(proto.STRING, number=1) + + app_profile_id = proto.Field(proto.STRING, number=3) + + entries = proto.RepeatedField(proto.MESSAGE, number=2, message=Entry,) + + +class MutateRowsResponse(proto.Message): + r"""Response message for BigtableService.MutateRows. + + Attributes: + entries (Sequence[google.cloud.bigtable_v2.types.MutateRowsResponse.Entry]): + One or more results for Entries from the + batch request. + """ + + class Entry(proto.Message): + r"""The result of applying a passed mutation in the original + request. + + Attributes: + index (int): + The index into the original request's ``entries`` list of + the Entry for which a result is being reported. + status (google.rpc.status_pb2.Status): + The result of the request Entry identified by ``index``. + Depending on how requests are batched during execution, it + is possible for one Entry to fail due to an error with + another Entry. In the event that this occurs, the same error + will be reported for both entries. + """ + + index = proto.Field(proto.INT64, number=1) + + status = proto.Field(proto.MESSAGE, number=2, message=gr_status.Status,) + + entries = proto.RepeatedField(proto.MESSAGE, number=1, message=Entry,) + + +class CheckAndMutateRowRequest(proto.Message): + r"""Request message for Bigtable.CheckAndMutateRow. 
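As a rough sketch of how the bulk-mutation messages above compose (assuming the proto-plus types are importable from ``google.cloud.bigtable_v2.types``; the table path and row key below are placeholders)::

    from google.cloud.bigtable_v2 import types

    # One Entry per row; each entry's mutations are applied atomically,
    # but entries may land in any order relative to one another.
    request = types.MutateRowsRequest(
        table_name="projects/my-project/instances/my-instance/tables/my-table",
        entries=[
            types.MutateRowsRequest.Entry(
                row_key=b"user#0001",
                mutations=[
                    types.Mutation(delete_from_row=types.Mutation.DeleteFromRow()),
                ],
            ),
        ],
    )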
+ + Attributes: + table_name (str): + Required. The unique name of the table to which the + conditional mutation should be applied. Values are of the + form + ``projects/<project>/instances/<instance>/tables/<table>
``. + app_profile_id (str): + This value specifies routing for replication. + If not specified, the "default" application + profile will be used. + row_key (bytes): + Required. The key of the row to which the + conditional mutation should be applied. + predicate_filter (google.cloud.bigtable_v2.types.RowFilter): + The filter to be applied to the contents of the specified + row. Depending on whether or not any results are yielded, + either ``true_mutations`` or ``false_mutations`` will be + executed. If unset, checks that the row contains any values + at all. + true_mutations (Sequence[google.cloud.bigtable_v2.types.Mutation]): + Changes to be atomically applied to the specified row if + ``predicate_filter`` yields at least one cell when applied + to ``row_key``. Entries are applied in order, meaning that + earlier mutations can be masked by later ones. Must contain + at least one entry if ``false_mutations`` is empty, and at + most 100000. + false_mutations (Sequence[google.cloud.bigtable_v2.types.Mutation]): + Changes to be atomically applied to the specified row if + ``predicate_filter`` does not yield any cells when applied + to ``row_key``. Entries are applied in order, meaning that + earlier mutations can be masked by later ones. Must contain + at least one entry if ``true_mutations`` is empty, and at + most 100000. + """ + + table_name = proto.Field(proto.STRING, number=1) + + app_profile_id = proto.Field(proto.STRING, number=7) + + row_key = proto.Field(proto.BYTES, number=2) + + predicate_filter = proto.Field(proto.MESSAGE, number=6, message=data.RowFilter,) + + true_mutations = proto.RepeatedField( + proto.MESSAGE, number=4, message=data.Mutation, + ) + + false_mutations = proto.RepeatedField( + proto.MESSAGE, number=5, message=data.Mutation, + ) + + +class CheckAndMutateRowResponse(proto.Message): + r"""Response message for Bigtable.CheckAndMutateRow. + + Attributes: + predicate_matched (bool): + Whether or not the request's ``predicate_filter`` yielded + any results for the specified row. + """ + + predicate_matched = proto.Field(proto.BOOL, number=1) + + +class ReadModifyWriteRowRequest(proto.Message): + r"""Request message for Bigtable.ReadModifyWriteRow. + + Attributes: + table_name (str): + Required. The unique name of the table to which the + read/modify/write rules should be applied. Values are of the + form + ``projects//instances//tables/
``. + app_profile_id (str): + This value specifies routing for replication. + If not specified, the "default" application + profile will be used. + row_key (bytes): + Required. The key of the row to which the + read/modify/write rules should be applied. + rules (Sequence[google.cloud.bigtable_v2.types.ReadModifyWriteRule]): + Required. Rules specifying how the specified + row's contents are to be transformed into + writes. Entries are applied in order, meaning + that earlier rules will affect the results of + later ones. + """ + + table_name = proto.Field(proto.STRING, number=1) + + app_profile_id = proto.Field(proto.STRING, number=4) + + row_key = proto.Field(proto.BYTES, number=2) + + rules = proto.RepeatedField( + proto.MESSAGE, number=3, message=data.ReadModifyWriteRule, + ) + + +class ReadModifyWriteRowResponse(proto.Message): + r"""Response message for Bigtable.ReadModifyWriteRow. + + Attributes: + row (google.cloud.bigtable_v2.types.Row): + A Row containing the new contents of all + cells modified by the request. + """ + + row = proto.Field(proto.MESSAGE, number=1, message=data.Row,) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py new file mode 100644 index 000000000000..eece89c5ae57 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py @@ -0,0 +1,728 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.bigtable.v2", + manifest={ + "Row", + "Family", + "Column", + "Cell", + "RowRange", + "RowSet", + "ColumnRange", + "TimestampRange", + "ValueRange", + "RowFilter", + "Mutation", + "ReadModifyWriteRule", + }, +) + + +class Row(proto.Message): + r"""Specifies the complete (requested) contents of a single row + of a table. Rows which exceed 256MiB in size cannot be read in + full. + + Attributes: + key (bytes): + The unique key which identifies this row + within its table. This is the same key that's + used to identify the row in, for example, a + MutateRowRequest. May contain any non-empty byte + string up to 4KiB in length. + families (Sequence[google.cloud.bigtable_v2.types.Family]): + May be empty, but only if the entire row is + empty. The mutual ordering of column families is + not specified. + """ + + key = proto.Field(proto.BYTES, number=1) + + families = proto.RepeatedField(proto.MESSAGE, number=2, message="Family",) + + +class Family(proto.Message): + r"""Specifies (some of) the contents of a single row/column + family intersection of a table. + + Attributes: + name (str): + The unique key which identifies this family within its row. + This is the same key that's used to identify the family in, + for example, a RowFilter which sets its + "family_name_regex_filter" field. 
Must match + ``[-_.a-zA-Z0-9]+``, except that AggregatingRowProcessors + may produce cells in a sentinel family with an empty name. + Must be no greater than 64 characters in length. + columns (Sequence[google.cloud.bigtable_v2.types.Column]): + Must not be empty. Sorted in order of + increasing "qualifier". + """ + + name = proto.Field(proto.STRING, number=1) + + columns = proto.RepeatedField(proto.MESSAGE, number=2, message="Column",) + + +class Column(proto.Message): + r"""Specifies (some of) the contents of a single row/column + intersection of a table. + + Attributes: + qualifier (bytes): + The unique key which identifies this column within its + family. This is the same key that's used to identify the + column in, for example, a RowFilter which sets its + ``column_qualifier_regex_filter`` field. May contain any + byte string, including the empty string, up to 16kiB in + length. + cells (Sequence[google.cloud.bigtable_v2.types.Cell]): + Must not be empty. Sorted in order of decreasing + "timestamp_micros". + """ + + qualifier = proto.Field(proto.BYTES, number=1) + + cells = proto.RepeatedField(proto.MESSAGE, number=2, message="Cell",) + + +class Cell(proto.Message): + r"""Specifies (some of) the contents of a single + row/column/timestamp of a table. + + Attributes: + timestamp_micros (int): + The cell's stored timestamp, which also uniquely identifies + it within its column. Values are always expressed in + microseconds, but individual tables may set a coarser + granularity to further restrict the allowed values. For + example, a table which specifies millisecond granularity + will only allow values of ``timestamp_micros`` which are + multiples of 1000. + value (bytes): + The value stored in the cell. + May contain any byte string, including the empty + string, up to 100MiB in length. + labels (Sequence[str]): + Labels applied to the cell by a + [RowFilter][google.bigtable.v2.RowFilter]. + """ + + timestamp_micros = proto.Field(proto.INT64, number=1) + + value = proto.Field(proto.BYTES, number=2) + + labels = proto.RepeatedField(proto.STRING, number=3) + + +class RowRange(proto.Message): + r"""Specifies a contiguous range of rows. + + Attributes: + start_key_closed (bytes): + Used when giving an inclusive lower bound for + the range. + start_key_open (bytes): + Used when giving an exclusive lower bound for + the range. + end_key_open (bytes): + Used when giving an exclusive upper bound for + the range. + end_key_closed (bytes): + Used when giving an inclusive upper bound for + the range. + """ + + start_key_closed = proto.Field(proto.BYTES, number=1, oneof="start_key") + + start_key_open = proto.Field(proto.BYTES, number=2, oneof="start_key") + + end_key_open = proto.Field(proto.BYTES, number=3, oneof="end_key") + + end_key_closed = proto.Field(proto.BYTES, number=4, oneof="end_key") + + +class RowSet(proto.Message): + r"""Specifies a non-contiguous set of rows. + + Attributes: + row_keys (Sequence[bytes]): + Single rows included in the set. + row_ranges (Sequence[google.cloud.bigtable_v2.types.RowRange]): + Contiguous row ranges included in the set. + """ + + row_keys = proto.RepeatedField(proto.BYTES, number=1) + + row_ranges = proto.RepeatedField(proto.MESSAGE, number=2, message="RowRange",) + + +class ColumnRange(proto.Message): + r"""Specifies a contiguous range of columns within a single column + family. The range spans from : to + :, where both bounds can be either + inclusive or exclusive. 
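For illustration, a row set mixing explicit keys with a half-open key range, fed into a read request, might be built as follows (a minimal sketch; the key values and table path are placeholders, and the types are assumed to be importable from ``google.cloud.bigtable_v2.types``)::

    from google.cloud.bigtable_v2 import types

    row_set = types.RowSet(
        row_keys=[b"user#0001", b"user#0042"],
        row_ranges=[
            # start key inclusive, end key exclusive
            types.RowRange(start_key_closed=b"user#1000", end_key_open=b"user#2000"),
        ],
    )

    request = types.ReadRowsRequest(
        table_name="projects/my-project/instances/my-instance/tables/my-table",
        rows=row_set,
        rows_limit=100,
    )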
+ + Attributes: + family_name (str): + The name of the column family within which + this range falls. + start_qualifier_closed (bytes): + Used when giving an inclusive lower bound for + the range. + start_qualifier_open (bytes): + Used when giving an exclusive lower bound for + the range. + end_qualifier_closed (bytes): + Used when giving an inclusive upper bound for + the range. + end_qualifier_open (bytes): + Used when giving an exclusive upper bound for + the range. + """ + + family_name = proto.Field(proto.STRING, number=1) + + start_qualifier_closed = proto.Field(proto.BYTES, number=2, oneof="start_qualifier") + + start_qualifier_open = proto.Field(proto.BYTES, number=3, oneof="start_qualifier") + + end_qualifier_closed = proto.Field(proto.BYTES, number=4, oneof="end_qualifier") + + end_qualifier_open = proto.Field(proto.BYTES, number=5, oneof="end_qualifier") + + +class TimestampRange(proto.Message): + r"""Specified a contiguous range of microsecond timestamps. + + Attributes: + start_timestamp_micros (int): + Inclusive lower bound. If left empty, + interpreted as 0. + end_timestamp_micros (int): + Exclusive upper bound. If left empty, + interpreted as infinity. + """ + + start_timestamp_micros = proto.Field(proto.INT64, number=1) + + end_timestamp_micros = proto.Field(proto.INT64, number=2) + + +class ValueRange(proto.Message): + r"""Specifies a contiguous range of raw byte values. + + Attributes: + start_value_closed (bytes): + Used when giving an inclusive lower bound for + the range. + start_value_open (bytes): + Used when giving an exclusive lower bound for + the range. + end_value_closed (bytes): + Used when giving an inclusive upper bound for + the range. + end_value_open (bytes): + Used when giving an exclusive upper bound for + the range. + """ + + start_value_closed = proto.Field(proto.BYTES, number=1, oneof="start_value") + + start_value_open = proto.Field(proto.BYTES, number=2, oneof="start_value") + + end_value_closed = proto.Field(proto.BYTES, number=3, oneof="end_value") + + end_value_open = proto.Field(proto.BYTES, number=4, oneof="end_value") + + +class RowFilter(proto.Message): + r"""Takes a row as input and produces an alternate view of the row based + on specified rules. For example, a RowFilter might trim down a row + to include just the cells from columns matching a given regular + expression, or might return all the cells of a row but not their + values. More complicated filters can be composed out of these + components to express requests such as, "within every column of a + particular family, give just the two most recent cells which are + older than timestamp X." + + There are two broad categories of RowFilters (true filters and + transformers), as well as two ways to compose simple filters into + more complex ones (chains and interleaves). They work as follows: + + - True filters alter the input row by excluding some of its cells + wholesale from the output row. An example of a true filter is the + ``value_regex_filter``, which excludes cells whose values don't + match the specified pattern. All regex true filters use RE2 + syntax (https://github.com/google/re2/wiki/Syntax) in raw byte + mode (RE2::Latin1), and are evaluated as full matches. An + important point to keep in mind is that ``RE2(.)`` is equivalent + by default to ``RE2([^\n])``, meaning that it does not match + newlines. When attempting to match an arbitrary byte, you should + therefore use the escape sequence ``\C``, which may need to be + further escaped as ``\\C`` in your client language. 
+ + - Transformers alter the input row by changing the values of some + of its cells in the output, without excluding them completely. + Currently, the only supported transformer is the + ``strip_value_transformer``, which replaces every cell's value + with the empty string. + + - Chains and interleaves are described in more detail in the + RowFilter.Chain and RowFilter.Interleave documentation. + + The total serialized size of a RowFilter message must not exceed + 4096 bytes, and RowFilters may not be nested within each other (in + Chains or Interleaves) to a depth of more than 20. + + Attributes: + chain (google.cloud.bigtable_v2.types.RowFilter.Chain): + Applies several RowFilters to the data in + sequence, progressively narrowing the results. + interleave (google.cloud.bigtable_v2.types.RowFilter.Interleave): + Applies several RowFilters to the data in + parallel and combines the results. + condition (google.cloud.bigtable_v2.types.RowFilter.Condition): + Applies one of two possible RowFilters to the + data based on the output of a predicate + RowFilter. + sink (bool): + ADVANCED USE ONLY. Hook for introspection into the + RowFilter. Outputs all cells directly to the output of the + read rather than to any parent filter. Consider the + following example: + + :: + + Chain( + FamilyRegex("A"), + Interleave( + All(), + Chain(Label("foo"), Sink()) + ), + QualifierRegex("B") + ) + + A,A,1,w + A,B,2,x + B,B,4,z + | + FamilyRegex("A") + | + A,A,1,w + A,B,2,x + | + +------------+-------------+ + | | + All() Label(foo) + | | + A,A,1,w A,A,1,w,labels:[foo] + A,B,2,x A,B,2,x,labels:[foo] + | | + | Sink() --------------+ + | | | + +------------+ x------+ A,A,1,w,labels:[foo] + | A,B,2,x,labels:[foo] + A,A,1,w | + A,B,2,x | + | | + QualifierRegex("B") | + | | + A,B,2,x | + | | + +--------------------------------+ + | + A,A,1,w,labels:[foo] + A,B,2,x,labels:[foo] // could be switched + A,B,2,x // could be switched + + Despite being excluded by the qualifier filter, a copy of + every cell that reaches the sink is present in the final + result. + + As with an + [Interleave][google.bigtable.v2.RowFilter.Interleave], + duplicate cells are possible, and appear in an unspecified + mutual order. In this case we have a duplicate with column + "A:B" and timestamp 2, because one copy passed through the + all filter while the other was passed through the label and + sink. Note that one copy has label "foo", while the other + does not. + + Cannot be used within the ``predicate_filter``, + ``true_filter``, or ``false_filter`` of a + [Condition][google.bigtable.v2.RowFilter.Condition]. + pass_all_filter (bool): + Matches all cells, regardless of input. Functionally + equivalent to leaving ``filter`` unset, but included for + completeness. + block_all_filter (bool): + Does not match any cells, regardless of + input. Useful for temporarily disabling just + part of a filter. + row_key_regex_filter (bytes): + Matches only cells from rows whose keys satisfy the given + RE2 regex. In other words, passes through the entire row + when the key matches, and otherwise produces an empty row. + Note that, since row keys can contain arbitrary bytes, the + ``\C`` escape sequence must be used if a true wildcard is + desired. The ``.`` character will not match the new line + character ``\n``, which may be present in a binary key. + row_sample_filter (float): + Matches all cells from a row with probability + p, and matches no cells from the row with + probability 1-p. 
+ family_name_regex_filter (str): + Matches only cells from columns whose families satisfy the + given RE2 regex. For technical reasons, the regex must not + contain the ``:`` character, even if it is not being used as + a literal. Note that, since column families cannot contain + the new line character ``\n``, it is sufficient to use ``.`` + as a full wildcard when matching column family names. + column_qualifier_regex_filter (bytes): + Matches only cells from columns whose qualifiers satisfy the + given RE2 regex. Note that, since column qualifiers can + contain arbitrary bytes, the ``\C`` escape sequence must be + used if a true wildcard is desired. The ``.`` character will + not match the new line character ``\n``, which may be + present in a binary qualifier. + column_range_filter (google.cloud.bigtable_v2.types.ColumnRange): + Matches only cells from columns within the + given range. + timestamp_range_filter (google.cloud.bigtable_v2.types.TimestampRange): + Matches only cells with timestamps within the + given range. + value_regex_filter (bytes): + Matches only cells with values that satisfy the given + regular expression. Note that, since cell values can contain + arbitrary bytes, the ``\C`` escape sequence must be used if + a true wildcard is desired. The ``.`` character will not + match the new line character ``\n``, which may be present in + a binary value. + value_range_filter (google.cloud.bigtable_v2.types.ValueRange): + Matches only cells with values that fall + within the given range. + cells_per_row_offset_filter (int): + Skips the first N cells of each row, matching + all subsequent cells. If duplicate cells are + present, as is possible when using an + Interleave, each copy of the cell is counted + separately. + cells_per_row_limit_filter (int): + Matches only the first N cells of each row. + If duplicate cells are present, as is possible + when using an Interleave, each copy of the cell + is counted separately. + cells_per_column_limit_filter (int): + Matches only the most recent N cells within each column. For + example, if N=2, this filter would match column ``foo:bar`` + at timestamps 10 and 9, skip all earlier cells in + ``foo:bar``, and then begin matching again in column + ``foo:bar2``. If duplicate cells are present, as is possible + when using an Interleave, each copy of the cell is counted + separately. + strip_value_transformer (bool): + Replaces each cell's value with the empty + string. + apply_label_transformer (str): + Applies the given label to all cells in the output row. This + allows the client to determine which results were produced + from which part of the filter. + + Values must be at most 15 characters in length, and match + the RE2 pattern ``[a-z0-9\\-]+`` + + Due to a technical limitation, it is not currently possible + to apply multiple labels to a cell. As a result, a Chain may + have no more than one sub-filter which contains a + ``apply_label_transformer``. It is okay for an Interleave to + contain multiple ``apply_label_transformers``, as they will + be applied to separate copies of the input. This may be + relaxed in the future. + """ + + class Chain(proto.Message): + r"""A RowFilter which sends rows through several RowFilters in + sequence. + + Attributes: + filters (Sequence[google.cloud.bigtable_v2.types.RowFilter]): + The elements of "filters" are chained + together to process the input row: in row -> + f(0) -> intermediate row -> f(1) -> ... -> f(N) + -> out row The full chain is executed + atomically. 
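A minimal sketch of a two-link chain built from the types above (the family name and cell limit are illustrative only)::

    from google.cloud.bigtable_v2 import types

    # Keep only the most recent cell in each column of family "stats".
    chained = types.RowFilter(
        chain=types.RowFilter.Chain(
            filters=[
                types.RowFilter(family_name_regex_filter="stats"),
                types.RowFilter(cells_per_column_limit_filter=1),
            ],
        )
    )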
+ """ + + filters = proto.RepeatedField(proto.MESSAGE, number=1, message="RowFilter",) + + class Interleave(proto.Message): + r"""A RowFilter which sends each row to each of several component + RowFilters and interleaves the results. + + Attributes: + filters (Sequence[google.cloud.bigtable_v2.types.RowFilter]): + The elements of "filters" all process a copy of the input + row, and the results are pooled, sorted, and combined into a + single output row. If multiple cells are produced with the + same column and timestamp, they will all appear in the + output row in an unspecified mutual order. Consider the + following example, with three filters: + + :: + + input row + | + ----------------------------------------------------- + | | | + f(0) f(1) f(2) + | | | + 1: foo,bar,10,x foo,bar,10,z far,bar,7,a + 2: foo,blah,11,z far,blah,5,x far,blah,5,x + | | | + ----------------------------------------------------- + | + 1: foo,bar,10,z // could have switched with #2 + 2: foo,bar,10,x // could have switched with #1 + 3: foo,blah,11,z + 4: far,bar,7,a + 5: far,blah,5,x // identical to #6 + 6: far,blah,5,x // identical to #5 + + All interleaved filters are executed atomically. + """ + + filters = proto.RepeatedField(proto.MESSAGE, number=1, message="RowFilter",) + + class Condition(proto.Message): + r"""A RowFilter which evaluates one of two possible RowFilters, + depending on whether or not a predicate RowFilter outputs any + cells from the input row. + IMPORTANT NOTE: The predicate filter does not execute atomically + with the true and false filters, which may lead to inconsistent + or unexpected results. Additionally, Condition filters have poor + performance, especially when filters are set for the false + condition. + + Attributes: + predicate_filter (google.cloud.bigtable_v2.types.RowFilter): + If ``predicate_filter`` outputs any cells, then + ``true_filter`` will be evaluated on the input row. + Otherwise, ``false_filter`` will be evaluated. + true_filter (google.cloud.bigtable_v2.types.RowFilter): + The filter to apply to the input row if ``predicate_filter`` + returns any results. If not provided, no results will be + returned in the true case. + false_filter (google.cloud.bigtable_v2.types.RowFilter): + The filter to apply to the input row if ``predicate_filter`` + does not return any results. If not provided, no results + will be returned in the false case. 
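A conditional filter of that shape might look like the following sketch (the qualifier regex is a placeholder)::

    from google.cloud.bigtable_v2 import types

    conditional = types.RowFilter(
        condition=types.RowFilter.Condition(
            # If the row has any cell whose qualifier matches "opted_out"...
            predicate_filter=types.RowFilter(column_qualifier_regex_filter=b"opted_out"),
            # ...drop the row entirely; otherwise pass it through unchanged.
            true_filter=types.RowFilter(block_all_filter=True),
            false_filter=types.RowFilter(pass_all_filter=True),
        )
    )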
+ """ + + predicate_filter = proto.Field(proto.MESSAGE, number=1, message="RowFilter",) + + true_filter = proto.Field(proto.MESSAGE, number=2, message="RowFilter",) + + false_filter = proto.Field(proto.MESSAGE, number=3, message="RowFilter",) + + chain = proto.Field(proto.MESSAGE, number=1, oneof="filter", message=Chain,) + + interleave = proto.Field( + proto.MESSAGE, number=2, oneof="filter", message=Interleave, + ) + + condition = proto.Field(proto.MESSAGE, number=3, oneof="filter", message=Condition,) + + sink = proto.Field(proto.BOOL, number=16, oneof="filter") + + pass_all_filter = proto.Field(proto.BOOL, number=17, oneof="filter") + + block_all_filter = proto.Field(proto.BOOL, number=18, oneof="filter") + + row_key_regex_filter = proto.Field(proto.BYTES, number=4, oneof="filter") + + row_sample_filter = proto.Field(proto.DOUBLE, number=14, oneof="filter") + + family_name_regex_filter = proto.Field(proto.STRING, number=5, oneof="filter") + + column_qualifier_regex_filter = proto.Field(proto.BYTES, number=6, oneof="filter") + + column_range_filter = proto.Field( + proto.MESSAGE, number=7, oneof="filter", message="ColumnRange", + ) + + timestamp_range_filter = proto.Field( + proto.MESSAGE, number=8, oneof="filter", message="TimestampRange", + ) + + value_regex_filter = proto.Field(proto.BYTES, number=9, oneof="filter") + + value_range_filter = proto.Field( + proto.MESSAGE, number=15, oneof="filter", message="ValueRange", + ) + + cells_per_row_offset_filter = proto.Field(proto.INT32, number=10, oneof="filter") + + cells_per_row_limit_filter = proto.Field(proto.INT32, number=11, oneof="filter") + + cells_per_column_limit_filter = proto.Field(proto.INT32, number=12, oneof="filter") + + strip_value_transformer = proto.Field(proto.BOOL, number=13, oneof="filter") + + apply_label_transformer = proto.Field(proto.STRING, number=19, oneof="filter") + + +class Mutation(proto.Message): + r"""Specifies a particular change to be made to the contents of a + row. + + Attributes: + set_cell (google.cloud.bigtable_v2.types.Mutation.SetCell): + Set a cell's value. + delete_from_column (google.cloud.bigtable_v2.types.Mutation.DeleteFromColumn): + Deletes cells from a column. + delete_from_family (google.cloud.bigtable_v2.types.Mutation.DeleteFromFamily): + Deletes cells from a column family. + delete_from_row (google.cloud.bigtable_v2.types.Mutation.DeleteFromRow): + Deletes cells from the entire row. + """ + + class SetCell(proto.Message): + r"""A Mutation which sets the value of the specified cell. + + Attributes: + family_name (str): + The name of the family into which new data should be + written. Must match ``[-_.a-zA-Z0-9]+`` + column_qualifier (bytes): + The qualifier of the column into which new + data should be written. Can be any byte string, + including the empty string. + timestamp_micros (int): + The timestamp of the cell into which new data + should be written. Use -1 for current Bigtable + server time. Otherwise, the client should set + this value itself, noting that the default value + is a timestamp of zero if the field is left + unspecified. Values must match the granularity + of the table (e.g. micros, millis). + value (bytes): + The value to be written into the specified + cell. 
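Tying the pieces together, a single-row write built from a ``SetCell`` mutation could be sketched like this (the table path, family, qualifier, and value are placeholders)::

    from google.cloud.bigtable_v2 import types

    request = types.MutateRowRequest(
        table_name="projects/my-project/instances/my-instance/tables/my-table",
        row_key=b"user#0001",
        mutations=[
            types.Mutation(
                set_cell=types.Mutation.SetCell(
                    family_name="profile",
                    column_qualifier=b"email",
                    timestamp_micros=-1,  # -1 asks the server to assign the timestamp
                    value=b"someone@example.com",
                )
            ),
        ],
    )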
+ """ + + family_name = proto.Field(proto.STRING, number=1) + + column_qualifier = proto.Field(proto.BYTES, number=2) + + timestamp_micros = proto.Field(proto.INT64, number=3) + + value = proto.Field(proto.BYTES, number=4) + + class DeleteFromColumn(proto.Message): + r"""A Mutation which deletes cells from the specified column, + optionally restricting the deletions to a given timestamp range. + + Attributes: + family_name (str): + The name of the family from which cells should be deleted. + Must match ``[-_.a-zA-Z0-9]+`` + column_qualifier (bytes): + The qualifier of the column from which cells + should be deleted. Can be any byte string, + including the empty string. + time_range (google.cloud.bigtable_v2.types.TimestampRange): + The range of timestamps within which cells + should be deleted. + """ + + family_name = proto.Field(proto.STRING, number=1) + + column_qualifier = proto.Field(proto.BYTES, number=2) + + time_range = proto.Field(proto.MESSAGE, number=3, message="TimestampRange",) + + class DeleteFromFamily(proto.Message): + r"""A Mutation which deletes all cells from the specified column + family. + + Attributes: + family_name (str): + The name of the family from which cells should be deleted. + Must match ``[-_.a-zA-Z0-9]+`` + """ + + family_name = proto.Field(proto.STRING, number=1) + + class DeleteFromRow(proto.Message): + r"""A Mutation which deletes all cells from the containing row.""" + + set_cell = proto.Field(proto.MESSAGE, number=1, oneof="mutation", message=SetCell,) + + delete_from_column = proto.Field( + proto.MESSAGE, number=2, oneof="mutation", message=DeleteFromColumn, + ) + + delete_from_family = proto.Field( + proto.MESSAGE, number=3, oneof="mutation", message=DeleteFromFamily, + ) + + delete_from_row = proto.Field( + proto.MESSAGE, number=4, oneof="mutation", message=DeleteFromRow, + ) + + +class ReadModifyWriteRule(proto.Message): + r"""Specifies an atomic read/modify/write operation on the latest + value of the specified column. + + Attributes: + family_name (str): + The name of the family to which the read/modify/write should + be applied. Must match ``[-_.a-zA-Z0-9]+`` + column_qualifier (bytes): + The qualifier of the column to which the + read/modify/write should be applied. + Can be any byte string, including the empty + string. + append_value (bytes): + Rule specifying that ``append_value`` be appended to the + existing value. If the targeted cell is unset, it will be + treated as containing the empty string. + increment_amount (int): + Rule specifying that ``increment_amount`` be added to the + existing value. If the targeted cell is unset, it will be + treated as containing a zero. Otherwise, the targeted cell + must contain an 8-byte value (interpreted as a 64-bit + big-endian signed integer), or the entire request will fail. + """ + + family_name = proto.Field(proto.STRING, number=1) + + column_qualifier = proto.Field(proto.BYTES, number=2) + + append_value = proto.Field(proto.BYTES, number=3, oneof="rule") + + increment_amount = proto.Field(proto.INT64, number=4, oneof="rule") + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index 7947441c6f92..70d9c13c2561 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -14,6 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +# Generated by synthtool. DO NOT EDIT! 
+ from __future__ import absolute_import import os import shutil @@ -21,10 +23,24 @@ import nox +BLACK_VERSION = "black==19.10b0" +BLACK_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"] + DEFAULT_PYTHON_VERSION = "3.8" -SYSTEM_TEST_PYTHON_VERSIONS = ["2.7", "3.8"] -UNIT_TEST_PYTHON_VERSIONS = ["2.7", "3.5", "3.6", "3.7", "3.8"] -LOCAL_DEPS = () +SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"] +UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9"] + +# 'docfx' is excluded since it only needs to run in 'docs-presubmit' +nox.options.sessions = [ + "unit", + "system", + "cover", + "lint", + "lint_setup_py", + "blacken", + "docs", +] + @nox.session(python=DEFAULT_PYTHON_VERSION) def lint(session): @@ -33,13 +49,9 @@ def lint(session): Returns a failure if the linters find linting errors or sufficiently serious code quality issues. """ - session.install("flake8", "black", *LOCAL_DEPS) + session.install("flake8", BLACK_VERSION) session.run( - "black", - "--check", - "google", - "tests", - "docs", + "black", "--check", *BLACK_PATHS, ) session.run("flake8", "google", "tests") @@ -49,13 +61,14 @@ def blacken(session): """Run black. Format code to uniform standard. + + This currently uses Python 3.6 due to the automated Kokoro run of synthtool. + That run uses an image that doesn't have 3.6 installed. Before updating this + check the state of the `gcp_ubuntu_config` we use for that Kokoro run. """ - session.install("black") + session.install(BLACK_VERSION) session.run( - "black", - "google", - "tests", - "docs", + "black", *BLACK_PATHS, ) @@ -68,17 +81,20 @@ def lint_setup_py(session): def default(session): # Install all test dependencies, then install this package in-place. - session.install("mock", "pytest", "pytest-cov") - for local_dep in LOCAL_DEPS: - session.install("-e", local_dep) + session.install("asyncmock", "pytest-asyncio") + + session.install( + "mock", "pytest", "pytest-cov", + ) + session.install("-e", ".") # Run py.test against the unit tests. session.run( "py.test", "--quiet", - "--cov=google.cloud", - "--cov=tests.unit", + "--cov=google/cloud", + "--cov=tests/unit", "--cov-append", "--cov-config=.coveragerc", "--cov-report=", @@ -94,24 +110,15 @@ def unit(session): default(session) -@nox.session(python=DEFAULT_PYTHON_VERSION) -def cover(session): - """Run the final coverage report. - - This outputs the coverage report aggregating coverage from the unit - test runs (not system test runs), and then erases coverage data. - """ - session.install("coverage", "pytest-cov") - session.run("coverage", "report", "--show-missing", "--fail-under=99") - - session.run("coverage", "erase") - - @nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS) def system(session): """Run the system test suite.""" system_test_path = os.path.join("tests", "system.py") system_test_folder_path = os.path.join("tests", "system") + + # Check the value of `RUN_SYSTEM_TESTS` env var. It defaults to true. + if os.environ.get("RUN_SYSTEM_TESTS", "true") == "false": + session.skip("RUN_SYSTEM_TESTS is set to false, skipping") # Sanity check: Only run tests if the environment variable is set. if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""): session.skip("Credentials must be set via environment variable") @@ -127,10 +134,9 @@ def system(session): # Install all test dependencies, then install this package into the # virtualenv's dist-packages. 
- session.install("mock", "pytest") - for local_dep in LOCAL_DEPS: - session.install("-e", local_dep) - session.install("-e", "test_utils/") + session.install( + "mock", "pytest", "google-cloud-testutils", + ) session.install("-e", ".") # Run py.test against the system tests. @@ -140,32 +146,17 @@ def system(session): session.run("py.test", "--quiet", system_test_folder_path, *session.posargs) -@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS) -def snippets(session): - """Run the documentation example snippets.""" - # Sanity check: Only run snippets system tests if the environment variable - # is set. - if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""): - session.skip("Credentials must be set via environment variable.") +@nox.session(python=DEFAULT_PYTHON_VERSION) +def cover(session): + """Run the final coverage report. - # Install all test dependencies, then install local packages in place. - session.install("mock", "pytest") - for local_dep in LOCAL_DEPS: - session.install("-e", local_dep) - session.install("-e", "test_utils/") - session.install("-e", ".") - session.run( - "py.test", - "--quiet", - os.path.join("docs", "snippets.py"), - *session.posargs - ) - session.run( - "py.test", - "--quiet", - os.path.join("docs", "snippets_table.py"), - *session.posargs - ) + This outputs the coverage report aggregating coverage from the unit + test runs (not system test runs), and then erases coverage data. + """ + session.install("coverage", "pytest-cov") + session.run("coverage", "report", "--show-missing", "--fail-under=99") + + session.run("coverage", "erase") @nox.session(python=DEFAULT_PYTHON_VERSION) @@ -189,12 +180,15 @@ def docs(session): os.path.join("docs", "_build", "html", ""), ) + @nox.session(python=DEFAULT_PYTHON_VERSION) def docfx(session): """Build the docfx yaml files for this library.""" session.install("-e", ".") - session.install("sphinx", "alabaster", "recommonmark", "sphinx-docfx-yaml") + # sphinx-docfx-yaml supports up to sphinx version 1.5.5. 
+ # https://github.com/docascode/sphinx-docfx-yaml/issues/97 + session.install("sphinx==1.5.5", "alabaster", "recommonmark", "sphinx-docfx-yaml") shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) session.run( diff --git a/packages/google-cloud-bigtable/samples/beam/noxfile.py b/packages/google-cloud-bigtable/samples/beam/noxfile.py index ba55d7ce53ca..171bee6570df 100644 --- a/packages/google-cloud-bigtable/samples/beam/noxfile.py +++ b/packages/google-cloud-bigtable/samples/beam/noxfile.py @@ -87,7 +87,8 @@ def get_pytest_env_vars(): TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) -INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False)) +# todo(kolea2): temporary workaround to install pinned dep version +INSTALL_LIBRARY_FROM_SOURCE = False # # Style Checks # diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index cb0825c6f0bf..69b59d1e29d9 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ apache-beam==2.27.0 -google-cloud-bigtable==1.6.1 +google-cloud-bigtable<2.0.0dev1 google-cloud-core==1.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py b/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py new file mode 100644 index 000000000000..d30de39dbfd8 --- /dev/null +++ b/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py @@ -0,0 +1,216 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class bigtable_adminCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + 'check_consistency': ('name', 'consistency_token', ), + 'create_app_profile': ('parent', 'app_profile_id', 'app_profile', 'ignore_warnings', ), + 'create_backup': ('parent', 'backup_id', 'backup', ), + 'create_cluster': ('parent', 'cluster_id', 'cluster', ), + 'create_instance': ('parent', 'instance_id', 'instance', 'clusters', ), + 'create_table': ('parent', 'table_id', 'table', 'initial_splits', ), + 'create_table_from_snapshot': ('parent', 'table_id', 'source_snapshot', ), + 'delete_app_profile': ('name', 'ignore_warnings', ), + 'delete_backup': ('name', ), + 'delete_cluster': ('name', ), + 'delete_instance': ('name', ), + 'delete_snapshot': ('name', ), + 'delete_table': ('name', ), + 'drop_row_range': ('name', 'row_key_prefix', 'delete_all_data_from_table', ), + 'generate_consistency_token': ('name', ), + 'get_app_profile': ('name', ), + 'get_backup': ('name', ), + 'get_cluster': ('name', ), + 'get_iam_policy': ('resource', 'options', ), + 'get_instance': ('name', ), + 'get_snapshot': ('name', ), + 'get_table': ('name', 'view', ), + 'list_app_profiles': ('parent', 'page_size', 'page_token', ), + 'list_backups': ('parent', 'filter', 'order_by', 'page_size', 'page_token', ), + 'list_clusters': ('parent', 'page_token', ), + 'list_instances': ('parent', 'page_token', ), + 'list_snapshots': ('parent', 'page_size', 'page_token', ), + 'list_tables': ('parent', 'view', 'page_size', 'page_token', ), + 'modify_column_families': ('name', 'modifications', ), + 'partial_update_instance': ('instance', 'update_mask', ), + 'restore_table': ('parent', 'table_id', 'backup', ), + 'set_iam_policy': ('resource', 'policy', ), + 'snapshot_table': ('name', 'cluster', 'snapshot_id', 'ttl', 'description', ), + 'test_iam_permissions': ('resource', 'permissions', ), + 'update_app_profile': ('app_profile', 'update_mask', 'ignore_warnings', ), + 'update_backup': ('backup', 'update_mask', ), + 'update_cluster': ('serve_nodes', 'name', 'location', 'state', 'default_storage_type', ), + 'update_instance': ('display_name', 'name', 'state', 'type_', 'labels', ), + + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. + args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. 
+ return updated + + kwargs, ctrl_kwargs = partition( + lambda a: not a.keyword.value in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), + cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. + for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=bigtable_adminCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. + updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. + with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the bigtable_admin client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method. 
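Concretely, the rewrite turns a flattened positional call into a single keyword-only ``request`` argument while leaving control parameters (``retry``, ``timeout``, ``metadata``) untouched; a hypothetical before/after (the resource name and retry object are placeholders)::

    # before
    client.delete_table("projects/p/instances/i/tables/t", retry=my_retry)

    # after fixup (roughly; exact formatting comes from libcst)
    client.delete_table(
        request={'name': "projects/p/instances/i/tables/t"},
        retry=my_retry,
    )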
+""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py b/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py new file mode 100644 index 000000000000..e1ff816ee5d9 --- /dev/null +++ b/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py @@ -0,0 +1,184 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class bigtableCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + 'check_and_mutate_row': ('table_name', 'row_key', 'app_profile_id', 'predicate_filter', 'true_mutations', 'false_mutations', ), + 'mutate_row': ('table_name', 'row_key', 'mutations', 'app_profile_id', ), + 'mutate_rows': ('table_name', 'entries', 'app_profile_id', ), + 'read_modify_write_row': ('table_name', 'row_key', 'rules', 'app_profile_id', ), + 'read_rows': ('table_name', 'app_profile_id', 'rows', 'filter', 'rows_limit', ), + 'sample_row_keys': ('table_name', 'app_profile_id', ), + + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. 
+ args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. + return updated + + kwargs, ctrl_kwargs = partition( + lambda a: not a.keyword.value in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), + cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. + for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=bigtableCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. + updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. + with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the bigtable client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method. 
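The same pattern applies to the data-plane surface; positional arguments map onto the parameter tuple above in order, for example (placeholder values, approximate output formatting)::

    # before
    client.read_rows(table_name, "my-profile", row_set, timeout=30.0)

    # after fixup
    client.read_rows(
        request={
            'table_name': table_name,
            'app_profile_id': "my-profile",
            'rows': row_set,
        },
        timeout=30.0,
    )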
+""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index bfb6240f5143..c1fa1311cbf5 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -20,41 +20,44 @@ # Package metadata. -name = 'google-cloud-bigtable' -description = 'Google Cloud Bigtable API client library' +name = "google-cloud-bigtable" +description = "Google Cloud Bigtable API client library" version = "1.7.0" # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' # 'Development Status :: 5 - Production/Stable' -release_status = 'Development Status :: 5 - Production/Stable' +release_status = "Development Status :: 5 - Production/Stable" dependencies = [ - "google-api-core[grpc] >= 1.14.0, < 2.0.0dev", + "google-api-core[grpc] >= 1.22.2, < 2.0.0dev", "google-cloud-core >= 1.4.1, < 2.0dev", "grpc-google-iam-v1 >= 0.12.3, < 0.13dev", + "proto-plus >= 1.13.0", + "libcst >= 0.2.5", ] -extras = { -} +extras = {} # Setup boilerplate below this line. package_root = os.path.abspath(os.path.dirname(__file__)) -readme_filename = os.path.join(package_root, 'README.rst') -with io.open(readme_filename, encoding='utf-8') as readme_file: +readme_filename = os.path.join(package_root, "README.rst") +with io.open(readme_filename, encoding="utf-8") as readme_file: readme = readme_file.read() # Only include packages under the 'google' namespace. Do not include tests, # benchmarks, etc. packages = [ - package for package in setuptools.find_packages() - if package.startswith('google')] + package + for package in setuptools.PEP420PackageFinder.find() + if package.startswith("google") +] # Determine which namespaces are needed. 
-namespaces = ['google'] -if 'google.cloud' in packages: - namespaces.append('google.cloud') +namespaces = ["google"] +if "google.cloud" in packages: + namespaces.append("google.cloud") setuptools.setup( @@ -62,30 +65,30 @@ version=version, description=description, long_description=readme, - author='Google LLC', - author_email='googleapis-packages@google.com', - license='Apache 2.0', - url='https://github.com/googleapis/python-bigtable', + author="Google LLC", + author_email="googleapis-packages@google.com", + license="Apache 2.0", + url="https://github.com/googleapis/python-bigtable", classifiers=[ release_status, - 'Intended Audience :: Developers', - 'License :: OSI Approved :: Apache Software License', - 'Programming Language :: Python', - 'Programming Language :: Python :: 2', - 'Programming Language :: Python :: 2.7', - 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.5', - 'Programming Language :: Python :: 3.6', - 'Programming Language :: Python :: 3.7', - 'Operating System :: OS Independent', - 'Topic :: Internet', + "Intended Audience :: Developers", + "License :: OSI Approved :: Apache Software License", + "Programming Language :: Python", + "Programming Language :: Python :: 3.6", + "Programming Language :: Python :: 3.7", + "Operating System :: OS Independent", + "Topic :: Internet", ], - platforms='Posix; MacOS X; Windows', + platforms="Posix; MacOS X; Windows", packages=packages, namespace_packages=namespaces, install_requires=dependencies, extras_require=extras, - python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*', + scripts=[ + "scripts/fixup_bigtable_v2_keywords.py", + "scripts/fixup_bigtable_admin_v2_keywords.py", + ], + python_requires=">=3.6", include_package_data=True, zip_safe=False, ) diff --git a/packages/google-cloud-bigtable/synth.py b/packages/google-cloud-bigtable/synth.py index 07c2933395bc..e2fda520a737 100644 --- a/packages/google-cloud-bigtable/synth.py +++ b/packages/google-cloud-bigtable/synth.py @@ -33,6 +33,7 @@ s.move(library / "google/cloud/bigtable_v2") s.move(library / "tests") +s.move(library / "scripts") # Generate admin client library = gapic.py_library( @@ -44,58 +45,16 @@ s.move(library / "google/cloud/bigtable_admin_v2") s.move(library / "tests") - -# ---------------------------------------------------------------------------- -# Work around non-standard installations (missing setuptools). -# -# These replacements can be removed after migrating to the microgenerator, -# which will generate them directly. 
-# ---------------------------------------------------------------------------- - -admin_clients = [ - "google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py", - "google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py", -] - -s.replace( - admin_clients, - """\ -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution\( - 'google-cloud-bigtable-admin', -\).version -""", - """\ -try: - _GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( - "google-cloud-bigtable" - ).version -except pkg_resources.DistributionNotFound: # pragma: NO COVER - _GAPIC_LIBRARY_VERSION = None -""" -) - -s.replace( - "google/cloud/bigtable_v2/gapic/bigtable_client.py", - """\ -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution\( - 'google-cloud-bigtable', -\).version -""", - """\ -try: - _GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( - "google-cloud-bigtable" - ).version -except pkg_resources.DistributionNotFound: # pragma: NO COVER - _GAPIC_LIBRARY_VERSION = None -""" -) +s.move(library / "scripts") # ---------------------------------------------------------------------------- # Add templated files # ---------------------------------------------------------------------------- -templated_files = common.py_library(unit_cov_level=97, cov_level=99, samples=True) -s.move(templated_files, excludes=['noxfile.py']) +templated_files = common.py_library( + samples=True, # set to True only if there are samples + microgenerator=True, +) +s.move(templated_files, excludes=[".coveragerc"]) # ---------------------------------------------------------------------------- # Samples templates diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index daf644ea28a4..84f9977e1d95 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -18,6 +18,7 @@ import time import unittest +from google.api_core.datetime_helpers import DatetimeWithNanoseconds from google.api_core.exceptions import DeadlineExceeded from google.api_core.exceptions import TooManyRequests from google.cloud.environment_vars import BIGTABLE_EMULATOR @@ -41,9 +42,10 @@ from google.cloud.bigtable.row_data import PartialRowData from google.cloud.bigtable.row_set import RowSet from google.cloud.bigtable.row_set import RowRange -from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_table_admin_client_config as table_admin_config, -) + +# from google.cloud.bigtable_admin_v2.gapic import ( +# bigtable_table_admin_client_config as table_admin_config, +# ) UNIQUE_SUFFIX = unique_resource_id("-") LOCATION_ID = "us-central1-c" @@ -104,11 +106,11 @@ def setUpModule(): from google.cloud.bigtable.enums import Instance # See: https://github.com/googleapis/google-cloud-python/issues/5928 - interfaces = table_admin_config.config["interfaces"] - iface_config = interfaces["google.bigtable.admin.v2.BigtableTableAdmin"] - methods = iface_config["methods"] - create_table = methods["CreateTable"] - create_table["timeout_millis"] = 90000 + # interfaces = table_admin_config.config["interfaces"] + # iface_config = interfaces["google.bigtable.admin.v2.BigtableTableAdmin"] + # methods = iface_config["methods"] + # create_table = methods["CreateTable"] + # create_table["timeout_millis"] = 90000 Config.IN_EMULATOR = os.getenv(BIGTABLE_EMULATOR) is not None @@ -838,6 +840,8 @@ def test_delete_column_family(self): self.assertEqual(temp_table.list_column_families(), {}) def test_backup(self): + from google.cloud._helpers import 
_datetime_to_pb_timestamp + temp_table_id = "test-backup-table" temp_table = Config.INSTANCE_DATA.table(temp_table_id) temp_table.create() @@ -876,11 +880,16 @@ def test_backup(self): # Testing `Backup.update_expire_time()` method expire += 3600 # A one-hour change in the `expire_time` parameter - temp_backup.update_expire_time(datetime.datetime.utcfromtimestamp(expire)) + updated_time = datetime.datetime.utcfromtimestamp(expire) + temp_backup.update_expire_time(updated_time) + test = _datetime_to_pb_timestamp(updated_time) # Testing `Backup.get()` method temp_table_backup = temp_backup.get() - self.assertEqual(expire, temp_table_backup.expire_time.seconds) + self.assertEqual( + test.seconds, + DatetimeWithNanoseconds.timestamp(temp_table_backup.expire_time), + ) # Testing `Table.restore()` and `Backup.retore()` methods restored_table_id = "test-backup-table-restored" diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/__init__.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/__init__.py new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/__init__.py @@ -0,0 +1 @@ + diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py new file mode 100644 index 000000000000..5c6752cac3dd --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -0,0 +1,5316 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from google import auth +from google.api_core import client_options +from google.api_core import exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.auth import credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminAsyncClient, +) +from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, +) +from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import pagers +from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import transports +from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin +from google.cloud.bigtable_admin_v2.types import common +from google.cloud.bigtable_admin_v2.types import instance +from google.cloud.bigtable_admin_v2.types import instance as gba_instance +from google.iam.v1 import iam_policy_pb2 as iam_policy # type: ignore +from google.iam.v1 import options_pb2 as options # type: ignore +from google.iam.v1 import policy_pb2 as policy # type: ignore +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.type import expr_pb2 as expr # type: ignore + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert BigtableInstanceAdminClient._get_default_mtls_endpoint(None) is None + assert ( + BigtableInstanceAdminClient._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + BigtableInstanceAdminClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + BigtableInstanceAdminClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + BigtableInstanceAdminClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + BigtableInstanceAdminClient._get_default_mtls_endpoint(non_googleapi) + == non_googleapi + ) + + +def test_bigtable_instance_admin_client_from_service_account_info(): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_info" + ) as factory: + factory.return_value = creds + info = {"valid": True} + client = BigtableInstanceAdminClient.from_service_account_info(info) + assert client.transport._credentials == creds + + assert client.transport._host == "bigtableadmin.googleapis.com:443" + + +@pytest.mark.parametrize( + "client_class", [BigtableInstanceAdminClient, BigtableInstanceAdminAsyncClient,] +) +def test_bigtable_instance_admin_client_from_service_account_file(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + + assert client.transport._host == "bigtableadmin.googleapis.com:443" + + +def test_bigtable_instance_admin_client_get_transport_class(): + transport = BigtableInstanceAdminClient.get_transport_class() + available_transports = [ + transports.BigtableInstanceAdminGrpcTransport, + ] + assert transport in available_transports + + transport = BigtableInstanceAdminClient.get_transport_class("grpc") + assert transport == transports.BigtableInstanceAdminGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + BigtableInstanceAdminClient, + transports.BigtableInstanceAdminGrpcTransport, + "grpc", + ), + ( + BigtableInstanceAdminAsyncClient, + transports.BigtableInstanceAdminGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +@mock.patch.object( + BigtableInstanceAdminClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(BigtableInstanceAdminClient), +) +@mock.patch.object( + BigtableInstanceAdminAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(BigtableInstanceAdminAsyncClient), +) +def test_bigtable_instance_admin_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(BigtableInstanceAdminClient, "get_transport_class") as gtc: + transport = transport_class(credentials=credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(BigtableInstanceAdminClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + ( + BigtableInstanceAdminClient, + transports.BigtableInstanceAdminGrpcTransport, + "grpc", + "true", + ), + ( + BigtableInstanceAdminAsyncClient, + transports.BigtableInstanceAdminGrpcAsyncIOTransport, + "grpc_asyncio", + "true", + ), + ( + BigtableInstanceAdminClient, + transports.BigtableInstanceAdminGrpcTransport, + "grpc", + "false", + ), + ( + BigtableInstanceAdminAsyncClient, + transports.BigtableInstanceAdminGrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + ], +) +@mock.patch.object( + BigtableInstanceAdminClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(BigtableInstanceAdminClient), +) +@mock.patch.object( + BigtableInstanceAdminAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(BigtableInstanceAdminAsyncClient), +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_bigtable_instance_admin_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=client_cert_source_callback, + ): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + BigtableInstanceAdminClient, + transports.BigtableInstanceAdminGrpcTransport, + "grpc", + ), + ( + BigtableInstanceAdminAsyncClient, + transports.BigtableInstanceAdminGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_bigtable_instance_admin_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. + options = client_options.ClientOptions(scopes=["1", "2"],) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + BigtableInstanceAdminClient, + transports.BigtableInstanceAdminGrpcTransport, + "grpc", + ), + ( + BigtableInstanceAdminAsyncClient, + transports.BigtableInstanceAdminGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_bigtable_instance_admin_client_client_options_credentials_file( + client_class, transport_class, transport_name +): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions(credentials_file="credentials.json") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_bigtable_instance_admin_client_client_options_from_dict(): + with mock.patch( + "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.BigtableInstanceAdminGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = BigtableInstanceAdminClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_create_instance( + transport: str = "grpc", request_type=bigtable_instance_admin.CreateInstanceRequest +): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_instance), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.create_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.CreateInstanceRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_instance_from_dict(): + test_create_instance(request_type=dict) + + +@pytest.mark.asyncio +async def test_create_instance_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.CreateInstanceRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_instance), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.create_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.CreateInstanceRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_instance_async_from_dict(): + await test_create_instance_async(request_type=dict) + + +def test_create_instance_field_headers(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.CreateInstanceRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_instance), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.create_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_instance_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.CreateInstanceRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_instance), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.create_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_instance_flattened(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_instance), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_instance( + parent="parent_value", + instance_id="instance_id_value", + instance=gba_instance.Instance(name="name_value"), + clusters={"key_value": gba_instance.Cluster(name="name_value")}, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].instance_id == "instance_id_value" + + assert args[0].instance == gba_instance.Instance(name="name_value") + + assert args[0].clusters == { + "key_value": gba_instance.Cluster(name="name_value") + } + + +def test_create_instance_flattened_error(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_instance( + bigtable_instance_admin.CreateInstanceRequest(), + parent="parent_value", + instance_id="instance_id_value", + instance=gba_instance.Instance(name="name_value"), + clusters={"key_value": gba_instance.Cluster(name="name_value")}, + ) + + +@pytest.mark.asyncio +async def test_create_instance_flattened_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_instance), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_instance( + parent="parent_value", + instance_id="instance_id_value", + instance=gba_instance.Instance(name="name_value"), + clusters={"key_value": gba_instance.Cluster(name="name_value")}, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].instance_id == "instance_id_value" + + assert args[0].instance == gba_instance.Instance(name="name_value") + + assert args[0].clusters == { + "key_value": gba_instance.Cluster(name="name_value") + } + + +@pytest.mark.asyncio +async def test_create_instance_flattened_error_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_instance( + bigtable_instance_admin.CreateInstanceRequest(), + parent="parent_value", + instance_id="instance_id_value", + instance=gba_instance.Instance(name="name_value"), + clusters={"key_value": gba_instance.Cluster(name="name_value")}, + ) + + +def test_get_instance( + transport: str = "grpc", request_type=bigtable_instance_admin.GetInstanceRequest +): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_instance), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = instance.Instance( + name="name_value", + display_name="display_name_value", + state=instance.Instance.State.READY, + type_=instance.Instance.Type.PRODUCTION, + ) + + response = client.get_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.GetInstanceRequest() + + # Establish that the response is the type that we expect. 
+ + assert isinstance(response, instance.Instance) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.state == instance.Instance.State.READY + + assert response.type_ == instance.Instance.Type.PRODUCTION + + +def test_get_instance_from_dict(): + test_get_instance(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_instance_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.GetInstanceRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_instance), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + instance.Instance( + name="name_value", + display_name="display_name_value", + state=instance.Instance.State.READY, + type_=instance.Instance.Type.PRODUCTION, + ) + ) + + response = await client.get_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.GetInstanceRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, instance.Instance) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.state == instance.Instance.State.READY + + assert response.type_ == instance.Instance.Type.PRODUCTION + + +@pytest.mark.asyncio +async def test_get_instance_async_from_dict(): + await test_get_instance_async(request_type=dict) + + +def test_get_instance_field_headers(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.GetInstanceRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_instance), "__call__") as call: + call.return_value = instance.Instance() + + client.get_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_instance_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.GetInstanceRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_instance), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.Instance()) + + await client.get_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_instance_flattened(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_instance), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = instance.Instance() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_instance(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_get_instance_flattened_error(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_instance( + bigtable_instance_admin.GetInstanceRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_instance_flattened_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_instance), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = instance.Instance() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.Instance()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_instance(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_instance_flattened_error_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_instance( + bigtable_instance_admin.GetInstanceRequest(), name="name_value", + ) + + +def test_list_instances( + transport: str = "grpc", request_type=bigtable_instance_admin.ListInstancesRequest +): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_instances), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = bigtable_instance_admin.ListInstancesResponse( + failed_locations=["failed_locations_value"], + next_page_token="next_page_token_value", + ) + + response = client.list_instances(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.ListInstancesRequest() + + # Establish that the response is the type that we expect. + + assert response.raw_page is response + + assert isinstance(response, bigtable_instance_admin.ListInstancesResponse) + + assert response.failed_locations == ["failed_locations_value"] + + assert response.next_page_token == "next_page_token_value" + + +def test_list_instances_from_dict(): + test_list_instances(request_type=dict) + + +@pytest.mark.asyncio +async def test_list_instances_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.ListInstancesRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_instances), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_instance_admin.ListInstancesResponse( + failed_locations=["failed_locations_value"], + next_page_token="next_page_token_value", + ) + ) + + response = await client.list_instances(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.ListInstancesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable_instance_admin.ListInstancesResponse) + + assert response.failed_locations == ["failed_locations_value"] + + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_instances_async_from_dict(): + await test_list_instances_async(request_type=dict) + + +def test_list_instances_field_headers(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.ListInstancesRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_instances), "__call__") as call: + call.return_value = bigtable_instance_admin.ListInstancesResponse() + + client.list_instances(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_instances_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. 
Set these to a non-empty value. + request = bigtable_instance_admin.ListInstancesRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_instances), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_instance_admin.ListInstancesResponse() + ) + + await client.list_instances(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_instances_flattened(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_instances), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_instance_admin.ListInstancesResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_instances(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +def test_list_instances_flattened_error(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_instances( + bigtable_instance_admin.ListInstancesRequest(), parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_instances_flattened_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_instances), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_instance_admin.ListInstancesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_instance_admin.ListInstancesResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_instances(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +@pytest.mark.asyncio +async def test_list_instances_flattened_error_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.list_instances( + bigtable_instance_admin.ListInstancesRequest(), parent="parent_value", + ) + + +def test_update_instance(transport: str = "grpc", request_type=instance.Instance): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_instance), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = instance.Instance( + name="name_value", + display_name="display_name_value", + state=instance.Instance.State.READY, + type_=instance.Instance.Type.PRODUCTION, + ) + + response = client.update_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == instance.Instance() + + # Establish that the response is the type that we expect. + + assert isinstance(response, instance.Instance) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.state == instance.Instance.State.READY + + assert response.type_ == instance.Instance.Type.PRODUCTION + + +def test_update_instance_from_dict(): + test_update_instance(request_type=dict) + + +@pytest.mark.asyncio +async def test_update_instance_async( + transport: str = "grpc_asyncio", request_type=instance.Instance +): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_instance), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + instance.Instance( + name="name_value", + display_name="display_name_value", + state=instance.Instance.State.READY, + type_=instance.Instance.Type.PRODUCTION, + ) + ) + + response = await client.update_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == instance.Instance() + + # Establish that the response is the type that we expect. + assert isinstance(response, instance.Instance) + + assert response.name == "name_value" + + assert response.display_name == "display_name_value" + + assert response.state == instance.Instance.State.READY + + assert response.type_ == instance.Instance.Type.PRODUCTION + + +@pytest.mark.asyncio +async def test_update_instance_async_from_dict(): + await test_update_instance_async(request_type=dict) + + +def test_update_instance_field_headers(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = instance.Instance() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.update_instance), "__call__") as call: + call.return_value = instance.Instance() + + client.update_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_update_instance_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = instance.Instance() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_instance), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.Instance()) + + await client.update_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_partial_update_instance( + transport: str = "grpc", + request_type=bigtable_instance_admin.PartialUpdateInstanceRequest, +): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.partial_update_instance), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.partial_update_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.PartialUpdateInstanceRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_partial_update_instance_from_dict(): + test_partial_update_instance(request_type=dict) + + +@pytest.mark.asyncio +async def test_partial_update_instance_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.PartialUpdateInstanceRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.partial_update_instance), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.partial_update_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.PartialUpdateInstanceRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_partial_update_instance_async_from_dict(): + await test_partial_update_instance_async(request_type=dict) + + +def test_partial_update_instance_field_headers(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.PartialUpdateInstanceRequest() + request.instance.name = "instance.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.partial_update_instance), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.partial_update_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "instance.name=instance.name/value",) in kw[ + "metadata" + ] + + +@pytest.mark.asyncio +async def test_partial_update_instance_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.PartialUpdateInstanceRequest() + request.instance.name = "instance.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.partial_update_instance), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.partial_update_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "instance.name=instance.name/value",) in kw[ + "metadata" + ] + + +def test_partial_update_instance_flattened(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.partial_update_instance), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.partial_update_instance( + instance=gba_instance.Instance(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].instance == gba_instance.Instance(name="name_value") + + assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + + +def test_partial_update_instance_flattened_error(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.partial_update_instance( + bigtable_instance_admin.PartialUpdateInstanceRequest(), + instance=gba_instance.Instance(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.asyncio +async def test_partial_update_instance_flattened_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.partial_update_instance), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.partial_update_instance( + instance=gba_instance.Instance(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].instance == gba_instance.Instance(name="name_value") + + assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + + +@pytest.mark.asyncio +async def test_partial_update_instance_flattened_error_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.partial_update_instance( + bigtable_instance_admin.PartialUpdateInstanceRequest(), + instance=gba_instance.Instance(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + +def test_delete_instance( + transport: str = "grpc", request_type=bigtable_instance_admin.DeleteInstanceRequest +): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_instance), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.DeleteInstanceRequest() + + # Establish that the response is the type that we expect. 
+ assert response is None + + +def test_delete_instance_from_dict(): + test_delete_instance(request_type=dict) + + +@pytest.mark.asyncio +async def test_delete_instance_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.DeleteInstanceRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_instance), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + response = await client.delete_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.DeleteInstanceRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_instance_async_from_dict(): + await test_delete_instance_async(request_type=dict) + + +def test_delete_instance_field_headers(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.DeleteInstanceRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_instance), "__call__") as call: + call.return_value = None + + client.delete_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_instance_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.DeleteInstanceRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_instance), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + await client.delete_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_instance_flattened(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_instance), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = None + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_instance(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_delete_instance_flattened_error(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_instance( + bigtable_instance_admin.DeleteInstanceRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_instance_flattened_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_instance), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_instance(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_delete_instance_flattened_error_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_instance( + bigtable_instance_admin.DeleteInstanceRequest(), name="name_value", + ) + + +def test_create_cluster( + transport: str = "grpc", request_type=bigtable_instance_admin.CreateClusterRequest +): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.create_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.CreateClusterRequest() + + # Establish that the response is the type that we expect. 
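+    # CreateCluster is a long-running operation, so the client returns an operation future rather than a Cluster directly.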
+ assert isinstance(response, future.Future) + + +def test_create_cluster_from_dict(): + test_create_cluster(request_type=dict) + + +@pytest.mark.asyncio +async def test_create_cluster_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.CreateClusterRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.create_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.CreateClusterRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_cluster_async_from_dict(): + await test_create_cluster_async(request_type=dict) + + +def test_create_cluster_field_headers(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.CreateClusterRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_cluster), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.create_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_cluster_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.CreateClusterRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_cluster), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.create_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
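+    # The routing parameter is carried in the x-goog-request-params metadata entry, mirroring request.parent.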
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_cluster_flattened(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_cluster( + parent="parent_value", + cluster_id="cluster_id_value", + cluster=instance.Cluster(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].cluster_id == "cluster_id_value" + + assert args[0].cluster == instance.Cluster(name="name_value") + + +def test_create_cluster_flattened_error(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_cluster( + bigtable_instance_admin.CreateClusterRequest(), + parent="parent_value", + cluster_id="cluster_id_value", + cluster=instance.Cluster(name="name_value"), + ) + + +@pytest.mark.asyncio +async def test_create_cluster_flattened_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_cluster( + parent="parent_value", + cluster_id="cluster_id_value", + cluster=instance.Cluster(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].cluster_id == "cluster_id_value" + + assert args[0].cluster == instance.Cluster(name="name_value") + + +@pytest.mark.asyncio +async def test_create_cluster_flattened_error_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_cluster( + bigtable_instance_admin.CreateClusterRequest(), + parent="parent_value", + cluster_id="cluster_id_value", + cluster=instance.Cluster(name="name_value"), + ) + + +def test_get_cluster( + transport: str = "grpc", request_type=bigtable_instance_admin.GetClusterRequest +): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = instance.Cluster( + name="name_value", + location="location_value", + state=instance.Cluster.State.READY, + serve_nodes=1181, + default_storage_type=common.StorageType.SSD, + ) + + response = client.get_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.GetClusterRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, instance.Cluster) + + assert response.name == "name_value" + + assert response.location == "location_value" + + assert response.state == instance.Cluster.State.READY + + assert response.serve_nodes == 1181 + + assert response.default_storage_type == common.StorageType.SSD + + +def test_get_cluster_from_dict(): + test_get_cluster(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_cluster_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.GetClusterRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + instance.Cluster( + name="name_value", + location="location_value", + state=instance.Cluster.State.READY, + serve_nodes=1181, + default_storage_type=common.StorageType.SSD, + ) + ) + + response = await client.get_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.GetClusterRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, instance.Cluster) + + assert response.name == "name_value" + + assert response.location == "location_value" + + assert response.state == instance.Cluster.State.READY + + assert response.serve_nodes == 1181 + + assert response.default_storage_type == common.StorageType.SSD + + +@pytest.mark.asyncio +async def test_get_cluster_async_from_dict(): + await test_get_cluster_async(request_type=dict) + + +def test_get_cluster_field_headers(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.GetClusterRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_cluster), "__call__") as call: + call.return_value = instance.Cluster() + + client.get_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_cluster_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.GetClusterRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_cluster), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.Cluster()) + + await client.get_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_cluster_flattened(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = instance.Cluster() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_cluster(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_get_cluster_flattened_error(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
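+    # Passing both a request object and flattened fields is ambiguous, so the client raises ValueError.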
+ with pytest.raises(ValueError): + client.get_cluster( + bigtable_instance_admin.GetClusterRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_cluster_flattened_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = instance.Cluster() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.Cluster()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_cluster(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_cluster_flattened_error_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_cluster( + bigtable_instance_admin.GetClusterRequest(), name="name_value", + ) + + +def test_list_clusters( + transport: str = "grpc", request_type=bigtable_instance_admin.ListClustersRequest +): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_instance_admin.ListClustersResponse( + failed_locations=["failed_locations_value"], + next_page_token="next_page_token_value", + ) + + response = client.list_clusters(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.ListClustersRequest() + + # Establish that the response is the type that we expect. + + assert response.raw_page is response + + assert isinstance(response, bigtable_instance_admin.ListClustersResponse) + + assert response.failed_locations == ["failed_locations_value"] + + assert response.next_page_token == "next_page_token_value" + + +def test_list_clusters_from_dict(): + test_list_clusters(request_type=dict) + + +@pytest.mark.asyncio +async def test_list_clusters_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.ListClustersRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: + # Designate an appropriate return value for the call. 
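+        # FakeUnaryUnaryCall wraps the designated response in an awaitable, mimicking a real gRPC async call.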
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_instance_admin.ListClustersResponse( + failed_locations=["failed_locations_value"], + next_page_token="next_page_token_value", + ) + ) + + response = await client.list_clusters(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.ListClustersRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable_instance_admin.ListClustersResponse) + + assert response.failed_locations == ["failed_locations_value"] + + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_clusters_async_from_dict(): + await test_list_clusters_async(request_type=dict) + + +def test_list_clusters_field_headers(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.ListClustersRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: + call.return_value = bigtable_instance_admin.ListClustersResponse() + + client.list_clusters(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_clusters_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.ListClustersRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_instance_admin.ListClustersResponse() + ) + + await client.list_clusters(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_clusters_flattened(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_instance_admin.ListClustersResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_clusters(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +def test_list_clusters_flattened_error(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_clusters( + bigtable_instance_admin.ListClustersRequest(), parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_clusters_flattened_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_instance_admin.ListClustersResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_instance_admin.ListClustersResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_clusters(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +@pytest.mark.asyncio +async def test_list_clusters_flattened_error_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_clusters( + bigtable_instance_admin.ListClustersRequest(), parent="parent_value", + ) + + +def test_update_cluster(transport: str = "grpc", request_type=instance.Cluster): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == instance.Cluster() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_update_cluster_from_dict(): + test_update_cluster(request_type=dict) + + +@pytest.mark.asyncio +async def test_update_cluster_async( + transport: str = "grpc_asyncio", request_type=instance.Cluster +): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
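+    # Patching the transport-level method keeps the test in-process; no RPC is actually sent.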
+ with mock.patch.object(type(client.transport.update_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == instance.Cluster() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_update_cluster_async_from_dict(): + await test_update_cluster_async(request_type=dict) + + +def test_update_cluster_field_headers(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = instance.Cluster() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_cluster), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_update_cluster_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = instance.Cluster() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_cluster), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_cluster( + transport: str = "grpc", request_type=bigtable_instance_admin.DeleteClusterRequest +): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.DeleteClusterRequest() + + # Establish that the response is the type that we expect. 
+ assert response is None + + +def test_delete_cluster_from_dict(): + test_delete_cluster(request_type=dict) + + +@pytest.mark.asyncio +async def test_delete_cluster_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.DeleteClusterRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + response = await client.delete_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.DeleteClusterRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_cluster_async_from_dict(): + await test_delete_cluster_async(request_type=dict) + + +def test_delete_cluster_field_headers(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.DeleteClusterRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call: + call.return_value = None + + client.delete_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_cluster_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.DeleteClusterRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + await client.delete_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_cluster_flattened(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = None + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_cluster(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_delete_cluster_flattened_error(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_cluster( + bigtable_instance_admin.DeleteClusterRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_cluster_flattened_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_cluster(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_delete_cluster_flattened_error_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_cluster( + bigtable_instance_admin.DeleteClusterRequest(), name="name_value", + ) + + +def test_create_app_profile( + transport: str = "grpc", + request_type=bigtable_instance_admin.CreateAppProfileRequest, +): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_app_profile), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = instance.AppProfile( + name="name_value", + etag="etag_value", + description="description_value", + multi_cluster_routing_use_any=None, + ) + + response = client.create_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.CreateAppProfileRequest() + + # Establish that the response is the type that we expect. 
+ + assert isinstance(response, instance.AppProfile) + + assert response.name == "name_value" + + assert response.etag == "etag_value" + + assert response.description == "description_value" + + +def test_create_app_profile_from_dict(): + test_create_app_profile(request_type=dict) + + +@pytest.mark.asyncio +async def test_create_app_profile_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.CreateAppProfileRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_app_profile), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + instance.AppProfile( + name="name_value", etag="etag_value", description="description_value", + ) + ) + + response = await client.create_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.CreateAppProfileRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, instance.AppProfile) + + assert response.name == "name_value" + + assert response.etag == "etag_value" + + assert response.description == "description_value" + + +@pytest.mark.asyncio +async def test_create_app_profile_async_from_dict(): + await test_create_app_profile_async(request_type=dict) + + +def test_create_app_profile_field_headers(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.CreateAppProfileRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_app_profile), "__call__" + ) as call: + call.return_value = instance.AppProfile() + + client.create_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_app_profile_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.CreateAppProfileRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_app_profile), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.AppProfile()) + + await client.create_app_profile(request) + + # Establish that the underlying gRPC stub method was called. 
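+    # For the async surface the test only verifies that at least one call was recorded.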
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_app_profile_flattened(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_app_profile), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = instance.AppProfile() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_app_profile( + parent="parent_value", + app_profile_id="app_profile_id_value", + app_profile=instance.AppProfile(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].app_profile_id == "app_profile_id_value" + + assert args[0].app_profile == instance.AppProfile(name="name_value") + + +def test_create_app_profile_flattened_error(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_app_profile( + bigtable_instance_admin.CreateAppProfileRequest(), + parent="parent_value", + app_profile_id="app_profile_id_value", + app_profile=instance.AppProfile(name="name_value"), + ) + + +@pytest.mark.asyncio +async def test_create_app_profile_flattened_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_app_profile), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = instance.AppProfile() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.AppProfile()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_app_profile( + parent="parent_value", + app_profile_id="app_profile_id_value", + app_profile=instance.AppProfile(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].app_profile_id == "app_profile_id_value" + + assert args[0].app_profile == instance.AppProfile(name="name_value") + + +@pytest.mark.asyncio +async def test_create_app_profile_flattened_error_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_app_profile( + bigtable_instance_admin.CreateAppProfileRequest(), + parent="parent_value", + app_profile_id="app_profile_id_value", + app_profile=instance.AppProfile(name="name_value"), + ) + + +def test_get_app_profile( + transport: str = "grpc", request_type=bigtable_instance_admin.GetAppProfileRequest +): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_app_profile), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = instance.AppProfile( + name="name_value", + etag="etag_value", + description="description_value", + multi_cluster_routing_use_any=None, + ) + + response = client.get_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.GetAppProfileRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, instance.AppProfile) + + assert response.name == "name_value" + + assert response.etag == "etag_value" + + assert response.description == "description_value" + + +def test_get_app_profile_from_dict(): + test_get_app_profile(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_app_profile_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.GetAppProfileRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_app_profile), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + instance.AppProfile( + name="name_value", etag="etag_value", description="description_value", + ) + ) + + response = await client.get_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.GetAppProfileRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, instance.AppProfile) + + assert response.name == "name_value" + + assert response.etag == "etag_value" + + assert response.description == "description_value" + + +@pytest.mark.asyncio +async def test_get_app_profile_async_from_dict(): + await test_get_app_profile_async(request_type=dict) + + +def test_get_app_profile_field_headers(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.GetAppProfileRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_app_profile), "__call__") as call: + call.return_value = instance.AppProfile() + + client.get_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_app_profile_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.GetAppProfileRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_app_profile), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.AppProfile()) + + await client.get_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_app_profile_flattened(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_app_profile), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = instance.AppProfile() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_app_profile(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_get_app_profile_flattened_error(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_app_profile( + bigtable_instance_admin.GetAppProfileRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_app_profile_flattened_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_app_profile), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = instance.AppProfile() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.AppProfile()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_app_profile(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_app_profile_flattened_error_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_app_profile( + bigtable_instance_admin.GetAppProfileRequest(), name="name_value", + ) + + +def test_list_app_profiles( + transport: str = "grpc", request_type=bigtable_instance_admin.ListAppProfilesRequest +): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_app_profiles), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_instance_admin.ListAppProfilesResponse( + next_page_token="next_page_token_value", + failed_locations=["failed_locations_value"], + ) + + response = client.list_app_profiles(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.ListAppProfilesRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, pagers.ListAppProfilesPager) + + assert response.next_page_token == "next_page_token_value" + + assert response.failed_locations == ["failed_locations_value"] + + +def test_list_app_profiles_from_dict(): + test_list_app_profiles(request_type=dict) + + +@pytest.mark.asyncio +async def test_list_app_profiles_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.ListAppProfilesRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_app_profiles), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_instance_admin.ListAppProfilesResponse( + next_page_token="next_page_token_value", + failed_locations=["failed_locations_value"], + ) + ) + + response = await client.list_app_profiles(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.ListAppProfilesRequest() + + # Establish that the response is the type that we expect. 
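+    # The async client wraps the raw ListAppProfilesResponse in an AsyncPager that fetches subsequent pages lazily.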
+ assert isinstance(response, pagers.ListAppProfilesAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + assert response.failed_locations == ["failed_locations_value"] + + +@pytest.mark.asyncio +async def test_list_app_profiles_async_from_dict(): + await test_list_app_profiles_async(request_type=dict) + + +def test_list_app_profiles_field_headers(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.ListAppProfilesRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_app_profiles), "__call__" + ) as call: + call.return_value = bigtable_instance_admin.ListAppProfilesResponse() + + client.list_app_profiles(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_app_profiles_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.ListAppProfilesRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_app_profiles), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_instance_admin.ListAppProfilesResponse() + ) + + await client.list_app_profiles(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_app_profiles_flattened(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_app_profiles), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_instance_admin.ListAppProfilesResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_app_profiles(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +def test_list_app_profiles_flattened_error(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list_app_profiles( + bigtable_instance_admin.ListAppProfilesRequest(), parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_app_profiles_flattened_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_app_profiles), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_instance_admin.ListAppProfilesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_instance_admin.ListAppProfilesResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_app_profiles(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +@pytest.mark.asyncio +async def test_list_app_profiles_flattened_error_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_app_profiles( + bigtable_instance_admin.ListAppProfilesRequest(), parent="parent_value", + ) + + +def test_list_app_profiles_pager(): + client = BigtableInstanceAdminClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_app_profiles), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + bigtable_instance_admin.ListAppProfilesResponse( + app_profiles=[ + instance.AppProfile(), + instance.AppProfile(), + instance.AppProfile(), + ], + next_page_token="abc", + ), + bigtable_instance_admin.ListAppProfilesResponse( + app_profiles=[], next_page_token="def", + ), + bigtable_instance_admin.ListAppProfilesResponse( + app_profiles=[instance.AppProfile(),], next_page_token="ghi", + ), + bigtable_instance_admin.ListAppProfilesResponse( + app_profiles=[instance.AppProfile(), instance.AppProfile(),], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_app_profiles(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, instance.AppProfile) for i in results) + + +def test_list_app_profiles_pages(): + client = BigtableInstanceAdminClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_app_profiles), "__call__" + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + bigtable_instance_admin.ListAppProfilesResponse( + app_profiles=[ + instance.AppProfile(), + instance.AppProfile(), + instance.AppProfile(), + ], + next_page_token="abc", + ), + bigtable_instance_admin.ListAppProfilesResponse( + app_profiles=[], next_page_token="def", + ), + bigtable_instance_admin.ListAppProfilesResponse( + app_profiles=[instance.AppProfile(),], next_page_token="ghi", + ), + bigtable_instance_admin.ListAppProfilesResponse( + app_profiles=[instance.AppProfile(), instance.AppProfile(),], + ), + RuntimeError, + ) + pages = list(client.list_app_profiles(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_app_profiles_async_pager(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_app_profiles), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + bigtable_instance_admin.ListAppProfilesResponse( + app_profiles=[ + instance.AppProfile(), + instance.AppProfile(), + instance.AppProfile(), + ], + next_page_token="abc", + ), + bigtable_instance_admin.ListAppProfilesResponse( + app_profiles=[], next_page_token="def", + ), + bigtable_instance_admin.ListAppProfilesResponse( + app_profiles=[instance.AppProfile(),], next_page_token="ghi", + ), + bigtable_instance_admin.ListAppProfilesResponse( + app_profiles=[instance.AppProfile(), instance.AppProfile(),], + ), + RuntimeError, + ) + async_pager = await client.list_app_profiles(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, instance.AppProfile) for i in responses) + + +@pytest.mark.asyncio +async def test_list_app_profiles_async_pages(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_app_profiles), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + bigtable_instance_admin.ListAppProfilesResponse( + app_profiles=[ + instance.AppProfile(), + instance.AppProfile(), + instance.AppProfile(), + ], + next_page_token="abc", + ), + bigtable_instance_admin.ListAppProfilesResponse( + app_profiles=[], next_page_token="def", + ), + bigtable_instance_admin.ListAppProfilesResponse( + app_profiles=[instance.AppProfile(),], next_page_token="ghi", + ), + bigtable_instance_admin.ListAppProfilesResponse( + app_profiles=[instance.AppProfile(), instance.AppProfile(),], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_app_profiles(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_update_app_profile( + transport: str = "grpc", + request_type=bigtable_instance_admin.UpdateAppProfileRequest, +): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_app_profile), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.update_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.UpdateAppProfileRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_update_app_profile_from_dict(): + test_update_app_profile(request_type=dict) + + +@pytest.mark.asyncio +async def test_update_app_profile_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.UpdateAppProfileRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_app_profile), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.update_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.UpdateAppProfileRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_update_app_profile_async_from_dict(): + await test_update_app_profile_async(request_type=dict) + + +def test_update_app_profile_field_headers(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = bigtable_instance_admin.UpdateAppProfileRequest() + request.app_profile.name = "app_profile.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_app_profile), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.update_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "app_profile.name=app_profile.name/value",) in kw[ + "metadata" + ] + + +@pytest.mark.asyncio +async def test_update_app_profile_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.UpdateAppProfileRequest() + request.app_profile.name = "app_profile.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_app_profile), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.update_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "app_profile.name=app_profile.name/value",) in kw[ + "metadata" + ] + + +def test_update_app_profile_flattened(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_app_profile), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_app_profile( + app_profile=instance.AppProfile(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].app_profile == instance.AppProfile(name="name_value") + + assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + + +def test_update_app_profile_flattened_error(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_app_profile( + bigtable_instance_admin.UpdateAppProfileRequest(), + app_profile=instance.AppProfile(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.asyncio +async def test_update_app_profile_flattened_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.update_app_profile), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_app_profile( + app_profile=instance.AppProfile(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].app_profile == instance.AppProfile(name="name_value") + + assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + + +@pytest.mark.asyncio +async def test_update_app_profile_flattened_error_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_app_profile( + bigtable_instance_admin.UpdateAppProfileRequest(), + app_profile=instance.AppProfile(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + +def test_delete_app_profile( + transport: str = "grpc", + request_type=bigtable_instance_admin.DeleteAppProfileRequest, +): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_app_profile), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.DeleteAppProfileRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_app_profile_from_dict(): + test_delete_app_profile(request_type=dict) + + +@pytest.mark.asyncio +async def test_delete_app_profile_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.DeleteAppProfileRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_app_profile), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + response = await client.delete_app_profile(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.DeleteAppProfileRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_app_profile_async_from_dict(): + await test_delete_app_profile_async(request_type=dict) + + +def test_delete_app_profile_field_headers(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.DeleteAppProfileRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_app_profile), "__call__" + ) as call: + call.return_value = None + + client.delete_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_app_profile_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.DeleteAppProfileRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_app_profile), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + await client.delete_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_app_profile_flattened(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_app_profile), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_app_profile(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_delete_app_profile_flattened_error(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete_app_profile( + bigtable_instance_admin.DeleteAppProfileRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_app_profile_flattened_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_app_profile), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_app_profile(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_delete_app_profile_flattened_error_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_app_profile( + bigtable_instance_admin.DeleteAppProfileRequest(), name="name_value", + ) + + +def test_get_iam_policy( + transport: str = "grpc", request_type=iam_policy.GetIamPolicyRequest +): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy.Policy(version=774, etag=b"etag_blob",) + + response = client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == iam_policy.GetIamPolicyRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, policy.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_get_iam_policy_from_dict(): + test_get_iam_policy(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_iam_policy_async( + transport: str = "grpc_asyncio", request_type=iam_policy.GetIamPolicyRequest +): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy.Policy(version=774, etag=b"etag_blob",) + ) + + response = await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == iam_policy.GetIamPolicyRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, policy.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_get_iam_policy_async_from_dict(): + await test_get_iam_policy_async(request_type=dict) + + +def test_get_iam_policy_field_headers(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy.Policy() + + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_iam_policy_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy.Policy()) + + await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_get_iam_policy_from_dict_foreign(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy.Policy() + + response = client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + + +def test_get_iam_policy_flattened(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy.Policy() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_iam_policy(resource="resource_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].resource == "resource_value" + + +def test_get_iam_policy_flattened_error(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_iam_policy( + iam_policy.GetIamPolicyRequest(), resource="resource_value", + ) + + +@pytest.mark.asyncio +async def test_get_iam_policy_flattened_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy.Policy() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy.Policy()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_iam_policy(resource="resource_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].resource == "resource_value" + + +@pytest.mark.asyncio +async def test_get_iam_policy_flattened_error_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_iam_policy( + iam_policy.GetIamPolicyRequest(), resource="resource_value", + ) + + +def test_set_iam_policy( + transport: str = "grpc", request_type=iam_policy.SetIamPolicyRequest +): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy.Policy(version=774, etag=b"etag_blob",) + + response = client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == iam_policy.SetIamPolicyRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, policy.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_set_iam_policy_from_dict(): + test_set_iam_policy(request_type=dict) + + +@pytest.mark.asyncio +async def test_set_iam_policy_async( + transport: str = "grpc_asyncio", request_type=iam_policy.SetIamPolicyRequest +): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy.Policy(version=774, etag=b"etag_blob",) + ) + + response = await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == iam_policy.SetIamPolicyRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, policy.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_set_iam_policy_async_from_dict(): + await test_set_iam_policy_async(request_type=dict) + + +def test_set_iam_policy_field_headers(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy.Policy() + + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_set_iam_policy_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy.Policy()) + + await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_set_iam_policy_from_dict_foreign(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy.Policy() + + response = client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy.Policy(version=774), + } + ) + call.assert_called() + + +def test_set_iam_policy_flattened(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy.Policy() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.set_iam_policy(resource="resource_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].resource == "resource_value" + + +def test_set_iam_policy_flattened_error(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_iam_policy( + iam_policy.SetIamPolicyRequest(), resource="resource_value", + ) + + +@pytest.mark.asyncio +async def test_set_iam_policy_flattened_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy.Policy() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy.Policy()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.set_iam_policy(resource="resource_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].resource == "resource_value" + + +@pytest.mark.asyncio +async def test_set_iam_policy_flattened_error_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.set_iam_policy( + iam_policy.SetIamPolicyRequest(), resource="resource_value", + ) + + +def test_test_iam_permissions( + transport: str = "grpc", request_type=iam_policy.TestIamPermissionsRequest +): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + + response = client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == iam_policy.TestIamPermissionsRequest() + + # Establish that the response is the type that we expect. 
+ + assert isinstance(response, iam_policy.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +def test_test_iam_permissions_from_dict(): + test_test_iam_permissions(request_type=dict) + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async( + transport: str = "grpc_asyncio", request_type=iam_policy.TestIamPermissionsRequest +): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy.TestIamPermissionsResponse(permissions=["permissions_value"],) + ) + + response = await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == iam_policy.TestIamPermissionsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async_from_dict(): + await test_test_iam_permissions_async(request_type=dict) + + +def test_test_iam_permissions_field_headers(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy.TestIamPermissionsResponse() + + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy.TestIamPermissionsResponse() + ) + + await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_test_iam_permissions_from_dict_foreign(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy.TestIamPermissionsResponse() + + response = client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + + +def test_test_iam_permissions_flattened(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy.TestIamPermissionsResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.test_iam_permissions( + resource="resource_value", permissions=["permissions_value"], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].resource == "resource_value" + + assert args[0].permissions == ["permissions_value"] + + +def test_test_iam_permissions_flattened_error(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.test_iam_permissions( + iam_policy.TestIamPermissionsRequest(), + resource="resource_value", + permissions=["permissions_value"], + ) + + +@pytest.mark.asyncio +async def test_test_iam_permissions_flattened_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy.TestIamPermissionsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy.TestIamPermissionsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.test_iam_permissions( + resource="resource_value", permissions=["permissions_value"], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].resource == "resource_value" + + assert args[0].permissions == ["permissions_value"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_flattened_error_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.test_iam_permissions( + iam_policy.TestIamPermissionsRequest(), + resource="resource_value", + permissions=["permissions_value"], + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.BigtableInstanceAdminGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.BigtableInstanceAdminGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BigtableInstanceAdminClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.BigtableInstanceAdminGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BigtableInstanceAdminClient( + client_options={"scopes": ["1", "2"]}, transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.BigtableInstanceAdminGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + client = BigtableInstanceAdminClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.BigtableInstanceAdminGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.BigtableInstanceAdminGrpcAsyncIOTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.BigtableInstanceAdminGrpcTransport, + transports.BigtableInstanceAdminGrpcAsyncIOTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + ) + assert isinstance(client.transport, transports.BigtableInstanceAdminGrpcTransport,) + + +def test_bigtable_instance_admin_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(exceptions.DuplicateCredentialArgs): + transport = transports.BigtableInstanceAdminTransport( + credentials=credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_bigtable_instance_admin_base_transport(): + # Instantiate the base transport. + with mock.patch( + "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.BigtableInstanceAdminTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.BigtableInstanceAdminTransport( + credentials=credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + "create_instance", + "get_instance", + "list_instances", + "update_instance", + "partial_update_instance", + "delete_instance", + "create_cluster", + "get_cluster", + "list_clusters", + "update_cluster", + "delete_cluster", + "create_app_profile", + "get_app_profile", + "list_app_profiles", + "update_app_profile", + "delete_app_profile", + "get_iam_policy", + "set_iam_policy", + "test_iam_permissions", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + +def test_bigtable_instance_admin_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + auth, "load_credentials_from_file" + ) as load_creds, mock.patch( + "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.BigtableInstanceAdminTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.BigtableInstanceAdminTransport( + credentials_file="credentials.json", quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=( + "https://www.googleapis.com/auth/bigtable.admin", + "https://www.googleapis.com/auth/bigtable.admin.cluster", + "https://www.googleapis.com/auth/bigtable.admin.instance", + "https://www.googleapis.com/auth/cloud-bigtable.admin", + "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ), + quota_project_id="octopus", + ) + + +def test_bigtable_instance_admin_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(auth, "default") as adc, mock.patch( + "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.BigtableInstanceAdminTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.BigtableInstanceAdminTransport() + adc.assert_called_once() + + +def test_bigtable_instance_admin_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + BigtableInstanceAdminClient() + adc.assert_called_once_with( + scopes=( + "https://www.googleapis.com/auth/bigtable.admin", + "https://www.googleapis.com/auth/bigtable.admin.cluster", + "https://www.googleapis.com/auth/bigtable.admin.instance", + "https://www.googleapis.com/auth/cloud-bigtable.admin", + "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ), + quota_project_id=None, + ) + + +def test_bigtable_instance_admin_transport_auth_adc(): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transports.BigtableInstanceAdminGrpcTransport( + host="squid.clam.whelk", quota_project_id="octopus" + ) + adc.assert_called_once_with( + scopes=( + "https://www.googleapis.com/auth/bigtable.admin", + "https://www.googleapis.com/auth/bigtable.admin.cluster", + "https://www.googleapis.com/auth/bigtable.admin.instance", + "https://www.googleapis.com/auth/cloud-bigtable.admin", + "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.BigtableInstanceAdminGrpcTransport, + transports.BigtableInstanceAdminGrpcAsyncIOTransport, + ], +) +def test_bigtable_instance_admin_grpc_transport_client_cert_source_for_mtls( + transport_class, +): + cred = credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds, + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=( + "https://www.googleapis.com/auth/bigtable.admin", + "https://www.googleapis.com/auth/bigtable.admin.cluster", + "https://www.googleapis.com/auth/bigtable.admin.instance", + "https://www.googleapis.com/auth/cloud-bigtable.admin", + "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ), + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback, + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, private_key=expected_key + ) + + +def test_bigtable_instance_admin_host_no_port(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="bigtableadmin.googleapis.com" + ), + ) + assert client.transport._host == "bigtableadmin.googleapis.com:443" + + +def test_bigtable_instance_admin_host_with_port(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="bigtableadmin.googleapis.com:8000" + ), + ) + assert client.transport._host == "bigtableadmin.googleapis.com:8000" + + +def test_bigtable_instance_admin_grpc_transport_channel(): + channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. 
+ transport = transports.BigtableInstanceAdminGrpcTransport( + host="squid.clam.whelk", channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_bigtable_instance_admin_grpc_asyncio_transport_channel(): + channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.BigtableInstanceAdminGrpcAsyncIOTransport( + host="squid.clam.whelk", channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize( + "transport_class", + [ + transports.BigtableInstanceAdminGrpcTransport, + transports.BigtableInstanceAdminGrpcAsyncIOTransport, + ], +) +def test_bigtable_instance_admin_transport_channel_mtls_with_client_cert_source( + transport_class, +): + with mock.patch( + "grpc.ssl_channel_credentials", autospec=True + ) as grpc_ssl_channel_cred: + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(auth, "default") as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=( + "https://www.googleapis.com/auth/bigtable.admin", + "https://www.googleapis.com/auth/bigtable.admin.cluster", + "https://www.googleapis.com/auth/bigtable.admin.instance", + "https://www.googleapis.com/auth/cloud-bigtable.admin", + "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. 
+@pytest.mark.parametrize( + "transport_class", + [ + transports.BigtableInstanceAdminGrpcTransport, + transports.BigtableInstanceAdminGrpcAsyncIOTransport, + ], +) +def test_bigtable_instance_admin_transport_channel_mtls_with_adc(transport_class): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=( + "https://www.googleapis.com/auth/bigtable.admin", + "https://www.googleapis.com/auth/bigtable.admin.cluster", + "https://www.googleapis.com/auth/bigtable.admin.instance", + "https://www.googleapis.com/auth/cloud-bigtable.admin", + "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_bigtable_instance_admin_grpc_lro_client(): + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance(transport.operations_client, operations_v1.OperationsClient,) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_bigtable_instance_admin_grpc_lro_async_client(): + client = BigtableInstanceAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport="grpc_asyncio", + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_app_profile_path(): + project = "squid" + instance = "clam" + app_profile = "whelk" + + expected = "projects/{project}/instances/{instance}/appProfiles/{app_profile}".format( + project=project, instance=instance, app_profile=app_profile, + ) + actual = BigtableInstanceAdminClient.app_profile_path( + project, instance, app_profile + ) + assert expected == actual + + +def test_parse_app_profile_path(): + expected = { + "project": "octopus", + "instance": "oyster", + "app_profile": "nudibranch", + } + path = BigtableInstanceAdminClient.app_profile_path(**expected) + + # Check that the path construction is reversible. 
+ actual = BigtableInstanceAdminClient.parse_app_profile_path(path) + assert expected == actual + + +def test_cluster_path(): + project = "cuttlefish" + instance = "mussel" + cluster = "winkle" + + expected = "projects/{project}/instances/{instance}/clusters/{cluster}".format( + project=project, instance=instance, cluster=cluster, + ) + actual = BigtableInstanceAdminClient.cluster_path(project, instance, cluster) + assert expected == actual + + +def test_parse_cluster_path(): + expected = { + "project": "nautilus", + "instance": "scallop", + "cluster": "abalone", + } + path = BigtableInstanceAdminClient.cluster_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableInstanceAdminClient.parse_cluster_path(path) + assert expected == actual + + +def test_instance_path(): + project = "squid" + instance = "clam" + + expected = "projects/{project}/instances/{instance}".format( + project=project, instance=instance, + ) + actual = BigtableInstanceAdminClient.instance_path(project, instance) + assert expected == actual + + +def test_parse_instance_path(): + expected = { + "project": "whelk", + "instance": "octopus", + } + path = BigtableInstanceAdminClient.instance_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableInstanceAdminClient.parse_instance_path(path) + assert expected == actual + + +def test_common_billing_account_path(): + billing_account = "oyster" + + expected = "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + actual = BigtableInstanceAdminClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "nudibranch", + } + path = BigtableInstanceAdminClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableInstanceAdminClient.parse_common_billing_account_path(path) + assert expected == actual + + +def test_common_folder_path(): + folder = "cuttlefish" + + expected = "folders/{folder}".format(folder=folder,) + actual = BigtableInstanceAdminClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "mussel", + } + path = BigtableInstanceAdminClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableInstanceAdminClient.parse_common_folder_path(path) + assert expected == actual + + +def test_common_organization_path(): + organization = "winkle" + + expected = "organizations/{organization}".format(organization=organization,) + actual = BigtableInstanceAdminClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nautilus", + } + path = BigtableInstanceAdminClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableInstanceAdminClient.parse_common_organization_path(path) + assert expected == actual + + +def test_common_project_path(): + project = "scallop" + + expected = "projects/{project}".format(project=project,) + actual = BigtableInstanceAdminClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "abalone", + } + path = BigtableInstanceAdminClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = BigtableInstanceAdminClient.parse_common_project_path(path) + assert expected == actual + + +def test_common_location_path(): + project = "squid" + location = "clam" + + expected = "projects/{project}/locations/{location}".format( + project=project, location=location, + ) + actual = BigtableInstanceAdminClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "whelk", + "location": "octopus", + } + path = BigtableInstanceAdminClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableInstanceAdminClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.BigtableInstanceAdminTransport, "_prep_wrapped_messages" + ) as prep: + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.BigtableInstanceAdminTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = BigtableInstanceAdminClient.get_transport_class() + transport = transport_class( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py new file mode 100644 index 000000000000..92bdb8718436 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -0,0 +1,6067 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
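+#
+# The tests below cover BigtableTableAdminClient / BigtableTableAdminAsyncClient:
+# client options and mTLS endpoint selection, each table-admin RPC in sync and
+# async form, flattened arguments, pagers, and resource path helpers.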
+# + +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from google import auth +from google.api_core import client_options +from google.api_core import exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.auth import credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + BigtableTableAdminAsyncClient, +) +from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + BigtableTableAdminClient, +) +from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import pagers +from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import transports +from google.cloud.bigtable_admin_v2.types import bigtable_table_admin +from google.cloud.bigtable_admin_v2.types import table +from google.cloud.bigtable_admin_v2.types import table as gba_table +from google.iam.v1 import iam_policy_pb2 as iam_policy # type: ignore +from google.iam.v1 import options_pb2 as options # type: ignore +from google.iam.v1 import policy_pb2 as policy # type: ignore +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import duration_pb2 as duration # type: ignore +from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore +from google.type import expr_pb2 as expr # type: ignore + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
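+# For example, "bigtableadmin.googleapis.com" is returned unchanged, while a
+# "localhost" default endpoint is swapped for "foo.googleapis.com" so that a
+# distinct "foo.mtls.googleapis.com" endpoint can be derived in the tests.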
+def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert BigtableTableAdminClient._get_default_mtls_endpoint(None) is None + assert ( + BigtableTableAdminClient._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + BigtableTableAdminClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + BigtableTableAdminClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + BigtableTableAdminClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + BigtableTableAdminClient._get_default_mtls_endpoint(non_googleapi) + == non_googleapi + ) + + +def test_bigtable_table_admin_client_from_service_account_info(): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_info" + ) as factory: + factory.return_value = creds + info = {"valid": True} + client = BigtableTableAdminClient.from_service_account_info(info) + assert client.transport._credentials == creds + + assert client.transport._host == "bigtableadmin.googleapis.com:443" + + +@pytest.mark.parametrize( + "client_class", [BigtableTableAdminClient, BigtableTableAdminAsyncClient,] +) +def test_bigtable_table_admin_client_from_service_account_file(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + + assert client.transport._host == "bigtableadmin.googleapis.com:443" + + +def test_bigtable_table_admin_client_get_transport_class(): + transport = BigtableTableAdminClient.get_transport_class() + available_transports = [ + transports.BigtableTableAdminGrpcTransport, + ] + assert transport in available_transports + + transport = BigtableTableAdminClient.get_transport_class("grpc") + assert transport == transports.BigtableTableAdminGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (BigtableTableAdminClient, transports.BigtableTableAdminGrpcTransport, "grpc"), + ( + BigtableTableAdminAsyncClient, + transports.BigtableTableAdminGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +@mock.patch.object( + BigtableTableAdminClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(BigtableTableAdminClient), +) +@mock.patch.object( + BigtableTableAdminAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(BigtableTableAdminAsyncClient), +) +def test_bigtable_table_admin_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. 
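+    # A ready-made transport instance should be used as-is, so
+    # get_transport_class must not be consulted in that case.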
+ with mock.patch.object(BigtableTableAdminClient, "get_transport_class") as gtc: + transport = transport_class(credentials=credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(BigtableTableAdminClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
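+    # Presumably only "true" and "false" are accepted; any other value should
+    # make the client raise ValueError before any transport is constructed.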
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + ( + BigtableTableAdminClient, + transports.BigtableTableAdminGrpcTransport, + "grpc", + "true", + ), + ( + BigtableTableAdminAsyncClient, + transports.BigtableTableAdminGrpcAsyncIOTransport, + "grpc_asyncio", + "true", + ), + ( + BigtableTableAdminClient, + transports.BigtableTableAdminGrpcTransport, + "grpc", + "false", + ), + ( + BigtableTableAdminAsyncClient, + transports.BigtableTableAdminGrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + ], +) +@mock.patch.object( + BigtableTableAdminClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(BigtableTableAdminClient), +) +@mock.patch.object( + BigtableTableAdminAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(BigtableTableAdminAsyncClient), +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_bigtable_table_admin_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
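+    # No explicit client_cert_source is supplied below, so the client is
+    # expected to fall back to the ADC default certificate reported by
+    # mtls.has_default_client_cert_source() / mtls.default_client_cert_source().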
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=client_cert_source_callback, + ): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (BigtableTableAdminClient, transports.BigtableTableAdminGrpcTransport, "grpc"), + ( + BigtableTableAdminAsyncClient, + transports.BigtableTableAdminGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_bigtable_table_admin_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. + options = client_options.ClientOptions(scopes=["1", "2"],) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (BigtableTableAdminClient, transports.BigtableTableAdminGrpcTransport, "grpc"), + ( + BigtableTableAdminAsyncClient, + transports.BigtableTableAdminGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_bigtable_table_admin_client_client_options_credentials_file( + client_class, transport_class, transport_name +): + # Check the case credentials file is provided. 
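+    # The file path is not opened here; it should simply be forwarded verbatim
+    # to the transport constructor as credentials_file.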
+ options = client_options.ClientOptions(credentials_file="credentials.json") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_bigtable_table_admin_client_client_options_from_dict(): + with mock.patch( + "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.BigtableTableAdminGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = BigtableTableAdminClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_create_table( + transport: str = "grpc", request_type=bigtable_table_admin.CreateTableRequest +): + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gba_table.Table( + name="name_value", granularity=gba_table.Table.TimestampGranularity.MILLIS, + ) + + response = client.create_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.CreateTableRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, gba_table.Table) + + assert response.name == "name_value" + + assert response.granularity == gba_table.Table.TimestampGranularity.MILLIS + + +def test_create_table_from_dict(): + test_create_table(request_type=dict) + + +@pytest.mark.asyncio +async def test_create_table_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.CreateTableRequest, +): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gba_table.Table( + name="name_value", + granularity=gba_table.Table.TimestampGranularity.MILLIS, + ) + ) + + response = await client.create_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.CreateTableRequest() + + # Establish that the response is the type that we expect. 
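+    # FakeUnaryUnaryCall is awaitable, so awaiting the client call should
+    # resolve to the plain gba_table.Table message designated above.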
+ assert isinstance(response, gba_table.Table) + + assert response.name == "name_value" + + assert response.granularity == gba_table.Table.TimestampGranularity.MILLIS + + +@pytest.mark.asyncio +async def test_create_table_async_from_dict(): + await test_create_table_async(request_type=dict) + + +def test_create_table_field_headers(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.CreateTableRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_table), "__call__") as call: + call.return_value = gba_table.Table() + + client.create_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_table_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.CreateTableRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_table), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gba_table.Table()) + + await client.create_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_table_flattened(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gba_table.Table() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_table( + parent="parent_value", + table_id="table_id_value", + table=gba_table.Table(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].table_id == "table_id_value" + + assert args[0].table == gba_table.Table(name="name_value") + + +def test_create_table_flattened_error(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
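+    # Mixing the two calling conventions is ambiguous, so the client should
+    # raise ValueError without invoking the underlying RPC.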
+ with pytest.raises(ValueError): + client.create_table( + bigtable_table_admin.CreateTableRequest(), + parent="parent_value", + table_id="table_id_value", + table=gba_table.Table(name="name_value"), + ) + + +@pytest.mark.asyncio +async def test_create_table_flattened_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gba_table.Table() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gba_table.Table()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_table( + parent="parent_value", + table_id="table_id_value", + table=gba_table.Table(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].table_id == "table_id_value" + + assert args[0].table == gba_table.Table(name="name_value") + + +@pytest.mark.asyncio +async def test_create_table_flattened_error_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_table( + bigtable_table_admin.CreateTableRequest(), + parent="parent_value", + table_id="table_id_value", + table=gba_table.Table(name="name_value"), + ) + + +def test_create_table_from_snapshot( + transport: str = "grpc", + request_type=bigtable_table_admin.CreateTableFromSnapshotRequest, +): + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_table_from_snapshot), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.create_table_from_snapshot(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.CreateTableFromSnapshotRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_table_from_snapshot_from_dict(): + test_create_table_from_snapshot(request_type=dict) + + +@pytest.mark.asyncio +async def test_create_table_from_snapshot_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.CreateTableFromSnapshotRequest, +): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
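+    # CreateTableFromSnapshot is a long-running operation: the stub returns an
+    # operations_pb2.Operation, which the client is expected to wrap in an
+    # api-core future (asserted below).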
+ with mock.patch.object( + type(client.transport.create_table_from_snapshot), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.create_table_from_snapshot(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.CreateTableFromSnapshotRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_table_from_snapshot_async_from_dict(): + await test_create_table_from_snapshot_async(request_type=dict) + + +def test_create_table_from_snapshot_field_headers(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.CreateTableFromSnapshotRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_table_from_snapshot), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.create_table_from_snapshot(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_table_from_snapshot_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.CreateTableFromSnapshotRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_table_from_snapshot), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.create_table_from_snapshot(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_table_from_snapshot_flattened(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_table_from_snapshot), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
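+        # The flattened kwargs should be copied onto a
+        # CreateTableFromSnapshotRequest before the stub is invoked, which is
+        # what the args[0] assertions below verify.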
+ client.create_table_from_snapshot( + parent="parent_value", + table_id="table_id_value", + source_snapshot="source_snapshot_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].table_id == "table_id_value" + + assert args[0].source_snapshot == "source_snapshot_value" + + +def test_create_table_from_snapshot_flattened_error(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_table_from_snapshot( + bigtable_table_admin.CreateTableFromSnapshotRequest(), + parent="parent_value", + table_id="table_id_value", + source_snapshot="source_snapshot_value", + ) + + +@pytest.mark.asyncio +async def test_create_table_from_snapshot_flattened_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_table_from_snapshot), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_table_from_snapshot( + parent="parent_value", + table_id="table_id_value", + source_snapshot="source_snapshot_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].table_id == "table_id_value" + + assert args[0].source_snapshot == "source_snapshot_value" + + +@pytest.mark.asyncio +async def test_create_table_from_snapshot_flattened_error_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_table_from_snapshot( + bigtable_table_admin.CreateTableFromSnapshotRequest(), + parent="parent_value", + table_id="table_id_value", + source_snapshot="source_snapshot_value", + ) + + +def test_list_tables( + transport: str = "grpc", request_type=bigtable_table_admin.ListTablesRequest +): + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_tables), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_table_admin.ListTablesResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_tables(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.ListTablesRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, pagers.ListTablesPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_tables_from_dict(): + test_list_tables(request_type=dict) + + +@pytest.mark.asyncio +async def test_list_tables_async( + transport: str = "grpc_asyncio", request_type=bigtable_table_admin.ListTablesRequest +): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_tables), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.ListTablesResponse( + next_page_token="next_page_token_value", + ) + ) + + response = await client.list_tables(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.ListTablesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListTablesAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_tables_async_from_dict(): + await test_list_tables_async(request_type=dict) + + +def test_list_tables_field_headers(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.ListTablesRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_tables), "__call__") as call: + call.return_value = bigtable_table_admin.ListTablesResponse() + + client.list_tables(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_tables_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.ListTablesRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_tables), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.ListTablesResponse() + ) + + await client.list_tables(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
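+    # The request's parent should be mirrored into the x-goog-request-params
+    # metadata entry so the backend can route the call.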
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_tables_flattened(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_tables), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_table_admin.ListTablesResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_tables(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +def test_list_tables_flattened_error(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_tables( + bigtable_table_admin.ListTablesRequest(), parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_tables_flattened_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_tables), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_table_admin.ListTablesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.ListTablesResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_tables(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +@pytest.mark.asyncio +async def test_list_tables_flattened_error_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_tables( + bigtable_table_admin.ListTablesRequest(), parent="parent_value", + ) + + +def test_list_tables_pager(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_tables), "__call__") as call: + # Set the response to a series of pages. 
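+        # Four fake pages are returned in order (3, 0, 1, and 2 tables); the
+        # pager should keep following next_page_token while it is non-empty,
+        # yielding six Table items in total. The trailing RuntimeError would
+        # only surface if an extra page were requested.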
+ call.side_effect = ( + bigtable_table_admin.ListTablesResponse( + tables=[table.Table(), table.Table(), table.Table(),], + next_page_token="abc", + ), + bigtable_table_admin.ListTablesResponse(tables=[], next_page_token="def",), + bigtable_table_admin.ListTablesResponse( + tables=[table.Table(),], next_page_token="ghi", + ), + bigtable_table_admin.ListTablesResponse( + tables=[table.Table(), table.Table(),], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_tables(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, table.Table) for i in results) + + +def test_list_tables_pages(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_tables), "__call__") as call: + # Set the response to a series of pages. + call.side_effect = ( + bigtable_table_admin.ListTablesResponse( + tables=[table.Table(), table.Table(), table.Table(),], + next_page_token="abc", + ), + bigtable_table_admin.ListTablesResponse(tables=[], next_page_token="def",), + bigtable_table_admin.ListTablesResponse( + tables=[table.Table(),], next_page_token="ghi", + ), + bigtable_table_admin.ListTablesResponse( + tables=[table.Table(), table.Table(),], + ), + RuntimeError, + ) + pages = list(client.list_tables(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_tables_async_pager(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tables), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + bigtable_table_admin.ListTablesResponse( + tables=[table.Table(), table.Table(), table.Table(),], + next_page_token="abc", + ), + bigtable_table_admin.ListTablesResponse(tables=[], next_page_token="def",), + bigtable_table_admin.ListTablesResponse( + tables=[table.Table(),], next_page_token="ghi", + ), + bigtable_table_admin.ListTablesResponse( + tables=[table.Table(), table.Table(),], + ), + RuntimeError, + ) + async_pager = await client.list_tables(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, table.Table) for i in responses) + + +@pytest.mark.asyncio +async def test_list_tables_async_pages(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tables), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + bigtable_table_admin.ListTablesResponse( + tables=[table.Table(), table.Table(), table.Table(),], + next_page_token="abc", + ), + bigtable_table_admin.ListTablesResponse(tables=[], next_page_token="def",), + bigtable_table_admin.ListTablesResponse( + tables=[table.Table(),], next_page_token="ghi", + ), + bigtable_table_admin.ListTablesResponse( + tables=[table.Table(), table.Table(),], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_tables(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_get_table( + transport: str = "grpc", request_type=bigtable_table_admin.GetTableRequest +): + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = table.Table( + name="name_value", granularity=table.Table.TimestampGranularity.MILLIS, + ) + + response = client.get_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.GetTableRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, table.Table) + + assert response.name == "name_value" + + assert response.granularity == table.Table.TimestampGranularity.MILLIS + + +def test_get_table_from_dict(): + test_get_table(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_table_async( + transport: str = "grpc_asyncio", request_type=bigtable_table_admin.GetTableRequest +): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + table.Table( + name="name_value", granularity=table.Table.TimestampGranularity.MILLIS, + ) + ) + + response = await client.get_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.GetTableRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, table.Table) + + assert response.name == "name_value" + + assert response.granularity == table.Table.TimestampGranularity.MILLIS + + +@pytest.mark.asyncio +async def test_get_table_async_from_dict(): + await test_get_table_async(request_type=dict) + + +def test_get_table_field_headers(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = bigtable_table_admin.GetTableRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_table), "__call__") as call: + call.return_value = table.Table() + + client.get_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_table_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.GetTableRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_table), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Table()) + + await client.get_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_table_flattened(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = table.Table() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_table(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_get_table_flattened_error(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_table( + bigtable_table_admin.GetTableRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_table_flattened_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = table.Table() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Table()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_table(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_table_flattened_error_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_table( + bigtable_table_admin.GetTableRequest(), name="name_value", + ) + + +def test_delete_table( + transport: str = "grpc", request_type=bigtable_table_admin.DeleteTableRequest +): + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.DeleteTableRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_table_from_dict(): + test_delete_table(request_type=dict) + + +@pytest.mark.asyncio +async def test_delete_table_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.DeleteTableRequest, +): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + response = await client.delete_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.DeleteTableRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_table_async_from_dict(): + await test_delete_table_async(request_type=dict) + + +def test_delete_table_field_headers(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.DeleteTableRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_table), "__call__") as call: + call.return_value = None + + client.delete_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_table_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.DeleteTableRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_table), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + await client.delete_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_table_flattened(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_table(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_delete_table_flattened_error(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_table( + bigtable_table_admin.DeleteTableRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_table_flattened_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_table(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_delete_table_flattened_error_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.delete_table( + bigtable_table_admin.DeleteTableRequest(), name="name_value", + ) + + +def test_modify_column_families( + transport: str = "grpc", + request_type=bigtable_table_admin.ModifyColumnFamiliesRequest, +): + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.modify_column_families), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = table.Table( + name="name_value", granularity=table.Table.TimestampGranularity.MILLIS, + ) + + response = client.modify_column_families(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.ModifyColumnFamiliesRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, table.Table) + + assert response.name == "name_value" + + assert response.granularity == table.Table.TimestampGranularity.MILLIS + + +def test_modify_column_families_from_dict(): + test_modify_column_families(request_type=dict) + + +@pytest.mark.asyncio +async def test_modify_column_families_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.ModifyColumnFamiliesRequest, +): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.modify_column_families), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + table.Table( + name="name_value", granularity=table.Table.TimestampGranularity.MILLIS, + ) + ) + + response = await client.modify_column_families(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.ModifyColumnFamiliesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, table.Table) + + assert response.name == "name_value" + + assert response.granularity == table.Table.TimestampGranularity.MILLIS + + +@pytest.mark.asyncio +async def test_modify_column_families_async_from_dict(): + await test_modify_column_families_async(request_type=dict) + + +def test_modify_column_families_field_headers(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.ModifyColumnFamiliesRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.modify_column_families), "__call__" + ) as call: + call.return_value = table.Table() + + client.modify_column_families(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_modify_column_families_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.ModifyColumnFamiliesRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.modify_column_families), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Table()) + + await client.modify_column_families(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_modify_column_families_flattened(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.modify_column_families), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = table.Table() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.modify_column_families( + name="name_value", + modifications=[ + bigtable_table_admin.ModifyColumnFamiliesRequest.Modification( + id="id_value" + ) + ], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + assert args[0].modifications == [ + bigtable_table_admin.ModifyColumnFamiliesRequest.Modification(id="id_value") + ] + + +def test_modify_column_families_flattened_error(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.modify_column_families( + bigtable_table_admin.ModifyColumnFamiliesRequest(), + name="name_value", + modifications=[ + bigtable_table_admin.ModifyColumnFamiliesRequest.Modification( + id="id_value" + ) + ], + ) + + +@pytest.mark.asyncio +async def test_modify_column_families_flattened_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.modify_column_families), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = table.Table() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Table()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.modify_column_families( + name="name_value", + modifications=[ + bigtable_table_admin.ModifyColumnFamiliesRequest.Modification( + id="id_value" + ) + ], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + assert args[0].modifications == [ + bigtable_table_admin.ModifyColumnFamiliesRequest.Modification(id="id_value") + ] + + +@pytest.mark.asyncio +async def test_modify_column_families_flattened_error_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.modify_column_families( + bigtable_table_admin.ModifyColumnFamiliesRequest(), + name="name_value", + modifications=[ + bigtable_table_admin.ModifyColumnFamiliesRequest.Modification( + id="id_value" + ) + ], + ) + + +def test_drop_row_range( + transport: str = "grpc", request_type=bigtable_table_admin.DropRowRangeRequest +): + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.drop_row_range(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.DropRowRangeRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_drop_row_range_from_dict(): + test_drop_row_range(request_type=dict) + + +@pytest.mark.asyncio +async def test_drop_row_range_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.DropRowRangeRequest, +): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + response = await client.drop_row_range(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.DropRowRangeRequest() + + # Establish that the response is the type that we expect. 
+ assert response is None + + +@pytest.mark.asyncio +async def test_drop_row_range_async_from_dict(): + await test_drop_row_range_async(request_type=dict) + + +def test_drop_row_range_field_headers(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.DropRowRangeRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: + call.return_value = None + + client.drop_row_range(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_drop_row_range_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.DropRowRangeRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + await client.drop_row_range(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_generate_consistency_token( + transport: str = "grpc", + request_type=bigtable_table_admin.GenerateConsistencyTokenRequest, +): + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.generate_consistency_token), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse( + consistency_token="consistency_token_value", + ) + + response = client.generate_consistency_token(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.GenerateConsistencyTokenRequest() + + # Establish that the response is the type that we expect. 
+ + assert isinstance(response, bigtable_table_admin.GenerateConsistencyTokenResponse) + + assert response.consistency_token == "consistency_token_value" + + +def test_generate_consistency_token_from_dict(): + test_generate_consistency_token(request_type=dict) + + +@pytest.mark.asyncio +async def test_generate_consistency_token_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.GenerateConsistencyTokenRequest, +): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.generate_consistency_token), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.GenerateConsistencyTokenResponse( + consistency_token="consistency_token_value", + ) + ) + + response = await client.generate_consistency_token(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.GenerateConsistencyTokenRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable_table_admin.GenerateConsistencyTokenResponse) + + assert response.consistency_token == "consistency_token_value" + + +@pytest.mark.asyncio +async def test_generate_consistency_token_async_from_dict(): + await test_generate_consistency_token_async(request_type=dict) + + +def test_generate_consistency_token_field_headers(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.GenerateConsistencyTokenRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.generate_consistency_token), "__call__" + ) as call: + call.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() + + client.generate_consistency_token(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_generate_consistency_token_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.GenerateConsistencyTokenRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.generate_consistency_token), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.GenerateConsistencyTokenResponse() + ) + + await client.generate_consistency_token(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_generate_consistency_token_flattened(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.generate_consistency_token), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.generate_consistency_token(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_generate_consistency_token_flattened_error(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.generate_consistency_token( + bigtable_table_admin.GenerateConsistencyTokenRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_generate_consistency_token_flattened_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.generate_consistency_token), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.GenerateConsistencyTokenResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.generate_consistency_token(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_generate_consistency_token_flattened_error_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.generate_consistency_token( + bigtable_table_admin.GenerateConsistencyTokenRequest(), name="name_value", + ) + + +def test_check_consistency( + transport: str = "grpc", request_type=bigtable_table_admin.CheckConsistencyRequest +): + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.check_consistency), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_table_admin.CheckConsistencyResponse( + consistent=True, + ) + + response = client.check_consistency(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.CheckConsistencyRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, bigtable_table_admin.CheckConsistencyResponse) + + assert response.consistent is True + + +def test_check_consistency_from_dict(): + test_check_consistency(request_type=dict) + + +@pytest.mark.asyncio +async def test_check_consistency_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.CheckConsistencyRequest, +): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.check_consistency), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.CheckConsistencyResponse(consistent=True,) + ) + + response = await client.check_consistency(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.CheckConsistencyRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable_table_admin.CheckConsistencyResponse) + + assert response.consistent is True + + +@pytest.mark.asyncio +async def test_check_consistency_async_from_dict(): + await test_check_consistency_async(request_type=dict) + + +def test_check_consistency_field_headers(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.CheckConsistencyRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.check_consistency), "__call__" + ) as call: + call.return_value = bigtable_table_admin.CheckConsistencyResponse() + + client.check_consistency(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_check_consistency_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.CheckConsistencyRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.check_consistency), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.CheckConsistencyResponse() + ) + + await client.check_consistency(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_check_consistency_flattened(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.check_consistency), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_table_admin.CheckConsistencyResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.check_consistency( + name="name_value", consistency_token="consistency_token_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + assert args[0].consistency_token == "consistency_token_value" + + +def test_check_consistency_flattened_error(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.check_consistency( + bigtable_table_admin.CheckConsistencyRequest(), + name="name_value", + consistency_token="consistency_token_value", + ) + + +@pytest.mark.asyncio +async def test_check_consistency_flattened_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.check_consistency), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_table_admin.CheckConsistencyResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.CheckConsistencyResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.check_consistency( + name="name_value", consistency_token="consistency_token_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + assert args[0].consistency_token == "consistency_token_value" + + +@pytest.mark.asyncio +async def test_check_consistency_flattened_error_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.check_consistency( + bigtable_table_admin.CheckConsistencyRequest(), + name="name_value", + consistency_token="consistency_token_value", + ) + + +def test_snapshot_table( + transport: str = "grpc", request_type=bigtable_table_admin.SnapshotTableRequest +): + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.snapshot_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.SnapshotTableRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_snapshot_table_from_dict(): + test_snapshot_table(request_type=dict) + + +@pytest.mark.asyncio +async def test_snapshot_table_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.SnapshotTableRequest, +): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.snapshot_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.SnapshotTableRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_snapshot_table_async_from_dict(): + await test_snapshot_table_async(request_type=dict) + + +def test_snapshot_table_field_headers(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.SnapshotTableRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.snapshot_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_snapshot_table_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.SnapshotTableRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.snapshot_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_snapshot_table_flattened(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.snapshot_table( + name="name_value", + cluster="cluster_value", + snapshot_id="snapshot_id_value", + description="description_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + assert args[0].cluster == "cluster_value" + + assert args[0].snapshot_id == "snapshot_id_value" + + assert args[0].description == "description_value" + + +def test_snapshot_table_flattened_error(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.snapshot_table( + bigtable_table_admin.SnapshotTableRequest(), + name="name_value", + cluster="cluster_value", + snapshot_id="snapshot_id_value", + description="description_value", + ) + + +@pytest.mark.asyncio +async def test_snapshot_table_flattened_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.snapshot_table( + name="name_value", + cluster="cluster_value", + snapshot_id="snapshot_id_value", + description="description_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + assert args[0].cluster == "cluster_value" + + assert args[0].snapshot_id == "snapshot_id_value" + + assert args[0].description == "description_value" + + +@pytest.mark.asyncio +async def test_snapshot_table_flattened_error_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.snapshot_table( + bigtable_table_admin.SnapshotTableRequest(), + name="name_value", + cluster="cluster_value", + snapshot_id="snapshot_id_value", + description="description_value", + ) + + +def test_get_snapshot( + transport: str = "grpc", request_type=bigtable_table_admin.GetSnapshotRequest +): + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = table.Snapshot( + name="name_value", + data_size_bytes=1594, + state=table.Snapshot.State.READY, + description="description_value", + ) + + response = client.get_snapshot(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.GetSnapshotRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, table.Snapshot) + + assert response.name == "name_value" + + assert response.data_size_bytes == 1594 + + assert response.state == table.Snapshot.State.READY + + assert response.description == "description_value" + + +def test_get_snapshot_from_dict(): + test_get_snapshot(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_snapshot_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.GetSnapshotRequest, +): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + table.Snapshot( + name="name_value", + data_size_bytes=1594, + state=table.Snapshot.State.READY, + description="description_value", + ) + ) + + response = await client.get_snapshot(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.GetSnapshotRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, table.Snapshot) + + assert response.name == "name_value" + + assert response.data_size_bytes == 1594 + + assert response.state == table.Snapshot.State.READY + + assert response.description == "description_value" + + +@pytest.mark.asyncio +async def test_get_snapshot_async_from_dict(): + await test_get_snapshot_async(request_type=dict) + + +def test_get_snapshot_field_headers(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.GetSnapshotRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: + call.return_value = table.Snapshot() + + client.get_snapshot(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_snapshot_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.GetSnapshotRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Snapshot()) + + await client.get_snapshot(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_snapshot_flattened(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = table.Snapshot() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_snapshot(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_get_snapshot_flattened_error(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_snapshot( + bigtable_table_admin.GetSnapshotRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_snapshot_flattened_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = table.Snapshot() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Snapshot()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_snapshot(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_snapshot_flattened_error_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_snapshot( + bigtable_table_admin.GetSnapshotRequest(), name="name_value", + ) + + +def test_list_snapshots( + transport: str = "grpc", request_type=bigtable_table_admin.ListSnapshotsRequest +): + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_table_admin.ListSnapshotsResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_snapshots(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.ListSnapshotsRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, pagers.ListSnapshotsPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_snapshots_from_dict(): + test_list_snapshots(request_type=dict) + + +@pytest.mark.asyncio +async def test_list_snapshots_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.ListSnapshotsRequest, +): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.ListSnapshotsResponse( + next_page_token="next_page_token_value", + ) + ) + + response = await client.list_snapshots(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.ListSnapshotsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListSnapshotsAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_snapshots_async_from_dict(): + await test_list_snapshots_async(request_type=dict) + + +def test_list_snapshots_field_headers(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.ListSnapshotsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: + call.return_value = bigtable_table_admin.ListSnapshotsResponse() + + client.list_snapshots(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_snapshots_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.ListSnapshotsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.ListSnapshotsResponse() + ) + + await client.list_snapshots(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_snapshots_flattened(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_table_admin.ListSnapshotsResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_snapshots(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +def test_list_snapshots_flattened_error(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_snapshots( + bigtable_table_admin.ListSnapshotsRequest(), parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_snapshots_flattened_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_table_admin.ListSnapshotsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.ListSnapshotsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_snapshots(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +@pytest.mark.asyncio +async def test_list_snapshots_flattened_error_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_snapshots( + bigtable_table_admin.ListSnapshotsRequest(), parent="parent_value", + ) + + +def test_list_snapshots_pager(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: + # Set the response to a series of pages. + call.side_effect = ( + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[table.Snapshot(), table.Snapshot(), table.Snapshot(),], + next_page_token="abc", + ), + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[], next_page_token="def", + ), + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[table.Snapshot(),], next_page_token="ghi", + ), + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[table.Snapshot(), table.Snapshot(),], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_snapshots(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, table.Snapshot) for i in results) + + +def test_list_snapshots_pages(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[table.Snapshot(), table.Snapshot(), table.Snapshot(),], + next_page_token="abc", + ), + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[], next_page_token="def", + ), + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[table.Snapshot(),], next_page_token="ghi", + ), + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[table.Snapshot(), table.Snapshot(),], + ), + RuntimeError, + ) + pages = list(client.list_snapshots(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_snapshots_async_pager(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_snapshots), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[table.Snapshot(), table.Snapshot(), table.Snapshot(),], + next_page_token="abc", + ), + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[], next_page_token="def", + ), + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[table.Snapshot(),], next_page_token="ghi", + ), + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[table.Snapshot(), table.Snapshot(),], + ), + RuntimeError, + ) + async_pager = await client.list_snapshots(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, table.Snapshot) for i in responses) + + +@pytest.mark.asyncio +async def test_list_snapshots_async_pages(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_snapshots), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[table.Snapshot(), table.Snapshot(), table.Snapshot(),], + next_page_token="abc", + ), + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[], next_page_token="def", + ), + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[table.Snapshot(),], next_page_token="ghi", + ), + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[table.Snapshot(), table.Snapshot(),], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_snapshots(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_delete_snapshot( + transport: str = "grpc", request_type=bigtable_table_admin.DeleteSnapshotRequest +): + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = None + + response = client.delete_snapshot(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.DeleteSnapshotRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_snapshot_from_dict(): + test_delete_snapshot(request_type=dict) + + +@pytest.mark.asyncio +async def test_delete_snapshot_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.DeleteSnapshotRequest, +): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + response = await client.delete_snapshot(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.DeleteSnapshotRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_snapshot_async_from_dict(): + await test_delete_snapshot_async(request_type=dict) + + +def test_delete_snapshot_field_headers(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.DeleteSnapshotRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: + call.return_value = None + + client.delete_snapshot(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_snapshot_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.DeleteSnapshotRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + await client.delete_snapshot(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_snapshot_flattened(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_snapshot(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_delete_snapshot_flattened_error(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_snapshot( + bigtable_table_admin.DeleteSnapshotRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_snapshot_flattened_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_snapshot(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_delete_snapshot_flattened_error_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_snapshot( + bigtable_table_admin.DeleteSnapshotRequest(), name="name_value", + ) + + +def test_create_backup( + transport: str = "grpc", request_type=bigtable_table_admin.CreateBackupRequest +): + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.create_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.CreateBackupRequest() + + # Establish that the response is the type that we expect. 
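+ # create_backup is a long-running operation: the stub returns a raw
+ # Operation proto and the client wraps it in an API-core future.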
+ assert isinstance(response, future.Future) + + +def test_create_backup_from_dict(): + test_create_backup(request_type=dict) + + +@pytest.mark.asyncio +async def test_create_backup_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.CreateBackupRequest, +): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.create_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.CreateBackupRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_backup_async_from_dict(): + await test_create_backup_async(request_type=dict) + + +def test_create_backup_field_headers(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.CreateBackupRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_backup), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.create_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_backup_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.CreateBackupRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_backup), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.create_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_backup_flattened(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. 
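+ # The flattened parent, backup_id and backup keyword arguments below
+ # should be folded into the CreateBackupRequest passed to the stub.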
+ with mock.patch.object(type(client.transport.create_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_backup( + parent="parent_value", + backup_id="backup_id_value", + backup=table.Backup(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].backup_id == "backup_id_value" + + assert args[0].backup == table.Backup(name="name_value") + + +def test_create_backup_flattened_error(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_backup( + bigtable_table_admin.CreateBackupRequest(), + parent="parent_value", + backup_id="backup_id_value", + backup=table.Backup(name="name_value"), + ) + + +@pytest.mark.asyncio +async def test_create_backup_flattened_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_backup( + parent="parent_value", + backup_id="backup_id_value", + backup=table.Backup(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].backup_id == "backup_id_value" + + assert args[0].backup == table.Backup(name="name_value") + + +@pytest.mark.asyncio +async def test_create_backup_flattened_error_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_backup( + bigtable_table_admin.CreateBackupRequest(), + parent="parent_value", + backup_id="backup_id_value", + backup=table.Backup(name="name_value"), + ) + + +def test_get_backup( + transport: str = "grpc", request_type=bigtable_table_admin.GetBackupRequest +): + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_backup), "__call__") as call: + # Designate an appropriate return value for the call. 
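+ # Populate every scalar field on the fake Backup so each one can be
+ # checked against the response below.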
+ call.return_value = table.Backup( + name="name_value", + source_table="source_table_value", + size_bytes=1089, + state=table.Backup.State.CREATING, + ) + + response = client.get_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.GetBackupRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, table.Backup) + + assert response.name == "name_value" + + assert response.source_table == "source_table_value" + + assert response.size_bytes == 1089 + + assert response.state == table.Backup.State.CREATING + + +def test_get_backup_from_dict(): + test_get_backup(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_backup_async( + transport: str = "grpc_asyncio", request_type=bigtable_table_admin.GetBackupRequest +): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + table.Backup( + name="name_value", + source_table="source_table_value", + size_bytes=1089, + state=table.Backup.State.CREATING, + ) + ) + + response = await client.get_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.GetBackupRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, table.Backup) + + assert response.name == "name_value" + + assert response.source_table == "source_table_value" + + assert response.size_bytes == 1089 + + assert response.state == table.Backup.State.CREATING + + +@pytest.mark.asyncio +async def test_get_backup_async_from_dict(): + await test_get_backup_async(request_type=dict) + + +def test_get_backup_field_headers(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.GetBackupRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_backup), "__call__") as call: + call.return_value = table.Backup() + + client.get_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_backup_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = bigtable_table_admin.GetBackupRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_backup), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Backup()) + + await client.get_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_backup_flattened(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = table.Backup() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_backup(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_get_backup_flattened_error(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_backup( + bigtable_table_admin.GetBackupRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_backup_flattened_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = table.Backup() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Backup()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_backup(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_backup_flattened_error_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_backup( + bigtable_table_admin.GetBackupRequest(), name="name_value", + ) + + +def test_update_backup( + transport: str = "grpc", request_type=bigtable_table_admin.UpdateBackupRequest +): + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.update_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = table.Backup( + name="name_value", + source_table="source_table_value", + size_bytes=1089, + state=table.Backup.State.CREATING, + ) + + response = client.update_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.UpdateBackupRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, table.Backup) + + assert response.name == "name_value" + + assert response.source_table == "source_table_value" + + assert response.size_bytes == 1089 + + assert response.state == table.Backup.State.CREATING + + +def test_update_backup_from_dict(): + test_update_backup(request_type=dict) + + +@pytest.mark.asyncio +async def test_update_backup_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.UpdateBackupRequest, +): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + table.Backup( + name="name_value", + source_table="source_table_value", + size_bytes=1089, + state=table.Backup.State.CREATING, + ) + ) + + response = await client.update_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.UpdateBackupRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, table.Backup) + + assert response.name == "name_value" + + assert response.source_table == "source_table_value" + + assert response.size_bytes == 1089 + + assert response.state == table.Backup.State.CREATING + + +@pytest.mark.asyncio +async def test_update_backup_async_from_dict(): + await test_update_backup_async(request_type=dict) + + +def test_update_backup_field_headers(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.UpdateBackupRequest() + request.backup.name = "backup.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_backup), "__call__") as call: + call.return_value = table.Backup() + + client.update_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
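+ # Nested request fields are addressed with dotted paths, so the routing
+ # header is keyed on backup.name rather than a top-level field.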
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "backup.name=backup.name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_update_backup_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.UpdateBackupRequest() + request.backup.name = "backup.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_backup), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Backup()) + + await client.update_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "backup.name=backup.name/value",) in kw["metadata"] + + +def test_update_backup_flattened(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = table.Backup() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_backup( + backup=table.Backup(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].backup == table.Backup(name="name_value") + + assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + + +def test_update_backup_flattened_error(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_backup( + bigtable_table_admin.UpdateBackupRequest(), + backup=table.Backup(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.asyncio +async def test_update_backup_flattened_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = table.Backup() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Backup()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_backup( + backup=table.Backup(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].backup == table.Backup(name="name_value") + + assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + + +@pytest.mark.asyncio +async def test_update_backup_flattened_error_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_backup( + bigtable_table_admin.UpdateBackupRequest(), + backup=table.Backup(name="name_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + +def test_delete_backup( + transport: str = "grpc", request_type=bigtable_table_admin.DeleteBackupRequest +): + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.DeleteBackupRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_backup_from_dict(): + test_delete_backup(request_type=dict) + + +@pytest.mark.asyncio +async def test_delete_backup_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.DeleteBackupRequest, +): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + response = await client.delete_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.DeleteBackupRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_backup_async_from_dict(): + await test_delete_backup_async(request_type=dict) + + +def test_delete_backup_field_headers(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.DeleteBackupRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: + call.return_value = None + + client.delete_backup(request) + + # Establish that the underlying gRPC stub method was called. 
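+ # The synchronous tests assert exactly one stub invocation; the async
+ # variants only check that at least one call was recorded.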
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_backup_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.DeleteBackupRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + await client.delete_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_backup_flattened(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_backup(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_delete_backup_flattened_error(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_backup( + bigtable_table_admin.DeleteBackupRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_backup_flattened_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_backup(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_delete_backup_flattened_error_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.delete_backup( + bigtable_table_admin.DeleteBackupRequest(), name="name_value", + ) + + +def test_list_backups( + transport: str = "grpc", request_type=bigtable_table_admin.ListBackupsRequest +): + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_table_admin.ListBackupsResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_backups(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.ListBackupsRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, pagers.ListBackupsPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_backups_from_dict(): + test_list_backups(request_type=dict) + + +@pytest.mark.asyncio +async def test_list_backups_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.ListBackupsRequest, +): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.ListBackupsResponse( + next_page_token="next_page_token_value", + ) + ) + + response = await client.list_backups(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.ListBackupsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListBackupsAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_backups_async_from_dict(): + await test_list_backups_async(request_type=dict) + + +def test_list_backups_field_headers(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.ListBackupsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + call.return_value = bigtable_table_admin.ListBackupsResponse() + + client.list_backups(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_backups_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.ListBackupsRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.ListBackupsResponse() + ) + + await client.list_backups(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_backups_flattened(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_table_admin.ListBackupsResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_backups(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +def test_list_backups_flattened_error(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_backups( + bigtable_table_admin.ListBackupsRequest(), parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_backups_flattened_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_table_admin.ListBackupsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.ListBackupsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_backups(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +@pytest.mark.asyncio +async def test_list_backups_flattened_error_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.list_backups( + bigtable_table_admin.ListBackupsRequest(), parent="parent_value", + ) + + +def test_list_backups_pager(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + # Set the response to a series of pages. + call.side_effect = ( + bigtable_table_admin.ListBackupsResponse( + backups=[table.Backup(), table.Backup(), table.Backup(),], + next_page_token="abc", + ), + bigtable_table_admin.ListBackupsResponse( + backups=[], next_page_token="def", + ), + bigtable_table_admin.ListBackupsResponse( + backups=[table.Backup(),], next_page_token="ghi", + ), + bigtable_table_admin.ListBackupsResponse( + backups=[table.Backup(), table.Backup(),], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_backups(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, table.Backup) for i in results) + + +def test_list_backups_pages(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + # Set the response to a series of pages. + call.side_effect = ( + bigtable_table_admin.ListBackupsResponse( + backups=[table.Backup(), table.Backup(), table.Backup(),], + next_page_token="abc", + ), + bigtable_table_admin.ListBackupsResponse( + backups=[], next_page_token="def", + ), + bigtable_table_admin.ListBackupsResponse( + backups=[table.Backup(),], next_page_token="ghi", + ), + bigtable_table_admin.ListBackupsResponse( + backups=[table.Backup(), table.Backup(),], + ), + RuntimeError, + ) + pages = list(client.list_backups(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_backups_async_pager(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_backups), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + bigtable_table_admin.ListBackupsResponse( + backups=[table.Backup(), table.Backup(), table.Backup(),], + next_page_token="abc", + ), + bigtable_table_admin.ListBackupsResponse( + backups=[], next_page_token="def", + ), + bigtable_table_admin.ListBackupsResponse( + backups=[table.Backup(),], next_page_token="ghi", + ), + bigtable_table_admin.ListBackupsResponse( + backups=[table.Backup(), table.Backup(),], + ), + RuntimeError, + ) + async_pager = await client.list_backups(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, table.Backup) for i in responses) + + +@pytest.mark.asyncio +async def test_list_backups_async_pages(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
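+ # list_backups is patched with an AsyncMock so each page fetch can be
+ # awaited; side_effect replays the four canned pages in order and the
+ # trailing RuntimeError makes any extra page request fail loudly
+ # (e.g. mock.Mock(side_effect=(1, 2, RuntimeError)) returns 1, then 2,
+ # then raises RuntimeError on the third call).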
+ with mock.patch.object( + type(client.transport.list_backups), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + bigtable_table_admin.ListBackupsResponse( + backups=[table.Backup(), table.Backup(), table.Backup(),], + next_page_token="abc", + ), + bigtable_table_admin.ListBackupsResponse( + backups=[], next_page_token="def", + ), + bigtable_table_admin.ListBackupsResponse( + backups=[table.Backup(),], next_page_token="ghi", + ), + bigtable_table_admin.ListBackupsResponse( + backups=[table.Backup(), table.Backup(),], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_backups(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_restore_table( + transport: str = "grpc", request_type=bigtable_table_admin.RestoreTableRequest +): + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.restore_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.restore_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.RestoreTableRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_restore_table_from_dict(): + test_restore_table(request_type=dict) + + +@pytest.mark.asyncio +async def test_restore_table_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.RestoreTableRequest, +): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.restore_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.restore_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.RestoreTableRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_restore_table_async_from_dict(): + await test_restore_table_async(request_type=dict) + + +def test_restore_table_field_headers(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = bigtable_table_admin.RestoreTableRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.restore_table), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.restore_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_restore_table_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.RestoreTableRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.restore_table), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.restore_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_get_iam_policy( + transport: str = "grpc", request_type=iam_policy.GetIamPolicyRequest +): + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy.Policy(version=774, etag=b"etag_blob",) + + response = client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == iam_policy.GetIamPolicyRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, policy.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_get_iam_policy_from_dict(): + test_get_iam_policy(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_iam_policy_async( + transport: str = "grpc_asyncio", request_type=iam_policy.GetIamPolicyRequest +): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
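+ # FakeUnaryUnaryCall wraps the canned Policy in an awaitable so the
+ # mocked stub behaves like a real async unary-unary gRPC call.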
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy.Policy(version=774, etag=b"etag_blob",) + ) + + response = await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == iam_policy.GetIamPolicyRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, policy.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_get_iam_policy_async_from_dict(): + await test_get_iam_policy_async(request_type=dict) + + +def test_get_iam_policy_field_headers(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy.Policy() + + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_iam_policy_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy.Policy()) + + await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_get_iam_policy_from_dict_foreign(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy.Policy() + + response = client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + + +def test_get_iam_policy_flattened(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = policy.Policy() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_iam_policy(resource="resource_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].resource == "resource_value" + + +def test_get_iam_policy_flattened_error(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_iam_policy( + iam_policy.GetIamPolicyRequest(), resource="resource_value", + ) + + +@pytest.mark.asyncio +async def test_get_iam_policy_flattened_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy.Policy() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy.Policy()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_iam_policy(resource="resource_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].resource == "resource_value" + + +@pytest.mark.asyncio +async def test_get_iam_policy_flattened_error_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_iam_policy( + iam_policy.GetIamPolicyRequest(), resource="resource_value", + ) + + +def test_set_iam_policy( + transport: str = "grpc", request_type=iam_policy.SetIamPolicyRequest +): + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy.Policy(version=774, etag=b"etag_blob",) + + response = client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == iam_policy.SetIamPolicyRequest() + + # Establish that the response is the type that we expect. 
+ + assert isinstance(response, policy.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_set_iam_policy_from_dict(): + test_set_iam_policy(request_type=dict) + + +@pytest.mark.asyncio +async def test_set_iam_policy_async( + transport: str = "grpc_asyncio", request_type=iam_policy.SetIamPolicyRequest +): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy.Policy(version=774, etag=b"etag_blob",) + ) + + response = await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == iam_policy.SetIamPolicyRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, policy.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_set_iam_policy_async_from_dict(): + await test_set_iam_policy_async(request_type=dict) + + +def test_set_iam_policy_field_headers(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy.Policy() + + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_set_iam_policy_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy.Policy()) + + await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_set_iam_policy_from_dict_foreign(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy.Policy() + + response = client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy.Policy(version=774), + } + ) + call.assert_called() + + +def test_set_iam_policy_flattened(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy.Policy() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.set_iam_policy(resource="resource_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].resource == "resource_value" + + +def test_set_iam_policy_flattened_error(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_iam_policy( + iam_policy.SetIamPolicyRequest(), resource="resource_value", + ) + + +@pytest.mark.asyncio +async def test_set_iam_policy_flattened_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy.Policy() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy.Policy()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.set_iam_policy(resource="resource_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].resource == "resource_value" + + +@pytest.mark.asyncio +async def test_set_iam_policy_flattened_error_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.set_iam_policy( + iam_policy.SetIamPolicyRequest(), resource="resource_value", + ) + + +def test_test_iam_permissions( + transport: str = "grpc", request_type=iam_policy.TestIamPermissionsRequest +): + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + + response = client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == iam_policy.TestIamPermissionsRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, iam_policy.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +def test_test_iam_permissions_from_dict(): + test_test_iam_permissions(request_type=dict) + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async( + transport: str = "grpc_asyncio", request_type=iam_policy.TestIamPermissionsRequest +): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy.TestIamPermissionsResponse(permissions=["permissions_value"],) + ) + + response = await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == iam_policy.TestIamPermissionsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async_from_dict(): + await test_test_iam_permissions_async(request_type=dict) + + +def test_test_iam_permissions_field_headers(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy.TestIamPermissionsResponse() + + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = iam_policy.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy.TestIamPermissionsResponse() + ) + + await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + + +def test_test_iam_permissions_from_dict_foreign(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy.TestIamPermissionsResponse() + + response = client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + + +def test_test_iam_permissions_flattened(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy.TestIamPermissionsResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.test_iam_permissions( + resource="resource_value", permissions=["permissions_value"], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].resource == "resource_value" + + assert args[0].permissions == ["permissions_value"] + + +def test_test_iam_permissions_flattened_error(): + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.test_iam_permissions( + iam_policy.TestIamPermissionsRequest(), + resource="resource_value", + permissions=["permissions_value"], + ) + + +@pytest.mark.asyncio +async def test_test_iam_permissions_flattened_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy.TestIamPermissionsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy.TestIamPermissionsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.test_iam_permissions( + resource="resource_value", permissions=["permissions_value"], + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].resource == "resource_value" + + assert args[0].permissions == ["permissions_value"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_flattened_error_async(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.test_iam_permissions( + iam_policy.TestIamPermissionsRequest(), + resource="resource_value", + permissions=["permissions_value"], + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.BigtableTableAdminGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.BigtableTableAdminGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BigtableTableAdminClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.BigtableTableAdminGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BigtableTableAdminClient( + client_options={"scopes": ["1", "2"]}, transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.BigtableTableAdminGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + client = BigtableTableAdminClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.BigtableTableAdminGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.BigtableTableAdminGrpcAsyncIOTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.BigtableTableAdminGrpcTransport, + transports.BigtableTableAdminGrpcAsyncIOTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + assert isinstance(client.transport, transports.BigtableTableAdminGrpcTransport,) + + +def test_bigtable_table_admin_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(exceptions.DuplicateCredentialArgs): + transport = transports.BigtableTableAdminTransport( + credentials=credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_bigtable_table_admin_base_transport(): + # Instantiate the base transport. 
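+    # Transport.__init__ is patched to a no-op so the abstract base class can be
+    # constructed without real credentials; every RPC stub and the operations_client
+    # property are then expected to raise NotImplementedError.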
+ with mock.patch( + "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.BigtableTableAdminTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.BigtableTableAdminTransport( + credentials=credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + "create_table", + "create_table_from_snapshot", + "list_tables", + "get_table", + "delete_table", + "modify_column_families", + "drop_row_range", + "generate_consistency_token", + "check_consistency", + "snapshot_table", + "get_snapshot", + "list_snapshots", + "delete_snapshot", + "create_backup", + "get_backup", + "update_backup", + "delete_backup", + "list_backups", + "restore_table", + "get_iam_policy", + "set_iam_policy", + "test_iam_permissions", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + +def test_bigtable_table_admin_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + auth, "load_credentials_from_file" + ) as load_creds, mock.patch( + "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.BigtableTableAdminTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.BigtableTableAdminTransport( + credentials_file="credentials.json", quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=( + "https://www.googleapis.com/auth/bigtable.admin", + "https://www.googleapis.com/auth/bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-bigtable.admin", + "https://www.googleapis.com/auth/cloud-bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ), + quota_project_id="octopus", + ) + + +def test_bigtable_table_admin_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(auth, "default") as adc, mock.patch( + "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.BigtableTableAdminTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.BigtableTableAdminTransport() + adc.assert_called_once() + + +def test_bigtable_table_admin_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
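+    # auth.default is mocked out, so no real ADC lookup happens; the assertion below
+    # only checks the scopes and quota_project_id that the client passes through.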
+ with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + BigtableTableAdminClient() + adc.assert_called_once_with( + scopes=( + "https://www.googleapis.com/auth/bigtable.admin", + "https://www.googleapis.com/auth/bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-bigtable.admin", + "https://www.googleapis.com/auth/cloud-bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ), + quota_project_id=None, + ) + + +def test_bigtable_table_admin_transport_auth_adc(): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transports.BigtableTableAdminGrpcTransport( + host="squid.clam.whelk", quota_project_id="octopus" + ) + adc.assert_called_once_with( + scopes=( + "https://www.googleapis.com/auth/bigtable.admin", + "https://www.googleapis.com/auth/bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-bigtable.admin", + "https://www.googleapis.com/auth/cloud-bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.BigtableTableAdminGrpcTransport, + transports.BigtableTableAdminGrpcAsyncIOTransport, + ], +) +def test_bigtable_table_admin_grpc_transport_client_cert_source_for_mtls( + transport_class, +): + cred = credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds, + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=( + "https://www.googleapis.com/auth/bigtable.admin", + "https://www.googleapis.com/auth/bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-bigtable.admin", + "https://www.googleapis.com/auth/cloud-bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ), + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. 
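+    # In that case the client cert callback should be invoked and its cert/key pair
+    # handed to grpc.ssl_channel_credentials, which is asserted on below.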
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback, + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, private_key=expected_key + ) + + +def test_bigtable_table_admin_host_no_port(): + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="bigtableadmin.googleapis.com" + ), + ) + assert client.transport._host == "bigtableadmin.googleapis.com:443" + + +def test_bigtable_table_admin_host_with_port(): + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="bigtableadmin.googleapis.com:8000" + ), + ) + assert client.transport._host == "bigtableadmin.googleapis.com:8000" + + +def test_bigtable_table_admin_grpc_transport_channel(): + channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.BigtableTableAdminGrpcTransport( + host="squid.clam.whelk", channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_bigtable_table_admin_grpc_asyncio_transport_channel(): + channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.BigtableTableAdminGrpcAsyncIOTransport( + host="squid.clam.whelk", channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. 
+@pytest.mark.parametrize( + "transport_class", + [ + transports.BigtableTableAdminGrpcTransport, + transports.BigtableTableAdminGrpcAsyncIOTransport, + ], +) +def test_bigtable_table_admin_transport_channel_mtls_with_client_cert_source( + transport_class, +): + with mock.patch( + "grpc.ssl_channel_credentials", autospec=True + ) as grpc_ssl_channel_cred: + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(auth, "default") as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=( + "https://www.googleapis.com/auth/bigtable.admin", + "https://www.googleapis.com/auth/bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-bigtable.admin", + "https://www.googleapis.com/auth/cloud-bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. 
+@pytest.mark.parametrize( + "transport_class", + [ + transports.BigtableTableAdminGrpcTransport, + transports.BigtableTableAdminGrpcAsyncIOTransport, + ], +) +def test_bigtable_table_admin_transport_channel_mtls_with_adc(transport_class): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=( + "https://www.googleapis.com/auth/bigtable.admin", + "https://www.googleapis.com/auth/bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-bigtable.admin", + "https://www.googleapis.com/auth/cloud-bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_bigtable_table_admin_grpc_lro_client(): + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance(transport.operations_client, operations_v1.OperationsClient,) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_bigtable_table_admin_grpc_lro_async_client(): + client = BigtableTableAdminAsyncClient( + credentials=credentials.AnonymousCredentials(), transport="grpc_asyncio", + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_backup_path(): + project = "squid" + instance = "clam" + cluster = "whelk" + backup = "octopus" + + expected = "projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}".format( + project=project, instance=instance, cluster=cluster, backup=backup, + ) + actual = BigtableTableAdminClient.backup_path(project, instance, cluster, backup) + assert expected == actual + + +def test_parse_backup_path(): + expected = { + "project": "oyster", + "instance": "nudibranch", + "cluster": "cuttlefish", + "backup": "mussel", + } + path = BigtableTableAdminClient.backup_path(**expected) + + # Check that the path construction is reversible. 
+ actual = BigtableTableAdminClient.parse_backup_path(path) + assert expected == actual + + +def test_cluster_path(): + project = "winkle" + instance = "nautilus" + cluster = "scallop" + + expected = "projects/{project}/instances/{instance}/clusters/{cluster}".format( + project=project, instance=instance, cluster=cluster, + ) + actual = BigtableTableAdminClient.cluster_path(project, instance, cluster) + assert expected == actual + + +def test_parse_cluster_path(): + expected = { + "project": "abalone", + "instance": "squid", + "cluster": "clam", + } + path = BigtableTableAdminClient.cluster_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableTableAdminClient.parse_cluster_path(path) + assert expected == actual + + +def test_instance_path(): + project = "whelk" + instance = "octopus" + + expected = "projects/{project}/instances/{instance}".format( + project=project, instance=instance, + ) + actual = BigtableTableAdminClient.instance_path(project, instance) + assert expected == actual + + +def test_parse_instance_path(): + expected = { + "project": "oyster", + "instance": "nudibranch", + } + path = BigtableTableAdminClient.instance_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableTableAdminClient.parse_instance_path(path) + assert expected == actual + + +def test_snapshot_path(): + project = "cuttlefish" + instance = "mussel" + cluster = "winkle" + snapshot = "nautilus" + + expected = "projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}".format( + project=project, instance=instance, cluster=cluster, snapshot=snapshot, + ) + actual = BigtableTableAdminClient.snapshot_path( + project, instance, cluster, snapshot + ) + assert expected == actual + + +def test_parse_snapshot_path(): + expected = { + "project": "scallop", + "instance": "abalone", + "cluster": "squid", + "snapshot": "clam", + } + path = BigtableTableAdminClient.snapshot_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableTableAdminClient.parse_snapshot_path(path) + assert expected == actual + + +def test_table_path(): + project = "whelk" + instance = "octopus" + table = "oyster" + + expected = "projects/{project}/instances/{instance}/tables/{table}".format( + project=project, instance=instance, table=table, + ) + actual = BigtableTableAdminClient.table_path(project, instance, table) + assert expected == actual + + +def test_parse_table_path(): + expected = { + "project": "nudibranch", + "instance": "cuttlefish", + "table": "mussel", + } + path = BigtableTableAdminClient.table_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableTableAdminClient.parse_table_path(path) + assert expected == actual + + +def test_common_billing_account_path(): + billing_account = "winkle" + + expected = "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + actual = BigtableTableAdminClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "nautilus", + } + path = BigtableTableAdminClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. 
+ actual = BigtableTableAdminClient.parse_common_billing_account_path(path) + assert expected == actual + + +def test_common_folder_path(): + folder = "scallop" + + expected = "folders/{folder}".format(folder=folder,) + actual = BigtableTableAdminClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "abalone", + } + path = BigtableTableAdminClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableTableAdminClient.parse_common_folder_path(path) + assert expected == actual + + +def test_common_organization_path(): + organization = "squid" + + expected = "organizations/{organization}".format(organization=organization,) + actual = BigtableTableAdminClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "clam", + } + path = BigtableTableAdminClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableTableAdminClient.parse_common_organization_path(path) + assert expected == actual + + +def test_common_project_path(): + project = "whelk" + + expected = "projects/{project}".format(project=project,) + actual = BigtableTableAdminClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "octopus", + } + path = BigtableTableAdminClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableTableAdminClient.parse_common_project_path(path) + assert expected == actual + + +def test_common_location_path(): + project = "oyster" + location = "nudibranch" + + expected = "projects/{project}/locations/{location}".format( + project=project, location=location, + ) + actual = BigtableTableAdminClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "cuttlefish", + "location": "mussel", + } + path = BigtableTableAdminClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = BigtableTableAdminClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.BigtableTableAdminTransport, "_prep_wrapped_messages" + ) as prep: + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.BigtableTableAdminTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = BigtableTableAdminClient.get_transport_class() + transport = transport_class( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/__init__.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/__init__.py new file mode 100644 index 000000000000..8b137891791f --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/__init__.py @@ -0,0 +1 @@ + diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py new file mode 100644 index 000000000000..0a42c2dade65 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py @@ -0,0 +1,2372 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from google import auth +from google.api_core import client_options +from google.api_core import exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.auth import credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.bigtable_v2.services.bigtable import BigtableAsyncClient +from google.cloud.bigtable_v2.services.bigtable import BigtableClient +from google.cloud.bigtable_v2.services.bigtable import transports +from google.cloud.bigtable_v2.types import bigtable +from google.cloud.bigtable_v2.types import data +from google.oauth2 import service_account + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
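+# The substitution only happens when DEFAULT_ENDPOINT contains "localhost"; any other
+# endpoint is returned unchanged.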
+def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert BigtableClient._get_default_mtls_endpoint(None) is None + assert BigtableClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert ( + BigtableClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + BigtableClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + BigtableClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert BigtableClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +def test_bigtable_client_from_service_account_info(): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_info" + ) as factory: + factory.return_value = creds + info = {"valid": True} + client = BigtableClient.from_service_account_info(info) + assert client.transport._credentials == creds + + assert client.transport._host == "bigtable.googleapis.com:443" + + +@pytest.mark.parametrize("client_class", [BigtableClient, BigtableAsyncClient,]) +def test_bigtable_client_from_service_account_file(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + + assert client.transport._host == "bigtable.googleapis.com:443" + + +def test_bigtable_client_get_transport_class(): + transport = BigtableClient.get_transport_class() + available_transports = [ + transports.BigtableGrpcTransport, + ] + assert transport in available_transports + + transport = BigtableClient.get_transport_class("grpc") + assert transport == transports.BigtableGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (BigtableClient, transports.BigtableGrpcTransport, "grpc"), + (BigtableAsyncClient, transports.BigtableGrpcAsyncIOTransport, "grpc_asyncio"), + ], +) +@mock.patch.object( + BigtableClient, "DEFAULT_ENDPOINT", modify_default_endpoint(BigtableClient) +) +@mock.patch.object( + BigtableAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(BigtableAsyncClient), +) +def test_bigtable_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(BigtableClient, "get_transport_class") as gtc: + transport = transport_class(credentials=credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(BigtableClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. 
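+    # A user-supplied api_endpoint should be forwarded verbatim to the transport as its host.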
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + (BigtableClient, transports.BigtableGrpcTransport, "grpc", "true"), + ( + BigtableAsyncClient, + transports.BigtableGrpcAsyncIOTransport, + "grpc_asyncio", + "true", + ), + (BigtableClient, transports.BigtableGrpcTransport, "grpc", "false"), + ( + BigtableAsyncClient, + transports.BigtableGrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + ], +) +@mock.patch.object( + BigtableClient, "DEFAULT_ENDPOINT", modify_default_endpoint(BigtableClient) +) +@mock.patch.object( + BigtableAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(BigtableAsyncClient), +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_bigtable_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. 
Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=client_cert_source_callback, + ): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (BigtableClient, transports.BigtableGrpcTransport, "grpc"), + (BigtableAsyncClient, transports.BigtableGrpcAsyncIOTransport, "grpc_asyncio"), + ], +) +def test_bigtable_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. 
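+    # Scopes passed via client_options should reach the transport constructor unchanged.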
+ options = client_options.ClientOptions(scopes=["1", "2"],) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (BigtableClient, transports.BigtableGrpcTransport, "grpc"), + (BigtableAsyncClient, transports.BigtableGrpcAsyncIOTransport, "grpc_asyncio"), + ], +) +def test_bigtable_client_client_options_credentials_file( + client_class, transport_class, transport_name +): + # Check the case credentials file is provided. + options = client_options.ClientOptions(credentials_file="credentials.json") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_bigtable_client_client_options_from_dict(): + with mock.patch( + "google.cloud.bigtable_v2.services.bigtable.transports.BigtableGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = BigtableClient(client_options={"api_endpoint": "squid.clam.whelk"}) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +def test_read_rows(transport: str = "grpc", request_type=bigtable.ReadRowsRequest): + client = BigtableClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.read_rows), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = iter([bigtable.ReadRowsResponse()]) + + response = client.read_rows(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable.ReadRowsRequest() + + # Establish that the response is the type that we expect. + for message in response: + assert isinstance(message, bigtable.ReadRowsResponse) + + +def test_read_rows_from_dict(): + test_read_rows(request_type=dict) + + +@pytest.mark.asyncio +async def test_read_rows_async( + transport: str = "grpc_asyncio", request_type=bigtable.ReadRowsRequest +): + client = BigtableAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.read_rows), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.ReadRowsResponse()] + ) + + response = await client.read_rows(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable.ReadRowsRequest() + + # Establish that the response is the type that we expect. + message = await response.read() + assert isinstance(message, bigtable.ReadRowsResponse) + + +@pytest.mark.asyncio +async def test_read_rows_async_from_dict(): + await test_read_rows_async(request_type=dict) + + +def test_read_rows_field_headers(): + client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable.ReadRowsRequest() + request.table_name = "table_name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.read_rows), "__call__") as call: + call.return_value = iter([bigtable.ReadRowsResponse()]) + + client.read_rows(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "table_name=table_name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_read_rows_field_headers_async(): + client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable.ReadRowsRequest() + request.table_name = "table_name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.read_rows), "__call__") as call: + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.ReadRowsResponse()] + ) + + await client.read_rows(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "table_name=table_name/value",) in kw["metadata"] + + +def test_read_rows_flattened(): + client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.read_rows), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = iter([bigtable.ReadRowsResponse()]) + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.read_rows( + table_name="table_name_value", app_profile_id="app_profile_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].table_name == "table_name_value" + + assert args[0].app_profile_id == "app_profile_id_value" + + +def test_read_rows_flattened_error(): + client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.read_rows( + bigtable.ReadRowsRequest(), + table_name="table_name_value", + app_profile_id="app_profile_id_value", + ) + + +@pytest.mark.asyncio +async def test_read_rows_flattened_async(): + client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.read_rows), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = iter([bigtable.ReadRowsResponse()]) + + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.read_rows( + table_name="table_name_value", app_profile_id="app_profile_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].table_name == "table_name_value" + + assert args[0].app_profile_id == "app_profile_id_value" + + +@pytest.mark.asyncio +async def test_read_rows_flattened_error_async(): + client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.read_rows( + bigtable.ReadRowsRequest(), + table_name="table_name_value", + app_profile_id="app_profile_id_value", + ) + + +def test_sample_row_keys( + transport: str = "grpc", request_type=bigtable.SampleRowKeysRequest +): + client = BigtableClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = iter([bigtable.SampleRowKeysResponse()]) + + response = client.sample_row_keys(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable.SampleRowKeysRequest() + + # Establish that the response is the type that we expect. + for message in response: + assert isinstance(message, bigtable.SampleRowKeysResponse) + + +def test_sample_row_keys_from_dict(): + test_sample_row_keys(request_type=dict) + + +@pytest.mark.asyncio +async def test_sample_row_keys_async( + transport: str = "grpc_asyncio", request_type=bigtable.SampleRowKeysRequest +): + client = BigtableAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
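+    # request_type is parametrized: the *_from_dict variant passes a plain dict in
+    # place of a SampleRowKeysRequest instance.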
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.SampleRowKeysResponse()] + ) + + response = await client.sample_row_keys(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable.SampleRowKeysRequest() + + # Establish that the response is the type that we expect. + message = await response.read() + assert isinstance(message, bigtable.SampleRowKeysResponse) + + +@pytest.mark.asyncio +async def test_sample_row_keys_async_from_dict(): + await test_sample_row_keys_async(request_type=dict) + + +def test_sample_row_keys_field_headers(): + client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable.SampleRowKeysRequest() + request.table_name = "table_name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: + call.return_value = iter([bigtable.SampleRowKeysResponse()]) + + client.sample_row_keys(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "table_name=table_name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_sample_row_keys_field_headers_async(): + client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable.SampleRowKeysRequest() + request.table_name = "table_name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.SampleRowKeysResponse()] + ) + + await client.sample_row_keys(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "table_name=table_name/value",) in kw["metadata"] + + +def test_sample_row_keys_flattened(): + client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = iter([bigtable.SampleRowKeysResponse()]) + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.sample_row_keys( + table_name="table_name_value", app_profile_id="app_profile_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].table_name == "table_name_value" + + assert args[0].app_profile_id == "app_profile_id_value" + + +def test_sample_row_keys_flattened_error(): + client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.sample_row_keys( + bigtable.SampleRowKeysRequest(), + table_name="table_name_value", + app_profile_id="app_profile_id_value", + ) + + +@pytest.mark.asyncio +async def test_sample_row_keys_flattened_async(): + client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = iter([bigtable.SampleRowKeysResponse()]) + + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.sample_row_keys( + table_name="table_name_value", app_profile_id="app_profile_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].table_name == "table_name_value" + + assert args[0].app_profile_id == "app_profile_id_value" + + +@pytest.mark.asyncio +async def test_sample_row_keys_flattened_error_async(): + client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.sample_row_keys( + bigtable.SampleRowKeysRequest(), + table_name="table_name_value", + app_profile_id="app_profile_id_value", + ) + + +def test_mutate_row(transport: str = "grpc", request_type=bigtable.MutateRowRequest): + client = BigtableClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable.MutateRowResponse() + + response = client.mutate_row(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable.MutateRowRequest() + + # Establish that the response is the type that we expect. 
+ + assert isinstance(response, bigtable.MutateRowResponse) + + +def test_mutate_row_from_dict(): + test_mutate_row(request_type=dict) + + +@pytest.mark.asyncio +async def test_mutate_row_async( + transport: str = "grpc_asyncio", request_type=bigtable.MutateRowRequest +): + client = BigtableAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.MutateRowResponse() + ) + + response = await client.mutate_row(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable.MutateRowRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable.MutateRowResponse) + + +@pytest.mark.asyncio +async def test_mutate_row_async_from_dict(): + await test_mutate_row_async(request_type=dict) + + +def test_mutate_row_field_headers(): + client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable.MutateRowRequest() + request.table_name = "table_name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: + call.return_value = bigtable.MutateRowResponse() + + client.mutate_row(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "table_name=table_name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_mutate_row_field_headers_async(): + client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable.MutateRowRequest() + request.table_name = "table_name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.MutateRowResponse() + ) + + await client.mutate_row(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "table_name=table_name/value",) in kw["metadata"] + + +def test_mutate_row_flattened(): + client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = bigtable.MutateRowResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.mutate_row( + table_name="table_name_value", + row_key=b"row_key_blob", + mutations=[ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ], + app_profile_id="app_profile_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].table_name == "table_name_value" + + assert args[0].row_key == b"row_key_blob" + + assert args[0].mutations == [ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ] + + assert args[0].app_profile_id == "app_profile_id_value" + + +def test_mutate_row_flattened_error(): + client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.mutate_row( + bigtable.MutateRowRequest(), + table_name="table_name_value", + row_key=b"row_key_blob", + mutations=[ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ], + app_profile_id="app_profile_id_value", + ) + + +@pytest.mark.asyncio +async def test_mutate_row_flattened_async(): + client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable.MutateRowResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.MutateRowResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.mutate_row( + table_name="table_name_value", + row_key=b"row_key_blob", + mutations=[ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ], + app_profile_id="app_profile_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].table_name == "table_name_value" + + assert args[0].row_key == b"row_key_blob" + + assert args[0].mutations == [ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ] + + assert args[0].app_profile_id == "app_profile_id_value" + + +@pytest.mark.asyncio +async def test_mutate_row_flattened_error_async(): + client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.mutate_row( + bigtable.MutateRowRequest(), + table_name="table_name_value", + row_key=b"row_key_blob", + mutations=[ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ], + app_profile_id="app_profile_id_value", + ) + + +def test_mutate_rows(transport: str = "grpc", request_type=bigtable.MutateRowsRequest): + client = BigtableClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = iter([bigtable.MutateRowsResponse()]) + + response = client.mutate_rows(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable.MutateRowsRequest() + + # Establish that the response is the type that we expect. + for message in response: + assert isinstance(message, bigtable.MutateRowsResponse) + + +def test_mutate_rows_from_dict(): + test_mutate_rows(request_type=dict) + + +@pytest.mark.asyncio +async def test_mutate_rows_async( + transport: str = "grpc_asyncio", request_type=bigtable.MutateRowsRequest +): + client = BigtableAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.MutateRowsResponse()] + ) + + response = await client.mutate_rows(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable.MutateRowsRequest() + + # Establish that the response is the type that we expect. + message = await response.read() + assert isinstance(message, bigtable.MutateRowsResponse) + + +@pytest.mark.asyncio +async def test_mutate_rows_async_from_dict(): + await test_mutate_rows_async(request_type=dict) + + +def test_mutate_rows_field_headers(): + client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable.MutateRowsRequest() + request.table_name = "table_name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: + call.return_value = iter([bigtable.MutateRowsResponse()]) + + client.mutate_rows(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "table_name=table_name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_mutate_rows_field_headers_async(): + client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable.MutateRowsRequest() + request.table_name = "table_name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.MutateRowsResponse()] + ) + + await client.mutate_rows(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "table_name=table_name/value",) in kw["metadata"] + + +def test_mutate_rows_flattened(): + client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = iter([bigtable.MutateRowsResponse()]) + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.mutate_rows( + table_name="table_name_value", + entries=[bigtable.MutateRowsRequest.Entry(row_key=b"row_key_blob")], + app_profile_id="app_profile_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].table_name == "table_name_value" + + assert args[0].entries == [ + bigtable.MutateRowsRequest.Entry(row_key=b"row_key_blob") + ] + + assert args[0].app_profile_id == "app_profile_id_value" + + +def test_mutate_rows_flattened_error(): + client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.mutate_rows( + bigtable.MutateRowsRequest(), + table_name="table_name_value", + entries=[bigtable.MutateRowsRequest.Entry(row_key=b"row_key_blob")], + app_profile_id="app_profile_id_value", + ) + + +@pytest.mark.asyncio +async def test_mutate_rows_flattened_async(): + client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = iter([bigtable.MutateRowsResponse()]) + + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.mutate_rows( + table_name="table_name_value", + entries=[bigtable.MutateRowsRequest.Entry(row_key=b"row_key_blob")], + app_profile_id="app_profile_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].table_name == "table_name_value" + + assert args[0].entries == [ + bigtable.MutateRowsRequest.Entry(row_key=b"row_key_blob") + ] + + assert args[0].app_profile_id == "app_profile_id_value" + + +@pytest.mark.asyncio +async def test_mutate_rows_flattened_error_async(): + client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.mutate_rows( + bigtable.MutateRowsRequest(), + table_name="table_name_value", + entries=[bigtable.MutateRowsRequest.Entry(row_key=b"row_key_blob")], + app_profile_id="app_profile_id_value", + ) + + +def test_check_and_mutate_row( + transport: str = "grpc", request_type=bigtable.CheckAndMutateRowRequest +): + client = BigtableClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.check_and_mutate_row), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable.CheckAndMutateRowResponse(predicate_matched=True,) + + response = client.check_and_mutate_row(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable.CheckAndMutateRowRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, bigtable.CheckAndMutateRowResponse) + + assert response.predicate_matched is True + + +def test_check_and_mutate_row_from_dict(): + test_check_and_mutate_row(request_type=dict) + + +@pytest.mark.asyncio +async def test_check_and_mutate_row_async( + transport: str = "grpc_asyncio", request_type=bigtable.CheckAndMutateRowRequest +): + client = BigtableAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.check_and_mutate_row), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.CheckAndMutateRowResponse(predicate_matched=True,) + ) + + response = await client.check_and_mutate_row(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable.CheckAndMutateRowRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable.CheckAndMutateRowResponse) + + assert response.predicate_matched is True + + +@pytest.mark.asyncio +async def test_check_and_mutate_row_async_from_dict(): + await test_check_and_mutate_row_async(request_type=dict) + + +def test_check_and_mutate_row_field_headers(): + client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. 
Set these to a non-empty value. + request = bigtable.CheckAndMutateRowRequest() + request.table_name = "table_name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.check_and_mutate_row), "__call__" + ) as call: + call.return_value = bigtable.CheckAndMutateRowResponse() + + client.check_and_mutate_row(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "table_name=table_name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_check_and_mutate_row_field_headers_async(): + client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable.CheckAndMutateRowRequest() + request.table_name = "table_name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.check_and_mutate_row), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.CheckAndMutateRowResponse() + ) + + await client.check_and_mutate_row(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "table_name=table_name/value",) in kw["metadata"] + + +def test_check_and_mutate_row_flattened(): + client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.check_and_mutate_row), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable.CheckAndMutateRowResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.check_and_mutate_row( + table_name="table_name_value", + row_key=b"row_key_blob", + predicate_filter=data.RowFilter( + chain=data.RowFilter.Chain( + filters=[ + data.RowFilter( + chain=data.RowFilter.Chain( + filters=[data.RowFilter(chain=None)] + ) + ) + ] + ) + ), + true_mutations=[ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ], + false_mutations=[ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ], + app_profile_id="app_profile_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].table_name == "table_name_value" + + assert args[0].row_key == b"row_key_blob" + + assert args[0].predicate_filter == data.RowFilter( + chain=data.RowFilter.Chain( + filters=[ + data.RowFilter( + chain=data.RowFilter.Chain(filters=[data.RowFilter(chain=None)]) + ) + ] + ) + ) + + assert args[0].true_mutations == [ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ] + + assert args[0].false_mutations == [ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ] + + assert args[0].app_profile_id == "app_profile_id_value" + + +def test_check_and_mutate_row_flattened_error(): + client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.check_and_mutate_row( + bigtable.CheckAndMutateRowRequest(), + table_name="table_name_value", + row_key=b"row_key_blob", + predicate_filter=data.RowFilter( + chain=data.RowFilter.Chain( + filters=[ + data.RowFilter( + chain=data.RowFilter.Chain( + filters=[data.RowFilter(chain=None)] + ) + ) + ] + ) + ), + true_mutations=[ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ], + false_mutations=[ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ], + app_profile_id="app_profile_id_value", + ) + + +@pytest.mark.asyncio +async def test_check_and_mutate_row_flattened_async(): + client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.check_and_mutate_row), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable.CheckAndMutateRowResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.CheckAndMutateRowResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.check_and_mutate_row( + table_name="table_name_value", + row_key=b"row_key_blob", + predicate_filter=data.RowFilter( + chain=data.RowFilter.Chain( + filters=[ + data.RowFilter( + chain=data.RowFilter.Chain( + filters=[data.RowFilter(chain=None)] + ) + ) + ] + ) + ), + true_mutations=[ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ], + false_mutations=[ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ], + app_profile_id="app_profile_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].table_name == "table_name_value" + + assert args[0].row_key == b"row_key_blob" + + assert args[0].predicate_filter == data.RowFilter( + chain=data.RowFilter.Chain( + filters=[ + data.RowFilter( + chain=data.RowFilter.Chain(filters=[data.RowFilter(chain=None)]) + ) + ] + ) + ) + + assert args[0].true_mutations == [ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ] + + assert args[0].false_mutations == [ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ] + + assert args[0].app_profile_id == "app_profile_id_value" + + +@pytest.mark.asyncio +async def test_check_and_mutate_row_flattened_error_async(): + client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.check_and_mutate_row( + bigtable.CheckAndMutateRowRequest(), + table_name="table_name_value", + row_key=b"row_key_blob", + predicate_filter=data.RowFilter( + chain=data.RowFilter.Chain( + filters=[ + data.RowFilter( + chain=data.RowFilter.Chain( + filters=[data.RowFilter(chain=None)] + ) + ) + ] + ) + ), + true_mutations=[ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ], + false_mutations=[ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ], + app_profile_id="app_profile_id_value", + ) + + +def test_read_modify_write_row( + transport: str = "grpc", request_type=bigtable.ReadModifyWriteRowRequest +): + client = BigtableClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_modify_write_row), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable.ReadModifyWriteRowResponse() + + response = client.read_modify_write_row(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable.ReadModifyWriteRowRequest() + + # Establish that the response is the type that we expect. + + assert isinstance(response, bigtable.ReadModifyWriteRowResponse) + + +def test_read_modify_write_row_from_dict(): + test_read_modify_write_row(request_type=dict) + + +@pytest.mark.asyncio +async def test_read_modify_write_row_async( + transport: str = "grpc_asyncio", request_type=bigtable.ReadModifyWriteRowRequest +): + client = BigtableAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_modify_write_row), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.ReadModifyWriteRowResponse() + ) + + response = await client.read_modify_write_row(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable.ReadModifyWriteRowRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable.ReadModifyWriteRowResponse) + + +@pytest.mark.asyncio +async def test_read_modify_write_row_async_from_dict(): + await test_read_modify_write_row_async(request_type=dict) + + +def test_read_modify_write_row_field_headers(): + client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable.ReadModifyWriteRowRequest() + request.table_name = "table_name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_modify_write_row), "__call__" + ) as call: + call.return_value = bigtable.ReadModifyWriteRowResponse() + + client.read_modify_write_row(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "table_name=table_name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_read_modify_write_row_field_headers_async(): + client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable.ReadModifyWriteRowRequest() + request.table_name = "table_name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_modify_write_row), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.ReadModifyWriteRowResponse() + ) + + await client.read_modify_write_row(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "table_name=table_name/value",) in kw["metadata"] + + +def test_read_modify_write_row_flattened(): + client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_modify_write_row), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable.ReadModifyWriteRowResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.read_modify_write_row( + table_name="table_name_value", + row_key=b"row_key_blob", + rules=[data.ReadModifyWriteRule(family_name="family_name_value")], + app_profile_id="app_profile_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].table_name == "table_name_value" + + assert args[0].row_key == b"row_key_blob" + + assert args[0].rules == [ + data.ReadModifyWriteRule(family_name="family_name_value") + ] + + assert args[0].app_profile_id == "app_profile_id_value" + + +def test_read_modify_write_row_flattened_error(): + client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.read_modify_write_row( + bigtable.ReadModifyWriteRowRequest(), + table_name="table_name_value", + row_key=b"row_key_blob", + rules=[data.ReadModifyWriteRule(family_name="family_name_value")], + app_profile_id="app_profile_id_value", + ) + + +@pytest.mark.asyncio +async def test_read_modify_write_row_flattened_async(): + client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_modify_write_row), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable.ReadModifyWriteRowResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.ReadModifyWriteRowResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.read_modify_write_row( + table_name="table_name_value", + row_key=b"row_key_blob", + rules=[data.ReadModifyWriteRule(family_name="family_name_value")], + app_profile_id="app_profile_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].table_name == "table_name_value" + + assert args[0].row_key == b"row_key_blob" + + assert args[0].rules == [ + data.ReadModifyWriteRule(family_name="family_name_value") + ] + + assert args[0].app_profile_id == "app_profile_id_value" + + +@pytest.mark.asyncio +async def test_read_modify_write_row_flattened_error_async(): + client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.read_modify_write_row( + bigtable.ReadModifyWriteRowRequest(), + table_name="table_name_value", + row_key=b"row_key_blob", + rules=[data.ReadModifyWriteRule(family_name="family_name_value")], + app_profile_id="app_profile_id_value", + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.BigtableGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BigtableClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.BigtableGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BigtableClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. 
+ transport = transports.BigtableGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BigtableClient( + client_options={"scopes": ["1", "2"]}, transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.BigtableGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + client = BigtableClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.BigtableGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.BigtableGrpcAsyncIOTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [transports.BigtableGrpcTransport, transports.BigtableGrpcAsyncIOTransport,], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + assert isinstance(client.transport, transports.BigtableGrpcTransport,) + + +def test_bigtable_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(exceptions.DuplicateCredentialArgs): + transport = transports.BigtableTransport( + credentials=credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_bigtable_base_transport(): + # Instantiate the base transport. + with mock.patch( + "google.cloud.bigtable_v2.services.bigtable.transports.BigtableTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.BigtableTransport( + credentials=credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + "read_rows", + "sample_row_keys", + "mutate_row", + "mutate_rows", + "check_and_mutate_row", + "read_modify_write_row", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + +def test_bigtable_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + auth, "load_credentials_from_file" + ) as load_creds, mock.patch( + "google.cloud.bigtable_v2.services.bigtable.transports.BigtableTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.BigtableTransport( + credentials_file="credentials.json", quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=( + "https://www.googleapis.com/auth/bigtable.data", + "https://www.googleapis.com/auth/bigtable.data.readonly", + "https://www.googleapis.com/auth/cloud-bigtable.data", + "https://www.googleapis.com/auth/cloud-bigtable.data.readonly", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ), + quota_project_id="octopus", + ) + + +def test_bigtable_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(auth, "default") as adc, mock.patch( + "google.cloud.bigtable_v2.services.bigtable.transports.BigtableTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.BigtableTransport() + adc.assert_called_once() + + +def test_bigtable_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + BigtableClient() + adc.assert_called_once_with( + scopes=( + "https://www.googleapis.com/auth/bigtable.data", + "https://www.googleapis.com/auth/bigtable.data.readonly", + "https://www.googleapis.com/auth/cloud-bigtable.data", + "https://www.googleapis.com/auth/cloud-bigtable.data.readonly", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ), + quota_project_id=None, + ) + + +def test_bigtable_transport_auth_adc(): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transports.BigtableGrpcTransport( + host="squid.clam.whelk", quota_project_id="octopus" + ) + adc.assert_called_once_with( + scopes=( + "https://www.googleapis.com/auth/bigtable.data", + "https://www.googleapis.com/auth/bigtable.data.readonly", + "https://www.googleapis.com/auth/cloud-bigtable.data", + "https://www.googleapis.com/auth/cloud-bigtable.data.readonly", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [transports.BigtableGrpcTransport, transports.BigtableGrpcAsyncIOTransport], +) +def test_bigtable_grpc_transport_client_cert_source_for_mtls(transport_class): + cred = credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. 
+ with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds, + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=( + "https://www.googleapis.com/auth/bigtable.data", + "https://www.googleapis.com/auth/bigtable.data.readonly", + "https://www.googleapis.com/auth/cloud-bigtable.data", + "https://www.googleapis.com/auth/cloud-bigtable.data.readonly", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ), + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback, + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, private_key=expected_key + ) + + +def test_bigtable_host_no_port(): + client = BigtableClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="bigtable.googleapis.com" + ), + ) + assert client.transport._host == "bigtable.googleapis.com:443" + + +def test_bigtable_host_with_port(): + client = BigtableClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="bigtable.googleapis.com:8000" + ), + ) + assert client.transport._host == "bigtable.googleapis.com:8000" + + +def test_bigtable_grpc_transport_channel(): + channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.BigtableGrpcTransport( + host="squid.clam.whelk", channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_bigtable_grpc_asyncio_transport_channel(): + channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.BigtableGrpcAsyncIOTransport( + host="squid.clam.whelk", channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. 
+@pytest.mark.parametrize( + "transport_class", + [transports.BigtableGrpcTransport, transports.BigtableGrpcAsyncIOTransport], +) +def test_bigtable_transport_channel_mtls_with_client_cert_source(transport_class): + with mock.patch( + "grpc.ssl_channel_credentials", autospec=True + ) as grpc_ssl_channel_cred: + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(auth, "default") as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=( + "https://www.googleapis.com/auth/bigtable.data", + "https://www.googleapis.com/auth/bigtable.data.readonly", + "https://www.googleapis.com/auth/cloud-bigtable.data", + "https://www.googleapis.com/auth/cloud-bigtable.data.readonly", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. 
+@pytest.mark.parametrize( + "transport_class", + [transports.BigtableGrpcTransport, transports.BigtableGrpcAsyncIOTransport], +) +def test_bigtable_transport_channel_mtls_with_adc(transport_class): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=( + "https://www.googleapis.com/auth/bigtable.data", + "https://www.googleapis.com/auth/bigtable.data.readonly", + "https://www.googleapis.com/auth/cloud-bigtable.data", + "https://www.googleapis.com/auth/cloud-bigtable.data.readonly", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_table_path(): + project = "squid" + instance = "clam" + table = "whelk" + + expected = "projects/{project}/instances/{instance}/tables/{table}".format( + project=project, instance=instance, table=table, + ) + actual = BigtableClient.table_path(project, instance, table) + assert expected == actual + + +def test_parse_table_path(): + expected = { + "project": "octopus", + "instance": "oyster", + "table": "nudibranch", + } + path = BigtableClient.table_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableClient.parse_table_path(path) + assert expected == actual + + +def test_common_billing_account_path(): + billing_account = "cuttlefish" + + expected = "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + actual = BigtableClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "mussel", + } + path = BigtableClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableClient.parse_common_billing_account_path(path) + assert expected == actual + + +def test_common_folder_path(): + folder = "winkle" + + expected = "folders/{folder}".format(folder=folder,) + actual = BigtableClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "nautilus", + } + path = BigtableClient.common_folder_path(**expected) + + # Check that the path construction is reversible. 
+ actual = BigtableClient.parse_common_folder_path(path) + assert expected == actual + + +def test_common_organization_path(): + organization = "scallop" + + expected = "organizations/{organization}".format(organization=organization,) + actual = BigtableClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "abalone", + } + path = BigtableClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableClient.parse_common_organization_path(path) + assert expected == actual + + +def test_common_project_path(): + project = "squid" + + expected = "projects/{project}".format(project=project,) + actual = BigtableClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "clam", + } + path = BigtableClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableClient.parse_common_project_path(path) + assert expected == actual + + +def test_common_location_path(): + project = "whelk" + location = "octopus" + + expected = "projects/{project}/locations/{location}".format( + project=project, location=location, + ) + actual = BigtableClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + } + path = BigtableClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.BigtableTransport, "_prep_wrapped_messages" + ) as prep: + client = BigtableClient( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.BigtableTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = BigtableClient.get_transport_class() + transport = transport_class( + credentials=credentials.AnonymousCredentials(), client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_client_v2.py b/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_client_v2.py deleted file mode 100644 index 84abfecef5a0..000000000000 --- a/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_client_v2.py +++ /dev/null @@ -1,316 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Unit tests.""" - -import mock -import pytest - -from google.cloud import bigtable_v2 -from google.cloud.bigtable_v2.proto import bigtable_pb2 - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - response = None - if self.channel_stub.responses: - response = self.channel_stub.responses.pop() - - if isinstance(response, Exception): - raise response - - if response: - return response - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - def unary_stream(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - -class CustomException(Exception): - pass - - -class TestBigtableClient(object): - def test_read_rows(self): - # Setup Expected Response - last_scanned_row_key = b"-126" - expected_response = {"last_scanned_row_key": last_scanned_row_key} - expected_response = bigtable_pb2.ReadRowsResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[iter([expected_response])]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup Request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - response = client.read_rows(table_name) - resources = list(response) - assert len(resources) == 1 - assert expected_response == resources[0] - - assert len(channel.requests) == 1 - expected_request = bigtable_pb2.ReadRowsRequest(table_name=table_name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_read_rows_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - with pytest.raises(CustomException): - client.read_rows(table_name) - - def test_sample_row_keys(self): - # Setup Expected Response - row_key = b"122" - offset_bytes = 889884095 - expected_response = {"row_key": row_key, "offset_bytes": offset_bytes} - expected_response = bigtable_pb2.SampleRowKeysResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[iter([expected_response])]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup Request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - response = client.sample_row_keys(table_name) - resources = list(response) - assert len(resources) == 1 - assert expected_response == resources[0] - - assert len(channel.requests) == 1 - expected_request = bigtable_pb2.SampleRowKeysRequest(table_name=table_name) - actual_request = channel.requests[0][1] - assert expected_request == 
actual_request - - def test_sample_row_keys_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - with pytest.raises(CustomException): - client.sample_row_keys(table_name) - - def test_mutate_row(self): - # Setup Expected Response - expected_response = {} - expected_response = bigtable_pb2.MutateRowResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup Request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - row_key = b"122" - mutations = [] - - response = client.mutate_row(table_name, row_key, mutations) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_pb2.MutateRowRequest( - table_name=table_name, row_key=row_key, mutations=mutations - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_mutate_row_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - row_key = b"122" - mutations = [] - - with pytest.raises(CustomException): - client.mutate_row(table_name, row_key, mutations) - - def test_mutate_rows(self): - # Setup Expected Response - expected_response = {} - expected_response = bigtable_pb2.MutateRowsResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[iter([expected_response])]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup Request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - entries = [] - - response = client.mutate_rows(table_name, entries) - resources = list(response) - assert len(resources) == 1 - assert expected_response == resources[0] - - assert len(channel.requests) == 1 - expected_request = bigtable_pb2.MutateRowsRequest( - table_name=table_name, entries=entries - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_mutate_rows_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - entries = [] - - with pytest.raises(CustomException): - client.mutate_rows(table_name, entries) - - def test_check_and_mutate_row(self): - # Setup Expected Response - predicate_matched = True - expected_response = {"predicate_matched": predicate_matched} - expected_response = bigtable_pb2.CheckAndMutateRowResponse(**expected_response) 
- - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup Request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - row_key = b"122" - - response = client.check_and_mutate_row(table_name, row_key) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_pb2.CheckAndMutateRowRequest( - table_name=table_name, row_key=row_key - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_check_and_mutate_row_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - row_key = b"122" - - with pytest.raises(CustomException): - client.check_and_mutate_row(table_name, row_key) - - def test_read_modify_write_row(self): - # Setup Expected Response - expected_response = {} - expected_response = bigtable_pb2.ReadModifyWriteRowResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup Request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - row_key = b"122" - rules = [] - - response = client.read_modify_write_row(table_name, row_key, rules) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_pb2.ReadModifyWriteRowRequest( - table_name=table_name, row_key=row_key, rules=rules - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_read_modify_write_row_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_v2.BigtableClient() - - # Setup request - table_name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - row_key = b"122" - rules = [] - - with pytest.raises(CustomException): - client.read_modify_write_row(table_name, row_key, rules) diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py b/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py deleted file mode 100644 index df083406b4e2..000000000000 --- a/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py +++ /dev/null @@ -1,924 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Unit tests.""" - -import mock -import pytest - -from google.rpc import status_pb2 - -from google.cloud import bigtable_admin_v2 -from google.cloud.bigtable_admin_v2.proto import bigtable_instance_admin_pb2 -from google.cloud.bigtable_admin_v2.proto import instance_pb2 -from google.iam.v1 import iam_policy_pb2 -from google.iam.v1 import policy_pb2 -from google.longrunning import operations_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - response = None - if self.channel_stub.responses: - response = self.channel_stub.responses.pop() - - if isinstance(response, Exception): - raise response - - if response: - return response - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - -class CustomException(Exception): - pass - - -class TestBigtableInstanceAdminClient(object): - def test_create_instance(self): - # Setup Expected Response - name = "name3373707" - display_name = "displayName1615086568" - expected_response = {"name": name, "display_name": display_name} - expected_response = instance_pb2.Instance(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_create_instance", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - parent = client.project_path("[PROJECT]") - instance_id = "instanceId-2101995259" - instance = {} - clusters = {} - - response = client.create_instance(parent, instance_id, instance, clusters) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.CreateInstanceRequest( - parent=parent, instance_id=instance_id, instance=instance, clusters=clusters - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_instance_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_create_instance_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = 
bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - parent = client.project_path("[PROJECT]") - instance_id = "instanceId-2101995259" - instance = {} - clusters = {} - - response = client.create_instance(parent, instance_id, instance, clusters) - exception = response.exception() - assert exception.errors[0] == error - - def test_get_instance(self): - # Setup Expected Response - name_2 = "name2-1052831874" - display_name = "displayName1615086568" - expected_response = {"name": name_2, "display_name": display_name} - expected_response = instance_pb2.Instance(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - name = client.instance_path("[PROJECT]", "[INSTANCE]") - - response = client.get_instance(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.GetInstanceRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_instance_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - name = client.instance_path("[PROJECT]", "[INSTANCE]") - - with pytest.raises(CustomException): - client.get_instance(name) - - def test_list_instances(self): - # Setup Expected Response - next_page_token = "nextPageToken-1530815211" - expected_response = {"next_page_token": next_page_token} - expected_response = bigtable_instance_admin_pb2.ListInstancesResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - parent = client.project_path("[PROJECT]") - - response = client.list_instances(parent) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.ListInstancesRequest( - parent=parent - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_instances_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - parent = client.project_path("[PROJECT]") - - with pytest.raises(CustomException): - client.list_instances(parent) - - def test_update_instance(self): - # Setup Expected Response - name = "name3373707" - display_name_2 = "displayName21615000987" - expected_response = {"name": name, "display_name": display_name_2} - expected_response = instance_pb2.Instance(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - 
create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - display_name = "displayName1615086568" - - response = client.update_instance(display_name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = instance_pb2.Instance(display_name=display_name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_instance_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - display_name = "displayName1615086568" - - with pytest.raises(CustomException): - client.update_instance(display_name) - - def test_partial_update_instance(self): - # Setup Expected Response - name = "name3373707" - display_name = "displayName1615086568" - expected_response = {"name": name, "display_name": display_name} - expected_response = instance_pb2.Instance(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_partial_update_instance", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - instance = {} - update_mask = {} - - response = client.partial_update_instance(instance, update_mask) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.PartialUpdateInstanceRequest( - instance=instance, update_mask=update_mask - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_partial_update_instance_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_partial_update_instance_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - instance = {} - update_mask = {} - - response = client.partial_update_instance(instance, update_mask) - exception = response.exception() - assert exception.errors[0] == error - - def test_delete_instance(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - name = client.instance_path("[PROJECT]", "[INSTANCE]") - - client.delete_instance(name) - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.DeleteInstanceRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_instance_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as 
create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - name = client.instance_path("[PROJECT]", "[INSTANCE]") - - with pytest.raises(CustomException): - client.delete_instance(name) - - def test_create_cluster(self): - # Setup Expected Response - name = "name3373707" - location = "location1901043637" - serve_nodes = 1288838783 - expected_response = { - "name": name, - "location": location, - "serve_nodes": serve_nodes, - } - expected_response = instance_pb2.Cluster(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_create_cluster", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - cluster_id = "clusterId240280960" - cluster = {} - - response = client.create_cluster(parent, cluster_id, cluster) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.CreateClusterRequest( - parent=parent, cluster_id=cluster_id, cluster=cluster - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_cluster_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_create_cluster_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - cluster_id = "clusterId240280960" - cluster = {} - - response = client.create_cluster(parent, cluster_id, cluster) - exception = response.exception() - assert exception.errors[0] == error - - def test_get_cluster(self): - # Setup Expected Response - name_2 = "name2-1052831874" - location = "location1901043637" - serve_nodes = 1288838783 - expected_response = { - "name": name_2, - "location": location, - "serve_nodes": serve_nodes, - } - expected_response = instance_pb2.Cluster(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - name = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - - response = client.get_cluster(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.GetClusterRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_cluster_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = 
bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - name = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - - with pytest.raises(CustomException): - client.get_cluster(name) - - def test_list_clusters(self): - # Setup Expected Response - next_page_token = "nextPageToken-1530815211" - expected_response = {"next_page_token": next_page_token} - expected_response = bigtable_instance_admin_pb2.ListClustersResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - - response = client.list_clusters(parent) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.ListClustersRequest( - parent=parent - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_clusters_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - - with pytest.raises(CustomException): - client.list_clusters(parent) - - def test_update_cluster(self): - # Setup Expected Response - name = "name3373707" - location = "location1901043637" - serve_nodes_2 = 1623486220 - expected_response = { - "name": name, - "location": location, - "serve_nodes": serve_nodes_2, - } - expected_response = instance_pb2.Cluster(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_update_cluster", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - serve_nodes = 1288838783 - - response = client.update_cluster(serve_nodes) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = instance_pb2.Cluster(serve_nodes=serve_nodes) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_cluster_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_update_cluster_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - serve_nodes = 1288838783 - - response = client.update_cluster(serve_nodes) - exception = response.exception() - assert exception.errors[0] == error - - def test_delete_cluster(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = 
channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - name = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - - client.delete_cluster(name) - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.DeleteClusterRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_cluster_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - name = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - - with pytest.raises(CustomException): - client.delete_cluster(name) - - def test_create_app_profile(self): - # Setup Expected Response - name = "name3373707" - etag = "etag3123477" - description = "description-1724546052" - expected_response = {"name": name, "etag": etag, "description": description} - expected_response = instance_pb2.AppProfile(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - app_profile_id = "appProfileId1262094415" - app_profile = {} - - response = client.create_app_profile(parent, app_profile_id, app_profile) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.CreateAppProfileRequest( - parent=parent, app_profile_id=app_profile_id, app_profile=app_profile - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_app_profile_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - app_profile_id = "appProfileId1262094415" - app_profile = {} - - with pytest.raises(CustomException): - client.create_app_profile(parent, app_profile_id, app_profile) - - def test_get_app_profile(self): - # Setup Expected Response - name_2 = "name2-1052831874" - etag = "etag3123477" - description = "description-1724546052" - expected_response = {"name": name_2, "etag": etag, "description": description} - expected_response = instance_pb2.AppProfile(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - name = client.app_profile_path("[PROJECT]", "[INSTANCE]", "[APP_PROFILE]") - - response = client.get_app_profile(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.GetAppProfileRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def 
test_get_app_profile_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - name = client.app_profile_path("[PROJECT]", "[INSTANCE]", "[APP_PROFILE]") - - with pytest.raises(CustomException): - client.get_app_profile(name) - - def test_list_app_profiles(self): - # Setup Expected Response - next_page_token = "" - app_profiles_element = {} - app_profiles = [app_profiles_element] - expected_response = { - "next_page_token": next_page_token, - "app_profiles": app_profiles, - } - expected_response = bigtable_instance_admin_pb2.ListAppProfilesResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - - paged_list_response = client.list_app_profiles(parent) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.app_profiles[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.ListAppProfilesRequest( - parent=parent - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_app_profiles_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - - paged_list_response = client.list_app_profiles(parent) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_update_app_profile(self): - # Setup Expected Response - name = "name3373707" - etag = "etag3123477" - description = "description-1724546052" - expected_response = {"name": name, "etag": etag, "description": description} - expected_response = instance_pb2.AppProfile(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_update_app_profile", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - app_profile = {} - update_mask = {} - - response = client.update_app_profile(app_profile, update_mask) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.UpdateAppProfileRequest( - app_profile=app_profile, update_mask=update_mask - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_app_profile_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_update_app_profile_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - 
channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - app_profile = {} - update_mask = {} - - response = client.update_app_profile(app_profile, update_mask) - exception = response.exception() - assert exception.errors[0] == error - - def test_delete_app_profile(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - name = client.app_profile_path("[PROJECT]", "[INSTANCE]", "[APP_PROFILE]") - - client.delete_app_profile(name) - - assert len(channel.requests) == 1 - expected_request = bigtable_instance_admin_pb2.DeleteAppProfileRequest( - name=name - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_app_profile_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - name = client.app_profile_path("[PROJECT]", "[INSTANCE]", "[APP_PROFILE]") - - with pytest.raises(CustomException): - client.delete_app_profile(name) - - def test_get_iam_policy(self): - # Setup Expected Response - version = 351608024 - etag = b"etag3123477" - expected_response = {"version": version, "etag": etag} - expected_response = policy_pb2.Policy(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - resource = "resource-341064690" - - response = client.get_iam_policy(resource) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = iam_policy_pb2.GetIamPolicyRequest(resource=resource) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_iam_policy_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - resource = "resource-341064690" - - with pytest.raises(CustomException): - client.get_iam_policy(resource) - - def test_set_iam_policy(self): - # Setup Expected Response - version = 351608024 - etag = b"etag3123477" - expected_response = {"version": version, "etag": etag} - expected_response = policy_pb2.Policy(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - resource = "resource-341064690" - policy = {} - - response = client.set_iam_policy(resource, policy) - assert expected_response == response - - assert 
len(channel.requests) == 1 - expected_request = iam_policy_pb2.SetIamPolicyRequest( - resource=resource, policy=policy - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_set_iam_policy_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - resource = "resource-341064690" - policy = {} - - with pytest.raises(CustomException): - client.set_iam_policy(resource, policy) - - def test_test_iam_permissions(self): - # Setup Expected Response - expected_response = {} - expected_response = iam_policy_pb2.TestIamPermissionsResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup Request - resource = "resource-341064690" - permissions = [] - - response = client.test_iam_permissions(resource, permissions) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = iam_policy_pb2.TestIamPermissionsRequest( - resource=resource, permissions=permissions - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_test_iam_permissions_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableInstanceAdminClient() - - # Setup request - resource = "resource-341064690" - permissions = [] - - with pytest.raises(CustomException): - client.test_iam_permissions(resource, permissions) diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py b/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py deleted file mode 100644 index 42db08579f9d..000000000000 --- a/packages/google-cloud-bigtable/tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py +++ /dev/null @@ -1,1039 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Unit tests.""" - -import mock -import pytest - -from google.rpc import status_pb2 - -from google.cloud import bigtable_admin_v2 -from google.cloud.bigtable_admin_v2.proto import bigtable_table_admin_pb2 -from google.cloud.bigtable_admin_v2.proto import table_pb2 -from google.iam.v1 import iam_policy_pb2 -from google.iam.v1 import policy_pb2 -from google.longrunning import operations_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - response = None - if self.channel_stub.responses: - response = self.channel_stub.responses.pop() - - if isinstance(response, Exception): - raise response - - if response: - return response - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - -class CustomException(Exception): - pass - - -class TestBigtableTableAdminClient(object): - def test_create_table(self): - # Setup Expected Response - name = "name3373707" - expected_response = {"name": name} - expected_response = table_pb2.Table(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - table_id = "tableId-895419604" - table = {} - - response = client.create_table(parent, table_id, table) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.CreateTableRequest( - parent=parent, table_id=table_id, table=table - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_table_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - table_id = "tableId-895419604" - table = {} - - with pytest.raises(CustomException): - client.create_table(parent, table_id, table) - - def test_create_table_from_snapshot(self): - # Setup Expected Response - name = "name3373707" - expected_response = {"name": name} - expected_response = table_pb2.Table(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_create_table_from_snapshot", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", 
"[INSTANCE]") - table_id = "tableId-895419604" - source_snapshot = client.snapshot_path( - "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" - ) - - response = client.create_table_from_snapshot(parent, table_id, source_snapshot) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.CreateTableFromSnapshotRequest( - parent=parent, table_id=table_id, source_snapshot=source_snapshot - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_table_from_snapshot_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_create_table_from_snapshot_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - table_id = "tableId-895419604" - source_snapshot = client.snapshot_path( - "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" - ) - - response = client.create_table_from_snapshot(parent, table_id, source_snapshot) - exception = response.exception() - assert exception.errors[0] == error - - def test_list_tables(self): - # Setup Expected Response - next_page_token = "" - tables_element = {} - tables = [tables_element] - expected_response = {"next_page_token": next_page_token, "tables": tables} - expected_response = bigtable_table_admin_pb2.ListTablesResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - - paged_list_response = client.list_tables(parent) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.tables[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.ListTablesRequest(parent=parent) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_tables_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - parent = client.instance_path("[PROJECT]", "[INSTANCE]") - - paged_list_response = client.list_tables(parent) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_get_table(self): - # Setup Expected Response - name_2 = "name2-1052831874" - expected_response = {"name": name_2} - expected_response = table_pb2.Table(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - 
- response = client.get_table(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.GetTableRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_table_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - with pytest.raises(CustomException): - client.get_table(name) - - def test_delete_table(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - client.delete_table(name) - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.DeleteTableRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_table_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - with pytest.raises(CustomException): - client.delete_table(name) - - def test_modify_column_families(self): - # Setup Expected Response - name_2 = "name2-1052831874" - expected_response = {"name": name_2} - expected_response = table_pb2.Table(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - modifications = [] - - response = client.modify_column_families(name, modifications) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.ModifyColumnFamiliesRequest( - name=name, modifications=modifications - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_modify_column_families_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - modifications = [] - - with pytest.raises(CustomException): - client.modify_column_families(name, modifications) - - def test_drop_row_range(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = 
client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - client.drop_row_range(name) - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.DropRowRangeRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_drop_row_range_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - with pytest.raises(CustomException): - client.drop_row_range(name) - - def test_generate_consistency_token(self): - # Setup Expected Response - consistency_token = "consistencyToken-1090516718" - expected_response = {"consistency_token": consistency_token} - expected_response = bigtable_table_admin_pb2.GenerateConsistencyTokenResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - response = client.generate_consistency_token(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.GenerateConsistencyTokenRequest( - name=name - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_generate_consistency_token_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - - with pytest.raises(CustomException): - client.generate_consistency_token(name) - - def test_check_consistency(self): - # Setup Expected Response - consistent = True - expected_response = {"consistent": consistent} - expected_response = bigtable_table_admin_pb2.CheckConsistencyResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - consistency_token = "consistencyToken-1090516718" - - response = client.check_consistency(name, consistency_token) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.CheckConsistencyRequest( - name=name, consistency_token=consistency_token - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_check_consistency_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = 
bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - consistency_token = "consistencyToken-1090516718" - - with pytest.raises(CustomException): - client.check_consistency(name, consistency_token) - - def test_get_iam_policy(self): - # Setup Expected Response - version = 351608024 - etag = b"etag3123477" - expected_response = {"version": version, "etag": etag} - expected_response = policy_pb2.Policy(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - resource = "resource-341064690" - - response = client.get_iam_policy(resource) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = iam_policy_pb2.GetIamPolicyRequest(resource=resource) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_iam_policy_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - resource = "resource-341064690" - - with pytest.raises(CustomException): - client.get_iam_policy(resource) - - def test_set_iam_policy(self): - # Setup Expected Response - version = 351608024 - etag = b"etag3123477" - expected_response = {"version": version, "etag": etag} - expected_response = policy_pb2.Policy(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - resource = "resource-341064690" - policy = {} - - response = client.set_iam_policy(resource, policy) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = iam_policy_pb2.SetIamPolicyRequest( - resource=resource, policy=policy - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_set_iam_policy_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - resource = "resource-341064690" - policy = {} - - with pytest.raises(CustomException): - client.set_iam_policy(resource, policy) - - def test_test_iam_permissions(self): - # Setup Expected Response - expected_response = {} - expected_response = iam_policy_pb2.TestIamPermissionsResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - resource = "resource-341064690" - permissions = [] - - response = client.test_iam_permissions(resource, permissions) - assert 
expected_response == response - - assert len(channel.requests) == 1 - expected_request = iam_policy_pb2.TestIamPermissionsRequest( - resource=resource, permissions=permissions - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_test_iam_permissions_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - resource = "resource-341064690" - permissions = [] - - with pytest.raises(CustomException): - client.test_iam_permissions(resource, permissions) - - def test_snapshot_table(self): - # Setup Expected Response - name_2 = "name2-1052831874" - data_size_bytes = 2110122398 - description = "description-1724546052" - expected_response = { - "name": name_2, - "data_size_bytes": data_size_bytes, - "description": description, - } - expected_response = table_pb2.Snapshot(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_snapshot_table", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - cluster = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - snapshot_id = "snapshotId-168585866" - - response = client.snapshot_table(name, cluster, snapshot_id) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.SnapshotTableRequest( - name=name, cluster=cluster, snapshot_id=snapshot_id - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_snapshot_table_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_snapshot_table_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.table_path("[PROJECT]", "[INSTANCE]", "[TABLE]") - cluster = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - snapshot_id = "snapshotId-168585866" - - response = client.snapshot_table(name, cluster, snapshot_id) - exception = response.exception() - assert exception.errors[0] == error - - def test_get_snapshot(self): - # Setup Expected Response - name_2 = "name2-1052831874" - data_size_bytes = 2110122398 - description = "description-1724546052" - expected_response = { - "name": name_2, - "data_size_bytes": data_size_bytes, - "description": description, - } - expected_response = table_pb2.Snapshot(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # 
Setup Request - name = client.snapshot_path( - "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" - ) - - response = client.get_snapshot(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.GetSnapshotRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_snapshot_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - name = client.snapshot_path( - "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" - ) - - with pytest.raises(CustomException): - client.get_snapshot(name) - - def test_list_snapshots(self): - # Setup Expected Response - next_page_token = "" - snapshots_element = {} - snapshots = [snapshots_element] - expected_response = {"next_page_token": next_page_token, "snapshots": snapshots} - expected_response = bigtable_table_admin_pb2.ListSnapshotsResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - - paged_list_response = client.list_snapshots(parent) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.snapshots[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.ListSnapshotsRequest(parent=parent) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_snapshots_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - - paged_list_response = client.list_snapshots(parent) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_delete_snapshot(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.snapshot_path( - "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" - ) - - client.delete_snapshot(name) - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.DeleteSnapshotRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_snapshot_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - name = client.snapshot_path( - "[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[SNAPSHOT]" - ) - - with pytest.raises(CustomException): 
- client.delete_snapshot(name) - - def test_create_backup(self): - # Setup Expected Response - name = "name3373707" - source_table = "sourceTable1670858410" - size_bytes = 1796325715 - expected_response = { - "name": name, - "source_table": source_table, - "size_bytes": size_bytes, - } - expected_response = table_pb2.Backup(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_create_backup", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - backup_id = "backupId1355353272" - backup = {} - - response = client.create_backup(parent, backup_id, backup) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.CreateBackupRequest( - parent=parent, backup_id=backup_id, backup=backup - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_backup_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_create_backup_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - backup_id = "backupId1355353272" - backup = {} - - response = client.create_backup(parent, backup_id, backup) - exception = response.exception() - assert exception.errors[0] == error - - def test_get_backup(self): - # Setup Expected Response - name_2 = "name2-1052831874" - source_table = "sourceTable1670858410" - size_bytes = 1796325715 - expected_response = { - "name": name_2, - "source_table": source_table, - "size_bytes": size_bytes, - } - expected_response = table_pb2.Backup(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.backup_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[BACKUP]") - - response = client.get_backup(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.GetBackupRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_backup_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - name = client.backup_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[BACKUP]") - - with pytest.raises(CustomException): - client.get_backup(name) - - def test_list_backups(self): - # 
Setup Expected Response - next_page_token = "" - backups_element = {} - backups = [backups_element] - expected_response = {"next_page_token": next_page_token, "backups": backups} - expected_response = bigtable_table_admin_pb2.ListBackupsResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - - paged_list_response = client.list_backups(parent) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.backups[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.ListBackupsRequest(parent=parent) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_backups_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - parent = client.cluster_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]") - - paged_list_response = client.list_backups(parent) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_update_backup(self): - # Setup Expected Response - name = "name3373707" - source_table = "sourceTable1670858410" - size_bytes = 1796325715 - expected_response = { - "name": name, - "source_table": source_table, - "size_bytes": size_bytes, - } - expected_response = table_pb2.Backup(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - backup = {} - update_mask = {} - - response = client.update_backup(backup, update_mask) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.UpdateBackupRequest( - backup=backup, update_mask=update_mask - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_backup_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - backup = {} - update_mask = {} - - with pytest.raises(CustomException): - client.update_backup(backup, update_mask) - - def test_delete_backup(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup Request - name = client.backup_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[BACKUP]") - - client.delete_backup(name) - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.DeleteBackupRequest(name=name) - actual_request = channel.requests[0][1] - assert expected_request == 
actual_request - - def test_delete_backup_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - # Setup request - name = client.backup_path("[PROJECT]", "[INSTANCE]", "[CLUSTER]", "[BACKUP]") - - with pytest.raises(CustomException): - client.delete_backup(name) - - def test_restore_table(self): - # Setup Expected Response - name = "name3373707" - expected_response = {"name": name} - expected_response = table_pb2.Table(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_restore_table", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - response = client.restore_table() - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = bigtable_table_admin_pb2.RestoreTableRequest() - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_restore_table_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_restore_table_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = bigtable_admin_v2.BigtableTableAdminClient() - - response = client.restore_table() - exception = response.exception() - assert exception.errors[0] == error diff --git a/packages/google-cloud-bigtable/tests/unit/test_app_profile.py b/packages/google-cloud-bigtable/tests/unit/test_app_profile.py index f7ec0a85511f..d0a08c5e12b1 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_app_profile.py +++ b/packages/google-cloud-bigtable/tests/unit/test_app_profile.py @@ -166,7 +166,7 @@ def test___ne__(self): self.assertTrue(app_profile1 != app_profile2) def test_from_pb_success_routing_any(self): - from google.cloud.bigtable_admin_v2.types import instance_pb2 as data_v2_pb2 + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 from google.cloud.bigtable.enums import RoutingPolicyType client = _Client(self.PROJECT) @@ -195,7 +195,7 @@ def test_from_pb_success_routing_any(self): self.assertEqual(app_profile.allow_transactional_writes, False) def test_from_pb_success_routing_single(self): - from google.cloud.bigtable_admin_v2.types import instance_pb2 as data_v2_pb2 + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 from google.cloud.bigtable.enums import RoutingPolicyType client = _Client(self.PROJECT) @@ -228,7 +228,7 @@ def test_from_pb_success_routing_single(self): ) def test_from_pb_bad_app_profile_name(self): - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 bad_app_profile_name = "BAD_NAME" @@ -239,7 +239,7 @@ def test_from_pb_bad_app_profile_name(self): klass.from_pb(app_profile_pb, None) def 
test_from_pb_instance_id_mistmatch(self): - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 ALT_INSTANCE_ID = "ALT_INSTANCE_ID" client = _Client(self.PROJECT) @@ -253,7 +253,7 @@ def test_from_pb_instance_id_mistmatch(self): klass.from_pb(app_profile_pb, instance) def test_from_pb_project_mistmatch(self): - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 ALT_PROJECT = "ALT_PROJECT" client = _Client(project=ALT_PROJECT) @@ -267,11 +267,13 @@ def test_from_pb_project_mistmatch(self): klass.from_pb(app_profile_pb, instance) def test_reload_routing_any(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 from google.cloud.bigtable.enums import RoutingPolicyType - api = bigtable_instance_admin_client.BigtableInstanceAdminClient(mock.Mock()) + api = mock.create_autospec(BigtableInstanceAdminClient) credentials = _make_credentials() client = self._make_client( project=self.PROJECT, credentials=credentials, admin=True @@ -305,7 +307,7 @@ def test_reload_routing_any(self): # Patch the stub used by the API method. client._instance_admin_client = api - instance_stub = client._instance_admin_client.transport + instance_stub = client._instance_admin_client instance_stub.get_app_profile.side_effect = [response_pb] # Create expected_result. @@ -328,13 +330,13 @@ def test_reload_routing_any(self): ) def test_exists(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 from google.api_core import exceptions - instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient( - mock.Mock() - ) + instance_api = mock.create_autospec(BigtableInstanceAdminClient) credentials = _make_credentials() client = self._make_client( project=self.PROJECT, credentials=credentials, admin=True @@ -347,7 +349,7 @@ def test_exists(self): # Patch the stub used by the API method. 
client._instance_admin_client = instance_api - instance_stub = client._instance_admin_client.transport + instance_stub = client._instance_admin_client instance_stub.get_app_profile.side_effect = [ response_pb, exceptions.NotFound("testing"), @@ -364,11 +366,10 @@ def test_exists(self): alt_app_profile.exists() def test_create_routing_any(self): - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as messages_v2_pb2, - ) from google.cloud.bigtable.enums import RoutingPolicyType - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) credentials = _make_credentials() client = self._make_client( @@ -386,23 +387,34 @@ def test_create_routing_any(self): routing_policy_type=routing, description=description, ) + expected_request_app_profile = app_profile._to_pb() - expected_request = messages_v2_pb2.CreateAppProfileRequest( - parent=instance.name, - app_profile_id=self.APP_PROFILE_ID, - app_profile=expected_request_app_profile, - ignore_warnings=ignore_warnings, - ) + name = instance.name + expected_request = { + "request": { + "parent": name, + "app_profile_id": self.APP_PROFILE_ID, + "app_profile": expected_request_app_profile, + "ignore_warnings": ignore_warnings, + } + } + + instance_api = mock.create_autospec(BigtableInstanceAdminClient) + instance_api.app_profile_path.return_value = ( + "projects/project/instances/instance-id/appProfiles/app-profile-id" + ) + instance_api.instance_path.return_value = name + instance_api.create_app_profile.return_value = expected_request_app_profile # Patch the stub used by the API method. - channel = ChannelStub(responses=[expected_request_app_profile]) - instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient( - channel=channel - ) client._instance_admin_client = instance_api + app_profile._instance._client._instance_admin_client = instance_api # Perform the method and check the result. result = app_profile.create(ignore_warnings) - actual_request = channel.requests[0][1] + + actual_request = client._instance_admin_client.create_app_profile.call_args_list[ + 0 + ].kwargs self.assertEqual(actual_request, expected_request) self.assertIsInstance(result, self._get_target_class()) @@ -414,11 +426,10 @@ def test_create_routing_any(self): self.assertIsNone(result.cluster_id) def test_create_routing_single(self): - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as messages_v2_pb2, - ) from google.cloud.bigtable.enums import RoutingPolicyType - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) credentials = _make_credentials() client = self._make_client( @@ -440,22 +451,30 @@ def test_create_routing_single(self): allow_transactional_writes=allow_writes, ) expected_request_app_profile = app_profile._to_pb() - expected_request = messages_v2_pb2.CreateAppProfileRequest( - parent=instance.name, - app_profile_id=self.APP_PROFILE_ID, - app_profile=expected_request_app_profile, - ignore_warnings=ignore_warnings, - ) + instance_name = instance.name + expected_request = { + "request": { + "parent": instance_name, + "app_profile_id": self.APP_PROFILE_ID, + "app_profile": expected_request_app_profile, + "ignore_warnings": ignore_warnings, + } + } # Patch the stub used by the API method. 
- channel = ChannelStub(responses=[expected_request_app_profile]) - instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient( - channel=channel + instance_api = mock.create_autospec(BigtableInstanceAdminClient) + instance_api.app_profile_path.return_value = ( + "projects/project/instances/instance-id/appProfiles/app-profile-id" ) + instance_api.instance_path.return_value = instance_name + instance_api.create_app_profile.return_value = expected_request_app_profile client._instance_admin_client = instance_api # Perform the method and check the result. result = app_profile.create(ignore_warnings) - actual_request = channel.requests[0][1] + + actual_request = client._instance_admin_client.create_app_profile.call_args_list[ + 0 + ].kwargs self.assertEqual(actual_request, expected_request) self.assertIsInstance(result, self._get_target_class()) @@ -479,14 +498,15 @@ def test_create_app_profile_with_wrong_routing_policy(self): app_profile.create() def test_update_app_profile_routing_any(self): - from google.api_core import operation from google.longrunning import operations_pb2 from google.protobuf.any_pb2 import Any - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as messages_v2_pb2, + from google.cloud.bigtable_admin_v2.types import ( + bigtable_instance_admin as messages_v2_pb2, ) from google.cloud.bigtable.enums import RoutingPolicyType - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) from google.protobuf import field_mask_pb2 credentials = _make_credentials() @@ -510,19 +530,20 @@ def test_update_app_profile_routing_any(self): # Create response_pb metadata = messages_v2_pb2.UpdateAppProfileMetadata() type_url = "type.googleapis.com/{}".format( - messages_v2_pb2.UpdateAppProfileMetadata.DESCRIPTOR.full_name + messages_v2_pb2.UpdateAppProfileMetadata._meta._pb.DESCRIPTOR.full_name ) response_pb = operations_pb2.Operation( name=self.OP_NAME, - metadata=Any(type_url=type_url, value=metadata.SerializeToString()), + metadata=Any(type_url=type_url, value=metadata._pb.SerializeToString()), ) # Patch the stub used by the API method. - channel = ChannelStub(responses=[response_pb]) - instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient( - channel=channel - ) + instance_api = mock.create_autospec(BigtableInstanceAdminClient) # Mock api calls + instance_api.app_profile_path.return_value = ( + "projects/project/instances/instance-id/appProfiles/app-profile-id" + ) + client._instance_admin_client = instance_api # Perform the method and check the result. 
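[Example, not part of the patch] The hunks above and below replace ChannelStub-backed GAPIC clients with mock.create_autospec(...) doubles and then assert on the request dict captured via call_args_list[...].kwargs. A minimal, self-contained sketch of that pattern, using a hypothetical FakeInstanceAdminClient stand-in rather than the real generated client (and relying on Python 3.8+ for Call.kwargs), might look like:

    from unittest import mock

    class FakeInstanceAdminClient:
        # Hypothetical stand-in for the generated BigtableInstanceAdminClient.
        def create_app_profile(self, request=None):
            raise NotImplementedError

    # Autospec keeps the double faithful to the stand-in's method names and signatures.
    api = mock.create_autospec(FakeInstanceAdminClient)
    api.create_app_profile.return_value = {"name": "projects/p/instances/i/appProfiles/a"}

    # Code under test calls the v2 surface with a single ``request`` dict.
    result = api.create_app_profile(
        request={"parent": "projects/p/instances/i", "app_profile_id": "a", "ignore_warnings": True}
    )
    assert result == {"name": "projects/p/instances/i/appProfiles/a"}

    # The test then inspects the recorded keyword arguments (``.kwargs`` is Python 3.8+).
    assert api.create_app_profile.call_args_list[0].kwargs == {
        "request": {"parent": "projects/p/instances/i", "app_profile_id": "a", "ignore_warnings": True}
    }

Autospeccing means a misspelled method or a stray positional argument fails the test instead of passing silently, which is why these hunks prefer it over hand-rolled channel stubs.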
@@ -530,29 +551,38 @@ def test_update_app_profile_routing_any(self): expected_request_update_mask = field_mask_pb2.FieldMask( paths=["description", "single_cluster_routing"] ) - expected_request = messages_v2_pb2.UpdateAppProfileRequest( - app_profile=app_profile._to_pb(), - update_mask=expected_request_update_mask, - ignore_warnings=ignore_warnings, - ) + expected_request = { + "request": { + "app_profile": app_profile._to_pb(), + "update_mask": expected_request_update_mask, + "ignore_warnings": ignore_warnings, + } + } + + instance_api.update_app_profile.return_value = response_pb + app_profile._instance._client._instance_admin_client = instance_api result = app_profile.update(ignore_warnings=ignore_warnings) - actual_request = channel.requests[0][1] + actual_request = client._instance_admin_client.update_app_profile.call_args_list[ + 0 + ].kwargs self.assertEqual(actual_request, expected_request) - self.assertIsInstance(result, operation.Operation) - self.assertEqual(result.operation.name, self.OP_NAME) - self.assertIsInstance(result.metadata, messages_v2_pb2.UpdateAppProfileMetadata) + self.assertEqual( + result.metadata.type_url, + "type.googleapis.com/google.bigtable.admin.v2.UpdateAppProfileMetadata", + ) def test_update_app_profile_routing_single(self): - from google.api_core import operation from google.longrunning import operations_pb2 from google.protobuf.any_pb2 import Any - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as messages_v2_pb2, + from google.cloud.bigtable_admin_v2.types import ( + bigtable_instance_admin as messages_v2_pb2, ) from google.cloud.bigtable.enums import RoutingPolicyType - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) from google.protobuf import field_mask_pb2 credentials = _make_credentials() @@ -569,39 +599,43 @@ def test_update_app_profile_routing_single(self): # Create response_pb metadata = messages_v2_pb2.UpdateAppProfileMetadata() type_url = "type.googleapis.com/{}".format( - messages_v2_pb2.UpdateAppProfileMetadata.DESCRIPTOR.full_name + messages_v2_pb2.UpdateAppProfileMetadata._meta._pb.DESCRIPTOR.full_name ) response_pb = operations_pb2.Operation( name=self.OP_NAME, - metadata=Any(type_url=type_url, value=metadata.SerializeToString()), + metadata=Any(type_url=type_url, value=metadata._pb.SerializeToString()), ) # Patch the stub used by the API method. - channel = ChannelStub(responses=[response_pb]) - instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient( - channel=channel - ) + instance_api = mock.create_autospec(BigtableInstanceAdminClient) # Mock api calls + instance_api.app_profile_path.return_value = ( + "projects/project/instances/instance-id/appProfiles/app-profile-id" + ) client._instance_admin_client = instance_api - + client._instance_admin_client.update_app_profile.return_value = response_pb # Perform the method and check the result. 
ignore_warnings = True expected_request_update_mask = field_mask_pb2.FieldMask( paths=["multi_cluster_routing_use_any"] ) - expected_request = messages_v2_pb2.UpdateAppProfileRequest( - app_profile=app_profile._to_pb(), - update_mask=expected_request_update_mask, - ignore_warnings=ignore_warnings, - ) + expected_request = { + "request": { + "app_profile": app_profile._to_pb(), + "update_mask": expected_request_update_mask, + "ignore_warnings": ignore_warnings, + } + } result = app_profile.update(ignore_warnings=ignore_warnings) - actual_request = channel.requests[0][1] - + actual_request = client._instance_admin_client.update_app_profile.call_args_list[ + 0 + ].kwargs self.assertEqual(actual_request, expected_request) - self.assertIsInstance(result, operation.Operation) - self.assertEqual(result.operation.name, self.OP_NAME) - self.assertIsInstance(result.metadata, messages_v2_pb2.UpdateAppProfileMetadata) + self.assertEqual( + result.metadata.type_url, + "type.googleapis.com/google.bigtable.admin.v2.UpdateAppProfileMetadata", + ) def test_update_app_profile_with_wrong_routing_policy(self): credentials = _make_credentials() @@ -617,12 +651,12 @@ def test_update_app_profile_with_wrong_routing_policy(self): def test_delete(self): from google.protobuf import empty_pb2 - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client - - instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient( - mock.Mock() + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, ) + instance_api = mock.create_autospec(BigtableInstanceAdminClient) + credentials = _make_credentials() client = self._make_client( project=self.PROJECT, credentials=credentials, admin=True diff --git a/packages/google-cloud-bigtable/tests/unit/test_backup.py b/packages/google-cloud-bigtable/tests/unit/test_backup.py index 0285d668bf74..68e5f6162105 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_backup.py +++ b/packages/google-cloud-bigtable/tests/unit/test_backup.py @@ -93,35 +93,35 @@ def test_constructor_non_defaults(self): self.assertIsNone(backup._state) def test_from_pb_project_mismatch(self): - from google.cloud.bigtable_admin_v2.proto import table_pb2 + from google.cloud.bigtable_admin_v2.types import table alt_project_id = "alt-project-id" client = _Client(project=alt_project_id) instance = _Instance(self.INSTANCE_NAME, client) - backup_pb = table_pb2.Backup(name=self.BACKUP_NAME) + backup_pb = table.Backup(name=self.BACKUP_NAME) klasse = self._get_target_class() with self.assertRaises(ValueError): klasse.from_pb(backup_pb, instance) def test_from_pb_instance_mismatch(self): - from google.cloud.bigtable_admin_v2.proto import table_pb2 + from google.cloud.bigtable_admin_v2.types import table alt_instance = "/projects/%s/instances/alt-instance" % self.PROJECT_ID client = _Client() instance = _Instance(alt_instance, client) - backup_pb = table_pb2.Backup(name=self.BACKUP_NAME) + backup_pb = table.Backup(name=self.BACKUP_NAME) klasse = self._get_target_class() with self.assertRaises(ValueError): klasse.from_pb(backup_pb, instance) def test_from_pb_bad_name(self): - from google.cloud.bigtable_admin_v2.proto import table_pb2 + from google.cloud.bigtable_admin_v2.types import table client = _Client() instance = _Instance(self.INSTANCE_NAME, client) - backup_pb = table_pb2.Backup(name="invalid_name") + backup_pb = table.Backup(name="invalid_name") klasse = self._get_target_class() with self.assertRaises(ValueError): @@ -129,7 +129,7 
@@ def test_from_pb_bad_name(self): def test_from_pb_success(self): from google.cloud.bigtable_admin_v2.gapic import enums - from google.cloud.bigtable_admin_v2.proto import table_pb2 + from google.cloud.bigtable_admin_v2.types import table from google.cloud._helpers import _datetime_to_pb_timestamp client = _Client() @@ -137,7 +137,7 @@ def test_from_pb_success(self): timestamp = _datetime_to_pb_timestamp(self._make_timestamp()) size_bytes = 1234 state = enums.Backup.State.READY - backup_pb = table_pb2.Backup( + backup_pb = table.Backup( name=self.BACKUP_NAME, source_table=self.TABLE_NAME, expire_time=timestamp, @@ -156,16 +156,18 @@ def test_from_pb_success(self): self.assertEqual(backup.cluster, self.CLUSTER_ID) self.assertEqual(backup.table_id, self.TABLE_ID) self.assertEqual(backup._expire_time, timestamp) - self.assertEqual(backup._start_time, timestamp) - self.assertEqual(backup._end_time, timestamp) + self.assertEqual(backup.start_time, timestamp) + self.assertEqual(backup.end_time, timestamp) self.assertEqual(backup._size_bytes, size_bytes) self.assertEqual(backup._state, state) def test_property_name(self): from google.cloud.bigtable.client import Client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) - api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) + api = mock.create_autospec(BigtableInstanceAdminClient) credentials = _make_credentials() client = Client(project=self.PROJECT_ID, credentials=credentials, admin=True) client._table_admin_client = api @@ -186,17 +188,16 @@ def test_property_cluster_setter(self): self.assertEqual(backup.cluster, self.CLUSTER_ID) def test_property_parent_none(self): - backup = self._make_one( - self.BACKUP_ID, - _Instance(self.INSTANCE_NAME), - ) + backup = self._make_one(self.BACKUP_ID, _Instance(self.INSTANCE_NAME),) self.assertIsNone(backup.parent) def test_property_parent_w_cluster(self): from google.cloud.bigtable.client import Client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) - api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) + api = mock.create_autospec(BigtableInstanceAdminClient) credentials = _make_credentials() client = Client(project=self.PROJECT_ID, credentials=credentials, admin=True) client._table_admin_client = api @@ -208,9 +209,11 @@ def test_property_parent_w_cluster(self): def test_property_source_table_none(self): from google.cloud.bigtable.client import Client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) - api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) + api = mock.create_autospec(BigtableInstanceAdminClient) credentials = _make_credentials() client = Client(project=self.PROJECT_ID, credentials=credentials, admin=True) client._table_admin_client = api @@ -221,9 +224,11 @@ def test_property_source_table_none(self): def test_property_source_table_valid(self): from google.cloud.bigtable.client import Client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) - api = 
bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) + api = mock.create_autospec(BigtableInstanceAdminClient) credentials = _make_credentials() client = Client(project=self.PROJECT_ID, credentials=credentials, admin=True) client._table_admin_client = api @@ -298,10 +303,10 @@ def test_create_grpc_error(self): from google.api_core.exceptions import GoogleAPICallError from google.api_core.exceptions import Unknown from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.bigtable_admin_v2.types import table_pb2 + from google.cloud.bigtable_admin_v2.types import table client = _Client() - api = client.table_admin_client = self._make_table_admin_client() + api = client._table_admin_client = self._make_table_admin_client() api.create_backup.side_effect = Unknown("testing") timestamp = self._make_timestamp() @@ -312,7 +317,7 @@ def test_create_grpc_error(self): expire_time=timestamp, ) - backup_pb = table_pb2.Backup( + backup_pb = table.Backup( source_table=self.TABLE_NAME, expire_time=_datetime_to_pb_timestamp(timestamp), ) @@ -321,18 +326,20 @@ def test_create_grpc_error(self): backup.create(self.CLUSTER_ID) api.create_backup.assert_called_once_with( - parent=self.CLUSTER_NAME, - backup_id=self.BACKUP_ID, - backup=backup_pb, + request={ + "parent": self.CLUSTER_NAME, + "backup_id": self.BACKUP_ID, + "backup": backup_pb, + } ) def test_create_already_exists(self): from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.bigtable_admin_v2.types import table_pb2 + from google.cloud.bigtable_admin_v2.types import table from google.cloud.exceptions import Conflict client = _Client() - api = client.table_admin_client = self._make_table_admin_client() + api = client._table_admin_client = self._make_table_admin_client() api.create_backup.side_effect = Conflict("testing") timestamp = self._make_timestamp() @@ -343,7 +350,7 @@ def test_create_already_exists(self): expire_time=timestamp, ) - backup_pb = table_pb2.Backup( + backup_pb = table.Backup( source_table=self.TABLE_NAME, expire_time=_datetime_to_pb_timestamp(timestamp), ) @@ -352,18 +359,20 @@ def test_create_already_exists(self): backup.create(self.CLUSTER_ID) api.create_backup.assert_called_once_with( - parent=self.CLUSTER_NAME, - backup_id=self.BACKUP_ID, - backup=backup_pb, + request={ + "parent": self.CLUSTER_NAME, + "backup_id": self.BACKUP_ID, + "backup": backup_pb, + } ) def test_create_instance_not_found(self): from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.bigtable_admin_v2.types import table_pb2 + from google.cloud.bigtable_admin_v2.types import table from google.cloud.exceptions import NotFound client = _Client() - api = client.table_admin_client = self._make_table_admin_client() + api = client._table_admin_client = self._make_table_admin_client() api.create_backup.side_effect = NotFound("testing") timestamp = self._make_timestamp() @@ -374,7 +383,7 @@ def test_create_instance_not_found(self): expire_time=timestamp, ) - backup_pb = table_pb2.Backup( + backup_pb = table.Backup( source_table=self.TABLE_NAME, expire_time=_datetime_to_pb_timestamp(timestamp), ) @@ -383,9 +392,11 @@ def test_create_instance_not_found(self): backup.create(self.CLUSTER_ID) api.create_backup.assert_called_once_with( - parent=self.CLUSTER_NAME, - backup_id=self.BACKUP_ID, - backup=backup_pb, + request={ + "parent": self.CLUSTER_NAME, + "backup_id": self.BACKUP_ID, + "backup": backup_pb, + } ) def test_create_cluster_not_set(self): @@ -411,9 +422,7 @@ def 
test_create_table_not_set(self): def test_create_expire_time_not_set(self): backup = self._make_one( - self.BACKUP_ID, - _Instance(self.INSTANCE_NAME), - table_id=self.TABLE_ID, + self.BACKUP_ID, _Instance(self.INSTANCE_NAME), table_id=self.TABLE_ID, ) with self.assertRaises(ValueError): @@ -421,11 +430,13 @@ def test_create_expire_time_not_set(self): def test_create_success(self): from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.bigtable_admin_v2.types import table_pb2 + from google.cloud.bigtable_admin_v2.types import table + from google.cloud.bigtable import Client op_future = object() - client = _Client() - api = client.table_admin_client = self._make_table_admin_client() + credentials = _make_credentials() + client = Client(project=self.PROJECT_ID, credentials=credentials, admin=True) + api = client._table_admin_client = self._make_table_admin_client() api.create_backup.return_value = op_future timestamp = self._make_timestamp() @@ -436,7 +447,7 @@ def test_create_success(self): expire_time=timestamp, ) - backup_pb = table_pb2.Backup( + backup_pb = table.Backup( source_table=self.TABLE_NAME, expire_time=_datetime_to_pb_timestamp(timestamp), ) @@ -446,16 +457,18 @@ def test_create_success(self): self.assertIs(future, op_future) api.create_backup.assert_called_once_with( - parent=self.CLUSTER_NAME, - backup_id=self.BACKUP_ID, - backup=backup_pb, + request={ + "parent": self.CLUSTER_NAME, + "backup_id": self.BACKUP_ID, + "backup": backup_pb, + } ) def test_exists_grpc_error(self): from google.api_core.exceptions import Unknown client = _Client() - api = client.table_admin_client = self._make_table_admin_client() + api = client._table_admin_client = self._make_table_admin_client() api.get_backup.side_effect = Unknown("testing") instance = _Instance(self.INSTANCE_NAME, client=client) @@ -463,14 +476,13 @@ def test_exists_grpc_error(self): with self.assertRaises(Unknown): backup.exists() - - api.get_backup.assert_called_once_with(self.BACKUP_NAME) + api.get_backup(self.BACKUP_NAME) def test_exists_not_found(self): from google.api_core.exceptions import NotFound client = _Client() - api = client.table_admin_client = self._make_table_admin_client() + api = client._table_admin_client = self._make_table_admin_client() api.get_backup.side_effect = NotFound("testing") instance = _Instance(self.INSTANCE_NAME, client=client) @@ -478,18 +490,18 @@ def test_exists_not_found(self): self.assertFalse(backup.exists()) - api.get_backup.assert_called_once_with(self.BACKUP_NAME) + api.get_backup.assert_called_once_with(request={"name": self.BACKUP_NAME}) def test_get(self): from google.cloud.bigtable_admin_v2.gapic import enums - from google.cloud.bigtable_admin_v2.proto import table_pb2 + from google.cloud.bigtable_admin_v2.types import table from google.cloud._helpers import _datetime_to_pb_timestamp timestamp = _datetime_to_pb_timestamp(self._make_timestamp()) state = enums.Backup.State.READY client = _Client() - backup_pb = table_pb2.Backup( + backup_pb = table.Backup( name=self.BACKUP_NAME, source_table=self.TABLE_NAME, expire_time=timestamp, @@ -498,7 +510,7 @@ def test_get(self): size_bytes=0, state=state, ) - api = client.table_admin_client = self._make_table_admin_client() + api = client._table_admin_client = self._make_table_admin_client() api.get_backup.return_value = backup_pb instance = _Instance(self.INSTANCE_NAME, client=client) @@ -508,14 +520,14 @@ def test_get(self): def test_reload(self): from google.cloud.bigtable_admin_v2.gapic import enums - from 
google.cloud.bigtable_admin_v2.proto import table_pb2 + from google.cloud.bigtable_admin_v2.types import table from google.cloud._helpers import _datetime_to_pb_timestamp timestamp = _datetime_to_pb_timestamp(self._make_timestamp()) state = enums.Backup.State.READY client = _Client() - backup_pb = table_pb2.Backup( + backup_pb = table.Backup( name=self.BACKUP_NAME, source_table=self.TABLE_NAME, expire_time=timestamp, @@ -524,7 +536,7 @@ def test_reload(self): size_bytes=0, state=state, ) - api = client.table_admin_client = self._make_table_admin_client() + api = client._table_admin_client = self._make_table_admin_client() api.get_backup.return_value = backup_pb instance = _Instance(self.INSTANCE_NAME, client=client) @@ -539,11 +551,11 @@ def test_reload(self): self.assertEqual(backup._state, state) def test_exists_success(self): - from google.cloud.bigtable_admin_v2.proto import table_pb2 + from google.cloud.bigtable_admin_v2.types import table client = _Client() - backup_pb = table_pb2.Backup(name=self.BACKUP_NAME) - api = client.table_admin_client = self._make_table_admin_client() + backup_pb = table.Backup(name=self.BACKUP_NAME) + api = client._table_admin_client = self._make_table_admin_client() api.get_backup.return_value = backup_pb instance = _Instance(self.INSTANCE_NAME, client=client) @@ -551,13 +563,13 @@ def test_exists_success(self): self.assertTrue(backup.exists()) - api.get_backup.assert_called_once_with(self.BACKUP_NAME) + api.get_backup.assert_called_once_with(request={"name": self.BACKUP_NAME}) def test_delete_grpc_error(self): from google.api_core.exceptions import Unknown client = _Client() - api = client.table_admin_client = self._make_table_admin_client() + api = client._table_admin_client = self._make_table_admin_client() api.delete_backup.side_effect = Unknown("testing") instance = _Instance(self.INSTANCE_NAME, client=client) backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) @@ -565,13 +577,13 @@ def test_delete_grpc_error(self): with self.assertRaises(Unknown): backup.delete() - api.delete_backup.assert_called_once_with(self.BACKUP_NAME) + api.delete_backup.assert_called_once_with(request={"name": self.BACKUP_NAME}) def test_delete_not_found(self): from google.api_core.exceptions import NotFound client = _Client() - api = client.table_admin_client = self._make_table_admin_client() + api = client._table_admin_client = self._make_table_admin_client() api.delete_backup.side_effect = NotFound("testing") instance = _Instance(self.INSTANCE_NAME, client=client) backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) @@ -579,29 +591,29 @@ def test_delete_not_found(self): with self.assertRaises(NotFound): backup.delete() - api.delete_backup.assert_called_once_with(self.BACKUP_NAME) + api.delete_backup.assert_called_once_with(request={"name": self.BACKUP_NAME}) def test_delete_success(self): from google.protobuf.empty_pb2 import Empty client = _Client() - api = client.table_admin_client = self._make_table_admin_client() + api = client._table_admin_client = self._make_table_admin_client() api.delete_backup.return_value = Empty() instance = _Instance(self.INSTANCE_NAME, client=client) backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) backup.delete() - api.delete_backup.assert_called_once_with(self.BACKUP_NAME) + api.delete_backup.assert_called_once_with(request={"name": self.BACKUP_NAME}) def test_update_expire_time_grpc_error(self): from google.api_core.exceptions import Unknown from 
google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.bigtable_admin_v2.types import table_pb2 + from google.cloud.bigtable_admin_v2.types import table from google.protobuf import field_mask_pb2 client = _Client() - api = client.table_admin_client = self._make_table_admin_client() + api = client._table_admin_client = self._make_table_admin_client() api.update_backup.side_effect = Unknown("testing") instance = _Instance(self.INSTANCE_NAME, client=client) backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) @@ -610,24 +622,22 @@ def test_update_expire_time_grpc_error(self): with self.assertRaises(Unknown): backup.update_expire_time(expire_time) - backup_update = table_pb2.Backup( - name=self.BACKUP_NAME, - expire_time=_datetime_to_pb_timestamp(expire_time), + backup_update = table.Backup( + name=self.BACKUP_NAME, expire_time=_datetime_to_pb_timestamp(expire_time), ) update_mask = field_mask_pb2.FieldMask(paths=["expire_time"]) api.update_backup.assert_called_once_with( - backup_update, - update_mask, + request={"backup": backup_update, "update_mask": update_mask} ) def test_update_expire_time_not_found(self): from google.api_core.exceptions import NotFound from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.bigtable_admin_v2.types import table_pb2 + from google.cloud.bigtable_admin_v2.types import table from google.protobuf import field_mask_pb2 client = _Client() - api = client.table_admin_client = self._make_table_admin_client() + api = client._table_admin_client = self._make_table_admin_client() api.update_backup.side_effect = NotFound("testing") instance = _Instance(self.INSTANCE_NAME, client=client) backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) @@ -636,38 +646,34 @@ def test_update_expire_time_not_found(self): with self.assertRaises(NotFound): backup.update_expire_time(expire_time) - backup_update = table_pb2.Backup( - name=self.BACKUP_NAME, - expire_time=_datetime_to_pb_timestamp(expire_time), + backup_update = table.Backup( + name=self.BACKUP_NAME, expire_time=_datetime_to_pb_timestamp(expire_time), ) update_mask = field_mask_pb2.FieldMask(paths=["expire_time"]) api.update_backup.assert_called_once_with( - backup_update, - update_mask, + request={"backup": backup_update, "update_mask": update_mask} ) def test_update_expire_time_success(self): from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.bigtable_admin_v2.proto import table_pb2 + from google.cloud.bigtable_admin_v2.types import table from google.protobuf import field_mask_pb2 client = _Client() - api = client.table_admin_client = self._make_table_admin_client() - api.update_backup.return_type = table_pb2.Backup(name=self.BACKUP_NAME) + api = client._table_admin_client = self._make_table_admin_client() + api.update_backup.return_type = table.Backup(name=self.BACKUP_NAME) instance = _Instance(self.INSTANCE_NAME, client=client) backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) expire_time = self._make_timestamp() backup.update_expire_time(expire_time) - backup_update = table_pb2.Backup( - name=self.BACKUP_NAME, - expire_time=_datetime_to_pb_timestamp(expire_time), + backup_update = table.Backup( + name=self.BACKUP_NAME, expire_time=_datetime_to_pb_timestamp(expire_time), ) update_mask = field_mask_pb2.FieldMask(paths=["expire_time"]) api.update_backup.assert_called_once_with( - backup_update, - update_mask, + request={"backup": backup_update, "update_mask": update_mask} ) 
def test_restore_grpc_error(self): @@ -675,7 +681,7 @@ def test_restore_grpc_error(self): from google.api_core.exceptions import Unknown client = _Client() - api = client.table_admin_client = self._make_table_admin_client() + api = client._table_admin_client = self._make_table_admin_client() api.restore_table.side_effect = Unknown("testing") timestamp = self._make_timestamp() @@ -691,14 +697,16 @@ def test_restore_grpc_error(self): backup.restore(self.TABLE_ID) api.restore_table.assert_called_once_with( - parent=self.INSTANCE_NAME, - table_id=self.TABLE_ID, - backup=self.BACKUP_NAME, + request={ + "parent": self.INSTANCE_NAME, + "table_id": self.TABLE_ID, + "backup": self.BACKUP_NAME, + } ) def test_restore_cluster_not_set(self): client = _Client() - client.table_admin_client = self._make_table_admin_client() + client._table_admin_client = self._make_table_admin_client() backup = self._make_one( self.BACKUP_ID, _Instance(self.INSTANCE_NAME, client=client), @@ -712,7 +720,7 @@ def test_restore_cluster_not_set(self): def test_restore_success(self): op_future = object() client = _Client() - api = client.table_admin_client = self._make_table_admin_client() + api = client._table_admin_client = self._make_table_admin_client() api.restore_table.return_value = op_future timestamp = self._make_timestamp() @@ -729,14 +737,18 @@ def test_restore_success(self): self.assertIs(future, op_future) api.restore_table.assert_called_once_with( - parent=self.INSTANCE_NAME, - table_id=self.TABLE_ID, - backup=self.BACKUP_NAME, + request={ + "parent": self.INSTANCE_NAME, + "table_id": self.TABLE_ID, + "backup": self.BACKUP_NAME, + } ) def test_get_iam_policy(self): from google.cloud.bigtable.client import Client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + BigtableTableAdminClient, + ) from google.iam.v1 import policy_pb2 from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE @@ -752,15 +764,15 @@ def test_get_iam_policy(self): bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": members}] iam_policy = policy_pb2.Policy(version=version, etag=etag, bindings=bindings) - table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient - ) + table_api = mock.create_autospec(BigtableTableAdminClient) client._table_admin_client = table_api table_api.get_iam_policy.return_value = iam_policy result = backup.get_iam_policy() - table_api.get_iam_policy.assert_called_once_with(resource=backup.name) + table_api.get_iam_policy.assert_called_once_with( + request={"resource": backup.name} + ) self.assertEqual(result.version, version) self.assertEqual(result.etag, etag) @@ -771,7 +783,9 @@ def test_get_iam_policy(self): def test_set_iam_policy(self): from google.cloud.bigtable.client import Client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + BigtableTableAdminClient, + ) from google.iam.v1 import policy_pb2 from google.cloud.bigtable.policy import Policy from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE @@ -788,9 +802,7 @@ def test_set_iam_policy(self): bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": sorted(members)}] iam_policy_pb = policy_pb2.Policy(version=version, etag=etag, bindings=bindings) - table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient - ) + table_api = mock.create_autospec(BigtableTableAdminClient) 
client._table_admin_client = table_api table_api.set_iam_policy.return_value = iam_policy_pb @@ -803,7 +815,7 @@ def test_set_iam_policy(self): result = backup.set_iam_policy(iam_policy) table_api.set_iam_policy.assert_called_once_with( - resource=backup.name, policy=iam_policy_pb + request={"resource": backup.name, "policy": iam_policy_pb} ) self.assertEqual(result.version, version) self.assertEqual(result.etag, etag) @@ -815,7 +827,9 @@ def test_set_iam_policy(self): def test_test_iam_permissions(self): from google.cloud.bigtable.client import Client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + BigtableTableAdminClient, + ) from google.iam.v1 import iam_policy_pb2 credentials = _make_credentials() @@ -828,9 +842,7 @@ def test_test_iam_permissions(self): response = iam_policy_pb2.TestIamPermissionsResponse(permissions=permissions) - table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient - ) + table_api = mock.create_autospec(BigtableTableAdminClient) table_api.test_iam_permissions.return_value = response client._table_admin_client = table_api @@ -838,7 +850,7 @@ def test_test_iam_permissions(self): self.assertEqual(result, permissions) table_api.test_iam_permissions.assert_called_once_with( - resource=backup.name, permissions=permissions + request={"resource": backup.name, "permissions": permissions} ) diff --git a/packages/google-cloud-bigtable/tests/unit/test_client.py b/packages/google-cloud-bigtable/tests/unit/test_client.py index 21ec479d0799..60a2cd738541 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/test_client.py @@ -223,7 +223,7 @@ def test_table_data_client_not_initialized(self): table_data_client = client.table_data_client self.assertIsInstance(table_data_client, BigtableClient) - self.assertIs(table_data_client._client_info, _CLIENT_INFO) + self.assertIs(client._client_info, _CLIENT_INFO) self.assertIs(client._table_data_client, table_data_client) def test_table_data_client_not_initialized_w_client_info(self): @@ -237,7 +237,7 @@ def test_table_data_client_not_initialized_w_client_info(self): table_data_client = client.table_data_client self.assertIsInstance(table_data_client, BigtableClient) - self.assertIs(table_data_client._client_info, client_info) + self.assertIs(client._client_info, client_info) self.assertIs(client._table_data_client, table_data_client) def test_table_data_client_not_initialized_w_client_options(self): @@ -292,7 +292,7 @@ def test_table_admin_client_not_initialized_w_admin_flag(self): table_admin_client = client.table_admin_client self.assertIsInstance(table_admin_client, BigtableTableAdminClient) - self.assertIs(table_admin_client._client_info, _CLIENT_INFO) + self.assertIs(client._client_info, _CLIENT_INFO) self.assertIs(client._table_admin_client, table_admin_client) def test_table_admin_client_not_initialized_w_client_info(self): @@ -309,7 +309,7 @@ def test_table_admin_client_not_initialized_w_client_info(self): table_admin_client = client.table_admin_client self.assertIsInstance(table_admin_client, BigtableTableAdminClient) - self.assertIs(table_admin_client._client_info, client_info) + self.assertIs(client._client_info, client_info) self.assertIs(client._table_admin_client, table_admin_client) def test_table_admin_client_not_initialized_w_client_options(self): @@ -363,7 +363,7 @@ def 
test_instance_admin_client_not_initialized_w_admin_flag(self): instance_admin_client = client.instance_admin_client self.assertIsInstance(instance_admin_client, BigtableInstanceAdminClient) - self.assertIs(instance_admin_client._client_info, _CLIENT_INFO) + self.assertIs(client._client_info, _CLIENT_INFO) self.assertIs(client._instance_admin_client, instance_admin_client) def test_instance_admin_client_not_initialized_w_client_info(self): @@ -380,7 +380,7 @@ def test_instance_admin_client_not_initialized_w_client_info(self): instance_admin_client = client.instance_admin_client self.assertIsInstance(instance_admin_client, BigtableInstanceAdminClient) - self.assertIs(instance_admin_client._client_info, client_info) + self.assertIs(client._client_info, client_info) self.assertIs(client._instance_admin_client, instance_admin_client) def test_instance_admin_client_not_initialized_w_client_options(self): @@ -460,11 +460,13 @@ def test_instance_factory_non_defaults(self): self.assertIs(instance._client, client) def test_list_instances(self): - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as messages_v2_pb2, + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 + from google.cloud.bigtable_admin_v2.types import ( + bigtable_instance_admin as messages_v2_pb2, + ) + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, ) - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client from google.cloud.bigtable.instance import Instance FAILED_LOCATION = "FAILED" @@ -473,8 +475,9 @@ def test_list_instances(self): INSTANCE_NAME1 = "projects/" + self.PROJECT + "/instances/" + INSTANCE_ID1 INSTANCE_NAME2 = "projects/" + self.PROJECT + "/instances/" + INSTANCE_ID2 + api = mock.create_autospec(BigtableInstanceAdminClient) credentials = _make_credentials() - api = bigtable_instance_admin_client.BigtableInstanceAdminClient(mock.Mock()) + client = self._make_one( project=self.PROJECT, credentials=credentials, admin=True ) @@ -490,8 +493,9 @@ def test_list_instances(self): # Patch the stub used by the API method. client._instance_admin_client = api - bigtable_instance_stub = client.instance_admin_client.transport - bigtable_instance_stub.list_instances.side_effect = [response_pb] + instance_stub = client._instance_admin_client + + instance_stub.list_instances.side_effect = [response_pb] # Perform the method and check the result. 
instances, failed_locations = client.list_instances() @@ -499,26 +503,27 @@ def test_list_instances(self): instance_1, instance_2 = instances self.assertIsInstance(instance_1, Instance) - self.assertEqual(instance_1.name, INSTANCE_NAME1) + self.assertEqual(instance_1.instance_id, INSTANCE_ID1) self.assertTrue(instance_1._client is client) self.assertIsInstance(instance_2, Instance) - self.assertEqual(instance_2.name, INSTANCE_NAME2) + self.assertEqual(instance_2.instance_id, INSTANCE_ID2) self.assertTrue(instance_2._client is client) self.assertEqual(failed_locations, [FAILED_LOCATION]) def test_list_clusters(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as messages_v2_pb2, + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) + from google.cloud.bigtable_admin_v2.types import ( + bigtable_instance_admin as messages_v2_pb2, ) - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 from google.cloud.bigtable.instance import Cluster - instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient( - mock.Mock() - ) + instance_api = mock.create_autospec(BigtableInstanceAdminClient) + credentials = _make_credentials() client = self._make_one( project=self.PROJECT, credentials=credentials, admin=True @@ -553,7 +558,8 @@ def test_list_clusters(self): # Patch the stub used by the API method. client._instance_admin_client = instance_api - instance_stub = client._instance_admin_client.transport + instance_stub = client._instance_admin_client + instance_stub.list_clusters.side_effect = [response_pb] # Perform the method and check the result. 
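[Example, not part of the patch] Several of the migrated tests above, such as test_list_instances and test_list_clusters, feed a single canned response through side_effect on the autospecced client and then assert on the parsed result. A rough, standalone illustration of that stubbing style, again with a hypothetical stand-in class instead of the real BigtableInstanceAdminClient:

    from unittest import mock

    class FakeInstanceAdminClient:
        # Hypothetical stand-in for the generated BigtableInstanceAdminClient.
        def list_clusters(self, request=None):
            raise NotImplementedError

    api = mock.create_autospec(FakeInstanceAdminClient)

    # A one-element ``side_effect`` list hands out exactly one canned response;
    # an unexpected second call would raise StopIteration and fail the test.
    canned = {"clusters": ["cluster-1"], "failed_locations": ["us-east1-d"]}
    api.list_clusters.side_effect = [canned]

    response = api.list_clusters(request={"parent": "projects/p/instances/-"})
    assert response == canned
    api.list_clusters.assert_called_once_with(request={"parent": "projects/p/instances/-"})

Using side_effect with a one-element list, rather than return_value, also guards against the code under test calling the client more than once.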
@@ -562,15 +568,15 @@ def test_list_clusters(self): cluster_1, cluster_2, cluster_3 = clusters self.assertIsInstance(cluster_1, Cluster) - self.assertEqual(cluster_1.name, cluster_name1) + self.assertEqual(cluster_1.cluster_id, cluster_id1) self.assertEqual(cluster_1._instance.instance_id, INSTANCE_ID1) self.assertIsInstance(cluster_2, Cluster) - self.assertEqual(cluster_2.name, cluster_name2) + self.assertEqual(cluster_2.cluster_id, cluster_id2) self.assertEqual(cluster_2._instance.instance_id, INSTANCE_ID2) self.assertIsInstance(cluster_3, Cluster) - self.assertEqual(cluster_3.name, cluster_name3) + self.assertEqual(cluster_3.cluster_id, cluster_id3) self.assertEqual(cluster_3._instance.instance_id, INSTANCE_ID2) self.assertEqual(failed_locations, [failed_location]) diff --git a/packages/google-cloud-bigtable/tests/unit/test_cluster.py b/packages/google-cloud-bigtable/tests/unit/test_cluster.py index 9a0d39c84977..d5f731eb6960 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_cluster.py +++ b/packages/google-cloud-bigtable/tests/unit/test_cluster.py @@ -126,7 +126,7 @@ def test_name_property(self): self.assertEqual(cluster.name, self.CLUSTER_NAME) def test_from_pb_success(self): - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 from google.cloud.bigtable import enums client = _Client(self.PROJECT) @@ -154,7 +154,7 @@ def test_from_pb_success(self): self.assertEqual(cluster.default_storage_type, storage_type) def test_from_pb_bad_cluster_name(self): - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 bad_cluster_name = "BAD_NAME" @@ -165,7 +165,7 @@ def test_from_pb_bad_cluster_name(self): klass.from_pb(cluster_pb, None) def test_from_pb_instance_id_mistmatch(self): - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 ALT_INSTANCE_ID = "ALT_INSTANCE_ID" client = _Client(self.PROJECT) @@ -179,7 +179,7 @@ def test_from_pb_instance_id_mistmatch(self): klass.from_pb(cluster_pb, instance) def test_from_pb_project_mistmatch(self): - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 ALT_PROJECT = "ALT_PROJECT" client = _Client(project=ALT_PROJECT) @@ -222,12 +222,15 @@ def test___ne__(self): self.assertNotEqual(cluster1, cluster2) def test_reload(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 from google.cloud.bigtable.enums import StorageType from google.cloud.bigtable.enums import Cluster - api = bigtable_instance_admin_client.BigtableInstanceAdminClient(mock.Mock()) + api = mock.create_autospec(BigtableInstanceAdminClient) + credentials = _make_credentials() client = self._make_client( project=self.PROJECT, credentials=credentials, admin=True @@ -258,8 +261,8 @@ def test_reload(self): # Patch the stub used by the API method. 
client._instance_admin_client = api - instance_admin_client = client._instance_admin_client - instance_stub = instance_admin_client.transport + instance_stub = client._instance_admin_client + instance_stub.get_cluster.side_effect = [response_pb] # Create expected_result. @@ -280,14 +283,14 @@ def test_reload(self): self.assertEqual(cluster.default_storage_type, STORAGE_TYPE_FROM_SERVER) def test_exists(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 from google.cloud.bigtable.instance import Instance from google.api_core import exceptions - instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient( - mock.Mock() - ) + instance_api = mock.create_autospec(BigtableInstanceAdminClient) credentials = _make_credentials() client = self._make_client( project=self.PROJECT, credentials=credentials, admin=True @@ -302,9 +305,9 @@ def test_exists(self): # Patch the stub used by the API method. client._instance_admin_client = instance_api - instance_admin_client = client._instance_admin_client - instance_stub = instance_admin_client.transport - instance_stub.get_cluster.side_effect = [ + bigtable_instance_stub = client._instance_admin_client + + bigtable_instance_stub.get_cluster.side_effect = [ response_pb, exceptions.NotFound("testing"), exceptions.BadRequest("testing"), @@ -321,19 +324,17 @@ def test_exists(self): def test_create(self): import datetime - from google.api_core import operation from google.longrunning import operations_pb2 from google.protobuf.any_pb2 import Any - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as messages_v2_pb2, + from google.cloud.bigtable_admin_v2.types import ( + bigtable_instance_admin as messages_v2_pb2, ) from google.cloud._helpers import _datetime_to_pb_timestamp from google.cloud.bigtable.instance import Instance - from google.cloud.bigtable_admin_v2.types import instance_pb2 - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as instance_v2_pb2, + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, ) + from google.cloud.bigtable_admin_v2.types import instance as instance_v2_pb2 from google.cloud.bigtable.enums import StorageType NOW = datetime.datetime.utcnow() @@ -352,53 +353,56 @@ def test_create(self): serve_nodes=self.SERVE_NODES, default_storage_type=STORAGE_TYPE_SSD, ) - expected_request_cluster = instance_pb2.Cluster( + expected_request_cluster = instance_v2_pb2.Cluster( location=LOCATION, serve_nodes=cluster.serve_nodes, default_storage_type=cluster.default_storage_type, ) - expected_request = instance_v2_pb2.CreateClusterRequest( - parent=instance.name, - cluster_id=self.CLUSTER_ID, - cluster=expected_request_cluster, - ) - + expected_request = { + "request": { + "parent": instance.name, + "cluster_id": self.CLUSTER_ID, + "cluster": expected_request_cluster, + } + } + name = instance.name metadata = messages_v2_pb2.CreateClusterMetadata(request_time=NOW_PB) type_url = "type.googleapis.com/{}".format( - messages_v2_pb2.CreateClusterMetadata.DESCRIPTOR.full_name + messages_v2_pb2.CreateClusterMetadata._meta._pb.DESCRIPTOR.full_name 
) response_pb = operations_pb2.Operation( name=self.OP_NAME, - metadata=Any(type_url=type_url, value=metadata.SerializeToString()), + metadata=Any(type_url=type_url, value=metadata._pb.SerializeToString()), ) # Patch the stub used by the API method. - channel = ChannelStub(responses=[response_pb]) - api = bigtable_instance_admin_client.BigtableInstanceAdminClient( - channel=channel - ) + api = mock.create_autospec(BigtableInstanceAdminClient) + api.common_location_path.return_value = LOCATION client._instance_admin_client = api - + cluster._instance._client = client + cluster._instance._client.instance_admin_client.instance_path.return_value = ( + name + ) + client._instance_admin_client.create_cluster.return_value = response_pb # Perform the method and check the result. - result = cluster.create() - actual_request = channel.requests[0][1] + cluster.create() + actual_request = client._instance_admin_client.create_cluster.call_args_list[ + 0 + ].kwargs self.assertEqual(actual_request, expected_request) - self.assertIsInstance(result, operation.Operation) - self.assertEqual(result.operation.name, self.OP_NAME) - self.assertIsInstance(result.metadata, messages_v2_pb2.CreateClusterMetadata) def test_update(self): import datetime - from google.api_core import operation from google.longrunning import operations_pb2 from google.protobuf.any_pb2 import Any from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as messages_v2_pb2, + from google.cloud.bigtable_admin_v2.types import ( + bigtable_instance_admin as messages_v2_pb2, + ) + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, ) - from google.cloud.bigtable_admin_v2.types import instance_pb2 - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client from google.cloud.bigtable.enums import StorageType NOW = datetime.datetime.utcnow() @@ -418,40 +422,45 @@ def test_update(self): default_storage_type=STORAGE_TYPE_SSD, ) # Create expected_request - expected_request = instance_pb2.Cluster( - name=cluster.name, serve_nodes=self.SERVE_NODES - ) - + expected_request = { + "request": { + "name": "projects/project/instances/instance-id/clusters/cluster-id", + "serve_nodes": 5, + "location": None, + } + } metadata = messages_v2_pb2.UpdateClusterMetadata(request_time=NOW_PB) type_url = "type.googleapis.com/{}".format( - messages_v2_pb2.UpdateClusterMetadata.DESCRIPTOR.full_name + messages_v2_pb2.UpdateClusterMetadata._meta._pb.DESCRIPTOR.full_name ) response_pb = operations_pb2.Operation( name=self.OP_NAME, - metadata=Any(type_url=type_url, value=metadata.SerializeToString()), + metadata=Any(type_url=type_url, value=metadata._pb.SerializeToString()), ) # Patch the stub used by the API method. - channel = ChannelStub(responses=[response_pb]) - api = bigtable_instance_admin_client.BigtableInstanceAdminClient( - channel=channel - ) + api = mock.create_autospec(BigtableInstanceAdminClient) client._instance_admin_client = api - + cluster._instance._client.instance_admin_client.cluster_path.return_value = ( + "projects/project/instances/instance-id/clusters/cluster-id" + ) # Perform the method and check the result. 
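# --- Editor's note (illustrative sketch, not part of the patch) ---
# Microgenerator clients take a single ``request`` argument, so instead of
# reading a serialized request off a fake channel the tests now inspect the
# keyword arguments of the recorded mock call.
import mock

api = mock.Mock()
api.create_cluster(request={"parent": "projects/p/instances/i", "cluster_id": "c"})

actual_request = api.create_cluster.call_args_list[0].kwargs
assert actual_request == {
    "request": {"parent": "projects/p/instances/i", "cluster_id": "c"}
}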
- result = cluster.update() - actual_request = channel.requests[0][1] + client._instance_admin_client.update_cluster.return_value = response_pb + cluster.update() + + actual_request = client._instance_admin_client.update_cluster.call_args_list[ + 0 + ].kwargs self.assertEqual(actual_request, expected_request) - self.assertIsInstance(result, operation.Operation) - self.assertEqual(result.operation.name, self.OP_NAME) - self.assertIsInstance(result.metadata, messages_v2_pb2.UpdateClusterMetadata) def test_delete(self): from google.protobuf import empty_pb2 - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) - api = bigtable_instance_admin_client.BigtableInstanceAdminClient(mock.Mock()) + api = mock.create_autospec(BigtableInstanceAdminClient) credentials = _make_credentials() client = self._make_client( project=self.PROJECT, credentials=credentials, admin=True @@ -465,7 +474,7 @@ def test_delete(self): # Patch the stub used by the API method. client._instance_admin_client = api instance_admin_client = client._instance_admin_client - instance_stub = instance_admin_client.transport + instance_stub = instance_admin_client instance_stub.delete_cluster.side_effect = [response_pb] # Create expected_result. diff --git a/packages/google-cloud-bigtable/tests/unit/test_column_family.py b/packages/google-cloud-bigtable/tests/unit/test_column_family.py index d6f6c2672047..601c37cf5d13 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_column_family.py +++ b/packages/google-cloud-bigtable/tests/unit/test_column_family.py @@ -344,11 +344,13 @@ def test_to_pb_with_rule(self): self.assertEqual(pb_val, expected) def _create_test_helper(self, gc_rule=None): - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_table_admin_pb2 as table_admin_v2_pb2, + from google.cloud.bigtable_admin_v2.types import ( + bigtable_table_admin as table_admin_v2_pb2, ) from tests.unit._testing import _FakeStub - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + BigtableTableAdminClient, + ) project_id = "project-id" zone = "zone" @@ -366,7 +368,8 @@ def _create_test_helper(self, gc_rule=None): + table_id ) - api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) + api = mock.create_autospec(BigtableTableAdminClient) + credentials = _make_credentials() client = self._make_client( project=project_id, credentials=credentials, admin=True @@ -380,7 +383,10 @@ def _create_test_helper(self, gc_rule=None): else: column_family_pb = _ColumnFamilyPB(gc_rule=gc_rule.to_pb()) request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest(name=table_name) - request_pb.modifications.add(id=column_family_id, create=column_family_pb) + modification = table_admin_v2_pb2.ModifyColumnFamiliesRequest.Modification() + modification.id = column_family_id + modification.create = column_family_pb + request_pb.modifications.append(modification) # Create response_pb response_pb = _ColumnFamilyPB() @@ -409,10 +415,12 @@ def test_create_with_gc_rule(self): def _update_test_helper(self, gc_rule=None): from tests.unit._testing import _FakeStub - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_table_admin_pb2 as table_admin_v2_pb2, + from google.cloud.bigtable_admin_v2.types import ( + bigtable_table_admin as table_admin_v2_pb2, + ) + from 
google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + BigtableTableAdminClient, ) - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client project_id = "project-id" zone = "zone" @@ -430,7 +438,7 @@ def _update_test_helper(self, gc_rule=None): + table_id ) - api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) + api = mock.create_autospec(BigtableTableAdminClient) credentials = _make_credentials() client = self._make_client( project=project_id, credentials=credentials, admin=True @@ -444,7 +452,10 @@ def _update_test_helper(self, gc_rule=None): else: column_family_pb = _ColumnFamilyPB(gc_rule=gc_rule.to_pb()) request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest(name=table_name) - request_pb.modifications.add(id=column_family_id, update=column_family_pb) + modification = table_admin_v2_pb2.ModifyColumnFamiliesRequest.Modification() + modification.id = column_family_id + modification.update = column_family_pb + request_pb.modifications.append(modification) # Create response_pb response_pb = _ColumnFamilyPB() @@ -473,11 +484,13 @@ def test_update_with_gc_rule(self): def test_delete(self): from google.protobuf import empty_pb2 - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_table_admin_pb2 as table_admin_v2_pb2, + from google.cloud.bigtable_admin_v2.types import ( + bigtable_table_admin as table_admin_v2_pb2, ) from tests.unit._testing import _FakeStub - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + BigtableTableAdminClient, + ) project_id = "project-id" zone = "zone" @@ -495,7 +508,7 @@ def test_delete(self): + table_id ) - api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) + api = mock.create_autospec(BigtableTableAdminClient) credentials = _make_credentials() client = self._make_client( project=project_id, credentials=credentials, admin=True @@ -505,7 +518,10 @@ def test_delete(self): # Create request_pb request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest(name=table_name) - request_pb.modifications.add(id=column_family_id, drop=True) + modification = table_admin_v2_pb2.ModifyColumnFamiliesRequest.Modification( + id=column_family_id, drop=True + ) + request_pb.modifications.append(modification) # Create response_pb response_pb = empty_pb2.Empty() @@ -587,36 +603,40 @@ class MockProto(object): names = [] + _pb = {} + @classmethod def WhichOneof(cls, name): cls.names.append(name) return "unknown" + MockProto._pb = MockProto + self.assertEqual(MockProto.names, []) self.assertRaises(ValueError, self._call_fut, MockProto) self.assertEqual(MockProto.names, ["rule"]) def _GcRulePB(*args, **kw): - from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_v2_pb2 + from google.cloud.bigtable_admin_v2.types import table as table_v2_pb2 return table_v2_pb2.GcRule(*args, **kw) def _GcRuleIntersectionPB(*args, **kw): - from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_v2_pb2 + from google.cloud.bigtable_admin_v2.types import table as table_v2_pb2 return table_v2_pb2.GcRule.Intersection(*args, **kw) def _GcRuleUnionPB(*args, **kw): - from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_v2_pb2 + from google.cloud.bigtable_admin_v2.types import table as table_v2_pb2 return table_v2_pb2.GcRule.Union(*args, **kw) def _ColumnFamilyPB(*args, **kw): - from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_v2_pb2 + from 
google.cloud.bigtable_admin_v2.types import table as table_v2_pb2 return table_v2_pb2.ColumnFamily(*args, **kw) diff --git a/packages/google-cloud-bigtable/tests/unit/test_instance.py b/packages/google-cloud-bigtable/tests/unit/test_instance.py index 14dd0bf5872d..e493fd9c8b9f 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_instance.py +++ b/packages/google-cloud-bigtable/tests/unit/test_instance.py @@ -97,14 +97,15 @@ def test_constructor_non_default(self): self.assertEqual(instance.state, state) def test__update_from_pb_success(self): - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 from google.cloud.bigtable import enums - instance_type = enums.Instance.Type.PRODUCTION + instance_type = data_v2_pb2.Instance.Type.PRODUCTION state = enums.Instance.State.READY + # todo type to type_? instance_pb = data_v2_pb2.Instance( display_name=self.DISPLAY_NAME, - type=instance_type, + type_=instance_type, labels=self.LABELS, state=state, ) @@ -113,14 +114,14 @@ def test__update_from_pb_success(self): self.assertIsNone(instance.display_name) self.assertIsNone(instance.type_) self.assertIsNone(instance.labels) - instance._update_from_pb(instance_pb) + instance._update_from_pb(instance_pb._pb) self.assertEqual(instance.display_name, self.DISPLAY_NAME) self.assertEqual(instance.type_, instance_type) self.assertEqual(instance.labels, self.LABELS) self.assertEqual(instance._state, state) def test__update_from_pb_success_defaults(self): - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 from google.cloud.bigtable import enums instance_pb = data_v2_pb2.Instance(display_name=self.DISPLAY_NAME) @@ -129,13 +130,13 @@ def test__update_from_pb_success_defaults(self): self.assertIsNone(instance.display_name) self.assertIsNone(instance.type_) self.assertIsNone(instance.labels) - instance._update_from_pb(instance_pb) + instance._update_from_pb(instance_pb._pb) self.assertEqual(instance.display_name, self.DISPLAY_NAME) self.assertEqual(instance.type_, enums.Instance.Type.UNSPECIFIED) self.assertFalse(instance.labels) def test__update_from_pb_no_display_name(self): - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 instance_pb = data_v2_pb2.Instance() instance = self._make_one(None, None) @@ -144,7 +145,7 @@ def test__update_from_pb_no_display_name(self): instance._update_from_pb(instance_pb) def test_from_pb_success(self): - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 from google.cloud.bigtable import enums credentials = _make_credentials() @@ -156,7 +157,7 @@ def test_from_pb_success(self): instance_pb = data_v2_pb2.Instance( name=self.INSTANCE_NAME, display_name=self.INSTANCE_ID, - type=instance_type, + type_=instance_type, labels=self.LABELS, state=state, ) @@ -172,7 +173,7 @@ def test_from_pb_success(self): self.assertEqual(instance._state, state) def test_from_pb_bad_instance_name(self): - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 instance_name = "INCORRECT_FORMAT" instance_pb = data_v2_pb2.Instance(name=instance_name) @@ -182,7 +183,7 @@ def test_from_pb_bad_instance_name(self): 
klass.from_pb(instance_pb, None) def test_from_pb_project_mistmatch(self): - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 ALT_PROJECT = "ALT_PROJECT" credentials = _make_credentials() @@ -199,14 +200,17 @@ def test_from_pb_project_mistmatch(self): klass.from_pb(instance_pb, client) def test_name_property(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) - api = bigtable_instance_admin_client.BigtableInstanceAdminClient(mock.Mock()) + api = mock.create_autospec(BigtableInstanceAdminClient) credentials = _make_credentials() client = self._make_client( project=self.PROJECT, credentials=credentials, admin=True ) + api.instance_path.return_value = "projects/project/instances/instance-id" # Patch the the API method. client._instance_admin_client = api @@ -261,41 +265,43 @@ def _instance_api_response_for_create(self): from google.longrunning import operations_pb2 from google.protobuf.any_pb2 import Any from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as messages_v2_pb2, + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, ) - from google.cloud.bigtable_admin_v2.types import instance_pb2 + from google.cloud.bigtable_admin_v2.types import ( + bigtable_instance_admin as messages_v2_pb2, + ) + from google.cloud.bigtable_admin_v2.types import instance NOW = datetime.datetime.utcnow() NOW_PB = _datetime_to_pb_timestamp(NOW) metadata = messages_v2_pb2.CreateInstanceMetadata(request_time=NOW_PB) type_url = "type.googleapis.com/{}".format( - messages_v2_pb2.CreateInstanceMetadata.DESCRIPTOR.full_name + messages_v2_pb2.CreateInstanceMetadata._meta._pb.DESCRIPTOR.full_name ) response_pb = operations_pb2.Operation( name=self.OP_NAME, - metadata=Any(type_url=type_url, value=metadata.SerializeToString()), + metadata=Any(type_url=type_url, value=metadata._pb.SerializeToString()), ) response = operation.from_gapic( response_pb, mock.Mock(), - instance_pb2.Instance, + instance.Instance, metadata_type=messages_v2_pb2.CreateInstanceMetadata, ) project_path_template = "projects/{}" location_path_template = "projects/{}/locations/{}" - instance_api = mock.create_autospec( - bigtable_instance_admin_client.BigtableInstanceAdminClient - ) + instance_api = mock.create_autospec(BigtableInstanceAdminClient) instance_api.create_instance.return_value = response instance_api.project_path = project_path_template.format instance_api.location_path = location_path_template.format + instance_api.common_location_path = location_path_template.format return instance_api, response def test_create(self): from google.cloud.bigtable import enums - from google.cloud.bigtable_admin_v2.types import instance_pb2 + from google.cloud.bigtable_admin_v2.types import Instance + from google.cloud.bigtable_admin_v2.types import Cluster import warnings credentials = _make_credentials() @@ -310,6 +316,7 @@ def test_create(self): self.LABELS, ) instance_api, response = self._instance_api_response_for_create() + instance_api.common_project_path.return_value = "projects/project" client._instance_admin_client = instance_api serve_nodes = 3 @@ -318,22 +325,24 @@ def 
test_create(self): location_id=self.LOCATION_ID, serve_nodes=serve_nodes ) - cluster_pb = instance_pb2.Cluster( + cluster_pb = Cluster( location=instance_api.location_path(self.PROJECT, self.LOCATION_ID), serve_nodes=serve_nodes, default_storage_type=enums.StorageType.UNSPECIFIED, ) - instance_pb = instance_pb2.Instance( + instance_pb = Instance( display_name=self.DISPLAY_NAME, - type=enums.Instance.Type.PRODUCTION, + type_=enums.Instance.Type.PRODUCTION, labels=self.LABELS, ) cluster_id = "{}-cluster".format(self.INSTANCE_ID) instance_api.create_instance.assert_called_once_with( - parent=instance_api.project_path(self.PROJECT), - instance_id=self.INSTANCE_ID, - instance=instance_pb, - clusters={cluster_id: cluster_pb}, + request={ + "parent": instance_api.project_path(self.PROJECT), + "instance_id": self.INSTANCE_ID, + "instance": instance_pb, + "clusters": {cluster_id: cluster_pb}, + } ) self.assertEqual(len(warned), 1) @@ -343,7 +352,9 @@ def test_create(self): def test_create_w_clusters(self): from google.cloud.bigtable import enums - from google.cloud.bigtable_admin_v2.types import instance_pb2 + from google.cloud.bigtable.cluster import Cluster + from google.cloud.bigtable_admin_v2.types import Cluster as cluster_pb + from google.cloud.bigtable_admin_v2.types import Instance as instance_pb credentials = _make_credentials() client = self._make_client( @@ -357,6 +368,7 @@ def test_create_w_clusters(self): self.LABELS, ) instance_api, response = self._instance_api_response_for_create() + instance_api.common_project_path.return_value = "projects/project" client._instance_admin_client = instance_api # Perform the method and check the result. @@ -383,36 +395,40 @@ def test_create_w_clusters(self): result = instance.create(clusters=clusters) - cluster_pb_1 = instance_pb2.Cluster( + cluster_pb_1 = cluster_pb( location=instance_api.location_path(self.PROJECT, location_id_1), serve_nodes=serve_nodes_1, default_storage_type=enums.StorageType.UNSPECIFIED, ) - cluster_pb_2 = instance_pb2.Cluster( + cluster_pb_2 = cluster_pb( location=instance_api.location_path(self.PROJECT, location_id_2), serve_nodes=serve_nodes_2, default_storage_type=enums.StorageType.UNSPECIFIED, ) - instance_pb = instance_pb2.Instance( + instance_pb = instance_pb( display_name=self.DISPLAY_NAME, - type=enums.Instance.Type.PRODUCTION, + type_=enums.Instance.Type.PRODUCTION, labels=self.LABELS, ) instance_api.create_instance.assert_called_once_with( - parent=instance_api.project_path(self.PROJECT), - instance_id=self.INSTANCE_ID, - instance=instance_pb, - clusters={cluster_id_1: cluster_pb_1, cluster_id_2: cluster_pb_2}, + request={ + "parent": instance_api.project_path(self.PROJECT), + "instance_id": self.INSTANCE_ID, + "instance": instance_pb, + "clusters": {cluster_id_1: cluster_pb_1, cluster_id_2: cluster_pb_2}, + } ) self.assertIs(result, response) def test_exists(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 from google.api_core import exceptions - api = bigtable_instance_admin_client.BigtableInstanceAdminClient(mock.Mock()) + api = mock.create_autospec(BigtableInstanceAdminClient) credentials = _make_credentials() client = self._make_client( project=self.PROJECT, credentials=credentials, admin=True @@ -426,9 
+442,9 @@ def test_exists(self): # Patch the stub used by the API method. client._instance_admin_client = api - instance_admin_client = client._instance_admin_client - instance_stub = instance_admin_client.transport - instance_stub.get_instance.side_effect = [ + instance_admin_stub = client._instance_admin_client + + instance_admin_stub.get_instance.side_effect = [ response_pb, exceptions.NotFound("testing"), exceptions.BadRequest("testing"), @@ -445,11 +461,13 @@ def test_exists(self): alt_instance_2.exists() def test_reload(self): - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) from google.cloud.bigtable import enums - api = bigtable_instance_admin_client.BigtableInstanceAdminClient(mock.Mock()) + api = mock.create_autospec(BigtableInstanceAdminClient) credentials = _make_credentials() client = self._make_client( project=self.PROJECT, credentials=credentials, admin=True @@ -460,12 +478,12 @@ def test_reload(self): DISPLAY_NAME = u"hey-hi-hello" instance_type = enums.Instance.Type.PRODUCTION response_pb = data_v2_pb2.Instance( - display_name=DISPLAY_NAME, type=instance_type, labels=self.LABELS + display_name=DISPLAY_NAME, type_=instance_type, labels=self.LABELS ) # Patch the stub used by the API method. client._instance_admin_client = api - bigtable_instance_stub = client._instance_admin_client.transport + bigtable_instance_stub = client._instance_admin_client bigtable_instance_stub.get_instance.side_effect = [response_pb] # Create expected_result. @@ -487,32 +505,32 @@ def _instance_api_response_for_update(self): from google.longrunning import operations_pb2 from google.protobuf.any_pb2 import Any from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as messages_v2_pb2, + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, ) - from google.cloud.bigtable_admin_v2.types import instance_pb2 + from google.cloud.bigtable_admin_v2.types import ( + bigtable_instance_admin as messages_v2_pb2, + ) + from google.cloud.bigtable_admin_v2.types import instance NOW = datetime.datetime.utcnow() NOW_PB = _datetime_to_pb_timestamp(NOW) metadata = messages_v2_pb2.UpdateInstanceMetadata(request_time=NOW_PB) type_url = "type.googleapis.com/{}".format( - messages_v2_pb2.UpdateInstanceMetadata.DESCRIPTOR.full_name + messages_v2_pb2.UpdateInstanceMetadata._meta._pb.DESCRIPTOR.full_name ) response_pb = operations_pb2.Operation( name=self.OP_NAME, - metadata=Any(type_url=type_url, value=metadata.SerializeToString()), + metadata=Any(type_url=type_url, value=metadata._pb.SerializeToString()), ) response = operation.from_gapic( response_pb, mock.Mock(), - instance_pb2.Instance, + instance.Instance, metadata_type=messages_v2_pb2.UpdateInstanceMetadata, ) instance_path_template = "projects/{project}/instances/{instance}" - instance_api = mock.create_autospec( - bigtable_instance_admin_client.BigtableInstanceAdminClient - ) + instance_api = mock.create_autospec(BigtableInstanceAdminClient) instance_api.partial_update_instance.return_value = response instance_api.instance_path = 
instance_path_template.format return instance_api, response @@ -520,7 +538,7 @@ def _instance_api_response_for_update(self): def test_update(self): from google.cloud.bigtable import enums from google.protobuf import field_mask_pb2 - from google.cloud.bigtable_admin_v2.types import instance_pb2 + from google.cloud.bigtable_admin_v2.types import Instance credentials = _make_credentials() client = self._make_client( @@ -538,10 +556,10 @@ def test_update(self): result = instance.update() - instance_pb = instance_pb2.Instance( + instance_pb = Instance( name=instance.name, display_name=instance.display_name, - type=instance.type_, + type_=instance.type_, labels=instance.labels, ) update_mask_pb = field_mask_pb2.FieldMask( @@ -549,14 +567,14 @@ def test_update(self): ) instance_api.partial_update_instance.assert_called_once_with( - instance=instance_pb, update_mask=update_mask_pb + request={"instance": instance_pb, "update_mask": update_mask_pb} ) self.assertIs(result, response) def test_update_empty(self): from google.protobuf import field_mask_pb2 - from google.cloud.bigtable_admin_v2.types import instance_pb2 + from google.cloud.bigtable_admin_v2.types import Instance credentials = _make_credentials() client = self._make_client( @@ -568,42 +586,46 @@ def test_update_empty(self): result = instance.update() - instance_pb = instance_pb2.Instance( + instance_pb = Instance( name=instance.name, display_name=instance.display_name, - type=instance.type_, + type_=instance.type_, labels=instance.labels, ) update_mask_pb = field_mask_pb2.FieldMask() instance_api.partial_update_instance.assert_called_once_with( - instance=instance_pb, update_mask=update_mask_pb + request={"instance": instance_pb, "update_mask": update_mask_pb} ) self.assertIs(result, response) def test_delete(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) credentials = _make_credentials() client = self._make_client( project=self.PROJECT, credentials=credentials, admin=True ) instance = self._make_one(self.INSTANCE_ID, client) - instance_api = mock.create_autospec( - bigtable_instance_admin_client.BigtableInstanceAdminClient - ) + instance_api = mock.create_autospec(BigtableInstanceAdminClient) instance_api.delete_instance.return_value = None client._instance_admin_client = instance_api result = instance.delete() - instance_api.delete_instance.assert_called_once_with(instance.name) + instance_api.delete_instance.assert_called_once_with( + request={"name": instance.name} + ) self.assertIsNone(result) def test_get_iam_policy(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) from google.iam.v1 import policy_pb2 from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE @@ -620,16 +642,16 @@ def test_get_iam_policy(self): iam_policy = policy_pb2.Policy(version=version, etag=etag, bindings=bindings) # Patch the stub used by the API method. - instance_api = mock.create_autospec( - bigtable_instance_admin_client.BigtableInstanceAdminClient - ) + instance_api = mock.create_autospec(BigtableInstanceAdminClient) client._instance_admin_client = instance_api instance_api.get_iam_policy.return_value = iam_policy # Perform the method and check the result. 
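# --- Editor's note (illustrative sketch, not part of the patch) ---
# Two more migration details visible above: proto fields that collide with
# Python keywords are spelled with a trailing underscore (``type_``), and
# update calls bundle the message plus its FieldMask into one request dict.
import mock
from google.protobuf import field_mask_pb2
from google.cloud.bigtable_admin_v2.types import Instance

instance_pb = Instance(
    name="projects/p/instances/i",
    display_name="display-name",
    type_=Instance.Type.PRODUCTION,
)
update_mask_pb = field_mask_pb2.FieldMask(paths=["display_name"])

api = mock.Mock()
api.partial_update_instance(
    request={"instance": instance_pb, "update_mask": update_mask_pb}
)
api.partial_update_instance.assert_called_once_with(
    request={"instance": instance_pb, "update_mask": update_mask_pb}
)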
result = instance.get_iam_policy() - instance_api.get_iam_policy.assert_called_once_with(resource=instance.name) + instance_api.get_iam_policy.assert_called_once_with( + request={"resource": instance.name} + ) self.assertEqual(result.version, version) self.assertEqual(result.etag, etag) admins = result.bigtable_admins @@ -638,7 +660,9 @@ def test_get_iam_policy(self): self.assertEqual(found, expected) def test_get_iam_policy_w_requested_policy_version(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) from google.iam.v1 import policy_pb2, options_pb2 from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE @@ -655,9 +679,7 @@ def test_get_iam_policy_w_requested_policy_version(self): iam_policy = policy_pb2.Policy(version=version, etag=etag, bindings=bindings) # Patch the stub used by the API method. - instance_api = mock.create_autospec( - bigtable_instance_admin_client.BigtableInstanceAdminClient - ) + instance_api = mock.create_autospec(BigtableInstanceAdminClient) client._instance_admin_client = instance_api instance_api.get_iam_policy.return_value = iam_policy @@ -665,8 +687,10 @@ def test_get_iam_policy_w_requested_policy_version(self): result = instance.get_iam_policy(requested_policy_version=3) instance_api.get_iam_policy.assert_called_once_with( - resource=instance.name, - options_=options_pb2.GetPolicyOptions(requested_policy_version=3), + request={ + "resource": instance.name, + "options_": options_pb2.GetPolicyOptions(requested_policy_version=3), + } ) self.assertEqual(result.version, version) self.assertEqual(result.etag, etag) @@ -676,7 +700,9 @@ def test_get_iam_policy_w_requested_policy_version(self): self.assertEqual(found, expected) def test_set_iam_policy(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) from google.iam.v1 import policy_pb2 from google.cloud.bigtable.policy import Policy from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE @@ -694,9 +720,7 @@ def test_set_iam_policy(self): iam_policy_pb = policy_pb2.Policy(version=version, etag=etag, bindings=bindings) # Patch the stub used by the API method. 
- instance_api = mock.create_autospec( - bigtable_instance_admin_client.BigtableInstanceAdminClient - ) + instance_api = mock.create_autospec(BigtableInstanceAdminClient) instance_api.set_iam_policy.return_value = iam_policy_pb client._instance_admin_client = instance_api @@ -710,7 +734,7 @@ def test_set_iam_policy(self): result = instance.set_iam_policy(iam_policy) instance_api.set_iam_policy.assert_called_once_with( - resource=instance.name, policy=iam_policy_pb + request={"resource": instance.name, "policy": iam_policy_pb} ) self.assertEqual(result.version, version) self.assertEqual(result.etag, etag) @@ -720,7 +744,9 @@ def test_set_iam_policy(self): self.assertEqual(found, expected) def test_test_iam_permissions(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) from google.iam.v1 import iam_policy_pb2 credentials = _make_credentials() @@ -733,9 +759,7 @@ def test_test_iam_permissions(self): response = iam_policy_pb2.TestIamPermissionsResponse(permissions=permissions) - instance_api = mock.create_autospec( - bigtable_instance_admin_client.BigtableInstanceAdminClient - ) + instance_api = mock.create_autospec(BigtableInstanceAdminClient) instance_api.test_iam_permissions.return_value = response client._instance_admin_client = instance_api @@ -743,7 +767,7 @@ def test_test_iam_permissions(self): self.assertEqual(result, permissions) instance_api.test_iam_permissions.assert_called_once_with( - resource=instance.name, permissions=permissions + request={"resource": instance.name, "permissions": permissions} ) def test_cluster_factory(self): @@ -770,11 +794,13 @@ def test_cluster_factory(self): self.assertEqual(cluster.default_storage_type, STORAGE_TYPE) def test_list_clusters(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_instance_admin_pb2 as messages_v2_pb2, + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) + from google.cloud.bigtable_admin_v2.types import ( + bigtable_instance_admin as messages_v2_pb2, ) - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 from google.cloud.bigtable.instance import Instance from google.cloud.bigtable.instance import Cluster @@ -805,9 +831,7 @@ def test_list_clusters(self): ) # Patch the stub used by the API method. 
- instance_api = mock.create_autospec( - bigtable_instance_admin_client.BigtableInstanceAdminClient - ) + instance_api = mock.create_autospec(BigtableInstanceAdminClient) instance_api.list_clusters.side_effect = [response_pb] instance_api.cluster_path = cluster_path_template.format client._instance_admin_client = instance_api @@ -838,25 +862,26 @@ def test_table_factory(self): self.assertEqual(table._app_profile_id, app_profile_id) def _list_tables_helper(self, table_name=None): - from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_data_v2_pb2 - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_table_admin_pb2 as table_messages_v1_pb2, + from google.cloud.bigtable_admin_v2.types import table as table_data_v2_pb2 + from google.cloud.bigtable_admin_v2.types import ( + bigtable_table_admin as table_messages_v1_pb2, ) - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_table_admin_client, - bigtable_instance_admin_client, + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + BigtableTableAdminClient, ) - - table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) - instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient( - mock.Mock() + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, ) + + table_api = mock.create_autospec(BigtableTableAdminClient) + instance_api = mock.create_autospec(BigtableInstanceAdminClient) credentials = _make_credentials() client = self._make_client( project=self.PROJECT, credentials=credentials, admin=True ) instance = self._make_one(self.INSTANCE_ID, client) + instance_api.instance_path.return_value = instance.name # Create response_pb if table_name is None: table_name = self.TABLE_NAME @@ -868,7 +893,7 @@ def _list_tables_helper(self, table_name=None): # Patch the stub used by the API method. client._table_admin_client = table_api client._instance_admin_client = instance_api - bigtable_table_stub = client._table_admin_client.transport + bigtable_table_stub = client._table_admin_client bigtable_table_stub.list_tables.side_effect = [response_pb] # Create expected_result. @@ -939,8 +964,10 @@ def test_app_profile_factory(self): def test_list_app_profiles(self): from google.api_core.page_iterator import Iterator from google.api_core.page_iterator import Page - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client - from google.cloud.bigtable_admin_v2.proto import instance_pb2 as data_v2_pb2 + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 from google.cloud.bigtable.app_profile import AppProfile class _Iterator(Iterator): @@ -982,9 +1009,7 @@ def _next_page(self): iterator = _Iterator(pages=[app_profiles]) # Patch the stub used by the API method. 
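# --- Editor's note (illustrative sketch, not part of the patch) ---
# Path-helper methods on an autospec mock return MagicMocks by default, so
# tests that compare resource names pin an explicit return value (or point
# the helper at the real ``.format`` of a template string, as above).
import mock
from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
    BigtableInstanceAdminClient,
)

instance_api = mock.create_autospec(BigtableInstanceAdminClient)
instance_api.instance_path.return_value = "projects/project/instances/instance-id"

assert (
    instance_api.instance_path("project", "instance-id")
    == "projects/project/instances/instance-id"
)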
- instance_api = mock.create_autospec( - bigtable_instance_admin_client.BigtableInstanceAdminClient - ) + instance_api = mock.create_autospec(BigtableInstanceAdminClient) client._instance_admin_client = instance_api instance_api.app_profile_path = app_profile_path_template.format instance_api.list_app_profiles.return_value = iterator diff --git a/packages/google-cloud-bigtable/tests/unit/test_policy.py b/packages/google-cloud-bigtable/tests/unit/test_policy.py index 939e02a9d742..63f9ba03fb23 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_policy.py +++ b/packages/google-cloud-bigtable/tests/unit/test_policy.py @@ -147,11 +147,7 @@ def test_from_pb_with_condition(self): }, } ] - message = policy_pb2.Policy( - etag=ETAG, - version=VERSION, - bindings=BINDINGS, - ) + message = policy_pb2.Policy(etag=ETAG, version=VERSION, bindings=BINDINGS,) klass = self._get_target_class() policy = klass.from_pb(message) self.assertEqual(policy.etag, ETAG) diff --git a/packages/google-cloud-bigtable/tests/unit/test_row.py b/packages/google-cloud-bigtable/tests/unit/test_row.py index 16a8232ec5bd..6b5f4168b0cc 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row.py @@ -120,7 +120,7 @@ def test_get_mutations_size(self): total_mutations_size = 0 for mutation in row._get_mutations(): - total_mutations_size += mutation.ByteSize() + total_mutations_size += mutation._pb.ByteSize() self.assertEqual(row.get_mutations_size(), total_mutations_size) @@ -282,7 +282,9 @@ def _delete_cells_helper(self, time_range=None): ) ) if time_range is not None: - expected_pb.delete_from_column.time_range.CopyFrom(time_range.to_pb()) + expected_pb.delete_from_column.time_range._pb.CopyFrom( + time_range.to_pb()._pb + ) self.assertEqual(row._pb_mutations, [expected_pb]) def test_delete_cells_no_time_range(self): @@ -427,7 +429,7 @@ def test__get_mutations(self): def test_commit(self): from google.cloud.bigtable.row_filters import RowSampleFilter - from google.cloud.bigtable_v2.gapic import bigtable_client + from google.cloud.bigtable_v2.services.bigtable import BigtableClient project_id = "project-id" row_key = b"row_key" @@ -439,7 +441,7 @@ def test_commit(self): column1 = b"column1" column2 = b"column2" - api = bigtable_client.BigtableClient(mock.Mock()) + api = mock.create_autospec(BigtableClient) credentials = _make_credentials() client = self._make_client( project=project_id, credentials=credentials, admin=True @@ -456,7 +458,7 @@ def test_commit(self): response_pb = _CheckAndMutateRowResponsePB(predicate_matched=predicate_matched) # Patch the stub used by the API method. - api.transport.check_and_mutate_row.side_effect = [response_pb] + api.check_and_mutate_row.side_effect = [response_pb] client._table_data_client = api # Create expected_result. 
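# --- Editor's note (illustrative sketch, not part of the patch) ---
# Proto-plus wrappers do not expose every protobuf method, so the row tests
# reach through ``._pb`` for helpers such as ``ByteSize`` and ``CopyFrom``.
from google.cloud.bigtable_v2.types import data as data_v2_pb2

mutation = data_v2_pb2.Mutation(
    set_cell=data_v2_pb2.Mutation.SetCell(
        family_name="cf1",
        column_qualifier=b"c1",
        timestamp_micros=-1,
        value=b"value",
    )
)
assert mutation._pb.ByteSize() > 0  # plain protobuf API via the wrapped message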
@@ -468,8 +470,8 @@ def test_commit(self): row.delete_cell(column_family_id2, column2, state=True) row.delete_cells(column_family_id3, row.ALL_COLUMNS, state=True) result = row.commit() - call_args = api.transport.check_and_mutate_row.call_args.args[0] - self.assertEqual(app_profile_id, call_args.app_profile_id) + call_args = api.check_and_mutate_row.call_args + self.assertEqual(app_profile_id, call_args.app_profile_id[0]) self.assertEqual(result, expected_result) self.assertEqual(row._true_pb_mutations, []) self.assertEqual(row._false_pb_mutations, []) @@ -585,7 +587,7 @@ def test_increment_cell_value(self): def test_commit(self): from google.cloud._testing import _Monkey from google.cloud.bigtable import row as MUT - from google.cloud.bigtable_v2.gapic import bigtable_client + from google.cloud.bigtable_v2.services.bigtable import BigtableClient project_id = "project-id" row_key = b"row_key" @@ -594,7 +596,8 @@ def test_commit(self): column_family_id = u"column_family_id" column = b"column" - api = bigtable_client.BigtableClient(mock.Mock()) + api = mock.create_autospec(BigtableClient) + credentials = _make_credentials() client = self._make_client( project=project_id, credentials=credentials, admin=True @@ -618,10 +621,11 @@ def mock_parse_rmw_row_response(row_response): # Perform the method and check the result. with _Monkey(MUT, _parse_rmw_row_response=mock_parse_rmw_row_response): + row._table._instance._client._table_data_client = api row.append_cell_value(column_family_id, column, value) result = row.commit() - call_args = api.transport.read_modify_write_row.call_args.args[0] - self.assertEqual(app_profile_id, call_args.app_profile_id) + call_args = api.read_modify_write_row.call_args_list[0] + self.assertEqual(app_profile_id, call_args.app_profile_id[0]) self.assertEqual(result, expected_result) self.assertEqual(row._rule_pb_list, []) @@ -770,73 +774,73 @@ def test_it(self): def _CheckAndMutateRowResponsePB(*args, **kw): - from google.cloud.bigtable_v2.proto import bigtable_pb2 as messages_v2_pb2 + from google.cloud.bigtable_v2.types import bigtable as messages_v2_pb2 return messages_v2_pb2.CheckAndMutateRowResponse(*args, **kw) def _ReadModifyWriteRowResponsePB(*args, **kw): - from google.cloud.bigtable_v2.proto import bigtable_pb2 as messages_v2_pb2 + from google.cloud.bigtable_v2.types import bigtable as messages_v2_pb2 return messages_v2_pb2.ReadModifyWriteRowResponse(*args, **kw) def _CellPB(*args, **kw): - from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 + from google.cloud.bigtable_v2.types import data as data_v2_pb2 return data_v2_pb2.Cell(*args, **kw) def _ColumnPB(*args, **kw): - from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 + from google.cloud.bigtable_v2.types import data as data_v2_pb2 return data_v2_pb2.Column(*args, **kw) def _FamilyPB(*args, **kw): - from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 + from google.cloud.bigtable_v2.types import data as data_v2_pb2 return data_v2_pb2.Family(*args, **kw) def _MutationPB(*args, **kw): - from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 + from google.cloud.bigtable_v2.types import data as data_v2_pb2 return data_v2_pb2.Mutation(*args, **kw) def _MutationSetCellPB(*args, **kw): - from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 + from google.cloud.bigtable_v2.types import data as data_v2_pb2 return data_v2_pb2.Mutation.SetCell(*args, **kw) def _MutationDeleteFromColumnPB(*args, **kw): - from google.cloud.bigtable_v2.proto import 
data_pb2 as data_v2_pb2 + from google.cloud.bigtable_v2.types import data as data_v2_pb2 return data_v2_pb2.Mutation.DeleteFromColumn(*args, **kw) def _MutationDeleteFromFamilyPB(*args, **kw): - from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 + from google.cloud.bigtable_v2.types import data as data_v2_pb2 return data_v2_pb2.Mutation.DeleteFromFamily(*args, **kw) def _MutationDeleteFromRowPB(*args, **kw): - from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 + from google.cloud.bigtable_v2.types import data as data_v2_pb2 return data_v2_pb2.Mutation.DeleteFromRow(*args, **kw) def _RowPB(*args, **kw): - from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 + from google.cloud.bigtable_v2.types import data as data_v2_pb2 return data_v2_pb2.Row(*args, **kw) def _ReadModifyWriteRulePB(*args, **kw): - from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 + from google.cloud.bigtable_v2.types import data as data_v2_pb2 return data_v2_pb2.ReadModifyWriteRule(*args, **kw) diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_data.py b/packages/google-cloud-bigtable/tests/unit/test_row_data.py index c59da844b8d9..21c0a582b4cc 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_data.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_data.py @@ -19,7 +19,7 @@ from google.api_core.exceptions import DeadlineExceeded from ._testing import _make_credentials from google.cloud.bigtable.row_set import RowRange -from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 +from google.cloud.bigtable_v2.types import data as data_v2_pb2 class MultiCallableStub(object): @@ -64,7 +64,7 @@ def _make_one(self, *args, **kwargs): def _from_pb_test_helper(self, labels=None): import datetime from google.cloud._helpers import _EPOCH - from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 + from google.cloud.bigtable_v2.types import data as data_v2_pb2 timestamp_micros = TestCell.timestamp_micros timestamp = _EPOCH + datetime.timedelta(microseconds=timestamp_micros) @@ -453,7 +453,7 @@ def test_state_start(self): self.assertEqual(yrd.state, yrd.NEW_ROW) def test_state_new_row_w_row(self): - from google.cloud.bigtable_v2.gapic import bigtable_client + from google.cloud.bigtable_v2.services.bigtable import BigtableClient chunk = _ReadRowsResponseCellChunkPB( row_key=self.ROW_KEY, @@ -467,8 +467,9 @@ def test_state_new_row_w_row(self): response = _ReadRowsResponseV2(chunks) iterator = _MockCancellableIterator(response) - channel = ChannelStub(responses=[iterator]) - data_api = bigtable_client.BigtableClient(channel=channel) + + data_api = mock.create_autospec(BigtableClient) + credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True @@ -476,10 +477,10 @@ def test_state_new_row_w_row(self): client._table_data_client = data_api request = object() - yrd = self._make_one(client._table_data_client.transport.read_rows, request) + yrd = self._make_one(client._table_data_client.read_rows, request) self.assertEqual(yrd.retry._deadline, 60.0) - yrd._response_iterator = iterator + yrd.response_iterator = iterator rows = [row for row in yrd] result = rows[0] @@ -488,7 +489,7 @@ def test_state_new_row_w_row(self): self.assertEqual(yrd.state, yrd.NEW_ROW) def test_multiple_chunks(self): - from google.cloud.bigtable_v2.gapic import bigtable_client + from google.cloud.bigtable_v2.services.bigtable import BigtableClient chunk1 = _ReadRowsResponseCellChunkPB( 
row_key=self.ROW_KEY, @@ -508,8 +509,7 @@ def test_multiple_chunks(self): response = _ReadRowsResponseV2(chunks) iterator = _MockCancellableIterator(response) - channel = ChannelStub(responses=[iterator]) - data_api = bigtable_client.BigtableClient(channel=channel) + data_api = mock.create_autospec(BigtableClient) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True @@ -517,9 +517,9 @@ def test_multiple_chunks(self): client._table_data_client = data_api request = object() - yrd = self._make_one(client._table_data_client.transport.read_rows, request) + yrd = self._make_one(client._table_data_client.read_rows, request) - yrd._response_iterator = iterator + yrd.response_iterator = iterator rows = [row for row in yrd] result = rows[0] self.assertEqual(result.row_key, self.ROW_KEY) @@ -544,7 +544,7 @@ def test__copy_from_previous_unset(self): client = _Client() client._data_stub = mock.MagicMock() request = object() - yrd = self._make_one(client._data_stub.ReadRows, request) + yrd = self._make_one(client._data_stub.read_rows, request) cell = _PartialCellData() yrd._copy_from_previous(cell) self.assertEqual(cell.row_key, b"") @@ -579,15 +579,18 @@ def test__copy_from_previous_blank(self): self.assertEqual(cell.labels, LABELS) def test__copy_from_previous_filled(self): + from google.cloud.bigtable_v2.services.bigtable import BigtableClient + ROW_KEY = "RK" FAMILY_NAME = u"A" QUALIFIER = b"C" TIMESTAMP_MICROS = 100 LABELS = ["L1", "L2"] client = _Client() - client._data_stub = mock.MagicMock() + data_api = mock.create_autospec(BigtableClient) + client._data_stub = data_api request = object() - yrd = self._make_one(client._data_stub.ReadRows, request) + yrd = self._make_one(client._data_stub.read_rows, request) yrd._previous_cell = _PartialCellData( row_key=ROW_KEY, family_name=FAMILY_NAME, @@ -608,33 +611,37 @@ def test_valid_last_scanned_row_key_on_start(self): response = _ReadRowsResponseV2(chunks=(), last_scanned_row_key="2.AFTER") iterator = _MockCancellableIterator(response) client._data_stub = mock.MagicMock() - client._data_stub.ReadRows.side_effect = [iterator] + client._data_stub.read_rows.side_effect = [iterator] request = object() - yrd = self._make_one(client._data_stub.ReadRows, request) + yrd = self._make_one(client._data_stub.read_rows, request) yrd.last_scanned_row_key = "1.BEFORE" self._consume_all(yrd) self.assertEqual(yrd.last_scanned_row_key, "2.AFTER") def test_invalid_empty_chunk(self): from google.cloud.bigtable.row_data import InvalidChunk + from google.cloud.bigtable_v2.services.bigtable import BigtableClient client = _Client() chunks = _generate_cell_chunks([""]) response = _ReadRowsResponseV2(chunks) iterator = _MockCancellableIterator(response) - client._data_stub = mock.MagicMock() - client._data_stub.ReadRows.side_effect = [iterator] + client._data_stub = mock.create_autospec(BigtableClient) + client._data_stub.read_rows.side_effect = [iterator] request = object() - yrd = self._make_one(client._data_stub.ReadRows, request) + yrd = self._make_one(client._data_stub.read_rows, request) with self.assertRaises(InvalidChunk): self._consume_all(yrd) def test_state_cell_in_progress(self): + from google.cloud.bigtable_v2.services.bigtable import BigtableClient + LABELS = ["L1", "L2"] request = object() - read_rows = mock.MagicMock() - yrd = self._make_one(read_rows, request) + client = _Client() + client._data_stub = mock.create_autospec(BigtableClient) + yrd = self._make_one(client._data_stub.read_rows, 
request) chunk = _ReadRowsResponseCellChunkPB( row_key=self.ROW_KEY, @@ -657,6 +664,8 @@ def test_state_cell_in_progress(self): self.assertEqual(yrd._cell.value, self.VALUE + self.VALUE) def test_yield_rows_data(self): + from google.cloud.bigtable_v2.services.bigtable import BigtableClient + client = _Client() chunk = _ReadRowsResponseCellChunkPB( @@ -671,12 +680,13 @@ def test_yield_rows_data(self): response = _ReadRowsResponseV2(chunks) iterator = _MockCancellableIterator(response) - client._data_stub = mock.MagicMock() - client._data_stub.ReadRows.side_effect = [iterator] + data_api = mock.create_autospec(BigtableClient) + client._data_stub = data_api + client._data_stub.read_rows.side_effect = [iterator] request = object() - yrd = self._make_one(client._data_stub.ReadRows, request) + yrd = self._make_one(client._data_stub.read_rows, request) result = self._consume_all(yrd)[0] @@ -726,9 +736,9 @@ def setUpClass(cls): cls.row_range3 = RowRange(b"row_key41", b"row_key49") cls.request = _ReadRowsRequestPB(table_name=cls.table_name) - cls.request.rows.row_ranges.add(**cls.row_range1.get_range_kwargs()) - cls.request.rows.row_ranges.add(**cls.row_range2.get_range_kwargs()) - cls.request.rows.row_ranges.add(**cls.row_range3.get_range_kwargs()) + cls.request.rows.row_ranges.append(cls.row_range1.get_range_kwargs()) + cls.request.rows.row_ranges.append(cls.row_range2.get_range_kwargs()) + cls.request.rows.row_ranges.append(cls.row_range3.get_range_kwargs()) @staticmethod def _get_target_class(): @@ -796,9 +806,9 @@ def test__filter_row_ranges_all_ranges_already_read_open_closed(self): row_range3 = RowRange(b"row_key41", b"row_key49", False, True) request = _ReadRowsRequestPB(table_name=self.table_name) - request.rows.row_ranges.add(**row_range1.get_range_kwargs()) - request.rows.row_ranges.add(**row_range2.get_range_kwargs()) - request.rows.row_ranges.add(**row_range3.get_range_kwargs()) + request.rows.row_ranges.append(row_range1.get_range_kwargs()) + request.rows.row_ranges.append(row_range2.get_range_kwargs()) + request.rows.row_ranges.append(row_range3.get_range_kwargs()) request_manager = self._make_one(request, last_scanned_key, 2) request_manager.new_message = _ReadRowsRequestPB(table_name=self.table_name) @@ -827,13 +837,14 @@ def test__filter_row_ranges_some_ranges_already_read(self): def test_build_updated_request(self): from google.cloud.bigtable.row_filters import RowSampleFilter + from google.cloud.bigtable_v2.types import RowRange row_filter = RowSampleFilter(0.33) last_scanned_key = b"row_key25" request = _ReadRowsRequestPB( filter=row_filter.to_pb(), rows_limit=8, table_name=self.table_name ) - request.rows.row_ranges.add(**self.row_range1.get_range_kwargs()) + request.rows.row_ranges.append(self.row_range1.get_range_kwargs()) request_manager = self._make_one(request, last_scanned_key, 2) @@ -842,13 +853,17 @@ def test_build_updated_request(self): expected_result = _ReadRowsRequestPB( table_name=self.table_name, filter=row_filter.to_pb(), rows_limit=6 ) - expected_result.rows.row_ranges.add( + + row_range1 = RowRange( start_key_open=last_scanned_key, end_key_open=self.row_range1.end_key ) + expected_result.rows.row_ranges.append(row_range1) self.assertEqual(expected_result, result) def test_build_updated_request_full_table(self): + from google.cloud.bigtable_v2.types import RowRange + last_scanned_key = b"row_key14" request = _ReadRowsRequestPB(table_name=self.table_name) @@ -856,18 +871,21 @@ def test_build_updated_request_full_table(self): result = 
request_manager.build_updated_request() expected_result = _ReadRowsRequestPB(table_name=self.table_name, filter={}) - expected_result.rows.row_ranges.add(start_key_open=last_scanned_key) + row_range1 = RowRange(start_key_open=last_scanned_key) + expected_result.rows.row_ranges.append(row_range1) self.assertEqual(expected_result, result) def test_build_updated_request_no_start_key(self): from google.cloud.bigtable.row_filters import RowSampleFilter + from google.cloud.bigtable_v2.types import RowRange row_filter = RowSampleFilter(0.33) last_scanned_key = b"row_key25" request = _ReadRowsRequestPB( filter=row_filter.to_pb(), rows_limit=8, table_name=self.table_name ) - request.rows.row_ranges.add(end_key_open=b"row_key29") + row_range1 = RowRange(end_key_open=b"row_key29") + request.rows.row_ranges.append(row_range1) request_manager = self._make_one(request, last_scanned_key, 2) @@ -876,21 +894,26 @@ def test_build_updated_request_no_start_key(self): expected_result = _ReadRowsRequestPB( table_name=self.table_name, filter=row_filter.to_pb(), rows_limit=6 ) - expected_result.rows.row_ranges.add( + + row_range2 = RowRange( start_key_open=last_scanned_key, end_key_open=b"row_key29" ) + expected_result.rows.row_ranges.append(row_range2) self.assertEqual(expected_result, result) def test_build_updated_request_no_end_key(self): from google.cloud.bigtable.row_filters import RowSampleFilter + from google.cloud.bigtable_v2.types import RowRange row_filter = RowSampleFilter(0.33) last_scanned_key = b"row_key25" request = _ReadRowsRequestPB( filter=row_filter.to_pb(), rows_limit=8, table_name=self.table_name ) - request.rows.row_ranges.add(start_key_closed=b"row_key20") + + row_range1 = RowRange(start_key_closed=b"row_key20") + request.rows.row_ranges.append(row_range1) request_manager = self._make_one(request, last_scanned_key, 2) @@ -899,7 +922,8 @@ def test_build_updated_request_no_end_key(self): expected_result = _ReadRowsRequestPB( table_name=self.table_name, filter=row_filter.to_pb(), rows_limit=6 ) - expected_result.rows.row_ranges.add(start_key_open=last_scanned_key) + row_range2 = RowRange(start_key_open=last_scanned_key) + expected_result.rows.row_ranges.append(row_range2) self.assertEqual(expected_result, result) @@ -934,6 +958,8 @@ def test_build_updated_request_rows(self): self.assertEqual(expected_result, result) def test_build_updated_request_rows_limit(self): + from google.cloud.bigtable_v2.types import RowRange + last_scanned_key = b"row_key14" request = _ReadRowsRequestPB(table_name=self.table_name, rows_limit=10) @@ -943,7 +969,8 @@ def test_build_updated_request_rows_limit(self): expected_result = _ReadRowsRequestPB( table_name=self.table_name, filter={}, rows_limit=8 ) - expected_result.rows.row_ranges.add(start_key_open=last_scanned_key) + row_range1 = RowRange(start_key_open=last_scanned_key) + expected_result.rows.row_ranges.append(row_range1) self.assertEqual(expected_result, result) def test__key_already_read(self): @@ -1074,14 +1101,17 @@ def test_invalid_last_row_missing_commit(self): _marker = object() def _match_results(self, testcase_name, expected_result=_marker): + from google.cloud.bigtable_v2.services.bigtable import BigtableClient + client = _Client() chunks, results = self._load_json_test(testcase_name) response = _ReadRowsResponseV2(chunks) iterator = _MockCancellableIterator(response) - client._data_stub = mock.MagicMock() - client._data_stub.ReadRows.side_effect = [iterator] + data_api = mock.create_autospec(BigtableClient) + client._table_data_client = data_api + 
client._table_data_client.read_rows.side_effect = [iterator] request = object() - prd = self._make_one(client._data_stub.ReadRows, request) + prd = self._make_one(client._table_data_client.read_rows, request) prd.consume_all() flattened = self._sort_flattend_cells(_flatten_cells(prd)) if expected_result is self._marker: @@ -1216,6 +1246,7 @@ class _MockCancellableIterator(object): def __init__(self, *values): self.iter_values = iter(values) + self.last_scanned_row_key = "" def cancel(self): self.cancel_calls += 1 @@ -1239,6 +1270,7 @@ class _PartialCellData(object): family_name = u"" qualifier = None timestamp_micros = 0 + last_scanned_row_key = "" def __init__(self, **kw): self.labels = kw.pop("labels", []) @@ -1253,13 +1285,14 @@ def __init__(self, chunks, last_scanned_row_key=""): def _generate_cell_chunks(chunk_text_pbs): from google.protobuf.text_format import Merge - from google.cloud.bigtable_v2.proto.bigtable_pb2 import ReadRowsResponse + from google.cloud.bigtable_v2.types.bigtable import ReadRowsResponse chunks = [] for chunk_text_pb in chunk_text_pbs: chunk = ReadRowsResponse.CellChunk() - chunks.append(Merge(chunk_text_pb, chunk)) + chunk._pb = Merge(chunk_text_pb, chunk._pb) + chunks.append(chunk) return chunks @@ -1284,16 +1317,16 @@ def _parse_readrows_acceptance_tests(filename): def _ReadRowsResponseCellChunkPB(*args, **kw): - from google.cloud.bigtable_v2.proto import bigtable_pb2 as messages_v2_pb2 + from google.cloud.bigtable_v2.types import bigtable as messages_v2_pb2 family_name = kw.pop("family_name", None) qualifier = kw.pop("qualifier", None) message = messages_v2_pb2.ReadRowsResponse.CellChunk(*args, **kw) if family_name: - message.family_name.value = family_name + message.family_name = family_name if qualifier: - message.qualifier.value = qualifier + message.qualifier = qualifier return message @@ -1305,7 +1338,7 @@ def _make_cell(value): def _ReadRowsRequestPB(*args, **kw): - from google.cloud.bigtable_v2.proto import bigtable_pb2 as messages_v2_pb2 + from google.cloud.bigtable_v2.types import bigtable as messages_v2_pb2 return messages_v2_pb2.ReadRowsRequest(*args, **kw) diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_filters.py b/packages/google-cloud-bigtable/tests/unit/test_row_filters.py index 02a9123188a0..c42345ee0686 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_filters.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_filters.py @@ -1057,42 +1057,42 @@ def test_to_pb_false_only(self): def _ColumnRangePB(*args, **kw): - from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 + from google.cloud.bigtable_v2.types import data as data_v2_pb2 return data_v2_pb2.ColumnRange(*args, **kw) def _RowFilterPB(*args, **kw): - from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 + from google.cloud.bigtable_v2.types import data as data_v2_pb2 return data_v2_pb2.RowFilter(*args, **kw) def _RowFilterChainPB(*args, **kw): - from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 + from google.cloud.bigtable_v2.types import data as data_v2_pb2 return data_v2_pb2.RowFilter.Chain(*args, **kw) def _RowFilterConditionPB(*args, **kw): - from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 + from google.cloud.bigtable_v2.types import data as data_v2_pb2 return data_v2_pb2.RowFilter.Condition(*args, **kw) def _RowFilterInterleavePB(*args, **kw): - from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 + from google.cloud.bigtable_v2.types import data as data_v2_pb2 return 
data_v2_pb2.RowFilter.Interleave(*args, **kw) def _TimestampRangePB(*args, **kw): - from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 + from google.cloud.bigtable_v2.types import data as data_v2_pb2 return data_v2_pb2.TimestampRange(*args, **kw) def _ValueRangePB(*args, **kw): - from google.cloud.bigtable_v2.proto import data_pb2 as data_v2_pb2 + from google.cloud.bigtable_v2.types import data as data_v2_pb2 return data_v2_pb2.ValueRange(*args, **kw) diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_set.py b/packages/google-cloud-bigtable/tests/unit/test_row_set.py index a855099a1fb3..c1fa4ca87bb1 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_set.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_set.py @@ -185,7 +185,7 @@ def test__update_message_request(self): expected_request = _ReadRowsRequestPB(table_name=table_name) expected_request.rows.row_keys.append(_to_bytes("row_key1")) - expected_request.rows.row_ranges.add(**row_range1.get_range_kwargs()) + expected_request.rows.row_ranges.append(row_range1.get_range_kwargs()) self.assertEqual(request, expected_request) @@ -270,6 +270,6 @@ def test_get_range_kwargs_open_closed(self): def _ReadRowsRequestPB(*args, **kw): - from google.cloud.bigtable_v2.proto import bigtable_pb2 as messages_v2_pb2 + from google.cloud.bigtable_v2.types import bigtable as messages_v2_pb2 return messages_v2_pb2.ReadRowsRequest(*args, **kw) diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index 4469846b12d1..c521191920ae 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -47,7 +47,8 @@ def test_w_too_many_mutations(self): def test_normal(self): from google.cloud.bigtable.row import DirectRow - from google.cloud.bigtable_v2.proto import bigtable_pb2 + from google.cloud.bigtable_v2.types import MutateRowsRequest + from google.cloud.bigtable_v2.types import data table = mock.Mock(spec=["name"]) table.name = "table" @@ -60,22 +61,23 @@ def test_normal(self): result = self._call_fut("table", rows) - Entry = bigtable_pb2.MutateRowsRequest.Entry - - entry_1 = Entry(row_key=b"row_key") - mutations_1 = entry_1.mutations.add() + entry_1 = MutateRowsRequest.Entry() + entry_1.row_key = b"row_key" + mutations_1 = data.Mutation() mutations_1.set_cell.family_name = "cf1" mutations_1.set_cell.column_qualifier = b"c1" mutations_1.set_cell.timestamp_micros = -1 mutations_1.set_cell.value = b"1" + entry_1.mutations.append(mutations_1) - entry_2 = Entry(row_key=b"row_key_2") - mutations_2 = entry_2.mutations.add() + entry_2 = MutateRowsRequest.Entry() + entry_2.row_key = b"row_key_2" + mutations_2 = data.Mutation() mutations_2.set_cell.family_name = "cf1" mutations_2.set_cell.column_qualifier = b"c1" mutations_2.set_cell.timestamp_micros = -1 mutations_2.set_cell.value = b"2" - + entry_2.mutations.append(mutations_2) self.assertEqual(result, [entry_1, entry_2]) @@ -141,7 +143,7 @@ class TestTable(unittest.TestCase): ROW_KEY_1 = b"row-key-1" ROW_KEY_2 = b"row-key-2" ROW_KEY_3 = b"row-key-3" - FAMILY_NAME = u"family" + FAMILY_NAME = "family" QUALIFIER = b"qualifier" TIMESTAMP_MICROS = 100 VALUE = b"value" @@ -322,16 +324,16 @@ def test___ne__(self): self.assertNotEqual(table1, table2) def _create_test_helper(self, split_keys=[], column_families={}): - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client - from google.cloud.bigtable_admin_v2.proto 
import table_pb2 - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_table_admin_pb2 as table_admin_messages_v2_pb2, + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + client as bigtable_table_admin, + ) + from google.cloud.bigtable_admin_v2.types import table as table_pb2 + from google.cloud.bigtable_admin_v2.types import ( + bigtable_table_admin as table_admin_messages_v2_pb2, ) from google.cloud.bigtable.column_family import ColumnFamily - table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient - ) + table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True @@ -354,10 +356,12 @@ def _create_test_helper(self, split_keys=[], column_families={}): splits = [split(key=split_key) for split_key in split_keys] table_api.create_table.assert_called_once_with( - parent=self.INSTANCE_NAME, - table=table_pb2.Table(column_families=families), - table_id=self.TABLE_ID, - initial_splits=splits, + request={ + "parent": self.INSTANCE_NAME, + "table": table_pb2.Table(column_families=families), + "table_id": self.TABLE_ID, + "initial_splits": splits, + } ) def test_create(self): @@ -373,35 +377,44 @@ def test_create_with_split_keys(self): self._create_test_helper(split_keys=[b"split1", b"split2", b"split3"]) def test_exists(self): - from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_data_v2_pb2 - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_table_admin_pb2 as table_messages_v1_pb2, + from google.cloud.bigtable_admin_v2.types import ListTablesResponse + from google.cloud.bigtable_admin_v2.types import Table + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + client as table_admin_client, ) - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_instance_admin_client, - bigtable_table_admin_client, + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + client as instance_admin_client, ) from google.api_core.exceptions import NotFound from google.api_core.exceptions import BadRequest - table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) - instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient( - mock.Mock() + table_api = mock.create_autospec(table_admin_client.BigtableTableAdminClient) + instance_api = mock.create_autospec( + instance_admin_client.BigtableInstanceAdminClient ) + credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True ) instance = client.instance(instance_id=self.INSTANCE_ID) # Create response_pb - response_pb = table_messages_v1_pb2.ListTablesResponse( - tables=[table_data_v2_pb2.Table(name=self.TABLE_NAME)] - ) + response_pb = ListTablesResponse(tables=[Table(name=self.TABLE_NAME)]) # Patch API calls client._table_admin_client = table_api client._instance_admin_client = instance_api - bigtable_table_stub = client._table_admin_client.transport + bigtable_table_stub = client._table_admin_client + + bigtable_table_stub.get_table.side_effect = [ + response_pb, + NotFound("testing"), + BadRequest("testing"), + ] + + client._table_admin_client = table_api + client._instance_admin_client = instance_api + bigtable_table_stub = client._table_admin_client bigtable_table_stub.get_table.side_effect = [ response_pb, NotFound("testing"), @@ -422,11 +435,11 @@ def test_exists(self): 
table2.exists() def test_delete(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client - - table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + client as bigtable_table_admin, ) + + table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True @@ -445,9 +458,11 @@ def test_delete(self): self.assertEqual(result, expected_result) def _list_column_families_helper(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + client as bigtable_table_admin, + ) - table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) + table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True @@ -462,7 +477,7 @@ def _list_column_families_helper(self): # Patch the stub used by the API method. client._table_admin_client = table_api - bigtable_table_stub = client._table_admin_client.transport + bigtable_table_stub = client._table_admin_client bigtable_table_stub.get_table.side_effect = [response_pb] # Create expected_result. @@ -476,7 +491,9 @@ def test_list_column_families(self): self._list_column_families_helper() def test_get_cluster_states(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + client as bigtable_table_admin, + ) from google.cloud.bigtable.enums import Table as enum_table from google.cloud.bigtable.table import ClusterState @@ -484,7 +501,7 @@ def test_get_cluster_states(self): PLANNED_MAINTENANCE = enum_table.ReplicationState.PLANNED_MAINTENANCE READY = enum_table.ReplicationState.READY - table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) + table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True @@ -502,14 +519,15 @@ def test_get_cluster_states(self): # Patch the stub used by the API method. client._table_admin_client = table_api - bigtable_table_stub = client._table_admin_client.transport + bigtable_table_stub = client._table_admin_client + bigtable_table_stub.get_table.side_effect = [response_pb] # build expected result expected_result = { - u"cluster-id1": ClusterState(INITIALIZING), - u"cluster-id2": ClusterState(PLANNED_MAINTENANCE), - u"cluster-id3": ClusterState(READY), + "cluster-id1": ClusterState(INITIALIZING), + "cluster-id2": ClusterState(PLANNED_MAINTENANCE), + "cluster-id3": ClusterState(READY), } # Perform the method and check the result. 
@@ -521,14 +539,14 @@ def _read_row_helper(self, chunks, expected_result, app_profile_id=None): from google.cloud._testing import _Monkey from google.cloud.bigtable import table as MUT from google.cloud.bigtable.row_set import RowSet - from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.cloud.bigtable_v2.services.bigtable import BigtableClient + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + client as bigtable_table_admin, + ) from google.cloud.bigtable.row_filters import RowSampleFilter - data_api = bigtable_client.BigtableClient(mock.Mock()) - table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient - ) + data_api = mock.create_autospec(BigtableClient) + table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True @@ -554,10 +572,8 @@ def mock_create_row_request(table_name, **kwargs): # Patch the stub used by the API method. client._table_data_client = data_api client._table_admin_client = table_api - client._table_data_client.transport.read_rows = mock.Mock( - side_effect=[response_iterator] - ) - + client._table_data_client.read_rows.side_effect = [response_iterator] + table._instance._client._table_data_client = client._table_data_client # Perform the method and check the result. filter_obj = RowSampleFilter(0.33) result = None @@ -618,7 +634,7 @@ def test_read_row_more_than_one_row_returned(self): timestamp_micros=self.TIMESTAMP_MICROS, value=self.VALUE, commit_row=True, - ) + )._pb chunk_2 = _ReadRowsResponseCellChunkPB( row_key=self.ROW_KEY_2, family_name=self.FAMILY_NAME, @@ -626,7 +642,7 @@ def test_read_row_more_than_one_row_returned(self): timestamp_micros=self.TIMESTAMP_MICROS, value=self.VALUE, commit_row=True, - ) + )._pb chunks = [chunk_1, chunk_2] with self.assertRaises(ValueError): @@ -650,11 +666,11 @@ def _mutate_rows_helper( ): from google.rpc.status_pb2 import Status from google.cloud.bigtable.table import DEFAULT_RETRY - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client - - table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + client as bigtable_table_admin, ) + + table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True @@ -737,14 +753,14 @@ def test_read_rows(self): from google.cloud._testing import _Monkey from google.cloud.bigtable.row_data import PartialRowsData from google.cloud.bigtable import table as MUT - from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.cloud.bigtable_v2.services.bigtable import BigtableClient + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + client as bigtable_table_admin, + ) from google.cloud.bigtable.row_data import DEFAULT_RETRY_READ_ROWS - data_api = bigtable_client.BigtableClient(mock.Mock()) - table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient - ) + data_api = mock.create_autospec(BigtableClient) + table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) credentials = 
_make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True @@ -797,12 +813,14 @@ def mock_create_row_request(table_name, **kwargs): self.assertEqual(mock_created, [(table.name, created_kwargs)]) def test_read_retry_rows(self): - from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.cloud.bigtable_v2.services.bigtable import BigtableClient + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + client as bigtable_table_admin, + ) from google.api_core import retry - data_api = bigtable_client.BigtableClient(mock.Mock()) - table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) + data_api = mock.create_autospec(BigtableClient) + table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True @@ -840,7 +858,9 @@ def test_read_retry_rows(self): response_iterator = _MockReadRowsIterator(response_2) # Patch the stub used by the API method. - client._table_data_client.transport.read_rows = mock.Mock( + data_api.table_path.return_value = f"projects/{self.PROJECT_ID}/instances/{self.INSTANCE_ID}/tables/{self.TABLE_ID}" + + client._table_data_client.read_rows = mock.Mock( side_effect=[ response_failure_iterator_1, response_failure_iterator_2, @@ -848,6 +868,8 @@ def test_read_retry_rows(self): ] ) + table._instance._client._table_data_client = data_api + table._instance._client._table_admin_client = table_api rows = [] for row in table.read_rows( start_key=self.ROW_KEY_1, end_key=self.ROW_KEY_2, retry=retry_read_rows @@ -858,12 +880,14 @@ def test_read_retry_rows(self): self.assertEqual(result.row_key, self.ROW_KEY_2) def test_yield_retry_rows(self): - from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.cloud.bigtable_v2.services.bigtable import BigtableClient + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + client as bigtable_table_admin, + ) import warnings - data_api = bigtable_client.BigtableClient(mock.Mock()) - table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) + data_api = mock.create_autospec(BigtableClient) + table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True @@ -899,13 +923,16 @@ def test_yield_retry_rows(self): response_iterator = _MockReadRowsIterator(response_2) # Patch the stub used by the API method. 
- client._table_data_client.transport.read_rows = mock.Mock( - side_effect=[ - response_failure_iterator_1, - response_failure_iterator_2, - response_iterator, - ] - ) + data_api.table_path.return_value = f"projects/{self.PROJECT_ID}/instances/{self.INSTANCE_ID}/tables/{self.TABLE_ID}" + table_api.table_path.return_value = f"projects/{self.PROJECT_ID}/instances/{self.INSTANCE_ID}/tables/{self.TABLE_ID}" + + table._instance._client._table_data_client = data_api + table._instance._client._table_admin_client = table_api + client._table_data_client.read_rows.side_effect = [ + response_failure_iterator_1, + response_failure_iterator_2, + response_iterator, + ] rows = [] with warnings.catch_warnings(record=True) as warned: @@ -921,14 +948,16 @@ def test_yield_retry_rows(self): self.assertEqual(result.row_key, self.ROW_KEY_2) def test_yield_rows_with_row_set(self): - from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.cloud.bigtable_v2.services.bigtable import BigtableClient + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + client as bigtable_table_admin, + ) from google.cloud.bigtable.row_set import RowSet from google.cloud.bigtable.row_set import RowRange import warnings - data_api = bigtable_client.BigtableClient(mock.Mock()) - table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) + data_api = mock.create_autospec(BigtableClient) + table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True @@ -972,9 +1001,12 @@ def test_yield_rows_with_row_set(self): response_iterator = _MockReadRowsIterator(response_1, response_2, response_3) # Patch the stub used by the API method. - client._table_data_client.transport.read_rows = mock.Mock( - side_effect=[response_iterator] - ) + data_api.table_path.return_value = f"projects/{self.PROJECT_ID}/instances/{self.INSTANCE_ID}/tables/{self.TABLE_ID}" + table_api.table_path.return_value = f"projects/{self.PROJECT_ID}/instances/{self.INSTANCE_ID}/tables/{self.TABLE_ID}" + + table._instance._client._table_data_client = data_api + table._instance._client._table_admin_client = table_api + client._table_data_client.read_rows.side_effect = [response_iterator] rows = [] row_set = RowSet() @@ -995,11 +1027,13 @@ def test_yield_rows_with_row_set(self): self.assertEqual(rows[2].row_key, self.ROW_KEY_3) def test_sample_row_keys(self): - from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.cloud.bigtable_v2.services.bigtable import BigtableClient + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + client as bigtable_table_admin, + ) - data_api = bigtable_client.BigtableClient(mock.Mock()) - table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) + data_api = mock.create_autospec(BigtableClient) + table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True @@ -1013,10 +1047,7 @@ def test_sample_row_keys(self): response_iterator = object() # Just passed to a mock. # Patch the stub used by the API method. 
- inner_api_calls = client._table_data_client._inner_api_calls - inner_api_calls["sample_row_keys"] = mock.Mock( - side_effect=[[response_iterator]] - ) + client._table_data_client.sample_row_keys.side_effect = [[response_iterator]] # Create expected_result. expected_result = response_iterator @@ -1026,13 +1057,13 @@ def test_sample_row_keys(self): self.assertEqual(result[0], expected_result) def test_truncate(self): - from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client - - data_api = mock.create_autospec(bigtable_client.BigtableClient) - table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient + from google.cloud.bigtable_v2.services.bigtable import BigtableClient + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + client as bigtable_table_admin, ) + + data_api = mock.create_autospec(BigtableClient) + table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True @@ -1047,19 +1078,19 @@ def test_truncate(self): result = table.truncate() table_api.drop_row_range.assert_called_once_with( - name=self.TABLE_NAME, delete_all_data_from_table=True + request={"name": self.TABLE_NAME, "delete_all_data_from_table": True} ) self.assertEqual(result, expected_result) def test_truncate_w_timeout(self): - from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client - - data_api = mock.create_autospec(bigtable_client.BigtableClient) - table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient + from google.cloud.bigtable_v2.services.bigtable import BigtableClient + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + client as bigtable_table_admin, ) + + data_api = mock.create_autospec(BigtableClient) + table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True @@ -1077,13 +1108,13 @@ def test_truncate_w_timeout(self): self.assertEqual(result, expected_result) def test_drop_by_prefix(self): - from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client - - data_api = mock.create_autospec(bigtable_client.BigtableClient) - table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient + from google.cloud.bigtable_v2.services.bigtable import BigtableClient + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + client as bigtable_table_admin, ) + + data_api = mock.create_autospec(BigtableClient) + table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True @@ -1102,13 +1133,13 @@ def test_drop_by_prefix(self): self.assertEqual(result, expected_result) def test_drop_by_prefix_w_timeout(self): - from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client - - data_api = mock.create_autospec(bigtable_client.BigtableClient) - table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient + from 
google.cloud.bigtable_v2.services.bigtable import BigtableClient + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + client as bigtable_table_admin, ) + + data_api = mock.create_autospec(BigtableClient) + table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True @@ -1140,7 +1171,9 @@ def test_mutations_batcher_factory(self): self.assertEqual(mutation_batcher.max_row_bytes, max_row_bytes) def test_get_iam_policy(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + client as bigtable_table_admin, + ) from google.iam.v1 import policy_pb2 from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE @@ -1157,15 +1190,15 @@ def test_get_iam_policy(self): bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": members}] iam_policy = policy_pb2.Policy(version=version, etag=etag, bindings=bindings) - table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient - ) + table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) client._table_admin_client = table_api table_api.get_iam_policy.return_value = iam_policy result = table.get_iam_policy() - table_api.get_iam_policy.assert_called_once_with(resource=table.name) + table_api.get_iam_policy.assert_called_once_with( + request={"resource": table.name} + ) self.assertEqual(result.version, version) self.assertEqual(result.etag, etag) admins = result.bigtable_admins @@ -1174,7 +1207,9 @@ def test_get_iam_policy(self): self.assertEqual(found, expected) def test_set_iam_policy(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + client as bigtable_table_admin, + ) from google.iam.v1 import policy_pb2 from google.cloud.bigtable.policy import Policy from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE @@ -1192,9 +1227,7 @@ def test_set_iam_policy(self): bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": sorted(members)}] iam_policy_pb = policy_pb2.Policy(version=version, etag=etag, bindings=bindings) - table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient - ) + table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) client._table_admin_client = table_api table_api.set_iam_policy.return_value = iam_policy_pb @@ -1207,7 +1240,7 @@ def test_set_iam_policy(self): result = table.set_iam_policy(iam_policy) table_api.set_iam_policy.assert_called_once_with( - resource=table.name, policy=iam_policy_pb + request={"resource": table.name, "policy": iam_policy_pb} ) self.assertEqual(result.version, version) self.assertEqual(result.etag, etag) @@ -1217,7 +1250,9 @@ def test_set_iam_policy(self): self.assertEqual(found, expected) def test_test_iam_permissions(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + client as bigtable_table_admin, + ) from google.iam.v1 import iam_policy_pb2 credentials = _make_credentials() @@ -1231,9 +1266,7 @@ def test_test_iam_permissions(self): response = iam_policy_pb2.TestIamPermissionsResponse(permissions=permissions) - table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient - ) + table_api = 
mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) table_api.test_iam_permissions.return_value = response client._table_admin_client = table_api @@ -1241,7 +1274,7 @@ def test_test_iam_permissions(self): self.assertEqual(result, permissions) table_api.test_iam_permissions.assert_called_once_with( - resource=table.name, permissions=permissions + request={"resource": table.name, "permissions": permissions} ) def test_backup_factory_defaults(self): @@ -1274,9 +1307,7 @@ def test_backup_factory_non_defaults(self): table = self._make_one(self.TABLE_ID, instance) timestamp = datetime.datetime.utcnow().replace(tzinfo=UTC) backup = table.backup( - self.BACKUP_ID, - cluster_id=self.CLUSTER_ID, - expire_time=timestamp, + self.BACKUP_ID, cluster_id=self.CLUSTER_ID, expire_time=timestamp, ) self.assertIsInstance(backup, Backup) @@ -1293,18 +1324,20 @@ def test_backup_factory_non_defaults(self): self.assertIsNone(backup._state) def _list_backups_helper(self, cluster_id=None, filter_=None, **kwargs): - from google.cloud.bigtable_admin_v2.gapic import ( - bigtable_instance_admin_client, - bigtable_table_admin_client, + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + BigtableTableAdminClient, ) - from google.cloud.bigtable_admin_v2.proto import ( - bigtable_table_admin_pb2, - table_pb2, + from google.cloud.bigtable_admin_v2.types import ( + bigtable_table_admin, + Backup as backup_pb, ) from google.cloud.bigtable.backup import Backup - instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient - table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) + instance_api = mock.create_autospec(BigtableInstanceAdminClient) + table_api = mock.create_autospec(BigtableTableAdminClient) client = self._make_client( project=self.PROJECT_ID, credentials=_make_credentials(), admin=True ) @@ -1313,19 +1346,20 @@ def _list_backups_helper(self, cluster_id=None, filter_=None, **kwargs): client._instance_admin_client = instance_api client._table_admin_client = table_api + table._instance._client._instance_admin_client = instance_api + table._instance._client._table_admin_client = table_api parent = self.INSTANCE_NAME + "/clusters/cluster" - backups_pb = bigtable_table_admin_pb2.ListBackupsResponse( + backups_pb = bigtable_table_admin.ListBackupsResponse( backups=[ - table_pb2.Backup(name=parent + "/backups/op1"), - table_pb2.Backup(name=parent + "/backups/op2"), - table_pb2.Backup(name=parent + "/backups/op3"), + backup_pb(name=parent + "/backups/op1"), + backup_pb(name=parent + "/backups/op2"), + backup_pb(name=parent + "/backups/op3"), ] ) - api = table_api._inner_api_calls["list_backups"] = mock.Mock( - return_value=backups_pb - ) + table_api.list_backups.return_value = backups_pb + api = table._instance._client._table_admin_client.list_backups backups_filter = "source_table:{}".format(self.TABLE_NAME) if filter_: @@ -1340,16 +1374,21 @@ def _list_backups_helper(self, cluster_id=None, filter_=None, **kwargs): cluster_id = "-" parent = "{}/clusters/{}".format(self.INSTANCE_NAME, cluster_id) - expected_metadata = [ - ("x-goog-request-params", "parent={}".format(parent)), - ] + order_by = None + page_size = 0 + if "order_by" in kwargs: + order_by = kwargs["order_by"] + + if "page_size" in kwargs: + page_size = kwargs["page_size"] + api.assert_called_once_with( - bigtable_table_admin_pb2.ListBackupsRequest( - parent=parent, 
filter=backups_filter, **kwargs - ), - retry=mock.ANY, - timeout=mock.ANY, - metadata=expected_metadata, + request={ + "parent": parent, + "filter": backups_filter, + "order_by": order_by, + "page_size": page_size, + } ) def test_list_backups_defaults(self): @@ -1362,20 +1401,23 @@ def test_list_backups_w_options(self): def _restore_helper(self, backup_name=None): from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient - from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client from google.cloud.bigtable.instance import Instance op_future = object() - instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient + credentials = _make_credentials() + client = self._make_client( + project=self.PROJECT_ID, credentials=credentials, admin=True + ) - client = mock.Mock(project=self.PROJECT_ID, instance_admin_client=instance_api) instance = Instance(self.INSTANCE_ID, client=client) table = self._make_one(self.TABLE_ID, instance) - api = client.table_admin_client = mock.create_autospec( - BigtableTableAdminClient, instance=True + api = client._table_admin_client = mock.create_autospec( + BigtableTableAdminClient ) + api.restore_table.return_value = op_future + table._instance._client._table_admin_client = api if backup_name: future = table.restore(self.TABLE_ID, backup_name=self.BACKUP_NAME) @@ -1384,9 +1426,11 @@ def _restore_helper(self, backup_name=None): self.assertIs(future, op_future) api.restore_table.assert_called_once_with( - parent=self.INSTANCE_NAME, - table_id=self.TABLE_ID, - backup=self.BACKUP_NAME, + request={ + "parent": self.INSTANCE_NAME, + "table_id": self.TABLE_ID, + "backup": self.BACKUP_NAME, + } ) def test_restore_table_w_backup_id(self): @@ -1445,7 +1489,7 @@ def _make_responses_statuses(self, codes): def _make_responses(self, codes): import six - from google.cloud.bigtable_v2.proto.bigtable_pb2 import MutateRowsResponse + from google.cloud.bigtable_v2.types.bigtable import MutateRowsResponse from google.rpc.status_pb2 import Status entries = [ @@ -1455,13 +1499,13 @@ def _make_responses(self, codes): return MutateRowsResponse(entries=entries) def test_callable_empty_rows(self): - from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client - - data_api = mock.create_autospec(bigtable_client.BigtableClient) - table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient + from google.cloud.bigtable_v2.services.bigtable import BigtableClient + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + client as bigtable_table_admin, ) + + data_api = mock.create_autospec(BigtableClient) + table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True @@ -1478,8 +1522,10 @@ def test_callable_empty_rows(self): def test_callable_no_retry_strategy(self): from google.cloud.bigtable.row import DirectRow - from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.cloud.bigtable_v2.services.bigtable import BigtableClient + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + client as bigtable_table_admin, + ) # Setup: # - Mutate 3 rows. 
@@ -1491,8 +1537,9 @@ def test_callable_no_retry_strategy(self): # - State of responses_statuses should be # [success, retryable, non-retryable] - data_api = bigtable_client.BigtableClient(mock.Mock()) - table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) + data_api = mock.create_autospec(BigtableClient) + table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) + credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True @@ -1509,12 +1556,16 @@ def test_callable_no_retry_strategy(self): row_3 = DirectRow(row_key=b"row_key_3", table=table) row_3.set_cell("cf", b"col", b"value3") - worker = self._make_worker(client, table.name, [row_1, row_2, row_3]) - response_codes = [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE] response = self._make_responses(response_codes) data_api.mutate_rows = mock.MagicMock(return_value=[response]) + table._instance._client._table_data_client = data_api + table._instance._client._table_admin_client = table_api + + table._instance._client._table_data_client.mutate_rows.return_value = [response] + + worker = self._make_worker(client, table.name, [row_1, row_2, row_3]) statuses = worker(retry=None) result = [status.code for status in statuses] @@ -1525,8 +1576,10 @@ def test_callable_no_retry_strategy(self): def test_callable_retry(self): from google.cloud.bigtable.row import DirectRow from google.cloud.bigtable.table import DEFAULT_RETRY - from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.cloud.bigtable_v2.services.bigtable import BigtableClient + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + client as bigtable_table_admin, + ) # Setup: # - Mutate 3 rows. @@ -1539,8 +1592,9 @@ def test_callable_retry(self): # - State of responses_statuses should be # [success, success, non-retryable] - data_api = bigtable_client.BigtableClient(mock.Mock()) - table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) + data_api = mock.create_autospec(BigtableClient) + table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) + credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True @@ -1549,7 +1603,6 @@ def test_callable_retry(self): client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_table(self.TABLE_ID, instance) - row_1 = DirectRow(row_key=b"row_key", table=table) row_1.set_cell("cf", b"col", b"value1") row_2 = DirectRow(row_key=b"row_key_2", table=table) @@ -1563,9 +1616,9 @@ def test_callable_retry(self): response_2 = self._make_responses([self.SUCCESS]) # Patch the stub used by the API method. 
- client._table_data_client._inner_api_calls["mutate_rows"] = mock.Mock( - side_effect=[[response_1], [response_2]] - ) + client._table_data_client.mutate_rows.side_effect = [[response_1], [response_2]] + table._instance._client._table_data_client = data_api + table._instance._client._table_admin_client = table_api retry = DEFAULT_RETRY.with_delay(initial=0.1) worker = self._make_worker(client, table.name, [row_1, row_2, row_3]) @@ -1574,17 +1627,15 @@ def test_callable_retry(self): result = [status.code for status in statuses] expected_result = [self.SUCCESS, self.SUCCESS, self.NON_RETRYABLE] - self.assertEqual( - client._table_data_client._inner_api_calls["mutate_rows"].call_count, 2 - ) + self.assertEqual(client._table_data_client.mutate_rows.call_count, 2) self.assertEqual(result, expected_result) def test_do_mutate_retryable_rows_empty_rows(self): - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client - - table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + client as bigtable_table_admin, ) + + table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True @@ -1600,8 +1651,10 @@ def test_do_mutate_retryable_rows_empty_rows(self): def test_do_mutate_retryable_rows(self): from google.cloud.bigtable.row import DirectRow - from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.cloud.bigtable_v2.services.bigtable import BigtableClient + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + client as bigtable_table_admin, + ) # Setup: # - Mutate 2 rows. @@ -1610,8 +1663,9 @@ def test_do_mutate_retryable_rows(self): # Expectation: # - Expect [success, non-retryable] - data_api = bigtable_client.BigtableClient(mock.Mock()) - table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) + data_api = mock.create_autospec(BigtableClient) + table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) + credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True @@ -1629,8 +1683,9 @@ def test_do_mutate_retryable_rows(self): response = self._make_responses([self.SUCCESS, self.NON_RETRYABLE]) # Patch the stub used by the API method. - inner_api_calls = client._table_data_client._inner_api_calls - inner_api_calls["mutate_rows"] = mock.Mock(side_effect=[[response]]) + client._table_data_client.mutate_rows.side_effect = [[response]] + table._instance._client._table_data_client = data_api + table._instance._client._table_admin_client = table_api worker = self._make_worker(client, table.name, [row_1, row_2]) statuses = worker._do_mutate_retryable_rows() @@ -1643,8 +1698,10 @@ def test_do_mutate_retryable_rows(self): def test_do_mutate_retryable_rows_retry(self): from google.cloud.bigtable.row import DirectRow from google.cloud.bigtable.table import _BigtableRetryableError - from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.cloud.bigtable_v2.services.bigtable import BigtableClient + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + client as bigtable_table_admin, + ) # Setup: # - Mutate 3 rows. 
@@ -1655,8 +1712,8 @@ def test_do_mutate_retryable_rows_retry(self): # - State of responses_statuses should be # [success, retryable, non-retryable] - data_api = bigtable_client.BigtableClient(mock.Mock()) - table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) + data_api = mock.create_autospec(BigtableClient) + table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True @@ -1678,8 +1735,10 @@ def test_do_mutate_retryable_rows_retry(self): ) # Patch the stub used by the API method. - inner_api_calls = client._table_data_client._inner_api_calls - inner_api_calls["mutate_rows"] = mock.Mock(side_effect=[[response]]) + client._table_data_client.mutate_rows.side_effect = [[response]] + + table._instance._client._table_data_client = data_api + table._instance._client._table_admin_client = table_api worker = self._make_worker(client, table.name, [row_1, row_2, row_3]) @@ -1695,8 +1754,10 @@ def test_do_mutate_retryable_rows_retry(self): def test_do_mutate_retryable_rows_second_retry(self): from google.cloud.bigtable.row import DirectRow from google.cloud.bigtable.table import _BigtableRetryableError - from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.cloud.bigtable_v2.services.bigtable import BigtableClient + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + client as bigtable_table_admin, + ) # Setup: # - Mutate 4 rows. @@ -1712,8 +1773,8 @@ def test_do_mutate_retryable_rows_second_retry(self): # - Exception contains response whose index should be '3' even though # only two rows were retried. - data_api = bigtable_client.BigtableClient(mock.Mock()) - table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) + data_api = mock.create_autospec(BigtableClient) + table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True @@ -1735,8 +1796,10 @@ def test_do_mutate_retryable_rows_second_retry(self): response = self._make_responses([self.SUCCESS, self.RETRYABLE_1]) # Patch the stub used by the API method. - inner_api_calls = client._table_data_client._inner_api_calls - inner_api_calls["mutate_rows"] = mock.Mock(side_effect=[[response]]) + client._table_data_client.mutate_rows.side_effect = [[response]] + + table._instance._client._table_data_client = data_api + table._instance._client._table_admin_client = table_api worker = self._make_worker(client, table.name, [row_1, row_2, row_3, row_4]) worker.responses_statuses = self._make_responses_statuses( @@ -1759,8 +1822,10 @@ def test_do_mutate_retryable_rows_second_retry(self): def test_do_mutate_retryable_rows_second_try(self): from google.cloud.bigtable.row import DirectRow - from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.cloud.bigtable_v2.services.bigtable import BigtableClient + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + client as bigtable_table_admin, + ) # Setup: # - Mutate 4 rows. 
@@ -1772,8 +1837,8 @@ def test_do_mutate_retryable_rows_second_try(self): # - After second try: # [success, non-retryable, non-retryable, success] - data_api = bigtable_client.BigtableClient(mock.Mock()) - table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) + data_api = mock.create_autospec(BigtableClient) + table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True @@ -1795,8 +1860,10 @@ def test_do_mutate_retryable_rows_second_try(self): response = self._make_responses([self.NON_RETRYABLE, self.SUCCESS]) # Patch the stub used by the API method. - inner_api_calls = client._table_data_client._inner_api_calls - inner_api_calls["mutate_rows"] = mock.Mock(side_effect=[[response]]) + client._table_data_client.mutate_rows.side_effect = [[response]] + + table._instance._client._table_data_client = data_api + table._instance._client._table_admin_client = table_api worker = self._make_worker(client, table.name, [row_1, row_2, row_3, row_4]) worker.responses_statuses = self._make_responses_statuses( @@ -1817,7 +1884,9 @@ def test_do_mutate_retryable_rows_second_try(self): def test_do_mutate_retryable_rows_second_try_no_retryable(self): from google.cloud.bigtable.row import DirectRow - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + client as bigtable_table_admin, + ) # Setup: # - Mutate 2 rows. @@ -1827,9 +1896,7 @@ def test_do_mutate_retryable_rows_second_try_no_retryable(self): # Expectation: # - After second try: [success, non-retryable] - table_api = mock.create_autospec( - bigtable_table_admin_client.BigtableTableAdminClient - ) + table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True @@ -1848,6 +1915,8 @@ def test_do_mutate_retryable_rows_second_try_no_retryable(self): [self.SUCCESS, self.NON_RETRYABLE] ) + table._instance._client._table_admin_client = table_api + statuses = worker._do_mutate_retryable_rows() result = [status.code for status in statuses] @@ -1857,11 +1926,13 @@ def test_do_mutate_retryable_rows_second_try_no_retryable(self): def test_do_mutate_retryable_rows_mismatch_num_responses(self): from google.cloud.bigtable.row import DirectRow - from google.cloud.bigtable_v2.gapic import bigtable_client - from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client + from google.cloud.bigtable_v2.services.bigtable import BigtableClient + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + client as bigtable_table_admin, + ) - data_api = bigtable_client.BigtableClient(mock.Mock()) - table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock()) + data_api = mock.create_autospec(BigtableClient) + table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True @@ -1879,8 +1950,10 @@ def test_do_mutate_retryable_rows_mismatch_num_responses(self): response = self._make_responses([self.SUCCESS]) # Patch the stub used by the API method. 
- inner_api_calls = client._table_data_client._inner_api_calls - inner_api_calls["mutate_rows"] = mock.Mock(side_effect=[[response]]) + client._table_data_client.mutate_rows.side_effect = [[response]] + + table._instance._client._table_data_client = data_api + table._instance._client._table_admin_client = table_api worker = self._make_worker(client, table.name, [row_1, row_2]) with self.assertRaises(RuntimeError): @@ -1924,33 +1997,42 @@ def test_row_range_row_set_conflict(self): self._call_fut(None, end_key=object(), row_set=object()) def test_row_range_start_key(self): + from google.cloud.bigtable_v2.types import RowRange + table_name = "table_name" start_key = b"start_key" result = self._call_fut(table_name, start_key=start_key) expected_result = _ReadRowsRequestPB(table_name=table_name) - expected_result.rows.row_ranges.add(start_key_closed=start_key) + row_range = RowRange(start_key_closed=start_key) + expected_result.rows.row_ranges.append(row_range) self.assertEqual(result, expected_result) def test_row_range_end_key(self): + from google.cloud.bigtable_v2.types import RowRange + table_name = "table_name" end_key = b"end_key" result = self._call_fut(table_name, end_key=end_key) expected_result = _ReadRowsRequestPB(table_name=table_name) - expected_result.rows.row_ranges.add(end_key_open=end_key) + row_range = RowRange(end_key_open=end_key) + expected_result.rows.row_ranges.append(row_range) self.assertEqual(result, expected_result) def test_row_range_both_keys(self): + from google.cloud.bigtable_v2.types import RowRange + table_name = "table_name" start_key = b"start_key" end_key = b"end_key" result = self._call_fut(table_name, start_key=start_key, end_key=end_key) + row_range = RowRange(start_key_closed=start_key, end_key_open=end_key) expected_result = _ReadRowsRequestPB(table_name=table_name) - expected_result.rows.row_ranges.add( - start_key_closed=start_key, end_key_open=end_key - ) + expected_result.rows.row_ranges.append(row_range) self.assertEqual(result, expected_result) def test_row_range_both_keys_inclusive(self): + from google.cloud.bigtable_v2.types import RowRange + table_name = "table_name" start_key = b"start_key" end_key = b"end_key" @@ -1958,9 +2040,8 @@ def test_row_range_both_keys_inclusive(self): table_name, start_key=start_key, end_key=end_key, end_inclusive=True ) expected_result = _ReadRowsRequestPB(table_name=table_name) - expected_result.rows.row_ranges.add( - start_key_closed=start_key, end_key_closed=end_key - ) + row_range = RowRange(start_key_closed=start_key, end_key_closed=end_key) + expected_result.rows.row_ranges.append(row_range) self.assertEqual(result, expected_result) def test_with_filter(self): @@ -2002,7 +2083,7 @@ def test_with_app_profile_id(self): def _ReadRowsRequestPB(*args, **kw): - from google.cloud.bigtable_v2.proto import bigtable_pb2 as messages_v2_pb2 + from google.cloud.bigtable_v2.types import bigtable as messages_v2_pb2 return messages_v2_pb2.ReadRowsRequest(*args, **kw) @@ -2094,24 +2175,24 @@ def test__repr__(self): def _ReadRowsResponseCellChunkPB(*args, **kw): - from google.cloud.bigtable_v2.proto import bigtable_pb2 as messages_v2_pb2 + from google.cloud.bigtable_v2.types import bigtable as messages_v2_pb2 family_name = kw.pop("family_name") qualifier = kw.pop("qualifier") message = messages_v2_pb2.ReadRowsResponse.CellChunk(*args, **kw) - message.family_name.value = family_name - message.qualifier.value = qualifier + message.family_name = family_name + message.qualifier = qualifier return message def 
_ReadRowsResponsePB(*args, **kw): - from google.cloud.bigtable_v2.proto import bigtable_pb2 as messages_v2_pb2 + from google.cloud.bigtable_v2.types import bigtable as messages_v2_pb2 return messages_v2_pb2.ReadRowsResponse(*args, **kw) def _mutate_rows_request_pb(*args, **kw): - from google.cloud.bigtable_v2.proto import bigtable_pb2 as data_messages_v2_pb2 + from google.cloud.bigtable_v2.types import bigtable as data_messages_v2_pb2 return data_messages_v2_pb2.MutateRowsRequest(*args, **kw) @@ -2130,6 +2211,9 @@ class _MockFailureIterator_1(object): def next(self): raise DeadlineExceeded("Failed to read from server") + def __init__(self, last_scanned_row_key=""): + self.last_scanned_row_key = last_scanned_row_key + __next__ = next @@ -2137,6 +2221,7 @@ class _MockFailureIterator_2(object): def __init__(self, *values): self.iter_values = values[0] self.calls = 0 + self.last_scanned_row_key = "" def next(self): self.calls += 1 @@ -2155,19 +2240,19 @@ def __init__(self, chunks, last_scanned_row_key=""): def _TablePB(*args, **kw): - from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_v2_pb2 + from google.cloud.bigtable_admin_v2.types import table as table_v2_pb2 return table_v2_pb2.Table(*args, **kw) def _ColumnFamilyPB(*args, **kw): - from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_v2_pb2 + from google.cloud.bigtable_admin_v2.types import table as table_v2_pb2 return table_v2_pb2.ColumnFamily(*args, **kw) def _ClusterStatePB(replication_state): - from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_v2_pb2 + from google.cloud.bigtable_admin_v2.types import table as table_v2_pb2 return table_v2_pb2.Table.ClusterState(replication_state=replication_state) From f08a5c30e4c311f9474f2a90dadd5eed1c1bac8b Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Thu, 11 Feb 2021 16:30:01 +0100 Subject: [PATCH 400/892] chore(deps): update dependency google-cloud-bigtable to v1.7.0 (#210) [![WhiteSource Renovate](https://app.renovatebot.com/images/banner.svg)](https://renovatebot.com) This PR contains the following updates: | Package | Change | Age | Adoption | Passing | Confidence | |---|---|---|---|---|---| | [google-cloud-bigtable](https://togithub.com/googleapis/python-bigtable) | `==1.6.1` -> `==1.7.0` | [![age](https://badges.renovateapi.com/packages/pypi/google-cloud-bigtable/1.7.0/age-slim)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://badges.renovateapi.com/packages/pypi/google-cloud-bigtable/1.7.0/adoption-slim)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://badges.renovateapi.com/packages/pypi/google-cloud-bigtable/1.7.0/compatibility-slim/1.6.1)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://badges.renovateapi.com/packages/pypi/google-cloud-bigtable/1.7.0/confidence-slim/1.6.1)](https://docs.renovatebot.com/merge-confidence/) | --- ### Release Notes
googleapis/python-bigtable ### [`v1.7.0`](https://togithub.com/googleapis/python-bigtable/blob/master/CHANGELOG.md#​170-httpswwwgithubcomgoogleapispython-bigtablecomparev161v170-2021-02-09) [Compare Source](https://togithub.com/googleapis/python-bigtable/compare/v1.6.1...v1.7.0) ##### Features - add keep alive timeout ([#​182](https://www.github.com/googleapis/python-bigtable/issues/182)) ([e9637cb](https://www.github.com/googleapis/python-bigtable/commit/e9637cbd4461dcca509dca43ef116d6ff41b80c7)) - support filtering on incrementable values ([#​178](https://www.github.com/googleapis/python-bigtable/issues/178)) ([e221352](https://www.github.com/googleapis/python-bigtable/commit/e2213520951d3da97019a1d784e5bf31d94e3353)) ##### Bug Fixes - Renaming region tags to not conflict with documentation snippets ([#​190](https://www.github.com/googleapis/python-bigtable/issues/190)) ([dd0cdc5](https://www.github.com/googleapis/python-bigtable/commit/dd0cdc5bcfd92e18ab9a7255684a9f5b21198867)) ##### Documentation - update python contributing guide ([#​206](https://www.github.com/googleapis/python-bigtable/issues/206)) ([e301ac3](https://www.github.com/googleapis/python-bigtable/commit/e301ac3b61364d779fdb50a57ae8e2cb9952df9e)) ##### [1.6.1](https://www.github.com/googleapis/python-bigtable/compare/v1.6.0...v1.6.1) (2020-12-01) ##### Documentation - update intersphinx mappings ([#​172](https://www.github.com/googleapis/python-bigtable/issues/172)) ([7b09368](https://www.github.com/googleapis/python-bigtable/commit/7b09368d5121782c7f271b3575c838e8a2284c05))
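
For reference, the "add keep alive timeout" feature above concerns client-side gRPC keepalive pings. The snippet below is only an illustrative sketch of how keepalive is expressed as standard gRPC channel arguments; the option names are core gRPC channel args rather than Bigtable-specific API, and the exact default values that google-cloud-bigtable 1.7.0 applies internally are an assumption, not shown here.

```python
# Minimal sketch (not the library's internal implementation): configuring
# client-side keepalive on a gRPC channel using standard channel arguments.
import grpc

keepalive_options = [
    ("grpc.keepalive_time_ms", 30_000),     # send a keepalive ping every 30s
    ("grpc.keepalive_timeout_ms", 10_000),  # wait up to 10s for the ping ack
]

channel = grpc.secure_channel(
    "bigtable.googleapis.com:443",          # hypothetical target for illustration
    grpc.ssl_channel_credentials(),
    options=keepalive_options,
)
```
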
--- ### Renovate configuration :date: **Schedule**: At any time (no schedule defined). :vertical_traffic_light: **Automerge**: Disabled by config. Please merge this manually once you are satisfied. :recycle: **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. :no_bell: **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [WhiteSource Renovate](https://renovate.whitesourcesoftware.com). View repository job log [here](https://app.renovatebot.com/dashboard#github/googleapis/python-bigtable). --- packages/google-cloud-bigtable/samples/hello/requirements.txt | 2 +- .../samples/instanceadmin/requirements.txt | 2 +- .../google-cloud-bigtable/samples/metricscaler/requirements.txt | 2 +- .../google-cloud-bigtable/samples/quickstart/requirements.txt | 2 +- .../samples/snippets/filters/requirements.txt | 2 +- .../samples/snippets/reads/requirements.txt | 2 +- .../samples/snippets/writes/requirements.txt | 2 +- .../google-cloud-bigtable/samples/tableadmin/requirements.txt | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/hello/requirements.txt b/packages/google-cloud-bigtable/samples/hello/requirements.txt index fa1ec85d7c68..3c38f94bb211 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==1.6.1 +google-cloud-bigtable==1.7.0 google-cloud-core==1.6.0 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt index 9f839250f0d9..1e0bcfdf8913 100755 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==1.6.1 +google-cloud-bigtable==1.7.0 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index d84efda8a4ab..3e93b37473ca 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==1.6.1 +google-cloud-bigtable==1.7.0 google-cloud-monitoring==2.0.0 diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt index 9f839250f0d9..1e0bcfdf8913 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==1.6.1 +google-cloud-bigtable==1.7.0 diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt index 8119baad2324..c256e38ebf47 100755 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==1.6.1 +google-cloud-bigtable==1.7.0 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt index 8119baad2324..c256e38ebf47 
100755 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==1.6.1 +google-cloud-bigtable==1.7.0 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt index 661aba7c1a15..bf09e1de90a3 100755 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==1.6.1 \ No newline at end of file +google-cloud-bigtable==1.7.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt index 9f839250f0d9..1e0bcfdf8913 100755 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==1.6.1 +google-cloud-bigtable==1.7.0 From 3473f9beb7fee1766e7d50fbdb93a24f29360b5a Mon Sep 17 00:00:00 2001 From: kolea2 <45548808+kolea2@users.noreply.github.com> Date: Fri, 12 Feb 2021 10:59:29 -0500 Subject: [PATCH 401/892] chore(fix): cleanup old enums prior to microgen (#214) --- .../cloud/bigtable_admin_v2/gapic/enums.py | 213 ------------------ .../tests/unit/test_backup.py | 13 +- 2 files changed, 5 insertions(+), 221 deletions(-) delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py deleted file mode 100644 index c71bee34bdde..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic/enums.py +++ /dev/null @@ -1,213 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Wrappers for protocol buffer enum types.""" - -import enum - - -class RestoreSourceType(enum.IntEnum): - """ - Indicates the type of the restore source. - - Attributes: - RESTORE_SOURCE_TYPE_UNSPECIFIED (int): No restore associated. - BACKUP (int): A backup was used as the source of the restore. - """ - - RESTORE_SOURCE_TYPE_UNSPECIFIED = 0 - BACKUP = 1 - - -class StorageType(enum.IntEnum): - """ - Storage media types for persisting Bigtable data. - - Attributes: - STORAGE_TYPE_UNSPECIFIED (int): The user did not specify a storage type. - SSD (int): Flash (SSD) storage should be used. - HDD (int): Magnetic drive (HDD) storage should be used. - """ - - STORAGE_TYPE_UNSPECIFIED = 0 - SSD = 1 - HDD = 2 - - -class Backup(object): - class State(enum.IntEnum): - """ - Indicates the current state of the backup. - - Attributes: - STATE_UNSPECIFIED (int): Not specified. 
- CREATING (int): The pending backup is still being created. Operations on the backup - may fail with ``FAILED_PRECONDITION`` in this state. - READY (int): The backup is complete and ready for use. - """ - - STATE_UNSPECIFIED = 0 - CREATING = 1 - READY = 2 - - -class Cluster(object): - class State(enum.IntEnum): - """ - Possible states of a cluster. - - Attributes: - STATE_NOT_KNOWN (int): The state of the cluster could not be determined. - READY (int): The cluster has been successfully created and is ready to serve requests. - CREATING (int): The cluster is currently being created, and may be destroyed - if the creation process encounters an error. - A cluster may not be able to serve requests while being created. - RESIZING (int): The cluster is currently being resized, and may revert to its previous - node count if the process encounters an error. - A cluster is still capable of serving requests while being resized, - but may exhibit performance as if its number of allocated nodes is - between the starting and requested states. - DISABLED (int): The cluster has no backing nodes. The data (tables) still - exist, but no operations can be performed on the cluster. - """ - - STATE_NOT_KNOWN = 0 - READY = 1 - CREATING = 2 - RESIZING = 3 - DISABLED = 4 - - -class Instance(object): - class State(enum.IntEnum): - """ - Possible states of an instance. - - Attributes: - STATE_NOT_KNOWN (int): The state of the instance could not be determined. - READY (int): The instance has been successfully created and can serve requests - to its tables. - CREATING (int): The instance is currently being created, and may be destroyed - if the creation process encounters an error. - """ - - STATE_NOT_KNOWN = 0 - READY = 1 - CREATING = 2 - - class Type(enum.IntEnum): - """ - The type of the instance. - - Attributes: - TYPE_UNSPECIFIED (int): The type of the instance is unspecified. If set when creating an - instance, a ``PRODUCTION`` instance will be created. If set when - updating an instance, the type will be left unchanged. - PRODUCTION (int): An instance meant for production use. ``serve_nodes`` must be set on - the cluster. - DEVELOPMENT (int): The instance is meant for development and testing purposes only; it - has no performance or uptime guarantees and is not covered by SLA. After - a development instance is created, it can be upgraded by updating the - instance to type ``PRODUCTION``. An instance created as a production - instance cannot be changed to a development instance. When creating a - development instance, ``serve_nodes`` on the cluster must not be set. - """ - - TYPE_UNSPECIFIED = 0 - PRODUCTION = 1 - DEVELOPMENT = 2 - - -class Snapshot(object): - class State(enum.IntEnum): - """ - Possible states of a snapshot. - - Attributes: - STATE_NOT_KNOWN (int): The state of the snapshot could not be determined. - READY (int): The snapshot has been successfully created and can serve all requests. - CREATING (int): The snapshot is currently being created, and may be destroyed if the - creation process encounters an error. A snapshot may not be restored to a - table while it is being created. - """ - - STATE_NOT_KNOWN = 0 - READY = 1 - CREATING = 2 - - -class Table(object): - class TimestampGranularity(enum.IntEnum): - """ - Possible timestamp granularities to use when keeping multiple versions - of data in a table. - - Attributes: - TIMESTAMP_GRANULARITY_UNSPECIFIED (int): The user did not specify a granularity. Should not be returned. - When specified during table creation, MILLIS will be used. 
- MILLIS (int): The table keeps data versioned at a granularity of 1ms. - """ - - TIMESTAMP_GRANULARITY_UNSPECIFIED = 0 - MILLIS = 1 - - class View(enum.IntEnum): - """ - Defines a view over a table's fields. - - Attributes: - VIEW_UNSPECIFIED (int): Uses the default view for each method as documented in its request. - NAME_ONLY (int): Only populates ``name``. - SCHEMA_VIEW (int): Only populates ``name`` and fields related to the table's schema. - REPLICATION_VIEW (int): Only populates ``name`` and fields related to the table's - replication state. - FULL (int): Populates all fields. - """ - - VIEW_UNSPECIFIED = 0 - NAME_ONLY = 1 - SCHEMA_VIEW = 2 - REPLICATION_VIEW = 3 - FULL = 4 - - class ClusterState(object): - class ReplicationState(enum.IntEnum): - """ - Table replication states. - - Attributes: - STATE_NOT_KNOWN (int): The replication state of the table is unknown in this cluster. - INITIALIZING (int): The cluster was recently created, and the table must finish copying - over pre-existing data from other clusters before it can begin - receiving live replication updates and serving Data API requests. - PLANNED_MAINTENANCE (int): The table is temporarily unable to serve Data API requests from this - cluster due to planned internal maintenance. - UNPLANNED_MAINTENANCE (int): The table is temporarily unable to serve Data API requests from this - cluster due to unplanned or emergency maintenance. - READY (int): The table can serve Data API requests from this cluster. Depending on - replication delay, reads may not immediately reflect the state of the - table in other clusters. - READY_OPTIMIZING (int): The table is fully created and ready for use after a restore, and is - being optimized for performance. When optimizations are complete, the - table will transition to ``READY`` state. 
- """ - - STATE_NOT_KNOWN = 0 - INITIALIZING = 1 - PLANNED_MAINTENANCE = 2 - UNPLANNED_MAINTENANCE = 3 - READY = 4 - READY_OPTIMIZING = 5 diff --git a/packages/google-cloud-bigtable/tests/unit/test_backup.py b/packages/google-cloud-bigtable/tests/unit/test_backup.py index 68e5f6162105..02efef492ab2 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_backup.py +++ b/packages/google-cloud-bigtable/tests/unit/test_backup.py @@ -128,7 +128,6 @@ def test_from_pb_bad_name(self): klasse.from_pb(backup_pb, instance) def test_from_pb_success(self): - from google.cloud.bigtable_admin_v2.gapic import enums from google.cloud.bigtable_admin_v2.types import table from google.cloud._helpers import _datetime_to_pb_timestamp @@ -136,7 +135,7 @@ def test_from_pb_success(self): instance = _Instance(self.INSTANCE_NAME, client) timestamp = _datetime_to_pb_timestamp(self._make_timestamp()) size_bytes = 1234 - state = enums.Backup.State.READY + state = table.Backup.State.READY backup_pb = table.Backup( name=self.BACKUP_NAME, source_table=self.TABLE_NAME, @@ -269,11 +268,11 @@ def test_property_size(self): self.assertEqual(backup.size_bytes, expected) def test_property_state(self): - from google.cloud.bigtable_admin_v2.gapic import enums + from google.cloud.bigtable_admin_v2.types import table instance = _Instance(self.INSTANCE_NAME) backup = self._make_one(self.BACKUP_ID, instance) - expected = backup._state = enums.Backup.State.READY + expected = backup._state = table.Backup.State.READY self.assertEqual(backup.state, expected) def test___eq__(self): @@ -493,12 +492,11 @@ def test_exists_not_found(self): api.get_backup.assert_called_once_with(request={"name": self.BACKUP_NAME}) def test_get(self): - from google.cloud.bigtable_admin_v2.gapic import enums from google.cloud.bigtable_admin_v2.types import table from google.cloud._helpers import _datetime_to_pb_timestamp timestamp = _datetime_to_pb_timestamp(self._make_timestamp()) - state = enums.Backup.State.READY + state = table.Backup.State.READY client = _Client() backup_pb = table.Backup( @@ -519,12 +517,11 @@ def test_get(self): self.assertEqual(backup.get(), backup_pb) def test_reload(self): - from google.cloud.bigtable_admin_v2.gapic import enums from google.cloud.bigtable_admin_v2.types import table from google.cloud._helpers import _datetime_to_pb_timestamp timestamp = _datetime_to_pb_timestamp(self._make_timestamp()) - state = enums.Backup.State.READY + state = table.Backup.State.READY client = _Client() backup_pb = table.Backup( From b27bed0dd4ba8d7c05df299feec11d062864e9f2 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Fri, 19 Feb 2021 17:22:58 +0100 Subject: [PATCH 402/892] chore(deps): update dependency google-cloud-monitoring to v2.0.1 (#218) --- .../google-cloud-bigtable/samples/metricscaler/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index 3e93b37473ca..cbcdcd4774aa 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ google-cloud-bigtable==1.7.0 -google-cloud-monitoring==2.0.0 +google-cloud-monitoring==2.0.1 From 5eb9857ac9249a5263eabb37a17cf9baa5a917e4 Mon Sep 17 00:00:00 2001 From: kolea2 <45548808+kolea2@users.noreply.github.com> Date: Wed, 24 Feb 2021 14:32:10 -0500 Subject: [PATCH 403/892] fix: guard assignments of certain values 
against None (#220) * chore: manual regen of synth * cleanup --- packages/google-cloud-bigtable/.gitignore | 4 +++- .../google-cloud-bigtable/.kokoro/build.sh | 10 ++++++++ .../bigtable_instance_admin/client.py | 14 ++++++++--- .../bigtable_instance_admin/pagers.py | 11 ++++++++- .../services/bigtable_table_admin/client.py | 14 ++++++++--- .../services/bigtable_table_admin/pagers.py | 11 ++++++++- .../bigtable_v2/services/bigtable/client.py | 24 ++++++++----------- packages/google-cloud-bigtable/synth.py | 8 +++++++ 8 files changed, 73 insertions(+), 23 deletions(-) diff --git a/packages/google-cloud-bigtable/.gitignore b/packages/google-cloud-bigtable/.gitignore index b9daa52f118d..b4243ced74e4 100644 --- a/packages/google-cloud-bigtable/.gitignore +++ b/packages/google-cloud-bigtable/.gitignore @@ -50,8 +50,10 @@ docs.metadata # Virtual environment env/ + +# Test logs coverage.xml -sponge_log.xml +*sponge_log.xml # System test environment variables. system_tests/local_test_setup diff --git a/packages/google-cloud-bigtable/.kokoro/build.sh b/packages/google-cloud-bigtable/.kokoro/build.sh index 76d9329bad4d..9773bfca7cd7 100755 --- a/packages/google-cloud-bigtable/.kokoro/build.sh +++ b/packages/google-cloud-bigtable/.kokoro/build.sh @@ -40,6 +40,16 @@ python3 -m pip uninstall --yes --quiet nox-automation python3 -m pip install --upgrade --quiet nox python3 -m nox --version +# If this is a continuous build, send the test log to the FlakyBot. +# See https://github.com/googleapis/repo-automation-bots/tree/master/packages/flakybot. +if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"continuous"* ]]; then + cleanup() { + chmod +x $KOKORO_GFILE_DIR/linux_amd64/flakybot + $KOKORO_GFILE_DIR/linux_amd64/flakybot + } + trap cleanup EXIT HUP +fi + # If NOX_SESSION is set, it only runs the specified session, # otherwise run all the sessions. if [[ -n "${NOX_SESSION:-}" ]]; then diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py index 8e6f504da4e9..d7b1a778f43f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -484,9 +484,8 @@ def create_instance( request.instance_id = instance_id if instance is not None: request.instance = instance - - if clusters: - request.clusters.update(clusters) + if clusters is not None: + request.clusters = clusters # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. @@ -1832,6 +1831,9 @@ def get_iam_policy( elif not request: request = iam_policy.GetIamPolicyRequest(resource=resource,) + if resource is not None: + request.resource = resource + # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.get_iam_policy] @@ -1957,6 +1959,9 @@ def set_iam_policy( elif not request: request = iam_policy.SetIamPolicyRequest(resource=resource,) + if resource is not None: + request.resource = resource + # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. 
rpc = self._transport._wrapped_methods[self._transport.set_iam_policy] @@ -2039,6 +2044,9 @@ def test_iam_permissions( resource=resource, permissions=permissions, ) + if resource is not None: + request.resource = resource + # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.test_iam_permissions] diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py index f70936b5b458..f92d478868e1 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py @@ -15,7 +15,16 @@ # limitations under the License. # -from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple +from typing import ( + Any, + AsyncIterable, + Awaitable, + Callable, + Iterable, + Sequence, + Tuple, + Optional, +) from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin from google.cloud.bigtable_admin_v2.types import instance diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index 58eb4a9cdbf8..de31461164d8 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -952,9 +952,8 @@ def modify_column_families( if name is not None: request.name = name - - if modifications: - request.modifications.extend(modifications) + if modifications is not None: + request.modifications = modifications # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. @@ -2236,6 +2235,9 @@ def get_iam_policy( elif not request: request = iam_policy.GetIamPolicyRequest(resource=resource,) + if resource is not None: + request.resource = resource + # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.get_iam_policy] @@ -2361,6 +2363,9 @@ def set_iam_policy( elif not request: request = iam_policy.SetIamPolicyRequest(resource=resource,) + if resource is not None: + request.resource = resource + # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.set_iam_policy] @@ -2443,6 +2448,9 @@ def test_iam_permissions( resource=resource, permissions=permissions, ) + if resource is not None: + request.resource = resource + # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. 
rpc = self._transport._wrapped_methods[self._transport.test_iam_permissions] diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py index be7c121d74cb..203d94f83e51 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py @@ -15,7 +15,16 @@ # limitations under the License. # -from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple +from typing import ( + Any, + AsyncIterable, + Awaitable, + Callable, + Iterable, + Sequence, + Tuple, + Optional, +) from google.cloud.bigtable_admin_v2.types import bigtable_table_admin from google.cloud.bigtable_admin_v2.types import table diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py index 8ae8110541c7..a9f3dfd74925 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py @@ -621,12 +621,11 @@ def mutate_row( request.table_name = table_name if row_key is not None: request.row_key = row_key + if mutations is not None: + request.mutations = mutations if app_profile_id is not None: request.app_profile_id = app_profile_id - if mutations: - request.mutations.extend(mutations) - # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.mutate_row] @@ -730,12 +729,11 @@ def mutate_rows( if table_name is not None: request.table_name = table_name + if entries is not None: + request.entries = entries if app_profile_id is not None: request.app_profile_id = app_profile_id - if entries: - request.entries.extend(entries) - # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.mutate_rows] @@ -881,14 +879,13 @@ def check_and_mutate_row( request.row_key = row_key if predicate_filter is not None: request.predicate_filter = predicate_filter + if true_mutations is not None: + request.true_mutations = true_mutations + if false_mutations is not None: + request.false_mutations = false_mutations if app_profile_id is not None: request.app_profile_id = app_profile_id - if true_mutations: - request.true_mutations.extend(true_mutations) - if false_mutations: - request.false_mutations.extend(false_mutations) - # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.check_and_mutate_row] @@ -1005,12 +1002,11 @@ def read_modify_write_row( request.table_name = table_name if row_key is not None: request.row_key = row_key + if rules is not None: + request.rules = rules if app_profile_id is not None: request.app_profile_id = app_profile_id - if rules: - request.rules.extend(rules) - # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. 
rpc = self._transport._wrapped_methods[self._transport.read_modify_write_row] diff --git a/packages/google-cloud-bigtable/synth.py b/packages/google-cloud-bigtable/synth.py index e2fda520a737..2a74e80f61a9 100644 --- a/packages/google-cloud-bigtable/synth.py +++ b/packages/google-cloud-bigtable/synth.py @@ -47,6 +47,14 @@ s.move(library / "tests") s.move(library / "scripts") +# temporary workaround for https://github.com/googleapis/gapic-generator-python/issues/778 +s.replace( + "google/cloud/**/client.py", + """\s+if permissions: +\s+request\.permissions\.extend\(permissions\)""", + "", +) + # ---------------------------------------------------------------------------- # Add templated files # ---------------------------------------------------------------------------- From d0d1348dd886d038e8f383bdc5bb6edfc04a0a90 Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Wed, 24 Feb 2021 15:23:46 -0500 Subject: [PATCH 404/892] chore: release 2.0.0-dev1 (#211) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- packages/google-cloud-bigtable/CHANGELOG.md | 16 ++++++++++++++++ packages/google-cloud-bigtable/setup.py | 2 +- 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 91c791aff011..420cdc4f0883 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,22 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.0.0-dev1](https://www.github.com/googleapis/python-bigtable/compare/v1.7.0...v2.0.0-dev1) (2021-02-24) + + +### ⚠ BREAKING CHANGES + +* microgenerator changes (#203) + +### Features + +* microgenerator changes ([#203](https://www.github.com/googleapis/python-bigtable/issues/203)) ([b31bd87](https://www.github.com/googleapis/python-bigtable/commit/b31bd87c3fa8cad32768611a52d5effcc7d9b3e2)) + + +### Bug Fixes + +* guard assignments of certain values against None ([#220](https://www.github.com/googleapis/python-bigtable/issues/220)) ([341f448](https://www.github.com/googleapis/python-bigtable/commit/341f448ce378375ab79bfc82f864fb6c88ed71a0)) + ## [1.7.0](https://www.github.com/googleapis/python-bigtable/compare/v1.6.1...v1.7.0) (2021-02-09) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index c1fa1311cbf5..b460b91b3691 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -22,7 +22,7 @@ name = "google-cloud-bigtable" description = "Google Cloud Bigtable API client library" -version = "1.7.0" +version = "2.0.0-dev1" # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' From 7c030bbfa02315c8f4cf3e0eb78623d56a02e2ac Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Thu, 25 Feb 2021 11:28:20 -0800 Subject: [PATCH 405/892] feat: publish new fields for CMEK (#222) * changes without context autosynth cannot find the source of changes triggered by earlier changes in this repository, or by version upgrades to tools such as linters. * Bump gapic-generator to 2.6.1. 
- Fix a scenario where generator attempts to assign a string to an integer in tests by using a separate value generator in test generation PiperOrigin-RevId: 336931287 Source-Author: Google APIs Source-Date: Tue Oct 13 12:29:21 2020 -0700 Source-Repo: googleapis/googleapis Source-Sha: 3a935fab757e09c72afd4aa121147a4c97dccc3e Source-Link: https://github.com/googleapis/googleapis/commit/3a935fab757e09c72afd4aa121147a4c97dccc3e * chore: update grpc dependency to v1.33.1 PiperOrigin-RevId: 338646463 Source-Author: Google APIs Source-Date: Fri Oct 23 03:57:15 2020 -0700 Source-Repo: googleapis/googleapis Source-Sha: 20b11dfe4538cd5da7b4c3dd7d2bf5b9922ff3ed Source-Link: https://github.com/googleapis/googleapis/commit/20b11dfe4538cd5da7b4c3dd7d2bf5b9922ff3ed * feat:Update BigtableTableAdmin GetIamPolicy to include the additional binding for Backup. feat:Change DeleteAppProfileRequest.ignore_warnings to REQUIRED. PiperOrigin-RevId: 339464550 Source-Author: Google APIs Source-Date: Wed Oct 28 08:32:48 2020 -0700 Source-Repo: googleapis/googleapis Source-Sha: ccd6462d31e6422fd188b6590aa8d0ad03e7d9a3 Source-Link: https://github.com/googleapis/googleapis/commit/ccd6462d31e6422fd188b6590aa8d0ad03e7d9a3 * feat: migrate bigtable retry/timeout settings to gRPC's service configs Committer: @miraleung PiperOrigin-RevId: 346894665 Source-Author: Google APIs Source-Date: Thu Dec 10 16:55:31 2020 -0800 Source-Repo: googleapis/googleapis Source-Sha: cbbd3170bcf217e36ae72f4ac522449bf861346f Source-Link: https://github.com/googleapis/googleapis/commit/cbbd3170bcf217e36ae72f4ac522449bf861346f * chore: migrate bigtable to the Python microgenerator PiperOrigin-RevId: 356992836 Source-Author: Google APIs Source-Date: Thu Feb 11 09:33:53 2021 -0800 Source-Repo: googleapis/googleapis Source-Sha: 1e0c7413684ca6f6322620ecfc0d3e0352933dc1 Source-Link: https://github.com/googleapis/googleapis/commit/1e0c7413684ca6f6322620ecfc0d3e0352933dc1 * chore: migrate StreetView to the {Java,Python} microgenerators Committer: @miraleung PiperOrigin-RevId: 357863594 Source-Author: Google APIs Source-Date: Tue Feb 16 20:19:58 2021 -0800 Source-Repo: googleapis/googleapis Source-Sha: 8ca872ced04c96c05a36db3d2113d568ac814be8 Source-Link: https://github.com/googleapis/googleapis/commit/8ca872ced04c96c05a36db3d2113d568ac814be8 * feat: Publish new fields to support Customer Managed Encryption Keys (CMEK) on the existing Cloud Bigtable service methods. PiperOrigin-RevId: 359130387 Source-Author: Google APIs Source-Date: Tue Feb 23 14:08:20 2021 -0800 Source-Repo: googleapis/googleapis Source-Sha: eabec5a21219401bad79e1cc7d900c1658aee5fd Source-Link: https://github.com/googleapis/googleapis/commit/eabec5a21219401bad79e1cc7d900c1658aee5fd * fix: Use rules_gapic to v0.5.0. Fixes handling parameters with spaces. 
Committer: @alexander-fenster PiperOrigin-RevId: 359364666 Source-Author: Google APIs Source-Date: Wed Feb 24 14:01:05 2021 -0800 Source-Repo: googleapis/googleapis Source-Sha: c06bbe28cc7287a55bf7926ee48da2565854de7f Source-Link: https://github.com/googleapis/googleapis/commit/c06bbe28cc7287a55bf7926ee48da2565854de7f * fix noxfile Co-authored-by: Kristen O'Leary --- packages/google-cloud-bigtable/.gitignore | 4 +- .../google-cloud-bigtable/.kokoro/build.sh | 10 -- .../cloud/bigtable_admin_v2/__init__.py | 6 +- .../bigtable_admin_v2/proto/instance.proto | 29 +++++- .../cloud/bigtable_admin_v2/proto/table.proto | 75 +++++++++++++- .../bigtable_instance_admin/async_client.py | 4 + .../bigtable_instance_admin/client.py | 21 ++++ .../bigtable_table_admin/async_client.py | 6 ++ .../services/bigtable_table_admin/client.py | 26 +++++ .../cloud/bigtable_admin_v2/types/__init__.py | 2 + .../cloud/bigtable_admin_v2/types/instance.py | 29 +++++- .../cloud/bigtable_admin_v2/types/table.py | 64 +++++++++++- .../fixup_bigtable_admin_v2_keywords.py | 2 +- packages/google-cloud-bigtable/synth.metadata | 86 ++++++++-------- .../test_bigtable_instance_admin.py | 61 +++++++++--- .../test_bigtable_table_admin.py | 97 +++++++++++++------ 16 files changed, 407 insertions(+), 115 deletions(-) diff --git a/packages/google-cloud-bigtable/.gitignore b/packages/google-cloud-bigtable/.gitignore index b4243ced74e4..b9daa52f118d 100644 --- a/packages/google-cloud-bigtable/.gitignore +++ b/packages/google-cloud-bigtable/.gitignore @@ -50,10 +50,8 @@ docs.metadata # Virtual environment env/ - -# Test logs coverage.xml -*sponge_log.xml +sponge_log.xml # System test environment variables. system_tests/local_test_setup diff --git a/packages/google-cloud-bigtable/.kokoro/build.sh b/packages/google-cloud-bigtable/.kokoro/build.sh index 9773bfca7cd7..76d9329bad4d 100755 --- a/packages/google-cloud-bigtable/.kokoro/build.sh +++ b/packages/google-cloud-bigtable/.kokoro/build.sh @@ -40,16 +40,6 @@ python3 -m pip uninstall --yes --quiet nox-automation python3 -m pip install --upgrade --quiet nox python3 -m nox --version -# If this is a continuous build, send the test log to the FlakyBot. -# See https://github.com/googleapis/repo-automation-bots/tree/master/packages/flakybot. -if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"continuous"* ]]; then - cleanup() { - chmod +x $KOKORO_GFILE_DIR/linux_amd64/flakybot - $KOKORO_GFILE_DIR/linux_amd64/flakybot - } - trap cleanup EXIT HUP -fi - # If NOX_SESSION is set, it only runs the specified session, # otherwise run all the sessions. 
if [[ -n "${NOX_SESSION:-}" ]]; then diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py index 423742502ede..79a9bea684e0 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py @@ -76,6 +76,7 @@ from .types.table import Backup from .types.table import BackupInfo from .types.table import ColumnFamily +from .types.table import EncryptionInfo from .types.table import GcRule from .types.table import RestoreInfo from .types.table import RestoreSourceType @@ -87,7 +88,7 @@ "AppProfile", "Backup", "BackupInfo", - "BigtableInstanceAdminClient", + "BigtableTableAdminClient", "CheckConsistencyRequest", "CheckConsistencyResponse", "Cluster", @@ -109,6 +110,7 @@ "DeleteSnapshotRequest", "DeleteTableRequest", "DropRowRangeRequest", + "EncryptionInfo", "GcRule", "GenerateConsistencyTokenRequest", "GenerateConsistencyTokenResponse", @@ -149,5 +151,5 @@ "UpdateBackupRequest", "UpdateClusterMetadata", "UpdateInstanceMetadata", - "BigtableTableAdminClient", + "BigtableInstanceAdminClient", ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance.proto index 2086f9707c8b..d590788b2d6c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance.proto +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2021 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; @@ -28,6 +27,10 @@ option java_outer_classname = "InstanceProto"; option java_package = "com.google.bigtable.admin.v2"; option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2"; option ruby_package = "Google::Cloud::Bigtable::Admin::V2"; +option (google.api.resource_definition) = { + type: "cloudkms.googleapis.com/CryptoKey" + pattern: "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}" +}; // A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and // the resources that serve them. @@ -113,6 +116,22 @@ message Cluster { pattern: "projects/{project}/instances/{instance}/clusters/{cluster}" }; + // Cloud Key Management Service (Cloud KMS) settings for a CMEK-protected + // cluster. + message EncryptionConfig { + // Describes the Cloud KMS encryption key that will be used to protect the + // destination Bigtable cluster. The requirements for this key are: + // 1) The Cloud Bigtable service account associated with the project that + // contains this cluster must be granted the + // `cloudkms.cryptoKeyEncrypterDecrypter` role on the CMEK key. + // 2) Only regional keys can be used and the region of the CMEK key must + // match the region of the cluster. + // 3) All clusters within an instance must use the same CMEK key. + string kms_key_name = 1 [(google.api.resource_reference) = { + type: "cloudkms.googleapis.com/CryptoKey" + }]; + } + // Possible states of a cluster. enum State { // The state of the cluster could not be determined. 
@@ -162,6 +181,10 @@ message Cluster { // The type of storage used by this cluster to serve its // parent instance's tables, unless explicitly overridden. StorageType default_storage_type = 5; + + // Immutable. The encryption configuration for CMEK-protected clusters. + EncryptionConfig encryption_config = 6 + [(google.api.field_behavior) = IMMUTABLE]; } // A configuration object describing how Cloud Bigtable should treat traffic @@ -194,7 +217,7 @@ message AppProfile { // (`OutputOnly`) // The unique name of the app profile. Values are of the form - // `projects//instances//appProfiles/[_a-zA-Z0-9][-_.a-zA-Z0-9]*`. + // `projects/{project}/instances/{instance}/appProfiles/[_a-zA-Z0-9][-_.a-zA-Z0-9]*`. string name = 1; // Strongly validated etag for optimistic concurrency control. Preserve the diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table.proto index e85ca8ca9745..a5578225ea18 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table.proto +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table.proto @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2021 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -20,6 +20,7 @@ import "google/api/field_behavior.proto"; import "google/api/resource.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/timestamp.proto"; +import "google/rpc/status.proto"; option csharp_namespace = "Google.Cloud.Bigtable.Admin.V2"; option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/v2;admin"; @@ -28,6 +29,10 @@ option java_outer_classname = "TableProto"; option java_package = "com.google.bigtable.admin.v2"; option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2"; option ruby_package = "Google::Cloud::Bigtable::Admin::V2"; +option (google.api.resource_definition) = { + type: "cloudkms.googleapis.com/CryptoKeyVersion" + pattern: "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}/cryptoKeyVersions/{crypto_key_version}" +}; // Indicates the type of the restore source. enum RestoreSourceType { @@ -92,6 +97,14 @@ message Table { // Output only. The state of replication for the table in this cluster. ReplicationState replication_state = 1; + + // Output only. The encryption information for the table in this cluster. + // If the encryption key protecting this resource is customer managed, then + // its version can be rotated in Cloud Key Management Service (Cloud KMS). + // The primary version of the key and its status will be reflected here when + // changes propagate from Cloud KMS. + repeated EncryptionInfo encryption_info = 2 + [(google.api.field_behavior) = OUTPUT_ONLY]; } // Possible timestamp granularities to use when keeping multiple versions @@ -120,12 +133,15 @@ message Table { // state. REPLICATION_VIEW = 3; + // Only populates 'name' and fields related to the table's encryption state. + ENCRYPTION_VIEW = 5; + // Populates all fields. FULL = 4; } - // Output only. The unique name of the table. Values are of the form - // `projects//instances//tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]*`. + // The unique name of the table. Values are of the form + // `projects/{project}/instances/{instance}/tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]*`. 
// Views: `NAME_ONLY`, `SCHEMA_VIEW`, `REPLICATION_VIEW`, `FULL` string name = 1; @@ -133,7 +149,7 @@ message Table { // If it could not be determined whether or not the table has data in a // particular cluster (for example, if its zone is unavailable), then // there will be an entry for the cluster with UNKNOWN `replication_status`. - // Views: `REPLICATION_VIEW`, `FULL` + // Views: `REPLICATION_VIEW`, `ENCRYPTION_VIEW`, `FULL` map cluster_states = 2; // (`CreationOnly`) @@ -196,6 +212,51 @@ message GcRule { } } +// Encryption information for a given resource. +// If this resource is protected with customer managed encryption, the in-use +// Cloud Key Management Service (Cloud KMS) key version is specified along with +// its status. +message EncryptionInfo { + // Possible encryption types for a resource. + enum EncryptionType { + // Encryption type was not specified, though data at rest remains encrypted. + ENCRYPTION_TYPE_UNSPECIFIED = 0; + + // The data backing this resource is encrypted at rest with a key that is + // fully managed by Google. No key version or status will be populated. + // This is the default state. + GOOGLE_DEFAULT_ENCRYPTION = 1; + + // The data backing this resource is encrypted at rest with a key that is + // managed by the customer. + // The in-use version of the key and its status are populated for + // CMEK-protected tables. + // CMEK-protected backups are pinned to the key version that was in use at + // the time the backup was taken. This key version is populated but its + // status is not tracked and is reported as `UNKNOWN`. + CUSTOMER_MANAGED_ENCRYPTION = 2; + } + + // Output only. The type of encryption used to protect this resource. + EncryptionType encryption_type = 3 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The status of encrypt/decrypt calls on underlying data for + // this resource. Regardless of status, the existing data is always encrypted + // at rest. + google.rpc.Status encryption_status = 4 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The version of the Cloud KMS key specified in the parent + // cluster that is in use for the data underlying this table. + string kms_key_version = 2 [ + (google.api.field_behavior) = OUTPUT_ONLY, + (google.api.resource_reference) = { + type: "cloudkms.googleapis.com/CryptoKeyVersion" + } + ]; +} + // A snapshot of a table at a particular time. A snapshot can be used as a // checkpoint for data restoration or a data source for a new table. // @@ -225,7 +286,7 @@ message Snapshot { // Output only. The unique name of the snapshot. // Values are of the form - // `projects//instances//clusters//snapshots/`. + // `projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}`. string name = 1; // Output only. The source table at the time the snapshot was taken. @@ -318,6 +379,10 @@ message Backup { // Output only. The current state of the backup. State state = 7 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The encryption information for the backup. + EncryptionInfo encryption_info = 9 + [(google.api.field_behavior) = OUTPUT_ONLY]; } // Information about a backup. 
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py index 4df47ff4a7a2..d375f3280161 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ -62,6 +62,10 @@ class BigtableInstanceAdminAsyncClient: ) cluster_path = staticmethod(BigtableInstanceAdminClient.cluster_path) parse_cluster_path = staticmethod(BigtableInstanceAdminClient.parse_cluster_path) + crypto_key_path = staticmethod(BigtableInstanceAdminClient.crypto_key_path) + parse_crypto_key_path = staticmethod( + BigtableInstanceAdminClient.parse_crypto_key_path + ) instance_path = staticmethod(BigtableInstanceAdminClient.instance_path) parse_instance_path = staticmethod(BigtableInstanceAdminClient.parse_instance_path) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py index d7b1a778f43f..fc161479b234 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -201,6 +201,27 @@ def parse_cluster_path(path: str) -> Dict[str, str]: ) return m.groupdict() if m else {} + @staticmethod + def crypto_key_path( + project: str, location: str, key_ring: str, crypto_key: str, + ) -> str: + """Return a fully-qualified crypto_key string.""" + return "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}".format( + project=project, + location=location, + key_ring=key_ring, + crypto_key=crypto_key, + ) + + @staticmethod + def parse_crypto_key_path(path: str) -> Dict[str, str]: + """Parse a crypto_key path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/keyRings/(?P.+?)/cryptoKeys/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def instance_path(project: str, instance: str,) -> str: """Return a fully-qualified instance string.""" diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index 19e9ee8278dd..5aca2d37759e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -61,6 +61,12 @@ class BigtableTableAdminAsyncClient: parse_backup_path = staticmethod(BigtableTableAdminClient.parse_backup_path) cluster_path = staticmethod(BigtableTableAdminClient.cluster_path) parse_cluster_path = staticmethod(BigtableTableAdminClient.parse_cluster_path) + crypto_key_version_path = staticmethod( + BigtableTableAdminClient.crypto_key_version_path + ) + parse_crypto_key_version_path = staticmethod( + BigtableTableAdminClient.parse_crypto_key_version_path + ) instance_path = staticmethod(BigtableTableAdminClient.instance_path) parse_instance_path = staticmethod(BigtableTableAdminClient.parse_instance_path) 
snapshot_path = staticmethod(BigtableTableAdminClient.snapshot_path) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index de31461164d8..362013eccebd 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -202,6 +202,32 @@ def parse_cluster_path(path: str) -> Dict[str, str]: ) return m.groupdict() if m else {} + @staticmethod + def crypto_key_version_path( + project: str, + location: str, + key_ring: str, + crypto_key: str, + crypto_key_version: str, + ) -> str: + """Return a fully-qualified crypto_key_version string.""" + return "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}/cryptoKeyVersions/{crypto_key_version}".format( + project=project, + location=location, + key_ring=key_ring, + crypto_key=crypto_key, + crypto_key_version=crypto_key_version, + ) + + @staticmethod + def parse_crypto_key_version_path(path: str) -> Dict[str, str]: + """Parse a crypto_key_version path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/keyRings/(?P.+?)/cryptoKeys/(?P.+?)/cryptoKeyVersions/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def instance_path(project: str, instance: str,) -> str: """Return a fully-qualified instance string.""" diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py index 26c4b40c9dc2..f637988c4d89 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py @@ -53,6 +53,7 @@ Table, ColumnFamily, GcRule, + EncryptionInfo, Snapshot, Backup, BackupInfo, @@ -122,6 +123,7 @@ "Table", "ColumnFamily", "GcRule", + "EncryptionInfo", "Snapshot", "Backup", "BackupInfo", diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py index ddef8a0d180c..1f13a0cefe2a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py @@ -110,6 +110,9 @@ class Cluster(proto.Message): (``CreationOnly``) The type of storage used by this cluster to serve its parent instance's tables, unless explicitly overridden. + encryption_config (google.cloud.bigtable_admin_v2.types.Cluster.EncryptionConfig): + Immutable. The encryption configuration for + CMEK-protected clusters. """ class State(proto.Enum): @@ -120,6 +123,28 @@ class State(proto.Enum): RESIZING = 3 DISABLED = 4 + class EncryptionConfig(proto.Message): + r"""Cloud Key Management Service (Cloud KMS) settings for a CMEK- + rotected cluster. + + Attributes: + kms_key_name (str): + Describes the Cloud KMS encryption key that will be used to + protect the destination Bigtable cluster. The requirements + for this key are: + + 1) The Cloud Bigtable service account associated with the + project that contains this cluster must be granted the + ``cloudkms.cryptoKeyEncrypterDecrypter`` role on the CMEK + key. 
+ 2) Only regional keys can be used and the region of the CMEK + key must match the region of the cluster. + 3) All clusters within an instance must use the same CMEK + key. + """ + + kms_key_name = proto.Field(proto.STRING, number=1) + name = proto.Field(proto.STRING, number=1) location = proto.Field(proto.STRING, number=2) @@ -130,6 +155,8 @@ class State(proto.Enum): default_storage_type = proto.Field(proto.ENUM, number=5, enum=common.StorageType,) + encryption_config = proto.Field(proto.MESSAGE, number=6, message=EncryptionConfig,) + class AppProfile(proto.Message): r"""A configuration object describing how Cloud Bigtable should @@ -139,7 +166,7 @@ class AppProfile(proto.Message): name (str): (``OutputOnly``) The unique name of the app profile. Values are of the form - ``projects//instances//appProfiles/[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + ``projects/{project}/instances/{instance}/appProfiles/[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. etag (str): Strongly validated etag for optimistic concurrency control. Preserve the value returned from ``GetAppProfile`` when diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py index 96d7750f718f..7f5f88e4f58f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py @@ -20,6 +20,7 @@ from google.protobuf import duration_pb2 as duration # type: ignore from google.protobuf import timestamp_pb2 as timestamp # type: ignore +from google.rpc import status_pb2 as status # type: ignore __protobuf__ = proto.module( @@ -30,6 +31,7 @@ "Table", "ColumnFamily", "GcRule", + "EncryptionInfo", "Snapshot", "Backup", "BackupInfo", @@ -68,9 +70,8 @@ class Table(proto.Message): Attributes: name (str): - Output only. The unique name of the table. Values are of the - form - ``projects//instances//tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + The unique name of the table. Values are of the form + ``projects/{project}/instances/{instance}/tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. Views: ``NAME_ONLY``, ``SCHEMA_VIEW``, ``REPLICATION_VIEW``, ``FULL`` cluster_states (Sequence[google.cloud.bigtable_admin_v2.types.Table.ClusterStatesEntry]): @@ -79,7 +80,7 @@ class Table(proto.Message): data in a particular cluster (for example, if its zone is unavailable), then there will be an entry for the cluster with UNKNOWN ``replication_status``. Views: - ``REPLICATION_VIEW``, ``FULL`` + ``REPLICATION_VIEW``, ``ENCRYPTION_VIEW``, ``FULL`` column_families (Sequence[google.cloud.bigtable_admin_v2.types.Table.ColumnFamiliesEntry]): (``CreationOnly``) The column families configured for this table, mapped by column family ID. Views: ``SCHEMA_VIEW``, @@ -110,6 +111,7 @@ class View(proto.Enum): NAME_ONLY = 1 SCHEMA_VIEW = 2 REPLICATION_VIEW = 3 + ENCRYPTION_VIEW = 5 FULL = 4 class ClusterState(proto.Message): @@ -119,6 +121,15 @@ class ClusterState(proto.Message): replication_state (google.cloud.bigtable_admin_v2.types.Table.ClusterState.ReplicationState): Output only. The state of replication for the table in this cluster. + encryption_info (Sequence[google.cloud.bigtable_admin_v2.types.EncryptionInfo]): + Output only. The encryption information for + the table in this cluster. If the encryption key + protecting this resource is customer managed, + then its version can be rotated in Cloud Key + Management Service (Cloud KMS). 
The primary + version of the key and its status will be + reflected here when changes propagate from Cloud + KMS. """ class ReplicationState(proto.Enum): @@ -134,6 +145,10 @@ class ReplicationState(proto.Enum): proto.ENUM, number=1, enum="Table.ClusterState.ReplicationState", ) + encryption_info = proto.RepeatedField( + proto.MESSAGE, number=2, message="EncryptionInfo", + ) + name = proto.Field(proto.STRING, number=1) cluster_states = proto.MapField( @@ -222,6 +237,40 @@ class Union(proto.Message): union = proto.Field(proto.MESSAGE, number=4, oneof="rule", message=Union,) +class EncryptionInfo(proto.Message): + r"""Encryption information for a given resource. + If this resource is protected with customer managed encryption, + the in-use Cloud Key Management Service (Cloud KMS) key version + is specified along with its status. + + Attributes: + encryption_type (google.cloud.bigtable_admin_v2.types.EncryptionInfo.EncryptionType): + Output only. The type of encryption used to + protect this resource. + encryption_status (google.rpc.status_pb2.Status): + Output only. The status of encrypt/decrypt + calls on underlying data for this resource. + Regardless of status, the existing data is + always encrypted at rest. + kms_key_version (str): + Output only. The version of the Cloud KMS key + specified in the parent cluster that is in use + for the data underlying this table. + """ + + class EncryptionType(proto.Enum): + r"""Possible encryption types for a resource.""" + ENCRYPTION_TYPE_UNSPECIFIED = 0 + GOOGLE_DEFAULT_ENCRYPTION = 1 + CUSTOMER_MANAGED_ENCRYPTION = 2 + + encryption_type = proto.Field(proto.ENUM, number=3, enum=EncryptionType,) + + encryption_status = proto.Field(proto.MESSAGE, number=4, message=status.Status,) + + kms_key_version = proto.Field(proto.STRING, number=2) + + class Snapshot(proto.Message): r"""A snapshot of a table at a particular time. A snapshot can be used as a checkpoint for data restoration or a data source for a @@ -236,7 +285,7 @@ class Snapshot(proto.Message): name (str): Output only. The unique name of the snapshot. Values are of the form - ``projects//instances//clusters//snapshots/``. + ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. source_table (google.cloud.bigtable_admin_v2.types.Table): Output only. The source table at the time the snapshot was taken. @@ -322,6 +371,9 @@ class Backup(proto.Message): Output only. Size of the backup in bytes. state (google.cloud.bigtable_admin_v2.types.Backup.State): Output only. The current state of the backup. + encryption_info (google.cloud.bigtable_admin_v2.types.EncryptionInfo): + Output only. The encryption information for + the backup. """ class State(proto.Enum): @@ -344,6 +396,8 @@ class State(proto.Enum): state = proto.Field(proto.ENUM, number=7, enum=State,) + encryption_info = proto.Field(proto.MESSAGE, number=9, message="EncryptionInfo",) + class BackupInfo(proto.Message): r"""Information about a backup. 
diff --git a/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py b/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py index d30de39dbfd8..3902adff57af 100644 --- a/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py +++ b/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py @@ -77,7 +77,7 @@ class bigtable_adminCallTransformer(cst.CSTTransformer): 'test_iam_permissions': ('resource', 'permissions', ), 'update_app_profile': ('app_profile', 'update_mask', 'ignore_warnings', ), 'update_backup': ('backup', 'update_mask', ), - 'update_cluster': ('serve_nodes', 'name', 'location', 'state', 'default_storage_type', ), + 'update_cluster': ('serve_nodes', 'name', 'location', 'state', 'default_storage_type', 'encryption_config', ), 'update_instance': ('display_name', 'name', 'state', 'type_', 'labels', ), } diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index 4416e5d4efa5..fe502187f97e 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -4,15 +4,15 @@ "git": { "name": ".", "remote": "https://github.com/googleapis/python-bigtable.git", - "sha": "6fe87016a159bbdc6bf29856b1cf6e633e16216a" + "sha": "b7489b65319eabd1dbe01d5d01b24500d013b53f" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "8d73f9486fc193a150f6c907dfb9f49431aff3ff", - "internalRef": "332497859" + "sha": "c06bbe28cc7287a55bf7926ee48da2565854de7f", + "internalRef": "359364666" } }, { @@ -51,7 +51,6 @@ } ], "generatedFiles": [ - ".coveragerc", ".flake8", ".github/CONTRIBUTING.md", ".github/ISSUE_TEMPLATE/bug_report.md", @@ -107,46 +106,51 @@ "docs/conf.py", "docs/multiprocessing.rst", "google/cloud/bigtable_admin_v2/__init__.py", - "google/cloud/bigtable_admin_v2/gapic/__init__.py", - "google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client.py", - "google/cloud/bigtable_admin_v2/gapic/bigtable_instance_admin_client_config.py", - "google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client.py", - "google/cloud/bigtable_admin_v2/gapic/bigtable_table_admin_client_config.py", - "google/cloud/bigtable_admin_v2/gapic/enums.py", - "google/cloud/bigtable_admin_v2/gapic/transports/__init__.py", - "google/cloud/bigtable_admin_v2/gapic/transports/bigtable_instance_admin_grpc_transport.py", - "google/cloud/bigtable_admin_v2/gapic/transports/bigtable_table_admin_grpc_transport.py", - "google/cloud/bigtable_admin_v2/proto/__init__.py", "google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto", - "google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2.py", - "google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin_pb2_grpc.py", "google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto", - "google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2.py", - "google/cloud/bigtable_admin_v2/proto/bigtable_table_admin_pb2_grpc.py", "google/cloud/bigtable_admin_v2/proto/common.proto", - "google/cloud/bigtable_admin_v2/proto/common_pb2.py", - "google/cloud/bigtable_admin_v2/proto/common_pb2_grpc.py", "google/cloud/bigtable_admin_v2/proto/instance.proto", - "google/cloud/bigtable_admin_v2/proto/instance_pb2.py", - "google/cloud/bigtable_admin_v2/proto/instance_pb2_grpc.py", "google/cloud/bigtable_admin_v2/proto/table.proto", - "google/cloud/bigtable_admin_v2/proto/table_pb2.py", - "google/cloud/bigtable_admin_v2/proto/table_pb2_grpc.py", - 
"google/cloud/bigtable_admin_v2/types.py", + "google/cloud/bigtable_admin_v2/py.typed", + "google/cloud/bigtable_admin_v2/services/__init__.py", + "google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py", + "google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py", + "google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py", + "google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py", + "google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py", + "google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py", + "google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py", + "google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py", + "google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py", + "google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py", + "google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py", + "google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py", + "google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py", + "google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py", + "google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py", + "google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py", + "google/cloud/bigtable_admin_v2/types/__init__.py", + "google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py", + "google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py", + "google/cloud/bigtable_admin_v2/types/common.py", + "google/cloud/bigtable_admin_v2/types/instance.py", + "google/cloud/bigtable_admin_v2/types/table.py", "google/cloud/bigtable_v2/__init__.py", - "google/cloud/bigtable_v2/gapic/__init__.py", - "google/cloud/bigtable_v2/gapic/bigtable_client.py", - "google/cloud/bigtable_v2/gapic/bigtable_client_config.py", - "google/cloud/bigtable_v2/gapic/transports/__init__.py", - "google/cloud/bigtable_v2/gapic/transports/bigtable_grpc_transport.py", - "google/cloud/bigtable_v2/proto/__init__.py", "google/cloud/bigtable_v2/proto/bigtable.proto", - "google/cloud/bigtable_v2/proto/bigtable_pb2.py", - "google/cloud/bigtable_v2/proto/bigtable_pb2_grpc.py", "google/cloud/bigtable_v2/proto/data.proto", - "google/cloud/bigtable_v2/proto/data_pb2.py", - "google/cloud/bigtable_v2/proto/data_pb2_grpc.py", - "google/cloud/bigtable_v2/types.py", + "google/cloud/bigtable_v2/py.typed", + "google/cloud/bigtable_v2/services/__init__.py", + "google/cloud/bigtable_v2/services/bigtable/__init__.py", + "google/cloud/bigtable_v2/services/bigtable/async_client.py", + "google/cloud/bigtable_v2/services/bigtable/client.py", + "google/cloud/bigtable_v2/services/bigtable/transports/__init__.py", + "google/cloud/bigtable_v2/services/bigtable/transports/base.py", + "google/cloud/bigtable_v2/services/bigtable/transports/grpc.py", + "google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py", + "google/cloud/bigtable_v2/types/__init__.py", + "google/cloud/bigtable_v2/types/bigtable.py", + "google/cloud/bigtable_v2/types/data.py", + "noxfile.py", "renovate.json", "samples/AUTHORING_GUIDE.md", "samples/CONTRIBUTING.md", @@ -160,6 +164,8 @@ "samples/snippets/README.md", "samples/tableadmin/README.md", "scripts/decrypt-secrets.sh", + "scripts/fixup_bigtable_admin_v2_keywords.py", + 
"scripts/fixup_bigtable_v2_keywords.py", "scripts/readme-gen/readme_gen.py", "scripts/readme-gen/templates/README.tmpl.rst", "scripts/readme-gen/templates/auth.tmpl.rst", @@ -168,8 +174,10 @@ "scripts/readme-gen/templates/install_portaudio.tmpl.rst", "setup.cfg", "testing/.gitignore", - "tests/unit/gapic/v2/test_bigtable_client_v2.py", - "tests/unit/gapic/v2/test_bigtable_instance_admin_client_v2.py", - "tests/unit/gapic/v2/test_bigtable_table_admin_client_v2.py" + "tests/unit/gapic/bigtable_admin_v2/__init__.py", + "tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py", + "tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py", + "tests/unit/gapic/bigtable_v2/__init__.py", + "tests/unit/gapic/bigtable_v2/test_bigtable.py" ] } \ No newline at end of file diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index 5c6752cac3dd..8a676d825b37 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -5171,9 +5171,38 @@ def test_parse_cluster_path(): assert expected == actual -def test_instance_path(): +def test_crypto_key_path(): project = "squid" - instance = "clam" + location = "clam" + key_ring = "whelk" + crypto_key = "octopus" + + expected = "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}".format( + project=project, location=location, key_ring=key_ring, crypto_key=crypto_key, + ) + actual = BigtableInstanceAdminClient.crypto_key_path( + project, location, key_ring, crypto_key + ) + assert expected == actual + + +def test_parse_crypto_key_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + "key_ring": "cuttlefish", + "crypto_key": "mussel", + } + path = BigtableInstanceAdminClient.crypto_key_path(**expected) + + # Check that the path construction is reversible. 
+ actual = BigtableInstanceAdminClient.parse_crypto_key_path(path) + assert expected == actual + + +def test_instance_path(): + project = "winkle" + instance = "nautilus" expected = "projects/{project}/instances/{instance}".format( project=project, instance=instance, @@ -5184,8 +5213,8 @@ def test_instance_path(): def test_parse_instance_path(): expected = { - "project": "whelk", - "instance": "octopus", + "project": "scallop", + "instance": "abalone", } path = BigtableInstanceAdminClient.instance_path(**expected) @@ -5195,7 +5224,7 @@ def test_parse_instance_path(): def test_common_billing_account_path(): - billing_account = "oyster" + billing_account = "squid" expected = "billingAccounts/{billing_account}".format( billing_account=billing_account, @@ -5206,7 +5235,7 @@ def test_common_billing_account_path(): def test_parse_common_billing_account_path(): expected = { - "billing_account": "nudibranch", + "billing_account": "clam", } path = BigtableInstanceAdminClient.common_billing_account_path(**expected) @@ -5216,7 +5245,7 @@ def test_parse_common_billing_account_path(): def test_common_folder_path(): - folder = "cuttlefish" + folder = "whelk" expected = "folders/{folder}".format(folder=folder,) actual = BigtableInstanceAdminClient.common_folder_path(folder) @@ -5225,7 +5254,7 @@ def test_common_folder_path(): def test_parse_common_folder_path(): expected = { - "folder": "mussel", + "folder": "octopus", } path = BigtableInstanceAdminClient.common_folder_path(**expected) @@ -5235,7 +5264,7 @@ def test_parse_common_folder_path(): def test_common_organization_path(): - organization = "winkle" + organization = "oyster" expected = "organizations/{organization}".format(organization=organization,) actual = BigtableInstanceAdminClient.common_organization_path(organization) @@ -5244,7 +5273,7 @@ def test_common_organization_path(): def test_parse_common_organization_path(): expected = { - "organization": "nautilus", + "organization": "nudibranch", } path = BigtableInstanceAdminClient.common_organization_path(**expected) @@ -5254,7 +5283,7 @@ def test_parse_common_organization_path(): def test_common_project_path(): - project = "scallop" + project = "cuttlefish" expected = "projects/{project}".format(project=project,) actual = BigtableInstanceAdminClient.common_project_path(project) @@ -5263,7 +5292,7 @@ def test_common_project_path(): def test_parse_common_project_path(): expected = { - "project": "abalone", + "project": "mussel", } path = BigtableInstanceAdminClient.common_project_path(**expected) @@ -5273,8 +5302,8 @@ def test_parse_common_project_path(): def test_common_location_path(): - project = "squid" - location = "clam" + project = "winkle" + location = "nautilus" expected = "projects/{project}/locations/{location}".format( project=project, location=location, @@ -5285,8 +5314,8 @@ def test_common_location_path(): def test_parse_common_location_path(): expected = { - "project": "whelk", - "location": "octopus", + "project": "scallop", + "location": "abalone", } path = BigtableInstanceAdminClient.common_location_path(**expected) diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index 92bdb8718436..862179e172a7 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -51,9 +51,11 @@ from 
google.iam.v1 import policy_pb2 as policy # type: ignore from google.longrunning import operations_pb2 from google.oauth2 import service_account +from google.protobuf import any_pb2 as gp_any # type: ignore from google.protobuf import duration_pb2 as duration # type: ignore from google.protobuf import field_mask_pb2 as field_mask # type: ignore from google.protobuf import timestamp_pb2 as timestamp # type: ignore +from google.rpc import status_pb2 as status # type: ignore from google.type import expr_pb2 as expr # type: ignore @@ -5868,9 +5870,44 @@ def test_parse_cluster_path(): assert expected == actual -def test_instance_path(): +def test_crypto_key_version_path(): project = "whelk" - instance = "octopus" + location = "octopus" + key_ring = "oyster" + crypto_key = "nudibranch" + crypto_key_version = "cuttlefish" + + expected = "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}/cryptoKeyVersions/{crypto_key_version}".format( + project=project, + location=location, + key_ring=key_ring, + crypto_key=crypto_key, + crypto_key_version=crypto_key_version, + ) + actual = BigtableTableAdminClient.crypto_key_version_path( + project, location, key_ring, crypto_key, crypto_key_version + ) + assert expected == actual + + +def test_parse_crypto_key_version_path(): + expected = { + "project": "mussel", + "location": "winkle", + "key_ring": "nautilus", + "crypto_key": "scallop", + "crypto_key_version": "abalone", + } + path = BigtableTableAdminClient.crypto_key_version_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableTableAdminClient.parse_crypto_key_version_path(path) + assert expected == actual + + +def test_instance_path(): + project = "squid" + instance = "clam" expected = "projects/{project}/instances/{instance}".format( project=project, instance=instance, @@ -5881,8 +5918,8 @@ def test_instance_path(): def test_parse_instance_path(): expected = { - "project": "oyster", - "instance": "nudibranch", + "project": "whelk", + "instance": "octopus", } path = BigtableTableAdminClient.instance_path(**expected) @@ -5892,10 +5929,10 @@ def test_parse_instance_path(): def test_snapshot_path(): - project = "cuttlefish" - instance = "mussel" - cluster = "winkle" - snapshot = "nautilus" + project = "oyster" + instance = "nudibranch" + cluster = "cuttlefish" + snapshot = "mussel" expected = "projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}".format( project=project, instance=instance, cluster=cluster, snapshot=snapshot, @@ -5908,10 +5945,10 @@ def test_snapshot_path(): def test_parse_snapshot_path(): expected = { - "project": "scallop", - "instance": "abalone", - "cluster": "squid", - "snapshot": "clam", + "project": "winkle", + "instance": "nautilus", + "cluster": "scallop", + "snapshot": "abalone", } path = BigtableTableAdminClient.snapshot_path(**expected) @@ -5921,9 +5958,9 @@ def test_parse_snapshot_path(): def test_table_path(): - project = "whelk" - instance = "octopus" - table = "oyster" + project = "squid" + instance = "clam" + table = "whelk" expected = "projects/{project}/instances/{instance}/tables/{table}".format( project=project, instance=instance, table=table, @@ -5934,9 +5971,9 @@ def test_table_path(): def test_parse_table_path(): expected = { - "project": "nudibranch", - "instance": "cuttlefish", - "table": "mussel", + "project": "octopus", + "instance": "oyster", + "table": "nudibranch", } path = BigtableTableAdminClient.table_path(**expected) @@ -5946,7 +5983,7 @@ def 
test_parse_table_path(): def test_common_billing_account_path(): - billing_account = "winkle" + billing_account = "cuttlefish" expected = "billingAccounts/{billing_account}".format( billing_account=billing_account, @@ -5957,7 +5994,7 @@ def test_common_billing_account_path(): def test_parse_common_billing_account_path(): expected = { - "billing_account": "nautilus", + "billing_account": "mussel", } path = BigtableTableAdminClient.common_billing_account_path(**expected) @@ -5967,7 +6004,7 @@ def test_parse_common_billing_account_path(): def test_common_folder_path(): - folder = "scallop" + folder = "winkle" expected = "folders/{folder}".format(folder=folder,) actual = BigtableTableAdminClient.common_folder_path(folder) @@ -5976,7 +6013,7 @@ def test_common_folder_path(): def test_parse_common_folder_path(): expected = { - "folder": "abalone", + "folder": "nautilus", } path = BigtableTableAdminClient.common_folder_path(**expected) @@ -5986,7 +6023,7 @@ def test_parse_common_folder_path(): def test_common_organization_path(): - organization = "squid" + organization = "scallop" expected = "organizations/{organization}".format(organization=organization,) actual = BigtableTableAdminClient.common_organization_path(organization) @@ -5995,7 +6032,7 @@ def test_common_organization_path(): def test_parse_common_organization_path(): expected = { - "organization": "clam", + "organization": "abalone", } path = BigtableTableAdminClient.common_organization_path(**expected) @@ -6005,7 +6042,7 @@ def test_parse_common_organization_path(): def test_common_project_path(): - project = "whelk" + project = "squid" expected = "projects/{project}".format(project=project,) actual = BigtableTableAdminClient.common_project_path(project) @@ -6014,7 +6051,7 @@ def test_common_project_path(): def test_parse_common_project_path(): expected = { - "project": "octopus", + "project": "clam", } path = BigtableTableAdminClient.common_project_path(**expected) @@ -6024,8 +6061,8 @@ def test_parse_common_project_path(): def test_common_location_path(): - project = "oyster" - location = "nudibranch" + project = "whelk" + location = "octopus" expected = "projects/{project}/locations/{location}".format( project=project, location=location, @@ -6036,8 +6073,8 @@ def test_common_location_path(): def test_parse_common_location_path(): expected = { - "project": "cuttlefish", - "location": "mussel", + "project": "oyster", + "location": "nudibranch", } path = BigtableTableAdminClient.common_location_path(**expected) From 7afb552313897681cfda1731f05b374f400bf78c Mon Sep 17 00:00:00 2001 From: kolea2 <45548808+kolea2@users.noreply.github.com> Date: Fri, 26 Feb 2021 10:47:46 -0500 Subject: [PATCH 406/892] chore: remove temp fix, set cov level to 99 (#228) --- packages/google-cloud-bigtable/synth.py | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/packages/google-cloud-bigtable/synth.py b/packages/google-cloud-bigtable/synth.py index 2a74e80f61a9..185ee6942fd5 100644 --- a/packages/google-cloud-bigtable/synth.py +++ b/packages/google-cloud-bigtable/synth.py @@ -47,20 +47,13 @@ s.move(library / "tests") s.move(library / "scripts") -# temporary workaround for https://github.com/googleapis/gapic-generator-python/issues/778 -s.replace( - "google/cloud/**/client.py", - """\s+if permissions: -\s+request\.permissions\.extend\(permissions\)""", - "", -) - # ---------------------------------------------------------------------------- # Add templated files # 
---------------------------------------------------------------------------- templated_files = common.py_library( samples=True, # set to True only if there are samples microgenerator=True, + cov_level=99 ) s.move(templated_files, excludes=[".coveragerc"]) From e3beef80da74bf9f6f947a5e52ec9008b51a031e Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Mon, 1 Mar 2021 07:56:34 -0800 Subject: [PATCH 407/892] deps: update gapic-generator-python to 0.40.11 (#230) * changes without context autosynth cannot find the source of changes triggered by earlier changes in this repository, or by version upgrades to tools such as linters. * docs: update python contributing guide Adds details about blacken, updates version for system tests, and shows how to pass through pytest arguments. Source-Author: Chris Cotter Source-Date: Mon Feb 8 17:13:36 2021 -0500 Source-Repo: googleapis/synthtool Source-Sha: 4679e7e415221f03ff2a71e3ffad75b9ec41d87e Source-Link: https://github.com/googleapis/synthtool/commit/4679e7e415221f03ff2a71e3ffad75b9ec41d87e * chore: update gapic-generator-python to 0.40.11 PiperOrigin-RevId: 359562873 Source-Author: Google APIs Source-Date: Thu Feb 25 10:52:32 2021 -0800 Source-Repo: googleapis/googleapis Source-Sha: 07932bb995e7dc91b43620ea8402c6668c7d102c Source-Link: https://github.com/googleapis/googleapis/commit/07932bb995e7dc91b43620ea8402c6668c7d102c * chore(java): fix gapic target name PiperOrigin-RevId: 359594504 Source-Author: Google APIs Source-Date: Thu Feb 25 13:07:14 2021 -0800 Source-Repo: googleapis/googleapis Source-Sha: 80fafbce83f1d95c3616c7b3f50504a4ad251bfd Source-Link: https://github.com/googleapis/googleapis/commit/80fafbce83f1d95c3616c7b3f50504a4ad251bfd * feat: Enable PHP micro-generator beta01 PiperOrigin-RevId: 359620992 Source-Author: Google APIs Source-Date: Thu Feb 25 15:00:05 2021 -0800 Source-Repo: googleapis/googleapis Source-Sha: e41506dc28a42bae9b86c7b45e889bdf6d786648 Source-Link: https://github.com/googleapis/googleapis/commit/e41506dc28a42bae9b86c7b45e889bdf6d786648 --- .../cloud/bigtable_admin_v2/__init__.py | 4 +- .../bigtable_instance_admin/async_client.py | 32 +- .../bigtable_instance_admin/client.py | 11 +- .../bigtable_table_admin/async_client.py | 32 +- .../services/bigtable_table_admin/client.py | 11 +- .../services/bigtable/async_client.py | 32 +- packages/google-cloud-bigtable/synth.metadata | 6 +- .../unit/gapic/bigtable_admin_v2/__init__.py | 15 + .../test_bigtable_instance_admin.py | 326 ++++++++++++++- .../test_bigtable_table_admin.py | 372 +++++++++++++++++- .../tests/unit/gapic/bigtable_v2/__init__.py | 15 + .../unit/gapic/bigtable_v2/test_bigtable.py | 108 ++++- 12 files changed, 937 insertions(+), 27 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py index 79a9bea684e0..edfce27effc3 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py @@ -88,7 +88,7 @@ "AppProfile", "Backup", "BackupInfo", - "BigtableTableAdminClient", + "BigtableInstanceAdminClient", "CheckConsistencyRequest", "CheckConsistencyResponse", "Cluster", @@ -151,5 +151,5 @@ "UpdateBackupRequest", "UpdateClusterMetadata", "UpdateInstanceMetadata", - "BigtableInstanceAdminClient", + "BigtableTableAdminClient", ) diff --git 
a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py index d375f3280161..f316ef48b88c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ -100,8 +100,36 @@ class BigtableInstanceAdminAsyncClient: BigtableInstanceAdminClient.parse_common_location_path ) - from_service_account_info = BigtableInstanceAdminClient.from_service_account_info - from_service_account_file = BigtableInstanceAdminClient.from_service_account_file + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + BigtableInstanceAdminAsyncClient: The constructed client. + """ + return BigtableInstanceAdminClient.from_service_account_info.__func__(BigtableInstanceAdminAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + BigtableInstanceAdminAsyncClient: The constructed client. + """ + return BigtableInstanceAdminClient.from_service_account_file.__func__(BigtableInstanceAdminAsyncClient, filename, *args, **kwargs) # type: ignore + from_service_account_json = from_service_account_file @property diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py index fc161479b234..6fac355bbaef 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -1850,7 +1850,7 @@ def get_iam_policy( request = iam_policy.GetIamPolicyRequest(**request) elif not request: - request = iam_policy.GetIamPolicyRequest(resource=resource,) + request = iam_policy.GetIamPolicyRequest() if resource is not None: request.resource = resource @@ -1978,7 +1978,7 @@ def set_iam_policy( request = iam_policy.SetIamPolicyRequest(**request) elif not request: - request = iam_policy.SetIamPolicyRequest(resource=resource,) + request = iam_policy.SetIamPolicyRequest() if resource is not None: request.resource = resource @@ -2061,13 +2061,14 @@ def test_iam_permissions( request = iam_policy.TestIamPermissionsRequest(**request) elif not request: - request = iam_policy.TestIamPermissionsRequest( - resource=resource, permissions=permissions, - ) + request = iam_policy.TestIamPermissionsRequest() if resource is not None: request.resource = resource + if permissions: + request.permissions.extend(permissions) + # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. 
rpc = self._transport._wrapped_methods[self._transport.test_iam_permissions] diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index 5aca2d37759e..0f604f79898e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -103,8 +103,36 @@ class BigtableTableAdminAsyncClient: BigtableTableAdminClient.parse_common_location_path ) - from_service_account_info = BigtableTableAdminClient.from_service_account_info - from_service_account_file = BigtableTableAdminClient.from_service_account_file + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + BigtableTableAdminAsyncClient: The constructed client. + """ + return BigtableTableAdminClient.from_service_account_info.__func__(BigtableTableAdminAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + BigtableTableAdminAsyncClient: The constructed client. 
+ """ + return BigtableTableAdminClient.from_service_account_file.__func__(BigtableTableAdminAsyncClient, filename, *args, **kwargs) # type: ignore + from_service_account_json = from_service_account_file @property diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index 362013eccebd..834914cf54ac 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -2259,7 +2259,7 @@ def get_iam_policy( request = iam_policy.GetIamPolicyRequest(**request) elif not request: - request = iam_policy.GetIamPolicyRequest(resource=resource,) + request = iam_policy.GetIamPolicyRequest() if resource is not None: request.resource = resource @@ -2387,7 +2387,7 @@ def set_iam_policy( request = iam_policy.SetIamPolicyRequest(**request) elif not request: - request = iam_policy.SetIamPolicyRequest(resource=resource,) + request = iam_policy.SetIamPolicyRequest() if resource is not None: request.resource = resource @@ -2470,13 +2470,14 @@ def test_iam_permissions( request = iam_policy.TestIamPermissionsRequest(**request) elif not request: - request = iam_policy.TestIamPermissionsRequest( - resource=resource, permissions=permissions, - ) + request = iam_policy.TestIamPermissionsRequest() if resource is not None: request.resource = resource + if permissions: + request.permissions.extend(permissions) + # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.test_iam_permissions] diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py index 6e170e791ef4..215ec1432c4a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -70,8 +70,36 @@ class BigtableAsyncClient: common_location_path = staticmethod(BigtableClient.common_location_path) parse_common_location_path = staticmethod(BigtableClient.parse_common_location_path) - from_service_account_info = BigtableClient.from_service_account_info - from_service_account_file = BigtableClient.from_service_account_file + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + BigtableAsyncClient: The constructed client. + """ + return BigtableClient.from_service_account_info.__func__(BigtableAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + BigtableAsyncClient: The constructed client. 
+ """ + return BigtableClient.from_service_account_file.__func__(BigtableAsyncClient, filename, *args, **kwargs) # type: ignore + from_service_account_json = from_service_account_file @property diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index fe502187f97e..6b71416b6471 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -4,15 +4,15 @@ "git": { "name": ".", "remote": "https://github.com/googleapis/python-bigtable.git", - "sha": "b7489b65319eabd1dbe01d5d01b24500d013b53f" + "sha": "fd19db49f843514d070e296b3934eb7371b9e2b8" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "c06bbe28cc7287a55bf7926ee48da2565854de7f", - "internalRef": "359364666" + "sha": "e41506dc28a42bae9b86c7b45e889bdf6d786648", + "internalRef": "359620992" } }, { diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/__init__.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/__init__.py index 8b137891791f..42ffdf2bc43d 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/__init__.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/__init__.py @@ -1 +1,16 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index 8a676d825b37..3ffcffc3bb1a 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -101,15 +101,19 @@ def test__get_default_mtls_endpoint(): ) -def test_bigtable_instance_admin_client_from_service_account_info(): +@pytest.mark.parametrize( + "client_class", [BigtableInstanceAdminClient, BigtableInstanceAdminAsyncClient,] +) +def test_bigtable_instance_admin_client_from_service_account_info(client_class): creds = credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: factory.return_value = creds info = {"valid": True} - client = BigtableInstanceAdminClient.from_service_account_info(info) + client = client_class.from_service_account_info(info) assert client.transport._credentials == creds + assert isinstance(client, client_class) assert client.transport._host == "bigtableadmin.googleapis.com:443" @@ -125,9 +129,11 @@ def test_bigtable_instance_admin_client_from_service_account_file(client_class): factory.return_value = creds client = client_class.from_service_account_file("dummy/file/path.json") assert client.transport._credentials == creds + assert isinstance(client, client_class) client = client_class.from_service_account_json("dummy/file/path.json") assert client.transport._credentials == creds + assert isinstance(client, client_class) assert client.transport._host == "bigtableadmin.googleapis.com:443" @@ -507,6 +513,22 @@ def test_create_instance_from_dict(): test_create_instance(request_type=dict) +def test_create_instance_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_instance), "__call__") as call: + client.create_instance() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.CreateInstanceRequest() + + @pytest.mark.asyncio async def test_create_instance_async( transport: str = "grpc_asyncio", @@ -754,6 +776,22 @@ def test_get_instance_from_dict(): test_get_instance(request_type=dict) +def test_get_instance_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_instance), "__call__") as call: + client.get_instance() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.GetInstanceRequest() + + @pytest.mark.asyncio async def test_get_instance_async( transport: str = "grpc_asyncio", @@ -972,6 +1010,22 @@ def test_list_instances_from_dict(): test_list_instances(request_type=dict) +def test_list_instances_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_instances), "__call__") as call: + client.list_instances() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.ListInstancesRequest() + + @pytest.mark.asyncio async def test_list_instances_async( transport: str = "grpc_asyncio", @@ -1190,6 +1244,22 @@ def test_update_instance_from_dict(): test_update_instance(request_type=dict) +def test_update_instance_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_instance), "__call__") as call: + client.update_instance() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == instance.Instance() + + @pytest.mark.asyncio async def test_update_instance_async( transport: str = "grpc_asyncio", request_type=instance.Instance @@ -1327,6 +1397,24 @@ def test_partial_update_instance_from_dict(): test_partial_update_instance(request_type=dict) +def test_partial_update_instance_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.partial_update_instance), "__call__" + ) as call: + client.partial_update_instance() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.PartialUpdateInstanceRequest() + + @pytest.mark.asyncio async def test_partial_update_instance_async( transport: str = "grpc_asyncio", @@ -1554,6 +1642,22 @@ def test_delete_instance_from_dict(): test_delete_instance(request_type=dict) +def test_delete_instance_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.delete_instance), "__call__") as call: + client.delete_instance() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.DeleteInstanceRequest() + + @pytest.mark.asyncio async def test_delete_instance_async( transport: str = "grpc_asyncio", @@ -1747,6 +1851,22 @@ def test_create_cluster_from_dict(): test_create_cluster(request_type=dict) +def test_create_cluster_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_cluster), "__call__") as call: + client.create_cluster() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.CreateClusterRequest() + + @pytest.mark.asyncio async def test_create_cluster_async( transport: str = "grpc_asyncio", @@ -1985,6 +2105,22 @@ def test_get_cluster_from_dict(): test_get_cluster(request_type=dict) +def test_get_cluster_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_cluster), "__call__") as call: + client.get_cluster() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.GetClusterRequest() + + @pytest.mark.asyncio async def test_get_cluster_async( transport: str = "grpc_asyncio", @@ -2206,6 +2342,22 @@ def test_list_clusters_from_dict(): test_list_clusters(request_type=dict) +def test_list_clusters_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: + client.list_clusters() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.ListClustersRequest() + + @pytest.mark.asyncio async def test_list_clusters_async( transport: str = "grpc_asyncio", @@ -2410,6 +2562,22 @@ def test_update_cluster_from_dict(): test_update_cluster(request_type=dict) +def test_update_cluster_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.update_cluster), "__call__") as call: + client.update_cluster() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == instance.Cluster() + + @pytest.mark.asyncio async def test_update_cluster_async( transport: str = "grpc_asyncio", request_type=instance.Cluster @@ -2533,6 +2701,22 @@ def test_delete_cluster_from_dict(): test_delete_cluster(request_type=dict) +def test_delete_cluster_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call: + client.delete_cluster() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.DeleteClusterRequest() + + @pytest.mark.asyncio async def test_delete_cluster_async( transport: str = "grpc_asyncio", @@ -2741,6 +2925,24 @@ def test_create_app_profile_from_dict(): test_create_app_profile(request_type=dict) +def test_create_app_profile_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_app_profile), "__call__" + ) as call: + client.create_app_profile() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.CreateAppProfileRequest() + + @pytest.mark.asyncio async def test_create_app_profile_async( transport: str = "grpc_asyncio", @@ -2988,6 +3190,22 @@ def test_get_app_profile_from_dict(): test_get_app_profile(request_type=dict) +def test_get_app_profile_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_app_profile), "__call__") as call: + client.get_app_profile() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.GetAppProfileRequest() + + @pytest.mark.asyncio async def test_get_app_profile_async( transport: str = "grpc_asyncio", @@ -3201,6 +3419,24 @@ def test_list_app_profiles_from_dict(): test_list_app_profiles(request_type=dict) +def test_list_app_profiles_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_app_profiles), "__call__" + ) as call: + client.list_app_profiles() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.ListAppProfilesRequest() + + @pytest.mark.asyncio async def test_list_app_profiles_async( transport: str = "grpc_asyncio", @@ -3578,6 +3814,24 @@ def test_update_app_profile_from_dict(): test_update_app_profile(request_type=dict) +def test_update_app_profile_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_app_profile), "__call__" + ) as call: + client.update_app_profile() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.UpdateAppProfileRequest() + + @pytest.mark.asyncio async def test_update_app_profile_async( transport: str = "grpc_asyncio", @@ -3808,6 +4062,24 @@ def test_delete_app_profile_from_dict(): test_delete_app_profile(request_type=dict) +def test_delete_app_profile_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_app_profile), "__call__" + ) as call: + client.delete_app_profile() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_instance_admin.DeleteAppProfileRequest() + + @pytest.mark.asyncio async def test_delete_app_profile_async( transport: str = "grpc_asyncio", @@ -4016,6 +4288,22 @@ def test_get_iam_policy_from_dict(): test_get_iam_policy(request_type=dict) +def test_get_iam_policy_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + client.get_iam_policy() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == iam_policy.GetIamPolicyRequest() + + @pytest.mark.asyncio async def test_get_iam_policy_async( transport: str = "grpc_asyncio", request_type=iam_policy.GetIamPolicyRequest @@ -4237,6 +4525,22 @@ def test_set_iam_policy_from_dict(): test_set_iam_policy(request_type=dict) +def test_set_iam_policy_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + client.set_iam_policy() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == iam_policy.SetIamPolicyRequest() + + @pytest.mark.asyncio async def test_set_iam_policy_async( transport: str = "grpc_asyncio", request_type=iam_policy.SetIamPolicyRequest @@ -4460,6 +4764,24 @@ def test_test_iam_permissions_from_dict(): test_test_iam_permissions(request_type=dict) +def test_test_iam_permissions_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableInstanceAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + client.test_iam_permissions() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == iam_policy.TestIamPermissionsRequest() + + @pytest.mark.asyncio async def test_test_iam_permissions_async( transport: str = "grpc_asyncio", request_type=iam_policy.TestIamPermissionsRequest diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index 862179e172a7..aca51c98e0d6 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -104,15 +104,19 @@ def test__get_default_mtls_endpoint(): ) -def test_bigtable_table_admin_client_from_service_account_info(): +@pytest.mark.parametrize( + "client_class", [BigtableTableAdminClient, BigtableTableAdminAsyncClient,] +) +def test_bigtable_table_admin_client_from_service_account_info(client_class): creds = credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: factory.return_value = creds info = {"valid": True} - client = BigtableTableAdminClient.from_service_account_info(info) + client = client_class.from_service_account_info(info) assert client.transport._credentials == creds + assert isinstance(client, client_class) assert client.transport._host == "bigtableadmin.googleapis.com:443" @@ -128,9 +132,11 @@ def test_bigtable_table_admin_client_from_service_account_file(client_class): factory.return_value = creds client = client_class.from_service_account_file("dummy/file/path.json") assert client.transport._credentials == creds + assert isinstance(client, client_class) client = client_class.from_service_account_json("dummy/file/path.json") assert client.transport._credentials == creds + assert isinstance(client, client_class) assert client.transport._host == "bigtableadmin.googleapis.com:443" @@ -505,6 +511,22 @@ def test_create_table_from_dict(): test_create_table(request_type=dict) +def test_create_table_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.create_table), "__call__") as call: + client.create_table() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.CreateTableRequest() + + @pytest.mark.asyncio async def test_create_table_async( transport: str = "grpc_asyncio", @@ -726,6 +748,24 @@ def test_create_table_from_snapshot_from_dict(): test_create_table_from_snapshot(request_type=dict) +def test_create_table_from_snapshot_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_table_from_snapshot), "__call__" + ) as call: + client.create_table_from_snapshot() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.CreateTableFromSnapshotRequest() + + @pytest.mark.asyncio async def test_create_table_from_snapshot_async( transport: str = "grpc_asyncio", @@ -956,6 +996,22 @@ def test_list_tables_from_dict(): test_list_tables(request_type=dict) +def test_list_tables_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_tables), "__call__") as call: + client.list_tables() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.ListTablesRequest() + + @pytest.mark.asyncio async def test_list_tables_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.ListTablesRequest @@ -1285,6 +1341,22 @@ def test_get_table_from_dict(): test_get_table(request_type=dict) +def test_get_table_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_table), "__call__") as call: + client.get_table() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.GetTableRequest() + + @pytest.mark.asyncio async def test_get_table_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.GetTableRequest @@ -1479,6 +1551,22 @@ def test_delete_table_from_dict(): test_delete_table(request_type=dict) +def test_delete_table_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.delete_table), "__call__") as call: + client.delete_table() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.DeleteTableRequest() + + @pytest.mark.asyncio async def test_delete_table_async( transport: str = "grpc_asyncio", @@ -1676,6 +1764,24 @@ def test_modify_column_families_from_dict(): test_modify_column_families(request_type=dict) +def test_modify_column_families_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.modify_column_families), "__call__" + ) as call: + client.modify_column_families() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.ModifyColumnFamiliesRequest() + + @pytest.mark.asyncio async def test_modify_column_families_async( transport: str = "grpc_asyncio", @@ -1915,6 +2021,22 @@ def test_drop_row_range_from_dict(): test_drop_row_range(request_type=dict) +def test_drop_row_range_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: + client.drop_row_range() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.DropRowRangeRequest() + + @pytest.mark.asyncio async def test_drop_row_range_async( transport: str = "grpc_asyncio", @@ -2041,6 +2163,24 @@ def test_generate_consistency_token_from_dict(): test_generate_consistency_token(request_type=dict) +def test_generate_consistency_token_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.generate_consistency_token), "__call__" + ) as call: + client.generate_consistency_token() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.GenerateConsistencyTokenRequest() + + @pytest.mark.asyncio async def test_generate_consistency_token_async( transport: str = "grpc_asyncio", @@ -2255,6 +2395,24 @@ def test_check_consistency_from_dict(): test_check_consistency(request_type=dict) +def test_check_consistency_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.check_consistency), "__call__" + ) as call: + client.check_consistency() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.CheckConsistencyRequest() + + @pytest.mark.asyncio async def test_check_consistency_async( transport: str = "grpc_asyncio", @@ -2472,6 +2630,22 @@ def test_snapshot_table_from_dict(): test_snapshot_table(request_type=dict) +def test_snapshot_table_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: + client.snapshot_table() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.SnapshotTableRequest() + + @pytest.mark.asyncio async def test_snapshot_table_async( transport: str = "grpc_asyncio", @@ -2709,6 +2883,22 @@ def test_get_snapshot_from_dict(): test_get_snapshot(request_type=dict) +def test_get_snapshot_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: + client.get_snapshot() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.GetSnapshotRequest() + + @pytest.mark.asyncio async def test_get_snapshot_async( transport: str = "grpc_asyncio", @@ -2916,6 +3106,22 @@ def test_list_snapshots_from_dict(): test_list_snapshots(request_type=dict) +def test_list_snapshots_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: + client.list_snapshots() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.ListSnapshotsRequest() + + @pytest.mark.asyncio async def test_list_snapshots_async( transport: str = "grpc_asyncio", @@ -3247,6 +3453,22 @@ def test_delete_snapshot_from_dict(): test_delete_snapshot(request_type=dict) +def test_delete_snapshot_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: + client.delete_snapshot() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.DeleteSnapshotRequest() + + @pytest.mark.asyncio async def test_delete_snapshot_async( transport: str = "grpc_asyncio", @@ -3434,6 +3656,22 @@ def test_create_backup_from_dict(): test_create_backup(request_type=dict) +def test_create_backup_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_backup), "__call__") as call: + client.create_backup() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.CreateBackupRequest() + + @pytest.mark.asyncio async def test_create_backup_async( transport: str = "grpc_asyncio", @@ -3663,6 +3901,22 @@ def test_get_backup_from_dict(): test_get_backup(request_type=dict) +def test_get_backup_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_backup), "__call__") as call: + client.get_backup() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.GetBackupRequest() + + @pytest.mark.asyncio async def test_get_backup_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.GetBackupRequest @@ -3878,6 +4132,22 @@ def test_update_backup_from_dict(): test_update_backup(request_type=dict) +def test_update_backup_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_backup), "__call__") as call: + client.update_backup() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.UpdateBackupRequest() + + @pytest.mark.asyncio async def test_update_backup_async( transport: str = "grpc_asyncio", @@ -4094,6 +4364,22 @@ def test_delete_backup_from_dict(): test_delete_backup(request_type=dict) +def test_delete_backup_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: + client.delete_backup() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.DeleteBackupRequest() + + @pytest.mark.asyncio async def test_delete_backup_async( transport: str = "grpc_asyncio", @@ -4286,6 +4572,22 @@ def test_list_backups_from_dict(): test_list_backups(request_type=dict) +def test_list_backups_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + client.list_backups() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.ListBackupsRequest() + + @pytest.mark.asyncio async def test_list_backups_async( transport: str = "grpc_asyncio", @@ -4617,6 +4919,22 @@ def test_restore_table_from_dict(): test_restore_table(request_type=dict) +def test_restore_table_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.restore_table), "__call__") as call: + client.restore_table() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable_table_admin.RestoreTableRequest() + + @pytest.mark.asyncio async def test_restore_table_async( transport: str = "grpc_asyncio", @@ -4744,6 +5062,22 @@ def test_get_iam_policy_from_dict(): test_get_iam_policy(request_type=dict) +def test_get_iam_policy_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + client.get_iam_policy() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == iam_policy.GetIamPolicyRequest() + + @pytest.mark.asyncio async def test_get_iam_policy_async( transport: str = "grpc_asyncio", request_type=iam_policy.GetIamPolicyRequest @@ -4957,6 +5291,22 @@ def test_set_iam_policy_from_dict(): test_set_iam_policy(request_type=dict) +def test_set_iam_policy_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + client.set_iam_policy() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == iam_policy.SetIamPolicyRequest() + + @pytest.mark.asyncio async def test_set_iam_policy_async( transport: str = "grpc_asyncio", request_type=iam_policy.SetIamPolicyRequest @@ -5172,6 +5522,24 @@ def test_test_iam_permissions_from_dict(): test_test_iam_permissions(request_type=dict) +def test_test_iam_permissions_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + client.test_iam_permissions() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == iam_policy.TestIamPermissionsRequest() + + @pytest.mark.asyncio async def test_test_iam_permissions_async( transport: str = "grpc_asyncio", request_type=iam_policy.TestIamPermissionsRequest diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/__init__.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/__init__.py index 8b137891791f..42ffdf2bc43d 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/__init__.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/__init__.py @@ -1 +1,16 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py index 0a42c2dade65..618d80317201 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py @@ -79,15 +79,17 @@ def test__get_default_mtls_endpoint(): assert BigtableClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi -def test_bigtable_client_from_service_account_info(): +@pytest.mark.parametrize("client_class", [BigtableClient, BigtableAsyncClient,]) +def test_bigtable_client_from_service_account_info(client_class): creds = credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: factory.return_value = creds info = {"valid": True} - client = BigtableClient.from_service_account_info(info) + client = client_class.from_service_account_info(info) assert client.transport._credentials == creds + assert isinstance(client, client_class) assert client.transport._host == "bigtable.googleapis.com:443" @@ -101,9 +103,11 @@ def test_bigtable_client_from_service_account_file(client_class): factory.return_value = creds client = client_class.from_service_account_file("dummy/file/path.json") assert client.transport._credentials == creds + assert isinstance(client, client_class) client = client_class.from_service_account_json("dummy/file/path.json") assert client.transport._credentials == creds + assert isinstance(client, client_class) assert client.transport._host == "bigtable.googleapis.com:443" @@ -440,6 +444,22 @@ def test_read_rows_from_dict(): test_read_rows(request_type=dict) +def test_read_rows_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.read_rows), "__call__") as call: + client.read_rows() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable.ReadRowsRequest() + + @pytest.mark.asyncio async def test_read_rows_async( transport: str = "grpc_asyncio", request_type=bigtable.ReadRowsRequest @@ -640,6 +660,22 @@ def test_sample_row_keys_from_dict(): test_sample_row_keys(request_type=dict) +def test_sample_row_keys_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: + client.sample_row_keys() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable.SampleRowKeysRequest() + + @pytest.mark.asyncio async def test_sample_row_keys_async( transport: str = "grpc_asyncio", request_type=bigtable.SampleRowKeysRequest @@ -838,6 +874,22 @@ def test_mutate_row_from_dict(): test_mutate_row(request_type=dict) +def test_mutate_row_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = BigtableClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: + client.mutate_row() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable.MutateRowRequest() + + @pytest.mark.asyncio async def test_mutate_row_async( transport: str = "grpc_asyncio", request_type=bigtable.MutateRowRequest @@ -1077,6 +1129,22 @@ def test_mutate_rows_from_dict(): test_mutate_rows(request_type=dict) +def test_mutate_rows_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: + client.mutate_rows() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable.MutateRowsRequest() + + @pytest.mark.asyncio async def test_mutate_rows_async( transport: str = "grpc_asyncio", request_type=bigtable.MutateRowsRequest @@ -1295,6 +1363,24 @@ def test_check_and_mutate_row_from_dict(): test_check_and_mutate_row(request_type=dict) +def test_check_and_mutate_row_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.check_and_mutate_row), "__call__" + ) as call: + client.check_and_mutate_row() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable.CheckAndMutateRowRequest() + + @pytest.mark.asyncio async def test_check_and_mutate_row_async( transport: str = "grpc_asyncio", request_type=bigtable.CheckAndMutateRowRequest @@ -1646,6 +1732,24 @@ def test_read_modify_write_row_from_dict(): test_read_modify_write_row(request_type=dict) +def test_read_modify_write_row_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.read_modify_write_row), "__call__" + ) as call: + client.read_modify_write_row() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == bigtable.ReadModifyWriteRowRequest() + + @pytest.mark.asyncio async def test_read_modify_write_row_async( transport: str = "grpc_asyncio", request_type=bigtable.ReadModifyWriteRowRequest From a4e60c8fc0b075570e2f14b0bfb523034e22407a Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Mon, 1 Mar 2021 17:31:31 +0100 Subject: [PATCH 408/892] chore(deps): update dependency apache-beam to v2.28.0 (#219) --- packages/google-cloud-bigtable/samples/beam/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index 69b59d1e29d9..a22c93d0b761 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ -apache-beam==2.27.0 +apache-beam==2.28.0 google-cloud-bigtable<2.0.0dev1 google-cloud-core==1.6.0 \ No newline at end of file From 9971794e11a113820990d425582628803c8081de Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Tue, 2 Mar 2021 06:46:02 -0800 Subject: [PATCH 409/892] build(python): enable flakybot on library unit and system tests (#234) This PR was generated using Autosynth. :rainbow: Synth log will be available here: https://source.cloud.google.com/results/invocations/e15c80db-37d6-4001-8245-0beecb2f23db/targets - [ ] To automatically regenerate this PR, check this box. Source-Link: https://github.com/googleapis/synthtool/commit/d17674372e27fb8f23013935e794aa37502071aa --- packages/google-cloud-bigtable/.gitignore | 4 +++- packages/google-cloud-bigtable/.kokoro/build.sh | 10 ++++++++++ .../google/cloud/bigtable_admin_v2/__init__.py | 4 ++-- packages/google-cloud-bigtable/noxfile.py | 17 +++++++++++++++-- packages/google-cloud-bigtable/synth.metadata | 6 +++--- 5 files changed, 33 insertions(+), 8 deletions(-) diff --git a/packages/google-cloud-bigtable/.gitignore b/packages/google-cloud-bigtable/.gitignore index b9daa52f118d..b4243ced74e4 100644 --- a/packages/google-cloud-bigtable/.gitignore +++ b/packages/google-cloud-bigtable/.gitignore @@ -50,8 +50,10 @@ docs.metadata # Virtual environment env/ + +# Test logs coverage.xml -sponge_log.xml +*sponge_log.xml # System test environment variables. system_tests/local_test_setup diff --git a/packages/google-cloud-bigtable/.kokoro/build.sh b/packages/google-cloud-bigtable/.kokoro/build.sh index 76d9329bad4d..9773bfca7cd7 100755 --- a/packages/google-cloud-bigtable/.kokoro/build.sh +++ b/packages/google-cloud-bigtable/.kokoro/build.sh @@ -40,6 +40,16 @@ python3 -m pip uninstall --yes --quiet nox-automation python3 -m pip install --upgrade --quiet nox python3 -m nox --version +# If this is a continuous build, send the test log to the FlakyBot. +# See https://github.com/googleapis/repo-automation-bots/tree/master/packages/flakybot. +if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"continuous"* ]]; then + cleanup() { + chmod +x $KOKORO_GFILE_DIR/linux_amd64/flakybot + $KOKORO_GFILE_DIR/linux_amd64/flakybot + } + trap cleanup EXIT HUP +fi + # If NOX_SESSION is set, it only runs the specified session, # otherwise run all the sessions. 
if [[ -n "${NOX_SESSION:-}" ]]; then diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py index edfce27effc3..79a9bea684e0 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py @@ -88,7 +88,7 @@ "AppProfile", "Backup", "BackupInfo", - "BigtableInstanceAdminClient", + "BigtableTableAdminClient", "CheckConsistencyRequest", "CheckConsistencyResponse", "Cluster", @@ -151,5 +151,5 @@ "UpdateBackupRequest", "UpdateClusterMetadata", "UpdateInstanceMetadata", - "BigtableTableAdminClient", + "BigtableInstanceAdminClient", ) diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index 70d9c13c2561..6e69bfe0be16 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -93,6 +93,7 @@ def default(session): session.run( "py.test", "--quiet", + f"--junitxml=unit_{session.python}_sponge_log.xml", "--cov=google/cloud", "--cov=tests/unit", "--cov-append", @@ -141,9 +142,21 @@ def system(session): # Run py.test against the system tests. if system_test_exists: - session.run("py.test", "--quiet", system_test_path, *session.posargs) + session.run( + "py.test", + "--quiet", + f"--junitxml=system_{session.python}_sponge_log.xml", + system_test_path, + *session.posargs, + ) if system_test_folder_exists: - session.run("py.test", "--quiet", system_test_folder_path, *session.posargs) + session.run( + "py.test", + "--quiet", + f"--junitxml=system_{session.python}_sponge_log.xml", + system_test_folder_path, + *session.posargs, + ) @nox.session(python=DEFAULT_PYTHON_VERSION) diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index 6b71416b6471..22cbe9e6b79f 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -4,7 +4,7 @@ "git": { "name": ".", "remote": "https://github.com/googleapis/python-bigtable.git", - "sha": "fd19db49f843514d070e296b3934eb7371b9e2b8" + "sha": "f0a2bc8ab62bf946c62ce9baffbd332ac5126b27" } }, { @@ -19,14 +19,14 @@ "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "4679e7e415221f03ff2a71e3ffad75b9ec41d87e" + "sha": "d17674372e27fb8f23013935e794aa37502071aa" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "4679e7e415221f03ff2a71e3ffad75b9ec41d87e" + "sha": "d17674372e27fb8f23013935e794aa37502071aa" } } ], From 2f6b12ddd33c2ee14783f9aaeb30161a75815230 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Wed, 3 Mar 2021 06:36:01 -0800 Subject: [PATCH 410/892] test: install pyopenssl for mtls testing (#235) * changes without context autosynth cannot find the source of changes triggered by earlier changes in this repository, or by version upgrades to tools such as linters. 
* test: install pyopenssl for mtls testing Source-Author: arithmetic1728 <58957152+arithmetic1728@users.noreply.github.com> Source-Date: Tue Mar 2 12:27:56 2021 -0800 Source-Repo: googleapis/synthtool Source-Sha: 0780323da96d5a53925fe0547757181fe76e8f1e Source-Link: https://github.com/googleapis/synthtool/commit/0780323da96d5a53925fe0547757181fe76e8f1e --- .../google/cloud/bigtable_admin_v2/__init__.py | 4 ++-- packages/google-cloud-bigtable/noxfile.py | 3 +++ packages/google-cloud-bigtable/synth.metadata | 6 +++--- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py index 79a9bea684e0..edfce27effc3 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py @@ -88,7 +88,7 @@ "AppProfile", "Backup", "BackupInfo", - "BigtableTableAdminClient", + "BigtableInstanceAdminClient", "CheckConsistencyRequest", "CheckConsistencyResponse", "Cluster", @@ -151,5 +151,5 @@ "UpdateBackupRequest", "UpdateClusterMetadata", "UpdateInstanceMetadata", - "BigtableInstanceAdminClient", + "BigtableTableAdminClient", ) diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index 6e69bfe0be16..72b38757056a 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -123,6 +123,9 @@ def system(session): # Sanity check: Only run tests if the environment variable is set. if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""): session.skip("Credentials must be set via environment variable") + # Install pyopenssl for mTLS testing. + if os.environ.get("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true": + session.install("pyopenssl") system_test_exists = os.path.exists(system_test_path) system_test_folder_exists = os.path.exists(system_test_folder_path) diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index 22cbe9e6b79f..b34bdccd3f95 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -4,7 +4,7 @@ "git": { "name": ".", "remote": "https://github.com/googleapis/python-bigtable.git", - "sha": "f0a2bc8ab62bf946c62ce9baffbd332ac5126b27" + "sha": "75f9df66d6037dbfa3a34722fed5fe362be5459f" } }, { @@ -19,14 +19,14 @@ "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "d17674372e27fb8f23013935e794aa37502071aa" + "sha": "0780323da96d5a53925fe0547757181fe76e8f1e" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "d17674372e27fb8f23013935e794aa37502071aa" + "sha": "0780323da96d5a53925fe0547757181fe76e8f1e" } } ], From f55060731b4e3c22aafb43ee2d3171261400bc2d Mon Sep 17 00:00:00 2001 From: Chris Rossi Date: Fri, 5 Mar 2021 13:10:00 -0500 Subject: [PATCH 411/892] chore: remove `google/cloud/bigtable.py` (#238) This file was redundant with/hidden by `google/cloud/bigtable/__init__.py` and seems to have been left in as an oversight. 
--- .../google/cloud/bigtable.py | 20 ------------------- 1 file changed, 20 deletions(-) delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable.py diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable.py b/packages/google-cloud-bigtable/google/cloud/bigtable.py deleted file mode 100644 index 72858878e8a7..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2017 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import absolute_import - -from google.cloud.bigtable_v2 import BigtableClient -from google.cloud.bigtable_v2 import types - -__all__ = ("types", "BigtableClient") From 4b720fba9931dea47a7199d81792d0eaebc07535 Mon Sep 17 00:00:00 2001 From: Chris Rossi Date: Fri, 5 Mar 2021 14:05:00 -0500 Subject: [PATCH 412/892] fix: fix unit test that could be broken by user's environment (#239) Unit test for the `google.cloud.bigtable.Client` constructor was picking up the project from user's `GOOGLE_CLOUD_PROJECT` environment variable. --- packages/google-cloud-bigtable/tests/unit/test_client.py | 1 + 1 file changed, 1 insertion(+) diff --git a/packages/google-cloud-bigtable/tests/unit/test_client.py b/packages/google-cloud-bigtable/tests/unit/test_client.py index 60a2cd738541..5f2d7db26ab7 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/test_client.py @@ -103,6 +103,7 @@ def _get_target_class(): def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) + @mock.patch("os.environ", {}) def test_constructor_defaults(self): from google.cloud.bigtable.client import _CLIENT_INFO from google.cloud.bigtable.client import DATA_SCOPE From 4b9637f8739261ce99f2eff8d30638612cdb6d30 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Mon, 8 Mar 2021 11:01:16 -0800 Subject: [PATCH 413/892] fix(retry): restore grpc_service_config for CreateBackup and {Restore,Snapshot}Table (#240) Committer: @miraleung PiperOrigin-RevId: 361301101 Source-Author: Google APIs Source-Date: Sat Mar 6 02:06:05 2021 -0800 Source-Repo: googleapis/googleapis Source-Sha: 458338f8ca444e43b9df96b984b68f3978852b74 Source-Link: https://github.com/googleapis/googleapis/commit/458338f8ca444e43b9df96b984b68f3978852b74 --- .../services/bigtable_table_admin/async_client.py | 4 ++-- .../services/bigtable_table_admin/transports/base.py | 4 ++-- packages/google-cloud-bigtable/synth.metadata | 6 +++--- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index 0f604f79898e..1e3e817bc849 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ 
b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -1515,7 +1515,7 @@ async def create_backup( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.create_backup, - default_timeout=None, + default_timeout=60.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -1919,7 +1919,7 @@ async def restore_table( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.restore_table, - default_timeout=None, + default_timeout=60.0, client_info=DEFAULT_CLIENT_INFO, ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py index b54025c94e57..2958323b730a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py @@ -224,7 +224,7 @@ def _prep_wrapped_messages(self, client_info): self.delete_snapshot, default_timeout=60.0, client_info=client_info, ), self.create_backup: gapic_v1.method.wrap_method( - self.create_backup, default_timeout=None, client_info=client_info, + self.create_backup, default_timeout=60.0, client_info=client_info, ), self.get_backup: gapic_v1.method.wrap_method( self.get_backup, @@ -259,7 +259,7 @@ def _prep_wrapped_messages(self, client_info): client_info=client_info, ), self.restore_table: gapic_v1.method.wrap_method( - self.restore_table, default_timeout=None, client_info=client_info, + self.restore_table, default_timeout=60.0, client_info=client_info, ), self.get_iam_policy: gapic_v1.method.wrap_method( self.get_iam_policy, diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index b34bdccd3f95..f4c43c18d4db 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -4,15 +4,15 @@ "git": { "name": ".", "remote": "https://github.com/googleapis/python-bigtable.git", - "sha": "75f9df66d6037dbfa3a34722fed5fe362be5459f" + "sha": "cbd712e6d3aded0c025525f97da1d667fbe2f061" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "e41506dc28a42bae9b86c7b45e889bdf6d786648", - "internalRef": "359620992" + "sha": "458338f8ca444e43b9df96b984b68f3978852b74", + "internalRef": "361301101" } }, { From 77b591799b1b946158dfd58af926e3d71ec11830 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Thu, 11 Mar 2021 14:22:35 -0800 Subject: [PATCH 414/892] chore: upstream deps regen (#242) * changes without context autosynth cannot find the source of changes triggered by earlier changes in this repository, or by version upgrades to tools such as linters. * chore: upgrade gapic-generator-python to 0.42.2 PiperOrigin-RevId: 361662015 Source-Author: Google APIs Source-Date: Mon Mar 8 14:47:18 2021 -0800 Source-Repo: googleapis/googleapis Source-Sha: 28a591963253d52ce3a25a918cafbdd9928de8cf Source-Link: https://github.com/googleapis/googleapis/commit/28a591963253d52ce3a25a918cafbdd9928de8cf * build: use gapic-generator-typescript v1.2.11. Fixed IAM v1 library generation. 
Committer: @alexander-fenster PiperOrigin-RevId: 361676678 Source-Author: Google APIs Source-Date: Mon Mar 8 15:51:18 2021 -0800 Source-Repo: googleapis/googleapis Source-Sha: 3aeb3a70f66457a9e6b07caff841719bb9873b57 Source-Link: https://github.com/googleapis/googleapis/commit/3aeb3a70f66457a9e6b07caff841719bb9873b57 --- .../bigtable_instance_admin/client.py | 18 +- .../services/bigtable_table_admin/client.py | 18 +- .../cloud/bigtable_admin_v2/types/__init__.py | 212 +++++++++--------- .../cloud/bigtable_v2/types/__init__.py | 76 +++---- packages/google-cloud-bigtable/synth.metadata | 6 +- 5 files changed, 165 insertions(+), 165 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py index 6fac355bbaef..68768d70e8b0 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -1844,12 +1844,12 @@ def get_iam_policy( "the individual field arguments should be set." ) - # The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. if isinstance(request, dict): + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. request = iam_policy.GetIamPolicyRequest(**request) - elif not request: + # Null request, just make one. request = iam_policy.GetIamPolicyRequest() if resource is not None: @@ -1972,12 +1972,12 @@ def set_iam_policy( "the individual field arguments should be set." ) - # The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. if isinstance(request, dict): + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. request = iam_policy.SetIamPolicyRequest(**request) - elif not request: + # Null request, just make one. request = iam_policy.SetIamPolicyRequest() if resource is not None: @@ -2055,12 +2055,12 @@ def test_iam_permissions( "the individual field arguments should be set." ) - # The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. if isinstance(request, dict): + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. request = iam_policy.TestIamPermissionsRequest(**request) - elif not request: + # Null request, just make one. request = iam_policy.TestIamPermissionsRequest() if resource is not None: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index 834914cf54ac..7240aa1c3d06 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -2253,12 +2253,12 @@ def get_iam_policy( "the individual field arguments should be set." ) - # The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. if isinstance(request, dict): + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
request = iam_policy.GetIamPolicyRequest(**request) - elif not request: + # Null request, just make one. request = iam_policy.GetIamPolicyRequest() if resource is not None: @@ -2381,12 +2381,12 @@ def set_iam_policy( "the individual field arguments should be set." ) - # The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. if isinstance(request, dict): + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. request = iam_policy.SetIamPolicyRequest(**request) - elif not request: + # Null request, just make one. request = iam_policy.SetIamPolicyRequest() if resource is not None: @@ -2464,12 +2464,12 @@ def test_iam_permissions( "the individual field arguments should be set." ) - # The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. if isinstance(request, dict): + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. request = iam_policy.TestIamPermissionsRequest(**request) - elif not request: + # Null request, just make one. request = iam_policy.TestIamPermissionsRequest() if resource is not None: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py index f637988c4d89..01e834d9b139 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py @@ -15,146 +15,146 @@ # limitations under the License. # -from .common import ( - OperationProgress, - StorageType, -) -from .instance import ( - Instance, - Cluster, - AppProfile, -) from .bigtable_instance_admin import ( - CreateInstanceRequest, - GetInstanceRequest, - ListInstancesRequest, - ListInstancesResponse, - PartialUpdateInstanceRequest, - DeleteInstanceRequest, + CreateAppProfileRequest, + CreateClusterMetadata, CreateClusterRequest, - GetClusterRequest, - ListClustersRequest, - ListClustersResponse, - DeleteClusterRequest, CreateInstanceMetadata, - UpdateInstanceMetadata, - CreateClusterMetadata, - UpdateClusterMetadata, - CreateAppProfileRequest, + CreateInstanceRequest, + DeleteAppProfileRequest, + DeleteClusterRequest, + DeleteInstanceRequest, GetAppProfileRequest, + GetClusterRequest, + GetInstanceRequest, ListAppProfilesRequest, ListAppProfilesResponse, - UpdateAppProfileRequest, - DeleteAppProfileRequest, + ListClustersRequest, + ListClustersResponse, + ListInstancesRequest, + ListInstancesResponse, + PartialUpdateInstanceRequest, UpdateAppProfileMetadata, -) -from .table import ( - RestoreInfo, - Table, - ColumnFamily, - GcRule, - EncryptionInfo, - Snapshot, - Backup, - BackupInfo, - RestoreSourceType, + UpdateAppProfileRequest, + UpdateClusterMetadata, + UpdateInstanceMetadata, ) from .bigtable_table_admin import ( - RestoreTableRequest, - RestoreTableMetadata, - OptimizeRestoredTableMetadata, - CreateTableRequest, + CheckConsistencyRequest, + CheckConsistencyResponse, + CreateBackupMetadata, + CreateBackupRequest, + CreateTableFromSnapshotMetadata, CreateTableFromSnapshotRequest, - DropRowRangeRequest, - ListTablesRequest, - ListTablesResponse, - GetTableRequest, + CreateTableRequest, + DeleteBackupRequest, + DeleteSnapshotRequest, DeleteTableRequest, - ModifyColumnFamiliesRequest, + DropRowRangeRequest, GenerateConsistencyTokenRequest, GenerateConsistencyTokenResponse, - CheckConsistencyRequest, - 
CheckConsistencyResponse, - SnapshotTableRequest, + GetBackupRequest, GetSnapshotRequest, + GetTableRequest, + ListBackupsRequest, + ListBackupsResponse, ListSnapshotsRequest, ListSnapshotsResponse, - DeleteSnapshotRequest, + ListTablesRequest, + ListTablesResponse, + ModifyColumnFamiliesRequest, + OptimizeRestoredTableMetadata, + RestoreTableMetadata, + RestoreTableRequest, SnapshotTableMetadata, - CreateTableFromSnapshotMetadata, - CreateBackupRequest, - CreateBackupMetadata, + SnapshotTableRequest, UpdateBackupRequest, - GetBackupRequest, - DeleteBackupRequest, - ListBackupsRequest, - ListBackupsResponse, +) +from .common import ( + OperationProgress, + StorageType, +) +from .instance import ( + AppProfile, + Cluster, + Instance, +) +from .table import ( + Backup, + BackupInfo, + ColumnFamily, + EncryptionInfo, + GcRule, + RestoreInfo, + Snapshot, + Table, + RestoreSourceType, ) __all__ = ( - "OperationProgress", - "StorageType", - "Instance", - "Cluster", - "AppProfile", - "CreateInstanceRequest", - "GetInstanceRequest", - "ListInstancesRequest", - "ListInstancesResponse", - "PartialUpdateInstanceRequest", - "DeleteInstanceRequest", + "CreateAppProfileRequest", + "CreateClusterMetadata", "CreateClusterRequest", - "GetClusterRequest", - "ListClustersRequest", - "ListClustersResponse", - "DeleteClusterRequest", "CreateInstanceMetadata", - "UpdateInstanceMetadata", - "CreateClusterMetadata", - "UpdateClusterMetadata", - "CreateAppProfileRequest", + "CreateInstanceRequest", + "DeleteAppProfileRequest", + "DeleteClusterRequest", + "DeleteInstanceRequest", "GetAppProfileRequest", + "GetClusterRequest", + "GetInstanceRequest", "ListAppProfilesRequest", "ListAppProfilesResponse", - "UpdateAppProfileRequest", - "DeleteAppProfileRequest", + "ListClustersRequest", + "ListClustersResponse", + "ListInstancesRequest", + "ListInstancesResponse", + "PartialUpdateInstanceRequest", "UpdateAppProfileMetadata", - "RestoreInfo", - "Table", - "ColumnFamily", - "GcRule", - "EncryptionInfo", - "Snapshot", - "Backup", - "BackupInfo", - "RestoreSourceType", - "RestoreTableRequest", - "RestoreTableMetadata", - "OptimizeRestoredTableMetadata", - "CreateTableRequest", + "UpdateAppProfileRequest", + "UpdateClusterMetadata", + "UpdateInstanceMetadata", + "CheckConsistencyRequest", + "CheckConsistencyResponse", + "CreateBackupMetadata", + "CreateBackupRequest", + "CreateTableFromSnapshotMetadata", "CreateTableFromSnapshotRequest", - "DropRowRangeRequest", - "ListTablesRequest", - "ListTablesResponse", - "GetTableRequest", + "CreateTableRequest", + "DeleteBackupRequest", + "DeleteSnapshotRequest", "DeleteTableRequest", - "ModifyColumnFamiliesRequest", + "DropRowRangeRequest", "GenerateConsistencyTokenRequest", "GenerateConsistencyTokenResponse", - "CheckConsistencyRequest", - "CheckConsistencyResponse", - "SnapshotTableRequest", + "GetBackupRequest", "GetSnapshotRequest", + "GetTableRequest", + "ListBackupsRequest", + "ListBackupsResponse", "ListSnapshotsRequest", "ListSnapshotsResponse", - "DeleteSnapshotRequest", + "ListTablesRequest", + "ListTablesResponse", + "ModifyColumnFamiliesRequest", + "OptimizeRestoredTableMetadata", + "RestoreTableMetadata", + "RestoreTableRequest", "SnapshotTableMetadata", - "CreateTableFromSnapshotMetadata", - "CreateBackupRequest", - "CreateBackupMetadata", + "SnapshotTableRequest", "UpdateBackupRequest", - "GetBackupRequest", - "DeleteBackupRequest", - "ListBackupsRequest", - "ListBackupsResponse", + "OperationProgress", + "StorageType", + "AppProfile", + "Cluster", + "Instance", + 
"Backup", + "BackupInfo", + "ColumnFamily", + "EncryptionInfo", + "GcRule", + "RestoreInfo", + "Snapshot", + "Table", + "RestoreSourceType", ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py index 0aa74d208985..d744f93de889 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py @@ -15,58 +15,58 @@ # limitations under the License. # -from .data import ( - Row, - Family, - Column, - Cell, - RowRange, - RowSet, - ColumnRange, - TimestampRange, - ValueRange, - RowFilter, - Mutation, - ReadModifyWriteRule, -) from .bigtable import ( - ReadRowsRequest, - ReadRowsResponse, - SampleRowKeysRequest, - SampleRowKeysResponse, + CheckAndMutateRowRequest, + CheckAndMutateRowResponse, MutateRowRequest, MutateRowResponse, MutateRowsRequest, MutateRowsResponse, - CheckAndMutateRowRequest, - CheckAndMutateRowResponse, ReadModifyWriteRowRequest, ReadModifyWriteRowResponse, + ReadRowsRequest, + ReadRowsResponse, + SampleRowKeysRequest, + SampleRowKeysResponse, +) +from .data import ( + Cell, + Column, + ColumnRange, + Family, + Mutation, + ReadModifyWriteRule, + Row, + RowFilter, + RowRange, + RowSet, + TimestampRange, + ValueRange, ) __all__ = ( - "Row", - "Family", - "Column", - "Cell", - "RowRange", - "RowSet", - "ColumnRange", - "TimestampRange", - "ValueRange", - "RowFilter", - "Mutation", - "ReadModifyWriteRule", - "ReadRowsRequest", - "ReadRowsResponse", - "SampleRowKeysRequest", - "SampleRowKeysResponse", + "CheckAndMutateRowRequest", + "CheckAndMutateRowResponse", "MutateRowRequest", "MutateRowResponse", "MutateRowsRequest", "MutateRowsResponse", - "CheckAndMutateRowRequest", - "CheckAndMutateRowResponse", "ReadModifyWriteRowRequest", "ReadModifyWriteRowResponse", + "ReadRowsRequest", + "ReadRowsResponse", + "SampleRowKeysRequest", + "SampleRowKeysResponse", + "Cell", + "Column", + "ColumnRange", + "Family", + "Mutation", + "ReadModifyWriteRule", + "Row", + "RowFilter", + "RowRange", + "RowSet", + "TimestampRange", + "ValueRange", ) diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index f4c43c18d4db..84e7654b20f5 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -4,15 +4,15 @@ "git": { "name": ".", "remote": "https://github.com/googleapis/python-bigtable.git", - "sha": "cbd712e6d3aded0c025525f97da1d667fbe2f061" + "sha": "79f1734c897e5e1b2fd02d043185c44b7ee34dc9" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "458338f8ca444e43b9df96b984b68f3978852b74", - "internalRef": "361301101" + "sha": "3aeb3a70f66457a9e6b07caff841719bb9873b57", + "internalRef": "361676678" } }, { From d3c0aec4bea59d44ed0febfe3ed41cba9ae88a77 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Fri, 12 Mar 2021 17:23:04 +0100 Subject: [PATCH 415/892] chore(deps): update dependency google-cloud-monitoring to v2.1.0 (#244) --- .../google-cloud-bigtable/samples/metricscaler/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index cbcdcd4774aa..428e6d9ec66d 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ 
b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ google-cloud-bigtable==1.7.0 -google-cloud-monitoring==2.0.1 +google-cloud-monitoring==2.1.0 From 9c5d62425744cbfe096ee1d23af45aad4bcfe0e8 Mon Sep 17 00:00:00 2001 From: Christopher Wilcox Date: Fri, 19 Mar 2021 13:34:37 -0700 Subject: [PATCH 416/892] fix: address issue in establishing an emulator connection (#246) Adjusts emulation code to use a newer method of creating a gRPC channel adds a test scenario to validate emulation. --- .../google/cloud/bigtable/client.py | 102 ++++++++++++------ packages/google-cloud-bigtable/noxfile.py | 26 +++++ .../google-cloud-bigtable/tests/system.py | 14 +-- .../tests/unit/test_client.py | 41 ++++--- 4 files changed, 129 insertions(+), 54 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py index 5e49934d0625..be536f2957dd 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py @@ -32,6 +32,7 @@ import grpc from google.api_core.gapic_v1 import client_info +import google.auth from google.cloud import bigtable_v2 from google.cloud import bigtable_admin_v2 @@ -69,17 +70,12 @@ def _create_gapic_client(client_class, client_options=None, transport=None): def inner(self): - if self._emulator_host is None: - return client_class( - credentials=None, - client_info=self._client_info, - client_options=client_options, - transport=transport, - ) - else: - return client_class( - channel=self._emulator_channel, client_info=self._client_info - ) + return client_class( + credentials=None, + client_info=self._client_info, + client_options=client_options, + transport=transport, + ) return inner @@ -166,16 +162,6 @@ def __init__( self._admin = bool(admin) self._client_info = client_info self._emulator_host = os.getenv(BIGTABLE_EMULATOR) - self._emulator_channel = None - - if self._emulator_host is not None: - self._emulator_channel = grpc.insecure_channel( - target=self._emulator_host, - options={ - "grpc.keepalive_time_ms": 30000, - "grpc.keepalive_timeout_ms": 10000, - }.items(), - ) if channel is not None: warnings.warn( @@ -208,22 +194,76 @@ def _get_scopes(self): return scopes + def _emulator_channel(self, transport, options): + """ + Creates a channel using self._credentials in a similar way to grpc.secure_channel but + using grpc.local_channel_credentials() rather than grpc.ssh_channel_credentials() + to allow easy connection to a local emulator. + :return: grpc.Channel or grpc.aio.Channel + """ + # TODO: Implement a special credentials type for emulator and use + # "transport.create_channel" to create gRPC channels once google-auth + # extends it's allowed credentials types. + # Note: this code also exists in the firestore client. + if "GrpcAsyncIOTransport" in str(transport.__name__): + return grpc.aio.secure_channel( + self._emulator_host, + self._local_composite_credentials(), + options=options, + ) + else: + return grpc.secure_channel( + self._emulator_host, + self._local_composite_credentials(), + options=options, + ) + + def _local_composite_credentials(self): + """ + Creates the credentials for the local emulator channel + :return: grpc.ChannelCredentials + """ + credentials = google.auth.credentials.with_scopes_if_required( + self._credentials, None + ) + request = google.auth.transport.requests.Request() + + # Create the metadata plugin for inserting the authorization header. 
+ metadata_plugin = google.auth.transport.grpc.AuthMetadataPlugin( + credentials, request + ) + + # Create a set of grpc.CallCredentials using the metadata plugin. + google_auth_credentials = grpc.metadata_call_credentials(metadata_plugin) + + # Using the local_credentials to allow connection to emulator + local_credentials = grpc.local_channel_credentials() + + # Combine the local credentials and the authorization credentials. + return grpc.composite_channel_credentials( + local_credentials, google_auth_credentials + ) + def _create_gapic_client_channel(self, client_class, grpc_transport): + options = { + "grpc.max_send_message_length": -1, + "grpc.max_receive_message_length": -1, + "grpc.keepalive_time_ms": 30000, + "grpc.keepalive_timeout_ms": 10000, + }.items() if self._client_options and self._client_options.api_endpoint: api_endpoint = self._client_options.api_endpoint else: api_endpoint = client_class.DEFAULT_ENDPOINT - channel = grpc_transport.create_channel( - host=api_endpoint, - credentials=self._credentials, - options={ - "grpc.max_send_message_length": -1, - "grpc.max_receive_message_length": -1, - "grpc.keepalive_time_ms": 30000, - "grpc.keepalive_timeout_ms": 10000, - }.items(), - ) + channel = None + if self._emulator_host is not None: + api_endpoint = self._emulator_host + channel = self._emulator_channel(grpc_transport, options) + else: + channel = grpc_transport.create_channel( + host=api_endpoint, credentials=self._credentials, options=options, + ) transport = grpc_transport(channel=channel, host=api_endpoint) return transport diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index 72b38757056a..84fbd0583d73 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -33,6 +33,7 @@ # 'docfx' is excluded since it only needs to run in 'docs-presubmit' nox.options.sessions = [ "unit", + "system_emulated", "system", "cover", "lint", @@ -111,6 +112,31 @@ def unit(session): default(session) +@nox.session(python="3.8") +def system_emulated(session): + import subprocess + import signal + + try: + subprocess.call(["gcloud", "--version"]) + except OSError: + session.skip("gcloud not found but required for emulator support") + + # Currently, CI/CD doesn't have beta component of gcloud. 
+ subprocess.call(["gcloud", "components", "install", "beta", "bigtable"]) + + hostport = "localhost:8789" + p = subprocess.Popen( + ["gcloud", "beta", "emulators", "bigtable", "start", "--host-port", hostport] + ) + + session.env["BIGTABLE_EMULATOR_HOST"] = hostport + system(session) + + # Stop Emulator + os.killpg(os.getpgid(p.pid), signal.SIGTERM) + + @nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS) def system(session): """Run the system test suite.""" diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index 84f9977e1d95..21a39eb29500 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -24,7 +24,8 @@ from google.cloud.environment_vars import BIGTABLE_EMULATOR from test_utils.retry import RetryErrors from test_utils.retry import RetryResult -from test_utils.system import EmulatorCreds + +# from test_utils.system import EmulatorCreds from test_utils.system import unique_resource_id from google.cloud._helpers import _datetime_from_microseconds @@ -114,11 +115,9 @@ def setUpModule(): Config.IN_EMULATOR = os.getenv(BIGTABLE_EMULATOR) is not None - if Config.IN_EMULATOR: - credentials = EmulatorCreds() - Config.CLIENT = Client(admin=True, credentials=credentials) - else: - Config.CLIENT = Client(admin=True) + # Previously we created clients using a mock EmulatorCreds when targeting + # an emulator. + Config.CLIENT = Client(admin=True) Config.INSTANCE = Config.CLIENT.instance(INSTANCE_ID, labels=LABELS) Config.CLUSTER = Config.INSTANCE.cluster( @@ -840,6 +839,9 @@ def test_delete_column_family(self): self.assertEqual(temp_table.list_column_families(), {}) def test_backup(self): + if Config.IN_EMULATOR: + self.skipTest("backups are not supported in the emulator") + from google.cloud._helpers import _datetime_to_pb_timestamp temp_table_id = "test-backup-table" diff --git a/packages/google-cloud-bigtable/tests/unit/test_client.py b/packages/google-cloud-bigtable/tests/unit/test_client.py index 5f2d7db26ab7..f6b8eb5bca71 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/test_client.py @@ -67,16 +67,23 @@ def test_w_emulator(self): client_class = mock.Mock() emulator_host = emulator_channel = object() credentials = _make_credentials() + client_options = mock.Mock() + transport = mock.Mock() + client = _Client( credentials, emulator_host=emulator_host, emulator_channel=emulator_channel ) client_info = client._client_info = mock.Mock() - - result = self._invoke_client_factory(client_class)(client) + result = self._invoke_client_factory( + client_class, client_options=client_options, transport=transport + )(client) self.assertIs(result, client_class.return_value) client_class.assert_called_once_with( - channel=client._emulator_channel, client_info=client_info + credentials=None, + client_info=client_info, + client_options=client_options, + transport=transport, ) @@ -121,7 +128,6 @@ def test_constructor_defaults(self): self.assertIs(client._client_info, _CLIENT_INFO) self.assertIsNone(client._channel) self.assertIsNone(client._emulator_host) - self.assertIsNone(client._emulator_channel) self.assertEqual(client.SCOPE, (DATA_SCOPE,)) def test_constructor_explicit(self): @@ -167,22 +173,23 @@ def test_constructor_with_emulator_host(self): credentials = _make_credentials() emulator_host = "localhost:8081" - with mock.patch("os.getenv") as getenv: - getenv.return_value = emulator_host - with 
mock.patch("grpc.insecure_channel") as factory: - getenv.return_value = emulator_host + with mock.patch("os.environ", {BIGTABLE_EMULATOR: emulator_host}): + with mock.patch("grpc.secure_channel") as factory: client = self._make_one(project=self.PROJECT, credentials=credentials) + # don't test local_composite_credentials + client._local_composite_credentials = lambda: credentials + # channels are formed when needed, so access a client + # create a gapic channel + client.table_data_client self.assertEqual(client._emulator_host, emulator_host) - self.assertIs(client._emulator_channel, factory.return_value) - factory.assert_called_once_with( - target=emulator_host, - options={ - "grpc.keepalive_time_ms": 30000, - "grpc.keepalive_timeout_ms": 10000, - }.items(), - ) - getenv.assert_called_once_with(BIGTABLE_EMULATOR) + options = { + "grpc.max_send_message_length": -1, + "grpc.max_receive_message_length": -1, + "grpc.keepalive_time_ms": 30000, + "grpc.keepalive_timeout_ms": 10000, + }.items() + factory.assert_called_once_with(emulator_host, credentials, options=options) def test__get_scopes_default(self): from google.cloud.bigtable.client import DATA_SCOPE From c324f61303e9c3b74a02eaff9f574040d39bf55a Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Tue, 23 Mar 2021 12:35:38 -0700 Subject: [PATCH 417/892] chore: alphabetize class names (#248) --- .../google/cloud/bigtable_admin_v2/__init__.py | 4 ++-- packages/google-cloud-bigtable/synth.metadata | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py index edfce27effc3..79a9bea684e0 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py @@ -88,7 +88,7 @@ "AppProfile", "Backup", "BackupInfo", - "BigtableInstanceAdminClient", + "BigtableTableAdminClient", "CheckConsistencyRequest", "CheckConsistencyResponse", "Cluster", @@ -151,5 +151,5 @@ "UpdateBackupRequest", "UpdateClusterMetadata", "UpdateInstanceMetadata", - "BigtableTableAdminClient", + "BigtableInstanceAdminClient", ) diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index 84e7654b20f5..a7a5cc1d25c6 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -4,7 +4,7 @@ "git": { "name": ".", "remote": "https://github.com/googleapis/python-bigtable.git", - "sha": "79f1734c897e5e1b2fd02d043185c44b7ee34dc9" + "sha": "cf3b9a1820780863911f4741fca4e9936cb79670" } }, { From 6a9b357ac8654ba40187903a3ca7e068e4cd9987 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Tue, 23 Mar 2021 19:50:58 -0700 Subject: [PATCH 418/892] chore: add pre-commit-config to renovate ignore paths (#247) Disable renovate PRs on the .pre-commit-config.yaml which is templated from synthtool. 
https://docs.renovatebot.com/configuration-options/#ignorepaths Source-Author: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Source-Date: Mon Mar 15 09:05:39 2021 -0600 Source-Repo: googleapis/synthtool Source-Sha: 2c54c473779ea731128cea61a3a6c975a08a5378 Source-Link: https://github.com/googleapis/synthtool/commit/2c54c473779ea731128cea61a3a6c975a08a5378 --- packages/google-cloud-bigtable/renovate.json | 3 ++- packages/google-cloud-bigtable/synth.metadata | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/packages/google-cloud-bigtable/renovate.json b/packages/google-cloud-bigtable/renovate.json index 4fa949311b20..f08bc22c9a55 100644 --- a/packages/google-cloud-bigtable/renovate.json +++ b/packages/google-cloud-bigtable/renovate.json @@ -1,5 +1,6 @@ { "extends": [ "config:base", ":preserveSemverRanges" - ] + ], + "ignorePaths": [".pre-commit-config.yaml"] } diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index a7a5cc1d25c6..f932182d6dbd 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -19,14 +19,14 @@ "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "0780323da96d5a53925fe0547757181fe76e8f1e" + "sha": "2c54c473779ea731128cea61a3a6c975a08a5378" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "0780323da96d5a53925fe0547757181fe76e8f1e" + "sha": "2c54c473779ea731128cea61a3a6c975a08a5378" } } ], From 7124b8e2f3d92f58c255d8bfcbac8b01f4d7e36d Mon Sep 17 00:00:00 2001 From: Mattie Fu Date: Wed, 24 Mar 2021 10:36:56 -0400 Subject: [PATCH 419/892] docs: add backup docs (#251) --- packages/google-cloud-bigtable/docs/backup.rst | 6 ++++++ packages/google-cloud-bigtable/docs/usage.rst | 1 + .../google-cloud-bigtable/google/cloud/bigtable/table.py | 2 ++ 3 files changed, 9 insertions(+) create mode 100644 packages/google-cloud-bigtable/docs/backup.rst diff --git a/packages/google-cloud-bigtable/docs/backup.rst b/packages/google-cloud-bigtable/docs/backup.rst new file mode 100644 index 000000000000..e75abd43143c --- /dev/null +++ b/packages/google-cloud-bigtable/docs/backup.rst @@ -0,0 +1,6 @@ +Backup +~~~~~~~~ + +.. automodule:: google.cloud.bigtable.backup + :members: + :show-inheritance: diff --git a/packages/google-cloud-bigtable/docs/usage.rst b/packages/google-cloud-bigtable/docs/usage.rst index 4e27768053e4..532f1ce8d367 100644 --- a/packages/google-cloud-bigtable/docs/usage.rst +++ b/packages/google-cloud-bigtable/docs/usage.rst @@ -9,6 +9,7 @@ Using the API cluster instance table + backup column-family row row-data diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index 740a65ae64d8..c2d11436243e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -832,6 +832,8 @@ def backup(self, backup_id, cluster_id=None, expire_time=None): :type expire_time: :class:`datetime.datetime` :param expire_time: (Optional) The expiration time of this new Backup. Required, if the `create` method needs to be called. + :rtype: :class:`.Backup` + :returns: A backup linked to this table. 
""" return Backup( backup_id, From d48b20191c5c7e324c89e54dc0e790fa05c849de Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 30 Mar 2021 16:24:04 +0200 Subject: [PATCH 420/892] chore(deps): update dependency google-cloud-monitoring to v2.2.1 (#274) [![WhiteSource Renovate](https://app.renovatebot.com/images/banner.svg)](https://renovatebot.com) This PR contains the following updates: | Package | Change | Age | Adoption | Passing | Confidence | |---|---|---|---|---|---| | [google-cloud-monitoring](https://togithub.com/googleapis/python-monitoring) | `==2.1.0` -> `==2.2.1` | [![age](https://badges.renovateapi.com/packages/pypi/google-cloud-monitoring/2.2.1/age-slim)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://badges.renovateapi.com/packages/pypi/google-cloud-monitoring/2.2.1/adoption-slim)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://badges.renovateapi.com/packages/pypi/google-cloud-monitoring/2.2.1/compatibility-slim/2.1.0)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://badges.renovateapi.com/packages/pypi/google-cloud-monitoring/2.2.1/confidence-slim/2.1.0)](https://docs.renovatebot.com/merge-confidence/) | --- ### Release Notes
googleapis/python-monitoring ### [`v2.2.1`](https://togithub.com/googleapis/python-monitoring/blob/master/CHANGELOG.md#​221-httpswwwgithubcomgoogleapispython-monitoringcomparev220v221-2021-03-29) [Compare Source](https://togithub.com/googleapis/python-monitoring/compare/v2.2.0...v2.2.1) ### [`v2.2.0`](https://togithub.com/googleapis/python-monitoring/blob/master/CHANGELOG.md#​220-httpswwwgithubcomgoogleapispython-monitoringcomparev210v220-2021-03-25) [Compare Source](https://togithub.com/googleapis/python-monitoring/compare/v2.1.0...v2.2.0) ##### Features - add `client_cert_source_for_mtls` ([0eb2ca6](https://www.github.com/googleapis/python-monitoring/commit/0eb2ca6b5044553c11d5f5e0f4859bf65387909b)) - Added `IstioCanonicalService` for service monitoring. ([0eb2ca6](https://www.github.com/googleapis/python-monitoring/commit/0eb2ca6b5044553c11d5f5e0f4859bf65387909b)) - Added `total_size` to the response of `ListAlertPolicies`. ([0eb2ca6](https://www.github.com/googleapis/python-monitoring/commit/0eb2ca6b5044553c11d5f5e0f4859bf65387909b)) - Added creation and mutation records to notification channels. ([0eb2ca6](https://www.github.com/googleapis/python-monitoring/commit/0eb2ca6b5044553c11d5f5e0f4859bf65387909b)) - Added support for Monitoring Query Language ([0eb2ca6](https://www.github.com/googleapis/python-monitoring/commit/0eb2ca6b5044553c11d5f5e0f4859bf65387909b)) - Added support for Monitoring Query Language ([#​101](https://www.github.com/googleapis/python-monitoring/issues/101)) ([0eb2ca6](https://www.github.com/googleapis/python-monitoring/commit/0eb2ca6b5044553c11d5f5e0f4859bf65387909b)) - Added support for querying metrics for folders and organizations. ([0eb2ca6](https://www.github.com/googleapis/python-monitoring/commit/0eb2ca6b5044553c11d5f5e0f4859bf65387909b)) - Added support for secondary aggregation when querying metrics. ([0eb2ca6](https://www.github.com/googleapis/python-monitoring/commit/0eb2ca6b5044553c11d5f5e0f4859bf65387909b)) - Added support for units in the `MetricService` ([0eb2ca6](https://www.github.com/googleapis/python-monitoring/commit/0eb2ca6b5044553c11d5f5e0f4859bf65387909b)) ##### Bug Fixes - Extended the default deadline for `UpdateGroup` to 180s. ([0eb2ca6](https://www.github.com/googleapis/python-monitoring/commit/0eb2ca6b5044553c11d5f5e0f4859bf65387909b)) - Un-deprecated `cluster_istio` for service monitoring. ([0eb2ca6](https://www.github.com/googleapis/python-monitoring/commit/0eb2ca6b5044553c11d5f5e0f4859bf65387909b)) - use correct retry deadline ([0eb2ca6](https://www.github.com/googleapis/python-monitoring/commit/0eb2ca6b5044553c11d5f5e0f4859bf65387909b))
--- ### Renovate configuration :date: **Schedule**: At any time (no schedule defined). :vertical_traffic_light: **Automerge**: Disabled by config. Please merge this manually once you are satisfied. :recycle: **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. :no_bell: **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [WhiteSource Renovate](https://renovate.whitesourcesoftware.com). View repository job log [here](https://app.renovatebot.com/dashboard#github/googleapis/python-bigtable). --- .../google-cloud-bigtable/samples/metricscaler/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index 428e6d9ec66d..ebf403be9844 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ google-cloud-bigtable==1.7.0 -google-cloud-monitoring==2.1.0 +google-cloud-monitoring==2.2.1 From 643960112ea0aee6184098900003560cc95d2c11 Mon Sep 17 00:00:00 2001 From: kolea2 <45548808+kolea2@users.noreply.github.com> Date: Tue, 30 Mar 2021 10:58:42 -0400 Subject: [PATCH 421/892] chore: ignore noxfile in generation (#273) --- packages/google-cloud-bigtable/synth.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/synth.py b/packages/google-cloud-bigtable/synth.py index 185ee6942fd5..500f95321ff2 100644 --- a/packages/google-cloud-bigtable/synth.py +++ b/packages/google-cloud-bigtable/synth.py @@ -55,7 +55,7 @@ microgenerator=True, cov_level=99 ) -s.move(templated_files, excludes=[".coveragerc"]) +s.move(templated_files, excludes=[".coveragerc", "noxfile.py"]) # ---------------------------------------------------------------------------- # Samples templates From 1d078480b497b3bc6ef274f0d6ab7351a54a4d62 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Wed, 31 Mar 2021 07:40:03 -0700 Subject: [PATCH 422/892] chore: add kokoro configs for periodic builds against head (#277) This PR was generated using Autosynth. :rainbow: Synth log will be available here: https://source.cloud.google.com/results/invocations/70d3f6f9-d2bb-4d60-b274-1766d46faf80/targets - [ ] To automatically regenerate this PR, check this box. (May take up to 24 hours.) 
Source-Link: https://github.com/googleapis/synthtool/commit/f5c5904fb0c6aa3b3730eadf4e5a4485afc65726 Source-Link: https://github.com/googleapis/synthtool/commit/79c8dd7ee768292f933012d3a69a5b4676404cda --- .../samples/python3.6/periodic-head.cfg | 11 ++ .../samples/python3.7/periodic-head.cfg | 11 ++ .../samples/python3.8/periodic-head.cfg | 11 ++ .../.kokoro/test-samples-against-head.sh | 28 +++++ .../.kokoro/test-samples-impl.sh | 102 ++++++++++++++++++ .../.kokoro/test-samples.sh | 96 +++-------------- .../.pre-commit-config.yaml | 2 +- packages/google-cloud-bigtable/synth.metadata | 12 ++- 8 files changed, 188 insertions(+), 85 deletions(-) create mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.6/periodic-head.cfg create mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.7/periodic-head.cfg create mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.8/periodic-head.cfg create mode 100755 packages/google-cloud-bigtable/.kokoro/test-samples-against-head.sh create mode 100755 packages/google-cloud-bigtable/.kokoro/test-samples-impl.sh diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.6/periodic-head.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.6/periodic-head.cfg new file mode 100644 index 000000000000..f9cfcd33e058 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.6/periodic-head.cfg @@ -0,0 +1,11 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-pubsub/.kokoro/test-samples-against-head.sh" +} diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.7/periodic-head.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.7/periodic-head.cfg new file mode 100644 index 000000000000..f9cfcd33e058 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.7/periodic-head.cfg @@ -0,0 +1,11 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-pubsub/.kokoro/test-samples-against-head.sh" +} diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.8/periodic-head.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.8/periodic-head.cfg new file mode 100644 index 000000000000..f9cfcd33e058 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.8/periodic-head.cfg @@ -0,0 +1,11 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-pubsub/.kokoro/test-samples-against-head.sh" +} diff --git a/packages/google-cloud-bigtable/.kokoro/test-samples-against-head.sh b/packages/google-cloud-bigtable/.kokoro/test-samples-against-head.sh new file mode 100755 index 000000000000..2dda9815b6d3 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/test-samples-against-head.sh @@ -0,0 +1,28 @@ +#!/bin/bash +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# A customized test runner for samples. +# +# For periodic builds, you can specify this file for testing against head. + +# `-e` enables the script to automatically fail when a command fails +# `-o pipefail` sets the exit code to the rightmost comment to exit with a non-zero +set -eo pipefail +# Enables `**` to include files nested inside sub-folders +shopt -s globstar + +cd github/python-bigtable + +exec .kokoro/test-samples-impl.sh diff --git a/packages/google-cloud-bigtable/.kokoro/test-samples-impl.sh b/packages/google-cloud-bigtable/.kokoro/test-samples-impl.sh new file mode 100755 index 000000000000..cf5de74c17a5 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/test-samples-impl.sh @@ -0,0 +1,102 @@ +#!/bin/bash +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# `-e` enables the script to automatically fail when a command fails +# `-o pipefail` sets the exit code to the rightmost comment to exit with a non-zero +set -eo pipefail +# Enables `**` to include files nested inside sub-folders +shopt -s globstar + +# Exit early if samples directory doesn't exist +if [ ! -d "./samples" ]; then + echo "No tests run. `./samples` not found" + exit 0 +fi + +# Disable buffering, so that the logs stream through. +export PYTHONUNBUFFERED=1 + +# Debug: show build environment +env | grep KOKORO + +# Install nox +python3.6 -m pip install --upgrade --quiet nox + +# Use secrets acessor service account to get secrets +if [[ -f "${KOKORO_GFILE_DIR}/secrets_viewer_service_account.json" ]]; then + gcloud auth activate-service-account \ + --key-file="${KOKORO_GFILE_DIR}/secrets_viewer_service_account.json" \ + --project="cloud-devrel-kokoro-resources" +fi + +# This script will create 3 files: +# - testing/test-env.sh +# - testing/service-account.json +# - testing/client-secrets.json +./scripts/decrypt-secrets.sh + +source ./testing/test-env.sh +export GOOGLE_APPLICATION_CREDENTIALS=$(pwd)/testing/service-account.json + +# For cloud-run session, we activate the service account for gcloud sdk. +gcloud auth activate-service-account \ + --key-file "${GOOGLE_APPLICATION_CREDENTIALS}" + +export GOOGLE_CLIENT_SECRETS=$(pwd)/testing/client-secrets.json + +echo -e "\n******************** TESTING PROJECTS ********************" + +# Switch to 'fail at end' to allow all tests to complete before exiting. +set +e +# Use RTN to return a non-zero value if the test fails. +RTN=0 +ROOT=$(pwd) +# Find all requirements.txt in the samples directory (may break on whitespace). +for file in samples/**/requirements.txt; do + cd "$ROOT" + # Navigate to the project folder. 
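+    # ("$file" starts out as the path to a requirements.txt; after the next
+    # line it holds the containing sample directory instead.)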
+ file=$(dirname "$file") + cd "$file" + + echo "------------------------------------------------------------" + echo "- testing $file" + echo "------------------------------------------------------------" + + # Use nox to execute the tests for the project. + python3.6 -m nox -s "$RUN_TESTS_SESSION" + EXIT=$? + + # If this is a periodic build, send the test log to the FlakyBot. + # See https://github.com/googleapis/repo-automation-bots/tree/master/packages/flakybot. + if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"periodic"* ]]; then + chmod +x $KOKORO_GFILE_DIR/linux_amd64/flakybot + $KOKORO_GFILE_DIR/linux_amd64/flakybot + fi + + if [[ $EXIT -ne 0 ]]; then + RTN=1 + echo -e "\n Testing failed: Nox returned a non-zero exit code. \n" + else + echo -e "\n Testing completed.\n" + fi + +done +cd "$ROOT" + +# Workaround for Kokoro permissions issue: delete secrets +rm testing/{test-env.sh,client-secrets.json,service-account.json} + +exit "$RTN" diff --git a/packages/google-cloud-bigtable/.kokoro/test-samples.sh b/packages/google-cloud-bigtable/.kokoro/test-samples.sh index 4dc285283546..4666d34f90e5 100755 --- a/packages/google-cloud-bigtable/.kokoro/test-samples.sh +++ b/packages/google-cloud-bigtable/.kokoro/test-samples.sh @@ -13,6 +13,10 @@ # See the License for the specific language governing permissions and # limitations under the License. +# The default test runner for samples. +# +# For periodic builds, we rewinds the repo to the latest release, and +# run test-samples-impl.sh. # `-e` enables the script to automatically fail when a command fails # `-o pipefail` sets the exit code to the rightmost comment to exit with a non-zero @@ -24,87 +28,19 @@ cd github/python-bigtable # Run periodic samples tests at latest release if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"periodic"* ]]; then + # preserving the test runner implementation. + cp .kokoro/test-samples-impl.sh "${TMPDIR}/test-samples-impl.sh" + echo "--- IMPORTANT IMPORTANT IMPORTANT ---" + echo "Now we rewind the repo back to the latest release..." LATEST_RELEASE=$(git describe --abbrev=0 --tags) git checkout $LATEST_RELEASE -fi - -# Exit early if samples directory doesn't exist -if [ ! -d "./samples" ]; then - echo "No tests run. `./samples` not found" - exit 0 -fi - -# Disable buffering, so that the logs stream through. -export PYTHONUNBUFFERED=1 - -# Debug: show build environment -env | grep KOKORO - -# Install nox -python3.6 -m pip install --upgrade --quiet nox - -# Use secrets acessor service account to get secrets -if [[ -f "${KOKORO_GFILE_DIR}/secrets_viewer_service_account.json" ]]; then - gcloud auth activate-service-account \ - --key-file="${KOKORO_GFILE_DIR}/secrets_viewer_service_account.json" \ - --project="cloud-devrel-kokoro-resources" -fi - -# This script will create 3 files: -# - testing/test-env.sh -# - testing/service-account.json -# - testing/client-secrets.json -./scripts/decrypt-secrets.sh - -source ./testing/test-env.sh -export GOOGLE_APPLICATION_CREDENTIALS=$(pwd)/testing/service-account.json - -# For cloud-run session, we activate the service account for gcloud sdk. -gcloud auth activate-service-account \ - --key-file "${GOOGLE_APPLICATION_CREDENTIALS}" - -export GOOGLE_CLIENT_SECRETS=$(pwd)/testing/client-secrets.json - -echo -e "\n******************** TESTING PROJECTS ********************" - -# Switch to 'fail at end' to allow all tests to complete before exiting. -set +e -# Use RTN to return a non-zero value if the test fails. 
-RTN=0 -ROOT=$(pwd) -# Find all requirements.txt in the samples directory (may break on whitespace). -for file in samples/**/requirements.txt; do - cd "$ROOT" - # Navigate to the project folder. - file=$(dirname "$file") - cd "$file" - - echo "------------------------------------------------------------" - echo "- testing $file" - echo "------------------------------------------------------------" - - # Use nox to execute the tests for the project. - python3.6 -m nox -s "$RUN_TESTS_SESSION" - EXIT=$? - - # If this is a periodic build, send the test log to the FlakyBot. - # See https://github.com/googleapis/repo-automation-bots/tree/master/packages/flakybot. - if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"periodic"* ]]; then - chmod +x $KOKORO_GFILE_DIR/linux_amd64/flakybot - $KOKORO_GFILE_DIR/linux_amd64/flakybot + echo "The current head is: " + echo $(git rev-parse --verify HEAD) + echo "--- IMPORTANT IMPORTANT IMPORTANT ---" + # move back the test runner implementation if there's no file. + if [ ! -f .kokoro/test-samples-impl.sh ]; then + cp "${TMPDIR}/test-samples-impl.sh" .kokoro/test-samples-impl.sh fi +fi - if [[ $EXIT -ne 0 ]]; then - RTN=1 - echo -e "\n Testing failed: Nox returned a non-zero exit code. \n" - else - echo -e "\n Testing completed.\n" - fi - -done -cd "$ROOT" - -# Workaround for Kokoro permissions issue: delete secrets -rm testing/{test-env.sh,client-secrets.json,service-account.json} - -exit "$RTN" +exec .kokoro/test-samples-impl.sh diff --git a/packages/google-cloud-bigtable/.pre-commit-config.yaml b/packages/google-cloud-bigtable/.pre-commit-config.yaml index a9024b15d725..32302e4883a1 100644 --- a/packages/google-cloud-bigtable/.pre-commit-config.yaml +++ b/packages/google-cloud-bigtable/.pre-commit-config.yaml @@ -12,6 +12,6 @@ repos: hooks: - id: black - repo: https://gitlab.com/pycqa/flake8 - rev: 3.8.4 + rev: 3.9.0 hooks: - id: flake8 diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index f932182d6dbd..b9ce82dc0516 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -4,7 +4,7 @@ "git": { "name": ".", "remote": "https://github.com/googleapis/python-bigtable.git", - "sha": "cf3b9a1820780863911f4741fca4e9936cb79670" + "sha": "5089335c220c16ff3675f01b76f7d4e0dc2219e3" } }, { @@ -19,14 +19,14 @@ "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "2c54c473779ea731128cea61a3a6c975a08a5378" + "sha": "f5c5904fb0c6aa3b3730eadf4e5a4485afc65726" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "2c54c473779ea731128cea61a3a6c975a08a5378" + "sha": "f5c5904fb0c6aa3b3730eadf4e5a4485afc65726" } } ], @@ -82,16 +82,21 @@ ".kokoro/samples/lint/presubmit.cfg", ".kokoro/samples/python3.6/common.cfg", ".kokoro/samples/python3.6/continuous.cfg", + ".kokoro/samples/python3.6/periodic-head.cfg", ".kokoro/samples/python3.6/periodic.cfg", ".kokoro/samples/python3.6/presubmit.cfg", ".kokoro/samples/python3.7/common.cfg", ".kokoro/samples/python3.7/continuous.cfg", + ".kokoro/samples/python3.7/periodic-head.cfg", ".kokoro/samples/python3.7/periodic.cfg", ".kokoro/samples/python3.7/presubmit.cfg", ".kokoro/samples/python3.8/common.cfg", ".kokoro/samples/python3.8/continuous.cfg", + ".kokoro/samples/python3.8/periodic-head.cfg", ".kokoro/samples/python3.8/periodic.cfg", ".kokoro/samples/python3.8/presubmit.cfg", + ".kokoro/test-samples-against-head.sh", + 
".kokoro/test-samples-impl.sh", ".kokoro/test-samples.sh", ".kokoro/trampoline.sh", ".kokoro/trampoline_v2.sh", @@ -150,7 +155,6 @@ "google/cloud/bigtable_v2/types/__init__.py", "google/cloud/bigtable_v2/types/bigtable.py", "google/cloud/bigtable_v2/types/data.py", - "noxfile.py", "renovate.json", "samples/AUTHORING_GUIDE.md", "samples/CONTRIBUTING.md", From 4f89e0b3aad3f04f0dd880381c946eb7237f0e72 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Thu, 1 Apr 2021 10:21:33 -0700 Subject: [PATCH 423/892] deps: upgrade gapic-generator-python to 0.43.1 (#276) * build: use gapic-generator-typescript v1.2.11. Fixed IAM v1 library generation. Committer: @alexander-fenster PiperOrigin-RevId: 361676678 Source-Author: Google APIs Source-Date: Mon Mar 8 15:51:18 2021 -0800 Source-Repo: googleapis/googleapis Source-Sha: 3aeb3a70f66457a9e6b07caff841719bb9873b57 Source-Link: https://github.com/googleapis/googleapis/commit/3aeb3a70f66457a9e6b07caff841719bb9873b57 * chore: upgrade gapic-generator-python to 0.43.1 PiperOrigin-RevId: 364411656 Source-Author: Google APIs Source-Date: Mon Mar 22 14:40:22 2021 -0700 Source-Repo: googleapis/googleapis Source-Sha: 149a3a84c29c9b8189576c7442ccb6dcf6a8f95b Source-Link: https://github.com/googleapis/googleapis/commit/149a3a84c29c9b8189576c7442ccb6dcf6a8f95b * feat: add `kind` field which is used to distinguish between response types feat: add `potentially_thresholded_requests_per_hour` field to `PropertyQuota` PiperOrigin-RevId: 365882072 Source-Author: Google APIs Source-Date: Tue Mar 30 13:17:13 2021 -0700 Source-Repo: googleapis/googleapis Source-Sha: 95dd24960cf9f794ef583e59ad9f1fabe1c4a924 Source-Link: https://github.com/googleapis/googleapis/commit/95dd24960cf9f794ef583e59ad9f1fabe1c4a924 --- .../bigtable_instance_admin/async_client.py | 12 ++ .../transports/base.py | 30 +++-- .../transports/grpc.py | 103 ++++++---------- .../transports/grpc_asyncio.py | 111 +++++++----------- .../bigtable_table_admin/async_client.py | 10 ++ .../bigtable_table_admin/transports/base.py | 28 +++-- .../bigtable_table_admin/transports/grpc.py | 103 ++++++---------- .../transports/grpc_asyncio.py | 111 +++++++----------- .../services/bigtable/async_client.py | 6 + .../services/bigtable/transports/base.py | 24 ++-- .../services/bigtable/transports/grpc.py | 101 ++++++---------- .../bigtable/transports/grpc_asyncio.py | 109 +++++++---------- packages/google-cloud-bigtable/synth.metadata | 4 +- 13 files changed, 323 insertions(+), 429 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py index f316ef48b88c..4c849e3cb9f4 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ -387,6 +387,7 @@ async def get_instance( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=60.0, ), default_timeout=60.0, client_info=DEFAULT_CLIENT_INFO, @@ -469,6 +470,7 @@ async def list_instances( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=60.0, ), default_timeout=60.0, client_info=DEFAULT_CLIENT_INFO, @@ -538,6 +540,7 @@ async def update_instance( predicate=retries.if_exception_type( 
exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=60.0, ), default_timeout=60.0, client_info=DEFAULT_CLIENT_INFO, @@ -637,6 +640,7 @@ async def partial_update_instance( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=60.0, ), default_timeout=60.0, client_info=DEFAULT_CLIENT_INFO, @@ -906,6 +910,7 @@ async def get_cluster( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=60.0, ), default_timeout=60.0, client_info=DEFAULT_CLIENT_INFO, @@ -990,6 +995,7 @@ async def list_clusters( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=60.0, ), default_timeout=60.0, client_info=DEFAULT_CLIENT_INFO, @@ -1055,6 +1061,7 @@ async def update_cluster( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=60.0, ), default_timeout=60.0, client_info=DEFAULT_CLIENT_INFO, @@ -1310,6 +1317,7 @@ async def get_app_profile( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=60.0, ), default_timeout=60.0, client_info=DEFAULT_CLIENT_INFO, @@ -1398,6 +1406,7 @@ async def list_app_profiles( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=60.0, ), default_timeout=60.0, client_info=DEFAULT_CLIENT_INFO, @@ -1498,6 +1507,7 @@ async def update_app_profile( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=60.0, ), default_timeout=60.0, client_info=DEFAULT_CLIENT_INFO, @@ -1713,6 +1723,7 @@ async def get_iam_policy( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=60.0, ), default_timeout=60.0, client_info=DEFAULT_CLIENT_INFO, @@ -1936,6 +1947,7 @@ async def test_iam_permissions( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=60.0, ), default_timeout=60.0, client_info=DEFAULT_CLIENT_INFO, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py index 004424c28d97..9a60430e7982 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py @@ -83,10 +83,10 @@ def __init__( scope (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. """ # Save the hostname. Default to port 443 (HTTPS) if none is specified. @@ -94,6 +94,9 @@ def __init__( host += ":443" self._host = host + # Save the scopes. 
+ self._scopes = scopes or self.AUTH_SCOPES + # If no credentials are provided, then determine the appropriate # defaults. if credentials and credentials_file: @@ -103,20 +106,17 @@ def __init__( if credentials_file is not None: credentials, _ = auth.load_credentials_from_file( - credentials_file, scopes=scopes, quota_project_id=quota_project_id + credentials_file, scopes=self._scopes, quota_project_id=quota_project_id ) elif credentials is None: credentials, _ = auth.default( - scopes=scopes, quota_project_id=quota_project_id + scopes=self._scopes, quota_project_id=quota_project_id ) # Save the credentials. self._credentials = credentials - # Lifted into its own function so it can be stubbed out during tests. - self._prep_wrapped_messages(client_info) - def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. self._wrapped_methods = { @@ -132,6 +132,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=60.0, ), default_timeout=60.0, client_info=client_info, @@ -145,6 +146,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=60.0, ), default_timeout=60.0, client_info=client_info, @@ -158,6 +160,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=60.0, ), default_timeout=60.0, client_info=client_info, @@ -171,6 +174,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=60.0, ), default_timeout=60.0, client_info=client_info, @@ -190,6 +194,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=60.0, ), default_timeout=60.0, client_info=client_info, @@ -203,6 +208,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=60.0, ), default_timeout=60.0, client_info=client_info, @@ -216,6 +222,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=60.0, ), default_timeout=60.0, client_info=client_info, @@ -235,6 +242,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=60.0, ), default_timeout=60.0, client_info=client_info, @@ -248,6 +256,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=60.0, ), default_timeout=60.0, client_info=client_info, @@ -261,6 +270,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=60.0, ), default_timeout=60.0, client_info=client_info, @@ -277,6 +287,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=60.0, ), default_timeout=60.0, client_info=client_info, @@ -293,6 +304,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( 
exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=60.0, ), default_timeout=60.0, client_info=client_info, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py index 0cbca1c6762a..9d204473a017 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py @@ -115,7 +115,10 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -123,70 +126,50 @@ def __init__( warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - ssl_credentials = SslCredentials().ssl_credentials - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials else: - host = host if ":" in host else host + ":443" + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, + scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -194,18 +177,8 @@ def __init__( ], ) - self._stubs = {} # type: Dict[str, Callable] - self._operations_client = None - - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @classmethod def create_channel( @@ -219,7 +192,7 @@ def create_channel( ) -> grpc.Channel: """Create and return a gRPC channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py index e5fbf6a4c7e7..d2bc4647b3e0 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py @@ -70,7 +70,7 @@ def create_channel( ) -> aio.Channel: """Create and return a gRPC AsyncIO channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -148,10 +148,10 @@ def __init__( ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. 
- client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. Raises: @@ -160,7 +160,10 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -168,70 +171,50 @@ def __init__( warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - ssl_credentials = SslCredentials().ssl_credentials - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials else: - host = host if ":" in host else host + ":443" + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, + scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -239,18 +222,8 @@ def __init__( ], ) - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) - - self._stubs = {} - self._operations_client = None + # Wrap messages. 
This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @property def grpc_channel(self) -> aio.Channel: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index 1e3e817bc849..e7b708305781 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -485,6 +485,7 @@ async def list_tables( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=60.0, ), default_timeout=60.0, client_info=DEFAULT_CLIENT_INFO, @@ -575,6 +576,7 @@ async def get_table( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=60.0, ), default_timeout=60.0, client_info=DEFAULT_CLIENT_INFO, @@ -874,6 +876,7 @@ async def generate_consistency_token( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=60.0, ), default_timeout=60.0, client_info=DEFAULT_CLIENT_INFO, @@ -969,6 +972,7 @@ async def check_consistency( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=60.0, ), default_timeout=60.0, client_info=DEFAULT_CLIENT_INFO, @@ -1212,6 +1216,7 @@ async def get_snapshot( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=60.0, ), default_timeout=60.0, client_info=DEFAULT_CLIENT_INFO, @@ -1320,6 +1325,7 @@ async def list_snapshots( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=60.0, ), default_timeout=60.0, client_info=DEFAULT_CLIENT_INFO, @@ -1602,6 +1608,7 @@ async def get_backup( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=60.0, ), default_timeout=60.0, client_info=DEFAULT_CLIENT_INFO, @@ -1850,6 +1857,7 @@ async def list_backups( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=60.0, ), default_timeout=60.0, client_info=DEFAULT_CLIENT_INFO, @@ -2064,6 +2072,7 @@ async def get_iam_policy( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=60.0, ), default_timeout=60.0, client_info=DEFAULT_CLIENT_INFO, @@ -2287,6 +2296,7 @@ async def test_iam_permissions( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=60.0, ), default_timeout=60.0, client_info=DEFAULT_CLIENT_INFO, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py index 2958323b730a..731f83280081 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py @@ -83,10 +83,10 @@ def __init__( scope (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. 
- client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. """ # Save the hostname. Default to port 443 (HTTPS) if none is specified. @@ -94,6 +94,9 @@ def __init__( host += ":443" self._host = host + # Save the scopes. + self._scopes = scopes or self.AUTH_SCOPES + # If no credentials are provided, then determine the appropriate # defaults. if credentials and credentials_file: @@ -103,20 +106,17 @@ def __init__( if credentials_file is not None: credentials, _ = auth.load_credentials_from_file( - credentials_file, scopes=scopes, quota_project_id=quota_project_id + credentials_file, scopes=self._scopes, quota_project_id=quota_project_id ) elif credentials is None: credentials, _ = auth.default( - scopes=scopes, quota_project_id=quota_project_id + scopes=self._scopes, quota_project_id=quota_project_id ) # Save the credentials. self._credentials = credentials - # Lifted into its own function so it can be stubbed out during tests. - self._prep_wrapped_messages(client_info) - def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. self._wrapped_methods = { @@ -137,6 +137,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=60.0, ), default_timeout=60.0, client_info=client_info, @@ -150,6 +151,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=60.0, ), default_timeout=60.0, client_info=client_info, @@ -174,6 +176,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=60.0, ), default_timeout=60.0, client_info=client_info, @@ -187,6 +190,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=60.0, ), default_timeout=60.0, client_info=client_info, @@ -203,6 +207,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=60.0, ), default_timeout=60.0, client_info=client_info, @@ -216,6 +221,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=60.0, ), default_timeout=60.0, client_info=client_info, @@ -235,6 +241,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=60.0, ), default_timeout=60.0, client_info=client_info, @@ -254,6 +261,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=60.0, ), default_timeout=60.0, client_info=client_info, @@ -270,6 +278,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( 
exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=60.0, ), default_timeout=60.0, client_info=client_info, @@ -286,6 +295,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=60.0, ), default_timeout=60.0, client_info=client_info, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py index 4f54f3a7ee70..6b890ff7f253 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py @@ -117,7 +117,10 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -125,70 +128,50 @@ def __init__( warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - ssl_credentials = SslCredentials().ssl_credentials - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials else: - host = host if ":" in host else host + ":443" + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, + scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -196,18 +179,8 @@ def __init__( ], ) - self._stubs = {} # type: Dict[str, Callable] - self._operations_client = None - - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @classmethod def create_channel( @@ -221,7 +194,7 @@ def create_channel( ) -> grpc.Channel: """Create and return a gRPC channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py index 8e9197468ae3..0cabde5f18d6 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py @@ -72,7 +72,7 @@ def create_channel( ) -> aio.Channel: """Create and return a gRPC AsyncIO channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -150,10 +150,10 @@ def __init__( ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. 
- client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. Raises: @@ -162,7 +162,10 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -170,70 +173,50 @@ def __init__( warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - ssl_credentials = SslCredentials().ssl_credentials - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials else: - host = host if ":" in host else host + ":443" + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, + scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -241,18 +224,8 @@ def __init__( ], ) - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) - - self._stubs = {} - self._operations_client = None + # Wrap messages. 
This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @property def grpc_channel(self) -> aio.Channel: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py index 215ec1432c4a..2cbb94a9b2ab 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -243,6 +243,7 @@ def read_rows( maximum=60.0, multiplier=2, predicate=retries.if_exception_type(), + deadline=43200.0, ), default_timeout=43200.0, client_info=DEFAULT_CLIENT_INFO, @@ -341,6 +342,7 @@ def sample_row_keys( maximum=60.0, multiplier=2, predicate=retries.if_exception_type(), + deadline=60.0, ), default_timeout=60.0, client_info=DEFAULT_CLIENT_INFO, @@ -463,6 +465,7 @@ async def mutate_row( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=60.0, ), default_timeout=60.0, client_info=DEFAULT_CLIENT_INFO, @@ -577,6 +580,7 @@ def mutate_rows( maximum=60.0, multiplier=2, predicate=retries.if_exception_type(), + deadline=600.0, ), default_timeout=600.0, client_info=DEFAULT_CLIENT_INFO, @@ -735,6 +739,7 @@ async def check_and_mutate_row( maximum=60.0, multiplier=2, predicate=retries.if_exception_type(), + deadline=20.0, ), default_timeout=20.0, client_info=DEFAULT_CLIENT_INFO, @@ -862,6 +867,7 @@ async def read_modify_write_row( maximum=60.0, multiplier=2, predicate=retries.if_exception_type(), + deadline=20.0, ), default_timeout=20.0, client_info=DEFAULT_CLIENT_INFO, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py index 8f3d81687203..5c362374c62e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py @@ -74,10 +74,10 @@ def __init__( scope (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. """ # Save the hostname. Default to port 443 (HTTPS) if none is specified. @@ -85,6 +85,9 @@ def __init__( host += ":443" self._host = host + # Save the scopes. + self._scopes = scopes or self.AUTH_SCOPES + # If no credentials are provided, then determine the appropriate # defaults. 
if credentials and credentials_file: @@ -94,20 +97,17 @@ def __init__( if credentials_file is not None: credentials, _ = auth.load_credentials_from_file( - credentials_file, scopes=scopes, quota_project_id=quota_project_id + credentials_file, scopes=self._scopes, quota_project_id=quota_project_id ) elif credentials is None: credentials, _ = auth.default( - scopes=scopes, quota_project_id=quota_project_id + scopes=self._scopes, quota_project_id=quota_project_id ) # Save the credentials. self._credentials = credentials - # Lifted into its own function so it can be stubbed out during tests. - self._prep_wrapped_messages(client_info) - def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. self._wrapped_methods = { @@ -118,6 +118,7 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=2, predicate=retries.if_exception_type(), + deadline=43200.0, ), default_timeout=43200.0, client_info=client_info, @@ -129,6 +130,7 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=2, predicate=retries.if_exception_type(), + deadline=60.0, ), default_timeout=60.0, client_info=client_info, @@ -142,6 +144,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=60.0, ), default_timeout=60.0, client_info=client_info, @@ -153,6 +156,7 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=2, predicate=retries.if_exception_type(), + deadline=600.0, ), default_timeout=600.0, client_info=client_info, @@ -164,6 +168,7 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=2, predicate=retries.if_exception_type(), + deadline=20.0, ), default_timeout=20.0, client_info=client_info, @@ -175,6 +180,7 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=2, predicate=retries.if_exception_type(), + deadline=20.0, ), default_timeout=20.0, client_info=client_info, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py index 6b34e8ab0039..c19847171716 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py @@ -107,7 +107,9 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -115,70 +117,50 @@ def __init__( warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. 
- if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - ssl_credentials = SslCredentials().ssl_credentials - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials else: - host = host if ":" in host else host + ":443" + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, + scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -186,17 +168,8 @@ def __init__( ], ) - self._stubs = {} # type: Dict[str, Callable] - - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @classmethod def create_channel( @@ -210,7 +183,7 @@ def create_channel( ) -> grpc.Channel: """Create and return a gRPC channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. 
If diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py index aa7ff2ecc7d6..5d722ce908e9 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py @@ -62,7 +62,7 @@ def create_channel( ) -> aio.Channel: """Create and return a gRPC AsyncIO channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -140,10 +140,10 @@ def __init__( ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. Raises: @@ -152,7 +152,9 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -160,70 +162,50 @@ def __init__( warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - ssl_credentials = SslCredentials().ssl_credentials - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials else: - host = host if ":" in host else host + ":443" + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, + scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -231,17 +213,8 @@ def __init__( ], ) - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) - - self._stubs = {} + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @property def grpc_channel(self) -> aio.Channel: diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index b9ce82dc0516..94fe3cfcc398 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -11,8 +11,8 @@ "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "3aeb3a70f66457a9e6b07caff841719bb9873b57", - "internalRef": "361676678" + "sha": "95dd24960cf9f794ef583e59ad9f1fabe1c4a924", + "internalRef": "365882072" } }, { From 8d70609f02397b62c25cdabe39e3c36b7117b3e5 Mon Sep 17 00:00:00 2001 From: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Date: Tue, 6 Apr 2021 08:06:03 -0600 Subject: [PATCH 424/892] chore: use gcp-sphinx-docfx-yaml for docfx session (#278) CC @dandhlee --- packages/google-cloud-bigtable/noxfile.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index 84fbd0583d73..3557f6615ebd 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -228,9 +228,7 @@ def docfx(session): """Build the docfx yaml files for this library.""" session.install("-e", ".") - # sphinx-docfx-yaml supports up to sphinx version 1.5.5. 
- # https://github.com/docascode/sphinx-docfx-yaml/issues/97 - session.install("sphinx==1.5.5", "alabaster", "recommonmark", "sphinx-docfx-yaml") + session.install("sphinx", "alabaster", "recommonmark", "gcp-sphinx-docfx-yaml") shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) session.run( From 0b83d6db2914f912ee2680a3b43319ca3418ea08 Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Tue, 6 Apr 2021 12:13:32 -0400 Subject: [PATCH 425/892] chore: release 2.0.0 (#282) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- packages/google-cloud-bigtable/CHANGELOG.md | 31 +++++++++++++++++++++ packages/google-cloud-bigtable/setup.py | 2 +- 2 files changed, 32 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 420cdc4f0883..5bd37baf1f16 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,37 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.0.0](https://www.github.com/googleapis/python-bigtable/compare/v1.7.0...v2.0.0) (2021-04-06) + + +### ⚠ BREAKING CHANGES + +* microgenerator changes (#203) + +### Features + +* microgenerator changes ([#203](https://www.github.com/googleapis/python-bigtable/issues/203)) ([b31bd87](https://www.github.com/googleapis/python-bigtable/commit/b31bd87c3fa8cad32768611a52d5effcc7d9b3e2)) +* publish new fields for CMEK ([#222](https://www.github.com/googleapis/python-bigtable/issues/222)) ([0fe5b63](https://www.github.com/googleapis/python-bigtable/commit/0fe5b638e45e711d25f55664689a9baf4d12dc57)) + + +### Bug Fixes + +* address issue in establishing an emulator connection ([#246](https://www.github.com/googleapis/python-bigtable/issues/246)) ([1a31826](https://www.github.com/googleapis/python-bigtable/commit/1a31826e2e378468e057160c07d850ebca1c5879)) +* fix unit test that could be broken by user's environment ([#239](https://www.github.com/googleapis/python-bigtable/issues/239)) ([cbd712e](https://www.github.com/googleapis/python-bigtable/commit/cbd712e6d3aded0c025525f97da1d667fbe2f061)) +* guard assignments of certain values against None ([#220](https://www.github.com/googleapis/python-bigtable/issues/220)) ([341f448](https://www.github.com/googleapis/python-bigtable/commit/341f448ce378375ab79bfc82f864fb6c88ed71a0)) +* **retry:** restore grpc_service_config for CreateBackup and {Restore,Snapshot}Table ([#240](https://www.github.com/googleapis/python-bigtable/issues/240)) ([79f1734](https://www.github.com/googleapis/python-bigtable/commit/79f1734c897e5e1b2fd02d043185c44b7ee34dc9)) + + +### Documentation + +* add backup docs ([#251](https://www.github.com/googleapis/python-bigtable/issues/251)) ([7d5c7aa](https://www.github.com/googleapis/python-bigtable/commit/7d5c7aa92cb476b07ac9efb5d231888c4c417783)) + + +### Dependencies + +* update gapic-generator-python to 0.40.11 ([#230](https://www.github.com/googleapis/python-bigtable/issues/230)) ([47d5dc1](https://www.github.com/googleapis/python-bigtable/commit/47d5dc1853f0be609e666e8a8fad0146f2905482)) +* upgrade gapic-generator-python to 0.43.1 ([#276](https://www.github.com/googleapis/python-bigtable/issues/276)) ([0e9fe54](https://www.github.com/googleapis/python-bigtable/commit/0e9fe5410e1b5d16ae0735ba1f606f7d1befafb9)) + ## [2.0.0-dev1](https://www.github.com/googleapis/python-bigtable/compare/v1.7.0...v2.0.0-dev1) (2021-02-24) 
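For reference, the breaking "microgenerator changes" called out in the changelog above move the generated admin clients to request-object style calls. A minimal sketch of that calling convention, mirroring the `get_table(request={...})` call that appears in the table.py diff later in this series (the resource name below is a placeholder, not taken from this patch):

    # Sketch only: the table name is a placeholder.
    from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient

    client = BigtableTableAdminClient()
    table = client.get_table(
        request={"name": "projects/my-project/instances/my-instance/tables/my-table"}
    )
    print(table.name)
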
diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index b460b91b3691..7a19c96d2671 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -22,7 +22,7 @@ name = "google-cloud-bigtable" description = "Google Cloud Bigtable API client library" -version = "2.0.0-dev1" +version = "2.0.0" # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' From 8b6e5d8fbaa711bedc215b2ca0be5031e8509d2b Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Wed, 7 Apr 2021 09:15:03 -0700 Subject: [PATCH 426/892] chore: add license headers (#285) * changes without context autosynth cannot find the source of changes triggered by earlier changes in this repository, or by version upgrades to tools such as linters. * chore: Add license headers for python config files Source-Author: Anthonios Partheniou Source-Date: Tue Apr 6 11:32:03 2021 -0400 Source-Repo: googleapis/synthtool Source-Sha: 5b5bf6d519b2d658d9f2e483d9f6f3d0ba8ee6bc Source-Link: https://github.com/googleapis/synthtool/commit/5b5bf6d519b2d658d9f2e483d9f6f3d0ba8ee6bc --- .../google-cloud-bigtable/.pre-commit-config.yaml | 14 ++++++++++++++ packages/google-cloud-bigtable/docs/conf.py | 13 +++++++++++++ .../google/cloud/bigtable_admin_v2/__init__.py | 4 ++-- packages/google-cloud-bigtable/synth.metadata | 6 +++--- 4 files changed, 32 insertions(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/.pre-commit-config.yaml b/packages/google-cloud-bigtable/.pre-commit-config.yaml index 32302e4883a1..8912e9b5d7d7 100644 --- a/packages/google-cloud-bigtable/.pre-commit-config.yaml +++ b/packages/google-cloud-bigtable/.pre-commit-config.yaml @@ -1,3 +1,17 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# # See https://pre-commit.com for more information # See https://pre-commit.com/hooks.html for more hooks repos: diff --git a/packages/google-cloud-bigtable/docs/conf.py b/packages/google-cloud-bigtable/docs/conf.py index dc4b4d822f8c..fcd69d50d030 100644 --- a/packages/google-cloud-bigtable/docs/conf.py +++ b/packages/google-cloud-bigtable/docs/conf.py @@ -1,4 +1,17 @@ # -*- coding: utf-8 -*- +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
# # google-cloud-bigtable documentation build configuration file # diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py index 79a9bea684e0..edfce27effc3 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py @@ -88,7 +88,7 @@ "AppProfile", "Backup", "BackupInfo", - "BigtableTableAdminClient", + "BigtableInstanceAdminClient", "CheckConsistencyRequest", "CheckConsistencyResponse", "Cluster", @@ -151,5 +151,5 @@ "UpdateBackupRequest", "UpdateClusterMetadata", "UpdateInstanceMetadata", - "BigtableInstanceAdminClient", + "BigtableTableAdminClient", ) diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index 94fe3cfcc398..8f3bfa79469d 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -4,7 +4,7 @@ "git": { "name": ".", "remote": "https://github.com/googleapis/python-bigtable.git", - "sha": "5089335c220c16ff3675f01b76f7d4e0dc2219e3" + "sha": "ddeade6e1b0b02ead5dd43b714bd7c26999f085c" } }, { @@ -19,14 +19,14 @@ "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "f5c5904fb0c6aa3b3730eadf4e5a4485afc65726" + "sha": "5b5bf6d519b2d658d9f2e483d9f6f3d0ba8ee6bc" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "f5c5904fb0c6aa3b3730eadf4e5a4485afc65726" + "sha": "5b5bf6d519b2d658d9f2e483d9f6f3d0ba8ee6bc" } } ], From c24337968552e1a4857bbad004220d7015bc1409 Mon Sep 17 00:00:00 2001 From: kolea2 <45548808+kolea2@users.noreply.github.com> Date: Thu, 8 Apr 2021 15:22:51 -0400 Subject: [PATCH 427/892] chore(deps): update samples to bigtable 2.0.0 (#284) --- packages/google-cloud-bigtable/samples/beam/requirements.txt | 2 +- packages/google-cloud-bigtable/samples/hello/requirements.txt | 2 +- .../samples/instanceadmin/requirements.txt | 2 +- .../google-cloud-bigtable/samples/metricscaler/requirements.txt | 2 +- .../google-cloud-bigtable/samples/quickstart/requirements.txt | 2 +- .../samples/snippets/filters/requirements.txt | 2 +- .../samples/snippets/reads/requirements.txt | 2 +- .../samples/snippets/writes/requirements.txt | 2 +- .../google-cloud-bigtable/samples/tableadmin/requirements.txt | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index a22c93d0b761..2e0625e13af1 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ apache-beam==2.28.0 -google-cloud-bigtable<2.0.0dev1 +google-cloud-bigtable<2.0.0 google-cloud-core==1.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/hello/requirements.txt b/packages/google-cloud-bigtable/samples/hello/requirements.txt index 3c38f94bb211..68fd9e57fd85 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==1.7.0 +google-cloud-bigtable==2.0.0 google-cloud-core==1.6.0 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt index 
1e0bcfdf8913..81a589a745c5 100755 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==1.7.0 +google-cloud-bigtable==2.0.0 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index ebf403be9844..f9c39c01ff58 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==1.7.0 +google-cloud-bigtable==2.0.0 google-cloud-monitoring==2.2.1 diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt index 1e0bcfdf8913..81a589a745c5 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==1.7.0 +google-cloud-bigtable==2.0.0 diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt index c256e38ebf47..17c553b39745 100755 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==1.7.0 +google-cloud-bigtable==2.0.0 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt index c256e38ebf47..17c553b39745 100755 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==1.7.0 +google-cloud-bigtable==2.0.0 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt index bf09e1de90a3..d7b9b5c81616 100755 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==1.7.0 \ No newline at end of file +google-cloud-bigtable==2.0.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt index 1e0bcfdf8913..81a589a745c5 100755 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==1.7.0 +google-cloud-bigtable==2.0.0 From 986a16ced1b62f23f15d8485769b1d48cd16bde8 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Wed, 14 Apr 2021 09:28:53 -0700 Subject: [PATCH 428/892] chore: update templates (#286) * changes without context autosynth cannot find the source of changes triggered by earlier changes in this repository, or by version upgrades to tools such as linters. 
* chore: add constraints file check for python samples This is the sibling PR to https://github.com/GoogleCloudPlatform/python-docs-samples/pull/5611 and this is the issue opened for it https://github.com/GoogleCloudPlatform/python-docs-samples/issues/5549 If you look at the files in [this example repo](https://github.com/leahecole/testrepo-githubapp/pull/31/files), you'll see that renovate successfully opened a PR on three constraints files in `samples` directories and subdirectories, and properly ignored `constraints` files at the root level cc @tswast TODO: - [x] update renovate to check for samples/constraints.txt dependency updates - [x] run lint locally to double check that I'm not introducing lint error Source-Author: Leah E. Cole <6719667+leahecole@users.noreply.github.com> Source-Date: Fri Apr 9 22:50:04 2021 -0700 Source-Repo: googleapis/synthtool Source-Sha: 0a071b3460344886297a304253bf924aa68ddb7e Source-Link: https://github.com/googleapis/synthtool/commit/0a071b3460344886297a304253bf924aa68ddb7e --- .../google-cloud-bigtable/.github/header-checker-lint.yml | 2 +- .../google/cloud/bigtable_admin_v2/__init__.py | 4 ++-- packages/google-cloud-bigtable/renovate.json | 5 ++++- packages/google-cloud-bigtable/synth.metadata | 6 +++--- 4 files changed, 10 insertions(+), 7 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/header-checker-lint.yml b/packages/google-cloud-bigtable/.github/header-checker-lint.yml index fc281c05bd55..6fe78aa7987a 100644 --- a/packages/google-cloud-bigtable/.github/header-checker-lint.yml +++ b/packages/google-cloud-bigtable/.github/header-checker-lint.yml @@ -1,6 +1,6 @@ {"allowedCopyrightHolders": ["Google LLC"], "allowedLicenses": ["Apache-2.0", "MIT", "BSD-3"], - "ignoreFiles": ["**/requirements.txt", "**/requirements-test.txt"], + "ignoreFiles": ["**/requirements.txt", "**/requirements-test.txt", "**/__init__.py", "samples/**/constraints.txt", "samples/**/constraints-test.txt"], "sourceFileExtensions": [ "ts", "js", diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py index edfce27effc3..79a9bea684e0 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py @@ -88,7 +88,7 @@ "AppProfile", "Backup", "BackupInfo", - "BigtableInstanceAdminClient", + "BigtableTableAdminClient", "CheckConsistencyRequest", "CheckConsistencyResponse", "Cluster", @@ -151,5 +151,5 @@ "UpdateBackupRequest", "UpdateClusterMetadata", "UpdateInstanceMetadata", - "BigtableTableAdminClient", + "BigtableInstanceAdminClient", ) diff --git a/packages/google-cloud-bigtable/renovate.json b/packages/google-cloud-bigtable/renovate.json index f08bc22c9a55..c04895563e69 100644 --- a/packages/google-cloud-bigtable/renovate.json +++ b/packages/google-cloud-bigtable/renovate.json @@ -2,5 +2,8 @@ "extends": [ "config:base", ":preserveSemverRanges" ], - "ignorePaths": [".pre-commit-config.yaml"] + "ignorePaths": [".pre-commit-config.yaml"], + "pip_requirements": { + "fileMatch": ["requirements-test.txt", "samples/[\\S/]*constraints.txt", "samples/[\\S/]*constraints-test.txt"] + } } diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index 8f3bfa79469d..7607fcf6d2ae 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -4,7 +4,7 @@ "git": { "name": ".", 
"remote": "https://github.com/googleapis/python-bigtable.git", - "sha": "ddeade6e1b0b02ead5dd43b714bd7c26999f085c" + "sha": "990c26d3dce9da83f385cad1b0094eed0c597aa4" } }, { @@ -19,14 +19,14 @@ "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "5b5bf6d519b2d658d9f2e483d9f6f3d0ba8ee6bc" + "sha": "0a071b3460344886297a304253bf924aa68ddb7e" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "5b5bf6d519b2d658d9f2e483d9f6f3d0ba8ee6bc" + "sha": "0a071b3460344886297a304253bf924aa68ddb7e" } } ], From 70d7b1f7b94a55ea1b4eb47a6846d80a5ea44e90 Mon Sep 17 00:00:00 2001 From: Dan Lee <71398022+dandhlee@users.noreply.github.com> Date: Fri, 16 Apr 2021 17:28:01 -0400 Subject: [PATCH 429/892] chore: prevent normalization of semver versioning (#292) * chore: prevent normalization of semver versioning * chore: update workaround to make sic work --- packages/google-cloud-bigtable/setup.py | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 7a19c96d2671..f75c3a004af3 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -17,6 +17,20 @@ import setuptools +# Disable version normalization performed by setuptools.setup() +try: + # Try the approach of using sic(), added in setuptools 46.1.0 + from setuptools import sic +except ImportError: + # Try the approach of replacing packaging.version.Version + sic = lambda v: v + try: + # setuptools >=39.0.0 uses packaging from setuptools.extern + from setuptools.extern import packaging + except ImportError: + # setuptools <39.0.0 uses packaging from pkg_resources.extern + from pkg_resources.extern import packaging + packaging.version.Version = packaging.version.LegacyVersion # Package metadata. @@ -62,7 +76,7 @@ setuptools.setup( name=name, - version=version, + version=sic(version), description=description, long_description=readme, author="Google LLC", From 471199dd88cc85edb9f8a5c76146b67dfbe353a6 Mon Sep 17 00:00:00 2001 From: Chris Rossi Date: Mon, 19 Apr 2021 16:03:49 -0400 Subject: [PATCH 430/892] feat: customer managed keys (CMEK) (#249) * feat: customer managed keys (CMEK) Implement customer managed keys (CMEK) feature. WIP. DO NOT MERGE. * Wrap Status. * Wrapper for Status, reorganize to avoid circular imports. * Blacken. * Make system tests in charge of their own key. * Consolidate system tests. Get KMS_KEY_NAME from user's environment. * Fix test. * Lint. * Put system tests where nox is expecting to find them. * Test backup with CMEK. * Differentiate instance and cluster names for cmek test, so tests aren't stepping on each other's toes. Remove bogus backup with cmek test. * rename `encryption.py` to `encryption_info.py` * make sure `kms_key_name` is set to `None` if `encryption_info` is not PB. * Fix typo. Use more realistic looking test strings. 
--- .../google/cloud/bigtable/backup.py | 21 +- .../google/cloud/bigtable/cluster.py | 28 +++ .../google/cloud/bigtable/encryption_info.py | 64 +++++ .../google/cloud/bigtable/enums.py | 30 +++ .../google/cloud/bigtable/error.py | 64 +++++ .../google/cloud/bigtable/instance.py | 24 +- .../google/cloud/bigtable/table.py | 28 +++ .../google-cloud-bigtable/tests/system.py | 236 +++++++++++++++++- .../tests/unit/test_backup.py | 32 +++ .../tests/unit/test_cluster.py | 99 ++++++++ .../tests/unit/test_table.py | 107 ++++++++ 11 files changed, 724 insertions(+), 9 deletions(-) create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/encryption_info.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/error.py diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/backup.py b/packages/google-cloud-bigtable/google/cloud/bigtable/backup.py index 6dead1f74c64..3666b7132520 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/backup.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/backup.py @@ -19,6 +19,7 @@ from google.cloud._helpers import _datetime_to_pb_timestamp from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient from google.cloud.bigtable_admin_v2.types import table +from google.cloud.bigtable.encryption_info import EncryptionInfo from google.cloud.bigtable.policy import Policy from google.cloud.exceptions import NotFound from google.protobuf import field_mask_pb2 @@ -67,13 +68,20 @@ class Backup(object): """ def __init__( - self, backup_id, instance, cluster_id=None, table_id=None, expire_time=None + self, + backup_id, + instance, + cluster_id=None, + table_id=None, + expire_time=None, + encryption_info=None, ): self.backup_id = backup_id self._instance = instance self._cluster = cluster_id self.table_id = table_id self._expire_time = expire_time + self._encryption_info = encryption_info self._parent = None self._source_table = None @@ -176,6 +184,15 @@ def expire_time(self): def expire_time(self, new_expire_time): self._expire_time = new_expire_time + @property + def encryption_info(self): + """Encryption info for this Backup. + + :rtype: :class:`google.cloud.bigtable.encryption.EncryptionInfo` + :returns: The encryption information for this backup. + """ + return self._encryption_info + @property def start_time(self): """The time this Backup was started. @@ -255,6 +272,7 @@ def from_pb(cls, backup_pb, instance): table_id = match.group("table_id") if match else None expire_time = backup_pb._pb.expire_time + encryption_info = EncryptionInfo._from_pb(backup_pb.encryption_info) backup = cls( backup_id, @@ -262,6 +280,7 @@ def from_pb(cls, backup_pb, instance): cluster_id=cluster_id, table_id=table_id, expire_time=expire_time, + encryption_info=encryption_info, ) backup._start_time = backup_pb._pb.start_time backup._end_time = backup_pb._pb.end_time diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py index 5c4c355ffdf1..f3e79c6c2e9f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py @@ -63,6 +63,19 @@ class Cluster(object): Defaults to :data:`google.cloud.bigtable.enums.StorageType.UNSPECIFIED`. + :type kms_key_name: str + :param kms_key_name: (Optional, Creation Only) The name of the KMS customer managed + encryption key (CMEK) to use for at-rest encryption of data in + this cluster. 
If omitted, Google's default encryption will be + used. If specified, the requirements for this key are: + + 1) The Cloud Bigtable service account associated with the + project that contains the cluster must be granted the + ``cloudkms.cryptoKeyEncrypterDecrypter`` role on the CMEK. + 2) Only regional keys can be used and the region of the CMEK + key must match the region of the cluster. + 3) All clusters within an instance must use the same CMEK key. + :type _state: int :param _state: (`OutputOnly`) The current state of the cluster. @@ -81,6 +94,7 @@ def __init__( location_id=None, serve_nodes=None, default_storage_type=None, + kms_key_name=None, _state=None, ): self.cluster_id = cluster_id @@ -88,6 +102,7 @@ def __init__( self.location_id = location_id self.serve_nodes = serve_nodes self.default_storage_type = default_storage_type + self._kms_key_name = kms_key_name self._state = _state @classmethod @@ -145,6 +160,10 @@ def _update_from_pb(self, cluster_pb): self.location_id = cluster_pb.location.split("/")[-1] self.serve_nodes = cluster_pb.serve_nodes self.default_storage_type = cluster_pb.default_storage_type + if cluster_pb.encryption_config: + self._kms_key_name = cluster_pb.encryption_config.kms_key_name + else: + self._kms_key_name = None self._state = cluster_pb.state @property @@ -187,6 +206,11 @@ def state(self): """ return self._state + @property + def kms_key_name(self): + """str: Customer managed encryption key for the cluster.""" + return self._kms_key_name + def __eq__(self, other): if not isinstance(other, self.__class__): return NotImplemented @@ -356,4 +380,8 @@ def _to_pb(self): serve_nodes=self.serve_nodes, default_storage_type=self.default_storage_type, ) + if self._kms_key_name: + cluster_pb.encryption_config = instance.Cluster.EncryptionConfig( + kms_key_name=self._kms_key_name, + ) return cluster_pb diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/encryption_info.py b/packages/google-cloud-bigtable/google/cloud/bigtable/encryption_info.py new file mode 100644 index 000000000000..1757297bcbeb --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/encryption_info.py @@ -0,0 +1,64 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Class for encryption info for tables and backups.""" + +from google.cloud.bigtable.error import Status + + +class EncryptionInfo: + """Encryption information for a given resource. + + If this resource is protected with customer managed encryption, the in-use Google + Cloud Key Management Service (KMS) key versions will be specified along with their + status. + + :type encryption_type: int + :param encryption_type: See :class:`enums.EncryptionInfo.EncryptionType` + + :type encryption_status: google.cloud.bigtable.encryption.Status + :param encryption_status: The encryption status. + + :type kms_key_version: str + :param kms_key_version: The key version used for encryption. 
+ """ + + @classmethod + def _from_pb(cls, info_pb): + return cls( + info_pb.encryption_type, + Status(info_pb.encryption_status), + info_pb.kms_key_version, + ) + + def __init__(self, encryption_type, encryption_status, kms_key_version): + self.encryption_type = encryption_type + self.encryption_status = encryption_status + self.kms_key_version = kms_key_version + + def __eq__(self, other): + if self is other: + return True + + if not isinstance(other, type(self)): + return NotImplemented + + return ( + self.encryption_type == other.encryption_type + and self.encryption_status == other.encryption_status + and self.kms_key_version == other.kms_key_version + ) + + def __ne__(self, other): + return not self == other diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/enums.py b/packages/google-cloud-bigtable/google/cloud/bigtable/enums.py index 50c7f2e6061f..327b2f828c3b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/enums.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/enums.py @@ -156,6 +156,7 @@ class View(object): NAME_ONLY = table.Table.View.NAME_ONLY SCHEMA_VIEW = table.Table.View.SCHEMA_VIEW REPLICATION_VIEW = table.Table.View.REPLICATION_VIEW + ENCRYPTION_VIEW = table.Table.View.ENCRYPTION_VIEW FULL = table.Table.View.FULL class ReplicationState(object): @@ -191,3 +192,32 @@ class ReplicationState(object): table.Table.ClusterState.ReplicationState.UNPLANNED_MAINTENANCE ) READY = table.Table.ClusterState.ReplicationState.READY + + +class EncryptionInfo: + class EncryptionType: + """Possible encryption types for a resource. + + Attributes: + ENCRYPTION_TYPE_UNSPECIFIED (int): Encryption type was not specified, though + data at rest remains encrypted. + GOOGLE_DEFAULT_ENCRYPTION (int): The data backing this resource is encrypted + at rest with a key that is fully managed by Google. No key version or + status will be populated. This is the default state. + CUSTOMER_MANAGED_ENCRYPTION (int): The data backing this resource is + encrypted at rest with a key that is managed by the customer. The in-use + version of the key and its status are populated for CMEK-protected + tables. CMEK-protected backups are pinned to the key version that was in + use at the time the backup was taken. This key version is populated but + its status is not tracked and is reported as `UNKNOWN`. + """ + + ENCRYPTION_TYPE_UNSPECIFIED = ( + table.EncryptionInfo.EncryptionType.ENCRYPTION_TYPE_UNSPECIFIED + ) + GOOGLE_DEFAULT_ENCRYPTION = ( + table.EncryptionInfo.EncryptionType.GOOGLE_DEFAULT_ENCRYPTION + ) + CUSTOMER_MANAGED_ENCRYPTION = ( + table.EncryptionInfo.EncryptionType.CUSTOMER_MANAGED_ENCRYPTION + ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/error.py b/packages/google-cloud-bigtable/google/cloud/bigtable/error.py new file mode 100644 index 000000000000..261cfc2c3b10 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/error.py @@ -0,0 +1,64 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Class for error status.""" + + +class Status: + """A status, comprising a code and a message. + + See: `Cloud APIs Errors `_ + + This is a thin wrapper for ``google.rpc.status_pb2.Status``. + + :type status_pb: google.rpc.status_pb2.Status + :param status_pb: The status protocol buffer. + """ + + def __init__(self, status_pb): + self.status_pb = status_pb + + @property + def code(self): + """The status code. + + Values are defined in ``google.rpc.code_pb2.Code``. + + See: `google.rpc.Code + `_ + + :rtype: int + :returns: The status code. + """ + return self.status_pb.code + + @property + def message(self): + """A human readable status message. + + :rypte: str + :returns: The status message. + """ + return self.status_pb.message + + def __repr__(self): + return repr(self.status_pb) + + def __eq__(self, other): + if isinstance(other, type(self)): + return self.status_pb == other.status_pb + return NotImplemented + + def __ne__(self, other): + return not self == other diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py index d2fb5db072e9..138d3bfc1423 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py @@ -540,7 +540,12 @@ def test_iam_permissions(self, permissions): return list(resp.permissions) def cluster( - self, cluster_id, location_id=None, serve_nodes=None, default_storage_type=None + self, + cluster_id, + location_id=None, + serve_nodes=None, + default_storage_type=None, + kms_key_name=None, ): """Factory to create a cluster associated with this instance. @@ -576,6 +581,22 @@ def cluster( :rtype: :class:`~google.cloud.bigtable.instance.Cluster` :returns: a cluster owned by this instance. + + :type kms_key_name: str + :param kms_key_name: (Optional, Creation Only) The name of the KMS customer + managed encryption key (CMEK) to use for at-rest encryption + of data in this cluster. If omitted, Google's default + encryption will be used. If specified, the requirements for + this key are: + + 1) The Cloud Bigtable service account associated with the + project that contains the cluster must be granted the + ``cloudkms.cryptoKeyEncrypterDecrypter`` role on the + CMEK. + 2) Only regional keys can be used and the region of the + CMEK key must match the region of the cluster. + 3) All clusters within an instance must use the same CMEK + key. 
""" return Cluster( cluster_id, @@ -583,6 +604,7 @@ def cluster( location_id=location_id, serve_nodes=serve_nodes, default_storage_type=default_storage_type, + kms_key_name=kms_key_name, ) def list_clusters(self): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index c2d11436243e..95fb55c50e4c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -28,6 +28,7 @@ from google.cloud.bigtable.column_family import ColumnFamily from google.cloud.bigtable.batcher import MutationsBatcher from google.cloud.bigtable.batcher import FLUSH_COUNT, MAX_ROW_BYTES +from google.cloud.bigtable.encryption_info import EncryptionInfo from google.cloud.bigtable.policy import Policy from google.cloud.bigtable.row import AppendRow from google.cloud.bigtable.row import ConditionalRow @@ -484,6 +485,33 @@ def get_cluster_states(self): for cluster_id, value_pb in table_pb.cluster_states.items() } + def get_encryption_info(self): + """List the encryption info for each cluster owned by this table. + + Gets the current encryption info for the table across all of the clusters. The + returned dict will be keyed by cluster id and contain a status for all of the + keys in use. + + :rtype: dict + :returns: Dictionary of encryption info for this table. Keys are cluster ids and + values are tuples of :class:`google.cloud.bigtable.encryption.EncryptionInfo` instances. + """ + ENCRYPTION_VIEW = enums.Table.View.ENCRYPTION_VIEW + table_client = self._instance._client.table_admin_client + table_pb = table_client.get_table( + request={"name": self.name, "view": ENCRYPTION_VIEW} + ) + + return { + cluster_id: tuple( + ( + EncryptionInfo._from_pb(info_pb) + for info_pb in value_pb.encryption_info + ) + ) + for cluster_id, value_pb in table_pb.cluster_states.items() + } + def read_row(self, row_key, filter_=None): """Read a single row from this table. 
diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index 21a39eb29500..48f7e3bdfae5 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -18,6 +18,8 @@ import time import unittest +import pytest + from google.api_core.datetime_helpers import DatetimeWithNanoseconds from google.api_core.exceptions import DeadlineExceeded from google.api_core.exceptions import TooManyRequests @@ -56,8 +58,8 @@ CLUSTER_ID = INSTANCE_ID + "-cluster" CLUSTER_ID_DATA = INSTANCE_ID_DATA + "-cluster" SERVE_NODES = 3 -COLUMN_FAMILY_ID1 = u"col-fam-id1" -COLUMN_FAMILY_ID2 = u"col-fam-id2" +COLUMN_FAMILY_ID1 = "col-fam-id1" +COLUMN_FAMILY_ID2 = "col-fam-id2" COL_NAME1 = b"col-name1" COL_NAME2 = b"col-name2" COL_NAME3 = b"col-name3-but-other-fam" @@ -68,13 +70,14 @@ ROW_KEY = b"row-key" ROW_KEY_ALT = b"row-key-alt" EXISTING_INSTANCES = [] -LABEL_KEY = u"python-system" +LABEL_KEY = "python-system" label_stamp = ( datetime.datetime.utcnow() .replace(microsecond=0, tzinfo=UTC) .strftime("%Y-%m-%dt%H-%M-%S") ) LABELS = {LABEL_KEY: str(label_stamp)} +KMS_KEY_NAME = os.environ.get("KMS_KEY_NAME", None) class Config(object): @@ -121,13 +124,13 @@ def setUpModule(): Config.INSTANCE = Config.CLIENT.instance(INSTANCE_ID, labels=LABELS) Config.CLUSTER = Config.INSTANCE.cluster( - CLUSTER_ID, location_id=LOCATION_ID, serve_nodes=SERVE_NODES + CLUSTER_ID, location_id=LOCATION_ID, serve_nodes=SERVE_NODES, ) Config.INSTANCE_DATA = Config.CLIENT.instance( INSTANCE_ID_DATA, instance_type=Instance.Type.DEVELOPMENT, labels=LABELS ) Config.CLUSTER_DATA = Config.INSTANCE_DATA.cluster( - CLUSTER_ID_DATA, location_id=LOCATION_ID + CLUSTER_ID_DATA, location_id=LOCATION_ID, ) if not Config.IN_EMULATOR: @@ -331,6 +334,220 @@ def test_create_instance_w_two_clusters(self): temp_table_id = "test-get-cluster-states" temp_table = instance.table(temp_table_id) temp_table.create() + + encryption_info = temp_table.get_encryption_info() + self.assertEqual( + encryption_info[ALT_CLUSTER_ID_1][0].encryption_type, + enums.EncryptionInfo.EncryptionType.GOOGLE_DEFAULT_ENCRYPTION, + ) + self.assertEqual( + encryption_info[ALT_CLUSTER_ID_2][0].encryption_type, + enums.EncryptionInfo.EncryptionType.GOOGLE_DEFAULT_ENCRYPTION, + ) + + result = temp_table.get_cluster_states() + ReplicationState = enums.Table.ReplicationState + expected_results = [ + ClusterState(ReplicationState.STATE_NOT_KNOWN), + ClusterState(ReplicationState.INITIALIZING), + ClusterState(ReplicationState.PLANNED_MAINTENANCE), + ClusterState(ReplicationState.UNPLANNED_MAINTENANCE), + ClusterState(ReplicationState.READY), + ] + cluster_id_list = result.keys() + self.assertEqual(len(cluster_id_list), 2) + self.assertIn(ALT_CLUSTER_ID_1, cluster_id_list) + self.assertIn(ALT_CLUSTER_ID_2, cluster_id_list) + for clusterstate in result.values(): + self.assertIn(clusterstate, expected_results) + + # Test create app profile with multi_cluster_routing policy + app_profiles_to_delete = [] + description = "routing policy-multy" + app_profile_id_1 = "app_profile_id_1" + routing = enums.RoutingPolicyType.ANY + self._test_create_app_profile_helper( + app_profile_id_1, + instance, + routing_policy_type=routing, + description=description, + ignore_warnings=True, + ) + app_profiles_to_delete.append(app_profile_id_1) + + # Test list app profiles + self._test_list_app_profiles_helper(instance, [app_profile_id_1]) + + # Test modify app profile app_profile_id_1 + # routing policy to single 
cluster policy, + # cluster -> ALT_CLUSTER_ID_1, + # allow_transactional_writes -> disallowed + # modify description + description = "to routing policy-single" + routing = enums.RoutingPolicyType.SINGLE + self._test_modify_app_profile_helper( + app_profile_id_1, + instance, + routing_policy_type=routing, + description=description, + cluster_id=ALT_CLUSTER_ID_1, + allow_transactional_writes=False, + ) + + # Test modify app profile app_profile_id_1 + # cluster -> ALT_CLUSTER_ID_2, + # allow_transactional_writes -> allowed + self._test_modify_app_profile_helper( + app_profile_id_1, + instance, + routing_policy_type=routing, + description=description, + cluster_id=ALT_CLUSTER_ID_2, + allow_transactional_writes=True, + ignore_warnings=True, + ) + + # Test create app profile with single cluster routing policy + description = "routing policy-single" + app_profile_id_2 = "app_profile_id_2" + routing = enums.RoutingPolicyType.SINGLE + self._test_create_app_profile_helper( + app_profile_id_2, + instance, + routing_policy_type=routing, + description=description, + cluster_id=ALT_CLUSTER_ID_2, + allow_transactional_writes=False, + ) + app_profiles_to_delete.append(app_profile_id_2) + + # Test list app profiles + self._test_list_app_profiles_helper( + instance, [app_profile_id_1, app_profile_id_2] + ) + + # Test modify app profile app_profile_id_2 to + # allow transactional writes + # Note: no need to set ``ignore_warnings`` to True + # since we are not restrictings anything with this modification. + self._test_modify_app_profile_helper( + app_profile_id_2, + instance, + routing_policy_type=routing, + description=description, + cluster_id=ALT_CLUSTER_ID_2, + allow_transactional_writes=True, + ) + + # Test modify app profile app_profile_id_2 routing policy + # to multi_cluster_routing policy + # modify description + description = "to routing policy-multy" + routing = enums.RoutingPolicyType.ANY + self._test_modify_app_profile_helper( + app_profile_id_2, + instance, + routing_policy_type=routing, + description=description, + allow_transactional_writes=False, + ignore_warnings=True, + ) + + # Test delete app profiles + for app_profile_id in app_profiles_to_delete: + self._test_delete_app_profile_helper(app_profile_id, instance) + + @pytest.mark.skipif( + not KMS_KEY_NAME, reason="requires KMS_KEY_NAME environment variable" + ) + def test_create_instance_w_two_clusters_cmek(self): + from google.cloud.bigtable import enums + from google.cloud.bigtable.table import ClusterState + + _PRODUCTION = enums.Instance.Type.PRODUCTION + ALT_INSTANCE_ID = "dif-cmek" + UNIQUE_SUFFIX + instance = Config.CLIENT.instance( + ALT_INSTANCE_ID, instance_type=_PRODUCTION, labels=LABELS + ) + + ALT_CLUSTER_ID_1 = ALT_INSTANCE_ID + "-c1" + ALT_CLUSTER_ID_2 = ALT_INSTANCE_ID + "-c2" + LOCATION_ID_2 = "us-central1-f" + STORAGE_TYPE = enums.StorageType.HDD + serve_nodes = 1 + cluster_1 = instance.cluster( + ALT_CLUSTER_ID_1, + location_id=LOCATION_ID, + serve_nodes=serve_nodes, + default_storage_type=STORAGE_TYPE, + kms_key_name=KMS_KEY_NAME, + ) + cluster_2 = instance.cluster( + ALT_CLUSTER_ID_2, + location_id=LOCATION_ID_2, + serve_nodes=serve_nodes, + default_storage_type=STORAGE_TYPE, + kms_key_name=KMS_KEY_NAME, + ) + operation = instance.create(clusters=[cluster_1, cluster_2]) + + # Make sure this instance gets deleted after the test case. + self.instances_to_delete.append(instance) + + # We want to make sure the operation completes. 
+ operation.result(timeout=120) + + # Create a new instance instance and make sure it is the same. + instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID) + instance_alt.reload() + + self.assertEqual(instance, instance_alt) + self.assertEqual(instance.display_name, instance_alt.display_name) + self.assertEqual(instance.type_, instance_alt.type_) + + clusters, failed_locations = instance_alt.list_clusters() + self.assertEqual(failed_locations, []) + + clusters.sort(key=lambda x: x.name) + alt_cluster_1, alt_cluster_2 = clusters + + self.assertEqual(cluster_1.location_id, alt_cluster_1.location_id) + self.assertEqual(alt_cluster_1.state, enums.Cluster.State.READY) + self.assertEqual(cluster_1.serve_nodes, alt_cluster_1.serve_nodes) + self.assertEqual( + cluster_1.default_storage_type, alt_cluster_1.default_storage_type + ) + self.assertEqual(cluster_2.location_id, alt_cluster_2.location_id) + self.assertEqual(alt_cluster_2.state, enums.Cluster.State.READY) + self.assertEqual(cluster_2.serve_nodes, alt_cluster_2.serve_nodes) + self.assertEqual( + cluster_2.default_storage_type, alt_cluster_2.default_storage_type + ) + + # Test list clusters in project via 'client.list_clusters' + clusters, failed_locations = Config.CLIENT.list_clusters() + self.assertFalse(failed_locations) + found = set([cluster.name for cluster in clusters]) + self.assertTrue( + {alt_cluster_1.name, alt_cluster_2.name, Config.CLUSTER.name}.issubset( + found + ) + ) + + temp_table_id = "test-get-cluster-states" + temp_table = instance.table(temp_table_id) + temp_table.create() + + encryption_info = temp_table.get_encryption_info() + self.assertEqual( + encryption_info[ALT_CLUSTER_ID_1][0].encryption_type, + enums.EncryptionInfo.EncryptionType.CUSTOMER_MANAGED_ENCRYPTION, + ) + self.assertEqual( + encryption_info[ALT_CLUSTER_ID_2][0].encryption_type, + enums.EncryptionInfo.EncryptionType.CUSTOMER_MANAGED_ENCRYPTION, + ) + result = temp_table.get_cluster_states() ReplicationState = enums.Table.ReplicationState expected_results = [ @@ -843,6 +1060,7 @@ def test_backup(self): self.skipTest("backups are not supported in the emulator") from google.cloud._helpers import _datetime_to_pb_timestamp + from google.cloud.bigtable import enums temp_table_id = "test-backup-table" temp_table = Config.INSTANCE_DATA.table(temp_table_id) @@ -879,6 +1097,10 @@ def test_backup(self): self.assertEqual(temp_backup_id, temp_table_backup.backup_id) self.assertEqual(CLUSTER_ID_DATA, temp_table_backup.cluster) self.assertEqual(expire, temp_table_backup.expire_time.seconds) + self.assertEqual( + temp_table_backup.encryption_info.encryption_type, + enums.EncryptionInfo.EncryptionType.GOOGLE_DEFAULT_ENCRYPTION, + ) # Testing `Backup.update_expire_time()` method expire += 3600 # A one-hour change in the `expire_time` parameter @@ -1213,13 +1435,13 @@ def test_read_with_label_applied(self): row.commit() # Combine a label with column 1. - label1 = u"label-red" + label1 = "label-red" label1_filter = ApplyLabelFilter(label1) col1_filter = ColumnQualifierRegexFilter(COL_NAME1) chain1 = RowFilterChain(filters=[col1_filter, label1_filter]) # Combine a label with column 2. 
- label2 = u"label-blue" + label2 = "label-blue" label2_filter = ApplyLabelFilter(label2) col2_filter = ColumnQualifierRegexFilter(COL_NAME2) chain2 = RowFilterChain(filters=[col2_filter, label2_filter]) diff --git a/packages/google-cloud-bigtable/tests/unit/test_backup.py b/packages/google-cloud-bigtable/tests/unit/test_backup.py index 02efef492ab2..0a5ba74c1189 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_backup.py +++ b/packages/google-cloud-bigtable/tests/unit/test_backup.py @@ -66,6 +66,7 @@ def test_constructor_defaults(self): self.assertIsNone(backup._end_time) self.assertIsNone(backup._size_bytes) self.assertIsNone(backup._state) + self.assertIsNone(backup._encryption_info) def test_constructor_non_defaults(self): instance = _Instance(self.INSTANCE_NAME) @@ -77,6 +78,7 @@ def test_constructor_non_defaults(self): cluster_id=self.CLUSTER_ID, table_id=self.TABLE_ID, expire_time=expire_time, + encryption_info="encryption_info", ) self.assertEqual(backup.backup_id, self.BACKUP_ID) @@ -84,6 +86,7 @@ def test_constructor_non_defaults(self): self.assertIs(backup._cluster, self.CLUSTER_ID) self.assertEqual(backup.table_id, self.TABLE_ID) self.assertEqual(backup._expire_time, expire_time) + self.assertEqual(backup._encryption_info, "encryption_info") self.assertIsNone(backup._parent) self.assertIsNone(backup._source_table) @@ -128,14 +131,20 @@ def test_from_pb_bad_name(self): klasse.from_pb(backup_pb, instance) def test_from_pb_success(self): + from google.cloud.bigtable.encryption_info import EncryptionInfo + from google.cloud.bigtable.error import Status from google.cloud.bigtable_admin_v2.types import table from google.cloud._helpers import _datetime_to_pb_timestamp + from google.rpc.code_pb2 import Code client = _Client() instance = _Instance(self.INSTANCE_NAME, client) timestamp = _datetime_to_pb_timestamp(self._make_timestamp()) size_bytes = 1234 state = table.Backup.State.READY + GOOGLE_DEFAULT_ENCRYPTION = ( + table.EncryptionInfo.EncryptionType.GOOGLE_DEFAULT_ENCRYPTION + ) backup_pb = table.Backup( name=self.BACKUP_NAME, source_table=self.TABLE_NAME, @@ -144,6 +153,11 @@ def test_from_pb_success(self): end_time=timestamp, size_bytes=size_bytes, state=state, + encryption_info=table.EncryptionInfo( + encryption_type=GOOGLE_DEFAULT_ENCRYPTION, + encryption_status=_StatusPB(Code.OK, "Status OK"), + kms_key_version="2", + ), ) klasse = self._get_target_class() @@ -159,6 +173,14 @@ def test_from_pb_success(self): self.assertEqual(backup.end_time, timestamp) self.assertEqual(backup._size_bytes, size_bytes) self.assertEqual(backup._state, state) + self.assertEqual( + backup.encryption_info, + EncryptionInfo( + encryption_type=GOOGLE_DEFAULT_ENCRYPTION, + encryption_status=Status(_StatusPB(Code.OK, "Status OK")), + kms_key_version="2", + ), + ) def test_property_name(self): from google.cloud.bigtable.client import Client @@ -862,3 +884,13 @@ def __init__(self, name, client=None): self.name = name self.instance_id = name.rsplit("/", 1)[1] self._client = client + + +def _StatusPB(code, message): + from google.rpc import status_pb2 + + status_pb = status_pb2.Status() + status_pb.code = code + status_pb.message = message + + return status_pb diff --git a/packages/google-cloud-bigtable/tests/unit/test_cluster.py b/packages/google-cloud-bigtable/tests/unit/test_cluster.py index d5f731eb6960..49a32ea56919 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_cluster.py +++ b/packages/google-cloud-bigtable/tests/unit/test_cluster.py @@ -16,6 +16,7 @@ import unittest import 
mock +import pytest from ._testing import _make_credentials @@ -60,6 +61,9 @@ class TestCluster(unittest.TestCase): OP_NAME = "operations/projects/{}/instances/{}/clusters/{}/operations/{}".format( PROJECT, INSTANCE_ID, CLUSTER_ID, OP_ID ) + KEY_RING_ID = "key-ring-id" + CRYPTO_KEY_ID = "crypto-key-id" + KMS_KEY_NAME = f"{LOCATION_PATH}/keyRings/{KEY_RING_ID}/cryptoKeys/{CRYPTO_KEY_ID}" @staticmethod def _get_target_class(): @@ -90,6 +94,7 @@ def test_constructor_defaults(self): self.assertIsNone(cluster.state) self.assertIsNone(cluster.serve_nodes) self.assertIsNone(cluster.default_storage_type) + self.assertIsNone(cluster.kms_key_name) def test_constructor_non_default(self): from google.cloud.bigtable.enums import StorageType @@ -107,6 +112,7 @@ def test_constructor_non_default(self): _state=STATE, serve_nodes=self.SERVE_NODES, default_storage_type=STORAGE_TYPE_SSD, + kms_key_name=self.KMS_KEY_NAME, ) self.assertEqual(cluster.cluster_id, self.CLUSTER_ID) self.assertIs(cluster._instance, instance) @@ -114,6 +120,7 @@ def test_constructor_non_default(self): self.assertEqual(cluster.state, STATE) self.assertEqual(cluster.serve_nodes, self.SERVE_NODES) self.assertEqual(cluster.default_storage_type, STORAGE_TYPE_SSD) + self.assertEqual(cluster.kms_key_name, self.KMS_KEY_NAME) def test_name_property(self): credentials = _make_credentials() @@ -125,6 +132,18 @@ def test_name_property(self): self.assertEqual(cluster.name, self.CLUSTER_NAME) + def test_kms_key_name_property(self): + client = _Client(self.PROJECT) + instance = _Instance(self.INSTANCE_ID, client) + + cluster = self._make_one( + self.CLUSTER_ID, instance, kms_key_name=self.KMS_KEY_NAME + ) + + self.assertEqual(cluster.kms_key_name, self.KMS_KEY_NAME) + with pytest.raises(AttributeError): + cluster.kms_key_name = "I'm read only" + def test_from_pb_success(self): from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 from google.cloud.bigtable import enums @@ -141,6 +160,9 @@ def test_from_pb_success(self): state=state, serve_nodes=self.SERVE_NODES, default_storage_type=storage_type, + encryption_config=data_v2_pb2.Cluster.EncryptionConfig( + kms_key_name=self.KMS_KEY_NAME, + ), ) klass = self._get_target_class() @@ -152,6 +174,7 @@ def test_from_pb_success(self): self.assertEqual(cluster.state, state) self.assertEqual(cluster.serve_nodes, self.SERVE_NODES) self.assertEqual(cluster.default_storage_type, storage_type) + self.assertEqual(cluster.kms_key_name, self.KMS_KEY_NAME) def test_from_pb_bad_cluster_name(self): from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 @@ -243,6 +266,7 @@ def test_reload(self): location_id=self.LOCATION_ID, serve_nodes=self.SERVE_NODES, default_storage_type=STORAGE_TYPE_SSD, + kms_key_name=self.KMS_KEY_NAME, ) # Create response_pb @@ -281,6 +305,7 @@ def test_reload(self): self.assertEqual(cluster.state, STATE) self.assertEqual(cluster.serve_nodes, SERVE_NODES_FROM_SERVER) self.assertEqual(cluster.default_storage_type, STORAGE_TYPE_FROM_SERVER) + self.assertEqual(cluster.kms_key_name, None) def test_exists(self): from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( @@ -392,6 +417,80 @@ def test_create(self): ].kwargs self.assertEqual(actual_request, expected_request) + def test_create_w_cmek(self): + import datetime + from google.longrunning import operations_pb2 + from google.protobuf.any_pb2 import Any + from google.cloud.bigtable_admin_v2.types import ( + bigtable_instance_admin as messages_v2_pb2, + ) + from google.cloud._helpers import 
_datetime_to_pb_timestamp + from google.cloud.bigtable.instance import Instance + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) + from google.cloud.bigtable_admin_v2.types import instance as instance_v2_pb2 + from google.cloud.bigtable.enums import StorageType + + NOW = datetime.datetime.utcnow() + NOW_PB = _datetime_to_pb_timestamp(NOW) + credentials = _make_credentials() + client = self._make_client( + project=self.PROJECT, credentials=credentials, admin=True + ) + STORAGE_TYPE_SSD = StorageType.SSD + LOCATION = self.LOCATION_PATH + self.LOCATION_ID + instance = Instance(self.INSTANCE_ID, client) + cluster = self._make_one( + self.CLUSTER_ID, + instance, + location_id=self.LOCATION_ID, + serve_nodes=self.SERVE_NODES, + default_storage_type=STORAGE_TYPE_SSD, + kms_key_name=self.KMS_KEY_NAME, + ) + expected_request_cluster = instance_v2_pb2.Cluster( + location=LOCATION, + serve_nodes=cluster.serve_nodes, + default_storage_type=cluster.default_storage_type, + encryption_config=instance_v2_pb2.Cluster.EncryptionConfig( + kms_key_name=self.KMS_KEY_NAME, + ), + ) + expected_request = { + "request": { + "parent": instance.name, + "cluster_id": self.CLUSTER_ID, + "cluster": expected_request_cluster, + } + } + name = instance.name + metadata = messages_v2_pb2.CreateClusterMetadata(request_time=NOW_PB) + type_url = "type.googleapis.com/{}".format( + messages_v2_pb2.CreateClusterMetadata._meta._pb.DESCRIPTOR.full_name + ) + response_pb = operations_pb2.Operation( + name=self.OP_NAME, + metadata=Any(type_url=type_url, value=metadata._pb.SerializeToString()), + ) + + # Patch the stub used by the API method. + api = mock.create_autospec(BigtableInstanceAdminClient) + api.common_location_path.return_value = LOCATION + client._instance_admin_client = api + cluster._instance._client = client + cluster._instance._client.instance_admin_client.instance_path.return_value = ( + name + ) + client._instance_admin_client.create_cluster.return_value = response_pb + # Perform the method and check the result. 
+ cluster.create() + + actual_request = client._instance_admin_client.create_cluster.call_args_list[ + 0 + ].kwargs + self.assertEqual(actual_request, expected_request) + def test_update(self): import datetime from google.longrunning import operations_pb2 diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index c521191920ae..ccb8350a30a2 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -534,6 +534,87 @@ def test_get_cluster_states(self): result = table.get_cluster_states() self.assertEqual(result, expected_result) + def test_get_encryption_info(self): + from google.rpc.code_pb2 import Code + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + client as bigtable_table_admin, + ) + from google.cloud.bigtable.encryption_info import EncryptionInfo + from google.cloud.bigtable.enums import EncryptionInfo as enum_crypto + from google.cloud.bigtable.error import Status + + ENCRYPTION_TYPE_UNSPECIFIED = ( + enum_crypto.EncryptionType.ENCRYPTION_TYPE_UNSPECIFIED + ) + GOOGLE_DEFAULT_ENCRYPTION = enum_crypto.EncryptionType.GOOGLE_DEFAULT_ENCRYPTION + CUSTOMER_MANAGED_ENCRYPTION = ( + enum_crypto.EncryptionType.CUSTOMER_MANAGED_ENCRYPTION + ) + + table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) + credentials = _make_credentials() + client = self._make_client( + project="project-id", credentials=credentials, admin=True + ) + instance = client.instance(instance_id=self.INSTANCE_ID) + table = self._make_one(self.TABLE_ID, instance) + + response_pb = _TablePB( + cluster_states={ + "cluster-id1": _ClusterStateEncryptionInfoPB( + encryption_type=ENCRYPTION_TYPE_UNSPECIFIED, + encryption_status=_StatusPB(Code.OK, "Status OK"), + ), + "cluster-id2": _ClusterStateEncryptionInfoPB( + encryption_type=GOOGLE_DEFAULT_ENCRYPTION, + ), + "cluster-id3": _ClusterStateEncryptionInfoPB( + encryption_type=CUSTOMER_MANAGED_ENCRYPTION, + encryption_status=_StatusPB( + Code.UNKNOWN, "Key version is not yet known." + ), + kms_key_version="UNKNOWN", + ), + } + ) + + # Patch the stub used by the API method. + client._table_admin_client = table_api + bigtable_table_stub = client._table_admin_client + + bigtable_table_stub.get_table.side_effect = [response_pb] + + # build expected result + expected_result = { + "cluster-id1": ( + EncryptionInfo( + encryption_type=ENCRYPTION_TYPE_UNSPECIFIED, + encryption_status=Status(_StatusPB(Code.OK, "Status OK")), + kms_key_version="", + ), + ), + "cluster-id2": ( + EncryptionInfo( + encryption_type=GOOGLE_DEFAULT_ENCRYPTION, + encryption_status=Status(_StatusPB(0, "")), + kms_key_version="", + ), + ), + "cluster-id3": ( + EncryptionInfo( + encryption_type=CUSTOMER_MANAGED_ENCRYPTION, + encryption_status=Status( + _StatusPB(Code.UNKNOWN, "Key version is not yet known.") + ), + kms_key_version="UNKNOWN", + ), + ), + } + + # Perform the method and check the result. 
+ result = table.get_encryption_info() + self.assertEqual(result, expected_result) + def _read_row_helper(self, chunks, expected_result, app_profile_id=None): from google.cloud._testing import _Monkey @@ -2257,5 +2338,31 @@ def _ClusterStatePB(replication_state): return table_v2_pb2.Table.ClusterState(replication_state=replication_state) +def _ClusterStateEncryptionInfoPB( + encryption_type, encryption_status=None, kms_key_version=None +): + from google.cloud.bigtable_admin_v2.types import table as table_v2_pb2 + + return table_v2_pb2.Table.ClusterState( + encryption_info=( + table_v2_pb2.EncryptionInfo( + encryption_type=encryption_type, + encryption_status=encryption_status, + kms_key_version=kms_key_version, + ), + ) + ) + + +def _StatusPB(code, message): + from google.rpc import status_pb2 + + status_pb = status_pb2.Status() + status_pb.code = code + status_pb.message = message + + return status_pb + + def _read_rows_retry_exception(exc): return isinstance(exc, DeadlineExceeded) From d4a2bcda035bc2bd8476f334c484e101edf571f5 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Mon, 19 Apr 2021 13:06:10 -0700 Subject: [PATCH 431/892] build: use PyPI API token in secret manager (#291) * docs(python): add empty lines between methods Source-Author: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Source-Date: Wed Apr 14 14:41:09 2021 -0600 Source-Repo: googleapis/synthtool Source-Sha: 721339ab60a6eb63b889978b3d9b295dcb3be370 Source-Link: https://github.com/googleapis/synthtool/commit/721339ab60a6eb63b889978b3d9b295dcb3be370 * build: use PyPI API token in secret manager Migrate python libraries onto the PyPI API token stored in secret manager. A PyPI API token is limited in scope to uploading new releases. https://pypi.org/help/#apitoken Verified that this works with [build](https://fusion2.corp.google.com/invocations/14bae126-83fa-4328-8da9-d390ed99315c/targets/cloud-devrel%2Fclient-libraries%2Fpython%2Fgoogleapis%2Fpython-vision%2Frelease%2Frelease;config=default/log) on https://github.com/googleapis/python-vision/pull/136 Source-Author: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Source-Date: Wed Apr 14 17:46:06 2021 -0600 Source-Repo: googleapis/synthtool Source-Sha: 043cc620d6a6111816d9e09f2a97208565fde958 Source-Link: https://github.com/googleapis/synthtool/commit/043cc620d6a6111816d9e09f2a97208565fde958 --- packages/google-cloud-bigtable/.kokoro/release.sh | 4 ++-- .../.kokoro/release/common.cfg | 14 ++------------ .../google-cloud-bigtable/docs/_static/custom.css | 13 ++++++++++++- packages/google-cloud-bigtable/synth.metadata | 6 +++--- 4 files changed, 19 insertions(+), 18 deletions(-) diff --git a/packages/google-cloud-bigtable/.kokoro/release.sh b/packages/google-cloud-bigtable/.kokoro/release.sh index 102d0ba6d06c..d3ffac5f677c 100755 --- a/packages/google-cloud-bigtable/.kokoro/release.sh +++ b/packages/google-cloud-bigtable/.kokoro/release.sh @@ -26,7 +26,7 @@ python3 -m pip install --upgrade twine wheel setuptools export PYTHONUNBUFFERED=1 # Move into the package, build the distribution and upload. 
-TWINE_PASSWORD=$(cat "${KOKORO_KEYSTORE_DIR}/73713_google_cloud_pypi_password") +TWINE_PASSWORD=$(cat "${KOKORO_GFILE_DIR}/secret_manager/google-cloud-pypi-token") cd github/python-bigtable python3 setup.py sdist bdist_wheel -twine upload --username gcloudpypi --password "${TWINE_PASSWORD}" dist/* +twine upload --username __token__ --password "${TWINE_PASSWORD}" dist/* diff --git a/packages/google-cloud-bigtable/.kokoro/release/common.cfg b/packages/google-cloud-bigtable/.kokoro/release/common.cfg index ceb054317811..d964a8f0692d 100644 --- a/packages/google-cloud-bigtable/.kokoro/release/common.cfg +++ b/packages/google-cloud-bigtable/.kokoro/release/common.cfg @@ -23,18 +23,8 @@ env_vars: { value: "github/python-bigtable/.kokoro/release.sh" } -# Fetch PyPI password -before_action { - fetch_keystore { - keystore_resource { - keystore_config_id: 73713 - keyname: "google_cloud_pypi_password" - } - } -} - # Tokens needed to report release status back to GitHub env_vars: { key: "SECRET_MANAGER_KEYS" - value: "releasetool-publish-reporter-app,releasetool-publish-reporter-googleapis-installation,releasetool-publish-reporter-pem" -} \ No newline at end of file + value: "releasetool-publish-reporter-app,releasetool-publish-reporter-googleapis-installation,releasetool-publish-reporter-pem,google-cloud-pypi-token" +} diff --git a/packages/google-cloud-bigtable/docs/_static/custom.css b/packages/google-cloud-bigtable/docs/_static/custom.css index bcd37bbd3c4a..b0a295464b23 100644 --- a/packages/google-cloud-bigtable/docs/_static/custom.css +++ b/packages/google-cloud-bigtable/docs/_static/custom.css @@ -1,9 +1,20 @@ div#python2-eol { border-color: red; border-width: medium; -} +} /* Ensure minimum width for 'Parameters' / 'Returns' column */ dl.field-list > dt { min-width: 100px } + +/* Insert space between methods for readability */ +dl.method { + padding-top: 10px; + padding-bottom: 10px +} + +/* Insert empty space between classes */ +dl.class { + padding-bottom: 50px +} diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata index 7607fcf6d2ae..e4171541fecb 100644 --- a/packages/google-cloud-bigtable/synth.metadata +++ b/packages/google-cloud-bigtable/synth.metadata @@ -4,7 +4,7 @@ "git": { "name": ".", "remote": "https://github.com/googleapis/python-bigtable.git", - "sha": "990c26d3dce9da83f385cad1b0094eed0c597aa4" + "sha": "39010afb8212bc21ec4538dadc164c4b4f30f5b8" } }, { @@ -19,14 +19,14 @@ "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "0a071b3460344886297a304253bf924aa68ddb7e" + "sha": "043cc620d6a6111816d9e09f2a97208565fde958" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "0a071b3460344886297a304253bf924aa68ddb7e" + "sha": "043cc620d6a6111816d9e09f2a97208565fde958" } } ], From 9d227cb20919ee6cb32b357227c3fdc461e835b1 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Mon, 19 Apr 2021 22:06:35 +0200 Subject: [PATCH 432/892] chore(deps): update dependency pytest to v6.2.3 (#289) --- .../google-cloud-bigtable/samples/beam/requirements-test.txt | 2 +- .../google-cloud-bigtable/samples/hello/requirements-test.txt | 2 +- .../samples/hello_happybase/requirements-test.txt | 2 +- .../samples/instanceadmin/requirements-test.txt | 2 +- .../samples/metricscaler/requirements-test.txt | 2 +- .../samples/quickstart/requirements-test.txt | 2 +- .../samples/quickstart_happybase/requirements-test.txt | 2 +- 
.../samples/snippets/filters/requirements-test.txt | 2 +- .../samples/snippets/reads/requirements-test.txt | 2 +- .../samples/snippets/writes/requirements-test.txt | 2 +- .../samples/tableadmin/requirements-test.txt | 2 +- 11 files changed, 11 insertions(+), 11 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements-test.txt b/packages/google-cloud-bigtable/samples/beam/requirements-test.txt index 7e460c8c866e..f7e3ec09da60 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements-test.txt @@ -1 +1 @@ -pytest==6.0.1 +pytest==6.2.3 diff --git a/packages/google-cloud-bigtable/samples/hello/requirements-test.txt b/packages/google-cloud-bigtable/samples/hello/requirements-test.txt index 781d4326c947..f7e3ec09da60 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements-test.txt @@ -1 +1 @@ -pytest==5.3.2 +pytest==6.2.3 diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt b/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt index 781d4326c947..f7e3ec09da60 100644 --- a/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt @@ -1 +1 @@ -pytest==5.3.2 +pytest==6.2.3 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt index 781d4326c947..f7e3ec09da60 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt @@ -1 +1 @@ -pytest==5.3.2 +pytest==6.2.3 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt index 470dbe7c16d9..86e32d682c49 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt @@ -1,3 +1,3 @@ -pytest==5.3.2 +pytest==6.2.3 mock==3.0.5 google-cloud-testutils diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt index 781d4326c947..f7e3ec09da60 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt @@ -1 +1 @@ -pytest==5.3.2 +pytest==6.2.3 diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt b/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt index 781d4326c947..f7e3ec09da60 100644 --- a/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt @@ -1 +1 @@ -pytest==5.3.2 +pytest==6.2.3 diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt index 781d4326c947..f7e3ec09da60 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt @@ -1 +1 @@ -pytest==5.3.2 +pytest==6.2.3 diff 
--git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt index 781d4326c947..f7e3ec09da60 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt @@ -1 +1 @@ -pytest==5.3.2 +pytest==6.2.3 diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt index 8855f3cf1f88..93f50ad13971 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt @@ -1,2 +1,2 @@ backoff==1.10.0 -pytest==5.3.2 +pytest==6.2.3 diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt index 781d4326c947..f7e3ec09da60 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt @@ -1 +1 @@ -pytest==5.3.2 +pytest==6.2.3 From c5b09154071a66fb1394f48ff8e8a433964598f8 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Mon, 19 Apr 2021 22:20:04 +0200 Subject: [PATCH 433/892] chore(deps): update dependency mock to v4 (#288) [![WhiteSource Renovate](https://app.renovatebot.com/images/banner.svg)](https://renovatebot.com) This PR contains the following updates: | Package | Change | Age | Adoption | Passing | Confidence | |---|---|---|---|---|---| | [mock](http://mock.readthedocs.org/en/latest/) | `==3.0.5` -> `==4.0.3` | [![age](https://badges.renovateapi.com/packages/pypi/mock/4.0.3/age-slim)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://badges.renovateapi.com/packages/pypi/mock/4.0.3/adoption-slim)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://badges.renovateapi.com/packages/pypi/mock/4.0.3/compatibility-slim/3.0.5)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://badges.renovateapi.com/packages/pypi/mock/4.0.3/confidence-slim/3.0.5)](https://docs.renovatebot.com/merge-confidence/) | --- ### Configuration :date: **Schedule**: At any time (no schedule defined). :vertical_traffic_light: **Automerge**: Disabled by config. Please merge this manually once you are satisfied. :recycle: **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. :no_bell: **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box. --- This PR has been generated by [WhiteSource Renovate](https://renovate.whitesourcesoftware.com). View repository job log [here](https://app.renovatebot.com/dashboard#github/googleapis/python-bigtable). 
--- .../samples/metricscaler/requirements-test.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt index 86e32d682c49..f5fc801aa064 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt @@ -1,3 +1,3 @@ pytest==6.2.3 -mock==3.0.5 +mock==4.0.3 google-cloud-testutils From 52eaecaa1495497d5c8deffcb11caec02f8c1a06 Mon Sep 17 00:00:00 2001 From: kolea2 <45548808+kolea2@users.noreply.github.com> Date: Wed, 21 Apr 2021 16:40:02 -0400 Subject: [PATCH 434/892] chore(revert): Revert "chore: prevent normalization of semver versioning (#292)" (#296) This reverts commit 05bc9aa0 --- packages/google-cloud-bigtable/setup.py | 16 +--------------- 1 file changed, 1 insertion(+), 15 deletions(-) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index f75c3a004af3..7a19c96d2671 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -17,20 +17,6 @@ import setuptools -# Disable version normalization performed by setuptools.setup() -try: - # Try the approach of using sic(), added in setuptools 46.1.0 - from setuptools import sic -except ImportError: - # Try the approach of replacing packaging.version.Version - sic = lambda v: v - try: - # setuptools >=39.0.0 uses packaging from setuptools.extern - from setuptools.extern import packaging - except ImportError: - # setuptools <39.0.0 uses packaging from pkg_resources.extern - from pkg_resources.extern import packaging - packaging.version.Version = packaging.version.LegacyVersion # Package metadata. @@ -76,7 +62,7 @@ setuptools.setup( name=name, - version=sic(version), + version=version, description=description, long_description=readme, author="Google LLC", From b0340eb29ccc1169cf2585d0406435cbb893a014 Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Wed, 21 Apr 2021 20:54:02 +0000 Subject: [PATCH 435/892] chore: release 2.1.0 (#294) :robot: I have created a release \*beep\* \*boop\* --- ## [2.1.0](https://www.github.com/googleapis/python-bigtable/compare/v2.0.0...v2.1.0) (2021-04-21) ### Features * customer managed keys (CMEK) ([#249](https://www.github.com/googleapis/python-bigtable/issues/249)) ([93df829](https://www.github.com/googleapis/python-bigtable/commit/93df82998cc0218cbc4a1bc2ab41a48b7478758d)) --- This PR was generated with [Release Please](https://github.com/googleapis/release-please). See [documentation](https://github.com/googleapis/release-please#release-please). 
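
A minimal sketch of the CMEK flow released here, mirroring the system test added earlier in this series (`instance.cluster(..., kms_key_name=...)`, `instance.create(...)`, `Table.get_encryption_info()`). The project, instance, cluster, table, and key names below are placeholders, not values from this repository:

```python
from google.cloud import bigtable
from google.cloud.bigtable import enums

client = bigtable.Client(project="my-project", admin=True)
instance = client.instance(
    "my-instance", instance_type=enums.Instance.Type.PRODUCTION
)

# Placeholder key path; the CMEK must be a regional key in the cluster's region.
kms_key_name = (
    "projects/my-project/locations/us-central1/"
    "keyRings/my-ring/cryptoKeys/my-key"
)
cluster = instance.cluster(
    "my-instance-c1",
    location_id="us-central1-f",
    serve_nodes=3,
    default_storage_type=enums.StorageType.SSD,
    kms_key_name=kms_key_name,
)
instance.create(clusters=[cluster]).result(timeout=120)

table = instance.table("my-table")
table.create()
# Per-cluster EncryptionInfo tuples, keyed by cluster id.
print(table.get_encryption_info())
```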
--- packages/google-cloud-bigtable/CHANGELOG.md | 7 +++++++ packages/google-cloud-bigtable/setup.py | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 5bd37baf1f16..ae316609e7f3 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,13 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.1.0](https://www.github.com/googleapis/python-bigtable/compare/v2.0.0...v2.1.0) (2021-04-21) + + +### Features + +* customer managed keys (CMEK) ([#249](https://www.github.com/googleapis/python-bigtable/issues/249)) ([93df829](https://www.github.com/googleapis/python-bigtable/commit/93df82998cc0218cbc4a1bc2ab41a48b7478758d)) + ## [2.0.0](https://www.github.com/googleapis/python-bigtable/compare/v1.7.0...v2.0.0) (2021-04-06) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 7a19c96d2671..fe2d6af2ed25 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -22,7 +22,7 @@ name = "google-cloud-bigtable" description = "Google Cloud Bigtable API client library" -version = "2.0.0" +version = "2.1.0" # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' From 2a7f91b1f97288329d44b216b235422d49b57067 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 27 Apr 2021 21:14:07 +0200 Subject: [PATCH 436/892] chore(deps): update dependency apache-beam to v2.29.0 (#301) --- packages/google-cloud-bigtable/samples/beam/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index 2e0625e13af1..78398e1f6a9a 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ -apache-beam==2.28.0 +apache-beam==2.29.0 google-cloud-bigtable<2.0.0 google-cloud-core==1.6.0 \ No newline at end of file From 8f5e0564174e9e8fbc0a1626fce3f0d993450167 Mon Sep 17 00:00:00 2001 From: kolea2 <45548808+kolea2@users.noreply.github.com> Date: Fri, 30 Apr 2021 12:48:02 -0400 Subject: [PATCH 437/892] feat: backup restore to different instance (#300) --- .../google/cloud/bigtable/backup.py | 29 +++++++++++++------ .../google-cloud-bigtable/tests/system.py | 23 ++++++++++++++- .../tests/unit/test_backup.py | 18 ++++++++++-- 3 files changed, 57 insertions(+), 13 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/backup.py b/packages/google-cloud-bigtable/google/cloud/bigtable/backup.py index 3666b7132520..564c97ad7a57 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/backup.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/backup.py @@ -395,17 +395,25 @@ def delete(self): request={"name": self.name} ) - def restore(self, table_id): + def restore(self, table_id, instance_id=None): """Creates a new Table by restoring from this Backup. The new Table - must be in the same Instance as the Instance containing the Backup. + can be created in the same Instance as the Instance containing the + Backup, or another Instance whose ID can be specified in the arguments. The returned Table ``long-running operation`` can be used to track the progress of the operation and to cancel it. The ``response`` type is ``Table``, if successful. 
+ :type table_id: str :param table_id: The ID of the Table to create and restore to. This Table must not already exist. - :returns: An instance of - :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture`. + + :type instance_id: str + :param instance_id: (Optional) The ID of the Instance to restore the + backup into, if different from the current one. + + :rtype: :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` + :returns: A future to be used to poll the status of the 'restore' + request. :raises: google.api_core.exceptions.AlreadyExists: If the table already exists. @@ -416,12 +424,15 @@ def restore(self, table_id): :raises: ValueError: If the parameters are invalid. """ api = self._instance._client._table_admin_client + if instance_id: + parent = BigtableTableAdminClient.instance_path( + project=self._instance._client.project, instance=instance_id, + ) + else: + parent = self._instance.name + return api.restore_table( - request={ - "parent": self._instance.name, - "table_id": table_id, - "backup": self.name, - } + request={"parent": parent, "table_id": table_id, "backup": self.name} ) def get_iam_policy(self): diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index 48f7e3bdfae5..9896154239ce 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -1119,12 +1119,33 @@ def test_backup(self): restored_table_id = "test-backup-table-restored" restored_table = Config.INSTANCE_DATA.table(restored_table_id) temp_table.restore( - restored_table_id, cluster_id=CLUSTER_ID_DATA, backup_id=temp_backup_id + restored_table_id, cluster_id=CLUSTER_ID_DATA, backup_id=temp_backup_id, ).result() tables = Config.INSTANCE_DATA.list_tables() self.assertIn(restored_table, tables) restored_table.delete() + # Testing `Backup.restore()` into a different instance: + # Setting up another instance... + alt_instance_id = "gcp-alt-" + UNIQUE_SUFFIX + alt_cluster_id = alt_instance_id + "-cluster" + alt_instance = Config.CLIENT.instance(alt_instance_id, labels=LABELS) + alt_cluster = alt_instance.cluster( + cluster_id=alt_cluster_id, location_id=LOCATION_ID, serve_nodes=SERVE_NODES, + ) + if not Config.IN_EMULATOR: + alt_instance.create(clusters=[alt_cluster]).result(timeout=10) + + # Testing `restore()`... + temp_backup.restore(restored_table_id, alt_instance_id).result() + restored_table = alt_instance.table(restored_table_id) + self.assertIn(restored_table, alt_instance.list_tables()) + restored_table.delete() + + # Tearing down the resources... 
+ if not Config.IN_EMULATOR: + retry_429(alt_instance.delete)() + class TestDataAPI(unittest.TestCase): @classmethod diff --git a/packages/google-cloud-bigtable/tests/unit/test_backup.py b/packages/google-cloud-bigtable/tests/unit/test_backup.py index 0a5ba74c1189..49168e04eb86 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_backup.py +++ b/packages/google-cloud-bigtable/tests/unit/test_backup.py @@ -32,6 +32,11 @@ class TestBackup(unittest.TestCase): BACKUP_ID = "backup-id" BACKUP_NAME = CLUSTER_NAME + "/backups/" + BACKUP_ID + ALT_INSTANCE = "other-instance-id" + ALT_INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + ALT_INSTANCE + ALT_CLUSTER_NAME = ALT_INSTANCE_NAME + "/clusters/" + CLUSTER_ID + ALT_BACKUP_NAME = ALT_CLUSTER_NAME + "/backups/" + BACKUP_ID + @staticmethod def _get_target_class(): from google.cloud.bigtable.backup import Backup @@ -736,7 +741,7 @@ def test_restore_cluster_not_set(self): with self.assertRaises(ValueError): backup.restore(self.TABLE_ID) - def test_restore_success(self): + def _restore_helper(self, instance_id=None, instance_name=None): op_future = object() client = _Client() api = client._table_admin_client = self._make_table_admin_client() @@ -751,17 +756,24 @@ def test_restore_success(self): expire_time=timestamp, ) - future = backup.restore(self.TABLE_ID) + future = backup.restore(self.TABLE_ID, instance_id) self.assertEqual(backup._cluster, self.CLUSTER_ID) self.assertIs(future, op_future) api.restore_table.assert_called_once_with( request={ - "parent": self.INSTANCE_NAME, + "parent": instance_name or self.INSTANCE_NAME, "table_id": self.TABLE_ID, "backup": self.BACKUP_NAME, } ) + api.restore_table.reset_mock() + + def test_restore_default(self): + self._restore_helper() + + def test_restore_to_another_instance(self): + self._restore_helper(self.ALT_INSTANCE, self.ALT_INSTANCE_NAME) def test_get_iam_policy(self): from google.cloud.bigtable.client import Client From 44f51c28deb3cfa4efa693a39ddc2acff99f8321 Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Fri, 30 Apr 2021 13:16:50 -0400 Subject: [PATCH 438/892] chore: release 2.2.0 (#303) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- packages/google-cloud-bigtable/CHANGELOG.md | 7 +++++++ packages/google-cloud-bigtable/setup.py | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index ae316609e7f3..ac209648e5df 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,13 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.2.0](https://www.github.com/googleapis/python-bigtable/compare/v2.1.0...v2.2.0) (2021-04-30) + + +### Features + +* backup restore to different instance ([#300](https://www.github.com/googleapis/python-bigtable/issues/300)) ([049a25f](https://www.github.com/googleapis/python-bigtable/commit/049a25f903bb6b062e41430b6e7ce6d7b164f22c)) + ## [2.1.0](https://www.github.com/googleapis/python-bigtable/compare/v2.0.0...v2.1.0) (2021-04-21) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index fe2d6af2ed25..b37a44e490f3 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -22,7 +22,7 @@ name = "google-cloud-bigtable" description = "Google Cloud Bigtable API client library" -version = 
"2.1.0" +version = "2.2.0" # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' From 26bc832b4e41174f3eb2928c4d0784c4aefa29d2 Mon Sep 17 00:00:00 2001 From: "google-cloud-policy-bot[bot]" <80869356+google-cloud-policy-bot[bot]@users.noreply.github.com> Date: Mon, 3 May 2021 17:16:05 +0000 Subject: [PATCH 439/892] chore: add SECURITY.md (#302) chore: add SECURITY.md --- packages/google-cloud-bigtable/SECURITY.md | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 packages/google-cloud-bigtable/SECURITY.md diff --git a/packages/google-cloud-bigtable/SECURITY.md b/packages/google-cloud-bigtable/SECURITY.md new file mode 100644 index 000000000000..8b58ae9c01ae --- /dev/null +++ b/packages/google-cloud-bigtable/SECURITY.md @@ -0,0 +1,7 @@ +# Security Policy + +To report a security issue, please use [g.co/vulnz](https://g.co/vulnz). + +The Google Security Team will respond within 5 working days of your report on g.co/vulnz. + +We use g.co/vulnz for our intake, and do coordination and disclosure here using GitHub Security Advisory to privately discuss and fix the issue. From 176ea7edf803f38636e3556c16c2dcffcd4366ab Mon Sep 17 00:00:00 2001 From: kolea2 <45548808+kolea2@users.noreply.github.com> Date: Mon, 3 May 2021 16:26:28 -0400 Subject: [PATCH 440/892] docs: add paramter mutation_timeout to instance.table docs (#305) --- .../google-cloud-bigtable/google/cloud/bigtable/instance.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py index 138d3bfc1423..e6e2ac027a3a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py @@ -643,6 +643,9 @@ def table(self, table_id, mutation_timeout=None, app_profile_id=None): :type table_id: str :param table_id: The ID of the table. + :type mutation_timeout: int + :param mutation_timeout: (Optional) The overriding mutation timeout. + :type app_profile_id: str :param app_profile_id: (Optional) The unique name of the AppProfile. From 9fb1745feb0d7f302a9ae266a9f092a3bb386bc7 Mon Sep 17 00:00:00 2001 From: Billy Jacobson Date: Tue, 4 May 2021 11:00:06 -0400 Subject: [PATCH 441/892] samples: update tests to use uuid for bigtable table ids (#304) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Thank you for opening a Pull Request! Before submitting your PR, there are a few things you can do to make sure it goes smoothly: - [ ] Make sure to open an issue as a [bug/issue](https://github.com/googleapis/python-bigtable/issues/new/choose) before writing your code! 
That way we can discuss the change, evaluate designs, and agree on the general idea - [ ] Ensure the tests and linter pass - [ ] Code coverage does not decrease (if any source code was changed) - [ ] Appropriate docs were updated (if necessary) Fixes #195 🦕 --- .../samples/quickstart/main_test.py | 6 ++--- .../samples/quickstart_happybase/main_test.py | 6 ++--- .../samples/tableadmin/tableadmin_test.py | 27 +++++++++---------- 3 files changed, 16 insertions(+), 23 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/quickstart/main_test.py b/packages/google-cloud-bigtable/samples/quickstart/main_test.py index 55c06f413df9..ea1e8776ba7a 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/main_test.py +++ b/packages/google-cloud-bigtable/samples/quickstart/main_test.py @@ -13,7 +13,7 @@ # limitations under the License. import os -import random +import uuid from google.cloud import bigtable import pytest @@ -24,13 +24,11 @@ PROJECT = os.environ['GOOGLE_CLOUD_PROJECT'] BIGTABLE_INSTANCE = os.environ['BIGTABLE_INSTANCE'] TABLE_ID_FORMAT = 'quickstart-test-{}' -TABLE_ID_RANGE = 10000 @pytest.fixture() def table(): - table_id = TABLE_ID_FORMAT.format( - random.randrange(TABLE_ID_RANGE)) + table_id = TABLE_ID_FORMAT.format(uuid.uuid4().hex[:8]) client = bigtable.Client(project=PROJECT, admin=True) instance = client.instance(BIGTABLE_INSTANCE) table = instance.table(table_id) diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/main_test.py b/packages/google-cloud-bigtable/samples/quickstart_happybase/main_test.py index 5d4ae1e7acd7..26afa6d6bfe1 100644 --- a/packages/google-cloud-bigtable/samples/quickstart_happybase/main_test.py +++ b/packages/google-cloud-bigtable/samples/quickstart_happybase/main_test.py @@ -13,7 +13,7 @@ # limitations under the License. import os -import random +import uuid from google.cloud import bigtable import pytest @@ -24,13 +24,11 @@ PROJECT = os.environ['GOOGLE_CLOUD_PROJECT'] BIGTABLE_INSTANCE = os.environ['BIGTABLE_INSTANCE'] TABLE_ID_FORMAT = 'quickstart-hb-test-{}' -TABLE_ID_RANGE = 10000 @pytest.fixture() def table(): - table_id = TABLE_ID_FORMAT.format( - random.randrange(TABLE_ID_RANGE)) + table_id = TABLE_ID_FORMAT.format(uuid.uuid4().hex[:8]) client = bigtable.Client(project=PROJECT, admin=True) instance = client.instance(BIGTABLE_INSTANCE) table = instance.table(table_id) diff --git a/packages/google-cloud-bigtable/samples/tableadmin/tableadmin_test.py b/packages/google-cloud-bigtable/samples/tableadmin/tableadmin_test.py index 782f6b621677..c0ef09d12fae 100755 --- a/packages/google-cloud-bigtable/samples/tableadmin/tableadmin_test.py +++ b/packages/google-cloud-bigtable/samples/tableadmin/tableadmin_test.py @@ -14,7 +14,7 @@ # limitations under the License. import os -import random +import uuid from tableadmin import create_table from tableadmin import delete_table @@ -22,18 +22,16 @@ PROJECT = os.environ['GOOGLE_CLOUD_PROJECT'] BIGTABLE_INSTANCE = os.environ['BIGTABLE_INSTANCE'] -TABLE_NAME_FORMAT = 'tableadmin-test-{}' -TABLE_NAME_RANGE = 10000 +TABLE_ID_FORMAT = 'tableadmin-test-{}' def test_run_table_operations(capsys): - table_name = TABLE_NAME_FORMAT.format( - random.randrange(TABLE_NAME_RANGE)) + table_id = TABLE_ID_FORMAT.format(uuid.uuid4().hex[:8]) - run_table_operations(PROJECT, BIGTABLE_INSTANCE, table_name) + run_table_operations(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() - assert 'Creating the ' + table_name + ' table.' in out + assert 'Creating the ' + table_id + ' table.' 
in out assert 'Listing tables in current project.' in out assert 'Creating column family cf1 with with MaxAge GC Rule' in out assert 'Created column family cf1 with MaxAge GC Rule.' in out @@ -50,17 +48,16 @@ def test_run_table_operations(capsys): assert 'Delete a column family cf2...' in out assert 'Column family cf2 deleted successfully.' in out - delete_table(PROJECT, BIGTABLE_INSTANCE, table_name) + delete_table(PROJECT, BIGTABLE_INSTANCE, table_id) def test_delete_table(capsys): - table_name = TABLE_NAME_FORMAT.format( - random.randrange(TABLE_NAME_RANGE)) - create_table(PROJECT, BIGTABLE_INSTANCE, table_name) + table_id = TABLE_ID_FORMAT.format(uuid.uuid4().hex[:8]) + create_table(PROJECT, BIGTABLE_INSTANCE, table_id) - delete_table(PROJECT, BIGTABLE_INSTANCE, table_name) + delete_table(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() - assert 'Table ' + table_name + ' exists.' in out - assert 'Deleting ' + table_name + ' table.' in out - assert 'Deleted ' + table_name + ' table.' in out + assert 'Table ' + table_id + ' exists.' in out + assert 'Deleting ' + table_id + ' table.' in out + assert 'Deleted ' + table_id + ' table.' in out From 23f9745051fe138ddfed67964ef1cc16438c43a8 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 4 May 2021 23:02:44 +0200 Subject: [PATCH 442/892] chore(deps): update dependency pytest to v6.2.4 (#307) --- .../google-cloud-bigtable/samples/beam/requirements-test.txt | 2 +- .../google-cloud-bigtable/samples/hello/requirements-test.txt | 2 +- .../samples/hello_happybase/requirements-test.txt | 2 +- .../samples/instanceadmin/requirements-test.txt | 2 +- .../samples/metricscaler/requirements-test.txt | 2 +- .../samples/quickstart/requirements-test.txt | 2 +- .../samples/quickstart_happybase/requirements-test.txt | 2 +- .../samples/snippets/filters/requirements-test.txt | 2 +- .../samples/snippets/reads/requirements-test.txt | 2 +- .../samples/snippets/writes/requirements-test.txt | 2 +- .../samples/tableadmin/requirements-test.txt | 2 +- 11 files changed, 11 insertions(+), 11 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements-test.txt b/packages/google-cloud-bigtable/samples/beam/requirements-test.txt index f7e3ec09da60..95ea1e6a02b0 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements-test.txt @@ -1 +1 @@ -pytest==6.2.3 +pytest==6.2.4 diff --git a/packages/google-cloud-bigtable/samples/hello/requirements-test.txt b/packages/google-cloud-bigtable/samples/hello/requirements-test.txt index f7e3ec09da60..95ea1e6a02b0 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements-test.txt @@ -1 +1 @@ -pytest==6.2.3 +pytest==6.2.4 diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt b/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt index f7e3ec09da60..95ea1e6a02b0 100644 --- a/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt @@ -1 +1 @@ -pytest==6.2.3 +pytest==6.2.4 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt index f7e3ec09da60..95ea1e6a02b0 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt +++ 
b/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt @@ -1 +1 @@ -pytest==6.2.3 +pytest==6.2.4 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt index f5fc801aa064..7903fa1e1133 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt @@ -1,3 +1,3 @@ -pytest==6.2.3 +pytest==6.2.4 mock==4.0.3 google-cloud-testutils diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt index f7e3ec09da60..95ea1e6a02b0 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt @@ -1 +1 @@ -pytest==6.2.3 +pytest==6.2.4 diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt b/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt index f7e3ec09da60..95ea1e6a02b0 100644 --- a/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt @@ -1 +1 @@ -pytest==6.2.3 +pytest==6.2.4 diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt index f7e3ec09da60..95ea1e6a02b0 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt @@ -1 +1 @@ -pytest==6.2.3 +pytest==6.2.4 diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt index f7e3ec09da60..95ea1e6a02b0 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt @@ -1 +1 @@ -pytest==6.2.3 +pytest==6.2.4 diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt index 93f50ad13971..766a8035d690 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt @@ -1,2 +1,2 @@ backoff==1.10.0 -pytest==6.2.3 +pytest==6.2.4 diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt index f7e3ec09da60..95ea1e6a02b0 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt @@ -1 +1 @@ -pytest==6.2.3 +pytest==6.2.4 From 890a74334a39e7266e0ca05ab149a93589359ceb Mon Sep 17 00:00:00 2001 From: Chris Rossi Date: Thu, 6 May 2021 12:56:59 -0400 Subject: [PATCH 443/892] test: add tests for instanceadmin sample (#290) * test: add tests for instanceadmin sample Closes #201. * lint * Fix CLI. * Remove anything to do with deprecated dev instances. * Make sure we don't litter our test project with instances. * Rearrange imports to make linter happy. 
--- .../samples/instanceadmin/instanceadmin.py | 487 ++++++++---------- .../instanceadmin/test_instanceadmin.py | 152 ++++++ 2 files changed, 380 insertions(+), 259 deletions(-) create mode 100644 packages/google-cloud-bigtable/samples/instanceadmin/test_instanceadmin.py diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/instanceadmin.py b/packages/google-cloud-bigtable/samples/instanceadmin/instanceadmin.py index 482806ac989a..13e992eec13d 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/instanceadmin.py +++ b/packages/google-cloud-bigtable/samples/instanceadmin/instanceadmin.py @@ -1,259 +1,228 @@ -#!/usr/bin/env python - -# Copyright 2018, Google LLC -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Demonstrates how to connect to Cloud Bigtable and run some basic operations. -# http://www.apache.org/licenses/LICENSE-2.0 -Prerequisites: -- Create a Cloud Bigtable project. - https://cloud.google.com/bigtable/docs/ -- Set your Google Application Default Credentials. - https://developers.google.com/identity/protocols/application-default-credentials - -Operations performed: -- Create a Cloud Bigtable Instance. -- List Instance for a Cloud Bigtable. -- Delete a Cloud Bigtable Instance. -- Create a Cloud Bigtable Cluster. -- List Cloud Bigtable Clusters. -- Delete a Cloud Bigtable Cluster. -""" - -import argparse - -from google.cloud import bigtable -from google.cloud.bigtable import enums - - -def run_instance_operations(project_id, instance_id): - ''' Check Instance exists. - Creates a Production instance with default Cluster. - List instances in a project. - List clusters in an instance. - - :type project_id: str - :param project_id: Project id of the client. - - :type instance_id: str - :param instance_id: Instance of the client. 
- ''' - client = bigtable.Client(project=project_id, admin=True) - location_id = 'us-central1-f' - serve_nodes = 1 - storage_type = enums.StorageType.SSD - production = enums.Instance.Type.PRODUCTION - labels = {'prod-label': 'prod-label'} - instance = client.instance(instance_id, instance_type=production, - labels=labels) - - # [START bigtable_check_instance_exists] - if not instance.exists(): - print('Instance {} does not exists.'.format(instance_id)) - else: - print('Instance {} already exists.'.format(instance_id)) - # [END bigtable_check_instance_exists] - - # [START bigtable_create_prod_instance] - cluster = instance.cluster("ssd-cluster1", location_id=location_id, - serve_nodes=serve_nodes, - default_storage_type=storage_type) - if not instance.exists(): - print('\nCreating an Instance') - # Create instance with given options - instance.create(clusters=[cluster]) - print('\nCreated instance: {}'.format(instance_id)) - # [END bigtable_create_prod_instance] - - # [START bigtable_list_instances] - print('\nListing Instances:') - for instance_local in client.list_instances()[0]: - print(instance_local.instance_id) - # [END bigtable_list_instances] - - # [START bigtable_get_instance] - print('\nName of instance:{}\nLabels:{}'.format(instance.display_name, - instance.labels)) - # [END bigtable_get_instance] - - # [START bigtable_get_clusters] - print('\nListing Clusters...') - for cluster in instance.list_clusters()[0]: - print(cluster.cluster_id) - # [END bigtable_get_clusters] - - -def create_dev_instance(project_id, instance_id, cluster_id): - ''' Creates a Development instance with the name "hdd-instance" - location us-central1-f - Cluster nodes should not be set while creating Development - Instance - - :type project_id: str - :param project_id: Project id of the client. - - :type instance_id: str - :param instance_id: Instance of the client. - ''' - - client = bigtable.Client(project=project_id, admin=True) - - # [START bigtable_create_dev_instance] - print('\nCreating a DEVELOPMENT Instance') - # Set options to create an Instance - location_id = 'us-central1-f' - development = enums.Instance.Type.DEVELOPMENT - storage_type = enums.StorageType.HDD - labels = {'dev-label': 'dev-label'} - - # Create instance with given options - instance = client.instance(instance_id, instance_type=development, - labels=labels) - cluster = instance.cluster(cluster_id, location_id=location_id, - default_storage_type=storage_type) - - # Create development instance with given options - if not instance.exists(): - instance.create(clusters=[cluster]) - print('Created development instance: {}'.format(instance_id)) - else: - print('Instance {} already exists.'.format(instance_id)) - - # [END bigtable_create_dev_instance] - - -def delete_instance(project_id, instance_id): - ''' Delete the Instance - - :type project_id: str - :param project_id: Project id of the client. - - :type instance_id: str - :param instance_id: Instance of the client. - ''' - - client = bigtable.Client(project=project_id, admin=True) - instance = client.instance(instance_id) - # [START bigtable_delete_instance] - print('\nDeleting Instance') - if not instance.exists(): - print('Instance {} does not exists.'.format(instance_id)) - else: - instance.delete() - print('Deleted Instance: {}'.format(instance_id)) - # [END bigtable_delete_instance] - - -def add_cluster(project_id, instance_id, cluster_id): - ''' Add Cluster - - :type project_id: str - :param project_id: Project id of the client. 
- - :type instance_id: str - :param instance_id: Instance of the client. - - :type cluster_id: str - :param cluster_id: Cluster id. - ''' - client = bigtable.Client(project=project_id, admin=True) - instance = client.instance(instance_id) - - location_id = 'us-central1-a' - serve_nodes = 1 - storage_type = enums.StorageType.SSD - - if not instance.exists(): - print('Instance {} does not exists.'.format(instance_id)) - else: - print('\nAdding Cluster to Instance {}'.format(instance_id)) - # [START bigtable_create_cluster] - print('\nListing Clusters...') - for cluster in instance.list_clusters()[0]: - print(cluster.cluster_id) - cluster = instance.cluster(cluster_id, location_id=location_id, - serve_nodes=serve_nodes, - default_storage_type=storage_type) - if cluster.exists(): - print( - '\nCluster not created, as {} already exists.'. - format(cluster_id) - ) - else: - cluster.create() - print('\nCluster created: {}'.format(cluster_id)) - # [END bigtable_create_cluster] - - -def delete_cluster(project_id, instance_id, cluster_id): - ''' Delete the cluster - - :type project_id: str - :param project_id: Project id of the client. - - :type instance_id: str - :param instance_id: Instance of the client. - - :type cluster_id: str - :param cluster_id: Cluster id. - ''' - - client = bigtable.Client(project=project_id, admin=True) - instance = client.instance(instance_id) - cluster = instance.cluster(cluster_id) - - # [START bigtable_delete_cluster] - print('\nDeleting Cluster') - if cluster.exists(): - cluster.delete() - print('Cluster deleted: {}'.format(cluster_id)) - else: - print('\nCluster {} does not exist.'.format(cluster_id)) - - # [END bigtable_delete_cluster] - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description=__doc__, - formatter_class=argparse.ArgumentDefaultsHelpFormatter) - - parser.add_argument('command', - help='run, dev-instance, del-instance, \ - add-cluster or del-cluster. \ - Operation to perform on Instance.') - parser.add_argument('project_id', - help='Your Cloud Platform project ID.') - parser.add_argument('instance_id', - help='ID of the Cloud Bigtable instance to \ - connect to.') - parser.add_argument('cluster_id', - help='ID of the Cloud Bigtable cluster to \ - connect to.') - - args = parser.parse_args() - - if args.command.lower() == 'run': - run_instance_operations(args.project_id, args.instance_id) - elif args.command.lower() == 'dev-instance': - create_dev_instance(args.project_id, args.instance_id, - args.cluster_id) - elif args.command.lower() == 'del-instance': - delete_instance(args.project_id, args.instance_id) - elif args.command.lower() == 'add-cluster': - add_cluster(args.project_id, args.instance_id, args.cluster_id) - elif args.command.lower() == 'del-cluster': - delete_cluster(args.project_id, args.instance_id, args.cluster_id) - else: - print('Command should be either run \n Use argument -h, \ - --help to show help and exit.') +#!/usr/bin/env python + +# Copyright 2018, Google LLC +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Demonstrates how to connect to Cloud Bigtable and run some basic operations. +# http://www.apache.org/licenses/LICENSE-2.0 +Prerequisites: +- Create a Cloud Bigtable project. + https://cloud.google.com/bigtable/docs/ +- Set your Google Application Default Credentials. + https://developers.google.com/identity/protocols/application-default-credentials + +Operations performed: +- Create a Cloud Bigtable Instance. +- List Instance for a Cloud Bigtable. +- Delete a Cloud Bigtable Instance. +- Create a Cloud Bigtable Cluster. +- List Cloud Bigtable Clusters. +- Delete a Cloud Bigtable Cluster. +""" + +import argparse + +from google.cloud import bigtable +from google.cloud.bigtable import enums + + +def run_instance_operations(project_id, instance_id, cluster_id): + """Check Instance exists. + Creates a Production instance with default Cluster. + List instances in a project. + List clusters in an instance. + + :type project_id: str + :param project_id: Project id of the client. + + :type instance_id: str + :param instance_id: Instance of the client. + """ + client = bigtable.Client(project=project_id, admin=True) + location_id = "us-central1-f" + serve_nodes = 1 + storage_type = enums.StorageType.SSD + labels = {"prod-label": "prod-label"} + instance = client.instance(instance_id, labels=labels) + + # [START bigtable_check_instance_exists] + if not instance.exists(): + print("Instance {} does not exist.".format(instance_id)) + else: + print("Instance {} already exists.".format(instance_id)) + # [END bigtable_check_instance_exists] + + # [START bigtable_create_prod_instance] + cluster = instance.cluster( + cluster_id, + location_id=location_id, + serve_nodes=serve_nodes, + default_storage_type=storage_type, + ) + if not instance.exists(): + print("\nCreating an instance") + # Create instance with given options + instance.create(clusters=[cluster]) + print("\nCreated instance: {}".format(instance_id)) + # [END bigtable_create_prod_instance] + + # [START bigtable_list_instances] + print("\nListing instances:") + for instance_local in client.list_instances()[0]: + print(instance_local.instance_id) + # [END bigtable_list_instances] + + # [START bigtable_get_instance] + print( + "\nName of instance: {}\nLabels: {}".format( + instance.display_name, instance.labels + ) + ) + # [END bigtable_get_instance] + + # [START bigtable_get_clusters] + print("\nListing clusters...") + for cluster in instance.list_clusters()[0]: + print(cluster.cluster_id) + # [END bigtable_get_clusters] + + +def delete_instance(project_id, instance_id): + """Delete the Instance + + :type project_id: str + :param project_id: Project id of the client. + + :type instance_id: str + :param instance_id: Instance of the client. + """ + + client = bigtable.Client(project=project_id, admin=True) + instance = client.instance(instance_id) + # [START bigtable_delete_instance] + print("\nDeleting instance") + if not instance.exists(): + print("Instance {} does not exist.".format(instance_id)) + else: + instance.delete() + print("Deleted instance: {}".format(instance_id)) + # [END bigtable_delete_instance] + + +def add_cluster(project_id, instance_id, cluster_id): + """Add Cluster + + :type project_id: str + :param project_id: Project id of the client. + + :type instance_id: str + :param instance_id: Instance of the client. + + :type cluster_id: str + :param cluster_id: Cluster id. 
+ """ + client = bigtable.Client(project=project_id, admin=True) + instance = client.instance(instance_id) + + location_id = "us-central1-a" + serve_nodes = 1 + storage_type = enums.StorageType.SSD + + if not instance.exists(): + print("Instance {} does not exist.".format(instance_id)) + else: + print("\nAdding cluster to instance {}".format(instance_id)) + # [START bigtable_create_cluster] + print("\nListing clusters...") + for cluster in instance.list_clusters()[0]: + print(cluster.cluster_id) + cluster = instance.cluster( + cluster_id, + location_id=location_id, + serve_nodes=serve_nodes, + default_storage_type=storage_type, + ) + if cluster.exists(): + print("\nCluster not created, as {} already exists.".format(cluster_id)) + else: + cluster.create() + print("\nCluster created: {}".format(cluster_id)) + # [END bigtable_create_cluster] + + +def delete_cluster(project_id, instance_id, cluster_id): + """Delete the cluster + + :type project_id: str + :param project_id: Project id of the client. + + :type instance_id: str + :param instance_id: Instance of the client. + + :type cluster_id: str + :param cluster_id: Cluster id. + """ + + client = bigtable.Client(project=project_id, admin=True) + instance = client.instance(instance_id) + cluster = instance.cluster(cluster_id) + + # [START bigtable_delete_cluster] + print("\nDeleting cluster") + if cluster.exists(): + cluster.delete() + print("Cluster deleted: {}".format(cluster_id)) + else: + print("\nCluster {} does not exist.".format(cluster_id)) + + # [END bigtable_delete_cluster] + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + "command", + help="run, del-instance, \ + add-cluster or del-cluster. \ + Operation to perform on Instance.", + ) + parser.add_argument("project_id", help="Your Cloud Platform project ID.") + parser.add_argument( + "instance_id", + help="ID of the Cloud Bigtable instance to \ + connect to.", + ) + parser.add_argument( + "cluster_id", + help="ID of the Cloud Bigtable cluster to \ + connect to.", + ) + + args = parser.parse_args() + + if args.command.lower() == "run": + run_instance_operations(args.project_id, args.instance_id, args.cluster_id) + elif args.command.lower() == "del-instance": + delete_instance(args.project_id, args.instance_id) + elif args.command.lower() == "add-cluster": + add_cluster(args.project_id, args.instance_id, args.cluster_id) + elif args.command.lower() == "del-cluster": + delete_cluster(args.project_id, args.instance_id, args.cluster_id) + else: + print( + "Command should be either run \n Use argument -h, \ + --help to show help and exit." + ) diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/test_instanceadmin.py b/packages/google-cloud-bigtable/samples/instanceadmin/test_instanceadmin.py new file mode 100644 index 000000000000..a89ebc765f2e --- /dev/null +++ b/packages/google-cloud-bigtable/samples/instanceadmin/test_instanceadmin.py @@ -0,0 +1,152 @@ +# Copyright 2018 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import random +import warnings + +from google.cloud import bigtable + +import pytest + +import instanceadmin + + +PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] +ID_FORMAT = "instanceadmin-test-{:06}" +ID_RANGE = 1000000 + +INSTANCE = ID_FORMAT.format(random.randrange(ID_RANGE)) +CLUSTER1 = ID_FORMAT.format(random.randrange(ID_RANGE)) +CLUSTER2 = ID_FORMAT.format(random.randrange(ID_RANGE)) + + +@pytest.fixture(scope="module", autouse=True) +def preclean(): + """In case any test instances weren't cleared out in a previous run.""" + client = bigtable.Client(project=PROJECT, admin=True) + for instance in client.list_instances()[0]: + if instance.instance_id.startswith("instanceadmin-test-"): + warnings.warn(f"Deleting leftover test instance: {instance.instance_id}") + instance.delete() + + +@pytest.fixture +def dispose_of(): + instances = [] + + def disposal(instance): + instances.append(instance) + + yield disposal + + client = bigtable.Client(project=PROJECT, admin=True) + for instance_id in instances: + instance = client.instance(instance_id) + if instance.exists(): + instanceadmin.delete_instance(PROJECT, INSTANCE) + + +def test_run_instance_operations(capsys, dispose_of): + dispose_of(INSTANCE) + + instanceadmin.run_instance_operations(PROJECT, INSTANCE, CLUSTER1) + out = capsys.readouterr().out + assert f"Instance {INSTANCE} does not exist." in out + assert "Creating an instance" in out + assert f"Created instance: {INSTANCE}" in out + assert "Listing instances" in out + assert f"\n{INSTANCE}\n" in out + assert f"Name of instance: {INSTANCE}" in out + assert "Labels: {'prod-label': 'prod-label'}" in out + assert "Listing clusters..." in out + assert f"\n{CLUSTER1}\n" in out + + instanceadmin.run_instance_operations(PROJECT, INSTANCE, CLUSTER1) + out = capsys.readouterr().out + assert f"Instance {INSTANCE} already exists." in out + assert "Listing instances" in out + assert f"\n{INSTANCE}\n" in out + assert f"Name of instance: {INSTANCE}" in out + assert "Labels: {'prod-label': 'prod-label'}" in out + assert "Listing clusters..." in out + assert f"\n{CLUSTER1}\n" in out + + +def test_delete_instance(capsys, dispose_of): + dispose_of(INSTANCE) + + # Can't delete it, it doesn't exist + instanceadmin.delete_instance(PROJECT, INSTANCE) + out = capsys.readouterr().out + assert "Deleting instance" in out + assert f"Instance {INSTANCE} does not exist" in out + + # Ok, create it then + instanceadmin.run_instance_operations(PROJECT, INSTANCE, CLUSTER1) + capsys.readouterr() # throw away output + + # Now delete it + instanceadmin.delete_instance(PROJECT, INSTANCE) + out = capsys.readouterr().out + assert "Deleting instance" in out + assert f"Deleted instance: {INSTANCE}" in out + + +def test_add_and_delete_cluster(capsys, dispose_of): + dispose_of(INSTANCE) + + # This won't work, because the instance isn't created yet + instanceadmin.add_cluster(PROJECT, INSTANCE, CLUSTER2) + out = capsys.readouterr().out + assert f"Instance {INSTANCE} does not exist" in out + + # Get the instance created + instanceadmin.run_instance_operations(PROJECT, INSTANCE, CLUSTER1) + capsys.readouterr() # throw away output + + # Add a cluster to that instance + instanceadmin.add_cluster(PROJECT, INSTANCE, CLUSTER2) + out = capsys.readouterr().out + assert f"Adding cluster to instance {INSTANCE}" in out + assert "Listing clusters..." 
in out + assert f"\n{CLUSTER1}\n" in out + assert f"Cluster created: {CLUSTER2}" in out + + # Try to add the same cluster again, won't work + instanceadmin.add_cluster(PROJECT, INSTANCE, CLUSTER2) + out = capsys.readouterr().out + assert "Listing clusters..." in out + assert f"\n{CLUSTER1}\n" in out + assert f"\n{CLUSTER2}\n" in out + assert f"Cluster not created, as {CLUSTER2} already exists." + + # Now delete it + instanceadmin.delete_cluster(PROJECT, INSTANCE, CLUSTER2) + out = capsys.readouterr().out + assert "Deleting cluster" in out + assert f"Cluster deleted: {CLUSTER2}" in out + + # Verify deletion + instanceadmin.run_instance_operations(PROJECT, INSTANCE, CLUSTER1) + out = capsys.readouterr().out + assert "Listing clusters..." in out + assert f"\n{CLUSTER1}\n" in out + assert f"\n{CLUSTER2}\n" not in out + + # Try deleting it again, for fun (and coverage) + instanceadmin.delete_cluster(PROJECT, INSTANCE, CLUSTER2) + out = capsys.readouterr().out + assert "Deleting cluster" in out + assert f"Cluster {CLUSTER2} does not exist" in out From 72b30331fd6c1a2ab8290db7895705442d7f4034 Mon Sep 17 00:00:00 2001 From: Chris Rossi Date: Tue, 11 May 2021 15:31:49 -0400 Subject: [PATCH 444/892] test: only cleanup stale instances if more than an hour old (#309) A timestamp is now embedded in the test instance id in order to determine when a test instance is created. Test instances more than an hour old are considered stale and removed. --- .../instanceadmin/test_instanceadmin.py | 31 +++++++++++++------ 1 file changed, 21 insertions(+), 10 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/test_instanceadmin.py b/packages/google-cloud-bigtable/samples/instanceadmin/test_instanceadmin.py index a89ebc765f2e..c63222953cf3 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/test_instanceadmin.py +++ b/packages/google-cloud-bigtable/samples/instanceadmin/test_instanceadmin.py @@ -14,6 +14,7 @@ import os import random +import time import warnings from google.cloud import bigtable @@ -24,22 +25,32 @@ PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] -ID_FORMAT = "instanceadmin-test-{:06}" -ID_RANGE = 1000000 +INSTANCE_ID_FORMAT = "instanceadmin-{:03}-{}" +CLUSTER_ID_FORMAT = "instanceadmin-{:03}" +ID_RANGE = 1000 -INSTANCE = ID_FORMAT.format(random.randrange(ID_RANGE)) -CLUSTER1 = ID_FORMAT.format(random.randrange(ID_RANGE)) -CLUSTER2 = ID_FORMAT.format(random.randrange(ID_RANGE)) +INSTANCE = INSTANCE_ID_FORMAT.format(random.randrange(ID_RANGE), int(time.time())) +CLUSTER1 = CLUSTER_ID_FORMAT.format(random.randrange(ID_RANGE)) +CLUSTER2 = CLUSTER_ID_FORMAT.format(random.randrange(ID_RANGE)) @pytest.fixture(scope="module", autouse=True) def preclean(): - """In case any test instances weren't cleared out in a previous run.""" + """In case any test instances weren't cleared out in a previous run. + + Deletes any test instances that were created over an hour ago. Newer instances may + be being used by a concurrent test run. 
+ """ client = bigtable.Client(project=PROJECT, admin=True) for instance in client.list_instances()[0]: - if instance.instance_id.startswith("instanceadmin-test-"): - warnings.warn(f"Deleting leftover test instance: {instance.instance_id}") - instance.delete() + if instance.instance_id.startswith("instanceadmin-"): + timestamp = instance.instance_id.split("-")[-1] + timestamp = int(timestamp) + if time.time() - timestamp > 3600: + warnings.warn( + f"Deleting leftover test instance: {instance.instance_id}" + ) + instance.delete() @pytest.fixture @@ -55,7 +66,7 @@ def disposal(instance): for instance_id in instances: instance = client.instance(instance_id) if instance.exists(): - instanceadmin.delete_instance(PROJECT, INSTANCE) + instance.delete() def test_run_instance_operations(capsys, dispose_of): From ecfcc7ee0dce6d4ba66a51243889db5e14b7b168 Mon Sep 17 00:00:00 2001 From: Anthonios Partheniou Date: Tue, 11 May 2021 17:08:02 -0400 Subject: [PATCH 445/892] chore: add library type to .repo-metadata.json (#312) --- packages/google-cloud-bigtable/.repo-metadata.json | 1 + 1 file changed, 1 insertion(+) diff --git a/packages/google-cloud-bigtable/.repo-metadata.json b/packages/google-cloud-bigtable/.repo-metadata.json index 33b5c73a3fae..883fd57c0ce0 100644 --- a/packages/google-cloud-bigtable/.repo-metadata.json +++ b/packages/google-cloud-bigtable/.repo-metadata.json @@ -6,6 +6,7 @@ "issue_tracker": "https://issuetracker.google.com/savedsearches/559777", "release_level": "ga", "language": "python", + "library_type": "GAPIC_COMBO", "repo": "googleapis/python-bigtable", "distribution_name": "google-cloud-bigtable", "api_id": "bigtable.googleapis.com", From a0150d0a22a72ef28b767959eed382c9a82c9580 Mon Sep 17 00:00:00 2001 From: Anthonios Partheniou Date: Thu, 13 May 2021 13:23:41 -0400 Subject: [PATCH 446/892] chore: migrate to owl bot (#313) * chore: migrate to owl bot * chore: copy files from googleapis-gen a21f1091413a260393548c1b2ac44b7347923f08 * chore: run the post processor * Remove outdated comment in noxfile.py --- .../.github/.OwlBot.lock.yaml | 4 + .../.github/.OwlBot.yaml | 28 +++ .../.pre-commit-config.yaml | 2 +- .../google-cloud-bigtable/CONTRIBUTING.rst | 16 +- packages/google-cloud-bigtable/noxfile.py | 6 +- .../{synth.py => owlbot.py} | 41 ++-- packages/google-cloud-bigtable/synth.metadata | 187 ------------------ 7 files changed, 52 insertions(+), 232 deletions(-) create mode 100644 packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml create mode 100644 packages/google-cloud-bigtable/.github/.OwlBot.yaml rename packages/google-cloud-bigtable/{synth.py => owlbot.py} (63%) delete mode 100644 packages/google-cloud-bigtable/synth.metadata diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml new file mode 100644 index 000000000000..d49860b32e70 --- /dev/null +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -0,0 +1,4 @@ +docker: + digest: sha256:457583330eec64daa02aeb7a72a04d33e7be2428f646671ce4045dcbc0191b1e + image: gcr.io/repo-automation-bots/owlbot-python:latest + diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.yaml new file mode 100644 index 000000000000..7a10c025ca9d --- /dev/null +++ b/packages/google-cloud-bigtable/.github/.OwlBot.yaml @@ -0,0 +1,28 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the 
License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +docker: + image: gcr.io/repo-automation-bots/owlbot-python:latest + +deep-remove-regex: + - /owl-bot-staging + +deep-copy-regex: + - source: /google/bigtable/admin/(v.*)/.*-py/(.*) + dest: /owl-bot-staging/admin/$1/$2 + - source: /google/bigtable/(v.*)/.*-py/(.*) + dest: /owl-bot-staging/$1/$2 + +begin-after-commit-hash: a21f1091413a260393548c1b2ac44b7347923f08 + diff --git a/packages/google-cloud-bigtable/.pre-commit-config.yaml b/packages/google-cloud-bigtable/.pre-commit-config.yaml index 8912e9b5d7d7..1bbd787833ec 100644 --- a/packages/google-cloud-bigtable/.pre-commit-config.yaml +++ b/packages/google-cloud-bigtable/.pre-commit-config.yaml @@ -26,6 +26,6 @@ repos: hooks: - id: black - repo: https://gitlab.com/pycqa/flake8 - rev: 3.9.0 + rev: 3.9.1 hooks: - id: flake8 diff --git a/packages/google-cloud-bigtable/CONTRIBUTING.rst b/packages/google-cloud-bigtable/CONTRIBUTING.rst index d68622f60b2a..5437a1b5bb2f 100644 --- a/packages/google-cloud-bigtable/CONTRIBUTING.rst +++ b/packages/google-cloud-bigtable/CONTRIBUTING.rst @@ -160,21 +160,7 @@ Running System Tests auth settings and change some configuration in your project to run all the tests. -- System tests will be run against an actual project and - so you'll need to provide some environment variables to facilitate - authentication to your project: - - - ``GOOGLE_APPLICATION_CREDENTIALS``: The path to a JSON key file; - Such a file can be downloaded directly from the developer's console by clicking - "Generate new JSON key". See private key - `docs `__ - for more details. - -- Once you have downloaded your json keys, set the environment variable - ``GOOGLE_APPLICATION_CREDENTIALS`` to the absolute path of the json file:: - - $ export GOOGLE_APPLICATION_CREDENTIALS="/Users//path/to/app_credentials.json" - +- System tests will be run against an actual project. You should use local credentials from gcloud when possible. See `Best practices for application authentication `__. Some tests require a service account. For those tests see `Authenticating as a service account `__. ************* Test Coverage diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index 3557f6615ebd..e74321f4943e 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -57,15 +57,11 @@ def lint(session): session.run("flake8", "google", "tests") -@nox.session(python="3.6") +@nox.session(python=DEFAULT_PYTHON_VERSION) def blacken(session): """Run black. Format code to uniform standard. - - This currently uses Python 3.6 due to the automated Kokoro run of synthtool. - That run uses an image that doesn't have 3.6 installed. Before updating this - check the state of the `gcp_ubuntu_config` we use for that Kokoro run. 
""" session.install(BLACK_VERSION) session.run( diff --git a/packages/google-cloud-bigtable/synth.py b/packages/google-cloud-bigtable/owlbot.py similarity index 63% rename from packages/google-cloud-bigtable/synth.py rename to packages/google-cloud-bigtable/owlbot.py index 500f95321ff2..0102060b664e 100644 --- a/packages/google-cloud-bigtable/synth.py +++ b/packages/google-cloud-bigtable/owlbot.py @@ -18,34 +18,26 @@ from synthtool import gcp from synthtool.languages import python -gapic = gcp.GAPICBazel() common = gcp.CommonTemplates() -# ---------------------------------------------------------------------------- -# Generate bigtable and bigtable_admin GAPIC layer -# ---------------------------------------------------------------------------- -library = gapic.py_library( - service="bigtable", - version="v2", - bazel_target="//google/bigtable/v2:bigtable-v2-py", - include_protos=True, -) +# This library ships clients for two different APIs, +# BigTable and BigTable Admin +bigtable_default_version = "v2" +bigtable_admin_default_version = "v2" -s.move(library / "google/cloud/bigtable_v2") -s.move(library / "tests") -s.move(library / "scripts") +for library in s.get_staging_dirs(bigtable_default_version): + s.move(library / "google/cloud/bigtable_v*") + s.move(library / "tests") + s.move(library / "scripts") -# Generate admin client -library = gapic.py_library( - service="bigtable_admin", - version="v2", - bazel_target="//google/bigtable/admin/v2:bigtable-admin-v2-py", - include_protos=True, -) +s.remove_staging_dirs() + +for library in s.get_staging_dirs(bigtable_admin_default_version): + s.move(library / "google/cloud/bigtable_admin_v*") + s.move(library / "tests") + s.move(library / "scripts") -s.move(library / "google/cloud/bigtable_admin_v2") -s.move(library / "tests") -s.move(library / "scripts") +s.remove_staging_dirs() # ---------------------------------------------------------------------------- # Add templated files @@ -55,6 +47,7 @@ microgenerator=True, cov_level=99 ) + s.move(templated_files, excludes=[".coveragerc", "noxfile.py"]) # ---------------------------------------------------------------------------- @@ -66,4 +59,4 @@ s.move(path, excludes=['noxfile.py']) -s.shell.run(["nox", "-s", "blacken"], hide_output=False) +s.shell.run(["nox", "-s", "blacken"], hide_output=False) \ No newline at end of file diff --git a/packages/google-cloud-bigtable/synth.metadata b/packages/google-cloud-bigtable/synth.metadata deleted file mode 100644 index e4171541fecb..000000000000 --- a/packages/google-cloud-bigtable/synth.metadata +++ /dev/null @@ -1,187 +0,0 @@ -{ - "sources": [ - { - "git": { - "name": ".", - "remote": "https://github.com/googleapis/python-bigtable.git", - "sha": "39010afb8212bc21ec4538dadc164c4b4f30f5b8" - } - }, - { - "git": { - "name": "googleapis", - "remote": "https://github.com/googleapis/googleapis.git", - "sha": "95dd24960cf9f794ef583e59ad9f1fabe1c4a924", - "internalRef": "365882072" - } - }, - { - "git": { - "name": "synthtool", - "remote": "https://github.com/googleapis/synthtool.git", - "sha": "043cc620d6a6111816d9e09f2a97208565fde958" - } - }, - { - "git": { - "name": "synthtool", - "remote": "https://github.com/googleapis/synthtool.git", - "sha": "043cc620d6a6111816d9e09f2a97208565fde958" - } - } - ], - "destinations": [ - { - "client": { - "source": "googleapis", - "apiName": "bigtable", - "apiVersion": "v2", - "language": "python", - "generator": "bazel" - } - }, - { - "client": { - "source": "googleapis", - "apiName": "bigtable_admin", - "apiVersion": "v2", 
- "language": "python", - "generator": "bazel" - } - } - ], - "generatedFiles": [ - ".flake8", - ".github/CONTRIBUTING.md", - ".github/ISSUE_TEMPLATE/bug_report.md", - ".github/ISSUE_TEMPLATE/feature_request.md", - ".github/ISSUE_TEMPLATE/support_request.md", - ".github/PULL_REQUEST_TEMPLATE.md", - ".github/header-checker-lint.yml", - ".github/release-please.yml", - ".github/snippet-bot.yml", - ".gitignore", - ".kokoro/build.sh", - ".kokoro/continuous/common.cfg", - ".kokoro/continuous/continuous.cfg", - ".kokoro/docker/docs/Dockerfile", - ".kokoro/docker/docs/fetch_gpg_keys.sh", - ".kokoro/docs/common.cfg", - ".kokoro/docs/docs-presubmit.cfg", - ".kokoro/docs/docs.cfg", - ".kokoro/populate-secrets.sh", - ".kokoro/presubmit/common.cfg", - ".kokoro/presubmit/presubmit.cfg", - ".kokoro/publish-docs.sh", - ".kokoro/release.sh", - ".kokoro/release/common.cfg", - ".kokoro/release/release.cfg", - ".kokoro/samples/lint/common.cfg", - ".kokoro/samples/lint/continuous.cfg", - ".kokoro/samples/lint/periodic.cfg", - ".kokoro/samples/lint/presubmit.cfg", - ".kokoro/samples/python3.6/common.cfg", - ".kokoro/samples/python3.6/continuous.cfg", - ".kokoro/samples/python3.6/periodic-head.cfg", - ".kokoro/samples/python3.6/periodic.cfg", - ".kokoro/samples/python3.6/presubmit.cfg", - ".kokoro/samples/python3.7/common.cfg", - ".kokoro/samples/python3.7/continuous.cfg", - ".kokoro/samples/python3.7/periodic-head.cfg", - ".kokoro/samples/python3.7/periodic.cfg", - ".kokoro/samples/python3.7/presubmit.cfg", - ".kokoro/samples/python3.8/common.cfg", - ".kokoro/samples/python3.8/continuous.cfg", - ".kokoro/samples/python3.8/periodic-head.cfg", - ".kokoro/samples/python3.8/periodic.cfg", - ".kokoro/samples/python3.8/presubmit.cfg", - ".kokoro/test-samples-against-head.sh", - ".kokoro/test-samples-impl.sh", - ".kokoro/test-samples.sh", - ".kokoro/trampoline.sh", - ".kokoro/trampoline_v2.sh", - ".pre-commit-config.yaml", - ".trampolinerc", - "CODE_OF_CONDUCT.md", - "CONTRIBUTING.rst", - "LICENSE", - "MANIFEST.in", - "docs/_static/custom.css", - "docs/_templates/layout.html", - "docs/conf.py", - "docs/multiprocessing.rst", - "google/cloud/bigtable_admin_v2/__init__.py", - "google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto", - "google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto", - "google/cloud/bigtable_admin_v2/proto/common.proto", - "google/cloud/bigtable_admin_v2/proto/instance.proto", - "google/cloud/bigtable_admin_v2/proto/table.proto", - "google/cloud/bigtable_admin_v2/py.typed", - "google/cloud/bigtable_admin_v2/services/__init__.py", - "google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py", - "google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py", - "google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py", - "google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py", - "google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py", - "google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py", - "google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py", - "google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py", - "google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py", - "google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py", - "google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py", - 
"google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py", - "google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py", - "google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py", - "google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py", - "google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py", - "google/cloud/bigtable_admin_v2/types/__init__.py", - "google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py", - "google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py", - "google/cloud/bigtable_admin_v2/types/common.py", - "google/cloud/bigtable_admin_v2/types/instance.py", - "google/cloud/bigtable_admin_v2/types/table.py", - "google/cloud/bigtable_v2/__init__.py", - "google/cloud/bigtable_v2/proto/bigtable.proto", - "google/cloud/bigtable_v2/proto/data.proto", - "google/cloud/bigtable_v2/py.typed", - "google/cloud/bigtable_v2/services/__init__.py", - "google/cloud/bigtable_v2/services/bigtable/__init__.py", - "google/cloud/bigtable_v2/services/bigtable/async_client.py", - "google/cloud/bigtable_v2/services/bigtable/client.py", - "google/cloud/bigtable_v2/services/bigtable/transports/__init__.py", - "google/cloud/bigtable_v2/services/bigtable/transports/base.py", - "google/cloud/bigtable_v2/services/bigtable/transports/grpc.py", - "google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py", - "google/cloud/bigtable_v2/types/__init__.py", - "google/cloud/bigtable_v2/types/bigtable.py", - "google/cloud/bigtable_v2/types/data.py", - "renovate.json", - "samples/AUTHORING_GUIDE.md", - "samples/CONTRIBUTING.md", - "samples/README.md", - "samples/hello/README.md", - "samples/hello_happybase/README.md", - "samples/instanceadmin/README.md", - "samples/metricscaler/README.md", - "samples/quickstart/README.md", - "samples/quickstart_happybase/README.md", - "samples/snippets/README.md", - "samples/tableadmin/README.md", - "scripts/decrypt-secrets.sh", - "scripts/fixup_bigtable_admin_v2_keywords.py", - "scripts/fixup_bigtable_v2_keywords.py", - "scripts/readme-gen/readme_gen.py", - "scripts/readme-gen/templates/README.tmpl.rst", - "scripts/readme-gen/templates/auth.tmpl.rst", - "scripts/readme-gen/templates/auth_api_key.tmpl.rst", - "scripts/readme-gen/templates/install_deps.tmpl.rst", - "scripts/readme-gen/templates/install_portaudio.tmpl.rst", - "setup.cfg", - "testing/.gitignore", - "tests/unit/gapic/bigtable_admin_v2/__init__.py", - "tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py", - "tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py", - "tests/unit/gapic/bigtable_v2/__init__.py", - "tests/unit/gapic/bigtable_v2/test_bigtable.py" - ] -} \ No newline at end of file From 093009afc227b454288aa8a858deffb470630f02 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Fri, 14 May 2021 13:38:05 +0000 Subject: [PATCH 447/892] chore: upgrade gapic-generator-python to 0.46.3 (#314) PiperOrigin-RevId: 373649163 Source-Link: https://github.com/googleapis/googleapis/commit/7e1b14e6c7a9ab96d2db7e4a131981f162446d34 Source-Link: https://github.com/googleapis/googleapis-gen/commit/0a3c7d272d697796db75857bac73905c68e498c3 --- .../google/cloud/bigtable_v2/__init__.py | 7 +- .../cloud/bigtable_v2/gapic_metadata.json | 83 +++ .../cloud/bigtable_v2/services/__init__.py | 1 - .../bigtable_v2/services/bigtable/__init__.py | 2 - .../services/bigtable/async_client.py | 43 +- 
.../bigtable_v2/services/bigtable/client.py | 79 ++- .../services/bigtable/transports/__init__.py | 2 - .../services/bigtable/transports/base.py | 138 +++-- .../services/bigtable/transports/grpc.py | 22 +- .../bigtable/transports/grpc_asyncio.py | 23 +- .../cloud/bigtable_v2/types/__init__.py | 2 - .../cloud/bigtable_v2/types/bigtable.py | 117 ++-- .../google/cloud/bigtable_v2/types/data.py | 151 ++--- .../scripts/fixup_bigtable_v2_keywords.py | 17 +- .../google-cloud-bigtable/tests/__init__.py | 15 + .../tests/unit/__init__.py | 4 +- .../tests/unit/gapic/__init__.py | 15 + .../tests/unit/gapic/bigtable_v2/__init__.py | 1 - .../unit/gapic/bigtable_v2/test_bigtable.py | 523 +++++++++++------- 19 files changed, 702 insertions(+), 543 deletions(-) create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_metadata.json create mode 100644 packages/google-cloud-bigtable/tests/unit/gapic/__init__.py diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py index 0ab15791b864..1df0bdc5423f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- - # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,6 +15,8 @@ # from .services.bigtable import BigtableClient +from .services.bigtable import BigtableAsyncClient + from .types.bigtable import CheckAndMutateRowRequest from .types.bigtable import CheckAndMutateRowResponse from .types.bigtable import MutateRowRequest @@ -41,8 +42,9 @@ from .types.data import TimestampRange from .types.data import ValueRange - __all__ = ( + "BigtableAsyncClient", + "BigtableClient", "Cell", "CheckAndMutateRowRequest", "CheckAndMutateRowResponse", @@ -67,5 +69,4 @@ "SampleRowKeysResponse", "TimestampRange", "ValueRange", - "BigtableClient", ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_metadata.json b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_metadata.json new file mode 100644 index 000000000000..854c13be2936 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_metadata.json @@ -0,0 +1,83 @@ + { + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "python", + "libraryPackage": "google.cloud.bigtable_v2", + "protoPackage": "google.bigtable.v2", + "schema": "1.0", + "services": { + "Bigtable": { + "clients": { + "grpc": { + "libraryClient": "BigtableClient", + "rpcs": { + "CheckAndMutateRow": { + "methods": [ + "check_and_mutate_row" + ] + }, + "MutateRow": { + "methods": [ + "mutate_row" + ] + }, + "MutateRows": { + "methods": [ + "mutate_rows" + ] + }, + "ReadModifyWriteRow": { + "methods": [ + "read_modify_write_row" + ] + }, + "ReadRows": { + "methods": [ + "read_rows" + ] + }, + "SampleRowKeys": { + "methods": [ + "sample_row_keys" + ] + } + } + }, + "grpc-async": { + "libraryClient": "BigtableAsyncClient", + "rpcs": { + "CheckAndMutateRow": { + "methods": [ + "check_and_mutate_row" + ] + }, + "MutateRow": { + "methods": [ + "mutate_row" + ] + }, + "MutateRows": { + "methods": [ + "mutate_rows" + ] + }, + "ReadModifyWriteRow": { + "methods": [ + "read_modify_write_row" + ] + }, + "ReadRows": { + "methods": [ + "read_rows" + ] + }, + "SampleRowKeys": { + "methods": [ + "sample_row_keys" + ] + } + } + } + } + } + } +} diff --git 
a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/__init__.py index 42ffdf2bc43d..4de65971c238 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/__init__.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- - # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/__init__.py index 622941c65d1b..a79e1d780866 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/__init__.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- - # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,7 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. # - from .client import BigtableClient from .async_client import BigtableAsyncClient diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py index 2cbb94a9b2ab..9aa15e391ed1 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- - # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,7 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# - from collections import OrderedDict import functools import re @@ -22,15 +20,14 @@ import pkg_resources import google.api_core.client_options as ClientOptions # type: ignore -from google.api_core import exceptions # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore from google.api_core import gapic_v1 # type: ignore from google.api_core import retry as retries # type: ignore -from google.auth import credentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore from google.cloud.bigtable_v2.types import bigtable from google.cloud.bigtable_v2.types import data - from .transports.base import BigtableTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import BigtableGrpcAsyncIOTransport from .client import BigtableClient @@ -48,31 +45,27 @@ class BigtableAsyncClient: table_path = staticmethod(BigtableClient.table_path) parse_table_path = staticmethod(BigtableClient.parse_table_path) - common_billing_account_path = staticmethod( BigtableClient.common_billing_account_path ) parse_common_billing_account_path = staticmethod( BigtableClient.parse_common_billing_account_path ) - common_folder_path = staticmethod(BigtableClient.common_folder_path) parse_common_folder_path = staticmethod(BigtableClient.parse_common_folder_path) - common_organization_path = staticmethod(BigtableClient.common_organization_path) parse_common_organization_path = staticmethod( BigtableClient.parse_common_organization_path ) - common_project_path = staticmethod(BigtableClient.common_project_path) parse_common_project_path = staticmethod(BigtableClient.parse_common_project_path) - common_location_path = staticmethod(BigtableClient.common_location_path) parse_common_location_path = staticmethod(BigtableClient.parse_common_location_path) @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials info. + """Creates an instance of this client using the provided credentials + info. Args: info (dict): The service account private key info. @@ -87,7 +80,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials - file. + file. Args: filename (str): The path to the service account private key json @@ -104,7 +97,7 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): @property def transport(self) -> BigtableTransport: - """Return the transport used by the client instance. + """Returns the transport used by the client instance. Returns: BigtableTransport: The transport used by the client instance. @@ -118,12 +111,12 @@ def transport(self) -> BigtableTransport: def __init__( self, *, - credentials: credentials.Credentials = None, + credentials: ga_credentials.Credentials = None, transport: Union[str, BigtableTransport] = "grpc_asyncio", client_options: ClientOptions = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiate the bigtable client. + """Instantiates the bigtable client. Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -155,7 +148,6 @@ def __init__( google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport creation failed for any reason. 
""" - self._client = BigtableClient( credentials=credentials, transport=transport, @@ -201,7 +193,6 @@ def read_rows( This corresponds to the ``app_profile_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -228,7 +219,6 @@ def read_rows( # If we have keyword arguments corresponding to fields on the # request, apply these. - if table_name is not None: request.table_name = table_name if app_profile_id is not None: @@ -300,7 +290,6 @@ def sample_row_keys( This corresponds to the ``app_profile_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -327,7 +316,6 @@ def sample_row_keys( # If we have keyword arguments corresponding to fields on the # request, apply these. - if table_name is not None: request.table_name = table_name if app_profile_id is not None: @@ -416,7 +404,6 @@ async def mutate_row( This corresponds to the ``app_profile_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -443,14 +430,12 @@ async def mutate_row( # If we have keyword arguments corresponding to fields on the # request, apply these. - if table_name is not None: request.table_name = table_name if row_key is not None: request.row_key = row_key if app_profile_id is not None: request.app_profile_id = app_profile_id - if mutations: request.mutations.extend(mutations) @@ -463,7 +448,8 @@ async def mutate_row( maximum=60.0, multiplier=2, predicate=retries.if_exception_type( - exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, ), deadline=60.0, ), @@ -535,7 +521,6 @@ def mutate_rows( This corresponds to the ``app_profile_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -562,12 +547,10 @@ def mutate_rows( # If we have keyword arguments corresponding to fields on the # request, apply these. - if table_name is not None: request.table_name = table_name if app_profile_id is not None: request.app_profile_id = app_profile_id - if entries: request.entries.extend(entries) @@ -679,7 +662,6 @@ async def check_and_mutate_row( This corresponds to the ``app_profile_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -715,7 +697,6 @@ async def check_and_mutate_row( # If we have keyword arguments corresponding to fields on the # request, apply these. 
- if table_name is not None: request.table_name = table_name if row_key is not None: @@ -724,7 +705,6 @@ async def check_and_mutate_row( request.predicate_filter = predicate_filter if app_profile_id is not None: request.app_profile_id = app_profile_id - if true_mutations: request.true_mutations.extend(true_mutations) if false_mutations: @@ -820,7 +800,6 @@ async def read_modify_write_row( This corresponds to the ``app_profile_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -847,14 +826,12 @@ async def read_modify_write_row( # If we have keyword arguments corresponding to fields on the # request, apply these. - if table_name is not None: request.table_name = table_name if row_key is not None: request.row_key = row_key if app_profile_id is not None: request.app_profile_id = app_profile_id - if rules: request.rules.extend(rules) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py index a9f3dfd74925..9448e2af7570 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- - # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,7 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. # - from collections import OrderedDict from distutils import util import os @@ -23,10 +21,10 @@ import pkg_resources from google.api_core import client_options as client_options_lib # type: ignore -from google.api_core import exceptions # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore from google.api_core import gapic_v1 # type: ignore from google.api_core import retry as retries # type: ignore -from google.auth import credentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport import mtls # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore @@ -34,7 +32,6 @@ from google.cloud.bigtable_v2.types import bigtable from google.cloud.bigtable_v2.types import data - from .transports.base import BigtableTransport, DEFAULT_CLIENT_INFO from .transports.grpc import BigtableGrpcTransport from .transports.grpc_asyncio import BigtableGrpcAsyncIOTransport @@ -53,7 +50,7 @@ class BigtableClientMeta(type): _transport_registry["grpc_asyncio"] = BigtableGrpcAsyncIOTransport def get_transport_class(cls, label: str = None,) -> Type[BigtableTransport]: - """Return an appropriate transport class. + """Returns an appropriate transport class. Args: label: The name of the desired transport. If none is @@ -78,7 +75,8 @@ class BigtableClient(metaclass=BigtableClientMeta): @staticmethod def _get_default_mtls_endpoint(api_endpoint): - """Convert api endpoint to mTLS endpoint. + """Converts api endpoint to mTLS endpoint. + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. 
Args: @@ -112,7 +110,8 @@ def _get_default_mtls_endpoint(api_endpoint): @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials info. + """Creates an instance of this client using the provided credentials + info. Args: info (dict): The service account private key info. @@ -129,7 +128,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials - file. + file. Args: filename (str): The path to the service account private key json @@ -148,23 +147,24 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): @property def transport(self) -> BigtableTransport: - """Return the transport used by the client instance. + """Returns the transport used by the client instance. Returns: - BigtableTransport: The transport used by the client instance. + BigtableTransport: The transport used by the client + instance. """ return self._transport @staticmethod def table_path(project: str, instance: str, table: str,) -> str: - """Return a fully-qualified table string.""" + """Returns a fully-qualified table string.""" return "projects/{project}/instances/{instance}/tables/{table}".format( project=project, instance=instance, table=table, ) @staticmethod def parse_table_path(path: str) -> Dict[str, str]: - """Parse a table path into its component segments.""" + """Parses a table path into its component segments.""" m = re.match( r"^projects/(?P.+?)/instances/(?P.+?)/tables/(?P
.+?)$", path, @@ -173,7 +173,7 @@ def parse_table_path(path: str) -> Dict[str, str]: @staticmethod def common_billing_account_path(billing_account: str,) -> str: - """Return a fully-qualified billing_account string.""" + """Returns a fully-qualified billing_account string.""" return "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -186,7 +186,7 @@ def parse_common_billing_account_path(path: str) -> Dict[str, str]: @staticmethod def common_folder_path(folder: str,) -> str: - """Return a fully-qualified folder string.""" + """Returns a fully-qualified folder string.""" return "folders/{folder}".format(folder=folder,) @staticmethod @@ -197,7 +197,7 @@ def parse_common_folder_path(path: str) -> Dict[str, str]: @staticmethod def common_organization_path(organization: str,) -> str: - """Return a fully-qualified organization string.""" + """Returns a fully-qualified organization string.""" return "organizations/{organization}".format(organization=organization,) @staticmethod @@ -208,7 +208,7 @@ def parse_common_organization_path(path: str) -> Dict[str, str]: @staticmethod def common_project_path(project: str,) -> str: - """Return a fully-qualified project string.""" + """Returns a fully-qualified project string.""" return "projects/{project}".format(project=project,) @staticmethod @@ -219,7 +219,7 @@ def parse_common_project_path(path: str) -> Dict[str, str]: @staticmethod def common_location_path(project: str, location: str,) -> str: - """Return a fully-qualified location string.""" + """Returns a fully-qualified location string.""" return "projects/{project}/locations/{location}".format( project=project, location=location, ) @@ -233,12 +233,12 @@ def parse_common_location_path(path: str) -> Dict[str, str]: def __init__( self, *, - credentials: Optional[credentials.Credentials] = None, + credentials: Optional[ga_credentials.Credentials] = None, transport: Union[str, BigtableTransport, None] = None, client_options: Optional[client_options_lib.ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiate the bigtable client. + """Instantiates the bigtable client. Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -293,9 +293,10 @@ def __init__( client_cert_source_func = client_options.client_cert_source else: is_mtls = mtls.has_default_client_cert_source() - client_cert_source_func = ( - mtls.default_client_cert_source() if is_mtls else None - ) + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None # Figure out which api endpoint to use. if client_options.api_endpoint is not None: @@ -307,12 +308,14 @@ def __init__( elif use_mtls_env == "always": api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": - api_endpoint = ( - self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT - ) + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT else: raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" ) # Save or instantiate the transport. @@ -327,8 +330,8 @@ def __init__( ) if client_options.scopes: raise ValueError( - "When providing a transport instance, " - "provide its scopes directly." + "When providing a transport instance, provide its scopes " + "directly." 
) self._transport = transport else: @@ -381,7 +384,6 @@ def read_rows( This corresponds to the ``app_profile_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -410,10 +412,8 @@ def read_rows( # there are no flattened fields. if not isinstance(request, bigtable.ReadRowsRequest): request = bigtable.ReadRowsRequest(request) - # If we have keyword arguments corresponding to fields on the # request, apply these. - if table_name is not None: request.table_name = table_name if app_profile_id is not None: @@ -474,7 +474,6 @@ def sample_row_keys( This corresponds to the ``app_profile_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -503,10 +502,8 @@ def sample_row_keys( # there are no flattened fields. if not isinstance(request, bigtable.SampleRowKeysRequest): request = bigtable.SampleRowKeysRequest(request) - # If we have keyword arguments corresponding to fields on the # request, apply these. - if table_name is not None: request.table_name = table_name if app_profile_id is not None: @@ -584,7 +581,6 @@ def mutate_row( This corresponds to the ``app_profile_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -613,10 +609,8 @@ def mutate_row( # there are no flattened fields. if not isinstance(request, bigtable.MutateRowRequest): request = bigtable.MutateRowRequest(request) - # If we have keyword arguments corresponding to fields on the # request, apply these. - if table_name is not None: request.table_name = table_name if row_key is not None: @@ -694,7 +688,6 @@ def mutate_rows( This corresponds to the ``app_profile_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -723,10 +716,8 @@ def mutate_rows( # there are no flattened fields. if not isinstance(request, bigtable.MutateRowsRequest): request = bigtable.MutateRowsRequest(request) - # If we have keyword arguments corresponding to fields on the # request, apply these. - if table_name is not None: request.table_name = table_name if entries is not None: @@ -831,7 +822,6 @@ def check_and_mutate_row( This corresponds to the ``app_profile_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -869,10 +859,8 @@ def check_and_mutate_row( # there are no flattened fields. if not isinstance(request, bigtable.CheckAndMutateRowRequest): request = bigtable.CheckAndMutateRowRequest(request) - # If we have keyword arguments corresponding to fields on the # request, apply these. - if table_name is not None: request.table_name = table_name if row_key is not None: @@ -965,7 +953,6 @@ def read_modify_write_row( This corresponds to the ``app_profile_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -994,10 +981,8 @@ def read_modify_write_row( # there are no flattened fields. if not isinstance(request, bigtable.ReadModifyWriteRowRequest): request = bigtable.ReadModifyWriteRowRequest(request) - # If we have keyword arguments corresponding to fields on the # request, apply these. - if table_name is not None: request.table_name = table_name if row_key is not None: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py index e18b4592419b..ba1f2b88e8d9 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- - # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,7 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. # - from collections import OrderedDict from typing import Dict, Type diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py index 5c362374c62e..da60f8f83adb 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- - # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,20 +13,20 @@ # See the License for the specific language governing permissions and # limitations under the License. 
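A quick usage sketch of the regenerated synchronous client covered by the hunks above, assuming the path helpers and flattened keyword arguments shown there; the project, instance, and table ids are placeholders, not values from this patch:

    from google.cloud.bigtable_v2.services.bigtable import BigtableClient

    # Build and round-trip a fully-qualified table name (placeholder ids).
    table = BigtableClient.table_path("my-project", "my-instance", "my-table")
    assert table == "projects/my-project/instances/my-instance/tables/my-table"
    assert BigtableClient.parse_table_path(table) == {
        "project": "my-project",
        "instance": "my-instance",
        "table": "my-table",
    }

    # Flattened keyword call; the client copies these onto a ReadRowsRequest,
    # exactly as the `if table_name is not None: ...` blocks above do.
    client = BigtableClient()  # default credentials and endpoint resolution
    for response in client.read_rows(table_name=table, app_profile_id="default"):
        pass  # each item is a bigtable.ReadRowsResponse carrying `chunks`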
# - import abc -import typing +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import packaging.version import pkg_resources -from google import auth # type: ignore -from google.api_core import exceptions # type: ignore +import google.auth # type: ignore +import google.api_core # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore from google.api_core import gapic_v1 # type: ignore from google.api_core import retry as retries # type: ignore -from google.auth import credentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore from google.cloud.bigtable_v2.types import bigtable - try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=pkg_resources.get_distribution("google-cloud-bigtable",).version, @@ -35,6 +34,17 @@ except pkg_resources.DistributionNotFound: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() +try: + # google.auth.__version__ was added in 1.26.0 + _GOOGLE_AUTH_VERSION = google.auth.__version__ +except AttributeError: + try: # try pkg_resources if it is available + _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version + except pkg_resources.DistributionNotFound: # pragma: NO COVER + _GOOGLE_AUTH_VERSION = None + +_API_CORE_VERSION = google.api_core.__version__ + class BigtableTransport(abc.ABC): """Abstract transport class for Bigtable.""" @@ -48,21 +58,24 @@ class BigtableTransport(abc.ABC): "https://www.googleapis.com/auth/cloud-platform.read-only", ) + DEFAULT_HOST: str = "bigtable.googleapis.com" + def __init__( self, *, - host: str = "bigtable.googleapis.com", - credentials: credentials.Credentials = None, - credentials_file: typing.Optional[str] = None, - scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES, - quota_project_id: typing.Optional[str] = None, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, **kwargs, ) -> None: """Instantiate the transport. Args: - host (Optional[str]): The hostname to connect to. + host (Optional[str]): + The hostname to connect to. credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none @@ -71,7 +84,7 @@ def __init__( credentials_file (Optional[str]): A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is mutually exclusive with credentials. - scope (Optional[Sequence[str]]): A list of scopes. + scopes (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. client_info (google.api_core.gapic_v1.client_info.ClientInfo): @@ -85,29 +98,76 @@ def __init__( host += ":443" self._host = host + scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) + # Save the scopes. self._scopes = scopes or self.AUTH_SCOPES # If no credentials are provided, then determine the appropriate # defaults. 
if credentials and credentials_file: - raise exceptions.DuplicateCredentialArgs( + raise core_exceptions.DuplicateCredentialArgs( "'credentials_file' and 'credentials' are mutually exclusive" ) if credentials_file is not None: - credentials, _ = auth.load_credentials_from_file( - credentials_file, scopes=self._scopes, quota_project_id=quota_project_id + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) elif credentials is None: - credentials, _ = auth.default( - scopes=self._scopes, quota_project_id=quota_project_id + credentials, _ = google.auth.default( + **scopes_kwargs, quota_project_id=quota_project_id ) # Save the credentials. self._credentials = credentials + # TODO(busunkim): These two class methods are in the base transport + # to avoid duplicating code across the transport classes. These functions + # should be deleted once the minimum required versions of google-api-core + # and google-auth are increased. + + # TODO: Remove this function once google-auth >= 1.25.0 is required + @classmethod + def _get_scopes_kwargs( + cls, host: str, scopes: Optional[Sequence[str]] + ) -> Dict[str, Optional[Sequence[str]]]: + """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version""" + + scopes_kwargs = {} + + if _GOOGLE_AUTH_VERSION and ( + packaging.version.parse(_GOOGLE_AUTH_VERSION) + >= packaging.version.parse("1.25.0") + ): + scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES} + else: + scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES} + + return scopes_kwargs + + # TODO: Remove this function once google-api-core >= 1.26.0 is required + @classmethod + def _get_self_signed_jwt_kwargs( + cls, host: str, scopes: Optional[Sequence[str]] + ) -> Dict[str, Union[Optional[Sequence[str]], str]]: + """Returns kwargs to pass to grpc_helpers.create_channel depending on the google-api-core version""" + + self_signed_jwt_kwargs: Dict[str, Union[Optional[Sequence[str]], str]] = {} + + if _API_CORE_VERSION and ( + packaging.version.parse(_API_CORE_VERSION) + >= packaging.version.parse("1.26.0") + ): + self_signed_jwt_kwargs["default_scopes"] = cls.AUTH_SCOPES + self_signed_jwt_kwargs["scopes"] = scopes + self_signed_jwt_kwargs["default_host"] = cls.DEFAULT_HOST + else: + self_signed_jwt_kwargs["scopes"] = scopes or cls.AUTH_SCOPES + + return self_signed_jwt_kwargs + def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. 
self._wrapped_methods = { @@ -142,7 +202,8 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=2, predicate=retries.if_exception_type( - exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, ), deadline=60.0, ), @@ -190,22 +251,19 @@ def _prep_wrapped_messages(self, client_info): @property def read_rows( self, - ) -> typing.Callable[ + ) -> Callable[ [bigtable.ReadRowsRequest], - typing.Union[ - bigtable.ReadRowsResponse, typing.Awaitable[bigtable.ReadRowsResponse] - ], + Union[bigtable.ReadRowsResponse, Awaitable[bigtable.ReadRowsResponse]], ]: raise NotImplementedError() @property def sample_row_keys( self, - ) -> typing.Callable[ + ) -> Callable[ [bigtable.SampleRowKeysRequest], - typing.Union[ - bigtable.SampleRowKeysResponse, - typing.Awaitable[bigtable.SampleRowKeysResponse], + Union[ + bigtable.SampleRowKeysResponse, Awaitable[bigtable.SampleRowKeysResponse] ], ]: raise NotImplementedError() @@ -213,33 +271,29 @@ def sample_row_keys( @property def mutate_row( self, - ) -> typing.Callable[ + ) -> Callable[ [bigtable.MutateRowRequest], - typing.Union[ - bigtable.MutateRowResponse, typing.Awaitable[bigtable.MutateRowResponse] - ], + Union[bigtable.MutateRowResponse, Awaitable[bigtable.MutateRowResponse]], ]: raise NotImplementedError() @property def mutate_rows( self, - ) -> typing.Callable[ + ) -> Callable[ [bigtable.MutateRowsRequest], - typing.Union[ - bigtable.MutateRowsResponse, typing.Awaitable[bigtable.MutateRowsResponse] - ], + Union[bigtable.MutateRowsResponse, Awaitable[bigtable.MutateRowsResponse]], ]: raise NotImplementedError() @property def check_and_mutate_row( self, - ) -> typing.Callable[ + ) -> Callable[ [bigtable.CheckAndMutateRowRequest], - typing.Union[ + Union[ bigtable.CheckAndMutateRowResponse, - typing.Awaitable[bigtable.CheckAndMutateRowResponse], + Awaitable[bigtable.CheckAndMutateRowResponse], ], ]: raise NotImplementedError() @@ -247,11 +301,11 @@ def check_and_mutate_row( @property def read_modify_write_row( self, - ) -> typing.Callable[ + ) -> Callable[ [bigtable.ReadModifyWriteRowRequest], - typing.Union[ + Union[ bigtable.ReadModifyWriteRowResponse, - typing.Awaitable[bigtable.ReadModifyWriteRowResponse], + Awaitable[bigtable.ReadModifyWriteRowResponse], ], ]: raise NotImplementedError() diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py index c19847171716..12d0a50d1a72 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- - # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,20 +13,18 @@ # See the License for the specific language governing permissions and # limitations under the License. 
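The hunks above move the retry predicate onto `core_exceptions` but keep the same policy: exponential backoff from 0.1s up to 60s, retried only on DEADLINE_EXCEEDED and UNAVAILABLE, with a 60s overall deadline. A caller can pass an equivalent (or tighter) policy per call through the `retry=` argument; a sketch using only the public google.api_core retry API:

    from google.api_core import exceptions as core_exceptions
    from google.api_core import retry as retries

    # Same shape as the default wired up in _prep_wrapped_messages above.
    custom_retry = retries.Retry(
        initial=0.1,
        maximum=60.0,
        multiplier=2,
        predicate=retries.if_exception_type(
            core_exceptions.DeadlineExceeded,
            core_exceptions.ServiceUnavailable,
        ),
        deadline=60.0,
    )

    # Any generated method accepts it as a per-call override, e.g.
    # client.mutate_row(..., retry=custom_retry, timeout=60.0)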
# - import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple +from typing import Callable, Dict, Optional, Sequence, Tuple, Union from google.api_core import grpc_helpers # type: ignore from google.api_core import gapic_v1 # type: ignore -from google import auth # type: ignore -from google.auth import credentials # type: ignore +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore import grpc # type: ignore from google.cloud.bigtable_v2.types import bigtable - from .base import BigtableTransport, DEFAULT_CLIENT_INFO @@ -51,7 +48,7 @@ def __init__( self, *, host: str = "bigtable.googleapis.com", - credentials: credentials.Credentials = None, + credentials: ga_credentials.Credentials = None, credentials_file: str = None, scopes: Sequence[str] = None, channel: grpc.Channel = None, @@ -65,7 +62,8 @@ def __init__( """Instantiate the transport. Args: - host (Optional[str]): The hostname to connect to. + host (Optional[str]): + The hostname to connect to. credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none @@ -175,7 +173,7 @@ def __init__( def create_channel( cls, host: str = "bigtable.googleapis.com", - credentials: credentials.Credentials = None, + credentials: ga_credentials.Credentials = None, credentials_file: str = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, @@ -206,13 +204,15 @@ def create_channel( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ - scopes = scopes or cls.AUTH_SCOPES + + self_signed_jwt_kwargs = cls._get_self_signed_jwt_kwargs(host, scopes) + return grpc_helpers.create_channel( host, credentials=credentials, credentials_file=credentials_file, - scopes=scopes, quota_project_id=quota_project_id, + **self_signed_jwt_kwargs, **kwargs, ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py index 5d722ce908e9..e0da77a75bc5 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- - # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,21 +13,19 @@ # See the License for the specific language governing permissions and # limitations under the License. 
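For context on the `**self_signed_jwt_kwargs` change in `create_channel` above: the base transport computes those kwargs from the installed google-api-core version, so newer releases receive `default_scopes`/`default_host` while older ones keep a plain `scopes` argument. The gating is a `packaging.version` comparison; a minimal sketch of the same pattern, with a hard-coded version string standing in for the detected one:

    import packaging.version

    from google.cloud.bigtable_v2.services.bigtable.transports.base import (
        BigtableTransport,
    )

    detected = "1.26.0"  # placeholder; the transport reads google.api_core.__version__

    if packaging.version.parse(detected) >= packaging.version.parse("1.26.0"):
        # Newer api-core: library defaults travel separately from user scopes.
        kwargs = {
            "default_scopes": BigtableTransport.AUTH_SCOPES,
            "scopes": None,
            "default_host": BigtableTransport.DEFAULT_HOST,
        }
    else:
        # Older api-core: collapse everything into a single scopes argument.
        kwargs = {"scopes": BigtableTransport.AUTH_SCOPES}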
# - import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union from google.api_core import gapic_v1 # type: ignore from google.api_core import grpc_helpers_async # type: ignore -from google import auth # type: ignore -from google.auth import credentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore +import packaging.version import grpc # type: ignore from grpc.experimental import aio # type: ignore from google.cloud.bigtable_v2.types import bigtable - from .base import BigtableTransport, DEFAULT_CLIENT_INFO from .grpc import BigtableGrpcTransport @@ -54,7 +51,7 @@ class BigtableGrpcAsyncIOTransport(BigtableTransport): def create_channel( cls, host: str = "bigtable.googleapis.com", - credentials: credentials.Credentials = None, + credentials: ga_credentials.Credentials = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, @@ -81,13 +78,15 @@ def create_channel( Returns: aio.Channel: A gRPC AsyncIO channel object. """ - scopes = scopes or cls.AUTH_SCOPES + + self_signed_jwt_kwargs = cls._get_self_signed_jwt_kwargs(host, scopes) + return grpc_helpers_async.create_channel( host, credentials=credentials, credentials_file=credentials_file, - scopes=scopes, quota_project_id=quota_project_id, + **self_signed_jwt_kwargs, **kwargs, ) @@ -95,7 +94,7 @@ def __init__( self, *, host: str = "bigtable.googleapis.com", - credentials: credentials.Credentials = None, + credentials: ga_credentials.Credentials = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, channel: aio.Channel = None, @@ -109,7 +108,8 @@ def __init__( """Instantiate the transport. Args: - host (Optional[str]): The hostname to connect to. + host (Optional[str]): + The hostname to connect to. credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none @@ -167,7 +167,6 @@ def __init__( # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - else: if api_mtls_endpoint: host = api_mtls_endpoint diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py index d744f93de889..4c15b6742e5f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- - # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,7 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# - from .bigtable import ( CheckAndMutateRowRequest, CheckAndMutateRowResponse, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py index 83def634e9a9..35a19e2d185e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- - # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,13 +13,11 @@ # See the License for the specific language governing permissions and # limitations under the License. # - import proto # type: ignore - from google.cloud.bigtable_v2.types import data -from google.protobuf import wrappers_pb2 as wrappers # type: ignore -from google.rpc import status_pb2 as gr_status # type: ignore +from google.protobuf import wrappers_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore __protobuf__ = proto.module( @@ -44,7 +41,6 @@ class ReadRowsRequest(proto.Message): r"""Request message for Bigtable.ReadRows. - Attributes: table_name (str): Required. The unique name of the table from which to read. @@ -67,20 +63,15 @@ class ReadRowsRequest(proto.Message): return all results. """ - table_name = proto.Field(proto.STRING, number=1) - - app_profile_id = proto.Field(proto.STRING, number=5) - + table_name = proto.Field(proto.STRING, number=1,) + app_profile_id = proto.Field(proto.STRING, number=5,) rows = proto.Field(proto.MESSAGE, number=2, message=data.RowSet,) - filter = proto.Field(proto.MESSAGE, number=3, message=data.RowFilter,) - - rows_limit = proto.Field(proto.INT64, number=4) + rows_limit = proto.Field(proto.INT64, number=4,) class ReadRowsResponse(proto.Message): r"""Response message for Bigtable.ReadRows. - Attributes: chunks (Sequence[google.cloud.bigtable_v2.types.ReadRowsResponse.CellChunk]): A collection of a row's contents as part of @@ -158,34 +149,26 @@ class CellChunk(proto.Message): chunks for ``row_key``, as its data has been fully read. """ - row_key = proto.Field(proto.BYTES, number=1) - + row_key = proto.Field(proto.BYTES, number=1,) family_name = proto.Field( - proto.MESSAGE, number=2, message=wrappers.StringValue, + proto.MESSAGE, number=2, message=wrappers_pb2.StringValue, ) - - qualifier = proto.Field(proto.MESSAGE, number=3, message=wrappers.BytesValue,) - - timestamp_micros = proto.Field(proto.INT64, number=4) - - labels = proto.RepeatedField(proto.STRING, number=5) - - value = proto.Field(proto.BYTES, number=6) - - value_size = proto.Field(proto.INT32, number=7) - - reset_row = proto.Field(proto.BOOL, number=8, oneof="row_status") - - commit_row = proto.Field(proto.BOOL, number=9, oneof="row_status") + qualifier = proto.Field( + proto.MESSAGE, number=3, message=wrappers_pb2.BytesValue, + ) + timestamp_micros = proto.Field(proto.INT64, number=4,) + labels = proto.RepeatedField(proto.STRING, number=5,) + value = proto.Field(proto.BYTES, number=6,) + value_size = proto.Field(proto.INT32, number=7,) + reset_row = proto.Field(proto.BOOL, number=8, oneof="row_status",) + commit_row = proto.Field(proto.BOOL, number=9, oneof="row_status",) chunks = proto.RepeatedField(proto.MESSAGE, number=1, message=CellChunk,) - - last_scanned_row_key = proto.Field(proto.BYTES, number=2) + last_scanned_row_key = proto.Field(proto.BYTES, number=2,) class SampleRowKeysRequest(proto.Message): r"""Request message for Bigtable.SampleRowKeys. 
- Attributes: table_name (str): Required. The unique name of the table from which to sample @@ -197,14 +180,12 @@ class SampleRowKeysRequest(proto.Message): profile will be used. """ - table_name = proto.Field(proto.STRING, number=1) - - app_profile_id = proto.Field(proto.STRING, number=2) + table_name = proto.Field(proto.STRING, number=1,) + app_profile_id = proto.Field(proto.STRING, number=2,) class SampleRowKeysResponse(proto.Message): r"""Response message for Bigtable.SampleRowKeys. - Attributes: row_key (bytes): Sorted streamed sequence of sample row keys @@ -226,14 +207,12 @@ class SampleRowKeysResponse(proto.Message): fields. """ - row_key = proto.Field(proto.BYTES, number=1) - - offset_bytes = proto.Field(proto.INT64, number=2) + row_key = proto.Field(proto.BYTES, number=1,) + offset_bytes = proto.Field(proto.INT64, number=2,) class MutateRowRequest(proto.Message): r"""Request message for Bigtable.MutateRow. - Attributes: table_name (str): Required. The unique name of the table to which the mutation @@ -254,22 +233,18 @@ class MutateRowRequest(proto.Message): at most 100000. """ - table_name = proto.Field(proto.STRING, number=1) - - app_profile_id = proto.Field(proto.STRING, number=4) - - row_key = proto.Field(proto.BYTES, number=2) - + table_name = proto.Field(proto.STRING, number=1,) + app_profile_id = proto.Field(proto.STRING, number=4,) + row_key = proto.Field(proto.BYTES, number=2,) mutations = proto.RepeatedField(proto.MESSAGE, number=3, message=data.Mutation,) class MutateRowResponse(proto.Message): - r"""Response message for Bigtable.MutateRow.""" + r"""Response message for Bigtable.MutateRow. """ class MutateRowsRequest(proto.Message): r"""Request message for BigtableService.MutateRows. - Attributes: table_name (str): Required. The unique name of the table to @@ -290,7 +265,6 @@ class MutateRowsRequest(proto.Message): class Entry(proto.Message): r"""A mutation for a given row. - Attributes: row_key (bytes): The key of the row to which the ``mutations`` should be @@ -303,20 +277,16 @@ class Entry(proto.Message): You must specify at least one mutation. """ - row_key = proto.Field(proto.BYTES, number=1) - + row_key = proto.Field(proto.BYTES, number=1,) mutations = proto.RepeatedField(proto.MESSAGE, number=2, message=data.Mutation,) - table_name = proto.Field(proto.STRING, number=1) - - app_profile_id = proto.Field(proto.STRING, number=3) - + table_name = proto.Field(proto.STRING, number=1,) + app_profile_id = proto.Field(proto.STRING, number=3,) entries = proto.RepeatedField(proto.MESSAGE, number=2, message=Entry,) class MutateRowsResponse(proto.Message): r"""Response message for BigtableService.MutateRows. - Attributes: entries (Sequence[google.cloud.bigtable_v2.types.MutateRowsResponse.Entry]): One or more results for Entries from the @@ -339,16 +309,14 @@ class Entry(proto.Message): will be reported for both entries. """ - index = proto.Field(proto.INT64, number=1) - - status = proto.Field(proto.MESSAGE, number=2, message=gr_status.Status,) + index = proto.Field(proto.INT64, number=1,) + status = proto.Field(proto.MESSAGE, number=2, message=status_pb2.Status,) entries = proto.RepeatedField(proto.MESSAGE, number=1, message=Entry,) class CheckAndMutateRowRequest(proto.Message): r"""Request message for Bigtable.CheckAndMutateRow. - Attributes: table_name (str): Required. The unique name of the table to which the @@ -384,18 +352,13 @@ class CheckAndMutateRowRequest(proto.Message): most 100000. 
""" - table_name = proto.Field(proto.STRING, number=1) - - app_profile_id = proto.Field(proto.STRING, number=7) - - row_key = proto.Field(proto.BYTES, number=2) - + table_name = proto.Field(proto.STRING, number=1,) + app_profile_id = proto.Field(proto.STRING, number=7,) + row_key = proto.Field(proto.BYTES, number=2,) predicate_filter = proto.Field(proto.MESSAGE, number=6, message=data.RowFilter,) - true_mutations = proto.RepeatedField( proto.MESSAGE, number=4, message=data.Mutation, ) - false_mutations = proto.RepeatedField( proto.MESSAGE, number=5, message=data.Mutation, ) @@ -403,19 +366,17 @@ class CheckAndMutateRowRequest(proto.Message): class CheckAndMutateRowResponse(proto.Message): r"""Response message for Bigtable.CheckAndMutateRow. - Attributes: predicate_matched (bool): Whether or not the request's ``predicate_filter`` yielded any results for the specified row. """ - predicate_matched = proto.Field(proto.BOOL, number=1) + predicate_matched = proto.Field(proto.BOOL, number=1,) class ReadModifyWriteRowRequest(proto.Message): r"""Request message for Bigtable.ReadModifyWriteRow. - Attributes: table_name (str): Required. The unique name of the table to which the @@ -437,12 +398,9 @@ class ReadModifyWriteRowRequest(proto.Message): later ones. """ - table_name = proto.Field(proto.STRING, number=1) - - app_profile_id = proto.Field(proto.STRING, number=4) - - row_key = proto.Field(proto.BYTES, number=2) - + table_name = proto.Field(proto.STRING, number=1,) + app_profile_id = proto.Field(proto.STRING, number=4,) + row_key = proto.Field(proto.BYTES, number=2,) rules = proto.RepeatedField( proto.MESSAGE, number=3, message=data.ReadModifyWriteRule, ) @@ -450,7 +408,6 @@ class ReadModifyWriteRowRequest(proto.Message): class ReadModifyWriteRowResponse(proto.Message): r"""Response message for Bigtable.ReadModifyWriteRow. - Attributes: row (google.cloud.bigtable_v2.types.Row): A Row containing the new contents of all diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py index eece89c5ae57..ca2302889eb6 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- - # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,7 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. # - import proto # type: ignore @@ -55,8 +53,7 @@ class Row(proto.Message): not specified. """ - key = proto.Field(proto.BYTES, number=1) - + key = proto.Field(proto.BYTES, number=1,) families = proto.RepeatedField(proto.MESSAGE, number=2, message="Family",) @@ -78,8 +75,7 @@ class Family(proto.Message): increasing "qualifier". """ - name = proto.Field(proto.STRING, number=1) - + name = proto.Field(proto.STRING, number=1,) columns = proto.RepeatedField(proto.MESSAGE, number=2, message="Column",) @@ -100,8 +96,7 @@ class Column(proto.Message): "timestamp_micros". """ - qualifier = proto.Field(proto.BYTES, number=1) - + qualifier = proto.Field(proto.BYTES, number=1,) cells = proto.RepeatedField(proto.MESSAGE, number=2, message="Cell",) @@ -127,16 +122,13 @@ class Cell(proto.Message): [RowFilter][google.bigtable.v2.RowFilter]. 
""" - timestamp_micros = proto.Field(proto.INT64, number=1) - - value = proto.Field(proto.BYTES, number=2) - - labels = proto.RepeatedField(proto.STRING, number=3) + timestamp_micros = proto.Field(proto.INT64, number=1,) + value = proto.Field(proto.BYTES, number=2,) + labels = proto.RepeatedField(proto.STRING, number=3,) class RowRange(proto.Message): r"""Specifies a contiguous range of rows. - Attributes: start_key_closed (bytes): Used when giving an inclusive lower bound for @@ -152,18 +144,14 @@ class RowRange(proto.Message): the range. """ - start_key_closed = proto.Field(proto.BYTES, number=1, oneof="start_key") - - start_key_open = proto.Field(proto.BYTES, number=2, oneof="start_key") - - end_key_open = proto.Field(proto.BYTES, number=3, oneof="end_key") - - end_key_closed = proto.Field(proto.BYTES, number=4, oneof="end_key") + start_key_closed = proto.Field(proto.BYTES, number=1, oneof="start_key",) + start_key_open = proto.Field(proto.BYTES, number=2, oneof="start_key",) + end_key_open = proto.Field(proto.BYTES, number=3, oneof="end_key",) + end_key_closed = proto.Field(proto.BYTES, number=4, oneof="end_key",) class RowSet(proto.Message): r"""Specifies a non-contiguous set of rows. - Attributes: row_keys (Sequence[bytes]): Single rows included in the set. @@ -171,8 +159,7 @@ class RowSet(proto.Message): Contiguous row ranges included in the set. """ - row_keys = proto.RepeatedField(proto.BYTES, number=1) - + row_keys = proto.RepeatedField(proto.BYTES, number=1,) row_ranges = proto.RepeatedField(proto.MESSAGE, number=2, message="RowRange",) @@ -200,20 +187,17 @@ class ColumnRange(proto.Message): the range. """ - family_name = proto.Field(proto.STRING, number=1) - - start_qualifier_closed = proto.Field(proto.BYTES, number=2, oneof="start_qualifier") - - start_qualifier_open = proto.Field(proto.BYTES, number=3, oneof="start_qualifier") - - end_qualifier_closed = proto.Field(proto.BYTES, number=4, oneof="end_qualifier") - - end_qualifier_open = proto.Field(proto.BYTES, number=5, oneof="end_qualifier") + family_name = proto.Field(proto.STRING, number=1,) + start_qualifier_closed = proto.Field( + proto.BYTES, number=2, oneof="start_qualifier", + ) + start_qualifier_open = proto.Field(proto.BYTES, number=3, oneof="start_qualifier",) + end_qualifier_closed = proto.Field(proto.BYTES, number=4, oneof="end_qualifier",) + end_qualifier_open = proto.Field(proto.BYTES, number=5, oneof="end_qualifier",) class TimestampRange(proto.Message): r"""Specified a contiguous range of microsecond timestamps. - Attributes: start_timestamp_micros (int): Inclusive lower bound. If left empty, @@ -223,14 +207,12 @@ class TimestampRange(proto.Message): interpreted as infinity. """ - start_timestamp_micros = proto.Field(proto.INT64, number=1) - - end_timestamp_micros = proto.Field(proto.INT64, number=2) + start_timestamp_micros = proto.Field(proto.INT64, number=1,) + end_timestamp_micros = proto.Field(proto.INT64, number=2,) class ValueRange(proto.Message): r"""Specifies a contiguous range of raw byte values. - Attributes: start_value_closed (bytes): Used when giving an inclusive lower bound for @@ -246,13 +228,10 @@ class ValueRange(proto.Message): the range. 
""" - start_value_closed = proto.Field(proto.BYTES, number=1, oneof="start_value") - - start_value_open = proto.Field(proto.BYTES, number=2, oneof="start_value") - - end_value_closed = proto.Field(proto.BYTES, number=3, oneof="end_value") - - end_value_open = proto.Field(proto.BYTES, number=4, oneof="end_value") + start_value_closed = proto.Field(proto.BYTES, number=1, oneof="start_value",) + start_value_open = proto.Field(proto.BYTES, number=2, oneof="start_value",) + end_value_closed = proto.Field(proto.BYTES, number=3, oneof="end_value",) + end_value_open = proto.Field(proto.BYTES, number=4, oneof="end_value",) class RowFilter(proto.Message): @@ -539,56 +518,36 @@ class Condition(proto.Message): """ predicate_filter = proto.Field(proto.MESSAGE, number=1, message="RowFilter",) - true_filter = proto.Field(proto.MESSAGE, number=2, message="RowFilter",) - false_filter = proto.Field(proto.MESSAGE, number=3, message="RowFilter",) chain = proto.Field(proto.MESSAGE, number=1, oneof="filter", message=Chain,) - interleave = proto.Field( proto.MESSAGE, number=2, oneof="filter", message=Interleave, ) - condition = proto.Field(proto.MESSAGE, number=3, oneof="filter", message=Condition,) - - sink = proto.Field(proto.BOOL, number=16, oneof="filter") - - pass_all_filter = proto.Field(proto.BOOL, number=17, oneof="filter") - - block_all_filter = proto.Field(proto.BOOL, number=18, oneof="filter") - - row_key_regex_filter = proto.Field(proto.BYTES, number=4, oneof="filter") - - row_sample_filter = proto.Field(proto.DOUBLE, number=14, oneof="filter") - - family_name_regex_filter = proto.Field(proto.STRING, number=5, oneof="filter") - - column_qualifier_regex_filter = proto.Field(proto.BYTES, number=6, oneof="filter") - + sink = proto.Field(proto.BOOL, number=16, oneof="filter",) + pass_all_filter = proto.Field(proto.BOOL, number=17, oneof="filter",) + block_all_filter = proto.Field(proto.BOOL, number=18, oneof="filter",) + row_key_regex_filter = proto.Field(proto.BYTES, number=4, oneof="filter",) + row_sample_filter = proto.Field(proto.DOUBLE, number=14, oneof="filter",) + family_name_regex_filter = proto.Field(proto.STRING, number=5, oneof="filter",) + column_qualifier_regex_filter = proto.Field(proto.BYTES, number=6, oneof="filter",) column_range_filter = proto.Field( proto.MESSAGE, number=7, oneof="filter", message="ColumnRange", ) - timestamp_range_filter = proto.Field( proto.MESSAGE, number=8, oneof="filter", message="TimestampRange", ) - - value_regex_filter = proto.Field(proto.BYTES, number=9, oneof="filter") - + value_regex_filter = proto.Field(proto.BYTES, number=9, oneof="filter",) value_range_filter = proto.Field( proto.MESSAGE, number=15, oneof="filter", message="ValueRange", ) - - cells_per_row_offset_filter = proto.Field(proto.INT32, number=10, oneof="filter") - - cells_per_row_limit_filter = proto.Field(proto.INT32, number=11, oneof="filter") - - cells_per_column_limit_filter = proto.Field(proto.INT32, number=12, oneof="filter") - - strip_value_transformer = proto.Field(proto.BOOL, number=13, oneof="filter") - - apply_label_transformer = proto.Field(proto.STRING, number=19, oneof="filter") + cells_per_row_offset_filter = proto.Field(proto.INT32, number=10, oneof="filter",) + cells_per_row_limit_filter = proto.Field(proto.INT32, number=11, oneof="filter",) + cells_per_column_limit_filter = proto.Field(proto.INT32, number=12, oneof="filter",) + strip_value_transformer = proto.Field(proto.BOOL, number=13, oneof="filter",) + apply_label_transformer = proto.Field(proto.STRING, number=19, 
oneof="filter",) class Mutation(proto.Message): @@ -608,7 +567,6 @@ class Mutation(proto.Message): class SetCell(proto.Message): r"""A Mutation which sets the value of the specified cell. - Attributes: family_name (str): The name of the family into which new data should be @@ -630,13 +588,10 @@ class SetCell(proto.Message): cell. """ - family_name = proto.Field(proto.STRING, number=1) - - column_qualifier = proto.Field(proto.BYTES, number=2) - - timestamp_micros = proto.Field(proto.INT64, number=3) - - value = proto.Field(proto.BYTES, number=4) + family_name = proto.Field(proto.STRING, number=1,) + column_qualifier = proto.Field(proto.BYTES, number=2,) + timestamp_micros = proto.Field(proto.INT64, number=3,) + value = proto.Field(proto.BYTES, number=4,) class DeleteFromColumn(proto.Message): r"""A Mutation which deletes cells from the specified column, @@ -655,10 +610,8 @@ class DeleteFromColumn(proto.Message): should be deleted. """ - family_name = proto.Field(proto.STRING, number=1) - - column_qualifier = proto.Field(proto.BYTES, number=2) - + family_name = proto.Field(proto.STRING, number=1,) + column_qualifier = proto.Field(proto.BYTES, number=2,) time_range = proto.Field(proto.MESSAGE, number=3, message="TimestampRange",) class DeleteFromFamily(proto.Message): @@ -671,21 +624,18 @@ class DeleteFromFamily(proto.Message): Must match ``[-_.a-zA-Z0-9]+`` """ - family_name = proto.Field(proto.STRING, number=1) + family_name = proto.Field(proto.STRING, number=1,) class DeleteFromRow(proto.Message): - r"""A Mutation which deletes all cells from the containing row.""" + r"""A Mutation which deletes all cells from the containing row. """ set_cell = proto.Field(proto.MESSAGE, number=1, oneof="mutation", message=SetCell,) - delete_from_column = proto.Field( proto.MESSAGE, number=2, oneof="mutation", message=DeleteFromColumn, ) - delete_from_family = proto.Field( proto.MESSAGE, number=3, oneof="mutation", message=DeleteFromFamily, ) - delete_from_row = proto.Field( proto.MESSAGE, number=4, oneof="mutation", message=DeleteFromRow, ) @@ -716,13 +666,10 @@ class ReadModifyWriteRule(proto.Message): big-endian signed integer), or the entire request will fail. """ - family_name = proto.Field(proto.STRING, number=1) - - column_qualifier = proto.Field(proto.BYTES, number=2) - - append_value = proto.Field(proto.BYTES, number=3, oneof="rule") - - increment_amount = proto.Field(proto.INT64, number=4, oneof="rule") + family_name = proto.Field(proto.STRING, number=1,) + column_qualifier = proto.Field(proto.BYTES, number=2,) + append_value = proto.Field(proto.BYTES, number=3, oneof="rule",) + increment_amount = proto.Field(proto.INT64, number=4, oneof="rule",) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py b/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py index e1ff816ee5d9..dcb87c5f64b2 100644 --- a/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py +++ b/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py @@ -1,6 +1,5 @@ #! /usr/bin/env python3 # -*- coding: utf-8 -*- - # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,7 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# - import argparse import os import libcst as cst @@ -41,13 +39,12 @@ def partition( class bigtableCallTransformer(cst.CSTTransformer): CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { - 'check_and_mutate_row': ('table_name', 'row_key', 'app_profile_id', 'predicate_filter', 'true_mutations', 'false_mutations', ), - 'mutate_row': ('table_name', 'row_key', 'mutations', 'app_profile_id', ), - 'mutate_rows': ('table_name', 'entries', 'app_profile_id', ), - 'read_modify_write_row': ('table_name', 'row_key', 'rules', 'app_profile_id', ), - 'read_rows': ('table_name', 'app_profile_id', 'rows', 'filter', 'rows_limit', ), - 'sample_row_keys': ('table_name', 'app_profile_id', ), - + 'check_and_mutate_row': ('table_name', 'row_key', 'app_profile_id', 'predicate_filter', 'true_mutations', 'false_mutations', ), + 'mutate_row': ('table_name', 'row_key', 'mutations', 'app_profile_id', ), + 'mutate_rows': ('table_name', 'entries', 'app_profile_id', ), + 'read_modify_write_row': ('table_name', 'row_key', 'rules', 'app_profile_id', ), + 'read_rows': ('table_name', 'app_profile_id', 'rows', 'filter', 'rows_limit', ), + 'sample_row_keys': ('table_name', 'app_profile_id', ), } def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: @@ -78,7 +75,7 @@ def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: value=cst.Dict([ cst.DictElement( cst.SimpleString("'{}'".format(name)), - cst.Element(value=arg.value) +cst.Element(value=arg.value) ) # Note: the args + kwargs looks silly, but keep in mind that # the control parameters had to be stripped out, and that diff --git a/packages/google-cloud-bigtable/tests/__init__.py b/packages/google-cloud-bigtable/tests/__init__.py index e69de29bb2d1..4de65971c238 100644 --- a/packages/google-cloud-bigtable/tests/__init__.py +++ b/packages/google-cloud-bigtable/tests/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/packages/google-cloud-bigtable/tests/unit/__init__.py b/packages/google-cloud-bigtable/tests/unit/__init__.py index df379f1e9d88..4de65971c238 100644 --- a/packages/google-cloud-bigtable/tests/unit/__init__.py +++ b/packages/google-cloud-bigtable/tests/unit/__init__.py @@ -1,4 +1,5 @@ -# Copyright 2016 Google LLC +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,3 +12,4 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
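For readers unfamiliar with the generated fixup script: it uses libcst and the METHOD_TO_PARAMS table above to rewrite old positional/keyword call sites into the single `request` dict form, leaving the control parameters (`retry`, `timeout`, `metadata`) as real keyword arguments. An illustrative before/after on a hypothetical call site, not taken from this patch:

    # Before running scripts/fixup_bigtable_v2_keywords.py:
    client.mutate_row(table_name, row_key, mutations)

    # After: arguments are packed into the request dict in METHOD_TO_PARAMS order.
    client.mutate_row(
        request={
            "table_name": table_name,
            "row_key": row_key,
            "mutations": mutations,
        }
    )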
+# diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/__init__.py b/packages/google-cloud-bigtable/tests/unit/gapic/__init__.py new file mode 100644 index 000000000000..4de65971c238 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/gapic/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/__init__.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/__init__.py index 42ffdf2bc43d..4de65971c238 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/__init__.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/__init__.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- - # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py index 618d80317201..8fd55715c7e8 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- - # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,9 +13,9 @@ # See the License for the specific language governing permissions and # limitations under the License. # - import os import mock +import packaging.version import grpc from grpc.experimental import aio @@ -24,20 +23,48 @@ import pytest from proto.marshal.rules.dates import DurationRule, TimestampRule -from google import auth + from google.api_core import client_options -from google.api_core import exceptions +from google.api_core import exceptions as core_exceptions from google.api_core import gapic_v1 from google.api_core import grpc_helpers from google.api_core import grpc_helpers_async -from google.auth import credentials +from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError from google.cloud.bigtable_v2.services.bigtable import BigtableAsyncClient from google.cloud.bigtable_v2.services.bigtable import BigtableClient from google.cloud.bigtable_v2.services.bigtable import transports +from google.cloud.bigtable_v2.services.bigtable.transports.base import _API_CORE_VERSION +from google.cloud.bigtable_v2.services.bigtable.transports.base import ( + _GOOGLE_AUTH_VERSION, +) from google.cloud.bigtable_v2.types import bigtable from google.cloud.bigtable_v2.types import data from google.oauth2 import service_account +import google.auth + + +# TODO(busunkim): Once google-api-core >= 1.26.0 is required: +# - Delete all the api-core and auth "less than" test cases +# - Delete these pytest markers (Make the "greater than or equal to" tests the default). 
+requires_google_auth_lt_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), + reason="This test requires google-auth < 1.25.0", +) +requires_google_auth_gte_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), + reason="This test requires google-auth >= 1.25.0", +) + +requires_api_core_lt_1_26_0 = pytest.mark.skipif( + packaging.version.parse(_API_CORE_VERSION) >= packaging.version.parse("1.26.0"), + reason="This test requires google-api-core < 1.26.0", +) + +requires_api_core_gte_1_26_0 = pytest.mark.skipif( + packaging.version.parse(_API_CORE_VERSION) < packaging.version.parse("1.26.0"), + reason="This test requires google-api-core >= 1.26.0", +) def client_cert_source_callback(): @@ -81,7 +108,7 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize("client_class", [BigtableClient, BigtableAsyncClient,]) def test_bigtable_client_from_service_account_info(client_class): - creds = credentials.AnonymousCredentials() + creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: @@ -96,7 +123,7 @@ def test_bigtable_client_from_service_account_info(client_class): @pytest.mark.parametrize("client_class", [BigtableClient, BigtableAsyncClient,]) def test_bigtable_client_from_service_account_file(client_class): - creds = credentials.AnonymousCredentials() + creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_file" ) as factory: @@ -141,7 +168,7 @@ def test_bigtable_client_get_transport_class(): def test_bigtable_client_client_options(client_class, transport_class, transport_name): # Check that if channel is provided we won't create a new one. with mock.patch.object(BigtableClient, "get_transport_class") as gtc: - transport = transport_class(credentials=credentials.AnonymousCredentials()) + transport = transport_class(credentials=ga_credentials.AnonymousCredentials()) client = client_class(transport=transport) gtc.assert_not_called() @@ -415,7 +442,7 @@ def test_bigtable_client_client_options_from_dict(): def test_read_rows(transport: str = "grpc", request_type=bigtable.ReadRowsRequest): client = BigtableClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -426,13 +453,11 @@ def test_read_rows(transport: str = "grpc", request_type=bigtable.ReadRowsReques with mock.patch.object(type(client.transport.read_rows), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = iter([bigtable.ReadRowsResponse()]) - response = client.read_rows(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.ReadRowsRequest() # Establish that the response is the type that we expect. @@ -448,7 +473,7 @@ def test_read_rows_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableClient( - credentials=credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -456,7 +481,6 @@ def test_read_rows_empty_call(): client.read_rows() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.ReadRowsRequest() @@ -465,7 +489,7 @@ async def test_read_rows_async( transport: str = "grpc_asyncio", request_type=bigtable.ReadRowsRequest ): client = BigtableAsyncClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -479,13 +503,11 @@ async def test_read_rows_async( call.return_value.read = mock.AsyncMock( side_effect=[bigtable.ReadRowsResponse()] ) - response = await client.read_rows(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.ReadRowsRequest() # Establish that the response is the type that we expect. @@ -499,17 +521,17 @@ async def test_read_rows_async_from_dict(): def test_read_rows_field_headers(): - client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable.ReadRowsRequest() + request.table_name = "table_name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.read_rows), "__call__") as call: call.return_value = iter([bigtable.ReadRowsResponse()]) - client.read_rows(request) # Establish that the underlying gRPC stub method was called. @@ -524,11 +546,12 @@ def test_read_rows_field_headers(): @pytest.mark.asyncio async def test_read_rows_field_headers_async(): - client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable.ReadRowsRequest() + request.table_name = "table_name/value" # Mock the actual call within the gRPC stub, and fake the request. @@ -537,7 +560,6 @@ async def test_read_rows_field_headers_async(): call.return_value.read = mock.AsyncMock( side_effect=[bigtable.ReadRowsResponse()] ) - await client.read_rows(request) # Establish that the underlying gRPC stub method was called. @@ -551,13 +573,12 @@ async def test_read_rows_field_headers_async(): def test_read_rows_flattened(): - client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.read_rows), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = iter([bigtable.ReadRowsResponse()]) - # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.read_rows( @@ -568,14 +589,12 @@ def test_read_rows_flattened(): # request object values. 
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].table_name == "table_name_value" - assert args[0].app_profile_id == "app_profile_id_value" def test_read_rows_flattened_error(): - client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. @@ -589,7 +608,7 @@ def test_read_rows_flattened_error(): @pytest.mark.asyncio async def test_read_rows_flattened_async(): - client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.read_rows), "__call__") as call: @@ -607,15 +626,13 @@ async def test_read_rows_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].table_name == "table_name_value" - assert args[0].app_profile_id == "app_profile_id_value" @pytest.mark.asyncio async def test_read_rows_flattened_error_async(): - client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. @@ -631,7 +648,7 @@ def test_sample_row_keys( transport: str = "grpc", request_type=bigtable.SampleRowKeysRequest ): client = BigtableClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -642,13 +659,11 @@ def test_sample_row_keys( with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = iter([bigtable.SampleRowKeysResponse()]) - response = client.sample_row_keys(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.SampleRowKeysRequest() # Establish that the response is the type that we expect. @@ -664,7 +679,7 @@ def test_sample_row_keys_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableClient( - credentials=credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -672,7 +687,6 @@ def test_sample_row_keys_empty_call(): client.sample_row_keys() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.SampleRowKeysRequest() @@ -681,7 +695,7 @@ async def test_sample_row_keys_async( transport: str = "grpc_asyncio", request_type=bigtable.SampleRowKeysRequest ): client = BigtableAsyncClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -695,13 +709,11 @@ async def test_sample_row_keys_async( call.return_value.read = mock.AsyncMock( side_effect=[bigtable.SampleRowKeysResponse()] ) - response = await client.sample_row_keys(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.SampleRowKeysRequest() # Establish that the response is the type that we expect. @@ -715,17 +727,17 @@ async def test_sample_row_keys_async_from_dict(): def test_sample_row_keys_field_headers(): - client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable.SampleRowKeysRequest() + request.table_name = "table_name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: call.return_value = iter([bigtable.SampleRowKeysResponse()]) - client.sample_row_keys(request) # Establish that the underlying gRPC stub method was called. @@ -740,11 +752,12 @@ def test_sample_row_keys_field_headers(): @pytest.mark.asyncio async def test_sample_row_keys_field_headers_async(): - client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable.SampleRowKeysRequest() + request.table_name = "table_name/value" # Mock the actual call within the gRPC stub, and fake the request. @@ -753,7 +766,6 @@ async def test_sample_row_keys_field_headers_async(): call.return_value.read = mock.AsyncMock( side_effect=[bigtable.SampleRowKeysResponse()] ) - await client.sample_row_keys(request) # Establish that the underlying gRPC stub method was called. @@ -767,13 +779,12 @@ async def test_sample_row_keys_field_headers_async(): def test_sample_row_keys_flattened(): - client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = iter([bigtable.SampleRowKeysResponse()]) - # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.sample_row_keys( @@ -784,14 +795,12 @@ def test_sample_row_keys_flattened(): # request object values. 
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].table_name == "table_name_value" - assert args[0].app_profile_id == "app_profile_id_value" def test_sample_row_keys_flattened_error(): - client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. @@ -805,7 +814,7 @@ def test_sample_row_keys_flattened_error(): @pytest.mark.asyncio async def test_sample_row_keys_flattened_async(): - client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: @@ -823,15 +832,13 @@ async def test_sample_row_keys_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].table_name == "table_name_value" - assert args[0].app_profile_id == "app_profile_id_value" @pytest.mark.asyncio async def test_sample_row_keys_flattened_error_async(): - client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. @@ -845,7 +852,7 @@ async def test_sample_row_keys_flattened_error_async(): def test_mutate_row(transport: str = "grpc", request_type=bigtable.MutateRowRequest): client = BigtableClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -856,17 +863,14 @@ def test_mutate_row(transport: str = "grpc", request_type=bigtable.MutateRowRequ with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = bigtable.MutateRowResponse() - response = client.mutate_row(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.MutateRowRequest() # Establish that the response is the type that we expect. - assert isinstance(response, bigtable.MutateRowResponse) @@ -878,7 +882,7 @@ def test_mutate_row_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableClient( - credentials=credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -886,7 +890,6 @@ def test_mutate_row_empty_call(): client.mutate_row() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.MutateRowRequest() @@ -895,7 +898,7 @@ async def test_mutate_row_async( transport: str = "grpc_asyncio", request_type=bigtable.MutateRowRequest ): client = BigtableAsyncClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -908,13 +911,11 @@ async def test_mutate_row_async( call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( bigtable.MutateRowResponse() ) - response = await client.mutate_row(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.MutateRowRequest() # Establish that the response is the type that we expect. @@ -927,17 +928,17 @@ async def test_mutate_row_async_from_dict(): def test_mutate_row_field_headers(): - client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable.MutateRowRequest() + request.table_name = "table_name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: call.return_value = bigtable.MutateRowResponse() - client.mutate_row(request) # Establish that the underlying gRPC stub method was called. @@ -952,11 +953,12 @@ def test_mutate_row_field_headers(): @pytest.mark.asyncio async def test_mutate_row_field_headers_async(): - client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable.MutateRowRequest() + request.table_name = "table_name/value" # Mock the actual call within the gRPC stub, and fake the request. @@ -964,7 +966,6 @@ async def test_mutate_row_field_headers_async(): call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( bigtable.MutateRowResponse() ) - await client.mutate_row(request) # Establish that the underlying gRPC stub method was called. @@ -978,13 +979,12 @@ async def test_mutate_row_field_headers_async(): def test_mutate_row_flattened(): - client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = bigtable.MutateRowResponse() - # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.mutate_row( @@ -1002,22 +1002,18 @@ def test_mutate_row_flattened(): # request object values. 
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].table_name == "table_name_value" - assert args[0].row_key == b"row_key_blob" - assert args[0].mutations == [ data.Mutation( set_cell=data.Mutation.SetCell(family_name="family_name_value") ) ] - assert args[0].app_profile_id == "app_profile_id_value" def test_mutate_row_flattened_error(): - client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. @@ -1037,7 +1033,7 @@ def test_mutate_row_flattened_error(): @pytest.mark.asyncio async def test_mutate_row_flattened_async(): - client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: @@ -1064,23 +1060,19 @@ async def test_mutate_row_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].table_name == "table_name_value" - assert args[0].row_key == b"row_key_blob" - assert args[0].mutations == [ data.Mutation( set_cell=data.Mutation.SetCell(family_name="family_name_value") ) ] - assert args[0].app_profile_id == "app_profile_id_value" @pytest.mark.asyncio async def test_mutate_row_flattened_error_async(): - client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. @@ -1100,7 +1092,7 @@ async def test_mutate_row_flattened_error_async(): def test_mutate_rows(transport: str = "grpc", request_type=bigtable.MutateRowsRequest): client = BigtableClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -1111,13 +1103,11 @@ def test_mutate_rows(transport: str = "grpc", request_type=bigtable.MutateRowsRe with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = iter([bigtable.MutateRowsResponse()]) - response = client.mutate_rows(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.MutateRowsRequest() # Establish that the response is the type that we expect. @@ -1133,7 +1123,7 @@ def test_mutate_rows_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableClient( - credentials=credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -1141,7 +1131,6 @@ def test_mutate_rows_empty_call(): client.mutate_rows() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.MutateRowsRequest() @@ -1150,7 +1139,7 @@ async def test_mutate_rows_async( transport: str = "grpc_asyncio", request_type=bigtable.MutateRowsRequest ): client = BigtableAsyncClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -1164,13 +1153,11 @@ async def test_mutate_rows_async( call.return_value.read = mock.AsyncMock( side_effect=[bigtable.MutateRowsResponse()] ) - response = await client.mutate_rows(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.MutateRowsRequest() # Establish that the response is the type that we expect. @@ -1184,17 +1171,17 @@ async def test_mutate_rows_async_from_dict(): def test_mutate_rows_field_headers(): - client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable.MutateRowsRequest() + request.table_name = "table_name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: call.return_value = iter([bigtable.MutateRowsResponse()]) - client.mutate_rows(request) # Establish that the underlying gRPC stub method was called. @@ -1209,11 +1196,12 @@ def test_mutate_rows_field_headers(): @pytest.mark.asyncio async def test_mutate_rows_field_headers_async(): - client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable.MutateRowsRequest() + request.table_name = "table_name/value" # Mock the actual call within the gRPC stub, and fake the request. @@ -1222,7 +1210,6 @@ async def test_mutate_rows_field_headers_async(): call.return_value.read = mock.AsyncMock( side_effect=[bigtable.MutateRowsResponse()] ) - await client.mutate_rows(request) # Establish that the underlying gRPC stub method was called. @@ -1236,13 +1223,12 @@ async def test_mutate_rows_field_headers_async(): def test_mutate_rows_flattened(): - client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = iter([bigtable.MutateRowsResponse()]) - # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.mutate_rows( @@ -1255,18 +1241,15 @@ def test_mutate_rows_flattened(): # request object values. 
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].table_name == "table_name_value" - assert args[0].entries == [ bigtable.MutateRowsRequest.Entry(row_key=b"row_key_blob") ] - assert args[0].app_profile_id == "app_profile_id_value" def test_mutate_rows_flattened_error(): - client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. @@ -1281,7 +1264,7 @@ def test_mutate_rows_flattened_error(): @pytest.mark.asyncio async def test_mutate_rows_flattened_async(): - client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: @@ -1301,19 +1284,16 @@ async def test_mutate_rows_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].table_name == "table_name_value" - assert args[0].entries == [ bigtable.MutateRowsRequest.Entry(row_key=b"row_key_blob") ] - assert args[0].app_profile_id == "app_profile_id_value" @pytest.mark.asyncio async def test_mutate_rows_flattened_error_async(): - client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. @@ -1330,7 +1310,7 @@ def test_check_and_mutate_row( transport: str = "grpc", request_type=bigtable.CheckAndMutateRowRequest ): client = BigtableClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -1343,19 +1323,15 @@ def test_check_and_mutate_row( ) as call: # Designate an appropriate return value for the call. call.return_value = bigtable.CheckAndMutateRowResponse(predicate_matched=True,) - response = client.check_and_mutate_row(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.CheckAndMutateRowRequest() # Establish that the response is the type that we expect. - assert isinstance(response, bigtable.CheckAndMutateRowResponse) - assert response.predicate_matched is True @@ -1367,7 +1343,7 @@ def test_check_and_mutate_row_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableClient( - credentials=credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -1377,7 +1353,6 @@ def test_check_and_mutate_row_empty_call(): client.check_and_mutate_row() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.CheckAndMutateRowRequest() @@ -1386,7 +1361,7 @@ async def test_check_and_mutate_row_async( transport: str = "grpc_asyncio", request_type=bigtable.CheckAndMutateRowRequest ): client = BigtableAsyncClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -1401,18 +1376,15 @@ async def test_check_and_mutate_row_async( call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( bigtable.CheckAndMutateRowResponse(predicate_matched=True,) ) - response = await client.check_and_mutate_row(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.CheckAndMutateRowRequest() # Establish that the response is the type that we expect. assert isinstance(response, bigtable.CheckAndMutateRowResponse) - assert response.predicate_matched is True @@ -1422,11 +1394,12 @@ async def test_check_and_mutate_row_async_from_dict(): def test_check_and_mutate_row_field_headers(): - client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable.CheckAndMutateRowRequest() + request.table_name = "table_name/value" # Mock the actual call within the gRPC stub, and fake the request. @@ -1434,7 +1407,6 @@ def test_check_and_mutate_row_field_headers(): type(client.transport.check_and_mutate_row), "__call__" ) as call: call.return_value = bigtable.CheckAndMutateRowResponse() - client.check_and_mutate_row(request) # Establish that the underlying gRPC stub method was called. @@ -1449,11 +1421,12 @@ def test_check_and_mutate_row_field_headers(): @pytest.mark.asyncio async def test_check_and_mutate_row_field_headers_async(): - client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable.CheckAndMutateRowRequest() + request.table_name = "table_name/value" # Mock the actual call within the gRPC stub, and fake the request. @@ -1463,7 +1436,6 @@ async def test_check_and_mutate_row_field_headers_async(): call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( bigtable.CheckAndMutateRowResponse() ) - await client.check_and_mutate_row(request) # Establish that the underlying gRPC stub method was called. @@ -1477,7 +1449,7 @@ async def test_check_and_mutate_row_field_headers_async(): def test_check_and_mutate_row_flattened(): - client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -1485,7 +1457,6 @@ def test_check_and_mutate_row_flattened(): ) as call: # Designate an appropriate return value for the call. 
call.return_value = bigtable.CheckAndMutateRowResponse() - # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.check_and_mutate_row( @@ -1519,11 +1490,8 @@ def test_check_and_mutate_row_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].table_name == "table_name_value" - assert args[0].row_key == b"row_key_blob" - assert args[0].predicate_filter == data.RowFilter( chain=data.RowFilter.Chain( filters=[ @@ -1533,24 +1501,21 @@ def test_check_and_mutate_row_flattened(): ] ) ) - assert args[0].true_mutations == [ data.Mutation( set_cell=data.Mutation.SetCell(family_name="family_name_value") ) ] - assert args[0].false_mutations == [ data.Mutation( set_cell=data.Mutation.SetCell(family_name="family_name_value") ) ] - assert args[0].app_profile_id == "app_profile_id_value" def test_check_and_mutate_row_flattened_error(): - client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. @@ -1586,7 +1551,7 @@ def test_check_and_mutate_row_flattened_error(): @pytest.mark.asyncio async def test_check_and_mutate_row_flattened_async(): - client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -1631,11 +1596,8 @@ async def test_check_and_mutate_row_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].table_name == "table_name_value" - assert args[0].row_key == b"row_key_blob" - assert args[0].predicate_filter == data.RowFilter( chain=data.RowFilter.Chain( filters=[ @@ -1645,25 +1607,22 @@ async def test_check_and_mutate_row_flattened_async(): ] ) ) - assert args[0].true_mutations == [ data.Mutation( set_cell=data.Mutation.SetCell(family_name="family_name_value") ) ] - assert args[0].false_mutations == [ data.Mutation( set_cell=data.Mutation.SetCell(family_name="family_name_value") ) ] - assert args[0].app_profile_id == "app_profile_id_value" @pytest.mark.asyncio async def test_check_and_mutate_row_flattened_error_async(): - client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. @@ -1701,7 +1660,7 @@ def test_read_modify_write_row( transport: str = "grpc", request_type=bigtable.ReadModifyWriteRowRequest ): client = BigtableClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -1714,17 +1673,14 @@ def test_read_modify_write_row( ) as call: # Designate an appropriate return value for the call. call.return_value = bigtable.ReadModifyWriteRowResponse() - response = client.read_modify_write_row(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.ReadModifyWriteRowRequest() # Establish that the response is the type that we expect. 
- assert isinstance(response, bigtable.ReadModifyWriteRowResponse) @@ -1736,7 +1692,7 @@ def test_read_modify_write_row_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableClient( - credentials=credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1746,7 +1702,6 @@ def test_read_modify_write_row_empty_call(): client.read_modify_write_row() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.ReadModifyWriteRowRequest() @@ -1755,7 +1710,7 @@ async def test_read_modify_write_row_async( transport: str = "grpc_asyncio", request_type=bigtable.ReadModifyWriteRowRequest ): client = BigtableAsyncClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -1770,13 +1725,11 @@ async def test_read_modify_write_row_async( call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( bigtable.ReadModifyWriteRowResponse() ) - response = await client.read_modify_write_row(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.ReadModifyWriteRowRequest() # Establish that the response is the type that we expect. @@ -1789,11 +1742,12 @@ async def test_read_modify_write_row_async_from_dict(): def test_read_modify_write_row_field_headers(): - client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable.ReadModifyWriteRowRequest() + request.table_name = "table_name/value" # Mock the actual call within the gRPC stub, and fake the request. @@ -1801,7 +1755,6 @@ def test_read_modify_write_row_field_headers(): type(client.transport.read_modify_write_row), "__call__" ) as call: call.return_value = bigtable.ReadModifyWriteRowResponse() - client.read_modify_write_row(request) # Establish that the underlying gRPC stub method was called. @@ -1816,11 +1769,12 @@ def test_read_modify_write_row_field_headers(): @pytest.mark.asyncio async def test_read_modify_write_row_field_headers_async(): - client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable.ReadModifyWriteRowRequest() + request.table_name = "table_name/value" # Mock the actual call within the gRPC stub, and fake the request. @@ -1830,7 +1784,6 @@ async def test_read_modify_write_row_field_headers_async(): call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( bigtable.ReadModifyWriteRowResponse() ) - await client.read_modify_write_row(request) # Establish that the underlying gRPC stub method was called. 
@@ -1844,7 +1797,7 @@ async def test_read_modify_write_row_field_headers_async(): def test_read_modify_write_row_flattened(): - client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -1852,7 +1805,6 @@ def test_read_modify_write_row_flattened(): ) as call: # Designate an appropriate return value for the call. call.return_value = bigtable.ReadModifyWriteRowResponse() - # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.read_modify_write_row( @@ -1866,20 +1818,16 @@ def test_read_modify_write_row_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].table_name == "table_name_value" - assert args[0].row_key == b"row_key_blob" - assert args[0].rules == [ data.ReadModifyWriteRule(family_name="family_name_value") ] - assert args[0].app_profile_id == "app_profile_id_value" def test_read_modify_write_row_flattened_error(): - client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. @@ -1895,7 +1843,7 @@ def test_read_modify_write_row_flattened_error(): @pytest.mark.asyncio async def test_read_modify_write_row_flattened_async(): - client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -1920,21 +1868,17 @@ async def test_read_modify_write_row_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].table_name == "table_name_value" - assert args[0].row_key == b"row_key_blob" - assert args[0].rules == [ data.ReadModifyWriteRule(family_name="family_name_value") ] - assert args[0].app_profile_id == "app_profile_id_value" @pytest.mark.asyncio async def test_read_modify_write_row_flattened_error_async(): - client = BigtableAsyncClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) # Attempting to call a method with both a request object and flattened # fields is an error. @@ -1951,16 +1895,16 @@ async def test_read_modify_write_row_flattened_error_async(): def test_credentials_transport_error(): # It is an error to provide credentials and a transport instance. transport = transports.BigtableGrpcTransport( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client = BigtableClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # It is an error to provide a credentials file and a transport instance. transport = transports.BigtableGrpcTransport( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client = BigtableClient( @@ -1970,7 +1914,7 @@ def test_credentials_transport_error(): # It is an error to provide scopes and a transport instance. 
transport = transports.BigtableGrpcTransport( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client = BigtableClient( @@ -1981,7 +1925,7 @@ def test_credentials_transport_error(): def test_transport_instance(): # A client may be instantiated with a custom transport instance. transport = transports.BigtableGrpcTransport( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) client = BigtableClient(transport=transport) assert client.transport is transport @@ -1990,13 +1934,13 @@ def test_transport_instance(): def test_transport_get_channel(): # A client may be instantiated with a custom transport instance. transport = transports.BigtableGrpcTransport( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) channel = transport.grpc_channel assert channel transport = transports.BigtableGrpcAsyncIOTransport( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) channel = transport.grpc_channel assert channel @@ -2008,23 +1952,23 @@ def test_transport_get_channel(): ) def test_transport_adc(transport_class): # Test default credentials are used if not provided. - with mock.patch.object(auth, "default") as adc: - adc.return_value = (credentials.AnonymousCredentials(), None) + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) transport_class() adc.assert_called_once() def test_transport_grpc_default(): # A client should use the gRPC transport by default. - client = BigtableClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableClient(credentials=ga_credentials.AnonymousCredentials(),) assert isinstance(client.transport, transports.BigtableGrpcTransport,) def test_bigtable_base_transport_error(): # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(exceptions.DuplicateCredentialArgs): + with pytest.raises(core_exceptions.DuplicateCredentialArgs): transport = transports.BigtableTransport( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), credentials_file="credentials.json", ) @@ -2036,7 +1980,7 @@ def test_bigtable_base_transport(): ) as Transport: Transport.return_value = None transport = transports.BigtableTransport( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Every method on the transport should just blindly @@ -2054,15 +1998,44 @@ def test_bigtable_base_transport(): getattr(transport, method)(request=object()) +@requires_google_auth_gte_1_25_0 def test_bigtable_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file with mock.patch.object( - auth, "load_credentials_from_file" + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch( + "google.cloud.bigtable_v2.services.bigtable.transports.BigtableTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.BigtableTransport( + credentials_file="credentials.json", quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=None, + default_scopes=( + "https://www.googleapis.com/auth/bigtable.data", + 
"https://www.googleapis.com/auth/bigtable.data.readonly", + "https://www.googleapis.com/auth/cloud-bigtable.data", + "https://www.googleapis.com/auth/cloud-bigtable.data.readonly", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ), + quota_project_id="octopus", + ) + + +@requires_google_auth_lt_1_25_0 +def test_bigtable_base_transport_with_credentials_file_old_google_auth(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True ) as load_creds, mock.patch( "google.cloud.bigtable_v2.services.bigtable.transports.BigtableTransport._prep_wrapped_messages" ) as Transport: Transport.return_value = None - load_creds.return_value = (credentials.AnonymousCredentials(), None) + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) transport = transports.BigtableTransport( credentials_file="credentials.json", quota_project_id="octopus", ) @@ -2082,19 +2055,40 @@ def test_bigtable_base_transport_with_credentials_file(): def test_bigtable_base_transport_with_adc(): # Test the default credentials are used if credentials and credentials_file are None. - with mock.patch.object(auth, "default") as adc, mock.patch( + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch( "google.cloud.bigtable_v2.services.bigtable.transports.BigtableTransport._prep_wrapped_messages" ) as Transport: Transport.return_value = None - adc.return_value = (credentials.AnonymousCredentials(), None) + adc.return_value = (ga_credentials.AnonymousCredentials(), None) transport = transports.BigtableTransport() adc.assert_called_once() +@requires_google_auth_gte_1_25_0 def test_bigtable_auth_adc(): # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(auth, "default") as adc: - adc.return_value = (credentials.AnonymousCredentials(), None) + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + BigtableClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + "https://www.googleapis.com/auth/bigtable.data", + "https://www.googleapis.com/auth/bigtable.data.readonly", + "https://www.googleapis.com/auth/cloud-bigtable.data", + "https://www.googleapis.com/auth/cloud-bigtable.data.readonly", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ), + quota_project_id=None, + ) + + +@requires_google_auth_lt_1_25_0 +def test_bigtable_auth_adc_old_google_auth(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) BigtableClient() adc.assert_called_once_with( scopes=( @@ -2109,14 +2103,42 @@ def test_bigtable_auth_adc(): ) -def test_bigtable_transport_auth_adc(): +@pytest.mark.parametrize( + "transport_class", + [transports.BigtableGrpcTransport, transports.BigtableGrpcAsyncIOTransport,], +) +@requires_google_auth_gte_1_25_0 +def test_bigtable_transport_auth_adc(transport_class): # If credentials and host are not provided, the transport class should use # ADC credentials. 
- with mock.patch.object(auth, "default") as adc: - adc.return_value = (credentials.AnonymousCredentials(), None) - transports.BigtableGrpcTransport( - host="squid.clam.whelk", quota_project_id="octopus" + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( + "https://www.googleapis.com/auth/bigtable.data", + "https://www.googleapis.com/auth/bigtable.data.readonly", + "https://www.googleapis.com/auth/cloud-bigtable.data", + "https://www.googleapis.com/auth/cloud-bigtable.data.readonly", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ), + quota_project_id="octopus", ) + + +@pytest.mark.parametrize( + "transport_class", + [transports.BigtableGrpcTransport, transports.BigtableGrpcAsyncIOTransport,], +) +@requires_google_auth_lt_1_25_0 +def test_bigtable_transport_auth_adc_old_google_auth(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus") adc.assert_called_once_with( scopes=( "https://www.googleapis.com/auth/bigtable.data", @@ -2130,12 +2152,131 @@ def test_bigtable_transport_auth_adc(): ) +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.BigtableGrpcTransport, grpc_helpers), + (transports.BigtableGrpcAsyncIOTransport, grpc_helpers_async), + ], +) +@requires_api_core_gte_1_26_0 +def test_bigtable_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + + create_channel.assert_called_with( + "bigtable.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + "https://www.googleapis.com/auth/bigtable.data", + "https://www.googleapis.com/auth/bigtable.data.readonly", + "https://www.googleapis.com/auth/cloud-bigtable.data", + "https://www.googleapis.com/auth/cloud-bigtable.data.readonly", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ), + scopes=["1", "2"], + default_host="bigtable.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.BigtableGrpcTransport, grpc_helpers), + (transports.BigtableGrpcAsyncIOTransport, grpc_helpers_async), + ], +) +@requires_api_core_lt_1_26_0 +def test_bigtable_transport_create_channel_old_api_core(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class(quota_project_id="octopus") + + create_channel.assert_called_with( + "bigtable.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + scopes=( + "https://www.googleapis.com/auth/bigtable.data", + "https://www.googleapis.com/auth/bigtable.data.readonly", + "https://www.googleapis.com/auth/cloud-bigtable.data", + "https://www.googleapis.com/auth/cloud-bigtable.data.readonly", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ), + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.BigtableGrpcTransport, grpc_helpers), + (transports.BigtableGrpcAsyncIOTransport, grpc_helpers_async), + ], +) +@requires_api_core_lt_1_26_0 +def test_bigtable_transport_create_channel_user_scopes(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + + create_channel.assert_called_with( + "bigtable.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + scopes=["1", "2"], + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + @pytest.mark.parametrize( "transport_class", [transports.BigtableGrpcTransport, transports.BigtableGrpcAsyncIOTransport], ) def test_bigtable_grpc_transport_client_cert_source_for_mtls(transport_class): - cred = credentials.AnonymousCredentials() + cred = ga_credentials.AnonymousCredentials() # Check ssl_channel_credentials is used if provided. 
with mock.patch.object(transport_class, "create_channel") as mock_create_channel: @@ -2181,7 +2322,7 @@ def test_bigtable_grpc_transport_client_cert_source_for_mtls(transport_class): def test_bigtable_host_no_port(): client = BigtableClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="bigtable.googleapis.com" ), @@ -2191,7 +2332,7 @@ def test_bigtable_host_no_port(): def test_bigtable_host_with_port(): client = BigtableClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="bigtable.googleapis.com:8000" ), @@ -2242,9 +2383,9 @@ def test_bigtable_transport_channel_mtls_with_client_cert_source(transport_class mock_grpc_channel = mock.Mock() grpc_create_channel.return_value = mock_grpc_channel - cred = credentials.AnonymousCredentials() + cred = ga_credentials.AnonymousCredentials() with pytest.warns(DeprecationWarning): - with mock.patch.object(auth, "default") as adc: + with mock.patch.object(google.auth, "default") as adc: adc.return_value = (cred, None) transport = transport_class( host="squid.clam.whelk", @@ -2333,7 +2474,6 @@ def test_table_path(): project = "squid" instance = "clam" table = "whelk" - expected = "projects/{project}/instances/{instance}/tables/{table}".format( project=project, instance=instance, table=table, ) @@ -2356,7 +2496,6 @@ def test_parse_table_path(): def test_common_billing_account_path(): billing_account = "cuttlefish" - expected = "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -2377,7 +2516,6 @@ def test_parse_common_billing_account_path(): def test_common_folder_path(): folder = "winkle" - expected = "folders/{folder}".format(folder=folder,) actual = BigtableClient.common_folder_path(folder) assert expected == actual @@ -2396,7 +2534,6 @@ def test_parse_common_folder_path(): def test_common_organization_path(): organization = "scallop" - expected = "organizations/{organization}".format(organization=organization,) actual = BigtableClient.common_organization_path(organization) assert expected == actual @@ -2415,7 +2552,6 @@ def test_parse_common_organization_path(): def test_common_project_path(): project = "squid" - expected = "projects/{project}".format(project=project,) actual = BigtableClient.common_project_path(project) assert expected == actual @@ -2435,7 +2571,6 @@ def test_parse_common_project_path(): def test_common_location_path(): project = "whelk" location = "octopus" - expected = "projects/{project}/locations/{location}".format( project=project, location=location, ) @@ -2462,7 +2597,7 @@ def test_client_withDEFAULT_CLIENT_INFO(): transports.BigtableTransport, "_prep_wrapped_messages" ) as prep: client = BigtableClient( - credentials=credentials.AnonymousCredentials(), client_info=client_info, + credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) @@ -2471,6 +2606,6 @@ def test_client_withDEFAULT_CLIENT_INFO(): ) as prep: transport_class = BigtableClient.get_transport_class() transport = transport_class( - credentials=credentials.AnonymousCredentials(), client_info=client_info, + credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) From 1d329248dd8a6adef0fd213702335d69db45cc71 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" 
<78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Sun, 16 May 2021 11:16:02 +0000 Subject: [PATCH 448/892] chore: new owl bot post processor docker image (#315) gcr.io/repo-automation-bots/owlbot-python:latest@sha256:4c981a6b6f2b8914a448d7b3a01688365be03e3ed26dfee399a6aa77fb112eaa --- packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml | 5 ++--- packages/google-cloud-bigtable/.pre-commit-config.yaml | 2 +- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index d49860b32e70..864c17653f80 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -1,4 +1,3 @@ docker: - digest: sha256:457583330eec64daa02aeb7a72a04d33e7be2428f646671ce4045dcbc0191b1e - image: gcr.io/repo-automation-bots/owlbot-python:latest - + image: gcr.io/repo-automation-bots/owlbot-python:latest + digest: sha256:4c981a6b6f2b8914a448d7b3a01688365be03e3ed26dfee399a6aa77fb112eaa diff --git a/packages/google-cloud-bigtable/.pre-commit-config.yaml b/packages/google-cloud-bigtable/.pre-commit-config.yaml index 1bbd787833ec..4f00c7cffcfd 100644 --- a/packages/google-cloud-bigtable/.pre-commit-config.yaml +++ b/packages/google-cloud-bigtable/.pre-commit-config.yaml @@ -26,6 +26,6 @@ repos: hooks: - id: black - repo: https://gitlab.com/pycqa/flake8 - rev: 3.9.1 + rev: 3.9.2 hooks: - id: flake8 From 831f937327765d74670d47e0269ee488ebe33520 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Mon, 24 May 2021 16:34:03 +0000 Subject: [PATCH 449/892] docs: fix broken links in multiprocessing.rst (#317) gcr.io/repo-automation-bots/owlbot-python:latest@sha256:0856ca711da1fd5ec9d6d7da6c50aa0bbf550fb94acb47b55159a640791987bf --- packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml | 2 +- packages/google-cloud-bigtable/docs/multiprocessing.rst | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 864c17653f80..127c2cdf9503 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -1,3 +1,3 @@ docker: image: gcr.io/repo-automation-bots/owlbot-python:latest - digest: sha256:4c981a6b6f2b8914a448d7b3a01688365be03e3ed26dfee399a6aa77fb112eaa + digest: sha256:0856ca711da1fd5ec9d6d7da6c50aa0bbf550fb94acb47b55159a640791987bf diff --git a/packages/google-cloud-bigtable/docs/multiprocessing.rst b/packages/google-cloud-bigtable/docs/multiprocessing.rst index 1cb29d4ca967..536d17b2ea65 100644 --- a/packages/google-cloud-bigtable/docs/multiprocessing.rst +++ b/packages/google-cloud-bigtable/docs/multiprocessing.rst @@ -1,7 +1,7 @@ .. note:: - Because this client uses :mod:`grpcio` library, it is safe to + Because this client uses :mod:`grpc` library, it is safe to share instances across threads. In multiprocessing scenarios, the best practice is to create client instances *after* the invocation of - :func:`os.fork` by :class:`multiprocessing.Pool` or + :func:`os.fork` by :class:`multiprocessing.pool.Pool` or :class:`multiprocessing.Process`. 
From 6a12cb7e8b8b5561b067f54af64e9d5a1b6fd68b Mon Sep 17 00:00:00 2001 From: Anthonios Partheniou Date: Tue, 25 May 2021 09:38:02 -0400 Subject: [PATCH 450/892] chore: delete unused protos (#316) --- .../proto/bigtable_instance_admin.proto | 574 ---------- .../proto/bigtable_table_admin.proto | 1003 ----------------- .../bigtable_admin_v2/proto/common.proto | 54 - .../bigtable_admin_v2/proto/instance.proto | 245 ---- .../cloud/bigtable_admin_v2/proto/table.proto | 405 ------- .../cloud/bigtable_v2/proto/bigtable.proto | 427 ------- .../google/cloud/bigtable_v2/proto/data.proto | 536 --------- 7 files changed, 3244 deletions(-) delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common.proto delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance.proto delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table.proto delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable.proto delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data.proto diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto deleted file mode 100644 index ca3aaed7a1ef..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto +++ /dev/null @@ -1,574 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.v2; - -import "google/api/annotations.proto"; -import "google/api/client.proto"; -import "google/api/field_behavior.proto"; -import "google/api/resource.proto"; -import "google/bigtable/admin/v2/instance.proto"; -import "google/iam/v1/iam_policy.proto"; -import "google/iam/v1/policy.proto"; -import "google/longrunning/operations.proto"; -import "google/protobuf/empty.proto"; -import "google/protobuf/field_mask.proto"; -import "google/protobuf/timestamp.proto"; - -option csharp_namespace = "Google.Cloud.Bigtable.Admin.V2"; -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/v2;admin"; -option java_multiple_files = true; -option java_outer_classname = "BigtableInstanceAdminProto"; -option java_package = "com.google.bigtable.admin.v2"; -option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2"; -option ruby_package = "Google::Cloud::Bigtable::Admin::V2"; - -// Service for creating, configuring, and deleting Cloud Bigtable Instances and -// Clusters. Provides access to the Instance and Cluster schemas only, not the -// tables' metadata or data stored in those tables. 
-service BigtableInstanceAdmin { - option (google.api.default_host) = "bigtableadmin.googleapis.com"; - option (google.api.oauth_scopes) = - "https://www.googleapis.com/auth/bigtable.admin," - "https://www.googleapis.com/auth/bigtable.admin.cluster," - "https://www.googleapis.com/auth/bigtable.admin.instance," - "https://www.googleapis.com/auth/cloud-bigtable.admin," - "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster," - "https://www.googleapis.com/auth/cloud-platform," - "https://www.googleapis.com/auth/cloud-platform.read-only"; - - // Create an instance within a project. - rpc CreateInstance(CreateInstanceRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v2/{parent=projects/*}/instances" - body: "*" - }; - option (google.api.method_signature) = "parent,instance_id,instance,clusters"; - option (google.longrunning.operation_info) = { - response_type: "Instance" - metadata_type: "CreateInstanceMetadata" - }; - } - - // Gets information about an instance. - rpc GetInstance(GetInstanceRequest) returns (Instance) { - option (google.api.http) = { - get: "/v2/{name=projects/*/instances/*}" - }; - option (google.api.method_signature) = "name"; - } - - // Lists information about instances in a project. - rpc ListInstances(ListInstancesRequest) returns (ListInstancesResponse) { - option (google.api.http) = { - get: "/v2/{parent=projects/*}/instances" - }; - option (google.api.method_signature) = "parent"; - } - - // Updates an instance within a project. This method updates only the display - // name and type for an Instance. To update other Instance properties, such as - // labels, use PartialUpdateInstance. - rpc UpdateInstance(Instance) returns (Instance) { - option (google.api.http) = { - put: "/v2/{name=projects/*/instances/*}" - body: "*" - }; - } - - // Partially updates an instance within a project. This method can modify all - // fields of an Instance and is the preferred way to update an Instance. - rpc PartialUpdateInstance(PartialUpdateInstanceRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - patch: "/v2/{instance.name=projects/*/instances/*}" - body: "instance" - }; - option (google.api.method_signature) = "instance,update_mask"; - option (google.longrunning.operation_info) = { - response_type: "Instance" - metadata_type: "UpdateInstanceMetadata" - }; - } - - // Delete an instance from a project. - rpc DeleteInstance(DeleteInstanceRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v2/{name=projects/*/instances/*}" - }; - option (google.api.method_signature) = "name"; - } - - // Creates a cluster within an instance. - rpc CreateCluster(CreateClusterRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v2/{parent=projects/*/instances/*}/clusters" - body: "cluster" - }; - option (google.api.method_signature) = "parent,cluster_id,cluster"; - option (google.longrunning.operation_info) = { - response_type: "Cluster" - metadata_type: "CreateClusterMetadata" - }; - } - - // Gets information about a cluster. - rpc GetCluster(GetClusterRequest) returns (Cluster) { - option (google.api.http) = { - get: "/v2/{name=projects/*/instances/*/clusters/*}" - }; - option (google.api.method_signature) = "name"; - } - - // Lists information about clusters in an instance. 
- rpc ListClusters(ListClustersRequest) returns (ListClustersResponse) { - option (google.api.http) = { - get: "/v2/{parent=projects/*/instances/*}/clusters" - }; - option (google.api.method_signature) = "parent"; - } - - // Updates a cluster within an instance. - rpc UpdateCluster(Cluster) returns (google.longrunning.Operation) { - option (google.api.http) = { - put: "/v2/{name=projects/*/instances/*/clusters/*}" - body: "*" - }; - option (google.longrunning.operation_info) = { - response_type: "Cluster" - metadata_type: "UpdateClusterMetadata" - }; - } - - // Deletes a cluster from an instance. - rpc DeleteCluster(DeleteClusterRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v2/{name=projects/*/instances/*/clusters/*}" - }; - option (google.api.method_signature) = "name"; - } - - // Creates an app profile within an instance. - rpc CreateAppProfile(CreateAppProfileRequest) returns (AppProfile) { - option (google.api.http) = { - post: "/v2/{parent=projects/*/instances/*}/appProfiles" - body: "app_profile" - }; - option (google.api.method_signature) = "parent,app_profile_id,app_profile"; - } - - // Gets information about an app profile. - rpc GetAppProfile(GetAppProfileRequest) returns (AppProfile) { - option (google.api.http) = { - get: "/v2/{name=projects/*/instances/*/appProfiles/*}" - }; - option (google.api.method_signature) = "name"; - } - - // Lists information about app profiles in an instance. - rpc ListAppProfiles(ListAppProfilesRequest) returns (ListAppProfilesResponse) { - option (google.api.http) = { - get: "/v2/{parent=projects/*/instances/*}/appProfiles" - }; - option (google.api.method_signature) = "parent"; - } - - // Updates an app profile within an instance. - rpc UpdateAppProfile(UpdateAppProfileRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - patch: "/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}" - body: "app_profile" - }; - option (google.api.method_signature) = "app_profile,update_mask"; - option (google.longrunning.operation_info) = { - response_type: "AppProfile" - metadata_type: "UpdateAppProfileMetadata" - }; - } - - // Deletes an app profile from an instance. - rpc DeleteAppProfile(DeleteAppProfileRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v2/{name=projects/*/instances/*/appProfiles/*}" - }; - option (google.api.method_signature) = "name"; - } - - // Gets the access control policy for an instance resource. Returns an empty - // policy if an instance exists but does not have a policy set. - rpc GetIamPolicy(google.iam.v1.GetIamPolicyRequest) returns (google.iam.v1.Policy) { - option (google.api.http) = { - post: "/v2/{resource=projects/*/instances/*}:getIamPolicy" - body: "*" - }; - option (google.api.method_signature) = "resource"; - } - - // Sets the access control policy on an instance resource. Replaces any - // existing policy. - rpc SetIamPolicy(google.iam.v1.SetIamPolicyRequest) returns (google.iam.v1.Policy) { - option (google.api.http) = { - post: "/v2/{resource=projects/*/instances/*}:setIamPolicy" - body: "*" - }; - option (google.api.method_signature) = "resource,policy"; - } - - // Returns permissions that the caller has on the specified instance resource. 
- rpc TestIamPermissions(google.iam.v1.TestIamPermissionsRequest) returns (google.iam.v1.TestIamPermissionsResponse) { - option (google.api.http) = { - post: "/v2/{resource=projects/*/instances/*}:testIamPermissions" - body: "*" - }; - option (google.api.method_signature) = "resource,permissions"; - } -} - -// Request message for BigtableInstanceAdmin.CreateInstance. -message CreateInstanceRequest { - // Required. The unique name of the project in which to create the new instance. - // Values are of the form `projects/{project}`. - string parent = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "cloudresourcemanager.googleapis.com/Project" - } - ]; - - // Required. The ID to be used when referring to the new instance within its project, - // e.g., just `myinstance` rather than - // `projects/myproject/instances/myinstance`. - string instance_id = 2 [(google.api.field_behavior) = REQUIRED]; - - // Required. The instance to create. - // Fields marked `OutputOnly` must be left blank. - Instance instance = 3 [(google.api.field_behavior) = REQUIRED]; - - // Required. The clusters to be created within the instance, mapped by desired - // cluster ID, e.g., just `mycluster` rather than - // `projects/myproject/instances/myinstance/clusters/mycluster`. - // Fields marked `OutputOnly` must be left blank. - // Currently, at most four clusters can be specified. - map clusters = 4 [(google.api.field_behavior) = REQUIRED]; -} - -// Request message for BigtableInstanceAdmin.GetInstance. -message GetInstanceRequest { - // Required. The unique name of the requested instance. Values are of the form - // `projects/{project}/instances/{instance}`. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigtable.googleapis.com/Instance" - } - ]; -} - -// Request message for BigtableInstanceAdmin.ListInstances. -message ListInstancesRequest { - // Required. The unique name of the project for which a list of instances is requested. - // Values are of the form `projects/{project}`. - string parent = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "cloudresourcemanager.googleapis.com/Project" - } - ]; - - // DEPRECATED: This field is unused and ignored. - string page_token = 2; -} - -// Response message for BigtableInstanceAdmin.ListInstances. -message ListInstancesResponse { - // The list of requested instances. - repeated Instance instances = 1; - - // Locations from which Instance information could not be retrieved, - // due to an outage or some other transient condition. - // Instances whose Clusters are all in one of the failed locations - // may be missing from `instances`, and Instances with at least one - // Cluster in a failed location may only have partial information returned. - // Values are of the form `projects//locations/` - repeated string failed_locations = 2; - - // DEPRECATED: This field is unused and ignored. - string next_page_token = 3; -} - -// Request message for BigtableInstanceAdmin.PartialUpdateInstance. -message PartialUpdateInstanceRequest { - // Required. The Instance which will (partially) replace the current value. - Instance instance = 1 [(google.api.field_behavior) = REQUIRED]; - - // Required. The subset of Instance fields which should be replaced. - // Must be explicitly set. - google.protobuf.FieldMask update_mask = 2 [(google.api.field_behavior) = REQUIRED]; -} - -// Request message for BigtableInstanceAdmin.DeleteInstance. 
-message DeleteInstanceRequest { - // Required. The unique name of the instance to be deleted. - // Values are of the form `projects/{project}/instances/{instance}`. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigtable.googleapis.com/Instance" - } - ]; -} - -// Request message for BigtableInstanceAdmin.CreateCluster. -message CreateClusterRequest { - // Required. The unique name of the instance in which to create the new cluster. - // Values are of the form - // `projects/{project}/instances/{instance}`. - string parent = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigtable.googleapis.com/Instance" - } - ]; - - // Required. The ID to be used when referring to the new cluster within its instance, - // e.g., just `mycluster` rather than - // `projects/myproject/instances/myinstance/clusters/mycluster`. - string cluster_id = 2 [(google.api.field_behavior) = REQUIRED]; - - // Required. The cluster to be created. - // Fields marked `OutputOnly` must be left blank. - Cluster cluster = 3 [(google.api.field_behavior) = REQUIRED]; -} - -// Request message for BigtableInstanceAdmin.GetCluster. -message GetClusterRequest { - // Required. The unique name of the requested cluster. Values are of the form - // `projects/{project}/instances/{instance}/clusters/{cluster}`. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigtable.googleapis.com/Cluster" - } - ]; -} - -// Request message for BigtableInstanceAdmin.ListClusters. -message ListClustersRequest { - // Required. The unique name of the instance for which a list of clusters is requested. - // Values are of the form `projects/{project}/instances/{instance}`. - // Use `{instance} = '-'` to list Clusters for all Instances in a project, - // e.g., `projects/myproject/instances/-`. - string parent = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigtable.googleapis.com/Instance" - } - ]; - - // DEPRECATED: This field is unused and ignored. - string page_token = 2; -} - -// Response message for BigtableInstanceAdmin.ListClusters. -message ListClustersResponse { - // The list of requested clusters. - repeated Cluster clusters = 1; - - // Locations from which Cluster information could not be retrieved, - // due to an outage or some other transient condition. - // Clusters from these locations may be missing from `clusters`, - // or may only have partial information returned. - // Values are of the form `projects//locations/` - repeated string failed_locations = 2; - - // DEPRECATED: This field is unused and ignored. - string next_page_token = 3; -} - -// Request message for BigtableInstanceAdmin.DeleteCluster. -message DeleteClusterRequest { - // Required. The unique name of the cluster to be deleted. Values are of the form - // `projects/{project}/instances/{instance}/clusters/{cluster}`. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigtable.googleapis.com/Cluster" - } - ]; -} - -// The metadata for the Operation returned by CreateInstance. -message CreateInstanceMetadata { - // The request that prompted the initiation of this CreateInstance operation. - CreateInstanceRequest original_request = 1; - - // The time at which the original request was received. - google.protobuf.Timestamp request_time = 2; - - // The time at which the operation failed or was completed successfully. 
- google.protobuf.Timestamp finish_time = 3; -} - -// The metadata for the Operation returned by UpdateInstance. -message UpdateInstanceMetadata { - // The request that prompted the initiation of this UpdateInstance operation. - PartialUpdateInstanceRequest original_request = 1; - - // The time at which the original request was received. - google.protobuf.Timestamp request_time = 2; - - // The time at which the operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 3; -} - -// The metadata for the Operation returned by CreateCluster. -message CreateClusterMetadata { - // The request that prompted the initiation of this CreateCluster operation. - CreateClusterRequest original_request = 1; - - // The time at which the original request was received. - google.protobuf.Timestamp request_time = 2; - - // The time at which the operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 3; -} - -// The metadata for the Operation returned by UpdateCluster. -message UpdateClusterMetadata { - // The request that prompted the initiation of this UpdateCluster operation. - Cluster original_request = 1; - - // The time at which the original request was received. - google.protobuf.Timestamp request_time = 2; - - // The time at which the operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 3; -} - -// Request message for BigtableInstanceAdmin.CreateAppProfile. -message CreateAppProfileRequest { - // Required. The unique name of the instance in which to create the new app profile. - // Values are of the form - // `projects/{project}/instances/{instance}`. - string parent = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigtable.googleapis.com/Instance" - } - ]; - - // Required. The ID to be used when referring to the new app profile within its - // instance, e.g., just `myprofile` rather than - // `projects/myproject/instances/myinstance/appProfiles/myprofile`. - string app_profile_id = 2 [(google.api.field_behavior) = REQUIRED]; - - // Required. The app profile to be created. - // Fields marked `OutputOnly` will be ignored. - AppProfile app_profile = 3 [(google.api.field_behavior) = REQUIRED]; - - // If true, ignore safety checks when creating the app profile. - bool ignore_warnings = 4; -} - -// Request message for BigtableInstanceAdmin.GetAppProfile. -message GetAppProfileRequest { - // Required. The unique name of the requested app profile. Values are of the form - // `projects/{project}/instances/{instance}/appProfiles/{app_profile}`. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigtable.googleapis.com/AppProfile" - } - ]; -} - -// Request message for BigtableInstanceAdmin.ListAppProfiles. -message ListAppProfilesRequest { - // Required. The unique name of the instance for which a list of app profiles is - // requested. Values are of the form - // `projects/{project}/instances/{instance}`. - // Use `{instance} = '-'` to list AppProfiles for all Instances in a project, - // e.g., `projects/myproject/instances/-`. - string parent = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigtable.googleapis.com/Instance" - } - ]; - - // Maximum number of results per page. - // - // A page_size of zero lets the server choose the number of items to return. - // A page_size which is strictly positive will return at most that many items. 
- // A negative page_size will cause an error. - // - // Following the first request, subsequent paginated calls are not required - // to pass a page_size. If a page_size is set in subsequent calls, it must - // match the page_size given in the first request. - int32 page_size = 3; - - // The value of `next_page_token` returned by a previous call. - string page_token = 2; -} - -// Response message for BigtableInstanceAdmin.ListAppProfiles. -message ListAppProfilesResponse { - // The list of requested app profiles. - repeated AppProfile app_profiles = 1; - - // Set if not all app profiles could be returned in a single response. - // Pass this value to `page_token` in another request to get the next - // page of results. - string next_page_token = 2; - - // Locations from which AppProfile information could not be retrieved, - // due to an outage or some other transient condition. - // AppProfiles from these locations may be missing from `app_profiles`. - // Values are of the form `projects//locations/` - repeated string failed_locations = 3; -} - -// Request message for BigtableInstanceAdmin.UpdateAppProfile. -message UpdateAppProfileRequest { - // Required. The app profile which will (partially) replace the current value. - AppProfile app_profile = 1 [(google.api.field_behavior) = REQUIRED]; - - // Required. The subset of app profile fields which should be replaced. - // If unset, all fields will be replaced. - google.protobuf.FieldMask update_mask = 2 [(google.api.field_behavior) = REQUIRED]; - - // If true, ignore safety checks when updating the app profile. - bool ignore_warnings = 3; -} - -// Request message for BigtableInstanceAdmin.DeleteAppProfile. -message DeleteAppProfileRequest { - // Required. The unique name of the app profile to be deleted. Values are of the form - // `projects/{project}/instances/{instance}/appProfiles/{app_profile}`. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigtable.googleapis.com/AppProfile" - } - ]; - - // Required. If true, ignore safety checks when deleting the app profile. - bool ignore_warnings = 2 [(google.api.field_behavior) = REQUIRED]; -} - -// The metadata for the Operation returned by UpdateAppProfile. -message UpdateAppProfileMetadata { - -} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto deleted file mode 100644 index d979dba597e6..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto +++ /dev/null @@ -1,1003 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -syntax = "proto3"; - -package google.bigtable.admin.v2; - -import "google/api/annotations.proto"; -import "google/api/client.proto"; -import "google/api/field_behavior.proto"; -import "google/api/resource.proto"; -import "google/bigtable/admin/v2/common.proto"; -import "google/bigtable/admin/v2/table.proto"; -import "google/iam/v1/iam_policy.proto"; -import "google/iam/v1/policy.proto"; -import "google/longrunning/operations.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/empty.proto"; -import "google/protobuf/field_mask.proto"; -import "google/protobuf/timestamp.proto"; - -option csharp_namespace = "Google.Cloud.Bigtable.Admin.V2"; -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/v2;admin"; -option java_multiple_files = true; -option java_outer_classname = "BigtableTableAdminProto"; -option java_package = "com.google.bigtable.admin.v2"; -option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2"; -option ruby_package = "Google::Cloud::Bigtable::Admin::V2"; - -// Service for creating, configuring, and deleting Cloud Bigtable tables. -// -// -// Provides access to the table schemas only, not the data stored within -// the tables. -service BigtableTableAdmin { - option (google.api.default_host) = "bigtableadmin.googleapis.com"; - option (google.api.oauth_scopes) = - "https://www.googleapis.com/auth/bigtable.admin," - "https://www.googleapis.com/auth/bigtable.admin.table," - "https://www.googleapis.com/auth/cloud-bigtable.admin," - "https://www.googleapis.com/auth/cloud-bigtable.admin.table," - "https://www.googleapis.com/auth/cloud-platform," - "https://www.googleapis.com/auth/cloud-platform.read-only"; - - // Creates a new table in the specified instance. - // The table can be created with a full set of initial column families, - // specified in the request. - rpc CreateTable(CreateTableRequest) returns (Table) { - option (google.api.http) = { - post: "/v2/{parent=projects/*/instances/*}/tables" - body: "*" - }; - option (google.api.method_signature) = "parent,table_id,table"; - } - - // Creates a new table from the specified snapshot. The target table must - // not exist. The snapshot and the table must be in the same instance. - // - // Note: This is a private alpha release of Cloud Bigtable snapshots. This - // feature is not currently available to most Cloud Bigtable customers. This - // feature might be changed in backward-incompatible ways and is not - // recommended for production use. It is not subject to any SLA or deprecation - // policy. - rpc CreateTableFromSnapshot(CreateTableFromSnapshotRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot" - body: "*" - }; - option (google.api.method_signature) = "parent,table_id,source_snapshot"; - option (google.longrunning.operation_info) = { - response_type: "Table" - metadata_type: "CreateTableFromSnapshotMetadata" - }; - } - - // Lists all tables served from a specified instance. - rpc ListTables(ListTablesRequest) returns (ListTablesResponse) { - option (google.api.http) = { - get: "/v2/{parent=projects/*/instances/*}/tables" - }; - option (google.api.method_signature) = "parent"; - } - - // Gets metadata information about the specified table. 
- rpc GetTable(GetTableRequest) returns (Table) { - option (google.api.http) = { - get: "/v2/{name=projects/*/instances/*/tables/*}" - }; - option (google.api.method_signature) = "name"; - } - - // Permanently deletes a specified table and all of its data. - rpc DeleteTable(DeleteTableRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v2/{name=projects/*/instances/*/tables/*}" - }; - option (google.api.method_signature) = "name"; - } - - // Performs a series of column family modifications on the specified table. - // Either all or none of the modifications will occur before this method - // returns, but data requests received prior to that point may see a table - // where only some modifications have taken effect. - rpc ModifyColumnFamilies(ModifyColumnFamiliesRequest) returns (Table) { - option (google.api.http) = { - post: "/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies" - body: "*" - }; - option (google.api.method_signature) = "name,modifications"; - } - - // Permanently drop/delete a row range from a specified table. The request can - // specify whether to delete all rows in a table, or only those that match a - // particular prefix. - rpc DropRowRange(DropRowRangeRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - post: "/v2/{name=projects/*/instances/*/tables/*}:dropRowRange" - body: "*" - }; - } - - // Generates a consistency token for a Table, which can be used in - // CheckConsistency to check whether mutations to the table that finished - // before this call started have been replicated. The tokens will be available - // for 90 days. - rpc GenerateConsistencyToken(GenerateConsistencyTokenRequest) returns (GenerateConsistencyTokenResponse) { - option (google.api.http) = { - post: "/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken" - body: "*" - }; - option (google.api.method_signature) = "name"; - } - - // Checks replication consistency based on a consistency token, that is, if - // replication has caught up based on the conditions specified in the token - // and the check request. - rpc CheckConsistency(CheckConsistencyRequest) returns (CheckConsistencyResponse) { - option (google.api.http) = { - post: "/v2/{name=projects/*/instances/*/tables/*}:checkConsistency" - body: "*" - }; - option (google.api.method_signature) = "name,consistency_token"; - } - - // Creates a new snapshot in the specified cluster from the specified - // source table. The cluster and the table must be in the same instance. - // - // Note: This is a private alpha release of Cloud Bigtable snapshots. This - // feature is not currently available to most Cloud Bigtable customers. This - // feature might be changed in backward-incompatible ways and is not - // recommended for production use. It is not subject to any SLA or deprecation - // policy. - rpc SnapshotTable(SnapshotTableRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v2/{name=projects/*/instances/*/tables/*}:snapshot" - body: "*" - }; - option (google.api.method_signature) = "name,cluster,snapshot_id,description"; - option (google.longrunning.operation_info) = { - response_type: "Snapshot" - metadata_type: "SnapshotTableMetadata" - }; - } - - // Gets metadata information about the specified snapshot. - // - // Note: This is a private alpha release of Cloud Bigtable snapshots. This - // feature is not currently available to most Cloud Bigtable customers. 
This - // feature might be changed in backward-incompatible ways and is not - // recommended for production use. It is not subject to any SLA or deprecation - // policy. - rpc GetSnapshot(GetSnapshotRequest) returns (Snapshot) { - option (google.api.http) = { - get: "/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}" - }; - option (google.api.method_signature) = "name"; - } - - // Lists all snapshots associated with the specified cluster. - // - // Note: This is a private alpha release of Cloud Bigtable snapshots. This - // feature is not currently available to most Cloud Bigtable customers. This - // feature might be changed in backward-incompatible ways and is not - // recommended for production use. It is not subject to any SLA or deprecation - // policy. - rpc ListSnapshots(ListSnapshotsRequest) returns (ListSnapshotsResponse) { - option (google.api.http) = { - get: "/v2/{parent=projects/*/instances/*/clusters/*}/snapshots" - }; - option (google.api.method_signature) = "parent"; - } - - // Permanently deletes the specified snapshot. - // - // Note: This is a private alpha release of Cloud Bigtable snapshots. This - // feature is not currently available to most Cloud Bigtable customers. This - // feature might be changed in backward-incompatible ways and is not - // recommended for production use. It is not subject to any SLA or deprecation - // policy. - rpc DeleteSnapshot(DeleteSnapshotRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}" - }; - option (google.api.method_signature) = "name"; - } - - // Starts creating a new Cloud Bigtable Backup. The returned backup - // [long-running operation][google.longrunning.Operation] can be used to - // track creation of the backup. The - // [metadata][google.longrunning.Operation.metadata] field type is - // [CreateBackupMetadata][google.bigtable.admin.v2.CreateBackupMetadata]. The - // [response][google.longrunning.Operation.response] field type is - // [Backup][google.bigtable.admin.v2.Backup], if successful. Cancelling the returned operation will stop the - // creation and delete the backup. - rpc CreateBackup(CreateBackupRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v2/{parent=projects/*/instances/*/clusters/*}/backups" - body: "backup" - }; - option (google.api.method_signature) = "parent,backup_id,backup"; - option (google.longrunning.operation_info) = { - response_type: "Backup" - metadata_type: "CreateBackupMetadata" - }; - } - - // Gets metadata on a pending or completed Cloud Bigtable Backup. - rpc GetBackup(GetBackupRequest) returns (Backup) { - option (google.api.http) = { - get: "/v2/{name=projects/*/instances/*/clusters/*/backups/*}" - }; - option (google.api.method_signature) = "name"; - } - - // Updates a pending or completed Cloud Bigtable Backup. - rpc UpdateBackup(UpdateBackupRequest) returns (Backup) { - option (google.api.http) = { - patch: "/v2/{backup.name=projects/*/instances/*/clusters/*/backups/*}" - body: "backup" - }; - option (google.api.method_signature) = "backup,update_mask"; - } - - // Deletes a pending or completed Cloud Bigtable backup. - rpc DeleteBackup(DeleteBackupRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v2/{name=projects/*/instances/*/clusters/*/backups/*}" - }; - option (google.api.method_signature) = "name"; - } - - // Lists Cloud Bigtable backups. Returns both completed and pending - // backups. 
- rpc ListBackups(ListBackupsRequest) returns (ListBackupsResponse) { - option (google.api.http) = { - get: "/v2/{parent=projects/*/instances/*/clusters/*}/backups" - }; - option (google.api.method_signature) = "parent"; - } - - // Create a new table by restoring from a completed backup. The new table - // must be in the same instance as the instance containing the backup. The - // returned table [long-running operation][google.longrunning.Operation] can - // be used to track the progress of the operation, and to cancel it. The - // [metadata][google.longrunning.Operation.metadata] field type is - // [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. The - // [response][google.longrunning.Operation.response] type is - // [Table][google.bigtable.admin.v2.Table], if successful. - rpc RestoreTable(RestoreTableRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { - post: "/v2/{parent=projects/*/instances/*}/tables:restore" - body: "*" - }; - option (google.longrunning.operation_info) = { - response_type: "Table" - metadata_type: "RestoreTableMetadata" - }; - } - - // Gets the access control policy for a Table or Backup resource. - // Returns an empty policy if the resource exists but does not have a policy - // set. - rpc GetIamPolicy(google.iam.v1.GetIamPolicyRequest) returns (google.iam.v1.Policy) { - option (google.api.http) = { - post: "/v2/{resource=projects/*/instances/*/tables/*}:getIamPolicy" - body: "*" - additional_bindings { - post: "/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:getIamPolicy" - body: "*" - } - }; - option (google.api.method_signature) = "resource"; - } - - // Sets the access control policy on a Table or Backup resource. - // Replaces any existing policy. - rpc SetIamPolicy(google.iam.v1.SetIamPolicyRequest) returns (google.iam.v1.Policy) { - option (google.api.http) = { - post: "/v2/{resource=projects/*/instances/*/tables/*}:setIamPolicy" - body: "*" - additional_bindings { - post: "/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:setIamPolicy" - body: "*" - } - }; - option (google.api.method_signature) = "resource,policy"; - } - - // Returns permissions that the caller has on the specified Table or Backup resource. - rpc TestIamPermissions(google.iam.v1.TestIamPermissionsRequest) returns (google.iam.v1.TestIamPermissionsResponse) { - option (google.api.http) = { - post: "/v2/{resource=projects/*/instances/*/tables/*}:testIamPermissions" - body: "*" - additional_bindings { - post: "/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:testIamPermissions" - body: "*" - } - }; - option (google.api.method_signature) = "resource,permissions"; - } -} - -// The request for -// [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. -message RestoreTableRequest { - // Required. The name of the instance in which to create the restored - // table. This instance must be the parent of the source backup. Values are - // of the form `projects//instances/`. - string parent = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigtable.googleapis.com/Instance" - } - ]; - - // Required. The id of the table to create and restore to. This - // table must not already exist. The `table_id` appended to - // `parent` forms the full table name of the form - // `projects//instances//tables/`. - string table_id = 2 [(google.api.field_behavior) = REQUIRED]; - - // Required. The source from which to restore. 
- oneof source { - // Name of the backup from which to restore. Values are of the form - // `projects//instances//clusters//backups/`. - string backup = 3 [(google.api.resource_reference) = { - type: "bigtable.googleapis.com/Backup" - }]; - } -} - -// Metadata type for the long-running operation returned by -// [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. -message RestoreTableMetadata { - // Name of the table being created and restored to. - string name = 1; - - // The type of the restore source. - RestoreSourceType source_type = 2; - - // Information about the source used to restore the table, as specified by - // `source` in [RestoreTableRequest][google.bigtable.admin.v2.RestoreTableRequest]. - oneof source_info { - BackupInfo backup_info = 3; - } - - // If exists, the name of the long-running operation that will be used to - // track the post-restore optimization process to optimize the performance of - // the restored table. The metadata type of the long-running operation is - // [OptimizeRestoreTableMetadata][]. The response type is - // [Empty][google.protobuf.Empty]. This long-running operation may be - // automatically created by the system if applicable after the - // RestoreTable long-running operation completes successfully. This operation - // may not be created if the table is already optimized or the restore was - // not successful. - string optimize_table_operation_name = 4; - - // The progress of the [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable] - // operation. - OperationProgress progress = 5; -} - -// Metadata type for the long-running operation used to track the progress -// of optimizations performed on a newly restored table. This long-running -// operation is automatically created by the system after the successful -// completion of a table restore, and cannot be cancelled. -message OptimizeRestoredTableMetadata { - // Name of the restored table being optimized. - string name = 1; - - // The progress of the post-restore optimizations. - OperationProgress progress = 2; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] -message CreateTableRequest { - // An initial split point for a newly created table. - message Split { - // Row key to use as an initial tablet boundary. - bytes key = 1; - } - - // Required. The unique name of the instance in which to create the table. - // Values are of the form `projects/{project}/instances/{instance}`. - string parent = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigtable.googleapis.com/Instance" - } - ]; - - // Required. The name by which the new table should be referred to within the parent - // instance, e.g., `foobar` rather than `{parent}/tables/foobar`. - // Maximum 50 characters. - string table_id = 2 [(google.api.field_behavior) = REQUIRED]; - - // Required. The Table to create. - Table table = 3 [(google.api.field_behavior) = REQUIRED]; - - // The optional list of row keys that will be used to initially split the - // table into several tablets (tablets are similar to HBase regions). - // Given two split keys, `s1` and `s2`, three tablets will be created, - // spanning the key ranges: `[, s1), [s1, s2), [s2, )`. 
- // - // Example: - // - // * Row keys := `["a", "apple", "custom", "customer_1", "customer_2",` - // `"other", "zz"]` - // * initial_split_keys := `["apple", "customer_1", "customer_2", "other"]` - // * Key assignment: - // - Tablet 1 `[, apple) => {"a"}.` - // - Tablet 2 `[apple, customer_1) => {"apple", "custom"}.` - // - Tablet 3 `[customer_1, customer_2) => {"customer_1"}.` - // - Tablet 4 `[customer_2, other) => {"customer_2"}.` - // - Tablet 5 `[other, ) => {"other", "zz"}.` - repeated Split initial_splits = 4; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot] -// -// Note: This is a private alpha release of Cloud Bigtable snapshots. This -// feature is not currently available to most Cloud Bigtable customers. This -// feature might be changed in backward-incompatible ways and is not recommended -// for production use. It is not subject to any SLA or deprecation policy. -message CreateTableFromSnapshotRequest { - // Required. The unique name of the instance in which to create the table. - // Values are of the form `projects/{project}/instances/{instance}`. - string parent = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigtable.googleapis.com/Instance" - } - ]; - - // Required. The name by which the new table should be referred to within the parent - // instance, e.g., `foobar` rather than `{parent}/tables/foobar`. - string table_id = 2 [(google.api.field_behavior) = REQUIRED]; - - // Required. The unique name of the snapshot from which to restore the table. The - // snapshot and the table must be in the same instance. - // Values are of the form - // `projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}`. - string source_snapshot = 3 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigtable.googleapis.com/Snapshot" - } - ]; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] -message DropRowRangeRequest { - // Required. The unique name of the table on which to drop a range of rows. - // Values are of the form - // `projects/{project}/instances/{instance}/tables/{table}`. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigtable.googleapis.com/Table" - } - ]; - - // Delete all rows or by prefix. - oneof target { - // Delete all rows that start with this row key prefix. Prefix cannot be - // zero length. - bytes row_key_prefix = 2; - - // Delete all rows in the table. Setting this to false is a no-op. - bool delete_all_data_from_table = 3; - } -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] -message ListTablesRequest { - // Required. The unique name of the instance for which tables should be listed. - // Values are of the form `projects/{project}/instances/{instance}`. - string parent = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigtable.googleapis.com/Instance" - } - ]; - - // The view to be applied to the returned tables' fields. - // Only NAME_ONLY view (default) and REPLICATION_VIEW are supported. - Table.View view = 2; - - // Maximum number of results per page. - // - // A page_size of zero lets the server choose the number of items to return. 
- // A page_size which is strictly positive will return at most that many items. - // A negative page_size will cause an error. - // - // Following the first request, subsequent paginated calls are not required - // to pass a page_size. If a page_size is set in subsequent calls, it must - // match the page_size given in the first request. - int32 page_size = 4; - - // The value of `next_page_token` returned by a previous call. - string page_token = 3; -} - -// Response message for -// [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] -message ListTablesResponse { - // The tables present in the requested instance. - repeated Table tables = 1; - - // Set if not all tables could be returned in a single response. - // Pass this value to `page_token` in another request to get the next - // page of results. - string next_page_token = 2; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.GetTable][google.bigtable.admin.v2.BigtableTableAdmin.GetTable] -message GetTableRequest { - // Required. The unique name of the requested table. - // Values are of the form - // `projects/{project}/instances/{instance}/tables/{table}`. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigtable.googleapis.com/Table" - } - ]; - - // The view to be applied to the returned table's fields. - // Defaults to `SCHEMA_VIEW` if unspecified. - Table.View view = 2; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable] -message DeleteTableRequest { - // Required. The unique name of the table to be deleted. - // Values are of the form - // `projects/{project}/instances/{instance}/tables/{table}`. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigtable.googleapis.com/Table" - } - ]; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies] -message ModifyColumnFamiliesRequest { - // A create, update, or delete of a particular column family. - message Modification { - // The ID of the column family to be modified. - string id = 1; - - // Column familiy modifications. - oneof mod { - // Create a new column family with the specified schema, or fail if - // one already exists with the given ID. - ColumnFamily create = 2; - - // Update an existing column family to the specified schema, or fail - // if no column family exists with the given ID. - ColumnFamily update = 3; - - // Drop (delete) the column family with the given ID, or fail if no such - // family exists. - bool drop = 4; - } - } - - // Required. The unique name of the table whose families should be modified. - // Values are of the form - // `projects/{project}/instances/{instance}/tables/{table}`. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigtable.googleapis.com/Table" - } - ]; - - // Required. Modifications to be atomically applied to the specified table's families. - // Entries are applied in order, meaning that earlier modifications can be - // masked by later ones (in the case of repeated updates to the same family, - // for example). 
- repeated Modification modifications = 2 [(google.api.field_behavior) = REQUIRED]; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] -message GenerateConsistencyTokenRequest { - // Required. The unique name of the Table for which to create a consistency token. - // Values are of the form - // `projects/{project}/instances/{instance}/tables/{table}`. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigtable.googleapis.com/Table" - } - ]; -} - -// Response message for -// [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] -message GenerateConsistencyTokenResponse { - // The generated consistency token. - string consistency_token = 1; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] -message CheckConsistencyRequest { - // Required. The unique name of the Table for which to check replication consistency. - // Values are of the form - // `projects/{project}/instances/{instance}/tables/{table}`. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigtable.googleapis.com/Table" - } - ]; - - // Required. The token created using GenerateConsistencyToken for the Table. - string consistency_token = 2 [(google.api.field_behavior) = REQUIRED]; -} - -// Response message for -// [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] -message CheckConsistencyResponse { - // True only if the token is consistent. A token is consistent if replication - // has caught up with the restrictions specified in the request. - bool consistent = 1; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable][google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable] -// -// Note: This is a private alpha release of Cloud Bigtable snapshots. This -// feature is not currently available to most Cloud Bigtable customers. This -// feature might be changed in backward-incompatible ways and is not recommended -// for production use. It is not subject to any SLA or deprecation policy. -message SnapshotTableRequest { - // Required. The unique name of the table to have the snapshot taken. - // Values are of the form - // `projects/{project}/instances/{instance}/tables/{table}`. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigtable.googleapis.com/Table" - } - ]; - - // Required. The name of the cluster where the snapshot will be created in. - // Values are of the form - // `projects/{project}/instances/{instance}/clusters/{cluster}`. - string cluster = 2 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigtable.googleapis.com/Cluster" - } - ]; - - // Required. The ID by which the new snapshot should be referred to within the parent - // cluster, e.g., `mysnapshot` of the form: `[_a-zA-Z0-9][-_.a-zA-Z0-9]*` - // rather than - // `projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/mysnapshot`. - string snapshot_id = 3 [(google.api.field_behavior) = REQUIRED]; - - // The amount of time that the new snapshot can stay active after it is - // created. 
Once 'ttl' expires, the snapshot will get deleted. The maximum - // amount of time a snapshot can stay active is 7 days. If 'ttl' is not - // specified, the default value of 24 hours will be used. - google.protobuf.Duration ttl = 4; - - // Description of the snapshot. - string description = 5; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot] -// -// Note: This is a private alpha release of Cloud Bigtable snapshots. This -// feature is not currently available to most Cloud Bigtable customers. This -// feature might be changed in backward-incompatible ways and is not recommended -// for production use. It is not subject to any SLA or deprecation policy. -message GetSnapshotRequest { - // Required. The unique name of the requested snapshot. - // Values are of the form - // `projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}`. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigtable.googleapis.com/Snapshot" - } - ]; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] -// -// Note: This is a private alpha release of Cloud Bigtable snapshots. This -// feature is not currently available to most Cloud Bigtable customers. This -// feature might be changed in backward-incompatible ways and is not recommended -// for production use. It is not subject to any SLA or deprecation policy. -message ListSnapshotsRequest { - // Required. The unique name of the cluster for which snapshots should be listed. - // Values are of the form - // `projects/{project}/instances/{instance}/clusters/{cluster}`. - // Use `{cluster} = '-'` to list snapshots for all clusters in an instance, - // e.g., `projects/{project}/instances/{instance}/clusters/-`. - string parent = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigtable.googleapis.com/Cluster" - } - ]; - - // The maximum number of snapshots to return per page. - // CURRENTLY UNIMPLEMENTED AND IGNORED. - int32 page_size = 2; - - // The value of `next_page_token` returned by a previous call. - string page_token = 3; -} - -// Response message for -// [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] -// -// Note: This is a private alpha release of Cloud Bigtable snapshots. This -// feature is not currently available to most Cloud Bigtable customers. This -// feature might be changed in backward-incompatible ways and is not recommended -// for production use. It is not subject to any SLA or deprecation policy. -message ListSnapshotsResponse { - // The snapshots present in the requested cluster. - repeated Snapshot snapshots = 1; - - // Set if not all snapshots could be returned in a single response. - // Pass this value to `page_token` in another request to get the next - // page of results. - string next_page_token = 2; -} - -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot] -// -// Note: This is a private alpha release of Cloud Bigtable snapshots. This -// feature is not currently available to most Cloud Bigtable customers. This -// feature might be changed in backward-incompatible ways and is not recommended -// for production use. 
It is not subject to any SLA or deprecation policy. -message DeleteSnapshotRequest { - // Required. The unique name of the snapshot to be deleted. - // Values are of the form - // `projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}`. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigtable.googleapis.com/Snapshot" - } - ]; -} - -// The metadata for the Operation returned by SnapshotTable. -// -// Note: This is a private alpha release of Cloud Bigtable snapshots. This -// feature is not currently available to most Cloud Bigtable customers. This -// feature might be changed in backward-incompatible ways and is not recommended -// for production use. It is not subject to any SLA or deprecation policy. -message SnapshotTableMetadata { - // The request that prompted the initiation of this SnapshotTable operation. - SnapshotTableRequest original_request = 1; - - // The time at which the original request was received. - google.protobuf.Timestamp request_time = 2; - - // The time at which the operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 3; -} - -// The metadata for the Operation returned by CreateTableFromSnapshot. -// -// Note: This is a private alpha release of Cloud Bigtable snapshots. This -// feature is not currently available to most Cloud Bigtable customers. This -// feature might be changed in backward-incompatible ways and is not recommended -// for production use. It is not subject to any SLA or deprecation policy. -message CreateTableFromSnapshotMetadata { - // The request that prompted the initiation of this CreateTableFromSnapshot - // operation. - CreateTableFromSnapshotRequest original_request = 1; - - // The time at which the original request was received. - google.protobuf.Timestamp request_time = 2; - - // The time at which the operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 3; -} - -// The request for [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup]. -message CreateBackupRequest { - // Required. This must be one of the clusters in the instance in which this - // table is located. The backup will be stored in this cluster. Values are - // of the form `projects/{project}/instances/{instance}/clusters/{cluster}`. - string parent = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigtable.googleapis.com/Cluster" - } - ]; - - // Required. The id of the backup to be created. The `backup_id` along with - // the parent `parent` are combined as {parent}/backups/{backup_id} to create - // the full backup name, of the form: - // `projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}`. - // This string must be between 1 and 50 characters in length and match the - // regex [_a-zA-Z0-9][-_.a-zA-Z0-9]*. - string backup_id = 2 [(google.api.field_behavior) = REQUIRED]; - - // Required. The backup to create. - Backup backup = 3 [(google.api.field_behavior) = REQUIRED]; -} - -// Metadata type for the operation returned by -// [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup]. -message CreateBackupMetadata { - // The name of the backup being created. - string name = 1; - - // The name of the table the backup is created from. - string source_table = 2; - - // The time at which this operation started. 
- google.protobuf.Timestamp start_time = 3; - - // If set, the time at which this operation finished or was cancelled. - google.protobuf.Timestamp end_time = 4; -} - -// The request for [UpdateBackup][google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup]. -message UpdateBackupRequest { - // Required. The backup to update. `backup.name`, and the fields to be updated - // as specified by `update_mask` are required. Other fields are ignored. - // Update is only supported for the following fields: - // * `backup.expire_time`. - Backup backup = 1 [(google.api.field_behavior) = REQUIRED]; - - // Required. A mask specifying which fields (e.g. `expire_time`) in the - // Backup resource should be updated. This mask is relative to the Backup - // resource, not to the request message. The field mask must always be - // specified; this prevents any future fields from being erased accidentally - // by clients that do not know about them. - google.protobuf.FieldMask update_mask = 2 [(google.api.field_behavior) = REQUIRED]; -} - -// The request for [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup]. -message GetBackupRequest { - // Required. Name of the backup. - // Values are of the form - // `projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}`. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigtable.googleapis.com/Backup" - } - ]; -} - -// The request for [DeleteBackup][google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup]. -message DeleteBackupRequest { - // Required. Name of the backup to delete. - // Values are of the form - // `projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}`. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigtable.googleapis.com/Backup" - } - ]; -} - -// The request for [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. -message ListBackupsRequest { - // Required. The cluster to list backups from. Values are of the - // form `projects/{project}/instances/{instance}/clusters/{cluster}`. - // Use `{cluster} = '-'` to list backups for all clusters in an instance, - // e.g., `projects/{project}/instances/{instance}/clusters/-`. - string parent = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigtable.googleapis.com/Cluster" - } - ]; - - // A filter expression that filters backups listed in the response. - // The expression must specify the field name, a comparison operator, - // and the value that you want to use for filtering. The value must be a - // string, a number, or a boolean. The comparison operator must be - // <, >, <=, >=, !=, =, or :. Colon ':' represents a HAS operator which is - // roughly synonymous with equality. Filter rules are case insensitive. - // - // The fields eligible for filtering are: - // * `name` - // * `source_table` - // * `state` - // * `start_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) - // * `end_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) - // * `expire_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) - // * `size_bytes` - // - // To filter on multiple expressions, provide each separate expression within - // parentheses. By default, each expression is an AND expression. However, - // you can include AND, OR, and NOT expressions explicitly. 
- // - // Some examples of using filters are: - // - // * `name:"exact"` --> The backup's name is the string "exact". - // * `name:howl` --> The backup's name contains the string "howl". - // * `source_table:prod` - // --> The source_table's name contains the string "prod". - // * `state:CREATING` --> The backup is pending creation. - // * `state:READY` --> The backup is fully created and ready for use. - // * `(name:howl) AND (start_time < \"2018-03-28T14:50:00Z\")` - // --> The backup name contains the string "howl" and start_time - // of the backup is before 2018-03-28T14:50:00Z. - // * `size_bytes > 10000000000` --> The backup's size is greater than 10GB - string filter = 2; - - // An expression for specifying the sort order of the results of the request. - // The string value should specify one or more fields in [Backup][google.bigtable.admin.v2.Backup]. The full - // syntax is described at https://aip.dev/132#ordering. - // - // Fields supported are: - // * name - // * source_table - // * expire_time - // * start_time - // * end_time - // * size_bytes - // * state - // - // For example, "start_time". The default sorting order is ascending. - // To specify descending order for the field, a suffix " desc" should - // be appended to the field name. For example, "start_time desc". - // Redundant space characters in the syntax are insigificant. - // - // If order_by is empty, results will be sorted by `start_time` in descending - // order starting from the most recently created backup. - string order_by = 3; - - // Number of backups to be returned in the response. If 0 or - // less, defaults to the server's maximum allowed page size. - int32 page_size = 4; - - // If non-empty, `page_token` should contain a - // [next_page_token][google.bigtable.admin.v2.ListBackupsResponse.next_page_token] from a - // previous [ListBackupsResponse][google.bigtable.admin.v2.ListBackupsResponse] to the same `parent` and with the same - // `filter`. - string page_token = 5; -} - -// The response for [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. -message ListBackupsResponse { - // The list of matching backups. - repeated Backup backups = 1; - - // `next_page_token` can be sent in a subsequent - // [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups] call to fetch more - // of the matching backups. - string next_page_token = 2; -} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common.proto deleted file mode 100644 index 17c69d469a0c..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/common.proto +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
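For orientation, the backup request and response messages removed above are exactly what the generated bigtable_admin_v2 client sends on the wire. A minimal, hedged sketch of driving CreateBackup and ListBackups from Python follows; the IDs (my-project, my-instance, my-cluster, my-table, my-backup) are placeholders and the request-dict call style assumes a proto-plus generation of the client, so exact signatures may differ by release:

    from datetime import datetime, timedelta, timezone

    from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient

    admin_client = BigtableTableAdminClient()
    cluster_name = admin_client.cluster_path("my-project", "my-instance", "my-cluster")
    table_name = admin_client.table_path("my-project", "my-instance", "my-table")

    # CreateBackup is a long-running operation (see CreateBackupMetadata above).
    operation = admin_client.create_backup(
        request={
            "parent": cluster_name,
            "backup_id": "my-backup",
            "backup": {
                "source_table": table_name,
                "expire_time": datetime.now(timezone.utc) + timedelta(days=7),
            },
        }
    )
    backup = operation.result(timeout=600)

    # ListBackups accepts the filter/order_by syntax documented above.
    for found in admin_client.list_backups(
        request={"parent": cluster_name, "filter": "state:READY", "order_by": "start_time desc"}
    ):
        print(found.name, found.size_bytes)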
- -syntax = "proto3"; - -package google.bigtable.admin.v2; - -import "google/protobuf/timestamp.proto"; - -option csharp_namespace = "Google.Cloud.Bigtable.Admin.V2"; -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/v2;admin"; -option java_multiple_files = true; -option java_outer_classname = "CommonProto"; -option java_package = "com.google.bigtable.admin.v2"; -option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2"; -option ruby_package = "Google::Cloud::Bigtable::Admin::V2"; - -// Storage media types for persisting Bigtable data. -enum StorageType { - // The user did not specify a storage type. - STORAGE_TYPE_UNSPECIFIED = 0; - - // Flash (SSD) storage should be used. - SSD = 1; - - // Magnetic drive (HDD) storage should be used. - HDD = 2; -} - -// Encapsulates progress related information for a Cloud Bigtable long -// running operation. -message OperationProgress { - // Percent completion of the operation. - // Values are between 0 and 100 inclusive. - int32 progress_percent = 1; - - // Time the request was received. - google.protobuf.Timestamp start_time = 2; - - // If set, the time at which this operation failed or was completed - // successfully. - google.protobuf.Timestamp end_time = 3; -} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance.proto deleted file mode 100644 index d590788b2d6c..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/instance.proto +++ /dev/null @@ -1,245 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.v2; - -import "google/api/field_behavior.proto"; -import "google/api/resource.proto"; -import "google/bigtable/admin/v2/common.proto"; - -option csharp_namespace = "Google.Cloud.Bigtable.Admin.V2"; -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/v2;admin"; -option java_multiple_files = true; -option java_outer_classname = "InstanceProto"; -option java_package = "com.google.bigtable.admin.v2"; -option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2"; -option ruby_package = "Google::Cloud::Bigtable::Admin::V2"; -option (google.api.resource_definition) = { - type: "cloudkms.googleapis.com/CryptoKey" - pattern: "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}" -}; - -// A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and -// the resources that serve them. -// All tables in an instance are served from all -// [Clusters][google.bigtable.admin.v2.Cluster] in the instance. -message Instance { - option (google.api.resource) = { - type: "bigtable.googleapis.com/Instance" - pattern: "projects/{project}/instances/{instance}" - }; - - // Possible states of an instance. - enum State { - // The state of the instance could not be determined. 
- STATE_NOT_KNOWN = 0; - - // The instance has been successfully created and can serve requests - // to its tables. - READY = 1; - - // The instance is currently being created, and may be destroyed - // if the creation process encounters an error. - CREATING = 2; - } - - // The type of the instance. - enum Type { - // The type of the instance is unspecified. If set when creating an - // instance, a `PRODUCTION` instance will be created. If set when updating - // an instance, the type will be left unchanged. - TYPE_UNSPECIFIED = 0; - - // An instance meant for production use. `serve_nodes` must be set - // on the cluster. - PRODUCTION = 1; - - // The instance is meant for development and testing purposes only; it has - // no performance or uptime guarantees and is not covered by SLA. - // After a development instance is created, it can be upgraded by - // updating the instance to type `PRODUCTION`. An instance created - // as a production instance cannot be changed to a development instance. - // When creating a development instance, `serve_nodes` on the cluster must - // not be set. - DEVELOPMENT = 2; - } - - // The unique name of the instance. Values are of the form - // `projects/{project}/instances/[a-z][a-z0-9\\-]+[a-z0-9]`. - string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Required. The descriptive name for this instance as it appears in UIs. - // Can be changed at any time, but should be kept globally unique - // to avoid confusion. - string display_name = 2 [(google.api.field_behavior) = REQUIRED]; - - // (`OutputOnly`) - // The current state of the instance. - State state = 3; - - // The type of the instance. Defaults to `PRODUCTION`. - Type type = 4; - - // Labels are a flexible and lightweight mechanism for organizing cloud - // resources into groups that reflect a customer's organizational needs and - // deployment strategies. They can be used to filter resources and aggregate - // metrics. - // - // * Label keys must be between 1 and 63 characters long and must conform to - // the regular expression: `[\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}`. - // * Label values must be between 0 and 63 characters long and must conform to - // the regular expression: `[\p{Ll}\p{Lo}\p{N}_-]{0,63}`. - // * No more than 64 labels can be associated with a given resource. - // * Keys and values must both be under 128 bytes. - map labels = 5; -} - -// A resizable group of nodes in a particular cloud location, capable -// of serving all [Tables][google.bigtable.admin.v2.Table] in the parent -// [Instance][google.bigtable.admin.v2.Instance]. -message Cluster { - option (google.api.resource) = { - type: "bigtable.googleapis.com/Cluster" - pattern: "projects/{project}/instances/{instance}/clusters/{cluster}" - }; - - // Cloud Key Management Service (Cloud KMS) settings for a CMEK-protected - // cluster. - message EncryptionConfig { - // Describes the Cloud KMS encryption key that will be used to protect the - // destination Bigtable cluster. The requirements for this key are: - // 1) The Cloud Bigtable service account associated with the project that - // contains this cluster must be granted the - // `cloudkms.cryptoKeyEncrypterDecrypter` role on the CMEK key. - // 2) Only regional keys can be used and the region of the CMEK key must - // match the region of the cluster. - // 3) All clusters within an instance must use the same CMEK key. 
- string kms_key_name = 1 [(google.api.resource_reference) = { - type: "cloudkms.googleapis.com/CryptoKey" - }]; - } - - // Possible states of a cluster. - enum State { - // The state of the cluster could not be determined. - STATE_NOT_KNOWN = 0; - - // The cluster has been successfully created and is ready to serve requests. - READY = 1; - - // The cluster is currently being created, and may be destroyed - // if the creation process encounters an error. - // A cluster may not be able to serve requests while being created. - CREATING = 2; - - // The cluster is currently being resized, and may revert to its previous - // node count if the process encounters an error. - // A cluster is still capable of serving requests while being resized, - // but may exhibit performance as if its number of allocated nodes is - // between the starting and requested states. - RESIZING = 3; - - // The cluster has no backing nodes. The data (tables) still - // exist, but no operations can be performed on the cluster. - DISABLED = 4; - } - - // The unique name of the cluster. Values are of the form - // `projects/{project}/instances/{instance}/clusters/[a-z][-a-z0-9]*`. - string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // (`CreationOnly`) - // The location where this cluster's nodes and storage reside. For best - // performance, clients should be located as close as possible to this - // cluster. Currently only zones are supported, so values should be of the - // form `projects/{project}/locations/{zone}`. - string location = 2 [(google.api.resource_reference) = { - type: "locations.googleapis.com/Location" - }]; - - // The current state of the cluster. - State state = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Required. The number of nodes allocated to this cluster. More nodes enable - // higher throughput and more consistent performance. - int32 serve_nodes = 4 [(google.api.field_behavior) = REQUIRED]; - - // (`CreationOnly`) - // The type of storage used by this cluster to serve its - // parent instance's tables, unless explicitly overridden. - StorageType default_storage_type = 5; - - // Immutable. The encryption configuration for CMEK-protected clusters. - EncryptionConfig encryption_config = 6 - [(google.api.field_behavior) = IMMUTABLE]; -} - -// A configuration object describing how Cloud Bigtable should treat traffic -// from a particular end user application. -message AppProfile { - option (google.api.resource) = { - type: "bigtable.googleapis.com/AppProfile" - pattern: "projects/{project}/instances/{instance}/appProfiles/{app_profile}" - }; - - // Read/write requests are routed to the nearest cluster in the instance, and - // will fail over to the nearest cluster that is available in the event of - // transient errors or delays. Clusters in a region are considered - // equidistant. Choosing this option sacrifices read-your-writes consistency - // to improve availability. - message MultiClusterRoutingUseAny {} - - // Unconditionally routes all read/write requests to a specific cluster. - // This option preserves read-your-writes consistency but does not improve - // availability. - message SingleClusterRouting { - // The cluster to which read/write requests should be routed. - string cluster_id = 1; - - // Whether or not `CheckAndMutateRow` and `ReadModifyWriteRow` requests are - // allowed by this app profile. It is unsafe to send these requests to - // the same table/row/column in multiple clusters. 
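As a rough illustration of how the Instance, Cluster and StorageType messages above surface in this package's hand-written layer (client.py, instance.py, cluster.py), here is a hedged sketch. The IDs are placeholders and the enums module layout is an assumption that varies across releases:

    from google.cloud import bigtable
    from google.cloud.bigtable import enums

    client = bigtable.Client(project="my-project", admin=True)

    instance = client.instance(
        "my-instance",
        display_name="My Instance",
        instance_type=enums.Instance.Type.PRODUCTION,
        labels={"env": "dev"},
    )
    cluster = instance.cluster(
        "my-instance-c1",
        location_id="us-central1-b",
        serve_nodes=3,
        default_storage_type=enums.StorageType.SSD,
    )

    # CreateInstance is a long-running operation; block until it completes.
    operation = instance.create(clusters=[cluster])
    operation.result(timeout=300)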
- bool allow_transactional_writes = 2; - } - - // (`OutputOnly`) - // The unique name of the app profile. Values are of the form - // `projects/{project}/instances/{instance}/appProfiles/[_a-zA-Z0-9][-_.a-zA-Z0-9]*`. - string name = 1; - - // Strongly validated etag for optimistic concurrency control. Preserve the - // value returned from `GetAppProfile` when calling `UpdateAppProfile` to - // fail the request if there has been a modification in the mean time. The - // `update_mask` of the request need not include `etag` for this protection - // to apply. - // See [Wikipedia](https://en.wikipedia.org/wiki/HTTP_ETag) and - // [RFC 7232](https://tools.ietf.org/html/rfc7232#section-2.3) for more - // details. - string etag = 2; - - // Optional long form description of the use case for this AppProfile. - string description = 3; - - // The routing policy for all read/write requests that use this app profile. - // A value must be explicitly set. - oneof routing_policy { - // Use a multi-cluster routing policy. - MultiClusterRoutingUseAny multi_cluster_routing_use_any = 5; - - // Use a single-cluster routing policy. - SingleClusterRouting single_cluster_routing = 6; - } -} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table.proto deleted file mode 100644 index a5578225ea18..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/proto/table.proto +++ /dev/null @@ -1,405 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.v2; - -import "google/api/field_behavior.proto"; -import "google/api/resource.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/timestamp.proto"; -import "google/rpc/status.proto"; - -option csharp_namespace = "Google.Cloud.Bigtable.Admin.V2"; -option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/v2;admin"; -option java_multiple_files = true; -option java_outer_classname = "TableProto"; -option java_package = "com.google.bigtable.admin.v2"; -option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2"; -option ruby_package = "Google::Cloud::Bigtable::Admin::V2"; -option (google.api.resource_definition) = { - type: "cloudkms.googleapis.com/CryptoKeyVersion" - pattern: "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}/cryptoKeyVersions/{crypto_key_version}" -}; - -// Indicates the type of the restore source. -enum RestoreSourceType { - // No restore associated. - RESTORE_SOURCE_TYPE_UNSPECIFIED = 0; - - // A backup was used as the source of the restore. - BACKUP = 1; -} - -// Information about a table restore. -message RestoreInfo { - // The type of the restore source. - RestoreSourceType source_type = 1; - - // Information about the source used to restore the table. - oneof source_info { - // Information about the backup used to restore the table. 
The backup - // may no longer exist. - BackupInfo backup_info = 2; - } -} - -// A collection of user data indexed by row, column, and timestamp. -// Each table is served using the resources of its parent cluster. -message Table { - option (google.api.resource) = { - type: "bigtable.googleapis.com/Table" - pattern: "projects/{project}/instances/{instance}/tables/{table}" - }; - - // The state of a table's data in a particular cluster. - message ClusterState { - // Table replication states. - enum ReplicationState { - // The replication state of the table is unknown in this cluster. - STATE_NOT_KNOWN = 0; - - // The cluster was recently created, and the table must finish copying - // over pre-existing data from other clusters before it can begin - // receiving live replication updates and serving Data API requests. - INITIALIZING = 1; - - // The table is temporarily unable to serve Data API requests from this - // cluster due to planned internal maintenance. - PLANNED_MAINTENANCE = 2; - - // The table is temporarily unable to serve Data API requests from this - // cluster due to unplanned or emergency maintenance. - UNPLANNED_MAINTENANCE = 3; - - // The table can serve Data API requests from this cluster. Depending on - // replication delay, reads may not immediately reflect the state of the - // table in other clusters. - READY = 4; - - // The table is fully created and ready for use after a restore, and is - // being optimized for performance. When optimizations are complete, the - // table will transition to `READY` state. - READY_OPTIMIZING = 5; - } - - // Output only. The state of replication for the table in this cluster. - ReplicationState replication_state = 1; - - // Output only. The encryption information for the table in this cluster. - // If the encryption key protecting this resource is customer managed, then - // its version can be rotated in Cloud Key Management Service (Cloud KMS). - // The primary version of the key and its status will be reflected here when - // changes propagate from Cloud KMS. - repeated EncryptionInfo encryption_info = 2 - [(google.api.field_behavior) = OUTPUT_ONLY]; - } - - // Possible timestamp granularities to use when keeping multiple versions - // of data in a table. - enum TimestampGranularity { - // The user did not specify a granularity. Should not be returned. - // When specified during table creation, MILLIS will be used. - TIMESTAMP_GRANULARITY_UNSPECIFIED = 0; - - // The table keeps data versioned at a granularity of 1ms. - MILLIS = 1; - } - - // Defines a view over a table's fields. - enum View { - // Uses the default view for each method as documented in its request. - VIEW_UNSPECIFIED = 0; - - // Only populates `name`. - NAME_ONLY = 1; - - // Only populates `name` and fields related to the table's schema. - SCHEMA_VIEW = 2; - - // Only populates `name` and fields related to the table's replication - // state. - REPLICATION_VIEW = 3; - - // Only populates 'name' and fields related to the table's encryption state. - ENCRYPTION_VIEW = 5; - - // Populates all fields. - FULL = 4; - } - - // The unique name of the table. Values are of the form - // `projects/{project}/instances/{instance}/tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]*`. - // Views: `NAME_ONLY`, `SCHEMA_VIEW`, `REPLICATION_VIEW`, `FULL` - string name = 1; - - // Output only. Map from cluster ID to per-cluster table state. 
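A small, hedged sketch of inspecting the per-cluster replication state described above, assuming the hand-written Table.get_cluster_states() helper and placeholder IDs:

    from google.cloud import bigtable

    client = bigtable.Client(project="my-project", admin=True)
    table = client.instance("my-instance").table("my-table")

    # Maps cluster ID to a ClusterState, mirroring REPLICATION_VIEW above.
    for cluster_id, state in table.get_cluster_states().items():
        print(cluster_id, state.replication_state)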
- // If it could not be determined whether or not the table has data in a - // particular cluster (for example, if its zone is unavailable), then - // there will be an entry for the cluster with UNKNOWN `replication_status`. - // Views: `REPLICATION_VIEW`, `ENCRYPTION_VIEW`, `FULL` - map cluster_states = 2; - - // (`CreationOnly`) - // The column families configured for this table, mapped by column family ID. - // Views: `SCHEMA_VIEW`, `FULL` - map column_families = 3; - - // (`CreationOnly`) - // The granularity (i.e. `MILLIS`) at which timestamps are stored in - // this table. Timestamps not matching the granularity will be rejected. - // If unspecified at creation time, the value will be set to `MILLIS`. - // Views: `SCHEMA_VIEW`, `FULL`. - TimestampGranularity granularity = 4; - - // Output only. If this table was restored from another data source (e.g. a - // backup), this field will be populated with information about the restore. - RestoreInfo restore_info = 6; -} - -// A set of columns within a table which share a common configuration. -message ColumnFamily { - // Garbage collection rule specified as a protobuf. - // Must serialize to at most 500 bytes. - // - // NOTE: Garbage collection executes opportunistically in the background, and - // so it's possible for reads to return a cell even if it matches the active - // GC expression for its family. - GcRule gc_rule = 1; -} - -// Rule for determining which cells to delete during garbage collection. -message GcRule { - // A GcRule which deletes cells matching all of the given rules. - message Intersection { - // Only delete cells which would be deleted by every element of `rules`. - repeated GcRule rules = 1; - } - - // A GcRule which deletes cells matching any of the given rules. - message Union { - // Delete cells which would be deleted by any element of `rules`. - repeated GcRule rules = 1; - } - - // Garbage collection rules. - oneof rule { - // Delete all cells in a column except the most recent N. - int32 max_num_versions = 1; - - // Delete cells in a column older than the given age. - // Values must be at least one millisecond, and will be truncated to - // microsecond granularity. - google.protobuf.Duration max_age = 2; - - // Delete cells that would be deleted by every nested rule. - Intersection intersection = 3; - - // Delete cells that would be deleted by any nested rule. - Union union = 4; - } -} - -// Encryption information for a given resource. -// If this resource is protected with customer managed encryption, the in-use -// Cloud Key Management Service (Cloud KMS) key version is specified along with -// its status. -message EncryptionInfo { - // Possible encryption types for a resource. - enum EncryptionType { - // Encryption type was not specified, though data at rest remains encrypted. - ENCRYPTION_TYPE_UNSPECIFIED = 0; - - // The data backing this resource is encrypted at rest with a key that is - // fully managed by Google. No key version or status will be populated. - // This is the default state. - GOOGLE_DEFAULT_ENCRYPTION = 1; - - // The data backing this resource is encrypted at rest with a key that is - // managed by the customer. - // The in-use version of the key and its status are populated for - // CMEK-protected tables. - // CMEK-protected backups are pinned to the key version that was in use at - // the time the backup was taken. This key version is populated but its - // status is not tracked and is reported as `UNKNOWN`. - CUSTOMER_MANAGED_ENCRYPTION = 2; - } - - // Output only. 
The type of encryption used to protect this resource. - EncryptionType encryption_type = 3 - [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. The status of encrypt/decrypt calls on underlying data for - // this resource. Regardless of status, the existing data is always encrypted - // at rest. - google.rpc.Status encryption_status = 4 - [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. The version of the Cloud KMS key specified in the parent - // cluster that is in use for the data underlying this table. - string kms_key_version = 2 [ - (google.api.field_behavior) = OUTPUT_ONLY, - (google.api.resource_reference) = { - type: "cloudkms.googleapis.com/CryptoKeyVersion" - } - ]; -} - -// A snapshot of a table at a particular time. A snapshot can be used as a -// checkpoint for data restoration or a data source for a new table. -// -// Note: This is a private alpha release of Cloud Bigtable snapshots. This -// feature is not currently available to most Cloud Bigtable customers. This -// feature might be changed in backward-incompatible ways and is not recommended -// for production use. It is not subject to any SLA or deprecation policy. -message Snapshot { - option (google.api.resource) = { - type: "bigtable.googleapis.com/Snapshot" - pattern: "projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}" - }; - - // Possible states of a snapshot. - enum State { - // The state of the snapshot could not be determined. - STATE_NOT_KNOWN = 0; - - // The snapshot has been successfully created and can serve all requests. - READY = 1; - - // The snapshot is currently being created, and may be destroyed if the - // creation process encounters an error. A snapshot may not be restored to a - // table while it is being created. - CREATING = 2; - } - - // Output only. The unique name of the snapshot. - // Values are of the form - // `projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}`. - string name = 1; - - // Output only. The source table at the time the snapshot was taken. - Table source_table = 2; - - // Output only. The size of the data in the source table at the time the - // snapshot was taken. In some cases, this value may be computed - // asynchronously via a background process and a placeholder of 0 will be used - // in the meantime. - int64 data_size_bytes = 3; - - // Output only. The time when the snapshot is created. - google.protobuf.Timestamp create_time = 4; - - // Output only. The time when the snapshot will be deleted. The maximum amount - // of time a snapshot can stay active is 365 days. If 'ttl' is not specified, - // the default maximum of 365 days will be used. - google.protobuf.Timestamp delete_time = 5; - - // Output only. The current state of the snapshot. - State state = 6; - - // Output only. Description of the snapshot. - string description = 7; -} - -// A backup of a Cloud Bigtable table. -message Backup { - option (google.api.resource) = { - type: "bigtable.googleapis.com/Backup" - pattern: "projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}" - }; - - // Indicates the current state of the backup. - enum State { - // Not specified. - STATE_UNSPECIFIED = 0; - - // The pending backup is still being created. Operations on the - // backup may fail with `FAILED_PRECONDITION` in this state. - CREATING = 1; - - // The backup is complete and ready for use. - READY = 2; - } - - // Output only. A globally unique identifier for the backup which cannot be - // changed. 
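The GcRule message shown earlier is wrapped by the helpers in column_family.py; a minimal, hedged sketch with placeholder IDs (the GCRuleUnion, MaxVersionsGCRule and MaxAgeGCRule names are assumed from the hand-written layer):

    import datetime

    from google.cloud import bigtable
    from google.cloud.bigtable import column_family

    client = bigtable.Client(project="my-project", admin=True)
    table = client.instance("my-instance").table("my-table")

    # Union rule: drop cells beyond 3 versions OR older than 30 days.
    gc_rule = column_family.GCRuleUnion(
        rules=[
            column_family.MaxVersionsGCRule(3),
            column_family.MaxAgeGCRule(datetime.timedelta(days=30)),
        ]
    )
    table.column_family("cf1", gc_rule=gc_rule).create()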
Values are of the form - // `projects/{project}/instances/{instance}/clusters/{cluster}/ - // backups/[_a-zA-Z0-9][-_.a-zA-Z0-9]*` - // The final segment of the name must be between 1 and 50 characters - // in length. - // - // The backup is stored in the cluster identified by the prefix of the backup - // name of the form - // `projects/{project}/instances/{instance}/clusters/{cluster}`. - string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Required. Immutable. Name of the table from which this backup was created. - // This needs to be in the same instance as the backup. Values are of the form - // `projects/{project}/instances/{instance}/tables/{source_table}`. - string source_table = 2 [ - (google.api.field_behavior) = IMMUTABLE, - (google.api.field_behavior) = REQUIRED - ]; - - // Required. The expiration time of the backup, with microseconds - // granularity that must be at least 6 hours and at most 30 days - // from the time the request is received. Once the `expire_time` - // has passed, Cloud Bigtable will delete the backup and free the - // resources used by the backup. - google.protobuf.Timestamp expire_time = 3 - [(google.api.field_behavior) = REQUIRED]; - - // Output only. `start_time` is the time that the backup was started - // (i.e. approximately the time the - // [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup] - // request is received). The row data in this backup will be no older than - // this timestamp. - google.protobuf.Timestamp start_time = 4 - [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. `end_time` is the time that the backup was finished. The row - // data in the backup will be no newer than this timestamp. - google.protobuf.Timestamp end_time = 5 - [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. Size of the backup in bytes. - int64 size_bytes = 6 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. The current state of the backup. - State state = 7 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. The encryption information for the backup. - EncryptionInfo encryption_info = 9 - [(google.api.field_behavior) = OUTPUT_ONLY]; -} - -// Information about a backup. -message BackupInfo { - // Output only. Name of the backup. - string backup = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. The time that the backup was started. Row data in the backup - // will be no older than this timestamp. - google.protobuf.Timestamp start_time = 2 - [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. This time that the backup was finished. Row data in the - // backup will be no newer than this timestamp. - google.protobuf.Timestamp end_time = 3 - [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. Name of the table the backup was created from. - string source_table = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; -} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable.proto deleted file mode 100644 index 32aaba21d05e..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/bigtable.proto +++ /dev/null @@ -1,427 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.v2; - -import "google/api/annotations.proto"; -import "google/api/client.proto"; -import "google/api/field_behavior.proto"; -import "google/api/resource.proto"; -import "google/bigtable/v2/data.proto"; -import "google/protobuf/wrappers.proto"; -import "google/rpc/status.proto"; - -option csharp_namespace = "Google.Cloud.Bigtable.V2"; -option go_package = "google.golang.org/genproto/googleapis/bigtable/v2;bigtable"; -option java_multiple_files = true; -option java_outer_classname = "BigtableProto"; -option java_package = "com.google.bigtable.v2"; -option php_namespace = "Google\\Cloud\\Bigtable\\V2"; -option ruby_package = "Google::Cloud::Bigtable::V2"; -option (google.api.resource_definition) = { - type: "bigtable.googleapis.com/Table" - pattern: "projects/{project}/instances/{instance}/tables/{table}" -}; - -// Service for reading from and writing to existing Bigtable tables. -service Bigtable { - option (google.api.default_host) = "bigtable.googleapis.com"; - option (google.api.oauth_scopes) = - "https://www.googleapis.com/auth/bigtable.data," - "https://www.googleapis.com/auth/bigtable.data.readonly," - "https://www.googleapis.com/auth/cloud-bigtable.data," - "https://www.googleapis.com/auth/cloud-bigtable.data.readonly," - "https://www.googleapis.com/auth/cloud-platform," - "https://www.googleapis.com/auth/cloud-platform.read-only"; - - // Streams back the contents of all requested rows in key order, optionally - // applying the same Reader filter to each. Depending on their size, - // rows and cells may be broken up across multiple responses, but - // atomicity of each row will still be preserved. See the - // ReadRowsResponse documentation for details. - rpc ReadRows(ReadRowsRequest) returns (stream ReadRowsResponse) { - option (google.api.http) = { - post: "/v2/{table_name=projects/*/instances/*/tables/*}:readRows" - body: "*" - }; - option (google.api.method_signature) = "table_name"; - option (google.api.method_signature) = "table_name,app_profile_id"; - } - - // Returns a sample of row keys in the table. The returned row keys will - // delimit contiguous sections of the table of approximately equal size, - // which can be used to break up the data for distributed tasks like - // mapreduces. - rpc SampleRowKeys(SampleRowKeysRequest) returns (stream SampleRowKeysResponse) { - option (google.api.http) = { - get: "/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys" - }; - option (google.api.method_signature) = "table_name"; - option (google.api.method_signature) = "table_name,app_profile_id"; - } - - // Mutates a row atomically. Cells already present in the row are left - // unchanged unless explicitly changed by `mutation`. - rpc MutateRow(MutateRowRequest) returns (MutateRowResponse) { - option (google.api.http) = { - post: "/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow" - body: "*" - }; - option (google.api.method_signature) = "table_name,row_key,mutations"; - option (google.api.method_signature) = "table_name,row_key,mutations,app_profile_id"; - } - - // Mutates multiple rows in a batch. 
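To make the data-plane RPCs above concrete, a hedged sketch of MutateRow and MutateRows through the hand-written layer (placeholder IDs; row(), set_cell(), commit() and mutate_rows() are the assumed helper names):

    from google.cloud import bigtable

    client = bigtable.Client(project="my-project")
    table = client.instance("my-instance").table("my-table")

    # MutateRow: the queued set_cell() mutations are applied atomically on commit().
    row = table.row(b"phone#4c410523#20190501")
    row.set_cell("stats_summary", b"connected_cell", b"1")
    row.set_cell("stats_summary", b"os_build", b"PQ2A.190405.003")
    row.commit()

    # MutateRows: each entry is atomic on its own, the batch as a whole is not.
    rows = [table.row(b"key-%d" % i) for i in range(3)]
    for i, batched in enumerate(rows):
        batched.set_cell("stats_summary", b"connected_cell", b"%d" % i)
    statuses = table.mutate_rows(rows)  # one google.rpc.Status per entry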
Each individual row is mutated - // atomically as in MutateRow, but the entire batch is not executed - // atomically. - rpc MutateRows(MutateRowsRequest) returns (stream MutateRowsResponse) { - option (google.api.http) = { - post: "/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows" - body: "*" - }; - option (google.api.method_signature) = "table_name,entries"; - option (google.api.method_signature) = "table_name,entries,app_profile_id"; - } - - // Mutates a row atomically based on the output of a predicate Reader filter. - rpc CheckAndMutateRow(CheckAndMutateRowRequest) returns (CheckAndMutateRowResponse) { - option (google.api.http) = { - post: "/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow" - body: "*" - }; - option (google.api.method_signature) = "table_name,row_key,predicate_filter,true_mutations,false_mutations"; - option (google.api.method_signature) = "table_name,row_key,predicate_filter,true_mutations,false_mutations,app_profile_id"; - } - - // Modifies a row atomically on the server. The method reads the latest - // existing timestamp and value from the specified columns and writes a new - // entry based on pre-defined read/modify/write rules. The new value for the - // timestamp is the greater of the existing timestamp or the current server - // time. The method returns the new contents of all modified cells. - rpc ReadModifyWriteRow(ReadModifyWriteRowRequest) returns (ReadModifyWriteRowResponse) { - option (google.api.http) = { - post: "/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow" - body: "*" - }; - option (google.api.method_signature) = "table_name,row_key,rules"; - option (google.api.method_signature) = "table_name,row_key,rules,app_profile_id"; - } -} - -// Request message for Bigtable.ReadRows. -message ReadRowsRequest { - // Required. The unique name of the table from which to read. - // Values are of the form - // `projects//instances//tables/
`. - string table_name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigtable.googleapis.com/Table" - } - ]; - - // This value specifies routing for replication. If not specified, the - // "default" application profile will be used. - string app_profile_id = 5; - - // The row keys and/or ranges to read. If not specified, reads from all rows. - RowSet rows = 2; - - // The filter to apply to the contents of the specified row(s). If unset, - // reads the entirety of each row. - RowFilter filter = 3; - - // The read will terminate after committing to N rows' worth of results. The - // default (zero) is to return all results. - int64 rows_limit = 4; -} - -// Response message for Bigtable.ReadRows. -message ReadRowsResponse { - // Specifies a piece of a row's contents returned as part of the read - // response stream. - message CellChunk { - // The row key for this chunk of data. If the row key is empty, - // this CellChunk is a continuation of the same row as the previous - // CellChunk in the response stream, even if that CellChunk was in a - // previous ReadRowsResponse message. - bytes row_key = 1; - - // The column family name for this chunk of data. If this message - // is not present this CellChunk is a continuation of the same column - // family as the previous CellChunk. The empty string can occur as a - // column family name in a response so clients must check - // explicitly for the presence of this message, not just for - // `family_name.value` being non-empty. - google.protobuf.StringValue family_name = 2; - - // The column qualifier for this chunk of data. If this message - // is not present, this CellChunk is a continuation of the same column - // as the previous CellChunk. Column qualifiers may be empty so - // clients must check for the presence of this message, not just - // for `qualifier.value` being non-empty. - google.protobuf.BytesValue qualifier = 3; - - // The cell's stored timestamp, which also uniquely identifies it - // within its column. Values are always expressed in - // microseconds, but individual tables may set a coarser - // granularity to further restrict the allowed values. For - // example, a table which specifies millisecond granularity will - // only allow values of `timestamp_micros` which are multiples of - // 1000. Timestamps are only set in the first CellChunk per cell - // (for cells split into multiple chunks). - int64 timestamp_micros = 4; - - // Labels applied to the cell by a - // [RowFilter][google.bigtable.v2.RowFilter]. Labels are only set - // on the first CellChunk per cell. - repeated string labels = 5; - - // The value stored in the cell. Cell values can be split across - // multiple CellChunks. In that case only the value field will be - // set in CellChunks after the first: the timestamp and labels - // will only be present in the first CellChunk, even if the first - // CellChunk came in a previous ReadRowsResponse. - bytes value = 6; - - // If this CellChunk is part of a chunked cell value and this is - // not the final chunk of that cell, value_size will be set to the - // total length of the cell value. The client can use this size - // to pre-allocate memory to hold the full cell value. - int32 value_size = 7; - - // Signals to the client concerning previous CellChunks received. - oneof row_status { - // Indicates that the client should drop all previous chunks for - // `row_key`, as it will be re-read from the beginning. 
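A hedged sketch of consuming ReadRows through the hand-written layer, which reassembles the CellChunks above into whole rows before yielding them (placeholder IDs; the RowSet helper and read_rows() keywords vary slightly by release):

    from google.cloud import bigtable
    from google.cloud.bigtable.row_set import RowSet

    client = bigtable.Client(project="my-project")
    table = client.instance("my-instance").table("my-table")

    row_set = RowSet()
    row_set.add_row_range_from_keys(start_key=b"phone#", end_key=b"phone$")

    # Chunk handling (reset_row / commit_row) happens inside the stream parser.
    for row in table.read_rows(row_set=row_set):
        cell = row.cells["stats_summary"][b"os_build"][0]
        print(row.row_key, cell.value, cell.timestamp)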
- bool reset_row = 8; - - // Indicates that the client can safely process all previous chunks for - // `row_key`, as its data has been fully read. - bool commit_row = 9; - } - } - - // A collection of a row's contents as part of the read request. - repeated CellChunk chunks = 1; - - // Optionally the server might return the row key of the last row it - // has scanned. The client can use this to construct a more - // efficient retry request if needed: any row keys or portions of - // ranges less than this row key can be dropped from the request. - // This is primarily useful for cases where the server has read a - // lot of data that was filtered out since the last committed row - // key, allowing the client to skip that work on a retry. - bytes last_scanned_row_key = 2; -} - -// Request message for Bigtable.SampleRowKeys. -message SampleRowKeysRequest { - // Required. The unique name of the table from which to sample row keys. - // Values are of the form - // `projects//instances//tables/
`. - string table_name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigtable.googleapis.com/Table" - } - ]; - - // This value specifies routing for replication. If not specified, the - // "default" application profile will be used. - string app_profile_id = 2; -} - -// Response message for Bigtable.SampleRowKeys. -message SampleRowKeysResponse { - // Sorted streamed sequence of sample row keys in the table. The table might - // have contents before the first row key in the list and after the last one, - // but a key containing the empty string indicates "end of table" and will be - // the last response given, if present. - // Note that row keys in this list may not have ever been written to or read - // from, and users should therefore not make any assumptions about the row key - // structure that are specific to their use case. - bytes row_key = 1; - - // Approximate total storage space used by all rows in the table which precede - // `row_key`. Buffering the contents of all rows between two subsequent - // samples would require space roughly equal to the difference in their - // `offset_bytes` fields. - int64 offset_bytes = 2; -} - -// Request message for Bigtable.MutateRow. -message MutateRowRequest { - // Required. The unique name of the table to which the mutation should be applied. - // Values are of the form - // `projects//instances//tables/
`. - string table_name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigtable.googleapis.com/Table" - } - ]; - - // This value specifies routing for replication. If not specified, the - // "default" application profile will be used. - string app_profile_id = 4; - - // Required. The key of the row to which the mutation should be applied. - bytes row_key = 2 [(google.api.field_behavior) = REQUIRED]; - - // Required. Changes to be atomically applied to the specified row. Entries are applied - // in order, meaning that earlier mutations can be masked by later ones. - // Must contain at least one entry and at most 100000. - repeated Mutation mutations = 3 [(google.api.field_behavior) = REQUIRED]; -} - -// Response message for Bigtable.MutateRow. -message MutateRowResponse { - -} - -// Request message for BigtableService.MutateRows. -message MutateRowsRequest { - // A mutation for a given row. - message Entry { - // The key of the row to which the `mutations` should be applied. - bytes row_key = 1; - - // Required. Changes to be atomically applied to the specified row. Mutations are - // applied in order, meaning that earlier mutations can be masked by - // later ones. - // You must specify at least one mutation. - repeated Mutation mutations = 2 [(google.api.field_behavior) = REQUIRED]; - } - - // Required. The unique name of the table to which the mutations should be applied. - string table_name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigtable.googleapis.com/Table" - } - ]; - - // This value specifies routing for replication. If not specified, the - // "default" application profile will be used. - string app_profile_id = 3; - - // Required. The row keys and corresponding mutations to be applied in bulk. - // Each entry is applied as an atomic mutation, but the entries may be - // applied in arbitrary order (even between entries for the same row). - // At least one entry must be specified, and in total the entries can - // contain at most 100000 mutations. - repeated Entry entries = 2 [(google.api.field_behavior) = REQUIRED]; -} - -// Response message for BigtableService.MutateRows. -message MutateRowsResponse { - // The result of applying a passed mutation in the original request. - message Entry { - // The index into the original request's `entries` list of the Entry - // for which a result is being reported. - int64 index = 1; - - // The result of the request Entry identified by `index`. - // Depending on how requests are batched during execution, it is possible - // for one Entry to fail due to an error with another Entry. In the event - // that this occurs, the same error will be reported for both entries. - google.rpc.Status status = 2; - } - - // One or more results for Entries from the batch request. - repeated Entry entries = 1; -} - -// Request message for Bigtable.CheckAndMutateRow. -message CheckAndMutateRowRequest { - // Required. The unique name of the table to which the conditional mutation should be - // applied. - // Values are of the form - // `projects//instances//tables/
`. - string table_name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigtable.googleapis.com/Table" - } - ]; - - // This value specifies routing for replication. If not specified, the - // "default" application profile will be used. - string app_profile_id = 7; - - // Required. The key of the row to which the conditional mutation should be applied. - bytes row_key = 2 [(google.api.field_behavior) = REQUIRED]; - - // The filter to be applied to the contents of the specified row. Depending - // on whether or not any results are yielded, either `true_mutations` or - // `false_mutations` will be executed. If unset, checks that the row contains - // any values at all. - RowFilter predicate_filter = 6; - - // Changes to be atomically applied to the specified row if `predicate_filter` - // yields at least one cell when applied to `row_key`. Entries are applied in - // order, meaning that earlier mutations can be masked by later ones. - // Must contain at least one entry if `false_mutations` is empty, and at most - // 100000. - repeated Mutation true_mutations = 4; - - // Changes to be atomically applied to the specified row if `predicate_filter` - // does not yield any cells when applied to `row_key`. Entries are applied in - // order, meaning that earlier mutations can be masked by later ones. - // Must contain at least one entry if `true_mutations` is empty, and at most - // 100000. - repeated Mutation false_mutations = 5; -} - -// Response message for Bigtable.CheckAndMutateRow. -message CheckAndMutateRowResponse { - // Whether or not the request's `predicate_filter` yielded any results for - // the specified row. - bool predicate_matched = 1; -} - -// Request message for Bigtable.ReadModifyWriteRow. -message ReadModifyWriteRowRequest { - // Required. The unique name of the table to which the read/modify/write rules should be - // applied. - // Values are of the form - // `projects//instances//tables/
`. - string table_name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigtable.googleapis.com/Table" - } - ]; - - // This value specifies routing for replication. If not specified, the - // "default" application profile will be used. - string app_profile_id = 4; - - // Required. The key of the row to which the read/modify/write rules should be applied. - bytes row_key = 2 [(google.api.field_behavior) = REQUIRED]; - - // Required. Rules specifying how the specified row's contents are to be transformed - // into writes. Entries are applied in order, meaning that earlier rules will - // affect the results of later ones. - repeated ReadModifyWriteRule rules = 3 [(google.api.field_behavior) = REQUIRED]; -} - -// Response message for Bigtable.ReadModifyWriteRow. -message ReadModifyWriteRowResponse { - // A Row containing the new contents of all cells modified by the request. - Row row = 1; -} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data.proto b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data.proto deleted file mode 100644 index 2cc916454b81..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/proto/data.proto +++ /dev/null @@ -1,536 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.bigtable.v2; - -option csharp_namespace = "Google.Cloud.Bigtable.V2"; -option go_package = "google.golang.org/genproto/googleapis/bigtable/v2;bigtable"; -option java_multiple_files = true; -option java_outer_classname = "DataProto"; -option java_package = "com.google.bigtable.v2"; -option php_namespace = "Google\\Cloud\\Bigtable\\V2"; -option ruby_package = "Google::Cloud::Bigtable::V2"; - -// Specifies the complete (requested) contents of a single row of a table. -// Rows which exceed 256MiB in size cannot be read in full. -message Row { - // The unique key which identifies this row within its table. This is the same - // key that's used to identify the row in, for example, a MutateRowRequest. - // May contain any non-empty byte string up to 4KiB in length. - bytes key = 1; - - // May be empty, but only if the entire row is empty. - // The mutual ordering of column families is not specified. - repeated Family families = 2; -} - -// Specifies (some of) the contents of a single row/column family intersection -// of a table. -message Family { - // The unique key which identifies this family within its row. This is the - // same key that's used to identify the family in, for example, a RowFilter - // which sets its "family_name_regex_filter" field. - // Must match `[-_.a-zA-Z0-9]+`, except that AggregatingRowProcessors may - // produce cells in a sentinel family with an empty name. - // Must be no greater than 64 characters in length. - string name = 1; - - // Must not be empty. Sorted in order of increasing "qualifier". 
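For the conditional and read/modify/write RPCs above, a hedged sketch using the hand-written row helpers (placeholder IDs; the filter_ and append keywords are assumed from row.py and may carry deprecation warnings in newer releases):

    from google.cloud import bigtable
    from google.cloud.bigtable.row_filters import FamilyNameRegexFilter, RowFilterChain, ValueRegexFilter

    client = bigtable.Client(project="my-project")
    table = client.instance("my-instance").table("my-table")

    # CheckAndMutateRow: the mutation runs only if the predicate yields any cells.
    predicate = RowFilterChain(
        filters=[FamilyNameRegexFilter("stats_summary"), ValueRegexFilter(b"PQ2A\\..*")]
    )
    cond_row = table.row(b"phone#4c410523#20190501", filter_=predicate)
    cond_row.set_cell("stats_summary", b"os_name", b"android", state=True)
    cond_row.commit()

    # ReadModifyWriteRow: server-side increment, returning the new cell contents.
    append_row = table.row(b"phone#4c410523#20190501", append=True)
    append_row.increment_cell_value("stats_summary", b"connected_cell", 1)
    new_cells = append_row.commit()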
- repeated Column columns = 2; -} - -// Specifies (some of) the contents of a single row/column intersection of a -// table. -message Column { - // The unique key which identifies this column within its family. This is the - // same key that's used to identify the column in, for example, a RowFilter - // which sets its `column_qualifier_regex_filter` field. - // May contain any byte string, including the empty string, up to 16kiB in - // length. - bytes qualifier = 1; - - // Must not be empty. Sorted in order of decreasing "timestamp_micros". - repeated Cell cells = 2; -} - -// Specifies (some of) the contents of a single row/column/timestamp of a table. -message Cell { - // The cell's stored timestamp, which also uniquely identifies it within - // its column. - // Values are always expressed in microseconds, but individual tables may set - // a coarser granularity to further restrict the allowed values. For - // example, a table which specifies millisecond granularity will only allow - // values of `timestamp_micros` which are multiples of 1000. - int64 timestamp_micros = 1; - - // The value stored in the cell. - // May contain any byte string, including the empty string, up to 100MiB in - // length. - bytes value = 2; - - // Labels applied to the cell by a [RowFilter][google.bigtable.v2.RowFilter]. - repeated string labels = 3; -} - -// Specifies a contiguous range of rows. -message RowRange { - // The row key at which to start the range. - // If neither field is set, interpreted as the empty string, inclusive. - oneof start_key { - // Used when giving an inclusive lower bound for the range. - bytes start_key_closed = 1; - - // Used when giving an exclusive lower bound for the range. - bytes start_key_open = 2; - } - - // The row key at which to end the range. - // If neither field is set, interpreted as the infinite row key, exclusive. - oneof end_key { - // Used when giving an exclusive upper bound for the range. - bytes end_key_open = 3; - - // Used when giving an inclusive upper bound for the range. - bytes end_key_closed = 4; - } -} - -// Specifies a non-contiguous set of rows. -message RowSet { - // Single rows included in the set. - repeated bytes row_keys = 1; - - // Contiguous row ranges included in the set. - repeated RowRange row_ranges = 2; -} - -// Specifies a contiguous range of columns within a single column family. -// The range spans from <column_family>:<start_qualifier> to -// <column_family>:<end_qualifier>, where both bounds can be either -// inclusive or exclusive. -message ColumnRange { - // The name of the column family within which this range falls. - string family_name = 1; - - // The column qualifier at which to start the range (within `column_family`). - // If neither field is set, interpreted as the empty string, inclusive. - oneof start_qualifier { - // Used when giving an inclusive lower bound for the range. - bytes start_qualifier_closed = 2; - - // Used when giving an exclusive lower bound for the range. - bytes start_qualifier_open = 3; - } - - // The column qualifier at which to end the range (within `column_family`). - // If neither field is set, interpreted as the infinite string, exclusive. - oneof end_qualifier { - // Used when giving an inclusive upper bound for the range. - bytes end_qualifier_closed = 4; - - // Used when giving an exclusive upper bound for the range. - bytes end_qualifier_open = 5; - } -} - -// Specified a contiguous range of microsecond timestamps. -message TimestampRange { - // Inclusive lower bound. 
If left empty, interpreted as 0. - int64 start_timestamp_micros = 1; - - // Exclusive upper bound. If left empty, interpreted as infinity. - int64 end_timestamp_micros = 2; -} - -// Specifies a contiguous range of raw byte values. -message ValueRange { - // The value at which to start the range. - // If neither field is set, interpreted as the empty string, inclusive. - oneof start_value { - // Used when giving an inclusive lower bound for the range. - bytes start_value_closed = 1; - - // Used when giving an exclusive lower bound for the range. - bytes start_value_open = 2; - } - - // The value at which to end the range. - // If neither field is set, interpreted as the infinite string, exclusive. - oneof end_value { - // Used when giving an inclusive upper bound for the range. - bytes end_value_closed = 3; - - // Used when giving an exclusive upper bound for the range. - bytes end_value_open = 4; - } -} - -// Takes a row as input and produces an alternate view of the row based on -// specified rules. For example, a RowFilter might trim down a row to include -// just the cells from columns matching a given regular expression, or might -// return all the cells of a row but not their values. More complicated filters -// can be composed out of these components to express requests such as, "within -// every column of a particular family, give just the two most recent cells -// which are older than timestamp X." -// -// There are two broad categories of RowFilters (true filters and transformers), -// as well as two ways to compose simple filters into more complex ones -// (chains and interleaves). They work as follows: -// -// * True filters alter the input row by excluding some of its cells wholesale -// from the output row. An example of a true filter is the `value_regex_filter`, -// which excludes cells whose values don't match the specified pattern. All -// regex true filters use RE2 syntax (https://github.com/google/re2/wiki/Syntax) -// in raw byte mode (RE2::Latin1), and are evaluated as full matches. An -// important point to keep in mind is that `RE2(.)` is equivalent by default to -// `RE2([^\n])`, meaning that it does not match newlines. When attempting to -// match an arbitrary byte, you should therefore use the escape sequence `\C`, -// which may need to be further escaped as `\\C` in your client language. -// -// * Transformers alter the input row by changing the values of some of its -// cells in the output, without excluding them completely. Currently, the only -// supported transformer is the `strip_value_transformer`, which replaces every -// cell's value with the empty string. -// -// * Chains and interleaves are described in more detail in the -// RowFilter.Chain and RowFilter.Interleave documentation. -// -// The total serialized size of a RowFilter message must not -// exceed 4096 bytes, and RowFilters may not be nested within each other -// (in Chains or Interleaves) to a depth of more than 20. -message RowFilter { - // A RowFilter which sends rows through several RowFilters in sequence. - message Chain { - // The elements of "filters" are chained together to process the input row: - // in row -> f(0) -> intermediate row -> f(1) -> ... -> f(N) -> out row - // The full chain is executed atomically. - repeated RowFilter filters = 1; - } - - // A RowFilter which sends each row to each of several component - // RowFilters and interleaves the results. 
- message Interleave { - // The elements of "filters" all process a copy of the input row, and the - // results are pooled, sorted, and combined into a single output row. - // If multiple cells are produced with the same column and timestamp, - // they will all appear in the output row in an unspecified mutual order. - // Consider the following example, with three filters: - // - // input row - // | - // ----------------------------------------------------- - // | | | - // f(0) f(1) f(2) - // | | | - // 1: foo,bar,10,x foo,bar,10,z far,bar,7,a - // 2: foo,blah,11,z far,blah,5,x far,blah,5,x - // | | | - // ----------------------------------------------------- - // | - // 1: foo,bar,10,z // could have switched with #2 - // 2: foo,bar,10,x // could have switched with #1 - // 3: foo,blah,11,z - // 4: far,bar,7,a - // 5: far,blah,5,x // identical to #6 - // 6: far,blah,5,x // identical to #5 - // - // All interleaved filters are executed atomically. - repeated RowFilter filters = 1; - } - - // A RowFilter which evaluates one of two possible RowFilters, depending on - // whether or not a predicate RowFilter outputs any cells from the input row. - // - // IMPORTANT NOTE: The predicate filter does not execute atomically with the - // true and false filters, which may lead to inconsistent or unexpected - // results. Additionally, Condition filters have poor performance, especially - // when filters are set for the false condition. - message Condition { - // If `predicate_filter` outputs any cells, then `true_filter` will be - // evaluated on the input row. Otherwise, `false_filter` will be evaluated. - RowFilter predicate_filter = 1; - - // The filter to apply to the input row if `predicate_filter` returns any - // results. If not provided, no results will be returned in the true case. - RowFilter true_filter = 2; - - // The filter to apply to the input row if `predicate_filter` does not - // return any results. If not provided, no results will be returned in the - // false case. - RowFilter false_filter = 3; - } - - // Which of the possible RowFilter types to apply. If none are set, this - // RowFilter returns all cells in the input row. - oneof filter { - // Applies several RowFilters to the data in sequence, progressively - // narrowing the results. - Chain chain = 1; - - // Applies several RowFilters to the data in parallel and combines the - // results. - Interleave interleave = 2; - - // Applies one of two possible RowFilters to the data based on the output of - // a predicate RowFilter. - Condition condition = 3; - - // ADVANCED USE ONLY. - // Hook for introspection into the RowFilter. Outputs all cells directly to - // the output of the read rather than to any parent filter. 
Consider the - // following example: - // - // Chain( - // FamilyRegex("A"), - // Interleave( - // All(), - // Chain(Label("foo"), Sink()) - // ), - // QualifierRegex("B") - // ) - // - // A,A,1,w - // A,B,2,x - // B,B,4,z - // | - // FamilyRegex("A") - // | - // A,A,1,w - // A,B,2,x - // | - // +------------+-------------+ - // | | - // All() Label(foo) - // | | - // A,A,1,w A,A,1,w,labels:[foo] - // A,B,2,x A,B,2,x,labels:[foo] - // | | - // | Sink() --------------+ - // | | | - // +------------+ x------+ A,A,1,w,labels:[foo] - // | A,B,2,x,labels:[foo] - // A,A,1,w | - // A,B,2,x | - // | | - // QualifierRegex("B") | - // | | - // A,B,2,x | - // | | - // +--------------------------------+ - // | - // A,A,1,w,labels:[foo] - // A,B,2,x,labels:[foo] // could be switched - // A,B,2,x // could be switched - // - // Despite being excluded by the qualifier filter, a copy of every cell - // that reaches the sink is present in the final result. - // - // As with an [Interleave][google.bigtable.v2.RowFilter.Interleave], - // duplicate cells are possible, and appear in an unspecified mutual order. - // In this case we have a duplicate with column "A:B" and timestamp 2, - // because one copy passed through the all filter while the other was - // passed through the label and sink. Note that one copy has label "foo", - // while the other does not. - // - // Cannot be used within the `predicate_filter`, `true_filter`, or - // `false_filter` of a [Condition][google.bigtable.v2.RowFilter.Condition]. - bool sink = 16; - - // Matches all cells, regardless of input. Functionally equivalent to - // leaving `filter` unset, but included for completeness. - bool pass_all_filter = 17; - - // Does not match any cells, regardless of input. Useful for temporarily - // disabling just part of a filter. - bool block_all_filter = 18; - - // Matches only cells from rows whose keys satisfy the given RE2 regex. In - // other words, passes through the entire row when the key matches, and - // otherwise produces an empty row. - // Note that, since row keys can contain arbitrary bytes, the `\C` escape - // sequence must be used if a true wildcard is desired. The `.` character - // will not match the new line character `\n`, which may be present in a - // binary key. - bytes row_key_regex_filter = 4; - - // Matches all cells from a row with probability p, and matches no cells - // from the row with probability 1-p. - double row_sample_filter = 14; - - // Matches only cells from columns whose families satisfy the given RE2 - // regex. For technical reasons, the regex must not contain the `:` - // character, even if it is not being used as a literal. - // Note that, since column families cannot contain the new line character - // `\n`, it is sufficient to use `.` as a full wildcard when matching - // column family names. - string family_name_regex_filter = 5; - - // Matches only cells from columns whose qualifiers satisfy the given RE2 - // regex. - // Note that, since column qualifiers can contain arbitrary bytes, the `\C` - // escape sequence must be used if a true wildcard is desired. The `.` - // character will not match the new line character `\n`, which may be - // present in a binary qualifier. - bytes column_qualifier_regex_filter = 6; - - // Matches only cells from columns within the given range. - ColumnRange column_range_filter = 7; - - // Matches only cells with timestamps within the given range. 
- TimestampRange timestamp_range_filter = 8; - - // Matches only cells with values that satisfy the given regular expression. - // Note that, since cell values can contain arbitrary bytes, the `\C` escape - // sequence must be used if a true wildcard is desired. The `.` character - // will not match the new line character `\n`, which may be present in a - // binary value. - bytes value_regex_filter = 9; - - // Matches only cells with values that fall within the given range. - ValueRange value_range_filter = 15; - - // Skips the first N cells of each row, matching all subsequent cells. - // If duplicate cells are present, as is possible when using an Interleave, - // each copy of the cell is counted separately. - int32 cells_per_row_offset_filter = 10; - - // Matches only the first N cells of each row. - // If duplicate cells are present, as is possible when using an Interleave, - // each copy of the cell is counted separately. - int32 cells_per_row_limit_filter = 11; - - // Matches only the most recent N cells within each column. For example, - // if N=2, this filter would match column `foo:bar` at timestamps 10 and 9, - // skip all earlier cells in `foo:bar`, and then begin matching again in - // column `foo:bar2`. - // If duplicate cells are present, as is possible when using an Interleave, - // each copy of the cell is counted separately. - int32 cells_per_column_limit_filter = 12; - - // Replaces each cell's value with the empty string. - bool strip_value_transformer = 13; - - // Applies the given label to all cells in the output row. This allows - // the client to determine which results were produced from which part of - // the filter. - // - // Values must be at most 15 characters in length, and match the RE2 - // pattern `[a-z0-9\\-]+` - // - // Due to a technical limitation, it is not currently possible to apply - // multiple labels to a cell. As a result, a Chain may have no more than - // one sub-filter which contains a `apply_label_transformer`. It is okay for - // an Interleave to contain multiple `apply_label_transformers`, as they - // will be applied to separate copies of the input. This may be relaxed in - // the future. - string apply_label_transformer = 19; - } -} - -// Specifies a particular change to be made to the contents of a row. -message Mutation { - // A Mutation which sets the value of the specified cell. - message SetCell { - // The name of the family into which new data should be written. - // Must match `[-_.a-zA-Z0-9]+` - string family_name = 1; - - // The qualifier of the column into which new data should be written. - // Can be any byte string, including the empty string. - bytes column_qualifier = 2; - - // The timestamp of the cell into which new data should be written. - // Use -1 for current Bigtable server time. - // Otherwise, the client should set this value itself, noting that the - // default value is a timestamp of zero if the field is left unspecified. - // Values must match the granularity of the table (e.g. micros, millis). - int64 timestamp_micros = 3; - - // The value to be written into the specified cell. - bytes value = 4; - } - - // A Mutation which deletes cells from the specified column, optionally - // restricting the deletions to a given timestamp range. - message DeleteFromColumn { - // The name of the family from which cells should be deleted. - // Must match `[-_.a-zA-Z0-9]+` - string family_name = 1; - - // The qualifier of the column from which cells should be deleted. - // Can be any byte string, including the empty string. 
- bytes column_qualifier = 2; - - // The range of timestamps within which cells should be deleted. - TimestampRange time_range = 3; - } - - // A Mutation which deletes all cells from the specified column family. - message DeleteFromFamily { - // The name of the family from which cells should be deleted. - // Must match `[-_.a-zA-Z0-9]+` - string family_name = 1; - } - - // A Mutation which deletes all cells from the containing row. - message DeleteFromRow { - - } - - // Which of the possible Mutation types to apply. - oneof mutation { - // Set a cell's value. - SetCell set_cell = 1; - - // Deletes cells from a column. - DeleteFromColumn delete_from_column = 2; - - // Deletes cells from a column family. - DeleteFromFamily delete_from_family = 3; - - // Deletes cells from the entire row. - DeleteFromRow delete_from_row = 4; - } -} - -// Specifies an atomic read/modify/write operation on the latest value of the -// specified column. -message ReadModifyWriteRule { - // The name of the family to which the read/modify/write should be applied. - // Must match `[-_.a-zA-Z0-9]+` - string family_name = 1; - - // The qualifier of the column to which the read/modify/write should be - // applied. - // Can be any byte string, including the empty string. - bytes column_qualifier = 2; - - // The rule used to determine the column's new latest value from its current - // latest value. - oneof rule { - // Rule specifying that `append_value` be appended to the existing value. - // If the targeted cell is unset, it will be treated as containing the - // empty string. - bytes append_value = 3; - - // Rule specifying that `increment_amount` be added to the existing value. - // If the targeted cell is unset, it will be treated as containing a zero. - // Otherwise, the targeted cell must contain an 8-byte value (interpreted - // as a 64-bit big-endian signed integer), or the entire request will fail. - int64 increment_amount = 4; - } -} From 26a76fd6c25ff2e17a5afad4b47fb9a1b385e0f3 Mon Sep 17 00:00:00 2001 From: Anthonios Partheniou Date: Tue, 25 May 2021 11:36:06 -0400 Subject: [PATCH 451/892] chore: s.remove_staging_dirs() should only be called once (#318) There is [an issue](https://github.com/googleapis/python-bigtable/blob/master/owlbot.py#L33) in the `owlbot.py` file added in #313 in that [s.remove_staging_dirs()](https://github.com/googleapis/synthtool/blob/master/synthtool/transforms.py#L309) should only be called once after all the files are copied over. [get_staging_dirs()](https://github.com/googleapis/synthtool/blob/master/synthtool/transforms.py#L280) will only return staging directories that exist. 
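
A minimal sketch of the corrected flow this commit describes, assuming the first loop's version variable (`bigtable_default_version`) and its move glob are defined earlier in `owlbot.py` as they are for the admin loop shown in the diff below; only the calls already visible in this file (`s.get_staging_dirs()`, `s.move()`, `s.remove_staging_dirs()`) are used, so treat this as illustrative rather than the verbatim result of the patch:

```python
import synthtool as s

# Copy generated files out of every staging directory first.
for library in s.get_staging_dirs(bigtable_default_version):       # assumed variable name
    s.move(library / "google/cloud/bigtable_v*")                   # assumed glob
    s.move(library / "tests")
    s.move(library / "scripts")

for library in s.get_staging_dirs(bigtable_admin_default_version):
    s.move(library / "google/cloud/bigtable_admin_v*")
    s.move(library / "tests")
    s.move(library / "scripts")

# Remove the staging directories exactly once, after all copies are done.
# Calling this between the loops (as before this patch) deletes the staging
# tree before get_staging_dirs() can return the admin directories, since
# get_staging_dirs() only returns directories that still exist.
s.remove_staging_dirs()
```
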
--- packages/google-cloud-bigtable/owlbot.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/packages/google-cloud-bigtable/owlbot.py b/packages/google-cloud-bigtable/owlbot.py index 0102060b664e..e0408b3e5429 100644 --- a/packages/google-cloud-bigtable/owlbot.py +++ b/packages/google-cloud-bigtable/owlbot.py @@ -30,8 +30,6 @@ s.move(library / "tests") s.move(library / "scripts") -s.remove_staging_dirs() - for library in s.get_staging_dirs(bigtable_admin_default_version): s.move(library / "google/cloud/bigtable_admin_v*") s.move(library / "tests") @@ -59,4 +57,4 @@ s.move(path, excludes=['noxfile.py']) -s.shell.run(["nox", "-s", "blacken"], hide_output=False) \ No newline at end of file +s.shell.run(["nox", "-s", "blacken"], hide_output=False) From 02d8445b8cf33da91d7349d8fa4d1eab1f067a7e Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Fri, 28 May 2021 16:56:06 +0000 Subject: [PATCH 452/892] chore: new owl bot post processor docker image (#319) Post-Processor: gcr.io/repo-automation-bots/owlbot-python:latest@sha256:c66ba3c8d7bc8566f47df841f98cd0097b28fff0b1864c86f5817f4c8c3e8600 --- packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml | 2 +- packages/google-cloud-bigtable/docs/conf.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 127c2cdf9503..da616c91a3b6 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -1,3 +1,3 @@ docker: image: gcr.io/repo-automation-bots/owlbot-python:latest - digest: sha256:0856ca711da1fd5ec9d6d7da6c50aa0bbf550fb94acb47b55159a640791987bf + digest: sha256:c66ba3c8d7bc8566f47df841f98cd0097b28fff0b1864c86f5817f4c8c3e8600 diff --git a/packages/google-cloud-bigtable/docs/conf.py b/packages/google-cloud-bigtable/docs/conf.py index fcd69d50d030..ebc5c9904993 100644 --- a/packages/google-cloud-bigtable/docs/conf.py +++ b/packages/google-cloud-bigtable/docs/conf.py @@ -363,6 +363,7 @@ "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None,), "grpc": ("https://grpc.github.io/grpc/python/", None), "proto-plus": ("https://proto-plus-python.readthedocs.io/en/latest/", None), + "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None), } From 6e3725d34666372b5df3fa72bbf5257e09931374 Mon Sep 17 00:00:00 2001 From: Anthonios Partheniou Date: Mon, 7 Jun 2021 07:20:25 -0400 Subject: [PATCH 453/892] chore: update owlbot.py to properly copy folders from googleapis-gen (#320) --- .../.github/.OwlBot.yaml | 4 +- .../cloud/bigtable_admin_v2/__init__.py | 11 +- .../bigtable_admin_v2/gapic_metadata.json | 447 ++++++ .../bigtable_admin_v2/services/__init__.py | 1 - .../bigtable_instance_admin/__init__.py | 2 - .../bigtable_instance_admin/async_client.py | 132 +- .../bigtable_instance_admin/client.py | 160 +- .../bigtable_instance_admin/pagers.py | 4 +- .../transports/__init__.py | 2 - .../transports/base.py | 235 ++- .../transports/grpc.py | 83 +- .../transports/grpc_asyncio.py | 85 +- .../services/bigtable_table_admin/__init__.py | 2 - .../bigtable_table_admin/async_client.py | 137 +- .../services/bigtable_table_admin/client.py | 181 +-- .../services/bigtable_table_admin/pagers.py | 8 +- .../transports/__init__.py | 2 - .../bigtable_table_admin/transports/base.py | 244 +-- .../bigtable_table_admin/transports/grpc.py | 87 +- 
.../transports/grpc_asyncio.py | 95 +- .../cloud/bigtable_admin_v2/types/__init__.py | 2 - .../types/bigtable_instance_admin.py | 147 +- .../types/bigtable_table_admin.py | 185 +-- .../cloud/bigtable_admin_v2/types/common.py | 13 +- .../cloud/bigtable_admin_v2/types/instance.py | 43 +- .../cloud/bigtable_admin_v2/types/table.py | 82 +- packages/google-cloud-bigtable/owlbot.py | 47 +- .../fixup_bigtable_admin_v2_keywords.py | 81 +- .../unit/gapic/bigtable_admin_v2/__init__.py | 1 - .../test_bigtable_instance_admin.py | 1151 +++++++------- .../test_bigtable_table_admin.py | 1379 +++++++++-------- 31 files changed, 2721 insertions(+), 2332 deletions(-) create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_metadata.json diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.yaml index 7a10c025ca9d..64de1cedac36 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.yaml @@ -20,9 +20,9 @@ deep-remove-regex: deep-copy-regex: - source: /google/bigtable/admin/(v.*)/.*-py/(.*) - dest: /owl-bot-staging/admin/$1/$2 + dest: /owl-bot-staging/bigtable_admin/$1/$2 - source: /google/bigtable/(v.*)/.*-py/(.*) - dest: /owl-bot-staging/$1/$2 + dest: /owl-bot-staging/bigtable/$1/$2 begin-after-commit-hash: a21f1091413a260393548c1b2ac44b7347923f08 diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py index 79a9bea684e0..db670f299d21 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- - # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,7 +15,10 @@ # from .services.bigtable_instance_admin import BigtableInstanceAdminClient +from .services.bigtable_instance_admin import BigtableInstanceAdminAsyncClient from .services.bigtable_table_admin import BigtableTableAdminClient +from .services.bigtable_table_admin import BigtableTableAdminAsyncClient + from .types.bigtable_instance_admin import CreateAppProfileRequest from .types.bigtable_instance_admin import CreateClusterMetadata from .types.bigtable_instance_admin import CreateClusterRequest @@ -79,15 +81,17 @@ from .types.table import EncryptionInfo from .types.table import GcRule from .types.table import RestoreInfo -from .types.table import RestoreSourceType from .types.table import Snapshot from .types.table import Table - +from .types.table import RestoreSourceType __all__ = ( + "BigtableInstanceAdminAsyncClient", + "BigtableTableAdminAsyncClient", "AppProfile", "Backup", "BackupInfo", + "BigtableInstanceAdminClient", "BigtableTableAdminClient", "CheckConsistencyRequest", "CheckConsistencyResponse", @@ -151,5 +155,4 @@ "UpdateBackupRequest", "UpdateClusterMetadata", "UpdateInstanceMetadata", - "BigtableInstanceAdminClient", ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_metadata.json b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_metadata.json new file mode 100644 index 000000000000..f5e13454327a --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_metadata.json @@ -0,0 +1,447 @@ + { + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "python", + 
"libraryPackage": "google.cloud.bigtable_admin_v2", + "protoPackage": "google.bigtable.admin.v2", + "schema": "1.0", + "services": { + "BigtableInstanceAdmin": { + "clients": { + "grpc": { + "libraryClient": "BigtableInstanceAdminClient", + "rpcs": { + "CreateAppProfile": { + "methods": [ + "create_app_profile" + ] + }, + "CreateCluster": { + "methods": [ + "create_cluster" + ] + }, + "CreateInstance": { + "methods": [ + "create_instance" + ] + }, + "DeleteAppProfile": { + "methods": [ + "delete_app_profile" + ] + }, + "DeleteCluster": { + "methods": [ + "delete_cluster" + ] + }, + "DeleteInstance": { + "methods": [ + "delete_instance" + ] + }, + "GetAppProfile": { + "methods": [ + "get_app_profile" + ] + }, + "GetCluster": { + "methods": [ + "get_cluster" + ] + }, + "GetIamPolicy": { + "methods": [ + "get_iam_policy" + ] + }, + "GetInstance": { + "methods": [ + "get_instance" + ] + }, + "ListAppProfiles": { + "methods": [ + "list_app_profiles" + ] + }, + "ListClusters": { + "methods": [ + "list_clusters" + ] + }, + "ListInstances": { + "methods": [ + "list_instances" + ] + }, + "PartialUpdateInstance": { + "methods": [ + "partial_update_instance" + ] + }, + "SetIamPolicy": { + "methods": [ + "set_iam_policy" + ] + }, + "TestIamPermissions": { + "methods": [ + "test_iam_permissions" + ] + }, + "UpdateAppProfile": { + "methods": [ + "update_app_profile" + ] + }, + "UpdateCluster": { + "methods": [ + "update_cluster" + ] + }, + "UpdateInstance": { + "methods": [ + "update_instance" + ] + } + } + }, + "grpc-async": { + "libraryClient": "BigtableInstanceAdminAsyncClient", + "rpcs": { + "CreateAppProfile": { + "methods": [ + "create_app_profile" + ] + }, + "CreateCluster": { + "methods": [ + "create_cluster" + ] + }, + "CreateInstance": { + "methods": [ + "create_instance" + ] + }, + "DeleteAppProfile": { + "methods": [ + "delete_app_profile" + ] + }, + "DeleteCluster": { + "methods": [ + "delete_cluster" + ] + }, + "DeleteInstance": { + "methods": [ + "delete_instance" + ] + }, + "GetAppProfile": { + "methods": [ + "get_app_profile" + ] + }, + "GetCluster": { + "methods": [ + "get_cluster" + ] + }, + "GetIamPolicy": { + "methods": [ + "get_iam_policy" + ] + }, + "GetInstance": { + "methods": [ + "get_instance" + ] + }, + "ListAppProfiles": { + "methods": [ + "list_app_profiles" + ] + }, + "ListClusters": { + "methods": [ + "list_clusters" + ] + }, + "ListInstances": { + "methods": [ + "list_instances" + ] + }, + "PartialUpdateInstance": { + "methods": [ + "partial_update_instance" + ] + }, + "SetIamPolicy": { + "methods": [ + "set_iam_policy" + ] + }, + "TestIamPermissions": { + "methods": [ + "test_iam_permissions" + ] + }, + "UpdateAppProfile": { + "methods": [ + "update_app_profile" + ] + }, + "UpdateCluster": { + "methods": [ + "update_cluster" + ] + }, + "UpdateInstance": { + "methods": [ + "update_instance" + ] + } + } + } + } + }, + "BigtableTableAdmin": { + "clients": { + "grpc": { + "libraryClient": "BigtableTableAdminClient", + "rpcs": { + "CheckConsistency": { + "methods": [ + "check_consistency" + ] + }, + "CreateBackup": { + "methods": [ + "create_backup" + ] + }, + "CreateTable": { + "methods": [ + "create_table" + ] + }, + "CreateTableFromSnapshot": { + "methods": [ + "create_table_from_snapshot" + ] + }, + "DeleteBackup": { + "methods": [ + "delete_backup" + ] + }, + "DeleteSnapshot": { + "methods": [ + "delete_snapshot" + ] + }, + "DeleteTable": { + "methods": [ + "delete_table" + ] + }, + "DropRowRange": { + "methods": [ + "drop_row_range" + ] + }, + 
"GenerateConsistencyToken": { + "methods": [ + "generate_consistency_token" + ] + }, + "GetBackup": { + "methods": [ + "get_backup" + ] + }, + "GetIamPolicy": { + "methods": [ + "get_iam_policy" + ] + }, + "GetSnapshot": { + "methods": [ + "get_snapshot" + ] + }, + "GetTable": { + "methods": [ + "get_table" + ] + }, + "ListBackups": { + "methods": [ + "list_backups" + ] + }, + "ListSnapshots": { + "methods": [ + "list_snapshots" + ] + }, + "ListTables": { + "methods": [ + "list_tables" + ] + }, + "ModifyColumnFamilies": { + "methods": [ + "modify_column_families" + ] + }, + "RestoreTable": { + "methods": [ + "restore_table" + ] + }, + "SetIamPolicy": { + "methods": [ + "set_iam_policy" + ] + }, + "SnapshotTable": { + "methods": [ + "snapshot_table" + ] + }, + "TestIamPermissions": { + "methods": [ + "test_iam_permissions" + ] + }, + "UpdateBackup": { + "methods": [ + "update_backup" + ] + } + } + }, + "grpc-async": { + "libraryClient": "BigtableTableAdminAsyncClient", + "rpcs": { + "CheckConsistency": { + "methods": [ + "check_consistency" + ] + }, + "CreateBackup": { + "methods": [ + "create_backup" + ] + }, + "CreateTable": { + "methods": [ + "create_table" + ] + }, + "CreateTableFromSnapshot": { + "methods": [ + "create_table_from_snapshot" + ] + }, + "DeleteBackup": { + "methods": [ + "delete_backup" + ] + }, + "DeleteSnapshot": { + "methods": [ + "delete_snapshot" + ] + }, + "DeleteTable": { + "methods": [ + "delete_table" + ] + }, + "DropRowRange": { + "methods": [ + "drop_row_range" + ] + }, + "GenerateConsistencyToken": { + "methods": [ + "generate_consistency_token" + ] + }, + "GetBackup": { + "methods": [ + "get_backup" + ] + }, + "GetIamPolicy": { + "methods": [ + "get_iam_policy" + ] + }, + "GetSnapshot": { + "methods": [ + "get_snapshot" + ] + }, + "GetTable": { + "methods": [ + "get_table" + ] + }, + "ListBackups": { + "methods": [ + "list_backups" + ] + }, + "ListSnapshots": { + "methods": [ + "list_snapshots" + ] + }, + "ListTables": { + "methods": [ + "list_tables" + ] + }, + "ModifyColumnFamilies": { + "methods": [ + "modify_column_families" + ] + }, + "RestoreTable": { + "methods": [ + "restore_table" + ] + }, + "SetIamPolicy": { + "methods": [ + "set_iam_policy" + ] + }, + "SnapshotTable": { + "methods": [ + "snapshot_table" + ] + }, + "TestIamPermissions": { + "methods": [ + "test_iam_permissions" + ] + }, + "UpdateBackup": { + "methods": [ + "update_backup" + ] + } + } + } + } + } + } +} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/__init__.py index 42ffdf2bc43d..4de65971c238 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/__init__.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- - # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py index 5606dd4ffa9e..5746abf02969 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- - # Copyright 2020 Google LLC 
# # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,7 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. # - from .client import BigtableInstanceAdminClient from .async_client import BigtableInstanceAdminAsyncClient diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py index 4c849e3cb9f4..c118257de4fe 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- - # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,7 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. # - from collections import OrderedDict import functools import re @@ -22,10 +20,10 @@ import pkg_resources import google.api_core.client_options as ClientOptions # type: ignore -from google.api_core import exceptions # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore from google.api_core import gapic_v1 # type: ignore from google.api_core import retry as retries # type: ignore -from google.auth import credentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore from google.api_core import operation # type: ignore @@ -35,10 +33,9 @@ from google.cloud.bigtable_admin_v2.types import common from google.cloud.bigtable_admin_v2.types import instance from google.cloud.bigtable_admin_v2.types import instance as gba_instance -from google.iam.v1 import iam_policy_pb2 as iam_policy # type: ignore -from google.iam.v1 import policy_pb2 as policy # type: ignore -from google.protobuf import field_mask_pb2 as field_mask # type: ignore - +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore from .transports.base import BigtableInstanceAdminTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import BigtableInstanceAdminGrpcAsyncIOTransport from .client import BigtableInstanceAdminClient @@ -68,31 +65,26 @@ class BigtableInstanceAdminAsyncClient: ) instance_path = staticmethod(BigtableInstanceAdminClient.instance_path) parse_instance_path = staticmethod(BigtableInstanceAdminClient.parse_instance_path) - common_billing_account_path = staticmethod( BigtableInstanceAdminClient.common_billing_account_path ) parse_common_billing_account_path = staticmethod( BigtableInstanceAdminClient.parse_common_billing_account_path ) - common_folder_path = staticmethod(BigtableInstanceAdminClient.common_folder_path) parse_common_folder_path = staticmethod( BigtableInstanceAdminClient.parse_common_folder_path ) - common_organization_path = staticmethod( BigtableInstanceAdminClient.common_organization_path ) parse_common_organization_path = staticmethod( BigtableInstanceAdminClient.parse_common_organization_path ) - common_project_path = staticmethod(BigtableInstanceAdminClient.common_project_path) parse_common_project_path = staticmethod( BigtableInstanceAdminClient.parse_common_project_path ) - common_location_path = 
staticmethod( BigtableInstanceAdminClient.common_location_path ) @@ -102,7 +94,8 @@ class BigtableInstanceAdminAsyncClient: @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials info. + """Creates an instance of this client using the provided credentials + info. Args: info (dict): The service account private key info. @@ -117,7 +110,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials - file. + file. Args: filename (str): The path to the service account private key json @@ -134,7 +127,7 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): @property def transport(self) -> BigtableInstanceAdminTransport: - """Return the transport used by the client instance. + """Returns the transport used by the client instance. Returns: BigtableInstanceAdminTransport: The transport used by the client instance. @@ -149,12 +142,12 @@ def transport(self) -> BigtableInstanceAdminTransport: def __init__( self, *, - credentials: credentials.Credentials = None, + credentials: ga_credentials.Credentials = None, transport: Union[str, BigtableInstanceAdminTransport] = "grpc_asyncio", client_options: ClientOptions = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiate the bigtable instance admin client. + """Instantiates the bigtable instance admin client. Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -186,7 +179,6 @@ def __init__( google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport creation failed for any reason. """ - self._client = BigtableInstanceAdminClient( credentials=credentials, transport=transport, @@ -248,7 +240,6 @@ async def create_instance( This corresponds to the ``clusters`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -280,7 +271,6 @@ async def create_instance( # If we have keyword arguments corresponding to fields on the # request, apply these. - if parent is not None: request.parent = parent if instance_id is not None: @@ -342,7 +332,6 @@ async def get_instance( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -372,7 +361,6 @@ async def get_instance( # If we have keyword arguments corresponding to fields on the # request, apply these. - if name is not None: request.name = name @@ -385,7 +373,8 @@ async def get_instance( maximum=60.0, multiplier=2, predicate=retries.if_exception_type( - exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, ), deadline=60.0, ), @@ -428,7 +417,6 @@ async def list_instances( This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
@@ -455,7 +443,6 @@ async def list_instances( # If we have keyword arguments corresponding to fields on the # request, apply these. - if parent is not None: request.parent = parent @@ -468,7 +455,8 @@ async def list_instances( maximum=60.0, multiplier=2, predicate=retries.if_exception_type( - exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, ), deadline=60.0, ), @@ -509,7 +497,6 @@ async def update_instance( served from all [Clusters][google.bigtable.admin.v2.Cluster] in the instance. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -526,7 +513,6 @@ async def update_instance( """ # Create or coerce a protobuf request object. - request = instance.Instance(request) # Wrap the RPC method; this adds retry and timeout information, @@ -538,7 +524,8 @@ async def update_instance( maximum=60.0, multiplier=2, predicate=retries.if_exception_type( - exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, ), deadline=60.0, ), @@ -563,7 +550,7 @@ async def partial_update_instance( request: bigtable_instance_admin.PartialUpdateInstanceRequest = None, *, instance: gba_instance.Instance = None, - update_mask: field_mask.FieldMask = None, + update_mask: field_mask_pb2.FieldMask = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -591,7 +578,6 @@ async def partial_update_instance( This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -623,7 +609,6 @@ async def partial_update_instance( # If we have keyword arguments corresponding to fields on the # request, apply these. - if instance is not None: request.instance = instance if update_mask is not None: @@ -638,7 +623,8 @@ async def partial_update_instance( maximum=60.0, multiplier=2, predicate=retries.if_exception_type( - exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, ), deadline=60.0, ), @@ -691,7 +677,6 @@ async def delete_instance( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -712,7 +697,6 @@ async def delete_instance( # If we have keyword arguments corresponding to fields on the # request, apply these. - if name is not None: request.name = name @@ -776,7 +760,6 @@ async def create_cluster( This corresponds to the ``cluster`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -807,7 +790,6 @@ async def create_cluster( # If we have keyword arguments corresponding to fields on the # request, apply these. - if parent is not None: request.parent = parent if cluster_id is not None: @@ -866,7 +848,6 @@ async def get_cluster( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -895,7 +876,6 @@ async def get_cluster( # If we have keyword arguments corresponding to fields on the # request, apply these. - if name is not None: request.name = name @@ -908,7 +888,8 @@ async def get_cluster( maximum=60.0, multiplier=2, predicate=retries.if_exception_type( - exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, ), deadline=60.0, ), @@ -953,7 +934,6 @@ async def list_clusters( This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -980,7 +960,6 @@ async def list_clusters( # If we have keyword arguments corresponding to fields on the # request, apply these. - if parent is not None: request.parent = parent @@ -993,7 +972,8 @@ async def list_clusters( maximum=60.0, multiplier=2, predicate=retries.if_exception_type( - exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, ), deadline=60.0, ), @@ -1029,7 +1009,6 @@ async def update_cluster( particular cloud location, capable of serving all [Tables][google.bigtable.admin.v2.Table] in the parent [Instance][google.bigtable.admin.v2.Instance]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1047,7 +1026,6 @@ async def update_cluster( """ # Create or coerce a protobuf request object. - request = instance.Cluster(request) # Wrap the RPC method; this adds retry and timeout information, @@ -1059,7 +1037,8 @@ async def update_cluster( maximum=60.0, multiplier=2, predicate=retries.if_exception_type( - exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, ), deadline=60.0, ), @@ -1110,7 +1089,6 @@ async def delete_cluster( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1131,7 +1109,6 @@ async def delete_cluster( # If we have keyword arguments corresponding to fields on the # request, apply these. - if name is not None: request.name = name @@ -1195,7 +1172,6 @@ async def create_app_profile( This corresponds to the ``app_profile`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1223,7 +1199,6 @@ async def create_app_profile( # If we have keyword arguments corresponding to fields on the # request, apply these. - if parent is not None: request.parent = parent if app_profile_id is not None: @@ -1274,7 +1249,6 @@ async def get_app_profile( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
@@ -1302,7 +1276,6 @@ async def get_app_profile( # If we have keyword arguments corresponding to fields on the # request, apply these. - if name is not None: request.name = name @@ -1315,7 +1288,8 @@ async def get_app_profile( maximum=60.0, multiplier=2, predicate=retries.if_exception_type( - exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, ), deadline=60.0, ), @@ -1361,7 +1335,6 @@ async def list_app_profiles( This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1391,7 +1364,6 @@ async def list_app_profiles( # If we have keyword arguments corresponding to fields on the # request, apply these. - if parent is not None: request.parent = parent @@ -1404,7 +1376,8 @@ async def list_app_profiles( maximum=60.0, multiplier=2, predicate=retries.if_exception_type( - exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, ), deadline=60.0, ), @@ -1435,7 +1408,7 @@ async def update_app_profile( request: bigtable_instance_admin.UpdateAppProfileRequest = None, *, app_profile: instance.AppProfile = None, - update_mask: field_mask.FieldMask = None, + update_mask: field_mask_pb2.FieldMask = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -1461,7 +1434,6 @@ async def update_app_profile( This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1490,7 +1462,6 @@ async def update_app_profile( # If we have keyword arguments corresponding to fields on the # request, apply these. - if app_profile is not None: request.app_profile = app_profile if update_mask is not None: @@ -1505,7 +1476,8 @@ async def update_app_profile( maximum=60.0, multiplier=2, predicate=retries.if_exception_type( - exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, ), deadline=60.0, ), @@ -1558,7 +1530,6 @@ async def delete_app_profile( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1579,7 +1550,6 @@ async def delete_app_profile( # If we have keyword arguments corresponding to fields on the # request, apply these. - if name is not None: request.name = name @@ -1604,13 +1574,13 @@ async def delete_app_profile( async def get_iam_policy( self, - request: iam_policy.GetIamPolicyRequest = None, + request: iam_policy_pb2.GetIamPolicyRequest = None, *, resource: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> policy.Policy: + ) -> policy_pb2.Policy: r"""Gets the access control policy for an instance resource. Returns an empty policy if an instance exists but does not have a policy set. 
@@ -1628,7 +1598,6 @@ async def get_iam_policy( This corresponds to the ``resource`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1707,10 +1676,9 @@ async def get_iam_policy( # The request isn't a proto-plus wrapped type, # so it must be constructed via keyword expansion. if isinstance(request, dict): - request = iam_policy.GetIamPolicyRequest(**request) - + request = iam_policy_pb2.GetIamPolicyRequest(**request) elif not request: - request = iam_policy.GetIamPolicyRequest(resource=resource,) + request = iam_policy_pb2.GetIamPolicyRequest(resource=resource,) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. @@ -1721,7 +1689,8 @@ async def get_iam_policy( maximum=60.0, multiplier=2, predicate=retries.if_exception_type( - exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, ), deadline=60.0, ), @@ -1743,13 +1712,13 @@ async def get_iam_policy( async def set_iam_policy( self, - request: iam_policy.SetIamPolicyRequest = None, + request: iam_policy_pb2.SetIamPolicyRequest = None, *, resource: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> policy.Policy: + ) -> policy_pb2.Policy: r"""Sets the access control policy on an instance resource. Replaces any existing policy. @@ -1766,7 +1735,6 @@ async def set_iam_policy( This corresponds to the ``resource`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1845,10 +1813,9 @@ async def set_iam_policy( # The request isn't a proto-plus wrapped type, # so it must be constructed via keyword expansion. if isinstance(request, dict): - request = iam_policy.SetIamPolicyRequest(**request) - + request = iam_policy_pb2.SetIamPolicyRequest(**request) elif not request: - request = iam_policy.SetIamPolicyRequest(resource=resource,) + request = iam_policy_pb2.SetIamPolicyRequest(resource=resource,) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. @@ -1872,14 +1839,14 @@ async def set_iam_policy( async def test_iam_permissions( self, - request: iam_policy.TestIamPermissionsRequest = None, + request: iam_policy_pb2.TestIamPermissionsRequest = None, *, resource: str = None, permissions: Sequence[str] = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> iam_policy.TestIamPermissionsResponse: + ) -> iam_policy_pb2.TestIamPermissionsResponse: r"""Returns permissions that the caller has on the specified instance resource. @@ -1905,7 +1872,6 @@ async def test_iam_permissions( This corresponds to the ``permissions`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1929,10 +1895,9 @@ async def test_iam_permissions( # The request isn't a proto-plus wrapped type, # so it must be constructed via keyword expansion. 
if isinstance(request, dict): - request = iam_policy.TestIamPermissionsRequest(**request) - + request = iam_policy_pb2.TestIamPermissionsRequest(**request) elif not request: - request = iam_policy.TestIamPermissionsRequest( + request = iam_policy_pb2.TestIamPermissionsRequest( resource=resource, permissions=permissions, ) @@ -1945,7 +1910,8 @@ async def test_iam_permissions( maximum=60.0, multiplier=2, predicate=retries.if_exception_type( - exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, ), deadline=60.0, ), diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py index 68768d70e8b0..ea12552fc6b3 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- - # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,7 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. # - from collections import OrderedDict from distutils import util import os @@ -23,10 +21,10 @@ import pkg_resources from google.api_core import client_options as client_options_lib # type: ignore -from google.api_core import exceptions # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore from google.api_core import gapic_v1 # type: ignore from google.api_core import retry as retries # type: ignore -from google.auth import credentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport import mtls # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore @@ -39,10 +37,9 @@ from google.cloud.bigtable_admin_v2.types import common from google.cloud.bigtable_admin_v2.types import instance from google.cloud.bigtable_admin_v2.types import instance as gba_instance -from google.iam.v1 import iam_policy_pb2 as iam_policy # type: ignore -from google.iam.v1 import policy_pb2 as policy # type: ignore -from google.protobuf import field_mask_pb2 as field_mask # type: ignore - +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore from .transports.base import BigtableInstanceAdminTransport, DEFAULT_CLIENT_INFO from .transports.grpc import BigtableInstanceAdminGrpcTransport from .transports.grpc_asyncio import BigtableInstanceAdminGrpcAsyncIOTransport @@ -65,7 +62,7 @@ class BigtableInstanceAdminClientMeta(type): def get_transport_class( cls, label: str = None, ) -> Type[BigtableInstanceAdminTransport]: - """Return an appropriate transport class. + """Returns an appropriate transport class. Args: label: The name of the desired transport. If none is @@ -92,7 +89,8 @@ class BigtableInstanceAdminClient(metaclass=BigtableInstanceAdminClientMeta): @staticmethod def _get_default_mtls_endpoint(api_endpoint): - """Convert api endpoint to mTLS endpoint. + """Converts api endpoint to mTLS endpoint. 
+ Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. Args: @@ -126,7 +124,8 @@ def _get_default_mtls_endpoint(api_endpoint): @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials info. + """Creates an instance of this client using the provided credentials + info. Args: info (dict): The service account private key info. @@ -143,7 +142,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials - file. + file. Args: filename (str): The path to the service account private key json @@ -162,23 +161,24 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): @property def transport(self) -> BigtableInstanceAdminTransport: - """Return the transport used by the client instance. + """Returns the transport used by the client instance. Returns: - BigtableInstanceAdminTransport: The transport used by the client instance. + BigtableInstanceAdminTransport: The transport used by the client + instance. """ return self._transport @staticmethod def app_profile_path(project: str, instance: str, app_profile: str,) -> str: - """Return a fully-qualified app_profile string.""" + """Returns a fully-qualified app_profile string.""" return "projects/{project}/instances/{instance}/appProfiles/{app_profile}".format( project=project, instance=instance, app_profile=app_profile, ) @staticmethod def parse_app_profile_path(path: str) -> Dict[str, str]: - """Parse a app_profile path into its component segments.""" + """Parses a app_profile path into its component segments.""" m = re.match( r"^projects/(?P.+?)/instances/(?P.+?)/appProfiles/(?P.+?)$", path, @@ -187,14 +187,14 @@ def parse_app_profile_path(path: str) -> Dict[str, str]: @staticmethod def cluster_path(project: str, instance: str, cluster: str,) -> str: - """Return a fully-qualified cluster string.""" + """Returns a fully-qualified cluster string.""" return "projects/{project}/instances/{instance}/clusters/{cluster}".format( project=project, instance=instance, cluster=cluster, ) @staticmethod def parse_cluster_path(path: str) -> Dict[str, str]: - """Parse a cluster path into its component segments.""" + """Parses a cluster path into its component segments.""" m = re.match( r"^projects/(?P.+?)/instances/(?P.+?)/clusters/(?P.+?)$", path, @@ -205,7 +205,7 @@ def parse_cluster_path(path: str) -> Dict[str, str]: def crypto_key_path( project: str, location: str, key_ring: str, crypto_key: str, ) -> str: - """Return a fully-qualified crypto_key string.""" + """Returns a fully-qualified crypto_key string.""" return "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}".format( project=project, location=location, @@ -215,7 +215,7 @@ def crypto_key_path( @staticmethod def parse_crypto_key_path(path: str) -> Dict[str, str]: - """Parse a crypto_key path into its component segments.""" + """Parses a crypto_key path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/keyRings/(?P.+?)/cryptoKeys/(?P.+?)$", path, @@ -224,20 +224,20 @@ def parse_crypto_key_path(path: str) -> Dict[str, str]: @staticmethod def instance_path(project: str, instance: str,) -> str: - """Return a fully-qualified instance string.""" + """Returns a fully-qualified instance string.""" return 
"projects/{project}/instances/{instance}".format( project=project, instance=instance, ) @staticmethod def parse_instance_path(path: str) -> Dict[str, str]: - """Parse a instance path into its component segments.""" + """Parses a instance path into its component segments.""" m = re.match(r"^projects/(?P.+?)/instances/(?P.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_billing_account_path(billing_account: str,) -> str: - """Return a fully-qualified billing_account string.""" + """Returns a fully-qualified billing_account string.""" return "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -250,7 +250,7 @@ def parse_common_billing_account_path(path: str) -> Dict[str, str]: @staticmethod def common_folder_path(folder: str,) -> str: - """Return a fully-qualified folder string.""" + """Returns a fully-qualified folder string.""" return "folders/{folder}".format(folder=folder,) @staticmethod @@ -261,7 +261,7 @@ def parse_common_folder_path(path: str) -> Dict[str, str]: @staticmethod def common_organization_path(organization: str,) -> str: - """Return a fully-qualified organization string.""" + """Returns a fully-qualified organization string.""" return "organizations/{organization}".format(organization=organization,) @staticmethod @@ -272,7 +272,7 @@ def parse_common_organization_path(path: str) -> Dict[str, str]: @staticmethod def common_project_path(project: str,) -> str: - """Return a fully-qualified project string.""" + """Returns a fully-qualified project string.""" return "projects/{project}".format(project=project,) @staticmethod @@ -283,7 +283,7 @@ def parse_common_project_path(path: str) -> Dict[str, str]: @staticmethod def common_location_path(project: str, location: str,) -> str: - """Return a fully-qualified location string.""" + """Returns a fully-qualified location string.""" return "projects/{project}/locations/{location}".format( project=project, location=location, ) @@ -297,12 +297,12 @@ def parse_common_location_path(path: str) -> Dict[str, str]: def __init__( self, *, - credentials: Optional[credentials.Credentials] = None, + credentials: Optional[ga_credentials.Credentials] = None, transport: Union[str, BigtableInstanceAdminTransport, None] = None, client_options: Optional[client_options_lib.ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiate the bigtable instance admin client. + """Instantiates the bigtable instance admin client. Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -357,9 +357,10 @@ def __init__( client_cert_source_func = client_options.client_cert_source else: is_mtls = mtls.has_default_client_cert_source() - client_cert_source_func = ( - mtls.default_client_cert_source() if is_mtls else None - ) + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None # Figure out which api endpoint to use. if client_options.api_endpoint is not None: @@ -371,12 +372,14 @@ def __init__( elif use_mtls_env == "always": api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": - api_endpoint = ( - self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT - ) + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT else: raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. 
Accepted " + "values: never, auto, always" ) # Save or instantiate the transport. @@ -391,8 +394,8 @@ def __init__( ) if client_options.scopes: raise ValueError( - "When providing a transport instance, " - "provide its scopes directly." + "When providing a transport instance, provide its scopes " + "directly." ) self._transport = transport else: @@ -461,7 +464,6 @@ def create_instance( This corresponds to the ``clusters`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -495,10 +497,8 @@ def create_instance( # there are no flattened fields. if not isinstance(request, bigtable_instance_admin.CreateInstanceRequest): request = bigtable_instance_admin.CreateInstanceRequest(request) - # If we have keyword arguments corresponding to fields on the # request, apply these. - if parent is not None: request.parent = parent if instance_id is not None: @@ -555,7 +555,6 @@ def get_instance( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -587,10 +586,8 @@ def get_instance( # there are no flattened fields. if not isinstance(request, bigtable_instance_admin.GetInstanceRequest): request = bigtable_instance_admin.GetInstanceRequest(request) - # If we have keyword arguments corresponding to fields on the # request, apply these. - if name is not None: request.name = name @@ -633,7 +630,6 @@ def list_instances( This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -662,10 +658,8 @@ def list_instances( # there are no flattened fields. if not isinstance(request, bigtable_instance_admin.ListInstancesRequest): request = bigtable_instance_admin.ListInstancesRequest(request) - # If we have keyword arguments corresponding to fields on the # request, apply these. - if parent is not None: request.parent = parent @@ -706,7 +700,6 @@ def update_instance( served from all [Clusters][google.bigtable.admin.v2.Cluster] in the instance. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -723,7 +716,6 @@ def update_instance( """ # Create or coerce a protobuf request object. - # Minor optimization to avoid making a copy if the user passes # in a instance.Instance. # There's no risk of modifying the input as we've already verified @@ -752,7 +744,7 @@ def partial_update_instance( request: bigtable_instance_admin.PartialUpdateInstanceRequest = None, *, instance: gba_instance.Instance = None, - update_mask: field_mask.FieldMask = None, + update_mask: field_mask_pb2.FieldMask = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -780,7 +772,6 @@ def partial_update_instance( This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
@@ -816,10 +807,8 @@ def partial_update_instance( request, bigtable_instance_admin.PartialUpdateInstanceRequest ): request = bigtable_instance_admin.PartialUpdateInstanceRequest(request) - # If we have keyword arguments corresponding to fields on the # request, apply these. - if instance is not None: request.instance = instance if update_mask is not None: @@ -874,7 +863,6 @@ def delete_instance( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -897,10 +885,8 @@ def delete_instance( # there are no flattened fields. if not isinstance(request, bigtable_instance_admin.DeleteInstanceRequest): request = bigtable_instance_admin.DeleteInstanceRequest(request) - # If we have keyword arguments corresponding to fields on the # request, apply these. - if name is not None: request.name = name @@ -960,7 +946,6 @@ def create_cluster( This corresponds to the ``cluster`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -993,10 +978,8 @@ def create_cluster( # there are no flattened fields. if not isinstance(request, bigtable_instance_admin.CreateClusterRequest): request = bigtable_instance_admin.CreateClusterRequest(request) - # If we have keyword arguments corresponding to fields on the # request, apply these. - if parent is not None: request.parent = parent if cluster_id is not None: @@ -1051,7 +1034,6 @@ def get_cluster( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1082,10 +1064,8 @@ def get_cluster( # there are no flattened fields. if not isinstance(request, bigtable_instance_admin.GetClusterRequest): request = bigtable_instance_admin.GetClusterRequest(request) - # If we have keyword arguments corresponding to fields on the # request, apply these. - if name is not None: request.name = name @@ -1130,7 +1110,6 @@ def list_clusters( This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1159,10 +1138,8 @@ def list_clusters( # there are no flattened fields. if not isinstance(request, bigtable_instance_admin.ListClustersRequest): request = bigtable_instance_admin.ListClustersRequest(request) - # If we have keyword arguments corresponding to fields on the # request, apply these. - if parent is not None: request.parent = parent @@ -1198,7 +1175,6 @@ def update_cluster( particular cloud location, capable of serving all [Tables][google.bigtable.admin.v2.Table] in the parent [Instance][google.bigtable.admin.v2.Instance]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1216,7 +1192,6 @@ def update_cluster( """ # Create or coerce a protobuf request object. - # Minor optimization to avoid making a copy if the user passes # in a instance.Cluster. 
# There's no risk of modifying the input as we've already verified @@ -1271,7 +1246,6 @@ def delete_cluster( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1294,10 +1268,8 @@ def delete_cluster( # there are no flattened fields. if not isinstance(request, bigtable_instance_admin.DeleteClusterRequest): request = bigtable_instance_admin.DeleteClusterRequest(request) - # If we have keyword arguments corresponding to fields on the # request, apply these. - if name is not None: request.name = name @@ -1357,7 +1329,6 @@ def create_app_profile( This corresponds to the ``app_profile`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1387,10 +1358,8 @@ def create_app_profile( # there are no flattened fields. if not isinstance(request, bigtable_instance_admin.CreateAppProfileRequest): request = bigtable_instance_admin.CreateAppProfileRequest(request) - # If we have keyword arguments corresponding to fields on the # request, apply these. - if parent is not None: request.parent = parent if app_profile_id is not None: @@ -1437,7 +1406,6 @@ def get_app_profile( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1467,10 +1435,8 @@ def get_app_profile( # there are no flattened fields. if not isinstance(request, bigtable_instance_admin.GetAppProfileRequest): request = bigtable_instance_admin.GetAppProfileRequest(request) - # If we have keyword arguments corresponding to fields on the # request, apply these. - if name is not None: request.name = name @@ -1516,7 +1482,6 @@ def list_app_profiles( This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1548,10 +1513,8 @@ def list_app_profiles( # there are no flattened fields. if not isinstance(request, bigtable_instance_admin.ListAppProfilesRequest): request = bigtable_instance_admin.ListAppProfilesRequest(request) - # If we have keyword arguments corresponding to fields on the # request, apply these. - if parent is not None: request.parent = parent @@ -1582,7 +1545,7 @@ def update_app_profile( request: bigtable_instance_admin.UpdateAppProfileRequest = None, *, app_profile: instance.AppProfile = None, - update_mask: field_mask.FieldMask = None, + update_mask: field_mask_pb2.FieldMask = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -1608,7 +1571,6 @@ def update_app_profile( This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1639,10 +1601,8 @@ def update_app_profile( # there are no flattened fields. 
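The blank-line removals in these hunks all touch the same boilerplate that every flattened method repeats: coerce whatever the caller passed into the proto-plus request type without an unnecessary copy, then apply the flattened keyword arguments. A minimal sketch of that pattern, lifted out of the client and using get_instance as the example (the resource name is a placeholder, and the guard against mixing a request with flattened arguments is omitted):

    from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin


    def _coerce_get_instance_request(request=None, *, name=None):
        """Sketch of the coercion boilerplate shown above, outside the client."""
        if not isinstance(request, bigtable_instance_admin.GetInstanceRequest):
            # Proto-plus copies a dict or message; an existing request is reused as-is.
            request = bigtable_instance_admin.GetInstanceRequest(request)
        # Flattened keyword arguments are applied on top of the request.
        if name is not None:
            request.name = name
        return request


    req = _coerce_get_instance_request(name="projects/my-project/instances/my-instance")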
if not isinstance(request, bigtable_instance_admin.UpdateAppProfileRequest): request = bigtable_instance_admin.UpdateAppProfileRequest(request) - # If we have keyword arguments corresponding to fields on the # request, apply these. - if app_profile is not None: request.app_profile = app_profile if update_mask is not None: @@ -1697,7 +1657,6 @@ def delete_app_profile( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1720,10 +1679,8 @@ def delete_app_profile( # there are no flattened fields. if not isinstance(request, bigtable_instance_admin.DeleteAppProfileRequest): request = bigtable_instance_admin.DeleteAppProfileRequest(request) - # If we have keyword arguments corresponding to fields on the # request, apply these. - if name is not None: request.name = name @@ -1744,13 +1701,13 @@ def delete_app_profile( def get_iam_policy( self, - request: iam_policy.GetIamPolicyRequest = None, + request: iam_policy_pb2.GetIamPolicyRequest = None, *, resource: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> policy.Policy: + ) -> policy_pb2.Policy: r"""Gets the access control policy for an instance resource. Returns an empty policy if an instance exists but does not have a policy set. @@ -1768,7 +1725,6 @@ def get_iam_policy( This corresponds to the ``resource`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1847,11 +1803,10 @@ def get_iam_policy( if isinstance(request, dict): # The request isn't a proto-plus wrapped type, # so it must be constructed via keyword expansion. - request = iam_policy.GetIamPolicyRequest(**request) + request = iam_policy_pb2.GetIamPolicyRequest(**request) elif not request: # Null request, just make one. - request = iam_policy.GetIamPolicyRequest() - + request = iam_policy_pb2.GetIamPolicyRequest() if resource is not None: request.resource = resource @@ -1873,13 +1828,13 @@ def get_iam_policy( def set_iam_policy( self, - request: iam_policy.SetIamPolicyRequest = None, + request: iam_policy_pb2.SetIamPolicyRequest = None, *, resource: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> policy.Policy: + ) -> policy_pb2.Policy: r"""Sets the access control policy on an instance resource. Replaces any existing policy. @@ -1896,7 +1851,6 @@ def set_iam_policy( This corresponds to the ``resource`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1975,11 +1929,10 @@ def set_iam_policy( if isinstance(request, dict): # The request isn't a proto-plus wrapped type, # so it must be constructed via keyword expansion. - request = iam_policy.SetIamPolicyRequest(**request) + request = iam_policy_pb2.SetIamPolicyRequest(**request) elif not request: # Null request, just make one. 
- request = iam_policy.SetIamPolicyRequest() - + request = iam_policy_pb2.SetIamPolicyRequest() if resource is not None: request.resource = resource @@ -2001,14 +1954,14 @@ def set_iam_policy( def test_iam_permissions( self, - request: iam_policy.TestIamPermissionsRequest = None, + request: iam_policy_pb2.TestIamPermissionsRequest = None, *, resource: str = None, permissions: Sequence[str] = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> iam_policy.TestIamPermissionsResponse: + ) -> iam_policy_pb2.TestIamPermissionsResponse: r"""Returns permissions that the caller has on the specified instance resource. @@ -2034,7 +1987,6 @@ def test_iam_permissions( This corresponds to the ``permissions`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2058,14 +2010,12 @@ def test_iam_permissions( if isinstance(request, dict): # The request isn't a proto-plus wrapped type, # so it must be constructed via keyword expansion. - request = iam_policy.TestIamPermissionsRequest(**request) + request = iam_policy_pb2.TestIamPermissionsRequest(**request) elif not request: # Null request, just make one. - request = iam_policy.TestIamPermissionsRequest() - + request = iam_policy_pb2.TestIamPermissionsRequest() if resource is not None: request.resource = resource - if permissions: request.permissions.extend(permissions) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py index f92d478868e1..cf5def768e44 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- - # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,7 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. # - from typing import ( Any, AsyncIterable, @@ -120,7 +118,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. Args: method (Callable): The method that was originally called, and diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py index 23b5107110b4..0dbb19a6aa27 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- - # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,7 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
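Beyond the module alias changes (iam_policy to iam_policy_pb2, policy to policy_pb2), the IAM hunks above show the one spot where keyword expansion is required: the IAM messages are plain protobuf types rather than proto-plus wrappers, so a dict request is rebuilt with iam_policy_pb2.GetIamPolicyRequest(**request). A hedged usage sketch against the public client; the project, instance, and permission strings are placeholders:

    from google.cloud.bigtable_admin_v2 import BigtableInstanceAdminClient

    client = BigtableInstanceAdminClient()
    resource = "projects/my-project/instances/my-instance"  # placeholder

    # Flattened form: pass the resource directly.
    policy = client.get_iam_policy(resource=resource)

    # Dict form: expanded into iam_policy_pb2.TestIamPermissionsRequest(**request),
    # exactly as the hunk above does for non-proto-plus messages.
    response = client.test_iam_permissions(
        request={"resource": resource, "permissions": ["bigtable.instances.get"]}
    )
    print(response.permissions)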
# - from collections import OrderedDict from typing import Dict, Type diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py index 9a60430e7982..689dbc5f7b0c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- - # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,25 +13,25 @@ # See the License for the specific language governing permissions and # limitations under the License. # - import abc -import typing +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import packaging.version import pkg_resources -from google import auth # type: ignore -from google.api_core import exceptions # type: ignore +import google.auth # type: ignore +import google.api_core # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore from google.api_core import gapic_v1 # type: ignore from google.api_core import retry as retries # type: ignore from google.api_core import operations_v1 # type: ignore -from google.auth import credentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin from google.cloud.bigtable_admin_v2.types import instance -from google.iam.v1 import iam_policy_pb2 as iam_policy # type: ignore -from google.iam.v1 import policy_pb2 as policy # type: ignore -from google.longrunning import operations_pb2 as operations # type: ignore -from google.protobuf import empty_pb2 as empty # type: ignore - +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( @@ -43,6 +42,17 @@ except pkg_resources.DistributionNotFound: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() +try: + # google.auth.__version__ was added in 1.26.0 + _GOOGLE_AUTH_VERSION = google.auth.__version__ +except AttributeError: + try: # try pkg_resources if it is available + _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version + except pkg_resources.DistributionNotFound: # pragma: NO COVER + _GOOGLE_AUTH_VERSION = None + +_API_CORE_VERSION = google.api_core.__version__ + class BigtableInstanceAdminTransport(abc.ABC): """Abstract transport class for BigtableInstanceAdmin.""" @@ -57,21 +67,24 @@ class BigtableInstanceAdminTransport(abc.ABC): "https://www.googleapis.com/auth/cloud-platform.read-only", ) + DEFAULT_HOST: str = "bigtableadmin.googleapis.com" + def __init__( self, *, - host: str = "bigtableadmin.googleapis.com", - credentials: credentials.Credentials = None, - credentials_file: typing.Optional[str] = None, - scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES, - quota_project_id: typing.Optional[str] = None, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, client_info: 
gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, **kwargs, ) -> None: """Instantiate the transport. Args: - host (Optional[str]): The hostname to connect to. + host (Optional[str]): + The hostname to connect to. credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none @@ -80,7 +93,7 @@ def __init__( credentials_file (Optional[str]): A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is mutually exclusive with credentials. - scope (Optional[Sequence[str]]): A list of scopes. + scopes (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. client_info (google.api_core.gapic_v1.client_info.ClientInfo): @@ -94,29 +107,76 @@ def __init__( host += ":443" self._host = host + scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) + # Save the scopes. self._scopes = scopes or self.AUTH_SCOPES # If no credentials are provided, then determine the appropriate # defaults. if credentials and credentials_file: - raise exceptions.DuplicateCredentialArgs( + raise core_exceptions.DuplicateCredentialArgs( "'credentials_file' and 'credentials' are mutually exclusive" ) if credentials_file is not None: - credentials, _ = auth.load_credentials_from_file( - credentials_file, scopes=self._scopes, quota_project_id=quota_project_id + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) elif credentials is None: - credentials, _ = auth.default( - scopes=self._scopes, quota_project_id=quota_project_id + credentials, _ = google.auth.default( + **scopes_kwargs, quota_project_id=quota_project_id ) # Save the credentials. self._credentials = credentials + # TODO(busunkim): These two class methods are in the base transport + # to avoid duplicating code across the transport classes. These functions + # should be deleted once the minimum required versions of google-api-core + # and google-auth are increased. 
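The module-level block added above records which google-auth and google-api-core are installed so the transport can adapt its keyword arguments at runtime. Restated on its own (this mirrors the code above rather than adding to it; the helper at the end is hypothetical):

    import pkg_resources
    import packaging.version
    import google.auth  # type: ignore

    try:
        # google.auth.__version__ was only added in google-auth 1.26.0.
        _GOOGLE_AUTH_VERSION = google.auth.__version__
    except AttributeError:
        try:
            _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version
        except pkg_resources.DistributionNotFound:
            _GOOGLE_AUTH_VERSION = None


    def _auth_supports_default_scopes():
        """Hypothetical helper: True when google-auth accepts default_scopes (>= 1.25.0)."""
        return bool(
            _GOOGLE_AUTH_VERSION
            and packaging.version.parse(_GOOGLE_AUTH_VERSION)
            >= packaging.version.parse("1.25.0")
        )

The _get_scopes_kwargs classmethod in the next hunk uses exactly this comparison to choose between passing {"scopes": scopes, "default_scopes": AUTH_SCOPES} and the legacy {"scopes": scopes or AUTH_SCOPES}.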
+ + # TODO: Remove this function once google-auth >= 1.25.0 is required + @classmethod + def _get_scopes_kwargs( + cls, host: str, scopes: Optional[Sequence[str]] + ) -> Dict[str, Optional[Sequence[str]]]: + """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version""" + + scopes_kwargs = {} + + if _GOOGLE_AUTH_VERSION and ( + packaging.version.parse(_GOOGLE_AUTH_VERSION) + >= packaging.version.parse("1.25.0") + ): + scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES} + else: + scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES} + + return scopes_kwargs + + # TODO: Remove this function once google-api-core >= 1.26.0 is required + @classmethod + def _get_self_signed_jwt_kwargs( + cls, host: str, scopes: Optional[Sequence[str]] + ) -> Dict[str, Union[Optional[Sequence[str]], str]]: + """Returns kwargs to pass to grpc_helpers.create_channel depending on the google-api-core version""" + + self_signed_jwt_kwargs: Dict[str, Union[Optional[Sequence[str]], str]] = {} + + if _API_CORE_VERSION and ( + packaging.version.parse(_API_CORE_VERSION) + >= packaging.version.parse("1.26.0") + ): + self_signed_jwt_kwargs["default_scopes"] = cls.AUTH_SCOPES + self_signed_jwt_kwargs["scopes"] = scopes + self_signed_jwt_kwargs["default_host"] = cls.DEFAULT_HOST + else: + self_signed_jwt_kwargs["scopes"] = scopes or cls.AUTH_SCOPES + + return self_signed_jwt_kwargs + def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. self._wrapped_methods = { @@ -130,7 +190,8 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=2, predicate=retries.if_exception_type( - exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, ), deadline=60.0, ), @@ -144,7 +205,8 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=2, predicate=retries.if_exception_type( - exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, ), deadline=60.0, ), @@ -158,7 +220,8 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=2, predicate=retries.if_exception_type( - exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, ), deadline=60.0, ), @@ -172,7 +235,8 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=2, predicate=retries.if_exception_type( - exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, ), deadline=60.0, ), @@ -192,7 +256,8 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=2, predicate=retries.if_exception_type( - exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, ), deadline=60.0, ), @@ -206,7 +271,8 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=2, predicate=retries.if_exception_type( - exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, ), deadline=60.0, ), @@ -220,7 +286,8 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=2, predicate=retries.if_exception_type( - exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, ), deadline=60.0, ), @@ 
-240,7 +307,8 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=2, predicate=retries.if_exception_type( - exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, ), deadline=60.0, ), @@ -254,7 +322,8 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=2, predicate=retries.if_exception_type( - exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, ), deadline=60.0, ), @@ -268,7 +337,8 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=2, predicate=retries.if_exception_type( - exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, ), deadline=60.0, ), @@ -285,7 +355,8 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=2, predicate=retries.if_exception_type( - exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, ), deadline=60.0, ), @@ -302,7 +373,8 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=2, predicate=retries.if_exception_type( - exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, ), deadline=60.0, ), @@ -319,29 +391,29 @@ def operations_client(self) -> operations_v1.OperationsClient: @property def create_instance( self, - ) -> typing.Callable[ + ) -> Callable[ [bigtable_instance_admin.CreateInstanceRequest], - typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], ]: raise NotImplementedError() @property def get_instance( self, - ) -> typing.Callable[ + ) -> Callable[ [bigtable_instance_admin.GetInstanceRequest], - typing.Union[instance.Instance, typing.Awaitable[instance.Instance]], + Union[instance.Instance, Awaitable[instance.Instance]], ]: raise NotImplementedError() @property def list_instances( self, - ) -> typing.Callable[ + ) -> Callable[ [bigtable_instance_admin.ListInstancesRequest], - typing.Union[ + Union[ bigtable_instance_admin.ListInstancesResponse, - typing.Awaitable[bigtable_instance_admin.ListInstancesResponse], + Awaitable[bigtable_instance_admin.ListInstancesResponse], ], ]: raise NotImplementedError() @@ -349,56 +421,55 @@ def list_instances( @property def update_instance( self, - ) -> typing.Callable[ - [instance.Instance], - typing.Union[instance.Instance, typing.Awaitable[instance.Instance]], + ) -> Callable[ + [instance.Instance], Union[instance.Instance, Awaitable[instance.Instance]] ]: raise NotImplementedError() @property def partial_update_instance( self, - ) -> typing.Callable[ + ) -> Callable[ [bigtable_instance_admin.PartialUpdateInstanceRequest], - typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], ]: raise NotImplementedError() @property def delete_instance( self, - ) -> typing.Callable[ + ) -> Callable[ [bigtable_instance_admin.DeleteInstanceRequest], - typing.Union[empty.Empty, typing.Awaitable[empty.Empty]], + Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]], ]: raise NotImplementedError() @property def create_cluster( self, - ) -> typing.Callable[ + ) -> Callable[ [bigtable_instance_admin.CreateClusterRequest], - 
typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], ]: raise NotImplementedError() @property def get_cluster( self, - ) -> typing.Callable[ + ) -> Callable[ [bigtable_instance_admin.GetClusterRequest], - typing.Union[instance.Cluster, typing.Awaitable[instance.Cluster]], + Union[instance.Cluster, Awaitable[instance.Cluster]], ]: raise NotImplementedError() @property def list_clusters( self, - ) -> typing.Callable[ + ) -> Callable[ [bigtable_instance_admin.ListClustersRequest], - typing.Union[ + Union[ bigtable_instance_admin.ListClustersResponse, - typing.Awaitable[bigtable_instance_admin.ListClustersResponse], + Awaitable[bigtable_instance_admin.ListClustersResponse], ], ]: raise NotImplementedError() @@ -406,47 +477,47 @@ def list_clusters( @property def update_cluster( self, - ) -> typing.Callable[ + ) -> Callable[ [instance.Cluster], - typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], ]: raise NotImplementedError() @property def delete_cluster( self, - ) -> typing.Callable[ + ) -> Callable[ [bigtable_instance_admin.DeleteClusterRequest], - typing.Union[empty.Empty, typing.Awaitable[empty.Empty]], + Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]], ]: raise NotImplementedError() @property def create_app_profile( self, - ) -> typing.Callable[ + ) -> Callable[ [bigtable_instance_admin.CreateAppProfileRequest], - typing.Union[instance.AppProfile, typing.Awaitable[instance.AppProfile]], + Union[instance.AppProfile, Awaitable[instance.AppProfile]], ]: raise NotImplementedError() @property def get_app_profile( self, - ) -> typing.Callable[ + ) -> Callable[ [bigtable_instance_admin.GetAppProfileRequest], - typing.Union[instance.AppProfile, typing.Awaitable[instance.AppProfile]], + Union[instance.AppProfile, Awaitable[instance.AppProfile]], ]: raise NotImplementedError() @property def list_app_profiles( self, - ) -> typing.Callable[ + ) -> Callable[ [bigtable_instance_admin.ListAppProfilesRequest], - typing.Union[ + Union[ bigtable_instance_admin.ListAppProfilesResponse, - typing.Awaitable[bigtable_instance_admin.ListAppProfilesResponse], + Awaitable[bigtable_instance_admin.ListAppProfilesResponse], ], ]: raise NotImplementedError() @@ -454,47 +525,47 @@ def list_app_profiles( @property def update_app_profile( self, - ) -> typing.Callable[ + ) -> Callable[ [bigtable_instance_admin.UpdateAppProfileRequest], - typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], ]: raise NotImplementedError() @property def delete_app_profile( self, - ) -> typing.Callable[ + ) -> Callable[ [bigtable_instance_admin.DeleteAppProfileRequest], - typing.Union[empty.Empty, typing.Awaitable[empty.Empty]], + Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]], ]: raise NotImplementedError() @property def get_iam_policy( self, - ) -> typing.Callable[ - [iam_policy.GetIamPolicyRequest], - typing.Union[policy.Policy, typing.Awaitable[policy.Policy]], + ) -> Callable[ + [iam_policy_pb2.GetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], ]: raise NotImplementedError() @property def set_iam_policy( self, - ) -> typing.Callable[ - [iam_policy.SetIamPolicyRequest], - typing.Union[policy.Policy, typing.Awaitable[policy.Policy]], + ) -> Callable[ + [iam_policy_pb2.SetIamPolicyRequest], + Union[policy_pb2.Policy, 
Awaitable[policy_pb2.Policy]], ]: raise NotImplementedError() @property def test_iam_permissions( self, - ) -> typing.Callable[ - [iam_policy.TestIamPermissionsRequest], - typing.Union[ - iam_policy.TestIamPermissionsResponse, - typing.Awaitable[iam_policy.TestIamPermissionsResponse], + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + Union[ + iam_policy_pb2.TestIamPermissionsResponse, + Awaitable[iam_policy_pb2.TestIamPermissionsResponse], ], ]: raise NotImplementedError() diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py index 9d204473a017..3d3a4144e98e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- - # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,26 +13,24 @@ # See the License for the specific language governing permissions and # limitations under the License. # - import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple +from typing import Callable, Dict, Optional, Sequence, Tuple, Union from google.api_core import grpc_helpers # type: ignore from google.api_core import operations_v1 # type: ignore from google.api_core import gapic_v1 # type: ignore -from google import auth # type: ignore -from google.auth import credentials # type: ignore +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore import grpc # type: ignore from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin from google.cloud.bigtable_admin_v2.types import instance -from google.iam.v1 import iam_policy_pb2 as iam_policy # type: ignore -from google.iam.v1 import policy_pb2 as policy # type: ignore -from google.longrunning import operations_pb2 as operations # type: ignore -from google.protobuf import empty_pb2 as empty # type: ignore - +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore from .base import BigtableInstanceAdminTransport, DEFAULT_CLIENT_INFO @@ -59,7 +56,7 @@ def __init__( self, *, host: str = "bigtableadmin.googleapis.com", - credentials: credentials.Credentials = None, + credentials: ga_credentials.Credentials = None, credentials_file: str = None, scopes: Sequence[str] = None, channel: grpc.Channel = None, @@ -73,7 +70,8 @@ def __init__( """Instantiate the transport. Args: - host (Optional[str]): The hostname to connect to. + host (Optional[str]): + The hostname to connect to. credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. 
These credentials identify the application to the service; if none @@ -184,7 +182,7 @@ def __init__( def create_channel( cls, host: str = "bigtableadmin.googleapis.com", - credentials: credentials.Credentials = None, + credentials: ga_credentials.Credentials = None, credentials_file: str = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, @@ -215,13 +213,15 @@ def create_channel( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ - scopes = scopes or cls.AUTH_SCOPES + + self_signed_jwt_kwargs = cls._get_self_signed_jwt_kwargs(host, scopes) + return grpc_helpers.create_channel( host, credentials=credentials, credentials_file=credentials_file, - scopes=scopes, quota_project_id=quota_project_id, + **self_signed_jwt_kwargs, **kwargs, ) @@ -249,7 +249,7 @@ def operations_client(self) -> operations_v1.OperationsClient: def create_instance( self, ) -> Callable[ - [bigtable_instance_admin.CreateInstanceRequest], operations.Operation + [bigtable_instance_admin.CreateInstanceRequest], operations_pb2.Operation ]: r"""Return a callable for the create instance method over gRPC. @@ -269,7 +269,7 @@ def create_instance( self._stubs["create_instance"] = self.grpc_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateInstance", request_serializer=bigtable_instance_admin.CreateInstanceRequest.serialize, - response_deserializer=operations.Operation.FromString, + response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["create_instance"] @@ -359,7 +359,7 @@ def update_instance(self) -> Callable[[instance.Instance], instance.Instance]: def partial_update_instance( self, ) -> Callable[ - [bigtable_instance_admin.PartialUpdateInstanceRequest], operations.Operation + [bigtable_instance_admin.PartialUpdateInstanceRequest], operations_pb2.Operation ]: r"""Return a callable for the partial update instance method over gRPC. @@ -381,14 +381,14 @@ def partial_update_instance( self._stubs["partial_update_instance"] = self.grpc_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/PartialUpdateInstance", request_serializer=bigtable_instance_admin.PartialUpdateInstanceRequest.serialize, - response_deserializer=operations.Operation.FromString, + response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["partial_update_instance"] @property def delete_instance( self, - ) -> Callable[[bigtable_instance_admin.DeleteInstanceRequest], empty.Empty]: + ) -> Callable[[bigtable_instance_admin.DeleteInstanceRequest], empty_pb2.Empty]: r"""Return a callable for the delete instance method over gRPC. Delete an instance from a project. @@ -407,14 +407,16 @@ def delete_instance( self._stubs["delete_instance"] = self.grpc_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteInstance", request_serializer=bigtable_instance_admin.DeleteInstanceRequest.serialize, - response_deserializer=empty.Empty.FromString, + response_deserializer=empty_pb2.Empty.FromString, ) return self._stubs["delete_instance"] @property def create_cluster( self, - ) -> Callable[[bigtable_instance_admin.CreateClusterRequest], operations.Operation]: + ) -> Callable[ + [bigtable_instance_admin.CreateClusterRequest], operations_pb2.Operation + ]: r"""Return a callable for the create cluster method over gRPC. Creates a cluster within an instance. 
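Every stub property on this transport (and on the asyncio transport later in the patch) follows the pattern visible in create_instance above: check the _stubs cache, build a unary-unary callable on the channel with the matching serializer and deserializer, cache it, and return it. Reduced to a standalone sketch; the class is invented, while the method path and (de)serializers are the ones from the hunk:

    import grpc  # type: ignore
    from google.longrunning import operations_pb2  # type: ignore

    from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin


    class _StubCacheSketch:
        """Illustration only; the real logic lives on BigtableInstanceAdminGrpcTransport."""

        def __init__(self, channel: grpc.Channel):
            self.grpc_channel = channel
            self._stubs = {}

        @property
        def create_instance(self):
            # Build the callable lazily, then reuse it on every later access.
            if "create_instance" not in self._stubs:
                self._stubs["create_instance"] = self.grpc_channel.unary_unary(
                    "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateInstance",
                    request_serializer=bigtable_instance_admin.CreateInstanceRequest.serialize,
                    response_deserializer=operations_pb2.Operation.FromString,
                )
            return self._stubs["create_instance"]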
@@ -433,7 +435,7 @@ def create_cluster( self._stubs["create_cluster"] = self.grpc_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateCluster", request_serializer=bigtable_instance_admin.CreateClusterRequest.serialize, - response_deserializer=operations.Operation.FromString, + response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["create_cluster"] @@ -493,7 +495,7 @@ def list_clusters( return self._stubs["list_clusters"] @property - def update_cluster(self) -> Callable[[instance.Cluster], operations.Operation]: + def update_cluster(self) -> Callable[[instance.Cluster], operations_pb2.Operation]: r"""Return a callable for the update cluster method over gRPC. Updates a cluster within an instance. @@ -512,14 +514,14 @@ def update_cluster(self) -> Callable[[instance.Cluster], operations.Operation]: self._stubs["update_cluster"] = self.grpc_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateCluster", request_serializer=instance.Cluster.serialize, - response_deserializer=operations.Operation.FromString, + response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["update_cluster"] @property def delete_cluster( self, - ) -> Callable[[bigtable_instance_admin.DeleteClusterRequest], empty.Empty]: + ) -> Callable[[bigtable_instance_admin.DeleteClusterRequest], empty_pb2.Empty]: r"""Return a callable for the delete cluster method over gRPC. Deletes a cluster from an instance. @@ -538,7 +540,7 @@ def delete_cluster( self._stubs["delete_cluster"] = self.grpc_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteCluster", request_serializer=bigtable_instance_admin.DeleteClusterRequest.serialize, - response_deserializer=empty.Empty.FromString, + response_deserializer=empty_pb2.Empty.FromString, ) return self._stubs["delete_cluster"] @@ -629,7 +631,7 @@ def list_app_profiles( def update_app_profile( self, ) -> Callable[ - [bigtable_instance_admin.UpdateAppProfileRequest], operations.Operation + [bigtable_instance_admin.UpdateAppProfileRequest], operations_pb2.Operation ]: r"""Return a callable for the update app profile method over gRPC. @@ -649,14 +651,14 @@ def update_app_profile( self._stubs["update_app_profile"] = self.grpc_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateAppProfile", request_serializer=bigtable_instance_admin.UpdateAppProfileRequest.serialize, - response_deserializer=operations.Operation.FromString, + response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["update_app_profile"] @property def delete_app_profile( self, - ) -> Callable[[bigtable_instance_admin.DeleteAppProfileRequest], empty.Empty]: + ) -> Callable[[bigtable_instance_admin.DeleteAppProfileRequest], empty_pb2.Empty]: r"""Return a callable for the delete app profile method over gRPC. Deletes an app profile from an instance. @@ -675,14 +677,14 @@ def delete_app_profile( self._stubs["delete_app_profile"] = self.grpc_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteAppProfile", request_serializer=bigtable_instance_admin.DeleteAppProfileRequest.serialize, - response_deserializer=empty.Empty.FromString, + response_deserializer=empty_pb2.Empty.FromString, ) return self._stubs["delete_app_profile"] @property def get_iam_policy( self, - ) -> Callable[[iam_policy.GetIamPolicyRequest], policy.Policy]: + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: r"""Return a callable for the get iam policy method over gRPC. 
Gets the access control policy for an instance @@ -702,15 +704,15 @@ def get_iam_policy( if "get_iam_policy" not in self._stubs: self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetIamPolicy", - request_serializer=iam_policy.GetIamPolicyRequest.SerializeToString, - response_deserializer=policy.Policy.FromString, + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, ) return self._stubs["get_iam_policy"] @property def set_iam_policy( self, - ) -> Callable[[iam_policy.SetIamPolicyRequest], policy.Policy]: + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: r"""Return a callable for the set iam policy method over gRPC. Sets the access control policy on an instance @@ -729,8 +731,8 @@ def set_iam_policy( if "set_iam_policy" not in self._stubs: self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/SetIamPolicy", - request_serializer=iam_policy.SetIamPolicyRequest.SerializeToString, - response_deserializer=policy.Policy.FromString, + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, ) return self._stubs["set_iam_policy"] @@ -738,7 +740,8 @@ def set_iam_policy( def test_iam_permissions( self, ) -> Callable[ - [iam_policy.TestIamPermissionsRequest], iam_policy.TestIamPermissionsResponse + [iam_policy_pb2.TestIamPermissionsRequest], + iam_policy_pb2.TestIamPermissionsResponse, ]: r"""Return a callable for the test iam permissions method over gRPC. @@ -758,8 +761,8 @@ def test_iam_permissions( if "test_iam_permissions" not in self._stubs: self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/TestIamPermissions", - request_serializer=iam_policy.TestIamPermissionsRequest.SerializeToString, - response_deserializer=iam_policy.TestIamPermissionsResponse.FromString, + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, ) return self._stubs["test_iam_permissions"] diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py index d2bc4647b3e0..7dbec88061f5 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- - # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,27 +13,25 @@ # See the License for the specific language governing permissions and # limitations under the License. 
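The create_channel overrides in this file and in the asyncio transport that follows now delegate the scope handling to _get_self_signed_jwt_kwargs instead of hard-coding scopes = scopes or cls.AUTH_SCOPES. A hedged sketch of what ultimately reaches grpc_helpers.create_channel; the boolean stands in for the packaging.version check defined in base.py, and the scope list is abbreviated:

    from google.api_core import grpc_helpers  # type: ignore

    DEFAULT_HOST = "bigtableadmin.googleapis.com"
    AUTH_SCOPES = (  # abbreviated; the transport declares more scopes
        "https://www.googleapis.com/auth/bigtable.admin",
        "https://www.googleapis.com/auth/cloud-platform",
    )


    def create_channel_sketch(host=DEFAULT_HOST, credentials=None, scopes=None, **kwargs):
        """Hypothetical reduction of the create_channel classmethod above."""
        api_core_supports_default_scopes = True  # stand-in for the >= 1.26.0 check
        if api_core_supports_default_scopes:
            jwt_kwargs = {
                "default_scopes": AUTH_SCOPES,
                "scopes": scopes,
                "default_host": DEFAULT_HOST,
            }
        else:
            jwt_kwargs = {"scopes": scopes or AUTH_SCOPES}
        return grpc_helpers.create_channel(
            host, credentials=credentials, **jwt_kwargs, **kwargs
        )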
# - import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union from google.api_core import gapic_v1 # type: ignore from google.api_core import grpc_helpers_async # type: ignore from google.api_core import operations_v1 # type: ignore -from google import auth # type: ignore -from google.auth import credentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore +import packaging.version import grpc # type: ignore from grpc.experimental import aio # type: ignore from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin from google.cloud.bigtable_admin_v2.types import instance -from google.iam.v1 import iam_policy_pb2 as iam_policy # type: ignore -from google.iam.v1 import policy_pb2 as policy # type: ignore -from google.longrunning import operations_pb2 as operations # type: ignore -from google.protobuf import empty_pb2 as empty # type: ignore - +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore from .base import BigtableInstanceAdminTransport, DEFAULT_CLIENT_INFO from .grpc import BigtableInstanceAdminGrpcTransport @@ -62,7 +59,7 @@ class BigtableInstanceAdminGrpcAsyncIOTransport(BigtableInstanceAdminTransport): def create_channel( cls, host: str = "bigtableadmin.googleapis.com", - credentials: credentials.Credentials = None, + credentials: ga_credentials.Credentials = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, @@ -89,13 +86,15 @@ def create_channel( Returns: aio.Channel: A gRPC AsyncIO channel object. """ - scopes = scopes or cls.AUTH_SCOPES + + self_signed_jwt_kwargs = cls._get_self_signed_jwt_kwargs(host, scopes) + return grpc_helpers_async.create_channel( host, credentials=credentials, credentials_file=credentials_file, - scopes=scopes, quota_project_id=quota_project_id, + **self_signed_jwt_kwargs, **kwargs, ) @@ -103,7 +102,7 @@ def __init__( self, *, host: str = "bigtableadmin.googleapis.com", - credentials: credentials.Credentials = None, + credentials: ga_credentials.Credentials = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, channel: aio.Channel = None, @@ -117,7 +116,8 @@ def __init__( """Instantiate the transport. Args: - host (Optional[str]): The hostname to connect to. + host (Optional[str]): + The hostname to connect to. credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none @@ -176,7 +176,6 @@ def __init__( # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - else: if api_mtls_endpoint: host = api_mtls_endpoint @@ -255,7 +254,8 @@ def operations_client(self) -> operations_v1.OperationsAsyncClient: def create_instance( self, ) -> Callable[ - [bigtable_instance_admin.CreateInstanceRequest], Awaitable[operations.Operation] + [bigtable_instance_admin.CreateInstanceRequest], + Awaitable[operations_pb2.Operation], ]: r"""Return a callable for the create instance method over gRPC. 
@@ -275,7 +275,7 @@ def create_instance( self._stubs["create_instance"] = self.grpc_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateInstance", request_serializer=bigtable_instance_admin.CreateInstanceRequest.serialize, - response_deserializer=operations.Operation.FromString, + response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["create_instance"] @@ -370,7 +370,7 @@ def partial_update_instance( self, ) -> Callable[ [bigtable_instance_admin.PartialUpdateInstanceRequest], - Awaitable[operations.Operation], + Awaitable[operations_pb2.Operation], ]: r"""Return a callable for the partial update instance method over gRPC. @@ -392,7 +392,7 @@ def partial_update_instance( self._stubs["partial_update_instance"] = self.grpc_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/PartialUpdateInstance", request_serializer=bigtable_instance_admin.PartialUpdateInstanceRequest.serialize, - response_deserializer=operations.Operation.FromString, + response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["partial_update_instance"] @@ -400,7 +400,7 @@ def partial_update_instance( def delete_instance( self, ) -> Callable[ - [bigtable_instance_admin.DeleteInstanceRequest], Awaitable[empty.Empty] + [bigtable_instance_admin.DeleteInstanceRequest], Awaitable[empty_pb2.Empty] ]: r"""Return a callable for the delete instance method over gRPC. @@ -420,7 +420,7 @@ def delete_instance( self._stubs["delete_instance"] = self.grpc_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteInstance", request_serializer=bigtable_instance_admin.DeleteInstanceRequest.serialize, - response_deserializer=empty.Empty.FromString, + response_deserializer=empty_pb2.Empty.FromString, ) return self._stubs["delete_instance"] @@ -428,7 +428,8 @@ def delete_instance( def create_cluster( self, ) -> Callable[ - [bigtable_instance_admin.CreateClusterRequest], Awaitable[operations.Operation] + [bigtable_instance_admin.CreateClusterRequest], + Awaitable[operations_pb2.Operation], ]: r"""Return a callable for the create cluster method over gRPC. @@ -448,7 +449,7 @@ def create_cluster( self._stubs["create_cluster"] = self.grpc_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateCluster", request_serializer=bigtable_instance_admin.CreateClusterRequest.serialize, - response_deserializer=operations.Operation.FromString, + response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["create_cluster"] @@ -512,7 +513,7 @@ def list_clusters( @property def update_cluster( self, - ) -> Callable[[instance.Cluster], Awaitable[operations.Operation]]: + ) -> Callable[[instance.Cluster], Awaitable[operations_pb2.Operation]]: r"""Return a callable for the update cluster method over gRPC. Updates a cluster within an instance. @@ -531,7 +532,7 @@ def update_cluster( self._stubs["update_cluster"] = self.grpc_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateCluster", request_serializer=instance.Cluster.serialize, - response_deserializer=operations.Operation.FromString, + response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["update_cluster"] @@ -539,7 +540,7 @@ def update_cluster( def delete_cluster( self, ) -> Callable[ - [bigtable_instance_admin.DeleteClusterRequest], Awaitable[empty.Empty] + [bigtable_instance_admin.DeleteClusterRequest], Awaitable[empty_pb2.Empty] ]: r"""Return a callable for the delete cluster method over gRPC. 
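The deserializer swaps above (operations.Operation.FromString becoming operations_pb2.Operation.FromString) are a reminder that create_instance, partial_update_instance, create_cluster, update_cluster, and update_app_profile are long-running RPCs: the wire response is a google.longrunning Operation, which the generated client wraps so callers can wait for the final resource. A hedged usage sketch against the public client; resource names and cluster settings are placeholders:

    from google.cloud.bigtable_admin_v2 import BigtableInstanceAdminClient
    from google.cloud.bigtable_admin_v2.types import Cluster

    client = BigtableInstanceAdminClient()

    lro = client.create_cluster(
        parent="projects/my-project/instances/my-instance",  # placeholder
        cluster_id="my-cluster",                              # placeholder
        cluster=Cluster(
            location="projects/my-project/locations/us-central1-b",
            serve_nodes=3,
        ),
    )
    cluster = lro.result()  # blocks until the long-running operation completes
    print(cluster.name)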
@@ -559,7 +560,7 @@ def delete_cluster( self._stubs["delete_cluster"] = self.grpc_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteCluster", request_serializer=bigtable_instance_admin.DeleteClusterRequest.serialize, - response_deserializer=empty.Empty.FromString, + response_deserializer=empty_pb2.Empty.FromString, ) return self._stubs["delete_cluster"] @@ -654,7 +655,7 @@ def update_app_profile( self, ) -> Callable[ [bigtable_instance_admin.UpdateAppProfileRequest], - Awaitable[operations.Operation], + Awaitable[operations_pb2.Operation], ]: r"""Return a callable for the update app profile method over gRPC. @@ -674,7 +675,7 @@ def update_app_profile( self._stubs["update_app_profile"] = self.grpc_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateAppProfile", request_serializer=bigtable_instance_admin.UpdateAppProfileRequest.serialize, - response_deserializer=operations.Operation.FromString, + response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["update_app_profile"] @@ -682,7 +683,7 @@ def update_app_profile( def delete_app_profile( self, ) -> Callable[ - [bigtable_instance_admin.DeleteAppProfileRequest], Awaitable[empty.Empty] + [bigtable_instance_admin.DeleteAppProfileRequest], Awaitable[empty_pb2.Empty] ]: r"""Return a callable for the delete app profile method over gRPC. @@ -702,14 +703,14 @@ def delete_app_profile( self._stubs["delete_app_profile"] = self.grpc_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteAppProfile", request_serializer=bigtable_instance_admin.DeleteAppProfileRequest.serialize, - response_deserializer=empty.Empty.FromString, + response_deserializer=empty_pb2.Empty.FromString, ) return self._stubs["delete_app_profile"] @property def get_iam_policy( self, - ) -> Callable[[iam_policy.GetIamPolicyRequest], Awaitable[policy.Policy]]: + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], Awaitable[policy_pb2.Policy]]: r"""Return a callable for the get iam policy method over gRPC. Gets the access control policy for an instance @@ -729,15 +730,15 @@ def get_iam_policy( if "get_iam_policy" not in self._stubs: self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetIamPolicy", - request_serializer=iam_policy.GetIamPolicyRequest.SerializeToString, - response_deserializer=policy.Policy.FromString, + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, ) return self._stubs["get_iam_policy"] @property def set_iam_policy( self, - ) -> Callable[[iam_policy.SetIamPolicyRequest], Awaitable[policy.Policy]]: + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], Awaitable[policy_pb2.Policy]]: r"""Return a callable for the set iam policy method over gRPC. 
Sets the access control policy on an instance @@ -756,8 +757,8 @@ def set_iam_policy( if "set_iam_policy" not in self._stubs: self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/SetIamPolicy", - request_serializer=iam_policy.SetIamPolicyRequest.SerializeToString, - response_deserializer=policy.Policy.FromString, + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, ) return self._stubs["set_iam_policy"] @@ -765,8 +766,8 @@ def set_iam_policy( def test_iam_permissions( self, ) -> Callable[ - [iam_policy.TestIamPermissionsRequest], - Awaitable[iam_policy.TestIamPermissionsResponse], + [iam_policy_pb2.TestIamPermissionsRequest], + Awaitable[iam_policy_pb2.TestIamPermissionsResponse], ]: r"""Return a callable for the test iam permissions method over gRPC. @@ -786,8 +787,8 @@ def test_iam_permissions( if "test_iam_permissions" not in self._stubs: self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/TestIamPermissions", - request_serializer=iam_policy.TestIamPermissionsRequest.SerializeToString, - response_deserializer=iam_policy.TestIamPermissionsResponse.FromString, + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, ) return self._stubs["test_iam_permissions"] diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py index 76c35f3bb880..a93a9932b084 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- - # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,7 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. # - from .client import BigtableTableAdminClient from .async_client import BigtableTableAdminAsyncClient diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index e7b708305781..62bef2e7b2f5 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- - # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,7 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
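From this point the patch gives the table admin surface the same treatment, beginning with its asyncio client. Both services expose matching *AsyncClient classes on top of the grpc_asyncio transports edited above, so an awaitable call looks like the following; this is purely illustrative and the resource name is a placeholder:

    import asyncio

    from google.cloud.bigtable_admin_v2 import BigtableTableAdminAsyncClient


    async def main():
        client = BigtableTableAdminAsyncClient()
        # list_tables is paged; the async pager supports `async for`.
        pager = await client.list_tables(
            parent="projects/my-project/instances/my-instance"
        )
        async for table in pager:
            print(table.name)


    asyncio.run(main())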
# - from collections import OrderedDict import functools import re @@ -22,10 +20,10 @@ import pkg_resources import google.api_core.client_options as ClientOptions # type: ignore -from google.api_core import exceptions # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore from google.api_core import gapic_v1 # type: ignore from google.api_core import retry as retries # type: ignore -from google.auth import credentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore from google.api_core import operation # type: ignore @@ -34,11 +32,10 @@ from google.cloud.bigtable_admin_v2.types import bigtable_table_admin from google.cloud.bigtable_admin_v2.types import table from google.cloud.bigtable_admin_v2.types import table as gba_table -from google.iam.v1 import iam_policy_pb2 as iam_policy # type: ignore -from google.iam.v1 import policy_pb2 as policy # type: ignore -from google.protobuf import field_mask_pb2 as field_mask # type: ignore -from google.protobuf import timestamp_pb2 as timestamp # type: ignore - +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore from .transports.base import BigtableTableAdminTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import BigtableTableAdminGrpcAsyncIOTransport from .client import BigtableTableAdminClient @@ -73,31 +70,26 @@ class BigtableTableAdminAsyncClient: parse_snapshot_path = staticmethod(BigtableTableAdminClient.parse_snapshot_path) table_path = staticmethod(BigtableTableAdminClient.table_path) parse_table_path = staticmethod(BigtableTableAdminClient.parse_table_path) - common_billing_account_path = staticmethod( BigtableTableAdminClient.common_billing_account_path ) parse_common_billing_account_path = staticmethod( BigtableTableAdminClient.parse_common_billing_account_path ) - common_folder_path = staticmethod(BigtableTableAdminClient.common_folder_path) parse_common_folder_path = staticmethod( BigtableTableAdminClient.parse_common_folder_path ) - common_organization_path = staticmethod( BigtableTableAdminClient.common_organization_path ) parse_common_organization_path = staticmethod( BigtableTableAdminClient.parse_common_organization_path ) - common_project_path = staticmethod(BigtableTableAdminClient.common_project_path) parse_common_project_path = staticmethod( BigtableTableAdminClient.parse_common_project_path ) - common_location_path = staticmethod(BigtableTableAdminClient.common_location_path) parse_common_location_path = staticmethod( BigtableTableAdminClient.parse_common_location_path @@ -105,7 +97,8 @@ class BigtableTableAdminAsyncClient: @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials info. + """Creates an instance of this client using the provided credentials + info. Args: info (dict): The service account private key info. @@ -120,7 +113,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials - file. + file. 
Args: filename (str): The path to the service account private key json @@ -137,7 +130,7 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): @property def transport(self) -> BigtableTableAdminTransport: - """Return the transport used by the client instance. + """Returns the transport used by the client instance. Returns: BigtableTableAdminTransport: The transport used by the client instance. @@ -152,12 +145,12 @@ def transport(self) -> BigtableTableAdminTransport: def __init__( self, *, - credentials: credentials.Credentials = None, + credentials: ga_credentials.Credentials = None, transport: Union[str, BigtableTableAdminTransport] = "grpc_asyncio", client_options: ClientOptions = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiate the bigtable table admin client. + """Instantiates the bigtable table admin client. Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -189,7 +182,6 @@ def __init__( google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport creation failed for any reason. """ - self._client = BigtableTableAdminClient( credentials=credentials, transport=transport, @@ -238,7 +230,6 @@ async def create_table( This corresponds to the ``table`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -267,7 +258,6 @@ async def create_table( # If we have keyword arguments corresponding to fields on the # request, apply these. - if parent is not None: request.parent = parent if table_id is not None: @@ -351,7 +341,6 @@ async def create_table_from_snapshot( This corresponds to the ``source_snapshot`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -381,7 +370,6 @@ async def create_table_from_snapshot( # If we have keyword arguments corresponding to fields on the # request, apply these. - if parent is not None: request.parent = parent if table_id is not None: @@ -440,7 +428,6 @@ async def list_tables( This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -470,7 +457,6 @@ async def list_tables( # If we have keyword arguments corresponding to fields on the # request, apply these. - if parent is not None: request.parent = parent @@ -483,7 +469,8 @@ async def list_tables( maximum=60.0, multiplier=2, predicate=retries.if_exception_type( - exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, ), deadline=60.0, ), @@ -532,7 +519,6 @@ async def get_table( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -561,7 +547,6 @@ async def get_table( # If we have keyword arguments corresponding to fields on the # request, apply these. 
- if name is not None: request.name = name @@ -574,7 +559,8 @@ async def get_table( maximum=60.0, multiplier=2, predicate=retries.if_exception_type( - exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, ), deadline=60.0, ), @@ -618,7 +604,6 @@ async def delete_table( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -639,7 +624,6 @@ async def delete_table( # If we have keyword arguments corresponding to fields on the # request, apply these. - if name is not None: request.name = name @@ -704,7 +688,6 @@ async def modify_column_families( This corresponds to the ``modifications`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -733,10 +716,8 @@ async def modify_column_families( # If we have keyword arguments corresponding to fields on the # request, apply these. - if name is not None: request.name = name - if modifications: request.modifications.extend(modifications) @@ -777,7 +758,6 @@ async def drop_row_range( request (:class:`google.cloud.bigtable_admin_v2.types.DropRowRangeRequest`): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -785,7 +765,6 @@ async def drop_row_range( sent along with the request as metadata. """ # Create or coerce a protobuf request object. - request = bigtable_table_admin.DropRowRangeRequest(request) # Wrap the RPC method; this adds retry and timeout information, @@ -834,7 +813,6 @@ async def generate_consistency_token( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -861,7 +839,6 @@ async def generate_consistency_token( # If we have keyword arguments corresponding to fields on the # request, apply these. - if name is not None: request.name = name @@ -874,7 +851,8 @@ async def generate_consistency_token( maximum=60.0, multiplier=2, predicate=retries.if_exception_type( - exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, ), deadline=60.0, ), @@ -928,7 +906,6 @@ async def check_consistency( This corresponds to the ``consistency_token`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -955,7 +932,6 @@ async def check_consistency( # If we have keyword arguments corresponding to fields on the # request, apply these. 
- if name is not None: request.name = name if consistency_token is not None: @@ -970,7 +946,8 @@ async def check_consistency( maximum=60.0, multiplier=2, predicate=retries.if_exception_type( - exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, ), deadline=60.0, ), @@ -1053,7 +1030,6 @@ async def snapshot_table( This corresponds to the ``description`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1090,7 +1066,6 @@ async def snapshot_table( # If we have keyword arguments corresponding to fields on the # request, apply these. - if name is not None: request.name = name if cluster is not None: @@ -1164,7 +1139,6 @@ async def get_snapshot( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1201,7 +1175,6 @@ async def get_snapshot( # If we have keyword arguments corresponding to fields on the # request, apply these. - if name is not None: request.name = name @@ -1214,7 +1187,8 @@ async def get_snapshot( maximum=60.0, multiplier=2, predicate=retries.if_exception_type( - exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, ), deadline=60.0, ), @@ -1273,7 +1247,6 @@ async def list_snapshots( This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1310,7 +1283,6 @@ async def list_snapshots( # If we have keyword arguments corresponding to fields on the # request, apply these. - if parent is not None: request.parent = parent @@ -1323,7 +1295,8 @@ async def list_snapshots( maximum=60.0, multiplier=2, predicate=retries.if_exception_type( - exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, ), deadline=60.0, ), @@ -1384,7 +1357,6 @@ async def delete_snapshot( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1405,7 +1377,6 @@ async def delete_snapshot( # If we have keyword arguments corresponding to fields on the # request, apply these. - if name is not None: request.name = name @@ -1479,7 +1450,6 @@ async def create_backup( This corresponds to the ``backup`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1509,7 +1479,6 @@ async def create_backup( # If we have keyword arguments corresponding to fields on the # request, apply these. - if parent is not None: request.parent = parent if backup_id is not None: @@ -1568,7 +1537,6 @@ async def get_backup( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1593,7 +1561,6 @@ async def get_backup( # If we have keyword arguments corresponding to fields on the # request, apply these. - if name is not None: request.name = name @@ -1606,7 +1573,8 @@ async def get_backup( maximum=60.0, multiplier=2, predicate=retries.if_exception_type( - exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, ), deadline=60.0, ), @@ -1631,7 +1599,7 @@ async def update_backup( request: bigtable_table_admin.UpdateBackupRequest = None, *, backup: table.Backup = None, - update_mask: field_mask.FieldMask = None, + update_mask: field_mask_pb2.FieldMask = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -1665,7 +1633,6 @@ async def update_backup( This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1690,7 +1657,6 @@ async def update_backup( # If we have keyword arguments corresponding to fields on the # request, apply these. - if backup is not None: request.backup = backup if update_mask is not None: @@ -1741,7 +1707,6 @@ async def delete_backup( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1762,7 +1727,6 @@ async def delete_backup( # If we have keyword arguments corresponding to fields on the # request, apply these. - if name is not None: request.name = name @@ -1812,7 +1776,6 @@ async def list_backups( This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1842,7 +1805,6 @@ async def list_backups( # If we have keyword arguments corresponding to fields on the # request, apply these. - if parent is not None: request.parent = parent @@ -1855,7 +1817,8 @@ async def list_backups( maximum=60.0, multiplier=2, predicate=retries.if_exception_type( - exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, ), deadline=60.0, ), @@ -1890,8 +1853,8 @@ async def restore_table( metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Create a new table by restoring from a completed backup. The new - table must be in the same instance as the instance containing - the backup. The returned table [long-running + table must be in the same project as the instance containing the + backup. The returned table [long-running operation][google.longrunning.Operation] can be used to track the progress of the operation, and to cancel it. The [metadata][google.longrunning.Operation.metadata] field type is @@ -1903,7 +1866,6 @@ async def restore_table( request (:class:`google.cloud.bigtable_admin_v2.types.RestoreTableRequest`): The request object. The request for [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1920,7 +1882,6 @@ async def restore_table( """ # Create or coerce a protobuf request object. - request = bigtable_table_admin.RestoreTableRequest(request) # Wrap the RPC method; this adds retry and timeout information, @@ -1953,13 +1914,13 @@ async def restore_table( async def get_iam_policy( self, - request: iam_policy.GetIamPolicyRequest = None, + request: iam_policy_pb2.GetIamPolicyRequest = None, *, resource: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> policy.Policy: + ) -> policy_pb2.Policy: r"""Gets the access control policy for a Table or Backup resource. Returns an empty policy if the resource exists but does not have a policy set. @@ -1977,7 +1938,6 @@ async def get_iam_policy( This corresponds to the ``resource`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2056,10 +2016,9 @@ async def get_iam_policy( # The request isn't a proto-plus wrapped type, # so it must be constructed via keyword expansion. if isinstance(request, dict): - request = iam_policy.GetIamPolicyRequest(**request) - + request = iam_policy_pb2.GetIamPolicyRequest(**request) elif not request: - request = iam_policy.GetIamPolicyRequest(resource=resource,) + request = iam_policy_pb2.GetIamPolicyRequest(resource=resource,) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. @@ -2070,7 +2029,8 @@ async def get_iam_policy( maximum=60.0, multiplier=2, predicate=retries.if_exception_type( - exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, ), deadline=60.0, ), @@ -2092,13 +2052,13 @@ async def get_iam_policy( async def set_iam_policy( self, - request: iam_policy.SetIamPolicyRequest = None, + request: iam_policy_pb2.SetIamPolicyRequest = None, *, resource: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> policy.Policy: + ) -> policy_pb2.Policy: r"""Sets the access control policy on a Table or Backup resource. Replaces any existing policy. @@ -2115,7 +2075,6 @@ async def set_iam_policy( This corresponds to the ``resource`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2194,10 +2153,9 @@ async def set_iam_policy( # The request isn't a proto-plus wrapped type, # so it must be constructed via keyword expansion. if isinstance(request, dict): - request = iam_policy.SetIamPolicyRequest(**request) - + request = iam_policy_pb2.SetIamPolicyRequest(**request) elif not request: - request = iam_policy.SetIamPolicyRequest(resource=resource,) + request = iam_policy_pb2.SetIamPolicyRequest(resource=resource,) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. 
@@ -2221,14 +2179,14 @@ async def set_iam_policy( async def test_iam_permissions( self, - request: iam_policy.TestIamPermissionsRequest = None, + request: iam_policy_pb2.TestIamPermissionsRequest = None, *, resource: str = None, permissions: Sequence[str] = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> iam_policy.TestIamPermissionsResponse: + ) -> iam_policy_pb2.TestIamPermissionsResponse: r"""Returns permissions that the caller has on the specified Table or Backup resource. @@ -2254,7 +2212,6 @@ async def test_iam_permissions( This corresponds to the ``permissions`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2278,10 +2235,9 @@ async def test_iam_permissions( # The request isn't a proto-plus wrapped type, # so it must be constructed via keyword expansion. if isinstance(request, dict): - request = iam_policy.TestIamPermissionsRequest(**request) - + request = iam_policy_pb2.TestIamPermissionsRequest(**request) elif not request: - request = iam_policy.TestIamPermissionsRequest( + request = iam_policy_pb2.TestIamPermissionsRequest( resource=resource, permissions=permissions, ) @@ -2294,7 +2250,8 @@ async def test_iam_permissions( maximum=60.0, multiplier=2, predicate=retries.if_exception_type( - exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, ), deadline=60.0, ), diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index 7240aa1c3d06..afa920a55350 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- - # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,7 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# - from collections import OrderedDict from distutils import util import os @@ -23,10 +21,10 @@ import pkg_resources from google.api_core import client_options as client_options_lib # type: ignore -from google.api_core import exceptions # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore from google.api_core import gapic_v1 # type: ignore from google.api_core import retry as retries # type: ignore -from google.auth import credentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport import mtls # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore @@ -38,11 +36,10 @@ from google.cloud.bigtable_admin_v2.types import bigtable_table_admin from google.cloud.bigtable_admin_v2.types import table from google.cloud.bigtable_admin_v2.types import table as gba_table -from google.iam.v1 import iam_policy_pb2 as iam_policy # type: ignore -from google.iam.v1 import policy_pb2 as policy # type: ignore -from google.protobuf import field_mask_pb2 as field_mask # type: ignore -from google.protobuf import timestamp_pb2 as timestamp # type: ignore - +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore from .transports.base import BigtableTableAdminTransport, DEFAULT_CLIENT_INFO from .transports.grpc import BigtableTableAdminGrpcTransport from .transports.grpc_asyncio import BigtableTableAdminGrpcAsyncIOTransport @@ -65,7 +62,7 @@ class BigtableTableAdminClientMeta(type): def get_transport_class( cls, label: str = None, ) -> Type[BigtableTableAdminTransport]: - """Return an appropriate transport class. + """Returns an appropriate transport class. Args: label: The name of the desired transport. If none is @@ -93,7 +90,8 @@ class BigtableTableAdminClient(metaclass=BigtableTableAdminClientMeta): @staticmethod def _get_default_mtls_endpoint(api_endpoint): - """Convert api endpoint to mTLS endpoint. + """Converts api endpoint to mTLS endpoint. + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. Args: @@ -127,7 +125,8 @@ def _get_default_mtls_endpoint(api_endpoint): @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials info. + """Creates an instance of this client using the provided credentials + info. Args: info (dict): The service account private key info. @@ -144,7 +143,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials - file. + file. Args: filename (str): The path to the service account private key json @@ -163,23 +162,24 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): @property def transport(self) -> BigtableTableAdminTransport: - """Return the transport used by the client instance. + """Returns the transport used by the client instance. Returns: - BigtableTableAdminTransport: The transport used by the client instance. + BigtableTableAdminTransport: The transport used by the client + instance. 
""" return self._transport @staticmethod def backup_path(project: str, instance: str, cluster: str, backup: str,) -> str: - """Return a fully-qualified backup string.""" + """Returns a fully-qualified backup string.""" return "projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}".format( project=project, instance=instance, cluster=cluster, backup=backup, ) @staticmethod def parse_backup_path(path: str) -> Dict[str, str]: - """Parse a backup path into its component segments.""" + """Parses a backup path into its component segments.""" m = re.match( r"^projects/(?P.+?)/instances/(?P.+?)/clusters/(?P.+?)/backups/(?P.+?)$", path, @@ -188,14 +188,14 @@ def parse_backup_path(path: str) -> Dict[str, str]: @staticmethod def cluster_path(project: str, instance: str, cluster: str,) -> str: - """Return a fully-qualified cluster string.""" + """Returns a fully-qualified cluster string.""" return "projects/{project}/instances/{instance}/clusters/{cluster}".format( project=project, instance=instance, cluster=cluster, ) @staticmethod def parse_cluster_path(path: str) -> Dict[str, str]: - """Parse a cluster path into its component segments.""" + """Parses a cluster path into its component segments.""" m = re.match( r"^projects/(?P.+?)/instances/(?P.+?)/clusters/(?P.+?)$", path, @@ -210,7 +210,7 @@ def crypto_key_version_path( crypto_key: str, crypto_key_version: str, ) -> str: - """Return a fully-qualified crypto_key_version string.""" + """Returns a fully-qualified crypto_key_version string.""" return "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}/cryptoKeyVersions/{crypto_key_version}".format( project=project, location=location, @@ -221,7 +221,7 @@ def crypto_key_version_path( @staticmethod def parse_crypto_key_version_path(path: str) -> Dict[str, str]: - """Parse a crypto_key_version path into its component segments.""" + """Parses a crypto_key_version path into its component segments.""" m = re.match( r"^projects/(?P.+?)/locations/(?P.+?)/keyRings/(?P.+?)/cryptoKeys/(?P.+?)/cryptoKeyVersions/(?P.+?)$", path, @@ -230,27 +230,27 @@ def parse_crypto_key_version_path(path: str) -> Dict[str, str]: @staticmethod def instance_path(project: str, instance: str,) -> str: - """Return a fully-qualified instance string.""" + """Returns a fully-qualified instance string.""" return "projects/{project}/instances/{instance}".format( project=project, instance=instance, ) @staticmethod def parse_instance_path(path: str) -> Dict[str, str]: - """Parse a instance path into its component segments.""" + """Parses a instance path into its component segments.""" m = re.match(r"^projects/(?P.+?)/instances/(?P.+?)$", path) return m.groupdict() if m else {} @staticmethod def snapshot_path(project: str, instance: str, cluster: str, snapshot: str,) -> str: - """Return a fully-qualified snapshot string.""" + """Returns a fully-qualified snapshot string.""" return "projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}".format( project=project, instance=instance, cluster=cluster, snapshot=snapshot, ) @staticmethod def parse_snapshot_path(path: str) -> Dict[str, str]: - """Parse a snapshot path into its component segments.""" + """Parses a snapshot path into its component segments.""" m = re.match( r"^projects/(?P.+?)/instances/(?P.+?)/clusters/(?P.+?)/snapshots/(?P.+?)$", path, @@ -259,14 +259,14 @@ def parse_snapshot_path(path: str) -> Dict[str, str]: @staticmethod def table_path(project: str, instance: str, table: str,) -> str: - """Return a 
fully-qualified table string.""" + """Returns a fully-qualified table string.""" return "projects/{project}/instances/{instance}/tables/{table}".format( project=project, instance=instance, table=table, ) @staticmethod def parse_table_path(path: str) -> Dict[str, str]: - """Parse a table path into its component segments.""" + """Parses a table path into its component segments.""" m = re.match( r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)/tables/(?P<table>
.+?)$", path, @@ -275,7 +275,7 @@ def parse_table_path(path: str) -> Dict[str, str]: @staticmethod def common_billing_account_path(billing_account: str,) -> str: - """Return a fully-qualified billing_account string.""" + """Returns a fully-qualified billing_account string.""" return "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -288,7 +288,7 @@ def parse_common_billing_account_path(path: str) -> Dict[str, str]: @staticmethod def common_folder_path(folder: str,) -> str: - """Return a fully-qualified folder string.""" + """Returns a fully-qualified folder string.""" return "folders/{folder}".format(folder=folder,) @staticmethod @@ -299,7 +299,7 @@ def parse_common_folder_path(path: str) -> Dict[str, str]: @staticmethod def common_organization_path(organization: str,) -> str: - """Return a fully-qualified organization string.""" + """Returns a fully-qualified organization string.""" return "organizations/{organization}".format(organization=organization,) @staticmethod @@ -310,7 +310,7 @@ def parse_common_organization_path(path: str) -> Dict[str, str]: @staticmethod def common_project_path(project: str,) -> str: - """Return a fully-qualified project string.""" + """Returns a fully-qualified project string.""" return "projects/{project}".format(project=project,) @staticmethod @@ -321,7 +321,7 @@ def parse_common_project_path(path: str) -> Dict[str, str]: @staticmethod def common_location_path(project: str, location: str,) -> str: - """Return a fully-qualified location string.""" + """Returns a fully-qualified location string.""" return "projects/{project}/locations/{location}".format( project=project, location=location, ) @@ -335,12 +335,12 @@ def parse_common_location_path(path: str) -> Dict[str, str]: def __init__( self, *, - credentials: Optional[credentials.Credentials] = None, + credentials: Optional[ga_credentials.Credentials] = None, transport: Union[str, BigtableTableAdminTransport, None] = None, client_options: Optional[client_options_lib.ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiate the bigtable table admin client. + """Instantiates the bigtable table admin client. Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -395,9 +395,10 @@ def __init__( client_cert_source_func = client_options.client_cert_source else: is_mtls = mtls.has_default_client_cert_source() - client_cert_source_func = ( - mtls.default_client_cert_source() if is_mtls else None - ) + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None # Figure out which api endpoint to use. if client_options.api_endpoint is not None: @@ -409,12 +410,14 @@ def __init__( elif use_mtls_env == "always": api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": - api_endpoint = ( - self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT - ) + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT else: raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" ) # Save or instantiate the transport. @@ -429,8 +432,8 @@ def __init__( ) if client_options.scopes: raise ValueError( - "When providing a transport instance, " - "provide its scopes directly." + "When providing a transport instance, provide its scopes " + "directly." 
) self._transport = transport else: @@ -486,7 +489,6 @@ def create_table( This corresponds to the ``table`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -517,10 +519,8 @@ def create_table( # there are no flattened fields. if not isinstance(request, bigtable_table_admin.CreateTableRequest): request = bigtable_table_admin.CreateTableRequest(request) - # If we have keyword arguments corresponding to fields on the # request, apply these. - if parent is not None: request.parent = parent if table_id is not None: @@ -600,7 +600,6 @@ def create_table_from_snapshot( This corresponds to the ``source_snapshot`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -632,10 +631,8 @@ def create_table_from_snapshot( # there are no flattened fields. if not isinstance(request, bigtable_table_admin.CreateTableFromSnapshotRequest): request = bigtable_table_admin.CreateTableFromSnapshotRequest(request) - # If we have keyword arguments corresponding to fields on the # request, apply these. - if parent is not None: request.parent = parent if table_id is not None: @@ -692,7 +689,6 @@ def list_tables( This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -724,10 +720,8 @@ def list_tables( # there are no flattened fields. if not isinstance(request, bigtable_table_admin.ListTablesRequest): request = bigtable_table_admin.ListTablesRequest(request) - # If we have keyword arguments corresponding to fields on the # request, apply these. - if parent is not None: request.parent = parent @@ -776,7 +770,6 @@ def get_table( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -807,10 +800,8 @@ def get_table( # there are no flattened fields. if not isinstance(request, bigtable_table_admin.GetTableRequest): request = bigtable_table_admin.GetTableRequest(request) - # If we have keyword arguments corresponding to fields on the # request, apply these. - if name is not None: request.name = name @@ -854,7 +845,6 @@ def delete_table( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -877,10 +867,8 @@ def delete_table( # there are no flattened fields. if not isinstance(request, bigtable_table_admin.DeleteTableRequest): request = bigtable_table_admin.DeleteTableRequest(request) - # If we have keyword arguments corresponding to fields on the # request, apply these. - if name is not None: request.name = name @@ -941,7 +929,6 @@ def modify_column_families( This corresponds to the ``modifications`` field on the ``request`` instance; if ``request`` is provided, this should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -972,10 +959,8 @@ def modify_column_families( # there are no flattened fields. if not isinstance(request, bigtable_table_admin.ModifyColumnFamiliesRequest): request = bigtable_table_admin.ModifyColumnFamiliesRequest(request) - # If we have keyword arguments corresponding to fields on the # request, apply these. - if name is not None: request.name = name if modifications is not None: @@ -1014,7 +999,6 @@ def drop_row_range( request (google.cloud.bigtable_admin_v2.types.DropRowRangeRequest): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1022,7 +1006,6 @@ def drop_row_range( sent along with the request as metadata. """ # Create or coerce a protobuf request object. - # Minor optimization to avoid making a copy if the user passes # in a bigtable_table_admin.DropRowRangeRequest. # There's no risk of modifying the input as we've already verified @@ -1072,7 +1055,6 @@ def generate_consistency_token( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1103,10 +1085,8 @@ def generate_consistency_token( request, bigtable_table_admin.GenerateConsistencyTokenRequest ): request = bigtable_table_admin.GenerateConsistencyTokenRequest(request) - # If we have keyword arguments corresponding to fields on the # request, apply these. - if name is not None: request.name = name @@ -1162,7 +1142,6 @@ def check_consistency( This corresponds to the ``consistency_token`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1191,10 +1170,8 @@ def check_consistency( # there are no flattened fields. if not isinstance(request, bigtable_table_admin.CheckConsistencyRequest): request = bigtable_table_admin.CheckConsistencyRequest(request) - # If we have keyword arguments corresponding to fields on the # request, apply these. - if name is not None: request.name = name if consistency_token is not None: @@ -1279,7 +1256,6 @@ def snapshot_table( This corresponds to the ``description`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1318,10 +1294,8 @@ def snapshot_table( # there are no flattened fields. if not isinstance(request, bigtable_table_admin.SnapshotTableRequest): request = bigtable_table_admin.SnapshotTableRequest(request) - # If we have keyword arguments corresponding to fields on the # request, apply these. - if name is not None: request.name = name if cluster is not None: @@ -1391,7 +1365,6 @@ def get_snapshot( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. 
timeout (float): The timeout for this request. @@ -1430,10 +1403,8 @@ def get_snapshot( # there are no flattened fields. if not isinstance(request, bigtable_table_admin.GetSnapshotRequest): request = bigtable_table_admin.GetSnapshotRequest(request) - # If we have keyword arguments corresponding to fields on the # request, apply these. - if name is not None: request.name = name @@ -1492,7 +1463,6 @@ def list_snapshots( This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1531,10 +1501,8 @@ def list_snapshots( # there are no flattened fields. if not isinstance(request, bigtable_table_admin.ListSnapshotsRequest): request = bigtable_table_admin.ListSnapshotsRequest(request) - # If we have keyword arguments corresponding to fields on the # request, apply these. - if parent is not None: request.parent = parent @@ -1595,7 +1563,6 @@ def delete_snapshot( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1618,10 +1585,8 @@ def delete_snapshot( # there are no flattened fields. if not isinstance(request, bigtable_table_admin.DeleteSnapshotRequest): request = bigtable_table_admin.DeleteSnapshotRequest(request) - # If we have keyword arguments corresponding to fields on the # request, apply these. - if name is not None: request.name = name @@ -1691,7 +1656,6 @@ def create_backup( This corresponds to the ``backup`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1723,10 +1687,8 @@ def create_backup( # there are no flattened fields. if not isinstance(request, bigtable_table_admin.CreateBackupRequest): request = bigtable_table_admin.CreateBackupRequest(request) - # If we have keyword arguments corresponding to fields on the # request, apply these. - if parent is not None: request.parent = parent if backup_id is not None: @@ -1781,7 +1743,6 @@ def get_backup( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1808,10 +1769,8 @@ def get_backup( # there are no flattened fields. if not isinstance(request, bigtable_table_admin.GetBackupRequest): request = bigtable_table_admin.GetBackupRequest(request) - # If we have keyword arguments corresponding to fields on the # request, apply these. - if name is not None: request.name = name @@ -1836,7 +1795,7 @@ def update_backup( request: bigtable_table_admin.UpdateBackupRequest = None, *, backup: table.Backup = None, - update_mask: field_mask.FieldMask = None, + update_mask: field_mask_pb2.FieldMask = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -1870,7 +1829,6 @@ def update_backup( This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1897,10 +1855,8 @@ def update_backup( # there are no flattened fields. if not isinstance(request, bigtable_table_admin.UpdateBackupRequest): request = bigtable_table_admin.UpdateBackupRequest(request) - # If we have keyword arguments corresponding to fields on the # request, apply these. - if backup is not None: request.backup = backup if update_mask is not None: @@ -1947,7 +1903,6 @@ def delete_backup( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1970,10 +1925,8 @@ def delete_backup( # there are no flattened fields. if not isinstance(request, bigtable_table_admin.DeleteBackupRequest): request = bigtable_table_admin.DeleteBackupRequest(request) - # If we have keyword arguments corresponding to fields on the # request, apply these. - if name is not None: request.name = name @@ -2019,7 +1972,6 @@ def list_backups( This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2051,10 +2003,8 @@ def list_backups( # there are no flattened fields. if not isinstance(request, bigtable_table_admin.ListBackupsRequest): request = bigtable_table_admin.ListBackupsRequest(request) - # If we have keyword arguments corresponding to fields on the # request, apply these. - if parent is not None: request.parent = parent @@ -2089,8 +2039,8 @@ def restore_table( metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Create a new table by restoring from a completed backup. The new - table must be in the same instance as the instance containing - the backup. The returned table [long-running + table must be in the same project as the instance containing the + backup. The returned table [long-running operation][google.longrunning.Operation] can be used to track the progress of the operation, and to cancel it. The [metadata][google.longrunning.Operation.metadata] field type is @@ -2102,7 +2052,6 @@ def restore_table( request (google.cloud.bigtable_admin_v2.types.RestoreTableRequest): The request object. The request for [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2119,7 +2068,6 @@ def restore_table( """ # Create or coerce a protobuf request object. - # Minor optimization to avoid making a copy if the user passes # in a bigtable_table_admin.RestoreTableRequest. # There's no risk of modifying the input as we've already verified @@ -2153,13 +2101,13 @@ def restore_table( def get_iam_policy( self, - request: iam_policy.GetIamPolicyRequest = None, + request: iam_policy_pb2.GetIamPolicyRequest = None, *, resource: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> policy.Policy: + ) -> policy_pb2.Policy: r"""Gets the access control policy for a Table or Backup resource. Returns an empty policy if the resource exists but does not have a policy set. 
@@ -2177,7 +2125,6 @@ def get_iam_policy( This corresponds to the ``resource`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2256,11 +2203,10 @@ def get_iam_policy( if isinstance(request, dict): # The request isn't a proto-plus wrapped type, # so it must be constructed via keyword expansion. - request = iam_policy.GetIamPolicyRequest(**request) + request = iam_policy_pb2.GetIamPolicyRequest(**request) elif not request: # Null request, just make one. - request = iam_policy.GetIamPolicyRequest() - + request = iam_policy_pb2.GetIamPolicyRequest() if resource is not None: request.resource = resource @@ -2282,13 +2228,13 @@ def get_iam_policy( def set_iam_policy( self, - request: iam_policy.SetIamPolicyRequest = None, + request: iam_policy_pb2.SetIamPolicyRequest = None, *, resource: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> policy.Policy: + ) -> policy_pb2.Policy: r"""Sets the access control policy on a Table or Backup resource. Replaces any existing policy. @@ -2305,7 +2251,6 @@ def set_iam_policy( This corresponds to the ``resource`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2384,11 +2329,10 @@ def set_iam_policy( if isinstance(request, dict): # The request isn't a proto-plus wrapped type, # so it must be constructed via keyword expansion. - request = iam_policy.SetIamPolicyRequest(**request) + request = iam_policy_pb2.SetIamPolicyRequest(**request) elif not request: # Null request, just make one. - request = iam_policy.SetIamPolicyRequest() - + request = iam_policy_pb2.SetIamPolicyRequest() if resource is not None: request.resource = resource @@ -2410,14 +2354,14 @@ def set_iam_policy( def test_iam_permissions( self, - request: iam_policy.TestIamPermissionsRequest = None, + request: iam_policy_pb2.TestIamPermissionsRequest = None, *, resource: str = None, permissions: Sequence[str] = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> iam_policy.TestIamPermissionsResponse: + ) -> iam_policy_pb2.TestIamPermissionsResponse: r"""Returns permissions that the caller has on the specified Table or Backup resource. @@ -2443,7 +2387,6 @@ def test_iam_permissions( This corresponds to the ``permissions`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2467,14 +2410,12 @@ def test_iam_permissions( if isinstance(request, dict): # The request isn't a proto-plus wrapped type, # so it must be constructed via keyword expansion. - request = iam_policy.TestIamPermissionsRequest(**request) + request = iam_policy_pb2.TestIamPermissionsRequest(**request) elif not request: # Null request, just make one. 
- request = iam_policy.TestIamPermissionsRequest() - + request = iam_policy_pb2.TestIamPermissionsRequest() if resource is not None: request.resource = resource - if permissions: request.permissions.extend(permissions) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py index 203d94f83e51..84ead0192a12 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- - # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,7 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. # - from typing import ( Any, AsyncIterable, @@ -118,7 +116,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. Args: method (Callable): The method that was originally called, and @@ -246,7 +244,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. Args: method (Callable): The method that was originally called, and @@ -374,7 +372,7 @@ def __init__( *, metadata: Sequence[Tuple[str, str]] = () ): - """Instantiate the pager. + """Instantiates the pager. Args: method (Callable): The method that was originally called, and diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py index 8e9ae114dd57..b1231802cb64 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- - # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,7 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. # - from collections import OrderedDict from typing import Dict, Type diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py index 731f83280081..c61021e0676a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- - # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,26 +13,26 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# - import abc -import typing +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import packaging.version import pkg_resources -from google import auth # type: ignore -from google.api_core import exceptions # type: ignore +import google.auth # type: ignore +import google.api_core # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore from google.api_core import gapic_v1 # type: ignore from google.api_core import retry as retries # type: ignore from google.api_core import operations_v1 # type: ignore -from google.auth import credentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore from google.cloud.bigtable_admin_v2.types import bigtable_table_admin from google.cloud.bigtable_admin_v2.types import table from google.cloud.bigtable_admin_v2.types import table as gba_table -from google.iam.v1 import iam_policy_pb2 as iam_policy # type: ignore -from google.iam.v1 import policy_pb2 as policy # type: ignore -from google.longrunning import operations_pb2 as operations # type: ignore -from google.protobuf import empty_pb2 as empty # type: ignore - +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( @@ -44,6 +43,17 @@ except pkg_resources.DistributionNotFound: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() +try: + # google.auth.__version__ was added in 1.26.0 + _GOOGLE_AUTH_VERSION = google.auth.__version__ +except AttributeError: + try: # try pkg_resources if it is available + _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version + except pkg_resources.DistributionNotFound: # pragma: NO COVER + _GOOGLE_AUTH_VERSION = None + +_API_CORE_VERSION = google.api_core.__version__ + class BigtableTableAdminTransport(abc.ABC): """Abstract transport class for BigtableTableAdmin.""" @@ -57,21 +67,24 @@ class BigtableTableAdminTransport(abc.ABC): "https://www.googleapis.com/auth/cloud-platform.read-only", ) + DEFAULT_HOST: str = "bigtableadmin.googleapis.com" + def __init__( self, *, - host: str = "bigtableadmin.googleapis.com", - credentials: credentials.Credentials = None, - credentials_file: typing.Optional[str] = None, - scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES, - quota_project_id: typing.Optional[str] = None, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, **kwargs, ) -> None: """Instantiate the transport. Args: - host (Optional[str]): The hostname to connect to. + host (Optional[str]): + The hostname to connect to. credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none @@ -80,7 +93,7 @@ def __init__( credentials_file (Optional[str]): A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is mutually exclusive with credentials. - scope (Optional[Sequence[str]]): A list of scopes. + scopes (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. 
client_info (google.api_core.gapic_v1.client_info.ClientInfo): @@ -94,29 +107,76 @@ def __init__( host += ":443" self._host = host + scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) + # Save the scopes. self._scopes = scopes or self.AUTH_SCOPES # If no credentials are provided, then determine the appropriate # defaults. if credentials and credentials_file: - raise exceptions.DuplicateCredentialArgs( + raise core_exceptions.DuplicateCredentialArgs( "'credentials_file' and 'credentials' are mutually exclusive" ) if credentials_file is not None: - credentials, _ = auth.load_credentials_from_file( - credentials_file, scopes=self._scopes, quota_project_id=quota_project_id + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) elif credentials is None: - credentials, _ = auth.default( - scopes=self._scopes, quota_project_id=quota_project_id + credentials, _ = google.auth.default( + **scopes_kwargs, quota_project_id=quota_project_id ) # Save the credentials. self._credentials = credentials + # TODO(busunkim): These two class methods are in the base transport + # to avoid duplicating code across the transport classes. These functions + # should be deleted once the minimum required versions of google-api-core + # and google-auth are increased. + + # TODO: Remove this function once google-auth >= 1.25.0 is required + @classmethod + def _get_scopes_kwargs( + cls, host: str, scopes: Optional[Sequence[str]] + ) -> Dict[str, Optional[Sequence[str]]]: + """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version""" + + scopes_kwargs = {} + + if _GOOGLE_AUTH_VERSION and ( + packaging.version.parse(_GOOGLE_AUTH_VERSION) + >= packaging.version.parse("1.25.0") + ): + scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES} + else: + scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES} + + return scopes_kwargs + + # TODO: Remove this function once google-api-core >= 1.26.0 is required + @classmethod + def _get_self_signed_jwt_kwargs( + cls, host: str, scopes: Optional[Sequence[str]] + ) -> Dict[str, Union[Optional[Sequence[str]], str]]: + """Returns kwargs to pass to grpc_helpers.create_channel depending on the google-api-core version""" + + self_signed_jwt_kwargs: Dict[str, Union[Optional[Sequence[str]], str]] = {} + + if _API_CORE_VERSION and ( + packaging.version.parse(_API_CORE_VERSION) + >= packaging.version.parse("1.26.0") + ): + self_signed_jwt_kwargs["default_scopes"] = cls.AUTH_SCOPES + self_signed_jwt_kwargs["scopes"] = scopes + self_signed_jwt_kwargs["default_host"] = cls.DEFAULT_HOST + else: + self_signed_jwt_kwargs["scopes"] = scopes or cls.AUTH_SCOPES + + return self_signed_jwt_kwargs + def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. 
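
# Aside (illustrative only): a standalone sketch of the version gate that
# _get_scopes_kwargs above applies. google-auth >= 1.25.0 understands both
# `scopes` and `default_scopes`; older releases only accept `scopes`. Assumes
# the `packaging` and `google-auth` distributions are available; AUTH_SCOPES
# here stands in for the transport class attribute.
import packaging.version
import google.auth

AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)

def scopes_kwargs(scopes=None):
    auth_version = getattr(google.auth, "__version__", None)
    if auth_version and (
        packaging.version.parse(auth_version) >= packaging.version.parse("1.25.0")
    ):
        return {"scopes": scopes, "default_scopes": AUTH_SCOPES}
    return {"scopes": scopes or AUTH_SCOPES}

# e.g. google.auth.default(**scopes_kwargs()) would receive the right kwargs.
print(scopes_kwargs())
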
self._wrapped_methods = { @@ -135,7 +195,8 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=2, predicate=retries.if_exception_type( - exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, ), deadline=60.0, ), @@ -149,7 +210,8 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=2, predicate=retries.if_exception_type( - exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, ), deadline=60.0, ), @@ -174,7 +236,8 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=2, predicate=retries.if_exception_type( - exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, ), deadline=60.0, ), @@ -188,7 +251,8 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=2, predicate=retries.if_exception_type( - exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, ), deadline=60.0, ), @@ -205,7 +269,8 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=2, predicate=retries.if_exception_type( - exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, ), deadline=60.0, ), @@ -219,7 +284,8 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=2, predicate=retries.if_exception_type( - exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, ), deadline=60.0, ), @@ -239,7 +305,8 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=2, predicate=retries.if_exception_type( - exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, ), deadline=60.0, ), @@ -259,7 +326,8 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=2, predicate=retries.if_exception_type( - exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, ), deadline=60.0, ), @@ -276,7 +344,8 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=2, predicate=retries.if_exception_type( - exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, ), deadline=60.0, ), @@ -293,7 +362,8 @@ def _prep_wrapped_messages(self, client_info): maximum=60.0, multiplier=2, predicate=retries.if_exception_type( - exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, ), deadline=60.0, ), @@ -310,29 +380,29 @@ def operations_client(self) -> operations_v1.OperationsClient: @property def create_table( self, - ) -> typing.Callable[ + ) -> Callable[ [bigtable_table_admin.CreateTableRequest], - typing.Union[gba_table.Table, typing.Awaitable[gba_table.Table]], + Union[gba_table.Table, Awaitable[gba_table.Table]], ]: raise NotImplementedError() @property def create_table_from_snapshot( self, - ) -> typing.Callable[ + ) -> Callable[ [bigtable_table_admin.CreateTableFromSnapshotRequest], - typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + Union[operations_pb2.Operation, 
Awaitable[operations_pb2.Operation]], ]: raise NotImplementedError() @property def list_tables( self, - ) -> typing.Callable[ + ) -> Callable[ [bigtable_table_admin.ListTablesRequest], - typing.Union[ + Union[ bigtable_table_admin.ListTablesResponse, - typing.Awaitable[bigtable_table_admin.ListTablesResponse], + Awaitable[bigtable_table_admin.ListTablesResponse], ], ]: raise NotImplementedError() @@ -340,47 +410,47 @@ def list_tables( @property def get_table( self, - ) -> typing.Callable[ + ) -> Callable[ [bigtable_table_admin.GetTableRequest], - typing.Union[table.Table, typing.Awaitable[table.Table]], + Union[table.Table, Awaitable[table.Table]], ]: raise NotImplementedError() @property def delete_table( self, - ) -> typing.Callable[ + ) -> Callable[ [bigtable_table_admin.DeleteTableRequest], - typing.Union[empty.Empty, typing.Awaitable[empty.Empty]], + Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]], ]: raise NotImplementedError() @property def modify_column_families( self, - ) -> typing.Callable[ + ) -> Callable[ [bigtable_table_admin.ModifyColumnFamiliesRequest], - typing.Union[table.Table, typing.Awaitable[table.Table]], + Union[table.Table, Awaitable[table.Table]], ]: raise NotImplementedError() @property def drop_row_range( self, - ) -> typing.Callable[ + ) -> Callable[ [bigtable_table_admin.DropRowRangeRequest], - typing.Union[empty.Empty, typing.Awaitable[empty.Empty]], + Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]], ]: raise NotImplementedError() @property def generate_consistency_token( self, - ) -> typing.Callable[ + ) -> Callable[ [bigtable_table_admin.GenerateConsistencyTokenRequest], - typing.Union[ + Union[ bigtable_table_admin.GenerateConsistencyTokenResponse, - typing.Awaitable[bigtable_table_admin.GenerateConsistencyTokenResponse], + Awaitable[bigtable_table_admin.GenerateConsistencyTokenResponse], ], ]: raise NotImplementedError() @@ -388,11 +458,11 @@ def generate_consistency_token( @property def check_consistency( self, - ) -> typing.Callable[ + ) -> Callable[ [bigtable_table_admin.CheckConsistencyRequest], - typing.Union[ + Union[ bigtable_table_admin.CheckConsistencyResponse, - typing.Awaitable[bigtable_table_admin.CheckConsistencyResponse], + Awaitable[bigtable_table_admin.CheckConsistencyResponse], ], ]: raise NotImplementedError() @@ -400,29 +470,29 @@ def check_consistency( @property def snapshot_table( self, - ) -> typing.Callable[ + ) -> Callable[ [bigtable_table_admin.SnapshotTableRequest], - typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], ]: raise NotImplementedError() @property def get_snapshot( self, - ) -> typing.Callable[ + ) -> Callable[ [bigtable_table_admin.GetSnapshotRequest], - typing.Union[table.Snapshot, typing.Awaitable[table.Snapshot]], + Union[table.Snapshot, Awaitable[table.Snapshot]], ]: raise NotImplementedError() @property def list_snapshots( self, - ) -> typing.Callable[ + ) -> Callable[ [bigtable_table_admin.ListSnapshotsRequest], - typing.Union[ + Union[ bigtable_table_admin.ListSnapshotsResponse, - typing.Awaitable[bigtable_table_admin.ListSnapshotsResponse], + Awaitable[bigtable_table_admin.ListSnapshotsResponse], ], ]: raise NotImplementedError() @@ -430,56 +500,56 @@ def list_snapshots( @property def delete_snapshot( self, - ) -> typing.Callable[ + ) -> Callable[ [bigtable_table_admin.DeleteSnapshotRequest], - typing.Union[empty.Empty, typing.Awaitable[empty.Empty]], + Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]], ]: 
raise NotImplementedError() @property def create_backup( self, - ) -> typing.Callable[ + ) -> Callable[ [bigtable_table_admin.CreateBackupRequest], - typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], ]: raise NotImplementedError() @property def get_backup( self, - ) -> typing.Callable[ + ) -> Callable[ [bigtable_table_admin.GetBackupRequest], - typing.Union[table.Backup, typing.Awaitable[table.Backup]], + Union[table.Backup, Awaitable[table.Backup]], ]: raise NotImplementedError() @property def update_backup( self, - ) -> typing.Callable[ + ) -> Callable[ [bigtable_table_admin.UpdateBackupRequest], - typing.Union[table.Backup, typing.Awaitable[table.Backup]], + Union[table.Backup, Awaitable[table.Backup]], ]: raise NotImplementedError() @property def delete_backup( self, - ) -> typing.Callable[ + ) -> Callable[ [bigtable_table_admin.DeleteBackupRequest], - typing.Union[empty.Empty, typing.Awaitable[empty.Empty]], + Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]], ]: raise NotImplementedError() @property def list_backups( self, - ) -> typing.Callable[ + ) -> Callable[ [bigtable_table_admin.ListBackupsRequest], - typing.Union[ + Union[ bigtable_table_admin.ListBackupsResponse, - typing.Awaitable[bigtable_table_admin.ListBackupsResponse], + Awaitable[bigtable_table_admin.ListBackupsResponse], ], ]: raise NotImplementedError() @@ -487,38 +557,38 @@ def list_backups( @property def restore_table( self, - ) -> typing.Callable[ + ) -> Callable[ [bigtable_table_admin.RestoreTableRequest], - typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], ]: raise NotImplementedError() @property def get_iam_policy( self, - ) -> typing.Callable[ - [iam_policy.GetIamPolicyRequest], - typing.Union[policy.Policy, typing.Awaitable[policy.Policy]], + ) -> Callable[ + [iam_policy_pb2.GetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], ]: raise NotImplementedError() @property def set_iam_policy( self, - ) -> typing.Callable[ - [iam_policy.SetIamPolicyRequest], - typing.Union[policy.Policy, typing.Awaitable[policy.Policy]], + ) -> Callable[ + [iam_policy_pb2.SetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], ]: raise NotImplementedError() @property def test_iam_permissions( self, - ) -> typing.Callable[ - [iam_policy.TestIamPermissionsRequest], - typing.Union[ - iam_policy.TestIamPermissionsResponse, - typing.Awaitable[iam_policy.TestIamPermissionsResponse], + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + Union[ + iam_policy_pb2.TestIamPermissionsResponse, + Awaitable[iam_policy_pb2.TestIamPermissionsResponse], ], ]: raise NotImplementedError() diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py index 6b890ff7f253..337bdcb1caff 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- - # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,15 +13,14 @@ # See the License for the specific language governing 
permissions and # limitations under the License. # - import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple +from typing import Callable, Dict, Optional, Sequence, Tuple, Union from google.api_core import grpc_helpers # type: ignore from google.api_core import operations_v1 # type: ignore from google.api_core import gapic_v1 # type: ignore -from google import auth # type: ignore -from google.auth import credentials # type: ignore +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore import grpc # type: ignore @@ -30,11 +28,10 @@ from google.cloud.bigtable_admin_v2.types import bigtable_table_admin from google.cloud.bigtable_admin_v2.types import table from google.cloud.bigtable_admin_v2.types import table as gba_table -from google.iam.v1 import iam_policy_pb2 as iam_policy # type: ignore -from google.iam.v1 import policy_pb2 as policy # type: ignore -from google.longrunning import operations_pb2 as operations # type: ignore -from google.protobuf import empty_pb2 as empty # type: ignore - +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore from .base import BigtableTableAdminTransport, DEFAULT_CLIENT_INFO @@ -61,7 +58,7 @@ def __init__( self, *, host: str = "bigtableadmin.googleapis.com", - credentials: credentials.Credentials = None, + credentials: ga_credentials.Credentials = None, credentials_file: str = None, scopes: Sequence[str] = None, channel: grpc.Channel = None, @@ -75,7 +72,8 @@ def __init__( """Instantiate the transport. Args: - host (Optional[str]): The hostname to connect to. + host (Optional[str]): + The hostname to connect to. credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none @@ -186,7 +184,7 @@ def __init__( def create_channel( cls, host: str = "bigtableadmin.googleapis.com", - credentials: credentials.Credentials = None, + credentials: ga_credentials.Credentials = None, credentials_file: str = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, @@ -217,13 +215,15 @@ def create_channel( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ - scopes = scopes or cls.AUTH_SCOPES + + self_signed_jwt_kwargs = cls._get_self_signed_jwt_kwargs(host, scopes) + return grpc_helpers.create_channel( host, credentials=credentials, credentials_file=credentials_file, - scopes=scopes, quota_project_id=quota_project_id, + **self_signed_jwt_kwargs, **kwargs, ) @@ -279,7 +279,7 @@ def create_table( def create_table_from_snapshot( self, ) -> Callable[ - [bigtable_table_admin.CreateTableFromSnapshotRequest], operations.Operation + [bigtable_table_admin.CreateTableFromSnapshotRequest], operations_pb2.Operation ]: r"""Return a callable for the create table from snapshot method over gRPC. 
@@ -307,7 +307,7 @@ def create_table_from_snapshot( self._stubs["create_table_from_snapshot"] = self.grpc_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTableFromSnapshot", request_serializer=bigtable_table_admin.CreateTableFromSnapshotRequest.serialize, - response_deserializer=operations.Operation.FromString, + response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["create_table_from_snapshot"] @@ -369,7 +369,7 @@ def get_table( @property def delete_table( self, - ) -> Callable[[bigtable_table_admin.DeleteTableRequest], empty.Empty]: + ) -> Callable[[bigtable_table_admin.DeleteTableRequest], empty_pb2.Empty]: r"""Return a callable for the delete table method over gRPC. Permanently deletes a specified table and all of its @@ -389,7 +389,7 @@ def delete_table( self._stubs["delete_table"] = self.grpc_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable", request_serializer=bigtable_table_admin.DeleteTableRequest.serialize, - response_deserializer=empty.Empty.FromString, + response_deserializer=empty_pb2.Empty.FromString, ) return self._stubs["delete_table"] @@ -426,7 +426,7 @@ def modify_column_families( @property def drop_row_range( self, - ) -> Callable[[bigtable_table_admin.DropRowRangeRequest], empty.Empty]: + ) -> Callable[[bigtable_table_admin.DropRowRangeRequest], empty_pb2.Empty]: r"""Return a callable for the drop row range method over gRPC. Permanently drop/delete a row range from a specified @@ -448,7 +448,7 @@ def drop_row_range( self._stubs["drop_row_range"] = self.grpc_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/DropRowRange", request_serializer=bigtable_table_admin.DropRowRangeRequest.serialize, - response_deserializer=empty.Empty.FromString, + response_deserializer=empty_pb2.Empty.FromString, ) return self._stubs["drop_row_range"] @@ -520,7 +520,9 @@ def check_consistency( @property def snapshot_table( self, - ) -> Callable[[bigtable_table_admin.SnapshotTableRequest], operations.Operation]: + ) -> Callable[ + [bigtable_table_admin.SnapshotTableRequest], operations_pb2.Operation + ]: r"""Return a callable for the snapshot table method over gRPC. Creates a new snapshot in the specified cluster from @@ -547,7 +549,7 @@ def snapshot_table( self._stubs["snapshot_table"] = self.grpc_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/SnapshotTable", request_serializer=bigtable_table_admin.SnapshotTableRequest.serialize, - response_deserializer=operations.Operation.FromString, + response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["snapshot_table"] @@ -623,7 +625,7 @@ def list_snapshots( @property def delete_snapshot( self, - ) -> Callable[[bigtable_table_admin.DeleteSnapshotRequest], empty.Empty]: + ) -> Callable[[bigtable_table_admin.DeleteSnapshotRequest], empty_pb2.Empty]: r"""Return a callable for the delete snapshot method over gRPC. Permanently deletes the specified snapshot. 
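
# Aside (illustrative only): the wrapped methods configured in
# _prep_wrapped_messages retry with exponential backoff only on
# DeadlineExceeded and ServiceUnavailable. A minimal sketch of that policy;
# `flaky_call` is a hypothetical callable used purely for demonstration.
from google.api_core import exceptions as core_exceptions
from google.api_core import retry as retries

retry_policy = retries.Retry(
    initial=1.0,
    maximum=60.0,
    multiplier=2,
    predicate=retries.if_exception_type(
        core_exceptions.DeadlineExceeded,
        core_exceptions.ServiceUnavailable,
    ),
    deadline=60.0,
)

def flaky_call():
    return "ok"

# Retry wraps the callable and re-invokes it on the listed exceptions.
print(retry_policy(flaky_call)())
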
@@ -648,14 +650,14 @@ def delete_snapshot( self._stubs["delete_snapshot"] = self.grpc_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteSnapshot", request_serializer=bigtable_table_admin.DeleteSnapshotRequest.serialize, - response_deserializer=empty.Empty.FromString, + response_deserializer=empty_pb2.Empty.FromString, ) return self._stubs["delete_snapshot"] @property def create_backup( self, - ) -> Callable[[bigtable_table_admin.CreateBackupRequest], operations.Operation]: + ) -> Callable[[bigtable_table_admin.CreateBackupRequest], operations_pb2.Operation]: r"""Return a callable for the create backup method over gRPC. Starts creating a new Cloud Bigtable Backup. The returned backup @@ -682,7 +684,7 @@ def create_backup( self._stubs["create_backup"] = self.grpc_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/CreateBackup", request_serializer=bigtable_table_admin.CreateBackupRequest.serialize, - response_deserializer=operations.Operation.FromString, + response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["create_backup"] @@ -742,7 +744,7 @@ def update_backup( @property def delete_backup( self, - ) -> Callable[[bigtable_table_admin.DeleteBackupRequest], empty.Empty]: + ) -> Callable[[bigtable_table_admin.DeleteBackupRequest], empty_pb2.Empty]: r"""Return a callable for the delete backup method over gRPC. Deletes a pending or completed Cloud Bigtable backup. @@ -761,7 +763,7 @@ def delete_backup( self._stubs["delete_backup"] = self.grpc_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteBackup", request_serializer=bigtable_table_admin.DeleteBackupRequest.serialize, - response_deserializer=empty.Empty.FromString, + response_deserializer=empty_pb2.Empty.FromString, ) return self._stubs["delete_backup"] @@ -798,12 +800,12 @@ def list_backups( @property def restore_table( self, - ) -> Callable[[bigtable_table_admin.RestoreTableRequest], operations.Operation]: + ) -> Callable[[bigtable_table_admin.RestoreTableRequest], operations_pb2.Operation]: r"""Return a callable for the restore table method over gRPC. Create a new table by restoring from a completed backup. The new - table must be in the same instance as the instance containing - the backup. The returned table [long-running + table must be in the same project as the instance containing the + backup. The returned table [long-running operation][google.longrunning.Operation] can be used to track the progress of the operation, and to cancel it. The [metadata][google.longrunning.Operation.metadata] field type is @@ -825,14 +827,14 @@ def restore_table( self._stubs["restore_table"] = self.grpc_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/RestoreTable", request_serializer=bigtable_table_admin.RestoreTableRequest.serialize, - response_deserializer=operations.Operation.FromString, + response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["restore_table"] @property def get_iam_policy( self, - ) -> Callable[[iam_policy.GetIamPolicyRequest], policy.Policy]: + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: r"""Return a callable for the get iam policy method over gRPC. 
Gets the access control policy for a Table or Backup @@ -852,15 +854,15 @@ def get_iam_policy( if "get_iam_policy" not in self._stubs: self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/GetIamPolicy", - request_serializer=iam_policy.GetIamPolicyRequest.SerializeToString, - response_deserializer=policy.Policy.FromString, + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, ) return self._stubs["get_iam_policy"] @property def set_iam_policy( self, - ) -> Callable[[iam_policy.SetIamPolicyRequest], policy.Policy]: + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: r"""Return a callable for the set iam policy method over gRPC. Sets the access control policy on a Table or Backup @@ -879,8 +881,8 @@ def set_iam_policy( if "set_iam_policy" not in self._stubs: self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/SetIamPolicy", - request_serializer=iam_policy.SetIamPolicyRequest.SerializeToString, - response_deserializer=policy.Policy.FromString, + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, ) return self._stubs["set_iam_policy"] @@ -888,7 +890,8 @@ def set_iam_policy( def test_iam_permissions( self, ) -> Callable[ - [iam_policy.TestIamPermissionsRequest], iam_policy.TestIamPermissionsResponse + [iam_policy_pb2.TestIamPermissionsRequest], + iam_policy_pb2.TestIamPermissionsResponse, ]: r"""Return a callable for the test iam permissions method over gRPC. @@ -908,8 +911,8 @@ def test_iam_permissions( if "test_iam_permissions" not in self._stubs: self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/TestIamPermissions", - request_serializer=iam_policy.TestIamPermissionsRequest.SerializeToString, - response_deserializer=iam_policy.TestIamPermissionsResponse.FromString, + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, ) return self._stubs["test_iam_permissions"] diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py index 0cabde5f18d6..5358404cfef4 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- - # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,16 +13,15 @@ # See the License for the specific language governing permissions and # limitations under the License. 
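
# Aside (illustrative only): every RPC property in the transports above follows
# the same lazy stub-caching pattern -- create the unary_unary stub once on
# first access, then reuse it. A generic sketch of that pattern; the class,
# method path, and serializers are hypothetical placeholders, and `channel`
# is any grpc.Channel.
class _StubCachingTransportSketch:
    def __init__(self, channel):
        self.grpc_channel = channel
        self._stubs = {}

    @property
    def example_rpc(self):
        # Create the stub on first access only, then serve it from the cache.
        if "example_rpc" not in self._stubs:
            self._stubs["example_rpc"] = self.grpc_channel.unary_unary(
                "/example.v1.ExampleService/ExampleRpc",
                request_serializer=lambda msg: msg,      # placeholder
                response_deserializer=lambda raw: raw,   # placeholder
            )
        return self._stubs["example_rpc"]
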
# - import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union from google.api_core import gapic_v1 # type: ignore from google.api_core import grpc_helpers_async # type: ignore from google.api_core import operations_v1 # type: ignore -from google import auth # type: ignore -from google.auth import credentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore +import packaging.version import grpc # type: ignore from grpc.experimental import aio # type: ignore @@ -31,11 +29,10 @@ from google.cloud.bigtable_admin_v2.types import bigtable_table_admin from google.cloud.bigtable_admin_v2.types import table from google.cloud.bigtable_admin_v2.types import table as gba_table -from google.iam.v1 import iam_policy_pb2 as iam_policy # type: ignore -from google.iam.v1 import policy_pb2 as policy # type: ignore -from google.longrunning import operations_pb2 as operations # type: ignore -from google.protobuf import empty_pb2 as empty # type: ignore - +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore from .base import BigtableTableAdminTransport, DEFAULT_CLIENT_INFO from .grpc import BigtableTableAdminGrpcTransport @@ -64,7 +61,7 @@ class BigtableTableAdminGrpcAsyncIOTransport(BigtableTableAdminTransport): def create_channel( cls, host: str = "bigtableadmin.googleapis.com", - credentials: credentials.Credentials = None, + credentials: ga_credentials.Credentials = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, @@ -91,13 +88,15 @@ def create_channel( Returns: aio.Channel: A gRPC AsyncIO channel object. """ - scopes = scopes or cls.AUTH_SCOPES + + self_signed_jwt_kwargs = cls._get_self_signed_jwt_kwargs(host, scopes) + return grpc_helpers_async.create_channel( host, credentials=credentials, credentials_file=credentials_file, - scopes=scopes, quota_project_id=quota_project_id, + **self_signed_jwt_kwargs, **kwargs, ) @@ -105,7 +104,7 @@ def __init__( self, *, host: str = "bigtableadmin.googleapis.com", - credentials: credentials.Credentials = None, + credentials: ga_credentials.Credentials = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, channel: aio.Channel = None, @@ -119,7 +118,8 @@ def __init__( """Instantiate the transport. Args: - host (Optional[str]): The hostname to connect to. + host (Optional[str]): + The hostname to connect to. credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none @@ -178,7 +178,6 @@ def __init__( # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - else: if api_mtls_endpoint: host = api_mtls_endpoint @@ -288,7 +287,7 @@ def create_table_from_snapshot( self, ) -> Callable[ [bigtable_table_admin.CreateTableFromSnapshotRequest], - Awaitable[operations.Operation], + Awaitable[operations_pb2.Operation], ]: r"""Return a callable for the create table from snapshot method over gRPC. 
@@ -316,7 +315,7 @@ def create_table_from_snapshot( self._stubs["create_table_from_snapshot"] = self.grpc_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTableFromSnapshot", request_serializer=bigtable_table_admin.CreateTableFromSnapshotRequest.serialize, - response_deserializer=operations.Operation.FromString, + response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["create_table_from_snapshot"] @@ -378,7 +377,9 @@ def get_table( @property def delete_table( self, - ) -> Callable[[bigtable_table_admin.DeleteTableRequest], Awaitable[empty.Empty]]: + ) -> Callable[ + [bigtable_table_admin.DeleteTableRequest], Awaitable[empty_pb2.Empty] + ]: r"""Return a callable for the delete table method over gRPC. Permanently deletes a specified table and all of its @@ -398,7 +399,7 @@ def delete_table( self._stubs["delete_table"] = self.grpc_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable", request_serializer=bigtable_table_admin.DeleteTableRequest.serialize, - response_deserializer=empty.Empty.FromString, + response_deserializer=empty_pb2.Empty.FromString, ) return self._stubs["delete_table"] @@ -437,7 +438,9 @@ def modify_column_families( @property def drop_row_range( self, - ) -> Callable[[bigtable_table_admin.DropRowRangeRequest], Awaitable[empty.Empty]]: + ) -> Callable[ + [bigtable_table_admin.DropRowRangeRequest], Awaitable[empty_pb2.Empty] + ]: r"""Return a callable for the drop row range method over gRPC. Permanently drop/delete a row range from a specified @@ -459,7 +462,7 @@ def drop_row_range( self._stubs["drop_row_range"] = self.grpc_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/DropRowRange", request_serializer=bigtable_table_admin.DropRowRangeRequest.serialize, - response_deserializer=empty.Empty.FromString, + response_deserializer=empty_pb2.Empty.FromString, ) return self._stubs["drop_row_range"] @@ -532,7 +535,7 @@ def check_consistency( def snapshot_table( self, ) -> Callable[ - [bigtable_table_admin.SnapshotTableRequest], Awaitable[operations.Operation] + [bigtable_table_admin.SnapshotTableRequest], Awaitable[operations_pb2.Operation] ]: r"""Return a callable for the snapshot table method over gRPC. @@ -560,7 +563,7 @@ def snapshot_table( self._stubs["snapshot_table"] = self.grpc_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/SnapshotTable", request_serializer=bigtable_table_admin.SnapshotTableRequest.serialize, - response_deserializer=operations.Operation.FromString, + response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["snapshot_table"] @@ -636,7 +639,9 @@ def list_snapshots( @property def delete_snapshot( self, - ) -> Callable[[bigtable_table_admin.DeleteSnapshotRequest], Awaitable[empty.Empty]]: + ) -> Callable[ + [bigtable_table_admin.DeleteSnapshotRequest], Awaitable[empty_pb2.Empty] + ]: r"""Return a callable for the delete snapshot method over gRPC. Permanently deletes the specified snapshot. 
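
# Aside (illustrative only): create_channel above forwards the kwargs computed
# by _get_self_signed_jwt_kwargs, so newer google-api-core releases receive the
# default scopes/host needed for self-signed JWTs while older releases fall
# back to plain scopes. A simplified sketch of the older-style call; the host
# and scope are placeholders and `credentials` is any google-auth credential.
from google.api_core import grpc_helpers

def make_channel(credentials, host="bigtableadmin.googleapis.com:443"):
    return grpc_helpers.create_channel(
        host,
        credentials=credentials,
        scopes=("https://www.googleapis.com/auth/cloud-platform",),
    )
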
@@ -661,7 +666,7 @@ def delete_snapshot( self._stubs["delete_snapshot"] = self.grpc_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteSnapshot", request_serializer=bigtable_table_admin.DeleteSnapshotRequest.serialize, - response_deserializer=empty.Empty.FromString, + response_deserializer=empty_pb2.Empty.FromString, ) return self._stubs["delete_snapshot"] @@ -669,7 +674,7 @@ def delete_snapshot( def create_backup( self, ) -> Callable[ - [bigtable_table_admin.CreateBackupRequest], Awaitable[operations.Operation] + [bigtable_table_admin.CreateBackupRequest], Awaitable[operations_pb2.Operation] ]: r"""Return a callable for the create backup method over gRPC. @@ -697,7 +702,7 @@ def create_backup( self._stubs["create_backup"] = self.grpc_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/CreateBackup", request_serializer=bigtable_table_admin.CreateBackupRequest.serialize, - response_deserializer=operations.Operation.FromString, + response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["create_backup"] @@ -757,7 +762,9 @@ def update_backup( @property def delete_backup( self, - ) -> Callable[[bigtable_table_admin.DeleteBackupRequest], Awaitable[empty.Empty]]: + ) -> Callable[ + [bigtable_table_admin.DeleteBackupRequest], Awaitable[empty_pb2.Empty] + ]: r"""Return a callable for the delete backup method over gRPC. Deletes a pending or completed Cloud Bigtable backup. @@ -776,7 +783,7 @@ def delete_backup( self._stubs["delete_backup"] = self.grpc_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteBackup", request_serializer=bigtable_table_admin.DeleteBackupRequest.serialize, - response_deserializer=empty.Empty.FromString, + response_deserializer=empty_pb2.Empty.FromString, ) return self._stubs["delete_backup"] @@ -814,13 +821,13 @@ def list_backups( def restore_table( self, ) -> Callable[ - [bigtable_table_admin.RestoreTableRequest], Awaitable[operations.Operation] + [bigtable_table_admin.RestoreTableRequest], Awaitable[operations_pb2.Operation] ]: r"""Return a callable for the restore table method over gRPC. Create a new table by restoring from a completed backup. The new - table must be in the same instance as the instance containing - the backup. The returned table [long-running + table must be in the same project as the instance containing the + backup. The returned table [long-running operation][google.longrunning.Operation] can be used to track the progress of the operation, and to cancel it. The [metadata][google.longrunning.Operation.metadata] field type is @@ -842,14 +849,14 @@ def restore_table( self._stubs["restore_table"] = self.grpc_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/RestoreTable", request_serializer=bigtable_table_admin.RestoreTableRequest.serialize, - response_deserializer=operations.Operation.FromString, + response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["restore_table"] @property def get_iam_policy( self, - ) -> Callable[[iam_policy.GetIamPolicyRequest], Awaitable[policy.Policy]]: + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], Awaitable[policy_pb2.Policy]]: r"""Return a callable for the get iam policy method over gRPC. 
Gets the access control policy for a Table or Backup @@ -869,15 +876,15 @@ def get_iam_policy( if "get_iam_policy" not in self._stubs: self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/GetIamPolicy", - request_serializer=iam_policy.GetIamPolicyRequest.SerializeToString, - response_deserializer=policy.Policy.FromString, + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, ) return self._stubs["get_iam_policy"] @property def set_iam_policy( self, - ) -> Callable[[iam_policy.SetIamPolicyRequest], Awaitable[policy.Policy]]: + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], Awaitable[policy_pb2.Policy]]: r"""Return a callable for the set iam policy method over gRPC. Sets the access control policy on a Table or Backup @@ -896,8 +903,8 @@ def set_iam_policy( if "set_iam_policy" not in self._stubs: self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/SetIamPolicy", - request_serializer=iam_policy.SetIamPolicyRequest.SerializeToString, - response_deserializer=policy.Policy.FromString, + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, ) return self._stubs["set_iam_policy"] @@ -905,8 +912,8 @@ def set_iam_policy( def test_iam_permissions( self, ) -> Callable[ - [iam_policy.TestIamPermissionsRequest], - Awaitable[iam_policy.TestIamPermissionsResponse], + [iam_policy_pb2.TestIamPermissionsRequest], + Awaitable[iam_policy_pb2.TestIamPermissionsResponse], ]: r"""Return a callable for the test iam permissions method over gRPC. @@ -926,8 +933,8 @@ def test_iam_permissions( if "test_iam_permissions" not in self._stubs: self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/TestIamPermissions", - request_serializer=iam_policy.TestIamPermissionsRequest.SerializeToString, - response_deserializer=iam_policy.TestIamPermissionsResponse.FromString, + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, ) return self._stubs["test_iam_permissions"] diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py index 01e834d9b139..aeeed3466230 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- - # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,7 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
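
# Aside (illustrative only): the proto-plus request types defined in the types
# modules below accept their fields as constructor keywords. A minimal sketch
# based on the CreateInstanceRequest definition that follows; the project and
# instance ids are hypothetical placeholders.
from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin
from google.cloud.bigtable_admin_v2.types import instance as gba_instance

request = bigtable_instance_admin.CreateInstanceRequest(
    parent="projects/my-project",
    instance_id="my-instance",
    instance=gba_instance.Instance(display_name="My instance"),
)
print(request)
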
# - from .bigtable_instance_admin import ( CreateAppProfileRequest, CreateClusterMetadata, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py index 38ae3eab6af1..69b251f65615 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- - # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,13 +13,11 @@ # See the License for the specific language governing permissions and # limitations under the License. # - import proto # type: ignore - from google.cloud.bigtable_admin_v2.types import instance as gba_instance -from google.protobuf import field_mask_pb2 as field_mask # type: ignore -from google.protobuf import timestamp_pb2 as timestamp # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore __protobuf__ = proto.module( @@ -54,7 +51,6 @@ class CreateInstanceRequest(proto.Message): r"""Request message for BigtableInstanceAdmin.CreateInstance. - Attributes: parent (str): Required. The unique name of the project in which to create @@ -76,12 +72,9 @@ class CreateInstanceRequest(proto.Message): at most four clusters can be specified. """ - parent = proto.Field(proto.STRING, number=1) - - instance_id = proto.Field(proto.STRING, number=2) - + parent = proto.Field(proto.STRING, number=1,) + instance_id = proto.Field(proto.STRING, number=2,) instance = proto.Field(proto.MESSAGE, number=3, message=gba_instance.Instance,) - clusters = proto.MapField( proto.STRING, proto.MESSAGE, number=4, message=gba_instance.Cluster, ) @@ -89,19 +82,17 @@ class CreateInstanceRequest(proto.Message): class GetInstanceRequest(proto.Message): r"""Request message for BigtableInstanceAdmin.GetInstance. - Attributes: name (str): Required. The unique name of the requested instance. Values are of the form ``projects/{project}/instances/{instance}``. """ - name = proto.Field(proto.STRING, number=1) + name = proto.Field(proto.STRING, number=1,) class ListInstancesRequest(proto.Message): r"""Request message for BigtableInstanceAdmin.ListInstances. - Attributes: parent (str): Required. The unique name of the project for which a list of @@ -111,14 +102,12 @@ class ListInstancesRequest(proto.Message): DEPRECATED: This field is unused and ignored. """ - parent = proto.Field(proto.STRING, number=1) - - page_token = proto.Field(proto.STRING, number=2) + parent = proto.Field(proto.STRING, number=1,) + page_token = proto.Field(proto.STRING, number=2,) class ListInstancesResponse(proto.Message): r"""Response message for BigtableInstanceAdmin.ListInstances. - Attributes: instances (Sequence[google.cloud.bigtable_admin_v2.types.Instance]): The list of requested instances. 
@@ -141,10 +130,8 @@ def raw_page(self): instances = proto.RepeatedField( proto.MESSAGE, number=1, message=gba_instance.Instance, ) - - failed_locations = proto.RepeatedField(proto.STRING, number=2) - - next_page_token = proto.Field(proto.STRING, number=3) + failed_locations = proto.RepeatedField(proto.STRING, number=2,) + next_page_token = proto.Field(proto.STRING, number=3,) class PartialUpdateInstanceRequest(proto.Message): @@ -161,13 +148,13 @@ class PartialUpdateInstanceRequest(proto.Message): """ instance = proto.Field(proto.MESSAGE, number=1, message=gba_instance.Instance,) - - update_mask = proto.Field(proto.MESSAGE, number=2, message=field_mask.FieldMask,) + update_mask = proto.Field( + proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask, + ) class DeleteInstanceRequest(proto.Message): r"""Request message for BigtableInstanceAdmin.DeleteInstance. - Attributes: name (str): Required. The unique name of the instance to be deleted. @@ -175,12 +162,11 @@ class DeleteInstanceRequest(proto.Message): ``projects/{project}/instances/{instance}``. """ - name = proto.Field(proto.STRING, number=1) + name = proto.Field(proto.STRING, number=1,) class CreateClusterRequest(proto.Message): r"""Request message for BigtableInstanceAdmin.CreateCluster. - Attributes: parent (str): Required. The unique name of the instance in which to create @@ -196,16 +182,13 @@ class CreateClusterRequest(proto.Message): ``OutputOnly`` must be left blank. """ - parent = proto.Field(proto.STRING, number=1) - - cluster_id = proto.Field(proto.STRING, number=2) - + parent = proto.Field(proto.STRING, number=1,) + cluster_id = proto.Field(proto.STRING, number=2,) cluster = proto.Field(proto.MESSAGE, number=3, message=gba_instance.Cluster,) class GetClusterRequest(proto.Message): r"""Request message for BigtableInstanceAdmin.GetCluster. - Attributes: name (str): Required. The unique name of the requested cluster. Values @@ -213,12 +196,11 @@ class GetClusterRequest(proto.Message): ``projects/{project}/instances/{instance}/clusters/{cluster}``. """ - name = proto.Field(proto.STRING, number=1) + name = proto.Field(proto.STRING, number=1,) class ListClustersRequest(proto.Message): r"""Request message for BigtableInstanceAdmin.ListClusters. - Attributes: parent (str): Required. The unique name of the instance for which a list @@ -230,14 +212,12 @@ class ListClustersRequest(proto.Message): DEPRECATED: This field is unused and ignored. """ - parent = proto.Field(proto.STRING, number=1) - - page_token = proto.Field(proto.STRING, number=2) + parent = proto.Field(proto.STRING, number=1,) + page_token = proto.Field(proto.STRING, number=2,) class ListClustersResponse(proto.Message): r"""Response message for BigtableInstanceAdmin.ListClusters. - Attributes: clusters (Sequence[google.cloud.bigtable_admin_v2.types.Cluster]): The list of requested clusters. @@ -259,15 +239,12 @@ def raw_page(self): clusters = proto.RepeatedField( proto.MESSAGE, number=1, message=gba_instance.Cluster, ) - - failed_locations = proto.RepeatedField(proto.STRING, number=2) - - next_page_token = proto.Field(proto.STRING, number=3) + failed_locations = proto.RepeatedField(proto.STRING, number=2,) + next_page_token = proto.Field(proto.STRING, number=3,) class DeleteClusterRequest(proto.Message): r"""Request message for BigtableInstanceAdmin.DeleteCluster. - Attributes: name (str): Required. The unique name of the cluster to be deleted. 
@@ -275,12 +252,11 @@ class DeleteClusterRequest(proto.Message): ``projects/{project}/instances/{instance}/clusters/{cluster}``. """ - name = proto.Field(proto.STRING, number=1) + name = proto.Field(proto.STRING, number=1,) class CreateInstanceMetadata(proto.Message): r"""The metadata for the Operation returned by CreateInstance. - Attributes: original_request (google.cloud.bigtable_admin_v2.types.CreateInstanceRequest): The request that prompted the initiation of @@ -296,15 +272,14 @@ class CreateInstanceMetadata(proto.Message): original_request = proto.Field( proto.MESSAGE, number=1, message="CreateInstanceRequest", ) - - request_time = proto.Field(proto.MESSAGE, number=2, message=timestamp.Timestamp,) - - finish_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,) + request_time = proto.Field( + proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp, + ) + finish_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,) class UpdateInstanceMetadata(proto.Message): r"""The metadata for the Operation returned by UpdateInstance. - Attributes: original_request (google.cloud.bigtable_admin_v2.types.PartialUpdateInstanceRequest): The request that prompted the initiation of @@ -320,15 +295,14 @@ class UpdateInstanceMetadata(proto.Message): original_request = proto.Field( proto.MESSAGE, number=1, message="PartialUpdateInstanceRequest", ) - - request_time = proto.Field(proto.MESSAGE, number=2, message=timestamp.Timestamp,) - - finish_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,) + request_time = proto.Field( + proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp, + ) + finish_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,) class CreateClusterMetadata(proto.Message): r"""The metadata for the Operation returned by CreateCluster. - Attributes: original_request (google.cloud.bigtable_admin_v2.types.CreateClusterRequest): The request that prompted the initiation of @@ -344,15 +318,14 @@ class CreateClusterMetadata(proto.Message): original_request = proto.Field( proto.MESSAGE, number=1, message="CreateClusterRequest", ) - - request_time = proto.Field(proto.MESSAGE, number=2, message=timestamp.Timestamp,) - - finish_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,) + request_time = proto.Field( + proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp, + ) + finish_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,) class UpdateClusterMetadata(proto.Message): r"""The metadata for the Operation returned by UpdateCluster. - Attributes: original_request (google.cloud.bigtable_admin_v2.types.Cluster): The request that prompted the initiation of @@ -368,15 +341,14 @@ class UpdateClusterMetadata(proto.Message): original_request = proto.Field( proto.MESSAGE, number=1, message=gba_instance.Cluster, ) - - request_time = proto.Field(proto.MESSAGE, number=2, message=timestamp.Timestamp,) - - finish_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,) + request_time = proto.Field( + proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp, + ) + finish_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,) class CreateAppProfileRequest(proto.Message): r"""Request message for BigtableInstanceAdmin.CreateAppProfile. - Attributes: parent (str): Required. The unique name of the instance in which to create @@ -395,18 +367,14 @@ class CreateAppProfileRequest(proto.Message): the app profile. 
""" - parent = proto.Field(proto.STRING, number=1) - - app_profile_id = proto.Field(proto.STRING, number=2) - + parent = proto.Field(proto.STRING, number=1,) + app_profile_id = proto.Field(proto.STRING, number=2,) app_profile = proto.Field(proto.MESSAGE, number=3, message=gba_instance.AppProfile,) - - ignore_warnings = proto.Field(proto.BOOL, number=4) + ignore_warnings = proto.Field(proto.BOOL, number=4,) class GetAppProfileRequest(proto.Message): r"""Request message for BigtableInstanceAdmin.GetAppProfile. - Attributes: name (str): Required. The unique name of the requested app profile. @@ -414,12 +382,11 @@ class GetAppProfileRequest(proto.Message): ``projects/{project}/instances/{instance}/appProfiles/{app_profile}``. """ - name = proto.Field(proto.STRING, number=1) + name = proto.Field(proto.STRING, number=1,) class ListAppProfilesRequest(proto.Message): r"""Request message for BigtableInstanceAdmin.ListAppProfiles. - Attributes: parent (str): Required. The unique name of the instance for which a list @@ -444,16 +411,13 @@ class ListAppProfilesRequest(proto.Message): call. """ - parent = proto.Field(proto.STRING, number=1) - - page_size = proto.Field(proto.INT32, number=3) - - page_token = proto.Field(proto.STRING, number=2) + parent = proto.Field(proto.STRING, number=1,) + page_size = proto.Field(proto.INT32, number=3,) + page_token = proto.Field(proto.STRING, number=2,) class ListAppProfilesResponse(proto.Message): r"""Response message for BigtableInstanceAdmin.ListAppProfiles. - Attributes: app_profiles (Sequence[google.cloud.bigtable_admin_v2.types.AppProfile]): The list of requested app profiles. @@ -476,15 +440,12 @@ def raw_page(self): app_profiles = proto.RepeatedField( proto.MESSAGE, number=1, message=gba_instance.AppProfile, ) - - next_page_token = proto.Field(proto.STRING, number=2) - - failed_locations = proto.RepeatedField(proto.STRING, number=3) + next_page_token = proto.Field(proto.STRING, number=2,) + failed_locations = proto.RepeatedField(proto.STRING, number=3,) class UpdateAppProfileRequest(proto.Message): r"""Request message for BigtableInstanceAdmin.UpdateAppProfile. - Attributes: app_profile (google.cloud.bigtable_admin_v2.types.AppProfile): Required. The app profile which will @@ -499,15 +460,14 @@ class UpdateAppProfileRequest(proto.Message): """ app_profile = proto.Field(proto.MESSAGE, number=1, message=gba_instance.AppProfile,) - - update_mask = proto.Field(proto.MESSAGE, number=2, message=field_mask.FieldMask,) - - ignore_warnings = proto.Field(proto.BOOL, number=3) + update_mask = proto.Field( + proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask, + ) + ignore_warnings = proto.Field(proto.BOOL, number=3,) class DeleteAppProfileRequest(proto.Message): r"""Request message for BigtableInstanceAdmin.DeleteAppProfile. - Attributes: name (str): Required. The unique name of the app profile to be deleted. @@ -518,13 +478,12 @@ class DeleteAppProfileRequest(proto.Message): deleting the app profile. """ - name = proto.Field(proto.STRING, number=1) - - ignore_warnings = proto.Field(proto.BOOL, number=2) + name = proto.Field(proto.STRING, number=1,) + ignore_warnings = proto.Field(proto.BOOL, number=2,) class UpdateAppProfileMetadata(proto.Message): - r"""The metadata for the Operation returned by UpdateAppProfile.""" + r"""The metadata for the Operation returned by UpdateAppProfile. 
""" __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py index ac146b798b21..1d93991ad72d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- - # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,15 +13,13 @@ # See the License for the specific language governing permissions and # limitations under the License. # - import proto # type: ignore - from google.cloud.bigtable_admin_v2.types import common from google.cloud.bigtable_admin_v2.types import table as gba_table -from google.protobuf import duration_pb2 as duration # type: ignore -from google.protobuf import field_mask_pb2 as field_mask # type: ignore -from google.protobuf import timestamp_pb2 as timestamp # type: ignore +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore __protobuf__ = proto.module( @@ -68,8 +65,8 @@ class RestoreTableRequest(proto.Message): Attributes: parent (str): Required. The name of the instance in which to create the - restored table. This instance must be the parent of the - source backup. Values are of the form + restored table. This instance must be in the same project as + the source backup. Values are of the form ``projects//instances/``. table_id (str): Required. The id of the table to create and restore to. This @@ -82,11 +79,9 @@ class RestoreTableRequest(proto.Message): ``projects//instances//clusters//backups/``. """ - parent = proto.Field(proto.STRING, number=1) - - table_id = proto.Field(proto.STRING, number=2) - - backup = proto.Field(proto.STRING, number=3, oneof="source") + parent = proto.Field(proto.STRING, number=1,) + table_id = proto.Field(proto.STRING, number=2,) + backup = proto.Field(proto.STRING, number=3, oneof="source",) class RestoreTableMetadata(proto.Message): @@ -118,16 +113,12 @@ class RestoreTableMetadata(proto.Message): operation. """ - name = proto.Field(proto.STRING, number=1) - + name = proto.Field(proto.STRING, number=1,) source_type = proto.Field(proto.ENUM, number=2, enum=gba_table.RestoreSourceType,) - backup_info = proto.Field( proto.MESSAGE, number=3, oneof="source_info", message=gba_table.BackupInfo, ) - - optimize_table_operation_name = proto.Field(proto.STRING, number=4) - + optimize_table_operation_name = proto.Field(proto.STRING, number=4,) progress = proto.Field(proto.MESSAGE, number=5, message=common.OperationProgress,) @@ -146,8 +137,7 @@ class OptimizeRestoredTableMetadata(proto.Message): optimizations. """ - name = proto.Field(proto.STRING, number=1) - + name = proto.Field(proto.STRING, number=1,) progress = proto.Field(proto.MESSAGE, number=2, message=common.OperationProgress,) @@ -193,20 +183,16 @@ class CreateTableRequest(proto.Message): class Split(proto.Message): r"""An initial split point for a newly created table. - Attributes: key (bytes): Row key to use as an initial tablet boundary. 
""" - key = proto.Field(proto.BYTES, number=1) - - parent = proto.Field(proto.STRING, number=1) - - table_id = proto.Field(proto.STRING, number=2) + key = proto.Field(proto.BYTES, number=1,) + parent = proto.Field(proto.STRING, number=1,) + table_id = proto.Field(proto.STRING, number=2,) table = proto.Field(proto.MESSAGE, number=3, message=gba_table.Table,) - initial_splits = proto.RepeatedField(proto.MESSAGE, number=4, message=Split,) @@ -236,11 +222,9 @@ class CreateTableFromSnapshotRequest(proto.Message): ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. """ - parent = proto.Field(proto.STRING, number=1) - - table_id = proto.Field(proto.STRING, number=2) - - source_snapshot = proto.Field(proto.STRING, number=3) + parent = proto.Field(proto.STRING, number=1,) + table_id = proto.Field(proto.STRING, number=2,) + source_snapshot = proto.Field(proto.STRING, number=3,) class DropRowRangeRequest(proto.Message): @@ -260,11 +244,9 @@ class DropRowRangeRequest(proto.Message): false is a no-op. """ - name = proto.Field(proto.STRING, number=1) - - row_key_prefix = proto.Field(proto.BYTES, number=2, oneof="target") - - delete_all_data_from_table = proto.Field(proto.BOOL, number=3, oneof="target") + name = proto.Field(proto.STRING, number=1,) + row_key_prefix = proto.Field(proto.BYTES, number=2, oneof="target",) + delete_all_data_from_table = proto.Field(proto.BOOL, number=3, oneof="target",) class ListTablesRequest(proto.Message): @@ -296,13 +278,10 @@ class ListTablesRequest(proto.Message): call. """ - parent = proto.Field(proto.STRING, number=1) - + parent = proto.Field(proto.STRING, number=1,) view = proto.Field(proto.ENUM, number=2, enum=gba_table.Table.View,) - - page_size = proto.Field(proto.INT32, number=4) - - page_token = proto.Field(proto.STRING, number=3) + page_size = proto.Field(proto.INT32, number=4,) + page_token = proto.Field(proto.STRING, number=3,) class ListTablesResponse(proto.Message): @@ -323,8 +302,7 @@ def raw_page(self): return self tables = proto.RepeatedField(proto.MESSAGE, number=1, message=gba_table.Table,) - - next_page_token = proto.Field(proto.STRING, number=2) + next_page_token = proto.Field(proto.STRING, number=2,) class GetTableRequest(proto.Message): @@ -341,8 +319,7 @@ class GetTableRequest(proto.Message): Defaults to ``SCHEMA_VIEW`` if unspecified. """ - name = proto.Field(proto.STRING, number=1) - + name = proto.Field(proto.STRING, number=1,) view = proto.Field(proto.ENUM, number=2, enum=gba_table.Table.View,) @@ -357,7 +334,7 @@ class DeleteTableRequest(proto.Message): ``projects/{project}/instances/{instance}/tables/{table}``. """ - name = proto.Field(proto.STRING, number=1) + name = proto.Field(proto.STRING, number=1,) class ModifyColumnFamiliesRequest(proto.Message): @@ -380,7 +357,6 @@ class ModifyColumnFamiliesRequest(proto.Message): class Modification(proto.Message): r"""A create, update, or delete of a particular column family. - Attributes: id (str): The ID of the column family to be modified. @@ -397,20 +373,16 @@ class Modification(proto.Message): given ID, or fail if no such family exists. 
""" - id = proto.Field(proto.STRING, number=1) - + id = proto.Field(proto.STRING, number=1,) create = proto.Field( proto.MESSAGE, number=2, oneof="mod", message=gba_table.ColumnFamily, ) - update = proto.Field( proto.MESSAGE, number=3, oneof="mod", message=gba_table.ColumnFamily, ) + drop = proto.Field(proto.BOOL, number=4, oneof="mod",) - drop = proto.Field(proto.BOOL, number=4, oneof="mod") - - name = proto.Field(proto.STRING, number=1) - + name = proto.Field(proto.STRING, number=1,) modifications = proto.RepeatedField(proto.MESSAGE, number=2, message=Modification,) @@ -425,7 +397,7 @@ class GenerateConsistencyTokenRequest(proto.Message): ``projects/{project}/instances/{instance}/tables/{table}``. """ - name = proto.Field(proto.STRING, number=1) + name = proto.Field(proto.STRING, number=1,) class GenerateConsistencyTokenResponse(proto.Message): @@ -437,7 +409,7 @@ class GenerateConsistencyTokenResponse(proto.Message): The generated consistency token. """ - consistency_token = proto.Field(proto.STRING, number=1) + consistency_token = proto.Field(proto.STRING, number=1,) class CheckConsistencyRequest(proto.Message): @@ -454,9 +426,8 @@ class CheckConsistencyRequest(proto.Message): GenerateConsistencyToken for the Table. """ - name = proto.Field(proto.STRING, number=1) - - consistency_token = proto.Field(proto.STRING, number=2) + name = proto.Field(proto.STRING, number=1,) + consistency_token = proto.Field(proto.STRING, number=2,) class CheckConsistencyResponse(proto.Message): @@ -470,7 +441,7 @@ class CheckConsistencyResponse(proto.Message): the restrictions specified in the request. """ - consistent = proto.Field(proto.BOOL, number=1) + consistent = proto.Field(proto.BOOL, number=1,) class SnapshotTableRequest(proto.Message): @@ -508,15 +479,11 @@ class SnapshotTableRequest(proto.Message): Description of the snapshot. """ - name = proto.Field(proto.STRING, number=1) - - cluster = proto.Field(proto.STRING, number=2) - - snapshot_id = proto.Field(proto.STRING, number=3) - - ttl = proto.Field(proto.MESSAGE, number=4, message=duration.Duration,) - - description = proto.Field(proto.STRING, number=5) + name = proto.Field(proto.STRING, number=1,) + cluster = proto.Field(proto.STRING, number=2,) + snapshot_id = proto.Field(proto.STRING, number=3,) + ttl = proto.Field(proto.MESSAGE, number=4, message=duration_pb2.Duration,) + description = proto.Field(proto.STRING, number=5,) class GetSnapshotRequest(proto.Message): @@ -536,7 +503,7 @@ class GetSnapshotRequest(proto.Message): ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. """ - name = proto.Field(proto.STRING, number=1) + name = proto.Field(proto.STRING, number=1,) class ListSnapshotsRequest(proto.Message): @@ -565,11 +532,9 @@ class ListSnapshotsRequest(proto.Message): call. 
""" - parent = proto.Field(proto.STRING, number=1) - - page_size = proto.Field(proto.INT32, number=2) - - page_token = proto.Field(proto.STRING, number=3) + parent = proto.Field(proto.STRING, number=1,) + page_size = proto.Field(proto.INT32, number=2,) + page_token = proto.Field(proto.STRING, number=3,) class ListSnapshotsResponse(proto.Message): @@ -599,8 +564,7 @@ def raw_page(self): snapshots = proto.RepeatedField( proto.MESSAGE, number=1, message=gba_table.Snapshot, ) - - next_page_token = proto.Field(proto.STRING, number=2) + next_page_token = proto.Field(proto.STRING, number=2,) class DeleteSnapshotRequest(proto.Message): @@ -620,7 +584,7 @@ class DeleteSnapshotRequest(proto.Message): ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. """ - name = proto.Field(proto.STRING, number=1) + name = proto.Field(proto.STRING, number=1,) class SnapshotTableMetadata(proto.Message): @@ -646,10 +610,10 @@ class SnapshotTableMetadata(proto.Message): original_request = proto.Field( proto.MESSAGE, number=1, message="SnapshotTableRequest", ) - - request_time = proto.Field(proto.MESSAGE, number=2, message=timestamp.Timestamp,) - - finish_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,) + request_time = proto.Field( + proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp, + ) + finish_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,) class CreateTableFromSnapshotMetadata(proto.Message): @@ -676,10 +640,10 @@ class CreateTableFromSnapshotMetadata(proto.Message): original_request = proto.Field( proto.MESSAGE, number=1, message="CreateTableFromSnapshotRequest", ) - - request_time = proto.Field(proto.MESSAGE, number=2, message=timestamp.Timestamp,) - - finish_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,) + request_time = proto.Field( + proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp, + ) + finish_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,) class CreateBackupRequest(proto.Message): @@ -704,10 +668,8 @@ class CreateBackupRequest(proto.Message): Required. The backup to create. """ - parent = proto.Field(proto.STRING, number=1) - - backup_id = proto.Field(proto.STRING, number=2) - + parent = proto.Field(proto.STRING, number=1,) + backup_id = proto.Field(proto.STRING, number=2,) backup = proto.Field(proto.MESSAGE, number=3, message=gba_table.Backup,) @@ -728,13 +690,10 @@ class CreateBackupMetadata(proto.Message): finished or was cancelled. 
""" - name = proto.Field(proto.STRING, number=1) - - source_table = proto.Field(proto.STRING, number=2) - - start_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,) - - end_time = proto.Field(proto.MESSAGE, number=4, message=timestamp.Timestamp,) + name = proto.Field(proto.STRING, number=1,) + source_table = proto.Field(proto.STRING, number=2,) + start_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,) + end_time = proto.Field(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,) class UpdateBackupRequest(proto.Message): @@ -759,8 +718,9 @@ class UpdateBackupRequest(proto.Message): """ backup = proto.Field(proto.MESSAGE, number=1, message=gba_table.Backup,) - - update_mask = proto.Field(proto.MESSAGE, number=2, message=field_mask.FieldMask,) + update_mask = proto.Field( + proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask, + ) class GetBackupRequest(proto.Message): @@ -773,7 +733,7 @@ class GetBackupRequest(proto.Message): ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. """ - name = proto.Field(proto.STRING, number=1) + name = proto.Field(proto.STRING, number=1,) class DeleteBackupRequest(proto.Message): @@ -787,7 +747,7 @@ class DeleteBackupRequest(proto.Message): ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. """ - name = proto.Field(proto.STRING, number=1) + name = proto.Field(proto.STRING, number=1,) class ListBackupsRequest(proto.Message): @@ -876,15 +836,11 @@ class ListBackupsRequest(proto.Message): to the same ``parent`` and with the same ``filter``. """ - parent = proto.Field(proto.STRING, number=1) - - filter = proto.Field(proto.STRING, number=2) - - order_by = proto.Field(proto.STRING, number=3) - - page_size = proto.Field(proto.INT32, number=4) - - page_token = proto.Field(proto.STRING, number=5) + parent = proto.Field(proto.STRING, number=1,) + filter = proto.Field(proto.STRING, number=2,) + order_by = proto.Field(proto.STRING, number=3,) + page_size = proto.Field(proto.INT32, number=4,) + page_token = proto.Field(proto.STRING, number=5,) class ListBackupsResponse(proto.Message): @@ -905,8 +861,7 @@ def raw_page(self): return self backups = proto.RepeatedField(proto.MESSAGE, number=1, message=gba_table.Backup,) - - next_page_token = proto.Field(proto.STRING, number=2) + next_page_token = proto.Field(proto.STRING, number=2,) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/common.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/common.py index 43d500dc0a14..788671e71ca1 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/common.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/common.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- - # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,11 +13,9 @@ # See the License for the specific language governing permissions and # limitations under the License. # - import proto # type: ignore - -from google.protobuf import timestamp_pb2 as timestamp # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore __protobuf__ = proto.module( @@ -48,11 +45,9 @@ class OperationProgress(proto.Message): failed or was completed successfully. 
""" - progress_percent = proto.Field(proto.INT32, number=1) - - start_time = proto.Field(proto.MESSAGE, number=2, message=timestamp.Timestamp,) - - end_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,) + progress_percent = proto.Field(proto.INT32, number=1,) + start_time = proto.Field(proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp,) + end_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py index 1f13a0cefe2a..64eb1edc7f5f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- - # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,10 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # - import proto # type: ignore - from google.cloud.bigtable_admin_v2.types import common @@ -74,15 +71,11 @@ class Type(proto.Enum): PRODUCTION = 1 DEVELOPMENT = 2 - name = proto.Field(proto.STRING, number=1) - - display_name = proto.Field(proto.STRING, number=2) - + name = proto.Field(proto.STRING, number=1,) + display_name = proto.Field(proto.STRING, number=2,) state = proto.Field(proto.ENUM, number=3, enum=State,) - type_ = proto.Field(proto.ENUM, number=4, enum=Type,) - - labels = proto.MapField(proto.STRING, proto.STRING, number=5) + labels = proto.MapField(proto.STRING, proto.STRING, number=5,) class Cluster(proto.Message): @@ -143,18 +136,13 @@ class EncryptionConfig(proto.Message): key. """ - kms_key_name = proto.Field(proto.STRING, number=1) - - name = proto.Field(proto.STRING, number=1) - - location = proto.Field(proto.STRING, number=2) + kms_key_name = proto.Field(proto.STRING, number=1,) + name = proto.Field(proto.STRING, number=1,) + location = proto.Field(proto.STRING, number=2,) state = proto.Field(proto.ENUM, number=3, enum=State,) - - serve_nodes = proto.Field(proto.INT32, number=4) - + serve_nodes = proto.Field(proto.INT32, number=4,) default_storage_type = proto.Field(proto.ENUM, number=5, enum=common.StorageType,) - encryption_config = proto.Field(proto.MESSAGE, number=6, message=EncryptionConfig,) @@ -193,7 +181,7 @@ class MultiClusterRoutingUseAny(proto.Message): available in the event of transient errors or delays. Clusters in a region are considered equidistant. Choosing this option sacrifices read-your-writes consistency to improve availability. - """ + """ class SingleClusterRouting(proto.Message): r"""Unconditionally routes all read/write requests to a specific @@ -211,23 +199,18 @@ class SingleClusterRouting(proto.Message): table/row/column in multiple clusters. 
""" - cluster_id = proto.Field(proto.STRING, number=1) - - allow_transactional_writes = proto.Field(proto.BOOL, number=2) - - name = proto.Field(proto.STRING, number=1) - - etag = proto.Field(proto.STRING, number=2) - - description = proto.Field(proto.STRING, number=3) + cluster_id = proto.Field(proto.STRING, number=1,) + allow_transactional_writes = proto.Field(proto.BOOL, number=2,) + name = proto.Field(proto.STRING, number=1,) + etag = proto.Field(proto.STRING, number=2,) + description = proto.Field(proto.STRING, number=3,) multi_cluster_routing_use_any = proto.Field( proto.MESSAGE, number=5, oneof="routing_policy", message=MultiClusterRoutingUseAny, ) - single_cluster_routing = proto.Field( proto.MESSAGE, number=6, oneof="routing_policy", message=SingleClusterRouting, ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py index 7f5f88e4f58f..75ceaf263132 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- - # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,13 +13,11 @@ # See the License for the specific language governing permissions and # limitations under the License. # - import proto # type: ignore - -from google.protobuf import duration_pb2 as duration # type: ignore -from google.protobuf import timestamp_pb2 as timestamp # type: ignore -from google.rpc import status_pb2 as status # type: ignore +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore __protobuf__ = proto.module( @@ -47,7 +44,6 @@ class RestoreSourceType(proto.Enum): class RestoreInfo(proto.Message): r"""Information about a table restore. - Attributes: source_type (google.cloud.bigtable_admin_v2.types.RestoreSourceType): The type of the restore source. @@ -57,7 +53,6 @@ class RestoreInfo(proto.Message): """ source_type = proto.Field(proto.ENUM, number=1, enum="RestoreSourceType",) - backup_info = proto.Field( proto.MESSAGE, number=2, oneof="source_info", message="BackupInfo", ) @@ -116,7 +111,6 @@ class View(proto.Enum): class ClusterState(proto.Message): r"""The state of a table's data in a particular cluster. - Attributes: replication_state (google.cloud.bigtable_admin_v2.types.Table.ClusterState.ReplicationState): Output only. The state of replication for the @@ -144,23 +138,18 @@ class ReplicationState(proto.Enum): replication_state = proto.Field( proto.ENUM, number=1, enum="Table.ClusterState.ReplicationState", ) - encryption_info = proto.RepeatedField( proto.MESSAGE, number=2, message="EncryptionInfo", ) - name = proto.Field(proto.STRING, number=1) - + name = proto.Field(proto.STRING, number=1,) cluster_states = proto.MapField( proto.STRING, proto.MESSAGE, number=2, message=ClusterState, ) - column_families = proto.MapField( proto.STRING, proto.MESSAGE, number=3, message="ColumnFamily", ) - granularity = proto.Field(proto.ENUM, number=4, enum=TimestampGranularity,) - restore_info = proto.Field(proto.MESSAGE, number=6, message="RestoreInfo",) @@ -204,7 +193,6 @@ class GcRule(proto.Message): class Intersection(proto.Message): r"""A GcRule which deletes cells matching all of the given rules. 
- Attributes: rules (Sequence[google.cloud.bigtable_admin_v2.types.GcRule]): Only delete cells which would be deleted by every element of @@ -215,7 +203,6 @@ class Intersection(proto.Message): class Union(proto.Message): r"""A GcRule which deletes cells matching any of the given rules. - Attributes: rules (Sequence[google.cloud.bigtable_admin_v2.types.GcRule]): Delete cells which would be deleted by any element of @@ -224,16 +211,13 @@ class Union(proto.Message): rules = proto.RepeatedField(proto.MESSAGE, number=1, message="GcRule",) - max_num_versions = proto.Field(proto.INT32, number=1, oneof="rule") - + max_num_versions = proto.Field(proto.INT32, number=1, oneof="rule",) max_age = proto.Field( - proto.MESSAGE, number=2, oneof="rule", message=duration.Duration, + proto.MESSAGE, number=2, oneof="rule", message=duration_pb2.Duration, ) - intersection = proto.Field( proto.MESSAGE, number=3, oneof="rule", message=Intersection, ) - union = proto.Field(proto.MESSAGE, number=4, oneof="rule", message=Union,) @@ -265,10 +249,8 @@ class EncryptionType(proto.Enum): CUSTOMER_MANAGED_ENCRYPTION = 2 encryption_type = proto.Field(proto.ENUM, number=3, enum=EncryptionType,) - - encryption_status = proto.Field(proto.MESSAGE, number=4, message=status.Status,) - - kms_key_version = proto.Field(proto.STRING, number=2) + encryption_status = proto.Field(proto.MESSAGE, number=4, message=status_pb2.Status,) + kms_key_version = proto.Field(proto.STRING, number=2,) class Snapshot(proto.Message): @@ -317,24 +299,17 @@ class State(proto.Enum): READY = 1 CREATING = 2 - name = proto.Field(proto.STRING, number=1) - + name = proto.Field(proto.STRING, number=1,) source_table = proto.Field(proto.MESSAGE, number=2, message="Table",) - - data_size_bytes = proto.Field(proto.INT64, number=3) - - create_time = proto.Field(proto.MESSAGE, number=4, message=timestamp.Timestamp,) - - delete_time = proto.Field(proto.MESSAGE, number=5, message=timestamp.Timestamp,) - + data_size_bytes = proto.Field(proto.INT64, number=3,) + create_time = proto.Field(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,) + delete_time = proto.Field(proto.MESSAGE, number=5, message=timestamp_pb2.Timestamp,) state = proto.Field(proto.ENUM, number=6, enum=State,) - - description = proto.Field(proto.STRING, number=7) + description = proto.Field(proto.STRING, number=7,) class Backup(proto.Message): r"""A backup of a Cloud Bigtable table. - Attributes: name (str): Output only. 
A globally unique identifier for the backup @@ -382,26 +357,18 @@ class State(proto.Enum): CREATING = 1 READY = 2 - name = proto.Field(proto.STRING, number=1) - - source_table = proto.Field(proto.STRING, number=2) - - expire_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,) - - start_time = proto.Field(proto.MESSAGE, number=4, message=timestamp.Timestamp,) - - end_time = proto.Field(proto.MESSAGE, number=5, message=timestamp.Timestamp,) - - size_bytes = proto.Field(proto.INT64, number=6) - + name = proto.Field(proto.STRING, number=1,) + source_table = proto.Field(proto.STRING, number=2,) + expire_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,) + start_time = proto.Field(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,) + end_time = proto.Field(proto.MESSAGE, number=5, message=timestamp_pb2.Timestamp,) + size_bytes = proto.Field(proto.INT64, number=6,) state = proto.Field(proto.ENUM, number=7, enum=State,) - encryption_info = proto.Field(proto.MESSAGE, number=9, message="EncryptionInfo",) class BackupInfo(proto.Message): r"""Information about a backup. - Attributes: backup (str): Output only. Name of the backup. @@ -418,13 +385,10 @@ class BackupInfo(proto.Message): created from. """ - backup = proto.Field(proto.STRING, number=1) - - start_time = proto.Field(proto.MESSAGE, number=2, message=timestamp.Timestamp,) - - end_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,) - - source_table = proto.Field(proto.STRING, number=4) + backup = proto.Field(proto.STRING, number=1,) + start_time = proto.Field(proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp,) + end_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,) + source_table = proto.Field(proto.STRING, number=4,) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-bigtable/owlbot.py b/packages/google-cloud-bigtable/owlbot.py index e0408b3e5429..7d3832962613 100644 --- a/packages/google-cloud-bigtable/owlbot.py +++ b/packages/google-cloud-bigtable/owlbot.py @@ -14,23 +14,66 @@ """This script is used to synthesize generated parts of this library.""" +from pathlib import Path +from typing import List, Optional + import synthtool as s from synthtool import gcp from synthtool.languages import python common = gcp.CommonTemplates() +# This is a customized version of the s.get_staging_dirs() function from synthtool to +# cater for copying 2 different folders from googleapis-gen +# which are bigtable and bigtable/admin. +# Source https://github.com/googleapis/synthtool/blob/master/synthtool/transforms.py#L280 +def get_staging_dirs( + default_version: Optional[str] = None, sub_directory: Optional[str] = None +) -> List[Path]: + """Returns the list of directories, one per version, copied from + https://github.com/googleapis/googleapis-gen. Will return in lexical sorting + order with the exception of the default_version which will be last (if specified). + + Args: + default_version (str): the default version of the API. The directory for this version + will be the last item in the returned list if specified. + sub_directory (str): if a `sub_directory` is provided, only the directories within the + specified `sub_directory` will be returned. + + Returns: the empty list if no file were copied. + """ + + staging = Path("owl-bot-staging") + + if sub_directory: + staging /= sub_directory + + if staging.is_dir(): + # Collect the subdirectories of the staging directory. 
+ versions = [v.name for v in staging.iterdir() if v.is_dir()] + # Reorder the versions so the default version always comes last. + versions = [v for v in versions if v != default_version] + versions.sort() + if default_version is not None: + versions += [default_version] + dirs = [staging / v for v in versions] + for dir in dirs: + s._tracked_paths.add(dir) + return dirs + else: + return [] + # This library ships clients for two different APIs, # BigTable and BigTable Admin bigtable_default_version = "v2" bigtable_admin_default_version = "v2" -for library in s.get_staging_dirs(bigtable_default_version): +for library in get_staging_dirs(bigtable_default_version, "bigtable"): s.move(library / "google/cloud/bigtable_v*") s.move(library / "tests") s.move(library / "scripts") -for library in s.get_staging_dirs(bigtable_admin_default_version): +for library in get_staging_dirs(bigtable_admin_default_version, "bigtable_admin"): s.move(library / "google/cloud/bigtable_admin_v*") s.move(library / "tests") s.move(library / "scripts") diff --git a/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py b/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py index 3902adff57af..fddcbf1f15c4 100644 --- a/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py +++ b/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py @@ -1,6 +1,5 @@ #! /usr/bin/env python3 # -*- coding: utf-8 -*- - # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,7 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. # - import argparse import os import libcst as cst @@ -41,45 +39,44 @@ def partition( class bigtable_adminCallTransformer(cst.CSTTransformer): CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { - 'check_consistency': ('name', 'consistency_token', ), - 'create_app_profile': ('parent', 'app_profile_id', 'app_profile', 'ignore_warnings', ), - 'create_backup': ('parent', 'backup_id', 'backup', ), - 'create_cluster': ('parent', 'cluster_id', 'cluster', ), - 'create_instance': ('parent', 'instance_id', 'instance', 'clusters', ), - 'create_table': ('parent', 'table_id', 'table', 'initial_splits', ), - 'create_table_from_snapshot': ('parent', 'table_id', 'source_snapshot', ), - 'delete_app_profile': ('name', 'ignore_warnings', ), - 'delete_backup': ('name', ), - 'delete_cluster': ('name', ), - 'delete_instance': ('name', ), - 'delete_snapshot': ('name', ), - 'delete_table': ('name', ), - 'drop_row_range': ('name', 'row_key_prefix', 'delete_all_data_from_table', ), - 'generate_consistency_token': ('name', ), - 'get_app_profile': ('name', ), - 'get_backup': ('name', ), - 'get_cluster': ('name', ), - 'get_iam_policy': ('resource', 'options', ), - 'get_instance': ('name', ), - 'get_snapshot': ('name', ), - 'get_table': ('name', 'view', ), - 'list_app_profiles': ('parent', 'page_size', 'page_token', ), - 'list_backups': ('parent', 'filter', 'order_by', 'page_size', 'page_token', ), - 'list_clusters': ('parent', 'page_token', ), - 'list_instances': ('parent', 'page_token', ), - 'list_snapshots': ('parent', 'page_size', 'page_token', ), - 'list_tables': ('parent', 'view', 'page_size', 'page_token', ), - 'modify_column_families': ('name', 'modifications', ), - 'partial_update_instance': ('instance', 'update_mask', ), - 'restore_table': ('parent', 'table_id', 'backup', ), - 'set_iam_policy': 
('resource', 'policy', ), - 'snapshot_table': ('name', 'cluster', 'snapshot_id', 'ttl', 'description', ), - 'test_iam_permissions': ('resource', 'permissions', ), - 'update_app_profile': ('app_profile', 'update_mask', 'ignore_warnings', ), - 'update_backup': ('backup', 'update_mask', ), - 'update_cluster': ('serve_nodes', 'name', 'location', 'state', 'default_storage_type', 'encryption_config', ), - 'update_instance': ('display_name', 'name', 'state', 'type_', 'labels', ), - + 'check_consistency': ('name', 'consistency_token', ), + 'create_app_profile': ('parent', 'app_profile_id', 'app_profile', 'ignore_warnings', ), + 'create_backup': ('parent', 'backup_id', 'backup', ), + 'create_cluster': ('parent', 'cluster_id', 'cluster', ), + 'create_instance': ('parent', 'instance_id', 'instance', 'clusters', ), + 'create_table': ('parent', 'table_id', 'table', 'initial_splits', ), + 'create_table_from_snapshot': ('parent', 'table_id', 'source_snapshot', ), + 'delete_app_profile': ('name', 'ignore_warnings', ), + 'delete_backup': ('name', ), + 'delete_cluster': ('name', ), + 'delete_instance': ('name', ), + 'delete_snapshot': ('name', ), + 'delete_table': ('name', ), + 'drop_row_range': ('name', 'row_key_prefix', 'delete_all_data_from_table', ), + 'generate_consistency_token': ('name', ), + 'get_app_profile': ('name', ), + 'get_backup': ('name', ), + 'get_cluster': ('name', ), + 'get_iam_policy': ('resource', 'options', ), + 'get_instance': ('name', ), + 'get_snapshot': ('name', ), + 'get_table': ('name', 'view', ), + 'list_app_profiles': ('parent', 'page_size', 'page_token', ), + 'list_backups': ('parent', 'filter', 'order_by', 'page_size', 'page_token', ), + 'list_clusters': ('parent', 'page_token', ), + 'list_instances': ('parent', 'page_token', ), + 'list_snapshots': ('parent', 'page_size', 'page_token', ), + 'list_tables': ('parent', 'view', 'page_size', 'page_token', ), + 'modify_column_families': ('name', 'modifications', ), + 'partial_update_instance': ('instance', 'update_mask', ), + 'restore_table': ('parent', 'table_id', 'backup', ), + 'set_iam_policy': ('resource', 'policy', ), + 'snapshot_table': ('name', 'cluster', 'snapshot_id', 'ttl', 'description', ), + 'test_iam_permissions': ('resource', 'permissions', ), + 'update_app_profile': ('app_profile', 'update_mask', 'ignore_warnings', ), + 'update_backup': ('backup', 'update_mask', ), + 'update_cluster': ('serve_nodes', 'name', 'location', 'state', 'default_storage_type', 'encryption_config', ), + 'update_instance': ('display_name', 'name', 'state', 'type_', 'labels', ), } def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: @@ -110,7 +107,7 @@ def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: value=cst.Dict([ cst.DictElement( cst.SimpleString("'{}'".format(name)), - cst.Element(value=arg.value) +cst.Element(value=arg.value) ) # Note: the args + kwargs looks silly, but keep in mind that # the control parameters had to be stripped out, and that diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/__init__.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/__init__.py index 42ffdf2bc43d..4de65971c238 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/__init__.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/__init__.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- - # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git 
a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index 3ffcffc3bb1a..046742719f78 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- - # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,9 +13,9 @@ # See the License for the specific language governing permissions and # limitations under the License. # - import os import mock +import packaging.version import grpc from grpc.experimental import aio @@ -24,16 +23,16 @@ import pytest from proto.marshal.rules.dates import DurationRule, TimestampRule -from google import auth + from google.api_core import client_options -from google.api_core import exceptions +from google.api_core import exceptions as core_exceptions from google.api_core import future from google.api_core import gapic_v1 from google.api_core import grpc_helpers from google.api_core import grpc_helpers_async from google.api_core import operation_async # type: ignore from google.api_core import operations_v1 -from google.auth import credentials +from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( BigtableInstanceAdminAsyncClient, @@ -43,17 +42,47 @@ ) from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import pagers from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import transports +from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.base import ( + _API_CORE_VERSION, +) +from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.base import ( + _GOOGLE_AUTH_VERSION, +) from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin from google.cloud.bigtable_admin_v2.types import common from google.cloud.bigtable_admin_v2.types import instance from google.cloud.bigtable_admin_v2.types import instance as gba_instance -from google.iam.v1 import iam_policy_pb2 as iam_policy # type: ignore -from google.iam.v1 import options_pb2 as options # type: ignore -from google.iam.v1 import policy_pb2 as policy # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import options_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 from google.oauth2 import service_account -from google.protobuf import field_mask_pb2 as field_mask # type: ignore -from google.type import expr_pb2 as expr # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.type import expr_pb2 # type: ignore +import google.auth + + +# TODO(busunkim): Once google-api-core >= 1.26.0 is required: +# - Delete all the api-core and auth "less than" test cases +# - Delete these pytest markers (Make the "greater than or equal to" tests the default). 
+requires_google_auth_lt_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), + reason="This test requires google-auth < 1.25.0", +) +requires_google_auth_gte_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), + reason="This test requires google-auth >= 1.25.0", +) + +requires_api_core_lt_1_26_0 = pytest.mark.skipif( + packaging.version.parse(_API_CORE_VERSION) >= packaging.version.parse("1.26.0"), + reason="This test requires google-api-core < 1.26.0", +) + +requires_api_core_gte_1_26_0 = pytest.mark.skipif( + packaging.version.parse(_API_CORE_VERSION) < packaging.version.parse("1.26.0"), + reason="This test requires google-api-core >= 1.26.0", +) def client_cert_source_callback(): @@ -105,7 +134,7 @@ def test__get_default_mtls_endpoint(): "client_class", [BigtableInstanceAdminClient, BigtableInstanceAdminAsyncClient,] ) def test_bigtable_instance_admin_client_from_service_account_info(client_class): - creds = credentials.AnonymousCredentials() + creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: @@ -122,7 +151,7 @@ def test_bigtable_instance_admin_client_from_service_account_info(client_class): "client_class", [BigtableInstanceAdminClient, BigtableInstanceAdminAsyncClient,] ) def test_bigtable_instance_admin_client_from_service_account_file(client_class): - creds = credentials.AnonymousCredentials() + creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_file" ) as factory: @@ -179,7 +208,7 @@ def test_bigtable_instance_admin_client_client_options( ): # Check that if channel is provided we won't create a new one. with mock.patch.object(BigtableInstanceAdminClient, "get_transport_class") as gtc: - transport = transport_class(credentials=credentials.AnonymousCredentials()) + transport = transport_class(credentials=ga_credentials.AnonymousCredentials()) client = client_class(transport=transport) gtc.assert_not_called() @@ -485,7 +514,7 @@ def test_create_instance( transport: str = "grpc", request_type=bigtable_instance_admin.CreateInstanceRequest ): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -496,13 +525,11 @@ def test_create_instance( with mock.patch.object(type(client.transport.create_instance), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/spam") - response = client.create_instance(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.CreateInstanceRequest() # Establish that the response is the type that we expect. @@ -517,7 +544,7 @@ def test_create_instance_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -525,7 +552,6 @@ def test_create_instance_empty_call(): client.create_instance() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.CreateInstanceRequest() @@ -535,7 +561,7 @@ async def test_create_instance_async( request_type=bigtable_instance_admin.CreateInstanceRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -548,13 +574,11 @@ async def test_create_instance_async( call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/spam") ) - response = await client.create_instance(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.CreateInstanceRequest() # Establish that the response is the type that we expect. @@ -568,18 +592,18 @@ async def test_create_instance_async_from_dict(): def test_create_instance_field_headers(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_instance_admin.CreateInstanceRequest() + request.parent = "parent/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_instance), "__call__") as call: call.return_value = operations_pb2.Operation(name="operations/op") - client.create_instance(request) # Establish that the underlying gRPC stub method was called. @@ -595,12 +619,13 @@ def test_create_instance_field_headers(): @pytest.mark.asyncio async def test_create_instance_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_instance_admin.CreateInstanceRequest() + request.parent = "parent/value" # Mock the actual call within the gRPC stub, and fake the request. @@ -608,7 +633,6 @@ async def test_create_instance_field_headers_async(): call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/op") ) - await client.create_instance(request) # Establish that the underlying gRPC stub method was called. @@ -623,14 +647,13 @@ async def test_create_instance_field_headers_async(): def test_create_instance_flattened(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_instance), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/op") - # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.create_instance( @@ -644,13 +667,9 @@ def test_create_instance_flattened(): # request object values. 
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].parent == "parent_value" - assert args[0].instance_id == "instance_id_value" - assert args[0].instance == gba_instance.Instance(name="name_value") - assert args[0].clusters == { "key_value": gba_instance.Cluster(name="name_value") } @@ -658,7 +677,7 @@ def test_create_instance_flattened(): def test_create_instance_flattened_error(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened @@ -676,7 +695,7 @@ def test_create_instance_flattened_error(): @pytest.mark.asyncio async def test_create_instance_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -700,13 +719,9 @@ async def test_create_instance_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].parent == "parent_value" - assert args[0].instance_id == "instance_id_value" - assert args[0].instance == gba_instance.Instance(name="name_value") - assert args[0].clusters == { "key_value": gba_instance.Cluster(name="name_value") } @@ -715,7 +730,7 @@ async def test_create_instance_flattened_async(): @pytest.mark.asyncio async def test_create_instance_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened @@ -734,7 +749,7 @@ def test_get_instance( transport: str = "grpc", request_type=bigtable_instance_admin.GetInstanceRequest ): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -750,25 +765,18 @@ def test_get_instance( state=instance.Instance.State.READY, type_=instance.Instance.Type.PRODUCTION, ) - response = client.get_instance(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.GetInstanceRequest() # Establish that the response is the type that we expect. - assert isinstance(response, instance.Instance) - assert response.name == "name_value" - assert response.display_name == "display_name_value" - assert response.state == instance.Instance.State.READY - assert response.type_ == instance.Instance.Type.PRODUCTION @@ -780,7 +788,7 @@ def test_get_instance_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -788,7 +796,6 @@ def test_get_instance_empty_call(): client.get_instance() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.GetInstanceRequest() @@ -798,7 +805,7 @@ async def test_get_instance_async( request_type=bigtable_instance_admin.GetInstanceRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -816,24 +823,18 @@ async def test_get_instance_async( type_=instance.Instance.Type.PRODUCTION, ) ) - response = await client.get_instance(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.GetInstanceRequest() # Establish that the response is the type that we expect. assert isinstance(response, instance.Instance) - assert response.name == "name_value" - assert response.display_name == "display_name_value" - assert response.state == instance.Instance.State.READY - assert response.type_ == instance.Instance.Type.PRODUCTION @@ -844,18 +845,18 @@ async def test_get_instance_async_from_dict(): def test_get_instance_field_headers(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_instance_admin.GetInstanceRequest() + request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_instance), "__call__") as call: call.return_value = instance.Instance() - client.get_instance(request) # Establish that the underlying gRPC stub method was called. @@ -871,18 +872,18 @@ def test_get_instance_field_headers(): @pytest.mark.asyncio async def test_get_instance_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_instance_admin.GetInstanceRequest() + request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_instance), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.Instance()) - await client.get_instance(request) # Establish that the underlying gRPC stub method was called. @@ -897,14 +898,13 @@ async def test_get_instance_field_headers_async(): def test_get_instance_flattened(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_instance), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = instance.Instance() - # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.get_instance(name="name_value",) @@ -913,13 +913,12 @@ def test_get_instance_flattened(): # request object values. 
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" def test_get_instance_flattened_error(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened @@ -933,7 +932,7 @@ def test_get_instance_flattened_error(): @pytest.mark.asyncio async def test_get_instance_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -950,14 +949,13 @@ async def test_get_instance_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" @pytest.mark.asyncio async def test_get_instance_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened @@ -972,7 +970,7 @@ def test_list_instances( transport: str = "grpc", request_type=bigtable_instance_admin.ListInstancesRequest ): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -986,23 +984,17 @@ def test_list_instances( failed_locations=["failed_locations_value"], next_page_token="next_page_token_value", ) - response = client.list_instances(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListInstancesRequest() # Establish that the response is the type that we expect. - assert response.raw_page is response - assert isinstance(response, bigtable_instance_admin.ListInstancesResponse) - assert response.failed_locations == ["failed_locations_value"] - assert response.next_page_token == "next_page_token_value" @@ -1014,7 +1006,7 @@ def test_list_instances_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1022,7 +1014,6 @@ def test_list_instances_empty_call(): client.list_instances() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListInstancesRequest() @@ -1032,7 +1023,7 @@ async def test_list_instances_async( request_type=bigtable_instance_admin.ListInstancesRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -1048,20 +1039,16 @@ async def test_list_instances_async( next_page_token="next_page_token_value", ) ) - response = await client.list_instances(request) # Establish that the underlying gRPC stub method was called. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListInstancesRequest() # Establish that the response is the type that we expect. assert isinstance(response, bigtable_instance_admin.ListInstancesResponse) - assert response.failed_locations == ["failed_locations_value"] - assert response.next_page_token == "next_page_token_value" @@ -1072,18 +1059,18 @@ async def test_list_instances_async_from_dict(): def test_list_instances_field_headers(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_instance_admin.ListInstancesRequest() + request.parent = "parent/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_instances), "__call__") as call: call.return_value = bigtable_instance_admin.ListInstancesResponse() - client.list_instances(request) # Establish that the underlying gRPC stub method was called. @@ -1099,12 +1086,13 @@ def test_list_instances_field_headers(): @pytest.mark.asyncio async def test_list_instances_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_instance_admin.ListInstancesRequest() + request.parent = "parent/value" # Mock the actual call within the gRPC stub, and fake the request. @@ -1112,7 +1100,6 @@ async def test_list_instances_field_headers_async(): call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( bigtable_instance_admin.ListInstancesResponse() ) - await client.list_instances(request) # Establish that the underlying gRPC stub method was called. @@ -1127,14 +1114,13 @@ async def test_list_instances_field_headers_async(): def test_list_instances_flattened(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_instances), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = bigtable_instance_admin.ListInstancesResponse() - # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.list_instances(parent="parent_value",) @@ -1143,13 +1129,12 @@ def test_list_instances_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].parent == "parent_value" def test_list_instances_flattened_error(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened @@ -1163,7 +1148,7 @@ def test_list_instances_flattened_error(): @pytest.mark.asyncio async def test_list_instances_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -1182,14 +1167,13 @@ async def test_list_instances_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].parent == "parent_value" @pytest.mark.asyncio async def test_list_instances_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened @@ -1202,7 +1186,7 @@ async def test_list_instances_flattened_error_async(): def test_update_instance(transport: str = "grpc", request_type=instance.Instance): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -1218,25 +1202,18 @@ def test_update_instance(transport: str = "grpc", request_type=instance.Instance state=instance.Instance.State.READY, type_=instance.Instance.Type.PRODUCTION, ) - response = client.update_instance(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == instance.Instance() # Establish that the response is the type that we expect. - assert isinstance(response, instance.Instance) - assert response.name == "name_value" - assert response.display_name == "display_name_value" - assert response.state == instance.Instance.State.READY - assert response.type_ == instance.Instance.Type.PRODUCTION @@ -1248,7 +1225,7 @@ def test_update_instance_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1256,7 +1233,6 @@ def test_update_instance_empty_call(): client.update_instance() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == instance.Instance() @@ -1265,7 +1241,7 @@ async def test_update_instance_async( transport: str = "grpc_asyncio", request_type=instance.Instance ): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -1283,24 +1259,18 @@ async def test_update_instance_async( type_=instance.Instance.Type.PRODUCTION, ) ) - response = await client.update_instance(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == instance.Instance() # Establish that the response is the type that we expect. 
assert isinstance(response, instance.Instance) - assert response.name == "name_value" - assert response.display_name == "display_name_value" - assert response.state == instance.Instance.State.READY - assert response.type_ == instance.Instance.Type.PRODUCTION @@ -1311,18 +1281,18 @@ async def test_update_instance_async_from_dict(): def test_update_instance_field_headers(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = instance.Instance() + request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_instance), "__call__") as call: call.return_value = instance.Instance() - client.update_instance(request) # Establish that the underlying gRPC stub method was called. @@ -1338,18 +1308,18 @@ def test_update_instance_field_headers(): @pytest.mark.asyncio async def test_update_instance_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = instance.Instance() + request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_instance), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.Instance()) - await client.update_instance(request) # Establish that the underlying gRPC stub method was called. @@ -1367,7 +1337,7 @@ def test_partial_update_instance( request_type=bigtable_instance_admin.PartialUpdateInstanceRequest, ): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -1380,13 +1350,11 @@ def test_partial_update_instance( ) as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/spam") - response = client.partial_update_instance(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.PartialUpdateInstanceRequest() # Establish that the response is the type that we expect. @@ -1401,7 +1369,7 @@ def test_partial_update_instance_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -1411,7 +1379,6 @@ def test_partial_update_instance_empty_call(): client.partial_update_instance() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.PartialUpdateInstanceRequest() @@ -1421,7 +1388,7 @@ async def test_partial_update_instance_async( request_type=bigtable_instance_admin.PartialUpdateInstanceRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -1436,13 +1403,11 @@ async def test_partial_update_instance_async( call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/spam") ) - response = await client.partial_update_instance(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.PartialUpdateInstanceRequest() # Establish that the response is the type that we expect. @@ -1456,12 +1421,13 @@ async def test_partial_update_instance_async_from_dict(): def test_partial_update_instance_field_headers(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_instance_admin.PartialUpdateInstanceRequest() + request.instance.name = "instance.name/value" # Mock the actual call within the gRPC stub, and fake the request. @@ -1469,7 +1435,6 @@ def test_partial_update_instance_field_headers(): type(client.transport.partial_update_instance), "__call__" ) as call: call.return_value = operations_pb2.Operation(name="operations/op") - client.partial_update_instance(request) # Establish that the underlying gRPC stub method was called. @@ -1487,12 +1452,13 @@ def test_partial_update_instance_field_headers(): @pytest.mark.asyncio async def test_partial_update_instance_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_instance_admin.PartialUpdateInstanceRequest() + request.instance.name = "instance.name/value" # Mock the actual call within the gRPC stub, and fake the request. @@ -1502,7 +1468,6 @@ async def test_partial_update_instance_field_headers_async(): call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/op") ) - await client.partial_update_instance(request) # Establish that the underlying gRPC stub method was called. @@ -1519,7 +1484,7 @@ async def test_partial_update_instance_field_headers_async(): def test_partial_update_instance_flattened(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1528,27 +1493,24 @@ def test_partial_update_instance_flattened(): ) as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/op") - # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. 
client.partial_update_instance( instance=gba_instance.Instance(name="name_value"), - update_mask=field_mask.FieldMask(paths=["paths_value"]), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].instance == gba_instance.Instance(name="name_value") - - assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"]) def test_partial_update_instance_flattened_error(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened @@ -1557,14 +1519,14 @@ def test_partial_update_instance_flattened_error(): client.partial_update_instance( bigtable_instance_admin.PartialUpdateInstanceRequest(), instance=gba_instance.Instance(name="name_value"), - update_mask=field_mask.FieldMask(paths=["paths_value"]), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) @pytest.mark.asyncio async def test_partial_update_instance_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1581,23 +1543,21 @@ async def test_partial_update_instance_flattened_async(): # using the keyword arguments to the method. response = await client.partial_update_instance( instance=gba_instance.Instance(name="name_value"), - update_mask=field_mask.FieldMask(paths=["paths_value"]), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].instance == gba_instance.Instance(name="name_value") - - assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"]) @pytest.mark.asyncio async def test_partial_update_instance_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened @@ -1606,7 +1566,7 @@ async def test_partial_update_instance_flattened_error_async(): await client.partial_update_instance( bigtable_instance_admin.PartialUpdateInstanceRequest(), instance=gba_instance.Instance(name="name_value"), - update_mask=field_mask.FieldMask(paths=["paths_value"]), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) @@ -1614,7 +1574,7 @@ def test_delete_instance( transport: str = "grpc", request_type=bigtable_instance_admin.DeleteInstanceRequest ): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -1625,13 +1585,11 @@ def test_delete_instance( with mock.patch.object(type(client.transport.delete_instance), "__call__") as call: # Designate an appropriate return value for the call. 
call.return_value = None - response = client.delete_instance(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.DeleteInstanceRequest() # Establish that the response is the type that we expect. @@ -1646,7 +1604,7 @@ def test_delete_instance_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1654,7 +1612,6 @@ def test_delete_instance_empty_call(): client.delete_instance() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.DeleteInstanceRequest() @@ -1664,7 +1621,7 @@ async def test_delete_instance_async( request_type=bigtable_instance_admin.DeleteInstanceRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -1675,13 +1632,11 @@ async def test_delete_instance_async( with mock.patch.object(type(client.transport.delete_instance), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.delete_instance(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.DeleteInstanceRequest() # Establish that the response is the type that we expect. @@ -1695,18 +1650,18 @@ async def test_delete_instance_async_from_dict(): def test_delete_instance_field_headers(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_instance_admin.DeleteInstanceRequest() + request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_instance), "__call__") as call: call.return_value = None - client.delete_instance(request) # Establish that the underlying gRPC stub method was called. @@ -1722,18 +1677,18 @@ def test_delete_instance_field_headers(): @pytest.mark.asyncio async def test_delete_instance_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_instance_admin.DeleteInstanceRequest() + request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_instance), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.delete_instance(request) # Establish that the underlying gRPC stub method was called. 
@@ -1748,14 +1703,13 @@ async def test_delete_instance_field_headers_async(): def test_delete_instance_flattened(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_instance), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = None - # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.delete_instance(name="name_value",) @@ -1764,13 +1718,12 @@ def test_delete_instance_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" def test_delete_instance_flattened_error(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened @@ -1784,7 +1737,7 @@ def test_delete_instance_flattened_error(): @pytest.mark.asyncio async def test_delete_instance_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1801,14 +1754,13 @@ async def test_delete_instance_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" @pytest.mark.asyncio async def test_delete_instance_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened @@ -1823,7 +1775,7 @@ def test_create_cluster( transport: str = "grpc", request_type=bigtable_instance_admin.CreateClusterRequest ): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -1834,13 +1786,11 @@ def test_create_cluster( with mock.patch.object(type(client.transport.create_cluster), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/spam") - response = client.create_cluster(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.CreateClusterRequest() # Establish that the response is the type that we expect. @@ -1855,7 +1805,7 @@ def test_create_cluster_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -1863,7 +1813,6 @@ def test_create_cluster_empty_call(): client.create_cluster() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.CreateClusterRequest() @@ -1873,7 +1822,7 @@ async def test_create_cluster_async( request_type=bigtable_instance_admin.CreateClusterRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -1886,13 +1835,11 @@ async def test_create_cluster_async( call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/spam") ) - response = await client.create_cluster(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.CreateClusterRequest() # Establish that the response is the type that we expect. @@ -1906,18 +1853,18 @@ async def test_create_cluster_async_from_dict(): def test_create_cluster_field_headers(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_instance_admin.CreateClusterRequest() + request.parent = "parent/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_cluster), "__call__") as call: call.return_value = operations_pb2.Operation(name="operations/op") - client.create_cluster(request) # Establish that the underlying gRPC stub method was called. @@ -1933,12 +1880,13 @@ def test_create_cluster_field_headers(): @pytest.mark.asyncio async def test_create_cluster_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_instance_admin.CreateClusterRequest() + request.parent = "parent/value" # Mock the actual call within the gRPC stub, and fake the request. @@ -1946,7 +1894,6 @@ async def test_create_cluster_field_headers_async(): call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/op") ) - await client.create_cluster(request) # Establish that the underlying gRPC stub method was called. @@ -1961,14 +1908,13 @@ async def test_create_cluster_field_headers_async(): def test_create_cluster_flattened(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_cluster), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/op") - # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.create_cluster( @@ -1981,17 +1927,14 @@ def test_create_cluster_flattened(): # request object values. 
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].parent == "parent_value" - assert args[0].cluster_id == "cluster_id_value" - assert args[0].cluster == instance.Cluster(name="name_value") def test_create_cluster_flattened_error(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened @@ -2008,7 +1951,7 @@ def test_create_cluster_flattened_error(): @pytest.mark.asyncio async def test_create_cluster_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -2031,18 +1974,15 @@ async def test_create_cluster_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].parent == "parent_value" - assert args[0].cluster_id == "cluster_id_value" - assert args[0].cluster == instance.Cluster(name="name_value") @pytest.mark.asyncio async def test_create_cluster_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened @@ -2060,7 +2000,7 @@ def test_get_cluster( transport: str = "grpc", request_type=bigtable_instance_admin.GetClusterRequest ): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -2077,27 +2017,19 @@ def test_get_cluster( serve_nodes=1181, default_storage_type=common.StorageType.SSD, ) - response = client.get_cluster(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.GetClusterRequest() # Establish that the response is the type that we expect. - assert isinstance(response, instance.Cluster) - assert response.name == "name_value" - assert response.location == "location_value" - assert response.state == instance.Cluster.State.READY - assert response.serve_nodes == 1181 - assert response.default_storage_type == common.StorageType.SSD @@ -2109,7 +2041,7 @@ def test_get_cluster_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -2117,7 +2049,6 @@ def test_get_cluster_empty_call(): client.get_cluster() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.GetClusterRequest() @@ -2127,7 +2058,7 @@ async def test_get_cluster_async( request_type=bigtable_instance_admin.GetClusterRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -2146,26 +2077,19 @@ async def test_get_cluster_async( default_storage_type=common.StorageType.SSD, ) ) - response = await client.get_cluster(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.GetClusterRequest() # Establish that the response is the type that we expect. assert isinstance(response, instance.Cluster) - assert response.name == "name_value" - assert response.location == "location_value" - assert response.state == instance.Cluster.State.READY - assert response.serve_nodes == 1181 - assert response.default_storage_type == common.StorageType.SSD @@ -2176,18 +2100,18 @@ async def test_get_cluster_async_from_dict(): def test_get_cluster_field_headers(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_instance_admin.GetClusterRequest() + request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_cluster), "__call__") as call: call.return_value = instance.Cluster() - client.get_cluster(request) # Establish that the underlying gRPC stub method was called. @@ -2203,18 +2127,18 @@ def test_get_cluster_field_headers(): @pytest.mark.asyncio async def test_get_cluster_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_instance_admin.GetClusterRequest() + request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_cluster), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.Cluster()) - await client.get_cluster(request) # Establish that the underlying gRPC stub method was called. @@ -2229,14 +2153,13 @@ async def test_get_cluster_field_headers_async(): def test_get_cluster_flattened(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_cluster), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = instance.Cluster() - # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.get_cluster(name="name_value",) @@ -2245,13 +2168,12 @@ def test_get_cluster_flattened(): # request object values. 
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" def test_get_cluster_flattened_error(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened @@ -2265,7 +2187,7 @@ def test_get_cluster_flattened_error(): @pytest.mark.asyncio async def test_get_cluster_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -2282,14 +2204,13 @@ async def test_get_cluster_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" @pytest.mark.asyncio async def test_get_cluster_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened @@ -2304,7 +2225,7 @@ def test_list_clusters( transport: str = "grpc", request_type=bigtable_instance_admin.ListClustersRequest ): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -2318,23 +2239,17 @@ def test_list_clusters( failed_locations=["failed_locations_value"], next_page_token="next_page_token_value", ) - response = client.list_clusters(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListClustersRequest() # Establish that the response is the type that we expect. - assert response.raw_page is response - assert isinstance(response, bigtable_instance_admin.ListClustersResponse) - assert response.failed_locations == ["failed_locations_value"] - assert response.next_page_token == "next_page_token_value" @@ -2346,7 +2261,7 @@ def test_list_clusters_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -2354,7 +2269,6 @@ def test_list_clusters_empty_call(): client.list_clusters() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListClustersRequest() @@ -2364,7 +2278,7 @@ async def test_list_clusters_async( request_type=bigtable_instance_admin.ListClustersRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -2380,20 +2294,16 @@ async def test_list_clusters_async( next_page_token="next_page_token_value", ) ) - response = await client.list_clusters(request) # Establish that the underlying gRPC stub method was called. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListClustersRequest() # Establish that the response is the type that we expect. assert isinstance(response, bigtable_instance_admin.ListClustersResponse) - assert response.failed_locations == ["failed_locations_value"] - assert response.next_page_token == "next_page_token_value" @@ -2404,18 +2314,18 @@ async def test_list_clusters_async_from_dict(): def test_list_clusters_field_headers(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_instance_admin.ListClustersRequest() + request.parent = "parent/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: call.return_value = bigtable_instance_admin.ListClustersResponse() - client.list_clusters(request) # Establish that the underlying gRPC stub method was called. @@ -2431,12 +2341,13 @@ def test_list_clusters_field_headers(): @pytest.mark.asyncio async def test_list_clusters_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_instance_admin.ListClustersRequest() + request.parent = "parent/value" # Mock the actual call within the gRPC stub, and fake the request. @@ -2444,7 +2355,6 @@ async def test_list_clusters_field_headers_async(): call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( bigtable_instance_admin.ListClustersResponse() ) - await client.list_clusters(request) # Establish that the underlying gRPC stub method was called. @@ -2459,14 +2369,13 @@ async def test_list_clusters_field_headers_async(): def test_list_clusters_flattened(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = bigtable_instance_admin.ListClustersResponse() - # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.list_clusters(parent="parent_value",) @@ -2475,13 +2384,12 @@ def test_list_clusters_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].parent == "parent_value" def test_list_clusters_flattened_error(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened @@ -2495,7 +2403,7 @@ def test_list_clusters_flattened_error(): @pytest.mark.asyncio async def test_list_clusters_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -2514,14 +2422,13 @@ async def test_list_clusters_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].parent == "parent_value" @pytest.mark.asyncio async def test_list_clusters_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened @@ -2534,7 +2441,7 @@ async def test_list_clusters_flattened_error_async(): def test_update_cluster(transport: str = "grpc", request_type=instance.Cluster): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -2545,13 +2452,11 @@ def test_update_cluster(transport: str = "grpc", request_type=instance.Cluster): with mock.patch.object(type(client.transport.update_cluster), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/spam") - response = client.update_cluster(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == instance.Cluster() # Establish that the response is the type that we expect. @@ -2566,7 +2471,7 @@ def test_update_cluster_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -2574,7 +2479,6 @@ def test_update_cluster_empty_call(): client.update_cluster() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == instance.Cluster() @@ -2583,7 +2487,7 @@ async def test_update_cluster_async( transport: str = "grpc_asyncio", request_type=instance.Cluster ): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -2596,13 +2500,11 @@ async def test_update_cluster_async( call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/spam") ) - response = await client.update_cluster(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == instance.Cluster() # Establish that the response is the type that we expect. @@ -2616,18 +2518,18 @@ async def test_update_cluster_async_from_dict(): def test_update_cluster_field_headers(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = instance.Cluster() + request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object(type(client.transport.update_cluster), "__call__") as call: call.return_value = operations_pb2.Operation(name="operations/op") - client.update_cluster(request) # Establish that the underlying gRPC stub method was called. @@ -2643,12 +2545,13 @@ def test_update_cluster_field_headers(): @pytest.mark.asyncio async def test_update_cluster_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = instance.Cluster() + request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. @@ -2656,7 +2559,6 @@ async def test_update_cluster_field_headers_async(): call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/op") ) - await client.update_cluster(request) # Establish that the underlying gRPC stub method was called. @@ -2673,7 +2575,7 @@ def test_delete_cluster( transport: str = "grpc", request_type=bigtable_instance_admin.DeleteClusterRequest ): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -2684,13 +2586,11 @@ def test_delete_cluster( with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = None - response = client.delete_cluster(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.DeleteClusterRequest() # Establish that the response is the type that we expect. @@ -2705,7 +2605,7 @@ def test_delete_cluster_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -2713,7 +2613,6 @@ def test_delete_cluster_empty_call(): client.delete_cluster() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.DeleteClusterRequest() @@ -2723,7 +2622,7 @@ async def test_delete_cluster_async( request_type=bigtable_instance_admin.DeleteClusterRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -2734,13 +2633,11 @@ async def test_delete_cluster_async( with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.delete_cluster(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.DeleteClusterRequest() # Establish that the response is the type that we expect. 
@@ -2754,18 +2651,18 @@ async def test_delete_cluster_async_from_dict(): def test_delete_cluster_field_headers(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_instance_admin.DeleteClusterRequest() + request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call: call.return_value = None - client.delete_cluster(request) # Establish that the underlying gRPC stub method was called. @@ -2781,18 +2678,18 @@ def test_delete_cluster_field_headers(): @pytest.mark.asyncio async def test_delete_cluster_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_instance_admin.DeleteClusterRequest() + request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.delete_cluster(request) # Establish that the underlying gRPC stub method was called. @@ -2807,14 +2704,13 @@ async def test_delete_cluster_field_headers_async(): def test_delete_cluster_flattened(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = None - # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.delete_cluster(name="name_value",) @@ -2823,13 +2719,12 @@ def test_delete_cluster_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" def test_delete_cluster_flattened_error(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened @@ -2843,7 +2738,7 @@ def test_delete_cluster_flattened_error(): @pytest.mark.asyncio async def test_delete_cluster_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -2860,14 +2755,13 @@ async def test_delete_cluster_flattened_async(): # request object values. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" @pytest.mark.asyncio async def test_delete_cluster_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened @@ -2883,7 +2777,7 @@ def test_create_app_profile( request_type=bigtable_instance_admin.CreateAppProfileRequest, ): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -2901,23 +2795,17 @@ def test_create_app_profile( description="description_value", multi_cluster_routing_use_any=None, ) - response = client.create_app_profile(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.CreateAppProfileRequest() # Establish that the response is the type that we expect. - assert isinstance(response, instance.AppProfile) - assert response.name == "name_value" - assert response.etag == "etag_value" - assert response.description == "description_value" @@ -2929,7 +2817,7 @@ def test_create_app_profile_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -2939,7 +2827,6 @@ def test_create_app_profile_empty_call(): client.create_app_profile() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.CreateAppProfileRequest() @@ -2949,7 +2836,7 @@ async def test_create_app_profile_async( request_type=bigtable_instance_admin.CreateAppProfileRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -2966,22 +2853,17 @@ async def test_create_app_profile_async( name="name_value", etag="etag_value", description="description_value", ) ) - response = await client.create_app_profile(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.CreateAppProfileRequest() # Establish that the response is the type that we expect. assert isinstance(response, instance.AppProfile) - assert response.name == "name_value" - assert response.etag == "etag_value" - assert response.description == "description_value" @@ -2992,12 +2874,13 @@ async def test_create_app_profile_async_from_dict(): def test_create_app_profile_field_headers(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. 
request = bigtable_instance_admin.CreateAppProfileRequest() + request.parent = "parent/value" # Mock the actual call within the gRPC stub, and fake the request. @@ -3005,7 +2888,6 @@ def test_create_app_profile_field_headers(): type(client.transport.create_app_profile), "__call__" ) as call: call.return_value = instance.AppProfile() - client.create_app_profile(request) # Establish that the underlying gRPC stub method was called. @@ -3021,12 +2903,13 @@ def test_create_app_profile_field_headers(): @pytest.mark.asyncio async def test_create_app_profile_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_instance_admin.CreateAppProfileRequest() + request.parent = "parent/value" # Mock the actual call within the gRPC stub, and fake the request. @@ -3034,7 +2917,6 @@ async def test_create_app_profile_field_headers_async(): type(client.transport.create_app_profile), "__call__" ) as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.AppProfile()) - await client.create_app_profile(request) # Establish that the underlying gRPC stub method was called. @@ -3049,7 +2931,7 @@ async def test_create_app_profile_field_headers_async(): def test_create_app_profile_flattened(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -3058,7 +2940,6 @@ def test_create_app_profile_flattened(): ) as call: # Designate an appropriate return value for the call. call.return_value = instance.AppProfile() - # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.create_app_profile( @@ -3071,17 +2952,14 @@ def test_create_app_profile_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].parent == "parent_value" - assert args[0].app_profile_id == "app_profile_id_value" - assert args[0].app_profile == instance.AppProfile(name="name_value") def test_create_app_profile_flattened_error(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened @@ -3098,7 +2976,7 @@ def test_create_app_profile_flattened_error(): @pytest.mark.asyncio async def test_create_app_profile_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -3121,18 +2999,15 @@ async def test_create_app_profile_flattened_async(): # request object values. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].parent == "parent_value" - assert args[0].app_profile_id == "app_profile_id_value" - assert args[0].app_profile == instance.AppProfile(name="name_value") @pytest.mark.asyncio async def test_create_app_profile_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened @@ -3150,7 +3025,7 @@ def test_get_app_profile( transport: str = "grpc", request_type=bigtable_instance_admin.GetAppProfileRequest ): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -3166,23 +3041,17 @@ def test_get_app_profile( description="description_value", multi_cluster_routing_use_any=None, ) - response = client.get_app_profile(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.GetAppProfileRequest() # Establish that the response is the type that we expect. - assert isinstance(response, instance.AppProfile) - assert response.name == "name_value" - assert response.etag == "etag_value" - assert response.description == "description_value" @@ -3194,7 +3063,7 @@ def test_get_app_profile_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -3202,7 +3071,6 @@ def test_get_app_profile_empty_call(): client.get_app_profile() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.GetAppProfileRequest() @@ -3212,7 +3080,7 @@ async def test_get_app_profile_async( request_type=bigtable_instance_admin.GetAppProfileRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -3227,22 +3095,17 @@ async def test_get_app_profile_async( name="name_value", etag="etag_value", description="description_value", ) ) - response = await client.get_app_profile(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.GetAppProfileRequest() # Establish that the response is the type that we expect. assert isinstance(response, instance.AppProfile) - assert response.name == "name_value" - assert response.etag == "etag_value" - assert response.description == "description_value" @@ -3253,18 +3116,18 @@ async def test_get_app_profile_async_from_dict(): def test_get_app_profile_field_headers(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. 
request = bigtable_instance_admin.GetAppProfileRequest() + request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_app_profile), "__call__") as call: call.return_value = instance.AppProfile() - client.get_app_profile(request) # Establish that the underlying gRPC stub method was called. @@ -3280,18 +3143,18 @@ def test_get_app_profile_field_headers(): @pytest.mark.asyncio async def test_get_app_profile_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_instance_admin.GetAppProfileRequest() + request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_app_profile), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.AppProfile()) - await client.get_app_profile(request) # Establish that the underlying gRPC stub method was called. @@ -3306,14 +3169,13 @@ async def test_get_app_profile_field_headers_async(): def test_get_app_profile_flattened(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_app_profile), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = instance.AppProfile() - # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.get_app_profile(name="name_value",) @@ -3322,13 +3184,12 @@ def test_get_app_profile_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" def test_get_app_profile_flattened_error(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened @@ -3342,7 +3203,7 @@ def test_get_app_profile_flattened_error(): @pytest.mark.asyncio async def test_get_app_profile_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -3359,14 +3220,13 @@ async def test_get_app_profile_flattened_async(): # request object values. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" @pytest.mark.asyncio async def test_get_app_profile_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened @@ -3381,7 +3241,7 @@ def test_list_app_profiles( transport: str = "grpc", request_type=bigtable_instance_admin.ListAppProfilesRequest ): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -3397,21 +3257,16 @@ def test_list_app_profiles( next_page_token="next_page_token_value", failed_locations=["failed_locations_value"], ) - response = client.list_app_profiles(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListAppProfilesRequest() # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListAppProfilesPager) - assert response.next_page_token == "next_page_token_value" - assert response.failed_locations == ["failed_locations_value"] @@ -3423,7 +3278,7 @@ def test_list_app_profiles_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -3433,7 +3288,6 @@ def test_list_app_profiles_empty_call(): client.list_app_profiles() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListAppProfilesRequest() @@ -3443,7 +3297,7 @@ async def test_list_app_profiles_async( request_type=bigtable_instance_admin.ListAppProfilesRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -3461,20 +3315,16 @@ async def test_list_app_profiles_async( failed_locations=["failed_locations_value"], ) ) - response = await client.list_app_profiles(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListAppProfilesRequest() # Establish that the response is the type that we expect. assert isinstance(response, pagers.ListAppProfilesAsyncPager) - assert response.next_page_token == "next_page_token_value" - assert response.failed_locations == ["failed_locations_value"] @@ -3485,12 +3335,13 @@ async def test_list_app_profiles_async_from_dict(): def test_list_app_profiles_field_headers(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. 
request = bigtable_instance_admin.ListAppProfilesRequest() + request.parent = "parent/value" # Mock the actual call within the gRPC stub, and fake the request. @@ -3498,7 +3349,6 @@ def test_list_app_profiles_field_headers(): type(client.transport.list_app_profiles), "__call__" ) as call: call.return_value = bigtable_instance_admin.ListAppProfilesResponse() - client.list_app_profiles(request) # Establish that the underlying gRPC stub method was called. @@ -3514,12 +3364,13 @@ def test_list_app_profiles_field_headers(): @pytest.mark.asyncio async def test_list_app_profiles_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_instance_admin.ListAppProfilesRequest() + request.parent = "parent/value" # Mock the actual call within the gRPC stub, and fake the request. @@ -3529,7 +3380,6 @@ async def test_list_app_profiles_field_headers_async(): call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( bigtable_instance_admin.ListAppProfilesResponse() ) - await client.list_app_profiles(request) # Establish that the underlying gRPC stub method was called. @@ -3544,7 +3394,7 @@ async def test_list_app_profiles_field_headers_async(): def test_list_app_profiles_flattened(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -3553,7 +3403,6 @@ def test_list_app_profiles_flattened(): ) as call: # Designate an appropriate return value for the call. call.return_value = bigtable_instance_admin.ListAppProfilesResponse() - # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.list_app_profiles(parent="parent_value",) @@ -3562,13 +3411,12 @@ def test_list_app_profiles_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].parent == "parent_value" def test_list_app_profiles_flattened_error(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened @@ -3582,7 +3430,7 @@ def test_list_app_profiles_flattened_error(): @pytest.mark.asyncio async def test_list_app_profiles_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -3603,14 +3451,13 @@ async def test_list_app_profiles_flattened_async(): # request object values. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].parent == "parent_value" @pytest.mark.asyncio async def test_list_app_profiles_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened @@ -3622,7 +3469,9 @@ async def test_list_app_profiles_flattened_error_async(): def test_list_app_profiles_pager(): - client = BigtableInstanceAdminClient(credentials=credentials.AnonymousCredentials,) + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials, + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -3664,7 +3513,9 @@ def test_list_app_profiles_pager(): def test_list_app_profiles_pages(): - client = BigtableInstanceAdminClient(credentials=credentials.AnonymousCredentials,) + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials, + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -3699,7 +3550,7 @@ def test_list_app_profiles_pages(): @pytest.mark.asyncio async def test_list_app_profiles_async_pager(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials, + credentials=ga_credentials.AnonymousCredentials, ) # Mock the actual call within the gRPC stub, and fake the request. @@ -3742,7 +3593,7 @@ async def test_list_app_profiles_async_pager(): @pytest.mark.asyncio async def test_list_app_profiles_async_pages(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials, + credentials=ga_credentials.AnonymousCredentials, ) # Mock the actual call within the gRPC stub, and fake the request. @@ -3784,7 +3635,7 @@ def test_update_app_profile( request_type=bigtable_instance_admin.UpdateAppProfileRequest, ): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -3797,13 +3648,11 @@ def test_update_app_profile( ) as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/spam") - response = client.update_app_profile(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.UpdateAppProfileRequest() # Establish that the response is the type that we expect. @@ -3818,7 +3667,7 @@ def test_update_app_profile_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -3828,7 +3677,6 @@ def test_update_app_profile_empty_call(): client.update_app_profile() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.UpdateAppProfileRequest() @@ -3838,7 +3686,7 @@ async def test_update_app_profile_async( request_type=bigtable_instance_admin.UpdateAppProfileRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -3853,13 +3701,11 @@ async def test_update_app_profile_async( call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/spam") ) - response = await client.update_app_profile(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.UpdateAppProfileRequest() # Establish that the response is the type that we expect. @@ -3873,12 +3719,13 @@ async def test_update_app_profile_async_from_dict(): def test_update_app_profile_field_headers(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_instance_admin.UpdateAppProfileRequest() + request.app_profile.name = "app_profile.name/value" # Mock the actual call within the gRPC stub, and fake the request. @@ -3886,7 +3733,6 @@ def test_update_app_profile_field_headers(): type(client.transport.update_app_profile), "__call__" ) as call: call.return_value = operations_pb2.Operation(name="operations/op") - client.update_app_profile(request) # Establish that the underlying gRPC stub method was called. @@ -3904,12 +3750,13 @@ def test_update_app_profile_field_headers(): @pytest.mark.asyncio async def test_update_app_profile_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_instance_admin.UpdateAppProfileRequest() + request.app_profile.name = "app_profile.name/value" # Mock the actual call within the gRPC stub, and fake the request. @@ -3919,7 +3766,6 @@ async def test_update_app_profile_field_headers_async(): call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/op") ) - await client.update_app_profile(request) # Establish that the underlying gRPC stub method was called. @@ -3936,7 +3782,7 @@ async def test_update_app_profile_field_headers_async(): def test_update_app_profile_flattened(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -3945,27 +3791,24 @@ def test_update_app_profile_flattened(): ) as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/op") - # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. 
client.update_app_profile( app_profile=instance.AppProfile(name="name_value"), - update_mask=field_mask.FieldMask(paths=["paths_value"]), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].app_profile == instance.AppProfile(name="name_value") - - assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"]) def test_update_app_profile_flattened_error(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened @@ -3974,14 +3817,14 @@ def test_update_app_profile_flattened_error(): client.update_app_profile( bigtable_instance_admin.UpdateAppProfileRequest(), app_profile=instance.AppProfile(name="name_value"), - update_mask=field_mask.FieldMask(paths=["paths_value"]), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) @pytest.mark.asyncio async def test_update_app_profile_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -3998,23 +3841,21 @@ async def test_update_app_profile_flattened_async(): # using the keyword arguments to the method. response = await client.update_app_profile( app_profile=instance.AppProfile(name="name_value"), - update_mask=field_mask.FieldMask(paths=["paths_value"]), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].app_profile == instance.AppProfile(name="name_value") - - assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"]) @pytest.mark.asyncio async def test_update_app_profile_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened @@ -4023,7 +3864,7 @@ async def test_update_app_profile_flattened_error_async(): await client.update_app_profile( bigtable_instance_admin.UpdateAppProfileRequest(), app_profile=instance.AppProfile(name="name_value"), - update_mask=field_mask.FieldMask(paths=["paths_value"]), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) @@ -4032,7 +3873,7 @@ def test_delete_app_profile( request_type=bigtable_instance_admin.DeleteAppProfileRequest, ): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -4045,13 +3886,11 @@ def test_delete_app_profile( ) as call: # Designate an appropriate return value for the call. call.return_value = None - response = client.delete_app_profile(request) # Establish that the underlying gRPC stub method was called. 
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.DeleteAppProfileRequest() # Establish that the response is the type that we expect. @@ -4066,7 +3905,7 @@ def test_delete_app_profile_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -4076,7 +3915,6 @@ def test_delete_app_profile_empty_call(): client.delete_app_profile() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.DeleteAppProfileRequest() @@ -4086,7 +3924,7 @@ async def test_delete_app_profile_async( request_type=bigtable_instance_admin.DeleteAppProfileRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -4099,13 +3937,11 @@ async def test_delete_app_profile_async( ) as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.delete_app_profile(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.DeleteAppProfileRequest() # Establish that the response is the type that we expect. @@ -4119,12 +3955,13 @@ async def test_delete_app_profile_async_from_dict(): def test_delete_app_profile_field_headers(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_instance_admin.DeleteAppProfileRequest() + request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. @@ -4132,7 +3969,6 @@ def test_delete_app_profile_field_headers(): type(client.transport.delete_app_profile), "__call__" ) as call: call.return_value = None - client.delete_app_profile(request) # Establish that the underlying gRPC stub method was called. @@ -4148,12 +3984,13 @@ def test_delete_app_profile_field_headers(): @pytest.mark.asyncio async def test_delete_app_profile_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_instance_admin.DeleteAppProfileRequest() + request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. @@ -4161,7 +3998,6 @@ async def test_delete_app_profile_field_headers_async(): type(client.transport.delete_app_profile), "__call__" ) as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.delete_app_profile(request) # Establish that the underlying gRPC stub method was called. 
@@ -4176,7 +4012,7 @@ async def test_delete_app_profile_field_headers_async(): def test_delete_app_profile_flattened(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -4185,7 +4021,6 @@ def test_delete_app_profile_flattened(): ) as call: # Designate an appropriate return value for the call. call.return_value = None - # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.delete_app_profile(name="name_value",) @@ -4194,13 +4029,12 @@ def test_delete_app_profile_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" def test_delete_app_profile_flattened_error(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened @@ -4214,7 +4048,7 @@ def test_delete_app_profile_flattened_error(): @pytest.mark.asyncio async def test_delete_app_profile_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -4233,14 +4067,13 @@ async def test_delete_app_profile_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" @pytest.mark.asyncio async def test_delete_app_profile_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened @@ -4252,10 +4085,10 @@ async def test_delete_app_profile_flattened_error_async(): def test_get_iam_policy( - transport: str = "grpc", request_type=iam_policy.GetIamPolicyRequest + transport: str = "grpc", request_type=iam_policy_pb2.GetIamPolicyRequest ): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -4265,22 +4098,17 @@ def test_get_iam_policy( # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = policy.Policy(version=774, etag=b"etag_blob",) - + call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",) response = client.get_iam_policy(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - - assert args[0] == iam_policy.GetIamPolicyRequest() + assert args[0] == iam_policy_pb2.GetIamPolicyRequest() # Establish that the response is the type that we expect. - - assert isinstance(response, policy.Policy) - + assert isinstance(response, policy_pb2.Policy) assert response.version == 774 - assert response.etag == b"etag_blob" @@ -4292,7 +4120,7 @@ def test_get_iam_policy_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. 
request == None and no flattened fields passed, work. client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -4300,16 +4128,15 @@ def test_get_iam_policy_empty_call(): client.get_iam_policy() call.assert_called() _, args, _ = call.mock_calls[0] - - assert args[0] == iam_policy.GetIamPolicyRequest() + assert args[0] == iam_policy_pb2.GetIamPolicyRequest() @pytest.mark.asyncio async def test_get_iam_policy_async( - transport: str = "grpc_asyncio", request_type=iam_policy.GetIamPolicyRequest + transport: str = "grpc_asyncio", request_type=iam_policy_pb2.GetIamPolicyRequest ): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -4320,22 +4147,18 @@ async def test_get_iam_policy_async( with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - policy.Policy(version=774, etag=b"etag_blob",) + policy_pb2.Policy(version=774, etag=b"etag_blob",) ) - response = await client.get_iam_policy(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - - assert args[0] == iam_policy.GetIamPolicyRequest() + assert args[0] == iam_policy_pb2.GetIamPolicyRequest() # Establish that the response is the type that we expect. - assert isinstance(response, policy.Policy) - + assert isinstance(response, policy_pb2.Policy) assert response.version == 774 - assert response.etag == b"etag_blob" @@ -4346,18 +4169,18 @@ async def test_get_iam_policy_async_from_dict(): def test_get_iam_policy_field_headers(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = iam_policy.GetIamPolicyRequest() + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: - call.return_value = policy.Policy() - + call.return_value = policy_pb2.Policy() client.get_iam_policy(request) # Establish that the underlying gRPC stub method was called. @@ -4373,18 +4196,18 @@ def test_get_iam_policy_field_headers(): @pytest.mark.asyncio async def test_get_iam_policy_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = iam_policy.GetIamPolicyRequest() + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy.Policy()) - + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) await client.get_iam_policy(request) # Establish that the underlying gRPC stub method was called. @@ -4399,17 +4222,16 @@ async def test_get_iam_policy_field_headers_async(): def test_get_iam_policy_from_dict_foreign(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = policy.Policy() - + call.return_value = policy_pb2.Policy() response = client.get_iam_policy( request={ "resource": "resource_value", - "options": options.GetPolicyOptions(requested_policy_version=2598), + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), } ) call.assert_called() @@ -4417,14 +4239,13 @@ def test_get_iam_policy_from_dict_foreign(): def test_get_iam_policy_flattened(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = policy.Policy() - + call.return_value = policy_pb2.Policy() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.get_iam_policy(resource="resource_value",) @@ -4433,35 +4254,34 @@ def test_get_iam_policy_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].resource == "resource_value" def test_get_iam_policy_flattened_error(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.get_iam_policy( - iam_policy.GetIamPolicyRequest(), resource="resource_value", + iam_policy_pb2.GetIamPolicyRequest(), resource="resource_value", ) @pytest.mark.asyncio async def test_get_iam_policy_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = policy.Policy() + call.return_value = policy_pb2.Policy() - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy.Policy()) + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.get_iam_policy(resource="resource_value",) @@ -4470,29 +4290,28 @@ async def test_get_iam_policy_flattened_async(): # request object values. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].resource == "resource_value" @pytest.mark.asyncio async def test_get_iam_policy_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.get_iam_policy( - iam_policy.GetIamPolicyRequest(), resource="resource_value", + iam_policy_pb2.GetIamPolicyRequest(), resource="resource_value", ) def test_set_iam_policy( - transport: str = "grpc", request_type=iam_policy.SetIamPolicyRequest + transport: str = "grpc", request_type=iam_policy_pb2.SetIamPolicyRequest ): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -4502,22 +4321,17 @@ def test_set_iam_policy( # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = policy.Policy(version=774, etag=b"etag_blob",) - + call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",) response = client.set_iam_policy(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - - assert args[0] == iam_policy.SetIamPolicyRequest() + assert args[0] == iam_policy_pb2.SetIamPolicyRequest() # Establish that the response is the type that we expect. - - assert isinstance(response, policy.Policy) - + assert isinstance(response, policy_pb2.Policy) assert response.version == 774 - assert response.etag == b"etag_blob" @@ -4529,7 +4343,7 @@ def test_set_iam_policy_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -4537,16 +4351,15 @@ def test_set_iam_policy_empty_call(): client.set_iam_policy() call.assert_called() _, args, _ = call.mock_calls[0] - - assert args[0] == iam_policy.SetIamPolicyRequest() + assert args[0] == iam_policy_pb2.SetIamPolicyRequest() @pytest.mark.asyncio async def test_set_iam_policy_async( - transport: str = "grpc_asyncio", request_type=iam_policy.SetIamPolicyRequest + transport: str = "grpc_asyncio", request_type=iam_policy_pb2.SetIamPolicyRequest ): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -4557,22 +4370,18 @@ async def test_set_iam_policy_async( with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: # Designate an appropriate return value for the call. 
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - policy.Policy(version=774, etag=b"etag_blob",) + policy_pb2.Policy(version=774, etag=b"etag_blob",) ) - response = await client.set_iam_policy(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - - assert args[0] == iam_policy.SetIamPolicyRequest() + assert args[0] == iam_policy_pb2.SetIamPolicyRequest() # Establish that the response is the type that we expect. - assert isinstance(response, policy.Policy) - + assert isinstance(response, policy_pb2.Policy) assert response.version == 774 - assert response.etag == b"etag_blob" @@ -4583,18 +4392,18 @@ async def test_set_iam_policy_async_from_dict(): def test_set_iam_policy_field_headers(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = iam_policy.SetIamPolicyRequest() + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: - call.return_value = policy.Policy() - + call.return_value = policy_pb2.Policy() client.set_iam_policy(request) # Establish that the underlying gRPC stub method was called. @@ -4610,18 +4419,18 @@ def test_set_iam_policy_field_headers(): @pytest.mark.asyncio async def test_set_iam_policy_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = iam_policy.SetIamPolicyRequest() + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy.Policy()) - + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) await client.set_iam_policy(request) # Establish that the underlying gRPC stub method was called. @@ -4636,17 +4445,16 @@ async def test_set_iam_policy_field_headers_async(): def test_set_iam_policy_from_dict_foreign(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = policy.Policy() - + call.return_value = policy_pb2.Policy() response = client.set_iam_policy( request={ "resource": "resource_value", - "policy": policy.Policy(version=774), + "policy": policy_pb2.Policy(version=774), } ) call.assert_called() @@ -4654,14 +4462,13 @@ def test_set_iam_policy_from_dict_foreign(): def test_set_iam_policy_flattened(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = policy.Policy() - + call.return_value = policy_pb2.Policy() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.set_iam_policy(resource="resource_value",) @@ -4670,35 +4477,34 @@ def test_set_iam_policy_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].resource == "resource_value" def test_set_iam_policy_flattened_error(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.set_iam_policy( - iam_policy.SetIamPolicyRequest(), resource="resource_value", + iam_policy_pb2.SetIamPolicyRequest(), resource="resource_value", ) @pytest.mark.asyncio async def test_set_iam_policy_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = policy.Policy() + call.return_value = policy_pb2.Policy() - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy.Policy()) + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.set_iam_policy(resource="resource_value",) @@ -4707,29 +4513,28 @@ async def test_set_iam_policy_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].resource == "resource_value" @pytest.mark.asyncio async def test_set_iam_policy_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.set_iam_policy( - iam_policy.SetIamPolicyRequest(), resource="resource_value", + iam_policy_pb2.SetIamPolicyRequest(), resource="resource_value", ) def test_test_iam_permissions( - transport: str = "grpc", request_type=iam_policy.TestIamPermissionsRequest + transport: str = "grpc", request_type=iam_policy_pb2.TestIamPermissionsRequest ): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -4741,22 +4546,18 @@ def test_test_iam_permissions( type(client.transport.test_iam_permissions), "__call__" ) as call: # Designate an appropriate return value for the call. - call.return_value = iam_policy.TestIamPermissionsResponse( + call.return_value = iam_policy_pb2.TestIamPermissionsResponse( permissions=["permissions_value"], ) - response = client.test_iam_permissions(request) # Establish that the underlying gRPC stub method was called. 
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - - assert args[0] == iam_policy.TestIamPermissionsRequest() + assert args[0] == iam_policy_pb2.TestIamPermissionsRequest() # Establish that the response is the type that we expect. - - assert isinstance(response, iam_policy.TestIamPermissionsResponse) - + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) assert response.permissions == ["permissions_value"] @@ -4768,7 +4569,7 @@ def test_test_iam_permissions_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -4778,16 +4579,16 @@ def test_test_iam_permissions_empty_call(): client.test_iam_permissions() call.assert_called() _, args, _ = call.mock_calls[0] - - assert args[0] == iam_policy.TestIamPermissionsRequest() + assert args[0] == iam_policy_pb2.TestIamPermissionsRequest() @pytest.mark.asyncio async def test_test_iam_permissions_async( - transport: str = "grpc_asyncio", request_type=iam_policy.TestIamPermissionsRequest + transport: str = "grpc_asyncio", + request_type=iam_policy_pb2.TestIamPermissionsRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -4800,20 +4601,19 @@ async def test_test_iam_permissions_async( ) as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - iam_policy.TestIamPermissionsResponse(permissions=["permissions_value"],) + iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) ) - response = await client.test_iam_permissions(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - - assert args[0] == iam_policy.TestIamPermissionsRequest() + assert args[0] == iam_policy_pb2.TestIamPermissionsRequest() # Establish that the response is the type that we expect. - assert isinstance(response, iam_policy.TestIamPermissionsResponse) - + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) assert response.permissions == ["permissions_value"] @@ -4824,20 +4624,20 @@ async def test_test_iam_permissions_async_from_dict(): def test_test_iam_permissions_field_headers(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = iam_policy.TestIamPermissionsRequest() + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.test_iam_permissions), "__call__" ) as call: - call.return_value = iam_policy.TestIamPermissionsResponse() - + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() client.test_iam_permissions(request) # Establish that the underlying gRPC stub method was called. 
@@ -4853,12 +4653,13 @@ def test_test_iam_permissions_field_headers(): @pytest.mark.asyncio async def test_test_iam_permissions_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = iam_policy.TestIamPermissionsRequest() + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" # Mock the actual call within the gRPC stub, and fake the request. @@ -4866,9 +4667,8 @@ async def test_test_iam_permissions_field_headers_async(): type(client.transport.test_iam_permissions), "__call__" ) as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - iam_policy.TestIamPermissionsResponse() + iam_policy_pb2.TestIamPermissionsResponse() ) - await client.test_iam_permissions(request) # Establish that the underlying gRPC stub method was called. @@ -4883,15 +4683,14 @@ async def test_test_iam_permissions_field_headers_async(): def test_test_iam_permissions_from_dict_foreign(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.test_iam_permissions), "__call__" ) as call: # Designate an appropriate return value for the call. - call.return_value = iam_policy.TestIamPermissionsResponse() - + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() response = client.test_iam_permissions( request={ "resource": "resource_value", @@ -4903,7 +4702,7 @@ def test_test_iam_permissions_from_dict_foreign(): def test_test_iam_permissions_flattened(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -4911,8 +4710,7 @@ def test_test_iam_permissions_flattened(): type(client.transport.test_iam_permissions), "__call__" ) as call: # Designate an appropriate return value for the call. - call.return_value = iam_policy.TestIamPermissionsResponse() - + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.test_iam_permissions( @@ -4923,22 +4721,20 @@ def test_test_iam_permissions_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].resource == "resource_value" - assert args[0].permissions == ["permissions_value"] def test_test_iam_permissions_flattened_error(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): client.test_iam_permissions( - iam_policy.TestIamPermissionsRequest(), + iam_policy_pb2.TestIamPermissionsRequest(), resource="resource_value", permissions=["permissions_value"], ) @@ -4947,7 +4743,7 @@ def test_test_iam_permissions_flattened_error(): @pytest.mark.asyncio async def test_test_iam_permissions_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -4955,10 +4751,10 @@ async def test_test_iam_permissions_flattened_async(): type(client.transport.test_iam_permissions), "__call__" ) as call: # Designate an appropriate return value for the call. - call.return_value = iam_policy.TestIamPermissionsResponse() + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - iam_policy.TestIamPermissionsResponse() + iam_policy_pb2.TestIamPermissionsResponse() ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. @@ -4970,23 +4766,21 @@ async def test_test_iam_permissions_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].resource == "resource_value" - assert args[0].permissions == ["permissions_value"] @pytest.mark.asyncio async def test_test_iam_permissions_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.test_iam_permissions( - iam_policy.TestIamPermissionsRequest(), + iam_policy_pb2.TestIamPermissionsRequest(), resource="resource_value", permissions=["permissions_value"], ) @@ -4995,16 +4789,16 @@ async def test_test_iam_permissions_flattened_error_async(): def test_credentials_transport_error(): # It is an error to provide credentials and a transport instance. transport = transports.BigtableInstanceAdminGrpcTransport( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # It is an error to provide a credentials file and a transport instance. transport = transports.BigtableInstanceAdminGrpcTransport( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client = BigtableInstanceAdminClient( @@ -5014,7 +4808,7 @@ def test_credentials_transport_error(): # It is an error to provide scopes and a transport instance. transport = transports.BigtableInstanceAdminGrpcTransport( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client = BigtableInstanceAdminClient( @@ -5025,7 +4819,7 @@ def test_credentials_transport_error(): def test_transport_instance(): # A client may be instantiated with a custom transport instance. 
transport = transports.BigtableInstanceAdminGrpcTransport( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) client = BigtableInstanceAdminClient(transport=transport) assert client.transport is transport @@ -5034,13 +4828,13 @@ def test_transport_instance(): def test_transport_get_channel(): # A client may be instantiated with a custom transport instance. transport = transports.BigtableInstanceAdminGrpcTransport( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) channel = transport.grpc_channel assert channel transport = transports.BigtableInstanceAdminGrpcAsyncIOTransport( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) channel = transport.grpc_channel assert channel @@ -5055,8 +4849,8 @@ def test_transport_get_channel(): ) def test_transport_adc(transport_class): # Test default credentials are used if not provided. - with mock.patch.object(auth, "default") as adc: - adc.return_value = (credentials.AnonymousCredentials(), None) + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) transport_class() adc.assert_called_once() @@ -5064,16 +4858,16 @@ def test_transport_adc(transport_class): def test_transport_grpc_default(): # A client should use the gRPC transport by default. client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) assert isinstance(client.transport, transports.BigtableInstanceAdminGrpcTransport,) def test_bigtable_instance_admin_base_transport_error(): # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(exceptions.DuplicateCredentialArgs): + with pytest.raises(core_exceptions.DuplicateCredentialArgs): transport = transports.BigtableInstanceAdminTransport( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), credentials_file="credentials.json", ) @@ -5085,7 +4879,7 @@ def test_bigtable_instance_admin_base_transport(): ) as Transport: Transport.return_value = None transport = transports.BigtableInstanceAdminTransport( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Every method on the transport should just blindly @@ -5121,15 +4915,45 @@ def test_bigtable_instance_admin_base_transport(): transport.operations_client +@requires_google_auth_gte_1_25_0 def test_bigtable_instance_admin_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file with mock.patch.object( - auth, "load_credentials_from_file" + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch( + "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.BigtableInstanceAdminTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.BigtableInstanceAdminTransport( + credentials_file="credentials.json", quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=None, + default_scopes=( + "https://www.googleapis.com/auth/bigtable.admin", + "https://www.googleapis.com/auth/bigtable.admin.cluster", + "https://www.googleapis.com/auth/bigtable.admin.instance", + 
"https://www.googleapis.com/auth/cloud-bigtable.admin", + "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ), + quota_project_id="octopus", + ) + + +@requires_google_auth_lt_1_25_0 +def test_bigtable_instance_admin_base_transport_with_credentials_file_old_google_auth(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True ) as load_creds, mock.patch( "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.BigtableInstanceAdminTransport._prep_wrapped_messages" ) as Transport: Transport.return_value = None - load_creds.return_value = (credentials.AnonymousCredentials(), None) + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) transport = transports.BigtableInstanceAdminTransport( credentials_file="credentials.json", quota_project_id="octopus", ) @@ -5150,19 +4974,41 @@ def test_bigtable_instance_admin_base_transport_with_credentials_file(): def test_bigtable_instance_admin_base_transport_with_adc(): # Test the default credentials are used if credentials and credentials_file are None. - with mock.patch.object(auth, "default") as adc, mock.patch( + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch( "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.BigtableInstanceAdminTransport._prep_wrapped_messages" ) as Transport: Transport.return_value = None - adc.return_value = (credentials.AnonymousCredentials(), None) + adc.return_value = (ga_credentials.AnonymousCredentials(), None) transport = transports.BigtableInstanceAdminTransport() adc.assert_called_once() +@requires_google_auth_gte_1_25_0 def test_bigtable_instance_admin_auth_adc(): # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(auth, "default") as adc: - adc.return_value = (credentials.AnonymousCredentials(), None) + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + BigtableInstanceAdminClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + "https://www.googleapis.com/auth/bigtable.admin", + "https://www.googleapis.com/auth/bigtable.admin.cluster", + "https://www.googleapis.com/auth/bigtable.admin.instance", + "https://www.googleapis.com/auth/cloud-bigtable.admin", + "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ), + quota_project_id=None, + ) + + +@requires_google_auth_lt_1_25_0 +def test_bigtable_instance_admin_auth_adc_old_google_auth(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) BigtableInstanceAdminClient() adc.assert_called_once_with( scopes=( @@ -5178,14 +5024,49 @@ def test_bigtable_instance_admin_auth_adc(): ) -def test_bigtable_instance_admin_transport_auth_adc(): +@pytest.mark.parametrize( + "transport_class", + [ + transports.BigtableInstanceAdminGrpcTransport, + transports.BigtableInstanceAdminGrpcAsyncIOTransport, + ], +) +@requires_google_auth_gte_1_25_0 +def test_bigtable_instance_admin_transport_auth_adc(transport_class): # If credentials and host are not provided, the transport class should use # ADC credentials. - with mock.patch.object(auth, "default") as adc: - adc.return_value = (credentials.AnonymousCredentials(), None) - transports.BigtableInstanceAdminGrpcTransport( - host="squid.clam.whelk", quota_project_id="octopus" + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( + "https://www.googleapis.com/auth/bigtable.admin", + "https://www.googleapis.com/auth/bigtable.admin.cluster", + "https://www.googleapis.com/auth/bigtable.admin.instance", + "https://www.googleapis.com/auth/cloud-bigtable.admin", + "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ), + quota_project_id="octopus", ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.BigtableInstanceAdminGrpcTransport, + transports.BigtableInstanceAdminGrpcAsyncIOTransport, + ], +) +@requires_google_auth_lt_1_25_0 +def test_bigtable_instance_admin_transport_auth_adc_old_google_auth(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus") adc.assert_called_once_with( scopes=( "https://www.googleapis.com/auth/bigtable.admin", @@ -5200,6 +5081,133 @@ def test_bigtable_instance_admin_transport_auth_adc(): ) +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.BigtableInstanceAdminGrpcTransport, grpc_helpers), + (transports.BigtableInstanceAdminGrpcAsyncIOTransport, grpc_helpers_async), + ], +) +@requires_api_core_gte_1_26_0 +def test_bigtable_instance_admin_transport_create_channel( + transport_class, grpc_helpers +): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + + create_channel.assert_called_with( + "bigtableadmin.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + "https://www.googleapis.com/auth/bigtable.admin", + "https://www.googleapis.com/auth/bigtable.admin.cluster", + "https://www.googleapis.com/auth/bigtable.admin.instance", + "https://www.googleapis.com/auth/cloud-bigtable.admin", + "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ), + scopes=["1", "2"], + default_host="bigtableadmin.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.BigtableInstanceAdminGrpcTransport, grpc_helpers), + (transports.BigtableInstanceAdminGrpcAsyncIOTransport, grpc_helpers_async), + ], +) +@requires_api_core_lt_1_26_0 +def test_bigtable_instance_admin_transport_create_channel_old_api_core( + transport_class, grpc_helpers +): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class(quota_project_id="octopus") + + create_channel.assert_called_with( + "bigtableadmin.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + scopes=( + "https://www.googleapis.com/auth/bigtable.admin", + "https://www.googleapis.com/auth/bigtable.admin.cluster", + "https://www.googleapis.com/auth/bigtable.admin.instance", + "https://www.googleapis.com/auth/cloud-bigtable.admin", + "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ), + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.BigtableInstanceAdminGrpcTransport, grpc_helpers), + (transports.BigtableInstanceAdminGrpcAsyncIOTransport, grpc_helpers_async), + ], +) +@requires_api_core_lt_1_26_0 +def test_bigtable_instance_admin_transport_create_channel_user_scopes( + transport_class, grpc_helpers +): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + + create_channel.assert_called_with( + "bigtableadmin.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + scopes=["1", "2"], + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + @pytest.mark.parametrize( "transport_class", [ @@ -5210,7 +5218,7 @@ def test_bigtable_instance_admin_transport_auth_adc(): def test_bigtable_instance_admin_grpc_transport_client_cert_source_for_mtls( transport_class, ): - cred = credentials.AnonymousCredentials() + cred = ga_credentials.AnonymousCredentials() # Check ssl_channel_credentials is used if provided. with mock.patch.object(transport_class, "create_channel") as mock_create_channel: @@ -5257,7 +5265,7 @@ def test_bigtable_instance_admin_grpc_transport_client_cert_source_for_mtls( def test_bigtable_instance_admin_host_no_port(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="bigtableadmin.googleapis.com" ), @@ -5267,7 +5275,7 @@ def test_bigtable_instance_admin_host_no_port(): def test_bigtable_instance_admin_host_with_port(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="bigtableadmin.googleapis.com:8000" ), @@ -5323,9 +5331,9 @@ def test_bigtable_instance_admin_transport_channel_mtls_with_client_cert_source( mock_grpc_channel = mock.Mock() grpc_create_channel.return_value = mock_grpc_channel - cred = credentials.AnonymousCredentials() + cred = ga_credentials.AnonymousCredentials() with pytest.warns(DeprecationWarning): - with mock.patch.object(auth, "default") as adc: + with mock.patch.object(google.auth, "default") as adc: adc.return_value = (cred, None) transport = transport_class( host="squid.clam.whelk", @@ -5417,7 +5425,7 @@ def test_bigtable_instance_admin_transport_channel_mtls_with_adc(transport_class def test_bigtable_instance_admin_grpc_lro_client(): client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) transport = client.transport @@ -5430,7 +5438,7 @@ def test_bigtable_instance_admin_grpc_lro_client(): def test_bigtable_instance_admin_grpc_lro_async_client(): client = BigtableInstanceAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), transport="grpc_asyncio", + credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", ) transport = client.transport @@ -5445,7 +5453,6 @@ def test_app_profile_path(): project = "squid" instance = "clam" app_profile = "whelk" - expected = "projects/{project}/instances/{instance}/appProfiles/{app_profile}".format( project=project, instance=instance, app_profile=app_profile, ) @@ -5472,7 +5479,6 @@ def test_cluster_path(): project = "cuttlefish" instance = "mussel" cluster = "winkle" - expected = "projects/{project}/instances/{instance}/clusters/{cluster}".format( project=project, instance=instance, cluster=cluster, ) 
@@ -5498,7 +5504,6 @@ def test_crypto_key_path(): location = "clam" key_ring = "whelk" crypto_key = "octopus" - expected = "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}".format( project=project, location=location, key_ring=key_ring, crypto_key=crypto_key, ) @@ -5525,7 +5530,6 @@ def test_parse_crypto_key_path(): def test_instance_path(): project = "winkle" instance = "nautilus" - expected = "projects/{project}/instances/{instance}".format( project=project, instance=instance, ) @@ -5547,7 +5551,6 @@ def test_parse_instance_path(): def test_common_billing_account_path(): billing_account = "squid" - expected = "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -5568,7 +5571,6 @@ def test_parse_common_billing_account_path(): def test_common_folder_path(): folder = "whelk" - expected = "folders/{folder}".format(folder=folder,) actual = BigtableInstanceAdminClient.common_folder_path(folder) assert expected == actual @@ -5587,7 +5589,6 @@ def test_parse_common_folder_path(): def test_common_organization_path(): organization = "oyster" - expected = "organizations/{organization}".format(organization=organization,) actual = BigtableInstanceAdminClient.common_organization_path(organization) assert expected == actual @@ -5606,7 +5607,6 @@ def test_parse_common_organization_path(): def test_common_project_path(): project = "cuttlefish" - expected = "projects/{project}".format(project=project,) actual = BigtableInstanceAdminClient.common_project_path(project) assert expected == actual @@ -5626,7 +5626,6 @@ def test_parse_common_project_path(): def test_common_location_path(): project = "winkle" location = "nautilus" - expected = "projects/{project}/locations/{location}".format( project=project, location=location, ) @@ -5653,7 +5652,7 @@ def test_client_withDEFAULT_CLIENT_INFO(): transports.BigtableInstanceAdminTransport, "_prep_wrapped_messages" ) as prep: client = BigtableInstanceAdminClient( - credentials=credentials.AnonymousCredentials(), client_info=client_info, + credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) @@ -5662,6 +5661,6 @@ def test_client_withDEFAULT_CLIENT_INFO(): ) as prep: transport_class = BigtableInstanceAdminClient.get_transport_class() transport = transport_class( - credentials=credentials.AnonymousCredentials(), client_info=client_info, + credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index aca51c98e0d6..5b5918a7d624 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- - # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,9 +13,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# - import os import mock +import packaging.version import grpc from grpc.experimental import aio @@ -24,16 +23,16 @@ import pytest from proto.marshal.rules.dates import DurationRule, TimestampRule -from google import auth + from google.api_core import client_options -from google.api_core import exceptions +from google.api_core import exceptions as core_exceptions from google.api_core import future from google.api_core import gapic_v1 from google.api_core import grpc_helpers from google.api_core import grpc_helpers_async from google.api_core import operation_async # type: ignore from google.api_core import operations_v1 -from google.auth import credentials +from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( BigtableTableAdminAsyncClient, @@ -43,20 +42,50 @@ ) from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import pagers from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import transports +from google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.base import ( + _API_CORE_VERSION, +) +from google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.base import ( + _GOOGLE_AUTH_VERSION, +) from google.cloud.bigtable_admin_v2.types import bigtable_table_admin from google.cloud.bigtable_admin_v2.types import table from google.cloud.bigtable_admin_v2.types import table as gba_table -from google.iam.v1 import iam_policy_pb2 as iam_policy # type: ignore -from google.iam.v1 import options_pb2 as options # type: ignore -from google.iam.v1 import policy_pb2 as policy # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import options_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore from google.longrunning import operations_pb2 from google.oauth2 import service_account -from google.protobuf import any_pb2 as gp_any # type: ignore -from google.protobuf import duration_pb2 as duration # type: ignore -from google.protobuf import field_mask_pb2 as field_mask # type: ignore -from google.protobuf import timestamp_pb2 as timestamp # type: ignore -from google.rpc import status_pb2 as status # type: ignore -from google.type import expr_pb2 as expr # type: ignore +from google.protobuf import any_pb2 # type: ignore +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore +from google.type import expr_pb2 # type: ignore +import google.auth + + +# TODO(busunkim): Once google-api-core >= 1.26.0 is required: +# - Delete all the api-core and auth "less than" test cases +# - Delete these pytest markers (Make the "greater than or equal to" tests the default). 
+requires_google_auth_lt_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), + reason="This test requires google-auth < 1.25.0", +) +requires_google_auth_gte_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), + reason="This test requires google-auth >= 1.25.0", +) + +requires_api_core_lt_1_26_0 = pytest.mark.skipif( + packaging.version.parse(_API_CORE_VERSION) >= packaging.version.parse("1.26.0"), + reason="This test requires google-api-core < 1.26.0", +) + +requires_api_core_gte_1_26_0 = pytest.mark.skipif( + packaging.version.parse(_API_CORE_VERSION) < packaging.version.parse("1.26.0"), + reason="This test requires google-api-core >= 1.26.0", +) def client_cert_source_callback(): @@ -108,7 +137,7 @@ def test__get_default_mtls_endpoint(): "client_class", [BigtableTableAdminClient, BigtableTableAdminAsyncClient,] ) def test_bigtable_table_admin_client_from_service_account_info(client_class): - creds = credentials.AnonymousCredentials() + creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: @@ -125,7 +154,7 @@ def test_bigtable_table_admin_client_from_service_account_info(client_class): "client_class", [BigtableTableAdminClient, BigtableTableAdminAsyncClient,] ) def test_bigtable_table_admin_client_from_service_account_file(client_class): - creds = credentials.AnonymousCredentials() + creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_file" ) as factory: @@ -178,7 +207,7 @@ def test_bigtable_table_admin_client_client_options( ): # Check that if channel is provided we won't create a new one. with mock.patch.object(BigtableTableAdminClient, "get_transport_class") as gtc: - transport = transport_class(credentials=credentials.AnonymousCredentials()) + transport = transport_class(credentials=ga_credentials.AnonymousCredentials()) client = client_class(transport=transport) gtc.assert_not_called() @@ -476,7 +505,7 @@ def test_create_table( transport: str = "grpc", request_type=bigtable_table_admin.CreateTableRequest ): client = BigtableTableAdminClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -489,21 +518,16 @@ def test_create_table( call.return_value = gba_table.Table( name="name_value", granularity=gba_table.Table.TimestampGranularity.MILLIS, ) - response = client.create_table(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CreateTableRequest() # Establish that the response is the type that we expect. - assert isinstance(response, gba_table.Table) - assert response.name == "name_value" - assert response.granularity == gba_table.Table.TimestampGranularity.MILLIS @@ -515,7 +539,7 @@ def test_create_table_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( - credentials=credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. 
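The requires_* markers added near the top of this file gate individual tests on the installed google-auth / google-api-core version, per the TODO above them. A self-contained sketch of the same pytest.mark.skipif pattern; the version constant and test below are illustrative stand-ins, not part of the patch.

import packaging.version
import pytest

# Stand-in for the detected library version (_GOOGLE_AUTH_VERSION in the real module).
_FAKE_AUTH_VERSION = "1.24.0"

requires_auth_gte_1_25_0 = pytest.mark.skipif(
    packaging.version.parse(_FAKE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
    reason="This test requires google-auth >= 1.25.0",
)


@requires_auth_gte_1_25_0
def test_new_auth_behaviour():
    # Skipped whenever the detected version is older than 1.25.0.
    assert True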
@@ -523,7 +547,6 @@ def test_create_table_empty_call(): client.create_table() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CreateTableRequest() @@ -533,7 +556,7 @@ async def test_create_table_async( request_type=bigtable_table_admin.CreateTableRequest, ): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -549,20 +572,16 @@ async def test_create_table_async( granularity=gba_table.Table.TimestampGranularity.MILLIS, ) ) - response = await client.create_table(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CreateTableRequest() # Establish that the response is the type that we expect. assert isinstance(response, gba_table.Table) - assert response.name == "name_value" - assert response.granularity == gba_table.Table.TimestampGranularity.MILLIS @@ -572,17 +591,19 @@ async def test_create_table_async_from_dict(): def test_create_table_field_headers(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_table_admin.CreateTableRequest() + request.parent = "parent/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_table), "__call__") as call: call.return_value = gba_table.Table() - client.create_table(request) # Establish that the underlying gRPC stub method was called. @@ -598,18 +619,18 @@ def test_create_table_field_headers(): @pytest.mark.asyncio async def test_create_table_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_table_admin.CreateTableRequest() + request.parent = "parent/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_table), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gba_table.Table()) - await client.create_table(request) # Establish that the underlying gRPC stub method was called. @@ -623,13 +644,14 @@ async def test_create_table_field_headers_async(): def test_create_table_flattened(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_table), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = gba_table.Table() - # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.create_table( @@ -642,16 +664,15 @@ def test_create_table_flattened(): # request object values. 
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].parent == "parent_value" - assert args[0].table_id == "table_id_value" - assert args[0].table == gba_table.Table(name="name_value") def test_create_table_flattened_error(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Attempting to call a method with both a request object and flattened # fields is an error. @@ -667,7 +688,7 @@ def test_create_table_flattened_error(): @pytest.mark.asyncio async def test_create_table_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -688,18 +709,15 @@ async def test_create_table_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].parent == "parent_value" - assert args[0].table_id == "table_id_value" - assert args[0].table == gba_table.Table(name="name_value") @pytest.mark.asyncio async def test_create_table_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened @@ -718,7 +736,7 @@ def test_create_table_from_snapshot( request_type=bigtable_table_admin.CreateTableFromSnapshotRequest, ): client = BigtableTableAdminClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -731,13 +749,11 @@ def test_create_table_from_snapshot( ) as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/spam") - response = client.create_table_from_snapshot(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CreateTableFromSnapshotRequest() # Establish that the response is the type that we expect. @@ -752,7 +768,7 @@ def test_create_table_from_snapshot_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( - credentials=credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. 
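Every RPC in these tests is exercised by patching the __call__ of the underlying transport stub, so nothing leaves the process. A condensed, hypothetical sketch of that pattern for a unary method, using only names imported at the top of this file; the assertions mirror the generated tests only loosely.

import mock

from google.auth import credentials as ga_credentials
from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
    BigtableTableAdminClient,
)
from google.cloud.bigtable_admin_v2.types import bigtable_table_admin
from google.cloud.bigtable_admin_v2.types import table as gba_table


def test_create_table_with_mocked_stub():
    # Anonymous credentials keep the client from looking up application
    # default credentials; the gRPC channel is created but never used.
    client = BigtableTableAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    # Patch the stub's __call__ so the RPC is answered in-process.
    with mock.patch.object(type(client.transport.create_table), "__call__") as call:
        call.return_value = gba_table.Table(name="name_value")
        response = client.create_table(
            parent="parent_value", table_id="table_id_value",
        )

    # Inspect the request the stub received, and the faked response.
    _, args, _ = call.mock_calls[0]
    assert isinstance(args[0], bigtable_table_admin.CreateTableRequest)
    assert response.name == "name_value"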
@@ -762,7 +778,6 @@ def test_create_table_from_snapshot_empty_call(): client.create_table_from_snapshot() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CreateTableFromSnapshotRequest() @@ -772,7 +787,7 @@ async def test_create_table_from_snapshot_async( request_type=bigtable_table_admin.CreateTableFromSnapshotRequest, ): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -787,13 +802,11 @@ async def test_create_table_from_snapshot_async( call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/spam") ) - response = await client.create_table_from_snapshot(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CreateTableFromSnapshotRequest() # Establish that the response is the type that we expect. @@ -806,11 +819,14 @@ async def test_create_table_from_snapshot_async_from_dict(): def test_create_table_from_snapshot_field_headers(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_table_admin.CreateTableFromSnapshotRequest() + request.parent = "parent/value" # Mock the actual call within the gRPC stub, and fake the request. @@ -818,7 +834,6 @@ def test_create_table_from_snapshot_field_headers(): type(client.transport.create_table_from_snapshot), "__call__" ) as call: call.return_value = operations_pb2.Operation(name="operations/op") - client.create_table_from_snapshot(request) # Establish that the underlying gRPC stub method was called. @@ -834,12 +849,13 @@ def test_create_table_from_snapshot_field_headers(): @pytest.mark.asyncio async def test_create_table_from_snapshot_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_table_admin.CreateTableFromSnapshotRequest() + request.parent = "parent/value" # Mock the actual call within the gRPC stub, and fake the request. @@ -849,7 +865,6 @@ async def test_create_table_from_snapshot_field_headers_async(): call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/op") ) - await client.create_table_from_snapshot(request) # Establish that the underlying gRPC stub method was called. @@ -863,7 +878,9 @@ async def test_create_table_from_snapshot_field_headers_async(): def test_create_table_from_snapshot_flattened(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -871,7 +888,6 @@ def test_create_table_from_snapshot_flattened(): ) as call: # Designate an appropriate return value for the call. 
call.return_value = operations_pb2.Operation(name="operations/op") - # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.create_table_from_snapshot( @@ -884,16 +900,15 @@ def test_create_table_from_snapshot_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].parent == "parent_value" - assert args[0].table_id == "table_id_value" - assert args[0].source_snapshot == "source_snapshot_value" def test_create_table_from_snapshot_flattened_error(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Attempting to call a method with both a request object and flattened # fields is an error. @@ -909,7 +924,7 @@ def test_create_table_from_snapshot_flattened_error(): @pytest.mark.asyncio async def test_create_table_from_snapshot_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -934,18 +949,15 @@ async def test_create_table_from_snapshot_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].parent == "parent_value" - assert args[0].table_id == "table_id_value" - assert args[0].source_snapshot == "source_snapshot_value" @pytest.mark.asyncio async def test_create_table_from_snapshot_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened @@ -963,7 +975,7 @@ def test_list_tables( transport: str = "grpc", request_type=bigtable_table_admin.ListTablesRequest ): client = BigtableTableAdminClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -976,19 +988,15 @@ def test_list_tables( call.return_value = bigtable_table_admin.ListTablesResponse( next_page_token="next_page_token_value", ) - response = client.list_tables(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ListTablesRequest() # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListTablesPager) - assert response.next_page_token == "next_page_token_value" @@ -1000,7 +1008,7 @@ def test_list_tables_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( - credentials=credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -1008,7 +1016,6 @@ def test_list_tables_empty_call(): client.list_tables() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ListTablesRequest() @@ -1017,7 +1024,7 @@ async def test_list_tables_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.ListTablesRequest ): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -1032,18 +1039,15 @@ async def test_list_tables_async( next_page_token="next_page_token_value", ) ) - response = await client.list_tables(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ListTablesRequest() # Establish that the response is the type that we expect. assert isinstance(response, pagers.ListTablesAsyncPager) - assert response.next_page_token == "next_page_token_value" @@ -1053,17 +1057,19 @@ async def test_list_tables_async_from_dict(): def test_list_tables_field_headers(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_table_admin.ListTablesRequest() + request.parent = "parent/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_tables), "__call__") as call: call.return_value = bigtable_table_admin.ListTablesResponse() - client.list_tables(request) # Establish that the underlying gRPC stub method was called. @@ -1079,12 +1085,13 @@ def test_list_tables_field_headers(): @pytest.mark.asyncio async def test_list_tables_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_table_admin.ListTablesRequest() + request.parent = "parent/value" # Mock the actual call within the gRPC stub, and fake the request. @@ -1092,7 +1099,6 @@ async def test_list_tables_field_headers_async(): call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( bigtable_table_admin.ListTablesResponse() ) - await client.list_tables(request) # Establish that the underlying gRPC stub method was called. @@ -1106,13 +1112,14 @@ async def test_list_tables_field_headers_async(): def test_list_tables_flattened(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_tables), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = bigtable_table_admin.ListTablesResponse() - # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.list_tables(parent="parent_value",) @@ -1121,12 +1128,13 @@ def test_list_tables_flattened(): # request object values. 
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].parent == "parent_value" def test_list_tables_flattened_error(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Attempting to call a method with both a request object and flattened # fields is an error. @@ -1139,7 +1147,7 @@ def test_list_tables_flattened_error(): @pytest.mark.asyncio async def test_list_tables_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1158,14 +1166,13 @@ async def test_list_tables_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].parent == "parent_value" @pytest.mark.asyncio async def test_list_tables_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened @@ -1177,7 +1184,7 @@ async def test_list_tables_flattened_error_async(): def test_list_tables_pager(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials,) + client = BigtableTableAdminClient(credentials=ga_credentials.AnonymousCredentials,) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_tables), "__call__") as call: @@ -1211,7 +1218,7 @@ def test_list_tables_pager(): def test_list_tables_pages(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials,) + client = BigtableTableAdminClient(credentials=ga_credentials.AnonymousCredentials,) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_tables), "__call__") as call: @@ -1238,7 +1245,7 @@ def test_list_tables_pages(): @pytest.mark.asyncio async def test_list_tables_async_pager(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials, + credentials=ga_credentials.AnonymousCredentials, ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1273,7 +1280,7 @@ async def test_list_tables_async_pager(): @pytest.mark.asyncio async def test_list_tables_async_pages(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials, + credentials=ga_credentials.AnonymousCredentials, ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1306,7 +1313,7 @@ def test_get_table( transport: str = "grpc", request_type=bigtable_table_admin.GetTableRequest ): client = BigtableTableAdminClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -1319,21 +1326,16 @@ def test_get_table( call.return_value = table.Table( name="name_value", granularity=table.Table.TimestampGranularity.MILLIS, ) - response = client.get_table(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GetTableRequest() # Establish that the response is the type that we expect. 
- assert isinstance(response, table.Table) - assert response.name == "name_value" - assert response.granularity == table.Table.TimestampGranularity.MILLIS @@ -1345,7 +1347,7 @@ def test_get_table_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( - credentials=credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1353,7 +1355,6 @@ def test_get_table_empty_call(): client.get_table() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GetTableRequest() @@ -1362,7 +1363,7 @@ async def test_get_table_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.GetTableRequest ): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -1377,20 +1378,16 @@ async def test_get_table_async( name="name_value", granularity=table.Table.TimestampGranularity.MILLIS, ) ) - response = await client.get_table(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GetTableRequest() # Establish that the response is the type that we expect. assert isinstance(response, table.Table) - assert response.name == "name_value" - assert response.granularity == table.Table.TimestampGranularity.MILLIS @@ -1400,17 +1397,19 @@ async def test_get_table_async_from_dict(): def test_get_table_field_headers(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_table_admin.GetTableRequest() + request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_table), "__call__") as call: call.return_value = table.Table() - client.get_table(request) # Establish that the underlying gRPC stub method was called. @@ -1426,18 +1425,18 @@ def test_get_table_field_headers(): @pytest.mark.asyncio async def test_get_table_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_table_admin.GetTableRequest() + request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_table), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Table()) - await client.get_table(request) # Establish that the underlying gRPC stub method was called. 
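The asyncio variants above follow the same shape, but the mocked stub has to return an awaitable, which is why the generated tests wrap the fake response in grpc_helpers_async.FakeUnaryUnaryCall. A hypothetical, condensed sketch (it relies on pytest-asyncio, as the @pytest.mark.asyncio tests above already do):

import mock
import pytest

from google.api_core import grpc_helpers_async
from google.auth import credentials as ga_credentials
from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
    BigtableTableAdminAsyncClient,
)
from google.cloud.bigtable_admin_v2.types import table


@pytest.mark.asyncio
async def test_get_table_async_with_mocked_stub():
    client = BigtableTableAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
    )

    with mock.patch.object(type(client.transport.get_table), "__call__") as call:
        # FakeUnaryUnaryCall makes the mocked stub awaitable, like a real gRPC call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            table.Table(name="name_value")
        )
        response = await client.get_table(name="name_value")

    assert response.name == "name_value"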
@@ -1451,13 +1450,14 @@ async def test_get_table_field_headers_async(): def test_get_table_flattened(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_table), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = table.Table() - # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.get_table(name="name_value",) @@ -1466,12 +1466,13 @@ def test_get_table_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" def test_get_table_flattened_error(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Attempting to call a method with both a request object and flattened # fields is an error. @@ -1484,7 +1485,7 @@ def test_get_table_flattened_error(): @pytest.mark.asyncio async def test_get_table_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1501,14 +1502,13 @@ async def test_get_table_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" @pytest.mark.asyncio async def test_get_table_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened @@ -1523,7 +1523,7 @@ def test_delete_table( transport: str = "grpc", request_type=bigtable_table_admin.DeleteTableRequest ): client = BigtableTableAdminClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -1534,13 +1534,11 @@ def test_delete_table( with mock.patch.object(type(client.transport.delete_table), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = None - response = client.delete_table(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DeleteTableRequest() # Establish that the response is the type that we expect. @@ -1555,7 +1553,7 @@ def test_delete_table_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( - credentials=credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -1563,7 +1561,6 @@ def test_delete_table_empty_call(): client.delete_table() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DeleteTableRequest() @@ -1573,7 +1570,7 @@ async def test_delete_table_async( request_type=bigtable_table_admin.DeleteTableRequest, ): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -1584,13 +1581,11 @@ async def test_delete_table_async( with mock.patch.object(type(client.transport.delete_table), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.delete_table(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DeleteTableRequest() # Establish that the response is the type that we expect. @@ -1603,17 +1598,19 @@ async def test_delete_table_async_from_dict(): def test_delete_table_field_headers(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_table_admin.DeleteTableRequest() + request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_table), "__call__") as call: call.return_value = None - client.delete_table(request) # Establish that the underlying gRPC stub method was called. @@ -1629,18 +1626,18 @@ def test_delete_table_field_headers(): @pytest.mark.asyncio async def test_delete_table_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_table_admin.DeleteTableRequest() + request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_table), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.delete_table(request) # Establish that the underlying gRPC stub method was called. @@ -1654,13 +1651,14 @@ async def test_delete_table_field_headers_async(): def test_delete_table_flattened(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_table), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = None - # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.delete_table(name="name_value",) @@ -1669,12 +1667,13 @@ def test_delete_table_flattened(): # request object values. 
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" def test_delete_table_flattened_error(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Attempting to call a method with both a request object and flattened # fields is an error. @@ -1687,7 +1686,7 @@ def test_delete_table_flattened_error(): @pytest.mark.asyncio async def test_delete_table_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1704,14 +1703,13 @@ async def test_delete_table_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" @pytest.mark.asyncio async def test_delete_table_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened @@ -1727,7 +1725,7 @@ def test_modify_column_families( request_type=bigtable_table_admin.ModifyColumnFamiliesRequest, ): client = BigtableTableAdminClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -1742,21 +1740,16 @@ def test_modify_column_families( call.return_value = table.Table( name="name_value", granularity=table.Table.TimestampGranularity.MILLIS, ) - response = client.modify_column_families(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ModifyColumnFamiliesRequest() # Establish that the response is the type that we expect. - assert isinstance(response, table.Table) - assert response.name == "name_value" - assert response.granularity == table.Table.TimestampGranularity.MILLIS @@ -1768,7 +1761,7 @@ def test_modify_column_families_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( - credentials=credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. 
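The modify_column_families flattened form takes a list of Modification messages rather than scalar fields. A hypothetical sketch of that call shape, reusing the mocked-stub pattern and the names imported at the top of this file:

import mock

from google.auth import credentials as ga_credentials
from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
    BigtableTableAdminClient,
)
from google.cloud.bigtable_admin_v2.types import bigtable_table_admin
from google.cloud.bigtable_admin_v2.types import table


def test_modify_column_families_flattened_sketch():
    client = BigtableTableAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )

    with mock.patch.object(
        type(client.transport.modify_column_families), "__call__"
    ) as call:
        call.return_value = table.Table(name="name_value")
        # Each Modification describes one column-family change to apply.
        client.modify_column_families(
            name="name_value",
            modifications=[
                bigtable_table_admin.ModifyColumnFamiliesRequest.Modification(
                    id="id_value"
                )
            ],
        )

    _, args, _ = call.mock_calls[0]
    assert args[0].name == "name_value"
    assert args[0].modifications[0].id == "id_value"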
@@ -1778,7 +1771,6 @@ def test_modify_column_families_empty_call(): client.modify_column_families() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ModifyColumnFamiliesRequest() @@ -1788,7 +1780,7 @@ async def test_modify_column_families_async( request_type=bigtable_table_admin.ModifyColumnFamiliesRequest, ): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -1805,20 +1797,16 @@ async def test_modify_column_families_async( name="name_value", granularity=table.Table.TimestampGranularity.MILLIS, ) ) - response = await client.modify_column_families(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ModifyColumnFamiliesRequest() # Establish that the response is the type that we expect. assert isinstance(response, table.Table) - assert response.name == "name_value" - assert response.granularity == table.Table.TimestampGranularity.MILLIS @@ -1828,11 +1816,14 @@ async def test_modify_column_families_async_from_dict(): def test_modify_column_families_field_headers(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_table_admin.ModifyColumnFamiliesRequest() + request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. @@ -1840,7 +1831,6 @@ def test_modify_column_families_field_headers(): type(client.transport.modify_column_families), "__call__" ) as call: call.return_value = table.Table() - client.modify_column_families(request) # Establish that the underlying gRPC stub method was called. @@ -1856,12 +1846,13 @@ def test_modify_column_families_field_headers(): @pytest.mark.asyncio async def test_modify_column_families_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_table_admin.ModifyColumnFamiliesRequest() + request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. @@ -1869,7 +1860,6 @@ async def test_modify_column_families_field_headers_async(): type(client.transport.modify_column_families), "__call__" ) as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Table()) - await client.modify_column_families(request) # Establish that the underlying gRPC stub method was called. @@ -1883,7 +1873,9 @@ async def test_modify_column_families_field_headers_async(): def test_modify_column_families_flattened(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -1891,7 +1883,6 @@ def test_modify_column_families_flattened(): ) as call: # Designate an appropriate return value for the call. 
call.return_value = table.Table() - # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.modify_column_families( @@ -1907,16 +1898,16 @@ def test_modify_column_families_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" - assert args[0].modifications == [ bigtable_table_admin.ModifyColumnFamiliesRequest.Modification(id="id_value") ] def test_modify_column_families_flattened_error(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Attempting to call a method with both a request object and flattened # fields is an error. @@ -1935,7 +1926,7 @@ def test_modify_column_families_flattened_error(): @pytest.mark.asyncio async def test_modify_column_families_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1961,9 +1952,7 @@ async def test_modify_column_families_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" - assert args[0].modifications == [ bigtable_table_admin.ModifyColumnFamiliesRequest.Modification(id="id_value") ] @@ -1972,7 +1961,7 @@ async def test_modify_column_families_flattened_async(): @pytest.mark.asyncio async def test_modify_column_families_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened @@ -1993,7 +1982,7 @@ def test_drop_row_range( transport: str = "grpc", request_type=bigtable_table_admin.DropRowRangeRequest ): client = BigtableTableAdminClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -2004,13 +1993,11 @@ def test_drop_row_range( with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = None - response = client.drop_row_range(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DropRowRangeRequest() # Establish that the response is the type that we expect. @@ -2025,7 +2012,7 @@ def test_drop_row_range_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( - credentials=credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -2033,7 +2020,6 @@ def test_drop_row_range_empty_call(): client.drop_row_range() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DropRowRangeRequest() @@ -2043,7 +2029,7 @@ async def test_drop_row_range_async( request_type=bigtable_table_admin.DropRowRangeRequest, ): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -2054,13 +2040,11 @@ async def test_drop_row_range_async( with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.drop_row_range(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DropRowRangeRequest() # Establish that the response is the type that we expect. @@ -2073,17 +2057,19 @@ async def test_drop_row_range_async_from_dict(): def test_drop_row_range_field_headers(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_table_admin.DropRowRangeRequest() + request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: call.return_value = None - client.drop_row_range(request) # Establish that the underlying gRPC stub method was called. @@ -2099,18 +2085,18 @@ def test_drop_row_range_field_headers(): @pytest.mark.asyncio async def test_drop_row_range_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_table_admin.DropRowRangeRequest() + request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.drop_row_range(request) # Establish that the underlying gRPC stub method was called. @@ -2128,7 +2114,7 @@ def test_generate_consistency_token( request_type=bigtable_table_admin.GenerateConsistencyTokenRequest, ): client = BigtableTableAdminClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -2143,19 +2129,15 @@ def test_generate_consistency_token( call.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse( consistency_token="consistency_token_value", ) - response = client.generate_consistency_token(request) # Establish that the underlying gRPC stub method was called. 
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GenerateConsistencyTokenRequest() # Establish that the response is the type that we expect. - assert isinstance(response, bigtable_table_admin.GenerateConsistencyTokenResponse) - assert response.consistency_token == "consistency_token_value" @@ -2167,7 +2149,7 @@ def test_generate_consistency_token_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( - credentials=credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -2177,7 +2159,6 @@ def test_generate_consistency_token_empty_call(): client.generate_consistency_token() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GenerateConsistencyTokenRequest() @@ -2187,7 +2168,7 @@ async def test_generate_consistency_token_async( request_type=bigtable_table_admin.GenerateConsistencyTokenRequest, ): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -2204,18 +2185,15 @@ async def test_generate_consistency_token_async( consistency_token="consistency_token_value", ) ) - response = await client.generate_consistency_token(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GenerateConsistencyTokenRequest() # Establish that the response is the type that we expect. assert isinstance(response, bigtable_table_admin.GenerateConsistencyTokenResponse) - assert response.consistency_token == "consistency_token_value" @@ -2225,11 +2203,14 @@ async def test_generate_consistency_token_async_from_dict(): def test_generate_consistency_token_field_headers(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_table_admin.GenerateConsistencyTokenRequest() + request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. @@ -2237,7 +2218,6 @@ def test_generate_consistency_token_field_headers(): type(client.transport.generate_consistency_token), "__call__" ) as call: call.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() - client.generate_consistency_token(request) # Establish that the underlying gRPC stub method was called. @@ -2253,12 +2233,13 @@ def test_generate_consistency_token_field_headers(): @pytest.mark.asyncio async def test_generate_consistency_token_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_table_admin.GenerateConsistencyTokenRequest() + request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. 
@@ -2268,7 +2249,6 @@ async def test_generate_consistency_token_field_headers_async(): call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( bigtable_table_admin.GenerateConsistencyTokenResponse() ) - await client.generate_consistency_token(request) # Establish that the underlying gRPC stub method was called. @@ -2282,7 +2262,9 @@ async def test_generate_consistency_token_field_headers_async(): def test_generate_consistency_token_flattened(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -2290,7 +2272,6 @@ def test_generate_consistency_token_flattened(): ) as call: # Designate an appropriate return value for the call. call.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() - # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.generate_consistency_token(name="name_value",) @@ -2299,12 +2280,13 @@ def test_generate_consistency_token_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" def test_generate_consistency_token_flattened_error(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Attempting to call a method with both a request object and flattened # fields is an error. @@ -2317,7 +2299,7 @@ def test_generate_consistency_token_flattened_error(): @pytest.mark.asyncio async def test_generate_consistency_token_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -2338,14 +2320,13 @@ async def test_generate_consistency_token_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" @pytest.mark.asyncio async def test_generate_consistency_token_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened @@ -2360,7 +2341,7 @@ def test_check_consistency( transport: str = "grpc", request_type=bigtable_table_admin.CheckConsistencyRequest ): client = BigtableTableAdminClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -2375,19 +2356,15 @@ def test_check_consistency( call.return_value = bigtable_table_admin.CheckConsistencyResponse( consistent=True, ) - response = client.check_consistency(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CheckConsistencyRequest() # Establish that the response is the type that we expect. 
- assert isinstance(response, bigtable_table_admin.CheckConsistencyResponse) - assert response.consistent is True @@ -2399,7 +2376,7 @@ def test_check_consistency_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( - credentials=credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -2409,7 +2386,6 @@ def test_check_consistency_empty_call(): client.check_consistency() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CheckConsistencyRequest() @@ -2419,7 +2395,7 @@ async def test_check_consistency_async( request_type=bigtable_table_admin.CheckConsistencyRequest, ): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -2434,18 +2410,15 @@ async def test_check_consistency_async( call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( bigtable_table_admin.CheckConsistencyResponse(consistent=True,) ) - response = await client.check_consistency(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CheckConsistencyRequest() # Establish that the response is the type that we expect. assert isinstance(response, bigtable_table_admin.CheckConsistencyResponse) - assert response.consistent is True @@ -2455,11 +2428,14 @@ async def test_check_consistency_async_from_dict(): def test_check_consistency_field_headers(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_table_admin.CheckConsistencyRequest() + request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. @@ -2467,7 +2443,6 @@ def test_check_consistency_field_headers(): type(client.transport.check_consistency), "__call__" ) as call: call.return_value = bigtable_table_admin.CheckConsistencyResponse() - client.check_consistency(request) # Establish that the underlying gRPC stub method was called. @@ -2483,12 +2458,13 @@ def test_check_consistency_field_headers(): @pytest.mark.asyncio async def test_check_consistency_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_table_admin.CheckConsistencyRequest() + request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. @@ -2498,7 +2474,6 @@ async def test_check_consistency_field_headers_async(): call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( bigtable_table_admin.CheckConsistencyResponse() ) - await client.check_consistency(request) # Establish that the underlying gRPC stub method was called. 
@@ -2512,7 +2487,9 @@ async def test_check_consistency_field_headers_async(): def test_check_consistency_flattened(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -2520,7 +2497,6 @@ def test_check_consistency_flattened(): ) as call: # Designate an appropriate return value for the call. call.return_value = bigtable_table_admin.CheckConsistencyResponse() - # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.check_consistency( @@ -2531,14 +2507,14 @@ def test_check_consistency_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" - assert args[0].consistency_token == "consistency_token_value" def test_check_consistency_flattened_error(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Attempting to call a method with both a request object and flattened # fields is an error. @@ -2553,7 +2529,7 @@ def test_check_consistency_flattened_error(): @pytest.mark.asyncio async def test_check_consistency_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -2576,16 +2552,14 @@ async def test_check_consistency_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" - assert args[0].consistency_token == "consistency_token_value" @pytest.mark.asyncio async def test_check_consistency_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened @@ -2602,7 +2576,7 @@ def test_snapshot_table( transport: str = "grpc", request_type=bigtable_table_admin.SnapshotTableRequest ): client = BigtableTableAdminClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -2613,13 +2587,11 @@ def test_snapshot_table( with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/spam") - response = client.snapshot_table(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.SnapshotTableRequest() # Establish that the response is the type that we expect. @@ -2634,7 +2606,7 @@ def test_snapshot_table_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = BigtableTableAdminClient( - credentials=credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -2642,7 +2614,6 @@ def test_snapshot_table_empty_call(): client.snapshot_table() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.SnapshotTableRequest() @@ -2652,7 +2623,7 @@ async def test_snapshot_table_async( request_type=bigtable_table_admin.SnapshotTableRequest, ): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -2665,13 +2636,11 @@ async def test_snapshot_table_async( call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/spam") ) - response = await client.snapshot_table(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.SnapshotTableRequest() # Establish that the response is the type that we expect. @@ -2684,17 +2653,19 @@ async def test_snapshot_table_async_from_dict(): def test_snapshot_table_field_headers(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_table_admin.SnapshotTableRequest() + request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: call.return_value = operations_pb2.Operation(name="operations/op") - client.snapshot_table(request) # Establish that the underlying gRPC stub method was called. @@ -2710,12 +2681,13 @@ def test_snapshot_table_field_headers(): @pytest.mark.asyncio async def test_snapshot_table_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_table_admin.SnapshotTableRequest() + request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. @@ -2723,7 +2695,6 @@ async def test_snapshot_table_field_headers_async(): call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/op") ) - await client.snapshot_table(request) # Establish that the underlying gRPC stub method was called. @@ -2737,13 +2708,14 @@ async def test_snapshot_table_field_headers_async(): def test_snapshot_table_flattened(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: # Designate an appropriate return value for the call. 
call.return_value = operations_pb2.Operation(name="operations/op") - # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.snapshot_table( @@ -2757,18 +2729,16 @@ def test_snapshot_table_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" - assert args[0].cluster == "cluster_value" - assert args[0].snapshot_id == "snapshot_id_value" - assert args[0].description == "description_value" def test_snapshot_table_flattened_error(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Attempting to call a method with both a request object and flattened # fields is an error. @@ -2785,7 +2755,7 @@ def test_snapshot_table_flattened_error(): @pytest.mark.asyncio async def test_snapshot_table_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -2809,20 +2779,16 @@ async def test_snapshot_table_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" - assert args[0].cluster == "cluster_value" - assert args[0].snapshot_id == "snapshot_id_value" - assert args[0].description == "description_value" @pytest.mark.asyncio async def test_snapshot_table_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened @@ -2841,7 +2807,7 @@ def test_get_snapshot( transport: str = "grpc", request_type=bigtable_table_admin.GetSnapshotRequest ): client = BigtableTableAdminClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -2857,25 +2823,18 @@ def test_get_snapshot( state=table.Snapshot.State.READY, description="description_value", ) - response = client.get_snapshot(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GetSnapshotRequest() # Establish that the response is the type that we expect. - assert isinstance(response, table.Snapshot) - assert response.name == "name_value" - assert response.data_size_bytes == 1594 - assert response.state == table.Snapshot.State.READY - assert response.description == "description_value" @@ -2887,7 +2846,7 @@ def test_get_snapshot_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( - credentials=credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -2895,7 +2854,6 @@ def test_get_snapshot_empty_call(): client.get_snapshot() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GetSnapshotRequest() @@ -2905,7 +2863,7 @@ async def test_get_snapshot_async( request_type=bigtable_table_admin.GetSnapshotRequest, ): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -2923,24 +2881,18 @@ async def test_get_snapshot_async( description="description_value", ) ) - response = await client.get_snapshot(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GetSnapshotRequest() # Establish that the response is the type that we expect. assert isinstance(response, table.Snapshot) - assert response.name == "name_value" - assert response.data_size_bytes == 1594 - assert response.state == table.Snapshot.State.READY - assert response.description == "description_value" @@ -2950,17 +2902,19 @@ async def test_get_snapshot_async_from_dict(): def test_get_snapshot_field_headers(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_table_admin.GetSnapshotRequest() + request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: call.return_value = table.Snapshot() - client.get_snapshot(request) # Establish that the underlying gRPC stub method was called. @@ -2976,18 +2930,18 @@ def test_get_snapshot_field_headers(): @pytest.mark.asyncio async def test_get_snapshot_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_table_admin.GetSnapshotRequest() + request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Snapshot()) - await client.get_snapshot(request) # Establish that the underlying gRPC stub method was called. @@ -3001,13 +2955,14 @@ async def test_get_snapshot_field_headers_async(): def test_get_snapshot_flattened(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = table.Snapshot() - # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.get_snapshot(name="name_value",) @@ -3016,12 +2971,13 @@ def test_get_snapshot_flattened(): # request object values. 
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" def test_get_snapshot_flattened_error(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Attempting to call a method with both a request object and flattened # fields is an error. @@ -3034,7 +2990,7 @@ def test_get_snapshot_flattened_error(): @pytest.mark.asyncio async def test_get_snapshot_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -3051,14 +3007,13 @@ async def test_get_snapshot_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" @pytest.mark.asyncio async def test_get_snapshot_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened @@ -3073,7 +3028,7 @@ def test_list_snapshots( transport: str = "grpc", request_type=bigtable_table_admin.ListSnapshotsRequest ): client = BigtableTableAdminClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -3086,19 +3041,15 @@ def test_list_snapshots( call.return_value = bigtable_table_admin.ListSnapshotsResponse( next_page_token="next_page_token_value", ) - response = client.list_snapshots(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ListSnapshotsRequest() # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListSnapshotsPager) - assert response.next_page_token == "next_page_token_value" @@ -3110,7 +3061,7 @@ def test_list_snapshots_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( - credentials=credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -3118,7 +3069,6 @@ def test_list_snapshots_empty_call(): client.list_snapshots() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ListSnapshotsRequest() @@ -3128,7 +3078,7 @@ async def test_list_snapshots_async( request_type=bigtable_table_admin.ListSnapshotsRequest, ): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -3143,18 +3093,15 @@ async def test_list_snapshots_async( next_page_token="next_page_token_value", ) ) - response = await client.list_snapshots(request) # Establish that the underlying gRPC stub method was called. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ListSnapshotsRequest() # Establish that the response is the type that we expect. assert isinstance(response, pagers.ListSnapshotsAsyncPager) - assert response.next_page_token == "next_page_token_value" @@ -3164,17 +3111,19 @@ async def test_list_snapshots_async_from_dict(): def test_list_snapshots_field_headers(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_table_admin.ListSnapshotsRequest() + request.parent = "parent/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: call.return_value = bigtable_table_admin.ListSnapshotsResponse() - client.list_snapshots(request) # Establish that the underlying gRPC stub method was called. @@ -3190,12 +3139,13 @@ def test_list_snapshots_field_headers(): @pytest.mark.asyncio async def test_list_snapshots_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_table_admin.ListSnapshotsRequest() + request.parent = "parent/value" # Mock the actual call within the gRPC stub, and fake the request. @@ -3203,7 +3153,6 @@ async def test_list_snapshots_field_headers_async(): call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( bigtable_table_admin.ListSnapshotsResponse() ) - await client.list_snapshots(request) # Establish that the underlying gRPC stub method was called. @@ -3217,13 +3166,14 @@ async def test_list_snapshots_field_headers_async(): def test_list_snapshots_flattened(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = bigtable_table_admin.ListSnapshotsResponse() - # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.list_snapshots(parent="parent_value",) @@ -3232,12 +3182,13 @@ def test_list_snapshots_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].parent == "parent_value" def test_list_snapshots_flattened_error(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Attempting to call a method with both a request object and flattened # fields is an error. @@ -3250,7 +3201,7 @@ def test_list_snapshots_flattened_error(): @pytest.mark.asyncio async def test_list_snapshots_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -3269,14 +3220,13 @@ async def test_list_snapshots_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].parent == "parent_value" @pytest.mark.asyncio async def test_list_snapshots_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened @@ -3288,7 +3238,7 @@ async def test_list_snapshots_flattened_error_async(): def test_list_snapshots_pager(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials,) + client = BigtableTableAdminClient(credentials=ga_credentials.AnonymousCredentials,) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: @@ -3324,7 +3274,7 @@ def test_list_snapshots_pager(): def test_list_snapshots_pages(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials,) + client = BigtableTableAdminClient(credentials=ga_credentials.AnonymousCredentials,) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: @@ -3353,7 +3303,7 @@ def test_list_snapshots_pages(): @pytest.mark.asyncio async def test_list_snapshots_async_pager(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials, + credentials=ga_credentials.AnonymousCredentials, ) # Mock the actual call within the gRPC stub, and fake the request. @@ -3390,7 +3340,7 @@ async def test_list_snapshots_async_pager(): @pytest.mark.asyncio async def test_list_snapshots_async_pages(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials, + credentials=ga_credentials.AnonymousCredentials, ) # Mock the actual call within the gRPC stub, and fake the request. @@ -3425,7 +3375,7 @@ def test_delete_snapshot( transport: str = "grpc", request_type=bigtable_table_admin.DeleteSnapshotRequest ): client = BigtableTableAdminClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -3436,13 +3386,11 @@ def test_delete_snapshot( with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = None - response = client.delete_snapshot(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DeleteSnapshotRequest() # Establish that the response is the type that we expect. @@ -3457,7 +3405,7 @@ def test_delete_snapshot_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( - credentials=credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -3465,7 +3413,6 @@ def test_delete_snapshot_empty_call(): client.delete_snapshot() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DeleteSnapshotRequest() @@ -3475,7 +3422,7 @@ async def test_delete_snapshot_async( request_type=bigtable_table_admin.DeleteSnapshotRequest, ): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -3486,13 +3433,11 @@ async def test_delete_snapshot_async( with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.delete_snapshot(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DeleteSnapshotRequest() # Establish that the response is the type that we expect. @@ -3505,17 +3450,19 @@ async def test_delete_snapshot_async_from_dict(): def test_delete_snapshot_field_headers(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_table_admin.DeleteSnapshotRequest() + request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: call.return_value = None - client.delete_snapshot(request) # Establish that the underlying gRPC stub method was called. @@ -3531,18 +3478,18 @@ def test_delete_snapshot_field_headers(): @pytest.mark.asyncio async def test_delete_snapshot_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_table_admin.DeleteSnapshotRequest() + request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.delete_snapshot(request) # Establish that the underlying gRPC stub method was called. @@ -3556,13 +3503,14 @@ async def test_delete_snapshot_field_headers_async(): def test_delete_snapshot_flattened(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = None - # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.delete_snapshot(name="name_value",) @@ -3571,12 +3519,13 @@ def test_delete_snapshot_flattened(): # request object values. 
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" def test_delete_snapshot_flattened_error(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Attempting to call a method with both a request object and flattened # fields is an error. @@ -3589,7 +3538,7 @@ def test_delete_snapshot_flattened_error(): @pytest.mark.asyncio async def test_delete_snapshot_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -3606,14 +3555,13 @@ async def test_delete_snapshot_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" @pytest.mark.asyncio async def test_delete_snapshot_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened @@ -3628,7 +3576,7 @@ def test_create_backup( transport: str = "grpc", request_type=bigtable_table_admin.CreateBackupRequest ): client = BigtableTableAdminClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -3639,13 +3587,11 @@ def test_create_backup( with mock.patch.object(type(client.transport.create_backup), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/spam") - response = client.create_backup(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CreateBackupRequest() # Establish that the response is the type that we expect. @@ -3660,7 +3606,7 @@ def test_create_backup_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( - credentials=credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -3668,7 +3614,6 @@ def test_create_backup_empty_call(): client.create_backup() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CreateBackupRequest() @@ -3678,7 +3623,7 @@ async def test_create_backup_async( request_type=bigtable_table_admin.CreateBackupRequest, ): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -3691,13 +3636,11 @@ async def test_create_backup_async( call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/spam") ) - response = await client.create_backup(request) # Establish that the underlying gRPC stub method was called. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CreateBackupRequest() # Establish that the response is the type that we expect. @@ -3710,17 +3653,19 @@ async def test_create_backup_async_from_dict(): def test_create_backup_field_headers(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_table_admin.CreateBackupRequest() + request.parent = "parent/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_backup), "__call__") as call: call.return_value = operations_pb2.Operation(name="operations/op") - client.create_backup(request) # Establish that the underlying gRPC stub method was called. @@ -3736,12 +3681,13 @@ def test_create_backup_field_headers(): @pytest.mark.asyncio async def test_create_backup_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_table_admin.CreateBackupRequest() + request.parent = "parent/value" # Mock the actual call within the gRPC stub, and fake the request. @@ -3749,7 +3695,6 @@ async def test_create_backup_field_headers_async(): call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/op") ) - await client.create_backup(request) # Establish that the underlying gRPC stub method was called. @@ -3763,13 +3708,14 @@ async def test_create_backup_field_headers_async(): def test_create_backup_flattened(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_backup), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/op") - # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.create_backup( @@ -3782,16 +3728,15 @@ def test_create_backup_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].parent == "parent_value" - assert args[0].backup_id == "backup_id_value" - assert args[0].backup == table.Backup(name="name_value") def test_create_backup_flattened_error(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Attempting to call a method with both a request object and flattened # fields is an error. @@ -3807,7 +3752,7 @@ def test_create_backup_flattened_error(): @pytest.mark.asyncio async def test_create_backup_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -3830,18 +3775,15 @@ async def test_create_backup_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].parent == "parent_value" - assert args[0].backup_id == "backup_id_value" - assert args[0].backup == table.Backup(name="name_value") @pytest.mark.asyncio async def test_create_backup_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened @@ -3859,7 +3801,7 @@ def test_get_backup( transport: str = "grpc", request_type=bigtable_table_admin.GetBackupRequest ): client = BigtableTableAdminClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -3875,25 +3817,18 @@ def test_get_backup( size_bytes=1089, state=table.Backup.State.CREATING, ) - response = client.get_backup(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GetBackupRequest() # Establish that the response is the type that we expect. - assert isinstance(response, table.Backup) - assert response.name == "name_value" - assert response.source_table == "source_table_value" - assert response.size_bytes == 1089 - assert response.state == table.Backup.State.CREATING @@ -3905,7 +3840,7 @@ def test_get_backup_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( - credentials=credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -3913,7 +3848,6 @@ def test_get_backup_empty_call(): client.get_backup() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GetBackupRequest() @@ -3922,7 +3856,7 @@ async def test_get_backup_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.GetBackupRequest ): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -3940,24 +3874,18 @@ async def test_get_backup_async( state=table.Backup.State.CREATING, ) ) - response = await client.get_backup(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GetBackupRequest() # Establish that the response is the type that we expect. 
assert isinstance(response, table.Backup) - assert response.name == "name_value" - assert response.source_table == "source_table_value" - assert response.size_bytes == 1089 - assert response.state == table.Backup.State.CREATING @@ -3967,17 +3895,19 @@ async def test_get_backup_async_from_dict(): def test_get_backup_field_headers(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_table_admin.GetBackupRequest() + request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_backup), "__call__") as call: call.return_value = table.Backup() - client.get_backup(request) # Establish that the underlying gRPC stub method was called. @@ -3993,18 +3923,18 @@ def test_get_backup_field_headers(): @pytest.mark.asyncio async def test_get_backup_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_table_admin.GetBackupRequest() + request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_backup), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Backup()) - await client.get_backup(request) # Establish that the underlying gRPC stub method was called. @@ -4018,13 +3948,14 @@ async def test_get_backup_field_headers_async(): def test_get_backup_flattened(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_backup), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = table.Backup() - # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.get_backup(name="name_value",) @@ -4033,12 +3964,13 @@ def test_get_backup_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" def test_get_backup_flattened_error(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Attempting to call a method with both a request object and flattened # fields is an error. @@ -4051,7 +3983,7 @@ def test_get_backup_flattened_error(): @pytest.mark.asyncio async def test_get_backup_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -4068,14 +4000,13 @@ async def test_get_backup_flattened_async(): # request object values. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" @pytest.mark.asyncio async def test_get_backup_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened @@ -4090,7 +4021,7 @@ def test_update_backup( transport: str = "grpc", request_type=bigtable_table_admin.UpdateBackupRequest ): client = BigtableTableAdminClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -4106,25 +4037,18 @@ def test_update_backup( size_bytes=1089, state=table.Backup.State.CREATING, ) - response = client.update_backup(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.UpdateBackupRequest() # Establish that the response is the type that we expect. - assert isinstance(response, table.Backup) - assert response.name == "name_value" - assert response.source_table == "source_table_value" - assert response.size_bytes == 1089 - assert response.state == table.Backup.State.CREATING @@ -4136,7 +4060,7 @@ def test_update_backup_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( - credentials=credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -4144,7 +4068,6 @@ def test_update_backup_empty_call(): client.update_backup() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.UpdateBackupRequest() @@ -4154,7 +4077,7 @@ async def test_update_backup_async( request_type=bigtable_table_admin.UpdateBackupRequest, ): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -4172,24 +4095,18 @@ async def test_update_backup_async( state=table.Backup.State.CREATING, ) ) - response = await client.update_backup(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.UpdateBackupRequest() # Establish that the response is the type that we expect. assert isinstance(response, table.Backup) - assert response.name == "name_value" - assert response.source_table == "source_table_value" - assert response.size_bytes == 1089 - assert response.state == table.Backup.State.CREATING @@ -4199,17 +4116,19 @@ async def test_update_backup_async_from_dict(): def test_update_backup_field_headers(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. 
request = bigtable_table_admin.UpdateBackupRequest() + request.backup.name = "backup.name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_backup), "__call__") as call: call.return_value = table.Backup() - client.update_backup(request) # Establish that the underlying gRPC stub method was called. @@ -4225,18 +4144,18 @@ def test_update_backup_field_headers(): @pytest.mark.asyncio async def test_update_backup_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_table_admin.UpdateBackupRequest() + request.backup.name = "backup.name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_backup), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Backup()) - await client.update_backup(request) # Establish that the underlying gRPC stub method was called. @@ -4250,32 +4169,33 @@ async def test_update_backup_field_headers_async(): def test_update_backup_flattened(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_backup), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = table.Backup() - # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.update_backup( backup=table.Backup(name="name_value"), - update_mask=field_mask.FieldMask(paths=["paths_value"]), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].backup == table.Backup(name="name_value") - - assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"]) def test_update_backup_flattened_error(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Attempting to call a method with both a request object and flattened # fields is an error. @@ -4283,14 +4203,14 @@ def test_update_backup_flattened_error(): client.update_backup( bigtable_table_admin.UpdateBackupRequest(), backup=table.Backup(name="name_value"), - update_mask=field_mask.FieldMask(paths=["paths_value"]), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) @pytest.mark.asyncio async def test_update_backup_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -4303,23 +4223,21 @@ async def test_update_backup_flattened_async(): # using the keyword arguments to the method. 
response = await client.update_backup( backup=table.Backup(name="name_value"), - update_mask=field_mask.FieldMask(paths=["paths_value"]), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].backup == table.Backup(name="name_value") - - assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"]) @pytest.mark.asyncio async def test_update_backup_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened @@ -4328,7 +4246,7 @@ async def test_update_backup_flattened_error_async(): await client.update_backup( bigtable_table_admin.UpdateBackupRequest(), backup=table.Backup(name="name_value"), - update_mask=field_mask.FieldMask(paths=["paths_value"]), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) @@ -4336,7 +4254,7 @@ def test_delete_backup( transport: str = "grpc", request_type=bigtable_table_admin.DeleteBackupRequest ): client = BigtableTableAdminClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -4347,13 +4265,11 @@ def test_delete_backup( with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = None - response = client.delete_backup(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DeleteBackupRequest() # Establish that the response is the type that we expect. @@ -4368,7 +4284,7 @@ def test_delete_backup_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( - credentials=credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -4376,7 +4292,6 @@ def test_delete_backup_empty_call(): client.delete_backup() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DeleteBackupRequest() @@ -4386,7 +4301,7 @@ async def test_delete_backup_async( request_type=bigtable_table_admin.DeleteBackupRequest, ): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -4397,13 +4312,11 @@ async def test_delete_backup_async( with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.delete_backup(request) # Establish that the underlying gRPC stub method was called. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DeleteBackupRequest() # Establish that the response is the type that we expect. @@ -4416,17 +4329,19 @@ async def test_delete_backup_async_from_dict(): def test_delete_backup_field_headers(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_table_admin.DeleteBackupRequest() + request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: call.return_value = None - client.delete_backup(request) # Establish that the underlying gRPC stub method was called. @@ -4442,18 +4357,18 @@ def test_delete_backup_field_headers(): @pytest.mark.asyncio async def test_delete_backup_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_table_admin.DeleteBackupRequest() + request.name = "name/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.delete_backup(request) # Establish that the underlying gRPC stub method was called. @@ -4467,13 +4382,14 @@ async def test_delete_backup_field_headers_async(): def test_delete_backup_flattened(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = None - # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.delete_backup(name="name_value",) @@ -4482,12 +4398,13 @@ def test_delete_backup_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" def test_delete_backup_flattened_error(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Attempting to call a method with both a request object and flattened # fields is an error. @@ -4500,7 +4417,7 @@ def test_delete_backup_flattened_error(): @pytest.mark.asyncio async def test_delete_backup_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -4517,14 +4434,13 @@ async def test_delete_backup_flattened_async(): # request object values. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" @pytest.mark.asyncio async def test_delete_backup_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened @@ -4539,7 +4455,7 @@ def test_list_backups( transport: str = "grpc", request_type=bigtable_table_admin.ListBackupsRequest ): client = BigtableTableAdminClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -4552,19 +4468,15 @@ def test_list_backups( call.return_value = bigtable_table_admin.ListBackupsResponse( next_page_token="next_page_token_value", ) - response = client.list_backups(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ListBackupsRequest() # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListBackupsPager) - assert response.next_page_token == "next_page_token_value" @@ -4576,7 +4488,7 @@ def test_list_backups_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( - credentials=credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -4584,7 +4496,6 @@ def test_list_backups_empty_call(): client.list_backups() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ListBackupsRequest() @@ -4594,7 +4505,7 @@ async def test_list_backups_async( request_type=bigtable_table_admin.ListBackupsRequest, ): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -4609,18 +4520,15 @@ async def test_list_backups_async( next_page_token="next_page_token_value", ) ) - response = await client.list_backups(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ListBackupsRequest() # Establish that the response is the type that we expect. assert isinstance(response, pagers.ListBackupsAsyncPager) - assert response.next_page_token == "next_page_token_value" @@ -4630,17 +4538,19 @@ async def test_list_backups_async_from_dict(): def test_list_backups_field_headers(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_table_admin.ListBackupsRequest() + request.parent = "parent/value" # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object(type(client.transport.list_backups), "__call__") as call: call.return_value = bigtable_table_admin.ListBackupsResponse() - client.list_backups(request) # Establish that the underlying gRPC stub method was called. @@ -4656,12 +4566,13 @@ def test_list_backups_field_headers(): @pytest.mark.asyncio async def test_list_backups_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_table_admin.ListBackupsRequest() + request.parent = "parent/value" # Mock the actual call within the gRPC stub, and fake the request. @@ -4669,7 +4580,6 @@ async def test_list_backups_field_headers_async(): call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( bigtable_table_admin.ListBackupsResponse() ) - await client.list_backups(request) # Establish that the underlying gRPC stub method was called. @@ -4683,13 +4593,14 @@ async def test_list_backups_field_headers_async(): def test_list_backups_flattened(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_backups), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = bigtable_table_admin.ListBackupsResponse() - # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.list_backups(parent="parent_value",) @@ -4698,12 +4609,13 @@ def test_list_backups_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].parent == "parent_value" def test_list_backups_flattened_error(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Attempting to call a method with both a request object and flattened # fields is an error. @@ -4716,7 +4628,7 @@ def test_list_backups_flattened_error(): @pytest.mark.asyncio async def test_list_backups_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -4735,14 +4647,13 @@ async def test_list_backups_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].parent == "parent_value" @pytest.mark.asyncio async def test_list_backups_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened @@ -4754,7 +4665,7 @@ async def test_list_backups_flattened_error_async(): def test_list_backups_pager(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials,) + client = BigtableTableAdminClient(credentials=ga_credentials.AnonymousCredentials,) # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object(type(client.transport.list_backups), "__call__") as call: @@ -4790,7 +4701,7 @@ def test_list_backups_pager(): def test_list_backups_pages(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials,) + client = BigtableTableAdminClient(credentials=ga_credentials.AnonymousCredentials,) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_backups), "__call__") as call: @@ -4819,7 +4730,7 @@ def test_list_backups_pages(): @pytest.mark.asyncio async def test_list_backups_async_pager(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials, + credentials=ga_credentials.AnonymousCredentials, ) # Mock the actual call within the gRPC stub, and fake the request. @@ -4856,7 +4767,7 @@ async def test_list_backups_async_pager(): @pytest.mark.asyncio async def test_list_backups_async_pages(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials, + credentials=ga_credentials.AnonymousCredentials, ) # Mock the actual call within the gRPC stub, and fake the request. @@ -4891,7 +4802,7 @@ def test_restore_table( transport: str = "grpc", request_type=bigtable_table_admin.RestoreTableRequest ): client = BigtableTableAdminClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -4902,13 +4813,11 @@ def test_restore_table( with mock.patch.object(type(client.transport.restore_table), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/spam") - response = client.restore_table(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.RestoreTableRequest() # Establish that the response is the type that we expect. @@ -4923,7 +4832,7 @@ def test_restore_table_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( - credentials=credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -4931,7 +4840,6 @@ def test_restore_table_empty_call(): client.restore_table() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.RestoreTableRequest() @@ -4941,7 +4849,7 @@ async def test_restore_table_async( request_type=bigtable_table_admin.RestoreTableRequest, ): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -4954,13 +4862,11 @@ async def test_restore_table_async( call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/spam") ) - response = await client.restore_table(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.RestoreTableRequest() # Establish that the response is the type that we expect. 
@@ -4973,17 +4879,19 @@ async def test_restore_table_async_from_dict(): def test_restore_table_field_headers(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_table_admin.RestoreTableRequest() + request.parent = "parent/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.restore_table), "__call__") as call: call.return_value = operations_pb2.Operation(name="operations/op") - client.restore_table(request) # Establish that the underlying gRPC stub method was called. @@ -4999,12 +4907,13 @@ def test_restore_table_field_headers(): @pytest.mark.asyncio async def test_restore_table_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable_table_admin.RestoreTableRequest() + request.parent = "parent/value" # Mock the actual call within the gRPC stub, and fake the request. @@ -5012,7 +4921,6 @@ async def test_restore_table_field_headers_async(): call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/op") ) - await client.restore_table(request) # Establish that the underlying gRPC stub method was called. @@ -5026,10 +4934,10 @@ async def test_restore_table_field_headers_async(): def test_get_iam_policy( - transport: str = "grpc", request_type=iam_policy.GetIamPolicyRequest + transport: str = "grpc", request_type=iam_policy_pb2.GetIamPolicyRequest ): client = BigtableTableAdminClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -5039,22 +4947,17 @@ def test_get_iam_policy( # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = policy.Policy(version=774, etag=b"etag_blob",) - + call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",) response = client.get_iam_policy(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - - assert args[0] == iam_policy.GetIamPolicyRequest() + assert args[0] == iam_policy_pb2.GetIamPolicyRequest() # Establish that the response is the type that we expect. - - assert isinstance(response, policy.Policy) - + assert isinstance(response, policy_pb2.Policy) assert response.version == 774 - assert response.etag == b"etag_blob" @@ -5066,7 +4969,7 @@ def test_get_iam_policy_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( - credentials=credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -5074,16 +4977,15 @@ def test_get_iam_policy_empty_call(): client.get_iam_policy() call.assert_called() _, args, _ = call.mock_calls[0] - - assert args[0] == iam_policy.GetIamPolicyRequest() + assert args[0] == iam_policy_pb2.GetIamPolicyRequest() @pytest.mark.asyncio async def test_get_iam_policy_async( - transport: str = "grpc_asyncio", request_type=iam_policy.GetIamPolicyRequest + transport: str = "grpc_asyncio", request_type=iam_policy_pb2.GetIamPolicyRequest ): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -5094,22 +4996,18 @@ async def test_get_iam_policy_async( with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - policy.Policy(version=774, etag=b"etag_blob",) + policy_pb2.Policy(version=774, etag=b"etag_blob",) ) - response = await client.get_iam_policy(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - - assert args[0] == iam_policy.GetIamPolicyRequest() + assert args[0] == iam_policy_pb2.GetIamPolicyRequest() # Establish that the response is the type that we expect. - assert isinstance(response, policy.Policy) - + assert isinstance(response, policy_pb2.Policy) assert response.version == 774 - assert response.etag == b"etag_blob" @@ -5119,17 +5017,19 @@ async def test_get_iam_policy_async_from_dict(): def test_get_iam_policy_field_headers(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = iam_policy.GetIamPolicyRequest() + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: - call.return_value = policy.Policy() - + call.return_value = policy_pb2.Policy() client.get_iam_policy(request) # Establish that the underlying gRPC stub method was called. @@ -5145,18 +5045,18 @@ def test_get_iam_policy_field_headers(): @pytest.mark.asyncio async def test_get_iam_policy_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = iam_policy.GetIamPolicyRequest() + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy.Policy()) - + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) await client.get_iam_policy(request) # Establish that the underlying gRPC stub method was called. 
@@ -5170,29 +5070,31 @@ async def test_get_iam_policy_field_headers_async(): def test_get_iam_policy_from_dict_foreign(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = policy.Policy() - + call.return_value = policy_pb2.Policy() response = client.get_iam_policy( request={ "resource": "resource_value", - "options": options.GetPolicyOptions(requested_policy_version=2598), + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), } ) call.assert_called() def test_get_iam_policy_flattened(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = policy.Policy() - + call.return_value = policy_pb2.Policy() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.get_iam_policy(resource="resource_value",) @@ -5201,33 +5103,34 @@ def test_get_iam_policy_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].resource == "resource_value" def test_get_iam_policy_flattened_error(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.get_iam_policy( - iam_policy.GetIamPolicyRequest(), resource="resource_value", + iam_policy_pb2.GetIamPolicyRequest(), resource="resource_value", ) @pytest.mark.asyncio async def test_get_iam_policy_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = policy.Policy() + call.return_value = policy_pb2.Policy() - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy.Policy()) + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.get_iam_policy(resource="resource_value",) @@ -5236,29 +5139,28 @@ async def test_get_iam_policy_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].resource == "resource_value" @pytest.mark.asyncio async def test_get_iam_policy_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): await client.get_iam_policy( - iam_policy.GetIamPolicyRequest(), resource="resource_value", + iam_policy_pb2.GetIamPolicyRequest(), resource="resource_value", ) def test_set_iam_policy( - transport: str = "grpc", request_type=iam_policy.SetIamPolicyRequest + transport: str = "grpc", request_type=iam_policy_pb2.SetIamPolicyRequest ): client = BigtableTableAdminClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -5268,22 +5170,17 @@ def test_set_iam_policy( # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = policy.Policy(version=774, etag=b"etag_blob",) - + call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",) response = client.set_iam_policy(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - - assert args[0] == iam_policy.SetIamPolicyRequest() + assert args[0] == iam_policy_pb2.SetIamPolicyRequest() # Establish that the response is the type that we expect. - - assert isinstance(response, policy.Policy) - + assert isinstance(response, policy_pb2.Policy) assert response.version == 774 - assert response.etag == b"etag_blob" @@ -5295,7 +5192,7 @@ def test_set_iam_policy_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( - credentials=credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -5303,16 +5200,15 @@ def test_set_iam_policy_empty_call(): client.set_iam_policy() call.assert_called() _, args, _ = call.mock_calls[0] - - assert args[0] == iam_policy.SetIamPolicyRequest() + assert args[0] == iam_policy_pb2.SetIamPolicyRequest() @pytest.mark.asyncio async def test_set_iam_policy_async( - transport: str = "grpc_asyncio", request_type=iam_policy.SetIamPolicyRequest + transport: str = "grpc_asyncio", request_type=iam_policy_pb2.SetIamPolicyRequest ): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -5323,22 +5219,18 @@ async def test_set_iam_policy_async( with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - policy.Policy(version=774, etag=b"etag_blob",) + policy_pb2.Policy(version=774, etag=b"etag_blob",) ) - response = await client.set_iam_policy(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - - assert args[0] == iam_policy.SetIamPolicyRequest() + assert args[0] == iam_policy_pb2.SetIamPolicyRequest() # Establish that the response is the type that we expect. 
- assert isinstance(response, policy.Policy) - + assert isinstance(response, policy_pb2.Policy) assert response.version == 774 - assert response.etag == b"etag_blob" @@ -5348,17 +5240,19 @@ async def test_set_iam_policy_async_from_dict(): def test_set_iam_policy_field_headers(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = iam_policy.SetIamPolicyRequest() + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: - call.return_value = policy.Policy() - + call.return_value = policy_pb2.Policy() client.set_iam_policy(request) # Establish that the underlying gRPC stub method was called. @@ -5374,18 +5268,18 @@ def test_set_iam_policy_field_headers(): @pytest.mark.asyncio async def test_set_iam_policy_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = iam_policy.SetIamPolicyRequest() + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy.Policy()) - + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) await client.set_iam_policy(request) # Establish that the underlying gRPC stub method was called. @@ -5399,29 +5293,31 @@ async def test_set_iam_policy_field_headers_async(): def test_set_iam_policy_from_dict_foreign(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = policy.Policy() - + call.return_value = policy_pb2.Policy() response = client.set_iam_policy( request={ "resource": "resource_value", - "policy": policy.Policy(version=774), + "policy": policy_pb2.Policy(version=774), } ) call.assert_called() def test_set_iam_policy_flattened(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = policy.Policy() - + call.return_value = policy_pb2.Policy() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.set_iam_policy(resource="resource_value",) @@ -5430,33 +5326,34 @@ def test_set_iam_policy_flattened(): # request object values. 
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].resource == "resource_value" def test_set_iam_policy_flattened_error(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): client.set_iam_policy( - iam_policy.SetIamPolicyRequest(), resource="resource_value", + iam_policy_pb2.SetIamPolicyRequest(), resource="resource_value", ) @pytest.mark.asyncio async def test_set_iam_policy_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = policy.Policy() + call.return_value = policy_pb2.Policy() - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy.Policy()) + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.set_iam_policy(resource="resource_value",) @@ -5465,29 +5362,28 @@ async def test_set_iam_policy_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].resource == "resource_value" @pytest.mark.asyncio async def test_set_iam_policy_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.set_iam_policy( - iam_policy.SetIamPolicyRequest(), resource="resource_value", + iam_policy_pb2.SetIamPolicyRequest(), resource="resource_value", ) def test_test_iam_permissions( - transport: str = "grpc", request_type=iam_policy.TestIamPermissionsRequest + transport: str = "grpc", request_type=iam_policy_pb2.TestIamPermissionsRequest ): client = BigtableTableAdminClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -5499,22 +5395,18 @@ def test_test_iam_permissions( type(client.transport.test_iam_permissions), "__call__" ) as call: # Designate an appropriate return value for the call. - call.return_value = iam_policy.TestIamPermissionsResponse( + call.return_value = iam_policy_pb2.TestIamPermissionsResponse( permissions=["permissions_value"], ) - response = client.test_iam_permissions(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - - assert args[0] == iam_policy.TestIamPermissionsRequest() + assert args[0] == iam_policy_pb2.TestIamPermissionsRequest() # Establish that the response is the type that we expect. 
- - assert isinstance(response, iam_policy.TestIamPermissionsResponse) - + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) assert response.permissions == ["permissions_value"] @@ -5526,7 +5418,7 @@ def test_test_iam_permissions_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( - credentials=credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -5536,16 +5428,16 @@ def test_test_iam_permissions_empty_call(): client.test_iam_permissions() call.assert_called() _, args, _ = call.mock_calls[0] - - assert args[0] == iam_policy.TestIamPermissionsRequest() + assert args[0] == iam_policy_pb2.TestIamPermissionsRequest() @pytest.mark.asyncio async def test_test_iam_permissions_async( - transport: str = "grpc_asyncio", request_type=iam_policy.TestIamPermissionsRequest + transport: str = "grpc_asyncio", + request_type=iam_policy_pb2.TestIamPermissionsRequest, ): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -5558,20 +5450,19 @@ async def test_test_iam_permissions_async( ) as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - iam_policy.TestIamPermissionsResponse(permissions=["permissions_value"],) + iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) ) - response = await client.test_iam_permissions(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - - assert args[0] == iam_policy.TestIamPermissionsRequest() + assert args[0] == iam_policy_pb2.TestIamPermissionsRequest() # Establish that the response is the type that we expect. - assert isinstance(response, iam_policy.TestIamPermissionsResponse) - + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) assert response.permissions == ["permissions_value"] @@ -5581,19 +5472,21 @@ async def test_test_iam_permissions_async_from_dict(): def test_test_iam_permissions_field_headers(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = iam_policy.TestIamPermissionsRequest() + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.test_iam_permissions), "__call__" ) as call: - call.return_value = iam_policy.TestIamPermissionsResponse() - + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() client.test_iam_permissions(request) # Establish that the underlying gRPC stub method was called. 
@@ -5609,12 +5502,13 @@ def test_test_iam_permissions_field_headers(): @pytest.mark.asyncio async def test_test_iam_permissions_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = iam_policy.TestIamPermissionsRequest() + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" # Mock the actual call within the gRPC stub, and fake the request. @@ -5622,9 +5516,8 @@ async def test_test_iam_permissions_field_headers_async(): type(client.transport.test_iam_permissions), "__call__" ) as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - iam_policy.TestIamPermissionsResponse() + iam_policy_pb2.TestIamPermissionsResponse() ) - await client.test_iam_permissions(request) # Establish that the underlying gRPC stub method was called. @@ -5638,14 +5531,15 @@ async def test_test_iam_permissions_field_headers_async(): def test_test_iam_permissions_from_dict_foreign(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.test_iam_permissions), "__call__" ) as call: # Designate an appropriate return value for the call. - call.return_value = iam_policy.TestIamPermissionsResponse() - + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() response = client.test_iam_permissions( request={ "resource": "resource_value", @@ -5656,15 +5550,16 @@ def test_test_iam_permissions_from_dict_foreign(): def test_test_iam_permissions_flattened(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.test_iam_permissions), "__call__" ) as call: # Designate an appropriate return value for the call. - call.return_value = iam_policy.TestIamPermissionsResponse() - + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.test_iam_permissions( @@ -5675,20 +5570,20 @@ def test_test_iam_permissions_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].resource == "resource_value" - assert args[0].permissions == ["permissions_value"] def test_test_iam_permissions_flattened_error(): - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): client.test_iam_permissions( - iam_policy.TestIamPermissionsRequest(), + iam_policy_pb2.TestIamPermissionsRequest(), resource="resource_value", permissions=["permissions_value"], ) @@ -5697,7 +5592,7 @@ def test_test_iam_permissions_flattened_error(): @pytest.mark.asyncio async def test_test_iam_permissions_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -5705,10 +5600,10 @@ async def test_test_iam_permissions_flattened_async(): type(client.transport.test_iam_permissions), "__call__" ) as call: # Designate an appropriate return value for the call. - call.return_value = iam_policy.TestIamPermissionsResponse() + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - iam_policy.TestIamPermissionsResponse() + iam_policy_pb2.TestIamPermissionsResponse() ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. @@ -5720,23 +5615,21 @@ async def test_test_iam_permissions_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].resource == "resource_value" - assert args[0].permissions == ["permissions_value"] @pytest.mark.asyncio async def test_test_iam_permissions_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): await client.test_iam_permissions( - iam_policy.TestIamPermissionsRequest(), + iam_policy_pb2.TestIamPermissionsRequest(), resource="resource_value", permissions=["permissions_value"], ) @@ -5745,16 +5638,16 @@ async def test_test_iam_permissions_flattened_error_async(): def test_credentials_transport_error(): # It is an error to provide credentials and a transport instance. transport = transports.BigtableTableAdminGrpcTransport( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client = BigtableTableAdminClient( - credentials=credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # It is an error to provide a credentials file and a transport instance. transport = transports.BigtableTableAdminGrpcTransport( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client = BigtableTableAdminClient( @@ -5764,7 +5657,7 @@ def test_credentials_transport_error(): # It is an error to provide scopes and a transport instance. transport = transports.BigtableTableAdminGrpcTransport( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): client = BigtableTableAdminClient( @@ -5775,7 +5668,7 @@ def test_credentials_transport_error(): def test_transport_instance(): # A client may be instantiated with a custom transport instance. 
transport = transports.BigtableTableAdminGrpcTransport( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) client = BigtableTableAdminClient(transport=transport) assert client.transport is transport @@ -5784,13 +5677,13 @@ def test_transport_instance(): def test_transport_get_channel(): # A client may be instantiated with a custom transport instance. transport = transports.BigtableTableAdminGrpcTransport( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) channel = transport.grpc_channel assert channel transport = transports.BigtableTableAdminGrpcAsyncIOTransport( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) channel = transport.grpc_channel assert channel @@ -5805,23 +5698,25 @@ def test_transport_get_channel(): ) def test_transport_adc(transport_class): # Test default credentials are used if not provided. - with mock.patch.object(auth, "default") as adc: - adc.return_value = (credentials.AnonymousCredentials(), None) + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) transport_class() adc.assert_called_once() def test_transport_grpc_default(): # A client should use the gRPC transport by default. - client = BigtableTableAdminClient(credentials=credentials.AnonymousCredentials(),) + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) assert isinstance(client.transport, transports.BigtableTableAdminGrpcTransport,) def test_bigtable_table_admin_base_transport_error(): # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(exceptions.DuplicateCredentialArgs): + with pytest.raises(core_exceptions.DuplicateCredentialArgs): transport = transports.BigtableTableAdminTransport( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), credentials_file="credentials.json", ) @@ -5833,7 +5728,7 @@ def test_bigtable_table_admin_base_transport(): ) as Transport: Transport.return_value = None transport = transports.BigtableTableAdminTransport( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), ) # Every method on the transport should just blindly @@ -5872,15 +5767,44 @@ def test_bigtable_table_admin_base_transport(): transport.operations_client +@requires_google_auth_gte_1_25_0 def test_bigtable_table_admin_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file with mock.patch.object( - auth, "load_credentials_from_file" + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch( + "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.BigtableTableAdminTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.BigtableTableAdminTransport( + credentials_file="credentials.json", quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=None, + default_scopes=( + "https://www.googleapis.com/auth/bigtable.admin", + "https://www.googleapis.com/auth/bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-bigtable.admin", + "https://www.googleapis.com/auth/cloud-bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-platform", 
+ "https://www.googleapis.com/auth/cloud-platform.read-only", + ), + quota_project_id="octopus", + ) + + +@requires_google_auth_lt_1_25_0 +def test_bigtable_table_admin_base_transport_with_credentials_file_old_google_auth(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True ) as load_creds, mock.patch( "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.BigtableTableAdminTransport._prep_wrapped_messages" ) as Transport: Transport.return_value = None - load_creds.return_value = (credentials.AnonymousCredentials(), None) + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) transport = transports.BigtableTableAdminTransport( credentials_file="credentials.json", quota_project_id="octopus", ) @@ -5900,19 +5824,40 @@ def test_bigtable_table_admin_base_transport_with_credentials_file(): def test_bigtable_table_admin_base_transport_with_adc(): # Test the default credentials are used if credentials and credentials_file are None. - with mock.patch.object(auth, "default") as adc, mock.patch( + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch( "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.BigtableTableAdminTransport._prep_wrapped_messages" ) as Transport: Transport.return_value = None - adc.return_value = (credentials.AnonymousCredentials(), None) + adc.return_value = (ga_credentials.AnonymousCredentials(), None) transport = transports.BigtableTableAdminTransport() adc.assert_called_once() +@requires_google_auth_gte_1_25_0 def test_bigtable_table_admin_auth_adc(): # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(auth, "default") as adc: - adc.return_value = (credentials.AnonymousCredentials(), None) + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + BigtableTableAdminClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + "https://www.googleapis.com/auth/bigtable.admin", + "https://www.googleapis.com/auth/bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-bigtable.admin", + "https://www.googleapis.com/auth/cloud-bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ), + quota_project_id=None, + ) + + +@requires_google_auth_lt_1_25_0 +def test_bigtable_table_admin_auth_adc_old_google_auth(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) BigtableTableAdminClient() adc.assert_called_once_with( scopes=( @@ -5927,14 +5872,48 @@ def test_bigtable_table_admin_auth_adc(): ) -def test_bigtable_table_admin_transport_auth_adc(): +@pytest.mark.parametrize( + "transport_class", + [ + transports.BigtableTableAdminGrpcTransport, + transports.BigtableTableAdminGrpcAsyncIOTransport, + ], +) +@requires_google_auth_gte_1_25_0 +def test_bigtable_table_admin_transport_auth_adc(transport_class): # If credentials and host are not provided, the transport class should use # ADC credentials. 
- with mock.patch.object(auth, "default") as adc: - adc.return_value = (credentials.AnonymousCredentials(), None) - transports.BigtableTableAdminGrpcTransport( - host="squid.clam.whelk", quota_project_id="octopus" + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( + "https://www.googleapis.com/auth/bigtable.admin", + "https://www.googleapis.com/auth/bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-bigtable.admin", + "https://www.googleapis.com/auth/cloud-bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ), + quota_project_id="octopus", ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.BigtableTableAdminGrpcTransport, + transports.BigtableTableAdminGrpcAsyncIOTransport, + ], +) +@requires_google_auth_lt_1_25_0 +def test_bigtable_table_admin_transport_auth_adc_old_google_auth(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus") adc.assert_called_once_with( scopes=( "https://www.googleapis.com/auth/bigtable.admin", @@ -5948,6 +5927,129 @@ def test_bigtable_table_admin_transport_auth_adc(): ) +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.BigtableTableAdminGrpcTransport, grpc_helpers), + (transports.BigtableTableAdminGrpcAsyncIOTransport, grpc_helpers_async), + ], +) +@requires_api_core_gte_1_26_0 +def test_bigtable_table_admin_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + + create_channel.assert_called_with( + "bigtableadmin.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + "https://www.googleapis.com/auth/bigtable.admin", + "https://www.googleapis.com/auth/bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-bigtable.admin", + "https://www.googleapis.com/auth/cloud-bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ), + scopes=["1", "2"], + default_host="bigtableadmin.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.BigtableTableAdminGrpcTransport, grpc_helpers), + (transports.BigtableTableAdminGrpcAsyncIOTransport, grpc_helpers_async), + ], +) +@requires_api_core_lt_1_26_0 +def test_bigtable_table_admin_transport_create_channel_old_api_core( + transport_class, grpc_helpers +): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class(quota_project_id="octopus") + + create_channel.assert_called_with( + "bigtableadmin.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + scopes=( + "https://www.googleapis.com/auth/bigtable.admin", + "https://www.googleapis.com/auth/bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-bigtable.admin", + "https://www.googleapis.com/auth/cloud-bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ), + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.BigtableTableAdminGrpcTransport, grpc_helpers), + (transports.BigtableTableAdminGrpcAsyncIOTransport, grpc_helpers_async), + ], +) +@requires_api_core_lt_1_26_0 +def test_bigtable_table_admin_transport_create_channel_user_scopes( + transport_class, grpc_helpers +): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + + create_channel.assert_called_with( + "bigtableadmin.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + scopes=["1", "2"], + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + @pytest.mark.parametrize( "transport_class", [ @@ -5958,7 +6060,7 @@ def test_bigtable_table_admin_transport_auth_adc(): def test_bigtable_table_admin_grpc_transport_client_cert_source_for_mtls( transport_class, ): - cred = credentials.AnonymousCredentials() + cred = ga_credentials.AnonymousCredentials() # Check ssl_channel_credentials is used if provided. 
with mock.patch.object(transport_class, "create_channel") as mock_create_channel: @@ -6004,7 +6106,7 @@ def test_bigtable_table_admin_grpc_transport_client_cert_source_for_mtls( def test_bigtable_table_admin_host_no_port(): client = BigtableTableAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="bigtableadmin.googleapis.com" ), @@ -6014,7 +6116,7 @@ def test_bigtable_table_admin_host_no_port(): def test_bigtable_table_admin_host_with_port(): client = BigtableTableAdminClient( - credentials=credentials.AnonymousCredentials(), + credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="bigtableadmin.googleapis.com:8000" ), @@ -6070,9 +6172,9 @@ def test_bigtable_table_admin_transport_channel_mtls_with_client_cert_source( mock_grpc_channel = mock.Mock() grpc_create_channel.return_value = mock_grpc_channel - cred = credentials.AnonymousCredentials() + cred = ga_credentials.AnonymousCredentials() with pytest.warns(DeprecationWarning): - with mock.patch.object(auth, "default") as adc: + with mock.patch.object(google.auth, "default") as adc: adc.return_value = (cred, None) transport = transport_class( host="squid.clam.whelk", @@ -6162,7 +6264,7 @@ def test_bigtable_table_admin_transport_channel_mtls_with_adc(transport_class): def test_bigtable_table_admin_grpc_lro_client(): client = BigtableTableAdminClient( - credentials=credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) transport = client.transport @@ -6175,7 +6277,7 @@ def test_bigtable_table_admin_grpc_lro_client(): def test_bigtable_table_admin_grpc_lro_async_client(): client = BigtableTableAdminAsyncClient( - credentials=credentials.AnonymousCredentials(), transport="grpc_asyncio", + credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", ) transport = client.transport @@ -6191,7 +6293,6 @@ def test_backup_path(): instance = "clam" cluster = "whelk" backup = "octopus" - expected = "projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}".format( project=project, instance=instance, cluster=cluster, backup=backup, ) @@ -6217,7 +6318,6 @@ def test_cluster_path(): project = "winkle" instance = "nautilus" cluster = "scallop" - expected = "projects/{project}/instances/{instance}/clusters/{cluster}".format( project=project, instance=instance, cluster=cluster, ) @@ -6244,7 +6344,6 @@ def test_crypto_key_version_path(): key_ring = "oyster" crypto_key = "nudibranch" crypto_key_version = "cuttlefish" - expected = "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}/cryptoKeyVersions/{crypto_key_version}".format( project=project, location=location, @@ -6276,7 +6375,6 @@ def test_parse_crypto_key_version_path(): def test_instance_path(): project = "squid" instance = "clam" - expected = "projects/{project}/instances/{instance}".format( project=project, instance=instance, ) @@ -6301,7 +6399,6 @@ def test_snapshot_path(): instance = "nudibranch" cluster = "cuttlefish" snapshot = "mussel" - expected = "projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}".format( project=project, instance=instance, cluster=cluster, snapshot=snapshot, ) @@ -6329,7 +6426,6 @@ def test_table_path(): project = "squid" instance = "clam" table = "whelk" - expected = "projects/{project}/instances/{instance}/tables/{table}".format( 
project=project, instance=instance, table=table, ) @@ -6352,7 +6448,6 @@ def test_parse_table_path(): def test_common_billing_account_path(): billing_account = "cuttlefish" - expected = "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -6373,7 +6468,6 @@ def test_parse_common_billing_account_path(): def test_common_folder_path(): folder = "winkle" - expected = "folders/{folder}".format(folder=folder,) actual = BigtableTableAdminClient.common_folder_path(folder) assert expected == actual @@ -6392,7 +6486,6 @@ def test_parse_common_folder_path(): def test_common_organization_path(): organization = "scallop" - expected = "organizations/{organization}".format(organization=organization,) actual = BigtableTableAdminClient.common_organization_path(organization) assert expected == actual @@ -6411,7 +6504,6 @@ def test_parse_common_organization_path(): def test_common_project_path(): project = "squid" - expected = "projects/{project}".format(project=project,) actual = BigtableTableAdminClient.common_project_path(project) assert expected == actual @@ -6431,7 +6523,6 @@ def test_parse_common_project_path(): def test_common_location_path(): project = "whelk" location = "octopus" - expected = "projects/{project}/locations/{location}".format( project=project, location=location, ) @@ -6458,7 +6549,7 @@ def test_client_withDEFAULT_CLIENT_INFO(): transports.BigtableTableAdminTransport, "_prep_wrapped_messages" ) as prep: client = BigtableTableAdminClient( - credentials=credentials.AnonymousCredentials(), client_info=client_info, + credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) @@ -6467,6 +6558,6 @@ def test_client_withDEFAULT_CLIENT_INFO(): ) as prep: transport_class = BigtableTableAdminClient.get_transport_class() transport = transport_class( - credentials=credentials.AnonymousCredentials(), client_info=client_info, + credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) From f72a361de7b83afa885602377826b44c84b388b5 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Wed, 9 Jun 2021 17:41:53 +0200 Subject: [PATCH 454/892] chore(deps): update dependency apache-beam to v2.30.0 (#321) --- packages/google-cloud-bigtable/samples/beam/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index 78398e1f6a9a..f0ccb84d7e7c 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ -apache-beam==2.29.0 +apache-beam==2.30.0 google-cloud-bigtable<2.0.0 google-cloud-core==1.6.0 \ No newline at end of file From 835ec33f0363a794bb4caa61ffd9c21756addaf0 Mon Sep 17 00:00:00 2001 From: Anthonios Partheniou Date: Wed, 16 Jun 2021 11:08:02 -0400 Subject: [PATCH 455/892] fix(deps): add packaging requirement (#326) --- packages/google-cloud-bigtable/setup.py | 1 + 1 file changed, 1 insertion(+) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index b37a44e490f3..21a0126f35b4 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -34,6 +34,7 @@ "grpc-google-iam-v1 >= 0.12.3, < 0.13dev", "proto-plus >= 1.13.0", "libcst >= 0.2.5", + "packaging >= 14.3", ] extras = {} From a2284e6416da8c5a6cbe55d42848f8dd7681ef09 Mon 
Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 17 Jun 2021 10:52:11 +0000 Subject: [PATCH 456/892] chore: new owl bot post processor docker image (#327) Post-Processor: gcr.io/repo-automation-bots/owlbot-python:latest@sha256:58c7342b0bccf85028100adaa3d856cb4a871c22ca9c01960d996e66c40548ce --- .../google-cloud-bigtable/.github/.OwlBot.lock.yaml | 2 +- packages/google-cloud-bigtable/docs/conf.py | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index da616c91a3b6..ea06d395ea2b 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -1,3 +1,3 @@ docker: image: gcr.io/repo-automation-bots/owlbot-python:latest - digest: sha256:c66ba3c8d7bc8566f47df841f98cd0097b28fff0b1864c86f5817f4c8c3e8600 + digest: sha256:58c7342b0bccf85028100adaa3d856cb4a871c22ca9c01960d996e66c40548ce diff --git a/packages/google-cloud-bigtable/docs/conf.py b/packages/google-cloud-bigtable/docs/conf.py index ebc5c9904993..4e05a219b43f 100644 --- a/packages/google-cloud-bigtable/docs/conf.py +++ b/packages/google-cloud-bigtable/docs/conf.py @@ -80,9 +80,9 @@ master_doc = "index" # General information about the project. -project = u"google-cloud-bigtable" -copyright = u"2019, Google" -author = u"Google APIs" +project = "google-cloud-bigtable" +copyright = "2019, Google" +author = "Google APIs" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -281,7 +281,7 @@ ( master_doc, "google-cloud-bigtable.tex", - u"google-cloud-bigtable Documentation", + "google-cloud-bigtable Documentation", author, "manual", ) @@ -316,7 +316,7 @@ ( master_doc, "google-cloud-bigtable", - u"google-cloud-bigtable Documentation", + "google-cloud-bigtable Documentation", [author], 1, ) @@ -335,7 +335,7 @@ ( master_doc, "google-cloud-bigtable", - u"google-cloud-bigtable Documentation", + "google-cloud-bigtable Documentation", author, "google-cloud-bigtable", "google-cloud-bigtable Library", From b008bf15a3d4d3b7e5599691f8f063ed8b5008c1 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Fri, 18 Jun 2021 20:34:58 +0200 Subject: [PATCH 457/892] chore(deps): update dependency google-cloud-monitoring to v2.3.0 (#328) --- .../google-cloud-bigtable/samples/metricscaler/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index f9c39c01ff58..b4c87f2cb809 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ google-cloud-bigtable==2.0.0 -google-cloud-monitoring==2.2.1 +google-cloud-monitoring==2.3.0 From e0b5d12398daf2226b097df0b8939499a0b18f33 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Sat, 19 Jun 2021 01:36:07 +0000 Subject: [PATCH 458/892] docs: omit mention of Python 2.7 in 'CONTRIBUTING.rst' (#1127) (#329) Closes #1126 Source-Link: https://github.com/googleapis/synthtool/commit/b91f129527853d5b756146a0b5044481fb4e09a8 Post-Processor: 
gcr.io/repo-automation-bots/owlbot-python:latest@sha256:b6169fc6a5207b11800a7c002d0c5c2bc6d82697185ca12e666f44031468cfcd --- packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml | 2 +- packages/google-cloud-bigtable/CONTRIBUTING.rst | 7 ++----- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index ea06d395ea2b..cc49c6a3dfac 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -1,3 +1,3 @@ docker: image: gcr.io/repo-automation-bots/owlbot-python:latest - digest: sha256:58c7342b0bccf85028100adaa3d856cb4a871c22ca9c01960d996e66c40548ce + digest: sha256:b6169fc6a5207b11800a7c002d0c5c2bc6d82697185ca12e666f44031468cfcd diff --git a/packages/google-cloud-bigtable/CONTRIBUTING.rst b/packages/google-cloud-bigtable/CONTRIBUTING.rst index 5437a1b5bb2f..999ecd5db0d8 100644 --- a/packages/google-cloud-bigtable/CONTRIBUTING.rst +++ b/packages/google-cloud-bigtable/CONTRIBUTING.rst @@ -69,7 +69,6 @@ We use `nox `__ to instrument our tests. - To test your changes, run unit tests with ``nox``:: - $ nox -s unit-2.7 $ nox -s unit-3.8 $ ... @@ -144,7 +143,6 @@ Running System Tests # Run all system tests $ nox -s system-3.8 - $ nox -s system-2.7 # Run a single system test $ nox -s system-3.8 -- -k @@ -152,9 +150,8 @@ Running System Tests .. note:: - System tests are only configured to run under Python 2.7 and - Python 3.8. For expediency, we do not run them in older versions - of Python 3. + System tests are only configured to run under Python 3.8. + For expediency, we do not run them in older versions of Python 3. This alone will not run the tests. You'll need to change some local auth settings and change some configuration in your project to From 293295f81781f0b80db812c5918abf1535405fff Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Sun, 20 Jun 2021 14:04:02 +0000 Subject: [PATCH 459/892] chore: update precommit hook pre-commit/pre-commit-hooks to v4 (#1083) (#330) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [![WhiteSource Renovate](https://app.renovatebot.com/images/banner.svg)](https://renovatebot.com) This PR contains the following updates: | Package | Type | Update | Change | |---|---|---|---| | [pre-commit/pre-commit-hooks](https://togithub.com/pre-commit/pre-commit-hooks) | repository | major | `v3.4.0` -> `v4.0.1` | --- ### Release Notes
pre-commit/pre-commit-hooks ### [`v4.0.1`](https://togithub.com/pre-commit/pre-commit-hooks/releases/v4.0.1) [Compare Source](https://togithub.com/pre-commit/pre-commit-hooks/compare/v4.0.0...v4.0.1) ##### Fixes - `check-shebang-scripts-are-executable` fix entry point. - [#​602](https://togithub.com/pre-commit/pre-commit-hooks/issues/602) issue by [@​Person-93](https://togithub.com/Person-93). - [#​603](https://togithub.com/pre-commit/pre-commit-hooks/issues/603) PR by [@​scop](https://togithub.com/scop). ### [`v4.0.0`](https://togithub.com/pre-commit/pre-commit-hooks/releases/v4.0.0) [Compare Source](https://togithub.com/pre-commit/pre-commit-hooks/compare/v3.4.0...v4.0.0) ##### Features - `check-json`: report duplicate keys. - [#​558](https://togithub.com/pre-commit/pre-commit-hooks/issues/558) PR by [@​AdityaKhursale](https://togithub.com/AdityaKhursale). - [#​554](https://togithub.com/pre-commit/pre-commit-hooks/issues/554) issue by [@​adamchainz](https://togithub.com/adamchainz). - `no-commit-to-branch`: add `main` to default blocked branches. - [#​565](https://togithub.com/pre-commit/pre-commit-hooks/issues/565) PR by [@​ndevenish](https://togithub.com/ndevenish). - `check-case-conflict`: check conflicts in directory names as well. - [#​575](https://togithub.com/pre-commit/pre-commit-hooks/issues/575) PR by [@​slsyy](https://togithub.com/slsyy). - [#​70](https://togithub.com/pre-commit/pre-commit-hooks/issues/70) issue by [@​andyjack](https://togithub.com/andyjack). - `check-vcs-permalinks`: forbid other branch names. - [#​582](https://togithub.com/pre-commit/pre-commit-hooks/issues/582) PR by [@​jack1142](https://togithub.com/jack1142). - [#​581](https://togithub.com/pre-commit/pre-commit-hooks/issues/581) issue by [@​jack1142](https://togithub.com/jack1142). - `check-shebang-scripts-are-executable`: new hook which ensures shebang'd scripts are executable. - [#​545](https://togithub.com/pre-commit/pre-commit-hooks/issues/545) PR by [@​scop](https://togithub.com/scop). ##### Fixes - `check-executables-have-shebangs`: Short circuit shebang lookup on windows. - [#​544](https://togithub.com/pre-commit/pre-commit-hooks/issues/544) PR by [@​scop](https://togithub.com/scop). - `requirements-txt-fixer`: Fix comments which have indentation - [#​549](https://togithub.com/pre-commit/pre-commit-hooks/issues/549) PR by [@​greshilov](https://togithub.com/greshilov). - [#​548](https://togithub.com/pre-commit/pre-commit-hooks/issues/548) issue by [@​greshilov](https://togithub.com/greshilov). - `pretty-format-json`: write to stdout using UTF-8 encoding. - [#​571](https://togithub.com/pre-commit/pre-commit-hooks/issues/571) PR by [@​jack1142](https://togithub.com/jack1142). - [#​570](https://togithub.com/pre-commit/pre-commit-hooks/issues/570) issue by [@​jack1142](https://togithub.com/jack1142). - Use more inclusive language. - [#​599](https://togithub.com/pre-commit/pre-commit-hooks/issues/599) PR by [@​asottile](https://togithub.com/asottile). ##### Breaking changes - Remove deprecated hooks: `flake8`, `pyflakes`, `autopep8-wrapper`. - [#​597](https://togithub.com/pre-commit/pre-commit-hooks/issues/597) PR by [@​asottile](https://togithub.com/asottile).
--- ### Configuration 📅 **Schedule**: At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻️ **Rebasing**: Renovate will not automatically rebase this PR, because other commits have been found. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box. --- This PR has been generated by [WhiteSource Renovate](https://renovate.whitesourcesoftware.com). View repository job log [here](https://app.renovatebot.com/dashboard#github/googleapis/synthtool). Source-Link: https://github.com/googleapis/synthtool/commit/333fd90856f1454380514bc59fc0936cdaf1c202 Post-Processor: gcr.io/repo-automation-bots/owlbot-python:latest@sha256:b8c131c558606d3cea6e18f8e87befbd448c1482319b0db3c5d5388fa6ea72e3 --- packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml | 2 +- packages/google-cloud-bigtable/.pre-commit-config.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index cc49c6a3dfac..9602d540595e 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -1,3 +1,3 @@ docker: image: gcr.io/repo-automation-bots/owlbot-python:latest - digest: sha256:b6169fc6a5207b11800a7c002d0c5c2bc6d82697185ca12e666f44031468cfcd + digest: sha256:b8c131c558606d3cea6e18f8e87befbd448c1482319b0db3c5d5388fa6ea72e3 diff --git a/packages/google-cloud-bigtable/.pre-commit-config.yaml b/packages/google-cloud-bigtable/.pre-commit-config.yaml index 4f00c7cffcfd..62eb5a77d9a3 100644 --- a/packages/google-cloud-bigtable/.pre-commit-config.yaml +++ b/packages/google-cloud-bigtable/.pre-commit-config.yaml @@ -16,7 +16,7 @@ # See https://pre-commit.com/hooks.html for more hooks repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v3.4.0 + rev: v4.0.1 hooks: - id: trailing-whitespace - id: end-of-file-fixer From b19a3539f60897801ad6f86aadaaeb962ab507ae Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Tue, 22 Jun 2021 19:20:07 +0000 Subject: [PATCH 460/892] chore: add kokoro 3.9 config templates (#332) Source-Link: https://github.com/googleapis/synthtool/commit/b0eb8a8b30b46a3c98d23c23107acb748c6601a1 Post-Processor: gcr.io/repo-automation-bots/owlbot-python:latest@sha256:df50e8d462f86d6bcb42f27ecad55bb12c404f1c65de9c6fe4c4d25120080bd6 --- .../.github/.OwlBot.lock.yaml | 2 +- .../.kokoro/samples/python3.9/common.cfg | 40 +++++++++++++++++++ .../.kokoro/samples/python3.9/continuous.cfg | 6 +++ .../samples/python3.9/periodic-head.cfg | 11 +++++ .../.kokoro/samples/python3.9/periodic.cfg | 6 +++ .../.kokoro/samples/python3.9/presubmit.cfg | 6 +++ 6 files changed, 70 insertions(+), 1 deletion(-) create mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.9/common.cfg create mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.9/continuous.cfg create mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.9/periodic-head.cfg create mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.9/periodic.cfg create mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.9/presubmit.cfg diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 9602d540595e..0954585f2833 100644 --- 
a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -1,3 +1,3 @@ docker: image: gcr.io/repo-automation-bots/owlbot-python:latest - digest: sha256:b8c131c558606d3cea6e18f8e87befbd448c1482319b0db3c5d5388fa6ea72e3 + digest: sha256:df50e8d462f86d6bcb42f27ecad55bb12c404f1c65de9c6fe4c4d25120080bd6 diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.9/common.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.9/common.cfg new file mode 100644 index 000000000000..5bc5fa834e81 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.9/common.cfg @@ -0,0 +1,40 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Specify which tests to run +env_vars: { + key: "RUN_TESTS_SESSION" + value: "py-3.9" +} + +# Declare build specific Cloud project. +env_vars: { + key: "BUILD_SPECIFIC_GCLOUD_PROJECT" + value: "python-docs-samples-tests-py39" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-bigtable/.kokoro/test-samples.sh" +} + +# Configure the docker image for kokoro-trampoline. +env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker" +} + +# Download secrets for samples +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. +build_file: "python-bigtable/.kokoro/trampoline.sh" \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.9/continuous.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.9/continuous.cfg new file mode 100644 index 000000000000..a1c8d9759c88 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.9/continuous.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.9/periodic-head.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.9/periodic-head.cfg new file mode 100644 index 000000000000..f9cfcd33e058 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.9/periodic-head.cfg @@ -0,0 +1,11 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-pubsub/.kokoro/test-samples-against-head.sh" +} diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.9/periodic.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.9/periodic.cfg new file mode 100644 index 000000000000..50fec9649732 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.9/periodic.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "False" +} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.9/presubmit.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.9/presubmit.cfg new file mode 100644 index 000000000000..a1c8d9759c88 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.9/presubmit.cfg @@ -0,0 +1,6 @@ +# Format: 
//devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file From 062200bf00fd00a5a32d161f79b3ec56ccb194c9 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Wed, 23 Jun 2021 20:24:33 +0000 Subject: [PATCH 461/892] feat: add always_use_jwt_access (#333) ... chore: update gapic-generator-ruby to the latest commit chore: release gapic-generator-typescript 1.5.0 Committer: @miraleung PiperOrigin-RevId: 380641501 Source-Link: https://github.com/googleapis/googleapis/commit/076f7e9f0b258bdb54338895d7251b202e8f0de3 Source-Link: https://github.com/googleapis/googleapis-gen/commit/27e4c88b4048e5f56508d4e1aa417d60a3380892 --- .../transports/base.py | 40 +++---- .../transports/grpc.py | 7 +- .../transports/grpc_asyncio.py | 7 +- .../bigtable_table_admin/transports/base.py | 40 +++---- .../bigtable_table_admin/transports/grpc.py | 7 +- .../transports/grpc_asyncio.py | 7 +- .../services/bigtable/transports/base.py | 40 +++---- .../services/bigtable/transports/grpc.py | 7 +- .../bigtable/transports/grpc_asyncio.py | 7 +- .../test_bigtable_instance_admin.py | 112 +++--------------- .../test_bigtable_table_admin.py | 111 +++-------------- .../unit/gapic/bigtable_v2/test_bigtable.py | 103 ++-------------- 12 files changed, 109 insertions(+), 379 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py index 689dbc5f7b0c..f8a292263fd8 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py @@ -25,6 +25,7 @@ from google.api_core import retry as retries # type: ignore from google.api_core import operations_v1 # type: ignore from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin from google.cloud.bigtable_admin_v2.types import instance @@ -51,8 +52,6 @@ except pkg_resources.DistributionNotFound: # pragma: NO COVER _GOOGLE_AUTH_VERSION = None -_API_CORE_VERSION = google.api_core.__version__ - class BigtableInstanceAdminTransport(abc.ABC): """Abstract transport class for BigtableInstanceAdmin.""" @@ -78,6 +77,7 @@ def __init__( scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, **kwargs, ) -> None: """Instantiate the transport. @@ -101,6 +101,8 @@ def __init__( API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. """ # Save the hostname. Default to port 443 (HTTPS) if none is specified. if ":" not in host: @@ -129,13 +131,20 @@ def __init__( **scopes_kwargs, quota_project_id=quota_project_id ) + # If the credentials is service account credentials, then always try to use self signed JWT. 
+ if ( + always_use_jwt_access + and isinstance(credentials, service_account.Credentials) + and hasattr(service_account.Credentials, "with_always_use_jwt_access") + ): + credentials = credentials.with_always_use_jwt_access(True) + # Save the credentials. self._credentials = credentials - # TODO(busunkim): These two class methods are in the base transport + # TODO(busunkim): This method is in the base transport # to avoid duplicating code across the transport classes. These functions - # should be deleted once the minimum required versions of google-api-core - # and google-auth are increased. + # should be deleted once the minimum required versions of google-auth is increased. # TODO: Remove this function once google-auth >= 1.25.0 is required @classmethod @@ -156,27 +165,6 @@ def _get_scopes_kwargs( return scopes_kwargs - # TODO: Remove this function once google-api-core >= 1.26.0 is required - @classmethod - def _get_self_signed_jwt_kwargs( - cls, host: str, scopes: Optional[Sequence[str]] - ) -> Dict[str, Union[Optional[Sequence[str]], str]]: - """Returns kwargs to pass to grpc_helpers.create_channel depending on the google-api-core version""" - - self_signed_jwt_kwargs: Dict[str, Union[Optional[Sequence[str]], str]] = {} - - if _API_CORE_VERSION and ( - packaging.version.parse(_API_CORE_VERSION) - >= packaging.version.parse("1.26.0") - ): - self_signed_jwt_kwargs["default_scopes"] = cls.AUTH_SCOPES - self_signed_jwt_kwargs["scopes"] = scopes - self_signed_jwt_kwargs["default_host"] = cls.DEFAULT_HOST - else: - self_signed_jwt_kwargs["scopes"] = scopes or cls.AUTH_SCOPES - - return self_signed_jwt_kwargs - def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. self._wrapped_methods = { diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py index 3d3a4144e98e..85cc215e5140 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py @@ -159,6 +159,7 @@ def __init__( scopes=scopes, quota_project_id=quota_project_id, client_info=client_info, + always_use_jwt_access=True, ) if not self._grpc_channel: @@ -214,14 +215,14 @@ def create_channel( and ``credentials_file`` are passed. """ - self_signed_jwt_kwargs = cls._get_self_signed_jwt_kwargs(host, scopes) - return grpc_helpers.create_channel( host, credentials=credentials, credentials_file=credentials_file, quota_project_id=quota_project_id, - **self_signed_jwt_kwargs, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, **kwargs, ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py index 7dbec88061f5..8294da096ec7 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py @@ -87,14 +87,14 @@ def create_channel( aio.Channel: A gRPC AsyncIO channel object. 
""" - self_signed_jwt_kwargs = cls._get_self_signed_jwt_kwargs(host, scopes) - return grpc_helpers_async.create_channel( host, credentials=credentials, credentials_file=credentials_file, quota_project_id=quota_project_id, - **self_signed_jwt_kwargs, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, **kwargs, ) @@ -205,6 +205,7 @@ def __init__( scopes=scopes, quota_project_id=quota_project_id, client_info=client_info, + always_use_jwt_access=True, ) if not self._grpc_channel: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py index c61021e0676a..5c286d9f908f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py @@ -25,6 +25,7 @@ from google.api_core import retry as retries # type: ignore from google.api_core import operations_v1 # type: ignore from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore from google.cloud.bigtable_admin_v2.types import bigtable_table_admin from google.cloud.bigtable_admin_v2.types import table @@ -52,8 +53,6 @@ except pkg_resources.DistributionNotFound: # pragma: NO COVER _GOOGLE_AUTH_VERSION = None -_API_CORE_VERSION = google.api_core.__version__ - class BigtableTableAdminTransport(abc.ABC): """Abstract transport class for BigtableTableAdmin.""" @@ -78,6 +77,7 @@ def __init__( scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, **kwargs, ) -> None: """Instantiate the transport. @@ -101,6 +101,8 @@ def __init__( API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. """ # Save the hostname. Default to port 443 (HTTPS) if none is specified. if ":" not in host: @@ -129,13 +131,20 @@ def __init__( **scopes_kwargs, quota_project_id=quota_project_id ) + # If the credentials is service account credentials, then always try to use self signed JWT. + if ( + always_use_jwt_access + and isinstance(credentials, service_account.Credentials) + and hasattr(service_account.Credentials, "with_always_use_jwt_access") + ): + credentials = credentials.with_always_use_jwt_access(True) + # Save the credentials. self._credentials = credentials - # TODO(busunkim): These two class methods are in the base transport + # TODO(busunkim): This method is in the base transport # to avoid duplicating code across the transport classes. These functions - # should be deleted once the minimum required versions of google-api-core - # and google-auth are increased. + # should be deleted once the minimum required versions of google-auth is increased. 
# TODO: Remove this function once google-auth >= 1.25.0 is required @classmethod @@ -156,27 +165,6 @@ def _get_scopes_kwargs( return scopes_kwargs - # TODO: Remove this function once google-api-core >= 1.26.0 is required - @classmethod - def _get_self_signed_jwt_kwargs( - cls, host: str, scopes: Optional[Sequence[str]] - ) -> Dict[str, Union[Optional[Sequence[str]], str]]: - """Returns kwargs to pass to grpc_helpers.create_channel depending on the google-api-core version""" - - self_signed_jwt_kwargs: Dict[str, Union[Optional[Sequence[str]], str]] = {} - - if _API_CORE_VERSION and ( - packaging.version.parse(_API_CORE_VERSION) - >= packaging.version.parse("1.26.0") - ): - self_signed_jwt_kwargs["default_scopes"] = cls.AUTH_SCOPES - self_signed_jwt_kwargs["scopes"] = scopes - self_signed_jwt_kwargs["default_host"] = cls.DEFAULT_HOST - else: - self_signed_jwt_kwargs["scopes"] = scopes or cls.AUTH_SCOPES - - return self_signed_jwt_kwargs - def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. self._wrapped_methods = { diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py index 337bdcb1caff..240a798cd280 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py @@ -161,6 +161,7 @@ def __init__( scopes=scopes, quota_project_id=quota_project_id, client_info=client_info, + always_use_jwt_access=True, ) if not self._grpc_channel: @@ -216,14 +217,14 @@ def create_channel( and ``credentials_file`` are passed. """ - self_signed_jwt_kwargs = cls._get_self_signed_jwt_kwargs(host, scopes) - return grpc_helpers.create_channel( host, credentials=credentials, credentials_file=credentials_file, quota_project_id=quota_project_id, - **self_signed_jwt_kwargs, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, **kwargs, ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py index 5358404cfef4..a4994a71287d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py @@ -89,14 +89,14 @@ def create_channel( aio.Channel: A gRPC AsyncIO channel object. 
""" - self_signed_jwt_kwargs = cls._get_self_signed_jwt_kwargs(host, scopes) - return grpc_helpers_async.create_channel( host, credentials=credentials, credentials_file=credentials_file, quota_project_id=quota_project_id, - **self_signed_jwt_kwargs, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, **kwargs, ) @@ -207,6 +207,7 @@ def __init__( scopes=scopes, quota_project_id=quota_project_id, client_info=client_info, + always_use_jwt_access=True, ) if not self._grpc_channel: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py index da60f8f83adb..6f5029fe91fe 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py @@ -24,6 +24,7 @@ from google.api_core import gapic_v1 # type: ignore from google.api_core import retry as retries # type: ignore from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore from google.cloud.bigtable_v2.types import bigtable @@ -43,8 +44,6 @@ except pkg_resources.DistributionNotFound: # pragma: NO COVER _GOOGLE_AUTH_VERSION = None -_API_CORE_VERSION = google.api_core.__version__ - class BigtableTransport(abc.ABC): """Abstract transport class for Bigtable.""" @@ -69,6 +68,7 @@ def __init__( scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, **kwargs, ) -> None: """Instantiate the transport. @@ -92,6 +92,8 @@ def __init__( API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. """ # Save the hostname. Default to port 443 (HTTPS) if none is specified. if ":" not in host: @@ -120,13 +122,20 @@ def __init__( **scopes_kwargs, quota_project_id=quota_project_id ) + # If the credentials is service account credentials, then always try to use self signed JWT. + if ( + always_use_jwt_access + and isinstance(credentials, service_account.Credentials) + and hasattr(service_account.Credentials, "with_always_use_jwt_access") + ): + credentials = credentials.with_always_use_jwt_access(True) + # Save the credentials. self._credentials = credentials - # TODO(busunkim): These two class methods are in the base transport + # TODO(busunkim): This method is in the base transport # to avoid duplicating code across the transport classes. These functions - # should be deleted once the minimum required versions of google-api-core - # and google-auth are increased. + # should be deleted once the minimum required versions of google-auth is increased. 
# TODO: Remove this function once google-auth >= 1.25.0 is required @classmethod @@ -147,27 +156,6 @@ def _get_scopes_kwargs( return scopes_kwargs - # TODO: Remove this function once google-api-core >= 1.26.0 is required - @classmethod - def _get_self_signed_jwt_kwargs( - cls, host: str, scopes: Optional[Sequence[str]] - ) -> Dict[str, Union[Optional[Sequence[str]], str]]: - """Returns kwargs to pass to grpc_helpers.create_channel depending on the google-api-core version""" - - self_signed_jwt_kwargs: Dict[str, Union[Optional[Sequence[str]], str]] = {} - - if _API_CORE_VERSION and ( - packaging.version.parse(_API_CORE_VERSION) - >= packaging.version.parse("1.26.0") - ): - self_signed_jwt_kwargs["default_scopes"] = cls.AUTH_SCOPES - self_signed_jwt_kwargs["scopes"] = scopes - self_signed_jwt_kwargs["default_host"] = cls.DEFAULT_HOST - else: - self_signed_jwt_kwargs["scopes"] = scopes or cls.AUTH_SCOPES - - return self_signed_jwt_kwargs - def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. self._wrapped_methods = { diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py index 12d0a50d1a72..21a048e35b5a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py @@ -150,6 +150,7 @@ def __init__( scopes=scopes, quota_project_id=quota_project_id, client_info=client_info, + always_use_jwt_access=True, ) if not self._grpc_channel: @@ -205,14 +206,14 @@ def create_channel( and ``credentials_file`` are passed. """ - self_signed_jwt_kwargs = cls._get_self_signed_jwt_kwargs(host, scopes) - return grpc_helpers.create_channel( host, credentials=credentials, credentials_file=credentials_file, quota_project_id=quota_project_id, - **self_signed_jwt_kwargs, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, **kwargs, ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py index e0da77a75bc5..ab448a6b640c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py @@ -79,14 +79,14 @@ def create_channel( aio.Channel: A gRPC AsyncIO channel object. 
""" - self_signed_jwt_kwargs = cls._get_self_signed_jwt_kwargs(host, scopes) - return grpc_helpers_async.create_channel( host, credentials=credentials, credentials_file=credentials_file, quota_project_id=quota_project_id, - **self_signed_jwt_kwargs, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, **kwargs, ) @@ -196,6 +196,7 @@ def __init__( scopes=scopes, quota_project_id=quota_project_id, client_info=client_info, + always_use_jwt_access=True, ) if not self._grpc_channel: diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index 046742719f78..196c1e22317e 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -42,9 +42,6 @@ ) from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import pagers from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import transports -from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.base import ( - _API_CORE_VERSION, -) from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.base import ( _GOOGLE_AUTH_VERSION, ) @@ -62,8 +59,9 @@ import google.auth -# TODO(busunkim): Once google-api-core >= 1.26.0 is required: -# - Delete all the api-core and auth "less than" test cases +# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively +# through google-api-core: +# - Delete the auth "less than" test cases # - Delete these pytest markers (Make the "greater than or equal to" tests the default). 
requires_google_auth_lt_1_25_0 = pytest.mark.skipif( packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), @@ -74,16 +72,6 @@ reason="This test requires google-auth >= 1.25.0", ) -requires_api_core_lt_1_26_0 = pytest.mark.skipif( - packaging.version.parse(_API_CORE_VERSION) >= packaging.version.parse("1.26.0"), - reason="This test requires google-api-core < 1.26.0", -) - -requires_api_core_gte_1_26_0 = pytest.mark.skipif( - packaging.version.parse(_API_CORE_VERSION) < packaging.version.parse("1.26.0"), - reason="This test requires google-api-core >= 1.26.0", -) - def client_cert_source_callback(): return b"cert bytes", b"key bytes" @@ -147,6 +135,18 @@ def test_bigtable_instance_admin_client_from_service_account_info(client_class): assert client.transport._host == "bigtableadmin.googleapis.com:443" +@pytest.mark.parametrize( + "client_class", [BigtableInstanceAdminClient, BigtableInstanceAdminAsyncClient,] +) +def test_bigtable_instance_admin_client_service_account_always_use_jwt(client_class): + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + client = client_class(credentials=creds) + use_jwt.assert_called_with(True) + + @pytest.mark.parametrize( "client_class", [BigtableInstanceAdminClient, BigtableInstanceAdminAsyncClient,] ) @@ -5088,7 +5088,6 @@ def test_bigtable_instance_admin_transport_auth_adc_old_google_auth(transport_cl (transports.BigtableInstanceAdminGrpcAsyncIOTransport, grpc_helpers_async), ], ) -@requires_api_core_gte_1_26_0 def test_bigtable_instance_admin_transport_create_channel( transport_class, grpc_helpers ): @@ -5127,87 +5126,6 @@ def test_bigtable_instance_admin_transport_create_channel( ) -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.BigtableInstanceAdminGrpcTransport, grpc_helpers), - (transports.BigtableInstanceAdminGrpcAsyncIOTransport, grpc_helpers_async), - ], -) -@requires_api_core_lt_1_26_0 -def test_bigtable_instance_admin_transport_create_channel_old_api_core( - transport_class, grpc_helpers -): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object( - google.auth, "default", autospec=True - ) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class(quota_project_id="octopus") - - create_channel.assert_called_with( - "bigtableadmin.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - scopes=( - "https://www.googleapis.com/auth/bigtable.admin", - "https://www.googleapis.com/auth/bigtable.admin.cluster", - "https://www.googleapis.com/auth/bigtable.admin.instance", - "https://www.googleapis.com/auth/cloud-bigtable.admin", - "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster", - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - ), - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.BigtableInstanceAdminGrpcTransport, grpc_helpers), - (transports.BigtableInstanceAdminGrpcAsyncIOTransport, grpc_helpers_async), - ], -) -@requires_api_core_lt_1_26_0 -def test_bigtable_instance_admin_transport_create_channel_user_scopes( - transport_class, grpc_helpers -): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object( - google.auth, "default", autospec=True - ) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - - create_channel.assert_called_with( - "bigtableadmin.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - scopes=["1", "2"], - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - @pytest.mark.parametrize( "transport_class", [ diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index 5b5918a7d624..c09713ec29a6 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -42,9 +42,6 @@ ) from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import pagers from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import transports -from google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.base import ( - _API_CORE_VERSION, -) from google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.base import ( _GOOGLE_AUTH_VERSION, ) @@ -65,8 +62,9 @@ import google.auth -# TODO(busunkim): Once google-api-core >= 1.26.0 is required: -# - Delete all the api-core and auth "less than" test cases +# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively +# through google-api-core: +# - Delete the auth "less than" test cases # - Delete these pytest markers (Make the "greater than or equal to" tests the default). 
requires_google_auth_lt_1_25_0 = pytest.mark.skipif( packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), @@ -77,16 +75,6 @@ reason="This test requires google-auth >= 1.25.0", ) -requires_api_core_lt_1_26_0 = pytest.mark.skipif( - packaging.version.parse(_API_CORE_VERSION) >= packaging.version.parse("1.26.0"), - reason="This test requires google-api-core < 1.26.0", -) - -requires_api_core_gte_1_26_0 = pytest.mark.skipif( - packaging.version.parse(_API_CORE_VERSION) < packaging.version.parse("1.26.0"), - reason="This test requires google-api-core >= 1.26.0", -) - def client_cert_source_callback(): return b"cert bytes", b"key bytes" @@ -150,6 +138,18 @@ def test_bigtable_table_admin_client_from_service_account_info(client_class): assert client.transport._host == "bigtableadmin.googleapis.com:443" +@pytest.mark.parametrize( + "client_class", [BigtableTableAdminClient, BigtableTableAdminAsyncClient,] +) +def test_bigtable_table_admin_client_service_account_always_use_jwt(client_class): + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + client = client_class(credentials=creds) + use_jwt.assert_called_with(True) + + @pytest.mark.parametrize( "client_class", [BigtableTableAdminClient, BigtableTableAdminAsyncClient,] ) @@ -5934,7 +5934,6 @@ def test_bigtable_table_admin_transport_auth_adc_old_google_auth(transport_class (transports.BigtableTableAdminGrpcAsyncIOTransport, grpc_helpers_async), ], ) -@requires_api_core_gte_1_26_0 def test_bigtable_table_admin_transport_create_channel(transport_class, grpc_helpers): # If credentials and host are not provided, the transport class should use # ADC credentials. @@ -5970,86 +5969,6 @@ def test_bigtable_table_admin_transport_create_channel(transport_class, grpc_hel ) -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.BigtableTableAdminGrpcTransport, grpc_helpers), - (transports.BigtableTableAdminGrpcAsyncIOTransport, grpc_helpers_async), - ], -) -@requires_api_core_lt_1_26_0 -def test_bigtable_table_admin_transport_create_channel_old_api_core( - transport_class, grpc_helpers -): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object( - google.auth, "default", autospec=True - ) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class(quota_project_id="octopus") - - create_channel.assert_called_with( - "bigtableadmin.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - scopes=( - "https://www.googleapis.com/auth/bigtable.admin", - "https://www.googleapis.com/auth/bigtable.admin.table", - "https://www.googleapis.com/auth/cloud-bigtable.admin", - "https://www.googleapis.com/auth/cloud-bigtable.admin.table", - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - ), - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.BigtableTableAdminGrpcTransport, grpc_helpers), - (transports.BigtableTableAdminGrpcAsyncIOTransport, grpc_helpers_async), - ], -) -@requires_api_core_lt_1_26_0 -def test_bigtable_table_admin_transport_create_channel_user_scopes( - transport_class, grpc_helpers -): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object( - google.auth, "default", autospec=True - ) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - - create_channel.assert_called_with( - "bigtableadmin.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - scopes=["1", "2"], - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - @pytest.mark.parametrize( "transport_class", [ diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py index 8fd55715c7e8..ab9763a79eef 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py @@ -34,7 +34,6 @@ from google.cloud.bigtable_v2.services.bigtable import BigtableAsyncClient from google.cloud.bigtable_v2.services.bigtable import BigtableClient from google.cloud.bigtable_v2.services.bigtable import transports -from google.cloud.bigtable_v2.services.bigtable.transports.base import _API_CORE_VERSION from google.cloud.bigtable_v2.services.bigtable.transports.base import ( _GOOGLE_AUTH_VERSION, ) @@ -44,8 +43,9 @@ import google.auth -# TODO(busunkim): Once google-api-core >= 1.26.0 is required: -# - Delete all the api-core and auth "less than" test cases +# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively +# through google-api-core: +# - Delete the auth "less than" test cases # - Delete these pytest markers (Make the "greater than or equal to" tests the default). 
requires_google_auth_lt_1_25_0 = pytest.mark.skipif( packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), @@ -56,16 +56,6 @@ reason="This test requires google-auth >= 1.25.0", ) -requires_api_core_lt_1_26_0 = pytest.mark.skipif( - packaging.version.parse(_API_CORE_VERSION) >= packaging.version.parse("1.26.0"), - reason="This test requires google-api-core < 1.26.0", -) - -requires_api_core_gte_1_26_0 = pytest.mark.skipif( - packaging.version.parse(_API_CORE_VERSION) < packaging.version.parse("1.26.0"), - reason="This test requires google-api-core >= 1.26.0", -) - def client_cert_source_callback(): return b"cert bytes", b"key bytes" @@ -121,6 +111,16 @@ def test_bigtable_client_from_service_account_info(client_class): assert client.transport._host == "bigtable.googleapis.com:443" +@pytest.mark.parametrize("client_class", [BigtableClient, BigtableAsyncClient,]) +def test_bigtable_client_service_account_always_use_jwt(client_class): + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + client = client_class(credentials=creds) + use_jwt.assert_called_with(True) + + @pytest.mark.parametrize("client_class", [BigtableClient, BigtableAsyncClient,]) def test_bigtable_client_from_service_account_file(client_class): creds = ga_credentials.AnonymousCredentials() @@ -2159,7 +2159,6 @@ def test_bigtable_transport_auth_adc_old_google_auth(transport_class): (transports.BigtableGrpcAsyncIOTransport, grpc_helpers_async), ], ) -@requires_api_core_gte_1_26_0 def test_bigtable_transport_create_channel(transport_class, grpc_helpers): # If credentials and host are not provided, the transport class should use # ADC credentials. @@ -2195,82 +2194,6 @@ def test_bigtable_transport_create_channel(transport_class, grpc_helpers): ) -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.BigtableGrpcTransport, grpc_helpers), - (transports.BigtableGrpcAsyncIOTransport, grpc_helpers_async), - ], -) -@requires_api_core_lt_1_26_0 -def test_bigtable_transport_create_channel_old_api_core(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object( - google.auth, "default", autospec=True - ) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class(quota_project_id="octopus") - - create_channel.assert_called_with( - "bigtable.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - scopes=( - "https://www.googleapis.com/auth/bigtable.data", - "https://www.googleapis.com/auth/bigtable.data.readonly", - "https://www.googleapis.com/auth/cloud-bigtable.data", - "https://www.googleapis.com/auth/cloud-bigtable.data.readonly", - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - ), - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.BigtableGrpcTransport, grpc_helpers), - (transports.BigtableGrpcAsyncIOTransport, grpc_helpers_async), - ], -) -@requires_api_core_lt_1_26_0 -def test_bigtable_transport_create_channel_user_scopes(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object( - google.auth, "default", autospec=True - ) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - - create_channel.assert_called_with( - "bigtable.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - scopes=["1", "2"], - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - @pytest.mark.parametrize( "transport_class", [transports.BigtableGrpcTransport, transports.BigtableGrpcAsyncIOTransport], From 525813e8a927a2a532abdbc5db613515c62afdef Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Thu, 24 Jun 2021 17:13:38 -0400 Subject: [PATCH 462/892] tests: remove unused fixtures (#336) Toward #335. 
--- .../tests/unit/test_app_profile.py | 23 ---------------- .../tests/unit/test_cluster.py | 24 ----------------- .../tests/unit/test_row_data.py | 27 ------------------- .../tests/unit/test_table.py | 6 ----- 4 files changed, 80 deletions(-) diff --git a/packages/google-cloud-bigtable/tests/unit/test_app_profile.py b/packages/google-cloud-bigtable/tests/unit/test_app_profile.py index d0a08c5e12b1..6422e87e9419 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_app_profile.py +++ b/packages/google-cloud-bigtable/tests/unit/test_app_profile.py @@ -20,29 +20,6 @@ from ._testing import _make_credentials -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - return self.channel_stub.responses.pop() - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - class TestAppProfile(unittest.TestCase): PROJECT = "project" diff --git a/packages/google-cloud-bigtable/tests/unit/test_cluster.py b/packages/google-cloud-bigtable/tests/unit/test_cluster.py index 49a32ea56919..1194e53c9cca 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_cluster.py +++ b/packages/google-cloud-bigtable/tests/unit/test_cluster.py @@ -21,30 +21,6 @@ from ._testing import _make_credentials -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - return self.channel_stub.responses.pop() - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - class TestCluster(unittest.TestCase): PROJECT = "project" diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_data.py b/packages/google-cloud-bigtable/tests/unit/test_row_data.py index 21c0a582b4cc..d855ffeee9f6 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_data.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_data.py @@ -22,33 +22,6 @@ from google.cloud.bigtable_v2.types import data as data_v2_pb2 -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - return self.channel_stub.responses.pop() - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - def unary_stream(self, method, 
request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - class TestCell(unittest.TestCase): timestamp_micros = 18738724000 # Make sure millis granularity diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index ccb8350a30a2..307038ecde56 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -2272,12 +2272,6 @@ def _ReadRowsResponsePB(*args, **kw): return messages_v2_pb2.ReadRowsResponse(*args, **kw) -def _mutate_rows_request_pb(*args, **kw): - from google.cloud.bigtable_v2.types import bigtable as data_messages_v2_pb2 - - return data_messages_v2_pb2.MutateRowsRequest(*args, **kw) - - class _MockReadRowsIterator(object): def __init__(self, *values): self.iter_values = iter(values) From be16bc0adfd2bb6861da6ffd027e1298f1069870 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Fri, 25 Jun 2021 13:40:06 -0400 Subject: [PATCH 463/892] tests: add coverage for 'encryption_info' module (#342) Toward #335. --- .../tests/unit/test_encryption_info.py | 167 ++++++++++++++++++ 1 file changed, 167 insertions(+) create mode 100644 packages/google-cloud-bigtable/tests/unit/test_encryption_info.py diff --git a/packages/google-cloud-bigtable/tests/unit/test_encryption_info.py b/packages/google-cloud-bigtable/tests/unit/test_encryption_info.py new file mode 100644 index 000000000000..ede6f4883bff --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/test_encryption_info.py @@ -0,0 +1,167 @@ +# Copyright 2021 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest + +import mock + +from google.cloud.bigtable import enums + + +EncryptionType = enums.EncryptionInfo.EncryptionType +_STATUS_CODE = 123 +_STATUS_MESSAGE = "message" +_KMS_KEY_VERSION = 345 + + +def _make_status_pb(code=_STATUS_CODE, message=_STATUS_MESSAGE): + from google.rpc.status_pb2 import Status + + return Status(code=code, message=message) + + +def _make_status(code=_STATUS_CODE, message=_STATUS_MESSAGE): + from google.cloud.bigtable.error import Status + + status_pb = _make_status_pb(code=code, message=message) + return Status(status_pb) + + +def _make_info_pb( + encryption_type=EncryptionType.GOOGLE_DEFAULT_ENCRYPTION, + code=_STATUS_CODE, + message=_STATUS_MESSAGE, + kms_key_version=_KMS_KEY_VERSION, +): + encryption_status = _make_status_pb(code=code, message=message) + + spec = ["encryption_type", "encryption_status", "kms_key_version"] + return mock.Mock( + spec=spec, + encryption_type=encryption_type, + encryption_status=encryption_status, + kms_key_version=kms_key_version, + ) + + +class TestEncryptionInfo(unittest.TestCase): + @staticmethod + def _get_target_class(): + from google.cloud.bigtable.encryption_info import EncryptionInfo + + return EncryptionInfo + + def _make_one(self, encryption_type, encryption_status, kms_key_version): + return self._get_target_class()( + encryption_type, encryption_status, kms_key_version, + ) + + def _make_one_defaults( + self, + encryption_type=EncryptionType.GOOGLE_DEFAULT_ENCRYPTION, + code=_STATUS_CODE, + message=_STATUS_MESSAGE, + kms_key_version=_KMS_KEY_VERSION, + ): + encryption_status = _make_status(code=code, message=message) + return self._make_one(encryption_type, encryption_status, kms_key_version) + + def test__from_pb(self): + klass = self._get_target_class() + info_pb = _make_info_pb() + + info = klass._from_pb(info_pb) + + self.assertEqual( + info.encryption_type, EncryptionType.GOOGLE_DEFAULT_ENCRYPTION, + ) + self.assertEqual(info.encryption_status.code, _STATUS_CODE) + self.assertEqual(info.encryption_status.message, _STATUS_MESSAGE) + self.assertEqual(info.kms_key_version, _KMS_KEY_VERSION) + + def test_ctor(self): + encryption_type = EncryptionType.GOOGLE_DEFAULT_ENCRYPTION + encryption_status = _make_status() + + info = self._make_one( + encryption_type=encryption_type, + encryption_status=encryption_status, + kms_key_version=_KMS_KEY_VERSION, + ) + + self.assertEqual(info.encryption_type, encryption_type) + self.assertEqual(info.encryption_status, encryption_status) + self.assertEqual(info.kms_key_version, _KMS_KEY_VERSION) + + def test___eq___identity(self): + info = self._make_one_defaults() + self.assertTrue(info == info) + + def test___eq___wrong_type(self): + info = self._make_one_defaults() + other = object() + self.assertFalse(info == other) + + def test___eq___same_values(self): + info = self._make_one_defaults() + other = self._make_one_defaults() + self.assertTrue(info == other) + + def test___eq___different_encryption_type(self): + info = self._make_one_defaults() + other = self._make_one_defaults( + encryption_type=EncryptionType.CUSTOMER_MANAGED_ENCRYPTION, + ) + self.assertFalse(info == other) + + def test___eq___different_encryption_status(self): + info = self._make_one_defaults() + other = self._make_one_defaults(code=456) + self.assertFalse(info == other) + + def test___eq___different_kms_key_version(self): + info = self._make_one_defaults() + other = self._make_one_defaults(kms_key_version=789) + self.assertFalse(info == other) + + def test___ne___identity(self): + info = 
self._make_one_defaults() + self.assertFalse(info != info) + + def test___ne___wrong_type(self): + info = self._make_one_defaults() + other = object() + self.assertTrue(info != other) + + def test___ne___same_values(self): + info = self._make_one_defaults() + other = self._make_one_defaults() + self.assertFalse(info != other) + + def test___ne___different_encryption_type(self): + info = self._make_one_defaults() + other = self._make_one_defaults( + encryption_type=EncryptionType.CUSTOMER_MANAGED_ENCRYPTION, + ) + self.assertTrue(info != other) + + def test___ne___different_encryption_status(self): + info = self._make_one_defaults() + other = self._make_one_defaults(code=456) + self.assertTrue(info != other) + + def test___ne___different_kms_key_version(self): + info = self._make_one_defaults() + other = self._make_one_defaults(kms_key_version=789) + self.assertTrue(info != other) From 8f38a37027b068e9c66cd8034e4dd93ee23e69b3 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Fri, 25 Jun 2021 13:40:42 -0400 Subject: [PATCH 464/892] tests: complete coverage of 'PartialRowsData' (#338) Add a test for cancellation during iteration of rows. Toward #335 --- .../tests/unit/test_row_data.py | 38 ++++++++++++++++++- 1 file changed, 37 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_data.py b/packages/google-cloud-bigtable/tests/unit/test_row_data.py index d855ffeee9f6..ae5e6091fefb 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_data.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_data.py @@ -490,7 +490,7 @@ def test_multiple_chunks(self): client._table_data_client = data_api request = object() - yrd = self._make_one(client._table_data_client.read_rows, request) + yrd = self._make_one(data_api.read_rows, request) yrd.response_iterator = iterator rows = [row for row in yrd] @@ -511,6 +511,42 @@ def test_cancel(self): self.assertEqual(response_iterator.cancel_calls, 1) self.assertEqual(list(yield_rows_data), []) + def test_cancel_between_chunks(self): + from google.cloud.bigtable_v2.services.bigtable import BigtableClient + + chunk1 = _ReadRowsResponseCellChunkPB( + row_key=self.ROW_KEY, + family_name=self.FAMILY_NAME, + qualifier=self.QUALIFIER, + timestamp_micros=self.TIMESTAMP_MICROS, + value=self.VALUE, + commit_row=True, + ) + chunk2 = _ReadRowsResponseCellChunkPB( + qualifier=self.QUALIFIER + b"1", + timestamp_micros=self.TIMESTAMP_MICROS, + value=self.VALUE, + commit_row=True, + ) + chunks = [chunk1, chunk2] + response = _ReadRowsResponseV2(chunks) + response_iterator = _MockCancellableIterator(response) + + client = _Client() + data_api = mock.create_autospec(BigtableClient) + client._table_data_client = data_api + request = object() + yrd = self._make_one(data_api.read_rows, request) + yrd.response_iterator = response_iterator + + rows = [] + for row in yrd: + yrd.cancel() + rows.append(row) + + self.assertEqual(response_iterator.cancel_calls, 1) + self.assertEqual(list(yrd), []) + # 'consume_next' tested via 'TestPartialRowsData_JSON_acceptance_tests' def test__copy_from_previous_unset(self): From 090649dc626d390b2570696d402a576215e7c986 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Fri, 25 Jun 2021 13:40:56 -0400 Subject: [PATCH 465/892] tests: restore assertion for API call (#339) --- packages/google-cloud-bigtable/tests/unit/test_backup.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/tests/unit/test_backup.py 
b/packages/google-cloud-bigtable/tests/unit/test_backup.py index 49168e04eb86..bd3e7610085f 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_backup.py +++ b/packages/google-cloud-bigtable/tests/unit/test_backup.py @@ -502,7 +502,9 @@ def test_exists_grpc_error(self): with self.assertRaises(Unknown): backup.exists() - api.get_backup(self.BACKUP_NAME) + + request = {"name": self.BACKUP_NAME} + api.get_backup.assert_called_once_with(request) def test_exists_not_found(self): from google.api_core.exceptions import NotFound From 62b9e5f3e2882afaa9d174d94408d6e4f604f47f Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Fri, 25 Jun 2021 13:41:18 -0400 Subject: [PATCH 466/892] tests: add coverage for 'error' module (#340) Toward #335. --- .../tests/unit/test_error.py | 97 +++++++++++++++++++ 1 file changed, 97 insertions(+) create mode 100644 packages/google-cloud-bigtable/tests/unit/test_error.py diff --git a/packages/google-cloud-bigtable/tests/unit/test_error.py b/packages/google-cloud-bigtable/tests/unit/test_error.py new file mode 100644 index 000000000000..c53d63991d51 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/test_error.py @@ -0,0 +1,97 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + + +class TestStatus(unittest.TestCase): + @staticmethod + def _get_target_class(): + from google.cloud.bigtable.error import Status + + return Status + + @staticmethod + def _make_status_pb(**kwargs): + from google.rpc.status_pb2 import Status + + return Status(**kwargs) + + def _make_one(self, status_pb): + return self._get_target_class()(status_pb) + + def test_ctor(self): + status_pb = self._make_status_pb() + status = self._make_one(status_pb) + self.assertIs(status.status_pb, status_pb) + + def test_code(self): + code = 123 + status_pb = self._make_status_pb(code=code) + status = self._make_one(status_pb) + self.assertEqual(status.code, code) + + def test_message(self): + message = "message" + status_pb = self._make_status_pb(message=message) + status = self._make_one(status_pb) + self.assertEqual(status.message, message) + + def test___eq___self(self): + status_pb = self._make_status_pb() + status = self._make_one(status_pb) + self.assertTrue(status == status) + + def test___eq___other_hit(self): + status_pb = self._make_status_pb(code=123, message="message") + status = self._make_one(status_pb) + other = self._make_one(status_pb) + self.assertTrue(status == other) + + def test___eq___other_miss(self): + status_pb = self._make_status_pb(code=123, message="message") + other_status_pb = self._make_status_pb(code=456, message="oops") + status = self._make_one(status_pb) + other = self._make_one(other_status_pb) + self.assertFalse(status == other) + + def test___eq___wrong_type(self): + status_pb = self._make_status_pb(code=123, message="message") + status = self._make_one(status_pb) + other = object() + self.assertFalse(status == other) + + def test___ne___self(self): + status_pb = self._make_status_pb() + status = 
self._make_one(status_pb) + self.assertFalse(status != status) + + def test___ne___other_hit(self): + status_pb = self._make_status_pb(code=123, message="message") + status = self._make_one(status_pb) + other = self._make_one(status_pb) + self.assertFalse(status != other) + + def test___ne___other_miss(self): + status_pb = self._make_status_pb(code=123, message="message") + other_status_pb = self._make_status_pb(code=456, message="oops") + status = self._make_one(status_pb) + other = self._make_one(other_status_pb) + self.assertTrue(status != other) + + def test___ne___wrong_type(self): + status_pb = self._make_status_pb(code=123, message="message") + status = self._make_one(status_pb) + other = object() + self.assertTrue(status != other) From 47d03f73ee5759f90561d629f6bd511ec2f87778 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Fri, 25 Jun 2021 14:56:46 -0400 Subject: [PATCH 467/892] tests: restore coverage for 'google.cloud.bigtable.table' to 100% (#337) Also, document retryable errors as a module scope constant. Toward #335 --- .../google/cloud/bigtable/table.py | 11 +- .../tests/unit/test_table.py | 431 +++++++++--------- 2 files changed, 213 insertions(+), 229 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index 95fb55c50e4c..bff4b7a2aba4 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -53,6 +53,9 @@ _MAX_BULK_MUTATIONS = 100000 VIEW_NAME_ONLY = enums.Table.View.NAME_ONLY +RETRYABLE_MUTATION_ERRORS = (Aborted, DeadlineExceeded, ServiceUnavailable) +"""Errors which can be retried during row mutation.""" + class _BigtableRetryableError(Exception): """Retry-able error expected by the default retry strategy.""" @@ -1039,10 +1042,8 @@ class _RetryableMutateRowsWorker(object): are retryable, any subsequent call on this callable will be a no-op. """ - RETRY_CODES = ( - Aborted.grpc_status_code.value[0], - DeadlineExceeded.grpc_status_code.value[0], - ServiceUnavailable.grpc_status_code.value[0], + RETRY_CODES = tuple( + retryable.grpc_status_code.value[0] for retryable in RETRYABLE_MUTATION_ERRORS ) def __init__(self, client, table_name, rows, app_profile_id=None, timeout=None): @@ -1125,7 +1126,7 @@ def _do_mutate_retryable_rows(self): retry=None, **kwargs ) - except (ServiceUnavailable, DeadlineExceeded, Aborted): + except RETRYABLE_MUTATION_ERRORS: # If an exception, considered retryable by `RETRY_CODES`, is # returned from the initial call, consider # it to be retryable. Wrap as a Bigtable Retryable Error. 
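The table.py hunk above centralizes the retryable gRPC errors in a module-level RETRYABLE_MUTATION_ERRORS tuple and derives the worker's RETRY_CODES from it, so the numeric retry codes and the except clause in _do_mutate_retryable_rows can no longer drift apart. The following is a minimal, standalone sketch of that derivation only, assuming google-api-core[grpc] is installed; the surrounding _RetryableMutateRowsWorker machinery is omitted and the __main__ check is added purely for illustration:

    # Sketch: derive integer gRPC status codes from the api_core exception
    # classes listed in RETRYABLE_MUTATION_ERRORS, mirroring the hunk above.
    from google.api_core.exceptions import Aborted, DeadlineExceeded, ServiceUnavailable

    RETRYABLE_MUTATION_ERRORS = (Aborted, DeadlineExceeded, ServiceUnavailable)

    # Each api_core exception class carries its grpc.StatusCode; ``value[0]``
    # is the numeric code (ABORTED=10, DEADLINE_EXCEEDED=4, UNAVAILABLE=14).
    RETRY_CODES = tuple(
        exc.grpc_status_code.value[0] for exc in RETRYABLE_MUTATION_ERRORS
    )

    if __name__ == "__main__":
        print(RETRY_CODES)  # expected: (10, 4, 14)

Keeping the exception classes as the single source of truth means both the retry predicate (which compares status codes) and the try/except around the MutateRows call are defined by one constant, which is what the accompanying test_table.py changes below exercise via RETRYABLES.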
diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index 307038ecde56..109e37dcfcb2 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -1533,6 +1533,8 @@ class Test__RetryableMutateRowsWorker(unittest.TestCase): SUCCESS = StatusCode.OK.value[0] RETRYABLE_1 = StatusCode.DEADLINE_EXCEEDED.value[0] RETRYABLE_2 = StatusCode.ABORTED.value[0] + RETRYABLE_3 = StatusCode.UNAVAILABLE.value[0] + RETRYABLES = (RETRYABLE_1, RETRYABLE_2, RETRYABLE_3) NON_RETRYABLE = StatusCode.CANCELLED.value[0] @staticmethod @@ -1711,31 +1713,21 @@ def test_callable_retry(self): self.assertEqual(client._table_data_client.mutate_rows.call_count, 2) self.assertEqual(result, expected_result) - def test_do_mutate_retryable_rows_empty_rows(self): - from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - client as bigtable_table_admin, - ) - - table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - client._table_admin_client = table_api - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_table(self.TABLE_ID, instance) - - worker = self._make_worker(client, table.name, []) - statuses = worker._do_mutate_retryable_rows() - - self.assertEqual(len(statuses), 0) - - def test_do_mutate_retryable_rows(self): + def _do_mutate_retryable_rows_helper( + self, + row_cells, + responses, + prior_statuses=None, + expected_result=None, + raising_retry=False, + retryable_error=False, + timeout=None, + ): + from google.api_core.exceptions import ServiceUnavailable from google.cloud.bigtable.row import DirectRow + from google.cloud.bigtable.table import _BigtableRetryableError from google.cloud.bigtable_v2.services.bigtable import BigtableClient - from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - client as bigtable_table_admin, - ) + from google.cloud.bigtable_v2.types import bigtable as data_messages_v2_pb2 # Setup: # - Mutate 2 rows. @@ -1745,101 +1737,160 @@ def test_do_mutate_retryable_rows(self): # - Expect [success, non-retryable] data_api = mock.create_autospec(BigtableClient) - table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) credentials = _make_credentials() client = self._make_client( project="project-id", credentials=credentials, admin=True ) client._table_data_client = data_api - client._table_admin_client = table_api instance = client.instance(instance_id=self.INSTANCE_ID) table = self._make_table(self.TABLE_ID, instance) - row_1 = DirectRow(row_key=b"row_key", table=table) - row_1.set_cell("cf", b"col", b"value1") - row_2 = DirectRow(row_key=b"row_key_2", table=table) - row_2.set_cell("cf", b"col", b"value2") + rows = [] + for row_key, cell_data in row_cells: + row = DirectRow(row_key=row_key, table=table) + row.set_cell(*cell_data) + rows.append(row) - response = self._make_responses([self.SUCCESS, self.NON_RETRYABLE]) + response = self._make_responses(responses) - # Patch the stub used by the API method. 
- client._table_data_client.mutate_rows.side_effect = [[response]] - table._instance._client._table_data_client = data_api - table._instance._client._table_admin_client = table_api + if retryable_error: + data_api.mutate_rows.side_effect = ServiceUnavailable("testing") + else: + data_api.mutate_rows.side_effect = [[response]] - worker = self._make_worker(client, table.name, [row_1, row_2]) - statuses = worker._do_mutate_retryable_rows() + worker = self._make_worker(client, table.name, rows=rows) + if prior_statuses is not None: + assert len(prior_statuses) == len(rows) + worker.responses_statuses = self._make_responses_statuses(prior_statuses) - result = [status.code for status in statuses] - expected_result = [self.SUCCESS, self.NON_RETRYABLE] + expected_entries = [] + for row, prior_status in zip(rows, worker.responses_statuses): - self.assertEqual(result, expected_result) + if prior_status is None or prior_status.code in self.RETRYABLES: + mutations = row._get_mutations().copy() # row clears on success + entry = data_messages_v2_pb2.MutateRowsRequest.Entry( + row_key=row.row_key, mutations=mutations, + ) + expected_entries.append(entry) - def test_do_mutate_retryable_rows_retry(self): - from google.cloud.bigtable.row import DirectRow - from google.cloud.bigtable.table import _BigtableRetryableError - from google.cloud.bigtable_v2.services.bigtable import BigtableClient - from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - client as bigtable_table_admin, - ) + expected_kwargs = {} + if timeout is not None: + worker.timeout = timeout + expected_kwargs["timeout"] = mock.ANY + if retryable_error or raising_retry: + with self.assertRaises(_BigtableRetryableError): + worker._do_mutate_retryable_rows() + statuses = worker.responses_statuses + else: + statuses = worker._do_mutate_retryable_rows() + + if not retryable_error: + result = [status.code for status in statuses] + + if expected_result is None: + expected_result = responses + + self.assertEqual(result, expected_result) + + if len(responses) == 0 and not retryable_error: + data_api.mutate_rows.assert_not_called() + else: + data_api.mutate_rows.assert_called_once_with( + table_name=table.name, + entries=expected_entries, + app_profile_id=None, + retry=None, + **expected_kwargs, + ) + if timeout is not None: + called = data_api.mutate_rows.mock_calls[0] + self.assertEqual(called.kwargs["timeout"]._deadline, timeout) + + def test_do_mutate_retryable_rows_empty_rows(self): + # # Setup: - # - Mutate 3 rows. + # - No mutated rows. # Action: - # - Initial attempt will mutate all 3 rows. + # - No API call made. # Expectation: - # - Second row returns retryable error code, so expect a raise. - # - State of responses_statuses should be - # [success, retryable, non-retryable] + # - No change. 
+ # + row_cells = [] + responses = [] - data_api = mock.create_autospec(BigtableClient) - table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - client._table_data_client = data_api - client._table_admin_client = table_api - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_table(self.TABLE_ID, instance) + self._do_mutate_retryable_rows_helper(row_cells, responses) - row_1 = DirectRow(row_key=b"row_key", table=table) - row_1.set_cell("cf", b"col", b"value1") - row_2 = DirectRow(row_key=b"row_key_2", table=table) - row_2.set_cell("cf", b"col", b"value2") - row_3 = DirectRow(row_key=b"row_key_3", table=table) - row_3.set_cell("cf", b"col", b"value3") + def test_do_mutate_retryable_rows_w_timeout(self): + # + # Setup: + # - Mutate 2 rows. + # Action: + # - Initial attempt will mutate all 2 rows. + # Expectation: + # - No retryable error codes, so don't expect a raise. + # - State of responses_statuses should be [success, non-retryable]. + # + row_cells = [ + (b"row_key_1", ("cf", b"col", b"value1")), + (b"row_key_2", ("cf", b"col", b"value2")), + ] - response = self._make_responses( - [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE] - ) + responses = [self.SUCCESS, self.NON_RETRYABLE] - # Patch the stub used by the API method. - client._table_data_client.mutate_rows.side_effect = [[response]] + timeout = 5 # seconds - table._instance._client._table_data_client = data_api - table._instance._client._table_admin_client = table_api + self._do_mutate_retryable_rows_helper( + row_cells, responses, timeout=timeout, + ) - worker = self._make_worker(client, table.name, [row_1, row_2, row_3]) + def test_do_mutate_retryable_rows_w_retryable_error(self): + # + # Setup: + # - Mutate 2 rows. + # Action: + # - Initial attempt will mutate all 2 rows. + # Expectation: + # - No retryable error codes, so don't expect a raise. + # - State of responses_statuses should be [success, non-retryable]. + # + row_cells = [ + (b"row_key_1", ("cf", b"col", b"value1")), + (b"row_key_2", ("cf", b"col", b"value2")), + ] - with self.assertRaises(_BigtableRetryableError): - worker._do_mutate_retryable_rows() + responses = () - statuses = worker.responses_statuses - result = [status.code for status in statuses] - expected_result = [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE] + self._do_mutate_retryable_rows_helper( + row_cells, responses, retryable_error=True, + ) - self.assertEqual(result, expected_result) + def test_do_mutate_retryable_rows_retry(self): + # + # Setup: + # - Mutate 3 rows. + # Action: + # - Initial attempt will mutate all 3 rows. + # Expectation: + # - Second row returns retryable error code, so expect a raise. 
+ # - State of responses_statuses should be + # [success, retryable, non-retryable] + # + row_cells = [ + (b"row_key_1", ("cf", b"col", b"value1")), + (b"row_key_2", ("cf", b"col", b"value2")), + (b"row_key_3", ("cf", b"col", b"value3")), + ] - def test_do_mutate_retryable_rows_second_retry(self): - from google.cloud.bigtable.row import DirectRow - from google.cloud.bigtable.table import _BigtableRetryableError - from google.cloud.bigtable_v2.services.bigtable import BigtableClient - from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - client as bigtable_table_admin, + responses = [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE] + + self._do_mutate_retryable_rows_helper( + row_cells, responses, raising_retry=True, ) + def test_do_mutate_retryable_rows_second_retry(self): + # # Setup: # - Mutate 4 rows. # - First try results: @@ -1853,45 +1904,23 @@ def test_do_mutate_retryable_rows_second_retry(self): # so expect a raise. # - Exception contains response whose index should be '3' even though # only two rows were retried. + # + row_cells = [ + (b"row_key_1", ("cf", b"col", b"value1")), + (b"row_key_2", ("cf", b"col", b"value2")), + (b"row_key_3", ("cf", b"col", b"value3")), + (b"row_key_4", ("cf", b"col", b"value4")), + ] - data_api = mock.create_autospec(BigtableClient) - table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - client._table_data_client = data_api - client._table_admin_client = table_api - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_table(self.TABLE_ID, instance) - - row_1 = DirectRow(row_key=b"row_key", table=table) - row_1.set_cell("cf", b"col", b"value1") - row_2 = DirectRow(row_key=b"row_key_2", table=table) - row_2.set_cell("cf", b"col", b"value2") - row_3 = DirectRow(row_key=b"row_key_3", table=table) - row_3.set_cell("cf", b"col", b"value3") - row_4 = DirectRow(row_key=b"row_key_4", table=table) - row_4.set_cell("cf", b"col", b"value4") - - response = self._make_responses([self.SUCCESS, self.RETRYABLE_1]) - - # Patch the stub used by the API method. 
- client._table_data_client.mutate_rows.side_effect = [[response]] - - table._instance._client._table_data_client = data_api - table._instance._client._table_admin_client = table_api - - worker = self._make_worker(client, table.name, [row_1, row_2, row_3, row_4]) - worker.responses_statuses = self._make_responses_statuses( - [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE, self.RETRYABLE_2] - ) + responses = [self.SUCCESS, self.RETRYABLE_1] - with self.assertRaises(_BigtableRetryableError): - worker._do_mutate_retryable_rows() + prior_statuses = [ + self.SUCCESS, + self.RETRYABLE_1, + self.NON_RETRYABLE, + self.RETRYABLE_2, + ] - statuses = worker.responses_statuses - result = [status.code for status in statuses] expected_result = [ self.SUCCESS, self.SUCCESS, @@ -1899,15 +1928,16 @@ def test_do_mutate_retryable_rows_second_retry(self): self.RETRYABLE_1, ] - self.assertEqual(result, expected_result) - - def test_do_mutate_retryable_rows_second_try(self): - from google.cloud.bigtable.row import DirectRow - from google.cloud.bigtable_v2.services.bigtable import BigtableClient - from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - client as bigtable_table_admin, + self._do_mutate_retryable_rows_helper( + row_cells, + responses, + prior_statuses=prior_statuses, + expected_result=expected_result, + raising_retry=True, ) + def test_do_mutate_retryable_rows_second_try(self): + # # Setup: # - Mutate 4 rows. # - First try results: @@ -1917,43 +1947,23 @@ def test_do_mutate_retryable_rows_second_try(self): # Expectation: # - After second try: # [success, non-retryable, non-retryable, success] + # + row_cells = [ + (b"row_key_1", ("cf", b"col", b"value1")), + (b"row_key_2", ("cf", b"col", b"value2")), + (b"row_key_3", ("cf", b"col", b"value3")), + (b"row_key_4", ("cf", b"col", b"value4")), + ] - data_api = mock.create_autospec(BigtableClient) - table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - client._table_data_client = data_api - client._table_admin_client = table_api - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_table(self.TABLE_ID, instance) - - row_1 = DirectRow(row_key=b"row_key", table=table) - row_1.set_cell("cf", b"col", b"value1") - row_2 = DirectRow(row_key=b"row_key_2", table=table) - row_2.set_cell("cf", b"col", b"value2") - row_3 = DirectRow(row_key=b"row_key_3", table=table) - row_3.set_cell("cf", b"col", b"value3") - row_4 = DirectRow(row_key=b"row_key_4", table=table) - row_4.set_cell("cf", b"col", b"value4") - - response = self._make_responses([self.NON_RETRYABLE, self.SUCCESS]) - - # Patch the stub used by the API method. 
- client._table_data_client.mutate_rows.side_effect = [[response]] - - table._instance._client._table_data_client = data_api - table._instance._client._table_admin_client = table_api - - worker = self._make_worker(client, table.name, [row_1, row_2, row_3, row_4]) - worker.responses_statuses = self._make_responses_statuses( - [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE, self.RETRYABLE_2] - ) + responses = [self.NON_RETRYABLE, self.SUCCESS] - statuses = worker._do_mutate_retryable_rows() + prior_statuses = [ + self.SUCCESS, + self.RETRYABLE_1, + self.NON_RETRYABLE, + self.RETRYABLE_2, + ] - result = [status.code for status in statuses] expected_result = [ self.SUCCESS, self.NON_RETRYABLE, @@ -1961,14 +1971,15 @@ def test_do_mutate_retryable_rows_second_try(self): self.SUCCESS, ] - self.assertEqual(result, expected_result) - - def test_do_mutate_retryable_rows_second_try_no_retryable(self): - from google.cloud.bigtable.row import DirectRow - from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - client as bigtable_table_admin, + self._do_mutate_retryable_rows_helper( + row_cells, + responses, + prior_statuses=prior_statuses, + expected_result=expected_result, ) + def test_do_mutate_retryable_rows_second_try_no_retryable(self): + # # Setup: # - Mutate 2 rows. # - First try results: [success, non-retryable] @@ -1976,69 +1987,41 @@ def test_do_mutate_retryable_rows_second_try_no_retryable(self): # - Second try has no row to retry. # Expectation: # - After second try: [success, non-retryable] + # + row_cells = [ + (b"row_key_1", ("cf", b"col", b"value1")), + (b"row_key_2", ("cf", b"col", b"value2")), + ] - table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - client._table_admin_client = table_api - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_table(self.TABLE_ID, instance) - - row_1 = DirectRow(row_key=b"row_key", table=table) - row_1.set_cell("cf", b"col", b"value1") - row_2 = DirectRow(row_key=b"row_key_2", table=table) - row_2.set_cell("cf", b"col", b"value2") - - worker = self._make_worker(client, table.name, [row_1, row_2]) - worker.responses_statuses = self._make_responses_statuses( - [self.SUCCESS, self.NON_RETRYABLE] - ) - - table._instance._client._table_admin_client = table_api - - statuses = worker._do_mutate_retryable_rows() - - result = [status.code for status in statuses] - expected_result = [self.SUCCESS, self.NON_RETRYABLE] + responses = [] # no calls will be made - self.assertEqual(result, expected_result) + prior_statuses = [ + self.SUCCESS, + self.NON_RETRYABLE, + ] - def test_do_mutate_retryable_rows_mismatch_num_responses(self): - from google.cloud.bigtable.row import DirectRow - from google.cloud.bigtable_v2.services.bigtable import BigtableClient - from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - client as bigtable_table_admin, - ) + expected_result = [ + self.SUCCESS, + self.NON_RETRYABLE, + ] - data_api = mock.create_autospec(BigtableClient) - table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True + self._do_mutate_retryable_rows_helper( + row_cells, + responses, + prior_statuses=prior_statuses, + expected_result=expected_result, ) - client._table_data_client = data_api - 
client._table_admin_client = table_api - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_table(self.TABLE_ID, instance) - row_1 = DirectRow(row_key=b"row_key", table=table) - row_1.set_cell("cf", b"col", b"value1") - row_2 = DirectRow(row_key=b"row_key_2", table=table) - row_2.set_cell("cf", b"col", b"value2") - - response = self._make_responses([self.SUCCESS]) - - # Patch the stub used by the API method. - client._table_data_client.mutate_rows.side_effect = [[response]] + def test_do_mutate_retryable_rows_mismatch_num_responses(self): + row_cells = [ + (b"row_key_1", ("cf", b"col", b"value1")), + (b"row_key_2", ("cf", b"col", b"value2")), + ] - table._instance._client._table_data_client = data_api - table._instance._client._table_admin_client = table_api + responses = [self.SUCCESS] - worker = self._make_worker(client, table.name, [row_1, row_2]) with self.assertRaises(RuntimeError): - worker._do_mutate_retryable_rows() + self._do_mutate_retryable_rows_helper(row_cells, responses) class Test__create_row_request(unittest.TestCase): From 4c4a05ef143660f486a7bbe939462412d31fdd87 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Fri, 25 Jun 2021 14:57:53 -0400 Subject: [PATCH 468/892] tests: restore 100% unit test coverage for 'google.cloud.bigtable.client' (#343) Define gRPC channel options at module-scope, improving testability, discoverability Toward #335. --- .../google/cloud/bigtable/client.py | 48 +++--- .../tests/unit/test_client.py | 145 +++++++++++++++++- 2 files changed, 166 insertions(+), 27 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py index be536f2957dd..7249c0b358c8 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py @@ -67,6 +67,13 @@ READ_ONLY_SCOPE = "https://www.googleapis.com/auth/bigtable.data.readonly" """Scope for reading table data.""" +_GRPC_CHANNEL_OPTIONS = ( + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ("grpc.keepalive_time_ms", 30000), + ("grpc.keepalive_timeout_ms", 10000), +) + def _create_gapic_client(client_class, client_options=None, transport=None): def inner(self): @@ -195,11 +202,15 @@ def _get_scopes(self): return scopes def _emulator_channel(self, transport, options): - """ - Creates a channel using self._credentials in a similar way to grpc.secure_channel but - using grpc.local_channel_credentials() rather than grpc.ssh_channel_credentials() - to allow easy connection to a local emulator. - :return: grpc.Channel or grpc.aio.Channel + """Create a channel using self._credentials + + Works in a similar way to ``grpc.secure_channel`` but using + ``grpc.local_channel_credentials`` rather than + ``grpc.ssh_channel_credentials`` to allow easy connection to a + local emulator. + + Returns: + grpc.Channel or grpc.aio.Channel """ # TODO: Implement a special credentials type for emulator and use # "transport.create_channel" to create gRPC channels once google-auth @@ -219,8 +230,8 @@ def _emulator_channel(self, transport, options): ) def _local_composite_credentials(self): - """ - Creates the credentials for the local emulator channel + """Create credentials for the local emulator channel. 
+ :return: grpc.ChannelCredentials """ credentials = google.auth.credentials.with_scopes_if_required( @@ -245,27 +256,24 @@ def _local_composite_credentials(self): ) def _create_gapic_client_channel(self, client_class, grpc_transport): - options = { - "grpc.max_send_message_length": -1, - "grpc.max_receive_message_length": -1, - "grpc.keepalive_time_ms": 30000, - "grpc.keepalive_timeout_ms": 10000, - }.items() - if self._client_options and self._client_options.api_endpoint: + if self._emulator_host is not None: + api_endpoint = self._emulator_host + elif self._client_options and self._client_options.api_endpoint: api_endpoint = self._client_options.api_endpoint else: api_endpoint = client_class.DEFAULT_ENDPOINT - channel = None if self._emulator_host is not None: - api_endpoint = self._emulator_host - channel = self._emulator_channel(grpc_transport, options) + channel = self._emulator_channel( + transport=grpc_transport, options=_GRPC_CHANNEL_OPTIONS, + ) else: channel = grpc_transport.create_channel( - host=api_endpoint, credentials=self._credentials, options=options, + host=api_endpoint, + credentials=self._credentials, + options=_GRPC_CHANNEL_OPTIONS, ) - transport = grpc_transport(channel=channel, host=api_endpoint) - return transport + return grpc_transport(channel=channel, host=api_endpoint) @property def project_path(self): diff --git a/packages/google-cloud-bigtable/tests/unit/test_client.py b/packages/google-cloud-bigtable/tests/unit/test_client.py index f6b8eb5bca71..5c557763a072 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/test_client.py @@ -170,6 +170,7 @@ def test_constructor_both_admin_and_read_only(self): def test_constructor_with_emulator_host(self): from google.cloud.environment_vars import BIGTABLE_EMULATOR + from google.cloud.bigtable.client import _GRPC_CHANNEL_OPTIONS credentials = _make_credentials() emulator_host = "localhost:8081" @@ -183,13 +184,9 @@ def test_constructor_with_emulator_host(self): client.table_data_client self.assertEqual(client._emulator_host, emulator_host) - options = { - "grpc.max_send_message_length": -1, - "grpc.max_receive_message_length": -1, - "grpc.keepalive_time_ms": 30000, - "grpc.keepalive_timeout_ms": 10000, - }.items() - factory.assert_called_once_with(emulator_host, credentials, options=options) + factory.assert_called_once_with( + emulator_host, credentials, options=_GRPC_CHANNEL_OPTIONS, + ) def test__get_scopes_default(self): from google.cloud.bigtable.client import DATA_SCOPE @@ -215,6 +212,140 @@ def test__get_scopes_read_only(self): ) self.assertEqual(client._get_scopes(), (READ_ONLY_SCOPE,)) + def test__emulator_channel_sync(self): + emulator_host = "localhost:8081" + transport_name = "GrpcTransportTesting" + transport = mock.Mock(spec=["__name__"], __name__=transport_name) + options = mock.Mock(spec=[]) + client = self._make_one( + project=self.PROJECT, credentials=_make_credentials(), read_only=True + ) + client._emulator_host = emulator_host + lcc = client._local_composite_credentials = mock.Mock(spec=[]) + + with mock.patch("grpc.secure_channel") as patched: + channel = client._emulator_channel(transport, options) + + assert channel is patched.return_value + patched.assert_called_once_with( + emulator_host, lcc.return_value, options=options, + ) + + def test__emulator_channel_async(self): + emulator_host = "localhost:8081" + transport_name = "GrpcAsyncIOTransportTesting" + transport = mock.Mock(spec=["__name__"], __name__=transport_name) + options = 
mock.Mock(spec=[]) + client = self._make_one( + project=self.PROJECT, credentials=_make_credentials(), read_only=True + ) + client._emulator_host = emulator_host + lcc = client._local_composite_credentials = mock.Mock(spec=[]) + + with mock.patch("grpc.aio.secure_channel") as patched: + channel = client._emulator_channel(transport, options) + + assert channel is patched.return_value + patched.assert_called_once_with( + emulator_host, lcc.return_value, options=options, + ) + + def test__local_composite_credentials(self): + client = self._make_one( + project=self.PROJECT, credentials=_make_credentials(), read_only=True + ) + + wsir_patch = mock.patch("google.auth.credentials.with_scopes_if_required") + request_patch = mock.patch("google.auth.transport.requests.Request") + amp_patch = mock.patch("google.auth.transport.grpc.AuthMetadataPlugin") + grpc_patches = mock.patch.multiple( + "grpc", + metadata_call_credentials=mock.DEFAULT, + local_channel_credentials=mock.DEFAULT, + composite_channel_credentials=mock.DEFAULT, + ) + with wsir_patch as wsir_patched: + with request_patch as request_patched: + with amp_patch as amp_patched: + with grpc_patches as grpc_patched: + credentials = client._local_composite_credentials() + + grpc_mcc = grpc_patched["metadata_call_credentials"] + grpc_lcc = grpc_patched["local_channel_credentials"] + grpc_ccc = grpc_patched["composite_channel_credentials"] + + self.assertIs(credentials, grpc_ccc.return_value) + + wsir_patched.assert_called_once_with(client._credentials, None) + request_patched.assert_called_once_with() + amp_patched.assert_called_once_with( + wsir_patched.return_value, request_patched.return_value, + ) + grpc_mcc.assert_called_once_with(amp_patched.return_value) + grpc_lcc.assert_called_once_with() + grpc_ccc.assert_called_once_with(grpc_lcc.return_value, grpc_mcc.return_value) + + def _create_gapic_client_channel_helper( + self, endpoint=None, emulator_host=None, + ): + from google.cloud.bigtable.client import _GRPC_CHANNEL_OPTIONS + + client_class = mock.Mock(spec=["DEFAULT_ENDPOINT"]) + credentials = _make_credentials() + client = self._make_one(project=self.PROJECT, credentials=credentials) + + if endpoint is not None: + client._client_options = mock.Mock( + spec=["api_endpoint"], api_endpoint=endpoint, + ) + expected_host = endpoint + else: + expected_host = client_class.DEFAULT_ENDPOINT + + if emulator_host is not None: + client._emulator_host = emulator_host + client._emulator_channel = mock.Mock(spec=[]) + expected_host = emulator_host + + grpc_transport = mock.Mock(spec=["create_channel"]) + + transport = client._create_gapic_client_channel(client_class, grpc_transport) + + self.assertIs(transport, grpc_transport.return_value) + + if emulator_host is not None: + client._emulator_channel.assert_called_once_with( + transport=grpc_transport, options=_GRPC_CHANNEL_OPTIONS, + ) + grpc_transport.assert_called_once_with( + channel=client._emulator_channel.return_value, host=expected_host, + ) + else: + grpc_transport.create_channel.assert_called_once_with( + host=expected_host, + credentials=client._credentials, + options=_GRPC_CHANNEL_OPTIONS, + ) + grpc_transport.assert_called_once_with( + channel=grpc_transport.create_channel.return_value, host=expected_host, + ) + + def test__create_gapic_client_channel_w_defaults(self): + self._create_gapic_client_channel_helper() + + def test__create_gapic_client_channel_w_endpoint(self): + endpoint = "api.example.com" + self._create_gapic_client_channel_helper(endpoint=endpoint) + + def 
test__create_gapic_client_channel_w_emulator_host(self): + host = "api.example.com:1234" + self._create_gapic_client_channel_helper(emulator_host=host) + + def test__create_gapic_client_channel_w_endpoint_w_emulator_host(self): + endpoint = "api.example.com" + host = "other.example.com:1234" + self._create_gapic_client_channel_helper(endpoint=endpoint, emulator_host=host) + def test_project_path_property(self): credentials = _make_credentials() project = "PROJECT" From d37afa50d6770d78f4357c5605641d51bf29f4d1 Mon Sep 17 00:00:00 2001 From: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Date: Fri, 25 Jun 2021 14:26:56 -0600 Subject: [PATCH 469/892] fix(deps): require google-api-core >= 1.26.0 (#344) Also, pin google-auth to lower version in constraints. --- packages/google-cloud-bigtable/noxfile.py | 26 ++++++++++++------- packages/google-cloud-bigtable/setup.py | 2 +- .../testing/constraints-3.10.txt | 0 .../testing/constraints-3.6.txt | 14 ++++++++++ .../testing/constraints-3.7.txt | 0 .../testing/constraints-3.8.txt | 0 .../testing/constraints-3.9.txt | 0 7 files changed, 31 insertions(+), 11 deletions(-) create mode 100644 packages/google-cloud-bigtable/testing/constraints-3.10.txt create mode 100644 packages/google-cloud-bigtable/testing/constraints-3.6.txt create mode 100644 packages/google-cloud-bigtable/testing/constraints-3.7.txt create mode 100644 packages/google-cloud-bigtable/testing/constraints-3.8.txt create mode 100644 packages/google-cloud-bigtable/testing/constraints-3.9.txt diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index e74321f4943e..804225c8c8a3 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -18,6 +18,7 @@ from __future__ import absolute_import import os +import pathlib import shutil import nox @@ -30,6 +31,8 @@ SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"] UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9"] +CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute() + # 'docfx' is excluded since it only needs to run in 'docs-presubmit' nox.options.sessions = [ "unit", @@ -78,13 +81,14 @@ def lint_setup_py(session): def default(session): # Install all test dependencies, then install this package in-place. - session.install("asyncmock", "pytest-asyncio") - - session.install( - "mock", "pytest", "pytest-cov", + constraints_path = str( + CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt" ) + session.install("asyncmock", "pytest-asyncio", "-c", constraints_path) - session.install("-e", ".") + session.install("mock", "pytest", "pytest-cov", "-c", constraints_path) + + session.install("-e", ".", "-c", constraints_path) # Run py.test against the unit tests. session.run( @@ -136,6 +140,10 @@ def system_emulated(session): @nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS) def system(session): """Run the system test suite.""" + constraints_path = str( + CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt" + ) + system_test_path = os.path.join("tests", "system.py") system_test_folder_path = os.path.join("tests", "system") @@ -147,7 +155,7 @@ def system(session): session.skip("Credentials must be set via environment variable") # Install pyopenssl for mTLS testing. 
if os.environ.get("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true": - session.install("pyopenssl") + session.install("pyopenssl", "-c", constraints_path) system_test_exists = os.path.exists(system_test_path) system_test_folder_exists = os.path.exists(system_test_folder_path) @@ -160,10 +168,8 @@ def system(session): # Install all test dependencies, then install this package into the # virtualenv's dist-packages. - session.install( - "mock", "pytest", "google-cloud-testutils", - ) - session.install("-e", ".") + session.install("mock", "pytest", "google-cloud-testutils", "-c", constraints_path) + session.install("-e", ".", "-c", constraints_path) # Run py.test against the system tests. if system_test_exists: diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 21a0126f35b4..2a4362deb6b3 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -29,7 +29,7 @@ # 'Development Status :: 5 - Production/Stable' release_status = "Development Status :: 5 - Production/Stable" dependencies = [ - "google-api-core[grpc] >= 1.22.2, < 2.0.0dev", + "google-api-core[grpc] >= 1.26.0, < 2.0.0dev", "google-cloud-core >= 1.4.1, < 2.0dev", "grpc-google-iam-v1 >= 0.12.3, < 0.13dev", "proto-plus >= 1.13.0", diff --git a/packages/google-cloud-bigtable/testing/constraints-3.10.txt b/packages/google-cloud-bigtable/testing/constraints-3.10.txt new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/packages/google-cloud-bigtable/testing/constraints-3.6.txt b/packages/google-cloud-bigtable/testing/constraints-3.6.txt new file mode 100644 index 000000000000..25d8d3eef538 --- /dev/null +++ b/packages/google-cloud-bigtable/testing/constraints-3.6.txt @@ -0,0 +1,14 @@ +# This constraints file is used to check that lower bounds +# are correct in setup.py +# List *all* library dependencies and extras in this file. +# Pin the version to the lower bound. 
+# +# e.g., if setup.py has "foo >= 1.14.0, < 2.0.0dev", +# Then this file should have foo==1.14.0 +google-api-core==1.26.0 +google-cloud-core==1.4.1 +grpc-google-iam-v1==0.12.3 +proto-plus==1.13.0 +libcst==0.2.5 +packaging==14.3 +google-auth==1.24.0 # TODO: remove when google-auth >= 1.25.0 is required transitively through google-api-core diff --git a/packages/google-cloud-bigtable/testing/constraints-3.7.txt b/packages/google-cloud-bigtable/testing/constraints-3.7.txt new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/packages/google-cloud-bigtable/testing/constraints-3.8.txt b/packages/google-cloud-bigtable/testing/constraints-3.8.txt new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/packages/google-cloud-bigtable/testing/constraints-3.9.txt b/packages/google-cloud-bigtable/testing/constraints-3.9.txt new file mode 100644 index 000000000000..e69de29bb2d1 From 90f301045a430adfdda7bf7de3fe58f84b20f953 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Fri, 25 Jun 2021 17:04:31 -0400 Subject: [PATCH 470/892] chore(python): simplify nox steps in CONTRIBUTING.rst (#341) Source-Link: https://github.com/googleapis/synthtool/commit/26558bae8976a985d73c2d98c31d8612273f907d Post-Processor: gcr.io/repo-automation-bots/owlbot-python:latest@sha256:99d90d097e4a4710cc8658ee0b5b963f4426d0e424819787c3ac1405c9a26719 Co-authored-by: Owl Bot --- .../.github/.OwlBot.lock.yaml | 2 +- packages/google-cloud-bigtable/CONTRIBUTING.rst | 14 ++++++-------- 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 0954585f2833..e2b39f946040 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -1,3 +1,3 @@ docker: image: gcr.io/repo-automation-bots/owlbot-python:latest - digest: sha256:df50e8d462f86d6bcb42f27ecad55bb12c404f1c65de9c6fe4c4d25120080bd6 + digest: sha256:99d90d097e4a4710cc8658ee0b5b963f4426d0e424819787c3ac1405c9a26719 diff --git a/packages/google-cloud-bigtable/CONTRIBUTING.rst b/packages/google-cloud-bigtable/CONTRIBUTING.rst index 999ecd5db0d8..c97388fa83f1 100644 --- a/packages/google-cloud-bigtable/CONTRIBUTING.rst +++ b/packages/google-cloud-bigtable/CONTRIBUTING.rst @@ -68,14 +68,12 @@ Using ``nox`` We use `nox `__ to instrument our tests. - To test your changes, run unit tests with ``nox``:: + $ nox -s unit - $ nox -s unit-3.8 - $ ... +- To run a single unit test:: -- Args to pytest can be passed through the nox command separated by a `--`. For - example, to run a single test:: + $ nox -s unit-3.9 -- -k - $ nox -s unit-3.8 -- -k .. note:: @@ -142,7 +140,7 @@ Running System Tests - To run system tests, you can execute:: # Run all system tests - $ nox -s system-3.8 + $ nox -s system # Run a single system test $ nox -s system-3.8 -- -k @@ -215,8 +213,8 @@ Supported versions can be found in our ``noxfile.py`` `config`_. .. _config: https://github.com/googleapis/python-bigtable/blob/master/noxfile.py -We also explicitly decided to support Python 3 beginning with version -3.6. Reasons for this include: +We also explicitly decided to support Python 3 beginning with version 3.6. 
+Reasons for this include: - Encouraging use of newest versions of Python 3 - Taking the lead of `prominent`_ open-source `projects`_ From f203501f82357d8c0c19c5dee55c996f1fe7e00d Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Fri, 25 Jun 2021 17:46:00 -0400 Subject: [PATCH 471/892] tests: require 100% coverage again (#345) Closes #335. --- packages/google-cloud-bigtable/.coveragerc | 2 +- packages/google-cloud-bigtable/noxfile.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/.coveragerc b/packages/google-cloud-bigtable/.coveragerc index b11c3eaa34ed..1ba5bb57db4b 100644 --- a/packages/google-cloud-bigtable/.coveragerc +++ b/packages/google-cloud-bigtable/.coveragerc @@ -21,7 +21,7 @@ omit = google/cloud/__init__.py [report] -fail_under = 99 +fail_under = 100 show_missing = True exclude_lines = # Re-enable the standard pragma diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index 804225c8c8a3..b0e278df105e 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -198,7 +198,7 @@ def cover(session): test runs (not system test runs), and then erases coverage data. """ session.install("coverage", "pytest-cov") - session.run("coverage", "report", "--show-missing", "--fail-under=99") + session.run("coverage", "report", "--show-missing", "--fail-under=100") session.run("coverage", "erase") From 5754fe928c57b9fbf01461dcd214b2b8586f6077 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Mon, 28 Jun 2021 19:43:27 +0200 Subject: [PATCH 472/892] chore(deps): update dependency google-cloud-core to v1.7.1 (#323) --- packages/google-cloud-bigtable/samples/beam/requirements.txt | 2 +- packages/google-cloud-bigtable/samples/hello/requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index f0ccb84d7e7c..42da7391c189 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ apache-beam==2.30.0 google-cloud-bigtable<2.0.0 -google-cloud-core==1.6.0 \ No newline at end of file +google-cloud-core==1.7.1 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/hello/requirements.txt b/packages/google-cloud-bigtable/samples/hello/requirements.txt index 68fd9e57fd85..8161b7c19d3d 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements.txt @@ -1,2 +1,2 @@ google-cloud-bigtable==2.0.0 -google-cloud-core==1.6.0 +google-cloud-core==1.7.1 From 0eb3846df5478e2bac1fba08d75d1e7d066d08f7 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Wed, 30 Jun 2021 13:31:06 -0400 Subject: [PATCH 473/892] fix: disable always_use_jwt_access (#348) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore: use gapic-generator-python 0.50.3 fix: disable always_use_jwt_access Committer: @busunkim96 PiperOrigin-RevId: 382142900 Source-Link: https://github.com/googleapis/googleapis/commit/513440fda515f3c799c22a30e3906dcda325004e Source-Link: https://github.com/googleapis/googleapis-gen/commit/7b1e2c31233f79a704ec21ca410bf661d6bc68d0 * 🦉 Updates from OwlBot See 
https://github.com/googleapis/repo-automation-bots/blob/master/packages/owl-bot/README.md Co-authored-by: Owl Bot --- .../transports/base.py | 2 +- .../transports/grpc.py | 5 +- .../transports/grpc_asyncio.py | 5 +- .../bigtable_table_admin/transports/base.py | 2 +- .../bigtable_table_admin/transports/grpc.py | 5 +- .../transports/grpc_asyncio.py | 5 +- .../services/bigtable/transports/base.py | 2 +- .../services/bigtable/transports/grpc.py | 5 +- .../bigtable/transports/grpc_asyncio.py | 5 +- .../test_bigtable_instance_admin.py | 50 ++++++++----------- .../test_bigtable_table_admin.py | 47 ++++++++--------- .../unit/gapic/bigtable_v2/test_bigtable.py | 47 ++++++++--------- 12 files changed, 93 insertions(+), 87 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py index f8a292263fd8..2c4b6bb42c44 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py @@ -112,7 +112,7 @@ def __init__( scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) # Save the scopes. - self._scopes = scopes or self.AUTH_SCOPES + self._scopes = scopes # If no credentials are provided, then determine the appropriate # defaults. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py index 85cc215e5140..9c311a51e008 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py @@ -66,6 +66,7 @@ def __init__( client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, ) -> None: """Instantiate the transport. @@ -106,6 +107,8 @@ def __init__( API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. 
Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport @@ -159,7 +162,7 @@ def __init__( scopes=scopes, quota_project_id=quota_project_id, client_info=client_info, - always_use_jwt_access=True, + always_use_jwt_access=always_use_jwt_access, ) if not self._grpc_channel: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py index 8294da096ec7..5ab099b357a5 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py @@ -112,6 +112,7 @@ def __init__( client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id=None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, ) -> None: """Instantiate the transport. @@ -153,6 +154,8 @@ def __init__( API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. Raises: google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport @@ -205,7 +208,7 @@ def __init__( scopes=scopes, quota_project_id=quota_project_id, client_info=client_info, - always_use_jwt_access=True, + always_use_jwt_access=always_use_jwt_access, ) if not self._grpc_channel: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py index 5c286d9f908f..5b94d6128b29 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py @@ -112,7 +112,7 @@ def __init__( scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) # Save the scopes. - self._scopes = scopes or self.AUTH_SCOPES + self._scopes = scopes # If no credentials are provided, then determine the appropriate # defaults. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py index 240a798cd280..cfa9075ee96f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py @@ -68,6 +68,7 @@ def __init__( client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, ) -> None: """Instantiate the transport. @@ -108,6 +109,8 @@ def __init__( API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. 
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport @@ -161,7 +164,7 @@ def __init__( scopes=scopes, quota_project_id=quota_project_id, client_info=client_info, - always_use_jwt_access=True, + always_use_jwt_access=always_use_jwt_access, ) if not self._grpc_channel: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py index a4994a71287d..e75297b4bc87 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py @@ -114,6 +114,7 @@ def __init__( client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id=None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, ) -> None: """Instantiate the transport. @@ -155,6 +156,8 @@ def __init__( API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. Raises: google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport @@ -207,7 +210,7 @@ def __init__( scopes=scopes, quota_project_id=quota_project_id, client_info=client_info, - always_use_jwt_access=True, + always_use_jwt_access=always_use_jwt_access, ) if not self._grpc_channel: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py index 6f5029fe91fe..12230759f087 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py @@ -103,7 +103,7 @@ def __init__( scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) # Save the scopes. - self._scopes = scopes or self.AUTH_SCOPES + self._scopes = scopes # If no credentials are provided, then determine the appropriate # defaults. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py index 21a048e35b5a..3f1b5dc6a480 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py @@ -58,6 +58,7 @@ def __init__( client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, ) -> None: """Instantiate the transport. @@ -98,6 +99,8 @@ def __init__( API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. 
Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport @@ -150,7 +153,7 @@ def __init__( scopes=scopes, quota_project_id=quota_project_id, client_info=client_info, - always_use_jwt_access=True, + always_use_jwt_access=always_use_jwt_access, ) if not self._grpc_channel: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py index ab448a6b640c..5375474a4f05 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py @@ -104,6 +104,7 @@ def __init__( client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id=None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, ) -> None: """Instantiate the transport. @@ -145,6 +146,8 @@ def __init__( API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. Raises: google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport @@ -196,7 +199,7 @@ def __init__( scopes=scopes, quota_project_id=quota_project_id, client_info=client_info, - always_use_jwt_access=True, + always_use_jwt_access=always_use_jwt_access, ) if not self._grpc_channel: diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index 196c1e22317e..405cabb9ba32 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -144,7 +144,25 @@ def test_bigtable_instance_admin_client_service_account_always_use_jwt(client_cl ) as use_jwt: creds = service_account.Credentials(None, None, None) client = client_class(credentials=creds) - use_jwt.assert_called_with(True) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize( + "transport_class,transport_name", + [ + (transports.BigtableInstanceAdminGrpcTransport, "grpc"), + (transports.BigtableInstanceAdminGrpcAsyncIOTransport, "grpc_asyncio"), + ], +) +def test_bigtable_instance_admin_client_service_account_always_use_jwt_true( + transport_class, transport_name +): + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) @pytest.mark.parametrize( @@ -5150,15 +5168,7 @@ def test_bigtable_instance_admin_grpc_transport_client_cert_source_for_mtls( "squid.clam.whelk:443", credentials=cred, credentials_file=None, - scopes=( - "https://www.googleapis.com/auth/bigtable.admin", - "https://www.googleapis.com/auth/bigtable.admin.cluster", - "https://www.googleapis.com/auth/bigtable.admin.instance", - "https://www.googleapis.com/auth/cloud-bigtable.admin", - "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster", - "https://www.googleapis.com/auth/cloud-platform", - 
"https://www.googleapis.com/auth/cloud-platform.read-only", - ), + scopes=None, ssl_credentials=mock_ssl_channel_creds, quota_project_id=None, options=[ @@ -5267,15 +5277,7 @@ def test_bigtable_instance_admin_transport_channel_mtls_with_client_cert_source( "mtls.squid.clam.whelk:443", credentials=cred, credentials_file=None, - scopes=( - "https://www.googleapis.com/auth/bigtable.admin", - "https://www.googleapis.com/auth/bigtable.admin.cluster", - "https://www.googleapis.com/auth/bigtable.admin.instance", - "https://www.googleapis.com/auth/cloud-bigtable.admin", - "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster", - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - ), + scopes=None, ssl_credentials=mock_ssl_cred, quota_project_id=None, options=[ @@ -5322,15 +5324,7 @@ def test_bigtable_instance_admin_transport_channel_mtls_with_adc(transport_class "mtls.squid.clam.whelk:443", credentials=mock_cred, credentials_file=None, - scopes=( - "https://www.googleapis.com/auth/bigtable.admin", - "https://www.googleapis.com/auth/bigtable.admin.cluster", - "https://www.googleapis.com/auth/bigtable.admin.instance", - "https://www.googleapis.com/auth/cloud-bigtable.admin", - "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster", - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - ), + scopes=None, ssl_credentials=mock_ssl_cred, quota_project_id=None, options=[ diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index c09713ec29a6..2de0e03d174c 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -147,7 +147,25 @@ def test_bigtable_table_admin_client_service_account_always_use_jwt(client_class ) as use_jwt: creds = service_account.Credentials(None, None, None) client = client_class(credentials=creds) - use_jwt.assert_called_with(True) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize( + "transport_class,transport_name", + [ + (transports.BigtableTableAdminGrpcTransport, "grpc"), + (transports.BigtableTableAdminGrpcAsyncIOTransport, "grpc_asyncio"), + ], +) +def test_bigtable_table_admin_client_service_account_always_use_jwt_true( + transport_class, transport_name +): + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) @pytest.mark.parametrize( @@ -5993,14 +6011,7 @@ def test_bigtable_table_admin_grpc_transport_client_cert_source_for_mtls( "squid.clam.whelk:443", credentials=cred, credentials_file=None, - scopes=( - "https://www.googleapis.com/auth/bigtable.admin", - "https://www.googleapis.com/auth/bigtable.admin.table", - "https://www.googleapis.com/auth/cloud-bigtable.admin", - "https://www.googleapis.com/auth/cloud-bigtable.admin.table", - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - ), + scopes=None, ssl_credentials=mock_ssl_channel_creds, quota_project_id=None, options=[ @@ -6109,14 +6120,7 @@ def 
test_bigtable_table_admin_transport_channel_mtls_with_client_cert_source( "mtls.squid.clam.whelk:443", credentials=cred, credentials_file=None, - scopes=( - "https://www.googleapis.com/auth/bigtable.admin", - "https://www.googleapis.com/auth/bigtable.admin.table", - "https://www.googleapis.com/auth/cloud-bigtable.admin", - "https://www.googleapis.com/auth/cloud-bigtable.admin.table", - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - ), + scopes=None, ssl_credentials=mock_ssl_cred, quota_project_id=None, options=[ @@ -6163,14 +6167,7 @@ def test_bigtable_table_admin_transport_channel_mtls_with_adc(transport_class): "mtls.squid.clam.whelk:443", credentials=mock_cred, credentials_file=None, - scopes=( - "https://www.googleapis.com/auth/bigtable.admin", - "https://www.googleapis.com/auth/bigtable.admin.table", - "https://www.googleapis.com/auth/cloud-bigtable.admin", - "https://www.googleapis.com/auth/cloud-bigtable.admin.table", - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - ), + scopes=None, ssl_credentials=mock_ssl_cred, quota_project_id=None, options=[ diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py index ab9763a79eef..80c8769319fc 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py @@ -118,7 +118,25 @@ def test_bigtable_client_service_account_always_use_jwt(client_class): ) as use_jwt: creds = service_account.Credentials(None, None, None) client = client_class(credentials=creds) - use_jwt.assert_called_with(True) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize( + "transport_class,transport_name", + [ + (transports.BigtableGrpcTransport, "grpc"), + (transports.BigtableGrpcAsyncIOTransport, "grpc_asyncio"), + ], +) +def test_bigtable_client_service_account_always_use_jwt_true( + transport_class, transport_name +): + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) @pytest.mark.parametrize("client_class", [BigtableClient, BigtableAsyncClient,]) @@ -2213,14 +2231,7 @@ def test_bigtable_grpc_transport_client_cert_source_for_mtls(transport_class): "squid.clam.whelk:443", credentials=cred, credentials_file=None, - scopes=( - "https://www.googleapis.com/auth/bigtable.data", - "https://www.googleapis.com/auth/bigtable.data.readonly", - "https://www.googleapis.com/auth/cloud-bigtable.data", - "https://www.googleapis.com/auth/cloud-bigtable.data.readonly", - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - ), + scopes=None, ssl_credentials=mock_ssl_channel_creds, quota_project_id=None, options=[ @@ -2324,14 +2335,7 @@ def test_bigtable_transport_channel_mtls_with_client_cert_source(transport_class "mtls.squid.clam.whelk:443", credentials=cred, credentials_file=None, - scopes=( - "https://www.googleapis.com/auth/bigtable.data", - "https://www.googleapis.com/auth/bigtable.data.readonly", - "https://www.googleapis.com/auth/cloud-bigtable.data", - "https://www.googleapis.com/auth/cloud-bigtable.data.readonly", - 
"https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - ), + scopes=None, ssl_credentials=mock_ssl_cred, quota_project_id=None, options=[ @@ -2375,14 +2379,7 @@ def test_bigtable_transport_channel_mtls_with_adc(transport_class): "mtls.squid.clam.whelk:443", credentials=mock_cred, credentials_file=None, - scopes=( - "https://www.googleapis.com/auth/bigtable.data", - "https://www.googleapis.com/auth/bigtable.data.readonly", - "https://www.googleapis.com/auth/cloud-bigtable.data", - "https://www.googleapis.com/auth/cloud-bigtable.data.readonly", - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - ), + scopes=None, ssl_credentials=mock_ssl_cred, quota_project_id=None, options=[ From 0ac2fd735ddad0b9f69e4e9c866ef0a1b8dac55f Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Thu, 1 Jul 2021 16:45:16 +0200 Subject: [PATCH 474/892] chore(deps): update dependency google-cloud-monitoring to v2.4.0 (#349) --- .../google-cloud-bigtable/samples/metricscaler/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index b4c87f2cb809..72c1c3d8664f 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ google-cloud-bigtable==2.0.0 -google-cloud-monitoring==2.3.0 +google-cloud-monitoring==2.4.0 From e3f74015eea32453179f1e368abce6c71d102fec Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Wed, 7 Jul 2021 14:19:46 -0400 Subject: [PATCH 475/892] chore: release 2.3.0 (#334) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- packages/google-cloud-bigtable/CHANGELOG.md | 21 +++++++++++++++++++++ packages/google-cloud-bigtable/setup.py | 2 +- 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index ac209648e5df..cd8b97fa6f75 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,27 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.3.0](https://www.github.com/googleapis/python-bigtable/compare/v2.2.0...v2.3.0) (2021-07-01) + + +### Features + +* add always_use_jwt_access ([#333](https://www.github.com/googleapis/python-bigtable/issues/333)) ([f1fce5b](https://www.github.com/googleapis/python-bigtable/commit/f1fce5b0694d965202fc2a4fcf8bc6e09e78deae)) + + +### Bug Fixes + +* **deps:** add packaging requirement ([#326](https://www.github.com/googleapis/python-bigtable/issues/326)) ([d31c27b](https://www.github.com/googleapis/python-bigtable/commit/d31c27b01d1f7c351effc2856a8d4777a1a10690)) +* **deps:** require google-api-core >= 1.26.0 ([#344](https://www.github.com/googleapis/python-bigtable/issues/344)) ([ce4ceb6](https://www.github.com/googleapis/python-bigtable/commit/ce4ceb6d8fe74eff16cf9ca151e0b98502256a2f)) +* disable always_use_jwt_access ([#348](https://www.github.com/googleapis/python-bigtable/issues/348)) ([4623248](https://www.github.com/googleapis/python-bigtable/commit/4623248376deccf4651d4badf8966311ebe3c16a)) + + +### Documentation + +* add paramter mutation_timeout to instance.table docs 
([#305](https://www.github.com/googleapis/python-bigtable/issues/305)) ([5bbd06e](https://www.github.com/googleapis/python-bigtable/commit/5bbd06e5413e8b7597ba128174b10fe45fd38380)) +* fix broken links in multiprocessing.rst ([#317](https://www.github.com/googleapis/python-bigtable/issues/317)) ([e329352](https://www.github.com/googleapis/python-bigtable/commit/e329352d7e6d81de1d1d770c73406a60d29d01bb)) +* omit mention of Python 2.7 in 'CONTRIBUTING.rst' ([#1127](https://www.github.com/googleapis/python-bigtable/issues/1127)) ([#329](https://www.github.com/googleapis/python-bigtable/issues/329)) ([6bf0c64](https://www.github.com/googleapis/python-bigtable/commit/6bf0c647bcebed641b4cbdc5eb70528c88b26a01)), closes [#1126](https://www.github.com/googleapis/python-bigtable/issues/1126) + ## [2.2.0](https://www.github.com/googleapis/python-bigtable/compare/v2.1.0...v2.2.0) (2021-04-30) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 2a4362deb6b3..a570f0b067a0 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -22,7 +22,7 @@ name = "google-cloud-bigtable" description = "Google Cloud Bigtable API client library" -version = "2.2.0" +version = "2.3.0" # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' From 2ad5dd800f2a379e5b1860b5680cd5bd3890bdf7 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Thu, 8 Jul 2021 11:51:46 -0400 Subject: [PATCH 476/892] chore: remove 'test_utils' fossil (#351) Closes #350 --- .../test_utils/credentials.json.enc | 49 ---- .../scripts/circleci/get_tagged_package.py | 64 ----- .../scripts/circleci/twine_upload.sh | 36 --- .../test_utils/scripts/get_target_packages.py | 268 ------------------ .../scripts/get_target_packages_kokoro.py | 98 ------- .../test_utils/scripts/run_emulator.py | 199 ------------- .../test_utils/scripts/update_docs.sh | 93 ------ .../google-cloud-bigtable/test_utils/setup.py | 64 ----- .../test_utils/test_utils/__init__.py | 0 .../test_utils/test_utils/imports.py | 38 --- .../test_utils/test_utils/retry.py | 207 -------------- .../test_utils/test_utils/system.py | 81 ------ 12 files changed, 1197 deletions(-) delete mode 100644 packages/google-cloud-bigtable/test_utils/credentials.json.enc delete mode 100644 packages/google-cloud-bigtable/test_utils/scripts/circleci/get_tagged_package.py delete mode 100755 packages/google-cloud-bigtable/test_utils/scripts/circleci/twine_upload.sh delete mode 100644 packages/google-cloud-bigtable/test_utils/scripts/get_target_packages.py delete mode 100644 packages/google-cloud-bigtable/test_utils/scripts/get_target_packages_kokoro.py delete mode 100644 packages/google-cloud-bigtable/test_utils/scripts/run_emulator.py delete mode 100755 packages/google-cloud-bigtable/test_utils/scripts/update_docs.sh delete mode 100644 packages/google-cloud-bigtable/test_utils/setup.py delete mode 100644 packages/google-cloud-bigtable/test_utils/test_utils/__init__.py delete mode 100644 packages/google-cloud-bigtable/test_utils/test_utils/imports.py delete mode 100644 packages/google-cloud-bigtable/test_utils/test_utils/retry.py delete mode 100644 packages/google-cloud-bigtable/test_utils/test_utils/system.py diff --git a/packages/google-cloud-bigtable/test_utils/credentials.json.enc b/packages/google-cloud-bigtable/test_utils/credentials.json.enc deleted file mode 100644 index f073c7e4f774..000000000000 --- a/packages/google-cloud-bigtable/test_utils/credentials.json.enc +++ /dev/null @@ 
-1,49 +0,0 @@ -U2FsdGVkX1/vVm/dOEg1DCACYbdOcL+ey6+64A+DZGZVgF8Z/3skK6rpPocu6GOA -UZAqASsBH9QifDf8cKVXQXVYpYq6HSv2O0w7vOmVorZO9GYPo98s9/8XO+4ty/AU -aB6TD68frBAYv4cT/l5m7aYdzfzMTy0EOXoleZT09JYP3B5FV3KCO114FzMXGwrj -HXsR6E5SyUUlUnWPC3eD3aqmovay0gxOKYO3ZwjFK1nlbN/8q6/8nwBCf/Bg6SHV -V93pNxdolRlJev9kgKz4RN1z4jGCy5PAndhSLE82NFIs9LoAiEOU5YeMlN+Ulqus -J92nh+ptUe9a4pJGbAuveUWO7zdS1QyXvTMUcmmSfXCNm/eIQjNuu5+rHtIjWKh8 -Ilwj2w1aTfSptQEhk/kwRgFz/d11vfwJzvwTmCxO6zyOeL0VUWLqdCBGgG5As9He -/RenF8PZ1O0WbTt7fns5oTlTk/MUo+0xJ1xqvu/y45LaqqcBAnEdrWKmtM3dJHWv -ufQku+kD+83F/VwBnQdvgMHu6KZEs6LRrNo58r4QuK6fS7VCACdzxID1RM2cL7kT -6BFRlyGj1aigmjne9g9M9Jx4R+mZDpPU1WDzzG71J4qCUwaX8Dfwutuv4uiFvzwq -NUF0wLJJPtKWmtW+hnZ/fhHQGCRsOpZzFnqp6Zv7J7k6esqxMgIjfal7Djk5Acy8 -j3iVvm6CYmKMVqzL62JHYS9Ye83tzBCaR8hpnJQKgH3FSOFY8HSwrtQSIsl/hSeF -41sgnz0Y+/gkzNeU18qFk+eCZmvljyu+JK0nPYUgpOCJYVBNQpNHz5PUyiAEKhtM -IOSdjPRW1Y+Xf4RroJnLPoF24Ijwrow5LCm9hBRY6TPPMMmnIXCd23xcLJ1rMj6g -x4ZikElans+cwuc9wtbb7w01DcpTwQ1+eIV1qV+KIgpnLjRGLhZD4etobBsrwYu/ -vnIwy2QHCKENPb8sbdgp7x2mF7VSX0/7tf+9+i70EBiMzpOKBkiZhtLzm6hOBkEy -ODaWrx4lTTwbSw8Rmtf58APhPFMsjHoNsjiUoK249Y8Y2Ff4fMfqYsXu6VC1n/At -CuWYHc3EfBwFcLJS+RQB9kFk/4FygFBWq4Kj0MqoRruLbKmoGeJKH9q35W0f0NCD -j+iHt3014kMGiuyJe1UDQ6fvEihFFdHuDivFpPAXDt4PTY/WtpDhaGMx23kb54pK -jkAuxpznAB1lK3u9bGRXDasGeHIrNtIlPvgkrWHXvoBVqM7zry8TGtoxp3E3I42Z -cUfDWfB9GqVdrOwvrTzyZsl2uShRkAJaZFZj5aMyYxiptp4gM8CwWiNtOd2EwtRO -LxZX4M02PQFIqXV3FSDA0q6EwglUrTZdAlYeOEkopaKCtG31dEPOSQG3NGJAEYso -Cxm99H7970dp0OAgpNSgRbcWDbhVbQXnRzvFGqLeH6a9dQ/a8uD3s8Qm9Du/kB6d -XxTRe2OGxzcD0AgI8GClE4rIZHCLbcwuJRp0EYcN+pgY80O4U98fZ5RYpU6OYbU/ -MEiaBYFKtZtGkV6AQD568V7hHJWqc5DDfVHUQ/aeQwnKi2vnU66u+nnV2rZxXxLP -+dqeLRpul+wKa5b/Z5SfQ14Ff8s7aVyxaogGpyggyPL1vyq4KWZ6Or/wEE5hgNO4 -kBh6ht0QT1Hti8XY2JK1M+Jgbjgcg4jkHBGVqegrG1Rvcc2A4TYKwx+QMSBhyxrU -5qhROjS4lTcC42hQslMUkUwc4U/Y91XdFbOOnaAkwzI36NRYL0pmgZnYxGJZeRvr -E5foOhnOEVSFGdOkLfFh+FkWZQf56Lmn8Gg2wHE3dZTxLHibiUYfkgOr1uEosq29 -D1NstvlJURPQ0Q+8QQNWcl9nEZHMAjOmnL1hbx+QfuC6seucp+sXGzdZByMLZbvT -tG8KNL293CmyQowgf9MXToWYnwRkcvqfTaKyor2Ggze3JtoFW4t0j4DI1XPciZFX -XmfApHrzdB/bZadzxyaZ2NE0CuH9zDelwI6rz38xsN5liYnp5qmNKVCZVOHccXa6 -J8x365m5/VaaA2RrtdPqKxn8VaKy7+T690QgMXVGM4PbzQzQxHuSleklocqlP+sB -jSMXCZY+ng/i4UmRO9noiyW3UThYh0hIdMYs12EmmI9cnF/OuYZpl30fmqwV+VNM -td5B2fYvAvvsjiX60SFCn3DATP1GrPMBlZSmhhP3GYS+xrWt3Xxta9qIX2BEF1Gg -twnZZRjoULSRFUYPfJPEOfEH2UQwm84wxx/GezVE+S/RpBlatPOgCiLnNNaLfdTC -mTG9qY9elJv3GGQO8Lqgf4i8blExs05lSPk1BDhzTB6H9TLz+Ge0/l1QxKf3gPXU -aImK1azieXMXHECkdKxrzmehwu1dZ/oYOLc/OFQCETwSRoLPFOFpYUpizwmVVHR6 -uLSfRptte4ZOU3zHfpd/0+J4tkwHwEkGzsmMdqudlm7qME6upuIplyVBH8JiXzUK -n1RIH/OPmVEluAnexWRLZNdk7MrakIO4XACVbICENiYQgAIErP568An6twWEGDbZ -bEN64E3cVDTDRPRAunIhhsEaapcxpFEPWlHorxv36nMUt0R0h0bJlCu5QdzckfcX -ZrRuu1kl76ZfbSE8T0G4/rBb9gsU4Gn3WyvLIO3MgFBuxR68ZwcR8LpEUd8qp38H -NG4cxPmN1nGKo663Z+xI2Gt5up4gpl+fOt4mXqxY386rB7yHaOfElMG5TUYdrS9w -1xbbCVgeJ6zxX+NFlndG33cSAPprhw+C18eUu6ZU63WZcYFo3GfK6rs3lvYtofvE -8DxztdTidQedNVNE+63YCjhxd/cZUI5n/UpgYkr9owp7hNGJiR3tdoNLR2gcoGqL -qWhH928k2aSgF2j97LZ2OqoPCp0tUB7ho4jD2u4Ik3GLVNlCc3dCvWRvpHtDTQDv -tujESMfHUc9I2r4S/PD3bku/ABGwa977Yp1PjzJGr9RajA5is5n6GVpyynwjtKG4 -iyyITpdwpCgr8pueTBLwZnas3slmiMOog/E4PmPgctHzvC+vhQijhUtw5zSsmv0l -bZlw/mVhp5Ta7dTcLBKR8DA3m3vTbaEGkz0xpfQr7GfiSMRbJyvIw88pDK0gyTMD diff --git a/packages/google-cloud-bigtable/test_utils/scripts/circleci/get_tagged_package.py b/packages/google-cloud-bigtable/test_utils/scripts/circleci/get_tagged_package.py deleted file mode 100644 index c148b9dc2370..000000000000 --- 
a/packages/google-cloud-bigtable/test_utils/scripts/circleci/get_tagged_package.py +++ /dev/null @@ -1,64 +0,0 @@ -# Copyright 2016 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Helper to determine package from tag. -Get the current package directory corresponding to the Circle Tag. -""" - -from __future__ import print_function - -import os -import re -import sys - - -TAG_RE = re.compile(r""" - ^ - (?P - (([a-z]+)[_-])*) # pkg-name-with-hyphens-or-underscores (empty allowed) - ([0-9]+)\.([0-9]+)\.([0-9]+) # Version x.y.z (x, y, z all ints) - $ -""", re.VERBOSE) -TAG_ENV = 'CIRCLE_TAG' -ERROR_MSG = '%s env. var. not set' % (TAG_ENV,) -BAD_TAG_MSG = 'Invalid tag name: %s. Expected pkg-name-x.y.z' -CIRCLE_CI_SCRIPTS_DIR = os.path.dirname(__file__) -ROOT_DIR = os.path.realpath( - os.path.join(CIRCLE_CI_SCRIPTS_DIR, '..', '..', '..')) - - -def main(): - """Get the current package directory. - Prints the package directory out so callers can consume it. - """ - if TAG_ENV not in os.environ: - print(ERROR_MSG, file=sys.stderr) - sys.exit(1) - - tag_name = os.environ[TAG_ENV] - match = TAG_RE.match(tag_name) - if match is None: - print(BAD_TAG_MSG % (tag_name,), file=sys.stderr) - sys.exit(1) - - pkg_name = match.group('pkg') - if pkg_name is None: - print(ROOT_DIR) - else: - pkg_dir = pkg_name.rstrip('-').replace('-', '_') - print(os.path.join(ROOT_DIR, pkg_dir)) - - -if __name__ == '__main__': - main() diff --git a/packages/google-cloud-bigtable/test_utils/scripts/circleci/twine_upload.sh b/packages/google-cloud-bigtable/test_utils/scripts/circleci/twine_upload.sh deleted file mode 100755 index 23a4738e90b9..000000000000 --- a/packages/google-cloud-bigtable/test_utils/scripts/circleci/twine_upload.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash - -# Copyright 2016 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -ev - -# If this is not a CircleCI tag, no-op. -if [[ -z "$CIRCLE_TAG" ]]; then - echo "This is not a release tag. Doing nothing." - exit 0 -fi - -# H/T: http://stackoverflow.com/a/246128/1068170 -SCRIPT="$(dirname "${BASH_SOURCE[0]}")/get_tagged_package.py" -# Determine the package directory being deploying on this tag. -PKG_DIR="$(python ${SCRIPT})" - -# Ensure that we have the latest versions of Twine, Wheel, and Setuptools. -python3 -m pip install --upgrade twine wheel setuptools - -# Move into the package, build the distribution and upload. 
-cd ${PKG_DIR} -python3 setup.py sdist bdist_wheel -twine upload dist/* diff --git a/packages/google-cloud-bigtable/test_utils/scripts/get_target_packages.py b/packages/google-cloud-bigtable/test_utils/scripts/get_target_packages.py deleted file mode 100644 index 1d51830cc23a..000000000000 --- a/packages/google-cloud-bigtable/test_utils/scripts/get_target_packages.py +++ /dev/null @@ -1,268 +0,0 @@ -# Copyright 2017 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Print a list of packages which require testing.""" - -import os -import re -import subprocess -import warnings - - -CURRENT_DIR = os.path.realpath(os.path.dirname(__file__)) -BASE_DIR = os.path.realpath(os.path.join(CURRENT_DIR, '..', '..')) -GITHUB_REPO = os.environ.get('GITHUB_REPO', 'google-cloud-python') -CI = os.environ.get('CI', '') -CI_BRANCH = os.environ.get('CIRCLE_BRANCH') -CI_PR = os.environ.get('CIRCLE_PR_NUMBER') -CIRCLE_TAG = os.environ.get('CIRCLE_TAG') -head_hash, head_name = subprocess.check_output(['git', 'show-ref', 'HEAD'] -).strip().decode('ascii').split() -rev_parse = subprocess.check_output( - ['git', 'rev-parse', '--abbrev-ref', 'HEAD'] -).strip().decode('ascii') -MAJOR_DIV = '#' * 78 -MINOR_DIV = '#' + '-' * 77 - -# NOTE: This reg-ex is copied from ``get_tagged_packages``. -TAG_RE = re.compile(r""" - ^ - (?P - (([a-z]+)-)*) # pkg-name-with-hyphens- (empty allowed) - ([0-9]+)\.([0-9]+)\.([0-9]+) # Version x.y.z (x, y, z all ints) - $ -""", re.VERBOSE) - -# This is the current set of dependencies by package. -# As of this writing, the only "real" dependency is that of error_reporting -# (on logging), the rest are just system test dependencies. -PKG_DEPENDENCIES = { - 'logging': {'pubsub'}, -} - - -def get_baseline(): - """Return the baseline commit. - - On a pull request, or on a branch, return the common parent revision - with the master branch. - - Locally, return a value pulled from environment variables, or None if - the environment variables are not set. - - On a push to master, return None. This will effectively cause everything - to be considered to be affected. - """ - - # If this is a pull request or branch, return the tip for master. - # We will test only packages which have changed since that point. - ci_non_master = (CI == 'true') and any([CI_BRANCH != 'master', CI_PR]) - - if ci_non_master: - - repo_url = 'git@github.com:GoogleCloudPlatform/{}'.format(GITHUB_REPO) - subprocess.run(['git', 'remote', 'add', 'baseline', repo_url], - stderr=subprocess.DEVNULL) - subprocess.run(['git', 'pull', 'baseline'], stderr=subprocess.DEVNULL) - - if CI_PR is None and CI_BRANCH is not None: - output = subprocess.check_output([ - 'git', 'merge-base', '--fork-point', - 'baseline/master', CI_BRANCH]) - return output.strip().decode('ascii') - - return 'baseline/master' - - # If environment variables are set identifying what the master tip is, - # use that. 
- if os.environ.get('GOOGLE_CLOUD_TESTING_REMOTE', ''): - remote = os.environ['GOOGLE_CLOUD_TESTING_REMOTE'] - branch = os.environ.get('GOOGLE_CLOUD_TESTING_BRANCH', 'master') - return '%s/%s' % (remote, branch) - - # If we are not in CI and we got this far, issue a warning. - if not CI: - warnings.warn('No baseline could be determined; this means tests ' - 'will run for every package. If this is local ' - 'development, set the $GOOGLE_CLOUD_TESTING_REMOTE ' - 'environment variable.') - - # That is all we can do; return None. - return None - - -def get_changed_files(): - """Return a list of files that have been changed since the baseline. - - If there is no base, return None. - """ - # Get the baseline, and fail quickly if there is no baseline. - baseline = get_baseline() - print('# Baseline commit: {}'.format(baseline)) - if not baseline: - return None - - # Return a list of altered files. - try: - return subprocess.check_output([ - 'git', 'diff', '--name-only', '{}..HEAD'.format(baseline), - ], stderr=subprocess.DEVNULL).decode('utf8').strip().split('\n') - except subprocess.CalledProcessError: - warnings.warn('Unable to perform git diff; falling back to assuming ' - 'all packages have changed.') - return None - - -def reverse_map(dict_of_sets): - """Reverse a map of one-to-many. - - So the map:: - - { - 'A': {'B', 'C'}, - 'B': {'C'}, - } - - becomes - - { - 'B': {'A'}, - 'C': {'A', 'B'}, - } - - Args: - dict_of_sets (dict[set]): A dictionary of sets, mapping - one value to many. - - Returns: - dict[set]: The reversed map. - """ - result = {} - for key, values in dict_of_sets.items(): - for value in values: - result.setdefault(value, set()).add(key) - - return result - -def get_changed_packages(file_list): - """Return a list of changed packages based on the provided file list. - - If the file list is None, then all packages should be considered to be - altered. - """ - # Determine a complete list of packages. - all_packages = set() - for file_ in os.listdir(BASE_DIR): - abs_file = os.path.realpath(os.path.join(BASE_DIR, file_)) - nox_file = os.path.join(abs_file, 'nox.py') - if os.path.isdir(abs_file) and os.path.isfile(nox_file): - all_packages.add(file_) - - # If ther is no file list, send down the full package set. - if file_list is None: - return all_packages - - # Create a set based on the list of changed files. - answer = set() - reverse_deps = reverse_map(PKG_DEPENDENCIES) - for file_ in file_list: - # Ignore root directory changes (setup.py, .gitignore, etc.). - if os.path.sep not in file_: - continue - - # Ignore changes that are not in a package (usually this will be docs). - package = file_.split(os.path.sep, 1)[0] - if package not in all_packages: - continue - - # If there is a change in core, short-circuit now and return - # everything. - if package in ('core',): - return all_packages - - # Add the package, as well as any dependencies this package has. - # NOTE: For now, dependencies only go down one level. - answer.add(package) - answer = answer.union(reverse_deps.get(package, set())) - - # We got this far without being short-circuited; return the final answer. - return answer - - -def get_tagged_package(): - """Return the package corresponding to the current tag. - - If there is not tag, will return :data:`None`. - """ - if CIRCLE_TAG is None: - return - - match = TAG_RE.match(CIRCLE_TAG) - if match is None: - return - - pkg_name = match.group('pkg') - if pkg_name == '': - # NOTE: This corresponds to the "umbrella" tag. 
- return - - return pkg_name.rstrip('-').replace('-', '_') - - -def get_target_packages(): - """Return a list of target packages to be run in the current build. - - If in a tag build, will run only the package(s) that are tagged, otherwise - will run the packages that have file changes in them (or packages that - depend on those). - """ - tagged_package = get_tagged_package() - if tagged_package is None: - file_list = get_changed_files() - print(MAJOR_DIV) - print('# Changed files:') - print(MINOR_DIV) - for file_ in file_list or (): - print('# {}'.format(file_)) - for package in sorted(get_changed_packages(file_list)): - yield package - else: - yield tagged_package - - -def main(): - print(MAJOR_DIV) - print('# Environment') - print(MINOR_DIV) - print('# CircleCI: {}'.format(CI)) - print('# CircleCI branch: {}'.format(CI_BRANCH)) - print('# CircleCI pr: {}'.format(CI_PR)) - print('# CircleCI tag: {}'.format(CIRCLE_TAG)) - print('# HEAD ref: {}'.format(head_hash)) - print('# {}'.format(head_name)) - print('# Git branch: {}'.format(rev_parse)) - print(MAJOR_DIV) - - packages = list(get_target_packages()) - - print(MAJOR_DIV) - print('# Target packages:') - print(MINOR_DIV) - for package in packages: - print(package) - print(MAJOR_DIV) - - -if __name__ == '__main__': - main() diff --git a/packages/google-cloud-bigtable/test_utils/scripts/get_target_packages_kokoro.py b/packages/google-cloud-bigtable/test_utils/scripts/get_target_packages_kokoro.py deleted file mode 100644 index 27d3a0c940ea..000000000000 --- a/packages/google-cloud-bigtable/test_utils/scripts/get_target_packages_kokoro.py +++ /dev/null @@ -1,98 +0,0 @@ -# Copyright 2017 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Print a list of packages which require testing.""" - -import pathlib -import subprocess - -import ci_diff_helper -import requests - - -def print_environment(environment): - print("-> CI environment:") - print('Branch', environment.branch) - print('PR', environment.pr) - print('In PR', environment.in_pr) - print('Repo URL', environment.repo_url) - if environment.in_pr: - print('PR Base', environment.base) - - -def get_base(environment): - if environment.in_pr: - return environment.base - else: - # If we're not in a PR, just calculate the changes between this commit - # and its parent. 
- return 'HEAD~1' - - -def get_changed_files_from_base(base): - return subprocess.check_output([ - 'git', 'diff', '--name-only', f'{base}..HEAD', - ], stderr=subprocess.DEVNULL).decode('utf8').strip().split('\n') - - -_URL_TEMPLATE = ( - 'https://api.github.com/repos/googleapis/google-cloud-python/pulls/' - '{}/files' -) - - -def get_changed_files_from_pr(pr): - url = _URL_TEMPLATE.format(pr) - while url is not None: - response = requests.get(url) - for info in response.json(): - yield info['filename'] - url = response.links.get('next', {}).get('url') - - -def determine_changed_packages(changed_files): - packages = [ - path.parent for path in pathlib.Path('.').glob('*/noxfile.py') - ] - - changed_packages = set() - for file in changed_files: - file = pathlib.Path(file) - for package in packages: - if package in file.parents: - changed_packages.add(package) - - return changed_packages - - -def main(): - environment = ci_diff_helper.get_config() - print_environment(environment) - base = get_base(environment) - - if environment.in_pr: - changed_files = list(get_changed_files_from_pr(environment.pr)) - else: - changed_files = get_changed_files_from_base(base) - - packages = determine_changed_packages(changed_files) - - print(f"Comparing against {base}.") - print("-> Changed packages:") - - for package in packages: - print(package) - - -main() diff --git a/packages/google-cloud-bigtable/test_utils/scripts/run_emulator.py b/packages/google-cloud-bigtable/test_utils/scripts/run_emulator.py deleted file mode 100644 index 287b08640691..000000000000 --- a/packages/google-cloud-bigtable/test_utils/scripts/run_emulator.py +++ /dev/null @@ -1,199 +0,0 @@ -# Copyright 2016 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Run system tests locally with the emulator. - -First makes system calls to spawn the emulator and get the local environment -variable needed for it. Then calls the system tests. -""" - - -import argparse -import os -import subprocess - -import psutil - -from google.cloud.environment_vars import BIGTABLE_EMULATOR -from google.cloud.environment_vars import GCD_DATASET -from google.cloud.environment_vars import GCD_HOST -from google.cloud.environment_vars import PUBSUB_EMULATOR -from run_system_test import run_module_tests - - -BIGTABLE = 'bigtable' -DATASTORE = 'datastore' -PUBSUB = 'pubsub' -PACKAGE_INFO = { - BIGTABLE: (BIGTABLE_EMULATOR,), - DATASTORE: (GCD_DATASET, GCD_HOST), - PUBSUB: (PUBSUB_EMULATOR,), -} -EXTRA = { - DATASTORE: ('--no-legacy',), -} -_DS_READY_LINE = '[datastore] Dev App Server is now running.\n' -_PS_READY_LINE_PREFIX = '[pubsub] INFO: Server started, listening on ' -_BT_READY_LINE_PREFIX = '[bigtable] Cloud Bigtable emulator running on ' - - -def get_parser(): - """Get simple ``argparse`` parser to determine package. - - :rtype: :class:`argparse.ArgumentParser` - :returns: The parser for this script. 
- """ - parser = argparse.ArgumentParser( - description='Run google-cloud system tests against local emulator.') - parser.add_argument('--package', dest='package', - choices=sorted(PACKAGE_INFO.keys()), - default=DATASTORE, help='Package to be tested.') - return parser - - -def get_start_command(package): - """Get command line arguments for starting emulator. - - :type package: str - :param package: The package to start an emulator for. - - :rtype: tuple - :returns: The arguments to be used, in a tuple. - """ - result = ('gcloud', 'beta', 'emulators', package, 'start') - extra = EXTRA.get(package, ()) - return result + extra - - -def get_env_init_command(package): - """Get command line arguments for getting emulator env. info. - - :type package: str - :param package: The package to get environment info for. - - :rtype: tuple - :returns: The arguments to be used, in a tuple. - """ - result = ('gcloud', 'beta', 'emulators', package, 'env-init') - extra = EXTRA.get(package, ()) - return result + extra - - -def datastore_wait_ready(popen): - """Wait until the datastore emulator is ready to use. - - :type popen: :class:`subprocess.Popen` - :param popen: An open subprocess to interact with. - """ - emulator_ready = False - while not emulator_ready: - emulator_ready = popen.stderr.readline() == _DS_READY_LINE - - -def wait_ready_prefix(popen, prefix): - """Wait until the a process encounters a line with matching prefix. - - :type popen: :class:`subprocess.Popen` - :param popen: An open subprocess to interact with. - - :type prefix: str - :param prefix: The prefix to match - """ - emulator_ready = False - while not emulator_ready: - emulator_ready = popen.stderr.readline().startswith(prefix) - - -def wait_ready(package, popen): - """Wait until the emulator is ready to use. - - :type package: str - :param package: The package to check if ready. - - :type popen: :class:`subprocess.Popen` - :param popen: An open subprocess to interact with. - - :raises: :class:`KeyError` if the ``package`` is not among - ``datastore``, ``pubsub`` or ``bigtable``. - """ - if package == DATASTORE: - datastore_wait_ready(popen) - elif package == PUBSUB: - wait_ready_prefix(popen, _PS_READY_LINE_PREFIX) - elif package == BIGTABLE: - wait_ready_prefix(popen, _BT_READY_LINE_PREFIX) - else: - raise KeyError('Package not supported', package) - - -def cleanup(pid): - """Cleanup a process (including all of its children). - - :type pid: int - :param pid: Process ID. - """ - proc = psutil.Process(pid) - for child_proc in proc.children(recursive=True): - try: - child_proc.kill() - child_proc.terminate() - except psutil.NoSuchProcess: - pass - proc.terminate() - proc.kill() - - -def run_tests_in_emulator(package): - """Spawn an emulator instance and run the system tests. - - :type package: str - :param package: The package to run system tests against. - """ - # Make sure this package has environment vars to replace. - env_vars = PACKAGE_INFO[package] - - start_command = get_start_command(package) - # Ignore stdin and stdout, don't pollute the user's output with them. 
- proc_start = subprocess.Popen(start_command, stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - try: - wait_ready(package, proc_start) - env_init_command = get_env_init_command(package) - proc_env = subprocess.Popen(env_init_command, stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - env_status = proc_env.wait() - if env_status != 0: - raise RuntimeError(env_status, proc_env.stderr.read()) - env_lines = proc_env.stdout.read().strip().split('\n') - # Set environment variables before running the system tests. - for env_var in env_vars: - line_prefix = 'export ' + env_var + '=' - value, = [line.split(line_prefix, 1)[1] for line in env_lines - if line.startswith(line_prefix)] - os.environ[env_var] = value - run_module_tests(package, - ignore_requirements=True) - finally: - cleanup(proc_start.pid) - - -def main(): - """Main method to run this script.""" - parser = get_parser() - args = parser.parse_args() - run_tests_in_emulator(args.package) - - -if __name__ == '__main__': - main() diff --git a/packages/google-cloud-bigtable/test_utils/scripts/update_docs.sh b/packages/google-cloud-bigtable/test_utils/scripts/update_docs.sh deleted file mode 100755 index 8cbab9f0dad0..000000000000 --- a/packages/google-cloud-bigtable/test_utils/scripts/update_docs.sh +++ /dev/null @@ -1,93 +0,0 @@ -#!/bin/bash - -# Copyright 2016 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -ev - -GH_OWNER='GoogleCloudPlatform' -GH_PROJECT_NAME='google-cloud-python' - -DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" - -# Function to build the docs. -function build_docs { - rm -rf docs/_build/ - rm -f docs/bigquery/generated/*.rst - # -W -> warnings as errors - # -T -> show full traceback on exception - # -N -> no color - sphinx-build \ - -W -T -N \ - -b html \ - -d docs/_build/doctrees \ - docs/ \ - docs/_build/html/ - return $? -} - -# Only update docs if we are on CircleCI. -if [[ "${CIRCLE_BRANCH}" == "master" ]] && [[ -z "${CIRCLE_PR_NUMBER}" ]]; then - echo "Building new docs on a merged commit." -elif [[ "$1" == "kokoro" ]]; then - echo "Building and publishing docs on Kokoro." -elif [[ -n "${CIRCLE_TAG}" ]]; then - echo "Building new docs on a tag (but will not deploy)." - build_docs - exit $? -else - echo "Not on master nor a release tag." - echo "Building new docs for testing purposes, but not deploying." - build_docs - exit $? -fi - -# Adding GitHub pages branch. `git submodule add` checks it -# out at HEAD. -GH_PAGES_DIR='ghpages' -git submodule add -q -b gh-pages \ - "git@github.com:${GH_OWNER}/${GH_PROJECT_NAME}" ${GH_PAGES_DIR} - -# Determine if we are building a new tag or are building docs -# for master. Then build new docs in docs/_build from master. -if [[ -n "${CIRCLE_TAG}" ]]; then - # Sphinx will use the package version by default. - build_docs -else - SPHINX_RELEASE=$(git log -1 --pretty=%h) build_docs -fi - -# Update gh-pages with the created docs. -cd ${GH_PAGES_DIR} -git rm -fr latest/ -cp -R ../docs/_build/html/ latest/ - -# Update the files push to gh-pages. -git add . 
-git status - -# If there are no changes, just exit cleanly. -if [[ -z "$(git status --porcelain)" ]]; then - echo "Nothing to commit. Exiting without pushing changes." - exit -fi - -# Commit to gh-pages branch to apply changes. -git config --global user.email "dpebot@google.com" -git config --global user.name "dpebot" -git commit -m "Update docs after merge to master." - -# NOTE: This may fail if two docs updates (on merges to master) -# happen in close proximity. -git push -q origin HEAD:gh-pages diff --git a/packages/google-cloud-bigtable/test_utils/setup.py b/packages/google-cloud-bigtable/test_utils/setup.py deleted file mode 100644 index 8e9222a7f862..000000000000 --- a/packages/google-cloud-bigtable/test_utils/setup.py +++ /dev/null @@ -1,64 +0,0 @@ -# Copyright 2017 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os - -from setuptools import find_packages -from setuptools import setup - - -PACKAGE_ROOT = os.path.abspath(os.path.dirname(__file__)) - - -# NOTE: This is duplicated throughout and we should try to -# consolidate. -SETUP_BASE = { - 'author': 'Google Cloud Platform', - 'author_email': 'googleapis-publisher@google.com', - 'scripts': [], - 'url': 'https://github.com/GoogleCloudPlatform/google-cloud-python', - 'license': 'Apache 2.0', - 'platforms': 'Posix; MacOS X; Windows', - 'include_package_data': True, - 'zip_safe': False, - 'classifiers': [ - 'Development Status :: 4 - Beta', - 'Intended Audience :: Developers', - 'License :: OSI Approved :: Apache Software License', - 'Operating System :: OS Independent', - 'Programming Language :: Python :: 2', - 'Programming Language :: Python :: 2.7', - 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.5', - 'Programming Language :: Python :: 3.6', - 'Programming Language :: Python :: 3.7', - 'Topic :: Internet', - ], -} - - -REQUIREMENTS = [ - 'google-auth >= 0.4.0', - 'six', -] - -setup( - name='google-cloud-testutils', - version='0.24.0', - description='System test utilities for google-cloud-python', - packages=find_packages(), - install_requires=REQUIREMENTS, - python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*', - **SETUP_BASE -) diff --git a/packages/google-cloud-bigtable/test_utils/test_utils/__init__.py b/packages/google-cloud-bigtable/test_utils/test_utils/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/packages/google-cloud-bigtable/test_utils/test_utils/imports.py b/packages/google-cloud-bigtable/test_utils/test_utils/imports.py deleted file mode 100644 index 5991af7fc465..000000000000 --- a/packages/google-cloud-bigtable/test_utils/test_utils/imports.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import mock -import six - - -def maybe_fail_import(predicate): - """Create and return a patcher that conditionally makes an import fail. - - Args: - predicate (Callable[[...], bool]): A callable that, if it returns `True`, - triggers an `ImportError`. It must accept the same arguments as the - built-in `__import__` function. - https://docs.python.org/3/library/functions.html#__import__ - - Returns: - A mock patcher object that can be used to enable patched import behavior. - """ - orig_import = six.moves.builtins.__import__ - - def custom_import(name, globals=None, locals=None, fromlist=(), level=0): - if predicate(name, globals, locals, fromlist, level): - raise ImportError - return orig_import(name, globals, locals, fromlist, level) - - return mock.patch.object(six.moves.builtins, "__import__", new=custom_import) diff --git a/packages/google-cloud-bigtable/test_utils/test_utils/retry.py b/packages/google-cloud-bigtable/test_utils/test_utils/retry.py deleted file mode 100644 index e61c001a03e1..000000000000 --- a/packages/google-cloud-bigtable/test_utils/test_utils/retry.py +++ /dev/null @@ -1,207 +0,0 @@ -# Copyright 2016 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import time -from functools import wraps - -import six - -MAX_TRIES = 4 -DELAY = 1 -BACKOFF = 2 - - -def _retry_all(_): - """Retry all caught exceptions.""" - return True - - -class BackoffFailed(Exception): - """Retry w/ backoffs did not complete successfully.""" - - -class RetryBase(object): - """Base for retrying calling a decorated function w/ exponential backoff. - - :type max_tries: int - :param max_tries: Number of times to try (not retry) before giving up. - - :type delay: int - :param delay: Initial delay between retries in seconds. - - :type backoff: int - :param backoff: Backoff multiplier e.g. value of 2 will double the - delay each retry. - - :type logger: logging.Logger instance - :param logger: Logger to use. If None, print. - """ - def __init__(self, max_tries=MAX_TRIES, delay=DELAY, backoff=BACKOFF, - logger=None): - self.max_tries = max_tries - self.delay = delay - self.backoff = backoff - self.logger = logger.warning if logger else six.print_ - - -class RetryErrors(RetryBase): - """Decorator for retrying given exceptions in testing. - - :type exception: Exception or tuple of Exceptions - :param exception: The exception to check or may be a tuple of - exceptions to check. - - :type error_predicate: function, takes caught exception, returns bool - :param error_predicate: Predicate evaluating whether to retry after a - caught exception. 
- - :type max_tries: int - :param max_tries: Number of times to try (not retry) before giving up. - - :type delay: int - :param delay: Initial delay between retries in seconds. - - :type backoff: int - :param backoff: Backoff multiplier e.g. value of 2 will double the - delay each retry. - - :type logger: logging.Logger instance - :param logger: Logger to use. If None, print. - """ - def __init__(self, exception, error_predicate=_retry_all, - max_tries=MAX_TRIES, delay=DELAY, backoff=BACKOFF, - logger=None): - super(RetryErrors, self).__init__(max_tries, delay, backoff, logger) - self.exception = exception - self.error_predicate = error_predicate - - def __call__(self, to_wrap): - @wraps(to_wrap) - def wrapped_function(*args, **kwargs): - tries = 0 - while tries < self.max_tries: - try: - return to_wrap(*args, **kwargs) - except self.exception as caught_exception: - - if not self.error_predicate(caught_exception): - raise - - delay = self.delay * self.backoff**tries - msg = ("%s, Trying again in %d seconds..." % - (caught_exception, delay)) - self.logger(msg) - - time.sleep(delay) - tries += 1 - return to_wrap(*args, **kwargs) - - return wrapped_function - - -class RetryResult(RetryBase): - """Decorator for retrying based on non-error result. - - :type result_predicate: function, takes result, returns bool - :param result_predicate: Predicate evaluating whether to retry after a - result is returned. - - :type max_tries: int - :param max_tries: Number of times to try (not retry) before giving up. - - :type delay: int - :param delay: Initial delay between retries in seconds. - - :type backoff: int - :param backoff: Backoff multiplier e.g. value of 2 will double the - delay each retry. - - :type logger: logging.Logger instance - :param logger: Logger to use. If None, print. - """ - def __init__(self, result_predicate, - max_tries=MAX_TRIES, delay=DELAY, backoff=BACKOFF, - logger=None): - super(RetryResult, self).__init__(max_tries, delay, backoff, logger) - self.result_predicate = result_predicate - - def __call__(self, to_wrap): - @wraps(to_wrap) - def wrapped_function(*args, **kwargs): - tries = 0 - while tries < self.max_tries: - result = to_wrap(*args, **kwargs) - if self.result_predicate(result): - return result - - delay = self.delay * self.backoff**tries - msg = "%s. Trying again in %d seconds..." % ( - self.result_predicate.__name__, delay,) - self.logger(msg) - - time.sleep(delay) - tries += 1 - raise BackoffFailed() - - return wrapped_function - - -class RetryInstanceState(RetryBase): - """Decorator for retrying based on instance state. - - :type instance_predicate: function, takes instance, returns bool - :param instance_predicate: Predicate evaluating whether to retry after an - API-invoking method is called. - - :type max_tries: int - :param max_tries: Number of times to try (not retry) before giving up. - - :type delay: int - :param delay: Initial delay between retries in seconds. - - :type backoff: int - :param backoff: Backoff multiplier e.g. value of 2 will double the - delay each retry. - - :type logger: logging.Logger instance - :param logger: Logger to use. If None, print. 
- """ - def __init__(self, instance_predicate, - max_tries=MAX_TRIES, delay=DELAY, backoff=BACKOFF, - logger=None): - super(RetryInstanceState, self).__init__( - max_tries, delay, backoff, logger) - self.instance_predicate = instance_predicate - - def __call__(self, to_wrap): - instance = to_wrap.__self__ # only instance methods allowed - - @wraps(to_wrap) - def wrapped_function(*args, **kwargs): - tries = 0 - while tries < self.max_tries: - result = to_wrap(*args, **kwargs) - if self.instance_predicate(instance): - return result - - delay = self.delay * self.backoff**tries - msg = "%s. Trying again in %d seconds..." % ( - self.instance_predicate.__name__, delay,) - self.logger(msg) - - time.sleep(delay) - tries += 1 - raise BackoffFailed() - - return wrapped_function diff --git a/packages/google-cloud-bigtable/test_utils/test_utils/system.py b/packages/google-cloud-bigtable/test_utils/test_utils/system.py deleted file mode 100644 index 590dc62a06e6..000000000000 --- a/packages/google-cloud-bigtable/test_utils/test_utils/system.py +++ /dev/null @@ -1,81 +0,0 @@ -# Copyright 2014 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function -import os -import sys -import time - -import google.auth.credentials -from google.auth.environment_vars import CREDENTIALS as TEST_CREDENTIALS - - -# From shell environ. May be None. -CREDENTIALS = os.getenv(TEST_CREDENTIALS) - -ENVIRON_ERROR_MSG = """\ -To run the system tests, you need to set some environment variables. -Please check the CONTRIBUTING guide for instructions. -""" - - -class EmulatorCreds(google.auth.credentials.Credentials): - """A mock credential object. - - Used to avoid unnecessary token refreshing or reliance on the network - while an emulator is running. - """ - - def __init__(self): # pylint: disable=super-init-not-called - self.token = b'seekrit' - self.expiry = None - - @property - def valid(self): - """Would-be validity check of the credentials. - - Always is :data:`True`. - """ - return True - - def refresh(self, unused_request): # pylint: disable=unused-argument - """Off-limits implementation for abstract method.""" - raise RuntimeError('Should never be refreshed.') - - -def check_environ(): - err_msg = None - if CREDENTIALS is None: - err_msg = '\nMissing variables: ' + TEST_CREDENTIALS - elif not os.path.isfile(CREDENTIALS): - err_msg = '\nThe %s path %r is not a file.' % (TEST_CREDENTIALS, - CREDENTIALS) - - if err_msg is not None: - msg = ENVIRON_ERROR_MSG + err_msg - print(msg, file=sys.stderr) - sys.exit(1) - - -def unique_resource_id(delimiter='_'): - """A unique identifier for a resource. - - Intended to help locate resources created in particular - testing environments and at particular times. 
- """ - build_id = os.getenv('CIRCLE_BUILD_NUM', '') - if build_id == '': - return '%s%d' % (delimiter, 1000 * time.time()) - else: - return '%s%s%s%d' % (delimiter, build_id, delimiter, time.time()) From 9c0e509b85e777811edd92d0f2bb8beb33fa7ca9 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Fri, 9 Jul 2021 14:39:23 -0400 Subject: [PATCH 477/892] doc(samples): use operation to ensure creation completed (#356) In addition to showing the better practice (using the operation returned from 'Instance.create' / 'Cluster.create'), this change also hardens the sample against eventual-consistency issues. Closes #353. --- .../samples/instanceadmin/instanceadmin.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/instanceadmin.py b/packages/google-cloud-bigtable/samples/instanceadmin/instanceadmin.py index 13e992eec13d..a900740ed5f7 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/instanceadmin.py +++ b/packages/google-cloud-bigtable/samples/instanceadmin/instanceadmin.py @@ -72,7 +72,9 @@ def run_instance_operations(project_id, instance_id, cluster_id): if not instance.exists(): print("\nCreating an instance") # Create instance with given options - instance.create(clusters=[cluster]) + operation = instance.create(clusters=[cluster]) + # Ensure the operation completes. + operation.result(timeout=30) print("\nCreated instance: {}".format(instance_id)) # [END bigtable_create_prod_instance] @@ -155,7 +157,9 @@ def add_cluster(project_id, instance_id, cluster_id): if cluster.exists(): print("\nCluster not created, as {} already exists.".format(cluster_id)) else: - cluster.create() + operation = cluster.create() + # Ensure the operation completes. + operation.result(timeout=30) print("\nCluster created: {}".format(cluster_id)) # [END bigtable_create_cluster] From fa9a4dc969aa91279ee3cb0bcffe7e6e6dcde590 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Mon, 12 Jul 2021 14:59:29 -0400 Subject: [PATCH 478/892] chore: use templated 'noxfile.py' (#355) Extracted from PR #325. Closes #324. --- packages/google-cloud-bigtable/noxfile.py | 21 +++---- packages/google-cloud-bigtable/owlbot.py | 63 ++++++++++++++++++- .../google-cloud-bigtable/tests/system.py | 28 ++++----- 3 files changed, 84 insertions(+), 28 deletions(-) diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index b0e278df105e..0687bafb6be0 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -45,6 +45,9 @@ "docs", ] +# Error if a python version is missing +nox.options.error_on_missing_interpreters = True + @nox.session(python=DEFAULT_PYTHON_VERSION) def lint(session): @@ -62,10 +65,7 @@ def lint(session): @nox.session(python=DEFAULT_PYTHON_VERSION) def blacken(session): - """Run black. - - Format code to uniform standard. - """ + """Run black. Format code to uniform standard.""" session.install(BLACK_VERSION) session.run( "black", *BLACK_PATHS, @@ -81,6 +81,7 @@ def lint_setup_py(session): def default(session): # Install all test dependencies, then install this package in-place. 
+ constraints_path = str( CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt" ) @@ -143,19 +144,15 @@ def system(session): constraints_path = str( CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt" ) - system_test_path = os.path.join("tests", "system.py") system_test_folder_path = os.path.join("tests", "system") # Check the value of `RUN_SYSTEM_TESTS` env var. It defaults to true. if os.environ.get("RUN_SYSTEM_TESTS", "true") == "false": session.skip("RUN_SYSTEM_TESTS is set to false, skipping") - # Sanity check: Only run tests if the environment variable is set. - if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""): - session.skip("Credentials must be set via environment variable") # Install pyopenssl for mTLS testing. if os.environ.get("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true": - session.install("pyopenssl", "-c", constraints_path) + session.install("pyopenssl") system_test_exists = os.path.exists(system_test_path) system_test_folder_exists = os.path.exists(system_test_folder_path) @@ -208,7 +205,7 @@ def docs(session): """Build the docs for this library.""" session.install("-e", ".") - session.install("sphinx", "alabaster", "recommonmark") + session.install("sphinx==4.0.1", "alabaster", "recommonmark") shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) session.run( @@ -230,7 +227,9 @@ def docfx(session): """Build the docfx yaml files for this library.""" session.install("-e", ".") - session.install("sphinx", "alabaster", "recommonmark", "gcp-sphinx-docfx-yaml") + session.install( + "sphinx==4.0.1", "alabaster", "recommonmark", "gcp-sphinx-docfx-yaml" + ) shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) session.run( diff --git a/packages/google-cloud-bigtable/owlbot.py b/packages/google-cloud-bigtable/owlbot.py index 7d3832962613..4244df3d6926 100644 --- a/packages/google-cloud-bigtable/owlbot.py +++ b/packages/google-cloud-bigtable/owlbot.py @@ -86,10 +86,67 @@ def get_staging_dirs( templated_files = common.py_library( samples=True, # set to True only if there are samples microgenerator=True, - cov_level=99 + cov_level=100, ) -s.move(templated_files, excludes=[".coveragerc", "noxfile.py"]) +s.move(templated_files, excludes=[".coveragerc"]) + +# ---------------------------------------------------------------------------- +# Customize noxfile.py +# ---------------------------------------------------------------------------- + +def place_before(path, text, *before_text, escape=None): + replacement = "\n".join(before_text) + "\n" + text + if escape: + for c in escape: + text = text.replace(c, '\\' + c) + s.replace([path], text, replacement) + +system_emulated_session = """ +@nox.session(python="3.8") +def system_emulated(session): + import subprocess + import signal + + try: + subprocess.call(["gcloud", "--version"]) + except OSError: + session.skip("gcloud not found but required for emulator support") + + # Currently, CI/CD doesn't have beta component of gcloud. 
+ subprocess.call(["gcloud", "components", "install", "beta", "bigtable"]) + + hostport = "localhost:8789" + p = subprocess.Popen( + ["gcloud", "beta", "emulators", "bigtable", "start", "--host-port", hostport] + ) + + session.env["BIGTABLE_EMULATOR_HOST"] = hostport + system(session) + + # Stop Emulator + os.killpg(os.getpgid(p.pid), signal.SIGTERM) + +""" + +place_before( + "noxfile.py", + "@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)\n" + "def system(session):", + system_emulated_session, + escape="()" +) + +# add system_emulated nox session +s.replace("noxfile.py", + """nox.options.sessions = \[ + "unit", + "system",""", + """nox.options.sessions = [ + "unit", + "system_emulated", + "system",""", +) # ---------------------------------------------------------------------------- # Samples templates @@ -97,7 +154,7 @@ def get_staging_dirs( sample_files = common.py_samples(samples=True) for path in sample_files: - s.move(path, excludes=['noxfile.py']) + s.move(path) s.shell.run(["nox", "-s", "blacken"], hide_output=False) diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index 9896154239ce..259e560da976 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -144,9 +144,9 @@ def setUpModule(): # After listing, create the test instances. admin_op = Config.INSTANCE.create(clusters=[Config.CLUSTER]) - admin_op.result(timeout=10) + admin_op.result(timeout=30) data_op = Config.INSTANCE_DATA.create(clusters=[Config.CLUSTER_DATA]) - data_op.result(timeout=10) + data_op.result(timeout=30) def tearDownModule(): @@ -203,7 +203,7 @@ def test_create_instance_defaults(self): self.instances_to_delete.append(instance) # We want to make sure the operation completes. - operation.result(timeout=10) + operation.result(timeout=30) # Create a new instance instance and make sure it is the same. instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID) @@ -232,7 +232,7 @@ def test_create_instance(self): self.instances_to_delete.append(instance) # We want to make sure the operation completes. - operation.result(timeout=10) + operation.result(timeout=30) # Create a new instance instance and make sure it is the same. instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID) @@ -675,7 +675,7 @@ def test_update_display_name_and_labels(self): operation = Config.INSTANCE.update() # We want to make sure the operation completes. - operation.result(timeout=10) + operation.result(timeout=30) # Create a new instance instance and reload it. instance_alt = Config.CLIENT.instance(INSTANCE_ID, labels=LABELS) @@ -692,7 +692,7 @@ def test_update_display_name_and_labels(self): operation = Config.INSTANCE.update() # We want to make sure the operation completes. - operation.result(timeout=10) + operation.result(timeout=30) def test_update_type(self): from google.cloud.bigtable.enums import Instance @@ -709,7 +709,7 @@ def test_update_type(self): self.instances_to_delete.append(instance) # We want to make sure the operation completes. - operation.result(timeout=10) + operation.result(timeout=30) # Unset the display_name instance.display_name = None @@ -718,7 +718,7 @@ def test_update_type(self): operation = instance.update() # We want to make sure the operation completes. - operation.result(timeout=10) + operation.result(timeout=30) # Create a new instance instance and reload it. 
instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID) @@ -734,7 +734,7 @@ def test_update_cluster(self): operation = Config.CLUSTER.update() # We want to make sure the operation completes. - operation.result(timeout=10) + operation.result(timeout=30) # Create a new cluster instance and reload it. alt_cluster = Config.INSTANCE.cluster(CLUSTER_ID) @@ -745,7 +745,7 @@ def test_update_cluster(self): # other test cases. Config.CLUSTER.serve_nodes = SERVE_NODES operation = Config.CLUSTER.update() - operation.result(timeout=20) + operation.result(timeout=30) def test_create_cluster(self): from google.cloud.bigtable.enums import StorageType @@ -1084,7 +1084,7 @@ def test_backup(self): self.assertFalse(temp_backup.exists()) # Testing `Backup.create()` method - temp_backup.create().result() + temp_backup.create().result(timeout=30) # Implicit testing of `Backup.delete()` method self.backups_to_delete.append(temp_backup) @@ -1120,7 +1120,7 @@ def test_backup(self): restored_table = Config.INSTANCE_DATA.table(restored_table_id) temp_table.restore( restored_table_id, cluster_id=CLUSTER_ID_DATA, backup_id=temp_backup_id, - ).result() + ).result(timeout=30) tables = Config.INSTANCE_DATA.list_tables() self.assertIn(restored_table, tables) restored_table.delete() @@ -1134,10 +1134,10 @@ def test_backup(self): cluster_id=alt_cluster_id, location_id=LOCATION_ID, serve_nodes=SERVE_NODES, ) if not Config.IN_EMULATOR: - alt_instance.create(clusters=[alt_cluster]).result(timeout=10) + alt_instance.create(clusters=[alt_cluster]).result(timeout=30) # Testing `restore()`... - temp_backup.restore(restored_table_id, alt_instance_id).result() + temp_backup.restore(restored_table_id, alt_instance_id).result(timeout=30) restored_table = alt_instance.table(restored_table_id) self.assertIn(restored_table, alt_instance.list_tables()) restored_table.delete() From eb96149717e44143a3182ab4530b7f80db053858 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Mon, 12 Jul 2021 21:31:02 +0200 Subject: [PATCH 479/892] chore(deps): update dependency backoff to v1.11.0 (#361) --- .../samples/snippets/writes/requirements-test.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt index 766a8035d690..b179d9b00cec 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt @@ -1,2 +1,2 @@ -backoff==1.10.0 +backoff==1.11.0 pytest==6.2.4 From eb2ef42b918a76e4d9fe136475e4c9b03c1b4bac Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 13 Jul 2021 12:19:45 -0400 Subject: [PATCH 480/892] tests: suppress PDR warnings in table unit tests (#358) Closes #357. --- .../google/cloud/bigtable/table.py | 5 ++- .../tests/unit/test_table.py | 38 ++++++++++++++----- 2 files changed, 31 insertions(+), 12 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index bff4b7a2aba4..8dc4f5e4281f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -13,6 +13,9 @@ # limitations under the License. 
"""User-friendly container for Google Cloud Bigtable Table.""" + +import warnings + from google.api_core import timeout from google.api_core.exceptions import Aborted from google.api_core.exceptions import DeadlineExceeded @@ -45,8 +48,6 @@ bigtable_table_admin as table_admin_messages_v2_pb2, ) -import warnings - # Maximum number of mutations in bulk (MutateRowsRequest message): # (https://cloud.google.com/bigtable/docs/reference/data/rpc/ # google.bigtable.v2#google.bigtable.v2.MutateRowRequest) diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index 109e37dcfcb2..d4ec0e7292af 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -14,8 +14,10 @@ import unittest +import warnings import mock + from ._testing import _make_credentials from google.api_core.exceptions import DeadlineExceeded @@ -224,33 +226,56 @@ def test_row_factory_direct(self): from google.cloud.bigtable.row import DirectRow table, row_key = self._row_methods_helper() - row = table.row(row_key) + with warnings.catch_warnings(record=True) as warned: + row = table.row(row_key) self.assertIsInstance(row, DirectRow) self.assertEqual(row._row_key, row_key) self.assertEqual(row._table, table) + self.assertEqual(len(warned), 1) + self.assertIs(warned[0].category, PendingDeprecationWarning) + def test_row_factory_conditional(self): from google.cloud.bigtable.row import ConditionalRow table, row_key = self._row_methods_helper() filter_ = object() - row = table.row(row_key, filter_=filter_) + + with warnings.catch_warnings(record=True) as warned: + row = table.row(row_key, filter_=filter_) self.assertIsInstance(row, ConditionalRow) self.assertEqual(row._row_key, row_key) self.assertEqual(row._table, table) + self.assertEqual(len(warned), 1) + self.assertIs(warned[0].category, PendingDeprecationWarning) + def test_row_factory_append(self): from google.cloud.bigtable.row import AppendRow table, row_key = self._row_methods_helper() - row = table.row(row_key, append=True) + + with warnings.catch_warnings(record=True) as warned: + row = table.row(row_key, append=True) self.assertIsInstance(row, AppendRow) self.assertEqual(row._row_key, row_key) self.assertEqual(row._table, table) + self.assertEqual(len(warned), 1) + self.assertIs(warned[0].category, PendingDeprecationWarning) + + def test_row_factory_failure(self): + table, row_key = self._row_methods_helper() + with self.assertRaises(ValueError): + with warnings.catch_warnings(record=True) as warned: + table.row(row_key, filter_=object(), append=True) + + self.assertEqual(len(warned), 1) + self.assertIs(warned[0].category, PendingDeprecationWarning) + def test_direct_row(self): from google.cloud.bigtable.row import DirectRow @@ -282,11 +307,6 @@ def test_append_row(self): self.assertEqual(row._row_key, row_key) self.assertEqual(row._table, table) - def test_row_factory_failure(self): - table, row_key = self._row_methods_helper() - with self.assertRaises(ValueError): - table.row(row_key, filter_=object(), append=True) - def test___eq__(self): credentials = _make_credentials() client = self._make_client( @@ -965,7 +985,6 @@ def test_yield_retry_rows(self): from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( client as bigtable_table_admin, ) - import warnings data_api = mock.create_autospec(BigtableClient) table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) @@ -1035,7 +1054,6 @@ def 
test_yield_rows_with_row_set(self): ) from google.cloud.bigtable.row_set import RowSet from google.cloud.bigtable.row_set import RowRange - import warnings data_api = mock.create_autospec(BigtableClient) table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) From 9c48fc61a5dc8280aee175282b9b13607849e621 Mon Sep 17 00:00:00 2001 From: kolea2 <45548808+kolea2@users.noreply.github.com> Date: Tue, 13 Jul 2021 14:30:19 -0400 Subject: [PATCH 481/892] fix: use public 'table_admin_client' property in backups methods (#359) * fix: call table_admin_client in backups methods * fix other tests * add system test --- .../google/cloud/bigtable/backup.py | 10 +++--- .../google-cloud-bigtable/tests/system.py | 3 ++ .../tests/unit/test_backup.py | 34 +++++++++---------- 3 files changed, 25 insertions(+), 22 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/backup.py b/packages/google-cloud-bigtable/google/cloud/bigtable/backup.py index 564c97ad7a57..0991e85f54d5 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/backup.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/backup.py @@ -330,7 +330,7 @@ def create(self, cluster_id=None): expire_time=_datetime_to_pb_timestamp(self.expire_time), ) - api = self._instance._client._table_admin_client + api = self._instance._client.table_admin_client return api.create_backup( request={ "parent": self.parent, @@ -351,7 +351,7 @@ def get(self): due to a retryable error and retry attempts failed. :raises ValueError: If the parameters are invalid. """ - api = self._instance._client._table_admin_client + api = self._instance._client.table_admin_client try: return api.get_backup(request={"name": self.name}) except NotFound: @@ -385,13 +385,13 @@ def update_expire_time(self, new_expire_time): name=self.name, expire_time=_datetime_to_pb_timestamp(new_expire_time), ) update_mask = field_mask_pb2.FieldMask(paths=["expire_time"]) - api = self._instance._client._table_admin_client + api = self._instance._client.table_admin_client api.update_backup(request={"backup": backup_update, "update_mask": update_mask}) self._expire_time = new_expire_time def delete(self): """Delete this Backup.""" - self._instance._client._table_admin_client.delete_backup( + self._instance._client.table_admin_client.delete_backup( request={"name": self.name} ) @@ -423,7 +423,7 @@ def restore(self, table_id, instance_id=None): due to a retryable error and retry attempts failed. :raises: ValueError: If the parameters are invalid. """ - api = self._instance._client._table_admin_client + api = self._instance._client.table_admin_client if instance_id: parent = BigtableTableAdminClient.instance_path( project=self._instance._client.project, instance=instance_id, diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py index 259e560da976..aa3c1cac69bc 100644 --- a/packages/google-cloud-bigtable/tests/system.py +++ b/packages/google-cloud-bigtable/tests/system.py @@ -1080,6 +1080,9 @@ def test_backup(self): expire_time=datetime.datetime.utcfromtimestamp(expire), ) + # Reinitialize the admin client. 
This is to test `_table_admin_client` returns a client object (and not NoneType) + temp_backup._instance._client = Client(admin=True) + # Sanity check for `Backup.exists()` method self.assertFalse(temp_backup.exists()) diff --git a/packages/google-cloud-bigtable/tests/unit/test_backup.py b/packages/google-cloud-bigtable/tests/unit/test_backup.py index bd3e7610085f..a32e18adb0bd 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_backup.py +++ b/packages/google-cloud-bigtable/tests/unit/test_backup.py @@ -332,7 +332,7 @@ def test_create_grpc_error(self): from google.cloud.bigtable_admin_v2.types import table client = _Client() - api = client._table_admin_client = self._make_table_admin_client() + api = client.table_admin_client = self._make_table_admin_client() api.create_backup.side_effect = Unknown("testing") timestamp = self._make_timestamp() @@ -365,7 +365,7 @@ def test_create_already_exists(self): from google.cloud.exceptions import Conflict client = _Client() - api = client._table_admin_client = self._make_table_admin_client() + api = client.table_admin_client = self._make_table_admin_client() api.create_backup.side_effect = Conflict("testing") timestamp = self._make_timestamp() @@ -398,7 +398,7 @@ def test_create_instance_not_found(self): from google.cloud.exceptions import NotFound client = _Client() - api = client._table_admin_client = self._make_table_admin_client() + api = client.table_admin_client = self._make_table_admin_client() api.create_backup.side_effect = NotFound("testing") timestamp = self._make_timestamp() @@ -494,7 +494,7 @@ def test_exists_grpc_error(self): from google.api_core.exceptions import Unknown client = _Client() - api = client._table_admin_client = self._make_table_admin_client() + api = client.table_admin_client = self._make_table_admin_client() api.get_backup.side_effect = Unknown("testing") instance = _Instance(self.INSTANCE_NAME, client=client) @@ -510,7 +510,7 @@ def test_exists_not_found(self): from google.api_core.exceptions import NotFound client = _Client() - api = client._table_admin_client = self._make_table_admin_client() + api = client.table_admin_client = self._make_table_admin_client() api.get_backup.side_effect = NotFound("testing") instance = _Instance(self.INSTANCE_NAME, client=client) @@ -537,7 +537,7 @@ def test_get(self): size_bytes=0, state=state, ) - api = client._table_admin_client = self._make_table_admin_client() + api = client.table_admin_client = self._make_table_admin_client() api.get_backup.return_value = backup_pb instance = _Instance(self.INSTANCE_NAME, client=client) @@ -562,7 +562,7 @@ def test_reload(self): size_bytes=0, state=state, ) - api = client._table_admin_client = self._make_table_admin_client() + api = client.table_admin_client = self._make_table_admin_client() api.get_backup.return_value = backup_pb instance = _Instance(self.INSTANCE_NAME, client=client) @@ -581,7 +581,7 @@ def test_exists_success(self): client = _Client() backup_pb = table.Backup(name=self.BACKUP_NAME) - api = client._table_admin_client = self._make_table_admin_client() + api = client.table_admin_client = self._make_table_admin_client() api.get_backup.return_value = backup_pb instance = _Instance(self.INSTANCE_NAME, client=client) @@ -595,7 +595,7 @@ def test_delete_grpc_error(self): from google.api_core.exceptions import Unknown client = _Client() - api = client._table_admin_client = self._make_table_admin_client() + api = client.table_admin_client = self._make_table_admin_client() api.delete_backup.side_effect = Unknown("testing") 
instance = _Instance(self.INSTANCE_NAME, client=client) backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) @@ -609,7 +609,7 @@ def test_delete_not_found(self): from google.api_core.exceptions import NotFound client = _Client() - api = client._table_admin_client = self._make_table_admin_client() + api = client.table_admin_client = self._make_table_admin_client() api.delete_backup.side_effect = NotFound("testing") instance = _Instance(self.INSTANCE_NAME, client=client) backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) @@ -623,7 +623,7 @@ def test_delete_success(self): from google.protobuf.empty_pb2 import Empty client = _Client() - api = client._table_admin_client = self._make_table_admin_client() + api = client.table_admin_client = self._make_table_admin_client() api.delete_backup.return_value = Empty() instance = _Instance(self.INSTANCE_NAME, client=client) backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) @@ -639,7 +639,7 @@ def test_update_expire_time_grpc_error(self): from google.protobuf import field_mask_pb2 client = _Client() - api = client._table_admin_client = self._make_table_admin_client() + api = client.table_admin_client = self._make_table_admin_client() api.update_backup.side_effect = Unknown("testing") instance = _Instance(self.INSTANCE_NAME, client=client) backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) @@ -663,7 +663,7 @@ def test_update_expire_time_not_found(self): from google.protobuf import field_mask_pb2 client = _Client() - api = client._table_admin_client = self._make_table_admin_client() + api = client.table_admin_client = self._make_table_admin_client() api.update_backup.side_effect = NotFound("testing") instance = _Instance(self.INSTANCE_NAME, client=client) backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) @@ -686,7 +686,7 @@ def test_update_expire_time_success(self): from google.protobuf import field_mask_pb2 client = _Client() - api = client._table_admin_client = self._make_table_admin_client() + api = client.table_admin_client = self._make_table_admin_client() api.update_backup.return_type = table.Backup(name=self.BACKUP_NAME) instance = _Instance(self.INSTANCE_NAME, client=client) backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) @@ -707,7 +707,7 @@ def test_restore_grpc_error(self): from google.api_core.exceptions import Unknown client = _Client() - api = client._table_admin_client = self._make_table_admin_client() + api = client.table_admin_client = self._make_table_admin_client() api.restore_table.side_effect = Unknown("testing") timestamp = self._make_timestamp() @@ -732,7 +732,7 @@ def test_restore_grpc_error(self): def test_restore_cluster_not_set(self): client = _Client() - client._table_admin_client = self._make_table_admin_client() + client.table_admin_client = self._make_table_admin_client() backup = self._make_one( self.BACKUP_ID, _Instance(self.INSTANCE_NAME, client=client), @@ -746,7 +746,7 @@ def test_restore_cluster_not_set(self): def _restore_helper(self, instance_id=None, instance_name=None): op_future = object() client = _Client() - api = client._table_admin_client = self._make_table_admin_client() + api = client.table_admin_client = self._make_table_admin_client() api.restore_table.return_value = op_future timestamp = self._make_timestamp() From cc2eb384ba642a26b0756fd8ed9861e4434a9eea Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 13 Jul 2021 15:57:01 -0400 Subject: 
[PATCH 482/892] test(samples): add backoff to cluster creation sample (#362) Closes #353. --- .../samples/instanceadmin/requirements.txt | 1 + .../samples/instanceadmin/test_instanceadmin.py | 8 ++++++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt index 81a589a745c5..b5ec991770c5 100755 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt @@ -1 +1,2 @@ google-cloud-bigtable==2.0.0 +backoff==1.11.0 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/test_instanceadmin.py b/packages/google-cloud-bigtable/samples/instanceadmin/test_instanceadmin.py index c63222953cf3..929da10e44c4 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/test_instanceadmin.py +++ b/packages/google-cloud-bigtable/samples/instanceadmin/test_instanceadmin.py @@ -17,8 +17,9 @@ import time import warnings +import backoff +from google.api_core import exceptions from google.cloud import bigtable - import pytest import instanceadmin @@ -128,7 +129,10 @@ def test_add_and_delete_cluster(capsys, dispose_of): capsys.readouterr() # throw away output # Add a cluster to that instance - instanceadmin.add_cluster(PROJECT, INSTANCE, CLUSTER2) + # Avoid failing for "instance is currently being changed" by + # applying an exponential backoff + w_backoff = backoff.on_exception(backoff.expo, exceptions.ServiceUnavailable) + w_backoff(instanceadmin.add_cluster)(PROJECT, INSTANCE, CLUSTER2) out = capsys.readouterr().out assert f"Adding cluster to instance {INSTANCE}" in out assert "Listing clusters..." in out From 9e815c9661828ad997e82fbaee1e695f53ba7a33 Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Wed, 14 Jul 2021 10:30:36 -0400 Subject: [PATCH 483/892] chore: release 2.3.1 (#371) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- packages/google-cloud-bigtable/CHANGELOG.md | 7 +++++++ packages/google-cloud-bigtable/setup.py | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index cd8b97fa6f75..d18c5a534948 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,13 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +### [2.3.1](https://www.github.com/googleapis/python-bigtable/compare/v2.3.0...v2.3.1) (2021-07-13) + + +### Bug Fixes + +* use public 'table_admin_client' property in backups methods ([#359](https://www.github.com/googleapis/python-bigtable/issues/359)) ([bc57c79](https://www.github.com/googleapis/python-bigtable/commit/bc57c79640b270ff89fd10ec243dd04559168c5c)) + ## [2.3.0](https://www.github.com/googleapis/python-bigtable/compare/v2.2.0...v2.3.0) (2021-07-01) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index a570f0b067a0..20831e7fb0a9 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -22,7 +22,7 @@ name = "google-cloud-bigtable" description = "Google Cloud Bigtable API client library" -version = "2.3.0" +version = "2.3.1" # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' From 
8dc60a83d90b8af22ae8d2d8263a96bb4575ee84 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Wed, 14 Jul 2021 15:24:41 +0000 Subject: [PATCH 484/892] build(python): exit with success status if no samples found (#372) Source-Link: https://github.com/googleapis/synthtool/commit/53ea3896a52f87c758e79b5a19fa338c83925a98 Post-Processor: gcr.io/repo-automation-bots/owlbot-python:latest@sha256:e1793a23ae0ee9aafb2e3a53b564a351f74790dbe3c2d75f8fc3b8c43e5c036c --- packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml | 2 +- packages/google-cloud-bigtable/.kokoro/test-samples-impl.sh | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index e2b39f946040..a5d3697f2167 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -1,3 +1,3 @@ docker: image: gcr.io/repo-automation-bots/owlbot-python:latest - digest: sha256:99d90d097e4a4710cc8658ee0b5b963f4426d0e424819787c3ac1405c9a26719 + digest: sha256:e1793a23ae0ee9aafb2e3a53b564a351f74790dbe3c2d75f8fc3b8c43e5c036c diff --git a/packages/google-cloud-bigtable/.kokoro/test-samples-impl.sh b/packages/google-cloud-bigtable/.kokoro/test-samples-impl.sh index cf5de74c17a5..311a8d54b9f1 100755 --- a/packages/google-cloud-bigtable/.kokoro/test-samples-impl.sh +++ b/packages/google-cloud-bigtable/.kokoro/test-samples-impl.sh @@ -20,9 +20,9 @@ set -eo pipefail # Enables `**` to include files nested inside sub-folders shopt -s globstar -# Exit early if samples directory doesn't exist -if [ ! -d "./samples" ]; then - echo "No tests run. `./samples` not found" +# Exit early if samples don't exist +if ! find samples -name 'requirements.txt' | grep -q .; then + echo "No tests run. 
'./samples/**/requirements.txt' not found" exit 0 fi From fe0f79a6cbc63830f795714caa42c81b4e0be261 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Wed, 14 Jul 2021 16:34:22 +0000 Subject: [PATCH 485/892] build(python): remove python 3.7 from kokoro Dockerfile (#376) Source-Link: https://github.com/googleapis/synthtool/commit/e44dc0c742b1230887a73552357e0c18dcc30b92 Post-Processor: gcr.io/repo-automation-bots/owlbot-python:latest@sha256:5ff7446edeaede81c3ed58b23a4e76a5403fba1350ce28478045657303b6479d --- .../.github/.OwlBot.lock.yaml | 2 +- .../.kokoro/docker/docs/Dockerfile | 35 ++----------------- 2 files changed, 3 insertions(+), 34 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index a5d3697f2167..cb06536dab0b 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -1,3 +1,3 @@ docker: image: gcr.io/repo-automation-bots/owlbot-python:latest - digest: sha256:e1793a23ae0ee9aafb2e3a53b564a351f74790dbe3c2d75f8fc3b8c43e5c036c + digest: sha256:5ff7446edeaede81c3ed58b23a4e76a5403fba1350ce28478045657303b6479d diff --git a/packages/google-cloud-bigtable/.kokoro/docker/docs/Dockerfile b/packages/google-cloud-bigtable/.kokoro/docker/docs/Dockerfile index 412b0b56a921..4e1b1fb8b5a5 100644 --- a/packages/google-cloud-bigtable/.kokoro/docker/docs/Dockerfile +++ b/packages/google-cloud-bigtable/.kokoro/docker/docs/Dockerfile @@ -40,6 +40,7 @@ RUN apt-get update \ libssl-dev \ libsqlite3-dev \ portaudio19-dev \ + python3-distutils \ redis-server \ software-properties-common \ ssh \ @@ -59,40 +60,8 @@ RUN apt-get update \ && rm -rf /var/lib/apt/lists/* \ && rm -f /var/cache/apt/archives/*.deb - -COPY fetch_gpg_keys.sh /tmp -# Install the desired versions of Python. -RUN set -ex \ - && export GNUPGHOME="$(mktemp -d)" \ - && echo "disable-ipv6" >> "${GNUPGHOME}/dirmngr.conf" \ - && /tmp/fetch_gpg_keys.sh \ - && for PYTHON_VERSION in 3.7.8 3.8.5; do \ - wget --no-check-certificate -O python-${PYTHON_VERSION}.tar.xz "https://www.python.org/ftp/python/${PYTHON_VERSION%%[a-z]*}/Python-$PYTHON_VERSION.tar.xz" \ - && wget --no-check-certificate -O python-${PYTHON_VERSION}.tar.xz.asc "https://www.python.org/ftp/python/${PYTHON_VERSION%%[a-z]*}/Python-$PYTHON_VERSION.tar.xz.asc" \ - && gpg --batch --verify python-${PYTHON_VERSION}.tar.xz.asc python-${PYTHON_VERSION}.tar.xz \ - && rm -r python-${PYTHON_VERSION}.tar.xz.asc \ - && mkdir -p /usr/src/python-${PYTHON_VERSION} \ - && tar -xJC /usr/src/python-${PYTHON_VERSION} --strip-components=1 -f python-${PYTHON_VERSION}.tar.xz \ - && rm python-${PYTHON_VERSION}.tar.xz \ - && cd /usr/src/python-${PYTHON_VERSION} \ - && ./configure \ - --enable-shared \ - # This works only on Python 2.7 and throws a warning on every other - # version, but seems otherwise harmless. 
- --enable-unicode=ucs4 \ - --with-system-ffi \ - --without-ensurepip \ - && make -j$(nproc) \ - && make install \ - && ldconfig \ - ; done \ - && rm -rf "${GNUPGHOME}" \ - && rm -rf /usr/src/python* \ - && rm -rf ~/.cache/ - RUN wget -O /tmp/get-pip.py 'https://bootstrap.pypa.io/get-pip.py' \ - && python3.7 /tmp/get-pip.py \ && python3.8 /tmp/get-pip.py \ && rm /tmp/get-pip.py -CMD ["python3.7"] +CMD ["python3.8"] From 4da8aacf71f857c9b6464445ce5b98202bb0180f Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Wed, 14 Jul 2021 18:42:22 +0200 Subject: [PATCH 486/892] chore(deps): update dependency apache-beam to v2.31.0 (#354) --- packages/google-cloud-bigtable/samples/beam/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index 42da7391c189..b7fce4d899ef 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ -apache-beam==2.30.0 +apache-beam==2.31.0 google-cloud-bigtable<2.0.0 google-cloud-core==1.7.1 \ No newline at end of file From d079eeacc6f0ad354352e16d6c49c08f49909938 Mon Sep 17 00:00:00 2001 From: kolea2 <45548808+kolea2@users.noreply.github.com> Date: Thu, 15 Jul 2021 12:09:35 -0400 Subject: [PATCH 487/892] deps(samples): update google-cloud-bigtable to 2.3.1 (#375) Closes #352. --- packages/google-cloud-bigtable/samples/hello/requirements.txt | 2 +- .../samples/instanceadmin/requirements.txt | 2 +- .../google-cloud-bigtable/samples/metricscaler/requirements.txt | 2 +- .../google-cloud-bigtable/samples/quickstart/requirements.txt | 2 +- .../samples/snippets/filters/requirements.txt | 2 +- .../samples/snippets/reads/requirements.txt | 2 +- .../samples/snippets/writes/requirements.txt | 2 +- .../google-cloud-bigtable/samples/tableadmin/requirements.txt | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/hello/requirements.txt b/packages/google-cloud-bigtable/samples/hello/requirements.txt index 8161b7c19d3d..e642d3960fa6 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.0.0 +google-cloud-bigtable==2.3.1 google-cloud-core==1.7.1 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt index b5ec991770c5..b50c7d683764 100755 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.0.0 +google-cloud-bigtable==2.3.1 backoff==1.11.0 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index 72c1c3d8664f..d7473b432b46 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.0.0 +google-cloud-bigtable==2.3.1 google-cloud-monitoring==2.4.0 diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt index 81a589a745c5..5197d54ba05a 100644 --- 
a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.0.0 +google-cloud-bigtable==2.3.1 diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt index 17c553b39745..83fd1d5e2306 100755 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.0.0 +google-cloud-bigtable==2.3.1 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt index 17c553b39745..83fd1d5e2306 100755 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.0.0 +google-cloud-bigtable==2.3.1 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt index d7b9b5c81616..f9a2edd68809 100755 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.0.0 \ No newline at end of file +google-cloud-bigtable==2.3.1 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt index 81a589a745c5..5197d54ba05a 100755 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.0.0 +google-cloud-bigtable==2.3.1 From 2bb23912ca7f323c2be0db77bfa9bea916fe3d4c Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Thu, 15 Jul 2021 20:42:28 +0200 Subject: [PATCH 488/892] chore(deps): update dependency backoff to v1.11.1 (#373) --- .../samples/instanceadmin/requirements.txt | 2 +- .../samples/snippets/writes/requirements-test.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt index b50c7d683764..807a82ce39d1 100755 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt @@ -1,2 +1,2 @@ google-cloud-bigtable==2.3.1 -backoff==1.11.0 +backoff==1.11.1 diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt index b179d9b00cec..0db5cc446cf1 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt @@ -1,2 +1,2 @@ -backoff==1.11.0 +backoff==1.11.1 pytest==6.2.4 From 70b3b9b5f7bc6ab8ae02a6c79b867b3c16bb43ae Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Fri, 16 Jul 2021 10:37:26 -0400 Subject: [PATCH 489/892] chore: pin 'google-{api,cloud}-core' to allow 2.x versions (#377) --- 
packages/google-cloud-bigtable/setup.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 20831e7fb0a9..7f91d5d2c87d 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -29,8 +29,8 @@ # 'Development Status :: 5 - Production/Stable' release_status = "Development Status :: 5 - Production/Stable" dependencies = [ - "google-api-core[grpc] >= 1.26.0, < 2.0.0dev", - "google-cloud-core >= 1.4.1, < 2.0dev", + "google-api-core[grpc] >= 1.26.0, < 3.0.0dev", + "google-cloud-core >= 1.4.1, < 3.0dev", "grpc-google-iam-v1 >= 0.12.3, < 0.13dev", "proto-plus >= 1.13.0", "libcst >= 0.2.5", From c4b71cee549c6fcd0724c0b134c704dbcba0dd9e Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Mon, 19 Jul 2021 14:44:24 -0400 Subject: [PATCH 490/892] chore: 'requirements.txt' is not a script (#374) --- .../google-cloud-bigtable/samples/instanceadmin/requirements.txt | 0 .../samples/snippets/filters/requirements.txt | 0 .../google-cloud-bigtable/samples/snippets/reads/requirements.txt | 0 .../samples/snippets/writes/requirements.txt | 0 .../google-cloud-bigtable/samples/tableadmin/requirements.txt | 0 5 files changed, 0 insertions(+), 0 deletions(-) mode change 100755 => 100644 packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt mode change 100755 => 100644 packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt mode change 100755 => 100644 packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt mode change 100755 => 100644 packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt mode change 100755 => 100644 packages/google-cloud-bigtable/samples/tableadmin/requirements.txt diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt old mode 100755 new mode 100644 diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt old mode 100755 new mode 100644 diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt old mode 100755 new mode 100644 diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt old mode 100755 new mode 100644 diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt old mode 100755 new mode 100644 From c9586262f0b37a04268606e578abe1982afecb86 Mon Sep 17 00:00:00 2001 From: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Date: Tue, 20 Jul 2021 03:44:08 -0600 Subject: [PATCH 491/892] fix(deps): pin 'google-{api,cloud}-core', 'google-auth' to allow 2.x versions (#379) Expand pins on library dependencies in preparation for these dependencies taking a new major version. See https://github.com/googleapis/google-cloud-python/issues/10566. 
--- packages/google-cloud-bigtable/setup.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 7f91d5d2c87d..56ade5d39b01 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -29,8 +29,14 @@ # 'Development Status :: 5 - Production/Stable' release_status = "Development Status :: 5 - Production/Stable" dependencies = [ - "google-api-core[grpc] >= 1.26.0, < 3.0.0dev", - "google-cloud-core >= 1.4.1, < 3.0dev", + # NOTE: Maintainers, please do not require google-api-core>=2.x.x + # Until this issue is closed + # https://github.com/googleapis/google-cloud-python/issues/10566 + "google-api-core[grpc] >= 1.26.0, <3.0.0dev", + # NOTE: Maintainers, please do not require google-api-core>=2.x.x + # Until this issue is closed + # https://github.com/googleapis/google-cloud-python/issues/10566 + "google-cloud-core >= 1.4.1, <3.0.0dev", "grpc-google-iam-v1 >= 0.12.3, < 0.13dev", "proto-plus >= 1.13.0", "libcst >= 0.2.5", From 0e3d769c58b68299f14ab32c4af7d0f8bf7c7bdc Mon Sep 17 00:00:00 2001 From: Christopher Wilcox Date: Tue, 20 Jul 2021 11:07:41 -0700 Subject: [PATCH 492/892] fix: directly append to pb for beter read row performance (#382) --- packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py index 0269d8761c2f..32a9bd1e30de 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py @@ -137,7 +137,7 @@ def _update_message_request(self, message): :param message: The ``ReadRowsRequest`` protobuf """ for each in self.row_keys: - message.rows.row_keys.append(_to_bytes(each)) + message.rows.row_keys._pb.append(_to_bytes(each)) for each in self.row_ranges: r_kwrags = each.get_range_kwargs() From 8d51b2f246b02e2910373ac4078e841e709c3d28 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 20 Jul 2021 14:14:13 -0400 Subject: [PATCH 493/892] chore(samples): increase timeouts for instance/cluster creation (#384) Closes #383. --- .../samples/instanceadmin/instanceadmin.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/instanceadmin.py b/packages/google-cloud-bigtable/samples/instanceadmin/instanceadmin.py index a900740ed5f7..13234f6c77f4 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/instanceadmin.py +++ b/packages/google-cloud-bigtable/samples/instanceadmin/instanceadmin.py @@ -74,7 +74,7 @@ def run_instance_operations(project_id, instance_id, cluster_id): # Create instance with given options operation = instance.create(clusters=[cluster]) # Ensure the operation completes. - operation.result(timeout=30) + operation.result(timeout=60) print("\nCreated instance: {}".format(instance_id)) # [END bigtable_create_prod_instance] @@ -159,7 +159,7 @@ def add_cluster(project_id, instance_id, cluster_id): else: operation = cluster.create() # Ensure the operation completes. 
- operation.result(timeout=30) + operation.result(timeout=60) print("\nCluster created: {}".format(cluster_id)) # [END bigtable_create_cluster] From 85a56042a254d4d394dab460219144db6d2c065b Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Tue, 20 Jul 2021 16:53:32 -0400 Subject: [PATCH 494/892] chore: release 2.3.2 (#378) --- packages/google-cloud-bigtable/CHANGELOG.md | 8 ++++++++ packages/google-cloud-bigtable/setup.py | 2 +- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index d18c5a534948..9eea76e4239c 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,14 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +### [2.3.2](https://www.github.com/googleapis/python-bigtable/compare/v2.3.1...v2.3.2) (2021-07-20) + + +### Bug Fixes + +* **deps:** pin 'google-{api,cloud}-core', 'google-auth' to allow 2.x versions ([#379](https://www.github.com/googleapis/python-bigtable/issues/379)) ([95b2e13](https://www.github.com/googleapis/python-bigtable/commit/95b2e13b776dca4a6998313c41aa960ffe2e47e9)) +* directly append to pb for beter read row performance ([#382](https://www.github.com/googleapis/python-bigtable/issues/382)) ([7040e11](https://www.github.com/googleapis/python-bigtable/commit/7040e113b93bb2e0625c054486305235d8f14c2a)) + ### [2.3.1](https://www.github.com/googleapis/python-bigtable/compare/v2.3.0...v2.3.1) (2021-07-13) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 56ade5d39b01..ae00d7cd40a3 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -22,7 +22,7 @@ name = "google-cloud-bigtable" description = "Google Cloud Bigtable API client library" -version = "2.3.1" +version = "2.3.2" # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' From 923dd16d4e23bd310d2458d57138034450ff9e77 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Wed, 21 Jul 2021 17:10:09 -0400 Subject: [PATCH 495/892] tests: split systest into a seperate Kokoro build (#390) Closes #388. --- .../google-cloud-bigtable/.github/sync-repo-settings.yaml | 1 + .../google-cloud-bigtable/.kokoro/presubmit/presubmit.cfg | 8 +++++++- .../.kokoro/presubmit/system-3.8.cfg | 7 +++++++ packages/google-cloud-bigtable/owlbot.py | 1 + 4 files changed, 16 insertions(+), 1 deletion(-) create mode 100644 packages/google-cloud-bigtable/.kokoro/presubmit/system-3.8.cfg diff --git a/packages/google-cloud-bigtable/.github/sync-repo-settings.yaml b/packages/google-cloud-bigtable/.github/sync-repo-settings.yaml index 97245042dc30..84e25a9ced6d 100644 --- a/packages/google-cloud-bigtable/.github/sync-repo-settings.yaml +++ b/packages/google-cloud-bigtable/.github/sync-repo-settings.yaml @@ -29,6 +29,7 @@ branchProtectionRules: # List of required status check contexts that must pass for commits to be accepted to matching branches. 
requiredStatusCheckContexts: - 'Kokoro' + - 'Kokoro system-3.8' - 'cla/google' # List of explicit permissions to add (additive only) permissionRules: diff --git a/packages/google-cloud-bigtable/.kokoro/presubmit/presubmit.cfg b/packages/google-cloud-bigtable/.kokoro/presubmit/presubmit.cfg index 8f43917d92fe..b158096f0ae2 100644 --- a/packages/google-cloud-bigtable/.kokoro/presubmit/presubmit.cfg +++ b/packages/google-cloud-bigtable/.kokoro/presubmit/presubmit.cfg @@ -1 +1,7 @@ -# Format: //devtools/kokoro/config/proto/build.proto \ No newline at end of file +# Format: //devtools/kokoro/config/proto/build.proto + +# Disable system tests. +env_vars: { + key: "RUN_SYSTEM_TESTS" + value: "false" +} diff --git a/packages/google-cloud-bigtable/.kokoro/presubmit/system-3.8.cfg b/packages/google-cloud-bigtable/.kokoro/presubmit/system-3.8.cfg new file mode 100644 index 000000000000..f4bcee3db0f0 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/presubmit/system-3.8.cfg @@ -0,0 +1,7 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Only run this nox session. +env_vars: { + key: "NOX_SESSION" + value: "system-3.8" +} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/owlbot.py b/packages/google-cloud-bigtable/owlbot.py index 4244df3d6926..71c1186c23f8 100644 --- a/packages/google-cloud-bigtable/owlbot.py +++ b/packages/google-cloud-bigtable/owlbot.py @@ -85,6 +85,7 @@ def get_staging_dirs( # ---------------------------------------------------------------------------- templated_files = common.py_library( samples=True, # set to True only if there are samples + split_system_tests=True, microgenerator=True, cov_level=100, ) From c3d78b96572c8b79411fd27fb60372cf001992df Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Thu, 22 Jul 2021 20:57:45 +0200 Subject: [PATCH 496/892] chore(deps): update dependency google-cloud-monitoring to v2.4.1 (#392) --- .../google-cloud-bigtable/samples/metricscaler/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index d7473b432b46..6ec0b65468d9 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ google-cloud-bigtable==2.3.1 -google-cloud-monitoring==2.4.0 +google-cloud-monitoring==2.4.1 From 0f43f15f21c913aa1a86d726b0ae88e940e2979f Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Thu, 22 Jul 2021 18:09:23 -0400 Subject: [PATCH 497/892] chore: modernize README (#387) --- packages/google-cloud-bigtable/README.rst | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/README.rst b/packages/google-cloud-bigtable/README.rst index 5330d231688b..859012f5862c 100644 --- a/packages/google-cloud-bigtable/README.rst +++ b/packages/google-cloud-bigtable/README.rst @@ -51,11 +51,17 @@ dependencies. Supported Python Versions ^^^^^^^^^^^^^^^^^^^^^^^^^ -Python >= 3.5 + +Python >= 3.6 Deprecated Python Versions ^^^^^^^^^^^^^^^^^^^^^^^^^^ -Python == 2.7. Python 2.7 support will be removed on January 1, 2020. + +- Python 2.7: the last released version which supported Python 2.7 was + version 1.7.0, released 2021-02-09. + +- Python 3.5: the last released version which supported Python 3.5 was + version 1.7.0, released 2021-02-09. 
Mac/Linux From b08009e2a1f91b9b2fa103e9d55880d610f37b61 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Fri, 23 Jul 2021 15:33:36 +0000 Subject: [PATCH 498/892] chore: fix kokoro config for samples (#396) Source-Link: https://github.com/googleapis/synthtool/commit/dd05f9d12f134871c9e45282349c9856fbebecdd Post-Processor: gcr.io/repo-automation-bots/owlbot-python:latest@sha256:aea14a583128771ae8aefa364e1652f3c56070168ef31beb203534222d842b8b --- .../.github/.OwlBot.lock.yaml | 2 +- .../samples/python3.6/periodic-head.cfg | 2 +- .../samples/python3.7/periodic-head.cfg | 2 +- .../samples/python3.8/periodic-head.cfg | 2 +- .../samples/python3.9/periodic-head.cfg | 2 +- .../google-cloud-bigtable/CONTRIBUTING.rst | 24 +++++++++++++++++++ 6 files changed, 29 insertions(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index cb06536dab0b..9ee60f7e4850 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -1,3 +1,3 @@ docker: image: gcr.io/repo-automation-bots/owlbot-python:latest - digest: sha256:5ff7446edeaede81c3ed58b23a4e76a5403fba1350ce28478045657303b6479d + digest: sha256:aea14a583128771ae8aefa364e1652f3c56070168ef31beb203534222d842b8b diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.6/periodic-head.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.6/periodic-head.cfg index f9cfcd33e058..be25a34f9ad3 100644 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.6/periodic-head.cfg +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.6/periodic-head.cfg @@ -7,5 +7,5 @@ env_vars: { env_vars: { key: "TRAMPOLINE_BUILD_FILE" - value: "github/python-pubsub/.kokoro/test-samples-against-head.sh" + value: "github/python-bigtable/.kokoro/test-samples-against-head.sh" } diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.7/periodic-head.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.7/periodic-head.cfg index f9cfcd33e058..be25a34f9ad3 100644 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.7/periodic-head.cfg +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.7/periodic-head.cfg @@ -7,5 +7,5 @@ env_vars: { env_vars: { key: "TRAMPOLINE_BUILD_FILE" - value: "github/python-pubsub/.kokoro/test-samples-against-head.sh" + value: "github/python-bigtable/.kokoro/test-samples-against-head.sh" } diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.8/periodic-head.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.8/periodic-head.cfg index f9cfcd33e058..be25a34f9ad3 100644 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.8/periodic-head.cfg +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.8/periodic-head.cfg @@ -7,5 +7,5 @@ env_vars: { env_vars: { key: "TRAMPOLINE_BUILD_FILE" - value: "github/python-pubsub/.kokoro/test-samples-against-head.sh" + value: "github/python-bigtable/.kokoro/test-samples-against-head.sh" } diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.9/periodic-head.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.9/periodic-head.cfg index f9cfcd33e058..be25a34f9ad3 100644 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.9/periodic-head.cfg +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.9/periodic-head.cfg @@ -7,5 +7,5 @@ env_vars: { env_vars: { key: 
"TRAMPOLINE_BUILD_FILE" - value: "github/python-pubsub/.kokoro/test-samples-against-head.sh" + value: "github/python-bigtable/.kokoro/test-samples-against-head.sh" } diff --git a/packages/google-cloud-bigtable/CONTRIBUTING.rst b/packages/google-cloud-bigtable/CONTRIBUTING.rst index c97388fa83f1..b8c70c94cc6a 100644 --- a/packages/google-cloud-bigtable/CONTRIBUTING.rst +++ b/packages/google-cloud-bigtable/CONTRIBUTING.rst @@ -177,6 +177,30 @@ Build the docs via: $ nox -s docs +************************* +Samples and code snippets +************************* + +Code samples and snippets live in the `samples/` catalogue. Feel free to +provide more examples, but make sure to write tests for those examples. +Each folder containing example code requires its own `noxfile.py` script +which automates testing. If you decide to create a new folder, you can +base it on the `samples/snippets` folder (providing `noxfile.py` and +the requirements files). + +The tests will run against a real Google Cloud Project, so you should +configure them just like the System Tests. + +- To run sample tests, you can execute:: + + # Run all tests in a folder + $ cd samples/snippets + $ nox -s py-3.8 + + # Run a single sample test + $ cd samples/snippets + $ nox -s py-3.8 -- -k + ******************************************** Note About ``README`` as it pertains to PyPI ******************************************** From b14a2cdeaa991fef9f84808d82d09dd7ebe54d66 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Sat, 24 Jul 2021 10:16:12 +0000 Subject: [PATCH 499/892] fix: enable self signed jwt for grpc (#397) PiperOrigin-RevId: 386504689 Source-Link: https://github.com/googleapis/googleapis/commit/762094a99ac6e03a17516b13dfbef37927267a70 Source-Link: https://github.com/googleapis/googleapis-gen/commit/6bfc480e1a161d5de121c2bcc3745885d33b265a --- .../bigtable_instance_admin/client.py | 4 +++ .../services/bigtable_table_admin/client.py | 4 +++ .../bigtable_v2/services/bigtable/client.py | 4 +++ .../test_bigtable_instance_admin.py | 31 +++++++++++-------- .../test_bigtable_table_admin.py | 31 +++++++++++-------- .../unit/gapic/bigtable_v2/test_bigtable.py | 29 ++++++++++------- 6 files changed, 66 insertions(+), 37 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py index ea12552fc6b3..0e3d57d4bc7a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -408,6 +408,10 @@ def __init__( client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, + always_use_jwt_access=( + Transport == type(self).get_transport_class("grpc") + or Transport == type(self).get_transport_class("grpc_asyncio") + ), ) def create_instance( diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index afa920a55350..49f024e815f8 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ 
b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -446,6 +446,10 @@ def __init__( client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, + always_use_jwt_access=( + Transport == type(self).get_transport_class("grpc") + or Transport == type(self).get_transport_class("grpc_asyncio") + ), ) def create_table( diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py index 9448e2af7570..8efdceca2030 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py @@ -344,6 +344,10 @@ def __init__( client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, + always_use_jwt_access=( + Transport == type(self).get_transport_class("grpc") + or Transport == type(self).get_transport_class("grpc_asyncio") + ), ) def read_rows( diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index 405cabb9ba32..f4180811bf49 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -135,18 +135,6 @@ def test_bigtable_instance_admin_client_from_service_account_info(client_class): assert client.transport._host == "bigtableadmin.googleapis.com:443" -@pytest.mark.parametrize( - "client_class", [BigtableInstanceAdminClient, BigtableInstanceAdminAsyncClient,] -) -def test_bigtable_instance_admin_client_service_account_always_use_jwt(client_class): - with mock.patch.object( - service_account.Credentials, "with_always_use_jwt_access", create=True - ) as use_jwt: - creds = service_account.Credentials(None, None, None) - client = client_class(credentials=creds) - use_jwt.assert_not_called() - - @pytest.mark.parametrize( "transport_class,transport_name", [ @@ -154,7 +142,7 @@ def test_bigtable_instance_admin_client_service_account_always_use_jwt(client_cl (transports.BigtableInstanceAdminGrpcAsyncIOTransport, "grpc_asyncio"), ], ) -def test_bigtable_instance_admin_client_service_account_always_use_jwt_true( +def test_bigtable_instance_admin_client_service_account_always_use_jwt( transport_class, transport_name ): with mock.patch.object( @@ -164,6 +152,13 @@ def test_bigtable_instance_admin_client_service_account_always_use_jwt_true( transport = transport_class(credentials=creds, always_use_jwt_access=True) use_jwt.assert_called_once_with(True) + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + @pytest.mark.parametrize( "client_class", [BigtableInstanceAdminClient, BigtableInstanceAdminAsyncClient,] @@ -248,6 +243,7 @@ def test_bigtable_instance_admin_client_client_options( client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, ) # Check the case api_endpoint is not 
provided and GOOGLE_API_USE_MTLS_ENDPOINT is @@ -264,6 +260,7 @@ def test_bigtable_instance_admin_client_client_options( client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is @@ -280,6 +277,7 @@ def test_bigtable_instance_admin_client_client_options( client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has @@ -308,6 +306,7 @@ def test_bigtable_instance_admin_client_client_options( client_cert_source_for_mtls=None, quota_project_id="octopus", client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, ) @@ -384,6 +383,7 @@ def test_bigtable_instance_admin_client_mtls_env_auto( client_cert_source_for_mtls=expected_client_cert_source, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, ) # Check the case ADC client cert is provided. Whether client cert is used depends on @@ -417,6 +417,7 @@ def test_bigtable_instance_admin_client_mtls_env_auto( client_cert_source_for_mtls=expected_client_cert_source, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, ) # Check the case client_cert_source and ADC client cert are not provided. @@ -438,6 +439,7 @@ def test_bigtable_instance_admin_client_mtls_env_auto( client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, ) @@ -472,6 +474,7 @@ def test_bigtable_instance_admin_client_client_options_scopes( client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, ) @@ -506,6 +509,7 @@ def test_bigtable_instance_admin_client_client_options_credentials_file( client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, ) @@ -525,6 +529,7 @@ def test_bigtable_instance_admin_client_client_options_from_dict(): client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, ) diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index 2de0e03d174c..6bfe7d012c16 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -138,18 +138,6 @@ def test_bigtable_table_admin_client_from_service_account_info(client_class): assert client.transport._host == "bigtableadmin.googleapis.com:443" -@pytest.mark.parametrize( - "client_class", [BigtableTableAdminClient, BigtableTableAdminAsyncClient,] -) -def test_bigtable_table_admin_client_service_account_always_use_jwt(client_class): - with mock.patch.object( - service_account.Credentials, "with_always_use_jwt_access", create=True - ) as use_jwt: - creds = service_account.Credentials(None, None, None) - client = client_class(credentials=creds) - use_jwt.assert_not_called() - - @pytest.mark.parametrize( "transport_class,transport_name", [ @@ -157,7 +145,7 @@ def 
test_bigtable_table_admin_client_service_account_always_use_jwt(client_class (transports.BigtableTableAdminGrpcAsyncIOTransport, "grpc_asyncio"), ], ) -def test_bigtable_table_admin_client_service_account_always_use_jwt_true( +def test_bigtable_table_admin_client_service_account_always_use_jwt( transport_class, transport_name ): with mock.patch.object( @@ -167,6 +155,13 @@ def test_bigtable_table_admin_client_service_account_always_use_jwt_true( transport = transport_class(credentials=creds, always_use_jwt_access=True) use_jwt.assert_called_once_with(True) + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + @pytest.mark.parametrize( "client_class", [BigtableTableAdminClient, BigtableTableAdminAsyncClient,] @@ -247,6 +242,7 @@ def test_bigtable_table_admin_client_client_options( client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is @@ -263,6 +259,7 @@ def test_bigtable_table_admin_client_client_options( client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is @@ -279,6 +276,7 @@ def test_bigtable_table_admin_client_client_options( client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has @@ -307,6 +305,7 @@ def test_bigtable_table_admin_client_client_options( client_cert_source_for_mtls=None, quota_project_id="octopus", client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, ) @@ -383,6 +382,7 @@ def test_bigtable_table_admin_client_mtls_env_auto( client_cert_source_for_mtls=expected_client_cert_source, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, ) # Check the case ADC client cert is provided. Whether client cert is used depends on @@ -416,6 +416,7 @@ def test_bigtable_table_admin_client_mtls_env_auto( client_cert_source_for_mtls=expected_client_cert_source, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, ) # Check the case client_cert_source and ADC client cert are not provided. 
@@ -437,6 +438,7 @@ def test_bigtable_table_admin_client_mtls_env_auto( client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, ) @@ -467,6 +469,7 @@ def test_bigtable_table_admin_client_client_options_scopes( client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, ) @@ -497,6 +500,7 @@ def test_bigtable_table_admin_client_client_options_credentials_file( client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, ) @@ -516,6 +520,7 @@ def test_bigtable_table_admin_client_client_options_from_dict(): client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, ) diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py index 80c8769319fc..3735f10745ec 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py @@ -111,16 +111,6 @@ def test_bigtable_client_from_service_account_info(client_class): assert client.transport._host == "bigtable.googleapis.com:443" -@pytest.mark.parametrize("client_class", [BigtableClient, BigtableAsyncClient,]) -def test_bigtable_client_service_account_always_use_jwt(client_class): - with mock.patch.object( - service_account.Credentials, "with_always_use_jwt_access", create=True - ) as use_jwt: - creds = service_account.Credentials(None, None, None) - client = client_class(credentials=creds) - use_jwt.assert_not_called() - - @pytest.mark.parametrize( "transport_class,transport_name", [ @@ -128,7 +118,7 @@ def test_bigtable_client_service_account_always_use_jwt(client_class): (transports.BigtableGrpcAsyncIOTransport, "grpc_asyncio"), ], ) -def test_bigtable_client_service_account_always_use_jwt_true( +def test_bigtable_client_service_account_always_use_jwt( transport_class, transport_name ): with mock.patch.object( @@ -138,6 +128,13 @@ def test_bigtable_client_service_account_always_use_jwt_true( transport = transport_class(credentials=creds, always_use_jwt_access=True) use_jwt.assert_called_once_with(True) + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + @pytest.mark.parametrize("client_class", [BigtableClient, BigtableAsyncClient,]) def test_bigtable_client_from_service_account_file(client_class): @@ -208,6 +205,7 @@ def test_bigtable_client_client_options(client_class, transport_class, transport client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is @@ -224,6 +222,7 @@ def test_bigtable_client_client_options(client_class, transport_class, transport client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is @@ -240,6 +239,7 @@ def test_bigtable_client_client_options(client_class, 
transport_class, transport client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has @@ -268,6 +268,7 @@ def test_bigtable_client_client_options(client_class, transport_class, transport client_cert_source_for_mtls=None, quota_project_id="octopus", client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, ) @@ -332,6 +333,7 @@ def test_bigtable_client_mtls_env_auto( client_cert_source_for_mtls=expected_client_cert_source, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, ) # Check the case ADC client cert is provided. Whether client cert is used depends on @@ -365,6 +367,7 @@ def test_bigtable_client_mtls_env_auto( client_cert_source_for_mtls=expected_client_cert_source, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, ) # Check the case client_cert_source and ADC client cert are not provided. @@ -386,6 +389,7 @@ def test_bigtable_client_mtls_env_auto( client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, ) @@ -412,6 +416,7 @@ def test_bigtable_client_client_options_scopes( client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, ) @@ -438,6 +443,7 @@ def test_bigtable_client_client_options_credentials_file( client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, ) @@ -455,6 +461,7 @@ def test_bigtable_client_client_options_from_dict(): client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, ) From d2c39addaa37e59208dbe8c1e965092373ce95ff Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Mon, 26 Jul 2021 18:16:25 +0000 Subject: [PATCH 500/892] chore: release 2.3.3 (#398) :robot: I have created a release \*beep\* \*boop\* --- ### [2.3.3](https://www.github.com/googleapis/python-bigtable/compare/v2.3.2...v2.3.3) (2021-07-24) ### Bug Fixes * enable self signed jwt for grpc ([#397](https://www.github.com/googleapis/python-bigtable/issues/397)) ([9d43a38](https://www.github.com/googleapis/python-bigtable/commit/9d43a388470746608d324ca8d72f41bb3a4492b7)) --- This PR was generated with [Release Please](https://github.com/googleapis/release-please). See [documentation](https://github.com/googleapis/release-please#release-please). 
--- packages/google-cloud-bigtable/CHANGELOG.md | 7 +++++++ packages/google-cloud-bigtable/setup.py | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 9eea76e4239c..20715657673a 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,13 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +### [2.3.3](https://www.github.com/googleapis/python-bigtable/compare/v2.3.2...v2.3.3) (2021-07-24) + + +### Bug Fixes + +* enable self signed jwt for grpc ([#397](https://www.github.com/googleapis/python-bigtable/issues/397)) ([9d43a38](https://www.github.com/googleapis/python-bigtable/commit/9d43a388470746608d324ca8d72f41bb3a4492b7)) + ### [2.3.2](https://www.github.com/googleapis/python-bigtable/compare/v2.3.1...v2.3.2) (2021-07-20) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index ae00d7cd40a3..79feacd1b613 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -22,7 +22,7 @@ name = "google-cloud-bigtable" description = "Google Cloud Bigtable API client library" -version = "2.3.2" +version = "2.3.3" # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' From 6b02b3231dcbdb5380441349beca064989a9bdfe Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Thu, 29 Jul 2021 16:24:59 +0200 Subject: [PATCH 501/892] chore(deps): update dependency google-cloud-monitoring to v2.4.2 (#400) --- .../google-cloud-bigtable/samples/metricscaler/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index 6ec0b65468d9..8a9f48af893f 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ google-cloud-bigtable==2.3.1 -google-cloud-monitoring==2.4.1 +google-cloud-monitoring==2.4.2 From 59dbe0d746a126c372a4dd4ef2200bc63082a616 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Thu, 29 Jul 2021 16:41:34 +0200 Subject: [PATCH 502/892] chore(deps): update dependency google-cloud-core to v1.7.2 (#399) --- packages/google-cloud-bigtable/samples/beam/requirements.txt | 2 +- packages/google-cloud-bigtable/samples/hello/requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index b7fce4d899ef..29731a5f9428 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ apache-beam==2.31.0 google-cloud-bigtable<2.0.0 -google-cloud-core==1.7.1 \ No newline at end of file +google-cloud-core==1.7.2 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/hello/requirements.txt b/packages/google-cloud-bigtable/samples/hello/requirements.txt index e642d3960fa6..20e1f50782c4 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements.txt @@ -1,2 +1,2 @@ google-cloud-bigtable==2.3.1 -google-cloud-core==1.7.1 +google-cloud-core==1.7.2 From c07f84831744d493008ec495d727fd898c6b276e Mon Sep 17 
00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Wed, 11 Aug 2021 14:41:10 -0400 Subject: [PATCH 503/892] chore: avoid `.nox` directories when building docs (#404) Source-Link: https://github.com/googleapis/synthtool/commit/7e1f6da50524b5d98eb67adbf6dd0805df54233d Post-Processor: gcr.io/repo-automation-bots/owlbot-python:latest@sha256:a1a891041baa4ffbe1a809ac1b8b9b4a71887293c9101c88e8e255943c5aec2d Co-authored-by: Owl Bot --- packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml | 2 +- packages/google-cloud-bigtable/docs/conf.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 9ee60f7e4850..b771c37caef8 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -1,3 +1,3 @@ docker: image: gcr.io/repo-automation-bots/owlbot-python:latest - digest: sha256:aea14a583128771ae8aefa364e1652f3c56070168ef31beb203534222d842b8b + digest: sha256:a1a891041baa4ffbe1a809ac1b8b9b4a71887293c9101c88e8e255943c5aec2d diff --git a/packages/google-cloud-bigtable/docs/conf.py b/packages/google-cloud-bigtable/docs/conf.py index 4e05a219b43f..c00305573340 100644 --- a/packages/google-cloud-bigtable/docs/conf.py +++ b/packages/google-cloud-bigtable/docs/conf.py @@ -110,6 +110,7 @@ # directories to ignore when looking for source files. exclude_patterns = [ "_build", + "**/.nox/**/*", "samples/AUTHORING_GUIDE.md", "samples/CONTRIBUTING.md", "samples/snippets/README.rst", From faa8967f46c9c3dc5b42369e91cd5c3f8457d0be Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Fri, 13 Aug 2021 11:19:51 -0400 Subject: [PATCH 504/892] chore: drop mention of Python 2.7 from templates (#405) Source-Link: https://github.com/googleapis/synthtool/commit/facee4cc1ea096cd8bcc008bb85929daa7c414c0 Post-Processor: gcr.io/repo-automation-bots/owlbot-python:latest@sha256:9743664022bd63a8084be67f144898314c7ca12f0a03e422ac17c733c129d803 Co-authored-by: Owl Bot --- .../google-cloud-bigtable/.github/.OwlBot.lock.yaml | 2 +- packages/google-cloud-bigtable/noxfile.py | 12 +++++++++--- .../readme-gen/templates/install_deps.tmpl.rst | 2 +- 3 files changed, 11 insertions(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index b771c37caef8..a9fcd07cc43b 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -1,3 +1,3 @@ docker: image: gcr.io/repo-automation-bots/owlbot-python:latest - digest: sha256:a1a891041baa4ffbe1a809ac1b8b9b4a71887293c9101c88e8e255943c5aec2d + digest: sha256:9743664022bd63a8084be67f144898314c7ca12f0a03e422ac17c733c129d803 diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index 0687bafb6be0..d938c5d2b423 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -85,9 +85,15 @@ def default(session): constraints_path = str( CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt" ) - session.install("asyncmock", "pytest-asyncio", "-c", constraints_path) - - session.install("mock", "pytest", "pytest-cov", "-c", constraints_path) + session.install( + "mock", + "asyncmock", + "pytest", + "pytest-cov", + "pytest-asyncio", + "-c", + 
constraints_path, + ) session.install("-e", ".", "-c", constraints_path) diff --git a/packages/google-cloud-bigtable/scripts/readme-gen/templates/install_deps.tmpl.rst b/packages/google-cloud-bigtable/scripts/readme-gen/templates/install_deps.tmpl.rst index a0406dba8c84..275d649890d7 100644 --- a/packages/google-cloud-bigtable/scripts/readme-gen/templates/install_deps.tmpl.rst +++ b/packages/google-cloud-bigtable/scripts/readme-gen/templates/install_deps.tmpl.rst @@ -12,7 +12,7 @@ Install Dependencies .. _Python Development Environment Setup Guide: https://cloud.google.com/python/setup -#. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. +#. Create a virtualenv. Samples are compatible with Python 3.6+. .. code-block:: bash From 39654fb884e7ae9f6496d04e90b39258354951e7 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 17 Aug 2021 12:30:11 -0400 Subject: [PATCH 505/892] tests: allow prerelease deps on Python 3.9 (#402) Closes #401. --- packages/google-cloud-bigtable/testing/constraints-3.9.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/packages/google-cloud-bigtable/testing/constraints-3.9.txt b/packages/google-cloud-bigtable/testing/constraints-3.9.txt index e69de29bb2d1..6d34489a53a4 100644 --- a/packages/google-cloud-bigtable/testing/constraints-3.9.txt +++ b/packages/google-cloud-bigtable/testing/constraints-3.9.txt @@ -0,0 +1,2 @@ +# Allow prerelease requirements +--pre From 79884059a0559e58092cca1a3c4de97149a10b0c Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Wed, 18 Aug 2021 12:04:50 -0400 Subject: [PATCH 506/892] Revert "tests: allow prerelease deps on Python 3.9 (#402)" (#406) This reverts commit d8532682dbdd9b5d2553399d635161ba3648e930. --- packages/google-cloud-bigtable/testing/constraints-3.9.txt | 2 -- 1 file changed, 2 deletions(-) diff --git a/packages/google-cloud-bigtable/testing/constraints-3.9.txt b/packages/google-cloud-bigtable/testing/constraints-3.9.txt index 6d34489a53a4..e69de29bb2d1 100644 --- a/packages/google-cloud-bigtable/testing/constraints-3.9.txt +++ b/packages/google-cloud-bigtable/testing/constraints-3.9.txt @@ -1,2 +0,0 @@ -# Allow prerelease requirements ---pre From e27989c8302189fece65dd2522ce50b2f4baea27 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Wed, 18 Aug 2021 16:05:04 -0400 Subject: [PATCH 507/892] feat: Publish new fields to support cluster group routing for Cloud Bigtable (#407) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: Publish new fields to support cluster group routing for Cloud Bigtable Committer: @garye PiperOrigin-RevId: 391576441 Source-Link: https://github.com/googleapis/googleapis/commit/5f761138c64054797b7e25164798d573ff4c1c62 Source-Link: https://github.com/googleapis/googleapis-gen/commit/e01b342bb2e0ee53adea5800868efb6094c7cdce * 🦉 Updates from OwlBot See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md Co-authored-by: Owl Bot --- .../google/cloud/bigtable_admin_v2/types/instance.py | 11 ++++++++++- .../bigtable_admin_v2/test_bigtable_instance_admin.py | 8 ++++++-- 2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py index 64eb1edc7f5f..f1ba750e122a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py +++ 
b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py @@ -181,7 +181,16 @@ class MultiClusterRoutingUseAny(proto.Message): available in the event of transient errors or delays. Clusters in a region are considered equidistant. Choosing this option sacrifices read-your-writes consistency to improve availability. - """ + + Attributes: + cluster_ids (Sequence[str]): + The set of clusters to route to. The order is + ignored; clusters will be tried in order of + distance. If left empty, all clusters are + eligible. + """ + + cluster_ids = proto.RepeatedField(proto.STRING, number=1,) class SingleClusterRouting(proto.Message): r"""Unconditionally routes all read/write requests to a specific diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index f4180811bf49..029ed196feae 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -2816,7 +2816,9 @@ def test_create_app_profile( name="name_value", etag="etag_value", description="description_value", - multi_cluster_routing_use_any=None, + multi_cluster_routing_use_any=instance.AppProfile.MultiClusterRoutingUseAny( + cluster_ids=["cluster_ids_value"] + ), ) response = client.create_app_profile(request) @@ -3062,7 +3064,9 @@ def test_get_app_profile( name="name_value", etag="etag_value", description="description_value", - multi_cluster_routing_use_any=None, + multi_cluster_routing_use_any=instance.AppProfile.MultiClusterRoutingUseAny( + cluster_ids=["cluster_ids_value"] + ), ) response = client.get_app_profile(request) From f4f6ad80399d4f9b2801c5aa7b174b8a1ec0e1c6 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Mon, 30 Aug 2021 15:30:28 +0000 Subject: [PATCH 508/892] chore(python): disable dependency dashboard (#413) --- packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml | 2 +- packages/google-cloud-bigtable/renovate.json | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index a9fcd07cc43b..b75186cf1ba4 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -1,3 +1,3 @@ docker: image: gcr.io/repo-automation-bots/owlbot-python:latest - digest: sha256:9743664022bd63a8084be67f144898314c7ca12f0a03e422ac17c733c129d803 + digest: sha256:d6761eec279244e57fe9d21f8343381a01d3632c034811a72f68b83119e58c69 diff --git a/packages/google-cloud-bigtable/renovate.json b/packages/google-cloud-bigtable/renovate.json index c04895563e69..9fa8816fe873 100644 --- a/packages/google-cloud-bigtable/renovate.json +++ b/packages/google-cloud-bigtable/renovate.json @@ -1,6 +1,8 @@ { "extends": [ - "config:base", ":preserveSemverRanges" + "config:base", + ":preserveSemverRanges", + ":disableDependencyDashboard" ], "ignorePaths": [".pre-commit-config.yaml"], "pip_requirements": { From 6567d846cde1d19912812d2291e01305c819d259 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 31 Aug 2021 17:58:36 -0400 Subject: [PATCH 509/892] chore: migrate default branch to main (#411) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * 
chore: migrate default branch to main * chore: restore 'master' in link to 'python-docs-samples' repo * 🦉 Updates from OwlBot See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * chore: grrr * 🦉 Updates from OwlBot See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md Co-authored-by: Owl Bot --- .../.github/sync-repo-settings.yaml | 6 +-- .../google-cloud-bigtable/.kokoro/build.sh | 2 +- .../.kokoro/test-samples-impl.sh | 2 +- .../google-cloud-bigtable/CONTRIBUTING.rst | 12 ++--- packages/google-cloud-bigtable/README.rst | 2 +- .../docs/client-intro.rst | 4 +- packages/google-cloud-bigtable/docs/conf.py | 10 ++-- .../google-cloud-bigtable/docs/data-api.rst | 10 ++-- .../docs/instance-api.rst | 2 +- .../google-cloud-bigtable/docs/table-api.rst | 2 +- .../google/cloud/bigtable/error.py | 2 +- packages/google-cloud-bigtable/owlbot.py | 53 +++++++++++++++++++ .../tests/unit/test_row_data.py | 2 +- 13 files changed, 81 insertions(+), 28 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/sync-repo-settings.yaml b/packages/google-cloud-bigtable/.github/sync-repo-settings.yaml index 84e25a9ced6d..a0d3362c94af 100644 --- a/packages/google-cloud-bigtable/.github/sync-repo-settings.yaml +++ b/packages/google-cloud-bigtable/.github/sync-repo-settings.yaml @@ -10,11 +10,11 @@ squashMergeAllowed: true # Defaults to `false` mergeCommitAllowed: false -# Rules for master branch protection +# Rules for main branch protection branchProtectionRules: # Identifies the protection rule pattern. Name of the branch to be protected. -# Defaults to `master` -- pattern: master +# Defaults to `main` +- pattern: main # Can admins overwrite branch protection. # Defaults to `true` isAdminEnforced: true diff --git a/packages/google-cloud-bigtable/.kokoro/build.sh b/packages/google-cloud-bigtable/.kokoro/build.sh index 9773bfca7cd7..2ab1155b2e83 100755 --- a/packages/google-cloud-bigtable/.kokoro/build.sh +++ b/packages/google-cloud-bigtable/.kokoro/build.sh @@ -41,7 +41,7 @@ python3 -m pip install --upgrade --quiet nox python3 -m nox --version # If this is a continuous build, send the test log to the FlakyBot. -# See https://github.com/googleapis/repo-automation-bots/tree/master/packages/flakybot. +# See https://github.com/googleapis/repo-automation-bots/tree/main/packages/flakybot. if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"continuous"* ]]; then cleanup() { chmod +x $KOKORO_GFILE_DIR/linux_amd64/flakybot diff --git a/packages/google-cloud-bigtable/.kokoro/test-samples-impl.sh b/packages/google-cloud-bigtable/.kokoro/test-samples-impl.sh index 311a8d54b9f1..8a324c9c7bc6 100755 --- a/packages/google-cloud-bigtable/.kokoro/test-samples-impl.sh +++ b/packages/google-cloud-bigtable/.kokoro/test-samples-impl.sh @@ -80,7 +80,7 @@ for file in samples/**/requirements.txt; do EXIT=$? # If this is a periodic build, send the test log to the FlakyBot. - # See https://github.com/googleapis/repo-automation-bots/tree/master/packages/flakybot. + # See https://github.com/googleapis/repo-automation-bots/tree/main/packages/flakybot. 
if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"periodic"* ]]; then chmod +x $KOKORO_GFILE_DIR/linux_amd64/flakybot $KOKORO_GFILE_DIR/linux_amd64/flakybot diff --git a/packages/google-cloud-bigtable/CONTRIBUTING.rst b/packages/google-cloud-bigtable/CONTRIBUTING.rst index b8c70c94cc6a..285d32232d8e 100644 --- a/packages/google-cloud-bigtable/CONTRIBUTING.rst +++ b/packages/google-cloud-bigtable/CONTRIBUTING.rst @@ -50,9 +50,9 @@ You'll have to create a development environment using a Git checkout: # Configure remotes such that you can pull changes from the googleapis/python-bigtable # repository into your local repository. $ git remote add upstream git@github.com:googleapis/python-bigtable.git - # fetch and merge changes from upstream into master + # fetch and merge changes from upstream into main $ git fetch upstream - $ git merge upstream/master + $ git merge upstream/main Now your local repo is set up such that you will push changes to your GitHub repo, from which you can submit a pull request. @@ -110,12 +110,12 @@ Coding Style variables:: export GOOGLE_CLOUD_TESTING_REMOTE="upstream" - export GOOGLE_CLOUD_TESTING_BRANCH="master" + export GOOGLE_CLOUD_TESTING_BRANCH="main" By doing this, you are specifying the location of the most up-to-date version of ``python-bigtable``. The the suggested remote name ``upstream`` should point to the official ``googleapis`` checkout and the - the branch should be the main branch on that remote (``master``). + the branch should be the main branch on that remote (``main``). - This repository contains configuration for the `pre-commit `__ tool, which automates checking @@ -209,7 +209,7 @@ The `description on PyPI`_ for the project comes directly from the ``README``. Due to the reStructuredText (``rst``) parser used by PyPI, relative links which will work on GitHub (e.g. ``CONTRIBUTING.rst`` instead of -``https://github.com/googleapis/python-bigtable/blob/master/CONTRIBUTING.rst``) +``https://github.com/googleapis/python-bigtable/blob/main/CONTRIBUTING.rst``) may cause problems creating links or rendering the description. .. _description on PyPI: https://pypi.org/project/google-cloud-bigtable @@ -234,7 +234,7 @@ We support: Supported versions can be found in our ``noxfile.py`` `config`_. -.. _config: https://github.com/googleapis/python-bigtable/blob/master/noxfile.py +.. _config: https://github.com/googleapis/python-bigtable/blob/main/noxfile.py We also explicitly decided to support Python 3 beginning with version 3.6. diff --git a/packages/google-cloud-bigtable/README.rst b/packages/google-cloud-bigtable/README.rst index 859012f5862c..28cc372dad52 100644 --- a/packages/google-cloud-bigtable/README.rst +++ b/packages/google-cloud-bigtable/README.rst @@ -11,7 +11,7 @@ Analytics, Maps, and Gmail. - `Product Documentation`_ .. |GA| image:: https://img.shields.io/badge/support-GA-gold.svg - :target: https://github.com/googleapis/google-cloud-python/blob/master/README.rst#general-availability + :target: https://github.com/googleapis/google-cloud-python/blob/main/README.rst#general-availability .. |pypi| image:: https://img.shields.io/pypi/v/google-cloud-bigtable.svg :target: https://pypi.org/project/google-cloud-bigtable/ .. 
|versions| image:: https://img.shields.io/pypi/pyversions/google-cloud-bigtable.svg diff --git a/packages/google-cloud-bigtable/docs/client-intro.rst b/packages/google-cloud-bigtable/docs/client-intro.rst index 36b2677d0325..2420684996a5 100644 --- a/packages/google-cloud-bigtable/docs/client-intro.rst +++ b/packages/google-cloud-bigtable/docs/client-intro.rst @@ -86,5 +86,5 @@ one before you can interact with tables or data. Head next to learn about the :doc:`instance-api`. -.. _Instance Admin: https://github.com/googleapis/python-bigtable/blob/master/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto -.. _Table Admin: https://github.com/googleapis/python-bigtable/blob/master/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto +.. _Instance Admin: https://github.com/googleapis/python-bigtable/blob/main/google/cloud/bigtable_admin_v2/proto/bigtable_instance_admin.proto +.. _Table Admin: https://github.com/googleapis/python-bigtable/blob/main/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto diff --git a/packages/google-cloud-bigtable/docs/conf.py b/packages/google-cloud-bigtable/docs/conf.py index c00305573340..26814b0aa607 100644 --- a/packages/google-cloud-bigtable/docs/conf.py +++ b/packages/google-cloud-bigtable/docs/conf.py @@ -76,8 +76,8 @@ # The encoding of source files. # source_encoding = 'utf-8-sig' -# The master toctree document. -master_doc = "index" +# The root toctree document. +root_doc = "index" # General information about the project. project = "google-cloud-bigtable" @@ -280,7 +280,7 @@ # author, documentclass [howto, manual, or own class]). latex_documents = [ ( - master_doc, + root_doc, "google-cloud-bigtable.tex", "google-cloud-bigtable Documentation", author, @@ -315,7 +315,7 @@ # (source start file, name, description, authors, manual section). man_pages = [ ( - master_doc, + root_doc, "google-cloud-bigtable", "google-cloud-bigtable Documentation", [author], @@ -334,7 +334,7 @@ # dir menu entry, description, category) texinfo_documents = [ ( - master_doc, + root_doc, "google-cloud-bigtable", "google-cloud-bigtable Documentation", author, diff --git a/packages/google-cloud-bigtable/docs/data-api.rst b/packages/google-cloud-bigtable/docs/data-api.rst index d9269cddb934..9d9205e6b1e9 100644 --- a/packages/google-cloud-bigtable/docs/data-api.rst +++ b/packages/google-cloud-bigtable/docs/data-api.rst @@ -337,8 +337,8 @@ Just as with reading, the stream can be canceled: keys_iterator.cancel() -.. _ReadRows: https://github.com/googleapis/python-bigtable/blob/master/google/cloud/bigtable_v2/proto/bigtable.proto#L54-L61 -.. _SampleRowKeys: https://github.com/googleapis/python-bigtable/blob/master/google/cloud/bigtable_v2/proto/bigtable.proto#L67-L73 -.. _MutateRow: https://github.com/googleapis/python-bigtable/blob/master/google/cloud/bigtable_v2/proto/bigtable.proto#L77-L84 -.. _CheckAndMutateRow: https://github.com/googleapis/python-bigtable/blob/master/google/cloud/bigtable_v2/proto/bigtable.proto#L99-L106 -.. _ReadModifyWriteRow: https://github.com/googleapis/python-bigtable/blob/master/google/cloud/bigtable_v2/proto/bigtable.proto#L113-L121 +.. _ReadRows: https://github.com/googleapis/python-bigtable/blob/main/google/cloud/bigtable_v2/proto/bigtable.proto#L54-L61 +.. _SampleRowKeys: https://github.com/googleapis/python-bigtable/blob/main/google/cloud/bigtable_v2/proto/bigtable.proto#L67-L73 +.. _MutateRow: https://github.com/googleapis/python-bigtable/blob/main/google/cloud/bigtable_v2/proto/bigtable.proto#L77-L84 +.. 
_CheckAndMutateRow: https://github.com/googleapis/python-bigtable/blob/main/google/cloud/bigtable_v2/proto/bigtable.proto#L99-L106 +.. _ReadModifyWriteRow: https://github.com/googleapis/python-bigtable/blob/main/google/cloud/bigtable_v2/proto/bigtable.proto#L113-L121 diff --git a/packages/google-cloud-bigtable/docs/instance-api.rst b/packages/google-cloud-bigtable/docs/instance-api.rst index 52a2fb0a5869..88b4eb4dc914 100644 --- a/packages/google-cloud-bigtable/docs/instance-api.rst +++ b/packages/google-cloud-bigtable/docs/instance-api.rst @@ -127,4 +127,4 @@ Head next to learn about the :doc:`table-api`. .. _DeleteInstance: https://googleapis.dev/python/bigtable/latest/instance-api.html#delete-an-existing-instance .. _ListInstances: https://googleapis.dev/python/bigtable/latest/instance-api.html#list-instances .. _GetOperation: https://googleapis.dev/python/bigtable/latest/instance-api.html#check-on-current-operation -.. _long-running operation: https://github.com/googleapis/googleapis/blob/master/google/longrunning/operations.proto#L128-L162 +.. _long-running operation: https://github.com/googleapis/googleapis/blob/main/google/longrunning/operations.proto#L128-L162 diff --git a/packages/google-cloud-bigtable/docs/table-api.rst b/packages/google-cloud-bigtable/docs/table-api.rst index 20d70e990a35..1bbf851462bb 100644 --- a/packages/google-cloud-bigtable/docs/table-api.rst +++ b/packages/google-cloud-bigtable/docs/table-api.rst @@ -146,7 +146,7 @@ Head next to learn about the :doc:`data-api`. .. _ListTables: https://googleapis.dev/python/bigtable/latest/table-api.html#list-tables .. _CreateTable: https://googleapis.dev/python/bigtable/latest/table-api.html#create-a-new-table .. _DeleteTable: https://googleapis.dev/python/bigtable/latest/table-api.html#delete-an-existing-table -.. _GetTable: https://github.com/googleapis/python-bigtable/blob/master/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto#L97-L102 +.. _GetTable: https://github.com/googleapis/python-bigtable/blob/main/google/cloud/bigtable_admin_v2/proto/bigtable_table_admin.proto#L97-L102 .. _CreateColumnFamily: https://googleapis.dev/python/bigtable/latest/table-api.html?highlight=gettable#create-a-new-column-family .. _UpdateColumnFamily: https://googleapis.dev/python/bigtable/latest/table-api.html?highlight=gettable#update-an-existing-column-family .. _DeleteColumnFamily: https://googleapis.dev/python/bigtable/latest/table-api.html?highlight=gettable#delete-an-existing-column-family diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/error.py b/packages/google-cloud-bigtable/google/cloud/bigtable/error.py index 261cfc2c3b10..075bb01ccd04 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/error.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/error.py @@ -36,7 +36,7 @@ def code(self): Values are defined in ``google.rpc.code_pb2.Code``. See: `google.rpc.Code - `_ + `_ :rtype: int :returns: The status code. 
diff --git a/packages/google-cloud-bigtable/owlbot.py b/packages/google-cloud-bigtable/owlbot.py index 71c1186c23f8..98e126e91df0 100644 --- a/packages/google-cloud-bigtable/owlbot.py +++ b/packages/google-cloud-bigtable/owlbot.py @@ -149,6 +149,54 @@ def system_emulated(session): "system",""", ) +# Remove the replacements below once https://github.com/googleapis/synthtool/pull/1188 is merged + +# Update googleapis/repo-automation-bots repo to main in .kokoro/*.sh files +s.replace( + ".kokoro/*.sh", "repo-automation-bots/tree/master", "repo-automation-bots/tree/main" +) + +# Customize CONTRIBUTING.rst to replace master with main +s.replace( + "CONTRIBUTING.rst", + "fetch and merge changes from upstream into master", + "fetch and merge changes from upstream into main", +) + +s.replace( + "CONTRIBUTING.rst", "git merge upstream/master", "git merge upstream/main", +) + +s.replace( + "CONTRIBUTING.rst", + """export GOOGLE_CLOUD_TESTING_BRANCH=\"master\"""", + """export GOOGLE_CLOUD_TESTING_BRANCH=\"main\"""", +) + +s.replace( + "CONTRIBUTING.rst", r"remote \(``master``\)", "remote (``main``)", +) + +s.replace( + "CONTRIBUTING.rst", "blob/master/CONTRIBUTING.rst", "blob/main/CONTRIBUTING.rst", +) + +s.replace( + "CONTRIBUTING.rst", "blob/master/noxfile.py", "blob/main/noxfile.py", +) + +s.replace( + "docs/conf.py", "master_doc", "root_doc", +) + +s.replace( + "docs/conf.py", "# The master toctree document.", "# The root toctree document.", +) + +s.replace( + "docs/**/*.rst", r"/blob/master/", "/blob/main/", +) + # ---------------------------------------------------------------------------- # Samples templates # ---------------------------------------------------------------------------- @@ -157,5 +205,10 @@ def system_emulated(session): for path in sample_files: s.move(path) +# Note: google-cloud-python and python-docs-samples are nnt yet usin 'main': +#s.replace( +# "samples/**/*.md", r"/blob/master/", "/blob/main/", +#) + s.shell.run(["nox", "-s", "blacken"], hide_output=False) diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_data.py b/packages/google-cloud-bigtable/tests/unit/test_row_data.py index ae5e6091fefb..a95cf2ec40f7 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_data.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_data.py @@ -1310,7 +1310,7 @@ def _parse_readrows_acceptance_tests(filename): """Parse acceptance tests from JSON See - https://github.com/googleapis/python-bigtable/blob/master/\ + https://github.com/googleapis/python-bigtable/blob/main/\ tests/unit/read-rows-acceptance-test.json """ import json From 395fc8d88dd8d831e28030ec1b9d00fb15bb7c5e Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Wed, 1 Sep 2021 15:42:47 +0000 Subject: [PATCH 510/892] chore(python): group renovate prs (#419) --- packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml | 2 +- packages/google-cloud-bigtable/renovate.json | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index b75186cf1ba4..ef3cb34f66fd 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -1,3 +1,3 @@ docker: image: gcr.io/repo-automation-bots/owlbot-python:latest - digest: sha256:d6761eec279244e57fe9d21f8343381a01d3632c034811a72f68b83119e58c69 + digest: 
sha256:1456ea2b3b523ccff5e13030acef56d1de28f21249c62aa0f196265880338fa7 diff --git a/packages/google-cloud-bigtable/renovate.json b/packages/google-cloud-bigtable/renovate.json index 9fa8816fe873..c21036d385e5 100644 --- a/packages/google-cloud-bigtable/renovate.json +++ b/packages/google-cloud-bigtable/renovate.json @@ -1,6 +1,7 @@ { "extends": [ "config:base", + "group:all", ":preserveSemverRanges", ":disableDependencyDashboard" ], From 4f49d7bcf07695790fe6c40b3057916723e855fc Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Wed, 1 Sep 2021 14:31:20 -0400 Subject: [PATCH 511/892] chore(python): rename default branch to main (#421) Source-Link: https://github.com/googleapis/synthtool/commit/5c0fa62eea9c33ebe61e582424b659eb264e1ba4 Post-Processor: gcr.io/repo-automation-bots/owlbot-python:latest@sha256:0ffe3bdd6c7159692df5f7744da74e5ef19966288a6bf76023e8e04e0c424d7d Co-authored-by: Owl Bot --- packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml | 2 +- packages/google-cloud-bigtable/CONTRIBUTING.rst | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index ef3cb34f66fd..c07f148f0b0b 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -1,3 +1,3 @@ docker: image: gcr.io/repo-automation-bots/owlbot-python:latest - digest: sha256:1456ea2b3b523ccff5e13030acef56d1de28f21249c62aa0f196265880338fa7 + digest: sha256:0ffe3bdd6c7159692df5f7744da74e5ef19966288a6bf76023e8e04e0c424d7d diff --git a/packages/google-cloud-bigtable/CONTRIBUTING.rst b/packages/google-cloud-bigtable/CONTRIBUTING.rst index 285d32232d8e..78b6684a36e5 100644 --- a/packages/google-cloud-bigtable/CONTRIBUTING.rst +++ b/packages/google-cloud-bigtable/CONTRIBUTING.rst @@ -113,9 +113,9 @@ Coding Style export GOOGLE_CLOUD_TESTING_BRANCH="main" By doing this, you are specifying the location of the most up-to-date - version of ``python-bigtable``. The the suggested remote name ``upstream`` - should point to the official ``googleapis`` checkout and the - the branch should be the main branch on that remote (``main``). + version of ``python-bigtable``. The + remote name ``upstream`` should point to the official ``googleapis`` + checkout and the branch should be the default branch on that remote (``main``). - This repository contains configuration for the `pre-commit `__ tool, which automates checking From 122fdeabc25f9d6804daee3fba80acf1fd6ccf53 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Wed, 15 Sep 2021 15:02:28 -0400 Subject: [PATCH 512/892] tests: refactor systests into separate modules w/ pytest fixtures (#393) * tests: break out shared setup / teardown into pytest fixtures * tests: move instance admin systests into a separate module * tests: move table admin systests into a separate module * tests: move data API systests into a separate module * tests: remove remaining monolith fixtures Closes #391. * test: harden flaky 'w_two_clusters' systest against GRPC Unavailable Ported from PR #385. Fixes #381. 
--- .../google-cloud-bigtable/tests/system.py | 1504 ----------------- .../tests/system/__init__.py | 15 + .../tests/system/_helpers.py | 45 + .../tests/system/conftest.py | 172 ++ .../tests/system/test_data_api.py | 383 +++++ .../tests/system/test_instance_admin.py | 614 +++++++ .../tests/system/test_table_admin.py | 351 ++++ 7 files changed, 1580 insertions(+), 1504 deletions(-) delete mode 100644 packages/google-cloud-bigtable/tests/system.py create mode 100644 packages/google-cloud-bigtable/tests/system/__init__.py create mode 100644 packages/google-cloud-bigtable/tests/system/_helpers.py create mode 100644 packages/google-cloud-bigtable/tests/system/conftest.py create mode 100644 packages/google-cloud-bigtable/tests/system/test_data_api.py create mode 100644 packages/google-cloud-bigtable/tests/system/test_instance_admin.py create mode 100644 packages/google-cloud-bigtable/tests/system/test_table_admin.py diff --git a/packages/google-cloud-bigtable/tests/system.py b/packages/google-cloud-bigtable/tests/system.py deleted file mode 100644 index aa3c1cac69bc..000000000000 --- a/packages/google-cloud-bigtable/tests/system.py +++ /dev/null @@ -1,1504 +0,0 @@ -# Copyright 2016 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import datetime -import operator -import os -import time -import unittest - -import pytest - -from google.api_core.datetime_helpers import DatetimeWithNanoseconds -from google.api_core.exceptions import DeadlineExceeded -from google.api_core.exceptions import TooManyRequests -from google.cloud.environment_vars import BIGTABLE_EMULATOR -from test_utils.retry import RetryErrors -from test_utils.retry import RetryResult - -# from test_utils.system import EmulatorCreds -from test_utils.system import unique_resource_id - -from google.cloud._helpers import _datetime_from_microseconds -from google.cloud._helpers import _microseconds_from_datetime -from google.cloud._helpers import UTC -from google.cloud.bigtable.client import Client -from google.cloud.bigtable.column_family import MaxVersionsGCRule -from google.cloud.bigtable.policy import Policy -from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE -from google.cloud.bigtable.row_filters import ApplyLabelFilter -from google.cloud.bigtable.row_filters import ColumnQualifierRegexFilter -from google.cloud.bigtable.row_filters import RowFilterChain -from google.cloud.bigtable.row_filters import RowFilterUnion -from google.cloud.bigtable.row_data import Cell -from google.cloud.bigtable.row_data import PartialRowData -from google.cloud.bigtable.row_set import RowSet -from google.cloud.bigtable.row_set import RowRange - -# from google.cloud.bigtable_admin_v2.gapic import ( -# bigtable_table_admin_client_config as table_admin_config, -# ) - -UNIQUE_SUFFIX = unique_resource_id("-") -LOCATION_ID = "us-central1-c" -INSTANCE_ID = "g-c-p" + UNIQUE_SUFFIX -INSTANCE_ID_DATA = "g-c-p-d" + UNIQUE_SUFFIX -TABLE_ID = "google-cloud-python-test-table" -CLUSTER_ID = INSTANCE_ID + "-cluster" -CLUSTER_ID_DATA = INSTANCE_ID_DATA + 
"-cluster" -SERVE_NODES = 3 -COLUMN_FAMILY_ID1 = "col-fam-id1" -COLUMN_FAMILY_ID2 = "col-fam-id2" -COL_NAME1 = b"col-name1" -COL_NAME2 = b"col-name2" -COL_NAME3 = b"col-name3-but-other-fam" -CELL_VAL1 = b"cell-val" -CELL_VAL2 = b"cell-val-newer" -CELL_VAL3 = b"altcol-cell-val" -CELL_VAL4 = b"foo" -ROW_KEY = b"row-key" -ROW_KEY_ALT = b"row-key-alt" -EXISTING_INSTANCES = [] -LABEL_KEY = "python-system" -label_stamp = ( - datetime.datetime.utcnow() - .replace(microsecond=0, tzinfo=UTC) - .strftime("%Y-%m-%dt%H-%M-%S") -) -LABELS = {LABEL_KEY: str(label_stamp)} -KMS_KEY_NAME = os.environ.get("KMS_KEY_NAME", None) - - -class Config(object): - """Run-time configuration to be modified at set-up. - - This is a mutable stand-in to allow test set-up to modify - global state. - """ - - CLIENT = None - INSTANCE = None - INSTANCE_DATA = None - CLUSTER = None - CLUSTER_DATA = None - IN_EMULATOR = False - - -def _retry_on_unavailable(exc): - """Retry only errors whose status code is 'UNAVAILABLE'.""" - from grpc import StatusCode - - return exc.code() == StatusCode.UNAVAILABLE - - -retry_429 = RetryErrors(TooManyRequests, max_tries=9) - - -def setUpModule(): - from google.cloud.exceptions import GrpcRendezvous - from google.cloud.bigtable.enums import Instance - - # See: https://github.com/googleapis/google-cloud-python/issues/5928 - # interfaces = table_admin_config.config["interfaces"] - # iface_config = interfaces["google.bigtable.admin.v2.BigtableTableAdmin"] - # methods = iface_config["methods"] - # create_table = methods["CreateTable"] - # create_table["timeout_millis"] = 90000 - - Config.IN_EMULATOR = os.getenv(BIGTABLE_EMULATOR) is not None - - # Previously we created clients using a mock EmulatorCreds when targeting - # an emulator. - Config.CLIENT = Client(admin=True) - - Config.INSTANCE = Config.CLIENT.instance(INSTANCE_ID, labels=LABELS) - Config.CLUSTER = Config.INSTANCE.cluster( - CLUSTER_ID, location_id=LOCATION_ID, serve_nodes=SERVE_NODES, - ) - Config.INSTANCE_DATA = Config.CLIENT.instance( - INSTANCE_ID_DATA, instance_type=Instance.Type.DEVELOPMENT, labels=LABELS - ) - Config.CLUSTER_DATA = Config.INSTANCE_DATA.cluster( - CLUSTER_ID_DATA, location_id=LOCATION_ID, - ) - - if not Config.IN_EMULATOR: - retry = RetryErrors(GrpcRendezvous, error_predicate=_retry_on_unavailable) - instances, failed_locations = retry(Config.CLIENT.list_instances)() - - if len(failed_locations) != 0: - raise ValueError("List instances failed in module set up.") - - EXISTING_INSTANCES[:] = instances - - # After listing, create the test instances. 
- admin_op = Config.INSTANCE.create(clusters=[Config.CLUSTER]) - admin_op.result(timeout=30) - data_op = Config.INSTANCE_DATA.create(clusters=[Config.CLUSTER_DATA]) - data_op.result(timeout=30) - - -def tearDownModule(): - if not Config.IN_EMULATOR: - retry_429(Config.INSTANCE.delete)() - retry_429(Config.INSTANCE_DATA.delete)() - - -class TestInstanceAdminAPI(unittest.TestCase): - def setUp(self): - if Config.IN_EMULATOR: - self.skipTest("Instance Admin API not supported in emulator") - self.instances_to_delete = [] - - def tearDown(self): - for instance in self.instances_to_delete: - retry_429(instance.delete)() - - def test_list_instances(self): - instances, failed_locations = Config.CLIENT.list_instances() - - self.assertEqual(failed_locations, []) - - found = set([instance.name for instance in instances]) - self.assertTrue(Config.INSTANCE.name in found) - - def test_reload(self): - from google.cloud.bigtable import enums - - # Use same arguments as Config.INSTANCE (created in `setUpModule`) - # so we can use reload() on a fresh instance. - alt_instance = Config.CLIENT.instance(INSTANCE_ID) - # Make sure metadata unset before reloading. - alt_instance.display_name = None - - alt_instance.reload() - self.assertEqual(alt_instance.display_name, Config.INSTANCE.display_name) - self.assertEqual(alt_instance.labels, Config.INSTANCE.labels) - self.assertEqual(alt_instance.type_, enums.Instance.Type.PRODUCTION) - - def test_create_instance_defaults(self): - from google.cloud.bigtable import enums - - ALT_INSTANCE_ID = "ndef" + UNIQUE_SUFFIX - instance = Config.CLIENT.instance(ALT_INSTANCE_ID, labels=LABELS) - ALT_CLUSTER_ID = ALT_INSTANCE_ID + "-cluster" - serve_nodes = 1 - cluster = instance.cluster( - ALT_CLUSTER_ID, location_id=LOCATION_ID, serve_nodes=serve_nodes - ) - operation = instance.create(clusters=[cluster]) - - # Make sure this instance gets deleted after the test case. - self.instances_to_delete.append(instance) - - # We want to make sure the operation completes. - operation.result(timeout=30) - - # Create a new instance instance and make sure it is the same. - instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID) - instance_alt.reload() - - self.assertEqual(instance, instance_alt) - self.assertEqual(instance.display_name, instance_alt.display_name) - # Make sure that by default a PRODUCTION type instance is created - self.assertIsNone(instance.type_) - self.assertEqual(instance_alt.type_, enums.Instance.Type.PRODUCTION) - - def test_create_instance(self): - from google.cloud.bigtable import enums - - _DEVELOPMENT = enums.Instance.Type.DEVELOPMENT - - ALT_INSTANCE_ID = "new" + UNIQUE_SUFFIX - instance = Config.CLIENT.instance( - ALT_INSTANCE_ID, instance_type=_DEVELOPMENT, labels=LABELS - ) - ALT_CLUSTER_ID = ALT_INSTANCE_ID + "-cluster" - cluster = instance.cluster(ALT_CLUSTER_ID, location_id=LOCATION_ID) - operation = instance.create(clusters=[cluster]) - - # Make sure this instance gets deleted after the test case. - self.instances_to_delete.append(instance) - - # We want to make sure the operation completes. - operation.result(timeout=30) - - # Create a new instance instance and make sure it is the same. 
- instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID) - instance_alt.reload() - - self.assertEqual(instance, instance_alt) - self.assertEqual(instance.display_name, instance_alt.display_name) - self.assertEqual(instance.type_, instance_alt.type_) - self.assertEqual(instance_alt.labels, LABELS) - self.assertEqual(instance_alt.state, enums.Instance.State.READY) - - def test_cluster_exists(self): - NONEXISTING_CLUSTER_ID = "cluster-id" - - cluster = Config.INSTANCE.cluster(CLUSTER_ID) - alt_cluster = Config.INSTANCE.cluster(NONEXISTING_CLUSTER_ID) - self.assertTrue(cluster.exists()) - self.assertFalse(alt_cluster.exists()) - - def test_instance_exists(self): - NONEXISTING_INSTANCE_ID = "instancer-id" - - alt_instance = Config.CLIENT.instance(NONEXISTING_INSTANCE_ID) - self.assertTrue(Config.INSTANCE.exists()) - self.assertFalse(alt_instance.exists()) - - def test_create_instance_w_two_clusters(self): - from google.cloud.bigtable import enums - from google.cloud.bigtable.table import ClusterState - - _PRODUCTION = enums.Instance.Type.PRODUCTION - ALT_INSTANCE_ID = "dif" + UNIQUE_SUFFIX - instance = Config.CLIENT.instance( - ALT_INSTANCE_ID, instance_type=_PRODUCTION, labels=LABELS - ) - - ALT_CLUSTER_ID_1 = ALT_INSTANCE_ID + "-c1" - ALT_CLUSTER_ID_2 = ALT_INSTANCE_ID + "-c2" - LOCATION_ID_2 = "us-central1-f" - STORAGE_TYPE = enums.StorageType.HDD - serve_nodes = 1 - cluster_1 = instance.cluster( - ALT_CLUSTER_ID_1, - location_id=LOCATION_ID, - serve_nodes=serve_nodes, - default_storage_type=STORAGE_TYPE, - ) - cluster_2 = instance.cluster( - ALT_CLUSTER_ID_2, - location_id=LOCATION_ID_2, - serve_nodes=serve_nodes, - default_storage_type=STORAGE_TYPE, - ) - operation = instance.create(clusters=[cluster_1, cluster_2]) - - # Make sure this instance gets deleted after the test case. - self.instances_to_delete.append(instance) - - # We want to make sure the operation completes. - operation.result(timeout=120) - - # Create a new instance instance and make sure it is the same. 
- instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID) - instance_alt.reload() - - self.assertEqual(instance, instance_alt) - self.assertEqual(instance.display_name, instance_alt.display_name) - self.assertEqual(instance.type_, instance_alt.type_) - - clusters, failed_locations = instance_alt.list_clusters() - self.assertEqual(failed_locations, []) - - clusters.sort(key=lambda x: x.name) - alt_cluster_1, alt_cluster_2 = clusters - - self.assertEqual(cluster_1.location_id, alt_cluster_1.location_id) - self.assertEqual(alt_cluster_1.state, enums.Cluster.State.READY) - self.assertEqual(cluster_1.serve_nodes, alt_cluster_1.serve_nodes) - self.assertEqual( - cluster_1.default_storage_type, alt_cluster_1.default_storage_type - ) - self.assertEqual(cluster_2.location_id, alt_cluster_2.location_id) - self.assertEqual(alt_cluster_2.state, enums.Cluster.State.READY) - self.assertEqual(cluster_2.serve_nodes, alt_cluster_2.serve_nodes) - self.assertEqual( - cluster_2.default_storage_type, alt_cluster_2.default_storage_type - ) - - # Test list clusters in project via 'client.list_clusters' - clusters, failed_locations = Config.CLIENT.list_clusters() - self.assertFalse(failed_locations) - found = set([cluster.name for cluster in clusters]) - self.assertTrue( - {alt_cluster_1.name, alt_cluster_2.name, Config.CLUSTER.name}.issubset( - found - ) - ) - - temp_table_id = "test-get-cluster-states" - temp_table = instance.table(temp_table_id) - temp_table.create() - - encryption_info = temp_table.get_encryption_info() - self.assertEqual( - encryption_info[ALT_CLUSTER_ID_1][0].encryption_type, - enums.EncryptionInfo.EncryptionType.GOOGLE_DEFAULT_ENCRYPTION, - ) - self.assertEqual( - encryption_info[ALT_CLUSTER_ID_2][0].encryption_type, - enums.EncryptionInfo.EncryptionType.GOOGLE_DEFAULT_ENCRYPTION, - ) - - result = temp_table.get_cluster_states() - ReplicationState = enums.Table.ReplicationState - expected_results = [ - ClusterState(ReplicationState.STATE_NOT_KNOWN), - ClusterState(ReplicationState.INITIALIZING), - ClusterState(ReplicationState.PLANNED_MAINTENANCE), - ClusterState(ReplicationState.UNPLANNED_MAINTENANCE), - ClusterState(ReplicationState.READY), - ] - cluster_id_list = result.keys() - self.assertEqual(len(cluster_id_list), 2) - self.assertIn(ALT_CLUSTER_ID_1, cluster_id_list) - self.assertIn(ALT_CLUSTER_ID_2, cluster_id_list) - for clusterstate in result.values(): - self.assertIn(clusterstate, expected_results) - - # Test create app profile with multi_cluster_routing policy - app_profiles_to_delete = [] - description = "routing policy-multy" - app_profile_id_1 = "app_profile_id_1" - routing = enums.RoutingPolicyType.ANY - self._test_create_app_profile_helper( - app_profile_id_1, - instance, - routing_policy_type=routing, - description=description, - ignore_warnings=True, - ) - app_profiles_to_delete.append(app_profile_id_1) - - # Test list app profiles - self._test_list_app_profiles_helper(instance, [app_profile_id_1]) - - # Test modify app profile app_profile_id_1 - # routing policy to single cluster policy, - # cluster -> ALT_CLUSTER_ID_1, - # allow_transactional_writes -> disallowed - # modify description - description = "to routing policy-single" - routing = enums.RoutingPolicyType.SINGLE - self._test_modify_app_profile_helper( - app_profile_id_1, - instance, - routing_policy_type=routing, - description=description, - cluster_id=ALT_CLUSTER_ID_1, - allow_transactional_writes=False, - ) - - # Test modify app profile app_profile_id_1 - # cluster -> ALT_CLUSTER_ID_2, - # 
allow_transactional_writes -> allowed - self._test_modify_app_profile_helper( - app_profile_id_1, - instance, - routing_policy_type=routing, - description=description, - cluster_id=ALT_CLUSTER_ID_2, - allow_transactional_writes=True, - ignore_warnings=True, - ) - - # Test create app profile with single cluster routing policy - description = "routing policy-single" - app_profile_id_2 = "app_profile_id_2" - routing = enums.RoutingPolicyType.SINGLE - self._test_create_app_profile_helper( - app_profile_id_2, - instance, - routing_policy_type=routing, - description=description, - cluster_id=ALT_CLUSTER_ID_2, - allow_transactional_writes=False, - ) - app_profiles_to_delete.append(app_profile_id_2) - - # Test list app profiles - self._test_list_app_profiles_helper( - instance, [app_profile_id_1, app_profile_id_2] - ) - - # Test modify app profile app_profile_id_2 to - # allow transactional writes - # Note: no need to set ``ignore_warnings`` to True - # since we are not restrictings anything with this modification. - self._test_modify_app_profile_helper( - app_profile_id_2, - instance, - routing_policy_type=routing, - description=description, - cluster_id=ALT_CLUSTER_ID_2, - allow_transactional_writes=True, - ) - - # Test modify app profile app_profile_id_2 routing policy - # to multi_cluster_routing policy - # modify description - description = "to routing policy-multy" - routing = enums.RoutingPolicyType.ANY - self._test_modify_app_profile_helper( - app_profile_id_2, - instance, - routing_policy_type=routing, - description=description, - allow_transactional_writes=False, - ignore_warnings=True, - ) - - # Test delete app profiles - for app_profile_id in app_profiles_to_delete: - self._test_delete_app_profile_helper(app_profile_id, instance) - - @pytest.mark.skipif( - not KMS_KEY_NAME, reason="requires KMS_KEY_NAME environment variable" - ) - def test_create_instance_w_two_clusters_cmek(self): - from google.cloud.bigtable import enums - from google.cloud.bigtable.table import ClusterState - - _PRODUCTION = enums.Instance.Type.PRODUCTION - ALT_INSTANCE_ID = "dif-cmek" + UNIQUE_SUFFIX - instance = Config.CLIENT.instance( - ALT_INSTANCE_ID, instance_type=_PRODUCTION, labels=LABELS - ) - - ALT_CLUSTER_ID_1 = ALT_INSTANCE_ID + "-c1" - ALT_CLUSTER_ID_2 = ALT_INSTANCE_ID + "-c2" - LOCATION_ID_2 = "us-central1-f" - STORAGE_TYPE = enums.StorageType.HDD - serve_nodes = 1 - cluster_1 = instance.cluster( - ALT_CLUSTER_ID_1, - location_id=LOCATION_ID, - serve_nodes=serve_nodes, - default_storage_type=STORAGE_TYPE, - kms_key_name=KMS_KEY_NAME, - ) - cluster_2 = instance.cluster( - ALT_CLUSTER_ID_2, - location_id=LOCATION_ID_2, - serve_nodes=serve_nodes, - default_storage_type=STORAGE_TYPE, - kms_key_name=KMS_KEY_NAME, - ) - operation = instance.create(clusters=[cluster_1, cluster_2]) - - # Make sure this instance gets deleted after the test case. - self.instances_to_delete.append(instance) - - # We want to make sure the operation completes. - operation.result(timeout=120) - - # Create a new instance instance and make sure it is the same. 
- instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID) - instance_alt.reload() - - self.assertEqual(instance, instance_alt) - self.assertEqual(instance.display_name, instance_alt.display_name) - self.assertEqual(instance.type_, instance_alt.type_) - - clusters, failed_locations = instance_alt.list_clusters() - self.assertEqual(failed_locations, []) - - clusters.sort(key=lambda x: x.name) - alt_cluster_1, alt_cluster_2 = clusters - - self.assertEqual(cluster_1.location_id, alt_cluster_1.location_id) - self.assertEqual(alt_cluster_1.state, enums.Cluster.State.READY) - self.assertEqual(cluster_1.serve_nodes, alt_cluster_1.serve_nodes) - self.assertEqual( - cluster_1.default_storage_type, alt_cluster_1.default_storage_type - ) - self.assertEqual(cluster_2.location_id, alt_cluster_2.location_id) - self.assertEqual(alt_cluster_2.state, enums.Cluster.State.READY) - self.assertEqual(cluster_2.serve_nodes, alt_cluster_2.serve_nodes) - self.assertEqual( - cluster_2.default_storage_type, alt_cluster_2.default_storage_type - ) - - # Test list clusters in project via 'client.list_clusters' - clusters, failed_locations = Config.CLIENT.list_clusters() - self.assertFalse(failed_locations) - found = set([cluster.name for cluster in clusters]) - self.assertTrue( - {alt_cluster_1.name, alt_cluster_2.name, Config.CLUSTER.name}.issubset( - found - ) - ) - - temp_table_id = "test-get-cluster-states" - temp_table = instance.table(temp_table_id) - temp_table.create() - - encryption_info = temp_table.get_encryption_info() - self.assertEqual( - encryption_info[ALT_CLUSTER_ID_1][0].encryption_type, - enums.EncryptionInfo.EncryptionType.CUSTOMER_MANAGED_ENCRYPTION, - ) - self.assertEqual( - encryption_info[ALT_CLUSTER_ID_2][0].encryption_type, - enums.EncryptionInfo.EncryptionType.CUSTOMER_MANAGED_ENCRYPTION, - ) - - result = temp_table.get_cluster_states() - ReplicationState = enums.Table.ReplicationState - expected_results = [ - ClusterState(ReplicationState.STATE_NOT_KNOWN), - ClusterState(ReplicationState.INITIALIZING), - ClusterState(ReplicationState.PLANNED_MAINTENANCE), - ClusterState(ReplicationState.UNPLANNED_MAINTENANCE), - ClusterState(ReplicationState.READY), - ] - cluster_id_list = result.keys() - self.assertEqual(len(cluster_id_list), 2) - self.assertIn(ALT_CLUSTER_ID_1, cluster_id_list) - self.assertIn(ALT_CLUSTER_ID_2, cluster_id_list) - for clusterstate in result.values(): - self.assertIn(clusterstate, expected_results) - - # Test create app profile with multi_cluster_routing policy - app_profiles_to_delete = [] - description = "routing policy-multy" - app_profile_id_1 = "app_profile_id_1" - routing = enums.RoutingPolicyType.ANY - self._test_create_app_profile_helper( - app_profile_id_1, - instance, - routing_policy_type=routing, - description=description, - ignore_warnings=True, - ) - app_profiles_to_delete.append(app_profile_id_1) - - # Test list app profiles - self._test_list_app_profiles_helper(instance, [app_profile_id_1]) - - # Test modify app profile app_profile_id_1 - # routing policy to single cluster policy, - # cluster -> ALT_CLUSTER_ID_1, - # allow_transactional_writes -> disallowed - # modify description - description = "to routing policy-single" - routing = enums.RoutingPolicyType.SINGLE - self._test_modify_app_profile_helper( - app_profile_id_1, - instance, - routing_policy_type=routing, - description=description, - cluster_id=ALT_CLUSTER_ID_1, - allow_transactional_writes=False, - ) - - # Test modify app profile app_profile_id_1 - # cluster -> ALT_CLUSTER_ID_2, - # 
allow_transactional_writes -> allowed - self._test_modify_app_profile_helper( - app_profile_id_1, - instance, - routing_policy_type=routing, - description=description, - cluster_id=ALT_CLUSTER_ID_2, - allow_transactional_writes=True, - ignore_warnings=True, - ) - - # Test create app profile with single cluster routing policy - description = "routing policy-single" - app_profile_id_2 = "app_profile_id_2" - routing = enums.RoutingPolicyType.SINGLE - self._test_create_app_profile_helper( - app_profile_id_2, - instance, - routing_policy_type=routing, - description=description, - cluster_id=ALT_CLUSTER_ID_2, - allow_transactional_writes=False, - ) - app_profiles_to_delete.append(app_profile_id_2) - - # Test list app profiles - self._test_list_app_profiles_helper( - instance, [app_profile_id_1, app_profile_id_2] - ) - - # Test modify app profile app_profile_id_2 to - # allow transactional writes - # Note: no need to set ``ignore_warnings`` to True - # since we are not restrictings anything with this modification. - self._test_modify_app_profile_helper( - app_profile_id_2, - instance, - routing_policy_type=routing, - description=description, - cluster_id=ALT_CLUSTER_ID_2, - allow_transactional_writes=True, - ) - - # Test modify app profile app_profile_id_2 routing policy - # to multi_cluster_routing policy - # modify description - description = "to routing policy-multy" - routing = enums.RoutingPolicyType.ANY - self._test_modify_app_profile_helper( - app_profile_id_2, - instance, - routing_policy_type=routing, - description=description, - allow_transactional_writes=False, - ignore_warnings=True, - ) - - # Test delete app profiles - for app_profile_id in app_profiles_to_delete: - self._test_delete_app_profile_helper(app_profile_id, instance) - - def test_update_display_name_and_labels(self): - OLD_DISPLAY_NAME = Config.INSTANCE.display_name - NEW_DISPLAY_NAME = "Foo Bar Baz" - n_label_stamp = ( - datetime.datetime.utcnow() - .replace(microsecond=0, tzinfo=UTC) - .strftime("%Y-%m-%dt%H-%M-%S") - ) - - NEW_LABELS = {LABEL_KEY: str(n_label_stamp)} - Config.INSTANCE.display_name = NEW_DISPLAY_NAME - Config.INSTANCE.labels = NEW_LABELS - operation = Config.INSTANCE.update() - - # We want to make sure the operation completes. - operation.result(timeout=30) - - # Create a new instance instance and reload it. - instance_alt = Config.CLIENT.instance(INSTANCE_ID, labels=LABELS) - self.assertEqual(instance_alt.display_name, OLD_DISPLAY_NAME) - self.assertEqual(instance_alt.labels, LABELS) - instance_alt.reload() - self.assertEqual(instance_alt.display_name, NEW_DISPLAY_NAME) - self.assertEqual(instance_alt.labels, NEW_LABELS) - - # Make sure to put the instance back the way it was for the - # other test cases. - Config.INSTANCE.display_name = OLD_DISPLAY_NAME - Config.INSTANCE.labels = LABELS - operation = Config.INSTANCE.update() - - # We want to make sure the operation completes. - operation.result(timeout=30) - - def test_update_type(self): - from google.cloud.bigtable.enums import Instance - - _DEVELOPMENT = Instance.Type.DEVELOPMENT - _PRODUCTION = Instance.Type.PRODUCTION - ALT_INSTANCE_ID = "ndif" + UNIQUE_SUFFIX - instance = Config.CLIENT.instance( - ALT_INSTANCE_ID, instance_type=_DEVELOPMENT, labels=LABELS - ) - operation = instance.create(location_id=LOCATION_ID) - - # Make sure this instance gets deleted after the test case. - self.instances_to_delete.append(instance) - - # We want to make sure the operation completes. 
- operation.result(timeout=30) - - # Unset the display_name - instance.display_name = None - - instance.type_ = _PRODUCTION - operation = instance.update() - - # We want to make sure the operation completes. - operation.result(timeout=30) - - # Create a new instance instance and reload it. - instance_alt = Config.CLIENT.instance(ALT_INSTANCE_ID) - self.assertIsNone(instance_alt.type_) - instance_alt.reload() - self.assertEqual(instance_alt.type_, _PRODUCTION) - - def test_update_cluster(self): - NEW_SERVE_NODES = 4 - - Config.CLUSTER.serve_nodes = NEW_SERVE_NODES - - operation = Config.CLUSTER.update() - - # We want to make sure the operation completes. - operation.result(timeout=30) - - # Create a new cluster instance and reload it. - alt_cluster = Config.INSTANCE.cluster(CLUSTER_ID) - alt_cluster.reload() - self.assertEqual(alt_cluster.serve_nodes, NEW_SERVE_NODES) - - # Make sure to put the cluster back the way it was for the - # other test cases. - Config.CLUSTER.serve_nodes = SERVE_NODES - operation = Config.CLUSTER.update() - operation.result(timeout=30) - - def test_create_cluster(self): - from google.cloud.bigtable.enums import StorageType - from google.cloud.bigtable.enums import Cluster - - ALT_CLUSTER_ID = INSTANCE_ID + "-c2" - ALT_LOCATION_ID = "us-central1-f" - ALT_SERVE_NODES = 2 - - cluster_2 = Config.INSTANCE.cluster( - ALT_CLUSTER_ID, - location_id=ALT_LOCATION_ID, - serve_nodes=ALT_SERVE_NODES, - default_storage_type=(StorageType.SSD), - ) - operation = cluster_2.create() - - # We want to make sure the operation completes. - operation.result(timeout=30) - - # Create a new object instance, reload and make sure it is the same. - alt_cluster = Config.INSTANCE.cluster(ALT_CLUSTER_ID) - alt_cluster.reload() - - self.assertEqual(cluster_2, alt_cluster) - self.assertEqual(cluster_2.location_id, alt_cluster.location_id) - self.assertEqual(alt_cluster.state, Cluster.State.READY) - self.assertEqual(cluster_2.serve_nodes, alt_cluster.serve_nodes) - self.assertEqual( - cluster_2.default_storage_type, alt_cluster.default_storage_type - ) - - # Delete the newly created cluster and confirm - self.assertTrue(cluster_2.exists()) - cluster_2.delete() - self.assertFalse(cluster_2.exists()) - - def _test_create_app_profile_helper( - self, - app_profile_id, - instance, - routing_policy_type, - description=None, - cluster_id=None, - allow_transactional_writes=None, - ignore_warnings=None, - ): - - app_profile = instance.app_profile( - app_profile_id=app_profile_id, - routing_policy_type=routing_policy_type, - description=description, - cluster_id=cluster_id, - allow_transactional_writes=allow_transactional_writes, - ) - self.assertEqual( - app_profile.allow_transactional_writes, allow_transactional_writes - ) - - app_profile = app_profile.create(ignore_warnings=ignore_warnings) - - # Load a different app_profile objec form the server and - # verrify that it is the same - alt_app_profile = instance.app_profile(app_profile_id) - alt_app_profile.reload() - - self.assertEqual(app_profile.app_profile_id, alt_app_profile.app_profile_id) - self.assertEqual(app_profile.routing_policy_type, routing_policy_type) - self.assertEqual(alt_app_profile.routing_policy_type, routing_policy_type) - self.assertEqual(app_profile.description, alt_app_profile.description) - self.assertFalse(app_profile.allow_transactional_writes) - self.assertFalse(alt_app_profile.allow_transactional_writes) - - def _test_list_app_profiles_helper(self, instance, app_profile_ids): - app_profiles = instance.list_app_profiles() - found 
= [app_prof.app_profile_id for app_prof in app_profiles] - for app_profile_id in app_profile_ids: - self.assertTrue(app_profile_id in found) - - def _test_modify_app_profile_helper( - self, - app_profile_id, - instance, - routing_policy_type, - description=None, - cluster_id=None, - allow_transactional_writes=None, - ignore_warnings=None, - ): - app_profile = instance.app_profile( - app_profile_id=app_profile_id, - routing_policy_type=routing_policy_type, - description=description, - cluster_id=cluster_id, - allow_transactional_writes=allow_transactional_writes, - ) - - operation = app_profile.update(ignore_warnings) - operation.result(timeout=30) - - alt_app_profile = instance.app_profile(app_profile_id) - alt_app_profile.reload() - self.assertEqual(alt_app_profile.description, description) - self.assertEqual(alt_app_profile.routing_policy_type, routing_policy_type) - self.assertEqual(alt_app_profile.cluster_id, cluster_id) - self.assertEqual( - alt_app_profile.allow_transactional_writes, allow_transactional_writes - ) - - def _test_delete_app_profile_helper(self, app_profile_id, instance): - app_profile = instance.app_profile(app_profile_id) - self.assertTrue(app_profile.exists()) - app_profile.delete(ignore_warnings=True) - self.assertFalse(app_profile.exists()) - - -class TestTableAdminAPI(unittest.TestCase): - @classmethod - def setUpClass(cls): - cls._table = Config.INSTANCE_DATA.table(TABLE_ID) - cls._table.create() - - @classmethod - def tearDownClass(cls): - cls._table.delete() - - def setUp(self): - self.tables_to_delete = [] - self.backups_to_delete = [] - - def tearDown(self): - for table in self.tables_to_delete: - table.delete() - for backup in self.backups_to_delete: - backup.delete() - - def _skip_if_emulated(self, message): - # NOTE: This method is necessary because ``Config.IN_EMULATOR`` - # is set at runtime rather than import time, which means we - # can't use the @unittest.skipIf decorator. - if Config.IN_EMULATOR: - self.skipTest(message) - - def test_list_tables(self): - # Since `Config.INSTANCE_DATA` is newly created in `setUpModule`, the - # table created in `setUpClass` here will be the only one. - tables = Config.INSTANCE_DATA.list_tables() - self.assertEqual(tables, [self._table]) - - def test_exists(self): - retry_until_true = RetryResult(lambda result: result) - retry_until_false = RetryResult(lambda result: not result) - temp_table_id = "test-table_existence" - temp_table = Config.INSTANCE_DATA.table(temp_table_id) - self.assertFalse(temp_table.exists()) - temp_table.create() - self.assertTrue(retry_until_true(temp_table.exists)()) - temp_table.delete() - self.assertFalse(retry_until_false(temp_table.exists)()) - - def test_create_table(self): - temp_table_id = "test-create-table" - temp_table = Config.INSTANCE_DATA.table(temp_table_id) - temp_table.create() - self.tables_to_delete.append(temp_table) - - # First, create a sorted version of our expected result. - name_attr = operator.attrgetter("name") - expected_tables = sorted([temp_table, self._table], key=name_attr) - - # Then query for the tables in the instance and sort them by - # name as well. 
- tables = Config.INSTANCE_DATA.list_tables() - sorted_tables = sorted(tables, key=name_attr) - self.assertEqual(sorted_tables, expected_tables) - - def test_test_iam_permissions(self): - self._skip_if_emulated("Method not implemented in bigtable emulator") - temp_table_id = "test-test-iam-policy-table" - temp_table = Config.INSTANCE_DATA.table(temp_table_id) - temp_table.create() - self.tables_to_delete.append(temp_table) - - permissions = ["bigtable.tables.mutateRows", "bigtable.tables.readRows"] - permissions_allowed = temp_table.test_iam_permissions(permissions) - self.assertEqual(permissions, permissions_allowed) - - def test_get_iam_policy(self): - self._skip_if_emulated("Method not implemented in bigtable emulator") - temp_table_id = "test-get-iam-policy-table" - temp_table = Config.INSTANCE_DATA.table(temp_table_id) - temp_table.create() - self.tables_to_delete.append(temp_table) - - policy = temp_table.get_iam_policy().to_api_repr() - self.assertEqual(policy["etag"], "ACAB") - self.assertEqual(policy["version"], 0) - - def test_set_iam_policy(self): - self._skip_if_emulated("Method not implemented in bigtable emulator") - temp_table_id = "test-set-iam-policy-table" - temp_table = Config.INSTANCE_DATA.table(temp_table_id) - temp_table.create() - self.tables_to_delete.append(temp_table) - - new_policy = Policy() - service_account_email = Config.CLIENT._credentials.service_account_email - new_policy[BIGTABLE_ADMIN_ROLE] = [ - Policy.service_account(service_account_email) - ] - policy_latest = temp_table.set_iam_policy(new_policy).to_api_repr() - - self.assertEqual(policy_latest["bindings"][0]["role"], "roles/bigtable.admin") - self.assertIn(service_account_email, policy_latest["bindings"][0]["members"][0]) - - def test_create_table_with_families(self): - temp_table_id = "test-create-table-with-failies" - temp_table = Config.INSTANCE_DATA.table(temp_table_id) - gc_rule = MaxVersionsGCRule(1) - temp_table.create(column_families={COLUMN_FAMILY_ID1: gc_rule}) - self.tables_to_delete.append(temp_table) - - col_fams = temp_table.list_column_families() - - self.assertEqual(len(col_fams), 1) - retrieved_col_fam = col_fams[COLUMN_FAMILY_ID1] - self.assertIs(retrieved_col_fam._table, temp_table) - self.assertEqual(retrieved_col_fam.column_family_id, COLUMN_FAMILY_ID1) - self.assertEqual(retrieved_col_fam.gc_rule, gc_rule) - - def test_create_table_with_split_keys(self): - self._skip_if_emulated("Split keys are not supported by Bigtable emulator") - temp_table_id = "foo-bar-baz-split-table" - initial_split_keys = [b"split_key_1", b"split_key_10", b"split_key_20"] - temp_table = Config.INSTANCE_DATA.table(temp_table_id) - temp_table.create(initial_split_keys=initial_split_keys) - self.tables_to_delete.append(temp_table) - - # Read Sample Row Keys for created splits - sample_row_keys = temp_table.sample_row_keys() - actual_keys = [srk.row_key for srk in sample_row_keys] - - expected_keys = initial_split_keys - expected_keys.append(b"") - - self.assertEqual(actual_keys, expected_keys) - - def test_create_column_family(self): - temp_table_id = "test-create-column-family" - temp_table = Config.INSTANCE_DATA.table(temp_table_id) - temp_table.create() - self.tables_to_delete.append(temp_table) - - self.assertEqual(temp_table.list_column_families(), {}) - gc_rule = MaxVersionsGCRule(1) - column_family = temp_table.column_family(COLUMN_FAMILY_ID1, gc_rule=gc_rule) - column_family.create() - - col_fams = temp_table.list_column_families() - - self.assertEqual(len(col_fams), 1) - retrieved_col_fam = 
col_fams[COLUMN_FAMILY_ID1] - self.assertIs(retrieved_col_fam._table, column_family._table) - self.assertEqual( - retrieved_col_fam.column_family_id, column_family.column_family_id - ) - self.assertEqual(retrieved_col_fam.gc_rule, gc_rule) - - def test_update_column_family(self): - temp_table_id = "test-update-column-family" - temp_table = Config.INSTANCE_DATA.table(temp_table_id) - temp_table.create() - self.tables_to_delete.append(temp_table) - - gc_rule = MaxVersionsGCRule(1) - column_family = temp_table.column_family(COLUMN_FAMILY_ID1, gc_rule=gc_rule) - column_family.create() - - # Check that our created table is as expected. - col_fams = temp_table.list_column_families() - self.assertEqual(col_fams, {COLUMN_FAMILY_ID1: column_family}) - - # Update the column family's GC rule and then try to update. - column_family.gc_rule = None - column_family.update() - - # Check that the update has propagated. - col_fams = temp_table.list_column_families() - self.assertIsNone(col_fams[COLUMN_FAMILY_ID1].gc_rule) - - def test_delete_column_family(self): - temp_table_id = "test-delete-column-family" - temp_table = Config.INSTANCE_DATA.table(temp_table_id) - temp_table.create() - self.tables_to_delete.append(temp_table) - - self.assertEqual(temp_table.list_column_families(), {}) - column_family = temp_table.column_family(COLUMN_FAMILY_ID1) - column_family.create() - - # Make sure the family is there before deleting it. - col_fams = temp_table.list_column_families() - self.assertEqual(list(col_fams.keys()), [COLUMN_FAMILY_ID1]) - - retry_504 = RetryErrors(DeadlineExceeded) - retry_504(column_family.delete)() - # Make sure we have successfully deleted it. - self.assertEqual(temp_table.list_column_families(), {}) - - def test_backup(self): - if Config.IN_EMULATOR: - self.skipTest("backups are not supported in the emulator") - - from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.bigtable import enums - - temp_table_id = "test-backup-table" - temp_table = Config.INSTANCE_DATA.table(temp_table_id) - temp_table.create() - self.tables_to_delete.append(temp_table) - - temp_backup_id = "test-backup" - - # TODO: consider using `datetime.datetime.now().timestamp()` - # when support for Python 2 is fully dropped - expire = int(time.mktime(datetime.datetime.now().timetuple())) + 604800 - - # Testing `Table.backup()` factory - temp_backup = temp_table.backup( - temp_backup_id, - cluster_id=CLUSTER_ID_DATA, - expire_time=datetime.datetime.utcfromtimestamp(expire), - ) - - # Reinitialize the admin client. 
This is to test `_table_admin_client` returns a client object (and not NoneType) - temp_backup._instance._client = Client(admin=True) - - # Sanity check for `Backup.exists()` method - self.assertFalse(temp_backup.exists()) - - # Testing `Backup.create()` method - temp_backup.create().result(timeout=30) - - # Implicit testing of `Backup.delete()` method - self.backups_to_delete.append(temp_backup) - - # Testing `Backup.exists()` method - self.assertTrue(temp_backup.exists()) - - # Testing `Table.list_backups()` method - temp_table_backup = temp_table.list_backups()[0] - self.assertEqual(temp_backup_id, temp_table_backup.backup_id) - self.assertEqual(CLUSTER_ID_DATA, temp_table_backup.cluster) - self.assertEqual(expire, temp_table_backup.expire_time.seconds) - self.assertEqual( - temp_table_backup.encryption_info.encryption_type, - enums.EncryptionInfo.EncryptionType.GOOGLE_DEFAULT_ENCRYPTION, - ) - - # Testing `Backup.update_expire_time()` method - expire += 3600 # A one-hour change in the `expire_time` parameter - updated_time = datetime.datetime.utcfromtimestamp(expire) - temp_backup.update_expire_time(updated_time) - test = _datetime_to_pb_timestamp(updated_time) - - # Testing `Backup.get()` method - temp_table_backup = temp_backup.get() - self.assertEqual( - test.seconds, - DatetimeWithNanoseconds.timestamp(temp_table_backup.expire_time), - ) - - # Testing `Table.restore()` and `Backup.retore()` methods - restored_table_id = "test-backup-table-restored" - restored_table = Config.INSTANCE_DATA.table(restored_table_id) - temp_table.restore( - restored_table_id, cluster_id=CLUSTER_ID_DATA, backup_id=temp_backup_id, - ).result(timeout=30) - tables = Config.INSTANCE_DATA.list_tables() - self.assertIn(restored_table, tables) - restored_table.delete() - - # Testing `Backup.restore()` into a different instance: - # Setting up another instance... - alt_instance_id = "gcp-alt-" + UNIQUE_SUFFIX - alt_cluster_id = alt_instance_id + "-cluster" - alt_instance = Config.CLIENT.instance(alt_instance_id, labels=LABELS) - alt_cluster = alt_instance.cluster( - cluster_id=alt_cluster_id, location_id=LOCATION_ID, serve_nodes=SERVE_NODES, - ) - if not Config.IN_EMULATOR: - alt_instance.create(clusters=[alt_cluster]).result(timeout=30) - - # Testing `restore()`... - temp_backup.restore(restored_table_id, alt_instance_id).result(timeout=30) - restored_table = alt_instance.table(restored_table_id) - self.assertIn(restored_table, alt_instance.list_tables()) - restored_table.delete() - - # Tearing down the resources... - if not Config.IN_EMULATOR: - retry_429(alt_instance.delete)() - - -class TestDataAPI(unittest.TestCase): - @classmethod - def setUpClass(cls): - cls._table = table = Config.INSTANCE_DATA.table("test-data-api") - table.create() - table.column_family(COLUMN_FAMILY_ID1).create() - table.column_family(COLUMN_FAMILY_ID2).create() - - @classmethod - def tearDownClass(cls): - # Will also delete any data contained in the table. - cls._table.delete() - - def _maybe_emulator_skip(self, message): - # NOTE: This method is necessary because ``Config.IN_EMULATOR`` - # is set at runtime rather than import time, which means we - # can't use the @unittest.skipIf decorator. 
- if Config.IN_EMULATOR: - self.skipTest(message) - - def setUp(self): - self.rows_to_delete = [] - - def tearDown(self): - for row in self.rows_to_delete: - row.clear() - row.delete() - row.commit() - - def _write_to_row(self, row1=None, row2=None, row3=None, row4=None): - timestamp1 = datetime.datetime.utcnow().replace(tzinfo=UTC) - timestamp1_micros = _microseconds_from_datetime(timestamp1) - # Truncate to millisecond granularity. - timestamp1_micros -= timestamp1_micros % 1000 - timestamp1 = _datetime_from_microseconds(timestamp1_micros) - # 1000 microseconds is a millisecond - timestamp2 = timestamp1 + datetime.timedelta(microseconds=1000) - timestamp2_micros = _microseconds_from_datetime(timestamp2) - timestamp3 = timestamp1 + datetime.timedelta(microseconds=2000) - timestamp3_micros = _microseconds_from_datetime(timestamp3) - timestamp4 = timestamp1 + datetime.timedelta(microseconds=3000) - timestamp4_micros = _microseconds_from_datetime(timestamp4) - - if row1 is not None: - row1.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1, timestamp=timestamp1) - if row2 is not None: - row2.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL2, timestamp=timestamp2) - if row3 is not None: - row3.set_cell(COLUMN_FAMILY_ID1, COL_NAME2, CELL_VAL3, timestamp=timestamp3) - if row4 is not None: - row4.set_cell(COLUMN_FAMILY_ID2, COL_NAME3, CELL_VAL4, timestamp=timestamp4) - - # Create the cells we will check. - cell1 = Cell(CELL_VAL1, timestamp1_micros) - cell2 = Cell(CELL_VAL2, timestamp2_micros) - cell3 = Cell(CELL_VAL3, timestamp3_micros) - cell4 = Cell(CELL_VAL4, timestamp4_micros) - return cell1, cell2, cell3, cell4 - - def test_timestamp_filter_millisecond_granularity(self): - from google.cloud.bigtable import row_filters - - end = datetime.datetime.now() - start = end - datetime.timedelta(minutes=60) - timestamp_range = row_filters.TimestampRange(start=start, end=end) - timefilter = row_filters.TimestampRangeFilter(timestamp_range) - row_data = self._table.read_rows(filter_=timefilter) - row_data.consume_all() - - def test_mutate_rows(self): - row1 = self._table.row(ROW_KEY) - row1.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1) - row1.commit() - self.rows_to_delete.append(row1) - row2 = self._table.row(ROW_KEY_ALT) - row2.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL2) - row2.commit() - self.rows_to_delete.append(row2) - - # Change the contents - row1.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL3) - row2.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL4) - rows = [row1, row2] - statuses = self._table.mutate_rows(rows) - result = [status.code for status in statuses] - expected_result = [0, 0] - self.assertEqual(result, expected_result) - - # Check the contents - row1_data = self._table.read_row(ROW_KEY) - self.assertEqual( - row1_data.cells[COLUMN_FAMILY_ID1][COL_NAME1][0].value, CELL_VAL3 - ) - row2_data = self._table.read_row(ROW_KEY_ALT) - self.assertEqual( - row2_data.cells[COLUMN_FAMILY_ID1][COL_NAME1][0].value, CELL_VAL4 - ) - - def test_truncate_table(self): - row_keys = [ - b"row_key_1", - b"row_key_2", - b"row_key_3", - b"row_key_4", - b"row_key_5", - b"row_key_pr_1", - b"row_key_pr_2", - b"row_key_pr_3", - b"row_key_pr_4", - b"row_key_pr_5", - ] - - for row_key in row_keys: - row = self._table.row(row_key) - row.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1) - row.commit() - self.rows_to_delete.append(row) - - self._table.truncate(timeout=200) - - read_rows = self._table.yield_rows() - - for row in read_rows: - self.assertNotIn(row.row_key.decode("utf-8"), row_keys) - - def 
test_drop_by_prefix_table(self): - row_keys = [ - b"row_key_1", - b"row_key_2", - b"row_key_3", - b"row_key_4", - b"row_key_5", - b"row_key_pr_1", - b"row_key_pr_2", - b"row_key_pr_3", - b"row_key_pr_4", - b"row_key_pr_5", - ] - - for row_key in row_keys: - row = self._table.row(row_key) - row.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1) - row.commit() - self.rows_to_delete.append(row) - - self._table.drop_by_prefix(row_key_prefix="row_key_pr", timeout=200) - - read_rows = self._table.yield_rows() - expected_rows_count = 5 - read_rows_count = 0 - - for row in read_rows: - if row.row_key in row_keys: - read_rows_count += 1 - - self.assertEqual(expected_rows_count, read_rows_count) - - def test_yield_rows_with_row_set(self): - row_keys = [ - b"row_key_1", - b"row_key_2", - b"row_key_3", - b"row_key_4", - b"row_key_5", - b"row_key_6", - b"row_key_7", - b"row_key_8", - b"row_key_9", - ] - - rows = [] - for row_key in row_keys: - row = self._table.row(row_key) - row.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1) - rows.append(row) - self.rows_to_delete.append(row) - self._table.mutate_rows(rows) - - row_set = RowSet() - row_set.add_row_range(RowRange(start_key=b"row_key_3", end_key=b"row_key_7")) - row_set.add_row_key(b"row_key_1") - - read_rows = self._table.yield_rows(row_set=row_set) - - expected_row_keys = [ - b"row_key_1", - b"row_key_3", - b"row_key_4", - b"row_key_5", - b"row_key_6", - ] - found_row_keys = [row.row_key for row in read_rows] - self.assertEqual(found_row_keys, expected_row_keys) - - def test_add_row_range_by_prefix_from_keys(self): - row_keys = [ - b"row_key_1", - b"row_key_2", - b"row_key_3", - b"row_key_4", - b"sample_row_key_1", - b"sample_row_key_2", - ] - - rows = [] - for row_key in row_keys: - row = self._table.row(row_key) - row.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1) - rows.append(row) - self.rows_to_delete.append(row) - self._table.mutate_rows(rows) - - row_set = RowSet() - row_set.add_row_range_with_prefix("row") - - read_rows = self._table.yield_rows(row_set=row_set) - - expected_row_keys = [ - b"row_key_1", - b"row_key_2", - b"row_key_3", - b"row_key_4", - ] - found_row_keys = [row.row_key for row in read_rows] - self.assertEqual(found_row_keys, expected_row_keys) - - def test_read_large_cell_limit(self): - self._maybe_emulator_skip( - "Maximum gRPC received message size for emulator is 4194304 bytes." - ) - row = self._table.row(ROW_KEY) - self.rows_to_delete.append(row) - - number_of_bytes = 10 * 1024 * 1024 - data = b"1" * number_of_bytes # 10MB of 1's. - row.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, data) - row.commit() - - # Read back the contents of the row. - partial_row_data = self._table.read_row(ROW_KEY) - self.assertEqual(partial_row_data.row_key, ROW_KEY) - cell = partial_row_data.cells[COLUMN_FAMILY_ID1] - column = cell[COL_NAME1] - self.assertEqual(len(column), 1) - self.assertEqual(column[0].value, data) - - def test_read_row(self): - row = self._table.row(ROW_KEY) - self.rows_to_delete.append(row) - - cell1, cell2, cell3, cell4 = self._write_to_row(row, row, row, row) - row.commit() - - # Read back the contents of the row. - partial_row_data = self._table.read_row(ROW_KEY) - self.assertEqual(partial_row_data.row_key, ROW_KEY) - - # Check the cells match. 
- ts_attr = operator.attrgetter("timestamp") - expected_row_contents = { - COLUMN_FAMILY_ID1: { - COL_NAME1: sorted([cell1, cell2], key=ts_attr, reverse=True), - COL_NAME2: [cell3], - }, - COLUMN_FAMILY_ID2: {COL_NAME3: [cell4]}, - } - self.assertEqual(partial_row_data.cells, expected_row_contents) - - def test_read_rows(self): - row = self._table.row(ROW_KEY) - row_alt = self._table.row(ROW_KEY_ALT) - self.rows_to_delete.extend([row, row_alt]) - - cell1, cell2, cell3, cell4 = self._write_to_row(row, row_alt, row, row_alt) - row.commit() - row_alt.commit() - - rows_data = self._table.read_rows() - self.assertEqual(rows_data.rows, {}) - rows_data.consume_all() - - # NOTE: We should refrain from editing protected data on instances. - # Instead we should make the values public or provide factories - # for constructing objects with them. - row_data = PartialRowData(ROW_KEY) - row_data._chunks_encountered = True - row_data._committed = True - row_data._cells = {COLUMN_FAMILY_ID1: {COL_NAME1: [cell1], COL_NAME2: [cell3]}} - - row_alt_data = PartialRowData(ROW_KEY_ALT) - row_alt_data._chunks_encountered = True - row_alt_data._committed = True - row_alt_data._cells = { - COLUMN_FAMILY_ID1: {COL_NAME1: [cell2]}, - COLUMN_FAMILY_ID2: {COL_NAME3: [cell4]}, - } - - expected_rows = {ROW_KEY: row_data, ROW_KEY_ALT: row_alt_data} - self.assertEqual(rows_data.rows, expected_rows) - - def test_read_with_label_applied(self): - self._maybe_emulator_skip("Labels not supported by Bigtable emulator") - row = self._table.row(ROW_KEY) - self.rows_to_delete.append(row) - - cell1, _, cell3, _ = self._write_to_row(row, None, row) - row.commit() - - # Combine a label with column 1. - label1 = "label-red" - label1_filter = ApplyLabelFilter(label1) - col1_filter = ColumnQualifierRegexFilter(COL_NAME1) - chain1 = RowFilterChain(filters=[col1_filter, label1_filter]) - - # Combine a label with column 2. - label2 = "label-blue" - label2_filter = ApplyLabelFilter(label2) - col2_filter = ColumnQualifierRegexFilter(COL_NAME2) - chain2 = RowFilterChain(filters=[col2_filter, label2_filter]) - - # Bring our two labeled columns together. - row_filter = RowFilterUnion(filters=[chain1, chain2]) - partial_row_data = self._table.read_row(ROW_KEY, filter_=row_filter) - self.assertEqual(partial_row_data.row_key, ROW_KEY) - - cells_returned = partial_row_data.cells - col_fam1 = cells_returned.pop(COLUMN_FAMILY_ID1) - # Make sure COLUMN_FAMILY_ID1 was the only key. - self.assertEqual(len(cells_returned), 0) - - (cell1_new,) = col_fam1.pop(COL_NAME1) - (cell3_new,) = col_fam1.pop(COL_NAME2) - # Make sure COL_NAME1 and COL_NAME2 were the only keys. - self.assertEqual(len(col_fam1), 0) - - # Check that cell1 has matching values and gained a label. - self.assertEqual(cell1_new.value, cell1.value) - self.assertEqual(cell1_new.timestamp, cell1.timestamp) - self.assertEqual(cell1.labels, []) - self.assertEqual(cell1_new.labels, [label1]) - - # Check that cell3 has matching values and gained a label. 
- self.assertEqual(cell3_new.value, cell3.value) - self.assertEqual(cell3_new.timestamp, cell3.timestamp) - self.assertEqual(cell3.labels, []) - self.assertEqual(cell3_new.labels, [label2]) - - def test_access_with_non_admin_client(self): - client = Client(admin=False) - instance = client.instance(INSTANCE_ID_DATA) - table = instance.table(self._table.table_id) - self.assertIsNone(table.read_row("nonesuch")) diff --git a/packages/google-cloud-bigtable/tests/system/__init__.py b/packages/google-cloud-bigtable/tests/system/__init__.py new file mode 100644 index 000000000000..4de65971c238 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/system/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/packages/google-cloud-bigtable/tests/system/_helpers.py b/packages/google-cloud-bigtable/tests/system/_helpers.py new file mode 100644 index 000000000000..f6895a51f239 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/system/_helpers.py @@ -0,0 +1,45 @@ +# Copyright 2011 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import datetime + +import grpc +from google.api_core import exceptions +from google.cloud import exceptions as core_exceptions +from google.cloud._helpers import UTC +from test_utils import retry + + +retry_429 = retry.RetryErrors(exceptions.TooManyRequests, max_tries=9) +retry_504 = retry.RetryErrors(exceptions.DeadlineExceeded) +retry_until_true = retry.RetryResult(lambda result: result) +retry_until_false = retry.RetryResult(lambda result: not result) + + +def _retry_on_unavailable(exc): + """Retry only errors whose status code is 'UNAVAILABLE'.""" + return exc.code() == grpc.StatusCode.UNAVAILABLE + + +retry_grpc_unavailable = retry.RetryErrors( + core_exceptions.GrpcRendezvous, error_predicate=_retry_on_unavailable, +) + + +def label_stamp(): + return ( + datetime.datetime.utcnow() + .replace(microsecond=0, tzinfo=UTC) + .strftime("%Y-%m-%dt%H-%M-%S") + ) diff --git a/packages/google-cloud-bigtable/tests/system/conftest.py b/packages/google-cloud-bigtable/tests/system/conftest.py new file mode 100644 index 000000000000..778cf8c94033 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/system/conftest.py @@ -0,0 +1,172 @@ +# Copyright 2011 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os + +import pytest +from test_utils.system import unique_resource_id + +from google.cloud.bigtable.client import Client +from google.cloud.environment_vars import BIGTABLE_EMULATOR + +from . import _helpers + + +@pytest.fixture(scope="session") +def in_emulator(): + return os.getenv(BIGTABLE_EMULATOR) is not None + + +@pytest.fixture(scope="session") +def kms_key_name(): + return os.getenv("KMS_KEY_NAME") + + +@pytest.fixture(scope="session") +def with_kms_key_name(kms_key_name): + if kms_key_name is None: + pytest.skip("Test requires KMS_KEY_NAME environment variable") + return kms_key_name + + +@pytest.fixture(scope="session") +def not_in_emulator(in_emulator): + if in_emulator: + pytest.skip("Emulator does not support this feature") + + +@pytest.fixture(scope="session") +def unique_suffix(): + return unique_resource_id("-") + + +@pytest.fixture(scope="session") +def location_id(): + return "us-central1-c" + + +@pytest.fixture(scope="session") +def serve_nodes(): + return 3 + + +@pytest.fixture(scope="session") +def label_key(): + return "python-system" + + +@pytest.fixture(scope="session") +def instance_labels(label_key): + return {label_key: _helpers.label_stamp()} + + +@pytest.fixture(scope="session") +def admin_client(): + return Client(admin=True) + + +@pytest.fixture(scope="session") +def service_account(admin_client): + from google.oauth2.service_account import Credentials + + if not isinstance(admin_client._credentials, Credentials): + pytest.skip("These tests require a service account credential") + return admin_client._credentials + + +@pytest.fixture(scope="session") +def admin_instance_id(unique_suffix): + return f"g-c-p{unique_suffix}" + + +@pytest.fixture(scope="session") +def admin_cluster_id(admin_instance_id): + return f"{admin_instance_id}-cluster" + + +@pytest.fixture(scope="session") +def admin_instance(admin_client, admin_instance_id, instance_labels): + return admin_client.instance(admin_instance_id, labels=instance_labels) + + +@pytest.fixture(scope="session") +def admin_cluster(admin_instance, admin_cluster_id, location_id, serve_nodes): + return admin_instance.cluster( + admin_cluster_id, location_id=location_id, serve_nodes=serve_nodes, + ) + + +@pytest.fixture(scope="session") +def admin_instance_populated(admin_instance, admin_cluster, in_emulator): + # Emulator does not support instance admin operations (create / delete). 
+ # See: https://cloud.google.com/bigtable/docs/emulator + if not in_emulator: + operation = admin_instance.create(clusters=[admin_cluster]) + operation.result(timeout=30) + + yield admin_instance + + if not in_emulator: + _helpers.retry_429(admin_instance.delete)() + + +@pytest.fixture(scope="session") +def data_client(): + return Client(admin=False) + + +@pytest.fixture(scope="session") +def data_instance_id(unique_suffix): + return f"g-c-p-d{unique_suffix}" + + +@pytest.fixture(scope="session") +def data_cluster_id(data_instance_id): + return f"{data_instance_id}-cluster" + + +@pytest.fixture(scope="session") +def data_instance_populated( + admin_client, + data_instance_id, + instance_labels, + data_cluster_id, + location_id, + serve_nodes, + in_emulator, +): + # Emulator does not support instance admin operations (create / delete). + # See: https://cloud.google.com/bigtable/docs/emulator + if not in_emulator: + instance = admin_client.instance(data_instance_id, labels=instance_labels) + cluster = instance.cluster( + data_cluster_id, location_id=location_id, serve_nodes=serve_nodes, + ) + operation = instance.create(clusters=[cluster]) + operation.result(timeout=30) + + yield instance + + if not in_emulator: + _helpers.retry_429(instance.delete)() + + +@pytest.fixture(scope="function") +def instances_to_delete(): + instances_to_delete = [] + + yield instances_to_delete + + for instance in instances_to_delete: + _helpers.retry_429(instance.delete)() diff --git a/packages/google-cloud-bigtable/tests/system/test_data_api.py b/packages/google-cloud-bigtable/tests/system/test_data_api.py new file mode 100644 index 000000000000..2137aa2e4508 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/system/test_data_api.py @@ -0,0 +1,383 @@ +# Copyright 2011 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import datetime +import operator + +import pytest + +COLUMN_FAMILY_ID1 = "col-fam-id1" +COLUMN_FAMILY_ID2 = "col-fam-id2" +COL_NAME1 = b"col-name1" +COL_NAME2 = b"col-name2" +COL_NAME3 = b"col-name3-but-other-fam" +CELL_VAL1 = b"cell-val" +CELL_VAL2 = b"cell-val-newer" +CELL_VAL3 = b"altcol-cell-val" +CELL_VAL4 = b"foo" +ROW_KEY = b"row-key" +ROW_KEY_ALT = b"row-key-alt" + + +@pytest.fixture(scope="module") +def data_table_id(): + return "test-data-api" + + +@pytest.fixture(scope="module") +def data_table(data_instance_populated, data_table_id): + table = data_instance_populated.table(data_table_id) + table.create() + table.column_family(COLUMN_FAMILY_ID1).create() + table.column_family(COLUMN_FAMILY_ID2).create() + + yield table + + table.delete() + + +@pytest.fixture(scope="function") +def rows_to_delete(): + rows_to_delete = [] + + yield rows_to_delete + + for row in rows_to_delete: + row.clear() + row.delete() + row.commit() + + +def test_table_read_rows_filter_millis(data_table): + from google.cloud.bigtable import row_filters + + end = datetime.datetime.now() + start = end - datetime.timedelta(minutes=60) + timestamp_range = row_filters.TimestampRange(start=start, end=end) + timefilter = row_filters.TimestampRangeFilter(timestamp_range) + row_data = data_table.read_rows(filter_=timefilter) + row_data.consume_all() + + +def test_table_mutate_rows(data_table, rows_to_delete): + row1 = data_table.direct_row(ROW_KEY) + row1.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1) + row1.commit() + rows_to_delete.append(row1) + + row2 = data_table.direct_row(ROW_KEY_ALT) + row2.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL2) + row2.commit() + rows_to_delete.append(row2) + + # Change the contents + row1.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL3) + row2.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL4) + rows = [row1, row2] + + statuses = data_table.mutate_rows(rows) + assert len(statuses) == len(rows) + for status in statuses: + assert status.code == 0 + + # Check the contents + row1_data = data_table.read_row(ROW_KEY) + assert row1_data.cells[COLUMN_FAMILY_ID1][COL_NAME1][0].value == CELL_VAL3 + + row2_data = data_table.read_row(ROW_KEY_ALT) + assert row2_data.cells[COLUMN_FAMILY_ID1][COL_NAME1][0].value == CELL_VAL4 + + +def _populate_table(data_table, rows_to_delete, row_keys): + for row_key in row_keys: + row = data_table.direct_row(row_key) + row.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1) + row.commit() + rows_to_delete.append(row) + + +def test_table_truncate(data_table, rows_to_delete): + row_keys = [ + b"row_key_1", + b"row_key_2", + b"row_key_3", + b"row_key_4", + b"row_key_5", + b"row_key_pr_1", + b"row_key_pr_2", + b"row_key_pr_3", + b"row_key_pr_4", + b"row_key_pr_5", + ] + _populate_table(data_table, rows_to_delete, row_keys) + + data_table.truncate(timeout=200) + + assert list(data_table.read_rows()) == [] + + +def test_table_drop_by_prefix(data_table, rows_to_delete): + row_keys = [ + b"row_key_1", + b"row_key_2", + b"row_key_3", + b"row_key_4", + b"row_key_5", + b"row_key_pr_1", + b"row_key_pr_2", + b"row_key_pr_3", + b"row_key_pr_4", + b"row_key_pr_5", + ] + _populate_table(data_table, rows_to_delete, row_keys) + + data_table.drop_by_prefix(row_key_prefix="row_key_pr", timeout=200) + + remaining_row_keys = [ + row_key for row_key in row_keys if not row_key.startswith(b"row_key_pr") + ] + expected_rows_count = len(remaining_row_keys) + found_rows_count = 0 + + for row in data_table.read_rows(): + if row.row_key in row_keys: + found_rows_count += 1 + + assert 
expected_rows_count == found_rows_count + + +def test_table_read_rows_w_row_set(data_table, rows_to_delete): + from google.cloud.bigtable.row_set import RowSet + from google.cloud.bigtable.row_set import RowRange + + row_keys = [ + b"row_key_1", + b"row_key_2", + b"row_key_3", + b"row_key_4", + b"row_key_5", + b"row_key_6", + b"row_key_7", + b"row_key_8", + b"row_key_9", + ] + _populate_table(data_table, rows_to_delete, row_keys) + + row_range = RowRange(start_key=b"row_key_3", end_key=b"row_key_7") + row_set = RowSet() + row_set.add_row_range(row_range) + row_set.add_row_key(b"row_key_1") + + found_rows = data_table.read_rows(row_set=row_set) + + found_row_keys = [row.row_key for row in found_rows] + expected_row_keys = [ + row_key for row_key in row_keys[:6] if not row_key.endswith(b"_2") + ] + assert found_row_keys == expected_row_keys + + +def test_rowset_add_row_range_w_pfx(data_table, rows_to_delete): + from google.cloud.bigtable.row_set import RowSet + + row_keys = [ + b"row_key_1", + b"row_key_2", + b"row_key_3", + b"row_key_4", + b"sample_row_key_1", + b"sample_row_key_2", + ] + _populate_table(data_table, rows_to_delete, row_keys) + + row_set = RowSet() + row_set.add_row_range_with_prefix("row") + + expected_row_keys = [row_key for row_key in row_keys if row_key.startswith(b"row")] + found_rows = data_table.read_rows(row_set=row_set) + found_row_keys = [row.row_key for row in found_rows] + assert found_row_keys == expected_row_keys + + +def test_table_read_row_large_cell(data_table, rows_to_delete, not_in_emulator): + # Maximum gRPC received message size for emulator is 4194304 bytes. + row = data_table.direct_row(ROW_KEY) + rows_to_delete.append(row) + + number_of_bytes = 10 * 1024 * 1024 + data = b"1" * number_of_bytes # 10MB of 1's. + row.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, data) + row.commit() + + # Read back the contents of the row. + row_data = data_table.read_row(ROW_KEY) + assert row_data.row_key == ROW_KEY + + cell = row_data.cells[COLUMN_FAMILY_ID1] + column = cell[COL_NAME1] + assert len(column) == 1 + assert column[0].value == data + + +def _write_to_row(row1, row2, row3, row4): + from google.cloud._helpers import _datetime_from_microseconds + from google.cloud._helpers import _microseconds_from_datetime + from google.cloud._helpers import UTC + from google.cloud.bigtable.row_data import Cell + + timestamp1 = datetime.datetime.utcnow().replace(tzinfo=UTC) + timestamp1_micros = _microseconds_from_datetime(timestamp1) + # Truncate to millisecond granularity. + timestamp1_micros -= timestamp1_micros % 1000 + timestamp1 = _datetime_from_microseconds(timestamp1_micros) + # 1000 microseconds is a millisecond + timestamp2 = timestamp1 + datetime.timedelta(microseconds=1000) + timestamp2_micros = _microseconds_from_datetime(timestamp2) + timestamp3 = timestamp1 + datetime.timedelta(microseconds=2000) + timestamp3_micros = _microseconds_from_datetime(timestamp3) + timestamp4 = timestamp1 + datetime.timedelta(microseconds=3000) + timestamp4_micros = _microseconds_from_datetime(timestamp4) + + if row1 is not None: + row1.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL1, timestamp=timestamp1) + if row2 is not None: + row2.set_cell(COLUMN_FAMILY_ID1, COL_NAME1, CELL_VAL2, timestamp=timestamp2) + if row3 is not None: + row3.set_cell(COLUMN_FAMILY_ID1, COL_NAME2, CELL_VAL3, timestamp=timestamp3) + if row4 is not None: + row4.set_cell(COLUMN_FAMILY_ID2, COL_NAME3, CELL_VAL4, timestamp=timestamp4) + + # Create the cells we will check. 
+ cell1 = Cell(CELL_VAL1, timestamp1_micros) + cell2 = Cell(CELL_VAL2, timestamp2_micros) + cell3 = Cell(CELL_VAL3, timestamp3_micros) + cell4 = Cell(CELL_VAL4, timestamp4_micros) + + return cell1, cell2, cell3, cell4 + + +def test_table_read_row(data_table, rows_to_delete): + row = data_table.direct_row(ROW_KEY) + rows_to_delete.append(row) + cell1, cell2, cell3, cell4 = _write_to_row(row, row, row, row) + row.commit() + + partial_row_data = data_table.read_row(ROW_KEY) + + assert partial_row_data.row_key == ROW_KEY + + # Check the cells match. + ts_attr = operator.attrgetter("timestamp") + expected_row_contents = { + COLUMN_FAMILY_ID1: { + COL_NAME1: sorted([cell1, cell2], key=ts_attr, reverse=True), + COL_NAME2: [cell3], + }, + COLUMN_FAMILY_ID2: {COL_NAME3: [cell4]}, + } + assert partial_row_data.cells == expected_row_contents + + +def test_table_read_rows(data_table, rows_to_delete): + from google.cloud.bigtable.row_data import PartialRowData + + row = data_table.direct_row(ROW_KEY) + rows_to_delete.append(row) + row_alt = data_table.direct_row(ROW_KEY_ALT) + rows_to_delete.append(row_alt) + + cell1, cell2, cell3, cell4 = _write_to_row(row, row_alt, row, row_alt) + row.commit() + row_alt.commit() + + rows_data = data_table.read_rows() + assert rows_data.rows == {} + rows_data.consume_all() + + # NOTE: We should refrain from editing protected data on instances. + # Instead we should make the values public or provide factories + # for constructing objects with them. + row_data = PartialRowData(ROW_KEY) + row_data._chunks_encountered = True + row_data._committed = True + row_data._cells = {COLUMN_FAMILY_ID1: {COL_NAME1: [cell1], COL_NAME2: [cell3]}} + + row_alt_data = PartialRowData(ROW_KEY_ALT) + row_alt_data._chunks_encountered = True + row_alt_data._committed = True + row_alt_data._cells = { + COLUMN_FAMILY_ID1: {COL_NAME1: [cell2]}, + COLUMN_FAMILY_ID2: {COL_NAME3: [cell4]}, + } + + expected_rows = {ROW_KEY: row_data, ROW_KEY_ALT: row_alt_data} + assert rows_data.rows == expected_rows + + +def test_read_with_label_applied(data_table, rows_to_delete, not_in_emulator): + from google.cloud.bigtable.row_filters import ApplyLabelFilter + from google.cloud.bigtable.row_filters import ColumnQualifierRegexFilter + from google.cloud.bigtable.row_filters import RowFilterChain + from google.cloud.bigtable.row_filters import RowFilterUnion + + row = data_table.direct_row(ROW_KEY) + rows_to_delete.append(row) + + cell1, _, cell3, _ = _write_to_row(row, None, row, None) + row.commit() + + # Combine a label with column 1. + label1 = "label-red" + label1_filter = ApplyLabelFilter(label1) + col1_filter = ColumnQualifierRegexFilter(COL_NAME1) + chain1 = RowFilterChain(filters=[col1_filter, label1_filter]) + + # Combine a label with column 2. + label2 = "label-blue" + label2_filter = ApplyLabelFilter(label2) + col2_filter = ColumnQualifierRegexFilter(COL_NAME2) + chain2 = RowFilterChain(filters=[col2_filter, label2_filter]) + + # Bring our two labeled columns together. + row_filter = RowFilterUnion(filters=[chain1, chain2]) + partial_row_data = data_table.read_row(ROW_KEY, filter_=row_filter) + assert partial_row_data.row_key == ROW_KEY + + cells_returned = partial_row_data.cells + col_fam1 = cells_returned.pop(COLUMN_FAMILY_ID1) + # Make sure COLUMN_FAMILY_ID1 was the only key. + assert len(cells_returned) == 0 + + (cell1_new,) = col_fam1.pop(COL_NAME1) + (cell3_new,) = col_fam1.pop(COL_NAME2) + # Make sure COL_NAME1 and COL_NAME2 were the only keys. 
+    assert len(col_fam1) == 0
+
+    # Check that cell1 has matching values and gained a label.
+    assert cell1_new.value == cell1.value
+    assert cell1_new.timestamp == cell1.timestamp
+    assert cell1.labels == []
+    assert cell1_new.labels == [label1]
+
+    # Check that cell3 has matching values and gained a label.
+    assert cell3_new.value == cell3.value
+    assert cell3_new.timestamp == cell3.timestamp
+    assert cell3.labels == []
+    assert cell3_new.labels == [label2]
+
+
+def test_access_with_non_admin_client(data_client, data_instance_id, data_table_id):
+    instance = data_client.instance(data_instance_id)
+    table = instance.table(data_table_id)
+    assert table.read_row("nonesuch") is None  # no raise
diff --git a/packages/google-cloud-bigtable/tests/system/test_instance_admin.py b/packages/google-cloud-bigtable/tests/system/test_instance_admin.py
new file mode 100644
index 000000000000..c5f7b525e5a0
--- /dev/null
+++ b/packages/google-cloud-bigtable/tests/system/test_instance_admin.py
@@ -0,0 +1,614 @@
+# Copyright 2011 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from google.cloud.bigtable import enums
+from google.cloud.bigtable.table import ClusterState
+
+from . import _helpers
+
+
+def _create_app_profile_helper(
+    app_profile_id,
+    instance,
+    routing_policy_type,
+    description=None,
+    cluster_id=None,
+    allow_transactional_writes=None,
+    ignore_warnings=None,
+):
+
+    app_profile = instance.app_profile(
+        app_profile_id=app_profile_id,
+        routing_policy_type=routing_policy_type,
+        description=description,
+        cluster_id=cluster_id,
+        allow_transactional_writes=allow_transactional_writes,
+    )
+    assert app_profile.allow_transactional_writes == allow_transactional_writes
+
+    app_profile.create(ignore_warnings=ignore_warnings)
+
+    # Load a different app_profile object from the server and
+    # verify that it is the same
+    alt_app_profile = instance.app_profile(app_profile_id)
+    alt_app_profile.reload()
+
+    assert app_profile.app_profile_id == alt_app_profile.app_profile_id
+    assert app_profile.routing_policy_type == routing_policy_type
+    assert alt_app_profile.routing_policy_type == routing_policy_type
+    assert app_profile.description == alt_app_profile.description
+    assert not app_profile.allow_transactional_writes
+    assert not alt_app_profile.allow_transactional_writes
+
+    return app_profile
+
+
+def _list_app_profiles_helper(instance, expected_app_profile_ids):
+    app_profiles = instance.list_app_profiles()
+    found = [app_prof.app_profile_id for app_prof in app_profiles]
+    for expected in expected_app_profile_ids:
+        assert expected in found
+
+
+def _modify_app_profile_helper(
+    app_profile_id,
+    instance,
+    routing_policy_type,
+    description=None,
+    cluster_id=None,
+    allow_transactional_writes=None,
+    ignore_warnings=None,
+):
+    app_profile = instance.app_profile(
+        app_profile_id=app_profile_id,
+        routing_policy_type=routing_policy_type,
+        description=description,
+        cluster_id=cluster_id,
+        allow_transactional_writes=allow_transactional_writes,
+    )
+
+    operation =
app_profile.update(ignore_warnings=ignore_warnings) + operation.result(timeout=30) + + alt_profile = instance.app_profile(app_profile_id) + alt_profile.reload() + + assert alt_profile.description == description + assert alt_profile.routing_policy_type == routing_policy_type + assert alt_profile.cluster_id == cluster_id + assert alt_profile.allow_transactional_writes == allow_transactional_writes + + +def _delete_app_profile_helper(app_profile): + assert app_profile.exists() + app_profile.delete(ignore_warnings=True) + assert not app_profile.exists() + + +def test_client_list_instances(admin_client, admin_instance_populated, not_in_emulator): + instances, failed_locations = admin_client.list_instances() + + assert failed_locations == [] + + found = set([instance.name for instance in instances]) + assert admin_instance_populated.name in found + + +def test_instance_exists_hit(admin_instance_populated): + # Emulator does not support instance admin operations (create / delete). + # It allows connecting with *any* project / instance name. + # See: https://cloud.google.com/bigtable/docs/emulator + assert admin_instance_populated.exists() + + +def test_instance_exists_miss(admin_client): + alt_instance = admin_client.instance("nonesuch-instance") + assert not alt_instance.exists() + + +def test_instance_reload( + admin_client, admin_instance_id, admin_instance_populated, not_in_emulator +): + # Use same arguments as 'admin_instance_populated' + # so we can use reload() on a fresh instance. + alt_instance = admin_client.instance(admin_instance_id) + # Make sure metadata unset before reloading. + alt_instance.display_name = None + + alt_instance.reload() + + assert alt_instance.display_name == admin_instance_populated.display_name + assert alt_instance.labels == admin_instance_populated.labels + assert alt_instance.type_ == enums.Instance.Type.PRODUCTION + + +def test_instance_create_prod( + admin_client, + unique_suffix, + location_id, + instance_labels, + instances_to_delete, + not_in_emulator, +): + from google.cloud.bigtable import enums + + alt_instance_id = f"ndef{unique_suffix}" + instance = admin_client.instance(alt_instance_id, labels=instance_labels) + alt_cluster_id = f"{alt_instance_id}-cluster" + serve_nodes = 1 + cluster = instance.cluster( + alt_cluster_id, location_id=location_id, serve_nodes=serve_nodes, + ) + + operation = instance.create(clusters=[cluster]) + instances_to_delete.append(instance) + operation.result(timeout=30) # Ensure the operation completes. + assert instance.type_ is None + + # Create a new instance instance and make sure it is the same. + instance_alt = admin_client.instance(alt_instance_id) + instance_alt.reload() + + assert instance == instance_alt + assert instance.display_name == instance_alt.display_name + assert instance_alt.type_ == enums.Instance.Type.PRODUCTION + + +def test_instance_create_development( + admin_client, + unique_suffix, + location_id, + instance_labels, + instances_to_delete, + not_in_emulator, +): + alt_instance_id = f"new{unique_suffix}" + instance = admin_client.instance( + alt_instance_id, + instance_type=enums.Instance.Type.DEVELOPMENT, + labels=instance_labels, + ) + alt_cluster_id = f"{alt_instance_id}-cluster" + cluster = instance.cluster(alt_cluster_id, location_id=location_id) + + operation = instance.create(clusters=[cluster]) + instances_to_delete.append(instance) + operation.result(timeout=30) # Ensure the operation completes. + + # Create a new instance instance and make sure it is the same. 
+ instance_alt = admin_client.instance(alt_instance_id) + instance_alt.reload() + + assert instance == instance_alt + assert instance.display_name == instance_alt.display_name + assert instance.type_ == instance_alt.type_ + assert instance_alt.labels == instance_labels + assert instance_alt.state == enums.Instance.State.READY + + +def test_instance_create_w_two_clusters( + admin_client, + unique_suffix, + admin_instance_populated, + admin_cluster, + location_id, + instance_labels, + instances_to_delete, + not_in_emulator, +): + alt_instance_id = f"dif{unique_suffix}" + instance = admin_client.instance( + alt_instance_id, + instance_type=enums.Instance.Type.PRODUCTION, + labels=instance_labels, + ) + + serve_nodes = 1 + + alt_cluster_id_1 = f"{alt_instance_id}-c1" + cluster_1 = instance.cluster( + alt_cluster_id_1, + location_id=location_id, + serve_nodes=serve_nodes, + default_storage_type=enums.StorageType.HDD, + ) + + alt_cluster_id_2 = f"{alt_instance_id}-c2" + location_id_2 = "us-central1-f" + cluster_2 = instance.cluster( + alt_cluster_id_2, + location_id=location_id_2, + serve_nodes=serve_nodes, + default_storage_type=enums.StorageType.HDD, + ) + operation = instance.create(clusters=[cluster_1, cluster_2]) + instances_to_delete.append(instance) + operation.result(timeout=120) # Ensure the operation completes. + + # Create a new instance instance and make sure it is the same. + instance_alt = admin_client.instance(alt_instance_id) + instance_alt.reload() + + assert instance == instance_alt + assert instance.display_name == instance_alt.display_name + assert instance.type_ == instance_alt.type_ + + clusters, failed_locations = instance_alt.list_clusters() + assert failed_locations == [] + + alt_cluster_1, alt_cluster_2 = sorted(clusters, key=lambda x: x.name) + + assert cluster_1.location_id == alt_cluster_1.location_id + assert alt_cluster_1.state == enums.Cluster.State.READY + assert cluster_1.serve_nodes == alt_cluster_1.serve_nodes + assert cluster_1.default_storage_type == alt_cluster_1.default_storage_type + assert cluster_2.location_id == alt_cluster_2.location_id + assert alt_cluster_2.state == enums.Cluster.State.READY + assert cluster_2.serve_nodes == alt_cluster_2.serve_nodes + assert cluster_2.default_storage_type == alt_cluster_2.default_storage_type + + # Test list clusters in project via 'client.list_clusters' + clusters, failed_locations = admin_client.list_clusters() + assert not failed_locations + found = set([cluster.name for cluster in clusters]) + expected = {alt_cluster_1.name, alt_cluster_2.name, admin_cluster.name} + assert expected.issubset(found) + + temp_table_id = "test-get-cluster-states" + temp_table = instance.table(temp_table_id) + _helpers.retry_grpc_unavailable(temp_table.create)() + + EncryptionType = enums.EncryptionInfo.EncryptionType + encryption_info = temp_table.get_encryption_info() + assert ( + encryption_info[alt_cluster_id_1][0].encryption_type + == EncryptionType.GOOGLE_DEFAULT_ENCRYPTION + ) + assert ( + encryption_info[alt_cluster_id_2][0].encryption_type + == EncryptionType.GOOGLE_DEFAULT_ENCRYPTION + ) + + c_states = temp_table.get_cluster_states() + cluster_ids = set(c_states.keys()) + assert cluster_ids == {alt_cluster_id_1, alt_cluster_id_2} + + ReplicationState = enums.Table.ReplicationState + expected_results = [ + ClusterState(ReplicationState.STATE_NOT_KNOWN), + ClusterState(ReplicationState.INITIALIZING), + ClusterState(ReplicationState.PLANNED_MAINTENANCE), + ClusterState(ReplicationState.UNPLANNED_MAINTENANCE), + 
ClusterState(ReplicationState.READY),
+    ]
+
+    for clusterstate in c_states.values():
+        assert clusterstate in expected_results
+
+    # Test create app profile with multi_cluster_routing policy
+    app_profiles_to_delete = []
+    description = "routing policy-multy"
+    app_profile_id_1 = "app_profile_id_1"
+    routing = enums.RoutingPolicyType.ANY
+    app_profile_1 = _create_app_profile_helper(
+        app_profile_id_1,
+        instance,
+        routing_policy_type=routing,
+        description=description,
+        ignore_warnings=True,
+    )
+    app_profiles_to_delete.append(app_profile_1)
+
+    # Test list app profiles
+    _list_app_profiles_helper(instance, [app_profile_id_1])
+
+    # Test modify app profile app_profile_id_1
+    # routing policy to single cluster policy,
+    # cluster -> alt_cluster_id_1,
+    # allow_transactional_writes -> disallowed
+    # modify description
+    description = "to routing policy-single"
+    routing = enums.RoutingPolicyType.SINGLE
+    _modify_app_profile_helper(
+        app_profile_id_1,
+        instance,
+        routing_policy_type=routing,
+        description=description,
+        cluster_id=alt_cluster_id_1,
+        allow_transactional_writes=False,
+    )
+
+    # Test modify app profile app_profile_id_1
+    # cluster -> alt_cluster_id_2,
+    # allow_transactional_writes -> allowed
+    _modify_app_profile_helper(
+        app_profile_id_1,
+        instance,
+        routing_policy_type=routing,
+        description=description,
+        cluster_id=alt_cluster_id_2,
+        allow_transactional_writes=True,
+        ignore_warnings=True,
+    )
+
+    # Test create app profile with single cluster routing policy
+    description = "routing policy-single"
+    app_profile_id_2 = "app_profile_id_2"
+    routing = enums.RoutingPolicyType.SINGLE
+    app_profile_2 = _create_app_profile_helper(
+        app_profile_id_2,
+        instance,
+        routing_policy_type=routing,
+        description=description,
+        cluster_id=alt_cluster_id_2,
+        allow_transactional_writes=False,
+    )
+    app_profiles_to_delete.append(app_profile_2)
+
+    # Test list app profiles
+    _list_app_profiles_helper(instance, [app_profile_id_1, app_profile_id_2])
+
+    # Test modify app profile app_profile_id_2 to
+    # allow transactional writes
+    # Note: no need to set ``ignore_warnings`` to True
+    # since we are not restricting anything with this modification.
+ _modify_app_profile_helper( + app_profile_id_2, + instance, + routing_policy_type=routing, + description=description, + cluster_id=alt_cluster_id_2, + allow_transactional_writes=True, + ) + + # Test modify app profile app_profile_id_2 routing policy + # to multi_cluster_routing policy + # modify description + description = "to routing policy-multy" + routing = enums.RoutingPolicyType.ANY + _modify_app_profile_helper( + app_profile_id_2, + instance, + routing_policy_type=routing, + description=description, + allow_transactional_writes=False, + ignore_warnings=True, + ) + + # Test delete app profiles + for app_profile in app_profiles_to_delete: + _delete_app_profile_helper(app_profile) + + +def test_instance_create_w_two_clusters_cmek( + admin_client, + unique_suffix, + admin_instance_populated, + admin_cluster, + location_id, + instance_labels, + instances_to_delete, + with_kms_key_name, + not_in_emulator, +): + alt_instance_id = f"dif-cmek{unique_suffix}" + instance = admin_client.instance( + alt_instance_id, + instance_type=enums.Instance.Type.PRODUCTION, + labels=instance_labels, + ) + + serve_nodes = 1 + + alt_cluster_id_1 = f"{alt_instance_id}-c1" + cluster_1 = instance.cluster( + alt_cluster_id_1, + location_id=location_id, + serve_nodes=serve_nodes, + default_storage_type=enums.StorageType.HDD, + kms_key_name=with_kms_key_name, + ) + + alt_cluster_id_2 = f"{alt_instance_id}-c2" + location_id_2 = "us-central1-f" + cluster_2 = instance.cluster( + alt_cluster_id_2, + location_id=location_id_2, + serve_nodes=serve_nodes, + default_storage_type=enums.StorageType.HDD, + kms_key_name=with_kms_key_name, + ) + operation = instance.create(clusters=[cluster_1, cluster_2]) + instances_to_delete.append(instance) + operation.result(timeout=120) # Ensure the operation completes. + + # Create a new instance instance and make sure it is the same. 
+ instance_alt = admin_client.instance(alt_instance_id) + instance_alt.reload() + + assert instance == instance_alt + assert instance.display_name == instance_alt.display_name + assert instance.type_ == instance_alt.type_ + + clusters, failed_locations = instance_alt.list_clusters() + assert failed_locations == [] + + alt_cluster_1, alt_cluster_2 = sorted(clusters, key=lambda x: x.name) + + assert cluster_1.location_id == alt_cluster_1.location_id + assert alt_cluster_1.state == enums.Cluster.State.READY + assert cluster_1.serve_nodes == alt_cluster_1.serve_nodes + assert cluster_1.default_storage_type == alt_cluster_1.default_storage_type + assert cluster_2.location_id == alt_cluster_2.location_id + assert alt_cluster_2.state == enums.Cluster.State.READY + assert cluster_2.serve_nodes == alt_cluster_2.serve_nodes + assert cluster_2.default_storage_type == alt_cluster_2.default_storage_type + + # Test list clusters in project via 'client.list_clusters' + clusters, failed_locations = admin_client.list_clusters() + assert not failed_locations + found = set([cluster.name for cluster in clusters]) + expected = {alt_cluster_1.name, alt_cluster_2.name, admin_cluster.name} + assert expected.issubset(found) + + temp_table_id = "test-get-cluster-states" + temp_table = instance.table(temp_table_id) + temp_table.create() + + EncryptionType = enums.EncryptionInfo.EncryptionType + encryption_info = temp_table.get_encryption_info() + assert ( + encryption_info[alt_cluster_id_1][0].encryption_type + == EncryptionType.CUSTOMER_MANAGED_ENCRYPTION + ) + assert ( + encryption_info[alt_cluster_id_2][0].encryption_type + == EncryptionType.CUSTOMER_MANAGED_ENCRYPTION + ) + + +def test_instance_update_display_name_and_labels( + admin_client, + admin_instance_id, + admin_instance_populated, + label_key, + instance_labels, + not_in_emulator, +): + old_display_name = admin_instance_populated.display_name + new_display_name = "Foo Bar Baz" + + new_labels = {label_key: _helpers.label_stamp()} + admin_instance_populated.display_name = new_display_name + admin_instance_populated.labels = new_labels + + operation = admin_instance_populated.update() + operation.result(timeout=30) # ensure the operation completes. + + # Create a new instance instance and reload it. + instance_alt = admin_client.instance(admin_instance_id, labels={}) + assert instance_alt.display_name == old_display_name + assert instance_alt.labels == {} + + instance_alt.reload() + + assert instance_alt.display_name == new_display_name + assert instance_alt.labels == new_labels + + # Make sure to put the instance back the way it was for the + # other test cases. + admin_instance_populated.display_name = old_display_name + admin_instance_populated.labels = instance_labels + operation = admin_instance_populated.update() + operation.result(timeout=30) # ensure the operation completes. + + +def test_instance_update_w_type( + admin_client, + unique_suffix, + admin_instance_populated, + location_id, + instance_labels, + instances_to_delete, + not_in_emulator, +): + alt_instance_id = f"ndif{unique_suffix}" + instance = admin_client.instance( + alt_instance_id, + instance_type=enums.Instance.Type.DEVELOPMENT, + labels=instance_labels, + ) + alt_cluster_id = f"{alt_instance_id}-cluster" + cluster = instance.cluster(alt_cluster_id, location_id=location_id,) + + operation = instance.create(clusters=[cluster]) + instances_to_delete.append(instance) + operation.result(timeout=30) # Ensure the operation completes. 
+ + instance.display_name = None + instance.type_ = enums.Instance.Type.PRODUCTION + operation = instance.update() + operation.result(timeout=30) # ensure the operation completes. + + # Create a new instance instance and reload it. + instance_alt = admin_client.instance(alt_instance_id) + assert instance_alt.type_ is None + instance_alt.reload() + assert instance_alt.type_ == enums.Instance.Type.PRODUCTION + + +def test_cluster_exists_hit(admin_cluster, not_in_emulator): + assert admin_cluster.exists() + + +def test_cluster_exists_miss(admin_instance_populated, not_in_emulator): + alt_cluster = admin_instance_populated.cluster("nonesuch-cluster") + assert not alt_cluster.exists() + + +def test_cluster_create( + admin_instance_populated, admin_instance_id, +): + alt_cluster_id = f"{admin_instance_id}-c2" + alt_location_id = "us-central1-f" + serve_nodes = 2 + + cluster_2 = admin_instance_populated.cluster( + alt_cluster_id, + location_id=alt_location_id, + serve_nodes=serve_nodes, + default_storage_type=(enums.StorageType.SSD), + ) + operation = cluster_2.create() + operation.result(timeout=30) # Ensure the operation completes. + + # Create a new object instance, reload and make sure it is the same. + alt_cluster = admin_instance_populated.cluster(alt_cluster_id) + alt_cluster.reload() + + assert cluster_2 == alt_cluster + assert cluster_2.location_id == alt_cluster.location_id + assert alt_cluster.state == enums.Cluster.State.READY + assert cluster_2.serve_nodes == alt_cluster.serve_nodes + assert cluster_2.default_storage_type == alt_cluster.default_storage_type + + # Delete the newly created cluster and confirm + assert cluster_2.exists() + cluster_2.delete() + assert not cluster_2.exists() + + +def test_cluster_update( + admin_instance_populated, + admin_cluster_id, + admin_cluster, + serve_nodes, + not_in_emulator, +): + new_serve_nodes = 4 + + admin_cluster.serve_nodes = new_serve_nodes + + operation = admin_cluster.update() + operation.result(timeout=30) # Ensure the operation completes. + + # Create a new cluster instance and reload it. + alt_cluster = admin_instance_populated.cluster(admin_cluster_id) + alt_cluster.reload() + assert alt_cluster.serve_nodes == new_serve_nodes + + # Put the cluster back the way it was for the other test cases. + admin_cluster.serve_nodes = serve_nodes + operation = admin_cluster.update() + operation.result(timeout=30) # Ensure the operation completes. diff --git a/packages/google-cloud-bigtable/tests/system/test_table_admin.py b/packages/google-cloud-bigtable/tests/system/test_table_admin.py new file mode 100644 index 000000000000..232c6d0fc1c6 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/system/test_table_admin.py @@ -0,0 +1,351 @@ +# Copyright 2011 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import datetime +import operator +import time + +import pytest +from google.api_core.datetime_helpers import DatetimeWithNanoseconds + +from . 
import _helpers + + +@pytest.fixture(scope="module") +def shared_table_id(): + return "google-cloud-python-test-table" + + +@pytest.fixture(scope="module") +def shared_table(data_instance_populated, shared_table_id): + table = data_instance_populated.table(shared_table_id) + table.create() + + yield table + + table.delete() + + +@pytest.fixture(scope="function") +def tables_to_delete(): + tables_to_delete = [] + + yield tables_to_delete + + for table in tables_to_delete: + table.delete() + + +@pytest.fixture(scope="function") +def backups_to_delete(): + backups_to_delete = [] + + yield backups_to_delete + + for backup in backups_to_delete: + backup.delete() + + +def test_instance_list_tables(data_instance_populated, shared_table): + # Since `data_instance_populated` is newly created, the + # table created in `shared_table` here will be the only one. + tables = data_instance_populated.list_tables() + assert tables == [shared_table] + + +def test_table_exists(data_instance_populated): + temp_table_id = "test-table_exists" + temp_table = data_instance_populated.table(temp_table_id) + assert not temp_table.exists() + + temp_table.create() + assert _helpers.retry_until_true(temp_table.exists)() + + temp_table.delete() + assert not _helpers.retry_until_false(temp_table.exists)() + + +def test_table_create(data_instance_populated, shared_table, tables_to_delete): + temp_table_id = "test-table-create" + temp_table = data_instance_populated.table(temp_table_id) + temp_table.create() + tables_to_delete.append(temp_table) + + # First, create a sorted version of our expected result. + name_attr = operator.attrgetter("name") + expected_tables = sorted([temp_table, shared_table], key=name_attr) + + # Then query for the tables in the instance and sort them by + # name as well. 
+ tables = data_instance_populated.list_tables() + sorted_tables = sorted(tables, key=name_attr) + assert sorted_tables == expected_tables + + +def test_table_create_w_families( + data_instance_populated, tables_to_delete, +): + from google.cloud.bigtable.column_family import MaxVersionsGCRule + + temp_table_id = "test-create-table-with-failies" + column_family_id = "col-fam-id1" + temp_table = data_instance_populated.table(temp_table_id) + gc_rule = MaxVersionsGCRule(1) + temp_table.create(column_families={column_family_id: gc_rule}) + tables_to_delete.append(temp_table) + + col_fams = temp_table.list_column_families() + assert len(col_fams) == 1 + + retrieved_col_fam = col_fams[column_family_id] + assert retrieved_col_fam._table is temp_table + assert retrieved_col_fam.column_family_id == column_family_id + assert retrieved_col_fam.gc_rule == gc_rule + + +def test_table_create_w_split_keys( + data_instance_populated, tables_to_delete, not_in_emulator, +): + temp_table_id = "foo-bar-baz-split-table" + initial_split_keys = [b"split_key_1", b"split_key_10", b"split_key_20"] + temp_table = data_instance_populated.table(temp_table_id) + temp_table.create(initial_split_keys=initial_split_keys) + tables_to_delete.append(temp_table) + + # Read Sample Row Keys for created splits + sample_row_keys = temp_table.sample_row_keys() + actual_keys = [srk.row_key for srk in sample_row_keys] + + expected_keys = initial_split_keys + expected_keys.append(b"") + assert actual_keys == expected_keys + + +def test_column_family_create(data_instance_populated, tables_to_delete): + from google.cloud.bigtable.column_family import MaxVersionsGCRule + + temp_table_id = "test-create-column-family" + temp_table = data_instance_populated.table(temp_table_id) + temp_table.create() + tables_to_delete.append(temp_table) + + assert temp_table.list_column_families() == {} + + column_family_id = "col-fam-id1" + gc_rule = MaxVersionsGCRule(1) + column_family = temp_table.column_family(column_family_id, gc_rule=gc_rule) + column_family.create() + + col_fams = temp_table.list_column_families() + assert len(col_fams) == 1 + + retrieved_col_fam = col_fams[column_family_id] + assert retrieved_col_fam._table is column_family._table + assert retrieved_col_fam.column_family_id == column_family.column_family_id + assert retrieved_col_fam.gc_rule == gc_rule + + +def test_column_family_update(data_instance_populated, tables_to_delete): + from google.cloud.bigtable.column_family import MaxVersionsGCRule + + temp_table_id = "test-update-column-family" + temp_table = data_instance_populated.table(temp_table_id) + temp_table.create() + tables_to_delete.append(temp_table) + + column_family_id = "col-fam-id1" + gc_rule = MaxVersionsGCRule(1) + column_family = temp_table.column_family(column_family_id, gc_rule=gc_rule) + column_family.create() + + # Check that our created table is as expected. + col_fams = temp_table.list_column_families() + assert col_fams == {column_family_id: column_family} + + # Update the column family's GC rule and then try to update. + column_family.gc_rule = None + column_family.update() + + # Check that the update has propagated. 
+ col_fams = temp_table.list_column_families() + assert col_fams[column_family_id].gc_rule is None + + +def test_column_family_delete(data_instance_populated, tables_to_delete): + temp_table_id = "test-delete-column-family" + temp_table = data_instance_populated.table(temp_table_id) + temp_table.create() + tables_to_delete.append(temp_table) + + column_family_id = "col-fam-id1" + assert temp_table.list_column_families() == {} + column_family = temp_table.column_family(column_family_id) + column_family.create() + + # Make sure the family is there before deleting it. + col_fams = temp_table.list_column_families() + assert list(col_fams.keys()) == [column_family_id] + + _helpers.retry_504(column_family.delete)() + # Make sure we have successfully deleted it. + assert temp_table.list_column_families() == {} + + +def test_table_get_iam_policy( + data_instance_populated, tables_to_delete, not_in_emulator, +): + temp_table_id = "test-get-iam-policy-table" + temp_table = data_instance_populated.table(temp_table_id) + temp_table.create() + tables_to_delete.append(temp_table) + + policy = temp_table.get_iam_policy().to_api_repr() + assert policy["etag"] == "ACAB" + assert policy["version"] == 0 + + +def test_table_set_iam_policy( + service_account, data_instance_populated, tables_to_delete, not_in_emulator, +): + from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE + from google.cloud.bigtable.policy import Policy + + temp_table_id = "test-set-iam-policy-table" + temp_table = data_instance_populated.table(temp_table_id) + temp_table.create() + tables_to_delete.append(temp_table) + + new_policy = Policy() + service_account_email = service_account.service_account_email + new_policy[BIGTABLE_ADMIN_ROLE] = [Policy.service_account(service_account_email)] + policy_latest = temp_table.set_iam_policy(new_policy).to_api_repr() + + assert policy_latest["bindings"][0]["role"] == BIGTABLE_ADMIN_ROLE + assert service_account_email in policy_latest["bindings"][0]["members"][0] + + +def test_table_test_iam_permissions( + data_instance_populated, tables_to_delete, not_in_emulator, +): + temp_table_id = "test-test-iam-policy-table" + temp_table = data_instance_populated.table(temp_table_id) + temp_table.create() + tables_to_delete.append(temp_table) + + permissions = ["bigtable.tables.mutateRows", "bigtable.tables.readRows"] + permissions_allowed = temp_table.test_iam_permissions(permissions) + assert permissions == permissions_allowed + + +def test_table_backup( + admin_client, + unique_suffix, + instance_labels, + location_id, + data_instance_populated, + data_cluster_id, + instances_to_delete, + tables_to_delete, + backups_to_delete, + not_in_emulator, +): + from google.cloud._helpers import _datetime_to_pb_timestamp + from google.cloud.bigtable import enums + + temp_table_id = "test-backup-table" + temp_table = data_instance_populated.table(temp_table_id) + temp_table.create() + tables_to_delete.append(temp_table) + + temp_backup_id = "test-backup" + + # TODO: consider using `datetime.datetime.now().timestamp()` + # when support for Python 2 is fully dropped + expire = int(time.mktime(datetime.datetime.now().timetuple())) + 604800 + + # Testing `Table.backup()` factory + temp_backup = temp_table.backup( + temp_backup_id, + cluster_id=data_cluster_id, + expire_time=datetime.datetime.utcfromtimestamp(expire), + ) + + # Reinitialize the admin client. 
This is to test that `_table_admin_client`
+    # returns a client object (and not NoneType)
+    temp_backup._instance._client = admin_client
+
+    # Sanity check for `Backup.exists()` method
+    assert not temp_backup.exists()
+
+    # Testing `Backup.create()` method
+    backup_op = temp_backup.create()
+    backup_op.result(timeout=30)
+
+    # Implicit testing of `Backup.delete()` method
+    backups_to_delete.append(temp_backup)
+
+    # Testing `Backup.exists()` method
+    assert temp_backup.exists()
+
+    # Testing `Table.list_backups()` method
+    temp_table_backup = temp_table.list_backups()[0]
+    assert temp_backup_id == temp_table_backup.backup_id
+    assert data_cluster_id == temp_table_backup.cluster
+    assert expire == temp_table_backup.expire_time.seconds
+    assert (
+        temp_table_backup.encryption_info.encryption_type
+        == enums.EncryptionInfo.EncryptionType.GOOGLE_DEFAULT_ENCRYPTION
+    )
+
+    # Testing `Backup.update_expire_time()` method
+    expire += 3600  # A one-hour change in the `expire_time` parameter
+    updated_time = datetime.datetime.utcfromtimestamp(expire)
+    temp_backup.update_expire_time(updated_time)
+    test = _datetime_to_pb_timestamp(updated_time)
+
+    # Testing `Backup.get()` method
+    temp_table_backup = temp_backup.get()
+    assert test.seconds == DatetimeWithNanoseconds.timestamp(
+        temp_table_backup.expire_time
+    )
+
+    # Testing `Table.restore()` and `Backup.restore()` methods
+    restored_table_id = "test-backup-table-restored"
+    restored_table = data_instance_populated.table(restored_table_id)
+    local_restore_op = temp_table.restore(
+        restored_table_id, cluster_id=data_cluster_id, backup_id=temp_backup_id
+    )
+    local_restore_op.result(timeout=30)
+    tables = data_instance_populated.list_tables()
+    assert restored_table in tables
+    restored_table.delete()
+
+    # Testing `Backup.restore()` into a different instance:
+    # Setting up another instance...
+    alt_instance_id = f"gcp-alt-{unique_suffix}"
+    alt_cluster_id = f"{alt_instance_id}-cluster"
+    alt_instance = admin_client.instance(alt_instance_id, labels=instance_labels)
+    alt_cluster = alt_instance.cluster(
+        cluster_id=alt_cluster_id, location_id=location_id, serve_nodes=1,
+    )
+    create_op = alt_instance.create(clusters=[alt_cluster])
+    instances_to_delete.append(alt_instance)
+    create_op.result(timeout=30)
+
+    # Testing `restore()`...
+ restore_op = temp_backup.restore(restored_table_id, alt_instance_id) + restore_op.result(timeout=30) + restored_table = alt_instance.table(restored_table_id) + assert restored_table in alt_instance.list_tables() + restored_table.delete() From 9c265f0af384b1b705b175b664283b54eda2e29f Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Mon, 20 Sep 2021 09:58:51 -0400 Subject: [PATCH 513/892] chore: blacken samples noxfile template (#424) Source-Link: https://github.com/googleapis/synthtool/commit/8b781e190b09590992733a214863f770425f5ab3 Post-Processor: gcr.io/repo-automation-bots/owlbot-python:latest@sha256:0ccd9f4d714d36e311f60f407199dd460e43a99a125b5ca64b1d75f6e5f8581b Co-authored-by: Owl Bot --- packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml | 2 +- packages/google-cloud-bigtable/samples/README.md | 2 +- packages/google-cloud-bigtable/samples/hello/README.md | 2 +- .../google-cloud-bigtable/samples/hello_happybase/README.md | 2 +- packages/google-cloud-bigtable/samples/instanceadmin/README.md | 2 +- packages/google-cloud-bigtable/samples/metricscaler/README.md | 2 +- packages/google-cloud-bigtable/samples/quickstart/README.md | 2 +- .../samples/quickstart_happybase/README.md | 2 +- packages/google-cloud-bigtable/samples/snippets/README.md | 2 +- packages/google-cloud-bigtable/samples/tableadmin/README.md | 2 +- 10 files changed, 10 insertions(+), 10 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index c07f148f0b0b..e2c23777477e 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -1,3 +1,3 @@ docker: image: gcr.io/repo-automation-bots/owlbot-python:latest - digest: sha256:0ffe3bdd6c7159692df5f7744da74e5ef19966288a6bf76023e8e04e0c424d7d + digest: sha256:0ccd9f4d714d36e311f60f407199dd460e43a99a125b5ca64b1d75f6e5f8581b diff --git a/packages/google-cloud-bigtable/samples/README.md b/packages/google-cloud-bigtable/samples/README.md index 70d852d4f3f0..1301c6fb1f60 100644 --- a/packages/google-cloud-bigtable/samples/README.md +++ b/packages/google-cloud-bigtable/samples/README.md @@ -17,7 +17,7 @@ View the [contributing guidelines][contrib_guide], the [Python style guide][py_s [enable_billing]:https://cloud.google.com/apis/docs/getting-started#enabling_billing [client_library_python]: https://googlecloudplatform.github.io/google-cloud-python/ [issues]: https://github.com/GoogleCloudPlatform/google-cloud-python/issues -[contrib_guide]: https://github.com/googleapis/google-cloud-python/blob/master/CONTRIBUTING.rst +[contrib_guide]: https://github.com/googleapis/google-cloud-python/blob/main/CONTRIBUTING.rst [py_style]: http://google.github.io/styleguide/pyguide.html [cloud_sdk]: https://cloud.google.com/sdk/docs [gcloud_shell]: https://cloud.google.com/shell/docs diff --git a/packages/google-cloud-bigtable/samples/hello/README.md b/packages/google-cloud-bigtable/samples/hello/README.md index 1ffd9b86eda9..0e1fc92f9289 100644 --- a/packages/google-cloud-bigtable/samples/hello/README.md +++ b/packages/google-cloud-bigtable/samples/hello/README.md @@ -45,7 +45,7 @@ View the [contributing guidelines][contrib_guide], the [Python style guide][py_s [enable_billing]:https://cloud.google.com/apis/docs/getting-started#enabling_billing [client_library_python]: https://googlecloudplatform.github.io/google-cloud-python/ [issues]: 
https://github.com/GoogleCloudPlatform/google-cloud-python/issues -[contrib_guide]: https://github.com/googleapis/google-cloud-python/blob/master/CONTRIBUTING.rst +[contrib_guide]: https://github.com/googleapis/google-cloud-python/blob/main/CONTRIBUTING.rst [py_style]: http://google.github.io/styleguide/pyguide.html [cloud_sdk]: https://cloud.google.com/sdk/docs [gcloud_shell]: https://cloud.google.com/shell/docs diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/README.md b/packages/google-cloud-bigtable/samples/hello_happybase/README.md index a37d4fd5e51d..fdbea4e63739 100644 --- a/packages/google-cloud-bigtable/samples/hello_happybase/README.md +++ b/packages/google-cloud-bigtable/samples/hello_happybase/README.md @@ -45,7 +45,7 @@ View the [contributing guidelines][contrib_guide], the [Python style guide][py_s [enable_billing]:https://cloud.google.com/apis/docs/getting-started#enabling_billing [client_library_python]: https://googlecloudplatform.github.io/google-cloud-python/ [issues]: https://github.com/GoogleCloudPlatform/google-cloud-python/issues -[contrib_guide]: https://github.com/googleapis/google-cloud-python/blob/master/CONTRIBUTING.rst +[contrib_guide]: https://github.com/googleapis/google-cloud-python/blob/main/CONTRIBUTING.rst [py_style]: http://google.github.io/styleguide/pyguide.html [cloud_sdk]: https://cloud.google.com/sdk/docs [gcloud_shell]: https://cloud.google.com/shell/docs diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/README.md b/packages/google-cloud-bigtable/samples/instanceadmin/README.md index 59c51c5bde40..675add700e93 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/README.md +++ b/packages/google-cloud-bigtable/samples/instanceadmin/README.md @@ -45,7 +45,7 @@ View the [contributing guidelines][contrib_guide], the [Python style guide][py_s [enable_billing]:https://cloud.google.com/apis/docs/getting-started#enabling_billing [client_library_python]: https://googlecloudplatform.github.io/google-cloud-python/ [issues]: https://github.com/GoogleCloudPlatform/google-cloud-python/issues -[contrib_guide]: https://github.com/googleapis/google-cloud-python/blob/master/CONTRIBUTING.rst +[contrib_guide]: https://github.com/googleapis/google-cloud-python/blob/main/CONTRIBUTING.rst [py_style]: http://google.github.io/styleguide/pyguide.html [cloud_sdk]: https://cloud.google.com/sdk/docs [gcloud_shell]: https://cloud.google.com/shell/docs diff --git a/packages/google-cloud-bigtable/samples/metricscaler/README.md b/packages/google-cloud-bigtable/samples/metricscaler/README.md index cf88eb8bf3d9..e1624bb1872e 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/README.md +++ b/packages/google-cloud-bigtable/samples/metricscaler/README.md @@ -45,7 +45,7 @@ View the [contributing guidelines][contrib_guide], the [Python style guide][py_s [enable_billing]:https://cloud.google.com/apis/docs/getting-started#enabling_billing [client_library_python]: https://googlecloudplatform.github.io/google-cloud-python/ [issues]: https://github.com/GoogleCloudPlatform/google-cloud-python/issues -[contrib_guide]: https://github.com/googleapis/google-cloud-python/blob/master/CONTRIBUTING.rst +[contrib_guide]: https://github.com/googleapis/google-cloud-python/blob/main/CONTRIBUTING.rst [py_style]: http://google.github.io/styleguide/pyguide.html [cloud_sdk]: https://cloud.google.com/sdk/docs [gcloud_shell]: https://cloud.google.com/shell/docs diff --git a/packages/google-cloud-bigtable/samples/quickstart/README.md 
b/packages/google-cloud-bigtable/samples/quickstart/README.md index 455a412f2bb5..f61000e135d0 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/README.md +++ b/packages/google-cloud-bigtable/samples/quickstart/README.md @@ -45,7 +45,7 @@ View the [contributing guidelines][contrib_guide], the [Python style guide][py_s [enable_billing]:https://cloud.google.com/apis/docs/getting-started#enabling_billing [client_library_python]: https://googlecloudplatform.github.io/google-cloud-python/ [issues]: https://github.com/GoogleCloudPlatform/google-cloud-python/issues -[contrib_guide]: https://github.com/googleapis/google-cloud-python/blob/master/CONTRIBUTING.rst +[contrib_guide]: https://github.com/googleapis/google-cloud-python/blob/main/CONTRIBUTING.rst [py_style]: http://google.github.io/styleguide/pyguide.html [cloud_sdk]: https://cloud.google.com/sdk/docs [gcloud_shell]: https://cloud.google.com/shell/docs diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/README.md b/packages/google-cloud-bigtable/samples/quickstart_happybase/README.md index c97cbc675e7d..6d4d8871e3cb 100644 --- a/packages/google-cloud-bigtable/samples/quickstart_happybase/README.md +++ b/packages/google-cloud-bigtable/samples/quickstart_happybase/README.md @@ -45,7 +45,7 @@ View the [contributing guidelines][contrib_guide], the [Python style guide][py_s [enable_billing]:https://cloud.google.com/apis/docs/getting-started#enabling_billing [client_library_python]: https://googlecloudplatform.github.io/google-cloud-python/ [issues]: https://github.com/GoogleCloudPlatform/google-cloud-python/issues -[contrib_guide]: https://github.com/googleapis/google-cloud-python/blob/master/CONTRIBUTING.rst +[contrib_guide]: https://github.com/googleapis/google-cloud-python/blob/main/CONTRIBUTING.rst [py_style]: http://google.github.io/styleguide/pyguide.html [cloud_sdk]: https://cloud.google.com/sdk/docs [gcloud_shell]: https://cloud.google.com/shell/docs diff --git a/packages/google-cloud-bigtable/samples/snippets/README.md b/packages/google-cloud-bigtable/samples/snippets/README.md index 391bdc2b415b..134b247329b5 100644 --- a/packages/google-cloud-bigtable/samples/snippets/README.md +++ b/packages/google-cloud-bigtable/samples/snippets/README.md @@ -26,7 +26,7 @@ View the [contributing guidelines][contrib_guide], the [Python style guide][py_s [enable_billing]:https://cloud.google.com/apis/docs/getting-started#enabling_billing [client_library_python]: https://googlecloudplatform.github.io/google-cloud-python/ [issues]: https://github.com/GoogleCloudPlatform/google-cloud-python/issues -[contrib_guide]: https://github.com/googleapis/google-cloud-python/blob/master/CONTRIBUTING.rst +[contrib_guide]: https://github.com/googleapis/google-cloud-python/blob/main/CONTRIBUTING.rst [py_style]: http://google.github.io/styleguide/pyguide.html [cloud_sdk]: https://cloud.google.com/sdk/docs [gcloud_shell]: https://cloud.google.com/shell/docs diff --git a/packages/google-cloud-bigtable/samples/tableadmin/README.md b/packages/google-cloud-bigtable/samples/tableadmin/README.md index 1dee1ff2384a..b2f6a13af55a 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/README.md +++ b/packages/google-cloud-bigtable/samples/tableadmin/README.md @@ -45,7 +45,7 @@ View the [contributing guidelines][contrib_guide], the [Python style guide][py_s [enable_billing]:https://cloud.google.com/apis/docs/getting-started#enabling_billing [client_library_python]: https://googlecloudplatform.github.io/google-cloud-python/ [issues]: 
https://github.com/GoogleCloudPlatform/google-cloud-python/issues -[contrib_guide]: https://github.com/googleapis/google-cloud-python/blob/master/CONTRIBUTING.rst +[contrib_guide]: https://github.com/googleapis/google-cloud-python/blob/main/CONTRIBUTING.rst [py_style]: http://google.github.io/styleguide/pyguide.html [cloud_sdk]: https://cloud.google.com/sdk/docs [gcloud_shell]: https://cloud.google.com/shell/docs From a1a4cf3a40cbdf5b94882d905fe7e1401f2f93f2 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Mon, 20 Sep 2021 15:15:58 -0400 Subject: [PATCH 514/892] chore: remove superseded owlbot replacements (#422) --- packages/google-cloud-bigtable/owlbot.py | 59 ++++-------------------- 1 file changed, 9 insertions(+), 50 deletions(-) diff --git a/packages/google-cloud-bigtable/owlbot.py b/packages/google-cloud-bigtable/owlbot.py index 98e126e91df0..081c12574807 100644 --- a/packages/google-cloud-bigtable/owlbot.py +++ b/packages/google-cloud-bigtable/owlbot.py @@ -149,53 +149,6 @@ def system_emulated(session): "system",""", ) -# Remove the replacements below once https://github.com/googleapis/synthtool/pull/1188 is merged - -# Update googleapis/repo-automation-bots repo to main in .kokoro/*.sh files -s.replace( - ".kokoro/*.sh", "repo-automation-bots/tree/master", "repo-automation-bots/tree/main" -) - -# Customize CONTRIBUTING.rst to replace master with main -s.replace( - "CONTRIBUTING.rst", - "fetch and merge changes from upstream into master", - "fetch and merge changes from upstream into main", -) - -s.replace( - "CONTRIBUTING.rst", "git merge upstream/master", "git merge upstream/main", -) - -s.replace( - "CONTRIBUTING.rst", - """export GOOGLE_CLOUD_TESTING_BRANCH=\"master\"""", - """export GOOGLE_CLOUD_TESTING_BRANCH=\"main\"""", -) - -s.replace( - "CONTRIBUTING.rst", r"remote \(``master``\)", "remote (``main``)", -) - -s.replace( - "CONTRIBUTING.rst", "blob/master/CONTRIBUTING.rst", "blob/main/CONTRIBUTING.rst", -) - -s.replace( - "CONTRIBUTING.rst", "blob/master/noxfile.py", "blob/main/noxfile.py", -) - -s.replace( - "docs/conf.py", "master_doc", "root_doc", -) - -s.replace( - "docs/conf.py", "# The master toctree document.", "# The root toctree document.", -) - -s.replace( - "docs/**/*.rst", r"/blob/master/", "/blob/main/", -) # ---------------------------------------------------------------------------- # Samples templates # ---------------------------------------------------------------------------- @@ -205,10 +158,16 @@ def system_emulated(session): for path in sample_files: s.move(path) -# Note: google-cloud-python and python-docs-samples are not yet using 'main': +# Note: python-docs-samples is not yet using 'main': #s.replace( -# "samples/**/*.md", r"/blob/master/", "/blob/main/", +# "samples/**/*.md", +# r"python-docs-samples/blob/master/", +# "python-docs-samples/blob/main/", #) - +s.replace( + "samples/**/*.md", + r"google-cloud-python/blob/master/", + "google-cloud-python/blob/main/", +) s.shell.run(["nox", "-s", "blacken"], hide_output=False) From a4deeee2b9c5b5550c794faf401ad90f602ebbaf Mon Sep 17 00:00:00 2001 From: Jeffrey Rennie Date: Tue, 21 Sep 2021 12:36:40 -0700 Subject: [PATCH 515/892] chore: relocate owl bot post processor (#426) chore: relocate owl bot post processor --- packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml | 4 ++-- packages/google-cloud-bigtable/.github/.OwlBot.yaml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index e2c23777477e..2567653c000d 100644 ---
a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -1,3 +1,3 @@ docker: - image: gcr.io/repo-automation-bots/owlbot-python:latest - digest: sha256:0ccd9f4d714d36e311f60f407199dd460e43a99a125b5ca64b1d75f6e5f8581b + image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest + digest: sha256:87eee22d276554e4e52863ec9b1cb6a7245815dfae20439712bf644348215a5a diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.yaml index 64de1cedac36..fe2f7841a3d4 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.yaml @@ -13,7 +13,7 @@ # limitations under the License. docker: - image: gcr.io/repo-automation-bots/owlbot-python:latest + image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest deep-remove-regex: - /owl-bot-staging From 23c789fbc5102e34a491cd67a14da7bc0a9abbb2 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 21 Sep 2021 17:36:58 -0400 Subject: [PATCH 516/892] chore: drop six (#427) --- .../google-cloud-bigtable/google/cloud/bigtable/row.py | 4 +--- .../google/cloud/bigtable/row_data.py | 7 +++---- packages/google-cloud-bigtable/tests/unit/test_row.py | 3 +-- packages/google-cloud-bigtable/tests/unit/test_table.py | 3 +-- 4 files changed, 6 insertions(+), 11 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row.py index 1898ea772c2d..3fdc230f78b1 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row.py @@ -17,8 +17,6 @@ import struct -import six - from google.cloud._helpers import _datetime_from_microseconds from google.cloud._helpers import _microseconds_from_datetime from google.cloud._helpers import _to_bytes @@ -151,7 +149,7 @@ def _set_cell(self, column_family_id, column, value, timestamp=None, state=None) :meth:`_get_mutations`. """ column = _to_bytes(column) - if isinstance(value, six.integer_types): + if isinstance(value, int): value = _PACK_I64(value) value = _to_bytes(value) if timestamp is None: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py index 0d22e2fc66cb..18d82153b0fc 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py @@ -16,7 +16,6 @@ import copy -import six import grpc @@ -169,8 +168,8 @@ def to_dict(self): :returns: Dictionary containing all the data in the cells of this row. 
""" result = {} - for column_family_id, columns in six.iteritems(self._cells): - for column_qual, cells in six.iteritems(columns): + for column_family_id, columns in self._cells.items(): + for column_qual, cells in columns.items(): key = _to_bytes(column_family_id) + b":" + _to_bytes(column_qual) result[key] = cells return result @@ -467,7 +466,7 @@ def _on_error(self, exc): def _read_next(self): """Helper for :meth:`__iter__`.""" - return six.next(self.response_iterator) + return next(self.response_iterator) def _read_next_response(self): """Helper for :meth:`__iter__`.""" diff --git a/packages/google-cloud-bigtable/tests/unit/test_row.py b/packages/google-cloud-bigtable/tests/unit/test_row.py index 6b5f4168b0cc..1f33f214bdc5 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row.py @@ -132,7 +132,6 @@ def _set_cell_helper( timestamp=None, timestamp_micros=-1, ): - import six import struct row_key = b"row_key" @@ -144,7 +143,7 @@ def _set_cell_helper( self.assertEqual(row._pb_mutations, []) row.set_cell(column_family_id, column, value, timestamp=timestamp) - if isinstance(value, six.integer_types): + if isinstance(value, int): value = struct.pack(">q", value) expected_pb = _MutationPB( set_cell=_MutationSetCellPB( diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index d4ec0e7292af..bb6cca6a70c1 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -1589,13 +1589,12 @@ def _make_responses_statuses(self, codes): return response def _make_responses(self, codes): - import six from google.cloud.bigtable_v2.types.bigtable import MutateRowsResponse from google.rpc.status_pb2 import Status entries = [ MutateRowsResponse.Entry(index=i, status=Status(code=codes[i])) - for i in six.moves.xrange(len(codes)) + for i in range(len(codes)) ] return MutateRowsResponse(entries=entries) From 97ad44eaf917e1721752f9ef51818c27b4a4145b Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Fri, 24 Sep 2021 15:14:27 +0000 Subject: [PATCH 517/892] chore: use gapic-generator-python 0.51.2 (#428) - [ ] Regenerate this pull request now. 
fix: add 'dict' annotation type to 'request' Committer: @busunkim96 PiperOrigin-RevId: 398509016 Source-Link: https://github.com/googleapis/googleapis/commit/b224dfa52642a733ea64849d4e06d15c274bc08f Source-Link: https://github.com/googleapis/googleapis-gen/commit/63a1db7a38d74b9639592f521ed1daaf7299ad9a Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiNjNhMWRiN2EzOGQ3NGI5NjM5NTkyZjUyMWVkMWRhYWY3Mjk5YWQ5YSJ9 --- .../bigtable_instance_admin/client.py | 80 ++++++++-------- .../transports/base.py | 2 +- .../transports/grpc.py | 6 +- .../transports/grpc_asyncio.py | 6 +- .../services/bigtable_table_admin/client.py | 94 ++++++++++--------- .../bigtable_table_admin/transports/base.py | 2 +- .../bigtable_table_admin/transports/grpc.py | 6 +- .../transports/grpc_asyncio.py | 6 +- .../bigtable_v2/services/bigtable/client.py | 26 ++--- .../services/bigtable/transports/base.py | 2 +- .../services/bigtable/transports/grpc.py | 6 +- .../bigtable/transports/grpc_asyncio.py | 6 +- .../fixup_bigtable_admin_v2_keywords.py | 78 +++++++-------- .../scripts/fixup_bigtable_v2_keywords.py | 14 +-- 14 files changed, 170 insertions(+), 164 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py index 0e3d57d4bc7a..9c9a8a152978 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -17,7 +17,7 @@ from distutils import util import os import re -from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core import client_options as client_options_lib # type: ignore @@ -416,7 +416,7 @@ def __init__( def create_instance( self, - request: bigtable_instance_admin.CreateInstanceRequest = None, + request: Union[bigtable_instance_admin.CreateInstanceRequest, dict] = None, *, parent: str = None, instance_id: str = None, @@ -431,7 +431,7 @@ def create_instance( r"""Create an instance within a project. Args: - request (google.cloud.bigtable_admin_v2.types.CreateInstanceRequest): + request (Union[google.cloud.bigtable_admin_v2.types.CreateInstanceRequest, dict]): The request object. Request message for BigtableInstanceAdmin.CreateInstance. parent (str): @@ -538,7 +538,7 @@ def create_instance( def get_instance( self, - request: bigtable_instance_admin.GetInstanceRequest = None, + request: Union[bigtable_instance_admin.GetInstanceRequest, dict] = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, @@ -548,7 +548,7 @@ def get_instance( r"""Gets information about an instance. Args: - request (google.cloud.bigtable_admin_v2.types.GetInstanceRequest): + request (Union[google.cloud.bigtable_admin_v2.types.GetInstanceRequest, dict]): The request object. Request message for BigtableInstanceAdmin.GetInstance. name (str): @@ -613,7 +613,7 @@ def get_instance( def list_instances( self, - request: bigtable_instance_admin.ListInstancesRequest = None, + request: Union[bigtable_instance_admin.ListInstancesRequest, dict] = None, *, parent: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, @@ -623,7 +623,7 @@ def list_instances( r"""Lists information about instances in a project. 
Args: - request (google.cloud.bigtable_admin_v2.types.ListInstancesRequest): + request (Union[google.cloud.bigtable_admin_v2.types.ListInstancesRequest, dict]): The request object. Request message for BigtableInstanceAdmin.ListInstances. parent (str): @@ -685,7 +685,7 @@ def list_instances( def update_instance( self, - request: instance.Instance = None, + request: Union[instance.Instance, dict] = None, *, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, @@ -697,7 +697,7 @@ def update_instance( PartialUpdateInstance. Args: - request (google.cloud.bigtable_admin_v2.types.Instance): + request (Union[google.cloud.bigtable_admin_v2.types.Instance, dict]): The request object. A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and the resources that serve them. All tables in an instance are @@ -745,7 +745,9 @@ def update_instance( def partial_update_instance( self, - request: bigtable_instance_admin.PartialUpdateInstanceRequest = None, + request: Union[ + bigtable_instance_admin.PartialUpdateInstanceRequest, dict + ] = None, *, instance: gba_instance.Instance = None, update_mask: field_mask_pb2.FieldMask = None, @@ -758,7 +760,7 @@ def partial_update_instance( preferred way to update an Instance. Args: - request (google.cloud.bigtable_admin_v2.types.PartialUpdateInstanceRequest): + request (Union[google.cloud.bigtable_admin_v2.types.PartialUpdateInstanceRequest, dict]): The request object. Request message for BigtableInstanceAdmin.PartialUpdateInstance. instance (google.cloud.bigtable_admin_v2.types.Instance): @@ -846,7 +848,7 @@ def partial_update_instance( def delete_instance( self, - request: bigtable_instance_admin.DeleteInstanceRequest = None, + request: Union[bigtable_instance_admin.DeleteInstanceRequest, dict] = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, @@ -856,7 +858,7 @@ def delete_instance( r"""Delete an instance from a project. Args: - request (google.cloud.bigtable_admin_v2.types.DeleteInstanceRequest): + request (Union[google.cloud.bigtable_admin_v2.types.DeleteInstanceRequest, dict]): The request object. Request message for BigtableInstanceAdmin.DeleteInstance. name (str): @@ -911,7 +913,7 @@ def delete_instance( def create_cluster( self, - request: bigtable_instance_admin.CreateClusterRequest = None, + request: Union[bigtable_instance_admin.CreateClusterRequest, dict] = None, *, parent: str = None, cluster_id: str = None, @@ -923,7 +925,7 @@ def create_cluster( r"""Creates a cluster within an instance. Args: - request (google.cloud.bigtable_admin_v2.types.CreateClusterRequest): + request (Union[google.cloud.bigtable_admin_v2.types.CreateClusterRequest, dict]): The request object. Request message for BigtableInstanceAdmin.CreateCluster. parent (str): @@ -1017,7 +1019,7 @@ def create_cluster( def get_cluster( self, - request: bigtable_instance_admin.GetClusterRequest = None, + request: Union[bigtable_instance_admin.GetClusterRequest, dict] = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, @@ -1027,7 +1029,7 @@ def get_cluster( r"""Gets information about a cluster. Args: - request (google.cloud.bigtable_admin_v2.types.GetClusterRequest): + request (Union[google.cloud.bigtable_admin_v2.types.GetClusterRequest, dict]): The request object. Request message for BigtableInstanceAdmin.GetCluster. 
name (str): @@ -1091,7 +1093,7 @@ def get_cluster( def list_clusters( self, - request: bigtable_instance_admin.ListClustersRequest = None, + request: Union[bigtable_instance_admin.ListClustersRequest, dict] = None, *, parent: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, @@ -1101,7 +1103,7 @@ def list_clusters( r"""Lists information about clusters in an instance. Args: - request (google.cloud.bigtable_admin_v2.types.ListClustersRequest): + request (Union[google.cloud.bigtable_admin_v2.types.ListClustersRequest, dict]): The request object. Request message for BigtableInstanceAdmin.ListClusters. parent (str): @@ -1165,7 +1167,7 @@ def list_clusters( def update_cluster( self, - request: instance.Cluster = None, + request: Union[instance.Cluster, dict] = None, *, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, @@ -1174,7 +1176,7 @@ def update_cluster( r"""Updates a cluster within an instance. Args: - request (google.cloud.bigtable_admin_v2.types.Cluster): + request (Union[google.cloud.bigtable_admin_v2.types.Cluster, dict]): The request object. A resizable group of nodes in a particular cloud location, capable of serving all [Tables][google.bigtable.admin.v2.Table] in the parent @@ -1229,7 +1231,7 @@ def update_cluster( def delete_cluster( self, - request: bigtable_instance_admin.DeleteClusterRequest = None, + request: Union[bigtable_instance_admin.DeleteClusterRequest, dict] = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, @@ -1239,7 +1241,7 @@ def delete_cluster( r"""Deletes a cluster from an instance. Args: - request (google.cloud.bigtable_admin_v2.types.DeleteClusterRequest): + request (Union[google.cloud.bigtable_admin_v2.types.DeleteClusterRequest, dict]): The request object. Request message for BigtableInstanceAdmin.DeleteCluster. name (str): @@ -1294,7 +1296,7 @@ def delete_cluster( def create_app_profile( self, - request: bigtable_instance_admin.CreateAppProfileRequest = None, + request: Union[bigtable_instance_admin.CreateAppProfileRequest, dict] = None, *, parent: str = None, app_profile_id: str = None, @@ -1306,7 +1308,7 @@ def create_app_profile( r"""Creates an app profile within an instance. Args: - request (google.cloud.bigtable_admin_v2.types.CreateAppProfileRequest): + request (Union[google.cloud.bigtable_admin_v2.types.CreateAppProfileRequest, dict]): The request object. Request message for BigtableInstanceAdmin.CreateAppProfile. parent (str): @@ -1389,7 +1391,7 @@ def create_app_profile( def get_app_profile( self, - request: bigtable_instance_admin.GetAppProfileRequest = None, + request: Union[bigtable_instance_admin.GetAppProfileRequest, dict] = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, @@ -1399,7 +1401,7 @@ def get_app_profile( r"""Gets information about an app profile. Args: - request (google.cloud.bigtable_admin_v2.types.GetAppProfileRequest): + request (Union[google.cloud.bigtable_admin_v2.types.GetAppProfileRequest, dict]): The request object. Request message for BigtableInstanceAdmin.GetAppProfile. name (str): @@ -1462,7 +1464,7 @@ def get_app_profile( def list_app_profiles( self, - request: bigtable_instance_admin.ListAppProfilesRequest = None, + request: Union[bigtable_instance_admin.ListAppProfilesRequest, dict] = None, *, parent: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, @@ -1472,7 +1474,7 @@ def list_app_profiles( r"""Lists information about app profiles in an instance. 
Args: - request (google.cloud.bigtable_admin_v2.types.ListAppProfilesRequest): + request (Union[google.cloud.bigtable_admin_v2.types.ListAppProfilesRequest, dict]): The request object. Request message for BigtableInstanceAdmin.ListAppProfiles. parent (str): @@ -1546,7 +1548,7 @@ def list_app_profiles( def update_app_profile( self, - request: bigtable_instance_admin.UpdateAppProfileRequest = None, + request: Union[bigtable_instance_admin.UpdateAppProfileRequest, dict] = None, *, app_profile: instance.AppProfile = None, update_mask: field_mask_pb2.FieldMask = None, @@ -1557,7 +1559,7 @@ def update_app_profile( r"""Updates an app profile within an instance. Args: - request (google.cloud.bigtable_admin_v2.types.UpdateAppProfileRequest): + request (Union[google.cloud.bigtable_admin_v2.types.UpdateAppProfileRequest, dict]): The request object. Request message for BigtableInstanceAdmin.UpdateAppProfile. app_profile (google.cloud.bigtable_admin_v2.types.AppProfile): @@ -1640,7 +1642,7 @@ def update_app_profile( def delete_app_profile( self, - request: bigtable_instance_admin.DeleteAppProfileRequest = None, + request: Union[bigtable_instance_admin.DeleteAppProfileRequest, dict] = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, @@ -1650,7 +1652,7 @@ def delete_app_profile( r"""Deletes an app profile from an instance. Args: - request (google.cloud.bigtable_admin_v2.types.DeleteAppProfileRequest): + request (Union[google.cloud.bigtable_admin_v2.types.DeleteAppProfileRequest, dict]): The request object. Request message for BigtableInstanceAdmin.DeleteAppProfile. name (str): @@ -1705,7 +1707,7 @@ def delete_app_profile( def get_iam_policy( self, - request: iam_policy_pb2.GetIamPolicyRequest = None, + request: Union[iam_policy_pb2.GetIamPolicyRequest, dict] = None, *, resource: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, @@ -1717,7 +1719,7 @@ def get_iam_policy( but does not have a policy set. Args: - request (google.iam.v1.iam_policy_pb2.GetIamPolicyRequest): + request (Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]): The request object. Request message for `GetIamPolicy` method. resource (str): @@ -1832,7 +1834,7 @@ def get_iam_policy( def set_iam_policy( self, - request: iam_policy_pb2.SetIamPolicyRequest = None, + request: Union[iam_policy_pb2.SetIamPolicyRequest, dict] = None, *, resource: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, @@ -1843,7 +1845,7 @@ def set_iam_policy( resource. Replaces any existing policy. Args: - request (google.iam.v1.iam_policy_pb2.SetIamPolicyRequest): + request (Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]): The request object. Request message for `SetIamPolicy` method. resource (str): @@ -1958,7 +1960,7 @@ def set_iam_policy( def test_iam_permissions( self, - request: iam_policy_pb2.TestIamPermissionsRequest = None, + request: Union[iam_policy_pb2.TestIamPermissionsRequest, dict] = None, *, resource: str = None, permissions: Sequence[str] = None, @@ -1970,7 +1972,7 @@ def test_iam_permissions( specified instance resource. Args: - request (google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest): + request (Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]): The request object. Request message for `TestIamPermissions` method. 
resource (str): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py index 2c4b6bb42c44..fa1456714cba 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py @@ -131,7 +131,7 @@ def __init__( **scopes_kwargs, quota_project_id=quota_project_id ) - # If the credentials is service account credentials, then always try to use self signed JWT. + # If the credentials are service account credentials, then always try to use self signed JWT. if ( always_use_jwt_access and isinstance(credentials, service_account.Credentials) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py index 9c311a51e008..7e2e5161163c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py @@ -89,16 +89,16 @@ def __init__( api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. If provided, it overrides the ``host`` argument and tries to create a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or applicatin default SSL credentials. + ``client_cert_source`` or application default SSL credentials. client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): Deprecated. A callback to provide client SSL certificate bytes and private key bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. + for the grpc channel. It is ignored if ``channel`` is provided. client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. It is + both in PEM format. It is used to configure a mutual TLS channel. It is ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py index 5ab099b357a5..9eddeaa0241a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py @@ -136,16 +136,16 @@ def __init__( api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. If provided, it overrides the ``host`` argument and tries to create a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or applicatin default SSL credentials. + ``client_cert_source`` or application default SSL credentials. 
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): Deprecated. A callback to provide client SSL certificate bytes and private key bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. + for the grpc channel. It is ignored if ``channel`` is provided. client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. It is + both in PEM format. It is used to configure a mutual TLS channel. It is ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index 49f024e815f8..8c891fd870fb 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -17,7 +17,7 @@ from distutils import util import os import re -from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core import client_options as client_options_lib # type: ignore @@ -454,7 +454,7 @@ def __init__( def create_table( self, - request: bigtable_table_admin.CreateTableRequest = None, + request: Union[bigtable_table_admin.CreateTableRequest, dict] = None, *, parent: str = None, table_id: str = None, @@ -468,7 +468,7 @@ def create_table( column families, specified in the request. Args: - request (google.cloud.bigtable_admin_v2.types.CreateTableRequest): + request (Union[google.cloud.bigtable_admin_v2.types.CreateTableRequest, dict]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] parent (str): @@ -550,7 +550,9 @@ def create_table( def create_table_from_snapshot( self, - request: bigtable_table_admin.CreateTableFromSnapshotRequest = None, + request: Union[ + bigtable_table_admin.CreateTableFromSnapshotRequest, dict + ] = None, *, parent: str = None, table_id: str = None, @@ -570,7 +572,7 @@ def create_table_from_snapshot( SLA or deprecation policy. Args: - request (google.cloud.bigtable_admin_v2.types.CreateTableFromSnapshotRequest): + request (Union[google.cloud.bigtable_admin_v2.types.CreateTableFromSnapshotRequest, dict]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot] Note: This is a private alpha release of Cloud Bigtable @@ -672,7 +674,7 @@ def create_table_from_snapshot( def list_tables( self, - request: bigtable_table_admin.ListTablesRequest = None, + request: Union[bigtable_table_admin.ListTablesRequest, dict] = None, *, parent: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, @@ -682,7 +684,7 @@ def list_tables( r"""Lists all tables served from a specified instance. 
Args: - request (google.cloud.bigtable_admin_v2.types.ListTablesRequest): + request (Union[google.cloud.bigtable_admin_v2.types.ListTablesRequest, dict]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] parent (str): @@ -753,7 +755,7 @@ def list_tables( def get_table( self, - request: bigtable_table_admin.GetTableRequest = None, + request: Union[bigtable_table_admin.GetTableRequest, dict] = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, @@ -763,7 +765,7 @@ def get_table( r"""Gets metadata information about the specified table. Args: - request (google.cloud.bigtable_admin_v2.types.GetTableRequest): + request (Union[google.cloud.bigtable_admin_v2.types.GetTableRequest, dict]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.GetTable][google.bigtable.admin.v2.BigtableTableAdmin.GetTable] name (str): @@ -827,7 +829,7 @@ def get_table( def delete_table( self, - request: bigtable_table_admin.DeleteTableRequest = None, + request: Union[bigtable_table_admin.DeleteTableRequest, dict] = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, @@ -838,7 +840,7 @@ def delete_table( data. Args: - request (google.cloud.bigtable_admin_v2.types.DeleteTableRequest): + request (Union[google.cloud.bigtable_admin_v2.types.DeleteTableRequest, dict]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable] name (str): @@ -893,7 +895,7 @@ def delete_table( def modify_column_families( self, - request: bigtable_table_admin.ModifyColumnFamiliesRequest = None, + request: Union[bigtable_table_admin.ModifyColumnFamiliesRequest, dict] = None, *, name: str = None, modifications: Sequence[ @@ -910,7 +912,7 @@ def modify_column_families( table where only some modifications have taken effect. Args: - request (google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest): + request (Union[google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest, dict]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies] name (str): @@ -988,7 +990,7 @@ def modify_column_families( def drop_row_range( self, - request: bigtable_table_admin.DropRowRangeRequest = None, + request: Union[bigtable_table_admin.DropRowRangeRequest, dict] = None, *, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, @@ -1000,7 +1002,7 @@ def drop_row_range( prefix. Args: - request (google.cloud.bigtable_admin_v2.types.DropRowRangeRequest): + request (Union[google.cloud.bigtable_admin_v2.types.DropRowRangeRequest, dict]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] retry (google.api_core.retry.Retry): Designation of what errors, if any, @@ -1034,7 +1036,9 @@ def drop_row_range( def generate_consistency_token( self, - request: bigtable_table_admin.GenerateConsistencyTokenRequest = None, + request: Union[ + bigtable_table_admin.GenerateConsistencyTokenRequest, dict + ] = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, @@ -1048,7 +1052,7 @@ def generate_consistency_token( days. 
Args: - request (google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenRequest): + request (Union[google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenRequest, dict]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] name (str): @@ -1114,7 +1118,7 @@ def generate_consistency_token( def check_consistency( self, - request: bigtable_table_admin.CheckConsistencyRequest = None, + request: Union[bigtable_table_admin.CheckConsistencyRequest, dict] = None, *, name: str = None, consistency_token: str = None, @@ -1128,7 +1132,7 @@ def check_consistency( request. Args: - request (google.cloud.bigtable_admin_v2.types.CheckConsistencyRequest): + request (Union[google.cloud.bigtable_admin_v2.types.CheckConsistencyRequest, dict]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] name (str): @@ -1199,7 +1203,7 @@ def check_consistency( def snapshot_table( self, - request: bigtable_table_admin.SnapshotTableRequest = None, + request: Union[bigtable_table_admin.SnapshotTableRequest, dict] = None, *, name: str = None, cluster: str = None, @@ -1220,7 +1224,7 @@ def snapshot_table( SLA or deprecation policy. Args: - request (google.cloud.bigtable_admin_v2.types.SnapshotTableRequest): + request (Union[google.cloud.bigtable_admin_v2.types.SnapshotTableRequest, dict]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable][google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable] Note: This is a private alpha release of Cloud Bigtable @@ -1335,7 +1339,7 @@ def snapshot_table( def get_snapshot( self, - request: bigtable_table_admin.GetSnapshotRequest = None, + request: Union[bigtable_table_admin.GetSnapshotRequest, dict] = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, @@ -1352,7 +1356,7 @@ def get_snapshot( SLA or deprecation policy. Args: - request (google.cloud.bigtable_admin_v2.types.GetSnapshotRequest): + request (Union[google.cloud.bigtable_admin_v2.types.GetSnapshotRequest, dict]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot] Note: This is a private alpha release of Cloud Bigtable @@ -1430,7 +1434,7 @@ def get_snapshot( def list_snapshots( self, - request: bigtable_table_admin.ListSnapshotsRequest = None, + request: Union[bigtable_table_admin.ListSnapshotsRequest, dict] = None, *, parent: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, @@ -1447,7 +1451,7 @@ def list_snapshots( SLA or deprecation policy. Args: - request (google.cloud.bigtable_admin_v2.types.ListSnapshotsRequest): + request (Union[google.cloud.bigtable_admin_v2.types.ListSnapshotsRequest, dict]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] Note: This is a private alpha release of Cloud Bigtable @@ -1534,7 +1538,7 @@ def list_snapshots( def delete_snapshot( self, - request: bigtable_table_admin.DeleteSnapshotRequest = None, + request: Union[bigtable_table_admin.DeleteSnapshotRequest, dict] = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, @@ -1550,7 +1554,7 @@ def delete_snapshot( SLA or deprecation policy. 
Args: - request (google.cloud.bigtable_admin_v2.types.DeleteSnapshotRequest): + request (Union[google.cloud.bigtable_admin_v2.types.DeleteSnapshotRequest, dict]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot] Note: This is a private alpha release of Cloud Bigtable @@ -1611,7 +1615,7 @@ def delete_snapshot( def create_backup( self, - request: bigtable_table_admin.CreateBackupRequest = None, + request: Union[bigtable_table_admin.CreateBackupRequest, dict] = None, *, parent: str = None, backup_id: str = None, @@ -1631,7 +1635,7 @@ def create_backup( delete the backup. Args: - request (google.cloud.bigtable_admin_v2.types.CreateBackupRequest): + request (Union[google.cloud.bigtable_admin_v2.types.CreateBackupRequest, dict]): The request object. The request for [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup]. parent (str): @@ -1726,7 +1730,7 @@ def create_backup( def get_backup( self, - request: bigtable_table_admin.GetBackupRequest = None, + request: Union[bigtable_table_admin.GetBackupRequest, dict] = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, @@ -1737,7 +1741,7 @@ def get_backup( Bigtable Backup. Args: - request (google.cloud.bigtable_admin_v2.types.GetBackupRequest): + request (Union[google.cloud.bigtable_admin_v2.types.GetBackupRequest, dict]): The request object. The request for [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup]. name (str): @@ -1796,7 +1800,7 @@ def get_backup( def update_backup( self, - request: bigtable_table_admin.UpdateBackupRequest = None, + request: Union[bigtable_table_admin.UpdateBackupRequest, dict] = None, *, backup: table.Backup = None, update_mask: field_mask_pb2.FieldMask = None, @@ -1807,7 +1811,7 @@ def update_backup( r"""Updates a pending or completed Cloud Bigtable Backup. Args: - request (google.cloud.bigtable_admin_v2.types.UpdateBackupRequest): + request (Union[google.cloud.bigtable_admin_v2.types.UpdateBackupRequest, dict]): The request object. The request for [UpdateBackup][google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup]. backup (google.cloud.bigtable_admin_v2.types.Backup): @@ -1886,7 +1890,7 @@ def update_backup( def delete_backup( self, - request: bigtable_table_admin.DeleteBackupRequest = None, + request: Union[bigtable_table_admin.DeleteBackupRequest, dict] = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, @@ -1896,7 +1900,7 @@ def delete_backup( r"""Deletes a pending or completed Cloud Bigtable backup. Args: - request (google.cloud.bigtable_admin_v2.types.DeleteBackupRequest): + request (Union[google.cloud.bigtable_admin_v2.types.DeleteBackupRequest, dict]): The request object. The request for [DeleteBackup][google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup]. name (str): @@ -1951,7 +1955,7 @@ def delete_backup( def list_backups( self, - request: bigtable_table_admin.ListBackupsRequest = None, + request: Union[bigtable_table_admin.ListBackupsRequest, dict] = None, *, parent: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, @@ -1962,7 +1966,7 @@ def list_backups( and pending backups. Args: - request (google.cloud.bigtable_admin_v2.types.ListBackupsRequest): + request (Union[google.cloud.bigtable_admin_v2.types.ListBackupsRequest, dict]): The request object. The request for [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. 
parent (str): @@ -2036,7 +2040,7 @@ def list_backups( def restore_table( self, - request: bigtable_table_admin.RestoreTableRequest = None, + request: Union[bigtable_table_admin.RestoreTableRequest, dict] = None, *, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, @@ -2053,7 +2057,7 @@ def restore_table( [Table][google.bigtable.admin.v2.Table], if successful. Args: - request (google.cloud.bigtable_admin_v2.types.RestoreTableRequest): + request (Union[google.cloud.bigtable_admin_v2.types.RestoreTableRequest, dict]): The request object. The request for [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. retry (google.api_core.retry.Retry): Designation of what errors, if any, @@ -2105,7 +2109,7 @@ def restore_table( def get_iam_policy( self, - request: iam_policy_pb2.GetIamPolicyRequest = None, + request: Union[iam_policy_pb2.GetIamPolicyRequest, dict] = None, *, resource: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, @@ -2117,7 +2121,7 @@ def get_iam_policy( but does not have a policy set. Args: - request (google.iam.v1.iam_policy_pb2.GetIamPolicyRequest): + request (Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]): The request object. Request message for `GetIamPolicy` method. resource (str): @@ -2232,7 +2236,7 @@ def get_iam_policy( def set_iam_policy( self, - request: iam_policy_pb2.SetIamPolicyRequest = None, + request: Union[iam_policy_pb2.SetIamPolicyRequest, dict] = None, *, resource: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, @@ -2243,7 +2247,7 @@ def set_iam_policy( resource. Replaces any existing policy. Args: - request (google.iam.v1.iam_policy_pb2.SetIamPolicyRequest): + request (Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]): The request object. Request message for `SetIamPolicy` method. resource (str): @@ -2358,7 +2362,7 @@ def set_iam_policy( def test_iam_permissions( self, - request: iam_policy_pb2.TestIamPermissionsRequest = None, + request: Union[iam_policy_pb2.TestIamPermissionsRequest, dict] = None, *, resource: str = None, permissions: Sequence[str] = None, @@ -2370,7 +2374,7 @@ def test_iam_permissions( specified Table or Backup resource. Args: - request (google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest): + request (Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]): The request object. Request message for `TestIamPermissions` method. resource (str): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py index 5b94d6128b29..e136c81c6e01 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py @@ -131,7 +131,7 @@ def __init__( **scopes_kwargs, quota_project_id=quota_project_id ) - # If the credentials is service account credentials, then always try to use self signed JWT. + # If the credentials are service account credentials, then always try to use self signed JWT. 
if ( always_use_jwt_access and isinstance(credentials, service_account.Credentials) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py index cfa9075ee96f..37ecdb03933c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py @@ -91,16 +91,16 @@ def __init__( api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. If provided, it overrides the ``host`` argument and tries to create a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or applicatin default SSL credentials. + ``client_cert_source`` or application default SSL credentials. client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): Deprecated. A callback to provide client SSL certificate bytes and private key bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. + for the grpc channel. It is ignored if ``channel`` is provided. client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. It is + both in PEM format. It is used to configure a mutual TLS channel. It is ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py index e75297b4bc87..e797ff875003 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py @@ -138,16 +138,16 @@ def __init__( api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. If provided, it overrides the ``host`` argument and tries to create a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or applicatin default SSL credentials. + ``client_cert_source`` or application default SSL credentials. client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): Deprecated. A callback to provide client SSL certificate bytes and private key bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. + for the grpc channel. It is ignored if ``channel`` is provided. client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. It is + both in PEM format. It is used to configure a mutual TLS channel. It is ignored if ``channel`` or ``ssl_channel_credentials`` is provided. 
quota_project_id (Optional[str]): An optional project to use for billing and quota. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py index 8efdceca2030..32dd6739cfae 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py @@ -17,7 +17,7 @@ from distutils import util import os import re -from typing import Callable, Dict, Optional, Iterable, Sequence, Tuple, Type, Union +from typing import Dict, Optional, Iterable, Sequence, Tuple, Type, Union import pkg_resources from google.api_core import client_options as client_options_lib # type: ignore @@ -352,7 +352,7 @@ def __init__( def read_rows( self, - request: bigtable.ReadRowsRequest = None, + request: Union[bigtable.ReadRowsRequest, dict] = None, *, table_name: str = None, app_profile_id: str = None, @@ -368,7 +368,7 @@ def read_rows( ReadRowsResponse documentation for details. Args: - request (google.cloud.bigtable_v2.types.ReadRowsRequest): + request (Union[google.cloud.bigtable_v2.types.ReadRowsRequest, dict]): The request object. Request message for Bigtable.ReadRows. table_name (str): @@ -443,7 +443,7 @@ def read_rows( def sample_row_keys( self, - request: bigtable.SampleRowKeysRequest = None, + request: Union[bigtable.SampleRowKeysRequest, dict] = None, *, table_name: str = None, app_profile_id: str = None, @@ -458,7 +458,7 @@ def sample_row_keys( mapreduces. Args: - request (google.cloud.bigtable_v2.types.SampleRowKeysRequest): + request (Union[google.cloud.bigtable_v2.types.SampleRowKeysRequest, dict]): The request object. Request message for Bigtable.SampleRowKeys. table_name (str): @@ -533,7 +533,7 @@ def sample_row_keys( def mutate_row( self, - request: bigtable.MutateRowRequest = None, + request: Union[bigtable.MutateRowRequest, dict] = None, *, table_name: str = None, row_key: bytes = None, @@ -547,7 +547,7 @@ def mutate_row( left unchanged unless explicitly changed by ``mutation``. Args: - request (google.cloud.bigtable_v2.types.MutateRowRequest): + request (Union[google.cloud.bigtable_v2.types.MutateRowRequest, dict]): The request object. Request message for Bigtable.MutateRow. table_name (str): @@ -644,7 +644,7 @@ def mutate_row( def mutate_rows( self, - request: bigtable.MutateRowsRequest = None, + request: Union[bigtable.MutateRowsRequest, dict] = None, *, table_name: str = None, entries: Sequence[bigtable.MutateRowsRequest.Entry] = None, @@ -658,7 +658,7 @@ def mutate_rows( batch is not executed atomically. Args: - request (google.cloud.bigtable_v2.types.MutateRowsRequest): + request (Union[google.cloud.bigtable_v2.types.MutateRowsRequest, dict]): The request object. Request message for BigtableService.MutateRows. table_name (str): @@ -749,7 +749,7 @@ def mutate_rows( def check_and_mutate_row( self, - request: bigtable.CheckAndMutateRowRequest = None, + request: Union[bigtable.CheckAndMutateRowRequest, dict] = None, *, table_name: str = None, row_key: bytes = None, @@ -765,7 +765,7 @@ def check_and_mutate_row( predicate Reader filter. Args: - request (google.cloud.bigtable_v2.types.CheckAndMutateRowRequest): + request (Union[google.cloud.bigtable_v2.types.CheckAndMutateRowRequest, dict]): The request object. Request message for Bigtable.CheckAndMutateRow. 
table_name (str): @@ -898,7 +898,7 @@ def check_and_mutate_row( def read_modify_write_row( self, - request: bigtable.ReadModifyWriteRowRequest = None, + request: Union[bigtable.ReadModifyWriteRowRequest, dict] = None, *, table_name: str = None, row_key: bytes = None, @@ -917,7 +917,7 @@ def read_modify_write_row( contents of all modified cells. Args: - request (google.cloud.bigtable_v2.types.ReadModifyWriteRowRequest): + request (Union[google.cloud.bigtable_v2.types.ReadModifyWriteRowRequest, dict]): The request object. Request message for Bigtable.ReadModifyWriteRow. table_name (str): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py index 12230759f087..a6dbca220399 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py @@ -122,7 +122,7 @@ def __init__( **scopes_kwargs, quota_project_id=quota_project_id ) - # If the credentials is service account credentials, then always try to use self signed JWT. + # If the credentials are service account credentials, then always try to use self signed JWT. if ( always_use_jwt_access and isinstance(credentials, service_account.Credentials) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py index 3f1b5dc6a480..2df844a9cf50 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py @@ -81,16 +81,16 @@ def __init__( api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. If provided, it overrides the ``host`` argument and tries to create a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or applicatin default SSL credentials. + ``client_cert_source`` or application default SSL credentials. client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): Deprecated. A callback to provide client SSL certificate bytes and private key bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. + for the grpc channel. It is ignored if ``channel`` is provided. client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. It is + both in PEM format. It is used to configure a mutual TLS channel. It is ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. 
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py index 5375474a4f05..56bf684bd709 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py @@ -128,16 +128,16 @@ def __init__( api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. If provided, it overrides the ``host`` argument and tries to create a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or applicatin default SSL credentials. + ``client_cert_source`` or application default SSL credentials. client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): Deprecated. A callback to provide client SSL certificate bytes and private key bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. + for the grpc channel. It is ignored if ``channel`` is provided. client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. It is + both in PEM format. It is used to configure a mutual TLS channel. It is ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. diff --git a/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py b/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py index fddcbf1f15c4..c8e998f88f00 100644 --- a/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py +++ b/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py @@ -39,44 +39,44 @@ def partition( class bigtable_adminCallTransformer(cst.CSTTransformer): CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { - 'check_consistency': ('name', 'consistency_token', ), - 'create_app_profile': ('parent', 'app_profile_id', 'app_profile', 'ignore_warnings', ), - 'create_backup': ('parent', 'backup_id', 'backup', ), - 'create_cluster': ('parent', 'cluster_id', 'cluster', ), - 'create_instance': ('parent', 'instance_id', 'instance', 'clusters', ), - 'create_table': ('parent', 'table_id', 'table', 'initial_splits', ), - 'create_table_from_snapshot': ('parent', 'table_id', 'source_snapshot', ), - 'delete_app_profile': ('name', 'ignore_warnings', ), - 'delete_backup': ('name', ), - 'delete_cluster': ('name', ), - 'delete_instance': ('name', ), - 'delete_snapshot': ('name', ), - 'delete_table': ('name', ), - 'drop_row_range': ('name', 'row_key_prefix', 'delete_all_data_from_table', ), - 'generate_consistency_token': ('name', ), - 'get_app_profile': ('name', ), - 'get_backup': ('name', ), - 'get_cluster': ('name', ), - 'get_iam_policy': ('resource', 'options', ), - 'get_instance': ('name', ), - 'get_snapshot': ('name', ), - 'get_table': ('name', 'view', ), - 'list_app_profiles': ('parent', 'page_size', 'page_token', ), - 'list_backups': ('parent', 'filter', 'order_by', 'page_size', 'page_token', ), - 'list_clusters': ('parent', 'page_token', ), - 'list_instances': ('parent', 'page_token', ), - 
'list_snapshots': ('parent', 'page_size', 'page_token', ), - 'list_tables': ('parent', 'view', 'page_size', 'page_token', ), - 'modify_column_families': ('name', 'modifications', ), - 'partial_update_instance': ('instance', 'update_mask', ), - 'restore_table': ('parent', 'table_id', 'backup', ), - 'set_iam_policy': ('resource', 'policy', ), - 'snapshot_table': ('name', 'cluster', 'snapshot_id', 'ttl', 'description', ), - 'test_iam_permissions': ('resource', 'permissions', ), - 'update_app_profile': ('app_profile', 'update_mask', 'ignore_warnings', ), - 'update_backup': ('backup', 'update_mask', ), - 'update_cluster': ('serve_nodes', 'name', 'location', 'state', 'default_storage_type', 'encryption_config', ), - 'update_instance': ('display_name', 'name', 'state', 'type_', 'labels', ), + 'check_consistency': ('name', 'consistency_token', ), + 'create_app_profile': ('parent', 'app_profile_id', 'app_profile', 'ignore_warnings', ), + 'create_backup': ('parent', 'backup_id', 'backup', ), + 'create_cluster': ('parent', 'cluster_id', 'cluster', ), + 'create_instance': ('parent', 'instance_id', 'instance', 'clusters', ), + 'create_table': ('parent', 'table_id', 'table', 'initial_splits', ), + 'create_table_from_snapshot': ('parent', 'table_id', 'source_snapshot', ), + 'delete_app_profile': ('name', 'ignore_warnings', ), + 'delete_backup': ('name', ), + 'delete_cluster': ('name', ), + 'delete_instance': ('name', ), + 'delete_snapshot': ('name', ), + 'delete_table': ('name', ), + 'drop_row_range': ('name', 'row_key_prefix', 'delete_all_data_from_table', ), + 'generate_consistency_token': ('name', ), + 'get_app_profile': ('name', ), + 'get_backup': ('name', ), + 'get_cluster': ('name', ), + 'get_iam_policy': ('resource', 'options', ), + 'get_instance': ('name', ), + 'get_snapshot': ('name', ), + 'get_table': ('name', 'view', ), + 'list_app_profiles': ('parent', 'page_size', 'page_token', ), + 'list_backups': ('parent', 'filter', 'order_by', 'page_size', 'page_token', ), + 'list_clusters': ('parent', 'page_token', ), + 'list_instances': ('parent', 'page_token', ), + 'list_snapshots': ('parent', 'page_size', 'page_token', ), + 'list_tables': ('parent', 'view', 'page_size', 'page_token', ), + 'modify_column_families': ('name', 'modifications', ), + 'partial_update_instance': ('instance', 'update_mask', ), + 'restore_table': ('parent', 'table_id', 'backup', ), + 'set_iam_policy': ('resource', 'policy', ), + 'snapshot_table': ('name', 'cluster', 'snapshot_id', 'ttl', 'description', ), + 'test_iam_permissions': ('resource', 'permissions', ), + 'update_app_profile': ('app_profile', 'update_mask', 'ignore_warnings', ), + 'update_backup': ('backup', 'update_mask', ), + 'update_cluster': ('serve_nodes', 'name', 'location', 'state', 'default_storage_type', 'encryption_config', ), + 'update_instance': ('display_name', 'name', 'state', 'type_', 'labels', ), } def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: @@ -95,7 +95,7 @@ def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: return updated kwargs, ctrl_kwargs = partition( - lambda a: not a.keyword.value in self.CTRL_PARAMS, + lambda a: a.keyword.value not in self.CTRL_PARAMS, kwargs ) diff --git a/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py b/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py index dcb87c5f64b2..4b32d617456c 100644 --- a/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py +++ 
b/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py @@ -39,12 +39,12 @@ def partition( class bigtableCallTransformer(cst.CSTTransformer): CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { - 'check_and_mutate_row': ('table_name', 'row_key', 'app_profile_id', 'predicate_filter', 'true_mutations', 'false_mutations', ), - 'mutate_row': ('table_name', 'row_key', 'mutations', 'app_profile_id', ), - 'mutate_rows': ('table_name', 'entries', 'app_profile_id', ), - 'read_modify_write_row': ('table_name', 'row_key', 'rules', 'app_profile_id', ), - 'read_rows': ('table_name', 'app_profile_id', 'rows', 'filter', 'rows_limit', ), - 'sample_row_keys': ('table_name', 'app_profile_id', ), + 'check_and_mutate_row': ('table_name', 'row_key', 'app_profile_id', 'predicate_filter', 'true_mutations', 'false_mutations', ), + 'mutate_row': ('table_name', 'row_key', 'mutations', 'app_profile_id', ), + 'mutate_rows': ('table_name', 'entries', 'app_profile_id', ), + 'read_modify_write_row': ('table_name', 'row_key', 'rules', 'app_profile_id', ), + 'read_rows': ('table_name', 'app_profile_id', 'rows', 'filter', 'rows_limit', ), + 'sample_row_keys': ('table_name', 'app_profile_id', ), } def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: @@ -63,7 +63,7 @@ def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: return updated kwargs, ctrl_kwargs = partition( - lambda a: not a.keyword.value in self.CTRL_PARAMS, + lambda a: a.keyword.value not in self.CTRL_PARAMS, kwargs ) From cc0feac852db5ce6242a0aa296016aa451a96884 Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Mon, 27 Sep 2021 16:02:24 +0000 Subject: [PATCH 518/892] chore: release 2.4.0 (#408) :robot: I have created a release \*beep\* \*boop\* --- ## [2.4.0](https://www.github.com/googleapis/python-bigtable/compare/v2.3.3...v2.4.0) (2021-09-24) ### Features * Publish new fields to support cluster group routing for Cloud Bigtable ([#407](https://www.github.com/googleapis/python-bigtable/issues/407)) ([66af554](https://www.github.com/googleapis/python-bigtable/commit/66af554a103eea0139cb313691d69f4c88a9e87f)) ### Bug Fixes * add 'dict' annotation type to 'request' ([160bfd3](https://www.github.com/googleapis/python-bigtable/commit/160bfd317a83561821acc0212d3514701a031ac6)) --- This PR was generated with [Release Please](https://github.com/googleapis/release-please). See [documentation](https://github.com/googleapis/release-please#release-please). 
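
As an illustration of the cluster group routing feature listed above, the sketch below goes through the generated `bigtable_admin_v2` admin client, assuming the newly published field is the `cluster_ids` list on `AppProfile.MultiClusterRoutingUseAny`. The project, instance, profile, and cluster names are placeholders, and the handwritten `google.cloud.bigtable` layer may expose this differently.

```python
# Minimal sketch of cluster group routing via the generated admin API.
# All resource names below are placeholders.
from google.cloud import bigtable_admin_v2
from google.cloud.bigtable_admin_v2 import types

client = bigtable_admin_v2.BigtableInstanceAdminClient()

app_profile = types.AppProfile(
    description="Route requests to a specific group of clusters",
    multi_cluster_routing_use_any=types.AppProfile.MultiClusterRoutingUseAny(
        # Assumed new field from this release: restrict multi-cluster
        # routing to the named clusters instead of all clusters.
        cluster_ids=["example-cluster-1", "example-cluster-2"],
    ),
)

created = client.create_app_profile(
    parent="projects/example-project/instances/example-instance",
    app_profile_id="example-group-routing-profile",
    app_profile=app_profile,
)
print(created.name)
```

The keyword arguments match the `create_app_profile` signature recorded in the keyword fixup script above (`parent`, `app_profile_id`, `app_profile`, `ignore_warnings`).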
--- packages/google-cloud-bigtable/CHANGELOG.md | 12 ++++++++++++ packages/google-cloud-bigtable/setup.py | 2 +- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 20715657673a..0bee749f3b03 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,18 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.4.0](https://www.github.com/googleapis/python-bigtable/compare/v2.3.3...v2.4.0) (2021-09-24) + + +### Features + +* Publish new fields to support cluster group routing for Cloud Bigtable ([#407](https://www.github.com/googleapis/python-bigtable/issues/407)) ([66af554](https://www.github.com/googleapis/python-bigtable/commit/66af554a103eea0139cb313691d69f4c88a9e87f)) + + +### Bug Fixes + +* add 'dict' annotation type to 'request' ([160bfd3](https://www.github.com/googleapis/python-bigtable/commit/160bfd317a83561821acc0212d3514701a031ac6)) + ### [2.3.3](https://www.github.com/googleapis/python-bigtable/compare/v2.3.2...v2.3.3) (2021-07-24) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 79feacd1b613..71e055017080 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -22,7 +22,7 @@ name = "google-cloud-bigtable" description = "Google Cloud Bigtable API client library" -version = "2.3.3" +version = "2.4.0" # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' From 5ca5d096d2a84be15dd17bc5b97042834b7a86c5 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 30 Sep 2021 11:42:09 +0000 Subject: [PATCH 519/892] chore: use gapic-generator-python 0.52.0 (#432) - [ ] Regenerate this pull request now. 
fix: improper types in pagers generation PiperOrigin-RevId: 399773015 Source-Link: https://github.com/googleapis/googleapis/commit/410c184536a22fadaf00aec3cab04102e34d2322 Source-Link: https://github.com/googleapis/googleapis-gen/commit/290e883545e3ac9ff2bd00cd0dacb28f1b8ca945 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiMjkwZTg4MzU0NWUzYWM5ZmYyYmQwMGNkMGRhY2IyOGYxYjhjYTk0NSJ9 --- .../bigtable_instance_admin/pagers.py | 12 ++++---- .../services/bigtable_table_admin/pagers.py | 28 +++++++++---------- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py index cf5def768e44..d220a1b26ae2 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py @@ -15,13 +15,13 @@ # from typing import ( Any, - AsyncIterable, + AsyncIterator, Awaitable, Callable, - Iterable, Sequence, Tuple, Optional, + Iterator, ) from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin @@ -75,14 +75,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[bigtable_instance_admin.ListAppProfilesResponse]: + def pages(self) -> Iterator[bigtable_instance_admin.ListAppProfilesResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[instance.AppProfile]: + def __iter__(self) -> Iterator[instance.AppProfile]: for page in self.pages: yield from page.app_profiles @@ -141,14 +141,14 @@ def __getattr__(self, name: str) -> Any: @property async def pages( self, - ) -> AsyncIterable[bigtable_instance_admin.ListAppProfilesResponse]: + ) -> AsyncIterator[bigtable_instance_admin.ListAppProfilesResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response - def __aiter__(self) -> AsyncIterable[instance.AppProfile]: + def __aiter__(self) -> AsyncIterator[instance.AppProfile]: async def async_generator(): async for page in self.pages: for response in page.app_profiles: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py index 84ead0192a12..07e82255ad08 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py @@ -15,13 +15,13 @@ # from typing import ( Any, - AsyncIterable, + AsyncIterator, Awaitable, Callable, - Iterable, Sequence, Tuple, Optional, + Iterator, ) from google.cloud.bigtable_admin_v2.types import bigtable_table_admin @@ -75,14 +75,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[bigtable_table_admin.ListTablesResponse]: + def pages(self) -> Iterator[bigtable_table_admin.ListTablesResponse]: yield 
self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[table.Table]: + def __iter__(self) -> Iterator[table.Table]: for page in self.pages: yield from page.tables @@ -137,14 +137,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - async def pages(self) -> AsyncIterable[bigtable_table_admin.ListTablesResponse]: + async def pages(self) -> AsyncIterator[bigtable_table_admin.ListTablesResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response - def __aiter__(self) -> AsyncIterable[table.Table]: + def __aiter__(self) -> AsyncIterator[table.Table]: async def async_generator(): async for page in self.pages: for response in page.tables: @@ -203,14 +203,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[bigtable_table_admin.ListSnapshotsResponse]: + def pages(self) -> Iterator[bigtable_table_admin.ListSnapshotsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[table.Snapshot]: + def __iter__(self) -> Iterator[table.Snapshot]: for page in self.pages: yield from page.snapshots @@ -265,14 +265,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - async def pages(self) -> AsyncIterable[bigtable_table_admin.ListSnapshotsResponse]: + async def pages(self) -> AsyncIterator[bigtable_table_admin.ListSnapshotsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response - def __aiter__(self) -> AsyncIterable[table.Snapshot]: + def __aiter__(self) -> AsyncIterator[table.Snapshot]: async def async_generator(): async for page in self.pages: for response in page.snapshots: @@ -331,14 +331,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - def pages(self) -> Iterable[bigtable_table_admin.ListBackupsResponse]: + def pages(self) -> Iterator[bigtable_table_admin.ListBackupsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = self._method(self._request, metadata=self._metadata) yield self._response - def __iter__(self) -> Iterable[table.Backup]: + def __iter__(self) -> Iterator[table.Backup]: for page in self.pages: yield from page.backups @@ -393,14 +393,14 @@ def __getattr__(self, name: str) -> Any: return getattr(self._response, name) @property - async def pages(self) -> AsyncIterable[bigtable_table_admin.ListBackupsResponse]: + async def pages(self) -> AsyncIterator[bigtable_table_admin.ListBackupsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token self._response = await self._method(self._request, metadata=self._metadata) yield self._response - def __aiter__(self) -> AsyncIterable[table.Backup]: + def __aiter__(self) -> AsyncIterator[table.Backup]: 
async def async_generator(): async for page in self.pages: for response in page.backups: From 3aadbdb2f98a8fe078b08ce4a202815bd8964e34 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Thu, 30 Sep 2021 15:15:20 -0400 Subject: [PATCH 520/892] chore(samples): add EC check to metricscalar instance setup (#395) Closes #394. --- .../samples/metricscaler/metricscaler_test.py | 34 ++++++++++++++++--- 1 file changed, 29 insertions(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/metricscaler/metricscaler_test.py b/packages/google-cloud-bigtable/samples/metricscaler/metricscaler_test.py index 219ec535e25a..13d46332536f 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/metricscaler_test.py +++ b/packages/google-cloud-bigtable/samples/metricscaler/metricscaler_test.py @@ -23,6 +23,7 @@ import pytest from test_utils.retry import RetryInstanceState +from test_utils.retry import RetryResult from metricscaler import get_cpu_load from metricscaler import get_storage_utilization @@ -74,6 +75,10 @@ def instance(): default_storage_type=storage_type) instance.create(clusters=[cluster]) + # Eventual consistency check + retry_found = RetryResult(bool) + retry_found(instance.exists)() + yield instance.delete() @@ -97,11 +102,27 @@ def dev_instance(): default_storage_type=storage_type) instance.create(clusters=[cluster]) + # Eventual consistency check + retry_found = RetryResult(bool) + retry_found(instance.exists)() + yield instance.delete() +class ClusterNodeCountPredicate: + def __init__(self, expected_node_count): + self.expected_node_count = expected_node_count + + def __call__(self, cluster): + expected = self.expected_node_count + print( + f"Expected node count: {expected}; found: {cluster.serve_nodes}" + ) + return cluster.serve_nodes == expected + + def test_scale_bigtable(instance): bigtable_client = bigtable.Client(admin=True) @@ -120,18 +141,21 @@ def test_scale_bigtable(instance): scale_bigtable(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, True) - expected_count = original_node_count + SIZE_CHANGE_STEP + scaled_node_count_predicate = ClusterNodeCountPredicate( + original_node_count + SIZE_CHANGE_STEP + ) + scaled_node_count_predicate.__name__ = "scaled_node_count_predicate" _scaled_node_count = RetryInstanceState( - instance_predicate=lambda c: c.serve_nodes == expected_count, - max_tries=10, + instance_predicate=scaled_node_count_predicate, max_tries=10, ) _scaled_node_count(cluster.reload)() scale_bigtable(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, False) + restored_node_count_predicate = ClusterNodeCountPredicate(original_node_count) + restored_node_count_predicate.__name__ = "restored_node_count_predicate" _restored_node_count = RetryInstanceState( - instance_predicate=lambda c: c.serve_nodes == original_node_count, - max_tries=10, + instance_predicate=restored_node_count_predicate, max_tries=10, ) _restored_node_count(cluster.reload)() From ab0555e71ecb9a06661c2f7dbd8a8fc7555c9d4c Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Fri, 1 Oct 2021 12:23:09 -0400 Subject: [PATCH 521/892] chore: exclude 'CODEOWNERS' from templated files (#429) See: https://github.com/googleapis/synthtool/pull/1201 --- packages/google-cloud-bigtable/owlbot.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/owlbot.py b/packages/google-cloud-bigtable/owlbot.py index 081c12574807..c9eaa54d2457 100644 --- a/packages/google-cloud-bigtable/owlbot.py +++ b/packages/google-cloud-bigtable/owlbot.py @@ -90,7 +90,7 @@ def get_staging_dirs( cov_level=100, ) 
-s.move(templated_files, excludes=[".coveragerc"]) +s.move(templated_files, excludes=[".coveragerc", ".github/CODEOWNERS"]) # ---------------------------------------------------------------------------- # Customize noxfile.py From 9d99bb433e3404115fb423c728f2419bf79138d6 Mon Sep 17 00:00:00 2001 From: Anthonios Partheniou Date: Mon, 4 Oct 2021 11:12:07 -0400 Subject: [PATCH 522/892] chore: add default_version and codeowner_team to .repo-metadata.json (#435) * chore: add default_version and codeowner_team to .repo-metadata.json * update default_version and codeowner_team --- .../google-cloud-bigtable/.repo-metadata.json | 154 +++++++++--------- 1 file changed, 78 insertions(+), 76 deletions(-) diff --git a/packages/google-cloud-bigtable/.repo-metadata.json b/packages/google-cloud-bigtable/.repo-metadata.json index 883fd57c0ce0..ea61343a0119 100644 --- a/packages/google-cloud-bigtable/.repo-metadata.json +++ b/packages/google-cloud-bigtable/.repo-metadata.json @@ -1,77 +1,79 @@ { - "name": "bigtable", - "name_pretty": "Cloud Bigtable", - "product_documentation": "https://cloud.google.com/bigtable", - "client_documentation": "https://googleapis.dev/python/bigtable/latest", - "issue_tracker": "https://issuetracker.google.com/savedsearches/559777", - "release_level": "ga", - "language": "python", - "library_type": "GAPIC_COMBO", - "repo": "googleapis/python-bigtable", - "distribution_name": "google-cloud-bigtable", - "api_id": "bigtable.googleapis.com", - "requires_billing": true, - "samples": [ - { - "name": "Hello World in Cloud Bigtable", - "description": "Demonstrates how to connect to Cloud Bigtable and run some basic operations. More information available at: https://cloud.google.com/bigtable/docs/samples-python-hello", - "file": "main.py", - "runnable": true, - "custom_content": "
usage: main.py [-h] [--table TABLE] project_id instance_id
Demonstrates how to connect to Cloud Bigtable and run some basic operations.
Prerequisites: - Create a Cloud Bigtable cluster.
https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google
Application Default Credentials.
https://developers.google.com/identity/protocols/application-default-
credentials


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Table to create and destroy. (default: Hello-Bigtable)
", - "override_path": "hello" - }, - { - "name": "Hello World using HappyBase", - "description": "This sample demonstrates using the Google Cloud Client Library HappyBase package, an implementation of the HappyBase API to connect to and interact with Cloud Bigtable. More information available at: https://cloud.google.com/bigtable/docs/samples-python-hello-happybase", - "file": "main.py", - "runnable": true, - "custom_content": "
usage: main.py [-h] [--table TABLE] project_id instance_id
Demonstrates how to connect to Cloud Bigtable and run some basic operations.
Prerequisites: - Create a Cloud Bigtable cluster.
https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google
Application Default Credentials.
https://developers.google.com/identity/protocols/application-default-
credentials


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Table to create and destroy. (default: Hello-Bigtable)
", - "override_path": "hello_happybase" - }, - { - "name": "cbt Command Demonstration", - "description": "This page explains how to use the cbt command to connect to a Cloud Bigtable instance, perform basic administrative tasks, and read and write data in a table. More information about this quickstart is available at https://cloud.google.com/bigtable/docs/quickstart-cbt", - "file": "instanceadmin.py", - "runnable": true, - "custom_content": "
usage: instanceadmin.py [-h] [run] [dev-instance] [del-instance] [add-cluster] [del-cluster] project_id instance_id cluster_id
Demonstrates how to connect to Cloud Bigtable and run some basic operations.
Prerequisites: - Create a Cloud Bigtable cluster.
https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google
Application Default Credentials.
https://developers.google.com/identity/protocols/application-default-
credentials


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Table to create and destroy. (default: Hello-Bigtable)
", - "override_path": "instanceadmin" - }, - { - "name": "Metric Scaler", - "description": "This sample demonstrates how to use Stackdriver Monitoring to scale Cloud Bigtable based on CPU usage.", - "file": "metricscaler.py", - "runnable": true, - "custom_content": "
usage: metricscaler.py [-h] [--high_cpu_threshold HIGH_CPU_THRESHOLD] [--low_cpu_threshold LOW_CPU_THRESHOLD] [--short_sleep SHORT_SLEEP] [--long_sleep LONG_SLEEP] bigtable_instance bigtable_cluster
usage: metricscaler.py [-h] [--high_cpu_threshold HIGH_CPU_THRESHOLD]
                       [--low_cpu_threshold LOW_CPU_THRESHOLD]
                       [--short_sleep SHORT_SLEEP] [--long_sleep LONG_SLEEP]
                       bigtable_instance bigtable_cluster


Scales Cloud Bigtable clusters based on CPU usage.


positional arguments:
  bigtable_instance     ID of the Cloud Bigtable instance to connect to.
  bigtable_cluster      ID of the Cloud Bigtable cluster to connect to.


optional arguments:
  -h, --help            show this help message and exit
  --high_cpu_threshold HIGH_CPU_THRESHOLD
                        If Cloud Bigtable CPU usage is above this threshold,
                        scale up
  --low_cpu_threshold LOW_CPU_THRESHOLD
                        If Cloud Bigtable CPU usage is below this threshold,
                        scale down
  --short_sleep SHORT_SLEEP
                        How long to sleep in seconds between checking metrics
                        after no scale operation
  --long_sleep LONG_SLEEP
                        How long to sleep in seconds between checking metrics
                        after a scaling operation
", - "override_path": "metricscaler" - }, - { - "name": "Quickstart", - "description": "Demonstrates of Cloud Bigtable. This sample creates a Bigtable client, connects to an instance and then to a table, then closes the connection.", - "file": "main.py", - "runnable": true, - "custom_content": "
usage: main.py [-h] [--table TABLE] project_id instance_id 


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Existing table used in the quickstart. (default: my-table)
", - "override_path": "quickstart" - }, - { - "name": "Quickstart using HappyBase", - "description": "Demonstrates of Cloud Bigtable using HappyBase. This sample creates a Bigtable client, connects to an instance and then to a table, then closes the connection.", - "file": "main.py", - "runnable": true, - "custom_content": "
usage: main.py [-h] [--table TABLE] project_id instance_id
usage: main.py [-h] [--table TABLE] project_id instance_id


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Existing table used in the quickstart. (default: my-table)usage: tableadmin.py [-h] [run] [delete] [--table TABLE] project_id instance_id


Demonstrates how to connect to Cloud Bigtable and run some basic operations.
Prerequisites: - Create a Cloud Bigtable cluster.
https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google
Application Default Credentials.
https://developers.google.com/identity/protocols/application-default-
credentials


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Table to create and destroy. (default: Hello-Bigtable)
", - "override_path": "tableadmin" - } - ] -} \ No newline at end of file + "name": "bigtable", + "name_pretty": "Cloud Bigtable", + "product_documentation": "https://cloud.google.com/bigtable", + "client_documentation": "https://googleapis.dev/python/bigtable/latest", + "issue_tracker": "https://issuetracker.google.com/savedsearches/559777", + "release_level": "ga", + "language": "python", + "library_type": "GAPIC_COMBO", + "repo": "googleapis/python-bigtable", + "distribution_name": "google-cloud-bigtable", + "api_id": "bigtable.googleapis.com", + "requires_billing": true, + "samples": [ + { + "name": "Hello World in Cloud Bigtable", + "description": "Demonstrates how to connect to Cloud Bigtable and run some basic operations. More information available at: https://cloud.google.com/bigtable/docs/samples-python-hello", + "file": "main.py", + "runnable": true, + "custom_content": "
usage: main.py [-h] [--table TABLE] project_id instance_id
Demonstrates how to connect to Cloud Bigtable and run some basic operations.
Prerequisites: - Create a Cloud Bigtable cluster.
https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google
Application Default Credentials.
https://developers.google.com/identity/protocols/application-default-
credentials


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Table to create and destroy. (default: Hello-Bigtable)
", + "override_path": "hello" + }, + { + "name": "Hello World using HappyBase", + "description": "This sample demonstrates using the Google Cloud Client Library HappyBase package, an implementation of the HappyBase API to connect to and interact with Cloud Bigtable. More information available at: https://cloud.google.com/bigtable/docs/samples-python-hello-happybase", + "file": "main.py", + "runnable": true, + "custom_content": "
usage: main.py [-h] [--table TABLE] project_id instance_id
Demonstrates how to connect to Cloud Bigtable and run some basic operations.
Prerequisites: - Create a Cloud Bigtable cluster.
https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google
Application Default Credentials.
https://developers.google.com/identity/protocols/application-default-
credentials


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Table to create and destroy. (default: Hello-Bigtable)
", + "override_path": "hello_happybase" + }, + { + "name": "cbt Command Demonstration", + "description": "This page explains how to use the cbt command to connect to a Cloud Bigtable instance, perform basic administrative tasks, and read and write data in a table. More information about this quickstart is available at https://cloud.google.com/bigtable/docs/quickstart-cbt", + "file": "instanceadmin.py", + "runnable": true, + "custom_content": "
usage: instanceadmin.py [-h] [run] [dev-instance] [del-instance] [add-cluster] [del-cluster] project_id instance_id cluster_id
Demonstrates how to connect to Cloud Bigtable and run some basic operations.
Prerequisites: - Create a Cloud Bigtable cluster.
https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google
Application Default Credentials.
https://developers.google.com/identity/protocols/application-default-
credentials


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Table to create and destroy. (default: Hello-Bigtable)
", + "override_path": "instanceadmin" + }, + { + "name": "Metric Scaler", + "description": "This sample demonstrates how to use Stackdriver Monitoring to scale Cloud Bigtable based on CPU usage.", + "file": "metricscaler.py", + "runnable": true, + "custom_content": "
usage: metricscaler.py [-h] [--high_cpu_threshold HIGH_CPU_THRESHOLD] [--low_cpu_threshold LOW_CPU_THRESHOLD] [--short_sleep SHORT_SLEEP] [--long_sleep LONG_SLEEP] bigtable_instance bigtable_cluster
usage: metricscaler.py [-h] [--high_cpu_threshold HIGH_CPU_THRESHOLD]
                       [--low_cpu_threshold LOW_CPU_THRESHOLD]
                       [--short_sleep SHORT_SLEEP] [--long_sleep LONG_SLEEP]
                       bigtable_instance bigtable_cluster


Scales Cloud Bigtable clusters based on CPU usage.


positional arguments:
  bigtable_instance     ID of the Cloud Bigtable instance to connect to.
  bigtable_cluster      ID of the Cloud Bigtable cluster to connect to.


optional arguments:
  -h, --help            show this help message and exit
  --high_cpu_threshold HIGH_CPU_THRESHOLD
                        If Cloud Bigtable CPU usage is above this threshold,
                        scale up
  --low_cpu_threshold LOW_CPU_THRESHOLD
                        If Cloud Bigtable CPU usage is below this threshold,
                        scale down
  --short_sleep SHORT_SLEEP
                        How long to sleep in seconds between checking metrics
                        after no scale operation
  --long_sleep LONG_SLEEP
                        How long to sleep in seconds between checking metrics
                        after a scaling operation
", + "override_path": "metricscaler" + }, + { + "name": "Quickstart", + "description": "Demonstrates of Cloud Bigtable. This sample creates a Bigtable client, connects to an instance and then to a table, then closes the connection.", + "file": "main.py", + "runnable": true, + "custom_content": "
usage: main.py [-h] [--table TABLE] project_id instance_id 


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Existing table used in the quickstart. (default: my-table)
", + "override_path": "quickstart" + }, + { + "name": "Quickstart using HappyBase", + "description": "Demonstrates of Cloud Bigtable using HappyBase. This sample creates a Bigtable client, connects to an instance and then to a table, then closes the connection.", + "file": "main.py", + "runnable": true, + "custom_content": "
usage: main.py [-h] [--table TABLE] project_id instance_id
usage: main.py [-h] [--table TABLE] project_id instance_id


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Existing table used in the quickstart. (default: my-table)usage: tableadmin.py [-h] [run] [delete] [--table TABLE] project_id instance_id


Demonstrates how to connect to Cloud Bigtable and run some basic operations.
Prerequisites: - Create a Cloud Bigtable cluster.
https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google
Application Default Credentials.
https://developers.google.com/identity/protocols/application-default-
credentials


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Table to create and destroy. (default: Hello-Bigtable)
", + "override_path": "tableadmin" + } + ], + "default_version": "v2", + "codeowner_team": "@googleapis/api-bigtable" +} From 44b6aaf76c46ecbc74c001b8911c239193a97067 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Tue, 5 Oct 2021 13:09:49 -0600 Subject: [PATCH 523/892] build: use trampoline_v2 for python samples and allow custom dockerfile (#436) Source-Link: https://github.com/googleapis/synthtool/commit/a7ed11ec0863c422ba2e73aafa75eab22c32b33d Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:6e7328583be8edd3ba8f35311c76a1ecbc823010279ccb6ab46b7a76e25eafcc Co-authored-by: Owl Bot --- .../.github/.OwlBot.lock.yaml | 2 +- .../.kokoro/samples/lint/common.cfg | 2 +- .../.kokoro/samples/python3.6/common.cfg | 2 +- .../.kokoro/samples/python3.6/periodic.cfg | 2 +- .../.kokoro/samples/python3.7/common.cfg | 2 +- .../.kokoro/samples/python3.7/periodic.cfg | 2 +- .../.kokoro/samples/python3.8/common.cfg | 2 +- .../.kokoro/samples/python3.8/periodic.cfg | 2 +- .../.kokoro/samples/python3.9/common.cfg | 2 +- .../.kokoro/samples/python3.9/periodic.cfg | 2 +- .../.kokoro/test-samples-against-head.sh | 2 -- .../.kokoro/test-samples.sh | 2 -- packages/google-cloud-bigtable/.trampolinerc | 17 ++++++++++++++--- 13 files changed, 24 insertions(+), 17 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 2567653c000d..ee94722ab57b 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -1,3 +1,3 @@ docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:87eee22d276554e4e52863ec9b1cb6a7245815dfae20439712bf644348215a5a + digest: sha256:6e7328583be8edd3ba8f35311c76a1ecbc823010279ccb6ab46b7a76e25eafcc diff --git a/packages/google-cloud-bigtable/.kokoro/samples/lint/common.cfg b/packages/google-cloud-bigtable/.kokoro/samples/lint/common.cfg index b597cb22fee7..54b069fd0d4f 100644 --- a/packages/google-cloud-bigtable/.kokoro/samples/lint/common.cfg +++ b/packages/google-cloud-bigtable/.kokoro/samples/lint/common.cfg @@ -31,4 +31,4 @@ gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" # Use the trampoline script to run in docker. -build_file: "python-bigtable/.kokoro/trampoline.sh" \ No newline at end of file +build_file: "python-bigtable/.kokoro/trampoline_v2.sh" \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.6/common.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.6/common.cfg index f71693fca0bc..21e1885071d6 100644 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.6/common.cfg +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.6/common.cfg @@ -37,4 +37,4 @@ gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" # Use the trampoline script to run in docker. 
-build_file: "python-bigtable/.kokoro/trampoline.sh" \ No newline at end of file +build_file: "python-bigtable/.kokoro/trampoline_v2.sh" \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.6/periodic.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.6/periodic.cfg index 50fec9649732..71cd1e597e38 100644 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.6/periodic.cfg +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.6/periodic.cfg @@ -3,4 +3,4 @@ env_vars: { key: "INSTALL_LIBRARY_FROM_SOURCE" value: "False" -} \ No newline at end of file +} diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.7/common.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.7/common.cfg index 5fa465fda5f5..7db66bb86d7a 100644 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.7/common.cfg +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.7/common.cfg @@ -37,4 +37,4 @@ gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" # Use the trampoline script to run in docker. -build_file: "python-bigtable/.kokoro/trampoline.sh" \ No newline at end of file +build_file: "python-bigtable/.kokoro/trampoline_v2.sh" \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.7/periodic.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.7/periodic.cfg index 50fec9649732..71cd1e597e38 100644 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.7/periodic.cfg +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.7/periodic.cfg @@ -3,4 +3,4 @@ env_vars: { key: "INSTALL_LIBRARY_FROM_SOURCE" value: "False" -} \ No newline at end of file +} diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.8/common.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.8/common.cfg index f3a6fa7ec10e..482008891964 100644 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.8/common.cfg +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.8/common.cfg @@ -37,4 +37,4 @@ gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" # Use the trampoline script to run in docker. -build_file: "python-bigtable/.kokoro/trampoline.sh" \ No newline at end of file +build_file: "python-bigtable/.kokoro/trampoline_v2.sh" \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.8/periodic.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.8/periodic.cfg index 50fec9649732..71cd1e597e38 100644 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.8/periodic.cfg +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.8/periodic.cfg @@ -3,4 +3,4 @@ env_vars: { key: "INSTALL_LIBRARY_FROM_SOURCE" value: "False" -} \ No newline at end of file +} diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.9/common.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.9/common.cfg index 5bc5fa834e81..4e3b12fcc4ce 100644 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.9/common.cfg +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.9/common.cfg @@ -37,4 +37,4 @@ gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" # Use the trampoline script to run in docker. 
-build_file: "python-bigtable/.kokoro/trampoline.sh" \ No newline at end of file +build_file: "python-bigtable/.kokoro/trampoline_v2.sh" \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.9/periodic.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.9/periodic.cfg index 50fec9649732..71cd1e597e38 100644 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.9/periodic.cfg +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.9/periodic.cfg @@ -3,4 +3,4 @@ env_vars: { key: "INSTALL_LIBRARY_FROM_SOURCE" value: "False" -} \ No newline at end of file +} diff --git a/packages/google-cloud-bigtable/.kokoro/test-samples-against-head.sh b/packages/google-cloud-bigtable/.kokoro/test-samples-against-head.sh index 2dda9815b6d3..ba3a707b040c 100755 --- a/packages/google-cloud-bigtable/.kokoro/test-samples-against-head.sh +++ b/packages/google-cloud-bigtable/.kokoro/test-samples-against-head.sh @@ -23,6 +23,4 @@ set -eo pipefail # Enables `**` to include files nested inside sub-folders shopt -s globstar -cd github/python-bigtable - exec .kokoro/test-samples-impl.sh diff --git a/packages/google-cloud-bigtable/.kokoro/test-samples.sh b/packages/google-cloud-bigtable/.kokoro/test-samples.sh index 4666d34f90e5..11c042d342d7 100755 --- a/packages/google-cloud-bigtable/.kokoro/test-samples.sh +++ b/packages/google-cloud-bigtable/.kokoro/test-samples.sh @@ -24,8 +24,6 @@ set -eo pipefail # Enables `**` to include files nested inside sub-folders shopt -s globstar -cd github/python-bigtable - # Run periodic samples tests at latest release if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"periodic"* ]]; then # preserving the test runner implementation. diff --git a/packages/google-cloud-bigtable/.trampolinerc b/packages/google-cloud-bigtable/.trampolinerc index 383b6ec89fbc..0eee72ab62aa 100644 --- a/packages/google-cloud-bigtable/.trampolinerc +++ b/packages/google-cloud-bigtable/.trampolinerc @@ -16,15 +16,26 @@ # Add required env vars here. required_envvars+=( - "STAGING_BUCKET" - "V2_STAGING_BUCKET" ) # Add env vars which are passed down into the container here. pass_down_envvars+=( + "NOX_SESSION" + ############### + # Docs builds + ############### "STAGING_BUCKET" "V2_STAGING_BUCKET" - "NOX_SESSION" + ################## + # Samples builds + ################## + "INSTALL_LIBRARY_FROM_SOURCE" + "RUN_TESTS_SESSION" + "BUILD_SPECIFIC_GCLOUD_PROJECT" + # Target directories. + "RUN_TESTS_DIRS" + # The nox session to run. + "RUN_TESTS_SESSION" ) # Prevent unintentional override on the default image. 
From 17fae9d7a5729aee3e04b6b66c4a1e669bbc1a81 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 7 Oct 2021 18:12:50 +0000 Subject: [PATCH 524/892] chore(python): fix formatting issue in noxfile.py.j2 (#439) --- packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml | 2 +- packages/google-cloud-bigtable/CONTRIBUTING.rst | 6 ++++-- packages/google-cloud-bigtable/noxfile.py | 2 +- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index ee94722ab57b..76d0baa0a49d 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -1,3 +1,3 @@ docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:6e7328583be8edd3ba8f35311c76a1ecbc823010279ccb6ab46b7a76e25eafcc + digest: sha256:4370ced27a324687ede5da07132dcdc5381993502a5e8a3e31e16dc631d026f0 diff --git a/packages/google-cloud-bigtable/CONTRIBUTING.rst b/packages/google-cloud-bigtable/CONTRIBUTING.rst index 78b6684a36e5..a15cf6527263 100644 --- a/packages/google-cloud-bigtable/CONTRIBUTING.rst +++ b/packages/google-cloud-bigtable/CONTRIBUTING.rst @@ -22,7 +22,7 @@ In order to add a feature: documentation. - The feature must work fully on the following CPython versions: - 3.6, 3.7, 3.8 and 3.9 on both UNIX and Windows. + 3.6, 3.7, 3.8, 3.9 and 3.10 on both UNIX and Windows. - The feature must not add unnecessary dependencies (where "unnecessary" is of course subjective, but new dependencies should @@ -72,7 +72,7 @@ We use `nox `__ to instrument our tests. - To run a single unit test:: - $ nox -s unit-3.9 -- -k + $ nox -s unit-3.10 -- -k .. note:: @@ -225,11 +225,13 @@ We support: - `Python 3.7`_ - `Python 3.8`_ - `Python 3.9`_ +- `Python 3.10`_ .. _Python 3.6: https://docs.python.org/3.6/ .. _Python 3.7: https://docs.python.org/3.7/ .. _Python 3.8: https://docs.python.org/3.8/ .. _Python 3.9: https://docs.python.org/3.9/ +.. _Python 3.10: https://docs.python.org/3.10/ Supported versions can be found in our ``noxfile.py`` `config`_. diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index d938c5d2b423..348d4fc60439 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -29,7 +29,7 @@ DEFAULT_PYTHON_VERSION = "3.8" SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"] -UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9"] +UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"] CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute() From b313d4cca38861203942140ea45f6506eec9cec5 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Fri, 8 Oct 2021 14:16:28 +0000 Subject: [PATCH 525/892] feat: add context manager support in client (#440) - [ ] Regenerate this pull request now. 
chore: fix docstring for first attribute of protos committer: @busunkim96 PiperOrigin-RevId: 401271153 Source-Link: https://github.com/googleapis/googleapis/commit/787f8c9a731f44e74a90b9847d48659ca9462d10 Source-Link: https://github.com/googleapis/googleapis-gen/commit/81decffe9fc72396a8153e756d1d67a6eecfd620 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiODFkZWNmZmU5ZmM3MjM5NmE4MTUzZTc1NmQxZDY3YTZlZWNmZDYyMCJ9 --- .../bigtable_instance_admin/async_client.py | 6 +++ .../bigtable_instance_admin/client.py | 18 +++++-- .../transports/base.py | 9 ++++ .../transports/grpc.py | 3 ++ .../transports/grpc_asyncio.py | 3 ++ .../bigtable_table_admin/async_client.py | 6 +++ .../services/bigtable_table_admin/client.py | 18 +++++-- .../bigtable_table_admin/transports/base.py | 9 ++++ .../bigtable_table_admin/transports/grpc.py | 3 ++ .../transports/grpc_asyncio.py | 3 ++ .../types/bigtable_instance_admin.py | 23 ++++++++- .../types/bigtable_table_admin.py | 2 + .../cloud/bigtable_admin_v2/types/table.py | 6 +++ .../services/bigtable/async_client.py | 6 +++ .../bigtable_v2/services/bigtable/client.py | 18 +++++-- .../services/bigtable/transports/base.py | 9 ++++ .../services/bigtable/transports/grpc.py | 3 ++ .../bigtable/transports/grpc_asyncio.py | 3 ++ .../cloud/bigtable_v2/types/bigtable.py | 15 +++++- .../google/cloud/bigtable_v2/types/data.py | 8 ++- .../test_bigtable_instance_admin.py | 50 +++++++++++++++++++ .../test_bigtable_table_admin.py | 50 +++++++++++++++++++ .../unit/gapic/bigtable_v2/test_bigtable.py | 50 +++++++++++++++++++ 23 files changed, 306 insertions(+), 15 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py index c118257de4fe..b0290a66d2ef 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ -1931,6 +1931,12 @@ async def test_iam_permissions( # Done; return the response. return response + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py index 9c9a8a152978..5ec1c2ed6188 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -408,10 +408,7 @@ def __init__( client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, - always_use_jwt_access=( - Transport == type(self).get_transport_class("grpc") - or Transport == type(self).get_transport_class("grpc_asyncio") - ), + always_use_jwt_access=True, ) def create_instance( @@ -2041,6 +2038,19 @@ def test_iam_permissions( # Done; return the response. return response + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. 
warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py index fa1456714cba..10dac01a2b4a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py @@ -371,6 +371,15 @@ def _prep_wrapped_messages(self, client_info): ), } + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + @property def operations_client(self) -> operations_v1.OperationsClient: """Return the client designed to process long-running operations.""" diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py index 7e2e5161163c..40c722c7ebff 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py @@ -770,5 +770,8 @@ def test_iam_permissions( ) return self._stubs["test_iam_permissions"] + def close(self): + self.grpc_channel.close() + __all__ = ("BigtableInstanceAdminGrpcTransport",) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py index 9eddeaa0241a..70a0e87950e9 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py @@ -796,5 +796,8 @@ def test_iam_permissions( ) return self._stubs["test_iam_permissions"] + def close(self): + return self.grpc_channel.close() + __all__ = ("BigtableInstanceAdminGrpcAsyncIOTransport",) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index 62bef2e7b2f5..5a5a3f039235 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -2271,6 +2271,12 @@ async def test_iam_permissions( # Done; return the response. 
return response + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index 8c891fd870fb..ece1e880fdf3 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -446,10 +446,7 @@ def __init__( client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, - always_use_jwt_access=( - Transport == type(self).get_transport_class("grpc") - or Transport == type(self).get_transport_class("grpc_asyncio") - ), + always_use_jwt_access=True, ) def create_table( @@ -2443,6 +2440,19 @@ def test_iam_permissions( # Done; return the response. return response + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py index e136c81c6e01..5a4201bbecc0 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py @@ -360,6 +360,15 @@ def _prep_wrapped_messages(self, client_info): ), } + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + @property def operations_client(self) -> operations_v1.OperationsClient: """Return the client designed to process long-running operations.""" diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py index 37ecdb03933c..eaf333baf798 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py @@ -920,5 +920,8 @@ def test_iam_permissions( ) return self._stubs["test_iam_permissions"] + def close(self): + self.grpc_channel.close() + __all__ = ("BigtableTableAdminGrpcTransport",) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py index e797ff875003..438571f88a88 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py @@ -942,5 +942,8 @@ def test_iam_permissions( ) return self._stubs["test_iam_permissions"] + def close(self): + return self.grpc_channel.close() + __all__ = ("BigtableTableAdminGrpcAsyncIOTransport",) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py index 69b251f65615..a5753b613d99 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py @@ -51,6 +51,7 @@ class CreateInstanceRequest(proto.Message): r"""Request message for BigtableInstanceAdmin.CreateInstance. + Attributes: parent (str): Required. The unique name of the project in which to create @@ -82,6 +83,7 @@ class CreateInstanceRequest(proto.Message): class GetInstanceRequest(proto.Message): r"""Request message for BigtableInstanceAdmin.GetInstance. + Attributes: name (str): Required. The unique name of the requested instance. Values @@ -93,6 +95,7 @@ class GetInstanceRequest(proto.Message): class ListInstancesRequest(proto.Message): r"""Request message for BigtableInstanceAdmin.ListInstances. + Attributes: parent (str): Required. The unique name of the project for which a list of @@ -108,6 +111,7 @@ class ListInstancesRequest(proto.Message): class ListInstancesResponse(proto.Message): r"""Response message for BigtableInstanceAdmin.ListInstances. + Attributes: instances (Sequence[google.cloud.bigtable_admin_v2.types.Instance]): The list of requested instances. @@ -155,6 +159,7 @@ class PartialUpdateInstanceRequest(proto.Message): class DeleteInstanceRequest(proto.Message): r"""Request message for BigtableInstanceAdmin.DeleteInstance. + Attributes: name (str): Required. The unique name of the instance to be deleted. @@ -167,6 +172,7 @@ class DeleteInstanceRequest(proto.Message): class CreateClusterRequest(proto.Message): r"""Request message for BigtableInstanceAdmin.CreateCluster. + Attributes: parent (str): Required. 
The unique name of the instance in which to create @@ -189,6 +195,7 @@ class CreateClusterRequest(proto.Message): class GetClusterRequest(proto.Message): r"""Request message for BigtableInstanceAdmin.GetCluster. + Attributes: name (str): Required. The unique name of the requested cluster. Values @@ -201,6 +208,7 @@ class GetClusterRequest(proto.Message): class ListClustersRequest(proto.Message): r"""Request message for BigtableInstanceAdmin.ListClusters. + Attributes: parent (str): Required. The unique name of the instance for which a list @@ -218,6 +226,7 @@ class ListClustersRequest(proto.Message): class ListClustersResponse(proto.Message): r"""Response message for BigtableInstanceAdmin.ListClusters. + Attributes: clusters (Sequence[google.cloud.bigtable_admin_v2.types.Cluster]): The list of requested clusters. @@ -245,6 +254,7 @@ def raw_page(self): class DeleteClusterRequest(proto.Message): r"""Request message for BigtableInstanceAdmin.DeleteCluster. + Attributes: name (str): Required. The unique name of the cluster to be deleted. @@ -257,6 +267,7 @@ class DeleteClusterRequest(proto.Message): class CreateInstanceMetadata(proto.Message): r"""The metadata for the Operation returned by CreateInstance. + Attributes: original_request (google.cloud.bigtable_admin_v2.types.CreateInstanceRequest): The request that prompted the initiation of @@ -280,6 +291,7 @@ class CreateInstanceMetadata(proto.Message): class UpdateInstanceMetadata(proto.Message): r"""The metadata for the Operation returned by UpdateInstance. + Attributes: original_request (google.cloud.bigtable_admin_v2.types.PartialUpdateInstanceRequest): The request that prompted the initiation of @@ -303,6 +315,7 @@ class UpdateInstanceMetadata(proto.Message): class CreateClusterMetadata(proto.Message): r"""The metadata for the Operation returned by CreateCluster. + Attributes: original_request (google.cloud.bigtable_admin_v2.types.CreateClusterRequest): The request that prompted the initiation of @@ -326,6 +339,7 @@ class CreateClusterMetadata(proto.Message): class UpdateClusterMetadata(proto.Message): r"""The metadata for the Operation returned by UpdateCluster. + Attributes: original_request (google.cloud.bigtable_admin_v2.types.Cluster): The request that prompted the initiation of @@ -349,6 +363,7 @@ class UpdateClusterMetadata(proto.Message): class CreateAppProfileRequest(proto.Message): r"""Request message for BigtableInstanceAdmin.CreateAppProfile. + Attributes: parent (str): Required. The unique name of the instance in which to create @@ -375,6 +390,7 @@ class CreateAppProfileRequest(proto.Message): class GetAppProfileRequest(proto.Message): r"""Request message for BigtableInstanceAdmin.GetAppProfile. + Attributes: name (str): Required. The unique name of the requested app profile. @@ -387,6 +403,7 @@ class GetAppProfileRequest(proto.Message): class ListAppProfilesRequest(proto.Message): r"""Request message for BigtableInstanceAdmin.ListAppProfiles. + Attributes: parent (str): Required. The unique name of the instance for which a list @@ -418,6 +435,7 @@ class ListAppProfilesRequest(proto.Message): class ListAppProfilesResponse(proto.Message): r"""Response message for BigtableInstanceAdmin.ListAppProfiles. + Attributes: app_profiles (Sequence[google.cloud.bigtable_admin_v2.types.AppProfile]): The list of requested app profiles. @@ -446,6 +464,7 @@ def raw_page(self): class UpdateAppProfileRequest(proto.Message): r"""Request message for BigtableInstanceAdmin.UpdateAppProfile. 
+ Attributes: app_profile (google.cloud.bigtable_admin_v2.types.AppProfile): Required. The app profile which will @@ -468,6 +487,7 @@ class UpdateAppProfileRequest(proto.Message): class DeleteAppProfileRequest(proto.Message): r"""Request message for BigtableInstanceAdmin.DeleteAppProfile. + Attributes: name (str): Required. The unique name of the app profile to be deleted. @@ -483,7 +503,8 @@ class DeleteAppProfileRequest(proto.Message): class UpdateAppProfileMetadata(proto.Message): - r"""The metadata for the Operation returned by UpdateAppProfile. """ + r"""The metadata for the Operation returned by UpdateAppProfile. + """ __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py index 1d93991ad72d..2a89d117427d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py @@ -183,6 +183,7 @@ class CreateTableRequest(proto.Message): class Split(proto.Message): r"""An initial split point for a newly created table. + Attributes: key (bytes): Row key to use as an initial tablet boundary. @@ -357,6 +358,7 @@ class ModifyColumnFamiliesRequest(proto.Message): class Modification(proto.Message): r"""A create, update, or delete of a particular column family. + Attributes: id (str): The ID of the column family to be modified. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py index 75ceaf263132..e90d587386b9 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py @@ -44,6 +44,7 @@ class RestoreSourceType(proto.Enum): class RestoreInfo(proto.Message): r"""Information about a table restore. + Attributes: source_type (google.cloud.bigtable_admin_v2.types.RestoreSourceType): The type of the restore source. @@ -111,6 +112,7 @@ class View(proto.Enum): class ClusterState(proto.Message): r"""The state of a table's data in a particular cluster. + Attributes: replication_state (google.cloud.bigtable_admin_v2.types.Table.ClusterState.ReplicationState): Output only. The state of replication for the @@ -193,6 +195,7 @@ class GcRule(proto.Message): class Intersection(proto.Message): r"""A GcRule which deletes cells matching all of the given rules. + Attributes: rules (Sequence[google.cloud.bigtable_admin_v2.types.GcRule]): Only delete cells which would be deleted by every element of @@ -203,6 +206,7 @@ class Intersection(proto.Message): class Union(proto.Message): r"""A GcRule which deletes cells matching any of the given rules. + Attributes: rules (Sequence[google.cloud.bigtable_admin_v2.types.GcRule]): Delete cells which would be deleted by any element of @@ -310,6 +314,7 @@ class State(proto.Enum): class Backup(proto.Message): r"""A backup of a Cloud Bigtable table. + Attributes: name (str): Output only. A globally unique identifier for the backup @@ -369,6 +374,7 @@ class State(proto.Enum): class BackupInfo(proto.Message): r"""Information about a backup. + Attributes: backup (str): Output only. Name of the backup. 
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py index 9aa15e391ed1..03e99eda26ae 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -864,6 +864,12 @@ async def read_modify_write_row( # Done; return the response. return response + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py index 32dd6739cfae..beeed24d12cf 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py @@ -344,10 +344,7 @@ def __init__( client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, - always_use_jwt_access=( - Transport == type(self).get_transport_class("grpc") - or Transport == type(self).get_transport_class("grpc_asyncio") - ), + always_use_jwt_access=True, ) def read_rows( @@ -1014,6 +1011,19 @@ def read_modify_write_row( # Done; return the response. return response + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py index a6dbca220399..d89c01d228e7 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py @@ -236,6 +236,15 @@ def _prep_wrapped_messages(self, client_info): ), } + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + @property def read_rows( self, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py index 2df844a9cf50..fd9d1134fdb3 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py @@ -405,5 +405,8 @@ def read_modify_write_row( ) return self._stubs["read_modify_write_row"] + def close(self): + self.grpc_channel.close() + __all__ = ("BigtableGrpcTransport",) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py index 56bf684bd709..c0560526e2b0 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py @@ -412,5 +412,8 @@ def read_modify_write_row( ) return self._stubs["read_modify_write_row"] + def close(self): + return self.grpc_channel.close() + __all__ = ("BigtableGrpcAsyncIOTransport",) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py index 35a19e2d185e..2c18a5155d28 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py @@ -41,6 +41,7 @@ class ReadRowsRequest(proto.Message): r"""Request message for Bigtable.ReadRows. + Attributes: table_name (str): Required. The unique name of the table from which to read. @@ -72,6 +73,7 @@ class ReadRowsRequest(proto.Message): class ReadRowsResponse(proto.Message): r"""Response message for Bigtable.ReadRows. + Attributes: chunks (Sequence[google.cloud.bigtable_v2.types.ReadRowsResponse.CellChunk]): A collection of a row's contents as part of @@ -169,6 +171,7 @@ class CellChunk(proto.Message): class SampleRowKeysRequest(proto.Message): r"""Request message for Bigtable.SampleRowKeys. + Attributes: table_name (str): Required. The unique name of the table from which to sample @@ -186,6 +189,7 @@ class SampleRowKeysRequest(proto.Message): class SampleRowKeysResponse(proto.Message): r"""Response message for Bigtable.SampleRowKeys. + Attributes: row_key (bytes): Sorted streamed sequence of sample row keys @@ -213,6 +217,7 @@ class SampleRowKeysResponse(proto.Message): class MutateRowRequest(proto.Message): r"""Request message for Bigtable.MutateRow. + Attributes: table_name (str): Required. The unique name of the table to which the mutation @@ -240,11 +245,13 @@ class MutateRowRequest(proto.Message): class MutateRowResponse(proto.Message): - r"""Response message for Bigtable.MutateRow. """ + r"""Response message for Bigtable.MutateRow. + """ class MutateRowsRequest(proto.Message): r"""Request message for BigtableService.MutateRows. + Attributes: table_name (str): Required. The unique name of the table to @@ -265,6 +272,7 @@ class MutateRowsRequest(proto.Message): class Entry(proto.Message): r"""A mutation for a given row. 
+ Attributes: row_key (bytes): The key of the row to which the ``mutations`` should be @@ -287,6 +295,7 @@ class Entry(proto.Message): class MutateRowsResponse(proto.Message): r"""Response message for BigtableService.MutateRows. + Attributes: entries (Sequence[google.cloud.bigtable_v2.types.MutateRowsResponse.Entry]): One or more results for Entries from the @@ -317,6 +326,7 @@ class Entry(proto.Message): class CheckAndMutateRowRequest(proto.Message): r"""Request message for Bigtable.CheckAndMutateRow. + Attributes: table_name (str): Required. The unique name of the table to which the @@ -366,6 +376,7 @@ class CheckAndMutateRowRequest(proto.Message): class CheckAndMutateRowResponse(proto.Message): r"""Response message for Bigtable.CheckAndMutateRow. + Attributes: predicate_matched (bool): Whether or not the request's ``predicate_filter`` yielded @@ -377,6 +388,7 @@ class CheckAndMutateRowResponse(proto.Message): class ReadModifyWriteRowRequest(proto.Message): r"""Request message for Bigtable.ReadModifyWriteRow. + Attributes: table_name (str): Required. The unique name of the table to which the @@ -408,6 +420,7 @@ class ReadModifyWriteRowRequest(proto.Message): class ReadModifyWriteRowResponse(proto.Message): r"""Response message for Bigtable.ReadModifyWriteRow. + Attributes: row (google.cloud.bigtable_v2.types.Row): A Row containing the new contents of all diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py index ca2302889eb6..2b97ac0f7a83 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py @@ -129,6 +129,7 @@ class Cell(proto.Message): class RowRange(proto.Message): r"""Specifies a contiguous range of rows. + Attributes: start_key_closed (bytes): Used when giving an inclusive lower bound for @@ -152,6 +153,7 @@ class RowRange(proto.Message): class RowSet(proto.Message): r"""Specifies a non-contiguous set of rows. + Attributes: row_keys (Sequence[bytes]): Single rows included in the set. @@ -198,6 +200,7 @@ class ColumnRange(proto.Message): class TimestampRange(proto.Message): r"""Specified a contiguous range of microsecond timestamps. + Attributes: start_timestamp_micros (int): Inclusive lower bound. If left empty, @@ -213,6 +216,7 @@ class TimestampRange(proto.Message): class ValueRange(proto.Message): r"""Specifies a contiguous range of raw byte values. + Attributes: start_value_closed (bytes): Used when giving an inclusive lower bound for @@ -567,6 +571,7 @@ class Mutation(proto.Message): class SetCell(proto.Message): r"""A Mutation which sets the value of the specified cell. + Attributes: family_name (str): The name of the family into which new data should be @@ -627,7 +632,8 @@ class DeleteFromFamily(proto.Message): family_name = proto.Field(proto.STRING, number=1,) class DeleteFromRow(proto.Message): - r"""A Mutation which deletes all cells from the containing row. """ + r"""A Mutation which deletes all cells from the containing row. 
+ """ set_cell = proto.Field(proto.MESSAGE, number=1, oneof="mutation", message=SetCell,) delete_from_column = proto.Field( diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index 029ed196feae..f7fb223ee063 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -32,6 +32,7 @@ from google.api_core import grpc_helpers_async from google.api_core import operation_async # type: ignore from google.api_core import operations_v1 +from google.api_core import path_template from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( @@ -4936,6 +4937,9 @@ def test_bigtable_instance_admin_base_transport(): with pytest.raises(NotImplementedError): getattr(transport, method)(request=object()) + with pytest.raises(NotImplementedError): + transport.close() + # Additionally, the LRO client (a property) should # also raise NotImplementedError with pytest.raises(NotImplementedError): @@ -5585,3 +5589,49 @@ def test_client_withDEFAULT_CLIENT_INFO(): credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", + ) + with mock.patch.object( + type(getattr(client.transport, "grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + with mock.patch.object( + type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "grpc", + ] + for transport in transports: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index 6bfe7d012c16..95739fc94ab8 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -32,6 +32,7 @@ from google.api_core import grpc_helpers_async from google.api_core import operation_async # type: ignore from google.api_core import operations_v1 +from google.api_core import path_template from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( @@ -5784,6 +5785,9 @@ def test_bigtable_table_admin_base_transport(): with pytest.raises(NotImplementedError): getattr(transport, method)(request=object()) + with pytest.raises(NotImplementedError): + transport.close() + # Additionally, the LRO client (a property) should # also raise NotImplementedError with pytest.raises(NotImplementedError): @@ -6482,3 +6486,49 @@ def test_client_withDEFAULT_CLIENT_INFO(): credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", + ) + with mock.patch.object( + type(getattr(client.transport, "grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + with mock.patch.object( + type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "grpc", + ] + for transport in transports: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py index 3735f10745ec..95fd03bb32af 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py @@ -29,6 +29,7 @@ from google.api_core import gapic_v1 from google.api_core import grpc_helpers from google.api_core import grpc_helpers_async +from google.api_core import path_template from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError from google.cloud.bigtable_v2.services.bigtable import BigtableAsyncClient @@ -2022,6 +2023,9 @@ def test_bigtable_base_transport(): with pytest.raises(NotImplementedError): getattr(transport, method)(request=object()) + with pytest.raises(NotImplementedError): + transport.close() + @requires_google_auth_gte_1_25_0 def test_bigtable_base_transport_with_credentials_file(): @@ -2536,3 +2540,49 @@ def test_client_withDEFAULT_CLIENT_INFO(): credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", + ) + with mock.patch.object( + type(getattr(client.transport, "grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + with mock.patch.object( + type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "grpc", + ] + for transport in transports: + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() From d9d15ca25655f6788ab5390e40b177c9a7346e45 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Fri, 8 Oct 2021 11:57:53 -0400 Subject: [PATCH 526/892] feat: add support for Python 3.10 (#437) --- packages/google-cloud-bigtable/owlbot.py | 1 + packages/google-cloud-bigtable/setup.py | 3 +++ 2 files changed, 4 insertions(+) diff --git a/packages/google-cloud-bigtable/owlbot.py b/packages/google-cloud-bigtable/owlbot.py index c9eaa54d2457..9a2748788d81 100644 --- a/packages/google-cloud-bigtable/owlbot.py +++ b/packages/google-cloud-bigtable/owlbot.py @@ -85,6 +85,7 @@ def get_staging_dirs( # ---------------------------------------------------------------------------- templated_files = common.py_library( samples=True, # set to True only if there are samples + unit_test_python_versions=["3.6", "3.7", "3.8", "3.9", "3.10"], split_system_tests=True, microgenerator=True, cov_level=100, diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 71e055017080..76775e91f59c 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -83,6 +83,9 @@ "Programming Language :: Python", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", "Operating System :: OS Independent", "Topic :: Internet", ], From 931759674f3963faff6831a72419e7d1e838c51f Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Fri, 8 Oct 2021 17:16:35 +0000 Subject: [PATCH 527/892] chore(python): Add kokoro configs for python 3.10 samples testing (#442) --- .../.github/.OwlBot.lock.yaml | 2 +- .../.kokoro/samples/python3.10/common.cfg | 40 +++++++++++++++++++ .../.kokoro/samples/python3.10/continuous.cfg | 6 +++ .../samples/python3.10/periodic-head.cfg | 11 +++++ .../.kokoro/samples/python3.10/periodic.cfg | 6 +++ .../.kokoro/samples/python3.10/presubmit.cfg | 6 +++ 6 files changed, 70 insertions(+), 1 deletion(-) create mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.10/common.cfg create mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.10/continuous.cfg create mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.10/periodic-head.cfg create mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.10/periodic.cfg create mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.10/presubmit.cfg diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 76d0baa0a49d..7d98291cc35f 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -1,3 +1,3 @@ docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:4370ced27a324687ede5da07132dcdc5381993502a5e8a3e31e16dc631d026f0 + digest: sha256:58f73ba196b5414782605236dd0712a73541b44ff2ff4d3a36ec41092dd6fa5b diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.10/common.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.10/common.cfg new file mode 100644 index 000000000000..0dc18096b8cc --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.10/common.cfg @@ -0,0 +1,40 
@@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Specify which tests to run +env_vars: { + key: "RUN_TESTS_SESSION" + value: "py-3.10" +} + +# Declare build specific Cloud project. +env_vars: { + key: "BUILD_SPECIFIC_GCLOUD_PROJECT" + value: "python-docs-samples-tests-310" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-bigtable/.kokoro/test-samples.sh" +} + +# Configure the docker image for kokoro-trampoline. +env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker" +} + +# Download secrets for samples +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. +build_file: "python-bigtable/.kokoro/trampoline_v2.sh" \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.10/continuous.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.10/continuous.cfg new file mode 100644 index 000000000000..a1c8d9759c88 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.10/continuous.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.10/periodic-head.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.10/periodic-head.cfg new file mode 100644 index 000000000000..be25a34f9ad3 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.10/periodic-head.cfg @@ -0,0 +1,11 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-bigtable/.kokoro/test-samples-against-head.sh" +} diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.10/periodic.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.10/periodic.cfg new file mode 100644 index 000000000000..71cd1e597e38 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.10/periodic.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "False" +} diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.10/presubmit.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.10/presubmit.cfg new file mode 100644 index 000000000000..a1c8d9759c88 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.10/presubmit.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file From 2243664742f6a5e29a0e3bb1d43f2257ed22350d Mon Sep 17 00:00:00 2001 From: Christopher Wilcox Date: Tue, 12 Oct 2021 08:16:53 -0700 Subject: [PATCH 528/892] test: address issue with fixtures and emulator (#445) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * test: address issue with fixtures not raising exceptions under emulator, flag tests as not emulator compatible * test: modify not_in_emulator to skip_on_emulator * 🦉 Updates from OwlBot See 
https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md Co-authored-by: Owl Bot --- .../tests/system/conftest.py | 10 +++---- .../tests/system/test_data_api.py | 4 +-- .../tests/system/test_instance_admin.py | 30 ++++++++++--------- .../tests/system/test_table_admin.py | 12 ++++---- 4 files changed, 29 insertions(+), 27 deletions(-) diff --git a/packages/google-cloud-bigtable/tests/system/conftest.py b/packages/google-cloud-bigtable/tests/system/conftest.py index 778cf8c94033..b48e7a62a0d3 100644 --- a/packages/google-cloud-bigtable/tests/system/conftest.py +++ b/packages/google-cloud-bigtable/tests/system/conftest.py @@ -41,7 +41,7 @@ def with_kms_key_name(kms_key_name): @pytest.fixture(scope="session") -def not_in_emulator(in_emulator): +def skip_on_emulator(in_emulator): if in_emulator: pytest.skip("Emulator does not support this feature") @@ -146,13 +146,13 @@ def data_instance_populated( serve_nodes, in_emulator, ): + instance = admin_client.instance(data_instance_id, labels=instance_labels) + cluster = instance.cluster( + data_cluster_id, location_id=location_id, serve_nodes=serve_nodes, + ) # Emulator does not support instance admin operations (create / delete). # See: https://cloud.google.com/bigtable/docs/emulator if not in_emulator: - instance = admin_client.instance(data_instance_id, labels=instance_labels) - cluster = instance.cluster( - data_cluster_id, location_id=location_id, serve_nodes=serve_nodes, - ) operation = instance.create(clusters=[cluster]) operation.result(timeout=30) diff --git a/packages/google-cloud-bigtable/tests/system/test_data_api.py b/packages/google-cloud-bigtable/tests/system/test_data_api.py index 2137aa2e4508..2ca7e150479a 100644 --- a/packages/google-cloud-bigtable/tests/system/test_data_api.py +++ b/packages/google-cloud-bigtable/tests/system/test_data_api.py @@ -210,7 +210,7 @@ def test_rowset_add_row_range_w_pfx(data_table, rows_to_delete): assert found_row_keys == expected_row_keys -def test_table_read_row_large_cell(data_table, rows_to_delete, not_in_emulator): +def test_table_read_row_large_cell(data_table, rows_to_delete, skip_on_emulator): # Maximum gRPC received message size for emulator is 4194304 bytes. 
row = data_table.direct_row(ROW_KEY) rows_to_delete.append(row) @@ -325,7 +325,7 @@ def test_table_read_rows(data_table, rows_to_delete): assert rows_data.rows == expected_rows -def test_read_with_label_applied(data_table, rows_to_delete, not_in_emulator): +def test_read_with_label_applied(data_table, rows_to_delete, skip_on_emulator): from google.cloud.bigtable.row_filters import ApplyLabelFilter from google.cloud.bigtable.row_filters import ColumnQualifierRegexFilter from google.cloud.bigtable.row_filters import RowFilterChain diff --git a/packages/google-cloud-bigtable/tests/system/test_instance_admin.py b/packages/google-cloud-bigtable/tests/system/test_instance_admin.py index c5f7b525e5a0..c3e419a112e9 100644 --- a/packages/google-cloud-bigtable/tests/system/test_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/system/test_instance_admin.py @@ -96,7 +96,9 @@ def _delete_app_profile_helper(app_profile): assert not app_profile.exists() -def test_client_list_instances(admin_client, admin_instance_populated, not_in_emulator): +def test_client_list_instances( + admin_client, admin_instance_populated, skip_on_emulator +): instances, failed_locations = admin_client.list_instances() assert failed_locations == [] @@ -105,20 +107,20 @@ def test_client_list_instances(admin_client, admin_instance_populated, not_in_em assert admin_instance_populated.name in found -def test_instance_exists_hit(admin_instance_populated): +def test_instance_exists_hit(admin_instance_populated, skip_on_emulator): # Emulator does not support instance admin operations (create / delete). # It allows connecting with *any* project / instance name. # See: https://cloud.google.com/bigtable/docs/emulator assert admin_instance_populated.exists() -def test_instance_exists_miss(admin_client): +def test_instance_exists_miss(admin_client, skip_on_emulator): alt_instance = admin_client.instance("nonesuch-instance") assert not alt_instance.exists() def test_instance_reload( - admin_client, admin_instance_id, admin_instance_populated, not_in_emulator + admin_client, admin_instance_id, admin_instance_populated, skip_on_emulator ): # Use same arguments as 'admin_instance_populated' # so we can use reload() on a fresh instance. 
@@ -139,7 +141,7 @@ def test_instance_create_prod( location_id, instance_labels, instances_to_delete, - not_in_emulator, + skip_on_emulator, ): from google.cloud.bigtable import enums @@ -171,7 +173,7 @@ def test_instance_create_development( location_id, instance_labels, instances_to_delete, - not_in_emulator, + skip_on_emulator, ): alt_instance_id = f"new{unique_suffix}" instance = admin_client.instance( @@ -205,7 +207,7 @@ def test_instance_create_w_two_clusters( location_id, instance_labels, instances_to_delete, - not_in_emulator, + skip_on_emulator, ): alt_instance_id = f"dif{unique_suffix}" instance = admin_client.instance( @@ -400,7 +402,7 @@ def test_instance_create_w_two_clusters_cmek( instance_labels, instances_to_delete, with_kms_key_name, - not_in_emulator, + skip_on_emulator, ): alt_instance_id = f"dif-cmek{unique_suffix}" instance = admin_client.instance( @@ -484,7 +486,7 @@ def test_instance_update_display_name_and_labels( admin_instance_populated, label_key, instance_labels, - not_in_emulator, + skip_on_emulator, ): old_display_name = admin_instance_populated.display_name new_display_name = "Foo Bar Baz" @@ -521,7 +523,7 @@ def test_instance_update_w_type( location_id, instance_labels, instances_to_delete, - not_in_emulator, + skip_on_emulator, ): alt_instance_id = f"ndif{unique_suffix}" instance = admin_client.instance( @@ -548,17 +550,17 @@ def test_instance_update_w_type( assert instance_alt.type_ == enums.Instance.Type.PRODUCTION -def test_cluster_exists_hit(admin_cluster, not_in_emulator): +def test_cluster_exists_hit(admin_cluster, skip_on_emulator): assert admin_cluster.exists() -def test_cluster_exists_miss(admin_instance_populated, not_in_emulator): +def test_cluster_exists_miss(admin_instance_populated, skip_on_emulator): alt_cluster = admin_instance_populated.cluster("nonesuch-cluster") assert not alt_cluster.exists() def test_cluster_create( - admin_instance_populated, admin_instance_id, + admin_instance_populated, admin_instance_id, skip_on_emulator, ): alt_cluster_id = f"{admin_instance_id}-c2" alt_location_id = "us-central1-f" @@ -594,7 +596,7 @@ def test_cluster_update( admin_cluster_id, admin_cluster, serve_nodes, - not_in_emulator, + skip_on_emulator, ): new_serve_nodes = 4 diff --git a/packages/google-cloud-bigtable/tests/system/test_table_admin.py b/packages/google-cloud-bigtable/tests/system/test_table_admin.py index 232c6d0fc1c6..1ed540d632fd 100644 --- a/packages/google-cloud-bigtable/tests/system/test_table_admin.py +++ b/packages/google-cloud-bigtable/tests/system/test_table_admin.py @@ -57,7 +57,7 @@ def backups_to_delete(): backup.delete() -def test_instance_list_tables(data_instance_populated, shared_table): +def test_instance_list_tables(data_instance_populated, shared_table, skip_on_emulator): # Since `data_instance_populated` is newly created, the # table created in `shared_table` here will be the only one. 
tables = data_instance_populated.list_tables() @@ -115,7 +115,7 @@ def test_table_create_w_families( def test_table_create_w_split_keys( - data_instance_populated, tables_to_delete, not_in_emulator, + data_instance_populated, tables_to_delete, skip_on_emulator ): temp_table_id = "foo-bar-baz-split-table" initial_split_keys = [b"split_key_1", b"split_key_10", b"split_key_20"] @@ -203,7 +203,7 @@ def test_column_family_delete(data_instance_populated, tables_to_delete): def test_table_get_iam_policy( - data_instance_populated, tables_to_delete, not_in_emulator, + data_instance_populated, tables_to_delete, skip_on_emulator ): temp_table_id = "test-get-iam-policy-table" temp_table = data_instance_populated.table(temp_table_id) @@ -216,7 +216,7 @@ def test_table_get_iam_policy( def test_table_set_iam_policy( - service_account, data_instance_populated, tables_to_delete, not_in_emulator, + service_account, data_instance_populated, tables_to_delete, skip_on_emulator ): from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE from google.cloud.bigtable.policy import Policy @@ -236,7 +236,7 @@ def test_table_set_iam_policy( def test_table_test_iam_permissions( - data_instance_populated, tables_to_delete, not_in_emulator, + data_instance_populated, tables_to_delete, skip_on_emulator, ): temp_table_id = "test-test-iam-policy-table" temp_table = data_instance_populated.table(temp_table_id) @@ -258,7 +258,7 @@ def test_table_backup( instances_to_delete, tables_to_delete, backups_to_delete, - not_in_emulator, + skip_on_emulator, ): from google.cloud._helpers import _datetime_to_pb_timestamp from google.cloud.bigtable import enums From e004abd5d46bb58e75ca7259163d83cc9e132a4a Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 12 Oct 2021 11:38:57 -0400 Subject: [PATCH 529/892] chore: add GH Workflow to run systests under emulator (#430) * chore: add GH Workflow to run systests under emulator (DO NOT MERGE) * Even if the tests are interrupted. Also, they seem to need SIGKILL, rather than SIGTERM. 
* ci: run systest_emulated workflow on PRs targeting 'main' Co-authored-by: Owl Bot Co-authored-by: Christopher Wilcox --- .../.github/workflows/system_emulated.yml | 29 ++++++++++ .../google/cloud/bigtable/client.py | 8 +++ packages/google-cloud-bigtable/noxfile.py | 14 ++--- packages/google-cloud-bigtable/owlbot.py | 14 ++--- .../tests/system/conftest.py | 6 +-- .../tests/unit/test_client.py | 53 +++++++++++++++++-- 6 files changed, 105 insertions(+), 19 deletions(-) create mode 100644 packages/google-cloud-bigtable/.github/workflows/system_emulated.yml diff --git a/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml b/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml new file mode 100644 index 000000000000..57656d3ce2b9 --- /dev/null +++ b/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml @@ -0,0 +1,29 @@ +name: "Run systests on emulator" +on: + pull_request: + branches: + - main + +jobs: + + run-systests: + runs-on: ubuntu-20.04 + + steps: + + - name: Checkout + uses: actions/checkout@v2 + + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: '3.8' + + - name: Setup GCloud SDK + uses: google-github-actions/setup-gcloud@v0.2.1 + + - name: Install / run Nox + run: | + python -m pip install --upgrade setuptools pip + python -m pip install nox + nox -s system_emulated diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py index 7249c0b358c8..7746ee2ae5d9 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py @@ -33,6 +33,7 @@ from google.api_core.gapic_v1 import client_info import google.auth +from google.auth.credentials import AnonymousCredentials from google.cloud import bigtable_v2 from google.cloud import bigtable_admin_v2 @@ -67,6 +68,7 @@ READ_ONLY_SCOPE = "https://www.googleapis.com/auth/bigtable.data.readonly" """Scope for reading table data.""" +_DEFAULT_BIGTABLE_EMULATOR_CLIENT = "google-cloud-bigtable-emulator" _GRPC_CHANNEL_OPTIONS = ( ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), @@ -170,6 +172,12 @@ def __init__( self._client_info = client_info self._emulator_host = os.getenv(BIGTABLE_EMULATOR) + if self._emulator_host is not None: + if credentials is None: + credentials = AnonymousCredentials() + if project is None: + project = _DEFAULT_BIGTABLE_EMULATOR_CLIENT + if channel is not None: warnings.warn( "'channel' is deprecated and no longer used.", diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index 348d4fc60439..15117ee22359 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -119,7 +119,7 @@ def unit(session): default(session) -@nox.session(python="3.8") +@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS) def system_emulated(session): import subprocess import signal @@ -133,15 +133,17 @@ def system_emulated(session): subprocess.call(["gcloud", "components", "install", "beta", "bigtable"]) hostport = "localhost:8789" + session.env["BIGTABLE_EMULATOR_HOST"] = hostport + p = subprocess.Popen( ["gcloud", "beta", "emulators", "bigtable", "start", "--host-port", hostport] ) - session.env["BIGTABLE_EMULATOR_HOST"] = hostport - system(session) - - # Stop Emulator - os.killpg(os.getpgid(p.pid), signal.SIGTERM) + try: + system(session) + finally: + # Stop Emulator + os.killpg(os.getpgid(p.pid), 
signal.SIGKILL) @nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS) diff --git a/packages/google-cloud-bigtable/owlbot.py b/packages/google-cloud-bigtable/owlbot.py index 9a2748788d81..bacfdfbd16a7 100644 --- a/packages/google-cloud-bigtable/owlbot.py +++ b/packages/google-cloud-bigtable/owlbot.py @@ -105,7 +105,7 @@ def place_before(path, text, *before_text, escape=None): s.replace([path], text, replacement) system_emulated_session = """ -@nox.session(python="3.8") +@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS) def system_emulated(session): import subprocess import signal @@ -119,15 +119,17 @@ def system_emulated(session): subprocess.call(["gcloud", "components", "install", "beta", "bigtable"]) hostport = "localhost:8789" + session.env["BIGTABLE_EMULATOR_HOST"] = hostport + p = subprocess.Popen( ["gcloud", "beta", "emulators", "bigtable", "start", "--host-port", hostport] ) - session.env["BIGTABLE_EMULATOR_HOST"] = hostport - system(session) - - # Stop Emulator - os.killpg(os.getpgid(p.pid), signal.SIGTERM) + try: + system(session) + finally: + # Stop Emulator + os.killpg(os.getpgid(p.pid), signal.SIGKILL) """ diff --git a/packages/google-cloud-bigtable/tests/system/conftest.py b/packages/google-cloud-bigtable/tests/system/conftest.py index b48e7a62a0d3..6f6cdc2d1a95 100644 --- a/packages/google-cloud-bigtable/tests/system/conftest.py +++ b/packages/google-cloud-bigtable/tests/system/conftest.py @@ -147,12 +147,12 @@ def data_instance_populated( in_emulator, ): instance = admin_client.instance(data_instance_id, labels=instance_labels) - cluster = instance.cluster( - data_cluster_id, location_id=location_id, serve_nodes=serve_nodes, - ) # Emulator does not support instance admin operations (create / delete). # See: https://cloud.google.com/bigtable/docs/emulator if not in_emulator: + cluster = instance.cluster( + data_cluster_id, location_id=location_id, serve_nodes=serve_nodes, + ) operation = instance.create(clusters=[cluster]) operation.result(timeout=30) diff --git a/packages/google-cloud-bigtable/tests/unit/test_client.py b/packages/google-cloud-bigtable/tests/unit/test_client.py index 5c557763a072..b80290e4fb7c 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/test_client.py @@ -170,22 +170,67 @@ def test_constructor_both_admin_and_read_only(self): def test_constructor_with_emulator_host(self): from google.cloud.environment_vars import BIGTABLE_EMULATOR + from google.cloud.bigtable.client import _DEFAULT_BIGTABLE_EMULATOR_CLIENT from google.cloud.bigtable.client import _GRPC_CHANNEL_OPTIONS - credentials = _make_credentials() emulator_host = "localhost:8081" with mock.patch("os.environ", {BIGTABLE_EMULATOR: emulator_host}): with mock.patch("grpc.secure_channel") as factory: - client = self._make_one(project=self.PROJECT, credentials=credentials) + client = self._make_one() # don't test local_composite_credentials - client._local_composite_credentials = lambda: credentials + # client._local_composite_credentials = lambda: credentials + # channels are formed when needed, so access a client + # create a gapic channel + client.table_data_client + + self.assertEqual(client._emulator_host, emulator_host) + self.assertEqual(client.project, _DEFAULT_BIGTABLE_EMULATOR_CLIENT) + factory.assert_called_once_with( + emulator_host, + mock.ANY, # test of creds wrapping in '_emulator_host' below + options=_GRPC_CHANNEL_OPTIONS, + ) + + def test_constructor_with_emulator_host_w_project(self): + from google.cloud.environment_vars 
import BIGTABLE_EMULATOR + from google.cloud.bigtable.client import _GRPC_CHANNEL_OPTIONS + + emulator_host = "localhost:8081" + with mock.patch("os.environ", {BIGTABLE_EMULATOR: emulator_host}): + with mock.patch("grpc.secure_channel") as factory: + client = self._make_one(project=self.PROJECT) + # channels are formed when needed, so access a client + # create a gapic channel + client.table_data_client + + self.assertEqual(client._emulator_host, emulator_host) + self.assertEqual(client.project, self.PROJECT) + factory.assert_called_once_with( + emulator_host, + mock.ANY, # test of creds wrapping in '_emulator_host' below + options=_GRPC_CHANNEL_OPTIONS, + ) + + def test_constructor_with_emulator_host_w_credentials(self): + from google.cloud.environment_vars import BIGTABLE_EMULATOR + from google.cloud.bigtable.client import _DEFAULT_BIGTABLE_EMULATOR_CLIENT + from google.cloud.bigtable.client import _GRPC_CHANNEL_OPTIONS + + emulator_host = "localhost:8081" + credentials = _make_credentials() + with mock.patch("os.environ", {BIGTABLE_EMULATOR: emulator_host}): + with mock.patch("grpc.secure_channel") as factory: + client = self._make_one(credentials=credentials) # channels are formed when needed, so access a client # create a gapic channel client.table_data_client self.assertEqual(client._emulator_host, emulator_host) + self.assertEqual(client.project, _DEFAULT_BIGTABLE_EMULATOR_CLIENT) factory.assert_called_once_with( - emulator_host, credentials, options=_GRPC_CHANNEL_OPTIONS, + emulator_host, + mock.ANY, # test of creds wrapping in '_emulator_host' below + options=_GRPC_CHANNEL_OPTIONS, ) def test__get_scopes_default(self): From 33bf8ccedb029eaa01cdeffcdef0d5fc8ba09958 Mon Sep 17 00:00:00 2001 From: Christopher Wilcox Date: Wed, 13 Oct 2021 01:32:05 -0700 Subject: [PATCH 530/892] chore: add py.typed file for PEP 561 compatibility (#447) * chore: add py.typed file for PEP 561 compatibility * Update py.typed --- packages/google-cloud-bigtable/google/cloud/bigtable/py.typed | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/py.typed diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/py.typed b/packages/google-cloud-bigtable/google/cloud/bigtable/py.typed new file mode 100644 index 000000000000..7bd4705d4d9f --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-bigtable package uses inline types. 
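The py.typed marker added above, combined with the inline annotations, lets PEP 561-aware type checkers consume the package's types. A minimal downstream sketch (hypothetical user code, not part of any patch in this series) that mypy can now check against the library's own annotations:

    from typing import List

    from google.cloud import bigtable

    def list_table_ids(client: bigtable.Client, instance_id: str) -> List[str]:
        # mypy resolves the Client / Instance / Table annotations from the
        # installed package because the py.typed marker is present.
        instance = client.instance(instance_id)
        return [table.table_id for table in instance.list_tables()]
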
From 6b4230fa1050ce9b01f6cee5b11616b7c37a89e6 Mon Sep 17 00:00:00 2001 From: Christopher Wilcox Date: Thu, 14 Oct 2021 14:43:01 -0700 Subject: [PATCH 531/892] fix: improve type hints, mypy checks (#448) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore: ensure mypy passes * 🦉 Updates from OwlBot See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * chore: move type info to inline, not mypy.ini * 🦉 Updates from OwlBot See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * test: add mypy test scenario * 🦉 Updates from OwlBot See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * chore: simplify type of __version__ * 🦉 Updates from OwlBot See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * chore: expand typing verification to google namespace * 🦉 Updates from OwlBot See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * chore: remove ignores on api_core module * chore: no pytype * chore: scope to just google.cloud.bigtable, defer fixing errors on broader package * 🦉 Updates from OwlBot See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * chore: fix template * 🦉 Updates from OwlBot See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * chore: lint * fix: break circular import Remove '_CLIENT_INFO' from module scope. * fix: unsnarl typing around list of retryable status codes * ci: fix coverage gap induced by typing Co-authored-by: Owl Bot Co-authored-by: Tres Seaver --- .../google/cloud/__init__.py | 4 +++- .../google/cloud/bigtable/__init__.py | 7 +++--- .../google/cloud/bigtable/backup.py | 4 ++-- .../google/cloud/bigtable/client.py | 21 +++++++++------- .../google/cloud/bigtable/instance.py | 2 +- .../google/cloud/bigtable/policy.py | 4 ++-- .../google/cloud/bigtable/row.py | 6 ++--- .../google/cloud/bigtable/row_data.py | 6 ++--- .../google/cloud/bigtable/row_filters.py | 4 ++-- .../google/cloud/bigtable/row_set.py | 2 +- .../google/cloud/bigtable/table.py | 17 +++++++------ packages/google-cloud-bigtable/mypy.ini | 6 +++++ packages/google-cloud-bigtable/noxfile.py | 10 ++++++++ packages/google-cloud-bigtable/owlbot.py | 24 ++++++++++++++++++- .../tests/unit/test_client.py | 12 ++++------ 15 files changed, 86 insertions(+), 43 deletions(-) create mode 100644 packages/google-cloud-bigtable/mypy.ini diff --git a/packages/google-cloud-bigtable/google/cloud/__init__.py b/packages/google-cloud-bigtable/google/cloud/__init__.py index 2f4b4738aee1..ced5017a1018 100644 --- a/packages/google-cloud-bigtable/google/cloud/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/__init__.py @@ -1,3 +1,5 @@ +from typing import List + try: import pkg_resources @@ -5,4 +7,4 @@ except ImportError: import pkgutil - __path__ = pkgutil.extend_path(__path__, __name__) + __path__: List[str] = pkgutil.extend_path(__path__, __name__) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable/__init__.py index f2c5a24bd5a2..a54096624c0c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/__init__.py @@ -15,15 +15,16 @@ """Google Cloud Bigtable API package.""" +from typing import Optional import pkg_resources +from 
google.cloud.bigtable.client import Client + +__version__: Optional[str] try: __version__ = pkg_resources.get_distribution("google-cloud-bigtable").version except pkg_resources.DistributionNotFound: __version__ = None -from google.cloud.bigtable.client import Client - - __all__ = ["__version__", "Client"] diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/backup.py b/packages/google-cloud-bigtable/google/cloud/bigtable/backup.py index 0991e85f54d5..c2b5ec9ee4be 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/backup.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/backup.py @@ -16,12 +16,12 @@ import re -from google.cloud._helpers import _datetime_to_pb_timestamp +from google.cloud._helpers import _datetime_to_pb_timestamp # type: ignore from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient from google.cloud.bigtable_admin_v2.types import table from google.cloud.bigtable.encryption_info import EncryptionInfo from google.cloud.bigtable.policy import Policy -from google.cloud.exceptions import NotFound +from google.cloud.exceptions import NotFound # type: ignore from google.protobuf import field_mask_pb2 _BACKUP_NAME_RE = re.compile( diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py index 7746ee2ae5d9..c50c20b0f7db 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py @@ -29,11 +29,11 @@ """ import os import warnings -import grpc +import grpc # type: ignore -from google.api_core.gapic_v1 import client_info -import google.auth -from google.auth.credentials import AnonymousCredentials +from google.api_core.gapic_v1 import client_info as client_info_lib +import google.auth # type: ignore +from google.auth.credentials import AnonymousCredentials # type: ignore from google.cloud import bigtable_v2 from google.cloud import bigtable_admin_v2 @@ -45,21 +45,20 @@ BigtableTableAdminGrpcTransport, ) -from google.cloud.bigtable import __version__ +from google.cloud import bigtable from google.cloud.bigtable.instance import Instance from google.cloud.bigtable.cluster import Cluster -from google.cloud.client import ClientWithProject +from google.cloud.client import ClientWithProject # type: ignore from google.cloud.bigtable_admin_v2.types import instance from google.cloud.bigtable.cluster import _CLUSTER_NAME_RE -from google.cloud.environment_vars import BIGTABLE_EMULATOR +from google.cloud.environment_vars import BIGTABLE_EMULATOR # type: ignore INSTANCE_TYPE_PRODUCTION = instance.Instance.Type.PRODUCTION INSTANCE_TYPE_DEVELOPMENT = instance.Instance.Type.DEVELOPMENT INSTANCE_TYPE_UNSPECIFIED = instance.Instance.Type.TYPE_UNSPECIFIED -_CLIENT_INFO = client_info.ClientInfo(client_library_version=__version__) SPANNER_ADMIN_SCOPE = "https://www.googleapis.com/auth/spanner.admin" ADMIN_SCOPE = "https://www.googleapis.com/auth/bigtable.admin" """Scope for interacting with the Cluster Admin and Table Admin APIs.""" @@ -155,11 +154,15 @@ def __init__( credentials=None, read_only=False, admin=False, - client_info=_CLIENT_INFO, + client_info=None, client_options=None, admin_client_options=None, channel=None, ): + if client_info is None: + client_info = client_info_lib.ClientInfo( + client_library_version=bigtable.__version__, + ) if read_only and admin: raise ValueError( "A read-only client cannot also perform" "administrative actions." 
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py index e6e2ac027a3a..9c22aaa79b53 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py @@ -24,7 +24,7 @@ from google.cloud.bigtable_admin_v2.types import instance -from google.iam.v1 import options_pb2 +from google.iam.v1 import options_pb2 # type: ignore from google.api_core.exceptions import NotFound diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/policy.py b/packages/google-cloud-bigtable/google/cloud/bigtable/policy.py index f5558b6f0d49..8396642fb23c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/policy.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/policy.py @@ -15,8 +15,8 @@ import base64 from google.api_core.iam import Policy as BasePolicy -from google.cloud._helpers import _to_bytes -from google.iam.v1 import policy_pb2 +from google.cloud._helpers import _to_bytes # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore """IAM roles supported by Bigtable Instance resource""" BIGTABLE_ADMIN_ROLE = "roles/bigtable.admin" diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row.py index 3fdc230f78b1..9127a1aae695 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row.py @@ -17,9 +17,9 @@ import struct -from google.cloud._helpers import _datetime_from_microseconds -from google.cloud._helpers import _microseconds_from_datetime -from google.cloud._helpers import _to_bytes +from google.cloud._helpers import _datetime_from_microseconds # type: ignore +from google.cloud._helpers import _microseconds_from_datetime # type: ignore +from google.cloud._helpers import _to_bytes # type: ignore from google.cloud.bigtable_v2.types import data as data_v2_pb2 diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py index 18d82153b0fc..6ab1188a8c18 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py @@ -17,12 +17,12 @@ import copy -import grpc +import grpc # type: ignore from google.api_core import exceptions from google.api_core import retry -from google.cloud._helpers import _datetime_from_microseconds -from google.cloud._helpers import _to_bytes +from google.cloud._helpers import _datetime_from_microseconds # type: ignore +from google.cloud._helpers import _to_bytes # type: ignore from google.cloud.bigtable_v2.types import bigtable as data_messages_v2_pb2 from google.cloud.bigtable_v2.types import data as data_v2_pb2 diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_filters.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_filters.py index b495fb6463c9..53192acc86d0 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_filters.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_filters.py @@ -17,8 +17,8 @@ import struct -from google.cloud._helpers import _microseconds_from_datetime -from google.cloud._helpers import _to_bytes +from google.cloud._helpers import _microseconds_from_datetime # type: ignore +from google.cloud._helpers import _to_bytes # type: ignore from google.cloud.bigtable_v2.types import 
data as data_v2_pb2 _PACK_I64 = struct.Struct(">q").pack diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py index 32a9bd1e30de..82a540b5a891 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py @@ -15,7 +15,7 @@ """User-friendly container for Google Cloud Bigtable RowSet """ -from google.cloud._helpers import _to_bytes +from google.cloud._helpers import _to_bytes # type: ignore class RowSet(object): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index 8dc4f5e4281f..fddd04809925 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -14,6 +14,7 @@ """User-friendly container for Google Cloud Bigtable Table.""" +from typing import Set import warnings from google.api_core import timeout @@ -25,7 +26,7 @@ from google.api_core.gapic_v1.method import DEFAULT from google.api_core.retry import if_exception_type from google.api_core.retry import Retry -from google.cloud._helpers import _to_bytes +from google.cloud._helpers import _to_bytes # type: ignore from google.cloud.bigtable.backup import Backup from google.cloud.bigtable.column_family import _gc_rule_from_pb from google.cloud.bigtable.column_family import ColumnFamily @@ -57,6 +58,12 @@ RETRYABLE_MUTATION_ERRORS = (Aborted, DeadlineExceeded, ServiceUnavailable) """Errors which can be retried during row mutation.""" +RETRYABLE_CODES: Set[int] = set() + +for retryable in RETRYABLE_MUTATION_ERRORS: + if retryable.grpc_status_code is not None: # pragma: NO COVER + RETRYABLE_CODES.add(retryable.grpc_status_code.value[0]) + class _BigtableRetryableError(Exception): """Retry-able error expected by the default retry strategy.""" @@ -1043,10 +1050,6 @@ class _RetryableMutateRowsWorker(object): are retryable, any subsequent call on this callable will be a no-op. """ - RETRY_CODES = tuple( - retryable.grpc_status_code.value[0] for retryable in RETRYABLE_MUTATION_ERRORS - ) - def __init__(self, client, table_name, rows, app_profile_id=None, timeout=None): self.client = client self.table_name = table_name @@ -1083,7 +1086,7 @@ def __call__(self, retry=DEFAULT_RETRY): @staticmethod def _is_retryable(status): - return status is None or status.code in _RetryableMutateRowsWorker.RETRY_CODES + return status is None or status.code in RETRYABLE_CODES def _do_mutate_retryable_rows(self): """Mutate all the rows that are eligible for retry. @@ -1128,7 +1131,7 @@ def _do_mutate_retryable_rows(self): **kwargs ) except RETRYABLE_MUTATION_ERRORS: - # If an exception, considered retryable by `RETRY_CODES`, is + # If an exception, considered retryable by `RETRYABLE_MUTATION_ERRORS`, is # returned from the initial call, consider # it to be retryable. Wrap as a Bigtable Retryable Error. 
raise _BigtableRetryableError diff --git a/packages/google-cloud-bigtable/mypy.ini b/packages/google-cloud-bigtable/mypy.ini new file mode 100644 index 000000000000..9aef441decc5 --- /dev/null +++ b/packages/google-cloud-bigtable/mypy.ini @@ -0,0 +1,6 @@ +[mypy] +python_version = 3.6 +namespace_packages = True + +[mypy-google.protobuf] +ignore_missing_imports = True diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index 15117ee22359..206e146f4430 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -38,6 +38,7 @@ "unit", "system_emulated", "system", + "mypy", "cover", "lint", "lint_setup_py", @@ -72,6 +73,15 @@ def blacken(session): ) +@nox.session(python=DEFAULT_PYTHON_VERSION) +def mypy(session): + """Verify type hints are mypy compatible.""" + session.install("-e", ".") + session.install("mypy", "types-setuptools") + # TODO: also verify types on tests, all of google package + session.run("mypy", "-p", "google.cloud.bigtable", "--no-incremental") + + @nox.session(python=DEFAULT_PYTHON_VERSION) def lint_setup_py(session): """Verify that setup.py is valid (including RST check).""" diff --git a/packages/google-cloud-bigtable/owlbot.py b/packages/google-cloud-bigtable/owlbot.py index bacfdfbd16a7..4386284137df 100644 --- a/packages/google-cloud-bigtable/owlbot.py +++ b/packages/google-cloud-bigtable/owlbot.py @@ -149,10 +149,32 @@ def system_emulated(session): """nox.options.sessions = [ "unit", "system_emulated", - "system",""", + "system", + "mypy",""", ) +s.replace( + "noxfile.py", + """\ +@nox.session\(python=DEFAULT_PYTHON_VERSION\) +def lint_setup_py\(session\): +""", + '''\ +@nox.session(python=DEFAULT_PYTHON_VERSION) +def mypy(session): + """Verify type hints are mypy compatible.""" + session.install("-e", ".") + session.install("mypy", "types-setuptools") + # TODO: also verify types on tests, all of google package + session.run("mypy", "-p", "google.cloud.bigtable", "--no-incremental") + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def lint_setup_py(session): +''', +) + # ---------------------------------------------------------------------------- # Samples templates # ---------------------------------------------------------------------------- diff --git a/packages/google-cloud-bigtable/tests/unit/test_client.py b/packages/google-cloud-bigtable/tests/unit/test_client.py index b80290e4fb7c..f6cd7a5ccc4e 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/test_client.py @@ -112,7 +112,8 @@ def _make_one(self, *args, **kwargs): @mock.patch("os.environ", {}) def test_constructor_defaults(self): - from google.cloud.bigtable.client import _CLIENT_INFO + from google.api_core import client_info + from google.cloud.bigtable import __version__ from google.cloud.bigtable.client import DATA_SCOPE credentials = _make_credentials() @@ -125,7 +126,8 @@ def test_constructor_defaults(self): self.assertIs(client._credentials, credentials.with_scopes.return_value) self.assertFalse(client._read_only) self.assertFalse(client._admin) - self.assertIs(client._client_info, _CLIENT_INFO) + self.assertIsInstance(client._client_info, client_info.ClientInfo) + self.assertEqual(client._client_info.client_library_version, __version__) self.assertIsNone(client._channel) self.assertIsNone(client._emulator_host) self.assertEqual(client.SCOPE, (DATA_SCOPE,)) @@ -399,7 +401,6 @@ def test_project_path_property(self): 
self.assertEqual(client.project_path, project_name) def test_table_data_client_not_initialized(self): - from google.cloud.bigtable.client import _CLIENT_INFO from google.cloud.bigtable_v2 import BigtableClient credentials = _make_credentials() @@ -407,7 +408,6 @@ def test_table_data_client_not_initialized(self): table_data_client = client.table_data_client self.assertIsInstance(table_data_client, BigtableClient) - self.assertIs(client._client_info, _CLIENT_INFO) self.assertIs(client._table_data_client, table_data_client) def test_table_data_client_not_initialized_w_client_info(self): @@ -466,7 +466,6 @@ def test_table_admin_client_not_initialized_no_admin_flag(self): client.table_admin_client() def test_table_admin_client_not_initialized_w_admin_flag(self): - from google.cloud.bigtable.client import _CLIENT_INFO from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient credentials = _make_credentials() @@ -476,7 +475,6 @@ def test_table_admin_client_not_initialized_w_admin_flag(self): table_admin_client = client.table_admin_client self.assertIsInstance(table_admin_client, BigtableTableAdminClient) - self.assertIs(client._client_info, _CLIENT_INFO) self.assertIs(client._table_admin_client, table_admin_client) def test_table_admin_client_not_initialized_w_client_info(self): @@ -537,7 +535,6 @@ def test_instance_admin_client_not_initialized_no_admin_flag(self): client.instance_admin_client() def test_instance_admin_client_not_initialized_w_admin_flag(self): - from google.cloud.bigtable.client import _CLIENT_INFO from google.cloud.bigtable_admin_v2 import BigtableInstanceAdminClient credentials = _make_credentials() @@ -547,7 +544,6 @@ def test_instance_admin_client_not_initialized_w_admin_flag(self): instance_admin_client = client.instance_admin_client self.assertIsInstance(instance_admin_client, BigtableInstanceAdminClient) - self.assertIs(client._client_info, _CLIENT_INFO) self.assertIs(client._instance_admin_client, instance_admin_client) def test_instance_admin_client_not_initialized_w_client_info(self): From 71bfd16acd829d3d12061dc84d08b2dbb0abf491 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Wed, 20 Oct 2021 14:13:11 -0400 Subject: [PATCH 532/892] tests: harden instance admin systests vs timeout / 503 (#453) * tests: retry harder on ServiceUnavailable errors Closes #450 * tests: bump all 30s timeouts to 60s for instance admin systests Closes #451. 
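A minimal sketch of the retry pattern these systests lean on (illustrative only; the backoff parameters and the create_instance_with_retry helper are assumptions, not code from this patch):

    from google.api_core import exceptions, retry

    # Retry transient 503s (ServiceUnavailable) with exponential backoff,
    # capped at roughly two minutes overall.
    retry_503 = retry.Retry(
        predicate=retry.if_exception_type(exceptions.ServiceUnavailable),
        initial=1.0,
        maximum=30.0,
        multiplier=2.0,
        deadline=120.0,
    )

    def create_instance_with_retry(instance, clusters):
        # Issue the admin call under the retry policy, then give the
        # long-running operation up to 60 seconds to finish.
        operation = retry_503(instance.create)(clusters=clusters)
        return operation.result(timeout=60)
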
--- .../tests/system/_helpers.py | 2 +- .../tests/system/test_instance_admin.py | 20 +++++++++---------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/packages/google-cloud-bigtable/tests/system/_helpers.py b/packages/google-cloud-bigtable/tests/system/_helpers.py index f6895a51f239..ab4b54b05b1b 100644 --- a/packages/google-cloud-bigtable/tests/system/_helpers.py +++ b/packages/google-cloud-bigtable/tests/system/_helpers.py @@ -33,7 +33,7 @@ def _retry_on_unavailable(exc): retry_grpc_unavailable = retry.RetryErrors( - core_exceptions.GrpcRendezvous, error_predicate=_retry_on_unavailable, + core_exceptions.GrpcRendezvous, error_predicate=_retry_on_unavailable, max_tries=9, ) diff --git a/packages/google-cloud-bigtable/tests/system/test_instance_admin.py b/packages/google-cloud-bigtable/tests/system/test_instance_admin.py index c3e419a112e9..c2cf21291e6e 100644 --- a/packages/google-cloud-bigtable/tests/system/test_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/system/test_instance_admin.py @@ -79,7 +79,7 @@ def _modify_app_profile_helper( ) operation = app_profile.update(ignore_warnings=ignore_warnings) - operation.result(timeout=30) + operation.result(timeout=60) alt_profile = instance.app_profile(app_profile_id) alt_profile.reload() @@ -155,7 +155,7 @@ def test_instance_create_prod( operation = instance.create(clusters=[cluster]) instances_to_delete.append(instance) - operation.result(timeout=30) # Ensure the operation completes. + operation.result(timeout=60) # Ensure the operation completes. assert instance.type_ is None # Create a new instance instance and make sure it is the same. @@ -186,7 +186,7 @@ def test_instance_create_development( operation = instance.create(clusters=[cluster]) instances_to_delete.append(instance) - operation.result(timeout=30) # Ensure the operation completes. + operation.result(timeout=60) # Ensure the operation completes. # Create a new instance instance and make sure it is the same. instance_alt = admin_client.instance(alt_instance_id) @@ -496,7 +496,7 @@ def test_instance_update_display_name_and_labels( admin_instance_populated.labels = new_labels operation = admin_instance_populated.update() - operation.result(timeout=30) # ensure the operation completes. + operation.result(timeout=60) # ensure the operation completes. # Create a new instance instance and reload it. instance_alt = admin_client.instance(admin_instance_id, labels={}) @@ -513,7 +513,7 @@ def test_instance_update_display_name_and_labels( admin_instance_populated.display_name = old_display_name admin_instance_populated.labels = instance_labels operation = admin_instance_populated.update() - operation.result(timeout=30) # ensure the operation completes. + operation.result(timeout=60) # ensure the operation completes. def test_instance_update_w_type( @@ -536,12 +536,12 @@ def test_instance_update_w_type( operation = instance.create(clusters=[cluster]) instances_to_delete.append(instance) - operation.result(timeout=30) # Ensure the operation completes. + operation.result(timeout=60) # Ensure the operation completes. instance.display_name = None instance.type_ = enums.Instance.Type.PRODUCTION operation = instance.update() - operation.result(timeout=30) # ensure the operation completes. + operation.result(timeout=60) # ensure the operation completes. # Create a new instance instance and reload it. 
instance_alt = admin_client.instance(alt_instance_id) @@ -573,7 +573,7 @@ def test_cluster_create( default_storage_type=(enums.StorageType.SSD), ) operation = cluster_2.create() - operation.result(timeout=30) # Ensure the operation completes. + operation.result(timeout=60) # Ensure the operation completes. # Create a new object instance, reload and make sure it is the same. alt_cluster = admin_instance_populated.cluster(alt_cluster_id) @@ -603,7 +603,7 @@ def test_cluster_update( admin_cluster.serve_nodes = new_serve_nodes operation = admin_cluster.update() - operation.result(timeout=30) # Ensure the operation completes. + operation.result(timeout=60) # Ensure the operation completes. # Create a new cluster instance and reload it. alt_cluster = admin_instance_populated.cluster(admin_cluster_id) @@ -613,4 +613,4 @@ def test_cluster_update( # Put the cluster back the way it was for the other test cases. admin_cluster.serve_nodes = serve_nodes operation = admin_cluster.update() - operation.result(timeout=30) # Ensure the operation completes. + operation.result(timeout=60) # Ensure the operation completes. From 73a91a4105dd760a9a39e9ab9002bf9ce49c1166 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Wed, 20 Oct 2021 14:13:53 -0400 Subject: [PATCH 533/892] chore(samples): harden 'tableadmin' samples against 429/503 errors (#418) Closes #417. --- .../samples/tableadmin/requirements-test.txt | 1 + .../samples/tableadmin/tableadmin_test.py | 13 +++++++++---- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt index 95ea1e6a02b0..2ff95fe08b1d 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt @@ -1 +1,2 @@ pytest==6.2.4 +google-cloud-testutils==1.0.0 diff --git a/packages/google-cloud-bigtable/samples/tableadmin/tableadmin_test.py b/packages/google-cloud-bigtable/samples/tableadmin/tableadmin_test.py index c0ef09d12fae..b001ce076a18 100755 --- a/packages/google-cloud-bigtable/samples/tableadmin/tableadmin_test.py +++ b/packages/google-cloud-bigtable/samples/tableadmin/tableadmin_test.py @@ -16,6 +16,9 @@ import os import uuid +from google.api_core import exceptions +from test_utils.retry import RetryErrors + from tableadmin import create_table from tableadmin import delete_table from tableadmin import run_table_operations @@ -24,11 +27,13 @@ BIGTABLE_INSTANCE = os.environ['BIGTABLE_INSTANCE'] TABLE_ID_FORMAT = 'tableadmin-test-{}' +retry_429_503 = RetryErrors(exceptions.TooManyRequests, exceptions.ServiceUnavailable) + def test_run_table_operations(capsys): table_id = TABLE_ID_FORMAT.format(uuid.uuid4().hex[:8]) - run_table_operations(PROJECT, BIGTABLE_INSTANCE, table_id) + retry_429_503(run_table_operations)(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() assert 'Creating the ' + table_id + ' table.' in out @@ -48,14 +53,14 @@ def test_run_table_operations(capsys): assert 'Delete a column family cf2...' in out assert 'Column family cf2 deleted successfully.' 
in out - delete_table(PROJECT, BIGTABLE_INSTANCE, table_id) + retry_429_503(delete_table)(PROJECT, BIGTABLE_INSTANCE, table_id) def test_delete_table(capsys): table_id = TABLE_ID_FORMAT.format(uuid.uuid4().hex[:8]) - create_table(PROJECT, BIGTABLE_INSTANCE, table_id) + retry_429_503(create_table)(PROJECT, BIGTABLE_INSTANCE, table_id) - delete_table(PROJECT, BIGTABLE_INSTANCE, table_id) + retry_429_503(delete_table)(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() assert 'Table ' + table_id + ' exists.' in out From 7ae27dd5a3e9de1fd9e5a81d5ea281bfcfae698c Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Wed, 20 Oct 2021 16:16:38 -0400 Subject: [PATCH 534/892] tests: harden instance admin samples against timeouts (#452) Closes #383. Closes #434. --- .../instanceadmin/test_instanceadmin.py | 51 ++++++++++++------- 1 file changed, 32 insertions(+), 19 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/test_instanceadmin.py b/packages/google-cloud-bigtable/samples/instanceadmin/test_instanceadmin.py index 929da10e44c4..b0041294bf1c 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/test_instanceadmin.py +++ b/packages/google-cloud-bigtable/samples/instanceadmin/test_instanceadmin.py @@ -97,17 +97,23 @@ def test_run_instance_operations(capsys, dispose_of): def test_delete_instance(capsys, dispose_of): - dispose_of(INSTANCE) + from concurrent.futures import TimeoutError - # Can't delete it, it doesn't exist - instanceadmin.delete_instance(PROJECT, INSTANCE) - out = capsys.readouterr().out - assert "Deleting instance" in out - assert f"Instance {INSTANCE} does not exist" in out + @backoff.on_exception(backoff.expo, TimeoutError) + def _set_up_instance(): + dispose_of(INSTANCE) - # Ok, create it then - instanceadmin.run_instance_operations(PROJECT, INSTANCE, CLUSTER1) - capsys.readouterr() # throw away output + # Can't delete it, it doesn't exist + instanceadmin.delete_instance(PROJECT, INSTANCE) + out = capsys.readouterr().out + assert "Deleting instance" in out + assert f"Instance {INSTANCE} does not exist" in out + + # Ok, create it then + instanceadmin.run_instance_operations(PROJECT, INSTANCE, CLUSTER1) + capsys.readouterr() # throw away output + + _set_up_instance() # Now delete it instanceadmin.delete_instance(PROJECT, INSTANCE) @@ -117,22 +123,29 @@ def test_delete_instance(capsys, dispose_of): def test_add_and_delete_cluster(capsys, dispose_of): - dispose_of(INSTANCE) + from concurrent.futures import TimeoutError - # This won't work, because the instance isn't created yet - instanceadmin.add_cluster(PROJECT, INSTANCE, CLUSTER2) - out = capsys.readouterr().out - assert f"Instance {INSTANCE} does not exist" in out + @backoff.on_exception(backoff.expo, TimeoutError) + def _set_up_instance(): + dispose_of(INSTANCE) - # Get the instance created - instanceadmin.run_instance_operations(PROJECT, INSTANCE, CLUSTER1) - capsys.readouterr() # throw away output + # This won't work, because the instance isn't created yet + instanceadmin.add_cluster(PROJECT, INSTANCE, CLUSTER2) + out = capsys.readouterr().out + assert f"Instance {INSTANCE} does not exist" in out + + # Get the instance created + instanceadmin.run_instance_operations(PROJECT, INSTANCE, CLUSTER1) + capsys.readouterr() # throw away output + + _set_up_instance() # Add a cluster to that instance # Avoid failing for "instance is currently being changed" by # applying an exponential backoff - w_backoff = backoff.on_exception(backoff.expo, exceptions.ServiceUnavailable) - 
w_backoff(instanceadmin.add_cluster)(PROJECT, INSTANCE, CLUSTER2) + backoff_503 = backoff.on_exception(backoff.expo, exceptions.ServiceUnavailable) + + backoff_503(instanceadmin.add_cluster)(PROJECT, INSTANCE, CLUSTER2) out = capsys.readouterr().out assert f"Adding cluster to instance {INSTANCE}" in out assert "Listing clusters..." in out From ccc7d98b9770216b3ac3f6e06ba0f75e0a1cdc83 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 21 Oct 2021 14:56:24 -0400 Subject: [PATCH 535/892] feat: add 'Instance.create_time' field (#449) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: Add create_time to Instance Committer: @gdcolella PiperOrigin-RevId: 404267819 Source-Link: https://github.com/googleapis/googleapis/commit/324f036d9dcc21318d89172ceaba5e0fd2377271 Source-Link: https://github.com/googleapis/googleapis-gen/commit/2fada43b275eaaadd279838baf1120bddcffc762 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiMmZhZGE0M2IyNzVlYWFhZGQyNzk4MzhiYWYxMTIwYmRkY2ZmYzc2MiJ9 * 🦉 Updates from OwlBot See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md Co-authored-by: Owl Bot --- .../services/bigtable_instance_admin/async_client.py | 1 + .../services/bigtable_instance_admin/client.py | 1 + .../google/cloud/bigtable_admin_v2/types/instance.py | 7 +++++++ .../scripts/fixup_bigtable_admin_v2_keywords.py | 2 +- .../bigtable_admin_v2/test_bigtable_instance_admin.py | 1 + 5 files changed, 11 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py index b0290a66d2ef..570caa4f5e97 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ -36,6 +36,7 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore from .transports.base import BigtableInstanceAdminTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import BigtableInstanceAdminGrpcAsyncIOTransport from .client import BigtableInstanceAdminClient diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py index 5ec1c2ed6188..6c9e721ae619 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -40,6 +40,7 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore from .transports.base import BigtableInstanceAdminTransport, DEFAULT_CLIENT_INFO from .transports.grpc import BigtableInstanceAdminGrpcTransport from .transports.grpc_asyncio import BigtableInstanceAdminGrpcAsyncIOTransport diff --git 
a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py index f1ba750e122a..f8bef1865d49 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py @@ -16,6 +16,7 @@ import proto # type: ignore from google.cloud.bigtable_admin_v2.types import common +from google.protobuf import timestamp_pb2 # type: ignore __protobuf__ = proto.module( @@ -57,6 +58,11 @@ class Instance(proto.Message): - No more than 64 labels can be associated with a given resource. - Keys and values must both be under 128 bytes. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. A server-assigned timestamp representing when + this Instance was created. For instances created before this + field was added (August 2021), this value is + ``seconds: 0, nanos: 1``. """ class State(proto.Enum): @@ -76,6 +82,7 @@ class Type(proto.Enum): state = proto.Field(proto.ENUM, number=3, enum=State,) type_ = proto.Field(proto.ENUM, number=4, enum=Type,) labels = proto.MapField(proto.STRING, proto.STRING, number=5,) + create_time = proto.Field(proto.MESSAGE, number=7, message=timestamp_pb2.Timestamp,) class Cluster(proto.Message): diff --git a/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py b/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py index c8e998f88f00..ff285085e6de 100644 --- a/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py +++ b/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py @@ -76,7 +76,7 @@ class bigtable_adminCallTransformer(cst.CSTTransformer): 'update_app_profile': ('app_profile', 'update_mask', 'ignore_warnings', ), 'update_backup': ('backup', 'update_mask', ), 'update_cluster': ('serve_nodes', 'name', 'location', 'state', 'default_storage_type', 'encryption_config', ), - 'update_instance': ('display_name', 'name', 'state', 'type_', 'labels', ), + 'update_instance': ('display_name', 'name', 'state', 'type_', 'labels', 'create_time', ), } def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index f7fb223ee063..b658c7361df5 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -56,6 +56,7 @@ from google.longrunning import operations_pb2 from google.oauth2 import service_account from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore from google.type import expr_pb2 # type: ignore import google.auth From 7f1b8ec6db8e29276967d8aad72fcdeb9f545280 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Mon, 25 Oct 2021 21:30:29 -0400 Subject: [PATCH 536/892] chore(python): Push cloud library docs to Cloud RAD (#461) Source-Link: https://github.com/googleapis/synthtool/commit/694118b039b09551fb5d445fceb361a7dbb06400 Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:ec49167c606648a063d1222220b48119c912562849a0528f35bfb592a9f72737 Co-authored-by: Owl 
Bot --- packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml | 2 +- packages/google-cloud-bigtable/.kokoro/docs/common.cfg | 1 + packages/google-cloud-bigtable/noxfile.py | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 7d98291cc35f..cb89b2e326b7 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -1,3 +1,3 @@ docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:58f73ba196b5414782605236dd0712a73541b44ff2ff4d3a36ec41092dd6fa5b + digest: sha256:ec49167c606648a063d1222220b48119c912562849a0528f35bfb592a9f72737 diff --git a/packages/google-cloud-bigtable/.kokoro/docs/common.cfg b/packages/google-cloud-bigtable/.kokoro/docs/common.cfg index 08aac45ad5ad..9b8937c571bb 100644 --- a/packages/google-cloud-bigtable/.kokoro/docs/common.cfg +++ b/packages/google-cloud-bigtable/.kokoro/docs/common.cfg @@ -30,6 +30,7 @@ env_vars: { env_vars: { key: "V2_STAGING_BUCKET" + # Push google cloud library docs to the Cloud RAD bucket `docs-staging-v2` value: "docs-staging-v2" } diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index 206e146f4430..11ec0e948aea 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -112,7 +112,7 @@ def default(session): "py.test", "--quiet", f"--junitxml=unit_{session.python}_sponge_log.xml", - "--cov=google/cloud", + "--cov=google", "--cov=tests/unit", "--cov-append", "--cov-config=.coveragerc", From 7a4682d1e11ac1222e04ff6ddc9cacfab0f9e72d Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Fri, 29 Oct 2021 14:21:06 -0400 Subject: [PATCH 537/892] ci: work around 'python-api-core#297' (#465) --- packages/google-cloud-bigtable/tests/unit/test_row_data.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_data.py b/packages/google-cloud-bigtable/tests/unit/test_row_data.py index a95cf2ec40f7..b146abaa82fb 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_data.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_data.py @@ -282,6 +282,9 @@ def code(self): def details(self): return "Testing" + def trailing_metadata(self): + return None + return TestingException(exception) def test_w_miss(self): From a705ae48b261466ae9fcd736f5788da545ed057c Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Mon, 1 Nov 2021 15:02:12 +0000 Subject: [PATCH 538/892] chore: use gapic-generator-python 0.53.4 (#467) - [ ] Regenerate this pull request now. 
docs: list oneofs in docstring fix(deps): require google-api-core >= 1.28.0 fix(deps): drop packaging dependency committer: busunkim96@ PiperOrigin-RevId: 406468269 Source-Link: https://github.com/googleapis/googleapis/commit/83d81b0c8fc22291a13398d6d77f02dc97a5b6f4 Source-Link: https://github.com/googleapis/googleapis-gen/commit/2ff001fbacb9e77e71d734de5f955c05fdae8526 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiMmZmMDAxZmJhY2I5ZTc3ZTcxZDczNGRlNWY5NTVjMDVmZGFlODUyNiJ9 --- .../bigtable_instance_admin/async_client.py | 120 +++++++-------- .../bigtable_instance_admin/client.py | 40 ++--- .../transports/base.py | 37 +---- .../transports/grpc.py | 2 +- .../transports/grpc_asyncio.py | 3 +- .../bigtable_table_admin/async_client.py | 140 +++++++++--------- .../services/bigtable_table_admin/client.py | 46 +++--- .../bigtable_table_admin/transports/base.py | 37 +---- .../bigtable_table_admin/transports/grpc.py | 2 +- .../transports/grpc_asyncio.py | 3 +- .../types/bigtable_table_admin.py | 27 ++++ .../cloud/bigtable_admin_v2/types/instance.py | 9 ++ .../cloud/bigtable_admin_v2/types/table.py | 14 ++ .../services/bigtable/async_client.py | 40 ++--- .../bigtable_v2/services/bigtable/client.py | 14 +- .../services/bigtable/transports/base.py | 35 +---- .../bigtable/transports/grpc_asyncio.py | 1 - .../cloud/bigtable_v2/types/bigtable.py | 9 ++ .../google/cloud/bigtable_v2/types/data.py | 79 ++++++++++ packages/google-cloud-bigtable/setup.py | 3 +- .../testing/constraints-3.6.txt | 4 +- .../test_bigtable_instance_admin.py | 115 ++------------ .../test_bigtable_table_admin.py | 112 ++------------ .../unit/gapic/bigtable_v2/test_bigtable.py | 109 ++------------ 24 files changed, 385 insertions(+), 616 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py index 570caa4f5e97..ed2a079c8a33 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ -19,13 +19,15 @@ from typing import Dict, Sequence, Tuple, Type, Union import pkg_resources -import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core.client_options import ClientOptions # type: ignore from google.api_core import exceptions as core_exceptions # type: ignore from google.api_core import gapic_v1 # type: ignore from google.api_core import retry as retries # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +OptionalRetry = Union[retries.Retry, object] + from google.api_core import operation # type: ignore from google.api_core import operation_async # type: ignore from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import pagers @@ -189,7 +191,7 @@ def __init__( async def create_instance( self, - request: bigtable_instance_admin.CreateInstanceRequest = None, + request: Union[bigtable_instance_admin.CreateInstanceRequest, dict] = None, *, parent: str = None, instance_id: str = None, @@ -197,14 +199,14 @@ async def create_instance( clusters: Sequence[ bigtable_instance_admin.CreateInstanceRequest.ClustersEntry ] = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: 
float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Create an instance within a project. Args: - request (:class:`google.cloud.bigtable_admin_v2.types.CreateInstanceRequest`): + request (Union[google.cloud.bigtable_admin_v2.types.CreateInstanceRequest, dict]): The request object. Request message for BigtableInstanceAdmin.CreateInstance. parent (:class:`str`): @@ -312,17 +314,17 @@ async def create_instance( async def get_instance( self, - request: bigtable_instance_admin.GetInstanceRequest = None, + request: Union[bigtable_instance_admin.GetInstanceRequest, dict] = None, *, name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> instance.Instance: r"""Gets information about an instance. Args: - request (:class:`google.cloud.bigtable_admin_v2.types.GetInstanceRequest`): + request (Union[google.cloud.bigtable_admin_v2.types.GetInstanceRequest, dict]): The request object. Request message for BigtableInstanceAdmin.GetInstance. name (:class:`str`): @@ -397,17 +399,17 @@ async def get_instance( async def list_instances( self, - request: bigtable_instance_admin.ListInstancesRequest = None, + request: Union[bigtable_instance_admin.ListInstancesRequest, dict] = None, *, parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> bigtable_instance_admin.ListInstancesResponse: r"""Lists information about instances in a project. Args: - request (:class:`google.cloud.bigtable_admin_v2.types.ListInstancesRequest`): + request (Union[google.cloud.bigtable_admin_v2.types.ListInstancesRequest, dict]): The request object. Request message for BigtableInstanceAdmin.ListInstances. parent (:class:`str`): @@ -479,9 +481,9 @@ async def list_instances( async def update_instance( self, - request: instance.Instance = None, + request: Union[instance.Instance, dict] = None, *, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> instance.Instance: @@ -491,7 +493,7 @@ async def update_instance( PartialUpdateInstance. Args: - request (:class:`google.cloud.bigtable_admin_v2.types.Instance`): + request (Union[google.cloud.bigtable_admin_v2.types.Instance, dict]): The request object. A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and the resources that serve them. All tables in an instance are @@ -548,11 +550,13 @@ async def update_instance( async def partial_update_instance( self, - request: bigtable_instance_admin.PartialUpdateInstanceRequest = None, + request: Union[ + bigtable_instance_admin.PartialUpdateInstanceRequest, dict + ] = None, *, instance: gba_instance.Instance = None, update_mask: field_mask_pb2.FieldMask = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: @@ -561,7 +565,7 @@ async def partial_update_instance( preferred way to update an Instance. Args: - request (:class:`google.cloud.bigtable_admin_v2.types.PartialUpdateInstanceRequest`): + request (Union[google.cloud.bigtable_admin_v2.types.PartialUpdateInstanceRequest, dict]): The request object. Request message for BigtableInstanceAdmin.PartialUpdateInstance. 
instance (:class:`google.cloud.bigtable_admin_v2.types.Instance`): @@ -657,17 +661,17 @@ async def partial_update_instance( async def delete_instance( self, - request: bigtable_instance_admin.DeleteInstanceRequest = None, + request: Union[bigtable_instance_admin.DeleteInstanceRequest, dict] = None, *, name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Delete an instance from a project. Args: - request (:class:`google.cloud.bigtable_admin_v2.types.DeleteInstanceRequest`): + request (Union[google.cloud.bigtable_admin_v2.types.DeleteInstanceRequest, dict]): The request object. Request message for BigtableInstanceAdmin.DeleteInstance. name (:class:`str`): @@ -722,19 +726,19 @@ async def delete_instance( async def create_cluster( self, - request: bigtable_instance_admin.CreateClusterRequest = None, + request: Union[bigtable_instance_admin.CreateClusterRequest, dict] = None, *, parent: str = None, cluster_id: str = None, cluster: instance.Cluster = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Creates a cluster within an instance. Args: - request (:class:`google.cloud.bigtable_admin_v2.types.CreateClusterRequest`): + request (Union[google.cloud.bigtable_admin_v2.types.CreateClusterRequest, dict]): The request object. Request message for BigtableInstanceAdmin.CreateCluster. parent (:class:`str`): @@ -828,17 +832,17 @@ async def create_cluster( async def get_cluster( self, - request: bigtable_instance_admin.GetClusterRequest = None, + request: Union[bigtable_instance_admin.GetClusterRequest, dict] = None, *, name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> instance.Cluster: r"""Gets information about a cluster. Args: - request (:class:`google.cloud.bigtable_admin_v2.types.GetClusterRequest`): + request (Union[google.cloud.bigtable_admin_v2.types.GetClusterRequest, dict]): The request object. Request message for BigtableInstanceAdmin.GetCluster. name (:class:`str`): @@ -912,17 +916,17 @@ async def get_cluster( async def list_clusters( self, - request: bigtable_instance_admin.ListClustersRequest = None, + request: Union[bigtable_instance_admin.ListClustersRequest, dict] = None, *, parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> bigtable_instance_admin.ListClustersResponse: r"""Lists information about clusters in an instance. Args: - request (:class:`google.cloud.bigtable_admin_v2.types.ListClustersRequest`): + request (Union[google.cloud.bigtable_admin_v2.types.ListClustersRequest, dict]): The request object. Request message for BigtableInstanceAdmin.ListClusters. parent (:class:`str`): @@ -996,16 +1000,16 @@ async def list_clusters( async def update_cluster( self, - request: instance.Cluster = None, + request: Union[instance.Cluster, dict] = None, *, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Updates a cluster within an instance. 
Args: - request (:class:`google.cloud.bigtable_admin_v2.types.Cluster`): + request (Union[google.cloud.bigtable_admin_v2.types.Cluster, dict]): The request object. A resizable group of nodes in a particular cloud location, capable of serving all [Tables][google.bigtable.admin.v2.Table] in the parent @@ -1069,17 +1073,17 @@ async def update_cluster( async def delete_cluster( self, - request: bigtable_instance_admin.DeleteClusterRequest = None, + request: Union[bigtable_instance_admin.DeleteClusterRequest, dict] = None, *, name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes a cluster from an instance. Args: - request (:class:`google.cloud.bigtable_admin_v2.types.DeleteClusterRequest`): + request (Union[google.cloud.bigtable_admin_v2.types.DeleteClusterRequest, dict]): The request object. Request message for BigtableInstanceAdmin.DeleteCluster. name (:class:`str`): @@ -1134,19 +1138,19 @@ async def delete_cluster( async def create_app_profile( self, - request: bigtable_instance_admin.CreateAppProfileRequest = None, + request: Union[bigtable_instance_admin.CreateAppProfileRequest, dict] = None, *, parent: str = None, app_profile_id: str = None, app_profile: instance.AppProfile = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> instance.AppProfile: r"""Creates an app profile within an instance. Args: - request (:class:`google.cloud.bigtable_admin_v2.types.CreateAppProfileRequest`): + request (Union[google.cloud.bigtable_admin_v2.types.CreateAppProfileRequest, dict]): The request object. Request message for BigtableInstanceAdmin.CreateAppProfile. parent (:class:`str`): @@ -1229,17 +1233,17 @@ async def create_app_profile( async def get_app_profile( self, - request: bigtable_instance_admin.GetAppProfileRequest = None, + request: Union[bigtable_instance_admin.GetAppProfileRequest, dict] = None, *, name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> instance.AppProfile: r"""Gets information about an app profile. Args: - request (:class:`google.cloud.bigtable_admin_v2.types.GetAppProfileRequest`): + request (Union[google.cloud.bigtable_admin_v2.types.GetAppProfileRequest, dict]): The request object. Request message for BigtableInstanceAdmin.GetAppProfile. name (:class:`str`): @@ -1312,17 +1316,17 @@ async def get_app_profile( async def list_app_profiles( self, - request: bigtable_instance_admin.ListAppProfilesRequest = None, + request: Union[bigtable_instance_admin.ListAppProfilesRequest, dict] = None, *, parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListAppProfilesAsyncPager: r"""Lists information about app profiles in an instance. Args: - request (:class:`google.cloud.bigtable_admin_v2.types.ListAppProfilesRequest`): + request (Union[google.cloud.bigtable_admin_v2.types.ListAppProfilesRequest, dict]): The request object. Request message for BigtableInstanceAdmin.ListAppProfiles. 
parent (:class:`str`): @@ -1406,18 +1410,18 @@ async def list_app_profiles( async def update_app_profile( self, - request: bigtable_instance_admin.UpdateAppProfileRequest = None, + request: Union[bigtable_instance_admin.UpdateAppProfileRequest, dict] = None, *, app_profile: instance.AppProfile = None, update_mask: field_mask_pb2.FieldMask = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Updates an app profile within an instance. Args: - request (:class:`google.cloud.bigtable_admin_v2.types.UpdateAppProfileRequest`): + request (Union[google.cloud.bigtable_admin_v2.types.UpdateAppProfileRequest, dict]): The request object. Request message for BigtableInstanceAdmin.UpdateAppProfile. app_profile (:class:`google.cloud.bigtable_admin_v2.types.AppProfile`): @@ -1510,17 +1514,17 @@ async def update_app_profile( async def delete_app_profile( self, - request: bigtable_instance_admin.DeleteAppProfileRequest = None, + request: Union[bigtable_instance_admin.DeleteAppProfileRequest, dict] = None, *, name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes an app profile from an instance. Args: - request (:class:`google.cloud.bigtable_admin_v2.types.DeleteAppProfileRequest`): + request (Union[google.cloud.bigtable_admin_v2.types.DeleteAppProfileRequest, dict]): The request object. Request message for BigtableInstanceAdmin.DeleteAppProfile. name (:class:`str`): @@ -1575,10 +1579,10 @@ async def delete_app_profile( async def get_iam_policy( self, - request: iam_policy_pb2.GetIamPolicyRequest = None, + request: Union[iam_policy_pb2.GetIamPolicyRequest, dict] = None, *, resource: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> policy_pb2.Policy: @@ -1587,7 +1591,7 @@ async def get_iam_policy( but does not have a policy set. Args: - request (:class:`google.iam.v1.iam_policy_pb2.GetIamPolicyRequest`): + request (Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]): The request object. Request message for `GetIamPolicy` method. resource (:class:`str`): @@ -1713,10 +1717,10 @@ async def get_iam_policy( async def set_iam_policy( self, - request: iam_policy_pb2.SetIamPolicyRequest = None, + request: Union[iam_policy_pb2.SetIamPolicyRequest, dict] = None, *, resource: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> policy_pb2.Policy: @@ -1724,7 +1728,7 @@ async def set_iam_policy( resource. Replaces any existing policy. Args: - request (:class:`google.iam.v1.iam_policy_pb2.SetIamPolicyRequest`): + request (Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]): The request object. Request message for `SetIamPolicy` method. 
resource (:class:`str`): @@ -1840,11 +1844,11 @@ async def set_iam_policy( async def test_iam_permissions( self, - request: iam_policy_pb2.TestIamPermissionsRequest = None, + request: Union[iam_policy_pb2.TestIamPermissionsRequest, dict] = None, *, resource: str = None, permissions: Sequence[str] = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: @@ -1852,7 +1856,7 @@ async def test_iam_permissions( specified instance resource. Args: - request (:class:`google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest`): + request (Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]): The request object. Request message for `TestIamPermissions` method. resource (:class:`str`): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py index 6c9e721ae619..b1e168aad42c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -30,6 +30,8 @@ from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore +OptionalRetry = Union[retries.Retry, object] + from google.api_core import operation # type: ignore from google.api_core import operation_async # type: ignore from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import pagers @@ -422,7 +424,7 @@ def create_instance( clusters: Sequence[ bigtable_instance_admin.CreateInstanceRequest.ClustersEntry ] = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: @@ -539,7 +541,7 @@ def get_instance( request: Union[bigtable_instance_admin.GetInstanceRequest, dict] = None, *, name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> instance.Instance: @@ -614,7 +616,7 @@ def list_instances( request: Union[bigtable_instance_admin.ListInstancesRequest, dict] = None, *, parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> bigtable_instance_admin.ListInstancesResponse: @@ -685,7 +687,7 @@ def update_instance( self, request: Union[instance.Instance, dict] = None, *, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> instance.Instance: @@ -749,7 +751,7 @@ def partial_update_instance( *, instance: gba_instance.Instance = None, update_mask: field_mask_pb2.FieldMask = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: @@ -849,7 +851,7 @@ def delete_instance( request: Union[bigtable_instance_admin.DeleteInstanceRequest, dict] = None, *, name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: 
OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: @@ -916,7 +918,7 @@ def create_cluster( parent: str = None, cluster_id: str = None, cluster: instance.Cluster = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: @@ -1020,7 +1022,7 @@ def get_cluster( request: Union[bigtable_instance_admin.GetClusterRequest, dict] = None, *, name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> instance.Cluster: @@ -1094,7 +1096,7 @@ def list_clusters( request: Union[bigtable_instance_admin.ListClustersRequest, dict] = None, *, parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> bigtable_instance_admin.ListClustersResponse: @@ -1167,7 +1169,7 @@ def update_cluster( self, request: Union[instance.Cluster, dict] = None, *, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: @@ -1232,7 +1234,7 @@ def delete_cluster( request: Union[bigtable_instance_admin.DeleteClusterRequest, dict] = None, *, name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: @@ -1299,7 +1301,7 @@ def create_app_profile( parent: str = None, app_profile_id: str = None, app_profile: instance.AppProfile = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> instance.AppProfile: @@ -1392,7 +1394,7 @@ def get_app_profile( request: Union[bigtable_instance_admin.GetAppProfileRequest, dict] = None, *, name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> instance.AppProfile: @@ -1465,7 +1467,7 @@ def list_app_profiles( request: Union[bigtable_instance_admin.ListAppProfilesRequest, dict] = None, *, parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListAppProfilesPager: @@ -1550,7 +1552,7 @@ def update_app_profile( *, app_profile: instance.AppProfile = None, update_mask: field_mask_pb2.FieldMask = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: @@ -1643,7 +1645,7 @@ def delete_app_profile( request: Union[bigtable_instance_admin.DeleteAppProfileRequest, dict] = None, *, name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: @@ -1708,7 +1710,7 @@ def get_iam_policy( request: Union[iam_policy_pb2.GetIamPolicyRequest, dict] = None, *, resource: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: 
OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> policy_pb2.Policy: @@ -1835,7 +1837,7 @@ def set_iam_policy( request: Union[iam_policy_pb2.SetIamPolicyRequest, dict] = None, *, resource: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> policy_pb2.Policy: @@ -1962,7 +1964,7 @@ def test_iam_permissions( *, resource: str = None, permissions: Sequence[str] = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py index 10dac01a2b4a..c33e9a49c89b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py @@ -15,7 +15,6 @@ # import abc from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import packaging.version import pkg_resources import google.auth # type: ignore @@ -43,15 +42,6 @@ except pkg_resources.DistributionNotFound: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() -try: - # google.auth.__version__ was added in 1.26.0 - _GOOGLE_AUTH_VERSION = google.auth.__version__ -except AttributeError: - try: # try pkg_resources if it is available - _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version - except pkg_resources.DistributionNotFound: # pragma: NO COVER - _GOOGLE_AUTH_VERSION = None - class BigtableInstanceAdminTransport(abc.ABC): """Abstract transport class for BigtableInstanceAdmin.""" @@ -109,7 +99,7 @@ def __init__( host += ":443" self._host = host - scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} # Save the scopes. self._scopes = scopes @@ -142,29 +132,6 @@ def __init__( # Save the credentials. self._credentials = credentials - # TODO(busunkim): This method is in the base transport - # to avoid duplicating code across the transport classes. These functions - # should be deleted once the minimum required versions of google-auth is increased. - - # TODO: Remove this function once google-auth >= 1.25.0 is required - @classmethod - def _get_scopes_kwargs( - cls, host: str, scopes: Optional[Sequence[str]] - ) -> Dict[str, Optional[Sequence[str]]]: - """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version""" - - scopes_kwargs = {} - - if _GOOGLE_AUTH_VERSION and ( - packaging.version.parse(_GOOGLE_AUTH_VERSION) - >= packaging.version.parse("1.25.0") - ): - scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES} - else: - scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES} - - return scopes_kwargs - def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. 
self._wrapped_methods = { @@ -381,7 +348,7 @@ def close(self): raise NotImplementedError() @property - def operations_client(self) -> operations_v1.OperationsClient: + def operations_client(self): """Return the client designed to process long-running operations.""" raise NotImplementedError() diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py index 40c722c7ebff..0ffcb7e3ba66 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py @@ -119,7 +119,7 @@ def __init__( self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials self._stubs: Dict[str, Callable] = {} - self._operations_client = None + self._operations_client: Optional[operations_v1.OperationsClient] = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py index 70a0e87950e9..a94088e10ff1 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py @@ -21,7 +21,6 @@ from google.api_core import operations_v1 # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore -import packaging.version import grpc # type: ignore from grpc.experimental import aio # type: ignore @@ -166,7 +165,7 @@ def __init__( self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials self._stubs: Dict[str, Callable] = {} - self._operations_client = None + self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index 5a5a3f039235..d51852a50ef5 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -19,13 +19,15 @@ from typing import Dict, Sequence, Tuple, Type, Union import pkg_resources -import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core.client_options import ClientOptions # type: ignore from google.api_core import exceptions as core_exceptions # type: ignore from google.api_core import gapic_v1 # type: ignore from google.api_core import retry as retries # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +OptionalRetry = Union[retries.Retry, object] + from google.api_core import operation # type: ignore from 
google.api_core import operation_async # type: ignore from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import pagers @@ -191,12 +193,12 @@ def __init__( async def create_table( self, - request: bigtable_table_admin.CreateTableRequest = None, + request: Union[bigtable_table_admin.CreateTableRequest, dict] = None, *, parent: str = None, table_id: str = None, table: gba_table.Table = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gba_table.Table: @@ -205,7 +207,7 @@ async def create_table( column families, specified in the request. Args: - request (:class:`google.cloud.bigtable_admin_v2.types.CreateTableRequest`): + request (Union[google.cloud.bigtable_admin_v2.types.CreateTableRequest, dict]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] parent (:class:`str`): @@ -287,12 +289,14 @@ async def create_table( async def create_table_from_snapshot( self, - request: bigtable_table_admin.CreateTableFromSnapshotRequest = None, + request: Union[ + bigtable_table_admin.CreateTableFromSnapshotRequest, dict + ] = None, *, parent: str = None, table_id: str = None, source_snapshot: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: @@ -307,7 +311,7 @@ async def create_table_from_snapshot( SLA or deprecation policy. Args: - request (:class:`google.cloud.bigtable_admin_v2.types.CreateTableFromSnapshotRequest`): + request (Union[google.cloud.bigtable_admin_v2.types.CreateTableFromSnapshotRequest, dict]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot] Note: This is a private alpha release of Cloud Bigtable @@ -407,17 +411,17 @@ async def create_table_from_snapshot( async def list_tables( self, - request: bigtable_table_admin.ListTablesRequest = None, + request: Union[bigtable_table_admin.ListTablesRequest, dict] = None, *, parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListTablesAsyncPager: r"""Lists all tables served from a specified instance. Args: - request (:class:`google.cloud.bigtable_admin_v2.types.ListTablesRequest`): + request (Union[google.cloud.bigtable_admin_v2.types.ListTablesRequest, dict]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] parent (:class:`str`): @@ -498,17 +502,17 @@ async def list_tables( async def get_table( self, - request: bigtable_table_admin.GetTableRequest = None, + request: Union[bigtable_table_admin.GetTableRequest, dict] = None, *, name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> table.Table: r"""Gets metadata information about the specified table. Args: - request (:class:`google.cloud.bigtable_admin_v2.types.GetTableRequest`): + request (Union[google.cloud.bigtable_admin_v2.types.GetTableRequest, dict]): The request object. 
Request message for [google.bigtable.admin.v2.BigtableTableAdmin.GetTable][google.bigtable.admin.v2.BigtableTableAdmin.GetTable] name (:class:`str`): @@ -582,10 +586,10 @@ async def get_table( async def delete_table( self, - request: bigtable_table_admin.DeleteTableRequest = None, + request: Union[bigtable_table_admin.DeleteTableRequest, dict] = None, *, name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: @@ -593,7 +597,7 @@ async def delete_table( data. Args: - request (:class:`google.cloud.bigtable_admin_v2.types.DeleteTableRequest`): + request (Union[google.cloud.bigtable_admin_v2.types.DeleteTableRequest, dict]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable] name (:class:`str`): @@ -648,13 +652,13 @@ async def delete_table( async def modify_column_families( self, - request: bigtable_table_admin.ModifyColumnFamiliesRequest = None, + request: Union[bigtable_table_admin.ModifyColumnFamiliesRequest, dict] = None, *, name: str = None, modifications: Sequence[ bigtable_table_admin.ModifyColumnFamiliesRequest.Modification ] = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> table.Table: @@ -665,7 +669,7 @@ async def modify_column_families( table where only some modifications have taken effect. Args: - request (:class:`google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest`): + request (Union[google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest, dict]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies] name (:class:`str`): @@ -743,9 +747,9 @@ async def modify_column_families( async def drop_row_range( self, - request: bigtable_table_admin.DropRowRangeRequest = None, + request: Union[bigtable_table_admin.DropRowRangeRequest, dict] = None, *, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: @@ -755,7 +759,7 @@ async def drop_row_range( prefix. Args: - request (:class:`google.cloud.bigtable_admin_v2.types.DropRowRangeRequest`): + request (Union[google.cloud.bigtable_admin_v2.types.DropRowRangeRequest, dict]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] retry (google.api_core.retry.Retry): Designation of what errors, if any, @@ -788,10 +792,12 @@ async def drop_row_range( async def generate_consistency_token( self, - request: bigtable_table_admin.GenerateConsistencyTokenRequest = None, + request: Union[ + bigtable_table_admin.GenerateConsistencyTokenRequest, dict + ] = None, *, name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> bigtable_table_admin.GenerateConsistencyTokenResponse: @@ -802,7 +808,7 @@ async def generate_consistency_token( days. 
Args: - request (:class:`google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenRequest`): + request (Union[google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenRequest, dict]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] name (:class:`str`): @@ -874,11 +880,11 @@ async def generate_consistency_token( async def check_consistency( self, - request: bigtable_table_admin.CheckConsistencyRequest = None, + request: Union[bigtable_table_admin.CheckConsistencyRequest, dict] = None, *, name: str = None, consistency_token: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> bigtable_table_admin.CheckConsistencyResponse: @@ -888,7 +894,7 @@ async def check_consistency( request. Args: - request (:class:`google.cloud.bigtable_admin_v2.types.CheckConsistencyRequest`): + request (Union[google.cloud.bigtable_admin_v2.types.CheckConsistencyRequest, dict]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] name (:class:`str`): @@ -969,13 +975,13 @@ async def check_consistency( async def snapshot_table( self, - request: bigtable_table_admin.SnapshotTableRequest = None, + request: Union[bigtable_table_admin.SnapshotTableRequest, dict] = None, *, name: str = None, cluster: str = None, snapshot_id: str = None, description: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: @@ -990,7 +996,7 @@ async def snapshot_table( SLA or deprecation policy. Args: - request (:class:`google.cloud.bigtable_admin_v2.types.SnapshotTableRequest`): + request (Union[google.cloud.bigtable_admin_v2.types.SnapshotTableRequest, dict]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable][google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable] Note: This is a private alpha release of Cloud Bigtable @@ -1105,10 +1111,10 @@ async def snapshot_table( async def get_snapshot( self, - request: bigtable_table_admin.GetSnapshotRequest = None, + request: Union[bigtable_table_admin.GetSnapshotRequest, dict] = None, *, name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> table.Snapshot: @@ -1122,7 +1128,7 @@ async def get_snapshot( SLA or deprecation policy. Args: - request (:class:`google.cloud.bigtable_admin_v2.types.GetSnapshotRequest`): + request (Union[google.cloud.bigtable_admin_v2.types.GetSnapshotRequest, dict]): The request object. 
Request message for [google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot] Note: This is a private alpha release of Cloud Bigtable @@ -1210,10 +1216,10 @@ async def get_snapshot( async def list_snapshots( self, - request: bigtable_table_admin.ListSnapshotsRequest = None, + request: Union[bigtable_table_admin.ListSnapshotsRequest, dict] = None, *, parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListSnapshotsAsyncPager: @@ -1227,7 +1233,7 @@ async def list_snapshots( SLA or deprecation policy. Args: - request (:class:`google.cloud.bigtable_admin_v2.types.ListSnapshotsRequest`): + request (Union[google.cloud.bigtable_admin_v2.types.ListSnapshotsRequest, dict]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] Note: This is a private alpha release of Cloud Bigtable @@ -1324,10 +1330,10 @@ async def list_snapshots( async def delete_snapshot( self, - request: bigtable_table_admin.DeleteSnapshotRequest = None, + request: Union[bigtable_table_admin.DeleteSnapshotRequest, dict] = None, *, name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: @@ -1340,7 +1346,7 @@ async def delete_snapshot( SLA or deprecation policy. Args: - request (:class:`google.cloud.bigtable_admin_v2.types.DeleteSnapshotRequest`): + request (Union[google.cloud.bigtable_admin_v2.types.DeleteSnapshotRequest, dict]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot] Note: This is a private alpha release of Cloud Bigtable @@ -1401,12 +1407,12 @@ async def delete_snapshot( async def create_backup( self, - request: bigtable_table_admin.CreateBackupRequest = None, + request: Union[bigtable_table_admin.CreateBackupRequest, dict] = None, *, parent: str = None, backup_id: str = None, backup: table.Backup = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: @@ -1421,7 +1427,7 @@ async def create_backup( delete the backup. Args: - request (:class:`google.cloud.bigtable_admin_v2.types.CreateBackupRequest`): + request (Union[google.cloud.bigtable_admin_v2.types.CreateBackupRequest, dict]): The request object. The request for [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup]. parent (:class:`str`): @@ -1516,10 +1522,10 @@ async def create_backup( async def get_backup( self, - request: bigtable_table_admin.GetBackupRequest = None, + request: Union[bigtable_table_admin.GetBackupRequest, dict] = None, *, name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> table.Backup: @@ -1527,7 +1533,7 @@ async def get_backup( Bigtable Backup. Args: - request (:class:`google.cloud.bigtable_admin_v2.types.GetBackupRequest`): + request (Union[google.cloud.bigtable_admin_v2.types.GetBackupRequest, dict]): The request object. 
The request for [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup]. name (:class:`str`): @@ -1596,18 +1602,18 @@ async def get_backup( async def update_backup( self, - request: bigtable_table_admin.UpdateBackupRequest = None, + request: Union[bigtable_table_admin.UpdateBackupRequest, dict] = None, *, backup: table.Backup = None, update_mask: field_mask_pb2.FieldMask = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> table.Backup: r"""Updates a pending or completed Cloud Bigtable Backup. Args: - request (:class:`google.cloud.bigtable_admin_v2.types.UpdateBackupRequest`): + request (Union[google.cloud.bigtable_admin_v2.types.UpdateBackupRequest, dict]): The request object. The request for [UpdateBackup][google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup]. backup (:class:`google.cloud.bigtable_admin_v2.types.Backup`): @@ -1686,17 +1692,17 @@ async def update_backup( async def delete_backup( self, - request: bigtable_table_admin.DeleteBackupRequest = None, + request: Union[bigtable_table_admin.DeleteBackupRequest, dict] = None, *, name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes a pending or completed Cloud Bigtable backup. Args: - request (:class:`google.cloud.bigtable_admin_v2.types.DeleteBackupRequest`): + request (Union[google.cloud.bigtable_admin_v2.types.DeleteBackupRequest, dict]): The request object. The request for [DeleteBackup][google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup]. name (:class:`str`): @@ -1751,10 +1757,10 @@ async def delete_backup( async def list_backups( self, - request: bigtable_table_admin.ListBackupsRequest = None, + request: Union[bigtable_table_admin.ListBackupsRequest, dict] = None, *, parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListBackupsAsyncPager: @@ -1762,7 +1768,7 @@ async def list_backups( and pending backups. Args: - request (:class:`google.cloud.bigtable_admin_v2.types.ListBackupsRequest`): + request (Union[google.cloud.bigtable_admin_v2.types.ListBackupsRequest, dict]): The request object. The request for [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. parent (:class:`str`): @@ -1846,9 +1852,9 @@ async def list_backups( async def restore_table( self, - request: bigtable_table_admin.RestoreTableRequest = None, + request: Union[bigtable_table_admin.RestoreTableRequest, dict] = None, *, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: @@ -1863,7 +1869,7 @@ async def restore_table( [Table][google.bigtable.admin.v2.Table], if successful. Args: - request (:class:`google.cloud.bigtable_admin_v2.types.RestoreTableRequest`): + request (Union[google.cloud.bigtable_admin_v2.types.RestoreTableRequest, dict]): The request object. The request for [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. 
retry (google.api_core.retry.Retry): Designation of what errors, if any, @@ -1914,10 +1920,10 @@ async def restore_table( async def get_iam_policy( self, - request: iam_policy_pb2.GetIamPolicyRequest = None, + request: Union[iam_policy_pb2.GetIamPolicyRequest, dict] = None, *, resource: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> policy_pb2.Policy: @@ -1926,7 +1932,7 @@ async def get_iam_policy( but does not have a policy set. Args: - request (:class:`google.iam.v1.iam_policy_pb2.GetIamPolicyRequest`): + request (Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]): The request object. Request message for `GetIamPolicy` method. resource (:class:`str`): @@ -2052,10 +2058,10 @@ async def get_iam_policy( async def set_iam_policy( self, - request: iam_policy_pb2.SetIamPolicyRequest = None, + request: Union[iam_policy_pb2.SetIamPolicyRequest, dict] = None, *, resource: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> policy_pb2.Policy: @@ -2063,7 +2069,7 @@ async def set_iam_policy( resource. Replaces any existing policy. Args: - request (:class:`google.iam.v1.iam_policy_pb2.SetIamPolicyRequest`): + request (Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]): The request object. Request message for `SetIamPolicy` method. resource (:class:`str`): @@ -2179,11 +2185,11 @@ async def set_iam_policy( async def test_iam_permissions( self, - request: iam_policy_pb2.TestIamPermissionsRequest = None, + request: Union[iam_policy_pb2.TestIamPermissionsRequest, dict] = None, *, resource: str = None, permissions: Sequence[str] = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: @@ -2191,7 +2197,7 @@ async def test_iam_permissions( specified Table or Backup resource. Args: - request (:class:`google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest`): + request (Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]): The request object. Request message for `TestIamPermissions` method. 
resource (:class:`str`): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index ece1e880fdf3..3beafca3e1db 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -30,6 +30,8 @@ from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore +OptionalRetry = Union[retries.Retry, object] + from google.api_core import operation # type: ignore from google.api_core import operation_async # type: ignore from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import pagers @@ -456,7 +458,7 @@ def create_table( parent: str = None, table_id: str = None, table: gba_table.Table = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gba_table.Table: @@ -554,7 +556,7 @@ def create_table_from_snapshot( parent: str = None, table_id: str = None, source_snapshot: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: @@ -674,7 +676,7 @@ def list_tables( request: Union[bigtable_table_admin.ListTablesRequest, dict] = None, *, parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListTablesPager: @@ -755,7 +757,7 @@ def get_table( request: Union[bigtable_table_admin.GetTableRequest, dict] = None, *, name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> table.Table: @@ -829,7 +831,7 @@ def delete_table( request: Union[bigtable_table_admin.DeleteTableRequest, dict] = None, *, name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: @@ -898,7 +900,7 @@ def modify_column_families( modifications: Sequence[ bigtable_table_admin.ModifyColumnFamiliesRequest.Modification ] = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> table.Table: @@ -989,7 +991,7 @@ def drop_row_range( self, request: Union[bigtable_table_admin.DropRowRangeRequest, dict] = None, *, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: @@ -1038,7 +1040,7 @@ def generate_consistency_token( ] = None, *, name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> bigtable_table_admin.GenerateConsistencyTokenResponse: @@ -1119,7 +1121,7 @@ def check_consistency( *, name: str = None, consistency_token: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = 
gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> bigtable_table_admin.CheckConsistencyResponse: @@ -1206,7 +1208,7 @@ def snapshot_table( cluster: str = None, snapshot_id: str = None, description: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: @@ -1339,7 +1341,7 @@ def get_snapshot( request: Union[bigtable_table_admin.GetSnapshotRequest, dict] = None, *, name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> table.Snapshot: @@ -1434,7 +1436,7 @@ def list_snapshots( request: Union[bigtable_table_admin.ListSnapshotsRequest, dict] = None, *, parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListSnapshotsPager: @@ -1538,7 +1540,7 @@ def delete_snapshot( request: Union[bigtable_table_admin.DeleteSnapshotRequest, dict] = None, *, name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: @@ -1617,7 +1619,7 @@ def create_backup( parent: str = None, backup_id: str = None, backup: table.Backup = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: @@ -1730,7 +1732,7 @@ def get_backup( request: Union[bigtable_table_admin.GetBackupRequest, dict] = None, *, name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> table.Backup: @@ -1801,7 +1803,7 @@ def update_backup( *, backup: table.Backup = None, update_mask: field_mask_pb2.FieldMask = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> table.Backup: @@ -1890,7 +1892,7 @@ def delete_backup( request: Union[bigtable_table_admin.DeleteBackupRequest, dict] = None, *, name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: @@ -1955,7 +1957,7 @@ def list_backups( request: Union[bigtable_table_admin.ListBackupsRequest, dict] = None, *, parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListBackupsPager: @@ -2039,7 +2041,7 @@ def restore_table( self, request: Union[bigtable_table_admin.RestoreTableRequest, dict] = None, *, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: @@ -2109,7 +2111,7 @@ def get_iam_policy( request: Union[iam_policy_pb2.GetIamPolicyRequest, dict] = None, *, resource: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: 
Sequence[Tuple[str, str]] = (), ) -> policy_pb2.Policy: @@ -2236,7 +2238,7 @@ def set_iam_policy( request: Union[iam_policy_pb2.SetIamPolicyRequest, dict] = None, *, resource: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> policy_pb2.Policy: @@ -2363,7 +2365,7 @@ def test_iam_permissions( *, resource: str = None, permissions: Sequence[str] = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py index 5a4201bbecc0..903c596b6b96 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py @@ -15,7 +15,6 @@ # import abc from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import packaging.version import pkg_resources import google.auth # type: ignore @@ -44,15 +43,6 @@ except pkg_resources.DistributionNotFound: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() -try: - # google.auth.__version__ was added in 1.26.0 - _GOOGLE_AUTH_VERSION = google.auth.__version__ -except AttributeError: - try: # try pkg_resources if it is available - _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version - except pkg_resources.DistributionNotFound: # pragma: NO COVER - _GOOGLE_AUTH_VERSION = None - class BigtableTableAdminTransport(abc.ABC): """Abstract transport class for BigtableTableAdmin.""" @@ -109,7 +99,7 @@ def __init__( host += ":443" self._host = host - scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} # Save the scopes. self._scopes = scopes @@ -142,29 +132,6 @@ def __init__( # Save the credentials. self._credentials = credentials - # TODO(busunkim): This method is in the base transport - # to avoid duplicating code across the transport classes. These functions - # should be deleted once the minimum required versions of google-auth is increased. - - # TODO: Remove this function once google-auth >= 1.25.0 is required - @classmethod - def _get_scopes_kwargs( - cls, host: str, scopes: Optional[Sequence[str]] - ) -> Dict[str, Optional[Sequence[str]]]: - """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version""" - - scopes_kwargs = {} - - if _GOOGLE_AUTH_VERSION and ( - packaging.version.parse(_GOOGLE_AUTH_VERSION) - >= packaging.version.parse("1.25.0") - ): - scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES} - else: - scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES} - - return scopes_kwargs - def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. 
self._wrapped_methods = { @@ -370,7 +337,7 @@ def close(self): raise NotImplementedError() @property - def operations_client(self) -> operations_v1.OperationsClient: + def operations_client(self): """Return the client designed to process long-running operations.""" raise NotImplementedError() diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py index eaf333baf798..7bf703af8e7e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py @@ -121,7 +121,7 @@ def __init__( self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials self._stubs: Dict[str, Callable] = {} - self._operations_client = None + self._operations_client: Optional[operations_v1.OperationsClient] = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py index 438571f88a88..c995f1673b05 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py @@ -21,7 +21,6 @@ from google.api_core import operations_v1 # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore -import packaging.version import grpc # type: ignore from grpc.experimental import aio # type: ignore @@ -168,7 +167,7 @@ def __init__( self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials self._stubs: Dict[str, Callable] = {} - self._operations_client = None + self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py index 2a89d117427d..dbafe2d9f77b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py @@ -62,6 +62,9 @@ class RestoreTableRequest(proto.Message): r"""The request for [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + Attributes: parent (str): Required. The name of the instance in which to create the @@ -77,6 +80,7 @@ class RestoreTableRequest(proto.Message): Name of the backup from which to restore. Values are of the form ``projects//instances//clusters//backups/``. + This field is a member of `oneof`_ ``source``. 
""" parent = proto.Field(proto.STRING, number=1,) @@ -88,6 +92,9 @@ class RestoreTableMetadata(proto.Message): r"""Metadata type for the long-running operation returned by [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + Attributes: name (str): Name of the table being created and restored @@ -96,6 +103,7 @@ class RestoreTableMetadata(proto.Message): The type of the restore source. backup_info (google.cloud.bigtable_admin_v2.types.BackupInfo): + This field is a member of `oneof`_ ``source_info``. optimize_table_operation_name (str): If exists, the name of the long-running operation that will be used to track the post-restore optimization process to @@ -232,6 +240,13 @@ class DropRowRangeRequest(proto.Message): r"""Request message for [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + Attributes: name (str): Required. The unique name of the table on which to drop a @@ -240,9 +255,11 @@ class DropRowRangeRequest(proto.Message): row_key_prefix (bytes): Delete all rows that start with this row key prefix. Prefix cannot be zero length. + This field is a member of `oneof`_ ``target``. delete_all_data_from_table (bool): Delete all rows in the table. Setting this to false is a no-op. + This field is a member of `oneof`_ ``target``. """ name = proto.Field(proto.STRING, number=1,) @@ -359,6 +376,13 @@ class ModifyColumnFamiliesRequest(proto.Message): class Modification(proto.Message): r"""A create, update, or delete of a particular column family. + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + Attributes: id (str): The ID of the column family to be modified. @@ -366,13 +390,16 @@ class Modification(proto.Message): Create a new column family with the specified schema, or fail if one already exists with the given ID. + This field is a member of `oneof`_ ``mod``. update (google.cloud.bigtable_admin_v2.types.ColumnFamily): Update an existing column family to the specified schema, or fail if no column family exists with the given ID. + This field is a member of `oneof`_ ``mod``. drop (bool): Drop (delete) the column family with the given ID, or fail if no such family exists. + This field is a member of `oneof`_ ``mod``. """ id = proto.Field(proto.STRING, number=1,) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py index f8bef1865d49..b278d9dd02ff 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py @@ -157,6 +157,13 @@ class AppProfile(proto.Message): r"""A configuration object describing how Cloud Bigtable should treat traffic from a particular end user application. 
+ This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + Attributes: name (str): (``OutputOnly``) The unique name of the app profile. Values @@ -178,8 +185,10 @@ class AppProfile(proto.Message): case for this AppProfile. multi_cluster_routing_use_any (google.cloud.bigtable_admin_v2.types.AppProfile.MultiClusterRoutingUseAny): Use a multi-cluster routing policy. + This field is a member of `oneof`_ ``routing_policy``. single_cluster_routing (google.cloud.bigtable_admin_v2.types.AppProfile.SingleClusterRouting): Use a single-cluster routing policy. + This field is a member of `oneof`_ ``routing_policy``. """ class MultiClusterRoutingUseAny(proto.Message): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py index e90d587386b9..bc3d603cda60 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py @@ -45,12 +45,15 @@ class RestoreSourceType(proto.Enum): class RestoreInfo(proto.Message): r"""Information about a table restore. + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + Attributes: source_type (google.cloud.bigtable_admin_v2.types.RestoreSourceType): The type of the restore source. backup_info (google.cloud.bigtable_admin_v2.types.BackupInfo): Information about the backup used to restore the table. The backup may no longer exist. + This field is a member of `oneof`_ ``source_info``. """ source_type = proto.Field(proto.ENUM, number=1, enum="RestoreSourceType",) @@ -176,21 +179,32 @@ class GcRule(proto.Message): r"""Rule for determining which cells to delete during garbage collection. + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + Attributes: max_num_versions (int): Delete all cells in a column except the most recent N. + This field is a member of `oneof`_ ``rule``. max_age (google.protobuf.duration_pb2.Duration): Delete cells in a column older than the given age. Values must be at least one millisecond, and will be truncated to microsecond granularity. + This field is a member of `oneof`_ ``rule``. intersection (google.cloud.bigtable_admin_v2.types.GcRule.Intersection): Delete cells that would be deleted by every nested rule. + This field is a member of `oneof`_ ``rule``. union (google.cloud.bigtable_admin_v2.types.GcRule.Union): Delete cells that would be deleted by any nested rule. + This field is a member of `oneof`_ ``rule``. 
""" class Intersection(proto.Message): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py index 03e99eda26ae..948bf0da8b51 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -19,13 +19,15 @@ from typing import Dict, AsyncIterable, Awaitable, Sequence, Tuple, Type, Union import pkg_resources -import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core.client_options import ClientOptions # type: ignore from google.api_core import exceptions as core_exceptions # type: ignore from google.api_core import gapic_v1 # type: ignore from google.api_core import retry as retries # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +OptionalRetry = Union[retries.Retry, object] + from google.cloud.bigtable_v2.types import bigtable from google.cloud.bigtable_v2.types import data from .transports.base import BigtableTransport, DEFAULT_CLIENT_INFO @@ -157,11 +159,11 @@ def __init__( def read_rows( self, - request: bigtable.ReadRowsRequest = None, + request: Union[bigtable.ReadRowsRequest, dict] = None, *, table_name: str = None, app_profile_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> Awaitable[AsyncIterable[bigtable.ReadRowsResponse]]: @@ -173,7 +175,7 @@ def read_rows( ReadRowsResponse documentation for details. Args: - request (:class:`google.cloud.bigtable_v2.types.ReadRowsRequest`): + request (Union[google.cloud.bigtable_v2.types.ReadRowsRequest, dict]): The request object. Request message for Bigtable.ReadRows. table_name (:class:`str`): @@ -255,11 +257,11 @@ def read_rows( def sample_row_keys( self, - request: bigtable.SampleRowKeysRequest = None, + request: Union[bigtable.SampleRowKeysRequest, dict] = None, *, table_name: str = None, app_profile_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> Awaitable[AsyncIterable[bigtable.SampleRowKeysResponse]]: @@ -270,7 +272,7 @@ def sample_row_keys( mapreduces. Args: - request (:class:`google.cloud.bigtable_v2.types.SampleRowKeysRequest`): + request (Union[google.cloud.bigtable_v2.types.SampleRowKeysRequest, dict]): The request object. Request message for Bigtable.SampleRowKeys. table_name (:class:`str`): @@ -352,13 +354,13 @@ def sample_row_keys( async def mutate_row( self, - request: bigtable.MutateRowRequest = None, + request: Union[bigtable.MutateRowRequest, dict] = None, *, table_name: str = None, row_key: bytes = None, mutations: Sequence[data.Mutation] = None, app_profile_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> bigtable.MutateRowResponse: @@ -366,7 +368,7 @@ async def mutate_row( left unchanged unless explicitly changed by ``mutation``. Args: - request (:class:`google.cloud.bigtable_v2.types.MutateRowRequest`): + request (Union[google.cloud.bigtable_v2.types.MutateRowRequest, dict]): The request object. 
Request message for Bigtable.MutateRow. table_name (:class:`str`): @@ -473,12 +475,12 @@ async def mutate_row( def mutate_rows( self, - request: bigtable.MutateRowsRequest = None, + request: Union[bigtable.MutateRowsRequest, dict] = None, *, table_name: str = None, entries: Sequence[bigtable.MutateRowsRequest.Entry] = None, app_profile_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> Awaitable[AsyncIterable[bigtable.MutateRowsResponse]]: @@ -487,7 +489,7 @@ def mutate_rows( batch is not executed atomically. Args: - request (:class:`google.cloud.bigtable_v2.types.MutateRowsRequest`): + request (Union[google.cloud.bigtable_v2.types.MutateRowsRequest, dict]): The request object. Request message for BigtableService.MutateRows. table_name (:class:`str`): @@ -585,7 +587,7 @@ def mutate_rows( async def check_and_mutate_row( self, - request: bigtable.CheckAndMutateRowRequest = None, + request: Union[bigtable.CheckAndMutateRowRequest, dict] = None, *, table_name: str = None, row_key: bytes = None, @@ -593,7 +595,7 @@ async def check_and_mutate_row( true_mutations: Sequence[data.Mutation] = None, false_mutations: Sequence[data.Mutation] = None, app_profile_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> bigtable.CheckAndMutateRowResponse: @@ -601,7 +603,7 @@ async def check_and_mutate_row( predicate Reader filter. Args: - request (:class:`google.cloud.bigtable_v2.types.CheckAndMutateRowRequest`): + request (Union[google.cloud.bigtable_v2.types.CheckAndMutateRowRequest, dict]): The request object. Request message for Bigtable.CheckAndMutateRow. table_name (:class:`str`): @@ -741,13 +743,13 @@ async def check_and_mutate_row( async def read_modify_write_row( self, - request: bigtable.ReadModifyWriteRowRequest = None, + request: Union[bigtable.ReadModifyWriteRowRequest, dict] = None, *, table_name: str = None, row_key: bytes = None, rules: Sequence[data.ReadModifyWriteRule] = None, app_profile_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> bigtable.ReadModifyWriteRowResponse: @@ -760,7 +762,7 @@ async def read_modify_write_row( contents of all modified cells. Args: - request (:class:`google.cloud.bigtable_v2.types.ReadModifyWriteRowRequest`): + request (Union[google.cloud.bigtable_v2.types.ReadModifyWriteRowRequest, dict]): The request object. Request message for Bigtable.ReadModifyWriteRow. 
table_name (:class:`str`): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py index beeed24d12cf..05466167c749 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py @@ -30,6 +30,8 @@ from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore +OptionalRetry = Union[retries.Retry, object] + from google.cloud.bigtable_v2.types import bigtable from google.cloud.bigtable_v2.types import data from .transports.base import BigtableTransport, DEFAULT_CLIENT_INFO @@ -353,7 +355,7 @@ def read_rows( *, table_name: str = None, app_profile_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> Iterable[bigtable.ReadRowsResponse]: @@ -444,7 +446,7 @@ def sample_row_keys( *, table_name: str = None, app_profile_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> Iterable[bigtable.SampleRowKeysResponse]: @@ -536,7 +538,7 @@ def mutate_row( row_key: bytes = None, mutations: Sequence[data.Mutation] = None, app_profile_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> bigtable.MutateRowResponse: @@ -646,7 +648,7 @@ def mutate_rows( table_name: str = None, entries: Sequence[bigtable.MutateRowsRequest.Entry] = None, app_profile_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> Iterable[bigtable.MutateRowsResponse]: @@ -754,7 +756,7 @@ def check_and_mutate_row( true_mutations: Sequence[data.Mutation] = None, false_mutations: Sequence[data.Mutation] = None, app_profile_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> bigtable.CheckAndMutateRowResponse: @@ -901,7 +903,7 @@ def read_modify_write_row( row_key: bytes = None, rules: Sequence[data.ReadModifyWriteRule] = None, app_profile_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, + retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> bigtable.ReadModifyWriteRowResponse: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py index d89c01d228e7..b1dc65d809ae 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py @@ -15,7 +15,6 @@ # import abc from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import packaging.version import pkg_resources import google.auth # type: ignore @@ -35,15 +34,6 @@ except pkg_resources.DistributionNotFound: DEFAULT_CLIENT_INFO = 
gapic_v1.client_info.ClientInfo() -try: - # google.auth.__version__ was added in 1.26.0 - _GOOGLE_AUTH_VERSION = google.auth.__version__ -except AttributeError: - try: # try pkg_resources if it is available - _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version - except pkg_resources.DistributionNotFound: # pragma: NO COVER - _GOOGLE_AUTH_VERSION = None - class BigtableTransport(abc.ABC): """Abstract transport class for Bigtable.""" @@ -100,7 +90,7 @@ def __init__( host += ":443" self._host = host - scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} # Save the scopes. self._scopes = scopes @@ -133,29 +123,6 @@ def __init__( # Save the credentials. self._credentials = credentials - # TODO(busunkim): This method is in the base transport - # to avoid duplicating code across the transport classes. These functions - # should be deleted once the minimum required versions of google-auth is increased. - - # TODO: Remove this function once google-auth >= 1.25.0 is required - @classmethod - def _get_scopes_kwargs( - cls, host: str, scopes: Optional[Sequence[str]] - ) -> Dict[str, Optional[Sequence[str]]]: - """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version""" - - scopes_kwargs = {} - - if _GOOGLE_AUTH_VERSION and ( - packaging.version.parse(_GOOGLE_AUTH_VERSION) - >= packaging.version.parse("1.25.0") - ): - scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES} - else: - scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES} - - return scopes_kwargs - def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. self._wrapped_methods = { diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py index c0560526e2b0..fcaac9190b54 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py @@ -20,7 +20,6 @@ from google.api_core import grpc_helpers_async # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore -import packaging.version import grpc # type: ignore from grpc.experimental import aio # type: ignore diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py index 2c18a5155d28..3aa0eceafbf9 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py @@ -95,6 +95,13 @@ class CellChunk(proto.Message): r"""Specifies a piece of a row's contents returned as part of the read response stream. + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + Attributes: row_key (bytes): The row key for this chunk of data. 
If the @@ -146,9 +153,11 @@ class CellChunk(proto.Message): reset_row (bool): Indicates that the client should drop all previous chunks for ``row_key``, as it will be re-read from the beginning. + This field is a member of `oneof`_ ``row_status``. commit_row (bool): Indicates that the client can safely process all previous chunks for ``row_key``, as its data has been fully read. + This field is a member of `oneof`_ ``row_status``. """ row_key = proto.Field(proto.BYTES, number=1,) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py index 2b97ac0f7a83..bdb037a64bb4 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py @@ -130,19 +130,30 @@ class Cell(proto.Message): class RowRange(proto.Message): r"""Specifies a contiguous range of rows. + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + Attributes: start_key_closed (bytes): Used when giving an inclusive lower bound for the range. + This field is a member of `oneof`_ ``start_key``. start_key_open (bytes): Used when giving an exclusive lower bound for the range. + This field is a member of `oneof`_ ``start_key``. end_key_open (bytes): Used when giving an exclusive upper bound for the range. + This field is a member of `oneof`_ ``end_key``. end_key_closed (bytes): Used when giving an inclusive upper bound for the range. + This field is a member of `oneof`_ ``end_key``. """ start_key_closed = proto.Field(proto.BYTES, number=1, oneof="start_key",) @@ -171,6 +182,13 @@ class ColumnRange(proto.Message): :, where both bounds can be either inclusive or exclusive. + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + Attributes: family_name (str): The name of the column family within which @@ -178,15 +196,19 @@ class ColumnRange(proto.Message): start_qualifier_closed (bytes): Used when giving an inclusive lower bound for the range. + This field is a member of `oneof`_ ``start_qualifier``. start_qualifier_open (bytes): Used when giving an exclusive lower bound for the range. + This field is a member of `oneof`_ ``start_qualifier``. end_qualifier_closed (bytes): Used when giving an inclusive upper bound for the range. + This field is a member of `oneof`_ ``end_qualifier``. end_qualifier_open (bytes): Used when giving an exclusive upper bound for the range. + This field is a member of `oneof`_ ``end_qualifier``. """ family_name = proto.Field(proto.STRING, number=1,) @@ -217,19 +239,30 @@ class TimestampRange(proto.Message): class ValueRange(proto.Message): r"""Specifies a contiguous range of raw byte values. + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. 
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + Attributes: start_value_closed (bytes): Used when giving an inclusive lower bound for the range. + This field is a member of `oneof`_ ``start_value``. start_value_open (bytes): Used when giving an exclusive lower bound for the range. + This field is a member of `oneof`_ ``start_value``. end_value_closed (bytes): Used when giving an inclusive upper bound for the range. + This field is a member of `oneof`_ ``end_value``. end_value_open (bytes): Used when giving an exclusive upper bound for the range. + This field is a member of `oneof`_ ``end_value``. """ start_value_closed = proto.Field(proto.BYTES, number=1, oneof="start_value",) @@ -277,17 +310,27 @@ class RowFilter(proto.Message): 4096 bytes, and RowFilters may not be nested within each other (in Chains or Interleaves) to a depth of more than 20. + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + Attributes: chain (google.cloud.bigtable_v2.types.RowFilter.Chain): Applies several RowFilters to the data in sequence, progressively narrowing the results. + This field is a member of `oneof`_ ``filter``. interleave (google.cloud.bigtable_v2.types.RowFilter.Interleave): Applies several RowFilters to the data in parallel and combines the results. + This field is a member of `oneof`_ ``filter``. condition (google.cloud.bigtable_v2.types.RowFilter.Condition): Applies one of two possible RowFilters to the data based on the output of a predicate RowFilter. + This field is a member of `oneof`_ ``filter``. sink (bool): ADVANCED USE ONLY. Hook for introspection into the RowFilter. Outputs all cells directly to the output of the @@ -354,14 +397,17 @@ class RowFilter(proto.Message): Cannot be used within the ``predicate_filter``, ``true_filter``, or ``false_filter`` of a [Condition][google.bigtable.v2.RowFilter.Condition]. + This field is a member of `oneof`_ ``filter``. pass_all_filter (bool): Matches all cells, regardless of input. Functionally equivalent to leaving ``filter`` unset, but included for completeness. + This field is a member of `oneof`_ ``filter``. block_all_filter (bool): Does not match any cells, regardless of input. Useful for temporarily disabling just part of a filter. + This field is a member of `oneof`_ ``filter``. row_key_regex_filter (bytes): Matches only cells from rows whose keys satisfy the given RE2 regex. In other words, passes through the entire row @@ -370,10 +416,12 @@ class RowFilter(proto.Message): ``\C`` escape sequence must be used if a true wildcard is desired. The ``.`` character will not match the new line character ``\n``, which may be present in a binary key. + This field is a member of `oneof`_ ``filter``. row_sample_filter (float): Matches all cells from a row with probability p, and matches no cells from the row with probability 1-p. + This field is a member of `oneof`_ ``filter``. family_name_regex_filter (str): Matches only cells from columns whose families satisfy the given RE2 regex. For technical reasons, the regex must not @@ -381,6 +429,7 @@ class RowFilter(proto.Message): a literal. 
Note that, since column families cannot contain the new line character ``\n``, it is sufficient to use ``.`` as a full wildcard when matching column family names. + This field is a member of `oneof`_ ``filter``. column_qualifier_regex_filter (bytes): Matches only cells from columns whose qualifiers satisfy the given RE2 regex. Note that, since column qualifiers can @@ -388,12 +437,15 @@ class RowFilter(proto.Message): used if a true wildcard is desired. The ``.`` character will not match the new line character ``\n``, which may be present in a binary qualifier. + This field is a member of `oneof`_ ``filter``. column_range_filter (google.cloud.bigtable_v2.types.ColumnRange): Matches only cells from columns within the given range. + This field is a member of `oneof`_ ``filter``. timestamp_range_filter (google.cloud.bigtable_v2.types.TimestampRange): Matches only cells with timestamps within the given range. + This field is a member of `oneof`_ ``filter``. value_regex_filter (bytes): Matches only cells with values that satisfy the given regular expression. Note that, since cell values can contain @@ -401,20 +453,24 @@ class RowFilter(proto.Message): a true wildcard is desired. The ``.`` character will not match the new line character ``\n``, which may be present in a binary value. + This field is a member of `oneof`_ ``filter``. value_range_filter (google.cloud.bigtable_v2.types.ValueRange): Matches only cells with values that fall within the given range. + This field is a member of `oneof`_ ``filter``. cells_per_row_offset_filter (int): Skips the first N cells of each row, matching all subsequent cells. If duplicate cells are present, as is possible when using an Interleave, each copy of the cell is counted separately. + This field is a member of `oneof`_ ``filter``. cells_per_row_limit_filter (int): Matches only the first N cells of each row. If duplicate cells are present, as is possible when using an Interleave, each copy of the cell is counted separately. + This field is a member of `oneof`_ ``filter``. cells_per_column_limit_filter (int): Matches only the most recent N cells within each column. For example, if N=2, this filter would match column ``foo:bar`` @@ -423,9 +479,11 @@ class RowFilter(proto.Message): ``foo:bar2``. If duplicate cells are present, as is possible when using an Interleave, each copy of the cell is counted separately. + This field is a member of `oneof`_ ``filter``. strip_value_transformer (bool): Replaces each cell's value with the empty string. + This field is a member of `oneof`_ ``filter``. apply_label_transformer (str): Applies the given label to all cells in the output row. This allows the client to determine which results were produced @@ -441,6 +499,7 @@ class RowFilter(proto.Message): contain multiple ``apply_label_transformers``, as they will be applied to separate copies of the input. This may be relaxed in the future. + This field is a member of `oneof`_ ``filter``. """ class Chain(proto.Message): @@ -558,15 +617,26 @@ class Mutation(proto.Message): r"""Specifies a particular change to be made to the contents of a row. + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + Attributes: set_cell (google.cloud.bigtable_v2.types.Mutation.SetCell): Set a cell's value. 
+ This field is a member of `oneof`_ ``mutation``. delete_from_column (google.cloud.bigtable_v2.types.Mutation.DeleteFromColumn): Deletes cells from a column. + This field is a member of `oneof`_ ``mutation``. delete_from_family (google.cloud.bigtable_v2.types.Mutation.DeleteFromFamily): Deletes cells from a column family. + This field is a member of `oneof`_ ``mutation``. delete_from_row (google.cloud.bigtable_v2.types.Mutation.DeleteFromRow): Deletes cells from the entire row. + This field is a member of `oneof`_ ``mutation``. """ class SetCell(proto.Message): @@ -651,6 +721,13 @@ class ReadModifyWriteRule(proto.Message): r"""Specifies an atomic read/modify/write operation on the latest value of the specified column. + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + Attributes: family_name (str): The name of the family to which the read/modify/write should @@ -664,12 +741,14 @@ class ReadModifyWriteRule(proto.Message): Rule specifying that ``append_value`` be appended to the existing value. If the targeted cell is unset, it will be treated as containing the empty string. + This field is a member of `oneof`_ ``rule``. increment_amount (int): Rule specifying that ``increment_amount`` be added to the existing value. If the targeted cell is unset, it will be treated as containing a zero. Otherwise, the targeted cell must contain an 8-byte value (interpreted as a 64-bit big-endian signed integer), or the entire request will fail. + This field is a member of `oneof`_ ``rule``. """ family_name = proto.Field(proto.STRING, number=1,) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 76775e91f59c..73a2b28199c5 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -32,7 +32,7 @@ # NOTE: Maintainers, please do not require google-api-core>=2.x.x # Until this issue is closed # https://github.com/googleapis/google-cloud-python/issues/10566 - "google-api-core[grpc] >= 1.26.0, <3.0.0dev", + "google-api-core[grpc] >= 1.28.0, <3.0.0dev", # NOTE: Maintainers, please do not require google-api-core>=2.x.x # Until this issue is closed # https://github.com/googleapis/google-cloud-python/issues/10566 @@ -40,7 +40,6 @@ "grpc-google-iam-v1 >= 0.12.3, < 0.13dev", "proto-plus >= 1.13.0", "libcst >= 0.2.5", - "packaging >= 14.3", ] extras = {} diff --git a/packages/google-cloud-bigtable/testing/constraints-3.6.txt b/packages/google-cloud-bigtable/testing/constraints-3.6.txt index 25d8d3eef538..1e50717bfda5 100644 --- a/packages/google-cloud-bigtable/testing/constraints-3.6.txt +++ b/packages/google-cloud-bigtable/testing/constraints-3.6.txt @@ -5,10 +5,8 @@ # # e.g., if setup.py has "foo >= 1.14.0, < 2.0.0dev", # Then this file should have foo==1.14.0 -google-api-core==1.26.0 +google-api-core==1.28.0 google-cloud-core==1.4.1 grpc-google-iam-v1==0.12.3 proto-plus==1.13.0 libcst==0.2.5 -packaging==14.3 -google-auth==1.24.0 # TODO: remove when google-auth >= 1.25.0 is required transitively through google-api-core diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index b658c7361df5..944919359f2a 
100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -15,7 +15,6 @@ # import os import mock -import packaging.version import grpc from grpc.experimental import aio @@ -43,9 +42,6 @@ ) from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import pagers from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import transports -from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.base import ( - _GOOGLE_AUTH_VERSION, -) from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin from google.cloud.bigtable_admin_v2.types import common from google.cloud.bigtable_admin_v2.types import instance @@ -61,20 +57,6 @@ import google.auth -# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively -# through google-api-core: -# - Delete the auth "less than" test cases -# - Delete these pytest markers (Make the "greater than or equal to" tests the default). -requires_google_auth_lt_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), - reason="This test requires google-auth < 1.25.0", -) -requires_google_auth_gte_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), - reason="This test requires google-auth >= 1.25.0", -) - - def client_cert_source_callback(): return b"cert bytes", b"key bytes" @@ -236,7 +218,7 @@ def test_bigtable_instance_admin_client_client_options( options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None - client = client_class(client_options=options) + client = client_class(transport=transport_name, client_options=options) patched.assert_called_once_with( credentials=None, credentials_file=None, @@ -253,7 +235,7 @@ def test_bigtable_instance_admin_client_client_options( with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None - client = client_class() + client = client_class(transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, @@ -270,7 +252,7 @@ def test_bigtable_instance_admin_client_client_options( with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None - client = client_class() + client = client_class(transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, @@ -299,7 +281,7 @@ def test_bigtable_instance_admin_client_client_options( options = client_options.ClientOptions(quota_project_id="octopus") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None - client = client_class(client_options=options) + client = client_class(transport=transport_name, client_options=options) patched.assert_called_once_with( credentials=None, credentials_file=None, @@ -368,7 +350,7 @@ def test_bigtable_instance_admin_client_mtls_env_auto( ) with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None - client = client_class(client_options=options) + client = client_class(transport=transport_name, client_options=options) if use_client_cert_env == "false": 
expected_client_cert_source = None @@ -410,7 +392,7 @@ def test_bigtable_instance_admin_client_mtls_env_auto( expected_client_cert_source = client_cert_source_callback patched.return_value = None - client = client_class() + client = client_class(transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, @@ -432,7 +414,7 @@ def test_bigtable_instance_admin_client_mtls_env_auto( return_value=False, ): patched.return_value = None - client = client_class() + client = client_class(transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, @@ -467,7 +449,7 @@ def test_bigtable_instance_admin_client_client_options_scopes( options = client_options.ClientOptions(scopes=["1", "2"],) with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None - client = client_class(client_options=options) + client = client_class(transport=transport_name, client_options=options) patched.assert_called_once_with( credentials=None, credentials_file=None, @@ -502,7 +484,7 @@ def test_bigtable_instance_admin_client_client_options_credentials_file( options = client_options.ClientOptions(credentials_file="credentials.json") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None - client = client_class(client_options=options) + client = client_class(transport=transport_name, client_options=options) patched.assert_called_once_with( credentials=None, credentials_file="credentials.json", @@ -4947,7 +4929,6 @@ def test_bigtable_instance_admin_base_transport(): transport.operations_client -@requires_google_auth_gte_1_25_0 def test_bigtable_instance_admin_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file with mock.patch.object( @@ -4976,34 +4957,6 @@ def test_bigtable_instance_admin_base_transport_with_credentials_file(): ) -@requires_google_auth_lt_1_25_0 -def test_bigtable_instance_admin_base_transport_with_credentials_file_old_google_auth(): - # Instantiate the base transport with a credentials file - with mock.patch.object( - google.auth, "load_credentials_from_file", autospec=True - ) as load_creds, mock.patch( - "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.BigtableInstanceAdminTransport._prep_wrapped_messages" - ) as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.BigtableInstanceAdminTransport( - credentials_file="credentials.json", quota_project_id="octopus", - ) - load_creds.assert_called_once_with( - "credentials.json", - scopes=( - "https://www.googleapis.com/auth/bigtable.admin", - "https://www.googleapis.com/auth/bigtable.admin.cluster", - "https://www.googleapis.com/auth/bigtable.admin.instance", - "https://www.googleapis.com/auth/cloud-bigtable.admin", - "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster", - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - ), - quota_project_id="octopus", - ) - - def test_bigtable_instance_admin_base_transport_with_adc(): # Test the default credentials are used if credentials and credentials_file are None. 
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch( @@ -5015,7 +4968,6 @@ def test_bigtable_instance_admin_base_transport_with_adc(): adc.assert_called_once() -@requires_google_auth_gte_1_25_0 def test_bigtable_instance_admin_auth_adc(): # If no credentials are provided, we should use ADC credentials. with mock.patch.object(google.auth, "default", autospec=True) as adc: @@ -5036,26 +4988,6 @@ def test_bigtable_instance_admin_auth_adc(): ) -@requires_google_auth_lt_1_25_0 -def test_bigtable_instance_admin_auth_adc_old_google_auth(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - BigtableInstanceAdminClient() - adc.assert_called_once_with( - scopes=( - "https://www.googleapis.com/auth/bigtable.admin", - "https://www.googleapis.com/auth/bigtable.admin.cluster", - "https://www.googleapis.com/auth/bigtable.admin.instance", - "https://www.googleapis.com/auth/cloud-bigtable.admin", - "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster", - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - ), - quota_project_id=None, - ) - - @pytest.mark.parametrize( "transport_class", [ @@ -5063,7 +4995,6 @@ def test_bigtable_instance_admin_auth_adc_old_google_auth(): transports.BigtableInstanceAdminGrpcAsyncIOTransport, ], ) -@requires_google_auth_gte_1_25_0 def test_bigtable_instance_admin_transport_auth_adc(transport_class): # If credentials and host are not provided, the transport class should use # ADC credentials. @@ -5085,34 +5016,6 @@ def test_bigtable_instance_admin_transport_auth_adc(transport_class): ) -@pytest.mark.parametrize( - "transport_class", - [ - transports.BigtableInstanceAdminGrpcTransport, - transports.BigtableInstanceAdminGrpcAsyncIOTransport, - ], -) -@requires_google_auth_lt_1_25_0 -def test_bigtable_instance_admin_transport_auth_adc_old_google_auth(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object(google.auth, "default", autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus") - adc.assert_called_once_with( - scopes=( - "https://www.googleapis.com/auth/bigtable.admin", - "https://www.googleapis.com/auth/bigtable.admin.cluster", - "https://www.googleapis.com/auth/bigtable.admin.instance", - "https://www.googleapis.com/auth/cloud-bigtable.admin", - "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster", - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - ), - quota_project_id="octopus", - ) - - @pytest.mark.parametrize( "transport_class,grpc_helpers", [ diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index 95739fc94ab8..c4622b25305b 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -15,7 +15,6 @@ # import os import mock -import packaging.version import grpc from grpc.experimental import aio @@ -43,9 +42,6 @@ ) from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import pagers from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import transports -from google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.base import ( - _GOOGLE_AUTH_VERSION, -) from google.cloud.bigtable_admin_v2.types import bigtable_table_admin from google.cloud.bigtable_admin_v2.types import table from google.cloud.bigtable_admin_v2.types import table as gba_table @@ -63,20 +59,6 @@ import google.auth -# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively -# through google-api-core: -# - Delete the auth "less than" test cases -# - Delete these pytest markers (Make the "greater than or equal to" tests the default). 
-requires_google_auth_lt_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), - reason="This test requires google-auth < 1.25.0", -) -requires_google_auth_gte_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), - reason="This test requires google-auth >= 1.25.0", -) - - def client_cert_source_callback(): return b"cert bytes", b"key bytes" @@ -234,7 +216,7 @@ def test_bigtable_table_admin_client_client_options( options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None - client = client_class(client_options=options) + client = client_class(transport=transport_name, client_options=options) patched.assert_called_once_with( credentials=None, credentials_file=None, @@ -251,7 +233,7 @@ def test_bigtable_table_admin_client_client_options( with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None - client = client_class() + client = client_class(transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, @@ -268,7 +250,7 @@ def test_bigtable_table_admin_client_client_options( with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None - client = client_class() + client = client_class(transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, @@ -297,7 +279,7 @@ def test_bigtable_table_admin_client_client_options( options = client_options.ClientOptions(quota_project_id="octopus") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None - client = client_class(client_options=options) + client = client_class(transport=transport_name, client_options=options) patched.assert_called_once_with( credentials=None, credentials_file=None, @@ -366,7 +348,7 @@ def test_bigtable_table_admin_client_mtls_env_auto( ) with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None - client = client_class(client_options=options) + client = client_class(transport=transport_name, client_options=options) if use_client_cert_env == "false": expected_client_cert_source = None @@ -408,7 +390,7 @@ def test_bigtable_table_admin_client_mtls_env_auto( expected_client_cert_source = client_cert_source_callback patched.return_value = None - client = client_class() + client = client_class(transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, @@ -430,7 +412,7 @@ def test_bigtable_table_admin_client_mtls_env_auto( return_value=False, ): patched.return_value = None - client = client_class() + client = client_class(transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, @@ -461,7 +443,7 @@ def test_bigtable_table_admin_client_client_options_scopes( options = client_options.ClientOptions(scopes=["1", "2"],) with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None - client = client_class(client_options=options) + client = client_class(transport=transport_name, client_options=options) patched.assert_called_once_with( credentials=None, credentials_file=None, @@ -492,7 +474,7 @@ def 
test_bigtable_table_admin_client_client_options_credentials_file( options = client_options.ClientOptions(credentials_file="credentials.json") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None - client = client_class(client_options=options) + client = client_class(transport=transport_name, client_options=options) patched.assert_called_once_with( credentials=None, credentials_file="credentials.json", @@ -5794,7 +5776,6 @@ def test_bigtable_table_admin_base_transport(): transport.operations_client -@requires_google_auth_gte_1_25_0 def test_bigtable_table_admin_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file with mock.patch.object( @@ -5822,33 +5803,6 @@ def test_bigtable_table_admin_base_transport_with_credentials_file(): ) -@requires_google_auth_lt_1_25_0 -def test_bigtable_table_admin_base_transport_with_credentials_file_old_google_auth(): - # Instantiate the base transport with a credentials file - with mock.patch.object( - google.auth, "load_credentials_from_file", autospec=True - ) as load_creds, mock.patch( - "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.BigtableTableAdminTransport._prep_wrapped_messages" - ) as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.BigtableTableAdminTransport( - credentials_file="credentials.json", quota_project_id="octopus", - ) - load_creds.assert_called_once_with( - "credentials.json", - scopes=( - "https://www.googleapis.com/auth/bigtable.admin", - "https://www.googleapis.com/auth/bigtable.admin.table", - "https://www.googleapis.com/auth/cloud-bigtable.admin", - "https://www.googleapis.com/auth/cloud-bigtable.admin.table", - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - ), - quota_project_id="octopus", - ) - - def test_bigtable_table_admin_base_transport_with_adc(): # Test the default credentials are used if credentials and credentials_file are None. with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch( @@ -5860,7 +5814,6 @@ def test_bigtable_table_admin_base_transport_with_adc(): adc.assert_called_once() -@requires_google_auth_gte_1_25_0 def test_bigtable_table_admin_auth_adc(): # If no credentials are provided, we should use ADC credentials. with mock.patch.object(google.auth, "default", autospec=True) as adc: @@ -5880,25 +5833,6 @@ def test_bigtable_table_admin_auth_adc(): ) -@requires_google_auth_lt_1_25_0 -def test_bigtable_table_admin_auth_adc_old_google_auth(): - # If no credentials are provided, we should use ADC credentials. 
- with mock.patch.object(google.auth, "default", autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - BigtableTableAdminClient() - adc.assert_called_once_with( - scopes=( - "https://www.googleapis.com/auth/bigtable.admin", - "https://www.googleapis.com/auth/bigtable.admin.table", - "https://www.googleapis.com/auth/cloud-bigtable.admin", - "https://www.googleapis.com/auth/cloud-bigtable.admin.table", - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - ), - quota_project_id=None, - ) - - @pytest.mark.parametrize( "transport_class", [ @@ -5906,7 +5840,6 @@ def test_bigtable_table_admin_auth_adc_old_google_auth(): transports.BigtableTableAdminGrpcAsyncIOTransport, ], ) -@requires_google_auth_gte_1_25_0 def test_bigtable_table_admin_transport_auth_adc(transport_class): # If credentials and host are not provided, the transport class should use # ADC credentials. @@ -5927,33 +5860,6 @@ def test_bigtable_table_admin_transport_auth_adc(transport_class): ) -@pytest.mark.parametrize( - "transport_class", - [ - transports.BigtableTableAdminGrpcTransport, - transports.BigtableTableAdminGrpcAsyncIOTransport, - ], -) -@requires_google_auth_lt_1_25_0 -def test_bigtable_table_admin_transport_auth_adc_old_google_auth(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus") - adc.assert_called_once_with( - scopes=( - "https://www.googleapis.com/auth/bigtable.admin", - "https://www.googleapis.com/auth/bigtable.admin.table", - "https://www.googleapis.com/auth/cloud-bigtable.admin", - "https://www.googleapis.com/auth/cloud-bigtable.admin.table", - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - ), - quota_project_id="octopus", - ) - - @pytest.mark.parametrize( "transport_class,grpc_helpers", [ diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py index 95fd03bb32af..580d4ec4e189 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py @@ -15,7 +15,6 @@ # import os import mock -import packaging.version import grpc from grpc.experimental import aio @@ -35,29 +34,12 @@ from google.cloud.bigtable_v2.services.bigtable import BigtableAsyncClient from google.cloud.bigtable_v2.services.bigtable import BigtableClient from google.cloud.bigtable_v2.services.bigtable import transports -from google.cloud.bigtable_v2.services.bigtable.transports.base import ( - _GOOGLE_AUTH_VERSION, -) from google.cloud.bigtable_v2.types import bigtable from google.cloud.bigtable_v2.types import data from google.oauth2 import service_account import google.auth -# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively -# through google-api-core: -# - Delete the auth "less than" test cases -# - Delete these pytest markers (Make the "greater than or equal to" tests the default). 
-requires_google_auth_lt_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), - reason="This test requires google-auth < 1.25.0", -) -requires_google_auth_gte_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), - reason="This test requires google-auth >= 1.25.0", -) - - def client_cert_source_callback(): return b"cert bytes", b"key bytes" @@ -197,7 +179,7 @@ def test_bigtable_client_client_options(client_class, transport_class, transport options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None - client = client_class(client_options=options) + client = client_class(transport=transport_name, client_options=options) patched.assert_called_once_with( credentials=None, credentials_file=None, @@ -214,7 +196,7 @@ def test_bigtable_client_client_options(client_class, transport_class, transport with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None - client = client_class() + client = client_class(transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, @@ -231,7 +213,7 @@ def test_bigtable_client_client_options(client_class, transport_class, transport with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None - client = client_class() + client = client_class(transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, @@ -260,7 +242,7 @@ def test_bigtable_client_client_options(client_class, transport_class, transport options = client_options.ClientOptions(quota_project_id="octopus") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None - client = client_class(client_options=options) + client = client_class(transport=transport_name, client_options=options) patched.assert_called_once_with( credentials=None, credentials_file=None, @@ -317,7 +299,7 @@ def test_bigtable_client_mtls_env_auto( ) with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None - client = client_class(client_options=options) + client = client_class(transport=transport_name, client_options=options) if use_client_cert_env == "false": expected_client_cert_source = None @@ -359,7 +341,7 @@ def test_bigtable_client_mtls_env_auto( expected_client_cert_source = client_cert_source_callback patched.return_value = None - client = client_class() + client = client_class(transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, @@ -381,7 +363,7 @@ def test_bigtable_client_mtls_env_auto( return_value=False, ): patched.return_value = None - client = client_class() + client = client_class(transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, @@ -408,7 +390,7 @@ def test_bigtable_client_client_options_scopes( options = client_options.ClientOptions(scopes=["1", "2"],) with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None - client = client_class(client_options=options) + client = client_class(transport=transport_name, client_options=options) patched.assert_called_once_with( credentials=None, credentials_file=None, @@ -435,7 
+417,7 @@ def test_bigtable_client_client_options_credentials_file( options = client_options.ClientOptions(credentials_file="credentials.json") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None - client = client_class(client_options=options) + client = client_class(transport=transport_name, client_options=options) patched.assert_called_once_with( credentials=None, credentials_file="credentials.json", @@ -2027,7 +2009,6 @@ def test_bigtable_base_transport(): transport.close() -@requires_google_auth_gte_1_25_0 def test_bigtable_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file with mock.patch.object( @@ -2055,33 +2036,6 @@ def test_bigtable_base_transport_with_credentials_file(): ) -@requires_google_auth_lt_1_25_0 -def test_bigtable_base_transport_with_credentials_file_old_google_auth(): - # Instantiate the base transport with a credentials file - with mock.patch.object( - google.auth, "load_credentials_from_file", autospec=True - ) as load_creds, mock.patch( - "google.cloud.bigtable_v2.services.bigtable.transports.BigtableTransport._prep_wrapped_messages" - ) as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.BigtableTransport( - credentials_file="credentials.json", quota_project_id="octopus", - ) - load_creds.assert_called_once_with( - "credentials.json", - scopes=( - "https://www.googleapis.com/auth/bigtable.data", - "https://www.googleapis.com/auth/bigtable.data.readonly", - "https://www.googleapis.com/auth/cloud-bigtable.data", - "https://www.googleapis.com/auth/cloud-bigtable.data.readonly", - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - ), - quota_project_id="octopus", - ) - - def test_bigtable_base_transport_with_adc(): # Test the default credentials are used if credentials and credentials_file are None. with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch( @@ -2093,7 +2047,6 @@ def test_bigtable_base_transport_with_adc(): adc.assert_called_once() -@requires_google_auth_gte_1_25_0 def test_bigtable_auth_adc(): # If no credentials are provided, we should use ADC credentials. with mock.patch.object(google.auth, "default", autospec=True) as adc: @@ -2113,30 +2066,10 @@ def test_bigtable_auth_adc(): ) -@requires_google_auth_lt_1_25_0 -def test_bigtable_auth_adc_old_google_auth(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - BigtableClient() - adc.assert_called_once_with( - scopes=( - "https://www.googleapis.com/auth/bigtable.data", - "https://www.googleapis.com/auth/bigtable.data.readonly", - "https://www.googleapis.com/auth/cloud-bigtable.data", - "https://www.googleapis.com/auth/cloud-bigtable.data.readonly", - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - ), - quota_project_id=None, - ) - - @pytest.mark.parametrize( "transport_class", [transports.BigtableGrpcTransport, transports.BigtableGrpcAsyncIOTransport,], ) -@requires_google_auth_gte_1_25_0 def test_bigtable_transport_auth_adc(transport_class): # If credentials and host are not provided, the transport class should use # ADC credentials. 
@@ -2157,30 +2090,6 @@ def test_bigtable_transport_auth_adc(transport_class): ) -@pytest.mark.parametrize( - "transport_class", - [transports.BigtableGrpcTransport, transports.BigtableGrpcAsyncIOTransport,], -) -@requires_google_auth_lt_1_25_0 -def test_bigtable_transport_auth_adc_old_google_auth(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus") - adc.assert_called_once_with( - scopes=( - "https://www.googleapis.com/auth/bigtable.data", - "https://www.googleapis.com/auth/bigtable.data.readonly", - "https://www.googleapis.com/auth/cloud-bigtable.data", - "https://www.googleapis.com/auth/cloud-bigtable.data.readonly", - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - ), - quota_project_id="octopus", - ) - - @pytest.mark.parametrize( "transport_class,grpc_helpers", [ From 8234ad8f72d7479e84e81ab0bce898a2737dfd11 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Wed, 3 Nov 2021 09:41:39 -0400 Subject: [PATCH 539/892] tests: refactor unittests using pytest idioms (#456) --- .../tests/unit/test_app_profile.py | 1236 +++--- .../tests/unit/test_backup.py | 1702 ++++---- .../tests/unit/test_batcher.py | 189 +- .../tests/unit/test_client.py | 1309 +++--- .../tests/unit/test_cluster.py | 1044 +++-- .../tests/unit/test_column_family.py | 1184 +++--- .../tests/unit/test_encryption_info.py | 220 +- .../tests/unit/test_error.py | 170 +- .../tests/unit/test_instance.py | 1905 ++++----- .../tests/unit/test_policy.py | 522 +-- .../tests/unit/test_row.py | 1309 +++--- .../tests/unit/test_row_data.py | 2147 +++++----- .../tests/unit/test_row_filters.py | 1910 +++++---- .../tests/unit/test_row_set.py | 526 +-- .../tests/unit/test_table.py | 3717 ++++++++--------- 15 files changed, 9465 insertions(+), 9625 deletions(-) diff --git a/packages/google-cloud-bigtable/tests/unit/test_app_profile.py b/packages/google-cloud-bigtable/tests/unit/test_app_profile.py index 6422e87e9419..07c686fb884e 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_app_profile.py +++ b/packages/google-cloud-bigtable/tests/unit/test_app_profile.py @@ -12,650 +12,624 @@ # See the License for the specific language governing permissions and # limitations under the License. 
- -import unittest - import mock +import pytest from ._testing import _make_credentials +PROJECT = "project" +INSTANCE_ID = "instance-id" +APP_PROFILE_ID = "app-profile-id" +APP_PROFILE_NAME = "projects/{}/instances/{}/appProfiles/{}".format( + PROJECT, INSTANCE_ID, APP_PROFILE_ID +) +CLUSTER_ID = "cluster-id" +OP_ID = 8765 +OP_NAME = "operations/projects/{}/instances/{}/appProfiles/{}/operations/{}".format( + PROJECT, INSTANCE_ID, APP_PROFILE_ID, OP_ID +) + + +def _make_app_profile(*args, **kwargs): + from google.cloud.bigtable.app_profile import AppProfile + + return AppProfile(*args, **kwargs) + + +def _make_client(*args, **kwargs): + from google.cloud.bigtable.client import Client + + return Client(*args, **kwargs) + + +def test_app_profile_constructor_defaults(): + from google.cloud.bigtable.app_profile import AppProfile + + client = _Client(PROJECT) + instance = _Instance(INSTANCE_ID, client) + + app_profile = _make_app_profile(APP_PROFILE_ID, instance) + assert isinstance(app_profile, AppProfile) + assert app_profile._instance == instance + assert app_profile.routing_policy_type is None + assert app_profile.description is None + assert app_profile.cluster_id is None + assert app_profile.allow_transactional_writes is None + + +def test_app_profile_constructor_explicit(): + from google.cloud.bigtable.enums import RoutingPolicyType + + ANY = RoutingPolicyType.ANY + DESCRIPTION_1 = "routing policy any" + APP_PROFILE_ID_2 = "app-profile-id-2" + SINGLE = RoutingPolicyType.SINGLE + DESCRIPTION_2 = "routing policy single" + ALLOW_WRITES = True + client = _Client(PROJECT) + instance = _Instance(INSTANCE_ID, client) + + app_profile1 = _make_app_profile( + APP_PROFILE_ID, instance, routing_policy_type=ANY, description=DESCRIPTION_1, + ) + app_profile2 = _make_app_profile( + APP_PROFILE_ID_2, + instance, + routing_policy_type=SINGLE, + description=DESCRIPTION_2, + cluster_id=CLUSTER_ID, + allow_transactional_writes=ALLOW_WRITES, + ) + assert app_profile1.app_profile_id == APP_PROFILE_ID + assert app_profile1._instance is instance + assert app_profile1.routing_policy_type == ANY + assert app_profile1.description == DESCRIPTION_1 + assert app_profile2.app_profile_id == APP_PROFILE_ID_2 + assert app_profile2._instance is instance + assert app_profile2.routing_policy_type == SINGLE + assert app_profile2.description == DESCRIPTION_2 + assert app_profile2.cluster_id == CLUSTER_ID + assert app_profile2.allow_transactional_writes == ALLOW_WRITES + + +def test_app_profile_name(): + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials, admin=True) + instance = _Instance(INSTANCE_ID, client) + + app_profile = _make_app_profile(APP_PROFILE_ID, instance) + assert app_profile.name == APP_PROFILE_NAME + + +def test_app_profile___eq__(): + client = _Client(PROJECT) + instance = _Instance(INSTANCE_ID, client) + app_profile1 = _make_app_profile(APP_PROFILE_ID, instance) + app_profile2 = _make_app_profile(APP_PROFILE_ID, instance) + assert app_profile1 == app_profile2 + + +def test_app_profile___eq___w_type_instance_differ(): + client = _Client(PROJECT) + instance = _Instance(INSTANCE_ID, client) + alt_instance = _Instance("other-instance", client) + other_object = _Other(APP_PROFILE_ID, instance) + app_profile1 = _make_app_profile(APP_PROFILE_ID, instance) + app_profile2 = _make_app_profile(APP_PROFILE_ID, alt_instance) + assert not (app_profile1 == other_object) + assert not (app_profile1 == app_profile2) + + +def test_app_profile___ne___w_same_value(): + client = 
_Client(PROJECT) + instance = _Instance(INSTANCE_ID, client) + app_profile1 = _make_app_profile(APP_PROFILE_ID, instance) + app_profile2 = _make_app_profile(APP_PROFILE_ID, instance) + assert not (app_profile1 != app_profile2) + + +def test_app_profile___ne__(): + client = _Client(PROJECT) + instance = _Instance(INSTANCE_ID, client) + app_profile1 = _make_app_profile("app_profile_id1", instance) + app_profile2 = _make_app_profile("app_profile_id2", instance) + assert app_profile1 != app_profile2 + + +def test_app_profile_from_pb_success_w_routing_any(): + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 + from google.cloud.bigtable.app_profile import AppProfile + from google.cloud.bigtable.enums import RoutingPolicyType + + client = _Client(PROJECT) + instance = _Instance(INSTANCE_ID, client) + + desctiption = "routing any" + routing = RoutingPolicyType.ANY + multi_cluster_routing_use_any = data_v2_pb2.AppProfile.MultiClusterRoutingUseAny() + + app_profile_pb = data_v2_pb2.AppProfile( + name=APP_PROFILE_NAME, + description=desctiption, + multi_cluster_routing_use_any=multi_cluster_routing_use_any, + ) + + app_profile = AppProfile.from_pb(app_profile_pb, instance) + assert isinstance(app_profile, AppProfile) + assert app_profile._instance is instance + assert app_profile.app_profile_id == APP_PROFILE_ID + assert app_profile.description == desctiption + assert app_profile.routing_policy_type == routing + assert app_profile.cluster_id is None + assert app_profile.allow_transactional_writes is False + + +def test_app_profile_from_pb_success_w_routing_single(): + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 + from google.cloud.bigtable.app_profile import AppProfile + from google.cloud.bigtable.enums import RoutingPolicyType + + client = _Client(PROJECT) + instance = _Instance(INSTANCE_ID, client) + + desctiption = "routing single" + allow_transactional_writes = True + routing = RoutingPolicyType.SINGLE + single_cluster_routing = data_v2_pb2.AppProfile.SingleClusterRouting( + cluster_id=CLUSTER_ID, allow_transactional_writes=allow_transactional_writes, + ) + + app_profile_pb = data_v2_pb2.AppProfile( + name=APP_PROFILE_NAME, + description=desctiption, + single_cluster_routing=single_cluster_routing, + ) + + app_profile = AppProfile.from_pb(app_profile_pb, instance) + assert isinstance(app_profile, AppProfile) + assert app_profile._instance is instance + assert app_profile.app_profile_id == APP_PROFILE_ID + assert app_profile.description == desctiption + assert app_profile.routing_policy_type == routing + assert app_profile.cluster_id == CLUSTER_ID + assert app_profile.allow_transactional_writes == allow_transactional_writes + + +def test_app_profile_from_pb_w_bad_app_profile_name(): + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 + from google.cloud.bigtable.app_profile import AppProfile + + bad_app_profile_name = "BAD_NAME" + + app_profile_pb = data_v2_pb2.AppProfile(name=bad_app_profile_name) + + with pytest.raises(ValueError): + AppProfile.from_pb(app_profile_pb, None) + + +def test_app_profile_from_pb_w_instance_id_mistmatch(): + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 + from google.cloud.bigtable.app_profile import AppProfile + + ALT_INSTANCE_ID = "ALT_INSTANCE_ID" + client = _Client(PROJECT) + instance = _Instance(ALT_INSTANCE_ID, client) + assert instance.instance_id == ALT_INSTANCE_ID + + app_profile_pb = data_v2_pb2.AppProfile(name=APP_PROFILE_NAME) + + with 
pytest.raises(ValueError): + AppProfile.from_pb(app_profile_pb, instance) + + +def test_app_profile_from_pb_w_project_mistmatch(): + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 + from google.cloud.bigtable.app_profile import AppProfile + + ALT_PROJECT = "ALT_PROJECT" + client = _Client(project=ALT_PROJECT) + instance = _Instance(INSTANCE_ID, client) + assert client.project == ALT_PROJECT + + app_profile_pb = data_v2_pb2.AppProfile(name=APP_PROFILE_NAME) + + with pytest.raises(ValueError): + AppProfile.from_pb(app_profile_pb, instance) + + +def test_app_profile_reload_w_routing_any(): + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 + from google.cloud.bigtable.enums import RoutingPolicyType + + api = mock.create_autospec(BigtableInstanceAdminClient) + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials, admin=True) + instance = _Instance(INSTANCE_ID, client) + + routing = RoutingPolicyType.ANY + description = "routing policy any" + + app_profile = _make_app_profile( + APP_PROFILE_ID, instance, routing_policy_type=routing, description=description, + ) + + # Create response_pb + description_from_server = "routing policy switched to single" + cluster_id_from_server = CLUSTER_ID + allow_transactional_writes = True + single_cluster_routing = data_v2_pb2.AppProfile.SingleClusterRouting( + cluster_id=cluster_id_from_server, + allow_transactional_writes=allow_transactional_writes, + ) + + response_pb = data_v2_pb2.AppProfile( + name=app_profile.name, + single_cluster_routing=single_cluster_routing, + description=description_from_server, + ) + + # Patch the stub used by the API method. + client._instance_admin_client = api + instance_stub = client._instance_admin_client + instance_stub.get_app_profile.side_effect = [response_pb] + + # Create expected_result. + expected_result = None # reload() has no return value. 
-class TestAppProfile(unittest.TestCase): - - PROJECT = "project" - INSTANCE_ID = "instance-id" - APP_PROFILE_ID = "app-profile-id" - APP_PROFILE_NAME = "projects/{}/instances/{}/appProfiles/{}".format( - PROJECT, INSTANCE_ID, APP_PROFILE_ID - ) - CLUSTER_ID = "cluster-id" - OP_ID = 8765 - OP_NAME = "operations/projects/{}/instances/{}/appProfiles/{}/operations/{}".format( - PROJECT, INSTANCE_ID, APP_PROFILE_ID, OP_ID - ) - - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.app_profile import AppProfile - - return AppProfile - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - @staticmethod - def _get_target_client_class(): - from google.cloud.bigtable.client import Client - - return Client - - def _make_client(self, *args, **kwargs): - return self._get_target_client_class()(*args, **kwargs) - - def test_constructor_defaults(self): - client = _Client(self.PROJECT) - instance = _Instance(self.INSTANCE_ID, client) - - app_profile = self._make_one(self.APP_PROFILE_ID, instance) - self.assertIsInstance(app_profile, self._get_target_class()) - self.assertEqual(app_profile._instance, instance) - self.assertIsNone(app_profile.routing_policy_type) - self.assertIsNone(app_profile.description) - self.assertIsNone(app_profile.cluster_id) - self.assertIsNone(app_profile.allow_transactional_writes) - - def test_constructor_non_defaults(self): - from google.cloud.bigtable.enums import RoutingPolicyType - - ANY = RoutingPolicyType.ANY - DESCRIPTION_1 = "routing policy any" - APP_PROFILE_ID_2 = "app-profile-id-2" - SINGLE = RoutingPolicyType.SINGLE - DESCRIPTION_2 = "routing policy single" - ALLOW_WRITES = True - client = _Client(self.PROJECT) - instance = _Instance(self.INSTANCE_ID, client) - - app_profile1 = self._make_one( - self.APP_PROFILE_ID, - instance, - routing_policy_type=ANY, - description=DESCRIPTION_1, - ) - app_profile2 = self._make_one( - APP_PROFILE_ID_2, - instance, - routing_policy_type=SINGLE, - description=DESCRIPTION_2, - cluster_id=self.CLUSTER_ID, - allow_transactional_writes=ALLOW_WRITES, - ) - self.assertEqual(app_profile1.app_profile_id, self.APP_PROFILE_ID) - self.assertIs(app_profile1._instance, instance) - self.assertEqual(app_profile1.routing_policy_type, ANY) - self.assertEqual(app_profile1.description, DESCRIPTION_1) - self.assertEqual(app_profile2.app_profile_id, APP_PROFILE_ID_2) - self.assertIs(app_profile2._instance, instance) - self.assertEqual(app_profile2.routing_policy_type, SINGLE) - self.assertEqual(app_profile2.description, DESCRIPTION_2) - self.assertEqual(app_profile2.cluster_id, self.CLUSTER_ID) - self.assertEqual(app_profile2.allow_transactional_writes, ALLOW_WRITES) - - def test_name_property(self): - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance = _Instance(self.INSTANCE_ID, client) - - app_profile = self._make_one(self.APP_PROFILE_ID, instance) - self.assertEqual(app_profile.name, self.APP_PROFILE_NAME) - - def test___eq__(self): - client = _Client(self.PROJECT) - instance = _Instance(self.INSTANCE_ID, client) - app_profile1 = self._make_one(self.APP_PROFILE_ID, instance) - app_profile2 = self._make_one(self.APP_PROFILE_ID, instance) - self.assertTrue(app_profile1 == app_profile2) - - def test___eq__type_instance_differ(self): - client = _Client(self.PROJECT) - instance = _Instance(self.INSTANCE_ID, client) - alt_instance = _Instance("other-instance", client) - other_object = 
_Other(self.APP_PROFILE_ID, instance) - app_profile1 = self._make_one(self.APP_PROFILE_ID, instance) - app_profile2 = self._make_one(self.APP_PROFILE_ID, alt_instance) - self.assertFalse(app_profile1 == other_object) - self.assertFalse(app_profile1 == app_profile2) - - def test___ne__same_value(self): - client = _Client(self.PROJECT) - instance = _Instance(self.INSTANCE_ID, client) - app_profile1 = self._make_one(self.APP_PROFILE_ID, instance) - app_profile2 = self._make_one(self.APP_PROFILE_ID, instance) - self.assertFalse(app_profile1 != app_profile2) - - def test___ne__(self): - client = _Client(self.PROJECT) - instance = _Instance(self.INSTANCE_ID, client) - app_profile1 = self._make_one("app_profile_id1", instance) - app_profile2 = self._make_one("app_profile_id2", instance) - self.assertTrue(app_profile1 != app_profile2) - - def test_from_pb_success_routing_any(self): - from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 - from google.cloud.bigtable.enums import RoutingPolicyType - - client = _Client(self.PROJECT) - instance = _Instance(self.INSTANCE_ID, client) - - desctiption = "routing any" - routing = RoutingPolicyType.ANY - multi_cluster_routing_use_any = ( - data_v2_pb2.AppProfile.MultiClusterRoutingUseAny() - ) - - app_profile_pb = data_v2_pb2.AppProfile( - name=self.APP_PROFILE_NAME, - description=desctiption, - multi_cluster_routing_use_any=multi_cluster_routing_use_any, - ) - - klass = self._get_target_class() - app_profile = klass.from_pb(app_profile_pb, instance) - self.assertIsInstance(app_profile, klass) - self.assertIs(app_profile._instance, instance) - self.assertEqual(app_profile.app_profile_id, self.APP_PROFILE_ID) - self.assertEqual(app_profile.description, desctiption) - self.assertEqual(app_profile.routing_policy_type, routing) - self.assertIsNone(app_profile.cluster_id) - self.assertEqual(app_profile.allow_transactional_writes, False) - - def test_from_pb_success_routing_single(self): - from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 - from google.cloud.bigtable.enums import RoutingPolicyType - - client = _Client(self.PROJECT) - instance = _Instance(self.INSTANCE_ID, client) - - desctiption = "routing single" - allow_transactional_writes = True - routing = RoutingPolicyType.SINGLE - single_cluster_routing = data_v2_pb2.AppProfile.SingleClusterRouting( - cluster_id=self.CLUSTER_ID, - allow_transactional_writes=allow_transactional_writes, - ) - - app_profile_pb = data_v2_pb2.AppProfile( - name=self.APP_PROFILE_NAME, - description=desctiption, - single_cluster_routing=single_cluster_routing, - ) - - klass = self._get_target_class() - app_profile = klass.from_pb(app_profile_pb, instance) - self.assertIsInstance(app_profile, klass) - self.assertIs(app_profile._instance, instance) - self.assertEqual(app_profile.app_profile_id, self.APP_PROFILE_ID) - self.assertEqual(app_profile.description, desctiption) - self.assertEqual(app_profile.routing_policy_type, routing) - self.assertEqual(app_profile.cluster_id, self.CLUSTER_ID) - self.assertEqual( - app_profile.allow_transactional_writes, allow_transactional_writes - ) - - def test_from_pb_bad_app_profile_name(self): - from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 - - bad_app_profile_name = "BAD_NAME" - - app_profile_pb = data_v2_pb2.AppProfile(name=bad_app_profile_name) - - klass = self._get_target_class() - with self.assertRaises(ValueError): - klass.from_pb(app_profile_pb, None) - - def test_from_pb_instance_id_mistmatch(self): - from 
google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 - - ALT_INSTANCE_ID = "ALT_INSTANCE_ID" - client = _Client(self.PROJECT) - instance = _Instance(ALT_INSTANCE_ID, client) - self.assertEqual(instance.instance_id, ALT_INSTANCE_ID) - - app_profile_pb = data_v2_pb2.AppProfile(name=self.APP_PROFILE_NAME) - - klass = self._get_target_class() - with self.assertRaises(ValueError): - klass.from_pb(app_profile_pb, instance) - - def test_from_pb_project_mistmatch(self): - from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 - - ALT_PROJECT = "ALT_PROJECT" - client = _Client(project=ALT_PROJECT) - instance = _Instance(self.INSTANCE_ID, client) - self.assertEqual(client.project, ALT_PROJECT) - - app_profile_pb = data_v2_pb2.AppProfile(name=self.APP_PROFILE_NAME) - - klass = self._get_target_class() - with self.assertRaises(ValueError): - klass.from_pb(app_profile_pb, instance) - - def test_reload_routing_any(self): - from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( - BigtableInstanceAdminClient, - ) - from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 - from google.cloud.bigtable.enums import RoutingPolicyType - - api = mock.create_autospec(BigtableInstanceAdminClient) - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance = _Instance(self.INSTANCE_ID, client) - - routing = RoutingPolicyType.ANY - description = "routing policy any" - - app_profile = self._make_one( - self.APP_PROFILE_ID, - instance, - routing_policy_type=routing, - description=description, - ) - - # Create response_pb - description_from_server = "routing policy switched to single" - cluster_id_from_server = self.CLUSTER_ID - allow_transactional_writes = True - single_cluster_routing = data_v2_pb2.AppProfile.SingleClusterRouting( - cluster_id=cluster_id_from_server, - allow_transactional_writes=allow_transactional_writes, - ) - - response_pb = data_v2_pb2.AppProfile( - name=app_profile.name, - single_cluster_routing=single_cluster_routing, - description=description_from_server, - ) - - # Patch the stub used by the API method. - client._instance_admin_client = api - instance_stub = client._instance_admin_client - instance_stub.get_app_profile.side_effect = [response_pb] - - # Create expected_result. - expected_result = None # reload() has no return value. - - # Check app_profile config values before. - self.assertEqual(app_profile.routing_policy_type, routing) - self.assertEqual(app_profile.description, description) - self.assertIsNone(app_profile.cluster_id) - self.assertIsNone(app_profile.allow_transactional_writes) - - # Perform the method and check the result. 
- result = app_profile.reload() - self.assertEqual(result, expected_result) - self.assertEqual(app_profile.routing_policy_type, RoutingPolicyType.SINGLE) - self.assertEqual(app_profile.description, description_from_server) - self.assertEqual(app_profile.cluster_id, cluster_id_from_server) - self.assertEqual( - app_profile.allow_transactional_writes, allow_transactional_writes - ) - - def test_exists(self): - from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( - BigtableInstanceAdminClient, - ) - from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 - from google.api_core import exceptions - - instance_api = mock.create_autospec(BigtableInstanceAdminClient) - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance = client.instance(self.INSTANCE_ID) - - # Create response_pb - response_pb = data_v2_pb2.AppProfile(name=self.APP_PROFILE_NAME) - client._instance_admin_client = instance_api - - # Patch the stub used by the API method. - client._instance_admin_client = instance_api - instance_stub = client._instance_admin_client - instance_stub.get_app_profile.side_effect = [ - response_pb, - exceptions.NotFound("testing"), - exceptions.BadRequest("testing"), - ] - - # Perform the method and check the result. - non_existing_app_profile_id = "other-app-profile-id" - app_profile = self._make_one(self.APP_PROFILE_ID, instance) - alt_app_profile = self._make_one(non_existing_app_profile_id, instance) - self.assertTrue(app_profile.exists()) - self.assertFalse(alt_app_profile.exists()) - with self.assertRaises(exceptions.BadRequest): - alt_app_profile.exists() - - def test_create_routing_any(self): - from google.cloud.bigtable.enums import RoutingPolicyType - from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( - BigtableInstanceAdminClient, - ) - - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance = client.instance(self.INSTANCE_ID) - - routing = RoutingPolicyType.ANY - description = "routing policy any" - ignore_warnings = True - - app_profile = self._make_one( - self.APP_PROFILE_ID, - instance, - routing_policy_type=routing, - description=description, - ) - - expected_request_app_profile = app_profile._to_pb() - name = instance.name - expected_request = { - "request": { - "parent": name, - "app_profile_id": self.APP_PROFILE_ID, - "app_profile": expected_request_app_profile, - "ignore_warnings": ignore_warnings, - } + # Check app_profile config values before. + assert app_profile.routing_policy_type == routing + assert app_profile.description == description + assert app_profile.cluster_id is None + assert app_profile.allow_transactional_writes is None + + # Perform the method and check the result. 
+ result = app_profile.reload() + assert result == expected_result + assert app_profile.routing_policy_type == RoutingPolicyType.SINGLE + assert app_profile.description == description_from_server + assert app_profile.cluster_id == cluster_id_from_server + assert app_profile.allow_transactional_writes == allow_transactional_writes + + +def test_app_profile_exists(): + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 + from google.api_core import exceptions + + instance_api = mock.create_autospec(BigtableInstanceAdminClient) + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials, admin=True) + instance = client.instance(INSTANCE_ID) + + # Create response_pb + response_pb = data_v2_pb2.AppProfile(name=APP_PROFILE_NAME) + client._instance_admin_client = instance_api + + # Patch the stub used by the API method. + client._instance_admin_client = instance_api + instance_stub = client._instance_admin_client + instance_stub.get_app_profile.side_effect = [ + response_pb, + exceptions.NotFound("testing"), + exceptions.BadRequest("testing"), + ] + + # Perform the method and check the result. + non_existing_app_profile_id = "other-app-profile-id" + app_profile = _make_app_profile(APP_PROFILE_ID, instance) + alt_app_profile = _make_app_profile(non_existing_app_profile_id, instance) + assert app_profile.exists() + assert not alt_app_profile.exists() + with pytest.raises(exceptions.BadRequest): + alt_app_profile.exists() + + +def test_app_profile_create_w_routing_any(): + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) + from google.cloud.bigtable.app_profile import AppProfile + from google.cloud.bigtable.enums import RoutingPolicyType + + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials, admin=True) + instance = client.instance(INSTANCE_ID) + + routing = RoutingPolicyType.ANY + description = "routing policy any" + ignore_warnings = True + + app_profile = _make_app_profile( + APP_PROFILE_ID, instance, routing_policy_type=routing, description=description, + ) + + expected_request_app_profile = app_profile._to_pb() + name = instance.name + expected_request = { + "request": { + "parent": name, + "app_profile_id": APP_PROFILE_ID, + "app_profile": expected_request_app_profile, + "ignore_warnings": ignore_warnings, } + } - instance_api = mock.create_autospec(BigtableInstanceAdminClient) - instance_api.app_profile_path.return_value = ( - "projects/project/instances/instance-id/appProfiles/app-profile-id" - ) - instance_api.instance_path.return_value = name - instance_api.create_app_profile.return_value = expected_request_app_profile - - # Patch the stub used by the API method. - client._instance_admin_client = instance_api - app_profile._instance._client._instance_admin_client = instance_api - # Perform the method and check the result. 
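
A note on the mocking pattern that test_app_profile_exists above depends on: a list assigned to side_effect is consumed one element per call, with plain values returned and exception instances raised. A self-contained sketch of that behaviour (the Service class and profile names are illustrative stand-ins for the autospec'd admin client):

import mock
import pytest
from google.api_core import exceptions


class Service(object):
    """Illustrative stand-in for the autospec'd admin client."""

    def get_app_profile(self, request=None):
        raise NotImplementedError


def exists(api, name):
    # Mirrors the shape of exists(): NotFound -> False, other errors propagate.
    try:
        api.get_app_profile(request={"name": name})
        return True
    except exceptions.NotFound:
        return False


api = mock.create_autospec(Service, instance=True)
api.get_app_profile.side_effect = [
    object(),                          # 1st call: profile found
    exceptions.NotFound("testing"),    # 2nd call: raised, so exists() is False
    exceptions.BadRequest("testing"),  # 3rd call: raised and propagated
]

assert exists(api, "profile-1") is True
assert exists(api, "profile-2") is False
with pytest.raises(exceptions.BadRequest):
    exists(api, "profile-3")
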
- result = app_profile.create(ignore_warnings) - - actual_request = client._instance_admin_client.create_app_profile.call_args_list[ - 0 - ].kwargs - - self.assertEqual(actual_request, expected_request) - self.assertIsInstance(result, self._get_target_class()) - self.assertEqual(result.app_profile_id, self.APP_PROFILE_ID) - self.assertIs(result._instance, instance) - self.assertEqual(result.routing_policy_type, routing) - self.assertEqual(result.description, description) - self.assertEqual(result.allow_transactional_writes, False) - self.assertIsNone(result.cluster_id) - - def test_create_routing_single(self): - from google.cloud.bigtable.enums import RoutingPolicyType - from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( - BigtableInstanceAdminClient, - ) - - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance = client.instance(self.INSTANCE_ID) - - routing = RoutingPolicyType.SINGLE - description = "routing policy single" - allow_writes = False - ignore_warnings = True - - app_profile = self._make_one( - self.APP_PROFILE_ID, - instance, - routing_policy_type=routing, - description=description, - cluster_id=self.CLUSTER_ID, - allow_transactional_writes=allow_writes, - ) - expected_request_app_profile = app_profile._to_pb() - instance_name = instance.name - expected_request = { - "request": { - "parent": instance_name, - "app_profile_id": self.APP_PROFILE_ID, - "app_profile": expected_request_app_profile, - "ignore_warnings": ignore_warnings, - } + instance_api = mock.create_autospec(BigtableInstanceAdminClient) + instance_api.app_profile_path.return_value = ( + "projects/project/instances/instance-id/appProfiles/app-profile-id" + ) + instance_api.instance_path.return_value = name + instance_api.create_app_profile.return_value = expected_request_app_profile + + # Patch the stub used by the API method. + client._instance_admin_client = instance_api + app_profile._instance._client._instance_admin_client = instance_api + # Perform the method and check the result. 
+ result = app_profile.create(ignore_warnings) + + actual_request = client._instance_admin_client.create_app_profile.call_args_list[ + 0 + ].kwargs + + assert actual_request == expected_request + assert isinstance(result, AppProfile) + assert result.app_profile_id == APP_PROFILE_ID + assert result._instance is instance + assert result.routing_policy_type == routing + assert result.description == description + assert result.allow_transactional_writes is False + assert result.cluster_id is None + + +def test_app_profile_create_w_routing_single(): + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) + from google.cloud.bigtable.app_profile import AppProfile + from google.cloud.bigtable.enums import RoutingPolicyType + + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials, admin=True) + instance = client.instance(INSTANCE_ID) + + routing = RoutingPolicyType.SINGLE + description = "routing policy single" + allow_writes = False + ignore_warnings = True + + app_profile = _make_app_profile( + APP_PROFILE_ID, + instance, + routing_policy_type=routing, + description=description, + cluster_id=CLUSTER_ID, + allow_transactional_writes=allow_writes, + ) + expected_request_app_profile = app_profile._to_pb() + instance_name = instance.name + expected_request = { + "request": { + "parent": instance_name, + "app_profile_id": APP_PROFILE_ID, + "app_profile": expected_request_app_profile, + "ignore_warnings": ignore_warnings, } + } + + # Patch the stub used by the API method. + instance_api = mock.create_autospec(BigtableInstanceAdminClient) + instance_api.app_profile_path.return_value = ( + "projects/project/instances/instance-id/appProfiles/app-profile-id" + ) + instance_api.instance_path.return_value = instance_name + instance_api.create_app_profile.return_value = expected_request_app_profile + client._instance_admin_client = instance_api + # Perform the method and check the result. 
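
The create tests above recover the request that reached the admin client via call_args_list[0].kwargs and compare it against an expected_request dict. A small sketch of that capture-and-compare step in isolation (Service and the resource names are illustrative):

import mock


class Service(object):
    """Illustrative stand-in for the autospec'd admin client."""

    def create_app_profile(self, request=None):
        raise NotImplementedError


api = mock.create_autospec(Service, instance=True)
api.create_app_profile(
    request={"parent": "projects/p/instances/i", "app_profile_id": "ap"}
)

# call_args_list[0].kwargs recovers the keyword arguments of the first call,
# which is how the tests above rebuild the request dict for comparison.
actual = api.create_app_profile.call_args_list[0].kwargs
assert actual == {
    "request": {"parent": "projects/p/instances/i", "app_profile_id": "ap"}
}
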
+ result = app_profile.create(ignore_warnings) + + actual_request = client._instance_admin_client.create_app_profile.call_args_list[ + 0 + ].kwargs + + assert actual_request == expected_request + assert isinstance(result, AppProfile) + assert result.app_profile_id == APP_PROFILE_ID + assert result._instance is instance + assert result.routing_policy_type == routing + assert result.description == description + assert result.allow_transactional_writes == allow_writes + assert result.cluster_id == CLUSTER_ID + + +def test_app_profile_create_w_wrong_routing_policy(): + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials, admin=True) + instance = client.instance(INSTANCE_ID) + app_profile = _make_app_profile(APP_PROFILE_ID, instance, routing_policy_type=None) + with pytest.raises(ValueError): + app_profile.create() + + +def test_app_profile_update_w_routing_any(): + from google.longrunning import operations_pb2 + from google.protobuf.any_pb2 import Any + from google.cloud.bigtable_admin_v2.types import ( + bigtable_instance_admin as messages_v2_pb2, + ) + from google.cloud.bigtable.enums import RoutingPolicyType + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) + from google.protobuf import field_mask_pb2 + + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials, admin=True) + instance = client.instance(INSTANCE_ID) + + routing = RoutingPolicyType.SINGLE + description = "to routing policy single" + allow_writes = True + app_profile = _make_app_profile( + APP_PROFILE_ID, + instance, + routing_policy_type=routing, + description=description, + cluster_id=CLUSTER_ID, + allow_transactional_writes=allow_writes, + ) + + # Create response_pb + metadata = messages_v2_pb2.UpdateAppProfileMetadata() + type_url = "type.googleapis.com/{}".format( + messages_v2_pb2.UpdateAppProfileMetadata._meta._pb.DESCRIPTOR.full_name + ) + response_pb = operations_pb2.Operation( + name=OP_NAME, + metadata=Any(type_url=type_url, value=metadata._pb.SerializeToString()), + ) + + # Patch the stub used by the API method. + instance_api = mock.create_autospec(BigtableInstanceAdminClient) + # Mock api calls + instance_api.app_profile_path.return_value = ( + "projects/project/instances/instance-id/appProfiles/app-profile-id" + ) + + client._instance_admin_client = instance_api + + # Perform the method and check the result. + ignore_warnings = True + expected_request_update_mask = field_mask_pb2.FieldMask( + paths=["description", "single_cluster_routing"] + ) - # Patch the stub used by the API method. - instance_api = mock.create_autospec(BigtableInstanceAdminClient) - instance_api.app_profile_path.return_value = ( - "projects/project/instances/instance-id/appProfiles/app-profile-id" - ) - instance_api.instance_path.return_value = instance_name - instance_api.create_app_profile.return_value = expected_request_app_profile - client._instance_admin_client = instance_api - # Perform the method and check the result. 
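
test_app_profile_update_w_routing_any above builds the Operation metadata by hand, formatting a type_url and serializing the message into a protobuf Any. For reference, a sketch of the equivalent round trip with Any.Pack/Unpack; Timestamp is used here only as a convenient message type:

from google.protobuf import any_pb2
from google.protobuf import timestamp_pb2

msg = timestamp_pb2.Timestamp(seconds=1)

packed = any_pb2.Any()
packed.Pack(msg)  # type_url becomes "type.googleapis.com/google.protobuf.Timestamp"
assert packed.type_url.endswith(msg.DESCRIPTOR.full_name)

unpacked = timestamp_pb2.Timestamp()
assert packed.Unpack(unpacked)
assert unpacked.seconds == 1
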
- result = app_profile.create(ignore_warnings) - - actual_request = client._instance_admin_client.create_app_profile.call_args_list[ - 0 - ].kwargs - - self.assertEqual(actual_request, expected_request) - self.assertIsInstance(result, self._get_target_class()) - self.assertEqual(result.app_profile_id, self.APP_PROFILE_ID) - self.assertIs(result._instance, instance) - self.assertEqual(result.routing_policy_type, routing) - self.assertEqual(result.description, description) - self.assertEqual(result.allow_transactional_writes, allow_writes) - self.assertEqual(result.cluster_id, self.CLUSTER_ID) - - def test_create_app_profile_with_wrong_routing_policy(self): - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance = client.instance(self.INSTANCE_ID) - app_profile = self._make_one( - self.APP_PROFILE_ID, instance, routing_policy_type=None - ) - with self.assertRaises(ValueError): - app_profile.create() - - def test_update_app_profile_routing_any(self): - from google.longrunning import operations_pb2 - from google.protobuf.any_pb2 import Any - from google.cloud.bigtable_admin_v2.types import ( - bigtable_instance_admin as messages_v2_pb2, - ) - from google.cloud.bigtable.enums import RoutingPolicyType - from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( - BigtableInstanceAdminClient, - ) - from google.protobuf import field_mask_pb2 - - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance = client.instance(self.INSTANCE_ID) - - routing = RoutingPolicyType.SINGLE - description = "to routing policy single" - allow_writes = True - app_profile = self._make_one( - self.APP_PROFILE_ID, - instance, - routing_policy_type=routing, - description=description, - cluster_id=self.CLUSTER_ID, - allow_transactional_writes=allow_writes, - ) - - # Create response_pb - metadata = messages_v2_pb2.UpdateAppProfileMetadata() - type_url = "type.googleapis.com/{}".format( - messages_v2_pb2.UpdateAppProfileMetadata._meta._pb.DESCRIPTOR.full_name - ) - response_pb = operations_pb2.Operation( - name=self.OP_NAME, - metadata=Any(type_url=type_url, value=metadata._pb.SerializeToString()), - ) - - # Patch the stub used by the API method. - instance_api = mock.create_autospec(BigtableInstanceAdminClient) - # Mock api calls - instance_api.app_profile_path.return_value = ( - "projects/project/instances/instance-id/appProfiles/app-profile-id" - ) - - client._instance_admin_client = instance_api - - # Perform the method and check the result. 
- ignore_warnings = True - expected_request_update_mask = field_mask_pb2.FieldMask( - paths=["description", "single_cluster_routing"] - ) - - expected_request = { - "request": { - "app_profile": app_profile._to_pb(), - "update_mask": expected_request_update_mask, - "ignore_warnings": ignore_warnings, - } + expected_request = { + "request": { + "app_profile": app_profile._to_pb(), + "update_mask": expected_request_update_mask, + "ignore_warnings": ignore_warnings, } + } + + instance_api.update_app_profile.return_value = response_pb + app_profile._instance._client._instance_admin_client = instance_api + result = app_profile.update(ignore_warnings=ignore_warnings) + actual_request = client._instance_admin_client.update_app_profile.call_args_list[ + 0 + ].kwargs + + assert actual_request == expected_request + assert ( + result.metadata.type_url + == "type.googleapis.com/google.bigtable.admin.v2.UpdateAppProfileMetadata" + ) + - instance_api.update_app_profile.return_value = response_pb - app_profile._instance._client._instance_admin_client = instance_api - result = app_profile.update(ignore_warnings=ignore_warnings) - actual_request = client._instance_admin_client.update_app_profile.call_args_list[ - 0 - ].kwargs - - self.assertEqual(actual_request, expected_request) - self.assertEqual( - result.metadata.type_url, - "type.googleapis.com/google.bigtable.admin.v2.UpdateAppProfileMetadata", - ) - - def test_update_app_profile_routing_single(self): - from google.longrunning import operations_pb2 - from google.protobuf.any_pb2 import Any - from google.cloud.bigtable_admin_v2.types import ( - bigtable_instance_admin as messages_v2_pb2, - ) - from google.cloud.bigtable.enums import RoutingPolicyType - from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( - BigtableInstanceAdminClient, - ) - from google.protobuf import field_mask_pb2 - - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance = client.instance(self.INSTANCE_ID) - - routing = RoutingPolicyType.ANY - app_profile = self._make_one( - self.APP_PROFILE_ID, instance, routing_policy_type=routing - ) - - # Create response_pb - metadata = messages_v2_pb2.UpdateAppProfileMetadata() - type_url = "type.googleapis.com/{}".format( - messages_v2_pb2.UpdateAppProfileMetadata._meta._pb.DESCRIPTOR.full_name - ) - response_pb = operations_pb2.Operation( - name=self.OP_NAME, - metadata=Any(type_url=type_url, value=metadata._pb.SerializeToString()), - ) - - # Patch the stub used by the API method. - instance_api = mock.create_autospec(BigtableInstanceAdminClient) - # Mock api calls - instance_api.app_profile_path.return_value = ( - "projects/project/instances/instance-id/appProfiles/app-profile-id" - ) - client._instance_admin_client = instance_api - client._instance_admin_client.update_app_profile.return_value = response_pb - # Perform the method and check the result. 
- ignore_warnings = True - expected_request_update_mask = field_mask_pb2.FieldMask( - paths=["multi_cluster_routing_use_any"] - ) - expected_request = { - "request": { - "app_profile": app_profile._to_pb(), - "update_mask": expected_request_update_mask, - "ignore_warnings": ignore_warnings, - } +def test_app_profile_update_w_routing_single(): + from google.longrunning import operations_pb2 + from google.protobuf.any_pb2 import Any + from google.cloud.bigtable_admin_v2.types import ( + bigtable_instance_admin as messages_v2_pb2, + ) + from google.cloud.bigtable.enums import RoutingPolicyType + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) + from google.protobuf import field_mask_pb2 + + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials, admin=True) + instance = client.instance(INSTANCE_ID) + + routing = RoutingPolicyType.ANY + app_profile = _make_app_profile( + APP_PROFILE_ID, instance, routing_policy_type=routing + ) + + # Create response_pb + metadata = messages_v2_pb2.UpdateAppProfileMetadata() + type_url = "type.googleapis.com/{}".format( + messages_v2_pb2.UpdateAppProfileMetadata._meta._pb.DESCRIPTOR.full_name + ) + response_pb = operations_pb2.Operation( + name=OP_NAME, + metadata=Any(type_url=type_url, value=metadata._pb.SerializeToString()), + ) + + # Patch the stub used by the API method. + instance_api = mock.create_autospec(BigtableInstanceAdminClient) + # Mock api calls + instance_api.app_profile_path.return_value = ( + "projects/project/instances/instance-id/appProfiles/app-profile-id" + ) + client._instance_admin_client = instance_api + client._instance_admin_client.update_app_profile.return_value = response_pb + # Perform the method and check the result. + ignore_warnings = True + expected_request_update_mask = field_mask_pb2.FieldMask( + paths=["multi_cluster_routing_use_any"] + ) + expected_request = { + "request": { + "app_profile": app_profile._to_pb(), + "update_mask": expected_request_update_mask, + "ignore_warnings": ignore_warnings, } + } + + result = app_profile.update(ignore_warnings=ignore_warnings) + actual_request = client._instance_admin_client.update_app_profile.call_args_list[ + 0 + ].kwargs + assert actual_request == expected_request + assert ( + result.metadata.type_url + == "type.googleapis.com/google.bigtable.admin.v2.UpdateAppProfileMetadata" + ) + + +def test_app_profile_update_w_wrong_routing_policy(): + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials, admin=True) + instance = client.instance(INSTANCE_ID) + app_profile = _make_app_profile(APP_PROFILE_ID, instance, routing_policy_type=None) + with pytest.raises(ValueError): + app_profile.update() + + +def test_app_profile_delete(): + from google.protobuf import empty_pb2 + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) + + instance_api = mock.create_autospec(BigtableInstanceAdminClient) + + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials, admin=True) + instance = client.instance(INSTANCE_ID) + app_profile = _make_app_profile(APP_PROFILE_ID, instance) + + # Create response_pb + response_pb = empty_pb2.Empty() + + # Patch the stub used by the API method. 
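
Both update tests assert that the FieldMask paths follow the routing policy: description plus single_cluster_routing for SINGLE, and multi_cluster_routing_use_any for ANY. A minimal sketch of building those masks for a partial update:

from google.protobuf import field_mask_pb2

# SINGLE routing: the description changed too, so both paths are listed.
single_mask = field_mask_pb2.FieldMask(
    paths=["description", "single_cluster_routing"]
)

# ANY routing: only the routing oneof is updated.
any_mask = field_mask_pb2.FieldMask(paths=["multi_cluster_routing_use_any"])

assert list(single_mask.paths) == ["description", "single_cluster_routing"]
assert list(any_mask.paths) == ["multi_cluster_routing_use_any"]
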
+ client._instance_admin_client = instance_api + instance_stub = client._instance_admin_client.transport + instance_stub.delete_cluster.side_effect = [response_pb] + + # Create expected_result. + expected_result = None # delete() has no return value. + + # Perform the method and check the result. + result = app_profile.delete() - result = app_profile.update(ignore_warnings=ignore_warnings) - actual_request = client._instance_admin_client.update_app_profile.call_args_list[ - 0 - ].kwargs - self.assertEqual(actual_request, expected_request) - self.assertEqual( - result.metadata.type_url, - "type.googleapis.com/google.bigtable.admin.v2.UpdateAppProfileMetadata", - ) - - def test_update_app_profile_with_wrong_routing_policy(self): - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance = client.instance(self.INSTANCE_ID) - app_profile = self._make_one( - self.APP_PROFILE_ID, instance, routing_policy_type=None - ) - with self.assertRaises(ValueError): - app_profile.update() - - def test_delete(self): - from google.protobuf import empty_pb2 - from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( - BigtableInstanceAdminClient, - ) - - instance_api = mock.create_autospec(BigtableInstanceAdminClient) - - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance = client.instance(self.INSTANCE_ID) - app_profile = self._make_one(self.APP_PROFILE_ID, instance) - - # Create response_pb - response_pb = empty_pb2.Empty() - - # Patch the stub used by the API method. - client._instance_admin_client = instance_api - instance_stub = client._instance_admin_client.transport - instance_stub.delete_cluster.side_effect = [response_pb] - - # Create expected_result. - expected_result = None # delete() has no return value. - - # Perform the method and check the result. 
- result = app_profile.delete() - - self.assertEqual(result, expected_result) + assert result == expected_result class _Client(object): diff --git a/packages/google-cloud-bigtable/tests/unit/test_backup.py b/packages/google-cloud-bigtable/tests/unit/test_backup.py index a32e18adb0bd..92e9d7307e7a 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_backup.py +++ b/packages/google-cloud-bigtable/tests/unit/test_backup.py @@ -14,881 +14,893 @@ import datetime + import mock -import unittest +import pytest from ._testing import _make_credentials from google.cloud._helpers import UTC +PROJECT_ID = "project-id" +INSTANCE_ID = "instance-id" +INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID +CLUSTER_ID = "cluster-id" +CLUSTER_NAME = INSTANCE_NAME + "/clusters/" + CLUSTER_ID +TABLE_ID = "table-id" +TABLE_NAME = INSTANCE_NAME + "/tables/" + TABLE_ID +BACKUP_ID = "backup-id" +BACKUP_NAME = CLUSTER_NAME + "/backups/" + BACKUP_ID + +ALT_INSTANCE = "other-instance-id" +ALT_INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + ALT_INSTANCE +ALT_CLUSTER_NAME = ALT_INSTANCE_NAME + "/clusters/" + CLUSTER_ID +ALT_BACKUP_NAME = ALT_CLUSTER_NAME + "/backups/" + BACKUP_ID + + +def _make_timestamp(): + return datetime.datetime.utcnow().replace(tzinfo=UTC) + + +def _make_table_admin_client(): + from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient + + return mock.create_autospec(BigtableTableAdminClient, instance=True) + + +def _make_backup(*args, **kwargs): + from google.cloud.bigtable.backup import Backup + + return Backup(*args, **kwargs) + + +def test_backup_constructor_defaults(): + instance = _Instance(INSTANCE_NAME) + backup = _make_backup(BACKUP_ID, instance) + + assert backup.backup_id == BACKUP_ID + assert backup._instance is instance + assert backup._cluster is None + assert backup.table_id is None + assert backup._expire_time is None + + assert backup._parent is None + assert backup._source_table is None + assert backup._start_time is None + assert backup._end_time is None + assert backup._size_bytes is None + assert backup._state is None + assert backup._encryption_info is None + + +def test_backup_constructor_explicit(): + instance = _Instance(INSTANCE_NAME) + expire_time = _make_timestamp() + + backup = _make_backup( + BACKUP_ID, + instance, + cluster_id=CLUSTER_ID, + table_id=TABLE_ID, + expire_time=expire_time, + encryption_info="encryption_info", + ) + + assert backup.backup_id == BACKUP_ID + assert backup._instance is instance + assert backup._cluster is CLUSTER_ID + assert backup.table_id == TABLE_ID + assert backup._expire_time == expire_time + assert backup._encryption_info == "encryption_info" + + assert backup._parent is None + assert backup._source_table is None + assert backup._start_time is None + assert backup._end_time is None + assert backup._size_bytes is None + assert backup._state is None + + +def test_backup_from_pb_w_project_mismatch(): + from google.cloud.bigtable_admin_v2.types import table + from google.cloud.bigtable.backup import Backup + + alt_project_id = "alt-project-id" + client = _Client(project=alt_project_id) + instance = _Instance(INSTANCE_NAME, client) + backup_pb = table.Backup(name=BACKUP_NAME) + + with pytest.raises(ValueError): + Backup.from_pb(backup_pb, instance) + + +def test_backup_from_pb_w_instance_mismatch(): + from google.cloud.bigtable_admin_v2.types import table + from google.cloud.bigtable.backup import Backup + + alt_instance = "/projects/%s/instances/alt-instance" % PROJECT_ID + client = _Client() + 
instance = _Instance(alt_instance, client) + backup_pb = table.Backup(name=BACKUP_NAME) + + with pytest.raises(ValueError): + Backup.from_pb(backup_pb, instance) + + +def test_backup_from_pb_w_bad_name(): + from google.cloud.bigtable_admin_v2.types import table + from google.cloud.bigtable.backup import Backup + + client = _Client() + instance = _Instance(INSTANCE_NAME, client) + backup_pb = table.Backup(name="invalid_name") + + with pytest.raises(ValueError): + Backup.from_pb(backup_pb, instance) + + +def test_backup_from_pb_success(): + from google.cloud.bigtable.encryption_info import EncryptionInfo + from google.cloud.bigtable.error import Status + from google.cloud.bigtable_admin_v2.types import table + from google.cloud.bigtable.backup import Backup + from google.cloud._helpers import _datetime_to_pb_timestamp + from google.rpc.code_pb2 import Code + + client = _Client() + instance = _Instance(INSTANCE_NAME, client) + timestamp = _datetime_to_pb_timestamp(_make_timestamp()) + size_bytes = 1234 + state = table.Backup.State.READY + GOOGLE_DEFAULT_ENCRYPTION = ( + table.EncryptionInfo.EncryptionType.GOOGLE_DEFAULT_ENCRYPTION + ) + backup_pb = table.Backup( + name=BACKUP_NAME, + source_table=TABLE_NAME, + expire_time=timestamp, + start_time=timestamp, + end_time=timestamp, + size_bytes=size_bytes, + state=state, + encryption_info=table.EncryptionInfo( + encryption_type=GOOGLE_DEFAULT_ENCRYPTION, + encryption_status=_StatusPB(Code.OK, "Status OK"), + kms_key_version="2", + ), + ) + + backup = Backup.from_pb(backup_pb, instance) + + assert isinstance(backup, Backup) + assert backup._instance == instance + assert backup.backup_id == BACKUP_ID + assert backup.cluster == CLUSTER_ID + assert backup.table_id == TABLE_ID + assert backup._expire_time == timestamp + assert backup.start_time == timestamp + assert backup.end_time == timestamp + assert backup._size_bytes == size_bytes + assert backup._state == state + expected_info = EncryptionInfo( + encryption_type=GOOGLE_DEFAULT_ENCRYPTION, + encryption_status=Status(_StatusPB(Code.OK, "Status OK")), + kms_key_version="2", + ) + assert backup.encryption_info == expected_info + + +def test_backup_name(): + from google.cloud.bigtable.client import Client + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) + + api = mock.create_autospec(BigtableInstanceAdminClient) + credentials = _make_credentials() + client = Client(project=PROJECT_ID, credentials=credentials, admin=True) + client._table_admin_client = api + instance = _Instance(INSTANCE_NAME, client) + + backup = _make_backup(BACKUP_ID, instance, cluster_id=CLUSTER_ID) + assert backup.name == BACKUP_NAME + + +def test_backup_cluster(): + backup = _make_backup(BACKUP_ID, _Instance(INSTANCE_NAME), cluster_id=CLUSTER_ID) + assert backup.cluster == CLUSTER_ID + + +def test_backup_cluster_setter(): + backup = _make_backup(BACKUP_ID, _Instance(INSTANCE_NAME)) + backup.cluster = CLUSTER_ID + assert backup.cluster == CLUSTER_ID + + +def test_backup_parent_none(): + backup = _make_backup(BACKUP_ID, _Instance(INSTANCE_NAME),) + assert backup.parent is None + + +def test_backup_parent_w_cluster(): + from google.cloud.bigtable.client import Client + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) + + api = mock.create_autospec(BigtableInstanceAdminClient) + credentials = _make_credentials() + client = Client(project=PROJECT_ID, credentials=credentials, admin=True) + 
client._table_admin_client = api + instance = _Instance(INSTANCE_NAME, client) + + backup = _make_backup(BACKUP_ID, instance, cluster_id=CLUSTER_ID) + assert backup._cluster == CLUSTER_ID + assert backup.parent == CLUSTER_NAME + + +def test_backup_source_table_none(): + from google.cloud.bigtable.client import Client + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) + + api = mock.create_autospec(BigtableInstanceAdminClient) + credentials = _make_credentials() + client = Client(project=PROJECT_ID, credentials=credentials, admin=True) + client._table_admin_client = api + instance = _Instance(INSTANCE_NAME, client) + + backup = _make_backup(BACKUP_ID, instance) + assert backup.source_table is None + + +def test_backup_source_table_valid(): + from google.cloud.bigtable.client import Client + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) + + api = mock.create_autospec(BigtableInstanceAdminClient) + credentials = _make_credentials() + client = Client(project=PROJECT_ID, credentials=credentials, admin=True) + client._table_admin_client = api + instance = _Instance(INSTANCE_NAME, client) + + backup = _make_backup(BACKUP_ID, instance, table_id=TABLE_ID) + assert backup.source_table == TABLE_NAME + + +def test_backup_expire_time(): + instance = _Instance(INSTANCE_NAME) + expire_time = _make_timestamp() + backup = _make_backup(BACKUP_ID, instance, expire_time=expire_time) + assert backup.expire_time == expire_time + + +def test_backup_expire_time_setter(): + instance = _Instance(INSTANCE_NAME) + expire_time = _make_timestamp() + backup = _make_backup(BACKUP_ID, instance) + backup.expire_time = expire_time + assert backup.expire_time == expire_time + + +def test_backup_start_time(): + instance = _Instance(INSTANCE_NAME) + backup = _make_backup(BACKUP_ID, instance) + expected = backup._start_time = _make_timestamp() + assert backup.start_time == expected + + +def test_backup_end_time(): + instance = _Instance(INSTANCE_NAME) + backup = _make_backup(BACKUP_ID, instance) + expected = backup._end_time = _make_timestamp() + assert backup.end_time == expected + + +def test_backup_size(): + instance = _Instance(INSTANCE_NAME) + backup = _make_backup(BACKUP_ID, instance) + expected = backup._size_bytes = 10 + assert backup.size_bytes == expected + + +def test_backup_state(): + from google.cloud.bigtable_admin_v2.types import table + + instance = _Instance(INSTANCE_NAME) + backup = _make_backup(BACKUP_ID, instance) + expected = backup._state = table.Backup.State.READY + assert backup.state == expected + + +def test_backup___eq__(): + instance = object() + backup1 = _make_backup(BACKUP_ID, instance) + backup2 = _make_backup(BACKUP_ID, instance) + assert backup1 == backup2 + + +def test_backup___eq___w_different_types(): + instance = object() + backup1 = _make_backup(BACKUP_ID, instance) + backup2 = object() + assert not (backup1 == backup2) + + +def test_backup___ne___w_same_value(): + instance = object() + backup1 = _make_backup(BACKUP_ID, instance) + backup2 = _make_backup(BACKUP_ID, instance) + assert not (backup1 != backup2) + + +def test_backup___ne__(): + backup1 = _make_backup("backup_1", "instance1") + backup2 = _make_backup("backup_2", "instance2") + assert backup1 != backup2 + + +def test_backup_create_w_grpc_error(): + from google.api_core.exceptions import GoogleAPICallError + from google.api_core.exceptions import Unknown + from google.cloud._helpers import 
_datetime_to_pb_timestamp + from google.cloud.bigtable_admin_v2.types import table + + client = _Client() + api = client.table_admin_client = _make_table_admin_client() + api.create_backup.side_effect = Unknown("testing") + + timestamp = _make_timestamp() + backup = _make_backup( + BACKUP_ID, + _Instance(INSTANCE_NAME, client=client), + table_id=TABLE_ID, + expire_time=timestamp, + ) + + backup_pb = table.Backup( + source_table=TABLE_NAME, expire_time=_datetime_to_pb_timestamp(timestamp), + ) + + with pytest.raises(GoogleAPICallError): + backup.create(CLUSTER_ID) + + api.create_backup.assert_called_once_with( + request={"parent": CLUSTER_NAME, "backup_id": BACKUP_ID, "backup": backup_pb} + ) + + +def test_backup_create_w_already_exists(): + from google.cloud._helpers import _datetime_to_pb_timestamp + from google.cloud.bigtable_admin_v2.types import table + from google.cloud.exceptions import Conflict + + client = _Client() + api = client.table_admin_client = _make_table_admin_client() + api.create_backup.side_effect = Conflict("testing") + + timestamp = _make_timestamp() + backup = _make_backup( + BACKUP_ID, + _Instance(INSTANCE_NAME, client=client), + table_id=TABLE_ID, + expire_time=timestamp, + ) + + backup_pb = table.Backup( + source_table=TABLE_NAME, expire_time=_datetime_to_pb_timestamp(timestamp), + ) + + with pytest.raises(Conflict): + backup.create(CLUSTER_ID) + + api.create_backup.assert_called_once_with( + request={"parent": CLUSTER_NAME, "backup_id": BACKUP_ID, "backup": backup_pb} + ) + + +def test_backup_create_w_instance_not_found(): + from google.cloud._helpers import _datetime_to_pb_timestamp + from google.cloud.bigtable_admin_v2.types import table + from google.cloud.exceptions import NotFound + + client = _Client() + api = client.table_admin_client = _make_table_admin_client() + api.create_backup.side_effect = NotFound("testing") + + timestamp = _make_timestamp() + backup = _make_backup( + BACKUP_ID, + _Instance(INSTANCE_NAME, client=client), + table_id=TABLE_ID, + expire_time=timestamp, + ) + + backup_pb = table.Backup( + source_table=TABLE_NAME, expire_time=_datetime_to_pb_timestamp(timestamp), + ) + + with pytest.raises(NotFound): + backup.create(CLUSTER_ID) + + api.create_backup.assert_called_once_with( + request={"parent": CLUSTER_NAME, "backup_id": BACKUP_ID, "backup": backup_pb} + ) + + +def test_backup_create_w_cluster_not_set(): + backup = _make_backup( + BACKUP_ID, + _Instance(INSTANCE_NAME), + table_id=TABLE_ID, + expire_time=_make_timestamp(), + ) + + with pytest.raises(ValueError): + backup.create() + + +def test_backup_create_w_table_not_set(): + backup = _make_backup( + BACKUP_ID, _Instance(INSTANCE_NAME), expire_time=_make_timestamp(), + ) + + with pytest.raises(ValueError): + backup.create(CLUSTER_ID) + + +def test_backup_create_w_expire_time_not_set(): + backup = _make_backup(BACKUP_ID, _Instance(INSTANCE_NAME), table_id=TABLE_ID,) + + with pytest.raises(ValueError): + backup.create(CLUSTER_ID) + + +def test_backup_create_success(): + from google.cloud._helpers import _datetime_to_pb_timestamp + from google.cloud.bigtable_admin_v2.types import table + from google.cloud.bigtable import Client + + op_future = object() + credentials = _make_credentials() + client = Client(project=PROJECT_ID, credentials=credentials, admin=True) + api = client._table_admin_client = _make_table_admin_client() + api.create_backup.return_value = op_future + + timestamp = _make_timestamp() + backup = _make_backup( + BACKUP_ID, + _Instance(INSTANCE_NAME, client=client), + 
table_id=TABLE_ID, + expire_time=timestamp, + ) -class TestBackup(unittest.TestCase): - PROJECT_ID = "project-id" - INSTANCE_ID = "instance-id" - INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID - CLUSTER_ID = "cluster-id" - CLUSTER_NAME = INSTANCE_NAME + "/clusters/" + CLUSTER_ID - TABLE_ID = "table-id" - TABLE_NAME = INSTANCE_NAME + "/tables/" + TABLE_ID - BACKUP_ID = "backup-id" - BACKUP_NAME = CLUSTER_NAME + "/backups/" + BACKUP_ID - - ALT_INSTANCE = "other-instance-id" - ALT_INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + ALT_INSTANCE - ALT_CLUSTER_NAME = ALT_INSTANCE_NAME + "/clusters/" + CLUSTER_ID - ALT_BACKUP_NAME = ALT_CLUSTER_NAME + "/backups/" + BACKUP_ID - - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.backup import Backup - - return Backup - - @staticmethod - def _make_table_admin_client(): - from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient - - return mock.create_autospec(BigtableTableAdminClient, instance=True) - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def _make_timestamp(self): - return datetime.datetime.utcnow().replace(tzinfo=UTC) - - def test_constructor_defaults(self): - instance = _Instance(self.INSTANCE_NAME) - backup = self._make_one(self.BACKUP_ID, instance) - - self.assertEqual(backup.backup_id, self.BACKUP_ID) - self.assertIs(backup._instance, instance) - self.assertIsNone(backup._cluster) - self.assertIsNone(backup.table_id) - self.assertIsNone(backup._expire_time) - - self.assertIsNone(backup._parent) - self.assertIsNone(backup._source_table) - self.assertIsNone(backup._start_time) - self.assertIsNone(backup._end_time) - self.assertIsNone(backup._size_bytes) - self.assertIsNone(backup._state) - self.assertIsNone(backup._encryption_info) - - def test_constructor_non_defaults(self): - instance = _Instance(self.INSTANCE_NAME) - expire_time = self._make_timestamp() - - backup = self._make_one( - self.BACKUP_ID, - instance, - cluster_id=self.CLUSTER_ID, - table_id=self.TABLE_ID, - expire_time=expire_time, - encryption_info="encryption_info", - ) - - self.assertEqual(backup.backup_id, self.BACKUP_ID) - self.assertIs(backup._instance, instance) - self.assertIs(backup._cluster, self.CLUSTER_ID) - self.assertEqual(backup.table_id, self.TABLE_ID) - self.assertEqual(backup._expire_time, expire_time) - self.assertEqual(backup._encryption_info, "encryption_info") - - self.assertIsNone(backup._parent) - self.assertIsNone(backup._source_table) - self.assertIsNone(backup._start_time) - self.assertIsNone(backup._end_time) - self.assertIsNone(backup._size_bytes) - self.assertIsNone(backup._state) - - def test_from_pb_project_mismatch(self): - from google.cloud.bigtable_admin_v2.types import table - - alt_project_id = "alt-project-id" - client = _Client(project=alt_project_id) - instance = _Instance(self.INSTANCE_NAME, client) - backup_pb = table.Backup(name=self.BACKUP_NAME) - klasse = self._get_target_class() - - with self.assertRaises(ValueError): - klasse.from_pb(backup_pb, instance) - - def test_from_pb_instance_mismatch(self): - from google.cloud.bigtable_admin_v2.types import table - - alt_instance = "/projects/%s/instances/alt-instance" % self.PROJECT_ID - client = _Client() - instance = _Instance(alt_instance, client) - backup_pb = table.Backup(name=self.BACKUP_NAME) - klasse = self._get_target_class() - - with self.assertRaises(ValueError): - klasse.from_pb(backup_pb, instance) - - def test_from_pb_bad_name(self): - from 
google.cloud.bigtable_admin_v2.types import table - - client = _Client() - instance = _Instance(self.INSTANCE_NAME, client) - backup_pb = table.Backup(name="invalid_name") - klasse = self._get_target_class() - - with self.assertRaises(ValueError): - klasse.from_pb(backup_pb, instance) - - def test_from_pb_success(self): - from google.cloud.bigtable.encryption_info import EncryptionInfo - from google.cloud.bigtable.error import Status - from google.cloud.bigtable_admin_v2.types import table - from google.cloud._helpers import _datetime_to_pb_timestamp - from google.rpc.code_pb2 import Code - - client = _Client() - instance = _Instance(self.INSTANCE_NAME, client) - timestamp = _datetime_to_pb_timestamp(self._make_timestamp()) - size_bytes = 1234 - state = table.Backup.State.READY - GOOGLE_DEFAULT_ENCRYPTION = ( - table.EncryptionInfo.EncryptionType.GOOGLE_DEFAULT_ENCRYPTION - ) - backup_pb = table.Backup( - name=self.BACKUP_NAME, - source_table=self.TABLE_NAME, - expire_time=timestamp, - start_time=timestamp, - end_time=timestamp, - size_bytes=size_bytes, - state=state, - encryption_info=table.EncryptionInfo( - encryption_type=GOOGLE_DEFAULT_ENCRYPTION, - encryption_status=_StatusPB(Code.OK, "Status OK"), - kms_key_version="2", - ), - ) - klasse = self._get_target_class() - - backup = klasse.from_pb(backup_pb, instance) - - self.assertTrue(isinstance(backup, klasse)) - self.assertEqual(backup._instance, instance) - self.assertEqual(backup.backup_id, self.BACKUP_ID) - self.assertEqual(backup.cluster, self.CLUSTER_ID) - self.assertEqual(backup.table_id, self.TABLE_ID) - self.assertEqual(backup._expire_time, timestamp) - self.assertEqual(backup.start_time, timestamp) - self.assertEqual(backup.end_time, timestamp) - self.assertEqual(backup._size_bytes, size_bytes) - self.assertEqual(backup._state, state) - self.assertEqual( - backup.encryption_info, - EncryptionInfo( - encryption_type=GOOGLE_DEFAULT_ENCRYPTION, - encryption_status=Status(_StatusPB(Code.OK, "Status OK")), - kms_key_version="2", - ), - ) - - def test_property_name(self): - from google.cloud.bigtable.client import Client - from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( - BigtableInstanceAdminClient, - ) - - api = mock.create_autospec(BigtableInstanceAdminClient) - credentials = _make_credentials() - client = Client(project=self.PROJECT_ID, credentials=credentials, admin=True) - client._table_admin_client = api - instance = _Instance(self.INSTANCE_NAME, client) - - backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) - self.assertEqual(backup.name, self.BACKUP_NAME) - - def test_property_cluster(self): - backup = self._make_one( - self.BACKUP_ID, _Instance(self.INSTANCE_NAME), cluster_id=self.CLUSTER_ID - ) - self.assertEqual(backup.cluster, self.CLUSTER_ID) - - def test_property_cluster_setter(self): - backup = self._make_one(self.BACKUP_ID, _Instance(self.INSTANCE_NAME)) - backup.cluster = self.CLUSTER_ID - self.assertEqual(backup.cluster, self.CLUSTER_ID) - - def test_property_parent_none(self): - backup = self._make_one(self.BACKUP_ID, _Instance(self.INSTANCE_NAME),) - self.assertIsNone(backup.parent) - - def test_property_parent_w_cluster(self): - from google.cloud.bigtable.client import Client - from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( - BigtableInstanceAdminClient, - ) - - api = mock.create_autospec(BigtableInstanceAdminClient) - credentials = _make_credentials() - client = Client(project=self.PROJECT_ID, credentials=credentials, 
admin=True) - client._table_admin_client = api - instance = _Instance(self.INSTANCE_NAME, client) - - backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) - self.assertEqual(backup._cluster, self.CLUSTER_ID) - self.assertEqual(backup.parent, self.CLUSTER_NAME) - - def test_property_source_table_none(self): - from google.cloud.bigtable.client import Client - from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( - BigtableInstanceAdminClient, - ) - - api = mock.create_autospec(BigtableInstanceAdminClient) - credentials = _make_credentials() - client = Client(project=self.PROJECT_ID, credentials=credentials, admin=True) - client._table_admin_client = api - instance = _Instance(self.INSTANCE_NAME, client) - - backup = self._make_one(self.BACKUP_ID, instance) - self.assertIsNone(backup.source_table) - - def test_property_source_table_valid(self): - from google.cloud.bigtable.client import Client - from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( - BigtableInstanceAdminClient, - ) - - api = mock.create_autospec(BigtableInstanceAdminClient) - credentials = _make_credentials() - client = Client(project=self.PROJECT_ID, credentials=credentials, admin=True) - client._table_admin_client = api - instance = _Instance(self.INSTANCE_NAME, client) - - backup = self._make_one(self.BACKUP_ID, instance, table_id=self.TABLE_ID) - self.assertEqual(backup.source_table, self.TABLE_NAME) - - def test_property_expire_time(self): - instance = _Instance(self.INSTANCE_NAME) - expire_time = self._make_timestamp() - backup = self._make_one(self.BACKUP_ID, instance, expire_time=expire_time) - self.assertEqual(backup.expire_time, expire_time) - - def test_property_expire_time_setter(self): - instance = _Instance(self.INSTANCE_NAME) - expire_time = self._make_timestamp() - backup = self._make_one(self.BACKUP_ID, instance) - backup.expire_time = expire_time - self.assertEqual(backup.expire_time, expire_time) - - def test_property_start_time(self): - instance = _Instance(self.INSTANCE_NAME) - backup = self._make_one(self.BACKUP_ID, instance) - expected = backup._start_time = self._make_timestamp() - self.assertEqual(backup.start_time, expected) - - def test_property_end_time(self): - instance = _Instance(self.INSTANCE_NAME) - backup = self._make_one(self.BACKUP_ID, instance) - expected = backup._end_time = self._make_timestamp() - self.assertEqual(backup.end_time, expected) - - def test_property_size(self): - instance = _Instance(self.INSTANCE_NAME) - backup = self._make_one(self.BACKUP_ID, instance) - expected = backup._size_bytes = 10 - self.assertEqual(backup.size_bytes, expected) - - def test_property_state(self): - from google.cloud.bigtable_admin_v2.types import table - - instance = _Instance(self.INSTANCE_NAME) - backup = self._make_one(self.BACKUP_ID, instance) - expected = backup._state = table.Backup.State.READY - self.assertEqual(backup.state, expected) - - def test___eq__(self): - instance = object() - backup1 = self._make_one(self.BACKUP_ID, instance) - backup2 = self._make_one(self.BACKUP_ID, instance) - self.assertTrue(backup1 == backup2) - - def test___eq__different_types(self): - instance = object() - backup1 = self._make_one(self.BACKUP_ID, instance) - backup2 = object() - self.assertFalse(backup1 == backup2) - - def test___ne__same_value(self): - instance = object() - backup1 = self._make_one(self.BACKUP_ID, instance) - backup2 = self._make_one(self.BACKUP_ID, instance) - self.assertFalse(backup1 != backup2) - - def 
test___ne__(self): - backup1 = self._make_one("backup_1", "instance1") - backup2 = self._make_one("backup_2", "instance2") - self.assertTrue(backup1 != backup2) - - def test_create_grpc_error(self): - from google.api_core.exceptions import GoogleAPICallError - from google.api_core.exceptions import Unknown - from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.bigtable_admin_v2.types import table - - client = _Client() - api = client.table_admin_client = self._make_table_admin_client() - api.create_backup.side_effect = Unknown("testing") - - timestamp = self._make_timestamp() - backup = self._make_one( - self.BACKUP_ID, - _Instance(self.INSTANCE_NAME, client=client), - table_id=self.TABLE_ID, - expire_time=timestamp, - ) - - backup_pb = table.Backup( - source_table=self.TABLE_NAME, - expire_time=_datetime_to_pb_timestamp(timestamp), - ) - - with self.assertRaises(GoogleAPICallError): - backup.create(self.CLUSTER_ID) - - api.create_backup.assert_called_once_with( - request={ - "parent": self.CLUSTER_NAME, - "backup_id": self.BACKUP_ID, - "backup": backup_pb, - } - ) - - def test_create_already_exists(self): - from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.bigtable_admin_v2.types import table - from google.cloud.exceptions import Conflict - - client = _Client() - api = client.table_admin_client = self._make_table_admin_client() - api.create_backup.side_effect = Conflict("testing") - - timestamp = self._make_timestamp() - backup = self._make_one( - self.BACKUP_ID, - _Instance(self.INSTANCE_NAME, client=client), - table_id=self.TABLE_ID, - expire_time=timestamp, - ) - - backup_pb = table.Backup( - source_table=self.TABLE_NAME, - expire_time=_datetime_to_pb_timestamp(timestamp), - ) - - with self.assertRaises(Conflict): - backup.create(self.CLUSTER_ID) - - api.create_backup.assert_called_once_with( - request={ - "parent": self.CLUSTER_NAME, - "backup_id": self.BACKUP_ID, - "backup": backup_pb, - } - ) - - def test_create_instance_not_found(self): - from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.bigtable_admin_v2.types import table - from google.cloud.exceptions import NotFound - - client = _Client() - api = client.table_admin_client = self._make_table_admin_client() - api.create_backup.side_effect = NotFound("testing") - - timestamp = self._make_timestamp() - backup = self._make_one( - self.BACKUP_ID, - _Instance(self.INSTANCE_NAME, client=client), - table_id=self.TABLE_ID, - expire_time=timestamp, - ) - - backup_pb = table.Backup( - source_table=self.TABLE_NAME, - expire_time=_datetime_to_pb_timestamp(timestamp), - ) - - with self.assertRaises(NotFound): - backup.create(self.CLUSTER_ID) - - api.create_backup.assert_called_once_with( - request={ - "parent": self.CLUSTER_NAME, - "backup_id": self.BACKUP_ID, - "backup": backup_pb, - } - ) - - def test_create_cluster_not_set(self): - backup = self._make_one( - self.BACKUP_ID, - _Instance(self.INSTANCE_NAME), - table_id=self.TABLE_ID, - expire_time=self._make_timestamp(), - ) - - with self.assertRaises(ValueError): - backup.create() - - def test_create_table_not_set(self): - backup = self._make_one( - self.BACKUP_ID, - _Instance(self.INSTANCE_NAME), - expire_time=self._make_timestamp(), - ) - - with self.assertRaises(ValueError): - backup.create(self.CLUSTER_ID) - - def test_create_expire_time_not_set(self): - backup = self._make_one( - self.BACKUP_ID, _Instance(self.INSTANCE_NAME), table_id=self.TABLE_ID, - ) - - with self.assertRaises(ValueError): - 
backup.create(self.CLUSTER_ID) - - def test_create_success(self): - from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.bigtable_admin_v2.types import table - from google.cloud.bigtable import Client - - op_future = object() - credentials = _make_credentials() - client = Client(project=self.PROJECT_ID, credentials=credentials, admin=True) - api = client._table_admin_client = self._make_table_admin_client() - api.create_backup.return_value = op_future - - timestamp = self._make_timestamp() - backup = self._make_one( - self.BACKUP_ID, - _Instance(self.INSTANCE_NAME, client=client), - table_id=self.TABLE_ID, - expire_time=timestamp, - ) - - backup_pb = table.Backup( - source_table=self.TABLE_NAME, - expire_time=_datetime_to_pb_timestamp(timestamp), - ) - - future = backup.create(self.CLUSTER_ID) - self.assertEqual(backup._cluster, self.CLUSTER_ID) - self.assertIs(future, op_future) - - api.create_backup.assert_called_once_with( - request={ - "parent": self.CLUSTER_NAME, - "backup_id": self.BACKUP_ID, - "backup": backup_pb, - } - ) - - def test_exists_grpc_error(self): - from google.api_core.exceptions import Unknown - - client = _Client() - api = client.table_admin_client = self._make_table_admin_client() - api.get_backup.side_effect = Unknown("testing") - - instance = _Instance(self.INSTANCE_NAME, client=client) - backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) - - with self.assertRaises(Unknown): - backup.exists() - - request = {"name": self.BACKUP_NAME} - api.get_backup.assert_called_once_with(request) - - def test_exists_not_found(self): - from google.api_core.exceptions import NotFound - - client = _Client() - api = client.table_admin_client = self._make_table_admin_client() - api.get_backup.side_effect = NotFound("testing") - - instance = _Instance(self.INSTANCE_NAME, client=client) - backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) - - self.assertFalse(backup.exists()) - - api.get_backup.assert_called_once_with(request={"name": self.BACKUP_NAME}) - - def test_get(self): - from google.cloud.bigtable_admin_v2.types import table - from google.cloud._helpers import _datetime_to_pb_timestamp - - timestamp = _datetime_to_pb_timestamp(self._make_timestamp()) - state = table.Backup.State.READY - - client = _Client() - backup_pb = table.Backup( - name=self.BACKUP_NAME, - source_table=self.TABLE_NAME, - expire_time=timestamp, - start_time=timestamp, - end_time=timestamp, - size_bytes=0, - state=state, - ) - api = client.table_admin_client = self._make_table_admin_client() - api.get_backup.return_value = backup_pb - - instance = _Instance(self.INSTANCE_NAME, client=client) - backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) - - self.assertEqual(backup.get(), backup_pb) - - def test_reload(self): - from google.cloud.bigtable_admin_v2.types import table - from google.cloud._helpers import _datetime_to_pb_timestamp - - timestamp = _datetime_to_pb_timestamp(self._make_timestamp()) - state = table.Backup.State.READY - - client = _Client() - backup_pb = table.Backup( - name=self.BACKUP_NAME, - source_table=self.TABLE_NAME, - expire_time=timestamp, - start_time=timestamp, - end_time=timestamp, - size_bytes=0, - state=state, - ) - api = client.table_admin_client = self._make_table_admin_client() - api.get_backup.return_value = backup_pb - - instance = _Instance(self.INSTANCE_NAME, client=client) - backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) - - 
backup.reload() - self.assertEqual(backup._source_table, self.TABLE_NAME) - self.assertEqual(backup._expire_time, timestamp) - self.assertEqual(backup._start_time, timestamp) - self.assertEqual(backup._end_time, timestamp) - self.assertEqual(backup._size_bytes, 0) - self.assertEqual(backup._state, state) + backup_pb = table.Backup( + source_table=TABLE_NAME, expire_time=_datetime_to_pb_timestamp(timestamp), + ) - def test_exists_success(self): - from google.cloud.bigtable_admin_v2.types import table + future = backup.create(CLUSTER_ID) + assert backup._cluster == CLUSTER_ID + assert future is op_future - client = _Client() - backup_pb = table.Backup(name=self.BACKUP_NAME) - api = client.table_admin_client = self._make_table_admin_client() - api.get_backup.return_value = backup_pb + api.create_backup.assert_called_once_with( + request={"parent": CLUSTER_NAME, "backup_id": BACKUP_ID, "backup": backup_pb} + ) - instance = _Instance(self.INSTANCE_NAME, client=client) - backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) - self.assertTrue(backup.exists()) +def test_backup_get(): + from google.cloud.bigtable_admin_v2.types import table + from google.cloud._helpers import _datetime_to_pb_timestamp - api.get_backup.assert_called_once_with(request={"name": self.BACKUP_NAME}) + timestamp = _datetime_to_pb_timestamp(_make_timestamp()) + state = table.Backup.State.READY - def test_delete_grpc_error(self): - from google.api_core.exceptions import Unknown + client = _Client() + backup_pb = table.Backup( + name=BACKUP_NAME, + source_table=TABLE_NAME, + expire_time=timestamp, + start_time=timestamp, + end_time=timestamp, + size_bytes=0, + state=state, + ) + api = client.table_admin_client = _make_table_admin_client() + api.get_backup.return_value = backup_pb - client = _Client() - api = client.table_admin_client = self._make_table_admin_client() - api.delete_backup.side_effect = Unknown("testing") - instance = _Instance(self.INSTANCE_NAME, client=client) - backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) + instance = _Instance(INSTANCE_NAME, client=client) + backup = _make_backup(BACKUP_ID, instance, cluster_id=CLUSTER_ID) - with self.assertRaises(Unknown): - backup.delete() + assert backup.get() == backup_pb - api.delete_backup.assert_called_once_with(request={"name": self.BACKUP_NAME}) - def test_delete_not_found(self): - from google.api_core.exceptions import NotFound +def test_backup_reload(): + from google.cloud.bigtable_admin_v2.types import table + from google.cloud._helpers import _datetime_to_pb_timestamp - client = _Client() - api = client.table_admin_client = self._make_table_admin_client() - api.delete_backup.side_effect = NotFound("testing") - instance = _Instance(self.INSTANCE_NAME, client=client) - backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) + timestamp = _datetime_to_pb_timestamp(_make_timestamp()) + state = table.Backup.State.READY - with self.assertRaises(NotFound): - backup.delete() - - api.delete_backup.assert_called_once_with(request={"name": self.BACKUP_NAME}) - - def test_delete_success(self): - from google.protobuf.empty_pb2 import Empty - - client = _Client() - api = client.table_admin_client = self._make_table_admin_client() - api.delete_backup.return_value = Empty() - instance = _Instance(self.INSTANCE_NAME, client=client) - backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) + client = _Client() + backup_pb = table.Backup( + name=BACKUP_NAME, + 
source_table=TABLE_NAME, + expire_time=timestamp, + start_time=timestamp, + end_time=timestamp, + size_bytes=0, + state=state, + ) + api = client.table_admin_client = _make_table_admin_client() + api.get_backup.return_value = backup_pb + instance = _Instance(INSTANCE_NAME, client=client) + backup = _make_backup(BACKUP_ID, instance, cluster_id=CLUSTER_ID) + + backup.reload() + assert backup._source_table == TABLE_NAME + assert backup._expire_time == timestamp + assert backup._start_time == timestamp + assert backup._end_time == timestamp + assert backup._size_bytes == 0 + assert backup._state == state + + +def test_backup_exists_w_grpc_error(): + from google.api_core.exceptions import Unknown + + client = _Client() + api = client.table_admin_client = _make_table_admin_client() + api.get_backup.side_effect = Unknown("testing") + + instance = _Instance(INSTANCE_NAME, client=client) + backup = _make_backup(BACKUP_ID, instance, cluster_id=CLUSTER_ID) + + with pytest.raises(Unknown): + backup.exists() + + request = {"name": BACKUP_NAME} + api.get_backup.assert_called_once_with(request) + + +def test_backup_exists_w_not_found(): + from google.api_core.exceptions import NotFound + + client = _Client() + api = client.table_admin_client = _make_table_admin_client() + api.get_backup.side_effect = NotFound("testing") + + instance = _Instance(INSTANCE_NAME, client=client) + backup = _make_backup(BACKUP_ID, instance, cluster_id=CLUSTER_ID) + + assert not backup.exists() + + api.get_backup.assert_called_once_with(request={"name": BACKUP_NAME}) + + +def test_backup_exists_success(): + from google.cloud.bigtable_admin_v2.types import table + + client = _Client() + backup_pb = table.Backup(name=BACKUP_NAME) + api = client.table_admin_client = _make_table_admin_client() + api.get_backup.return_value = backup_pb + + instance = _Instance(INSTANCE_NAME, client=client) + backup = _make_backup(BACKUP_ID, instance, cluster_id=CLUSTER_ID) + + assert backup.exists() + + api.get_backup.assert_called_once_with(request={"name": BACKUP_NAME}) + + +def test_backup_delete_w_grpc_error(): + from google.api_core.exceptions import Unknown + + client = _Client() + api = client.table_admin_client = _make_table_admin_client() + api.delete_backup.side_effect = Unknown("testing") + instance = _Instance(INSTANCE_NAME, client=client) + backup = _make_backup(BACKUP_ID, instance, cluster_id=CLUSTER_ID) + + with pytest.raises(Unknown): backup.delete() - api.delete_backup.assert_called_once_with(request={"name": self.BACKUP_NAME}) - - def test_update_expire_time_grpc_error(self): - from google.api_core.exceptions import Unknown - from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.bigtable_admin_v2.types import table - from google.protobuf import field_mask_pb2 - - client = _Client() - api = client.table_admin_client = self._make_table_admin_client() - api.update_backup.side_effect = Unknown("testing") - instance = _Instance(self.INSTANCE_NAME, client=client) - backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) - expire_time = self._make_timestamp() - - with self.assertRaises(Unknown): - backup.update_expire_time(expire_time) - - backup_update = table.Backup( - name=self.BACKUP_NAME, expire_time=_datetime_to_pb_timestamp(expire_time), - ) - update_mask = field_mask_pb2.FieldMask(paths=["expire_time"]) - api.update_backup.assert_called_once_with( - request={"backup": backup_update, "update_mask": update_mask} - ) - - def test_update_expire_time_not_found(self): - from 
google.api_core.exceptions import NotFound - from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.bigtable_admin_v2.types import table - from google.protobuf import field_mask_pb2 - - client = _Client() - api = client.table_admin_client = self._make_table_admin_client() - api.update_backup.side_effect = NotFound("testing") - instance = _Instance(self.INSTANCE_NAME, client=client) - backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) - expire_time = self._make_timestamp() - - with self.assertRaises(NotFound): - backup.update_expire_time(expire_time) - - backup_update = table.Backup( - name=self.BACKUP_NAME, expire_time=_datetime_to_pb_timestamp(expire_time), - ) - update_mask = field_mask_pb2.FieldMask(paths=["expire_time"]) - api.update_backup.assert_called_once_with( - request={"backup": backup_update, "update_mask": update_mask} - ) - - def test_update_expire_time_success(self): - from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.bigtable_admin_v2.types import table - from google.protobuf import field_mask_pb2 - - client = _Client() - api = client.table_admin_client = self._make_table_admin_client() - api.update_backup.return_type = table.Backup(name=self.BACKUP_NAME) - instance = _Instance(self.INSTANCE_NAME, client=client) - backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) - expire_time = self._make_timestamp() + api.delete_backup.assert_called_once_with(request={"name": BACKUP_NAME}) + + +def test_backup_delete_w_not_found(): + from google.api_core.exceptions import NotFound + + client = _Client() + api = client.table_admin_client = _make_table_admin_client() + api.delete_backup.side_effect = NotFound("testing") + instance = _Instance(INSTANCE_NAME, client=client) + backup = _make_backup(BACKUP_ID, instance, cluster_id=CLUSTER_ID) + + with pytest.raises(NotFound): + backup.delete() + + api.delete_backup.assert_called_once_with(request={"name": BACKUP_NAME}) + + +def test_backup_delete_success(): + from google.protobuf.empty_pb2 import Empty + client = _Client() + api = client.table_admin_client = _make_table_admin_client() + api.delete_backup.return_value = Empty() + instance = _Instance(INSTANCE_NAME, client=client) + backup = _make_backup(BACKUP_ID, instance, cluster_id=CLUSTER_ID) + + backup.delete() + + api.delete_backup.assert_called_once_with(request={"name": BACKUP_NAME}) + + +def test_backup_update_expire_time_w_grpc_error(): + from google.api_core.exceptions import Unknown + from google.cloud._helpers import _datetime_to_pb_timestamp + from google.cloud.bigtable_admin_v2.types import table + from google.protobuf import field_mask_pb2 + + client = _Client() + api = client.table_admin_client = _make_table_admin_client() + api.update_backup.side_effect = Unknown("testing") + instance = _Instance(INSTANCE_NAME, client=client) + backup = _make_backup(BACKUP_ID, instance, cluster_id=CLUSTER_ID) + expire_time = _make_timestamp() + + with pytest.raises(Unknown): + backup.update_expire_time(expire_time) + + backup_update = table.Backup( + name=BACKUP_NAME, expire_time=_datetime_to_pb_timestamp(expire_time), + ) + update_mask = field_mask_pb2.FieldMask(paths=["expire_time"]) + api.update_backup.assert_called_once_with( + request={"backup": backup_update, "update_mask": update_mask} + ) + + +def test_backup_update_expire_time_w_not_found(): + from google.api_core.exceptions import NotFound + from google.cloud._helpers import _datetime_to_pb_timestamp + from 
google.cloud.bigtable_admin_v2.types import table + from google.protobuf import field_mask_pb2 + + client = _Client() + api = client.table_admin_client = _make_table_admin_client() + api.update_backup.side_effect = NotFound("testing") + instance = _Instance(INSTANCE_NAME, client=client) + backup = _make_backup(BACKUP_ID, instance, cluster_id=CLUSTER_ID) + expire_time = _make_timestamp() + + with pytest.raises(NotFound): backup.update_expire_time(expire_time) - backup_update = table.Backup( - name=self.BACKUP_NAME, expire_time=_datetime_to_pb_timestamp(expire_time), - ) - update_mask = field_mask_pb2.FieldMask(paths=["expire_time"]) - api.update_backup.assert_called_once_with( - request={"backup": backup_update, "update_mask": update_mask} - ) - - def test_restore_grpc_error(self): - from google.api_core.exceptions import GoogleAPICallError - from google.api_core.exceptions import Unknown - - client = _Client() - api = client.table_admin_client = self._make_table_admin_client() - api.restore_table.side_effect = Unknown("testing") - - timestamp = self._make_timestamp() - backup = self._make_one( - self.BACKUP_ID, - _Instance(self.INSTANCE_NAME, client=client), - cluster_id=self.CLUSTER_ID, - table_id=self.TABLE_NAME, - expire_time=timestamp, - ) - - with self.assertRaises(GoogleAPICallError): - backup.restore(self.TABLE_ID) - - api.restore_table.assert_called_once_with( - request={ - "parent": self.INSTANCE_NAME, - "table_id": self.TABLE_ID, - "backup": self.BACKUP_NAME, - } - ) - - def test_restore_cluster_not_set(self): - client = _Client() - client.table_admin_client = self._make_table_admin_client() - backup = self._make_one( - self.BACKUP_ID, - _Instance(self.INSTANCE_NAME, client=client), - table_id=self.TABLE_ID, - expire_time=self._make_timestamp(), - ) - - with self.assertRaises(ValueError): - backup.restore(self.TABLE_ID) - - def _restore_helper(self, instance_id=None, instance_name=None): - op_future = object() - client = _Client() - api = client.table_admin_client = self._make_table_admin_client() - api.restore_table.return_value = op_future - - timestamp = self._make_timestamp() - backup = self._make_one( - self.BACKUP_ID, - _Instance(self.INSTANCE_NAME, client=client), - cluster_id=self.CLUSTER_ID, - table_id=self.TABLE_NAME, - expire_time=timestamp, - ) - - future = backup.restore(self.TABLE_ID, instance_id) - self.assertEqual(backup._cluster, self.CLUSTER_ID) - self.assertIs(future, op_future) - - api.restore_table.assert_called_once_with( - request={ - "parent": instance_name or self.INSTANCE_NAME, - "table_id": self.TABLE_ID, - "backup": self.BACKUP_NAME, - } - ) - api.restore_table.reset_mock() - - def test_restore_default(self): - self._restore_helper() - - def test_restore_to_another_instance(self): - self._restore_helper(self.ALT_INSTANCE, self.ALT_INSTANCE_NAME) - - def test_get_iam_policy(self): - from google.cloud.bigtable.client import Client - from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - BigtableTableAdminClient, - ) - from google.iam.v1 import policy_pb2 - from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE - - credentials = _make_credentials() - client = Client(project=self.PROJECT_ID, credentials=credentials, admin=True) - - instance = client.instance(instance_id=self.INSTANCE_ID) - backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) - - version = 1 - etag = b"etag_v1" - members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"] - bindings = [{"role": BIGTABLE_ADMIN_ROLE, 
"members": members}] - iam_policy = policy_pb2.Policy(version=version, etag=etag, bindings=bindings) - - table_api = mock.create_autospec(BigtableTableAdminClient) - client._table_admin_client = table_api - table_api.get_iam_policy.return_value = iam_policy - - result = backup.get_iam_policy() - - table_api.get_iam_policy.assert_called_once_with( - request={"resource": backup.name} - ) - self.assertEqual(result.version, version) - self.assertEqual(result.etag, etag) - - admins = result.bigtable_admins - self.assertEqual(len(admins), len(members)) - for found, expected in zip(sorted(admins), sorted(members)): - self.assertEqual(found, expected) - - def test_set_iam_policy(self): - from google.cloud.bigtable.client import Client - from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - BigtableTableAdminClient, - ) - from google.iam.v1 import policy_pb2 - from google.cloud.bigtable.policy import Policy - from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE - - credentials = _make_credentials() - client = Client(project=self.PROJECT_ID, credentials=credentials, admin=True) - - instance = client.instance(instance_id=self.INSTANCE_ID) - backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) - - version = 1 - etag = b"etag_v1" - members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"] - bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": sorted(members)}] - iam_policy_pb = policy_pb2.Policy(version=version, etag=etag, bindings=bindings) - - table_api = mock.create_autospec(BigtableTableAdminClient) - client._table_admin_client = table_api - table_api.set_iam_policy.return_value = iam_policy_pb - - iam_policy = Policy(etag=etag, version=version) - iam_policy[BIGTABLE_ADMIN_ROLE] = [ - Policy.user("user1@test.com"), - Policy.service_account("service_acc1@test.com"), - ] - - result = backup.set_iam_policy(iam_policy) - - table_api.set_iam_policy.assert_called_once_with( - request={"resource": backup.name, "policy": iam_policy_pb} - ) - self.assertEqual(result.version, version) - self.assertEqual(result.etag, etag) - - admins = result.bigtable_admins - self.assertEqual(len(admins), len(members)) - for found, expected in zip(sorted(admins), sorted(members)): - self.assertEqual(found, expected) - - def test_test_iam_permissions(self): - from google.cloud.bigtable.client import Client - from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - BigtableTableAdminClient, - ) - from google.iam.v1 import iam_policy_pb2 - - credentials = _make_credentials() - client = Client(project=self.PROJECT_ID, credentials=credentials, admin=True) - - instance = client.instance(instance_id=self.INSTANCE_ID) - backup = self._make_one(self.BACKUP_ID, instance, cluster_id=self.CLUSTER_ID) + backup_update = table.Backup( + name=BACKUP_NAME, expire_time=_datetime_to_pb_timestamp(expire_time), + ) + update_mask = field_mask_pb2.FieldMask(paths=["expire_time"]) + api.update_backup.assert_called_once_with( + request={"backup": backup_update, "update_mask": update_mask} + ) + + +def test_backup_update_expire_time_success(): + from google.cloud._helpers import _datetime_to_pb_timestamp + from google.cloud.bigtable_admin_v2.types import table + from google.protobuf import field_mask_pb2 + + client = _Client() + api = client.table_admin_client = _make_table_admin_client() + api.update_backup.return_type = table.Backup(name=BACKUP_NAME) + instance = _Instance(INSTANCE_NAME, client=client) + backup = _make_backup(BACKUP_ID, instance, 
cluster_id=CLUSTER_ID) + expire_time = _make_timestamp() + + backup.update_expire_time(expire_time) + + backup_update = table.Backup( + name=BACKUP_NAME, expire_time=_datetime_to_pb_timestamp(expire_time), + ) + update_mask = field_mask_pb2.FieldMask(paths=["expire_time"]) + api.update_backup.assert_called_once_with( + request={"backup": backup_update, "update_mask": update_mask} + ) + + +def test_backup_restore_w_grpc_error(): + from google.api_core.exceptions import GoogleAPICallError + from google.api_core.exceptions import Unknown + + client = _Client() + api = client.table_admin_client = _make_table_admin_client() + api.restore_table.side_effect = Unknown("testing") + + timestamp = _make_timestamp() + backup = _make_backup( + BACKUP_ID, + _Instance(INSTANCE_NAME, client=client), + cluster_id=CLUSTER_ID, + table_id=TABLE_NAME, + expire_time=timestamp, + ) + + with pytest.raises(GoogleAPICallError): + backup.restore(TABLE_ID) + + api.restore_table.assert_called_once_with( + request={"parent": INSTANCE_NAME, "table_id": TABLE_ID, "backup": BACKUP_NAME} + ) + + +def test_backup_restore_w_cluster_not_set(): + client = _Client() + client.table_admin_client = _make_table_admin_client() + backup = _make_backup( + BACKUP_ID, + _Instance(INSTANCE_NAME, client=client), + table_id=TABLE_ID, + expire_time=_make_timestamp(), + ) + + with pytest.raises(ValueError): + backup.restore(TABLE_ID) + + +def _restore_helper(instance_id=None, instance_name=None): + op_future = object() + client = _Client() + api = client.table_admin_client = _make_table_admin_client() + api.restore_table.return_value = op_future + + timestamp = _make_timestamp() + backup = _make_backup( + BACKUP_ID, + _Instance(INSTANCE_NAME, client=client), + cluster_id=CLUSTER_ID, + table_id=TABLE_NAME, + expire_time=timestamp, + ) + + future = backup.restore(TABLE_ID, instance_id) + assert backup._cluster == CLUSTER_ID + assert future is op_future + + api.restore_table.assert_called_once_with( + request={ + "parent": instance_name or INSTANCE_NAME, + "table_id": TABLE_ID, + "backup": BACKUP_NAME, + } + ) + api.restore_table.reset_mock() + + +def test_backup_restore_default(): + _restore_helper() + + +def test_backup_restore_to_another_instance(): + _restore_helper(ALT_INSTANCE, ALT_INSTANCE_NAME) + + +def test_backup_get_iam_policy(): + from google.cloud.bigtable.client import Client + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + BigtableTableAdminClient, + ) + from google.iam.v1 import policy_pb2 + from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE + + credentials = _make_credentials() + client = Client(project=PROJECT_ID, credentials=credentials, admin=True) + + instance = client.instance(instance_id=INSTANCE_ID) + backup = _make_backup(BACKUP_ID, instance, cluster_id=CLUSTER_ID) + + version = 1 + etag = b"etag_v1" + members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"] + bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": members}] + iam_policy = policy_pb2.Policy(version=version, etag=etag, bindings=bindings) + + table_api = mock.create_autospec(BigtableTableAdminClient) + client._table_admin_client = table_api + table_api.get_iam_policy.return_value = iam_policy + + result = backup.get_iam_policy() + + table_api.get_iam_policy.assert_called_once_with(request={"resource": backup.name}) + assert result.version == version + assert result.etag == etag + + admins = result.bigtable_admins + assert len(admins) == len(members) + for found, expected in zip(sorted(admins), 
sorted(members)): + assert found == expected + + +def test_backup_set_iam_policy(): + from google.cloud.bigtable.client import Client + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + BigtableTableAdminClient, + ) + from google.iam.v1 import policy_pb2 + from google.cloud.bigtable.policy import Policy + from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE + + credentials = _make_credentials() + client = Client(project=PROJECT_ID, credentials=credentials, admin=True) + + instance = client.instance(instance_id=INSTANCE_ID) + backup = _make_backup(BACKUP_ID, instance, cluster_id=CLUSTER_ID) + + version = 1 + etag = b"etag_v1" + members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"] + bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": sorted(members)}] + iam_policy_pb = policy_pb2.Policy(version=version, etag=etag, bindings=bindings) + + table_api = mock.create_autospec(BigtableTableAdminClient) + client._table_admin_client = table_api + table_api.set_iam_policy.return_value = iam_policy_pb + + iam_policy = Policy(etag=etag, version=version) + iam_policy[BIGTABLE_ADMIN_ROLE] = [ + Policy.user("user1@test.com"), + Policy.service_account("service_acc1@test.com"), + ] + + result = backup.set_iam_policy(iam_policy) + + table_api.set_iam_policy.assert_called_once_with( + request={"resource": backup.name, "policy": iam_policy_pb} + ) + assert result.version == version + assert result.etag == etag + + admins = result.bigtable_admins + assert len(admins) == len(members) + for found, expected in zip(sorted(admins), sorted(members)): + assert found == expected + + +def test_backup_test_iam_permissions(): + from google.cloud.bigtable.client import Client + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + BigtableTableAdminClient, + ) + from google.iam.v1 import iam_policy_pb2 + + credentials = _make_credentials() + client = Client(project=PROJECT_ID, credentials=credentials, admin=True) + + instance = client.instance(instance_id=INSTANCE_ID) + backup = _make_backup(BACKUP_ID, instance, cluster_id=CLUSTER_ID) + + permissions = ["bigtable.backups.create", "bigtable.backups.list"] + + response = iam_policy_pb2.TestIamPermissionsResponse(permissions=permissions) - permissions = ["bigtable.backups.create", "bigtable.backups.list"] + table_api = mock.create_autospec(BigtableTableAdminClient) + table_api.test_iam_permissions.return_value = response + client._table_admin_client = table_api - response = iam_policy_pb2.TestIamPermissionsResponse(permissions=permissions) - - table_api = mock.create_autospec(BigtableTableAdminClient) - table_api.test_iam_permissions.return_value = response - client._table_admin_client = table_api - - result = backup.test_iam_permissions(permissions) + result = backup.test_iam_permissions(permissions) - self.assertEqual(result, permissions) - table_api.test_iam_permissions.assert_called_once_with( - request={"resource": backup.name, "permissions": permissions} - ) + assert result == permissions + table_api.test_iam_permissions.assert_called_once_with( + request={"resource": backup.name, "permissions": permissions} + ) class _Client(object): - def __init__(self, project=TestBackup.PROJECT_ID): + def __init__(self, project=PROJECT_ID): self.project = project self.project_name = "projects/" + self.project diff --git a/packages/google-cloud-bigtable/tests/unit/test_batcher.py b/packages/google-cloud-bigtable/tests/unit/test_batcher.py index 8760c3a2de2c..9ae6ed175624 100644 --- 
a/packages/google-cloud-bigtable/tests/unit/test_batcher.py +++ b/packages/google-cloud-bigtable/tests/unit/test_batcher.py @@ -13,154 +13,135 @@ # limitations under the License. -import unittest - import mock +import pytest -from ._testing import _make_credentials - -from google.cloud.bigtable.batcher import MutationsBatcher from google.cloud.bigtable.row import DirectRow +TABLE_ID = "table-id" +TABLE_NAME = "/tables/" + TABLE_ID -class TestMutationsBatcher(unittest.TestCase): - from grpc import StatusCode - TABLE_ID = "table-id" - TABLE_NAME = "/tables/" + TABLE_ID +def _make_mutation_batcher(table, **kw): + from google.cloud.bigtable.batcher import MutationsBatcher - # RPC Status Codes - SUCCESS = StatusCode.OK.value[0] + return MutationsBatcher(table, **kw) - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.table import Table - return Table +def test_mutation_batcher_constructor(): + table = _Table(TABLE_NAME) - def _make_table(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) + mutation_batcher = _make_mutation_batcher(table) + assert table is mutation_batcher.table - @staticmethod - def _get_target_client_class(): - from google.cloud.bigtable.client import Client - return Client +def test_mutation_batcher_mutate_row(): + table = _Table(TABLE_NAME) + mutation_batcher = _make_mutation_batcher(table=table) - def _make_client(self, *args, **kwargs): - return self._get_target_client_class()(*args, **kwargs) + rows = [ + DirectRow(row_key=b"row_key"), + DirectRow(row_key=b"row_key_2"), + DirectRow(row_key=b"row_key_3"), + DirectRow(row_key=b"row_key_4"), + ] - def test_constructor(self): - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) + mutation_batcher.mutate_rows(rows) + mutation_batcher.flush() - instance = client.instance(instance_id="instance-id") - table = self._make_table(self.TABLE_ID, instance) + assert table.mutation_calls == 1 - mutation_batcher = MutationsBatcher(table) - self.assertEqual(table, mutation_batcher.table) - def test_mutate_row(self): - table = _Table(self.TABLE_NAME) - mutation_batcher = MutationsBatcher(table=table) +def test_mutation_batcher_mutate(): + table = _Table(TABLE_NAME) + mutation_batcher = _make_mutation_batcher(table=table) - rows = [ - DirectRow(row_key=b"row_key"), - DirectRow(row_key=b"row_key_2"), - DirectRow(row_key=b"row_key_3"), - DirectRow(row_key=b"row_key_4"), - ] + row = DirectRow(row_key=b"row_key") + row.set_cell("cf1", b"c1", 1) + row.set_cell("cf1", b"c2", 2) + row.set_cell("cf1", b"c3", 3) + row.set_cell("cf1", b"c4", 4) - mutation_batcher.mutate_rows(rows) - mutation_batcher.flush() + mutation_batcher.mutate(row) - self.assertEqual(table.mutation_calls, 1) + mutation_batcher.flush() - def test_mutate_rows(self): - table = _Table(self.TABLE_NAME) - mutation_batcher = MutationsBatcher(table=table) + assert table.mutation_calls == 1 - row = DirectRow(row_key=b"row_key") - row.set_cell("cf1", b"c1", 1) - row.set_cell("cf1", b"c2", 2) - row.set_cell("cf1", b"c3", 3) - row.set_cell("cf1", b"c4", 4) - mutation_batcher.mutate(row) +def test_mutation_batcher_flush_w_no_rows(): + table = _Table(TABLE_NAME) + mutation_batcher = _make_mutation_batcher(table=table) + mutation_batcher.flush() - mutation_batcher.flush() + assert table.mutation_calls == 0 - self.assertEqual(table.mutation_calls, 1) - def test_flush_with_no_rows(self): - table = _Table(self.TABLE_NAME) - mutation_batcher = MutationsBatcher(table=table) - 
mutation_batcher.flush() +def test_mutation_batcher_mutate_w_max_flush_count(): + table = _Table(TABLE_NAME) + mutation_batcher = _make_mutation_batcher(table=table, flush_count=3) - self.assertEqual(table.mutation_calls, 0) + row_1 = DirectRow(row_key=b"row_key_1") + row_2 = DirectRow(row_key=b"row_key_2") + row_3 = DirectRow(row_key=b"row_key_3") - def test_add_row_with_max_flush_count(self): - table = _Table(self.TABLE_NAME) - mutation_batcher = MutationsBatcher(table=table, flush_count=3) + mutation_batcher.mutate(row_1) + mutation_batcher.mutate(row_2) + mutation_batcher.mutate(row_3) - row_1 = DirectRow(row_key=b"row_key_1") - row_2 = DirectRow(row_key=b"row_key_2") - row_3 = DirectRow(row_key=b"row_key_3") + assert table.mutation_calls == 1 - mutation_batcher.mutate(row_1) - mutation_batcher.mutate(row_2) - mutation_batcher.mutate(row_3) - self.assertEqual(table.mutation_calls, 1) +@mock.patch("google.cloud.bigtable.batcher.MAX_MUTATIONS", new=3) +def test_mutation_batcher_mutate_with_max_mutations_failure(): + from google.cloud.bigtable.batcher import MaxMutationsError - @mock.patch("google.cloud.bigtable.batcher.MAX_MUTATIONS", new=3) - def test_mutate_row_with_max_mutations_failure(self): - from google.cloud.bigtable.batcher import MaxMutationsError + table = _Table(TABLE_NAME) + mutation_batcher = _make_mutation_batcher(table=table) - table = _Table(self.TABLE_NAME) - mutation_batcher = MutationsBatcher(table=table) + row = DirectRow(row_key=b"row_key") + row.set_cell("cf1", b"c1", 1) + row.set_cell("cf1", b"c2", 2) + row.set_cell("cf1", b"c3", 3) + row.set_cell("cf1", b"c4", 4) - row = DirectRow(row_key=b"row_key") - row.set_cell("cf1", b"c1", 1) - row.set_cell("cf1", b"c2", 2) - row.set_cell("cf1", b"c3", 3) - row.set_cell("cf1", b"c4", 4) + with pytest.raises(MaxMutationsError): + mutation_batcher.mutate(row) - with self.assertRaises(MaxMutationsError): - mutation_batcher.mutate(row) - @mock.patch("google.cloud.bigtable.batcher.MAX_MUTATIONS", new=3) - def test_mutate_row_with_max_mutations(self): - table = _Table(self.TABLE_NAME) - mutation_batcher = MutationsBatcher(table=table) +@mock.patch("google.cloud.bigtable.batcher.MAX_MUTATIONS", new=3) +def test_mutation_batcher_mutate_w_max_mutations(): + table = _Table(TABLE_NAME) + mutation_batcher = _make_mutation_batcher(table=table) - row = DirectRow(row_key=b"row_key") - row.set_cell("cf1", b"c1", 1) - row.set_cell("cf1", b"c2", 2) - row.set_cell("cf1", b"c3", 3) + row = DirectRow(row_key=b"row_key") + row.set_cell("cf1", b"c1", 1) + row.set_cell("cf1", b"c2", 2) + row.set_cell("cf1", b"c3", 3) - mutation_batcher.mutate(row) - mutation_batcher.flush() + mutation_batcher.mutate(row) + mutation_batcher.flush() - self.assertEqual(table.mutation_calls, 1) + assert table.mutation_calls == 1 - def test_mutate_row_with_max_row_bytes(self): - table = _Table(self.TABLE_NAME) - mutation_batcher = MutationsBatcher(table=table, max_row_bytes=3 * 1024 * 1024) - number_of_bytes = 1 * 1024 * 1024 - max_value = b"1" * number_of_bytes +def test_mutation_batcher_mutate_w_max_row_bytes(): + table = _Table(TABLE_NAME) + mutation_batcher = _make_mutation_batcher( + table=table, max_row_bytes=3 * 1024 * 1024 + ) - row = DirectRow(row_key=b"row_key") - row.set_cell("cf1", b"c1", max_value) - row.set_cell("cf1", b"c2", max_value) - row.set_cell("cf1", b"c3", max_value) + number_of_bytes = 1 * 1024 * 1024 + max_value = b"1" * number_of_bytes - mutation_batcher.mutate(row) + row = DirectRow(row_key=b"row_key") + row.set_cell("cf1", b"c1", max_value) 
+ row.set_cell("cf1", b"c2", max_value) + row.set_cell("cf1", b"c3", max_value) + + mutation_batcher.mutate(row) - self.assertEqual(table.mutation_calls, 1) + assert table.mutation_calls == 1 class _Instance(object): diff --git a/packages/google-cloud-bigtable/tests/unit/test_client.py b/packages/google-cloud-bigtable/tests/unit/test_client.py index f6cd7a5ccc4e..00f8524bc894 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/test_client.py @@ -13,750 +13,747 @@ # limitations under the License. -import unittest - import mock +import pytest from ._testing import _make_credentials +PROJECT = "PROJECT" +INSTANCE_ID = "instance-id" +DISPLAY_NAME = "display-name" +USER_AGENT = "you-sir-age-int" + + +def _invoke_client_factory(client_class, **kw): + from google.cloud.bigtable.client import _create_gapic_client + + return _create_gapic_client(client_class, **kw) + + +def test___create_gapic_client_wo_emulator(): + client_class = mock.Mock() + credentials = _make_credentials() + client = _MockClient(credentials) + client_info = client._client_info = mock.Mock() + transport = mock.Mock() + + result = _invoke_client_factory(client_class, transport=transport)(client) + + assert result is client_class.return_value + client_class.assert_called_once_with( + credentials=None, + client_info=client_info, + client_options=None, + transport=transport, + ) + + +def test___create_gapic_client_wo_emulator_w_client_options(): + client_class = mock.Mock() + credentials = _make_credentials() + client = _MockClient(credentials) + client_info = client._client_info = mock.Mock() + client_options = mock.Mock() + transport = mock.Mock() + + result = _invoke_client_factory( + client_class, client_options=client_options, transport=transport + )(client) + + assert result is client_class.return_value + client_class.assert_called_once_with( + credentials=None, + client_info=client_info, + client_options=client_options, + transport=transport, + ) + + +def test___create_gapic_client_w_emulator(): + client_class = mock.Mock() + emulator_host = emulator_channel = object() + credentials = _make_credentials() + client_options = mock.Mock() + transport = mock.Mock() + + client = _MockClient( + credentials, emulator_host=emulator_host, emulator_channel=emulator_channel + ) + client_info = client._client_info = mock.Mock() + result = _invoke_client_factory( + client_class, client_options=client_options, transport=transport + )(client) + + assert result is client_class.return_value + client_class.assert_called_once_with( + credentials=None, + client_info=client_info, + client_options=client_options, + transport=transport, + ) + + +class _MockClient(object): + def __init__(self, credentials, emulator_host=None, emulator_channel=None): + self._credentials = credentials + self._emulator_host = emulator_host + self._emulator_channel = emulator_channel -class Test__create_gapic_client(unittest.TestCase): - def _invoke_client_factory(self, client_class, **kw): - from google.cloud.bigtable.client import _create_gapic_client - return _create_gapic_client(client_class, **kw) +def _make_client(*args, **kwargs): + from google.cloud.bigtable.client import Client - def test_wo_emulator(self): - client_class = mock.Mock() - credentials = _make_credentials() - client = _Client(credentials) - client_info = client._client_info = mock.Mock() - transport = mock.Mock() + return Client(*args, **kwargs) - result = self._invoke_client_factory(client_class, transport=transport)(client) - 
self.assertIs(result, client_class.return_value) - client_class.assert_called_once_with( - credentials=None, - client_info=client_info, - client_options=None, - transport=transport, - ) +@mock.patch("os.environ", {}) +def test_client_constructor_defaults(): + from google.api_core import client_info + from google.cloud.bigtable import __version__ + from google.cloud.bigtable.client import DATA_SCOPE - def test_wo_emulator_w_client_options(self): - client_class = mock.Mock() - credentials = _make_credentials() - client = _Client(credentials) - client_info = client._client_info = mock.Mock() - client_options = mock.Mock() - transport = mock.Mock() - - result = self._invoke_client_factory( - client_class, client_options=client_options, transport=transport - )(client) - - self.assertIs(result, client_class.return_value) - client_class.assert_called_once_with( - credentials=None, - client_info=client_info, - client_options=client_options, - transport=transport, - ) + credentials = _make_credentials() - def test_w_emulator(self): - client_class = mock.Mock() - emulator_host = emulator_channel = object() - credentials = _make_credentials() - client_options = mock.Mock() - transport = mock.Mock() + with mock.patch("google.auth.default") as mocked: + mocked.return_value = credentials, PROJECT + client = _make_client() - client = _Client( - credentials, emulator_host=emulator_host, emulator_channel=emulator_channel - ) - client_info = client._client_info = mock.Mock() - result = self._invoke_client_factory( - client_class, client_options=client_options, transport=transport - )(client) - - self.assertIs(result, client_class.return_value) - client_class.assert_called_once_with( - credentials=None, + assert client.project == PROJECT + assert client._credentials is credentials.with_scopes.return_value + assert not client._read_only + assert not client._admin + assert isinstance(client._client_info, client_info.ClientInfo) + assert client._client_info.client_library_version == __version__ + assert client._channel is None + assert client._emulator_host is None + assert client.SCOPE == (DATA_SCOPE,) + + +def test_client_constructor_explicit(): + import warnings + from google.cloud.bigtable.client import ADMIN_SCOPE + from google.cloud.bigtable.client import DATA_SCOPE + + credentials = _make_credentials() + client_info = mock.Mock() + + with warnings.catch_warnings(record=True) as warned: + client = _make_client( + project=PROJECT, + credentials=credentials, + read_only=False, + admin=True, client_info=client_info, - client_options=client_options, - transport=transport, - ) + channel=mock.sentinel.channel, + ) + + assert len(warned) == 1 + + assert client.project == PROJECT + assert client._credentials is credentials.with_scopes.return_value + assert not client._read_only + assert client._admin + assert client._client_info is client_info + assert client._channel is mock.sentinel.channel + assert client.SCOPE == (DATA_SCOPE, ADMIN_SCOPE) + +def test_client_constructor_w_both_admin_and_read_only(): + credentials = _make_credentials() + with pytest.raises(ValueError): + _make_client( + project=PROJECT, credentials=credentials, admin=True, read_only=True, + ) + + +def test_client_constructor_w_emulator_host(): + from google.cloud.environment_vars import BIGTABLE_EMULATOR + from google.cloud.bigtable.client import _DEFAULT_BIGTABLE_EMULATOR_CLIENT + from google.cloud.bigtable.client import _GRPC_CHANNEL_OPTIONS + + emulator_host = "localhost:8081" + with mock.patch("os.environ", {BIGTABLE_EMULATOR: 
emulator_host}): + with mock.patch("grpc.secure_channel") as factory: + client = _make_client() + # don't test local_composite_credentials + # client._local_composite_credentials = lambda: credentials + # channels are formed when needed, so access a client + # create a gapic channel + client.table_data_client + + assert client._emulator_host == emulator_host + assert client.project == _DEFAULT_BIGTABLE_EMULATOR_CLIENT + factory.assert_called_once_with( + emulator_host, + mock.ANY, # test of creds wrapping in '_emulator_host' below + options=_GRPC_CHANNEL_OPTIONS, + ) + + +def test_client_constructor_w_emulator_host_w_project(): + from google.cloud.environment_vars import BIGTABLE_EMULATOR + from google.cloud.bigtable.client import _GRPC_CHANNEL_OPTIONS + + emulator_host = "localhost:8081" + with mock.patch("os.environ", {BIGTABLE_EMULATOR: emulator_host}): + with mock.patch("grpc.secure_channel") as factory: + client = _make_client(project=PROJECT) + # channels are formed when needed, so access a client + # create a gapic channel + client.table_data_client + + assert client._emulator_host == emulator_host + assert client.project == PROJECT + factory.assert_called_once_with( + emulator_host, + mock.ANY, # test of creds wrapping in '_emulator_host' below + options=_GRPC_CHANNEL_OPTIONS, + ) + + +def test_client_constructor_w_emulator_host_w_credentials(): + from google.cloud.environment_vars import BIGTABLE_EMULATOR + from google.cloud.bigtable.client import _DEFAULT_BIGTABLE_EMULATOR_CLIENT + from google.cloud.bigtable.client import _GRPC_CHANNEL_OPTIONS + + emulator_host = "localhost:8081" + credentials = _make_credentials() + with mock.patch("os.environ", {BIGTABLE_EMULATOR: emulator_host}): + with mock.patch("grpc.secure_channel") as factory: + client = _make_client(credentials=credentials) + # channels are formed when needed, so access a client + # create a gapic channel + client.table_data_client + + assert client._emulator_host == emulator_host + assert client.project == _DEFAULT_BIGTABLE_EMULATOR_CLIENT + factory.assert_called_once_with( + emulator_host, + mock.ANY, # test of creds wrapping in '_emulator_host' below + options=_GRPC_CHANNEL_OPTIONS, + ) + + +def test_client__get_scopes_default(): + from google.cloud.bigtable.client import DATA_SCOPE + + client = _make_client(project=PROJECT, credentials=_make_credentials()) + assert client._get_scopes() == (DATA_SCOPE,) + + +def test_client__get_scopes_w_admin(): + from google.cloud.bigtable.client import ADMIN_SCOPE + from google.cloud.bigtable.client import DATA_SCOPE + + client = _make_client(project=PROJECT, credentials=_make_credentials(), admin=True) + expected_scopes = (DATA_SCOPE, ADMIN_SCOPE) + assert client._get_scopes() == expected_scopes + + +def test_client__get_scopes_w_read_only(): + from google.cloud.bigtable.client import READ_ONLY_SCOPE + + client = _make_client( + project=PROJECT, credentials=_make_credentials(), read_only=True + ) + assert client._get_scopes() == (READ_ONLY_SCOPE,) + + +def test_client__emulator_channel_w_sync(): + emulator_host = "localhost:8081" + transport_name = "GrpcTransportTesting" + transport = mock.Mock(spec=["__name__"], __name__=transport_name) + options = mock.Mock(spec=[]) + client = _make_client( + project=PROJECT, credentials=_make_credentials(), read_only=True + ) + client._emulator_host = emulator_host + lcc = client._local_composite_credentials = mock.Mock(spec=[]) + + with mock.patch("grpc.secure_channel") as patched: + channel = client._emulator_channel(transport, options) + + 
assert channel is patched.return_value + patched.assert_called_once_with( + emulator_host, lcc.return_value, options=options, + ) + + +def test_client__emulator_channel_w_async(): + emulator_host = "localhost:8081" + transport_name = "GrpcAsyncIOTransportTesting" + transport = mock.Mock(spec=["__name__"], __name__=transport_name) + options = mock.Mock(spec=[]) + client = _make_client( + project=PROJECT, credentials=_make_credentials(), read_only=True + ) + client._emulator_host = emulator_host + lcc = client._local_composite_credentials = mock.Mock(spec=[]) + + with mock.patch("grpc.aio.secure_channel") as patched: + channel = client._emulator_channel(transport, options) + + assert channel is patched.return_value + patched.assert_called_once_with( + emulator_host, lcc.return_value, options=options, + ) + + +def test_client__local_composite_credentials(): + client = _make_client( + project=PROJECT, credentials=_make_credentials(), read_only=True + ) + + wsir_patch = mock.patch("google.auth.credentials.with_scopes_if_required") + request_patch = mock.patch("google.auth.transport.requests.Request") + amp_patch = mock.patch("google.auth.transport.grpc.AuthMetadataPlugin") + grpc_patches = mock.patch.multiple( + "grpc", + metadata_call_credentials=mock.DEFAULT, + local_channel_credentials=mock.DEFAULT, + composite_channel_credentials=mock.DEFAULT, + ) + with wsir_patch as wsir_patched: + with request_patch as request_patched: + with amp_patch as amp_patched: + with grpc_patches as grpc_patched: + credentials = client._local_composite_credentials() + + grpc_mcc = grpc_patched["metadata_call_credentials"] + grpc_lcc = grpc_patched["local_channel_credentials"] + grpc_ccc = grpc_patched["composite_channel_credentials"] + + assert credentials is grpc_ccc.return_value + + wsir_patched.assert_called_once_with(client._credentials, None) + request_patched.assert_called_once_with() + amp_patched.assert_called_once_with( + wsir_patched.return_value, request_patched.return_value, + ) + grpc_mcc.assert_called_once_with(amp_patched.return_value) + grpc_lcc.assert_called_once_with() + grpc_ccc.assert_called_once_with(grpc_lcc.return_value, grpc_mcc.return_value) + + +def _create_gapic_client_channel_helper(endpoint=None, emulator_host=None): + from google.cloud.bigtable.client import _GRPC_CHANNEL_OPTIONS + + client_class = mock.Mock(spec=["DEFAULT_ENDPOINT"]) + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials) + + if endpoint is not None: + client._client_options = mock.Mock( + spec=["api_endpoint"], api_endpoint=endpoint, + ) + expected_host = endpoint + else: + expected_host = client_class.DEFAULT_ENDPOINT + + if emulator_host is not None: + client._emulator_host = emulator_host + client._emulator_channel = mock.Mock(spec=[]) + expected_host = emulator_host -class _Client(object): - def __init__(self, credentials, emulator_host=None, emulator_channel=None): - self._credentials = credentials - self._emulator_host = emulator_host - self._emulator_channel = emulator_channel + grpc_transport = mock.Mock(spec=["create_channel"]) + transport = client._create_gapic_client_channel(client_class, grpc_transport) -class TestClient(unittest.TestCase): - - PROJECT = "PROJECT" - INSTANCE_ID = "instance-id" - DISPLAY_NAME = "display-name" - USER_AGENT = "you-sir-age-int" - - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.client import Client - - return Client - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, 
**kwargs) - - @mock.patch("os.environ", {}) - def test_constructor_defaults(self): - from google.api_core import client_info - from google.cloud.bigtable import __version__ - from google.cloud.bigtable.client import DATA_SCOPE - - credentials = _make_credentials() - - with mock.patch("google.auth.default") as mocked: - mocked.return_value = credentials, self.PROJECT - client = self._make_one() - - self.assertEqual(client.project, self.PROJECT) - self.assertIs(client._credentials, credentials.with_scopes.return_value) - self.assertFalse(client._read_only) - self.assertFalse(client._admin) - self.assertIsInstance(client._client_info, client_info.ClientInfo) - self.assertEqual(client._client_info.client_library_version, __version__) - self.assertIsNone(client._channel) - self.assertIsNone(client._emulator_host) - self.assertEqual(client.SCOPE, (DATA_SCOPE,)) - - def test_constructor_explicit(self): - import warnings - from google.cloud.bigtable.client import ADMIN_SCOPE - from google.cloud.bigtable.client import DATA_SCOPE - - credentials = _make_credentials() - client_info = mock.Mock() - - with warnings.catch_warnings(record=True) as warned: - client = self._make_one( - project=self.PROJECT, - credentials=credentials, - read_only=False, - admin=True, - client_info=client_info, - channel=mock.sentinel.channel, - ) - - self.assertEqual(len(warned), 1) - - self.assertEqual(client.project, self.PROJECT) - self.assertIs(client._credentials, credentials.with_scopes.return_value) - self.assertFalse(client._read_only) - self.assertTrue(client._admin) - self.assertIs(client._client_info, client_info) - self.assertIs(client._channel, mock.sentinel.channel) - self.assertEqual(client.SCOPE, (DATA_SCOPE, ADMIN_SCOPE)) - - def test_constructor_both_admin_and_read_only(self): - credentials = _make_credentials() - with self.assertRaises(ValueError): - self._make_one( - project=self.PROJECT, - credentials=credentials, - admin=True, - read_only=True, - ) - - def test_constructor_with_emulator_host(self): - from google.cloud.environment_vars import BIGTABLE_EMULATOR - from google.cloud.bigtable.client import _DEFAULT_BIGTABLE_EMULATOR_CLIENT - from google.cloud.bigtable.client import _GRPC_CHANNEL_OPTIONS - - emulator_host = "localhost:8081" - with mock.patch("os.environ", {BIGTABLE_EMULATOR: emulator_host}): - with mock.patch("grpc.secure_channel") as factory: - client = self._make_one() - # don't test local_composite_credentials - # client._local_composite_credentials = lambda: credentials - # channels are formed when needed, so access a client - # create a gapic channel - client.table_data_client - - self.assertEqual(client._emulator_host, emulator_host) - self.assertEqual(client.project, _DEFAULT_BIGTABLE_EMULATOR_CLIENT) - factory.assert_called_once_with( - emulator_host, - mock.ANY, # test of creds wrapping in '_emulator_host' below - options=_GRPC_CHANNEL_OPTIONS, - ) + assert transport is grpc_transport.return_value - def test_constructor_with_emulator_host_w_project(self): - from google.cloud.environment_vars import BIGTABLE_EMULATOR - from google.cloud.bigtable.client import _GRPC_CHANNEL_OPTIONS - - emulator_host = "localhost:8081" - with mock.patch("os.environ", {BIGTABLE_EMULATOR: emulator_host}): - with mock.patch("grpc.secure_channel") as factory: - client = self._make_one(project=self.PROJECT) - # channels are formed when needed, so access a client - # create a gapic channel - client.table_data_client - - self.assertEqual(client._emulator_host, emulator_host) - self.assertEqual(client.project, 
self.PROJECT) - factory.assert_called_once_with( - emulator_host, - mock.ANY, # test of creds wrapping in '_emulator_host' below - options=_GRPC_CHANNEL_OPTIONS, + if emulator_host is not None: + client._emulator_channel.assert_called_once_with( + transport=grpc_transport, options=_GRPC_CHANNEL_OPTIONS, ) - - def test_constructor_with_emulator_host_w_credentials(self): - from google.cloud.environment_vars import BIGTABLE_EMULATOR - from google.cloud.bigtable.client import _DEFAULT_BIGTABLE_EMULATOR_CLIENT - from google.cloud.bigtable.client import _GRPC_CHANNEL_OPTIONS - - emulator_host = "localhost:8081" - credentials = _make_credentials() - with mock.patch("os.environ", {BIGTABLE_EMULATOR: emulator_host}): - with mock.patch("grpc.secure_channel") as factory: - client = self._make_one(credentials=credentials) - # channels are formed when needed, so access a client - # create a gapic channel - client.table_data_client - - self.assertEqual(client._emulator_host, emulator_host) - self.assertEqual(client.project, _DEFAULT_BIGTABLE_EMULATOR_CLIENT) - factory.assert_called_once_with( - emulator_host, - mock.ANY, # test of creds wrapping in '_emulator_host' below + grpc_transport.assert_called_once_with( + channel=client._emulator_channel.return_value, host=expected_host, + ) + else: + grpc_transport.create_channel.assert_called_once_with( + host=expected_host, + credentials=client._credentials, options=_GRPC_CHANNEL_OPTIONS, ) + grpc_transport.assert_called_once_with( + channel=grpc_transport.create_channel.return_value, host=expected_host, + ) - def test__get_scopes_default(self): - from google.cloud.bigtable.client import DATA_SCOPE - client = self._make_one(project=self.PROJECT, credentials=_make_credentials()) - self.assertEqual(client._get_scopes(), (DATA_SCOPE,)) +def test_client__create_gapic_client_channel_w_defaults(): + _create_gapic_client_channel_helper() - def test__get_scopes_admin(self): - from google.cloud.bigtable.client import ADMIN_SCOPE - from google.cloud.bigtable.client import DATA_SCOPE - client = self._make_one( - project=self.PROJECT, credentials=_make_credentials(), admin=True - ) - expected_scopes = (DATA_SCOPE, ADMIN_SCOPE) - self.assertEqual(client._get_scopes(), expected_scopes) +def test_client__create_gapic_client_channel_w_endpoint(): + endpoint = "api.example.com" + _create_gapic_client_channel_helper(endpoint=endpoint) - def test__get_scopes_read_only(self): - from google.cloud.bigtable.client import READ_ONLY_SCOPE - client = self._make_one( - project=self.PROJECT, credentials=_make_credentials(), read_only=True - ) - self.assertEqual(client._get_scopes(), (READ_ONLY_SCOPE,)) - - def test__emulator_channel_sync(self): - emulator_host = "localhost:8081" - transport_name = "GrpcTransportTesting" - transport = mock.Mock(spec=["__name__"], __name__=transport_name) - options = mock.Mock(spec=[]) - client = self._make_one( - project=self.PROJECT, credentials=_make_credentials(), read_only=True - ) - client._emulator_host = emulator_host - lcc = client._local_composite_credentials = mock.Mock(spec=[]) +def test_client__create_gapic_client_channel_w_emulator_host(): + host = "api.example.com:1234" + _create_gapic_client_channel_helper(emulator_host=host) - with mock.patch("grpc.secure_channel") as patched: - channel = client._emulator_channel(transport, options) - assert channel is patched.return_value - patched.assert_called_once_with( - emulator_host, lcc.return_value, options=options, - ) +def test_client__create_gapic_client_channel_w_endpoint_w_emulator_host(): 
+ endpoint = "api.example.com" + host = "other.example.com:1234" + _create_gapic_client_channel_helper(endpoint=endpoint, emulator_host=host) - def test__emulator_channel_async(self): - emulator_host = "localhost:8081" - transport_name = "GrpcAsyncIOTransportTesting" - transport = mock.Mock(spec=["__name__"], __name__=transport_name) - options = mock.Mock(spec=[]) - client = self._make_one( - project=self.PROJECT, credentials=_make_credentials(), read_only=True - ) - client._emulator_host = emulator_host - lcc = client._local_composite_credentials = mock.Mock(spec=[]) - with mock.patch("grpc.aio.secure_channel") as patched: - channel = client._emulator_channel(transport, options) +def test_client_project_path(): + credentials = _make_credentials() + project = "PROJECT" + client = _make_client(project=project, credentials=credentials, admin=True) + project_name = "projects/" + project + assert client.project_path == project_name - assert channel is patched.return_value - patched.assert_called_once_with( - emulator_host, lcc.return_value, options=options, - ) - def test__local_composite_credentials(self): - client = self._make_one( - project=self.PROJECT, credentials=_make_credentials(), read_only=True - ) +def test_client_table_data_client_not_initialized(): + from google.cloud.bigtable_v2 import BigtableClient - wsir_patch = mock.patch("google.auth.credentials.with_scopes_if_required") - request_patch = mock.patch("google.auth.transport.requests.Request") - amp_patch = mock.patch("google.auth.transport.grpc.AuthMetadataPlugin") - grpc_patches = mock.patch.multiple( - "grpc", - metadata_call_credentials=mock.DEFAULT, - local_channel_credentials=mock.DEFAULT, - composite_channel_credentials=mock.DEFAULT, - ) - with wsir_patch as wsir_patched: - with request_patch as request_patched: - with amp_patch as amp_patched: - with grpc_patches as grpc_patched: - credentials = client._local_composite_credentials() - - grpc_mcc = grpc_patched["metadata_call_credentials"] - grpc_lcc = grpc_patched["local_channel_credentials"] - grpc_ccc = grpc_patched["composite_channel_credentials"] - - self.assertIs(credentials, grpc_ccc.return_value) - - wsir_patched.assert_called_once_with(client._credentials, None) - request_patched.assert_called_once_with() - amp_patched.assert_called_once_with( - wsir_patched.return_value, request_patched.return_value, - ) - grpc_mcc.assert_called_once_with(amp_patched.return_value) - grpc_lcc.assert_called_once_with() - grpc_ccc.assert_called_once_with(grpc_lcc.return_value, grpc_mcc.return_value) - - def _create_gapic_client_channel_helper( - self, endpoint=None, emulator_host=None, - ): - from google.cloud.bigtable.client import _GRPC_CHANNEL_OPTIONS - - client_class = mock.Mock(spec=["DEFAULT_ENDPOINT"]) - credentials = _make_credentials() - client = self._make_one(project=self.PROJECT, credentials=credentials) - - if endpoint is not None: - client._client_options = mock.Mock( - spec=["api_endpoint"], api_endpoint=endpoint, - ) - expected_host = endpoint - else: - expected_host = client_class.DEFAULT_ENDPOINT - - if emulator_host is not None: - client._emulator_host = emulator_host - client._emulator_channel = mock.Mock(spec=[]) - expected_host = emulator_host - - grpc_transport = mock.Mock(spec=["create_channel"]) - - transport = client._create_gapic_client_channel(client_class, grpc_transport) - - self.assertIs(transport, grpc_transport.return_value) - - if emulator_host is not None: - client._emulator_channel.assert_called_once_with( - transport=grpc_transport, 
options=_GRPC_CHANNEL_OPTIONS, - ) - grpc_transport.assert_called_once_with( - channel=client._emulator_channel.return_value, host=expected_host, - ) - else: - grpc_transport.create_channel.assert_called_once_with( - host=expected_host, - credentials=client._credentials, - options=_GRPC_CHANNEL_OPTIONS, - ) - grpc_transport.assert_called_once_with( - channel=grpc_transport.create_channel.return_value, host=expected_host, - ) - - def test__create_gapic_client_channel_w_defaults(self): - self._create_gapic_client_channel_helper() - - def test__create_gapic_client_channel_w_endpoint(self): - endpoint = "api.example.com" - self._create_gapic_client_channel_helper(endpoint=endpoint) - - def test__create_gapic_client_channel_w_emulator_host(self): - host = "api.example.com:1234" - self._create_gapic_client_channel_helper(emulator_host=host) - - def test__create_gapic_client_channel_w_endpoint_w_emulator_host(self): - endpoint = "api.example.com" - host = "other.example.com:1234" - self._create_gapic_client_channel_helper(endpoint=endpoint, emulator_host=host) - - def test_project_path_property(self): - credentials = _make_credentials() - project = "PROJECT" - client = self._make_one(project=project, credentials=credentials, admin=True) - project_name = "projects/" + project - self.assertEqual(client.project_path, project_name) - - def test_table_data_client_not_initialized(self): - from google.cloud.bigtable_v2 import BigtableClient - - credentials = _make_credentials() - client = self._make_one(project=self.PROJECT, credentials=credentials) + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials) - table_data_client = client.table_data_client - self.assertIsInstance(table_data_client, BigtableClient) - self.assertIs(client._table_data_client, table_data_client) + table_data_client = client.table_data_client + assert isinstance(table_data_client, BigtableClient) + assert client._table_data_client is table_data_client - def test_table_data_client_not_initialized_w_client_info(self): - from google.cloud.bigtable_v2 import BigtableClient - credentials = _make_credentials() - client_info = mock.Mock() - client = self._make_one( - project=self.PROJECT, credentials=credentials, client_info=client_info - ) +def test_client_table_data_client_not_initialized_w_client_info(): + from google.cloud.bigtable_v2 import BigtableClient - table_data_client = client.table_data_client - self.assertIsInstance(table_data_client, BigtableClient) - self.assertIs(client._client_info, client_info) - self.assertIs(client._table_data_client, table_data_client) + credentials = _make_credentials() + client_info = mock.Mock() + client = _make_client( + project=PROJECT, credentials=credentials, client_info=client_info + ) - def test_table_data_client_not_initialized_w_client_options(self): - from google.api_core.client_options import ClientOptions + table_data_client = client.table_data_client + assert isinstance(table_data_client, BigtableClient) + assert client._client_info is client_info + assert client._table_data_client is table_data_client - credentials = _make_credentials() - client_options = ClientOptions( - quota_project_id="QUOTA-PROJECT", api_endpoint="xyz" - ) - client = self._make_one( - project=self.PROJECT, credentials=credentials, client_options=client_options - ) - patch = mock.patch("google.cloud.bigtable_v2.BigtableClient") - with patch as mocked: - table_data_client = client.table_data_client +def test_client_table_data_client_not_initialized_w_client_options(): 
+ from google.api_core.client_options import ClientOptions - self.assertIs(table_data_client, mocked.return_value) - self.assertIs(client._table_data_client, table_data_client) + credentials = _make_credentials() + client_options = ClientOptions(quota_project_id="QUOTA-PROJECT", api_endpoint="xyz") + client = _make_client( + project=PROJECT, credentials=credentials, client_options=client_options + ) - mocked.assert_called_once_with( - client_info=client._client_info, - credentials=None, - transport=mock.ANY, - client_options=client_options, - ) + patch = mock.patch("google.cloud.bigtable_v2.BigtableClient") + with patch as mocked: + table_data_client = client.table_data_client - def test_table_data_client_initialized(self): - credentials = _make_credentials() - client = self._make_one( - project=self.PROJECT, credentials=credentials, admin=True - ) + assert table_data_client is mocked.return_value + assert client._table_data_client is table_data_client - already = client._table_data_client = object() - self.assertIs(client.table_data_client, already) + mocked.assert_called_once_with( + client_info=client._client_info, + credentials=None, + transport=mock.ANY, + client_options=client_options, + ) - def test_table_admin_client_not_initialized_no_admin_flag(self): - credentials = _make_credentials() - client = self._make_one(project=self.PROJECT, credentials=credentials) - with self.assertRaises(ValueError): - client.table_admin_client() +def test_client_table_data_client_initialized(): + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials, admin=True) - def test_table_admin_client_not_initialized_w_admin_flag(self): - from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient + already = client._table_data_client = object() + assert client.table_data_client is already - credentials = _make_credentials() - client = self._make_one( - project=self.PROJECT, credentials=credentials, admin=True - ) - table_admin_client = client.table_admin_client - self.assertIsInstance(table_admin_client, BigtableTableAdminClient) - self.assertIs(client._table_admin_client, table_admin_client) +def test_client_table_admin_client_not_initialized_no_admin_flag(): + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials) - def test_table_admin_client_not_initialized_w_client_info(self): - from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient + with pytest.raises(ValueError): + client.table_admin_client() - credentials = _make_credentials() - client_info = mock.Mock() - client = self._make_one( - project=self.PROJECT, - credentials=credentials, - admin=True, - client_info=client_info, - ) - - table_admin_client = client.table_admin_client - self.assertIsInstance(table_admin_client, BigtableTableAdminClient) - self.assertIs(client._client_info, client_info) - self.assertIs(client._table_admin_client, table_admin_client) - - def test_table_admin_client_not_initialized_w_client_options(self): - credentials = _make_credentials() - admin_client_options = mock.Mock() - client = self._make_one( - project=self.PROJECT, - credentials=credentials, - admin=True, - admin_client_options=admin_client_options, - ) - client._create_gapic_client_channel = mock.Mock() - patch = mock.patch("google.cloud.bigtable_admin_v2.BigtableTableAdminClient") - with patch as mocked: - table_admin_client = client.table_admin_client - - self.assertIs(table_admin_client, mocked.return_value) - self.assertIs(client._table_admin_client, 
table_admin_client) - mocked.assert_called_once_with( - client_info=client._client_info, - credentials=None, - transport=mock.ANY, - client_options=admin_client_options, - ) +def test_client_table_admin_client_not_initialized_w_admin_flag(): + from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient - def test_table_admin_client_initialized(self): - credentials = _make_credentials() - client = self._make_one( - project=self.PROJECT, credentials=credentials, admin=True - ) + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials, admin=True) - already = client._table_admin_client = object() - self.assertIs(client.table_admin_client, already) + table_admin_client = client.table_admin_client + assert isinstance(table_admin_client, BigtableTableAdminClient) + assert client._table_admin_client is table_admin_client - def test_instance_admin_client_not_initialized_no_admin_flag(self): - credentials = _make_credentials() - client = self._make_one(project=self.PROJECT, credentials=credentials) - with self.assertRaises(ValueError): - client.instance_admin_client() +def test_client_table_admin_client_not_initialized_w_client_info(): + from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient - def test_instance_admin_client_not_initialized_w_admin_flag(self): - from google.cloud.bigtable_admin_v2 import BigtableInstanceAdminClient + credentials = _make_credentials() + client_info = mock.Mock() + client = _make_client( + project=PROJECT, credentials=credentials, admin=True, client_info=client_info, + ) - credentials = _make_credentials() - client = self._make_one( - project=self.PROJECT, credentials=credentials, admin=True - ) + table_admin_client = client.table_admin_client + assert isinstance(table_admin_client, BigtableTableAdminClient) + assert client._client_info is client_info + assert client._table_admin_client is table_admin_client - instance_admin_client = client.instance_admin_client - self.assertIsInstance(instance_admin_client, BigtableInstanceAdminClient) - self.assertIs(client._instance_admin_client, instance_admin_client) - def test_instance_admin_client_not_initialized_w_client_info(self): - from google.cloud.bigtable_admin_v2 import BigtableInstanceAdminClient +def test_client_table_admin_client_not_initialized_w_client_options(): + credentials = _make_credentials() + admin_client_options = mock.Mock() + client = _make_client( + project=PROJECT, + credentials=credentials, + admin=True, + admin_client_options=admin_client_options, + ) - credentials = _make_credentials() - client_info = mock.Mock() - client = self._make_one( - project=self.PROJECT, - credentials=credentials, - admin=True, - client_info=client_info, - ) + client._create_gapic_client_channel = mock.Mock() + patch = mock.patch("google.cloud.bigtable_admin_v2.BigtableTableAdminClient") + with patch as mocked: + table_admin_client = client.table_admin_client - instance_admin_client = client.instance_admin_client - self.assertIsInstance(instance_admin_client, BigtableInstanceAdminClient) - self.assertIs(client._client_info, client_info) - self.assertIs(client._instance_admin_client, instance_admin_client) - - def test_instance_admin_client_not_initialized_w_client_options(self): - credentials = _make_credentials() - admin_client_options = mock.Mock() - client = self._make_one( - project=self.PROJECT, - credentials=credentials, - admin=True, - admin_client_options=admin_client_options, - ) + assert table_admin_client is mocked.return_value + assert 
client._table_admin_client is table_admin_client + mocked.assert_called_once_with( + client_info=client._client_info, + credentials=None, + transport=mock.ANY, + client_options=admin_client_options, + ) - client._create_gapic_client_channel = mock.Mock() - patch = mock.patch("google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient") - with patch as mocked: - instance_admin_client = client.instance_admin_client - - self.assertIs(instance_admin_client, mocked.return_value) - self.assertIs(client._instance_admin_client, instance_admin_client) - mocked.assert_called_once_with( - client_info=client._client_info, - credentials=None, - transport=mock.ANY, - client_options=admin_client_options, - ) - def test_instance_admin_client_initialized(self): - credentials = _make_credentials() - client = self._make_one( - project=self.PROJECT, credentials=credentials, admin=True - ) +def test_client_table_admin_client_initialized(): + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials, admin=True) - already = client._instance_admin_client = object() - self.assertIs(client.instance_admin_client, already) - - def test_instance_factory_defaults(self): - from google.cloud.bigtable.instance import Instance - - PROJECT = "PROJECT" - INSTANCE_ID = "instance-id" - credentials = _make_credentials() - client = self._make_one(project=PROJECT, credentials=credentials) - - instance = client.instance(INSTANCE_ID) - - self.assertIsInstance(instance, Instance) - self.assertEqual(instance.instance_id, INSTANCE_ID) - self.assertEqual(instance.display_name, INSTANCE_ID) - self.assertIsNone(instance.type_) - self.assertIsNone(instance.labels) - self.assertIs(instance._client, client) - - def test_instance_factory_non_defaults(self): - from google.cloud.bigtable.instance import Instance - from google.cloud.bigtable import enums - - PROJECT = "PROJECT" - INSTANCE_ID = "instance-id" - DISPLAY_NAME = "display-name" - instance_type = enums.Instance.Type.DEVELOPMENT - labels = {"foo": "bar"} - credentials = _make_credentials() - client = self._make_one(project=PROJECT, credentials=credentials) - - instance = client.instance( - INSTANCE_ID, - display_name=DISPLAY_NAME, - instance_type=instance_type, - labels=labels, - ) + already = client._table_admin_client = object() + assert client.table_admin_client is already - self.assertIsInstance(instance, Instance) - self.assertEqual(instance.instance_id, INSTANCE_ID) - self.assertEqual(instance.display_name, DISPLAY_NAME) - self.assertEqual(instance.type_, instance_type) - self.assertEqual(instance.labels, labels) - self.assertIs(instance._client, client) - - def test_list_instances(self): - from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 - from google.cloud.bigtable_admin_v2.types import ( - bigtable_instance_admin as messages_v2_pb2, - ) - from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( - BigtableInstanceAdminClient, - ) - from google.cloud.bigtable.instance import Instance - FAILED_LOCATION = "FAILED" - INSTANCE_ID1 = "instance-id1" - INSTANCE_ID2 = "instance-id2" - INSTANCE_NAME1 = "projects/" + self.PROJECT + "/instances/" + INSTANCE_ID1 - INSTANCE_NAME2 = "projects/" + self.PROJECT + "/instances/" + INSTANCE_ID2 +def test_client_instance_admin_client_not_initialized_no_admin_flag(): + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials) - api = mock.create_autospec(BigtableInstanceAdminClient) - credentials = _make_credentials() + 
with pytest.raises(ValueError): + client.instance_admin_client() - client = self._make_one( - project=self.PROJECT, credentials=credentials, admin=True - ) - # Create response_pb - response_pb = messages_v2_pb2.ListInstancesResponse( - failed_locations=[FAILED_LOCATION], - instances=[ - data_v2_pb2.Instance(name=INSTANCE_NAME1, display_name=INSTANCE_NAME1), - data_v2_pb2.Instance(name=INSTANCE_NAME2, display_name=INSTANCE_NAME2), - ], - ) +def test_client_instance_admin_client_not_initialized_w_admin_flag(): + from google.cloud.bigtable_admin_v2 import BigtableInstanceAdminClient - # Patch the stub used by the API method. - client._instance_admin_client = api - instance_stub = client._instance_admin_client + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials, admin=True) - instance_stub.list_instances.side_effect = [response_pb] + instance_admin_client = client.instance_admin_client + assert isinstance(instance_admin_client, BigtableInstanceAdminClient) + assert client._instance_admin_client is instance_admin_client - # Perform the method and check the result. - instances, failed_locations = client.list_instances() - instance_1, instance_2 = instances +def test_client_instance_admin_client_not_initialized_w_client_info(): + from google.cloud.bigtable_admin_v2 import BigtableInstanceAdminClient - self.assertIsInstance(instance_1, Instance) - self.assertEqual(instance_1.instance_id, INSTANCE_ID1) - self.assertTrue(instance_1._client is client) + credentials = _make_credentials() + client_info = mock.Mock() + client = _make_client( + project=PROJECT, credentials=credentials, admin=True, client_info=client_info, + ) - self.assertIsInstance(instance_2, Instance) - self.assertEqual(instance_2.instance_id, INSTANCE_ID2) - self.assertTrue(instance_2._client is client) + instance_admin_client = client.instance_admin_client + assert isinstance(instance_admin_client, BigtableInstanceAdminClient) + assert client._client_info is client_info + assert client._instance_admin_client is instance_admin_client - self.assertEqual(failed_locations, [FAILED_LOCATION]) - def test_list_clusters(self): - from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( - BigtableInstanceAdminClient, - ) - from google.cloud.bigtable_admin_v2.types import ( - bigtable_instance_admin as messages_v2_pb2, - ) - from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 - from google.cloud.bigtable.instance import Cluster +def test_client_instance_admin_client_not_initialized_w_client_options(): + credentials = _make_credentials() + admin_client_options = mock.Mock() + client = _make_client( + project=PROJECT, + credentials=credentials, + admin=True, + admin_client_options=admin_client_options, + ) - instance_api = mock.create_autospec(BigtableInstanceAdminClient) - - credentials = _make_credentials() - client = self._make_one( - project=self.PROJECT, credentials=credentials, admin=True - ) + client._create_gapic_client_channel = mock.Mock() + patch = mock.patch("google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient") + with patch as mocked: + instance_admin_client = client.instance_admin_client - INSTANCE_ID1 = "instance-id1" - INSTANCE_ID2 = "instance-id2" + assert instance_admin_client is mocked.return_value + assert client._instance_admin_client is instance_admin_client + mocked.assert_called_once_with( + client_info=client._client_info, + credentials=None, + transport=mock.ANY, + client_options=admin_client_options, + ) - failed_location 
= "FAILED" - cluster_id1 = "{}-cluster".format(INSTANCE_ID1) - cluster_id2 = "{}-cluster-1".format(INSTANCE_ID2) - cluster_id3 = "{}-cluster-2".format(INSTANCE_ID2) - cluster_name1 = client.instance_admin_client.cluster_path( - self.PROJECT, INSTANCE_ID1, cluster_id1 - ) - cluster_name2 = client.instance_admin_client.cluster_path( - self.PROJECT, INSTANCE_ID2, cluster_id2 - ) - cluster_name3 = client.instance_admin_client.cluster_path( - self.PROJECT, INSTANCE_ID2, cluster_id3 - ) - # Create response_pb - response_pb = messages_v2_pb2.ListClustersResponse( - failed_locations=[failed_location], - clusters=[ - data_v2_pb2.Cluster(name=cluster_name1), - data_v2_pb2.Cluster(name=cluster_name2), - data_v2_pb2.Cluster(name=cluster_name3), - ], - ) +def test_client_instance_admin_client_initialized(): + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials, admin=True) - # Patch the stub used by the API method. - client._instance_admin_client = instance_api - instance_stub = client._instance_admin_client + already = client._instance_admin_client = object() + assert client.instance_admin_client is already - instance_stub.list_clusters.side_effect = [response_pb] - # Perform the method and check the result. - clusters, failed_locations = client.list_clusters() +def test_client_instance_factory_defaults(): + from google.cloud.bigtable.instance import Instance - cluster_1, cluster_2, cluster_3 = clusters + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials) - self.assertIsInstance(cluster_1, Cluster) - self.assertEqual(cluster_1.cluster_id, cluster_id1) - self.assertEqual(cluster_1._instance.instance_id, INSTANCE_ID1) + instance = client.instance(INSTANCE_ID) - self.assertIsInstance(cluster_2, Cluster) - self.assertEqual(cluster_2.cluster_id, cluster_id2) - self.assertEqual(cluster_2._instance.instance_id, INSTANCE_ID2) + assert isinstance(instance, Instance) + assert instance.instance_id == INSTANCE_ID + assert instance.display_name == INSTANCE_ID + assert instance.type_ is None + assert instance.labels is None + assert instance._client is client - self.assertIsInstance(cluster_3, Cluster) - self.assertEqual(cluster_3.cluster_id, cluster_id3) - self.assertEqual(cluster_3._instance.instance_id, INSTANCE_ID2) - self.assertEqual(failed_locations, [failed_location]) +def test_client_instance_factory_non_defaults(): + from google.cloud.bigtable.instance import Instance + from google.cloud.bigtable import enums + + instance_type = enums.Instance.Type.DEVELOPMENT + labels = {"foo": "bar"} + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials) + + instance = client.instance( + INSTANCE_ID, + display_name=DISPLAY_NAME, + instance_type=instance_type, + labels=labels, + ) + + assert isinstance(instance, Instance) + assert instance.instance_id == INSTANCE_ID + assert instance.display_name == DISPLAY_NAME + assert instance.type_ == instance_type + assert instance.labels == labels + assert instance._client is client + + +def test_client_list_instances(): + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 + from google.cloud.bigtable_admin_v2.types import ( + bigtable_instance_admin as messages_v2_pb2, + ) + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) + from google.cloud.bigtable.instance import Instance + + FAILED_LOCATION = "FAILED" + INSTANCE_ID1 = "instance-id1" + INSTANCE_ID2 = 
"instance-id2" + INSTANCE_NAME1 = "projects/" + PROJECT + "/instances/" + INSTANCE_ID1 + INSTANCE_NAME2 = "projects/" + PROJECT + "/instances/" + INSTANCE_ID2 + + api = mock.create_autospec(BigtableInstanceAdminClient) + credentials = _make_credentials() + + client = _make_client(project=PROJECT, credentials=credentials, admin=True) + + # Create response_pb + response_pb = messages_v2_pb2.ListInstancesResponse( + failed_locations=[FAILED_LOCATION], + instances=[ + data_v2_pb2.Instance(name=INSTANCE_NAME1, display_name=INSTANCE_NAME1), + data_v2_pb2.Instance(name=INSTANCE_NAME2, display_name=INSTANCE_NAME2), + ], + ) + + # Patch the stub used by the API method. + client._instance_admin_client = api + instance_stub = client._instance_admin_client + + instance_stub.list_instances.side_effect = [response_pb] + + # Perform the method and check the result. + instances, failed_locations = client.list_instances() + + instance_1, instance_2 = instances + + assert isinstance(instance_1, Instance) + assert instance_1.instance_id == INSTANCE_ID1 + assert instance_1._client is client + + assert isinstance(instance_2, Instance) + assert instance_2.instance_id == INSTANCE_ID2 + assert instance_2._client is client + + assert failed_locations == [FAILED_LOCATION] + + +def test_client_list_clusters(): + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) + from google.cloud.bigtable_admin_v2.types import ( + bigtable_instance_admin as messages_v2_pb2, + ) + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 + from google.cloud.bigtable.instance import Cluster + + instance_api = mock.create_autospec(BigtableInstanceAdminClient) + + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials, admin=True) + + INSTANCE_ID1 = "instance-id1" + INSTANCE_ID2 = "instance-id2" + + failed_location = "FAILED" + cluster_id1 = "{}-cluster".format(INSTANCE_ID1) + cluster_id2 = "{}-cluster-1".format(INSTANCE_ID2) + cluster_id3 = "{}-cluster-2".format(INSTANCE_ID2) + cluster_name1 = client.instance_admin_client.cluster_path( + PROJECT, INSTANCE_ID1, cluster_id1 + ) + cluster_name2 = client.instance_admin_client.cluster_path( + PROJECT, INSTANCE_ID2, cluster_id2 + ) + cluster_name3 = client.instance_admin_client.cluster_path( + PROJECT, INSTANCE_ID2, cluster_id3 + ) + + # Create response_pb + response_pb = messages_v2_pb2.ListClustersResponse( + failed_locations=[failed_location], + clusters=[ + data_v2_pb2.Cluster(name=cluster_name1), + data_v2_pb2.Cluster(name=cluster_name2), + data_v2_pb2.Cluster(name=cluster_name3), + ], + ) + + # Patch the stub used by the API method. + client._instance_admin_client = instance_api + instance_stub = client._instance_admin_client + + instance_stub.list_clusters.side_effect = [response_pb] + + # Perform the method and check the result. 
+ clusters, failed_locations = client.list_clusters() + + cluster_1, cluster_2, cluster_3 = clusters + + assert isinstance(cluster_1, Cluster) + assert cluster_1.cluster_id == cluster_id1 + assert cluster_1._instance.instance_id == INSTANCE_ID1 + + assert isinstance(cluster_2, Cluster) + assert cluster_2.cluster_id == cluster_id2 + assert cluster_2._instance.instance_id == INSTANCE_ID2 + + assert isinstance(cluster_3, Cluster) + assert cluster_3.cluster_id == cluster_id3 + assert cluster_3._instance.instance_id == INSTANCE_ID2 + + assert failed_locations == [failed_location] diff --git a/packages/google-cloud-bigtable/tests/unit/test_cluster.py b/packages/google-cloud-bigtable/tests/unit/test_cluster.py index 1194e53c9cca..74ca98830a8e 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_cluster.py +++ b/packages/google-cloud-bigtable/tests/unit/test_cluster.py @@ -13,552 +13,522 @@ # limitations under the License. -import unittest - import mock import pytest from ._testing import _make_credentials +PROJECT = "project" +INSTANCE_ID = "instance-id" +LOCATION_ID = "location-id" +CLUSTER_ID = "cluster-id" +LOCATION_ID = "location-id" +CLUSTER_NAME = ( + "projects/" + PROJECT + "/instances/" + INSTANCE_ID + "/clusters/" + CLUSTER_ID +) +LOCATION_PATH = "projects/" + PROJECT + "/locations/" +SERVE_NODES = 5 +OP_ID = 5678 +OP_NAME = "operations/projects/{}/instances/{}/clusters/{}/operations/{}".format( + PROJECT, INSTANCE_ID, CLUSTER_ID, OP_ID +) +KEY_RING_ID = "key-ring-id" +CRYPTO_KEY_ID = "crypto-key-id" +KMS_KEY_NAME = f"{LOCATION_PATH}/keyRings/{KEY_RING_ID}/cryptoKeys/{CRYPTO_KEY_ID}" + + +def _make_cluster(*args, **kwargs): + from google.cloud.bigtable.cluster import Cluster + + return Cluster(*args, **kwargs) + + +def _make_client(*args, **kwargs): + from google.cloud.bigtable.client import Client + + return Client(*args, **kwargs) + + +def test_cluster_constructor_defaults(): + client = _Client(PROJECT) + instance = _Instance(INSTANCE_ID, client) + + cluster = _make_cluster(CLUSTER_ID, instance) + + assert cluster.cluster_id == CLUSTER_ID + assert cluster._instance is instance + assert cluster.location_id is None + assert cluster.state is None + assert cluster.serve_nodes is None + assert cluster.default_storage_type is None + assert cluster.kms_key_name is None + + +def test_cluster_constructor_explicit(): + from google.cloud.bigtable.enums import StorageType + from google.cloud.bigtable.enums import Cluster + + STATE = Cluster.State.READY + STORAGE_TYPE_SSD = StorageType.SSD + client = _Client(PROJECT) + instance = _Instance(INSTANCE_ID, client) + + cluster = _make_cluster( + CLUSTER_ID, + instance, + location_id=LOCATION_ID, + _state=STATE, + serve_nodes=SERVE_NODES, + default_storage_type=STORAGE_TYPE_SSD, + kms_key_name=KMS_KEY_NAME, + ) + assert cluster.cluster_id == CLUSTER_ID + assert cluster._instance is instance + assert cluster.location_id == LOCATION_ID + assert cluster.state == STATE + assert cluster.serve_nodes == SERVE_NODES + assert cluster.default_storage_type == STORAGE_TYPE_SSD + assert cluster.kms_key_name == KMS_KEY_NAME + + +def test_cluster_name(): + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials, admin=True) + instance = _Instance(INSTANCE_ID, client) + cluster = _make_cluster(CLUSTER_ID, instance) + + assert cluster.name == CLUSTER_NAME + + +def test_cluster_kms_key_name(): + client = _Client(PROJECT) + instance = _Instance(INSTANCE_ID, client) + cluster = _make_cluster(CLUSTER_ID, instance, 
kms_key_name=KMS_KEY_NAME) + + assert cluster.kms_key_name == KMS_KEY_NAME + + +def test_cluster_kms_key_name_setter(): + client = _Client(PROJECT) + instance = _Instance(INSTANCE_ID, client) + cluster = _make_cluster(CLUSTER_ID, instance, kms_key_name=KMS_KEY_NAME) + + with pytest.raises(AttributeError): + cluster.kms_key_name = "I'm read only" + + +def test_cluster_from_pb_success(): + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 + from google.cloud.bigtable.cluster import Cluster + from google.cloud.bigtable import enums + + client = _Client(PROJECT) + instance = _Instance(INSTANCE_ID, client) + + location = LOCATION_PATH + LOCATION_ID + state = enums.Cluster.State.RESIZING + storage_type = enums.StorageType.SSD + cluster_pb = data_v2_pb2.Cluster( + name=CLUSTER_NAME, + location=location, + state=state, + serve_nodes=SERVE_NODES, + default_storage_type=storage_type, + encryption_config=data_v2_pb2.Cluster.EncryptionConfig( + kms_key_name=KMS_KEY_NAME, + ), + ) + + cluster = Cluster.from_pb(cluster_pb, instance) + assert isinstance(cluster, Cluster) + assert cluster._instance == instance + assert cluster.cluster_id == CLUSTER_ID + assert cluster.location_id == LOCATION_ID + assert cluster.state == state + assert cluster.serve_nodes == SERVE_NODES + assert cluster.default_storage_type == storage_type + assert cluster.kms_key_name == KMS_KEY_NAME + + +def test_cluster_from_pb_w_bad_cluster_name(): + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 + from google.cloud.bigtable.cluster import Cluster + + bad_cluster_name = "BAD_NAME" + + cluster_pb = data_v2_pb2.Cluster(name=bad_cluster_name) + + with pytest.raises(ValueError): + Cluster.from_pb(cluster_pb, None) + + +def test_cluster_from_pb_w_instance_id_mistmatch(): + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 + from google.cloud.bigtable.cluster import Cluster + + ALT_INSTANCE_ID = "ALT_INSTANCE_ID" + client = _Client(PROJECT) + instance = _Instance(ALT_INSTANCE_ID, client) + + assert INSTANCE_ID != ALT_INSTANCE_ID + cluster_pb = data_v2_pb2.Cluster(name=CLUSTER_NAME) -class TestCluster(unittest.TestCase): + with pytest.raises(ValueError): + Cluster.from_pb(cluster_pb, instance) - PROJECT = "project" - INSTANCE_ID = "instance-id" - LOCATION_ID = "location-id" - CLUSTER_ID = "cluster-id" - LOCATION_ID = "location-id" - CLUSTER_NAME = ( - "projects/" + PROJECT + "/instances/" + INSTANCE_ID + "/clusters/" + CLUSTER_ID + +def test_cluster_from_pb_w_project_mistmatch(): + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 + from google.cloud.bigtable.cluster import Cluster + + ALT_PROJECT = "ALT_PROJECT" + client = _Client(project=ALT_PROJECT) + instance = _Instance(INSTANCE_ID, client) + + assert PROJECT != ALT_PROJECT + cluster_pb = data_v2_pb2.Cluster(name=CLUSTER_NAME) + + with pytest.raises(ValueError): + Cluster.from_pb(cluster_pb, instance) + + +def test_cluster___eq__(): + client = _Client(PROJECT) + instance = _Instance(INSTANCE_ID, client) + cluster1 = _make_cluster(CLUSTER_ID, instance, LOCATION_ID) + cluster2 = _make_cluster(CLUSTER_ID, instance, LOCATION_ID) + assert cluster1 == cluster2 + + +def test_cluster___eq___w_type_differ(): + client = _Client(PROJECT) + instance = _Instance(INSTANCE_ID, client) + cluster1 = _make_cluster(CLUSTER_ID, instance, LOCATION_ID) + cluster2 = object() + assert cluster1 != cluster2 + + +def test_cluster___ne___w_same_value(): + client = _Client(PROJECT) + instance = _Instance(INSTANCE_ID, 
client) + cluster1 = _make_cluster(CLUSTER_ID, instance, LOCATION_ID) + cluster2 = _make_cluster(CLUSTER_ID, instance, LOCATION_ID) + assert not (cluster1 != cluster2) + + +def test_cluster___ne__(): + client = _Client(PROJECT) + instance = _Instance(INSTANCE_ID, client) + cluster1 = _make_cluster("cluster_id1", instance, LOCATION_ID) + cluster2 = _make_cluster("cluster_id2", instance, LOCATION_ID) + assert cluster1 != cluster2 + + +def _make_instance_admin_client(): + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) + + return mock.create_autospec(BigtableInstanceAdminClient) + + +def test_cluster_reload(): + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 + from google.cloud.bigtable.enums import StorageType + from google.cloud.bigtable.enums import Cluster + + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials, admin=True) + STORAGE_TYPE_SSD = StorageType.SSD + instance = _Instance(INSTANCE_ID, client) + cluster = _make_cluster( + CLUSTER_ID, + instance, + location_id=LOCATION_ID, + serve_nodes=SERVE_NODES, + default_storage_type=STORAGE_TYPE_SSD, + kms_key_name=KMS_KEY_NAME, ) - LOCATION_PATH = "projects/" + PROJECT + "/locations/" - SERVE_NODES = 5 - OP_ID = 5678 - OP_NAME = "operations/projects/{}/instances/{}/clusters/{}/operations/{}".format( - PROJECT, INSTANCE_ID, CLUSTER_ID, OP_ID + + # Create response_pb + LOCATION_ID_FROM_SERVER = "new-location-id" + STATE = Cluster.State.READY + SERVE_NODES_FROM_SERVER = 10 + STORAGE_TYPE_FROM_SERVER = StorageType.HDD + + response_pb = data_v2_pb2.Cluster( + name=cluster.name, + location=LOCATION_PATH + LOCATION_ID_FROM_SERVER, + state=STATE, + serve_nodes=SERVE_NODES_FROM_SERVER, + default_storage_type=STORAGE_TYPE_FROM_SERVER, ) - KEY_RING_ID = "key-ring-id" - CRYPTO_KEY_ID = "crypto-key-id" - KMS_KEY_NAME = f"{LOCATION_PATH}/keyRings/{KEY_RING_ID}/cryptoKeys/{CRYPTO_KEY_ID}" - - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.cluster import Cluster - - return Cluster - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - @staticmethod - def _get_target_client_class(): - from google.cloud.bigtable.client import Client - - return Client - - def _make_client(self, *args, **kwargs): - return self._get_target_client_class()(*args, **kwargs) - - def test_constructor_defaults(self): - client = _Client(self.PROJECT) - instance = _Instance(self.INSTANCE_ID, client) - - cluster = self._make_one(self.CLUSTER_ID, instance) - self.assertEqual(cluster.cluster_id, self.CLUSTER_ID) - self.assertIs(cluster._instance, instance) - self.assertIsNone(cluster.location_id) - self.assertIsNone(cluster.state) - self.assertIsNone(cluster.serve_nodes) - self.assertIsNone(cluster.default_storage_type) - self.assertIsNone(cluster.kms_key_name) - - def test_constructor_non_default(self): - from google.cloud.bigtable.enums import StorageType - from google.cloud.bigtable.enums import Cluster - - STATE = Cluster.State.READY - STORAGE_TYPE_SSD = StorageType.SSD - client = _Client(self.PROJECT) - instance = _Instance(self.INSTANCE_ID, client) - - cluster = self._make_one( - self.CLUSTER_ID, - instance, - location_id=self.LOCATION_ID, - _state=STATE, - serve_nodes=self.SERVE_NODES, - default_storage_type=STORAGE_TYPE_SSD, - kms_key_name=self.KMS_KEY_NAME, - ) - self.assertEqual(cluster.cluster_id, self.CLUSTER_ID) - self.assertIs(cluster._instance, instance) - 
self.assertEqual(cluster.location_id, self.LOCATION_ID) - self.assertEqual(cluster.state, STATE) - self.assertEqual(cluster.serve_nodes, self.SERVE_NODES) - self.assertEqual(cluster.default_storage_type, STORAGE_TYPE_SSD) - self.assertEqual(cluster.kms_key_name, self.KMS_KEY_NAME) - - def test_name_property(self): - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance = _Instance(self.INSTANCE_ID, client) - cluster = self._make_one(self.CLUSTER_ID, instance) - - self.assertEqual(cluster.name, self.CLUSTER_NAME) - - def test_kms_key_name_property(self): - client = _Client(self.PROJECT) - instance = _Instance(self.INSTANCE_ID, client) - - cluster = self._make_one( - self.CLUSTER_ID, instance, kms_key_name=self.KMS_KEY_NAME - ) - - self.assertEqual(cluster.kms_key_name, self.KMS_KEY_NAME) - with pytest.raises(AttributeError): - cluster.kms_key_name = "I'm read only" - - def test_from_pb_success(self): - from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 - from google.cloud.bigtable import enums - - client = _Client(self.PROJECT) - instance = _Instance(self.INSTANCE_ID, client) - - location = self.LOCATION_PATH + self.LOCATION_ID - state = enums.Cluster.State.RESIZING - storage_type = enums.StorageType.SSD - cluster_pb = data_v2_pb2.Cluster( - name=self.CLUSTER_NAME, - location=location, - state=state, - serve_nodes=self.SERVE_NODES, - default_storage_type=storage_type, - encryption_config=data_v2_pb2.Cluster.EncryptionConfig( - kms_key_name=self.KMS_KEY_NAME, - ), - ) - - klass = self._get_target_class() - cluster = klass.from_pb(cluster_pb, instance) - self.assertIsInstance(cluster, klass) - self.assertEqual(cluster._instance, instance) - self.assertEqual(cluster.cluster_id, self.CLUSTER_ID) - self.assertEqual(cluster.location_id, self.LOCATION_ID) - self.assertEqual(cluster.state, state) - self.assertEqual(cluster.serve_nodes, self.SERVE_NODES) - self.assertEqual(cluster.default_storage_type, storage_type) - self.assertEqual(cluster.kms_key_name, self.KMS_KEY_NAME) - - def test_from_pb_bad_cluster_name(self): - from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 - - bad_cluster_name = "BAD_NAME" - - cluster_pb = data_v2_pb2.Cluster(name=bad_cluster_name) - - klass = self._get_target_class() - with self.assertRaises(ValueError): - klass.from_pb(cluster_pb, None) - - def test_from_pb_instance_id_mistmatch(self): - from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 - - ALT_INSTANCE_ID = "ALT_INSTANCE_ID" - client = _Client(self.PROJECT) - instance = _Instance(ALT_INSTANCE_ID, client) - - self.assertNotEqual(self.INSTANCE_ID, ALT_INSTANCE_ID) - cluster_pb = data_v2_pb2.Cluster(name=self.CLUSTER_NAME) - - klass = self._get_target_class() - with self.assertRaises(ValueError): - klass.from_pb(cluster_pb, instance) - - def test_from_pb_project_mistmatch(self): - from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 - - ALT_PROJECT = "ALT_PROJECT" - client = _Client(project=ALT_PROJECT) - instance = _Instance(self.INSTANCE_ID, client) - - self.assertNotEqual(self.PROJECT, ALT_PROJECT) - cluster_pb = data_v2_pb2.Cluster(name=self.CLUSTER_NAME) - - klass = self._get_target_class() - with self.assertRaises(ValueError): - klass.from_pb(cluster_pb, instance) - - def test___eq__(self): - client = _Client(self.PROJECT) - instance = _Instance(self.INSTANCE_ID, client) - cluster1 = self._make_one(self.CLUSTER_ID, instance, self.LOCATION_ID) 
- cluster2 = self._make_one(self.CLUSTER_ID, instance, self.LOCATION_ID) - self.assertEqual(cluster1, cluster2) - - def test___eq__type_differ(self): - client = _Client(self.PROJECT) - instance = _Instance(self.INSTANCE_ID, client) - cluster1 = self._make_one(self.CLUSTER_ID, instance, self.LOCATION_ID) - cluster2 = object() - self.assertNotEqual(cluster1, cluster2) - - def test___ne__same_value(self): - client = _Client(self.PROJECT) - instance = _Instance(self.INSTANCE_ID, client) - cluster1 = self._make_one(self.CLUSTER_ID, instance, self.LOCATION_ID) - cluster2 = self._make_one(self.CLUSTER_ID, instance, self.LOCATION_ID) - comparison_val = cluster1 != cluster2 - self.assertFalse(comparison_val) - - def test___ne__(self): - client = _Client(self.PROJECT) - instance = _Instance(self.INSTANCE_ID, client) - cluster1 = self._make_one("cluster_id1", instance, self.LOCATION_ID) - cluster2 = self._make_one("cluster_id2", instance, self.LOCATION_ID) - self.assertNotEqual(cluster1, cluster2) - - def test_reload(self): - from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( - BigtableInstanceAdminClient, - ) - from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 - from google.cloud.bigtable.enums import StorageType - from google.cloud.bigtable.enums import Cluster - - api = mock.create_autospec(BigtableInstanceAdminClient) - - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - STORAGE_TYPE_SSD = StorageType.SSD - instance = _Instance(self.INSTANCE_ID, client) - cluster = self._make_one( - self.CLUSTER_ID, - instance, - location_id=self.LOCATION_ID, - serve_nodes=self.SERVE_NODES, - default_storage_type=STORAGE_TYPE_SSD, - kms_key_name=self.KMS_KEY_NAME, - ) - - # Create response_pb - LOCATION_ID_FROM_SERVER = "new-location-id" - STATE = Cluster.State.READY - SERVE_NODES_FROM_SERVER = 10 - STORAGE_TYPE_FROM_SERVER = StorageType.HDD - - response_pb = data_v2_pb2.Cluster( - name=cluster.name, - location=self.LOCATION_PATH + LOCATION_ID_FROM_SERVER, - state=STATE, - serve_nodes=SERVE_NODES_FROM_SERVER, - default_storage_type=STORAGE_TYPE_FROM_SERVER, - ) - - # Patch the stub used by the API method. - client._instance_admin_client = api - instance_stub = client._instance_admin_client - - instance_stub.get_cluster.side_effect = [response_pb] - - # Create expected_result. - expected_result = None # reload() has no return value. - - # Check Cluster optional config values before. - self.assertEqual(cluster.location_id, self.LOCATION_ID) - self.assertIsNone(cluster.state) - self.assertEqual(cluster.serve_nodes, self.SERVE_NODES) - self.assertEqual(cluster.default_storage_type, STORAGE_TYPE_SSD) - - # Perform the method and check the result. 
- result = cluster.reload() - self.assertEqual(result, expected_result) - self.assertEqual(cluster.location_id, LOCATION_ID_FROM_SERVER) - self.assertEqual(cluster.state, STATE) - self.assertEqual(cluster.serve_nodes, SERVE_NODES_FROM_SERVER) - self.assertEqual(cluster.default_storage_type, STORAGE_TYPE_FROM_SERVER) - self.assertEqual(cluster.kms_key_name, None) - - def test_exists(self): - from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( - BigtableInstanceAdminClient, - ) - from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 - from google.cloud.bigtable.instance import Instance - from google.api_core import exceptions - - instance_api = mock.create_autospec(BigtableInstanceAdminClient) - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance = Instance(self.INSTANCE_ID, client) - - # Create response_pb - cluster_name = client.instance_admin_client.cluster_path( - self.PROJECT, self.INSTANCE_ID, self.CLUSTER_ID - ) - response_pb = data_v2_pb2.Cluster(name=cluster_name) - - # Patch the stub used by the API method. - client._instance_admin_client = instance_api - bigtable_instance_stub = client._instance_admin_client - - bigtable_instance_stub.get_cluster.side_effect = [ - response_pb, - exceptions.NotFound("testing"), - exceptions.BadRequest("testing"), - ] - - # Perform the method and check the result. - non_existing_cluster_id = "cluster-id-2" - alt_cluster_1 = self._make_one(self.CLUSTER_ID, instance) - alt_cluster_2 = self._make_one(non_existing_cluster_id, instance) - self.assertTrue(alt_cluster_1.exists()) - self.assertFalse(alt_cluster_2.exists()) - with self.assertRaises(exceptions.BadRequest): - alt_cluster_1.exists() - - def test_create(self): - import datetime - from google.longrunning import operations_pb2 - from google.protobuf.any_pb2 import Any - from google.cloud.bigtable_admin_v2.types import ( - bigtable_instance_admin as messages_v2_pb2, - ) - from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.bigtable.instance import Instance - from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( - BigtableInstanceAdminClient, - ) - from google.cloud.bigtable_admin_v2.types import instance as instance_v2_pb2 - from google.cloud.bigtable.enums import StorageType - - NOW = datetime.datetime.utcnow() - NOW_PB = _datetime_to_pb_timestamp(NOW) - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - STORAGE_TYPE_SSD = StorageType.SSD - LOCATION = self.LOCATION_PATH + self.LOCATION_ID - instance = Instance(self.INSTANCE_ID, client) - cluster = self._make_one( - self.CLUSTER_ID, - instance, - location_id=self.LOCATION_ID, - serve_nodes=self.SERVE_NODES, - default_storage_type=STORAGE_TYPE_SSD, - ) - expected_request_cluster = instance_v2_pb2.Cluster( - location=LOCATION, - serve_nodes=cluster.serve_nodes, - default_storage_type=cluster.default_storage_type, - ) - expected_request = { - "request": { - "parent": instance.name, - "cluster_id": self.CLUSTER_ID, - "cluster": expected_request_cluster, - } - } - name = instance.name - metadata = messages_v2_pb2.CreateClusterMetadata(request_time=NOW_PB) - type_url = "type.googleapis.com/{}".format( - messages_v2_pb2.CreateClusterMetadata._meta._pb.DESCRIPTOR.full_name - ) - response_pb = operations_pb2.Operation( - name=self.OP_NAME, - metadata=Any(type_url=type_url, 
value=metadata._pb.SerializeToString()), - ) - - # Patch the stub used by the API method. - api = mock.create_autospec(BigtableInstanceAdminClient) - api.common_location_path.return_value = LOCATION - client._instance_admin_client = api - cluster._instance._client = client - cluster._instance._client.instance_admin_client.instance_path.return_value = ( - name - ) - client._instance_admin_client.create_cluster.return_value = response_pb - # Perform the method and check the result. - cluster.create() - - actual_request = client._instance_admin_client.create_cluster.call_args_list[ - 0 - ].kwargs - self.assertEqual(actual_request, expected_request) - - def test_create_w_cmek(self): - import datetime - from google.longrunning import operations_pb2 - from google.protobuf.any_pb2 import Any - from google.cloud.bigtable_admin_v2.types import ( - bigtable_instance_admin as messages_v2_pb2, - ) - from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.bigtable.instance import Instance - from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( - BigtableInstanceAdminClient, - ) - from google.cloud.bigtable_admin_v2.types import instance as instance_v2_pb2 - from google.cloud.bigtable.enums import StorageType - - NOW = datetime.datetime.utcnow() - NOW_PB = _datetime_to_pb_timestamp(NOW) - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - STORAGE_TYPE_SSD = StorageType.SSD - LOCATION = self.LOCATION_PATH + self.LOCATION_ID - instance = Instance(self.INSTANCE_ID, client) - cluster = self._make_one( - self.CLUSTER_ID, - instance, - location_id=self.LOCATION_ID, - serve_nodes=self.SERVE_NODES, - default_storage_type=STORAGE_TYPE_SSD, - kms_key_name=self.KMS_KEY_NAME, - ) - expected_request_cluster = instance_v2_pb2.Cluster( - location=LOCATION, - serve_nodes=cluster.serve_nodes, - default_storage_type=cluster.default_storage_type, - encryption_config=instance_v2_pb2.Cluster.EncryptionConfig( - kms_key_name=self.KMS_KEY_NAME, - ), - ) - expected_request = { - "request": { - "parent": instance.name, - "cluster_id": self.CLUSTER_ID, - "cluster": expected_request_cluster, - } - } - name = instance.name - metadata = messages_v2_pb2.CreateClusterMetadata(request_time=NOW_PB) - type_url = "type.googleapis.com/{}".format( - messages_v2_pb2.CreateClusterMetadata._meta._pb.DESCRIPTOR.full_name - ) - response_pb = operations_pb2.Operation( - name=self.OP_NAME, - metadata=Any(type_url=type_url, value=metadata._pb.SerializeToString()), - ) - - # Patch the stub used by the API method. - api = mock.create_autospec(BigtableInstanceAdminClient) - api.common_location_path.return_value = LOCATION - client._instance_admin_client = api - cluster._instance._client = client - cluster._instance._client.instance_admin_client.instance_path.return_value = ( - name - ) - client._instance_admin_client.create_cluster.return_value = response_pb - # Perform the method and check the result. 
- cluster.create() - - actual_request = client._instance_admin_client.create_cluster.call_args_list[ - 0 - ].kwargs - self.assertEqual(actual_request, expected_request) - - def test_update(self): - import datetime - from google.longrunning import operations_pb2 - from google.protobuf.any_pb2 import Any - from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.bigtable_admin_v2.types import ( - bigtable_instance_admin as messages_v2_pb2, - ) - from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( - BigtableInstanceAdminClient, - ) - from google.cloud.bigtable.enums import StorageType - - NOW = datetime.datetime.utcnow() - NOW_PB = _datetime_to_pb_timestamp(NOW) - - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - STORAGE_TYPE_SSD = StorageType.SSD - instance = _Instance(self.INSTANCE_ID, client) - cluster = self._make_one( - self.CLUSTER_ID, - instance, - location_id=self.LOCATION_ID, - serve_nodes=self.SERVE_NODES, - default_storage_type=STORAGE_TYPE_SSD, - ) - # Create expected_request - expected_request = { - "request": { - "name": "projects/project/instances/instance-id/clusters/cluster-id", - "serve_nodes": 5, - "location": None, - } - } - metadata = messages_v2_pb2.UpdateClusterMetadata(request_time=NOW_PB) - type_url = "type.googleapis.com/{}".format( - messages_v2_pb2.UpdateClusterMetadata._meta._pb.DESCRIPTOR.full_name - ) - response_pb = operations_pb2.Operation( - name=self.OP_NAME, - metadata=Any(type_url=type_url, value=metadata._pb.SerializeToString()), - ) - - # Patch the stub used by the API method. - api = mock.create_autospec(BigtableInstanceAdminClient) - client._instance_admin_client = api - cluster._instance._client.instance_admin_client.cluster_path.return_value = ( - "projects/project/instances/instance-id/clusters/cluster-id" - ) - # Perform the method and check the result. - client._instance_admin_client.update_cluster.return_value = response_pb - cluster.update() - - actual_request = client._instance_admin_client.update_cluster.call_args_list[ - 0 - ].kwargs - - self.assertEqual(actual_request, expected_request) - - def test_delete(self): - from google.protobuf import empty_pb2 - from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( - BigtableInstanceAdminClient, - ) - - api = mock.create_autospec(BigtableInstanceAdminClient) - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance = _Instance(self.INSTANCE_ID, client) - cluster = self._make_one(self.CLUSTER_ID, instance, self.LOCATION_ID) - - # Create response_pb - response_pb = empty_pb2.Empty() - - # Patch the stub used by the API method. - client._instance_admin_client = api - instance_admin_client = client._instance_admin_client - instance_stub = instance_admin_client - instance_stub.delete_cluster.side_effect = [response_pb] - - # Create expected_result. - expected_result = None # delete() has no return value. - - # Perform the method and check the result. - result = cluster.delete() - - self.assertEqual(result, expected_result) + + # Patch the stub used by the API method. + api = client._instance_admin_client = _make_instance_admin_client() + api.get_cluster.side_effect = [response_pb] + + # Create expected_result. + expected_result = None # reload() has no return value. + + # Check Cluster optional config values before. 
+ assert cluster.location_id == LOCATION_ID + assert cluster.state is None + assert cluster.serve_nodes == SERVE_NODES + assert cluster.default_storage_type == STORAGE_TYPE_SSD + + # Perform the method and check the result. + result = cluster.reload() + assert result == expected_result + assert cluster.location_id == LOCATION_ID_FROM_SERVER + assert cluster.state == STATE + assert cluster.serve_nodes == SERVE_NODES_FROM_SERVER + assert cluster.default_storage_type == STORAGE_TYPE_FROM_SERVER + assert cluster.kms_key_name is None + + api.get_cluster.assert_called_once_with(request={"name": cluster.name}) + + +def test_cluster_exists_hit(): + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 + from google.cloud.bigtable.instance import Instance + + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials, admin=True) + instance = Instance(INSTANCE_ID, client) + + cluster_name = client.instance_admin_client.cluster_path( + PROJECT, INSTANCE_ID, CLUSTER_ID + ) + response_pb = data_v2_pb2.Cluster(name=cluster_name) + + api = client._instance_admin_client = _make_instance_admin_client() + api.get_cluster.return_value = response_pb + + cluster = _make_cluster(CLUSTER_ID, instance) + + assert cluster.exists() + + api.get_cluster.assert_called_once_with(request={"name": cluster.name}) + + +def test_cluster_exists_miss(): + from google.cloud.bigtable.instance import Instance + from google.api_core import exceptions + + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials, admin=True) + instance = Instance(INSTANCE_ID, client) + + api = client._instance_admin_client = _make_instance_admin_client() + api.get_cluster.side_effect = exceptions.NotFound("testing") + + non_existing_cluster_id = "nonesuch-cluster-2" + cluster = _make_cluster(non_existing_cluster_id, instance) + + assert not cluster.exists() + + api.get_cluster.assert_called_once_with(request={"name": cluster.name}) + + +def test_cluster_exists_w_error(): + from google.cloud.bigtable.instance import Instance + from google.api_core import exceptions + + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials, admin=True) + instance = Instance(INSTANCE_ID, client) + + api = client._instance_admin_client = _make_instance_admin_client() + api.get_cluster.side_effect = exceptions.BadRequest("testing") + + cluster = _make_cluster(CLUSTER_ID, instance) + + with pytest.raises(exceptions.BadRequest): + cluster.exists() + + api.get_cluster.assert_called_once_with(request={"name": cluster.name}) + + +def test_cluster_create(): + import datetime + from google.longrunning import operations_pb2 + from google.protobuf.any_pb2 import Any + from google.cloud.bigtable_admin_v2.types import ( + bigtable_instance_admin as messages_v2_pb2, + ) + from google.cloud._helpers import _datetime_to_pb_timestamp + from google.cloud.bigtable.instance import Instance + from google.cloud.bigtable_admin_v2.types import instance as instance_v2_pb2 + from google.cloud.bigtable.enums import StorageType + + NOW = datetime.datetime.utcnow() + NOW_PB = _datetime_to_pb_timestamp(NOW) + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials, admin=True) + STORAGE_TYPE_SSD = StorageType.SSD + LOCATION = LOCATION_PATH + LOCATION_ID + instance = Instance(INSTANCE_ID, client) + cluster = _make_cluster( + CLUSTER_ID, + instance, + location_id=LOCATION_ID, + serve_nodes=SERVE_NODES, + 
default_storage_type=STORAGE_TYPE_SSD, + ) + metadata = messages_v2_pb2.CreateClusterMetadata(request_time=NOW_PB) + type_url = "type.googleapis.com/{}".format( + messages_v2_pb2.CreateClusterMetadata._meta._pb.DESCRIPTOR.full_name + ) + response_pb = operations_pb2.Operation( + name=OP_NAME, + metadata=Any(type_url=type_url, value=metadata._pb.SerializeToString()), + ) + + api = client._instance_admin_client = _make_instance_admin_client() + api.common_location_path.return_value = LOCATION + api.instance_path.return_value = instance.name + api.create_cluster.return_value = response_pb + + cluster.create() + + expected_request_cluster = instance_v2_pb2.Cluster( + location=LOCATION, + serve_nodes=cluster.serve_nodes, + default_storage_type=cluster.default_storage_type, + ) + expected_request = { + "parent": instance.name, + "cluster_id": CLUSTER_ID, + "cluster": expected_request_cluster, + } + api.create_cluster.assert_called_once_with(request=expected_request) + + +def test_cluster_create_w_cmek(): + import datetime + from google.longrunning import operations_pb2 + from google.protobuf.any_pb2 import Any + from google.cloud.bigtable_admin_v2.types import ( + bigtable_instance_admin as messages_v2_pb2, + ) + from google.cloud._helpers import _datetime_to_pb_timestamp + from google.cloud.bigtable.instance import Instance + from google.cloud.bigtable_admin_v2.types import instance as instance_v2_pb2 + from google.cloud.bigtable.enums import StorageType + + NOW = datetime.datetime.utcnow() + NOW_PB = _datetime_to_pb_timestamp(NOW) + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials, admin=True) + STORAGE_TYPE_SSD = StorageType.SSD + LOCATION = LOCATION_PATH + LOCATION_ID + instance = Instance(INSTANCE_ID, client) + cluster = _make_cluster( + CLUSTER_ID, + instance, + location_id=LOCATION_ID, + serve_nodes=SERVE_NODES, + default_storage_type=STORAGE_TYPE_SSD, + kms_key_name=KMS_KEY_NAME, + ) + name = instance.name + metadata = messages_v2_pb2.CreateClusterMetadata(request_time=NOW_PB) + type_url = "type.googleapis.com/{}".format( + messages_v2_pb2.CreateClusterMetadata._meta._pb.DESCRIPTOR.full_name + ) + response_pb = operations_pb2.Operation( + name=OP_NAME, + metadata=Any(type_url=type_url, value=metadata._pb.SerializeToString()), + ) + + api = client._instance_admin_client = _make_instance_admin_client() + api.common_location_path.return_value = LOCATION + api.instance_path.return_value = name + api.create_cluster.return_value = response_pb + + cluster.create() + + expected_request_cluster = instance_v2_pb2.Cluster( + location=LOCATION, + serve_nodes=cluster.serve_nodes, + default_storage_type=cluster.default_storage_type, + encryption_config=instance_v2_pb2.Cluster.EncryptionConfig( + kms_key_name=KMS_KEY_NAME, + ), + ) + expected_request = { + "parent": instance.name, + "cluster_id": CLUSTER_ID, + "cluster": expected_request_cluster, + } + api.create_cluster.assert_called_once_with(request=expected_request) + + +def test_cluster_update(): + import datetime + from google.longrunning import operations_pb2 + from google.protobuf.any_pb2 import Any + from google.cloud._helpers import _datetime_to_pb_timestamp + from google.cloud.bigtable_admin_v2.types import ( + bigtable_instance_admin as messages_v2_pb2, + ) + from google.cloud.bigtable.enums import StorageType + + NOW = datetime.datetime.utcnow() + NOW_PB = _datetime_to_pb_timestamp(NOW) + + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials, 
admin=True) + STORAGE_TYPE_SSD = StorageType.SSD + instance = _Instance(INSTANCE_ID, client) + cluster = _make_cluster( + CLUSTER_ID, + instance, + location_id=LOCATION_ID, + serve_nodes=SERVE_NODES, + default_storage_type=STORAGE_TYPE_SSD, + ) + metadata = messages_v2_pb2.UpdateClusterMetadata(request_time=NOW_PB) + type_url = "type.googleapis.com/{}".format( + messages_v2_pb2.UpdateClusterMetadata._meta._pb.DESCRIPTOR.full_name + ) + response_pb = operations_pb2.Operation( + name=OP_NAME, + metadata=Any(type_url=type_url, value=metadata._pb.SerializeToString()), + ) + + api = client._instance_admin_client = _make_instance_admin_client() + api.cluster_path.return_value = ( + "projects/project/instances/instance-id/clusters/cluster-id" + ) + api.update_cluster.return_value = response_pb + + cluster.update() + + expected_request = { + "name": "projects/project/instances/instance-id/clusters/cluster-id", + "serve_nodes": 5, + "location": None, + } + api.update_cluster.assert_called_once_with(request=expected_request) + + +def test_cluster_delete(): + from google.protobuf import empty_pb2 + + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials, admin=True) + instance = _Instance(INSTANCE_ID, client) + cluster = _make_cluster(CLUSTER_ID, instance, LOCATION_ID) + + api = client._instance_admin_client = _make_instance_admin_client() + api.delete_cluster.side_effect = [empty_pb2.Empty()] + + # Perform the method and check the result. + assert cluster.delete() is None + + api.delete_cluster.assert_called_once_with(request={"name": cluster.name}) class _Instance(object): diff --git a/packages/google-cloud-bigtable/tests/unit/test_column_family.py b/packages/google-cloud-bigtable/tests/unit/test_column_family.py index 601c37cf5d13..9d4632e2a632 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_column_family.py +++ b/packages/google-cloud-bigtable/tests/unit/test_column_family.py @@ -12,609 +12,607 @@ # See the License for the specific language governing permissions and # limitations under the License. 
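The change applied to every test module in this patch is the same mechanical conversion: unittest.TestCase subclasses with class attributes, _get_target_class(), and _make_one() helpers become module-level constants, plain _make_* factory functions, and bare assert statements, with pytest.raises replacing assertRaises. A minimal, self-contained sketch of that shape follows; the Widget class, _make_widget, and WIDGET_ID are hypothetical stand-ins for illustration, not part of this library or this diff:

import pytest

WIDGET_ID = "widget-id"  # module-level constant replaces a TestCase class attribute


class Widget:  # hypothetical stand-in for the classes under test
    def __init__(self, widget_id):
        self.widget_id = widget_id


def _make_widget(*args, **kwargs):
    # factory helper replaces _get_target_class() / _make_one() on the TestCase
    return Widget(*args, **kwargs)


def test_widget_constructor():
    widget = _make_widget(WIDGET_ID)
    assert widget.widget_id == WIDGET_ID  # plain assert replaces self.assertEqual


def test_widget_constructor_requires_id():
    with pytest.raises(TypeError):  # pytest.raises replaces self.assertRaises
        _make_widget()

Module-level tests like these rely on pytest's assertion introspection for failure messages, which is why the explicit self.assert* helpers are no longer needed.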
-import unittest - import mock +import pytest from ._testing import _make_credentials -class TestMaxVersionsGCRule(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.column_family import MaxVersionsGCRule - - return MaxVersionsGCRule - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test___eq__type_differ(self): - gc_rule1 = self._make_one(10) - self.assertNotEqual(gc_rule1, object()) - self.assertEqual(gc_rule1, mock.ANY) - - def test___eq__same_value(self): - gc_rule1 = self._make_one(2) - gc_rule2 = self._make_one(2) - self.assertEqual(gc_rule1, gc_rule2) - - def test___ne__same_value(self): - gc_rule1 = self._make_one(99) - gc_rule2 = self._make_one(99) - comparison_val = gc_rule1 != gc_rule2 - self.assertFalse(comparison_val) - - def test_to_pb(self): - max_num_versions = 1337 - gc_rule = self._make_one(max_num_versions=max_num_versions) - pb_val = gc_rule.to_pb() - expected = _GcRulePB(max_num_versions=max_num_versions) - self.assertEqual(pb_val, expected) - - -class TestMaxAgeGCRule(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.column_family import MaxAgeGCRule - - return MaxAgeGCRule - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test___eq__type_differ(self): - max_age = object() - gc_rule1 = self._make_one(max_age=max_age) - gc_rule2 = object() - self.assertNotEqual(gc_rule1, gc_rule2) - - def test___eq__same_value(self): - max_age = object() - gc_rule1 = self._make_one(max_age=max_age) - gc_rule2 = self._make_one(max_age=max_age) - self.assertEqual(gc_rule1, gc_rule2) +def _make_max_versions_gc_rule(*args, **kwargs): + from google.cloud.bigtable.column_family import MaxVersionsGCRule + + return MaxVersionsGCRule(*args, **kwargs) + + +def test_max_versions_gc_rule___eq__type_differ(): + gc_rule1 = _make_max_versions_gc_rule(10) + assert gc_rule1 != object() + assert gc_rule1 == mock.ANY + + +def test_max_versions_gc_rule___eq__same_value(): + gc_rule1 = _make_max_versions_gc_rule(2) + gc_rule2 = _make_max_versions_gc_rule(2) + assert gc_rule1 == gc_rule2 + + +def test_max_versions_gc_rule___ne__same_value(): + gc_rule1 = _make_max_versions_gc_rule(99) + gc_rule2 = _make_max_versions_gc_rule(99) + assert not (gc_rule1 != gc_rule2) + + +def test_max_versions_gc_rule_to_pb(): + max_num_versions = 1337 + gc_rule = _make_max_versions_gc_rule(max_num_versions=max_num_versions) + pb_val = gc_rule.to_pb() + expected = _GcRulePB(max_num_versions=max_num_versions) + assert pb_val == expected + + +def _make_max_age_gc_rule(*args, **kwargs): + from google.cloud.bigtable.column_family import MaxAgeGCRule + + return MaxAgeGCRule(*args, **kwargs) + + +def test_max_age_gc_rule___eq__type_differ(): + max_age = object() + gc_rule1 = _make_max_age_gc_rule(max_age=max_age) + gc_rule2 = object() + assert gc_rule1 != gc_rule2 + + +def test_max_age_gc_rule___eq__same_value(): + max_age = object() + gc_rule1 = _make_max_age_gc_rule(max_age=max_age) + gc_rule2 = _make_max_age_gc_rule(max_age=max_age) + assert gc_rule1 == gc_rule2 + + +def test_max_age_gc_rule___ne__same_value(): + max_age = object() + gc_rule1 = _make_max_age_gc_rule(max_age=max_age) + gc_rule2 = _make_max_age_gc_rule(max_age=max_age) + assert not (gc_rule1 != gc_rule2) + + +def test_max_age_gc_rule_to_pb(): + import datetime + from google.protobuf import duration_pb2 + + max_age = datetime.timedelta(seconds=1) + duration = 
duration_pb2.Duration(seconds=1) + gc_rule = _make_max_age_gc_rule(max_age=max_age) + pb_val = gc_rule.to_pb() + assert pb_val == _GcRulePB(max_age=duration) + + +def _make_gc_rule_union(*args, **kwargs): + from google.cloud.bigtable.column_family import GCRuleUnion + + return GCRuleUnion(*args, **kwargs) + + +def test_gc_rule_union_constructor(): + rules = object() + rule_union = _make_gc_rule_union(rules) + assert rule_union.rules is rules + + +def test_gc_rule_union___eq__(): + rules = object() + gc_rule1 = _make_gc_rule_union(rules) + gc_rule2 = _make_gc_rule_union(rules) + assert gc_rule1 == gc_rule2 + + +def test_gc_rule_union___eq__type_differ(): + rules = object() + gc_rule1 = _make_gc_rule_union(rules) + gc_rule2 = object() + assert gc_rule1 != gc_rule2 + + +def test_gc_rule_union___ne__same_value(): + rules = object() + gc_rule1 = _make_gc_rule_union(rules) + gc_rule2 = _make_gc_rule_union(rules) + assert not (gc_rule1 != gc_rule2) + + +def test_gc_rule_union_to_pb(): + import datetime + from google.protobuf import duration_pb2 + from google.cloud.bigtable.column_family import MaxAgeGCRule + from google.cloud.bigtable.column_family import MaxVersionsGCRule + + max_num_versions = 42 + rule1 = MaxVersionsGCRule(max_num_versions) + pb_rule1 = _GcRulePB(max_num_versions=max_num_versions) + + max_age = datetime.timedelta(seconds=1) + rule2 = MaxAgeGCRule(max_age) + pb_rule2 = _GcRulePB(max_age=duration_pb2.Duration(seconds=1)) + + rule3 = _make_gc_rule_union(rules=[rule1, rule2]) + pb_rule3 = _GcRulePB(union=_GcRuleUnionPB(rules=[pb_rule1, pb_rule2])) + + gc_rule_pb = rule3.to_pb() + assert gc_rule_pb == pb_rule3 + + +def test_gc_rule_union_to_pb_nested(): + import datetime + from google.protobuf import duration_pb2 + from google.cloud.bigtable.column_family import MaxAgeGCRule + from google.cloud.bigtable.column_family import MaxVersionsGCRule + + max_num_versions1 = 42 + rule1 = MaxVersionsGCRule(max_num_versions1) + pb_rule1 = _GcRulePB(max_num_versions=max_num_versions1) + + max_age = datetime.timedelta(seconds=1) + rule2 = MaxAgeGCRule(max_age) + pb_rule2 = _GcRulePB(max_age=duration_pb2.Duration(seconds=1)) + + rule3 = _make_gc_rule_union(rules=[rule1, rule2]) + pb_rule3 = _GcRulePB(union=_GcRuleUnionPB(rules=[pb_rule1, pb_rule2])) + + max_num_versions2 = 1337 + rule4 = MaxVersionsGCRule(max_num_versions2) + pb_rule4 = _GcRulePB(max_num_versions=max_num_versions2) + + rule5 = _make_gc_rule_union(rules=[rule3, rule4]) + pb_rule5 = _GcRulePB(union=_GcRuleUnionPB(rules=[pb_rule3, pb_rule4])) + + gc_rule_pb = rule5.to_pb() + assert gc_rule_pb == pb_rule5 + + +def _make_gc_rule_intersection(*args, **kwargs): + from google.cloud.bigtable.column_family import GCRuleIntersection + + return GCRuleIntersection(*args, **kwargs) + + +def test_gc_rule_intersection_constructor(): + rules = object() + rule_intersection = _make_gc_rule_intersection(rules) + assert rule_intersection.rules is rules + + +def test_gc_rule_intersection___eq__(): + rules = object() + gc_rule1 = _make_gc_rule_intersection(rules) + gc_rule2 = _make_gc_rule_intersection(rules) + assert gc_rule1 == gc_rule2 + + +def test_gc_rule_intersection___eq__type_differ(): + rules = object() + gc_rule1 = _make_gc_rule_intersection(rules) + gc_rule2 = object() + assert gc_rule1 != gc_rule2 + + +def test_gc_rule_intersection___ne__same_value(): + rules = object() + gc_rule1 = _make_gc_rule_intersection(rules) + gc_rule2 = _make_gc_rule_intersection(rules) + assert not (gc_rule1 != gc_rule2) + + +def test_gc_rule_intersection_to_pb(): 
+ import datetime + from google.protobuf import duration_pb2 + from google.cloud.bigtable.column_family import MaxAgeGCRule + from google.cloud.bigtable.column_family import MaxVersionsGCRule + + max_num_versions = 42 + rule1 = MaxVersionsGCRule(max_num_versions) + pb_rule1 = _GcRulePB(max_num_versions=max_num_versions) + + max_age = datetime.timedelta(seconds=1) + rule2 = MaxAgeGCRule(max_age) + pb_rule2 = _GcRulePB(max_age=duration_pb2.Duration(seconds=1)) + + rule3 = _make_gc_rule_intersection(rules=[rule1, rule2]) + pb_rule3 = _GcRulePB(intersection=_GcRuleIntersectionPB(rules=[pb_rule1, pb_rule2])) + + gc_rule_pb = rule3.to_pb() + assert gc_rule_pb == pb_rule3 + + +def test_gc_rule_intersection_to_pb_nested(): + import datetime + from google.protobuf import duration_pb2 + from google.cloud.bigtable.column_family import MaxAgeGCRule + from google.cloud.bigtable.column_family import MaxVersionsGCRule + + max_num_versions1 = 42 + rule1 = MaxVersionsGCRule(max_num_versions1) + pb_rule1 = _GcRulePB(max_num_versions=max_num_versions1) + + max_age = datetime.timedelta(seconds=1) + rule2 = MaxAgeGCRule(max_age) + pb_rule2 = _GcRulePB(max_age=duration_pb2.Duration(seconds=1)) + + rule3 = _make_gc_rule_intersection(rules=[rule1, rule2]) + pb_rule3 = _GcRulePB(intersection=_GcRuleIntersectionPB(rules=[pb_rule1, pb_rule2])) + + max_num_versions2 = 1337 + rule4 = MaxVersionsGCRule(max_num_versions2) + pb_rule4 = _GcRulePB(max_num_versions=max_num_versions2) + + rule5 = _make_gc_rule_intersection(rules=[rule3, rule4]) + pb_rule5 = _GcRulePB(intersection=_GcRuleIntersectionPB(rules=[pb_rule3, pb_rule4])) + + gc_rule_pb = rule5.to_pb() + assert gc_rule_pb == pb_rule5 + + +def _make_column_family(*args, **kwargs): + from google.cloud.bigtable.column_family import ColumnFamily + + return ColumnFamily(*args, **kwargs) + + +def _make_client(*args, **kwargs): + from google.cloud.bigtable.client import Client + + return Client(*args, **kwargs) + + +def test_column_family_constructor(): + column_family_id = u"column-family-id" + table = object() + gc_rule = object() + column_family = _make_column_family(column_family_id, table, gc_rule=gc_rule) + + assert column_family.column_family_id == column_family_id + assert column_family._table is table + assert column_family.gc_rule is gc_rule + + +def test_column_family_name_property(): + column_family_id = u"column-family-id" + table_name = "table_name" + table = _Table(table_name) + column_family = _make_column_family(column_family_id, table) + + expected_name = table_name + "/columnFamilies/" + column_family_id + assert column_family.name == expected_name + + +def test_column_family___eq__(): + column_family_id = "column_family_id" + table = object() + gc_rule = object() + column_family1 = _make_column_family(column_family_id, table, gc_rule=gc_rule) + column_family2 = _make_column_family(column_family_id, table, gc_rule=gc_rule) + assert column_family1 == column_family2 + + +def test_column_family___eq__type_differ(): + column_family1 = _make_column_family("column_family_id", None) + column_family2 = object() + assert column_family1 != column_family2 + + +def test_column_family___ne__same_value(): + column_family_id = "column_family_id" + table = object() + gc_rule = object() + column_family1 = _make_column_family(column_family_id, table, gc_rule=gc_rule) + column_family2 = _make_column_family(column_family_id, table, gc_rule=gc_rule) + assert not (column_family1 != column_family2) + + +def test_column_family___ne__(): + column_family1 = 
_make_column_family("column_family_id1", None) + column_family2 = _make_column_family("column_family_id2", None) + assert column_family1 != column_family2 + + +def test_column_family_to_pb_no_rules(): + column_family = _make_column_family("column_family_id", None) + pb_val = column_family.to_pb() + expected = _ColumnFamilyPB() + assert pb_val == expected + + +def test_column_family_to_pb_with_rule(): + from google.cloud.bigtable.column_family import MaxVersionsGCRule + + gc_rule = MaxVersionsGCRule(1) + column_family = _make_column_family("column_family_id", None, gc_rule=gc_rule) + pb_val = column_family.to_pb() + expected = _ColumnFamilyPB(gc_rule=gc_rule.to_pb()) + assert pb_val == expected + + +def _create_test_helper(gc_rule=None): + from google.cloud.bigtable_admin_v2.types import ( + bigtable_table_admin as table_admin_v2_pb2, + ) + from tests.unit._testing import _FakeStub + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + BigtableTableAdminClient, + ) + + project_id = "project-id" + zone = "zone" + cluster_id = "cluster-id" + table_id = "table-id" + column_family_id = "column-family-id" + table_name = ( + "projects/" + + project_id + + "/zones/" + + zone + + "/clusters/" + + cluster_id + + "/tables/" + + table_id + ) + + api = mock.create_autospec(BigtableTableAdminClient) + + credentials = _make_credentials() + client = _make_client(project=project_id, credentials=credentials, admin=True) + table = _Table(table_name, client=client) + column_family = _make_column_family(column_family_id, table, gc_rule=gc_rule) + + # Create request_pb + if gc_rule is None: + column_family_pb = _ColumnFamilyPB() + else: + column_family_pb = _ColumnFamilyPB(gc_rule=gc_rule.to_pb()) + request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest(name=table_name) + modification = table_admin_v2_pb2.ModifyColumnFamiliesRequest.Modification() + modification.id = column_family_id + modification.create = column_family_pb + request_pb.modifications.append(modification) + + # Create response_pb + response_pb = _ColumnFamilyPB() + + # Patch the stub used by the API method. + stub = _FakeStub(response_pb) + client._table_admin_client = api + client._table_admin_client.transport.create = stub + + # Create expected_result. + expected_result = None # create() has no return value. + + # Perform the method and check the result. 
+ assert stub.results == (response_pb,) + result = column_family.create() + assert result == expected_result + + +def test_column_family_create(): + _create_test_helper(gc_rule=None) + + +def test_column_family_create_with_gc_rule(): + from google.cloud.bigtable.column_family import MaxVersionsGCRule + + gc_rule = MaxVersionsGCRule(1337) + _create_test_helper(gc_rule=gc_rule) + + +def _update_test_helper(gc_rule=None): + from tests.unit._testing import _FakeStub + from google.cloud.bigtable_admin_v2.types import ( + bigtable_table_admin as table_admin_v2_pb2, + ) + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + BigtableTableAdminClient, + ) + + project_id = "project-id" + zone = "zone" + cluster_id = "cluster-id" + table_id = "table-id" + column_family_id = "column-family-id" + table_name = ( + "projects/" + + project_id + + "/zones/" + + zone + + "/clusters/" + + cluster_id + + "/tables/" + + table_id + ) + + api = mock.create_autospec(BigtableTableAdminClient) + credentials = _make_credentials() + client = _make_client(project=project_id, credentials=credentials, admin=True) + table = _Table(table_name, client=client) + column_family = _make_column_family(column_family_id, table, gc_rule=gc_rule) + + # Create request_pb + if gc_rule is None: + column_family_pb = _ColumnFamilyPB() + else: + column_family_pb = _ColumnFamilyPB(gc_rule=gc_rule.to_pb()) + request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest(name=table_name) + modification = table_admin_v2_pb2.ModifyColumnFamiliesRequest.Modification() + modification.id = column_family_id + modification.update = column_family_pb + request_pb.modifications.append(modification) + + # Create response_pb + response_pb = _ColumnFamilyPB() + + # Patch the stub used by the API method. + stub = _FakeStub(response_pb) + client._table_admin_client = api + client._table_admin_client.transport.update = stub + + # Create expected_result. + expected_result = None # update() has no return value. + + # Perform the method and check the result. 
+ assert stub.results == (response_pb,) + result = column_family.update() + assert result == expected_result + + +def test_column_family_update(): + _update_test_helper(gc_rule=None) + + +def test_column_family_update_with_gc_rule(): + from google.cloud.bigtable.column_family import MaxVersionsGCRule + + gc_rule = MaxVersionsGCRule(1337) + _update_test_helper(gc_rule=gc_rule) + + +def test_column_family_delete(): + from google.protobuf import empty_pb2 + from google.cloud.bigtable_admin_v2.types import ( + bigtable_table_admin as table_admin_v2_pb2, + ) + from tests.unit._testing import _FakeStub + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + BigtableTableAdminClient, + ) + + project_id = "project-id" + zone = "zone" + cluster_id = "cluster-id" + table_id = "table-id" + column_family_id = "column-family-id" + table_name = ( + "projects/" + + project_id + + "/zones/" + + zone + + "/clusters/" + + cluster_id + + "/tables/" + + table_id + ) + + api = mock.create_autospec(BigtableTableAdminClient) + credentials = _make_credentials() + client = _make_client(project=project_id, credentials=credentials, admin=True) + table = _Table(table_name, client=client) + column_family = _make_column_family(column_family_id, table) + + # Create request_pb + request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest(name=table_name) + modification = table_admin_v2_pb2.ModifyColumnFamiliesRequest.Modification( + id=column_family_id, drop=True + ) + request_pb.modifications.append(modification) + + # Create response_pb + response_pb = empty_pb2.Empty() + + # Patch the stub used by the API method. + stub = _FakeStub(response_pb) + client._table_admin_client = api + client._table_admin_client.transport.delete = stub + + # Create expected_result. + expected_result = None # delete() has no return value. + + # Perform the method and check the result. 
+ assert stub.results == (response_pb,) + result = column_family.delete() + assert result == expected_result + + +def test__gc_rule_from_pb_empty(): + from google.cloud.bigtable.column_family import _gc_rule_from_pb + + gc_rule_pb = _GcRulePB() + assert _gc_rule_from_pb(gc_rule_pb) is None + + +def test__gc_rule_from_pb_max_num_versions(): + from google.cloud.bigtable.column_family import _gc_rule_from_pb + from google.cloud.bigtable.column_family import MaxVersionsGCRule + + orig_rule = MaxVersionsGCRule(1) + gc_rule_pb = orig_rule.to_pb() + result = _gc_rule_from_pb(gc_rule_pb) + assert isinstance(result, MaxVersionsGCRule) + assert result == orig_rule + + +def test__gc_rule_from_pb_max_age(): + import datetime + from google.cloud.bigtable.column_family import _gc_rule_from_pb + from google.cloud.bigtable.column_family import MaxAgeGCRule + + orig_rule = MaxAgeGCRule(datetime.timedelta(seconds=1)) + gc_rule_pb = orig_rule.to_pb() + result = _gc_rule_from_pb(gc_rule_pb) + assert isinstance(result, MaxAgeGCRule) + assert result == orig_rule + + +def test__gc_rule_from_pb_union(): + import datetime + from google.cloud.bigtable.column_family import _gc_rule_from_pb + from google.cloud.bigtable.column_family import GCRuleUnion + from google.cloud.bigtable.column_family import MaxAgeGCRule + from google.cloud.bigtable.column_family import MaxVersionsGCRule + + rule1 = MaxVersionsGCRule(1) + rule2 = MaxAgeGCRule(datetime.timedelta(seconds=1)) + orig_rule = GCRuleUnion([rule1, rule2]) + gc_rule_pb = orig_rule.to_pb() + result = _gc_rule_from_pb(gc_rule_pb) + assert isinstance(result, GCRuleUnion) + assert result == orig_rule + + +def test__gc_rule_from_pb_intersection(): + import datetime + from google.cloud.bigtable.column_family import _gc_rule_from_pb + from google.cloud.bigtable.column_family import GCRuleIntersection + from google.cloud.bigtable.column_family import MaxAgeGCRule + from google.cloud.bigtable.column_family import MaxVersionsGCRule + + rule1 = MaxVersionsGCRule(1) + rule2 = MaxAgeGCRule(datetime.timedelta(seconds=1)) + orig_rule = GCRuleIntersection([rule1, rule2]) + gc_rule_pb = orig_rule.to_pb() + result = _gc_rule_from_pb(gc_rule_pb) + assert isinstance(result, GCRuleIntersection) + assert result == orig_rule + + +def test__gc_rule_from_pb_unknown_field_name(): + from google.cloud.bigtable.column_family import _gc_rule_from_pb + + class MockProto(object): + + names = [] + + _pb = {} - def test___ne__same_value(self): - max_age = object() - gc_rule1 = self._make_one(max_age=max_age) - gc_rule2 = self._make_one(max_age=max_age) - comparison_val = gc_rule1 != gc_rule2 - self.assertFalse(comparison_val) + @classmethod + def WhichOneof(cls, name): + cls.names.append(name) + return "unknown" - def test_to_pb(self): - import datetime - from google.protobuf import duration_pb2 + MockProto._pb = MockProto - max_age = datetime.timedelta(seconds=1) - duration = duration_pb2.Duration(seconds=1) - gc_rule = self._make_one(max_age=max_age) - pb_val = gc_rule.to_pb() - self.assertEqual(pb_val, _GcRulePB(max_age=duration)) + assert MockProto.names == [] + with pytest.raises(ValueError): + _gc_rule_from_pb(MockProto) -class TestGCRuleUnion(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.column_family import GCRuleUnion - - return GCRuleUnion - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test_constructor(self): - rules = object() - rule_union = self._make_one(rules) - 
self.assertIs(rule_union.rules, rules) - - def test___eq__(self): - rules = object() - gc_rule1 = self._make_one(rules) - gc_rule2 = self._make_one(rules) - self.assertEqual(gc_rule1, gc_rule2) - - def test___eq__type_differ(self): - rules = object() - gc_rule1 = self._make_one(rules) - gc_rule2 = object() - self.assertNotEqual(gc_rule1, gc_rule2) - - def test___ne__same_value(self): - rules = object() - gc_rule1 = self._make_one(rules) - gc_rule2 = self._make_one(rules) - comparison_val = gc_rule1 != gc_rule2 - self.assertFalse(comparison_val) - - def test_to_pb(self): - import datetime - from google.protobuf import duration_pb2 - from google.cloud.bigtable.column_family import MaxAgeGCRule - from google.cloud.bigtable.column_family import MaxVersionsGCRule - - max_num_versions = 42 - rule1 = MaxVersionsGCRule(max_num_versions) - pb_rule1 = _GcRulePB(max_num_versions=max_num_versions) - - max_age = datetime.timedelta(seconds=1) - rule2 = MaxAgeGCRule(max_age) - pb_rule2 = _GcRulePB(max_age=duration_pb2.Duration(seconds=1)) - - rule3 = self._make_one(rules=[rule1, rule2]) - pb_rule3 = _GcRulePB(union=_GcRuleUnionPB(rules=[pb_rule1, pb_rule2])) - - gc_rule_pb = rule3.to_pb() - self.assertEqual(gc_rule_pb, pb_rule3) - - def test_to_pb_nested(self): - import datetime - from google.protobuf import duration_pb2 - from google.cloud.bigtable.column_family import MaxAgeGCRule - from google.cloud.bigtable.column_family import MaxVersionsGCRule - - max_num_versions1 = 42 - rule1 = MaxVersionsGCRule(max_num_versions1) - pb_rule1 = _GcRulePB(max_num_versions=max_num_versions1) - - max_age = datetime.timedelta(seconds=1) - rule2 = MaxAgeGCRule(max_age) - pb_rule2 = _GcRulePB(max_age=duration_pb2.Duration(seconds=1)) - - rule3 = self._make_one(rules=[rule1, rule2]) - pb_rule3 = _GcRulePB(union=_GcRuleUnionPB(rules=[pb_rule1, pb_rule2])) - - max_num_versions2 = 1337 - rule4 = MaxVersionsGCRule(max_num_versions2) - pb_rule4 = _GcRulePB(max_num_versions=max_num_versions2) - - rule5 = self._make_one(rules=[rule3, rule4]) - pb_rule5 = _GcRulePB(union=_GcRuleUnionPB(rules=[pb_rule3, pb_rule4])) - - gc_rule_pb = rule5.to_pb() - self.assertEqual(gc_rule_pb, pb_rule5) - - -class TestGCRuleIntersection(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.column_family import GCRuleIntersection - - return GCRuleIntersection - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test_constructor(self): - rules = object() - rule_intersection = self._make_one(rules) - self.assertIs(rule_intersection.rules, rules) - - def test___eq__(self): - rules = object() - gc_rule1 = self._make_one(rules) - gc_rule2 = self._make_one(rules) - self.assertEqual(gc_rule1, gc_rule2) - - def test___eq__type_differ(self): - rules = object() - gc_rule1 = self._make_one(rules) - gc_rule2 = object() - self.assertNotEqual(gc_rule1, gc_rule2) - - def test___ne__same_value(self): - rules = object() - gc_rule1 = self._make_one(rules) - gc_rule2 = self._make_one(rules) - comparison_val = gc_rule1 != gc_rule2 - self.assertFalse(comparison_val) - - def test_to_pb(self): - import datetime - from google.protobuf import duration_pb2 - from google.cloud.bigtable.column_family import MaxAgeGCRule - from google.cloud.bigtable.column_family import MaxVersionsGCRule - - max_num_versions = 42 - rule1 = MaxVersionsGCRule(max_num_versions) - pb_rule1 = _GcRulePB(max_num_versions=max_num_versions) - - max_age = datetime.timedelta(seconds=1) - rule2 = MaxAgeGCRule(max_age) - 
pb_rule2 = _GcRulePB(max_age=duration_pb2.Duration(seconds=1)) - - rule3 = self._make_one(rules=[rule1, rule2]) - pb_rule3 = _GcRulePB( - intersection=_GcRuleIntersectionPB(rules=[pb_rule1, pb_rule2]) - ) - - gc_rule_pb = rule3.to_pb() - self.assertEqual(gc_rule_pb, pb_rule3) - - def test_to_pb_nested(self): - import datetime - from google.protobuf import duration_pb2 - from google.cloud.bigtable.column_family import MaxAgeGCRule - from google.cloud.bigtable.column_family import MaxVersionsGCRule - - max_num_versions1 = 42 - rule1 = MaxVersionsGCRule(max_num_versions1) - pb_rule1 = _GcRulePB(max_num_versions=max_num_versions1) - - max_age = datetime.timedelta(seconds=1) - rule2 = MaxAgeGCRule(max_age) - pb_rule2 = _GcRulePB(max_age=duration_pb2.Duration(seconds=1)) - - rule3 = self._make_one(rules=[rule1, rule2]) - pb_rule3 = _GcRulePB( - intersection=_GcRuleIntersectionPB(rules=[pb_rule1, pb_rule2]) - ) - - max_num_versions2 = 1337 - rule4 = MaxVersionsGCRule(max_num_versions2) - pb_rule4 = _GcRulePB(max_num_versions=max_num_versions2) - - rule5 = self._make_one(rules=[rule3, rule4]) - pb_rule5 = _GcRulePB( - intersection=_GcRuleIntersectionPB(rules=[pb_rule3, pb_rule4]) - ) - - gc_rule_pb = rule5.to_pb() - self.assertEqual(gc_rule_pb, pb_rule5) - - -class TestColumnFamily(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.column_family import ColumnFamily - - return ColumnFamily - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - @staticmethod - def _get_target_client_class(): - from google.cloud.bigtable.client import Client - - return Client - - def _make_client(self, *args, **kwargs): - return self._get_target_client_class()(*args, **kwargs) - - def test_constructor(self): - column_family_id = u"column-family-id" - table = object() - gc_rule = object() - column_family = self._make_one(column_family_id, table, gc_rule=gc_rule) - - self.assertEqual(column_family.column_family_id, column_family_id) - self.assertIs(column_family._table, table) - self.assertIs(column_family.gc_rule, gc_rule) - - def test_name_property(self): - column_family_id = u"column-family-id" - table_name = "table_name" - table = _Table(table_name) - column_family = self._make_one(column_family_id, table) - - expected_name = table_name + "/columnFamilies/" + column_family_id - self.assertEqual(column_family.name, expected_name) - - def test___eq__(self): - column_family_id = "column_family_id" - table = object() - gc_rule = object() - column_family1 = self._make_one(column_family_id, table, gc_rule=gc_rule) - column_family2 = self._make_one(column_family_id, table, gc_rule=gc_rule) - self.assertEqual(column_family1, column_family2) - - def test___eq__type_differ(self): - column_family1 = self._make_one("column_family_id", None) - column_family2 = object() - self.assertNotEqual(column_family1, column_family2) - - def test___ne__same_value(self): - column_family_id = "column_family_id" - table = object() - gc_rule = object() - column_family1 = self._make_one(column_family_id, table, gc_rule=gc_rule) - column_family2 = self._make_one(column_family_id, table, gc_rule=gc_rule) - comparison_val = column_family1 != column_family2 - self.assertFalse(comparison_val) - - def test___ne__(self): - column_family1 = self._make_one("column_family_id1", None) - column_family2 = self._make_one("column_family_id2", None) - self.assertNotEqual(column_family1, column_family2) - - def test_to_pb_no_rules(self): - column_family = 
self._make_one("column_family_id", None) - pb_val = column_family.to_pb() - expected = _ColumnFamilyPB() - self.assertEqual(pb_val, expected) - - def test_to_pb_with_rule(self): - from google.cloud.bigtable.column_family import MaxVersionsGCRule - - gc_rule = MaxVersionsGCRule(1) - column_family = self._make_one("column_family_id", None, gc_rule=gc_rule) - pb_val = column_family.to_pb() - expected = _ColumnFamilyPB(gc_rule=gc_rule.to_pb()) - self.assertEqual(pb_val, expected) - - def _create_test_helper(self, gc_rule=None): - from google.cloud.bigtable_admin_v2.types import ( - bigtable_table_admin as table_admin_v2_pb2, - ) - from tests.unit._testing import _FakeStub - from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - BigtableTableAdminClient, - ) - - project_id = "project-id" - zone = "zone" - cluster_id = "cluster-id" - table_id = "table-id" - column_family_id = "column-family-id" - table_name = ( - "projects/" - + project_id - + "/zones/" - + zone - + "/clusters/" - + cluster_id - + "/tables/" - + table_id - ) - - api = mock.create_autospec(BigtableTableAdminClient) - - credentials = _make_credentials() - client = self._make_client( - project=project_id, credentials=credentials, admin=True - ) - table = _Table(table_name, client=client) - column_family = self._make_one(column_family_id, table, gc_rule=gc_rule) - - # Create request_pb - if gc_rule is None: - column_family_pb = _ColumnFamilyPB() - else: - column_family_pb = _ColumnFamilyPB(gc_rule=gc_rule.to_pb()) - request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest(name=table_name) - modification = table_admin_v2_pb2.ModifyColumnFamiliesRequest.Modification() - modification.id = column_family_id - modification.create = column_family_pb - request_pb.modifications.append(modification) - - # Create response_pb - response_pb = _ColumnFamilyPB() - - # Patch the stub used by the API method. - stub = _FakeStub(response_pb) - client._table_admin_client = api - client._table_admin_client.transport.create = stub - - # Create expected_result. - expected_result = None # create() has no return value. - - # Perform the method and check the result. 
- self.assertEqual(stub.results, (response_pb,)) - result = column_family.create() - self.assertEqual(result, expected_result) - - def test_create(self): - self._create_test_helper(gc_rule=None) - - def test_create_with_gc_rule(self): - from google.cloud.bigtable.column_family import MaxVersionsGCRule - - gc_rule = MaxVersionsGCRule(1337) - self._create_test_helper(gc_rule=gc_rule) - - def _update_test_helper(self, gc_rule=None): - from tests.unit._testing import _FakeStub - from google.cloud.bigtable_admin_v2.types import ( - bigtable_table_admin as table_admin_v2_pb2, - ) - from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - BigtableTableAdminClient, - ) - - project_id = "project-id" - zone = "zone" - cluster_id = "cluster-id" - table_id = "table-id" - column_family_id = "column-family-id" - table_name = ( - "projects/" - + project_id - + "/zones/" - + zone - + "/clusters/" - + cluster_id - + "/tables/" - + table_id - ) - - api = mock.create_autospec(BigtableTableAdminClient) - credentials = _make_credentials() - client = self._make_client( - project=project_id, credentials=credentials, admin=True - ) - table = _Table(table_name, client=client) - column_family = self._make_one(column_family_id, table, gc_rule=gc_rule) - - # Create request_pb - if gc_rule is None: - column_family_pb = _ColumnFamilyPB() - else: - column_family_pb = _ColumnFamilyPB(gc_rule=gc_rule.to_pb()) - request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest(name=table_name) - modification = table_admin_v2_pb2.ModifyColumnFamiliesRequest.Modification() - modification.id = column_family_id - modification.update = column_family_pb - request_pb.modifications.append(modification) - - # Create response_pb - response_pb = _ColumnFamilyPB() - - # Patch the stub used by the API method. - stub = _FakeStub(response_pb) - client._table_admin_client = api - client._table_admin_client.transport.update = stub - - # Create expected_result. - expected_result = None # update() has no return value. - - # Perform the method and check the result. 
- self.assertEqual(stub.results, (response_pb,)) - result = column_family.update() - self.assertEqual(result, expected_result) - - def test_update(self): - self._update_test_helper(gc_rule=None) - - def test_update_with_gc_rule(self): - from google.cloud.bigtable.column_family import MaxVersionsGCRule - - gc_rule = MaxVersionsGCRule(1337) - self._update_test_helper(gc_rule=gc_rule) - - def test_delete(self): - from google.protobuf import empty_pb2 - from google.cloud.bigtable_admin_v2.types import ( - bigtable_table_admin as table_admin_v2_pb2, - ) - from tests.unit._testing import _FakeStub - from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - BigtableTableAdminClient, - ) - - project_id = "project-id" - zone = "zone" - cluster_id = "cluster-id" - table_id = "table-id" - column_family_id = "column-family-id" - table_name = ( - "projects/" - + project_id - + "/zones/" - + zone - + "/clusters/" - + cluster_id - + "/tables/" - + table_id - ) - - api = mock.create_autospec(BigtableTableAdminClient) - credentials = _make_credentials() - client = self._make_client( - project=project_id, credentials=credentials, admin=True - ) - table = _Table(table_name, client=client) - column_family = self._make_one(column_family_id, table) - - # Create request_pb - request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest(name=table_name) - modification = table_admin_v2_pb2.ModifyColumnFamiliesRequest.Modification( - id=column_family_id, drop=True - ) - request_pb.modifications.append(modification) - - # Create response_pb - response_pb = empty_pb2.Empty() - - # Patch the stub used by the API method. - stub = _FakeStub(response_pb) - client._table_admin_client = api - client._table_admin_client.transport.delete = stub - - # Create expected_result. - expected_result = None # delete() has no return value. - - # Perform the method and check the result. 
- self.assertEqual(stub.results, (response_pb,)) - result = column_family.delete() - self.assertEqual(result, expected_result) - - -class Test__gc_rule_from_pb(unittest.TestCase): - def _call_fut(self, *args, **kwargs): - from google.cloud.bigtable.column_family import _gc_rule_from_pb - - return _gc_rule_from_pb(*args, **kwargs) - - def test_empty(self): - - gc_rule_pb = _GcRulePB() - self.assertIsNone(self._call_fut(gc_rule_pb)) - - def test_max_num_versions(self): - from google.cloud.bigtable.column_family import MaxVersionsGCRule - - orig_rule = MaxVersionsGCRule(1) - gc_rule_pb = orig_rule.to_pb() - result = self._call_fut(gc_rule_pb) - self.assertIsInstance(result, MaxVersionsGCRule) - self.assertEqual(result, orig_rule) - - def test_max_age(self): - import datetime - from google.cloud.bigtable.column_family import MaxAgeGCRule - - orig_rule = MaxAgeGCRule(datetime.timedelta(seconds=1)) - gc_rule_pb = orig_rule.to_pb() - result = self._call_fut(gc_rule_pb) - self.assertIsInstance(result, MaxAgeGCRule) - self.assertEqual(result, orig_rule) - - def test_union(self): - import datetime - from google.cloud.bigtable.column_family import GCRuleUnion - from google.cloud.bigtable.column_family import MaxAgeGCRule - from google.cloud.bigtable.column_family import MaxVersionsGCRule - - rule1 = MaxVersionsGCRule(1) - rule2 = MaxAgeGCRule(datetime.timedelta(seconds=1)) - orig_rule = GCRuleUnion([rule1, rule2]) - gc_rule_pb = orig_rule.to_pb() - result = self._call_fut(gc_rule_pb) - self.assertIsInstance(result, GCRuleUnion) - self.assertEqual(result, orig_rule) - - def test_intersection(self): - import datetime - from google.cloud.bigtable.column_family import GCRuleIntersection - from google.cloud.bigtable.column_family import MaxAgeGCRule - from google.cloud.bigtable.column_family import MaxVersionsGCRule - - rule1 = MaxVersionsGCRule(1) - rule2 = MaxAgeGCRule(datetime.timedelta(seconds=1)) - orig_rule = GCRuleIntersection([rule1, rule2]) - gc_rule_pb = orig_rule.to_pb() - result = self._call_fut(gc_rule_pb) - self.assertIsInstance(result, GCRuleIntersection) - self.assertEqual(result, orig_rule) - - def test_unknown_field_name(self): - class MockProto(object): - - names = [] - - _pb = {} - - @classmethod - def WhichOneof(cls, name): - cls.names.append(name) - return "unknown" - - MockProto._pb = MockProto - - self.assertEqual(MockProto.names, []) - self.assertRaises(ValueError, self._call_fut, MockProto) - self.assertEqual(MockProto.names, ["rule"]) + assert MockProto.names == ["rule"] def _GcRulePB(*args, **kw): diff --git a/packages/google-cloud-bigtable/tests/unit/test_encryption_info.py b/packages/google-cloud-bigtable/tests/unit/test_encryption_info.py index ede6f4883bff..8b92a83ed980 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_encryption_info.py +++ b/packages/google-cloud-bigtable/tests/unit/test_encryption_info.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
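For context, the column-family tests above exercise Bigtable's garbage-collection rule classes. The following is a minimal sketch of how those rules compose, using only the public google.cloud.bigtable.column_family API that the tests import; the version/age values are illustrative, not taken from the patch:

import datetime

from google.cloud.bigtable.column_family import (
    GCRuleIntersection,
    GCRuleUnion,
    MaxAgeGCRule,
    MaxVersionsGCRule,
)

# Keep at most three versions of each cell.
keep_three = MaxVersionsGCRule(3)
# Drop cells older than seven days.
week_old = MaxAgeGCRule(datetime.timedelta(days=7))

# A union collects a cell when either rule matches; an intersection
# collects it only when both match.  Rules nest, as the *_to_pb_nested
# tests above verify.
either = GCRuleUnion(rules=[keep_three, week_old])
both = GCRuleIntersection(rules=[keep_three, week_old])

# to_pb() builds the GcRule protobuf that ColumnFamily.create()/update()
# send to the table admin API in the helpers above.
print(either.to_pb())
print(both.to_pb())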
-import unittest - import mock from google.cloud.bigtable import enums @@ -55,113 +53,119 @@ def _make_info_pb( ) -class TestEncryptionInfo(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.encryption_info import EncryptionInfo +def _make_encryption_info(*args, **kwargs): + from google.cloud.bigtable.encryption_info import EncryptionInfo + + return EncryptionInfo(*args, **kwargs) + + +def _make_encryption_info_defaults( + encryption_type=EncryptionType.GOOGLE_DEFAULT_ENCRYPTION, + code=_STATUS_CODE, + message=_STATUS_MESSAGE, + kms_key_version=_KMS_KEY_VERSION, +): + encryption_status = _make_status(code=code, message=message) + return _make_encryption_info(encryption_type, encryption_status, kms_key_version) + + +def test_encryption_info__from_pb(): + from google.cloud.bigtable.encryption_info import EncryptionInfo - return EncryptionInfo + info_pb = _make_info_pb() - def _make_one(self, encryption_type, encryption_status, kms_key_version): - return self._get_target_class()( - encryption_type, encryption_status, kms_key_version, - ) + info = EncryptionInfo._from_pb(info_pb) - def _make_one_defaults( - self, - encryption_type=EncryptionType.GOOGLE_DEFAULT_ENCRYPTION, - code=_STATUS_CODE, - message=_STATUS_MESSAGE, + assert info.encryption_type == EncryptionType.GOOGLE_DEFAULT_ENCRYPTION + assert info.encryption_status.code == _STATUS_CODE + assert info.encryption_status.message == _STATUS_MESSAGE + assert info.kms_key_version == _KMS_KEY_VERSION + + +def test_encryption_info_ctor(): + encryption_type = EncryptionType.GOOGLE_DEFAULT_ENCRYPTION + encryption_status = _make_status() + + info = _make_encryption_info( + encryption_type=encryption_type, + encryption_status=encryption_status, kms_key_version=_KMS_KEY_VERSION, - ): - encryption_status = _make_status(code=code, message=message) - return self._make_one(encryption_type, encryption_status, kms_key_version) - - def test__from_pb(self): - klass = self._get_target_class() - info_pb = _make_info_pb() - - info = klass._from_pb(info_pb) - - self.assertEqual( - info.encryption_type, EncryptionType.GOOGLE_DEFAULT_ENCRYPTION, - ) - self.assertEqual(info.encryption_status.code, _STATUS_CODE) - self.assertEqual(info.encryption_status.message, _STATUS_MESSAGE) - self.assertEqual(info.kms_key_version, _KMS_KEY_VERSION) - - def test_ctor(self): - encryption_type = EncryptionType.GOOGLE_DEFAULT_ENCRYPTION - encryption_status = _make_status() - - info = self._make_one( - encryption_type=encryption_type, - encryption_status=encryption_status, - kms_key_version=_KMS_KEY_VERSION, - ) - - self.assertEqual(info.encryption_type, encryption_type) - self.assertEqual(info.encryption_status, encryption_status) - self.assertEqual(info.kms_key_version, _KMS_KEY_VERSION) - - def test___eq___identity(self): - info = self._make_one_defaults() - self.assertTrue(info == info) - - def test___eq___wrong_type(self): - info = self._make_one_defaults() - other = object() - self.assertFalse(info == other) - - def test___eq___same_values(self): - info = self._make_one_defaults() - other = self._make_one_defaults() - self.assertTrue(info == other) - - def test___eq___different_encryption_type(self): - info = self._make_one_defaults() - other = self._make_one_defaults( - encryption_type=EncryptionType.CUSTOMER_MANAGED_ENCRYPTION, - ) - self.assertFalse(info == other) - - def test___eq___different_encryption_status(self): - info = self._make_one_defaults() - other = self._make_one_defaults(code=456) - self.assertFalse(info == other) - - 
def test___eq___different_kms_key_version(self): - info = self._make_one_defaults() - other = self._make_one_defaults(kms_key_version=789) - self.assertFalse(info == other) - - def test___ne___identity(self): - info = self._make_one_defaults() - self.assertFalse(info != info) - - def test___ne___wrong_type(self): - info = self._make_one_defaults() - other = object() - self.assertTrue(info != other) - - def test___ne___same_values(self): - info = self._make_one_defaults() - other = self._make_one_defaults() - self.assertFalse(info != other) - - def test___ne___different_encryption_type(self): - info = self._make_one_defaults() - other = self._make_one_defaults( - encryption_type=EncryptionType.CUSTOMER_MANAGED_ENCRYPTION, - ) - self.assertTrue(info != other) - - def test___ne___different_encryption_status(self): - info = self._make_one_defaults() - other = self._make_one_defaults(code=456) - self.assertTrue(info != other) - - def test___ne___different_kms_key_version(self): - info = self._make_one_defaults() - other = self._make_one_defaults(kms_key_version=789) - self.assertTrue(info != other) + ) + + assert info.encryption_type == encryption_type + assert info.encryption_status == encryption_status + assert info.kms_key_version == _KMS_KEY_VERSION + + +def test_encryption_info___eq___identity(): + info = _make_encryption_info_defaults() + assert info == info + + +def test_encryption_info___eq___wrong_type(): + info = _make_encryption_info_defaults() + other = object() + assert not (info == other) + + +def test_encryption_info___eq___same_values(): + info = _make_encryption_info_defaults() + other = _make_encryption_info_defaults() + assert info == other + + +def test_encryption_info___eq___different_encryption_type(): + info = _make_encryption_info_defaults() + other = _make_encryption_info_defaults( + encryption_type=EncryptionType.CUSTOMER_MANAGED_ENCRYPTION, + ) + assert not (info == other) + + +def test_encryption_info___eq___different_encryption_status(): + info = _make_encryption_info_defaults() + other = _make_encryption_info_defaults(code=456) + assert not (info == other) + + +def test_encryption_info___eq___different_kms_key_version(): + info = _make_encryption_info_defaults() + other = _make_encryption_info_defaults(kms_key_version=789) + assert not (info == other) + + +def test_encryption_info___ne___identity(): + info = _make_encryption_info_defaults() + assert not (info != info) + + +def test_encryption_info___ne___wrong_type(): + info = _make_encryption_info_defaults() + other = object() + assert info != other + + +def test_encryption_info___ne___same_values(): + info = _make_encryption_info_defaults() + other = _make_encryption_info_defaults() + assert not (info != other) + + +def test_encryption_info___ne___different_encryption_type(): + info = _make_encryption_info_defaults() + other = _make_encryption_info_defaults( + encryption_type=EncryptionType.CUSTOMER_MANAGED_ENCRYPTION, + ) + assert info != other + + +def test_encryption_info___ne___different_encryption_status(): + info = _make_encryption_info_defaults() + other = _make_encryption_info_defaults(code=456) + assert info != other + + +def test_encryption_info___ne___different_kms_key_version(): + info = _make_encryption_info_defaults() + other = _make_encryption_info_defaults(kms_key_version=789) + assert info != other diff --git a/packages/google-cloud-bigtable/tests/unit/test_error.py b/packages/google-cloud-bigtable/tests/unit/test_error.py index c53d63991d51..8b148473cc9c 100644 --- 
a/packages/google-cloud-bigtable/tests/unit/test_error.py +++ b/packages/google-cloud-bigtable/tests/unit/test_error.py @@ -12,86 +12,90 @@ # See the License for the specific language governing permissions and # limitations under the License. -import unittest - - -class TestStatus(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.error import Status - - return Status - - @staticmethod - def _make_status_pb(**kwargs): - from google.rpc.status_pb2 import Status - - return Status(**kwargs) - - def _make_one(self, status_pb): - return self._get_target_class()(status_pb) - - def test_ctor(self): - status_pb = self._make_status_pb() - status = self._make_one(status_pb) - self.assertIs(status.status_pb, status_pb) - - def test_code(self): - code = 123 - status_pb = self._make_status_pb(code=code) - status = self._make_one(status_pb) - self.assertEqual(status.code, code) - - def test_message(self): - message = "message" - status_pb = self._make_status_pb(message=message) - status = self._make_one(status_pb) - self.assertEqual(status.message, message) - - def test___eq___self(self): - status_pb = self._make_status_pb() - status = self._make_one(status_pb) - self.assertTrue(status == status) - - def test___eq___other_hit(self): - status_pb = self._make_status_pb(code=123, message="message") - status = self._make_one(status_pb) - other = self._make_one(status_pb) - self.assertTrue(status == other) - - def test___eq___other_miss(self): - status_pb = self._make_status_pb(code=123, message="message") - other_status_pb = self._make_status_pb(code=456, message="oops") - status = self._make_one(status_pb) - other = self._make_one(other_status_pb) - self.assertFalse(status == other) - - def test___eq___wrong_type(self): - status_pb = self._make_status_pb(code=123, message="message") - status = self._make_one(status_pb) - other = object() - self.assertFalse(status == other) - - def test___ne___self(self): - status_pb = self._make_status_pb() - status = self._make_one(status_pb) - self.assertFalse(status != status) - - def test___ne___other_hit(self): - status_pb = self._make_status_pb(code=123, message="message") - status = self._make_one(status_pb) - other = self._make_one(status_pb) - self.assertFalse(status != other) - - def test___ne___other_miss(self): - status_pb = self._make_status_pb(code=123, message="message") - other_status_pb = self._make_status_pb(code=456, message="oops") - status = self._make_one(status_pb) - other = self._make_one(other_status_pb) - self.assertTrue(status != other) - - def test___ne___wrong_type(self): - status_pb = self._make_status_pb(code=123, message="message") - status = self._make_one(status_pb) - other = object() - self.assertTrue(status != other) + +def _make_status_pb(**kwargs): + from google.rpc.status_pb2 import Status + + return Status(**kwargs) + + +def _make_status(status_pb): + from google.cloud.bigtable.error import Status + + return Status(status_pb) + + +def test_status_ctor(): + status_pb = _make_status_pb() + status = _make_status(status_pb) + assert status.status_pb is status_pb + + +def test_status_code(): + code = 123 + status_pb = _make_status_pb(code=code) + status = _make_status(status_pb) + assert status.code == code + + +def test_status_message(): + message = "message" + status_pb = _make_status_pb(message=message) + status = _make_status(status_pb) + assert status.message == message + + +def test_status___eq___self(): + status_pb = _make_status_pb() + status = _make_status(status_pb) + assert status == 
status + + +def test_status___eq___other_hit(): + status_pb = _make_status_pb(code=123, message="message") + status = _make_status(status_pb) + other = _make_status(status_pb) + assert status == other + + +def test_status___eq___other_miss(): + status_pb = _make_status_pb(code=123, message="message") + other_status_pb = _make_status_pb(code=456, message="oops") + status = _make_status(status_pb) + other = _make_status(other_status_pb) + assert not (status == other) + + +def test_status___eq___wrong_type(): + status_pb = _make_status_pb(code=123, message="message") + status = _make_status(status_pb) + other = object() + assert not (status == other) + + +def test_status___ne___self(): + status_pb = _make_status_pb() + status = _make_status(status_pb) + assert not (status != status) + + +def test_status___ne___other_hit(): + status_pb = _make_status_pb(code=123, message="message") + status = _make_status(status_pb) + other = _make_status(status_pb) + assert not (status != other) + + +def test_status___ne___other_miss(): + status_pb = _make_status_pb(code=123, message="message") + other_status_pb = _make_status_pb(code=456, message="oops") + status = _make_status(status_pb) + other = _make_status(other_status_pb) + assert status != other + + +def test_status___ne___wrong_type(): + status_pb = _make_status_pb(code=123, message="message") + status = _make_status(status_pb) + other = object() + assert status != other diff --git a/packages/google-cloud-bigtable/tests/unit/test_instance.py b/packages/google-cloud-bigtable/tests/unit/test_instance.py index e493fd9c8b9f..def7e3e38df1 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_instance.py +++ b/packages/google-cloud-bigtable/tests/unit/test_instance.py @@ -13,1014 +13,919 @@ # limitations under the License. 
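The test_error.py rewrite above covers google.cloud.bigtable.error.Status, a thin wrapper around the google.rpc Status protobuf. A minimal usage sketch, based only on the behavior those tests assert; the code and message values are illustrative:

from google.rpc.status_pb2 import Status as StatusPB

from google.cloud.bigtable.error import Status

status_pb = StatusPB(code=5, message="row not found")
status = Status(status_pb)

# The wrapper keeps a reference to the protobuf and exposes its fields.
assert status.status_pb is status_pb
assert status.code == 5
assert status.message == "row not found"

# Two wrappers around the same protobuf compare equal; wrappers around
# different statuses do not.
assert status == Status(status_pb)
assert status != Status(StatusPB(code=13, message="internal"))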
-import unittest - import mock +import pytest from ._testing import _make_credentials from google.cloud.bigtable.cluster import Cluster +PROJECT = "project" +INSTANCE_ID = "instance-id" +INSTANCE_NAME = "projects/" + PROJECT + "/instances/" + INSTANCE_ID +LOCATION_ID = "locid" +LOCATION = "projects/" + PROJECT + "/locations/" + LOCATION_ID +APP_PROFILE_PATH = "projects/" + PROJECT + "/instances/" + INSTANCE_ID + "/appProfiles/" +DISPLAY_NAME = "display_name" +LABELS = {"foo": "bar"} +OP_ID = 8915 +OP_NAME = "operations/projects/{}/instances/{}operations/{}".format( + PROJECT, INSTANCE_ID, OP_ID +) +TABLE_ID = "table_id" +TABLE_NAME = INSTANCE_NAME + "/tables/" + TABLE_ID +CLUSTER_ID = "cluster-id" +CLUSTER_NAME = INSTANCE_NAME + "/clusters/" + CLUSTER_ID +BACKUP_ID = "backup-id" +BACKUP_NAME = CLUSTER_NAME + "/backups/" + BACKUP_ID + +APP_PROFILE_ID_1 = "app-profile-id-1" +DESCRIPTION_1 = "routing policy any" +APP_PROFILE_ID_2 = "app-profile-id-2" +DESCRIPTION_2 = "routing policy single" +ALLOW_WRITES = True +CLUSTER_ID = "cluster-id" + + +def _make_client(*args, **kwargs): + from google.cloud.bigtable.client import Client + + return Client(*args, **kwargs) + + +def _make_instance_admin_api(): + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) + + return mock.create_autospec(BigtableInstanceAdminClient) + + +def _make_instance(*args, **kwargs): + from google.cloud.bigtable.instance import Instance + + return Instance(*args, **kwargs) + + +def test_instance_constructor_defaults(): + + client = object() + instance = _make_instance(INSTANCE_ID, client) + assert instance.instance_id == INSTANCE_ID + assert instance.display_name == INSTANCE_ID + assert instance.type_ is None + assert instance.labels is None + assert instance._client is client + assert instance.state is None + + +def test_instance_constructor_non_default(): + from google.cloud.bigtable import enums + + instance_type = enums.Instance.Type.DEVELOPMENT + state = enums.Instance.State.READY + labels = {"test": "test"} + client = object() + + instance = _make_instance( + INSTANCE_ID, + client, + display_name=DISPLAY_NAME, + instance_type=instance_type, + labels=labels, + _state=state, + ) + assert instance.instance_id == INSTANCE_ID + assert instance.display_name == DISPLAY_NAME + assert instance.type_ == instance_type + assert instance.labels == labels + assert instance._client is client + assert instance.state == state + + +def test_instance__update_from_pb_success(): + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 + from google.cloud.bigtable import enums + + instance_type = data_v2_pb2.Instance.Type.PRODUCTION + state = enums.Instance.State.READY + # todo type to type_? 
+ instance_pb = data_v2_pb2.Instance( + display_name=DISPLAY_NAME, type_=instance_type, labels=LABELS, state=state, + ) + + instance = _make_instance(None, None) + assert instance.display_name is None + assert instance.type_ is None + assert instance.labels is None + instance._update_from_pb(instance_pb._pb) + assert instance.display_name == DISPLAY_NAME + assert instance.type_ == instance_type + assert instance.labels == LABELS + assert instance._state == state + + +def test_instance__update_from_pb_success_defaults(): + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 + from google.cloud.bigtable import enums + + instance_pb = data_v2_pb2.Instance(display_name=DISPLAY_NAME) + + instance = _make_instance(None, None) + assert instance.display_name is None + assert instance.type_ is None + assert instance.labels is None + instance._update_from_pb(instance_pb._pb) + assert instance.display_name == DISPLAY_NAME + assert instance.type_ == enums.Instance.Type.UNSPECIFIED + assert not instance.labels + + +def test_instance__update_from_pb_wo_display_name(): + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 + + instance_pb = data_v2_pb2.Instance() + instance = _make_instance(None, None) + assert instance.display_name is None + + with pytest.raises(ValueError): + instance._update_from_pb(instance_pb) + + +def test_instance_from_pb_success(): + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 + from google.cloud.bigtable import enums + from google.cloud.bigtable.instance import Instance + + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials, admin=True) + instance_type = enums.Instance.Type.PRODUCTION + state = enums.Instance.State.READY + instance_pb = data_v2_pb2.Instance( + name=INSTANCE_NAME, + display_name=INSTANCE_ID, + type_=instance_type, + labels=LABELS, + state=state, + ) + + instance = Instance.from_pb(instance_pb, client) + + assert isinstance(instance, Instance) + assert instance._client == client + assert instance.instance_id == INSTANCE_ID + assert instance.display_name == INSTANCE_ID + assert instance.type_ == instance_type + assert instance.labels == LABELS + assert instance._state == state + + +def test_instance_from_pb_bad_instance_name(): + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 + from google.cloud.bigtable.instance import Instance + + instance_name = "INCORRECT_FORMAT" + instance_pb = data_v2_pb2.Instance(name=instance_name) + + with pytest.raises(ValueError): + Instance.from_pb(instance_pb, None) + + +def test_instance_from_pb_project_mistmatch(): + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 + from google.cloud.bigtable.instance import Instance + + ALT_PROJECT = "ALT_PROJECT" + credentials = _make_credentials() + client = _make_client(project=ALT_PROJECT, credentials=credentials, admin=True) -class TestInstance(unittest.TestCase): + instance_pb = data_v2_pb2.Instance(name=INSTANCE_NAME) + + with pytest.raises(ValueError): + Instance.from_pb(instance_pb, client) + + +def test_instance_name(): + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials, admin=True) + + api = client._instance_admin_client = _make_instance_admin_api() + api.instance_path.return_value = INSTANCE_NAME + instance = _make_instance(INSTANCE_ID, client) + + assert instance.name == INSTANCE_NAME + + +def test_instance___eq__(): + client = object() + instance1 = 
_make_instance(INSTANCE_ID, client) + instance2 = _make_instance(INSTANCE_ID, client) + assert instance1 == instance2 + + +def test_instance___eq__type_differ(): + client = object() + instance1 = _make_instance(INSTANCE_ID, client) + instance2 = object() + assert instance1 != instance2 + + +def test_instance___ne__same_value(): + client = object() + instance1 = _make_instance(INSTANCE_ID, client) + instance2 = _make_instance(INSTANCE_ID, client) + assert not (instance1 != instance2) + + +def test_instance___ne__(): + instance1 = _make_instance("instance_id1", "client1") + instance2 = _make_instance("instance_id2", "client2") + assert instance1 != instance2 + + +def test_instance_create_w_location_and_clusters(): + instance = _make_instance(INSTANCE_ID, None) + + with pytest.raises(ValueError): + instance.create(location_id=LOCATION_ID, clusters=[object(), object()]) + + +def test_instance_create_w_serve_nodes_and_clusters(): + instance = _make_instance(INSTANCE_ID, None) + + with pytest.raises(ValueError): + instance.create(serve_nodes=3, clusters=[object(), object()]) + + +def test_instance_create_w_default_storage_type_and_clusters(): + instance = _make_instance(INSTANCE_ID, None) + + with pytest.raises(ValueError): + instance.create(default_storage_type=1, clusters=[object(), object()]) + + +def _instance_api_response_for_create(): + import datetime + from google.api_core import operation + from google.longrunning import operations_pb2 + from google.protobuf.any_pb2 import Any + from google.cloud._helpers import _datetime_to_pb_timestamp + from google.cloud.bigtable_admin_v2.types import ( + bigtable_instance_admin as messages_v2_pb2, + ) + from google.cloud.bigtable_admin_v2.types import instance - PROJECT = "project" - INSTANCE_ID = "instance-id" - INSTANCE_NAME = "projects/" + PROJECT + "/instances/" + INSTANCE_ID - LOCATION_ID = "locid" - LOCATION = "projects/" + PROJECT + "/locations/" + LOCATION_ID - APP_PROFILE_PATH = ( - "projects/" + PROJECT + "/instances/" + INSTANCE_ID + "/appProfiles/" + NOW = datetime.datetime.utcnow() + NOW_PB = _datetime_to_pb_timestamp(NOW) + metadata = messages_v2_pb2.CreateInstanceMetadata(request_time=NOW_PB) + type_url = "type.googleapis.com/{}".format( + messages_v2_pb2.CreateInstanceMetadata._meta._pb.DESCRIPTOR.full_name ) - DISPLAY_NAME = "display_name" - LABELS = {"foo": "bar"} - OP_ID = 8915 - OP_NAME = "operations/projects/{}/instances/{}operations/{}".format( - PROJECT, INSTANCE_ID, OP_ID + response_pb = operations_pb2.Operation( + name=OP_NAME, + metadata=Any(type_url=type_url, value=metadata._pb.SerializeToString()), ) - TABLE_ID = "table_id" - TABLE_NAME = INSTANCE_NAME + "/tables/" + TABLE_ID - CLUSTER_ID = "cluster-id" - CLUSTER_NAME = INSTANCE_NAME + "/clusters/" + CLUSTER_ID - BACKUP_ID = "backup-id" - BACKUP_NAME = CLUSTER_NAME + "/backups/" + BACKUP_ID - - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.instance import Instance - - return Instance - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - @staticmethod - def _get_target_client_class(): - from google.cloud.bigtable.client import Client - - return Client - - def _make_client(self, *args, **kwargs): - return self._get_target_client_class()(*args, **kwargs) - - def test_constructor_defaults(self): - - client = object() - instance = self._make_one(self.INSTANCE_ID, client) - self.assertEqual(instance.instance_id, self.INSTANCE_ID) - self.assertEqual(instance.display_name, self.INSTANCE_ID) - 
self.assertIsNone(instance.type_) - self.assertIsNone(instance.labels) - self.assertIs(instance._client, client) - self.assertIsNone(instance.state) - - def test_constructor_non_default(self): - from google.cloud.bigtable import enums - - instance_type = enums.Instance.Type.DEVELOPMENT - state = enums.Instance.State.READY - labels = {"test": "test"} - client = object() - - instance = self._make_one( - self.INSTANCE_ID, - client, - display_name=self.DISPLAY_NAME, - instance_type=instance_type, - labels=labels, - _state=state, - ) - self.assertEqual(instance.instance_id, self.INSTANCE_ID) - self.assertEqual(instance.display_name, self.DISPLAY_NAME) - self.assertEqual(instance.type_, instance_type) - self.assertEqual(instance.labels, labels) - self.assertIs(instance._client, client) - self.assertEqual(instance.state, state) - - def test__update_from_pb_success(self): - from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 - from google.cloud.bigtable import enums - - instance_type = data_v2_pb2.Instance.Type.PRODUCTION - state = enums.Instance.State.READY - # todo type to type_? - instance_pb = data_v2_pb2.Instance( - display_name=self.DISPLAY_NAME, - type_=instance_type, - labels=self.LABELS, - state=state, - ) - - instance = self._make_one(None, None) - self.assertIsNone(instance.display_name) - self.assertIsNone(instance.type_) - self.assertIsNone(instance.labels) - instance._update_from_pb(instance_pb._pb) - self.assertEqual(instance.display_name, self.DISPLAY_NAME) - self.assertEqual(instance.type_, instance_type) - self.assertEqual(instance.labels, self.LABELS) - self.assertEqual(instance._state, state) - - def test__update_from_pb_success_defaults(self): - from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 - from google.cloud.bigtable import enums - - instance_pb = data_v2_pb2.Instance(display_name=self.DISPLAY_NAME) - - instance = self._make_one(None, None) - self.assertIsNone(instance.display_name) - self.assertIsNone(instance.type_) - self.assertIsNone(instance.labels) - instance._update_from_pb(instance_pb._pb) - self.assertEqual(instance.display_name, self.DISPLAY_NAME) - self.assertEqual(instance.type_, enums.Instance.Type.UNSPECIFIED) - self.assertFalse(instance.labels) - - def test__update_from_pb_no_display_name(self): - from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 - - instance_pb = data_v2_pb2.Instance() - instance = self._make_one(None, None) - self.assertIsNone(instance.display_name) - with self.assertRaises(ValueError): - instance._update_from_pb(instance_pb) - - def test_from_pb_success(self): - from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 - from google.cloud.bigtable import enums - - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance_type = enums.Instance.Type.PRODUCTION - state = enums.Instance.State.READY - instance_pb = data_v2_pb2.Instance( - name=self.INSTANCE_NAME, - display_name=self.INSTANCE_ID, - type_=instance_type, - labels=self.LABELS, - state=state, - ) - - klass = self._get_target_class() - instance = klass.from_pb(instance_pb, client) - self.assertIsInstance(instance, klass) - self.assertEqual(instance._client, client) - self.assertEqual(instance.instance_id, self.INSTANCE_ID) - self.assertEqual(instance.display_name, self.INSTANCE_ID) - self.assertEqual(instance.type_, instance_type) - self.assertEqual(instance.labels, self.LABELS) - self.assertEqual(instance._state, state) 
- - def test_from_pb_bad_instance_name(self): - from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 - - instance_name = "INCORRECT_FORMAT" - instance_pb = data_v2_pb2.Instance(name=instance_name) - - klass = self._get_target_class() - with self.assertRaises(ValueError): - klass.from_pb(instance_pb, None) - - def test_from_pb_project_mistmatch(self): - from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 - - ALT_PROJECT = "ALT_PROJECT" - credentials = _make_credentials() - client = self._make_client( - project=ALT_PROJECT, credentials=credentials, admin=True - ) - - self.assertNotEqual(self.PROJECT, ALT_PROJECT) - - instance_pb = data_v2_pb2.Instance(name=self.INSTANCE_NAME) - - klass = self._get_target_class() - with self.assertRaises(ValueError): - klass.from_pb(instance_pb, client) - - def test_name_property(self): - from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( - BigtableInstanceAdminClient, - ) - - api = mock.create_autospec(BigtableInstanceAdminClient) - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - - api.instance_path.return_value = "projects/project/instances/instance-id" - # Patch the the API method. - client._instance_admin_client = api - - instance = self._make_one(self.INSTANCE_ID, client) - self.assertEqual(instance.name, self.INSTANCE_NAME) - - def test___eq__(self): - client = object() - instance1 = self._make_one(self.INSTANCE_ID, client) - instance2 = self._make_one(self.INSTANCE_ID, client) - self.assertEqual(instance1, instance2) - - def test___eq__type_differ(self): - client = object() - instance1 = self._make_one(self.INSTANCE_ID, client) - instance2 = object() - self.assertNotEqual(instance1, instance2) - - def test___ne__same_value(self): - client = object() - instance1 = self._make_one(self.INSTANCE_ID, client) - instance2 = self._make_one(self.INSTANCE_ID, client) - comparison_val = instance1 != instance2 - self.assertFalse(comparison_val) - - def test___ne__(self): - instance1 = self._make_one("instance_id1", "client1") - instance2 = self._make_one("instance_id2", "client2") - self.assertNotEqual(instance1, instance2) - - def test_create_check_location_and_clusters(self): - instance = self._make_one(self.INSTANCE_ID, None) - - with self.assertRaises(ValueError): - instance.create(location_id=self.LOCATION_ID, clusters=[object(), object()]) - - def test_create_check_serve_nodes_and_clusters(self): - instance = self._make_one(self.INSTANCE_ID, None) - - with self.assertRaises(ValueError): - instance.create(serve_nodes=3, clusters=[object(), object()]) - - def test_create_check_default_storage_type_and_clusters(self): - instance = self._make_one(self.INSTANCE_ID, None) - - with self.assertRaises(ValueError): - instance.create(default_storage_type=1, clusters=[object(), object()]) - - def _instance_api_response_for_create(self): - import datetime - from google.api_core import operation - from google.longrunning import operations_pb2 - from google.protobuf.any_pb2 import Any - from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( - BigtableInstanceAdminClient, - ) - from google.cloud.bigtable_admin_v2.types import ( - bigtable_instance_admin as messages_v2_pb2, - ) - from google.cloud.bigtable_admin_v2.types import instance - - NOW = datetime.datetime.utcnow() - NOW_PB = _datetime_to_pb_timestamp(NOW) - metadata = 
messages_v2_pb2.CreateInstanceMetadata(request_time=NOW_PB) - type_url = "type.googleapis.com/{}".format( - messages_v2_pb2.CreateInstanceMetadata._meta._pb.DESCRIPTOR.full_name - ) - response_pb = operations_pb2.Operation( - name=self.OP_NAME, - metadata=Any(type_url=type_url, value=metadata._pb.SerializeToString()), - ) - response = operation.from_gapic( - response_pb, - mock.Mock(), - instance.Instance, - metadata_type=messages_v2_pb2.CreateInstanceMetadata, - ) - project_path_template = "projects/{}" - location_path_template = "projects/{}/locations/{}" - instance_api = mock.create_autospec(BigtableInstanceAdminClient) - instance_api.create_instance.return_value = response - instance_api.project_path = project_path_template.format - instance_api.location_path = location_path_template.format - instance_api.common_location_path = location_path_template.format - return instance_api, response - - def test_create(self): - from google.cloud.bigtable import enums - from google.cloud.bigtable_admin_v2.types import Instance - from google.cloud.bigtable_admin_v2.types import Cluster - import warnings - - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance = self._make_one( - self.INSTANCE_ID, - client, - self.DISPLAY_NAME, - enums.Instance.Type.PRODUCTION, - self.LABELS, - ) - instance_api, response = self._instance_api_response_for_create() - instance_api.common_project_path.return_value = "projects/project" - client._instance_admin_client = instance_api - serve_nodes = 3 - - with warnings.catch_warnings(record=True) as warned: - result = instance.create( - location_id=self.LOCATION_ID, serve_nodes=serve_nodes - ) - - cluster_pb = Cluster( - location=instance_api.location_path(self.PROJECT, self.LOCATION_ID), - serve_nodes=serve_nodes, - default_storage_type=enums.StorageType.UNSPECIFIED, - ) - instance_pb = Instance( - display_name=self.DISPLAY_NAME, - type_=enums.Instance.Type.PRODUCTION, - labels=self.LABELS, - ) - cluster_id = "{}-cluster".format(self.INSTANCE_ID) - instance_api.create_instance.assert_called_once_with( - request={ - "parent": instance_api.project_path(self.PROJECT), - "instance_id": self.INSTANCE_ID, - "instance": instance_pb, - "clusters": {cluster_id: cluster_pb}, - } - ) - - self.assertEqual(len(warned), 1) - self.assertIs(warned[0].category, DeprecationWarning) - - self.assertIs(result, response) - - def test_create_w_clusters(self): - from google.cloud.bigtable import enums - from google.cloud.bigtable.cluster import Cluster - from google.cloud.bigtable_admin_v2.types import Cluster as cluster_pb - from google.cloud.bigtable_admin_v2.types import Instance as instance_pb - - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance = self._make_one( - self.INSTANCE_ID, - client, - self.DISPLAY_NAME, - enums.Instance.Type.PRODUCTION, - self.LABELS, - ) - instance_api, response = self._instance_api_response_for_create() - instance_api.common_project_path.return_value = "projects/project" - client._instance_admin_client = instance_api - - # Perform the method and check the result. 
- cluster_id_1 = "cluster-1" - cluster_id_2 = "cluster-2" - location_id_1 = "location-id-1" - location_id_2 = "location-id-2" - serve_nodes_1 = 3 - serve_nodes_2 = 5 - clusters = [ - Cluster( - cluster_id_1, - instance, - location_id=location_id_1, - serve_nodes=serve_nodes_1, - ), - Cluster( - cluster_id_2, - instance, - location_id=location_id_2, - serve_nodes=serve_nodes_2, - ), - ] - - result = instance.create(clusters=clusters) - - cluster_pb_1 = cluster_pb( - location=instance_api.location_path(self.PROJECT, location_id_1), + response = operation.from_gapic( + response_pb, + mock.Mock(), + instance.Instance, + metadata_type=messages_v2_pb2.CreateInstanceMetadata, + ) + project_path_template = "projects/{}" + location_path_template = "projects/{}/locations/{}" + api = _make_instance_admin_api() + api.create_instance.return_value = response + api.project_path = project_path_template.format + api.location_path = location_path_template.format + api.common_location_path = location_path_template.format + return api, response + + +def test_instance_create(): + from google.cloud.bigtable import enums + from google.cloud.bigtable_admin_v2.types import Instance + from google.cloud.bigtable_admin_v2.types import Cluster + import warnings + + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials, admin=True) + instance = _make_instance( + INSTANCE_ID, client, DISPLAY_NAME, enums.Instance.Type.PRODUCTION, LABELS, + ) + api, response = _instance_api_response_for_create() + client._instance_admin_client = api + api.common_project_path.return_value = "projects/project" + serve_nodes = 3 + + with warnings.catch_warnings(record=True) as warned: + result = instance.create(location_id=LOCATION_ID, serve_nodes=serve_nodes) + + assert result is response + + cluster_pb = Cluster( + location=api.location_path(PROJECT, LOCATION_ID), + serve_nodes=serve_nodes, + default_storage_type=enums.StorageType.UNSPECIFIED, + ) + instance_pb = Instance( + display_name=DISPLAY_NAME, type_=enums.Instance.Type.PRODUCTION, labels=LABELS, + ) + cluster_id = "{}-cluster".format(INSTANCE_ID) + api.create_instance.assert_called_once_with( + request={ + "parent": api.project_path(PROJECT), + "instance_id": INSTANCE_ID, + "instance": instance_pb, + "clusters": {cluster_id: cluster_pb}, + } + ) + + assert len(warned) == 1 + assert warned[0].category is DeprecationWarning + + +def test_instance_create_w_clusters(): + from google.cloud.bigtable import enums + from google.cloud.bigtable.cluster import Cluster + from google.cloud.bigtable_admin_v2.types import Cluster as cluster_pb + from google.cloud.bigtable_admin_v2.types import Instance as instance_pb + + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials, admin=True) + instance = _make_instance( + INSTANCE_ID, client, DISPLAY_NAME, enums.Instance.Type.PRODUCTION, LABELS, + ) + api, response = _instance_api_response_for_create() + client._instance_admin_client = api + api.common_project_path.return_value = "projects/project" + cluster_id_1 = "cluster-1" + cluster_id_2 = "cluster-2" + location_id_1 = "location-id-1" + location_id_2 = "location-id-2" + serve_nodes_1 = 3 + serve_nodes_2 = 5 + clusters = [ + Cluster( + cluster_id_1, + instance, + location_id=location_id_1, serve_nodes=serve_nodes_1, - default_storage_type=enums.StorageType.UNSPECIFIED, - ) - cluster_pb_2 = cluster_pb( - location=instance_api.location_path(self.PROJECT, location_id_2), + ), + Cluster( + cluster_id_2, + 
instance, + location_id=location_id_2, serve_nodes=serve_nodes_2, - default_storage_type=enums.StorageType.UNSPECIFIED, - ) - instance_pb = instance_pb( - display_name=self.DISPLAY_NAME, - type_=enums.Instance.Type.PRODUCTION, - labels=self.LABELS, - ) - instance_api.create_instance.assert_called_once_with( - request={ - "parent": instance_api.project_path(self.PROJECT), - "instance_id": self.INSTANCE_ID, - "instance": instance_pb, - "clusters": {cluster_id_1: cluster_pb_1, cluster_id_2: cluster_pb_2}, - } - ) - - self.assertIs(result, response) - - def test_exists(self): - from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( - BigtableInstanceAdminClient, - ) - from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 - from google.api_core import exceptions - - api = mock.create_autospec(BigtableInstanceAdminClient) - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - - # Create response_pb - instance_name = client.instance_admin_client.instance_path( - self.PROJECT, self.INSTANCE_ID - ) - response_pb = data_v2_pb2.Instance(name=instance_name) - - # Patch the stub used by the API method. - client._instance_admin_client = api - instance_admin_stub = client._instance_admin_client - - instance_admin_stub.get_instance.side_effect = [ - response_pb, - exceptions.NotFound("testing"), - exceptions.BadRequest("testing"), - ] - - # Perform the method and check the result. - non_existing_instance_id = "instance-id-2" - alt_instance_1 = self._make_one(self.INSTANCE_ID, client) - alt_instance_2 = self._make_one(non_existing_instance_id, client) - self.assertTrue(alt_instance_1.exists()) - self.assertFalse(alt_instance_2.exists()) - - with self.assertRaises(exceptions.BadRequest): - alt_instance_2.exists() - - def test_reload(self): - from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 - from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( - BigtableInstanceAdminClient, - ) - from google.cloud.bigtable import enums - - api = mock.create_autospec(BigtableInstanceAdminClient) - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance = self._make_one(self.INSTANCE_ID, client) - - # Create response_pb - DISPLAY_NAME = u"hey-hi-hello" - instance_type = enums.Instance.Type.PRODUCTION - response_pb = data_v2_pb2.Instance( - display_name=DISPLAY_NAME, type_=instance_type, labels=self.LABELS - ) - - # Patch the stub used by the API method. - client._instance_admin_client = api - bigtable_instance_stub = client._instance_admin_client - bigtable_instance_stub.get_instance.side_effect = [response_pb] - - # Create expected_result. - expected_result = None # reload() has no return value. - - # Check Instance optional config values before. - self.assertEqual(instance.display_name, self.INSTANCE_ID) - - # Perform the method and check the result. - result = instance.reload() - self.assertEqual(result, expected_result) - - # Check Instance optional config values before. 
- self.assertEqual(instance.display_name, DISPLAY_NAME) - - def _instance_api_response_for_update(self): - import datetime - from google.api_core import operation - from google.longrunning import operations_pb2 - from google.protobuf.any_pb2 import Any - from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( - BigtableInstanceAdminClient, - ) - from google.cloud.bigtable_admin_v2.types import ( - bigtable_instance_admin as messages_v2_pb2, - ) - from google.cloud.bigtable_admin_v2.types import instance - - NOW = datetime.datetime.utcnow() - NOW_PB = _datetime_to_pb_timestamp(NOW) - metadata = messages_v2_pb2.UpdateInstanceMetadata(request_time=NOW_PB) - type_url = "type.googleapis.com/{}".format( - messages_v2_pb2.UpdateInstanceMetadata._meta._pb.DESCRIPTOR.full_name - ) - response_pb = operations_pb2.Operation( - name=self.OP_NAME, - metadata=Any(type_url=type_url, value=metadata._pb.SerializeToString()), - ) - response = operation.from_gapic( - response_pb, - mock.Mock(), - instance.Instance, - metadata_type=messages_v2_pb2.UpdateInstanceMetadata, - ) - instance_path_template = "projects/{project}/instances/{instance}" - instance_api = mock.create_autospec(BigtableInstanceAdminClient) - instance_api.partial_update_instance.return_value = response - instance_api.instance_path = instance_path_template.format - return instance_api, response - - def test_update(self): - from google.cloud.bigtable import enums - from google.protobuf import field_mask_pb2 - from google.cloud.bigtable_admin_v2.types import Instance - - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance = self._make_one( - self.INSTANCE_ID, - client, - display_name=self.DISPLAY_NAME, - instance_type=enums.Instance.Type.DEVELOPMENT, - labels=self.LABELS, - ) - instance_api, response = self._instance_api_response_for_update() - client._instance_admin_client = instance_api - - result = instance.update() - - instance_pb = Instance( - name=instance.name, - display_name=instance.display_name, - type_=instance.type_, - labels=instance.labels, - ) - update_mask_pb = field_mask_pb2.FieldMask( - paths=["display_name", "type", "labels"] - ) - - instance_api.partial_update_instance.assert_called_once_with( - request={"instance": instance_pb, "update_mask": update_mask_pb} - ) - - self.assertIs(result, response) - - def test_update_empty(self): - from google.protobuf import field_mask_pb2 - from google.cloud.bigtable_admin_v2.types import Instance - - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance = self._make_one(None, client) - instance_api, response = self._instance_api_response_for_update() - client._instance_admin_client = instance_api - - result = instance.update() - - instance_pb = Instance( - name=instance.name, - display_name=instance.display_name, - type_=instance.type_, - labels=instance.labels, - ) - update_mask_pb = field_mask_pb2.FieldMask() - - instance_api.partial_update_instance.assert_called_once_with( - request={"instance": instance_pb, "update_mask": update_mask_pb} - ) - - self.assertIs(result, response) - - def test_delete(self): - from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( - BigtableInstanceAdminClient, - ) - - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, 
admin=True - ) - instance = self._make_one(self.INSTANCE_ID, client) - instance_api = mock.create_autospec(BigtableInstanceAdminClient) - instance_api.delete_instance.return_value = None - client._instance_admin_client = instance_api - - result = instance.delete() - - instance_api.delete_instance.assert_called_once_with( - request={"name": instance.name} - ) - - self.assertIsNone(result) - - def test_get_iam_policy(self): - from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( - BigtableInstanceAdminClient, - ) - from google.iam.v1 import policy_pb2 - from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE - - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance = self._make_one(self.INSTANCE_ID, client) - - version = 1 - etag = b"etag_v1" - members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"] - bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": members}] - iam_policy = policy_pb2.Policy(version=version, etag=etag, bindings=bindings) - - # Patch the stub used by the API method. - instance_api = mock.create_autospec(BigtableInstanceAdminClient) - client._instance_admin_client = instance_api - instance_api.get_iam_policy.return_value = iam_policy - - # Perform the method and check the result. - result = instance.get_iam_policy() - - instance_api.get_iam_policy.assert_called_once_with( - request={"resource": instance.name} - ) - self.assertEqual(result.version, version) - self.assertEqual(result.etag, etag) - admins = result.bigtable_admins - self.assertEqual(len(admins), len(members)) - for found, expected in zip(sorted(admins), sorted(members)): - self.assertEqual(found, expected) - - def test_get_iam_policy_w_requested_policy_version(self): - from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( - BigtableInstanceAdminClient, - ) - from google.iam.v1 import policy_pb2, options_pb2 - from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE - - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance = self._make_one(self.INSTANCE_ID, client) - - version = 1 - etag = b"etag_v1" - members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"] - bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": members}] - iam_policy = policy_pb2.Policy(version=version, etag=etag, bindings=bindings) - - # Patch the stub used by the API method. - instance_api = mock.create_autospec(BigtableInstanceAdminClient) - client._instance_admin_client = instance_api - instance_api.get_iam_policy.return_value = iam_policy - - # Perform the method and check the result. 
- result = instance.get_iam_policy(requested_policy_version=3) - - instance_api.get_iam_policy.assert_called_once_with( - request={ - "resource": instance.name, - "options_": options_pb2.GetPolicyOptions(requested_policy_version=3), - } - ) - self.assertEqual(result.version, version) - self.assertEqual(result.etag, etag) - admins = result.bigtable_admins - self.assertEqual(len(admins), len(members)) - for found, expected in zip(sorted(admins), sorted(members)): - self.assertEqual(found, expected) - - def test_set_iam_policy(self): - from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( - BigtableInstanceAdminClient, - ) - from google.iam.v1 import policy_pb2 - from google.cloud.bigtable.policy import Policy - from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE - - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance = self._make_one(self.INSTANCE_ID, client) - - version = 1 - etag = b"etag_v1" - members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"] - bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": sorted(members)}] - iam_policy_pb = policy_pb2.Policy(version=version, etag=etag, bindings=bindings) - - # Patch the stub used by the API method. - instance_api = mock.create_autospec(BigtableInstanceAdminClient) - instance_api.set_iam_policy.return_value = iam_policy_pb - client._instance_admin_client = instance_api - - # Perform the method and check the result. - iam_policy = Policy(etag=etag, version=version) - iam_policy[BIGTABLE_ADMIN_ROLE] = [ - Policy.user("user1@test.com"), - Policy.service_account("service_acc1@test.com"), - ] - - result = instance.set_iam_policy(iam_policy) - - instance_api.set_iam_policy.assert_called_once_with( - request={"resource": instance.name, "policy": iam_policy_pb} - ) - self.assertEqual(result.version, version) - self.assertEqual(result.etag, etag) - admins = result.bigtable_admins - self.assertEqual(len(admins), len(members)) - for found, expected in zip(sorted(admins), sorted(members)): - self.assertEqual(found, expected) - - def test_test_iam_permissions(self): - from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( - BigtableInstanceAdminClient, - ) - from google.iam.v1 import iam_policy_pb2 - - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance = self._make_one(self.INSTANCE_ID, client) - - permissions = ["bigtable.tables.create", "bigtable.clusters.create"] - - response = iam_policy_pb2.TestIamPermissionsResponse(permissions=permissions) - - instance_api = mock.create_autospec(BigtableInstanceAdminClient) - instance_api.test_iam_permissions.return_value = response - client._instance_admin_client = instance_api - - result = instance.test_iam_permissions(permissions) - - self.assertEqual(result, permissions) - instance_api.test_iam_permissions.assert_called_once_with( - request={"resource": instance.name, "permissions": permissions} - ) - - def test_cluster_factory(self): - from google.cloud.bigtable import enums - - CLUSTER_ID = "{}-cluster".format(self.INSTANCE_ID) - LOCATION_ID = "us-central1-c" - SERVE_NODES = 3 - STORAGE_TYPE = enums.StorageType.HDD - - instance = self._make_one(self.INSTANCE_ID, None) - - cluster = instance.cluster( - CLUSTER_ID, - location_id=LOCATION_ID, - serve_nodes=SERVE_NODES, - default_storage_type=STORAGE_TYPE, - ) - self.assertIsInstance(cluster, Cluster) - 
self.assertEqual(cluster.cluster_id, CLUSTER_ID) - self.assertEqual(cluster.location_id, LOCATION_ID) - self.assertIsNone(cluster._state) - self.assertEqual(cluster.serve_nodes, SERVE_NODES) - self.assertEqual(cluster.default_storage_type, STORAGE_TYPE) - - def test_list_clusters(self): - from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( - BigtableInstanceAdminClient, - ) - from google.cloud.bigtable_admin_v2.types import ( - bigtable_instance_admin as messages_v2_pb2, - ) - from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 - from google.cloud.bigtable.instance import Instance - from google.cloud.bigtable.instance import Cluster - - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance = Instance(self.INSTANCE_ID, client) - - failed_location = "FAILED" - cluster_id1 = "cluster-id1" - cluster_id2 = "cluster-id2" - cluster_path_template = "projects/{}/instances/{}/clusters/{}" - cluster_name1 = cluster_path_template.format( - self.PROJECT, self.INSTANCE_ID, cluster_id1 - ) - cluster_name2 = cluster_path_template.format( - self.PROJECT, self.INSTANCE_ID, cluster_id2 - ) - - # Create response_pb - response_pb = messages_v2_pb2.ListClustersResponse( - failed_locations=[failed_location], - clusters=[ - data_v2_pb2.Cluster(name=cluster_name1), - data_v2_pb2.Cluster(name=cluster_name2), - ], - ) - - # Patch the stub used by the API method. - instance_api = mock.create_autospec(BigtableInstanceAdminClient) - instance_api.list_clusters.side_effect = [response_pb] - instance_api.cluster_path = cluster_path_template.format - client._instance_admin_client = instance_api - - # Perform the method and check the result. - clusters, failed_locations = instance.list_clusters() - - cluster_1, cluster_2 = clusters - - self.assertIsInstance(cluster_1, Cluster) - self.assertEqual(cluster_1.name, cluster_name1) - - self.assertIsInstance(cluster_2, Cluster) - self.assertEqual(cluster_2.name, cluster_name2) - - self.assertEqual(failed_locations, [failed_location]) - - def test_table_factory(self): - from google.cloud.bigtable.table import Table - - app_profile_id = "appProfileId1262094415" - instance = self._make_one(self.INSTANCE_ID, None) - - table = instance.table(self.TABLE_ID, app_profile_id=app_profile_id) - self.assertIsInstance(table, Table) - self.assertEqual(table.table_id, self.TABLE_ID) - self.assertEqual(table._instance, instance) - self.assertEqual(table._app_profile_id, app_profile_id) - - def _list_tables_helper(self, table_name=None): - from google.cloud.bigtable_admin_v2.types import table as table_data_v2_pb2 - from google.cloud.bigtable_admin_v2.types import ( - bigtable_table_admin as table_messages_v1_pb2, - ) - from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - BigtableTableAdminClient, - ) - from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( - BigtableInstanceAdminClient, - ) - - table_api = mock.create_autospec(BigtableTableAdminClient) - instance_api = mock.create_autospec(BigtableInstanceAdminClient) - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance = self._make_one(self.INSTANCE_ID, client) - - instance_api.instance_path.return_value = instance.name - # Create response_pb - if table_name is None: - table_name = self.TABLE_NAME - - response_pb = table_messages_v1_pb2.ListTablesResponse( - 
tables=[table_data_v2_pb2.Table(name=table_name)] - ) - - # Patch the stub used by the API method. - client._table_admin_client = table_api - client._instance_admin_client = instance_api - bigtable_table_stub = client._table_admin_client - bigtable_table_stub.list_tables.side_effect = [response_pb] - - # Create expected_result. - expected_table = instance.table(self.TABLE_ID) - expected_result = [expected_table] - - # Perform the method and check the result. - result = instance.list_tables() - - self.assertEqual(result, expected_result) - - def test_list_tables(self): - self._list_tables_helper() - - def test_list_tables_failure_bad_split(self): - with self.assertRaises(ValueError): - self._list_tables_helper(table_name="wrong-format") - - def test_list_tables_failure_name_bad_before(self): - BAD_TABLE_NAME = ( - "nonempty-section-before" - + "projects/" - + self.PROJECT - + "/instances/" - + self.INSTANCE_ID - + "/tables/" - + self.TABLE_ID - ) - with self.assertRaises(ValueError): - self._list_tables_helper(table_name=BAD_TABLE_NAME) - - def test_app_profile_factory(self): - from google.cloud.bigtable.enums import RoutingPolicyType - - APP_PROFILE_ID_1 = "app-profile-id-1" - ANY = RoutingPolicyType.ANY - DESCRIPTION_1 = "routing policy any" - APP_PROFILE_ID_2 = "app-profile-id-2" - SINGLE = RoutingPolicyType.SINGLE - DESCRIPTION_2 = "routing policy single" - ALLOW_WRITES = True - CLUSTER_ID = "cluster-id" - - instance = self._make_one(self.INSTANCE_ID, None) - - app_profile1 = instance.app_profile( - APP_PROFILE_ID_1, routing_policy_type=ANY, description=DESCRIPTION_1 - ) - - app_profile2 = instance.app_profile( - APP_PROFILE_ID_2, - routing_policy_type=SINGLE, - description=DESCRIPTION_2, - cluster_id=CLUSTER_ID, - allow_transactional_writes=ALLOW_WRITES, - ) - self.assertEqual(app_profile1.app_profile_id, APP_PROFILE_ID_1) - self.assertIs(app_profile1._instance, instance) - self.assertEqual(app_profile1.routing_policy_type, ANY) - self.assertEqual(app_profile1.description, DESCRIPTION_1) - self.assertEqual(app_profile2.app_profile_id, APP_PROFILE_ID_2) - self.assertIs(app_profile2._instance, instance) - self.assertEqual(app_profile2.routing_policy_type, SINGLE) - self.assertEqual(app_profile2.description, DESCRIPTION_2) - self.assertEqual(app_profile2.cluster_id, CLUSTER_ID) - self.assertEqual(app_profile2.allow_transactional_writes, ALLOW_WRITES) - - def test_list_app_profiles(self): - from google.api_core.page_iterator import Iterator - from google.api_core.page_iterator import Page - from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( - BigtableInstanceAdminClient, - ) - from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 - from google.cloud.bigtable.app_profile import AppProfile - - class _Iterator(Iterator): - def __init__(self, pages): - super(_Iterator, self).__init__(client=None) - self._pages = pages - - def _next_page(self): - if self._pages: - page, self._pages = self._pages[0], self._pages[1:] - return Page(self, page, self.item_to_value) - - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT, credentials=credentials, admin=True - ) - instance = self._make_one(self.INSTANCE_ID, client) - - # Setup Expected Response - app_profile_path_template = "projects/{}/instances/{}/appProfiles/{}" - app_profile_id1 = "app-profile-id1" - app_profile_id2 = "app-profile-id2" - app_profile_name1 = app_profile_path_template.format( - self.PROJECT, self.INSTANCE_ID, app_profile_id1 - ) - app_profile_name2 = 
app_profile_path_template.format( - self.PROJECT, self.INSTANCE_ID, app_profile_id2 - ) - routing_policy = data_v2_pb2.AppProfile.MultiClusterRoutingUseAny() - - app_profiles = [ - data_v2_pb2.AppProfile( - name=app_profile_name1, multi_cluster_routing_use_any=routing_policy - ), - data_v2_pb2.AppProfile( - name=app_profile_name2, multi_cluster_routing_use_any=routing_policy - ), - ] - iterator = _Iterator(pages=[app_profiles]) - - # Patch the stub used by the API method. - instance_api = mock.create_autospec(BigtableInstanceAdminClient) - client._instance_admin_client = instance_api - instance_api.app_profile_path = app_profile_path_template.format - instance_api.list_app_profiles.return_value = iterator - - # Perform the method and check the result. - app_profiles = instance.list_app_profiles() - - app_profile_1, app_profile_2 = app_profiles - - self.assertIsInstance(app_profile_1, AppProfile) - self.assertEqual(app_profile_1.name, app_profile_name1) - - self.assertIsInstance(app_profile_2, AppProfile) - self.assertEqual(app_profile_2.name, app_profile_name2) + ), + ] + + result = instance.create(clusters=clusters) + + assert result is response + + cluster_pb_1 = cluster_pb( + location=api.location_path(PROJECT, location_id_1), + serve_nodes=serve_nodes_1, + default_storage_type=enums.StorageType.UNSPECIFIED, + ) + cluster_pb_2 = cluster_pb( + location=api.location_path(PROJECT, location_id_2), + serve_nodes=serve_nodes_2, + default_storage_type=enums.StorageType.UNSPECIFIED, + ) + instance_pb = instance_pb( + display_name=DISPLAY_NAME, type_=enums.Instance.Type.PRODUCTION, labels=LABELS, + ) + api.create_instance.assert_called_once_with( + request={ + "parent": api.project_path(PROJECT), + "instance_id": INSTANCE_ID, + "instance": instance_pb, + "clusters": {cluster_id_1: cluster_pb_1, cluster_id_2: cluster_pb_2}, + } + ) + + +def test_instance_exists_hit(): + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 + + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials, admin=True) + response_pb = data_v2_pb2.Instance(name=INSTANCE_NAME) + api = client._instance_admin_client = _make_instance_admin_api() + api.instance_path.return_value = INSTANCE_NAME + api.get_instance.return_value = response_pb + instance = _make_instance(INSTANCE_ID, client) + + assert instance.exists() + + api.get_instance.assert_called_once_with(request={"name": INSTANCE_NAME}) + + +def test_instance_exists_miss(): + from google.api_core import exceptions + + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials, admin=True) + + api = client._instance_admin_client = _make_instance_admin_api() + api.instance_path.return_value = INSTANCE_NAME + api.get_instance.side_effect = exceptions.NotFound("testing") + + non_existing_instance_id = "instance-id-2" + instance = _make_instance(non_existing_instance_id, client) + + assert not instance.exists() + + api.get_instance.assert_called_once_with(request={"name": INSTANCE_NAME}) + + +def test_instance_exists_w_error(): + from google.api_core import exceptions + + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials, admin=True) + + api = client._instance_admin_client = _make_instance_admin_api() + api.instance_path.return_value = INSTANCE_NAME + api.get_instance.side_effect = exceptions.BadRequest("testing") + instance = _make_instance(INSTANCE_ID, client) + + with pytest.raises(exceptions.BadRequest): + instance.exists() 
+ + api.get_instance.assert_called_once_with(request={"name": INSTANCE_NAME}) + + +def test_instance_reload(): + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 + from google.cloud.bigtable import enums + + DISPLAY_NAME = u"hey-hi-hello" + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials, admin=True) + instance = _make_instance(INSTANCE_ID, client) + response_pb = data_v2_pb2.Instance( + display_name=DISPLAY_NAME, type_=enums.Instance.Type.PRODUCTION, labels=LABELS + ) + api = client._instance_admin_client = _make_instance_admin_api() + api.get_instance.side_effect = [response_pb] + assert instance.display_name == INSTANCE_ID + + result = instance.reload() + + assert result is None + assert instance.display_name == DISPLAY_NAME + + +def _instance_api_response_for_update(): + import datetime + from google.api_core import operation + from google.longrunning import operations_pb2 + from google.protobuf.any_pb2 import Any + from google.cloud._helpers import _datetime_to_pb_timestamp + from google.cloud.bigtable_admin_v2.types import ( + bigtable_instance_admin as messages_v2_pb2, + ) + from google.cloud.bigtable_admin_v2.types import instance + + NOW = datetime.datetime.utcnow() + NOW_PB = _datetime_to_pb_timestamp(NOW) + metadata = messages_v2_pb2.UpdateInstanceMetadata(request_time=NOW_PB) + type_url = "type.googleapis.com/{}".format( + messages_v2_pb2.UpdateInstanceMetadata._meta._pb.DESCRIPTOR.full_name + ) + response_pb = operations_pb2.Operation( + name=OP_NAME, + metadata=Any(type_url=type_url, value=metadata._pb.SerializeToString()), + ) + response = operation.from_gapic( + response_pb, + mock.Mock(), + instance.Instance, + metadata_type=messages_v2_pb2.UpdateInstanceMetadata, + ) + instance_path_template = "projects/{project}/instances/{instance}" + api = _make_instance_admin_api() + api.partial_update_instance.return_value = response + api.instance_path = instance_path_template.format + return api, response + + +def test_instance_update(): + from google.cloud.bigtable import enums + from google.protobuf import field_mask_pb2 + from google.cloud.bigtable_admin_v2.types import Instance + + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials, admin=True) + instance = _make_instance( + INSTANCE_ID, + client, + display_name=DISPLAY_NAME, + instance_type=enums.Instance.Type.DEVELOPMENT, + labels=LABELS, + ) + api, response = _instance_api_response_for_update() + client._instance_admin_client = api + + result = instance.update() + + assert result is response + + instance_pb = Instance( + name=instance.name, + display_name=instance.display_name, + type_=instance.type_, + labels=instance.labels, + ) + update_mask_pb = field_mask_pb2.FieldMask(paths=["display_name", "type", "labels"]) + + api.partial_update_instance.assert_called_once_with( + request={"instance": instance_pb, "update_mask": update_mask_pb} + ) + + +def test_instance_update_empty(): + from google.protobuf import field_mask_pb2 + from google.cloud.bigtable_admin_v2.types import Instance + + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials, admin=True) + instance = _make_instance(None, client) + api, response = _instance_api_response_for_update() + client._instance_admin_client = api + + result = instance.update() + + assert result is response + + instance_pb = Instance( + name=instance.name, + display_name=instance.display_name, + type_=instance.type_, + 
labels=instance.labels, + ) + update_mask_pb = field_mask_pb2.FieldMask() + + api.partial_update_instance.assert_called_once_with( + request={"instance": instance_pb, "update_mask": update_mask_pb} + ) + + +def test_instance_delete(): + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials, admin=True) + instance = _make_instance(INSTANCE_ID, client) + api = client._instance_admin_client = _make_instance_admin_api() + api.delete_instance.return_value = None + + result = instance.delete() + + assert result is None + + api.delete_instance.assert_called_once_with(request={"name": instance.name}) + + +def test_instance_get_iam_policy(): + from google.iam.v1 import policy_pb2 + from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE + + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials, admin=True) + instance = _make_instance(INSTANCE_ID, client) + + version = 1 + etag = b"etag_v1" + members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"] + bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": members}] + iam_policy = policy_pb2.Policy(version=version, etag=etag, bindings=bindings) + api = client._instance_admin_client = _make_instance_admin_api() + api.get_iam_policy.return_value = iam_policy + + result = instance.get_iam_policy() + + assert result.version == version + assert result.etag == etag + admins = result.bigtable_admins + assert len(admins) == len(members) + + for found, expected in zip(sorted(admins), sorted(members)): + assert found == expected + api.get_iam_policy.assert_called_once_with(request={"resource": instance.name}) + + +def test_instance_get_iam_policy_w_requested_policy_version(): + from google.iam.v1 import policy_pb2, options_pb2 + from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE + + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials, admin=True) + instance = _make_instance(INSTANCE_ID, client) + + version = 1 + etag = b"etag_v1" + members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"] + bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": members}] + iam_policy = policy_pb2.Policy(version=version, etag=etag, bindings=bindings) + + api = client._instance_admin_client = _make_instance_admin_api() + api.get_iam_policy.return_value = iam_policy + + result = instance.get_iam_policy(requested_policy_version=3) + + assert result.version == version + assert result.etag == etag + admins = result.bigtable_admins + assert len(admins) == len(members) + for found, expected in zip(sorted(admins), sorted(members)): + assert found == expected + + api.get_iam_policy.assert_called_once_with( + request={ + "resource": instance.name, + "options_": options_pb2.GetPolicyOptions(requested_policy_version=3), + } + ) + + +def test_instance_set_iam_policy(): + from google.iam.v1 import policy_pb2 + from google.cloud.bigtable.policy import Policy + from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE + + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials, admin=True) + instance = _make_instance(INSTANCE_ID, client) + + version = 1 + etag = b"etag_v1" + members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"] + bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": sorted(members)}] + iam_policy_pb = policy_pb2.Policy(version=version, etag=etag, bindings=bindings) + + api = client._instance_admin_client = _make_instance_admin_api() + 
api.set_iam_policy.return_value = iam_policy_pb + iam_policy = Policy(etag=etag, version=version) + iam_policy[BIGTABLE_ADMIN_ROLE] = [ + Policy.user("user1@test.com"), + Policy.service_account("service_acc1@test.com"), + ] + + result = instance.set_iam_policy(iam_policy) + + api.set_iam_policy.assert_called_once_with( + request={"resource": instance.name, "policy": iam_policy_pb} + ) + assert result.version == version + assert result.etag == etag + admins = result.bigtable_admins + assert len(admins) == len(members) + for found, expected in zip(sorted(admins), sorted(members)): + assert found == expected + + +def test_instance_test_iam_permissions(): + from google.iam.v1 import iam_policy_pb2 + + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials, admin=True) + instance = _make_instance(INSTANCE_ID, client) + + permissions = ["bigtable.tables.create", "bigtable.clusters.create"] + + response = iam_policy_pb2.TestIamPermissionsResponse(permissions=permissions) + api = client._instance_admin_client = _make_instance_admin_api() + api.test_iam_permissions.return_value = response + + result = instance.test_iam_permissions(permissions) + + assert result == permissions + api.test_iam_permissions.assert_called_once_with( + request={"resource": instance.name, "permissions": permissions} + ) + + +def test_instance_cluster_factory(): + from google.cloud.bigtable import enums + + CLUSTER_ID = "{}-cluster".format(INSTANCE_ID) + LOCATION_ID = "us-central1-c" + SERVE_NODES = 3 + STORAGE_TYPE = enums.StorageType.HDD + + instance = _make_instance(INSTANCE_ID, None) + + cluster = instance.cluster( + CLUSTER_ID, + location_id=LOCATION_ID, + serve_nodes=SERVE_NODES, + default_storage_type=STORAGE_TYPE, + ) + assert isinstance(cluster, Cluster) + assert cluster.cluster_id == CLUSTER_ID + assert cluster.location_id == LOCATION_ID + assert cluster._state is None + assert cluster.serve_nodes == SERVE_NODES + assert cluster.default_storage_type == STORAGE_TYPE + + +def test_instance_list_clusters(): + from google.cloud.bigtable_admin_v2.types import ( + bigtable_instance_admin as messages_v2_pb2, + ) + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 + from google.cloud.bigtable.instance import Instance + from google.cloud.bigtable.instance import Cluster + + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials, admin=True) + instance = Instance(INSTANCE_ID, client) + + failed_location = "FAILED" + cluster_id1 = "cluster-id1" + cluster_id2 = "cluster-id2" + cluster_path_template = "projects/{}/instances/{}/clusters/{}" + cluster_name1 = cluster_path_template.format(PROJECT, INSTANCE_ID, cluster_id1) + cluster_name2 = cluster_path_template.format(PROJECT, INSTANCE_ID, cluster_id2) + response_pb = messages_v2_pb2.ListClustersResponse( + failed_locations=[failed_location], + clusters=[ + data_v2_pb2.Cluster(name=cluster_name1), + data_v2_pb2.Cluster(name=cluster_name2), + ], + ) + api = client._instance_admin_client = _make_instance_admin_api() + api.list_clusters.side_effect = [response_pb] + api.cluster_path = cluster_path_template.format + + # Perform the method and check the result. 
+ clusters, failed_locations = instance.list_clusters() + + cluster_1, cluster_2 = clusters + + assert isinstance(cluster_1, Cluster) + assert cluster_1.name == cluster_name1 + + assert isinstance(cluster_2, Cluster) + assert cluster_2.name == cluster_name2 + + assert failed_locations == [failed_location] + + +def test_instance_table_factory(): + from google.cloud.bigtable.table import Table + + app_profile_id = "appProfileId1262094415" + instance = _make_instance(INSTANCE_ID, None) + + table = instance.table(TABLE_ID, app_profile_id=app_profile_id) + assert isinstance(table, Table) + assert table.table_id == TABLE_ID + assert table._instance == instance + assert table._app_profile_id == app_profile_id + + +def _list_tables_helper(table_name=None): + from google.cloud.bigtable_admin_v2.types import table as table_data_v2_pb2 + from google.cloud.bigtable_admin_v2.types import ( + bigtable_table_admin as table_messages_v1_pb2, + ) + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + BigtableTableAdminClient, + ) + + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials, admin=True) + instance = _make_instance(INSTANCE_ID, client) + + instance_api = client._instance_admin_client = _make_instance_admin_api() + instance_api.instance_path.return_value = "projects/project/instances/instance-id" + table_api = client._table_admin_client = mock.create_autospec( + BigtableTableAdminClient + ) + if table_name is None: + table_name = TABLE_NAME + + response_pb = table_messages_v1_pb2.ListTablesResponse( + tables=[table_data_v2_pb2.Table(name=table_name)] + ) + + table_api.list_tables.side_effect = [response_pb] + + result = instance.list_tables() + + expected_table = instance.table(TABLE_ID) + assert result == [expected_table] + + +def test_instance_list_tables(): + _list_tables_helper() + + +def test_instance_list_tables_failure_bad_split(): + with pytest.raises(ValueError): + _list_tables_helper(table_name="wrong-format") + + +def test_instance_list_tables_failure_name_bad_before(): + BAD_TABLE_NAME = ( + "nonempty-section-before" + + "projects/" + + PROJECT + + "/instances/" + + INSTANCE_ID + + "/tables/" + + TABLE_ID + ) + with pytest.raises(ValueError): + _list_tables_helper(table_name=BAD_TABLE_NAME) + + +def test_instance_app_profile_factory(): + from google.cloud.bigtable.enums import RoutingPolicyType + + instance = _make_instance(INSTANCE_ID, None) + + app_profile1 = instance.app_profile( + APP_PROFILE_ID_1, + routing_policy_type=RoutingPolicyType.ANY, + description=DESCRIPTION_1, + ) + + app_profile2 = instance.app_profile( + APP_PROFILE_ID_2, + routing_policy_type=RoutingPolicyType.SINGLE, + description=DESCRIPTION_2, + cluster_id=CLUSTER_ID, + allow_transactional_writes=ALLOW_WRITES, + ) + assert app_profile1.app_profile_id == APP_PROFILE_ID_1 + assert app_profile1._instance is instance + assert app_profile1.routing_policy_type == RoutingPolicyType.ANY + assert app_profile1.description == DESCRIPTION_1 + assert app_profile2.app_profile_id == APP_PROFILE_ID_2 + assert app_profile2._instance is instance + assert app_profile2.routing_policy_type == RoutingPolicyType.SINGLE + assert app_profile2.description == DESCRIPTION_2 + assert app_profile2.cluster_id == CLUSTER_ID + assert app_profile2.allow_transactional_writes == ALLOW_WRITES + + +def test_instance_list_app_profiles(): + from google.api_core.page_iterator import Iterator + from google.api_core.page_iterator import Page + from google.cloud.bigtable_admin_v2.types 
import instance as data_v2_pb2 + from google.cloud.bigtable.app_profile import AppProfile + + class _Iterator(Iterator): + def __init__(self, pages): + super(_Iterator, self).__init__(client=None) + self._pages = pages + + def _next_page(self): + if self._pages: + page, self._pages = self._pages[0], self._pages[1:] + return Page(self, page, self.item_to_value) + + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials, admin=True) + instance = _make_instance(INSTANCE_ID, client) + + # Setup Expected Response + app_profile_path_template = "projects/{}/instances/{}/appProfiles/{}" + app_profile_id1 = "app-profile-id1" + app_profile_id2 = "app-profile-id2" + app_profile_name1 = app_profile_path_template.format( + PROJECT, INSTANCE_ID, app_profile_id1 + ) + app_profile_name2 = app_profile_path_template.format( + PROJECT, INSTANCE_ID, app_profile_id2 + ) + routing_policy = data_v2_pb2.AppProfile.MultiClusterRoutingUseAny() + + app_profiles = [ + data_v2_pb2.AppProfile( + name=app_profile_name1, multi_cluster_routing_use_any=routing_policy + ), + data_v2_pb2.AppProfile( + name=app_profile_name2, multi_cluster_routing_use_any=routing_policy + ), + ] + iterator = _Iterator(pages=[app_profiles]) + + # Patch the stub used by the API method. + api = _make_instance_admin_api() + client._instance_admin_client = api + api.app_profile_path = app_profile_path_template.format + api.list_app_profiles.return_value = iterator + + # Perform the method and check the result. + app_profiles = instance.list_app_profiles() + + app_profile_1, app_profile_2 = app_profiles + + assert isinstance(app_profile_1, AppProfile) + assert app_profile_1.name == app_profile_name1 + + assert isinstance(app_profile_2, AppProfile) + assert app_profile_2.name == app_profile_name2 diff --git a/packages/google-cloud-bigtable/tests/unit/test_policy.py b/packages/google-cloud-bigtable/tests/unit/test_policy.py index 63f9ba03fb23..1b1adbed567f 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_policy.py +++ b/packages/google-cloud-bigtable/tests/unit/test_policy.py @@ -12,263 +12,267 @@ # See the License for the specific language governing permissions and # limitations under the License. 
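# --- Illustrative sketch (not part of the patch) -----------------------------
# The file above shows the conversion pattern this commit applies everywhere:
# each unittest.TestCase method becomes a module-level pytest function, the
# _get_target_class()/_make_one() helpers become plain module-level factories
# (_make_instance, _make_client, _make_instance_admin_api), and
# self.assertEqual / self.assertRaises become bare assert / pytest.raises.
# A minimal sketch of the same shape follows; _make_thing and the test names
# are hypothetical stand-ins, not code from this repository.
import pytest


def _make_thing(value):
    # Stands in for factories such as _make_instance / _make_policy / _make_row.
    return {"value": value}


def test_thing_value():
    # Plain `assert` replaces self.assertEqual(...).
    thing = _make_thing(42)
    assert thing["value"] == 42


def test_thing_requires_value():
    # pytest.raises(...) replaces self.assertRaises(...).
    with pytest.raises(TypeError):
        _make_thing()


# Module-level functions keep each test free of shared TestCase state, which is
# why the factories take every dependency explicitly as an argument.
# ------------------------------------------------------------------------------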
-import unittest - - -class TestPolicy(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.policy import Policy - - return Policy - - def _make_one(self, *args, **kw): - return self._get_target_class()(*args, **kw) - - def test_ctor_defaults(self): - empty = frozenset() - policy = self._make_one() - self.assertIsNone(policy.etag) - self.assertIsNone(policy.version) - self.assertEqual(policy.bigtable_admins, empty) - self.assertEqual(policy.bigtable_readers, empty) - self.assertEqual(policy.bigtable_users, empty) - self.assertEqual(policy.bigtable_viewers, empty) - self.assertEqual(len(policy), 0) - self.assertEqual(dict(policy), {}) - - def test_ctor_explicit(self): - VERSION = 1 - ETAG = b"ETAG" - empty = frozenset() - policy = self._make_one(ETAG, VERSION) - self.assertEqual(policy.etag, ETAG) - self.assertEqual(policy.version, VERSION) - self.assertEqual(policy.bigtable_admins, empty) - self.assertEqual(policy.bigtable_readers, empty) - self.assertEqual(policy.bigtable_users, empty) - self.assertEqual(policy.bigtable_viewers, empty) - self.assertEqual(len(policy), 0) - self.assertEqual(dict(policy), {}) - - def test_bigtable_admins_getter(self): - from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE - - MEMBER = "user:phred@example.com" - expected = frozenset([MEMBER]) - policy = self._make_one() - policy[BIGTABLE_ADMIN_ROLE] = [MEMBER] - self.assertEqual(policy.bigtable_admins, expected) - - def test_bigtable_readers_getter(self): - from google.cloud.bigtable.policy import BIGTABLE_READER_ROLE - - MEMBER = "user:phred@example.com" - expected = frozenset([MEMBER]) - policy = self._make_one() - policy[BIGTABLE_READER_ROLE] = [MEMBER] - self.assertEqual(policy.bigtable_readers, expected) - - def test_bigtable_users_getter(self): - from google.cloud.bigtable.policy import BIGTABLE_USER_ROLE - - MEMBER = "user:phred@example.com" - expected = frozenset([MEMBER]) - policy = self._make_one() - policy[BIGTABLE_USER_ROLE] = [MEMBER] - self.assertEqual(policy.bigtable_users, expected) - - def test_bigtable_viewers_getter(self): - from google.cloud.bigtable.policy import BIGTABLE_VIEWER_ROLE - - MEMBER = "user:phred@example.com" - expected = frozenset([MEMBER]) - policy = self._make_one() - policy[BIGTABLE_VIEWER_ROLE] = [MEMBER] - self.assertEqual(policy.bigtable_viewers, expected) - - def test_from_pb_empty(self): - from google.iam.v1 import policy_pb2 - - empty = frozenset() - message = policy_pb2.Policy() - klass = self._get_target_class() - policy = klass.from_pb(message) - self.assertEqual(policy.etag, b"") - self.assertEqual(policy.version, 0) - self.assertEqual(policy.bigtable_admins, empty) - self.assertEqual(policy.bigtable_readers, empty) - self.assertEqual(policy.bigtable_users, empty) - self.assertEqual(policy.bigtable_viewers, empty) - self.assertEqual(len(policy), 0) - self.assertEqual(dict(policy), {}) - - def test_from_pb_non_empty(self): - from google.iam.v1 import policy_pb2 - from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE - - ETAG = b"ETAG" - VERSION = 1 - members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"] - empty = frozenset() - message = policy_pb2.Policy( - etag=ETAG, - version=VERSION, - bindings=[{"role": BIGTABLE_ADMIN_ROLE, "members": members}], - ) - klass = self._get_target_class() - policy = klass.from_pb(message) - self.assertEqual(policy.etag, ETAG) - self.assertEqual(policy.version, VERSION) - self.assertEqual(policy.bigtable_admins, set(members)) - 
self.assertEqual(policy.bigtable_readers, empty) - self.assertEqual(policy.bigtable_users, empty) - self.assertEqual(policy.bigtable_viewers, empty) - self.assertEqual(len(policy), 1) - self.assertEqual(dict(policy), {BIGTABLE_ADMIN_ROLE: set(members)}) - - def test_from_pb_with_condition(self): - import pytest - from google.iam.v1 import policy_pb2 - from google.api_core.iam import InvalidOperationException, _DICT_ACCESS_MSG - from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE - - ETAG = b"ETAG" - VERSION = 3 - members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"] - BINDINGS = [ - { - "role": BIGTABLE_ADMIN_ROLE, - "members": members, - "condition": { - "title": "request_time", - "description": "Requests made before 2021-01-01T00:00:00Z", - "expression": 'request.time < timestamp("2021-01-01T00:00:00Z")', - }, - } - ] - message = policy_pb2.Policy(etag=ETAG, version=VERSION, bindings=BINDINGS,) - klass = self._get_target_class() - policy = klass.from_pb(message) - self.assertEqual(policy.etag, ETAG) - self.assertEqual(policy.version, VERSION) - self.assertEqual(policy.bindings[0]["role"], BIGTABLE_ADMIN_ROLE) - self.assertEqual(policy.bindings[0]["members"], set(members)) - self.assertEqual(policy.bindings[0]["condition"], BINDINGS[0]["condition"]) - with pytest.raises(InvalidOperationException, match=_DICT_ACCESS_MSG): - policy.bigtable_admins - with pytest.raises(InvalidOperationException, match=_DICT_ACCESS_MSG): - policy.bigtable_readers - with pytest.raises(InvalidOperationException, match=_DICT_ACCESS_MSG): - policy.bigtable_users - with pytest.raises(InvalidOperationException, match=_DICT_ACCESS_MSG): - policy.bigtable_viewers - with pytest.raises(InvalidOperationException, match=_DICT_ACCESS_MSG): - len(policy) - - def test_to_pb_empty(self): - from google.iam.v1 import policy_pb2 - - policy = self._make_one() - expected = policy_pb2.Policy() - - self.assertEqual(policy.to_pb(), expected) - - def test_to_pb_explicit(self): - from google.iam.v1 import policy_pb2 - from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE - - VERSION = 1 - ETAG = b"ETAG" - members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"] - policy = self._make_one(ETAG, VERSION) - policy[BIGTABLE_ADMIN_ROLE] = members - expected = policy_pb2.Policy( - etag=ETAG, - version=VERSION, - bindings=[ - policy_pb2.Binding(role=BIGTABLE_ADMIN_ROLE, members=sorted(members)) - ], - ) - - self.assertEqual(policy.to_pb(), expected) - - def test_to_pb_with_condition(self): - from google.iam.v1 import policy_pb2 - from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE - - VERSION = 3 - ETAG = b"ETAG" - members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"] - condition = { - "title": "request_time", - "description": "Requests made before 2021-01-01T00:00:00Z", - "expression": 'request.time < timestamp("2021-01-01T00:00:00Z")', + +def _make_policy(*args, **kw): + from google.cloud.bigtable.policy import Policy + + return Policy(*args, **kw) + + +def test_policy_ctor_defaults(): + empty = frozenset() + policy = _make_policy() + assert policy.etag is None + assert policy.version is None + assert policy.bigtable_admins == empty + assert policy.bigtable_readers == empty + assert policy.bigtable_users == empty + assert policy.bigtable_viewers == empty + assert len(policy) == 0 + assert dict(policy) == {} + + +def test_policy_ctor_explicit(): + VERSION = 1 + ETAG = b"ETAG" + empty = frozenset() + policy = _make_policy(ETAG, VERSION) + assert policy.etag == 
ETAG + assert policy.version == VERSION + assert policy.bigtable_admins == empty + assert policy.bigtable_readers == empty + assert policy.bigtable_users == empty + assert policy.bigtable_viewers == empty + assert len(policy) == 0 + assert dict(policy) == {} + + +def test_policy_bigtable_admins(): + from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE + + MEMBER = "user:phred@example.com" + expected = frozenset([MEMBER]) + policy = _make_policy() + policy[BIGTABLE_ADMIN_ROLE] = [MEMBER] + assert policy.bigtable_admins == expected + + +def test_policy_bigtable_readers(): + from google.cloud.bigtable.policy import BIGTABLE_READER_ROLE + + MEMBER = "user:phred@example.com" + expected = frozenset([MEMBER]) + policy = _make_policy() + policy[BIGTABLE_READER_ROLE] = [MEMBER] + assert policy.bigtable_readers == expected + + +def test_policy_bigtable_users(): + from google.cloud.bigtable.policy import BIGTABLE_USER_ROLE + + MEMBER = "user:phred@example.com" + expected = frozenset([MEMBER]) + policy = _make_policy() + policy[BIGTABLE_USER_ROLE] = [MEMBER] + assert policy.bigtable_users == expected + + +def test_policy_bigtable_viewers(): + from google.cloud.bigtable.policy import BIGTABLE_VIEWER_ROLE + + MEMBER = "user:phred@example.com" + expected = frozenset([MEMBER]) + policy = _make_policy() + policy[BIGTABLE_VIEWER_ROLE] = [MEMBER] + assert policy.bigtable_viewers == expected + + +def test_policy_from_pb_w_empty(): + from google.iam.v1 import policy_pb2 + from google.cloud.bigtable.policy import Policy + + empty = frozenset() + message = policy_pb2.Policy() + policy = Policy.from_pb(message) + assert policy.etag == b"" + assert policy.version == 0 + assert policy.bigtable_admins == empty + assert policy.bigtable_readers == empty + assert policy.bigtable_users == empty + assert policy.bigtable_viewers == empty + assert len(policy) == 0 + assert dict(policy) == {} + + +def test_policy_from_pb_w_non_empty(): + from google.iam.v1 import policy_pb2 + from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE + from google.cloud.bigtable.policy import Policy + + ETAG = b"ETAG" + VERSION = 1 + members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"] + empty = frozenset() + message = policy_pb2.Policy( + etag=ETAG, + version=VERSION, + bindings=[{"role": BIGTABLE_ADMIN_ROLE, "members": members}], + ) + policy = Policy.from_pb(message) + assert policy.etag == ETAG + assert policy.version == VERSION + assert policy.bigtable_admins == set(members) + assert policy.bigtable_readers == empty + assert policy.bigtable_users == empty + assert policy.bigtable_viewers == empty + assert len(policy) == 1 + assert dict(policy) == {BIGTABLE_ADMIN_ROLE: set(members)} + + +def test_policy_from_pb_w_condition(): + import pytest + from google.iam.v1 import policy_pb2 + from google.api_core.iam import InvalidOperationException, _DICT_ACCESS_MSG + from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE + from google.cloud.bigtable.policy import Policy + + ETAG = b"ETAG" + VERSION = 3 + members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"] + BINDINGS = [ + { + "role": BIGTABLE_ADMIN_ROLE, + "members": members, + "condition": { + "title": "request_time", + "description": "Requests made before 2021-01-01T00:00:00Z", + "expression": 'request.time < timestamp("2021-01-01T00:00:00Z")', + }, } - policy = self._make_one(ETAG, VERSION) - policy.bindings = [ - { - "role": BIGTABLE_ADMIN_ROLE, - "members": set(members), - "condition": condition, - } - ] - expected = 
policy_pb2.Policy( - etag=ETAG, - version=VERSION, - bindings=[ - policy_pb2.Binding( - role=BIGTABLE_ADMIN_ROLE, - members=sorted(members), - condition=condition, - ) - ], - ) - - self.assertEqual(policy.to_pb(), expected) - - def test_from_api_repr_wo_etag(self): - VERSION = 1 - empty = frozenset() - resource = {"version": VERSION} - klass = self._get_target_class() - policy = klass.from_api_repr(resource) - self.assertIsNone(policy.etag) - self.assertEqual(policy.version, VERSION) - self.assertEqual(policy.bigtable_admins, empty) - self.assertEqual(policy.bigtable_readers, empty) - self.assertEqual(policy.bigtable_users, empty) - self.assertEqual(policy.bigtable_viewers, empty) - self.assertEqual(len(policy), 0) - self.assertEqual(dict(policy), {}) - - def test_from_api_repr_w_etag(self): - import base64 - - ETAG = b"ETAG" - empty = frozenset() - resource = {"etag": base64.b64encode(ETAG).decode("ascii")} - klass = self._get_target_class() - policy = klass.from_api_repr(resource) - self.assertEqual(policy.etag, ETAG) - self.assertIsNone(policy.version) - self.assertEqual(policy.bigtable_admins, empty) - self.assertEqual(policy.bigtable_readers, empty) - self.assertEqual(policy.bigtable_users, empty) - self.assertEqual(policy.bigtable_viewers, empty) - self.assertEqual(len(policy), 0) - self.assertEqual(dict(policy), {}) - - def test_to_api_repr_wo_etag(self): - VERSION = 1 - resource = {"version": VERSION} - policy = self._make_one(version=VERSION) - self.assertEqual(policy.to_api_repr(), resource) - - def test_to_api_repr_w_etag(self): - import base64 - - ETAG = b"ETAG" - policy = self._make_one(etag=ETAG) - resource = {"etag": base64.b64encode(ETAG).decode("ascii")} - self.assertEqual(policy.to_api_repr(), resource) + ] + message = policy_pb2.Policy(etag=ETAG, version=VERSION, bindings=BINDINGS,) + policy = Policy.from_pb(message) + assert policy.etag == ETAG + assert policy.version == VERSION + assert policy.bindings[0]["role"] == BIGTABLE_ADMIN_ROLE + assert policy.bindings[0]["members"] == set(members) + assert policy.bindings[0]["condition"] == BINDINGS[0]["condition"] + with pytest.raises(InvalidOperationException, match=_DICT_ACCESS_MSG): + policy.bigtable_admins + with pytest.raises(InvalidOperationException, match=_DICT_ACCESS_MSG): + policy.bigtable_readers + with pytest.raises(InvalidOperationException, match=_DICT_ACCESS_MSG): + policy.bigtable_users + with pytest.raises(InvalidOperationException, match=_DICT_ACCESS_MSG): + policy.bigtable_viewers + with pytest.raises(InvalidOperationException, match=_DICT_ACCESS_MSG): + len(policy) + + +def test_policy_to_pb_empty(): + from google.iam.v1 import policy_pb2 + + policy = _make_policy() + expected = policy_pb2.Policy() + + assert policy.to_pb() == expected + + +def test_policy_to_pb_explicit(): + from google.iam.v1 import policy_pb2 + from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE + + VERSION = 1 + ETAG = b"ETAG" + members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"] + policy = _make_policy(ETAG, VERSION) + policy[BIGTABLE_ADMIN_ROLE] = members + expected = policy_pb2.Policy( + etag=ETAG, + version=VERSION, + bindings=[ + policy_pb2.Binding(role=BIGTABLE_ADMIN_ROLE, members=sorted(members)) + ], + ) + + assert policy.to_pb() == expected + + +def test_policy_to_pb_w_condition(): + from google.iam.v1 import policy_pb2 + from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE + + VERSION = 3 + ETAG = b"ETAG" + members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"] + 
condition = { + "title": "request_time", + "description": "Requests made before 2021-01-01T00:00:00Z", + "expression": 'request.time < timestamp("2021-01-01T00:00:00Z")', + } + policy = _make_policy(ETAG, VERSION) + policy.bindings = [ + {"role": BIGTABLE_ADMIN_ROLE, "members": set(members), "condition": condition} + ] + expected = policy_pb2.Policy( + etag=ETAG, + version=VERSION, + bindings=[ + policy_pb2.Binding( + role=BIGTABLE_ADMIN_ROLE, members=sorted(members), condition=condition, + ) + ], + ) + + assert policy.to_pb() == expected + + +def test_policy_from_api_repr_wo_etag(): + from google.cloud.bigtable.policy import Policy + + VERSION = 1 + empty = frozenset() + resource = {"version": VERSION} + policy = Policy.from_api_repr(resource) + assert policy.etag is None + assert policy.version == VERSION + assert policy.bigtable_admins == empty + assert policy.bigtable_readers == empty + assert policy.bigtable_users == empty + assert policy.bigtable_viewers == empty + assert len(policy) == 0 + assert dict(policy) == {} + + +def test_policy_from_api_repr_w_etag(): + import base64 + from google.cloud.bigtable.policy import Policy + + ETAG = b"ETAG" + empty = frozenset() + resource = {"etag": base64.b64encode(ETAG).decode("ascii")} + policy = Policy.from_api_repr(resource) + assert policy.etag == ETAG + assert policy.version is None + assert policy.bigtable_admins == empty + assert policy.bigtable_readers == empty + assert policy.bigtable_users == empty + assert policy.bigtable_viewers == empty + assert len(policy) == 0 + assert dict(policy) == {} + + +def test_policy_to_api_repr_wo_etag(): + VERSION = 1 + resource = {"version": VERSION} + policy = _make_policy(version=VERSION) + assert policy.to_api_repr() == resource + + +def test_policy_to_api_repr_w_etag(): + import base64 + + ETAG = b"ETAG" + policy = _make_policy(etag=ETAG) + resource = {"etag": base64.b64encode(ETAG).decode("ascii")} + assert policy.to_api_repr() == resource diff --git a/packages/google-cloud-bigtable/tests/unit/test_row.py b/packages/google-cloud-bigtable/tests/unit/test_row.py index 1f33f214bdc5..77475631491e 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row.py @@ -13,763 +13,726 @@ # limitations under the License. 
-import unittest
-
 import mock
+import pytest
 
 from ._testing import _make_credentials
 
 
-class TestRow(unittest.TestCase):
-    @staticmethod
-    def _get_target_class():
-        from google.cloud.bigtable.row import Row
+def _make_client(*args, **kwargs):
+    from google.cloud.bigtable.client import Client
 
-        return Row
+    return Client(*args, **kwargs)
 
-    def _make_one(self, *args, **kwargs):
-        return self._get_target_class()(*args, **kwargs)
 
-    def test_row_key_getter(self):
-        row = self._make_one(row_key=b"row_key", table="table")
-        self.assertEqual(b"row_key", row.row_key)
+def _make_row(*args, **kwargs):
+    from google.cloud.bigtable.row import Row
 
-    def test_row_table_getter(self):
-        row = self._make_one(row_key=b"row_key", table="table")
-        self.assertEqual("table", row.table)
+    return Row(*args, **kwargs)
 
 
-class Test_SetDeleteRow(unittest.TestCase):
-    @staticmethod
-    def _get_target_class():
-        from google.cloud.bigtable.row import _SetDeleteRow
+def test_row_key_getter():
+    row = _make_row(row_key=b"row_key", table="table")
+    assert b"row_key" == row.row_key
 
-        return _SetDeleteRow
 
-    def _make_one(self, *args, **kwargs):
-        return self._get_target_class()(*args, **kwargs)
+def test_row_table_getter():
+    row = _make_row(row_key=b"row_key", table="table")
+    assert "table" == row.table
 
-    def test__get_mutations_virtual(self):
-        row = self._make_one(b"row-key", None)
-        with self.assertRaises(NotImplementedError):
-            row._get_mutations(None)
 
+def _make__set_delete_row(*args, **kwargs):
+    from google.cloud.bigtable.row import _SetDeleteRow
 
-class TestDirectRow(unittest.TestCase):
-    @staticmethod
-    def _get_target_class():
-        from google.cloud.bigtable.row import DirectRow
+    return _SetDeleteRow(*args, **kwargs)
 
-        return DirectRow
 
-    def _make_one(self, *args, **kwargs):
-        return self._get_target_class()(*args, **kwargs)
+def test__set_delete_row__get_mutations_virtual():
+    row = _make__set_delete_row(b"row-key", None)
+    with pytest.raises(NotImplementedError):
+        row._get_mutations(None)
 
-    @staticmethod
-    def _get_target_client_class():
-        from google.cloud.bigtable.client import Client
 
-        return Client
+def _make_direct_row(*args, **kwargs):
+    from google.cloud.bigtable.row import DirectRow
 
-    def _make_client(self, *args, **kwargs):
-        return self._get_target_client_class()(*args, **kwargs)
+    return DirectRow(*args, **kwargs)
 
-    def test_constructor(self):
-        row_key = b"row_key"
-        table = object()
 
-        row = self._make_one(row_key, table)
-        self.assertEqual(row._row_key, row_key)
-        self.assertIs(row._table, table)
-        self.assertEqual(row._pb_mutations, [])
+def test_direct_row_constructor():
+    row_key = b"row_key"
+    table = object()
 
-    def test_constructor_with_unicode(self):
-        row_key = u"row_key"
-        row_key_bytes = b"row_key"
-        table = object()
+    row = _make_direct_row(row_key, table)
+    assert row._row_key == row_key
+    assert row._table is table
+    assert row._pb_mutations == []
 
-        row = self._make_one(row_key, table)
-        self.assertEqual(row._row_key, row_key_bytes)
-        self.assertIs(row._table, table)
 
-    def test_constructor_with_non_bytes(self):
-        row_key = object()
-        with self.assertRaises(TypeError):
-            self._make_one(row_key, None)
+def test_direct_row_constructor_with_unicode():
+    row_key = u"row_key"
+    row_key_bytes = b"row_key"
+    table = object()
 
-    def test__get_mutations(self):
-        row_key = b"row_key"
-        row = self._make_one(row_key, None)
+    row = _make_direct_row(row_key, table)
+    assert row._row_key == row_key_bytes
+    assert row._table is table
 
-        row._pb_mutations = mutations = object()
-        self.assertIs(mutations, 
row._get_mutations(None)) - def test_get_mutations_size(self): - row_key = b"row_key" - row = self._make_one(row_key, None) +def test_direct_row_constructor_with_non_bytes(): + row_key = object() + with pytest.raises(TypeError): + _make_direct_row(row_key, None) - column_family_id1 = u"column_family_id1" - column_family_id2 = u"column_family_id2" - column1 = b"column1" - column2 = b"column2" - number_of_bytes = 1 * 1024 * 1024 - value = b"1" * number_of_bytes - row.set_cell(column_family_id1, column1, value) - row.set_cell(column_family_id2, column2, value) +def test_direct_row__get_mutations(): + row_key = b"row_key" + row = _make_direct_row(row_key, None) - total_mutations_size = 0 - for mutation in row._get_mutations(): - total_mutations_size += mutation._pb.ByteSize() - - self.assertEqual(row.get_mutations_size(), total_mutations_size) - - def _set_cell_helper( - self, - column=None, - column_bytes=None, - value=b"foobar", - timestamp=None, - timestamp_micros=-1, - ): - import struct - - row_key = b"row_key" - column_family_id = u"column_family_id" - if column is None: - column = b"column" - table = object() - row = self._make_one(row_key, table) - self.assertEqual(row._pb_mutations, []) - row.set_cell(column_family_id, column, value, timestamp=timestamp) - - if isinstance(value, int): - value = struct.pack(">q", value) - expected_pb = _MutationPB( - set_cell=_MutationSetCellPB( - family_name=column_family_id, - column_qualifier=column_bytes or column, - timestamp_micros=timestamp_micros, - value=value, - ) - ) - self.assertEqual(row._pb_mutations, [expected_pb]) + row._pb_mutations = mutations = object() + assert mutations is row._get_mutations(None) - def test_set_cell(self): - self._set_cell_helper() - def test_set_cell_with_string_column(self): - column_bytes = b"column" - column_non_bytes = u"column" - self._set_cell_helper(column=column_non_bytes, column_bytes=column_bytes) +def test_direct_row_get_mutations_size(): + row_key = b"row_key" + row = _make_direct_row(row_key, None) - def test_set_cell_with_integer_value(self): - value = 1337 - self._set_cell_helper(value=value) + column_family_id1 = u"column_family_id1" + column_family_id2 = u"column_family_id2" + column1 = b"column1" + column2 = b"column2" + number_of_bytes = 1 * 1024 * 1024 + value = b"1" * number_of_bytes - def test_set_cell_with_non_bytes_value(self): - row_key = b"row_key" - column = b"column" - column_family_id = u"column_family_id" - table = object() - - row = self._make_one(row_key, table) - value = object() # Not bytes - with self.assertRaises(TypeError): - row.set_cell(column_family_id, column, value) - - def test_set_cell_with_non_null_timestamp(self): - import datetime - from google.cloud._helpers import _EPOCH - - microseconds = 898294371 - millis_granularity = microseconds - (microseconds % 1000) - timestamp = _EPOCH + datetime.timedelta(microseconds=microseconds) - self._set_cell_helper(timestamp=timestamp, timestamp_micros=millis_granularity) - - def test_delete(self): - row_key = b"row_key" - row = self._make_one(row_key, object()) - self.assertEqual(row._pb_mutations, []) - row.delete() - - expected_pb = _MutationPB(delete_from_row=_MutationDeleteFromRowPB()) - self.assertEqual(row._pb_mutations, [expected_pb]) - - def test_delete_cell(self): - klass = self._get_target_class() - - class MockRow(klass): - def __init__(self, *args, **kwargs): - super(MockRow, self).__init__(*args, **kwargs) - self._args = [] - self._kwargs = [] - - # Replace the called method with one that logs arguments. 
- def _delete_cells(self, *args, **kwargs): - self._args.append(args) - self._kwargs.append(kwargs) - - row_key = b"row_key" - column = b"column" - column_family_id = u"column_family_id" - table = object() - - mock_row = MockRow(row_key, table) - # Make sure no values are set before calling the method. - self.assertEqual(mock_row._pb_mutations, []) - self.assertEqual(mock_row._args, []) - self.assertEqual(mock_row._kwargs, []) - - # Actually make the request against the mock class. - time_range = object() - mock_row.delete_cell(column_family_id, column, time_range=time_range) - self.assertEqual(mock_row._pb_mutations, []) - self.assertEqual(mock_row._args, [(column_family_id, [column])]) - self.assertEqual(mock_row._kwargs, [{"state": None, "time_range": time_range}]) - - def test_delete_cells_non_iterable(self): - row_key = b"row_key" - column_family_id = u"column_family_id" - table = object() - - row = self._make_one(row_key, table) - columns = object() # Not iterable - with self.assertRaises(TypeError): - row.delete_cells(column_family_id, columns) - - def test_delete_cells_all_columns(self): - row_key = b"row_key" - column_family_id = u"column_family_id" - table = object() - - row = self._make_one(row_key, table) - klass = self._get_target_class() - self.assertEqual(row._pb_mutations, []) - row.delete_cells(column_family_id, klass.ALL_COLUMNS) - - expected_pb = _MutationPB( - delete_from_family=_MutationDeleteFromFamilyPB(family_name=column_family_id) - ) - self.assertEqual(row._pb_mutations, [expected_pb]) + row.set_cell(column_family_id1, column1, value) + row.set_cell(column_family_id2, column2, value) - def test_delete_cells_no_columns(self): - row_key = b"row_key" - column_family_id = u"column_family_id" - table = object() + total_mutations_size = 0 + for mutation in row._get_mutations(): + total_mutations_size += mutation._pb.ByteSize() + + assert row.get_mutations_size() == total_mutations_size - row = self._make_one(row_key, table) - columns = [] - self.assertEqual(row._pb_mutations, []) - row.delete_cells(column_family_id, columns) - self.assertEqual(row._pb_mutations, []) - def _delete_cells_helper(self, time_range=None): - row_key = b"row_key" +def _set_cell_helper( + column=None, + column_bytes=None, + value=b"foobar", + timestamp=None, + timestamp_micros=-1, +): + import struct + + row_key = b"row_key" + column_family_id = u"column_family_id" + if column is None: column = b"column" - column_family_id = u"column_family_id" - table = object() - - row = self._make_one(row_key, table) - columns = [column] - self.assertEqual(row._pb_mutations, []) - row.delete_cells(column_family_id, columns, time_range=time_range) - - expected_pb = _MutationPB( - delete_from_column=_MutationDeleteFromColumnPB( - family_name=column_family_id, column_qualifier=column - ) + table = object() + row = _make_direct_row(row_key, table) + assert row._pb_mutations == [] + row.set_cell(column_family_id, column, value, timestamp=timestamp) + + if isinstance(value, int): + value = struct.pack(">q", value) + expected_pb = _MutationPB( + set_cell=_MutationSetCellPB( + family_name=column_family_id, + column_qualifier=column_bytes or column, + timestamp_micros=timestamp_micros, + value=value, ) - if time_range is not None: - expected_pb.delete_from_column.time_range._pb.CopyFrom( - time_range.to_pb()._pb - ) - self.assertEqual(row._pb_mutations, [expected_pb]) - - def test_delete_cells_no_time_range(self): - self._delete_cells_helper() - - def test_delete_cells_with_time_range(self): - import datetime - from 
google.cloud._helpers import _EPOCH - from google.cloud.bigtable.row_filters import TimestampRange - - microseconds = 30871000 # Makes sure already milliseconds granularity - start = _EPOCH + datetime.timedelta(microseconds=microseconds) - time_range = TimestampRange(start=start) - self._delete_cells_helper(time_range=time_range) - - def test_delete_cells_with_bad_column(self): - # This makes sure a failure on one of the columns doesn't leave - # the row's mutations in a bad state. - row_key = b"row_key" - column = b"column" - column_family_id = u"column_family_id" - table = object() - - row = self._make_one(row_key, table) - columns = [column, object()] - self.assertEqual(row._pb_mutations, []) - with self.assertRaises(TypeError): - row.delete_cells(column_family_id, columns) - self.assertEqual(row._pb_mutations, []) - - def test_delete_cells_with_string_columns(self): - row_key = b"row_key" - column_family_id = u"column_family_id" - column1 = u"column1" - column1_bytes = b"column1" - column2 = u"column2" - column2_bytes = b"column2" - table = object() - - row = self._make_one(row_key, table) - columns = [column1, column2] - self.assertEqual(row._pb_mutations, []) - row.delete_cells(column_family_id, columns) + ) + assert row._pb_mutations == [expected_pb] - expected_pb1 = _MutationPB( - delete_from_column=_MutationDeleteFromColumnPB( - family_name=column_family_id, column_qualifier=column1_bytes - ) - ) - expected_pb2 = _MutationPB( - delete_from_column=_MutationDeleteFromColumnPB( - family_name=column_family_id, column_qualifier=column2_bytes - ) - ) - self.assertEqual(row._pb_mutations, [expected_pb1, expected_pb2]) - def test_commit(self): - project_id = "project-id" - row_key = b"row_key" - table_name = "projects/more-stuff" - column_family_id = u"column_family_id" - column = b"column" +def test_direct_row_set_cell(): + _set_cell_helper() - credentials = _make_credentials() - client = self._make_client( - project=project_id, credentials=credentials, admin=True - ) - table = _Table(table_name, client=client) - row = self._make_one(row_key, table) - value = b"bytes-value" - # Perform the method and check the result. - row.set_cell(column_family_id, column, value) - row.commit() - self.assertEqual(table.mutated_rows, [row]) +def test_direct_row_set_cell_with_string_column(): + column_bytes = b"column" + column_non_bytes = u"column" + _set_cell_helper(column=column_non_bytes, column_bytes=column_bytes) - def test_commit_with_exception(self): - from google.rpc import status_pb2 - project_id = "project-id" - row_key = b"row_key" - table_name = "projects/more-stuff" - column_family_id = u"column_family_id" - column = b"column" +def test_direct_row_set_cell_with_integer_value(): + value = 1337 + _set_cell_helper(value=value) - credentials = _make_credentials() - client = self._make_client( - project=project_id, credentials=credentials, admin=True - ) - table = _Table(table_name, client=client) - row = self._make_one(row_key, table) - value = b"bytes-value" - # Perform the method and check the result. 
+def test_direct_row_set_cell_with_non_bytes_value(): + row_key = b"row_key" + column = b"column" + column_family_id = u"column_family_id" + table = object() + + row = _make_direct_row(row_key, table) + value = object() # Not bytes + with pytest.raises(TypeError): row.set_cell(column_family_id, column, value) - result = row.commit() - expected = status_pb2.Status(code=0) - self.assertEqual(result, expected) - - -class TestConditionalRow(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row import ConditionalRow - - return ConditionalRow - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - @staticmethod - def _get_target_client_class(): - from google.cloud.bigtable.client import Client - - return Client - - def _make_client(self, *args, **kwargs): - return self._get_target_client_class()(*args, **kwargs) - - def test_constructor(self): - row_key = b"row_key" - table = object() - filter_ = object() - - row = self._make_one(row_key, table, filter_=filter_) - self.assertEqual(row._row_key, row_key) - self.assertIs(row._table, table) - self.assertIs(row._filter, filter_) - self.assertEqual(row._true_pb_mutations, []) - self.assertEqual(row._false_pb_mutations, []) - - def test__get_mutations(self): - row_key = b"row_key" - filter_ = object() - row = self._make_one(row_key, None, filter_=filter_) - - row._true_pb_mutations = true_mutations = object() - row._false_pb_mutations = false_mutations = object() - self.assertIs(true_mutations, row._get_mutations(True)) - self.assertIs(false_mutations, row._get_mutations(False)) - self.assertIs(false_mutations, row._get_mutations(None)) - - def test_commit(self): - from google.cloud.bigtable.row_filters import RowSampleFilter - from google.cloud.bigtable_v2.services.bigtable import BigtableClient - - project_id = "project-id" - row_key = b"row_key" - table_name = "projects/more-stuff" - app_profile_id = "app_profile_id" - column_family_id1 = u"column_family_id1" - column_family_id2 = u"column_family_id2" - column_family_id3 = u"column_family_id3" - column1 = b"column1" - column2 = b"column2" - - api = mock.create_autospec(BigtableClient) - credentials = _make_credentials() - client = self._make_client( - project=project_id, credentials=credentials, admin=True - ) - table = _Table(table_name, client=client, app_profile_id=app_profile_id) - row_filter = RowSampleFilter(0.33) - row = self._make_one(row_key, table, filter_=row_filter) - # Create request_pb - value1 = b"bytes-value" - # Create response_pb - predicate_matched = True - response_pb = _CheckAndMutateRowResponsePB(predicate_matched=predicate_matched) +def test_direct_row_set_cell_with_non_null_timestamp(): + import datetime + from google.cloud._helpers import _EPOCH - # Patch the stub used by the API method. - api.check_and_mutate_row.side_effect = [response_pb] - client._table_data_client = api + microseconds = 898294371 + millis_granularity = microseconds - (microseconds % 1000) + timestamp = _EPOCH + datetime.timedelta(microseconds=microseconds) + _set_cell_helper(timestamp=timestamp, timestamp_micros=millis_granularity) - # Create expected_result. - expected_result = predicate_matched - # Perform the method and check the result. 
- row.set_cell(column_family_id1, column1, value1, state=True) - row.delete(state=False) - row.delete_cell(column_family_id2, column2, state=True) - row.delete_cells(column_family_id3, row.ALL_COLUMNS, state=True) - result = row.commit() - call_args = api.check_and_mutate_row.call_args - self.assertEqual(app_profile_id, call_args.app_profile_id[0]) - self.assertEqual(result, expected_result) - self.assertEqual(row._true_pb_mutations, []) - self.assertEqual(row._false_pb_mutations, []) - - def test_commit_too_many_mutations(self): - from google.cloud._testing import _Monkey - from google.cloud.bigtable import row as MUT - - row_key = b"row_key" - table = object() - filter_ = object() - row = self._make_one(row_key, table, filter_=filter_) - row._true_pb_mutations = [1, 2, 3] - num_mutations = len(row._true_pb_mutations) - with _Monkey(MUT, MAX_MUTATIONS=num_mutations - 1): - with self.assertRaises(ValueError): - row.commit() - - def test_commit_no_mutations(self): - from tests.unit._testing import _FakeStub - - project_id = "project-id" - row_key = b"row_key" - - credentials = _make_credentials() - client = self._make_client( - project=project_id, credentials=credentials, admin=True - ) - table = _Table(None, client=client) - filter_ = object() - row = self._make_one(row_key, table, filter_=filter_) - self.assertEqual(row._true_pb_mutations, []) - self.assertEqual(row._false_pb_mutations, []) +def test_direct_row_delete(): + row_key = b"row_key" + row = _make_direct_row(row_key, object()) + assert row._pb_mutations == [] + row.delete() - # Patch the stub used by the API method. - stub = _FakeStub() + expected_pb = _MutationPB(delete_from_row=_MutationDeleteFromRowPB()) + assert row._pb_mutations == [expected_pb] - # Perform the method and check the result. - result = row.commit() - self.assertIsNone(result) - # Make sure no request was sent. - self.assertEqual(stub.method_calls, []) +def test_direct_row_delete_cell(): + from google.cloud.bigtable.row import DirectRow -class TestAppendRow(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row import AppendRow + class MockRow(DirectRow): + def __init__(self, *args, **kwargs): + super(MockRow, self).__init__(*args, **kwargs) + self._args = [] + self._kwargs = [] - return AppendRow + # Replace the called method with one that logs arguments. + def _delete_cells(self, *args, **kwargs): + self._args.append(args) + self._kwargs.append(kwargs) - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) + row_key = b"row_key" + column = b"column" + column_family_id = u"column_family_id" + table = object() - @staticmethod - def _get_target_client_class(): - from google.cloud.bigtable.client import Client + mock_row = MockRow(row_key, table) + # Make sure no values are set before calling the method. + assert mock_row._pb_mutations == [] + assert mock_row._args == [] + assert mock_row._kwargs == [] - return Client + # Actually make the request against the mock class. 
+ time_range = object() + mock_row.delete_cell(column_family_id, column, time_range=time_range) + assert mock_row._pb_mutations == [] + assert mock_row._args == [(column_family_id, [column])] + assert mock_row._kwargs == [{"state": None, "time_range": time_range}] - def _make_client(self, *args, **kwargs): - return self._get_target_client_class()(*args, **kwargs) - def test_constructor(self): - row_key = b"row_key" - table = object() +def test_direct_row_delete_cells_non_iterable(): + row_key = b"row_key" + column_family_id = u"column_family_id" + table = object() - row = self._make_one(row_key, table) - self.assertEqual(row._row_key, row_key) - self.assertIs(row._table, table) - self.assertEqual(row._rule_pb_list, []) + row = _make_direct_row(row_key, table) + columns = object() # Not iterable + with pytest.raises(TypeError): + row.delete_cells(column_family_id, columns) - def test_clear(self): - row_key = b"row_key" - table = object() - row = self._make_one(row_key, table) - row._rule_pb_list = [1, 2, 3] - row.clear() - self.assertEqual(row._rule_pb_list, []) - def test_append_cell_value(self): - table = object() - row_key = b"row_key" - row = self._make_one(row_key, table) - self.assertEqual(row._rule_pb_list, []) +def test_direct_row_delete_cells_all_columns(): + from google.cloud.bigtable.row import DirectRow - column = b"column" - column_family_id = u"column_family_id" - value = b"bytes-val" - row.append_cell_value(column_family_id, column, value) - expected_pb = _ReadModifyWriteRulePB( - family_name=column_family_id, column_qualifier=column, append_value=value - ) - self.assertEqual(row._rule_pb_list, [expected_pb]) + row_key = b"row_key" + column_family_id = u"column_family_id" + table = object() - def test_increment_cell_value(self): - table = object() - row_key = b"row_key" - row = self._make_one(row_key, table) - self.assertEqual(row._rule_pb_list, []) + row = _make_direct_row(row_key, table) + assert row._pb_mutations == [] + row.delete_cells(column_family_id, DirectRow.ALL_COLUMNS) - column = b"column" - column_family_id = u"column_family_id" - int_value = 281330 - row.increment_cell_value(column_family_id, column, int_value) - expected_pb = _ReadModifyWriteRulePB( - family_name=column_family_id, - column_qualifier=column, - increment_amount=int_value, - ) - self.assertEqual(row._rule_pb_list, [expected_pb]) - - def test_commit(self): - from google.cloud._testing import _Monkey - from google.cloud.bigtable import row as MUT - from google.cloud.bigtable_v2.services.bigtable import BigtableClient - - project_id = "project-id" - row_key = b"row_key" - table_name = "projects/more-stuff" - app_profile_id = "app_profile_id" - column_family_id = u"column_family_id" - column = b"column" + expected_pb = _MutationPB( + delete_from_family=_MutationDeleteFromFamilyPB(family_name=column_family_id) + ) + assert row._pb_mutations == [expected_pb] - api = mock.create_autospec(BigtableClient) - credentials = _make_credentials() - client = self._make_client( - project=project_id, credentials=credentials, admin=True - ) - table = _Table(table_name, client=client, app_profile_id=app_profile_id) - row = self._make_one(row_key, table) - - # Create request_pb - value = b"bytes-value" - - # Create expected_result. - row_responses = [] - expected_result = object() - - # Patch API calls - client._table_data_client = api - - def mock_parse_rmw_row_response(row_response): - row_responses.append(row_response) - return expected_result - - # Perform the method and check the result. 
- with _Monkey(MUT, _parse_rmw_row_response=mock_parse_rmw_row_response): - row._table._instance._client._table_data_client = api - row.append_cell_value(column_family_id, column, value) - result = row.commit() - call_args = api.read_modify_write_row.call_args_list[0] - self.assertEqual(app_profile_id, call_args.app_profile_id[0]) - self.assertEqual(result, expected_result) - self.assertEqual(row._rule_pb_list, []) - - def test_commit_no_rules(self): - from tests.unit._testing import _FakeStub - - project_id = "project-id" - row_key = b"row_key" - - credentials = _make_credentials() - client = self._make_client( - project=project_id, credentials=credentials, admin=True - ) - table = _Table(None, client=client) - row = self._make_one(row_key, table) - self.assertEqual(row._rule_pb_list, []) +def test_direct_row_delete_cells_no_columns(): + row_key = b"row_key" + column_family_id = u"column_family_id" + table = object() - # Patch the stub used by the API method. - stub = _FakeStub() + row = _make_direct_row(row_key, table) + columns = [] + assert row._pb_mutations == [] + row.delete_cells(column_family_id, columns) + assert row._pb_mutations == [] - # Perform the method and check the result. - result = row.commit() - self.assertEqual(result, {}) - # Make sure no request was sent. - self.assertEqual(stub.method_calls, []) - - def test_commit_too_many_mutations(self): - from google.cloud._testing import _Monkey - from google.cloud.bigtable import row as MUT - - row_key = b"row_key" - table = object() - row = self._make_one(row_key, table) - row._rule_pb_list = [1, 2, 3] - num_mutations = len(row._rule_pb_list) - with _Monkey(MUT, MAX_MUTATIONS=num_mutations - 1): - with self.assertRaises(ValueError): - row.commit() - - -class Test__parse_rmw_row_response(unittest.TestCase): - def _call_fut(self, row_response): - from google.cloud.bigtable.row import _parse_rmw_row_response - - return _parse_rmw_row_response(row_response) - - def test_it(self): - from google.cloud._helpers import _datetime_from_microseconds - - col_fam1 = u"col-fam-id" - col_fam2 = u"col-fam-id2" - col_name1 = b"col-name1" - col_name2 = b"col-name2" - col_name3 = b"col-name3-but-other-fam" - cell_val1 = b"cell-val" - cell_val2 = b"cell-val-newer" - cell_val3 = b"altcol-cell-val" - cell_val4 = b"foo" - - microseconds = 1000871 - timestamp = _datetime_from_microseconds(microseconds) - expected_output = { - col_fam1: { - col_name1: [(cell_val1, timestamp), (cell_val2, timestamp)], - col_name2: [(cell_val3, timestamp)], - }, - col_fam2: {col_name3: [(cell_val4, timestamp)]}, - } - response_row = _RowPB( - families=[ - _FamilyPB( - name=col_fam1, - columns=[ - _ColumnPB( - qualifier=col_name1, - cells=[ - _CellPB(value=cell_val1, timestamp_micros=microseconds), - _CellPB(value=cell_val2, timestamp_micros=microseconds), - ], - ), - _ColumnPB( - qualifier=col_name2, - cells=[ - _CellPB(value=cell_val3, timestamp_micros=microseconds) - ], - ), - ], - ), - _FamilyPB( - name=col_fam2, - columns=[ - _ColumnPB( - qualifier=col_name3, - cells=[ - _CellPB(value=cell_val4, timestamp_micros=microseconds) - ], - ) - ], - ), - ] + +def _delete_cells_helper(time_range=None): + row_key = b"row_key" + column = b"column" + column_family_id = u"column_family_id" + table = object() + + row = _make_direct_row(row_key, table) + columns = [column] + assert row._pb_mutations == [] + row.delete_cells(column_family_id, columns, time_range=time_range) + + expected_pb = _MutationPB( + delete_from_column=_MutationDeleteFromColumnPB( + 
family_name=column_family_id, column_qualifier=column ) - sample_input = _ReadModifyWriteRowResponsePB(row=response_row) - self.assertEqual(expected_output, self._call_fut(sample_input)) + ) + if time_range is not None: + expected_pb.delete_from_column.time_range._pb.CopyFrom(time_range.to_pb()._pb) + assert row._pb_mutations == [expected_pb] + + +def test_direct_row_delete_cells_no_time_range(): + _delete_cells_helper() + + +def test_direct_row_delete_cells_with_time_range(): + import datetime + from google.cloud._helpers import _EPOCH + from google.cloud.bigtable.row_filters import TimestampRange + microseconds = 30871000 # Makes sure already milliseconds granularity + start = _EPOCH + datetime.timedelta(microseconds=microseconds) + time_range = TimestampRange(start=start) + _delete_cells_helper(time_range=time_range) -class Test__parse_family_pb(unittest.TestCase): - def _call_fut(self, family_pb): - from google.cloud.bigtable.row import _parse_family_pb - return _parse_family_pb(family_pb) +def test_direct_row_delete_cells_with_bad_column(): + # This makes sure a failure on one of the columns doesn't leave + # the row's mutations in a bad state. + row_key = b"row_key" + column = b"column" + column_family_id = u"column_family_id" + table = object() - def test_it(self): - from google.cloud._helpers import _datetime_from_microseconds + row = _make_direct_row(row_key, table) + columns = [column, object()] + assert row._pb_mutations == [] + with pytest.raises(TypeError): + row.delete_cells(column_family_id, columns) + assert row._pb_mutations == [] + + +def test_direct_row_delete_cells_with_string_columns(): + row_key = b"row_key" + column_family_id = u"column_family_id" + column1 = u"column1" + column1_bytes = b"column1" + column2 = u"column2" + column2_bytes = b"column2" + table = object() + + row = _make_direct_row(row_key, table) + columns = [column1, column2] + assert row._pb_mutations == [] + row.delete_cells(column_family_id, columns) + + expected_pb1 = _MutationPB( + delete_from_column=_MutationDeleteFromColumnPB( + family_name=column_family_id, column_qualifier=column1_bytes + ) + ) + expected_pb2 = _MutationPB( + delete_from_column=_MutationDeleteFromColumnPB( + family_name=column_family_id, column_qualifier=column2_bytes + ) + ) + assert row._pb_mutations == [expected_pb1, expected_pb2] + + +def test_direct_row_commit(): + project_id = "project-id" + row_key = b"row_key" + table_name = "projects/more-stuff" + column_family_id = u"column_family_id" + column = b"column" - col_fam1 = u"col-fam-id" - col_name1 = b"col-name1" - col_name2 = b"col-name2" - cell_val1 = b"cell-val" - cell_val2 = b"cell-val-newer" - cell_val3 = b"altcol-cell-val" + credentials = _make_credentials() + client = _make_client(project=project_id, credentials=credentials, admin=True) + table = _Table(table_name, client=client) + row = _make_direct_row(row_key, table) + value = b"bytes-value" + + # Perform the method and check the result. 
+ row.set_cell(column_family_id, column, value) + row.commit() + assert table.mutated_rows == [row] + + +def test_direct_row_commit_with_exception(): + from google.rpc import status_pb2 + + project_id = "project-id" + row_key = b"row_key" + table_name = "projects/more-stuff" + column_family_id = u"column_family_id" + column = b"column" + + credentials = _make_credentials() + client = _make_client(project=project_id, credentials=credentials, admin=True) + table = _Table(table_name, client=client) + row = _make_direct_row(row_key, table) + value = b"bytes-value" + + # Perform the method and check the result. + row.set_cell(column_family_id, column, value) + result = row.commit() + expected = status_pb2.Status(code=0) + assert result == expected + + +def _make_conditional_row(*args, **kwargs): + from google.cloud.bigtable.row import ConditionalRow + + return ConditionalRow(*args, **kwargs) + + +def test_conditional_row_constructor(): + row_key = b"row_key" + table = object() + filter_ = object() + + row = _make_conditional_row(row_key, table, filter_=filter_) + assert row._row_key == row_key + assert row._table is table + assert row._filter is filter_ + assert row._true_pb_mutations == [] + assert row._false_pb_mutations == [] + + +def test_conditional_row__get_mutations(): + row_key = b"row_key" + filter_ = object() + row = _make_conditional_row(row_key, None, filter_=filter_) + + row._true_pb_mutations = true_mutations = object() + row._false_pb_mutations = false_mutations = object() + assert true_mutations is row._get_mutations(True) + assert false_mutations is row._get_mutations(False) + assert false_mutations is row._get_mutations(None) + + +def test_conditional_row_commit(): + from google.cloud.bigtable.row_filters import RowSampleFilter + from google.cloud.bigtable_v2.services.bigtable import BigtableClient + + project_id = "project-id" + row_key = b"row_key" + table_name = "projects/more-stuff" + app_profile_id = "app_profile_id" + column_family_id1 = u"column_family_id1" + column_family_id2 = u"column_family_id2" + column_family_id3 = u"column_family_id3" + column1 = b"column1" + column2 = b"column2" + + api = mock.create_autospec(BigtableClient) + credentials = _make_credentials() + client = _make_client(project=project_id, credentials=credentials, admin=True) + table = _Table(table_name, client=client, app_profile_id=app_profile_id) + row_filter = RowSampleFilter(0.33) + row = _make_conditional_row(row_key, table, filter_=row_filter) + + # Create request_pb + value1 = b"bytes-value" + + # Create response_pb + predicate_matched = True + response_pb = _CheckAndMutateRowResponsePB(predicate_matched=predicate_matched) + + # Patch the stub used by the API method. + api.check_and_mutate_row.side_effect = [response_pb] + client._table_data_client = api + + # Create expected_result. + expected_result = predicate_matched + + # Perform the method and check the result. 
+ row.set_cell(column_family_id1, column1, value1, state=True) + row.delete(state=False) + row.delete_cell(column_family_id2, column2, state=True) + row.delete_cells(column_family_id3, row.ALL_COLUMNS, state=True) + result = row.commit() + call_args = api.check_and_mutate_row.call_args + assert app_profile_id == call_args.app_profile_id[0] + assert result == expected_result + assert row._true_pb_mutations == [] + assert row._false_pb_mutations == [] + + +def test_conditional_row_commit_too_many_mutations(): + from google.cloud._testing import _Monkey + from google.cloud.bigtable import row as MUT + + row_key = b"row_key" + table = object() + filter_ = object() + row = _make_conditional_row(row_key, table, filter_=filter_) + row._true_pb_mutations = [1, 2, 3] + num_mutations = len(row._true_pb_mutations) + with _Monkey(MUT, MAX_MUTATIONS=num_mutations - 1): + with pytest.raises(ValueError): + row.commit() + + +def test_conditional_row_commit_no_mutations(): + from tests.unit._testing import _FakeStub + + project_id = "project-id" + row_key = b"row_key" + + credentials = _make_credentials() + client = _make_client(project=project_id, credentials=credentials, admin=True) + table = _Table(None, client=client) + filter_ = object() + row = _make_conditional_row(row_key, table, filter_=filter_) + assert row._true_pb_mutations == [] + assert row._false_pb_mutations == [] + + # Patch the stub used by the API method. + stub = _FakeStub() + + # Perform the method and check the result. + result = row.commit() + assert result is None + # Make sure no request was sent. + assert stub.method_calls == [] + + +def _make_append_row(*args, **kwargs): + from google.cloud.bigtable.row import AppendRow + + return AppendRow(*args, **kwargs) + + +def test_append_row_constructor(): + row_key = b"row_key" + table = object() + + row = _make_append_row(row_key, table) + assert row._row_key == row_key + assert row._table is table + assert row._rule_pb_list == [] + + +def test_append_row_clear(): + row_key = b"row_key" + table = object() + row = _make_append_row(row_key, table) + row._rule_pb_list = [1, 2, 3] + row.clear() + assert row._rule_pb_list == [] + + +def test_append_row_append_cell_value(): + table = object() + row_key = b"row_key" + row = _make_append_row(row_key, table) + assert row._rule_pb_list == [] + + column = b"column" + column_family_id = u"column_family_id" + value = b"bytes-val" + row.append_cell_value(column_family_id, column, value) + expected_pb = _ReadModifyWriteRulePB( + family_name=column_family_id, column_qualifier=column, append_value=value + ) + assert row._rule_pb_list == [expected_pb] + + +def test_append_row_increment_cell_value(): + table = object() + row_key = b"row_key" + row = _make_append_row(row_key, table) + assert row._rule_pb_list == [] + + column = b"column" + column_family_id = u"column_family_id" + int_value = 281330 + row.increment_cell_value(column_family_id, column, int_value) + expected_pb = _ReadModifyWriteRulePB( + family_name=column_family_id, + column_qualifier=column, + increment_amount=int_value, + ) + assert row._rule_pb_list == [expected_pb] + + +def test_append_row_commit(): + from google.cloud._testing import _Monkey + from google.cloud.bigtable import row as MUT + from google.cloud.bigtable_v2.services.bigtable import BigtableClient + + project_id = "project-id" + row_key = b"row_key" + table_name = "projects/more-stuff" + app_profile_id = "app_profile_id" + column_family_id = u"column_family_id" + column = b"column" + + api = 
mock.create_autospec(BigtableClient) - microseconds = 5554441037 - timestamp = _datetime_from_microseconds(microseconds) - expected_dict = { + credentials = _make_credentials() + client = _make_client(project=project_id, credentials=credentials, admin=True) + table = _Table(table_name, client=client, app_profile_id=app_profile_id) + row = _make_append_row(row_key, table) + + # Create request_pb + value = b"bytes-value" + + # Create expected_result. + row_responses = [] + expected_result = object() + + # Patch API calls + client._table_data_client = api + + def mock_parse_rmw_row_response(row_response): + row_responses.append(row_response) + return expected_result + + # Perform the method and check the result. + with _Monkey(MUT, _parse_rmw_row_response=mock_parse_rmw_row_response): + row._table._instance._client._table_data_client = api + row.append_cell_value(column_family_id, column, value) + result = row.commit() + call_args = api.read_modify_write_row.call_args_list[0] + assert app_profile_id == call_args.app_profile_id[0] + assert result == expected_result + assert row._rule_pb_list == [] + + +def test_append_row_commit_no_rules(): + from tests.unit._testing import _FakeStub + + project_id = "project-id" + row_key = b"row_key" + + credentials = _make_credentials() + client = _make_client(project=project_id, credentials=credentials, admin=True) + table = _Table(None, client=client) + row = _make_append_row(row_key, table) + assert row._rule_pb_list == [] + + # Patch the stub used by the API method. + stub = _FakeStub() + + # Perform the method and check the result. + result = row.commit() + assert result == {} + # Make sure no request was sent. + assert stub.method_calls == [] + + +def test_append_row_commit_too_many_mutations(): + from google.cloud._testing import _Monkey + from google.cloud.bigtable import row as MUT + + row_key = b"row_key" + table = object() + row = _make_append_row(row_key, table) + row._rule_pb_list = [1, 2, 3] + num_mutations = len(row._rule_pb_list) + with _Monkey(MUT, MAX_MUTATIONS=num_mutations - 1): + with pytest.raises(ValueError): + row.commit() + + +def test__parse_rmw_row_response(): + from google.cloud._helpers import _datetime_from_microseconds + from google.cloud.bigtable.row import _parse_rmw_row_response + + col_fam1 = u"col-fam-id" + col_fam2 = u"col-fam-id2" + col_name1 = b"col-name1" + col_name2 = b"col-name2" + col_name3 = b"col-name3-but-other-fam" + cell_val1 = b"cell-val" + cell_val2 = b"cell-val-newer" + cell_val3 = b"altcol-cell-val" + cell_val4 = b"foo" + + microseconds = 1000871 + timestamp = _datetime_from_microseconds(microseconds) + expected_output = { + col_fam1: { col_name1: [(cell_val1, timestamp), (cell_val2, timestamp)], col_name2: [(cell_val3, timestamp)], - } - expected_output = (col_fam1, expected_dict) - sample_input = _FamilyPB( - name=col_fam1, - columns=[ - _ColumnPB( - qualifier=col_name1, - cells=[ - _CellPB(value=cell_val1, timestamp_micros=microseconds), - _CellPB(value=cell_val2, timestamp_micros=microseconds), - ], - ), - _ColumnPB( - qualifier=col_name2, - cells=[_CellPB(value=cell_val3, timestamp_micros=microseconds)], - ), - ], - ) - self.assertEqual(expected_output, self._call_fut(sample_input)) + }, + col_fam2: {col_name3: [(cell_val4, timestamp)]}, + } + response_row = _RowPB( + families=[ + _FamilyPB( + name=col_fam1, + columns=[ + _ColumnPB( + qualifier=col_name1, + cells=[ + _CellPB(value=cell_val1, timestamp_micros=microseconds), + _CellPB(value=cell_val2, timestamp_micros=microseconds), + ], + ), + 
_ColumnPB( + qualifier=col_name2, + cells=[_CellPB(value=cell_val3, timestamp_micros=microseconds)], + ), + ], + ), + _FamilyPB( + name=col_fam2, + columns=[ + _ColumnPB( + qualifier=col_name3, + cells=[_CellPB(value=cell_val4, timestamp_micros=microseconds)], + ) + ], + ), + ] + ) + sample_input = _ReadModifyWriteRowResponsePB(row=response_row) + assert expected_output == _parse_rmw_row_response(sample_input) + + +def test__parse_family_pb(): + from google.cloud._helpers import _datetime_from_microseconds + from google.cloud.bigtable.row import _parse_family_pb + + col_fam1 = u"col-fam-id" + col_name1 = b"col-name1" + col_name2 = b"col-name2" + cell_val1 = b"cell-val" + cell_val2 = b"cell-val-newer" + cell_val3 = b"altcol-cell-val" + + microseconds = 5554441037 + timestamp = _datetime_from_microseconds(microseconds) + expected_dict = { + col_name1: [(cell_val1, timestamp), (cell_val2, timestamp)], + col_name2: [(cell_val3, timestamp)], + } + expected_output = (col_fam1, expected_dict) + sample_input = _FamilyPB( + name=col_fam1, + columns=[ + _ColumnPB( + qualifier=col_name1, + cells=[ + _CellPB(value=cell_val1, timestamp_micros=microseconds), + _CellPB(value=cell_val2, timestamp_micros=microseconds), + ], + ), + _ColumnPB( + qualifier=col_name2, + cells=[_CellPB(value=cell_val3, timestamp_micros=microseconds)], + ), + ], + ) + assert expected_output == _parse_family_pb(sample_input) def _CheckAndMutateRowResponsePB(*args, **kw): diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_data.py b/packages/google-cloud-bigtable/tests/unit/test_row_data.py index b146abaa82fb..06fd2f016f36 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_data.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_data.py @@ -13,1222 +13,1298 @@ # limitations under the License. 
-import unittest +import os + import mock +import pytest -from google.api_core.exceptions import DeadlineExceeded from ._testing import _make_credentials -from google.cloud.bigtable.row_set import RowRange -from google.cloud.bigtable_v2.types import data as data_v2_pb2 - - -class TestCell(unittest.TestCase): - timestamp_micros = 18738724000 # Make sure millis granularity - - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_data import Cell - - return Cell - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def _from_pb_test_helper(self, labels=None): - import datetime - from google.cloud._helpers import _EPOCH - from google.cloud.bigtable_v2.types import data as data_v2_pb2 - - timestamp_micros = TestCell.timestamp_micros - timestamp = _EPOCH + datetime.timedelta(microseconds=timestamp_micros) - value = b"value-bytes" - - if labels is None: - cell_pb = data_v2_pb2.Cell(value=value, timestamp_micros=timestamp_micros) - cell_expected = self._make_one(value, timestamp_micros) - else: - cell_pb = data_v2_pb2.Cell( - value=value, timestamp_micros=timestamp_micros, labels=labels - ) - cell_expected = self._make_one(value, timestamp_micros, labels=labels) - - klass = self._get_target_class() - result = klass.from_pb(cell_pb) - self.assertEqual(result, cell_expected) - self.assertEqual(result.timestamp, timestamp) - - def test_from_pb(self): - self._from_pb_test_helper() - - def test_from_pb_with_labels(self): - labels = [u"label1", u"label2"] - self._from_pb_test_helper(labels) - - def test_constructor(self): - value = object() - cell = self._make_one(value, TestCell.timestamp_micros) - self.assertEqual(cell.value, value) - - def test___eq__(self): - value = object() - cell1 = self._make_one(value, TestCell.timestamp_micros) - cell2 = self._make_one(value, TestCell.timestamp_micros) - self.assertEqual(cell1, cell2) - - def test___eq__type_differ(self): - cell1 = self._make_one(None, None) - cell2 = object() - self.assertNotEqual(cell1, cell2) - - def test___ne__same_value(self): - value = object() - cell1 = self._make_one(value, TestCell.timestamp_micros) - cell2 = self._make_one(value, TestCell.timestamp_micros) - comparison_val = cell1 != cell2 - self.assertFalse(comparison_val) - - def test___ne__(self): - value1 = "value1" - value2 = "value2" - cell1 = self._make_one(value1, TestCell.timestamp_micros) - cell2 = self._make_one(value2, TestCell.timestamp_micros) - self.assertNotEqual(cell1, cell2) - - -class TestPartialRowData(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_data import PartialRowData - - return PartialRowData - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test_constructor(self): - row_key = object() - partial_row_data = self._make_one(row_key) - self.assertIs(partial_row_data._row_key, row_key) - self.assertEqual(partial_row_data._cells, {}) - - def test___eq__(self): - row_key = object() - partial_row_data1 = self._make_one(row_key) - partial_row_data2 = self._make_one(row_key) - self.assertEqual(partial_row_data1, partial_row_data2) - - def test___eq__type_differ(self): - partial_row_data1 = self._make_one(None) - partial_row_data2 = object() - self.assertNotEqual(partial_row_data1, partial_row_data2) - - def test___ne__same_value(self): - row_key = object() - partial_row_data1 = self._make_one(row_key) - partial_row_data2 = self._make_one(row_key) - comparison_val = partial_row_data1 != partial_row_data2 - 
self.assertFalse(comparison_val) - - def test___ne__(self): - row_key1 = object() - partial_row_data1 = self._make_one(row_key1) - row_key2 = object() - partial_row_data2 = self._make_one(row_key2) - self.assertNotEqual(partial_row_data1, partial_row_data2) - - def test___ne__cells(self): - row_key = object() - partial_row_data1 = self._make_one(row_key) - partial_row_data1._cells = object() - partial_row_data2 = self._make_one(row_key) - self.assertNotEqual(partial_row_data1, partial_row_data2) - - def test_to_dict(self): - cell1 = object() - cell2 = object() - cell3 = object() - - family_name1 = u"name1" - family_name2 = u"name2" - qual1 = b"col1" - qual2 = b"col2" - qual3 = b"col3" - - partial_row_data = self._make_one(None) - partial_row_data._cells = { - family_name1: {qual1: cell1, qual2: cell2}, - family_name2: {qual3: cell3}, - } - - result = partial_row_data.to_dict() - expected_result = { - b"name1:col1": cell1, - b"name1:col2": cell2, - b"name2:col3": cell3, - } - self.assertEqual(result, expected_result) - - def test_cell_value(self): - family_name = u"name1" - qualifier = b"col1" - cell = _make_cell(b"value-bytes") - - partial_row_data = self._make_one(None) - partial_row_data._cells = {family_name: {qualifier: [cell]}} - - result = partial_row_data.cell_value(family_name, qualifier) - self.assertEqual(result, cell.value) - - def test_cell_value_invalid_index(self): - family_name = u"name1" - qualifier = b"col1" - cell = _make_cell(b"") - - partial_row_data = self._make_one(None) - partial_row_data._cells = {family_name: {qualifier: [cell]}} - - with self.assertRaises(IndexError): - partial_row_data.cell_value(family_name, qualifier, index=None) - - def test_cell_value_invalid_column_family_key(self): - family_name = u"name1" - qualifier = b"col1" - - partial_row_data = self._make_one(None) - - with self.assertRaises(KeyError): - partial_row_data.cell_value(family_name, qualifier) - - def test_cell_value_invalid_column_key(self): - family_name = u"name1" - qualifier = b"col1" - - partial_row_data = self._make_one(None) - partial_row_data._cells = {family_name: {}} - - with self.assertRaises(KeyError): - partial_row_data.cell_value(family_name, qualifier) - - def test_cell_values(self): - family_name = u"name1" - qualifier = b"col1" - cell = _make_cell(b"value-bytes") - - partial_row_data = self._make_one(None) - partial_row_data._cells = {family_name: {qualifier: [cell]}} - - values = [] - for value, timestamp_micros in partial_row_data.cell_values( - family_name, qualifier - ): - values.append(value) - - self.assertEqual(values[0], cell.value) - - def test_cell_values_with_max_count(self): - family_name = u"name1" - qualifier = b"col1" - cell_1 = _make_cell(b"value-bytes-1") - cell_2 = _make_cell(b"value-bytes-2") - - partial_row_data = self._make_one(None) - partial_row_data._cells = {family_name: {qualifier: [cell_1, cell_2]}} - - values = [] - for value, timestamp_micros in partial_row_data.cell_values( - family_name, qualifier, max_count=1 - ): - values.append(value) - - self.assertEqual(1, len(values)) - self.assertEqual(values[0], cell_1.value) - def test_cells_property(self): - partial_row_data = self._make_one(None) - cells = {1: 2} - partial_row_data._cells = cells - self.assertEqual(partial_row_data.cells, cells) +TIMESTAMP_MICROS = 18738724000 # Make sure millis granularity +ROW_KEY = b"row-key" +FAMILY_NAME = u"family" +QUALIFIER = b"qualifier" +TIMESTAMP_MICROS = 100 +VALUE = b"value" +TABLE_NAME = "table_name" - def test_row_key_getter(self): - row_key = 
object() - partial_row_data = self._make_one(row_key) - self.assertIs(partial_row_data.row_key, row_key) +def _make_cell(*args, **kwargs): + from google.cloud.bigtable.row_data import Cell -class _Client(object): + return Cell(*args, **kwargs) - data_stub = None +def _cell_from_pb_test_helper(labels=None): + import datetime + from google.cloud._helpers import _EPOCH + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + from google.cloud.bigtable.row_data import Cell -class Test_retry_read_rows_exception(unittest.TestCase): - @staticmethod - def _call_fut(exc): - from google.cloud.bigtable.row_data import _retry_read_rows_exception + timestamp = _EPOCH + datetime.timedelta(microseconds=TIMESTAMP_MICROS) + value = b"value-bytes" - return _retry_read_rows_exception(exc) + if labels is None: + cell_pb = data_v2_pb2.Cell(value=value, timestamp_micros=TIMESTAMP_MICROS) + cell_expected = _make_cell(value, TIMESTAMP_MICROS) + else: + cell_pb = data_v2_pb2.Cell( + value=value, timestamp_micros=TIMESTAMP_MICROS, labels=labels + ) + cell_expected = _make_cell(value, TIMESTAMP_MICROS, labels=labels) - @staticmethod - def _make_grpc_call_error(exception): - from grpc import Call - from grpc import RpcError + result = Cell.from_pb(cell_pb) - class TestingException(Call, RpcError): - def __init__(self, exception): - self.exception = exception + assert result == cell_expected + assert result.timestamp == timestamp - def code(self): - return self.exception.grpc_status_code - def details(self): - return "Testing" +def test_cell_from_pb(): + _cell_from_pb_test_helper() - def trailing_metadata(self): - return None - return TestingException(exception) +def test_cell_from_pb_with_labels(): + labels = [u"label1", u"label2"] + _cell_from_pb_test_helper(labels) - def test_w_miss(self): - from google.api_core.exceptions import Conflict - exception = Conflict("testing") - self.assertFalse(self._call_fut(exception)) +def test_cell_constructor(): + value = object() + cell = _make_cell(value, TIMESTAMP_MICROS) + assert cell.value == value - def test_w_service_unavailable(self): - from google.api_core.exceptions import ServiceUnavailable - exception = ServiceUnavailable("testing") - self.assertTrue(self._call_fut(exception)) +def test_cell___eq__(): + value = object() + cell1 = _make_cell(value, TIMESTAMP_MICROS) + cell2 = _make_cell(value, TIMESTAMP_MICROS) + assert cell1 == cell2 - def test_w_deadline_exceeded(self): - from google.api_core.exceptions import DeadlineExceeded - exception = DeadlineExceeded("testing") - self.assertTrue(self._call_fut(exception)) +def test_cell___eq__type_differ(): + cell1 = _make_cell(None, None) + cell2 = object() + assert not (cell1 == cell2) - def test_w_miss_wrapped_in_grpc(self): - from google.api_core.exceptions import Conflict - wrapped = Conflict("testing") - exception = self._make_grpc_call_error(wrapped) - self.assertFalse(self._call_fut(exception)) +def test_cell___ne__same_value(): + value = object() + cell1 = _make_cell(value, TIMESTAMP_MICROS) + cell2 = _make_cell(value, TIMESTAMP_MICROS) + assert not (cell1 != cell2) - def test_w_service_unavailable_wrapped_in_grpc(self): - from google.api_core.exceptions import ServiceUnavailable - wrapped = ServiceUnavailable("testing") - exception = self._make_grpc_call_error(wrapped) - self.assertTrue(self._call_fut(exception)) +def test_cell___ne__(): + value1 = "value1" + value2 = "value2" + cell1 = _make_cell(value1, TIMESTAMP_MICROS) + cell2 = _make_cell(value2, TIMESTAMP_MICROS) + assert cell1 != cell2 - def 
test_w_deadline_exceeded_wrapped_in_grpc(self): - from google.api_core.exceptions import DeadlineExceeded - wrapped = DeadlineExceeded("testing") - exception = self._make_grpc_call_error(wrapped) - self.assertTrue(self._call_fut(exception)) +def _make_partial_row_data(*args, **kwargs): + from google.cloud.bigtable.row_data import PartialRowData + + return PartialRowData(*args, **kwargs) + + +def test_partial_row_data_constructor(): + row_key = object() + partial_row_data = _make_partial_row_data(row_key) + assert partial_row_data._row_key is row_key + assert partial_row_data._cells == {} + + +def test_partial_row_data___eq__(): + row_key = object() + partial_row_data1 = _make_partial_row_data(row_key) + partial_row_data2 = _make_partial_row_data(row_key) + assert partial_row_data1 == partial_row_data2 + + +def test_partial_row_data___eq__type_differ(): + partial_row_data1 = _make_partial_row_data(None) + partial_row_data2 = object() + assert not (partial_row_data1 == partial_row_data2) + + +def test_partial_row_data___ne__same_value(): + row_key = object() + partial_row_data1 = _make_partial_row_data(row_key) + partial_row_data2 = _make_partial_row_data(row_key) + assert not (partial_row_data1 != partial_row_data2) + + +def test_partial_row_data___ne__(): + row_key1 = object() + partial_row_data1 = _make_partial_row_data(row_key1) + row_key2 = object() + partial_row_data2 = _make_partial_row_data(row_key2) + assert partial_row_data1 != partial_row_data2 + + +def test_partial_row_data___ne__cells(): + row_key = object() + partial_row_data1 = _make_partial_row_data(row_key) + partial_row_data1._cells = object() + partial_row_data2 = _make_partial_row_data(row_key) + assert partial_row_data1 != partial_row_data2 + + +def test_partial_row_data_to_dict(): + cell1 = object() + cell2 = object() + cell3 = object() + + family_name1 = u"name1" + family_name2 = u"name2" + qual1 = b"col1" + qual2 = b"col2" + qual3 = b"col3" + + partial_row_data = _make_partial_row_data(None) + partial_row_data._cells = { + family_name1: {qual1: cell1, qual2: cell2}, + family_name2: {qual3: cell3}, + } + + result = partial_row_data.to_dict() + expected_result = { + b"name1:col1": cell1, + b"name1:col2": cell2, + b"name2:col3": cell3, + } + assert result == expected_result + + +def test_partial_row_data_cell_value(): + family_name = u"name1" + qualifier = b"col1" + cell = _make_cell_pb(b"value-bytes") + + partial_row_data = _make_partial_row_data(None) + partial_row_data._cells = {family_name: {qualifier: [cell]}} + + result = partial_row_data.cell_value(family_name, qualifier) + assert result == cell.value + + +def test_partial_row_data_cell_value_invalid_index(): + family_name = u"name1" + qualifier = b"col1" + cell = _make_cell_pb(b"") + + partial_row_data = _make_partial_row_data(None) + partial_row_data._cells = {family_name: {qualifier: [cell]}} + + with pytest.raises(IndexError): + partial_row_data.cell_value(family_name, qualifier, index=None) + + +def test_partial_row_data_cell_value_invalid_column_family_key(): + family_name = u"name1" + qualifier = b"col1" + + partial_row_data = _make_partial_row_data(None) + + with pytest.raises(KeyError): + partial_row_data.cell_value(family_name, qualifier) + + +def test_partial_row_data_cell_value_invalid_column_key(): + family_name = u"name1" + qualifier = b"col1" + + partial_row_data = _make_partial_row_data(None) + partial_row_data._cells = {family_name: {}} + + with pytest.raises(KeyError): + partial_row_data.cell_value(family_name, qualifier) + + +def 
test_partial_row_data_cell_values(): + family_name = u"name1" + qualifier = b"col1" + cell = _make_cell_pb(b"value-bytes") + + partial_row_data = _make_partial_row_data(None) + partial_row_data._cells = {family_name: {qualifier: [cell]}} + + values = [] + for value, timestamp_micros in partial_row_data.cell_values(family_name, qualifier): + values.append(value) + + assert values[0] == cell.value + + +def test_partial_row_data_cell_values_with_max_count(): + family_name = u"name1" + qualifier = b"col1" + cell_1 = _make_cell_pb(b"value-bytes-1") + cell_2 = _make_cell_pb(b"value-bytes-2") + + partial_row_data = _make_partial_row_data(None) + partial_row_data._cells = {family_name: {qualifier: [cell_1, cell_2]}} + + values = [] + for value, timestamp_micros in partial_row_data.cell_values( + family_name, qualifier, max_count=1 + ): + values.append(value) + + assert 1 == len(values) + assert values[0] == cell_1.value + + +def test_partial_row_data_cells_property(): + partial_row_data = _make_partial_row_data(None) + cells = {1: 2} + partial_row_data._cells = cells + assert partial_row_data.cells == cells + + +def test_partial_row_data_row_key_getter(): + row_key = object() + partial_row_data = _make_partial_row_data(row_key) + assert partial_row_data.row_key is row_key + + +def _make_grpc_call_error(exception): + from grpc import Call + from grpc import RpcError + + class TestingException(Call, RpcError): + def __init__(self, exception): + self.exception = exception + + def code(self): + return self.exception.grpc_status_code + + def details(self): + return "Testing" + + def trailing_metadata(self): + return None + + return TestingException(exception) + + +def test__retry_read_rows_exception_miss(): + from google.api_core.exceptions import Conflict + from google.cloud.bigtable.row_data import _retry_read_rows_exception + + exception = Conflict("testing") + assert not _retry_read_rows_exception(exception) + + +def test__retry_read_rows_exception_service_unavailable(): + from google.api_core.exceptions import ServiceUnavailable + from google.cloud.bigtable.row_data import _retry_read_rows_exception + + exception = ServiceUnavailable("testing") + assert _retry_read_rows_exception(exception) + + +def test__retry_read_rows_exception_deadline_exceeded(): + from google.api_core.exceptions import DeadlineExceeded + from google.cloud.bigtable.row_data import _retry_read_rows_exception + + exception = DeadlineExceeded("testing") + assert _retry_read_rows_exception(exception) + + +def test__retry_read_rows_exception_miss_wrapped_in_grpc(): + from google.api_core.exceptions import Conflict + from google.cloud.bigtable.row_data import _retry_read_rows_exception + + wrapped = Conflict("testing") + exception = _make_grpc_call_error(wrapped) + assert not _retry_read_rows_exception(exception) + + +def test__retry_read_rows_exception_service_unavailable_wrapped_in_grpc(): + from google.api_core.exceptions import ServiceUnavailable + from google.cloud.bigtable.row_data import _retry_read_rows_exception + + wrapped = ServiceUnavailable("testing") + exception = _make_grpc_call_error(wrapped) + assert _retry_read_rows_exception(exception) + + +def test__retry_read_rows_exception_deadline_exceeded_wrapped_in_grpc(): + from google.api_core.exceptions import DeadlineExceeded + from google.cloud.bigtable.row_data import _retry_read_rows_exception + + wrapped = DeadlineExceeded("testing") + exception = _make_grpc_call_error(wrapped) + assert _retry_read_rows_exception(exception) + + +def _make_partial_rows_data(*args, 
**kwargs): + from google.cloud.bigtable.row_data import PartialRowsData + + return PartialRowsData(*args, **kwargs) + + +def _partial_rows_data_consume_all(yrd): + return [row.row_key for row in yrd] + + +def _make_client(*args, **kwargs): + from google.cloud.bigtable.client import Client + + return Client(*args, **kwargs) + + +def test_partial_rows_data_constructor(): + from google.cloud.bigtable.row_data import DEFAULT_RETRY_READ_ROWS + + client = _Client() + client._data_stub = mock.MagicMock() + request = object() + partial_rows_data = _make_partial_rows_data(client._data_stub.ReadRows, request) + assert partial_rows_data.request is request + assert partial_rows_data.rows == {} + assert partial_rows_data.retry == DEFAULT_RETRY_READ_ROWS + + +def test_partial_rows_data_constructor_with_retry(): + from google.cloud.bigtable.row_data import DEFAULT_RETRY_READ_ROWS + + client = _Client() + client._data_stub = mock.MagicMock() + request = object() + retry = DEFAULT_RETRY_READ_ROWS + partial_rows_data = _make_partial_rows_data( + client._data_stub.ReadRows, request, retry + ) + partial_rows_data.read_method.assert_called_once_with( + request, timeout=DEFAULT_RETRY_READ_ROWS.deadline + 1 + ) + assert partial_rows_data.request is request + assert partial_rows_data.rows == {} + assert partial_rows_data.retry == retry + + +def test_partial_rows_data___eq__(): + client = _Client() + client._data_stub = mock.MagicMock() + request = object() + partial_rows_data1 = _make_partial_rows_data(client._data_stub.ReadRows, request) + partial_rows_data2 = _make_partial_rows_data(client._data_stub.ReadRows, request) + assert partial_rows_data1.rows == partial_rows_data2.rows + + +def test_partial_rows_data___eq__type_differ(): + client = _Client() + client._data_stub = mock.MagicMock() + request = object() + partial_rows_data1 = _make_partial_rows_data(client._data_stub.ReadRows, request) + partial_rows_data2 = object() + assert not (partial_rows_data1 == partial_rows_data2) + +def test_partial_rows_data___ne__same_value(): + client = _Client() + client._data_stub = mock.MagicMock() + request = object() + partial_rows_data1 = _make_partial_rows_data(client._data_stub.ReadRows, request) + partial_rows_data2 = _make_partial_rows_data(client._data_stub.ReadRows, request) + assert partial_rows_data1 != partial_rows_data2 -class TestPartialRowsData(unittest.TestCase): - ROW_KEY = b"row-key" - FAMILY_NAME = u"family" - QUALIFIER = b"qualifier" + +def test_partial_rows_data___ne__(): + client = _Client() + client._data_stub = mock.MagicMock() + request = object() + partial_rows_data1 = _make_partial_rows_data(client._data_stub.ReadRows, request) + partial_rows_data2 = _make_partial_rows_data(client._data_stub.ReadRows, request) + assert partial_rows_data1 != partial_rows_data2 + + +def test_partial_rows_data_rows_getter(): + client = _Client() + client._data_stub = mock.MagicMock() + request = object() + partial_rows_data = _make_partial_rows_data(client._data_stub.ReadRows, request) + partial_rows_data.rows = value = object() + assert partial_rows_data.rows is value + + +def test_partial_rows_data_state_start(): + client = _Client() + iterator = _MockCancellableIterator() + client._data_stub = mock.MagicMock() + client._data_stub.ReadRows.side_effect = [iterator] + request = object() + yrd = _make_partial_rows_data(client._data_stub.ReadRows, request) + assert yrd.state == yrd.NEW_ROW + + +def test_partial_rows_data_state_new_row_w_row(): + from google.cloud.bigtable_v2.services.bigtable import BigtableClient + + 
chunk = _ReadRowsResponseCellChunkPB( + row_key=ROW_KEY, + family_name=FAMILY_NAME, + qualifier=QUALIFIER, + timestamp_micros=TIMESTAMP_MICROS, + value=VALUE, + commit_row=True, + ) + chunks = [chunk] + + response = _ReadRowsResponseV2(chunks) + iterator = _MockCancellableIterator(response) + + data_api = mock.create_autospec(BigtableClient) + + credentials = _make_credentials() + client = _make_client(project="project-id", credentials=credentials, admin=True) + client._table_data_client = data_api + request = object() + + yrd = _make_partial_rows_data(client._table_data_client.read_rows, request) + assert yrd.retry._deadline == 60.0 + + yrd.response_iterator = iterator + rows = [row for row in yrd] + + result = rows[0] + assert result.row_key == ROW_KEY + assert yrd._counter == 1 + assert yrd.state == yrd.NEW_ROW + + +def test_partial_rows_data_multiple_chunks(): + from google.cloud.bigtable_v2.services.bigtable import BigtableClient + + chunk1 = _ReadRowsResponseCellChunkPB( + row_key=ROW_KEY, + family_name=FAMILY_NAME, + qualifier=QUALIFIER, + timestamp_micros=TIMESTAMP_MICROS, + value=VALUE, + commit_row=False, + ) + chunk2 = _ReadRowsResponseCellChunkPB( + qualifier=QUALIFIER + b"1", + timestamp_micros=TIMESTAMP_MICROS, + value=VALUE, + commit_row=True, + ) + chunks = [chunk1, chunk2] + + response = _ReadRowsResponseV2(chunks) + iterator = _MockCancellableIterator(response) + data_api = mock.create_autospec(BigtableClient) + credentials = _make_credentials() + client = _make_client(project="project-id", credentials=credentials, admin=True) + client._table_data_client = data_api + request = object() + + yrd = _make_partial_rows_data(data_api.read_rows, request) + + yrd.response_iterator = iterator + rows = [row for row in yrd] + result = rows[0] + assert result.row_key == ROW_KEY + assert yrd._counter == 1 + assert yrd.state == yrd.NEW_ROW + + +def test_partial_rows_data_cancel(): + client = _Client() + response_iterator = _MockCancellableIterator() + client._data_stub = mock.MagicMock() + client._data_stub.ReadRows.side_effect = [response_iterator] + request = object() + yield_rows_data = _make_partial_rows_data(client._data_stub.ReadRows, request) + assert response_iterator.cancel_calls == 0 + yield_rows_data.cancel() + assert response_iterator.cancel_calls == 1 + assert list(yield_rows_data) == [] + + +def test_partial_rows_data_cancel_between_chunks(): + from google.cloud.bigtable_v2.services.bigtable import BigtableClient + + chunk1 = _ReadRowsResponseCellChunkPB( + row_key=ROW_KEY, + family_name=FAMILY_NAME, + qualifier=QUALIFIER, + timestamp_micros=TIMESTAMP_MICROS, + value=VALUE, + commit_row=True, + ) + chunk2 = _ReadRowsResponseCellChunkPB( + qualifier=QUALIFIER + b"1", + timestamp_micros=TIMESTAMP_MICROS, + value=VALUE, + commit_row=True, + ) + chunks = [chunk1, chunk2] + response = _ReadRowsResponseV2(chunks) + response_iterator = _MockCancellableIterator(response) + + client = _Client() + data_api = mock.create_autospec(BigtableClient) + client._table_data_client = data_api + request = object() + yrd = _make_partial_rows_data(data_api.read_rows, request) + yrd.response_iterator = response_iterator + + rows = [] + for row in yrd: + yrd.cancel() + rows.append(row) + + assert response_iterator.cancel_calls == 1 + assert list(yrd) == [] + + +# 'consume_next' tested via 'TestPartialRowsData_JSON_acceptance_tests' + + +def test_partial_rows_data__copy_from_previous_unset(): + client = _Client() + client._data_stub = mock.MagicMock() + request = object() + yrd = 
_make_partial_rows_data(client._data_stub.read_rows, request) + cell = _PartialCellData() + yrd._copy_from_previous(cell) + assert cell.row_key == b"" + assert cell.family_name == u"" + assert cell.qualifier is None + assert cell.timestamp_micros == 0 + assert cell.labels == [] + + +def test_partial_rows_data__copy_from_previous_blank(): + ROW_KEY = "RK" + FAMILY_NAME = u"A" + QUALIFIER = b"C" + TIMESTAMP_MICROS = 100 + LABELS = ["L1", "L2"] + client = _Client() + client._data_stub = mock.MagicMock() + request = object() + yrd = _make_partial_rows_data(client._data_stub.ReadRows, request) + cell = _PartialCellData( + row_key=ROW_KEY, + family_name=FAMILY_NAME, + qualifier=QUALIFIER, + timestamp_micros=TIMESTAMP_MICROS, + labels=LABELS, + ) + yrd._previous_cell = _PartialCellData() + yrd._copy_from_previous(cell) + assert cell.row_key == ROW_KEY + assert cell.family_name == FAMILY_NAME + assert cell.qualifier == QUALIFIER + assert cell.timestamp_micros == TIMESTAMP_MICROS + assert cell.labels == LABELS + + +def test_partial_rows_data__copy_from_previous_filled(): + from google.cloud.bigtable_v2.services.bigtable import BigtableClient + + ROW_KEY = "RK" + FAMILY_NAME = u"A" + QUALIFIER = b"C" TIMESTAMP_MICROS = 100 - VALUE = b"value" + LABELS = ["L1", "L2"] + client = _Client() + data_api = mock.create_autospec(BigtableClient) + client._data_stub = data_api + request = object() + yrd = _make_partial_rows_data(client._data_stub.read_rows, request) + yrd._previous_cell = _PartialCellData( + row_key=ROW_KEY, + family_name=FAMILY_NAME, + qualifier=QUALIFIER, + timestamp_micros=TIMESTAMP_MICROS, + labels=LABELS, + ) + cell = _PartialCellData() + yrd._copy_from_previous(cell) + assert cell.row_key == ROW_KEY + assert cell.family_name == FAMILY_NAME + assert cell.qualifier == QUALIFIER + assert cell.timestamp_micros == 0 + assert cell.labels == [] + + +def test_partial_rows_data_valid_last_scanned_row_key_on_start(): + client = _Client() + response = _ReadRowsResponseV2(chunks=(), last_scanned_row_key="2.AFTER") + iterator = _MockCancellableIterator(response) + client._data_stub = mock.MagicMock() + client._data_stub.read_rows.side_effect = [iterator] + request = object() + yrd = _make_partial_rows_data(client._data_stub.read_rows, request) + yrd.last_scanned_row_key = "1.BEFORE" + _partial_rows_data_consume_all(yrd) + assert yrd.last_scanned_row_key == "2.AFTER" + + +def test_partial_rows_data_invalid_empty_chunk(): + from google.cloud.bigtable.row_data import InvalidChunk + from google.cloud.bigtable_v2.services.bigtable import BigtableClient + + client = _Client() + chunks = _generate_cell_chunks([""]) + response = _ReadRowsResponseV2(chunks) + iterator = _MockCancellableIterator(response) + client._data_stub = mock.create_autospec(BigtableClient) + client._data_stub.read_rows.side_effect = [iterator] + request = object() + yrd = _make_partial_rows_data(client._data_stub.read_rows, request) + with pytest.raises(InvalidChunk): + _partial_rows_data_consume_all(yrd) + + +def test_partial_rows_data_state_cell_in_progress(): + from google.cloud.bigtable_v2.services.bigtable import BigtableClient + + LABELS = ["L1", "L2"] + + request = object() + client = _Client() + client._data_stub = mock.create_autospec(BigtableClient) + yrd = _make_partial_rows_data(client._data_stub.read_rows, request) + + chunk = _ReadRowsResponseCellChunkPB( + row_key=ROW_KEY, + family_name=FAMILY_NAME, + qualifier=QUALIFIER, + timestamp_micros=TIMESTAMP_MICROS, + value=VALUE, + labels=LABELS, + ) + yrd._update_cell(chunk) + + 
more_cell_data = _ReadRowsResponseCellChunkPB(value=VALUE) + yrd._update_cell(more_cell_data) + + assert yrd._cell.row_key == ROW_KEY + assert yrd._cell.family_name == FAMILY_NAME + assert yrd._cell.qualifier == QUALIFIER + assert yrd._cell.timestamp_micros == TIMESTAMP_MICROS + assert yrd._cell.labels == LABELS + assert yrd._cell.value == VALUE + VALUE + + +def test_partial_rows_data_yield_rows_data(): + from google.cloud.bigtable_v2.services.bigtable import BigtableClient + + client = _Client() + + chunk = _ReadRowsResponseCellChunkPB( + row_key=ROW_KEY, + family_name=FAMILY_NAME, + qualifier=QUALIFIER, + timestamp_micros=TIMESTAMP_MICROS, + value=VALUE, + commit_row=True, + ) + chunks = [chunk] + + response = _ReadRowsResponseV2(chunks) + iterator = _MockCancellableIterator(response) + data_api = mock.create_autospec(BigtableClient) + client._data_stub = data_api + client._data_stub.read_rows.side_effect = [iterator] + + request = object() - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_data import PartialRowsData + yrd = _make_partial_rows_data(client._data_stub.read_rows, request) - return PartialRowsData + result = _partial_rows_data_consume_all(yrd)[0] - @staticmethod - def _get_target_client_class(): - from google.cloud.bigtable.client import Client + assert result == ROW_KEY - return Client - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) +def test_partial_rows_data_yield_retry_rows_data(): + from google.api_core import retry - def test_constructor(self): - from google.cloud.bigtable.row_data import DEFAULT_RETRY_READ_ROWS + client = _Client() - client = _Client() - client._data_stub = mock.MagicMock() - request = object() - partial_rows_data = self._make_one(client._data_stub.ReadRows, request) - self.assertIs(partial_rows_data.request, request) - self.assertEqual(partial_rows_data.rows, {}) - self.assertEqual(partial_rows_data.retry, DEFAULT_RETRY_READ_ROWS) + retry_read_rows = retry.Retry(predicate=_read_rows_retry_exception) - def test_constructor_with_retry(self): - from google.cloud.bigtable.row_data import DEFAULT_RETRY_READ_ROWS + chunk = _ReadRowsResponseCellChunkPB( + row_key=ROW_KEY, + family_name=FAMILY_NAME, + qualifier=QUALIFIER, + timestamp_micros=TIMESTAMP_MICROS, + value=VALUE, + commit_row=True, + ) + chunks = [chunk] - client = _Client() - client._data_stub = mock.MagicMock() - request = object() - retry = DEFAULT_RETRY_READ_ROWS - partial_rows_data = self._make_one(client._data_stub.ReadRows, request, retry) - partial_rows_data.read_method.assert_called_once_with( - request, timeout=DEFAULT_RETRY_READ_ROWS.deadline + 1 - ) - self.assertIs(partial_rows_data.request, request) - self.assertEqual(partial_rows_data.rows, {}) - self.assertEqual(partial_rows_data.retry, retry) - - def test___eq__(self): - client = _Client() - client._data_stub = mock.MagicMock() - request = object() - partial_rows_data1 = self._make_one(client._data_stub.ReadRows, request) - partial_rows_data2 = self._make_one(client._data_stub.ReadRows, request) - self.assertEqual(partial_rows_data1.rows, partial_rows_data2.rows) - - def test___eq__type_differ(self): - client = _Client() - client._data_stub = mock.MagicMock() - request = object() - partial_rows_data1 = self._make_one(client._data_stub.ReadRows, request) - partial_rows_data2 = object() - self.assertNotEqual(partial_rows_data1, partial_rows_data2) - - def test___ne__same_value(self): - client = _Client() - client._data_stub = mock.MagicMock() - request = object() - 
partial_rows_data1 = self._make_one(client._data_stub.ReadRows, request) - partial_rows_data2 = self._make_one(client._data_stub.ReadRows, request) - comparison_val = partial_rows_data1 != partial_rows_data2 - self.assertTrue(comparison_val) - - def test___ne__(self): - client = _Client() - client._data_stub = mock.MagicMock() - request = object() - partial_rows_data1 = self._make_one(client._data_stub.ReadRows, request) - partial_rows_data2 = self._make_one(client._data_stub.ReadRows, request) - self.assertNotEqual(partial_rows_data1, partial_rows_data2) - - def test_rows_getter(self): - client = _Client() - client._data_stub = mock.MagicMock() - request = object() - partial_rows_data = self._make_one(client._data_stub.ReadRows, request) - partial_rows_data.rows = value = object() - self.assertIs(partial_rows_data.rows, value) - - def _make_client(self, *args, **kwargs): - return self._get_target_client_class()(*args, **kwargs) - - def test_state_start(self): - client = _Client() - iterator = _MockCancellableIterator() - client._data_stub = mock.MagicMock() - client._data_stub.ReadRows.side_effect = [iterator] - request = object() - yrd = self._make_one(client._data_stub.ReadRows, request) - self.assertEqual(yrd.state, yrd.NEW_ROW) - - def test_state_new_row_w_row(self): - from google.cloud.bigtable_v2.services.bigtable import BigtableClient - - chunk = _ReadRowsResponseCellChunkPB( - row_key=self.ROW_KEY, - family_name=self.FAMILY_NAME, - qualifier=self.QUALIFIER, - timestamp_micros=self.TIMESTAMP_MICROS, - value=self.VALUE, - commit_row=True, - ) - chunks = [chunk] + response = _ReadRowsResponseV2(chunks) + failure_iterator = _MockFailureIterator_1() + iterator = _MockCancellableIterator(response) + client._data_stub = mock.MagicMock() + client._data_stub.ReadRows.side_effect = [failure_iterator, iterator] - response = _ReadRowsResponseV2(chunks) - iterator = _MockCancellableIterator(response) + request = object() - data_api = mock.create_autospec(BigtableClient) + yrd = _make_partial_rows_data(client._data_stub.ReadRows, request, retry_read_rows) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - client._table_data_client = data_api - request = object() - - yrd = self._make_one(client._table_data_client.read_rows, request) - self.assertEqual(yrd.retry._deadline, 60.0) - - yrd.response_iterator = iterator - rows = [row for row in yrd] - - result = rows[0] - self.assertEqual(result.row_key, self.ROW_KEY) - self.assertEqual(yrd._counter, 1) - self.assertEqual(yrd.state, yrd.NEW_ROW) - - def test_multiple_chunks(self): - from google.cloud.bigtable_v2.services.bigtable import BigtableClient - - chunk1 = _ReadRowsResponseCellChunkPB( - row_key=self.ROW_KEY, - family_name=self.FAMILY_NAME, - qualifier=self.QUALIFIER, - timestamp_micros=self.TIMESTAMP_MICROS, - value=self.VALUE, - commit_row=False, - ) - chunk2 = _ReadRowsResponseCellChunkPB( - qualifier=self.QUALIFIER + b"1", - timestamp_micros=self.TIMESTAMP_MICROS, - value=self.VALUE, - commit_row=True, - ) - chunks = [chunk1, chunk2] - - response = _ReadRowsResponseV2(chunks) - iterator = _MockCancellableIterator(response) - data_api = mock.create_autospec(BigtableClient) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - client._table_data_client = data_api - request = object() - - yrd = self._make_one(data_api.read_rows, request) - - yrd.response_iterator = iterator - rows = 
[row for row in yrd] - result = rows[0] - self.assertEqual(result.row_key, self.ROW_KEY) - self.assertEqual(yrd._counter, 1) - self.assertEqual(yrd.state, yrd.NEW_ROW) - - def test_cancel(self): - client = _Client() - response_iterator = _MockCancellableIterator() - client._data_stub = mock.MagicMock() - client._data_stub.ReadRows.side_effect = [response_iterator] - request = object() - yield_rows_data = self._make_one(client._data_stub.ReadRows, request) - self.assertEqual(response_iterator.cancel_calls, 0) - yield_rows_data.cancel() - self.assertEqual(response_iterator.cancel_calls, 1) - self.assertEqual(list(yield_rows_data), []) - - def test_cancel_between_chunks(self): - from google.cloud.bigtable_v2.services.bigtable import BigtableClient - - chunk1 = _ReadRowsResponseCellChunkPB( - row_key=self.ROW_KEY, - family_name=self.FAMILY_NAME, - qualifier=self.QUALIFIER, - timestamp_micros=self.TIMESTAMP_MICROS, - value=self.VALUE, - commit_row=True, - ) - chunk2 = _ReadRowsResponseCellChunkPB( - qualifier=self.QUALIFIER + b"1", - timestamp_micros=self.TIMESTAMP_MICROS, - value=self.VALUE, - commit_row=True, - ) - chunks = [chunk1, chunk2] - response = _ReadRowsResponseV2(chunks) - response_iterator = _MockCancellableIterator(response) - - client = _Client() - data_api = mock.create_autospec(BigtableClient) - client._table_data_client = data_api - request = object() - yrd = self._make_one(data_api.read_rows, request) - yrd.response_iterator = response_iterator - - rows = [] - for row in yrd: - yrd.cancel() - rows.append(row) - - self.assertEqual(response_iterator.cancel_calls, 1) - self.assertEqual(list(yrd), []) - - # 'consume_next' tested via 'TestPartialRowsData_JSON_acceptance_tests' - - def test__copy_from_previous_unset(self): - client = _Client() - client._data_stub = mock.MagicMock() - request = object() - yrd = self._make_one(client._data_stub.read_rows, request) - cell = _PartialCellData() - yrd._copy_from_previous(cell) - self.assertEqual(cell.row_key, b"") - self.assertEqual(cell.family_name, u"") - self.assertIsNone(cell.qualifier) - self.assertEqual(cell.timestamp_micros, 0) - self.assertEqual(cell.labels, []) - - def test__copy_from_previous_blank(self): - ROW_KEY = "RK" - FAMILY_NAME = u"A" - QUALIFIER = b"C" - TIMESTAMP_MICROS = 100 - LABELS = ["L1", "L2"] - client = _Client() - client._data_stub = mock.MagicMock() - request = object() - yrd = self._make_one(client._data_stub.ReadRows, request) - cell = _PartialCellData( - row_key=ROW_KEY, - family_name=FAMILY_NAME, - qualifier=QUALIFIER, - timestamp_micros=TIMESTAMP_MICROS, - labels=LABELS, - ) - yrd._previous_cell = _PartialCellData() - yrd._copy_from_previous(cell) - self.assertEqual(cell.row_key, ROW_KEY) - self.assertEqual(cell.family_name, FAMILY_NAME) - self.assertEqual(cell.qualifier, QUALIFIER) - self.assertEqual(cell.timestamp_micros, TIMESTAMP_MICROS) - self.assertEqual(cell.labels, LABELS) - - def test__copy_from_previous_filled(self): - from google.cloud.bigtable_v2.services.bigtable import BigtableClient - - ROW_KEY = "RK" - FAMILY_NAME = u"A" - QUALIFIER = b"C" - TIMESTAMP_MICROS = 100 - LABELS = ["L1", "L2"] - client = _Client() - data_api = mock.create_autospec(BigtableClient) - client._data_stub = data_api - request = object() - yrd = self._make_one(client._data_stub.read_rows, request) - yrd._previous_cell = _PartialCellData( - row_key=ROW_KEY, - family_name=FAMILY_NAME, - qualifier=QUALIFIER, - timestamp_micros=TIMESTAMP_MICROS, - labels=LABELS, - ) - cell = _PartialCellData() - 
yrd._copy_from_previous(cell) - self.assertEqual(cell.row_key, ROW_KEY) - self.assertEqual(cell.family_name, FAMILY_NAME) - self.assertEqual(cell.qualifier, QUALIFIER) - self.assertEqual(cell.timestamp_micros, 0) - self.assertEqual(cell.labels, []) - - def test_valid_last_scanned_row_key_on_start(self): - client = _Client() - response = _ReadRowsResponseV2(chunks=(), last_scanned_row_key="2.AFTER") - iterator = _MockCancellableIterator(response) - client._data_stub = mock.MagicMock() - client._data_stub.read_rows.side_effect = [iterator] - request = object() - yrd = self._make_one(client._data_stub.read_rows, request) - yrd.last_scanned_row_key = "1.BEFORE" - self._consume_all(yrd) - self.assertEqual(yrd.last_scanned_row_key, "2.AFTER") - - def test_invalid_empty_chunk(self): - from google.cloud.bigtable.row_data import InvalidChunk - from google.cloud.bigtable_v2.services.bigtable import BigtableClient - - client = _Client() - chunks = _generate_cell_chunks([""]) - response = _ReadRowsResponseV2(chunks) - iterator = _MockCancellableIterator(response) - client._data_stub = mock.create_autospec(BigtableClient) - client._data_stub.read_rows.side_effect = [iterator] - request = object() - yrd = self._make_one(client._data_stub.read_rows, request) - with self.assertRaises(InvalidChunk): - self._consume_all(yrd) - - def test_state_cell_in_progress(self): - from google.cloud.bigtable_v2.services.bigtable import BigtableClient - - LABELS = ["L1", "L2"] - - request = object() - client = _Client() - client._data_stub = mock.create_autospec(BigtableClient) - yrd = self._make_one(client._data_stub.read_rows, request) - - chunk = _ReadRowsResponseCellChunkPB( - row_key=self.ROW_KEY, - family_name=self.FAMILY_NAME, - qualifier=self.QUALIFIER, - timestamp_micros=self.TIMESTAMP_MICROS, - value=self.VALUE, - labels=LABELS, - ) - yrd._update_cell(chunk) - - more_cell_data = _ReadRowsResponseCellChunkPB(value=self.VALUE) - yrd._update_cell(more_cell_data) - - self.assertEqual(yrd._cell.row_key, self.ROW_KEY) - self.assertEqual(yrd._cell.family_name, self.FAMILY_NAME) - self.assertEqual(yrd._cell.qualifier, self.QUALIFIER) - self.assertEqual(yrd._cell.timestamp_micros, self.TIMESTAMP_MICROS) - self.assertEqual(yrd._cell.labels, LABELS) - self.assertEqual(yrd._cell.value, self.VALUE + self.VALUE) - - def test_yield_rows_data(self): - from google.cloud.bigtable_v2.services.bigtable import BigtableClient - - client = _Client() - - chunk = _ReadRowsResponseCellChunkPB( - row_key=self.ROW_KEY, - family_name=self.FAMILY_NAME, - qualifier=self.QUALIFIER, - timestamp_micros=self.TIMESTAMP_MICROS, - value=self.VALUE, - commit_row=True, - ) - chunks = [chunk] + result = _partial_rows_data_consume_all(yrd)[0] - response = _ReadRowsResponseV2(chunks) - iterator = _MockCancellableIterator(response) - data_api = mock.create_autospec(BigtableClient) - client._data_stub = data_api - client._data_stub.read_rows.side_effect = [iterator] + assert result == ROW_KEY - request = object() - yrd = self._make_one(client._data_stub.read_rows, request) +def _make_read_rows_request_manager(*args, **kwargs): + from google.cloud.bigtable.row_data import _ReadRowsRequestManager - result = self._consume_all(yrd)[0] + return _ReadRowsRequestManager(*args, **kwargs) - self.assertEqual(result, self.ROW_KEY) - def test_yield_retry_rows_data(self): - from google.api_core import retry +@pytest.fixture(scope="session") +def rrrm_data(): + from google.cloud.bigtable import row_set - client = _Client() + row_range1 = row_set.RowRange(b"row_key21", 
b"row_key29") + row_range2 = row_set.RowRange(b"row_key31", b"row_key39") + row_range3 = row_set.RowRange(b"row_key41", b"row_key49") - retry_read_rows = retry.Retry(predicate=_read_rows_retry_exception) + request = _ReadRowsRequestPB(table_name=TABLE_NAME) + request.rows.row_ranges.append(row_range1.get_range_kwargs()) + request.rows.row_ranges.append(row_range2.get_range_kwargs()) + request.rows.row_ranges.append(row_range3.get_range_kwargs()) - chunk = _ReadRowsResponseCellChunkPB( - row_key=self.ROW_KEY, - family_name=self.FAMILY_NAME, - qualifier=self.QUALIFIER, - timestamp_micros=self.TIMESTAMP_MICROS, - value=self.VALUE, - commit_row=True, - ) - chunks = [chunk] + yield { + "row_range1": row_range1, + "row_range2": row_range2, + "row_range3": row_range3, + "request": request, + } - response = _ReadRowsResponseV2(chunks) - failure_iterator = _MockFailureIterator_1() - iterator = _MockCancellableIterator(response) - client._data_stub = mock.MagicMock() - client._data_stub.ReadRows.side_effect = [failure_iterator, iterator] - request = object() +def test_RRRM_constructor(): + request = mock.Mock() + last_scanned_key = "last_key" + rows_read_so_far = 10 - yrd = self._make_one(client._data_stub.ReadRows, request, retry_read_rows) + request_manager = _make_read_rows_request_manager( + request, last_scanned_key, rows_read_so_far + ) + assert request == request_manager.message + assert last_scanned_key == request_manager.last_scanned_key + assert rows_read_so_far == request_manager.rows_read_so_far - result = self._consume_all(yrd)[0] - self.assertEqual(result, self.ROW_KEY) +def test_RRRM__filter_row_key(): + table_name = "table_name" + request = _ReadRowsRequestPB(table_name=table_name) + request.rows.row_keys.extend([b"row_key1", b"row_key2", b"row_key3", b"row_key4"]) - def _consume_all(self, yrd): - return [row.row_key for row in yrd] + last_scanned_key = b"row_key2" + request_manager = _make_read_rows_request_manager(request, last_scanned_key, 2) + row_keys = request_manager._filter_rows_keys() + expected_row_keys = [b"row_key3", b"row_key4"] + assert expected_row_keys == row_keys -class Test_ReadRowsRequestManager(unittest.TestCase): - @classmethod - def setUpClass(cls): - cls.table_name = "table_name" - cls.row_range1 = RowRange(b"row_key21", b"row_key29") - cls.row_range2 = RowRange(b"row_key31", b"row_key39") - cls.row_range3 = RowRange(b"row_key41", b"row_key49") - cls.request = _ReadRowsRequestPB(table_name=cls.table_name) - cls.request.rows.row_ranges.append(cls.row_range1.get_range_kwargs()) - cls.request.rows.row_ranges.append(cls.row_range2.get_range_kwargs()) - cls.request.rows.row_ranges.append(cls.row_range3.get_range_kwargs()) +def test_RRRM__filter_row_ranges_all_ranges_added_back(rrrm_data): + from google.cloud.bigtable_v2.types import data as data_v2_pb2 - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_data import _ReadRowsRequestManager + request = rrrm_data["request"] + last_scanned_key = b"row_key14" + request_manager = _make_read_rows_request_manager(request, last_scanned_key, 2) + row_ranges = request_manager._filter_row_ranges() - return _ReadRowsRequestManager + exp_row_range1 = data_v2_pb2.RowRange( + start_key_closed=b"row_key21", end_key_open=b"row_key29" + ) + exp_row_range2 = data_v2_pb2.RowRange( + start_key_closed=b"row_key31", end_key_open=b"row_key39" + ) + exp_row_range3 = data_v2_pb2.RowRange( + start_key_closed=b"row_key41", end_key_open=b"row_key49" + ) + exp_row_ranges = [exp_row_range1, exp_row_range2, exp_row_range3] - 
def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) + assert exp_row_ranges == row_ranges - def test_constructor(self): - request = mock.Mock() - last_scanned_key = "last_key" - rows_read_so_far = 10 - request_manager = self._make_one(request, last_scanned_key, rows_read_so_far) - self.assertEqual(request, request_manager.message) - self.assertEqual(last_scanned_key, request_manager.last_scanned_key) - self.assertEqual(rows_read_so_far, request_manager.rows_read_so_far) +def test_RRRM__filter_row_ranges_all_ranges_already_read(rrrm_data): + request = rrrm_data["request"] + last_scanned_key = b"row_key54" + request_manager = _make_read_rows_request_manager(request, last_scanned_key, 2) + row_ranges = request_manager._filter_row_ranges() - def test__filter_row_key(self): - table_name = "table_name" - request = _ReadRowsRequestPB(table_name=table_name) - request.rows.row_keys.extend( - [b"row_key1", b"row_key2", b"row_key3", b"row_key4"] - ) + assert row_ranges == [] - last_scanned_key = b"row_key2" - request_manager = self._make_one(request, last_scanned_key, 2) - row_keys = request_manager._filter_rows_keys() - expected_row_keys = [b"row_key3", b"row_key4"] - self.assertEqual(expected_row_keys, row_keys) +def test_RRRM__filter_row_ranges_all_ranges_already_read_open_closed(): + from google.cloud.bigtable import row_set - def test__filter_row_ranges_all_ranges_added_back(self): - last_scanned_key = b"row_key14" - request_manager = self._make_one(self.request, last_scanned_key, 2) - row_ranges = request_manager._filter_row_ranges() + last_scanned_key = b"row_key54" - exp_row_range1 = data_v2_pb2.RowRange( - start_key_closed=b"row_key21", end_key_open=b"row_key29" - ) - exp_row_range2 = data_v2_pb2.RowRange( - start_key_closed=b"row_key31", end_key_open=b"row_key39" - ) - exp_row_range3 = data_v2_pb2.RowRange( - start_key_closed=b"row_key41", end_key_open=b"row_key49" - ) - exp_row_ranges = [exp_row_range1, exp_row_range2, exp_row_range3] + row_range1 = row_set.RowRange(b"row_key21", b"row_key29", False, True) + row_range2 = row_set.RowRange(b"row_key31", b"row_key39") + row_range3 = row_set.RowRange(b"row_key41", b"row_key49", False, True) - self.assertEqual(exp_row_ranges, row_ranges) + request = _ReadRowsRequestPB(table_name=TABLE_NAME) + request.rows.row_ranges.append(row_range1.get_range_kwargs()) + request.rows.row_ranges.append(row_range2.get_range_kwargs()) + request.rows.row_ranges.append(row_range3.get_range_kwargs()) - def test__filter_row_ranges_all_ranges_already_read(self): - last_scanned_key = b"row_key54" - request_manager = self._make_one(self.request, last_scanned_key, 2) - row_ranges = request_manager._filter_row_ranges() + request_manager = _make_read_rows_request_manager(request, last_scanned_key, 2) + request_manager.new_message = _ReadRowsRequestPB(table_name=TABLE_NAME) + row_ranges = request_manager._filter_row_ranges() - self.assertEqual(row_ranges, []) + assert row_ranges == [] - def test__filter_row_ranges_all_ranges_already_read_open_closed(self): - last_scanned_key = b"row_key54" - row_range1 = RowRange(b"row_key21", b"row_key29", False, True) - row_range2 = RowRange(b"row_key31", b"row_key39") - row_range3 = RowRange(b"row_key41", b"row_key49", False, True) +def test_RRRM__filter_row_ranges_some_ranges_already_read(rrrm_data): + from google.cloud.bigtable_v2.types import data as data_v2_pb2 - request = _ReadRowsRequestPB(table_name=self.table_name) - request.rows.row_ranges.append(row_range1.get_range_kwargs()) - 
request.rows.row_ranges.append(row_range2.get_range_kwargs()) - request.rows.row_ranges.append(row_range3.get_range_kwargs()) + request = rrrm_data["request"] + last_scanned_key = b"row_key22" + request_manager = _make_read_rows_request_manager(request, last_scanned_key, 2) + request_manager.new_message = _ReadRowsRequestPB(table_name=TABLE_NAME) + row_ranges = request_manager._filter_row_ranges() - request_manager = self._make_one(request, last_scanned_key, 2) - request_manager.new_message = _ReadRowsRequestPB(table_name=self.table_name) - row_ranges = request_manager._filter_row_ranges() + exp_row_range1 = data_v2_pb2.RowRange( + start_key_open=b"row_key22", end_key_open=b"row_key29" + ) + exp_row_range2 = data_v2_pb2.RowRange( + start_key_closed=b"row_key31", end_key_open=b"row_key39" + ) + exp_row_range3 = data_v2_pb2.RowRange( + start_key_closed=b"row_key41", end_key_open=b"row_key49" + ) + exp_row_ranges = [exp_row_range1, exp_row_range2, exp_row_range3] - self.assertEqual(row_ranges, []) + assert exp_row_ranges == row_ranges - def test__filter_row_ranges_some_ranges_already_read(self): - last_scanned_key = b"row_key22" - request_manager = self._make_one(self.request, last_scanned_key, 2) - request_manager.new_message = _ReadRowsRequestPB(table_name=self.table_name) - row_ranges = request_manager._filter_row_ranges() - exp_row_range1 = data_v2_pb2.RowRange( - start_key_open=b"row_key22", end_key_open=b"row_key29" - ) - exp_row_range2 = data_v2_pb2.RowRange( - start_key_closed=b"row_key31", end_key_open=b"row_key39" - ) - exp_row_range3 = data_v2_pb2.RowRange( - start_key_closed=b"row_key41", end_key_open=b"row_key49" - ) - exp_row_ranges = [exp_row_range1, exp_row_range2, exp_row_range3] +def test_RRRM_build_updated_request(rrrm_data): + from google.cloud.bigtable.row_filters import RowSampleFilter + from google.cloud.bigtable_v2 import types - self.assertEqual(exp_row_ranges, row_ranges) + row_range1 = rrrm_data["row_range1"] + row_filter = RowSampleFilter(0.33) + last_scanned_key = b"row_key25" + request = _ReadRowsRequestPB( + filter=row_filter.to_pb(), rows_limit=8, table_name=TABLE_NAME + ) + request.rows.row_ranges.append(row_range1.get_range_kwargs()) - def test_build_updated_request(self): - from google.cloud.bigtable.row_filters import RowSampleFilter - from google.cloud.bigtable_v2.types import RowRange + request_manager = _make_read_rows_request_manager(request, last_scanned_key, 2) - row_filter = RowSampleFilter(0.33) - last_scanned_key = b"row_key25" - request = _ReadRowsRequestPB( - filter=row_filter.to_pb(), rows_limit=8, table_name=self.table_name - ) - request.rows.row_ranges.append(self.row_range1.get_range_kwargs()) + result = request_manager.build_updated_request() - request_manager = self._make_one(request, last_scanned_key, 2) + expected_result = _ReadRowsRequestPB( + table_name=TABLE_NAME, filter=row_filter.to_pb(), rows_limit=6 + ) - result = request_manager.build_updated_request() + row_range1 = types.RowRange( + start_key_open=last_scanned_key, end_key_open=row_range1.end_key + ) + expected_result.rows.row_ranges.append(row_range1) - expected_result = _ReadRowsRequestPB( - table_name=self.table_name, filter=row_filter.to_pb(), rows_limit=6 - ) + assert expected_result == result - row_range1 = RowRange( - start_key_open=last_scanned_key, end_key_open=self.row_range1.end_key - ) - expected_result.rows.row_ranges.append(row_range1) - self.assertEqual(expected_result, result) +def test_RRRM_build_updated_request_full_table(): + from google.cloud.bigtable_v2 
import types - def test_build_updated_request_full_table(self): - from google.cloud.bigtable_v2.types import RowRange + last_scanned_key = b"row_key14" - last_scanned_key = b"row_key14" + request = _ReadRowsRequestPB(table_name=TABLE_NAME) + request_manager = _make_read_rows_request_manager(request, last_scanned_key, 2) - request = _ReadRowsRequestPB(table_name=self.table_name) - request_manager = self._make_one(request, last_scanned_key, 2) + result = request_manager.build_updated_request() + expected_result = _ReadRowsRequestPB(table_name=TABLE_NAME, filter={}) + row_range1 = types.RowRange(start_key_open=last_scanned_key) + expected_result.rows.row_ranges.append(row_range1) + assert expected_result == result - result = request_manager.build_updated_request() - expected_result = _ReadRowsRequestPB(table_name=self.table_name, filter={}) - row_range1 = RowRange(start_key_open=last_scanned_key) - expected_result.rows.row_ranges.append(row_range1) - self.assertEqual(expected_result, result) - def test_build_updated_request_no_start_key(self): - from google.cloud.bigtable.row_filters import RowSampleFilter - from google.cloud.bigtable_v2.types import RowRange +def test_RRRM_build_updated_request_no_start_key(): + from google.cloud.bigtable.row_filters import RowSampleFilter + from google.cloud.bigtable_v2 import types - row_filter = RowSampleFilter(0.33) - last_scanned_key = b"row_key25" - request = _ReadRowsRequestPB( - filter=row_filter.to_pb(), rows_limit=8, table_name=self.table_name - ) - row_range1 = RowRange(end_key_open=b"row_key29") - request.rows.row_ranges.append(row_range1) + row_filter = RowSampleFilter(0.33) + last_scanned_key = b"row_key25" + request = _ReadRowsRequestPB( + filter=row_filter.to_pb(), rows_limit=8, table_name=TABLE_NAME + ) + row_range1 = types.RowRange(end_key_open=b"row_key29") + request.rows.row_ranges.append(row_range1) - request_manager = self._make_one(request, last_scanned_key, 2) + request_manager = _make_read_rows_request_manager(request, last_scanned_key, 2) - result = request_manager.build_updated_request() + result = request_manager.build_updated_request() - expected_result = _ReadRowsRequestPB( - table_name=self.table_name, filter=row_filter.to_pb(), rows_limit=6 - ) + expected_result = _ReadRowsRequestPB( + table_name=TABLE_NAME, filter=row_filter.to_pb(), rows_limit=6 + ) - row_range2 = RowRange( - start_key_open=last_scanned_key, end_key_open=b"row_key29" - ) - expected_result.rows.row_ranges.append(row_range2) + row_range2 = types.RowRange( + start_key_open=last_scanned_key, end_key_open=b"row_key29" + ) + expected_result.rows.row_ranges.append(row_range2) - self.assertEqual(expected_result, result) + assert expected_result == result - def test_build_updated_request_no_end_key(self): - from google.cloud.bigtable.row_filters import RowSampleFilter - from google.cloud.bigtable_v2.types import RowRange - row_filter = RowSampleFilter(0.33) - last_scanned_key = b"row_key25" - request = _ReadRowsRequestPB( - filter=row_filter.to_pb(), rows_limit=8, table_name=self.table_name - ) +def test_RRRM_build_updated_request_no_end_key(): + from google.cloud.bigtable.row_filters import RowSampleFilter + from google.cloud.bigtable_v2 import types - row_range1 = RowRange(start_key_closed=b"row_key20") - request.rows.row_ranges.append(row_range1) + row_filter = RowSampleFilter(0.33) + last_scanned_key = b"row_key25" + request = _ReadRowsRequestPB( + filter=row_filter.to_pb(), rows_limit=8, table_name=TABLE_NAME + ) - request_manager = self._make_one(request, 
last_scanned_key, 2) + row_range1 = types.RowRange(start_key_closed=b"row_key20") + request.rows.row_ranges.append(row_range1) - result = request_manager.build_updated_request() + request_manager = _make_read_rows_request_manager(request, last_scanned_key, 2) - expected_result = _ReadRowsRequestPB( - table_name=self.table_name, filter=row_filter.to_pb(), rows_limit=6 - ) - row_range2 = RowRange(start_key_open=last_scanned_key) - expected_result.rows.row_ranges.append(row_range2) + result = request_manager.build_updated_request() - self.assertEqual(expected_result, result) + expected_result = _ReadRowsRequestPB( + table_name=TABLE_NAME, filter=row_filter.to_pb(), rows_limit=6 + ) + row_range2 = types.RowRange(start_key_open=last_scanned_key) + expected_result.rows.row_ranges.append(row_range2) - def test_build_updated_request_rows(self): - from google.cloud.bigtable.row_filters import RowSampleFilter + assert expected_result == result - row_filter = RowSampleFilter(0.33) - last_scanned_key = b"row_key4" - request = _ReadRowsRequestPB( - filter=row_filter.to_pb(), rows_limit=5, table_name=self.table_name - ) - request.rows.row_keys.extend( - [ - b"row_key1", - b"row_key2", - b"row_key4", - b"row_key5", - b"row_key7", - b"row_key9", - ] - ) - request_manager = self._make_one(request, last_scanned_key, 3) +def test_RRRM_build_updated_request_rows(): + from google.cloud.bigtable.row_filters import RowSampleFilter - result = request_manager.build_updated_request() + row_filter = RowSampleFilter(0.33) + last_scanned_key = b"row_key4" + request = _ReadRowsRequestPB( + filter=row_filter.to_pb(), rows_limit=5, table_name=TABLE_NAME + ) + request.rows.row_keys.extend( + [b"row_key1", b"row_key2", b"row_key4", b"row_key5", b"row_key7", b"row_key9"] + ) - expected_result = _ReadRowsRequestPB( - table_name=self.table_name, filter=row_filter.to_pb(), rows_limit=2 - ) - expected_result.rows.row_keys.extend([b"row_key5", b"row_key7", b"row_key9"]) + request_manager = _make_read_rows_request_manager(request, last_scanned_key, 3) - self.assertEqual(expected_result, result) + result = request_manager.build_updated_request() - def test_build_updated_request_rows_limit(self): - from google.cloud.bigtable_v2.types import RowRange + expected_result = _ReadRowsRequestPB( + table_name=TABLE_NAME, filter=row_filter.to_pb(), rows_limit=2 + ) + expected_result.rows.row_keys.extend([b"row_key5", b"row_key7", b"row_key9"]) - last_scanned_key = b"row_key14" + assert expected_result == result - request = _ReadRowsRequestPB(table_name=self.table_name, rows_limit=10) - request_manager = self._make_one(request, last_scanned_key, 2) - result = request_manager.build_updated_request() - expected_result = _ReadRowsRequestPB( - table_name=self.table_name, filter={}, rows_limit=8 - ) - row_range1 = RowRange(start_key_open=last_scanned_key) - expected_result.rows.row_ranges.append(row_range1) - self.assertEqual(expected_result, result) +def test_RRRM_build_updated_request_rows_limit(): + from google.cloud.bigtable_v2 import types - def test__key_already_read(self): - last_scanned_key = b"row_key14" - request = _ReadRowsRequestPB(table_name=self.table_name) - request_manager = self._make_one(request, last_scanned_key, 2) + last_scanned_key = b"row_key14" - self.assertTrue(request_manager._key_already_read(b"row_key11")) - self.assertFalse(request_manager._key_already_read(b"row_key16")) + request = _ReadRowsRequestPB(table_name=TABLE_NAME, rows_limit=10) + request_manager = _make_read_rows_request_manager(request, last_scanned_key, 
2) + result = request_manager.build_updated_request() + expected_result = _ReadRowsRequestPB(table_name=TABLE_NAME, filter={}, rows_limit=8) + row_range1 = types.RowRange(start_key_open=last_scanned_key) + expected_result.rows.row_ranges.append(row_range1) + assert expected_result == result -class TestPartialRowsData_JSON_acceptance_tests(unittest.TestCase): - _json_tests = None +def test_RRRM__key_already_read(): + last_scanned_key = b"row_key14" + request = _ReadRowsRequestPB(table_name=TABLE_NAME) + request_manager = _make_read_rows_request_manager(request, last_scanned_key, 2) - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_data import PartialRowsData + assert request_manager._key_already_read(b"row_key11") + assert not request_manager._key_already_read(b"row_key16") - return PartialRowsData - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) +@pytest.fixture(scope="session") +def json_tests(): + dirname = os.path.dirname(__file__) + filename = os.path.join(dirname, "read-rows-acceptance-test.json") + raw = _parse_readrows_acceptance_tests(filename) + tests = {} + for (name, chunks, results) in raw: + tests[name] = chunks, results - def _load_json_test(self, test_name): - import os + yield tests - if self.__class__._json_tests is None: - dirname = os.path.dirname(__file__) - filename = os.path.join(dirname, "read-rows-acceptance-test.json") - raw = _parse_readrows_acceptance_tests(filename) - tests = self.__class__._json_tests = {} - for (name, chunks, results) in raw: - tests[name] = chunks, results - return self.__class__._json_tests[test_name] - # JSON Error cases: invalid chunks +# JSON Error cases: invalid chunks - def _fail_during_consume(self, testcase_name): - from google.cloud.bigtable.row_data import InvalidChunk - client = _Client() - chunks, results = self._load_json_test(testcase_name) - response = _ReadRowsResponseV2(chunks) - iterator = _MockCancellableIterator(response) - client._data_stub = mock.MagicMock() - client._data_stub.ReadRows.side_effect = [iterator] - request = object() - prd = self._make_one(client._data_stub.ReadRows, request) - with self.assertRaises(InvalidChunk): - prd.consume_all() - expected_result = self._sort_flattend_cells( - [result for result in results if not result["error"]] - ) - flattened = self._sort_flattend_cells(_flatten_cells(prd)) - self.assertEqual(flattened, expected_result) +def _fail_during_consume(json_tests, testcase_name): + from google.cloud.bigtable.row_data import InvalidChunk - def test_invalid_no_cell_key_before_commit(self): - self._fail_during_consume("invalid - no cell key before commit") + client = _Client() + chunks, results = json_tests[testcase_name] + response = _ReadRowsResponseV2(chunks) + iterator = _MockCancellableIterator(response) + client._data_stub = mock.MagicMock() + client._data_stub.ReadRows.side_effect = [iterator] + request = object() + prd = _make_partial_rows_data(client._data_stub.ReadRows, request) + with pytest.raises(InvalidChunk): + prd.consume_all() + expected_result = _sort_flattend_cells( + [result for result in results if not result["error"]] + ) + flattened = _sort_flattend_cells(_flatten_cells(prd)) + assert flattened == expected_result - def test_invalid_no_cell_key_before_value(self): - self._fail_during_consume("invalid - no cell key before value") - def test_invalid_new_col_family_wo_qualifier(self): - self._fail_during_consume("invalid - new col family must specify qualifier") +def 
test_prd_json_accept_invalid_no_cell_key_before_commit(json_tests): + _fail_during_consume(json_tests, "invalid - no cell key before commit") - def test_invalid_no_commit_between_rows(self): - self._fail_during_consume("invalid - no commit between rows") - def test_invalid_no_commit_after_first_row(self): - self._fail_during_consume("invalid - no commit after first row") +def test_prd_json_accept_invalid_no_cell_key_before_value(json_tests): + _fail_during_consume(json_tests, "invalid - no cell key before value") - def test_invalid_duplicate_row_key(self): - self._fail_during_consume("invalid - duplicate row key") - def test_invalid_new_row_missing_row_key(self): - self._fail_during_consume("invalid - new row missing row key") +def test_prd_json_accept_invalid_new_col_family_wo_qualifier(json_tests): + _fail_during_consume(json_tests, "invalid - new col family must specify qualifier") - def test_invalid_bare_reset(self): - self._fail_during_consume("invalid - bare reset") - def test_invalid_bad_reset_no_commit(self): - self._fail_during_consume("invalid - bad reset, no commit") +def test_prd_json_accept_invalid_no_commit_between_rows(json_tests): + _fail_during_consume(json_tests, "invalid - no commit between rows") - def test_invalid_missing_key_after_reset(self): - self._fail_during_consume("invalid - missing key after reset") - def test_invalid_reset_with_chunk(self): - self._fail_during_consume("invalid - reset with chunk") +def test_prd_json_accept_invalid_no_commit_after_first_row(json_tests): + _fail_during_consume(json_tests, "invalid - no commit after first row") - def test_invalid_commit_with_chunk(self): - self._fail_during_consume("invalid - commit with chunk") - # JSON Error cases: incomplete final row +def test_prd_json_accept_invalid_duplicate_row_key(json_tests): + _fail_during_consume(json_tests, "invalid - duplicate row key") - def _sort_flattend_cells(self, flattened): - import operator - key_func = operator.itemgetter("rk", "fm", "qual") - return sorted(flattened, key=key_func) +def test_prd_json_accept_invalid_new_row_missing_row_key(json_tests): + _fail_during_consume(json_tests, "invalid - new row missing row key") - def _incomplete_final_row(self, testcase_name): - client = _Client() - chunks, results = self._load_json_test(testcase_name) - response = _ReadRowsResponseV2(chunks) - iterator = _MockCancellableIterator(response) - client._data_stub = mock.MagicMock() - client._data_stub.ReadRows.side_effect = [iterator] - request = object() - prd = self._make_one(client._data_stub.ReadRows, request) - with self.assertRaises(ValueError): - prd.consume_all() - self.assertEqual(prd.state, prd.ROW_IN_PROGRESS) - expected_result = self._sort_flattend_cells( - [result for result in results if not result["error"]] - ) - flattened = self._sort_flattend_cells(_flatten_cells(prd)) - self.assertEqual(flattened, expected_result) - def test_invalid_no_commit(self): - self._incomplete_final_row("invalid - no commit") +def test_prd_json_accept_invalid_bare_reset(json_tests): + _fail_during_consume(json_tests, "invalid - bare reset") - def test_invalid_last_row_missing_commit(self): - self._incomplete_final_row("invalid - last row missing commit") - # Non-error cases +def test_prd_json_accept_invalid_bad_reset_no_commit(json_tests): + _fail_during_consume(json_tests, "invalid - bad reset, no commit") - _marker = object() - def _match_results(self, testcase_name, expected_result=_marker): - from google.cloud.bigtable_v2.services.bigtable import BigtableClient +def 
test_prd_json_accept_invalid_missing_key_after_reset(json_tests): + _fail_during_consume(json_tests, "invalid - missing key after reset") - client = _Client() - chunks, results = self._load_json_test(testcase_name) - response = _ReadRowsResponseV2(chunks) - iterator = _MockCancellableIterator(response) - data_api = mock.create_autospec(BigtableClient) - client._table_data_client = data_api - client._table_data_client.read_rows.side_effect = [iterator] - request = object() - prd = self._make_one(client._table_data_client.read_rows, request) + +def test_prd_json_accept_invalid_reset_with_chunk(json_tests): + _fail_during_consume(json_tests, "invalid - reset with chunk") + + +def test_prd_json_accept_invalid_commit_with_chunk(json_tests): + _fail_during_consume(json_tests, "invalid - commit with chunk") + + +# JSON Error cases: incomplete final row + + +def _sort_flattend_cells(flattened): + import operator + + key_func = operator.itemgetter("rk", "fm", "qual") + return sorted(flattened, key=key_func) + + +def _incomplete_final_row(json_tests, testcase_name): + client = _Client() + chunks, results = json_tests[testcase_name] + response = _ReadRowsResponseV2(chunks) + iterator = _MockCancellableIterator(response) + client._data_stub = mock.MagicMock() + client._data_stub.ReadRows.side_effect = [iterator] + request = object() + prd = _make_partial_rows_data(client._data_stub.ReadRows, request) + with pytest.raises(ValueError): prd.consume_all() - flattened = self._sort_flattend_cells(_flatten_cells(prd)) - if expected_result is self._marker: - expected_result = self._sort_flattend_cells(results) - self.assertEqual(flattened, expected_result) + assert prd.state == prd.ROW_IN_PROGRESS + expected_result = _sort_flattend_cells( + [result for result in results if not result["error"]] + ) + flattened = _sort_flattend_cells(_flatten_cells(prd)) + assert flattened == expected_result + + +def test_prd_json_accept_invalid_no_commit(json_tests): + _incomplete_final_row(json_tests, "invalid - no commit") - def test_bare_commit_implies_ts_zero(self): - self._match_results("bare commit implies ts=0") - def test_simple_row_with_timestamp(self): - self._match_results("simple row with timestamp") +def test_prd_json_accept_invalid_last_row_missing_commit(json_tests): + _incomplete_final_row(json_tests, "invalid - last row missing commit") - def test_missing_timestamp_implies_ts_zero(self): - self._match_results("missing timestamp, implied ts=0") - def test_empty_cell_value(self): - self._match_results("empty cell value") +# Non-error cases - def test_two_unsplit_cells(self): - self._match_results("two unsplit cells") +_marker = object() - def test_two_qualifiers(self): - self._match_results("two qualifiers") - def test_two_families(self): - self._match_results("two families") +def _match_results(json_tests, testcase_name, expected_result=_marker): + from google.cloud.bigtable_v2.services.bigtable import BigtableClient - def test_with_labels(self): - self._match_results("with labels") + client = _Client() + chunks, results = json_tests[testcase_name] + response = _ReadRowsResponseV2(chunks) + iterator = _MockCancellableIterator(response) + data_api = mock.create_autospec(BigtableClient) + client._table_data_client = data_api + client._table_data_client.read_rows.side_effect = [iterator] + request = object() + prd = _make_partial_rows_data(client._table_data_client.read_rows, request) + prd.consume_all() + flattened = _sort_flattend_cells(_flatten_cells(prd)) + if expected_result is _marker: + expected_result = 
_sort_flattend_cells(results) + assert flattened == expected_result - def test_split_cell_bare_commit(self): - self._match_results("split cell, bare commit") - def test_split_cell(self): - self._match_results("split cell") +def test_prd_json_accept_bare_commit_implies_ts_zero(json_tests): + _match_results(json_tests, "bare commit implies ts=0") - def test_split_four_ways(self): - self._match_results("split four ways") - def test_two_split_cells(self): - self._match_results("two split cells") +def test_prd_json_accept_simple_row_with_timestamp(json_tests): + _match_results(json_tests, "simple row with timestamp") - def test_multi_qualifier_splits(self): - self._match_results("multi-qualifier splits") - def test_multi_qualifier_multi_split(self): - self._match_results("multi-qualifier multi-split") +def test_prd_json_accept_missing_timestamp_implies_ts_zero(json_tests): + _match_results(json_tests, "missing timestamp, implied ts=0") - def test_multi_family_split(self): - self._match_results("multi-family split") - def test_two_rows(self): - self._match_results("two rows") +def test_prd_json_accept_empty_cell_value(json_tests): + _match_results(json_tests, "empty cell value") - def test_two_rows_implicit_timestamp(self): - self._match_results("two rows implicit timestamp") - def test_two_rows_empty_value(self): - self._match_results("two rows empty value") +def test_prd_json_accept_two_unsplit_cells(json_tests): + _match_results(json_tests, "two unsplit cells") - def test_two_rows_one_with_multiple_cells(self): - self._match_results("two rows, one with multiple cells") - def test_two_rows_multiple_cells_multiple_families(self): - self._match_results("two rows, multiple cells, multiple families") +def test_prd_json_accept_two_qualifiers(json_tests): + _match_results(json_tests, "two qualifiers") - def test_two_rows_multiple_cells(self): - self._match_results("two rows, multiple cells") - def test_two_rows_four_cells_two_labels(self): - self._match_results("two rows, four cells, 2 labels") +def test_prd_json_accept_two_families(json_tests): + _match_results(json_tests, "two families") - def test_two_rows_with_splits_same_timestamp(self): - self._match_results("two rows with splits, same timestamp") - def test_no_data_after_reset(self): - # JSON testcase has `"results": null` - self._match_results("no data after reset", expected_result=[]) +def test_prd_json_accept_with_labels(json_tests): + _match_results(json_tests, "with labels") - def test_simple_reset(self): - self._match_results("simple reset") - def test_reset_to_new_val(self): - self._match_results("reset to new val") +def test_prd_json_accept_split_cell_bare_commit(json_tests): + _match_results(json_tests, "split cell, bare commit") - def test_reset_to_new_qual(self): - self._match_results("reset to new qual") - def test_reset_with_splits(self): - self._match_results("reset with splits") +def test_prd_json_accept_split_cell(json_tests): + _match_results(json_tests, "split cell") - def test_two_resets(self): - self._match_results("two resets") - def test_reset_to_new_row(self): - self._match_results("reset to new row") +def test_prd_json_accept_split_four_ways(json_tests): + _match_results(json_tests, "split four ways") - def test_reset_in_between_chunks(self): - self._match_results("reset in between chunks") - def test_empty_cell_chunk(self): - self._match_results("empty cell chunk") +def test_prd_json_accept_two_split_cells(json_tests): + _match_results(json_tests, "two split cells") - def test_empty_second_qualifier(self): - 
self._match_results("empty second qualifier") + +def test_prd_json_accept_multi_qualifier_splits(json_tests): + _match_results(json_tests, "multi-qualifier splits") + + +def test_prd_json_accept_multi_qualifier_multi_split(json_tests): + _match_results(json_tests, "multi-qualifier multi-split") + + +def test_prd_json_accept_multi_family_split(json_tests): + _match_results(json_tests, "multi-family split") + + +def test_prd_json_accept_two_rows(json_tests): + _match_results(json_tests, "two rows") + + +def test_prd_json_accept_two_rows_implicit_timestamp(json_tests): + _match_results(json_tests, "two rows implicit timestamp") + + +def test_prd_json_accept_two_rows_empty_value(json_tests): + _match_results(json_tests, "two rows empty value") + + +def test_prd_json_accept_two_rows_one_with_multiple_cells(json_tests): + _match_results(json_tests, "two rows, one with multiple cells") + + +def test_prd_json_accept_two_rows_multiple_cells_multiple_families(json_tests): + _match_results(json_tests, "two rows, multiple cells, multiple families") + + +def test_prd_json_accept_two_rows_multiple_cells(json_tests): + _match_results(json_tests, "two rows, multiple cells") + + +def test_prd_json_accept_two_rows_four_cells_two_labels(json_tests): + _match_results(json_tests, "two rows, four cells, 2 labels") + + +def test_prd_json_accept_two_rows_with_splits_same_timestamp(json_tests): + _match_results(json_tests, "two rows with splits, same timestamp") + + +def test_prd_json_accept_no_data_after_reset(json_tests): + # JSON testcase has `"results": null` + _match_results(json_tests, "no data after reset", expected_result=[]) + + +def test_prd_json_accept_simple_reset(json_tests): + _match_results(json_tests, "simple reset") + + +def test_prd_json_accept_reset_to_new_val(json_tests): + _match_results(json_tests, "reset to new val") + + +def test_prd_json_accept_reset_to_new_qual(json_tests): + _match_results(json_tests, "reset to new qual") + + +def test_prd_json_accept_reset_with_splits(json_tests): + _match_results(json_tests, "reset with splits") + + +def test_prd_json_accept_two_resets(json_tests): + _match_results(json_tests, "two resets") + + +def test_prd_json_accept_reset_to_new_row(json_tests): + _match_results(json_tests, "reset to new row") + + +def test_prd_json_accept_reset_in_between_chunks(json_tests): + _match_results(json_tests, "reset in between chunks") + + +def test_prd_json_accept_empty_cell_chunk(json_tests): + _match_results(json_tests, "empty cell chunk") + + +def test_prd_json_accept_empty_second_qualifier(json_tests): + _match_results(json_tests, "empty second qualifier") def _flatten_cells(prd): @@ -1271,6 +1347,8 @@ def next(self): class _MockFailureIterator_1(object): def next(self): + from google.api_core.exceptions import DeadlineExceeded + raise DeadlineExceeded("Failed to read from server") __next__ = next @@ -1343,10 +1421,10 @@ def _ReadRowsResponseCellChunkPB(*args, **kw): return message -def _make_cell(value): +def _make_cell_pb(value): from google.cloud.bigtable import row_data - return row_data.Cell(value, TestCell.timestamp_micros) + return row_data.Cell(value, TIMESTAMP_MICROS) def _ReadRowsRequestPB(*args, **kw): @@ -1356,4 +1434,11 @@ def _ReadRowsRequestPB(*args, **kw): def _read_rows_retry_exception(exc): + from google.api_core.exceptions import DeadlineExceeded + return isinstance(exc, DeadlineExceeded) + + +class _Client(object): + + data_stub = None diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_filters.py 
b/packages/google-cloud-bigtable/tests/unit/test_row_filters.py index c42345ee0686..8c591e03cf33 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_filters.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_filters.py @@ -13,1047 +13,1107 @@ # limitations under the License. -import unittest +import pytest -class Test_BoolFilter(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_filters import _BoolFilter +def test_bool_filter_constructor(): + from google.cloud.bigtable.row_filters import _BoolFilter - return _BoolFilter + flag = object() + row_filter = _BoolFilter(flag) + assert row_filter.flag is flag - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - def test_constructor(self): - flag = object() - row_filter = self._make_one(flag) - self.assertIs(row_filter.flag, flag) +def test_bool_filter___eq__type_differ(): + from google.cloud.bigtable.row_filters import _BoolFilter - def test___eq__type_differ(self): - flag = object() - row_filter1 = self._make_one(flag) - row_filter2 = object() - self.assertNotEqual(row_filter1, row_filter2) + flag = object() + row_filter1 = _BoolFilter(flag) + row_filter2 = object() + assert not (row_filter1 == row_filter2) - def test___eq__same_value(self): - flag = object() - row_filter1 = self._make_one(flag) - row_filter2 = self._make_one(flag) - self.assertEqual(row_filter1, row_filter2) - def test___ne__same_value(self): - flag = object() - row_filter1 = self._make_one(flag) - row_filter2 = self._make_one(flag) - comparison_val = row_filter1 != row_filter2 - self.assertFalse(comparison_val) +def test_bool_filter___eq__same_value(): + from google.cloud.bigtable.row_filters import _BoolFilter + flag = object() + row_filter1 = _BoolFilter(flag) + row_filter2 = _BoolFilter(flag) + assert row_filter1 == row_filter2 -class TestSinkFilter(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_filters import SinkFilter - return SinkFilter +def test_bool_filter___ne__same_value(): + from google.cloud.bigtable.row_filters import _BoolFilter - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) + flag = object() + row_filter1 = _BoolFilter(flag) + row_filter2 = _BoolFilter(flag) + assert not (row_filter1 != row_filter2) - def test_to_pb(self): - flag = True - row_filter = self._make_one(flag) - pb_val = row_filter.to_pb() - expected_pb = _RowFilterPB(sink=flag) - self.assertEqual(pb_val, expected_pb) +def test_sink_filter_to_pb(): + from google.cloud.bigtable.row_filters import SinkFilter -class TestPassAllFilter(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_filters import PassAllFilter + flag = True + row_filter = SinkFilter(flag) + pb_val = row_filter.to_pb() + expected_pb = _RowFilterPB(sink=flag) + assert pb_val == expected_pb - return PassAllFilter - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) +def test_pass_all_filter_to_pb(): + from google.cloud.bigtable.row_filters import PassAllFilter - def test_to_pb(self): - flag = True - row_filter = self._make_one(flag) - pb_val = row_filter.to_pb() - expected_pb = _RowFilterPB(pass_all_filter=flag) - self.assertEqual(pb_val, expected_pb) + flag = True + row_filter = PassAllFilter(flag) + pb_val = row_filter.to_pb() + expected_pb = _RowFilterPB(pass_all_filter=flag) + assert pb_val == expected_pb -class TestBlockAllFilter(unittest.TestCase): - 
@staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_filters import BlockAllFilter +def test_block_all_filter_to_pb(): + from google.cloud.bigtable.row_filters import BlockAllFilter - return BlockAllFilter + flag = True + row_filter = BlockAllFilter(flag) + pb_val = row_filter.to_pb() + expected_pb = _RowFilterPB(block_all_filter=flag) + assert pb_val == expected_pb - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - def test_to_pb(self): - flag = True - row_filter = self._make_one(flag) - pb_val = row_filter.to_pb() - expected_pb = _RowFilterPB(block_all_filter=flag) - self.assertEqual(pb_val, expected_pb) +def test_regex_filter_constructor(): + from google.cloud.bigtable.row_filters import _RegexFilter + regex = b"abc" + row_filter = _RegexFilter(regex) + assert row_filter.regex is regex -class Test_RegexFilter(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_filters import _RegexFilter - return _RegexFilter +def test_regex_filter_constructor_non_bytes(): + from google.cloud.bigtable.row_filters import _RegexFilter - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) + regex = u"abc" + row_filter = _RegexFilter(regex) + assert row_filter.regex == b"abc" - def test_constructor(self): - regex = b"abc" - row_filter = self._make_one(regex) - self.assertIs(row_filter.regex, regex) - def test_constructor_non_bytes(self): - regex = u"abc" - row_filter = self._make_one(regex) - self.assertEqual(row_filter.regex, b"abc") +def test_regex_filter__eq__type_differ(): + from google.cloud.bigtable.row_filters import _RegexFilter - def test___eq__type_differ(self): - regex = b"def-rgx" - row_filter1 = self._make_one(regex) - row_filter2 = object() - self.assertNotEqual(row_filter1, row_filter2) + regex = b"def-rgx" + row_filter1 = _RegexFilter(regex) + row_filter2 = object() + assert not (row_filter1 == row_filter2) - def test___eq__same_value(self): - regex = b"trex-regex" - row_filter1 = self._make_one(regex) - row_filter2 = self._make_one(regex) - self.assertEqual(row_filter1, row_filter2) - def test___ne__same_value(self): - regex = b"abc" - row_filter1 = self._make_one(regex) - row_filter2 = self._make_one(regex) - comparison_val = row_filter1 != row_filter2 - self.assertFalse(comparison_val) +def test_regex_filter__eq__same_value(): + from google.cloud.bigtable.row_filters import _RegexFilter + regex = b"trex-regex" + row_filter1 = _RegexFilter(regex) + row_filter2 = _RegexFilter(regex) + assert row_filter1 == row_filter2 -class TestRowKeyRegexFilter(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_filters import RowKeyRegexFilter - return RowKeyRegexFilter +def test_regex_filter__ne__same_value(): + from google.cloud.bigtable.row_filters import _RegexFilter - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) + regex = b"abc" + row_filter1 = _RegexFilter(regex) + row_filter2 = _RegexFilter(regex) + assert not (row_filter1 != row_filter2) - def test_to_pb(self): - regex = b"row-key-regex" - row_filter = self._make_one(regex) - pb_val = row_filter.to_pb() - expected_pb = _RowFilterPB(row_key_regex_filter=regex) - self.assertEqual(pb_val, expected_pb) - - -class TestRowSampleFilter(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_filters import RowSampleFilter - return RowSampleFilter - - def _make_one(self, *args, **kwargs): - 
return self._get_target_class()(*args, **kwargs) - - def test_constructor(self): - sample = object() - row_filter = self._make_one(sample) - self.assertIs(row_filter.sample, sample) - - def test___eq__type_differ(self): - sample = object() - row_filter1 = self._make_one(sample) - row_filter2 = object() - self.assertNotEqual(row_filter1, row_filter2) - - def test___eq__same_value(self): - sample = object() - row_filter1 = self._make_one(sample) - row_filter2 = self._make_one(sample) - self.assertEqual(row_filter1, row_filter2) - - def test_to_pb(self): - sample = 0.25 - row_filter = self._make_one(sample) - pb_val = row_filter.to_pb() - expected_pb = _RowFilterPB(row_sample_filter=sample) - self.assertEqual(pb_val, expected_pb) - - -class TestFamilyNameRegexFilter(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_filters import FamilyNameRegexFilter - - return FamilyNameRegexFilter - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test_to_pb(self): - regex = u"family-regex" - row_filter = self._make_one(regex) - pb_val = row_filter.to_pb() - expected_pb = _RowFilterPB(family_name_regex_filter=regex) - self.assertEqual(pb_val, expected_pb) - - -class TestColumnQualifierRegexFilter(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_filters import ColumnQualifierRegexFilter - - return ColumnQualifierRegexFilter - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test_to_pb(self): - regex = b"column-regex" - row_filter = self._make_one(regex) - pb_val = row_filter.to_pb() - expected_pb = _RowFilterPB(column_qualifier_regex_filter=regex) - self.assertEqual(pb_val, expected_pb) - - -class TestTimestampRange(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_filters import TimestampRange - - return TimestampRange - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test_constructor(self): - start = object() - end = object() - time_range = self._make_one(start=start, end=end) - self.assertIs(time_range.start, start) - self.assertIs(time_range.end, end) - - def test___eq__(self): - start = object() - end = object() - time_range1 = self._make_one(start=start, end=end) - time_range2 = self._make_one(start=start, end=end) - self.assertEqual(time_range1, time_range2) - - def test___eq__type_differ(self): - start = object() - end = object() - time_range1 = self._make_one(start=start, end=end) - time_range2 = object() - self.assertNotEqual(time_range1, time_range2) - - def test___ne__same_value(self): - start = object() - end = object() - time_range1 = self._make_one(start=start, end=end) - time_range2 = self._make_one(start=start, end=end) - comparison_val = time_range1 != time_range2 - self.assertFalse(comparison_val) - - def _to_pb_helper(self, pb_kwargs, start=None, end=None): - import datetime - from google.cloud._helpers import _EPOCH - - if start is not None: - start = _EPOCH + datetime.timedelta(microseconds=start) - if end is not None: - end = _EPOCH + datetime.timedelta(microseconds=end) - time_range = self._make_one(start=start, end=end) - expected_pb = _TimestampRangePB(**pb_kwargs) - time_pb = time_range.to_pb() - self.assertEqual( - time_pb.start_timestamp_micros, expected_pb.start_timestamp_micros - ) - self.assertEqual(time_pb.end_timestamp_micros, expected_pb.end_timestamp_micros) - self.assertEqual(time_pb, 
expected_pb) - - def test_to_pb(self): - start_micros = 30871234 - end_micros = 12939371234 - start_millis = start_micros // 1000 * 1000 - self.assertEqual(start_millis, 30871000) - end_millis = end_micros // 1000 * 1000 + 1000 - self.assertEqual(end_millis, 12939372000) - pb_kwargs = {} - pb_kwargs["start_timestamp_micros"] = start_millis - pb_kwargs["end_timestamp_micros"] = end_millis - self._to_pb_helper(pb_kwargs, start=start_micros, end=end_micros) - - def test_to_pb_start_only(self): - # Makes sure already milliseconds granularity - start_micros = 30871000 - start_millis = start_micros // 1000 * 1000 - self.assertEqual(start_millis, 30871000) - pb_kwargs = {} - pb_kwargs["start_timestamp_micros"] = start_millis - self._to_pb_helper(pb_kwargs, start=start_micros, end=None) - - def test_to_pb_end_only(self): - # Makes sure already milliseconds granularity - end_micros = 12939371000 - end_millis = end_micros // 1000 * 1000 - self.assertEqual(end_millis, 12939371000) - pb_kwargs = {} - pb_kwargs["end_timestamp_micros"] = end_millis - self._to_pb_helper(pb_kwargs, start=None, end=end_micros) - - -class TestTimestampRangeFilter(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_filters import TimestampRangeFilter - - return TimestampRangeFilter - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test_constructor(self): - range_ = object() - row_filter = self._make_one(range_) - self.assertIs(row_filter.range_, range_) - - def test___eq__type_differ(self): - range_ = object() - row_filter1 = self._make_one(range_) - row_filter2 = object() - self.assertNotEqual(row_filter1, row_filter2) - - def test___eq__same_value(self): - range_ = object() - row_filter1 = self._make_one(range_) - row_filter2 = self._make_one(range_) - self.assertEqual(row_filter1, row_filter2) - - def test_to_pb(self): - from google.cloud.bigtable.row_filters import TimestampRange - - range_ = TimestampRange() - row_filter = self._make_one(range_) - pb_val = row_filter.to_pb() - expected_pb = _RowFilterPB(timestamp_range_filter=_TimestampRangePB()) - self.assertEqual(pb_val, expected_pb) - - -class TestColumnRangeFilter(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_filters import ColumnRangeFilter - - return ColumnRangeFilter - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test_constructor_defaults(self): - column_family_id = object() - row_filter = self._make_one(column_family_id) - self.assertIs(row_filter.column_family_id, column_family_id) - self.assertIsNone(row_filter.start_column) - self.assertIsNone(row_filter.end_column) - self.assertTrue(row_filter.inclusive_start) - self.assertTrue(row_filter.inclusive_end) - - def test_constructor_explicit(self): - column_family_id = object() - start_column = object() - end_column = object() - inclusive_start = object() - inclusive_end = object() - row_filter = self._make_one( - column_family_id, - start_column=start_column, - end_column=end_column, - inclusive_start=inclusive_start, - inclusive_end=inclusive_end, - ) - self.assertIs(row_filter.column_family_id, column_family_id) - self.assertIs(row_filter.start_column, start_column) - self.assertIs(row_filter.end_column, end_column) - self.assertIs(row_filter.inclusive_start, inclusive_start) - self.assertIs(row_filter.inclusive_end, inclusive_end) - - def test_constructor_bad_start(self): - column_family_id = object() - 
self.assertRaises( - ValueError, self._make_one, column_family_id, inclusive_start=True - ) +def test_row_key_regex_filter_to_pb(): + from google.cloud.bigtable.row_filters import RowKeyRegexFilter - def test_constructor_bad_end(self): - column_family_id = object() - self.assertRaises( - ValueError, self._make_one, column_family_id, inclusive_end=True - ) + regex = b"row-key-regex" + row_filter = RowKeyRegexFilter(regex) + pb_val = row_filter.to_pb() + expected_pb = _RowFilterPB(row_key_regex_filter=regex) + assert pb_val == expected_pb - def test___eq__(self): - column_family_id = object() - start_column = object() - end_column = object() - inclusive_start = object() - inclusive_end = object() - row_filter1 = self._make_one( - column_family_id, - start_column=start_column, - end_column=end_column, - inclusive_start=inclusive_start, - inclusive_end=inclusive_end, - ) - row_filter2 = self._make_one( - column_family_id, - start_column=start_column, - end_column=end_column, - inclusive_start=inclusive_start, - inclusive_end=inclusive_end, - ) - self.assertEqual(row_filter1, row_filter2) - - def test___eq__type_differ(self): - column_family_id = object() - row_filter1 = self._make_one(column_family_id) - row_filter2 = object() - self.assertNotEqual(row_filter1, row_filter2) - - def test_to_pb(self): - column_family_id = u"column-family-id" - row_filter = self._make_one(column_family_id) - col_range_pb = _ColumnRangePB(family_name=column_family_id) - expected_pb = _RowFilterPB(column_range_filter=col_range_pb) - self.assertEqual(row_filter.to_pb(), expected_pb) - - def test_to_pb_inclusive_start(self): - column_family_id = u"column-family-id" - column = b"column" - row_filter = self._make_one(column_family_id, start_column=column) - col_range_pb = _ColumnRangePB( - family_name=column_family_id, start_qualifier_closed=column - ) - expected_pb = _RowFilterPB(column_range_filter=col_range_pb) - self.assertEqual(row_filter.to_pb(), expected_pb) - - def test_to_pb_exclusive_start(self): - column_family_id = u"column-family-id" - column = b"column" - row_filter = self._make_one( - column_family_id, start_column=column, inclusive_start=False - ) - col_range_pb = _ColumnRangePB( - family_name=column_family_id, start_qualifier_open=column - ) - expected_pb = _RowFilterPB(column_range_filter=col_range_pb) - self.assertEqual(row_filter.to_pb(), expected_pb) - - def test_to_pb_inclusive_end(self): - column_family_id = u"column-family-id" - column = b"column" - row_filter = self._make_one(column_family_id, end_column=column) - col_range_pb = _ColumnRangePB( - family_name=column_family_id, end_qualifier_closed=column - ) - expected_pb = _RowFilterPB(column_range_filter=col_range_pb) - self.assertEqual(row_filter.to_pb(), expected_pb) - - def test_to_pb_exclusive_end(self): - column_family_id = u"column-family-id" - column = b"column" - row_filter = self._make_one( - column_family_id, end_column=column, inclusive_end=False - ) - col_range_pb = _ColumnRangePB( - family_name=column_family_id, end_qualifier_open=column - ) - expected_pb = _RowFilterPB(column_range_filter=col_range_pb) - self.assertEqual(row_filter.to_pb(), expected_pb) - - -class TestValueRegexFilter(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_filters import ValueRegexFilter - - return ValueRegexFilter - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test_to_pb_w_bytes(self): - value = regex = b"value-regex" - row_filter = 
self._make_one(value) - pb_val = row_filter.to_pb() - expected_pb = _RowFilterPB(value_regex_filter=regex) - self.assertEqual(pb_val, expected_pb) - - def test_to_pb_w_str(self): - value = u"value-regex" - regex = value.encode("ascii") - row_filter = self._make_one(value) - pb_val = row_filter.to_pb() - expected_pb = _RowFilterPB(value_regex_filter=regex) - self.assertEqual(pb_val, expected_pb) - - -class TestExactValueFilter(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_filters import ExactValueFilter - - return ExactValueFilter - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test_to_pb_w_bytes(self): - value = regex = b"value-regex" - row_filter = self._make_one(value) - pb_val = row_filter.to_pb() - expected_pb = _RowFilterPB(value_regex_filter=regex) - self.assertEqual(pb_val, expected_pb) - - def test_to_pb_w_str(self): - value = u"value-regex" - regex = value.encode("ascii") - row_filter = self._make_one(value) - pb_val = row_filter.to_pb() - expected_pb = _RowFilterPB(value_regex_filter=regex) - self.assertEqual(pb_val, expected_pb) - - def test_to_pb_w_int(self): - import struct - - value = 1 - regex = struct.Struct(">q").pack(value) - row_filter = self._make_one(value) - pb_val = row_filter.to_pb() - expected_pb = _RowFilterPB(value_regex_filter=regex) - self.assertEqual(pb_val, expected_pb) - - -class TestValueRangeFilter(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_filters import ValueRangeFilter - - return ValueRangeFilter - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test_constructor_defaults(self): - row_filter = self._make_one() - - self.assertIsNone(row_filter.start_value) - self.assertIsNone(row_filter.end_value) - self.assertTrue(row_filter.inclusive_start) - self.assertTrue(row_filter.inclusive_end) - - def test_constructor_explicit(self): - start_value = object() - end_value = object() - inclusive_start = object() - inclusive_end = object() - - row_filter = self._make_one( - start_value=start_value, - end_value=end_value, - inclusive_start=inclusive_start, - inclusive_end=inclusive_end, - ) - self.assertIs(row_filter.start_value, start_value) - self.assertIs(row_filter.end_value, end_value) - self.assertIs(row_filter.inclusive_start, inclusive_start) - self.assertIs(row_filter.inclusive_end, inclusive_end) - - def test_constructor_w_int_values(self): - import struct - - start_value = 1 - end_value = 10 - - row_filter = self._make_one(start_value=start_value, end_value=end_value) - - expected_start_value = struct.Struct(">q").pack(start_value) - expected_end_value = struct.Struct(">q").pack(end_value) - - self.assertEqual(row_filter.start_value, expected_start_value) - self.assertEqual(row_filter.end_value, expected_end_value) - self.assertTrue(row_filter.inclusive_start) - self.assertTrue(row_filter.inclusive_end) - - def test_constructor_bad_start(self): - with self.assertRaises(ValueError): - self._make_one(inclusive_start=True) - - def test_constructor_bad_end(self): - with self.assertRaises(ValueError): - self._make_one(inclusive_end=True) - - def test___eq__(self): - start_value = object() - end_value = object() - inclusive_start = object() - inclusive_end = object() - row_filter1 = self._make_one( - start_value=start_value, - end_value=end_value, - inclusive_start=inclusive_start, - inclusive_end=inclusive_end, - ) - row_filter2 = self._make_one( - 
start_value=start_value, - end_value=end_value, - inclusive_start=inclusive_start, - inclusive_end=inclusive_end, - ) - self.assertEqual(row_filter1, row_filter2) - - def test___eq__type_differ(self): - row_filter1 = self._make_one() - row_filter2 = object() - self.assertNotEqual(row_filter1, row_filter2) - - def test_to_pb(self): - row_filter = self._make_one() - expected_pb = _RowFilterPB(value_range_filter=_ValueRangePB()) - self.assertEqual(row_filter.to_pb(), expected_pb) - - def test_to_pb_inclusive_start(self): - value = b"some-value" - row_filter = self._make_one(start_value=value) - val_range_pb = _ValueRangePB(start_value_closed=value) - expected_pb = _RowFilterPB(value_range_filter=val_range_pb) - self.assertEqual(row_filter.to_pb(), expected_pb) - - def test_to_pb_exclusive_start(self): - value = b"some-value" - row_filter = self._make_one(start_value=value, inclusive_start=False) - val_range_pb = _ValueRangePB(start_value_open=value) - expected_pb = _RowFilterPB(value_range_filter=val_range_pb) - self.assertEqual(row_filter.to_pb(), expected_pb) - - def test_to_pb_inclusive_end(self): - value = b"some-value" - row_filter = self._make_one(end_value=value) - val_range_pb = _ValueRangePB(end_value_closed=value) - expected_pb = _RowFilterPB(value_range_filter=val_range_pb) - self.assertEqual(row_filter.to_pb(), expected_pb) - - def test_to_pb_exclusive_end(self): - value = b"some-value" - row_filter = self._make_one(end_value=value, inclusive_end=False) - val_range_pb = _ValueRangePB(end_value_open=value) - expected_pb = _RowFilterPB(value_range_filter=val_range_pb) - self.assertEqual(row_filter.to_pb(), expected_pb) - - -class Test_CellCountFilter(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_filters import _CellCountFilter - - return _CellCountFilter - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test_constructor(self): - num_cells = object() - row_filter = self._make_one(num_cells) - self.assertIs(row_filter.num_cells, num_cells) - - def test___eq__type_differ(self): - num_cells = object() - row_filter1 = self._make_one(num_cells) - row_filter2 = object() - self.assertNotEqual(row_filter1, row_filter2) - - def test___eq__same_value(self): - num_cells = object() - row_filter1 = self._make_one(num_cells) - row_filter2 = self._make_one(num_cells) - self.assertEqual(row_filter1, row_filter2) - - def test___ne__same_value(self): - num_cells = object() - row_filter1 = self._make_one(num_cells) - row_filter2 = self._make_one(num_cells) - comparison_val = row_filter1 != row_filter2 - self.assertFalse(comparison_val) - - -class TestCellsRowOffsetFilter(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_filters import CellsRowOffsetFilter - - return CellsRowOffsetFilter - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test_to_pb(self): - num_cells = 76 - row_filter = self._make_one(num_cells) - pb_val = row_filter.to_pb() - expected_pb = _RowFilterPB(cells_per_row_offset_filter=num_cells) - self.assertEqual(pb_val, expected_pb) - - -class TestCellsRowLimitFilter(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_filters import CellsRowLimitFilter - - return CellsRowLimitFilter - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test_to_pb(self): - num_cells = 189 - row_filter = 
self._make_one(num_cells) - pb_val = row_filter.to_pb() - expected_pb = _RowFilterPB(cells_per_row_limit_filter=num_cells) - self.assertEqual(pb_val, expected_pb) - - -class TestCellsColumnLimitFilter(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_filters import CellsColumnLimitFilter - - return CellsColumnLimitFilter - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) +def test_row_sample_filter_constructor(): + from google.cloud.bigtable.row_filters import RowSampleFilter - def test_to_pb(self): - num_cells = 10 - row_filter = self._make_one(num_cells) - pb_val = row_filter.to_pb() - expected_pb = _RowFilterPB(cells_per_column_limit_filter=num_cells) - self.assertEqual(pb_val, expected_pb) + sample = object() + row_filter = RowSampleFilter(sample) + assert row_filter.sample is sample -class TestStripValueTransformerFilter(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_filters import StripValueTransformerFilter +def test_row_sample_filter___eq__type_differ(): + from google.cloud.bigtable.row_filters import RowSampleFilter - return StripValueTransformerFilter + sample = object() + row_filter1 = RowSampleFilter(sample) + row_filter2 = object() + assert not (row_filter1 == row_filter2) - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - def test_to_pb(self): - flag = True - row_filter = self._make_one(flag) - pb_val = row_filter.to_pb() - expected_pb = _RowFilterPB(strip_value_transformer=flag) - self.assertEqual(pb_val, expected_pb) +def test_row_sample_filter___eq__same_value(): + from google.cloud.bigtable.row_filters import RowSampleFilter + sample = object() + row_filter1 = RowSampleFilter(sample) + row_filter2 = RowSampleFilter(sample) + assert row_filter1 == row_filter2 -class TestApplyLabelFilter(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_filters import ApplyLabelFilter - return ApplyLabelFilter +def test_row_sample_filter___ne__(): + from google.cloud.bigtable.row_filters import RowSampleFilter - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) + sample = object() + other_sample = object() + row_filter1 = RowSampleFilter(sample) + row_filter2 = RowSampleFilter(other_sample) + assert row_filter1 != row_filter2 - def test_constructor(self): - label = object() - row_filter = self._make_one(label) - self.assertIs(row_filter.label, label) - def test___eq__type_differ(self): - label = object() - row_filter1 = self._make_one(label) - row_filter2 = object() - self.assertNotEqual(row_filter1, row_filter2) +def test_row_sample_filter_to_pb(): + from google.cloud.bigtable.row_filters import RowSampleFilter - def test___eq__same_value(self): - label = object() - row_filter1 = self._make_one(label) - row_filter2 = self._make_one(label) - self.assertEqual(row_filter1, row_filter2) + sample = 0.25 + row_filter = RowSampleFilter(sample) + pb_val = row_filter.to_pb() + expected_pb = _RowFilterPB(row_sample_filter=sample) + assert pb_val == expected_pb - def test_to_pb(self): - label = u"label" - row_filter = self._make_one(label) - pb_val = row_filter.to_pb() - expected_pb = _RowFilterPB(apply_label_transformer=label) - self.assertEqual(pb_val, expected_pb) +def test_family_name_regex_filter_to_pb(): + from google.cloud.bigtable.row_filters import FamilyNameRegexFilter -class Test_FilterCombination(unittest.TestCase): - 
@staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_filters import _FilterCombination + regex = u"family-regex" + row_filter = FamilyNameRegexFilter(regex) + pb_val = row_filter.to_pb() + expected_pb = _RowFilterPB(family_name_regex_filter=regex) + assert pb_val == expected_pb - return _FilterCombination - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) +def test_column_qualifier_regex_filter_to_pb(): + from google.cloud.bigtable.row_filters import ColumnQualifierRegexFilter - def test_constructor_defaults(self): - row_filter = self._make_one() - self.assertEqual(row_filter.filters, []) + regex = b"column-regex" + row_filter = ColumnQualifierRegexFilter(regex) + pb_val = row_filter.to_pb() + expected_pb = _RowFilterPB(column_qualifier_regex_filter=regex) + assert pb_val == expected_pb - def test_constructor_explicit(self): - filters = object() - row_filter = self._make_one(filters=filters) - self.assertIs(row_filter.filters, filters) - def test___eq__(self): - filters = object() - row_filter1 = self._make_one(filters=filters) - row_filter2 = self._make_one(filters=filters) - self.assertEqual(row_filter1, row_filter2) +def test_timestamp_range_constructor(): + from google.cloud.bigtable.row_filters import TimestampRange - def test___eq__type_differ(self): - filters = object() - row_filter1 = self._make_one(filters=filters) - row_filter2 = object() - self.assertNotEqual(row_filter1, row_filter2) + start = object() + end = object() + time_range = TimestampRange(start=start, end=end) + assert time_range.start is start + assert time_range.end is end -class TestRowFilterChain(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_filters import RowFilterChain +def test_timestamp_range___eq__(): + from google.cloud.bigtable.row_filters import TimestampRange - return RowFilterChain + start = object() + end = object() + time_range1 = TimestampRange(start=start, end=end) + time_range2 = TimestampRange(start=start, end=end) + assert time_range1 == time_range2 - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - def test_to_pb(self): - from google.cloud.bigtable.row_filters import RowSampleFilter - from google.cloud.bigtable.row_filters import StripValueTransformerFilter +def test_timestamp_range___eq__type_differ(): + from google.cloud.bigtable.row_filters import TimestampRange - row_filter1 = StripValueTransformerFilter(True) - row_filter1_pb = row_filter1.to_pb() + start = object() + end = object() + time_range1 = TimestampRange(start=start, end=end) + time_range2 = object() + assert not (time_range1 == time_range2) - row_filter2 = RowSampleFilter(0.25) - row_filter2_pb = row_filter2.to_pb() - row_filter3 = self._make_one(filters=[row_filter1, row_filter2]) - filter_pb = row_filter3.to_pb() +def test_timestamp_range___ne__same_value(): + from google.cloud.bigtable.row_filters import TimestampRange - expected_pb = _RowFilterPB( - chain=_RowFilterChainPB(filters=[row_filter1_pb, row_filter2_pb]) - ) - self.assertEqual(filter_pb, expected_pb) + start = object() + end = object() + time_range1 = TimestampRange(start=start, end=end) + time_range2 = TimestampRange(start=start, end=end) + assert not (time_range1 != time_range2) - def test_to_pb_nested(self): - from google.cloud.bigtable.row_filters import CellsRowLimitFilter - from google.cloud.bigtable.row_filters import RowSampleFilter - from google.cloud.bigtable.row_filters import StripValueTransformerFilter - 
row_filter1 = StripValueTransformerFilter(True) - row_filter2 = RowSampleFilter(0.25) +def _timestamp_range_to_pb_helper(pb_kwargs, start=None, end=None): + import datetime + from google.cloud._helpers import _EPOCH + from google.cloud.bigtable.row_filters import TimestampRange - row_filter3 = self._make_one(filters=[row_filter1, row_filter2]) - row_filter3_pb = row_filter3.to_pb() + if start is not None: + start = _EPOCH + datetime.timedelta(microseconds=start) + if end is not None: + end = _EPOCH + datetime.timedelta(microseconds=end) + time_range = TimestampRange(start=start, end=end) + expected_pb = _TimestampRangePB(**pb_kwargs) + time_pb = time_range.to_pb() + assert time_pb.start_timestamp_micros == expected_pb.start_timestamp_micros + assert time_pb.end_timestamp_micros == expected_pb.end_timestamp_micros + assert time_pb == expected_pb - row_filter4 = CellsRowLimitFilter(11) - row_filter4_pb = row_filter4.to_pb() - row_filter5 = self._make_one(filters=[row_filter3, row_filter4]) - filter_pb = row_filter5.to_pb() +def test_timestamp_range_to_pb(): + start_micros = 30871234 + end_micros = 12939371234 + start_millis = start_micros // 1000 * 1000 + assert start_millis == 30871000 + end_millis = end_micros // 1000 * 1000 + 1000 + assert end_millis == 12939372000 + pb_kwargs = {} + pb_kwargs["start_timestamp_micros"] = start_millis + pb_kwargs["end_timestamp_micros"] = end_millis + _timestamp_range_to_pb_helper(pb_kwargs, start=start_micros, end=end_micros) - expected_pb = _RowFilterPB( - chain=_RowFilterChainPB(filters=[row_filter3_pb, row_filter4_pb]) - ) - self.assertEqual(filter_pb, expected_pb) +def test_timestamp_range_to_pb_start_only(): + # Makes sure already milliseconds granularity + start_micros = 30871000 + start_millis = start_micros // 1000 * 1000 + assert start_millis == 30871000 + pb_kwargs = {} + pb_kwargs["start_timestamp_micros"] = start_millis + _timestamp_range_to_pb_helper(pb_kwargs, start=start_micros, end=None) -class TestRowFilterUnion(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_filters import RowFilterUnion - return RowFilterUnion +def test_timestamp_range_to_pb_end_only(): + # Makes sure already milliseconds granularity + end_micros = 12939371000 + end_millis = end_micros // 1000 * 1000 + assert end_millis == 12939371000 + pb_kwargs = {} + pb_kwargs["end_timestamp_micros"] = end_millis + _timestamp_range_to_pb_helper(pb_kwargs, start=None, end=end_micros) - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - def test_to_pb(self): - from google.cloud.bigtable.row_filters import RowSampleFilter - from google.cloud.bigtable.row_filters import StripValueTransformerFilter +def test_timestamp_range_filter_constructor(): + from google.cloud.bigtable.row_filters import TimestampRangeFilter - row_filter1 = StripValueTransformerFilter(True) - row_filter1_pb = row_filter1.to_pb() + range_ = object() + row_filter = TimestampRangeFilter(range_) + assert row_filter.range_ is range_ - row_filter2 = RowSampleFilter(0.25) - row_filter2_pb = row_filter2.to_pb() - row_filter3 = self._make_one(filters=[row_filter1, row_filter2]) - filter_pb = row_filter3.to_pb() +def test_timestamp_range_filter___eq__type_differ(): + from google.cloud.bigtable.row_filters import TimestampRangeFilter + + range_ = object() + row_filter1 = TimestampRangeFilter(range_) + row_filter2 = object() + assert not (row_filter1 == row_filter2) + + +def test_timestamp_range_filter___eq__same_value(): + from 
google.cloud.bigtable.row_filters import TimestampRangeFilter + + range_ = object() + row_filter1 = TimestampRangeFilter(range_) + row_filter2 = TimestampRangeFilter(range_) + assert row_filter1 == row_filter2 + + +def test_timestamp_range_filter___ne__(): + from google.cloud.bigtable.row_filters import TimestampRangeFilter + + range_ = object() + other_range_ = object() + row_filter1 = TimestampRangeFilter(range_) + row_filter2 = TimestampRangeFilter(other_range_) + assert row_filter1 != row_filter2 + + +def test_timestamp_range_filter_to_pb(): + from google.cloud.bigtable.row_filters import TimestampRangeFilter + from google.cloud.bigtable.row_filters import TimestampRange + + range_ = TimestampRange() + row_filter = TimestampRangeFilter(range_) + pb_val = row_filter.to_pb() + expected_pb = _RowFilterPB(timestamp_range_filter=_TimestampRangePB()) + assert pb_val == expected_pb + + +def test_column_range_filter_constructor_defaults(): + from google.cloud.bigtable.row_filters import ColumnRangeFilter + + column_family_id = object() + row_filter = ColumnRangeFilter(column_family_id) + assert row_filter.column_family_id is column_family_id + assert row_filter.start_column is None + assert row_filter.end_column is None + assert row_filter.inclusive_start + assert row_filter.inclusive_end + + +def test_column_range_filter_constructor_explicit(): + from google.cloud.bigtable.row_filters import ColumnRangeFilter + + column_family_id = object() + start_column = object() + end_column = object() + inclusive_start = object() + inclusive_end = object() + row_filter = ColumnRangeFilter( + column_family_id, + start_column=start_column, + end_column=end_column, + inclusive_start=inclusive_start, + inclusive_end=inclusive_end, + ) + assert row_filter.column_family_id is column_family_id + assert row_filter.start_column is start_column + assert row_filter.end_column is end_column + assert row_filter.inclusive_start is inclusive_start + assert row_filter.inclusive_end is inclusive_end + + +def test_column_range_filter_constructor_bad_start(): + from google.cloud.bigtable.row_filters import ColumnRangeFilter + + column_family_id = object() + with pytest.raises(ValueError): + ColumnRangeFilter(column_family_id, inclusive_start=True) + + +def test_column_range_filter_constructor_bad_end(): + from google.cloud.bigtable.row_filters import ColumnRangeFilter + + column_family_id = object() + with pytest.raises(ValueError): + ColumnRangeFilter(column_family_id, inclusive_end=True) + + +def test_column_range_filter___eq__(): + from google.cloud.bigtable.row_filters import ColumnRangeFilter + + column_family_id = object() + start_column = object() + end_column = object() + inclusive_start = object() + inclusive_end = object() + row_filter1 = ColumnRangeFilter( + column_family_id, + start_column=start_column, + end_column=end_column, + inclusive_start=inclusive_start, + inclusive_end=inclusive_end, + ) + row_filter2 = ColumnRangeFilter( + column_family_id, + start_column=start_column, + end_column=end_column, + inclusive_start=inclusive_start, + inclusive_end=inclusive_end, + ) + assert row_filter1 == row_filter2 + + +def test_column_range_filter___eq__type_differ(): + from google.cloud.bigtable.row_filters import ColumnRangeFilter + + column_family_id = object() + row_filter1 = ColumnRangeFilter(column_family_id) + row_filter2 = object() + assert not (row_filter1 == row_filter2) + + +def test_column_range_filter___ne__(): + from google.cloud.bigtable.row_filters import ColumnRangeFilter + + column_family_id = 
object() + other_column_family_id = object() + start_column = object() + end_column = object() + inclusive_start = object() + inclusive_end = object() + row_filter1 = ColumnRangeFilter( + column_family_id, + start_column=start_column, + end_column=end_column, + inclusive_start=inclusive_start, + inclusive_end=inclusive_end, + ) + row_filter2 = ColumnRangeFilter( + other_column_family_id, + start_column=start_column, + end_column=end_column, + inclusive_start=inclusive_start, + inclusive_end=inclusive_end, + ) + assert row_filter1 != row_filter2 + + +def test_column_range_filter_to_pb(): + from google.cloud.bigtable.row_filters import ColumnRangeFilter + + column_family_id = u"column-family-id" + row_filter = ColumnRangeFilter(column_family_id) + col_range_pb = _ColumnRangePB(family_name=column_family_id) + expected_pb = _RowFilterPB(column_range_filter=col_range_pb) + assert row_filter.to_pb() == expected_pb + + +def test_column_range_filter_to_pb_inclusive_start(): + from google.cloud.bigtable.row_filters import ColumnRangeFilter + + column_family_id = u"column-family-id" + column = b"column" + row_filter = ColumnRangeFilter(column_family_id, start_column=column) + col_range_pb = _ColumnRangePB( + family_name=column_family_id, start_qualifier_closed=column + ) + expected_pb = _RowFilterPB(column_range_filter=col_range_pb) + assert row_filter.to_pb() == expected_pb + + +def test_column_range_filter_to_pb_exclusive_start(): + from google.cloud.bigtable.row_filters import ColumnRangeFilter + + column_family_id = u"column-family-id" + column = b"column" + row_filter = ColumnRangeFilter( + column_family_id, start_column=column, inclusive_start=False + ) + col_range_pb = _ColumnRangePB( + family_name=column_family_id, start_qualifier_open=column + ) + expected_pb = _RowFilterPB(column_range_filter=col_range_pb) + assert row_filter.to_pb() == expected_pb + + +def test_column_range_filter_to_pb_inclusive_end(): + from google.cloud.bigtable.row_filters import ColumnRangeFilter + + column_family_id = u"column-family-id" + column = b"column" + row_filter = ColumnRangeFilter(column_family_id, end_column=column) + col_range_pb = _ColumnRangePB( + family_name=column_family_id, end_qualifier_closed=column + ) + expected_pb = _RowFilterPB(column_range_filter=col_range_pb) + assert row_filter.to_pb() == expected_pb + + +def test_column_range_filter_to_pb_exclusive_end(): + from google.cloud.bigtable.row_filters import ColumnRangeFilter + + column_family_id = u"column-family-id" + column = b"column" + row_filter = ColumnRangeFilter( + column_family_id, end_column=column, inclusive_end=False + ) + col_range_pb = _ColumnRangePB( + family_name=column_family_id, end_qualifier_open=column + ) + expected_pb = _RowFilterPB(column_range_filter=col_range_pb) + assert row_filter.to_pb() == expected_pb + + +def test_value_regex_filter_to_pb_w_bytes(): + from google.cloud.bigtable.row_filters import ValueRegexFilter + + value = regex = b"value-regex" + row_filter = ValueRegexFilter(value) + pb_val = row_filter.to_pb() + expected_pb = _RowFilterPB(value_regex_filter=regex) + assert pb_val == expected_pb - expected_pb = _RowFilterPB( - interleave=_RowFilterInterleavePB(filters=[row_filter1_pb, row_filter2_pb]) - ) - self.assertEqual(filter_pb, expected_pb) - def test_to_pb_nested(self): - from google.cloud.bigtable.row_filters import CellsRowLimitFilter - from google.cloud.bigtable.row_filters import RowSampleFilter - from google.cloud.bigtable.row_filters import StripValueTransformerFilter +def 
test_value_regex_filter_to_pb_w_str(): + from google.cloud.bigtable.row_filters import ValueRegexFilter - row_filter1 = StripValueTransformerFilter(True) - row_filter2 = RowSampleFilter(0.25) + value = u"value-regex" + regex = value.encode("ascii") + row_filter = ValueRegexFilter(value) + pb_val = row_filter.to_pb() + expected_pb = _RowFilterPB(value_regex_filter=regex) + assert pb_val == expected_pb - row_filter3 = self._make_one(filters=[row_filter1, row_filter2]) - row_filter3_pb = row_filter3.to_pb() - row_filter4 = CellsRowLimitFilter(11) - row_filter4_pb = row_filter4.to_pb() +def test_exact_value_filter_to_pb_w_bytes(): + from google.cloud.bigtable.row_filters import ExactValueFilter - row_filter5 = self._make_one(filters=[row_filter3, row_filter4]) - filter_pb = row_filter5.to_pb() + value = regex = b"value-regex" + row_filter = ExactValueFilter(value) + pb_val = row_filter.to_pb() + expected_pb = _RowFilterPB(value_regex_filter=regex) + assert pb_val == expected_pb - expected_pb = _RowFilterPB( - interleave=_RowFilterInterleavePB(filters=[row_filter3_pb, row_filter4_pb]) - ) - self.assertEqual(filter_pb, expected_pb) +def test_exact_value_filter_to_pb_w_str(): + from google.cloud.bigtable.row_filters import ExactValueFilter -class TestConditionalRowFilter(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_filters import ConditionalRowFilter + value = u"value-regex" + regex = value.encode("ascii") + row_filter = ExactValueFilter(value) + pb_val = row_filter.to_pb() + expected_pb = _RowFilterPB(value_regex_filter=regex) + assert pb_val == expected_pb - return ConditionalRowFilter - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) +def test_exact_value_filter_to_pb_w_int(): + import struct + from google.cloud.bigtable.row_filters import ExactValueFilter - def test_constructor(self): - base_filter = object() - true_filter = object() - false_filter = object() - cond_filter = self._make_one( - base_filter, true_filter=true_filter, false_filter=false_filter - ) - self.assertIs(cond_filter.base_filter, base_filter) - self.assertIs(cond_filter.true_filter, true_filter) - self.assertIs(cond_filter.false_filter, false_filter) - - def test___eq__(self): - base_filter = object() - true_filter = object() - false_filter = object() - cond_filter1 = self._make_one( - base_filter, true_filter=true_filter, false_filter=false_filter - ) - cond_filter2 = self._make_one( - base_filter, true_filter=true_filter, false_filter=false_filter - ) - self.assertEqual(cond_filter1, cond_filter2) - - def test___eq__type_differ(self): - base_filter = object() - true_filter = object() - false_filter = object() - cond_filter1 = self._make_one( - base_filter, true_filter=true_filter, false_filter=false_filter - ) - cond_filter2 = object() - self.assertNotEqual(cond_filter1, cond_filter2) + value = 1 + regex = struct.Struct(">q").pack(value) + row_filter = ExactValueFilter(value) + pb_val = row_filter.to_pb() + expected_pb = _RowFilterPB(value_regex_filter=regex) + assert pb_val == expected_pb - def test_to_pb(self): - from google.cloud.bigtable.row_filters import CellsRowOffsetFilter - from google.cloud.bigtable.row_filters import RowSampleFilter - from google.cloud.bigtable.row_filters import StripValueTransformerFilter - row_filter1 = StripValueTransformerFilter(True) - row_filter1_pb = row_filter1.to_pb() +def test_value_range_filter_constructor_defaults(): + from google.cloud.bigtable.row_filters import ValueRangeFilter - 
row_filter2 = RowSampleFilter(0.25) - row_filter2_pb = row_filter2.to_pb() + row_filter = ValueRangeFilter() - row_filter3 = CellsRowOffsetFilter(11) - row_filter3_pb = row_filter3.to_pb() + assert row_filter.start_value is None + assert row_filter.end_value is None + assert row_filter.inclusive_start + assert row_filter.inclusive_end - row_filter4 = self._make_one( - row_filter1, true_filter=row_filter2, false_filter=row_filter3 - ) - filter_pb = row_filter4.to_pb() - - expected_pb = _RowFilterPB( - condition=_RowFilterConditionPB( - predicate_filter=row_filter1_pb, - true_filter=row_filter2_pb, - false_filter=row_filter3_pb, - ) + +def test_value_range_filter_constructor_explicit(): + from google.cloud.bigtable.row_filters import ValueRangeFilter + + start_value = object() + end_value = object() + inclusive_start = object() + inclusive_end = object() + + row_filter = ValueRangeFilter( + start_value=start_value, + end_value=end_value, + inclusive_start=inclusive_start, + inclusive_end=inclusive_end, + ) + + assert row_filter.start_value is start_value + assert row_filter.end_value is end_value + assert row_filter.inclusive_start is inclusive_start + assert row_filter.inclusive_end is inclusive_end + + +def test_value_range_filter_constructor_w_int_values(): + from google.cloud.bigtable.row_filters import ValueRangeFilter + import struct + + start_value = 1 + end_value = 10 + + row_filter = ValueRangeFilter(start_value=start_value, end_value=end_value) + + expected_start_value = struct.Struct(">q").pack(start_value) + expected_end_value = struct.Struct(">q").pack(end_value) + + assert row_filter.start_value == expected_start_value + assert row_filter.end_value == expected_end_value + assert row_filter.inclusive_start + assert row_filter.inclusive_end + + +def test_value_range_filter_constructor_bad_start(): + from google.cloud.bigtable.row_filters import ValueRangeFilter + + with pytest.raises(ValueError): + ValueRangeFilter(inclusive_start=True) + + +def test_value_range_filter_constructor_bad_end(): + from google.cloud.bigtable.row_filters import ValueRangeFilter + + with pytest.raises(ValueError): + ValueRangeFilter(inclusive_end=True) + + +def test_value_range_filter___eq__(): + from google.cloud.bigtable.row_filters import ValueRangeFilter + + start_value = object() + end_value = object() + inclusive_start = object() + inclusive_end = object() + row_filter1 = ValueRangeFilter( + start_value=start_value, + end_value=end_value, + inclusive_start=inclusive_start, + inclusive_end=inclusive_end, + ) + row_filter2 = ValueRangeFilter( + start_value=start_value, + end_value=end_value, + inclusive_start=inclusive_start, + inclusive_end=inclusive_end, + ) + assert row_filter1 == row_filter2 + + +def test_value_range_filter___eq__type_differ(): + from google.cloud.bigtable.row_filters import ValueRangeFilter + + row_filter1 = ValueRangeFilter() + row_filter2 = object() + assert not (row_filter1 == row_filter2) + + +def test_value_range_filter___ne__(): + from google.cloud.bigtable.row_filters import ValueRangeFilter + + start_value = object() + other_start_value = object() + end_value = object() + inclusive_start = object() + inclusive_end = object() + row_filter1 = ValueRangeFilter( + start_value=start_value, + end_value=end_value, + inclusive_start=inclusive_start, + inclusive_end=inclusive_end, + ) + row_filter2 = ValueRangeFilter( + start_value=other_start_value, + end_value=end_value, + inclusive_start=inclusive_start, + inclusive_end=inclusive_end, + ) + assert row_filter1 != row_filter2 + 
+ +def test_value_range_filter_to_pb(): + from google.cloud.bigtable.row_filters import ValueRangeFilter + + row_filter = ValueRangeFilter() + expected_pb = _RowFilterPB(value_range_filter=_ValueRangePB()) + assert row_filter.to_pb() == expected_pb + + +def test_value_range_filter_to_pb_inclusive_start(): + from google.cloud.bigtable.row_filters import ValueRangeFilter + + value = b"some-value" + row_filter = ValueRangeFilter(start_value=value) + val_range_pb = _ValueRangePB(start_value_closed=value) + expected_pb = _RowFilterPB(value_range_filter=val_range_pb) + assert row_filter.to_pb() == expected_pb + + +def test_value_range_filter_to_pb_exclusive_start(): + from google.cloud.bigtable.row_filters import ValueRangeFilter + + value = b"some-value" + row_filter = ValueRangeFilter(start_value=value, inclusive_start=False) + val_range_pb = _ValueRangePB(start_value_open=value) + expected_pb = _RowFilterPB(value_range_filter=val_range_pb) + assert row_filter.to_pb() == expected_pb + + +def test_value_range_filter_to_pb_inclusive_end(): + from google.cloud.bigtable.row_filters import ValueRangeFilter + + value = b"some-value" + row_filter = ValueRangeFilter(end_value=value) + val_range_pb = _ValueRangePB(end_value_closed=value) + expected_pb = _RowFilterPB(value_range_filter=val_range_pb) + assert row_filter.to_pb() == expected_pb + + +def test_value_range_filter_to_pb_exclusive_end(): + from google.cloud.bigtable.row_filters import ValueRangeFilter + + value = b"some-value" + row_filter = ValueRangeFilter(end_value=value, inclusive_end=False) + val_range_pb = _ValueRangePB(end_value_open=value) + expected_pb = _RowFilterPB(value_range_filter=val_range_pb) + assert row_filter.to_pb() == expected_pb + + +def test_cell_count_constructor(): + from google.cloud.bigtable.row_filters import _CellCountFilter + + num_cells = object() + row_filter = _CellCountFilter(num_cells) + assert row_filter.num_cells is num_cells + + +def test_cell_count___eq__type_differ(): + from google.cloud.bigtable.row_filters import _CellCountFilter + + num_cells = object() + row_filter1 = _CellCountFilter(num_cells) + row_filter2 = object() + assert not (row_filter1 == row_filter2) + + +def test_cell_count___eq__same_value(): + from google.cloud.bigtable.row_filters import _CellCountFilter + + num_cells = object() + row_filter1 = _CellCountFilter(num_cells) + row_filter2 = _CellCountFilter(num_cells) + assert row_filter1 == row_filter2 + + +def test_cell_count___ne__same_value(): + from google.cloud.bigtable.row_filters import _CellCountFilter + + num_cells = object() + row_filter1 = _CellCountFilter(num_cells) + row_filter2 = _CellCountFilter(num_cells) + assert not (row_filter1 != row_filter2) + + +def test_cells_row_offset_filter_to_pb(): + from google.cloud.bigtable.row_filters import CellsRowOffsetFilter + + num_cells = 76 + row_filter = CellsRowOffsetFilter(num_cells) + pb_val = row_filter.to_pb() + expected_pb = _RowFilterPB(cells_per_row_offset_filter=num_cells) + assert pb_val == expected_pb + + +def test_cells_row_limit_filter_to_pb(): + from google.cloud.bigtable.row_filters import CellsRowLimitFilter + + num_cells = 189 + row_filter = CellsRowLimitFilter(num_cells) + pb_val = row_filter.to_pb() + expected_pb = _RowFilterPB(cells_per_row_limit_filter=num_cells) + assert pb_val == expected_pb + + +def test_cells_column_limit_filter_to_pb(): + from google.cloud.bigtable.row_filters import CellsColumnLimitFilter + + num_cells = 10 + row_filter = CellsColumnLimitFilter(num_cells) + pb_val = row_filter.to_pb() + 
expected_pb = _RowFilterPB(cells_per_column_limit_filter=num_cells) + assert pb_val == expected_pb + + +def test_strip_value_transformer_filter_to_pb(): + from google.cloud.bigtable.row_filters import StripValueTransformerFilter + + flag = True + row_filter = StripValueTransformerFilter(flag) + pb_val = row_filter.to_pb() + expected_pb = _RowFilterPB(strip_value_transformer=flag) + assert pb_val == expected_pb + + +def test_apply_label_filter_constructor(): + from google.cloud.bigtable.row_filters import ApplyLabelFilter + + label = object() + row_filter = ApplyLabelFilter(label) + assert row_filter.label is label + + +def test_apply_label_filter___eq__type_differ(): + from google.cloud.bigtable.row_filters import ApplyLabelFilter + + label = object() + row_filter1 = ApplyLabelFilter(label) + row_filter2 = object() + assert not (row_filter1 == row_filter2) + + +def test_apply_label_filter___eq__same_value(): + from google.cloud.bigtable.row_filters import ApplyLabelFilter + + label = object() + row_filter1 = ApplyLabelFilter(label) + row_filter2 = ApplyLabelFilter(label) + assert row_filter1 == row_filter2 + + +def test_apply_label_filter___ne__(): + from google.cloud.bigtable.row_filters import ApplyLabelFilter + + label = object() + other_label = object() + row_filter1 = ApplyLabelFilter(label) + row_filter2 = ApplyLabelFilter(other_label) + assert row_filter1 != row_filter2 + + +def test_apply_label_filter_to_pb(): + from google.cloud.bigtable.row_filters import ApplyLabelFilter + + label = u"label" + row_filter = ApplyLabelFilter(label) + pb_val = row_filter.to_pb() + expected_pb = _RowFilterPB(apply_label_transformer=label) + assert pb_val == expected_pb + + +def test_filter_combination_constructor_defaults(): + from google.cloud.bigtable.row_filters import _FilterCombination + + row_filter = _FilterCombination() + assert row_filter.filters == [] + + +def test_filter_combination_constructor_explicit(): + from google.cloud.bigtable.row_filters import _FilterCombination + + filters = object() + row_filter = _FilterCombination(filters=filters) + assert row_filter.filters is filters + + +def test_filter_combination___eq__(): + from google.cloud.bigtable.row_filters import _FilterCombination + + filters = object() + row_filter1 = _FilterCombination(filters=filters) + row_filter2 = _FilterCombination(filters=filters) + assert row_filter1 == row_filter2 + + +def test_filter_combination___eq__type_differ(): + from google.cloud.bigtable.row_filters import _FilterCombination + + filters = object() + row_filter1 = _FilterCombination(filters=filters) + row_filter2 = object() + assert not (row_filter1 == row_filter2) + + +def test_filter_combination___ne__(): + from google.cloud.bigtable.row_filters import _FilterCombination + + filters = object() + other_filters = object() + row_filter1 = _FilterCombination(filters=filters) + row_filter2 = _FilterCombination(filters=other_filters) + assert row_filter1 != row_filter2 + + +def test_row_filter_chain_to_pb(): + from google.cloud.bigtable.row_filters import RowFilterChain + from google.cloud.bigtable.row_filters import RowSampleFilter + from google.cloud.bigtable.row_filters import StripValueTransformerFilter + + row_filter1 = StripValueTransformerFilter(True) + row_filter1_pb = row_filter1.to_pb() + + row_filter2 = RowSampleFilter(0.25) + row_filter2_pb = row_filter2.to_pb() + + row_filter3 = RowFilterChain(filters=[row_filter1, row_filter2]) + filter_pb = row_filter3.to_pb() + + expected_pb = _RowFilterPB( + 
chain=_RowFilterChainPB(filters=[row_filter1_pb, row_filter2_pb]) + ) + assert filter_pb == expected_pb + + +def test_row_filter_chain_to_pb_nested(): + from google.cloud.bigtable.row_filters import CellsRowLimitFilter + from google.cloud.bigtable.row_filters import RowFilterChain + from google.cloud.bigtable.row_filters import RowSampleFilter + from google.cloud.bigtable.row_filters import StripValueTransformerFilter + + row_filter1 = StripValueTransformerFilter(True) + row_filter2 = RowSampleFilter(0.25) + + row_filter3 = RowFilterChain(filters=[row_filter1, row_filter2]) + row_filter3_pb = row_filter3.to_pb() + + row_filter4 = CellsRowLimitFilter(11) + row_filter4_pb = row_filter4.to_pb() + + row_filter5 = RowFilterChain(filters=[row_filter3, row_filter4]) + filter_pb = row_filter5.to_pb() + + expected_pb = _RowFilterPB( + chain=_RowFilterChainPB(filters=[row_filter3_pb, row_filter4_pb]) + ) + assert filter_pb == expected_pb + + +def test_row_filter_union_to_pb(): + from google.cloud.bigtable.row_filters import RowFilterUnion + from google.cloud.bigtable.row_filters import RowSampleFilter + from google.cloud.bigtable.row_filters import StripValueTransformerFilter + + row_filter1 = StripValueTransformerFilter(True) + row_filter1_pb = row_filter1.to_pb() + + row_filter2 = RowSampleFilter(0.25) + row_filter2_pb = row_filter2.to_pb() + + row_filter3 = RowFilterUnion(filters=[row_filter1, row_filter2]) + filter_pb = row_filter3.to_pb() + + expected_pb = _RowFilterPB( + interleave=_RowFilterInterleavePB(filters=[row_filter1_pb, row_filter2_pb]) + ) + assert filter_pb == expected_pb + + +def test_row_filter_union_to_pb_nested(): + from google.cloud.bigtable.row_filters import CellsRowLimitFilter + from google.cloud.bigtable.row_filters import RowFilterUnion + from google.cloud.bigtable.row_filters import RowSampleFilter + from google.cloud.bigtable.row_filters import StripValueTransformerFilter + + row_filter1 = StripValueTransformerFilter(True) + row_filter2 = RowSampleFilter(0.25) + + row_filter3 = RowFilterUnion(filters=[row_filter1, row_filter2]) + row_filter3_pb = row_filter3.to_pb() + + row_filter4 = CellsRowLimitFilter(11) + row_filter4_pb = row_filter4.to_pb() + + row_filter5 = RowFilterUnion(filters=[row_filter3, row_filter4]) + filter_pb = row_filter5.to_pb() + + expected_pb = _RowFilterPB( + interleave=_RowFilterInterleavePB(filters=[row_filter3_pb, row_filter4_pb]) + ) + assert filter_pb == expected_pb + + +def test_conditional_row_filter_constructor(): + from google.cloud.bigtable.row_filters import ConditionalRowFilter + + base_filter = object() + true_filter = object() + false_filter = object() + cond_filter = ConditionalRowFilter( + base_filter, true_filter=true_filter, false_filter=false_filter + ) + assert cond_filter.base_filter is base_filter + assert cond_filter.true_filter is true_filter + assert cond_filter.false_filter is false_filter + + +def test_conditional_row_filter___eq__(): + from google.cloud.bigtable.row_filters import ConditionalRowFilter + + base_filter = object() + true_filter = object() + false_filter = object() + cond_filter1 = ConditionalRowFilter( + base_filter, true_filter=true_filter, false_filter=false_filter + ) + cond_filter2 = ConditionalRowFilter( + base_filter, true_filter=true_filter, false_filter=false_filter + ) + assert cond_filter1 == cond_filter2 + + +def test_conditional_row_filter___eq__type_differ(): + from google.cloud.bigtable.row_filters import ConditionalRowFilter + + base_filter = object() + true_filter = object() + false_filter = 
object() + cond_filter1 = ConditionalRowFilter( + base_filter, true_filter=true_filter, false_filter=false_filter + ) + cond_filter2 = object() + assert not (cond_filter1 == cond_filter2) + + +def test_conditional_row_filter___ne__(): + from google.cloud.bigtable.row_filters import ConditionalRowFilter + + base_filter = object() + other_base_filter = object() + true_filter = object() + false_filter = object() + cond_filter1 = ConditionalRowFilter( + base_filter, true_filter=true_filter, false_filter=false_filter + ) + cond_filter2 = ConditionalRowFilter( + other_base_filter, true_filter=true_filter, false_filter=false_filter + ) + assert cond_filter1 != cond_filter2 + + +def test_conditional_row_filter_to_pb(): + from google.cloud.bigtable.row_filters import ConditionalRowFilter + from google.cloud.bigtable.row_filters import CellsRowOffsetFilter + from google.cloud.bigtable.row_filters import RowSampleFilter + from google.cloud.bigtable.row_filters import StripValueTransformerFilter + + row_filter1 = StripValueTransformerFilter(True) + row_filter1_pb = row_filter1.to_pb() + + row_filter2 = RowSampleFilter(0.25) + row_filter2_pb = row_filter2.to_pb() + + row_filter3 = CellsRowOffsetFilter(11) + row_filter3_pb = row_filter3.to_pb() + + row_filter4 = ConditionalRowFilter( + row_filter1, true_filter=row_filter2, false_filter=row_filter3 + ) + filter_pb = row_filter4.to_pb() + + expected_pb = _RowFilterPB( + condition=_RowFilterConditionPB( + predicate_filter=row_filter1_pb, + true_filter=row_filter2_pb, + false_filter=row_filter3_pb, ) - self.assertEqual(filter_pb, expected_pb) + ) + assert filter_pb == expected_pb - def test_to_pb_true_only(self): - from google.cloud.bigtable.row_filters import RowSampleFilter - from google.cloud.bigtable.row_filters import StripValueTransformerFilter - row_filter1 = StripValueTransformerFilter(True) - row_filter1_pb = row_filter1.to_pb() +def test_conditional_row_filter_to_pb_true_only(): + from google.cloud.bigtable.row_filters import ConditionalRowFilter + from google.cloud.bigtable.row_filters import RowSampleFilter + from google.cloud.bigtable.row_filters import StripValueTransformerFilter - row_filter2 = RowSampleFilter(0.25) - row_filter2_pb = row_filter2.to_pb() + row_filter1 = StripValueTransformerFilter(True) + row_filter1_pb = row_filter1.to_pb() - row_filter3 = self._make_one(row_filter1, true_filter=row_filter2) - filter_pb = row_filter3.to_pb() + row_filter2 = RowSampleFilter(0.25) + row_filter2_pb = row_filter2.to_pb() - expected_pb = _RowFilterPB( - condition=_RowFilterConditionPB( - predicate_filter=row_filter1_pb, true_filter=row_filter2_pb - ) + row_filter3 = ConditionalRowFilter(row_filter1, true_filter=row_filter2) + filter_pb = row_filter3.to_pb() + + expected_pb = _RowFilterPB( + condition=_RowFilterConditionPB( + predicate_filter=row_filter1_pb, true_filter=row_filter2_pb ) - self.assertEqual(filter_pb, expected_pb) + ) + assert filter_pb == expected_pb + - def test_to_pb_false_only(self): - from google.cloud.bigtable.row_filters import RowSampleFilter - from google.cloud.bigtable.row_filters import StripValueTransformerFilter +def test_conditional_row_filter_to_pb_false_only(): + from google.cloud.bigtable.row_filters import ConditionalRowFilter + from google.cloud.bigtable.row_filters import RowSampleFilter + from google.cloud.bigtable.row_filters import StripValueTransformerFilter - row_filter1 = StripValueTransformerFilter(True) - row_filter1_pb = row_filter1.to_pb() + row_filter1 = StripValueTransformerFilter(True) + 
row_filter1_pb = row_filter1.to_pb() - row_filter2 = RowSampleFilter(0.25) - row_filter2_pb = row_filter2.to_pb() + row_filter2 = RowSampleFilter(0.25) + row_filter2_pb = row_filter2.to_pb() - row_filter3 = self._make_one(row_filter1, false_filter=row_filter2) - filter_pb = row_filter3.to_pb() + row_filter3 = ConditionalRowFilter(row_filter1, false_filter=row_filter2) + filter_pb = row_filter3.to_pb() - expected_pb = _RowFilterPB( - condition=_RowFilterConditionPB( - predicate_filter=row_filter1_pb, false_filter=row_filter2_pb - ) + expected_pb = _RowFilterPB( + condition=_RowFilterConditionPB( + predicate_filter=row_filter1_pb, false_filter=row_filter2_pb ) - self.assertEqual(filter_pb, expected_pb) + ) + assert filter_pb == expected_pb def _ColumnRangePB(*args, **kw): diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_set.py b/packages/google-cloud-bigtable/tests/unit/test_row_set.py index c1fa4ca87bb1..1a33be7202e4 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_set.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_set.py @@ -13,260 +13,308 @@ # limitations under the License. -import unittest -from google.cloud.bigtable.row_set import RowRange -from google.cloud._helpers import _to_bytes +def test_row_set_constructor(): + from google.cloud.bigtable.row_set import RowSet + row_set = RowSet() + assert [] == row_set.row_keys + assert [] == row_set.row_ranges -class TestRowSet(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_set import RowSet - return RowSet +def test_row_set__eq__(): + from google.cloud.bigtable.row_set import RowRange + from google.cloud.bigtable.row_set import RowSet - def _make_one(self): - return self._get_target_class()() + row_key1 = b"row_key1" + row_key2 = b"row_key1" + row_range1 = RowRange(b"row_key4", b"row_key9") + row_range2 = RowRange(b"row_key4", b"row_key9") - def test_constructor(self): - row_set = self._make_one() - self.assertEqual([], row_set.row_keys) - self.assertEqual([], row_set.row_ranges) + row_set1 = RowSet() + row_set2 = RowSet() - def test__eq__(self): - row_key1 = b"row_key1" - row_key2 = b"row_key1" - row_range1 = RowRange(b"row_key4", b"row_key9") - row_range2 = RowRange(b"row_key4", b"row_key9") + row_set1.add_row_key(row_key1) + row_set2.add_row_key(row_key2) + row_set1.add_row_range(row_range1) + row_set2.add_row_range(row_range2) - row_set1 = self._make_one() - row_set2 = self._make_one() + assert row_set1 == row_set2 - row_set1.add_row_key(row_key1) - row_set2.add_row_key(row_key2) - row_set1.add_row_range(row_range1) - row_set2.add_row_range(row_range2) - self.assertEqual(row_set1, row_set2) +def test_row_set__eq__type_differ(): + from google.cloud.bigtable.row_set import RowSet - def test__eq__type_differ(self): - row_set1 = self._make_one() - row_set2 = object() - self.assertNotEqual(row_set1, row_set2) + row_set1 = RowSet() + row_set2 = object() + assert not (row_set1 == row_set2) - def test__eq__len_row_keys_differ(self): - row_key1 = b"row_key1" - row_key2 = b"row_key1" - row_set1 = self._make_one() - row_set2 = self._make_one() - - row_set1.add_row_key(row_key1) - row_set1.add_row_key(row_key2) - row_set2.add_row_key(row_key2) - - self.assertNotEqual(row_set1, row_set2) - - def test__eq__len_row_ranges_differ(self): - row_range1 = RowRange(b"row_key4", b"row_key9") - row_range2 = RowRange(b"row_key4", b"row_key9") - - row_set1 = self._make_one() - row_set2 = self._make_one() - - row_set1.add_row_range(row_range1) - 
row_set1.add_row_range(row_range2) - row_set2.add_row_range(row_range2) - - self.assertNotEqual(row_set1, row_set2) - - def test__eq__row_keys_differ(self): - row_set1 = self._make_one() - row_set2 = self._make_one() - - row_set1.add_row_key(b"row_key1") - row_set1.add_row_key(b"row_key2") - row_set1.add_row_key(b"row_key3") - row_set2.add_row_key(b"row_key1") - row_set2.add_row_key(b"row_key2") - row_set2.add_row_key(b"row_key4") +def test_row_set__eq__len_row_keys_differ(): + from google.cloud.bigtable.row_set import RowSet - self.assertNotEqual(row_set1, row_set2) - - def test__eq__row_ranges_differ(self): - row_range1 = RowRange(b"row_key4", b"row_key9") - row_range2 = RowRange(b"row_key14", b"row_key19") - row_range3 = RowRange(b"row_key24", b"row_key29") + row_key1 = b"row_key1" + row_key2 = b"row_key1" - row_set1 = self._make_one() - row_set2 = self._make_one() + row_set1 = RowSet() + row_set2 = RowSet() - row_set1.add_row_range(row_range1) - row_set1.add_row_range(row_range2) - row_set1.add_row_range(row_range3) - row_set2.add_row_range(row_range1) - row_set2.add_row_range(row_range2) - - self.assertNotEqual(row_set1, row_set2) - - def test__ne__(self): - row_key1 = b"row_key1" - row_key2 = b"row_key1" - row_range1 = RowRange(b"row_key4", b"row_key9") - row_range2 = RowRange(b"row_key5", b"row_key9") - - row_set1 = self._make_one() - row_set2 = self._make_one() - - row_set1.add_row_key(row_key1) - row_set2.add_row_key(row_key2) - row_set1.add_row_range(row_range1) - row_set2.add_row_range(row_range2) - - self.assertNotEqual(row_set1, row_set2) - - def test__ne__same_value(self): - row_key1 = b"row_key1" - row_key2 = b"row_key1" - row_range1 = RowRange(b"row_key4", b"row_key9") - row_range2 = RowRange(b"row_key4", b"row_key9") - - row_set1 = self._make_one() - row_set2 = self._make_one() - - row_set1.add_row_key(row_key1) - row_set2.add_row_key(row_key2) - row_set1.add_row_range(row_range1) - row_set2.add_row_range(row_range2) - - comparison_val = row_set1 != row_set2 - self.assertFalse(comparison_val) - - def test_add_row_key(self): - row_set = self._make_one() - row_set.add_row_key("row_key1") - row_set.add_row_key("row_key2") - self.assertEqual(["row_key1", "row_key2"], row_set.row_keys) - - def test_add_row_range(self): - row_set = self._make_one() - row_range1 = RowRange(b"row_key1", b"row_key9") - row_range2 = RowRange(b"row_key21", b"row_key29") - row_set.add_row_range(row_range1) - row_set.add_row_range(row_range2) - expected = [row_range1, row_range2] - self.assertEqual(expected, row_set.row_ranges) - - def test_add_row_range_from_keys(self): - row_set = self._make_one() - row_set.add_row_range_from_keys( - start_key=b"row_key1", - end_key=b"row_key9", - start_inclusive=False, - end_inclusive=True, - ) - self.assertEqual(row_set.row_ranges[0].end_key, b"row_key9") - - def test_add_row_range_with_prefix(self): - row_set = self._make_one() - row_set.add_row_range_with_prefix("row") - self.assertEqual(row_set.row_ranges[0].end_key, b"rox") - - def test__update_message_request(self): - row_set = self._make_one() - table_name = "table_name" - row_set.add_row_key("row_key1") - row_range1 = RowRange(b"row_key21", b"row_key29") - row_set.add_row_range(row_range1) - - request = _ReadRowsRequestPB(table_name=table_name) - row_set._update_message_request(request) - - expected_request = _ReadRowsRequestPB(table_name=table_name) - expected_request.rows.row_keys.append(_to_bytes("row_key1")) - - expected_request.rows.row_ranges.append(row_range1.get_range_kwargs()) - - 
self.assertEqual(request, expected_request) - - -class TestRowRange(unittest.TestCase): - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.row_set import RowRange - - return RowRange - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - def test_constructor(self): - start_key = "row_key1" - end_key = "row_key9" - row_range = self._make_one(start_key, end_key) - self.assertEqual(start_key, row_range.start_key) - self.assertEqual(end_key, row_range.end_key) - self.assertTrue(row_range.start_inclusive) - self.assertFalse(row_range.end_inclusive) - - def test___hash__set_equality(self): - row_range1 = self._make_one("row_key1", "row_key9") - row_range2 = self._make_one("row_key1", "row_key9") - set_one = {row_range1, row_range2} - set_two = {row_range1, row_range2} - self.assertEqual(set_one, set_two) - - def test___hash__not_equals(self): - row_range1 = self._make_one("row_key1", "row_key9") - row_range2 = self._make_one("row_key1", "row_key19") - set_one = {row_range1} - set_two = {row_range2} - self.assertNotEqual(set_one, set_two) - - def test__eq__(self): - start_key = b"row_key1" - end_key = b"row_key9" - row_range1 = self._make_one(start_key, end_key, True, False) - row_range2 = self._make_one(start_key, end_key, True, False) - self.assertEqual(row_range1, row_range2) - - def test___eq__type_differ(self): - start_key = b"row_key1" - end_key = b"row_key9" - row_range1 = self._make_one(start_key, end_key, True, False) - row_range2 = object() - self.assertNotEqual(row_range1, row_range2) - - def test__ne__(self): - start_key = b"row_key1" - end_key = b"row_key9" - row_range1 = self._make_one(start_key, end_key, True, False) - row_range2 = self._make_one(start_key, end_key, False, True) - self.assertNotEqual(row_range1, row_range2) - - def test__ne__same_value(self): - start_key = b"row_key1" - end_key = b"row_key9" - row_range1 = self._make_one(start_key, end_key, True, False) - row_range2 = self._make_one(start_key, end_key, True, False) - comparison_val = row_range1 != row_range2 - self.assertFalse(comparison_val) - - def test_get_range_kwargs_closed_open(self): - start_key = b"row_key1" - end_key = b"row_key9" - expected_result = {"start_key_closed": start_key, "end_key_open": end_key} - row_range = self._make_one(start_key, end_key) - actual_result = row_range.get_range_kwargs() - self.assertEqual(expected_result, actual_result) - - def test_get_range_kwargs_open_closed(self): - start_key = b"row_key1" - end_key = b"row_key9" - expected_result = {"start_key_open": start_key, "end_key_closed": end_key} - row_range = self._make_one(start_key, end_key, False, True) - actual_result = row_range.get_range_kwargs() - self.assertEqual(expected_result, actual_result) + row_set1.add_row_key(row_key1) + row_set1.add_row_key(row_key2) + row_set2.add_row_key(row_key2) + + assert not (row_set1 == row_set2) + + +def test_row_set__eq__len_row_ranges_differ(): + from google.cloud.bigtable.row_set import RowRange + from google.cloud.bigtable.row_set import RowSet + + row_range1 = RowRange(b"row_key4", b"row_key9") + row_range2 = RowRange(b"row_key4", b"row_key9") + + row_set1 = RowSet() + row_set2 = RowSet() + + row_set1.add_row_range(row_range1) + row_set1.add_row_range(row_range2) + row_set2.add_row_range(row_range2) + + assert not (row_set1 == row_set2) + + +def test_row_set__eq__row_keys_differ(): + from google.cloud.bigtable.row_set import RowSet + + row_set1 = RowSet() + row_set2 = RowSet() + + row_set1.add_row_key(b"row_key1") + 
row_set1.add_row_key(b"row_key2") + row_set1.add_row_key(b"row_key3") + row_set2.add_row_key(b"row_key1") + row_set2.add_row_key(b"row_key2") + row_set2.add_row_key(b"row_key4") + + assert not (row_set1 == row_set2) + + +def test_row_set__eq__row_ranges_differ(): + from google.cloud.bigtable.row_set import RowRange + from google.cloud.bigtable.row_set import RowSet + + row_range1 = RowRange(b"row_key4", b"row_key9") + row_range2 = RowRange(b"row_key14", b"row_key19") + row_range3 = RowRange(b"row_key24", b"row_key29") + + row_set1 = RowSet() + row_set2 = RowSet() + + row_set1.add_row_range(row_range1) + row_set1.add_row_range(row_range2) + row_set1.add_row_range(row_range3) + row_set2.add_row_range(row_range1) + row_set2.add_row_range(row_range2) + + assert not (row_set1 == row_set2) + + +def test_row_set__ne__(): + from google.cloud.bigtable.row_set import RowRange + from google.cloud.bigtable.row_set import RowSet + + row_key1 = b"row_key1" + row_key2 = b"row_key1" + row_range1 = RowRange(b"row_key4", b"row_key9") + row_range2 = RowRange(b"row_key5", b"row_key9") + + row_set1 = RowSet() + row_set2 = RowSet() + + row_set1.add_row_key(row_key1) + row_set2.add_row_key(row_key2) + row_set1.add_row_range(row_range1) + row_set2.add_row_range(row_range2) + + assert row_set1 != row_set2 + + +def test_row_set__ne__same_value(): + from google.cloud.bigtable.row_set import RowRange + from google.cloud.bigtable.row_set import RowSet + + row_key1 = b"row_key1" + row_key2 = b"row_key1" + row_range1 = RowRange(b"row_key4", b"row_key9") + row_range2 = RowRange(b"row_key4", b"row_key9") + + row_set1 = RowSet() + row_set2 = RowSet() + + row_set1.add_row_key(row_key1) + row_set2.add_row_key(row_key2) + row_set1.add_row_range(row_range1) + row_set2.add_row_range(row_range2) + + assert not (row_set1 != row_set2) + + +def test_row_set_add_row_key(): + from google.cloud.bigtable.row_set import RowSet + + row_set = RowSet() + row_set.add_row_key("row_key1") + row_set.add_row_key("row_key2") + assert ["row_key1" == "row_key2"], row_set.row_keys + + +def test_row_set_add_row_range(): + from google.cloud.bigtable.row_set import RowRange + from google.cloud.bigtable.row_set import RowSet + + row_set = RowSet() + row_range1 = RowRange(b"row_key1", b"row_key9") + row_range2 = RowRange(b"row_key21", b"row_key29") + row_set.add_row_range(row_range1) + row_set.add_row_range(row_range2) + expected = [row_range1, row_range2] + assert expected == row_set.row_ranges + + +def test_row_set_add_row_range_from_keys(): + from google.cloud.bigtable.row_set import RowSet + + row_set = RowSet() + row_set.add_row_range_from_keys( + start_key=b"row_key1", + end_key=b"row_key9", + start_inclusive=False, + end_inclusive=True, + ) + assert row_set.row_ranges[0].end_key == b"row_key9" + + +def test_row_set_add_row_range_with_prefix(): + from google.cloud.bigtable.row_set import RowSet + + row_set = RowSet() + row_set.add_row_range_with_prefix("row") + assert row_set.row_ranges[0].end_key == b"rox" + + +def test_row_set__update_message_request(): + from google.cloud._helpers import _to_bytes + from google.cloud.bigtable.row_set import RowRange + from google.cloud.bigtable.row_set import RowSet + + row_set = RowSet() + table_name = "table_name" + row_set.add_row_key("row_key1") + row_range1 = RowRange(b"row_key21", b"row_key29") + row_set.add_row_range(row_range1) + + request = _ReadRowsRequestPB(table_name=table_name) + row_set._update_message_request(request) + + expected_request = _ReadRowsRequestPB(table_name=table_name) + 
expected_request.rows.row_keys.append(_to_bytes("row_key1")) + + expected_request.rows.row_ranges.append(row_range1.get_range_kwargs()) + + assert request == expected_request + + +def test_row_range_constructor(): + from google.cloud.bigtable.row_set import RowRange + + start_key = "row_key1" + end_key = "row_key9" + row_range = RowRange(start_key, end_key) + assert start_key == row_range.start_key + assert end_key == row_range.end_key + assert row_range.start_inclusive + assert not row_range.end_inclusive + + +def test_row_range___hash__set_equality(): + from google.cloud.bigtable.row_set import RowRange + + row_range1 = RowRange("row_key1", "row_key9") + row_range2 = RowRange("row_key1", "row_key9") + set_one = {row_range1, row_range2} + set_two = {row_range1, row_range2} + assert set_one == set_two + + +def test_row_range___hash__not_equals(): + from google.cloud.bigtable.row_set import RowRange + + row_range1 = RowRange("row_key1", "row_key9") + row_range2 = RowRange("row_key1", "row_key19") + set_one = {row_range1} + set_two = {row_range2} + assert set_one != set_two + + +def test_row_range__eq__(): + from google.cloud.bigtable.row_set import RowRange + + start_key = b"row_key1" + end_key = b"row_key9" + row_range1 = RowRange(start_key, end_key, True, False) + row_range2 = RowRange(start_key, end_key, True, False) + assert row_range1 == row_range2 + + +def test_row_range___eq__type_differ(): + from google.cloud.bigtable.row_set import RowRange + + start_key = b"row_key1" + end_key = b"row_key9" + row_range1 = RowRange(start_key, end_key, True, False) + row_range2 = object() + assert row_range1 != row_range2 + + +def test_row_range__ne__(): + from google.cloud.bigtable.row_set import RowRange + + start_key = b"row_key1" + end_key = b"row_key9" + row_range1 = RowRange(start_key, end_key, True, False) + row_range2 = RowRange(start_key, end_key, False, True) + assert row_range1 != row_range2 + + +def test_row_range__ne__same_value(): + from google.cloud.bigtable.row_set import RowRange + + start_key = b"row_key1" + end_key = b"row_key9" + row_range1 = RowRange(start_key, end_key, True, False) + row_range2 = RowRange(start_key, end_key, True, False) + assert not (row_range1 != row_range2) + + +def test_row_range_get_range_kwargs_closed_open(): + from google.cloud.bigtable.row_set import RowRange + + start_key = b"row_key1" + end_key = b"row_key9" + expected_result = {"start_key_closed": start_key, "end_key_open": end_key} + row_range = RowRange(start_key, end_key) + actual_result = row_range.get_range_kwargs() + assert expected_result == actual_result + + +def test_row_range_get_range_kwargs_open_closed(): + from google.cloud.bigtable.row_set import RowRange + + start_key = b"row_key1" + end_key = b"row_key9" + expected_result = {"start_key_open": start_key, "end_key_closed": end_key} + row_range = RowRange(start_key, end_key, False, True) + actual_result = row_range.get_range_kwargs() + assert expected_result == actual_result def _ReadRowsRequestPB(*args, **kw): diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index bb6cca6a70c1..eacde3c3e633 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -13,2154 +13,1996 @@ # limitations under the License. 
-import unittest import warnings import mock +import pytest +from grpc import StatusCode -from ._testing import _make_credentials from google.api_core.exceptions import DeadlineExceeded +from ._testing import _make_credentials +PROJECT_ID = "project-id" +INSTANCE_ID = "instance-id" +INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID +CLUSTER_ID = "cluster-id" +CLUSTER_NAME = INSTANCE_NAME + "/clusters/" + CLUSTER_ID +TABLE_ID = "table-id" +TABLE_NAME = INSTANCE_NAME + "/tables/" + TABLE_ID +BACKUP_ID = "backup-id" +BACKUP_NAME = CLUSTER_NAME + "/backups/" + BACKUP_ID +ROW_KEY = b"row-key" +ROW_KEY_1 = b"row-key-1" +ROW_KEY_2 = b"row-key-2" +ROW_KEY_3 = b"row-key-3" +FAMILY_NAME = "family" +QUALIFIER = b"qualifier" +TIMESTAMP_MICROS = 100 +VALUE = b"value" + +# RPC Status Codes +SUCCESS = StatusCode.OK.value[0] +RETRYABLE_1 = StatusCode.DEADLINE_EXCEEDED.value[0] +RETRYABLE_2 = StatusCode.ABORTED.value[0] +RETRYABLE_3 = StatusCode.UNAVAILABLE.value[0] +RETRYABLES = (RETRYABLE_1, RETRYABLE_2, RETRYABLE_3) +NON_RETRYABLE = StatusCode.CANCELLED.value[0] + + +@mock.patch("google.cloud.bigtable.table._MAX_BULK_MUTATIONS", new=3) +def test__compile_mutation_entries_w_too_many_mutations(): + from google.cloud.bigtable.row import DirectRow + from google.cloud.bigtable.table import TooManyMutationsError + from google.cloud.bigtable.table import _compile_mutation_entries + + table = mock.Mock(name="table", spec=["name"]) + table.name = "table" + rows = [ + DirectRow(row_key=b"row_key", table=table), + DirectRow(row_key=b"row_key_2", table=table), + ] + rows[0].set_cell("cf1", b"c1", 1) + rows[0].set_cell("cf1", b"c1", 2) + rows[1].set_cell("cf1", b"c1", 3) + rows[1].set_cell("cf1", b"c1", 4) + + with pytest.raises(TooManyMutationsError): + _compile_mutation_entries("table", rows) -class Test__compile_mutation_entries(unittest.TestCase): - def _call_fut(self, table_name, rows): - from google.cloud.bigtable.table import _compile_mutation_entries - return _compile_mutation_entries(table_name, rows) +def test__compile_mutation_entries_normal(): + from google.cloud.bigtable.row import DirectRow + from google.cloud.bigtable.table import _compile_mutation_entries + from google.cloud.bigtable_v2.types import MutateRowsRequest + from google.cloud.bigtable_v2.types import data - @mock.patch("google.cloud.bigtable.table._MAX_BULK_MUTATIONS", new=3) - def test_w_too_many_mutations(self): - from google.cloud.bigtable.row import DirectRow - from google.cloud.bigtable.table import TooManyMutationsError + table = mock.Mock(spec=["name"]) + table.name = "table" + rows = [ + DirectRow(row_key=b"row_key", table=table), + DirectRow(row_key=b"row_key_2"), + ] + rows[0].set_cell("cf1", b"c1", b"1") + rows[1].set_cell("cf1", b"c1", b"2") + + result = _compile_mutation_entries("table", rows) + + entry_1 = MutateRowsRequest.Entry() + entry_1.row_key = b"row_key" + mutations_1 = data.Mutation() + mutations_1.set_cell.family_name = "cf1" + mutations_1.set_cell.column_qualifier = b"c1" + mutations_1.set_cell.timestamp_micros = -1 + mutations_1.set_cell.value = b"1" + entry_1.mutations.append(mutations_1) + + entry_2 = MutateRowsRequest.Entry() + entry_2.row_key = b"row_key_2" + mutations_2 = data.Mutation() + mutations_2.set_cell.family_name = "cf1" + mutations_2.set_cell.column_qualifier = b"c1" + mutations_2.set_cell.timestamp_micros = -1 + mutations_2.set_cell.value = b"2" + entry_2.mutations.append(mutations_2) + assert result == [entry_1, entry_2] + + +def 
test__check_row_table_name_w_wrong_table_name(): + from google.cloud.bigtable.table import _check_row_table_name + from google.cloud.bigtable.table import TableMismatchError + from google.cloud.bigtable.row import DirectRow + + table = mock.Mock(name="table", spec=["name"]) + table.name = "table" + row = DirectRow(row_key=b"row_key", table=table) + + with pytest.raises(TableMismatchError): + _check_row_table_name("other_table", row) + + +def test__check_row_table_name_w_right_table_name(): + from google.cloud.bigtable.row import DirectRow + from google.cloud.bigtable.table import _check_row_table_name + + table = mock.Mock(name="table", spec=["name"]) + table.name = "table" + row = DirectRow(row_key=b"row_key", table=table) - table = mock.Mock(name="table", spec=["name"]) - table.name = "table" - rows = [ - DirectRow(row_key=b"row_key", table=table), - DirectRow(row_key=b"row_key_2", table=table), - ] - rows[0].set_cell("cf1", b"c1", 1) - rows[0].set_cell("cf1", b"c1", 2) - rows[1].set_cell("cf1", b"c1", 3) - rows[1].set_cell("cf1", b"c1", 4) - - with self.assertRaises(TooManyMutationsError): - self._call_fut("table", rows) - - def test_normal(self): - from google.cloud.bigtable.row import DirectRow - from google.cloud.bigtable_v2.types import MutateRowsRequest - from google.cloud.bigtable_v2.types import data - - table = mock.Mock(spec=["name"]) - table.name = "table" - rows = [ - DirectRow(row_key=b"row_key", table=table), - DirectRow(row_key=b"row_key_2"), - ] - rows[0].set_cell("cf1", b"c1", b"1") - rows[1].set_cell("cf1", b"c1", b"2") - - result = self._call_fut("table", rows) - - entry_1 = MutateRowsRequest.Entry() - entry_1.row_key = b"row_key" - mutations_1 = data.Mutation() - mutations_1.set_cell.family_name = "cf1" - mutations_1.set_cell.column_qualifier = b"c1" - mutations_1.set_cell.timestamp_micros = -1 - mutations_1.set_cell.value = b"1" - entry_1.mutations.append(mutations_1) + assert not _check_row_table_name("table", row) - entry_2 = MutateRowsRequest.Entry() - entry_2.row_key = b"row_key_2" - mutations_2 = data.Mutation() - mutations_2.set_cell.family_name = "cf1" - mutations_2.set_cell.column_qualifier = b"c1" - mutations_2.set_cell.timestamp_micros = -1 - mutations_2.set_cell.value = b"2" - entry_2.mutations.append(mutations_2) - self.assertEqual(result, [entry_1, entry_2]) - - -class Test__check_row_table_name(unittest.TestCase): - def _call_fut(self, table_name, row): - from google.cloud.bigtable.table import _check_row_table_name - - return _check_row_table_name(table_name, row) - - def test_wrong_table_name(self): - from google.cloud.bigtable.table import TableMismatchError - from google.cloud.bigtable.row import DirectRow - - table = mock.Mock(name="table", spec=["name"]) - table.name = "table" - row = DirectRow(row_key=b"row_key", table=table) - with self.assertRaises(TableMismatchError): - self._call_fut("other_table", row) - - def test_right_table_name(self): - from google.cloud.bigtable.row import DirectRow - - table = mock.Mock(name="table", spec=["name"]) - table.name = "table" - row = DirectRow(row_key=b"row_key", table=table) - result = self._call_fut("table", row) - self.assertFalse(result) - - -class Test__check_row_type(unittest.TestCase): - def _call_fut(self, row): - from google.cloud.bigtable.table import _check_row_type - - return _check_row_type(row) - - def test_test_wrong_row_type(self): - from google.cloud.bigtable.row import ConditionalRow - - row = ConditionalRow(row_key=b"row_key", table="table", filter_=None) - with 
self.assertRaises(TypeError): - self._call_fut(row) - - def test_right_row_type(self): - from google.cloud.bigtable.row import DirectRow - - row = DirectRow(row_key=b"row_key", table="table") - result = self._call_fut(row) - self.assertFalse(result) - - -class TestTable(unittest.TestCase): - - PROJECT_ID = "project-id" - INSTANCE_ID = "instance-id" - INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID - CLUSTER_ID = "cluster-id" - CLUSTER_NAME = INSTANCE_NAME + "/clusters/" + CLUSTER_ID - TABLE_ID = "table-id" - TABLE_NAME = INSTANCE_NAME + "/tables/" + TABLE_ID - BACKUP_ID = "backup-id" - BACKUP_NAME = CLUSTER_NAME + "/backups/" + BACKUP_ID - ROW_KEY = b"row-key" - ROW_KEY_1 = b"row-key-1" - ROW_KEY_2 = b"row-key-2" - ROW_KEY_3 = b"row-key-3" - FAMILY_NAME = "family" - QUALIFIER = b"qualifier" - TIMESTAMP_MICROS = 100 - VALUE = b"value" - _json_tests = None - - @staticmethod - def _get_target_class(): - from google.cloud.bigtable.table import Table - - return Table - - def _make_one(self, *args, **kwargs): - return self._get_target_class()(*args, **kwargs) - - @staticmethod - def _get_target_client_class(): - from google.cloud.bigtable.client import Client - - return Client - - def _make_client(self, *args, **kwargs): - return self._get_target_client_class()(*args, **kwargs) - - def test_constructor_defaults(self): - instance = mock.Mock(spec=[]) - table = self._make_one(self.TABLE_ID, instance) - - self.assertEqual(table.table_id, self.TABLE_ID) - self.assertIs(table._instance, instance) - self.assertIsNone(table.mutation_timeout) - self.assertIsNone(table._app_profile_id) +def test__check_row_type_w_wrong_row_type(): + from google.cloud.bigtable.row import ConditionalRow + from google.cloud.bigtable.table import _check_row_type - def test_constructor_explicit(self): - instance = mock.Mock(spec=[]) - mutation_timeout = 123 - app_profile_id = "profile-123" + row = ConditionalRow(row_key=b"row_key", table="table", filter_=None) + with pytest.raises(TypeError): + _check_row_type(row) - table = self._make_one( - self.TABLE_ID, - instance, - mutation_timeout=mutation_timeout, - app_profile_id=app_profile_id, - ) - self.assertEqual(table.table_id, self.TABLE_ID) - self.assertIs(table._instance, instance) - self.assertEqual(table.mutation_timeout, mutation_timeout) - self.assertEqual(table._app_profile_id, app_profile_id) - - def test_name(self): - table_data_client = mock.Mock(spec=["table_path"]) - client = mock.Mock( - project=self.PROJECT_ID, - table_data_client=table_data_client, - spec=["project", "table_data_client"], - ) - instance = mock.Mock( - _client=client, - instance_id=self.INSTANCE_ID, - spec=["_client", "instance_id"], - ) +def test__check_row_type_w_right_row_type(): + from google.cloud.bigtable.row import DirectRow + from google.cloud.bigtable.table import _check_row_type - table = self._make_one(self.TABLE_ID, instance) + row = DirectRow(row_key=b"row_key", table="table") + assert not _check_row_type(row) - self.assertEqual(table.name, table_data_client.table_path.return_value) - def _row_methods_helper(self): - client = self._make_client( - project="project-id", credentials=_make_credentials(), admin=True - ) - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_one(self.TABLE_ID, instance) - row_key = b"row_key" - return table, row_key +def _make_client(*args, **kwargs): + from google.cloud.bigtable.client import Client - def test_row_factory_direct(self): - from google.cloud.bigtable.row import DirectRow + return Client(*args, 
**kwargs) - table, row_key = self._row_methods_helper() - with warnings.catch_warnings(record=True) as warned: - row = table.row(row_key) - self.assertIsInstance(row, DirectRow) - self.assertEqual(row._row_key, row_key) - self.assertEqual(row._table, table) +def _make_table(*args, **kwargs): + from google.cloud.bigtable.table import Table - self.assertEqual(len(warned), 1) - self.assertIs(warned[0].category, PendingDeprecationWarning) + return Table(*args, **kwargs) - def test_row_factory_conditional(self): - from google.cloud.bigtable.row import ConditionalRow - table, row_key = self._row_methods_helper() - filter_ = object() +def test_table_constructor_defaults(): + instance = mock.Mock(spec=[]) - with warnings.catch_warnings(record=True) as warned: - row = table.row(row_key, filter_=filter_) + table = _make_table(TABLE_ID, instance) + + assert table.table_id == TABLE_ID + assert table._instance is instance + assert table.mutation_timeout is None + assert table._app_profile_id is None + + +def test_table_constructor_explicit(): + instance = mock.Mock(spec=[]) + mutation_timeout = 123 + app_profile_id = "profile-123" + + table = _make_table( + TABLE_ID, + instance, + mutation_timeout=mutation_timeout, + app_profile_id=app_profile_id, + ) + + assert table.table_id == TABLE_ID + assert table._instance is instance + assert table.mutation_timeout == mutation_timeout + assert table._app_profile_id == app_profile_id + + +def test_table_name(): + table_data_client = mock.Mock(spec=["table_path"]) + client = mock.Mock( + project=PROJECT_ID, + table_data_client=table_data_client, + spec=["project", "table_data_client"], + ) + instance = mock.Mock( + _client=client, instance_id=INSTANCE_ID, spec=["_client", "instance_id"], + ) + + table = _make_table(TABLE_ID, instance) + + assert table.name == table_data_client.table_path.return_value - self.assertIsInstance(row, ConditionalRow) - self.assertEqual(row._row_key, row_key) - self.assertEqual(row._table, table) - self.assertEqual(len(warned), 1) - self.assertIs(warned[0].category, PendingDeprecationWarning) +def _table_row_methods_helper(): + client = _make_client( + project="project-id", credentials=_make_credentials(), admin=True + ) + instance = client.instance(instance_id=INSTANCE_ID) + table = _make_table(TABLE_ID, instance) + row_key = b"row_key" + return table, row_key + + +def test_table_row_factory_direct(): + from google.cloud.bigtable.row import DirectRow + + table, row_key = _table_row_methods_helper() + with warnings.catch_warnings(record=True) as warned: + row = table.row(row_key) + + assert isinstance(row, DirectRow) + assert row._row_key == row_key + assert row._table == table + + assert len(warned) == 1 + assert warned[0].category is PendingDeprecationWarning + + +def test_table_row_factory_conditional(): + from google.cloud.bigtable.row import ConditionalRow + + table, row_key = _table_row_methods_helper() + filter_ = object() + + with warnings.catch_warnings(record=True) as warned: + row = table.row(row_key, filter_=filter_) + + assert isinstance(row, ConditionalRow) + assert row._row_key == row_key + assert row._table == table - def test_row_factory_append(self): - from google.cloud.bigtable.row import AppendRow + assert len(warned) == 1 + assert warned[0].category is PendingDeprecationWarning - table, row_key = self._row_methods_helper() +def test_table_row_factory_append(): + from google.cloud.bigtable.row import AppendRow + + table, row_key = _table_row_methods_helper() + + with warnings.catch_warnings(record=True) as warned: + 
row = table.row(row_key, append=True) + + assert isinstance(row, AppendRow) + assert row._row_key == row_key + assert row._table == table + + assert len(warned) == 1 + assert warned[0].category is PendingDeprecationWarning + + +def test_table_row_factory_failure(): + table, row_key = _table_row_methods_helper() + + with pytest.raises(ValueError): with warnings.catch_warnings(record=True) as warned: - row = table.row(row_key, append=True) + table.row(row_key, filter_=object(), append=True) - self.assertIsInstance(row, AppendRow) - self.assertEqual(row._row_key, row_key) - self.assertEqual(row._table, table) + assert len(warned) == 1 + assert warned[0].category is PendingDeprecationWarning - self.assertEqual(len(warned), 1) - self.assertIs(warned[0].category, PendingDeprecationWarning) - def test_row_factory_failure(self): - table, row_key = self._row_methods_helper() - with self.assertRaises(ValueError): - with warnings.catch_warnings(record=True) as warned: - table.row(row_key, filter_=object(), append=True) +def test_table_direct_row(): + from google.cloud.bigtable.row import DirectRow - self.assertEqual(len(warned), 1) - self.assertIs(warned[0].category, PendingDeprecationWarning) + table, row_key = _table_row_methods_helper() + row = table.direct_row(row_key) - def test_direct_row(self): - from google.cloud.bigtable.row import DirectRow + assert isinstance(row, DirectRow) + assert row._row_key == row_key + assert row._table == table - table, row_key = self._row_methods_helper() - row = table.direct_row(row_key) - self.assertIsInstance(row, DirectRow) - self.assertEqual(row._row_key, row_key) - self.assertEqual(row._table, table) +def test_table_conditional_row(): + from google.cloud.bigtable.row import ConditionalRow - def test_conditional_row(self): - from google.cloud.bigtable.row import ConditionalRow + table, row_key = _table_row_methods_helper() + filter_ = object() + row = table.conditional_row(row_key, filter_=filter_) - table, row_key = self._row_methods_helper() - filter_ = object() - row = table.conditional_row(row_key, filter_=filter_) + assert isinstance(row, ConditionalRow) + assert row._row_key == row_key + assert row._table == table - self.assertIsInstance(row, ConditionalRow) - self.assertEqual(row._row_key, row_key) - self.assertEqual(row._table, table) - def test_append_row(self): - from google.cloud.bigtable.row import AppendRow +def test_table_append_row(): + from google.cloud.bigtable.row import AppendRow - table, row_key = self._row_methods_helper() - row = table.append_row(row_key) + table, row_key = _table_row_methods_helper() + row = table.append_row(row_key) - self.assertIsInstance(row, AppendRow) - self.assertEqual(row._row_key, row_key) - self.assertEqual(row._table, table) + assert isinstance(row, AppendRow) + assert row._row_key == row_key + assert row._table == table - def test___eq__(self): - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - instance = client.instance(instance_id=self.INSTANCE_ID) - table1 = self._make_one(self.TABLE_ID, instance) - table2 = self._make_one(self.TABLE_ID, instance) - self.assertEqual(table1, table2) - - def test___eq__type_differ(self): - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - instance = client.instance(instance_id=self.INSTANCE_ID) - table1 = self._make_one(self.TABLE_ID, instance) - table2 = object() - self.assertNotEqual(table1, table2) - - def 
test___ne__same_value(self): - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - instance = client.instance(instance_id=self.INSTANCE_ID) - table1 = self._make_one(self.TABLE_ID, instance) - table2 = self._make_one(self.TABLE_ID, instance) - comparison_val = table1 != table2 - self.assertFalse(comparison_val) - - def test___ne__(self): - table1 = self._make_one("table_id1", None) - table2 = self._make_one("table_id2", None) - self.assertNotEqual(table1, table2) - - def _create_test_helper(self, split_keys=[], column_families={}): - from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - client as bigtable_table_admin, - ) - from google.cloud.bigtable_admin_v2.types import table as table_pb2 - from google.cloud.bigtable_admin_v2.types import ( - bigtable_table_admin as table_admin_messages_v2_pb2, - ) - from google.cloud.bigtable.column_family import ColumnFamily - table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_one(self.TABLE_ID, instance) +def test_table___eq__(): + credentials = _make_credentials() + client = _make_client(project="project-id", credentials=credentials, admin=True) + instance = client.instance(instance_id=INSTANCE_ID) + table1 = _make_table(TABLE_ID, instance) + table2 = _make_table(TABLE_ID, instance) + assert table1 == table2 + + +def test_table___eq__type_differ(): + credentials = _make_credentials() + client = _make_client(project="project-id", credentials=credentials, admin=True) + instance = client.instance(instance_id=INSTANCE_ID) + table1 = _make_table(TABLE_ID, instance) + table2 = object() + assert not (table1 == table2) + + +def test_table___ne__same_value(): + credentials = _make_credentials() + client = _make_client(project="project-id", credentials=credentials, admin=True) + instance = client.instance(instance_id=INSTANCE_ID) + table1 = _make_table(TABLE_ID, instance) + table2 = _make_table(TABLE_ID, instance) + assert not (table1 != table2) + + +def test_table___ne__(): + table1 = _make_table("table_id1", None) + table2 = _make_table("table_id2", None) + assert table1 != table2 + + +def _make_table_api(): + from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + client as bigtable_table_admin, + ) - # Patch API calls - client._table_admin_client = table_api + return mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) - # Perform the method and check the result. 
- table.create(column_families=column_families, initial_split_keys=split_keys) - families = { - id: ColumnFamily(id, self, rule).to_pb() - for (id, rule) in column_families.items() +def _create_table_helper(split_keys=[], column_families={}): + from google.cloud.bigtable_admin_v2.types import table as table_pb2 + from google.cloud.bigtable_admin_v2.types import ( + bigtable_table_admin as table_admin_messages_v2_pb2, + ) + from google.cloud.bigtable.column_family import ColumnFamily + + credentials = _make_credentials() + client = _make_client(project="project-id", credentials=credentials, admin=True) + instance = client.instance(instance_id=INSTANCE_ID) + table = _make_table(TABLE_ID, instance) + + table_api = client._table_admin_client = _make_table_api() + + table.create(column_families=column_families, initial_split_keys=split_keys) + + families = { + id: ColumnFamily(id, table, rule).to_pb() + for (id, rule) in column_families.items() + } + + split = table_admin_messages_v2_pb2.CreateTableRequest.Split + splits = [split(key=split_key) for split_key in split_keys] + + table_api.create_table.assert_called_once_with( + request={ + "parent": INSTANCE_NAME, + "table": table_pb2.Table(column_families=families), + "table_id": TABLE_ID, + "initial_splits": splits, } + ) - split = table_admin_messages_v2_pb2.CreateTableRequest.Split - splits = [split(key=split_key) for split_key in split_keys] - table_api.create_table.assert_called_once_with( - request={ - "parent": self.INSTANCE_NAME, - "table": table_pb2.Table(column_families=families), - "table_id": self.TABLE_ID, - "initial_splits": splits, - } - ) +def test_table_create(): + _create_table_helper() - def test_create(self): - self._create_test_helper() - def test_create_with_families(self): - from google.cloud.bigtable.column_family import MaxVersionsGCRule +def test_table_create_with_families(): + from google.cloud.bigtable.column_family import MaxVersionsGCRule - families = {"family": MaxVersionsGCRule(5)} - self._create_test_helper(column_families=families) + families = {"family": MaxVersionsGCRule(5)} + _create_table_helper(column_families=families) - def test_create_with_split_keys(self): - self._create_test_helper(split_keys=[b"split1", b"split2", b"split3"]) - def test_exists(self): - from google.cloud.bigtable_admin_v2.types import ListTablesResponse - from google.cloud.bigtable_admin_v2.types import Table - from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - client as table_admin_client, - ) - from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( - client as instance_admin_client, - ) - from google.api_core.exceptions import NotFound - from google.api_core.exceptions import BadRequest +def test_table_create_with_split_keys(): + _create_table_helper(split_keys=[b"split1", b"split2", b"split3"]) - table_api = mock.create_autospec(table_admin_client.BigtableTableAdminClient) - instance_api = mock.create_autospec( - instance_admin_client.BigtableInstanceAdminClient - ) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - instance = client.instance(instance_id=self.INSTANCE_ID) - # Create response_pb - response_pb = ListTablesResponse(tables=[Table(name=self.TABLE_NAME)]) - - # Patch API calls - client._table_admin_client = table_api - client._instance_admin_client = instance_api - bigtable_table_stub = client._table_admin_client - - bigtable_table_stub.get_table.side_effect = [ - response_pb, - 
NotFound("testing"), - BadRequest("testing"), - ] +def test_table_exists_hit(): + from google.cloud.bigtable_admin_v2.types import ListTablesResponse + from google.cloud.bigtable_admin_v2.types import Table + from google.cloud.bigtable import enums - client._table_admin_client = table_api - client._instance_admin_client = instance_api - bigtable_table_stub = client._table_admin_client - bigtable_table_stub.get_table.side_effect = [ - response_pb, - NotFound("testing"), - BadRequest("testing"), - ] + credentials = _make_credentials() + client = _make_client(project="project-id", credentials=credentials, admin=True) + instance = client.instance(instance_id=INSTANCE_ID) + table = instance.table(TABLE_ID) - # Perform the method and check the result. - table1 = instance.table(self.TABLE_ID) - table2 = instance.table("table-id2") + response_pb = ListTablesResponse(tables=[Table(name=TABLE_NAME)]) + table_api = client._table_admin_client = _make_table_api() + table_api.get_table.return_value = response_pb - result = table1.exists() - self.assertEqual(True, result) + assert table.exists() - result = table2.exists() - self.assertEqual(False, result) + expected_request = { + "name": table.name, + "view": enums.Table.View.NAME_ONLY, + } + table_api.get_table.assert_called_once_with(request=expected_request) - with self.assertRaises(BadRequest): - table2.exists() - def test_delete(self): - from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - client as bigtable_table_admin, - ) +def test_table_exists_miss(): + from google.api_core.exceptions import NotFound + from google.cloud.bigtable import enums - table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_one(self.TABLE_ID, instance) + credentials = _make_credentials() + client = _make_client(project="project-id", credentials=credentials, admin=True) + instance = client.instance(instance_id=INSTANCE_ID) + table = instance.table("nonesuch-table-id2") - # Patch API calls - client._table_admin_client = table_api + table_api = client._table_admin_client = _make_table_api() + table_api.get_table.side_effect = NotFound("testing") - # Create expected_result. - expected_result = None # delete() has no return value. + assert not table.exists() - # Perform the method and check the result. 
- result = table.delete() - self.assertEqual(result, expected_result) + expected_request = { + "name": table.name, + "view": enums.Table.View.NAME_ONLY, + } + table_api.get_table.assert_called_once_with(request=expected_request) - def _list_column_families_helper(self): - from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - client as bigtable_table_admin, - ) - table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_one(self.TABLE_ID, instance) +def test_table_exists_error(): + from google.api_core.exceptions import BadRequest + from google.cloud.bigtable import enums - # Create response_pb - COLUMN_FAMILY_ID = "foo" - column_family = _ColumnFamilyPB() - response_pb = _TablePB(column_families={COLUMN_FAMILY_ID: column_family}) + credentials = _make_credentials() + client = _make_client(project="project-id", credentials=credentials, admin=True) + instance = client.instance(instance_id=INSTANCE_ID) - # Patch the stub used by the API method. - client._table_admin_client = table_api - bigtable_table_stub = client._table_admin_client - bigtable_table_stub.get_table.side_effect = [response_pb] + table_api = client._table_admin_client = _make_table_api() + table_api.get_table.side_effect = BadRequest("testing") - # Create expected_result. - expected_result = {COLUMN_FAMILY_ID: table.column_family(COLUMN_FAMILY_ID)} + table = instance.table(TABLE_ID) - # Perform the method and check the result. - result = table.list_column_families() - self.assertEqual(result, expected_result) + with pytest.raises(BadRequest): + table.exists() - def test_list_column_families(self): - self._list_column_families_helper() + expected_request = { + "name": table.name, + "view": enums.Table.View.NAME_ONLY, + } + table_api.get_table.assert_called_once_with(request=expected_request) - def test_get_cluster_states(self): - from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - client as bigtable_table_admin, - ) - from google.cloud.bigtable.enums import Table as enum_table - from google.cloud.bigtable.table import ClusterState - INITIALIZING = enum_table.ReplicationState.INITIALIZING - PLANNED_MAINTENANCE = enum_table.ReplicationState.PLANNED_MAINTENANCE - READY = enum_table.ReplicationState.READY +def test_table_delete(): + credentials = _make_credentials() + client = _make_client(project="project-id", credentials=credentials, admin=True) + instance = client.instance(instance_id=INSTANCE_ID) + table = _make_table(TABLE_ID, instance) - table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_one(self.TABLE_ID, instance) - - response_pb = _TablePB( - cluster_states={ - "cluster-id1": _ClusterStatePB(INITIALIZING), - "cluster-id2": _ClusterStatePB(PLANNED_MAINTENANCE), - "cluster-id3": _ClusterStatePB(READY), - } - ) + table_api = client._table_admin_client = _make_table_api() - # Patch the stub used by the API method. 
- client._table_admin_client = table_api - bigtable_table_stub = client._table_admin_client + assert table.delete() is None - bigtable_table_stub.get_table.side_effect = [response_pb] + table_api.delete_table.assert_called_once_with(request={"name": table.name}) - # build expected result - expected_result = { - "cluster-id1": ClusterState(INITIALIZING), - "cluster-id2": ClusterState(PLANNED_MAINTENANCE), - "cluster-id3": ClusterState(READY), - } - # Perform the method and check the result. - result = table.get_cluster_states() - self.assertEqual(result, expected_result) +def _table_list_column_families_helper(): + credentials = _make_credentials() + client = _make_client(project="project-id", credentials=credentials, admin=True) + instance = client.instance(instance_id=INSTANCE_ID) + table = _make_table(TABLE_ID, instance) - def test_get_encryption_info(self): - from google.rpc.code_pb2 import Code - from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - client as bigtable_table_admin, - ) - from google.cloud.bigtable.encryption_info import EncryptionInfo - from google.cloud.bigtable.enums import EncryptionInfo as enum_crypto - from google.cloud.bigtable.error import Status + # Create response_pb + COLUMN_FAMILY_ID = "foo" + column_family = _ColumnFamilyPB() + response_pb = _TablePB(column_families={COLUMN_FAMILY_ID: column_family}) - ENCRYPTION_TYPE_UNSPECIFIED = ( - enum_crypto.EncryptionType.ENCRYPTION_TYPE_UNSPECIFIED - ) - GOOGLE_DEFAULT_ENCRYPTION = enum_crypto.EncryptionType.GOOGLE_DEFAULT_ENCRYPTION - CUSTOMER_MANAGED_ENCRYPTION = ( - enum_crypto.EncryptionType.CUSTOMER_MANAGED_ENCRYPTION - ) + # Patch the stub used by the API method. + table_api = client._table_admin_client = _make_table_api() + table_api.get_table.return_value = response_pb - table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_one(self.TABLE_ID, instance) - - response_pb = _TablePB( - cluster_states={ - "cluster-id1": _ClusterStateEncryptionInfoPB( - encryption_type=ENCRYPTION_TYPE_UNSPECIFIED, - encryption_status=_StatusPB(Code.OK, "Status OK"), - ), - "cluster-id2": _ClusterStateEncryptionInfoPB( - encryption_type=GOOGLE_DEFAULT_ENCRYPTION, - ), - "cluster-id3": _ClusterStateEncryptionInfoPB( - encryption_type=CUSTOMER_MANAGED_ENCRYPTION, - encryption_status=_StatusPB( - Code.UNKNOWN, "Key version is not yet known." - ), - kms_key_version="UNKNOWN", - ), - } - ) + # Create expected_result. + expected_result = {COLUMN_FAMILY_ID: table.column_family(COLUMN_FAMILY_ID)} - # Patch the stub used by the API method. - client._table_admin_client = table_api - bigtable_table_stub = client._table_admin_client + # Perform the method and check the result. 
+ result = table.list_column_families() - bigtable_table_stub.get_table.side_effect = [response_pb] + assert result == expected_result - # build expected result - expected_result = { - "cluster-id1": ( - EncryptionInfo( - encryption_type=ENCRYPTION_TYPE_UNSPECIFIED, - encryption_status=Status(_StatusPB(Code.OK, "Status OK")), - kms_key_version="", - ), + table_api.get_table.assert_called_once_with(request={"name": table.name}) + + +def test_table_list_column_families(): + _table_list_column_families_helper() + + +def test_table_get_cluster_states(): + from google.cloud.bigtable.enums import Table as enum_table + from google.cloud.bigtable.table import ClusterState + + INITIALIZING = enum_table.ReplicationState.INITIALIZING + PLANNED_MAINTENANCE = enum_table.ReplicationState.PLANNED_MAINTENANCE + READY = enum_table.ReplicationState.READY + + credentials = _make_credentials() + client = _make_client(project="project-id", credentials=credentials, admin=True) + instance = client.instance(instance_id=INSTANCE_ID) + table = _make_table(TABLE_ID, instance) + + response_pb = _TablePB( + cluster_states={ + "cluster-id1": _ClusterStatePB(INITIALIZING), + "cluster-id2": _ClusterStatePB(PLANNED_MAINTENANCE), + "cluster-id3": _ClusterStatePB(READY), + } + ) + + # Patch the stub used by the API method. + table_api = client._table_admin_client = _make_table_api() + table_api.get_table.return_value = response_pb + + # build expected result + expected_result = { + "cluster-id1": ClusterState(INITIALIZING), + "cluster-id2": ClusterState(PLANNED_MAINTENANCE), + "cluster-id3": ClusterState(READY), + } + + # Perform the method and check the result. + result = table.get_cluster_states() + + assert result == expected_result + + expected_request = { + "name": table.name, + "view": enum_table.View.REPLICATION_VIEW, + } + table_api.get_table.assert_called_once_with(request=expected_request) + + +def test_table_get_encryption_info(): + from google.rpc.code_pb2 import Code + from google.cloud.bigtable.encryption_info import EncryptionInfo + from google.cloud.bigtable.enums import EncryptionInfo as enum_crypto + from google.cloud.bigtable.enums import Table as enum_table + from google.cloud.bigtable.error import Status + + ENCRYPTION_TYPE_UNSPECIFIED = enum_crypto.EncryptionType.ENCRYPTION_TYPE_UNSPECIFIED + GOOGLE_DEFAULT_ENCRYPTION = enum_crypto.EncryptionType.GOOGLE_DEFAULT_ENCRYPTION + CUSTOMER_MANAGED_ENCRYPTION = enum_crypto.EncryptionType.CUSTOMER_MANAGED_ENCRYPTION + + credentials = _make_credentials() + client = _make_client(project="project-id", credentials=credentials, admin=True) + instance = client.instance(instance_id=INSTANCE_ID) + table = _make_table(TABLE_ID, instance) + + response_pb = _TablePB( + cluster_states={ + "cluster-id1": _ClusterStateEncryptionInfoPB( + encryption_type=ENCRYPTION_TYPE_UNSPECIFIED, + encryption_status=_StatusPB(Code.OK, "Status OK"), ), - "cluster-id2": ( - EncryptionInfo( - encryption_type=GOOGLE_DEFAULT_ENCRYPTION, - encryption_status=Status(_StatusPB(0, "")), - kms_key_version="", - ), + "cluster-id2": _ClusterStateEncryptionInfoPB( + encryption_type=GOOGLE_DEFAULT_ENCRYPTION, ), - "cluster-id3": ( - EncryptionInfo( - encryption_type=CUSTOMER_MANAGED_ENCRYPTION, - encryption_status=Status( - _StatusPB(Code.UNKNOWN, "Key version is not yet known.") - ), - kms_key_version="UNKNOWN", + "cluster-id3": _ClusterStateEncryptionInfoPB( + encryption_type=CUSTOMER_MANAGED_ENCRYPTION, + encryption_status=_StatusPB( + Code.UNKNOWN, "Key version is not yet known." 
), + kms_key_version="UNKNOWN", ), } + ) - # Perform the method and check the result. - result = table.get_encryption_info() - self.assertEqual(result, expected_result) + # Patch the stub used by the API method. + table_api = client._table_admin_client = _make_table_api() + table_api.get_table.return_value = response_pb + + # build expected result + expected_result = { + "cluster-id1": ( + EncryptionInfo( + encryption_type=ENCRYPTION_TYPE_UNSPECIFIED, + encryption_status=Status(_StatusPB(Code.OK, "Status OK")), + kms_key_version="", + ), + ), + "cluster-id2": ( + EncryptionInfo( + encryption_type=GOOGLE_DEFAULT_ENCRYPTION, + encryption_status=Status(_StatusPB(0, "")), + kms_key_version="", + ), + ), + "cluster-id3": ( + EncryptionInfo( + encryption_type=CUSTOMER_MANAGED_ENCRYPTION, + encryption_status=Status( + _StatusPB(Code.UNKNOWN, "Key version is not yet known.") + ), + kms_key_version="UNKNOWN", + ), + ), + } - def _read_row_helper(self, chunks, expected_result, app_profile_id=None): + # Perform the method and check the result. + result = table.get_encryption_info() - from google.cloud._testing import _Monkey - from google.cloud.bigtable import table as MUT - from google.cloud.bigtable.row_set import RowSet - from google.cloud.bigtable_v2.services.bigtable import BigtableClient - from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - client as bigtable_table_admin, - ) - from google.cloud.bigtable.row_filters import RowSampleFilter + assert result == expected_result + expected_request = { + "name": table.name, + "view": enum_table.View.ENCRYPTION_VIEW, + } + table_api.get_table.assert_called_once_with(request=expected_request) - data_api = mock.create_autospec(BigtableClient) - table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_one(self.TABLE_ID, instance, app_profile_id=app_profile_id) - # Create request_pb - request_pb = object() # Returned by our mock. - mock_created = [] +def _make_data_api(): + from google.cloud.bigtable_v2.services.bigtable import BigtableClient - def mock_create_row_request(table_name, **kwargs): - mock_created.append((table_name, kwargs)) - return request_pb + return mock.create_autospec(BigtableClient) - # Create response_iterator - if chunks is None: - response_iterator = iter(()) # no responses at all - else: - response_pb = _ReadRowsResponsePB(chunks=chunks) - response_iterator = iter([response_pb]) - - # Patch the stub used by the API method. - client._table_data_client = data_api - client._table_admin_client = table_api - client._table_data_client.read_rows.side_effect = [response_iterator] - table._instance._client._table_data_client = client._table_data_client - # Perform the method and check the result. 
- filter_obj = RowSampleFilter(0.33) - result = None - with _Monkey(MUT, _create_row_request=mock_create_row_request): - result = table.read_row(self.ROW_KEY, filter_=filter_obj) - row_set = RowSet() - row_set.add_row_key(self.ROW_KEY) - expected_request = [ - ( - table.name, - { - "end_inclusive": False, - "row_set": row_set, - "app_profile_id": app_profile_id, - "end_key": None, - "limit": None, - "start_key": None, - "filter_": filter_obj, - }, - ) - ] - self.assertEqual(result, expected_result) - self.assertEqual(mock_created, expected_request) - - def test_read_row_miss_no__responses(self): - self._read_row_helper(None, None) - - def test_read_row_miss_no_chunks_in_response(self): - chunks = [] - self._read_row_helper(chunks, None) - - def test_read_row_complete(self): - from google.cloud.bigtable.row_data import Cell - from google.cloud.bigtable.row_data import PartialRowData - - app_profile_id = "app-profile-id" - chunk = _ReadRowsResponseCellChunkPB( - row_key=self.ROW_KEY, - family_name=self.FAMILY_NAME, - qualifier=self.QUALIFIER, - timestamp_micros=self.TIMESTAMP_MICROS, - value=self.VALUE, - commit_row=True, - ) - chunks = [chunk] - expected_result = PartialRowData(row_key=self.ROW_KEY) - family = expected_result._cells.setdefault(self.FAMILY_NAME, {}) - column = family.setdefault(self.QUALIFIER, []) - column.append(Cell.from_pb(chunk)) - self._read_row_helper(chunks, expected_result, app_profile_id) - - def test_read_row_more_than_one_row_returned(self): - app_profile_id = "app-profile-id" - chunk_1 = _ReadRowsResponseCellChunkPB( - row_key=self.ROW_KEY, - family_name=self.FAMILY_NAME, - qualifier=self.QUALIFIER, - timestamp_micros=self.TIMESTAMP_MICROS, - value=self.VALUE, - commit_row=True, - )._pb - chunk_2 = _ReadRowsResponseCellChunkPB( - row_key=self.ROW_KEY_2, - family_name=self.FAMILY_NAME, - qualifier=self.QUALIFIER, - timestamp_micros=self.TIMESTAMP_MICROS, - value=self.VALUE, - commit_row=True, - )._pb - - chunks = [chunk_1, chunk_2] - with self.assertRaises(ValueError): - self._read_row_helper(chunks, None, app_profile_id) - - def test_read_row_still_partial(self): - chunk = _ReadRowsResponseCellChunkPB( - row_key=self.ROW_KEY, - family_name=self.FAMILY_NAME, - qualifier=self.QUALIFIER, - timestamp_micros=self.TIMESTAMP_MICROS, - value=self.VALUE, - ) - # No "commit row". 
- chunks = [chunk] - with self.assertRaises(ValueError): - self._read_row_helper(chunks, None) - - def _mutate_rows_helper( - self, mutation_timeout=None, app_profile_id=None, retry=None, timeout=None - ): - from google.rpc.status_pb2 import Status - from google.cloud.bigtable.table import DEFAULT_RETRY - from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - client as bigtable_table_admin, - ) - table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - instance = client.instance(instance_id=self.INSTANCE_ID) - client._table_admin_client = table_api - ctor_kwargs = {} +def _table_read_row_helper(chunks, expected_result, app_profile_id=None): + from google.cloud._testing import _Monkey + from google.cloud.bigtable import table as MUT + from google.cloud.bigtable.row_set import RowSet + from google.cloud.bigtable.row_filters import RowSampleFilter + + credentials = _make_credentials() + client = _make_client(project="project-id", credentials=credentials, admin=True) + instance = client.instance(instance_id=INSTANCE_ID) + table = _make_table(TABLE_ID, instance, app_profile_id=app_profile_id) + + # Create request_pb + request_pb = object() # Returned by our mock. + mock_created = [] + + def mock_create_row_request(table_name, **kwargs): + mock_created.append((table_name, kwargs)) + return request_pb - if mutation_timeout is not None: - ctor_kwargs["mutation_timeout"] = mutation_timeout + # Create response_iterator + if chunks is None: + response_iterator = iter(()) # no responses at all + else: + response_pb = _ReadRowsResponsePB(chunks=chunks) + response_iterator = iter([response_pb]) - if app_profile_id is not None: - ctor_kwargs["app_profile_id"] = app_profile_id + data_api = client._table_data_client = _make_data_api() + data_api.read_rows.return_value = response_iterator - table = self._make_one(self.TABLE_ID, instance, **ctor_kwargs) + filter_obj = RowSampleFilter(0.33) - rows = [mock.MagicMock(), mock.MagicMock()] - response = [Status(code=0), Status(code=1)] - instance_mock = mock.Mock(return_value=response) - klass_mock = mock.patch( - "google.cloud.bigtable.table._RetryableMutateRowsWorker", - new=mock.MagicMock(return_value=instance_mock), + with _Monkey(MUT, _create_row_request=mock_create_row_request): + result = table.read_row(ROW_KEY, filter_=filter_obj) + + row_set = RowSet() + row_set.add_row_key(ROW_KEY) + expected_request = [ + ( + table.name, + { + "end_inclusive": False, + "row_set": row_set, + "app_profile_id": app_profile_id, + "end_key": None, + "limit": None, + "start_key": None, + "filter_": filter_obj, + }, ) + ] + assert result == expected_result + assert mock_created == expected_request - call_kwargs = {} + data_api.read_rows.assert_called_once_with(request_pb, timeout=61.0) - if retry is not None: - call_kwargs["retry"] = retry - if timeout is not None: - expected_timeout = call_kwargs["timeout"] = timeout - else: - expected_timeout = mutation_timeout +def test_table_read_row_miss_no__responses(): + _table_read_row_helper(None, None) - with klass_mock: - statuses = table.mutate_rows(rows, **call_kwargs) - result = [status.code for status in statuses] - expected_result = [0, 1] - self.assertEqual(result, expected_result) - - klass_mock.new.assert_called_once_with( - client, - self.TABLE_NAME, - rows, - app_profile_id=app_profile_id, - timeout=expected_timeout, - ) +def 
test_table_read_row_miss_no_chunks_in_response(): + chunks = [] + _table_read_row_helper(chunks, None) - if retry is not None: - instance_mock.assert_called_once_with(retry=retry) - else: - instance_mock.assert_called_once_with(retry=DEFAULT_RETRY) - - def test_mutate_rows_w_default_mutation_timeout_app_profile_id(self): - self._mutate_rows_helper() - - def test_mutate_rows_w_mutation_timeout(self): - mutation_timeout = 123 - self._mutate_rows_helper(mutation_timeout=mutation_timeout) - - def test_mutate_rows_w_app_profile_id(self): - app_profile_id = "profile-123" - self._mutate_rows_helper(app_profile_id=app_profile_id) - - def test_mutate_rows_w_retry(self): - retry = mock.Mock() - self._mutate_rows_helper(retry=retry) - - def test_mutate_rows_w_timeout_arg(self): - timeout = 123 - self._mutate_rows_helper(timeout=timeout) - - def test_mutate_rows_w_mutation_timeout_and_timeout_arg(self): - mutation_timeout = 123 - timeout = 456 - self._mutate_rows_helper(mutation_timeout=mutation_timeout, timeout=timeout) - - def test_read_rows(self): - from google.cloud._testing import _Monkey - from google.cloud.bigtable.row_data import PartialRowsData - from google.cloud.bigtable import table as MUT - from google.cloud.bigtable_v2.services.bigtable import BigtableClient - from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - client as bigtable_table_admin, - ) - from google.cloud.bigtable.row_data import DEFAULT_RETRY_READ_ROWS - data_api = mock.create_autospec(BigtableClient) - table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - client._table_data_client = data_api - client._table_admin_client = table_api - instance = client.instance(instance_id=self.INSTANCE_ID) - app_profile_id = "app-profile-id" - table = self._make_one(self.TABLE_ID, instance, app_profile_id=app_profile_id) - - # Create request_pb - request = object() # Returned by our mock. - retry = DEFAULT_RETRY_READ_ROWS - mock_created = [] - - def mock_create_row_request(table_name, **kwargs): - mock_created.append((table_name, kwargs)) - return request - - # Create expected_result. - expected_result = PartialRowsData( - client._table_data_client.transport.read_rows, request, retry - ) +def test_table_read_row_complete(): + from google.cloud.bigtable.row_data import Cell + from google.cloud.bigtable.row_data import PartialRowData - # Perform the method and check the result. 
- start_key = b"start-key" - end_key = b"end-key" - filter_obj = object() - limit = 22 - with _Monkey(MUT, _create_row_request=mock_create_row_request): - result = table.read_rows( - start_key=start_key, - end_key=end_key, - filter_=filter_obj, - limit=limit, - retry=retry, - ) + app_profile_id = "app-profile-id" + chunk = _ReadRowsResponseCellChunkPB( + row_key=ROW_KEY, + family_name=FAMILY_NAME, + qualifier=QUALIFIER, + timestamp_micros=TIMESTAMP_MICROS, + value=VALUE, + commit_row=True, + ) + chunks = [chunk] + expected_result = PartialRowData(row_key=ROW_KEY) + family = expected_result._cells.setdefault(FAMILY_NAME, {}) + column = family.setdefault(QUALIFIER, []) + column.append(Cell.from_pb(chunk)) + + _table_read_row_helper(chunks, expected_result, app_profile_id) + + +def test_table_read_row_more_than_one_row_returned(): + app_profile_id = "app-profile-id" + chunk_1 = _ReadRowsResponseCellChunkPB( + row_key=ROW_KEY, + family_name=FAMILY_NAME, + qualifier=QUALIFIER, + timestamp_micros=TIMESTAMP_MICROS, + value=VALUE, + commit_row=True, + )._pb + chunk_2 = _ReadRowsResponseCellChunkPB( + row_key=ROW_KEY_2, + family_name=FAMILY_NAME, + qualifier=QUALIFIER, + timestamp_micros=TIMESTAMP_MICROS, + value=VALUE, + commit_row=True, + )._pb + + chunks = [chunk_1, chunk_2] + + with pytest.raises(ValueError): + _table_read_row_helper(chunks, None, app_profile_id) + + +def test_table_read_row_still_partial(): + chunk = _ReadRowsResponseCellChunkPB( + row_key=ROW_KEY, + family_name=FAMILY_NAME, + qualifier=QUALIFIER, + timestamp_micros=TIMESTAMP_MICROS, + value=VALUE, + ) + chunks = [chunk] # No "commit row". - self.assertEqual(result.rows, expected_result.rows) - self.assertEqual(result.retry, expected_result.retry) - created_kwargs = { - "start_key": start_key, - "end_key": end_key, - "filter_": filter_obj, - "limit": limit, - "end_inclusive": False, - "app_profile_id": app_profile_id, - "row_set": None, - } - self.assertEqual(mock_created, [(table.name, created_kwargs)]) + with pytest.raises(ValueError): + _table_read_row_helper(chunks, None) - def test_read_retry_rows(self): - from google.cloud.bigtable_v2.services.bigtable import BigtableClient - from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - client as bigtable_table_admin, - ) - from google.api_core import retry - data_api = mock.create_autospec(BigtableClient) - table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - client._table_data_client = data_api - client._table_admin_client = table_api - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_one(self.TABLE_ID, instance) - - retry_read_rows = retry.Retry(predicate=_read_rows_retry_exception) - - # Create response_iterator - chunk_1 = _ReadRowsResponseCellChunkPB( - row_key=self.ROW_KEY_1, - family_name=self.FAMILY_NAME, - qualifier=self.QUALIFIER, - timestamp_micros=self.TIMESTAMP_MICROS, - value=self.VALUE, - commit_row=True, - ) +def _table_mutate_rows_helper( + mutation_timeout=None, app_profile_id=None, retry=None, timeout=None +): + from google.rpc.status_pb2 import Status + from google.cloud.bigtable.table import DEFAULT_RETRY - chunk_2 = _ReadRowsResponseCellChunkPB( - row_key=self.ROW_KEY_2, - family_name=self.FAMILY_NAME, - qualifier=self.QUALIFIER, - timestamp_micros=self.TIMESTAMP_MICROS, - value=self.VALUE, - commit_row=True, - ) + credentials = _make_credentials() 
+ client = _make_client(project="project-id", credentials=credentials, admin=True) + instance = client.instance(instance_id=INSTANCE_ID) + ctor_kwargs = {} - response_1 = _ReadRowsResponseV2([chunk_1]) - response_2 = _ReadRowsResponseV2([chunk_2]) - response_failure_iterator_1 = _MockFailureIterator_1() - response_failure_iterator_2 = _MockFailureIterator_2([response_1]) - response_iterator = _MockReadRowsIterator(response_2) - - # Patch the stub used by the API method. - data_api.table_path.return_value = f"projects/{self.PROJECT_ID}/instances/{self.INSTANCE_ID}/tables/{self.TABLE_ID}" - - client._table_data_client.read_rows = mock.Mock( - side_effect=[ - response_failure_iterator_1, - response_failure_iterator_2, - response_iterator, - ] - ) + if mutation_timeout is not None: + ctor_kwargs["mutation_timeout"] = mutation_timeout - table._instance._client._table_data_client = data_api - table._instance._client._table_admin_client = table_api - rows = [] - for row in table.read_rows( - start_key=self.ROW_KEY_1, end_key=self.ROW_KEY_2, retry=retry_read_rows - ): - rows.append(row) + if app_profile_id is not None: + ctor_kwargs["app_profile_id"] = app_profile_id - result = rows[1] - self.assertEqual(result.row_key, self.ROW_KEY_2) + table = _make_table(TABLE_ID, instance, **ctor_kwargs) - def test_yield_retry_rows(self): - from google.cloud.bigtable_v2.services.bigtable import BigtableClient - from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - client as bigtable_table_admin, - ) + rows = [mock.MagicMock(), mock.MagicMock()] + response = [Status(code=0), Status(code=1)] + instance_mock = mock.Mock(return_value=response) + klass_mock = mock.patch( + "google.cloud.bigtable.table._RetryableMutateRowsWorker", + new=mock.MagicMock(return_value=instance_mock), + ) - data_api = mock.create_autospec(BigtableClient) - table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - client._table_data_client = data_api - client._table_admin_client = table_api - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_one(self.TABLE_ID, instance) - - # Create response_iterator - chunk_1 = _ReadRowsResponseCellChunkPB( - row_key=self.ROW_KEY_1, - family_name=self.FAMILY_NAME, - qualifier=self.QUALIFIER, - timestamp_micros=self.TIMESTAMP_MICROS, - value=self.VALUE, - commit_row=True, - ) + call_kwargs = {} - chunk_2 = _ReadRowsResponseCellChunkPB( - row_key=self.ROW_KEY_2, - family_name=self.FAMILY_NAME, - qualifier=self.QUALIFIER, - timestamp_micros=self.TIMESTAMP_MICROS, - value=self.VALUE, - commit_row=True, - ) + if retry is not None: + call_kwargs["retry"] = retry - response_1 = _ReadRowsResponseV2([chunk_1]) - response_2 = _ReadRowsResponseV2([chunk_2]) - response_failure_iterator_1 = _MockFailureIterator_1() - response_failure_iterator_2 = _MockFailureIterator_2([response_1]) - response_iterator = _MockReadRowsIterator(response_2) - - # Patch the stub used by the API method. 
- data_api.table_path.return_value = f"projects/{self.PROJECT_ID}/instances/{self.INSTANCE_ID}/tables/{self.TABLE_ID}" - table_api.table_path.return_value = f"projects/{self.PROJECT_ID}/instances/{self.INSTANCE_ID}/tables/{self.TABLE_ID}" - - table._instance._client._table_data_client = data_api - table._instance._client._table_admin_client = table_api - client._table_data_client.read_rows.side_effect = [ - response_failure_iterator_1, - response_failure_iterator_2, - response_iterator, - ] + if timeout is not None: + expected_timeout = call_kwargs["timeout"] = timeout + else: + expected_timeout = mutation_timeout - rows = [] - with warnings.catch_warnings(record=True) as warned: - for row in table.yield_rows( - start_key=self.ROW_KEY_1, end_key=self.ROW_KEY_2 - ): - rows.append(row) + with klass_mock: + statuses = table.mutate_rows(rows, **call_kwargs) - self.assertEqual(len(warned), 1) - self.assertIs(warned[0].category, DeprecationWarning) + result = [status.code for status in statuses] + expected_result = [0, 1] + assert result == expected_result - result = rows[1] - self.assertEqual(result.row_key, self.ROW_KEY_2) + klass_mock.new.assert_called_once_with( + client, + TABLE_NAME, + rows, + app_profile_id=app_profile_id, + timeout=expected_timeout, + ) - def test_yield_rows_with_row_set(self): - from google.cloud.bigtable_v2.services.bigtable import BigtableClient - from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - client as bigtable_table_admin, - ) - from google.cloud.bigtable.row_set import RowSet - from google.cloud.bigtable.row_set import RowRange - - data_api = mock.create_autospec(BigtableClient) - table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - client._table_data_client = data_api - client._table_admin_client = table_api - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_one(self.TABLE_ID, instance) - - # Create response_iterator - chunk_1 = _ReadRowsResponseCellChunkPB( - row_key=self.ROW_KEY_1, - family_name=self.FAMILY_NAME, - qualifier=self.QUALIFIER, - timestamp_micros=self.TIMESTAMP_MICROS, - value=self.VALUE, - commit_row=True, - ) + if retry is not None: + instance_mock.assert_called_once_with(retry=retry) + else: + instance_mock.assert_called_once_with(retry=DEFAULT_RETRY) - chunk_2 = _ReadRowsResponseCellChunkPB( - row_key=self.ROW_KEY_2, - family_name=self.FAMILY_NAME, - qualifier=self.QUALIFIER, - timestamp_micros=self.TIMESTAMP_MICROS, - value=self.VALUE, - commit_row=True, - ) - chunk_3 = _ReadRowsResponseCellChunkPB( - row_key=self.ROW_KEY_3, - family_name=self.FAMILY_NAME, - qualifier=self.QUALIFIER, - timestamp_micros=self.TIMESTAMP_MICROS, - value=self.VALUE, - commit_row=True, - ) +def test_table_mutate_rows_w_default_mutation_timeout_app_profile_id(): + _table_mutate_rows_helper() - response_1 = _ReadRowsResponseV2([chunk_1]) - response_2 = _ReadRowsResponseV2([chunk_2]) - response_3 = _ReadRowsResponseV2([chunk_3]) - response_iterator = _MockReadRowsIterator(response_1, response_2, response_3) - # Patch the stub used by the API method. 
- data_api.table_path.return_value = f"projects/{self.PROJECT_ID}/instances/{self.INSTANCE_ID}/tables/{self.TABLE_ID}" - table_api.table_path.return_value = f"projects/{self.PROJECT_ID}/instances/{self.INSTANCE_ID}/tables/{self.TABLE_ID}" +def test_table_mutate_rows_w_mutation_timeout(): + mutation_timeout = 123 + _table_mutate_rows_helper(mutation_timeout=mutation_timeout) - table._instance._client._table_data_client = data_api - table._instance._client._table_admin_client = table_api - client._table_data_client.read_rows.side_effect = [response_iterator] - rows = [] - row_set = RowSet() - row_set.add_row_range( - RowRange(start_key=self.ROW_KEY_1, end_key=self.ROW_KEY_2) - ) - row_set.add_row_key(self.ROW_KEY_3) +def test_table_mutate_rows_w_app_profile_id(): + app_profile_id = "profile-123" + _table_mutate_rows_helper(app_profile_id=app_profile_id) - with warnings.catch_warnings(record=True) as warned: - for row in table.yield_rows(row_set=row_set): - rows.append(row) - self.assertEqual(len(warned), 1) - self.assertIs(warned[0].category, DeprecationWarning) +def test_table_mutate_rows_w_retry(): + retry = mock.Mock() + _table_mutate_rows_helper(retry=retry) - self.assertEqual(rows[0].row_key, self.ROW_KEY_1) - self.assertEqual(rows[1].row_key, self.ROW_KEY_2) - self.assertEqual(rows[2].row_key, self.ROW_KEY_3) - def test_sample_row_keys(self): - from google.cloud.bigtable_v2.services.bigtable import BigtableClient - from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - client as bigtable_table_admin, - ) +def test_table_mutate_rows_w_timeout_arg(): + timeout = 123 + _table_mutate_rows_helper(timeout=timeout) - data_api = mock.create_autospec(BigtableClient) - table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - client._table_data_client = data_api - client._table_admin_client = table_api - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_one(self.TABLE_ID, instance) - # Create response_iterator - response_iterator = object() # Just passed to a mock. +def test_table_mutate_rows_w_mutation_timeout_and_timeout_arg(): + mutation_timeout = 123 + timeout = 456 + _table_mutate_rows_helper(mutation_timeout=mutation_timeout, timeout=timeout) - # Patch the stub used by the API method. - client._table_data_client.sample_row_keys.side_effect = [[response_iterator]] - # Create expected_result. - expected_result = response_iterator +def test_table_read_rows(): + from google.cloud._testing import _Monkey + from google.cloud.bigtable.row_data import PartialRowsData + from google.cloud.bigtable import table as MUT + from google.cloud.bigtable.row_data import DEFAULT_RETRY_READ_ROWS - # Perform the method and check the result. 
- result = table.sample_row_keys() - self.assertEqual(result[0], expected_result) + credentials = _make_credentials() + client = _make_client(project="project-id", credentials=credentials, admin=True) + data_api = client._table_data_client = _make_data_api() + instance = client.instance(instance_id=INSTANCE_ID) + app_profile_id = "app-profile-id" + table = _make_table(TABLE_ID, instance, app_profile_id=app_profile_id) - def test_truncate(self): - from google.cloud.bigtable_v2.services.bigtable import BigtableClient - from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - client as bigtable_table_admin, - ) + # Create request_pb + request_pb = object() # Returned by our mock. + retry = DEFAULT_RETRY_READ_ROWS + mock_created = [] - data_api = mock.create_autospec(BigtableClient) - table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - client._table_data_client = data_api - client._table_admin_client = table_api - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_one(self.TABLE_ID, instance) + def mock_create_row_request(table_name, **kwargs): + mock_created.append((table_name, kwargs)) + return request_pb - expected_result = None # truncate() has no return value. - with mock.patch("google.cloud.bigtable.table.Table.name", new=self.TABLE_NAME): - result = table.truncate() + # Create expected_result. + expected_result = PartialRowsData( + client._table_data_client.transport.read_rows, request_pb, retry + ) - table_api.drop_row_range.assert_called_once_with( - request={"name": self.TABLE_NAME, "delete_all_data_from_table": True} - ) + # Perform the method and check the result. 
+ start_key = b"start-key" + end_key = b"end-key" + filter_obj = object() + limit = 22 + with _Monkey(MUT, _create_row_request=mock_create_row_request): + result = table.read_rows( + start_key=start_key, + end_key=end_key, + filter_=filter_obj, + limit=limit, + retry=retry, + ) + + assert result.rows == expected_result.rows + assert result.retry == expected_result.retry + created_kwargs = { + "start_key": start_key, + "end_key": end_key, + "filter_": filter_obj, + "limit": limit, + "end_inclusive": False, + "app_profile_id": app_profile_id, + "row_set": None, + } + assert mock_created == [(table.name, created_kwargs)] + + data_api.read_rows.assert_called_once_with(request_pb, timeout=61.0) + + +def test_table_read_retry_rows(): + from google.api_core import retry + from google.cloud.bigtable.table import _create_row_request + + credentials = _make_credentials() + client = _make_client(project="project-id", credentials=credentials, admin=True) + data_api = client._table_data_client = _make_data_api() + instance = client.instance(instance_id=INSTANCE_ID) + table = _make_table(TABLE_ID, instance) + + retry_read_rows = retry.Retry(predicate=_read_rows_retry_exception) + + # Create response_iterator + chunk_1 = _ReadRowsResponseCellChunkPB( + row_key=ROW_KEY_1, + family_name=FAMILY_NAME, + qualifier=QUALIFIER, + timestamp_micros=TIMESTAMP_MICROS, + value=VALUE, + commit_row=True, + ) - self.assertEqual(result, expected_result) + chunk_2 = _ReadRowsResponseCellChunkPB( + row_key=ROW_KEY_2, + family_name=FAMILY_NAME, + qualifier=QUALIFIER, + timestamp_micros=TIMESTAMP_MICROS, + value=VALUE, + commit_row=True, + ) - def test_truncate_w_timeout(self): - from google.cloud.bigtable_v2.services.bigtable import BigtableClient - from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - client as bigtable_table_admin, - ) + response_1 = _ReadRowsResponseV2([chunk_1]) + response_2 = _ReadRowsResponseV2([chunk_2]) + response_failure_iterator_1 = _MockFailureIterator_1() + response_failure_iterator_2 = _MockFailureIterator_2([response_1]) + response_iterator = _MockReadRowsIterator(response_2) - data_api = mock.create_autospec(BigtableClient) - table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True + data_api.table_path.return_value = ( + f"projects/{PROJECT_ID}/instances/{INSTANCE_ID}/tables/{TABLE_ID}" + ) + + data_api.read_rows.side_effect = [ + response_failure_iterator_1, + response_failure_iterator_2, + response_iterator, + ] + + rows = [ + row + for row in table.read_rows( + start_key=ROW_KEY_1, end_key=ROW_KEY_2, retry=retry_read_rows ) - client._table_data_client = data_api - client._table_admin_client = table_api - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_one(self.TABLE_ID, instance) + ] - expected_result = None # truncate() has no return value. 
+ result = rows[1] + assert result.row_key == ROW_KEY_2 - timeout = 120 - result = table.truncate(timeout=timeout) + expected_request = _create_row_request( + table.name, start_key=ROW_KEY_1, end_key=ROW_KEY_2, + ) + data_api.read_rows.mock_calls = [expected_request] * 3 - self.assertEqual(result, expected_result) - def test_drop_by_prefix(self): - from google.cloud.bigtable_v2.services.bigtable import BigtableClient - from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - client as bigtable_table_admin, - ) +def test_table_yield_retry_rows(): + from google.cloud.bigtable.table import _create_row_request - data_api = mock.create_autospec(BigtableClient) - table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - client._table_data_client = data_api - client._table_admin_client = table_api - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_one(self.TABLE_ID, instance) + credentials = _make_credentials() + client = _make_client(project="project-id", credentials=credentials, admin=True) + instance = client.instance(instance_id=INSTANCE_ID) + table = _make_table(TABLE_ID, instance) + + # Create response_iterator + chunk_1 = _ReadRowsResponseCellChunkPB( + row_key=ROW_KEY_1, + family_name=FAMILY_NAME, + qualifier=QUALIFIER, + timestamp_micros=TIMESTAMP_MICROS, + value=VALUE, + commit_row=True, + ) + + chunk_2 = _ReadRowsResponseCellChunkPB( + row_key=ROW_KEY_2, + family_name=FAMILY_NAME, + qualifier=QUALIFIER, + timestamp_micros=TIMESTAMP_MICROS, + value=VALUE, + commit_row=True, + ) - expected_result = None # drop_by_prefix() has no return value. + response_1 = _ReadRowsResponseV2([chunk_1]) + response_2 = _ReadRowsResponseV2([chunk_2]) + response_failure_iterator_1 = _MockFailureIterator_1() + response_failure_iterator_2 = _MockFailureIterator_2([response_1]) + response_iterator = _MockReadRowsIterator(response_2) - row_key_prefix = "row-key-prefix" + data_api = client._table_data_client = _make_data_api() + data_api.table_path.return_value = ( + f"projects/{PROJECT_ID}/instances/{INSTANCE_ID}/tables/{TABLE_ID}" + ) + data_api.read_rows.side_effect = [ + response_failure_iterator_1, + response_failure_iterator_2, + response_iterator, + ] + + rows = [] + with warnings.catch_warnings(record=True) as warned: + for row in table.yield_rows(start_key=ROW_KEY_1, end_key=ROW_KEY_2): + rows.append(row) - result = table.drop_by_prefix(row_key_prefix=row_key_prefix) + assert len(warned) == 1 + assert warned[0].category is DeprecationWarning - self.assertEqual(result, expected_result) + result = rows[1] + assert result.row_key == ROW_KEY_2 - def test_drop_by_prefix_w_timeout(self): - from google.cloud.bigtable_v2.services.bigtable import BigtableClient - from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - client as bigtable_table_admin, - ) + expected_request = _create_row_request( + table.name, start_key=ROW_KEY_1, end_key=ROW_KEY_2, + ) + data_api.read_rows.mock_calls = [expected_request] * 3 + + +def test_table_yield_rows_with_row_set(): + from google.cloud.bigtable.row_set import RowSet + from google.cloud.bigtable.row_set import RowRange + from google.cloud.bigtable.table import _create_row_request + + credentials = _make_credentials() + client = _make_client(project="project-id", credentials=credentials, admin=True) + instance = client.instance(instance_id=INSTANCE_ID) + 
table = _make_table(TABLE_ID, instance) + + # Create response_iterator + chunk_1 = _ReadRowsResponseCellChunkPB( + row_key=ROW_KEY_1, + family_name=FAMILY_NAME, + qualifier=QUALIFIER, + timestamp_micros=TIMESTAMP_MICROS, + value=VALUE, + commit_row=True, + ) - data_api = mock.create_autospec(BigtableClient) - table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - client._table_data_client = data_api - client._table_admin_client = table_api - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_one(self.TABLE_ID, instance) + chunk_2 = _ReadRowsResponseCellChunkPB( + row_key=ROW_KEY_2, + family_name=FAMILY_NAME, + qualifier=QUALIFIER, + timestamp_micros=TIMESTAMP_MICROS, + value=VALUE, + commit_row=True, + ) - expected_result = None # drop_by_prefix() has no return value. + chunk_3 = _ReadRowsResponseCellChunkPB( + row_key=ROW_KEY_3, + family_name=FAMILY_NAME, + qualifier=QUALIFIER, + timestamp_micros=TIMESTAMP_MICROS, + value=VALUE, + commit_row=True, + ) - row_key_prefix = "row-key-prefix" + response_1 = _ReadRowsResponseV2([chunk_1]) + response_2 = _ReadRowsResponseV2([chunk_2]) + response_3 = _ReadRowsResponseV2([chunk_3]) + response_iterator = _MockReadRowsIterator(response_1, response_2, response_3) - timeout = 120 - result = table.drop_by_prefix(row_key_prefix=row_key_prefix, timeout=timeout) + data_api = client._table_data_client = _make_data_api() + data_api.table_path.return_value = ( + f"projects/{PROJECT_ID}/instances/{INSTANCE_ID}/tables/{TABLE_ID}" + ) + data_api.read_rows.side_effect = [response_iterator] - self.assertEqual(result, expected_result) + rows = [] + row_set = RowSet() + row_set.add_row_range(RowRange(start_key=ROW_KEY_1, end_key=ROW_KEY_2)) + row_set.add_row_key(ROW_KEY_3) - def test_mutations_batcher_factory(self): - flush_count = 100 - max_row_bytes = 1000 - table = self._make_one(self.TABLE_ID, None) - mutation_batcher = table.mutations_batcher( - flush_count=flush_count, max_row_bytes=max_row_bytes - ) + with warnings.catch_warnings(record=True) as warned: + for row in table.yield_rows(row_set=row_set): + rows.append(row) - self.assertEqual(mutation_batcher.table.table_id, self.TABLE_ID) - self.assertEqual(mutation_batcher.flush_count, flush_count) - self.assertEqual(mutation_batcher.max_row_bytes, max_row_bytes) + assert len(warned) == 1 + assert warned[0].category is DeprecationWarning - def test_get_iam_policy(self): - from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - client as bigtable_table_admin, - ) - from google.iam.v1 import policy_pb2 - from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE + assert rows[0].row_key == ROW_KEY_1 + assert rows[1].row_key == ROW_KEY_2 + assert rows[2].row_key == ROW_KEY_3 - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_one(self.TABLE_ID, instance) + expected_request = _create_row_request( + table.name, start_key=ROW_KEY_1, end_key=ROW_KEY_2, + ) + expected_request.rows.row_keys.append(ROW_KEY_3) + data_api.read_rows.assert_called_once_with(expected_request, timeout=61.0) - version = 1 - etag = b"etag_v1" - members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"] - bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": members}] - 
iam_policy = policy_pb2.Policy(version=version, etag=etag, bindings=bindings) - table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) - client._table_admin_client = table_api - table_api.get_iam_policy.return_value = iam_policy +def test_table_sample_row_keys(): + credentials = _make_credentials() + client = _make_client(project="project-id", credentials=credentials, admin=True) + instance = client.instance(instance_id=INSTANCE_ID) + table = _make_table(TABLE_ID, instance) + response_iterator = object() - result = table.get_iam_policy() + data_api = client._table_data_client = _make_data_api() + data_api.sample_row_keys.return_value = [response_iterator] - table_api.get_iam_policy.assert_called_once_with( - request={"resource": table.name} - ) - self.assertEqual(result.version, version) - self.assertEqual(result.etag, etag) - admins = result.bigtable_admins - self.assertEqual(len(admins), len(members)) - for found, expected in zip(sorted(admins), sorted(members)): - self.assertEqual(found, expected) - - def test_set_iam_policy(self): - from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - client as bigtable_table_admin, - ) - from google.iam.v1 import policy_pb2 - from google.cloud.bigtable.policy import Policy - from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE + result = table.sample_row_keys() - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_one(self.TABLE_ID, instance) - - version = 1 - etag = b"etag_v1" - members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"] - bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": sorted(members)}] - iam_policy_pb = policy_pb2.Policy(version=version, etag=etag, bindings=bindings) - - table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) - client._table_admin_client = table_api - table_api.set_iam_policy.return_value = iam_policy_pb - - iam_policy = Policy(etag=etag, version=version) - iam_policy[BIGTABLE_ADMIN_ROLE] = [ - Policy.user("user1@test.com"), - Policy.service_account("service_acc1@test.com"), - ] + assert result[0] == response_iterator - result = table.set_iam_policy(iam_policy) - table_api.set_iam_policy.assert_called_once_with( - request={"resource": table.name, "policy": iam_policy_pb} - ) - self.assertEqual(result.version, version) - self.assertEqual(result.etag, etag) - admins = result.bigtable_admins - self.assertEqual(len(admins), len(members)) - for found, expected in zip(sorted(admins), sorted(members)): - self.assertEqual(found, expected) - - def test_test_iam_permissions(self): - from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - client as bigtable_table_admin, - ) - from google.iam.v1 import iam_policy_pb2 +def test_table_truncate(): + credentials = _make_credentials() + client = _make_client(project="project-id", credentials=credentials, admin=True) + instance = client.instance(instance_id=INSTANCE_ID) + table = _make_table(TABLE_ID, instance) + table_api = client._table_admin_client = _make_table_api() - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_one(self.TABLE_ID, instance) + with mock.patch("google.cloud.bigtable.table.Table.name", new=TABLE_NAME): + result = table.truncate() + 
+ assert result is None - permissions = ["bigtable.tables.mutateRows", "bigtable.tables.readRows"] + table_api.drop_row_range.assert_called_once_with( + request={"name": TABLE_NAME, "delete_all_data_from_table": True} + ) - response = iam_policy_pb2.TestIamPermissionsResponse(permissions=permissions) - table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) - table_api.test_iam_permissions.return_value = response - client._table_admin_client = table_api +def test_table_truncate_w_timeout(): + credentials = _make_credentials() + client = _make_client(project="project-id", credentials=credentials, admin=True) + instance = client.instance(instance_id=INSTANCE_ID) + table = _make_table(TABLE_ID, instance) + table_api = client._table_admin_client = _make_table_api() - result = table.test_iam_permissions(permissions) + timeout = 120 + result = table.truncate(timeout=timeout) - self.assertEqual(result, permissions) - table_api.test_iam_permissions.assert_called_once_with( - request={"resource": table.name, "permissions": permissions} - ) + assert result is None - def test_backup_factory_defaults(self): - from google.cloud.bigtable.backup import Backup - - instance = self._make_one(self.INSTANCE_ID, None) - table = self._make_one(self.TABLE_ID, instance) - backup = table.backup(self.BACKUP_ID) - - self.assertIsInstance(backup, Backup) - self.assertEqual(backup.backup_id, self.BACKUP_ID) - self.assertIs(backup._instance, instance) - self.assertIsNone(backup._cluster) - self.assertEqual(backup.table_id, self.TABLE_ID) - self.assertIsNone(backup._expire_time) - - self.assertIsNone(backup._parent) - self.assertIsNone(backup._source_table) - self.assertIsNone(backup._start_time) - self.assertIsNone(backup._end_time) - self.assertIsNone(backup._size_bytes) - self.assertIsNone(backup._state) - - def test_backup_factory_non_defaults(self): - import datetime - from google.cloud._helpers import UTC - from google.cloud.bigtable.backup import Backup - - instance = self._make_one(self.INSTANCE_ID, None) - table = self._make_one(self.TABLE_ID, instance) - timestamp = datetime.datetime.utcnow().replace(tzinfo=UTC) - backup = table.backup( - self.BACKUP_ID, cluster_id=self.CLUSTER_ID, expire_time=timestamp, - ) + table_api.drop_row_range.assert_called_once_with( + request={"name": TABLE_NAME, "delete_all_data_from_table": True}, timeout=120, + ) - self.assertIsInstance(backup, Backup) - self.assertEqual(backup.backup_id, self.BACKUP_ID) - self.assertIs(backup._instance, instance) - - self.assertEqual(backup.backup_id, self.BACKUP_ID) - self.assertIs(backup._cluster, self.CLUSTER_ID) - self.assertEqual(backup.table_id, self.TABLE_ID) - self.assertEqual(backup._expire_time, timestamp) - self.assertIsNone(backup._start_time) - self.assertIsNone(backup._end_time) - self.assertIsNone(backup._size_bytes) - self.assertIsNone(backup._state) - - def _list_backups_helper(self, cluster_id=None, filter_=None, **kwargs): - from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( - BigtableInstanceAdminClient, - ) - from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - BigtableTableAdminClient, - ) - from google.cloud.bigtable_admin_v2.types import ( - bigtable_table_admin, - Backup as backup_pb, - ) - from google.cloud.bigtable.backup import Backup - instance_api = mock.create_autospec(BigtableInstanceAdminClient) - table_api = mock.create_autospec(BigtableTableAdminClient) - client = self._make_client( - project=self.PROJECT_ID, 
credentials=_make_credentials(), admin=True - ) - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_one(self.TABLE_ID, instance) - - client._instance_admin_client = instance_api - client._table_admin_client = table_api - table._instance._client._instance_admin_client = instance_api - table._instance._client._table_admin_client = table_api - - parent = self.INSTANCE_NAME + "/clusters/cluster" - backups_pb = bigtable_table_admin.ListBackupsResponse( - backups=[ - backup_pb(name=parent + "/backups/op1"), - backup_pb(name=parent + "/backups/op2"), - backup_pb(name=parent + "/backups/op3"), - ] - ) +def test_table_drop_by_prefix(): + credentials = _make_credentials() + client = _make_client(project="project-id", credentials=credentials, admin=True) + instance = client.instance(instance_id=INSTANCE_ID) + table = _make_table(TABLE_ID, instance) + table_api = client._table_admin_client = _make_table_api() - table_api.list_backups.return_value = backups_pb - api = table._instance._client._table_admin_client.list_backups + row_key_prefix = b"row-key-prefix" - backups_filter = "source_table:{}".format(self.TABLE_NAME) - if filter_: - backups_filter = "({}) AND ({})".format(backups_filter, filter_) + result = table.drop_by_prefix(row_key_prefix=row_key_prefix) - backups = table.list_backups(cluster_id=cluster_id, filter_=filter_, **kwargs) + assert result is None - for backup in backups: - self.assertIsInstance(backup, Backup) + table_api.drop_row_range.assert_called_once_with( + request={"name": TABLE_NAME, "row_key_prefix": row_key_prefix}, + ) - if not cluster_id: - cluster_id = "-" - parent = "{}/clusters/{}".format(self.INSTANCE_NAME, cluster_id) - order_by = None - page_size = 0 - if "order_by" in kwargs: - order_by = kwargs["order_by"] +def test_table_drop_by_prefix_w_timeout(): + credentials = _make_credentials() + client = _make_client(project="project-id", credentials=credentials, admin=True) + instance = client.instance(instance_id=INSTANCE_ID) + table = _make_table(TABLE_ID, instance) + table_api = client._table_admin_client = _make_table_api() - if "page_size" in kwargs: - page_size = kwargs["page_size"] + row_key_prefix = b"row-key-prefix" - api.assert_called_once_with( - request={ - "parent": parent, - "filter": backups_filter, - "order_by": order_by, - "page_size": page_size, - } - ) + timeout = 120 + result = table.drop_by_prefix(row_key_prefix=row_key_prefix, timeout=timeout) - def test_list_backups_defaults(self): - self._list_backups_helper() + assert result is None - def test_list_backups_w_options(self): - self._list_backups_helper( - cluster_id="cluster", filter_="filter", order_by="order_by", page_size=10 - ) + table_api.drop_row_range.assert_called_once_with( + request={"name": TABLE_NAME, "row_key_prefix": row_key_prefix}, timeout=120, + ) - def _restore_helper(self, backup_name=None): - from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient - from google.cloud.bigtable.instance import Instance - op_future = object() - credentials = _make_credentials() - client = self._make_client( - project=self.PROJECT_ID, credentials=credentials, admin=True - ) +def test_table_mutations_batcher_factory(): + flush_count = 100 + max_row_bytes = 1000 + table = _make_table(TABLE_ID, None) + mutation_batcher = table.mutations_batcher( + flush_count=flush_count, max_row_bytes=max_row_bytes + ) - instance = Instance(self.INSTANCE_ID, client=client) - table = self._make_one(self.TABLE_ID, instance) + assert mutation_batcher.table.table_id == TABLE_ID + 
assert mutation_batcher.flush_count == flush_count + assert mutation_batcher.max_row_bytes == max_row_bytes - api = client._table_admin_client = mock.create_autospec( - BigtableTableAdminClient - ) - api.restore_table.return_value = op_future - table._instance._client._table_admin_client = api +def test_table_get_iam_policy(): + from google.iam.v1 import policy_pb2 + from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE - if backup_name: - future = table.restore(self.TABLE_ID, backup_name=self.BACKUP_NAME) - else: - future = table.restore(self.TABLE_ID, self.CLUSTER_ID, self.BACKUP_ID) - self.assertIs(future, op_future) - - api.restore_table.assert_called_once_with( - request={ - "parent": self.INSTANCE_NAME, - "table_id": self.TABLE_ID, - "backup": self.BACKUP_NAME, - } - ) + credentials = _make_credentials() + client = _make_client(project="project-id", credentials=credentials, admin=True) + instance = client.instance(instance_id=INSTANCE_ID) + table = _make_table(TABLE_ID, instance) + + version = 1 + etag = b"etag_v1" + members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"] + bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": members}] + iam_policy = policy_pb2.Policy(version=version, etag=etag, bindings=bindings) + + table_api = client._table_admin_client = _make_table_api() + table_api.get_iam_policy.return_value = iam_policy - def test_restore_table_w_backup_id(self): - self._restore_helper() + result = table.get_iam_policy() - def test_restore_table_w_backup_name(self): - self._restore_helper(backup_name=self.BACKUP_NAME) + assert result.version == version + assert result.etag == etag + admins = result.bigtable_admins + assert len(admins) == len(members) + for found, expected in zip(sorted(admins), sorted(members)): + assert found == expected -class Test__RetryableMutateRowsWorker(unittest.TestCase): - from grpc import StatusCode + table_api.get_iam_policy.assert_called_once_with(request={"resource": table.name}) - PROJECT_ID = "project-id" - INSTANCE_ID = "instance-id" - INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID - TABLE_ID = "table-id" - # RPC Status Codes - SUCCESS = StatusCode.OK.value[0] - RETRYABLE_1 = StatusCode.DEADLINE_EXCEEDED.value[0] - RETRYABLE_2 = StatusCode.ABORTED.value[0] - RETRYABLE_3 = StatusCode.UNAVAILABLE.value[0] - RETRYABLES = (RETRYABLE_1, RETRYABLE_2, RETRYABLE_3) - NON_RETRYABLE = StatusCode.CANCELLED.value[0] +def test_table_set_iam_policy(): + from google.iam.v1 import policy_pb2 + from google.cloud.bigtable.policy import Policy + from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE - @staticmethod - def _get_target_class_for_worker(): - from google.cloud.bigtable.table import _RetryableMutateRowsWorker + credentials = _make_credentials() + client = _make_client(project="project-id", credentials=credentials, admin=True) + instance = client.instance(instance_id=INSTANCE_ID) + table = _make_table(TABLE_ID, instance) - return _RetryableMutateRowsWorker + version = 1 + etag = b"etag_v1" + members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"] + bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": sorted(members)}] + iam_policy_pb = policy_pb2.Policy(version=version, etag=etag, bindings=bindings) - def _make_worker(self, *args, **kwargs): - return self._get_target_class_for_worker()(*args, **kwargs) + table_api = client._table_admin_client = _make_table_api() + table_api.set_iam_policy.return_value = iam_policy_pb - @staticmethod - def _get_target_class_for_table(): - from 
google.cloud.bigtable.table import Table + iam_policy = Policy(etag=etag, version=version) + iam_policy[BIGTABLE_ADMIN_ROLE] = [ + Policy.user("user1@test.com"), + Policy.service_account("service_acc1@test.com"), + ] - return Table + result = table.set_iam_policy(iam_policy) - def _make_table(self, *args, **kwargs): - return self._get_target_class_for_table()(*args, **kwargs) + assert result.version == version + assert result.etag == etag + admins = result.bigtable_admins + assert len(admins) == len(members) - @staticmethod - def _get_target_client_class(): - from google.cloud.bigtable.client import Client + for found, expected in zip(sorted(admins), sorted(members)): + assert found == expected + + table_api.set_iam_policy.assert_called_once_with( + request={"resource": table.name, "policy": iam_policy_pb} + ) - return Client - def _make_client(self, *args, **kwargs): - return self._get_target_client_class()(*args, **kwargs) +def test_table_test_iam_permissions(): + from google.iam.v1 import iam_policy_pb2 - def _make_responses_statuses(self, codes): - from google.rpc.status_pb2 import Status + credentials = _make_credentials() + client = _make_client(project="project-id", credentials=credentials, admin=True) + instance = client.instance(instance_id=INSTANCE_ID) + table = _make_table(TABLE_ID, instance) - response = [Status(code=code) for code in codes] - return response + permissions = ["bigtable.tables.mutateRows", "bigtable.tables.readRows"] - def _make_responses(self, codes): - from google.cloud.bigtable_v2.types.bigtable import MutateRowsResponse - from google.rpc.status_pb2 import Status + response = iam_policy_pb2.TestIamPermissionsResponse(permissions=permissions) - entries = [ - MutateRowsResponse.Entry(index=i, status=Status(code=codes[i])) - for i in range(len(codes)) + table_api = client._table_admin_client = _make_table_api() + table_api.test_iam_permissions.return_value = response + + result = table.test_iam_permissions(permissions) + + assert result == permissions + + table_api.test_iam_permissions.assert_called_once_with( + request={"resource": table.name, "permissions": permissions} + ) + + +def test_table_backup_factory_defaults(): + from google.cloud.bigtable.backup import Backup + + instance = _make_table(INSTANCE_ID, None) + table = _make_table(TABLE_ID, instance) + backup = table.backup(BACKUP_ID) + + assert isinstance(backup, Backup) + assert backup.backup_id == BACKUP_ID + assert backup._instance is instance + assert backup._cluster is None + assert backup.table_id == TABLE_ID + assert backup._expire_time is None + + assert backup._parent is None + assert backup._source_table is None + assert backup._start_time is None + assert backup._end_time is None + assert backup._size_bytes is None + assert backup._state is None + + +def test_table_backup_factory_non_defaults(): + import datetime + from google.cloud._helpers import UTC + from google.cloud.bigtable.backup import Backup + from google.cloud.bigtable.instance import Instance + + instance = Instance(INSTANCE_ID, None) + table = _make_table(TABLE_ID, instance) + timestamp = datetime.datetime.utcnow().replace(tzinfo=UTC) + backup = table.backup(BACKUP_ID, cluster_id=CLUSTER_ID, expire_time=timestamp,) + + assert isinstance(backup, Backup) + assert backup.backup_id == BACKUP_ID + assert backup._instance is instance + + assert backup.backup_id == BACKUP_ID + assert backup._cluster is CLUSTER_ID + assert backup.table_id == TABLE_ID + assert backup._expire_time == timestamp + assert backup._start_time is None + assert 
backup._end_time is None + assert backup._size_bytes is None + assert backup._state is None + + +def _table_list_backups_helper(cluster_id=None, filter_=None, **kwargs): + from google.cloud.bigtable_admin_v2.types import ( + Backup as backup_pb, + bigtable_table_admin, + ) + from google.cloud.bigtable.backup import Backup + + client = _make_client( + project=PROJECT_ID, credentials=_make_credentials(), admin=True + ) + instance = client.instance(instance_id=INSTANCE_ID) + table = _make_table(TABLE_ID, instance) + + parent = INSTANCE_NAME + "/clusters/cluster" + backups_pb = bigtable_table_admin.ListBackupsResponse( + backups=[ + backup_pb(name=parent + "/backups/op1"), + backup_pb(name=parent + "/backups/op2"), + backup_pb(name=parent + "/backups/op3"), ] - return MutateRowsResponse(entries=entries) + ) - def test_callable_empty_rows(self): - from google.cloud.bigtable_v2.services.bigtable import BigtableClient - from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - client as bigtable_table_admin, - ) + table_api = client._table_admin_client = _make_table_api() + table_api.list_backups.return_value = backups_pb - data_api = mock.create_autospec(BigtableClient) - table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - client._table_data_client = data_api - client._table_admin_client = table_api - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_table(self.TABLE_ID, instance) + backups_filter = "source_table:{}".format(TABLE_NAME) + if filter_: + backups_filter = "({}) AND ({})".format(backups_filter, filter_) - worker = self._make_worker(client, table.name, []) - statuses = worker() + backups = table.list_backups(cluster_id=cluster_id, filter_=filter_, **kwargs) - self.assertEqual(len(statuses), 0) + for backup in backups: + assert isinstance(backup, Backup) - def test_callable_no_retry_strategy(self): - from google.cloud.bigtable.row import DirectRow - from google.cloud.bigtable_v2.services.bigtable import BigtableClient - from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - client as bigtable_table_admin, - ) + if not cluster_id: + cluster_id = "-" + parent = "{}/clusters/{}".format(INSTANCE_NAME, cluster_id) - # Setup: - # - Mutate 3 rows. - # Action: - # - Attempt to mutate the rows w/o any retry strategy. - # Expectation: - # - Since no retry, should return statuses as they come back. - # - Even if there are retryable errors, no retry attempt is made. 
- # - State of responses_statuses should be - # [success, retryable, non-retryable] - - data_api = mock.create_autospec(BigtableClient) - table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) - - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - client._table_data_client = data_api - client._table_admin_client = table_api - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_table(self.TABLE_ID, instance) + order_by = None + page_size = 0 + if "order_by" in kwargs: + order_by = kwargs["order_by"] - row_1 = DirectRow(row_key=b"row_key", table=table) - row_1.set_cell("cf", b"col", b"value1") - row_2 = DirectRow(row_key=b"row_key_2", table=table) - row_2.set_cell("cf", b"col", b"value2") - row_3 = DirectRow(row_key=b"row_key_3", table=table) - row_3.set_cell("cf", b"col", b"value3") + if "page_size" in kwargs: + page_size = kwargs["page_size"] - response_codes = [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE] - response = self._make_responses(response_codes) - data_api.mutate_rows = mock.MagicMock(return_value=[response]) + table_api.list_backups.assert_called_once_with( + request={ + "parent": parent, + "filter": backups_filter, + "order_by": order_by, + "page_size": page_size, + } + ) - table._instance._client._table_data_client = data_api - table._instance._client._table_admin_client = table_api - table._instance._client._table_data_client.mutate_rows.return_value = [response] +def test_table_list_backups_defaults(): + _table_list_backups_helper() - worker = self._make_worker(client, table.name, [row_1, row_2, row_3]) - statuses = worker(retry=None) - result = [status.code for status in statuses] - self.assertEqual(result, response_codes) +def test_table_list_backups_w_options(): + _table_list_backups_helper( + cluster_id="cluster", filter_="filter", order_by="order_by", page_size=10 + ) - data_api.mutate_rows.assert_called_once() - def test_callable_retry(self): - from google.cloud.bigtable.row import DirectRow - from google.cloud.bigtable.table import DEFAULT_RETRY - from google.cloud.bigtable_v2.services.bigtable import BigtableClient - from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - client as bigtable_table_admin, - ) +def _table_restore_helper(backup_name=None): + from google.cloud.bigtable.instance import Instance - # Setup: - # - Mutate 3 rows. - # Action: - # - Initial attempt will mutate all 3 rows. - # Expectation: - # - First attempt will result in one retryable error. - # - Second attempt will result in success for the retry-ed row. - # - Check MutateRows is called twice. 
- # - State of responses_statuses should be - # [success, success, non-retryable] - - data_api = mock.create_autospec(BigtableClient) - table_api = mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) - - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - client._table_data_client = data_api - client._table_admin_client = table_api - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_table(self.TABLE_ID, instance) - row_1 = DirectRow(row_key=b"row_key", table=table) - row_1.set_cell("cf", b"col", b"value1") - row_2 = DirectRow(row_key=b"row_key_2", table=table) - row_2.set_cell("cf", b"col", b"value2") - row_3 = DirectRow(row_key=b"row_key_3", table=table) - row_3.set_cell("cf", b"col", b"value3") - - response_1 = self._make_responses( - [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE] - ) - response_2 = self._make_responses([self.SUCCESS]) + op_future = object() + credentials = _make_credentials() + client = _make_client(project=PROJECT_ID, credentials=credentials, admin=True) - # Patch the stub used by the API method. - client._table_data_client.mutate_rows.side_effect = [[response_1], [response_2]] - table._instance._client._table_data_client = data_api - table._instance._client._table_admin_client = table_api + instance = Instance(INSTANCE_ID, client=client) + table = _make_table(TABLE_ID, instance) - retry = DEFAULT_RETRY.with_delay(initial=0.1) - worker = self._make_worker(client, table.name, [row_1, row_2, row_3]) - statuses = worker(retry=retry) + table_api = client._table_admin_client = _make_table_api() + table_api.restore_table.return_value = op_future - result = [status.code for status in statuses] - expected_result = [self.SUCCESS, self.SUCCESS, self.NON_RETRYABLE] + if backup_name: + future = table.restore(TABLE_ID, backup_name=BACKUP_NAME) + else: + future = table.restore(TABLE_ID, CLUSTER_ID, BACKUP_ID) - self.assertEqual(client._table_data_client.mutate_rows.call_count, 2) - self.assertEqual(result, expected_result) + assert future is op_future - def _do_mutate_retryable_rows_helper( - self, - row_cells, - responses, - prior_statuses=None, - expected_result=None, - raising_retry=False, - retryable_error=False, - timeout=None, - ): - from google.api_core.exceptions import ServiceUnavailable - from google.cloud.bigtable.row import DirectRow - from google.cloud.bigtable.table import _BigtableRetryableError - from google.cloud.bigtable_v2.services.bigtable import BigtableClient - from google.cloud.bigtable_v2.types import bigtable as data_messages_v2_pb2 - - # Setup: - # - Mutate 2 rows. - # Action: - # - Initial attempt will mutate all 2 rows. 
- # Expectation: - # - Expect [success, non-retryable] - - data_api = mock.create_autospec(BigtableClient) - - credentials = _make_credentials() - client = self._make_client( - project="project-id", credentials=credentials, admin=True - ) - client._table_data_client = data_api - instance = client.instance(instance_id=self.INSTANCE_ID) - table = self._make_table(self.TABLE_ID, instance) - - rows = [] - for row_key, cell_data in row_cells: - row = DirectRow(row_key=row_key, table=table) - row.set_cell(*cell_data) - rows.append(row) + expected_request = { + "parent": INSTANCE_NAME, + "table_id": TABLE_ID, + "backup": BACKUP_NAME, + } + table_api.restore_table.assert_called_once_with(request=expected_request) - response = self._make_responses(responses) - if retryable_error: - data_api.mutate_rows.side_effect = ServiceUnavailable("testing") - else: - data_api.mutate_rows.side_effect = [[response]] +def test_table_restore_table_w_backup_id(): + _table_restore_helper() - worker = self._make_worker(client, table.name, rows=rows) - if prior_statuses is not None: - assert len(prior_statuses) == len(rows) - worker.responses_statuses = self._make_responses_statuses(prior_statuses) - expected_entries = [] - for row, prior_status in zip(rows, worker.responses_statuses): +def test_table_restore_table_w_backup_name(): + _table_restore_helper(backup_name=BACKUP_NAME) - if prior_status is None or prior_status.code in self.RETRYABLES: - mutations = row._get_mutations().copy() # row clears on success - entry = data_messages_v2_pb2.MutateRowsRequest.Entry( - row_key=row.row_key, mutations=mutations, - ) - expected_entries.append(entry) - expected_kwargs = {} - if timeout is not None: - worker.timeout = timeout - expected_kwargs["timeout"] = mock.ANY +def _make_worker(*args, **kwargs): + from google.cloud.bigtable.table import _RetryableMutateRowsWorker - if retryable_error or raising_retry: - with self.assertRaises(_BigtableRetryableError): - worker._do_mutate_retryable_rows() - statuses = worker.responses_statuses - else: - statuses = worker._do_mutate_retryable_rows() + return _RetryableMutateRowsWorker(*args, **kwargs) - if not retryable_error: - result = [status.code for status in statuses] - if expected_result is None: - expected_result = responses +def _make_responses_statuses(codes): + from google.rpc.status_pb2 import Status - self.assertEqual(result, expected_result) + response = [Status(code=code) for code in codes] + return response - if len(responses) == 0 and not retryable_error: - data_api.mutate_rows.assert_not_called() - else: - data_api.mutate_rows.assert_called_once_with( - table_name=table.name, - entries=expected_entries, - app_profile_id=None, - retry=None, - **expected_kwargs, + +def _make_responses(codes): + from google.cloud.bigtable_v2.types.bigtable import MutateRowsResponse + from google.rpc.status_pb2 import Status + + entries = [ + MutateRowsResponse.Entry(index=i, status=Status(code=codes[i])) + for i in range(len(codes)) + ] + return MutateRowsResponse(entries=entries) + + +def test_rmrw_callable_empty_rows(): + credentials = _make_credentials() + client = _make_client(project="project-id", credentials=credentials, admin=True) + instance = client.instance(instance_id=INSTANCE_ID) + table = _make_table(TABLE_ID, instance) + data_api = client._table_data_client = _make_data_api() + data_api.mutate_rows.return_value = [] + data_api.table_path.return_value = ( + f"projects/{PROJECT_ID}/instances/{INSTANCE_ID}/tables/{TABLE_ID}" + ) + + worker = _make_worker(client, table.name, []) + 
statuses = worker() + + assert len(statuses) == 0 + + +def test_rmrw_callable_no_retry_strategy(): + from google.cloud.bigtable.row import DirectRow + + # Setup: + # - Mutate 3 rows. + # Action: + # - Attempt to mutate the rows w/o any retry strategy. + # Expectation: + # - Since no retry, should return statuses as they come back. + # - Even if there are retryable errors, no retry attempt is made. + # - State of responses_statuses should be + # [success, retryable, non-retryable] + credentials = _make_credentials() + client = _make_client(project="project-id", credentials=credentials, admin=True) + instance = client.instance(instance_id=INSTANCE_ID) + table = _make_table(TABLE_ID, instance) + + row_1 = DirectRow(row_key=b"row_key", table=table) + row_1.set_cell("cf", b"col", b"value1") + row_2 = DirectRow(row_key=b"row_key_2", table=table) + row_2.set_cell("cf", b"col", b"value2") + row_3 = DirectRow(row_key=b"row_key_3", table=table) + row_3.set_cell("cf", b"col", b"value3") + + response_codes = [SUCCESS, RETRYABLE_1, NON_RETRYABLE] + response = _make_responses(response_codes) + + data_api = client._table_data_client = _make_data_api() + data_api.mutate_rows.return_value = [response] + data_api.table_path.return_value = ( + f"projects/{PROJECT_ID}/instances/{INSTANCE_ID}/tables/{TABLE_ID}" + ) + worker = _make_worker(client, table.name, [row_1, row_2, row_3]) + + statuses = worker(retry=None) + + result = [status.code for status in statuses] + assert result == response_codes + + data_api.mutate_rows.assert_called_once() + + +def test_rmrw_callable_retry(): + from google.cloud.bigtable.row import DirectRow + from google.cloud.bigtable.table import DEFAULT_RETRY + + # Setup: + # - Mutate 3 rows. + # Action: + # - Initial attempt will mutate all 3 rows. + # Expectation: + # - First attempt will result in one retryable error. + # - Second attempt will result in success for the retry-ed row. + # - Check MutateRows is called twice. 
+ # - State of responses_statuses should be + # [success, success, non-retryable] + + credentials = _make_credentials() + client = _make_client(project="project-id", credentials=credentials, admin=True) + instance = client.instance(instance_id=INSTANCE_ID) + table = _make_table(TABLE_ID, instance) + row_1 = DirectRow(row_key=b"row_key", table=table) + row_1.set_cell("cf", b"col", b"value1") + row_2 = DirectRow(row_key=b"row_key_2", table=table) + row_2.set_cell("cf", b"col", b"value2") + row_3 = DirectRow(row_key=b"row_key_3", table=table) + row_3.set_cell("cf", b"col", b"value3") + + response_1 = _make_responses([SUCCESS, RETRYABLE_1, NON_RETRYABLE]) + response_2 = _make_responses([SUCCESS]) + data_api = client._table_data_client = _make_data_api() + data_api.mutate_rows.side_effect = [[response_1], [response_2]] + data_api.table_path.return_value = ( + f"projects/{PROJECT_ID}/instances/{INSTANCE_ID}/tables/{TABLE_ID}" + ) + worker = _make_worker(client, table.name, [row_1, row_2, row_3]) + retry = DEFAULT_RETRY.with_delay(initial=0.1) + + statuses = worker(retry=retry) + + result = [status.code for status in statuses] + + assert result == [SUCCESS, SUCCESS, NON_RETRYABLE] + + assert client._table_data_client.mutate_rows.call_count == 2 + + +def _do_mutate_retryable_rows_helper( + row_cells, + responses, + prior_statuses=None, + expected_result=None, + raising_retry=False, + retryable_error=False, + timeout=None, +): + from google.api_core.exceptions import ServiceUnavailable + from google.cloud.bigtable.row import DirectRow + from google.cloud.bigtable.table import _BigtableRetryableError + from google.cloud.bigtable_v2.types import bigtable as data_messages_v2_pb2 + + # Setup: + # - Mutate 2 rows. + # Action: + # - Initial attempt will mutate all 2 rows. + # Expectation: + # - Expect [success, non-retryable] + + credentials = _make_credentials() + client = _make_client(project="project-id", credentials=credentials, admin=True) + instance = client.instance(instance_id=INSTANCE_ID) + table = _make_table(TABLE_ID, instance) + + rows = [] + for row_key, cell_data in row_cells: + row = DirectRow(row_key=row_key, table=table) + row.set_cell(*cell_data) + rows.append(row) + + response = _make_responses(responses) + + data_api = client._table_data_client = _make_data_api() + if retryable_error: + data_api.mutate_rows.side_effect = ServiceUnavailable("testing") + else: + data_api.mutate_rows.return_value = [response] + + worker = _make_worker(client, table.name, rows=rows) + + if prior_statuses is not None: + assert len(prior_statuses) == len(rows) + worker.responses_statuses = _make_responses_statuses(prior_statuses) + + expected_entries = [] + for row, prior_status in zip(rows, worker.responses_statuses): + + if prior_status is None or prior_status.code in RETRYABLES: + mutations = row._get_mutations().copy() # row clears on success + entry = data_messages_v2_pb2.MutateRowsRequest.Entry( + row_key=row.row_key, mutations=mutations, ) - if timeout is not None: - called = data_api.mutate_rows.mock_calls[0] - self.assertEqual(called.kwargs["timeout"]._deadline, timeout) - - def test_do_mutate_retryable_rows_empty_rows(self): - # - # Setup: - # - No mutated rows. - # Action: - # - No API call made. - # Expectation: - # - No change. - # - row_cells = [] - responses = [] - - self._do_mutate_retryable_rows_helper(row_cells, responses) - - def test_do_mutate_retryable_rows_w_timeout(self): - # - # Setup: - # - Mutate 2 rows. - # Action: - # - Initial attempt will mutate all 2 rows. 
- # Expectation: - # - No retryable error codes, so don't expect a raise. - # - State of responses_statuses should be [success, non-retryable]. - # - row_cells = [ - (b"row_key_1", ("cf", b"col", b"value1")), - (b"row_key_2", ("cf", b"col", b"value2")), - ] + expected_entries.append(entry) - responses = [self.SUCCESS, self.NON_RETRYABLE] + expected_kwargs = {} + if timeout is not None: + worker.timeout = timeout + expected_kwargs["timeout"] = mock.ANY - timeout = 5 # seconds + if retryable_error or raising_retry: + with pytest.raises(_BigtableRetryableError): + worker._do_mutate_retryable_rows() + statuses = worker.responses_statuses + else: + statuses = worker._do_mutate_retryable_rows() - self._do_mutate_retryable_rows_helper( - row_cells, responses, timeout=timeout, - ) + if not retryable_error: + result = [status.code for status in statuses] - def test_do_mutate_retryable_rows_w_retryable_error(self): - # - # Setup: - # - Mutate 2 rows. - # Action: - # - Initial attempt will mutate all 2 rows. - # Expectation: - # - No retryable error codes, so don't expect a raise. - # - State of responses_statuses should be [success, non-retryable]. - # - row_cells = [ - (b"row_key_1", ("cf", b"col", b"value1")), - (b"row_key_2", ("cf", b"col", b"value2")), - ] + if expected_result is None: + expected_result = responses - responses = () + assert result == expected_result - self._do_mutate_retryable_rows_helper( - row_cells, responses, retryable_error=True, + if len(responses) == 0 and not retryable_error: + data_api.mutate_rows.assert_not_called() + else: + data_api.mutate_rows.assert_called_once_with( + table_name=table.name, + entries=expected_entries, + app_profile_id=None, + retry=None, + **expected_kwargs, ) + if timeout is not None: + called = data_api.mutate_rows.mock_calls[0] + assert called.kwargs["timeout"]._deadline == timeout + + +def test_rmrw_do_mutate_retryable_rows_empty_rows(): + # + # Setup: + # - No mutated rows. + # Action: + # - No API call made. + # Expectation: + # - No change. + # + row_cells = [] + responses = [] + + _do_mutate_retryable_rows_helper(row_cells, responses) + + +def test_rmrw_do_mutate_retryable_rows_w_timeout(): + # + # Setup: + # - Mutate 2 rows. + # Action: + # - Initial attempt will mutate all 2 rows. + # Expectation: + # - No retryable error codes, so don't expect a raise. + # - State of responses_statuses should be [success, non-retryable]. + # + row_cells = [ + (b"row_key_1", ("cf", b"col", b"value1")), + (b"row_key_2", ("cf", b"col", b"value2")), + ] + + responses = [SUCCESS, NON_RETRYABLE] + + timeout = 5 # seconds + + _do_mutate_retryable_rows_helper( + row_cells, responses, timeout=timeout, + ) - def test_do_mutate_retryable_rows_retry(self): - # - # Setup: - # - Mutate 3 rows. - # Action: - # - Initial attempt will mutate all 3 rows. - # Expectation: - # - Second row returns retryable error code, so expect a raise. - # - State of responses_statuses should be - # [success, retryable, non-retryable] - # - row_cells = [ - (b"row_key_1", ("cf", b"col", b"value1")), - (b"row_key_2", ("cf", b"col", b"value2")), - (b"row_key_3", ("cf", b"col", b"value3")), - ] - responses = [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE] +def test_rmrw_do_mutate_retryable_rows_w_retryable_error(): + # + # Setup: + # - Mutate 2 rows. + # Action: + # - Initial attempt will mutate all 2 rows. + # Expectation: + # - No retryable error codes, so don't expect a raise. + # - State of responses_statuses should be [success, non-retryable]. 
+ # + row_cells = [ + (b"row_key_1", ("cf", b"col", b"value1")), + (b"row_key_2", ("cf", b"col", b"value2")), + ] + + responses = () + + _do_mutate_retryable_rows_helper( + row_cells, responses, retryable_error=True, + ) - self._do_mutate_retryable_rows_helper( - row_cells, responses, raising_retry=True, - ) - def test_do_mutate_retryable_rows_second_retry(self): - # - # Setup: - # - Mutate 4 rows. - # - First try results: - # [success, retryable, non-retryable, retryable] - # Action: - # - Second try should re-attempt the 'retryable' rows. - # Expectation: - # - After second try: - # [success, success, non-retryable, retryable] - # - One of the rows tried second time returns retryable error code, - # so expect a raise. - # - Exception contains response whose index should be '3' even though - # only two rows were retried. - # - row_cells = [ - (b"row_key_1", ("cf", b"col", b"value1")), - (b"row_key_2", ("cf", b"col", b"value2")), - (b"row_key_3", ("cf", b"col", b"value3")), - (b"row_key_4", ("cf", b"col", b"value4")), - ] +def test_rmrw_do_mutate_retryable_rows_retry(): + # + # Setup: + # - Mutate 3 rows. + # Action: + # - Initial attempt will mutate all 3 rows. + # Expectation: + # - Second row returns retryable error code, so expect a raise. + # - State of responses_statuses should be + # [success, retryable, non-retryable] + # + row_cells = [ + (b"row_key_1", ("cf", b"col", b"value1")), + (b"row_key_2", ("cf", b"col", b"value2")), + (b"row_key_3", ("cf", b"col", b"value3")), + ] + + responses = [SUCCESS, RETRYABLE_1, NON_RETRYABLE] + + _do_mutate_retryable_rows_helper( + row_cells, responses, raising_retry=True, + ) - responses = [self.SUCCESS, self.RETRYABLE_1] - prior_statuses = [ - self.SUCCESS, - self.RETRYABLE_1, - self.NON_RETRYABLE, - self.RETRYABLE_2, - ] +def test_rmrw_do_mutate_retryable_rows_second_retry(): + # + # Setup: + # - Mutate 4 rows. + # - First try results: + # [success, retryable, non-retryable, retryable] + # Action: + # - Second try should re-attempt the 'retryable' rows. + # Expectation: + # - After second try: + # [success, success, non-retryable, retryable] + # - One of the rows tried second time returns retryable error code, + # so expect a raise. + # - Exception contains response whose index should be '3' even though + # only two rows were retried. + # + row_cells = [ + (b"row_key_1", ("cf", b"col", b"value1")), + (b"row_key_2", ("cf", b"col", b"value2")), + (b"row_key_3", ("cf", b"col", b"value3")), + (b"row_key_4", ("cf", b"col", b"value4")), + ] + + responses = [SUCCESS, RETRYABLE_1] + + prior_statuses = [ + SUCCESS, + RETRYABLE_1, + NON_RETRYABLE, + RETRYABLE_2, + ] + + expected_result = [ + SUCCESS, + SUCCESS, + NON_RETRYABLE, + RETRYABLE_1, + ] + + _do_mutate_retryable_rows_helper( + row_cells, + responses, + prior_statuses=prior_statuses, + expected_result=expected_result, + raising_retry=True, + ) - expected_result = [ - self.SUCCESS, - self.SUCCESS, - self.NON_RETRYABLE, - self.RETRYABLE_1, - ] - self._do_mutate_retryable_rows_helper( - row_cells, - responses, - prior_statuses=prior_statuses, - expected_result=expected_result, - raising_retry=True, - ) +def test_rmrw_do_mutate_retryable_rows_second_try(): + # + # Setup: + # - Mutate 4 rows. + # - First try results: + # [success, retryable, non-retryable, retryable] + # Action: + # - Second try should re-attempt the 'retryable' rows. 
+ # Expectation: + # - After second try: + # [success, non-retryable, non-retryable, success] + # + row_cells = [ + (b"row_key_1", ("cf", b"col", b"value1")), + (b"row_key_2", ("cf", b"col", b"value2")), + (b"row_key_3", ("cf", b"col", b"value3")), + (b"row_key_4", ("cf", b"col", b"value4")), + ] + + responses = [NON_RETRYABLE, SUCCESS] + + prior_statuses = [ + SUCCESS, + RETRYABLE_1, + NON_RETRYABLE, + RETRYABLE_2, + ] + + expected_result = [ + SUCCESS, + NON_RETRYABLE, + NON_RETRYABLE, + SUCCESS, + ] + + _do_mutate_retryable_rows_helper( + row_cells, + responses, + prior_statuses=prior_statuses, + expected_result=expected_result, + ) - def test_do_mutate_retryable_rows_second_try(self): - # - # Setup: - # - Mutate 4 rows. - # - First try results: - # [success, retryable, non-retryable, retryable] - # Action: - # - Second try should re-attempt the 'retryable' rows. - # Expectation: - # - After second try: - # [success, non-retryable, non-retryable, success] - # - row_cells = [ - (b"row_key_1", ("cf", b"col", b"value1")), - (b"row_key_2", ("cf", b"col", b"value2")), - (b"row_key_3", ("cf", b"col", b"value3")), - (b"row_key_4", ("cf", b"col", b"value4")), - ] - responses = [self.NON_RETRYABLE, self.SUCCESS] +def test_rmrw_do_mutate_retryable_rows_second_try_no_retryable(): + # + # Setup: + # - Mutate 2 rows. + # - First try results: [success, non-retryable] + # Action: + # - Second try has no row to retry. + # Expectation: + # - After second try: [success, non-retryable] + # + row_cells = [ + (b"row_key_1", ("cf", b"col", b"value1")), + (b"row_key_2", ("cf", b"col", b"value2")), + ] + + responses = [] # no calls will be made + + prior_statuses = [ + SUCCESS, + NON_RETRYABLE, + ] + + expected_result = [ + SUCCESS, + NON_RETRYABLE, + ] + + _do_mutate_retryable_rows_helper( + row_cells, + responses, + prior_statuses=prior_statuses, + expected_result=expected_result, + ) - prior_statuses = [ - self.SUCCESS, - self.RETRYABLE_1, - self.NON_RETRYABLE, - self.RETRYABLE_2, - ] - expected_result = [ - self.SUCCESS, - self.NON_RETRYABLE, - self.NON_RETRYABLE, - self.SUCCESS, - ] +def test_rmrw_do_mutate_retryable_rows_mismatch_num_responses(): + row_cells = [ + (b"row_key_1", ("cf", b"col", b"value1")), + (b"row_key_2", ("cf", b"col", b"value2")), + ] - self._do_mutate_retryable_rows_helper( - row_cells, - responses, - prior_statuses=prior_statuses, - expected_result=expected_result, - ) + responses = [SUCCESS] - def test_do_mutate_retryable_rows_second_try_no_retryable(self): - # - # Setup: - # - Mutate 2 rows. - # - First try results: [success, non-retryable] - # Action: - # - Second try has no row to retry. 
- # Expectation: - # - After second try: [success, non-retryable] - # - row_cells = [ - (b"row_key_1", ("cf", b"col", b"value1")), - (b"row_key_2", ("cf", b"col", b"value2")), - ] + with pytest.raises(RuntimeError): + _do_mutate_retryable_rows_helper(row_cells, responses) - responses = [] # no calls will be made - prior_statuses = [ - self.SUCCESS, - self.NON_RETRYABLE, - ] +def test__create_row_request_table_name_only(): + from google.cloud.bigtable.table import _create_row_request - expected_result = [ - self.SUCCESS, - self.NON_RETRYABLE, - ] + table_name = "table_name" + result = _create_row_request(table_name) + expected_result = _ReadRowsRequestPB(table_name=table_name) + assert result == expected_result - self._do_mutate_retryable_rows_helper( - row_cells, - responses, - prior_statuses=prior_statuses, - expected_result=expected_result, - ) - def test_do_mutate_retryable_rows_mismatch_num_responses(self): - row_cells = [ - (b"row_key_1", ("cf", b"col", b"value1")), - (b"row_key_2", ("cf", b"col", b"value2")), - ] +def test__create_row_request_row_range_row_set_conflict(): + from google.cloud.bigtable.table import _create_row_request - responses = [self.SUCCESS] + with pytest.raises(ValueError): + _create_row_request(None, end_key=object(), row_set=object()) - with self.assertRaises(RuntimeError): - self._do_mutate_retryable_rows_helper(row_cells, responses) +def test__create_row_request_row_range_start_key(): + from google.cloud.bigtable.table import _create_row_request + from google.cloud.bigtable_v2.types import RowRange -class Test__create_row_request(unittest.TestCase): - def _call_fut( - self, - table_name, - start_key=None, - end_key=None, - filter_=None, - limit=None, - end_inclusive=False, - app_profile_id=None, - row_set=None, - ): + table_name = "table_name" + start_key = b"start_key" + result = _create_row_request(table_name, start_key=start_key) + expected_result = _ReadRowsRequestPB(table_name=table_name) + row_range = RowRange(start_key_closed=start_key) + expected_result.rows.row_ranges.append(row_range) + assert result == expected_result - from google.cloud.bigtable.table import _create_row_request - return _create_row_request( - table_name, - start_key=start_key, - end_key=end_key, - filter_=filter_, - limit=limit, - end_inclusive=end_inclusive, - app_profile_id=app_profile_id, - row_set=row_set, - ) +def test__create_row_request_row_range_end_key(): + from google.cloud.bigtable.table import _create_row_request + from google.cloud.bigtable_v2.types import RowRange - def test_table_name_only(self): - table_name = "table_name" - result = self._call_fut(table_name) - expected_result = _ReadRowsRequestPB(table_name=table_name) - self.assertEqual(result, expected_result) - - def test_row_range_row_set_conflict(self): - with self.assertRaises(ValueError): - self._call_fut(None, end_key=object(), row_set=object()) - - def test_row_range_start_key(self): - from google.cloud.bigtable_v2.types import RowRange - - table_name = "table_name" - start_key = b"start_key" - result = self._call_fut(table_name, start_key=start_key) - expected_result = _ReadRowsRequestPB(table_name=table_name) - row_range = RowRange(start_key_closed=start_key) - expected_result.rows.row_ranges.append(row_range) - self.assertEqual(result, expected_result) - - def test_row_range_end_key(self): - from google.cloud.bigtable_v2.types import RowRange - - table_name = "table_name" - end_key = b"end_key" - result = self._call_fut(table_name, end_key=end_key) - expected_result = 
_ReadRowsRequestPB(table_name=table_name) - row_range = RowRange(end_key_open=end_key) - expected_result.rows.row_ranges.append(row_range) - self.assertEqual(result, expected_result) - - def test_row_range_both_keys(self): - from google.cloud.bigtable_v2.types import RowRange - - table_name = "table_name" - start_key = b"start_key" - end_key = b"end_key" - result = self._call_fut(table_name, start_key=start_key, end_key=end_key) - row_range = RowRange(start_key_closed=start_key, end_key_open=end_key) - expected_result = _ReadRowsRequestPB(table_name=table_name) - expected_result.rows.row_ranges.append(row_range) - self.assertEqual(result, expected_result) - - def test_row_range_both_keys_inclusive(self): - from google.cloud.bigtable_v2.types import RowRange - - table_name = "table_name" - start_key = b"start_key" - end_key = b"end_key" - result = self._call_fut( - table_name, start_key=start_key, end_key=end_key, end_inclusive=True - ) - expected_result = _ReadRowsRequestPB(table_name=table_name) - row_range = RowRange(start_key_closed=start_key, end_key_closed=end_key) - expected_result.rows.row_ranges.append(row_range) - self.assertEqual(result, expected_result) - - def test_with_filter(self): - from google.cloud.bigtable.row_filters import RowSampleFilter - - table_name = "table_name" - row_filter = RowSampleFilter(0.33) - result = self._call_fut(table_name, filter_=row_filter) - expected_result = _ReadRowsRequestPB( - table_name=table_name, filter=row_filter.to_pb() - ) - self.assertEqual(result, expected_result) - - def test_with_limit(self): - table_name = "table_name" - limit = 1337 - result = self._call_fut(table_name, limit=limit) - expected_result = _ReadRowsRequestPB(table_name=table_name, rows_limit=limit) - self.assertEqual(result, expected_result) - - def test_with_row_set(self): - from google.cloud.bigtable.row_set import RowSet - - table_name = "table_name" - row_set = RowSet() - result = self._call_fut(table_name, row_set=row_set) - expected_result = _ReadRowsRequestPB(table_name=table_name) - self.assertEqual(result, expected_result) - - def test_with_app_profile_id(self): - table_name = "table_name" - limit = 1337 - app_profile_id = "app-profile-id" - result = self._call_fut(table_name, limit=limit, app_profile_id=app_profile_id) - expected_result = _ReadRowsRequestPB( - table_name=table_name, rows_limit=limit, app_profile_id=app_profile_id - ) - self.assertEqual(result, expected_result) + table_name = "table_name" + end_key = b"end_key" + result = _create_row_request(table_name, end_key=end_key) + expected_result = _ReadRowsRequestPB(table_name=table_name) + row_range = RowRange(end_key_open=end_key) + expected_result.rows.row_ranges.append(row_range) + assert result == expected_result + + +def test__create_row_request_row_range_both_keys(): + from google.cloud.bigtable.table import _create_row_request + from google.cloud.bigtable_v2.types import RowRange + + table_name = "table_name" + start_key = b"start_key" + end_key = b"end_key" + result = _create_row_request(table_name, start_key=start_key, end_key=end_key) + row_range = RowRange(start_key_closed=start_key, end_key_open=end_key) + expected_result = _ReadRowsRequestPB(table_name=table_name) + expected_result.rows.row_ranges.append(row_range) + assert result == expected_result + + +def test__create_row_request_row_range_both_keys_inclusive(): + from google.cloud.bigtable.table import _create_row_request + from google.cloud.bigtable_v2.types import RowRange + + table_name = "table_name" + start_key = b"start_key" + 
end_key = b"end_key" + result = _create_row_request( + table_name, start_key=start_key, end_key=end_key, end_inclusive=True + ) + expected_result = _ReadRowsRequestPB(table_name=table_name) + row_range = RowRange(start_key_closed=start_key, end_key_closed=end_key) + expected_result.rows.row_ranges.append(row_range) + assert result == expected_result + + +def test__create_row_request_with_filter(): + from google.cloud.bigtable.table import _create_row_request + from google.cloud.bigtable.row_filters import RowSampleFilter + + table_name = "table_name" + row_filter = RowSampleFilter(0.33) + result = _create_row_request(table_name, filter_=row_filter) + expected_result = _ReadRowsRequestPB( + table_name=table_name, filter=row_filter.to_pb() + ) + assert result == expected_result + + +def test__create_row_request_with_limit(): + from google.cloud.bigtable.table import _create_row_request + + table_name = "table_name" + limit = 1337 + result = _create_row_request(table_name, limit=limit) + expected_result = _ReadRowsRequestPB(table_name=table_name, rows_limit=limit) + assert result == expected_result + + +def test__create_row_request_with_row_set(): + from google.cloud.bigtable.table import _create_row_request + from google.cloud.bigtable.row_set import RowSet + + table_name = "table_name" + row_set = RowSet() + result = _create_row_request(table_name, row_set=row_set) + expected_result = _ReadRowsRequestPB(table_name=table_name) + assert result == expected_result + + +def test__create_row_request_with_app_profile_id(): + from google.cloud.bigtable.table import _create_row_request + + table_name = "table_name" + limit = 1337 + app_profile_id = "app-profile-id" + result = _create_row_request(table_name, limit=limit, app_profile_id=app_profile_id) + expected_result = _ReadRowsRequestPB( + table_name=table_name, rows_limit=limit, app_profile_id=app_profile_id + ) + assert result == expected_result def _ReadRowsRequestPB(*args, **kw): @@ -2169,90 +2011,83 @@ def _ReadRowsRequestPB(*args, **kw): return messages_v2_pb2.ReadRowsRequest(*args, **kw) -class Test_ClusterState(unittest.TestCase): - def test___eq__(self): - from google.cloud.bigtable.enums import Table as enum_table - from google.cloud.bigtable.table import ClusterState - - READY = enum_table.ReplicationState.READY - state1 = ClusterState(READY) - state2 = ClusterState(READY) - self.assertEqual(state1, state2) - - def test___eq__type_differ(self): - from google.cloud.bigtable.enums import Table as enum_table - from google.cloud.bigtable.table import ClusterState - - READY = enum_table.ReplicationState.READY - state1 = ClusterState(READY) - state2 = object() - self.assertNotEqual(state1, state2) - - def test___ne__same_value(self): - from google.cloud.bigtable.enums import Table as enum_table - from google.cloud.bigtable.table import ClusterState - - READY = enum_table.ReplicationState.READY - state1 = ClusterState(READY) - state2 = ClusterState(READY) - comparison_val = state1 != state2 - self.assertFalse(comparison_val) - - def test___ne__(self): - from google.cloud.bigtable.enums import Table as enum_table - from google.cloud.bigtable.table import ClusterState - - READY = enum_table.ReplicationState.READY - INITIALIZING = enum_table.ReplicationState.INITIALIZING - state1 = ClusterState(READY) - state2 = ClusterState(INITIALIZING) - self.assertNotEqual(state1, state2) - - def test__repr__(self): - from google.cloud.bigtable.enums import Table as enum_table - from google.cloud.bigtable.table import ClusterState - - STATE_NOT_KNOWN = 
enum_table.ReplicationState.STATE_NOT_KNOWN - INITIALIZING = enum_table.ReplicationState.INITIALIZING - PLANNED_MAINTENANCE = enum_table.ReplicationState.PLANNED_MAINTENANCE - UNPLANNED_MAINTENANCE = enum_table.ReplicationState.UNPLANNED_MAINTENANCE - READY = enum_table.ReplicationState.READY - - replication_dict = { - STATE_NOT_KNOWN: "STATE_NOT_KNOWN", - INITIALIZING: "INITIALIZING", - PLANNED_MAINTENANCE: "PLANNED_MAINTENANCE", - UNPLANNED_MAINTENANCE: "UNPLANNED_MAINTENANCE", - READY: "READY", - } +def test_cluster_state___eq__(): + from google.cloud.bigtable.enums import Table as enum_table + from google.cloud.bigtable.table import ClusterState - self.assertEqual( - str(ClusterState(STATE_NOT_KNOWN)), replication_dict[STATE_NOT_KNOWN] - ) - self.assertEqual( - str(ClusterState(INITIALIZING)), replication_dict[INITIALIZING] - ) - self.assertEqual( - str(ClusterState(PLANNED_MAINTENANCE)), - replication_dict[PLANNED_MAINTENANCE], - ) - self.assertEqual( - str(ClusterState(UNPLANNED_MAINTENANCE)), - replication_dict[UNPLANNED_MAINTENANCE], - ) - self.assertEqual(str(ClusterState(READY)), replication_dict[READY]) + READY = enum_table.ReplicationState.READY + state1 = ClusterState(READY) + state2 = ClusterState(READY) + assert state1 == state2 - self.assertEqual( - ClusterState(STATE_NOT_KNOWN).replication_state, STATE_NOT_KNOWN - ) - self.assertEqual(ClusterState(INITIALIZING).replication_state, INITIALIZING) - self.assertEqual( - ClusterState(PLANNED_MAINTENANCE).replication_state, PLANNED_MAINTENANCE - ) - self.assertEqual( - ClusterState(UNPLANNED_MAINTENANCE).replication_state, UNPLANNED_MAINTENANCE - ) - self.assertEqual(ClusterState(READY).replication_state, READY) + +def test_cluster_state___eq__type_differ(): + from google.cloud.bigtable.enums import Table as enum_table + from google.cloud.bigtable.table import ClusterState + + READY = enum_table.ReplicationState.READY + state1 = ClusterState(READY) + state2 = object() + assert not (state1 == state2) + + +def test_cluster_state___ne__same_value(): + from google.cloud.bigtable.enums import Table as enum_table + from google.cloud.bigtable.table import ClusterState + + READY = enum_table.ReplicationState.READY + state1 = ClusterState(READY) + state2 = ClusterState(READY) + assert not (state1 != state2) + + +def test_cluster_state___ne__(): + from google.cloud.bigtable.enums import Table as enum_table + from google.cloud.bigtable.table import ClusterState + + READY = enum_table.ReplicationState.READY + INITIALIZING = enum_table.ReplicationState.INITIALIZING + state1 = ClusterState(READY) + state2 = ClusterState(INITIALIZING) + assert state1 != state2 + + +def test_cluster_state__repr__(): + from google.cloud.bigtable.enums import Table as enum_table + from google.cloud.bigtable.table import ClusterState + + STATE_NOT_KNOWN = enum_table.ReplicationState.STATE_NOT_KNOWN + INITIALIZING = enum_table.ReplicationState.INITIALIZING + PLANNED_MAINTENANCE = enum_table.ReplicationState.PLANNED_MAINTENANCE + UNPLANNED_MAINTENANCE = enum_table.ReplicationState.UNPLANNED_MAINTENANCE + READY = enum_table.ReplicationState.READY + + replication_dict = { + STATE_NOT_KNOWN: "STATE_NOT_KNOWN", + INITIALIZING: "INITIALIZING", + PLANNED_MAINTENANCE: "PLANNED_MAINTENANCE", + UNPLANNED_MAINTENANCE: "UNPLANNED_MAINTENANCE", + READY: "READY", + } + + assert str(ClusterState(STATE_NOT_KNOWN)) == replication_dict[STATE_NOT_KNOWN] + assert str(ClusterState(INITIALIZING)) == replication_dict[INITIALIZING] + assert ( + str(ClusterState(PLANNED_MAINTENANCE)) == 
replication_dict[PLANNED_MAINTENANCE] + ) + assert ( + str(ClusterState(UNPLANNED_MAINTENANCE)) + == replication_dict[UNPLANNED_MAINTENANCE] + ) + assert str(ClusterState(READY)) == replication_dict[READY] + + assert ClusterState(STATE_NOT_KNOWN).replication_state == STATE_NOT_KNOWN + assert ClusterState(INITIALIZING).replication_state == INITIALIZING + assert ClusterState(PLANNED_MAINTENANCE).replication_state == PLANNED_MAINTENANCE + assert ( + ClusterState(UNPLANNED_MAINTENANCE).replication_state == UNPLANNED_MAINTENANCE + ) + assert ClusterState(READY).replication_state == READY def _ReadRowsResponseCellChunkPB(*args, **kw): From 0537dd2bab4eaaf3358c6a691d341d5d484847a7 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Wed, 10 Nov 2021 05:13:28 -0500 Subject: [PATCH 540/892] chore: use gapic-generator-python 0.56.2 (#472) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore: update Java and Python dependencies PiperOrigin-RevId: 408420890 Source-Link: https://github.com/googleapis/googleapis/commit/2921f9fb3bfbd16f6b2da0104373e2b47a80a65e Source-Link: https://github.com/googleapis/googleapis-gen/commit/6598ca8cbbf5226733a099c4506518a5af6ff74c Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiNjU5OGNhOGNiYmY1MjI2NzMzYTA5OWM0NTA2NTE4YTVhZjZmZjc0YyJ9 * 🦉 Updates from OwlBot See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md Co-authored-by: Owl Bot --- .../bigtable_instance_admin/async_client.py | 13 +- .../bigtable_instance_admin/client.py | 25 +- .../transports/base.py | 10 +- .../transports/grpc.py | 6 +- .../transports/grpc_asyncio.py | 6 +- .../bigtable_table_admin/async_client.py | 13 +- .../services/bigtable_table_admin/client.py | 25 +- .../bigtable_table_admin/transports/base.py | 10 +- .../bigtable_table_admin/transports/grpc.py | 6 +- .../transports/grpc_asyncio.py | 6 +- .../types/bigtable_table_admin.py | 6 + .../cloud/bigtable_admin_v2/types/instance.py | 2 + .../cloud/bigtable_admin_v2/types/table.py | 5 + .../services/bigtable/async_client.py | 13 +- .../bigtable_v2/services/bigtable/client.py | 25 +- .../services/bigtable/transports/base.py | 8 +- .../services/bigtable/transports/grpc.py | 4 +- .../bigtable/transports/grpc_asyncio.py | 4 +- .../cloud/bigtable_v2/types/bigtable.py | 2 + .../google/cloud/bigtable_v2/types/data.py | 37 +++ .../test_bigtable_instance_admin.py | 220 +++++++++++---- .../test_bigtable_table_admin.py | 264 +++++++++++++----- .../unit/gapic/bigtable_v2/test_bigtable.py | 176 ++++++++---- 23 files changed, 643 insertions(+), 243 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py index ed2a079c8a33..a3f4b2a727b6 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ -19,14 +19,17 @@ from typing import Dict, Sequence, Tuple, Type, Union import pkg_resources -from google.api_core.client_options import ClientOptions # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore +from 
google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore -OptionalRetry = Union[retries.Retry, object] +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore from google.api_core import operation # type: ignore from google.api_core import operation_async # type: ignore diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py index b1e168aad42c..ce245570e030 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -14,23 +14,25 @@ # limitations under the License. # from collections import OrderedDict -from distutils import util import os import re from typing import Dict, Optional, Sequence, Tuple, Type, Union import pkg_resources -from google.api_core import client_options as client_options_lib # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport import mtls # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore -OptionalRetry = Union[retries.Retry, object] +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore from google.api_core import operation # type: ignore from google.api_core import operation_async # type: ignore @@ -348,8 +350,15 @@ def __init__( client_options = client_options_lib.ClientOptions() # Create SSL credentials for mutual TLS if needed. 
- use_client_cert = bool( - util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) + if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ( + "true", + "false", + ): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + use_client_cert = ( + os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" ) client_cert_source_func = None diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py index c33e9a49c89b..98a4f334db05 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py @@ -18,11 +18,11 @@ import pkg_resources import google.auth # type: ignore -import google.api_core # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.api_core import operations_v1 # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.api_core import operations_v1 from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py index 0ffcb7e3ba66..b677cc40488c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py @@ -16,9 +16,9 @@ import warnings from typing import Callable, Dict, Optional, Sequence, Tuple, Union -from google.api_core import grpc_helpers # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.api_core import gapic_v1 # type: ignore +from google.api_core import grpc_helpers +from google.api_core import operations_v1 +from google.api_core import gapic_v1 import google.auth # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py index a94088e10ff1..de80dfbe0429 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py @@ -16,9 +16,9 @@ import warnings from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union -from google.api_core import gapic_v1 # type: ignore -from google.api_core import grpc_helpers_async # type: ignore -from google.api_core 
import operations_v1 # type: ignore +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.api_core import operations_v1 from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index d51852a50ef5..476a70ee7588 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -19,14 +19,17 @@ from typing import Dict, Sequence, Tuple, Type, Union import pkg_resources -from google.api_core.client_options import ClientOptions # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore -OptionalRetry = Union[retries.Retry, object] +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore from google.api_core import operation # type: ignore from google.api_core import operation_async # type: ignore diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index 3beafca3e1db..55e85f064987 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -14,23 +14,25 @@ # limitations under the License. 
# from collections import OrderedDict -from distutils import util import os import re from typing import Dict, Optional, Sequence, Tuple, Type, Union import pkg_resources -from google.api_core import client_options as client_options_lib # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport import mtls # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore -OptionalRetry = Union[retries.Retry, object] +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore from google.api_core import operation # type: ignore from google.api_core import operation_async # type: ignore @@ -385,8 +387,15 @@ def __init__( client_options = client_options_lib.ClientOptions() # Create SSL credentials for mutual TLS if needed. - use_client_cert = bool( - util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) + if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ( + "true", + "false", + ): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + use_client_cert = ( + os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" ) client_cert_source_func = None diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py index 903c596b6b96..10068063f646 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py @@ -18,11 +18,11 @@ import pkg_resources import google.auth # type: ignore -import google.api_core # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.api_core import operations_v1 # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.api_core import operations_v1 from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py index 7bf703af8e7e..a566aecf5e62 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py +++ 
b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py @@ -16,9 +16,9 @@ import warnings from typing import Callable, Dict, Optional, Sequence, Tuple, Union -from google.api_core import grpc_helpers # type: ignore -from google.api_core import operations_v1 # type: ignore -from google.api_core import gapic_v1 # type: ignore +from google.api_core import grpc_helpers +from google.api_core import operations_v1 +from google.api_core import gapic_v1 import google.auth # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py index c995f1673b05..f7fcd4435f07 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py @@ -16,9 +16,9 @@ import warnings from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union -from google.api_core import gapic_v1 # type: ignore -from google.api_core import grpc_helpers_async # type: ignore -from google.api_core import operations_v1 # type: ignore +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.api_core import operations_v1 from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py index dbafe2d9f77b..ebd54fcf9faf 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py @@ -80,6 +80,7 @@ class RestoreTableRequest(proto.Message): Name of the backup from which to restore. Values are of the form ``projects//instances//clusters//backups/``. + This field is a member of `oneof`_ ``source``. """ @@ -255,10 +256,12 @@ class DropRowRangeRequest(proto.Message): row_key_prefix (bytes): Delete all rows that start with this row key prefix. Prefix cannot be zero length. + This field is a member of `oneof`_ ``target``. delete_all_data_from_table (bool): Delete all rows in the table. Setting this to false is a no-op. + This field is a member of `oneof`_ ``target``. """ @@ -390,15 +393,18 @@ class Modification(proto.Message): Create a new column family with the specified schema, or fail if one already exists with the given ID. + This field is a member of `oneof`_ ``mod``. update (google.cloud.bigtable_admin_v2.types.ColumnFamily): Update an existing column family to the specified schema, or fail if no column family exists with the given ID. + This field is a member of `oneof`_ ``mod``. drop (bool): Drop (delete) the column family with the given ID, or fail if no such family exists. + This field is a member of `oneof`_ ``mod``. 
""" diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py index b278d9dd02ff..814c9d3bf641 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py @@ -185,9 +185,11 @@ class AppProfile(proto.Message): case for this AppProfile. multi_cluster_routing_use_any (google.cloud.bigtable_admin_v2.types.AppProfile.MultiClusterRoutingUseAny): Use a multi-cluster routing policy. + This field is a member of `oneof`_ ``routing_policy``. single_cluster_routing (google.cloud.bigtable_admin_v2.types.AppProfile.SingleClusterRouting): Use a single-cluster routing policy. + This field is a member of `oneof`_ ``routing_policy``. """ diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py index bc3d603cda60..c6cde8089c4d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py @@ -53,6 +53,7 @@ class RestoreInfo(proto.Message): backup_info (google.cloud.bigtable_admin_v2.types.BackupInfo): Information about the backup used to restore the table. The backup may no longer exist. + This field is a member of `oneof`_ ``source_info``. """ @@ -190,20 +191,24 @@ class GcRule(proto.Message): max_num_versions (int): Delete all cells in a column except the most recent N. + This field is a member of `oneof`_ ``rule``. max_age (google.protobuf.duration_pb2.Duration): Delete cells in a column older than the given age. Values must be at least one millisecond, and will be truncated to microsecond granularity. + This field is a member of `oneof`_ ``rule``. intersection (google.cloud.bigtable_admin_v2.types.GcRule.Intersection): Delete cells that would be deleted by every nested rule. + This field is a member of `oneof`_ ``rule``. union (google.cloud.bigtable_admin_v2.types.GcRule.Union): Delete cells that would be deleted by any nested rule. + This field is a member of `oneof`_ ``rule``. 
""" diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py index 948bf0da8b51..e0227e8578b3 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -19,14 +19,17 @@ from typing import Dict, AsyncIterable, Awaitable, Sequence, Tuple, Type, Union import pkg_resources -from google.api_core.client_options import ClientOptions # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore -OptionalRetry = Union[retries.Retry, object] +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore from google.cloud.bigtable_v2.types import bigtable from google.cloud.bigtable_v2.types import data diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py index 05466167c749..8dcfdb746143 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py @@ -14,23 +14,25 @@ # limitations under the License. # from collections import OrderedDict -from distutils import util import os import re from typing import Dict, Optional, Iterable, Sequence, Tuple, Type, Union import pkg_resources -from google.api_core import client_options as client_options_lib # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport import mtls # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore -OptionalRetry = Union[retries.Retry, object] +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore from google.cloud.bigtable_v2.types import bigtable from google.cloud.bigtable_v2.types import data @@ -283,8 +285,15 @@ def __init__( client_options = client_options_lib.ClientOptions() # Create SSL credentials for mutual TLS if needed. 
- use_client_cert = bool( - util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) + if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ( + "true", + "false", + ): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + use_client_cert = ( + os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" ) client_cert_source_func = None diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py index b1dc65d809ae..36f5a1a2e363 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py @@ -18,10 +18,10 @@ import pkg_resources import google.auth # type: ignore -import google.api_core # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py index fd9d1134fdb3..ce409fbf2faa 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py @@ -16,8 +16,8 @@ import warnings from typing import Callable, Dict, Optional, Sequence, Tuple, Union -from google.api_core import grpc_helpers # type: ignore -from google.api_core import gapic_v1 # type: ignore +from google.api_core import grpc_helpers +from google.api_core import gapic_v1 import google.auth # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py index fcaac9190b54..d6d46cb81f7a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py @@ -16,8 +16,8 @@ import warnings from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union -from google.api_core import gapic_v1 # type: ignore -from google.api_core import grpc_helpers_async # type: ignore +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py index 3aa0eceafbf9..55420ba60022 100644 --- 
a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py @@ -153,10 +153,12 @@ class CellChunk(proto.Message): reset_row (bool): Indicates that the client should drop all previous chunks for ``row_key``, as it will be re-read from the beginning. + This field is a member of `oneof`_ ``row_status``. commit_row (bool): Indicates that the client can safely process all previous chunks for ``row_key``, as its data has been fully read. + This field is a member of `oneof`_ ``row_status``. """ diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py index bdb037a64bb4..dbf63ead048e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py @@ -141,18 +141,22 @@ class RowRange(proto.Message): start_key_closed (bytes): Used when giving an inclusive lower bound for the range. + This field is a member of `oneof`_ ``start_key``. start_key_open (bytes): Used when giving an exclusive lower bound for the range. + This field is a member of `oneof`_ ``start_key``. end_key_open (bytes): Used when giving an exclusive upper bound for the range. + This field is a member of `oneof`_ ``end_key``. end_key_closed (bytes): Used when giving an inclusive upper bound for the range. + This field is a member of `oneof`_ ``end_key``. """ @@ -196,18 +200,22 @@ class ColumnRange(proto.Message): start_qualifier_closed (bytes): Used when giving an inclusive lower bound for the range. + This field is a member of `oneof`_ ``start_qualifier``. start_qualifier_open (bytes): Used when giving an exclusive lower bound for the range. + This field is a member of `oneof`_ ``start_qualifier``. end_qualifier_closed (bytes): Used when giving an inclusive upper bound for the range. + This field is a member of `oneof`_ ``end_qualifier``. end_qualifier_open (bytes): Used when giving an exclusive upper bound for the range. + This field is a member of `oneof`_ ``end_qualifier``. """ @@ -250,18 +258,22 @@ class ValueRange(proto.Message): start_value_closed (bytes): Used when giving an inclusive lower bound for the range. + This field is a member of `oneof`_ ``start_value``. start_value_open (bytes): Used when giving an exclusive lower bound for the range. + This field is a member of `oneof`_ ``start_value``. end_value_closed (bytes): Used when giving an inclusive upper bound for the range. + This field is a member of `oneof`_ ``end_value``. end_value_open (bytes): Used when giving an exclusive upper bound for the range. + This field is a member of `oneof`_ ``end_value``. """ @@ -321,15 +333,18 @@ class RowFilter(proto.Message): chain (google.cloud.bigtable_v2.types.RowFilter.Chain): Applies several RowFilters to the data in sequence, progressively narrowing the results. + This field is a member of `oneof`_ ``filter``. interleave (google.cloud.bigtable_v2.types.RowFilter.Interleave): Applies several RowFilters to the data in parallel and combines the results. + This field is a member of `oneof`_ ``filter``. condition (google.cloud.bigtable_v2.types.RowFilter.Condition): Applies one of two possible RowFilters to the data based on the output of a predicate RowFilter. + This field is a member of `oneof`_ ``filter``. sink (bool): ADVANCED USE ONLY. 
Hook for introspection into the @@ -397,16 +412,19 @@ class RowFilter(proto.Message): Cannot be used within the ``predicate_filter``, ``true_filter``, or ``false_filter`` of a [Condition][google.bigtable.v2.RowFilter.Condition]. + This field is a member of `oneof`_ ``filter``. pass_all_filter (bool): Matches all cells, regardless of input. Functionally equivalent to leaving ``filter`` unset, but included for completeness. + This field is a member of `oneof`_ ``filter``. block_all_filter (bool): Does not match any cells, regardless of input. Useful for temporarily disabling just part of a filter. + This field is a member of `oneof`_ ``filter``. row_key_regex_filter (bytes): Matches only cells from rows whose keys satisfy the given @@ -416,11 +434,13 @@ class RowFilter(proto.Message): ``\C`` escape sequence must be used if a true wildcard is desired. The ``.`` character will not match the new line character ``\n``, which may be present in a binary key. + This field is a member of `oneof`_ ``filter``. row_sample_filter (float): Matches all cells from a row with probability p, and matches no cells from the row with probability 1-p. + This field is a member of `oneof`_ ``filter``. family_name_regex_filter (str): Matches only cells from columns whose families satisfy the @@ -429,6 +449,7 @@ class RowFilter(proto.Message): a literal. Note that, since column families cannot contain the new line character ``\n``, it is sufficient to use ``.`` as a full wildcard when matching column family names. + This field is a member of `oneof`_ ``filter``. column_qualifier_regex_filter (bytes): Matches only cells from columns whose qualifiers satisfy the @@ -437,14 +458,17 @@ class RowFilter(proto.Message): used if a true wildcard is desired. The ``.`` character will not match the new line character ``\n``, which may be present in a binary qualifier. + This field is a member of `oneof`_ ``filter``. column_range_filter (google.cloud.bigtable_v2.types.ColumnRange): Matches only cells from columns within the given range. + This field is a member of `oneof`_ ``filter``. timestamp_range_filter (google.cloud.bigtable_v2.types.TimestampRange): Matches only cells with timestamps within the given range. + This field is a member of `oneof`_ ``filter``. value_regex_filter (bytes): Matches only cells with values that satisfy the given @@ -453,10 +477,12 @@ class RowFilter(proto.Message): a true wildcard is desired. The ``.`` character will not match the new line character ``\n``, which may be present in a binary value. + This field is a member of `oneof`_ ``filter``. value_range_filter (google.cloud.bigtable_v2.types.ValueRange): Matches only cells with values that fall within the given range. + This field is a member of `oneof`_ ``filter``. cells_per_row_offset_filter (int): Skips the first N cells of each row, matching @@ -464,12 +490,14 @@ class RowFilter(proto.Message): present, as is possible when using an Interleave, each copy of the cell is counted separately. + This field is a member of `oneof`_ ``filter``. cells_per_row_limit_filter (int): Matches only the first N cells of each row. If duplicate cells are present, as is possible when using an Interleave, each copy of the cell is counted separately. + This field is a member of `oneof`_ ``filter``. cells_per_column_limit_filter (int): Matches only the most recent N cells within each column. For @@ -479,10 +507,12 @@ class RowFilter(proto.Message): ``foo:bar2``. 
If duplicate cells are present, as is possible when using an Interleave, each copy of the cell is counted separately. + This field is a member of `oneof`_ ``filter``. strip_value_transformer (bool): Replaces each cell's value with the empty string. + This field is a member of `oneof`_ ``filter``. apply_label_transformer (str): Applies the given label to all cells in the output row. This @@ -499,6 +529,7 @@ class RowFilter(proto.Message): contain multiple ``apply_label_transformers``, as they will be applied to separate copies of the input. This may be relaxed in the future. + This field is a member of `oneof`_ ``filter``. """ @@ -627,15 +658,19 @@ class Mutation(proto.Message): Attributes: set_cell (google.cloud.bigtable_v2.types.Mutation.SetCell): Set a cell's value. + This field is a member of `oneof`_ ``mutation``. delete_from_column (google.cloud.bigtable_v2.types.Mutation.DeleteFromColumn): Deletes cells from a column. + This field is a member of `oneof`_ ``mutation``. delete_from_family (google.cloud.bigtable_v2.types.Mutation.DeleteFromFamily): Deletes cells from a column family. + This field is a member of `oneof`_ ``mutation``. delete_from_row (google.cloud.bigtable_v2.types.Mutation.DeleteFromRow): Deletes cells from the entire row. + This field is a member of `oneof`_ ``mutation``. """ @@ -741,6 +776,7 @@ class ReadModifyWriteRule(proto.Message): Rule specifying that ``append_value`` be appended to the existing value. If the targeted cell is unset, it will be treated as containing the empty string. + This field is a member of `oneof`_ ``rule``. increment_amount (int): Rule specifying that ``increment_amount`` be added to the @@ -748,6 +784,7 @@ class ReadModifyWriteRule(proto.Message): treated as containing a zero. Otherwise, the targeted cell must contain an 8-byte value (interpreted as a 64-bit big-endian signed integer), or the entire request will fail. + This field is a member of `oneof`_ ``rule``. """ diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index 944919359f2a..32eccdb61135 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -674,12 +674,18 @@ def test_create_instance_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].parent == "parent_value" - assert args[0].instance_id == "instance_id_value" - assert args[0].instance == gba_instance.Instance(name="name_value") - assert args[0].clusters == { - "key_value": gba_instance.Cluster(name="name_value") - } + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].instance_id + mock_val = "instance_id_value" + assert arg == mock_val + arg = args[0].instance + mock_val = gba_instance.Instance(name="name_value") + assert arg == mock_val + arg = args[0].clusters + mock_val = {"key_value": gba_instance.Cluster(name="name_value")} + assert arg == mock_val def test_create_instance_flattened_error(): @@ -726,12 +732,18 @@ async def test_create_instance_flattened_async(): # request object values. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].parent == "parent_value" - assert args[0].instance_id == "instance_id_value" - assert args[0].instance == gba_instance.Instance(name="name_value") - assert args[0].clusters == { - "key_value": gba_instance.Cluster(name="name_value") - } + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].instance_id + mock_val = "instance_id_value" + assert arg == mock_val + arg = args[0].instance + mock_val = gba_instance.Instance(name="name_value") + assert arg == mock_val + arg = args[0].clusters + mock_val = {"key_value": gba_instance.Cluster(name="name_value")} + assert arg == mock_val @pytest.mark.asyncio @@ -920,7 +932,9 @@ def test_get_instance_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val def test_get_instance_flattened_error(): @@ -956,7 +970,9 @@ async def test_get_instance_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val @pytest.mark.asyncio @@ -1136,7 +1152,9 @@ def test_list_instances_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].parent == "parent_value" + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val def test_list_instances_flattened_error(): @@ -1174,7 +1192,9 @@ async def test_list_instances_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].parent == "parent_value" + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val @pytest.mark.asyncio @@ -1511,8 +1531,12 @@ def test_partial_update_instance_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].instance == gba_instance.Instance(name="name_value") - assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"]) + arg = args[0].instance + mock_val = gba_instance.Instance(name="name_value") + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + assert arg == mock_val def test_partial_update_instance_flattened_error(): @@ -1557,8 +1581,12 @@ async def test_partial_update_instance_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].instance == gba_instance.Instance(name="name_value") - assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"]) + arg = args[0].instance + mock_val = gba_instance.Instance(name="name_value") + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + assert arg == mock_val @pytest.mark.asyncio @@ -1725,7 +1753,9 @@ def test_delete_instance_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val def test_delete_instance_flattened_error(): @@ -1761,7 +1791,9 @@ async def test_delete_instance_flattened_async(): # request object values. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val @pytest.mark.asyncio @@ -1934,9 +1966,15 @@ def test_create_cluster_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].parent == "parent_value" - assert args[0].cluster_id == "cluster_id_value" - assert args[0].cluster == instance.Cluster(name="name_value") + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].cluster_id + mock_val = "cluster_id_value" + assert arg == mock_val + arg = args[0].cluster + mock_val = instance.Cluster(name="name_value") + assert arg == mock_val def test_create_cluster_flattened_error(): @@ -1981,9 +2019,15 @@ async def test_create_cluster_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].parent == "parent_value" - assert args[0].cluster_id == "cluster_id_value" - assert args[0].cluster == instance.Cluster(name="name_value") + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].cluster_id + mock_val = "cluster_id_value" + assert arg == mock_val + arg = args[0].cluster + mock_val = instance.Cluster(name="name_value") + assert arg == mock_val @pytest.mark.asyncio @@ -2175,7 +2219,9 @@ def test_get_cluster_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val def test_get_cluster_flattened_error(): @@ -2211,7 +2257,9 @@ async def test_get_cluster_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val @pytest.mark.asyncio @@ -2391,7 +2439,9 @@ def test_list_clusters_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].parent == "parent_value" + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val def test_list_clusters_flattened_error(): @@ -2429,7 +2479,9 @@ async def test_list_clusters_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].parent == "parent_value" + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val @pytest.mark.asyncio @@ -2726,7 +2778,9 @@ def test_delete_cluster_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val def test_delete_cluster_flattened_error(): @@ -2762,7 +2816,9 @@ async def test_delete_cluster_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val @pytest.mark.asyncio @@ -2961,9 +3017,15 @@ def test_create_app_profile_flattened(): # request object values. 
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].parent == "parent_value" - assert args[0].app_profile_id == "app_profile_id_value" - assert args[0].app_profile == instance.AppProfile(name="name_value") + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].app_profile_id + mock_val = "app_profile_id_value" + assert arg == mock_val + arg = args[0].app_profile + mock_val = instance.AppProfile(name="name_value") + assert arg == mock_val def test_create_app_profile_flattened_error(): @@ -3008,9 +3070,15 @@ async def test_create_app_profile_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].parent == "parent_value" - assert args[0].app_profile_id == "app_profile_id_value" - assert args[0].app_profile == instance.AppProfile(name="name_value") + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].app_profile_id + mock_val = "app_profile_id_value" + assert arg == mock_val + arg = args[0].app_profile + mock_val = instance.AppProfile(name="name_value") + assert arg == mock_val @pytest.mark.asyncio @@ -3195,7 +3263,9 @@ def test_get_app_profile_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val def test_get_app_profile_flattened_error(): @@ -3231,7 +3301,9 @@ async def test_get_app_profile_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val @pytest.mark.asyncio @@ -3422,7 +3494,9 @@ def test_list_app_profiles_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].parent == "parent_value" + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val def test_list_app_profiles_flattened_error(): @@ -3462,7 +3536,9 @@ async def test_list_app_profiles_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].parent == "parent_value" + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val @pytest.mark.asyncio @@ -3813,8 +3889,12 @@ def test_update_app_profile_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].app_profile == instance.AppProfile(name="name_value") - assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"]) + arg = args[0].app_profile + mock_val = instance.AppProfile(name="name_value") + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + assert arg == mock_val def test_update_app_profile_flattened_error(): @@ -3859,8 +3939,12 @@ async def test_update_app_profile_flattened_async(): # request object values. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].app_profile == instance.AppProfile(name="name_value") - assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"]) + arg = args[0].app_profile + mock_val = instance.AppProfile(name="name_value") + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + assert arg == mock_val @pytest.mark.asyncio @@ -4040,7 +4124,9 @@ def test_delete_app_profile_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val def test_delete_app_profile_flattened_error(): @@ -4078,7 +4164,9 @@ async def test_delete_app_profile_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val @pytest.mark.asyncio @@ -4265,7 +4353,9 @@ def test_get_iam_policy_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].resource == "resource_value" + arg = args[0].resource + mock_val = "resource_value" + assert arg == mock_val def test_get_iam_policy_flattened_error(): @@ -4301,7 +4391,9 @@ async def test_get_iam_policy_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].resource == "resource_value" + arg = args[0].resource + mock_val = "resource_value" + assert arg == mock_val @pytest.mark.asyncio @@ -4488,7 +4580,9 @@ def test_set_iam_policy_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].resource == "resource_value" + arg = args[0].resource + mock_val = "resource_value" + assert arg == mock_val def test_set_iam_policy_flattened_error(): @@ -4524,7 +4618,9 @@ async def test_set_iam_policy_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].resource == "resource_value" + arg = args[0].resource + mock_val = "resource_value" + assert arg == mock_val @pytest.mark.asyncio @@ -4732,8 +4828,12 @@ def test_test_iam_permissions_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].resource == "resource_value" - assert args[0].permissions == ["permissions_value"] + arg = args[0].resource + mock_val = "resource_value" + assert arg == mock_val + arg = args[0].permissions + mock_val = ["permissions_value"] + assert arg == mock_val def test_test_iam_permissions_flattened_error(): @@ -4777,8 +4877,12 @@ async def test_test_iam_permissions_flattened_async(): # request object values. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].resource == "resource_value" - assert args[0].permissions == ["permissions_value"] + arg = args[0].resource + mock_val = "resource_value" + assert arg == mock_val + arg = args[0].permissions + mock_val = ["permissions_value"] + assert arg == mock_val @pytest.mark.asyncio diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index c4622b25305b..541acb903aa5 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -670,9 +670,15 @@ def test_create_table_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].parent == "parent_value" - assert args[0].table_id == "table_id_value" - assert args[0].table == gba_table.Table(name="name_value") + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].table_id + mock_val = "table_id_value" + assert arg == mock_val + arg = args[0].table + mock_val = gba_table.Table(name="name_value") + assert arg == mock_val def test_create_table_flattened_error(): @@ -715,9 +721,15 @@ async def test_create_table_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].parent == "parent_value" - assert args[0].table_id == "table_id_value" - assert args[0].table == gba_table.Table(name="name_value") + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].table_id + mock_val = "table_id_value" + assert arg == mock_val + arg = args[0].table + mock_val = gba_table.Table(name="name_value") + assert arg == mock_val @pytest.mark.asyncio @@ -906,9 +918,15 @@ def test_create_table_from_snapshot_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].parent == "parent_value" - assert args[0].table_id == "table_id_value" - assert args[0].source_snapshot == "source_snapshot_value" + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].table_id + mock_val = "table_id_value" + assert arg == mock_val + arg = args[0].source_snapshot + mock_val = "source_snapshot_value" + assert arg == mock_val def test_create_table_from_snapshot_flattened_error(): @@ -955,9 +973,15 @@ async def test_create_table_from_snapshot_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].parent == "parent_value" - assert args[0].table_id == "table_id_value" - assert args[0].source_snapshot == "source_snapshot_value" + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].table_id + mock_val = "table_id_value" + assert arg == mock_val + arg = args[0].source_snapshot + mock_val = "source_snapshot_value" + assert arg == mock_val @pytest.mark.asyncio @@ -1134,7 +1158,9 @@ def test_list_tables_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].parent == "parent_value" + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val def test_list_tables_flattened_error(): @@ -1172,7 +1198,9 @@ async def test_list_tables_flattened_async(): # request object values. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].parent == "parent_value" + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val @pytest.mark.asyncio @@ -1472,7 +1500,9 @@ def test_get_table_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val def test_get_table_flattened_error(): @@ -1508,7 +1538,9 @@ async def test_get_table_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val @pytest.mark.asyncio @@ -1673,7 +1705,9 @@ def test_delete_table_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val def test_delete_table_flattened_error(): @@ -1709,7 +1743,9 @@ async def test_delete_table_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val @pytest.mark.asyncio @@ -1904,10 +1940,14 @@ def test_modify_column_families_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" - assert args[0].modifications == [ + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + arg = args[0].modifications + mock_val = [ bigtable_table_admin.ModifyColumnFamiliesRequest.Modification(id="id_value") ] + assert arg == mock_val def test_modify_column_families_flattened_error(): @@ -1958,10 +1998,14 @@ async def test_modify_column_families_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" - assert args[0].modifications == [ + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + arg = args[0].modifications + mock_val = [ bigtable_table_admin.ModifyColumnFamiliesRequest.Modification(id="id_value") ] + assert arg == mock_val @pytest.mark.asyncio @@ -2286,7 +2330,9 @@ def test_generate_consistency_token_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val def test_generate_consistency_token_flattened_error(): @@ -2326,7 +2372,9 @@ async def test_generate_consistency_token_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val @pytest.mark.asyncio @@ -2513,8 +2561,12 @@ def test_check_consistency_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" - assert args[0].consistency_token == "consistency_token_value" + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + arg = args[0].consistency_token + mock_val = "consistency_token_value" + assert arg == mock_val def test_check_consistency_flattened_error(): @@ -2558,8 +2610,12 @@ async def test_check_consistency_flattened_async(): # request object values. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" - assert args[0].consistency_token == "consistency_token_value" + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + arg = args[0].consistency_token + mock_val = "consistency_token_value" + assert arg == mock_val @pytest.mark.asyncio @@ -2735,10 +2791,18 @@ def test_snapshot_table_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" - assert args[0].cluster == "cluster_value" - assert args[0].snapshot_id == "snapshot_id_value" - assert args[0].description == "description_value" + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + arg = args[0].cluster + mock_val = "cluster_value" + assert arg == mock_val + arg = args[0].snapshot_id + mock_val = "snapshot_id_value" + assert arg == mock_val + arg = args[0].description + mock_val = "description_value" + assert arg == mock_val def test_snapshot_table_flattened_error(): @@ -2785,10 +2849,18 @@ async def test_snapshot_table_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" - assert args[0].cluster == "cluster_value" - assert args[0].snapshot_id == "snapshot_id_value" - assert args[0].description == "description_value" + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + arg = args[0].cluster + mock_val = "cluster_value" + assert arg == mock_val + arg = args[0].snapshot_id + mock_val = "snapshot_id_value" + assert arg == mock_val + arg = args[0].description + mock_val = "description_value" + assert arg == mock_val @pytest.mark.asyncio @@ -2977,7 +3049,9 @@ def test_get_snapshot_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val def test_get_snapshot_flattened_error(): @@ -3013,7 +3087,9 @@ async def test_get_snapshot_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val @pytest.mark.asyncio @@ -3188,7 +3264,9 @@ def test_list_snapshots_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].parent == "parent_value" + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val def test_list_snapshots_flattened_error(): @@ -3226,7 +3304,9 @@ async def test_list_snapshots_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].parent == "parent_value" + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val @pytest.mark.asyncio @@ -3525,7 +3605,9 @@ def test_delete_snapshot_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val def test_delete_snapshot_flattened_error(): @@ -3561,7 +3643,9 @@ async def test_delete_snapshot_flattened_async(): # request object values. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val @pytest.mark.asyncio @@ -3734,9 +3818,15 @@ def test_create_backup_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].parent == "parent_value" - assert args[0].backup_id == "backup_id_value" - assert args[0].backup == table.Backup(name="name_value") + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].backup_id + mock_val = "backup_id_value" + assert arg == mock_val + arg = args[0].backup + mock_val = table.Backup(name="name_value") + assert arg == mock_val def test_create_backup_flattened_error(): @@ -3781,9 +3871,15 @@ async def test_create_backup_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].parent == "parent_value" - assert args[0].backup_id == "backup_id_value" - assert args[0].backup == table.Backup(name="name_value") + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].backup_id + mock_val = "backup_id_value" + assert arg == mock_val + arg = args[0].backup + mock_val = table.Backup(name="name_value") + assert arg == mock_val @pytest.mark.asyncio @@ -3970,7 +4066,9 @@ def test_get_backup_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val def test_get_backup_flattened_error(): @@ -4006,7 +4104,9 @@ async def test_get_backup_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val @pytest.mark.asyncio @@ -4194,8 +4294,12 @@ def test_update_backup_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].backup == table.Backup(name="name_value") - assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"]) + arg = args[0].backup + mock_val = table.Backup(name="name_value") + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + assert arg == mock_val def test_update_backup_flattened_error(): @@ -4236,8 +4340,12 @@ async def test_update_backup_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].backup == table.Backup(name="name_value") - assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"]) + arg = args[0].backup + mock_val = table.Backup(name="name_value") + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + assert arg == mock_val @pytest.mark.asyncio @@ -4404,7 +4512,9 @@ def test_delete_backup_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val def test_delete_backup_flattened_error(): @@ -4440,7 +4550,9 @@ async def test_delete_backup_flattened_async(): # request object values. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].name == "name_value" + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val @pytest.mark.asyncio @@ -4615,7 +4727,9 @@ def test_list_backups_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].parent == "parent_value" + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val def test_list_backups_flattened_error(): @@ -4653,7 +4767,9 @@ async def test_list_backups_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].parent == "parent_value" + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val @pytest.mark.asyncio @@ -5109,7 +5225,9 @@ def test_get_iam_policy_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].resource == "resource_value" + arg = args[0].resource + mock_val = "resource_value" + assert arg == mock_val def test_get_iam_policy_flattened_error(): @@ -5145,7 +5263,9 @@ async def test_get_iam_policy_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].resource == "resource_value" + arg = args[0].resource + mock_val = "resource_value" + assert arg == mock_val @pytest.mark.asyncio @@ -5332,7 +5452,9 @@ def test_set_iam_policy_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].resource == "resource_value" + arg = args[0].resource + mock_val = "resource_value" + assert arg == mock_val def test_set_iam_policy_flattened_error(): @@ -5368,7 +5490,9 @@ async def test_set_iam_policy_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].resource == "resource_value" + arg = args[0].resource + mock_val = "resource_value" + assert arg == mock_val @pytest.mark.asyncio @@ -5576,8 +5700,12 @@ def test_test_iam_permissions_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].resource == "resource_value" - assert args[0].permissions == ["permissions_value"] + arg = args[0].resource + mock_val = "resource_value" + assert arg == mock_val + arg = args[0].permissions + mock_val = ["permissions_value"] + assert arg == mock_val def test_test_iam_permissions_flattened_error(): @@ -5621,8 +5749,12 @@ async def test_test_iam_permissions_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].resource == "resource_value" - assert args[0].permissions == ["permissions_value"] + arg = args[0].resource + mock_val = "resource_value" + assert arg == mock_val + arg = args[0].permissions + mock_val = ["permissions_value"] + assert arg == mock_val @pytest.mark.asyncio diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py index 580d4ec4e189..0339d130d410 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py @@ -597,8 +597,12 @@ def test_read_rows_flattened(): # request object values. 
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].table_name == "table_name_value" - assert args[0].app_profile_id == "app_profile_id_value" + arg = args[0].table_name + mock_val = "table_name_value" + assert arg == mock_val + arg = args[0].app_profile_id + mock_val = "app_profile_id_value" + assert arg == mock_val def test_read_rows_flattened_error(): @@ -634,8 +638,12 @@ async def test_read_rows_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].table_name == "table_name_value" - assert args[0].app_profile_id == "app_profile_id_value" + arg = args[0].table_name + mock_val = "table_name_value" + assert arg == mock_val + arg = args[0].app_profile_id + mock_val = "app_profile_id_value" + assert arg == mock_val @pytest.mark.asyncio @@ -803,8 +811,12 @@ def test_sample_row_keys_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].table_name == "table_name_value" - assert args[0].app_profile_id == "app_profile_id_value" + arg = args[0].table_name + mock_val = "table_name_value" + assert arg == mock_val + arg = args[0].app_profile_id + mock_val = "app_profile_id_value" + assert arg == mock_val def test_sample_row_keys_flattened_error(): @@ -840,8 +852,12 @@ async def test_sample_row_keys_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].table_name == "table_name_value" - assert args[0].app_profile_id == "app_profile_id_value" + arg = args[0].table_name + mock_val = "table_name_value" + assert arg == mock_val + arg = args[0].app_profile_id + mock_val = "app_profile_id_value" + assert arg == mock_val @pytest.mark.asyncio @@ -1010,14 +1026,22 @@ def test_mutate_row_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].table_name == "table_name_value" - assert args[0].row_key == b"row_key_blob" - assert args[0].mutations == [ + arg = args[0].table_name + mock_val = "table_name_value" + assert arg == mock_val + arg = args[0].row_key + mock_val = b"row_key_blob" + assert arg == mock_val + arg = args[0].mutations + mock_val = [ data.Mutation( set_cell=data.Mutation.SetCell(family_name="family_name_value") ) ] - assert args[0].app_profile_id == "app_profile_id_value" + assert arg == mock_val + arg = args[0].app_profile_id + mock_val = "app_profile_id_value" + assert arg == mock_val def test_mutate_row_flattened_error(): @@ -1068,14 +1092,22 @@ async def test_mutate_row_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].table_name == "table_name_value" - assert args[0].row_key == b"row_key_blob" - assert args[0].mutations == [ + arg = args[0].table_name + mock_val = "table_name_value" + assert arg == mock_val + arg = args[0].row_key + mock_val = b"row_key_blob" + assert arg == mock_val + arg = args[0].mutations + mock_val = [ data.Mutation( set_cell=data.Mutation.SetCell(family_name="family_name_value") ) ] - assert args[0].app_profile_id == "app_profile_id_value" + assert arg == mock_val + arg = args[0].app_profile_id + mock_val = "app_profile_id_value" + assert arg == mock_val @pytest.mark.asyncio @@ -1249,11 +1281,15 @@ def test_mutate_rows_flattened(): # request object values. 
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].table_name == "table_name_value" - assert args[0].entries == [ - bigtable.MutateRowsRequest.Entry(row_key=b"row_key_blob") - ] - assert args[0].app_profile_id == "app_profile_id_value" + arg = args[0].table_name + mock_val = "table_name_value" + assert arg == mock_val + arg = args[0].entries + mock_val = [bigtable.MutateRowsRequest.Entry(row_key=b"row_key_blob")] + assert arg == mock_val + arg = args[0].app_profile_id + mock_val = "app_profile_id_value" + assert arg == mock_val def test_mutate_rows_flattened_error(): @@ -1292,11 +1328,15 @@ async def test_mutate_rows_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].table_name == "table_name_value" - assert args[0].entries == [ - bigtable.MutateRowsRequest.Entry(row_key=b"row_key_blob") - ] - assert args[0].app_profile_id == "app_profile_id_value" + arg = args[0].table_name + mock_val = "table_name_value" + assert arg == mock_val + arg = args[0].entries + mock_val = [bigtable.MutateRowsRequest.Entry(row_key=b"row_key_blob")] + assert arg == mock_val + arg = args[0].app_profile_id + mock_val = "app_profile_id_value" + assert arg == mock_val @pytest.mark.asyncio @@ -1498,9 +1538,14 @@ def test_check_and_mutate_row_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].table_name == "table_name_value" - assert args[0].row_key == b"row_key_blob" - assert args[0].predicate_filter == data.RowFilter( + arg = args[0].table_name + mock_val = "table_name_value" + assert arg == mock_val + arg = args[0].row_key + mock_val = b"row_key_blob" + assert arg == mock_val + arg = args[0].predicate_filter + mock_val = data.RowFilter( chain=data.RowFilter.Chain( filters=[ data.RowFilter( @@ -1509,17 +1554,24 @@ def test_check_and_mutate_row_flattened(): ] ) ) - assert args[0].true_mutations == [ + assert arg == mock_val + arg = args[0].true_mutations + mock_val = [ data.Mutation( set_cell=data.Mutation.SetCell(family_name="family_name_value") ) ] - assert args[0].false_mutations == [ + assert arg == mock_val + arg = args[0].false_mutations + mock_val = [ data.Mutation( set_cell=data.Mutation.SetCell(family_name="family_name_value") ) ] - assert args[0].app_profile_id == "app_profile_id_value" + assert arg == mock_val + arg = args[0].app_profile_id + mock_val = "app_profile_id_value" + assert arg == mock_val def test_check_and_mutate_row_flattened_error(): @@ -1604,9 +1656,14 @@ async def test_check_and_mutate_row_flattened_async(): # request object values. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].table_name == "table_name_value" - assert args[0].row_key == b"row_key_blob" - assert args[0].predicate_filter == data.RowFilter( + arg = args[0].table_name + mock_val = "table_name_value" + assert arg == mock_val + arg = args[0].row_key + mock_val = b"row_key_blob" + assert arg == mock_val + arg = args[0].predicate_filter + mock_val = data.RowFilter( chain=data.RowFilter.Chain( filters=[ data.RowFilter( @@ -1615,17 +1672,24 @@ async def test_check_and_mutate_row_flattened_async(): ] ) ) - assert args[0].true_mutations == [ + assert arg == mock_val + arg = args[0].true_mutations + mock_val = [ data.Mutation( set_cell=data.Mutation.SetCell(family_name="family_name_value") ) ] - assert args[0].false_mutations == [ + assert arg == mock_val + arg = args[0].false_mutations + mock_val = [ data.Mutation( set_cell=data.Mutation.SetCell(family_name="family_name_value") ) ] - assert args[0].app_profile_id == "app_profile_id_value" + assert arg == mock_val + arg = args[0].app_profile_id + mock_val = "app_profile_id_value" + assert arg == mock_val @pytest.mark.asyncio @@ -1826,12 +1890,18 @@ def test_read_modify_write_row_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0].table_name == "table_name_value" - assert args[0].row_key == b"row_key_blob" - assert args[0].rules == [ - data.ReadModifyWriteRule(family_name="family_name_value") - ] - assert args[0].app_profile_id == "app_profile_id_value" + arg = args[0].table_name + mock_val = "table_name_value" + assert arg == mock_val + arg = args[0].row_key + mock_val = b"row_key_blob" + assert arg == mock_val + arg = args[0].rules + mock_val = [data.ReadModifyWriteRule(family_name="family_name_value")] + assert arg == mock_val + arg = args[0].app_profile_id + mock_val = "app_profile_id_value" + assert arg == mock_val def test_read_modify_write_row_flattened_error(): @@ -1876,12 +1946,18 @@ async def test_read_modify_write_row_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0].table_name == "table_name_value" - assert args[0].row_key == b"row_key_blob" - assert args[0].rules == [ - data.ReadModifyWriteRule(family_name="family_name_value") - ] - assert args[0].app_profile_id == "app_profile_id_value" + arg = args[0].table_name + mock_val = "table_name_value" + assert arg == mock_val + arg = args[0].row_key + mock_val = b"row_key_blob" + assert arg == mock_val + arg = args[0].rules + mock_val = [data.ReadModifyWriteRule(family_name="family_name_value")] + assert arg == mock_val + arg = args[0].app_profile_id + mock_val = "app_profile_id_value" + assert arg == mock_val @pytest.mark.asyncio From 51bde1e2929dbdea448db3e84408ecc330d32130 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Tue, 21 Dec 2021 13:44:13 -0500 Subject: [PATCH 541/892] ci: expand mypy coverage (#476) * Work around invalid generated typing for map fields See: https://github.com/googleapis/gapic-generator-python/issues/689 * Restore missing namespace marker file * Get mypy running against all modules in 'google/' / 'tests/' Note we still exclude generated tests, due to broken typing emitted from the gapic-generator. 
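As a rough illustration of the map-field typing workaround mentioned above — a minimal sketch, not part of this patch, with hypothetical project/instance/cluster IDs — proto-plus surfaces the ``clusters`` map field as a plain dict keyed by cluster ID, which is why the flattened parameter is annotated as ``Dict[str, Cluster]`` rather than a ``Sequence`` of ``ClustersEntry`` messages:

    # Minimal sketch (assumed usage, not part of the patch): build a
    # CreateInstanceRequest locally to show that the proto map field behaves
    # like a dict, matching the Dict[str, Cluster] annotation used for the
    # flattened ``clusters`` parameter. All IDs below are hypothetical.
    from google.cloud.bigtable_admin_v2 import types

    request = types.CreateInstanceRequest(
        parent="projects/my-project",
        instance_id="my-instance",
        instance=types.Instance(display_name="My instance"),
        clusters={
            "my-cluster": types.Cluster(
                location="projects/my-project/locations/us-central1-b",
                serve_nodes=3,
            ),
        },
    )

    # Map fields read back with dict semantics.
    assert request.clusters["my-cluster"].serve_nodes == 3

Passing the same dict through the flattened ``clusters`` argument of ``create_instance`` is what the ``Dict[str, gba_instance.Cluster]`` annotation in the hunks below is meant to type-check, whereas the previous ``Sequence[CreateInstanceRequest.ClustersEntry]`` annotation did not match what callers actually pass.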
--- packages/google-cloud-bigtable/.coveragerc | 1 + .../google-cloud-bigtable/google/__init__.py | 10 +++++ .../bigtable_instance_admin/async_client.py | 6 +-- .../bigtable_instance_admin/client.py | 6 +-- packages/google-cloud-bigtable/mypy.ini | 24 ++++++++++- packages/google-cloud-bigtable/noxfile.py | 5 ++- packages/google-cloud-bigtable/owlbot.py | 42 ++++++++++++++++++- 7 files changed, 81 insertions(+), 13 deletions(-) create mode 100644 packages/google-cloud-bigtable/google/__init__.py diff --git a/packages/google-cloud-bigtable/.coveragerc b/packages/google-cloud-bigtable/.coveragerc index 1ba5bb57db4b..9b0751055ecc 100644 --- a/packages/google-cloud-bigtable/.coveragerc +++ b/packages/google-cloud-bigtable/.coveragerc @@ -19,6 +19,7 @@ branch = True omit = google/cloud/__init__.py + google/__init__.py [report] fail_under = 100 diff --git a/packages/google-cloud-bigtable/google/__init__.py b/packages/google-cloud-bigtable/google/__init__.py new file mode 100644 index 000000000000..ced5017a1018 --- /dev/null +++ b/packages/google-cloud-bigtable/google/__init__.py @@ -0,0 +1,10 @@ +from typing import List + +try: + import pkg_resources + + pkg_resources.declare_namespace(__name__) +except ImportError: + import pkgutil + + __path__: List[str] = pkgutil.extend_path(__path__, __name__) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py index a3f4b2a727b6..6fdda38faab9 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ -199,9 +199,7 @@ async def create_instance( parent: str = None, instance_id: str = None, instance: gba_instance.Instance = None, - clusters: Sequence[ - bigtable_instance_admin.CreateInstanceRequest.ClustersEntry - ] = None, + clusters: Dict[str, gba_instance.Cluster] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -235,7 +233,7 @@ async def create_instance( This corresponds to the ``instance`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - clusters (:class:`Sequence[google.cloud.bigtable_admin_v2.types.CreateInstanceRequest.ClustersEntry]`): + clusters (Dict[str, gba_instance.Cluster]): Required. 
The clusters to be created within the instance, mapped by desired cluster ID, e.g., just ``mycluster`` rather than diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py index ce245570e030..fca8eed0389e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -430,9 +430,7 @@ def create_instance( parent: str = None, instance_id: str = None, instance: gba_instance.Instance = None, - clusters: Sequence[ - bigtable_instance_admin.CreateInstanceRequest.ClustersEntry - ] = None, + clusters: Dict[str, gba_instance.Cluster] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -466,7 +464,7 @@ def create_instance( This corresponds to the ``instance`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - clusters (Sequence[google.cloud.bigtable_admin_v2.types.CreateInstanceRequest.ClustersEntry]): + clusters (Dict[str, gba_instance.Cluster]): Required. The clusters to be created within the instance, mapped by desired cluster ID, e.g., just ``mycluster`` rather than diff --git a/packages/google-cloud-bigtable/mypy.ini b/packages/google-cloud-bigtable/mypy.ini index 9aef441decc5..f12ed46fc21a 100644 --- a/packages/google-cloud-bigtable/mypy.ini +++ b/packages/google-cloud-bigtable/mypy.ini @@ -1,6 +1,28 @@ [mypy] python_version = 3.6 namespace_packages = True +exclude = tests/unit/gapic/ -[mypy-google.protobuf] +[mypy-grpc.*] +ignore_missing_imports = True + +[mypy-google.auth.*] +ignore_missing_imports = True + +[mypy-google.iam.*] +ignore_missing_imports = True + +[mypy-google.longrunning.*] +ignore_missing_imports = True + +[mypy-google.oauth2.*] +ignore_missing_imports = True + +[mypy-google.rpc.*] +ignore_missing_imports = True + +[mypy-proto.*] +ignore_missing_imports = True + +[mypy-pytest] ignore_missing_imports = True diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index 11ec0e948aea..6ae044f00c8a 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -77,9 +77,10 @@ def blacken(session): def mypy(session): """Verify type hints are mypy compatible.""" session.install("-e", ".") - session.install("mypy", "types-setuptools") + session.install("mypy", "types-setuptools", "types-protobuf", "types-mock") + session.install("google-cloud-testutils") # TODO: also verify types on tests, all of google package - session.run("mypy", "-p", "google.cloud.bigtable", "--no-incremental") + session.run("mypy", "google/", "tests/") @nox.session(python=DEFAULT_PYTHON_VERSION) diff --git a/packages/google-cloud-bigtable/owlbot.py b/packages/google-cloud-bigtable/owlbot.py index 4386284137df..ca452ddf36b2 100644 --- a/packages/google-cloud-bigtable/owlbot.py +++ b/packages/google-cloud-bigtable/owlbot.py @@ -15,6 +15,7 @@ """This script is used to synthesize generated parts of this library.""" from pathlib import Path +import re from typing import List, Optional import synthtool as s @@ -165,9 +166,10 @@ def lint_setup_py\(session\): def mypy(session): """Verify type hints are mypy compatible.""" session.install("-e", ".") - session.install("mypy", "types-setuptools") + 
session.install("mypy", "types-setuptools", "types-protobuf", "types-mock") + session.install("google-cloud-testutils") # TODO: also verify types on tests, all of google package - session.run("mypy", "-p", "google.cloud.bigtable", "--no-incremental") + session.run("mypy", "google/", "tests/") @nox.session(python=DEFAULT_PYTHON_VERSION) @@ -175,6 +177,42 @@ def lint_setup_py(session): ''', ) +# Work around https://github.com/googleapis/gapic-generator-python/issues/689 +bad_clusters_typing = r""" + clusters: Sequence\[ + bigtable_instance_admin\.CreateInstanceRequest\.ClustersEntry + \] = None,""" + +good_clusters_typing = """ + clusters: Dict[str, gba_instance.Cluster] = None,""" + +s.replace( + "google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/*client.py", + bad_clusters_typing, + good_clusters_typing, +) + +bad_clusters_docstring_1 = re.escape(r""" + clusters (:class:`Sequence[google.cloud.bigtable_admin_v2.types.CreateInstanceRequest.ClustersEntry]`):""") + +bad_clusters_docstring_2 = re.escape(r""" + clusters (Sequence[google.cloud.bigtable_admin_v2.types.CreateInstanceRequest.ClustersEntry]):""") + +good_clusters_docstring = """ + clusters (Dict[str, gba_instance.Cluster]):""" + +s.replace( + "google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/*client.py", + bad_clusters_docstring_1, + good_clusters_docstring, +) + +s.replace( + "google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/*client.py", + bad_clusters_docstring_2, + good_clusters_docstring, +) + # ---------------------------------------------------------------------------- # Samples templates # ---------------------------------------------------------------------------- From 1192a37d4b96a986dc944f07c0877d9963749d49 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Tue, 21 Dec 2021 16:42:51 -0500 Subject: [PATCH 542/892] chore: update python-docs-samples link to main branch (#479) Source-Link: https://github.com/googleapis/synthtool/commit/0941ef32b18aff0be34a40404f3971d9f51996e9 Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:2f90537dd7df70f6b663cd654b1fa5dee483cf6a4edcfd46072b2775be8a23ec Co-authored-by: Owl Bot --- packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml | 2 +- packages/google-cloud-bigtable/samples/AUTHORING_GUIDE.md | 2 +- packages/google-cloud-bigtable/samples/CONTRIBUTING.md | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index cb89b2e326b7..0b3c8cd98f89 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -1,3 +1,3 @@ docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:ec49167c606648a063d1222220b48119c912562849a0528f35bfb592a9f72737 + digest: sha256:2f90537dd7df70f6b663cd654b1fa5dee483cf6a4edcfd46072b2775be8a23ec diff --git a/packages/google-cloud-bigtable/samples/AUTHORING_GUIDE.md b/packages/google-cloud-bigtable/samples/AUTHORING_GUIDE.md index 55c97b32f4c1..8249522ffc2d 100644 --- a/packages/google-cloud-bigtable/samples/AUTHORING_GUIDE.md +++ b/packages/google-cloud-bigtable/samples/AUTHORING_GUIDE.md @@ -1 +1 @@ -See https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/AUTHORING_GUIDE.md \ No newline at end of file +See 
https://github.com/GoogleCloudPlatform/python-docs-samples/blob/main/AUTHORING_GUIDE.md \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/CONTRIBUTING.md b/packages/google-cloud-bigtable/samples/CONTRIBUTING.md index 34c882b6f1a3..f5fe2e6baf13 100644 --- a/packages/google-cloud-bigtable/samples/CONTRIBUTING.md +++ b/packages/google-cloud-bigtable/samples/CONTRIBUTING.md @@ -1 +1 @@ -See https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/CONTRIBUTING.md \ No newline at end of file +See https://github.com/GoogleCloudPlatform/python-docs-samples/blob/main/CONTRIBUTING.md \ No newline at end of file From 0fa5ad8fa8a808e710950e174b88ccf43d5d35e2 Mon Sep 17 00:00:00 2001 From: Tres Seaver Date: Wed, 12 Jan 2022 05:52:00 -0500 Subject: [PATCH 543/892] ci: run samples under Python 3.9 / 3.10 (#478) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * ci: run samples under Python 3.9 / 3.10 Refresh each sample's noxfile via: ----------------------------- %< ----------------------------- $ for noxfile in samples/*/noxfile.py; do echo "Refreshing $noxfile"; wget -O $noxfile https://github.com/GoogleCloudPlatform/python-docs-samples/raw/main/noxfile-template.py echo "Blackening samples for $noxfile" nox -f $noxfile -s blacken done ----------------------------- %< ----------------------------- Closes #477. * fix: disable install-from-sorce for beam sample Per #203. * fix: skip beam sample for Python 3.10 Beam-related wheels are not yet available. * fix: also refresh noxfiles for 'samples/snippets' * ci: don't enforce type hints on old samples * resolve issue where samples templates are not updated * 🦉 Updates from OwlBot See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * resolve mypy error Name __path__ already defined * add workaroud from PR #203 Co-authored-by: Anthonios Partheniou Co-authored-by: Owl Bot --- .../google-cloud-bigtable/google/__init__.py | 6 +- .../google/cloud/__init__.py | 6 +- packages/google-cloud-bigtable/owlbot.py | 23 +-- .../samples/beam/hello_world_write.py | 40 +++-- .../samples/beam/hello_world_write_test.py | 19 +- .../samples/beam/noxfile.py | 99 ++++++++--- .../samples/beam/noxfile_config.py | 45 +++++ .../samples/hello/main.py | 51 +++--- .../samples/hello/main_test.py | 23 ++- .../samples/hello/noxfile.py | 104 ++++++++--- .../samples/hello_happybase/main.py | 55 +++--- .../samples/hello_happybase/main_test.py | 28 ++- .../samples/hello_happybase/noxfile.py | 104 ++++++++--- .../samples/instanceadmin/noxfile.py | 104 ++++++++--- .../samples/metricscaler/metricscaler.py | 119 +++++++------ .../samples/metricscaler/metricscaler_test.py | 92 +++++----- .../samples/metricscaler/noxfile.py | 104 ++++++++--- .../samples/quickstart/main.py | 31 ++-- .../samples/quickstart/main_test.py | 10 +- .../samples/quickstart/noxfile.py | 104 ++++++++--- .../samples/quickstart_happybase/main.py | 29 ++- .../samples/quickstart_happybase/main_test.py | 10 +- .../samples/quickstart_happybase/noxfile.py | 104 ++++++++--- .../snippets/filters/filter_snippets.py | 99 +++++++---- .../samples/snippets/filters/filters_test.py | 70 +++----- .../samples/snippets/filters/noxfile.py | 104 ++++++++--- .../samples/snippets/reads/noxfile.py | 104 ++++++++--- .../samples/snippets/reads/read_snippets.py | 36 ++-- .../samples/snippets/reads/reads_test.py | 8 +- .../samples/snippets/writes/noxfile.py | 104 ++++++++--- .../samples/snippets/writes/write_batch.py | 32 ++-- 
.../snippets/writes/write_conditionally.py | 18 +- .../snippets/writes/write_increment.py | 4 +- .../samples/snippets/writes/write_simple.py | 19 +- .../samples/snippets/writes/writes_test.py | 16 +- .../samples/tableadmin/noxfile.py | 104 ++++++++--- .../samples/tableadmin/tableadmin.py | 166 ++++++++++-------- .../samples/tableadmin/tableadmin_test.py | 44 ++--- 38 files changed, 1395 insertions(+), 843 deletions(-) create mode 100644 packages/google-cloud-bigtable/samples/beam/noxfile_config.py diff --git a/packages/google-cloud-bigtable/google/__init__.py b/packages/google-cloud-bigtable/google/__init__.py index ced5017a1018..a5ba8065626d 100644 --- a/packages/google-cloud-bigtable/google/__init__.py +++ b/packages/google-cloud-bigtable/google/__init__.py @@ -1,10 +1,6 @@ -from typing import List - try: import pkg_resources pkg_resources.declare_namespace(__name__) except ImportError: - import pkgutil - - __path__: List[str] = pkgutil.extend_path(__path__, __name__) + pass diff --git a/packages/google-cloud-bigtable/google/cloud/__init__.py b/packages/google-cloud-bigtable/google/cloud/__init__.py index ced5017a1018..a5ba8065626d 100644 --- a/packages/google-cloud-bigtable/google/cloud/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/__init__.py @@ -1,10 +1,6 @@ -from typing import List - try: import pkg_resources pkg_resources.declare_namespace(__name__) except ImportError: - import pkgutil - - __path__: List[str] = pkgutil.extend_path(__path__, __name__) + pass diff --git a/packages/google-cloud-bigtable/owlbot.py b/packages/google-cloud-bigtable/owlbot.py index ca452ddf36b2..6ab6579e1695 100644 --- a/packages/google-cloud-bigtable/owlbot.py +++ b/packages/google-cloud-bigtable/owlbot.py @@ -217,20 +217,15 @@ def lint_setup_py(session): # Samples templates # ---------------------------------------------------------------------------- -sample_files = common.py_samples(samples=True) -for path in sample_files: - s.move(path) - -# Note: python-docs-samples is not yet using 'main': -#s.replace( -# "samples/**/*.md", -# r"python-docs-samples/blob/master/", -# "python-docs-samples/blob/main/", -#) +python.py_samples(skip_readmes=True) + s.replace( - "samples/**/*.md", - r"google-cloud-python/blob/master/", - "google-cloud-python/blob/main/", -) + "samples/beam/noxfile.py", + """INSTALL_LIBRARY_FROM_SOURCE \= os.environ.get\("INSTALL_LIBRARY_FROM_SOURCE", False\) in \( + "True", + "true", +\)""", + """# todo(kolea2): temporary workaround to install pinned dep version +INSTALL_LIBRARY_FROM_SOURCE = False""") s.shell.run(["nox", "-s", "blacken"], hide_output=False) diff --git a/packages/google-cloud-bigtable/samples/beam/hello_world_write.py b/packages/google-cloud-bigtable/samples/beam/hello_world_write.py index 894edc46fb73..89f541d0d190 100644 --- a/packages/google-cloud-bigtable/samples/beam/hello_world_write.py +++ b/packages/google-cloud-bigtable/samples/beam/hello_world_write.py @@ -23,28 +23,29 @@ class BigtableOptions(PipelineOptions): @classmethod def _add_argparse_args(cls, parser): parser.add_argument( - '--bigtable-project', - help='The Bigtable project ID, this can be different than your ' - 'Dataflow project', - default='bigtable-project') + "--bigtable-project", + help="The Bigtable project ID, this can be different than your " + "Dataflow project", + default="bigtable-project", + ) parser.add_argument( - '--bigtable-instance', - help='The Bigtable instance ID', - default='bigtable-instance') + "--bigtable-instance", + help="The Bigtable instance ID", + 
default="bigtable-instance", + ) parser.add_argument( - '--bigtable-table', - help='The Bigtable table ID in the instance.', - default='bigtable-table') + "--bigtable-table", + help="The Bigtable table ID in the instance.", + default="bigtable-table", + ) class CreateRowFn(beam.DoFn): def process(self, key): direct_row = row.DirectRow(row_key=key) direct_row.set_cell( - "stats_summary", - b"os_build", - b"android", - datetime.datetime.now()) + "stats_summary", b"os_build", b"android", datetime.datetime.now() + ) return [direct_row] @@ -52,13 +53,14 @@ def run(argv=None): """Build and run the pipeline.""" options = BigtableOptions(argv) with beam.Pipeline(options=options) as p: - p | beam.Create(["phone#4c410523#20190501", - "phone#4c410523#20190502"]) | beam.ParDo( - CreateRowFn()) | WriteToBigTable( + p | beam.Create( + ["phone#4c410523#20190501", "phone#4c410523#20190502"] + ) | beam.ParDo(CreateRowFn()) | WriteToBigTable( project_id=options.bigtable_project, instance_id=options.bigtable_instance, - table_id=options.bigtable_table) + table_id=options.bigtable_table, + ) -if __name__ == '__main__': +if __name__ == "__main__": run() diff --git a/packages/google-cloud-bigtable/samples/beam/hello_world_write_test.py b/packages/google-cloud-bigtable/samples/beam/hello_world_write_test.py index cdbecc661e3c..4e9a47c7dabc 100644 --- a/packages/google-cloud-bigtable/samples/beam/hello_world_write_test.py +++ b/packages/google-cloud-bigtable/samples/beam/hello_world_write_test.py @@ -19,9 +19,9 @@ import hello_world_write -PROJECT = os.environ['GOOGLE_CLOUD_PROJECT'] -BIGTABLE_INSTANCE = os.environ['BIGTABLE_INSTANCE'] -TABLE_ID_PREFIX = 'mobile-time-series-{}' +PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] +BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"] +TABLE_ID_PREFIX = "mobile-time-series-{}" @pytest.fixture(scope="module", autouse=True) @@ -34,17 +34,20 @@ def table_id(): if table.exists(): table.delete() - table.create(column_families={'stats_summary': None}) + table.create(column_families={"stats_summary": None}) yield table_id table.delete() def test_hello_world_write(table_id): - hello_world_write.run([ - '--bigtable-project=%s' % PROJECT, - '--bigtable-instance=%s' % BIGTABLE_INSTANCE, - '--bigtable-table=%s' % table_id]) + hello_world_write.run( + [ + "--bigtable-project=%s" % PROJECT, + "--bigtable-instance=%s" % BIGTABLE_INSTANCE, + "--bigtable-table=%s" % table_id, + ] + ) client = bigtable.Client(project=PROJECT, admin=True) instance = client.instance(BIGTABLE_INSTANCE) diff --git a/packages/google-cloud-bigtable/samples/beam/noxfile.py b/packages/google-cloud-bigtable/samples/beam/noxfile.py index 171bee6570df..d7567dee99c0 100644 --- a/packages/google-cloud-bigtable/samples/beam/noxfile.py +++ b/packages/google-cloud-bigtable/samples/beam/noxfile.py @@ -17,6 +17,7 @@ import os from pathlib import Path import sys +from typing import Callable, Dict, List, Optional import nox @@ -27,8 +28,9 @@ # WARNING - WARNING - WARNING - WARNING - WARNING # WARNING - WARNING - WARNING - WARNING - WARNING -# Copy `noxfile_config.py` to your directory and modify it instead. +BLACK_VERSION = "black==19.10b0" +# Copy `noxfile_config.py` to your directory and modify it instead. # `TEST_CONFIG` dict is a configuration hook that allows users to # modify the test configurations. The values here should be in sync @@ -37,24 +39,29 @@ TEST_CONFIG = { # You can opt out from the test for specific Python versions. 
- 'ignored_versions': ["2.7"], - + "ignored_versions": [], + # Old samples are opted out of enforcing Python type hints + # All new samples should feature them + "enforce_type_hints": False, # An envvar key for determining the project id to use. Change it # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. You can also use your own string # to use your own Cloud project. - 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', + "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', - + # If you need to use a specific version of pip, + # change pip_version_override to the string representation + # of the version number, for example, "20.2.4" + "pip_version_override": None, # A dictionary you want to inject into your test. Don't put any # secrets here. These values will override predefined values. - 'envs': {}, + "envs": {}, } try: # Ensure we can import noxfile_config in the project's directory. - sys.path.append('.') + sys.path.append(".") from noxfile_config import TEST_CONFIG_OVERRIDE except ImportError as e: print("No user noxfile_config found: detail: {}".format(e)) @@ -64,37 +71,41 @@ TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) -def get_pytest_env_vars(): +def get_pytest_env_vars() -> Dict[str, str]: """Returns a dict for pytest invocation.""" ret = {} # Override the GCLOUD_PROJECT and the alias. - env_key = TEST_CONFIG['gcloud_project_env'] + env_key = TEST_CONFIG["gcloud_project_env"] # This should error out if not set. - ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] + ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] # Apply user supplied envs. - ret.update(TEST_CONFIG['envs']) + ret.update(TEST_CONFIG["envs"]) return ret # DO NOT EDIT - automatically generated. -# All versions used to tested samples. -ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] +# All versions used to test samples. +ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"] # Any default versions that should be ignored. -IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] +IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) # todo(kolea2): temporary workaround to install pinned dep version INSTALL_LIBRARY_FROM_SOURCE = False + +# Error if a python version is missing +nox.options.error_on_missing_interpreters = True + # # Style Checks # -def _determine_local_import_names(start_dir): +def _determine_local_import_names(start_dir: str) -> List[str]: """Determines all import names that should be considered "local". This is used when running the linter to insure that import order is @@ -132,18 +143,34 @@ def _determine_local_import_names(start_dir): @nox.session -def lint(session): - session.install("flake8", "flake8-import-order") +def lint(session: nox.sessions.Session) -> None: + if not TEST_CONFIG["enforce_type_hints"]: + session.install("flake8", "flake8-import-order") + else: + session.install("flake8", "flake8-import-order", "flake8-annotations") local_names = _determine_local_import_names(".") args = FLAKE8_COMMON_ARGS + [ "--application-import-names", ",".join(local_names), - "." 
+ ".", ] session.run("flake8", *args) +# +# Black +# + + +@nox.session +def blacken(session: nox.sessions.Session) -> None: + session.install(BLACK_VERSION) + python_files = [path for path in os.listdir(".") if path.endswith(".py")] + + session.run("black", *python_files) + + # # Sample Tests # @@ -152,13 +179,24 @@ def lint(session): PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] -def _session_tests(session, post_install=None): +def _session_tests( + session: nox.sessions.Session, post_install: Callable = None +) -> None: + if TEST_CONFIG["pip_version_override"]: + pip_version = TEST_CONFIG["pip_version_override"] + session.install(f"pip=={pip_version}") """Runs py.test for a particular project.""" if os.path.exists("requirements.txt"): - session.install("-r", "requirements.txt") + if os.path.exists("constraints.txt"): + session.install("-r", "requirements.txt", "-c", "constraints.txt") + else: + session.install("-r", "requirements.txt") if os.path.exists("requirements-test.txt"): - session.install("-r", "requirements-test.txt") + if os.path.exists("constraints-test.txt"): + session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") + else: + session.install("-r", "requirements-test.txt") if INSTALL_LIBRARY_FROM_SOURCE: session.install("-e", _get_repo_root()) @@ -173,19 +211,19 @@ def _session_tests(session, post_install=None): # on travis where slow and flaky tests are excluded. # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html success_codes=[0, 5], - env=get_pytest_env_vars() + env=get_pytest_env_vars(), ) @nox.session(python=ALL_VERSIONS) -def py(session): +def py(session: nox.sessions.Session) -> None: """Runs py.test for a sample using the specified version of Python.""" if session.python in TESTED_VERSIONS: _session_tests(session) else: - session.skip("SKIPPED: {} tests are disabled for this sample.".format( - session.python - )) + session.skip( + "SKIPPED: {} tests are disabled for this sample.".format(session.python) + ) # @@ -193,7 +231,7 @@ def py(session): # -def _get_repo_root(): +def _get_repo_root() -> Optional[str]: """ Returns the root folder of the project. """ # Get root of this repository. Assume we don't have directories nested deeper than 10 items. p = Path(os.getcwd()) @@ -202,6 +240,11 @@ def _get_repo_root(): break if Path(p / ".git").exists(): return str(p) + # .git is not available in repos cloned via Cloud Build + # setup.py is always in the library's root, so use that instead + # https://github.com/googleapis/synthtool/issues/792 + if Path(p / "setup.py").exists(): + return str(p) p = p.parent raise Exception("Unable to detect repository root.") @@ -211,7 +254,7 @@ def _get_repo_root(): @nox.session @nox.parametrize("path", GENERATED_READMES) -def readmegen(session, path): +def readmegen(session: nox.sessions.Session, path: str) -> None: """(Re-)generates the readme for a sample.""" session.install("jinja2", "pyyaml") dir_ = os.path.dirname(path) diff --git a/packages/google-cloud-bigtable/samples/beam/noxfile_config.py b/packages/google-cloud-bigtable/samples/beam/noxfile_config.py new file mode 100644 index 000000000000..eb01435a0579 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/beam/noxfile_config.py @@ -0,0 +1,45 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Default TEST_CONFIG_OVERRIDE for python repos. + +# You can copy this file into your directory, then it will be imported from +# the noxfile.py. + +# The source of truth: +# https://github.com/GoogleCloudPlatform/python-docs-samples/blob/main/noxfile_config.py + +TEST_CONFIG_OVERRIDE = { + # You can opt out from the test for specific Python versions. + "ignored_versions": [ + "2.7", # not supported + "3.10", # Beam wheels not yet released for Python 3.10 + ], + # Old samples are opted out of enforcing Python type hints + # All new samples should feature them + "enforce_type_hints": False, + # An envvar key for determining the project id to use. Change it + # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a + # build specific Cloud project. You can also use your own string + # to use your own Cloud project. + "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", + # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + # If you need to use a specific version of pip, + # change pip_version_override to the string representation + # of the version number, for example, "20.2.4" + "pip_version_override": None, + # A dictionary you want to inject into your test. Don't put any + # secrets here. These values will override predefined values. + "envs": {}, +} diff --git a/packages/google-cloud-bigtable/samples/hello/main.py b/packages/google-cloud-bigtable/samples/hello/main.py index 073270847232..7b2b1764a7ab 100644 --- a/packages/google-cloud-bigtable/samples/hello/main.py +++ b/packages/google-cloud-bigtable/samples/hello/main.py @@ -25,12 +25,14 @@ """ import argparse + # [START bigtable_hw_imports] import datetime from google.cloud import bigtable from google.cloud.bigtable import column_family from google.cloud.bigtable import row_filters + # [END bigtable_hw_imports] @@ -43,14 +45,14 @@ def main(project_id, instance_id, table_id): # [END bigtable_hw_connect] # [START bigtable_hw_create_table] - print('Creating the {} table.'.format(table_id)) + print("Creating the {} table.".format(table_id)) table = instance.table(table_id) - print('Creating column family cf1 with Max Version GC rule...') + print("Creating column family cf1 with Max Version GC rule...") # Create a column family with GC policy : most recent N versions # Define the GC policy to retain only the most recent 2 versions max_versions_rule = column_family.MaxVersionsGCRule(2) - column_family_id = 'cf1' + column_family_id = "cf1" column_families = {column_family_id: max_versions_rule} if not table.exists(): table.create(column_families=column_families) @@ -59,10 +61,10 @@ def main(project_id, instance_id, table_id): # [END bigtable_hw_create_table] # [START bigtable_hw_write_rows] - print('Writing some greetings to the table.') - greetings = ['Hello World!', 'Hello Cloud Bigtable!', 'Hello Python!'] + print("Writing some greetings to the table.") + greetings = ["Hello World!", "Hello Cloud Bigtable!", "Hello Python!"] rows = [] - column = 'greeting'.encode() + column = "greeting".encode() for i, value in enumerate(greetings): # Note: This example uses sequential numeric IDs for simplicity, # but this can result 
in poor performance in a production @@ -74,12 +76,11 @@ def main(project_id, instance_id, table_id): # the best performance, see the documentation: # # https://cloud.google.com/bigtable/docs/schema-design - row_key = 'greeting{}'.format(i).encode() + row_key = "greeting{}".format(i).encode() row = table.direct_row(row_key) - row.set_cell(column_family_id, - column, - value, - timestamp=datetime.datetime.utcnow()) + row.set_cell( + column_family_id, column, value, timestamp=datetime.datetime.utcnow() + ) rows.append(row) table.mutate_rows(rows) # [END bigtable_hw_write_rows] @@ -91,40 +92,40 @@ def main(project_id, instance_id, table_id): # [END bigtable_hw_create_filter] # [START bigtable_hw_get_with_filter] - print('Getting a single greeting by row key.') - key = 'greeting0'.encode() + print("Getting a single greeting by row key.") + key = "greeting0".encode() row = table.read_row(key, row_filter) cell = row.cells[column_family_id][column][0] - print(cell.value.decode('utf-8')) + print(cell.value.decode("utf-8")) # [END bigtable_hw_get_with_filter] # [START bigtable_hw_scan_with_filter] - print('Scanning for all greetings:') + print("Scanning for all greetings:") partial_rows = table.read_rows(filter_=row_filter) for row in partial_rows: cell = row.cells[column_family_id][column][0] - print(cell.value.decode('utf-8')) + print(cell.value.decode("utf-8")) # [END bigtable_hw_scan_with_filter] # [START bigtable_hw_delete_table] - print('Deleting the {} table.'.format(table_id)) + print("Deleting the {} table.".format(table_id)) table.delete() # [END bigtable_hw_delete_table] -if __name__ == '__main__': +if __name__ == "__main__": parser = argparse.ArgumentParser( - description=__doc__, - formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parser.add_argument('project_id', help='Your Cloud Platform project ID.') + description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + parser.add_argument("project_id", help="Your Cloud Platform project ID.") parser.add_argument( - 'instance_id', help='ID of the Cloud Bigtable instance to connect to.') + "instance_id", help="ID of the Cloud Bigtable instance to connect to." + ) parser.add_argument( - '--table', - help='Table to create and destroy.', - default='Hello-Bigtable') + "--table", help="Table to create and destroy.", default="Hello-Bigtable" + ) args = parser.parse_args() main(args.project_id, args.instance_id, args.table) diff --git a/packages/google-cloud-bigtable/samples/hello/main_test.py b/packages/google-cloud-bigtable/samples/hello/main_test.py index 49b8098fcd7e..641b34d11e5f 100644 --- a/packages/google-cloud-bigtable/samples/hello/main_test.py +++ b/packages/google-cloud-bigtable/samples/hello/main_test.py @@ -17,23 +17,22 @@ from main import main -PROJECT = os.environ['GOOGLE_CLOUD_PROJECT'] -BIGTABLE_INSTANCE = os.environ['BIGTABLE_INSTANCE'] -TABLE_NAME_FORMAT = 'hello-world-test-{}' +PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] +BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"] +TABLE_NAME_FORMAT = "hello-world-test-{}" TABLE_NAME_RANGE = 10000 def test_main(capsys): - table_name = TABLE_NAME_FORMAT.format( - random.randrange(TABLE_NAME_RANGE)) + table_name = TABLE_NAME_FORMAT.format(random.randrange(TABLE_NAME_RANGE)) main(PROJECT, BIGTABLE_INSTANCE, table_name) out, _ = capsys.readouterr() - assert 'Creating the {} table.'.format(table_name) in out - assert 'Writing some greetings to the table.' in out - assert 'Getting a single greeting by row key.' in out - assert 'Hello World!' 
in out - assert 'Scanning for all greetings' in out - assert 'Hello Cloud Bigtable!' in out - assert 'Deleting the {} table.'.format(table_name) in out + assert "Creating the {} table.".format(table_name) in out + assert "Writing some greetings to the table." in out + assert "Getting a single greeting by row key." in out + assert "Hello World!" in out + assert "Scanning for all greetings" in out + assert "Hello Cloud Bigtable!" in out + assert "Deleting the {} table.".format(table_name) in out diff --git a/packages/google-cloud-bigtable/samples/hello/noxfile.py b/packages/google-cloud-bigtable/samples/hello/noxfile.py index ba55d7ce53ca..93a9122cc457 100644 --- a/packages/google-cloud-bigtable/samples/hello/noxfile.py +++ b/packages/google-cloud-bigtable/samples/hello/noxfile.py @@ -17,6 +17,7 @@ import os from pathlib import Path import sys +from typing import Callable, Dict, List, Optional import nox @@ -27,8 +28,9 @@ # WARNING - WARNING - WARNING - WARNING - WARNING # WARNING - WARNING - WARNING - WARNING - WARNING -# Copy `noxfile_config.py` to your directory and modify it instead. +BLACK_VERSION = "black==19.10b0" +# Copy `noxfile_config.py` to your directory and modify it instead. # `TEST_CONFIG` dict is a configuration hook that allows users to # modify the test configurations. The values here should be in sync @@ -37,24 +39,29 @@ TEST_CONFIG = { # You can opt out from the test for specific Python versions. - 'ignored_versions': ["2.7"], - + "ignored_versions": [], + # Old samples are opted out of enforcing Python type hints + # All new samples should feature them + "enforce_type_hints": False, # An envvar key for determining the project id to use. Change it # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. You can also use your own string # to use your own Cloud project. - 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', + "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', - + # If you need to use a specific version of pip, + # change pip_version_override to the string representation + # of the version number, for example, "20.2.4" + "pip_version_override": None, # A dictionary you want to inject into your test. Don't put any # secrets here. These values will override predefined values. - 'envs': {}, + "envs": {}, } try: # Ensure we can import noxfile_config in the project's directory. - sys.path.append('.') + sys.path.append(".") from noxfile_config import TEST_CONFIG_OVERRIDE except ImportError as e: print("No user noxfile_config found: detail: {}".format(e)) @@ -64,36 +71,43 @@ TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) -def get_pytest_env_vars(): +def get_pytest_env_vars() -> Dict[str, str]: """Returns a dict for pytest invocation.""" ret = {} # Override the GCLOUD_PROJECT and the alias. - env_key = TEST_CONFIG['gcloud_project_env'] + env_key = TEST_CONFIG["gcloud_project_env"] # This should error out if not set. - ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] + ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] # Apply user supplied envs. - ret.update(TEST_CONFIG['envs']) + ret.update(TEST_CONFIG["envs"]) return ret # DO NOT EDIT - automatically generated. -# All versions used to tested samples. -ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] +# All versions used to test samples. +ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"] # Any default versions that should be ignored. 
-IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] +IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) -INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False)) +INSTALL_LIBRARY_FROM_SOURCE = os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False) in ( + "True", + "true", +) + +# Error if a python version is missing +nox.options.error_on_missing_interpreters = True + # # Style Checks # -def _determine_local_import_names(start_dir): +def _determine_local_import_names(start_dir: str) -> List[str]: """Determines all import names that should be considered "local". This is used when running the linter to insure that import order is @@ -131,18 +145,34 @@ def _determine_local_import_names(start_dir): @nox.session -def lint(session): - session.install("flake8", "flake8-import-order") +def lint(session: nox.sessions.Session) -> None: + if not TEST_CONFIG["enforce_type_hints"]: + session.install("flake8", "flake8-import-order") + else: + session.install("flake8", "flake8-import-order", "flake8-annotations") local_names = _determine_local_import_names(".") args = FLAKE8_COMMON_ARGS + [ "--application-import-names", ",".join(local_names), - "." + ".", ] session.run("flake8", *args) +# +# Black +# + + +@nox.session +def blacken(session: nox.sessions.Session) -> None: + session.install(BLACK_VERSION) + python_files = [path for path in os.listdir(".") if path.endswith(".py")] + + session.run("black", *python_files) + + # # Sample Tests # @@ -151,13 +181,24 @@ def lint(session): PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] -def _session_tests(session, post_install=None): +def _session_tests( + session: nox.sessions.Session, post_install: Callable = None +) -> None: + if TEST_CONFIG["pip_version_override"]: + pip_version = TEST_CONFIG["pip_version_override"] + session.install(f"pip=={pip_version}") """Runs py.test for a particular project.""" if os.path.exists("requirements.txt"): - session.install("-r", "requirements.txt") + if os.path.exists("constraints.txt"): + session.install("-r", "requirements.txt", "-c", "constraints.txt") + else: + session.install("-r", "requirements.txt") if os.path.exists("requirements-test.txt"): - session.install("-r", "requirements-test.txt") + if os.path.exists("constraints-test.txt"): + session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") + else: + session.install("-r", "requirements-test.txt") if INSTALL_LIBRARY_FROM_SOURCE: session.install("-e", _get_repo_root()) @@ -172,19 +213,19 @@ def _session_tests(session, post_install=None): # on travis where slow and flaky tests are excluded. # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html success_codes=[0, 5], - env=get_pytest_env_vars() + env=get_pytest_env_vars(), ) @nox.session(python=ALL_VERSIONS) -def py(session): +def py(session: nox.sessions.Session) -> None: """Runs py.test for a sample using the specified version of Python.""" if session.python in TESTED_VERSIONS: _session_tests(session) else: - session.skip("SKIPPED: {} tests are disabled for this sample.".format( - session.python - )) + session.skip( + "SKIPPED: {} tests are disabled for this sample.".format(session.python) + ) # @@ -192,7 +233,7 @@ def py(session): # -def _get_repo_root(): +def _get_repo_root() -> Optional[str]: """ Returns the root folder of the project. """ # Get root of this repository. Assume we don't have directories nested deeper than 10 items. 
p = Path(os.getcwd()) @@ -201,6 +242,11 @@ def _get_repo_root(): break if Path(p / ".git").exists(): return str(p) + # .git is not available in repos cloned via Cloud Build + # setup.py is always in the library's root, so use that instead + # https://github.com/googleapis/synthtool/issues/792 + if Path(p / "setup.py").exists(): + return str(p) p = p.parent raise Exception("Unable to detect repository root.") @@ -210,7 +256,7 @@ def _get_repo_root(): @nox.session @nox.parametrize("path", GENERATED_READMES) -def readmegen(session, path): +def readmegen(session: nox.sessions.Session, path: str) -> None: """(Re-)generates the readme for a sample.""" session.install("jinja2", "pyyaml") dir_ = os.path.dirname(path) diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/main.py b/packages/google-cloud-bigtable/samples/hello_happybase/main.py index ade4acbf0d84..7999fd0064e4 100644 --- a/packages/google-cloud-bigtable/samples/hello_happybase/main.py +++ b/packages/google-cloud-bigtable/samples/hello_happybase/main.py @@ -29,6 +29,7 @@ # [START bigtable_hw_imports_happybase] from google.cloud import bigtable from google.cloud import happybase + # [END bigtable_hw_imports_happybase] @@ -43,23 +44,21 @@ def main(project_id, instance_id, table_name): try: # [START bigtable_hw_create_table_happybase] - print('Creating the {} table.'.format(table_name)) - column_family_name = 'cf1' + print("Creating the {} table.".format(table_name)) + column_family_name = "cf1" connection.create_table( - table_name, - { - column_family_name: dict() # Use default options. - }) + table_name, {column_family_name: dict()} # Use default options. + ) # [END bigtable_hw_create_table_happybase] # [START bigtable_hw_write_rows_happybase] - print('Writing some greetings to the table.') + print("Writing some greetings to the table.") table = connection.table(table_name) - column_name = '{fam}:greeting'.format(fam=column_family_name) + column_name = "{fam}:greeting".format(fam=column_family_name) greetings = [ - 'Hello World!', - 'Hello Cloud Bigtable!', - 'Hello HappyBase!', + "Hello World!", + "Hello Cloud Bigtable!", + "Hello HappyBase!", ] for i, value in enumerate(greetings): @@ -73,28 +72,26 @@ def main(project_id, instance_id, table_name): # the best performance, see the documentation: # # https://cloud.google.com/bigtable/docs/schema-design - row_key = 'greeting{}'.format(i) - table.put( - row_key, {column_name.encode('utf-8'): value.encode('utf-8')} - ) + row_key = "greeting{}".format(i) + table.put(row_key, {column_name.encode("utf-8"): value.encode("utf-8")}) # [END bigtable_hw_write_rows_happybase] # [START bigtable_hw_get_by_key_happybase] - print('Getting a single greeting by row key.') - key = 'greeting0'.encode('utf-8') + print("Getting a single greeting by row key.") + key = "greeting0".encode("utf-8") row = table.row(key) - print('\t{}: {}'.format(key, row[column_name.encode('utf-8')])) + print("\t{}: {}".format(key, row[column_name.encode("utf-8")])) # [END bigtable_hw_get_by_key_happybase] # [START bigtable_hw_scan_all_happybase] - print('Scanning for all greetings:') + print("Scanning for all greetings:") for key, row in table.scan(): - print('\t{}: {}'.format(key, row[column_name.encode('utf-8')])) + print("\t{}: {}".format(key, row[column_name.encode("utf-8")])) # [END bigtable_hw_scan_all_happybase] # [START bigtable_hw_delete_table_happybase] - print('Deleting the {} table.'.format(table_name)) + print("Deleting the {} table.".format(table_name)) connection.delete_table(table_name) # [END 
bigtable_hw_delete_table_happybase] @@ -102,17 +99,17 @@ def main(project_id, instance_id, table_name): connection.close() -if __name__ == '__main__': +if __name__ == "__main__": parser = argparse.ArgumentParser( - description=__doc__, - formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parser.add_argument('project_id', help='Your Cloud Platform project ID.') + description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + parser.add_argument("project_id", help="Your Cloud Platform project ID.") parser.add_argument( - 'instance_id', help='ID of the Cloud Bigtable instance to connect to.') + "instance_id", help="ID of the Cloud Bigtable instance to connect to." + ) parser.add_argument( - '--table', - help='Table to create and destroy.', - default='Hello-Bigtable') + "--table", help="Table to create and destroy.", default="Hello-Bigtable" + ) args = parser.parse_args() main(args.project_id, args.instance_id, args.table) diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/main_test.py b/packages/google-cloud-bigtable/samples/hello_happybase/main_test.py index f72fc0b2e52b..6a63750da95b 100644 --- a/packages/google-cloud-bigtable/samples/hello_happybase/main_test.py +++ b/packages/google-cloud-bigtable/samples/hello_happybase/main_test.py @@ -17,25 +17,21 @@ from main import main -PROJECT = os.environ['GOOGLE_CLOUD_PROJECT'] -BIGTABLE_INSTANCE = os.environ['BIGTABLE_INSTANCE'] -TABLE_NAME_FORMAT = 'hello-world-hb-test-{}' +PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] +BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"] +TABLE_NAME_FORMAT = "hello-world-hb-test-{}" TABLE_NAME_RANGE = 10000 def test_main(capsys): - table_name = TABLE_NAME_FORMAT.format( - random.randrange(TABLE_NAME_RANGE)) - main( - PROJECT, - BIGTABLE_INSTANCE, - table_name) + table_name = TABLE_NAME_FORMAT.format(random.randrange(TABLE_NAME_RANGE)) + main(PROJECT, BIGTABLE_INSTANCE, table_name) out, _ = capsys.readouterr() - assert 'Creating the {} table.'.format(table_name) in out - assert 'Writing some greetings to the table.' in out - assert 'Getting a single greeting by row key.' in out - assert 'Hello World!' in out - assert 'Scanning for all greetings' in out - assert 'Hello Cloud Bigtable!' in out - assert 'Deleting the {} table.'.format(table_name) in out + assert "Creating the {} table.".format(table_name) in out + assert "Writing some greetings to the table." in out + assert "Getting a single greeting by row key." in out + assert "Hello World!" in out + assert "Scanning for all greetings" in out + assert "Hello Cloud Bigtable!" in out + assert "Deleting the {} table.".format(table_name) in out diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py b/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py index ba55d7ce53ca..93a9122cc457 100644 --- a/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py +++ b/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py @@ -17,6 +17,7 @@ import os from pathlib import Path import sys +from typing import Callable, Dict, List, Optional import nox @@ -27,8 +28,9 @@ # WARNING - WARNING - WARNING - WARNING - WARNING # WARNING - WARNING - WARNING - WARNING - WARNING -# Copy `noxfile_config.py` to your directory and modify it instead. +BLACK_VERSION = "black==19.10b0" +# Copy `noxfile_config.py` to your directory and modify it instead. # `TEST_CONFIG` dict is a configuration hook that allows users to # modify the test configurations. 
The values here should be in sync @@ -37,24 +39,29 @@ TEST_CONFIG = { # You can opt out from the test for specific Python versions. - 'ignored_versions': ["2.7"], - + "ignored_versions": [], + # Old samples are opted out of enforcing Python type hints + # All new samples should feature them + "enforce_type_hints": False, # An envvar key for determining the project id to use. Change it # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. You can also use your own string # to use your own Cloud project. - 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', + "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', - + # If you need to use a specific version of pip, + # change pip_version_override to the string representation + # of the version number, for example, "20.2.4" + "pip_version_override": None, # A dictionary you want to inject into your test. Don't put any # secrets here. These values will override predefined values. - 'envs': {}, + "envs": {}, } try: # Ensure we can import noxfile_config in the project's directory. - sys.path.append('.') + sys.path.append(".") from noxfile_config import TEST_CONFIG_OVERRIDE except ImportError as e: print("No user noxfile_config found: detail: {}".format(e)) @@ -64,36 +71,43 @@ TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) -def get_pytest_env_vars(): +def get_pytest_env_vars() -> Dict[str, str]: """Returns a dict for pytest invocation.""" ret = {} # Override the GCLOUD_PROJECT and the alias. - env_key = TEST_CONFIG['gcloud_project_env'] + env_key = TEST_CONFIG["gcloud_project_env"] # This should error out if not set. - ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] + ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] # Apply user supplied envs. - ret.update(TEST_CONFIG['envs']) + ret.update(TEST_CONFIG["envs"]) return ret # DO NOT EDIT - automatically generated. -# All versions used to tested samples. -ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] +# All versions used to test samples. +ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"] # Any default versions that should be ignored. -IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] +IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) -INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False)) +INSTALL_LIBRARY_FROM_SOURCE = os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False) in ( + "True", + "true", +) + +# Error if a python version is missing +nox.options.error_on_missing_interpreters = True + # # Style Checks # -def _determine_local_import_names(start_dir): +def _determine_local_import_names(start_dir: str) -> List[str]: """Determines all import names that should be considered "local". This is used when running the linter to insure that import order is @@ -131,18 +145,34 @@ def _determine_local_import_names(start_dir): @nox.session -def lint(session): - session.install("flake8", "flake8-import-order") +def lint(session: nox.sessions.Session) -> None: + if not TEST_CONFIG["enforce_type_hints"]: + session.install("flake8", "flake8-import-order") + else: + session.install("flake8", "flake8-import-order", "flake8-annotations") local_names = _determine_local_import_names(".") args = FLAKE8_COMMON_ARGS + [ "--application-import-names", ",".join(local_names), - "." 
+ ".", ] session.run("flake8", *args) +# +# Black +# + + +@nox.session +def blacken(session: nox.sessions.Session) -> None: + session.install(BLACK_VERSION) + python_files = [path for path in os.listdir(".") if path.endswith(".py")] + + session.run("black", *python_files) + + # # Sample Tests # @@ -151,13 +181,24 @@ def lint(session): PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] -def _session_tests(session, post_install=None): +def _session_tests( + session: nox.sessions.Session, post_install: Callable = None +) -> None: + if TEST_CONFIG["pip_version_override"]: + pip_version = TEST_CONFIG["pip_version_override"] + session.install(f"pip=={pip_version}") """Runs py.test for a particular project.""" if os.path.exists("requirements.txt"): - session.install("-r", "requirements.txt") + if os.path.exists("constraints.txt"): + session.install("-r", "requirements.txt", "-c", "constraints.txt") + else: + session.install("-r", "requirements.txt") if os.path.exists("requirements-test.txt"): - session.install("-r", "requirements-test.txt") + if os.path.exists("constraints-test.txt"): + session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") + else: + session.install("-r", "requirements-test.txt") if INSTALL_LIBRARY_FROM_SOURCE: session.install("-e", _get_repo_root()) @@ -172,19 +213,19 @@ def _session_tests(session, post_install=None): # on travis where slow and flaky tests are excluded. # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html success_codes=[0, 5], - env=get_pytest_env_vars() + env=get_pytest_env_vars(), ) @nox.session(python=ALL_VERSIONS) -def py(session): +def py(session: nox.sessions.Session) -> None: """Runs py.test for a sample using the specified version of Python.""" if session.python in TESTED_VERSIONS: _session_tests(session) else: - session.skip("SKIPPED: {} tests are disabled for this sample.".format( - session.python - )) + session.skip( + "SKIPPED: {} tests are disabled for this sample.".format(session.python) + ) # @@ -192,7 +233,7 @@ def py(session): # -def _get_repo_root(): +def _get_repo_root() -> Optional[str]: """ Returns the root folder of the project. """ # Get root of this repository. Assume we don't have directories nested deeper than 10 items. 
p = Path(os.getcwd()) @@ -201,6 +242,11 @@ def _get_repo_root(): break if Path(p / ".git").exists(): return str(p) + # .git is not available in repos cloned via Cloud Build + # setup.py is always in the library's root, so use that instead + # https://github.com/googleapis/synthtool/issues/792 + if Path(p / "setup.py").exists(): + return str(p) p = p.parent raise Exception("Unable to detect repository root.") @@ -210,7 +256,7 @@ def _get_repo_root(): @nox.session @nox.parametrize("path", GENERATED_READMES) -def readmegen(session, path): +def readmegen(session: nox.sessions.Session, path: str) -> None: """(Re-)generates the readme for a sample.""" session.install("jinja2", "pyyaml") dir_ = os.path.dirname(path) diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py b/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py index ba55d7ce53ca..93a9122cc457 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py +++ b/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py @@ -17,6 +17,7 @@ import os from pathlib import Path import sys +from typing import Callable, Dict, List, Optional import nox @@ -27,8 +28,9 @@ # WARNING - WARNING - WARNING - WARNING - WARNING # WARNING - WARNING - WARNING - WARNING - WARNING -# Copy `noxfile_config.py` to your directory and modify it instead. +BLACK_VERSION = "black==19.10b0" +# Copy `noxfile_config.py` to your directory and modify it instead. # `TEST_CONFIG` dict is a configuration hook that allows users to # modify the test configurations. The values here should be in sync @@ -37,24 +39,29 @@ TEST_CONFIG = { # You can opt out from the test for specific Python versions. - 'ignored_versions': ["2.7"], - + "ignored_versions": [], + # Old samples are opted out of enforcing Python type hints + # All new samples should feature them + "enforce_type_hints": False, # An envvar key for determining the project id to use. Change it # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. You can also use your own string # to use your own Cloud project. - 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', + "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', - + # If you need to use a specific version of pip, + # change pip_version_override to the string representation + # of the version number, for example, "20.2.4" + "pip_version_override": None, # A dictionary you want to inject into your test. Don't put any # secrets here. These values will override predefined values. - 'envs': {}, + "envs": {}, } try: # Ensure we can import noxfile_config in the project's directory. - sys.path.append('.') + sys.path.append(".") from noxfile_config import TEST_CONFIG_OVERRIDE except ImportError as e: print("No user noxfile_config found: detail: {}".format(e)) @@ -64,36 +71,43 @@ TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) -def get_pytest_env_vars(): +def get_pytest_env_vars() -> Dict[str, str]: """Returns a dict for pytest invocation.""" ret = {} # Override the GCLOUD_PROJECT and the alias. - env_key = TEST_CONFIG['gcloud_project_env'] + env_key = TEST_CONFIG["gcloud_project_env"] # This should error out if not set. - ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] + ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] # Apply user supplied envs. - ret.update(TEST_CONFIG['envs']) + ret.update(TEST_CONFIG["envs"]) return ret # DO NOT EDIT - automatically generated. -# All versions used to tested samples. 
-ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] +# All versions used to test samples. +ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"] # Any default versions that should be ignored. -IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] +IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) -INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False)) +INSTALL_LIBRARY_FROM_SOURCE = os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False) in ( + "True", + "true", +) + +# Error if a python version is missing +nox.options.error_on_missing_interpreters = True + # # Style Checks # -def _determine_local_import_names(start_dir): +def _determine_local_import_names(start_dir: str) -> List[str]: """Determines all import names that should be considered "local". This is used when running the linter to insure that import order is @@ -131,18 +145,34 @@ def _determine_local_import_names(start_dir): @nox.session -def lint(session): - session.install("flake8", "flake8-import-order") +def lint(session: nox.sessions.Session) -> None: + if not TEST_CONFIG["enforce_type_hints"]: + session.install("flake8", "flake8-import-order") + else: + session.install("flake8", "flake8-import-order", "flake8-annotations") local_names = _determine_local_import_names(".") args = FLAKE8_COMMON_ARGS + [ "--application-import-names", ",".join(local_names), - "." + ".", ] session.run("flake8", *args) +# +# Black +# + + +@nox.session +def blacken(session: nox.sessions.Session) -> None: + session.install(BLACK_VERSION) + python_files = [path for path in os.listdir(".") if path.endswith(".py")] + + session.run("black", *python_files) + + # # Sample Tests # @@ -151,13 +181,24 @@ def lint(session): PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] -def _session_tests(session, post_install=None): +def _session_tests( + session: nox.sessions.Session, post_install: Callable = None +) -> None: + if TEST_CONFIG["pip_version_override"]: + pip_version = TEST_CONFIG["pip_version_override"] + session.install(f"pip=={pip_version}") """Runs py.test for a particular project.""" if os.path.exists("requirements.txt"): - session.install("-r", "requirements.txt") + if os.path.exists("constraints.txt"): + session.install("-r", "requirements.txt", "-c", "constraints.txt") + else: + session.install("-r", "requirements.txt") if os.path.exists("requirements-test.txt"): - session.install("-r", "requirements-test.txt") + if os.path.exists("constraints-test.txt"): + session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") + else: + session.install("-r", "requirements-test.txt") if INSTALL_LIBRARY_FROM_SOURCE: session.install("-e", _get_repo_root()) @@ -172,19 +213,19 @@ def _session_tests(session, post_install=None): # on travis where slow and flaky tests are excluded. 
# See http://doc.pytest.org/en/latest/_modules/_pytest/main.html success_codes=[0, 5], - env=get_pytest_env_vars() + env=get_pytest_env_vars(), ) @nox.session(python=ALL_VERSIONS) -def py(session): +def py(session: nox.sessions.Session) -> None: """Runs py.test for a sample using the specified version of Python.""" if session.python in TESTED_VERSIONS: _session_tests(session) else: - session.skip("SKIPPED: {} tests are disabled for this sample.".format( - session.python - )) + session.skip( + "SKIPPED: {} tests are disabled for this sample.".format(session.python) + ) # @@ -192,7 +233,7 @@ def py(session): # -def _get_repo_root(): +def _get_repo_root() -> Optional[str]: """ Returns the root folder of the project. """ # Get root of this repository. Assume we don't have directories nested deeper than 10 items. p = Path(os.getcwd()) @@ -201,6 +242,11 @@ def _get_repo_root(): break if Path(p / ".git").exists(): return str(p) + # .git is not available in repos cloned via Cloud Build + # setup.py is always in the library's root, so use that instead + # https://github.com/googleapis/synthtool/issues/792 + if Path(p / "setup.py").exists(): + return str(p) p = p.parent raise Exception("Unable to detect repository root.") @@ -210,7 +256,7 @@ def _get_repo_root(): @nox.session @nox.parametrize("path", GENERATED_READMES) -def readmegen(session, path): +def readmegen(session: nox.sessions.Session, path: str) -> None: """(Re-)generates the readme for a sample.""" session.install("jinja2", "pyyaml") dir_ = os.path.dirname(path) diff --git a/packages/google-cloud-bigtable/samples/metricscaler/metricscaler.py b/packages/google-cloud-bigtable/samples/metricscaler/metricscaler.py index 43b430859a69..d29e40a398c9 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/metricscaler.py +++ b/packages/google-cloud-bigtable/samples/metricscaler/metricscaler.py @@ -25,9 +25,9 @@ from google.cloud.bigtable import enums from google.cloud.monitoring_v3 import query -PROJECT = os.environ['GOOGLE_CLOUD_PROJECT'] +PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] -logger = logging.getLogger('bigtable.metricscaler') +logger = logging.getLogger("bigtable.metricscaler") logger.addHandler(logging.StreamHandler()) logger.setLevel(logging.INFO) @@ -40,12 +40,15 @@ def get_cpu_load(bigtable_instance, bigtable_cluster): """ # [START bigtable_cpu] client = monitoring_v3.MetricServiceClient() - cpu_query = query.Query(client, - project=PROJECT, - metric_type='bigtable.googleapis.com/' - 'cluster/cpu_load', - minutes=5) - cpu_query = cpu_query.select_resources(instance=bigtable_instance, cluster=bigtable_cluster) + cpu_query = query.Query( + client, + project=PROJECT, + metric_type="bigtable.googleapis.com/" "cluster/cpu_load", + minutes=5, + ) + cpu_query = cpu_query.select_resources( + instance=bigtable_instance, cluster=bigtable_cluster + ) cpu = next(cpu_query.iter()) return cpu.points[0].value.double_value # [END bigtable_cpu] @@ -59,12 +62,15 @@ def get_storage_utilization(bigtable_instance, bigtable_cluster): """ # [START bigtable_metric_scaler_storage_utilization] client = monitoring_v3.MetricServiceClient() - utilization_query = query.Query(client, - project=PROJECT, - metric_type='bigtable.googleapis.com/' - 'cluster/storage_utilization', - minutes=5) - utilization_query = utilization_query.select_resources(instance=bigtable_instance, cluster=bigtable_cluster) + utilization_query = query.Query( + client, + project=PROJECT, + metric_type="bigtable.googleapis.com/" "cluster/storage_utilization", + minutes=5, + ) + 
utilization_query = utilization_query.select_resources( + instance=bigtable_instance, cluster=bigtable_cluster + ) utilization = next(utilization_query.iter()) return utilization.points[0].value.double_value # [END bigtable_metric_scaler_storage_utilization] @@ -114,20 +120,24 @@ def scale_bigtable(bigtable_instance, bigtable_cluster, scale_up): if scale_up: if current_node_count < max_node_count: - new_node_count = min( - current_node_count + size_change_step, max_node_count) + new_node_count = min(current_node_count + size_change_step, max_node_count) cluster.serve_nodes = new_node_count cluster.update() - logger.info('Scaled up from {} to {} nodes.'.format( - current_node_count, new_node_count)) + logger.info( + "Scaled up from {} to {} nodes.".format( + current_node_count, new_node_count + ) + ) else: if current_node_count > min_node_count: - new_node_count = max( - current_node_count - size_change_step, min_node_count) + new_node_count = max(current_node_count - size_change_step, min_node_count) cluster.serve_nodes = new_node_count cluster.update() - logger.info('Scaled down from {} to {} nodes.'.format( - current_node_count, new_node_count)) + logger.info( + "Scaled down from {} to {} nodes.".format( + current_node_count, new_node_count + ) + ) # [END bigtable_scale] @@ -138,7 +148,7 @@ def main( low_cpu_threshold, high_storage_threshold, short_sleep, - long_sleep + long_sleep, ): """Main loop runner that autoscales Cloud Bigtable. @@ -154,8 +164,8 @@ def main( """ cluster_cpu = get_cpu_load(bigtable_instance, bigtable_cluster) cluster_storage = get_storage_utilization(bigtable_instance, bigtable_cluster) - logger.info('Detected cpu of {}'.format(cluster_cpu)) - logger.info('Detected storage utilization of {}'.format(cluster_storage)) + logger.info("Detected cpu of {}".format(cluster_cpu)) + logger.info("Detected storage utilization of {}".format(cluster_storage)) try: if cluster_cpu > high_cpu_threshold or cluster_storage > high_storage_threshold: scale_bigtable(bigtable_instance, bigtable_cluster, True) @@ -165,44 +175,50 @@ def main( scale_bigtable(bigtable_instance, bigtable_cluster, False) time.sleep(long_sleep) else: - logger.info('CPU within threshold, sleeping.') + logger.info("CPU within threshold, sleeping.") time.sleep(short_sleep) except Exception as e: logger.error("Error during scaling: %s", e) -if __name__ == '__main__': +if __name__ == "__main__": parser = argparse.ArgumentParser( - description='Scales Cloud Bigtable clusters based on CPU usage.') + description="Scales Cloud Bigtable clusters based on CPU usage." + ) parser.add_argument( - 'bigtable_instance', - help='ID of the Cloud Bigtable instance to connect to.') + "bigtable_instance", help="ID of the Cloud Bigtable instance to connect to." + ) parser.add_argument( - 'bigtable_cluster', - help='ID of the Cloud Bigtable cluster to connect to.') + "bigtable_cluster", help="ID of the Cloud Bigtable cluster to connect to." 
+ ) parser.add_argument( - '--high_cpu_threshold', - help='If Cloud Bigtable CPU usage is above this threshold, scale up', - default=0.6) + "--high_cpu_threshold", + help="If Cloud Bigtable CPU usage is above this threshold, scale up", + default=0.6, + ) parser.add_argument( - '--low_cpu_threshold', - help='If Cloud Bigtable CPU usage is below this threshold, scale down', - default=0.2) + "--low_cpu_threshold", + help="If Cloud Bigtable CPU usage is below this threshold, scale down", + default=0.2, + ) parser.add_argument( - '--high_storage_threshold', - help='If Cloud Bigtable storage utilization is above this threshold, ' - 'scale up', - default=0.6) + "--high_storage_threshold", + help="If Cloud Bigtable storage utilization is above this threshold, " + "scale up", + default=0.6, + ) parser.add_argument( - '--short_sleep', - help='How long to sleep in seconds between checking metrics after no ' - 'scale operation', - default=60) + "--short_sleep", + help="How long to sleep in seconds between checking metrics after no " + "scale operation", + default=60, + ) parser.add_argument( - '--long_sleep', - help='How long to sleep in seconds between checking metrics after a ' - 'scaling operation', - default=60 * 10) + "--long_sleep", + help="How long to sleep in seconds between checking metrics after a " + "scaling operation", + default=60 * 10, + ) args = parser.parse_args() while True: @@ -213,4 +229,5 @@ def main( float(args.low_cpu_threshold), float(args.high_storage_threshold), int(args.short_sleep), - int(args.long_sleep)) + int(args.long_sleep), + ) diff --git a/packages/google-cloud-bigtable/samples/metricscaler/metricscaler_test.py b/packages/google-cloud-bigtable/samples/metricscaler/metricscaler_test.py index 13d46332536f..4420605ecee1 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/metricscaler_test.py +++ b/packages/google-cloud-bigtable/samples/metricscaler/metricscaler_test.py @@ -31,10 +31,10 @@ from metricscaler import scale_bigtable -PROJECT = os.environ['GOOGLE_CLOUD_PROJECT'] -BIGTABLE_ZONE = os.environ['BIGTABLE_ZONE'] +PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] +BIGTABLE_ZONE = os.environ["BIGTABLE_ZONE"] SIZE_CHANGE_STEP = 3 -INSTANCE_ID_FORMAT = 'metric-scale-test-{}' +INSTANCE_ID_FORMAT = "metric-scale-test-{}" BIGTABLE_INSTANCE = INSTANCE_ID_FORMAT.format(str(uuid.uuid4())[:10]) BIGTABLE_DEV_INSTANCE = INSTANCE_ID_FORMAT.format(str(uuid.uuid4())[:10]) @@ -42,14 +42,14 @@ # System tests to verify API calls succeed -@patch('metricscaler.query') +@patch("metricscaler.query") def test_get_cpu_load(monitoring_v3_query): iter_mock = monitoring_v3_query.Query().select_resources().iter iter_mock.return_value = iter([Mock(points=[Mock(value=Mock(double_value=1.0))])]) assert float(get_cpu_load(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE)) > 0.0 -@patch('metricscaler.query') +@patch("metricscaler.query") def test_get_storage_utilization(monitoring_v3_query): iter_mock = monitoring_v3_query.Query().select_resources().iter iter_mock.return_value = iter([Mock(points=[Mock(value=Mock(double_value=1.0))])]) @@ -65,14 +65,18 @@ def instance(): serve_nodes = 1 storage_type = enums.StorageType.SSD production = enums.Instance.Type.PRODUCTION - labels = {'prod-label': 'prod-label'} - instance = client.instance(BIGTABLE_INSTANCE, instance_type=production, - labels=labels) + labels = {"prod-label": "prod-label"} + instance = client.instance( + BIGTABLE_INSTANCE, instance_type=production, labels=labels + ) if not instance.exists(): - cluster = instance.cluster(cluster_id, 
location_id=BIGTABLE_ZONE, - serve_nodes=serve_nodes, - default_storage_type=storage_type) + cluster = instance.cluster( + cluster_id, + location_id=BIGTABLE_ZONE, + serve_nodes=serve_nodes, + default_storage_type=storage_type, + ) instance.create(clusters=[cluster]) # Eventual consistency check @@ -92,14 +96,15 @@ def dev_instance(): storage_type = enums.StorageType.SSD development = enums.Instance.Type.DEVELOPMENT - labels = {'dev-label': 'dev-label'} - instance = client.instance(BIGTABLE_DEV_INSTANCE, - instance_type=development, - labels=labels) + labels = {"dev-label": "dev-label"} + instance = client.instance( + BIGTABLE_DEV_INSTANCE, instance_type=development, labels=labels + ) if not instance.exists(): - cluster = instance.cluster(cluster_id, location_id=BIGTABLE_ZONE, - default_storage_type=storage_type) + cluster = instance.cluster( + cluster_id, location_id=BIGTABLE_ZONE, default_storage_type=storage_type + ) instance.create(clusters=[cluster]) # Eventual consistency check @@ -117,9 +122,7 @@ def __init__(self, expected_node_count): def __call__(self, cluster): expected = self.expected_node_count - print( - f"Expected node count: {expected}; found: {cluster.serve_nodes}" - ) + print(f"Expected node count: {expected}; found: {cluster.serve_nodes}") return cluster.serve_nodes == expected @@ -146,7 +149,8 @@ def test_scale_bigtable(instance): ) scaled_node_count_predicate.__name__ = "scaled_node_count_predicate" _scaled_node_count = RetryInstanceState( - instance_predicate=scaled_node_count_predicate, max_tries=10, + instance_predicate=scaled_node_count_predicate, + max_tries=10, ) _scaled_node_count(cluster.reload)() @@ -155,7 +159,8 @@ def test_scale_bigtable(instance): restored_node_count_predicate = ClusterNodeCountPredicate(original_node_count) restored_node_count_predicate.__name__ = "restored_node_count_predicate" _restored_node_count = RetryInstanceState( - instance_predicate=restored_node_count_predicate, max_tries=10, + instance_predicate=restored_node_count_predicate, + max_tries=10, ) _restored_node_count(cluster.reload)() @@ -165,10 +170,10 @@ def test_handle_dev_instance(capsys, dev_instance): scale_bigtable(BIGTABLE_DEV_INSTANCE, BIGTABLE_DEV_INSTANCE, True) -@patch('time.sleep') -@patch('metricscaler.get_storage_utilization') -@patch('metricscaler.get_cpu_load') -@patch('metricscaler.scale_bigtable') +@patch("time.sleep") +@patch("metricscaler.get_storage_utilization") +@patch("metricscaler.get_cpu_load") +@patch("metricscaler.scale_bigtable") def test_main(scale_bigtable, get_cpu_load, get_storage_utilization, sleep): SHORT_SLEEP = 5 LONG_SLEEP = 10 @@ -177,57 +182,46 @@ def test_main(scale_bigtable, get_cpu_load, get_storage_utilization, sleep): get_cpu_load.return_value = 0.5 get_storage_utilization.return_value = 0.5 - main(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, 0.6, 0.3, 0.6, SHORT_SLEEP, - LONG_SLEEP) + main(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, 0.6, 0.3, 0.6, SHORT_SLEEP, LONG_SLEEP) scale_bigtable.assert_not_called() scale_bigtable.reset_mock() # Test high CPU, okay storage utilization get_cpu_load.return_value = 0.7 get_storage_utilization.return_value = 0.5 - main(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, 0.6, 0.3, 0.6, SHORT_SLEEP, - LONG_SLEEP) - scale_bigtable.assert_called_once_with(BIGTABLE_INSTANCE, - BIGTABLE_INSTANCE, True) + main(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, 0.6, 0.3, 0.6, SHORT_SLEEP, LONG_SLEEP) + scale_bigtable.assert_called_once_with(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, True) scale_bigtable.reset_mock() # Test low CPU, okay storage 
utilization get_storage_utilization.return_value = 0.5 get_cpu_load.return_value = 0.2 - main(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, 0.6, 0.3, 0.6, SHORT_SLEEP, - LONG_SLEEP) - scale_bigtable.assert_called_once_with(BIGTABLE_INSTANCE, - BIGTABLE_INSTANCE, False) + main(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, 0.6, 0.3, 0.6, SHORT_SLEEP, LONG_SLEEP) + scale_bigtable.assert_called_once_with(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, False) scale_bigtable.reset_mock() # Test okay CPU, high storage utilization get_cpu_load.return_value = 0.5 get_storage_utilization.return_value = 0.7 - main(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, 0.6, 0.3, 0.6, SHORT_SLEEP, - LONG_SLEEP) - scale_bigtable.assert_called_once_with(BIGTABLE_INSTANCE, - BIGTABLE_INSTANCE, True) + main(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, 0.6, 0.3, 0.6, SHORT_SLEEP, LONG_SLEEP) + scale_bigtable.assert_called_once_with(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, True) scale_bigtable.reset_mock() # Test high CPU, high storage utilization get_cpu_load.return_value = 0.7 get_storage_utilization.return_value = 0.7 - main(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, 0.6, 0.3, 0.6, SHORT_SLEEP, - LONG_SLEEP) - scale_bigtable.assert_called_once_with(BIGTABLE_INSTANCE, - BIGTABLE_INSTANCE, True) + main(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, 0.6, 0.3, 0.6, SHORT_SLEEP, LONG_SLEEP) + scale_bigtable.assert_called_once_with(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, True) scale_bigtable.reset_mock() # Test low CPU, high storage utilization get_cpu_load.return_value = 0.2 get_storage_utilization.return_value = 0.7 - main(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, 0.6, 0.3, 0.6, SHORT_SLEEP, - LONG_SLEEP) - scale_bigtable.assert_called_once_with(BIGTABLE_INSTANCE, - BIGTABLE_INSTANCE, True) + main(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, 0.6, 0.3, 0.6, SHORT_SLEEP, LONG_SLEEP) + scale_bigtable.assert_called_once_with(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, True) scale_bigtable.reset_mock() -if __name__ == '__main__': +if __name__ == "__main__": test_get_cpu_load() diff --git a/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py b/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py index ba55d7ce53ca..93a9122cc457 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py +++ b/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py @@ -17,6 +17,7 @@ import os from pathlib import Path import sys +from typing import Callable, Dict, List, Optional import nox @@ -27,8 +28,9 @@ # WARNING - WARNING - WARNING - WARNING - WARNING # WARNING - WARNING - WARNING - WARNING - WARNING -# Copy `noxfile_config.py` to your directory and modify it instead. +BLACK_VERSION = "black==19.10b0" +# Copy `noxfile_config.py` to your directory and modify it instead. # `TEST_CONFIG` dict is a configuration hook that allows users to # modify the test configurations. The values here should be in sync @@ -37,24 +39,29 @@ TEST_CONFIG = { # You can opt out from the test for specific Python versions. - 'ignored_versions': ["2.7"], - + "ignored_versions": [], + # Old samples are opted out of enforcing Python type hints + # All new samples should feature them + "enforce_type_hints": False, # An envvar key for determining the project id to use. Change it # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. You can also use your own string # to use your own Cloud project. 
- 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', + "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', - + # If you need to use a specific version of pip, + # change pip_version_override to the string representation + # of the version number, for example, "20.2.4" + "pip_version_override": None, # A dictionary you want to inject into your test. Don't put any # secrets here. These values will override predefined values. - 'envs': {}, + "envs": {}, } try: # Ensure we can import noxfile_config in the project's directory. - sys.path.append('.') + sys.path.append(".") from noxfile_config import TEST_CONFIG_OVERRIDE except ImportError as e: print("No user noxfile_config found: detail: {}".format(e)) @@ -64,36 +71,43 @@ TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) -def get_pytest_env_vars(): +def get_pytest_env_vars() -> Dict[str, str]: """Returns a dict for pytest invocation.""" ret = {} # Override the GCLOUD_PROJECT and the alias. - env_key = TEST_CONFIG['gcloud_project_env'] + env_key = TEST_CONFIG["gcloud_project_env"] # This should error out if not set. - ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] + ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] # Apply user supplied envs. - ret.update(TEST_CONFIG['envs']) + ret.update(TEST_CONFIG["envs"]) return ret # DO NOT EDIT - automatically generated. -# All versions used to tested samples. -ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] +# All versions used to test samples. +ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"] # Any default versions that should be ignored. -IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] +IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) -INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False)) +INSTALL_LIBRARY_FROM_SOURCE = os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False) in ( + "True", + "true", +) + +# Error if a python version is missing +nox.options.error_on_missing_interpreters = True + # # Style Checks # -def _determine_local_import_names(start_dir): +def _determine_local_import_names(start_dir: str) -> List[str]: """Determines all import names that should be considered "local". This is used when running the linter to insure that import order is @@ -131,18 +145,34 @@ def _determine_local_import_names(start_dir): @nox.session -def lint(session): - session.install("flake8", "flake8-import-order") +def lint(session: nox.sessions.Session) -> None: + if not TEST_CONFIG["enforce_type_hints"]: + session.install("flake8", "flake8-import-order") + else: + session.install("flake8", "flake8-import-order", "flake8-annotations") local_names = _determine_local_import_names(".") args = FLAKE8_COMMON_ARGS + [ "--application-import-names", ",".join(local_names), - "." 
+ ".", ] session.run("flake8", *args) +# +# Black +# + + +@nox.session +def blacken(session: nox.sessions.Session) -> None: + session.install(BLACK_VERSION) + python_files = [path for path in os.listdir(".") if path.endswith(".py")] + + session.run("black", *python_files) + + # # Sample Tests # @@ -151,13 +181,24 @@ def lint(session): PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] -def _session_tests(session, post_install=None): +def _session_tests( + session: nox.sessions.Session, post_install: Callable = None +) -> None: + if TEST_CONFIG["pip_version_override"]: + pip_version = TEST_CONFIG["pip_version_override"] + session.install(f"pip=={pip_version}") """Runs py.test for a particular project.""" if os.path.exists("requirements.txt"): - session.install("-r", "requirements.txt") + if os.path.exists("constraints.txt"): + session.install("-r", "requirements.txt", "-c", "constraints.txt") + else: + session.install("-r", "requirements.txt") if os.path.exists("requirements-test.txt"): - session.install("-r", "requirements-test.txt") + if os.path.exists("constraints-test.txt"): + session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") + else: + session.install("-r", "requirements-test.txt") if INSTALL_LIBRARY_FROM_SOURCE: session.install("-e", _get_repo_root()) @@ -172,19 +213,19 @@ def _session_tests(session, post_install=None): # on travis where slow and flaky tests are excluded. # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html success_codes=[0, 5], - env=get_pytest_env_vars() + env=get_pytest_env_vars(), ) @nox.session(python=ALL_VERSIONS) -def py(session): +def py(session: nox.sessions.Session) -> None: """Runs py.test for a sample using the specified version of Python.""" if session.python in TESTED_VERSIONS: _session_tests(session) else: - session.skip("SKIPPED: {} tests are disabled for this sample.".format( - session.python - )) + session.skip( + "SKIPPED: {} tests are disabled for this sample.".format(session.python) + ) # @@ -192,7 +233,7 @@ def py(session): # -def _get_repo_root(): +def _get_repo_root() -> Optional[str]: """ Returns the root folder of the project. """ # Get root of this repository. Assume we don't have directories nested deeper than 10 items. p = Path(os.getcwd()) @@ -201,6 +242,11 @@ def _get_repo_root(): break if Path(p / ".git").exists(): return str(p) + # .git is not available in repos cloned via Cloud Build + # setup.py is always in the library's root, so use that instead + # https://github.com/googleapis/synthtool/issues/792 + if Path(p / "setup.py").exists(): + return str(p) p = p.parent raise Exception("Unable to detect repository root.") @@ -210,7 +256,7 @@ def _get_repo_root(): @nox.session @nox.parametrize("path", GENERATED_READMES) -def readmegen(session, path): +def readmegen(session: nox.sessions.Session, path: str) -> None: """(Re-)generates the readme for a sample.""" session.install("jinja2", "pyyaml") dir_ = os.path.dirname(path) diff --git a/packages/google-cloud-bigtable/samples/quickstart/main.py b/packages/google-cloud-bigtable/samples/quickstart/main.py index 3763296f1e4c..50bfe639426c 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/main.py +++ b/packages/google-cloud-bigtable/samples/quickstart/main.py @@ -20,8 +20,7 @@ from google.cloud import bigtable -def main(project_id="project-id", instance_id="instance-id", - table_id="my-table"): +def main(project_id="project-id", instance_id="instance-id", table_id="my-table"): # Create a Cloud Bigtable client. 
client = bigtable.Client(project=project_id) @@ -31,27 +30,27 @@ def main(project_id="project-id", instance_id="instance-id", # Open an existing table. table = instance.table(table_id) - row_key = 'r1' - row = table.read_row(row_key.encode('utf-8')) + row_key = "r1" + row = table.read_row(row_key.encode("utf-8")) - column_family_id = 'cf1' - column_id = 'c1'.encode('utf-8') - value = row.cells[column_family_id][column_id][0].value.decode('utf-8') + column_family_id = "cf1" + column_id = "c1".encode("utf-8") + value = row.cells[column_family_id][column_id][0].value.decode("utf-8") - print('Row key: {}\nData: {}'.format(row_key, value)) + print("Row key: {}\nData: {}".format(row_key, value)) -if __name__ == '__main__': +if __name__ == "__main__": parser = argparse.ArgumentParser( - description=__doc__, - formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parser.add_argument('project_id', help='Your Cloud Platform project ID.') + description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + parser.add_argument("project_id", help="Your Cloud Platform project ID.") parser.add_argument( - 'instance_id', help='ID of the Cloud Bigtable instance to connect to.') + "instance_id", help="ID of the Cloud Bigtable instance to connect to." + ) parser.add_argument( - '--table', - help='Existing table used in the quickstart.', - default='my-table') + "--table", help="Existing table used in the quickstart.", default="my-table" + ) args = parser.parse_args() main(args.project_id, args.instance_id, args.table) diff --git a/packages/google-cloud-bigtable/samples/quickstart/main_test.py b/packages/google-cloud-bigtable/samples/quickstart/main_test.py index ea1e8776ba7a..46d578b6b93c 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/main_test.py +++ b/packages/google-cloud-bigtable/samples/quickstart/main_test.py @@ -21,9 +21,9 @@ from main import main -PROJECT = os.environ['GOOGLE_CLOUD_PROJECT'] -BIGTABLE_INSTANCE = os.environ['BIGTABLE_INSTANCE'] -TABLE_ID_FORMAT = 'quickstart-test-{}' +PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] +BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"] +TABLE_ID_FORMAT = "quickstart-test-{}" @pytest.fixture() @@ -32,7 +32,7 @@ def table(): client = bigtable.Client(project=PROJECT, admin=True) instance = client.instance(BIGTABLE_INSTANCE) table = instance.table(table_id) - column_family_id = 'cf1' + column_family_id = "cf1" column_families = {column_family_id: None} table.create(column_families=column_families) @@ -50,4 +50,4 @@ def test_main(capsys, table): main(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() - assert 'Row key: r1\nData: test-value\n' in out + assert "Row key: r1\nData: test-value\n" in out diff --git a/packages/google-cloud-bigtable/samples/quickstart/noxfile.py b/packages/google-cloud-bigtable/samples/quickstart/noxfile.py index ba55d7ce53ca..93a9122cc457 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/noxfile.py +++ b/packages/google-cloud-bigtable/samples/quickstart/noxfile.py @@ -17,6 +17,7 @@ import os from pathlib import Path import sys +from typing import Callable, Dict, List, Optional import nox @@ -27,8 +28,9 @@ # WARNING - WARNING - WARNING - WARNING - WARNING # WARNING - WARNING - WARNING - WARNING - WARNING -# Copy `noxfile_config.py` to your directory and modify it instead. +BLACK_VERSION = "black==19.10b0" +# Copy `noxfile_config.py` to your directory and modify it instead. # `TEST_CONFIG` dict is a configuration hook that allows users to # modify the test configurations. 
The values here should be in sync @@ -37,24 +39,29 @@ TEST_CONFIG = { # You can opt out from the test for specific Python versions. - 'ignored_versions': ["2.7"], - + "ignored_versions": [], + # Old samples are opted out of enforcing Python type hints + # All new samples should feature them + "enforce_type_hints": False, # An envvar key for determining the project id to use. Change it # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. You can also use your own string # to use your own Cloud project. - 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', + "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', - + # If you need to use a specific version of pip, + # change pip_version_override to the string representation + # of the version number, for example, "20.2.4" + "pip_version_override": None, # A dictionary you want to inject into your test. Don't put any # secrets here. These values will override predefined values. - 'envs': {}, + "envs": {}, } try: # Ensure we can import noxfile_config in the project's directory. - sys.path.append('.') + sys.path.append(".") from noxfile_config import TEST_CONFIG_OVERRIDE except ImportError as e: print("No user noxfile_config found: detail: {}".format(e)) @@ -64,36 +71,43 @@ TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) -def get_pytest_env_vars(): +def get_pytest_env_vars() -> Dict[str, str]: """Returns a dict for pytest invocation.""" ret = {} # Override the GCLOUD_PROJECT and the alias. - env_key = TEST_CONFIG['gcloud_project_env'] + env_key = TEST_CONFIG["gcloud_project_env"] # This should error out if not set. - ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] + ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] # Apply user supplied envs. - ret.update(TEST_CONFIG['envs']) + ret.update(TEST_CONFIG["envs"]) return ret # DO NOT EDIT - automatically generated. -# All versions used to tested samples. -ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] +# All versions used to test samples. +ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"] # Any default versions that should be ignored. -IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] +IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) -INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False)) +INSTALL_LIBRARY_FROM_SOURCE = os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False) in ( + "True", + "true", +) + +# Error if a python version is missing +nox.options.error_on_missing_interpreters = True + # # Style Checks # -def _determine_local_import_names(start_dir): +def _determine_local_import_names(start_dir: str) -> List[str]: """Determines all import names that should be considered "local". This is used when running the linter to insure that import order is @@ -131,18 +145,34 @@ def _determine_local_import_names(start_dir): @nox.session -def lint(session): - session.install("flake8", "flake8-import-order") +def lint(session: nox.sessions.Session) -> None: + if not TEST_CONFIG["enforce_type_hints"]: + session.install("flake8", "flake8-import-order") + else: + session.install("flake8", "flake8-import-order", "flake8-annotations") local_names = _determine_local_import_names(".") args = FLAKE8_COMMON_ARGS + [ "--application-import-names", ",".join(local_names), - "." 
+ ".", ] session.run("flake8", *args) +# +# Black +# + + +@nox.session +def blacken(session: nox.sessions.Session) -> None: + session.install(BLACK_VERSION) + python_files = [path for path in os.listdir(".") if path.endswith(".py")] + + session.run("black", *python_files) + + # # Sample Tests # @@ -151,13 +181,24 @@ def lint(session): PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] -def _session_tests(session, post_install=None): +def _session_tests( + session: nox.sessions.Session, post_install: Callable = None +) -> None: + if TEST_CONFIG["pip_version_override"]: + pip_version = TEST_CONFIG["pip_version_override"] + session.install(f"pip=={pip_version}") """Runs py.test for a particular project.""" if os.path.exists("requirements.txt"): - session.install("-r", "requirements.txt") + if os.path.exists("constraints.txt"): + session.install("-r", "requirements.txt", "-c", "constraints.txt") + else: + session.install("-r", "requirements.txt") if os.path.exists("requirements-test.txt"): - session.install("-r", "requirements-test.txt") + if os.path.exists("constraints-test.txt"): + session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") + else: + session.install("-r", "requirements-test.txt") if INSTALL_LIBRARY_FROM_SOURCE: session.install("-e", _get_repo_root()) @@ -172,19 +213,19 @@ def _session_tests(session, post_install=None): # on travis where slow and flaky tests are excluded. # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html success_codes=[0, 5], - env=get_pytest_env_vars() + env=get_pytest_env_vars(), ) @nox.session(python=ALL_VERSIONS) -def py(session): +def py(session: nox.sessions.Session) -> None: """Runs py.test for a sample using the specified version of Python.""" if session.python in TESTED_VERSIONS: _session_tests(session) else: - session.skip("SKIPPED: {} tests are disabled for this sample.".format( - session.python - )) + session.skip( + "SKIPPED: {} tests are disabled for this sample.".format(session.python) + ) # @@ -192,7 +233,7 @@ def py(session): # -def _get_repo_root(): +def _get_repo_root() -> Optional[str]: """ Returns the root folder of the project. """ # Get root of this repository. Assume we don't have directories nested deeper than 10 items. 
p = Path(os.getcwd()) @@ -201,6 +242,11 @@ def _get_repo_root(): break if Path(p / ".git").exists(): return str(p) + # .git is not available in repos cloned via Cloud Build + # setup.py is always in the library's root, so use that instead + # https://github.com/googleapis/synthtool/issues/792 + if Path(p / "setup.py").exists(): + return str(p) p = p.parent raise Exception("Unable to detect repository root.") @@ -210,7 +256,7 @@ def _get_repo_root(): @nox.session @nox.parametrize("path", GENERATED_READMES) -def readmegen(session, path): +def readmegen(session: nox.sessions.Session, path: str) -> None: """(Re-)generates the readme for a sample.""" session.install("jinja2", "pyyaml") dir_ = os.path.dirname(path) diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/main.py b/packages/google-cloud-bigtable/samples/quickstart_happybase/main.py index 056e3666bb5b..6a05c4cbd46b 100644 --- a/packages/google-cloud-bigtable/samples/quickstart_happybase/main.py +++ b/packages/google-cloud-bigtable/samples/quickstart_happybase/main.py @@ -20,8 +20,7 @@ from google.cloud import happybase -def main(project_id="project-id", instance_id="instance-id", - table_id="my-table"): +def main(project_id="project-id", instance_id="instance-id", table_id="my-table"): # Creates a Bigtable client client = bigtable.Client(project=project_id) @@ -34,28 +33,28 @@ def main(project_id="project-id", instance_id="instance-id", # Connect to an existing table:my-table table = connection.table(table_id) - key = 'r1' - row = table.row(key.encode('utf-8')) + key = "r1" + row = table.row(key.encode("utf-8")) - column = 'cf1:c1'.encode('utf-8') - value = row[column].decode('utf-8') - print('Row key: {}\nData: {}'.format(key, value)) + column = "cf1:c1".encode("utf-8") + value = row[column].decode("utf-8") + print("Row key: {}\nData: {}".format(key, value)) finally: connection.close() -if __name__ == '__main__': +if __name__ == "__main__": parser = argparse.ArgumentParser( - description=__doc__, - formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parser.add_argument('project_id', help='Your Cloud Platform project ID.') + description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + parser.add_argument("project_id", help="Your Cloud Platform project ID.") parser.add_argument( - 'instance_id', help='ID of the Cloud Bigtable instance to connect to.') + "instance_id", help="ID of the Cloud Bigtable instance to connect to." 
+ ) parser.add_argument( - '--table', - help='Existing table used in the quickstart.', - default='my-table') + "--table", help="Existing table used in the quickstart.", default="my-table" + ) args = parser.parse_args() main(args.project_id, args.instance_id, args.table) diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/main_test.py b/packages/google-cloud-bigtable/samples/quickstart_happybase/main_test.py index 26afa6d6bfe1..dc62ebede8dd 100644 --- a/packages/google-cloud-bigtable/samples/quickstart_happybase/main_test.py +++ b/packages/google-cloud-bigtable/samples/quickstart_happybase/main_test.py @@ -21,9 +21,9 @@ from main import main -PROJECT = os.environ['GOOGLE_CLOUD_PROJECT'] -BIGTABLE_INSTANCE = os.environ['BIGTABLE_INSTANCE'] -TABLE_ID_FORMAT = 'quickstart-hb-test-{}' +PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] +BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"] +TABLE_ID_FORMAT = "quickstart-hb-test-{}" @pytest.fixture() @@ -32,7 +32,7 @@ def table(): client = bigtable.Client(project=PROJECT, admin=True) instance = client.instance(BIGTABLE_INSTANCE) table = instance.table(table_id) - column_family_id = 'cf1' + column_family_id = "cf1" column_families = {column_family_id: None} table.create(column_families=column_families) @@ -50,4 +50,4 @@ def test_main(capsys, table): main(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() - assert 'Row key: r1\nData: test-value\n' in out + assert "Row key: r1\nData: test-value\n" in out diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py b/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py index ba55d7ce53ca..93a9122cc457 100644 --- a/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py +++ b/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py @@ -17,6 +17,7 @@ import os from pathlib import Path import sys +from typing import Callable, Dict, List, Optional import nox @@ -27,8 +28,9 @@ # WARNING - WARNING - WARNING - WARNING - WARNING # WARNING - WARNING - WARNING - WARNING - WARNING -# Copy `noxfile_config.py` to your directory and modify it instead. +BLACK_VERSION = "black==19.10b0" +# Copy `noxfile_config.py` to your directory and modify it instead. # `TEST_CONFIG` dict is a configuration hook that allows users to # modify the test configurations. The values here should be in sync @@ -37,24 +39,29 @@ TEST_CONFIG = { # You can opt out from the test for specific Python versions. - 'ignored_versions': ["2.7"], - + "ignored_versions": [], + # Old samples are opted out of enforcing Python type hints + # All new samples should feature them + "enforce_type_hints": False, # An envvar key for determining the project id to use. Change it # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. You can also use your own string # to use your own Cloud project. - 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', + "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', - + # If you need to use a specific version of pip, + # change pip_version_override to the string representation + # of the version number, for example, "20.2.4" + "pip_version_override": None, # A dictionary you want to inject into your test. Don't put any # secrets here. These values will override predefined values. - 'envs': {}, + "envs": {}, } try: # Ensure we can import noxfile_config in the project's directory. 
- sys.path.append('.') + sys.path.append(".") from noxfile_config import TEST_CONFIG_OVERRIDE except ImportError as e: print("No user noxfile_config found: detail: {}".format(e)) @@ -64,36 +71,43 @@ TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) -def get_pytest_env_vars(): +def get_pytest_env_vars() -> Dict[str, str]: """Returns a dict for pytest invocation.""" ret = {} # Override the GCLOUD_PROJECT and the alias. - env_key = TEST_CONFIG['gcloud_project_env'] + env_key = TEST_CONFIG["gcloud_project_env"] # This should error out if not set. - ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] + ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] # Apply user supplied envs. - ret.update(TEST_CONFIG['envs']) + ret.update(TEST_CONFIG["envs"]) return ret # DO NOT EDIT - automatically generated. -# All versions used to tested samples. -ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] +# All versions used to test samples. +ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"] # Any default versions that should be ignored. -IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] +IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) -INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False)) +INSTALL_LIBRARY_FROM_SOURCE = os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False) in ( + "True", + "true", +) + +# Error if a python version is missing +nox.options.error_on_missing_interpreters = True + # # Style Checks # -def _determine_local_import_names(start_dir): +def _determine_local_import_names(start_dir: str) -> List[str]: """Determines all import names that should be considered "local". This is used when running the linter to insure that import order is @@ -131,18 +145,34 @@ def _determine_local_import_names(start_dir): @nox.session -def lint(session): - session.install("flake8", "flake8-import-order") +def lint(session: nox.sessions.Session) -> None: + if not TEST_CONFIG["enforce_type_hints"]: + session.install("flake8", "flake8-import-order") + else: + session.install("flake8", "flake8-import-order", "flake8-annotations") local_names = _determine_local_import_names(".") args = FLAKE8_COMMON_ARGS + [ "--application-import-names", ",".join(local_names), - "." 
+ ".", ] session.run("flake8", *args) +# +# Black +# + + +@nox.session +def blacken(session: nox.sessions.Session) -> None: + session.install(BLACK_VERSION) + python_files = [path for path in os.listdir(".") if path.endswith(".py")] + + session.run("black", *python_files) + + # # Sample Tests # @@ -151,13 +181,24 @@ def lint(session): PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] -def _session_tests(session, post_install=None): +def _session_tests( + session: nox.sessions.Session, post_install: Callable = None +) -> None: + if TEST_CONFIG["pip_version_override"]: + pip_version = TEST_CONFIG["pip_version_override"] + session.install(f"pip=={pip_version}") """Runs py.test for a particular project.""" if os.path.exists("requirements.txt"): - session.install("-r", "requirements.txt") + if os.path.exists("constraints.txt"): + session.install("-r", "requirements.txt", "-c", "constraints.txt") + else: + session.install("-r", "requirements.txt") if os.path.exists("requirements-test.txt"): - session.install("-r", "requirements-test.txt") + if os.path.exists("constraints-test.txt"): + session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") + else: + session.install("-r", "requirements-test.txt") if INSTALL_LIBRARY_FROM_SOURCE: session.install("-e", _get_repo_root()) @@ -172,19 +213,19 @@ def _session_tests(session, post_install=None): # on travis where slow and flaky tests are excluded. # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html success_codes=[0, 5], - env=get_pytest_env_vars() + env=get_pytest_env_vars(), ) @nox.session(python=ALL_VERSIONS) -def py(session): +def py(session: nox.sessions.Session) -> None: """Runs py.test for a sample using the specified version of Python.""" if session.python in TESTED_VERSIONS: _session_tests(session) else: - session.skip("SKIPPED: {} tests are disabled for this sample.".format( - session.python - )) + session.skip( + "SKIPPED: {} tests are disabled for this sample.".format(session.python) + ) # @@ -192,7 +233,7 @@ def py(session): # -def _get_repo_root(): +def _get_repo_root() -> Optional[str]: """ Returns the root folder of the project. """ # Get root of this repository. Assume we don't have directories nested deeper than 10 items. 
p = Path(os.getcwd()) @@ -201,6 +242,11 @@ def _get_repo_root(): break if Path(p / ".git").exists(): return str(p) + # .git is not available in repos cloned via Cloud Build + # setup.py is always in the library's root, so use that instead + # https://github.com/googleapis/synthtool/issues/792 + if Path(p / "setup.py").exists(): + return str(p) p = p.parent raise Exception("Unable to detect repository root.") @@ -210,7 +256,7 @@ def _get_repo_root(): @nox.session @nox.parametrize("path", GENERATED_READMES) -def readmegen(session, path): +def readmegen(session: nox.sessions.Session, path: str) -> None: """(Re-)generates the readme for a sample.""" session.install("jinja2", "pyyaml") dir_ = os.path.dirname(path) diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/filter_snippets.py b/packages/google-cloud-bigtable/samples/snippets/filters/filter_snippets.py index c815eae99b8e..4211378f3deb 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/filter_snippets.py +++ b/packages/google-cloud-bigtable/samples/snippets/filters/filter_snippets.py @@ -29,7 +29,7 @@ def filter_limit_row_sample(project_id, instance_id, table_id): instance = client.instance(instance_id) table = instance.table(table_id) - rows = table.read_rows(filter_=row_filters.RowSampleFilter(.75)) + rows = table.read_rows(filter_=row_filters.RowSampleFilter(0.75)) for row in rows: print_row(row) @@ -42,7 +42,8 @@ def filter_limit_row_regex(project_id, instance_id, table_id): table = instance.table(table_id) rows = table.read_rows( - filter_=row_filters.RowKeyRegexFilter(".*#20190501$".encode("utf-8"))) + filter_=row_filters.RowKeyRegexFilter(".*#20190501$".encode("utf-8")) + ) for row in rows: print_row(row) @@ -91,7 +92,8 @@ def filter_limit_col_family_regex(project_id, instance_id, table_id): table = instance.table(table_id) rows = table.read_rows( - filter_=row_filters.FamilyNameRegexFilter("stats_.*$".encode("utf-8"))) + filter_=row_filters.FamilyNameRegexFilter("stats_.*$".encode("utf-8")) + ) for row in rows: print_row(row) @@ -104,8 +106,8 @@ def filter_limit_col_qualifier_regex(project_id, instance_id, table_id): table = instance.table(table_id) rows = table.read_rows( - filter_=row_filters.ColumnQualifierRegexFilter( - "connected_.*$".encode("utf-8"))) + filter_=row_filters.ColumnQualifierRegexFilter("connected_.*$".encode("utf-8")) + ) for row in rows: print_row(row) @@ -118,10 +120,10 @@ def filter_limit_col_range(project_id, instance_id, table_id): table = instance.table(table_id) rows = table.read_rows( - filter_=row_filters.ColumnRangeFilter("cell_plan", - b"data_plan_01gb", - b"data_plan_10gb", - inclusive_end=False)) + filter_=row_filters.ColumnRangeFilter( + "cell_plan", b"data_plan_01gb", b"data_plan_10gb", inclusive_end=False + ) + ) for row in rows: print_row(row) @@ -134,7 +136,8 @@ def filter_limit_value_range(project_id, instance_id, table_id): table = instance.table(table_id) rows = table.read_rows( - filter_=row_filters.ValueRangeFilter(b"PQ2A.190405", b"PQ2A.190406")) + filter_=row_filters.ValueRangeFilter(b"PQ2A.190405", b"PQ2A.190406") + ) for row in rows: print_row(row) @@ -150,7 +153,8 @@ def filter_limit_value_regex(project_id, instance_id, table_id): table = instance.table(table_id) rows = table.read_rows( - filter_=row_filters.ValueRegexFilter("PQ2A.*$".encode("utf-8"))) + filter_=row_filters.ValueRegexFilter("PQ2A.*$".encode("utf-8")) + ) for row in rows: print_row(row) @@ -165,8 +169,8 @@ def filter_limit_timestamp_range(project_id, instance_id, table_id): end = 
datetime.datetime(2019, 5, 1) rows = table.read_rows( - filter_=row_filters.TimestampRangeFilter( - row_filters.TimestampRange(end=end))) + filter_=row_filters.TimestampRangeFilter(row_filters.TimestampRange(end=end)) + ) for row in rows: print_row(row) @@ -202,8 +206,7 @@ def filter_modify_strip_value(project_id, instance_id, table_id): instance = client.instance(instance_id) table = instance.table(table_id) - rows = table.read_rows( - filter_=row_filters.StripValueTransformerFilter(True)) + rows = table.read_rows(filter_=row_filters.StripValueTransformerFilter(True)) for row in rows: print_row(row) @@ -215,8 +218,7 @@ def filter_modify_apply_label(project_id, instance_id, table_id): instance = client.instance(instance_id) table = instance.table(table_id) - rows = table.read_rows( - filter_=row_filters.ApplyLabelFilter(label="labelled")) + rows = table.read_rows(filter_=row_filters.ApplyLabelFilter(label="labelled")) for row in rows: print_row(row) @@ -228,9 +230,14 @@ def filter_composing_chain(project_id, instance_id, table_id): instance = client.instance(instance_id) table = instance.table(table_id) - rows = table.read_rows(filter_=row_filters.RowFilterChain( - filters=[row_filters.CellsColumnLimitFilter(1), - row_filters.FamilyNameRegexFilter("cell_plan")])) + rows = table.read_rows( + filter_=row_filters.RowFilterChain( + filters=[ + row_filters.CellsColumnLimitFilter(1), + row_filters.FamilyNameRegexFilter("cell_plan"), + ] + ) + ) for row in rows: print_row(row) @@ -242,9 +249,14 @@ def filter_composing_interleave(project_id, instance_id, table_id): instance = client.instance(instance_id) table = instance.table(table_id) - rows = table.read_rows(filter_=row_filters.RowFilterUnion( - filters=[row_filters.ValueRegexFilter("true"), - row_filters.ColumnQualifierRegexFilter("os_build")])) + rows = table.read_rows( + filter_=row_filters.RowFilterUnion( + filters=[ + row_filters.ValueRegexFilter("true"), + row_filters.ColumnQualifierRegexFilter("os_build"), + ] + ) + ) for row in rows: print_row(row) @@ -256,16 +268,18 @@ def filter_composing_condition(project_id, instance_id, table_id): instance = client.instance(instance_id) table = instance.table(table_id) - rows = table.read_rows(filter_=row_filters.ConditionalRowFilter( - base_filter=row_filters.RowFilterChain(filters=[ - row_filters.ColumnQualifierRegexFilter( - "data_plan_10gb"), - row_filters.ValueRegexFilter( - "true")]), - true_filter=row_filters.ApplyLabelFilter(label="passed-filter"), - false_filter=row_filters.ApplyLabelFilter(label="filtered-out") - - )) + rows = table.read_rows( + filter_=row_filters.ConditionalRowFilter( + base_filter=row_filters.RowFilterChain( + filters=[ + row_filters.ColumnQualifierRegexFilter("data_plan_10gb"), + row_filters.ValueRegexFilter("true"), + ] + ), + true_filter=row_filters.ApplyLabelFilter(label="passed-filter"), + false_filter=row_filters.ApplyLabelFilter(label="filtered-out"), + ) + ) for row in rows: print_row(row) @@ -275,16 +289,23 @@ def filter_composing_condition(project_id, instance_id, table_id): def print_row(row): - print("Reading data for {}:".format(row.row_key.decode('utf-8'))) + print("Reading data for {}:".format(row.row_key.decode("utf-8"))) for cf, cols in sorted(row.cells.items()): print("Column Family {}".format(cf)) for col, cells in sorted(cols.items()): for cell in cells: - labels = " [{}]".format(",".join(cell.labels)) \ - if len(cell.labels) else "" + labels = ( + " [{}]".format(",".join(cell.labels)) if len(cell.labels) else "" + ) print( - "\t{}: {} 
@{}{}".format(col.decode('utf-8'), - cell.value.decode('utf-8'), - cell.timestamp, labels)) + "\t{}: {} @{}{}".format( + col.decode("utf-8"), + cell.value.decode("utf-8"), + cell.timestamp, + labels, + ) + ) print("") + + # [END bigtable_filters_print] diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/filters_test.py b/packages/google-cloud-bigtable/samples/snippets/filters/filters_test.py index 36dc4a5b1bba..35cf62ff0eaa 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/filters_test.py +++ b/packages/google-cloud-bigtable/samples/snippets/filters/filters_test.py @@ -23,9 +23,9 @@ import filter_snippets -PROJECT = os.environ['GOOGLE_CLOUD_PROJECT'] -BIGTABLE_INSTANCE = os.environ['BIGTABLE_INSTANCE'] -TABLE_ID_PREFIX = 'mobile-time-series-{}' +PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] +BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"] +TABLE_ID_PREFIX = "mobile-time-series-{}" @pytest.fixture(scope="module", autouse=True) @@ -40,11 +40,10 @@ def table_id(): if table.exists(): table.delete() - table.create(column_families={'stats_summary': None, 'cell_plan': None}) + table.create(column_families={"stats_summary": None, "cell_plan": None}) timestamp = datetime.datetime(2019, 5, 1) - timestamp_minus_hr = datetime.datetime(2019, 5, 1) - datetime.timedelta( - hours=1) + timestamp_minus_hr = datetime.datetime(2019, 5, 1) - datetime.timedelta(hours=1) row_keys = [ "phone#4c410523#20190501", @@ -99,98 +98,88 @@ def table_id(): def test_filter_limit_row_sample(capsys, snapshot, table_id): - filter_snippets.filter_limit_row_sample(PROJECT, BIGTABLE_INSTANCE, - table_id) + filter_snippets.filter_limit_row_sample(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() - assert 'Reading data for' in out + assert "Reading data for" in out def test_filter_limit_row_regex(capsys, snapshot, table_id): - filter_snippets.filter_limit_row_regex(PROJECT, BIGTABLE_INSTANCE, - table_id) + filter_snippets.filter_limit_row_regex(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() snapshot.assert_match(out) def test_filter_limit_cells_per_col(capsys, snapshot, table_id): - filter_snippets.filter_limit_cells_per_col(PROJECT, BIGTABLE_INSTANCE, - table_id) + filter_snippets.filter_limit_cells_per_col(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() snapshot.assert_match(out) def test_filter_limit_cells_per_row(capsys, snapshot, table_id): - filter_snippets.filter_limit_cells_per_row(PROJECT, BIGTABLE_INSTANCE, - table_id) + filter_snippets.filter_limit_cells_per_row(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() snapshot.assert_match(out) def test_filter_limit_cells_per_row_offset(capsys, snapshot, table_id): - filter_snippets.filter_limit_cells_per_row_offset(PROJECT, - BIGTABLE_INSTANCE, - table_id) + filter_snippets.filter_limit_cells_per_row_offset( + PROJECT, BIGTABLE_INSTANCE, table_id + ) out, _ = capsys.readouterr() snapshot.assert_match(out) def test_filter_limit_col_family_regex(capsys, snapshot, table_id): - filter_snippets.filter_limit_col_family_regex(PROJECT, BIGTABLE_INSTANCE, - table_id) + filter_snippets.filter_limit_col_family_regex(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() snapshot.assert_match(out) def test_filter_limit_col_qualifier_regex(capsys, snapshot, table_id): - filter_snippets.filter_limit_col_qualifier_regex(PROJECT, - BIGTABLE_INSTANCE, - table_id) + filter_snippets.filter_limit_col_qualifier_regex( + PROJECT, BIGTABLE_INSTANCE, table_id + ) out, _ = 
capsys.readouterr() snapshot.assert_match(out) def test_filter_limit_col_range(capsys, snapshot, table_id): - filter_snippets.filter_limit_col_range(PROJECT, BIGTABLE_INSTANCE, - table_id) + filter_snippets.filter_limit_col_range(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() snapshot.assert_match(out) def test_filter_limit_value_range(capsys, snapshot, table_id): - filter_snippets.filter_limit_value_range(PROJECT, BIGTABLE_INSTANCE, - table_id) + filter_snippets.filter_limit_value_range(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() snapshot.assert_match(out) def test_filter_limit_value_regex(capsys, snapshot, table_id): - filter_snippets.filter_limit_value_regex(PROJECT, BIGTABLE_INSTANCE, - table_id) + filter_snippets.filter_limit_value_regex(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() snapshot.assert_match(out) def test_filter_limit_timestamp_range(capsys, snapshot, table_id): - filter_snippets.filter_limit_timestamp_range(PROJECT, BIGTABLE_INSTANCE, - table_id) + filter_snippets.filter_limit_timestamp_range(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() snapshot.assert_match(out) def test_filter_limit_block_all(capsys, snapshot, table_id): - filter_snippets.filter_limit_block_all(PROJECT, BIGTABLE_INSTANCE, - table_id) + filter_snippets.filter_limit_block_all(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() snapshot.assert_match(out) @@ -204,40 +193,35 @@ def test_filter_limit_pass_all(capsys, snapshot, table_id): def test_filter_modify_strip_value(capsys, snapshot, table_id): - filter_snippets.filter_modify_strip_value(PROJECT, BIGTABLE_INSTANCE, - table_id) + filter_snippets.filter_modify_strip_value(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() snapshot.assert_match(out) def test_filter_modify_apply_label(capsys, snapshot, table_id): - filter_snippets.filter_modify_apply_label(PROJECT, BIGTABLE_INSTANCE, - table_id) + filter_snippets.filter_modify_apply_label(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() snapshot.assert_match(out) def test_filter_composing_chain(capsys, snapshot, table_id): - filter_snippets.filter_composing_chain(PROJECT, BIGTABLE_INSTANCE, - table_id) + filter_snippets.filter_composing_chain(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() snapshot.assert_match(out) def test_filter_composing_interleave(capsys, snapshot, table_id): - filter_snippets.filter_composing_interleave(PROJECT, BIGTABLE_INSTANCE, - table_id) + filter_snippets.filter_composing_interleave(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() snapshot.assert_match(out) def test_filter_composing_condition(capsys, snapshot, table_id): - filter_snippets.filter_composing_condition(PROJECT, BIGTABLE_INSTANCE, - table_id) + filter_snippets.filter_composing_condition(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() snapshot.assert_match(out) diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py index ba55d7ce53ca..93a9122cc457 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py @@ -17,6 +17,7 @@ import os from pathlib import Path import sys +from typing import Callable, Dict, List, Optional import nox @@ -27,8 +28,9 @@ # WARNING - WARNING - WARNING - WARNING - WARNING # WARNING - WARNING - WARNING - WARNING - WARNING 
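As the warning banner notes, the generated noxfile is not meant to be edited directly; per-sample tweaks go in a sibling `noxfile_config.py`, whose `TEST_CONFIG_OVERRIDE` dict is merged into `TEST_CONFIG`. A minimal override file using the keys defined in the updated config (the values shown are examples, not defaults):

# noxfile_config.py, placed next to the sample's noxfile.py
TEST_CONFIG_OVERRIDE = {
    # Skip Python versions this sample does not support.
    "ignored_versions": ["3.6"],
    # New samples should enforce type hints via flake8-annotations.
    "enforce_type_hints": True,
    # Use a build-specific project instead of GOOGLE_CLOUD_PROJECT.
    "gcloud_project_env": "BUILD_SPECIFIC_GCLOUD_PROJECT",
    # Pin pip only if the default version breaks this sample.
    "pip_version_override": None,
    # Extra environment variables for pytest; never put secrets here.
    "envs": {},
}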
-# Copy `noxfile_config.py` to your directory and modify it instead. +BLACK_VERSION = "black==19.10b0" +# Copy `noxfile_config.py` to your directory and modify it instead. # `TEST_CONFIG` dict is a configuration hook that allows users to # modify the test configurations. The values here should be in sync @@ -37,24 +39,29 @@ TEST_CONFIG = { # You can opt out from the test for specific Python versions. - 'ignored_versions': ["2.7"], - + "ignored_versions": [], + # Old samples are opted out of enforcing Python type hints + # All new samples should feature them + "enforce_type_hints": False, # An envvar key for determining the project id to use. Change it # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. You can also use your own string # to use your own Cloud project. - 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', + "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', - + # If you need to use a specific version of pip, + # change pip_version_override to the string representation + # of the version number, for example, "20.2.4" + "pip_version_override": None, # A dictionary you want to inject into your test. Don't put any # secrets here. These values will override predefined values. - 'envs': {}, + "envs": {}, } try: # Ensure we can import noxfile_config in the project's directory. - sys.path.append('.') + sys.path.append(".") from noxfile_config import TEST_CONFIG_OVERRIDE except ImportError as e: print("No user noxfile_config found: detail: {}".format(e)) @@ -64,36 +71,43 @@ TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) -def get_pytest_env_vars(): +def get_pytest_env_vars() -> Dict[str, str]: """Returns a dict for pytest invocation.""" ret = {} # Override the GCLOUD_PROJECT and the alias. - env_key = TEST_CONFIG['gcloud_project_env'] + env_key = TEST_CONFIG["gcloud_project_env"] # This should error out if not set. - ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] + ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] # Apply user supplied envs. - ret.update(TEST_CONFIG['envs']) + ret.update(TEST_CONFIG["envs"]) return ret # DO NOT EDIT - automatically generated. -# All versions used to tested samples. -ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] +# All versions used to test samples. +ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"] # Any default versions that should be ignored. -IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] +IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) -INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False)) +INSTALL_LIBRARY_FROM_SOURCE = os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False) in ( + "True", + "true", +) + +# Error if a python version is missing +nox.options.error_on_missing_interpreters = True + # # Style Checks # -def _determine_local_import_names(start_dir): +def _determine_local_import_names(start_dir: str) -> List[str]: """Determines all import names that should be considered "local". 
This is used when running the linter to insure that import order is @@ -131,18 +145,34 @@ def _determine_local_import_names(start_dir): @nox.session -def lint(session): - session.install("flake8", "flake8-import-order") +def lint(session: nox.sessions.Session) -> None: + if not TEST_CONFIG["enforce_type_hints"]: + session.install("flake8", "flake8-import-order") + else: + session.install("flake8", "flake8-import-order", "flake8-annotations") local_names = _determine_local_import_names(".") args = FLAKE8_COMMON_ARGS + [ "--application-import-names", ",".join(local_names), - "." + ".", ] session.run("flake8", *args) +# +# Black +# + + +@nox.session +def blacken(session: nox.sessions.Session) -> None: + session.install(BLACK_VERSION) + python_files = [path for path in os.listdir(".") if path.endswith(".py")] + + session.run("black", *python_files) + + # # Sample Tests # @@ -151,13 +181,24 @@ def lint(session): PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] -def _session_tests(session, post_install=None): +def _session_tests( + session: nox.sessions.Session, post_install: Callable = None +) -> None: + if TEST_CONFIG["pip_version_override"]: + pip_version = TEST_CONFIG["pip_version_override"] + session.install(f"pip=={pip_version}") """Runs py.test for a particular project.""" if os.path.exists("requirements.txt"): - session.install("-r", "requirements.txt") + if os.path.exists("constraints.txt"): + session.install("-r", "requirements.txt", "-c", "constraints.txt") + else: + session.install("-r", "requirements.txt") if os.path.exists("requirements-test.txt"): - session.install("-r", "requirements-test.txt") + if os.path.exists("constraints-test.txt"): + session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") + else: + session.install("-r", "requirements-test.txt") if INSTALL_LIBRARY_FROM_SOURCE: session.install("-e", _get_repo_root()) @@ -172,19 +213,19 @@ def _session_tests(session, post_install=None): # on travis where slow and flaky tests are excluded. # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html success_codes=[0, 5], - env=get_pytest_env_vars() + env=get_pytest_env_vars(), ) @nox.session(python=ALL_VERSIONS) -def py(session): +def py(session: nox.sessions.Session) -> None: """Runs py.test for a sample using the specified version of Python.""" if session.python in TESTED_VERSIONS: _session_tests(session) else: - session.skip("SKIPPED: {} tests are disabled for this sample.".format( - session.python - )) + session.skip( + "SKIPPED: {} tests are disabled for this sample.".format(session.python) + ) # @@ -192,7 +233,7 @@ def py(session): # -def _get_repo_root(): +def _get_repo_root() -> Optional[str]: """ Returns the root folder of the project. """ # Get root of this repository. Assume we don't have directories nested deeper than 10 items. 
p = Path(os.getcwd()) @@ -201,6 +242,11 @@ def _get_repo_root(): break if Path(p / ".git").exists(): return str(p) + # .git is not available in repos cloned via Cloud Build + # setup.py is always in the library's root, so use that instead + # https://github.com/googleapis/synthtool/issues/792 + if Path(p / "setup.py").exists(): + return str(p) p = p.parent raise Exception("Unable to detect repository root.") @@ -210,7 +256,7 @@ def _get_repo_root(): @nox.session @nox.parametrize("path", GENERATED_READMES) -def readmegen(session, path): +def readmegen(session: nox.sessions.Session, path: str) -> None: """(Re-)generates the readme for a sample.""" session.install("jinja2", "pyyaml") dir_ = os.path.dirname(path) diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py index ba55d7ce53ca..93a9122cc457 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py @@ -17,6 +17,7 @@ import os from pathlib import Path import sys +from typing import Callable, Dict, List, Optional import nox @@ -27,8 +28,9 @@ # WARNING - WARNING - WARNING - WARNING - WARNING # WARNING - WARNING - WARNING - WARNING - WARNING -# Copy `noxfile_config.py` to your directory and modify it instead. +BLACK_VERSION = "black==19.10b0" +# Copy `noxfile_config.py` to your directory and modify it instead. # `TEST_CONFIG` dict is a configuration hook that allows users to # modify the test configurations. The values here should be in sync @@ -37,24 +39,29 @@ TEST_CONFIG = { # You can opt out from the test for specific Python versions. - 'ignored_versions': ["2.7"], - + "ignored_versions": [], + # Old samples are opted out of enforcing Python type hints + # All new samples should feature them + "enforce_type_hints": False, # An envvar key for determining the project id to use. Change it # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. You can also use your own string # to use your own Cloud project. - 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', + "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', - + # If you need to use a specific version of pip, + # change pip_version_override to the string representation + # of the version number, for example, "20.2.4" + "pip_version_override": None, # A dictionary you want to inject into your test. Don't put any # secrets here. These values will override predefined values. - 'envs': {}, + "envs": {}, } try: # Ensure we can import noxfile_config in the project's directory. - sys.path.append('.') + sys.path.append(".") from noxfile_config import TEST_CONFIG_OVERRIDE except ImportError as e: print("No user noxfile_config found: detail: {}".format(e)) @@ -64,36 +71,43 @@ TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) -def get_pytest_env_vars(): +def get_pytest_env_vars() -> Dict[str, str]: """Returns a dict for pytest invocation.""" ret = {} # Override the GCLOUD_PROJECT and the alias. - env_key = TEST_CONFIG['gcloud_project_env'] + env_key = TEST_CONFIG["gcloud_project_env"] # This should error out if not set. - ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] + ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] # Apply user supplied envs. - ret.update(TEST_CONFIG['envs']) + ret.update(TEST_CONFIG["envs"]) return ret # DO NOT EDIT - automatically generated. -# All versions used to tested samples. 
-ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] +# All versions used to test samples. +ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"] # Any default versions that should be ignored. -IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] +IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) -INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False)) +INSTALL_LIBRARY_FROM_SOURCE = os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False) in ( + "True", + "true", +) + +# Error if a python version is missing +nox.options.error_on_missing_interpreters = True + # # Style Checks # -def _determine_local_import_names(start_dir): +def _determine_local_import_names(start_dir: str) -> List[str]: """Determines all import names that should be considered "local". This is used when running the linter to insure that import order is @@ -131,18 +145,34 @@ def _determine_local_import_names(start_dir): @nox.session -def lint(session): - session.install("flake8", "flake8-import-order") +def lint(session: nox.sessions.Session) -> None: + if not TEST_CONFIG["enforce_type_hints"]: + session.install("flake8", "flake8-import-order") + else: + session.install("flake8", "flake8-import-order", "flake8-annotations") local_names = _determine_local_import_names(".") args = FLAKE8_COMMON_ARGS + [ "--application-import-names", ",".join(local_names), - "." + ".", ] session.run("flake8", *args) +# +# Black +# + + +@nox.session +def blacken(session: nox.sessions.Session) -> None: + session.install(BLACK_VERSION) + python_files = [path for path in os.listdir(".") if path.endswith(".py")] + + session.run("black", *python_files) + + # # Sample Tests # @@ -151,13 +181,24 @@ def lint(session): PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] -def _session_tests(session, post_install=None): +def _session_tests( + session: nox.sessions.Session, post_install: Callable = None +) -> None: + if TEST_CONFIG["pip_version_override"]: + pip_version = TEST_CONFIG["pip_version_override"] + session.install(f"pip=={pip_version}") """Runs py.test for a particular project.""" if os.path.exists("requirements.txt"): - session.install("-r", "requirements.txt") + if os.path.exists("constraints.txt"): + session.install("-r", "requirements.txt", "-c", "constraints.txt") + else: + session.install("-r", "requirements.txt") if os.path.exists("requirements-test.txt"): - session.install("-r", "requirements-test.txt") + if os.path.exists("constraints-test.txt"): + session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") + else: + session.install("-r", "requirements-test.txt") if INSTALL_LIBRARY_FROM_SOURCE: session.install("-e", _get_repo_root()) @@ -172,19 +213,19 @@ def _session_tests(session, post_install=None): # on travis where slow and flaky tests are excluded. 
# See http://doc.pytest.org/en/latest/_modules/_pytest/main.html success_codes=[0, 5], - env=get_pytest_env_vars() + env=get_pytest_env_vars(), ) @nox.session(python=ALL_VERSIONS) -def py(session): +def py(session: nox.sessions.Session) -> None: """Runs py.test for a sample using the specified version of Python.""" if session.python in TESTED_VERSIONS: _session_tests(session) else: - session.skip("SKIPPED: {} tests are disabled for this sample.".format( - session.python - )) + session.skip( + "SKIPPED: {} tests are disabled for this sample.".format(session.python) + ) # @@ -192,7 +233,7 @@ def py(session): # -def _get_repo_root(): +def _get_repo_root() -> Optional[str]: """ Returns the root folder of the project. """ # Get root of this repository. Assume we don't have directories nested deeper than 10 items. p = Path(os.getcwd()) @@ -201,6 +242,11 @@ def _get_repo_root(): break if Path(p / ".git").exists(): return str(p) + # .git is not available in repos cloned via Cloud Build + # setup.py is always in the library's root, so use that instead + # https://github.com/googleapis/synthtool/issues/792 + if Path(p / "setup.py").exists(): + return str(p) p = p.parent raise Exception("Unable to detect repository root.") @@ -210,7 +256,7 @@ def _get_repo_root(): @nox.session @nox.parametrize("path", GENERATED_READMES) -def readmegen(session, path): +def readmegen(session: nox.sessions.Session, path: str) -> None: """(Re-)generates the readme for a sample.""" session.install("jinja2", "pyyaml") dir_ = os.path.dirname(path) diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/read_snippets.py b/packages/google-cloud-bigtable/samples/snippets/reads/read_snippets.py index 6936b4c64c8b..afd0955b8bbf 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/read_snippets.py +++ b/packages/google-cloud-bigtable/samples/snippets/reads/read_snippets.py @@ -43,7 +43,7 @@ def read_row_partial(project_id, instance_id, table_id): table = instance.table(table_id) row_key = "phone#4c410523#20190501" - col_filter = row_filters.ColumnQualifierRegexFilter(b'os_build') + col_filter = row_filters.ColumnQualifierRegexFilter(b"os_build") row = table.read_row(row_key, filter_=col_filter) print_row(row) @@ -74,8 +74,8 @@ def read_row_range(project_id, instance_id, table_id): row_set = RowSet() row_set.add_row_range_from_keys( - start_key=b"phone#4c410523#20190501", - end_key=b"phone#4c410523#201906201") + start_key=b"phone#4c410523#20190501", end_key=b"phone#4c410523#201906201" + ) rows = table.read_rows(row_set=row_set) for row in rows: @@ -91,11 +91,11 @@ def read_row_ranges(project_id, instance_id, table_id): row_set = RowSet() row_set.add_row_range_from_keys( - start_key=b"phone#4c410523#20190501", - end_key=b"phone#4c410523#201906201") + start_key=b"phone#4c410523#20190501", end_key=b"phone#4c410523#201906201" + ) row_set.add_row_range_from_keys( - start_key=b"phone#5c10102#20190501", - end_key=b"phone#5c10102#201906201") + start_key=b"phone#5c10102#20190501", end_key=b"phone#5c10102#201906201" + ) rows = table.read_rows(row_set=row_set) for row in rows: @@ -112,8 +112,7 @@ def read_prefix(project_id, instance_id, table_id): end_key = prefix[:-1] + chr(ord(prefix[-1]) + 1) row_set = RowSet() - row_set.add_row_range_from_keys(prefix.encode("utf-8"), - end_key.encode("utf-8")) + row_set.add_row_range_from_keys(prefix.encode("utf-8"), end_key.encode("utf-8")) rows = table.read_rows(row_set=row_set) for row in rows: @@ -137,16 +136,23 @@ def read_filter(project_id, instance_id, table_id): def 
print_row(row): - print("Reading data for {}:".format(row.row_key.decode('utf-8'))) + print("Reading data for {}:".format(row.row_key.decode("utf-8"))) for cf, cols in sorted(row.cells.items()): print("Column Family {}".format(cf)) for col, cells in sorted(cols.items()): for cell in cells: - labels = " [{}]".format(",".join(cell.labels)) \ - if len(cell.labels) else "" + labels = ( + " [{}]".format(",".join(cell.labels)) if len(cell.labels) else "" + ) print( - "\t{}: {} @{}{}".format(col.decode('utf-8'), - cell.value.decode('utf-8'), - cell.timestamp, labels)) + "\t{}: {} @{}{}".format( + col.decode("utf-8"), + cell.value.decode("utf-8"), + cell.timestamp, + labels, + ) + ) print("") + + # [END bigtable_reads_print] diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/reads_test.py b/packages/google-cloud-bigtable/samples/snippets/reads/reads_test.py index fc3421000229..0b61e341f7ed 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/reads_test.py +++ b/packages/google-cloud-bigtable/samples/snippets/reads/reads_test.py @@ -21,9 +21,9 @@ import read_snippets -PROJECT = os.environ['GOOGLE_CLOUD_PROJECT'] -BIGTABLE_INSTANCE = os.environ['BIGTABLE_INSTANCE'] -TABLE_ID_PREFIX = 'mobile-time-series-{}' +PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] +BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"] +TABLE_ID_PREFIX = "mobile-time-series-{}" @pytest.fixture(scope="module", autouse=True) @@ -36,7 +36,7 @@ def table_id(): if table.exists(): table.delete() - table.create(column_families={'stats_summary': None}) + table.create(column_families={"stats_summary": None}) # table = instance.table(table_id) diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py index ba55d7ce53ca..93a9122cc457 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py @@ -17,6 +17,7 @@ import os from pathlib import Path import sys +from typing import Callable, Dict, List, Optional import nox @@ -27,8 +28,9 @@ # WARNING - WARNING - WARNING - WARNING - WARNING # WARNING - WARNING - WARNING - WARNING - WARNING -# Copy `noxfile_config.py` to your directory and modify it instead. +BLACK_VERSION = "black==19.10b0" +# Copy `noxfile_config.py` to your directory and modify it instead. # `TEST_CONFIG` dict is a configuration hook that allows users to # modify the test configurations. The values here should be in sync @@ -37,24 +39,29 @@ TEST_CONFIG = { # You can opt out from the test for specific Python versions. - 'ignored_versions': ["2.7"], - + "ignored_versions": [], + # Old samples are opted out of enforcing Python type hints + # All new samples should feature them + "enforce_type_hints": False, # An envvar key for determining the project id to use. Change it # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. You can also use your own string # to use your own Cloud project. - 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', + "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', - + # If you need to use a specific version of pip, + # change pip_version_override to the string representation + # of the version number, for example, "20.2.4" + "pip_version_override": None, # A dictionary you want to inject into your test. Don't put any # secrets here. These values will override predefined values. 
- 'envs': {}, + "envs": {}, } try: # Ensure we can import noxfile_config in the project's directory. - sys.path.append('.') + sys.path.append(".") from noxfile_config import TEST_CONFIG_OVERRIDE except ImportError as e: print("No user noxfile_config found: detail: {}".format(e)) @@ -64,36 +71,43 @@ TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) -def get_pytest_env_vars(): +def get_pytest_env_vars() -> Dict[str, str]: """Returns a dict for pytest invocation.""" ret = {} # Override the GCLOUD_PROJECT and the alias. - env_key = TEST_CONFIG['gcloud_project_env'] + env_key = TEST_CONFIG["gcloud_project_env"] # This should error out if not set. - ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] + ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] # Apply user supplied envs. - ret.update(TEST_CONFIG['envs']) + ret.update(TEST_CONFIG["envs"]) return ret # DO NOT EDIT - automatically generated. -# All versions used to tested samples. -ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] +# All versions used to test samples. +ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"] # Any default versions that should be ignored. -IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] +IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) -INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False)) +INSTALL_LIBRARY_FROM_SOURCE = os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False) in ( + "True", + "true", +) + +# Error if a python version is missing +nox.options.error_on_missing_interpreters = True + # # Style Checks # -def _determine_local_import_names(start_dir): +def _determine_local_import_names(start_dir: str) -> List[str]: """Determines all import names that should be considered "local". This is used when running the linter to insure that import order is @@ -131,18 +145,34 @@ def _determine_local_import_names(start_dir): @nox.session -def lint(session): - session.install("flake8", "flake8-import-order") +def lint(session: nox.sessions.Session) -> None: + if not TEST_CONFIG["enforce_type_hints"]: + session.install("flake8", "flake8-import-order") + else: + session.install("flake8", "flake8-import-order", "flake8-annotations") local_names = _determine_local_import_names(".") args = FLAKE8_COMMON_ARGS + [ "--application-import-names", ",".join(local_names), - "." 
+ ".", ] session.run("flake8", *args) +# +# Black +# + + +@nox.session +def blacken(session: nox.sessions.Session) -> None: + session.install(BLACK_VERSION) + python_files = [path for path in os.listdir(".") if path.endswith(".py")] + + session.run("black", *python_files) + + # # Sample Tests # @@ -151,13 +181,24 @@ def lint(session): PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] -def _session_tests(session, post_install=None): +def _session_tests( + session: nox.sessions.Session, post_install: Callable = None +) -> None: + if TEST_CONFIG["pip_version_override"]: + pip_version = TEST_CONFIG["pip_version_override"] + session.install(f"pip=={pip_version}") """Runs py.test for a particular project.""" if os.path.exists("requirements.txt"): - session.install("-r", "requirements.txt") + if os.path.exists("constraints.txt"): + session.install("-r", "requirements.txt", "-c", "constraints.txt") + else: + session.install("-r", "requirements.txt") if os.path.exists("requirements-test.txt"): - session.install("-r", "requirements-test.txt") + if os.path.exists("constraints-test.txt"): + session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") + else: + session.install("-r", "requirements-test.txt") if INSTALL_LIBRARY_FROM_SOURCE: session.install("-e", _get_repo_root()) @@ -172,19 +213,19 @@ def _session_tests(session, post_install=None): # on travis where slow and flaky tests are excluded. # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html success_codes=[0, 5], - env=get_pytest_env_vars() + env=get_pytest_env_vars(), ) @nox.session(python=ALL_VERSIONS) -def py(session): +def py(session: nox.sessions.Session) -> None: """Runs py.test for a sample using the specified version of Python.""" if session.python in TESTED_VERSIONS: _session_tests(session) else: - session.skip("SKIPPED: {} tests are disabled for this sample.".format( - session.python - )) + session.skip( + "SKIPPED: {} tests are disabled for this sample.".format(session.python) + ) # @@ -192,7 +233,7 @@ def py(session): # -def _get_repo_root(): +def _get_repo_root() -> Optional[str]: """ Returns the root folder of the project. """ # Get root of this repository. Assume we don't have directories nested deeper than 10 items. 
p = Path(os.getcwd()) @@ -201,6 +242,11 @@ def _get_repo_root(): break if Path(p / ".git").exists(): return str(p) + # .git is not available in repos cloned via Cloud Build + # setup.py is always in the library's root, so use that instead + # https://github.com/googleapis/synthtool/issues/792 + if Path(p / "setup.py").exists(): + return str(p) p = p.parent raise Exception("Unable to detect repository root.") @@ -210,7 +256,7 @@ def _get_repo_root(): @nox.session @nox.parametrize("path", GENERATED_READMES) -def readmegen(session, path): +def readmegen(session: nox.sessions.Session, path: str) -> None: """(Re-)generates the readme for a sample.""" session.install("jinja2", "pyyaml") dir_ = os.path.dirname(path) diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/write_batch.py b/packages/google-cloud-bigtable/samples/snippets/writes/write_batch.py index ecc8f273b0a6..fd51172420b2 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/write_batch.py +++ b/packages/google-cloud-bigtable/samples/snippets/writes/write_batch.py @@ -26,30 +26,22 @@ def write_batch(project_id, instance_id, table_id): timestamp = datetime.datetime.utcnow() column_family_id = "stats_summary" - rows = [table.direct_row("tablet#a0b81f74#20190501"), - table.direct_row("tablet#a0b81f74#20190502")] - - rows[0].set_cell(column_family_id, - "connected_wifi", - 1, - timestamp) - rows[0].set_cell(column_family_id, - "os_build", - "12155.0.0-rc1", - timestamp) - rows[1].set_cell(column_family_id, - "connected_wifi", - 1, - timestamp) - rows[1].set_cell(column_family_id, - "os_build", - "12145.0.0-rc6", - timestamp) + rows = [ + table.direct_row("tablet#a0b81f74#20190501"), + table.direct_row("tablet#a0b81f74#20190502"), + ] + + rows[0].set_cell(column_family_id, "connected_wifi", 1, timestamp) + rows[0].set_cell(column_family_id, "os_build", "12155.0.0-rc1", timestamp) + rows[1].set_cell(column_family_id, "connected_wifi", 1, timestamp) + rows[1].set_cell(column_family_id, "os_build", "12145.0.0-rc6", timestamp) response = table.mutate_rows(rows) for i, status in enumerate(response): if status.code != 0: print("Error writing row: {}".format(status.message)) - print('Successfully wrote 2 rows.') + print("Successfully wrote 2 rows.") + + # [END bigtable_writes_batch] diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/write_conditionally.py b/packages/google-cloud-bigtable/samples/snippets/writes/write_conditionally.py index 5f3d4d607dc8..7fb640aad631 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/write_conditionally.py +++ b/packages/google-cloud-bigtable/samples/snippets/writes/write_conditionally.py @@ -30,15 +30,17 @@ def write_conditional(project_id, instance_id, table_id): row_key = "phone#4c410523#20190501" row_filter = row_filters.RowFilterChain( - filters=[row_filters.FamilyNameRegexFilter(column_family_id), - row_filters.ColumnQualifierRegexFilter('os_build'), - row_filters.ValueRegexFilter("PQ2A\\..*")]) + filters=[ + row_filters.FamilyNameRegexFilter(column_family_id), + row_filters.ColumnQualifierRegexFilter("os_build"), + row_filters.ValueRegexFilter("PQ2A\\..*"), + ] + ) row = table.conditional_row(row_key, filter_=row_filter) - row.set_cell(column_family_id, - "os_name", - "android", - timestamp) + row.set_cell(column_family_id, "os_name", "android", timestamp) row.commit() - print('Successfully updated row\'s os_name.') + print("Successfully updated row's os_name.") + + # [END bigtable_writes_conditional] diff --git 
a/packages/google-cloud-bigtable/samples/snippets/writes/write_increment.py b/packages/google-cloud-bigtable/samples/snippets/writes/write_increment.py index 73ce52c2f6d2..ac8e2d16af34 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/write_increment.py +++ b/packages/google-cloud-bigtable/samples/snippets/writes/write_increment.py @@ -30,5 +30,7 @@ def write_increment(project_id, instance_id, table_id): row.increment_cell_value(column_family_id, "connected_wifi", -1) row.commit() - print('Successfully updated row {}.'.format(row_key)) + print("Successfully updated row {}.".format(row_key)) + + # [END bigtable_writes_increment] diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/write_simple.py b/packages/google-cloud-bigtable/samples/snippets/writes/write_simple.py index b4222d234798..1aa5a810f1b1 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/write_simple.py +++ b/packages/google-cloud-bigtable/samples/snippets/writes/write_simple.py @@ -30,20 +30,13 @@ def write_simple(project_id, instance_id, table_id): row_key = "phone#4c410523#20190501" row = table.direct_row(row_key) - row.set_cell(column_family_id, - "connected_cell", - 1, - timestamp) - row.set_cell(column_family_id, - "connected_wifi", - 1, - timestamp) - row.set_cell(column_family_id, - "os_build", - "PQ2A.190405.003", - timestamp) + row.set_cell(column_family_id, "connected_cell", 1, timestamp) + row.set_cell(column_family_id, "connected_wifi", 1, timestamp) + row.set_cell(column_family_id, "os_build", "PQ2A.190405.003", timestamp) row.commit() - print('Successfully wrote row {}.'.format(row_key)) + print("Successfully wrote row {}.".format(row_key)) + + # [END bigtable_writes_simple] diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/writes_test.py b/packages/google-cloud-bigtable/samples/snippets/writes/writes_test.py index abe3000959ec..77ae883d609e 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/writes_test.py +++ b/packages/google-cloud-bigtable/samples/snippets/writes/writes_test.py @@ -26,9 +26,9 @@ from .write_simple import write_simple -PROJECT = os.environ['GOOGLE_CLOUD_PROJECT'] -BIGTABLE_INSTANCE = os.environ['BIGTABLE_INSTANCE'] -TABLE_ID_PREFIX = 'mobile-time-series-{}' +PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] +BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"] +TABLE_ID_PREFIX = "mobile-time-series-{}" @pytest.fixture @@ -48,7 +48,7 @@ def table_id(bigtable_instance): if table.exists(): table.delete() - column_family_id = 'stats_summary' + column_family_id = "stats_summary" column_families = {column_family_id: None} table.create(column_families=column_families) @@ -67,7 +67,7 @@ def _write_simple(): _write_simple() out, _ = capsys.readouterr() - assert 'Successfully wrote row' in out + assert "Successfully wrote row" in out @backoff.on_exception(backoff.expo, DeadlineExceeded, max_time=60) def _write_increment(): @@ -75,7 +75,7 @@ def _write_increment(): _write_increment() out, _ = capsys.readouterr() - assert 'Successfully updated row' in out + assert "Successfully updated row" in out @backoff.on_exception(backoff.expo, DeadlineExceeded, max_time=60) def _write_conditional(): @@ -83,7 +83,7 @@ def _write_conditional(): _write_conditional() out, _ = capsys.readouterr() - assert 'Successfully updated row\'s os_name' in out + assert "Successfully updated row's os_name" in out @backoff.on_exception(backoff.expo, DeadlineExceeded, max_time=60) def _write_batch(): @@ -91,4 +91,4 @@ def _write_batch(): _write_batch() out, 
_ = capsys.readouterr() - assert 'Successfully wrote 2 rows' in out + assert "Successfully wrote 2 rows" in out diff --git a/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py b/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py index ba55d7ce53ca..93a9122cc457 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py +++ b/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py @@ -17,6 +17,7 @@ import os from pathlib import Path import sys +from typing import Callable, Dict, List, Optional import nox @@ -27,8 +28,9 @@ # WARNING - WARNING - WARNING - WARNING - WARNING # WARNING - WARNING - WARNING - WARNING - WARNING -# Copy `noxfile_config.py` to your directory and modify it instead. +BLACK_VERSION = "black==19.10b0" +# Copy `noxfile_config.py` to your directory and modify it instead. # `TEST_CONFIG` dict is a configuration hook that allows users to # modify the test configurations. The values here should be in sync @@ -37,24 +39,29 @@ TEST_CONFIG = { # You can opt out from the test for specific Python versions. - 'ignored_versions': ["2.7"], - + "ignored_versions": [], + # Old samples are opted out of enforcing Python type hints + # All new samples should feature them + "enforce_type_hints": False, # An envvar key for determining the project id to use. Change it # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. You can also use your own string # to use your own Cloud project. - 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', + "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', - + # If you need to use a specific version of pip, + # change pip_version_override to the string representation + # of the version number, for example, "20.2.4" + "pip_version_override": None, # A dictionary you want to inject into your test. Don't put any # secrets here. These values will override predefined values. - 'envs': {}, + "envs": {}, } try: # Ensure we can import noxfile_config in the project's directory. - sys.path.append('.') + sys.path.append(".") from noxfile_config import TEST_CONFIG_OVERRIDE except ImportError as e: print("No user noxfile_config found: detail: {}".format(e)) @@ -64,36 +71,43 @@ TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) -def get_pytest_env_vars(): +def get_pytest_env_vars() -> Dict[str, str]: """Returns a dict for pytest invocation.""" ret = {} # Override the GCLOUD_PROJECT and the alias. - env_key = TEST_CONFIG['gcloud_project_env'] + env_key = TEST_CONFIG["gcloud_project_env"] # This should error out if not set. - ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] + ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] # Apply user supplied envs. - ret.update(TEST_CONFIG['envs']) + ret.update(TEST_CONFIG["envs"]) return ret # DO NOT EDIT - automatically generated. -# All versions used to tested samples. -ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] +# All versions used to test samples. +ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"] # Any default versions that should be ignored. 
-IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] +IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) -INSTALL_LIBRARY_FROM_SOURCE = bool(os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False)) +INSTALL_LIBRARY_FROM_SOURCE = os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False) in ( + "True", + "true", +) + +# Error if a python version is missing +nox.options.error_on_missing_interpreters = True + # # Style Checks # -def _determine_local_import_names(start_dir): +def _determine_local_import_names(start_dir: str) -> List[str]: """Determines all import names that should be considered "local". This is used when running the linter to insure that import order is @@ -131,18 +145,34 @@ def _determine_local_import_names(start_dir): @nox.session -def lint(session): - session.install("flake8", "flake8-import-order") +def lint(session: nox.sessions.Session) -> None: + if not TEST_CONFIG["enforce_type_hints"]: + session.install("flake8", "flake8-import-order") + else: + session.install("flake8", "flake8-import-order", "flake8-annotations") local_names = _determine_local_import_names(".") args = FLAKE8_COMMON_ARGS + [ "--application-import-names", ",".join(local_names), - "." + ".", ] session.run("flake8", *args) +# +# Black +# + + +@nox.session +def blacken(session: nox.sessions.Session) -> None: + session.install(BLACK_VERSION) + python_files = [path for path in os.listdir(".") if path.endswith(".py")] + + session.run("black", *python_files) + + # # Sample Tests # @@ -151,13 +181,24 @@ def lint(session): PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] -def _session_tests(session, post_install=None): +def _session_tests( + session: nox.sessions.Session, post_install: Callable = None +) -> None: + if TEST_CONFIG["pip_version_override"]: + pip_version = TEST_CONFIG["pip_version_override"] + session.install(f"pip=={pip_version}") """Runs py.test for a particular project.""" if os.path.exists("requirements.txt"): - session.install("-r", "requirements.txt") + if os.path.exists("constraints.txt"): + session.install("-r", "requirements.txt", "-c", "constraints.txt") + else: + session.install("-r", "requirements.txt") if os.path.exists("requirements-test.txt"): - session.install("-r", "requirements-test.txt") + if os.path.exists("constraints-test.txt"): + session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") + else: + session.install("-r", "requirements-test.txt") if INSTALL_LIBRARY_FROM_SOURCE: session.install("-e", _get_repo_root()) @@ -172,19 +213,19 @@ def _session_tests(session, post_install=None): # on travis where slow and flaky tests are excluded. # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html success_codes=[0, 5], - env=get_pytest_env_vars() + env=get_pytest_env_vars(), ) @nox.session(python=ALL_VERSIONS) -def py(session): +def py(session: nox.sessions.Session) -> None: """Runs py.test for a sample using the specified version of Python.""" if session.python in TESTED_VERSIONS: _session_tests(session) else: - session.skip("SKIPPED: {} tests are disabled for this sample.".format( - session.python - )) + session.skip( + "SKIPPED: {} tests are disabled for this sample.".format(session.python) + ) # @@ -192,7 +233,7 @@ def py(session): # -def _get_repo_root(): +def _get_repo_root() -> Optional[str]: """ Returns the root folder of the project. """ # Get root of this repository. Assume we don't have directories nested deeper than 10 items. 
p = Path(os.getcwd()) @@ -201,6 +242,11 @@ def _get_repo_root(): break if Path(p / ".git").exists(): return str(p) + # .git is not available in repos cloned via Cloud Build + # setup.py is always in the library's root, so use that instead + # https://github.com/googleapis/synthtool/issues/792 + if Path(p / "setup.py").exists(): + return str(p) p = p.parent raise Exception("Unable to detect repository root.") @@ -210,7 +256,7 @@ def _get_repo_root(): @nox.session @nox.parametrize("path", GENERATED_READMES) -def readmegen(session, path): +def readmegen(session: nox.sessions.Session, path: str) -> None: """(Re-)generates the readme for a sample.""" session.install("jinja2", "pyyaml") dir_ = os.path.dirname(path) diff --git a/packages/google-cloud-bigtable/samples/tableadmin/tableadmin.py b/packages/google-cloud-bigtable/samples/tableadmin/tableadmin.py index 29551a7f390c..7c28601fb075 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/tableadmin.py +++ b/packages/google-cloud-bigtable/samples/tableadmin/tableadmin.py @@ -38,7 +38,7 @@ def create_table(project_id, instance_id, table_id): - ''' Create a Bigtable table + """Create a Bigtable table :type project_id: str :param project_id: Project id of the client. @@ -48,7 +48,7 @@ def create_table(project_id, instance_id, table_id): :type table_id: str :param table_id: Table id to create table. - ''' + """ client = bigtable.Client(project=project_id, admin=True) instance = client.instance(instance_id) @@ -56,19 +56,19 @@ def create_table(project_id, instance_id, table_id): # Check whether table exists in an instance. # Create table if it does not exists. - print('Checking if table {} exists...'.format(table_id)) + print("Checking if table {} exists...".format(table_id)) if table.exists(): - print('Table {} already exists.'.format(table_id)) + print("Table {} already exists.".format(table_id)) else: - print('Creating the {} table.'.format(table_id)) + print("Creating the {} table.".format(table_id)) table.create() - print('Created table {}.'.format(table_id)) + print("Created table {}.".format(table_id)) return client, instance, table def run_table_operations(project_id, instance_id, table_id): - ''' Create a Bigtable table and perform basic operations on it + """Create a Bigtable table and perform basic operations on it :type project_id: str :param project_id: Project id of the client. @@ -78,78 +78,84 @@ def run_table_operations(project_id, instance_id, table_id): :type table_id: str :param table_id: Table id to create table. 
- ''' + """ client, instance, table = create_table(project_id, instance_id, table_id) # [START bigtable_list_tables] tables = instance.list_tables() - print('Listing tables in current project...') + print("Listing tables in current project...") if tables != []: for tbl in tables: print(tbl.table_id) else: - print('No table exists in current project...') + print("No table exists in current project...") # [END bigtable_list_tables] # [START bigtable_create_family_gc_max_age] - print('Creating column family cf1 with with MaxAge GC Rule...') + print("Creating column family cf1 with with MaxAge GC Rule...") # Create a column family with GC policy : maximum age # where age = current time minus cell timestamp # Define the GC rule to retain data with max age of 5 days max_age_rule = column_family.MaxAgeGCRule(datetime.timedelta(days=5)) - column_family1 = table.column_family('cf1', max_age_rule) + column_family1 = table.column_family("cf1", max_age_rule) column_family1.create() - print('Created column family cf1 with MaxAge GC Rule.') + print("Created column family cf1 with MaxAge GC Rule.") # [END bigtable_create_family_gc_max_age] # [START bigtable_create_family_gc_max_versions] - print('Creating column family cf2 with max versions GC rule...') + print("Creating column family cf2 with max versions GC rule...") # Create a column family with GC policy : most recent N versions # where 1 = most recent version # Define the GC policy to retain only the most recent 2 versions max_versions_rule = column_family.MaxVersionsGCRule(2) - column_family2 = table.column_family('cf2', max_versions_rule) + column_family2 = table.column_family("cf2", max_versions_rule) column_family2.create() - print('Created column family cf2 with Max Versions GC Rule.') + print("Created column family cf2 with Max Versions GC Rule.") # [END bigtable_create_family_gc_max_versions] # [START bigtable_create_family_gc_union] - print('Creating column family cf3 with union GC rule...') + print("Creating column family cf3 with union GC rule...") # Create a column family with GC policy to drop data that matches # at least one condition. 
# Define a GC rule to drop cells older than 5 days or not the # most recent version - union_rule = column_family.GCRuleUnion([ - column_family.MaxAgeGCRule(datetime.timedelta(days=5)), - column_family.MaxVersionsGCRule(2)]) - - column_family3 = table.column_family('cf3', union_rule) + union_rule = column_family.GCRuleUnion( + [ + column_family.MaxAgeGCRule(datetime.timedelta(days=5)), + column_family.MaxVersionsGCRule(2), + ] + ) + + column_family3 = table.column_family("cf3", union_rule) column_family3.create() - print('Created column family cf3 with Union GC rule') + print("Created column family cf3 with Union GC rule") # [END bigtable_create_family_gc_union] # [START bigtable_create_family_gc_intersection] - print('Creating column family cf4 with Intersection GC rule...') + print("Creating column family cf4 with Intersection GC rule...") # Create a column family with GC policy to drop data that matches # all conditions # GC rule: Drop cells older than 5 days AND older than the most # recent 2 versions - intersection_rule = column_family.GCRuleIntersection([ - column_family.MaxAgeGCRule(datetime.timedelta(days=5)), - column_family.MaxVersionsGCRule(2)]) - - column_family4 = table.column_family('cf4', intersection_rule) + intersection_rule = column_family.GCRuleIntersection( + [ + column_family.MaxAgeGCRule(datetime.timedelta(days=5)), + column_family.MaxVersionsGCRule(2), + ] + ) + + column_family4 = table.column_family("cf4", intersection_rule) column_family4.create() - print('Created column family cf4 with Intersection GC rule.') + print("Created column family cf4 with Intersection GC rule.") # [END bigtable_create_family_gc_intersection] # [START bigtable_create_family_gc_nested] - print('Creating column family cf5 with a Nested GC rule...') + print("Creating column family cf5 with a Nested GC rule...") # Create a column family with nested GC policies. 
# Create a nested GC rule: # Drop cells that are either older than the 10 recent versions @@ -157,23 +163,26 @@ def run_table_operations(project_id, instance_id, table_id): # Drop cells that are older than a month AND older than the # 2 recent versions rule1 = column_family.MaxVersionsGCRule(10) - rule2 = column_family.GCRuleIntersection([ - column_family.MaxAgeGCRule(datetime.timedelta(days=30)), - column_family.MaxVersionsGCRule(2)]) + rule2 = column_family.GCRuleIntersection( + [ + column_family.MaxAgeGCRule(datetime.timedelta(days=30)), + column_family.MaxVersionsGCRule(2), + ] + ) nested_rule = column_family.GCRuleUnion([rule1, rule2]) - column_family5 = table.column_family('cf5', nested_rule) + column_family5 = table.column_family("cf5", nested_rule) column_family5.create() - print('Created column family cf5 with a Nested GC rule.') + print("Created column family cf5 with a Nested GC rule.") # [END bigtable_create_family_gc_nested] # [START bigtable_list_column_families] - print('Printing Column Family and GC Rule for all column families...') + print("Printing Column Family and GC Rule for all column families...") column_families = table.list_column_families() for column_family_name, gc_rule in sorted(column_families.items()): - print('Column Family:', column_family_name) - print('GC Rule:') + print("Column Family:", column_family_name) + print("GC Rule:") print(gc_rule.to_pb()) # Sample output: # Column Family: cf4 @@ -192,37 +201,37 @@ def run_table_operations(project_id, instance_id, table_id): # } # [END bigtable_list_column_families] - print('Print column family cf1 GC rule before update...') - print('Column Family: cf1') + print("Print column family cf1 GC rule before update...") + print("Column Family: cf1") print(column_family1.to_pb()) # [START bigtable_update_gc_rule] - print('Updating column family cf1 GC rule...') + print("Updating column family cf1 GC rule...") # Update the column family cf1 to update the GC rule - column_family1 = table.column_family( - 'cf1', - column_family.MaxVersionsGCRule(1)) + column_family1 = table.column_family("cf1", column_family.MaxVersionsGCRule(1)) column_family1.update() - print('Updated column family cf1 GC rule\n') + print("Updated column family cf1 GC rule\n") # [END bigtable_update_gc_rule] - print('Print column family cf1 GC rule after update...') - print('Column Family: cf1') + print("Print column family cf1 GC rule after update...") + print("Column Family: cf1") print(column_family1.to_pb()) # [START bigtable_delete_family] - print('Delete a column family cf2...') + print("Delete a column family cf2...") # Delete a column family column_family2.delete() - print('Column family cf2 deleted successfully.') + print("Column family cf2 deleted successfully.") # [END bigtable_delete_family] - print('execute command "python tableadmin.py delete [project_id] \ - [instance_id] --table [tableName]" to delete the table.') + print( + 'execute command "python tableadmin.py delete [project_id] \ + [instance_id] --table [tableName]" to delete the table.' + ) def delete_table(project_id, instance_id, table_id): - ''' Delete bigtable. + """Delete bigtable. :type project_id: str :param project_id: Project id of the client. @@ -232,7 +241,7 @@ def delete_table(project_id, instance_id, table_id): :type table_id: str :param table_id: Table id to create table. 
- ''' + """ client = bigtable.Client(project=project_id, admin=True) instance = client.instance(instance_id) @@ -241,43 +250,44 @@ def delete_table(project_id, instance_id, table_id): # [START bigtable_delete_table] # Delete the entire table - print('Checking if table {} exists...'.format(table_id)) + print("Checking if table {} exists...".format(table_id)) if table.exists(): - print('Table {} exists.'.format(table_id)) - print('Deleting {} table.'.format(table_id)) + print("Table {} exists.".format(table_id)) + print("Deleting {} table.".format(table_id)) table.delete() - print('Deleted {} table.'.format(table_id)) + print("Deleted {} table.".format(table_id)) else: - print('Table {} does not exists.'.format(table_id)) + print("Table {} does not exists.".format(table_id)) # [END bigtable_delete_table] -if __name__ == '__main__': +if __name__ == "__main__": parser = argparse.ArgumentParser( - description=__doc__, - formatter_class=argparse.ArgumentDefaultsHelpFormatter) + description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) - parser.add_argument('command', - help='run or delete. \ - Operation to perform on table.') parser.add_argument( - '--table', - help='Cloud Bigtable Table name.', - default='Hello-Bigtable') + "command", + help="run or delete. \ + Operation to perform on table.", + ) + parser.add_argument( + "--table", help="Cloud Bigtable Table name.", default="Hello-Bigtable" + ) - parser.add_argument('project_id', - help='Your Cloud Platform project ID.') + parser.add_argument("project_id", help="Your Cloud Platform project ID.") parser.add_argument( - 'instance_id', - help='ID of the Cloud Bigtable instance to connect to.') + "instance_id", help="ID of the Cloud Bigtable instance to connect to." + ) args = parser.parse_args() - if args.command.lower() == 'run': - run_table_operations(args.project_id, args.instance_id, - args.table) - elif args.command.lower() == 'delete': + if args.command.lower() == "run": + run_table_operations(args.project_id, args.instance_id, args.table) + elif args.command.lower() == "delete": delete_table(args.project_id, args.instance_id, args.table) else: - print('Command should be either run or delete.\n Use argument -h,\ - --help to show help and exit.') + print( + "Command should be either run or delete.\n Use argument -h,\ + --help to show help and exit." + ) diff --git a/packages/google-cloud-bigtable/samples/tableadmin/tableadmin_test.py b/packages/google-cloud-bigtable/samples/tableadmin/tableadmin_test.py index b001ce076a18..3063eee9fb06 100755 --- a/packages/google-cloud-bigtable/samples/tableadmin/tableadmin_test.py +++ b/packages/google-cloud-bigtable/samples/tableadmin/tableadmin_test.py @@ -23,9 +23,9 @@ from tableadmin import delete_table from tableadmin import run_table_operations -PROJECT = os.environ['GOOGLE_CLOUD_PROJECT'] -BIGTABLE_INSTANCE = os.environ['BIGTABLE_INSTANCE'] -TABLE_ID_FORMAT = 'tableadmin-test-{}' +PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] +BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"] +TABLE_ID_FORMAT = "tableadmin-test-{}" retry_429_503 = RetryErrors(exceptions.TooManyRequests, exceptions.ServiceUnavailable) @@ -36,22 +36,22 @@ def test_run_table_operations(capsys): retry_429_503(run_table_operations)(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() - assert 'Creating the ' + table_id + ' table.' in out - assert 'Listing tables in current project.' 
in out - assert 'Creating column family cf1 with with MaxAge GC Rule' in out - assert 'Created column family cf1 with MaxAge GC Rule.' in out - assert 'Created column family cf2 with Max Versions GC Rule.' in out - assert 'Created column family cf3 with Union GC rule' in out - assert 'Created column family cf4 with Intersection GC rule.' in out - assert 'Created column family cf5 with a Nested GC rule.' in out - assert 'Printing Column Family and GC Rule for all column families.' in out - assert 'Updating column family cf1 GC rule...' in out - assert 'Updated column family cf1 GC rule' in out - assert 'Print column family cf1 GC rule after update...' in out - assert 'Column Family: cf1' in out - assert 'max_num_versions: 1' in out - assert 'Delete a column family cf2...' in out - assert 'Column family cf2 deleted successfully.' in out + assert "Creating the " + table_id + " table." in out + assert "Listing tables in current project." in out + assert "Creating column family cf1 with with MaxAge GC Rule" in out + assert "Created column family cf1 with MaxAge GC Rule." in out + assert "Created column family cf2 with Max Versions GC Rule." in out + assert "Created column family cf3 with Union GC rule" in out + assert "Created column family cf4 with Intersection GC rule." in out + assert "Created column family cf5 with a Nested GC rule." in out + assert "Printing Column Family and GC Rule for all column families." in out + assert "Updating column family cf1 GC rule..." in out + assert "Updated column family cf1 GC rule" in out + assert "Print column family cf1 GC rule after update..." in out + assert "Column Family: cf1" in out + assert "max_num_versions: 1" in out + assert "Delete a column family cf2..." in out + assert "Column family cf2 deleted successfully." in out retry_429_503(delete_table)(PROJECT, BIGTABLE_INSTANCE, table_id) @@ -63,6 +63,6 @@ def test_delete_table(capsys): retry_429_503(delete_table)(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() - assert 'Table ' + table_id + ' exists.' in out - assert 'Deleting ' + table_id + ' table.' in out - assert 'Deleted ' + table_id + ' table.' in out + assert "Table " + table_id + " exists." in out + assert "Deleting " + table_id + " table." in out + assert "Deleted " + table_id + " table." 
in out From 6226fa02d67d65b2bcfbbd6367b8207ec730b35d Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Wed, 12 Jan 2022 06:23:18 -0500 Subject: [PATCH 544/892] feat: add Autoscaling API (#475) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: add Autoscaling API PiperOrigin-RevId: 410080804 Source-Link: https://github.com/googleapis/googleapis/commit/0fd6a324383fdd1220c9a937b2eef37f53764664 Source-Link: https://github.com/googleapis/googleapis-gen/commit/788247b7cbda5b05f2ac4f6c13f10ff265e183f0 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiNzg4MjQ3YjdjYmRhNWIwNWYyYWM0ZjZjMTNmMTBmZjI2NWUxODNmMCJ9 * 🦉 Updates from OwlBot See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * resolve issue where samples templates are not updated * 🦉 Updates from OwlBot See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md Co-authored-by: Owl Bot Co-authored-by: Anthonios Partheniou --- .../cloud/bigtable_admin_v2/__init__.py | 8 + .../bigtable_admin_v2/gapic_metadata.json | 10 + .../bigtable_instance_admin/async_client.py | 136 ++++++++++ .../bigtable_instance_admin/client.py | 126 +++++++++ .../transports/base.py | 24 ++ .../transports/grpc.py | 57 ++++ .../transports/grpc_asyncio.py | 58 ++++ .../cloud/bigtable_admin_v2/types/__init__.py | 8 + .../types/bigtable_instance_admin.py | 46 ++++ .../cloud/bigtable_admin_v2/types/instance.py | 88 +++++- .../fixup_bigtable_admin_v2_keywords.py | 3 +- .../test_bigtable_instance_admin.py | 252 ++++++++++++++++++ 12 files changed, 811 insertions(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py index db670f299d21..545000fbfce3 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py @@ -36,6 +36,8 @@ from .types.bigtable_instance_admin import ListClustersResponse from .types.bigtable_instance_admin import ListInstancesRequest from .types.bigtable_instance_admin import ListInstancesResponse +from .types.bigtable_instance_admin import PartialUpdateClusterMetadata +from .types.bigtable_instance_admin import PartialUpdateClusterRequest from .types.bigtable_instance_admin import PartialUpdateInstanceRequest from .types.bigtable_instance_admin import UpdateAppProfileMetadata from .types.bigtable_instance_admin import UpdateAppProfileRequest @@ -73,6 +75,8 @@ from .types.common import OperationProgress from .types.common import StorageType from .types.instance import AppProfile +from .types.instance import AutoscalingLimits +from .types.instance import AutoscalingTargets from .types.instance import Cluster from .types.instance import Instance from .types.table import Backup @@ -89,6 +93,8 @@ "BigtableInstanceAdminAsyncClient", "BigtableTableAdminAsyncClient", "AppProfile", + "AutoscalingLimits", + "AutoscalingTargets", "Backup", "BackupInfo", "BigtableInstanceAdminClient", @@ -140,6 +146,8 @@ "ModifyColumnFamiliesRequest", "OperationProgress", "OptimizeRestoredTableMetadata", + "PartialUpdateClusterMetadata", + "PartialUpdateClusterRequest", "PartialUpdateInstanceRequest", "RestoreInfo", "RestoreSourceType", diff --git 
a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_metadata.json b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_metadata.json index f5e13454327a..c360e7712249 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_metadata.json +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_metadata.json @@ -75,6 +75,11 @@ "list_instances" ] }, + "PartialUpdateCluster": { + "methods": [ + "partial_update_cluster" + ] + }, "PartialUpdateInstance": { "methods": [ "partial_update_instance" @@ -175,6 +180,11 @@ "list_instances" ] }, + "PartialUpdateCluster": { + "methods": [ + "partial_update_cluster" + ] + }, "PartialUpdateInstance": { "methods": [ "partial_update_instance" diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py index 6fdda38faab9..649877c3e904 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ -206,6 +206,12 @@ async def create_instance( ) -> operation_async.AsyncOperation: r"""Create an instance within a project. + Note that exactly one of Cluster.serve_nodes and + Cluster.cluster_config.cluster_autoscaling_config can be set. If + serve_nodes is set to non-zero, then the cluster is manually + scaled. If cluster_config.cluster_autoscaling_config is + non-empty, then autoscaling is enabled. + Args: request (Union[google.cloud.bigtable_admin_v2.types.CreateInstanceRequest, dict]): The request object. Request message for @@ -738,6 +744,12 @@ async def create_cluster( ) -> operation_async.AsyncOperation: r"""Creates a cluster within an instance. + Note that exactly one of Cluster.serve_nodes and + Cluster.cluster_config.cluster_autoscaling_config can be set. If + serve_nodes is set to non-zero, then the cluster is manually + scaled. If cluster_config.cluster_autoscaling_config is + non-empty, then autoscaling is enabled. + Args: request (Union[google.cloud.bigtable_admin_v2.types.CreateClusterRequest, dict]): The request object. Request message for @@ -1009,6 +1021,10 @@ async def update_cluster( ) -> operation_async.AsyncOperation: r"""Updates a cluster within an instance. + Note that UpdateCluster does not support updating + cluster_config.cluster_autoscaling_config. In order to update + it, you must use PartialUpdateCluster. + Args: request (Union[google.cloud.bigtable_admin_v2.types.Cluster, dict]): The request object. A resizable group of nodes in a @@ -1072,6 +1088,126 @@ async def update_cluster( # Done; return the response. return response + async def partial_update_cluster( + self, + request: Union[ + bigtable_instance_admin.PartialUpdateClusterRequest, dict + ] = None, + *, + cluster: instance.Cluster = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Partially updates a cluster within a project. This method is the + preferred way to update a Cluster. + + To enable and update autoscaling, set + cluster_config.cluster_autoscaling_config. When autoscaling is + enabled, serve_nodes is treated as an OUTPUT_ONLY field, meaning + that updates to it are ignored. 
Note that an update cannot + simultaneously set serve_nodes to non-zero and + cluster_config.cluster_autoscaling_config to non-empty, and also + specify both in the update_mask. + + To disable autoscaling, clear + cluster_config.cluster_autoscaling_config, and explicitly set a + serve_node count via the update_mask. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.PartialUpdateClusterRequest, dict]): + The request object. Request message for + BigtableInstanceAdmin.PartialUpdateCluster. + cluster (:class:`google.cloud.bigtable_admin_v2.types.Cluster`): + Required. The Cluster which contains the partial updates + to be applied, subject to the update_mask. + + This corresponds to the ``cluster`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. The subset of Cluster + fields which should be replaced. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Cluster` A resizable group of nodes in a particular cloud location, capable + of serving all + [Tables][google.bigtable.admin.v2.Table] in the + parent [Instance][google.bigtable.admin.v2.Instance]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([cluster, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_instance_admin.PartialUpdateClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if cluster is not None: + request.cluster = cluster + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.partial_update_cluster, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("cluster.name", request.cluster.name),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + instance.Cluster, + metadata_type=bigtable_instance_admin.PartialUpdateClusterMetadata, + ) + + # Done; return the response. 
+ return response + async def delete_cluster( self, request: Union[bigtable_instance_admin.DeleteClusterRequest, dict] = None, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py index fca8eed0389e..d1f445c34537 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -437,6 +437,12 @@ def create_instance( ) -> operation.Operation: r"""Create an instance within a project. + Note that exactly one of Cluster.serve_nodes and + Cluster.cluster_config.cluster_autoscaling_config can be set. If + serve_nodes is set to non-zero, then the cluster is manually + scaled. If cluster_config.cluster_autoscaling_config is + non-empty, then autoscaling is enabled. + Args: request (Union[google.cloud.bigtable_admin_v2.types.CreateInstanceRequest, dict]): The request object. Request message for @@ -931,6 +937,12 @@ def create_cluster( ) -> operation.Operation: r"""Creates a cluster within an instance. + Note that exactly one of Cluster.serve_nodes and + Cluster.cluster_config.cluster_autoscaling_config can be set. If + serve_nodes is set to non-zero, then the cluster is manually + scaled. If cluster_config.cluster_autoscaling_config is + non-empty, then autoscaling is enabled. + Args: request (Union[google.cloud.bigtable_admin_v2.types.CreateClusterRequest, dict]): The request object. Request message for @@ -1182,6 +1194,10 @@ def update_cluster( ) -> operation.Operation: r"""Updates a cluster within an instance. + Note that UpdateCluster does not support updating + cluster_config.cluster_autoscaling_config. In order to update + it, you must use PartialUpdateCluster. + Args: request (Union[google.cloud.bigtable_admin_v2.types.Cluster, dict]): The request object. A resizable group of nodes in a @@ -1236,6 +1252,116 @@ def update_cluster( # Done; return the response. return response + def partial_update_cluster( + self, + request: Union[ + bigtable_instance_admin.PartialUpdateClusterRequest, dict + ] = None, + *, + cluster: instance.Cluster = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Partially updates a cluster within a project. This method is the + preferred way to update a Cluster. + + To enable and update autoscaling, set + cluster_config.cluster_autoscaling_config. When autoscaling is + enabled, serve_nodes is treated as an OUTPUT_ONLY field, meaning + that updates to it are ignored. Note that an update cannot + simultaneously set serve_nodes to non-zero and + cluster_config.cluster_autoscaling_config to non-empty, and also + specify both in the update_mask. + + To disable autoscaling, clear + cluster_config.cluster_autoscaling_config, and explicitly set a + serve_node count via the update_mask. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.PartialUpdateClusterRequest, dict]): + The request object. Request message for + BigtableInstanceAdmin.PartialUpdateCluster. + cluster (google.cloud.bigtable_admin_v2.types.Cluster): + Required. The Cluster which contains the partial updates + to be applied, subject to the update_mask. 
+ + This corresponds to the ``cluster`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The subset of Cluster + fields which should be replaced. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Cluster` A resizable group of nodes in a particular cloud location, capable + of serving all + [Tables][google.bigtable.admin.v2.Table] in the + parent [Instance][google.bigtable.admin.v2.Instance]. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([cluster, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_instance_admin.PartialUpdateClusterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_instance_admin.PartialUpdateClusterRequest): + request = bigtable_instance_admin.PartialUpdateClusterRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if cluster is not None: + request.cluster = cluster + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.partial_update_cluster] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("cluster.name", request.cluster.name),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + instance.Cluster, + metadata_type=bigtable_instance_admin.PartialUpdateClusterMetadata, + ) + + # Done; return the response. 
+ return response + def delete_cluster( self, request: Union[bigtable_instance_admin.DeleteClusterRequest, dict] = None, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py index 98a4f334db05..b928472c0ba2 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py @@ -249,6 +249,21 @@ def _prep_wrapped_messages(self, client_info): default_timeout=60.0, client_info=client_info, ), + self.partial_update_cluster: gapic_v1.method.wrap_method( + self.partial_update_cluster, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), self.delete_cluster: gapic_v1.method.wrap_method( self.delete_cluster, default_timeout=60.0, client_info=client_info, ), @@ -447,6 +462,15 @@ def update_cluster( ]: raise NotImplementedError() + @property + def partial_update_cluster( + self, + ) -> Callable[ + [bigtable_instance_admin.PartialUpdateClusterRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + @property def delete_cluster( self, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py index b677cc40488c..fa92fac0ce3f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py @@ -259,6 +259,12 @@ def create_instance( Create an instance within a project. + Note that exactly one of Cluster.serve_nodes and + Cluster.cluster_config.cluster_autoscaling_config can be set. If + serve_nodes is set to non-zero, then the cluster is manually + scaled. If cluster_config.cluster_autoscaling_config is + non-empty, then autoscaling is enabled. + Returns: Callable[[~.CreateInstanceRequest], ~.Operation]: @@ -425,6 +431,12 @@ def create_cluster( Creates a cluster within an instance. + Note that exactly one of Cluster.serve_nodes and + Cluster.cluster_config.cluster_autoscaling_config can be set. If + serve_nodes is set to non-zero, then the cluster is manually + scaled. If cluster_config.cluster_autoscaling_config is + non-empty, then autoscaling is enabled. + Returns: Callable[[~.CreateClusterRequest], ~.Operation]: @@ -504,6 +516,10 @@ def update_cluster(self) -> Callable[[instance.Cluster], operations_pb2.Operatio Updates a cluster within an instance. + Note that UpdateCluster does not support updating + cluster_config.cluster_autoscaling_config. In order to update + it, you must use PartialUpdateCluster. 
+ Returns: Callable[[~.Cluster], ~.Operation]: @@ -522,6 +538,47 @@ def update_cluster(self) -> Callable[[instance.Cluster], operations_pb2.Operatio ) return self._stubs["update_cluster"] + @property + def partial_update_cluster( + self, + ) -> Callable[ + [bigtable_instance_admin.PartialUpdateClusterRequest], operations_pb2.Operation + ]: + r"""Return a callable for the partial update cluster method over gRPC. + + Partially updates a cluster within a project. This method is the + preferred way to update a Cluster. + + To enable and update autoscaling, set + cluster_config.cluster_autoscaling_config. When autoscaling is + enabled, serve_nodes is treated as an OUTPUT_ONLY field, meaning + that updates to it are ignored. Note that an update cannot + simultaneously set serve_nodes to non-zero and + cluster_config.cluster_autoscaling_config to non-empty, and also + specify both in the update_mask. + + To disable autoscaling, clear + cluster_config.cluster_autoscaling_config, and explicitly set a + serve_node count via the update_mask. + + Returns: + Callable[[~.PartialUpdateClusterRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "partial_update_cluster" not in self._stubs: + self._stubs["partial_update_cluster"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/PartialUpdateCluster", + request_serializer=bigtable_instance_admin.PartialUpdateClusterRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["partial_update_cluster"] + @property def delete_cluster( self, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py index de80dfbe0429..5eaaf33f2588 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py @@ -264,6 +264,12 @@ def create_instance( Create an instance within a project. + Note that exactly one of Cluster.serve_nodes and + Cluster.cluster_config.cluster_autoscaling_config can be set. If + serve_nodes is set to non-zero, then the cluster is manually + scaled. If cluster_config.cluster_autoscaling_config is + non-empty, then autoscaling is enabled. + Returns: Callable[[~.CreateInstanceRequest], Awaitable[~.Operation]]: @@ -438,6 +444,12 @@ def create_cluster( Creates a cluster within an instance. + Note that exactly one of Cluster.serve_nodes and + Cluster.cluster_config.cluster_autoscaling_config can be set. If + serve_nodes is set to non-zero, then the cluster is manually + scaled. If cluster_config.cluster_autoscaling_config is + non-empty, then autoscaling is enabled. + Returns: Callable[[~.CreateClusterRequest], Awaitable[~.Operation]]: @@ -521,6 +533,10 @@ def update_cluster( Updates a cluster within an instance. + Note that UpdateCluster does not support updating + cluster_config.cluster_autoscaling_config. In order to update + it, you must use PartialUpdateCluster. 
+ Returns: Callable[[~.Cluster], Awaitable[~.Operation]]: @@ -539,6 +555,48 @@ def update_cluster( ) return self._stubs["update_cluster"] + @property + def partial_update_cluster( + self, + ) -> Callable[ + [bigtable_instance_admin.PartialUpdateClusterRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the partial update cluster method over gRPC. + + Partially updates a cluster within a project. This method is the + preferred way to update a Cluster. + + To enable and update autoscaling, set + cluster_config.cluster_autoscaling_config. When autoscaling is + enabled, serve_nodes is treated as an OUTPUT_ONLY field, meaning + that updates to it are ignored. Note that an update cannot + simultaneously set serve_nodes to non-zero and + cluster_config.cluster_autoscaling_config to non-empty, and also + specify both in the update_mask. + + To disable autoscaling, clear + cluster_config.cluster_autoscaling_config, and explicitly set a + serve_node count via the update_mask. + + Returns: + Callable[[~.PartialUpdateClusterRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "partial_update_cluster" not in self._stubs: + self._stubs["partial_update_cluster"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/PartialUpdateCluster", + request_serializer=bigtable_instance_admin.PartialUpdateClusterRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["partial_update_cluster"] + @property def delete_cluster( self, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py index aeeed3466230..d1e4c8f1ca45 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py @@ -31,6 +31,8 @@ ListClustersResponse, ListInstancesRequest, ListInstancesResponse, + PartialUpdateClusterMetadata, + PartialUpdateClusterRequest, PartialUpdateInstanceRequest, UpdateAppProfileMetadata, UpdateAppProfileRequest, @@ -74,6 +76,8 @@ ) from .instance import ( AppProfile, + AutoscalingLimits, + AutoscalingTargets, Cluster, Instance, ) @@ -107,6 +111,8 @@ "ListClustersResponse", "ListInstancesRequest", "ListInstancesResponse", + "PartialUpdateClusterMetadata", + "PartialUpdateClusterRequest", "PartialUpdateInstanceRequest", "UpdateAppProfileMetadata", "UpdateAppProfileRequest", @@ -144,6 +150,8 @@ "OperationProgress", "StorageType", "AppProfile", + "AutoscalingLimits", + "AutoscalingTargets", "Cluster", "Instance", "Backup", diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py index a5753b613d99..842b0e5fe42d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py @@ -38,6 +38,8 @@ "UpdateInstanceMetadata", "CreateClusterMetadata", "UpdateClusterMetadata", + "PartialUpdateClusterMetadata", + "PartialUpdateClusterRequest", 
"CreateAppProfileRequest", "GetAppProfileRequest", "ListAppProfilesRequest", @@ -361,6 +363,50 @@ class UpdateClusterMetadata(proto.Message): finish_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,) +class PartialUpdateClusterMetadata(proto.Message): + r"""The metadata for the Operation returned by + PartialUpdateCluster. + + Attributes: + request_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the original request was + received. + finish_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the operation failed or was + completed successfully. + original_request (google.cloud.bigtable_admin_v2.types.PartialUpdateClusterRequest): + The original request for + PartialUpdateCluster. + """ + + request_time = proto.Field( + proto.MESSAGE, number=1, message=timestamp_pb2.Timestamp, + ) + finish_time = proto.Field(proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp,) + original_request = proto.Field( + proto.MESSAGE, number=3, message="PartialUpdateClusterRequest", + ) + + +class PartialUpdateClusterRequest(proto.Message): + r"""Request message for + BigtableInstanceAdmin.PartialUpdateCluster. + + Attributes: + cluster (google.cloud.bigtable_admin_v2.types.Cluster): + Required. The Cluster which contains the partial updates to + be applied, subject to the update_mask. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The subset of Cluster fields which + should be replaced. + """ + + cluster = proto.Field(proto.MESSAGE, number=1, message=gba_instance.Cluster,) + update_mask = proto.Field( + proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask, + ) + + class CreateAppProfileRequest(proto.Message): r"""Request message for BigtableInstanceAdmin.CreateAppProfile. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py index 814c9d3bf641..0b008748cf54 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py @@ -20,7 +20,14 @@ __protobuf__ = proto.module( - package="google.bigtable.admin.v2", manifest={"Instance", "Cluster", "AppProfile",}, + package="google.bigtable.admin.v2", + manifest={ + "Instance", + "AutoscalingTargets", + "AutoscalingLimits", + "Cluster", + "AppProfile", + }, ) @@ -85,11 +92,46 @@ class Type(proto.Enum): create_time = proto.Field(proto.MESSAGE, number=7, message=timestamp_pb2.Timestamp,) +class AutoscalingTargets(proto.Message): + r"""The Autoscaling targets for a Cluster. These determine the + recommended nodes. + + Attributes: + cpu_utilization_percent (int): + The cpu utilization that the Autoscaler + should be trying to achieve. This number is on a + scale from 0 (no utilization) to 100 (total + utilization). + """ + + cpu_utilization_percent = proto.Field(proto.INT32, number=2,) + + +class AutoscalingLimits(proto.Message): + r"""Limits for the number of nodes a Cluster can autoscale + up/down to. + + Attributes: + min_serve_nodes (int): + Required. Minimum number of nodes to scale + down to. + max_serve_nodes (int): + Required. Maximum number of nodes to scale up + to. 
+ """ + + min_serve_nodes = proto.Field(proto.INT32, number=1,) + max_serve_nodes = proto.Field(proto.INT32, number=2,) + + class Cluster(proto.Message): r"""A resizable group of nodes in a particular cloud location, capable of serving all [Tables][google.bigtable.admin.v2.Table] in the parent [Instance][google.bigtable.admin.v2.Instance]. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + Attributes: name (str): The unique name of the cluster. Values are of the form @@ -103,9 +145,13 @@ class Cluster(proto.Message): state (google.cloud.bigtable_admin_v2.types.Cluster.State): The current state of the cluster. serve_nodes (int): - Required. The number of nodes allocated to - this cluster. More nodes enable higher - throughput and more consistent performance. + The number of nodes allocated to this + cluster. More nodes enable higher throughput and + more consistent performance. + cluster_config (google.cloud.bigtable_admin_v2.types.Cluster.ClusterConfig): + Configuration for this cluster. + + This field is a member of `oneof`_ ``config``. default_storage_type (google.cloud.bigtable_admin_v2.types.StorageType): (``CreationOnly``) The type of storage used by this cluster to serve its parent instance's tables, unless explicitly @@ -123,6 +169,37 @@ class State(proto.Enum): RESIZING = 3 DISABLED = 4 + class ClusterAutoscalingConfig(proto.Message): + r"""Autoscaling config for a cluster. + + Attributes: + autoscaling_limits (google.cloud.bigtable_admin_v2.types.AutoscalingLimits): + Required. Autoscaling limits for this + cluster. + autoscaling_targets (google.cloud.bigtable_admin_v2.types.AutoscalingTargets): + Required. Autoscaling targets for this + cluster. + """ + + autoscaling_limits = proto.Field( + proto.MESSAGE, number=1, message="AutoscalingLimits", + ) + autoscaling_targets = proto.Field( + proto.MESSAGE, number=2, message="AutoscalingTargets", + ) + + class ClusterConfig(proto.Message): + r"""Configuration for a cluster. + + Attributes: + cluster_autoscaling_config (google.cloud.bigtable_admin_v2.types.Cluster.ClusterAutoscalingConfig): + Autoscaling configuration for this cluster. + """ + + cluster_autoscaling_config = proto.Field( + proto.MESSAGE, number=1, message="Cluster.ClusterAutoscalingConfig", + ) + class EncryptionConfig(proto.Message): r"""Cloud Key Management Service (Cloud KMS) settings for a CMEK- rotected cluster. 
@@ -149,6 +226,9 @@ class EncryptionConfig(proto.Message): location = proto.Field(proto.STRING, number=2,) state = proto.Field(proto.ENUM, number=3, enum=State,) serve_nodes = proto.Field(proto.INT32, number=4,) + cluster_config = proto.Field( + proto.MESSAGE, number=7, oneof="config", message=ClusterConfig, + ) default_storage_type = proto.Field(proto.ENUM, number=5, enum=common.StorageType,) encryption_config = proto.Field(proto.MESSAGE, number=6, message=EncryptionConfig,) diff --git a/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py b/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py index ff285085e6de..a837ad2927df 100644 --- a/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py +++ b/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py @@ -68,6 +68,7 @@ class bigtable_adminCallTransformer(cst.CSTTransformer): 'list_snapshots': ('parent', 'page_size', 'page_token', ), 'list_tables': ('parent', 'view', 'page_size', 'page_token', ), 'modify_column_families': ('name', 'modifications', ), + 'partial_update_cluster': ('cluster', 'update_mask', ), 'partial_update_instance': ('instance', 'update_mask', ), 'restore_table': ('parent', 'table_id', 'backup', ), 'set_iam_policy': ('resource', 'policy', ), @@ -75,7 +76,7 @@ class bigtable_adminCallTransformer(cst.CSTTransformer): 'test_iam_permissions': ('resource', 'permissions', ), 'update_app_profile': ('app_profile', 'update_mask', 'ignore_warnings', ), 'update_backup': ('backup', 'update_mask', ), - 'update_cluster': ('serve_nodes', 'name', 'location', 'state', 'default_storage_type', 'encryption_config', ), + 'update_cluster': ('name', 'location', 'state', 'serve_nodes', 'cluster_config', 'default_storage_type', 'encryption_config', ), 'update_instance': ('display_name', 'name', 'state', 'type_', 'labels', 'create_time', ), } diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index 32eccdb61135..9b1636fbcc58 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -2067,6 +2067,11 @@ def test_get_cluster( state=instance.Cluster.State.READY, serve_nodes=1181, default_storage_type=common.StorageType.SSD, + cluster_config=instance.Cluster.ClusterConfig( + cluster_autoscaling_config=instance.Cluster.ClusterAutoscalingConfig( + autoscaling_limits=instance.AutoscalingLimits(min_serve_nodes=1600) + ) + ), ) response = client.get_cluster(request) @@ -2630,6 +2635,252 @@ async def test_update_cluster_field_headers_async(): assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] +def test_partial_update_cluster( + transport: str = "grpc", + request_type=bigtable_instance_admin.PartialUpdateClusterRequest, +): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.partial_update_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.partial_update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.PartialUpdateClusterRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_partial_update_cluster_from_dict(): + test_partial_update_cluster(request_type=dict) + + +def test_partial_update_cluster_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.partial_update_cluster), "__call__" + ) as call: + client.partial_update_cluster() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.PartialUpdateClusterRequest() + + +@pytest.mark.asyncio +async def test_partial_update_cluster_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.PartialUpdateClusterRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.partial_update_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.partial_update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.PartialUpdateClusterRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_partial_update_cluster_async_from_dict(): + await test_partial_update_cluster_async(request_type=dict) + + +def test_partial_update_cluster_field_headers(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.PartialUpdateClusterRequest() + + request.cluster.name = "cluster.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.partial_update_cluster), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.partial_update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "cluster.name=cluster.name/value",) in kw[ + "metadata" + ] + + +@pytest.mark.asyncio +async def test_partial_update_cluster_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.PartialUpdateClusterRequest() + + request.cluster.name = "cluster.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.partial_update_cluster), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.partial_update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "cluster.name=cluster.name/value",) in kw[ + "metadata" + ] + + +def test_partial_update_cluster_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.partial_update_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.partial_update_cluster( + cluster=instance.Cluster(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].cluster + mock_val = instance.Cluster(name="name_value") + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + assert arg == mock_val + + +def test_partial_update_cluster_flattened_error(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.partial_update_cluster( + bigtable_instance_admin.PartialUpdateClusterRequest(), + cluster=instance.Cluster(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.asyncio +async def test_partial_update_cluster_flattened_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.partial_update_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.partial_update_cluster( + cluster=instance.Cluster(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].cluster + mock_val = instance.Cluster(name="name_value") + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_partial_update_cluster_flattened_error_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.partial_update_cluster( + bigtable_instance_admin.PartialUpdateClusterRequest(), + cluster=instance.Cluster(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + def test_delete_cluster( transport: str = "grpc", request_type=bigtable_instance_admin.DeleteClusterRequest ): @@ -5010,6 +5261,7 @@ def test_bigtable_instance_admin_base_transport(): "get_cluster", "list_clusters", "update_cluster", + "partial_update_cluster", "delete_cluster", "create_app_profile", "get_app_profile", From c45211d9bff02e72ac79dc5729d72318937f2429 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 13 Jan 2022 15:58:18 +0000 Subject: [PATCH 545/892] build: switch to release-please for tagging (#488) --- .../.github/.OwlBot.lock.yaml | 2 +- .../.github/release-please.yml | 1 + .../.github/release-trigger.yml | 1 + .../samples/beam/noxfile.py | 70 +++++++++++-------- .../samples/hello/noxfile.py | 70 +++++++++++-------- .../samples/hello_happybase/noxfile.py | 70 +++++++++++-------- .../samples/instanceadmin/noxfile.py | 70 +++++++++++-------- .../samples/metricscaler/noxfile.py | 70 +++++++++++-------- .../samples/quickstart/noxfile.py | 70 +++++++++++-------- .../samples/quickstart_happybase/noxfile.py | 70 +++++++++++-------- .../samples/snippets/filters/noxfile.py | 70 +++++++++++-------- .../samples/snippets/reads/noxfile.py | 70 +++++++++++-------- .../samples/snippets/writes/noxfile.py | 70 +++++++++++-------- .../samples/tableadmin/noxfile.py | 70 +++++++++++-------- 14 files changed, 432 insertions(+), 342 deletions(-) create mode 100644 packages/google-cloud-bigtable/.github/release-trigger.yml diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 0b3c8cd98f89..ff5126c188d0 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -1,3 +1,3 @@ docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:2f90537dd7df70f6b663cd654b1fa5dee483cf6a4edcfd46072b2775be8a23ec + digest: sha256:dfa9b663b32de8b5b327e32c1da665a80de48876558dd58091d8160c60ad7355 diff --git a/packages/google-cloud-bigtable/.github/release-please.yml b/packages/google-cloud-bigtable/.github/release-please.yml index 4507ad0598a5..466597e5b196 100644 --- a/packages/google-cloud-bigtable/.github/release-please.yml +++ b/packages/google-cloud-bigtable/.github/release-please.yml @@ -1 +1,2 @@ releaseType: python +handleGHRelease: true diff --git a/packages/google-cloud-bigtable/.github/release-trigger.yml 
b/packages/google-cloud-bigtable/.github/release-trigger.yml new file mode 100644 index 000000000000..d4ca94189e16 --- /dev/null +++ b/packages/google-cloud-bigtable/.github/release-trigger.yml @@ -0,0 +1 @@ +enabled: true diff --git a/packages/google-cloud-bigtable/samples/beam/noxfile.py b/packages/google-cloud-bigtable/samples/beam/noxfile.py index d7567dee99c0..b14b26647148 100644 --- a/packages/google-cloud-bigtable/samples/beam/noxfile.py +++ b/packages/google-cloud-bigtable/samples/beam/noxfile.py @@ -14,6 +14,7 @@ from __future__ import print_function +import glob import os from pathlib import Path import sys @@ -182,37 +183,44 @@ def blacken(session: nox.sessions.Session) -> None: def _session_tests( session: nox.sessions.Session, post_install: Callable = None ) -> None: - if TEST_CONFIG["pip_version_override"]: - pip_version = TEST_CONFIG["pip_version_override"] - session.install(f"pip=={pip_version}") - """Runs py.test for a particular project.""" - if os.path.exists("requirements.txt"): - if os.path.exists("constraints.txt"): - session.install("-r", "requirements.txt", "-c", "constraints.txt") - else: - session.install("-r", "requirements.txt") - - if os.path.exists("requirements-test.txt"): - if os.path.exists("constraints-test.txt"): - session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") - else: - session.install("-r", "requirements-test.txt") - - if INSTALL_LIBRARY_FROM_SOURCE: - session.install("-e", _get_repo_root()) - - if post_install: - post_install(session) - - session.run( - "pytest", - *(PYTEST_COMMON_ARGS + session.posargs), - # Pytest will return 5 when no tests are collected. This can happen - # on travis where slow and flaky tests are excluded. - # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html - success_codes=[0, 5], - env=get_pytest_env_vars(), - ) + # check for presence of tests + test_list = glob.glob("*_test.py") + glob.glob("test_*.py") + if len(test_list) == 0: + print("No tests found, skipping directory.") + else: + if TEST_CONFIG["pip_version_override"]: + pip_version = TEST_CONFIG["pip_version_override"] + session.install(f"pip=={pip_version}") + """Runs py.test for a particular project.""" + if os.path.exists("requirements.txt"): + if os.path.exists("constraints.txt"): + session.install("-r", "requirements.txt", "-c", "constraints.txt") + else: + session.install("-r", "requirements.txt") + + if os.path.exists("requirements-test.txt"): + if os.path.exists("constraints-test.txt"): + session.install( + "-r", "requirements-test.txt", "-c", "constraints-test.txt" + ) + else: + session.install("-r", "requirements-test.txt") + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. 
+ # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars(), + ) @nox.session(python=ALL_VERSIONS) diff --git a/packages/google-cloud-bigtable/samples/hello/noxfile.py b/packages/google-cloud-bigtable/samples/hello/noxfile.py index 93a9122cc457..3bbef5d54f44 100644 --- a/packages/google-cloud-bigtable/samples/hello/noxfile.py +++ b/packages/google-cloud-bigtable/samples/hello/noxfile.py @@ -14,6 +14,7 @@ from __future__ import print_function +import glob import os from pathlib import Path import sys @@ -184,37 +185,44 @@ def blacken(session: nox.sessions.Session) -> None: def _session_tests( session: nox.sessions.Session, post_install: Callable = None ) -> None: - if TEST_CONFIG["pip_version_override"]: - pip_version = TEST_CONFIG["pip_version_override"] - session.install(f"pip=={pip_version}") - """Runs py.test for a particular project.""" - if os.path.exists("requirements.txt"): - if os.path.exists("constraints.txt"): - session.install("-r", "requirements.txt", "-c", "constraints.txt") - else: - session.install("-r", "requirements.txt") - - if os.path.exists("requirements-test.txt"): - if os.path.exists("constraints-test.txt"): - session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") - else: - session.install("-r", "requirements-test.txt") - - if INSTALL_LIBRARY_FROM_SOURCE: - session.install("-e", _get_repo_root()) - - if post_install: - post_install(session) - - session.run( - "pytest", - *(PYTEST_COMMON_ARGS + session.posargs), - # Pytest will return 5 when no tests are collected. This can happen - # on travis where slow and flaky tests are excluded. - # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html - success_codes=[0, 5], - env=get_pytest_env_vars(), - ) + # check for presence of tests + test_list = glob.glob("*_test.py") + glob.glob("test_*.py") + if len(test_list) == 0: + print("No tests found, skipping directory.") + else: + if TEST_CONFIG["pip_version_override"]: + pip_version = TEST_CONFIG["pip_version_override"] + session.install(f"pip=={pip_version}") + """Runs py.test for a particular project.""" + if os.path.exists("requirements.txt"): + if os.path.exists("constraints.txt"): + session.install("-r", "requirements.txt", "-c", "constraints.txt") + else: + session.install("-r", "requirements.txt") + + if os.path.exists("requirements-test.txt"): + if os.path.exists("constraints-test.txt"): + session.install( + "-r", "requirements-test.txt", "-c", "constraints-test.txt" + ) + else: + session.install("-r", "requirements-test.txt") + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. 
+ # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars(), + ) @nox.session(python=ALL_VERSIONS) diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py b/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py index 93a9122cc457..3bbef5d54f44 100644 --- a/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py +++ b/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py @@ -14,6 +14,7 @@ from __future__ import print_function +import glob import os from pathlib import Path import sys @@ -184,37 +185,44 @@ def blacken(session: nox.sessions.Session) -> None: def _session_tests( session: nox.sessions.Session, post_install: Callable = None ) -> None: - if TEST_CONFIG["pip_version_override"]: - pip_version = TEST_CONFIG["pip_version_override"] - session.install(f"pip=={pip_version}") - """Runs py.test for a particular project.""" - if os.path.exists("requirements.txt"): - if os.path.exists("constraints.txt"): - session.install("-r", "requirements.txt", "-c", "constraints.txt") - else: - session.install("-r", "requirements.txt") - - if os.path.exists("requirements-test.txt"): - if os.path.exists("constraints-test.txt"): - session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") - else: - session.install("-r", "requirements-test.txt") - - if INSTALL_LIBRARY_FROM_SOURCE: - session.install("-e", _get_repo_root()) - - if post_install: - post_install(session) - - session.run( - "pytest", - *(PYTEST_COMMON_ARGS + session.posargs), - # Pytest will return 5 when no tests are collected. This can happen - # on travis where slow and flaky tests are excluded. - # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html - success_codes=[0, 5], - env=get_pytest_env_vars(), - ) + # check for presence of tests + test_list = glob.glob("*_test.py") + glob.glob("test_*.py") + if len(test_list) == 0: + print("No tests found, skipping directory.") + else: + if TEST_CONFIG["pip_version_override"]: + pip_version = TEST_CONFIG["pip_version_override"] + session.install(f"pip=={pip_version}") + """Runs py.test for a particular project.""" + if os.path.exists("requirements.txt"): + if os.path.exists("constraints.txt"): + session.install("-r", "requirements.txt", "-c", "constraints.txt") + else: + session.install("-r", "requirements.txt") + + if os.path.exists("requirements-test.txt"): + if os.path.exists("constraints-test.txt"): + session.install( + "-r", "requirements-test.txt", "-c", "constraints-test.txt" + ) + else: + session.install("-r", "requirements-test.txt") + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. 
+ # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars(), + ) @nox.session(python=ALL_VERSIONS) diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py b/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py index 93a9122cc457..3bbef5d54f44 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py +++ b/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py @@ -14,6 +14,7 @@ from __future__ import print_function +import glob import os from pathlib import Path import sys @@ -184,37 +185,44 @@ def blacken(session: nox.sessions.Session) -> None: def _session_tests( session: nox.sessions.Session, post_install: Callable = None ) -> None: - if TEST_CONFIG["pip_version_override"]: - pip_version = TEST_CONFIG["pip_version_override"] - session.install(f"pip=={pip_version}") - """Runs py.test for a particular project.""" - if os.path.exists("requirements.txt"): - if os.path.exists("constraints.txt"): - session.install("-r", "requirements.txt", "-c", "constraints.txt") - else: - session.install("-r", "requirements.txt") - - if os.path.exists("requirements-test.txt"): - if os.path.exists("constraints-test.txt"): - session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") - else: - session.install("-r", "requirements-test.txt") - - if INSTALL_LIBRARY_FROM_SOURCE: - session.install("-e", _get_repo_root()) - - if post_install: - post_install(session) - - session.run( - "pytest", - *(PYTEST_COMMON_ARGS + session.posargs), - # Pytest will return 5 when no tests are collected. This can happen - # on travis where slow and flaky tests are excluded. - # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html - success_codes=[0, 5], - env=get_pytest_env_vars(), - ) + # check for presence of tests + test_list = glob.glob("*_test.py") + glob.glob("test_*.py") + if len(test_list) == 0: + print("No tests found, skipping directory.") + else: + if TEST_CONFIG["pip_version_override"]: + pip_version = TEST_CONFIG["pip_version_override"] + session.install(f"pip=={pip_version}") + """Runs py.test for a particular project.""" + if os.path.exists("requirements.txt"): + if os.path.exists("constraints.txt"): + session.install("-r", "requirements.txt", "-c", "constraints.txt") + else: + session.install("-r", "requirements.txt") + + if os.path.exists("requirements-test.txt"): + if os.path.exists("constraints-test.txt"): + session.install( + "-r", "requirements-test.txt", "-c", "constraints-test.txt" + ) + else: + session.install("-r", "requirements-test.txt") + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. 
+ # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars(), + ) @nox.session(python=ALL_VERSIONS) diff --git a/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py b/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py index 93a9122cc457..3bbef5d54f44 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py +++ b/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py @@ -14,6 +14,7 @@ from __future__ import print_function +import glob import os from pathlib import Path import sys @@ -184,37 +185,44 @@ def blacken(session: nox.sessions.Session) -> None: def _session_tests( session: nox.sessions.Session, post_install: Callable = None ) -> None: - if TEST_CONFIG["pip_version_override"]: - pip_version = TEST_CONFIG["pip_version_override"] - session.install(f"pip=={pip_version}") - """Runs py.test for a particular project.""" - if os.path.exists("requirements.txt"): - if os.path.exists("constraints.txt"): - session.install("-r", "requirements.txt", "-c", "constraints.txt") - else: - session.install("-r", "requirements.txt") - - if os.path.exists("requirements-test.txt"): - if os.path.exists("constraints-test.txt"): - session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") - else: - session.install("-r", "requirements-test.txt") - - if INSTALL_LIBRARY_FROM_SOURCE: - session.install("-e", _get_repo_root()) - - if post_install: - post_install(session) - - session.run( - "pytest", - *(PYTEST_COMMON_ARGS + session.posargs), - # Pytest will return 5 when no tests are collected. This can happen - # on travis where slow and flaky tests are excluded. - # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html - success_codes=[0, 5], - env=get_pytest_env_vars(), - ) + # check for presence of tests + test_list = glob.glob("*_test.py") + glob.glob("test_*.py") + if len(test_list) == 0: + print("No tests found, skipping directory.") + else: + if TEST_CONFIG["pip_version_override"]: + pip_version = TEST_CONFIG["pip_version_override"] + session.install(f"pip=={pip_version}") + """Runs py.test for a particular project.""" + if os.path.exists("requirements.txt"): + if os.path.exists("constraints.txt"): + session.install("-r", "requirements.txt", "-c", "constraints.txt") + else: + session.install("-r", "requirements.txt") + + if os.path.exists("requirements-test.txt"): + if os.path.exists("constraints-test.txt"): + session.install( + "-r", "requirements-test.txt", "-c", "constraints-test.txt" + ) + else: + session.install("-r", "requirements-test.txt") + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. 
+ # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars(), + ) @nox.session(python=ALL_VERSIONS) diff --git a/packages/google-cloud-bigtable/samples/quickstart/noxfile.py b/packages/google-cloud-bigtable/samples/quickstart/noxfile.py index 93a9122cc457..3bbef5d54f44 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/noxfile.py +++ b/packages/google-cloud-bigtable/samples/quickstart/noxfile.py @@ -14,6 +14,7 @@ from __future__ import print_function +import glob import os from pathlib import Path import sys @@ -184,37 +185,44 @@ def blacken(session: nox.sessions.Session) -> None: def _session_tests( session: nox.sessions.Session, post_install: Callable = None ) -> None: - if TEST_CONFIG["pip_version_override"]: - pip_version = TEST_CONFIG["pip_version_override"] - session.install(f"pip=={pip_version}") - """Runs py.test for a particular project.""" - if os.path.exists("requirements.txt"): - if os.path.exists("constraints.txt"): - session.install("-r", "requirements.txt", "-c", "constraints.txt") - else: - session.install("-r", "requirements.txt") - - if os.path.exists("requirements-test.txt"): - if os.path.exists("constraints-test.txt"): - session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") - else: - session.install("-r", "requirements-test.txt") - - if INSTALL_LIBRARY_FROM_SOURCE: - session.install("-e", _get_repo_root()) - - if post_install: - post_install(session) - - session.run( - "pytest", - *(PYTEST_COMMON_ARGS + session.posargs), - # Pytest will return 5 when no tests are collected. This can happen - # on travis where slow and flaky tests are excluded. - # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html - success_codes=[0, 5], - env=get_pytest_env_vars(), - ) + # check for presence of tests + test_list = glob.glob("*_test.py") + glob.glob("test_*.py") + if len(test_list) == 0: + print("No tests found, skipping directory.") + else: + if TEST_CONFIG["pip_version_override"]: + pip_version = TEST_CONFIG["pip_version_override"] + session.install(f"pip=={pip_version}") + """Runs py.test for a particular project.""" + if os.path.exists("requirements.txt"): + if os.path.exists("constraints.txt"): + session.install("-r", "requirements.txt", "-c", "constraints.txt") + else: + session.install("-r", "requirements.txt") + + if os.path.exists("requirements-test.txt"): + if os.path.exists("constraints-test.txt"): + session.install( + "-r", "requirements-test.txt", "-c", "constraints-test.txt" + ) + else: + session.install("-r", "requirements-test.txt") + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. 
+ # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars(), + ) @nox.session(python=ALL_VERSIONS) diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py b/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py index 93a9122cc457..3bbef5d54f44 100644 --- a/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py +++ b/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py @@ -14,6 +14,7 @@ from __future__ import print_function +import glob import os from pathlib import Path import sys @@ -184,37 +185,44 @@ def blacken(session: nox.sessions.Session) -> None: def _session_tests( session: nox.sessions.Session, post_install: Callable = None ) -> None: - if TEST_CONFIG["pip_version_override"]: - pip_version = TEST_CONFIG["pip_version_override"] - session.install(f"pip=={pip_version}") - """Runs py.test for a particular project.""" - if os.path.exists("requirements.txt"): - if os.path.exists("constraints.txt"): - session.install("-r", "requirements.txt", "-c", "constraints.txt") - else: - session.install("-r", "requirements.txt") - - if os.path.exists("requirements-test.txt"): - if os.path.exists("constraints-test.txt"): - session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") - else: - session.install("-r", "requirements-test.txt") - - if INSTALL_LIBRARY_FROM_SOURCE: - session.install("-e", _get_repo_root()) - - if post_install: - post_install(session) - - session.run( - "pytest", - *(PYTEST_COMMON_ARGS + session.posargs), - # Pytest will return 5 when no tests are collected. This can happen - # on travis where slow and flaky tests are excluded. - # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html - success_codes=[0, 5], - env=get_pytest_env_vars(), - ) + # check for presence of tests + test_list = glob.glob("*_test.py") + glob.glob("test_*.py") + if len(test_list) == 0: + print("No tests found, skipping directory.") + else: + if TEST_CONFIG["pip_version_override"]: + pip_version = TEST_CONFIG["pip_version_override"] + session.install(f"pip=={pip_version}") + """Runs py.test for a particular project.""" + if os.path.exists("requirements.txt"): + if os.path.exists("constraints.txt"): + session.install("-r", "requirements.txt", "-c", "constraints.txt") + else: + session.install("-r", "requirements.txt") + + if os.path.exists("requirements-test.txt"): + if os.path.exists("constraints-test.txt"): + session.install( + "-r", "requirements-test.txt", "-c", "constraints-test.txt" + ) + else: + session.install("-r", "requirements-test.txt") + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. 
+ # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars(), + ) @nox.session(python=ALL_VERSIONS) diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py index 93a9122cc457..3bbef5d54f44 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py @@ -14,6 +14,7 @@ from __future__ import print_function +import glob import os from pathlib import Path import sys @@ -184,37 +185,44 @@ def blacken(session: nox.sessions.Session) -> None: def _session_tests( session: nox.sessions.Session, post_install: Callable = None ) -> None: - if TEST_CONFIG["pip_version_override"]: - pip_version = TEST_CONFIG["pip_version_override"] - session.install(f"pip=={pip_version}") - """Runs py.test for a particular project.""" - if os.path.exists("requirements.txt"): - if os.path.exists("constraints.txt"): - session.install("-r", "requirements.txt", "-c", "constraints.txt") - else: - session.install("-r", "requirements.txt") - - if os.path.exists("requirements-test.txt"): - if os.path.exists("constraints-test.txt"): - session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") - else: - session.install("-r", "requirements-test.txt") - - if INSTALL_LIBRARY_FROM_SOURCE: - session.install("-e", _get_repo_root()) - - if post_install: - post_install(session) - - session.run( - "pytest", - *(PYTEST_COMMON_ARGS + session.posargs), - # Pytest will return 5 when no tests are collected. This can happen - # on travis where slow and flaky tests are excluded. - # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html - success_codes=[0, 5], - env=get_pytest_env_vars(), - ) + # check for presence of tests + test_list = glob.glob("*_test.py") + glob.glob("test_*.py") + if len(test_list) == 0: + print("No tests found, skipping directory.") + else: + if TEST_CONFIG["pip_version_override"]: + pip_version = TEST_CONFIG["pip_version_override"] + session.install(f"pip=={pip_version}") + """Runs py.test for a particular project.""" + if os.path.exists("requirements.txt"): + if os.path.exists("constraints.txt"): + session.install("-r", "requirements.txt", "-c", "constraints.txt") + else: + session.install("-r", "requirements.txt") + + if os.path.exists("requirements-test.txt"): + if os.path.exists("constraints-test.txt"): + session.install( + "-r", "requirements-test.txt", "-c", "constraints-test.txt" + ) + else: + session.install("-r", "requirements-test.txt") + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. 
+ # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars(), + ) @nox.session(python=ALL_VERSIONS) diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py index 93a9122cc457..3bbef5d54f44 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py @@ -14,6 +14,7 @@ from __future__ import print_function +import glob import os from pathlib import Path import sys @@ -184,37 +185,44 @@ def blacken(session: nox.sessions.Session) -> None: def _session_tests( session: nox.sessions.Session, post_install: Callable = None ) -> None: - if TEST_CONFIG["pip_version_override"]: - pip_version = TEST_CONFIG["pip_version_override"] - session.install(f"pip=={pip_version}") - """Runs py.test for a particular project.""" - if os.path.exists("requirements.txt"): - if os.path.exists("constraints.txt"): - session.install("-r", "requirements.txt", "-c", "constraints.txt") - else: - session.install("-r", "requirements.txt") - - if os.path.exists("requirements-test.txt"): - if os.path.exists("constraints-test.txt"): - session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") - else: - session.install("-r", "requirements-test.txt") - - if INSTALL_LIBRARY_FROM_SOURCE: - session.install("-e", _get_repo_root()) - - if post_install: - post_install(session) - - session.run( - "pytest", - *(PYTEST_COMMON_ARGS + session.posargs), - # Pytest will return 5 when no tests are collected. This can happen - # on travis where slow and flaky tests are excluded. - # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html - success_codes=[0, 5], - env=get_pytest_env_vars(), - ) + # check for presence of tests + test_list = glob.glob("*_test.py") + glob.glob("test_*.py") + if len(test_list) == 0: + print("No tests found, skipping directory.") + else: + if TEST_CONFIG["pip_version_override"]: + pip_version = TEST_CONFIG["pip_version_override"] + session.install(f"pip=={pip_version}") + """Runs py.test for a particular project.""" + if os.path.exists("requirements.txt"): + if os.path.exists("constraints.txt"): + session.install("-r", "requirements.txt", "-c", "constraints.txt") + else: + session.install("-r", "requirements.txt") + + if os.path.exists("requirements-test.txt"): + if os.path.exists("constraints-test.txt"): + session.install( + "-r", "requirements-test.txt", "-c", "constraints-test.txt" + ) + else: + session.install("-r", "requirements-test.txt") + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. 
+ # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars(), + ) @nox.session(python=ALL_VERSIONS) diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py index 93a9122cc457..3bbef5d54f44 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py @@ -14,6 +14,7 @@ from __future__ import print_function +import glob import os from pathlib import Path import sys @@ -184,37 +185,44 @@ def blacken(session: nox.sessions.Session) -> None: def _session_tests( session: nox.sessions.Session, post_install: Callable = None ) -> None: - if TEST_CONFIG["pip_version_override"]: - pip_version = TEST_CONFIG["pip_version_override"] - session.install(f"pip=={pip_version}") - """Runs py.test for a particular project.""" - if os.path.exists("requirements.txt"): - if os.path.exists("constraints.txt"): - session.install("-r", "requirements.txt", "-c", "constraints.txt") - else: - session.install("-r", "requirements.txt") - - if os.path.exists("requirements-test.txt"): - if os.path.exists("constraints-test.txt"): - session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") - else: - session.install("-r", "requirements-test.txt") - - if INSTALL_LIBRARY_FROM_SOURCE: - session.install("-e", _get_repo_root()) - - if post_install: - post_install(session) - - session.run( - "pytest", - *(PYTEST_COMMON_ARGS + session.posargs), - # Pytest will return 5 when no tests are collected. This can happen - # on travis where slow and flaky tests are excluded. - # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html - success_codes=[0, 5], - env=get_pytest_env_vars(), - ) + # check for presence of tests + test_list = glob.glob("*_test.py") + glob.glob("test_*.py") + if len(test_list) == 0: + print("No tests found, skipping directory.") + else: + if TEST_CONFIG["pip_version_override"]: + pip_version = TEST_CONFIG["pip_version_override"] + session.install(f"pip=={pip_version}") + """Runs py.test for a particular project.""" + if os.path.exists("requirements.txt"): + if os.path.exists("constraints.txt"): + session.install("-r", "requirements.txt", "-c", "constraints.txt") + else: + session.install("-r", "requirements.txt") + + if os.path.exists("requirements-test.txt"): + if os.path.exists("constraints-test.txt"): + session.install( + "-r", "requirements-test.txt", "-c", "constraints-test.txt" + ) + else: + session.install("-r", "requirements-test.txt") + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. 
+ # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars(), + ) @nox.session(python=ALL_VERSIONS) diff --git a/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py b/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py index 93a9122cc457..3bbef5d54f44 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py +++ b/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py @@ -14,6 +14,7 @@ from __future__ import print_function +import glob import os from pathlib import Path import sys @@ -184,37 +185,44 @@ def blacken(session: nox.sessions.Session) -> None: def _session_tests( session: nox.sessions.Session, post_install: Callable = None ) -> None: - if TEST_CONFIG["pip_version_override"]: - pip_version = TEST_CONFIG["pip_version_override"] - session.install(f"pip=={pip_version}") - """Runs py.test for a particular project.""" - if os.path.exists("requirements.txt"): - if os.path.exists("constraints.txt"): - session.install("-r", "requirements.txt", "-c", "constraints.txt") - else: - session.install("-r", "requirements.txt") - - if os.path.exists("requirements-test.txt"): - if os.path.exists("constraints-test.txt"): - session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") - else: - session.install("-r", "requirements-test.txt") - - if INSTALL_LIBRARY_FROM_SOURCE: - session.install("-e", _get_repo_root()) - - if post_install: - post_install(session) - - session.run( - "pytest", - *(PYTEST_COMMON_ARGS + session.posargs), - # Pytest will return 5 when no tests are collected. This can happen - # on travis where slow and flaky tests are excluded. - # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html - success_codes=[0, 5], - env=get_pytest_env_vars(), - ) + # check for presence of tests + test_list = glob.glob("*_test.py") + glob.glob("test_*.py") + if len(test_list) == 0: + print("No tests found, skipping directory.") + else: + if TEST_CONFIG["pip_version_override"]: + pip_version = TEST_CONFIG["pip_version_override"] + session.install(f"pip=={pip_version}") + """Runs py.test for a particular project.""" + if os.path.exists("requirements.txt"): + if os.path.exists("constraints.txt"): + session.install("-r", "requirements.txt", "-c", "constraints.txt") + else: + session.install("-r", "requirements.txt") + + if os.path.exists("requirements-test.txt"): + if os.path.exists("constraints-test.txt"): + session.install( + "-r", "requirements-test.txt", "-c", "constraints-test.txt" + ) + else: + session.install("-r", "requirements-test.txt") + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. 
+ # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars(), + ) @nox.session(python=ALL_VERSIONS) From 77f9c4b28c46b691547b067966275b3a578daaeb Mon Sep 17 00:00:00 2001 From: Anthonios Partheniou Date: Thu, 13 Jan 2022 12:55:38 -0500 Subject: [PATCH 546/892] chore: update .repo-metadata.json (#483) --- packages/google-cloud-bigtable/.repo-metadata.json | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/packages/google-cloud-bigtable/.repo-metadata.json b/packages/google-cloud-bigtable/.repo-metadata.json index ea61343a0119..3c65ac669823 100644 --- a/packages/google-cloud-bigtable/.repo-metadata.json +++ b/packages/google-cloud-bigtable/.repo-metadata.json @@ -2,9 +2,9 @@ "name": "bigtable", "name_pretty": "Cloud Bigtable", "product_documentation": "https://cloud.google.com/bigtable", - "client_documentation": "https://googleapis.dev/python/bigtable/latest", + "client_documentation": "https://cloud.google.com/python/docs/reference/bigtable/latest", "issue_tracker": "https://issuetracker.google.com/savedsearches/559777", - "release_level": "ga", + "release_level": "stable", "language": "python", "library_type": "GAPIC_COMBO", "repo": "googleapis/python-bigtable", @@ -75,5 +75,6 @@ } ], "default_version": "v2", - "codeowner_team": "@googleapis/api-bigtable" + "codeowner_team": "@googleapis/api-bigtable", + "api_shortname": "bigtable" } From 9f183d90284db019e45e5ad573b5f8dcf31e7947 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Fri, 14 Jan 2022 12:34:56 -0500 Subject: [PATCH 547/892] chore(python): update release.sh to use keystore (#489) Source-Link: https://github.com/googleapis/synthtool/commit/69fda12e2994f0b595a397e8bb6e3e9f380524eb Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:ae600f36b6bc972b368367b6f83a1d91ec2c82a4a116b383d67d547c56fe6de3 Co-authored-by: Owl Bot --- .../google-cloud-bigtable/.github/.OwlBot.lock.yaml | 2 +- packages/google-cloud-bigtable/.kokoro/release.sh | 2 +- .../google-cloud-bigtable/.kokoro/release/common.cfg | 12 +++++++++++- 3 files changed, 13 insertions(+), 3 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index ff5126c188d0..eecb84c21b27 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -1,3 +1,3 @@ docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:dfa9b663b32de8b5b327e32c1da665a80de48876558dd58091d8160c60ad7355 + digest: sha256:ae600f36b6bc972b368367b6f83a1d91ec2c82a4a116b383d67d547c56fe6de3 diff --git a/packages/google-cloud-bigtable/.kokoro/release.sh b/packages/google-cloud-bigtable/.kokoro/release.sh index d3ffac5f677c..f0cb9d5db53f 100755 --- a/packages/google-cloud-bigtable/.kokoro/release.sh +++ b/packages/google-cloud-bigtable/.kokoro/release.sh @@ -26,7 +26,7 @@ python3 -m pip install --upgrade twine wheel setuptools export PYTHONUNBUFFERED=1 # Move into the package, build the distribution and upload. 
-TWINE_PASSWORD=$(cat "${KOKORO_GFILE_DIR}/secret_manager/google-cloud-pypi-token") +TWINE_PASSWORD=$(cat "${KOKORO_KEYSTORE_DIR}/73713_google-cloud-pypi-token-keystore-1") cd github/python-bigtable python3 setup.py sdist bdist_wheel twine upload --username __token__ --password "${TWINE_PASSWORD}" dist/* diff --git a/packages/google-cloud-bigtable/.kokoro/release/common.cfg b/packages/google-cloud-bigtable/.kokoro/release/common.cfg index d964a8f0692d..8477e4ca6dd5 100644 --- a/packages/google-cloud-bigtable/.kokoro/release/common.cfg +++ b/packages/google-cloud-bigtable/.kokoro/release/common.cfg @@ -23,8 +23,18 @@ env_vars: { value: "github/python-bigtable/.kokoro/release.sh" } +# Fetch PyPI password +before_action { + fetch_keystore { + keystore_resource { + keystore_config_id: 73713 + keyname: "google-cloud-pypi-token-keystore-1" + } + } +} + # Tokens needed to report release status back to GitHub env_vars: { key: "SECRET_MANAGER_KEYS" - value: "releasetool-publish-reporter-app,releasetool-publish-reporter-googleapis-installation,releasetool-publish-reporter-pem,google-cloud-pypi-token" + value: "releasetool-publish-reporter-app,releasetool-publish-reporter-googleapis-installation,releasetool-publish-reporter-pem" } From 9bc0861dfc7d5feb1110f6309b2ae7bc2b3c03f0 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Sat, 15 Jan 2022 09:29:10 -0500 Subject: [PATCH 548/892] chore: use gapic-generator-python 0.58.4 (#484) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore: use gapic-generator-python 0.58.4 fix: provide appropriate mock values for message body fields committer: dovs PiperOrigin-RevId: 419025932 Source-Link: https://github.com/googleapis/googleapis/commit/73da6697f598f1ba30618924936a59f8e457ec89 Source-Link: https://github.com/googleapis/googleapis-gen/commit/46df624a54b9ed47c1a7eefb7a49413cf7b82f98 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiNDZkZjYyNGE1NGI5ZWQ0N2MxYTdlZWZiN2E0OTQxM2NmN2I4MmY5OCJ9 * 🦉 Updates from OwlBot See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * set python-samples-reviewers as codeowners for samples * 🦉 Updates from OwlBot See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md Co-authored-by: Owl Bot Co-authored-by: Anthonios Partheniou --- .../google-cloud-bigtable/.github/CODEOWNERS | 9 +- .../transports/base.py | 1 - .../bigtable_table_admin/transports/base.py | 1 - .../services/bigtable/transports/base.py | 1 - packages/google-cloud-bigtable/owlbot.py | 2 +- .../test_bigtable_instance_admin.py | 235 +++++--------- .../test_bigtable_table_admin.py | 287 +++++++----------- .../unit/gapic/bigtable_v2/test_bigtable.py | 62 ++-- 8 files changed, 219 insertions(+), 379 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/CODEOWNERS b/packages/google-cloud-bigtable/.github/CODEOWNERS 
index dc38a1e1d1d0..2f1fee90455c 100644 --- a/packages/google-cloud-bigtable/.github/CODEOWNERS +++ b/packages/google-cloud-bigtable/.github/CODEOWNERS @@ -3,9 +3,10 @@ # # For syntax help see: # https://help.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners#codeowners-syntax +# Note: This file is autogenerated. To make changes to the codeowner team, please update .repo-metadata.json. +# @googleapis/yoshi-python @googleapis/api-bigtable are the default owners for changes in this repo +* @googleapis/yoshi-python @googleapis/api-bigtable -# The api-bigtable team is the default owner for anything not -# explicitly taken by someone else. -* @googleapis/api-bigtable @googleapis/yoshi-python -/samples/ @googleapis/api-bigtable @googleapis/python-samples-owners +# @googleapis/python-samples-reviewers @googleapis/api-bigtable are the default owners for samples changes +/samples/ @googleapis/python-samples-reviewers @googleapis/api-bigtable diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py index b928472c0ba2..f86569e0a0b7 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py @@ -115,7 +115,6 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py index 10068063f646..e8937e5392c7 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py @@ -115,7 +115,6 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py index 36f5a1a2e363..bb727d67e271 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py @@ -106,7 +106,6 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id diff --git a/packages/google-cloud-bigtable/owlbot.py b/packages/google-cloud-bigtable/owlbot.py index 6ab6579e1695..a5e1b09de435 100644 --- a/packages/google-cloud-bigtable/owlbot.py +++ b/packages/google-cloud-bigtable/owlbot.py @@ -92,7 +92,7 @@ def 
get_staging_dirs( cov_level=100, ) -s.move(templated_files, excludes=[".coveragerc", ".github/CODEOWNERS"]) +s.move(templated_files, excludes=[".coveragerc"]) # ---------------------------------------------------------------------------- # Customize noxfile.py diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index 9b1636fbcc58..11205044dcc9 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -268,20 +268,20 @@ def test_bigtable_instance_admin_client_client_options( # unsupported value. with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): with pytest.raises(MutualTLSChannelError): - client = client_class() + client = client_class(transport=transport_name) # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} ): with pytest.raises(ValueError): - client = client_class() + client = client_class(transport=transport_name) # Check the case quota_project_id is provided options = client_options.ClientOptions(quota_project_id="octopus") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None - client = client_class(transport=transport_name, client_options=options) + client = client_class(client_options=options, transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, @@ -350,7 +350,7 @@ def test_bigtable_instance_admin_client_mtls_env_auto( ) with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None - client = client_class(transport=transport_name, client_options=options) + client = client_class(client_options=options, transport=transport_name) if use_client_cert_env == "false": expected_client_cert_source = None @@ -449,7 +449,7 @@ def test_bigtable_instance_admin_client_client_options_scopes( options = client_options.ClientOptions(scopes=["1", "2"],) with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None - client = client_class(transport=transport_name, client_options=options) + client = client_class(client_options=options, transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, @@ -484,7 +484,7 @@ def test_bigtable_instance_admin_client_client_options_credentials_file( options = client_options.ClientOptions(credentials_file="credentials.json") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None - client = client_class(transport=transport_name, client_options=options) + client = client_class(client_options=options, transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file="credentials.json", @@ -517,9 +517,10 @@ def test_bigtable_instance_admin_client_client_options_from_dict(): ) -def test_create_instance( - transport: str = "grpc", request_type=bigtable_instance_admin.CreateInstanceRequest -): +@pytest.mark.parametrize( + "request_type", [bigtable_instance_admin.CreateInstanceRequest, dict,] +) +def test_create_instance(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ 
-543,10 +544,6 @@ def test_create_instance( assert isinstance(response, future.Future) -def test_create_instance_from_dict(): - test_create_instance(request_type=dict) - - def test_create_instance_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. @@ -764,9 +761,10 @@ async def test_create_instance_flattened_error_async(): ) -def test_get_instance( - transport: str = "grpc", request_type=bigtable_instance_admin.GetInstanceRequest -): +@pytest.mark.parametrize( + "request_type", [bigtable_instance_admin.GetInstanceRequest, dict,] +) +def test_get_instance(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -799,10 +797,6 @@ def test_get_instance( assert response.type_ == instance.Instance.Type.PRODUCTION -def test_get_instance_from_dict(): - test_get_instance(request_type=dict) - - def test_get_instance_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. @@ -989,9 +983,10 @@ async def test_get_instance_flattened_error_async(): ) -def test_list_instances( - transport: str = "grpc", request_type=bigtable_instance_admin.ListInstancesRequest -): +@pytest.mark.parametrize( + "request_type", [bigtable_instance_admin.ListInstancesRequest, dict,] +) +def test_list_instances(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -1021,10 +1016,6 @@ def test_list_instances( assert response.next_page_token == "next_page_token_value" -def test_list_instances_from_dict(): - test_list_instances(request_type=dict) - - def test_list_instances_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. @@ -1211,7 +1202,8 @@ async def test_list_instances_flattened_error_async(): ) -def test_update_instance(transport: str = "grpc", request_type=instance.Instance): +@pytest.mark.parametrize("request_type", [instance.Instance, dict,]) +def test_update_instance(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -1244,10 +1236,6 @@ def test_update_instance(transport: str = "grpc", request_type=instance.Instance assert response.type_ == instance.Instance.Type.PRODUCTION -def test_update_instance_from_dict(): - test_update_instance(request_type=dict) - - def test_update_instance_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
@@ -1359,10 +1347,10 @@ async def test_update_instance_field_headers_async(): assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] -def test_partial_update_instance( - transport: str = "grpc", - request_type=bigtable_instance_admin.PartialUpdateInstanceRequest, -): +@pytest.mark.parametrize( + "request_type", [bigtable_instance_admin.PartialUpdateInstanceRequest, dict,] +) +def test_partial_update_instance(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -1388,10 +1376,6 @@ def test_partial_update_instance( assert isinstance(response, future.Future) -def test_partial_update_instance_from_dict(): - test_partial_update_instance(request_type=dict) - - def test_partial_update_instance_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. @@ -1605,9 +1589,10 @@ async def test_partial_update_instance_flattened_error_async(): ) -def test_delete_instance( - transport: str = "grpc", request_type=bigtable_instance_admin.DeleteInstanceRequest -): +@pytest.mark.parametrize( + "request_type", [bigtable_instance_admin.DeleteInstanceRequest, dict,] +) +def test_delete_instance(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -1631,10 +1616,6 @@ def test_delete_instance( assert response is None -def test_delete_instance_from_dict(): - test_delete_instance(request_type=dict) - - def test_delete_instance_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. @@ -1810,9 +1791,10 @@ async def test_delete_instance_flattened_error_async(): ) -def test_create_cluster( - transport: str = "grpc", request_type=bigtable_instance_admin.CreateClusterRequest -): +@pytest.mark.parametrize( + "request_type", [bigtable_instance_admin.CreateClusterRequest, dict,] +) +def test_create_cluster(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -1836,10 +1818,6 @@ def test_create_cluster( assert isinstance(response, future.Future) -def test_create_cluster_from_dict(): - test_create_cluster(request_type=dict) - - def test_create_cluster_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. @@ -2047,9 +2025,10 @@ async def test_create_cluster_flattened_error_async(): ) -def test_get_cluster( - transport: str = "grpc", request_type=bigtable_instance_admin.GetClusterRequest -): +@pytest.mark.parametrize( + "request_type", [bigtable_instance_admin.GetClusterRequest, dict,] +) +def test_get_cluster(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -2089,10 +2068,6 @@ def test_get_cluster( assert response.default_storage_type == common.StorageType.SSD -def test_get_cluster_from_dict(): - test_get_cluster(request_type=dict) - - def test_get_cluster_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
@@ -2281,9 +2256,10 @@ async def test_get_cluster_flattened_error_async(): ) -def test_list_clusters( - transport: str = "grpc", request_type=bigtable_instance_admin.ListClustersRequest -): +@pytest.mark.parametrize( + "request_type", [bigtable_instance_admin.ListClustersRequest, dict,] +) +def test_list_clusters(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -2313,10 +2289,6 @@ def test_list_clusters( assert response.next_page_token == "next_page_token_value" -def test_list_clusters_from_dict(): - test_list_clusters(request_type=dict) - - def test_list_clusters_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. @@ -2503,7 +2475,8 @@ async def test_list_clusters_flattened_error_async(): ) -def test_update_cluster(transport: str = "grpc", request_type=instance.Cluster): +@pytest.mark.parametrize("request_type", [instance.Cluster, dict,]) +def test_update_cluster(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -2527,10 +2500,6 @@ def test_update_cluster(transport: str = "grpc", request_type=instance.Cluster): assert isinstance(response, future.Future) -def test_update_cluster_from_dict(): - test_update_cluster(request_type=dict) - - def test_update_cluster_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. @@ -2635,10 +2604,10 @@ async def test_update_cluster_field_headers_async(): assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] -def test_partial_update_cluster( - transport: str = "grpc", - request_type=bigtable_instance_admin.PartialUpdateClusterRequest, -): +@pytest.mark.parametrize( + "request_type", [bigtable_instance_admin.PartialUpdateClusterRequest, dict,] +) +def test_partial_update_cluster(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -2664,10 +2633,6 @@ def test_partial_update_cluster( assert isinstance(response, future.Future) -def test_partial_update_cluster_from_dict(): - test_partial_update_cluster(request_type=dict) - - def test_partial_update_cluster_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. @@ -2881,9 +2846,10 @@ async def test_partial_update_cluster_flattened_error_async(): ) -def test_delete_cluster( - transport: str = "grpc", request_type=bigtable_instance_admin.DeleteClusterRequest -): +@pytest.mark.parametrize( + "request_type", [bigtable_instance_admin.DeleteClusterRequest, dict,] +) +def test_delete_cluster(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -2907,10 +2873,6 @@ def test_delete_cluster( assert response is None -def test_delete_cluster_from_dict(): - test_delete_cluster(request_type=dict) - - def test_delete_cluster_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
@@ -3086,10 +3048,10 @@ async def test_delete_cluster_flattened_error_async(): ) -def test_create_app_profile( - transport: str = "grpc", - request_type=bigtable_instance_admin.CreateAppProfileRequest, -): +@pytest.mark.parametrize( + "request_type", [bigtable_instance_admin.CreateAppProfileRequest, dict,] +) +def test_create_app_profile(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -3125,10 +3087,6 @@ def test_create_app_profile( assert response.description == "description_value" -def test_create_app_profile_from_dict(): - test_create_app_profile(request_type=dict) - - def test_create_app_profile_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. @@ -3349,9 +3307,10 @@ async def test_create_app_profile_flattened_error_async(): ) -def test_get_app_profile( - transport: str = "grpc", request_type=bigtable_instance_admin.GetAppProfileRequest -): +@pytest.mark.parametrize( + "request_type", [bigtable_instance_admin.GetAppProfileRequest, dict,] +) +def test_get_app_profile(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -3385,10 +3344,6 @@ def test_get_app_profile( assert response.description == "description_value" -def test_get_app_profile_from_dict(): - test_get_app_profile(request_type=dict) - - def test_get_app_profile_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. @@ -3571,9 +3526,10 @@ async def test_get_app_profile_flattened_error_async(): ) -def test_list_app_profiles( - transport: str = "grpc", request_type=bigtable_instance_admin.ListAppProfilesRequest -): +@pytest.mark.parametrize( + "request_type", [bigtable_instance_admin.ListAppProfilesRequest, dict,] +) +def test_list_app_profiles(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -3604,10 +3560,6 @@ def test_list_app_profiles( assert response.failed_locations == ["failed_locations_value"] -def test_list_app_profiles_from_dict(): - test_list_app_profiles(request_type=dict) - - def test_list_app_profiles_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. @@ -3806,9 +3758,9 @@ async def test_list_app_profiles_flattened_error_async(): ) -def test_list_app_profiles_pager(): +def test_list_app_profiles_pager(transport_name: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials, + credentials=ga_credentials.AnonymousCredentials, transport=transport_name, ) # Mock the actual call within the gRPC stub, and fake the request. @@ -3850,9 +3802,9 @@ def test_list_app_profiles_pager(): assert all(isinstance(i, instance.AppProfile) for i in results) -def test_list_app_profiles_pages(): +def test_list_app_profiles_pages(transport_name: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials, + credentials=ga_credentials.AnonymousCredentials, transport=transport_name, ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -3968,10 +3920,10 @@ async def test_list_app_profiles_async_pages(): assert page_.raw_page.next_page_token == token -def test_update_app_profile( - transport: str = "grpc", - request_type=bigtable_instance_admin.UpdateAppProfileRequest, -): +@pytest.mark.parametrize( + "request_type", [bigtable_instance_admin.UpdateAppProfileRequest, dict,] +) +def test_update_app_profile(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -3997,10 +3949,6 @@ def test_update_app_profile( assert isinstance(response, future.Future) -def test_update_app_profile_from_dict(): - test_update_app_profile(request_type=dict) - - def test_update_app_profile_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. @@ -4214,10 +4162,10 @@ async def test_update_app_profile_flattened_error_async(): ) -def test_delete_app_profile( - transport: str = "grpc", - request_type=bigtable_instance_admin.DeleteAppProfileRequest, -): +@pytest.mark.parametrize( + "request_type", [bigtable_instance_admin.DeleteAppProfileRequest, dict,] +) +def test_delete_app_profile(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -4243,10 +4191,6 @@ def test_delete_app_profile( assert response is None -def test_delete_app_profile_from_dict(): - test_delete_app_profile(request_type=dict) - - def test_delete_app_profile_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. @@ -4434,9 +4378,8 @@ async def test_delete_app_profile_flattened_error_async(): ) -def test_get_iam_policy( - transport: str = "grpc", request_type=iam_policy_pb2.GetIamPolicyRequest -): +@pytest.mark.parametrize("request_type", [iam_policy_pb2.GetIamPolicyRequest, dict,]) +def test_get_iam_policy(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -4462,10 +4405,6 @@ def test_get_iam_policy( assert response.etag == b"etag_blob" -def test_get_iam_policy_from_dict(): - test_get_iam_policy(request_type=dict) - - def test_get_iam_policy_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. @@ -4661,9 +4600,8 @@ async def test_get_iam_policy_flattened_error_async(): ) -def test_set_iam_policy( - transport: str = "grpc", request_type=iam_policy_pb2.SetIamPolicyRequest -): +@pytest.mark.parametrize("request_type", [iam_policy_pb2.SetIamPolicyRequest, dict,]) +def test_set_iam_policy(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -4689,10 +4627,6 @@ def test_set_iam_policy( assert response.etag == b"etag_blob" -def test_set_iam_policy_from_dict(): - test_set_iam_policy(request_type=dict) - - def test_set_iam_policy_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
@@ -4888,9 +4822,10 @@ async def test_set_iam_policy_flattened_error_async(): ) -def test_test_iam_permissions( - transport: str = "grpc", request_type=iam_policy_pb2.TestIamPermissionsRequest -): +@pytest.mark.parametrize( + "request_type", [iam_policy_pb2.TestIamPermissionsRequest, dict,] +) +def test_test_iam_permissions(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -4919,10 +4854,6 @@ def test_test_iam_permissions( assert response.permissions == ["permissions_value"] -def test_test_iam_permissions_from_dict(): - test_test_iam_permissions(request_type=dict) - - def test_test_iam_permissions_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. @@ -5830,7 +5761,7 @@ def test_parse_common_location_path(): assert expected == actual -def test_client_withDEFAULT_CLIENT_INFO(): +def test_client_with_default_client_info(): client_info = gapic_v1.client_info.ClientInfo() with mock.patch.object( diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index 541acb903aa5..6c81ca8161f9 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -266,20 +266,20 @@ def test_bigtable_table_admin_client_client_options( # unsupported value. with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): with pytest.raises(MutualTLSChannelError): - client = client_class() + client = client_class(transport=transport_name) # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} ): with pytest.raises(ValueError): - client = client_class() + client = client_class(transport=transport_name) # Check the case quota_project_id is provided options = client_options.ClientOptions(quota_project_id="octopus") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None - client = client_class(transport=transport_name, client_options=options) + client = client_class(client_options=options, transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, @@ -348,7 +348,7 @@ def test_bigtable_table_admin_client_mtls_env_auto( ) with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None - client = client_class(transport=transport_name, client_options=options) + client = client_class(client_options=options, transport=transport_name) if use_client_cert_env == "false": expected_client_cert_source = None @@ -443,7 +443,7 @@ def test_bigtable_table_admin_client_client_options_scopes( options = client_options.ClientOptions(scopes=["1", "2"],) with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None - client = client_class(transport=transport_name, client_options=options) + client = client_class(client_options=options, transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, @@ -474,7 +474,7 @@ def test_bigtable_table_admin_client_client_options_credentials_file( options = client_options.ClientOptions(credentials_file="credentials.json") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None - client = client_class(transport=transport_name, client_options=options) + client = client_class(client_options=options, transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file="credentials.json", @@ -507,9 +507,10 @@ def test_bigtable_table_admin_client_client_options_from_dict(): ) -def test_create_table( - transport: str = "grpc", request_type=bigtable_table_admin.CreateTableRequest -): +@pytest.mark.parametrize( + "request_type", [bigtable_table_admin.CreateTableRequest, dict,] +) +def test_create_table(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -537,10 +538,6 @@ def test_create_table( assert response.granularity == gba_table.Table.TimestampGranularity.MILLIS -def test_create_table_from_dict(): - test_create_table(request_type=dict) - - def test_create_table_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
@@ -749,10 +746,10 @@ async def test_create_table_flattened_error_async(): ) -def test_create_table_from_snapshot( - transport: str = "grpc", - request_type=bigtable_table_admin.CreateTableFromSnapshotRequest, -): +@pytest.mark.parametrize( + "request_type", [bigtable_table_admin.CreateTableFromSnapshotRequest, dict,] +) +def test_create_table_from_snapshot(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -778,10 +775,6 @@ def test_create_table_from_snapshot( assert isinstance(response, future.Future) -def test_create_table_from_snapshot_from_dict(): - test_create_table_from_snapshot(request_type=dict) - - def test_create_table_from_snapshot_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. @@ -1001,9 +994,10 @@ async def test_create_table_from_snapshot_flattened_error_async(): ) -def test_list_tables( - transport: str = "grpc", request_type=bigtable_table_admin.ListTablesRequest -): +@pytest.mark.parametrize( + "request_type", [bigtable_table_admin.ListTablesRequest, dict,] +) +def test_list_tables(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -1030,10 +1024,6 @@ def test_list_tables( assert response.next_page_token == "next_page_token_value" -def test_list_tables_from_dict(): - test_list_tables(request_type=dict) - - def test_list_tables_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. @@ -1217,8 +1207,10 @@ async def test_list_tables_flattened_error_async(): ) -def test_list_tables_pager(): - client = BigtableTableAdminClient(credentials=ga_credentials.AnonymousCredentials,) +def test_list_tables_pager(transport_name: str = "grpc"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials, transport=transport_name, + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_tables), "__call__") as call: @@ -1251,8 +1243,10 @@ def test_list_tables_pager(): assert all(isinstance(i, table.Table) for i in results) -def test_list_tables_pages(): - client = BigtableTableAdminClient(credentials=ga_credentials.AnonymousCredentials,) +def test_list_tables_pages(transport_name: str = "grpc"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials, transport=transport_name, + ) # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object(type(client.transport.list_tables), "__call__") as call: @@ -1343,9 +1337,8 @@ async def test_list_tables_async_pages(): assert page_.raw_page.next_page_token == token -def test_get_table( - transport: str = "grpc", request_type=bigtable_table_admin.GetTableRequest -): +@pytest.mark.parametrize("request_type", [bigtable_table_admin.GetTableRequest, dict,]) +def test_get_table(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -1373,10 +1366,6 @@ def test_get_table( assert response.granularity == table.Table.TimestampGranularity.MILLIS -def test_get_table_from_dict(): - test_get_table(request_type=dict) - - def test_get_table_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. @@ -1557,9 +1546,10 @@ async def test_get_table_flattened_error_async(): ) -def test_delete_table( - transport: str = "grpc", request_type=bigtable_table_admin.DeleteTableRequest -): +@pytest.mark.parametrize( + "request_type", [bigtable_table_admin.DeleteTableRequest, dict,] +) +def test_delete_table(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -1583,10 +1573,6 @@ def test_delete_table( assert response is None -def test_delete_table_from_dict(): - test_delete_table(request_type=dict) - - def test_delete_table_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. @@ -1762,10 +1748,10 @@ async def test_delete_table_flattened_error_async(): ) -def test_modify_column_families( - transport: str = "grpc", - request_type=bigtable_table_admin.ModifyColumnFamiliesRequest, -): +@pytest.mark.parametrize( + "request_type", [bigtable_table_admin.ModifyColumnFamiliesRequest, dict,] +) +def test_modify_column_families(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -1795,10 +1781,6 @@ def test_modify_column_families( assert response.granularity == table.Table.TimestampGranularity.MILLIS -def test_modify_column_families_from_dict(): - test_modify_column_families(request_type=dict) - - def test_modify_column_families_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. @@ -2028,9 +2010,10 @@ async def test_modify_column_families_flattened_error_async(): ) -def test_drop_row_range( - transport: str = "grpc", request_type=bigtable_table_admin.DropRowRangeRequest -): +@pytest.mark.parametrize( + "request_type", [bigtable_table_admin.DropRowRangeRequest, dict,] +) +def test_drop_row_range(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -2054,10 +2037,6 @@ def test_drop_row_range( assert response is None -def test_drop_row_range_from_dict(): - test_drop_row_range(request_type=dict) - - def test_drop_row_range_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
@@ -2159,10 +2138,10 @@ async def test_drop_row_range_field_headers_async(): assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] -def test_generate_consistency_token( - transport: str = "grpc", - request_type=bigtable_table_admin.GenerateConsistencyTokenRequest, -): +@pytest.mark.parametrize( + "request_type", [bigtable_table_admin.GenerateConsistencyTokenRequest, dict,] +) +def test_generate_consistency_token(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -2191,10 +2170,6 @@ def test_generate_consistency_token( assert response.consistency_token == "consistency_token_value" -def test_generate_consistency_token_from_dict(): - test_generate_consistency_token(request_type=dict) - - def test_generate_consistency_token_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. @@ -2391,9 +2366,10 @@ async def test_generate_consistency_token_flattened_error_async(): ) -def test_check_consistency( - transport: str = "grpc", request_type=bigtable_table_admin.CheckConsistencyRequest -): +@pytest.mark.parametrize( + "request_type", [bigtable_table_admin.CheckConsistencyRequest, dict,] +) +def test_check_consistency(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -2422,10 +2398,6 @@ def test_check_consistency( assert response.consistent is True -def test_check_consistency_from_dict(): - test_check_consistency(request_type=dict) - - def test_check_consistency_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. @@ -2634,9 +2606,10 @@ async def test_check_consistency_flattened_error_async(): ) -def test_snapshot_table( - transport: str = "grpc", request_type=bigtable_table_admin.SnapshotTableRequest -): +@pytest.mark.parametrize( + "request_type", [bigtable_table_admin.SnapshotTableRequest, dict,] +) +def test_snapshot_table(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -2660,10 +2633,6 @@ def test_snapshot_table( assert isinstance(response, future.Future) -def test_snapshot_table_from_dict(): - test_snapshot_table(request_type=dict) - - def test_snapshot_table_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. @@ -2881,9 +2850,10 @@ async def test_snapshot_table_flattened_error_async(): ) -def test_get_snapshot( - transport: str = "grpc", request_type=bigtable_table_admin.GetSnapshotRequest -): +@pytest.mark.parametrize( + "request_type", [bigtable_table_admin.GetSnapshotRequest, dict,] +) +def test_get_snapshot(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -2916,10 +2886,6 @@ def test_get_snapshot( assert response.description == "description_value" -def test_get_snapshot_from_dict(): - test_get_snapshot(request_type=dict) - - def test_get_snapshot_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
@@ -3106,9 +3072,10 @@ async def test_get_snapshot_flattened_error_async(): ) -def test_list_snapshots( - transport: str = "grpc", request_type=bigtable_table_admin.ListSnapshotsRequest -): +@pytest.mark.parametrize( + "request_type", [bigtable_table_admin.ListSnapshotsRequest, dict,] +) +def test_list_snapshots(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -3135,10 +3102,6 @@ def test_list_snapshots( assert response.next_page_token == "next_page_token_value" -def test_list_snapshots_from_dict(): - test_list_snapshots(request_type=dict) - - def test_list_snapshots_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. @@ -3323,8 +3286,10 @@ async def test_list_snapshots_flattened_error_async(): ) -def test_list_snapshots_pager(): - client = BigtableTableAdminClient(credentials=ga_credentials.AnonymousCredentials,) +def test_list_snapshots_pager(transport_name: str = "grpc"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials, transport=transport_name, + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: @@ -3359,8 +3324,10 @@ def test_list_snapshots_pager(): assert all(isinstance(i, table.Snapshot) for i in results) -def test_list_snapshots_pages(): - client = BigtableTableAdminClient(credentials=ga_credentials.AnonymousCredentials,) +def test_list_snapshots_pages(transport_name: str = "grpc"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials, transport=transport_name, + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: @@ -3457,9 +3424,10 @@ async def test_list_snapshots_async_pages(): assert page_.raw_page.next_page_token == token -def test_delete_snapshot( - transport: str = "grpc", request_type=bigtable_table_admin.DeleteSnapshotRequest -): +@pytest.mark.parametrize( + "request_type", [bigtable_table_admin.DeleteSnapshotRequest, dict,] +) +def test_delete_snapshot(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -3483,10 +3451,6 @@ def test_delete_snapshot( assert response is None -def test_delete_snapshot_from_dict(): - test_delete_snapshot(request_type=dict) - - def test_delete_snapshot_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. @@ -3662,9 +3626,10 @@ async def test_delete_snapshot_flattened_error_async(): ) -def test_create_backup( - transport: str = "grpc", request_type=bigtable_table_admin.CreateBackupRequest -): +@pytest.mark.parametrize( + "request_type", [bigtable_table_admin.CreateBackupRequest, dict,] +) +def test_create_backup(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -3688,10 +3653,6 @@ def test_create_backup( assert isinstance(response, future.Future) -def test_create_backup_from_dict(): - test_create_backup(request_type=dict) - - def test_create_backup_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. 
request == None and no flattened fields passed, work. @@ -3899,9 +3860,8 @@ async def test_create_backup_flattened_error_async(): ) -def test_get_backup( - transport: str = "grpc", request_type=bigtable_table_admin.GetBackupRequest -): +@pytest.mark.parametrize("request_type", [bigtable_table_admin.GetBackupRequest, dict,]) +def test_get_backup(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -3934,10 +3894,6 @@ def test_get_backup( assert response.state == table.Backup.State.CREATING -def test_get_backup_from_dict(): - test_get_backup(request_type=dict) - - def test_get_backup_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. @@ -4123,9 +4079,10 @@ async def test_get_backup_flattened_error_async(): ) -def test_update_backup( - transport: str = "grpc", request_type=bigtable_table_admin.UpdateBackupRequest -): +@pytest.mark.parametrize( + "request_type", [bigtable_table_admin.UpdateBackupRequest, dict,] +) +def test_update_backup(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -4158,10 +4115,6 @@ def test_update_backup( assert response.state == table.Backup.State.CREATING -def test_update_backup_from_dict(): - test_update_backup(request_type=dict) - - def test_update_backup_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. @@ -4364,9 +4317,10 @@ async def test_update_backup_flattened_error_async(): ) -def test_delete_backup( - transport: str = "grpc", request_type=bigtable_table_admin.DeleteBackupRequest -): +@pytest.mark.parametrize( + "request_type", [bigtable_table_admin.DeleteBackupRequest, dict,] +) +def test_delete_backup(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -4390,10 +4344,6 @@ def test_delete_backup( assert response is None -def test_delete_backup_from_dict(): - test_delete_backup(request_type=dict) - - def test_delete_backup_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. @@ -4569,9 +4519,10 @@ async def test_delete_backup_flattened_error_async(): ) -def test_list_backups( - transport: str = "grpc", request_type=bigtable_table_admin.ListBackupsRequest -): +@pytest.mark.parametrize( + "request_type", [bigtable_table_admin.ListBackupsRequest, dict,] +) +def test_list_backups(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -4598,10 +4549,6 @@ def test_list_backups( assert response.next_page_token == "next_page_token_value" -def test_list_backups_from_dict(): - test_list_backups(request_type=dict) - - def test_list_backups_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
@@ -4786,8 +4733,10 @@ async def test_list_backups_flattened_error_async(): ) -def test_list_backups_pager(): - client = BigtableTableAdminClient(credentials=ga_credentials.AnonymousCredentials,) +def test_list_backups_pager(transport_name: str = "grpc"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials, transport=transport_name, + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_backups), "__call__") as call: @@ -4822,8 +4771,10 @@ def test_list_backups_pager(): assert all(isinstance(i, table.Backup) for i in results) -def test_list_backups_pages(): - client = BigtableTableAdminClient(credentials=ga_credentials.AnonymousCredentials,) +def test_list_backups_pages(transport_name: str = "grpc"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials, transport=transport_name, + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_backups), "__call__") as call: @@ -4920,9 +4871,10 @@ async def test_list_backups_async_pages(): assert page_.raw_page.next_page_token == token -def test_restore_table( - transport: str = "grpc", request_type=bigtable_table_admin.RestoreTableRequest -): +@pytest.mark.parametrize( + "request_type", [bigtable_table_admin.RestoreTableRequest, dict,] +) +def test_restore_table(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -4946,10 +4898,6 @@ def test_restore_table( assert isinstance(response, future.Future) -def test_restore_table_from_dict(): - test_restore_table(request_type=dict) - - def test_restore_table_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. @@ -5055,9 +5003,8 @@ async def test_restore_table_field_headers_async(): assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] -def test_get_iam_policy( - transport: str = "grpc", request_type=iam_policy_pb2.GetIamPolicyRequest -): +@pytest.mark.parametrize("request_type", [iam_policy_pb2.GetIamPolicyRequest, dict,]) +def test_get_iam_policy(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -5083,10 +5030,6 @@ def test_get_iam_policy( assert response.etag == b"etag_blob" -def test_get_iam_policy_from_dict(): - test_get_iam_policy(request_type=dict) - - def test_get_iam_policy_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. @@ -5282,9 +5225,8 @@ async def test_get_iam_policy_flattened_error_async(): ) -def test_set_iam_policy( - transport: str = "grpc", request_type=iam_policy_pb2.SetIamPolicyRequest -): +@pytest.mark.parametrize("request_type", [iam_policy_pb2.SetIamPolicyRequest, dict,]) +def test_set_iam_policy(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -5310,10 +5252,6 @@ def test_set_iam_policy( assert response.etag == b"etag_blob" -def test_set_iam_policy_from_dict(): - test_set_iam_policy(request_type=dict) - - def test_set_iam_policy_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. 
request == None and no flattened fields passed, work. @@ -5509,9 +5447,10 @@ async def test_set_iam_policy_flattened_error_async(): ) -def test_test_iam_permissions( - transport: str = "grpc", request_type=iam_policy_pb2.TestIamPermissionsRequest -): +@pytest.mark.parametrize( + "request_type", [iam_policy_pb2.TestIamPermissionsRequest, dict,] +) +def test_test_iam_permissions(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -5540,10 +5479,6 @@ def test_test_iam_permissions( assert response.permissions == ["permissions_value"] -def test_test_iam_permissions_from_dict(): - test_test_iam_permissions(request_type=dict) - - def test_test_iam_permissions_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. @@ -6505,7 +6440,7 @@ def test_parse_common_location_path(): assert expected == actual -def test_client_withDEFAULT_CLIENT_INFO(): +def test_client_with_default_client_info(): client_info = gapic_v1.client_info.ClientInfo() with mock.patch.object( diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py index 0339d130d410..690b21b62cae 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py @@ -229,20 +229,20 @@ def test_bigtable_client_client_options(client_class, transport_class, transport # unsupported value. with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): with pytest.raises(MutualTLSChannelError): - client = client_class() + client = client_class(transport=transport_name) # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} ): with pytest.raises(ValueError): - client = client_class() + client = client_class(transport=transport_name) # Check the case quota_project_id is provided options = client_options.ClientOptions(quota_project_id="octopus") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None - client = client_class(transport=transport_name, client_options=options) + client = client_class(client_options=options, transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, @@ -299,7 +299,7 @@ def test_bigtable_client_mtls_env_auto( ) with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None - client = client_class(transport=transport_name, client_options=options) + client = client_class(client_options=options, transport=transport_name) if use_client_cert_env == "false": expected_client_cert_source = None @@ -390,7 +390,7 @@ def test_bigtable_client_client_options_scopes( options = client_options.ClientOptions(scopes=["1", "2"],) with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None - client = client_class(transport=transport_name, client_options=options) + client = client_class(client_options=options, transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file=None, @@ -417,7 +417,7 @@ def test_bigtable_client_client_options_credentials_file( options = client_options.ClientOptions(credentials_file="credentials.json") with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None - client = client_class(transport=transport_name, client_options=options) + client = client_class(client_options=options, transport=transport_name) patched.assert_called_once_with( credentials=None, credentials_file="credentials.json", @@ -448,7 +448,8 @@ def test_bigtable_client_client_options_from_dict(): ) -def test_read_rows(transport: str = "grpc", request_type=bigtable.ReadRowsRequest): +@pytest.mark.parametrize("request_type", [bigtable.ReadRowsRequest, dict,]) +def test_read_rows(request_type, transport: str = "grpc"): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -473,10 +474,6 @@ def test_read_rows(transport: str = "grpc", request_type=bigtable.ReadRowsReques assert isinstance(message, bigtable.ReadRowsResponse) -def test_read_rows_from_dict(): - test_read_rows(request_type=dict) - - def test_read_rows_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. @@ -660,9 +657,8 @@ async def test_read_rows_flattened_error_async(): ) -def test_sample_row_keys( - transport: str = "grpc", request_type=bigtable.SampleRowKeysRequest -): +@pytest.mark.parametrize("request_type", [bigtable.SampleRowKeysRequest, dict,]) +def test_sample_row_keys(request_type, transport: str = "grpc"): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -687,10 +683,6 @@ def test_sample_row_keys( assert isinstance(message, bigtable.SampleRowKeysResponse) -def test_sample_row_keys_from_dict(): - test_sample_row_keys(request_type=dict) - - def test_sample_row_keys_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
@@ -874,7 +866,8 @@ async def test_sample_row_keys_flattened_error_async(): ) -def test_mutate_row(transport: str = "grpc", request_type=bigtable.MutateRowRequest): +@pytest.mark.parametrize("request_type", [bigtable.MutateRowRequest, dict,]) +def test_mutate_row(request_type, transport: str = "grpc"): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -898,10 +891,6 @@ def test_mutate_row(transport: str = "grpc", request_type=bigtable.MutateRowRequ assert isinstance(response, bigtable.MutateRowResponse) -def test_mutate_row_from_dict(): - test_mutate_row(request_type=dict) - - def test_mutate_row_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. @@ -1130,7 +1119,8 @@ async def test_mutate_row_flattened_error_async(): ) -def test_mutate_rows(transport: str = "grpc", request_type=bigtable.MutateRowsRequest): +@pytest.mark.parametrize("request_type", [bigtable.MutateRowsRequest, dict,]) +def test_mutate_rows(request_type, transport: str = "grpc"): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -1155,10 +1145,6 @@ def test_mutate_rows(transport: str = "grpc", request_type=bigtable.MutateRowsRe assert isinstance(message, bigtable.MutateRowsResponse) -def test_mutate_rows_from_dict(): - test_mutate_rows(request_type=dict) - - def test_mutate_rows_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. @@ -1354,9 +1340,8 @@ async def test_mutate_rows_flattened_error_async(): ) -def test_check_and_mutate_row( - transport: str = "grpc", request_type=bigtable.CheckAndMutateRowRequest -): +@pytest.mark.parametrize("request_type", [bigtable.CheckAndMutateRowRequest, dict,]) +def test_check_and_mutate_row(request_type, transport: str = "grpc"): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -1383,10 +1368,6 @@ def test_check_and_mutate_row( assert response.predicate_matched is True -def test_check_and_mutate_row_from_dict(): - test_check_and_mutate_row(request_type=dict) - - def test_check_and_mutate_row_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. @@ -1728,9 +1709,8 @@ async def test_check_and_mutate_row_flattened_error_async(): ) -def test_read_modify_write_row( - transport: str = "grpc", request_type=bigtable.ReadModifyWriteRowRequest -): +@pytest.mark.parametrize("request_type", [bigtable.ReadModifyWriteRowRequest, dict,]) +def test_read_modify_write_row(request_type, transport: str = "grpc"): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -1756,10 +1736,6 @@ def test_read_modify_write_row( assert isinstance(response, bigtable.ReadModifyWriteRowResponse) -def test_read_modify_write_row_from_dict(): - test_read_modify_write_row(request_type=dict) - - def test_read_modify_write_row_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
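The hunks above all apply one mechanical refactor: each RPC test loses its separate `*_from_dict` twin and instead runs under `@pytest.mark.parametrize` with both the proto request type and a plain `dict`. A minimal, self-contained sketch of that pattern follows; `EchoRequest` and `EchoClient` are hypothetical stand-ins for illustration only, not the generated Bigtable classes used in the patch:

import pytest


class EchoRequest(dict):
    # Stand-in for a generated proto request type (illustrative only).
    pass


class EchoClient:
    # Stand-in for a generated GAPIC client (illustrative only).
    def echo(self, request):
        # Real GAPIC clients accept either the request type or a plain dict.
        return dict(request)


# One parametrized test replaces the old pair of functions
# `test_echo(transport="grpc", request_type=EchoRequest)` and `test_echo_from_dict()`.
@pytest.mark.parametrize("request_type", [EchoRequest, dict])
def test_echo(request_type):
    client = EchoClient()
    response = client.echo(request=request_type())
    assert response == {}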
@@ -2506,7 +2482,7 @@ def test_parse_common_location_path(): assert expected == actual -def test_client_withDEFAULT_CLIENT_INFO(): +def test_client_with_default_client_info(): client_info = gapic_v1.client_info.ClientInfo() with mock.patch.object( From 196fff2ccecb763c6b0e18815aecd94efa27d2aa Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Tue, 18 Jan 2022 21:07:05 -0500 Subject: [PATCH 549/892] chore(python): Noxfile recognizes that tests can live in a folder (#491) Source-Link: https://github.com/googleapis/synthtool/commit/4760d8dce1351d93658cb11d02a1b7ceb23ae5d7 Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:f0e4b51deef56bed74d3e2359c583fc104a8d6367da3984fc5c66938db738828 Co-authored-by: Owl Bot --- packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml | 2 +- packages/google-cloud-bigtable/samples/beam/noxfile.py | 1 + packages/google-cloud-bigtable/samples/hello/noxfile.py | 1 + .../google-cloud-bigtable/samples/hello_happybase/noxfile.py | 1 + packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py | 1 + packages/google-cloud-bigtable/samples/metricscaler/noxfile.py | 1 + packages/google-cloud-bigtable/samples/quickstart/noxfile.py | 1 + .../samples/quickstart_happybase/noxfile.py | 1 + .../google-cloud-bigtable/samples/snippets/filters/noxfile.py | 1 + .../google-cloud-bigtable/samples/snippets/reads/noxfile.py | 1 + .../google-cloud-bigtable/samples/snippets/writes/noxfile.py | 1 + packages/google-cloud-bigtable/samples/tableadmin/noxfile.py | 1 + 12 files changed, 12 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index eecb84c21b27..52d79c11f3ad 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -1,3 +1,3 @@ docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:ae600f36b6bc972b368367b6f83a1d91ec2c82a4a116b383d67d547c56fe6de3 + digest: sha256:f0e4b51deef56bed74d3e2359c583fc104a8d6367da3984fc5c66938db738828 diff --git a/packages/google-cloud-bigtable/samples/beam/noxfile.py b/packages/google-cloud-bigtable/samples/beam/noxfile.py index b14b26647148..5b10d2811707 100644 --- a/packages/google-cloud-bigtable/samples/beam/noxfile.py +++ b/packages/google-cloud-bigtable/samples/beam/noxfile.py @@ -185,6 +185,7 @@ def _session_tests( ) -> None: # check for presence of tests test_list = glob.glob("*_test.py") + glob.glob("test_*.py") + test_list.extend(glob.glob("tests")) if len(test_list) == 0: print("No tests found, skipping directory.") else: diff --git a/packages/google-cloud-bigtable/samples/hello/noxfile.py b/packages/google-cloud-bigtable/samples/hello/noxfile.py index 3bbef5d54f44..20cdfc620138 100644 --- a/packages/google-cloud-bigtable/samples/hello/noxfile.py +++ b/packages/google-cloud-bigtable/samples/hello/noxfile.py @@ -187,6 +187,7 @@ def _session_tests( ) -> None: # check for presence of tests test_list = glob.glob("*_test.py") + glob.glob("test_*.py") + test_list.extend(glob.glob("tests")) if len(test_list) == 0: print("No tests found, skipping directory.") else: diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py b/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py index 3bbef5d54f44..20cdfc620138 100644 --- a/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py +++ 
b/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py @@ -187,6 +187,7 @@ def _session_tests( ) -> None: # check for presence of tests test_list = glob.glob("*_test.py") + glob.glob("test_*.py") + test_list.extend(glob.glob("tests")) if len(test_list) == 0: print("No tests found, skipping directory.") else: diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py b/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py index 3bbef5d54f44..20cdfc620138 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py +++ b/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py @@ -187,6 +187,7 @@ def _session_tests( ) -> None: # check for presence of tests test_list = glob.glob("*_test.py") + glob.glob("test_*.py") + test_list.extend(glob.glob("tests")) if len(test_list) == 0: print("No tests found, skipping directory.") else: diff --git a/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py b/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py index 3bbef5d54f44..20cdfc620138 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py +++ b/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py @@ -187,6 +187,7 @@ def _session_tests( ) -> None: # check for presence of tests test_list = glob.glob("*_test.py") + glob.glob("test_*.py") + test_list.extend(glob.glob("tests")) if len(test_list) == 0: print("No tests found, skipping directory.") else: diff --git a/packages/google-cloud-bigtable/samples/quickstart/noxfile.py b/packages/google-cloud-bigtable/samples/quickstart/noxfile.py index 3bbef5d54f44..20cdfc620138 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/noxfile.py +++ b/packages/google-cloud-bigtable/samples/quickstart/noxfile.py @@ -187,6 +187,7 @@ def _session_tests( ) -> None: # check for presence of tests test_list = glob.glob("*_test.py") + glob.glob("test_*.py") + test_list.extend(glob.glob("tests")) if len(test_list) == 0: print("No tests found, skipping directory.") else: diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py b/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py index 3bbef5d54f44..20cdfc620138 100644 --- a/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py +++ b/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py @@ -187,6 +187,7 @@ def _session_tests( ) -> None: # check for presence of tests test_list = glob.glob("*_test.py") + glob.glob("test_*.py") + test_list.extend(glob.glob("tests")) if len(test_list) == 0: print("No tests found, skipping directory.") else: diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py index 3bbef5d54f44..20cdfc620138 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py @@ -187,6 +187,7 @@ def _session_tests( ) -> None: # check for presence of tests test_list = glob.glob("*_test.py") + glob.glob("test_*.py") + test_list.extend(glob.glob("tests")) if len(test_list) == 0: print("No tests found, skipping directory.") else: diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py index 3bbef5d54f44..20cdfc620138 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py +++ 
b/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py @@ -187,6 +187,7 @@ def _session_tests( ) -> None: # check for presence of tests test_list = glob.glob("*_test.py") + glob.glob("test_*.py") + test_list.extend(glob.glob("tests")) if len(test_list) == 0: print("No tests found, skipping directory.") else: diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py index 3bbef5d54f44..20cdfc620138 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py @@ -187,6 +187,7 @@ def _session_tests( ) -> None: # check for presence of tests test_list = glob.glob("*_test.py") + glob.glob("test_*.py") + test_list.extend(glob.glob("tests")) if len(test_list) == 0: print("No tests found, skipping directory.") else: diff --git a/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py b/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py index 3bbef5d54f44..20cdfc620138 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py +++ b/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py @@ -187,6 +187,7 @@ def _session_tests( ) -> None: # check for presence of tests test_list = glob.glob("*_test.py") + glob.glob("test_*.py") + test_list.extend(glob.glob("tests")) if len(test_list) == 0: print("No tests found, skipping directory.") else: From d4f5503caf878680059c128781f8b6f773b231db Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Fri, 21 Jan 2022 04:42:03 -0500 Subject: [PATCH 550/892] docs: clarify comments in ReadRowsRequest and RowFilter (#494) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: add explicit routing header annotations for bigtable/v2 feat: add `routing_proto` to the Bazel dependencies for bigtable/v2 Committer: @viacheslav-rostovtsev PiperOrigin-RevId: 423158199 Source-Link: https://github.com/googleapis/googleapis/commit/d7ee523e449c530c52b24a207a83b1b9c6e2a432 Source-Link: https://github.com/googleapis/googleapis-gen/commit/9a040c8b8cd5e82949feeaa6381b957579cd2c68 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiOWEwNDBjOGI4Y2Q1ZTgyOTQ5ZmVlYWE2MzgxYjk1NzU3OWNkMmM2OCJ9 * 🦉 Updates from OwlBot See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md Co-authored-by: Owl Bot --- .../google/cloud/bigtable_v2/types/bigtable.py | 7 ++++--- .../google/cloud/bigtable_v2/types/data.py | 2 +- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py index 55420ba60022..956eeca5c164 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py @@ -52,14 +52,15 @@ class ReadRowsRequest(proto.Message): If not specified, the "default" application profile will be used. rows (google.cloud.bigtable_v2.types.RowSet): - The row keys and/or ranges to read. If not - specified, reads from all rows. + The row keys and/or ranges to read + sequentially. If not specified, reads from all + rows. 
filter (google.cloud.bigtable_v2.types.RowFilter): The filter to apply to the contents of the specified row(s). If unset, reads the entirety of each row. rows_limit (int): - The read will terminate after committing to N + The read will stop after committing to N rows' worth of results. The default (zero) is to return all results. """ diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py index dbf63ead048e..7cd74b0471fc 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py @@ -319,7 +319,7 @@ class RowFilter(proto.Message): RowFilter.Chain and RowFilter.Interleave documentation. The total serialized size of a RowFilter message must not exceed - 4096 bytes, and RowFilters may not be nested within each other (in + 20480 bytes, and RowFilters may not be nested within each other (in Chains or Interleaves) to a depth of more than 20. This message has `oneof`_ fields (mutually exclusive fields). From 37df16c4754189b8982d124a69132120360d10d8 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Fri, 21 Jan 2022 07:34:07 -0500 Subject: [PATCH 551/892] ci(python): run lint / unit tests / docs as GH actions (#493) * ci(python): run lint / unit tests / docs as GH actions Source-Link: https://github.com/googleapis/synthtool/commit/57be0cdb0b94e1669cee0ca38d790de1dfdbcd44 Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:ed1f9983d5a935a89fe8085e8bb97d94e41015252c5b6c9771257cf8624367e6 * add mypy check as a gh action Co-authored-by: Owl Bot Co-authored-by: Anthonios Partheniou --- .../.github/.OwlBot.lock.yaml | 15 ++++- .../.github/workflows/docs.yml | 38 +++++++++++++ .../.github/workflows/lint.yml | 25 ++++++++ .../.github/workflows/mypy.yml | 22 +++++++ .../.github/workflows/unittest.yml | 57 +++++++++++++++++++ 5 files changed, 156 insertions(+), 1 deletion(-) create mode 100644 packages/google-cloud-bigtable/.github/workflows/docs.yml create mode 100644 packages/google-cloud-bigtable/.github/workflows/lint.yml create mode 100644 packages/google-cloud-bigtable/.github/workflows/mypy.yml create mode 100644 packages/google-cloud-bigtable/.github/workflows/unittest.yml diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 52d79c11f3ad..8cb43804d999 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -1,3 +1,16 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:f0e4b51deef56bed74d3e2359c583fc104a8d6367da3984fc5c66938db738828 + digest: sha256:ed1f9983d5a935a89fe8085e8bb97d94e41015252c5b6c9771257cf8624367e6 diff --git a/packages/google-cloud-bigtable/.github/workflows/docs.yml b/packages/google-cloud-bigtable/.github/workflows/docs.yml new file mode 100644 index 000000000000..f7b8344c4500 --- /dev/null +++ b/packages/google-cloud-bigtable/.github/workflows/docs.yml @@ -0,0 +1,38 @@ +on: + pull_request: + branches: + - main +name: docs +jobs: + docs: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: "3.10" + - name: Install nox + run: | + python -m pip install --upgrade setuptools pip wheel + python -m pip install nox + - name: Run docs + run: | + nox -s docs + docfx: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: "3.10" + - name: Install nox + run: | + python -m pip install --upgrade setuptools pip wheel + python -m pip install nox + - name: Run docfx + run: | + nox -s docfx diff --git a/packages/google-cloud-bigtable/.github/workflows/lint.yml b/packages/google-cloud-bigtable/.github/workflows/lint.yml new file mode 100644 index 000000000000..1e8b05c3d7ff --- /dev/null +++ b/packages/google-cloud-bigtable/.github/workflows/lint.yml @@ -0,0 +1,25 @@ +on: + pull_request: + branches: + - main +name: lint +jobs: + lint: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: "3.10" + - name: Install nox + run: | + python -m pip install --upgrade setuptools pip wheel + python -m pip install nox + - name: Run lint + run: | + nox -s lint + - name: Run lint_setup_py + run: | + nox -s lint_setup_py diff --git a/packages/google-cloud-bigtable/.github/workflows/mypy.yml b/packages/google-cloud-bigtable/.github/workflows/mypy.yml new file mode 100644 index 000000000000..5a0f0e090d69 --- /dev/null +++ b/packages/google-cloud-bigtable/.github/workflows/mypy.yml @@ -0,0 +1,22 @@ +on: + pull_request: + branches: + - main +name: mypy +jobs: + mypy: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: "3.8" + - name: Install nox + run: | + python -m pip install --upgrade setuptools pip wheel + python -m pip install nox + - name: Run mypy + run: | + nox -s mypy diff --git a/packages/google-cloud-bigtable/.github/workflows/unittest.yml b/packages/google-cloud-bigtable/.github/workflows/unittest.yml new file mode 100644 index 000000000000..074ee2504ca5 --- /dev/null +++ b/packages/google-cloud-bigtable/.github/workflows/unittest.yml @@ -0,0 +1,57 @@ +on: + pull_request: + branches: + - main +name: unittest +jobs: + unit: + runs-on: ubuntu-latest + strategy: + matrix: + python: ['3.6', '3.7', '3.8', '3.9', '3.10'] + steps: + - name: Checkout + uses: actions/checkout@v2 + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python }} + - name: Install nox + run: | + python -m pip install --upgrade setuptools pip wheel + python -m pip install nox + - name: Run unit tests + env: + COVERAGE_FILE: .coverage-${{ matrix.python }} + run: | + nox -s unit-${{ matrix.python }} + - name: Upload coverage results + uses: 
actions/upload-artifact@v2 + with: + name: coverage-artifacts + path: .coverage-${{ matrix.python }} + + cover: + runs-on: ubuntu-latest + needs: + - unit + steps: + - name: Checkout + uses: actions/checkout@v2 + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: "3.10" + - name: Install coverage + run: | + python -m pip install --upgrade setuptools pip wheel + python -m pip install coverage + - name: Download coverage results + uses: actions/download-artifact@v2 + with: + name: coverage-artifacts + path: .coverage-results/ + - name: Report coverage results + run: | + coverage combine .coverage-results/.coverage* + coverage report --show-missing --fail-under=100 From 4bd41023a4eb9903e325ca7c496cfb28fffeee9b Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Sat, 22 Jan 2022 00:58:25 +0100 Subject: [PATCH 552/892] chore(deps): update all dependencies (#420) * chore(deps): update all dependencies * revert pin change for beam samples as a test * also revert pin change for google-cloud-core for beam samples Co-authored-by: Anthonios Partheniou --- .../google-cloud-bigtable/samples/beam/requirements-test.txt | 2 +- packages/google-cloud-bigtable/samples/beam/requirements.txt | 4 ++-- .../google-cloud-bigtable/samples/hello/requirements-test.txt | 2 +- packages/google-cloud-bigtable/samples/hello/requirements.txt | 4 ++-- .../samples/hello_happybase/requirements-test.txt | 2 +- .../samples/instanceadmin/requirements-test.txt | 2 +- .../samples/instanceadmin/requirements.txt | 2 +- .../samples/metricscaler/requirements-test.txt | 2 +- .../samples/metricscaler/requirements.txt | 4 ++-- .../samples/quickstart/requirements-test.txt | 2 +- .../google-cloud-bigtable/samples/quickstart/requirements.txt | 2 +- .../samples/quickstart_happybase/requirements-test.txt | 2 +- .../samples/snippets/filters/requirements-test.txt | 2 +- .../samples/snippets/filters/requirements.txt | 2 +- .../samples/snippets/reads/requirements-test.txt | 2 +- .../samples/snippets/reads/requirements.txt | 2 +- .../samples/snippets/writes/requirements-test.txt | 2 +- .../samples/snippets/writes/requirements.txt | 2 +- .../samples/tableadmin/requirements-test.txt | 4 ++-- .../google-cloud-bigtable/samples/tableadmin/requirements.txt | 2 +- 20 files changed, 24 insertions(+), 24 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements-test.txt b/packages/google-cloud-bigtable/samples/beam/requirements-test.txt index 95ea1e6a02b0..927094516e65 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements-test.txt @@ -1 +1 @@ -pytest==6.2.4 +pytest==6.2.5 diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index 29731a5f9428..0ac314d1671d 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ -apache-beam==2.31.0 +apache-beam==2.34.0 google-cloud-bigtable<2.0.0 -google-cloud-core==1.7.2 \ No newline at end of file +google-cloud-core==1.7.2 diff --git a/packages/google-cloud-bigtable/samples/hello/requirements-test.txt b/packages/google-cloud-bigtable/samples/hello/requirements-test.txt index 95ea1e6a02b0..927094516e65 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements-test.txt @@ -1 +1 @@ -pytest==6.2.4 +pytest==6.2.5 
diff --git a/packages/google-cloud-bigtable/samples/hello/requirements.txt b/packages/google-cloud-bigtable/samples/hello/requirements.txt index 20e1f50782c4..58379c8cb66a 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.3.1 -google-cloud-core==1.7.2 +google-cloud-bigtable==2.4.0 +google-cloud-core==2.2.1 diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt b/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt index 95ea1e6a02b0..927094516e65 100644 --- a/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt @@ -1 +1 @@ -pytest==6.2.4 +pytest==6.2.5 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt index 95ea1e6a02b0..927094516e65 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt @@ -1 +1 @@ -pytest==6.2.4 +pytest==6.2.5 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt index 807a82ce39d1..844169f7be50 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.3.1 +google-cloud-bigtable==2.4.0 backoff==1.11.1 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt index 7903fa1e1133..c16fa6493ded 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt @@ -1,3 +1,3 @@ -pytest==6.2.4 +pytest==6.2.5 mock==4.0.3 google-cloud-testutils diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index 8a9f48af893f..2e1843a99736 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.3.1 -google-cloud-monitoring==2.4.2 +google-cloud-bigtable==2.4.0 +google-cloud-monitoring==2.8.0 diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt index 95ea1e6a02b0..927094516e65 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt @@ -1 +1 @@ -pytest==6.2.4 +pytest==6.2.5 diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt index 5197d54ba05a..73d64741ddc6 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.3.1 +google-cloud-bigtable==2.4.0 diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt 
b/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt index 95ea1e6a02b0..927094516e65 100644 --- a/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt @@ -1 +1 @@ -pytest==6.2.4 +pytest==6.2.5 diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt index 95ea1e6a02b0..927094516e65 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt @@ -1 +1 @@ -pytest==6.2.4 +pytest==6.2.5 diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt index 83fd1d5e2306..d2916abfca1e 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.3.1 +google-cloud-bigtable==2.4.0 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt index 95ea1e6a02b0..927094516e65 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt @@ -1 +1 @@ -pytest==6.2.4 +pytest==6.2.5 diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt index 83fd1d5e2306..d2916abfca1e 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.3.1 +google-cloud-bigtable==2.4.0 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt index 0db5cc446cf1..fbe6c1c5cfc8 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt @@ -1,2 +1,2 @@ backoff==1.11.1 -pytest==6.2.4 +pytest==6.2.5 diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt index f9a2edd68809..2946eff51627 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.3.1 \ No newline at end of file +google-cloud-bigtable==2.4.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt index 2ff95fe08b1d..06b8f206a0e6 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt @@ -1,2 +1,2 @@ -pytest==6.2.4 -google-cloud-testutils==1.0.0 +pytest==6.2.5 +google-cloud-testutils==1.3.0 diff --git 
a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt index 5197d54ba05a..73d64741ddc6 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.3.1 +google-cloud-bigtable==2.4.0 From 0970b0bd1b35b280ff362c9127bb6b0198c65c5e Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Tue, 25 Jan 2022 14:22:44 -0500 Subject: [PATCH 553/892] feat: add api key support (#497) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore: upgrade gapic-generator-java, gax-java and gapic-generator-python PiperOrigin-RevId: 423842556 Source-Link: https://github.com/googleapis/googleapis/commit/a616ca08f4b1416abbac7bc5dd6d61c791756a81 Source-Link: https://github.com/googleapis/googleapis-gen/commit/29b938c58c1e51d019f2ee539d55dc0a3c86a905 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiMjliOTM4YzU4YzFlNTFkMDE5ZjJlZTUzOWQ1NWRjMGEzYzg2YTkwNSJ9 * 🦉 Updates from OwlBot See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md Co-authored-by: Owl Bot --- .../bigtable_instance_admin/async_client.py | 38 ++++- .../bigtable_instance_admin/client.py | 127 +++++++++++------ .../bigtable_table_admin/async_client.py | 38 ++++- .../services/bigtable_table_admin/client.py | 127 +++++++++++------ .../services/bigtable/async_client.py | 47 ++++++- .../bigtable_v2/services/bigtable/client.py | 127 +++++++++++------ .../test_bigtable_instance_admin.py | 133 ++++++++++++++++++ .../test_bigtable_table_admin.py | 131 +++++++++++++++++ .../unit/gapic/bigtable_v2/test_bigtable.py | 124 ++++++++++++++++ 9 files changed, 760 insertions(+), 132 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py index 649877c3e904..ef6fa27783dc 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import functools import re -from typing import Dict, Sequence, Tuple, Type, Union +from typing import Dict, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core.client_options import ClientOptions @@ -131,6 +131,42 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): from_service_account_json = from_service_account_file + @classmethod + def get_mtls_endpoint_and_cert_source( + cls, client_options: Optional[ClientOptions] = None + ): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. 
+ + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variabel is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return BigtableInstanceAdminClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + @property def transport(self) -> BigtableInstanceAdminTransport: """Returns the transport used by the client instance. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py index d1f445c34537..58adf8abb4f6 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -299,6 +299,73 @@ def parse_common_location_path(path: str) -> Dict[str, str]: m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) return m.groupdict() if m else {} + @classmethod + def get_mtls_endpoint_and_cert_source( + cls, client_options: Optional[client_options_lib.ClientOptions] = None + ): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variabel is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. 
+ """ + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_client_cert not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError( + "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + # Figure out the client cert source to use. + client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or ( + use_mtls_endpoint == "auto" and client_cert_source + ): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + def __init__( self, *, @@ -349,57 +416,22 @@ def __init__( if client_options is None: client_options = client_options_lib.ClientOptions() - # Create SSL credentials for mutual TLS if needed. - if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ( - "true", - "false", - ): - raise ValueError( - "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) - use_client_cert = ( - os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source( + client_options ) - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) + api_key_value = getattr(client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError( + "client_options.api_key and credentials are mutually exclusive" + ) # Save or instantiate the transport. # Ordinarily, we provide the transport, but allowing a custom transport # instance provides an extensibility point for unusual situations. if isinstance(transport, BigtableInstanceAdminTransport): # transport is a BigtableInstanceAdminTransport instance. - if credentials or client_options.credentials_file: + if credentials or client_options.credentials_file or api_key_value: raise ValueError( "When providing a transport instance, " "provide its credentials directly." 
@@ -411,6 +443,15 @@ def __init__( ) self._transport = transport else: + import google.auth._default # type: ignore + + if api_key_value and hasattr( + google.auth._default, "get_api_key_credentials" + ): + credentials = google.auth._default.get_api_key_credentials( + api_key_value + ) + Transport = type(self).get_transport_class(transport) self._transport = Transport( credentials=credentials, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index 476a70ee7588..b6eaece9f1b5 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import functools import re -from typing import Dict, Sequence, Tuple, Type, Union +from typing import Dict, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core.client_options import ClientOptions @@ -133,6 +133,42 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): from_service_account_json = from_service_account_file + @classmethod + def get_mtls_endpoint_and_cert_source( + cls, client_options: Optional[ClientOptions] = None + ): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variabel is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return BigtableTableAdminClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + @property def transport(self) -> BigtableTableAdminTransport: """Returns the transport used by the client instance. 
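For reference, a minimal sketch of how the new `get_mtls_endpoint_and_cert_source` classmethod resolves the endpoint and certificate source, assuming `google-cloud-bigtable` and `google-api-core` are installed; the endpoint value and the cert-source callable below are placeholders, not part of this change:

import os

from google.api_core.client_options import ClientOptions
from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
    BigtableInstanceAdminClient,
)


def example_cert_source():
    # Hypothetical callable returning (cert_bytes, key_bytes) for mTLS.
    return b"cert", b"key"


# With client certificates enabled and a cert source supplied, the mTLS
# endpoint is selected (GOOGLE_API_USE_MTLS_ENDPOINT defaults to "auto").
os.environ["GOOGLE_API_USE_CLIENT_CERTIFICATE"] = "true"
options = ClientOptions(client_cert_source=example_cert_source)
endpoint, cert_source = BigtableInstanceAdminClient.get_mtls_endpoint_and_cert_source(options)
assert endpoint == BigtableInstanceAdminClient.DEFAULT_MTLS_ENDPOINT
assert cert_source is example_cert_source

# An explicit api_endpoint always takes precedence over the environment.
options = ClientOptions(api_endpoint="private.example.com")
endpoint, _ = BigtableInstanceAdminClient.get_mtls_endpoint_and_cert_source(options)
assert endpoint == "private.example.com"

The same resolution now also backs __init__, so constructing a client and calling the classmethod directly agree on which endpoint is used.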
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index 55e85f064987..8804ee213bb5 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -336,6 +336,73 @@ def parse_common_location_path(path: str) -> Dict[str, str]: m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) return m.groupdict() if m else {} + @classmethod + def get_mtls_endpoint_and_cert_source( + cls, client_options: Optional[client_options_lib.ClientOptions] = None + ): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variabel is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_client_cert not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError( + "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + # Figure out the client cert source to use. + client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. 
+ if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or ( + use_mtls_endpoint == "auto" and client_cert_source + ): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + def __init__( self, *, @@ -386,57 +453,22 @@ def __init__( if client_options is None: client_options = client_options_lib.ClientOptions() - # Create SSL credentials for mutual TLS if needed. - if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ( - "true", - "false", - ): - raise ValueError( - "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) - use_client_cert = ( - os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source( + client_options ) - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) + api_key_value = getattr(client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError( + "client_options.api_key and credentials are mutually exclusive" + ) # Save or instantiate the transport. # Ordinarily, we provide the transport, but allowing a custom transport # instance provides an extensibility point for unusual situations. if isinstance(transport, BigtableTableAdminTransport): # transport is a BigtableTableAdminTransport instance. - if credentials or client_options.credentials_file: + if credentials or client_options.credentials_file or api_key_value: raise ValueError( "When providing a transport instance, " "provide its credentials directly." 
@@ -448,6 +480,15 @@ def __init__( ) self._transport = transport else: + import google.auth._default # type: ignore + + if api_key_value and hasattr( + google.auth._default, "get_api_key_credentials" + ): + credentials = google.auth._default.get_api_key_credentials( + api_key_value + ) + Transport = type(self).get_transport_class(transport) self._transport = Transport( credentials=credentials, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py index e0227e8578b3..7a48c382edfb 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -16,7 +16,16 @@ from collections import OrderedDict import functools import re -from typing import Dict, AsyncIterable, Awaitable, Sequence, Tuple, Type, Union +from typing import ( + Dict, + Optional, + AsyncIterable, + Awaitable, + Sequence, + Tuple, + Type, + Union, +) import pkg_resources from google.api_core.client_options import ClientOptions @@ -100,6 +109,42 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): from_service_account_json = from_service_account_file + @classmethod + def get_mtls_endpoint_and_cert_source( + cls, client_options: Optional[ClientOptions] = None + ): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variabel is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return BigtableClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + @property def transport(self) -> BigtableTransport: """Returns the transport used by the client instance. 
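The api_key plumbing added to each client follows the same pattern: a key set on `client_options` is mutually exclusive with explicit credentials (and with a pre-built transport), and is otherwise exchanged for credentials through `google.auth._default.get_api_key_credentials` when the installed google-auth provides that helper. A rough sketch, assuming such a google-auth version and using a placeholder key:

from google.api_core.client_options import ClientOptions
from google.auth.credentials import AnonymousCredentials
from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
    BigtableTableAdminClient,
)

# The api_key attribute is set directly, mirroring the unit tests below, so the
# sketch does not depend on a ClientOptions constructor that accepts api_key.
options = ClientOptions()
options.api_key = "placeholder-api-key"
client = BigtableTableAdminClient(client_options=options)

# Supplying an API key together with explicit credentials is rejected up front.
try:
    BigtableTableAdminClient(
        client_options=options, credentials=AnonymousCredentials()
    )
except ValueError as exc:
    print(exc)  # client_options.api_key and credentials are mutually exclusive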
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py index 8dcfdb746143..2d755b8f3a0b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py @@ -234,6 +234,73 @@ def parse_common_location_path(path: str) -> Dict[str, str]: m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) return m.groupdict() if m else {} + @classmethod + def get_mtls_endpoint_and_cert_source( + cls, client_options: Optional[client_options_lib.ClientOptions] = None + ): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variabel is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_client_cert not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError( + "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + # Figure out the client cert source to use. + client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or ( + use_mtls_endpoint == "auto" and client_cert_source + ): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + def __init__( self, *, @@ -284,57 +351,22 @@ def __init__( if client_options is None: client_options = client_options_lib.ClientOptions() - # Create SSL credentials for mutual TLS if needed. 
- if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ( - "true", - "false", - ): - raise ValueError( - "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - ) - use_client_cert = ( - os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source( + client_options ) - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) + api_key_value = getattr(client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError( + "client_options.api_key and credentials are mutually exclusive" + ) # Save or instantiate the transport. # Ordinarily, we provide the transport, but allowing a custom transport # instance provides an extensibility point for unusual situations. if isinstance(transport, BigtableTransport): # transport is a BigtableTransport instance. - if credentials or client_options.credentials_file: + if credentials or client_options.credentials_file or api_key_value: raise ValueError( "When providing a transport instance, " "provide its credentials directly." 
@@ -346,6 +378,15 @@ def __init__( ) self._transport = transport else: + import google.auth._default # type: ignore + + if api_key_value and hasattr( + google.auth._default, "get_api_key_credentials" + ): + credentials = google.auth._default.get_api_key_credentials( + api_key_value + ) + Transport = type(self).get_transport_class(transport) self._transport = Transport( credentials=credentials, diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index 11205044dcc9..3403568fd2a5 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -427,6 +427,87 @@ def test_bigtable_instance_admin_client_mtls_env_auto( ) +@pytest.mark.parametrize( + "client_class", [BigtableInstanceAdminClient, BigtableInstanceAdminAsyncClient] +) +@mock.patch.object( + BigtableInstanceAdminClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(BigtableInstanceAdminClient), +) +@mock.patch.object( + BigtableInstanceAdminAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(BigtableInstanceAdminAsyncClient), +) +def test_bigtable_instance_admin_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=mock_client_cert_source, + ): + ( + api_endpoint, + cert_source, + ) = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + @pytest.mark.parametrize( "client_class,transport_class,transport_name", [ @@ -5103,6 +5184,25 @@ def test_credentials_transport_error(): transport=transport, ) + # It is an error to provide an api_key and a transport instance. + transport = transports.BigtableInstanceAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = BigtableInstanceAdminClient( + client_options=options, transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = mock.Mock() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = BigtableInstanceAdminClient( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + # It is an error to provide scopes and a transport instance. 
transport = transports.BigtableInstanceAdminGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), @@ -5826,3 +5926,36 @@ def test_client_ctx(): with client: pass close.assert_called() + + +@pytest.mark.parametrize( + "client_class,transport_class", + [ + (BigtableInstanceAdminClient, transports.BigtableInstanceAdminGrpcTransport), + ( + BigtableInstanceAdminAsyncClient, + transports.BigtableInstanceAdminGrpcAsyncIOTransport, + ), + ], +) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index 6c81ca8161f9..674383edca65 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -425,6 +425,87 @@ def test_bigtable_table_admin_client_mtls_env_auto( ) +@pytest.mark.parametrize( + "client_class", [BigtableTableAdminClient, BigtableTableAdminAsyncClient] +) +@mock.patch.object( + BigtableTableAdminClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(BigtableTableAdminClient), +) +@mock.patch.object( + BigtableTableAdminAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(BigtableTableAdminAsyncClient), +) +def test_bigtable_table_admin_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=mock_client_cert_source, + ): + ( + api_endpoint, + cert_source, + ) = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + @pytest.mark.parametrize( "client_class,transport_class,transport_name", [ @@ -5728,6 +5809,23 @@ def test_credentials_transport_error(): transport=transport, ) + # It is an error to provide an api_key and a transport instance. + transport = transports.BigtableTableAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = BigtableTableAdminClient(client_options=options, transport=transport,) + + # It is an error to provide an api_key and a credential. + options = mock.Mock() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = BigtableTableAdminClient( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + # It is an error to provide scopes and a transport instance. 
transport = transports.BigtableTableAdminGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), @@ -6505,3 +6603,36 @@ def test_client_ctx(): with client: pass close.assert_called() + + +@pytest.mark.parametrize( + "client_class,transport_class", + [ + (BigtableTableAdminClient, transports.BigtableTableAdminGrpcTransport), + ( + BigtableTableAdminAsyncClient, + transports.BigtableTableAdminGrpcAsyncIOTransport, + ), + ], +) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py index 690b21b62cae..f745a63ff0e1 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py @@ -376,6 +376,83 @@ def test_bigtable_client_mtls_env_auto( ) +@pytest.mark.parametrize("client_class", [BigtableClient, BigtableAsyncClient]) +@mock.patch.object( + BigtableClient, "DEFAULT_ENDPOINT", modify_default_endpoint(BigtableClient) +) +@mock.patch.object( + BigtableAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(BigtableAsyncClient), +) +def test_bigtable_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=mock_client_cert_source, + ): + ( + api_endpoint, + cert_source, + ) = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + @pytest.mark.parametrize( "client_class,transport_class,transport_name", [ @@ -1972,6 +2049,23 @@ def test_credentials_transport_error(): transport=transport, ) + # It is an error to provide an api_key and a transport instance. + transport = transports.BigtableGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = BigtableClient(client_options=options, transport=transport,) + + # It is an error to provide an api_key and a credential. + options = mock.Mock() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = BigtableClient( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + # It is an error to provide scopes and a transport instance. 
transport = transports.BigtableGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), @@ -2547,3 +2641,33 @@ def test_client_ctx(): with client: pass close.assert_called() + + +@pytest.mark.parametrize( + "client_class,transport_class", + [ + (BigtableClient, transports.BigtableGrpcTransport), + (BigtableAsyncClient, transports.BigtableGrpcAsyncIOTransport), + ], +) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) From 69232667e99c2a001f3c98368c2cd470d52f41a7 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 3 Feb 2022 19:48:36 +0000 Subject: [PATCH 554/892] chore: use gapic-generator-python 0.62.1 (#500) - [ ] Regenerate this pull request now. fix: resolve DuplicateCredentialArgs error when using credentials_file committer: parthea PiperOrigin-RevId: 425964861 Source-Link: https://github.com/googleapis/googleapis/commit/84b1a5a4f6fb2d04905be58e586b8a7a4310a8cf Source-Link: https://github.com/googleapis/googleapis-gen/commit/4fb761bbd8506ac156f49bac5f18306aa8eb3aa8 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiNGZiNzYxYmJkODUwNmFjMTU2ZjQ5YmFjNWYxODMwNmFhOGViM2FhOCJ9 --- .../bigtable_instance_admin/async_client.py | 36 ++++---- .../bigtable_instance_admin/client.py | 36 ++++---- .../transports/grpc.py | 7 +- .../transports/grpc_asyncio.py | 7 +- .../bigtable_table_admin/async_client.py | 40 ++++----- .../services/bigtable_table_admin/client.py | 40 ++++----- .../bigtable_table_admin/transports/grpc.py | 7 +- .../transports/grpc_asyncio.py | 7 +- .../types/bigtable_table_admin.py | 12 +-- .../cloud/bigtable_admin_v2/types/instance.py | 4 +- .../cloud/bigtable_admin_v2/types/table.py | 6 +- .../services/bigtable/async_client.py | 20 ++--- .../bigtable_v2/services/bigtable/client.py | 20 ++--- .../services/bigtable/transports/grpc.py | 13 +-- .../bigtable/transports/grpc_asyncio.py | 13 +-- .../test_bigtable_instance_admin.py | 82 ++++++++++++++++- .../test_bigtable_table_admin.py | 87 ++++++++++++++++++- .../unit/gapic/bigtable_v2/test_bigtable.py | 82 ++++++++++++++++- 18 files changed, 385 insertions(+), 134 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py index ef6fa27783dc..9cc58c7eb6ac 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ -304,7 +304,7 @@ async def create_instance( """ # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, instance_id, instance, clusters]) if request is not None and has_flattened_params: @@ -394,7 +394,7 @@ async def get_instance( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: @@ -476,7 +476,7 @@ async def list_instances( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent]) if request is not None and has_flattened_params: @@ -644,7 +644,7 @@ async def partial_update_instance( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([instance, update_mask]) if request is not None and has_flattened_params: @@ -732,7 +732,7 @@ async def delete_instance( sent along with the request as metadata. """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: @@ -831,7 +831,7 @@ async def create_cluster( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, cluster_id, cluster]) if request is not None and has_flattened_params: @@ -917,7 +917,7 @@ async def get_cluster( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: @@ -1001,7 +1001,7 @@ async def list_clusters( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent]) if request is not None and has_flattened_params: @@ -1186,7 +1186,7 @@ async def partial_update_cluster( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([cluster, update_mask]) if request is not None and has_flattened_params: @@ -1274,7 +1274,7 @@ async def delete_cluster( sent along with the request as metadata. """ # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: @@ -1364,7 +1364,7 @@ async def create_app_profile( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, app_profile_id, app_profile]) if request is not None and has_flattened_params: @@ -1441,7 +1441,7 @@ async def get_app_profile( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: @@ -1529,7 +1529,7 @@ async def list_app_profiles( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent]) if request is not None and has_flattened_params: @@ -1627,7 +1627,7 @@ async def update_app_profile( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([app_profile, update_mask]) if request is not None and has_flattened_params: @@ -1715,7 +1715,7 @@ async def delete_app_profile( sent along with the request as metadata. """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: @@ -1842,7 +1842,7 @@ async def get_iam_policy( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([resource]) if request is not None and has_flattened_params: @@ -1979,7 +1979,7 @@ async def set_iam_policy( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([resource]) if request is not None and has_flattened_params: @@ -2061,7 +2061,7 @@ async def test_iam_permissions( Response message for TestIamPermissions method. """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
has_flattened_params = any([resource, permissions]) if request is not None and has_flattened_params: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py index 58adf8abb4f6..697d0fd9b792 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -540,7 +540,7 @@ def create_instance( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, instance_id, instance, clusters]) if request is not None and has_flattened_params: @@ -629,7 +629,7 @@ def get_instance( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: @@ -701,7 +701,7 @@ def list_instances( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent]) if request is not None and has_flattened_params: @@ -850,7 +850,7 @@ def partial_update_instance( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([instance, update_mask]) if request is not None and has_flattened_params: @@ -930,7 +930,7 @@ def delete_instance( sent along with the request as metadata. """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: @@ -1029,7 +1029,7 @@ def create_cluster( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, cluster_id, cluster]) if request is not None and has_flattened_params: @@ -1115,7 +1115,7 @@ def get_cluster( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: @@ -1189,7 +1189,7 @@ def list_clusters( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
has_flattened_params = any([parent]) if request is not None and has_flattened_params: @@ -1355,7 +1355,7 @@ def partial_update_cluster( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([cluster, update_mask]) if request is not None and has_flattened_params: @@ -1433,7 +1433,7 @@ def delete_cluster( sent along with the request as metadata. """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: @@ -1523,7 +1523,7 @@ def create_app_profile( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, app_profile_id, app_profile]) if request is not None and has_flattened_params: @@ -1600,7 +1600,7 @@ def get_app_profile( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: @@ -1678,7 +1678,7 @@ def list_app_profiles( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent]) if request is not None and has_flattened_params: @@ -1766,7 +1766,7 @@ def update_app_profile( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([app_profile, update_mask]) if request is not None and has_flattened_params: @@ -1844,7 +1844,7 @@ def delete_app_profile( sent along with the request as metadata. """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: @@ -1971,7 +1971,7 @@ def get_iam_policy( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([resource]) if request is not None and has_flattened_params: @@ -2097,7 +2097,7 @@ def set_iam_policy( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
has_flattened_params = any([resource]) if request is not None and has_flattened_params: @@ -2178,7 +2178,7 @@ def test_iam_permissions( Response message for TestIamPermissions method. """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([resource, permissions]) if request is not None and has_flattened_params: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py index fa92fac0ce3f..c477ee926d71 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py @@ -168,8 +168,11 @@ def __init__( if not self._grpc_channel: self._grpc_channel = type(self).create_channel( self._host, + # use the credentials which are saved credentials=self._credentials, - credentials_file=credentials_file, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, quota_project_id=quota_project_id, @@ -242,7 +245,7 @@ def operations_client(self) -> operations_v1.OperationsClient: This property caches on the instance; repeated calls return the same client. """ - # Sanity check: Only create a new client if we do not already have one. + # Quick check: Only create a new client if we do not already have one. if self._operations_client is None: self._operations_client = operations_v1.OperationsClient(self.grpc_channel) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py index 5eaaf33f2588..97c8f1ad92f2 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py @@ -213,8 +213,11 @@ def __init__( if not self._grpc_channel: self._grpc_channel = type(self).create_channel( self._host, + # use the credentials which are saved credentials=self._credentials, - credentials_file=credentials_file, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, quota_project_id=quota_project_id, @@ -244,7 +247,7 @@ def operations_client(self) -> operations_v1.OperationsAsyncClient: This property caches on the instance; repeated calls return the same client. """ - # Sanity check: Only create a new client if we do not already have one. + # Quick check: Only create a new client if we do not already have one. 
if self._operations_client is None: self._operations_client = operations_v1.OperationsAsyncClient( self.grpc_channel diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index b6eaece9f1b5..303bf2d33a7c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -286,7 +286,7 @@ async def create_table( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, table_id, table]) if request is not None and has_flattened_params: @@ -400,7 +400,7 @@ async def create_table_from_snapshot( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, table_id, source_snapshot]) if request is not None and has_flattened_params: @@ -487,7 +487,7 @@ async def list_tables( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent]) if request is not None and has_flattened_params: @@ -577,7 +577,7 @@ async def get_table( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: @@ -654,7 +654,7 @@ async def delete_table( sent along with the request as metadata. """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: @@ -746,7 +746,7 @@ async def modify_column_families( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name, modifications]) if request is not None and has_flattened_params: @@ -871,7 +871,7 @@ async def generate_consistency_token( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: @@ -964,7 +964,7 @@ async def check_consistency( """ # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name, consistency_token]) if request is not None and has_flattened_params: @@ -1098,7 +1098,7 @@ async def snapshot_table( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name, cluster, snapshot_id, description]) if request is not None and has_flattened_params: @@ -1207,7 +1207,7 @@ async def get_snapshot( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: @@ -1315,7 +1315,7 @@ async def list_snapshots( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent]) if request is not None and has_flattened_params: @@ -1409,7 +1409,7 @@ async def delete_snapshot( sent along with the request as metadata. """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: @@ -1511,7 +1511,7 @@ async def create_backup( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, backup_id, backup]) if request is not None and has_flattened_params: @@ -1593,7 +1593,7 @@ async def get_backup( A backup of a Cloud Bigtable table. """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: @@ -1689,7 +1689,7 @@ async def update_backup( A backup of a Cloud Bigtable table. """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([backup, update_mask]) if request is not None and has_flattened_params: @@ -1759,7 +1759,7 @@ async def delete_backup( sent along with the request as metadata. """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
has_flattened_params = any([name]) if request is not None and has_flattened_params: @@ -1837,7 +1837,7 @@ async def list_backups( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent]) if request is not None and has_flattened_params: @@ -2049,7 +2049,7 @@ async def get_iam_policy( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([resource]) if request is not None and has_flattened_params: @@ -2186,7 +2186,7 @@ async def set_iam_policy( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([resource]) if request is not None and has_flattened_params: @@ -2268,7 +2268,7 @@ async def test_iam_permissions( Response message for TestIamPermissions method. """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([resource, permissions]) if request is not None and has_flattened_params: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index 8804ee213bb5..070423018610 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -557,7 +557,7 @@ def create_table( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, table_id, table]) if request is not None and has_flattened_params: @@ -671,7 +671,7 @@ def create_table_from_snapshot( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, table_id, source_snapshot]) if request is not None and has_flattened_params: @@ -760,7 +760,7 @@ def list_tables( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent]) if request is not None and has_flattened_params: @@ -840,7 +840,7 @@ def get_table( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
has_flattened_params = any([name]) if request is not None and has_flattened_params: @@ -907,7 +907,7 @@ def delete_table( sent along with the request as metadata. """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: @@ -999,7 +999,7 @@ def modify_column_families( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name, modifications]) if request is not None and has_flattened_params: @@ -1125,7 +1125,7 @@ def generate_consistency_token( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: @@ -1212,7 +1212,7 @@ def check_consistency( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name, consistency_token]) if request is not None and has_flattened_params: @@ -1336,7 +1336,7 @@ def snapshot_table( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name, cluster, snapshot_id, description]) if request is not None and has_flattened_params: @@ -1445,7 +1445,7 @@ def get_snapshot( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: @@ -1543,7 +1543,7 @@ def list_snapshots( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent]) if request is not None and has_flattened_params: @@ -1627,7 +1627,7 @@ def delete_snapshot( sent along with the request as metadata. """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: @@ -1729,7 +1729,7 @@ def create_backup( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
has_flattened_params = any([parent, backup_id, backup]) if request is not None and has_flattened_params: @@ -1811,7 +1811,7 @@ def get_backup( A backup of a Cloud Bigtable table. """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: @@ -1897,7 +1897,7 @@ def update_backup( A backup of a Cloud Bigtable table. """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([backup, update_mask]) if request is not None and has_flattened_params: @@ -1967,7 +1967,7 @@ def delete_backup( sent along with the request as metadata. """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: @@ -2045,7 +2045,7 @@ def list_backups( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([parent]) if request is not None and has_flattened_params: @@ -2248,7 +2248,7 @@ def get_iam_policy( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([resource]) if request is not None and has_flattened_params: @@ -2374,7 +2374,7 @@ def set_iam_policy( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([resource]) if request is not None and has_flattened_params: @@ -2455,7 +2455,7 @@ def test_iam_permissions( Response message for TestIamPermissions method. """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
has_flattened_params = any([resource, permissions]) if request is not None and has_flattened_params: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py index a566aecf5e62..906d6b13dc49 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py @@ -170,8 +170,11 @@ def __init__( if not self._grpc_channel: self._grpc_channel = type(self).create_channel( self._host, + # use the credentials which are saved credentials=self._credentials, - credentials_file=credentials_file, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, quota_project_id=quota_project_id, @@ -244,7 +247,7 @@ def operations_client(self) -> operations_v1.OperationsClient: This property caches on the instance; repeated calls return the same client. """ - # Sanity check: Only create a new client if we do not already have one. + # Quick check: Only create a new client if we do not already have one. if self._operations_client is None: self._operations_client = operations_v1.OperationsClient(self.grpc_channel) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py index f7fcd4435f07..790568ceef38 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py @@ -215,8 +215,11 @@ def __init__( if not self._grpc_channel: self._grpc_channel = type(self).create_channel( self._host, + # use the credentials which are saved credentials=self._credentials, - credentials_file=credentials_file, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, quota_project_id=quota_project_id, @@ -246,7 +249,7 @@ def operations_client(self) -> operations_v1.OperationsAsyncClient: This property caches on the instance; repeated calls return the same client. """ - # Sanity check: Only create a new client if we do not already have one. + # Quick check: Only create a new client if we do not already have one. if self._operations_client is None: self._operations_client = operations_v1.OperationsAsyncClient( self.grpc_channel diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py index ebd54fcf9faf..b8ff4e60e1e7 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py @@ -626,9 +626,9 @@ class SnapshotTableMetadata(proto.Message): r"""The metadata for the Operation returned by SnapshotTable. 
Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud - Bigtable customers. This feature might be changed in backward- - incompatible ways and is not recommended for production use. It - is not subject to any SLA or deprecation policy. + Bigtable customers. This feature might be changed in + backward-incompatible ways and is not recommended for production + use. It is not subject to any SLA or deprecation policy. Attributes: original_request (google.cloud.bigtable_admin_v2.types.SnapshotTableRequest): @@ -656,9 +656,9 @@ class CreateTableFromSnapshotMetadata(proto.Message): CreateTableFromSnapshot. Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud - Bigtable customers. This feature might be changed in backward- - incompatible ways and is not recommended for production use. It - is not subject to any SLA or deprecation policy. + Bigtable customers. This feature might be changed in + backward-incompatible ways and is not recommended for production + use. It is not subject to any SLA or deprecation policy. Attributes: original_request (google.cloud.bigtable_admin_v2.types.CreateTableFromSnapshotRequest): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py index 0b008748cf54..206cb40c414d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py @@ -201,8 +201,8 @@ class ClusterConfig(proto.Message): ) class EncryptionConfig(proto.Message): - r"""Cloud Key Management Service (Cloud KMS) settings for a CMEK- - rotected cluster. + r"""Cloud Key Management Service (Cloud KMS) settings for a + CMEK-protected cluster. Attributes: kms_key_name (str): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py index c6cde8089c4d..7ced1216cd6b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py @@ -282,9 +282,9 @@ class Snapshot(proto.Message): new table. Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud - Bigtable customers. This feature might be changed in backward- - incompatible ways and is not recommended for production use. It - is not subject to any SLA or deprecation policy. + Bigtable customers. This feature might be changed in + backward-incompatible ways and is not recommended for production + use. It is not subject to any SLA or deprecation policy. Attributes: name (str): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py index 7a48c382edfb..9db7ac1cb118 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -256,7 +256,7 @@ def read_rows( """ # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([table_name, app_profile_id]) if request is not None and has_flattened_params: @@ -353,7 +353,7 @@ def sample_row_keys( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([table_name, app_profile_id]) if request is not None and has_flattened_params: @@ -467,7 +467,7 @@ async def mutate_row( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([table_name, row_key, mutations, app_profile_id]) if request is not None and has_flattened_params: @@ -584,7 +584,7 @@ def mutate_rows( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([table_name, entries, app_profile_id]) if request is not None and has_flattened_params: @@ -725,7 +725,7 @@ async def check_and_mutate_row( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any( [ @@ -803,10 +803,10 @@ async def read_modify_write_row( ) -> bigtable.ReadModifyWriteRowResponse: r"""Modifies a row atomically on the server. The method reads the latest existing timestamp and value from the - specified columns and writes a new entry based on pre- - defined read/modify/write rules. The new value for the - timestamp is the greater of the existing timestamp or - the current server time. The method returns the new + specified columns and writes a new entry based on + pre-defined read/modify/write rules. The new value for + the timestamp is the greater of the existing timestamp + or the current server time. The method returns the new contents of all modified cells. Args: @@ -863,7 +863,7 @@ async def read_modify_write_row( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([table_name, row_key, rules, app_profile_id]) if request is not None and has_flattened_params: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py index 2d755b8f3a0b..90a7536066f8 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py @@ -450,7 +450,7 @@ def read_rows( """ # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([table_name, app_profile_id]) if request is not None and has_flattened_params: @@ -540,7 +540,7 @@ def sample_row_keys( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([table_name, app_profile_id]) if request is not None and has_flattened_params: @@ -647,7 +647,7 @@ def mutate_row( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([table_name, row_key, mutations, app_profile_id]) if request is not None and has_flattened_params: @@ -754,7 +754,7 @@ def mutate_rows( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([table_name, entries, app_profile_id]) if request is not None and has_flattened_params: @@ -888,7 +888,7 @@ def check_and_mutate_row( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any( [ @@ -959,10 +959,10 @@ def read_modify_write_row( ) -> bigtable.ReadModifyWriteRowResponse: r"""Modifies a row atomically on the server. The method reads the latest existing timestamp and value from the - specified columns and writes a new entry based on pre- - defined read/modify/write rules. The new value for the - timestamp is the greater of the existing timestamp or - the current server time. The method returns the new + specified columns and writes a new entry based on + pre-defined read/modify/write rules. The new value for + the timestamp is the greater of the existing timestamp + or the current server time. The method returns the new contents of all modified cells. Args: @@ -1019,7 +1019,7 @@ def read_modify_write_row( """ # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have + # Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
has_flattened_params = any([table_name, row_key, rules, app_profile_id]) if request is not None and has_flattened_params: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py index ce409fbf2faa..78b2215ffb69 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py @@ -159,8 +159,11 @@ def __init__( if not self._grpc_channel: self._grpc_channel = type(self).create_channel( self._host, + # use the credentials which are saved credentials=self._credentials, - credentials_file=credentials_file, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, quota_project_id=quota_project_id, @@ -381,10 +384,10 @@ def read_modify_write_row( Modifies a row atomically on the server. The method reads the latest existing timestamp and value from the - specified columns and writes a new entry based on pre- - defined read/modify/write rules. The new value for the - timestamp is the greater of the existing timestamp or - the current server time. The method returns the new + specified columns and writes a new entry based on + pre-defined read/modify/write rules. The new value for + the timestamp is the greater of the existing timestamp + or the current server time. The method returns the new contents of all modified cells. Returns: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py index d6d46cb81f7a..aa3b80f13139 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py @@ -204,8 +204,11 @@ def __init__( if not self._grpc_channel: self._grpc_channel = type(self).create_channel( self._host, + # use the credentials which are saved credentials=self._credentials, - credentials_file=credentials_file, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, quota_project_id=quota_project_id, @@ -387,10 +390,10 @@ def read_modify_write_row( Modifies a row atomically on the server. The method reads the latest existing timestamp and value from the - specified columns and writes a new entry based on pre- - defined read/modify/write rules. The new value for the - timestamp is the greater of the existing timestamp or - the current server time. The method returns the new + specified columns and writes a new entry based on + pre-defined read/modify/write rules. The new value for + the timestamp is the greater of the existing timestamp + or the current server time. The method returns the new contents of all modified cells. 
Returns: diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index 3403568fd2a5..bf5b3e9e5be2 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -29,6 +29,7 @@ from google.api_core import gapic_v1 from google.api_core import grpc_helpers from google.api_core import grpc_helpers_async +from google.api_core import operation from google.api_core import operation_async # type: ignore from google.api_core import operations_v1 from google.api_core import path_template @@ -544,25 +545,28 @@ def test_bigtable_instance_admin_client_client_options_scopes( @pytest.mark.parametrize( - "client_class,transport_class,transport_name", + "client_class,transport_class,transport_name,grpc_helpers", [ ( BigtableInstanceAdminClient, transports.BigtableInstanceAdminGrpcTransport, "grpc", + grpc_helpers, ), ( BigtableInstanceAdminAsyncClient, transports.BigtableInstanceAdminGrpcAsyncIOTransport, "grpc_asyncio", + grpc_helpers_async, ), ], ) def test_bigtable_instance_admin_client_client_options_credentials_file( - client_class, transport_class, transport_name + client_class, transport_class, transport_name, grpc_helpers ): # Check the case credentials file is provided. options = client_options.ClientOptions(credentials_file="credentials.json") + with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options, transport=transport_name) @@ -598,6 +602,80 @@ def test_bigtable_instance_admin_client_client_options_from_dict(): ) +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,grpc_helpers", + [ + ( + BigtableInstanceAdminClient, + transports.BigtableInstanceAdminGrpcTransport, + "grpc", + grpc_helpers, + ), + ( + BigtableInstanceAdminAsyncClient, + transports.BigtableInstanceAdminGrpcAsyncIOTransport, + "grpc_asyncio", + grpc_helpers_async, + ), + ], +) +def test_bigtable_instance_admin_client_create_channel_credentials_file( + client_class, transport_class, transport_name, grpc_helpers +): + # Check the case credentials file is provided. + options = client_options.ClientOptions(credentials_file="credentials.json") + + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # test that the credentials from file are saved and used as the credentials. 
+ with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "bigtableadmin.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=( + "https://www.googleapis.com/auth/bigtable.admin", + "https://www.googleapis.com/auth/bigtable.admin.cluster", + "https://www.googleapis.com/auth/bigtable.admin.instance", + "https://www.googleapis.com/auth/cloud-bigtable.admin", + "https://www.googleapis.com/auth/cloud-bigtable.admin.cluster", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ), + scopes=None, + default_host="bigtableadmin.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + @pytest.mark.parametrize( "request_type", [bigtable_instance_admin.CreateInstanceRequest, dict,] ) diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index 674383edca65..49d2c9ddfc3a 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -29,6 +29,7 @@ from google.api_core import gapic_v1 from google.api_core import grpc_helpers from google.api_core import grpc_helpers_async +from google.api_core import operation from google.api_core import operation_async # type: ignore from google.api_core import operations_v1 from google.api_core import path_template @@ -538,21 +539,28 @@ def test_bigtable_table_admin_client_client_options_scopes( @pytest.mark.parametrize( - "client_class,transport_class,transport_name", + "client_class,transport_class,transport_name,grpc_helpers", [ - (BigtableTableAdminClient, transports.BigtableTableAdminGrpcTransport, "grpc"), + ( + BigtableTableAdminClient, + transports.BigtableTableAdminGrpcTransport, + "grpc", + grpc_helpers, + ), ( BigtableTableAdminAsyncClient, transports.BigtableTableAdminGrpcAsyncIOTransport, "grpc_asyncio", + grpc_helpers_async, ), ], ) def test_bigtable_table_admin_client_client_options_credentials_file( - client_class, transport_class, transport_name + client_class, transport_class, transport_name, grpc_helpers ): # Check the case credentials file is provided. 
options = client_options.ClientOptions(credentials_file="credentials.json") + with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options, transport=transport_name) @@ -588,6 +596,79 @@ def test_bigtable_table_admin_client_client_options_from_dict(): ) +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,grpc_helpers", + [ + ( + BigtableTableAdminClient, + transports.BigtableTableAdminGrpcTransport, + "grpc", + grpc_helpers, + ), + ( + BigtableTableAdminAsyncClient, + transports.BigtableTableAdminGrpcAsyncIOTransport, + "grpc_asyncio", + grpc_helpers_async, + ), + ], +) +def test_bigtable_table_admin_client_create_channel_credentials_file( + client_class, transport_class, transport_name, grpc_helpers +): + # Check the case credentials file is provided. + options = client_options.ClientOptions(credentials_file="credentials.json") + + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # test that the credentials from file are saved and used as the credentials. + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "bigtableadmin.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=( + "https://www.googleapis.com/auth/bigtable.admin", + "https://www.googleapis.com/auth/bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-bigtable.admin", + "https://www.googleapis.com/auth/cloud-bigtable.admin.table", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ), + scopes=None, + default_host="bigtableadmin.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + @pytest.mark.parametrize( "request_type", [bigtable_table_admin.CreateTableRequest, dict,] ) diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py index f745a63ff0e1..19868b14e614 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py @@ -481,17 +481,23 @@ def test_bigtable_client_client_options_scopes( @pytest.mark.parametrize( - "client_class,transport_class,transport_name", + "client_class,transport_class,transport_name,grpc_helpers", [ - (BigtableClient, transports.BigtableGrpcTransport, "grpc"), - (BigtableAsyncClient, transports.BigtableGrpcAsyncIOTransport, "grpc_asyncio"), + (BigtableClient, 
transports.BigtableGrpcTransport, "grpc", grpc_helpers), + ( + BigtableAsyncClient, + transports.BigtableGrpcAsyncIOTransport, + "grpc_asyncio", + grpc_helpers_async, + ), ], ) def test_bigtable_client_client_options_credentials_file( - client_class, transport_class, transport_name + client_class, transport_class, transport_name, grpc_helpers ): # Check the case credentials file is provided. options = client_options.ClientOptions(credentials_file="credentials.json") + with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options, transport=transport_name) @@ -525,6 +531,74 @@ def test_bigtable_client_client_options_from_dict(): ) +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,grpc_helpers", + [ + (BigtableClient, transports.BigtableGrpcTransport, "grpc", grpc_helpers), + ( + BigtableAsyncClient, + transports.BigtableGrpcAsyncIOTransport, + "grpc_asyncio", + grpc_helpers_async, + ), + ], +) +def test_bigtable_client_create_channel_credentials_file( + client_class, transport_class, transport_name, grpc_helpers +): + # Check the case credentials file is provided. + options = client_options.ClientOptions(credentials_file="credentials.json") + + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # test that the credentials from file are saved and used as the credentials. + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "bigtable.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=( + "https://www.googleapis.com/auth/bigtable.data", + "https://www.googleapis.com/auth/bigtable.data.readonly", + "https://www.googleapis.com/auth/cloud-bigtable.data", + "https://www.googleapis.com/auth/cloud-bigtable.data.readonly", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ), + scopes=None, + default_host="bigtable.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + @pytest.mark.parametrize("request_type", [bigtable.ReadRowsRequest, dict,]) def test_read_rows(request_type, transport: str = "grpc"): client = BigtableClient( From c0029b68ef1b9e2d4948787aba5c70ea39f7fe8c Mon Sep 17 00:00:00 2001 From: Mariatta Wijaya Date: Thu, 3 Feb 2022 14:58:56 -0800 Subject: [PATCH 555/892] doc: Fix broken links in data-api documentation (#501) There was some refactoring, and the bigtable_v2/proto directory no longer exists. Updated the links to the correct ones, and used permalink. 
--- packages/google-cloud-bigtable/docs/data-api.rst | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/docs/data-api.rst b/packages/google-cloud-bigtable/docs/data-api.rst index 9d9205e6b1e9..01a49178fd4d 100644 --- a/packages/google-cloud-bigtable/docs/data-api.rst +++ b/packages/google-cloud-bigtable/docs/data-api.rst @@ -337,8 +337,8 @@ Just as with reading, the stream can be canceled: keys_iterator.cancel() -.. _ReadRows: https://github.com/googleapis/python-bigtable/blob/main/google/cloud/bigtable_v2/proto/bigtable.proto#L54-L61 -.. _SampleRowKeys: https://github.com/googleapis/python-bigtable/blob/main/google/cloud/bigtable_v2/proto/bigtable.proto#L67-L73 -.. _MutateRow: https://github.com/googleapis/python-bigtable/blob/main/google/cloud/bigtable_v2/proto/bigtable.proto#L77-L84 -.. _CheckAndMutateRow: https://github.com/googleapis/python-bigtable/blob/main/google/cloud/bigtable_v2/proto/bigtable.proto#L99-L106 -.. _ReadModifyWriteRow: https://github.com/googleapis/python-bigtable/blob/main/google/cloud/bigtable_v2/proto/bigtable.proto#L113-L121 +.. _ReadRows: https://github.com/googleapis/python-bigtable/blob/d6bff70654b41e31d2ac83d307bdc6bbd111201e/google/cloud/bigtable_v2/types/bigtable.py#L42-L72 +.. _SampleRowKeys: https://github.com/googleapis/python-bigtable/blob/d6bff70654b41e31d2ac83d307bdc6bbd111201e/google/cloud/bigtable_v2/types/bigtable.py#L184-L199 +.. _MutateRow: https://github.com/googleapis/python-bigtable/blob/d6bff70654b41e31d2ac83d307bdc6bbd111201e/google/cloud/bigtable_v2/types/bigtable.py#L230-L256 +.. _CheckAndMutateRow: https://github.com/googleapis/python-bigtable/blob/d6bff70654b41e31d2ac83d307bdc6bbd111201e/google/cloud/bigtable_v2/types/bigtable.py#L339-L386 +.. 
_ReadModifyWriteRow: https://github.com/googleapis/python-bigtable/blob/d6bff70654b41e31d2ac83d307bdc6bbd111201e/google/cloud/bigtable_v2/types/bigtable.py#L401-L430 From 8a2e0b7954b1476e664491ace0f823ac6c161911 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Mon, 7 Feb 2022 17:20:44 +0100 Subject: [PATCH 556/892] chore(deps): update all dependencies (#498) --- .../.github/workflows/system_emulated.yml | 2 +- .../samples/beam/requirements-test.txt | 2 +- .../google-cloud-bigtable/samples/beam/requirements.txt | 6 +++--- .../samples/hello/requirements-test.txt | 2 +- .../google-cloud-bigtable/samples/hello/requirements.txt | 2 +- .../samples/hello_happybase/requirements-test.txt | 2 +- .../samples/instanceadmin/requirements-test.txt | 2 +- .../samples/metricscaler/requirements-test.txt | 2 +- .../samples/quickstart/requirements-test.txt | 2 +- .../samples/quickstart_happybase/requirements-test.txt | 2 +- .../samples/snippets/filters/requirements-test.txt | 2 +- .../samples/snippets/reads/requirements-test.txt | 2 +- .../samples/snippets/writes/requirements-test.txt | 2 +- .../samples/tableadmin/requirements-test.txt | 4 ++-- 14 files changed, 17 insertions(+), 17 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml b/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml index 57656d3ce2b9..480ae98a4258 100644 --- a/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml +++ b/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml @@ -20,7 +20,7 @@ jobs: python-version: '3.8' - name: Setup GCloud SDK - uses: google-github-actions/setup-gcloud@v0.2.1 + uses: google-github-actions/setup-gcloud@v0.5.0 - name: Install / run Nox run: | diff --git a/packages/google-cloud-bigtable/samples/beam/requirements-test.txt b/packages/google-cloud-bigtable/samples/beam/requirements-test.txt index 927094516e65..4a46ff600804 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements-test.txt @@ -1 +1 @@ -pytest==6.2.5 +pytest==7.0.0 diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index 0ac314d1671d..094b6ef915e9 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ -apache-beam==2.34.0 -google-cloud-bigtable<2.0.0 -google-cloud-core==1.7.2 +apache-beam==2.35.0 +google-cloud-bigtable<2.5.0 +google-cloud-core==2.2.2 diff --git a/packages/google-cloud-bigtable/samples/hello/requirements-test.txt b/packages/google-cloud-bigtable/samples/hello/requirements-test.txt index 927094516e65..4a46ff600804 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements-test.txt @@ -1 +1 @@ -pytest==6.2.5 +pytest==7.0.0 diff --git a/packages/google-cloud-bigtable/samples/hello/requirements.txt b/packages/google-cloud-bigtable/samples/hello/requirements.txt index 58379c8cb66a..f62375fdbeed 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements.txt @@ -1,2 +1,2 @@ google-cloud-bigtable==2.4.0 -google-cloud-core==2.2.1 +google-cloud-core==2.2.2 diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt 
b/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt index 927094516e65..4a46ff600804 100644 --- a/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt @@ -1 +1 @@ -pytest==6.2.5 +pytest==7.0.0 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt index 927094516e65..4a46ff600804 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt @@ -1 +1 @@ -pytest==6.2.5 +pytest==7.0.0 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt index c16fa6493ded..2d5a435bd4b9 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt @@ -1,3 +1,3 @@ -pytest==6.2.5 +pytest==7.0.0 mock==4.0.3 google-cloud-testutils diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt index 927094516e65..4a46ff600804 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt @@ -1 +1 @@ -pytest==6.2.5 +pytest==7.0.0 diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt b/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt index 927094516e65..4a46ff600804 100644 --- a/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt @@ -1 +1 @@ -pytest==6.2.5 +pytest==7.0.0 diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt index 927094516e65..4a46ff600804 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt @@ -1 +1 @@ -pytest==6.2.5 +pytest==7.0.0 diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt index 927094516e65..4a46ff600804 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt @@ -1 +1 @@ -pytest==6.2.5 +pytest==7.0.0 diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt index fbe6c1c5cfc8..27df4634c3c3 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt @@ -1,2 +1,2 @@ backoff==1.11.1 -pytest==6.2.5 +pytest==7.0.0 diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt index 06b8f206a0e6..f5889ff1d25c 100644 --- 
a/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt @@ -1,2 +1,2 @@ -pytest==6.2.5 -google-cloud-testutils==1.3.0 +pytest==7.0.0 +google-cloud-testutils==1.3.1 From 56fc7517adb4092bc0f1913c20ea7e9a4f5e46c2 Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Mon, 7 Feb 2022 09:42:21 -0800 Subject: [PATCH 557/892] chore(main): release 2.5.0 (#486) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- packages/google-cloud-bigtable/CHANGELOG.md | 26 +++++++++++++++++++++ packages/google-cloud-bigtable/setup.py | 2 +- 2 files changed, 27 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 0bee749f3b03..01e2650d5932 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,32 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.5.0](https://github.com/googleapis/python-bigtable/compare/v2.4.0...v2.5.0) (2022-02-07) + + +### Features + +* add 'Instance.create_time' field ([#449](https://github.com/googleapis/python-bigtable/issues/449)) ([b9ecfa9](https://github.com/googleapis/python-bigtable/commit/b9ecfa97281ae21dcf233e60c70cacc701f12c32)) +* add api key support ([#497](https://github.com/googleapis/python-bigtable/issues/497)) ([ee3a6c4](https://github.com/googleapis/python-bigtable/commit/ee3a6c4c5f810fab08671db3407195864ecc1972)) +* add Autoscaling API ([#475](https://github.com/googleapis/python-bigtable/issues/475)) ([97b3cdd](https://github.com/googleapis/python-bigtable/commit/97b3cddb908098e255e7a1209cdb985087b95a26)) +* add context manager support in client ([#440](https://github.com/googleapis/python-bigtable/issues/440)) ([a3d2cf1](https://github.com/googleapis/python-bigtable/commit/a3d2cf18b49cddc91e5e6448c46d6b936d86954d)) +* add support for Python 3.10 ([#437](https://github.com/googleapis/python-bigtable/issues/437)) ([3cf0814](https://github.com/googleapis/python-bigtable/commit/3cf08149411f3f4df41e9b5a9894dbfb101bd86f)) + + +### Bug Fixes + +* **deps:** drop packaging dependency ([a535f99](https://github.com/googleapis/python-bigtable/commit/a535f99e9f0bb16488a5d372a0a6efc3c4b69186)) +* **deps:** require google-api-core >= 1.28.0 ([a535f99](https://github.com/googleapis/python-bigtable/commit/a535f99e9f0bb16488a5d372a0a6efc3c4b69186)) +* improper types in pagers generation ([f9c7699](https://github.com/googleapis/python-bigtable/commit/f9c7699eb6d4071314abbb0477ba47370059e041)) +* improve type hints, mypy checks ([#448](https://github.com/googleapis/python-bigtable/issues/448)) ([a99bf88](https://github.com/googleapis/python-bigtable/commit/a99bf88417d6aec03923447c70c2752f6bb5c459)) +* resolve DuplicateCredentialArgs error when using credentials_file ([d6bff70](https://github.com/googleapis/python-bigtable/commit/d6bff70654b41e31d2ac83d307bdc6bbd111201e)) + + +### Documentation + +* clarify comments in ReadRowsRequest and RowFilter ([#494](https://github.com/googleapis/python-bigtable/issues/494)) ([1efd9b5](https://github.com/googleapis/python-bigtable/commit/1efd9b598802f766a3c4c8c78ec7b0ca208d3325)) +* list oneofs in docstring ([a535f99](https://github.com/googleapis/python-bigtable/commit/a535f99e9f0bb16488a5d372a0a6efc3c4b69186)) + ## 
[2.4.0](https://www.github.com/googleapis/python-bigtable/compare/v2.3.3...v2.4.0) (2021-09-24) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 73a2b28199c5..ac58e62e7e04 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -22,7 +22,7 @@ name = "google-cloud-bigtable" description = "Google Cloud Bigtable API client library" -version = "2.4.0" +version = "2.5.0" # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' From 9b0a412caf20c549a4e3d98c24c466b87698d007 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 8 Feb 2022 22:56:24 +0100 Subject: [PATCH 558/892] chore(deps): update all dependencies (#503) * chore(deps): update all dependencies * remove upper limit for google-cloud-bigtable in beam sample Co-authored-by: Anthonios Partheniou --- packages/google-cloud-bigtable/samples/beam/requirements.txt | 4 ++-- packages/google-cloud-bigtable/samples/hello/requirements.txt | 2 +- .../samples/instanceadmin/requirements.txt | 2 +- .../samples/metricscaler/requirements.txt | 2 +- .../google-cloud-bigtable/samples/quickstart/requirements.txt | 2 +- .../samples/snippets/filters/requirements.txt | 2 +- .../samples/snippets/reads/requirements.txt | 2 +- .../samples/snippets/writes/requirements.txt | 2 +- .../google-cloud-bigtable/samples/tableadmin/requirements.txt | 2 +- 9 files changed, 10 insertions(+), 10 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index 094b6ef915e9..f0912d8729a4 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ -apache-beam==2.35.0 -google-cloud-bigtable<2.5.0 +apache-beam==2.36.0 +google-cloud-bigtable==2.5.0 google-cloud-core==2.2.2 diff --git a/packages/google-cloud-bigtable/samples/hello/requirements.txt b/packages/google-cloud-bigtable/samples/hello/requirements.txt index f62375fdbeed..fc6ed0577025 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.4.0 +google-cloud-bigtable==2.5.0 google-cloud-core==2.2.2 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt index 844169f7be50..1bdbe0869fa8 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.4.0 +google-cloud-bigtable==2.5.0 backoff==1.11.1 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index 2e1843a99736..4a54d4456c5e 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.4.0 +google-cloud-bigtable==2.5.0 google-cloud-monitoring==2.8.0 diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt index 73d64741ddc6..f46f9866e3b7 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt +++ 
b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.4.0 +google-cloud-bigtable==2.5.0 diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt index d2916abfca1e..42605f9bdccb 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.4.0 +google-cloud-bigtable==2.5.0 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt index d2916abfca1e..42605f9bdccb 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.4.0 +google-cloud-bigtable==2.5.0 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt index 2946eff51627..a44d15abe759 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.4.0 \ No newline at end of file +google-cloud-bigtable==2.5.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt index 73d64741ddc6..f46f9866e3b7 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.4.0 +google-cloud-bigtable==2.5.0 From fd945f8bfb4861fb1242b0ee0789ae032c0c8e44 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Mon, 14 Feb 2022 16:52:15 +0100 Subject: [PATCH 559/892] chore(deps): update dependency pytest to v7.0.1 (#505) --- .../google-cloud-bigtable/samples/beam/requirements-test.txt | 2 +- .../google-cloud-bigtable/samples/hello/requirements-test.txt | 2 +- .../samples/hello_happybase/requirements-test.txt | 2 +- .../samples/instanceadmin/requirements-test.txt | 2 +- .../samples/metricscaler/requirements-test.txt | 2 +- .../samples/quickstart/requirements-test.txt | 2 +- .../samples/quickstart_happybase/requirements-test.txt | 2 +- .../samples/snippets/filters/requirements-test.txt | 2 +- .../samples/snippets/reads/requirements-test.txt | 2 +- .../samples/snippets/writes/requirements-test.txt | 2 +- .../samples/tableadmin/requirements-test.txt | 2 +- 11 files changed, 11 insertions(+), 11 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements-test.txt b/packages/google-cloud-bigtable/samples/beam/requirements-test.txt index 4a46ff600804..c2845bffbe89 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements-test.txt @@ -1 +1 @@ -pytest==7.0.0 +pytest==7.0.1 diff --git a/packages/google-cloud-bigtable/samples/hello/requirements-test.txt b/packages/google-cloud-bigtable/samples/hello/requirements-test.txt index 4a46ff600804..c2845bffbe89 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements-test.txt +++ 
b/packages/google-cloud-bigtable/samples/hello/requirements-test.txt @@ -1 +1 @@ -pytest==7.0.0 +pytest==7.0.1 diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt b/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt index 4a46ff600804..c2845bffbe89 100644 --- a/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt @@ -1 +1 @@ -pytest==7.0.0 +pytest==7.0.1 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt index 4a46ff600804..c2845bffbe89 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt @@ -1 +1 @@ -pytest==7.0.0 +pytest==7.0.1 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt index 2d5a435bd4b9..b4186c059922 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt @@ -1,3 +1,3 @@ -pytest==7.0.0 +pytest==7.0.1 mock==4.0.3 google-cloud-testutils diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt index 4a46ff600804..c2845bffbe89 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt @@ -1 +1 @@ -pytest==7.0.0 +pytest==7.0.1 diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt b/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt index 4a46ff600804..c2845bffbe89 100644 --- a/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt @@ -1 +1 @@ -pytest==7.0.0 +pytest==7.0.1 diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt index 4a46ff600804..c2845bffbe89 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt @@ -1 +1 @@ -pytest==7.0.0 +pytest==7.0.1 diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt index 4a46ff600804..c2845bffbe89 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt @@ -1 +1 @@ -pytest==7.0.0 +pytest==7.0.1 diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt index 27df4634c3c3..c531e813e29e 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt @@ -1,2 +1,2 @@ backoff==1.11.1 -pytest==7.0.0 +pytest==7.0.1 diff --git 
a/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt index f5889ff1d25c..786624c3ceb2 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt @@ -1,2 +1,2 @@ -pytest==7.0.0 +pytest==7.0.1 google-cloud-testutils==1.3.1 From 3df8cd1ba95003f9416b691c4e3b83195a582a58 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Thu, 17 Feb 2022 00:56:52 +0100 Subject: [PATCH 560/892] chore(deps): update google-github-actions/setup-gcloud action to v0.5.1 (#510) --- .../google-cloud-bigtable/.github/workflows/system_emulated.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml b/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml index 480ae98a4258..8e6c0cfcf0b2 100644 --- a/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml +++ b/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml @@ -20,7 +20,7 @@ jobs: python-version: '3.8' - name: Setup GCloud SDK - uses: google-github-actions/setup-gcloud@v0.5.0 + uses: google-github-actions/setup-gcloud@v0.5.1 - name: Install / run Nox run: | From d68d53ffea46483a1feab6d76efcda3ad570afea Mon Sep 17 00:00:00 2001 From: Bu Sun Kim <8822365+busunkim96@users.noreply.github.com> Date: Thu, 17 Feb 2022 08:43:46 -0700 Subject: [PATCH 561/892] fix(deps): move libcst to extras (#508) --- packages/google-cloud-bigtable/setup.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index ac58e62e7e04..fb0cf8187e26 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -39,9 +39,8 @@ "google-cloud-core >= 1.4.1, <3.0.0dev", "grpc-google-iam-v1 >= 0.12.3, < 0.13dev", "proto-plus >= 1.13.0", - "libcst >= 0.2.5", ] -extras = {} +extras = {"libcst": "libcst >= 0.2.5"} # Setup boilerplate below this line. 
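Note on the dependency change above: with libcst moved out of the required dependencies and into an extras entry, a default install of google-cloud-bigtable no longer pulls libcst in; only optional tooling that imports it (for example the generated fixup_bigtable_v2_keywords.py helper script) still needs it. A minimal sketch of opting back in, assuming setup.py passes this dict to setuptools as extras_require and standard pip extras syntax:

    pip install "google-cloud-bigtable[libcst]"

or, as an illustrative line in a sample requirements.txt:

    google-cloud-bigtable[libcst]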
From 63de0e93c7409a4b80c69532ab96b2154043f90a Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Thu, 17 Feb 2022 16:33:27 -0800 Subject: [PATCH 562/892] chore(main): release 2.5.1 (#511) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- packages/google-cloud-bigtable/CHANGELOG.md | 7 +++++++ packages/google-cloud-bigtable/setup.py | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 01e2650d5932..152caaf20096 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,13 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +### [2.5.1](https://github.com/googleapis/python-bigtable/compare/v2.5.0...v2.5.1) (2022-02-17) + + +### Bug Fixes + +* **deps:** move libcst to extras ([#508](https://github.com/googleapis/python-bigtable/issues/508)) ([4b4d7e2](https://github.com/googleapis/python-bigtable/commit/4b4d7e2796788b2cd3764f54ff532a9c9d092aec)) + ## [2.5.0](https://github.com/googleapis/python-bigtable/compare/v2.4.0...v2.5.0) (2022-02-07) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index fb0cf8187e26..49509269e1a1 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -22,7 +22,7 @@ name = "google-cloud-bigtable" description = "Google Cloud Bigtable API client library" -version = "2.5.0" +version = "2.5.1" # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' From e2fc0c93e2c256d4d38a277ed062c18895c90bca Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Fri, 18 Feb 2022 18:19:54 +0100 Subject: [PATCH 563/892] chore(deps): update dependency google-cloud-bigtable to v2.5.1 (#513) --- packages/google-cloud-bigtable/samples/beam/requirements.txt | 2 +- packages/google-cloud-bigtable/samples/hello/requirements.txt | 2 +- .../samples/instanceadmin/requirements.txt | 2 +- .../google-cloud-bigtable/samples/metricscaler/requirements.txt | 2 +- .../google-cloud-bigtable/samples/quickstart/requirements.txt | 2 +- .../samples/snippets/filters/requirements.txt | 2 +- .../samples/snippets/reads/requirements.txt | 2 +- .../samples/snippets/writes/requirements.txt | 2 +- .../google-cloud-bigtable/samples/tableadmin/requirements.txt | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index f0912d8729a4..a63120291c02 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ apache-beam==2.36.0 -google-cloud-bigtable==2.5.0 +google-cloud-bigtable==2.5.1 google-cloud-core==2.2.2 diff --git a/packages/google-cloud-bigtable/samples/hello/requirements.txt b/packages/google-cloud-bigtable/samples/hello/requirements.txt index fc6ed0577025..ba01dd874610 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.5.0 +google-cloud-bigtable==2.5.1 google-cloud-core==2.2.2 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt index 
1bdbe0869fa8..05ce0a0cc2fd 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.5.0 +google-cloud-bigtable==2.5.1 backoff==1.11.1 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index 4a54d4456c5e..c3eadde68cea 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.5.0 +google-cloud-bigtable==2.5.1 google-cloud-monitoring==2.8.0 diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt index f46f9866e3b7..6cf46e721759 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.5.0 +google-cloud-bigtable==2.5.1 diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt index 42605f9bdccb..3fb81307b37a 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.5.0 +google-cloud-bigtable==2.5.1 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt index 42605f9bdccb..3fb81307b37a 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.5.0 +google-cloud-bigtable==2.5.1 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt index a44d15abe759..4c2c54ec0cc3 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.5.0 \ No newline at end of file +google-cloud-bigtable==2.5.1 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt index f46f9866e3b7..6cf46e721759 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.5.0 +google-cloud-bigtable==2.5.1 From 52da5202b8f7a70b7b5d3dcdda8c18cba849263e Mon Sep 17 00:00:00 2001 From: Mariatta Wijaya Date: Thu, 24 Feb 2022 13:44:16 -0800 Subject: [PATCH 564/892] fix: Pass app_profile_id when building updated request (#512) - Pass the app_profile_id from the message - Update unittest Fixes internal bug #214449800 --- .../google/cloud/bigtable/row_data.py | 4 ++++ .../google-cloud-bigtable/tests/unit/test_row_data.py | 10 ++++++++-- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py 
b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py index 6ab1188a8c18..1cdd99026b38 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py @@ -624,9 +624,13 @@ def __init__(self, message, last_scanned_key, rows_read_so_far): def build_updated_request(self): """Updates the given message request as per last scanned key""" + + # TODO: Generalize this to ensure fields don't get rewritten when retrying the request + r_kwargs = { "table_name": self.message.table_name, "filter": self.message.filter, + "app_profile_id": self.message.app_profile_id, } if self.message.rows_limit != 0: diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_data.py b/packages/google-cloud-bigtable/tests/unit/test_row_data.py index 06fd2f016f36..e48893df5553 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_data.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_data.py @@ -893,7 +893,10 @@ def test_RRRM_build_updated_request(rrrm_data): row_filter = RowSampleFilter(0.33) last_scanned_key = b"row_key25" request = _ReadRowsRequestPB( - filter=row_filter.to_pb(), rows_limit=8, table_name=TABLE_NAME + filter=row_filter.to_pb(), + rows_limit=8, + table_name=TABLE_NAME, + app_profile_id="app-profile-id-1", ) request.rows.row_ranges.append(row_range1.get_range_kwargs()) @@ -902,7 +905,10 @@ def test_RRRM_build_updated_request(rrrm_data): result = request_manager.build_updated_request() expected_result = _ReadRowsRequestPB( - table_name=TABLE_NAME, filter=row_filter.to_pb(), rows_limit=6 + table_name=TABLE_NAME, + filter=row_filter.to_pb(), + rows_limit=6, + app_profile_id="app-profile-id-1", ) row_range1 = types.RowRange( From 65b0289013a0eb9065cb472a57a8630500315686 Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Thu, 24 Feb 2022 14:58:33 -0800 Subject: [PATCH 565/892] chore(main): release 2.5.2 (#514) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- packages/google-cloud-bigtable/CHANGELOG.md | 7 +++++++ packages/google-cloud-bigtable/setup.py | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 152caaf20096..5733ebd366d0 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,13 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +### [2.5.2](https://github.com/googleapis/python-bigtable/compare/v2.5.1...v2.5.2) (2022-02-24) + + +### Bug Fixes + +* Pass app_profile_id when building updated request ([#512](https://github.com/googleapis/python-bigtable/issues/512)) ([2f8ba7a](https://github.com/googleapis/python-bigtable/commit/2f8ba7a4801b17b5afb6180a7ace1327a2d05a52)) + ### [2.5.1](https://github.com/googleapis/python-bigtable/compare/v2.5.0...v2.5.1) (2022-02-17) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 49509269e1a1..b48340a8ab8e 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -22,7 +22,7 @@ name = "google-cloud-bigtable" description = "Google Cloud Bigtable API client library" -version = "2.5.1" +version = "2.5.2" # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' From b4cf56e9f4128ddbb9b0e57cc27b32ed6a4a8604 Mon Sep 17 00:00:00 2001 
From: WhiteSource Renovate Date: Fri, 25 Feb 2022 19:28:29 +0100 Subject: [PATCH 566/892] chore(deps): update dependency google-cloud-bigtable to v2.5.2 (#516) --- packages/google-cloud-bigtable/samples/beam/requirements.txt | 2 +- packages/google-cloud-bigtable/samples/hello/requirements.txt | 2 +- .../samples/instanceadmin/requirements.txt | 2 +- .../google-cloud-bigtable/samples/metricscaler/requirements.txt | 2 +- .../google-cloud-bigtable/samples/quickstart/requirements.txt | 2 +- .../samples/snippets/filters/requirements.txt | 2 +- .../samples/snippets/reads/requirements.txt | 2 +- .../samples/snippets/writes/requirements.txt | 2 +- .../google-cloud-bigtable/samples/tableadmin/requirements.txt | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index a63120291c02..4aed9a55d7d0 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ apache-beam==2.36.0 -google-cloud-bigtable==2.5.1 +google-cloud-bigtable==2.5.2 google-cloud-core==2.2.2 diff --git a/packages/google-cloud-bigtable/samples/hello/requirements.txt b/packages/google-cloud-bigtable/samples/hello/requirements.txt index ba01dd874610..f3158ef18938 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.5.1 +google-cloud-bigtable==2.5.2 google-cloud-core==2.2.2 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt index 05ce0a0cc2fd..b3fa08aa0f40 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.5.1 +google-cloud-bigtable==2.5.2 backoff==1.11.1 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index c3eadde68cea..ffb371b56454 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.5.1 +google-cloud-bigtable==2.5.2 google-cloud-monitoring==2.8.0 diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt index 6cf46e721759..307f5ffa4f10 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.5.1 +google-cloud-bigtable==2.5.2 diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt index 3fb81307b37a..711d7d2cdc3d 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.5.1 +google-cloud-bigtable==2.5.2 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt index 
3fb81307b37a..711d7d2cdc3d 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.5.1 +google-cloud-bigtable==2.5.2 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt index 4c2c54ec0cc3..0ed46fa412f5 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.5.1 \ No newline at end of file +google-cloud-bigtable==2.5.2 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt index 6cf46e721759..307f5ffa4f10 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.5.1 +google-cloud-bigtable==2.5.2 From 530cca9a2f43ea76d8ed7908e1450f6836534fee Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Sat, 26 Feb 2022 12:23:32 -0500 Subject: [PATCH 567/892] feat: add WarmAndPing request for channel priming (#504) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore: use gapic-generator-python 0.63.2 PiperOrigin-RevId: 427792504 Source-Link: https://github.com/googleapis/googleapis/commit/55b9e1e0b3106c850d13958352bc0751147b6b15 Source-Link: https://github.com/googleapis/googleapis-gen/commit/bf4e86b753f42cb0edb1fd51fbe840d7da0a1cde Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiYmY0ZTg2Yjc1M2Y0MmNiMGVkYjFmZDUxZmJlODQwZDdkYTBhMWNkZSJ9 * 🦉 Updates from OwlBot See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * feat: add WarmAndPing request for channel priming PiperOrigin-RevId: 428795660 Source-Link: https://github.com/googleapis/googleapis/commit/6cce671cb21e5ba9ee785dfe50f5a86b87bb5f21 Source-Link: https://github.com/googleapis/googleapis-gen/commit/2282bc1b081364ea783300be91a8c14cb4a718c4 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiMjI4MmJjMWIwODEzNjRlYTc4MzMwMGJlOTFhOGMxNGNiNGE3MThjNCJ9 * 🦉 Updates from OwlBot See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * fix replacement in owlbot.py * remove autogenerated snippets * chore: use gapic-generator-python 0.63.4 chore: fix snippet region tag format chore: fix docstring code block formatting PiperOrigin-RevId: 430730865 Source-Link: https://github.com/googleapis/googleapis/commit/ea5800229f73f94fd7204915a86ed09dcddf429a Source-Link: https://github.com/googleapis/googleapis-gen/commit/ca893ff8af25fc7fe001de1405a517d80446ecca Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiY2E4OTNmZjhhZjI1ZmM3ZmUwMDFkZTE0MDVhNTE3ZDgwNDQ2ZWNjYSJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot post-processor See 
https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * chore: update copyright year to 2022 PiperOrigin-RevId: 431037888 Source-Link: https://github.com/googleapis/googleapis/commit/b3397f5febbf21dfc69b875ddabaf76bee765058 Source-Link: https://github.com/googleapis/googleapis-gen/commit/510b54e1cdefd53173984df16645081308fe897e Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiNTEwYjU0ZTFjZGVmZDUzMTczOTg0ZGYxNjY0NTA4MTMwOGZlODk3ZSJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * chore: disable Python snippetgen for bigtable PiperOrigin-RevId: 431126635 Source-Link: https://github.com/googleapis/googleapis/commit/dbfbfdb38a2f891384779a7ee31deb15adba6659 Source-Link: https://github.com/googleapis/googleapis-gen/commit/4b4a4e7fec73c60913d8b1061f0b29affb1e2a72 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiNGI0YTRlN2ZlYzczYzYwOTEzZDhiMTA2MWYwYjI5YWZmYjFlMmE3MiJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md Co-authored-by: Owl Bot Co-authored-by: Anthonios Partheniou --- .../cloud/bigtable_admin_v2/__init__.py | 2 +- .../bigtable_admin_v2/services/__init__.py | 2 +- .../bigtable_instance_admin/__init__.py | 2 +- .../bigtable_instance_admin/async_client.py | 2 +- .../bigtable_instance_admin/client.py | 2 +- .../bigtable_instance_admin/pagers.py | 2 +- .../transports/__init__.py | 2 +- .../transports/base.py | 2 +- .../transports/grpc.py | 2 +- .../transports/grpc_asyncio.py | 2 +- .../services/bigtable_table_admin/__init__.py | 2 +- .../bigtable_table_admin/async_client.py | 2 +- .../services/bigtable_table_admin/client.py | 2 +- .../services/bigtable_table_admin/pagers.py | 2 +- .../transports/__init__.py | 2 +- .../bigtable_table_admin/transports/base.py | 2 +- .../bigtable_table_admin/transports/grpc.py | 2 +- .../transports/grpc_asyncio.py | 2 +- .../cloud/bigtable_admin_v2/types/__init__.py | 2 +- .../types/bigtable_instance_admin.py | 2 +- .../types/bigtable_table_admin.py | 2 +- .../cloud/bigtable_admin_v2/types/common.py | 2 +- .../cloud/bigtable_admin_v2/types/instance.py | 2 +- 
.../cloud/bigtable_admin_v2/types/table.py | 2 +- .../google/cloud/bigtable_v2/__init__.py | 6 +- .../cloud/bigtable_v2/gapic_metadata.json | 10 + .../cloud/bigtable_v2/services/__init__.py | 2 +- .../bigtable_v2/services/bigtable/__init__.py | 2 +- .../services/bigtable/async_client.py | 126 +++-- .../bigtable_v2/services/bigtable/client.py | 237 +++++++-- .../services/bigtable/transports/__init__.py | 2 +- .../services/bigtable/transports/base.py | 61 +-- .../services/bigtable/transports/grpc.py | 30 +- .../bigtable/transports/grpc_asyncio.py | 32 +- .../cloud/bigtable_v2/types/__init__.py | 6 +- .../cloud/bigtable_v2/types/bigtable.py | 29 +- .../google/cloud/bigtable_v2/types/data.py | 2 +- packages/google-cloud-bigtable/owlbot.py | 2 +- .../fixup_bigtable_admin_v2_keywords.py | 2 +- .../scripts/fixup_bigtable_v2_keywords.py | 3 +- .../google-cloud-bigtable/tests/__init__.py | 2 +- .../tests/unit/__init__.py | 2 +- .../tests/unit/gapic/__init__.py | 2 +- .../unit/gapic/bigtable_admin_v2/__init__.py | 2 +- .../test_bigtable_instance_admin.py | 2 +- .../test_bigtable_table_admin.py | 2 +- .../tests/unit/gapic/bigtable_v2/__init__.py | 2 +- .../unit/gapic/bigtable_v2/test_bigtable.py | 463 ++++++++++++------ 48 files changed, 763 insertions(+), 314 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py index 545000fbfce3..1b46d6215b40 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2020 Google LLC +# Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/__init__.py index 4de65971c238..e8e1c3845db5 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2020 Google LLC +# Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py index 5746abf02969..1fb10736ec78 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2020 Google LLC +# Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py index 9cc58c7eb6ac..18b6541dce97 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2020 Google LLC +# Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py index 697d0fd9b792..a13a4b794245 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2020 Google LLC +# Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py index d220a1b26ae2..77bde77e45aa 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2020 Google LLC +# Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py index 0dbb19a6aa27..bf207fb3a53b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2020 Google LLC +# Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py index f86569e0a0b7..b32cba7156f4 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2020 Google LLC +# Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py index c477ee926d71..8a0169ad00fb 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2020 Google LLC +# Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py index 97c8f1ad92f2..c1be2101a1e3 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2020 Google LLC +# Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py index a93a9932b084..515696537b03 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2020 Google LLC +# Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index 303bf2d33a7c..bef292dc83a9 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2020 Google LLC +# Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index 070423018610..6ba7ca063591 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2020 Google LLC +# Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py index 07e82255ad08..e639227df3c9 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2020 Google LLC +# Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py index b1231802cb64..78a7850e4598 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2020 Google LLC +# Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py index e8937e5392c7..ac5c17d0fef5 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2020 Google LLC +# Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py index 906d6b13dc49..8cd13e806faa 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2020 Google LLC +# Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py index 790568ceef38..7d2077f236b0 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2020 Google LLC +# Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py index d1e4c8f1ca45..a81a4b7ed54a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2020 Google LLC +# Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py index 842b0e5fe42d..131c2817778c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2020 Google LLC +# Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py index b8ff4e60e1e7..ae1c6c91611b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2020 Google LLC +# Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/common.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/common.py index 788671e71ca1..5615167e667d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/common.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/common.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2020 Google LLC +# Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py index 206cb40c414d..22c73dfddc53 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2020 Google LLC +# Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py index 7ced1216cd6b..b99c3a64668b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2020 Google LLC +# Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py index 1df0bdc5423f..d744bd53f4df 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2020 Google LLC +# Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -23,6 +23,8 @@ from .types.bigtable import MutateRowResponse from .types.bigtable import MutateRowsRequest from .types.bigtable import MutateRowsResponse +from .types.bigtable import PingAndWarmRequest +from .types.bigtable import PingAndWarmResponse from .types.bigtable import ReadModifyWriteRowRequest from .types.bigtable import ReadModifyWriteRowResponse from .types.bigtable import ReadRowsRequest @@ -56,6 +58,8 @@ "MutateRowsRequest", "MutateRowsResponse", "Mutation", + "PingAndWarmRequest", + "PingAndWarmResponse", "ReadModifyWriteRowRequest", "ReadModifyWriteRowResponse", "ReadModifyWriteRule", diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_metadata.json b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_metadata.json index 854c13be2936..4ceadc15181f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_metadata.json +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_metadata.json @@ -25,6 +25,11 @@ "mutate_rows" ] }, + "PingAndWarm": { + "methods": [ + "ping_and_warm" + ] + }, "ReadModifyWriteRow": { "methods": [ "read_modify_write_row" @@ -60,6 +65,11 @@ "mutate_rows" ] }, + "PingAndWarm": { + "methods": [ + "ping_and_warm" + ] + }, "ReadModifyWriteRow": { "methods": [ "read_modify_write_row" diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/__init__.py index 4de65971c238..e8e1c3845db5 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2020 Google LLC +# Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/__init__.py index a79e1d780866..cfce7b6b8340 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2020 Google LLC +# Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py index 9db7ac1cb118..cf40edc6c7a8 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2020 Google LLC +# Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -57,6 +57,8 @@ class BigtableAsyncClient: DEFAULT_ENDPOINT = BigtableClient.DEFAULT_ENDPOINT DEFAULT_MTLS_ENDPOINT = BigtableClient.DEFAULT_MTLS_ENDPOINT + instance_path = staticmethod(BigtableClient.instance_path) + parse_instance_path = staticmethod(BigtableClient.parse_instance_path) table_path = staticmethod(BigtableClient.table_path) parse_table_path = staticmethod(BigtableClient.parse_table_path) common_billing_account_path = staticmethod( @@ -278,13 +280,6 @@ def read_rows( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.read_rows, - default_retry=retries.Retry( - initial=0.01, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type(), - deadline=43200.0, - ), default_timeout=43200.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -375,13 +370,6 @@ def sample_row_keys( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.sample_row_keys, - default_retry=retries.Retry( - initial=0.01, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type(), - deadline=60.0, - ), default_timeout=60.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -608,13 +596,6 @@ def mutate_rows( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.mutate_rows, - default_retry=retries.Retry( - initial=0.01, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type(), - deadline=600.0, - ), default_timeout=600.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -764,13 +745,6 @@ async def check_and_mutate_row( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.check_and_mutate_row, - default_retry=retries.Retry( - initial=0.01, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type(), - deadline=20.0, - ), default_timeout=20.0, client_info=DEFAULT_CLIENT_INFO, ) @@ -789,6 +763,93 @@ async def check_and_mutate_row( # Done; return the response. return response + async def ping_and_warm( + self, + request: Union[bigtable.PingAndWarmRequest, dict] = None, + *, + name: str = None, + app_profile_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> bigtable.PingAndWarmResponse: + r"""Warm up associated instance metadata for this + connection. This call is not required but may be useful + for connection keep-alive. + + Args: + request (Union[google.cloud.bigtable_v2.types.PingAndWarmRequest, dict]): + The request object. Request message for client + connection keep-alive and warming. + name (:class:`str`): + Required. The unique name of the instance to check + permissions for as well as respond. Values are of the + form ``projects/<project>/instances/<instance>``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set.
+ app_profile_id (:class:`str`): + This value specifies routing for + replication. If not specified, the + "default" application profile will be + used. + + This corresponds to the ``app_profile_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_v2.types.PingAndWarmResponse: + Response message for + Bigtable.PingAndWarm connection + keepalive and warming. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, app_profile_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable.PingAndWarmRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if app_profile_id is not None: + request.app_profile_id = app_profile_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.ping_and_warm, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + async def read_modify_write_row( self, request: Union[bigtable.ReadModifyWriteRowRequest, dict] = None, @@ -889,13 +950,6 @@ async def read_modify_write_row( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.read_modify_write_row, - default_retry=retries.Retry( - initial=0.01, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type(), - deadline=20.0, - ), default_timeout=20.0, client_info=DEFAULT_CLIENT_INFO, ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py index 90a7536066f8..e221fefb5458 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2020 Google LLC +# Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -159,6 +159,19 @@ def transport(self) -> BigtableTransport: """ return self._transport + @staticmethod + def instance_path(project: str, instance: str,) -> str: + """Returns a fully-qualified instance string.""" + return "projects/{project}/instances/{instance}".format( + project=project, instance=instance, + ) + + @staticmethod + def parse_instance_path(path: str) -> Dict[str, str]: + """Parses a instance path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)$", path) + return m.groupdict() if m else {} + @staticmethod def table_path(project: str, instance: str, table: str,) -> str: """Returns a fully-qualified table string.""" @@ -476,13 +489,22 @@ def read_rows( # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.read_rows] - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("table_name", request.table_name),) - ), + header_params = {} + + routing_param_regex = re.compile( + "^(?P<table_name>projects/[^/]+/instances/[^/]+/tables/[^/]+)$" ) + regex_match = routing_param_regex.match(request.table_name) + if regex_match and regex_match.group("table_name"): + header_params["table_name"] = regex_match.group("table_name") + + if request.app_profile_id: + header_params["app_profile_id"] = request.app_profile_id + + if header_params: + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) @@ -566,13 +588,22 @@ def sample_row_keys( # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.sample_row_keys] - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("table_name", request.table_name),) - ), + header_params = {} + + routing_param_regex = re.compile( + "^(?P<table_name>projects/[^/]+/instances/[^/]+/tables/[^/]+)$" ) + regex_match = routing_param_regex.match(request.table_name) + if regex_match and regex_match.group("table_name"): + header_params["table_name"] = regex_match.group("table_name") + + if request.app_profile_id: + header_params["app_profile_id"] = request.app_profile_id + + if header_params: + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) @@ -677,13 +708,22 @@ def mutate_row( # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.mutate_row] - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("table_name", request.table_name),) - ), + header_params = {} + + routing_param_regex = re.compile( + "^(?P<table_name>projects/[^/]+/instances/[^/]+/tables/[^/]+)$" ) + regex_match = routing_param_regex.match(request.table_name) + if regex_match and regex_match.group("table_name"): + header_params["table_name"] = regex_match.group("table_name") + + if request.app_profile_id: + header_params["app_profile_id"] = request.app_profile_id + + if header_params: + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) # Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) @@ -782,13 +822,22 @@ def mutate_rows( # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.mutate_rows] - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("table_name", request.table_name),) - ), + header_params = {} + + routing_param_regex = re.compile( + "^(?P<table_name>projects/[^/]+/instances/[^/]+/tables/[^/]+)$" ) + regex_match = routing_param_regex.match(request.table_name) + if regex_match and regex_match.group("table_name"): + header_params["table_name"] = regex_match.group("table_name") + + if request.app_profile_id: + header_params["app_profile_id"] = request.app_profile_id + + if header_params: + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) @@ -931,13 +980,118 @@ def check_and_mutate_row( # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.check_and_mutate_row] - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("table_name", request.table_name),) - ), + header_params = {} + + routing_param_regex = re.compile( + "^(?P<table_name>projects/[^/]+/instances/[^/]+/tables/[^/]+)$" ) + regex_match = routing_param_regex.match(request.table_name) + if regex_match and regex_match.group("table_name"): + header_params["table_name"] = regex_match.group("table_name") + + if request.app_profile_id: + header_params["app_profile_id"] = request.app_profile_id + + if header_params: + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def ping_and_warm( + self, + request: Union[bigtable.PingAndWarmRequest, dict] = None, + *, + name: str = None, + app_profile_id: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> bigtable.PingAndWarmResponse: + r"""Warm up associated instance metadata for this + connection. This call is not required but may be useful + for connection keep-alive. + + Args: + request (Union[google.cloud.bigtable_v2.types.PingAndWarmRequest, dict]): + The request object. Request message for client + connection keep-alive and warming. + name (str): + Required. The unique name of the instance to check + permissions for as well as respond. Values are of the + form ``projects/<project>/instances/<instance>``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile_id (str): + This value specifies routing for + replication. If not specified, the + "default" application profile will be + used. + + This corresponds to the ``app_profile_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata.
+ + Returns: + google.cloud.bigtable_v2.types.PingAndWarmResponse: + Response message for + Bigtable.PingAndWarm connection + keepalive and warming. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, app_profile_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable.PingAndWarmRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable.PingAndWarmRequest): + request = bigtable.PingAndWarmRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if app_profile_id is not None: + request.app_profile_id = app_profile_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.ping_and_warm] + + header_params = {} + + routing_param_regex = re.compile("^(?P<name>projects/[^/]+/instances/[^/]+)$") + regex_match = routing_param_regex.match(request.name) + if regex_match and regex_match.group("name"): + header_params["name"] = regex_match.group("name") + + if request.app_profile_id: + header_params["app_profile_id"] = request.app_profile_id + + if header_params: + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) @@ -1049,13 +1203,22 @@ def read_modify_write_row( # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.read_modify_write_row] - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("table_name", request.table_name),) - ), + header_params = {} + + routing_param_regex = re.compile( + "^(?P<table_name>projects/[^/]+/instances/[^/]+/tables/[^/]+)$" ) + regex_match = routing_param_regex.match(request.table_name) + if regex_match and regex_match.group("table_name"): + header_params["table_name"] = regex_match.group("table_name") + + if request.app_profile_id: + header_params["app_profile_id"] = request.app_profile_id + + if header_params: + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py index ba1f2b88e8d9..67a9abdf9862 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2020 Google LLC +# Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License.
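The hunks above replace the fixed ``table_name`` field header with routing parameters: the ``x-goog-request-params`` metadata is now built from whichever request fields match the expected resource pattern. Below is a condensed, standalone sketch of that behaviour; the helper name and sample values are illustrative and not part of the generated code.

    import re

    from google.api_core import gapic_v1


    def routing_metadata(table_name: str, app_profile_id: str = "") -> tuple:
        """Build the routing-header metadata entry in the style of the generated clients."""
        pattern = re.compile(
            r"^(?P<table_name>projects/[^/]+/instances/[^/]+/tables/[^/]+)$"
        )
        header_params = {}
        match = pattern.match(table_name)
        if match:
            header_params["table_name"] = match.group("table_name")
        if app_profile_id:
            header_params["app_profile_id"] = app_profile_id
        if not header_params:
            # Malformed names yield no routing entry at all.
            return ()
        return (gapic_v1.routing_header.to_grpc_metadata(header_params),)


    # A well-formed table name plus an app profile produces a single metadata pair.
    routing_metadata("projects/my-project/instances/my-instance/tables/my-table", "default")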
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py index bb727d67e271..1ced17ddc35b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2020 Google LLC +# Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -126,28 +126,10 @@ def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. self._wrapped_methods = { self.read_rows: gapic_v1.method.wrap_method( - self.read_rows, - default_retry=retries.Retry( - initial=0.01, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type(), - deadline=43200.0, - ), - default_timeout=43200.0, - client_info=client_info, + self.read_rows, default_timeout=43200.0, client_info=client_info, ), self.sample_row_keys: gapic_v1.method.wrap_method( - self.sample_row_keys, - default_retry=retries.Retry( - initial=0.01, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type(), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, + self.sample_row_keys, default_timeout=60.0, client_info=client_info, ), self.mutate_row: gapic_v1.method.wrap_method( self.mutate_row, @@ -165,38 +147,18 @@ def _prep_wrapped_messages(self, client_info): client_info=client_info, ), self.mutate_rows: gapic_v1.method.wrap_method( - self.mutate_rows, - default_retry=retries.Retry( - initial=0.01, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type(), - deadline=600.0, - ), - default_timeout=600.0, - client_info=client_info, + self.mutate_rows, default_timeout=600.0, client_info=client_info, ), self.check_and_mutate_row: gapic_v1.method.wrap_method( self.check_and_mutate_row, - default_retry=retries.Retry( - initial=0.01, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type(), - deadline=20.0, - ), default_timeout=20.0, client_info=client_info, ), + self.ping_and_warm: gapic_v1.method.wrap_method( + self.ping_and_warm, default_timeout=None, client_info=client_info, + ), self.read_modify_write_row: gapic_v1.method.wrap_method( self.read_modify_write_row, - default_retry=retries.Retry( - initial=0.01, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type(), - deadline=20.0, - ), default_timeout=20.0, client_info=client_info, ), @@ -261,6 +223,15 @@ def check_and_mutate_row( ]: raise NotImplementedError() + @property + def ping_and_warm( + self, + ) -> Callable[ + [bigtable.PingAndWarmRequest], + Union[bigtable.PingAndWarmResponse, Awaitable[bigtable.PingAndWarmResponse]], + ]: + raise NotImplementedError() + @property def read_modify_write_row( self, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py index 78b2215ffb69..089cab726f90 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2020 Google LLC +# Copyright 2022 Google LLC # # Licensed under the Apache License, 
Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -374,6 +374,34 @@ def check_and_mutate_row( ) return self._stubs["check_and_mutate_row"] + @property + def ping_and_warm( + self, + ) -> Callable[[bigtable.PingAndWarmRequest], bigtable.PingAndWarmResponse]: + r"""Return a callable for the ping and warm method over gRPC. + + Warm up associated instance metadata for this + connection. This call is not required but may be useful + for connection keep-alive. + + Returns: + Callable[[~.PingAndWarmRequest], + ~.PingAndWarmResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "ping_and_warm" not in self._stubs: + self._stubs["ping_and_warm"] = self.grpc_channel.unary_unary( + "/google.bigtable.v2.Bigtable/PingAndWarm", + request_serializer=bigtable.PingAndWarmRequest.serialize, + response_deserializer=bigtable.PingAndWarmResponse.deserialize, + ) + return self._stubs["ping_and_warm"] + @property def read_modify_write_row( self, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py index aa3b80f13139..4099e7bd77d6 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2020 Google LLC +# Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -379,6 +379,36 @@ def check_and_mutate_row( ) return self._stubs["check_and_mutate_row"] + @property + def ping_and_warm( + self, + ) -> Callable[ + [bigtable.PingAndWarmRequest], Awaitable[bigtable.PingAndWarmResponse] + ]: + r"""Return a callable for the ping and warm method over gRPC. + + Warm up associated instance metadata for this + connection. This call is not required but may be useful + for connection keep-alive. + + Returns: + Callable[[~.PingAndWarmRequest], + Awaitable[~.PingAndWarmResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "ping_and_warm" not in self._stubs: + self._stubs["ping_and_warm"] = self.grpc_channel.unary_unary( + "/google.bigtable.v2.Bigtable/PingAndWarm", + request_serializer=bigtable.PingAndWarmRequest.serialize, + response_deserializer=bigtable.PingAndWarmResponse.deserialize, + ) + return self._stubs["ping_and_warm"] + @property def read_modify_write_row( self, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py index 4c15b6742e5f..401705715b51 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2020 Google LLC +# Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -20,6 +20,8 @@ MutateRowResponse, MutateRowsRequest, MutateRowsResponse, + PingAndWarmRequest, + PingAndWarmResponse, ReadModifyWriteRowRequest, ReadModifyWriteRowResponse, ReadRowsRequest, @@ -49,6 +51,8 @@ "MutateRowResponse", "MutateRowsRequest", "MutateRowsResponse", + "PingAndWarmRequest", + "PingAndWarmResponse", "ReadModifyWriteRowRequest", "ReadModifyWriteRowResponse", "ReadRowsRequest", diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py index 956eeca5c164..58fc457038ee 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2020 Google LLC +# Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -33,6 +33,8 @@ "MutateRowsResponse", "CheckAndMutateRowRequest", "CheckAndMutateRowResponse", + "PingAndWarmRequest", + "PingAndWarmResponse", "ReadModifyWriteRowRequest", "ReadModifyWriteRowResponse", }, @@ -398,6 +400,31 @@ class CheckAndMutateRowResponse(proto.Message): predicate_matched = proto.Field(proto.BOOL, number=1,) + +class PingAndWarmRequest(proto.Message): + r"""Request message for client connection keep-alive and warming. + + Attributes: + name (str): + Required. The unique name of the instance to check + permissions for as well as respond. Values are of the form + ``projects/<project>/instances/<instance>``. + app_profile_id (str): + This value specifies routing for replication. + If not specified, the "default" application + profile will be used. + """ + + name = proto.Field(proto.STRING, number=1,) + app_profile_id = proto.Field(proto.STRING, number=2,) + + +class PingAndWarmResponse(proto.Message): + r"""Response message for Bigtable.PingAndWarm connection + keepalive and warming. + + """ + + class ReadModifyWriteRowRequest(proto.Message): r"""Request message for Bigtable.ReadModifyWriteRow.
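To make the new message types concrete, here is a small sketch of constructing the request directly; the resource name is a placeholder, and ``client`` stands for an already-constructed ``BigtableClient``.

    from google.cloud.bigtable_v2 import types

    request = types.PingAndWarmRequest(
        name="projects/my-project/instances/my-instance",  # placeholder instance name
        app_profile_id="default",
    )
    # PingAndWarmResponse declares no fields, so callers only check that the RPC succeeded,
    # e.g. response = client.ping_and_warm(request=request)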
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py index 7cd74b0471fc..c81358f07369 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2020 Google LLC +# Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/owlbot.py b/packages/google-cloud-bigtable/owlbot.py index a5e1b09de435..015670675a29 100644 --- a/packages/google-cloud-bigtable/owlbot.py +++ b/packages/google-cloud-bigtable/owlbot.py @@ -181,7 +181,7 @@ def lint_setup_py(session): bad_clusters_typing = r""" clusters: Sequence\[ bigtable_instance_admin\.CreateInstanceRequest\.ClustersEntry - \] = None,""" + \] \= None,""" good_clusters_typing = """ clusters: Dict[str, gba_instance.Cluster] = None,""" diff --git a/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py b/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py index a837ad2927df..6d5bc00f43c6 100644 --- a/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py +++ b/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py @@ -1,6 +1,6 @@ #! /usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 Google LLC +# Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py b/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py index 4b32d617456c..7459d0806c48 100644 --- a/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py +++ b/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py @@ -1,6 +1,6 @@ #! /usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2020 Google LLC +# Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -42,6 +42,7 @@ class bigtableCallTransformer(cst.CSTTransformer): 'check_and_mutate_row': ('table_name', 'row_key', 'app_profile_id', 'predicate_filter', 'true_mutations', 'false_mutations', ), 'mutate_row': ('table_name', 'row_key', 'mutations', 'app_profile_id', ), 'mutate_rows': ('table_name', 'entries', 'app_profile_id', ), + 'ping_and_warm': ('name', 'app_profile_id', ), 'read_modify_write_row': ('table_name', 'row_key', 'rules', 'app_profile_id', ), 'read_rows': ('table_name', 'app_profile_id', 'rows', 'filter', 'rows_limit', ), 'sample_row_keys': ('table_name', 'app_profile_id', ), diff --git a/packages/google-cloud-bigtable/tests/__init__.py b/packages/google-cloud-bigtable/tests/__init__.py index 4de65971c238..e8e1c3845db5 100644 --- a/packages/google-cloud-bigtable/tests/__init__.py +++ b/packages/google-cloud-bigtable/tests/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2020 Google LLC +# Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
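The keyword tuple added to ``fixup_bigtable_v2_keywords.py`` above drives the migration script that folds old-style keyword calls into the request-object form. The snippet below only illustrates the intended rewrite for ``ping_and_warm`` and is not taken from the patch; ``client`` is a hypothetical ``BigtableClient``.

    # Call shape before running scripts/fixup_bigtable_v2_keywords.py:
    client.ping_and_warm(name="projects/my-project/instances/my-instance", app_profile_id="default")
    # Roughly what the transformer rewrites it to:
    client.ping_and_warm(
        request={"name": "projects/my-project/instances/my-instance", "app_profile_id": "default"}
    )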
diff --git a/packages/google-cloud-bigtable/tests/unit/__init__.py b/packages/google-cloud-bigtable/tests/unit/__init__.py index 4de65971c238..e8e1c3845db5 100644 --- a/packages/google-cloud-bigtable/tests/unit/__init__.py +++ b/packages/google-cloud-bigtable/tests/unit/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2020 Google LLC +# Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/__init__.py b/packages/google-cloud-bigtable/tests/unit/gapic/__init__.py index 4de65971c238..e8e1c3845db5 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/__init__.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2020 Google LLC +# Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/__init__.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/__init__.py index 4de65971c238..e8e1c3845db5 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/__init__.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2020 Google LLC +# Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index bf5b3e9e5be2..76df253aa812 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2020 Google LLC +# Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index 49d2c9ddfc3a..439b62113b8d 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2020 Google LLC +# Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/__init__.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/__init__.py index 4de65971c238..e8e1c3845db5 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/__init__.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2020 Google LLC +# Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py index 19868b14e614..f8d18608849d 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2020 Google LLC +# Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -676,14 +676,14 @@ async def test_read_rows_async_from_dict(): await test_read_rows_async(request_type=dict) -def test_read_rows_field_headers(): +def test_read_rows_routing_parameters(): client = BigtableClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable.ReadRowsRequest() - - request.table_name = "table_name/value" + request = bigtable.ReadRowsRequest( + {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.read_rows), "__call__") as call: @@ -695,37 +695,26 @@ def test_read_rows_field_headers(): _, args, _ = call.mock_calls[0] assert args[0] == request - # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "table_name=table_name/value",) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_read_rows_field_headers_async(): - client = BigtableAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) - + # This test doesn't assert anything useful. + assert kw["metadata"] # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable.ReadRowsRequest() - - request.table_name = "table_name/value" + request = bigtable.ReadRowsRequest({"app_profile_id": "sample1"}) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.read_rows), "__call__") as call: - call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) - call.return_value.read = mock.AsyncMock( - side_effect=[bigtable.ReadRowsResponse()] - ) - await client.read_rows(request) + call.return_value = iter([bigtable.ReadRowsResponse()]) + client.read_rows(request) # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) + assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request - # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "table_name=table_name/value",) in kw["metadata"] + # This test doesn't assert anything useful. 
+ assert kw["metadata"] def test_read_rows_flattened(): @@ -885,14 +874,14 @@ async def test_sample_row_keys_async_from_dict(): await test_sample_row_keys_async(request_type=dict) -def test_sample_row_keys_field_headers(): +def test_sample_row_keys_routing_parameters(): client = BigtableClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable.SampleRowKeysRequest() - - request.table_name = "table_name/value" + request = bigtable.SampleRowKeysRequest( + {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: @@ -904,37 +893,26 @@ def test_sample_row_keys_field_headers(): _, args, _ = call.mock_calls[0] assert args[0] == request - # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "table_name=table_name/value",) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_sample_row_keys_field_headers_async(): - client = BigtableAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) - + # This test doesn't assert anything useful. + assert kw["metadata"] # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable.SampleRowKeysRequest() - - request.table_name = "table_name/value" + request = bigtable.SampleRowKeysRequest({"app_profile_id": "sample1"}) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: - call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) - call.return_value.read = mock.AsyncMock( - side_effect=[bigtable.SampleRowKeysResponse()] - ) - await client.sample_row_keys(request) + call.return_value = iter([bigtable.SampleRowKeysResponse()]) + client.sample_row_keys(request) # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) + assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request - # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "table_name=table_name/value",) in kw["metadata"] + # This test doesn't assert anything useful. + assert kw["metadata"] def test_sample_row_keys_flattened(): @@ -1091,14 +1069,14 @@ async def test_mutate_row_async_from_dict(): await test_mutate_row_async(request_type=dict) -def test_mutate_row_field_headers(): +def test_mutate_row_routing_parameters(): client = BigtableClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable.MutateRowRequest() - - request.table_name = "table_name/value" + request = bigtable.MutateRowRequest( + {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: @@ -1110,36 +1088,26 @@ def test_mutate_row_field_headers(): _, args, _ = call.mock_calls[0] assert args[0] == request - # Establish that the field header was sent. 
_, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "table_name=table_name/value",) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_mutate_row_field_headers_async(): - client = BigtableAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) - + # This test doesn't assert anything useful. + assert kw["metadata"] # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable.MutateRowRequest() - - request.table_name = "table_name/value" + request = bigtable.MutateRowRequest({"app_profile_id": "sample1"}) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable.MutateRowResponse() - ) - await client.mutate_row(request) + call.return_value = bigtable.MutateRowResponse() + client.mutate_row(request) # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) + assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request - # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "table_name=table_name/value",) in kw["metadata"] + # This test doesn't assert anything useful. + assert kw["metadata"] def test_mutate_row_flattened(): @@ -1347,14 +1315,14 @@ async def test_mutate_rows_async_from_dict(): await test_mutate_rows_async(request_type=dict) -def test_mutate_rows_field_headers(): +def test_mutate_rows_routing_parameters(): client = BigtableClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable.MutateRowsRequest() - - request.table_name = "table_name/value" + request = bigtable.MutateRowsRequest( + {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: @@ -1366,37 +1334,26 @@ def test_mutate_rows_field_headers(): _, args, _ = call.mock_calls[0] assert args[0] == request - # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "table_name=table_name/value",) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_mutate_rows_field_headers_async(): - client = BigtableAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) - + # This test doesn't assert anything useful. + assert kw["metadata"] # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable.MutateRowsRequest() - - request.table_name = "table_name/value" + request = bigtable.MutateRowsRequest({"app_profile_id": "sample1"}) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: - call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) - call.return_value.read = mock.AsyncMock( - side_effect=[bigtable.MutateRowsResponse()] - ) - await client.mutate_rows(request) + call.return_value = iter([bigtable.MutateRowsResponse()]) + client.mutate_rows(request) # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) + assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request - # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "table_name=table_name/value",) in kw["metadata"] + # This test doesn't assert anything useful. + assert kw["metadata"] def test_mutate_rows_flattened(): @@ -1573,14 +1530,14 @@ async def test_check_and_mutate_row_async_from_dict(): await test_check_and_mutate_row_async(request_type=dict) -def test_check_and_mutate_row_field_headers(): +def test_check_and_mutate_row_routing_parameters(): client = BigtableClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable.CheckAndMutateRowRequest() - - request.table_name = "table_name/value" + request = bigtable.CheckAndMutateRowRequest( + {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -1594,38 +1551,28 @@ def test_check_and_mutate_row_field_headers(): _, args, _ = call.mock_calls[0] assert args[0] == request - # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "table_name=table_name/value",) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_check_and_mutate_row_field_headers_async(): - client = BigtableAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) - + # This test doesn't assert anything useful. + assert kw["metadata"] # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable.CheckAndMutateRowRequest() - - request.table_name = "table_name/value" + request = bigtable.CheckAndMutateRowRequest({"app_profile_id": "sample1"}) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.check_and_mutate_row), "__call__" ) as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable.CheckAndMutateRowResponse() - ) - await client.check_and_mutate_row(request) + call.return_value = bigtable.CheckAndMutateRowResponse() + client.check_and_mutate_row(request) # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) + assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request - # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "table_name=table_name/value",) in kw["metadata"] + # This test doesn't assert anything useful. + assert kw["metadata"] def test_check_and_mutate_row_flattened(): @@ -1860,6 +1807,203 @@ async def test_check_and_mutate_row_flattened_error_async(): ) +@pytest.mark.parametrize("request_type", [bigtable.PingAndWarmRequest, dict,]) +def test_ping_and_warm(request_type, transport: str = "grpc"): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.ping_and_warm), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = bigtable.PingAndWarmResponse() + response = client.ping_and_warm(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable.PingAndWarmRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable.PingAndWarmResponse) + + +def test_ping_and_warm_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.ping_and_warm), "__call__") as call: + client.ping_and_warm() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable.PingAndWarmRequest() + + +@pytest.mark.asyncio +async def test_ping_and_warm_async( + transport: str = "grpc_asyncio", request_type=bigtable.PingAndWarmRequest +): + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.ping_and_warm), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.PingAndWarmResponse() + ) + response = await client.ping_and_warm(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable.PingAndWarmRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable.PingAndWarmResponse) + + +@pytest.mark.asyncio +async def test_ping_and_warm_async_from_dict(): + await test_ping_and_warm_async(request_type=dict) + + +def test_ping_and_warm_routing_parameters(): + client = BigtableClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable.PingAndWarmRequest( + {"name": "projects/sample1/instances/sample2"} + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.ping_and_warm), "__call__") as call: + call.return_value = bigtable.PingAndWarmResponse() + client.ping_and_warm(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + _, _, kw = call.mock_calls[0] + # This test doesn't assert anything useful. + assert kw["metadata"] + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable.PingAndWarmRequest({"app_profile_id": "sample1"}) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.ping_and_warm), "__call__") as call: + call.return_value = bigtable.PingAndWarmResponse() + client.ping_and_warm(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + _, _, kw = call.mock_calls[0] + # This test doesn't assert anything useful. + assert kw["metadata"] + + +def test_ping_and_warm_flattened(): + client = BigtableClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.ping_and_warm), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable.PingAndWarmResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.ping_and_warm( + name="name_value", app_profile_id="app_profile_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + arg = args[0].app_profile_id + mock_val = "app_profile_id_value" + assert arg == mock_val + + +def test_ping_and_warm_flattened_error(): + client = BigtableClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.ping_and_warm( + bigtable.PingAndWarmRequest(), + name="name_value", + app_profile_id="app_profile_id_value", + ) + + +@pytest.mark.asyncio +async def test_ping_and_warm_flattened_async(): + client = BigtableAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.ping_and_warm), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable.PingAndWarmResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.PingAndWarmResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.ping_and_warm( + name="name_value", app_profile_id="app_profile_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + arg = args[0].app_profile_id + mock_val = "app_profile_id_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_ping_and_warm_flattened_error_async(): + client = BigtableAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.ping_and_warm( + bigtable.PingAndWarmRequest(), + name="name_value", + app_profile_id="app_profile_id_value", + ) + + @pytest.mark.parametrize("request_type", [bigtable.ReadModifyWriteRowRequest, dict,]) def test_read_modify_write_row(request_type, transport: str = "grpc"): client = BigtableClient( @@ -1940,14 +2084,14 @@ async def test_read_modify_write_row_async_from_dict(): await test_read_modify_write_row_async(request_type=dict) -def test_read_modify_write_row_field_headers(): +def test_read_modify_write_row_routing_parameters(): client = BigtableClient(credentials=ga_credentials.AnonymousCredentials(),) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable.ReadModifyWriteRowRequest() - - request.table_name = "table_name/value" + request = bigtable.ReadModifyWriteRowRequest( + {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -1961,38 +2105,28 @@ def test_read_modify_write_row_field_headers(): _, args, _ = call.mock_calls[0] assert args[0] == request - # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "table_name=table_name/value",) in kw["metadata"] - - -@pytest.mark.asyncio -async def test_read_modify_write_row_field_headers_async(): - client = BigtableAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) - + # This test doesn't assert anything useful. + assert kw["metadata"] # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable.ReadModifyWriteRowRequest() - - request.table_name = "table_name/value" + request = bigtable.ReadModifyWriteRowRequest({"app_profile_id": "sample1"}) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.read_modify_write_row), "__call__" ) as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable.ReadModifyWriteRowResponse() - ) - await client.read_modify_write_row(request) + call.return_value = bigtable.ReadModifyWriteRowResponse() + client.read_modify_write_row(request) # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) + assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request - # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "table_name=table_name/value",) in kw["metadata"] + # This test doesn't assert anything useful. 
+ assert kw["metadata"] def test_read_modify_write_row_flattened(): @@ -2219,6 +2353,7 @@ def test_bigtable_base_transport(): "mutate_row", "mutate_rows", "check_and_mutate_row", + "ping_and_warm", "read_modify_write_row", ) for method in methods: @@ -2530,10 +2665,32 @@ def test_bigtable_transport_channel_mtls_with_adc(transport_class): assert transport.grpc_channel == mock_grpc_channel -def test_table_path(): +def test_instance_path(): project = "squid" instance = "clam" - table = "whelk" + expected = "projects/{project}/instances/{instance}".format( + project=project, instance=instance, + ) + actual = BigtableClient.instance_path(project, instance) + assert expected == actual + + +def test_parse_instance_path(): + expected = { + "project": "whelk", + "instance": "octopus", + } + path = BigtableClient.instance_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableClient.parse_instance_path(path) + assert expected == actual + + +def test_table_path(): + project = "oyster" + instance = "nudibranch" + table = "cuttlefish" expected = "projects/{project}/instances/{instance}/tables/{table}".format( project=project, instance=instance, table=table, ) @@ -2543,9 +2700,9 @@ def test_table_path(): def test_parse_table_path(): expected = { - "project": "octopus", - "instance": "oyster", - "table": "nudibranch", + "project": "mussel", + "instance": "winkle", + "table": "nautilus", } path = BigtableClient.table_path(**expected) @@ -2555,7 +2712,7 @@ def test_parse_table_path(): def test_common_billing_account_path(): - billing_account = "cuttlefish" + billing_account = "scallop" expected = "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -2565,7 +2722,7 @@ def test_common_billing_account_path(): def test_parse_common_billing_account_path(): expected = { - "billing_account": "mussel", + "billing_account": "abalone", } path = BigtableClient.common_billing_account_path(**expected) @@ -2575,7 +2732,7 @@ def test_parse_common_billing_account_path(): def test_common_folder_path(): - folder = "winkle" + folder = "squid" expected = "folders/{folder}".format(folder=folder,) actual = BigtableClient.common_folder_path(folder) assert expected == actual @@ -2583,7 +2740,7 @@ def test_common_folder_path(): def test_parse_common_folder_path(): expected = { - "folder": "nautilus", + "folder": "clam", } path = BigtableClient.common_folder_path(**expected) @@ -2593,7 +2750,7 @@ def test_parse_common_folder_path(): def test_common_organization_path(): - organization = "scallop" + organization = "whelk" expected = "organizations/{organization}".format(organization=organization,) actual = BigtableClient.common_organization_path(organization) assert expected == actual @@ -2601,7 +2758,7 @@ def test_common_organization_path(): def test_parse_common_organization_path(): expected = { - "organization": "abalone", + "organization": "octopus", } path = BigtableClient.common_organization_path(**expected) @@ -2611,7 +2768,7 @@ def test_parse_common_organization_path(): def test_common_project_path(): - project = "squid" + project = "oyster" expected = "projects/{project}".format(project=project,) actual = BigtableClient.common_project_path(project) assert expected == actual @@ -2619,7 +2776,7 @@ def test_common_project_path(): def test_parse_common_project_path(): expected = { - "project": "clam", + "project": "nudibranch", } path = BigtableClient.common_project_path(**expected) @@ -2629,8 +2786,8 @@ def test_parse_common_project_path(): def 
test_common_location_path(): - project = "whelk" - location = "octopus" + project = "cuttlefish" + location = "mussel" expected = "projects/{project}/locations/{location}".format( project=project, location=location, ) @@ -2640,8 +2797,8 @@ def test_common_location_path(): def test_parse_common_location_path(): expected = { - "project": "oyster", - "location": "nudibranch", + "project": "winkle", + "location": "nautilus", } path = BigtableClient.common_location_path(**expected) From 8278335c3dbfdb9ac1189aef044b2b6859240152 Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Mon, 28 Feb 2022 11:37:17 -0800 Subject: [PATCH 568/892] chore(main): release 2.6.0 (#517) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- packages/google-cloud-bigtable/CHANGELOG.md | 7 +++++++ packages/google-cloud-bigtable/setup.py | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 5733ebd366d0..62c060c69971 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,13 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.6.0](https://github.com/googleapis/python-bigtable/compare/v2.5.2...v2.6.0) (2022-02-26) + + +### Features + +* add WarmAndPing request for channel priming ([#504](https://github.com/googleapis/python-bigtable/issues/504)) ([df5fc1f](https://github.com/googleapis/python-bigtable/commit/df5fc1f7d6ded88d9bce67f7cc6989981745931f)) + ### [2.5.2](https://github.com/googleapis/python-bigtable/compare/v2.5.1...v2.5.2) (2022-02-24) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index b48340a8ab8e..ffedd10a7b0b 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -22,7 +22,7 @@ name = "google-cloud-bigtable" description = "Google Cloud Bigtable API client library" -version = "2.5.2" +version = "2.6.0" # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' From c08ffc7549d56b2425ee560120da66bea2fa900c Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Tue, 1 Mar 2022 12:12:17 +0000 Subject: [PATCH 569/892] chore(deps): update actions/setup-python action to v3 (#520) Source-Link: https://github.com/googleapis/synthtool/commit/571ee2c3b26182429eddcf115122ee545d7d3787 Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:660abdf857d3ab9aabcd967c163c70e657fcc5653595c709263af5f3fa23ef67 --- packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml | 2 +- packages/google-cloud-bigtable/.github/workflows/docs.yml | 4 ++-- packages/google-cloud-bigtable/.github/workflows/lint.yml | 2 +- packages/google-cloud-bigtable/.github/workflows/unittest.yml | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 8cb43804d999..d9a55fa405e8 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,4 +13,4 @@ # limitations under the License. 
docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:ed1f9983d5a935a89fe8085e8bb97d94e41015252c5b6c9771257cf8624367e6 + digest: sha256:660abdf857d3ab9aabcd967c163c70e657fcc5653595c709263af5f3fa23ef67 diff --git a/packages/google-cloud-bigtable/.github/workflows/docs.yml b/packages/google-cloud-bigtable/.github/workflows/docs.yml index f7b8344c4500..cca4e98bf236 100644 --- a/packages/google-cloud-bigtable/.github/workflows/docs.yml +++ b/packages/google-cloud-bigtable/.github/workflows/docs.yml @@ -10,7 +10,7 @@ jobs: - name: Checkout uses: actions/checkout@v2 - name: Setup Python - uses: actions/setup-python@v2 + uses: actions/setup-python@v3 with: python-version: "3.10" - name: Install nox @@ -26,7 +26,7 @@ jobs: - name: Checkout uses: actions/checkout@v2 - name: Setup Python - uses: actions/setup-python@v2 + uses: actions/setup-python@v3 with: python-version: "3.10" - name: Install nox diff --git a/packages/google-cloud-bigtable/.github/workflows/lint.yml b/packages/google-cloud-bigtable/.github/workflows/lint.yml index 1e8b05c3d7ff..f687324ef2eb 100644 --- a/packages/google-cloud-bigtable/.github/workflows/lint.yml +++ b/packages/google-cloud-bigtable/.github/workflows/lint.yml @@ -10,7 +10,7 @@ jobs: - name: Checkout uses: actions/checkout@v2 - name: Setup Python - uses: actions/setup-python@v2 + uses: actions/setup-python@v3 with: python-version: "3.10" - name: Install nox diff --git a/packages/google-cloud-bigtable/.github/workflows/unittest.yml b/packages/google-cloud-bigtable/.github/workflows/unittest.yml index 074ee2504ca5..d3003e09e0c6 100644 --- a/packages/google-cloud-bigtable/.github/workflows/unittest.yml +++ b/packages/google-cloud-bigtable/.github/workflows/unittest.yml @@ -13,7 +13,7 @@ jobs: - name: Checkout uses: actions/checkout@v2 - name: Setup Python - uses: actions/setup-python@v2 + uses: actions/setup-python@v3 with: python-version: ${{ matrix.python }} - name: Install nox @@ -39,7 +39,7 @@ jobs: - name: Checkout uses: actions/checkout@v2 - name: Setup Python - uses: actions/setup-python@v2 + uses: actions/setup-python@v3 with: python-version: "3.10" - name: Install coverage From ac16f07d1bc326d9dd11349271204bc91360bbd5 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 1 Mar 2022 23:16:42 +0100 Subject: [PATCH 570/892] chore(deps): update all dependencies (#519) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore(deps): update all dependencies * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md Co-authored-by: Owl Bot --- packages/google-cloud-bigtable/.github/workflows/mypy.yml | 4 ++-- .../.github/workflows/system_emulated.yml | 4 ++-- packages/google-cloud-bigtable/samples/beam/requirements.txt | 2 +- packages/google-cloud-bigtable/samples/hello/requirements.txt | 2 +- .../samples/instanceadmin/requirements.txt | 2 +- .../samples/metricscaler/requirements.txt | 4 ++-- .../google-cloud-bigtable/samples/quickstart/requirements.txt | 2 +- .../samples/snippets/filters/requirements.txt | 2 +- .../samples/snippets/reads/requirements.txt | 2 +- .../samples/snippets/writes/requirements.txt | 2 +- .../google-cloud-bigtable/samples/tableadmin/requirements.txt | 2 +- 11 files changed, 14 insertions(+), 14 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/workflows/mypy.yml b/packages/google-cloud-bigtable/.github/workflows/mypy.yml index 5a0f0e090d69..f9f07f4de171 100644 --- 
a/packages/google-cloud-bigtable/.github/workflows/mypy.yml +++ b/packages/google-cloud-bigtable/.github/workflows/mypy.yml @@ -8,9 +8,9 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Setup Python - uses: actions/setup-python@v2 + uses: actions/setup-python@v3 with: python-version: "3.8" - name: Install nox diff --git a/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml b/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml index 8e6c0cfcf0b2..c974d6b113d5 100644 --- a/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml +++ b/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml @@ -12,10 +12,10 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Setup Python - uses: actions/setup-python@v2 + uses: actions/setup-python@v3 with: python-version: '3.8' diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index 4aed9a55d7d0..2bd731ab787a 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ apache-beam==2.36.0 -google-cloud-bigtable==2.5.2 +google-cloud-bigtable==2.6.0 google-cloud-core==2.2.2 diff --git a/packages/google-cloud-bigtable/samples/hello/requirements.txt b/packages/google-cloud-bigtable/samples/hello/requirements.txt index f3158ef18938..117a6e939b73 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.5.2 +google-cloud-bigtable==2.6.0 google-cloud-core==2.2.2 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt index b3fa08aa0f40..1877e7a06de5 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.5.2 +google-cloud-bigtable==2.6.0 backoff==1.11.1 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index ffb371b56454..20596a095a2e 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.5.2 -google-cloud-monitoring==2.8.0 +google-cloud-bigtable==2.6.0 +google-cloud-monitoring==2.9.0 diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt index 307f5ffa4f10..89f83eb341b2 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.5.2 +google-cloud-bigtable==2.6.0 diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt index 711d7d2cdc3d..7f67da9d1c77 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.5.2 
+google-cloud-bigtable==2.6.0 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt index 711d7d2cdc3d..7f67da9d1c77 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.5.2 +google-cloud-bigtable==2.6.0 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt index 0ed46fa412f5..b523e0991b63 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.5.2 \ No newline at end of file +google-cloud-bigtable==2.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt index 307f5ffa4f10..89f83eb341b2 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.5.2 +google-cloud-bigtable==2.6.0 From 20a485b2d89154f1c7776aad49fd3649bb0994d2 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 3 Mar 2022 00:22:37 +0000 Subject: [PATCH 571/892] chore(deps): update actions/checkout action to v3 (#522) Source-Link: https://github.com/googleapis/synthtool/commit/ca879097772aeec2cbb971c3cea8ecc81522b68a Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:6162c384d685c5fe22521d3f37f6fc732bf99a085f6d47b677dbcae97fc21392 --- packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml | 2 +- packages/google-cloud-bigtable/.github/workflows/docs.yml | 4 ++-- packages/google-cloud-bigtable/.github/workflows/lint.yml | 2 +- packages/google-cloud-bigtable/.github/workflows/unittest.yml | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index d9a55fa405e8..480226ac08a9 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,4 +13,4 @@ # limitations under the License. 
docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:660abdf857d3ab9aabcd967c163c70e657fcc5653595c709263af5f3fa23ef67 + digest: sha256:6162c384d685c5fe22521d3f37f6fc732bf99a085f6d47b677dbcae97fc21392 diff --git a/packages/google-cloud-bigtable/.github/workflows/docs.yml b/packages/google-cloud-bigtable/.github/workflows/docs.yml index cca4e98bf236..b46d7305d8cf 100644 --- a/packages/google-cloud-bigtable/.github/workflows/docs.yml +++ b/packages/google-cloud-bigtable/.github/workflows/docs.yml @@ -8,7 +8,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Setup Python uses: actions/setup-python@v3 with: @@ -24,7 +24,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Setup Python uses: actions/setup-python@v3 with: diff --git a/packages/google-cloud-bigtable/.github/workflows/lint.yml b/packages/google-cloud-bigtable/.github/workflows/lint.yml index f687324ef2eb..f512a4960beb 100644 --- a/packages/google-cloud-bigtable/.github/workflows/lint.yml +++ b/packages/google-cloud-bigtable/.github/workflows/lint.yml @@ -8,7 +8,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Setup Python uses: actions/setup-python@v3 with: diff --git a/packages/google-cloud-bigtable/.github/workflows/unittest.yml b/packages/google-cloud-bigtable/.github/workflows/unittest.yml index d3003e09e0c6..e87fe5b7b79a 100644 --- a/packages/google-cloud-bigtable/.github/workflows/unittest.yml +++ b/packages/google-cloud-bigtable/.github/workflows/unittest.yml @@ -11,7 +11,7 @@ jobs: python: ['3.6', '3.7', '3.8', '3.9', '3.10'] steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Setup Python uses: actions/setup-python@v3 with: @@ -37,7 +37,7 @@ jobs: - unit steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Setup Python uses: actions/setup-python@v3 with: From 58c679bff7deb4c5bd1772c151fd340f9e62258d Mon Sep 17 00:00:00 2001 From: Anthonios Partheniou Date: Thu, 3 Mar 2022 14:55:34 -0500 Subject: [PATCH 572/892] fix(deps): require google-api-core>=1.31.5, >=2.3.2 (#526) fix(deps): require proto-plus>=1.15.0 --- packages/google-cloud-bigtable/setup.py | 4 ++-- packages/google-cloud-bigtable/testing/constraints-3.6.txt | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index ffedd10a7b0b..a3f51e45019f 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -32,13 +32,13 @@ # NOTE: Maintainers, please do not require google-api-core>=2.x.x # Until this issue is closed # https://github.com/googleapis/google-cloud-python/issues/10566 - "google-api-core[grpc] >= 1.28.0, <3.0.0dev", + "google-api-core[grpc] >= 1.31.5, <3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0", # NOTE: Maintainers, please do not require google-api-core>=2.x.x # Until this issue is closed # https://github.com/googleapis/google-cloud-python/issues/10566 "google-cloud-core >= 1.4.1, <3.0.0dev", "grpc-google-iam-v1 >= 0.12.3, < 0.13dev", - "proto-plus >= 1.13.0", + "proto-plus >= 1.15.0", ] extras = {"libcst": "libcst >= 0.2.5"} diff --git a/packages/google-cloud-bigtable/testing/constraints-3.6.txt b/packages/google-cloud-bigtable/testing/constraints-3.6.txt index 1e50717bfda5..3d010787d846 100644 --- 
a/packages/google-cloud-bigtable/testing/constraints-3.6.txt +++ b/packages/google-cloud-bigtable/testing/constraints-3.6.txt @@ -5,8 +5,8 @@ # # e.g., if setup.py has "foo >= 1.14.0, < 2.0.0dev", # Then this file should have foo==1.14.0 -google-api-core==1.28.0 +google-api-core==1.31.5 google-cloud-core==1.4.1 grpc-google-iam-v1==0.12.3 -proto-plus==1.13.0 +proto-plus==1.15.0 libcst==0.2.5 From 4b0be76a1465aa6ac0ca012f9cb8371bb405bc68 Mon Sep 17 00:00:00 2001 From: Mariatta Wijaya Date: Fri, 4 Mar 2022 08:16:11 -0800 Subject: [PATCH 573/892] feat: Add support for autoscaling (#509) * feat: Add support for autoscaling - Add the parameters min_serve_nodes, max_serve_nodes, and cpu_utilization_percent - Create disable_autoscaling function - Update documentation and tests - Add validation when scaling config was not set correctly. --- .../google-cloud-bigtable/docs/snippets.py | 19 + .../google/cloud/bigtable/cluster.py | 176 +++++- .../google/cloud/bigtable/instance.py | 15 + .../tests/system/conftest.py | 33 ++ .../tests/system/test_instance_admin.py | 138 +++++ .../tests/unit/test_cluster.py | 511 +++++++++++++++++- 6 files changed, 877 insertions(+), 15 deletions(-) diff --git a/packages/google-cloud-bigtable/docs/snippets.py b/packages/google-cloud-bigtable/docs/snippets.py index eeb39c3bb32c..ee5490afea2f 100644 --- a/packages/google-cloud-bigtable/docs/snippets.py +++ b/packages/google-cloud-bigtable/docs/snippets.py @@ -401,6 +401,25 @@ def test_bigtable_update_cluster(): assert cluster.serve_nodes == 4 +def test_bigtable_cluster_disable_autoscaling(): + # [START bigtable_api_cluster_disable_autoscaling] + from google.cloud.bigtable import Client + + client = Client(admin=True) + instance = client.instance(INSTANCE_ID) + # Create a cluster with autoscaling enabled + cluster = instance.cluster( + CLUSTER_ID, min_serve_nodes=1, max_serve_nodes=2, cpu_utilization_percent=10 + ) + instance.create(clusters=[cluster]) + + # Disable autoscaling + cluster.disable_autoscaling(serve_nodes=4) + # [END bigtable_api_cluster_disable_autoscaling] + + assert cluster.serve_nodes == 4 + + def test_bigtable_create_table(): # [START bigtable_api_create_table] from google.api_core import exceptions diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py index f3e79c6c2e9f..1d0af2c693ce 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py @@ -18,6 +18,7 @@ import re from google.cloud.bigtable_admin_v2.types import instance from google.api_core.exceptions import NotFound +from google.protobuf import field_mask_pb2 _CLUSTER_NAME_RE = re.compile( @@ -36,6 +37,7 @@ class Cluster(object): * :meth:`create` itself * :meth:`update` itself * :meth:`delete` itself + * :meth:`disable_autoscaling` itself :type cluster_id: str :param cluster_id: The ID of the cluster. @@ -52,7 +54,9 @@ class Cluster(object): https://cloud.google.com/bigtable/docs/locations :type serve_nodes: int - :param serve_nodes: (Optional) The number of nodes in the cluster. + :param serve_nodes: (Optional) The number of nodes in the cluster for manual scaling. If any of the + autoscaling configuration are specified, then the autoscaling + configuration will take precedent. :type default_storage_type: int :param default_storage_type: (Optional) The type of storage @@ -85,6 +89,27 @@ class Cluster(object): :data:`google.cloud.bigtable.enums.Cluster.State.CREATING`. 
:data:`google.cloud.bigtable.enums.Cluster.State.RESIZING`. :data:`google.cloud.bigtable.enums.Cluster.State.DISABLED`. + + :type min_serve_nodes: int + :param min_serve_nodes: (Optional) The minimum number of nodes to be set in the cluster for autoscaling. + Must be 1 or greater. + If specified, this configuration takes precedence over + ``serve_nodes``. + If specified, then + ``max_serve_nodes`` and ``cpu_utilization_percent`` must be + specified too. + + :type max_serve_nodes: int + :param max_serve_nodes: (Optional) The maximum number of nodes to be set in the cluster for autoscaling. + If specified, this configuration + takes precedence over ``serve_nodes``. If specified, then + ``min_serve_nodes`` and ``cpu_utilization_percent`` must be + specified too. + + :param cpu_utilization_percent: (Optional) The CPU utilization target for the cluster's workload for autoscaling. + If specified, this configuration takes precedence over ``serve_nodes``. If specified, then + ``min_serve_nodes`` and ``max_serve_nodes`` must be + specified too. """ def __init__( @@ -96,6 +121,9 @@ def __init__( default_storage_type=None, kms_key_name=None, _state=None, + min_serve_nodes=None, + max_serve_nodes=None, + cpu_utilization_percent=None, ): self.cluster_id = cluster_id self._instance = instance @@ -104,10 +132,13 @@ def __init__( self.default_storage_type = default_storage_type self._kms_key_name = kms_key_name self._state = _state + self.min_serve_nodes = min_serve_nodes + self.max_serve_nodes = max_serve_nodes + self.cpu_utilization_percent = cpu_utilization_percent @classmethod def from_pb(cls, cluster_pb, instance): - """Creates an cluster instance from a protobuf. + """Creates a cluster instance from a protobuf. For example: @@ -159,6 +190,17 @@ def _update_from_pb(self, cluster_pb): self.location_id = cluster_pb.location.split("/")[-1] self.serve_nodes = cluster_pb.serve_nodes + + self.min_serve_nodes = ( + cluster_pb.cluster_config.cluster_autoscaling_config.autoscaling_limits.min_serve_nodes + ) + self.max_serve_nodes = ( + cluster_pb.cluster_config.cluster_autoscaling_config.autoscaling_limits.max_serve_nodes + ) + self.cpu_utilization_percent = ( + cluster_pb.cluster_config.cluster_autoscaling_config.autoscaling_targets.cpu_utilization_percent + ) + self.default_storage_type = cluster_pb.default_storage_type if cluster_pb.encryption_config: self._kms_key_name = cluster_pb.encryption_config.kms_key_name @@ -211,6 +253,42 @@ def kms_key_name(self): """str: Customer managed encryption key for the cluster.""" return self._kms_key_name + def _validate_scaling_config(self): + """Validate auto/manual scaling configuration before creating or updating.""" + + if ( + not self.serve_nodes + and not self.min_serve_nodes + and not self.max_serve_nodes + and not self.cpu_utilization_percent + ): + raise ValueError( + "Must specify either serve_nodes or all of the autoscaling configurations (min_serve_nodes, max_serve_nodes, and cpu_utilization_percent)." + ) + if self.serve_nodes and ( + self.max_serve_nodes or self.min_serve_nodes or self.cpu_utilization_percent + ): + raise ValueError( + "Cannot specify both serve_nodes and autoscaling configurations (min_serve_nodes, max_serve_nodes, and cpu_utilization_percent)." 
+ ) + if ( + ( + self.min_serve_nodes + and (not self.max_serve_nodes or not self.cpu_utilization_percent) + ) + or ( + self.max_serve_nodes + and (not self.min_serve_nodes or not self.cpu_utilization_percent) + ) + or ( + self.cpu_utilization_percent + and (not self.min_serve_nodes or not self.max_serve_nodes) + ) + ): + raise ValueError( + "All of autoscaling configurations must be specified at the same time (min_serve_nodes, max_serve_nodes, and cpu_utilization_percent)." + ) + def __eq__(self, other): if not isinstance(other, self.__class__): return NotImplemented @@ -290,7 +368,15 @@ def create(self): :rtype: :class:`~google.api_core.operation.Operation` :returns: The long-running operation corresponding to the create operation. + + :raises: :class:`ValueError ` if the both ``serve_nodes`` and autoscaling configurations + are set at the same time or if none of the ``serve_nodes`` or autoscaling configurations are set + or if the autoscaling configurations are only partially set. + """ + + self._validate_scaling_config() + client = self._instance._client cluster_pb = self._to_pb() @@ -323,20 +409,73 @@ def update(self): before calling :meth:`update`. + If autoscaling is already enabled, manual scaling will be silently ignored. + To disable autoscaling and enable manual scaling, use the :meth:`disable_autoscaling` instead. + :rtype: :class:`Operation` :returns: The long-running operation corresponding to the update operation. + """ + client = self._instance._client - # We are passing `None` for third argument location. - # Location is set only at the time of creation of a cluster - # and can not be changed after cluster has been created. - return client.instance_admin_client.update_cluster( - request={ - "serve_nodes": self.serve_nodes, - "name": self.name, - "location": None, - } + + update_mask_pb = field_mask_pb2.FieldMask() + + if self.serve_nodes: + update_mask_pb.paths.append("serve_nodes") + + if self.min_serve_nodes: + update_mask_pb.paths.append( + "cluster_config.cluster_autoscaling_config.autoscaling_limits.min_serve_nodes" + ) + if self.max_serve_nodes: + update_mask_pb.paths.append( + "cluster_config.cluster_autoscaling_config.autoscaling_limits.max_serve_nodes" + ) + if self.cpu_utilization_percent: + update_mask_pb.paths.append( + "cluster_config.cluster_autoscaling_config.autoscaling_targets.cpu_utilization_percent" + ) + + cluster_pb = self._to_pb() + cluster_pb.name = self.name + + return client.instance_admin_client.partial_update_cluster( + request={"cluster": cluster_pb, "update_mask": update_mask_pb} + ) + + def disable_autoscaling(self, serve_nodes): + """ + Disable autoscaling by specifying the number of nodes. + + For example: + + .. literalinclude:: snippets.py + :start-after: [START bigtable_api_cluster_disable_autoscaling] + :end-before: [END bigtable_api_cluster_disable_autoscaling] + :dedent: 4 + + :type serve_nodes: int + :param serve_nodes: The number of nodes in the cluster. 
+ """ + + client = self._instance._client + + update_mask_pb = field_mask_pb2.FieldMask() + + self.serve_nodes = serve_nodes + self.min_serve_nodes = 0 + self.max_serve_nodes = 0 + self.cpu_utilization_percent = 0 + + update_mask_pb.paths.append("serve_nodes") + update_mask_pb.paths.append("cluster_config.cluster_autoscaling_config") + cluster_pb = self._to_pb() + cluster_pb.name = self.name + + return client.instance_admin_client.partial_update_cluster( + request={"cluster": cluster_pb, "update_mask": update_mask_pb} ) def delete(self): @@ -375,6 +514,7 @@ def _to_pb(self): location = client.instance_admin_client.common_location_path( client.project, self.location_id ) + cluster_pb = instance.Cluster( location=location, serve_nodes=self.serve_nodes, @@ -384,4 +524,18 @@ def _to_pb(self): cluster_pb.encryption_config = instance.Cluster.EncryptionConfig( kms_key_name=self._kms_key_name, ) + + if self.min_serve_nodes: + cluster_pb.cluster_config.cluster_autoscaling_config.autoscaling_limits.min_serve_nodes = ( + self.min_serve_nodes + ) + if self.max_serve_nodes: + cluster_pb.cluster_config.cluster_autoscaling_config.autoscaling_limits.max_serve_nodes = ( + self.max_serve_nodes + ) + if self.cpu_utilization_percent: + cluster_pb.cluster_config.cluster_autoscaling_config.autoscaling_targets.cpu_utilization_percent = ( + self.cpu_utilization_percent + ) + return cluster_pb diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py index 9c22aaa79b53..e838ec9adfd4 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py @@ -228,6 +228,9 @@ def create( serve_nodes=None, default_storage_type=None, clusters=None, + min_serve_nodes=None, + max_serve_nodes=None, + cpu_utilization_percent=None, ): """Create this instance. @@ -303,12 +306,18 @@ def create( location_id=location_id, serve_nodes=serve_nodes, default_storage_type=default_storage_type, + min_serve_nodes=None, + max_serve_nodes=None, + cpu_utilization_percent=None, ) ] elif ( location_id is not None or serve_nodes is not None or default_storage_type is not None + or min_serve_nodes is not None + or max_serve_nodes is not None + or cpu_utilization_percent is not None ): raise ValueError( "clusters and one of location_id, serve_nodes, \ @@ -546,6 +555,9 @@ def cluster( serve_nodes=None, default_storage_type=None, kms_key_name=None, + min_serve_nodes=None, + max_serve_nodes=None, + cpu_utilization_percent=None, ): """Factory to create a cluster associated with this instance. 
@@ -605,6 +617,9 @@ def cluster( serve_nodes=serve_nodes, default_storage_type=default_storage_type, kms_key_name=kms_key_name, + min_serve_nodes=min_serve_nodes, + max_serve_nodes=max_serve_nodes, + cpu_utilization_percent=cpu_utilization_percent, ) def list_clusters(self): diff --git a/packages/google-cloud-bigtable/tests/system/conftest.py b/packages/google-cloud-bigtable/tests/system/conftest.py index 6f6cdc2d1a95..fdf111a53d93 100644 --- a/packages/google-cloud-bigtable/tests/system/conftest.py +++ b/packages/google-cloud-bigtable/tests/system/conftest.py @@ -107,6 +107,24 @@ def admin_cluster(admin_instance, admin_cluster_id, location_id, serve_nodes): ) +@pytest.fixture(scope="session") +def admin_cluster_with_autoscaling( + admin_instance, + admin_cluster_id, + location_id, + min_serve_nodes, + max_serve_nodes, + cpu_utilization_percent, +): + return admin_instance.cluster( + admin_cluster_id, + location_id=location_id, + min_serve_nodes=min_serve_nodes, + max_serve_nodes=max_serve_nodes, + cpu_utilization_percent=cpu_utilization_percent, + ) + + @pytest.fixture(scope="session") def admin_instance_populated(admin_instance, admin_cluster, in_emulator): # Emulator does not support instance admin operations (create / delete). @@ -170,3 +188,18 @@ def instances_to_delete(): for instance in instances_to_delete: _helpers.retry_429(instance.delete)() + + +@pytest.fixture(scope="session") +def min_serve_nodes(in_emulator): + return 1 + + +@pytest.fixture(scope="session") +def max_serve_nodes(in_emulator): + return 8 + + +@pytest.fixture(scope="session") +def cpu_utilization_percent(in_emulator): + return 10 diff --git a/packages/google-cloud-bigtable/tests/system/test_instance_admin.py b/packages/google-cloud-bigtable/tests/system/test_instance_admin.py index c2cf21291e6e..36b61d6ddf6b 100644 --- a/packages/google-cloud-bigtable/tests/system/test_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/system/test_instance_admin.py @@ -591,6 +591,44 @@ def test_cluster_create( assert not cluster_2.exists() +def test_cluster_create_w_autoscaling( + admin_instance_populated, admin_instance_id, skip_on_emulator, +): + alt_cluster_id = f"{admin_instance_id}-c2" + alt_location_id = "us-central1-f" + min_serve_nodes = 1 + max_serve_nodes = 8 + cpu_utilization_percent = 20 + + cluster_2 = admin_instance_populated.cluster( + alt_cluster_id, + location_id=alt_location_id, + min_serve_nodes=min_serve_nodes, + max_serve_nodes=max_serve_nodes, + cpu_utilization_percent=cpu_utilization_percent, + default_storage_type=(enums.StorageType.SSD), + ) + operation = cluster_2.create() + operation.result(timeout=60) # Ensure the operation completes. + + # Create a new object instance, reload and make sure it is the same. 
+ alt_cluster = admin_instance_populated.cluster(alt_cluster_id) + alt_cluster.reload() + + assert cluster_2 == alt_cluster + assert cluster_2.location_id == alt_cluster.location_id + assert alt_cluster.state == enums.Cluster.State.READY + assert cluster_2.min_serve_nodes == alt_cluster.min_serve_nodes + assert cluster_2.max_serve_nodes == alt_cluster.max_serve_nodes + assert cluster_2.cpu_utilization_percent == alt_cluster.cpu_utilization_percent + assert cluster_2.default_storage_type == alt_cluster.default_storage_type + + # Delete the newly created cluster and confirm + assert cluster_2.exists() + cluster_2.delete() + assert not cluster_2.exists() + + def test_cluster_update( admin_instance_populated, admin_cluster_id, @@ -614,3 +652,103 @@ def test_cluster_update( admin_cluster.serve_nodes = serve_nodes operation = admin_cluster.update() operation.result(timeout=60) # Ensure the operation completes. + + +def test_cluster_update_w_autoscaling( + admin_instance_populated, + admin_cluster_id, + admin_cluster_with_autoscaling, + min_serve_nodes, + max_serve_nodes, + cpu_utilization_percent, + skip_on_emulator, +): + new_min_serve_nodes = min_serve_nodes + 1 + new_max_serve_nodes = max_serve_nodes + 1 + new_cpu_utilization_percent = cpu_utilization_percent + 10 + admin_cluster_with_autoscaling.min_serve_nodes = new_min_serve_nodes + admin_cluster_with_autoscaling.max_serve_nodes = new_max_serve_nodes + admin_cluster_with_autoscaling.cpu_utilization_percent = new_cpu_utilization_percent + + operation = admin_cluster_with_autoscaling.update() + operation.result(timeout=60) # Ensure the operation completes. + + # Create a new cluster instance and reload it. + alt_cluster = admin_instance_populated.cluster(admin_cluster_id) + alt_cluster.reload() + assert alt_cluster.min_serve_nodes == new_min_serve_nodes + assert alt_cluster.max_serve_nodes == new_max_serve_nodes + assert alt_cluster.cpu_utilization_percent == new_cpu_utilization_percent + + # Put the cluster back the way it was for the other test cases. + admin_cluster_with_autoscaling.min_serve_nodes = min_serve_nodes + admin_cluster_with_autoscaling.max_serve_nodes = max_serve_nodes + admin_cluster_with_autoscaling.cpu_utilization_percent = cpu_utilization_percent + operation = admin_cluster_with_autoscaling.update() + operation.result(timeout=60) # Ensure the operation completes. + + +def test_cluster_update_w_autoscaling_partial( + admin_instance_populated, + admin_cluster_id, + admin_cluster_with_autoscaling, + min_serve_nodes, + max_serve_nodes, + cpu_utilization_percent, + skip_on_emulator, +): + new_min_serve_nodes = min_serve_nodes + 1 + + admin_cluster_with_autoscaling.min_serve_nodes = new_min_serve_nodes + + operation = admin_cluster_with_autoscaling.update() + operation.result(timeout=60) # Ensure the operation completes. + + # Create a new cluster instance and reload it. + alt_cluster = admin_instance_populated.cluster(admin_cluster_id) + alt_cluster.reload() + + # assert that only the min_serve_nodes was changed + + assert alt_cluster.min_serve_nodes == new_min_serve_nodes + assert alt_cluster.max_serve_nodes == max_serve_nodes + assert alt_cluster.cpu_utilization_percent == cpu_utilization_percent + + # Put the cluster back the way it was for the other test cases. 
+ admin_cluster_with_autoscaling.min_serve_nodes = min_serve_nodes + admin_cluster_with_autoscaling.max_serve_nodes = max_serve_nodes + admin_cluster_with_autoscaling.cpu_utilization_percent = cpu_utilization_percent + operation = admin_cluster_with_autoscaling.update() + operation.result(timeout=60) # Ensure the operation completes. + + +def test_cluster_disable_autoscaling( + admin_instance_populated, + admin_cluster_id, + admin_cluster_with_autoscaling, + serve_nodes, + min_serve_nodes, + max_serve_nodes, + cpu_utilization_percent, + skip_on_emulator, +): + operation = admin_cluster_with_autoscaling.disable_autoscaling( + serve_nodes=serve_nodes + ) + operation.result(timeout=60) # Ensure the operation completes. + + # Create a new cluster instance and reload it. + alt_cluster = admin_instance_populated.cluster(admin_cluster_id) + alt_cluster.reload() + assert alt_cluster.min_serve_nodes == 0 + assert alt_cluster.max_serve_nodes == 0 + assert alt_cluster.cpu_utilization_percent == 0 + assert alt_cluster.serve_nodes == serve_nodes + + # Put the cluster back the way it was for the other test cases. + admin_cluster_with_autoscaling.min_serve_nodes = min_serve_nodes + admin_cluster_with_autoscaling.max_serve_nodes = max_serve_nodes + admin_cluster_with_autoscaling.cpu_utilization_percent = cpu_utilization_percent + admin_cluster_with_autoscaling.serve_nodes = 0 + operation = admin_cluster_with_autoscaling.update() + operation.result(timeout=60) # Ensure the operation completes. diff --git a/packages/google-cloud-bigtable/tests/unit/test_cluster.py b/packages/google-cloud-bigtable/tests/unit/test_cluster.py index 74ca98830a8e..56c0a3cc57ea 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_cluster.py +++ b/packages/google-cloud-bigtable/tests/unit/test_cluster.py @@ -36,6 +36,10 @@ CRYPTO_KEY_ID = "crypto-key-id" KMS_KEY_NAME = f"{LOCATION_PATH}/keyRings/{KEY_RING_ID}/cryptoKeys/{CRYPTO_KEY_ID}" +MIN_SERVE_NODES = 1 +MAX_SERVE_NODES = 8 +CPU_UTILIZATION_PERCENT = 20 + def _make_cluster(*args, **kwargs): from google.cloud.bigtable.cluster import Cluster @@ -62,6 +66,9 @@ def test_cluster_constructor_defaults(): assert cluster.serve_nodes is None assert cluster.default_storage_type is None assert cluster.kms_key_name is None + assert cluster.min_serve_nodes is None + assert cluster.max_serve_nodes is None + assert cluster.cpu_utilization_percent is None def test_cluster_constructor_explicit(): @@ -148,6 +155,9 @@ def test_cluster_from_pb_success(): assert cluster.serve_nodes == SERVE_NODES assert cluster.default_storage_type == storage_type assert cluster.kms_key_name == KMS_KEY_NAME + assert cluster.min_serve_nodes == 0 + assert cluster.max_serve_nodes == 0 + assert cluster.cpu_utilization_percent == 0 def test_cluster_from_pb_w_bad_cluster_name(): @@ -192,6 +202,53 @@ def test_cluster_from_pb_w_project_mistmatch(): Cluster.from_pb(cluster_pb, instance) +def test_cluster_from_pb_w_autoscaling(): + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 + from google.cloud.bigtable.cluster import Cluster + from google.cloud.bigtable import enums + + client = _Client(PROJECT) + instance = _Instance(INSTANCE_ID, client) + + location = LOCATION_PATH + LOCATION_ID + state = enums.Cluster.State.RESIZING + storage_type = enums.StorageType.SSD + + cluster_config = data_v2_pb2.Cluster.ClusterConfig( + cluster_autoscaling_config=data_v2_pb2.Cluster.ClusterAutoscalingConfig( + autoscaling_limits=data_v2_pb2.AutoscalingLimits( + min_serve_nodes=MIN_SERVE_NODES, 
max_serve_nodes=MAX_SERVE_NODES, + ), + autoscaling_targets=data_v2_pb2.AutoscalingTargets( + cpu_utilization_percent=CPU_UTILIZATION_PERCENT + ), + ), + ) + cluster_pb = data_v2_pb2.Cluster( + name=CLUSTER_NAME, + location=location, + state=state, + cluster_config=cluster_config, + default_storage_type=storage_type, + encryption_config=data_v2_pb2.Cluster.EncryptionConfig( + kms_key_name=KMS_KEY_NAME, + ), + ) + + cluster = Cluster.from_pb(cluster_pb, instance) + assert isinstance(cluster, Cluster) + assert cluster._instance == instance + assert cluster.cluster_id == CLUSTER_ID + assert cluster.location_id == LOCATION_ID + assert cluster.state == state + assert cluster.serve_nodes == 0 + assert cluster.default_storage_type == storage_type + assert cluster.kms_key_name == KMS_KEY_NAME + assert cluster.min_serve_nodes == MIN_SERVE_NODES + assert cluster.max_serve_nodes == MAX_SERVE_NODES + assert cluster.cpu_utilization_percent == CPU_UTILIZATION_PERCENT + + def test_cluster___eq__(): client = _Client(PROJECT) instance = _Instance(INSTANCE_ID, client) @@ -465,9 +522,77 @@ def test_cluster_create_w_cmek(): api.create_cluster.assert_called_once_with(request=expected_request) +def test_cluster_create_w_autoscaling(): + import datetime + from google.longrunning import operations_pb2 + from google.protobuf.any_pb2 import Any + from google.cloud.bigtable_admin_v2.types import ( + bigtable_instance_admin as messages_v2_pb2, + ) + from google.cloud._helpers import _datetime_to_pb_timestamp + from google.cloud.bigtable.instance import Instance + from google.cloud.bigtable_admin_v2.types import instance as instance_v2_pb2 + from google.cloud.bigtable.enums import StorageType + + NOW = datetime.datetime.utcnow() + NOW_PB = _datetime_to_pb_timestamp(NOW) + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials, admin=True) + STORAGE_TYPE_SSD = StorageType.SSD + LOCATION = LOCATION_PATH + LOCATION_ID + instance = Instance(INSTANCE_ID, client) + cluster = _make_cluster( + CLUSTER_ID, + instance, + location_id=LOCATION_ID, + default_storage_type=STORAGE_TYPE_SSD, + min_serve_nodes=MIN_SERVE_NODES, + max_serve_nodes=MAX_SERVE_NODES, + cpu_utilization_percent=CPU_UTILIZATION_PERCENT, + ) + metadata = messages_v2_pb2.CreateClusterMetadata(request_time=NOW_PB) + type_url = "type.googleapis.com/{}".format( + messages_v2_pb2.CreateClusterMetadata._meta._pb.DESCRIPTOR.full_name + ) + response_pb = operations_pb2.Operation( + name=OP_NAME, + metadata=Any(type_url=type_url, value=metadata._pb.SerializeToString()), + ) + + api = client._instance_admin_client = _make_instance_admin_client() + api.common_location_path.return_value = LOCATION + api.instance_path.return_value = instance.name + api.create_cluster.return_value = response_pb + + cluster.create() + + cluster_config = instance_v2_pb2.Cluster.ClusterConfig( + cluster_autoscaling_config=instance_v2_pb2.Cluster.ClusterAutoscalingConfig( + autoscaling_limits=instance_v2_pb2.AutoscalingLimits( + min_serve_nodes=MIN_SERVE_NODES, max_serve_nodes=MAX_SERVE_NODES, + ), + autoscaling_targets=instance_v2_pb2.AutoscalingTargets( + cpu_utilization_percent=CPU_UTILIZATION_PERCENT + ), + ), + ) + expected_request_cluster = instance_v2_pb2.Cluster( + location=LOCATION, + default_storage_type=cluster.default_storage_type, + cluster_config=cluster_config, + ) + expected_request = { + "parent": instance.name, + "cluster_id": CLUSTER_ID, + "cluster": expected_request_cluster, + } + 
api.create_cluster.assert_called_once_with(request=expected_request) + + def test_cluster_update(): import datetime from google.longrunning import operations_pb2 + from google.protobuf import field_mask_pb2 from google.protobuf.any_pb2 import Any from google.cloud._helpers import _datetime_to_pb_timestamp from google.cloud.bigtable_admin_v2.types import ( @@ -481,6 +606,7 @@ def test_cluster_update(): credentials = _make_credentials() client = _make_client(project=PROJECT, credentials=credentials, admin=True) STORAGE_TYPE_SSD = StorageType.SSD + LOCATION = LOCATION_PATH + LOCATION_ID instance = _Instance(INSTANCE_ID, client) cluster = _make_cluster( CLUSTER_ID, @@ -503,15 +629,392 @@ def test_cluster_update(): "projects/project/instances/instance-id/clusters/cluster-id" ) api.update_cluster.return_value = response_pb + api.common_location_path.return_value = LOCATION + + cluster.update() + cluster_pb = cluster._to_pb() + cluster_pb.name = cluster.name + update_mask_pb = field_mask_pb2.FieldMask(paths=["serve_nodes"]) + + expected_request = { + "cluster": cluster_pb, + "update_mask": update_mask_pb, + } + api.partial_update_cluster.assert_called_once_with(request=expected_request) + + assert ( + cluster_pb.cluster_config.cluster_autoscaling_config.autoscaling_limits.min_serve_nodes + == 0 + ) + assert ( + cluster_pb.cluster_config.cluster_autoscaling_config.autoscaling_limits.max_serve_nodes + == 0 + ) + assert ( + cluster_pb.cluster_config.cluster_autoscaling_config.autoscaling_targets.cpu_utilization_percent + == 0 + ) + + +def test_cluster_update_w_autoscaling(): + import datetime + from google.longrunning import operations_pb2 + from google.protobuf import field_mask_pb2 + from google.protobuf.any_pb2 import Any + from google.cloud._helpers import _datetime_to_pb_timestamp + from google.cloud.bigtable_admin_v2.types import ( + bigtable_instance_admin as messages_v2_pb2, + ) + from google.cloud.bigtable.enums import StorageType + + NOW = datetime.datetime.utcnow() + NOW_PB = _datetime_to_pb_timestamp(NOW) + + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials, admin=True) + STORAGE_TYPE_SSD = StorageType.SSD + LOCATION = LOCATION_PATH + LOCATION_ID + instance = _Instance(INSTANCE_ID, client) + cluster = _make_cluster( + CLUSTER_ID, + instance, + location_id=LOCATION_ID, + default_storage_type=STORAGE_TYPE_SSD, + min_serve_nodes=2, + ) + metadata = messages_v2_pb2.UpdateClusterMetadata(request_time=NOW_PB) + type_url = "type.googleapis.com/{}".format( + messages_v2_pb2.UpdateClusterMetadata._meta._pb.DESCRIPTOR.full_name + ) + response_pb = operations_pb2.Operation( + name=OP_NAME, + metadata=Any(type_url=type_url, value=metadata._pb.SerializeToString()), + ) + cluster.min_serve_nodes = 2 + + api = client._instance_admin_client = _make_instance_admin_client() + api.cluster_path.return_value = ( + "projects/project/instances/instance-id/clusters/cluster-id" + ) + api.update_cluster.return_value = response_pb + api.common_location_path.return_value = LOCATION cluster.update() + cluster_pb = cluster._to_pb() + cluster_pb.name = cluster.name + update_mask_pb = field_mask_pb2.FieldMask( + paths=[ + "cluster_config.cluster_autoscaling_config.autoscaling_limits.min_serve_nodes" + ] + ) expected_request = { - "name": "projects/project/instances/instance-id/clusters/cluster-id", - "serve_nodes": 5, - "location": None, + "cluster": cluster_pb, + "update_mask": update_mask_pb, } - api.update_cluster.assert_called_once_with(request=expected_request) + 
api.partial_update_cluster.assert_called_once_with(request=expected_request) + + +def test_cluster_update_w_partial_autoscaling_config(): + import datetime + from google.longrunning import operations_pb2 + from google.protobuf import field_mask_pb2 + from google.protobuf.any_pb2 import Any + from google.cloud._helpers import _datetime_to_pb_timestamp + from google.cloud.bigtable_admin_v2.types import ( + bigtable_instance_admin as messages_v2_pb2, + ) + from google.cloud.bigtable.enums import StorageType + + NOW = datetime.datetime.utcnow() + NOW_PB = _datetime_to_pb_timestamp(NOW) + + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials, admin=True) + STORAGE_TYPE_SSD = StorageType.SSD + LOCATION = LOCATION_PATH + LOCATION_ID + instance = _Instance(INSTANCE_ID, client) + + cluster_config = [ + {"min_serve_nodes": MIN_SERVE_NODES}, + {"max_serve_nodes": MAX_SERVE_NODES}, + {"cpu_utilization_percent": CPU_UTILIZATION_PERCENT}, + { + "min_serve_nodes": MIN_SERVE_NODES, + "cpu_utilization_percent": CPU_UTILIZATION_PERCENT, + }, + {"min_serve_nodes": MIN_SERVE_NODES, "max_serve_nodes": MAX_SERVE_NODES}, + { + "max_serve_nodes": MAX_SERVE_NODES, + "cpu_utilization_percent": CPU_UTILIZATION_PERCENT, + }, + ] + for config in cluster_config: + + cluster = _make_cluster( + CLUSTER_ID, + instance, + location_id=LOCATION_ID, + default_storage_type=STORAGE_TYPE_SSD, + **config, + ) + metadata = messages_v2_pb2.UpdateClusterMetadata(request_time=NOW_PB) + type_url = "type.googleapis.com/{}".format( + messages_v2_pb2.UpdateClusterMetadata._meta._pb.DESCRIPTOR.full_name + ) + response_pb = operations_pb2.Operation( + name=OP_NAME, + metadata=Any(type_url=type_url, value=metadata._pb.SerializeToString()), + ) + api = client._instance_admin_client = _make_instance_admin_client() + api.cluster_path.return_value = ( + "projects/project/instances/instance-id/clusters/cluster-id" + ) + api.update_cluster.return_value = response_pb + api.common_location_path.return_value = LOCATION + + cluster.update() + cluster_pb = cluster._to_pb() + cluster_pb.name = cluster.name + + expected_paths = [] + for key, _ in config.items(): + if key == "min_serve_nodes": + expected_paths.append( + "cluster_config.cluster_autoscaling_config.autoscaling_limits.min_serve_nodes" + ) + if key == "max_serve_nodes": + expected_paths.append( + "cluster_config.cluster_autoscaling_config.autoscaling_limits.max_serve_nodes" + ) + if key == "cpu_utilization_percent": + expected_paths.append( + "cluster_config.cluster_autoscaling_config.autoscaling_targets.cpu_utilization_percent" + ) + update_mask_pb = field_mask_pb2.FieldMask(paths=expected_paths) + + expected_request = { + "cluster": cluster_pb, + "update_mask": update_mask_pb, + } + api.partial_update_cluster.assert_called_once_with(request=expected_request) + + +def test_cluster_update_w_both_manual_and_autoscaling(): + import datetime + from google.longrunning import operations_pb2 + from google.protobuf import field_mask_pb2 + from google.protobuf.any_pb2 import Any + from google.cloud._helpers import _datetime_to_pb_timestamp + from google.cloud.bigtable_admin_v2.types import ( + bigtable_instance_admin as messages_v2_pb2, + ) + from google.cloud.bigtable.enums import StorageType + + NOW = datetime.datetime.utcnow() + NOW_PB = _datetime_to_pb_timestamp(NOW) + + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials, admin=True) + STORAGE_TYPE_SSD = StorageType.SSD + LOCATION = LOCATION_PATH + 
LOCATION_ID + instance = _Instance(INSTANCE_ID, client) + cluster = _make_cluster( + CLUSTER_ID, + instance, + location_id=LOCATION_ID, + default_storage_type=STORAGE_TYPE_SSD, + ) + cluster.max_serve_nodes = 2 + cluster.serve_nodes = SERVE_NODES + metadata = messages_v2_pb2.UpdateClusterMetadata(request_time=NOW_PB) + type_url = "type.googleapis.com/{}".format( + messages_v2_pb2.UpdateClusterMetadata._meta._pb.DESCRIPTOR.full_name + ) + response_pb = operations_pb2.Operation( + name=OP_NAME, + metadata=Any(type_url=type_url, value=metadata._pb.SerializeToString()), + ) + api = client._instance_admin_client = _make_instance_admin_client() + api.cluster_path.return_value = ( + "projects/project/instances/instance-id/clusters/cluster-id" + ) + api.update_cluster.return_value = response_pb + api.common_location_path.return_value = LOCATION + + cluster.update() + cluster_pb = cluster._to_pb() + cluster_pb.name = cluster.name + + expected_paths = [ + "serve_nodes", + "cluster_config.cluster_autoscaling_config.autoscaling_limits.max_serve_nodes", + ] + + update_mask_pb = field_mask_pb2.FieldMask(paths=expected_paths) + + expected_request = { + "cluster": cluster_pb, + "update_mask": update_mask_pb, + } + api.partial_update_cluster.assert_called_once_with(request=expected_request) + + +def test_cluster_disable_autoscaling(): + import datetime + from google.longrunning import operations_pb2 + from google.protobuf import field_mask_pb2 + from google.protobuf.any_pb2 import Any + from google.cloud.bigtable_admin_v2.types import ( + bigtable_instance_admin as messages_v2_pb2, + ) + from google.cloud._helpers import _datetime_to_pb_timestamp + from google.cloud.bigtable.instance import Instance + from google.cloud.bigtable.enums import StorageType + + NOW = datetime.datetime.utcnow() + NOW_PB = _datetime_to_pb_timestamp(NOW) + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials, admin=True) + STORAGE_TYPE_SSD = StorageType.SSD + LOCATION = LOCATION_PATH + LOCATION_ID + instance = Instance(INSTANCE_ID, client) + cluster = _make_cluster( + CLUSTER_ID, + instance, + location_id=LOCATION_ID, + default_storage_type=STORAGE_TYPE_SSD, + min_serve_nodes=MIN_SERVE_NODES, + max_serve_nodes=MAX_SERVE_NODES, + cpu_utilization_percent=CPU_UTILIZATION_PERCENT, + ) + metadata = messages_v2_pb2.CreateClusterMetadata(request_time=NOW_PB) + type_url = "type.googleapis.com/{}".format( + messages_v2_pb2.CreateClusterMetadata._meta._pb.DESCRIPTOR.full_name + ) + response_pb = operations_pb2.Operation( + name=OP_NAME, + metadata=Any(type_url=type_url, value=metadata._pb.SerializeToString()), + ) + + api = client._instance_admin_client = _make_instance_admin_client() + api.common_location_path.return_value = LOCATION + api.instance_path.return_value = instance.name + api.create_cluster.return_value = response_pb + api.cluster_path.return_value = CLUSTER_NAME + + cluster.create() + + cluster.disable_autoscaling(serve_nodes=SERVE_NODES) + + cluster_pb = cluster._to_pb() + cluster_pb.name = cluster.name + update_mask_pb = field_mask_pb2.FieldMask( + paths=["serve_nodes", "cluster_config.cluster_autoscaling_config"] + ) + + expected_request = { + "cluster": cluster_pb, + "update_mask": update_mask_pb, + } + api.partial_update_cluster.assert_called_once_with(request=expected_request) + + assert cluster.min_serve_nodes == 0 + assert cluster.max_serve_nodes == 0 + assert cluster.cpu_utilization_percent == 0 + + +def test_create_cluster_with_both_manual_and_autoscaling(): + + from 
google.cloud.bigtable.instance import Instance + from google.cloud.bigtable.enums import StorageType + + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials, admin=True) + STORAGE_TYPE_SSD = StorageType.SSD + instance = Instance(INSTANCE_ID, client) + cluster = _make_cluster( + CLUSTER_ID, + instance, + location_id=LOCATION_ID, + serve_nodes=SERVE_NODES, + default_storage_type=STORAGE_TYPE_SSD, + min_serve_nodes=MIN_SERVE_NODES, + max_serve_nodes=MAX_SERVE_NODES, + cpu_utilization_percent=CPU_UTILIZATION_PERCENT, + ) + + with pytest.raises(ValueError) as excinfo: + cluster.create() + assert ( + str(excinfo.value) + == "Cannot specify both serve_nodes and autoscaling configurations (min_serve_nodes, max_serve_nodes, and cpu_utilization_percent)." + ) + + +def test_create_cluster_with_partial_autoscaling_config(): + + from google.cloud.bigtable.instance import Instance + from google.cloud.bigtable.enums import StorageType + + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials, admin=True) + STORAGE_TYPE_SSD = StorageType.SSD + instance = Instance(INSTANCE_ID, client) + + cluster_config = [ + {"min_serve_nodes": MIN_SERVE_NODES}, + {"max_serve_nodes": MAX_SERVE_NODES}, + {"cpu_utilization_percent": CPU_UTILIZATION_PERCENT}, + { + "min_serve_nodes": MIN_SERVE_NODES, + "cpu_utilization_percent": CPU_UTILIZATION_PERCENT, + }, + {"min_serve_nodes": MIN_SERVE_NODES, "max_serve_nodes": MAX_SERVE_NODES}, + { + "max_serve_nodes": MAX_SERVE_NODES, + "cpu_utilization_percent": CPU_UTILIZATION_PERCENT, + }, + ] + for config in cluster_config: + cluster = _make_cluster( + CLUSTER_ID, + instance, + location_id=LOCATION_ID, + default_storage_type=STORAGE_TYPE_SSD, + **config, + ) + + with pytest.raises(ValueError) as excinfo: + cluster.create() + assert ( + str(excinfo.value) + == "All of autoscaling configurations must be specified at the same time (min_serve_nodes, max_serve_nodes, and cpu_utilization_percent)." + ) + + +def test_create_cluster_with_no_scaling_config(): + + from google.cloud.bigtable.instance import Instance + from google.cloud.bigtable.enums import StorageType + + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials, admin=True) + STORAGE_TYPE_SSD = StorageType.SSD + instance = Instance(INSTANCE_ID, client) + cluster = _make_cluster( + CLUSTER_ID, + instance, + location_id=LOCATION_ID, + default_storage_type=STORAGE_TYPE_SSD, + ) + + with pytest.raises(ValueError) as excinfo: + cluster.create() + assert ( + str(excinfo.value) + == "Must specify either serve_nodes or all of the autoscaling configurations (min_serve_nodes, max_serve_nodes, and cpu_utilization_percent)." 
+ ) def test_cluster_delete(): From 4e67ef370478ca1c397a0bbde4cbb41d78c507e5 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Fri, 4 Mar 2022 12:56:16 -0500 Subject: [PATCH 574/892] chore: Adding support for pytest-xdist and pytest-parallel (#528) Source-Link: https://github.com/googleapis/synthtool/commit/82f5cb283efffe96e1b6cd634738e0e7de2cd90a Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:5d8da01438ece4021d135433f2cf3227aa39ef0eaccc941d62aa35e6902832ae Co-authored-by: Owl Bot --- .../.github/.OwlBot.lock.yaml | 2 +- .../samples/beam/noxfile.py | 80 +++++++++++-------- .../samples/hello/noxfile.py | 80 +++++++++++-------- .../samples/hello_happybase/noxfile.py | 80 +++++++++++-------- .../samples/instanceadmin/noxfile.py | 80 +++++++++++-------- .../samples/metricscaler/noxfile.py | 80 +++++++++++-------- .../samples/quickstart/noxfile.py | 80 +++++++++++-------- .../samples/quickstart_happybase/noxfile.py | 80 +++++++++++-------- .../samples/snippets/filters/noxfile.py | 80 +++++++++++-------- .../samples/snippets/reads/noxfile.py | 80 +++++++++++-------- .../samples/snippets/writes/noxfile.py | 80 +++++++++++-------- .../samples/tableadmin/noxfile.py | 80 +++++++++++-------- 12 files changed, 507 insertions(+), 375 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 480226ac08a9..7e08e05a380c 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,4 +13,4 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:6162c384d685c5fe22521d3f37f6fc732bf99a085f6d47b677dbcae97fc21392 + digest: sha256:5d8da01438ece4021d135433f2cf3227aa39ef0eaccc941d62aa35e6902832ae diff --git a/packages/google-cloud-bigtable/samples/beam/noxfile.py b/packages/google-cloud-bigtable/samples/beam/noxfile.py index 5b10d2811707..d9d4d1469bec 100644 --- a/packages/google-cloud-bigtable/samples/beam/noxfile.py +++ b/packages/google-cloud-bigtable/samples/beam/noxfile.py @@ -186,42 +186,54 @@ def _session_tests( # check for presence of tests test_list = glob.glob("*_test.py") + glob.glob("test_*.py") test_list.extend(glob.glob("tests")) + if len(test_list) == 0: print("No tests found, skipping directory.") - else: - if TEST_CONFIG["pip_version_override"]: - pip_version = TEST_CONFIG["pip_version_override"] - session.install(f"pip=={pip_version}") - """Runs py.test for a particular project.""" - if os.path.exists("requirements.txt"): - if os.path.exists("constraints.txt"): - session.install("-r", "requirements.txt", "-c", "constraints.txt") - else: - session.install("-r", "requirements.txt") - - if os.path.exists("requirements-test.txt"): - if os.path.exists("constraints-test.txt"): - session.install( - "-r", "requirements-test.txt", "-c", "constraints-test.txt" - ) - else: - session.install("-r", "requirements-test.txt") - - if INSTALL_LIBRARY_FROM_SOURCE: - session.install("-e", _get_repo_root()) - - if post_install: - post_install(session) - - session.run( - "pytest", - *(PYTEST_COMMON_ARGS + session.posargs), - # Pytest will return 5 when no tests are collected. This can happen - # on travis where slow and flaky tests are excluded. 
- # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html - success_codes=[0, 5], - env=get_pytest_env_vars(), - ) + return + + if TEST_CONFIG["pip_version_override"]: + pip_version = TEST_CONFIG["pip_version_override"] + session.install(f"pip=={pip_version}") + """Runs py.test for a particular project.""" + concurrent_args = [] + if os.path.exists("requirements.txt"): + if os.path.exists("constraints.txt"): + session.install("-r", "requirements.txt", "-c", "constraints.txt") + else: + session.install("-r", "requirements.txt") + with open("requirements.txt") as rfile: + packages = rfile.read() + + if os.path.exists("requirements-test.txt"): + if os.path.exists("constraints-test.txt"): + session.install( + "-r", "requirements-test.txt", "-c", "constraints-test.txt" + ) + else: + session.install("-r", "requirements-test.txt") + with open("requirements-test.txt") as rtfile: + packages += rtfile.read() + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + if "pytest-parallel" in packages: + concurrent_args.extend(['--workers', 'auto', '--tests-per-worker', 'auto']) + elif "pytest-xdist" in packages: + concurrent_args.extend(['-n', 'auto']) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs + concurrent_args), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. + # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars(), + ) @nox.session(python=ALL_VERSIONS) diff --git a/packages/google-cloud-bigtable/samples/hello/noxfile.py b/packages/google-cloud-bigtable/samples/hello/noxfile.py index 20cdfc620138..4c808af73ea2 100644 --- a/packages/google-cloud-bigtable/samples/hello/noxfile.py +++ b/packages/google-cloud-bigtable/samples/hello/noxfile.py @@ -188,42 +188,54 @@ def _session_tests( # check for presence of tests test_list = glob.glob("*_test.py") + glob.glob("test_*.py") test_list.extend(glob.glob("tests")) + if len(test_list) == 0: print("No tests found, skipping directory.") - else: - if TEST_CONFIG["pip_version_override"]: - pip_version = TEST_CONFIG["pip_version_override"] - session.install(f"pip=={pip_version}") - """Runs py.test for a particular project.""" - if os.path.exists("requirements.txt"): - if os.path.exists("constraints.txt"): - session.install("-r", "requirements.txt", "-c", "constraints.txt") - else: - session.install("-r", "requirements.txt") - - if os.path.exists("requirements-test.txt"): - if os.path.exists("constraints-test.txt"): - session.install( - "-r", "requirements-test.txt", "-c", "constraints-test.txt" - ) - else: - session.install("-r", "requirements-test.txt") - - if INSTALL_LIBRARY_FROM_SOURCE: - session.install("-e", _get_repo_root()) - - if post_install: - post_install(session) - - session.run( - "pytest", - *(PYTEST_COMMON_ARGS + session.posargs), - # Pytest will return 5 when no tests are collected. This can happen - # on travis where slow and flaky tests are excluded. 
- # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html - success_codes=[0, 5], - env=get_pytest_env_vars(), - ) + return + + if TEST_CONFIG["pip_version_override"]: + pip_version = TEST_CONFIG["pip_version_override"] + session.install(f"pip=={pip_version}") + """Runs py.test for a particular project.""" + concurrent_args = [] + if os.path.exists("requirements.txt"): + if os.path.exists("constraints.txt"): + session.install("-r", "requirements.txt", "-c", "constraints.txt") + else: + session.install("-r", "requirements.txt") + with open("requirements.txt") as rfile: + packages = rfile.read() + + if os.path.exists("requirements-test.txt"): + if os.path.exists("constraints-test.txt"): + session.install( + "-r", "requirements-test.txt", "-c", "constraints-test.txt" + ) + else: + session.install("-r", "requirements-test.txt") + with open("requirements-test.txt") as rtfile: + packages += rtfile.read() + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + if "pytest-parallel" in packages: + concurrent_args.extend(['--workers', 'auto', '--tests-per-worker', 'auto']) + elif "pytest-xdist" in packages: + concurrent_args.extend(['-n', 'auto']) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs + concurrent_args), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. + # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars(), + ) @nox.session(python=ALL_VERSIONS) diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py b/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py index 20cdfc620138..4c808af73ea2 100644 --- a/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py +++ b/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py @@ -188,42 +188,54 @@ def _session_tests( # check for presence of tests test_list = glob.glob("*_test.py") + glob.glob("test_*.py") test_list.extend(glob.glob("tests")) + if len(test_list) == 0: print("No tests found, skipping directory.") - else: - if TEST_CONFIG["pip_version_override"]: - pip_version = TEST_CONFIG["pip_version_override"] - session.install(f"pip=={pip_version}") - """Runs py.test for a particular project.""" - if os.path.exists("requirements.txt"): - if os.path.exists("constraints.txt"): - session.install("-r", "requirements.txt", "-c", "constraints.txt") - else: - session.install("-r", "requirements.txt") - - if os.path.exists("requirements-test.txt"): - if os.path.exists("constraints-test.txt"): - session.install( - "-r", "requirements-test.txt", "-c", "constraints-test.txt" - ) - else: - session.install("-r", "requirements-test.txt") - - if INSTALL_LIBRARY_FROM_SOURCE: - session.install("-e", _get_repo_root()) - - if post_install: - post_install(session) - - session.run( - "pytest", - *(PYTEST_COMMON_ARGS + session.posargs), - # Pytest will return 5 when no tests are collected. This can happen - # on travis where slow and flaky tests are excluded. 
- # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html - success_codes=[0, 5], - env=get_pytest_env_vars(), - ) + return + + if TEST_CONFIG["pip_version_override"]: + pip_version = TEST_CONFIG["pip_version_override"] + session.install(f"pip=={pip_version}") + """Runs py.test for a particular project.""" + concurrent_args = [] + if os.path.exists("requirements.txt"): + if os.path.exists("constraints.txt"): + session.install("-r", "requirements.txt", "-c", "constraints.txt") + else: + session.install("-r", "requirements.txt") + with open("requirements.txt") as rfile: + packages = rfile.read() + + if os.path.exists("requirements-test.txt"): + if os.path.exists("constraints-test.txt"): + session.install( + "-r", "requirements-test.txt", "-c", "constraints-test.txt" + ) + else: + session.install("-r", "requirements-test.txt") + with open("requirements-test.txt") as rtfile: + packages += rtfile.read() + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + if "pytest-parallel" in packages: + concurrent_args.extend(['--workers', 'auto', '--tests-per-worker', 'auto']) + elif "pytest-xdist" in packages: + concurrent_args.extend(['-n', 'auto']) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs + concurrent_args), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. + # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars(), + ) @nox.session(python=ALL_VERSIONS) diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py b/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py index 20cdfc620138..4c808af73ea2 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py +++ b/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py @@ -188,42 +188,54 @@ def _session_tests( # check for presence of tests test_list = glob.glob("*_test.py") + glob.glob("test_*.py") test_list.extend(glob.glob("tests")) + if len(test_list) == 0: print("No tests found, skipping directory.") - else: - if TEST_CONFIG["pip_version_override"]: - pip_version = TEST_CONFIG["pip_version_override"] - session.install(f"pip=={pip_version}") - """Runs py.test for a particular project.""" - if os.path.exists("requirements.txt"): - if os.path.exists("constraints.txt"): - session.install("-r", "requirements.txt", "-c", "constraints.txt") - else: - session.install("-r", "requirements.txt") - - if os.path.exists("requirements-test.txt"): - if os.path.exists("constraints-test.txt"): - session.install( - "-r", "requirements-test.txt", "-c", "constraints-test.txt" - ) - else: - session.install("-r", "requirements-test.txt") - - if INSTALL_LIBRARY_FROM_SOURCE: - session.install("-e", _get_repo_root()) - - if post_install: - post_install(session) - - session.run( - "pytest", - *(PYTEST_COMMON_ARGS + session.posargs), - # Pytest will return 5 when no tests are collected. This can happen - # on travis where slow and flaky tests are excluded. 
- # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html - success_codes=[0, 5], - env=get_pytest_env_vars(), - ) + return + + if TEST_CONFIG["pip_version_override"]: + pip_version = TEST_CONFIG["pip_version_override"] + session.install(f"pip=={pip_version}") + """Runs py.test for a particular project.""" + concurrent_args = [] + if os.path.exists("requirements.txt"): + if os.path.exists("constraints.txt"): + session.install("-r", "requirements.txt", "-c", "constraints.txt") + else: + session.install("-r", "requirements.txt") + with open("requirements.txt") as rfile: + packages = rfile.read() + + if os.path.exists("requirements-test.txt"): + if os.path.exists("constraints-test.txt"): + session.install( + "-r", "requirements-test.txt", "-c", "constraints-test.txt" + ) + else: + session.install("-r", "requirements-test.txt") + with open("requirements-test.txt") as rtfile: + packages += rtfile.read() + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + if "pytest-parallel" in packages: + concurrent_args.extend(['--workers', 'auto', '--tests-per-worker', 'auto']) + elif "pytest-xdist" in packages: + concurrent_args.extend(['-n', 'auto']) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs + concurrent_args), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. + # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars(), + ) @nox.session(python=ALL_VERSIONS) diff --git a/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py b/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py index 20cdfc620138..4c808af73ea2 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py +++ b/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py @@ -188,42 +188,54 @@ def _session_tests( # check for presence of tests test_list = glob.glob("*_test.py") + glob.glob("test_*.py") test_list.extend(glob.glob("tests")) + if len(test_list) == 0: print("No tests found, skipping directory.") - else: - if TEST_CONFIG["pip_version_override"]: - pip_version = TEST_CONFIG["pip_version_override"] - session.install(f"pip=={pip_version}") - """Runs py.test for a particular project.""" - if os.path.exists("requirements.txt"): - if os.path.exists("constraints.txt"): - session.install("-r", "requirements.txt", "-c", "constraints.txt") - else: - session.install("-r", "requirements.txt") - - if os.path.exists("requirements-test.txt"): - if os.path.exists("constraints-test.txt"): - session.install( - "-r", "requirements-test.txt", "-c", "constraints-test.txt" - ) - else: - session.install("-r", "requirements-test.txt") - - if INSTALL_LIBRARY_FROM_SOURCE: - session.install("-e", _get_repo_root()) - - if post_install: - post_install(session) - - session.run( - "pytest", - *(PYTEST_COMMON_ARGS + session.posargs), - # Pytest will return 5 when no tests are collected. This can happen - # on travis where slow and flaky tests are excluded. 
- # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html - success_codes=[0, 5], - env=get_pytest_env_vars(), - ) + return + + if TEST_CONFIG["pip_version_override"]: + pip_version = TEST_CONFIG["pip_version_override"] + session.install(f"pip=={pip_version}") + """Runs py.test for a particular project.""" + concurrent_args = [] + if os.path.exists("requirements.txt"): + if os.path.exists("constraints.txt"): + session.install("-r", "requirements.txt", "-c", "constraints.txt") + else: + session.install("-r", "requirements.txt") + with open("requirements.txt") as rfile: + packages = rfile.read() + + if os.path.exists("requirements-test.txt"): + if os.path.exists("constraints-test.txt"): + session.install( + "-r", "requirements-test.txt", "-c", "constraints-test.txt" + ) + else: + session.install("-r", "requirements-test.txt") + with open("requirements-test.txt") as rtfile: + packages += rtfile.read() + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + if "pytest-parallel" in packages: + concurrent_args.extend(['--workers', 'auto', '--tests-per-worker', 'auto']) + elif "pytest-xdist" in packages: + concurrent_args.extend(['-n', 'auto']) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs + concurrent_args), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. + # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars(), + ) @nox.session(python=ALL_VERSIONS) diff --git a/packages/google-cloud-bigtable/samples/quickstart/noxfile.py b/packages/google-cloud-bigtable/samples/quickstart/noxfile.py index 20cdfc620138..4c808af73ea2 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/noxfile.py +++ b/packages/google-cloud-bigtable/samples/quickstart/noxfile.py @@ -188,42 +188,54 @@ def _session_tests( # check for presence of tests test_list = glob.glob("*_test.py") + glob.glob("test_*.py") test_list.extend(glob.glob("tests")) + if len(test_list) == 0: print("No tests found, skipping directory.") - else: - if TEST_CONFIG["pip_version_override"]: - pip_version = TEST_CONFIG["pip_version_override"] - session.install(f"pip=={pip_version}") - """Runs py.test for a particular project.""" - if os.path.exists("requirements.txt"): - if os.path.exists("constraints.txt"): - session.install("-r", "requirements.txt", "-c", "constraints.txt") - else: - session.install("-r", "requirements.txt") - - if os.path.exists("requirements-test.txt"): - if os.path.exists("constraints-test.txt"): - session.install( - "-r", "requirements-test.txt", "-c", "constraints-test.txt" - ) - else: - session.install("-r", "requirements-test.txt") - - if INSTALL_LIBRARY_FROM_SOURCE: - session.install("-e", _get_repo_root()) - - if post_install: - post_install(session) - - session.run( - "pytest", - *(PYTEST_COMMON_ARGS + session.posargs), - # Pytest will return 5 when no tests are collected. This can happen - # on travis where slow and flaky tests are excluded. 
- # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html - success_codes=[0, 5], - env=get_pytest_env_vars(), - ) + return + + if TEST_CONFIG["pip_version_override"]: + pip_version = TEST_CONFIG["pip_version_override"] + session.install(f"pip=={pip_version}") + """Runs py.test for a particular project.""" + concurrent_args = [] + if os.path.exists("requirements.txt"): + if os.path.exists("constraints.txt"): + session.install("-r", "requirements.txt", "-c", "constraints.txt") + else: + session.install("-r", "requirements.txt") + with open("requirements.txt") as rfile: + packages = rfile.read() + + if os.path.exists("requirements-test.txt"): + if os.path.exists("constraints-test.txt"): + session.install( + "-r", "requirements-test.txt", "-c", "constraints-test.txt" + ) + else: + session.install("-r", "requirements-test.txt") + with open("requirements-test.txt") as rtfile: + packages += rtfile.read() + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + if "pytest-parallel" in packages: + concurrent_args.extend(['--workers', 'auto', '--tests-per-worker', 'auto']) + elif "pytest-xdist" in packages: + concurrent_args.extend(['-n', 'auto']) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs + concurrent_args), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. + # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars(), + ) @nox.session(python=ALL_VERSIONS) diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py b/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py index 20cdfc620138..4c808af73ea2 100644 --- a/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py +++ b/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py @@ -188,42 +188,54 @@ def _session_tests( # check for presence of tests test_list = glob.glob("*_test.py") + glob.glob("test_*.py") test_list.extend(glob.glob("tests")) + if len(test_list) == 0: print("No tests found, skipping directory.") - else: - if TEST_CONFIG["pip_version_override"]: - pip_version = TEST_CONFIG["pip_version_override"] - session.install(f"pip=={pip_version}") - """Runs py.test for a particular project.""" - if os.path.exists("requirements.txt"): - if os.path.exists("constraints.txt"): - session.install("-r", "requirements.txt", "-c", "constraints.txt") - else: - session.install("-r", "requirements.txt") - - if os.path.exists("requirements-test.txt"): - if os.path.exists("constraints-test.txt"): - session.install( - "-r", "requirements-test.txt", "-c", "constraints-test.txt" - ) - else: - session.install("-r", "requirements-test.txt") - - if INSTALL_LIBRARY_FROM_SOURCE: - session.install("-e", _get_repo_root()) - - if post_install: - post_install(session) - - session.run( - "pytest", - *(PYTEST_COMMON_ARGS + session.posargs), - # Pytest will return 5 when no tests are collected. This can happen - # on travis where slow and flaky tests are excluded. 
- # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html - success_codes=[0, 5], - env=get_pytest_env_vars(), - ) + return + + if TEST_CONFIG["pip_version_override"]: + pip_version = TEST_CONFIG["pip_version_override"] + session.install(f"pip=={pip_version}") + """Runs py.test for a particular project.""" + concurrent_args = [] + if os.path.exists("requirements.txt"): + if os.path.exists("constraints.txt"): + session.install("-r", "requirements.txt", "-c", "constraints.txt") + else: + session.install("-r", "requirements.txt") + with open("requirements.txt") as rfile: + packages = rfile.read() + + if os.path.exists("requirements-test.txt"): + if os.path.exists("constraints-test.txt"): + session.install( + "-r", "requirements-test.txt", "-c", "constraints-test.txt" + ) + else: + session.install("-r", "requirements-test.txt") + with open("requirements-test.txt") as rtfile: + packages += rtfile.read() + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + if "pytest-parallel" in packages: + concurrent_args.extend(['--workers', 'auto', '--tests-per-worker', 'auto']) + elif "pytest-xdist" in packages: + concurrent_args.extend(['-n', 'auto']) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs + concurrent_args), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. + # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars(), + ) @nox.session(python=ALL_VERSIONS) diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py index 20cdfc620138..4c808af73ea2 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py @@ -188,42 +188,54 @@ def _session_tests( # check for presence of tests test_list = glob.glob("*_test.py") + glob.glob("test_*.py") test_list.extend(glob.glob("tests")) + if len(test_list) == 0: print("No tests found, skipping directory.") - else: - if TEST_CONFIG["pip_version_override"]: - pip_version = TEST_CONFIG["pip_version_override"] - session.install(f"pip=={pip_version}") - """Runs py.test for a particular project.""" - if os.path.exists("requirements.txt"): - if os.path.exists("constraints.txt"): - session.install("-r", "requirements.txt", "-c", "constraints.txt") - else: - session.install("-r", "requirements.txt") - - if os.path.exists("requirements-test.txt"): - if os.path.exists("constraints-test.txt"): - session.install( - "-r", "requirements-test.txt", "-c", "constraints-test.txt" - ) - else: - session.install("-r", "requirements-test.txt") - - if INSTALL_LIBRARY_FROM_SOURCE: - session.install("-e", _get_repo_root()) - - if post_install: - post_install(session) - - session.run( - "pytest", - *(PYTEST_COMMON_ARGS + session.posargs), - # Pytest will return 5 when no tests are collected. This can happen - # on travis where slow and flaky tests are excluded. 
- # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html - success_codes=[0, 5], - env=get_pytest_env_vars(), - ) + return + + if TEST_CONFIG["pip_version_override"]: + pip_version = TEST_CONFIG["pip_version_override"] + session.install(f"pip=={pip_version}") + """Runs py.test for a particular project.""" + concurrent_args = [] + if os.path.exists("requirements.txt"): + if os.path.exists("constraints.txt"): + session.install("-r", "requirements.txt", "-c", "constraints.txt") + else: + session.install("-r", "requirements.txt") + with open("requirements.txt") as rfile: + packages = rfile.read() + + if os.path.exists("requirements-test.txt"): + if os.path.exists("constraints-test.txt"): + session.install( + "-r", "requirements-test.txt", "-c", "constraints-test.txt" + ) + else: + session.install("-r", "requirements-test.txt") + with open("requirements-test.txt") as rtfile: + packages += rtfile.read() + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + if "pytest-parallel" in packages: + concurrent_args.extend(['--workers', 'auto', '--tests-per-worker', 'auto']) + elif "pytest-xdist" in packages: + concurrent_args.extend(['-n', 'auto']) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs + concurrent_args), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. + # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars(), + ) @nox.session(python=ALL_VERSIONS) diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py index 20cdfc620138..4c808af73ea2 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py @@ -188,42 +188,54 @@ def _session_tests( # check for presence of tests test_list = glob.glob("*_test.py") + glob.glob("test_*.py") test_list.extend(glob.glob("tests")) + if len(test_list) == 0: print("No tests found, skipping directory.") - else: - if TEST_CONFIG["pip_version_override"]: - pip_version = TEST_CONFIG["pip_version_override"] - session.install(f"pip=={pip_version}") - """Runs py.test for a particular project.""" - if os.path.exists("requirements.txt"): - if os.path.exists("constraints.txt"): - session.install("-r", "requirements.txt", "-c", "constraints.txt") - else: - session.install("-r", "requirements.txt") - - if os.path.exists("requirements-test.txt"): - if os.path.exists("constraints-test.txt"): - session.install( - "-r", "requirements-test.txt", "-c", "constraints-test.txt" - ) - else: - session.install("-r", "requirements-test.txt") - - if INSTALL_LIBRARY_FROM_SOURCE: - session.install("-e", _get_repo_root()) - - if post_install: - post_install(session) - - session.run( - "pytest", - *(PYTEST_COMMON_ARGS + session.posargs), - # Pytest will return 5 when no tests are collected. This can happen - # on travis where slow and flaky tests are excluded. 
- # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html - success_codes=[0, 5], - env=get_pytest_env_vars(), - ) + return + + if TEST_CONFIG["pip_version_override"]: + pip_version = TEST_CONFIG["pip_version_override"] + session.install(f"pip=={pip_version}") + """Runs py.test for a particular project.""" + concurrent_args = [] + if os.path.exists("requirements.txt"): + if os.path.exists("constraints.txt"): + session.install("-r", "requirements.txt", "-c", "constraints.txt") + else: + session.install("-r", "requirements.txt") + with open("requirements.txt") as rfile: + packages = rfile.read() + + if os.path.exists("requirements-test.txt"): + if os.path.exists("constraints-test.txt"): + session.install( + "-r", "requirements-test.txt", "-c", "constraints-test.txt" + ) + else: + session.install("-r", "requirements-test.txt") + with open("requirements-test.txt") as rtfile: + packages += rtfile.read() + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + if "pytest-parallel" in packages: + concurrent_args.extend(['--workers', 'auto', '--tests-per-worker', 'auto']) + elif "pytest-xdist" in packages: + concurrent_args.extend(['-n', 'auto']) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs + concurrent_args), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. + # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars(), + ) @nox.session(python=ALL_VERSIONS) diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py index 20cdfc620138..4c808af73ea2 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py @@ -188,42 +188,54 @@ def _session_tests( # check for presence of tests test_list = glob.glob("*_test.py") + glob.glob("test_*.py") test_list.extend(glob.glob("tests")) + if len(test_list) == 0: print("No tests found, skipping directory.") - else: - if TEST_CONFIG["pip_version_override"]: - pip_version = TEST_CONFIG["pip_version_override"] - session.install(f"pip=={pip_version}") - """Runs py.test for a particular project.""" - if os.path.exists("requirements.txt"): - if os.path.exists("constraints.txt"): - session.install("-r", "requirements.txt", "-c", "constraints.txt") - else: - session.install("-r", "requirements.txt") - - if os.path.exists("requirements-test.txt"): - if os.path.exists("constraints-test.txt"): - session.install( - "-r", "requirements-test.txt", "-c", "constraints-test.txt" - ) - else: - session.install("-r", "requirements-test.txt") - - if INSTALL_LIBRARY_FROM_SOURCE: - session.install("-e", _get_repo_root()) - - if post_install: - post_install(session) - - session.run( - "pytest", - *(PYTEST_COMMON_ARGS + session.posargs), - # Pytest will return 5 when no tests are collected. This can happen - # on travis where slow and flaky tests are excluded. 
- # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html - success_codes=[0, 5], - env=get_pytest_env_vars(), - ) + return + + if TEST_CONFIG["pip_version_override"]: + pip_version = TEST_CONFIG["pip_version_override"] + session.install(f"pip=={pip_version}") + """Runs py.test for a particular project.""" + concurrent_args = [] + if os.path.exists("requirements.txt"): + if os.path.exists("constraints.txt"): + session.install("-r", "requirements.txt", "-c", "constraints.txt") + else: + session.install("-r", "requirements.txt") + with open("requirements.txt") as rfile: + packages = rfile.read() + + if os.path.exists("requirements-test.txt"): + if os.path.exists("constraints-test.txt"): + session.install( + "-r", "requirements-test.txt", "-c", "constraints-test.txt" + ) + else: + session.install("-r", "requirements-test.txt") + with open("requirements-test.txt") as rtfile: + packages += rtfile.read() + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + if "pytest-parallel" in packages: + concurrent_args.extend(['--workers', 'auto', '--tests-per-worker', 'auto']) + elif "pytest-xdist" in packages: + concurrent_args.extend(['-n', 'auto']) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs + concurrent_args), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. + # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars(), + ) @nox.session(python=ALL_VERSIONS) diff --git a/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py b/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py index 20cdfc620138..4c808af73ea2 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py +++ b/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py @@ -188,42 +188,54 @@ def _session_tests( # check for presence of tests test_list = glob.glob("*_test.py") + glob.glob("test_*.py") test_list.extend(glob.glob("tests")) + if len(test_list) == 0: print("No tests found, skipping directory.") - else: - if TEST_CONFIG["pip_version_override"]: - pip_version = TEST_CONFIG["pip_version_override"] - session.install(f"pip=={pip_version}") - """Runs py.test for a particular project.""" - if os.path.exists("requirements.txt"): - if os.path.exists("constraints.txt"): - session.install("-r", "requirements.txt", "-c", "constraints.txt") - else: - session.install("-r", "requirements.txt") - - if os.path.exists("requirements-test.txt"): - if os.path.exists("constraints-test.txt"): - session.install( - "-r", "requirements-test.txt", "-c", "constraints-test.txt" - ) - else: - session.install("-r", "requirements-test.txt") - - if INSTALL_LIBRARY_FROM_SOURCE: - session.install("-e", _get_repo_root()) - - if post_install: - post_install(session) - - session.run( - "pytest", - *(PYTEST_COMMON_ARGS + session.posargs), - # Pytest will return 5 when no tests are collected. This can happen - # on travis where slow and flaky tests are excluded. 
- # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html - success_codes=[0, 5], - env=get_pytest_env_vars(), - ) + return + + if TEST_CONFIG["pip_version_override"]: + pip_version = TEST_CONFIG["pip_version_override"] + session.install(f"pip=={pip_version}") + """Runs py.test for a particular project.""" + concurrent_args = [] + if os.path.exists("requirements.txt"): + if os.path.exists("constraints.txt"): + session.install("-r", "requirements.txt", "-c", "constraints.txt") + else: + session.install("-r", "requirements.txt") + with open("requirements.txt") as rfile: + packages = rfile.read() + + if os.path.exists("requirements-test.txt"): + if os.path.exists("constraints-test.txt"): + session.install( + "-r", "requirements-test.txt", "-c", "constraints-test.txt" + ) + else: + session.install("-r", "requirements-test.txt") + with open("requirements-test.txt") as rtfile: + packages += rtfile.read() + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + if "pytest-parallel" in packages: + concurrent_args.extend(['--workers', 'auto', '--tests-per-worker', 'auto']) + elif "pytest-xdist" in packages: + concurrent_args.extend(['-n', 'auto']) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs + concurrent_args), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. + # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars(), + ) @nox.session(python=ALL_VERSIONS) From d0c4a87888e5c8c234a74f1605b4bc6bca87164a Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Sat, 5 Mar 2022 00:24:23 +0000 Subject: [PATCH 575/892] chore(deps): update actions/download-artifact action to v3 (#529) Source-Link: https://github.com/googleapis/synthtool/commit/38e11ad1104dcc1e63b52691ddf2fe4015d06955 Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:4e1991042fe54b991db9ca17c8fb386e61b22fe4d1472a568bf0fcac85dcf5d3 --- packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml | 2 +- packages/google-cloud-bigtable/.github/workflows/unittest.yml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 7e08e05a380c..44c78f7cc12d 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,4 +13,4 @@ # limitations under the License. 
docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:5d8da01438ece4021d135433f2cf3227aa39ef0eaccc941d62aa35e6902832ae + digest: sha256:4e1991042fe54b991db9ca17c8fb386e61b22fe4d1472a568bf0fcac85dcf5d3 diff --git a/packages/google-cloud-bigtable/.github/workflows/unittest.yml b/packages/google-cloud-bigtable/.github/workflows/unittest.yml index e87fe5b7b79a..e5be6edbd54d 100644 --- a/packages/google-cloud-bigtable/.github/workflows/unittest.yml +++ b/packages/google-cloud-bigtable/.github/workflows/unittest.yml @@ -26,7 +26,7 @@ jobs: run: | nox -s unit-${{ matrix.python }} - name: Upload coverage results - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: coverage-artifacts path: .coverage-${{ matrix.python }} @@ -47,7 +47,7 @@ jobs: python -m pip install --upgrade setuptools pip wheel python -m pip install coverage - name: Download coverage results - uses: actions/download-artifact@v2 + uses: actions/download-artifact@v3 with: name: coverage-artifacts path: .coverage-results/ From e0230bd50942e6e975be0896bed7eab5ab53265a Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Sun, 6 Mar 2022 01:04:30 +0100 Subject: [PATCH 576/892] chore(deps): update dependency apache-beam to v2.37.0 (#530) --- packages/google-cloud-bigtable/samples/beam/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index 2bd731ab787a..8296b13724ea 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ -apache-beam==2.36.0 +apache-beam==2.37.0 google-cloud-bigtable==2.6.0 google-cloud-core==2.2.2 From b470274f4e3ca80b81d94bf0e74a810dd4ab632c Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Mon, 7 Mar 2022 11:24:51 -0500 Subject: [PATCH 577/892] chore(main): release 2.7.0 (#527) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- packages/google-cloud-bigtable/CHANGELOG.md | 13 +++++++++++++ packages/google-cloud-bigtable/setup.py | 2 +- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 62c060c69971..7226b2090a0a 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,19 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.7.0](https://github.com/googleapis/python-bigtable/compare/v2.6.0...v2.7.0) (2022-03-06) + + +### Features + +* Add support for autoscaling ([#509](https://github.com/googleapis/python-bigtable/issues/509)) ([8f4e197](https://github.com/googleapis/python-bigtable/commit/8f4e197148644ded934190814ff44fa132a2dda6)) + + +### Bug Fixes + +* **deps:** require google-api-core>=1.31.5, >=2.3.2 ([#526](https://github.com/googleapis/python-bigtable/issues/526)) ([a8a92ee](https://github.com/googleapis/python-bigtable/commit/a8a92ee1b6bd284055fee3e1029a9a6aacbc5f1c)) +* **deps:** require proto-plus>=1.15.0 ([a8a92ee](https://github.com/googleapis/python-bigtable/commit/a8a92ee1b6bd284055fee3e1029a9a6aacbc5f1c)) + ## [2.6.0](https://github.com/googleapis/python-bigtable/compare/v2.5.2...v2.6.0) (2022-02-26) diff --git a/packages/google-cloud-bigtable/setup.py 
b/packages/google-cloud-bigtable/setup.py index a3f51e45019f..78bfefcecee3 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -22,7 +22,7 @@ name = "google-cloud-bigtable" description = "Google Cloud Bigtable API client library" -version = "2.6.0" +version = "2.7.0" # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' From 33fc465f3d5a824c3fa25d6c8dc39cdce1c8f429 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 8 Mar 2022 01:50:05 +0100 Subject: [PATCH 578/892] chore(deps): update all dependencies (#531) --- packages/google-cloud-bigtable/samples/beam/requirements.txt | 4 ++-- packages/google-cloud-bigtable/samples/hello/requirements.txt | 4 ++-- .../samples/instanceadmin/requirements.txt | 2 +- .../samples/metricscaler/requirements.txt | 4 ++-- .../google-cloud-bigtable/samples/quickstart/requirements.txt | 2 +- .../samples/snippets/filters/requirements.txt | 2 +- .../samples/snippets/reads/requirements.txt | 2 +- .../samples/snippets/writes/requirements.txt | 2 +- .../google-cloud-bigtable/samples/tableadmin/requirements.txt | 2 +- 9 files changed, 12 insertions(+), 12 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index 8296b13724ea..3270f46becb9 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ apache-beam==2.37.0 -google-cloud-bigtable==2.6.0 -google-cloud-core==2.2.2 +google-cloud-bigtable==2.7.0 +google-cloud-core==2.2.3 diff --git a/packages/google-cloud-bigtable/samples/hello/requirements.txt b/packages/google-cloud-bigtable/samples/hello/requirements.txt index 117a6e939b73..26cac3f4d116 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.6.0 -google-cloud-core==2.2.2 +google-cloud-bigtable==2.7.0 +google-cloud-core==2.2.3 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt index 1877e7a06de5..40a8ffc5ed31 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.6.0 +google-cloud-bigtable==2.7.0 backoff==1.11.1 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index 20596a095a2e..d3da5f1e9d46 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.6.0 -google-cloud-monitoring==2.9.0 +google-cloud-bigtable==2.7.0 +google-cloud-monitoring==2.9.1 diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt index 89f83eb341b2..39c4d4ea3df8 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.6.0 +google-cloud-bigtable==2.7.0 diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt 
b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt index 7f67da9d1c77..d8043c6554ff 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.6.0 +google-cloud-bigtable==2.7.0 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt index 7f67da9d1c77..d8043c6554ff 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.6.0 +google-cloud-bigtable==2.7.0 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt index b523e0991b63..9b025a34e8af 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.6.0 \ No newline at end of file +google-cloud-bigtable==2.7.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt index 89f83eb341b2..39c4d4ea3df8 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.6.0 +google-cloud-bigtable==2.7.0 From c2c8e2299073fb367720275991d6d4db31c1dec4 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Wed, 9 Mar 2022 12:45:48 +0100 Subject: [PATCH 579/892] chore(deps): update google-github-actions/setup-gcloud action to v0.6.0 (#532) --- .../google-cloud-bigtable/.github/workflows/system_emulated.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml b/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml index c974d6b113d5..303d3672409e 100644 --- a/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml +++ b/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml @@ -20,7 +20,7 @@ jobs: python-version: '3.8' - name: Setup GCloud SDK - uses: google-github-actions/setup-gcloud@v0.5.1 + uses: google-github-actions/setup-gcloud@v0.6.0 - name: Install / run Nox run: | From 9d2f2a850ac6af1680deab4b0d503d6d76b41062 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Sun, 13 Mar 2022 17:08:00 +0100 Subject: [PATCH 580/892] chore(deps): update dependency pytest to v7.1.0 (#535) --- .../google-cloud-bigtable/samples/beam/requirements-test.txt | 2 +- .../google-cloud-bigtable/samples/hello/requirements-test.txt | 2 +- .../samples/hello_happybase/requirements-test.txt | 2 +- .../samples/instanceadmin/requirements-test.txt | 2 +- .../samples/metricscaler/requirements-test.txt | 2 +- .../samples/quickstart/requirements-test.txt | 2 +- .../samples/quickstart_happybase/requirements-test.txt | 2 +- .../samples/snippets/filters/requirements-test.txt | 2 +- .../samples/snippets/reads/requirements-test.txt | 2 +- .../samples/snippets/writes/requirements-test.txt | 2 +- .../samples/tableadmin/requirements-test.txt | 2 +- 11 files changed, 11 
insertions(+), 11 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements-test.txt b/packages/google-cloud-bigtable/samples/beam/requirements-test.txt index c2845bffbe89..824a8a7a0ce6 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements-test.txt @@ -1 +1 @@ -pytest==7.0.1 +pytest==7.1.0 diff --git a/packages/google-cloud-bigtable/samples/hello/requirements-test.txt b/packages/google-cloud-bigtable/samples/hello/requirements-test.txt index c2845bffbe89..824a8a7a0ce6 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements-test.txt @@ -1 +1 @@ -pytest==7.0.1 +pytest==7.1.0 diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt b/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt index c2845bffbe89..824a8a7a0ce6 100644 --- a/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt @@ -1 +1 @@ -pytest==7.0.1 +pytest==7.1.0 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt index c2845bffbe89..824a8a7a0ce6 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt @@ -1 +1 @@ -pytest==7.0.1 +pytest==7.1.0 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt index b4186c059922..fded0b1be4e0 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt @@ -1,3 +1,3 @@ -pytest==7.0.1 +pytest==7.1.0 mock==4.0.3 google-cloud-testutils diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt index c2845bffbe89..824a8a7a0ce6 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt @@ -1 +1 @@ -pytest==7.0.1 +pytest==7.1.0 diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt b/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt index c2845bffbe89..824a8a7a0ce6 100644 --- a/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt @@ -1 +1 @@ -pytest==7.0.1 +pytest==7.1.0 diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt index c2845bffbe89..824a8a7a0ce6 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt @@ -1 +1 @@ -pytest==7.0.1 +pytest==7.1.0 diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt index c2845bffbe89..824a8a7a0ce6 100644 --- 
a/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt @@ -1 +1 @@ -pytest==7.0.1 +pytest==7.1.0 diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt index c531e813e29e..c265ab7091f6 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt @@ -1,2 +1,2 @@ backoff==1.11.1 -pytest==7.0.1 +pytest==7.1.0 diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt index 786624c3ceb2..4fbdd9106a6c 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt @@ -1,2 +1,2 @@ -pytest==7.0.1 +pytest==7.1.0 google-cloud-testutils==1.3.1 From 0522055f7c0eac053129bb506a7de42ebe5fb7d9 Mon Sep 17 00:00:00 2001 From: Mariatta Wijaya Date: Thu, 17 Mar 2022 13:41:15 -0700 Subject: [PATCH 581/892] fix: Ensure message fields are copied when building retry request (#533) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix: Ensure message fields are copied when building retry request Use the to_dict() function when building the ReadRowsRequest arguments. This way, all available fields in the message will get copied over. The `message.filter` field needed a special handling, since the to_dict() function doesn't seem to parse the value the way we want it to be. Fixes internal bug #214449800 * Use copy_from instead of to_dict. copy_from doesn't copy over "empty" fields, so had to adjust the test case expected values. * Use the copy_from function from proto.Message * Bump the min version of proto-plus * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md Co-authored-by: Owl Bot --- .../google/cloud/bigtable/row_data.py | 18 +++++++----------- packages/google-cloud-bigtable/setup.py | 2 +- .../testing/constraints-3.6.txt | 2 +- .../tests/unit/test_row_data.py | 4 ++-- 4 files changed, 11 insertions(+), 15 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py index 1cdd99026b38..3abb6c68cbeb 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py @@ -625,16 +625,12 @@ def __init__(self, message, last_scanned_key, rows_read_so_far): def build_updated_request(self): """Updates the given message request as per last scanned key""" - # TODO: Generalize this to ensure fields don't get rewritten when retrying the request - - r_kwargs = { - "table_name": self.message.table_name, - "filter": self.message.filter, - "app_profile_id": self.message.app_profile_id, - } + resume_request = data_messages_v2_pb2.ReadRowsRequest() + data_messages_v2_pb2.ReadRowsRequest.copy_from(resume_request, self.message) if self.message.rows_limit != 0: - r_kwargs["rows_limit"] = max( + # TODO: Throw an error if rows_limit - read_so_far is 0 or negative. 
+ resume_request.rows_limit = max( 1, self.message.rows_limit - self.rows_read_so_far ) @@ -643,14 +639,14 @@ def build_updated_request(self): # to request only rows that have not been returned yet if "rows" not in self.message: row_range = data_v2_pb2.RowRange(start_key_open=self.last_scanned_key) - r_kwargs["rows"] = data_v2_pb2.RowSet(row_ranges=[row_range]) + resume_request.rows = data_v2_pb2.RowSet(row_ranges=[row_range]) else: row_keys = self._filter_rows_keys() row_ranges = self._filter_row_ranges() - r_kwargs["rows"] = data_v2_pb2.RowSet( + resume_request.rows = data_v2_pb2.RowSet( row_keys=row_keys, row_ranges=row_ranges ) - return data_messages_v2_pb2.ReadRowsRequest(**r_kwargs) + return resume_request def _filter_rows_keys(self): """ Helper for :meth:`build_updated_request`""" diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 78bfefcecee3..15a66b6a3b94 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -38,7 +38,7 @@ # https://github.com/googleapis/google-cloud-python/issues/10566 "google-cloud-core >= 1.4.1, <3.0.0dev", "grpc-google-iam-v1 >= 0.12.3, < 0.13dev", - "proto-plus >= 1.15.0", + "proto-plus >= 1.18.0", ] extras = {"libcst": "libcst >= 0.2.5"} diff --git a/packages/google-cloud-bigtable/testing/constraints-3.6.txt b/packages/google-cloud-bigtable/testing/constraints-3.6.txt index 3d010787d846..92345bed82b7 100644 --- a/packages/google-cloud-bigtable/testing/constraints-3.6.txt +++ b/packages/google-cloud-bigtable/testing/constraints-3.6.txt @@ -8,5 +8,5 @@ google-api-core==1.31.5 google-cloud-core==1.4.1 grpc-google-iam-v1==0.12.3 -proto-plus==1.15.0 +proto-plus==1.18.0 libcst==0.2.5 diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_data.py b/packages/google-cloud-bigtable/tests/unit/test_row_data.py index e48893df5553..60a138800209 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_data.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_data.py @@ -928,7 +928,7 @@ def test_RRRM_build_updated_request_full_table(): request_manager = _make_read_rows_request_manager(request, last_scanned_key, 2) result = request_manager.build_updated_request() - expected_result = _ReadRowsRequestPB(table_name=TABLE_NAME, filter={}) + expected_result = _ReadRowsRequestPB(table_name=TABLE_NAME) row_range1 = types.RowRange(start_key_open=last_scanned_key) expected_result.rows.row_ranges.append(row_range1) assert expected_result == result @@ -1021,7 +1021,7 @@ def test_RRRM_build_updated_request_rows_limit(): request_manager = _make_read_rows_request_manager(request, last_scanned_key, 2) result = request_manager.build_updated_request() - expected_result = _ReadRowsRequestPB(table_name=TABLE_NAME, filter={}, rows_limit=8) + expected_result = _ReadRowsRequestPB(table_name=TABLE_NAME, rows_limit=8) row_range1 = types.RowRange(start_key_open=last_scanned_key) expected_result.rows.row_ranges.append(row_range1) assert expected_result == result From f839ab76b9cddc98beb0c7e903a3674970f7f861 Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Thu, 17 Mar 2022 14:41:26 -0700 Subject: [PATCH 582/892] chore(main): release 2.7.1 (#536) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- packages/google-cloud-bigtable/CHANGELOG.md | 7 +++++++ packages/google-cloud-bigtable/setup.py | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git 
a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 7226b2090a0a..45f5756a3d4c 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,13 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +### [2.7.1](https://github.com/googleapis/python-bigtable/compare/v2.7.0...v2.7.1) (2022-03-17) + + +### Bug Fixes + +* Ensure message fields are copied when building retry request ([#533](https://github.com/googleapis/python-bigtable/issues/533)) ([ff7f190](https://github.com/googleapis/python-bigtable/commit/ff7f1901b6420e66e1388e757eeec20d30484ad9)) + ## [2.7.0](https://github.com/googleapis/python-bigtable/compare/v2.6.0...v2.7.0) (2022-03-06) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 15a66b6a3b94..5869995b28b1 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -22,7 +22,7 @@ name = "google-cloud-bigtable" description = "Google Cloud Bigtable API client library" -version = "2.7.0" +version = "2.7.1" # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' From f483bcccd5924dc8dc2b81cfd4ef6fbb8cba09d2 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Sat, 19 Mar 2022 11:40:11 +0100 Subject: [PATCH 583/892] chore(deps): update all dependencies (#540) --- .../google-cloud-bigtable/samples/beam/requirements-test.txt | 2 +- packages/google-cloud-bigtable/samples/beam/requirements.txt | 2 +- .../google-cloud-bigtable/samples/hello/requirements-test.txt | 2 +- packages/google-cloud-bigtable/samples/hello/requirements.txt | 2 +- .../samples/hello_happybase/requirements-test.txt | 2 +- .../samples/instanceadmin/requirements-test.txt | 2 +- .../samples/instanceadmin/requirements.txt | 2 +- .../samples/metricscaler/requirements-test.txt | 2 +- .../google-cloud-bigtable/samples/metricscaler/requirements.txt | 2 +- .../samples/quickstart/requirements-test.txt | 2 +- .../google-cloud-bigtable/samples/quickstart/requirements.txt | 2 +- .../samples/quickstart_happybase/requirements-test.txt | 2 +- .../samples/snippets/filters/requirements-test.txt | 2 +- .../samples/snippets/filters/requirements.txt | 2 +- .../samples/snippets/reads/requirements-test.txt | 2 +- .../samples/snippets/reads/requirements.txt | 2 +- .../samples/snippets/writes/requirements-test.txt | 2 +- .../samples/snippets/writes/requirements.txt | 2 +- .../samples/tableadmin/requirements-test.txt | 2 +- .../google-cloud-bigtable/samples/tableadmin/requirements.txt | 2 +- 20 files changed, 20 insertions(+), 20 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements-test.txt b/packages/google-cloud-bigtable/samples/beam/requirements-test.txt index 824a8a7a0ce6..4f6bf643fc5e 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements-test.txt @@ -1 +1 @@ -pytest==7.1.0 +pytest==7.1.1 diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index 3270f46becb9..860a1045d313 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ apache-beam==2.37.0 -google-cloud-bigtable==2.7.0 +google-cloud-bigtable==2.7.1 google-cloud-core==2.2.3 diff --git 
a/packages/google-cloud-bigtable/samples/hello/requirements-test.txt b/packages/google-cloud-bigtable/samples/hello/requirements-test.txt index 824a8a7a0ce6..4f6bf643fc5e 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements-test.txt @@ -1 +1 @@ -pytest==7.1.0 +pytest==7.1.1 diff --git a/packages/google-cloud-bigtable/samples/hello/requirements.txt b/packages/google-cloud-bigtable/samples/hello/requirements.txt index 26cac3f4d116..f0836a540a87 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.7.0 +google-cloud-bigtable==2.7.1 google-cloud-core==2.2.3 diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt b/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt index 824a8a7a0ce6..4f6bf643fc5e 100644 --- a/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt @@ -1 +1 @@ -pytest==7.1.0 +pytest==7.1.1 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt index 824a8a7a0ce6..4f6bf643fc5e 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt @@ -1 +1 @@ -pytest==7.1.0 +pytest==7.1.1 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt index 40a8ffc5ed31..f5c202aa0546 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.7.0 +google-cloud-bigtable==2.7.1 backoff==1.11.1 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt index fded0b1be4e0..fe1569ff4599 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt @@ -1,3 +1,3 @@ -pytest==7.1.0 +pytest==7.1.1 mock==4.0.3 google-cloud-testutils diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index d3da5f1e9d46..af3bb97b79a3 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.7.0 +google-cloud-bigtable==2.7.1 google-cloud-monitoring==2.9.1 diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt index 824a8a7a0ce6..4f6bf643fc5e 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt @@ -1 +1 @@ -pytest==7.1.0 +pytest==7.1.1 diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt index 39c4d4ea3df8..01e026f9316b 100644 --- 
a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.7.0 +google-cloud-bigtable==2.7.1 diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt b/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt index 824a8a7a0ce6..4f6bf643fc5e 100644 --- a/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt @@ -1 +1 @@ -pytest==7.1.0 +pytest==7.1.1 diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt index 824a8a7a0ce6..4f6bf643fc5e 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt @@ -1 +1 @@ -pytest==7.1.0 +pytest==7.1.1 diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt index d8043c6554ff..7e0c6dad2782 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.7.0 +google-cloud-bigtable==2.7.1 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt index 824a8a7a0ce6..4f6bf643fc5e 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt @@ -1 +1 @@ -pytest==7.1.0 +pytest==7.1.1 diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt index d8043c6554ff..7e0c6dad2782 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.7.0 +google-cloud-bigtable==2.7.1 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt index c265ab7091f6..678dbc8efd1b 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt @@ -1,2 +1,2 @@ backoff==1.11.1 -pytest==7.1.0 +pytest==7.1.1 diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt index 9b025a34e8af..094fd46386fc 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.7.0 \ No newline at end of file +google-cloud-bigtable==2.7.1 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt index 4fbdd9106a6c..e3434bfa8cb1 
100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt @@ -1,2 +1,2 @@ -pytest==7.1.0 +pytest==7.1.1 google-cloud-testutils==1.3.1 diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt index 39c4d4ea3df8..01e026f9316b 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.7.0 +google-cloud-bigtable==2.7.1 From 626cbe164af2ba67e157403c7aeaf67eb08d39ca Mon Sep 17 00:00:00 2001 From: John Fremlin Date: Wed, 23 Mar 2022 19:02:10 -0400 Subject: [PATCH 584/892] docs: explain mutate vs mutate_rows (#543) Thanks to Bora for spotting this! --- .../google-cloud-bigtable/google/cloud/bigtable/batcher.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py b/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py index 950a198ef182..3c23f44363ce 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py @@ -33,7 +33,7 @@ class MutationsBatcher(object): request. This class is not suited for usage in systems where each mutation - needs to guaranteed to be sent, since calling mutate may only result in an + must be guaranteed to be sent, since calling mutate may only result in an in-memory change. In a case of a system crash, any DirectRows remaining in memory will not necessarily be sent to the service, even after the completion of the mutate() method. @@ -105,7 +105,7 @@ def mutate(self, row): self.flush() def mutate_rows(self, rows): - """Add a row to the batch. If the current batch meets one of the size + """Add multiple rows to the batch. If the current batch meets one of the size limits, the batch is sent synchronously. 
For example: From 9ff482536d47f898f823012b43e3c9310e369c33 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 24 Mar 2022 07:12:44 -0400 Subject: [PATCH 585/892] feat: Add ListHotTablets API method and protobufs (#542) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: Add ListHotTablets API method and protobufs PiperOrigin-RevId: 436758628 Source-Link: https://github.com/googleapis/googleapis/commit/92ab86a79f4a9e774dfd6ee533d9376af084b376 Source-Link: https://github.com/googleapis/googleapis-gen/commit/931ef114d5f845abf117bf8e0a29836ca300b694 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiOTMxZWYxMTRkNWY4NDVhYmYxMTdiZjhlMGEyOTgzNmNhMzAwYjY5NCJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md Co-authored-by: Owl Bot --- .../cloud/bigtable_admin_v2/__init__.py | 6 + .../bigtable_admin_v2/gapic_metadata.json | 10 + .../bigtable_instance_admin/async_client.py | 98 ++++ .../bigtable_instance_admin/client.py | 116 +++++ .../bigtable_instance_admin/pagers.py | 132 +++++ .../transports/base.py | 27 ++ .../transports/grpc.py | 30 ++ .../transports/grpc_asyncio.py | 30 ++ .../cloud/bigtable_admin_v2/types/__init__.py | 6 + .../types/bigtable_instance_admin.py | 76 +++ .../cloud/bigtable_admin_v2/types/instance.py | 43 ++ .../fixup_bigtable_admin_v2_keywords.py | 1 + .../test_bigtable_instance_admin.py | 451 +++++++++++++++++- 13 files changed, 1011 insertions(+), 15 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py index 1b46d6215b40..3713dc1e8c4e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py @@ -34,6 +34,8 @@ from .types.bigtable_instance_admin import ListAppProfilesResponse from .types.bigtable_instance_admin import ListClustersRequest from .types.bigtable_instance_admin import ListClustersResponse +from .types.bigtable_instance_admin import ListHotTabletsRequest +from .types.bigtable_instance_admin import ListHotTabletsResponse from .types.bigtable_instance_admin import ListInstancesRequest from .types.bigtable_instance_admin import ListInstancesResponse from .types.bigtable_instance_admin import PartialUpdateClusterMetadata @@ -78,6 +80,7 @@ from .types.instance import AutoscalingLimits from .types.instance import AutoscalingTargets from .types.instance import Cluster +from .types.instance import HotTablet from .types.instance import Instance from .types.table import Backup from .types.table import BackupInfo @@ -130,6 +133,7 @@ "GetInstanceRequest", "GetSnapshotRequest", "GetTableRequest", + "HotTablet", "Instance", "ListAppProfilesRequest", "ListAppProfilesResponse", @@ -137,6 +141,8 @@ "ListBackupsResponse", "ListClustersRequest", "ListClustersResponse", + "ListHotTabletsRequest", + "ListHotTabletsResponse", "ListInstancesRequest", "ListInstancesResponse", "ListSnapshotsRequest", diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_metadata.json b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_metadata.json index c360e7712249..a843c42e0dcd 100644 --- 
a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_metadata.json +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_metadata.json @@ -70,6 +70,11 @@ "list_clusters" ] }, + "ListHotTablets": { + "methods": [ + "list_hot_tablets" + ] + }, "ListInstances": { "methods": [ "list_instances" @@ -175,6 +180,11 @@ "list_clusters" ] }, + "ListHotTablets": { + "methods": [ + "list_hot_tablets" + ] + }, "ListInstances": { "methods": [ "list_instances" diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py index 18b6541dce97..af8e62857295 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ -69,8 +69,14 @@ class BigtableInstanceAdminAsyncClient: parse_crypto_key_path = staticmethod( BigtableInstanceAdminClient.parse_crypto_key_path ) + hot_tablet_path = staticmethod(BigtableInstanceAdminClient.hot_tablet_path) + parse_hot_tablet_path = staticmethod( + BigtableInstanceAdminClient.parse_hot_tablet_path + ) instance_path = staticmethod(BigtableInstanceAdminClient.instance_path) parse_instance_path = staticmethod(BigtableInstanceAdminClient.parse_instance_path) + table_path = staticmethod(BigtableInstanceAdminClient.table_path) + parse_table_path = staticmethod(BigtableInstanceAdminClient.parse_table_path) common_billing_account_path = staticmethod( BigtableInstanceAdminClient.common_billing_account_path ) @@ -2109,6 +2115,98 @@ async def test_iam_permissions( # Done; return the response. return response + async def list_hot_tablets( + self, + request: Union[bigtable_instance_admin.ListHotTabletsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListHotTabletsAsyncPager: + r"""Lists hot tablets in a cluster, within the time range + provided. Hot tablets are ordered based on CPU usage. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.ListHotTabletsRequest, dict]): + The request object. Request message for + BigtableInstanceAdmin.ListHotTablets. + parent (:class:`str`): + Required. The cluster name to list hot tablets. Value is + in the following form: + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListHotTabletsAsyncPager: + Response message for + BigtableInstanceAdmin.ListHotTablets. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_instance_admin.ListHotTabletsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_hot_tablets, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListHotTabletsAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + async def __aenter__(self): return self diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py index a13a4b794245..f149ccf9d55a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -227,6 +227,24 @@ def parse_crypto_key_path(path: str) -> Dict[str, str]: ) return m.groupdict() if m else {} + @staticmethod + def hot_tablet_path( + project: str, instance: str, cluster: str, hot_tablet: str, + ) -> str: + """Returns a fully-qualified hot_tablet string.""" + return "projects/{project}/instances/{instance}/clusters/{cluster}/hotTablets/{hot_tablet}".format( + project=project, instance=instance, cluster=cluster, hot_tablet=hot_tablet, + ) + + @staticmethod + def parse_hot_tablet_path(path: str) -> Dict[str, str]: + """Parses a hot_tablet path into its component segments.""" + m = re.match( + r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)/clusters/(?P<cluster>.+?)/hotTablets/(?P<hot_tablet>.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod def instance_path(project: str, instance: str,) -> str: """Returns a fully-qualified instance string.""" @@ -240,6 +258,22 @@ def parse_instance_path(path: str) -> Dict[str, str]: m = re.match(r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)$", path) return m.groupdict() if m else {} + + @staticmethod + def table_path(project: str, instance: str, table: str,) -> str: + """Returns a fully-qualified table string.""" + return "projects/{project}/instances/{instance}/tables/{table}".format( + project=project, instance=instance, table=table, + ) + + @staticmethod + def parse_table_path(path: str) -> Dict[str, str]: + """Parses a table path into its component segments.""" + m = re.match( + r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)/tables/(?P<table>
.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def common_billing_account_path(billing_account: str,) -> str: """Returns a fully-qualified billing_account string.""" @@ -2215,6 +2249,88 @@ def test_iam_permissions( # Done; return the response. return response + def list_hot_tablets( + self, + request: Union[bigtable_instance_admin.ListHotTabletsRequest, dict] = None, + *, + parent: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListHotTabletsPager: + r"""Lists hot tablets in a cluster, within the time range + provided. Hot tablets are ordered based on CPU usage. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.ListHotTabletsRequest, dict]): + The request object. Request message for + BigtableInstanceAdmin.ListHotTablets. + parent (str): + Required. The cluster name to list hot tablets. Value is + in the following form: + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListHotTabletsPager: + Response message for + BigtableInstanceAdmin.ListHotTablets. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_instance_admin.ListHotTabletsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_instance_admin.ListHotTabletsRequest): + request = bigtable_instance_admin.ListHotTabletsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_hot_tablets] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListHotTabletsPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. 
+ return response + def __enter__(self): return self diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py index 77bde77e45aa..bfcbbf23d703 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py @@ -158,3 +158,135 @@ async def async_generator(): def __repr__(self) -> str: return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListHotTabletsPager: + """A pager for iterating through ``list_hot_tablets`` requests. + + This class thinly wraps an initial + :class:`google.cloud.bigtable_admin_v2.types.ListHotTabletsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``hot_tablets`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListHotTablets`` requests and continue to iterate + through the ``hot_tablets`` field on the + corresponding responses. + + All the usual :class:`google.cloud.bigtable_admin_v2.types.ListHotTabletsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., bigtable_instance_admin.ListHotTabletsResponse], + request: bigtable_instance_admin.ListHotTabletsRequest, + response: bigtable_instance_admin.ListHotTabletsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.bigtable_admin_v2.types.ListHotTabletsRequest): + The initial request object. + response (google.cloud.bigtable_admin_v2.types.ListHotTabletsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = bigtable_instance_admin.ListHotTabletsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[bigtable_instance_admin.ListHotTabletsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[instance.HotTablet]: + for page in self.pages: + yield from page.hot_tablets + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListHotTabletsAsyncPager: + """A pager for iterating through ``list_hot_tablets`` requests. + + This class thinly wraps an initial + :class:`google.cloud.bigtable_admin_v2.types.ListHotTabletsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``hot_tablets`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListHotTablets`` requests and continue to iterate + through the ``hot_tablets`` field on the + corresponding responses. + + All the usual :class:`google.cloud.bigtable_admin_v2.types.ListHotTabletsResponse` + attributes are available on the pager. 
If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[ + ..., Awaitable[bigtable_instance_admin.ListHotTabletsResponse] + ], + request: bigtable_instance_admin.ListHotTabletsRequest, + response: bigtable_instance_admin.ListHotTabletsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.bigtable_admin_v2.types.ListHotTabletsRequest): + The initial request object. + response (google.cloud.bigtable_admin_v2.types.ListHotTabletsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = bigtable_instance_admin.ListHotTabletsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterator[bigtable_instance_admin.ListHotTabletsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterator[instance.HotTablet]: + async def async_generator(): + async for page in self.pages: + for response in page.hot_tablets: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py index b32cba7156f4..2330879c7057 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py @@ -350,6 +350,21 @@ def _prep_wrapped_messages(self, client_info): default_timeout=60.0, client_info=client_info, ), + self.list_hot_tablets: gapic_v1.method.wrap_method( + self.list_hot_tablets, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), } def close(self): @@ -557,5 +572,17 @@ def test_iam_permissions( ]: raise NotImplementedError() + @property + def list_hot_tablets( + self, + ) -> Callable[ + [bigtable_instance_admin.ListHotTabletsRequest], + Union[ + bigtable_instance_admin.ListHotTabletsResponse, + Awaitable[bigtable_instance_admin.ListHotTabletsResponse], + ], + ]: + raise NotImplementedError() + __all__ = ("BigtableInstanceAdminTransport",) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py index 8a0169ad00fb..e33cc473f910 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py 
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py @@ -830,6 +830,36 @@ def test_iam_permissions( ) return self._stubs["test_iam_permissions"] + @property + def list_hot_tablets( + self, + ) -> Callable[ + [bigtable_instance_admin.ListHotTabletsRequest], + bigtable_instance_admin.ListHotTabletsResponse, + ]: + r"""Return a callable for the list hot tablets method over gRPC. + + Lists hot tablets in a cluster, within the time range + provided. Hot tablets are ordered based on CPU usage. + + Returns: + Callable[[~.ListHotTabletsRequest], + ~.ListHotTabletsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_hot_tablets" not in self._stubs: + self._stubs["list_hot_tablets"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListHotTablets", + request_serializer=bigtable_instance_admin.ListHotTabletsRequest.serialize, + response_deserializer=bigtable_instance_admin.ListHotTabletsResponse.deserialize, + ) + return self._stubs["list_hot_tablets"] + def close(self): self.grpc_channel.close() diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py index c1be2101a1e3..bf1b8a38e268 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py @@ -856,6 +856,36 @@ def test_iam_permissions( ) return self._stubs["test_iam_permissions"] + @property + def list_hot_tablets( + self, + ) -> Callable[ + [bigtable_instance_admin.ListHotTabletsRequest], + Awaitable[bigtable_instance_admin.ListHotTabletsResponse], + ]: + r"""Return a callable for the list hot tablets method over gRPC. + + Lists hot tablets in a cluster, within the time range + provided. Hot tablets are ordered based on CPU usage. + + Returns: + Callable[[~.ListHotTabletsRequest], + Awaitable[~.ListHotTabletsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_hot_tablets" not in self._stubs: + self._stubs["list_hot_tablets"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListHotTablets", + request_serializer=bigtable_instance_admin.ListHotTabletsRequest.serialize, + response_deserializer=bigtable_instance_admin.ListHotTabletsResponse.deserialize, + ) + return self._stubs["list_hot_tablets"] + def close(self): return self.grpc_channel.close() diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py index a81a4b7ed54a..f35f0f4ab5c0 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py @@ -29,6 +29,8 @@ ListAppProfilesResponse, ListClustersRequest, ListClustersResponse, + ListHotTabletsRequest, + ListHotTabletsResponse, ListInstancesRequest, ListInstancesResponse, PartialUpdateClusterMetadata, @@ -79,6 +81,7 @@ AutoscalingLimits, AutoscalingTargets, Cluster, + HotTablet, Instance, ) from .table import ( @@ -109,6 +112,8 @@ "ListAppProfilesResponse", "ListClustersRequest", "ListClustersResponse", + "ListHotTabletsRequest", + "ListHotTabletsResponse", "ListInstancesRequest", "ListInstancesResponse", "PartialUpdateClusterMetadata", @@ -153,6 +158,7 @@ "AutoscalingLimits", "AutoscalingTargets", "Cluster", + "HotTablet", "Instance", "Backup", "BackupInfo", diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py index 131c2817778c..b4d7c55aa77a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py @@ -47,6 +47,8 @@ "UpdateAppProfileRequest", "DeleteAppProfileRequest", "UpdateAppProfileMetadata", + "ListHotTabletsRequest", + "ListHotTabletsResponse", }, ) @@ -553,4 +555,78 @@ class UpdateAppProfileMetadata(proto.Message): """ +class ListHotTabletsRequest(proto.Message): + r"""Request message for BigtableInstanceAdmin.ListHotTablets. + + Attributes: + parent (str): + Required. The cluster name to list hot tablets. Value is in + the following form: + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + start_time (google.protobuf.timestamp_pb2.Timestamp): + The start time to list hot tablets. The hot + tablets in the response will have start times + between the requested start time and end time. + Start time defaults to Now if it is unset, and + end time defaults to Now - 24 hours if it is + unset. The start time should be less than the + end time, and the maximum allowed time range + between start time and end time is 48 hours. + Start time and end time should have values + between Now and Now - 14 days. + end_time (google.protobuf.timestamp_pb2.Timestamp): + The end time to list hot tablets. + page_size (int): + Maximum number of results per page. + + A page_size that is empty or zero lets the server choose the + number of items to return. A page_size which is strictly + positive will return at most that many items. A negative + page_size will cause an error. + + Following the first request, subsequent paginated calls do + not need a page_size field. If a page_size is set in + subsequent calls, it must match the page_size given in the + first request. 
+ page_token (str): + The value of ``next_page_token`` returned by a previous + call. + """ + + parent = proto.Field(proto.STRING, number=1,) + start_time = proto.Field(proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp,) + end_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,) + page_size = proto.Field(proto.INT32, number=4,) + page_token = proto.Field(proto.STRING, number=5,) + + +class ListHotTabletsResponse(proto.Message): + r"""Response message for BigtableInstanceAdmin.ListHotTablets. + + Attributes: + hot_tablets (Sequence[google.cloud.bigtable_admin_v2.types.HotTablet]): + List of hot tablets in the tables of the + requested cluster that fall within the requested + time range. Hot tablets are ordered by node cpu + usage percent. If there are multiple hot tablets + that correspond to the same tablet within a + 15-minute interval, only the hot tablet with the + highest node cpu usage will be included in the + response. + next_page_token (str): + Set if not all hot tablets could be returned in a single + response. Pass this value to ``page_token`` in another + request to get the next page of results. + """ + + @property + def raw_page(self): + return self + + hot_tablets = proto.RepeatedField( + proto.MESSAGE, number=1, message=gba_instance.HotTablet, + ) + next_page_token = proto.Field(proto.STRING, number=2,) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py index 22c73dfddc53..961cba43421b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py @@ -27,6 +27,7 @@ "AutoscalingLimits", "Cluster", "AppProfile", + "HotTablet", }, ) @@ -323,4 +324,46 @@ class SingleClusterRouting(proto.Message): ) +class HotTablet(proto.Message): + r"""A tablet is a defined by a start and end key and is explained + in https://cloud.google.com/bigtable/docs/overview#architecture + and + https://cloud.google.com/bigtable/docs/performance#optimization. + A Hot tablet is a tablet that exhibits high average cpu usage + during the time interval from start time to end time. + + Attributes: + name (str): + The unique name of the hot tablet. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/hotTablets/[a-zA-Z0-9_-]*``. + table_name (str): + Name of the table that contains the tablet. Values are of + the form + ``projects/{project}/instances/{instance}/tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The start time of the hot + tablet. + end_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The end time of the hot tablet. + start_key (str): + Tablet Start Key (inclusive). + end_key (str): + Tablet End Key (inclusive). + node_cpu_usage_percent (float): + Output only. The average CPU usage spent by a node on this + tablet over the start_time to end_time time range. The + percentage is the amount of CPU used by the node to serve + the tablet, from 0% (tablet was not interacted with) to 100% + (the node spent all cycles serving the hot tablet). 
+ """ + + name = proto.Field(proto.STRING, number=1,) + table_name = proto.Field(proto.STRING, number=2,) + start_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,) + end_time = proto.Field(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,) + start_key = proto.Field(proto.STRING, number=5,) + end_key = proto.Field(proto.STRING, number=6,) + node_cpu_usage_percent = proto.Field(proto.FLOAT, number=7,) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py b/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py index 6d5bc00f43c6..2079dd99b3d9 100644 --- a/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py +++ b/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py @@ -64,6 +64,7 @@ class bigtable_adminCallTransformer(cst.CSTTransformer): 'list_app_profiles': ('parent', 'page_size', 'page_token', ), 'list_backups': ('parent', 'filter', 'order_by', 'page_size', 'page_token', ), 'list_clusters': ('parent', 'page_token', ), + 'list_hot_tablets': ('parent', 'start_time', 'end_time', 'page_size', 'page_token', ), 'list_instances': ('parent', 'page_token', ), 'list_snapshots': ('parent', 'page_size', 'page_token', ), 'list_tables': ('parent', 'view', 'page_size', 'page_token', ), diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index 76df253aa812..226f5acf76a3 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -5242,6 +5242,374 @@ async def test_test_iam_permissions_flattened_error_async(): ) +@pytest.mark.parametrize( + "request_type", [bigtable_instance_admin.ListHotTabletsRequest, dict,] +) +def test_list_hot_tablets(request_type, transport: str = "grpc"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_hot_tablets), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_instance_admin.ListHotTabletsResponse( + next_page_token="next_page_token_value", + ) + response = client.list_hot_tablets(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.ListHotTabletsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListHotTabletsPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_hot_tablets_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_hot_tablets), "__call__") as call: + client.list_hot_tablets() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.ListHotTabletsRequest() + + +@pytest.mark.asyncio +async def test_list_hot_tablets_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.ListHotTabletsRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_hot_tablets), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_instance_admin.ListHotTabletsResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_hot_tablets(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.ListHotTabletsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListHotTabletsAsyncPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_hot_tablets_async_from_dict(): + await test_list_hot_tablets_async(request_type=dict) + + +def test_list_hot_tablets_field_headers(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.ListHotTabletsRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_hot_tablets), "__call__") as call: + call.return_value = bigtable_instance_admin.ListHotTabletsResponse() + client.list_hot_tablets(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_hot_tablets_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.ListHotTabletsRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_hot_tablets), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_instance_admin.ListHotTabletsResponse() + ) + await client.list_hot_tablets(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_hot_tablets_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_hot_tablets), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_instance_admin.ListHotTabletsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_hot_tablets(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +def test_list_hot_tablets_flattened_error(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_hot_tablets( + bigtable_instance_admin.ListHotTabletsRequest(), parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_hot_tablets_flattened_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_hot_tablets), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_instance_admin.ListHotTabletsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_instance_admin.ListHotTabletsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_hot_tablets(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_list_hot_tablets_flattened_error_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_hot_tablets( + bigtable_instance_admin.ListHotTabletsRequest(), parent="parent_value", + ) + + +def test_list_hot_tablets_pager(transport_name: str = "grpc"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials, transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_hot_tablets), "__call__") as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + bigtable_instance_admin.ListHotTabletsResponse( + hot_tablets=[ + instance.HotTablet(), + instance.HotTablet(), + instance.HotTablet(), + ], + next_page_token="abc", + ), + bigtable_instance_admin.ListHotTabletsResponse( + hot_tablets=[], next_page_token="def", + ), + bigtable_instance_admin.ListHotTabletsResponse( + hot_tablets=[instance.HotTablet(),], next_page_token="ghi", + ), + bigtable_instance_admin.ListHotTabletsResponse( + hot_tablets=[instance.HotTablet(), instance.HotTablet(),], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_hot_tablets(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, instance.HotTablet) for i in results) + + +def test_list_hot_tablets_pages(transport_name: str = "grpc"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials, transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_hot_tablets), "__call__") as call: + # Set the response to a series of pages. + call.side_effect = ( + bigtable_instance_admin.ListHotTabletsResponse( + hot_tablets=[ + instance.HotTablet(), + instance.HotTablet(), + instance.HotTablet(), + ], + next_page_token="abc", + ), + bigtable_instance_admin.ListHotTabletsResponse( + hot_tablets=[], next_page_token="def", + ), + bigtable_instance_admin.ListHotTabletsResponse( + hot_tablets=[instance.HotTablet(),], next_page_token="ghi", + ), + bigtable_instance_admin.ListHotTabletsResponse( + hot_tablets=[instance.HotTablet(), instance.HotTablet(),], + ), + RuntimeError, + ) + pages = list(client.list_hot_tablets(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_hot_tablets_async_pager(): + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_hot_tablets), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + bigtable_instance_admin.ListHotTabletsResponse( + hot_tablets=[ + instance.HotTablet(), + instance.HotTablet(), + instance.HotTablet(), + ], + next_page_token="abc", + ), + bigtable_instance_admin.ListHotTabletsResponse( + hot_tablets=[], next_page_token="def", + ), + bigtable_instance_admin.ListHotTabletsResponse( + hot_tablets=[instance.HotTablet(),], next_page_token="ghi", + ), + bigtable_instance_admin.ListHotTabletsResponse( + hot_tablets=[instance.HotTablet(), instance.HotTablet(),], + ), + RuntimeError, + ) + async_pager = await client.list_hot_tablets(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, instance.HotTablet) for i in responses) + + +@pytest.mark.asyncio +async def test_list_hot_tablets_async_pages(): + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_hot_tablets), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + bigtable_instance_admin.ListHotTabletsResponse( + hot_tablets=[ + instance.HotTablet(), + instance.HotTablet(), + instance.HotTablet(), + ], + next_page_token="abc", + ), + bigtable_instance_admin.ListHotTabletsResponse( + hot_tablets=[], next_page_token="def", + ), + bigtable_instance_admin.ListHotTabletsResponse( + hot_tablets=[instance.HotTablet(),], next_page_token="ghi", + ), + bigtable_instance_admin.ListHotTabletsResponse( + hot_tablets=[instance.HotTablet(), instance.HotTablet(),], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_hot_tablets(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + def test_credentials_transport_error(): # It is an error to provide credentials and a transport instance. transport = transports.BigtableInstanceAdminGrpcTransport( @@ -5380,6 +5748,7 @@ def test_bigtable_instance_admin_base_transport(): "get_iam_policy", "set_iam_policy", "test_iam_permissions", + "list_hot_tablets", ) for method in methods: with pytest.raises(NotImplementedError): @@ -5821,9 +6190,37 @@ def test_parse_crypto_key_path(): assert expected == actual -def test_instance_path(): +def test_hot_tablet_path(): project = "winkle" instance = "nautilus" + cluster = "scallop" + hot_tablet = "abalone" + expected = "projects/{project}/instances/{instance}/clusters/{cluster}/hotTablets/{hot_tablet}".format( + project=project, instance=instance, cluster=cluster, hot_tablet=hot_tablet, + ) + actual = BigtableInstanceAdminClient.hot_tablet_path( + project, instance, cluster, hot_tablet + ) + assert expected == actual + + +def test_parse_hot_tablet_path(): + expected = { + "project": "squid", + "instance": "clam", + "cluster": "whelk", + "hot_tablet": "octopus", + } + path = BigtableInstanceAdminClient.hot_tablet_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableInstanceAdminClient.parse_hot_tablet_path(path) + assert expected == actual + + +def test_instance_path(): + project = "oyster" + instance = "nudibranch" expected = "projects/{project}/instances/{instance}".format( project=project, instance=instance, ) @@ -5833,8 +6230,8 @@ def test_instance_path(): def test_parse_instance_path(): expected = { - "project": "scallop", - "instance": "abalone", + "project": "cuttlefish", + "instance": "mussel", } path = BigtableInstanceAdminClient.instance_path(**expected) @@ -5843,8 +6240,32 @@ def test_parse_instance_path(): assert expected == actual +def test_table_path(): + project = "winkle" + instance = "nautilus" + table = "scallop" + expected = "projects/{project}/instances/{instance}/tables/{table}".format( + project=project, instance=instance, table=table, + ) + actual = BigtableInstanceAdminClient.table_path(project, instance, table) + assert expected == actual + + +def test_parse_table_path(): + expected = { + "project": "abalone", + "instance": "squid", + "table": "clam", + } + path = BigtableInstanceAdminClient.table_path(**expected) + + # Check that the path construction is reversible. 
+ actual = BigtableInstanceAdminClient.parse_table_path(path) + assert expected == actual + + def test_common_billing_account_path(): - billing_account = "squid" + billing_account = "whelk" expected = "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -5854,7 +6275,7 @@ def test_common_billing_account_path(): def test_parse_common_billing_account_path(): expected = { - "billing_account": "clam", + "billing_account": "octopus", } path = BigtableInstanceAdminClient.common_billing_account_path(**expected) @@ -5864,7 +6285,7 @@ def test_parse_common_billing_account_path(): def test_common_folder_path(): - folder = "whelk" + folder = "oyster" expected = "folders/{folder}".format(folder=folder,) actual = BigtableInstanceAdminClient.common_folder_path(folder) assert expected == actual @@ -5872,7 +6293,7 @@ def test_common_folder_path(): def test_parse_common_folder_path(): expected = { - "folder": "octopus", + "folder": "nudibranch", } path = BigtableInstanceAdminClient.common_folder_path(**expected) @@ -5882,7 +6303,7 @@ def test_parse_common_folder_path(): def test_common_organization_path(): - organization = "oyster" + organization = "cuttlefish" expected = "organizations/{organization}".format(organization=organization,) actual = BigtableInstanceAdminClient.common_organization_path(organization) assert expected == actual @@ -5890,7 +6311,7 @@ def test_common_organization_path(): def test_parse_common_organization_path(): expected = { - "organization": "nudibranch", + "organization": "mussel", } path = BigtableInstanceAdminClient.common_organization_path(**expected) @@ -5900,7 +6321,7 @@ def test_parse_common_organization_path(): def test_common_project_path(): - project = "cuttlefish" + project = "winkle" expected = "projects/{project}".format(project=project,) actual = BigtableInstanceAdminClient.common_project_path(project) assert expected == actual @@ -5908,7 +6329,7 @@ def test_common_project_path(): def test_parse_common_project_path(): expected = { - "project": "mussel", + "project": "nautilus", } path = BigtableInstanceAdminClient.common_project_path(**expected) @@ -5918,8 +6339,8 @@ def test_parse_common_project_path(): def test_common_location_path(): - project = "winkle" - location = "nautilus" + project = "scallop" + location = "abalone" expected = "projects/{project}/locations/{location}".format( project=project, location=location, ) @@ -5929,8 +6350,8 @@ def test_common_location_path(): def test_parse_common_location_path(): expected = { - "project": "scallop", - "location": "abalone", + "project": "squid", + "location": "clam", } path = BigtableInstanceAdminClient.common_location_path(**expected) From 38f303bc27bce1134218675f5002453aad1f4aab Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Mon, 28 Mar 2022 23:58:15 +0000 Subject: [PATCH 586/892] chore(python): use black==22.3.0 (#545) Source-Link: https://github.com/googleapis/synthtool/commit/6fab84af09f2cf89a031fd8671d1def6b2931b11 Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:7cffbc10910c3ab1b852c05114a08d374c195a81cdec1d4a67a1d129331d0bfe --- .../.github/.OwlBot.lock.yaml | 2 +- packages/google-cloud-bigtable/docs/conf.py | 5 +- .../google-cloud-bigtable/docs/snippets.py | 2 +- .../docs/snippets_table.py | 2 +- .../google/cloud/bigtable/backup.py | 6 +- .../google/cloud/bigtable/client.py | 10 +- .../google/cloud/bigtable/cluster.py | 2 +- .../google/cloud/bigtable/row_data.py | 10 +- 
.../bigtable_instance_admin/async_client.py | 162 ++- .../bigtable_instance_admin/client.py | 247 +++- .../transports/base.py | 34 +- .../transports/grpc.py | 3 +- .../bigtable_table_admin/async_client.py | 172 ++- .../services/bigtable_table_admin/client.py | 252 +++- .../bigtable_table_admin/transports/base.py | 46 +- .../bigtable_table_admin/transports/grpc.py | 3 +- .../types/bigtable_instance_admin.py | 314 ++++- .../types/bigtable_table_admin.py | 400 ++++-- .../cloud/bigtable_admin_v2/types/common.py | 23 +- .../cloud/bigtable_admin_v2/types/instance.py | 176 ++- .../cloud/bigtable_admin_v2/types/table.py | 210 ++- .../services/bigtable/async_client.py | 53 +- .../bigtable_v2/services/bigtable/client.py | 112 +- .../services/bigtable/transports/base.py | 26 +- .../services/bigtable/transports/grpc.py | 3 +- .../cloud/bigtable_v2/types/bigtable.py | 240 +++- .../google/cloud/bigtable_v2/types/data.py | 378 +++++- packages/google-cloud-bigtable/noxfile.py | 9 +- .../samples/beam/noxfile.py | 2 +- .../samples/hello/noxfile.py | 2 +- .../samples/hello_happybase/noxfile.py | 2 +- .../samples/instanceadmin/noxfile.py | 2 +- .../samples/metricscaler/noxfile.py | 2 +- .../samples/quickstart/noxfile.py | 2 +- .../samples/quickstart_happybase/noxfile.py | 2 +- .../samples/snippets/filters/noxfile.py | 2 +- .../samples/snippets/reads/noxfile.py | 2 +- .../samples/snippets/writes/noxfile.py | 2 +- .../samples/tableadmin/noxfile.py | 2 +- .../tests/system/_helpers.py | 4 +- .../tests/system/conftest.py | 8 +- .../tests/system/test_instance_admin.py | 17 +- .../tests/system/test_table_admin.py | 11 +- .../test_bigtable_instance_admin.py | 979 ++++++++++---- .../test_bigtable_table_admin.py | 1145 +++++++++++++---- .../unit/gapic/bigtable_v2/test_bigtable.py | 375 ++++-- .../tests/unit/test_app_profile.py | 18 +- .../tests/unit/test_backup.py | 36 +- .../tests/unit/test_client.py | 38 +- .../tests/unit/test_cluster.py | 6 +- .../tests/unit/test_column_family.py | 4 +- .../tests/unit/test_instance.py | 27 +- .../tests/unit/test_policy.py | 10 +- .../tests/unit/test_row.py | 52 +- .../tests/unit/test_row_data.py | 42 +- .../tests/unit/test_row_filters.py | 20 +- .../tests/unit/test_table.py | 43 +- 57 files changed, 4447 insertions(+), 1312 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 44c78f7cc12d..87dd00611576 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,4 +13,4 @@ # limitations under the License. 
docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:4e1991042fe54b991db9ca17c8fb386e61b22fe4d1472a568bf0fcac85dcf5d3 + digest: sha256:7cffbc10910c3ab1b852c05114a08d374c195a81cdec1d4a67a1d129331d0bfe diff --git a/packages/google-cloud-bigtable/docs/conf.py b/packages/google-cloud-bigtable/docs/conf.py index 26814b0aa607..34f3a4d08aba 100644 --- a/packages/google-cloud-bigtable/docs/conf.py +++ b/packages/google-cloud-bigtable/docs/conf.py @@ -361,7 +361,10 @@ intersphinx_mapping = { "python": ("https://python.readthedocs.org/en/latest/", None), "google-auth": ("https://googleapis.dev/python/google-auth/latest/", None), - "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None,), + "google.api_core": ( + "https://googleapis.dev/python/google-api-core/latest/", + None, + ), "grpc": ("https://grpc.github.io/grpc/python/", None), "proto-plus": ("https://proto-plus-python.readthedocs.io/en/latest/", None), "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None), diff --git a/packages/google-cloud-bigtable/docs/snippets.py b/packages/google-cloud-bigtable/docs/snippets.py index ee5490afea2f..1d93fdf124fb 100644 --- a/packages/google-cloud-bigtable/docs/snippets.py +++ b/packages/google-cloud-bigtable/docs/snippets.py @@ -55,7 +55,7 @@ PRODUCTION = enums.Instance.Type.PRODUCTION SERVER_NODES = 3 STORAGE_TYPE = enums.StorageType.SSD -LABEL_KEY = u"python-snippet" +LABEL_KEY = "python-snippet" LABEL_STAMP = ( datetime.datetime.utcnow() .replace(microsecond=0, tzinfo=UTC) diff --git a/packages/google-cloud-bigtable/docs/snippets_table.py b/packages/google-cloud-bigtable/docs/snippets_table.py index 4c3304fd0d73..f27260425321 100644 --- a/packages/google-cloud-bigtable/docs/snippets_table.py +++ b/packages/google-cloud-bigtable/docs/snippets_table.py @@ -52,7 +52,7 @@ PRODUCTION = enums.Instance.Type.PRODUCTION SERVER_NODES = 3 STORAGE_TYPE = enums.StorageType.SSD -LABEL_KEY = u"python-snippet" +LABEL_KEY = "python-snippet" LABEL_STAMP = ( datetime.datetime.utcnow() .replace(microsecond=0, tzinfo=UTC) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/backup.py b/packages/google-cloud-bigtable/google/cloud/bigtable/backup.py index c2b5ec9ee4be..6986d730a791 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/backup.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/backup.py @@ -382,7 +382,8 @@ def update_expire_time(self, new_expire_time): :param new_expire_time: the new expiration time timestamp """ backup_update = table.Backup( - name=self.name, expire_time=_datetime_to_pb_timestamp(new_expire_time), + name=self.name, + expire_time=_datetime_to_pb_timestamp(new_expire_time), ) update_mask = field_mask_pb2.FieldMask(paths=["expire_time"]) api = self._instance._client.table_admin_client @@ -426,7 +427,8 @@ def restore(self, table_id, instance_id=None): api = self._instance._client.table_admin_client if instance_id: parent = BigtableTableAdminClient.instance_path( - project=self._instance._client.project, instance=instance_id, + project=self._instance._client.project, + instance=instance_id, ) else: parent = self._instance.name diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py index c50c20b0f7db..c82a268c63fa 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py @@ -193,7 +193,9 @@ def __init__( 
self._channel = channel self.SCOPE = self._get_scopes() super(Client, self).__init__( - project=project, credentials=credentials, client_options=client_options, + project=project, + credentials=credentials, + client_options=client_options, ) def _get_scopes(self): @@ -276,7 +278,8 @@ def _create_gapic_client_channel(self, client_class, grpc_transport): if self._emulator_host is not None: channel = self._emulator_channel( - transport=grpc_transport, options=_GRPC_CHANNEL_OPTIONS, + transport=grpc_transport, + options=_GRPC_CHANNEL_OPTIONS, ) else: channel = grpc_transport.create_channel( @@ -327,7 +330,8 @@ def table_data_client(self): """ if self._table_data_client is None: transport = self._create_gapic_client_channel( - bigtable_v2.BigtableClient, BigtableGrpcTransport, + bigtable_v2.BigtableClient, + BigtableGrpcTransport, ) klass = _create_gapic_client( bigtable_v2.BigtableClient, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py index 1d0af2c693ce..11fb5492dad4 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/cluster.py @@ -509,7 +509,7 @@ def delete(self): client.instance_admin_client.delete_cluster(request={"name": self.name}) def _to_pb(self): - """ Create cluster proto buff message for API calls """ + """Create cluster proto buff message for API calls""" client = self._instance._client location = client.instance_admin_client.common_location_path( client.project, self.location_id diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py index 3abb6c68cbeb..0517f82e1098 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py @@ -649,7 +649,7 @@ def build_updated_request(self): return resume_request def _filter_rows_keys(self): - """ Helper for :meth:`build_updated_request`""" + """Helper for :meth:`build_updated_request`""" return [ row_key for row_key in self.message.rows.row_keys @@ -657,7 +657,7 @@ def _filter_rows_keys(self): ] def _filter_row_ranges(self): - """ Helper for :meth:`build_updated_request`""" + """Helper for :meth:`build_updated_request`""" new_row_ranges = [] for row_range in self.message.rows.row_ranges: @@ -688,17 +688,17 @@ def _filter_row_ranges(self): return new_row_ranges def _key_already_read(self, key): - """ Helper for :meth:`_filter_row_ranges`""" + """Helper for :meth:`_filter_row_ranges`""" return key <= self.last_scanned_key @staticmethod def _start_key_set(row_range): - """ Helper for :meth:`_filter_row_ranges`""" + """Helper for :meth:`_filter_row_ranges`""" return row_range.start_key_open or row_range.start_key_closed @staticmethod def _end_key_set(row_range): - """ Helper for :meth:`_filter_row_ranges`""" + """Helper for :meth:`_filter_row_ranges`""" return row_range.end_key_open or row_range.end_key_closed diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py index af8e62857295..ed5862d3971f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py +++ 
b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ -348,7 +348,12 @@ async def create_instance( ) # Send the request. - response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Wrap the response in an operation future. response = operation_async.from_gapic( @@ -441,7 +446,12 @@ async def get_instance( ) # Send the request. - response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response @@ -523,7 +533,12 @@ async def list_instances( ) # Send the request. - response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response @@ -592,7 +607,12 @@ async def update_instance( ) # Send the request. - response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response @@ -695,7 +715,12 @@ async def partial_update_instance( ) # Send the request. - response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Wrap the response in an operation future. response = operation_async.from_gapic( @@ -770,7 +795,10 @@ async def delete_instance( # Send the request. await rpc( - request, retry=retry, timeout=timeout, metadata=metadata, + request, + retry=retry, + timeout=timeout, + metadata=metadata, ) async def create_cluster( @@ -872,7 +900,12 @@ async def create_cluster( ) # Send the request. - response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Wrap the response in an operation future. response = operation_async.from_gapic( @@ -964,7 +997,12 @@ async def get_cluster( ) # Send the request. - response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response @@ -1048,7 +1086,12 @@ async def list_clusters( ) # Send the request. - response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response @@ -1117,7 +1160,12 @@ async def update_cluster( ) # Send the request. - response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Wrap the response in an operation future. response = operation_async.from_gapic( @@ -1237,7 +1285,12 @@ async def partial_update_cluster( ) # Send the request. - response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Wrap the response in an operation future. response = operation_async.from_gapic( @@ -1312,7 +1365,10 @@ async def delete_cluster( # Send the request. 
await rpc( - request, retry=retry, timeout=timeout, metadata=metadata, + request, + retry=retry, + timeout=timeout, + metadata=metadata, ) async def create_app_profile( @@ -1405,7 +1461,12 @@ async def create_app_profile( ) # Send the request. - response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response @@ -1488,7 +1549,12 @@ async def get_app_profile( ) # Send the request. - response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response @@ -1576,12 +1642,20 @@ async def list_app_profiles( ) # Send the request. - response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # This method is paged; wrap the response in a pager, which provides # an `__aiter__` convenience method. response = pagers.ListAppProfilesAsyncPager( - method=rpc, request=request, response=response, metadata=metadata, + method=rpc, + request=request, + response=response, + metadata=metadata, ) # Done; return the response. @@ -1678,7 +1752,12 @@ async def update_app_profile( ) # Send the request. - response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Wrap the response in an operation future. response = operation_async.from_gapic( @@ -1753,7 +1832,10 @@ async def delete_app_profile( # Send the request. await rpc( - request, retry=retry, timeout=timeout, metadata=metadata, + request, + retry=retry, + timeout=timeout, + metadata=metadata, ) async def get_iam_policy( @@ -1862,7 +1944,9 @@ async def get_iam_policy( if isinstance(request, dict): request = iam_policy_pb2.GetIamPolicyRequest(**request) elif not request: - request = iam_policy_pb2.GetIamPolicyRequest(resource=resource,) + request = iam_policy_pb2.GetIamPolicyRequest( + resource=resource, + ) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. @@ -1889,7 +1973,12 @@ async def get_iam_policy( ) # Send the request. - response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response @@ -1999,7 +2088,9 @@ async def set_iam_policy( if isinstance(request, dict): request = iam_policy_pb2.SetIamPolicyRequest(**request) elif not request: - request = iam_policy_pb2.SetIamPolicyRequest(resource=resource,) + request = iam_policy_pb2.SetIamPolicyRequest( + resource=resource, + ) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. @@ -2016,7 +2107,12 @@ async def set_iam_policy( ) # Send the request. - response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. 
return response @@ -2082,7 +2178,8 @@ async def test_iam_permissions( request = iam_policy_pb2.TestIamPermissionsRequest(**request) elif not request: request = iam_policy_pb2.TestIamPermissionsRequest( - resource=resource, permissions=permissions, + resource=resource, + permissions=permissions, ) # Wrap the RPC method; this adds retry and timeout information, @@ -2110,7 +2207,12 @@ async def test_iam_permissions( ) # Send the request. - response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response @@ -2196,12 +2298,20 @@ async def list_hot_tablets( ) # Send the request. - response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # This method is paged; wrap the response in a pager, which provides # an `__aiter__` convenience method. response = pagers.ListHotTabletsAsyncPager( - method=rpc, request=request, response=response, metadata=metadata, + method=rpc, + request=request, + response=response, + metadata=metadata, ) # Done; return the response. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py index f149ccf9d55a..cc9317f6df59 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -65,7 +65,8 @@ class BigtableInstanceAdminClientMeta(type): _transport_registry["grpc_asyncio"] = BigtableInstanceAdminGrpcAsyncIOTransport def get_transport_class( - cls, label: str = None, + cls, + label: str = None, ) -> Type[BigtableInstanceAdminTransport]: """Returns an appropriate transport class. 
@@ -175,10 +176,18 @@ def transport(self) -> BigtableInstanceAdminTransport: return self._transport @staticmethod - def app_profile_path(project: str, instance: str, app_profile: str,) -> str: + def app_profile_path( + project: str, + instance: str, + app_profile: str, + ) -> str: """Returns a fully-qualified app_profile string.""" - return "projects/{project}/instances/{instance}/appProfiles/{app_profile}".format( - project=project, instance=instance, app_profile=app_profile, + return ( + "projects/{project}/instances/{instance}/appProfiles/{app_profile}".format( + project=project, + instance=instance, + app_profile=app_profile, + ) ) @staticmethod @@ -191,10 +200,16 @@ def parse_app_profile_path(path: str) -> Dict[str, str]: return m.groupdict() if m else {} @staticmethod - def cluster_path(project: str, instance: str, cluster: str,) -> str: + def cluster_path( + project: str, + instance: str, + cluster: str, + ) -> str: """Returns a fully-qualified cluster string.""" return "projects/{project}/instances/{instance}/clusters/{cluster}".format( - project=project, instance=instance, cluster=cluster, + project=project, + instance=instance, + cluster=cluster, ) @staticmethod @@ -208,7 +223,10 @@ def parse_cluster_path(path: str) -> Dict[str, str]: @staticmethod def crypto_key_path( - project: str, location: str, key_ring: str, crypto_key: str, + project: str, + location: str, + key_ring: str, + crypto_key: str, ) -> str: """Returns a fully-qualified crypto_key string.""" return "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}".format( @@ -229,11 +247,17 @@ def parse_crypto_key_path(path: str) -> Dict[str, str]: @staticmethod def hot_tablet_path( - project: str, instance: str, cluster: str, hot_tablet: str, + project: str, + instance: str, + cluster: str, + hot_tablet: str, ) -> str: """Returns a fully-qualified hot_tablet string.""" return "projects/{project}/instances/{instance}/clusters/{cluster}/hotTablets/{hot_tablet}".format( - project=project, instance=instance, cluster=cluster, hot_tablet=hot_tablet, + project=project, + instance=instance, + cluster=cluster, + hot_tablet=hot_tablet, ) @staticmethod @@ -246,10 +270,14 @@ def parse_hot_tablet_path(path: str) -> Dict[str, str]: return m.groupdict() if m else {} @staticmethod - def instance_path(project: str, instance: str,) -> str: + def instance_path( + project: str, + instance: str, + ) -> str: """Returns a fully-qualified instance string.""" return "projects/{project}/instances/{instance}".format( - project=project, instance=instance, + project=project, + instance=instance, ) @staticmethod @@ -259,10 +287,16 @@ def parse_instance_path(path: str) -> Dict[str, str]: return m.groupdict() if m else {} @staticmethod - def table_path(project: str, instance: str, table: str,) -> str: + def table_path( + project: str, + instance: str, + table: str, + ) -> str: """Returns a fully-qualified table string.""" return "projects/{project}/instances/{instance}/tables/{table}".format( - project=project, instance=instance, table=table, + project=project, + instance=instance, + table=table, ) @staticmethod @@ -275,7 +309,9 @@ def parse_table_path(path: str) -> Dict[str, str]: return m.groupdict() if m else {} @staticmethod - def common_billing_account_path(billing_account: str,) -> str: + def common_billing_account_path( + billing_account: str, + ) -> str: """Returns a fully-qualified billing_account string.""" return "billingAccounts/{billing_account}".format( billing_account=billing_account, @@ -288,9 +324,13 @@ def 
parse_common_billing_account_path(path: str) -> Dict[str, str]: return m.groupdict() if m else {} @staticmethod - def common_folder_path(folder: str,) -> str: + def common_folder_path( + folder: str, + ) -> str: """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder,) + return "folders/{folder}".format( + folder=folder, + ) @staticmethod def parse_common_folder_path(path: str) -> Dict[str, str]: @@ -299,9 +339,13 @@ def parse_common_folder_path(path: str) -> Dict[str, str]: return m.groupdict() if m else {} @staticmethod - def common_organization_path(organization: str,) -> str: + def common_organization_path( + organization: str, + ) -> str: """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization,) + return "organizations/{organization}".format( + organization=organization, + ) @staticmethod def parse_common_organization_path(path: str) -> Dict[str, str]: @@ -310,9 +354,13 @@ def parse_common_organization_path(path: str) -> Dict[str, str]: return m.groupdict() if m else {} @staticmethod - def common_project_path(project: str,) -> str: + def common_project_path( + project: str, + ) -> str: """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project,) + return "projects/{project}".format( + project=project, + ) @staticmethod def parse_common_project_path(path: str) -> Dict[str, str]: @@ -321,10 +369,14 @@ def parse_common_project_path(path: str) -> Dict[str, str]: return m.groupdict() if m else {} @staticmethod - def common_location_path(project: str, location: str,) -> str: + def common_location_path( + project: str, + location: str, + ) -> str: """Returns a fully-qualified location string.""" return "projects/{project}/locations/{location}".format( - project=project, location=location, + project=project, + location=location, ) @staticmethod @@ -611,7 +663,12 @@ def create_instance( ) # Send the request. - response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Wrap the response in an operation future. response = operation.from_gapic( @@ -694,7 +751,12 @@ def get_instance( ) # Send the request. - response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response @@ -766,7 +828,12 @@ def list_instances( ) # Send the request. - response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response @@ -826,7 +893,12 @@ def update_instance( ) # Send the request. - response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response @@ -921,7 +993,12 @@ def partial_update_instance( ) # Send the request. - response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Wrap the response in an operation future. response = operation.from_gapic( @@ -996,7 +1073,10 @@ def delete_instance( # Send the request. 
rpc( - request, retry=retry, timeout=timeout, metadata=metadata, + request, + retry=retry, + timeout=timeout, + metadata=metadata, ) def create_cluster( @@ -1098,7 +1178,12 @@ def create_cluster( ) # Send the request. - response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Wrap the response in an operation future. response = operation.from_gapic( @@ -1180,7 +1265,12 @@ def get_cluster( ) # Send the request. - response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response @@ -1254,7 +1344,12 @@ def list_clusters( ) # Send the request. - response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response @@ -1314,7 +1409,12 @@ def update_cluster( ) # Send the request. - response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Wrap the response in an operation future. response = operation.from_gapic( @@ -1424,7 +1524,12 @@ def partial_update_cluster( ) # Send the request. - response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Wrap the response in an operation future. response = operation.from_gapic( @@ -1499,7 +1604,10 @@ def delete_cluster( # Send the request. rpc( - request, retry=retry, timeout=timeout, metadata=metadata, + request, + retry=retry, + timeout=timeout, + metadata=metadata, ) def create_app_profile( @@ -1592,7 +1700,12 @@ def create_app_profile( ) # Send the request. - response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response @@ -1665,7 +1778,12 @@ def get_app_profile( ) # Send the request. - response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response @@ -1743,12 +1861,20 @@ def list_app_profiles( ) # Send the request. - response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.ListAppProfilesPager( - method=rpc, request=request, response=response, metadata=metadata, + method=rpc, + request=request, + response=response, + metadata=metadata, ) # Done; return the response. @@ -1835,7 +1961,12 @@ def update_app_profile( ) # Send the request. - response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Wrap the response in an operation future. response = operation.from_gapic( @@ -1910,7 +2041,10 @@ def delete_app_profile( # Send the request. rpc( - request, retry=retry, timeout=timeout, metadata=metadata, + request, + retry=retry, + timeout=timeout, + metadata=metadata, ) def get_iam_policy( @@ -2035,7 +2169,12 @@ def get_iam_policy( ) # Send the request. 
- response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response @@ -2161,7 +2300,12 @@ def set_iam_policy( ) # Send the request. - response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response @@ -2244,7 +2388,12 @@ def test_iam_permissions( ) # Send the request. - response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response @@ -2320,12 +2469,20 @@ def list_hot_tablets( ) # Send the request. - response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.ListHotTabletsPager( - method=rpc, request=request, response=response, metadata=metadata, + method=rpc, + request=request, + response=response, + metadata=metadata, ) # Done; return the response. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py index 2330879c7057..8084f4e21184 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py @@ -135,7 +135,9 @@ def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. 
self._wrapped_methods = { self.create_instance: gapic_v1.method.wrap_method( - self.create_instance, default_timeout=300.0, client_info=client_info, + self.create_instance, + default_timeout=300.0, + client_info=client_info, ), self.get_instance: gapic_v1.method.wrap_method( self.get_instance, @@ -198,10 +200,14 @@ def _prep_wrapped_messages(self, client_info): client_info=client_info, ), self.delete_instance: gapic_v1.method.wrap_method( - self.delete_instance, default_timeout=60.0, client_info=client_info, + self.delete_instance, + default_timeout=60.0, + client_info=client_info, ), self.create_cluster: gapic_v1.method.wrap_method( - self.create_cluster, default_timeout=60.0, client_info=client_info, + self.create_cluster, + default_timeout=60.0, + client_info=client_info, ), self.get_cluster: gapic_v1.method.wrap_method( self.get_cluster, @@ -264,10 +270,14 @@ def _prep_wrapped_messages(self, client_info): client_info=client_info, ), self.delete_cluster: gapic_v1.method.wrap_method( - self.delete_cluster, default_timeout=60.0, client_info=client_info, + self.delete_cluster, + default_timeout=60.0, + client_info=client_info, ), self.create_app_profile: gapic_v1.method.wrap_method( - self.create_app_profile, default_timeout=60.0, client_info=client_info, + self.create_app_profile, + default_timeout=60.0, + client_info=client_info, ), self.get_app_profile: gapic_v1.method.wrap_method( self.get_app_profile, @@ -315,7 +325,9 @@ def _prep_wrapped_messages(self, client_info): client_info=client_info, ), self.delete_app_profile: gapic_v1.method.wrap_method( - self.delete_app_profile, default_timeout=60.0, client_info=client_info, + self.delete_app_profile, + default_timeout=60.0, + client_info=client_info, ), self.get_iam_policy: gapic_v1.method.wrap_method( self.get_iam_policy, @@ -333,7 +345,9 @@ def _prep_wrapped_messages(self, client_info): client_info=client_info, ), self.set_iam_policy: gapic_v1.method.wrap_method( - self.set_iam_policy, default_timeout=60.0, client_info=client_info, + self.set_iam_policy, + default_timeout=60.0, + client_info=client_info, ), self.test_iam_permissions: gapic_v1.method.wrap_method( self.test_iam_permissions, @@ -370,9 +384,9 @@ def _prep_wrapped_messages(self, client_info): def close(self): """Closes resources associated with the transport. - .. warning:: - Only call this method if the transport is NOT shared - with other clients - this may cause errors in other clients! + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! """ raise NotImplementedError() diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py index e33cc473f910..005fa38ba0b8 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py @@ -234,8 +234,7 @@ def create_channel( @property def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. 
- """ + """Return the channel designed to connect to this service.""" return self._grpc_channel @property diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index bef292dc83a9..47f4754433a0 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -321,7 +321,12 @@ async def create_table( ) # Send the request. - response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response @@ -435,7 +440,12 @@ async def create_table_from_snapshot( ) # Send the request. - response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Wrap the response in an operation future. response = operation_async.from_gapic( @@ -528,12 +538,20 @@ async def list_tables( ) # Send the request. - response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # This method is paged; wrap the response in a pager, which provides # an `__aiter__` convenience method. response = pagers.ListTablesAsyncPager( - method=rpc, request=request, response=response, metadata=metadata, + method=rpc, + request=request, + response=response, + metadata=metadata, ) # Done; return the response. @@ -618,7 +636,12 @@ async def get_table( ) # Send the request. - response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response @@ -686,7 +709,10 @@ async def delete_table( # Send the request. await rpc( - request, retry=retry, timeout=timeout, metadata=metadata, + request, + retry=retry, + timeout=timeout, + metadata=metadata, ) async def modify_column_families( @@ -779,7 +805,12 @@ async def modify_column_families( ) # Send the request. - response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response @@ -826,7 +857,10 @@ async def drop_row_range( # Send the request. await rpc( - request, retry=retry, timeout=timeout, metadata=metadata, + request, + retry=retry, + timeout=timeout, + metadata=metadata, ) async def generate_consistency_token( @@ -912,7 +946,12 @@ async def generate_consistency_token( ) # Send the request. - response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response @@ -1007,7 +1046,12 @@ async def check_consistency( ) # Send the request. - response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response @@ -1135,7 +1179,12 @@ async def snapshot_table( ) # Send the request. 
- response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Wrap the response in an operation future. response = operation_async.from_gapic( @@ -1248,7 +1297,12 @@ async def get_snapshot( ) # Send the request. - response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response @@ -1356,12 +1410,20 @@ async def list_snapshots( ) # Send the request. - response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # This method is paged; wrap the response in a pager, which provides # an `__aiter__` convenience method. response = pagers.ListSnapshotsAsyncPager( - method=rpc, request=request, response=response, metadata=metadata, + method=rpc, + request=request, + response=response, + metadata=metadata, ) # Done; return the response. @@ -1441,7 +1503,10 @@ async def delete_snapshot( # Send the request. await rpc( - request, retry=retry, timeout=timeout, metadata=metadata, + request, + retry=retry, + timeout=timeout, + metadata=metadata, ) async def create_backup( @@ -1546,7 +1611,12 @@ async def create_backup( ) # Send the request. - response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Wrap the response in an operation future. response = operation_async.from_gapic( @@ -1634,7 +1704,12 @@ async def get_backup( ) # Send the request. - response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response @@ -1724,7 +1799,12 @@ async def update_backup( ) # Send the request. - response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response @@ -1791,7 +1871,10 @@ async def delete_backup( # Send the request. await rpc( - request, retry=retry, timeout=timeout, metadata=metadata, + request, + retry=retry, + timeout=timeout, + metadata=metadata, ) async def list_backups( @@ -1878,12 +1961,20 @@ async def list_backups( ) # Send the request. - response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # This method is paged; wrap the response in a pager, which provides # an `__aiter__` convenience method. response = pagers.ListBackupsAsyncPager( - method=rpc, request=request, response=response, metadata=metadata, + method=rpc, + request=request, + response=response, + metadata=metadata, ) # Done; return the response. @@ -1944,7 +2035,12 @@ async def restore_table( ) # Send the request. - response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Wrap the response in an operation future. 
response = operation_async.from_gapic( @@ -2063,7 +2159,9 @@ async def get_iam_policy( if isinstance(request, dict): request = iam_policy_pb2.GetIamPolicyRequest(**request) elif not request: - request = iam_policy_pb2.GetIamPolicyRequest(resource=resource,) + request = iam_policy_pb2.GetIamPolicyRequest( + resource=resource, + ) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. @@ -2090,7 +2188,12 @@ async def get_iam_policy( ) # Send the request. - response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response @@ -2200,7 +2303,9 @@ async def set_iam_policy( if isinstance(request, dict): request = iam_policy_pb2.SetIamPolicyRequest(**request) elif not request: - request = iam_policy_pb2.SetIamPolicyRequest(resource=resource,) + request = iam_policy_pb2.SetIamPolicyRequest( + resource=resource, + ) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. @@ -2217,7 +2322,12 @@ async def set_iam_policy( ) # Send the request. - response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response @@ -2283,7 +2393,8 @@ async def test_iam_permissions( request = iam_policy_pb2.TestIamPermissionsRequest(**request) elif not request: request = iam_policy_pb2.TestIamPermissionsRequest( - resource=resource, permissions=permissions, + resource=resource, + permissions=permissions, ) # Wrap the RPC method; this adds retry and timeout information, @@ -2311,7 +2422,12 @@ async def test_iam_permissions( ) # Send the request. - response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index 6ba7ca063591..4d4d82f26973 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -64,7 +64,8 @@ class BigtableTableAdminClientMeta(type): _transport_registry["grpc_asyncio"] = BigtableTableAdminGrpcAsyncIOTransport def get_transport_class( - cls, label: str = None, + cls, + label: str = None, ) -> Type[BigtableTableAdminTransport]: """Returns an appropriate transport class. 
@@ -175,10 +176,18 @@ def transport(self) -> BigtableTableAdminTransport: return self._transport @staticmethod - def backup_path(project: str, instance: str, cluster: str, backup: str,) -> str: + def backup_path( + project: str, + instance: str, + cluster: str, + backup: str, + ) -> str: """Returns a fully-qualified backup string.""" return "projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}".format( - project=project, instance=instance, cluster=cluster, backup=backup, + project=project, + instance=instance, + cluster=cluster, + backup=backup, ) @staticmethod @@ -191,10 +200,16 @@ def parse_backup_path(path: str) -> Dict[str, str]: return m.groupdict() if m else {} @staticmethod - def cluster_path(project: str, instance: str, cluster: str,) -> str: + def cluster_path( + project: str, + instance: str, + cluster: str, + ) -> str: """Returns a fully-qualified cluster string.""" return "projects/{project}/instances/{instance}/clusters/{cluster}".format( - project=project, instance=instance, cluster=cluster, + project=project, + instance=instance, + cluster=cluster, ) @staticmethod @@ -233,10 +248,14 @@ def parse_crypto_key_version_path(path: str) -> Dict[str, str]: return m.groupdict() if m else {} @staticmethod - def instance_path(project: str, instance: str,) -> str: + def instance_path( + project: str, + instance: str, + ) -> str: """Returns a fully-qualified instance string.""" return "projects/{project}/instances/{instance}".format( - project=project, instance=instance, + project=project, + instance=instance, ) @staticmethod @@ -246,10 +265,18 @@ def parse_instance_path(path: str) -> Dict[str, str]: return m.groupdict() if m else {} @staticmethod - def snapshot_path(project: str, instance: str, cluster: str, snapshot: str,) -> str: + def snapshot_path( + project: str, + instance: str, + cluster: str, + snapshot: str, + ) -> str: """Returns a fully-qualified snapshot string.""" return "projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}".format( - project=project, instance=instance, cluster=cluster, snapshot=snapshot, + project=project, + instance=instance, + cluster=cluster, + snapshot=snapshot, ) @staticmethod @@ -262,10 +289,16 @@ def parse_snapshot_path(path: str) -> Dict[str, str]: return m.groupdict() if m else {} @staticmethod - def table_path(project: str, instance: str, table: str,) -> str: + def table_path( + project: str, + instance: str, + table: str, + ) -> str: """Returns a fully-qualified table string.""" return "projects/{project}/instances/{instance}/tables/{table}".format( - project=project, instance=instance, table=table, + project=project, + instance=instance, + table=table, ) @staticmethod @@ -278,7 +311,9 @@ def parse_table_path(path: str) -> Dict[str, str]: return m.groupdict() if m else {} @staticmethod - def common_billing_account_path(billing_account: str,) -> str: + def common_billing_account_path( + billing_account: str, + ) -> str: """Returns a fully-qualified billing_account string.""" return "billingAccounts/{billing_account}".format( billing_account=billing_account, @@ -291,9 +326,13 @@ def parse_common_billing_account_path(path: str) -> Dict[str, str]: return m.groupdict() if m else {} @staticmethod - def common_folder_path(folder: str,) -> str: + def common_folder_path( + folder: str, + ) -> str: """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder,) + return "folders/{folder}".format( + folder=folder, + ) @staticmethod def parse_common_folder_path(path: str) -> 
Dict[str, str]: @@ -302,9 +341,13 @@ def parse_common_folder_path(path: str) -> Dict[str, str]: return m.groupdict() if m else {} @staticmethod - def common_organization_path(organization: str,) -> str: + def common_organization_path( + organization: str, + ) -> str: """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization,) + return "organizations/{organization}".format( + organization=organization, + ) @staticmethod def parse_common_organization_path(path: str) -> Dict[str, str]: @@ -313,9 +356,13 @@ def parse_common_organization_path(path: str) -> Dict[str, str]: return m.groupdict() if m else {} @staticmethod - def common_project_path(project: str,) -> str: + def common_project_path( + project: str, + ) -> str: """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project,) + return "projects/{project}".format( + project=project, + ) @staticmethod def parse_common_project_path(path: str) -> Dict[str, str]: @@ -324,10 +371,14 @@ def parse_common_project_path(path: str) -> Dict[str, str]: return m.groupdict() if m else {} @staticmethod - def common_location_path(project: str, location: str,) -> str: + def common_location_path( + project: str, + location: str, + ) -> str: """Returns a fully-qualified location string.""" return "projects/{project}/locations/{location}".format( - project=project, location=location, + project=project, + location=location, ) @staticmethod @@ -592,7 +643,12 @@ def create_table( ) # Send the request. - response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response @@ -708,7 +764,12 @@ def create_table_from_snapshot( ) # Send the request. - response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Wrap the response in an operation future. response = operation.from_gapic( @@ -791,12 +852,20 @@ def list_tables( ) # Send the request. - response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.ListTablesPager( - method=rpc, request=request, response=response, metadata=metadata, + method=rpc, + request=request, + response=response, + metadata=metadata, ) # Done; return the response. @@ -871,7 +940,12 @@ def get_table( ) # Send the request. - response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response @@ -939,7 +1013,10 @@ def delete_table( # Send the request. rpc( - request, retry=retry, timeout=timeout, metadata=metadata, + request, + retry=retry, + timeout=timeout, + metadata=metadata, ) def modify_column_families( @@ -1032,7 +1109,12 @@ def modify_column_families( ) # Send the request. - response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response @@ -1080,7 +1162,10 @@ def drop_row_range( # Send the request. 
rpc( - request, retry=retry, timeout=timeout, metadata=metadata, + request, + retry=retry, + timeout=timeout, + metadata=metadata, ) def generate_consistency_token( @@ -1160,7 +1245,12 @@ def generate_consistency_token( ) # Send the request. - response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response @@ -1245,7 +1335,12 @@ def check_consistency( ) # Send the request. - response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response @@ -1373,7 +1468,12 @@ def snapshot_table( ) # Send the request. - response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Wrap the response in an operation future. response = operation.from_gapic( @@ -1476,7 +1576,12 @@ def get_snapshot( ) # Send the request. - response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response @@ -1574,12 +1679,20 @@ def list_snapshots( ) # Send the request. - response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. response = pagers.ListSnapshotsPager( - method=rpc, request=request, response=response, metadata=metadata, + method=rpc, + request=request, + response=response, + metadata=metadata, ) # Done; return the response. @@ -1659,7 +1772,10 @@ def delete_snapshot( # Send the request. rpc( - request, retry=retry, timeout=timeout, metadata=metadata, + request, + retry=retry, + timeout=timeout, + metadata=metadata, ) def create_backup( @@ -1764,7 +1880,12 @@ def create_backup( ) # Send the request. - response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Wrap the response in an operation future. response = operation.from_gapic( @@ -1842,7 +1963,12 @@ def get_backup( ) # Send the request. - response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response @@ -1932,7 +2058,12 @@ def update_backup( ) # Send the request. - response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response @@ -1999,7 +2130,10 @@ def delete_backup( # Send the request. rpc( - request, retry=retry, timeout=timeout, metadata=metadata, + request, + retry=retry, + timeout=timeout, + metadata=metadata, ) def list_backups( @@ -2076,12 +2210,20 @@ def list_backups( ) # Send the request. - response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. 
response = pagers.ListBackupsPager( - method=rpc, request=request, response=response, metadata=metadata, + method=rpc, + request=request, + response=response, + metadata=metadata, ) # Done; return the response. @@ -2143,7 +2285,12 @@ def restore_table( ) # Send the request. - response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Wrap the response in an operation future. response = operation.from_gapic( @@ -2278,7 +2425,12 @@ def get_iam_policy( ) # Send the request. - response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response @@ -2404,7 +2556,12 @@ def set_iam_policy( ) # Send the request. - response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response @@ -2487,7 +2644,12 @@ def test_iam_permissions( ) # Send the request. - response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py index ac5c17d0fef5..d53b3d8f31a6 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py @@ -135,7 +135,9 @@ def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. 
self._wrapped_methods = { self.create_table: gapic_v1.method.wrap_method( - self.create_table, default_timeout=300.0, client_info=client_info, + self.create_table, + default_timeout=300.0, + client_info=client_info, ), self.create_table_from_snapshot: gapic_v1.method.wrap_method( self.create_table_from_snapshot, @@ -173,7 +175,9 @@ def _prep_wrapped_messages(self, client_info): client_info=client_info, ), self.delete_table: gapic_v1.method.wrap_method( - self.delete_table, default_timeout=60.0, client_info=client_info, + self.delete_table, + default_timeout=60.0, + client_info=client_info, ), self.modify_column_families: gapic_v1.method.wrap_method( self.modify_column_families, @@ -181,7 +185,9 @@ def _prep_wrapped_messages(self, client_info): client_info=client_info, ), self.drop_row_range: gapic_v1.method.wrap_method( - self.drop_row_range, default_timeout=3600.0, client_info=client_info, + self.drop_row_range, + default_timeout=3600.0, + client_info=client_info, ), self.generate_consistency_token: gapic_v1.method.wrap_method( self.generate_consistency_token, @@ -214,7 +220,9 @@ def _prep_wrapped_messages(self, client_info): client_info=client_info, ), self.snapshot_table: gapic_v1.method.wrap_method( - self.snapshot_table, default_timeout=None, client_info=client_info, + self.snapshot_table, + default_timeout=None, + client_info=client_info, ), self.get_snapshot: gapic_v1.method.wrap_method( self.get_snapshot, @@ -247,10 +255,14 @@ def _prep_wrapped_messages(self, client_info): client_info=client_info, ), self.delete_snapshot: gapic_v1.method.wrap_method( - self.delete_snapshot, default_timeout=60.0, client_info=client_info, + self.delete_snapshot, + default_timeout=60.0, + client_info=client_info, ), self.create_backup: gapic_v1.method.wrap_method( - self.create_backup, default_timeout=60.0, client_info=client_info, + self.create_backup, + default_timeout=60.0, + client_info=client_info, ), self.get_backup: gapic_v1.method.wrap_method( self.get_backup, @@ -268,10 +280,14 @@ def _prep_wrapped_messages(self, client_info): client_info=client_info, ), self.update_backup: gapic_v1.method.wrap_method( - self.update_backup, default_timeout=60.0, client_info=client_info, + self.update_backup, + default_timeout=60.0, + client_info=client_info, ), self.delete_backup: gapic_v1.method.wrap_method( - self.delete_backup, default_timeout=60.0, client_info=client_info, + self.delete_backup, + default_timeout=60.0, + client_info=client_info, ), self.list_backups: gapic_v1.method.wrap_method( self.list_backups, @@ -289,7 +305,9 @@ def _prep_wrapped_messages(self, client_info): client_info=client_info, ), self.restore_table: gapic_v1.method.wrap_method( - self.restore_table, default_timeout=60.0, client_info=client_info, + self.restore_table, + default_timeout=60.0, + client_info=client_info, ), self.get_iam_policy: gapic_v1.method.wrap_method( self.get_iam_policy, @@ -307,7 +325,9 @@ def _prep_wrapped_messages(self, client_info): client_info=client_info, ), self.set_iam_policy: gapic_v1.method.wrap_method( - self.set_iam_policy, default_timeout=60.0, client_info=client_info, + self.set_iam_policy, + default_timeout=60.0, + client_info=client_info, ), self.test_iam_permissions: gapic_v1.method.wrap_method( self.test_iam_permissions, @@ -329,9 +349,9 @@ def _prep_wrapped_messages(self, client_info): def close(self): """Closes resources associated with the transport. - .. warning:: - Only call this method if the transport is NOT shared - with other clients - this may cause errors in other clients! + .. 
warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! """ raise NotImplementedError() diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py index 8cd13e806faa..04b0e37bf649 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py @@ -236,8 +236,7 @@ def create_channel( @property def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ + """Return the channel designed to connect to this service.""" return self._grpc_channel @property diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py index b4d7c55aa77a..924deeb46f11 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py @@ -77,11 +77,24 @@ class CreateInstanceRequest(proto.Message): at most four clusters can be specified. """ - parent = proto.Field(proto.STRING, number=1,) - instance_id = proto.Field(proto.STRING, number=2,) - instance = proto.Field(proto.MESSAGE, number=3, message=gba_instance.Instance,) + parent = proto.Field( + proto.STRING, + number=1, + ) + instance_id = proto.Field( + proto.STRING, + number=2, + ) + instance = proto.Field( + proto.MESSAGE, + number=3, + message=gba_instance.Instance, + ) clusters = proto.MapField( - proto.STRING, proto.MESSAGE, number=4, message=gba_instance.Cluster, + proto.STRING, + proto.MESSAGE, + number=4, + message=gba_instance.Cluster, ) @@ -94,7 +107,10 @@ class GetInstanceRequest(proto.Message): are of the form ``projects/{project}/instances/{instance}``. """ - name = proto.Field(proto.STRING, number=1,) + name = proto.Field( + proto.STRING, + number=1, + ) class ListInstancesRequest(proto.Message): @@ -109,8 +125,14 @@ class ListInstancesRequest(proto.Message): DEPRECATED: This field is unused and ignored. """ - parent = proto.Field(proto.STRING, number=1,) - page_token = proto.Field(proto.STRING, number=2,) + parent = proto.Field( + proto.STRING, + number=1, + ) + page_token = proto.Field( + proto.STRING, + number=2, + ) class ListInstancesResponse(proto.Message): @@ -136,10 +158,18 @@ def raw_page(self): return self instances = proto.RepeatedField( - proto.MESSAGE, number=1, message=gba_instance.Instance, + proto.MESSAGE, + number=1, + message=gba_instance.Instance, + ) + failed_locations = proto.RepeatedField( + proto.STRING, + number=2, + ) + next_page_token = proto.Field( + proto.STRING, + number=3, ) - failed_locations = proto.RepeatedField(proto.STRING, number=2,) - next_page_token = proto.Field(proto.STRING, number=3,) class PartialUpdateInstanceRequest(proto.Message): @@ -155,9 +185,15 @@ class PartialUpdateInstanceRequest(proto.Message): should be replaced. Must be explicitly set. 
""" - instance = proto.Field(proto.MESSAGE, number=1, message=gba_instance.Instance,) + instance = proto.Field( + proto.MESSAGE, + number=1, + message=gba_instance.Instance, + ) update_mask = proto.Field( - proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask, + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, ) @@ -171,7 +207,10 @@ class DeleteInstanceRequest(proto.Message): ``projects/{project}/instances/{instance}``. """ - name = proto.Field(proto.STRING, number=1,) + name = proto.Field( + proto.STRING, + number=1, + ) class CreateClusterRequest(proto.Message): @@ -192,9 +231,19 @@ class CreateClusterRequest(proto.Message): ``OutputOnly`` must be left blank. """ - parent = proto.Field(proto.STRING, number=1,) - cluster_id = proto.Field(proto.STRING, number=2,) - cluster = proto.Field(proto.MESSAGE, number=3, message=gba_instance.Cluster,) + parent = proto.Field( + proto.STRING, + number=1, + ) + cluster_id = proto.Field( + proto.STRING, + number=2, + ) + cluster = proto.Field( + proto.MESSAGE, + number=3, + message=gba_instance.Cluster, + ) class GetClusterRequest(proto.Message): @@ -207,7 +256,10 @@ class GetClusterRequest(proto.Message): ``projects/{project}/instances/{instance}/clusters/{cluster}``. """ - name = proto.Field(proto.STRING, number=1,) + name = proto.Field( + proto.STRING, + number=1, + ) class ListClustersRequest(proto.Message): @@ -224,8 +276,14 @@ class ListClustersRequest(proto.Message): DEPRECATED: This field is unused and ignored. """ - parent = proto.Field(proto.STRING, number=1,) - page_token = proto.Field(proto.STRING, number=2,) + parent = proto.Field( + proto.STRING, + number=1, + ) + page_token = proto.Field( + proto.STRING, + number=2, + ) class ListClustersResponse(proto.Message): @@ -250,10 +308,18 @@ def raw_page(self): return self clusters = proto.RepeatedField( - proto.MESSAGE, number=1, message=gba_instance.Cluster, + proto.MESSAGE, + number=1, + message=gba_instance.Cluster, + ) + failed_locations = proto.RepeatedField( + proto.STRING, + number=2, + ) + next_page_token = proto.Field( + proto.STRING, + number=3, ) - failed_locations = proto.RepeatedField(proto.STRING, number=2,) - next_page_token = proto.Field(proto.STRING, number=3,) class DeleteClusterRequest(proto.Message): @@ -266,7 +332,10 @@ class DeleteClusterRequest(proto.Message): ``projects/{project}/instances/{instance}/clusters/{cluster}``. 
""" - name = proto.Field(proto.STRING, number=1,) + name = proto.Field( + proto.STRING, + number=1, + ) class CreateInstanceMetadata(proto.Message): @@ -285,12 +354,20 @@ class CreateInstanceMetadata(proto.Message): """ original_request = proto.Field( - proto.MESSAGE, number=1, message="CreateInstanceRequest", + proto.MESSAGE, + number=1, + message="CreateInstanceRequest", ) request_time = proto.Field( - proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp, + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + finish_time = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, ) - finish_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,) class UpdateInstanceMetadata(proto.Message): @@ -309,12 +386,20 @@ class UpdateInstanceMetadata(proto.Message): """ original_request = proto.Field( - proto.MESSAGE, number=1, message="PartialUpdateInstanceRequest", + proto.MESSAGE, + number=1, + message="PartialUpdateInstanceRequest", ) request_time = proto.Field( - proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp, + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + finish_time = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, ) - finish_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,) class CreateClusterMetadata(proto.Message): @@ -333,12 +418,20 @@ class CreateClusterMetadata(proto.Message): """ original_request = proto.Field( - proto.MESSAGE, number=1, message="CreateClusterRequest", + proto.MESSAGE, + number=1, + message="CreateClusterRequest", ) request_time = proto.Field( - proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp, + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + finish_time = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, ) - finish_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,) class UpdateClusterMetadata(proto.Message): @@ -357,12 +450,20 @@ class UpdateClusterMetadata(proto.Message): """ original_request = proto.Field( - proto.MESSAGE, number=1, message=gba_instance.Cluster, + proto.MESSAGE, + number=1, + message=gba_instance.Cluster, ) request_time = proto.Field( - proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp, + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + finish_time = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, ) - finish_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,) class PartialUpdateClusterMetadata(proto.Message): @@ -382,11 +483,19 @@ class PartialUpdateClusterMetadata(proto.Message): """ request_time = proto.Field( - proto.MESSAGE, number=1, message=timestamp_pb2.Timestamp, + proto.MESSAGE, + number=1, + message=timestamp_pb2.Timestamp, + ) + finish_time = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, ) - finish_time = proto.Field(proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp,) original_request = proto.Field( - proto.MESSAGE, number=3, message="PartialUpdateClusterRequest", + proto.MESSAGE, + number=3, + message="PartialUpdateClusterRequest", ) @@ -403,9 +512,15 @@ class PartialUpdateClusterRequest(proto.Message): should be replaced. 
""" - cluster = proto.Field(proto.MESSAGE, number=1, message=gba_instance.Cluster,) + cluster = proto.Field( + proto.MESSAGE, + number=1, + message=gba_instance.Cluster, + ) update_mask = proto.Field( - proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask, + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, ) @@ -430,10 +545,23 @@ class CreateAppProfileRequest(proto.Message): the app profile. """ - parent = proto.Field(proto.STRING, number=1,) - app_profile_id = proto.Field(proto.STRING, number=2,) - app_profile = proto.Field(proto.MESSAGE, number=3, message=gba_instance.AppProfile,) - ignore_warnings = proto.Field(proto.BOOL, number=4,) + parent = proto.Field( + proto.STRING, + number=1, + ) + app_profile_id = proto.Field( + proto.STRING, + number=2, + ) + app_profile = proto.Field( + proto.MESSAGE, + number=3, + message=gba_instance.AppProfile, + ) + ignore_warnings = proto.Field( + proto.BOOL, + number=4, + ) class GetAppProfileRequest(proto.Message): @@ -446,7 +574,10 @@ class GetAppProfileRequest(proto.Message): ``projects/{project}/instances/{instance}/appProfiles/{app_profile}``. """ - name = proto.Field(proto.STRING, number=1,) + name = proto.Field( + proto.STRING, + number=1, + ) class ListAppProfilesRequest(proto.Message): @@ -476,9 +607,18 @@ class ListAppProfilesRequest(proto.Message): call. """ - parent = proto.Field(proto.STRING, number=1,) - page_size = proto.Field(proto.INT32, number=3,) - page_token = proto.Field(proto.STRING, number=2,) + parent = proto.Field( + proto.STRING, + number=1, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=2, + ) class ListAppProfilesResponse(proto.Message): @@ -504,10 +644,18 @@ def raw_page(self): return self app_profiles = proto.RepeatedField( - proto.MESSAGE, number=1, message=gba_instance.AppProfile, + proto.MESSAGE, + number=1, + message=gba_instance.AppProfile, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + failed_locations = proto.RepeatedField( + proto.STRING, + number=3, ) - next_page_token = proto.Field(proto.STRING, number=2,) - failed_locations = proto.RepeatedField(proto.STRING, number=3,) class UpdateAppProfileRequest(proto.Message): @@ -526,11 +674,20 @@ class UpdateAppProfileRequest(proto.Message): the app profile. """ - app_profile = proto.Field(proto.MESSAGE, number=1, message=gba_instance.AppProfile,) + app_profile = proto.Field( + proto.MESSAGE, + number=1, + message=gba_instance.AppProfile, + ) update_mask = proto.Field( - proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask, + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + ignore_warnings = proto.Field( + proto.BOOL, + number=3, ) - ignore_warnings = proto.Field(proto.BOOL, number=3,) class DeleteAppProfileRequest(proto.Message): @@ -546,13 +703,18 @@ class DeleteAppProfileRequest(proto.Message): deleting the app profile. """ - name = proto.Field(proto.STRING, number=1,) - ignore_warnings = proto.Field(proto.BOOL, number=2,) + name = proto.Field( + proto.STRING, + number=1, + ) + ignore_warnings = proto.Field( + proto.BOOL, + number=2, + ) class UpdateAppProfileMetadata(proto.Message): - r"""The metadata for the Operation returned by UpdateAppProfile. - """ + r"""The metadata for the Operation returned by UpdateAppProfile.""" class ListHotTabletsRequest(proto.Message): @@ -593,11 +755,28 @@ class ListHotTabletsRequest(proto.Message): call. 
""" - parent = proto.Field(proto.STRING, number=1,) - start_time = proto.Field(proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp,) - end_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,) - page_size = proto.Field(proto.INT32, number=4,) - page_token = proto.Field(proto.STRING, number=5,) + parent = proto.Field( + proto.STRING, + number=1, + ) + start_time = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + end_time = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + page_size = proto.Field( + proto.INT32, + number=4, + ) + page_token = proto.Field( + proto.STRING, + number=5, + ) class ListHotTabletsResponse(proto.Message): @@ -624,9 +803,14 @@ def raw_page(self): return self hot_tablets = proto.RepeatedField( - proto.MESSAGE, number=1, message=gba_instance.HotTablet, + proto.MESSAGE, + number=1, + message=gba_instance.HotTablet, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, ) - next_page_token = proto.Field(proto.STRING, number=2,) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py index ae1c6c91611b..6a366a5e42bc 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py @@ -84,9 +84,19 @@ class RestoreTableRequest(proto.Message): This field is a member of `oneof`_ ``source``. """ - parent = proto.Field(proto.STRING, number=1,) - table_id = proto.Field(proto.STRING, number=2,) - backup = proto.Field(proto.STRING, number=3, oneof="source",) + parent = proto.Field( + proto.STRING, + number=1, + ) + table_id = proto.Field( + proto.STRING, + number=2, + ) + backup = proto.Field( + proto.STRING, + number=3, + oneof="source", + ) class RestoreTableMetadata(proto.Message): @@ -122,13 +132,30 @@ class RestoreTableMetadata(proto.Message): operation. """ - name = proto.Field(proto.STRING, number=1,) - source_type = proto.Field(proto.ENUM, number=2, enum=gba_table.RestoreSourceType,) + name = proto.Field( + proto.STRING, + number=1, + ) + source_type = proto.Field( + proto.ENUM, + number=2, + enum=gba_table.RestoreSourceType, + ) backup_info = proto.Field( - proto.MESSAGE, number=3, oneof="source_info", message=gba_table.BackupInfo, + proto.MESSAGE, + number=3, + oneof="source_info", + message=gba_table.BackupInfo, + ) + optimize_table_operation_name = proto.Field( + proto.STRING, + number=4, + ) + progress = proto.Field( + proto.MESSAGE, + number=5, + message=common.OperationProgress, ) - optimize_table_operation_name = proto.Field(proto.STRING, number=4,) - progress = proto.Field(proto.MESSAGE, number=5, message=common.OperationProgress,) class OptimizeRestoredTableMetadata(proto.Message): @@ -146,8 +173,15 @@ class OptimizeRestoredTableMetadata(proto.Message): optimizations. """ - name = proto.Field(proto.STRING, number=1,) - progress = proto.Field(proto.MESSAGE, number=2, message=common.OperationProgress,) + name = proto.Field( + proto.STRING, + number=1, + ) + progress = proto.Field( + proto.MESSAGE, + number=2, + message=common.OperationProgress, + ) class CreateTableRequest(proto.Message): @@ -198,12 +232,29 @@ class Split(proto.Message): Row key to use as an initial tablet boundary. 
""" - key = proto.Field(proto.BYTES, number=1,) + key = proto.Field( + proto.BYTES, + number=1, + ) - parent = proto.Field(proto.STRING, number=1,) - table_id = proto.Field(proto.STRING, number=2,) - table = proto.Field(proto.MESSAGE, number=3, message=gba_table.Table,) - initial_splits = proto.RepeatedField(proto.MESSAGE, number=4, message=Split,) + parent = proto.Field( + proto.STRING, + number=1, + ) + table_id = proto.Field( + proto.STRING, + number=2, + ) + table = proto.Field( + proto.MESSAGE, + number=3, + message=gba_table.Table, + ) + initial_splits = proto.RepeatedField( + proto.MESSAGE, + number=4, + message=Split, + ) class CreateTableFromSnapshotRequest(proto.Message): @@ -232,9 +283,18 @@ class CreateTableFromSnapshotRequest(proto.Message): ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. """ - parent = proto.Field(proto.STRING, number=1,) - table_id = proto.Field(proto.STRING, number=2,) - source_snapshot = proto.Field(proto.STRING, number=3,) + parent = proto.Field( + proto.STRING, + number=1, + ) + table_id = proto.Field( + proto.STRING, + number=2, + ) + source_snapshot = proto.Field( + proto.STRING, + number=3, + ) class DropRowRangeRequest(proto.Message): @@ -265,9 +325,20 @@ class DropRowRangeRequest(proto.Message): This field is a member of `oneof`_ ``target``. """ - name = proto.Field(proto.STRING, number=1,) - row_key_prefix = proto.Field(proto.BYTES, number=2, oneof="target",) - delete_all_data_from_table = proto.Field(proto.BOOL, number=3, oneof="target",) + name = proto.Field( + proto.STRING, + number=1, + ) + row_key_prefix = proto.Field( + proto.BYTES, + number=2, + oneof="target", + ) + delete_all_data_from_table = proto.Field( + proto.BOOL, + number=3, + oneof="target", + ) class ListTablesRequest(proto.Message): @@ -299,10 +370,23 @@ class ListTablesRequest(proto.Message): call. """ - parent = proto.Field(proto.STRING, number=1,) - view = proto.Field(proto.ENUM, number=2, enum=gba_table.Table.View,) - page_size = proto.Field(proto.INT32, number=4,) - page_token = proto.Field(proto.STRING, number=3,) + parent = proto.Field( + proto.STRING, + number=1, + ) + view = proto.Field( + proto.ENUM, + number=2, + enum=gba_table.Table.View, + ) + page_size = proto.Field( + proto.INT32, + number=4, + ) + page_token = proto.Field( + proto.STRING, + number=3, + ) class ListTablesResponse(proto.Message): @@ -322,8 +406,15 @@ class ListTablesResponse(proto.Message): def raw_page(self): return self - tables = proto.RepeatedField(proto.MESSAGE, number=1, message=gba_table.Table,) - next_page_token = proto.Field(proto.STRING, number=2,) + tables = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gba_table.Table, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) class GetTableRequest(proto.Message): @@ -340,8 +431,15 @@ class GetTableRequest(proto.Message): Defaults to ``SCHEMA_VIEW`` if unspecified. """ - name = proto.Field(proto.STRING, number=1,) - view = proto.Field(proto.ENUM, number=2, enum=gba_table.Table.View,) + name = proto.Field( + proto.STRING, + number=1, + ) + view = proto.Field( + proto.ENUM, + number=2, + enum=gba_table.Table.View, + ) class DeleteTableRequest(proto.Message): @@ -355,7 +453,10 @@ class DeleteTableRequest(proto.Message): ``projects/{project}/instances/{instance}/tables/{table}``. 
""" - name = proto.Field(proto.STRING, number=1,) + name = proto.Field( + proto.STRING, + number=1, + ) class ModifyColumnFamiliesRequest(proto.Message): @@ -408,17 +509,37 @@ class Modification(proto.Message): This field is a member of `oneof`_ ``mod``. """ - id = proto.Field(proto.STRING, number=1,) + id = proto.Field( + proto.STRING, + number=1, + ) create = proto.Field( - proto.MESSAGE, number=2, oneof="mod", message=gba_table.ColumnFamily, + proto.MESSAGE, + number=2, + oneof="mod", + message=gba_table.ColumnFamily, ) update = proto.Field( - proto.MESSAGE, number=3, oneof="mod", message=gba_table.ColumnFamily, + proto.MESSAGE, + number=3, + oneof="mod", + message=gba_table.ColumnFamily, + ) + drop = proto.Field( + proto.BOOL, + number=4, + oneof="mod", ) - drop = proto.Field(proto.BOOL, number=4, oneof="mod",) - name = proto.Field(proto.STRING, number=1,) - modifications = proto.RepeatedField(proto.MESSAGE, number=2, message=Modification,) + name = proto.Field( + proto.STRING, + number=1, + ) + modifications = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=Modification, + ) class GenerateConsistencyTokenRequest(proto.Message): @@ -432,7 +553,10 @@ class GenerateConsistencyTokenRequest(proto.Message): ``projects/{project}/instances/{instance}/tables/{table}``. """ - name = proto.Field(proto.STRING, number=1,) + name = proto.Field( + proto.STRING, + number=1, + ) class GenerateConsistencyTokenResponse(proto.Message): @@ -444,7 +568,10 @@ class GenerateConsistencyTokenResponse(proto.Message): The generated consistency token. """ - consistency_token = proto.Field(proto.STRING, number=1,) + consistency_token = proto.Field( + proto.STRING, + number=1, + ) class CheckConsistencyRequest(proto.Message): @@ -461,8 +588,14 @@ class CheckConsistencyRequest(proto.Message): GenerateConsistencyToken for the Table. """ - name = proto.Field(proto.STRING, number=1,) - consistency_token = proto.Field(proto.STRING, number=2,) + name = proto.Field( + proto.STRING, + number=1, + ) + consistency_token = proto.Field( + proto.STRING, + number=2, + ) class CheckConsistencyResponse(proto.Message): @@ -476,7 +609,10 @@ class CheckConsistencyResponse(proto.Message): the restrictions specified in the request. """ - consistent = proto.Field(proto.BOOL, number=1,) + consistent = proto.Field( + proto.BOOL, + number=1, + ) class SnapshotTableRequest(proto.Message): @@ -514,11 +650,27 @@ class SnapshotTableRequest(proto.Message): Description of the snapshot. """ - name = proto.Field(proto.STRING, number=1,) - cluster = proto.Field(proto.STRING, number=2,) - snapshot_id = proto.Field(proto.STRING, number=3,) - ttl = proto.Field(proto.MESSAGE, number=4, message=duration_pb2.Duration,) - description = proto.Field(proto.STRING, number=5,) + name = proto.Field( + proto.STRING, + number=1, + ) + cluster = proto.Field( + proto.STRING, + number=2, + ) + snapshot_id = proto.Field( + proto.STRING, + number=3, + ) + ttl = proto.Field( + proto.MESSAGE, + number=4, + message=duration_pb2.Duration, + ) + description = proto.Field( + proto.STRING, + number=5, + ) class GetSnapshotRequest(proto.Message): @@ -538,7 +690,10 @@ class GetSnapshotRequest(proto.Message): ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. """ - name = proto.Field(proto.STRING, number=1,) + name = proto.Field( + proto.STRING, + number=1, + ) class ListSnapshotsRequest(proto.Message): @@ -567,9 +722,18 @@ class ListSnapshotsRequest(proto.Message): call. 
""" - parent = proto.Field(proto.STRING, number=1,) - page_size = proto.Field(proto.INT32, number=2,) - page_token = proto.Field(proto.STRING, number=3,) + parent = proto.Field( + proto.STRING, + number=1, + ) + page_size = proto.Field( + proto.INT32, + number=2, + ) + page_token = proto.Field( + proto.STRING, + number=3, + ) class ListSnapshotsResponse(proto.Message): @@ -597,9 +761,14 @@ def raw_page(self): return self snapshots = proto.RepeatedField( - proto.MESSAGE, number=1, message=gba_table.Snapshot, + proto.MESSAGE, + number=1, + message=gba_table.Snapshot, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, ) - next_page_token = proto.Field(proto.STRING, number=2,) class DeleteSnapshotRequest(proto.Message): @@ -619,7 +788,10 @@ class DeleteSnapshotRequest(proto.Message): ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. """ - name = proto.Field(proto.STRING, number=1,) + name = proto.Field( + proto.STRING, + number=1, + ) class SnapshotTableMetadata(proto.Message): @@ -643,12 +815,20 @@ class SnapshotTableMetadata(proto.Message): """ original_request = proto.Field( - proto.MESSAGE, number=1, message="SnapshotTableRequest", + proto.MESSAGE, + number=1, + message="SnapshotTableRequest", ) request_time = proto.Field( - proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp, + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + finish_time = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, ) - finish_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,) class CreateTableFromSnapshotMetadata(proto.Message): @@ -673,12 +853,20 @@ class CreateTableFromSnapshotMetadata(proto.Message): """ original_request = proto.Field( - proto.MESSAGE, number=1, message="CreateTableFromSnapshotRequest", + proto.MESSAGE, + number=1, + message="CreateTableFromSnapshotRequest", ) request_time = proto.Field( - proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp, + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + finish_time = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, ) - finish_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,) class CreateBackupRequest(proto.Message): @@ -703,9 +891,19 @@ class CreateBackupRequest(proto.Message): Required. The backup to create. """ - parent = proto.Field(proto.STRING, number=1,) - backup_id = proto.Field(proto.STRING, number=2,) - backup = proto.Field(proto.MESSAGE, number=3, message=gba_table.Backup,) + parent = proto.Field( + proto.STRING, + number=1, + ) + backup_id = proto.Field( + proto.STRING, + number=2, + ) + backup = proto.Field( + proto.MESSAGE, + number=3, + message=gba_table.Backup, + ) class CreateBackupMetadata(proto.Message): @@ -725,10 +923,24 @@ class CreateBackupMetadata(proto.Message): finished or was cancelled. 
""" - name = proto.Field(proto.STRING, number=1,) - source_table = proto.Field(proto.STRING, number=2,) - start_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,) - end_time = proto.Field(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,) + name = proto.Field( + proto.STRING, + number=1, + ) + source_table = proto.Field( + proto.STRING, + number=2, + ) + start_time = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + end_time = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) class UpdateBackupRequest(proto.Message): @@ -752,9 +964,15 @@ class UpdateBackupRequest(proto.Message): accidentally by clients that do not know about them. """ - backup = proto.Field(proto.MESSAGE, number=1, message=gba_table.Backup,) + backup = proto.Field( + proto.MESSAGE, + number=1, + message=gba_table.Backup, + ) update_mask = proto.Field( - proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask, + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, ) @@ -768,7 +986,10 @@ class GetBackupRequest(proto.Message): ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. """ - name = proto.Field(proto.STRING, number=1,) + name = proto.Field( + proto.STRING, + number=1, + ) class DeleteBackupRequest(proto.Message): @@ -782,7 +1003,10 @@ class DeleteBackupRequest(proto.Message): ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. """ - name = proto.Field(proto.STRING, number=1,) + name = proto.Field( + proto.STRING, + number=1, + ) class ListBackupsRequest(proto.Message): @@ -871,11 +1095,26 @@ class ListBackupsRequest(proto.Message): to the same ``parent`` and with the same ``filter``. """ - parent = proto.Field(proto.STRING, number=1,) - filter = proto.Field(proto.STRING, number=2,) - order_by = proto.Field(proto.STRING, number=3,) - page_size = proto.Field(proto.INT32, number=4,) - page_token = proto.Field(proto.STRING, number=5,) + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + order_by = proto.Field( + proto.STRING, + number=3, + ) + page_size = proto.Field( + proto.INT32, + number=4, + ) + page_token = proto.Field( + proto.STRING, + number=5, + ) class ListBackupsResponse(proto.Message): @@ -895,8 +1134,15 @@ class ListBackupsResponse(proto.Message): def raw_page(self): return self - backups = proto.RepeatedField(proto.MESSAGE, number=1, message=gba_table.Backup,) - next_page_token = proto.Field(proto.STRING, number=2,) + backups = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gba_table.Backup, + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/common.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/common.py index 5615167e667d..704a07732339 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/common.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/common.py @@ -19,7 +19,11 @@ __protobuf__ = proto.module( - package="google.bigtable.admin.v2", manifest={"StorageType", "OperationProgress",}, + package="google.bigtable.admin.v2", + manifest={ + "StorageType", + "OperationProgress", + }, ) @@ -45,9 +49,20 @@ class OperationProgress(proto.Message): failed or was completed successfully. 
""" - progress_percent = proto.Field(proto.INT32, number=1,) - start_time = proto.Field(proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp,) - end_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,) + progress_percent = proto.Field( + proto.INT32, + number=1, + ) + start_time = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + end_time = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py index 961cba43421b..76733b615107 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py @@ -85,12 +85,34 @@ class Type(proto.Enum): PRODUCTION = 1 DEVELOPMENT = 2 - name = proto.Field(proto.STRING, number=1,) - display_name = proto.Field(proto.STRING, number=2,) - state = proto.Field(proto.ENUM, number=3, enum=State,) - type_ = proto.Field(proto.ENUM, number=4, enum=Type,) - labels = proto.MapField(proto.STRING, proto.STRING, number=5,) - create_time = proto.Field(proto.MESSAGE, number=7, message=timestamp_pb2.Timestamp,) + name = proto.Field( + proto.STRING, + number=1, + ) + display_name = proto.Field( + proto.STRING, + number=2, + ) + state = proto.Field( + proto.ENUM, + number=3, + enum=State, + ) + type_ = proto.Field( + proto.ENUM, + number=4, + enum=Type, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=5, + ) + create_time = proto.Field( + proto.MESSAGE, + number=7, + message=timestamp_pb2.Timestamp, + ) class AutoscalingTargets(proto.Message): @@ -105,7 +127,10 @@ class AutoscalingTargets(proto.Message): utilization). """ - cpu_utilization_percent = proto.Field(proto.INT32, number=2,) + cpu_utilization_percent = proto.Field( + proto.INT32, + number=2, + ) class AutoscalingLimits(proto.Message): @@ -121,8 +146,14 @@ class AutoscalingLimits(proto.Message): to. """ - min_serve_nodes = proto.Field(proto.INT32, number=1,) - max_serve_nodes = proto.Field(proto.INT32, number=2,) + min_serve_nodes = proto.Field( + proto.INT32, + number=1, + ) + max_serve_nodes = proto.Field( + proto.INT32, + number=2, + ) class Cluster(proto.Message): @@ -183,10 +214,14 @@ class ClusterAutoscalingConfig(proto.Message): """ autoscaling_limits = proto.Field( - proto.MESSAGE, number=1, message="AutoscalingLimits", + proto.MESSAGE, + number=1, + message="AutoscalingLimits", ) autoscaling_targets = proto.Field( - proto.MESSAGE, number=2, message="AutoscalingTargets", + proto.MESSAGE, + number=2, + message="AutoscalingTargets", ) class ClusterConfig(proto.Message): @@ -198,7 +233,9 @@ class ClusterConfig(proto.Message): """ cluster_autoscaling_config = proto.Field( - proto.MESSAGE, number=1, message="Cluster.ClusterAutoscalingConfig", + proto.MESSAGE, + number=1, + message="Cluster.ClusterAutoscalingConfig", ) class EncryptionConfig(proto.Message): @@ -221,17 +258,44 @@ class EncryptionConfig(proto.Message): key. 
""" - kms_key_name = proto.Field(proto.STRING, number=1,) + kms_key_name = proto.Field( + proto.STRING, + number=1, + ) - name = proto.Field(proto.STRING, number=1,) - location = proto.Field(proto.STRING, number=2,) - state = proto.Field(proto.ENUM, number=3, enum=State,) - serve_nodes = proto.Field(proto.INT32, number=4,) + name = proto.Field( + proto.STRING, + number=1, + ) + location = proto.Field( + proto.STRING, + number=2, + ) + state = proto.Field( + proto.ENUM, + number=3, + enum=State, + ) + serve_nodes = proto.Field( + proto.INT32, + number=4, + ) cluster_config = proto.Field( - proto.MESSAGE, number=7, oneof="config", message=ClusterConfig, + proto.MESSAGE, + number=7, + oneof="config", + message=ClusterConfig, + ) + default_storage_type = proto.Field( + proto.ENUM, + number=5, + enum=common.StorageType, + ) + encryption_config = proto.Field( + proto.MESSAGE, + number=6, + message=EncryptionConfig, ) - default_storage_type = proto.Field(proto.ENUM, number=5, enum=common.StorageType,) - encryption_config = proto.Field(proto.MESSAGE, number=6, message=EncryptionConfig,) class AppProfile(proto.Message): @@ -289,7 +353,10 @@ class MultiClusterRoutingUseAny(proto.Message): eligible. """ - cluster_ids = proto.RepeatedField(proto.STRING, number=1,) + cluster_ids = proto.RepeatedField( + proto.STRING, + number=1, + ) class SingleClusterRouting(proto.Message): r"""Unconditionally routes all read/write requests to a specific @@ -307,12 +374,27 @@ class SingleClusterRouting(proto.Message): table/row/column in multiple clusters. """ - cluster_id = proto.Field(proto.STRING, number=1,) - allow_transactional_writes = proto.Field(proto.BOOL, number=2,) + cluster_id = proto.Field( + proto.STRING, + number=1, + ) + allow_transactional_writes = proto.Field( + proto.BOOL, + number=2, + ) - name = proto.Field(proto.STRING, number=1,) - etag = proto.Field(proto.STRING, number=2,) - description = proto.Field(proto.STRING, number=3,) + name = proto.Field( + proto.STRING, + number=1, + ) + etag = proto.Field( + proto.STRING, + number=2, + ) + description = proto.Field( + proto.STRING, + number=3, + ) multi_cluster_routing_use_any = proto.Field( proto.MESSAGE, number=5, @@ -320,7 +402,10 @@ class SingleClusterRouting(proto.Message): message=MultiClusterRoutingUseAny, ) single_cluster_routing = proto.Field( - proto.MESSAGE, number=6, oneof="routing_policy", message=SingleClusterRouting, + proto.MESSAGE, + number=6, + oneof="routing_policy", + message=SingleClusterRouting, ) @@ -357,13 +442,36 @@ class HotTablet(proto.Message): (the node spent all cycles serving the hot tablet). 
""" - name = proto.Field(proto.STRING, number=1,) - table_name = proto.Field(proto.STRING, number=2,) - start_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,) - end_time = proto.Field(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,) - start_key = proto.Field(proto.STRING, number=5,) - end_key = proto.Field(proto.STRING, number=6,) - node_cpu_usage_percent = proto.Field(proto.FLOAT, number=7,) + name = proto.Field( + proto.STRING, + number=1, + ) + table_name = proto.Field( + proto.STRING, + number=2, + ) + start_time = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + end_time = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + start_key = proto.Field( + proto.STRING, + number=5, + ) + end_key = proto.Field( + proto.STRING, + number=6, + ) + node_cpu_usage_percent = proto.Field( + proto.FLOAT, + number=7, + ) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py index b99c3a64668b..29b4ce75dffe 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py @@ -57,9 +57,16 @@ class RestoreInfo(proto.Message): This field is a member of `oneof`_ ``source_info``. """ - source_type = proto.Field(proto.ENUM, number=1, enum="RestoreSourceType",) + source_type = proto.Field( + proto.ENUM, + number=1, + enum="RestoreSourceType", + ) backup_info = proto.Field( - proto.MESSAGE, number=2, oneof="source_info", message="BackupInfo", + proto.MESSAGE, + number=2, + oneof="source_info", + message="BackupInfo", ) @@ -142,21 +149,42 @@ class ReplicationState(proto.Enum): READY_OPTIMIZING = 5 replication_state = proto.Field( - proto.ENUM, number=1, enum="Table.ClusterState.ReplicationState", + proto.ENUM, + number=1, + enum="Table.ClusterState.ReplicationState", ) encryption_info = proto.RepeatedField( - proto.MESSAGE, number=2, message="EncryptionInfo", + proto.MESSAGE, + number=2, + message="EncryptionInfo", ) - name = proto.Field(proto.STRING, number=1,) + name = proto.Field( + proto.STRING, + number=1, + ) cluster_states = proto.MapField( - proto.STRING, proto.MESSAGE, number=2, message=ClusterState, + proto.STRING, + proto.MESSAGE, + number=2, + message=ClusterState, ) column_families = proto.MapField( - proto.STRING, proto.MESSAGE, number=3, message="ColumnFamily", + proto.STRING, + proto.MESSAGE, + number=3, + message="ColumnFamily", + ) + granularity = proto.Field( + proto.ENUM, + number=4, + enum=TimestampGranularity, + ) + restore_info = proto.Field( + proto.MESSAGE, + number=6, + message="RestoreInfo", ) - granularity = proto.Field(proto.ENUM, number=4, enum=TimestampGranularity,) - restore_info = proto.Field(proto.MESSAGE, number=6, message="RestoreInfo",) class ColumnFamily(proto.Message): @@ -173,7 +201,11 @@ class ColumnFamily(proto.Message): matches the active GC expression for its family. """ - gc_rule = proto.Field(proto.MESSAGE, number=1, message="GcRule",) + gc_rule = proto.Field( + proto.MESSAGE, + number=1, + message="GcRule", + ) class GcRule(proto.Message): @@ -221,7 +253,11 @@ class Intersection(proto.Message): ``rules``. 
""" - rules = proto.RepeatedField(proto.MESSAGE, number=1, message="GcRule",) + rules = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="GcRule", + ) class Union(proto.Message): r"""A GcRule which deletes cells matching any of the given rules. @@ -232,16 +268,35 @@ class Union(proto.Message): ``rules``. """ - rules = proto.RepeatedField(proto.MESSAGE, number=1, message="GcRule",) + rules = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="GcRule", + ) - max_num_versions = proto.Field(proto.INT32, number=1, oneof="rule",) + max_num_versions = proto.Field( + proto.INT32, + number=1, + oneof="rule", + ) max_age = proto.Field( - proto.MESSAGE, number=2, oneof="rule", message=duration_pb2.Duration, + proto.MESSAGE, + number=2, + oneof="rule", + message=duration_pb2.Duration, ) intersection = proto.Field( - proto.MESSAGE, number=3, oneof="rule", message=Intersection, + proto.MESSAGE, + number=3, + oneof="rule", + message=Intersection, + ) + union = proto.Field( + proto.MESSAGE, + number=4, + oneof="rule", + message=Union, ) - union = proto.Field(proto.MESSAGE, number=4, oneof="rule", message=Union,) class EncryptionInfo(proto.Message): @@ -271,9 +326,20 @@ class EncryptionType(proto.Enum): GOOGLE_DEFAULT_ENCRYPTION = 1 CUSTOMER_MANAGED_ENCRYPTION = 2 - encryption_type = proto.Field(proto.ENUM, number=3, enum=EncryptionType,) - encryption_status = proto.Field(proto.MESSAGE, number=4, message=status_pb2.Status,) - kms_key_version = proto.Field(proto.STRING, number=2,) + encryption_type = proto.Field( + proto.ENUM, + number=3, + enum=EncryptionType, + ) + encryption_status = proto.Field( + proto.MESSAGE, + number=4, + message=status_pb2.Status, + ) + kms_key_version = proto.Field( + proto.STRING, + number=2, + ) class Snapshot(proto.Message): @@ -322,13 +388,38 @@ class State(proto.Enum): READY = 1 CREATING = 2 - name = proto.Field(proto.STRING, number=1,) - source_table = proto.Field(proto.MESSAGE, number=2, message="Table",) - data_size_bytes = proto.Field(proto.INT64, number=3,) - create_time = proto.Field(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,) - delete_time = proto.Field(proto.MESSAGE, number=5, message=timestamp_pb2.Timestamp,) - state = proto.Field(proto.ENUM, number=6, enum=State,) - description = proto.Field(proto.STRING, number=7,) + name = proto.Field( + proto.STRING, + number=1, + ) + source_table = proto.Field( + proto.MESSAGE, + number=2, + message="Table", + ) + data_size_bytes = proto.Field( + proto.INT64, + number=3, + ) + create_time = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + delete_time = proto.Field( + proto.MESSAGE, + number=5, + message=timestamp_pb2.Timestamp, + ) + state = proto.Field( + proto.ENUM, + number=6, + enum=State, + ) + description = proto.Field( + proto.STRING, + number=7, + ) class Backup(proto.Message): @@ -381,14 +472,43 @@ class State(proto.Enum): CREATING = 1 READY = 2 - name = proto.Field(proto.STRING, number=1,) - source_table = proto.Field(proto.STRING, number=2,) - expire_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,) - start_time = proto.Field(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,) - end_time = proto.Field(proto.MESSAGE, number=5, message=timestamp_pb2.Timestamp,) - size_bytes = proto.Field(proto.INT64, number=6,) - state = proto.Field(proto.ENUM, number=7, enum=State,) - encryption_info = proto.Field(proto.MESSAGE, number=9, message="EncryptionInfo",) + name = proto.Field( + proto.STRING, + number=1, + ) + source_table 
= proto.Field( + proto.STRING, + number=2, + ) + expire_time = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + start_time = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + end_time = proto.Field( + proto.MESSAGE, + number=5, + message=timestamp_pb2.Timestamp, + ) + size_bytes = proto.Field( + proto.INT64, + number=6, + ) + state = proto.Field( + proto.ENUM, + number=7, + enum=State, + ) + encryption_info = proto.Field( + proto.MESSAGE, + number=9, + message="EncryptionInfo", + ) class BackupInfo(proto.Message): @@ -410,10 +530,24 @@ class BackupInfo(proto.Message): created from. """ - backup = proto.Field(proto.STRING, number=1,) - start_time = proto.Field(proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp,) - end_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,) - source_table = proto.Field(proto.STRING, number=4,) + backup = proto.Field( + proto.STRING, + number=1, + ) + start_time = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + end_time = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + source_table = proto.Field( + proto.STRING, + number=4, + ) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py index cf40edc6c7a8..6c9986f780b6 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -293,7 +293,12 @@ def read_rows( ) # Send the request. - response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response @@ -383,7 +388,12 @@ def sample_row_keys( ) # Send the request. - response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response @@ -504,7 +514,12 @@ async def mutate_row( ) # Send the request. - response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response @@ -609,7 +624,12 @@ def mutate_rows( ) # Send the request. - response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response @@ -758,7 +778,12 @@ async def check_and_mutate_row( ) # Send the request. - response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response @@ -845,7 +870,12 @@ async def ping_and_warm( ) # Send the request. - response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response @@ -963,7 +993,12 @@ async def read_modify_write_row( ) # Send the request. 
- response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response @@ -977,7 +1012,9 @@ async def __aexit__(self, exc_type, exc, tb): try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution("google-cloud-bigtable",).version, + gapic_version=pkg_resources.get_distribution( + "google-cloud-bigtable", + ).version, ) except pkg_resources.DistributionNotFound: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py index e221fefb5458..c62875f3aec6 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py @@ -53,7 +53,10 @@ class BigtableClientMeta(type): _transport_registry["grpc"] = BigtableGrpcTransport _transport_registry["grpc_asyncio"] = BigtableGrpcAsyncIOTransport - def get_transport_class(cls, label: str = None,) -> Type[BigtableTransport]: + def get_transport_class( + cls, + label: str = None, + ) -> Type[BigtableTransport]: """Returns an appropriate transport class. Args: @@ -160,10 +163,14 @@ def transport(self) -> BigtableTransport: return self._transport @staticmethod - def instance_path(project: str, instance: str,) -> str: + def instance_path( + project: str, + instance: str, + ) -> str: """Returns a fully-qualified instance string.""" return "projects/{project}/instances/{instance}".format( - project=project, instance=instance, + project=project, + instance=instance, ) @staticmethod @@ -173,10 +180,16 @@ def parse_instance_path(path: str) -> Dict[str, str]: return m.groupdict() if m else {} @staticmethod - def table_path(project: str, instance: str, table: str,) -> str: + def table_path( + project: str, + instance: str, + table: str, + ) -> str: """Returns a fully-qualified table string.""" return "projects/{project}/instances/{instance}/tables/{table}".format( - project=project, instance=instance, table=table, + project=project, + instance=instance, + table=table, ) @staticmethod @@ -189,7 +202,9 @@ def parse_table_path(path: str) -> Dict[str, str]: return m.groupdict() if m else {} @staticmethod - def common_billing_account_path(billing_account: str,) -> str: + def common_billing_account_path( + billing_account: str, + ) -> str: """Returns a fully-qualified billing_account string.""" return "billingAccounts/{billing_account}".format( billing_account=billing_account, @@ -202,9 +217,13 @@ def parse_common_billing_account_path(path: str) -> Dict[str, str]: return m.groupdict() if m else {} @staticmethod - def common_folder_path(folder: str,) -> str: + def common_folder_path( + folder: str, + ) -> str: """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder,) + return "folders/{folder}".format( + folder=folder, + ) @staticmethod def parse_common_folder_path(path: str) -> Dict[str, str]: @@ -213,9 +232,13 @@ def parse_common_folder_path(path: str) -> Dict[str, str]: return m.groupdict() if m else {} @staticmethod - def common_organization_path(organization: str,) -> str: + def common_organization_path( + organization: str, + ) -> str: """Returns a fully-qualified organization string.""" - return 
"organizations/{organization}".format(organization=organization,) + return "organizations/{organization}".format( + organization=organization, + ) @staticmethod def parse_common_organization_path(path: str) -> Dict[str, str]: @@ -224,9 +247,13 @@ def parse_common_organization_path(path: str) -> Dict[str, str]: return m.groupdict() if m else {} @staticmethod - def common_project_path(project: str,) -> str: + def common_project_path( + project: str, + ) -> str: """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project,) + return "projects/{project}".format( + project=project, + ) @staticmethod def parse_common_project_path(path: str) -> Dict[str, str]: @@ -235,10 +262,14 @@ def parse_common_project_path(path: str) -> Dict[str, str]: return m.groupdict() if m else {} @staticmethod - def common_location_path(project: str, location: str,) -> str: + def common_location_path( + project: str, + location: str, + ) -> str: """Returns a fully-qualified location string.""" return "projects/{project}/locations/{location}".format( - project=project, location=location, + project=project, + location=location, ) @staticmethod @@ -507,7 +538,12 @@ def read_rows( ) # Send the request. - response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response @@ -606,7 +642,12 @@ def sample_row_keys( ) # Send the request. - response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response @@ -726,7 +767,12 @@ def mutate_row( ) # Send the request. - response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response @@ -840,7 +886,12 @@ def mutate_rows( ) # Send the request. - response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response @@ -998,7 +1049,12 @@ def check_and_mutate_row( ) # Send the request. - response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response @@ -1094,7 +1150,12 @@ def ping_and_warm( ) # Send the request. - response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. return response @@ -1221,7 +1282,12 @@ def read_modify_write_row( ) # Send the request. - response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) # Done; return the response. 
return response @@ -1242,7 +1308,9 @@ def __exit__(self, type, value, traceback): try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution("google-cloud-bigtable",).version, + gapic_version=pkg_resources.get_distribution( + "google-cloud-bigtable", + ).version, ) except pkg_resources.DistributionNotFound: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py index 1ced17ddc35b..6a5e0eca7139 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py @@ -29,7 +29,9 @@ try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution("google-cloud-bigtable",).version, + gapic_version=pkg_resources.get_distribution( + "google-cloud-bigtable", + ).version, ) except pkg_resources.DistributionNotFound: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() @@ -126,10 +128,14 @@ def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. self._wrapped_methods = { self.read_rows: gapic_v1.method.wrap_method( - self.read_rows, default_timeout=43200.0, client_info=client_info, + self.read_rows, + default_timeout=43200.0, + client_info=client_info, ), self.sample_row_keys: gapic_v1.method.wrap_method( - self.sample_row_keys, default_timeout=60.0, client_info=client_info, + self.sample_row_keys, + default_timeout=60.0, + client_info=client_info, ), self.mutate_row: gapic_v1.method.wrap_method( self.mutate_row, @@ -147,7 +153,9 @@ def _prep_wrapped_messages(self, client_info): client_info=client_info, ), self.mutate_rows: gapic_v1.method.wrap_method( - self.mutate_rows, default_timeout=600.0, client_info=client_info, + self.mutate_rows, + default_timeout=600.0, + client_info=client_info, ), self.check_and_mutate_row: gapic_v1.method.wrap_method( self.check_and_mutate_row, @@ -155,7 +163,9 @@ def _prep_wrapped_messages(self, client_info): client_info=client_info, ), self.ping_and_warm: gapic_v1.method.wrap_method( - self.ping_and_warm, default_timeout=None, client_info=client_info, + self.ping_and_warm, + default_timeout=None, + client_info=client_info, ), self.read_modify_write_row: gapic_v1.method.wrap_method( self.read_modify_write_row, @@ -167,9 +177,9 @@ def _prep_wrapped_messages(self, client_info): def close(self): """Closes resources associated with the transport. - .. warning:: - Only call this method if the transport is NOT shared - with other clients - this may cause errors in other clients! + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! """ raise NotImplementedError() diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py index 089cab726f90..3c836cad209d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py @@ -225,8 +225,7 @@ def create_channel( @property def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. 
- """ + """Return the channel designed to connect to this service.""" return self._grpc_channel @property diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py index 58fc457038ee..72785c264b03 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py @@ -67,11 +67,28 @@ class ReadRowsRequest(proto.Message): return all results. """ - table_name = proto.Field(proto.STRING, number=1,) - app_profile_id = proto.Field(proto.STRING, number=5,) - rows = proto.Field(proto.MESSAGE, number=2, message=data.RowSet,) - filter = proto.Field(proto.MESSAGE, number=3, message=data.RowFilter,) - rows_limit = proto.Field(proto.INT64, number=4,) + table_name = proto.Field( + proto.STRING, + number=1, + ) + app_profile_id = proto.Field( + proto.STRING, + number=5, + ) + rows = proto.Field( + proto.MESSAGE, + number=2, + message=data.RowSet, + ) + filter = proto.Field( + proto.MESSAGE, + number=3, + message=data.RowFilter, + ) + rows_limit = proto.Field( + proto.INT64, + number=4, + ) class ReadRowsResponse(proto.Message): @@ -165,22 +182,56 @@ class CellChunk(proto.Message): This field is a member of `oneof`_ ``row_status``. """ - row_key = proto.Field(proto.BYTES, number=1,) + row_key = proto.Field( + proto.BYTES, + number=1, + ) family_name = proto.Field( - proto.MESSAGE, number=2, message=wrappers_pb2.StringValue, + proto.MESSAGE, + number=2, + message=wrappers_pb2.StringValue, ) qualifier = proto.Field( - proto.MESSAGE, number=3, message=wrappers_pb2.BytesValue, + proto.MESSAGE, + number=3, + message=wrappers_pb2.BytesValue, + ) + timestamp_micros = proto.Field( + proto.INT64, + number=4, + ) + labels = proto.RepeatedField( + proto.STRING, + number=5, + ) + value = proto.Field( + proto.BYTES, + number=6, + ) + value_size = proto.Field( + proto.INT32, + number=7, + ) + reset_row = proto.Field( + proto.BOOL, + number=8, + oneof="row_status", + ) + commit_row = proto.Field( + proto.BOOL, + number=9, + oneof="row_status", ) - timestamp_micros = proto.Field(proto.INT64, number=4,) - labels = proto.RepeatedField(proto.STRING, number=5,) - value = proto.Field(proto.BYTES, number=6,) - value_size = proto.Field(proto.INT32, number=7,) - reset_row = proto.Field(proto.BOOL, number=8, oneof="row_status",) - commit_row = proto.Field(proto.BOOL, number=9, oneof="row_status",) - chunks = proto.RepeatedField(proto.MESSAGE, number=1, message=CellChunk,) - last_scanned_row_key = proto.Field(proto.BYTES, number=2,) + chunks = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=CellChunk, + ) + last_scanned_row_key = proto.Field( + proto.BYTES, + number=2, + ) class SampleRowKeysRequest(proto.Message): @@ -197,8 +248,14 @@ class SampleRowKeysRequest(proto.Message): profile will be used. """ - table_name = proto.Field(proto.STRING, number=1,) - app_profile_id = proto.Field(proto.STRING, number=2,) + table_name = proto.Field( + proto.STRING, + number=1, + ) + app_profile_id = proto.Field( + proto.STRING, + number=2, + ) class SampleRowKeysResponse(proto.Message): @@ -225,8 +282,14 @@ class SampleRowKeysResponse(proto.Message): fields. 
""" - row_key = proto.Field(proto.BYTES, number=1,) - offset_bytes = proto.Field(proto.INT64, number=2,) + row_key = proto.Field( + proto.BYTES, + number=1, + ) + offset_bytes = proto.Field( + proto.INT64, + number=2, + ) class MutateRowRequest(proto.Message): @@ -252,15 +315,27 @@ class MutateRowRequest(proto.Message): at most 100000. """ - table_name = proto.Field(proto.STRING, number=1,) - app_profile_id = proto.Field(proto.STRING, number=4,) - row_key = proto.Field(proto.BYTES, number=2,) - mutations = proto.RepeatedField(proto.MESSAGE, number=3, message=data.Mutation,) + table_name = proto.Field( + proto.STRING, + number=1, + ) + app_profile_id = proto.Field( + proto.STRING, + number=4, + ) + row_key = proto.Field( + proto.BYTES, + number=2, + ) + mutations = proto.RepeatedField( + proto.MESSAGE, + number=3, + message=data.Mutation, + ) class MutateRowResponse(proto.Message): - r"""Response message for Bigtable.MutateRow. - """ + r"""Response message for Bigtable.MutateRow.""" class MutateRowsRequest(proto.Message): @@ -299,12 +374,29 @@ class Entry(proto.Message): You must specify at least one mutation. """ - row_key = proto.Field(proto.BYTES, number=1,) - mutations = proto.RepeatedField(proto.MESSAGE, number=2, message=data.Mutation,) + row_key = proto.Field( + proto.BYTES, + number=1, + ) + mutations = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=data.Mutation, + ) - table_name = proto.Field(proto.STRING, number=1,) - app_profile_id = proto.Field(proto.STRING, number=3,) - entries = proto.RepeatedField(proto.MESSAGE, number=2, message=Entry,) + table_name = proto.Field( + proto.STRING, + number=1, + ) + app_profile_id = proto.Field( + proto.STRING, + number=3, + ) + entries = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=Entry, + ) class MutateRowsResponse(proto.Message): @@ -332,10 +424,21 @@ class Entry(proto.Message): will be reported for both entries. """ - index = proto.Field(proto.INT64, number=1,) - status = proto.Field(proto.MESSAGE, number=2, message=status_pb2.Status,) + index = proto.Field( + proto.INT64, + number=1, + ) + status = proto.Field( + proto.MESSAGE, + number=2, + message=status_pb2.Status, + ) - entries = proto.RepeatedField(proto.MESSAGE, number=1, message=Entry,) + entries = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=Entry, + ) class CheckAndMutateRowRequest(proto.Message): @@ -376,15 +479,32 @@ class CheckAndMutateRowRequest(proto.Message): most 100000. """ - table_name = proto.Field(proto.STRING, number=1,) - app_profile_id = proto.Field(proto.STRING, number=7,) - row_key = proto.Field(proto.BYTES, number=2,) - predicate_filter = proto.Field(proto.MESSAGE, number=6, message=data.RowFilter,) + table_name = proto.Field( + proto.STRING, + number=1, + ) + app_profile_id = proto.Field( + proto.STRING, + number=7, + ) + row_key = proto.Field( + proto.BYTES, + number=2, + ) + predicate_filter = proto.Field( + proto.MESSAGE, + number=6, + message=data.RowFilter, + ) true_mutations = proto.RepeatedField( - proto.MESSAGE, number=4, message=data.Mutation, + proto.MESSAGE, + number=4, + message=data.Mutation, ) false_mutations = proto.RepeatedField( - proto.MESSAGE, number=5, message=data.Mutation, + proto.MESSAGE, + number=5, + message=data.Mutation, ) @@ -397,7 +517,10 @@ class CheckAndMutateRowResponse(proto.Message): any results for the specified row. 
""" - predicate_matched = proto.Field(proto.BOOL, number=1,) + predicate_matched = proto.Field( + proto.BOOL, + number=1, + ) class PingAndWarmRequest(proto.Message): @@ -414,8 +537,14 @@ class PingAndWarmRequest(proto.Message): profile will be used. """ - name = proto.Field(proto.STRING, number=1,) - app_profile_id = proto.Field(proto.STRING, number=2,) + name = proto.Field( + proto.STRING, + number=1, + ) + app_profile_id = proto.Field( + proto.STRING, + number=2, + ) class PingAndWarmResponse(proto.Message): @@ -449,11 +578,22 @@ class ReadModifyWriteRowRequest(proto.Message): later ones. """ - table_name = proto.Field(proto.STRING, number=1,) - app_profile_id = proto.Field(proto.STRING, number=4,) - row_key = proto.Field(proto.BYTES, number=2,) + table_name = proto.Field( + proto.STRING, + number=1, + ) + app_profile_id = proto.Field( + proto.STRING, + number=4, + ) + row_key = proto.Field( + proto.BYTES, + number=2, + ) rules = proto.RepeatedField( - proto.MESSAGE, number=3, message=data.ReadModifyWriteRule, + proto.MESSAGE, + number=3, + message=data.ReadModifyWriteRule, ) @@ -466,7 +606,11 @@ class ReadModifyWriteRowResponse(proto.Message): cells modified by the request. """ - row = proto.Field(proto.MESSAGE, number=1, message=data.Row,) + row = proto.Field( + proto.MESSAGE, + number=1, + message=data.Row, + ) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py index c81358f07369..bd45a62d3238 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py @@ -53,8 +53,15 @@ class Row(proto.Message): not specified. """ - key = proto.Field(proto.BYTES, number=1,) - families = proto.RepeatedField(proto.MESSAGE, number=2, message="Family",) + key = proto.Field( + proto.BYTES, + number=1, + ) + families = proto.RepeatedField( + proto.MESSAGE, + number=2, + message="Family", + ) class Family(proto.Message): @@ -75,8 +82,15 @@ class Family(proto.Message): increasing "qualifier". """ - name = proto.Field(proto.STRING, number=1,) - columns = proto.RepeatedField(proto.MESSAGE, number=2, message="Column",) + name = proto.Field( + proto.STRING, + number=1, + ) + columns = proto.RepeatedField( + proto.MESSAGE, + number=2, + message="Column", + ) class Column(proto.Message): @@ -96,8 +110,15 @@ class Column(proto.Message): "timestamp_micros". """ - qualifier = proto.Field(proto.BYTES, number=1,) - cells = proto.RepeatedField(proto.MESSAGE, number=2, message="Cell",) + qualifier = proto.Field( + proto.BYTES, + number=1, + ) + cells = proto.RepeatedField( + proto.MESSAGE, + number=2, + message="Cell", + ) class Cell(proto.Message): @@ -122,9 +143,18 @@ class Cell(proto.Message): [RowFilter][google.bigtable.v2.RowFilter]. """ - timestamp_micros = proto.Field(proto.INT64, number=1,) - value = proto.Field(proto.BYTES, number=2,) - labels = proto.RepeatedField(proto.STRING, number=3,) + timestamp_micros = proto.Field( + proto.INT64, + number=1, + ) + value = proto.Field( + proto.BYTES, + number=2, + ) + labels = proto.RepeatedField( + proto.STRING, + number=3, + ) class RowRange(proto.Message): @@ -160,10 +190,26 @@ class RowRange(proto.Message): This field is a member of `oneof`_ ``end_key``. 
""" - start_key_closed = proto.Field(proto.BYTES, number=1, oneof="start_key",) - start_key_open = proto.Field(proto.BYTES, number=2, oneof="start_key",) - end_key_open = proto.Field(proto.BYTES, number=3, oneof="end_key",) - end_key_closed = proto.Field(proto.BYTES, number=4, oneof="end_key",) + start_key_closed = proto.Field( + proto.BYTES, + number=1, + oneof="start_key", + ) + start_key_open = proto.Field( + proto.BYTES, + number=2, + oneof="start_key", + ) + end_key_open = proto.Field( + proto.BYTES, + number=3, + oneof="end_key", + ) + end_key_closed = proto.Field( + proto.BYTES, + number=4, + oneof="end_key", + ) class RowSet(proto.Message): @@ -176,8 +222,15 @@ class RowSet(proto.Message): Contiguous row ranges included in the set. """ - row_keys = proto.RepeatedField(proto.BYTES, number=1,) - row_ranges = proto.RepeatedField(proto.MESSAGE, number=2, message="RowRange",) + row_keys = proto.RepeatedField( + proto.BYTES, + number=1, + ) + row_ranges = proto.RepeatedField( + proto.MESSAGE, + number=2, + message="RowRange", + ) class ColumnRange(proto.Message): @@ -219,13 +272,30 @@ class ColumnRange(proto.Message): This field is a member of `oneof`_ ``end_qualifier``. """ - family_name = proto.Field(proto.STRING, number=1,) + family_name = proto.Field( + proto.STRING, + number=1, + ) start_qualifier_closed = proto.Field( - proto.BYTES, number=2, oneof="start_qualifier", + proto.BYTES, + number=2, + oneof="start_qualifier", + ) + start_qualifier_open = proto.Field( + proto.BYTES, + number=3, + oneof="start_qualifier", + ) + end_qualifier_closed = proto.Field( + proto.BYTES, + number=4, + oneof="end_qualifier", + ) + end_qualifier_open = proto.Field( + proto.BYTES, + number=5, + oneof="end_qualifier", ) - start_qualifier_open = proto.Field(proto.BYTES, number=3, oneof="start_qualifier",) - end_qualifier_closed = proto.Field(proto.BYTES, number=4, oneof="end_qualifier",) - end_qualifier_open = proto.Field(proto.BYTES, number=5, oneof="end_qualifier",) class TimestampRange(proto.Message): @@ -240,8 +310,14 @@ class TimestampRange(proto.Message): interpreted as infinity. """ - start_timestamp_micros = proto.Field(proto.INT64, number=1,) - end_timestamp_micros = proto.Field(proto.INT64, number=2,) + start_timestamp_micros = proto.Field( + proto.INT64, + number=1, + ) + end_timestamp_micros = proto.Field( + proto.INT64, + number=2, + ) class ValueRange(proto.Message): @@ -277,10 +353,26 @@ class ValueRange(proto.Message): This field is a member of `oneof`_ ``end_value``. """ - start_value_closed = proto.Field(proto.BYTES, number=1, oneof="start_value",) - start_value_open = proto.Field(proto.BYTES, number=2, oneof="start_value",) - end_value_closed = proto.Field(proto.BYTES, number=3, oneof="end_value",) - end_value_open = proto.Field(proto.BYTES, number=4, oneof="end_value",) + start_value_closed = proto.Field( + proto.BYTES, + number=1, + oneof="start_value", + ) + start_value_open = proto.Field( + proto.BYTES, + number=2, + oneof="start_value", + ) + end_value_closed = proto.Field( + proto.BYTES, + number=3, + oneof="end_value", + ) + end_value_open = proto.Field( + proto.BYTES, + number=4, + oneof="end_value", + ) class RowFilter(proto.Message): @@ -546,7 +638,11 @@ class Chain(proto.Message): atomically. 
""" - filters = proto.RepeatedField(proto.MESSAGE, number=1, message="RowFilter",) + filters = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="RowFilter", + ) class Interleave(proto.Message): r"""A RowFilter which sends each row to each of several component @@ -584,7 +680,11 @@ class Interleave(proto.Message): All interleaved filters are executed atomically. """ - filters = proto.RepeatedField(proto.MESSAGE, number=1, message="RowFilter",) + filters = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="RowFilter", + ) class Condition(proto.Message): r"""A RowFilter which evaluates one of two possible RowFilters, @@ -611,37 +711,123 @@ class Condition(proto.Message): will be returned in the false case. """ - predicate_filter = proto.Field(proto.MESSAGE, number=1, message="RowFilter",) - true_filter = proto.Field(proto.MESSAGE, number=2, message="RowFilter",) - false_filter = proto.Field(proto.MESSAGE, number=3, message="RowFilter",) - - chain = proto.Field(proto.MESSAGE, number=1, oneof="filter", message=Chain,) + predicate_filter = proto.Field( + proto.MESSAGE, + number=1, + message="RowFilter", + ) + true_filter = proto.Field( + proto.MESSAGE, + number=2, + message="RowFilter", + ) + false_filter = proto.Field( + proto.MESSAGE, + number=3, + message="RowFilter", + ) + + chain = proto.Field( + proto.MESSAGE, + number=1, + oneof="filter", + message=Chain, + ) interleave = proto.Field( - proto.MESSAGE, number=2, oneof="filter", message=Interleave, - ) - condition = proto.Field(proto.MESSAGE, number=3, oneof="filter", message=Condition,) - sink = proto.Field(proto.BOOL, number=16, oneof="filter",) - pass_all_filter = proto.Field(proto.BOOL, number=17, oneof="filter",) - block_all_filter = proto.Field(proto.BOOL, number=18, oneof="filter",) - row_key_regex_filter = proto.Field(proto.BYTES, number=4, oneof="filter",) - row_sample_filter = proto.Field(proto.DOUBLE, number=14, oneof="filter",) - family_name_regex_filter = proto.Field(proto.STRING, number=5, oneof="filter",) - column_qualifier_regex_filter = proto.Field(proto.BYTES, number=6, oneof="filter",) + proto.MESSAGE, + number=2, + oneof="filter", + message=Interleave, + ) + condition = proto.Field( + proto.MESSAGE, + number=3, + oneof="filter", + message=Condition, + ) + sink = proto.Field( + proto.BOOL, + number=16, + oneof="filter", + ) + pass_all_filter = proto.Field( + proto.BOOL, + number=17, + oneof="filter", + ) + block_all_filter = proto.Field( + proto.BOOL, + number=18, + oneof="filter", + ) + row_key_regex_filter = proto.Field( + proto.BYTES, + number=4, + oneof="filter", + ) + row_sample_filter = proto.Field( + proto.DOUBLE, + number=14, + oneof="filter", + ) + family_name_regex_filter = proto.Field( + proto.STRING, + number=5, + oneof="filter", + ) + column_qualifier_regex_filter = proto.Field( + proto.BYTES, + number=6, + oneof="filter", + ) column_range_filter = proto.Field( - proto.MESSAGE, number=7, oneof="filter", message="ColumnRange", + proto.MESSAGE, + number=7, + oneof="filter", + message="ColumnRange", ) timestamp_range_filter = proto.Field( - proto.MESSAGE, number=8, oneof="filter", message="TimestampRange", + proto.MESSAGE, + number=8, + oneof="filter", + message="TimestampRange", + ) + value_regex_filter = proto.Field( + proto.BYTES, + number=9, + oneof="filter", ) - value_regex_filter = proto.Field(proto.BYTES, number=9, oneof="filter",) value_range_filter = proto.Field( - proto.MESSAGE, number=15, oneof="filter", message="ValueRange", + proto.MESSAGE, + number=15, + oneof="filter", + 
message="ValueRange", + ) + cells_per_row_offset_filter = proto.Field( + proto.INT32, + number=10, + oneof="filter", + ) + cells_per_row_limit_filter = proto.Field( + proto.INT32, + number=11, + oneof="filter", + ) + cells_per_column_limit_filter = proto.Field( + proto.INT32, + number=12, + oneof="filter", + ) + strip_value_transformer = proto.Field( + proto.BOOL, + number=13, + oneof="filter", + ) + apply_label_transformer = proto.Field( + proto.STRING, + number=19, + oneof="filter", ) - cells_per_row_offset_filter = proto.Field(proto.INT32, number=10, oneof="filter",) - cells_per_row_limit_filter = proto.Field(proto.INT32, number=11, oneof="filter",) - cells_per_column_limit_filter = proto.Field(proto.INT32, number=12, oneof="filter",) - strip_value_transformer = proto.Field(proto.BOOL, number=13, oneof="filter",) - apply_label_transformer = proto.Field(proto.STRING, number=19, oneof="filter",) class Mutation(proto.Message): @@ -698,10 +884,22 @@ class SetCell(proto.Message): cell. """ - family_name = proto.Field(proto.STRING, number=1,) - column_qualifier = proto.Field(proto.BYTES, number=2,) - timestamp_micros = proto.Field(proto.INT64, number=3,) - value = proto.Field(proto.BYTES, number=4,) + family_name = proto.Field( + proto.STRING, + number=1, + ) + column_qualifier = proto.Field( + proto.BYTES, + number=2, + ) + timestamp_micros = proto.Field( + proto.INT64, + number=3, + ) + value = proto.Field( + proto.BYTES, + number=4, + ) class DeleteFromColumn(proto.Message): r"""A Mutation which deletes cells from the specified column, @@ -720,9 +918,19 @@ class DeleteFromColumn(proto.Message): should be deleted. """ - family_name = proto.Field(proto.STRING, number=1,) - column_qualifier = proto.Field(proto.BYTES, number=2,) - time_range = proto.Field(proto.MESSAGE, number=3, message="TimestampRange",) + family_name = proto.Field( + proto.STRING, + number=1, + ) + column_qualifier = proto.Field( + proto.BYTES, + number=2, + ) + time_range = proto.Field( + proto.MESSAGE, + number=3, + message="TimestampRange", + ) class DeleteFromFamily(proto.Message): r"""A Mutation which deletes all cells from the specified column @@ -734,21 +942,37 @@ class DeleteFromFamily(proto.Message): Must match ``[-_.a-zA-Z0-9]+`` """ - family_name = proto.Field(proto.STRING, number=1,) + family_name = proto.Field( + proto.STRING, + number=1, + ) class DeleteFromRow(proto.Message): - r"""A Mutation which deletes all cells from the containing row. - """ + r"""A Mutation which deletes all cells from the containing row.""" - set_cell = proto.Field(proto.MESSAGE, number=1, oneof="mutation", message=SetCell,) + set_cell = proto.Field( + proto.MESSAGE, + number=1, + oneof="mutation", + message=SetCell, + ) delete_from_column = proto.Field( - proto.MESSAGE, number=2, oneof="mutation", message=DeleteFromColumn, + proto.MESSAGE, + number=2, + oneof="mutation", + message=DeleteFromColumn, ) delete_from_family = proto.Field( - proto.MESSAGE, number=3, oneof="mutation", message=DeleteFromFamily, + proto.MESSAGE, + number=3, + oneof="mutation", + message=DeleteFromFamily, ) delete_from_row = proto.Field( - proto.MESSAGE, number=4, oneof="mutation", message=DeleteFromRow, + proto.MESSAGE, + number=4, + oneof="mutation", + message=DeleteFromRow, ) @@ -788,10 +1012,24 @@ class ReadModifyWriteRule(proto.Message): This field is a member of `oneof`_ ``rule``. 
""" - family_name = proto.Field(proto.STRING, number=1,) - column_qualifier = proto.Field(proto.BYTES, number=2,) - append_value = proto.Field(proto.BYTES, number=3, oneof="rule",) - increment_amount = proto.Field(proto.INT64, number=4, oneof="rule",) + family_name = proto.Field( + proto.STRING, + number=1, + ) + column_qualifier = proto.Field( + proto.BYTES, + number=2, + ) + append_value = proto.Field( + proto.BYTES, + number=3, + oneof="rule", + ) + increment_amount = proto.Field( + proto.INT64, + number=4, + oneof="rule", + ) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index 6ae044f00c8a..73ebd799cca5 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -24,7 +24,7 @@ import nox -BLACK_VERSION = "black==19.10b0" +BLACK_VERSION = "black==22.3.0" BLACK_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"] DEFAULT_PYTHON_VERSION = "3.8" @@ -59,7 +59,9 @@ def lint(session): """ session.install("flake8", BLACK_VERSION) session.run( - "black", "--check", *BLACK_PATHS, + "black", + "--check", + *BLACK_PATHS, ) session.run("flake8", "google", "tests") @@ -69,7 +71,8 @@ def blacken(session): """Run black. Format code to uniform standard.""" session.install(BLACK_VERSION) session.run( - "black", *BLACK_PATHS, + "black", + *BLACK_PATHS, ) diff --git a/packages/google-cloud-bigtable/samples/beam/noxfile.py b/packages/google-cloud-bigtable/samples/beam/noxfile.py index d9d4d1469bec..05df27cdb0fc 100644 --- a/packages/google-cloud-bigtable/samples/beam/noxfile.py +++ b/packages/google-cloud-bigtable/samples/beam/noxfile.py @@ -29,7 +29,7 @@ # WARNING - WARNING - WARNING - WARNING - WARNING # WARNING - WARNING - WARNING - WARNING - WARNING -BLACK_VERSION = "black==19.10b0" +BLACK_VERSION = "black==22.3.0" # Copy `noxfile_config.py` to your directory and modify it instead. diff --git a/packages/google-cloud-bigtable/samples/hello/noxfile.py b/packages/google-cloud-bigtable/samples/hello/noxfile.py index 4c808af73ea2..949e0fde9ae1 100644 --- a/packages/google-cloud-bigtable/samples/hello/noxfile.py +++ b/packages/google-cloud-bigtable/samples/hello/noxfile.py @@ -29,7 +29,7 @@ # WARNING - WARNING - WARNING - WARNING - WARNING # WARNING - WARNING - WARNING - WARNING - WARNING -BLACK_VERSION = "black==19.10b0" +BLACK_VERSION = "black==22.3.0" # Copy `noxfile_config.py` to your directory and modify it instead. diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py b/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py index 4c808af73ea2..949e0fde9ae1 100644 --- a/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py +++ b/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py @@ -29,7 +29,7 @@ # WARNING - WARNING - WARNING - WARNING - WARNING # WARNING - WARNING - WARNING - WARNING - WARNING -BLACK_VERSION = "black==19.10b0" +BLACK_VERSION = "black==22.3.0" # Copy `noxfile_config.py` to your directory and modify it instead. 
diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py b/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py index 4c808af73ea2..949e0fde9ae1 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py +++ b/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py @@ -29,7 +29,7 @@ # WARNING - WARNING - WARNING - WARNING - WARNING # WARNING - WARNING - WARNING - WARNING - WARNING -BLACK_VERSION = "black==19.10b0" +BLACK_VERSION = "black==22.3.0" # Copy `noxfile_config.py` to your directory and modify it instead. diff --git a/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py b/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py index 4c808af73ea2..949e0fde9ae1 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py +++ b/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py @@ -29,7 +29,7 @@ # WARNING - WARNING - WARNING - WARNING - WARNING # WARNING - WARNING - WARNING - WARNING - WARNING -BLACK_VERSION = "black==19.10b0" +BLACK_VERSION = "black==22.3.0" # Copy `noxfile_config.py` to your directory and modify it instead. diff --git a/packages/google-cloud-bigtable/samples/quickstart/noxfile.py b/packages/google-cloud-bigtable/samples/quickstart/noxfile.py index 4c808af73ea2..949e0fde9ae1 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/noxfile.py +++ b/packages/google-cloud-bigtable/samples/quickstart/noxfile.py @@ -29,7 +29,7 @@ # WARNING - WARNING - WARNING - WARNING - WARNING # WARNING - WARNING - WARNING - WARNING - WARNING -BLACK_VERSION = "black==19.10b0" +BLACK_VERSION = "black==22.3.0" # Copy `noxfile_config.py` to your directory and modify it instead. diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py b/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py index 4c808af73ea2..949e0fde9ae1 100644 --- a/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py +++ b/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py @@ -29,7 +29,7 @@ # WARNING - WARNING - WARNING - WARNING - WARNING # WARNING - WARNING - WARNING - WARNING - WARNING -BLACK_VERSION = "black==19.10b0" +BLACK_VERSION = "black==22.3.0" # Copy `noxfile_config.py` to your directory and modify it instead. diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py index 4c808af73ea2..949e0fde9ae1 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py @@ -29,7 +29,7 @@ # WARNING - WARNING - WARNING - WARNING - WARNING # WARNING - WARNING - WARNING - WARNING - WARNING -BLACK_VERSION = "black==19.10b0" +BLACK_VERSION = "black==22.3.0" # Copy `noxfile_config.py` to your directory and modify it instead. diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py index 4c808af73ea2..949e0fde9ae1 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py @@ -29,7 +29,7 @@ # WARNING - WARNING - WARNING - WARNING - WARNING # WARNING - WARNING - WARNING - WARNING - WARNING -BLACK_VERSION = "black==19.10b0" +BLACK_VERSION = "black==22.3.0" # Copy `noxfile_config.py` to your directory and modify it instead. 
diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py index 4c808af73ea2..949e0fde9ae1 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py @@ -29,7 +29,7 @@ # WARNING - WARNING - WARNING - WARNING - WARNING # WARNING - WARNING - WARNING - WARNING - WARNING -BLACK_VERSION = "black==19.10b0" +BLACK_VERSION = "black==22.3.0" # Copy `noxfile_config.py` to your directory and modify it instead. diff --git a/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py b/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py index 4c808af73ea2..949e0fde9ae1 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py +++ b/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py @@ -29,7 +29,7 @@ # WARNING - WARNING - WARNING - WARNING - WARNING # WARNING - WARNING - WARNING - WARNING - WARNING -BLACK_VERSION = "black==19.10b0" +BLACK_VERSION = "black==22.3.0" # Copy `noxfile_config.py` to your directory and modify it instead. diff --git a/packages/google-cloud-bigtable/tests/system/_helpers.py b/packages/google-cloud-bigtable/tests/system/_helpers.py index ab4b54b05b1b..95261879e66e 100644 --- a/packages/google-cloud-bigtable/tests/system/_helpers.py +++ b/packages/google-cloud-bigtable/tests/system/_helpers.py @@ -33,7 +33,9 @@ def _retry_on_unavailable(exc): retry_grpc_unavailable = retry.RetryErrors( - core_exceptions.GrpcRendezvous, error_predicate=_retry_on_unavailable, max_tries=9, + core_exceptions.GrpcRendezvous, + error_predicate=_retry_on_unavailable, + max_tries=9, ) diff --git a/packages/google-cloud-bigtable/tests/system/conftest.py b/packages/google-cloud-bigtable/tests/system/conftest.py index fdf111a53d93..23052f4f44bd 100644 --- a/packages/google-cloud-bigtable/tests/system/conftest.py +++ b/packages/google-cloud-bigtable/tests/system/conftest.py @@ -103,7 +103,9 @@ def admin_instance(admin_client, admin_instance_id, instance_labels): @pytest.fixture(scope="session") def admin_cluster(admin_instance, admin_cluster_id, location_id, serve_nodes): return admin_instance.cluster( - admin_cluster_id, location_id=location_id, serve_nodes=serve_nodes, + admin_cluster_id, + location_id=location_id, + serve_nodes=serve_nodes, ) @@ -169,7 +171,9 @@ def data_instance_populated( # See: https://cloud.google.com/bigtable/docs/emulator if not in_emulator: cluster = instance.cluster( - data_cluster_id, location_id=location_id, serve_nodes=serve_nodes, + data_cluster_id, + location_id=location_id, + serve_nodes=serve_nodes, ) operation = instance.create(clusters=[cluster]) operation.result(timeout=30) diff --git a/packages/google-cloud-bigtable/tests/system/test_instance_admin.py b/packages/google-cloud-bigtable/tests/system/test_instance_admin.py index 36b61d6ddf6b..8c09f6d8711b 100644 --- a/packages/google-cloud-bigtable/tests/system/test_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/system/test_instance_admin.py @@ -150,7 +150,9 @@ def test_instance_create_prod( alt_cluster_id = f"{alt_instance_id}-cluster" serve_nodes = 1 cluster = instance.cluster( - alt_cluster_id, location_id=location_id, serve_nodes=serve_nodes, + alt_cluster_id, + location_id=location_id, + serve_nodes=serve_nodes, ) operation = instance.create(clusters=[cluster]) @@ -532,7 +534,10 @@ def test_instance_update_w_type( labels=instance_labels, ) alt_cluster_id = f"{alt_instance_id}-cluster" - 
cluster = instance.cluster(alt_cluster_id, location_id=location_id,) + cluster = instance.cluster( + alt_cluster_id, + location_id=location_id, + ) operation = instance.create(clusters=[cluster]) instances_to_delete.append(instance) @@ -560,7 +565,9 @@ def test_cluster_exists_miss(admin_instance_populated, skip_on_emulator): def test_cluster_create( - admin_instance_populated, admin_instance_id, skip_on_emulator, + admin_instance_populated, + admin_instance_id, + skip_on_emulator, ): alt_cluster_id = f"{admin_instance_id}-c2" alt_location_id = "us-central1-f" @@ -592,7 +599,9 @@ def test_cluster_create( def test_cluster_create_w_autoscaling( - admin_instance_populated, admin_instance_id, skip_on_emulator, + admin_instance_populated, + admin_instance_id, + skip_on_emulator, ): alt_cluster_id = f"{admin_instance_id}-c2" alt_location_id = "us-central1-f" diff --git a/packages/google-cloud-bigtable/tests/system/test_table_admin.py b/packages/google-cloud-bigtable/tests/system/test_table_admin.py index 1ed540d632fd..92283d328365 100644 --- a/packages/google-cloud-bigtable/tests/system/test_table_admin.py +++ b/packages/google-cloud-bigtable/tests/system/test_table_admin.py @@ -94,7 +94,8 @@ def test_table_create(data_instance_populated, shared_table, tables_to_delete): def test_table_create_w_families( - data_instance_populated, tables_to_delete, + data_instance_populated, + tables_to_delete, ): from google.cloud.bigtable.column_family import MaxVersionsGCRule @@ -236,7 +237,9 @@ def test_table_set_iam_policy( def test_table_test_iam_permissions( - data_instance_populated, tables_to_delete, skip_on_emulator, + data_instance_populated, + tables_to_delete, + skip_on_emulator, ): temp_table_id = "test-test-iam-policy-table" temp_table = data_instance_populated.table(temp_table_id) @@ -337,7 +340,9 @@ def test_table_backup( alt_cluster_id = f"{alt_instance_id}-cluster" alt_instance = admin_client.instance(alt_instance_id, labels=instance_labels) alt_cluster = alt_instance.cluster( - cluster_id=alt_cluster_id, location_id=location_id, serve_nodes=1, + cluster_id=alt_cluster_id, + location_id=location_id, + serve_nodes=1, ) create_op = alt_instance.create(clusters=[alt_cluster]) instances_to_delete.append(alt_instance) diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index 226f5acf76a3..99ba131377c8 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -104,7 +104,11 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( - "client_class", [BigtableInstanceAdminClient, BigtableInstanceAdminAsyncClient,] + "client_class", + [ + BigtableInstanceAdminClient, + BigtableInstanceAdminAsyncClient, + ], ) def test_bigtable_instance_admin_client_from_service_account_info(client_class): creds = ga_credentials.AnonymousCredentials() @@ -146,7 +150,11 @@ def test_bigtable_instance_admin_client_service_account_always_use_jwt( @pytest.mark.parametrize( - "client_class", [BigtableInstanceAdminClient, BigtableInstanceAdminAsyncClient,] + "client_class", + [ + BigtableInstanceAdminClient, + BigtableInstanceAdminAsyncClient, + ], ) def test_bigtable_instance_admin_client_from_service_account_file(client_class): creds = ga_credentials.AnonymousCredentials() @@ -528,7 +536,9 @@ def 
test_bigtable_instance_admin_client_client_options_scopes( client_class, transport_class, transport_name ): # Check the case scopes are provided. - options = client_options.ClientOptions(scopes=["1", "2"],) + options = client_options.ClientOptions( + scopes=["1", "2"], + ) with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options, transport=transport_name) @@ -677,11 +687,16 @@ def test_bigtable_instance_admin_client_create_channel_credentials_file( @pytest.mark.parametrize( - "request_type", [bigtable_instance_admin.CreateInstanceRequest, dict,] + "request_type", + [ + bigtable_instance_admin.CreateInstanceRequest, + dict, + ], ) def test_create_instance(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -707,7 +722,8 @@ def test_create_instance_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -724,7 +740,8 @@ async def test_create_instance_async( request_type=bigtable_instance_admin.CreateInstanceRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -776,7 +793,10 @@ def test_create_instance_field_headers(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "parent=parent/value", + ) in kw["metadata"] @pytest.mark.asyncio @@ -805,7 +825,10 @@ async def test_create_instance_field_headers_async(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "parent=parent/value", + ) in kw["metadata"] def test_create_instance_flattened(): @@ -921,11 +944,16 @@ async def test_create_instance_flattened_error_async(): @pytest.mark.parametrize( - "request_type", [bigtable_instance_admin.GetInstanceRequest, dict,] + "request_type", + [ + bigtable_instance_admin.GetInstanceRequest, + dict, + ], ) def test_get_instance(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -960,7 +988,8 @@ def test_get_instance_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -977,7 +1006,8 @@ async def test_get_instance_async( request_type=bigtable_instance_admin.GetInstanceRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -1038,7 +1068,10 @@ def test_get_instance_field_headers(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "name=name/value", + ) in kw["metadata"] @pytest.mark.asyncio @@ -1065,7 +1098,10 @@ async def test_get_instance_field_headers_async(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "name=name/value", + ) in kw["metadata"] def test_get_instance_flattened(): @@ -1079,7 +1115,9 @@ def test_get_instance_flattened(): call.return_value = instance.Instance() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - client.get_instance(name="name_value",) + client.get_instance( + name="name_value", + ) # Establish that the underlying call was made with the expected # request object values. @@ -1099,7 +1137,8 @@ def test_get_instance_flattened_error(): # fields is an error. with pytest.raises(ValueError): client.get_instance( - bigtable_instance_admin.GetInstanceRequest(), name="name_value", + bigtable_instance_admin.GetInstanceRequest(), + name="name_value", ) @@ -1117,7 +1156,9 @@ async def test_get_instance_flattened_async(): call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.Instance()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.get_instance(name="name_value",) + response = await client.get_instance( + name="name_value", + ) # Establish that the underlying call was made with the expected # request object values. @@ -1138,16 +1179,22 @@ async def test_get_instance_flattened_error_async(): # fields is an error. with pytest.raises(ValueError): await client.get_instance( - bigtable_instance_admin.GetInstanceRequest(), name="name_value", + bigtable_instance_admin.GetInstanceRequest(), + name="name_value", ) @pytest.mark.parametrize( - "request_type", [bigtable_instance_admin.ListInstancesRequest, dict,] + "request_type", + [ + bigtable_instance_admin.ListInstancesRequest, + dict, + ], ) def test_list_instances(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -1179,7 +1226,8 @@ def test_list_instances_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1196,7 +1244,8 @@ async def test_list_instances_async( request_type=bigtable_instance_admin.ListInstancesRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -1253,7 +1302,10 @@ def test_list_instances_field_headers(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "parent=parent/value", + ) in kw["metadata"] @pytest.mark.asyncio @@ -1282,7 +1334,10 @@ async def test_list_instances_field_headers_async(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "parent=parent/value", + ) in kw["metadata"] def test_list_instances_flattened(): @@ -1296,7 +1351,9 @@ def test_list_instances_flattened(): call.return_value = bigtable_instance_admin.ListInstancesResponse() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - client.list_instances(parent="parent_value",) + client.list_instances( + parent="parent_value", + ) # Establish that the underlying call was made with the expected # request object values. @@ -1316,7 +1373,8 @@ def test_list_instances_flattened_error(): # fields is an error. with pytest.raises(ValueError): client.list_instances( - bigtable_instance_admin.ListInstancesRequest(), parent="parent_value", + bigtable_instance_admin.ListInstancesRequest(), + parent="parent_value", ) @@ -1336,7 +1394,9 @@ async def test_list_instances_flattened_async(): ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.list_instances(parent="parent_value",) + response = await client.list_instances( + parent="parent_value", + ) # Establish that the underlying call was made with the expected # request object values. @@ -1357,14 +1417,22 @@ async def test_list_instances_flattened_error_async(): # fields is an error. with pytest.raises(ValueError): await client.list_instances( - bigtable_instance_admin.ListInstancesRequest(), parent="parent_value", + bigtable_instance_admin.ListInstancesRequest(), + parent="parent_value", ) -@pytest.mark.parametrize("request_type", [instance.Instance, dict,]) +@pytest.mark.parametrize( + "request_type", + [ + instance.Instance, + dict, + ], +) def test_update_instance(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -1399,7 +1467,8 @@ def test_update_instance_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1415,7 +1484,8 @@ async def test_update_instance_async( transport: str = "grpc_asyncio", request_type=instance.Instance ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -1476,7 +1546,10 @@ def test_update_instance_field_headers(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "name=name/value", + ) in kw["metadata"] @pytest.mark.asyncio @@ -1503,15 +1576,23 @@ async def test_update_instance_field_headers_async(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "name=name/value", + ) in kw["metadata"] @pytest.mark.parametrize( - "request_type", [bigtable_instance_admin.PartialUpdateInstanceRequest, dict,] + "request_type", + [ + bigtable_instance_admin.PartialUpdateInstanceRequest, + dict, + ], ) def test_partial_update_instance(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -1539,7 +1620,8 @@ def test_partial_update_instance_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1558,7 +1640,8 @@ async def test_partial_update_instance_async( request_type=bigtable_instance_admin.PartialUpdateInstanceRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -1614,9 +1697,10 @@ def test_partial_update_instance_field_headers(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "instance.name=instance.name/value",) in kw[ - "metadata" - ] + assert ( + "x-goog-request-params", + "instance.name=instance.name/value", + ) in kw["metadata"] @pytest.mark.asyncio @@ -1647,9 +1731,10 @@ async def test_partial_update_instance_field_headers_async(): # Establish that the field header was sent. 
_, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "instance.name=instance.name/value",) in kw[ - "metadata" - ] + assert ( + "x-goog-request-params", + "instance.name=instance.name/value", + ) in kw["metadata"] def test_partial_update_instance_flattened(): @@ -1749,11 +1834,16 @@ async def test_partial_update_instance_flattened_error_async(): @pytest.mark.parametrize( - "request_type", [bigtable_instance_admin.DeleteInstanceRequest, dict,] + "request_type", + [ + bigtable_instance_admin.DeleteInstanceRequest, + dict, + ], ) def test_delete_instance(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -1779,7 +1869,8 @@ def test_delete_instance_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1796,7 +1887,8 @@ async def test_delete_instance_async( request_type=bigtable_instance_admin.DeleteInstanceRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -1846,7 +1938,10 @@ def test_delete_instance_field_headers(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "name=name/value", + ) in kw["metadata"] @pytest.mark.asyncio @@ -1873,7 +1968,10 @@ async def test_delete_instance_field_headers_async(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "name=name/value", + ) in kw["metadata"] def test_delete_instance_flattened(): @@ -1887,7 +1985,9 @@ def test_delete_instance_flattened(): call.return_value = None # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - client.delete_instance(name="name_value",) + client.delete_instance( + name="name_value", + ) # Establish that the underlying call was made with the expected # request object values. @@ -1907,7 +2007,8 @@ def test_delete_instance_flattened_error(): # fields is an error. with pytest.raises(ValueError): client.delete_instance( - bigtable_instance_admin.DeleteInstanceRequest(), name="name_value", + bigtable_instance_admin.DeleteInstanceRequest(), + name="name_value", ) @@ -1925,7 +2026,9 @@ async def test_delete_instance_flattened_async(): call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.delete_instance(name="name_value",) + response = await client.delete_instance( + name="name_value", + ) # Establish that the underlying call was made with the expected # request object values. 
@@ -1946,16 +2049,22 @@ async def test_delete_instance_flattened_error_async(): # fields is an error. with pytest.raises(ValueError): await client.delete_instance( - bigtable_instance_admin.DeleteInstanceRequest(), name="name_value", + bigtable_instance_admin.DeleteInstanceRequest(), + name="name_value", ) @pytest.mark.parametrize( - "request_type", [bigtable_instance_admin.CreateClusterRequest, dict,] + "request_type", + [ + bigtable_instance_admin.CreateClusterRequest, + dict, + ], ) def test_create_cluster(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -1981,7 +2090,8 @@ def test_create_cluster_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1998,7 +2108,8 @@ async def test_create_cluster_async( request_type=bigtable_instance_admin.CreateClusterRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -2050,7 +2161,10 @@ def test_create_cluster_field_headers(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "parent=parent/value", + ) in kw["metadata"] @pytest.mark.asyncio @@ -2079,7 +2193,10 @@ async def test_create_cluster_field_headers_async(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "parent=parent/value", + ) in kw["metadata"] def test_create_cluster_flattened(): @@ -2185,11 +2302,16 @@ async def test_create_cluster_flattened_error_async(): @pytest.mark.parametrize( - "request_type", [bigtable_instance_admin.GetClusterRequest, dict,] + "request_type", + [ + bigtable_instance_admin.GetClusterRequest, + dict, + ], ) def test_get_cluster(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -2231,7 +2353,8 @@ def test_get_cluster_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -2248,7 +2371,8 @@ async def test_get_cluster_async( request_type=bigtable_instance_admin.GetClusterRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -2311,7 +2435,10 @@ def test_get_cluster_field_headers(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "name=name/value", + ) in kw["metadata"] @pytest.mark.asyncio @@ -2338,7 +2465,10 @@ async def test_get_cluster_field_headers_async(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "name=name/value", + ) in kw["metadata"] def test_get_cluster_flattened(): @@ -2352,7 +2482,9 @@ def test_get_cluster_flattened(): call.return_value = instance.Cluster() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - client.get_cluster(name="name_value",) + client.get_cluster( + name="name_value", + ) # Establish that the underlying call was made with the expected # request object values. @@ -2372,7 +2504,8 @@ def test_get_cluster_flattened_error(): # fields is an error. with pytest.raises(ValueError): client.get_cluster( - bigtable_instance_admin.GetClusterRequest(), name="name_value", + bigtable_instance_admin.GetClusterRequest(), + name="name_value", ) @@ -2390,7 +2523,9 @@ async def test_get_cluster_flattened_async(): call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.Cluster()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.get_cluster(name="name_value",) + response = await client.get_cluster( + name="name_value", + ) # Establish that the underlying call was made with the expected # request object values. @@ -2411,16 +2546,22 @@ async def test_get_cluster_flattened_error_async(): # fields is an error. with pytest.raises(ValueError): await client.get_cluster( - bigtable_instance_admin.GetClusterRequest(), name="name_value", + bigtable_instance_admin.GetClusterRequest(), + name="name_value", ) @pytest.mark.parametrize( - "request_type", [bigtable_instance_admin.ListClustersRequest, dict,] + "request_type", + [ + bigtable_instance_admin.ListClustersRequest, + dict, + ], ) def test_list_clusters(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -2452,7 +2593,8 @@ def test_list_clusters_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -2469,7 +2611,8 @@ async def test_list_clusters_async( request_type=bigtable_instance_admin.ListClustersRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -2526,7 +2669,10 @@ def test_list_clusters_field_headers(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "parent=parent/value", + ) in kw["metadata"] @pytest.mark.asyncio @@ -2555,7 +2701,10 @@ async def test_list_clusters_field_headers_async(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "parent=parent/value", + ) in kw["metadata"] def test_list_clusters_flattened(): @@ -2569,7 +2718,9 @@ def test_list_clusters_flattened(): call.return_value = bigtable_instance_admin.ListClustersResponse() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - client.list_clusters(parent="parent_value",) + client.list_clusters( + parent="parent_value", + ) # Establish that the underlying call was made with the expected # request object values. @@ -2589,7 +2740,8 @@ def test_list_clusters_flattened_error(): # fields is an error. with pytest.raises(ValueError): client.list_clusters( - bigtable_instance_admin.ListClustersRequest(), parent="parent_value", + bigtable_instance_admin.ListClustersRequest(), + parent="parent_value", ) @@ -2609,7 +2761,9 @@ async def test_list_clusters_flattened_async(): ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.list_clusters(parent="parent_value",) + response = await client.list_clusters( + parent="parent_value", + ) # Establish that the underlying call was made with the expected # request object values. @@ -2630,14 +2784,22 @@ async def test_list_clusters_flattened_error_async(): # fields is an error. with pytest.raises(ValueError): await client.list_clusters( - bigtable_instance_admin.ListClustersRequest(), parent="parent_value", + bigtable_instance_admin.ListClustersRequest(), + parent="parent_value", ) -@pytest.mark.parametrize("request_type", [instance.Cluster, dict,]) +@pytest.mark.parametrize( + "request_type", + [ + instance.Cluster, + dict, + ], +) def test_update_cluster(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -2663,7 +2825,8 @@ def test_update_cluster_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -2679,7 +2842,8 @@ async def test_update_cluster_async( transport: str = "grpc_asyncio", request_type=instance.Cluster ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -2731,7 +2895,10 @@ def test_update_cluster_field_headers(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "name=name/value", + ) in kw["metadata"] @pytest.mark.asyncio @@ -2760,15 +2927,23 @@ async def test_update_cluster_field_headers_async(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "name=name/value", + ) in kw["metadata"] @pytest.mark.parametrize( - "request_type", [bigtable_instance_admin.PartialUpdateClusterRequest, dict,] + "request_type", + [ + bigtable_instance_admin.PartialUpdateClusterRequest, + dict, + ], ) def test_partial_update_cluster(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -2796,7 +2971,8 @@ def test_partial_update_cluster_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -2815,7 +2991,8 @@ async def test_partial_update_cluster_async( request_type=bigtable_instance_admin.PartialUpdateClusterRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -2871,9 +3048,10 @@ def test_partial_update_cluster_field_headers(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "cluster.name=cluster.name/value",) in kw[ - "metadata" - ] + assert ( + "x-goog-request-params", + "cluster.name=cluster.name/value", + ) in kw["metadata"] @pytest.mark.asyncio @@ -2904,9 +3082,10 @@ async def test_partial_update_cluster_field_headers_async(): # Establish that the field header was sent. 
_, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "cluster.name=cluster.name/value",) in kw[ - "metadata" - ] + assert ( + "x-goog-request-params", + "cluster.name=cluster.name/value", + ) in kw["metadata"] def test_partial_update_cluster_flattened(): @@ -3006,11 +3185,16 @@ async def test_partial_update_cluster_flattened_error_async(): @pytest.mark.parametrize( - "request_type", [bigtable_instance_admin.DeleteClusterRequest, dict,] + "request_type", + [ + bigtable_instance_admin.DeleteClusterRequest, + dict, + ], ) def test_delete_cluster(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -3036,7 +3220,8 @@ def test_delete_cluster_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -3053,7 +3238,8 @@ async def test_delete_cluster_async( request_type=bigtable_instance_admin.DeleteClusterRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -3103,7 +3289,10 @@ def test_delete_cluster_field_headers(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "name=name/value", + ) in kw["metadata"] @pytest.mark.asyncio @@ -3130,7 +3319,10 @@ async def test_delete_cluster_field_headers_async(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "name=name/value", + ) in kw["metadata"] def test_delete_cluster_flattened(): @@ -3144,7 +3336,9 @@ def test_delete_cluster_flattened(): call.return_value = None # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - client.delete_cluster(name="name_value",) + client.delete_cluster( + name="name_value", + ) # Establish that the underlying call was made with the expected # request object values. @@ -3164,7 +3358,8 @@ def test_delete_cluster_flattened_error(): # fields is an error. with pytest.raises(ValueError): client.delete_cluster( - bigtable_instance_admin.DeleteClusterRequest(), name="name_value", + bigtable_instance_admin.DeleteClusterRequest(), + name="name_value", ) @@ -3182,7 +3377,9 @@ async def test_delete_cluster_flattened_async(): call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.delete_cluster(name="name_value",) + response = await client.delete_cluster( + name="name_value", + ) # Establish that the underlying call was made with the expected # request object values. 
@@ -3203,16 +3400,22 @@ async def test_delete_cluster_flattened_error_async(): # fields is an error. with pytest.raises(ValueError): await client.delete_cluster( - bigtable_instance_admin.DeleteClusterRequest(), name="name_value", + bigtable_instance_admin.DeleteClusterRequest(), + name="name_value", ) @pytest.mark.parametrize( - "request_type", [bigtable_instance_admin.CreateAppProfileRequest, dict,] + "request_type", + [ + bigtable_instance_admin.CreateAppProfileRequest, + dict, + ], ) def test_create_app_profile(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -3250,7 +3453,8 @@ def test_create_app_profile_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -3269,7 +3473,8 @@ async def test_create_app_profile_async( request_type=bigtable_instance_admin.CreateAppProfileRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -3283,7 +3488,9 @@ async def test_create_app_profile_async( # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( instance.AppProfile( - name="name_value", etag="etag_value", description="description_value", + name="name_value", + etag="etag_value", + description="description_value", ) ) response = await client.create_app_profile(request) @@ -3330,7 +3537,10 @@ def test_create_app_profile_field_headers(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "parent=parent/value", + ) in kw["metadata"] @pytest.mark.asyncio @@ -3359,7 +3569,10 @@ async def test_create_app_profile_field_headers_async(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "parent=parent/value", + ) in kw["metadata"] def test_create_app_profile_flattened(): @@ -3467,11 +3680,16 @@ async def test_create_app_profile_flattened_error_async(): @pytest.mark.parametrize( - "request_type", [bigtable_instance_admin.GetAppProfileRequest, dict,] + "request_type", + [ + bigtable_instance_admin.GetAppProfileRequest, + dict, + ], ) def test_get_app_profile(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -3507,7 +3725,8 @@ def test_get_app_profile_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -3524,7 +3743,8 @@ async def test_get_app_profile_async( request_type=bigtable_instance_admin.GetAppProfileRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -3536,7 +3756,9 @@ async def test_get_app_profile_async( # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( instance.AppProfile( - name="name_value", etag="etag_value", description="description_value", + name="name_value", + etag="etag_value", + description="description_value", ) ) response = await client.get_app_profile(request) @@ -3581,7 +3803,10 @@ def test_get_app_profile_field_headers(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "name=name/value", + ) in kw["metadata"] @pytest.mark.asyncio @@ -3608,7 +3833,10 @@ async def test_get_app_profile_field_headers_async(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "name=name/value", + ) in kw["metadata"] def test_get_app_profile_flattened(): @@ -3622,7 +3850,9 @@ def test_get_app_profile_flattened(): call.return_value = instance.AppProfile() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - client.get_app_profile(name="name_value",) + client.get_app_profile( + name="name_value", + ) # Establish that the underlying call was made with the expected # request object values. @@ -3642,7 +3872,8 @@ def test_get_app_profile_flattened_error(): # fields is an error. with pytest.raises(ValueError): client.get_app_profile( - bigtable_instance_admin.GetAppProfileRequest(), name="name_value", + bigtable_instance_admin.GetAppProfileRequest(), + name="name_value", ) @@ -3660,7 +3891,9 @@ async def test_get_app_profile_flattened_async(): call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.AppProfile()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.get_app_profile(name="name_value",) + response = await client.get_app_profile( + name="name_value", + ) # Establish that the underlying call was made with the expected # request object values. @@ -3681,16 +3914,22 @@ async def test_get_app_profile_flattened_error_async(): # fields is an error. 
with pytest.raises(ValueError): await client.get_app_profile( - bigtable_instance_admin.GetAppProfileRequest(), name="name_value", + bigtable_instance_admin.GetAppProfileRequest(), + name="name_value", ) @pytest.mark.parametrize( - "request_type", [bigtable_instance_admin.ListAppProfilesRequest, dict,] + "request_type", + [ + bigtable_instance_admin.ListAppProfilesRequest, + dict, + ], ) def test_list_app_profiles(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -3723,7 +3962,8 @@ def test_list_app_profiles_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -3742,7 +3982,8 @@ async def test_list_app_profiles_async( request_type=bigtable_instance_admin.ListAppProfilesRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -3803,7 +4044,10 @@ def test_list_app_profiles_field_headers(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "parent=parent/value", + ) in kw["metadata"] @pytest.mark.asyncio @@ -3834,7 +4078,10 @@ async def test_list_app_profiles_field_headers_async(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "parent=parent/value", + ) in kw["metadata"] def test_list_app_profiles_flattened(): @@ -3850,7 +4097,9 @@ def test_list_app_profiles_flattened(): call.return_value = bigtable_instance_admin.ListAppProfilesResponse() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - client.list_app_profiles(parent="parent_value",) + client.list_app_profiles( + parent="parent_value", + ) # Establish that the underlying call was made with the expected # request object values. @@ -3870,7 +4119,8 @@ def test_list_app_profiles_flattened_error(): # fields is an error. with pytest.raises(ValueError): client.list_app_profiles( - bigtable_instance_admin.ListAppProfilesRequest(), parent="parent_value", + bigtable_instance_admin.ListAppProfilesRequest(), + parent="parent_value", ) @@ -3892,7 +4142,9 @@ async def test_list_app_profiles_flattened_async(): ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.list_app_profiles(parent="parent_value",) + response = await client.list_app_profiles( + parent="parent_value", + ) # Establish that the underlying call was made with the expected # request object values. @@ -3913,13 +4165,15 @@ async def test_list_app_profiles_flattened_error_async(): # fields is an error. 
with pytest.raises(ValueError): await client.list_app_profiles( - bigtable_instance_admin.ListAppProfilesRequest(), parent="parent_value", + bigtable_instance_admin.ListAppProfilesRequest(), + parent="parent_value", ) def test_list_app_profiles_pager(transport_name: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials, transport=transport_name, + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, ) # Mock the actual call within the gRPC stub, and fake the request. @@ -3937,13 +4191,20 @@ def test_list_app_profiles_pager(transport_name: str = "grpc"): next_page_token="abc", ), bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[], next_page_token="def", + app_profiles=[], + next_page_token="def", ), bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[instance.AppProfile(),], next_page_token="ghi", + app_profiles=[ + instance.AppProfile(), + ], + next_page_token="ghi", ), bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[instance.AppProfile(), instance.AppProfile(),], + app_profiles=[ + instance.AppProfile(), + instance.AppProfile(), + ], ), RuntimeError, ) @@ -3963,7 +4224,8 @@ def test_list_app_profiles_pager(transport_name: str = "grpc"): def test_list_app_profiles_pages(transport_name: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials, transport=transport_name, + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, ) # Mock the actual call within the gRPC stub, and fake the request. @@ -3981,13 +4243,20 @@ def test_list_app_profiles_pages(transport_name: str = "grpc"): next_page_token="abc", ), bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[], next_page_token="def", + app_profiles=[], + next_page_token="def", ), bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[instance.AppProfile(),], next_page_token="ghi", + app_profiles=[ + instance.AppProfile(), + ], + next_page_token="ghi", ), bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[instance.AppProfile(), instance.AppProfile(),], + app_profiles=[ + instance.AppProfile(), + instance.AppProfile(), + ], ), RuntimeError, ) @@ -4019,17 +4288,26 @@ async def test_list_app_profiles_async_pager(): next_page_token="abc", ), bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[], next_page_token="def", + app_profiles=[], + next_page_token="def", ), bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[instance.AppProfile(),], next_page_token="ghi", + app_profiles=[ + instance.AppProfile(), + ], + next_page_token="ghi", ), bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[instance.AppProfile(), instance.AppProfile(),], + app_profiles=[ + instance.AppProfile(), + instance.AppProfile(), + ], ), RuntimeError, ) - async_pager = await client.list_app_profiles(request={},) + async_pager = await client.list_app_profiles( + request={}, + ) assert async_pager.next_page_token == "abc" responses = [] async for response in async_pager: @@ -4062,13 +4340,20 @@ async def test_list_app_profiles_async_pages(): next_page_token="abc", ), bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[], next_page_token="def", + app_profiles=[], + next_page_token="def", ), bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[instance.AppProfile(),], next_page_token="ghi", + app_profiles=[ + instance.AppProfile(), + ], + next_page_token="ghi", ), 
bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[instance.AppProfile(), instance.AppProfile(),], + app_profiles=[ + instance.AppProfile(), + instance.AppProfile(), + ], ), RuntimeError, ) @@ -4080,11 +4365,16 @@ async def test_list_app_profiles_async_pages(): @pytest.mark.parametrize( - "request_type", [bigtable_instance_admin.UpdateAppProfileRequest, dict,] + "request_type", + [ + bigtable_instance_admin.UpdateAppProfileRequest, + dict, + ], ) def test_update_app_profile(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -4112,7 +4402,8 @@ def test_update_app_profile_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -4131,7 +4422,8 @@ async def test_update_app_profile_async( request_type=bigtable_instance_admin.UpdateAppProfileRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -4187,9 +4479,10 @@ def test_update_app_profile_field_headers(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "app_profile.name=app_profile.name/value",) in kw[ - "metadata" - ] + assert ( + "x-goog-request-params", + "app_profile.name=app_profile.name/value", + ) in kw["metadata"] @pytest.mark.asyncio @@ -4220,9 +4513,10 @@ async def test_update_app_profile_field_headers_async(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "app_profile.name=app_profile.name/value",) in kw[ - "metadata" - ] + assert ( + "x-goog-request-params", + "app_profile.name=app_profile.name/value", + ) in kw["metadata"] def test_update_app_profile_flattened(): @@ -4322,11 +4616,16 @@ async def test_update_app_profile_flattened_error_async(): @pytest.mark.parametrize( - "request_type", [bigtable_instance_admin.DeleteAppProfileRequest, dict,] + "request_type", + [ + bigtable_instance_admin.DeleteAppProfileRequest, + dict, + ], ) def test_delete_app_profile(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -4354,7 +4653,8 @@ def test_delete_app_profile_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -4373,7 +4673,8 @@ async def test_delete_app_profile_async( request_type=bigtable_instance_admin.DeleteAppProfileRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -4427,7 +4728,10 @@ def test_delete_app_profile_field_headers(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "name=name/value", + ) in kw["metadata"] @pytest.mark.asyncio @@ -4456,7 +4760,10 @@ async def test_delete_app_profile_field_headers_async(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "name=name/value", + ) in kw["metadata"] def test_delete_app_profile_flattened(): @@ -4472,7 +4779,9 @@ def test_delete_app_profile_flattened(): call.return_value = None # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - client.delete_app_profile(name="name_value",) + client.delete_app_profile( + name="name_value", + ) # Establish that the underlying call was made with the expected # request object values. @@ -4492,7 +4801,8 @@ def test_delete_app_profile_flattened_error(): # fields is an error. with pytest.raises(ValueError): client.delete_app_profile( - bigtable_instance_admin.DeleteAppProfileRequest(), name="name_value", + bigtable_instance_admin.DeleteAppProfileRequest(), + name="name_value", ) @@ -4512,7 +4822,9 @@ async def test_delete_app_profile_flattened_async(): call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.delete_app_profile(name="name_value",) + response = await client.delete_app_profile( + name="name_value", + ) # Establish that the underlying call was made with the expected # request object values. @@ -4533,14 +4845,22 @@ async def test_delete_app_profile_flattened_error_async(): # fields is an error. with pytest.raises(ValueError): await client.delete_app_profile( - bigtable_instance_admin.DeleteAppProfileRequest(), name="name_value", + bigtable_instance_admin.DeleteAppProfileRequest(), + name="name_value", ) -@pytest.mark.parametrize("request_type", [iam_policy_pb2.GetIamPolicyRequest, dict,]) +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.GetIamPolicyRequest, + dict, + ], +) def test_get_iam_policy(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -4550,7 +4870,10 @@ def test_get_iam_policy(request_type, transport: str = "grpc"): # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: # Designate an appropriate return value for the call. 
- call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",) + call.return_value = policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) response = client.get_iam_policy(request) # Establish that the underlying gRPC stub method was called. @@ -4568,7 +4891,8 @@ def test_get_iam_policy_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -4584,7 +4908,8 @@ async def test_get_iam_policy_async( transport: str = "grpc_asyncio", request_type=iam_policy_pb2.GetIamPolicyRequest ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -4595,7 +4920,10 @@ async def test_get_iam_policy_async( with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - policy_pb2.Policy(version=774, etag=b"etag_blob",) + policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) ) response = await client.get_iam_policy(request) @@ -4638,7 +4966,10 @@ def test_get_iam_policy_field_headers(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] @pytest.mark.asyncio @@ -4665,7 +4996,10 @@ async def test_get_iam_policy_field_headers_async(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] def test_get_iam_policy_from_dict_foreign(): @@ -4696,7 +5030,9 @@ def test_get_iam_policy_flattened(): call.return_value = policy_pb2.Policy() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - client.get_iam_policy(resource="resource_value",) + client.get_iam_policy( + resource="resource_value", + ) # Establish that the underlying call was made with the expected # request object values. @@ -4716,7 +5052,8 @@ def test_get_iam_policy_flattened_error(): # fields is an error. with pytest.raises(ValueError): client.get_iam_policy( - iam_policy_pb2.GetIamPolicyRequest(), resource="resource_value", + iam_policy_pb2.GetIamPolicyRequest(), + resource="resource_value", ) @@ -4734,7 +5071,9 @@ async def test_get_iam_policy_flattened_async(): call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.get_iam_policy(resource="resource_value",) + response = await client.get_iam_policy( + resource="resource_value", + ) # Establish that the underlying call was made with the expected # request object values. @@ -4755,14 +5094,22 @@ async def test_get_iam_policy_flattened_error_async(): # fields is an error. 
with pytest.raises(ValueError): await client.get_iam_policy( - iam_policy_pb2.GetIamPolicyRequest(), resource="resource_value", + iam_policy_pb2.GetIamPolicyRequest(), + resource="resource_value", ) -@pytest.mark.parametrize("request_type", [iam_policy_pb2.SetIamPolicyRequest, dict,]) +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.SetIamPolicyRequest, + dict, + ], +) def test_set_iam_policy(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -4772,7 +5119,10 @@ def test_set_iam_policy(request_type, transport: str = "grpc"): # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",) + call.return_value = policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) response = client.set_iam_policy(request) # Establish that the underlying gRPC stub method was called. @@ -4790,7 +5140,8 @@ def test_set_iam_policy_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -4806,7 +5157,8 @@ async def test_set_iam_policy_async( transport: str = "grpc_asyncio", request_type=iam_policy_pb2.SetIamPolicyRequest ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -4817,7 +5169,10 @@ async def test_set_iam_policy_async( with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - policy_pb2.Policy(version=774, etag=b"etag_blob",) + policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) ) response = await client.set_iam_policy(request) @@ -4860,7 +5215,10 @@ def test_set_iam_policy_field_headers(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] @pytest.mark.asyncio @@ -4887,7 +5245,10 @@ async def test_set_iam_policy_field_headers_async(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] def test_set_iam_policy_from_dict_foreign(): @@ -4918,7 +5279,9 @@ def test_set_iam_policy_flattened(): call.return_value = policy_pb2.Policy() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. 
- client.set_iam_policy(resource="resource_value",) + client.set_iam_policy( + resource="resource_value", + ) # Establish that the underlying call was made with the expected # request object values. @@ -4938,7 +5301,8 @@ def test_set_iam_policy_flattened_error(): # fields is an error. with pytest.raises(ValueError): client.set_iam_policy( - iam_policy_pb2.SetIamPolicyRequest(), resource="resource_value", + iam_policy_pb2.SetIamPolicyRequest(), + resource="resource_value", ) @@ -4956,7 +5320,9 @@ async def test_set_iam_policy_flattened_async(): call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.set_iam_policy(resource="resource_value",) + response = await client.set_iam_policy( + resource="resource_value", + ) # Establish that the underlying call was made with the expected # request object values. @@ -4977,16 +5343,22 @@ async def test_set_iam_policy_flattened_error_async(): # fields is an error. with pytest.raises(ValueError): await client.set_iam_policy( - iam_policy_pb2.SetIamPolicyRequest(), resource="resource_value", + iam_policy_pb2.SetIamPolicyRequest(), + resource="resource_value", ) @pytest.mark.parametrize( - "request_type", [iam_policy_pb2.TestIamPermissionsRequest, dict,] + "request_type", + [ + iam_policy_pb2.TestIamPermissionsRequest, + dict, + ], ) def test_test_iam_permissions(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -5017,7 +5389,8 @@ def test_test_iam_permissions_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -5036,7 +5409,8 @@ async def test_test_iam_permissions_async( request_type=iam_policy_pb2.TestIamPermissionsRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -5095,7 +5469,10 @@ def test_test_iam_permissions_field_headers(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] @pytest.mark.asyncio @@ -5126,7 +5503,10 @@ async def test_test_iam_permissions_field_headers_async(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] def test_test_iam_permissions_from_dict_foreign(): @@ -5162,7 +5542,8 @@ def test_test_iam_permissions_flattened(): # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. 
client.test_iam_permissions( - resource="resource_value", permissions=["permissions_value"], + resource="resource_value", + permissions=["permissions_value"], ) # Establish that the underlying call was made with the expected @@ -5211,7 +5592,8 @@ async def test_test_iam_permissions_flattened_async(): # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.test_iam_permissions( - resource="resource_value", permissions=["permissions_value"], + resource="resource_value", + permissions=["permissions_value"], ) # Establish that the underlying call was made with the expected @@ -5243,11 +5625,16 @@ async def test_test_iam_permissions_flattened_error_async(): @pytest.mark.parametrize( - "request_type", [bigtable_instance_admin.ListHotTabletsRequest, dict,] + "request_type", + [ + bigtable_instance_admin.ListHotTabletsRequest, + dict, + ], ) def test_list_hot_tablets(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -5276,7 +5663,8 @@ def test_list_hot_tablets_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -5293,7 +5681,8 @@ async def test_list_hot_tablets_async( request_type=bigtable_instance_admin.ListHotTabletsRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -5348,7 +5737,10 @@ def test_list_hot_tablets_field_headers(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "parent=parent/value", + ) in kw["metadata"] @pytest.mark.asyncio @@ -5377,7 +5769,10 @@ async def test_list_hot_tablets_field_headers_async(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "parent=parent/value", + ) in kw["metadata"] def test_list_hot_tablets_flattened(): @@ -5391,7 +5786,9 @@ def test_list_hot_tablets_flattened(): call.return_value = bigtable_instance_admin.ListHotTabletsResponse() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - client.list_hot_tablets(parent="parent_value",) + client.list_hot_tablets( + parent="parent_value", + ) # Establish that the underlying call was made with the expected # request object values. @@ -5411,7 +5808,8 @@ def test_list_hot_tablets_flattened_error(): # fields is an error. 
with pytest.raises(ValueError): client.list_hot_tablets( - bigtable_instance_admin.ListHotTabletsRequest(), parent="parent_value", + bigtable_instance_admin.ListHotTabletsRequest(), + parent="parent_value", ) @@ -5431,7 +5829,9 @@ async def test_list_hot_tablets_flattened_async(): ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.list_hot_tablets(parent="parent_value",) + response = await client.list_hot_tablets( + parent="parent_value", + ) # Establish that the underlying call was made with the expected # request object values. @@ -5452,13 +5852,15 @@ async def test_list_hot_tablets_flattened_error_async(): # fields is an error. with pytest.raises(ValueError): await client.list_hot_tablets( - bigtable_instance_admin.ListHotTabletsRequest(), parent="parent_value", + bigtable_instance_admin.ListHotTabletsRequest(), + parent="parent_value", ) def test_list_hot_tablets_pager(transport_name: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials, transport=transport_name, + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, ) # Mock the actual call within the gRPC stub, and fake the request. @@ -5474,13 +5876,20 @@ def test_list_hot_tablets_pager(transport_name: str = "grpc"): next_page_token="abc", ), bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[], next_page_token="def", + hot_tablets=[], + next_page_token="def", ), bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[instance.HotTablet(),], next_page_token="ghi", + hot_tablets=[ + instance.HotTablet(), + ], + next_page_token="ghi", ), bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[instance.HotTablet(), instance.HotTablet(),], + hot_tablets=[ + instance.HotTablet(), + instance.HotTablet(), + ], ), RuntimeError, ) @@ -5500,7 +5909,8 @@ def test_list_hot_tablets_pager(transport_name: str = "grpc"): def test_list_hot_tablets_pages(transport_name: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials, transport=transport_name, + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -5516,13 +5926,20 @@ def test_list_hot_tablets_pages(transport_name: str = "grpc"): next_page_token="abc", ), bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[], next_page_token="def", + hot_tablets=[], + next_page_token="def", ), bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[instance.HotTablet(),], next_page_token="ghi", + hot_tablets=[ + instance.HotTablet(), + ], + next_page_token="ghi", ), bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[instance.HotTablet(), instance.HotTablet(),], + hot_tablets=[ + instance.HotTablet(), + instance.HotTablet(), + ], ), RuntimeError, ) @@ -5552,17 +5969,26 @@ async def test_list_hot_tablets_async_pager(): next_page_token="abc", ), bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[], next_page_token="def", + hot_tablets=[], + next_page_token="def", ), bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[instance.HotTablet(),], next_page_token="ghi", + hot_tablets=[ + instance.HotTablet(), + ], + next_page_token="ghi", ), bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[instance.HotTablet(), instance.HotTablet(),], + hot_tablets=[ + instance.HotTablet(), + instance.HotTablet(), + ], ), RuntimeError, ) - async_pager = await client.list_hot_tablets(request={},) + async_pager = await client.list_hot_tablets( + request={}, + ) assert async_pager.next_page_token == "abc" responses = [] async for response in async_pager: @@ -5593,13 +6019,20 @@ async def test_list_hot_tablets_async_pages(): next_page_token="abc", ), bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[], next_page_token="def", + hot_tablets=[], + next_page_token="def", ), bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[instance.HotTablet(),], next_page_token="ghi", + hot_tablets=[ + instance.HotTablet(), + ], + next_page_token="ghi", ), bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[instance.HotTablet(), instance.HotTablet(),], + hot_tablets=[ + instance.HotTablet(), + instance.HotTablet(), + ], ), RuntimeError, ) @@ -5617,7 +6050,8 @@ def test_credentials_transport_error(): ) with pytest.raises(ValueError): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # It is an error to provide a credentials file and a transport instance. @@ -5638,7 +6072,8 @@ def test_credentials_transport_error(): options.api_key = "api_key" with pytest.raises(ValueError): client = BigtableInstanceAdminClient( - client_options=options, transport=transport, + client_options=options, + transport=transport, ) # It is an error to provide an api_key and a credential. 
@@ -5655,7 +6090,8 @@ def test_credentials_transport_error(): ) with pytest.raises(ValueError): client = BigtableInstanceAdminClient( - client_options={"scopes": ["1", "2"]}, transport=transport, + client_options={"scopes": ["1", "2"]}, + transport=transport, ) @@ -5703,7 +6139,10 @@ def test_transport_grpc_default(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) - assert isinstance(client.transport, transports.BigtableInstanceAdminGrpcTransport,) + assert isinstance( + client.transport, + transports.BigtableInstanceAdminGrpcTransport, + ) def test_bigtable_instance_admin_base_transport_error(): @@ -5773,7 +6212,8 @@ def test_bigtable_instance_admin_base_transport_with_credentials_file(): Transport.return_value = None load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) transport = transports.BigtableInstanceAdminTransport( - credentials_file="credentials.json", quota_project_id="octopus", + credentials_file="credentials.json", + quota_project_id="octopus", ) load_creds.assert_called_once_with( "credentials.json", @@ -5967,7 +6407,8 @@ def test_bigtable_instance_admin_grpc_transport_channel(): # Check that channel is used if provided. transport = transports.BigtableInstanceAdminGrpcTransport( - host="squid.clam.whelk", channel=channel, + host="squid.clam.whelk", + channel=channel, ) assert transport.grpc_channel == channel assert transport._host == "squid.clam.whelk:443" @@ -5979,7 +6420,8 @@ def test_bigtable_instance_admin_grpc_asyncio_transport_channel(): # Check that channel is used if provided. transport = transports.BigtableInstanceAdminGrpcAsyncIOTransport( - host="squid.clam.whelk", channel=channel, + host="squid.clam.whelk", + channel=channel, ) assert transport.grpc_channel == channel assert transport._host == "squid.clam.whelk:443" @@ -6088,12 +6530,16 @@ def test_bigtable_instance_admin_transport_channel_mtls_with_adc(transport_class def test_bigtable_instance_admin_grpc_lro_client(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) transport = client.transport # Ensure that we have a api-core operations client. - assert isinstance(transport.operations_client, operations_v1.OperationsClient,) + assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) # Ensure that subsequent calls to the property send the exact same object. assert transport.operations_client is transport.operations_client @@ -6101,12 +6547,16 @@ def test_bigtable_instance_admin_grpc_lro_client(): def test_bigtable_instance_admin_grpc_lro_async_client(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", ) transport = client.transport # Ensure that we have a api-core operations client. - assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,) + assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) # Ensure that subsequent calls to the property send the exact same object. 
assert transport.operations_client is transport.operations_client @@ -6116,8 +6566,12 @@ def test_app_profile_path(): project = "squid" instance = "clam" app_profile = "whelk" - expected = "projects/{project}/instances/{instance}/appProfiles/{app_profile}".format( - project=project, instance=instance, app_profile=app_profile, + expected = ( + "projects/{project}/instances/{instance}/appProfiles/{app_profile}".format( + project=project, + instance=instance, + app_profile=app_profile, + ) ) actual = BigtableInstanceAdminClient.app_profile_path( project, instance, app_profile @@ -6143,7 +6597,9 @@ def test_cluster_path(): instance = "mussel" cluster = "winkle" expected = "projects/{project}/instances/{instance}/clusters/{cluster}".format( - project=project, instance=instance, cluster=cluster, + project=project, + instance=instance, + cluster=cluster, ) actual = BigtableInstanceAdminClient.cluster_path(project, instance, cluster) assert expected == actual @@ -6168,7 +6624,10 @@ def test_crypto_key_path(): key_ring = "whelk" crypto_key = "octopus" expected = "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}".format( - project=project, location=location, key_ring=key_ring, crypto_key=crypto_key, + project=project, + location=location, + key_ring=key_ring, + crypto_key=crypto_key, ) actual = BigtableInstanceAdminClient.crypto_key_path( project, location, key_ring, crypto_key @@ -6196,7 +6655,10 @@ def test_hot_tablet_path(): cluster = "scallop" hot_tablet = "abalone" expected = "projects/{project}/instances/{instance}/clusters/{cluster}/hotTablets/{hot_tablet}".format( - project=project, instance=instance, cluster=cluster, hot_tablet=hot_tablet, + project=project, + instance=instance, + cluster=cluster, + hot_tablet=hot_tablet, ) actual = BigtableInstanceAdminClient.hot_tablet_path( project, instance, cluster, hot_tablet @@ -6222,7 +6684,8 @@ def test_instance_path(): project = "oyster" instance = "nudibranch" expected = "projects/{project}/instances/{instance}".format( - project=project, instance=instance, + project=project, + instance=instance, ) actual = BigtableInstanceAdminClient.instance_path(project, instance) assert expected == actual @@ -6245,7 +6708,9 @@ def test_table_path(): instance = "nautilus" table = "scallop" expected = "projects/{project}/instances/{instance}/tables/{table}".format( - project=project, instance=instance, table=table, + project=project, + instance=instance, + table=table, ) actual = BigtableInstanceAdminClient.table_path(project, instance, table) assert expected == actual @@ -6286,7 +6751,9 @@ def test_parse_common_billing_account_path(): def test_common_folder_path(): folder = "oyster" - expected = "folders/{folder}".format(folder=folder,) + expected = "folders/{folder}".format( + folder=folder, + ) actual = BigtableInstanceAdminClient.common_folder_path(folder) assert expected == actual @@ -6304,7 +6771,9 @@ def test_parse_common_folder_path(): def test_common_organization_path(): organization = "cuttlefish" - expected = "organizations/{organization}".format(organization=organization,) + expected = "organizations/{organization}".format( + organization=organization, + ) actual = BigtableInstanceAdminClient.common_organization_path(organization) assert expected == actual @@ -6322,7 +6791,9 @@ def test_parse_common_organization_path(): def test_common_project_path(): project = "winkle" - expected = "projects/{project}".format(project=project,) + expected = "projects/{project}".format( + project=project, + ) actual = 
BigtableInstanceAdminClient.common_project_path(project) assert expected == actual @@ -6342,7 +6813,8 @@ def test_common_location_path(): project = "scallop" location = "abalone" expected = "projects/{project}/locations/{location}".format( - project=project, location=location, + project=project, + location=location, ) actual = BigtableInstanceAdminClient.common_location_path(project, location) assert expected == actual @@ -6367,7 +6839,8 @@ def test_client_with_default_client_info(): transports.BigtableInstanceAdminTransport, "_prep_wrapped_messages" ) as prep: client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, ) prep.assert_called_once_with(client_info) @@ -6376,7 +6849,8 @@ def test_client_with_default_client_info(): ) as prep: transport_class = BigtableInstanceAdminClient.get_transport_class() transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, ) prep.assert_called_once_with(client_info) @@ -6384,7 +6858,8 @@ def test_client_with_default_client_info(): @pytest.mark.asyncio async def test_transport_close_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", ) with mock.patch.object( type(getattr(client.transport, "grpc_channel")), "close" diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index 439b62113b8d..f549bde0d47e 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -106,7 +106,11 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( - "client_class", [BigtableTableAdminClient, BigtableTableAdminAsyncClient,] + "client_class", + [ + BigtableTableAdminClient, + BigtableTableAdminAsyncClient, + ], ) def test_bigtable_table_admin_client_from_service_account_info(client_class): creds = ga_credentials.AnonymousCredentials() @@ -148,7 +152,11 @@ def test_bigtable_table_admin_client_service_account_always_use_jwt( @pytest.mark.parametrize( - "client_class", [BigtableTableAdminClient, BigtableTableAdminAsyncClient,] + "client_class", + [ + BigtableTableAdminClient, + BigtableTableAdminAsyncClient, + ], ) def test_bigtable_table_admin_client_from_service_account_file(client_class): creds = ga_credentials.AnonymousCredentials() @@ -522,7 +530,9 @@ def test_bigtable_table_admin_client_client_options_scopes( client_class, transport_class, transport_name ): # Check the case scopes are provided. 
- options = client_options.ClientOptions(scopes=["1", "2"],) + options = client_options.ClientOptions( + scopes=["1", "2"], + ) with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options, transport=transport_name) @@ -670,11 +680,16 @@ def test_bigtable_table_admin_client_create_channel_credentials_file( @pytest.mark.parametrize( - "request_type", [bigtable_table_admin.CreateTableRequest, dict,] + "request_type", + [ + bigtable_table_admin.CreateTableRequest, + dict, + ], ) def test_create_table(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -685,7 +700,8 @@ def test_create_table(request_type, transport: str = "grpc"): with mock.patch.object(type(client.transport.create_table), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = gba_table.Table( - name="name_value", granularity=gba_table.Table.TimestampGranularity.MILLIS, + name="name_value", + granularity=gba_table.Table.TimestampGranularity.MILLIS, ) response = client.create_table(request) @@ -704,7 +720,8 @@ def test_create_table_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -721,7 +738,8 @@ async def test_create_table_async( request_type=bigtable_table_admin.CreateTableRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -778,7 +796,10 @@ def test_create_table_field_headers(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "parent=parent/value", + ) in kw["metadata"] @pytest.mark.asyncio @@ -805,7 +826,10 @@ async def test_create_table_field_headers_async(): # Establish that the field header was sent. 
_, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "parent=parent/value", + ) in kw["metadata"] def test_create_table_flattened(): @@ -909,11 +933,16 @@ async def test_create_table_flattened_error_async(): @pytest.mark.parametrize( - "request_type", [bigtable_table_admin.CreateTableFromSnapshotRequest, dict,] + "request_type", + [ + bigtable_table_admin.CreateTableFromSnapshotRequest, + dict, + ], ) def test_create_table_from_snapshot(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -941,7 +970,8 @@ def test_create_table_from_snapshot_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -960,7 +990,8 @@ async def test_create_table_from_snapshot_async( request_type=bigtable_table_admin.CreateTableFromSnapshotRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -1016,7 +1047,10 @@ def test_create_table_from_snapshot_field_headers(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "parent=parent/value", + ) in kw["metadata"] @pytest.mark.asyncio @@ -1047,7 +1081,10 @@ async def test_create_table_from_snapshot_field_headers_async(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "parent=parent/value", + ) in kw["metadata"] def test_create_table_from_snapshot_flattened(): @@ -1157,11 +1194,16 @@ async def test_create_table_from_snapshot_flattened_error_async(): @pytest.mark.parametrize( - "request_type", [bigtable_table_admin.ListTablesRequest, dict,] + "request_type", + [ + bigtable_table_admin.ListTablesRequest, + dict, + ], ) def test_list_tables(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -1190,7 +1232,8 @@ def test_list_tables_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -1206,7 +1249,8 @@ async def test_list_tables_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.ListTablesRequest ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -1261,7 +1305,10 @@ def test_list_tables_field_headers(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "parent=parent/value", + ) in kw["metadata"] @pytest.mark.asyncio @@ -1290,7 +1337,10 @@ async def test_list_tables_field_headers_async(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "parent=parent/value", + ) in kw["metadata"] def test_list_tables_flattened(): @@ -1304,7 +1354,9 @@ def test_list_tables_flattened(): call.return_value = bigtable_table_admin.ListTablesResponse() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - client.list_tables(parent="parent_value",) + client.list_tables( + parent="parent_value", + ) # Establish that the underlying call was made with the expected # request object values. @@ -1324,7 +1376,8 @@ def test_list_tables_flattened_error(): # fields is an error. with pytest.raises(ValueError): client.list_tables( - bigtable_table_admin.ListTablesRequest(), parent="parent_value", + bigtable_table_admin.ListTablesRequest(), + parent="parent_value", ) @@ -1344,7 +1397,9 @@ async def test_list_tables_flattened_async(): ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.list_tables(parent="parent_value",) + response = await client.list_tables( + parent="parent_value", + ) # Establish that the underlying call was made with the expected # request object values. @@ -1365,13 +1420,15 @@ async def test_list_tables_flattened_error_async(): # fields is an error. with pytest.raises(ValueError): await client.list_tables( - bigtable_table_admin.ListTablesRequest(), parent="parent_value", + bigtable_table_admin.ListTablesRequest(), + parent="parent_value", ) def test_list_tables_pager(transport_name: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials, transport=transport_name, + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1379,15 +1436,28 @@ def test_list_tables_pager(transport_name: str = "grpc"): # Set the response to a series of pages. 
call.side_effect = ( bigtable_table_admin.ListTablesResponse( - tables=[table.Table(), table.Table(), table.Table(),], + tables=[ + table.Table(), + table.Table(), + table.Table(), + ], next_page_token="abc", ), - bigtable_table_admin.ListTablesResponse(tables=[], next_page_token="def",), bigtable_table_admin.ListTablesResponse( - tables=[table.Table(),], next_page_token="ghi", + tables=[], + next_page_token="def", ), bigtable_table_admin.ListTablesResponse( - tables=[table.Table(), table.Table(),], + tables=[ + table.Table(), + ], + next_page_token="ghi", + ), + bigtable_table_admin.ListTablesResponse( + tables=[ + table.Table(), + table.Table(), + ], ), RuntimeError, ) @@ -1407,7 +1477,8 @@ def test_list_tables_pager(transport_name: str = "grpc"): def test_list_tables_pages(transport_name: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials, transport=transport_name, + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1415,15 +1486,28 @@ def test_list_tables_pages(transport_name: str = "grpc"): # Set the response to a series of pages. call.side_effect = ( bigtable_table_admin.ListTablesResponse( - tables=[table.Table(), table.Table(), table.Table(),], + tables=[ + table.Table(), + table.Table(), + table.Table(), + ], next_page_token="abc", ), - bigtable_table_admin.ListTablesResponse(tables=[], next_page_token="def",), bigtable_table_admin.ListTablesResponse( - tables=[table.Table(),], next_page_token="ghi", + tables=[], + next_page_token="def", + ), + bigtable_table_admin.ListTablesResponse( + tables=[ + table.Table(), + ], + next_page_token="ghi", ), bigtable_table_admin.ListTablesResponse( - tables=[table.Table(), table.Table(),], + tables=[ + table.Table(), + table.Table(), + ], ), RuntimeError, ) @@ -1445,19 +1529,34 @@ async def test_list_tables_async_pager(): # Set the response to a series of pages. call.side_effect = ( bigtable_table_admin.ListTablesResponse( - tables=[table.Table(), table.Table(), table.Table(),], + tables=[ + table.Table(), + table.Table(), + table.Table(), + ], next_page_token="abc", ), - bigtable_table_admin.ListTablesResponse(tables=[], next_page_token="def",), bigtable_table_admin.ListTablesResponse( - tables=[table.Table(),], next_page_token="ghi", + tables=[], + next_page_token="def", ), bigtable_table_admin.ListTablesResponse( - tables=[table.Table(), table.Table(),], + tables=[ + table.Table(), + ], + next_page_token="ghi", + ), + bigtable_table_admin.ListTablesResponse( + tables=[ + table.Table(), + table.Table(), + ], ), RuntimeError, ) - async_pager = await client.list_tables(request={},) + async_pager = await client.list_tables( + request={}, + ) assert async_pager.next_page_token == "abc" responses = [] async for response in async_pager: @@ -1480,15 +1579,28 @@ async def test_list_tables_async_pages(): # Set the response to a series of pages. 
call.side_effect = ( bigtable_table_admin.ListTablesResponse( - tables=[table.Table(), table.Table(), table.Table(),], + tables=[ + table.Table(), + table.Table(), + table.Table(), + ], next_page_token="abc", ), - bigtable_table_admin.ListTablesResponse(tables=[], next_page_token="def",), bigtable_table_admin.ListTablesResponse( - tables=[table.Table(),], next_page_token="ghi", + tables=[], + next_page_token="def", + ), + bigtable_table_admin.ListTablesResponse( + tables=[ + table.Table(), + ], + next_page_token="ghi", ), bigtable_table_admin.ListTablesResponse( - tables=[table.Table(), table.Table(),], + tables=[ + table.Table(), + table.Table(), + ], ), RuntimeError, ) @@ -1499,10 +1611,17 @@ async def test_list_tables_async_pages(): assert page_.raw_page.next_page_token == token -@pytest.mark.parametrize("request_type", [bigtable_table_admin.GetTableRequest, dict,]) +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.GetTableRequest, + dict, + ], +) def test_get_table(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -1513,7 +1632,8 @@ def test_get_table(request_type, transport: str = "grpc"): with mock.patch.object(type(client.transport.get_table), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = table.Table( - name="name_value", granularity=table.Table.TimestampGranularity.MILLIS, + name="name_value", + granularity=table.Table.TimestampGranularity.MILLIS, ) response = client.get_table(request) @@ -1532,7 +1652,8 @@ def test_get_table_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1548,7 +1669,8 @@ async def test_get_table_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.GetTableRequest ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -1560,7 +1682,8 @@ async def test_get_table_async( # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( table.Table( - name="name_value", granularity=table.Table.TimestampGranularity.MILLIS, + name="name_value", + granularity=table.Table.TimestampGranularity.MILLIS, ) ) response = await client.get_table(request) @@ -1604,7 +1727,10 @@ def test_get_table_field_headers(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "name=name/value", + ) in kw["metadata"] @pytest.mark.asyncio @@ -1631,7 +1757,10 @@ async def test_get_table_field_headers_async(): # Establish that the field header was sent. 
_, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "name=name/value", + ) in kw["metadata"] def test_get_table_flattened(): @@ -1645,7 +1774,9 @@ def test_get_table_flattened(): call.return_value = table.Table() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - client.get_table(name="name_value",) + client.get_table( + name="name_value", + ) # Establish that the underlying call was made with the expected # request object values. @@ -1665,7 +1796,8 @@ def test_get_table_flattened_error(): # fields is an error. with pytest.raises(ValueError): client.get_table( - bigtable_table_admin.GetTableRequest(), name="name_value", + bigtable_table_admin.GetTableRequest(), + name="name_value", ) @@ -1683,7 +1815,9 @@ async def test_get_table_flattened_async(): call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Table()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.get_table(name="name_value",) + response = await client.get_table( + name="name_value", + ) # Establish that the underlying call was made with the expected # request object values. @@ -1704,16 +1838,22 @@ async def test_get_table_flattened_error_async(): # fields is an error. with pytest.raises(ValueError): await client.get_table( - bigtable_table_admin.GetTableRequest(), name="name_value", + bigtable_table_admin.GetTableRequest(), + name="name_value", ) @pytest.mark.parametrize( - "request_type", [bigtable_table_admin.DeleteTableRequest, dict,] + "request_type", + [ + bigtable_table_admin.DeleteTableRequest, + dict, + ], ) def test_delete_table(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -1739,7 +1879,8 @@ def test_delete_table_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1756,7 +1897,8 @@ async def test_delete_table_async( request_type=bigtable_table_admin.DeleteTableRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -1806,7 +1948,10 @@ def test_delete_table_field_headers(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "name=name/value", + ) in kw["metadata"] @pytest.mark.asyncio @@ -1833,7 +1978,10 @@ async def test_delete_table_field_headers_async(): # Establish that the field header was sent. 
_, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "name=name/value", + ) in kw["metadata"] def test_delete_table_flattened(): @@ -1847,7 +1995,9 @@ def test_delete_table_flattened(): call.return_value = None # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - client.delete_table(name="name_value",) + client.delete_table( + name="name_value", + ) # Establish that the underlying call was made with the expected # request object values. @@ -1867,7 +2017,8 @@ def test_delete_table_flattened_error(): # fields is an error. with pytest.raises(ValueError): client.delete_table( - bigtable_table_admin.DeleteTableRequest(), name="name_value", + bigtable_table_admin.DeleteTableRequest(), + name="name_value", ) @@ -1885,7 +2036,9 @@ async def test_delete_table_flattened_async(): call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.delete_table(name="name_value",) + response = await client.delete_table( + name="name_value", + ) # Establish that the underlying call was made with the expected # request object values. @@ -1906,16 +2059,22 @@ async def test_delete_table_flattened_error_async(): # fields is an error. with pytest.raises(ValueError): await client.delete_table( - bigtable_table_admin.DeleteTableRequest(), name="name_value", + bigtable_table_admin.DeleteTableRequest(), + name="name_value", ) @pytest.mark.parametrize( - "request_type", [bigtable_table_admin.ModifyColumnFamiliesRequest, dict,] + "request_type", + [ + bigtable_table_admin.ModifyColumnFamiliesRequest, + dict, + ], ) def test_modify_column_families(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -1928,7 +2087,8 @@ def test_modify_column_families(request_type, transport: str = "grpc"): ) as call: # Designate an appropriate return value for the call. call.return_value = table.Table( - name="name_value", granularity=table.Table.TimestampGranularity.MILLIS, + name="name_value", + granularity=table.Table.TimestampGranularity.MILLIS, ) response = client.modify_column_families(request) @@ -1947,7 +2107,8 @@ def test_modify_column_families_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1966,7 +2127,8 @@ async def test_modify_column_families_async( request_type=bigtable_table_admin.ModifyColumnFamiliesRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -1980,7 +2142,8 @@ async def test_modify_column_families_async( # Designate an appropriate return value for the call. 
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( table.Table( - name="name_value", granularity=table.Table.TimestampGranularity.MILLIS, + name="name_value", + granularity=table.Table.TimestampGranularity.MILLIS, ) ) response = await client.modify_column_families(request) @@ -2026,7 +2189,10 @@ def test_modify_column_families_field_headers(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "name=name/value", + ) in kw["metadata"] @pytest.mark.asyncio @@ -2055,7 +2221,10 @@ async def test_modify_column_families_field_headers_async(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "name=name/value", + ) in kw["metadata"] def test_modify_column_families_flattened(): @@ -2173,11 +2342,16 @@ async def test_modify_column_families_flattened_error_async(): @pytest.mark.parametrize( - "request_type", [bigtable_table_admin.DropRowRangeRequest, dict,] + "request_type", + [ + bigtable_table_admin.DropRowRangeRequest, + dict, + ], ) def test_drop_row_range(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -2203,7 +2377,8 @@ def test_drop_row_range_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -2220,7 +2395,8 @@ async def test_drop_row_range_async( request_type=bigtable_table_admin.DropRowRangeRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -2270,7 +2446,10 @@ def test_drop_row_range_field_headers(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "name=name/value", + ) in kw["metadata"] @pytest.mark.asyncio @@ -2297,15 +2476,23 @@ async def test_drop_row_range_field_headers_async(): # Establish that the field header was sent. 
_, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "name=name/value", + ) in kw["metadata"] @pytest.mark.parametrize( - "request_type", [bigtable_table_admin.GenerateConsistencyTokenRequest, dict,] + "request_type", + [ + bigtable_table_admin.GenerateConsistencyTokenRequest, + dict, + ], ) def test_generate_consistency_token(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -2336,7 +2523,8 @@ def test_generate_consistency_token_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -2355,7 +2543,8 @@ async def test_generate_consistency_token_async( request_type=bigtable_table_admin.GenerateConsistencyTokenRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -2414,7 +2603,10 @@ def test_generate_consistency_token_field_headers(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "name=name/value", + ) in kw["metadata"] @pytest.mark.asyncio @@ -2445,7 +2637,10 @@ async def test_generate_consistency_token_field_headers_async(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "name=name/value", + ) in kw["metadata"] def test_generate_consistency_token_flattened(): @@ -2461,7 +2656,9 @@ def test_generate_consistency_token_flattened(): call.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - client.generate_consistency_token(name="name_value",) + client.generate_consistency_token( + name="name_value", + ) # Establish that the underlying call was made with the expected # request object values. @@ -2481,7 +2678,8 @@ def test_generate_consistency_token_flattened_error(): # fields is an error. with pytest.raises(ValueError): client.generate_consistency_token( - bigtable_table_admin.GenerateConsistencyTokenRequest(), name="name_value", + bigtable_table_admin.GenerateConsistencyTokenRequest(), + name="name_value", ) @@ -2503,7 +2701,9 @@ async def test_generate_consistency_token_flattened_async(): ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.generate_consistency_token(name="name_value",) + response = await client.generate_consistency_token( + name="name_value", + ) # Establish that the underlying call was made with the expected # request object values. 
@@ -2524,16 +2724,22 @@ async def test_generate_consistency_token_flattened_error_async(): # fields is an error. with pytest.raises(ValueError): await client.generate_consistency_token( - bigtable_table_admin.GenerateConsistencyTokenRequest(), name="name_value", + bigtable_table_admin.GenerateConsistencyTokenRequest(), + name="name_value", ) @pytest.mark.parametrize( - "request_type", [bigtable_table_admin.CheckConsistencyRequest, dict,] + "request_type", + [ + bigtable_table_admin.CheckConsistencyRequest, + dict, + ], ) def test_check_consistency(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -2564,7 +2770,8 @@ def test_check_consistency_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -2583,7 +2790,8 @@ async def test_check_consistency_async( request_type=bigtable_table_admin.CheckConsistencyRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -2596,7 +2804,9 @@ async def test_check_consistency_async( ) as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.CheckConsistencyResponse(consistent=True,) + bigtable_table_admin.CheckConsistencyResponse( + consistent=True, + ) ) response = await client.check_consistency(request) @@ -2640,7 +2850,10 @@ def test_check_consistency_field_headers(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "name=name/value", + ) in kw["metadata"] @pytest.mark.asyncio @@ -2671,7 +2884,10 @@ async def test_check_consistency_field_headers_async(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "name=name/value", + ) in kw["metadata"] def test_check_consistency_flattened(): @@ -2688,7 +2904,8 @@ def test_check_consistency_flattened(): # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.check_consistency( - name="name_value", consistency_token="consistency_token_value", + name="name_value", + consistency_token="consistency_token_value", ) # Establish that the underlying call was made with the expected @@ -2737,7 +2954,8 @@ async def test_check_consistency_flattened_async(): # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. 
response = await client.check_consistency( - name="name_value", consistency_token="consistency_token_value", + name="name_value", + consistency_token="consistency_token_value", ) # Establish that the underlying call was made with the expected @@ -2769,11 +2987,16 @@ async def test_check_consistency_flattened_error_async(): @pytest.mark.parametrize( - "request_type", [bigtable_table_admin.SnapshotTableRequest, dict,] + "request_type", + [ + bigtable_table_admin.SnapshotTableRequest, + dict, + ], ) def test_snapshot_table(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -2799,7 +3022,8 @@ def test_snapshot_table_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -2816,7 +3040,8 @@ async def test_snapshot_table_async( request_type=bigtable_table_admin.SnapshotTableRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -2868,7 +3093,10 @@ def test_snapshot_table_field_headers(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "name=name/value", + ) in kw["metadata"] @pytest.mark.asyncio @@ -2897,7 +3125,10 @@ async def test_snapshot_table_field_headers_async(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "name=name/value", + ) in kw["metadata"] def test_snapshot_table_flattened(): @@ -3013,11 +3244,16 @@ async def test_snapshot_table_flattened_error_async(): @pytest.mark.parametrize( - "request_type", [bigtable_table_admin.GetSnapshotRequest, dict,] + "request_type", + [ + bigtable_table_admin.GetSnapshotRequest, + dict, + ], ) def test_get_snapshot(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -3052,7 +3288,8 @@ def test_get_snapshot_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -3069,7 +3306,8 @@ async def test_get_snapshot_async( request_type=bigtable_table_admin.GetSnapshotRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -3130,7 +3368,10 @@ def test_get_snapshot_field_headers(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "name=name/value", + ) in kw["metadata"] @pytest.mark.asyncio @@ -3157,7 +3398,10 @@ async def test_get_snapshot_field_headers_async(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "name=name/value", + ) in kw["metadata"] def test_get_snapshot_flattened(): @@ -3171,7 +3415,9 @@ def test_get_snapshot_flattened(): call.return_value = table.Snapshot() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - client.get_snapshot(name="name_value",) + client.get_snapshot( + name="name_value", + ) # Establish that the underlying call was made with the expected # request object values. @@ -3191,7 +3437,8 @@ def test_get_snapshot_flattened_error(): # fields is an error. with pytest.raises(ValueError): client.get_snapshot( - bigtable_table_admin.GetSnapshotRequest(), name="name_value", + bigtable_table_admin.GetSnapshotRequest(), + name="name_value", ) @@ -3209,7 +3456,9 @@ async def test_get_snapshot_flattened_async(): call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Snapshot()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.get_snapshot(name="name_value",) + response = await client.get_snapshot( + name="name_value", + ) # Establish that the underlying call was made with the expected # request object values. @@ -3230,16 +3479,22 @@ async def test_get_snapshot_flattened_error_async(): # fields is an error. with pytest.raises(ValueError): await client.get_snapshot( - bigtable_table_admin.GetSnapshotRequest(), name="name_value", + bigtable_table_admin.GetSnapshotRequest(), + name="name_value", ) @pytest.mark.parametrize( - "request_type", [bigtable_table_admin.ListSnapshotsRequest, dict,] + "request_type", + [ + bigtable_table_admin.ListSnapshotsRequest, + dict, + ], ) def test_list_snapshots(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -3268,7 +3523,8 @@ def test_list_snapshots_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -3285,7 +3541,8 @@ async def test_list_snapshots_async( request_type=bigtable_table_admin.ListSnapshotsRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -3340,7 +3597,10 @@ def test_list_snapshots_field_headers(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "parent=parent/value", + ) in kw["metadata"] @pytest.mark.asyncio @@ -3369,7 +3629,10 @@ async def test_list_snapshots_field_headers_async(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "parent=parent/value", + ) in kw["metadata"] def test_list_snapshots_flattened(): @@ -3383,7 +3646,9 @@ def test_list_snapshots_flattened(): call.return_value = bigtable_table_admin.ListSnapshotsResponse() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - client.list_snapshots(parent="parent_value",) + client.list_snapshots( + parent="parent_value", + ) # Establish that the underlying call was made with the expected # request object values. @@ -3403,7 +3668,8 @@ def test_list_snapshots_flattened_error(): # fields is an error. with pytest.raises(ValueError): client.list_snapshots( - bigtable_table_admin.ListSnapshotsRequest(), parent="parent_value", + bigtable_table_admin.ListSnapshotsRequest(), + parent="parent_value", ) @@ -3423,7 +3689,9 @@ async def test_list_snapshots_flattened_async(): ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.list_snapshots(parent="parent_value",) + response = await client.list_snapshots( + parent="parent_value", + ) # Establish that the underlying call was made with the expected # request object values. @@ -3444,13 +3712,15 @@ async def test_list_snapshots_flattened_error_async(): # fields is an error. with pytest.raises(ValueError): await client.list_snapshots( - bigtable_table_admin.ListSnapshotsRequest(), parent="parent_value", + bigtable_table_admin.ListSnapshotsRequest(), + parent="parent_value", ) def test_list_snapshots_pager(transport_name: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials, transport=transport_name, + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, ) # Mock the actual call within the gRPC stub, and fake the request. @@ -3458,17 +3728,28 @@ def test_list_snapshots_pager(transport_name: str = "grpc"): # Set the response to a series of pages. 
call.side_effect = ( bigtable_table_admin.ListSnapshotsResponse( - snapshots=[table.Snapshot(), table.Snapshot(), table.Snapshot(),], + snapshots=[ + table.Snapshot(), + table.Snapshot(), + table.Snapshot(), + ], next_page_token="abc", ), bigtable_table_admin.ListSnapshotsResponse( - snapshots=[], next_page_token="def", + snapshots=[], + next_page_token="def", ), bigtable_table_admin.ListSnapshotsResponse( - snapshots=[table.Snapshot(),], next_page_token="ghi", + snapshots=[ + table.Snapshot(), + ], + next_page_token="ghi", ), bigtable_table_admin.ListSnapshotsResponse( - snapshots=[table.Snapshot(), table.Snapshot(),], + snapshots=[ + table.Snapshot(), + table.Snapshot(), + ], ), RuntimeError, ) @@ -3488,7 +3769,8 @@ def test_list_snapshots_pager(transport_name: str = "grpc"): def test_list_snapshots_pages(transport_name: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials, transport=transport_name, + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, ) # Mock the actual call within the gRPC stub, and fake the request. @@ -3496,17 +3778,28 @@ def test_list_snapshots_pages(transport_name: str = "grpc"): # Set the response to a series of pages. call.side_effect = ( bigtable_table_admin.ListSnapshotsResponse( - snapshots=[table.Snapshot(), table.Snapshot(), table.Snapshot(),], + snapshots=[ + table.Snapshot(), + table.Snapshot(), + table.Snapshot(), + ], next_page_token="abc", ), bigtable_table_admin.ListSnapshotsResponse( - snapshots=[], next_page_token="def", + snapshots=[], + next_page_token="def", ), bigtable_table_admin.ListSnapshotsResponse( - snapshots=[table.Snapshot(),], next_page_token="ghi", + snapshots=[ + table.Snapshot(), + ], + next_page_token="ghi", ), bigtable_table_admin.ListSnapshotsResponse( - snapshots=[table.Snapshot(), table.Snapshot(),], + snapshots=[ + table.Snapshot(), + table.Snapshot(), + ], ), RuntimeError, ) @@ -3528,21 +3821,34 @@ async def test_list_snapshots_async_pager(): # Set the response to a series of pages. call.side_effect = ( bigtable_table_admin.ListSnapshotsResponse( - snapshots=[table.Snapshot(), table.Snapshot(), table.Snapshot(),], + snapshots=[ + table.Snapshot(), + table.Snapshot(), + table.Snapshot(), + ], next_page_token="abc", ), bigtable_table_admin.ListSnapshotsResponse( - snapshots=[], next_page_token="def", + snapshots=[], + next_page_token="def", ), bigtable_table_admin.ListSnapshotsResponse( - snapshots=[table.Snapshot(),], next_page_token="ghi", + snapshots=[ + table.Snapshot(), + ], + next_page_token="ghi", ), bigtable_table_admin.ListSnapshotsResponse( - snapshots=[table.Snapshot(), table.Snapshot(),], + snapshots=[ + table.Snapshot(), + table.Snapshot(), + ], ), RuntimeError, ) - async_pager = await client.list_snapshots(request={},) + async_pager = await client.list_snapshots( + request={}, + ) assert async_pager.next_page_token == "abc" responses = [] async for response in async_pager: @@ -3565,17 +3871,28 @@ async def test_list_snapshots_async_pages(): # Set the response to a series of pages. 
call.side_effect = ( bigtable_table_admin.ListSnapshotsResponse( - snapshots=[table.Snapshot(), table.Snapshot(), table.Snapshot(),], + snapshots=[ + table.Snapshot(), + table.Snapshot(), + table.Snapshot(), + ], next_page_token="abc", ), bigtable_table_admin.ListSnapshotsResponse( - snapshots=[], next_page_token="def", + snapshots=[], + next_page_token="def", ), bigtable_table_admin.ListSnapshotsResponse( - snapshots=[table.Snapshot(),], next_page_token="ghi", + snapshots=[ + table.Snapshot(), + ], + next_page_token="ghi", ), bigtable_table_admin.ListSnapshotsResponse( - snapshots=[table.Snapshot(), table.Snapshot(),], + snapshots=[ + table.Snapshot(), + table.Snapshot(), + ], ), RuntimeError, ) @@ -3587,11 +3904,16 @@ async def test_list_snapshots_async_pages(): @pytest.mark.parametrize( - "request_type", [bigtable_table_admin.DeleteSnapshotRequest, dict,] + "request_type", + [ + bigtable_table_admin.DeleteSnapshotRequest, + dict, + ], ) def test_delete_snapshot(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -3617,7 +3939,8 @@ def test_delete_snapshot_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -3634,7 +3957,8 @@ async def test_delete_snapshot_async( request_type=bigtable_table_admin.DeleteSnapshotRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -3684,7 +4008,10 @@ def test_delete_snapshot_field_headers(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "name=name/value", + ) in kw["metadata"] @pytest.mark.asyncio @@ -3711,7 +4038,10 @@ async def test_delete_snapshot_field_headers_async(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "name=name/value", + ) in kw["metadata"] def test_delete_snapshot_flattened(): @@ -3725,7 +4055,9 @@ def test_delete_snapshot_flattened(): call.return_value = None # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - client.delete_snapshot(name="name_value",) + client.delete_snapshot( + name="name_value", + ) # Establish that the underlying call was made with the expected # request object values. @@ -3745,7 +4077,8 @@ def test_delete_snapshot_flattened_error(): # fields is an error. 
with pytest.raises(ValueError): client.delete_snapshot( - bigtable_table_admin.DeleteSnapshotRequest(), name="name_value", + bigtable_table_admin.DeleteSnapshotRequest(), + name="name_value", ) @@ -3763,7 +4096,9 @@ async def test_delete_snapshot_flattened_async(): call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.delete_snapshot(name="name_value",) + response = await client.delete_snapshot( + name="name_value", + ) # Establish that the underlying call was made with the expected # request object values. @@ -3784,16 +4119,22 @@ async def test_delete_snapshot_flattened_error_async(): # fields is an error. with pytest.raises(ValueError): await client.delete_snapshot( - bigtable_table_admin.DeleteSnapshotRequest(), name="name_value", + bigtable_table_admin.DeleteSnapshotRequest(), + name="name_value", ) @pytest.mark.parametrize( - "request_type", [bigtable_table_admin.CreateBackupRequest, dict,] + "request_type", + [ + bigtable_table_admin.CreateBackupRequest, + dict, + ], ) def test_create_backup(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -3819,7 +4160,8 @@ def test_create_backup_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -3836,7 +4178,8 @@ async def test_create_backup_async( request_type=bigtable_table_admin.CreateBackupRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -3888,7 +4231,10 @@ def test_create_backup_field_headers(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "parent=parent/value", + ) in kw["metadata"] @pytest.mark.asyncio @@ -3917,7 +4263,10 @@ async def test_create_backup_field_headers_async(): # Establish that the field header was sent. 
_, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "parent=parent/value", + ) in kw["metadata"] def test_create_backup_flattened(): @@ -4022,10 +4371,17 @@ async def test_create_backup_flattened_error_async(): ) -@pytest.mark.parametrize("request_type", [bigtable_table_admin.GetBackupRequest, dict,]) +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.GetBackupRequest, + dict, + ], +) def test_get_backup(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -4060,7 +4416,8 @@ def test_get_backup_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -4076,7 +4433,8 @@ async def test_get_backup_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.GetBackupRequest ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -4137,7 +4495,10 @@ def test_get_backup_field_headers(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "name=name/value", + ) in kw["metadata"] @pytest.mark.asyncio @@ -4164,7 +4525,10 @@ async def test_get_backup_field_headers_async(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "name=name/value", + ) in kw["metadata"] def test_get_backup_flattened(): @@ -4178,7 +4542,9 @@ def test_get_backup_flattened(): call.return_value = table.Backup() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - client.get_backup(name="name_value",) + client.get_backup( + name="name_value", + ) # Establish that the underlying call was made with the expected # request object values. @@ -4198,7 +4564,8 @@ def test_get_backup_flattened_error(): # fields is an error. with pytest.raises(ValueError): client.get_backup( - bigtable_table_admin.GetBackupRequest(), name="name_value", + bigtable_table_admin.GetBackupRequest(), + name="name_value", ) @@ -4216,7 +4583,9 @@ async def test_get_backup_flattened_async(): call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Backup()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.get_backup(name="name_value",) + response = await client.get_backup( + name="name_value", + ) # Establish that the underlying call was made with the expected # request object values. @@ -4237,16 +4606,22 @@ async def test_get_backup_flattened_error_async(): # fields is an error. 
with pytest.raises(ValueError): await client.get_backup( - bigtable_table_admin.GetBackupRequest(), name="name_value", + bigtable_table_admin.GetBackupRequest(), + name="name_value", ) @pytest.mark.parametrize( - "request_type", [bigtable_table_admin.UpdateBackupRequest, dict,] + "request_type", + [ + bigtable_table_admin.UpdateBackupRequest, + dict, + ], ) def test_update_backup(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -4281,7 +4656,8 @@ def test_update_backup_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -4298,7 +4674,8 @@ async def test_update_backup_async( request_type=bigtable_table_admin.UpdateBackupRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -4359,7 +4736,10 @@ def test_update_backup_field_headers(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "backup.name=backup.name/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "backup.name=backup.name/value", + ) in kw["metadata"] @pytest.mark.asyncio @@ -4386,7 +4766,10 @@ async def test_update_backup_field_headers_async(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "backup.name=backup.name/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "backup.name=backup.name/value", + ) in kw["metadata"] def test_update_backup_flattened(): @@ -4480,11 +4863,16 @@ async def test_update_backup_flattened_error_async(): @pytest.mark.parametrize( - "request_type", [bigtable_table_admin.DeleteBackupRequest, dict,] + "request_type", + [ + bigtable_table_admin.DeleteBackupRequest, + dict, + ], ) def test_delete_backup(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -4510,7 +4898,8 @@ def test_delete_backup_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -4527,7 +4916,8 @@ async def test_delete_backup_async( request_type=bigtable_table_admin.DeleteBackupRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -4577,7 +4967,10 @@ def test_delete_backup_field_headers(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "name=name/value", + ) in kw["metadata"] @pytest.mark.asyncio @@ -4604,7 +4997,10 @@ async def test_delete_backup_field_headers_async(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "name=name/value", + ) in kw["metadata"] def test_delete_backup_flattened(): @@ -4618,7 +5014,9 @@ def test_delete_backup_flattened(): call.return_value = None # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - client.delete_backup(name="name_value",) + client.delete_backup( + name="name_value", + ) # Establish that the underlying call was made with the expected # request object values. @@ -4638,7 +5036,8 @@ def test_delete_backup_flattened_error(): # fields is an error. with pytest.raises(ValueError): client.delete_backup( - bigtable_table_admin.DeleteBackupRequest(), name="name_value", + bigtable_table_admin.DeleteBackupRequest(), + name="name_value", ) @@ -4656,7 +5055,9 @@ async def test_delete_backup_flattened_async(): call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.delete_backup(name="name_value",) + response = await client.delete_backup( + name="name_value", + ) # Establish that the underlying call was made with the expected # request object values. @@ -4677,16 +5078,22 @@ async def test_delete_backup_flattened_error_async(): # fields is an error. with pytest.raises(ValueError): await client.delete_backup( - bigtable_table_admin.DeleteBackupRequest(), name="name_value", + bigtable_table_admin.DeleteBackupRequest(), + name="name_value", ) @pytest.mark.parametrize( - "request_type", [bigtable_table_admin.ListBackupsRequest, dict,] + "request_type", + [ + bigtable_table_admin.ListBackupsRequest, + dict, + ], ) def test_list_backups(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -4715,7 +5122,8 @@ def test_list_backups_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -4732,7 +5140,8 @@ async def test_list_backups_async( request_type=bigtable_table_admin.ListBackupsRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -4787,7 +5196,10 @@ def test_list_backups_field_headers(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "parent=parent/value", + ) in kw["metadata"] @pytest.mark.asyncio @@ -4816,7 +5228,10 @@ async def test_list_backups_field_headers_async(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "parent=parent/value", + ) in kw["metadata"] def test_list_backups_flattened(): @@ -4830,7 +5245,9 @@ def test_list_backups_flattened(): call.return_value = bigtable_table_admin.ListBackupsResponse() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - client.list_backups(parent="parent_value",) + client.list_backups( + parent="parent_value", + ) # Establish that the underlying call was made with the expected # request object values. @@ -4850,7 +5267,8 @@ def test_list_backups_flattened_error(): # fields is an error. with pytest.raises(ValueError): client.list_backups( - bigtable_table_admin.ListBackupsRequest(), parent="parent_value", + bigtable_table_admin.ListBackupsRequest(), + parent="parent_value", ) @@ -4870,7 +5288,9 @@ async def test_list_backups_flattened_async(): ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.list_backups(parent="parent_value",) + response = await client.list_backups( + parent="parent_value", + ) # Establish that the underlying call was made with the expected # request object values. @@ -4891,13 +5311,15 @@ async def test_list_backups_flattened_error_async(): # fields is an error. with pytest.raises(ValueError): await client.list_backups( - bigtable_table_admin.ListBackupsRequest(), parent="parent_value", + bigtable_table_admin.ListBackupsRequest(), + parent="parent_value", ) def test_list_backups_pager(transport_name: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials, transport=transport_name, + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, ) # Mock the actual call within the gRPC stub, and fake the request. @@ -4905,17 +5327,28 @@ def test_list_backups_pager(transport_name: str = "grpc"): # Set the response to a series of pages. 
call.side_effect = ( bigtable_table_admin.ListBackupsResponse( - backups=[table.Backup(), table.Backup(), table.Backup(),], + backups=[ + table.Backup(), + table.Backup(), + table.Backup(), + ], next_page_token="abc", ), bigtable_table_admin.ListBackupsResponse( - backups=[], next_page_token="def", + backups=[], + next_page_token="def", ), bigtable_table_admin.ListBackupsResponse( - backups=[table.Backup(),], next_page_token="ghi", + backups=[ + table.Backup(), + ], + next_page_token="ghi", ), bigtable_table_admin.ListBackupsResponse( - backups=[table.Backup(), table.Backup(),], + backups=[ + table.Backup(), + table.Backup(), + ], ), RuntimeError, ) @@ -4935,7 +5368,8 @@ def test_list_backups_pager(transport_name: str = "grpc"): def test_list_backups_pages(transport_name: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials, transport=transport_name, + credentials=ga_credentials.AnonymousCredentials, + transport=transport_name, ) # Mock the actual call within the gRPC stub, and fake the request. @@ -4943,17 +5377,28 @@ def test_list_backups_pages(transport_name: str = "grpc"): # Set the response to a series of pages. call.side_effect = ( bigtable_table_admin.ListBackupsResponse( - backups=[table.Backup(), table.Backup(), table.Backup(),], + backups=[ + table.Backup(), + table.Backup(), + table.Backup(), + ], next_page_token="abc", ), bigtable_table_admin.ListBackupsResponse( - backups=[], next_page_token="def", + backups=[], + next_page_token="def", ), bigtable_table_admin.ListBackupsResponse( - backups=[table.Backup(),], next_page_token="ghi", + backups=[ + table.Backup(), + ], + next_page_token="ghi", ), bigtable_table_admin.ListBackupsResponse( - backups=[table.Backup(), table.Backup(),], + backups=[ + table.Backup(), + table.Backup(), + ], ), RuntimeError, ) @@ -4975,21 +5420,34 @@ async def test_list_backups_async_pager(): # Set the response to a series of pages. call.side_effect = ( bigtable_table_admin.ListBackupsResponse( - backups=[table.Backup(), table.Backup(), table.Backup(),], + backups=[ + table.Backup(), + table.Backup(), + table.Backup(), + ], next_page_token="abc", ), bigtable_table_admin.ListBackupsResponse( - backups=[], next_page_token="def", + backups=[], + next_page_token="def", ), bigtable_table_admin.ListBackupsResponse( - backups=[table.Backup(),], next_page_token="ghi", + backups=[ + table.Backup(), + ], + next_page_token="ghi", ), bigtable_table_admin.ListBackupsResponse( - backups=[table.Backup(), table.Backup(),], + backups=[ + table.Backup(), + table.Backup(), + ], ), RuntimeError, ) - async_pager = await client.list_backups(request={},) + async_pager = await client.list_backups( + request={}, + ) assert async_pager.next_page_token == "abc" responses = [] async for response in async_pager: @@ -5012,17 +5470,28 @@ async def test_list_backups_async_pages(): # Set the response to a series of pages. 
call.side_effect = ( bigtable_table_admin.ListBackupsResponse( - backups=[table.Backup(), table.Backup(), table.Backup(),], + backups=[ + table.Backup(), + table.Backup(), + table.Backup(), + ], next_page_token="abc", ), bigtable_table_admin.ListBackupsResponse( - backups=[], next_page_token="def", + backups=[], + next_page_token="def", ), bigtable_table_admin.ListBackupsResponse( - backups=[table.Backup(),], next_page_token="ghi", + backups=[ + table.Backup(), + ], + next_page_token="ghi", ), bigtable_table_admin.ListBackupsResponse( - backups=[table.Backup(), table.Backup(),], + backups=[ + table.Backup(), + table.Backup(), + ], ), RuntimeError, ) @@ -5034,11 +5503,16 @@ async def test_list_backups_async_pages(): @pytest.mark.parametrize( - "request_type", [bigtable_table_admin.RestoreTableRequest, dict,] + "request_type", + [ + bigtable_table_admin.RestoreTableRequest, + dict, + ], ) def test_restore_table(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -5064,7 +5538,8 @@ def test_restore_table_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -5081,7 +5556,8 @@ async def test_restore_table_async( request_type=bigtable_table_admin.RestoreTableRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -5133,7 +5609,10 @@ def test_restore_table_field_headers(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "parent=parent/value", + ) in kw["metadata"] @pytest.mark.asyncio @@ -5162,13 +5641,23 @@ async def test_restore_table_field_headers_async(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "parent=parent/value", + ) in kw["metadata"] -@pytest.mark.parametrize("request_type", [iam_policy_pb2.GetIamPolicyRequest, dict,]) +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.GetIamPolicyRequest, + dict, + ], +) def test_get_iam_policy(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -5178,7 +5667,10 @@ def test_get_iam_policy(request_type, transport: str = "grpc"): # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: # Designate an appropriate return value for the call. 
- call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",) + call.return_value = policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) response = client.get_iam_policy(request) # Establish that the underlying gRPC stub method was called. @@ -5196,7 +5688,8 @@ def test_get_iam_policy_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -5212,7 +5705,8 @@ async def test_get_iam_policy_async( transport: str = "grpc_asyncio", request_type=iam_policy_pb2.GetIamPolicyRequest ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -5223,7 +5717,10 @@ async def test_get_iam_policy_async( with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - policy_pb2.Policy(version=774, etag=b"etag_blob",) + policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) ) response = await client.get_iam_policy(request) @@ -5266,7 +5763,10 @@ def test_get_iam_policy_field_headers(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] @pytest.mark.asyncio @@ -5293,7 +5793,10 @@ async def test_get_iam_policy_field_headers_async(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] def test_get_iam_policy_from_dict_foreign(): @@ -5324,7 +5827,9 @@ def test_get_iam_policy_flattened(): call.return_value = policy_pb2.Policy() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - client.get_iam_policy(resource="resource_value",) + client.get_iam_policy( + resource="resource_value", + ) # Establish that the underlying call was made with the expected # request object values. @@ -5344,7 +5849,8 @@ def test_get_iam_policy_flattened_error(): # fields is an error. with pytest.raises(ValueError): client.get_iam_policy( - iam_policy_pb2.GetIamPolicyRequest(), resource="resource_value", + iam_policy_pb2.GetIamPolicyRequest(), + resource="resource_value", ) @@ -5362,7 +5868,9 @@ async def test_get_iam_policy_flattened_async(): call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.get_iam_policy(resource="resource_value",) + response = await client.get_iam_policy( + resource="resource_value", + ) # Establish that the underlying call was made with the expected # request object values. @@ -5383,14 +5891,22 @@ async def test_get_iam_policy_flattened_error_async(): # fields is an error. 
with pytest.raises(ValueError): await client.get_iam_policy( - iam_policy_pb2.GetIamPolicyRequest(), resource="resource_value", + iam_policy_pb2.GetIamPolicyRequest(), + resource="resource_value", ) -@pytest.mark.parametrize("request_type", [iam_policy_pb2.SetIamPolicyRequest, dict,]) +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.SetIamPolicyRequest, + dict, + ], +) def test_set_iam_policy(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -5400,7 +5916,10 @@ def test_set_iam_policy(request_type, transport: str = "grpc"): # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = policy_pb2.Policy(version=774, etag=b"etag_blob",) + call.return_value = policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) response = client.set_iam_policy(request) # Establish that the underlying gRPC stub method was called. @@ -5418,7 +5937,8 @@ def test_set_iam_policy_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -5434,7 +5954,8 @@ async def test_set_iam_policy_async( transport: str = "grpc_asyncio", request_type=iam_policy_pb2.SetIamPolicyRequest ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -5445,7 +5966,10 @@ async def test_set_iam_policy_async( with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - policy_pb2.Policy(version=774, etag=b"etag_blob",) + policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) ) response = await client.set_iam_policy(request) @@ -5488,7 +6012,10 @@ def test_set_iam_policy_field_headers(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] @pytest.mark.asyncio @@ -5515,7 +6042,10 @@ async def test_set_iam_policy_field_headers_async(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] def test_set_iam_policy_from_dict_foreign(): @@ -5546,7 +6076,9 @@ def test_set_iam_policy_flattened(): call.return_value = policy_pb2.Policy() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. 
- client.set_iam_policy(resource="resource_value",) + client.set_iam_policy( + resource="resource_value", + ) # Establish that the underlying call was made with the expected # request object values. @@ -5566,7 +6098,8 @@ def test_set_iam_policy_flattened_error(): # fields is an error. with pytest.raises(ValueError): client.set_iam_policy( - iam_policy_pb2.SetIamPolicyRequest(), resource="resource_value", + iam_policy_pb2.SetIamPolicyRequest(), + resource="resource_value", ) @@ -5584,7 +6117,9 @@ async def test_set_iam_policy_flattened_async(): call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.set_iam_policy(resource="resource_value",) + response = await client.set_iam_policy( + resource="resource_value", + ) # Establish that the underlying call was made with the expected # request object values. @@ -5605,16 +6140,22 @@ async def test_set_iam_policy_flattened_error_async(): # fields is an error. with pytest.raises(ValueError): await client.set_iam_policy( - iam_policy_pb2.SetIamPolicyRequest(), resource="resource_value", + iam_policy_pb2.SetIamPolicyRequest(), + resource="resource_value", ) @pytest.mark.parametrize( - "request_type", [iam_policy_pb2.TestIamPermissionsRequest, dict,] + "request_type", + [ + iam_policy_pb2.TestIamPermissionsRequest, + dict, + ], ) def test_test_iam_permissions(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -5645,7 +6186,8 @@ def test_test_iam_permissions_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -5664,7 +6206,8 @@ async def test_test_iam_permissions_async( request_type=iam_policy_pb2.TestIamPermissionsRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -5723,7 +6266,10 @@ def test_test_iam_permissions_field_headers(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] @pytest.mark.asyncio @@ -5754,7 +6300,10 @@ async def test_test_iam_permissions_field_headers_async(): # Establish that the field header was sent. _, _, kw = call.mock_calls[0] - assert ("x-goog-request-params", "resource=resource/value",) in kw["metadata"] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] def test_test_iam_permissions_from_dict_foreign(): @@ -5790,7 +6339,8 @@ def test_test_iam_permissions_flattened(): # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. 
client.test_iam_permissions( - resource="resource_value", permissions=["permissions_value"], + resource="resource_value", + permissions=["permissions_value"], ) # Establish that the underlying call was made with the expected @@ -5839,7 +6389,8 @@ async def test_test_iam_permissions_flattened_async(): # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.test_iam_permissions( - resource="resource_value", permissions=["permissions_value"], + resource="resource_value", + permissions=["permissions_value"], ) # Establish that the underlying call was made with the expected @@ -5877,7 +6428,8 @@ def test_credentials_transport_error(): ) with pytest.raises(ValueError): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # It is an error to provide a credentials file and a transport instance. @@ -5897,7 +6449,10 @@ def test_credentials_transport_error(): options = client_options.ClientOptions() options.api_key = "api_key" with pytest.raises(ValueError): - client = BigtableTableAdminClient(client_options=options, transport=transport,) + client = BigtableTableAdminClient( + client_options=options, + transport=transport, + ) # It is an error to provide an api_key and a credential. options = mock.Mock() @@ -5913,7 +6468,8 @@ def test_credentials_transport_error(): ) with pytest.raises(ValueError): client = BigtableTableAdminClient( - client_options={"scopes": ["1", "2"]}, transport=transport, + client_options={"scopes": ["1", "2"]}, + transport=transport, ) @@ -5961,7 +6517,10 @@ def test_transport_grpc_default(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) - assert isinstance(client.transport, transports.BigtableTableAdminGrpcTransport,) + assert isinstance( + client.transport, + transports.BigtableTableAdminGrpcTransport, + ) def test_bigtable_table_admin_base_transport_error(): @@ -6032,7 +6591,8 @@ def test_bigtable_table_admin_base_transport_with_credentials_file(): Transport.return_value = None load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) transport = transports.BigtableTableAdminTransport( - credentials_file="credentials.json", quota_project_id="octopus", + credentials_file="credentials.json", + quota_project_id="octopus", ) load_creds.assert_called_once_with( "credentials.json", @@ -6220,7 +6780,8 @@ def test_bigtable_table_admin_grpc_transport_channel(): # Check that channel is used if provided. transport = transports.BigtableTableAdminGrpcTransport( - host="squid.clam.whelk", channel=channel, + host="squid.clam.whelk", + channel=channel, ) assert transport.grpc_channel == channel assert transport._host == "squid.clam.whelk:443" @@ -6232,7 +6793,8 @@ def test_bigtable_table_admin_grpc_asyncio_transport_channel(): # Check that channel is used if provided. 
transport = transports.BigtableTableAdminGrpcAsyncIOTransport( - host="squid.clam.whelk", channel=channel, + host="squid.clam.whelk", + channel=channel, ) assert transport.grpc_channel == channel assert transport._host == "squid.clam.whelk:443" @@ -6341,12 +6903,16 @@ def test_bigtable_table_admin_transport_channel_mtls_with_adc(transport_class): def test_bigtable_table_admin_grpc_lro_client(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) transport = client.transport # Ensure that we have a api-core operations client. - assert isinstance(transport.operations_client, operations_v1.OperationsClient,) + assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) # Ensure that subsequent calls to the property send the exact same object. assert transport.operations_client is transport.operations_client @@ -6354,12 +6920,16 @@ def test_bigtable_table_admin_grpc_lro_client(): def test_bigtable_table_admin_grpc_lro_async_client(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", ) transport = client.transport # Ensure that we have a api-core operations client. - assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,) + assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) # Ensure that subsequent calls to the property send the exact same object. assert transport.operations_client is transport.operations_client @@ -6371,7 +6941,10 @@ def test_backup_path(): cluster = "whelk" backup = "octopus" expected = "projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}".format( - project=project, instance=instance, cluster=cluster, backup=backup, + project=project, + instance=instance, + cluster=cluster, + backup=backup, ) actual = BigtableTableAdminClient.backup_path(project, instance, cluster, backup) assert expected == actual @@ -6396,7 +6969,9 @@ def test_cluster_path(): instance = "nautilus" cluster = "scallop" expected = "projects/{project}/instances/{instance}/clusters/{cluster}".format( - project=project, instance=instance, cluster=cluster, + project=project, + instance=instance, + cluster=cluster, ) actual = BigtableTableAdminClient.cluster_path(project, instance, cluster) assert expected == actual @@ -6453,7 +7028,8 @@ def test_instance_path(): project = "squid" instance = "clam" expected = "projects/{project}/instances/{instance}".format( - project=project, instance=instance, + project=project, + instance=instance, ) actual = BigtableTableAdminClient.instance_path(project, instance) assert expected == actual @@ -6477,7 +7053,10 @@ def test_snapshot_path(): cluster = "cuttlefish" snapshot = "mussel" expected = "projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}".format( - project=project, instance=instance, cluster=cluster, snapshot=snapshot, + project=project, + instance=instance, + cluster=cluster, + snapshot=snapshot, ) actual = BigtableTableAdminClient.snapshot_path( project, instance, cluster, snapshot @@ -6504,7 +7083,9 @@ def test_table_path(): instance = "clam" table = "whelk" expected = "projects/{project}/instances/{instance}/tables/{table}".format( - project=project, instance=instance, table=table, + project=project, + instance=instance, + table=table, ) actual = 
BigtableTableAdminClient.table_path(project, instance, table) assert expected == actual @@ -6545,7 +7126,9 @@ def test_parse_common_billing_account_path(): def test_common_folder_path(): folder = "winkle" - expected = "folders/{folder}".format(folder=folder,) + expected = "folders/{folder}".format( + folder=folder, + ) actual = BigtableTableAdminClient.common_folder_path(folder) assert expected == actual @@ -6563,7 +7146,9 @@ def test_parse_common_folder_path(): def test_common_organization_path(): organization = "scallop" - expected = "organizations/{organization}".format(organization=organization,) + expected = "organizations/{organization}".format( + organization=organization, + ) actual = BigtableTableAdminClient.common_organization_path(organization) assert expected == actual @@ -6581,7 +7166,9 @@ def test_parse_common_organization_path(): def test_common_project_path(): project = "squid" - expected = "projects/{project}".format(project=project,) + expected = "projects/{project}".format( + project=project, + ) actual = BigtableTableAdminClient.common_project_path(project) assert expected == actual @@ -6601,7 +7188,8 @@ def test_common_location_path(): project = "whelk" location = "octopus" expected = "projects/{project}/locations/{location}".format( - project=project, location=location, + project=project, + location=location, ) actual = BigtableTableAdminClient.common_location_path(project, location) assert expected == actual @@ -6626,7 +7214,8 @@ def test_client_with_default_client_info(): transports.BigtableTableAdminTransport, "_prep_wrapped_messages" ) as prep: client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, ) prep.assert_called_once_with(client_info) @@ -6635,7 +7224,8 @@ def test_client_with_default_client_info(): ) as prep: transport_class = BigtableTableAdminClient.get_transport_class() transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, ) prep.assert_called_once_with(client_info) @@ -6643,7 +7233,8 @@ def test_client_with_default_client_info(): @pytest.mark.asyncio async def test_transport_close_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", ) with mock.patch.object( type(getattr(client.transport, "grpc_channel")), "close" diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py index f8d18608849d..d8b694bc0fa9 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py @@ -79,7 +79,13 @@ def test__get_default_mtls_endpoint(): assert BigtableClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi -@pytest.mark.parametrize("client_class", [BigtableClient, BigtableAsyncClient,]) +@pytest.mark.parametrize( + "client_class", + [ + BigtableClient, + BigtableAsyncClient, + ], +) def test_bigtable_client_from_service_account_info(client_class): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( @@ -119,7 +125,13 @@ def test_bigtable_client_service_account_always_use_jwt( use_jwt.assert_not_called() 
-@pytest.mark.parametrize("client_class", [BigtableClient, BigtableAsyncClient,]) +@pytest.mark.parametrize( + "client_class", + [ + BigtableClient, + BigtableAsyncClient, + ], +) def test_bigtable_client_from_service_account_file(client_class): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( @@ -464,7 +476,9 @@ def test_bigtable_client_client_options_scopes( client_class, transport_class, transport_name ): # Check the case scopes are provided. - options = client_options.ClientOptions(scopes=["1", "2"],) + options = client_options.ClientOptions( + scopes=["1", "2"], + ) with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class(client_options=options, transport=transport_name) @@ -599,10 +613,17 @@ def test_bigtable_client_create_channel_credentials_file( ) -@pytest.mark.parametrize("request_type", [bigtable.ReadRowsRequest, dict,]) +@pytest.mark.parametrize( + "request_type", + [ + bigtable.ReadRowsRequest, + dict, + ], +) def test_read_rows(request_type, transport: str = "grpc"): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -629,7 +650,8 @@ def test_read_rows_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -645,7 +667,8 @@ async def test_read_rows_async( transport: str = "grpc_asyncio", request_type=bigtable.ReadRowsRequest ): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -677,7 +700,9 @@ async def test_read_rows_async_from_dict(): def test_read_rows_routing_parameters(): - client = BigtableClient(credentials=ga_credentials.AnonymousCredentials(),) + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. @@ -718,7 +743,9 @@ def test_read_rows_routing_parameters(): def test_read_rows_flattened(): - client = BigtableClient(credentials=ga_credentials.AnonymousCredentials(),) + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.read_rows), "__call__") as call: @@ -727,7 +754,8 @@ def test_read_rows_flattened(): # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. 
client.read_rows( - table_name="table_name_value", app_profile_id="app_profile_id_value", + table_name="table_name_value", + app_profile_id="app_profile_id_value", ) # Establish that the underlying call was made with the expected @@ -743,7 +771,9 @@ def test_read_rows_flattened(): def test_read_rows_flattened_error(): - client = BigtableClient(credentials=ga_credentials.AnonymousCredentials(),) + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Attempting to call a method with both a request object and flattened # fields is an error. @@ -757,7 +787,9 @@ def test_read_rows_flattened_error(): @pytest.mark.asyncio async def test_read_rows_flattened_async(): - client = BigtableAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.read_rows), "__call__") as call: @@ -768,7 +800,8 @@ async def test_read_rows_flattened_async(): # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.read_rows( - table_name="table_name_value", app_profile_id="app_profile_id_value", + table_name="table_name_value", + app_profile_id="app_profile_id_value", ) # Establish that the underlying call was made with the expected @@ -785,7 +818,9 @@ async def test_read_rows_flattened_async(): @pytest.mark.asyncio async def test_read_rows_flattened_error_async(): - client = BigtableAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Attempting to call a method with both a request object and flattened # fields is an error. @@ -797,10 +832,17 @@ async def test_read_rows_flattened_error_async(): ) -@pytest.mark.parametrize("request_type", [bigtable.SampleRowKeysRequest, dict,]) +@pytest.mark.parametrize( + "request_type", + [ + bigtable.SampleRowKeysRequest, + dict, + ], +) def test_sample_row_keys(request_type, transport: str = "grpc"): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -827,7 +869,8 @@ def test_sample_row_keys_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -843,7 +886,8 @@ async def test_sample_row_keys_async( transport: str = "grpc_asyncio", request_type=bigtable.SampleRowKeysRequest ): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -875,7 +919,9 @@ async def test_sample_row_keys_async_from_dict(): def test_sample_row_keys_routing_parameters(): - client = BigtableClient(credentials=ga_credentials.AnonymousCredentials(),) + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. @@ -916,7 +962,9 @@ def test_sample_row_keys_routing_parameters(): def test_sample_row_keys_flattened(): - client = BigtableClient(credentials=ga_credentials.AnonymousCredentials(),) + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: @@ -925,7 +973,8 @@ def test_sample_row_keys_flattened(): # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. client.sample_row_keys( - table_name="table_name_value", app_profile_id="app_profile_id_value", + table_name="table_name_value", + app_profile_id="app_profile_id_value", ) # Establish that the underlying call was made with the expected @@ -941,7 +990,9 @@ def test_sample_row_keys_flattened(): def test_sample_row_keys_flattened_error(): - client = BigtableClient(credentials=ga_credentials.AnonymousCredentials(),) + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Attempting to call a method with both a request object and flattened # fields is an error. @@ -955,7 +1006,9 @@ def test_sample_row_keys_flattened_error(): @pytest.mark.asyncio async def test_sample_row_keys_flattened_async(): - client = BigtableAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: @@ -966,7 +1019,8 @@ async def test_sample_row_keys_flattened_async(): # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.sample_row_keys( - table_name="table_name_value", app_profile_id="app_profile_id_value", + table_name="table_name_value", + app_profile_id="app_profile_id_value", ) # Establish that the underlying call was made with the expected @@ -983,7 +1037,9 @@ async def test_sample_row_keys_flattened_async(): @pytest.mark.asyncio async def test_sample_row_keys_flattened_error_async(): - client = BigtableAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Attempting to call a method with both a request object and flattened # fields is an error. 
@@ -995,10 +1051,17 @@ async def test_sample_row_keys_flattened_error_async(): ) -@pytest.mark.parametrize("request_type", [bigtable.MutateRowRequest, dict,]) +@pytest.mark.parametrize( + "request_type", + [ + bigtable.MutateRowRequest, + dict, + ], +) def test_mutate_row(request_type, transport: str = "grpc"): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -1024,7 +1087,8 @@ def test_mutate_row_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1040,7 +1104,8 @@ async def test_mutate_row_async( transport: str = "grpc_asyncio", request_type=bigtable.MutateRowRequest ): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -1070,7 +1135,9 @@ async def test_mutate_row_async_from_dict(): def test_mutate_row_routing_parameters(): - client = BigtableClient(credentials=ga_credentials.AnonymousCredentials(),) + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. @@ -1111,7 +1178,9 @@ def test_mutate_row_routing_parameters(): def test_mutate_row_flattened(): - client = BigtableClient(credentials=ga_credentials.AnonymousCredentials(),) + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: @@ -1153,7 +1222,9 @@ def test_mutate_row_flattened(): def test_mutate_row_flattened_error(): - client = BigtableClient(credentials=ga_credentials.AnonymousCredentials(),) + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Attempting to call a method with both a request object and flattened # fields is an error. @@ -1173,7 +1244,9 @@ def test_mutate_row_flattened_error(): @pytest.mark.asyncio async def test_mutate_row_flattened_async(): - client = BigtableAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: @@ -1220,7 +1293,9 @@ async def test_mutate_row_flattened_async(): @pytest.mark.asyncio async def test_mutate_row_flattened_error_async(): - client = BigtableAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Attempting to call a method with both a request object and flattened # fields is an error. 
@@ -1238,10 +1313,17 @@ async def test_mutate_row_flattened_error_async(): ) -@pytest.mark.parametrize("request_type", [bigtable.MutateRowsRequest, dict,]) +@pytest.mark.parametrize( + "request_type", + [ + bigtable.MutateRowsRequest, + dict, + ], +) def test_mutate_rows(request_type, transport: str = "grpc"): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -1268,7 +1350,8 @@ def test_mutate_rows_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1284,7 +1367,8 @@ async def test_mutate_rows_async( transport: str = "grpc_asyncio", request_type=bigtable.MutateRowsRequest ): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -1316,7 +1400,9 @@ async def test_mutate_rows_async_from_dict(): def test_mutate_rows_routing_parameters(): - client = BigtableClient(credentials=ga_credentials.AnonymousCredentials(),) + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. @@ -1357,7 +1443,9 @@ def test_mutate_rows_routing_parameters(): def test_mutate_rows_flattened(): - client = BigtableClient(credentials=ga_credentials.AnonymousCredentials(),) + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: @@ -1387,7 +1475,9 @@ def test_mutate_rows_flattened(): def test_mutate_rows_flattened_error(): - client = BigtableClient(credentials=ga_credentials.AnonymousCredentials(),) + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Attempting to call a method with both a request object and flattened # fields is an error. @@ -1402,7 +1492,9 @@ def test_mutate_rows_flattened_error(): @pytest.mark.asyncio async def test_mutate_rows_flattened_async(): - client = BigtableAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: @@ -1435,7 +1527,9 @@ async def test_mutate_rows_flattened_async(): @pytest.mark.asyncio async def test_mutate_rows_flattened_error_async(): - client = BigtableAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Attempting to call a method with both a request object and flattened # fields is an error. 
@@ -1448,10 +1542,17 @@ async def test_mutate_rows_flattened_error_async(): ) -@pytest.mark.parametrize("request_type", [bigtable.CheckAndMutateRowRequest, dict,]) +@pytest.mark.parametrize( + "request_type", + [ + bigtable.CheckAndMutateRowRequest, + dict, + ], +) def test_check_and_mutate_row(request_type, transport: str = "grpc"): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -1463,7 +1564,9 @@ def test_check_and_mutate_row(request_type, transport: str = "grpc"): type(client.transport.check_and_mutate_row), "__call__" ) as call: # Designate an appropriate return value for the call. - call.return_value = bigtable.CheckAndMutateRowResponse(predicate_matched=True,) + call.return_value = bigtable.CheckAndMutateRowResponse( + predicate_matched=True, + ) response = client.check_and_mutate_row(request) # Establish that the underlying gRPC stub method was called. @@ -1480,7 +1583,8 @@ def test_check_and_mutate_row_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1498,7 +1602,8 @@ async def test_check_and_mutate_row_async( transport: str = "grpc_asyncio", request_type=bigtable.CheckAndMutateRowRequest ): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -1511,7 +1616,9 @@ async def test_check_and_mutate_row_async( ) as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable.CheckAndMutateRowResponse(predicate_matched=True,) + bigtable.CheckAndMutateRowResponse( + predicate_matched=True, + ) ) response = await client.check_and_mutate_row(request) @@ -1531,7 +1638,9 @@ async def test_check_and_mutate_row_async_from_dict(): def test_check_and_mutate_row_routing_parameters(): - client = BigtableClient(credentials=ga_credentials.AnonymousCredentials(),) + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. @@ -1576,7 +1685,9 @@ def test_check_and_mutate_row_routing_parameters(): def test_check_and_mutate_row_flattened(): - client = BigtableClient(credentials=ga_credentials.AnonymousCredentials(),) + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -1654,7 +1765,9 @@ def test_check_and_mutate_row_flattened(): def test_check_and_mutate_row_flattened_error(): - client = BigtableClient(credentials=ga_credentials.AnonymousCredentials(),) + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Attempting to call a method with both a request object and flattened # fields is an error. 
@@ -1690,7 +1803,9 @@ def test_check_and_mutate_row_flattened_error(): @pytest.mark.asyncio async def test_check_and_mutate_row_flattened_async(): - client = BigtableAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -1773,7 +1888,9 @@ async def test_check_and_mutate_row_flattened_async(): @pytest.mark.asyncio async def test_check_and_mutate_row_flattened_error_async(): - client = BigtableAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Attempting to call a method with both a request object and flattened # fields is an error. @@ -1807,10 +1924,17 @@ async def test_check_and_mutate_row_flattened_error_async(): ) -@pytest.mark.parametrize("request_type", [bigtable.PingAndWarmRequest, dict,]) +@pytest.mark.parametrize( + "request_type", + [ + bigtable.PingAndWarmRequest, + dict, + ], +) def test_ping_and_warm(request_type, transport: str = "grpc"): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -1836,7 +1960,8 @@ def test_ping_and_warm_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1852,7 +1977,8 @@ async def test_ping_and_warm_async( transport: str = "grpc_asyncio", request_type=bigtable.PingAndWarmRequest ): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -1882,7 +2008,9 @@ async def test_ping_and_warm_async_from_dict(): def test_ping_and_warm_routing_parameters(): - client = BigtableClient(credentials=ga_credentials.AnonymousCredentials(),) + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. @@ -1923,7 +2051,9 @@ def test_ping_and_warm_routing_parameters(): def test_ping_and_warm_flattened(): - client = BigtableClient(credentials=ga_credentials.AnonymousCredentials(),) + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.ping_and_warm), "__call__") as call: @@ -1932,7 +2062,8 @@ def test_ping_and_warm_flattened(): # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. 
client.ping_and_warm( - name="name_value", app_profile_id="app_profile_id_value", + name="name_value", + app_profile_id="app_profile_id_value", ) # Establish that the underlying call was made with the expected @@ -1948,7 +2079,9 @@ def test_ping_and_warm_flattened(): def test_ping_and_warm_flattened_error(): - client = BigtableClient(credentials=ga_credentials.AnonymousCredentials(),) + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Attempting to call a method with both a request object and flattened # fields is an error. @@ -1962,7 +2095,9 @@ def test_ping_and_warm_flattened_error(): @pytest.mark.asyncio async def test_ping_and_warm_flattened_async(): - client = BigtableAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.ping_and_warm), "__call__") as call: @@ -1975,7 +2110,8 @@ async def test_ping_and_warm_flattened_async(): # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. response = await client.ping_and_warm( - name="name_value", app_profile_id="app_profile_id_value", + name="name_value", + app_profile_id="app_profile_id_value", ) # Establish that the underlying call was made with the expected @@ -1992,7 +2128,9 @@ async def test_ping_and_warm_flattened_async(): @pytest.mark.asyncio async def test_ping_and_warm_flattened_error_async(): - client = BigtableAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Attempting to call a method with both a request object and flattened # fields is an error. @@ -2004,10 +2142,17 @@ async def test_ping_and_warm_flattened_error_async(): ) -@pytest.mark.parametrize("request_type", [bigtable.ReadModifyWriteRowRequest, dict,]) +@pytest.mark.parametrize( + "request_type", + [ + bigtable.ReadModifyWriteRowRequest, + dict, + ], +) def test_read_modify_write_row(request_type, transport: str = "grpc"): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -2035,7 +2180,8 @@ def test_read_modify_write_row_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -2053,7 +2199,8 @@ async def test_read_modify_write_row_async( transport: str = "grpc_asyncio", request_type=bigtable.ReadModifyWriteRowRequest ): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, @@ -2085,7 +2232,9 @@ async def test_read_modify_write_row_async_from_dict(): def test_read_modify_write_row_routing_parameters(): - client = BigtableClient(credentials=ga_credentials.AnonymousCredentials(),) + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. @@ -2130,7 +2279,9 @@ def test_read_modify_write_row_routing_parameters(): def test_read_modify_write_row_flattened(): - client = BigtableClient(credentials=ga_credentials.AnonymousCredentials(),) + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -2166,7 +2317,9 @@ def test_read_modify_write_row_flattened(): def test_read_modify_write_row_flattened_error(): - client = BigtableClient(credentials=ga_credentials.AnonymousCredentials(),) + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Attempting to call a method with both a request object and flattened # fields is an error. @@ -2182,7 +2335,9 @@ def test_read_modify_write_row_flattened_error(): @pytest.mark.asyncio async def test_read_modify_write_row_flattened_async(): - client = BigtableAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -2223,7 +2378,9 @@ async def test_read_modify_write_row_flattened_async(): @pytest.mark.asyncio async def test_read_modify_write_row_flattened_error_async(): - client = BigtableAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) # Attempting to call a method with both a request object and flattened # fields is an error. @@ -2244,7 +2401,8 @@ def test_credentials_transport_error(): ) with pytest.raises(ValueError): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport, + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # It is an error to provide a credentials file and a transport instance. @@ -2264,7 +2422,10 @@ def test_credentials_transport_error(): options = client_options.ClientOptions() options.api_key = "api_key" with pytest.raises(ValueError): - client = BigtableClient(client_options=options, transport=transport,) + client = BigtableClient( + client_options=options, + transport=transport, + ) # It is an error to provide an api_key and a credential. 
options = mock.Mock() @@ -2280,7 +2441,8 @@ def test_credentials_transport_error(): ) with pytest.raises(ValueError): client = BigtableClient( - client_options={"scopes": ["1", "2"]}, transport=transport, + client_options={"scopes": ["1", "2"]}, + transport=transport, ) @@ -2310,7 +2472,10 @@ def test_transport_get_channel(): @pytest.mark.parametrize( "transport_class", - [transports.BigtableGrpcTransport, transports.BigtableGrpcAsyncIOTransport,], + [ + transports.BigtableGrpcTransport, + transports.BigtableGrpcAsyncIOTransport, + ], ) def test_transport_adc(transport_class): # Test default credentials are used if not provided. @@ -2322,8 +2487,13 @@ def test_transport_adc(transport_class): def test_transport_grpc_default(): # A client should use the gRPC transport by default. - client = BigtableClient(credentials=ga_credentials.AnonymousCredentials(),) - assert isinstance(client.transport, transports.BigtableGrpcTransport,) + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.BigtableGrpcTransport, + ) def test_bigtable_base_transport_error(): @@ -2374,7 +2544,8 @@ def test_bigtable_base_transport_with_credentials_file(): Transport.return_value = None load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) transport = transports.BigtableTransport( - credentials_file="credentials.json", quota_project_id="octopus", + credentials_file="credentials.json", + quota_project_id="octopus", ) load_creds.assert_called_once_with( "credentials.json", @@ -2423,7 +2594,10 @@ def test_bigtable_auth_adc(): @pytest.mark.parametrize( "transport_class", - [transports.BigtableGrpcTransport, transports.BigtableGrpcAsyncIOTransport,], + [ + transports.BigtableGrpcTransport, + transports.BigtableGrpcAsyncIOTransport, + ], ) def test_bigtable_transport_auth_adc(transport_class): # If credentials and host are not provided, the transport class should use @@ -2554,7 +2728,8 @@ def test_bigtable_grpc_transport_channel(): # Check that channel is used if provided. transport = transports.BigtableGrpcTransport( - host="squid.clam.whelk", channel=channel, + host="squid.clam.whelk", + channel=channel, ) assert transport.grpc_channel == channel assert transport._host == "squid.clam.whelk:443" @@ -2566,7 +2741,8 @@ def test_bigtable_grpc_asyncio_transport_channel(): # Check that channel is used if provided. 
transport = transports.BigtableGrpcAsyncIOTransport( - host="squid.clam.whelk", channel=channel, + host="squid.clam.whelk", + channel=channel, ) assert transport.grpc_channel == channel assert transport._host == "squid.clam.whelk:443" @@ -2669,7 +2845,8 @@ def test_instance_path(): project = "squid" instance = "clam" expected = "projects/{project}/instances/{instance}".format( - project=project, instance=instance, + project=project, + instance=instance, ) actual = BigtableClient.instance_path(project, instance) assert expected == actual @@ -2692,7 +2869,9 @@ def test_table_path(): instance = "nudibranch" table = "cuttlefish" expected = "projects/{project}/instances/{instance}/tables/{table}".format( - project=project, instance=instance, table=table, + project=project, + instance=instance, + table=table, ) actual = BigtableClient.table_path(project, instance, table) assert expected == actual @@ -2733,7 +2912,9 @@ def test_parse_common_billing_account_path(): def test_common_folder_path(): folder = "squid" - expected = "folders/{folder}".format(folder=folder,) + expected = "folders/{folder}".format( + folder=folder, + ) actual = BigtableClient.common_folder_path(folder) assert expected == actual @@ -2751,7 +2932,9 @@ def test_parse_common_folder_path(): def test_common_organization_path(): organization = "whelk" - expected = "organizations/{organization}".format(organization=organization,) + expected = "organizations/{organization}".format( + organization=organization, + ) actual = BigtableClient.common_organization_path(organization) assert expected == actual @@ -2769,7 +2952,9 @@ def test_parse_common_organization_path(): def test_common_project_path(): project = "oyster" - expected = "projects/{project}".format(project=project,) + expected = "projects/{project}".format( + project=project, + ) actual = BigtableClient.common_project_path(project) assert expected == actual @@ -2789,7 +2974,8 @@ def test_common_location_path(): project = "cuttlefish" location = "mussel" expected = "projects/{project}/locations/{location}".format( - project=project, location=location, + project=project, + location=location, ) actual = BigtableClient.common_location_path(project, location) assert expected == actual @@ -2814,7 +3000,8 @@ def test_client_with_default_client_info(): transports.BigtableTransport, "_prep_wrapped_messages" ) as prep: client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, ) prep.assert_called_once_with(client_info) @@ -2823,7 +3010,8 @@ def test_client_with_default_client_info(): ) as prep: transport_class = BigtableClient.get_transport_class() transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, ) prep.assert_called_once_with(client_info) @@ -2831,7 +3019,8 @@ def test_client_with_default_client_info(): @pytest.mark.asyncio async def test_transport_close_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", ) with mock.patch.object( type(getattr(client.transport, "grpc_channel")), "close" diff --git a/packages/google-cloud-bigtable/tests/unit/test_app_profile.py b/packages/google-cloud-bigtable/tests/unit/test_app_profile.py index 07c686fb884e..d45c9ca0aeea 100644 --- 
a/packages/google-cloud-bigtable/tests/unit/test_app_profile.py +++ b/packages/google-cloud-bigtable/tests/unit/test_app_profile.py @@ -70,7 +70,10 @@ def test_app_profile_constructor_explicit(): instance = _Instance(INSTANCE_ID, client) app_profile1 = _make_app_profile( - APP_PROFILE_ID, instance, routing_policy_type=ANY, description=DESCRIPTION_1, + APP_PROFILE_ID, + instance, + routing_policy_type=ANY, + description=DESCRIPTION_1, ) app_profile2 = _make_app_profile( APP_PROFILE_ID_2, @@ -176,7 +179,8 @@ def test_app_profile_from_pb_success_w_routing_single(): allow_transactional_writes = True routing = RoutingPolicyType.SINGLE single_cluster_routing = data_v2_pb2.AppProfile.SingleClusterRouting( - cluster_id=CLUSTER_ID, allow_transactional_writes=allow_transactional_writes, + cluster_id=CLUSTER_ID, + allow_transactional_writes=allow_transactional_writes, ) app_profile_pb = data_v2_pb2.AppProfile( @@ -253,7 +257,10 @@ def test_app_profile_reload_w_routing_any(): description = "routing policy any" app_profile = _make_app_profile( - APP_PROFILE_ID, instance, routing_policy_type=routing, description=description, + APP_PROFILE_ID, + instance, + routing_policy_type=routing, + description=description, ) # Create response_pb @@ -345,7 +352,10 @@ def test_app_profile_create_w_routing_any(): ignore_warnings = True app_profile = _make_app_profile( - APP_PROFILE_ID, instance, routing_policy_type=routing, description=description, + APP_PROFILE_ID, + instance, + routing_policy_type=routing, + description=description, ) expected_request_app_profile = app_profile._to_pb() diff --git a/packages/google-cloud-bigtable/tests/unit/test_backup.py b/packages/google-cloud-bigtable/tests/unit/test_backup.py index 92e9d7307e7a..9882ca339c3b 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_backup.py +++ b/packages/google-cloud-bigtable/tests/unit/test_backup.py @@ -217,7 +217,10 @@ def test_backup_cluster_setter(): def test_backup_parent_none(): - backup = _make_backup(BACKUP_ID, _Instance(INSTANCE_NAME),) + backup = _make_backup( + BACKUP_ID, + _Instance(INSTANCE_NAME), + ) assert backup.parent is None @@ -361,7 +364,8 @@ def test_backup_create_w_grpc_error(): ) backup_pb = table.Backup( - source_table=TABLE_NAME, expire_time=_datetime_to_pb_timestamp(timestamp), + source_table=TABLE_NAME, + expire_time=_datetime_to_pb_timestamp(timestamp), ) with pytest.raises(GoogleAPICallError): @@ -390,7 +394,8 @@ def test_backup_create_w_already_exists(): ) backup_pb = table.Backup( - source_table=TABLE_NAME, expire_time=_datetime_to_pb_timestamp(timestamp), + source_table=TABLE_NAME, + expire_time=_datetime_to_pb_timestamp(timestamp), ) with pytest.raises(Conflict): @@ -419,7 +424,8 @@ def test_backup_create_w_instance_not_found(): ) backup_pb = table.Backup( - source_table=TABLE_NAME, expire_time=_datetime_to_pb_timestamp(timestamp), + source_table=TABLE_NAME, + expire_time=_datetime_to_pb_timestamp(timestamp), ) with pytest.raises(NotFound): @@ -444,7 +450,9 @@ def test_backup_create_w_cluster_not_set(): def test_backup_create_w_table_not_set(): backup = _make_backup( - BACKUP_ID, _Instance(INSTANCE_NAME), expire_time=_make_timestamp(), + BACKUP_ID, + _Instance(INSTANCE_NAME), + expire_time=_make_timestamp(), ) with pytest.raises(ValueError): @@ -452,7 +460,11 @@ def test_backup_create_w_table_not_set(): def test_backup_create_w_expire_time_not_set(): - backup = _make_backup(BACKUP_ID, _Instance(INSTANCE_NAME), table_id=TABLE_ID,) + backup = _make_backup( + BACKUP_ID, + _Instance(INSTANCE_NAME), + 
table_id=TABLE_ID, + ) with pytest.raises(ValueError): backup.create(CLUSTER_ID) @@ -478,7 +490,8 @@ def test_backup_create_success(): ) backup_pb = table.Backup( - source_table=TABLE_NAME, expire_time=_datetime_to_pb_timestamp(timestamp), + source_table=TABLE_NAME, + expire_time=_datetime_to_pb_timestamp(timestamp), ) future = backup.create(CLUSTER_ID) @@ -657,7 +670,8 @@ def test_backup_update_expire_time_w_grpc_error(): backup.update_expire_time(expire_time) backup_update = table.Backup( - name=BACKUP_NAME, expire_time=_datetime_to_pb_timestamp(expire_time), + name=BACKUP_NAME, + expire_time=_datetime_to_pb_timestamp(expire_time), ) update_mask = field_mask_pb2.FieldMask(paths=["expire_time"]) api.update_backup.assert_called_once_with( @@ -682,7 +696,8 @@ def test_backup_update_expire_time_w_not_found(): backup.update_expire_time(expire_time) backup_update = table.Backup( - name=BACKUP_NAME, expire_time=_datetime_to_pb_timestamp(expire_time), + name=BACKUP_NAME, + expire_time=_datetime_to_pb_timestamp(expire_time), ) update_mask = field_mask_pb2.FieldMask(paths=["expire_time"]) api.update_backup.assert_called_once_with( @@ -705,7 +720,8 @@ def test_backup_update_expire_time_success(): backup.update_expire_time(expire_time) backup_update = table.Backup( - name=BACKUP_NAME, expire_time=_datetime_to_pb_timestamp(expire_time), + name=BACKUP_NAME, + expire_time=_datetime_to_pb_timestamp(expire_time), ) update_mask = field_mask_pb2.FieldMask(paths=["expire_time"]) api.update_backup.assert_called_once_with( diff --git a/packages/google-cloud-bigtable/tests/unit/test_client.py b/packages/google-cloud-bigtable/tests/unit/test_client.py index 00f8524bc894..5944c58a3701 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/test_client.py @@ -162,7 +162,10 @@ def test_client_constructor_w_both_admin_and_read_only(): credentials = _make_credentials() with pytest.raises(ValueError): _make_client( - project=PROJECT, credentials=credentials, admin=True, read_only=True, + project=PROJECT, + credentials=credentials, + admin=True, + read_only=True, ) @@ -275,7 +278,9 @@ def test_client__emulator_channel_w_sync(): assert channel is patched.return_value patched.assert_called_once_with( - emulator_host, lcc.return_value, options=options, + emulator_host, + lcc.return_value, + options=options, ) @@ -295,7 +300,9 @@ def test_client__emulator_channel_w_async(): assert channel is patched.return_value patched.assert_called_once_with( - emulator_host, lcc.return_value, options=options, + emulator_host, + lcc.return_value, + options=options, ) @@ -328,7 +335,8 @@ def test_client__local_composite_credentials(): wsir_patched.assert_called_once_with(client._credentials, None) request_patched.assert_called_once_with() amp_patched.assert_called_once_with( - wsir_patched.return_value, request_patched.return_value, + wsir_patched.return_value, + request_patched.return_value, ) grpc_mcc.assert_called_once_with(amp_patched.return_value) grpc_lcc.assert_called_once_with() @@ -344,7 +352,8 @@ def _create_gapic_client_channel_helper(endpoint=None, emulator_host=None): if endpoint is not None: client._client_options = mock.Mock( - spec=["api_endpoint"], api_endpoint=endpoint, + spec=["api_endpoint"], + api_endpoint=endpoint, ) expected_host = endpoint else: @@ -363,10 +372,12 @@ def _create_gapic_client_channel_helper(endpoint=None, emulator_host=None): if emulator_host is not None: client._emulator_channel.assert_called_once_with( - transport=grpc_transport, 
options=_GRPC_CHANNEL_OPTIONS, + transport=grpc_transport, + options=_GRPC_CHANNEL_OPTIONS, ) grpc_transport.assert_called_once_with( - channel=client._emulator_channel.return_value, host=expected_host, + channel=client._emulator_channel.return_value, + host=expected_host, ) else: grpc_transport.create_channel.assert_called_once_with( @@ -375,7 +386,8 @@ def _create_gapic_client_channel_helper(endpoint=None, emulator_host=None): options=_GRPC_CHANNEL_OPTIONS, ) grpc_transport.assert_called_once_with( - channel=grpc_transport.create_channel.return_value, host=expected_host, + channel=grpc_transport.create_channel.return_value, + host=expected_host, ) @@ -490,7 +502,10 @@ def test_client_table_admin_client_not_initialized_w_client_info(): credentials = _make_credentials() client_info = mock.Mock() client = _make_client( - project=PROJECT, credentials=credentials, admin=True, client_info=client_info, + project=PROJECT, + credentials=credentials, + admin=True, + client_info=client_info, ) table_admin_client = client.table_admin_client @@ -557,7 +572,10 @@ def test_client_instance_admin_client_not_initialized_w_client_info(): credentials = _make_credentials() client_info = mock.Mock() client = _make_client( - project=PROJECT, credentials=credentials, admin=True, client_info=client_info, + project=PROJECT, + credentials=credentials, + admin=True, + client_info=client_info, ) instance_admin_client = client.instance_admin_client diff --git a/packages/google-cloud-bigtable/tests/unit/test_cluster.py b/packages/google-cloud-bigtable/tests/unit/test_cluster.py index 56c0a3cc57ea..cb0312b0c079 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_cluster.py +++ b/packages/google-cloud-bigtable/tests/unit/test_cluster.py @@ -217,7 +217,8 @@ def test_cluster_from_pb_w_autoscaling(): cluster_config = data_v2_pb2.Cluster.ClusterConfig( cluster_autoscaling_config=data_v2_pb2.Cluster.ClusterAutoscalingConfig( autoscaling_limits=data_v2_pb2.AutoscalingLimits( - min_serve_nodes=MIN_SERVE_NODES, max_serve_nodes=MAX_SERVE_NODES, + min_serve_nodes=MIN_SERVE_NODES, + max_serve_nodes=MAX_SERVE_NODES, ), autoscaling_targets=data_v2_pb2.AutoscalingTargets( cpu_utilization_percent=CPU_UTILIZATION_PERCENT @@ -569,7 +570,8 @@ def test_cluster_create_w_autoscaling(): cluster_config = instance_v2_pb2.Cluster.ClusterConfig( cluster_autoscaling_config=instance_v2_pb2.Cluster.ClusterAutoscalingConfig( autoscaling_limits=instance_v2_pb2.AutoscalingLimits( - min_serve_nodes=MIN_SERVE_NODES, max_serve_nodes=MAX_SERVE_NODES, + min_serve_nodes=MIN_SERVE_NODES, + max_serve_nodes=MAX_SERVE_NODES, ), autoscaling_targets=instance_v2_pb2.AutoscalingTargets( cpu_utilization_percent=CPU_UTILIZATION_PERCENT diff --git a/packages/google-cloud-bigtable/tests/unit/test_column_family.py b/packages/google-cloud-bigtable/tests/unit/test_column_family.py index 9d4632e2a632..b464024a740e 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_column_family.py +++ b/packages/google-cloud-bigtable/tests/unit/test_column_family.py @@ -265,7 +265,7 @@ def _make_client(*args, **kwargs): def test_column_family_constructor(): - column_family_id = u"column-family-id" + column_family_id = "column-family-id" table = object() gc_rule = object() column_family = _make_column_family(column_family_id, table, gc_rule=gc_rule) @@ -276,7 +276,7 @@ def test_column_family_constructor(): def test_column_family_name_property(): - column_family_id = u"column-family-id" + column_family_id = "column-family-id" table_name = "table_name" table = 
_Table(table_name) column_family = _make_column_family(column_family_id, table) diff --git a/packages/google-cloud-bigtable/tests/unit/test_instance.py b/packages/google-cloud-bigtable/tests/unit/test_instance.py index def7e3e38df1..c577adca5895 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_instance.py +++ b/packages/google-cloud-bigtable/tests/unit/test_instance.py @@ -110,7 +110,10 @@ def test_instance__update_from_pb_success(): state = enums.Instance.State.READY # todo type to type_? instance_pb = data_v2_pb2.Instance( - display_name=DISPLAY_NAME, type_=instance_type, labels=LABELS, state=state, + display_name=DISPLAY_NAME, + type_=instance_type, + labels=LABELS, + state=state, ) instance = _make_instance(None, None) @@ -309,7 +312,11 @@ def test_instance_create(): credentials = _make_credentials() client = _make_client(project=PROJECT, credentials=credentials, admin=True) instance = _make_instance( - INSTANCE_ID, client, DISPLAY_NAME, enums.Instance.Type.PRODUCTION, LABELS, + INSTANCE_ID, + client, + DISPLAY_NAME, + enums.Instance.Type.PRODUCTION, + LABELS, ) api, response = _instance_api_response_for_create() client._instance_admin_client = api @@ -327,7 +334,9 @@ def test_instance_create(): default_storage_type=enums.StorageType.UNSPECIFIED, ) instance_pb = Instance( - display_name=DISPLAY_NAME, type_=enums.Instance.Type.PRODUCTION, labels=LABELS, + display_name=DISPLAY_NAME, + type_=enums.Instance.Type.PRODUCTION, + labels=LABELS, ) cluster_id = "{}-cluster".format(INSTANCE_ID) api.create_instance.assert_called_once_with( @@ -352,7 +361,11 @@ def test_instance_create_w_clusters(): credentials = _make_credentials() client = _make_client(project=PROJECT, credentials=credentials, admin=True) instance = _make_instance( - INSTANCE_ID, client, DISPLAY_NAME, enums.Instance.Type.PRODUCTION, LABELS, + INSTANCE_ID, + client, + DISPLAY_NAME, + enums.Instance.Type.PRODUCTION, + LABELS, ) api, response = _instance_api_response_for_create() client._instance_admin_client = api @@ -393,7 +406,9 @@ def test_instance_create_w_clusters(): default_storage_type=enums.StorageType.UNSPECIFIED, ) instance_pb = instance_pb( - display_name=DISPLAY_NAME, type_=enums.Instance.Type.PRODUCTION, labels=LABELS, + display_name=DISPLAY_NAME, + type_=enums.Instance.Type.PRODUCTION, + labels=LABELS, ) api.create_instance.assert_called_once_with( request={ @@ -460,7 +475,7 @@ def test_instance_reload(): from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 from google.cloud.bigtable import enums - DISPLAY_NAME = u"hey-hi-hello" + DISPLAY_NAME = "hey-hi-hello" credentials = _make_credentials() client = _make_client(project=PROJECT, credentials=credentials, admin=True) instance = _make_instance(INSTANCE_ID, client) diff --git a/packages/google-cloud-bigtable/tests/unit/test_policy.py b/packages/google-cloud-bigtable/tests/unit/test_policy.py index 1b1adbed567f..77674517e0d8 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_policy.py +++ b/packages/google-cloud-bigtable/tests/unit/test_policy.py @@ -150,7 +150,11 @@ def test_policy_from_pb_w_condition(): }, } ] - message = policy_pb2.Policy(etag=ETAG, version=VERSION, bindings=BINDINGS,) + message = policy_pb2.Policy( + etag=ETAG, + version=VERSION, + bindings=BINDINGS, + ) policy = Policy.from_pb(message) assert policy.etag == ETAG assert policy.version == VERSION @@ -219,7 +223,9 @@ def test_policy_to_pb_w_condition(): version=VERSION, bindings=[ policy_pb2.Binding( - role=BIGTABLE_ADMIN_ROLE, members=sorted(members), 
condition=condition, + role=BIGTABLE_ADMIN_ROLE, + members=sorted(members), + condition=condition, ) ], ) diff --git a/packages/google-cloud-bigtable/tests/unit/test_row.py b/packages/google-cloud-bigtable/tests/unit/test_row.py index 77475631491e..49bbfc45ce79 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row.py @@ -70,7 +70,7 @@ def test_direct_row_constructor(): def test_direct_row_constructor_with_unicode(): - row_key = u"row_key" + row_key = "row_key" row_key_bytes = b"row_key" table = object() @@ -97,8 +97,8 @@ def test_direct_row_get_mutations_size(): row_key = b"row_key" row = _make_direct_row(row_key, None) - column_family_id1 = u"column_family_id1" - column_family_id2 = u"column_family_id2" + column_family_id1 = "column_family_id1" + column_family_id2 = "column_family_id2" column1 = b"column1" column2 = b"column2" number_of_bytes = 1 * 1024 * 1024 @@ -124,7 +124,7 @@ def _set_cell_helper( import struct row_key = b"row_key" - column_family_id = u"column_family_id" + column_family_id = "column_family_id" if column is None: column = b"column" table = object() @@ -151,7 +151,7 @@ def test_direct_row_set_cell(): def test_direct_row_set_cell_with_string_column(): column_bytes = b"column" - column_non_bytes = u"column" + column_non_bytes = "column" _set_cell_helper(column=column_non_bytes, column_bytes=column_bytes) @@ -163,7 +163,7 @@ def test_direct_row_set_cell_with_integer_value(): def test_direct_row_set_cell_with_non_bytes_value(): row_key = b"row_key" column = b"column" - column_family_id = u"column_family_id" + column_family_id = "column_family_id" table = object() row = _make_direct_row(row_key, table) @@ -208,7 +208,7 @@ def _delete_cells(self, *args, **kwargs): row_key = b"row_key" column = b"column" - column_family_id = u"column_family_id" + column_family_id = "column_family_id" table = object() mock_row = MockRow(row_key, table) @@ -227,7 +227,7 @@ def _delete_cells(self, *args, **kwargs): def test_direct_row_delete_cells_non_iterable(): row_key = b"row_key" - column_family_id = u"column_family_id" + column_family_id = "column_family_id" table = object() row = _make_direct_row(row_key, table) @@ -240,7 +240,7 @@ def test_direct_row_delete_cells_all_columns(): from google.cloud.bigtable.row import DirectRow row_key = b"row_key" - column_family_id = u"column_family_id" + column_family_id = "column_family_id" table = object() row = _make_direct_row(row_key, table) @@ -255,7 +255,7 @@ def test_direct_row_delete_cells_all_columns(): def test_direct_row_delete_cells_no_columns(): row_key = b"row_key" - column_family_id = u"column_family_id" + column_family_id = "column_family_id" table = object() row = _make_direct_row(row_key, table) @@ -268,7 +268,7 @@ def test_direct_row_delete_cells_no_columns(): def _delete_cells_helper(time_range=None): row_key = b"row_key" column = b"column" - column_family_id = u"column_family_id" + column_family_id = "column_family_id" table = object() row = _make_direct_row(row_key, table) @@ -306,7 +306,7 @@ def test_direct_row_delete_cells_with_bad_column(): # the row's mutations in a bad state. 
row_key = b"row_key" column = b"column" - column_family_id = u"column_family_id" + column_family_id = "column_family_id" table = object() row = _make_direct_row(row_key, table) @@ -319,10 +319,10 @@ def test_direct_row_delete_cells_with_bad_column(): def test_direct_row_delete_cells_with_string_columns(): row_key = b"row_key" - column_family_id = u"column_family_id" - column1 = u"column1" + column_family_id = "column_family_id" + column1 = "column1" column1_bytes = b"column1" - column2 = u"column2" + column2 = "column2" column2_bytes = b"column2" table = object() @@ -348,7 +348,7 @@ def test_direct_row_commit(): project_id = "project-id" row_key = b"row_key" table_name = "projects/more-stuff" - column_family_id = u"column_family_id" + column_family_id = "column_family_id" column = b"column" credentials = _make_credentials() @@ -369,7 +369,7 @@ def test_direct_row_commit_with_exception(): project_id = "project-id" row_key = b"row_key" table_name = "projects/more-stuff" - column_family_id = u"column_family_id" + column_family_id = "column_family_id" column = b"column" credentials = _make_credentials() @@ -424,9 +424,9 @@ def test_conditional_row_commit(): row_key = b"row_key" table_name = "projects/more-stuff" app_profile_id = "app_profile_id" - column_family_id1 = u"column_family_id1" - column_family_id2 = u"column_family_id2" - column_family_id3 = u"column_family_id3" + column_family_id1 = "column_family_id1" + column_family_id2 = "column_family_id2" + column_family_id3 = "column_family_id3" column1 = b"column1" column2 = b"column2" @@ -535,7 +535,7 @@ def test_append_row_append_cell_value(): assert row._rule_pb_list == [] column = b"column" - column_family_id = u"column_family_id" + column_family_id = "column_family_id" value = b"bytes-val" row.append_cell_value(column_family_id, column, value) expected_pb = _ReadModifyWriteRulePB( @@ -551,7 +551,7 @@ def test_append_row_increment_cell_value(): assert row._rule_pb_list == [] column = b"column" - column_family_id = u"column_family_id" + column_family_id = "column_family_id" int_value = 281330 row.increment_cell_value(column_family_id, column, int_value) expected_pb = _ReadModifyWriteRulePB( @@ -571,7 +571,7 @@ def test_append_row_commit(): row_key = b"row_key" table_name = "projects/more-stuff" app_profile_id = "app_profile_id" - column_family_id = u"column_family_id" + column_family_id = "column_family_id" column = b"column" api = mock.create_autospec(BigtableClient) @@ -646,8 +646,8 @@ def test__parse_rmw_row_response(): from google.cloud._helpers import _datetime_from_microseconds from google.cloud.bigtable.row import _parse_rmw_row_response - col_fam1 = u"col-fam-id" - col_fam2 = u"col-fam-id2" + col_fam1 = "col-fam-id" + col_fam2 = "col-fam-id2" col_name1 = b"col-name1" col_name2 = b"col-name2" col_name3 = b"col-name3-but-other-fam" @@ -702,7 +702,7 @@ def test__parse_family_pb(): from google.cloud._helpers import _datetime_from_microseconds from google.cloud.bigtable.row import _parse_family_pb - col_fam1 = u"col-fam-id" + col_fam1 = "col-fam-id" col_name1 = b"col-name1" col_name2 = b"col-name2" cell_val1 = b"cell-val" diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_data.py b/packages/google-cloud-bigtable/tests/unit/test_row_data.py index 60a138800209..d647bbaba0e4 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_data.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_data.py @@ -22,7 +22,7 @@ TIMESTAMP_MICROS = 18738724000 # Make sure millis granularity ROW_KEY = b"row-key" -FAMILY_NAME = 
u"family" +FAMILY_NAME = "family" QUALIFIER = b"qualifier" TIMESTAMP_MICROS = 100 VALUE = b"value" @@ -64,7 +64,7 @@ def test_cell_from_pb(): def test_cell_from_pb_with_labels(): - labels = [u"label1", u"label2"] + labels = ["label1", "label2"] _cell_from_pb_test_helper(labels) @@ -156,8 +156,8 @@ def test_partial_row_data_to_dict(): cell2 = object() cell3 = object() - family_name1 = u"name1" - family_name2 = u"name2" + family_name1 = "name1" + family_name2 = "name2" qual1 = b"col1" qual2 = b"col2" qual3 = b"col3" @@ -178,7 +178,7 @@ def test_partial_row_data_to_dict(): def test_partial_row_data_cell_value(): - family_name = u"name1" + family_name = "name1" qualifier = b"col1" cell = _make_cell_pb(b"value-bytes") @@ -190,7 +190,7 @@ def test_partial_row_data_cell_value(): def test_partial_row_data_cell_value_invalid_index(): - family_name = u"name1" + family_name = "name1" qualifier = b"col1" cell = _make_cell_pb(b"") @@ -202,7 +202,7 @@ def test_partial_row_data_cell_value_invalid_index(): def test_partial_row_data_cell_value_invalid_column_family_key(): - family_name = u"name1" + family_name = "name1" qualifier = b"col1" partial_row_data = _make_partial_row_data(None) @@ -212,7 +212,7 @@ def test_partial_row_data_cell_value_invalid_column_family_key(): def test_partial_row_data_cell_value_invalid_column_key(): - family_name = u"name1" + family_name = "name1" qualifier = b"col1" partial_row_data = _make_partial_row_data(None) @@ -223,7 +223,7 @@ def test_partial_row_data_cell_value_invalid_column_key(): def test_partial_row_data_cell_values(): - family_name = u"name1" + family_name = "name1" qualifier = b"col1" cell = _make_cell_pb(b"value-bytes") @@ -238,7 +238,7 @@ def test_partial_row_data_cell_values(): def test_partial_row_data_cell_values_with_max_count(): - family_name = u"name1" + family_name = "name1" qualifier = b"col1" cell_1 = _make_cell_pb(b"value-bytes-1") cell_2 = _make_cell_pb(b"value-bytes-2") @@ -574,7 +574,7 @@ def test_partial_rows_data__copy_from_previous_unset(): cell = _PartialCellData() yrd._copy_from_previous(cell) assert cell.row_key == b"" - assert cell.family_name == u"" + assert cell.family_name == "" assert cell.qualifier is None assert cell.timestamp_micros == 0 assert cell.labels == [] @@ -582,7 +582,7 @@ def test_partial_rows_data__copy_from_previous_unset(): def test_partial_rows_data__copy_from_previous_blank(): ROW_KEY = "RK" - FAMILY_NAME = u"A" + FAMILY_NAME = "A" QUALIFIER = b"C" TIMESTAMP_MICROS = 100 LABELS = ["L1", "L2"] @@ -610,7 +610,7 @@ def test_partial_rows_data__copy_from_previous_filled(): from google.cloud.bigtable_v2.services.bigtable import BigtableClient ROW_KEY = "RK" - FAMILY_NAME = u"A" + FAMILY_NAME = "A" QUALIFIER = b"C" TIMESTAMP_MICROS = 100 LABELS = ["L1", "L2"] @@ -1324,13 +1324,13 @@ def _flatten_cells(prd): for qualifier, column in family.items(): for cell in column: yield { - u"rk": _bytes_to_unicode(row_key), - u"fm": family_name, - u"qual": _bytes_to_unicode(qualifier), - u"ts": _microseconds_from_datetime(cell.timestamp), - u"value": _bytes_to_unicode(cell.value), - u"label": u" ".join(cell.labels), - u"error": False, + "rk": _bytes_to_unicode(row_key), + "fm": family_name, + "qual": _bytes_to_unicode(qualifier), + "ts": _microseconds_from_datetime(cell.timestamp), + "value": _bytes_to_unicode(cell.value), + "label": " ".join(cell.labels), + "error": False, } @@ -1363,7 +1363,7 @@ def next(self): class _PartialCellData(object): row_key = b"" - family_name = u"" + family_name = "" qualifier = None timestamp_micros = 0 
last_scanned_row_key = "" diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_filters.py b/packages/google-cloud-bigtable/tests/unit/test_row_filters.py index 8c591e03cf33..b312cb942fdd 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_filters.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_filters.py @@ -92,7 +92,7 @@ def test_regex_filterconstructor(): def test_regex_filterconstructor_non_bytes(): from google.cloud.bigtable.row_filters import _RegexFilter - regex = u"abc" + regex = "abc" row_filter = _RegexFilter(regex) assert row_filter.regex == b"abc" @@ -183,7 +183,7 @@ def test_row_sample_filter_to_pb(): def test_family_name_regex_filter_to_pb(): from google.cloud.bigtable.row_filters import FamilyNameRegexFilter - regex = u"family-regex" + regex = "family-regex" row_filter = FamilyNameRegexFilter(regex) pb_val = row_filter.to_pb() expected_pb = _RowFilterPB(family_name_regex_filter=regex) @@ -450,7 +450,7 @@ def test_column_range_filter___ne__(): def test_column_range_filter_to_pb(): from google.cloud.bigtable.row_filters import ColumnRangeFilter - column_family_id = u"column-family-id" + column_family_id = "column-family-id" row_filter = ColumnRangeFilter(column_family_id) col_range_pb = _ColumnRangePB(family_name=column_family_id) expected_pb = _RowFilterPB(column_range_filter=col_range_pb) @@ -460,7 +460,7 @@ def test_column_range_filter_to_pb(): def test_column_range_filter_to_pb_inclusive_start(): from google.cloud.bigtable.row_filters import ColumnRangeFilter - column_family_id = u"column-family-id" + column_family_id = "column-family-id" column = b"column" row_filter = ColumnRangeFilter(column_family_id, start_column=column) col_range_pb = _ColumnRangePB( @@ -473,7 +473,7 @@ def test_column_range_filter_to_pb_inclusive_start(): def test_column_range_filter_to_pb_exclusive_start(): from google.cloud.bigtable.row_filters import ColumnRangeFilter - column_family_id = u"column-family-id" + column_family_id = "column-family-id" column = b"column" row_filter = ColumnRangeFilter( column_family_id, start_column=column, inclusive_start=False @@ -488,7 +488,7 @@ def test_column_range_filter_to_pb_exclusive_start(): def test_column_range_filter_to_pb_inclusive_end(): from google.cloud.bigtable.row_filters import ColumnRangeFilter - column_family_id = u"column-family-id" + column_family_id = "column-family-id" column = b"column" row_filter = ColumnRangeFilter(column_family_id, end_column=column) col_range_pb = _ColumnRangePB( @@ -501,7 +501,7 @@ def test_column_range_filter_to_pb_inclusive_end(): def test_column_range_filter_to_pb_exclusive_end(): from google.cloud.bigtable.row_filters import ColumnRangeFilter - column_family_id = u"column-family-id" + column_family_id = "column-family-id" column = b"column" row_filter = ColumnRangeFilter( column_family_id, end_column=column, inclusive_end=False @@ -526,7 +526,7 @@ def test_value_regex_filter_to_pb_w_bytes(): def test_value_regex_filter_to_pb_w_str(): from google.cloud.bigtable.row_filters import ValueRegexFilter - value = u"value-regex" + value = "value-regex" regex = value.encode("ascii") row_filter = ValueRegexFilter(value) pb_val = row_filter.to_pb() @@ -547,7 +547,7 @@ def test_exact_value_filter_to_pb_w_bytes(): def test_exact_value_filter_to_pb_w_str(): from google.cloud.bigtable.row_filters import ExactValueFilter - value = u"value-regex" + value = "value-regex" regex = value.encode("ascii") row_filter = ExactValueFilter(value) pb_val = row_filter.to_pb() @@ -846,7 +846,7 @@ def 
test_apply_label_filter___ne__(): def test_apply_label_filter_to_pb(): from google.cloud.bigtable.row_filters import ApplyLabelFilter - label = u"label" + label = "label" row_filter = ApplyLabelFilter(label) pb_val = row_filter.to_pb() expected_pb = _RowFilterPB(apply_label_transformer=label) diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index eacde3c3e633..fb4ec3539445 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -197,7 +197,9 @@ def test_table_name(): spec=["project", "table_data_client"], ) instance = mock.Mock( - _client=client, instance_id=INSTANCE_ID, spec=["_client", "instance_id"], + _client=client, + instance_id=INSTANCE_ID, + spec=["_client", "instance_id"], ) table = _make_table(TABLE_ID, instance) @@ -964,7 +966,9 @@ def test_table_read_retry_rows(): assert result.row_key == ROW_KEY_2 expected_request = _create_row_request( - table.name, start_key=ROW_KEY_1, end_key=ROW_KEY_2, + table.name, + start_key=ROW_KEY_1, + end_key=ROW_KEY_2, ) data_api.read_rows.mock_calls = [expected_request] * 3 @@ -1024,7 +1028,9 @@ def test_table_yield_retry_rows(): assert result.row_key == ROW_KEY_2 expected_request = _create_row_request( - table.name, start_key=ROW_KEY_1, end_key=ROW_KEY_2, + table.name, + start_key=ROW_KEY_1, + end_key=ROW_KEY_2, ) data_api.read_rows.mock_calls = [expected_request] * 3 @@ -1095,7 +1101,9 @@ def test_table_yield_rows_with_row_set(): assert rows[2].row_key == ROW_KEY_3 expected_request = _create_row_request( - table.name, start_key=ROW_KEY_1, end_key=ROW_KEY_2, + table.name, + start_key=ROW_KEY_1, + end_key=ROW_KEY_2, ) expected_request.rows.row_keys.append(ROW_KEY_3) data_api.read_rows.assert_called_once_with(expected_request, timeout=61.0) @@ -1146,7 +1154,8 @@ def test_table_truncate_w_timeout(): assert result is None table_api.drop_row_range.assert_called_once_with( - request={"name": TABLE_NAME, "delete_all_data_from_table": True}, timeout=120, + request={"name": TABLE_NAME, "delete_all_data_from_table": True}, + timeout=120, ) @@ -1183,7 +1192,8 @@ def test_table_drop_by_prefix_w_timeout(): assert result is None table_api.drop_row_range.assert_called_once_with( - request={"name": TABLE_NAME, "row_key_prefix": row_key_prefix}, timeout=120, + request={"name": TABLE_NAME, "row_key_prefix": row_key_prefix}, + timeout=120, ) @@ -1326,7 +1336,11 @@ def test_table_backup_factory_non_defaults(): instance = Instance(INSTANCE_ID, None) table = _make_table(TABLE_ID, instance) timestamp = datetime.datetime.utcnow().replace(tzinfo=UTC) - backup = table.backup(BACKUP_ID, cluster_id=CLUSTER_ID, expire_time=timestamp,) + backup = table.backup( + BACKUP_ID, + cluster_id=CLUSTER_ID, + expire_time=timestamp, + ) assert isinstance(backup, Backup) assert backup.backup_id == BACKUP_ID @@ -1624,7 +1638,8 @@ def _do_mutate_retryable_rows_helper( if prior_status is None or prior_status.code in RETRYABLES: mutations = row._get_mutations().copy() # row clears on success entry = data_messages_v2_pb2.MutateRowsRequest.Entry( - row_key=row.row_key, mutations=mutations, + row_key=row.row_key, + mutations=mutations, ) expected_entries.append(entry) @@ -1698,7 +1713,9 @@ def test_rmrw_do_mutate_retryable_rows_w_timeout(): timeout = 5 # seconds _do_mutate_retryable_rows_helper( - row_cells, responses, timeout=timeout, + row_cells, + responses, + timeout=timeout, ) @@ -1720,7 +1737,9 @@ def 
test_rmrw_do_mutate_retryable_rows_w_retryable_error(): responses = () _do_mutate_retryable_rows_helper( - row_cells, responses, retryable_error=True, + row_cells, + responses, + retryable_error=True, ) @@ -1744,7 +1763,9 @@ def test_rmrw_do_mutate_retryable_rows_retry(): responses = [SUCCESS, RETRYABLE_1, NON_RETRYABLE] _do_mutate_retryable_rows_helper( - row_cells, responses, raising_retry=True, + row_cells, + responses, + raising_retry=True, ) From c68087e728be21fca0804e28160250c222e24f2e Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Wed, 30 Mar 2022 16:56:29 +0000 Subject: [PATCH 587/892] chore(python): add E231 to .flake8 ignore list (#546) Source-Link: https://github.com/googleapis/synthtool/commit/7ff4aad2ec5af0380e8bd6da1fa06eaadf24ec81 Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:462782b0b492346b2d9099aaff52206dd30bc8e031ea97082e6facecc2373244 --- packages/google-cloud-bigtable/.flake8 | 2 +- packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/.flake8 b/packages/google-cloud-bigtable/.flake8 index 29227d4cf419..2e438749863d 100644 --- a/packages/google-cloud-bigtable/.flake8 +++ b/packages/google-cloud-bigtable/.flake8 @@ -16,7 +16,7 @@ # Generated by synthtool. DO NOT EDIT! [flake8] -ignore = E203, E266, E501, W503 +ignore = E203, E231, E266, E501, W503 exclude = # Exclude generated code. **/proto/** diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 87dd00611576..9e0a9356b6eb 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,4 +13,4 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:7cffbc10910c3ab1b852c05114a08d374c195a81cdec1d4a67a1d129331d0bfe + digest: sha256:462782b0b492346b2d9099aaff52206dd30bc8e031ea97082e6facecc2373244 From 76853a461ca15fb55d4ccfc5a13bbe7ea6b67a72 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Fri, 1 Apr 2022 00:18:27 +0000 Subject: [PATCH 588/892] chore(python): update .pre-commit-config.yaml to use black==22.3.0 (#548) Source-Link: https://github.com/googleapis/synthtool/commit/7804ade3daae0d66649bee8df6c55484c6580b8d Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:eede5672562a32821444a8e803fb984a6f61f2237ea3de229d2de24453f4ae7d --- packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml | 3 ++- packages/google-cloud-bigtable/.pre-commit-config.yaml | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 9e0a9356b6eb..22cc254afa2c 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,4 +13,5 @@ # limitations under the License. 
docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:462782b0b492346b2d9099aaff52206dd30bc8e031ea97082e6facecc2373244 + digest: sha256:eede5672562a32821444a8e803fb984a6f61f2237ea3de229d2de24453f4ae7d +# created: 2022-03-30T23:44:26.560599165Z diff --git a/packages/google-cloud-bigtable/.pre-commit-config.yaml b/packages/google-cloud-bigtable/.pre-commit-config.yaml index 62eb5a77d9a3..46d237160f6d 100644 --- a/packages/google-cloud-bigtable/.pre-commit-config.yaml +++ b/packages/google-cloud-bigtable/.pre-commit-config.yaml @@ -22,7 +22,7 @@ repos: - id: end-of-file-fixer - id: check-yaml - repo: https://github.com/psf/black - rev: 19.10b0 + rev: 22.3.0 hooks: - id: black - repo: https://gitlab.com/pycqa/flake8 From 7667c9cbf150dd09ac6fe0f90b3b094bfe833e27 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Fri, 1 Apr 2022 02:06:23 +0000 Subject: [PATCH 589/892] chore(python): Enable size-label bot (#550) Source-Link: https://github.com/googleapis/synthtool/commit/06e82790dd719a165ad32b8a06f8f6ec3e3cae0f Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:b3500c053313dc34e07b1632ba9e4e589f4f77036a7cf39e1fe8906811ae0fce --- packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml | 4 ++-- packages/google-cloud-bigtable/.github/auto-label.yaml | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) create mode 100644 packages/google-cloud-bigtable/.github/auto-label.yaml diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 22cc254afa2c..58a0b153bf0e 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:eede5672562a32821444a8e803fb984a6f61f2237ea3de229d2de24453f4ae7d -# created: 2022-03-30T23:44:26.560599165Z + digest: sha256:b3500c053313dc34e07b1632ba9e4e589f4f77036a7cf39e1fe8906811ae0fce +# created: 2022-04-01T01:42:03.609279246Z diff --git a/packages/google-cloud-bigtable/.github/auto-label.yaml b/packages/google-cloud-bigtable/.github/auto-label.yaml new file mode 100644 index 000000000000..09c8d735b456 --- /dev/null +++ b/packages/google-cloud-bigtable/.github/auto-label.yaml @@ -0,0 +1,2 @@ +requestsize: + enabled: true From 507aa1d3003d89919fbc30349fc81fae67a2738e Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Fri, 1 Apr 2022 19:32:41 +0000 Subject: [PATCH 590/892] chore(python): refactor unit / system test dependency install (#551) Source-Link: https://github.com/googleapis/synthtool/commit/993985f0fc4b37152e588f0549bcbdaf34666023 Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:1894490910e891a385484514b22eb5133578897eb5b3c380e6d8ad475c6647cd --- .../.github/.OwlBot.lock.yaml | 4 +- packages/google-cloud-bigtable/noxfile.py | 105 ++++++++++++++---- 2 files changed, 87 insertions(+), 22 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 58a0b153bf0e..fa5762290c5b 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. 
docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:b3500c053313dc34e07b1632ba9e4e589f4f77036a7cf39e1fe8906811ae0fce -# created: 2022-04-01T01:42:03.609279246Z + digest: sha256:1894490910e891a385484514b22eb5133578897eb5b3c380e6d8ad475c6647cd +# created: 2022-04-01T15:48:07.524222836Z diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index 73ebd799cca5..9d1df22aef5e 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -20,16 +20,40 @@ import os import pathlib import shutil +import warnings import nox - BLACK_VERSION = "black==22.3.0" BLACK_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"] DEFAULT_PYTHON_VERSION = "3.8" -SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"] + UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"] +UNIT_TEST_STANDARD_DEPENDENCIES = [ + "mock", + "asyncmock", + "pytest", + "pytest-cov", + "pytest-asyncio", +] +UNIT_TEST_EXTERNAL_DEPENDENCIES = [] +UNIT_TEST_LOCAL_DEPENDENCIES = [] +UNIT_TEST_DEPENDENCIES = [] +UNIT_TEST_EXTRAS = [] +UNIT_TEST_EXTRAS_BY_PYTHON = {} + +SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"] +SYSTEM_TEST_STANDARD_DEPENDENCIES = [ + "mock", + "pytest", + "google-cloud-testutils", +] +SYSTEM_TEST_EXTERNAL_DEPENDENCIES = [] +SYSTEM_TEST_LOCAL_DEPENDENCIES = [] +SYSTEM_TEST_DEPENDENCIES = [] +SYSTEM_TEST_EXTRAS = [] +SYSTEM_TEST_EXTRAS_BY_PYTHON = {} CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute() @@ -93,23 +117,41 @@ def lint_setup_py(session): session.run("python", "setup.py", "check", "--restructuredtext", "--strict") +def install_unittest_dependencies(session, *constraints): + standard_deps = UNIT_TEST_STANDARD_DEPENDENCIES + UNIT_TEST_DEPENDENCIES + session.install(*standard_deps, *constraints) + + if UNIT_TEST_EXTERNAL_DEPENDENCIES: + warnings.warn( + "'unit_test_external_dependencies' is deprecated. Instead, please " + "use 'unit_test_dependencies' or 'unit_test_local_dependencies'.", + DeprecationWarning, + ) + session.install(*UNIT_TEST_EXTERNAL_DEPENDENCIES, *constraints) + + if UNIT_TEST_LOCAL_DEPENDENCIES: + session.install(*UNIT_TEST_LOCAL_DEPENDENCIES, *constraints) + + if UNIT_TEST_EXTRAS_BY_PYTHON: + extras = UNIT_TEST_EXTRAS_BY_PYTHON.get(session.python, []) + elif UNIT_TEST_EXTRAS: + extras = UNIT_TEST_EXTRAS + else: + extras = [] + + if extras: + session.install("-e", f".[{','.join(extras)}]", *constraints) + else: + session.install("-e", ".", *constraints) + + def default(session): # Install all test dependencies, then install this package in-place. constraints_path = str( CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt" ) - session.install( - "mock", - "asyncmock", - "pytest", - "pytest-cov", - "pytest-asyncio", - "-c", - constraints_path, - ) - - session.install("-e", ".", "-c", constraints_path) + install_unittest_dependencies(session, "-c", constraints_path) # Run py.test against the unit tests. session.run( @@ -133,6 +175,35 @@ def unit(session): default(session) +def install_systemtest_dependencies(session, *constraints): + + # Use pre-release gRPC for system tests. 
+ session.install("--pre", "grpcio") + + session.install(*SYSTEM_TEST_STANDARD_DEPENDENCIES, *constraints) + + if SYSTEM_TEST_EXTERNAL_DEPENDENCIES: + session.install(*SYSTEM_TEST_EXTERNAL_DEPENDENCIES, *constraints) + + if SYSTEM_TEST_LOCAL_DEPENDENCIES: + session.install("-e", *SYSTEM_TEST_LOCAL_DEPENDENCIES, *constraints) + + if SYSTEM_TEST_DEPENDENCIES: + session.install("-e", *SYSTEM_TEST_DEPENDENCIES, *constraints) + + if SYSTEM_TEST_EXTRAS_BY_PYTHON: + extras = SYSTEM_TEST_EXTRAS_BY_PYTHON.get(session.python, []) + elif SYSTEM_TEST_EXTRAS: + extras = SYSTEM_TEST_EXTRAS + else: + extras = [] + + if extras: + session.install("-e", f".[{','.join(extras)}]", *constraints) + else: + session.install("-e", ".", *constraints) + + @nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS) def system_emulated(session): import subprocess @@ -182,13 +253,7 @@ def system(session): if not system_test_exists and not system_test_folder_exists: session.skip("System tests were not found") - # Use pre-release gRPC for system tests. - session.install("--pre", "grpcio") - - # Install all test dependencies, then install this package into the - # virtualenv's dist-packages. - session.install("mock", "pytest", "google-cloud-testutils", "-c", constraints_path) - session.install("-e", ".", "-c", constraints_path) + install_systemtest_dependencies(session, "-c", constraints_path) # Run py.test against the system tests. if system_test_exists: From beec8ecb2c675c0db384b84a6ab4acbe69393de8 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Sun, 3 Apr 2022 19:40:13 +0000 Subject: [PATCH 591/892] docs: Update `cpu_utilization_percent` limit (#547) - [ ] Regenerate this pull request now. docs: Remove the limitation that all clusters in a CMEK instance must use the same key PiperOrigin-RevId: 438385300 Source-Link: https://github.com/googleapis/googleapis/commit/c59f02e87d3ce01699ebf2adda7381af7c5eca31 Source-Link: https://github.com/googleapis/googleapis-gen/commit/04d03d17aafa7b4422f73c93600f040542817fcd Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiMDRkMDNkMTdhYWZhN2I0NDIyZjczYzkzNjAwZjA0MDU0MjgxN2ZjZCJ9 --- .../google/cloud/bigtable_admin_v2/types/instance.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py index 76733b615107..26c502b049f4 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py @@ -121,10 +121,10 @@ class AutoscalingTargets(proto.Message): Attributes: cpu_utilization_percent (int): - The cpu utilization that the Autoscaler - should be trying to achieve. This number is on a - scale from 0 (no utilization) to 100 (total - utilization). + The cpu utilization that the Autoscaler should be trying to + achieve. This number is on a scale from 0 (no utilization) + to 100 (total utilization), and is limited between 10 and + 80, otherwise it will return INVALID_ARGUMENT error. """ cpu_utilization_percent = proto.Field( @@ -254,8 +254,6 @@ class EncryptionConfig(proto.Message): key. 2) Only regional keys can be used and the region of the CMEK key must match the region of the cluster. - 3) All clusters within an instance must use the same CMEK - key. 
""" kms_key_name = proto.Field( From 2269c6b3322c4eab27f31a8ca014f8e8d7986c13 Mon Sep 17 00:00:00 2001 From: Anthonios Partheniou Date: Mon, 4 Apr 2022 12:32:20 -0400 Subject: [PATCH 592/892] chore: allow releases on previous major versions (#552) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore: allow releases on previous major versions * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md Co-authored-by: Owl Bot --- .../google-cloud-bigtable/.github/release-please.yml | 9 +++++++++ packages/google-cloud-bigtable/owlbot.py | 2 ++ 2 files changed, 11 insertions(+) diff --git a/packages/google-cloud-bigtable/.github/release-please.yml b/packages/google-cloud-bigtable/.github/release-please.yml index 466597e5b196..29601ad4692c 100644 --- a/packages/google-cloud-bigtable/.github/release-please.yml +++ b/packages/google-cloud-bigtable/.github/release-please.yml @@ -1,2 +1,11 @@ releaseType: python handleGHRelease: true +# NOTE: this section is generated by synthtool.languages.python +# See https://github.com/googleapis/synthtool/blob/master/synthtool/languages/python.py +branches: +- branch: v1 + handleGHRelease: true + releaseType: python +- branch: v0 + handleGHRelease: true + releaseType: python diff --git a/packages/google-cloud-bigtable/owlbot.py b/packages/google-cloud-bigtable/owlbot.py index 015670675a29..ba43cae949a0 100644 --- a/packages/google-cloud-bigtable/owlbot.py +++ b/packages/google-cloud-bigtable/owlbot.py @@ -219,6 +219,8 @@ def lint_setup_py(session): python.py_samples(skip_readmes=True) +python.configure_previous_major_version_branches() + s.replace( "samples/beam/noxfile.py", """INSTALL_LIBRARY_FROM_SOURCE \= os.environ.get\("INSTALL_LIBRARY_FROM_SOURCE", False\) in \( From ee3b0f18b6a3a0beff8961b9620e8001019534e1 Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Tue, 5 Apr 2022 14:27:12 -0700 Subject: [PATCH 593/892] chore(main): release 2.8.0 (#544) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- packages/google-cloud-bigtable/CHANGELOG.md | 14 ++++++++++++++ packages/google-cloud-bigtable/setup.py | 2 +- 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 45f5756a3d4c..09a408abaf49 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,20 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.8.0](https://github.com/googleapis/python-bigtable/compare/v2.7.1...v2.8.0) (2022-04-04) + + +### Features + +* Add ListHotTablets API method and protobufs ([#542](https://github.com/googleapis/python-bigtable/issues/542)) ([483f139](https://github.com/googleapis/python-bigtable/commit/483f139f5065d55378bd850c33e89db460119fc1)) + + +### Documentation + +* explain mutate vs mutate_rows ([#543](https://github.com/googleapis/python-bigtable/issues/543)) ([84cfb0a](https://github.com/googleapis/python-bigtable/commit/84cfb0abdfabd8aa2f292fc0bb7e6deab50f87f1)) +* Remove the limitation that all clusters in a CMEK instance must use the same key ([f008eea](https://github.com/googleapis/python-bigtable/commit/f008eea69a6c7c1a027cefc7f16d46042b524db1)) +* Update `cpu_utilization_percent` limit ([#547](https://github.com/googleapis/python-bigtable/issues/547)) 
([f008eea](https://github.com/googleapis/python-bigtable/commit/f008eea69a6c7c1a027cefc7f16d46042b524db1)) + ### [2.7.1](https://github.com/googleapis/python-bigtable/compare/v2.7.0...v2.7.1) (2022-03-17) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 5869995b28b1..351622914fd6 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -22,7 +22,7 @@ name = "google-cloud-bigtable" description = "Google Cloud Bigtable API client library" -version = "2.7.1" +version = "2.8.0" # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' From 9e5ff2da3c149964d2701c74a8bd0f204fe2699e Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Wed, 6 Apr 2022 10:52:17 +0000 Subject: [PATCH 594/892] chore(python): add license header to auto-label.yaml (#558) Source-Link: https://github.com/googleapis/synthtool/commit/eb78c980b52c7c6746d2edb77d9cf7aaa99a2aab Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:8a5d3f6a2e43ed8293f34e06a2f56931d1e88a2694c3bb11b15df4eb256ad163 --- .../google-cloud-bigtable/.github/.OwlBot.lock.yaml | 4 ++-- .../google-cloud-bigtable/.github/auto-label.yaml | 13 +++++++++++++ 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index fa5762290c5b..bc893c979e20 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:1894490910e891a385484514b22eb5133578897eb5b3c380e6d8ad475c6647cd -# created: 2022-04-01T15:48:07.524222836Z + digest: sha256:8a5d3f6a2e43ed8293f34e06a2f56931d1e88a2694c3bb11b15df4eb256ad163 +# created: 2022-04-06T10:30:21.687684602Z diff --git a/packages/google-cloud-bigtable/.github/auto-label.yaml b/packages/google-cloud-bigtable/.github/auto-label.yaml index 09c8d735b456..41bff0b5375a 100644 --- a/packages/google-cloud-bigtable/.github/auto-label.yaml +++ b/packages/google-cloud-bigtable/.github/auto-label.yaml @@ -1,2 +1,15 @@ +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
requestsize: enabled: true From e3cfa4609666a636bad9206133053a1ca98a13b0 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Wed, 6 Apr 2022 13:01:43 +0200 Subject: [PATCH 595/892] chore(deps): update dependency google-cloud-bigtable to v2.8.0 (#557) --- packages/google-cloud-bigtable/samples/beam/requirements.txt | 2 +- packages/google-cloud-bigtable/samples/hello/requirements.txt | 2 +- .../samples/instanceadmin/requirements.txt | 2 +- .../google-cloud-bigtable/samples/metricscaler/requirements.txt | 2 +- .../google-cloud-bigtable/samples/quickstart/requirements.txt | 2 +- .../samples/snippets/filters/requirements.txt | 2 +- .../samples/snippets/reads/requirements.txt | 2 +- .../samples/snippets/writes/requirements.txt | 2 +- .../google-cloud-bigtable/samples/tableadmin/requirements.txt | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index 860a1045d313..e087f947a816 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ apache-beam==2.37.0 -google-cloud-bigtable==2.7.1 +google-cloud-bigtable==2.8.0 google-cloud-core==2.2.3 diff --git a/packages/google-cloud-bigtable/samples/hello/requirements.txt b/packages/google-cloud-bigtable/samples/hello/requirements.txt index f0836a540a87..5b1294c0dfb1 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.7.1 +google-cloud-bigtable==2.8.0 google-cloud-core==2.2.3 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt index f5c202aa0546..02a1b84e37f7 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.7.1 +google-cloud-bigtable==2.8.0 backoff==1.11.1 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index af3bb97b79a3..ee959f473d4b 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.7.1 +google-cloud-bigtable==2.8.0 google-cloud-monitoring==2.9.1 diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt index 01e026f9316b..eda149180e52 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.7.1 +google-cloud-bigtable==2.8.0 diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt index 7e0c6dad2782..43e90d935566 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.7.1 +google-cloud-bigtable==2.8.0 snapshottest==0.6.0 \ No newline at end of file diff --git 
a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt index 7e0c6dad2782..43e90d935566 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.7.1 +google-cloud-bigtable==2.8.0 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt index 094fd46386fc..09c45a6335e4 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.7.1 \ No newline at end of file +google-cloud-bigtable==2.8.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt index 01e026f9316b..eda149180e52 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.7.1 +google-cloud-bigtable==2.8.0 From 77a92fdea1bb42eaebb9fa3e7405569a2d5cfae5 Mon Sep 17 00:00:00 2001 From: Mariatta Wijaya Date: Thu, 7 Apr 2022 13:39:58 -0700 Subject: [PATCH 596/892] fix: Prevent sending full table scan when retrying (#554) * fix: Prevent sending full table scan when retrying Update the retry logic. Don't send empty row_key and empty row_ranges if the original message didn't ask for those. Closes internal issue 214449800 * Create InvalidRetryRequest exception. Raise InvalidRetryRequest instead of StopIteration Catch the InvalidRetryRequest Handle stop the retry request if row_limit has been reached. 
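In other words, the resume logic has three outcomes: shrink the row limit and the row set down to what is still unread, fall back to an open-ended range only when the original request was itself a full scan, and abandon the retry when nothing remains. The following is a minimal, standalone sketch of that rule, not the code in row_data.py: it uses plain tuples in place of the ReadRowsRequest protobuf, the function name trim_for_retry and the (start_key_open, end_key_open) tuple representation are illustrative, and only InvalidRetryRequest corresponds to the exception actually added by this patch.

    class InvalidRetryRequest(RuntimeError):
        """Nothing sensible is left to retry (mirrors the exception added by this patch)."""

    def trim_for_retry(row_keys, row_ranges, rows_limit, last_scanned_key, rows_read_so_far):
        """Return (row_keys, row_ranges, rows_limit) for the resumed request.

        Ranges are (start_key_open, end_key_open) pairs over byte keys, both
        bounds exclusive, with None meaning "unbounded on that side".
        """
        # Stop retrying once the requested row limit has been consumed.
        if rows_limit:
            remaining = rows_limit - rows_read_so_far
            if remaining <= 0:
                raise InvalidRetryRequest("rows_limit already reached")
            rows_limit = remaining

        # The original request was a deliberate full scan: resume just after
        # the last scanned key instead of re-reading from the start.
        if not row_keys and not row_ranges:
            return [], [(last_scanned_key, None)], rows_limit

        # Keep only the rows that have not been read yet.
        keys = [k for k in row_keys if k > last_scanned_key]
        ranges = [
            (start, end)
            for start, end in row_ranges
            if end is None or end > last_scanned_key
        ]
        # (the real resume request also moves the start of a partially read
        # range forward so it begins just after last_scanned_key)

        # Nothing left to ask for: abandon the retry instead of sending an
        # empty row set, which the service would treat as a full table scan.
        if not keys and not ranges:
            raise InvalidRetryRequest("all requested rows were already read")

        return keys, ranges, rows_limit

    # Example: trim_for_retry([b"k1", b"k2"], [], 0, b"k2", 2) raises
    # InvalidRetryRequest, while trim_for_retry([], [], 0, b"k2", 2) resumes
    # with a single range that starts after b"k2".

Raising instead of sending an empty row set is what keeps a retried narrow read from silently turning into a full table scan.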
* Improve test coverage * Improve test coverage --- .../google/cloud/bigtable/row_data.py | 26 ++++-- .../tests/unit/test_row_data.py | 82 +++++++++++++++++++ .../tests/unit/test_table.py | 54 ++++++++++-- 3 files changed, 150 insertions(+), 12 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py index 0517f82e1098..0c1565737428 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py @@ -321,11 +321,15 @@ def cell_values(self, column_family_id, column, max_count=None): class InvalidReadRowsResponse(RuntimeError): - """Exception raised to to invalid response data from back-end.""" + """Exception raised to invalid response data from back-end.""" class InvalidChunk(RuntimeError): - """Exception raised to to invalid chunk data from back-end.""" + """Exception raised to invalid chunk data from back-end.""" + + +class InvalidRetryRequest(RuntimeError): + """Exception raised when retry request is invalid.""" def _retry_read_rows_exception(exc): @@ -486,6 +490,9 @@ def __iter__(self): if self.state != self.NEW_ROW: raise ValueError("The row remains partial / is not committed.") break + except InvalidRetryRequest: + self._cancelled = True + break for chunk in response.chunks: if self._cancelled: @@ -629,10 +636,11 @@ def build_updated_request(self): data_messages_v2_pb2.ReadRowsRequest.copy_from(resume_request, self.message) if self.message.rows_limit != 0: - # TODO: Throw an error if rows_limit - read_so_far is 0 or negative. - resume_request.rows_limit = max( - 1, self.message.rows_limit - self.rows_read_so_far - ) + row_limit_remaining = self.message.rows_limit - self.rows_read_so_far + if row_limit_remaining > 0: + resume_request.rows_limit = row_limit_remaining + else: + raise InvalidRetryRequest # if neither RowSet.row_keys nor RowSet.row_ranges currently exist, # add row_range that starts with last_scanned_key as start_key_open @@ -643,6 +651,12 @@ def build_updated_request(self): else: row_keys = self._filter_rows_keys() row_ranges = self._filter_row_ranges() + + if len(row_keys) == 0 and len(row_ranges) == 0: + # Avoid sending empty row_keys and row_ranges + # if that was not the intention + raise InvalidRetryRequest + resume_request.rows = data_v2_pb2.RowSet( row_keys=row_keys, row_ranges=row_ranges ) diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_data.py b/packages/google-cloud-bigtable/tests/unit/test_row_data.py index d647bbaba0e4..9b329dc9f8de 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_data.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_data.py @@ -810,6 +810,18 @@ def test_RRRM__filter_row_key(): assert expected_row_keys == row_keys +def test_RRRM__filter_row_key_is_empty(): + table_name = "table_name" + request = _ReadRowsRequestPB(table_name=table_name) + request.rows.row_keys.extend([b"row_key1", b"row_key2", b"row_key3", b"row_key4"]) + + last_scanned_key = b"row_key4" + request_manager = _make_read_rows_request_manager(request, last_scanned_key, 4) + row_keys = request_manager._filter_rows_keys() + + assert row_keys == [] + + def test_RRRM__filter_row_ranges_all_ranges_added_back(rrrm_data): from google.cloud.bigtable_v2.types import data as data_v2_pb2 @@ -1036,6 +1048,76 @@ def test_RRRM__key_already_read(): assert not request_manager._key_already_read(b"row_key16") +def test_RRRM__rows_limit_reached(): + from 
google.cloud.bigtable.row_data import InvalidRetryRequest + + last_scanned_key = b"row_key14" + request = _ReadRowsRequestPB(table_name=TABLE_NAME) + request.rows_limit = 2 + request_manager = _make_read_rows_request_manager( + request, last_scanned_key=last_scanned_key, rows_read_so_far=2 + ) + with pytest.raises(InvalidRetryRequest): + request_manager.build_updated_request() + + +def test_RRRM_build_updated_request_last_row_read_raises_invalid_retry_request(): + from google.cloud.bigtable.row_data import InvalidRetryRequest + + last_scanned_key = b"row_key4" + request = _ReadRowsRequestPB(table_name=TABLE_NAME) + request.rows.row_keys.extend([b"row_key1", b"row_key2", b"row_key4"]) + + request_manager = _make_read_rows_request_manager( + request, last_scanned_key, rows_read_so_far=3 + ) + with pytest.raises(InvalidRetryRequest): + request_manager.build_updated_request() + + +def test_RRRM_build_updated_request_row_ranges_read_raises_invalid_retry_request(): + from google.cloud.bigtable.row_data import InvalidRetryRequest + from google.cloud.bigtable import row_set + + row_range1 = row_set.RowRange(b"row_key21", b"row_key29") + + request = _ReadRowsRequestPB(table_name=TABLE_NAME) + request.rows.row_ranges.append(row_range1.get_range_kwargs()) + + last_scanned_key = b"row_key4" + request = _ReadRowsRequestPB( + table_name=TABLE_NAME, + ) + request.rows.row_ranges.append(row_range1.get_range_kwargs()) + + request_manager = _make_read_rows_request_manager( + request, last_scanned_key, rows_read_so_far=2 + ) + with pytest.raises(InvalidRetryRequest): + request_manager.build_updated_request() + + +def test_RRRM_build_updated_request_row_ranges_valid(): + from google.cloud.bigtable import row_set + + row_range1 = row_set.RowRange(b"row_key21", b"row_key29") + + request = _ReadRowsRequestPB(table_name=TABLE_NAME) + request.rows.row_ranges.append(row_range1.get_range_kwargs()) + + last_scanned_key = b"row_key21" + request = _ReadRowsRequestPB( + table_name=TABLE_NAME, + ) + request.rows.row_ranges.append(row_range1.get_range_kwargs()) + + request_manager = _make_read_rows_request_manager( + request, last_scanned_key, rows_read_so_far=1 + ) + updated_request = request_manager.build_updated_request() + assert len(updated_request.rows.row_ranges) > 0 + + @pytest.fixture(scope="session") def json_tests(): dirname = os.path.dirname(__file__) diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index fb4ec3539445..883f713d8267 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -910,7 +910,6 @@ def mock_create_row_request(table_name, **kwargs): def test_table_read_retry_rows(): from google.api_core import retry - from google.cloud.bigtable.table import _create_row_request credentials = _make_credentials() client = _make_client(project="project-id", credentials=credentials, admin=True) @@ -965,12 +964,55 @@ def test_table_read_retry_rows(): result = rows[1] assert result.row_key == ROW_KEY_2 - expected_request = _create_row_request( - table.name, - start_key=ROW_KEY_1, - end_key=ROW_KEY_2, + assert len(data_api.read_rows.mock_calls) == 3 + + +def test_table_read_retry_rows_no_full_table_scan(): + from google.api_core import retry + + credentials = _make_credentials() + client = _make_client(project="project-id", credentials=credentials, admin=True) + data_api = client._table_data_client = _make_data_api() + instance = 
client.instance(instance_id=INSTANCE_ID) + table = _make_table(TABLE_ID, instance) + + retry_read_rows = retry.Retry(predicate=_read_rows_retry_exception) + + # Create response_iterator + chunk_1 = _ReadRowsResponseCellChunkPB( + row_key=ROW_KEY_2, + family_name=FAMILY_NAME, + qualifier=QUALIFIER, + timestamp_micros=TIMESTAMP_MICROS, + value=VALUE, + commit_row=True, ) - data_api.read_rows.mock_calls = [expected_request] * 3 + + response_1 = _ReadRowsResponseV2([chunk_1]) + response_failure_iterator_2 = _MockFailureIterator_2([response_1]) + + data_api.table_path.return_value = ( + f"projects/{PROJECT_ID}/instances/{INSTANCE_ID}/tables/{TABLE_ID}" + ) + + data_api.read_rows.side_effect = [ + response_failure_iterator_2, + ] + + rows = [ + row + for row in table.read_rows( + start_key="doesn't matter", end_key=ROW_KEY_2, retry=retry_read_rows + ) + ] + assert len(rows) == 1 + result = rows[0] + assert result.row_key == ROW_KEY_2 + + assert len(data_api.read_rows.mock_calls) == 1 + assert ( + len(data_api.read_rows.mock_calls[0].args[0].rows.row_ranges) > 0 + ) # not empty row_ranges def test_table_yield_retry_rows(): From 9c296e508fcf73d16b7a737c74c303ebbf9b7e0d Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Thu, 7 Apr 2022 15:33:11 -0700 Subject: [PATCH 597/892] chore(main): release 2.8.1 (#561) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- packages/google-cloud-bigtable/CHANGELOG.md | 7 +++++++ packages/google-cloud-bigtable/setup.py | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 09a408abaf49..90f4b88136be 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,13 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +### [2.8.1](https://github.com/googleapis/python-bigtable/compare/v2.8.0...v2.8.1) (2022-04-07) + + +### Bug Fixes + +* Prevent sending full table scan when retrying ([#554](https://github.com/googleapis/python-bigtable/issues/554)) ([56f5357](https://github.com/googleapis/python-bigtable/commit/56f5357c09ac867491b934f6029776dcd74c6eac)) + ## [2.8.0](https://github.com/googleapis/python-bigtable/compare/v2.7.1...v2.8.0) (2022-04-04) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 351622914fd6..d0524186214a 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -22,7 +22,7 @@ name = "google-cloud-bigtable" description = "Google Cloud Bigtable API client library" -version = "2.8.0" +version = "2.8.1" # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' From 0046d7e3e2112bdf02386397ad833d7e80c0fbb2 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Fri, 8 Apr 2022 12:47:29 +0200 Subject: [PATCH 598/892] chore(deps): update dependency google-cloud-bigtable to v2.8.1 (#563) --- packages/google-cloud-bigtable/samples/beam/requirements.txt | 2 +- packages/google-cloud-bigtable/samples/hello/requirements.txt | 2 +- .../samples/instanceadmin/requirements.txt | 2 +- .../google-cloud-bigtable/samples/metricscaler/requirements.txt | 2 +- .../google-cloud-bigtable/samples/quickstart/requirements.txt | 2 +- .../samples/snippets/filters/requirements.txt | 2 +- .../samples/snippets/reads/requirements.txt | 2 +- 
.../samples/snippets/writes/requirements.txt | 2 +- .../google-cloud-bigtable/samples/tableadmin/requirements.txt | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index e087f947a816..4566cc141555 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ apache-beam==2.37.0 -google-cloud-bigtable==2.8.0 +google-cloud-bigtable==2.8.1 google-cloud-core==2.2.3 diff --git a/packages/google-cloud-bigtable/samples/hello/requirements.txt b/packages/google-cloud-bigtable/samples/hello/requirements.txt index 5b1294c0dfb1..8b23fca350a3 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.8.0 +google-cloud-bigtable==2.8.1 google-cloud-core==2.2.3 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt index 02a1b84e37f7..f0d4e821ae25 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.8.0 +google-cloud-bigtable==2.8.1 backoff==1.11.1 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index ee959f473d4b..b1de8c9a38f0 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.8.0 +google-cloud-bigtable==2.8.1 google-cloud-monitoring==2.9.1 diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt index eda149180e52..e9a9c1753909 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.8.0 +google-cloud-bigtable==2.8.1 diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt index 43e90d935566..a6e307e50927 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.8.0 +google-cloud-bigtable==2.8.1 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt index 43e90d935566..a6e307e50927 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.8.0 +google-cloud-bigtable==2.8.1 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt index 09c45a6335e4..ceaefad7eede 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt +++ 
b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.8.0 \ No newline at end of file +google-cloud-bigtable==2.8.1 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt index eda149180e52..e9a9c1753909 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.8.0 +google-cloud-bigtable==2.8.1 From 19d85cbcfaf7dc756614e7aac5760bec8dfd9866 Mon Sep 17 00:00:00 2001 From: Mariatta Wijaya Date: Sun, 10 Apr 2022 19:48:20 -0700 Subject: [PATCH 599/892] doc: Add App Profile documentation (#560) We weren't including App Profile docs in the auto-generated documentation. This adds the documentation for App Profile. --- packages/google-cloud-bigtable/docs/app-profile.rst | 6 ++++++ packages/google-cloud-bigtable/docs/usage.rst | 1 + 2 files changed, 7 insertions(+) create mode 100644 packages/google-cloud-bigtable/docs/app-profile.rst diff --git a/packages/google-cloud-bigtable/docs/app-profile.rst b/packages/google-cloud-bigtable/docs/app-profile.rst new file mode 100644 index 000000000000..5c9d426c2062 --- /dev/null +++ b/packages/google-cloud-bigtable/docs/app-profile.rst @@ -0,0 +1,6 @@ +App Profile +~~~~~~~~~~~ + +.. automodule:: google.cloud.bigtable.app_profile + :members: + :show-inheritance: diff --git a/packages/google-cloud-bigtable/docs/usage.rst b/packages/google-cloud-bigtable/docs/usage.rst index 532f1ce8d367..53faac22ffab 100644 --- a/packages/google-cloud-bigtable/docs/usage.rst +++ b/packages/google-cloud-bigtable/docs/usage.rst @@ -9,6 +9,7 @@ Using the API cluster instance table + app-profile backup column-family row From 8fd165cb80727ae5d0d347fba42ffa94afd9fee7 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Mon, 11 Apr 2022 19:41:15 +0200 Subject: [PATCH 600/892] chore(deps): update dependency google-cloud-core to v2.3.0 (#565) --- packages/google-cloud-bigtable/samples/beam/requirements.txt | 2 +- packages/google-cloud-bigtable/samples/hello/requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index 4566cc141555..4805140e7179 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ apache-beam==2.37.0 google-cloud-bigtable==2.8.1 -google-cloud-core==2.2.3 +google-cloud-core==2.3.0 diff --git a/packages/google-cloud-bigtable/samples/hello/requirements.txt b/packages/google-cloud-bigtable/samples/hello/requirements.txt index 8b23fca350a3..0ba29dd99630 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements.txt @@ -1,2 +1,2 @@ google-cloud-bigtable==2.8.1 -google-cloud-core==2.2.3 +google-cloud-core==2.3.0 From 69686add52c895ce967100888e4b2853275a765d Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 14 Apr 2022 00:54:12 +0000 Subject: [PATCH 601/892] chore: use gapic-generator-python 0.65.1 (#568) - [ ] Regenerate this pull request now. 
PiperOrigin-RevId: 441524537 Source-Link: https://github.com/googleapis/googleapis/commit/2a273915b3f70fe86c9d2a75470a0b83e48d0abf Source-Link: https://github.com/googleapis/googleapis-gen/commit/ab6756a48c89b5bcb9fb73443cb8e55d574f4643 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiYWI2NzU2YTQ4Yzg5YjViY2I5ZmI3MzQ0M2NiOGU1NWQ1NzRmNDY0MyJ9 fix(deps): require grpc-google-iam-v1 >=0.12.4 docs: fix type in docstring for map fields feat: AuditConfig for IAM v1 --- .../bigtable_instance_admin/async_client.py | 94 ++++++++++-------- .../bigtable_instance_admin/client.py | 94 ++++++++++-------- .../transports/base.py | 5 + .../transports/grpc.py | 4 + .../bigtable_table_admin/async_client.py | 90 +++++++++-------- .../services/bigtable_table_admin/client.py | 90 +++++++++-------- .../bigtable_table_admin/transports/base.py | 5 + .../bigtable_table_admin/transports/grpc.py | 4 + .../types/bigtable_instance_admin.py | 2 +- .../cloud/bigtable_admin_v2/types/instance.py | 2 +- .../cloud/bigtable_admin_v2/types/table.py | 4 +- .../services/bigtable/async_client.py | 1 + .../bigtable_v2/services/bigtable/client.py | 2 +- .../services/bigtable/transports/base.py | 5 + .../services/bigtable/transports/grpc.py | 4 + .../fixup_bigtable_admin_v2_keywords.py | 2 +- packages/google-cloud-bigtable/setup.py | 2 +- .../testing/constraints-3.6.txt | 2 +- .../test_bigtable_instance_admin.py | 92 +++++++++++++---- .../test_bigtable_table_admin.py | 98 ++++++++++++++----- .../unit/gapic/bigtable_v2/test_bigtable.py | 75 ++++++++++---- 21 files changed, 448 insertions(+), 229 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py index ed5862d3971f..13f1d71baee4 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import functools import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core.client_options import ClientOptions @@ -241,7 +241,7 @@ async def create_instance( parent: str = None, instance_id: str = None, instance: gba_instance.Instance = None, - clusters: Dict[str, gba_instance.Cluster] = None, + clusters: Mapping[str, gba_instance.Cluster] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -281,7 +281,7 @@ async def create_instance( This corresponds to the ``instance`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - clusters (Dict[str, gba_instance.Cluster]): + clusters (:class:`Mapping[str, google.cloud.bigtable_admin_v2.types.Cluster]`): Required. The clusters to be created within the instance, mapped by desired cluster ID, e.g., just ``mycluster`` rather than @@ -1872,21 +1872,26 @@ async def get_iam_policy( Returns: google.iam.v1.policy_pb2.Policy: - Defines an Identity and Access Management (IAM) policy. It is used to - specify access control policies for Cloud Platform - resources. + An Identity and Access Management (IAM) policy, which specifies access + controls for Google Cloud resources. 
A Policy is a collection of bindings. A binding binds - one or more members to a single role. Members can be - user accounts, service accounts, Google groups, and - domains (such as G Suite). A role is a named list of - permissions (defined by IAM or configured by users). - A binding can optionally specify a condition, which - is a logic expression that further constrains the - role binding based on attributes about the request - and/or target resource. - - **JSON Example** + one or more members, or principals, to a single role. + Principals can be user accounts, service accounts, + Google groups, and domains (such as G Suite). A role + is a named list of permissions; each role can be an + IAM predefined role or a user-created custom role. + + For some types of Google Cloud resources, a binding + can also specify a condition, which is a logical + expression that allows access to a resource only if + the expression evaluates to true. A condition can add + constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the [IAM + documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). + + **JSON example:** { "bindings": [ @@ -1901,17 +1906,17 @@ async def get_iam_policy( }, { "role": "roles/resourcemanager.organizationViewer", - "members": ["user:eve@example.com"], + "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } - ] + ], "etag": "BwWWja0YfJA=", "version": 3 } - **YAML Example** + **YAML example:** bindings: - members: - user:\ mike@example.com - group:\ admins@example.com - domain:google.com - @@ -1922,11 +1927,12 @@ async def get_iam_policy( condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < - timestamp('2020-10-01T00:00:00.000Z') + timestamp('2020-10-01T00:00:00.000Z') etag: + BwWWja0YfJA= version: 3 For a description of IAM and its features, see the - [IAM developer's - guide](\ https://cloud.google.com/iam/docs). + [IAM + documentation](\ https://cloud.google.com/iam/docs/). """ # Create or coerce a protobuf request object. @@ -2016,21 +2022,26 @@ async def set_iam_policy( Returns: google.iam.v1.policy_pb2.Policy: - Defines an Identity and Access Management (IAM) policy. It is used to - specify access control policies for Cloud Platform - resources. + An Identity and Access Management (IAM) policy, which specifies access + controls for Google Cloud resources. A Policy is a collection of bindings. A binding binds - one or more members to a single role. Members can be - user accounts, service accounts, Google groups, and - domains (such as G Suite). A role is a named list of - permissions (defined by IAM or configured by users). - A binding can optionally specify a condition, which - is a logic expression that further constrains the - role binding based on attributes about the request - and/or target resource. - - **JSON Example** + one or more members, or principals, to a single role. + Principals can be user accounts, service accounts, + Google groups, and domains (such as G Suite). A role + is a named list of permissions; each role can be an + IAM predefined role or a user-created custom role. 
+ + For some types of Google Cloud resources, a binding + can also specify a condition, which is a logical + expression that allows access to a resource only if + the expression evaluates to true. A condition can add + constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the [IAM + documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). + + **JSON example:** { "bindings": [ @@ -2045,17 +2056,17 @@ async def set_iam_policy( }, { "role": "roles/resourcemanager.organizationViewer", - "members": ["user:eve@example.com"], + "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } - ] + ], "etag": "BwWWja0YfJA=", "version": 3 } - **YAML Example** + **YAML example:** bindings: - members: - user:\ mike@example.com - group:\ admins@example.com - domain:google.com - @@ -2066,11 +2077,12 @@ async def set_iam_policy( condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < - timestamp('2020-10-01T00:00:00.000Z') + timestamp('2020-10-01T00:00:00.000Z') etag: + BwWWja0YfJA= version: 3 For a description of IAM and its features, see the - [IAM developer's - guide](\ https://cloud.google.com/iam/docs). + [IAM + documentation](\ https://cloud.google.com/iam/docs/). """ # Create or coerce a protobuf request object. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py index cc9317f6df59..fe14b82beb14 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import os import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core import client_options as client_options_lib @@ -557,7 +557,7 @@ def create_instance( parent: str = None, instance_id: str = None, instance: gba_instance.Instance = None, - clusters: Dict[str, gba_instance.Cluster] = None, + clusters: Mapping[str, gba_instance.Cluster] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), @@ -597,7 +597,7 @@ def create_instance( This corresponds to the ``instance`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - clusters (Dict[str, gba_instance.Cluster]): + clusters (Mapping[str, google.cloud.bigtable_admin_v2.types.Cluster]): Required. The clusters to be created within the instance, mapped by desired cluster ID, e.g., just ``mycluster`` rather than @@ -2081,21 +2081,26 @@ def get_iam_policy( Returns: google.iam.v1.policy_pb2.Policy: - Defines an Identity and Access Management (IAM) policy. It is used to - specify access control policies for Cloud Platform - resources. + An Identity and Access Management (IAM) policy, which specifies access + controls for Google Cloud resources. A Policy is a collection of bindings. A binding binds - one or more members to a single role. 
Members can be - user accounts, service accounts, Google groups, and - domains (such as G Suite). A role is a named list of - permissions (defined by IAM or configured by users). - A binding can optionally specify a condition, which - is a logic expression that further constrains the - role binding based on attributes about the request - and/or target resource. - - **JSON Example** + one or more members, or principals, to a single role. + Principals can be user accounts, service accounts, + Google groups, and domains (such as G Suite). A role + is a named list of permissions; each role can be an + IAM predefined role or a user-created custom role. + + For some types of Google Cloud resources, a binding + can also specify a condition, which is a logical + expression that allows access to a resource only if + the expression evaluates to true. A condition can add + constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the [IAM + documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). + + **JSON example:** { "bindings": [ @@ -2110,17 +2115,17 @@ def get_iam_policy( }, { "role": "roles/resourcemanager.organizationViewer", - "members": ["user:eve@example.com"], + "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } - ] + ], "etag": "BwWWja0YfJA=", "version": 3 } - **YAML Example** + **YAML example:** bindings: - members: - user:\ mike@example.com - group:\ admins@example.com - domain:google.com - @@ -2131,11 +2136,12 @@ def get_iam_policy( condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < - timestamp('2020-10-01T00:00:00.000Z') + timestamp('2020-10-01T00:00:00.000Z') etag: + BwWWja0YfJA= version: 3 For a description of IAM and its features, see the - [IAM developer's - guide](\ https://cloud.google.com/iam/docs). + [IAM + documentation](\ https://cloud.google.com/iam/docs/). """ # Create or coerce a protobuf request object. @@ -2212,21 +2218,26 @@ def set_iam_policy( Returns: google.iam.v1.policy_pb2.Policy: - Defines an Identity and Access Management (IAM) policy. It is used to - specify access control policies for Cloud Platform - resources. + An Identity and Access Management (IAM) policy, which specifies access + controls for Google Cloud resources. A Policy is a collection of bindings. A binding binds - one or more members to a single role. Members can be - user accounts, service accounts, Google groups, and - domains (such as G Suite). A role is a named list of - permissions (defined by IAM or configured by users). - A binding can optionally specify a condition, which - is a logic expression that further constrains the - role binding based on attributes about the request - and/or target resource. - - **JSON Example** + one or more members, or principals, to a single role. + Principals can be user accounts, service accounts, + Google groups, and domains (such as G Suite). A role + is a named list of permissions; each role can be an + IAM predefined role or a user-created custom role. + + For some types of Google Cloud resources, a binding + can also specify a condition, which is a logical + expression that allows access to a resource only if + the expression evaluates to true. 
A condition can add + constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the [IAM + documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). + + **JSON example:** { "bindings": [ @@ -2241,17 +2252,17 @@ def set_iam_policy( }, { "role": "roles/resourcemanager.organizationViewer", - "members": ["user:eve@example.com"], + "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } - ] + ], "etag": "BwWWja0YfJA=", "version": 3 } - **YAML Example** + **YAML example:** bindings: - members: - user:\ mike@example.com - group:\ admins@example.com - domain:google.com - @@ -2262,11 +2273,12 @@ def set_iam_policy( condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < - timestamp('2020-10-01T00:00:00.000Z') + timestamp('2020-10-01T00:00:00.000Z') etag: + BwWWja0YfJA= version: 3 For a description of IAM and its features, see the - [IAM developer's - guide](\ https://cloud.google.com/iam/docs). + [IAM + documentation](\ https://cloud.google.com/iam/docs/). """ # Create or coerce a protobuf request object. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py index 8084f4e21184..8399c517cde5 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py @@ -94,6 +94,7 @@ def __init__( always_use_jwt_access (Optional[bool]): Whether self signed JWT should be used for service account credentials. """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. 
if ":" not in host: host += ":443" @@ -598,5 +599,9 @@ def list_hot_tablets( ]: raise NotImplementedError() + @property + def kind(self) -> str: + raise NotImplementedError() + __all__ = ("BigtableInstanceAdminTransport",) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py index 005fa38ba0b8..db7d0c1c4e6d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py @@ -862,5 +862,9 @@ def list_hot_tablets( def close(self): self.grpc_channel.close() + @property + def kind(self) -> str: + return "grpc" + __all__ = ("BigtableInstanceAdminGrpcTransport",) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index 47f4754433a0..0752459ee5db 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import functools import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core.client_options import ClientOptions @@ -2087,21 +2087,26 @@ async def get_iam_policy( Returns: google.iam.v1.policy_pb2.Policy: - Defines an Identity and Access Management (IAM) policy. It is used to - specify access control policies for Cloud Platform - resources. + An Identity and Access Management (IAM) policy, which specifies access + controls for Google Cloud resources. A Policy is a collection of bindings. A binding binds - one or more members to a single role. Members can be - user accounts, service accounts, Google groups, and - domains (such as G Suite). A role is a named list of - permissions (defined by IAM or configured by users). - A binding can optionally specify a condition, which - is a logic expression that further constrains the - role binding based on attributes about the request - and/or target resource. - - **JSON Example** + one or more members, or principals, to a single role. + Principals can be user accounts, service accounts, + Google groups, and domains (such as G Suite). A role + is a named list of permissions; each role can be an + IAM predefined role or a user-created custom role. + + For some types of Google Cloud resources, a binding + can also specify a condition, which is a logical + expression that allows access to a resource only if + the expression evaluates to true. A condition can add + constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the [IAM + documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). 
+ + **JSON example:** { "bindings": [ @@ -2116,17 +2121,17 @@ async def get_iam_policy( }, { "role": "roles/resourcemanager.organizationViewer", - "members": ["user:eve@example.com"], + "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } - ] + ], "etag": "BwWWja0YfJA=", "version": 3 } - **YAML Example** + **YAML example:** bindings: - members: - user:\ mike@example.com - group:\ admins@example.com - domain:google.com - @@ -2137,11 +2142,12 @@ async def get_iam_policy( condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < - timestamp('2020-10-01T00:00:00.000Z') + timestamp('2020-10-01T00:00:00.000Z') etag: + BwWWja0YfJA= version: 3 For a description of IAM and its features, see the - [IAM developer's - guide](\ https://cloud.google.com/iam/docs). + [IAM + documentation](\ https://cloud.google.com/iam/docs/). """ # Create or coerce a protobuf request object. @@ -2231,21 +2237,26 @@ async def set_iam_policy( Returns: google.iam.v1.policy_pb2.Policy: - Defines an Identity and Access Management (IAM) policy. It is used to - specify access control policies for Cloud Platform - resources. + An Identity and Access Management (IAM) policy, which specifies access + controls for Google Cloud resources. A Policy is a collection of bindings. A binding binds - one or more members to a single role. Members can be - user accounts, service accounts, Google groups, and - domains (such as G Suite). A role is a named list of - permissions (defined by IAM or configured by users). - A binding can optionally specify a condition, which - is a logic expression that further constrains the - role binding based on attributes about the request - and/or target resource. - - **JSON Example** + one or more members, or principals, to a single role. + Principals can be user accounts, service accounts, + Google groups, and domains (such as G Suite). A role + is a named list of permissions; each role can be an + IAM predefined role or a user-created custom role. + + For some types of Google Cloud resources, a binding + can also specify a condition, which is a logical + expression that allows access to a resource only if + the expression evaluates to true. A condition can add + constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the [IAM + documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). 
+ + **JSON example:** { "bindings": [ @@ -2260,17 +2271,17 @@ async def set_iam_policy( }, { "role": "roles/resourcemanager.organizationViewer", - "members": ["user:eve@example.com"], + "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } - ] + ], "etag": "BwWWja0YfJA=", "version": 3 } - **YAML Example** + **YAML example:** bindings: - members: - user:\ mike@example.com - group:\ admins@example.com - domain:google.com - @@ -2281,11 +2292,12 @@ async def set_iam_policy( condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < - timestamp('2020-10-01T00:00:00.000Z') + timestamp('2020-10-01T00:00:00.000Z') etag: + BwWWja0YfJA= version: 3 For a description of IAM and its features, see the - [IAM developer's - guide](\ https://cloud.google.com/iam/docs). + [IAM + documentation](\ https://cloud.google.com/iam/docs/). """ # Create or coerce a protobuf request object. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index 4d4d82f26973..3b1185e5358e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import os import re -from typing import Dict, Optional, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core import client_options as client_options_lib @@ -2337,21 +2337,26 @@ def get_iam_policy( Returns: google.iam.v1.policy_pb2.Policy: - Defines an Identity and Access Management (IAM) policy. It is used to - specify access control policies for Cloud Platform - resources. + An Identity and Access Management (IAM) policy, which specifies access + controls for Google Cloud resources. A Policy is a collection of bindings. A binding binds - one or more members to a single role. Members can be - user accounts, service accounts, Google groups, and - domains (such as G Suite). A role is a named list of - permissions (defined by IAM or configured by users). - A binding can optionally specify a condition, which - is a logic expression that further constrains the - role binding based on attributes about the request - and/or target resource. - - **JSON Example** + one or more members, or principals, to a single role. + Principals can be user accounts, service accounts, + Google groups, and domains (such as G Suite). A role + is a named list of permissions; each role can be an + IAM predefined role or a user-created custom role. + + For some types of Google Cloud resources, a binding + can also specify a condition, which is a logical + expression that allows access to a resource only if + the expression evaluates to true. A condition can add + constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the [IAM + documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). 
+ + **JSON example:** { "bindings": [ @@ -2366,17 +2371,17 @@ def get_iam_policy( }, { "role": "roles/resourcemanager.organizationViewer", - "members": ["user:eve@example.com"], + "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } - ] + ], "etag": "BwWWja0YfJA=", "version": 3 } - **YAML Example** + **YAML example:** bindings: - members: - user:\ mike@example.com - group:\ admins@example.com - domain:google.com - @@ -2387,11 +2392,12 @@ def get_iam_policy( condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < - timestamp('2020-10-01T00:00:00.000Z') + timestamp('2020-10-01T00:00:00.000Z') etag: + BwWWja0YfJA= version: 3 For a description of IAM and its features, see the - [IAM developer's - guide](\ https://cloud.google.com/iam/docs). + [IAM + documentation](\ https://cloud.google.com/iam/docs/). """ # Create or coerce a protobuf request object. @@ -2468,21 +2474,26 @@ def set_iam_policy( Returns: google.iam.v1.policy_pb2.Policy: - Defines an Identity and Access Management (IAM) policy. It is used to - specify access control policies for Cloud Platform - resources. + An Identity and Access Management (IAM) policy, which specifies access + controls for Google Cloud resources. A Policy is a collection of bindings. A binding binds - one or more members to a single role. Members can be - user accounts, service accounts, Google groups, and - domains (such as G Suite). A role is a named list of - permissions (defined by IAM or configured by users). - A binding can optionally specify a condition, which - is a logic expression that further constrains the - role binding based on attributes about the request - and/or target resource. - - **JSON Example** + one or more members, or principals, to a single role. + Principals can be user accounts, service accounts, + Google groups, and domains (such as G Suite). A role + is a named list of permissions; each role can be an + IAM predefined role or a user-created custom role. + + For some types of Google Cloud resources, a binding + can also specify a condition, which is a logical + expression that allows access to a resource only if + the expression evaluates to true. A condition can add + constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the [IAM + documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). 
+ + **JSON example:** { "bindings": [ @@ -2497,17 +2508,17 @@ def set_iam_policy( }, { "role": "roles/resourcemanager.organizationViewer", - "members": ["user:eve@example.com"], + "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } - ] + ], "etag": "BwWWja0YfJA=", "version": 3 } - **YAML Example** + **YAML example:** bindings: - members: - user:\ mike@example.com - group:\ admins@example.com - domain:google.com - @@ -2518,11 +2529,12 @@ def set_iam_policy( condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < - timestamp('2020-10-01T00:00:00.000Z') + timestamp('2020-10-01T00:00:00.000Z') etag: + BwWWja0YfJA= version: 3 For a description of IAM and its features, see the - [IAM developer's - guide](\ https://cloud.google.com/iam/docs). + [IAM + documentation](\ https://cloud.google.com/iam/docs/). """ # Create or coerce a protobuf request object. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py index d53b3d8f31a6..5370416a0802 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py @@ -94,6 +94,7 @@ def __init__( always_use_jwt_access (Optional[bool]): Whether self signed JWT should be used for service account credentials. """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. if ":" not in host: host += ":443" @@ -576,5 +577,9 @@ def test_iam_permissions( ]: raise NotImplementedError() + @property + def kind(self) -> str: + raise NotImplementedError() + __all__ = ("BigtableTableAdminTransport",) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py index 04b0e37bf649..f6c2c478d771 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py @@ -925,5 +925,9 @@ def test_iam_permissions( def close(self): self.grpc_channel.close() + @property + def kind(self) -> str: + return "grpc" + __all__ = ("BigtableTableAdminGrpcTransport",) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py index 924deeb46f11..c182966b1039 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py @@ -68,7 +68,7 @@ class CreateInstanceRequest(proto.Message): instance (google.cloud.bigtable_admin_v2.types.Instance): Required. The instance to create. Fields marked ``OutputOnly`` must be left blank. 
- clusters (Sequence[google.cloud.bigtable_admin_v2.types.CreateInstanceRequest.ClustersEntry]): + clusters (Mapping[str, google.cloud.bigtable_admin_v2.types.Cluster]): Required. The clusters to be created within the instance, mapped by desired cluster ID, e.g., just ``mycluster`` rather than diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py index 26c502b049f4..05d1e524de48 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py @@ -51,7 +51,7 @@ class Instance(proto.Message): (``OutputOnly``) The current state of the instance. type_ (google.cloud.bigtable_admin_v2.types.Instance.Type): The type of the instance. Defaults to ``PRODUCTION``. - labels (Sequence[google.cloud.bigtable_admin_v2.types.Instance.LabelsEntry]): + labels (Mapping[str, str]): Labels are a flexible and lightweight mechanism for organizing cloud resources into groups that reflect a customer's organizational needs and deployment strategies. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py index 29b4ce75dffe..6a4446dd3c00 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py @@ -81,14 +81,14 @@ class Table(proto.Message): ``projects/{project}/instances/{instance}/tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. Views: ``NAME_ONLY``, ``SCHEMA_VIEW``, ``REPLICATION_VIEW``, ``FULL`` - cluster_states (Sequence[google.cloud.bigtable_admin_v2.types.Table.ClusterStatesEntry]): + cluster_states (Mapping[str, google.cloud.bigtable_admin_v2.types.Table.ClusterState]): Output only. Map from cluster ID to per-cluster table state. If it could not be determined whether or not the table has data in a particular cluster (for example, if its zone is unavailable), then there will be an entry for the cluster with UNKNOWN ``replication_status``. Views: ``REPLICATION_VIEW``, ``ENCRYPTION_VIEW``, ``FULL`` - column_families (Sequence[google.cloud.bigtable_admin_v2.types.Table.ColumnFamiliesEntry]): + column_families (Mapping[str, google.cloud.bigtable_admin_v2.types.ColumnFamily]): (``CreationOnly``) The column families configured for this table, mapped by column family ID. 
Views: ``SCHEMA_VIEW``, ``FULL`` diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py index 6c9986f780b6..c5f673a7452c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -18,6 +18,7 @@ import re from typing import ( Dict, + Mapping, Optional, AsyncIterable, Awaitable, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py index c62875f3aec6..a0dfdff4e82a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py @@ -16,7 +16,7 @@ from collections import OrderedDict import os import re -from typing import Dict, Optional, Iterable, Sequence, Tuple, Type, Union +from typing import Dict, Mapping, Optional, Iterable, Sequence, Tuple, Type, Union import pkg_resources from google.api_core import client_options as client_options_lib diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py index 6a5e0eca7139..922068fd0a4f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py @@ -87,6 +87,7 @@ def __init__( always_use_jwt_access (Optional[bool]): Whether self signed JWT should be used for service account credentials. """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. 
if ":" not in host: host += ":443" @@ -254,5 +255,9 @@ def read_modify_write_row( ]: raise NotImplementedError() + @property + def kind(self) -> str: + raise NotImplementedError() + __all__ = ("BigtableTransport",) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py index 3c836cad209d..ce40cf33517c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py @@ -438,5 +438,9 @@ def read_modify_write_row( def close(self): self.grpc_channel.close() + @property + def kind(self) -> str: + return "grpc" + __all__ = ("BigtableGrpcTransport",) diff --git a/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py b/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py index 2079dd99b3d9..72354ba161ad 100644 --- a/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py +++ b/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py @@ -72,7 +72,7 @@ class bigtable_adminCallTransformer(cst.CSTTransformer): 'partial_update_cluster': ('cluster', 'update_mask', ), 'partial_update_instance': ('instance', 'update_mask', ), 'restore_table': ('parent', 'table_id', 'backup', ), - 'set_iam_policy': ('resource', 'policy', ), + 'set_iam_policy': ('resource', 'policy', 'update_mask', ), 'snapshot_table': ('name', 'cluster', 'snapshot_id', 'ttl', 'description', ), 'test_iam_permissions': ('resource', 'permissions', ), 'update_app_profile': ('app_profile', 'update_mask', 'ignore_warnings', ), diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index d0524186214a..04afa44a1cd3 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -37,7 +37,7 @@ # Until this issue is closed # https://github.com/googleapis/google-cloud-python/issues/10566 "google-cloud-core >= 1.4.1, <3.0.0dev", - "grpc-google-iam-v1 >= 0.12.3, < 0.13dev", + "grpc-google-iam-v1 >= 0.12.4, <1.0.0dev", "proto-plus >= 1.18.0", ] extras = {"libcst": "libcst >= 0.2.5"} diff --git a/packages/google-cloud-bigtable/testing/constraints-3.6.txt b/packages/google-cloud-bigtable/testing/constraints-3.6.txt index 92345bed82b7..0f4a447dbb12 100644 --- a/packages/google-cloud-bigtable/testing/constraints-3.6.txt +++ b/packages/google-cloud-bigtable/testing/constraints-3.6.txt @@ -7,6 +7,6 @@ # Then this file should have foo==1.14.0 google-api-core==1.31.5 google-cloud-core==1.4.1 -grpc-google-iam-v1==0.12.3 +grpc-google-iam-v1==0.12.4 proto-plus==1.18.0 libcst==0.2.5 diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index 99ba131377c8..3f6013fc5d21 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -104,24 +104,26 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - BigtableInstanceAdminClient, - BigtableInstanceAdminAsyncClient, + (BigtableInstanceAdminClient, "grpc"), + (BigtableInstanceAdminAsyncClient, "grpc_asyncio"), 
], ) -def test_bigtable_instance_admin_client_from_service_account_info(client_class): +def test_bigtable_instance_admin_client_from_service_account_info( + client_class, transport_name +): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: factory.return_value = creds info = {"valid": True} - client = client_class.from_service_account_info(info) + client = client_class.from_service_account_info(info, transport=transport_name) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "bigtableadmin.googleapis.com:443" + assert client.transport._host == ("bigtableadmin.googleapis.com:443") @pytest.mark.parametrize( @@ -150,27 +152,33 @@ def test_bigtable_instance_admin_client_service_account_always_use_jwt( @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - BigtableInstanceAdminClient, - BigtableInstanceAdminAsyncClient, + (BigtableInstanceAdminClient, "grpc"), + (BigtableInstanceAdminAsyncClient, "grpc_asyncio"), ], ) -def test_bigtable_instance_admin_client_from_service_account_file(client_class): +def test_bigtable_instance_admin_client_from_service_account_file( + client_class, transport_name +): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_file" ) as factory: factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - client = client_class.from_service_account_json("dummy/file/path.json") + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "bigtableadmin.googleapis.com:443" + assert client.transport._host == ("bigtableadmin.googleapis.com:443") def test_bigtable_instance_admin_client_get_transport_class(): @@ -4310,7 +4318,7 @@ async def test_list_app_profiles_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -4358,7 +4366,9 @@ async def test_list_app_profiles_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_app_profiles(request={})).pages: + async for page_ in ( + await client.list_app_profiles(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -5263,6 +5273,7 @@ def test_set_iam_policy_from_dict_foreign(): request={ "resource": "resource_value", "policy": policy_pb2.Policy(version=774), + "update_mask": field_mask_pb2.FieldMask(paths=["paths_value"]), } ) call.assert_called() @@ -5991,7 +6002,7 @@ async def test_list_hot_tablets_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -6037,7 +6048,9 @@ async def test_list_hot_tablets_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await 
client.list_hot_tablets(request={})).pages: + async for page_ in ( + await client.list_hot_tablets(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -6134,6 +6147,19 @@ def test_transport_adc(transport_class): adc.assert_called_once() +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + ], +) +def test_transport_kind(transport_name): + transport = BigtableInstanceAdminClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + def test_transport_grpc_default(): # A client should use the gRPC transport by default. client = BigtableInstanceAdminClient( @@ -6201,6 +6227,14 @@ def test_bigtable_instance_admin_base_transport(): with pytest.raises(NotImplementedError): transport.operations_client + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + def test_bigtable_instance_admin_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file @@ -6382,24 +6416,40 @@ def test_bigtable_instance_admin_grpc_transport_client_cert_source_for_mtls( ) -def test_bigtable_instance_admin_host_no_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_bigtable_instance_admin_host_no_port(transport_name): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="bigtableadmin.googleapis.com" ), + transport=transport_name, ) - assert client.transport._host == "bigtableadmin.googleapis.com:443" + assert client.transport._host == ("bigtableadmin.googleapis.com:443") -def test_bigtable_instance_admin_host_with_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_bigtable_instance_admin_host_with_port(transport_name): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="bigtableadmin.googleapis.com:8000" ), + transport=transport_name, ) - assert client.transport._host == "bigtableadmin.googleapis.com:8000" + assert client.transport._host == ("bigtableadmin.googleapis.com:8000") def test_bigtable_instance_admin_grpc_transport_channel(): diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index f549bde0d47e..b706a87d2aaf 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -106,24 +106,26 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - BigtableTableAdminClient, - BigtableTableAdminAsyncClient, + (BigtableTableAdminClient, "grpc"), + (BigtableTableAdminAsyncClient, "grpc_asyncio"), ], ) -def test_bigtable_table_admin_client_from_service_account_info(client_class): +def test_bigtable_table_admin_client_from_service_account_info( + client_class, transport_name +): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, 
"from_service_account_info" ) as factory: factory.return_value = creds info = {"valid": True} - client = client_class.from_service_account_info(info) + client = client_class.from_service_account_info(info, transport=transport_name) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "bigtableadmin.googleapis.com:443" + assert client.transport._host == ("bigtableadmin.googleapis.com:443") @pytest.mark.parametrize( @@ -152,27 +154,33 @@ def test_bigtable_table_admin_client_service_account_always_use_jwt( @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - BigtableTableAdminClient, - BigtableTableAdminAsyncClient, + (BigtableTableAdminClient, "grpc"), + (BigtableTableAdminAsyncClient, "grpc_asyncio"), ], ) -def test_bigtable_table_admin_client_from_service_account_file(client_class): +def test_bigtable_table_admin_client_from_service_account_file( + client_class, transport_name +): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_file" ) as factory: factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - client = client_class.from_service_account_json("dummy/file/path.json") + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "bigtableadmin.googleapis.com:443" + assert client.transport._host == ("bigtableadmin.googleapis.com:443") def test_bigtable_table_admin_client_get_transport_class(): @@ -1559,7 +1567,7 @@ async def test_list_tables_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -1605,7 +1613,9 @@ async def test_list_tables_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_tables(request={})).pages: + async for page_ in ( + await client.list_tables(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -3851,7 +3861,7 @@ async def test_list_snapshots_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -3897,7 +3907,9 @@ async def test_list_snapshots_async_pages(): RuntimeError, ) pages = [] - async for page_ in (await client.list_snapshots(request={})).pages: + async for page_ in ( + await client.list_snapshots(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -5450,7 +5462,7 @@ async def test_list_backups_async_pager(): ) assert async_pager.next_page_token == "abc" responses = [] - async for response in async_pager: + async for response in async_pager: # pragma: no branch responses.append(response) assert len(responses) == 6 @@ -5496,7 +5508,9 @@ async def test_list_backups_async_pages(): 
RuntimeError, ) pages = [] - async for page_ in (await client.list_backups(request={})).pages: + async for page_ in ( + await client.list_backups(request={}) + ).pages: # pragma: no branch pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -6060,6 +6074,7 @@ def test_set_iam_policy_from_dict_foreign(): request={ "resource": "resource_value", "policy": policy_pb2.Policy(version=774), + "update_mask": field_mask_pb2.FieldMask(paths=["paths_value"]), } ) call.assert_called() @@ -6512,6 +6527,19 @@ def test_transport_adc(transport_class): adc.assert_called_once() +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + ], +) +def test_transport_kind(transport_name): + transport = BigtableTableAdminClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + def test_transport_grpc_default(): # A client should use the gRPC transport by default. client = BigtableTableAdminClient( @@ -6580,6 +6608,14 @@ def test_bigtable_table_admin_base_transport(): with pytest.raises(NotImplementedError): transport.operations_client + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + def test_bigtable_table_admin_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file @@ -6755,24 +6791,40 @@ def test_bigtable_table_admin_grpc_transport_client_cert_source_for_mtls( ) -def test_bigtable_table_admin_host_no_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_bigtable_table_admin_host_no_port(transport_name): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="bigtableadmin.googleapis.com" ), + transport=transport_name, ) - assert client.transport._host == "bigtableadmin.googleapis.com:443" + assert client.transport._host == ("bigtableadmin.googleapis.com:443") -def test_bigtable_table_admin_host_with_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_bigtable_table_admin_host_with_port(transport_name): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="bigtableadmin.googleapis.com:8000" ), + transport=transport_name, ) - assert client.transport._host == "bigtableadmin.googleapis.com:8000" + assert client.transport._host == ("bigtableadmin.googleapis.com:8000") def test_bigtable_table_admin_grpc_transport_channel(): diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py index d8b694bc0fa9..5745b9aebba2 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py @@ -80,24 +80,24 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - BigtableClient, - BigtableAsyncClient, + (BigtableClient, "grpc"), + (BigtableAsyncClient, "grpc_asyncio"), ], ) -def test_bigtable_client_from_service_account_info(client_class): +def test_bigtable_client_from_service_account_info(client_class, transport_name): creds = 
ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: factory.return_value = creds info = {"valid": True} - client = client_class.from_service_account_info(info) + client = client_class.from_service_account_info(info, transport=transport_name) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "bigtable.googleapis.com:443" + assert client.transport._host == ("bigtable.googleapis.com:443") @pytest.mark.parametrize( @@ -126,27 +126,31 @@ def test_bigtable_client_service_account_always_use_jwt( @pytest.mark.parametrize( - "client_class", + "client_class,transport_name", [ - BigtableClient, - BigtableAsyncClient, + (BigtableClient, "grpc"), + (BigtableAsyncClient, "grpc_asyncio"), ], ) -def test_bigtable_client_from_service_account_file(client_class): +def test_bigtable_client_from_service_account_file(client_class, transport_name): creds = ga_credentials.AnonymousCredentials() with mock.patch.object( service_account.Credentials, "from_service_account_file" ) as factory: factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - client = client_class.from_service_account_json("dummy/file/path.json") + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == "bigtable.googleapis.com:443" + assert client.transport._host == ("bigtable.googleapis.com:443") def test_bigtable_client_get_transport_class(): @@ -2485,6 +2489,19 @@ def test_transport_adc(transport_class): adc.assert_called_once() +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + ], +) +def test_transport_kind(transport_name): + transport = BigtableClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + def test_transport_grpc_default(): # A client should use the gRPC transport by default. 
client = BigtableClient( @@ -2533,6 +2550,14 @@ def test_bigtable_base_transport(): with pytest.raises(NotImplementedError): transport.close() + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + def test_bigtable_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file @@ -2703,24 +2728,40 @@ def test_bigtable_grpc_transport_client_cert_source_for_mtls(transport_class): ) -def test_bigtable_host_no_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_bigtable_host_no_port(transport_name): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="bigtable.googleapis.com" ), + transport=transport_name, ) - assert client.transport._host == "bigtable.googleapis.com:443" + assert client.transport._host == ("bigtable.googleapis.com:443") -def test_bigtable_host_with_port(): +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + ], +) +def test_bigtable_host_with_port(transport_name): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="bigtable.googleapis.com:8000" ), + transport=transport_name, ) - assert client.transport._host == "bigtable.googleapis.com:8000" + assert client.transport._host == ("bigtable.googleapis.com:8000") def test_bigtable_grpc_transport_channel(): From f12955cb792d6612af7ff6bb06ca6473b4f09f13 Mon Sep 17 00:00:00 2001 From: Mariatta Wijaya Date: Thu, 14 Apr 2022 08:30:56 -0700 Subject: [PATCH 602/892] feat: App Profile multi cluster routing support with specified cluster ids (#549) * feat: App Profile multi cluster routing support with specified cluster ids Add the `multi_cluster_ids` parameter to AppProfile. It is used when the routing policy type is set to "any". Add tests, docstrings. Fix some typos. Closes internal issue 213627978 * Add system test for app profile with multi cluster ids. * Specify longer timeout --- .../google/cloud/bigtable/app_profile.py | 18 ++- .../google/cloud/bigtable/instance.py | 7 + .../tests/system/test_instance_admin.py | 101 +++++++++++- .../tests/unit/test_app_profile.py | 150 +++++++++++++++++- 4 files changed, 267 insertions(+), 9 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/app_profile.py b/packages/google-cloud-bigtable/google/cloud/bigtable/app_profile.py index 5d6dbdb81a1e..8cde66146f9a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/app_profile.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/app_profile.py @@ -59,6 +59,11 @@ class AppProfile(object): when routing_policy_type is ROUTING_POLICY_TYPE_SINGLE. + :type: multi_cluster_ids: list + :param: multi_cluster_ids: (Optional) The set of clusters to route to. + The order is ignored; clusters will be tried in order of distance. + If left empty, all clusters are eligible. 
+ :type: allow_transactional_writes: bool :param: allow_transactional_writes: (Optional) If true, allow transactional writes for @@ -72,6 +77,7 @@ def __init__( routing_policy_type=None, description=None, cluster_id=None, + multi_cluster_ids=None, allow_transactional_writes=None, ): self.app_profile_id = app_profile_id @@ -79,6 +85,7 @@ def __init__( self.routing_policy_type = routing_policy_type self.description = description self.cluster_id = cluster_id + self.multi_cluster_ids = multi_cluster_ids self.allow_transactional_writes = allow_transactional_writes @property @@ -184,13 +191,17 @@ def _update_from_pb(self, app_profile_pb): self.routing_policy_type = None self.allow_transactional_writes = None self.cluster_id = None - + self.multi_cluster_ids = None self.description = app_profile_pb.description routing_policy_type = None if app_profile_pb._pb.HasField("multi_cluster_routing_use_any"): routing_policy_type = RoutingPolicyType.ANY self.allow_transactional_writes = False + if app_profile_pb.multi_cluster_routing_use_any.cluster_ids: + self.multi_cluster_ids = ( + app_profile_pb.multi_cluster_routing_use_any.cluster_ids + ) else: routing_policy_type = RoutingPolicyType.SINGLE self.cluster_id = app_profile_pb.single_cluster_routing.cluster_id @@ -215,7 +226,9 @@ def _to_pb(self): if self.routing_policy_type == RoutingPolicyType.ANY: multi_cluster_routing_use_any = ( - instance.AppProfile.MultiClusterRoutingUseAny() + instance.AppProfile.MultiClusterRoutingUseAny( + cluster_ids=self.multi_cluster_ids + ) ) else: single_cluster_routing = instance.AppProfile.SingleClusterRouting( @@ -312,6 +325,7 @@ def update(self, ignore_warnings=None): ``routing_policy_type`` ``description`` ``cluster_id`` + ``multi_cluster_ids`` ``allow_transactional_writes`` For example: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py index e838ec9adfd4..6d092cefd14f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py @@ -711,6 +711,7 @@ def app_profile( routing_policy_type=None, description=None, cluster_id=None, + multi_cluster_ids=None, allow_transactional_writes=None, ): """Factory to create AppProfile associated with this instance. @@ -742,6 +743,11 @@ def app_profile( when routing_policy_type is ROUTING_POLICY_TYPE_SINGLE. + :type: multi_cluster_ids: list + :param: multi_cluster_ids: (Optional) The set of clusters to route to. + The order is ignored; clusters will be tried in order of distance. + If left empty, all clusters are eligible. 
+ :type: allow_transactional_writes: bool :param: allow_transactional_writes: (Optional) If true, allow transactional writes for @@ -756,6 +762,7 @@ def app_profile( routing_policy_type=routing_policy_type, description=description, cluster_id=cluster_id, + multi_cluster_ids=multi_cluster_ids, allow_transactional_writes=allow_transactional_writes, ) diff --git a/packages/google-cloud-bigtable/tests/system/test_instance_admin.py b/packages/google-cloud-bigtable/tests/system/test_instance_admin.py index 8c09f6d8711b..a2ad229af828 100644 --- a/packages/google-cloud-bigtable/tests/system/test_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/system/test_instance_admin.py @@ -24,6 +24,7 @@ def _create_app_profile_helper( routing_policy_type, description=None, cluster_id=None, + multi_cluster_ids=None, allow_transactional_writes=None, ignore_warnings=None, ): @@ -33,6 +34,7 @@ def _create_app_profile_helper( routing_policy_type=routing_policy_type, description=description, cluster_id=cluster_id, + multi_cluster_ids=multi_cluster_ids, allow_transactional_writes=allow_transactional_writes, ) assert app_profile.allow_transactional_writes == allow_transactional_writes @@ -40,7 +42,7 @@ def _create_app_profile_helper( app_profile.create(ignore_warnings=ignore_warnings) # Load a different app_profile objec form the server and - # verrify that it is the same + # verify that it is the same alt_app_profile = instance.app_profile(app_profile_id) alt_app_profile.reload() @@ -50,6 +52,7 @@ def _create_app_profile_helper( app_profile.description == alt_app_profile.description assert not app_profile.allow_transactional_writes assert not alt_app_profile.allow_transactional_writes + assert app_profile.multi_cluster_ids == alt_app_profile.multi_cluster_ids return app_profile @@ -67,6 +70,7 @@ def _modify_app_profile_helper( routing_policy_type, description=None, cluster_id=None, + multi_cluster_ids=None, allow_transactional_writes=None, ignore_warnings=None, ): @@ -75,6 +79,7 @@ def _modify_app_profile_helper( routing_policy_type=routing_policy_type, description=description, cluster_id=cluster_id, + multi_cluster_ids=multi_cluster_ids, allow_transactional_writes=allow_transactional_writes, ) @@ -87,6 +92,7 @@ def _modify_app_profile_helper( assert alt_profile.description == description assert alt_profile.routing_policy_type == routing_policy_type assert alt_profile.cluster_id == cluster_id + assert alt_profile.multi_cluster_ids == multi_cluster_ids assert alt_profile.allow_transactional_writes == allow_transactional_writes @@ -395,6 +401,99 @@ def test_instance_create_w_two_clusters( _delete_app_profile_helper(app_profile) +def test_instance_create_app_profile_create_with_multi_cluster_ids( + admin_client, + unique_suffix, + admin_instance_populated, + admin_cluster, + location_id, + instance_labels, + instances_to_delete, + skip_on_emulator, +): + alt_instance_id = f"dif{unique_suffix}" + instance = admin_client.instance( + alt_instance_id, + instance_type=enums.Instance.Type.PRODUCTION, + labels=instance_labels, + ) + + serve_nodes = 1 + + alt_cluster_id_1 = f"{alt_instance_id}-c1" + cluster_1 = instance.cluster( + alt_cluster_id_1, + location_id=location_id, + serve_nodes=serve_nodes, + default_storage_type=enums.StorageType.HDD, + ) + + alt_cluster_id_2 = f"{alt_instance_id}-c2" + location_id_2 = "us-central1-f" + cluster_2 = instance.cluster( + alt_cluster_id_2, + location_id=location_id_2, + serve_nodes=serve_nodes, + default_storage_type=enums.StorageType.HDD, + ) + operation = 
instance.create(clusters=[cluster_1, cluster_2]) + instances_to_delete.append(instance) + operation.result(timeout=240) # Ensure the operation completes. + + # Create a new instance and make sure it is the same. + instance_alt = admin_client.instance(alt_instance_id) + instance_alt.reload() + + assert instance == instance_alt + assert instance.display_name == instance_alt.display_name + assert instance.type_ == instance_alt.type_ + + clusters, failed_locations = instance_alt.list_clusters() + assert failed_locations == [] + alt_cluster_1, alt_cluster_2 = sorted(clusters, key=lambda x: x.name) + + assert cluster_1.location_id == alt_cluster_1.location_id + assert cluster_2.location_id == alt_cluster_2.location_id + + # Test create app profile with multi_cluster_routing policy + app_profiles_to_delete = [] + description = "routing policy-multi" + app_profile_id_1 = "app_profile_id_1" + routing = enums.RoutingPolicyType.ANY + + multi_cluster_ids = [alt_cluster_id_1, alt_cluster_id_2] + app_profile_1 = _create_app_profile_helper( + app_profile_id_1, + instance, + routing_policy_type=routing, + description=description, + ignore_warnings=True, + multi_cluster_ids=multi_cluster_ids, + ) + assert len(app_profile_1.multi_cluster_ids) == len(multi_cluster_ids) + assert app_profile_1.multi_cluster_ids == multi_cluster_ids + + # remove a cluster from the multi_cluster_ids + app_profile_1.multi_cluster_ids.pop() + app_profile_1.update() + + assert len(app_profile_1.multi_cluster_ids) == 1 + assert app_profile_1.multi_cluster_ids == [alt_cluster_id_1] + + # add a cluster from the multi_cluster_ids + app_profile_1.multi_cluster_ids.append(alt_cluster_id_2) + app_profile_1.update() + + assert len(app_profile_1.multi_cluster_ids) == 2 + assert app_profile_1.multi_cluster_ids == [alt_cluster_id_1, alt_cluster_id_2] + + app_profiles_to_delete.append(app_profile_1) + + # # Test delete app profiles + for app_profile in app_profiles_to_delete: + _delete_app_profile_helper(app_profile) + + def test_instance_create_w_two_clusters_cmek( admin_client, unique_suffix, diff --git a/packages/google-cloud-bigtable/tests/unit/test_app_profile.py b/packages/google-cloud-bigtable/tests/unit/test_app_profile.py index d45c9ca0aeea..660ee78998b0 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_app_profile.py +++ b/packages/google-cloud-bigtable/tests/unit/test_app_profile.py @@ -24,6 +24,7 @@ PROJECT, INSTANCE_ID, APP_PROFILE_ID ) CLUSTER_ID = "cluster-id" +CLUSTER_ID_2 = "cluster-id-2" OP_ID = 8765 OP_NAME = "operations/projects/{}/instances/{}/appProfiles/{}/operations/{}".format( PROJECT, INSTANCE_ID, APP_PROFILE_ID, OP_ID @@ -54,6 +55,7 @@ def test_app_profile_constructor_defaults(): assert app_profile.routing_policy_type is None assert app_profile.description is None assert app_profile.cluster_id is None + assert app_profile.multi_cluster_ids is None assert app_profile.allow_transactional_writes is None @@ -92,9 +94,32 @@ def test_app_profile_constructor_explicit(): assert app_profile2.routing_policy_type == SINGLE assert app_profile2.description == DESCRIPTION_2 assert app_profile2.cluster_id == CLUSTER_ID + assert app_profile2.multi_cluster_ids is None assert app_profile2.allow_transactional_writes == ALLOW_WRITES +def test_app_profile_constructor_multi_cluster_ids(): + from google.cloud.bigtable.enums import RoutingPolicyType + + ANY = RoutingPolicyType.ANY + DESCRIPTION_1 = "routing policy any" + client = _Client(PROJECT) + instance = _Instance(INSTANCE_ID, client) + + app_profile1 = _make_app_profile( + 
APP_PROFILE_ID, + instance, + routing_policy_type=ANY, + description=DESCRIPTION_1, + multi_cluster_ids=[CLUSTER_ID, CLUSTER_ID_2], + ) + assert app_profile1.app_profile_id == APP_PROFILE_ID + assert app_profile1._instance is instance + assert app_profile1.routing_policy_type == ANY + assert app_profile1.description == DESCRIPTION_1 + assert app_profile1.multi_cluster_ids == [CLUSTER_ID, CLUSTER_ID_2] + + def test_app_profile_name(): credentials = _make_credentials() client = _make_client(project=PROJECT, credentials=credentials, admin=True) @@ -147,13 +172,44 @@ def test_app_profile_from_pb_success_w_routing_any(): client = _Client(PROJECT) instance = _Instance(INSTANCE_ID, client) - desctiption = "routing any" + description = "routing any" routing = RoutingPolicyType.ANY multi_cluster_routing_use_any = data_v2_pb2.AppProfile.MultiClusterRoutingUseAny() app_profile_pb = data_v2_pb2.AppProfile( name=APP_PROFILE_NAME, - description=desctiption, + description=description, + multi_cluster_routing_use_any=multi_cluster_routing_use_any, + ) + + app_profile = AppProfile.from_pb(app_profile_pb, instance) + assert isinstance(app_profile, AppProfile) + assert app_profile._instance is instance + assert app_profile.app_profile_id == APP_PROFILE_ID + assert app_profile.description == description + assert app_profile.routing_policy_type == routing + assert app_profile.cluster_id is None + assert app_profile.multi_cluster_ids is None + assert app_profile.allow_transactional_writes is False + + +def test_app_profile_from_pb_success_w_routing_any_multi_cluster_ids(): + from google.cloud.bigtable_admin_v2.types import instance as data_v2_pb2 + from google.cloud.bigtable.app_profile import AppProfile + from google.cloud.bigtable.enums import RoutingPolicyType + + client = _Client(PROJECT) + instance = _Instance(INSTANCE_ID, client) + + description = "routing any" + routing = RoutingPolicyType.ANY + multi_cluster_routing_use_any = data_v2_pb2.AppProfile.MultiClusterRoutingUseAny( + cluster_ids=[CLUSTER_ID, CLUSTER_ID_2] + ) + + app_profile_pb = data_v2_pb2.AppProfile( + name=APP_PROFILE_NAME, + description=description, multi_cluster_routing_use_any=multi_cluster_routing_use_any, ) @@ -161,10 +217,11 @@ def test_app_profile_from_pb_success_w_routing_any(): assert isinstance(app_profile, AppProfile) assert app_profile._instance is instance assert app_profile.app_profile_id == APP_PROFILE_ID - assert app_profile.description == desctiption + assert app_profile.description == description assert app_profile.routing_policy_type == routing assert app_profile.cluster_id is None assert app_profile.allow_transactional_writes is False + assert app_profile.multi_cluster_ids == [CLUSTER_ID, CLUSTER_ID_2] def test_app_profile_from_pb_success_w_routing_single(): @@ -175,7 +232,7 @@ def test_app_profile_from_pb_success_w_routing_single(): client = _Client(PROJECT) instance = _Instance(INSTANCE_ID, client) - desctiption = "routing single" + description = "routing single" allow_transactional_writes = True routing = RoutingPolicyType.SINGLE single_cluster_routing = data_v2_pb2.AppProfile.SingleClusterRouting( @@ -185,7 +242,7 @@ def test_app_profile_from_pb_success_w_routing_single(): app_profile_pb = data_v2_pb2.AppProfile( name=APP_PROFILE_NAME, - description=desctiption, + description=description, single_cluster_routing=single_cluster_routing, ) @@ -193,9 +250,10 @@ def test_app_profile_from_pb_success_w_routing_single(): assert isinstance(app_profile, AppProfile) assert app_profile._instance is instance assert 
app_profile.app_profile_id == APP_PROFILE_ID - assert app_profile.description == desctiption + assert app_profile.description == description assert app_profile.routing_policy_type == routing assert app_profile.cluster_id == CLUSTER_ID + assert app_profile.multi_cluster_ids is None assert app_profile.allow_transactional_writes == allow_transactional_writes @@ -290,6 +348,7 @@ def test_app_profile_reload_w_routing_any(): assert app_profile.routing_policy_type == routing assert app_profile.description == description assert app_profile.cluster_id is None + assert app_profile.multi_cluster_ids is None assert app_profile.allow_transactional_writes is None # Perform the method and check the result. @@ -298,6 +357,7 @@ def test_app_profile_reload_w_routing_any(): assert app_profile.routing_policy_type == RoutingPolicyType.SINGLE assert app_profile.description == description_from_server assert app_profile.cluster_id == cluster_id_from_server + assert app_profile.multi_cluster_ids is None assert app_profile.allow_transactional_writes == allow_transactional_writes @@ -394,6 +454,7 @@ def test_app_profile_create_w_routing_any(): assert result.description == description assert result.allow_transactional_writes is False assert result.cluster_id is None + assert result.multi_cluster_ids is None def test_app_profile_create_w_routing_single(): @@ -454,6 +515,7 @@ def test_app_profile_create_w_routing_single(): assert result.description == description assert result.allow_transactional_writes == allow_writes assert result.cluster_id == CLUSTER_ID + assert result.multi_cluster_ids is None def test_app_profile_create_w_wrong_routing_policy(): @@ -540,6 +602,82 @@ def test_app_profile_update_w_routing_any(): ) +def test_app_profile_update_w_routing_any_multi_cluster_ids(): + from google.longrunning import operations_pb2 + from google.protobuf.any_pb2 import Any + from google.cloud.bigtable_admin_v2.types import ( + bigtable_instance_admin as messages_v2_pb2, + ) + from google.cloud.bigtable.enums import RoutingPolicyType + from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( + BigtableInstanceAdminClient, + ) + from google.protobuf import field_mask_pb2 + + credentials = _make_credentials() + client = _make_client(project=PROJECT, credentials=credentials, admin=True) + instance = client.instance(INSTANCE_ID) + + routing = RoutingPolicyType.SINGLE + description = "to routing policy single" + allow_writes = True + app_profile = _make_app_profile( + APP_PROFILE_ID, + instance, + routing_policy_type=routing, + description=description, + cluster_id=CLUSTER_ID, + allow_transactional_writes=allow_writes, + multi_cluster_ids=[CLUSTER_ID, CLUSTER_ID_2], + ) + + # Create response_pb + metadata = messages_v2_pb2.UpdateAppProfileMetadata() + type_url = "type.googleapis.com/{}".format( + messages_v2_pb2.UpdateAppProfileMetadata._meta._pb.DESCRIPTOR.full_name + ) + response_pb = operations_pb2.Operation( + name=OP_NAME, + metadata=Any(type_url=type_url, value=metadata._pb.SerializeToString()), + ) + + # Patch the stub used by the API method. + instance_api = mock.create_autospec(BigtableInstanceAdminClient) + # Mock api calls + instance_api.app_profile_path.return_value = ( + "projects/project/instances/instance-id/appProfiles/app-profile-id" + ) + + client._instance_admin_client = instance_api + + # Perform the method and check the result. 
+ ignore_warnings = True + expected_request_update_mask = field_mask_pb2.FieldMask( + paths=["description", "single_cluster_routing"] + ) + + expected_request = { + "request": { + "app_profile": app_profile._to_pb(), + "update_mask": expected_request_update_mask, + "ignore_warnings": ignore_warnings, + } + } + + instance_api.update_app_profile.return_value = response_pb + app_profile._instance._client._instance_admin_client = instance_api + result = app_profile.update(ignore_warnings=ignore_warnings) + actual_request = client._instance_admin_client.update_app_profile.call_args_list[ + 0 + ].kwargs + + assert actual_request == expected_request + assert ( + result.metadata.type_url + == "type.googleapis.com/google.bigtable.admin.v2.UpdateAppProfileMetadata" + ) + + def test_app_profile_update_w_routing_single(): from google.longrunning import operations_pb2 from google.protobuf.any_pb2 import Any From 0134af24130cd1b3514f321bc3aeac0eaf350a28 Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Thu, 14 Apr 2022 08:49:20 -0700 Subject: [PATCH 603/892] chore(main): release 2.9.0 (#569) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- packages/google-cloud-bigtable/CHANGELOG.md | 18 ++++++++++++++++++ packages/google-cloud-bigtable/setup.py | 2 +- 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 90f4b88136be..45f9f109b98e 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,24 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.9.0](https://github.com/googleapis/python-bigtable/compare/v2.8.1...v2.9.0) (2022-04-14) + + +### Features + +* App Profile multi cluster routing support with specified cluster ids ([#549](https://github.com/googleapis/python-bigtable/issues/549)) ([a0ed5b5](https://github.com/googleapis/python-bigtable/commit/a0ed5b5dfda1f3980b1a8eb349b2b5d8ab428a4b)) +* AuditConfig for IAM v1 ([4e50278](https://github.com/googleapis/python-bigtable/commit/4e50278c73f608a7c493692d8d17e7dd2aa7ba44)) + + +### Bug Fixes + +* **deps:** require grpc-google-iam-v1 >=0.12.4 ([4e50278](https://github.com/googleapis/python-bigtable/commit/4e50278c73f608a7c493692d8d17e7dd2aa7ba44)) + + +### Documentation + +* fix type in docstring for map fields ([4e50278](https://github.com/googleapis/python-bigtable/commit/4e50278c73f608a7c493692d8d17e7dd2aa7ba44)) + ### [2.8.1](https://github.com/googleapis/python-bigtable/compare/v2.8.0...v2.8.1) (2022-04-07) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 04afa44a1cd3..cd65af765058 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -22,7 +22,7 @@ name = "google-cloud-bigtable" description = "Google Cloud Bigtable API client library" -version = "2.8.1" +version = "2.9.0" # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' From 402d99ab64c2eccee402e6af3b8a5f0217fb3bb7 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Thu, 14 Apr 2022 18:42:47 +0200 Subject: [PATCH 604/892] chore(deps): update dependency google-cloud-bigtable to v2.9.0 (#570) --- packages/google-cloud-bigtable/samples/beam/requirements.txt | 2 +- packages/google-cloud-bigtable/samples/hello/requirements.txt | 2 +- 
.../samples/instanceadmin/requirements.txt | 2 +- .../google-cloud-bigtable/samples/metricscaler/requirements.txt | 2 +- .../google-cloud-bigtable/samples/quickstart/requirements.txt | 2 +- .../samples/snippets/filters/requirements.txt | 2 +- .../samples/snippets/reads/requirements.txt | 2 +- .../samples/snippets/writes/requirements.txt | 2 +- .../google-cloud-bigtable/samples/tableadmin/requirements.txt | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index 4805140e7179..a3c7e462b65c 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ apache-beam==2.37.0 -google-cloud-bigtable==2.8.1 +google-cloud-bigtable==2.9.0 google-cloud-core==2.3.0 diff --git a/packages/google-cloud-bigtable/samples/hello/requirements.txt b/packages/google-cloud-bigtable/samples/hello/requirements.txt index 0ba29dd99630..7dcdef580195 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.8.1 +google-cloud-bigtable==2.9.0 google-cloud-core==2.3.0 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt index f0d4e821ae25..70fe65d74c1a 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.8.1 +google-cloud-bigtable==2.9.0 backoff==1.11.1 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index b1de8c9a38f0..070d47be5ee2 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.8.1 +google-cloud-bigtable==2.9.0 google-cloud-monitoring==2.9.1 diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt index e9a9c1753909..a555cf279ce2 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.8.1 +google-cloud-bigtable==2.9.0 diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt index a6e307e50927..d74172ad3bda 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.8.1 +google-cloud-bigtable==2.9.0 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt index a6e307e50927..d74172ad3bda 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.8.1 +google-cloud-bigtable==2.9.0 snapshottest==0.6.0 \ No newline at 
end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt index ceaefad7eede..3d3bebbaf8e6 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.8.1 \ No newline at end of file +google-cloud-bigtable==2.9.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt index e9a9c1753909..a555cf279ce2 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.8.1 +google-cloud-bigtable==2.9.0 From 5cc445b14d3b6fde0b666ce06b9fda0da75513fd Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Wed, 20 Apr 2022 20:14:28 -0400 Subject: [PATCH 605/892] chore(python): add nox session to sort python imports (#572) Source-Link: https://github.com/googleapis/synthtool/commit/1b71c10e20de7ed3f97f692f99a0e3399b67049f Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:00c9d764fd1cd56265f12a5ef4b99a0c9e87cf261018099141e2ca5158890416 Co-authored-by: Owl Bot --- .../.github/.OwlBot.lock.yaml | 4 +-- packages/google-cloud-bigtable/noxfile.py | 27 ++++++++++++++++--- .../samples/beam/noxfile.py | 21 +++++++++++++++ .../samples/hello/noxfile.py | 21 +++++++++++++++ .../samples/hello_happybase/noxfile.py | 21 +++++++++++++++ .../samples/instanceadmin/noxfile.py | 21 +++++++++++++++ .../samples/metricscaler/noxfile.py | 21 +++++++++++++++ .../samples/quickstart/noxfile.py | 21 +++++++++++++++ .../samples/quickstart_happybase/noxfile.py | 21 +++++++++++++++ .../samples/snippets/filters/noxfile.py | 21 +++++++++++++++ .../samples/snippets/reads/noxfile.py | 21 +++++++++++++++ .../samples/snippets/writes/noxfile.py | 21 +++++++++++++++ .../samples/tableadmin/noxfile.py | 21 +++++++++++++++ 13 files changed, 257 insertions(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index bc893c979e20..7c454abf76f3 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. 
docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:8a5d3f6a2e43ed8293f34e06a2f56931d1e88a2694c3bb11b15df4eb256ad163 -# created: 2022-04-06T10:30:21.687684602Z + digest: sha256:00c9d764fd1cd56265f12a5ef4b99a0c9e87cf261018099141e2ca5158890416 +# created: 2022-04-20T23:42:53.970438194Z diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index 9d1df22aef5e..740697a02fac 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -25,7 +25,8 @@ import nox BLACK_VERSION = "black==22.3.0" -BLACK_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"] +ISORT_VERSION = "isort==5.10.1" +LINT_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"] DEFAULT_PYTHON_VERSION = "3.8" @@ -85,7 +86,7 @@ def lint(session): session.run( "black", "--check", - *BLACK_PATHS, + *LINT_PATHS, ) session.run("flake8", "google", "tests") @@ -96,7 +97,27 @@ def blacken(session): session.install(BLACK_VERSION) session.run( "black", - *BLACK_PATHS, + *LINT_PATHS, + ) + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def format(session): + """ + Run isort to sort imports. Then run black + to format code to uniform standard. + """ + session.install(BLACK_VERSION, ISORT_VERSION) + # Use the --fss option to sort imports using strict alphabetical order. + # See https://pycqa.github.io/isort/docs/configuration/options.html#force-sort-within-sections + session.run( + "isort", + "--fss", + *LINT_PATHS, + ) + session.run( + "black", + *LINT_PATHS, ) diff --git a/packages/google-cloud-bigtable/samples/beam/noxfile.py b/packages/google-cloud-bigtable/samples/beam/noxfile.py index 05df27cdb0fc..960a011c2336 100644 --- a/packages/google-cloud-bigtable/samples/beam/noxfile.py +++ b/packages/google-cloud-bigtable/samples/beam/noxfile.py @@ -30,6 +30,7 @@ # WARNING - WARNING - WARNING - WARNING - WARNING BLACK_VERSION = "black==22.3.0" +ISORT_VERSION = "isort==5.10.1" # Copy `noxfile_config.py` to your directory and modify it instead. @@ -166,12 +167,32 @@ def lint(session: nox.sessions.Session) -> None: @nox.session def blacken(session: nox.sessions.Session) -> None: + """Run black. Format code to uniform standard.""" session.install(BLACK_VERSION) python_files = [path for path in os.listdir(".") if path.endswith(".py")] session.run("black", *python_files) +# +# format = isort + black +# + +@nox.session +def format(session: nox.sessions.Session) -> None: + """ + Run isort to sort imports. Then run black + to format code to uniform standard. + """ + session.install(BLACK_VERSION, ISORT_VERSION) + python_files = [path for path in os.listdir(".") if path.endswith(".py")] + + # Use the --fss option to sort imports using strict alphabetical order. + # See https://pycqa.github.io/isort/docs/configuration/options.html#force-sort-within-sections + session.run("isort", "--fss", *python_files) + session.run("black", *python_files) + + # # Sample Tests # diff --git a/packages/google-cloud-bigtable/samples/hello/noxfile.py b/packages/google-cloud-bigtable/samples/hello/noxfile.py index 949e0fde9ae1..38bb0a572b81 100644 --- a/packages/google-cloud-bigtable/samples/hello/noxfile.py +++ b/packages/google-cloud-bigtable/samples/hello/noxfile.py @@ -30,6 +30,7 @@ # WARNING - WARNING - WARNING - WARNING - WARNING BLACK_VERSION = "black==22.3.0" +ISORT_VERSION = "isort==5.10.1" # Copy `noxfile_config.py` to your directory and modify it instead. 
@@ -168,12 +169,32 @@ def lint(session: nox.sessions.Session) -> None: @nox.session def blacken(session: nox.sessions.Session) -> None: + """Run black. Format code to uniform standard.""" session.install(BLACK_VERSION) python_files = [path for path in os.listdir(".") if path.endswith(".py")] session.run("black", *python_files) +# +# format = isort + black +# + +@nox.session +def format(session: nox.sessions.Session) -> None: + """ + Run isort to sort imports. Then run black + to format code to uniform standard. + """ + session.install(BLACK_VERSION, ISORT_VERSION) + python_files = [path for path in os.listdir(".") if path.endswith(".py")] + + # Use the --fss option to sort imports using strict alphabetical order. + # See https://pycqa.github.io/isort/docs/configuration/options.html#force-sort-within-sections + session.run("isort", "--fss", *python_files) + session.run("black", *python_files) + + # # Sample Tests # diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py b/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py index 949e0fde9ae1..38bb0a572b81 100644 --- a/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py +++ b/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py @@ -30,6 +30,7 @@ # WARNING - WARNING - WARNING - WARNING - WARNING BLACK_VERSION = "black==22.3.0" +ISORT_VERSION = "isort==5.10.1" # Copy `noxfile_config.py` to your directory and modify it instead. @@ -168,12 +169,32 @@ def lint(session: nox.sessions.Session) -> None: @nox.session def blacken(session: nox.sessions.Session) -> None: + """Run black. Format code to uniform standard.""" session.install(BLACK_VERSION) python_files = [path for path in os.listdir(".") if path.endswith(".py")] session.run("black", *python_files) +# +# format = isort + black +# + +@nox.session +def format(session: nox.sessions.Session) -> None: + """ + Run isort to sort imports. Then run black + to format code to uniform standard. + """ + session.install(BLACK_VERSION, ISORT_VERSION) + python_files = [path for path in os.listdir(".") if path.endswith(".py")] + + # Use the --fss option to sort imports using strict alphabetical order. + # See https://pycqa.github.io/isort/docs/configuration/options.html#force-sort-within-sections + session.run("isort", "--fss", *python_files) + session.run("black", *python_files) + + # # Sample Tests # diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py b/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py index 949e0fde9ae1..38bb0a572b81 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py +++ b/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py @@ -30,6 +30,7 @@ # WARNING - WARNING - WARNING - WARNING - WARNING BLACK_VERSION = "black==22.3.0" +ISORT_VERSION = "isort==5.10.1" # Copy `noxfile_config.py` to your directory and modify it instead. @@ -168,12 +169,32 @@ def lint(session: nox.sessions.Session) -> None: @nox.session def blacken(session: nox.sessions.Session) -> None: + """Run black. Format code to uniform standard.""" session.install(BLACK_VERSION) python_files = [path for path in os.listdir(".") if path.endswith(".py")] session.run("black", *python_files) +# +# format = isort + black +# + +@nox.session +def format(session: nox.sessions.Session) -> None: + """ + Run isort to sort imports. Then run black + to format code to uniform standard. 
+ """ + session.install(BLACK_VERSION, ISORT_VERSION) + python_files = [path for path in os.listdir(".") if path.endswith(".py")] + + # Use the --fss option to sort imports using strict alphabetical order. + # See https://pycqa.github.io/isort/docs/configuration/options.html#force-sort-within-sections + session.run("isort", "--fss", *python_files) + session.run("black", *python_files) + + # # Sample Tests # diff --git a/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py b/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py index 949e0fde9ae1..38bb0a572b81 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py +++ b/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py @@ -30,6 +30,7 @@ # WARNING - WARNING - WARNING - WARNING - WARNING BLACK_VERSION = "black==22.3.0" +ISORT_VERSION = "isort==5.10.1" # Copy `noxfile_config.py` to your directory and modify it instead. @@ -168,12 +169,32 @@ def lint(session: nox.sessions.Session) -> None: @nox.session def blacken(session: nox.sessions.Session) -> None: + """Run black. Format code to uniform standard.""" session.install(BLACK_VERSION) python_files = [path for path in os.listdir(".") if path.endswith(".py")] session.run("black", *python_files) +# +# format = isort + black +# + +@nox.session +def format(session: nox.sessions.Session) -> None: + """ + Run isort to sort imports. Then run black + to format code to uniform standard. + """ + session.install(BLACK_VERSION, ISORT_VERSION) + python_files = [path for path in os.listdir(".") if path.endswith(".py")] + + # Use the --fss option to sort imports using strict alphabetical order. + # See https://pycqa.github.io/isort/docs/configuration/options.html#force-sort-within-sections + session.run("isort", "--fss", *python_files) + session.run("black", *python_files) + + # # Sample Tests # diff --git a/packages/google-cloud-bigtable/samples/quickstart/noxfile.py b/packages/google-cloud-bigtable/samples/quickstart/noxfile.py index 949e0fde9ae1..38bb0a572b81 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/noxfile.py +++ b/packages/google-cloud-bigtable/samples/quickstart/noxfile.py @@ -30,6 +30,7 @@ # WARNING - WARNING - WARNING - WARNING - WARNING BLACK_VERSION = "black==22.3.0" +ISORT_VERSION = "isort==5.10.1" # Copy `noxfile_config.py` to your directory and modify it instead. @@ -168,12 +169,32 @@ def lint(session: nox.sessions.Session) -> None: @nox.session def blacken(session: nox.sessions.Session) -> None: + """Run black. Format code to uniform standard.""" session.install(BLACK_VERSION) python_files = [path for path in os.listdir(".") if path.endswith(".py")] session.run("black", *python_files) +# +# format = isort + black +# + +@nox.session +def format(session: nox.sessions.Session) -> None: + """ + Run isort to sort imports. Then run black + to format code to uniform standard. + """ + session.install(BLACK_VERSION, ISORT_VERSION) + python_files = [path for path in os.listdir(".") if path.endswith(".py")] + + # Use the --fss option to sort imports using strict alphabetical order. 
+ # See https://pycqa.github.io/isort/docs/configuration/options.html#force-sort-within-sections + session.run("isort", "--fss", *python_files) + session.run("black", *python_files) + + # # Sample Tests # diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py b/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py index 949e0fde9ae1..38bb0a572b81 100644 --- a/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py +++ b/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py @@ -30,6 +30,7 @@ # WARNING - WARNING - WARNING - WARNING - WARNING BLACK_VERSION = "black==22.3.0" +ISORT_VERSION = "isort==5.10.1" # Copy `noxfile_config.py` to your directory and modify it instead. @@ -168,12 +169,32 @@ def lint(session: nox.sessions.Session) -> None: @nox.session def blacken(session: nox.sessions.Session) -> None: + """Run black. Format code to uniform standard.""" session.install(BLACK_VERSION) python_files = [path for path in os.listdir(".") if path.endswith(".py")] session.run("black", *python_files) +# +# format = isort + black +# + +@nox.session +def format(session: nox.sessions.Session) -> None: + """ + Run isort to sort imports. Then run black + to format code to uniform standard. + """ + session.install(BLACK_VERSION, ISORT_VERSION) + python_files = [path for path in os.listdir(".") if path.endswith(".py")] + + # Use the --fss option to sort imports using strict alphabetical order. + # See https://pycqa.github.io/isort/docs/configuration/options.html#force-sort-within-sections + session.run("isort", "--fss", *python_files) + session.run("black", *python_files) + + # # Sample Tests # diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py index 949e0fde9ae1..38bb0a572b81 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py @@ -30,6 +30,7 @@ # WARNING - WARNING - WARNING - WARNING - WARNING BLACK_VERSION = "black==22.3.0" +ISORT_VERSION = "isort==5.10.1" # Copy `noxfile_config.py` to your directory and modify it instead. @@ -168,12 +169,32 @@ def lint(session: nox.sessions.Session) -> None: @nox.session def blacken(session: nox.sessions.Session) -> None: + """Run black. Format code to uniform standard.""" session.install(BLACK_VERSION) python_files = [path for path in os.listdir(".") if path.endswith(".py")] session.run("black", *python_files) +# +# format = isort + black +# + +@nox.session +def format(session: nox.sessions.Session) -> None: + """ + Run isort to sort imports. Then run black + to format code to uniform standard. + """ + session.install(BLACK_VERSION, ISORT_VERSION) + python_files = [path for path in os.listdir(".") if path.endswith(".py")] + + # Use the --fss option to sort imports using strict alphabetical order. 
+ # See https://pycqa.github.io/isort/docs/configuration/options.html#force-sort-within-sections + session.run("isort", "--fss", *python_files) + session.run("black", *python_files) + + # # Sample Tests # diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py index 949e0fde9ae1..38bb0a572b81 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py @@ -30,6 +30,7 @@ # WARNING - WARNING - WARNING - WARNING - WARNING BLACK_VERSION = "black==22.3.0" +ISORT_VERSION = "isort==5.10.1" # Copy `noxfile_config.py` to your directory and modify it instead. @@ -168,12 +169,32 @@ def lint(session: nox.sessions.Session) -> None: @nox.session def blacken(session: nox.sessions.Session) -> None: + """Run black. Format code to uniform standard.""" session.install(BLACK_VERSION) python_files = [path for path in os.listdir(".") if path.endswith(".py")] session.run("black", *python_files) +# +# format = isort + black +# + +@nox.session +def format(session: nox.sessions.Session) -> None: + """ + Run isort to sort imports. Then run black + to format code to uniform standard. + """ + session.install(BLACK_VERSION, ISORT_VERSION) + python_files = [path for path in os.listdir(".") if path.endswith(".py")] + + # Use the --fss option to sort imports using strict alphabetical order. + # See https://pycqa.github.io/isort/docs/configuration/options.html#force-sort-within-sections + session.run("isort", "--fss", *python_files) + session.run("black", *python_files) + + # # Sample Tests # diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py index 949e0fde9ae1..38bb0a572b81 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py @@ -30,6 +30,7 @@ # WARNING - WARNING - WARNING - WARNING - WARNING BLACK_VERSION = "black==22.3.0" +ISORT_VERSION = "isort==5.10.1" # Copy `noxfile_config.py` to your directory and modify it instead. @@ -168,12 +169,32 @@ def lint(session: nox.sessions.Session) -> None: @nox.session def blacken(session: nox.sessions.Session) -> None: + """Run black. Format code to uniform standard.""" session.install(BLACK_VERSION) python_files = [path for path in os.listdir(".") if path.endswith(".py")] session.run("black", *python_files) +# +# format = isort + black +# + +@nox.session +def format(session: nox.sessions.Session) -> None: + """ + Run isort to sort imports. Then run black + to format code to uniform standard. + """ + session.install(BLACK_VERSION, ISORT_VERSION) + python_files = [path for path in os.listdir(".") if path.endswith(".py")] + + # Use the --fss option to sort imports using strict alphabetical order. 
+ # See https://pycqa.github.io/isort/docs/configuration/options.html#force-sort-within-sections + session.run("isort", "--fss", *python_files) + session.run("black", *python_files) + + # # Sample Tests # diff --git a/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py b/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py index 949e0fde9ae1..38bb0a572b81 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py +++ b/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py @@ -30,6 +30,7 @@ # WARNING - WARNING - WARNING - WARNING - WARNING BLACK_VERSION = "black==22.3.0" +ISORT_VERSION = "isort==5.10.1" # Copy `noxfile_config.py` to your directory and modify it instead. @@ -168,12 +169,32 @@ def lint(session: nox.sessions.Session) -> None: @nox.session def blacken(session: nox.sessions.Session) -> None: + """Run black. Format code to uniform standard.""" session.install(BLACK_VERSION) python_files = [path for path in os.listdir(".") if path.endswith(".py")] session.run("black", *python_files) +# +# format = isort + black +# + +@nox.session +def format(session: nox.sessions.Session) -> None: + """ + Run isort to sort imports. Then run black + to format code to uniform standard. + """ + session.install(BLACK_VERSION, ISORT_VERSION) + python_files = [path for path in os.listdir(".") if path.endswith(".py")] + + # Use the --fss option to sort imports using strict alphabetical order. + # See https://pycqa.github.io/isort/docs/configuration/options.html#force-sort-within-sections + session.run("isort", "--fss", *python_files) + session.run("black", *python_files) + + # # Sample Tests # From 9b41d025a127617e46193133e5df2545c1f244e9 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Thu, 21 Apr 2022 03:33:15 +0200 Subject: [PATCH 606/892] chore(deps): update dependency apache-beam to v2.38.0 (#573) --- packages/google-cloud-bigtable/samples/beam/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index a3c7e462b65c..ce89af087013 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ -apache-beam==2.37.0 +apache-beam==2.38.0 google-cloud-bigtable==2.9.0 google-cloud-core==2.3.0 From 442e7b4153d4d81f9d644749b361d3affd19bdad Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 21 Apr 2022 18:02:22 +0000 Subject: [PATCH 607/892] chore(python): use ubuntu 22.04 in docs image (#575) Source-Link: https://github.com/googleapis/synthtool/commit/f15cc72fb401b4861cedebb10af74afe428fb1f8 Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:bc5eed3804aec2f05fad42aacf973821d9500c174015341f721a984a0825b6fd --- .../.github/.OwlBot.lock.yaml | 4 ++-- .../.kokoro/docker/docs/Dockerfile | 20 +++++++++++++++++-- 2 files changed, 20 insertions(+), 4 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 7c454abf76f3..64f82d6bf4bc 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. 
docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:00c9d764fd1cd56265f12a5ef4b99a0c9e87cf261018099141e2ca5158890416 -# created: 2022-04-20T23:42:53.970438194Z + digest: sha256:bc5eed3804aec2f05fad42aacf973821d9500c174015341f721a984a0825b6fd +# created: 2022-04-21T15:43:16.246106921Z diff --git a/packages/google-cloud-bigtable/.kokoro/docker/docs/Dockerfile b/packages/google-cloud-bigtable/.kokoro/docker/docs/Dockerfile index 4e1b1fb8b5a5..238b87b9d1c9 100644 --- a/packages/google-cloud-bigtable/.kokoro/docker/docs/Dockerfile +++ b/packages/google-cloud-bigtable/.kokoro/docker/docs/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from ubuntu:20.04 +from ubuntu:22.04 ENV DEBIAN_FRONTEND noninteractive @@ -60,8 +60,24 @@ RUN apt-get update \ && rm -rf /var/lib/apt/lists/* \ && rm -f /var/cache/apt/archives/*.deb +###################### Install python 3.8.11 + +# Download python 3.8.11 +RUN wget https://www.python.org/ftp/python/3.8.11/Python-3.8.11.tgz + +# Extract files +RUN tar -xvf Python-3.8.11.tgz + +# Install python 3.8.11 +RUN ./Python-3.8.11/configure --enable-optimizations +RUN make altinstall + +###################### Install pip RUN wget -O /tmp/get-pip.py 'https://bootstrap.pypa.io/get-pip.py' \ - && python3.8 /tmp/get-pip.py \ + && python3 /tmp/get-pip.py \ && rm /tmp/get-pip.py +# Test pip +RUN python3 -m pip + CMD ["python3.8"] From 4499495f49965238dc978549ebd3946c9cf4fead Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Mon, 25 Apr 2022 17:00:15 +0200 Subject: [PATCH 608/892] chore(deps): update dependency pytest to v7.1.2 (#576) --- .../google-cloud-bigtable/samples/beam/requirements-test.txt | 2 +- .../google-cloud-bigtable/samples/hello/requirements-test.txt | 2 +- .../samples/hello_happybase/requirements-test.txt | 2 +- .../samples/instanceadmin/requirements-test.txt | 2 +- .../samples/metricscaler/requirements-test.txt | 2 +- .../samples/quickstart/requirements-test.txt | 2 +- .../samples/quickstart_happybase/requirements-test.txt | 2 +- .../samples/snippets/filters/requirements-test.txt | 2 +- .../samples/snippets/reads/requirements-test.txt | 2 +- .../samples/snippets/writes/requirements-test.txt | 2 +- .../samples/tableadmin/requirements-test.txt | 2 +- 11 files changed, 11 insertions(+), 11 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements-test.txt b/packages/google-cloud-bigtable/samples/beam/requirements-test.txt index 4f6bf643fc5e..d00689e0623a 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements-test.txt @@ -1 +1 @@ -pytest==7.1.1 +pytest==7.1.2 diff --git a/packages/google-cloud-bigtable/samples/hello/requirements-test.txt b/packages/google-cloud-bigtable/samples/hello/requirements-test.txt index 4f6bf643fc5e..d00689e0623a 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements-test.txt @@ -1 +1 @@ -pytest==7.1.1 +pytest==7.1.2 diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt b/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt index 4f6bf643fc5e..d00689e0623a 100644 --- a/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt @@ -1 +1 @@ -pytest==7.1.1 
+pytest==7.1.2 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt index 4f6bf643fc5e..d00689e0623a 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt @@ -1 +1 @@ -pytest==7.1.1 +pytest==7.1.2 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt index fe1569ff4599..7292f2245304 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt @@ -1,3 +1,3 @@ -pytest==7.1.1 +pytest==7.1.2 mock==4.0.3 google-cloud-testutils diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt index 4f6bf643fc5e..d00689e0623a 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt @@ -1 +1 @@ -pytest==7.1.1 +pytest==7.1.2 diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt b/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt index 4f6bf643fc5e..d00689e0623a 100644 --- a/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt @@ -1 +1 @@ -pytest==7.1.1 +pytest==7.1.2 diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt index 4f6bf643fc5e..d00689e0623a 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt @@ -1 +1 @@ -pytest==7.1.1 +pytest==7.1.2 diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt index 4f6bf643fc5e..d00689e0623a 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt @@ -1 +1 @@ -pytest==7.1.1 +pytest==7.1.2 diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt index 678dbc8efd1b..e29e011b390c 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt @@ -1,2 +1,2 @@ backoff==1.11.1 -pytest==7.1.1 +pytest==7.1.2 diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt index e3434bfa8cb1..69a7581b653e 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt @@ -1,2 +1,2 @@ -pytest==7.1.1 +pytest==7.1.2 google-cloud-testutils==1.3.1 From 4ac206ca1e1b8ecfe8afbc26f1a6b774c8f5cd36 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 26 Apr 2022 20:05:03 +0200 Subject: 
[PATCH 609/892] chore(deps): update dependency backoff to v2 (#577) --- .../samples/instanceadmin/requirements.txt | 2 +- .../samples/snippets/writes/requirements-test.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt index 70fe65d74c1a..3600abf717cc 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt @@ -1,2 +1,2 @@ google-cloud-bigtable==2.9.0 -backoff==1.11.1 +backoff==2.0.0 diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt index e29e011b390c..81c3a60c109a 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt @@ -1,2 +1,2 @@ -backoff==1.11.1 +backoff==2.0.0 pytest==7.1.2 From dde0fc4ab1083e613ea835b6a17b0f3db17fba0c Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Wed, 27 Apr 2022 19:30:52 +0200 Subject: [PATCH 610/892] chore(deps): update dependency backoff to v2.0.1 (#579) --- .../samples/instanceadmin/requirements.txt | 2 +- .../samples/snippets/writes/requirements-test.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt index 3600abf717cc..90da13ba02fc 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt @@ -1,2 +1,2 @@ google-cloud-bigtable==2.9.0 -backoff==2.0.0 +backoff==2.0.1 diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt index 81c3a60c109a..4d92cc9aa9dd 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt @@ -1,2 +1,2 @@ -backoff==2.0.0 +backoff==2.0.1 pytest==7.1.2 From cfad52f319d9311e7472d2785f8c04d52d200046 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 28 Apr 2022 07:22:00 -0400 Subject: [PATCH 611/892] chore: use gapic-generator-python 0.65.2 (#578) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore: use gapic-generator-python 0.65.2 PiperOrigin-RevId: 444333013 Source-Link: https://github.com/googleapis/googleapis/commit/f91b6cf82e929280f6562f6110957c654bd9e2e6 Source-Link: https://github.com/googleapis/googleapis-gen/commit/16eb36095c294e712c74a1bf23550817b42174e5 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiMTZlYjM2MDk1YzI5NGU3MTJjNzRhMWJmMjM1NTA4MTdiNDIxNzRlNSJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md Co-authored-by: Owl Bot --- .../test_bigtable_instance_admin.py | 172 ++++++++--------- .../test_bigtable_table_admin.py | 182 +++++++++--------- 2 files changed, 177 insertions(+), 177 deletions(-) diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py 
b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index 3f6013fc5d21..df770db6bb2b 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -787,7 +787,7 @@ def test_create_instance_field_headers(): # a field header. Set these to a non-empty value. request = bigtable_instance_admin.CreateInstanceRequest() - request.parent = "parent/value" + request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_instance), "__call__") as call: @@ -803,7 +803,7 @@ def test_create_instance_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "parent=parent/value", + "parent=parent_value", ) in kw["metadata"] @@ -817,7 +817,7 @@ async def test_create_instance_field_headers_async(): # a field header. Set these to a non-empty value. request = bigtable_instance_admin.CreateInstanceRequest() - request.parent = "parent/value" + request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_instance), "__call__") as call: @@ -835,7 +835,7 @@ async def test_create_instance_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "parent=parent/value", + "parent=parent_value", ) in kw["metadata"] @@ -1062,7 +1062,7 @@ def test_get_instance_field_headers(): # a field header. Set these to a non-empty value. request = bigtable_instance_admin.GetInstanceRequest() - request.name = "name/value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_instance), "__call__") as call: @@ -1078,7 +1078,7 @@ def test_get_instance_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "name=name/value", + "name=name_value", ) in kw["metadata"] @@ -1092,7 +1092,7 @@ async def test_get_instance_field_headers_async(): # a field header. Set these to a non-empty value. request = bigtable_instance_admin.GetInstanceRequest() - request.name = "name/value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_instance), "__call__") as call: @@ -1108,7 +1108,7 @@ async def test_get_instance_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "name=name/value", + "name=name_value", ) in kw["metadata"] @@ -1296,7 +1296,7 @@ def test_list_instances_field_headers(): # a field header. Set these to a non-empty value. request = bigtable_instance_admin.ListInstancesRequest() - request.parent = "parent/value" + request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_instances), "__call__") as call: @@ -1312,7 +1312,7 @@ def test_list_instances_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "parent=parent/value", + "parent=parent_value", ) in kw["metadata"] @@ -1326,7 +1326,7 @@ async def test_list_instances_field_headers_async(): # a field header. Set these to a non-empty value. 
request = bigtable_instance_admin.ListInstancesRequest() - request.parent = "parent/value" + request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_instances), "__call__") as call: @@ -1344,7 +1344,7 @@ async def test_list_instances_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "parent=parent/value", + "parent=parent_value", ) in kw["metadata"] @@ -1540,7 +1540,7 @@ def test_update_instance_field_headers(): # a field header. Set these to a non-empty value. request = instance.Instance() - request.name = "name/value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_instance), "__call__") as call: @@ -1556,7 +1556,7 @@ def test_update_instance_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "name=name/value", + "name=name_value", ) in kw["metadata"] @@ -1570,7 +1570,7 @@ async def test_update_instance_field_headers_async(): # a field header. Set these to a non-empty value. request = instance.Instance() - request.name = "name/value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_instance), "__call__") as call: @@ -1586,7 +1586,7 @@ async def test_update_instance_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "name=name/value", + "name=name_value", ) in kw["metadata"] @@ -1689,7 +1689,7 @@ def test_partial_update_instance_field_headers(): # a field header. Set these to a non-empty value. request = bigtable_instance_admin.PartialUpdateInstanceRequest() - request.instance.name = "instance.name/value" + request.instance.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -1707,7 +1707,7 @@ def test_partial_update_instance_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "instance.name=instance.name/value", + "instance.name=name_value", ) in kw["metadata"] @@ -1721,7 +1721,7 @@ async def test_partial_update_instance_field_headers_async(): # a field header. Set these to a non-empty value. request = bigtable_instance_admin.PartialUpdateInstanceRequest() - request.instance.name = "instance.name/value" + request.instance.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -1741,7 +1741,7 @@ async def test_partial_update_instance_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "instance.name=instance.name/value", + "instance.name=name_value", ) in kw["metadata"] @@ -1932,7 +1932,7 @@ def test_delete_instance_field_headers(): # a field header. Set these to a non-empty value. request = bigtable_instance_admin.DeleteInstanceRequest() - request.name = "name/value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_instance), "__call__") as call: @@ -1948,7 +1948,7 @@ def test_delete_instance_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "name=name/value", + "name=name_value", ) in kw["metadata"] @@ -1962,7 +1962,7 @@ async def test_delete_instance_field_headers_async(): # a field header. Set these to a non-empty value. 
request = bigtable_instance_admin.DeleteInstanceRequest() - request.name = "name/value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_instance), "__call__") as call: @@ -1978,7 +1978,7 @@ async def test_delete_instance_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "name=name/value", + "name=name_value", ) in kw["metadata"] @@ -2155,7 +2155,7 @@ def test_create_cluster_field_headers(): # a field header. Set these to a non-empty value. request = bigtable_instance_admin.CreateClusterRequest() - request.parent = "parent/value" + request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_cluster), "__call__") as call: @@ -2171,7 +2171,7 @@ def test_create_cluster_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "parent=parent/value", + "parent=parent_value", ) in kw["metadata"] @@ -2185,7 +2185,7 @@ async def test_create_cluster_field_headers_async(): # a field header. Set these to a non-empty value. request = bigtable_instance_admin.CreateClusterRequest() - request.parent = "parent/value" + request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_cluster), "__call__") as call: @@ -2203,7 +2203,7 @@ async def test_create_cluster_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "parent=parent/value", + "parent=parent_value", ) in kw["metadata"] @@ -2429,7 +2429,7 @@ def test_get_cluster_field_headers(): # a field header. Set these to a non-empty value. request = bigtable_instance_admin.GetClusterRequest() - request.name = "name/value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_cluster), "__call__") as call: @@ -2445,7 +2445,7 @@ def test_get_cluster_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "name=name/value", + "name=name_value", ) in kw["metadata"] @@ -2459,7 +2459,7 @@ async def test_get_cluster_field_headers_async(): # a field header. Set these to a non-empty value. request = bigtable_instance_admin.GetClusterRequest() - request.name = "name/value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_cluster), "__call__") as call: @@ -2475,7 +2475,7 @@ async def test_get_cluster_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "name=name/value", + "name=name_value", ) in kw["metadata"] @@ -2663,7 +2663,7 @@ def test_list_clusters_field_headers(): # a field header. Set these to a non-empty value. request = bigtable_instance_admin.ListClustersRequest() - request.parent = "parent/value" + request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: @@ -2679,7 +2679,7 @@ def test_list_clusters_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "parent=parent/value", + "parent=parent_value", ) in kw["metadata"] @@ -2693,7 +2693,7 @@ async def test_list_clusters_field_headers_async(): # a field header. Set these to a non-empty value. 
request = bigtable_instance_admin.ListClustersRequest() - request.parent = "parent/value" + request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: @@ -2711,7 +2711,7 @@ async def test_list_clusters_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "parent=parent/value", + "parent=parent_value", ) in kw["metadata"] @@ -2889,7 +2889,7 @@ def test_update_cluster_field_headers(): # a field header. Set these to a non-empty value. request = instance.Cluster() - request.name = "name/value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_cluster), "__call__") as call: @@ -2905,7 +2905,7 @@ def test_update_cluster_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "name=name/value", + "name=name_value", ) in kw["metadata"] @@ -2919,7 +2919,7 @@ async def test_update_cluster_field_headers_async(): # a field header. Set these to a non-empty value. request = instance.Cluster() - request.name = "name/value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_cluster), "__call__") as call: @@ -2937,7 +2937,7 @@ async def test_update_cluster_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "name=name/value", + "name=name_value", ) in kw["metadata"] @@ -3040,7 +3040,7 @@ def test_partial_update_cluster_field_headers(): # a field header. Set these to a non-empty value. request = bigtable_instance_admin.PartialUpdateClusterRequest() - request.cluster.name = "cluster.name/value" + request.cluster.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -3058,7 +3058,7 @@ def test_partial_update_cluster_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "cluster.name=cluster.name/value", + "cluster.name=name_value", ) in kw["metadata"] @@ -3072,7 +3072,7 @@ async def test_partial_update_cluster_field_headers_async(): # a field header. Set these to a non-empty value. request = bigtable_instance_admin.PartialUpdateClusterRequest() - request.cluster.name = "cluster.name/value" + request.cluster.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -3092,7 +3092,7 @@ async def test_partial_update_cluster_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "cluster.name=cluster.name/value", + "cluster.name=name_value", ) in kw["metadata"] @@ -3283,7 +3283,7 @@ def test_delete_cluster_field_headers(): # a field header. Set these to a non-empty value. request = bigtable_instance_admin.DeleteClusterRequest() - request.name = "name/value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call: @@ -3299,7 +3299,7 @@ def test_delete_cluster_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "name=name/value", + "name=name_value", ) in kw["metadata"] @@ -3313,7 +3313,7 @@ async def test_delete_cluster_field_headers_async(): # a field header. Set these to a non-empty value. 
request = bigtable_instance_admin.DeleteClusterRequest() - request.name = "name/value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call: @@ -3329,7 +3329,7 @@ async def test_delete_cluster_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "name=name/value", + "name=name_value", ) in kw["metadata"] @@ -3529,7 +3529,7 @@ def test_create_app_profile_field_headers(): # a field header. Set these to a non-empty value. request = bigtable_instance_admin.CreateAppProfileRequest() - request.parent = "parent/value" + request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -3547,7 +3547,7 @@ def test_create_app_profile_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "parent=parent/value", + "parent=parent_value", ) in kw["metadata"] @@ -3561,7 +3561,7 @@ async def test_create_app_profile_field_headers_async(): # a field header. Set these to a non-empty value. request = bigtable_instance_admin.CreateAppProfileRequest() - request.parent = "parent/value" + request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -3579,7 +3579,7 @@ async def test_create_app_profile_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "parent=parent/value", + "parent=parent_value", ) in kw["metadata"] @@ -3797,7 +3797,7 @@ def test_get_app_profile_field_headers(): # a field header. Set these to a non-empty value. request = bigtable_instance_admin.GetAppProfileRequest() - request.name = "name/value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_app_profile), "__call__") as call: @@ -3813,7 +3813,7 @@ def test_get_app_profile_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "name=name/value", + "name=name_value", ) in kw["metadata"] @@ -3827,7 +3827,7 @@ async def test_get_app_profile_field_headers_async(): # a field header. Set these to a non-empty value. request = bigtable_instance_admin.GetAppProfileRequest() - request.name = "name/value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_app_profile), "__call__") as call: @@ -3843,7 +3843,7 @@ async def test_get_app_profile_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "name=name/value", + "name=name_value", ) in kw["metadata"] @@ -4036,7 +4036,7 @@ def test_list_app_profiles_field_headers(): # a field header. Set these to a non-empty value. request = bigtable_instance_admin.ListAppProfilesRequest() - request.parent = "parent/value" + request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -4054,7 +4054,7 @@ def test_list_app_profiles_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "parent=parent/value", + "parent=parent_value", ) in kw["metadata"] @@ -4068,7 +4068,7 @@ async def test_list_app_profiles_field_headers_async(): # a field header. Set these to a non-empty value. 
request = bigtable_instance_admin.ListAppProfilesRequest() - request.parent = "parent/value" + request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -4088,7 +4088,7 @@ async def test_list_app_profiles_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "parent=parent/value", + "parent=parent_value", ) in kw["metadata"] @@ -4225,7 +4225,7 @@ def test_list_app_profiles_pager(transport_name: str = "grpc"): assert pager._metadata == metadata - results = [i for i in pager] + results = list(pager) assert len(results) == 6 assert all(isinstance(i, instance.AppProfile) for i in results) @@ -4473,7 +4473,7 @@ def test_update_app_profile_field_headers(): # a field header. Set these to a non-empty value. request = bigtable_instance_admin.UpdateAppProfileRequest() - request.app_profile.name = "app_profile.name/value" + request.app_profile.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -4491,7 +4491,7 @@ def test_update_app_profile_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "app_profile.name=app_profile.name/value", + "app_profile.name=name_value", ) in kw["metadata"] @@ -4505,7 +4505,7 @@ async def test_update_app_profile_field_headers_async(): # a field header. Set these to a non-empty value. request = bigtable_instance_admin.UpdateAppProfileRequest() - request.app_profile.name = "app_profile.name/value" + request.app_profile.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -4525,7 +4525,7 @@ async def test_update_app_profile_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "app_profile.name=app_profile.name/value", + "app_profile.name=name_value", ) in kw["metadata"] @@ -4722,7 +4722,7 @@ def test_delete_app_profile_field_headers(): # a field header. Set these to a non-empty value. request = bigtable_instance_admin.DeleteAppProfileRequest() - request.name = "name/value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -4740,7 +4740,7 @@ def test_delete_app_profile_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "name=name/value", + "name=name_value", ) in kw["metadata"] @@ -4754,7 +4754,7 @@ async def test_delete_app_profile_field_headers_async(): # a field header. Set these to a non-empty value. request = bigtable_instance_admin.DeleteAppProfileRequest() - request.name = "name/value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -4772,7 +4772,7 @@ async def test_delete_app_profile_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "name=name/value", + "name=name_value", ) in kw["metadata"] @@ -4962,7 +4962,7 @@ def test_get_iam_policy_field_headers(): # a field header. Set these to a non-empty value. request = iam_policy_pb2.GetIamPolicyRequest() - request.resource = "resource/value" + request.resource = "resource_value" # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: @@ -4978,7 +4978,7 @@ def test_get_iam_policy_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "resource=resource/value", + "resource=resource_value", ) in kw["metadata"] @@ -4992,7 +4992,7 @@ async def test_get_iam_policy_field_headers_async(): # a field header. Set these to a non-empty value. request = iam_policy_pb2.GetIamPolicyRequest() - request.resource = "resource/value" + request.resource = "resource_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: @@ -5008,7 +5008,7 @@ async def test_get_iam_policy_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "resource=resource/value", + "resource=resource_value", ) in kw["metadata"] @@ -5211,7 +5211,7 @@ def test_set_iam_policy_field_headers(): # a field header. Set these to a non-empty value. request = iam_policy_pb2.SetIamPolicyRequest() - request.resource = "resource/value" + request.resource = "resource_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: @@ -5227,7 +5227,7 @@ def test_set_iam_policy_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "resource=resource/value", + "resource=resource_value", ) in kw["metadata"] @@ -5241,7 +5241,7 @@ async def test_set_iam_policy_field_headers_async(): # a field header. Set these to a non-empty value. request = iam_policy_pb2.SetIamPolicyRequest() - request.resource = "resource/value" + request.resource = "resource_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: @@ -5257,7 +5257,7 @@ async def test_set_iam_policy_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "resource=resource/value", + "resource=resource_value", ) in kw["metadata"] @@ -5464,7 +5464,7 @@ def test_test_iam_permissions_field_headers(): # a field header. Set these to a non-empty value. request = iam_policy_pb2.TestIamPermissionsRequest() - request.resource = "resource/value" + request.resource = "resource_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -5482,7 +5482,7 @@ def test_test_iam_permissions_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "resource=resource/value", + "resource=resource_value", ) in kw["metadata"] @@ -5496,7 +5496,7 @@ async def test_test_iam_permissions_field_headers_async(): # a field header. Set these to a non-empty value. request = iam_policy_pb2.TestIamPermissionsRequest() - request.resource = "resource/value" + request.resource = "resource_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -5516,7 +5516,7 @@ async def test_test_iam_permissions_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "resource=resource/value", + "resource=resource_value", ) in kw["metadata"] @@ -5734,7 +5734,7 @@ def test_list_hot_tablets_field_headers(): # a field header. Set these to a non-empty value. request = bigtable_instance_admin.ListHotTabletsRequest() - request.parent = "parent/value" + request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object(type(client.transport.list_hot_tablets), "__call__") as call: @@ -5750,7 +5750,7 @@ def test_list_hot_tablets_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "parent=parent/value", + "parent=parent_value", ) in kw["metadata"] @@ -5764,7 +5764,7 @@ async def test_list_hot_tablets_field_headers_async(): # a field header. Set these to a non-empty value. request = bigtable_instance_admin.ListHotTabletsRequest() - request.parent = "parent/value" + request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_hot_tablets), "__call__") as call: @@ -5782,7 +5782,7 @@ async def test_list_hot_tablets_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "parent=parent/value", + "parent=parent_value", ) in kw["metadata"] @@ -5913,7 +5913,7 @@ def test_list_hot_tablets_pager(transport_name: str = "grpc"): assert pager._metadata == metadata - results = [i for i in pager] + results = list(pager) assert len(results) == 6 assert all(isinstance(i, instance.HotTablet) for i in results) diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index b706a87d2aaf..2e89585c791c 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -790,7 +790,7 @@ def test_create_table_field_headers(): # a field header. Set these to a non-empty value. request = bigtable_table_admin.CreateTableRequest() - request.parent = "parent/value" + request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_table), "__call__") as call: @@ -806,7 +806,7 @@ def test_create_table_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "parent=parent/value", + "parent=parent_value", ) in kw["metadata"] @@ -820,7 +820,7 @@ async def test_create_table_field_headers_async(): # a field header. Set these to a non-empty value. request = bigtable_table_admin.CreateTableRequest() - request.parent = "parent/value" + request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_table), "__call__") as call: @@ -836,7 +836,7 @@ async def test_create_table_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "parent=parent/value", + "parent=parent_value", ) in kw["metadata"] @@ -1039,7 +1039,7 @@ def test_create_table_from_snapshot_field_headers(): # a field header. Set these to a non-empty value. request = bigtable_table_admin.CreateTableFromSnapshotRequest() - request.parent = "parent/value" + request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -1057,7 +1057,7 @@ def test_create_table_from_snapshot_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "parent=parent/value", + "parent=parent_value", ) in kw["metadata"] @@ -1071,7 +1071,7 @@ async def test_create_table_from_snapshot_field_headers_async(): # a field header. Set these to a non-empty value. 
request = bigtable_table_admin.CreateTableFromSnapshotRequest() - request.parent = "parent/value" + request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -1091,7 +1091,7 @@ async def test_create_table_from_snapshot_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "parent=parent/value", + "parent=parent_value", ) in kw["metadata"] @@ -1299,7 +1299,7 @@ def test_list_tables_field_headers(): # a field header. Set these to a non-empty value. request = bigtable_table_admin.ListTablesRequest() - request.parent = "parent/value" + request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_tables), "__call__") as call: @@ -1315,7 +1315,7 @@ def test_list_tables_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "parent=parent/value", + "parent=parent_value", ) in kw["metadata"] @@ -1329,7 +1329,7 @@ async def test_list_tables_field_headers_async(): # a field header. Set these to a non-empty value. request = bigtable_table_admin.ListTablesRequest() - request.parent = "parent/value" + request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_tables), "__call__") as call: @@ -1347,7 +1347,7 @@ async def test_list_tables_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "parent=parent/value", + "parent=parent_value", ) in kw["metadata"] @@ -1478,7 +1478,7 @@ def test_list_tables_pager(transport_name: str = "grpc"): assert pager._metadata == metadata - results = [i for i in pager] + results = list(pager) assert len(results) == 6 assert all(isinstance(i, table.Table) for i in results) @@ -1723,7 +1723,7 @@ def test_get_table_field_headers(): # a field header. Set these to a non-empty value. request = bigtable_table_admin.GetTableRequest() - request.name = "name/value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_table), "__call__") as call: @@ -1739,7 +1739,7 @@ def test_get_table_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "name=name/value", + "name=name_value", ) in kw["metadata"] @@ -1753,7 +1753,7 @@ async def test_get_table_field_headers_async(): # a field header. Set these to a non-empty value. request = bigtable_table_admin.GetTableRequest() - request.name = "name/value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_table), "__call__") as call: @@ -1769,7 +1769,7 @@ async def test_get_table_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "name=name/value", + "name=name_value", ) in kw["metadata"] @@ -1944,7 +1944,7 @@ def test_delete_table_field_headers(): # a field header. Set these to a non-empty value. request = bigtable_table_admin.DeleteTableRequest() - request.name = "name/value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object(type(client.transport.delete_table), "__call__") as call: @@ -1960,7 +1960,7 @@ def test_delete_table_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "name=name/value", + "name=name_value", ) in kw["metadata"] @@ -1974,7 +1974,7 @@ async def test_delete_table_field_headers_async(): # a field header. Set these to a non-empty value. request = bigtable_table_admin.DeleteTableRequest() - request.name = "name/value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_table), "__call__") as call: @@ -1990,7 +1990,7 @@ async def test_delete_table_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "name=name/value", + "name=name_value", ) in kw["metadata"] @@ -2183,7 +2183,7 @@ def test_modify_column_families_field_headers(): # a field header. Set these to a non-empty value. request = bigtable_table_admin.ModifyColumnFamiliesRequest() - request.name = "name/value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -2201,7 +2201,7 @@ def test_modify_column_families_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "name=name/value", + "name=name_value", ) in kw["metadata"] @@ -2215,7 +2215,7 @@ async def test_modify_column_families_field_headers_async(): # a field header. Set these to a non-empty value. request = bigtable_table_admin.ModifyColumnFamiliesRequest() - request.name = "name/value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -2233,7 +2233,7 @@ async def test_modify_column_families_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "name=name/value", + "name=name_value", ) in kw["metadata"] @@ -2442,7 +2442,7 @@ def test_drop_row_range_field_headers(): # a field header. Set these to a non-empty value. request = bigtable_table_admin.DropRowRangeRequest() - request.name = "name/value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: @@ -2458,7 +2458,7 @@ def test_drop_row_range_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "name=name/value", + "name=name_value", ) in kw["metadata"] @@ -2472,7 +2472,7 @@ async def test_drop_row_range_field_headers_async(): # a field header. Set these to a non-empty value. request = bigtable_table_admin.DropRowRangeRequest() - request.name = "name/value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: @@ -2488,7 +2488,7 @@ async def test_drop_row_range_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "name=name/value", + "name=name_value", ) in kw["metadata"] @@ -2597,7 +2597,7 @@ def test_generate_consistency_token_field_headers(): # a field header. Set these to a non-empty value. request = bigtable_table_admin.GenerateConsistencyTokenRequest() - request.name = "name/value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object( @@ -2615,7 +2615,7 @@ def test_generate_consistency_token_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "name=name/value", + "name=name_value", ) in kw["metadata"] @@ -2629,7 +2629,7 @@ async def test_generate_consistency_token_field_headers_async(): # a field header. Set these to a non-empty value. request = bigtable_table_admin.GenerateConsistencyTokenRequest() - request.name = "name/value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -2649,7 +2649,7 @@ async def test_generate_consistency_token_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "name=name/value", + "name=name_value", ) in kw["metadata"] @@ -2844,7 +2844,7 @@ def test_check_consistency_field_headers(): # a field header. Set these to a non-empty value. request = bigtable_table_admin.CheckConsistencyRequest() - request.name = "name/value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -2862,7 +2862,7 @@ def test_check_consistency_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "name=name/value", + "name=name_value", ) in kw["metadata"] @@ -2876,7 +2876,7 @@ async def test_check_consistency_field_headers_async(): # a field header. Set these to a non-empty value. request = bigtable_table_admin.CheckConsistencyRequest() - request.name = "name/value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -2896,7 +2896,7 @@ async def test_check_consistency_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "name=name/value", + "name=name_value", ) in kw["metadata"] @@ -3089,7 +3089,7 @@ def test_snapshot_table_field_headers(): # a field header. Set these to a non-empty value. request = bigtable_table_admin.SnapshotTableRequest() - request.name = "name/value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: @@ -3105,7 +3105,7 @@ def test_snapshot_table_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "name=name/value", + "name=name_value", ) in kw["metadata"] @@ -3119,7 +3119,7 @@ async def test_snapshot_table_field_headers_async(): # a field header. Set these to a non-empty value. request = bigtable_table_admin.SnapshotTableRequest() - request.name = "name/value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: @@ -3137,7 +3137,7 @@ async def test_snapshot_table_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "name=name/value", + "name=name_value", ) in kw["metadata"] @@ -3364,7 +3364,7 @@ def test_get_snapshot_field_headers(): # a field header. Set these to a non-empty value. request = bigtable_table_admin.GetSnapshotRequest() - request.name = "name/value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: @@ -3380,7 +3380,7 @@ def test_get_snapshot_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "name=name/value", + "name=name_value", ) in kw["metadata"] @@ -3394,7 +3394,7 @@ async def test_get_snapshot_field_headers_async(): # a field header. Set these to a non-empty value. request = bigtable_table_admin.GetSnapshotRequest() - request.name = "name/value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: @@ -3410,7 +3410,7 @@ async def test_get_snapshot_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "name=name/value", + "name=name_value", ) in kw["metadata"] @@ -3593,7 +3593,7 @@ def test_list_snapshots_field_headers(): # a field header. Set these to a non-empty value. request = bigtable_table_admin.ListSnapshotsRequest() - request.parent = "parent/value" + request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: @@ -3609,7 +3609,7 @@ def test_list_snapshots_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "parent=parent/value", + "parent=parent_value", ) in kw["metadata"] @@ -3623,7 +3623,7 @@ async def test_list_snapshots_field_headers_async(): # a field header. Set these to a non-empty value. request = bigtable_table_admin.ListSnapshotsRequest() - request.parent = "parent/value" + request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: @@ -3641,7 +3641,7 @@ async def test_list_snapshots_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "parent=parent/value", + "parent=parent_value", ) in kw["metadata"] @@ -3772,7 +3772,7 @@ def test_list_snapshots_pager(transport_name: str = "grpc"): assert pager._metadata == metadata - results = [i for i in pager] + results = list(pager) assert len(results) == 6 assert all(isinstance(i, table.Snapshot) for i in results) @@ -4006,7 +4006,7 @@ def test_delete_snapshot_field_headers(): # a field header. Set these to a non-empty value. request = bigtable_table_admin.DeleteSnapshotRequest() - request.name = "name/value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: @@ -4022,7 +4022,7 @@ def test_delete_snapshot_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "name=name/value", + "name=name_value", ) in kw["metadata"] @@ -4036,7 +4036,7 @@ async def test_delete_snapshot_field_headers_async(): # a field header. Set these to a non-empty value. request = bigtable_table_admin.DeleteSnapshotRequest() - request.name = "name/value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: @@ -4052,7 +4052,7 @@ async def test_delete_snapshot_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "name=name/value", + "name=name_value", ) in kw["metadata"] @@ -4229,7 +4229,7 @@ def test_create_backup_field_headers(): # a field header. 
Set these to a non-empty value. request = bigtable_table_admin.CreateBackupRequest() - request.parent = "parent/value" + request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_backup), "__call__") as call: @@ -4245,7 +4245,7 @@ def test_create_backup_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "parent=parent/value", + "parent=parent_value", ) in kw["metadata"] @@ -4259,7 +4259,7 @@ async def test_create_backup_field_headers_async(): # a field header. Set these to a non-empty value. request = bigtable_table_admin.CreateBackupRequest() - request.parent = "parent/value" + request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_backup), "__call__") as call: @@ -4277,7 +4277,7 @@ async def test_create_backup_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "parent=parent/value", + "parent=parent_value", ) in kw["metadata"] @@ -4493,7 +4493,7 @@ def test_get_backup_field_headers(): # a field header. Set these to a non-empty value. request = bigtable_table_admin.GetBackupRequest() - request.name = "name/value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_backup), "__call__") as call: @@ -4509,7 +4509,7 @@ def test_get_backup_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "name=name/value", + "name=name_value", ) in kw["metadata"] @@ -4523,7 +4523,7 @@ async def test_get_backup_field_headers_async(): # a field header. Set these to a non-empty value. request = bigtable_table_admin.GetBackupRequest() - request.name = "name/value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_backup), "__call__") as call: @@ -4539,7 +4539,7 @@ async def test_get_backup_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "name=name/value", + "name=name_value", ) in kw["metadata"] @@ -4734,7 +4734,7 @@ def test_update_backup_field_headers(): # a field header. Set these to a non-empty value. request = bigtable_table_admin.UpdateBackupRequest() - request.backup.name = "backup.name/value" + request.backup.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_backup), "__call__") as call: @@ -4750,7 +4750,7 @@ def test_update_backup_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "backup.name=backup.name/value", + "backup.name=name_value", ) in kw["metadata"] @@ -4764,7 +4764,7 @@ async def test_update_backup_field_headers_async(): # a field header. Set these to a non-empty value. request = bigtable_table_admin.UpdateBackupRequest() - request.backup.name = "backup.name/value" + request.backup.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_backup), "__call__") as call: @@ -4780,7 +4780,7 @@ async def test_update_backup_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "backup.name=backup.name/value", + "backup.name=name_value", ) in kw["metadata"] @@ -4965,7 +4965,7 @@ def test_delete_backup_field_headers(): # a field header. 
Set these to a non-empty value. request = bigtable_table_admin.DeleteBackupRequest() - request.name = "name/value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: @@ -4981,7 +4981,7 @@ def test_delete_backup_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "name=name/value", + "name=name_value", ) in kw["metadata"] @@ -4995,7 +4995,7 @@ async def test_delete_backup_field_headers_async(): # a field header. Set these to a non-empty value. request = bigtable_table_admin.DeleteBackupRequest() - request.name = "name/value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: @@ -5011,7 +5011,7 @@ async def test_delete_backup_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "name=name/value", + "name=name_value", ) in kw["metadata"] @@ -5194,7 +5194,7 @@ def test_list_backups_field_headers(): # a field header. Set these to a non-empty value. request = bigtable_table_admin.ListBackupsRequest() - request.parent = "parent/value" + request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_backups), "__call__") as call: @@ -5210,7 +5210,7 @@ def test_list_backups_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "parent=parent/value", + "parent=parent_value", ) in kw["metadata"] @@ -5224,7 +5224,7 @@ async def test_list_backups_field_headers_async(): # a field header. Set these to a non-empty value. request = bigtable_table_admin.ListBackupsRequest() - request.parent = "parent/value" + request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_backups), "__call__") as call: @@ -5242,7 +5242,7 @@ async def test_list_backups_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "parent=parent/value", + "parent=parent_value", ) in kw["metadata"] @@ -5373,7 +5373,7 @@ def test_list_backups_pager(transport_name: str = "grpc"): assert pager._metadata == metadata - results = [i for i in pager] + results = list(pager) assert len(results) == 6 assert all(isinstance(i, table.Backup) for i in results) @@ -5609,7 +5609,7 @@ def test_restore_table_field_headers(): # a field header. Set these to a non-empty value. request = bigtable_table_admin.RestoreTableRequest() - request.parent = "parent/value" + request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.restore_table), "__call__") as call: @@ -5625,7 +5625,7 @@ def test_restore_table_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "parent=parent/value", + "parent=parent_value", ) in kw["metadata"] @@ -5639,7 +5639,7 @@ async def test_restore_table_field_headers_async(): # a field header. Set these to a non-empty value. request = bigtable_table_admin.RestoreTableRequest() - request.parent = "parent/value" + request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object(type(client.transport.restore_table), "__call__") as call: @@ -5657,7 +5657,7 @@ async def test_restore_table_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "parent=parent/value", + "parent=parent_value", ) in kw["metadata"] @@ -5763,7 +5763,7 @@ def test_get_iam_policy_field_headers(): # a field header. Set these to a non-empty value. request = iam_policy_pb2.GetIamPolicyRequest() - request.resource = "resource/value" + request.resource = "resource_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: @@ -5779,7 +5779,7 @@ def test_get_iam_policy_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "resource=resource/value", + "resource=resource_value", ) in kw["metadata"] @@ -5793,7 +5793,7 @@ async def test_get_iam_policy_field_headers_async(): # a field header. Set these to a non-empty value. request = iam_policy_pb2.GetIamPolicyRequest() - request.resource = "resource/value" + request.resource = "resource_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: @@ -5809,7 +5809,7 @@ async def test_get_iam_policy_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "resource=resource/value", + "resource=resource_value", ) in kw["metadata"] @@ -6012,7 +6012,7 @@ def test_set_iam_policy_field_headers(): # a field header. Set these to a non-empty value. request = iam_policy_pb2.SetIamPolicyRequest() - request.resource = "resource/value" + request.resource = "resource_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: @@ -6028,7 +6028,7 @@ def test_set_iam_policy_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "resource=resource/value", + "resource=resource_value", ) in kw["metadata"] @@ -6042,7 +6042,7 @@ async def test_set_iam_policy_field_headers_async(): # a field header. Set these to a non-empty value. request = iam_policy_pb2.SetIamPolicyRequest() - request.resource = "resource/value" + request.resource = "resource_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: @@ -6058,7 +6058,7 @@ async def test_set_iam_policy_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "resource=resource/value", + "resource=resource_value", ) in kw["metadata"] @@ -6265,7 +6265,7 @@ def test_test_iam_permissions_field_headers(): # a field header. Set these to a non-empty value. request = iam_policy_pb2.TestIamPermissionsRequest() - request.resource = "resource/value" + request.resource = "resource_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -6283,7 +6283,7 @@ def test_test_iam_permissions_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "resource=resource/value", + "resource=resource_value", ) in kw["metadata"] @@ -6297,7 +6297,7 @@ async def test_test_iam_permissions_field_headers_async(): # a field header. Set these to a non-empty value. 
request = iam_policy_pb2.TestIamPermissionsRequest() - request.resource = "resource/value" + request.resource = "resource_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -6317,7 +6317,7 @@ async def test_test_iam_permissions_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "resource=resource/value", + "resource=resource_value", ) in kw["metadata"] From a1cc870ae770eaae46025c1fd80e3f23d8a84f96 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 5 May 2022 13:21:41 -0400 Subject: [PATCH 612/892] chore: [autoapprove] update readme_gen.py to include autoescape True (#580) Source-Link: https://github.com/googleapis/synthtool/commit/6b4d5a6407d740beb4158b302194a62a4108a8a6 Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:f792ee1320e03eda2d13a5281a2989f7ed8a9e50b73ef6da97fac7e1e850b149 Co-authored-by: Owl Bot --- packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml | 4 ++-- .../google-cloud-bigtable/scripts/readme-gen/readme_gen.py | 5 ++++- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 64f82d6bf4bc..b631901e99f4 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:bc5eed3804aec2f05fad42aacf973821d9500c174015341f721a984a0825b6fd -# created: 2022-04-21T15:43:16.246106921Z + digest: sha256:f792ee1320e03eda2d13a5281a2989f7ed8a9e50b73ef6da97fac7e1e850b149 +# created: 2022-05-05T15:17:27.599381182Z diff --git a/packages/google-cloud-bigtable/scripts/readme-gen/readme_gen.py b/packages/google-cloud-bigtable/scripts/readme-gen/readme_gen.py index d309d6e97518..91b59676bfc7 100644 --- a/packages/google-cloud-bigtable/scripts/readme-gen/readme_gen.py +++ b/packages/google-cloud-bigtable/scripts/readme-gen/readme_gen.py @@ -28,7 +28,10 @@ jinja_env = jinja2.Environment( trim_blocks=True, loader=jinja2.FileSystemLoader( - os.path.abspath(os.path.join(os.path.dirname(__file__), 'templates')))) + os.path.abspath(os.path.join(os.path.dirname(__file__), "templates")) + ), + autoescape=True, +) README_TMPL = jinja_env.get_template('README.tmpl.rst') From 900bdbbf3907b12a3805c3f7a665f3c514ff0a1a Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 5 May 2022 23:22:18 +0000 Subject: [PATCH 613/892] chore(python): auto approve template changes (#585) Source-Link: https://github.com/googleapis/synthtool/commit/453a5d9c9a55d1969240a37d36cec626d20a9024 Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:81ed5ecdfc7cac5b699ba4537376f3563f6f04122c4ec9e735d3b3dc1d43dd32 --- packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml | 4 ++-- packages/google-cloud-bigtable/.github/auto-approve.yml | 3 +++ 2 files changed, 5 insertions(+), 2 deletions(-) create mode 100644 packages/google-cloud-bigtable/.github/auto-approve.yml diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index b631901e99f4..757c9dca75ad 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ 
b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:f792ee1320e03eda2d13a5281a2989f7ed8a9e50b73ef6da97fac7e1e850b149 -# created: 2022-05-05T15:17:27.599381182Z + digest: sha256:81ed5ecdfc7cac5b699ba4537376f3563f6f04122c4ec9e735d3b3dc1d43dd32 +# created: 2022-05-05T22:08:23.383410683Z diff --git a/packages/google-cloud-bigtable/.github/auto-approve.yml b/packages/google-cloud-bigtable/.github/auto-approve.yml new file mode 100644 index 000000000000..311ebbb853a9 --- /dev/null +++ b/packages/google-cloud-bigtable/.github/auto-approve.yml @@ -0,0 +1,3 @@ +# https://github.com/googleapis/repo-automation-bots/tree/main/packages/auto-approve +processes: + - "OwlBotTemplateChanges" From 675875a02241986f201c5eb274411a4a814dbfe3 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Fri, 20 May 2022 14:24:38 -0700 Subject: [PATCH 614/892] feat: refreshes Bigtable Admin API(s) protos (#589) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: refreshes Bigtable Admin API(s) protos PiperOrigin-RevId: 448988001 Source-Link: https://github.com/googleapis/googleapis/commit/b6fa58e3e886c619018da132de53b47ea7006565 Source-Link: https://github.com/googleapis/googleapis-gen/commit/fc8b8dbc7da1a7845d57134f411302d106ea2ef2 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiZmM4YjhkYmM3ZGExYTc4NDVkNTcxMzRmNDExMzAyZDEwNmVhMmVmMiJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md Co-authored-by: Owl Bot --- .../bigtable_instance_admin/async_client.py | 12 +---- .../transports/base.py | 12 +---- .../types/bigtable_instance_admin.py | 52 +++++++++++++++++++ .../cloud/bigtable_admin_v2/types/instance.py | 23 ++++---- .../cloud/bigtable_admin_v2/types/table.py | 17 +++--- 5 files changed, 75 insertions(+), 41 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py index 13f1d71baee4..9853efdb451a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ -1262,17 +1262,7 @@ async def partial_update_cluster( # and friendly error handling. 
rpc = gapic_v1.method_async.wrap_method( self._client._transport.partial_update_cluster, - default_retry=retries.Retry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, + default_timeout=None, client_info=DEFAULT_CLIENT_INFO, ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py index 8399c517cde5..32261ac7bdca 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py @@ -257,17 +257,7 @@ def _prep_wrapped_messages(self, client_info): ), self.partial_update_cluster: gapic_v1.method.wrap_method( self.partial_update_cluster, - default_retry=retries.Retry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, + default_timeout=None, client_info=client_info, ), self.delete_cluster: gapic_v1.method.wrap_method( diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py index c182966b1039..36ad5dc9cffe 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py @@ -415,8 +415,54 @@ class CreateClusterMetadata(proto.Message): finish_time (google.protobuf.timestamp_pb2.Timestamp): The time at which the operation failed or was completed successfully. + tables (Mapping[str, google.cloud.bigtable_admin_v2.types.CreateClusterMetadata.TableProgress]): + Keys: the full ``name`` of each table that existed in the + instance when CreateCluster was first called, i.e. + ``projects//instances//tables/
``. + Any table added to the instance by a later API call will be + created in the new cluster by that API call, not this one. + + Values: information on how much of a table's data has been + copied to the newly-created cluster so far. """ + class TableProgress(proto.Message): + r"""Progress info for copying a table's data to the new cluster. + + Attributes: + estimated_size_bytes (int): + Estimate of the size of the table to be + copied. + estimated_copied_bytes (int): + Estimate of the number of bytes copied so far for this + table. This will eventually reach 'estimated_size_bytes' + unless the table copy is CANCELLED. + state (google.cloud.bigtable_admin_v2.types.CreateClusterMetadata.TableProgress.State): + + """ + + class State(proto.Enum): + r"""""" + STATE_UNSPECIFIED = 0 + PENDING = 1 + COPYING = 2 + COMPLETED = 3 + CANCELLED = 4 + + estimated_size_bytes = proto.Field( + proto.INT64, + number=2, + ) + estimated_copied_bytes = proto.Field( + proto.INT64, + number=3, + ) + state = proto.Field( + proto.ENUM, + number=4, + enum="CreateClusterMetadata.TableProgress.State", + ) + original_request = proto.Field( proto.MESSAGE, number=1, @@ -432,6 +478,12 @@ class CreateClusterMetadata(proto.Message): number=3, message=timestamp_pb2.Timestamp, ) + tables = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=4, + message=TableProgress, + ) class UpdateClusterMetadata(proto.Message): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py index 05d1e524de48..1b2e4d615d02 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py @@ -169,13 +169,14 @@ class Cluster(proto.Message): The unique name of the cluster. Values are of the form ``projects/{project}/instances/{instance}/clusters/[a-z][-a-z0-9]*``. location (str): - (``CreationOnly``) The location where this cluster's nodes - and storage reside. For best performance, clients should be + Immutable. The location where this cluster's nodes and + storage reside. For best performance, clients should be located as close as possible to this cluster. Currently only zones are supported, so values should be of the form ``projects/{project}/locations/{zone}``. state (google.cloud.bigtable_admin_v2.types.Cluster.State): - The current state of the cluster. + Output only. The current state of the + cluster. serve_nodes (int): The number of nodes allocated to this cluster. More nodes enable higher throughput and @@ -185,9 +186,9 @@ class Cluster(proto.Message): This field is a member of `oneof`_ ``config``. default_storage_type (google.cloud.bigtable_admin_v2.types.StorageType): - (``CreationOnly``) The type of storage used by this cluster - to serve its parent instance's tables, unless explicitly - overridden. + Immutable. The type of storage used by this + cluster to serve its parent instance's tables, + unless explicitly overridden. encryption_config (google.cloud.bigtable_admin_v2.types.Cluster.EncryptionConfig): Immutable. The encryption configuration for CMEK-protected clusters. @@ -254,6 +255,9 @@ class EncryptionConfig(proto.Message): key. 2) Only regional keys can be used and the region of the CMEK key must match the region of the cluster. + 3) All clusters within an instance must use the same CMEK + key. 
Values are of the form + ``projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{key}`` """ kms_key_name = proto.Field( @@ -309,8 +313,7 @@ class AppProfile(proto.Message): Attributes: name (str): - (``OutputOnly``) The unique name of the app profile. Values - are of the form + The unique name of the app profile. Values are of the form ``projects/{project}/instances/{instance}/appProfiles/[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. etag (str): Strongly validated etag for optimistic concurrency control. @@ -324,8 +327,8 @@ class AppProfile(proto.Message): 7232 `__ for more details. description (str): - Optional long form description of the use - case for this AppProfile. + Long form description of the use case for + this AppProfile. multi_cluster_routing_use_any (google.cloud.bigtable_admin_v2.types.AppProfile.MultiClusterRoutingUseAny): Use a multi-cluster routing policy. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py index 6a4446dd3c00..183b8808639b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py @@ -89,14 +89,13 @@ class Table(proto.Message): with UNKNOWN ``replication_status``. Views: ``REPLICATION_VIEW``, ``ENCRYPTION_VIEW``, ``FULL`` column_families (Mapping[str, google.cloud.bigtable_admin_v2.types.ColumnFamily]): - (``CreationOnly``) The column families configured for this - table, mapped by column family ID. Views: ``SCHEMA_VIEW``, - ``FULL`` + The column families configured for this table, mapped by + column family ID. Views: ``SCHEMA_VIEW``, ``FULL`` granularity (google.cloud.bigtable_admin_v2.types.Table.TimestampGranularity): - (``CreationOnly``) The granularity (i.e. ``MILLIS``) at - which timestamps are stored in this table. Timestamps not - matching the granularity will be rejected. If unspecified at - creation time, the value will be set to ``MILLIS``. Views: + Immutable. The granularity (i.e. ``MILLIS``) at which + timestamps are stored in this table. Timestamps not matching + the granularity will be rejected. If unspecified at creation + time, the value will be set to ``MILLIS``. Views: ``SCHEMA_VIEW``, ``FULL``. restore_info (google.cloud.bigtable_admin_v2.types.RestoreInfo): Output only. If this table was restored from @@ -427,8 +426,8 @@ class Backup(proto.Message): Attributes: name (str): - Output only. A globally unique identifier for the backup - which cannot be changed. Values are of the form + A globally unique identifier for the backup which cannot be + changed. Values are of the form ``projects/{project}/instances/{instance}/clusters/{cluster}/ backups/[_a-zA-Z0-9][-_.a-zA-Z0-9]*`` The final segment of the name must be between 1 and 50 characters in length. From 69d4fe80e125044c1b906cbc68166c5ab1998b38 Mon Sep 17 00:00:00 2001 From: Mariatta Wijaya Date: Tue, 24 May 2022 11:02:27 -0700 Subject: [PATCH 615/892] docs: Add EncryptionInfo documentation (#588) Add documentation about the EncryptionInfo object. The doc is autogenerated from docstrings. 
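For readers unfamiliar with the object being documented here, a minimal sketch of reading encryption details through the admin client is shown below; the project, instance, and table names are placeholders, and it relies on the `Table.get_encryption_info()` helper exposed by this library.

    from google.cloud import bigtable

    # Placeholder identifiers -- substitute real project/instance/table names.
    client = bigtable.Client(project="my-project", admin=True)
    table = client.instance("my-instance").table("my-table")

    # get_encryption_info() maps each cluster ID to that cluster's
    # EncryptionInfo entries for this table.
    for cluster_id, infos in table.get_encryption_info().items():
        print(cluster_id, infos)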
--- packages/google-cloud-bigtable/docs/encryption-info.rst | 6 ++++++ packages/google-cloud-bigtable/docs/usage.rst | 1 + 2 files changed, 7 insertions(+) create mode 100644 packages/google-cloud-bigtable/docs/encryption-info.rst diff --git a/packages/google-cloud-bigtable/docs/encryption-info.rst b/packages/google-cloud-bigtable/docs/encryption-info.rst new file mode 100644 index 000000000000..46f19880fcac --- /dev/null +++ b/packages/google-cloud-bigtable/docs/encryption-info.rst @@ -0,0 +1,6 @@ +Encryption Info +~~~~~~~~~~~~~~~ + +.. automodule:: google.cloud.bigtable.encryption_info + :members: + :show-inheritance: diff --git a/packages/google-cloud-bigtable/docs/usage.rst b/packages/google-cloud-bigtable/docs/usage.rst index 53faac22ffab..33bf7bb7fd21 100644 --- a/packages/google-cloud-bigtable/docs/usage.rst +++ b/packages/google-cloud-bigtable/docs/usage.rst @@ -12,6 +12,7 @@ Using the API app-profile backup column-family + encryption-info row row-data row-filters From 6bacfec6404095019cab48f7eeef24e569438789 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Wed, 25 May 2022 22:35:18 +0200 Subject: [PATCH 616/892] chore(deps): update dependency apache-beam to v2.39.0 (#592) --- packages/google-cloud-bigtable/samples/beam/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index ce89af087013..1f34abd0b00e 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ -apache-beam==2.38.0 +apache-beam==2.39.0 google-cloud-bigtable==2.9.0 google-cloud-core==2.3.0 From 6d3c013678c3865039eb7e0cfacf7d5f0942fa4c Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Mon, 30 May 2022 17:26:26 +0000 Subject: [PATCH 617/892] chore: use gapic-generator-python 1.0.0 (#593) - [ ] Regenerate this pull request now. PiperOrigin-RevId: 451250442 Source-Link: https://github.com/googleapis/googleapis/commit/cca5e8181f6442b134e8d4d206fbe9e0e74684ba Source-Link: https://github.com/googleapis/googleapis-gen/commit/0b219da161a8bdcc3c6f7b2efcd82105182a30ca Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiMGIyMTlkYTE2MWE4YmRjYzNjNmY3YjJlZmNkODIxMDUxODJhMzBjYSJ9 --- .../bigtable_admin_v2/test_bigtable_instance_admin.py | 8 +++++++- .../gapic/bigtable_admin_v2/test_bigtable_table_admin.py | 8 +++++++- .../tests/unit/gapic/bigtable_v2/test_bigtable.py | 8 +++++++- 3 files changed, 21 insertions(+), 3 deletions(-) diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index df770db6bb2b..4e7fcf2a79f5 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -14,7 +14,13 @@ # limitations under the License. 
# import os -import mock + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock +except ImportError: + import mock import grpc from grpc.experimental import aio diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index 2e89585c791c..ff766dcd1cca 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -14,7 +14,13 @@ # limitations under the License. # import os -import mock + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock +except ImportError: + import mock import grpc from grpc.experimental import aio diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py index 5745b9aebba2..3ca5041c2506 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py @@ -14,7 +14,13 @@ # limitations under the License. # import os -import mock + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock +except ImportError: + import mock import grpc from grpc.experimental import aio From 4a4151501f283912d3548e320e463a396739a9e9 Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Mon, 30 May 2022 10:52:50 -0700 Subject: [PATCH 618/892] chore(main): release 2.10.0 (#591) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- packages/google-cloud-bigtable/CHANGELOG.md | 12 ++++++++++++ packages/google-cloud-bigtable/setup.py | 2 +- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 45f9f109b98e..1bb7c69c27bc 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,18 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.10.0](https://github.com/googleapis/python-bigtable/compare/v2.9.0...v2.10.0) (2022-05-30) + + +### Features + +* refreshes Bigtable Admin API(s) protos ([#589](https://github.com/googleapis/python-bigtable/issues/589)) ([b508e33](https://github.com/googleapis/python-bigtable/commit/b508e3321937850d65242283e82f5413feb6081a)) + + +### Documentation + +* Add EncryptionInfo documentation ([#588](https://github.com/googleapis/python-bigtable/issues/588)) ([bedbf1b](https://github.com/googleapis/python-bigtable/commit/bedbf1b1bb304ff45f31ad20004ff96041ce716c)) + ## [2.9.0](https://github.com/googleapis/python-bigtable/compare/v2.8.1...v2.9.0) (2022-04-14) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index cd65af765058..2742ecc78998 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -22,7 +22,7 @@ name = "google-cloud-bigtable" description = "Google Cloud Bigtable API client library" -version = "2.9.0" +version = "2.10.0" # Should be one of: # 'Development Status :: 3 - 
Alpha' # 'Development Status :: 4 - Beta' From 79c001c7751d4203d46a8715becc6918f180bf6f Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Mon, 30 May 2022 22:45:41 +0200 Subject: [PATCH 619/892] chore(deps): update dependency google-cloud-bigtable to v2.10.0 (#594) --- packages/google-cloud-bigtable/samples/beam/requirements.txt | 2 +- packages/google-cloud-bigtable/samples/hello/requirements.txt | 2 +- .../samples/instanceadmin/requirements.txt | 2 +- .../google-cloud-bigtable/samples/metricscaler/requirements.txt | 2 +- .../google-cloud-bigtable/samples/quickstart/requirements.txt | 2 +- .../samples/snippets/filters/requirements.txt | 2 +- .../samples/snippets/reads/requirements.txt | 2 +- .../samples/snippets/writes/requirements.txt | 2 +- .../google-cloud-bigtable/samples/tableadmin/requirements.txt | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index 1f34abd0b00e..d3294d0f21dc 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ apache-beam==2.39.0 -google-cloud-bigtable==2.9.0 +google-cloud-bigtable==2.10.0 google-cloud-core==2.3.0 diff --git a/packages/google-cloud-bigtable/samples/hello/requirements.txt b/packages/google-cloud-bigtable/samples/hello/requirements.txt index 7dcdef580195..7d727111807c 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.9.0 +google-cloud-bigtable==2.10.0 google-cloud-core==2.3.0 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt index 90da13ba02fc..b11e1d430fc1 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.9.0 +google-cloud-bigtable==2.10.0 backoff==2.0.1 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index 070d47be5ee2..565d944170d8 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.9.0 +google-cloud-bigtable==2.10.0 google-cloud-monitoring==2.9.1 diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt index a555cf279ce2..8d9f1299d473 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.9.0 +google-cloud-bigtable==2.10.0 diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt index d74172ad3bda..0522dcf65258 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.9.0 +google-cloud-bigtable==2.10.0 snapshottest==0.6.0 \ No newline at end of file diff --git 
a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt index d74172ad3bda..0522dcf65258 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.9.0 +google-cloud-bigtable==2.10.0 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt index 3d3bebbaf8e6..53b03cadb8e8 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.9.0 \ No newline at end of file +google-cloud-bigtable==2.10.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt index a555cf279ce2..8d9f1299d473 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.9.0 +google-cloud-bigtable==2.10.0 From 197c2ee2f4d08066a795ed7e632811f1c3fd6cd0 Mon Sep 17 00:00:00 2001 From: Anthonios Partheniou Date: Thu, 2 Jun 2022 13:15:51 -0400 Subject: [PATCH 620/892] fix(deps): require protobuf <4.0.0dev (#595) --- packages/google-cloud-bigtable/setup.py | 3 ++- packages/google-cloud-bigtable/testing/constraints-3.6.txt | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 2742ecc78998..651d6f18f32c 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -38,7 +38,8 @@ # https://github.com/googleapis/google-cloud-python/issues/10566 "google-cloud-core >= 1.4.1, <3.0.0dev", "grpc-google-iam-v1 >= 0.12.4, <1.0.0dev", - "proto-plus >= 1.18.0", + "proto-plus >= 1.18.0, <2.0.0dev", + "protobuf >= 3.19.0, <4.0.0dev", ] extras = {"libcst": "libcst >= 0.2.5"} diff --git a/packages/google-cloud-bigtable/testing/constraints-3.6.txt b/packages/google-cloud-bigtable/testing/constraints-3.6.txt index 0f4a447dbb12..455fa9b7b015 100644 --- a/packages/google-cloud-bigtable/testing/constraints-3.6.txt +++ b/packages/google-cloud-bigtable/testing/constraints-3.6.txt @@ -10,3 +10,4 @@ google-cloud-core==1.4.1 grpc-google-iam-v1==0.12.4 proto-plus==1.18.0 libcst==0.2.5 +protobuf==3.19.0 From 8e1cb840ab1ac168dc7195cf468337f7a482c40b Mon Sep 17 00:00:00 2001 From: Dan Lee <71398022+dandhlee@users.noreply.github.com> Date: Thu, 2 Jun 2022 19:15:29 -0400 Subject: [PATCH 621/892] docs: fix changelog header to consistent size (#596) --- packages/google-cloud-bigtable/CHANGELOG.md | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 1bb7c69c27bc..e2d9730227d5 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -34,7 +34,7 @@ * fix type in docstring for map fields ([4e50278](https://github.com/googleapis/python-bigtable/commit/4e50278c73f608a7c493692d8d17e7dd2aa7ba44)) -### [2.8.1](https://github.com/googleapis/python-bigtable/compare/v2.8.0...v2.8.1) (2022-04-07) 
+## [2.8.1](https://github.com/googleapis/python-bigtable/compare/v2.8.0...v2.8.1) (2022-04-07) ### Bug Fixes @@ -55,7 +55,7 @@ * Remove the limitation that all clusters in a CMEK instance must use the same key ([f008eea](https://github.com/googleapis/python-bigtable/commit/f008eea69a6c7c1a027cefc7f16d46042b524db1)) * Update `cpu_utilization_percent` limit ([#547](https://github.com/googleapis/python-bigtable/issues/547)) ([f008eea](https://github.com/googleapis/python-bigtable/commit/f008eea69a6c7c1a027cefc7f16d46042b524db1)) -### [2.7.1](https://github.com/googleapis/python-bigtable/compare/v2.7.0...v2.7.1) (2022-03-17) +## [2.7.1](https://github.com/googleapis/python-bigtable/compare/v2.7.0...v2.7.1) (2022-03-17) ### Bug Fixes @@ -82,14 +82,14 @@ * add WarmAndPing request for channel priming ([#504](https://github.com/googleapis/python-bigtable/issues/504)) ([df5fc1f](https://github.com/googleapis/python-bigtable/commit/df5fc1f7d6ded88d9bce67f7cc6989981745931f)) -### [2.5.2](https://github.com/googleapis/python-bigtable/compare/v2.5.1...v2.5.2) (2022-02-24) +## [2.5.2](https://github.com/googleapis/python-bigtable/compare/v2.5.1...v2.5.2) (2022-02-24) ### Bug Fixes * Pass app_profile_id when building updated request ([#512](https://github.com/googleapis/python-bigtable/issues/512)) ([2f8ba7a](https://github.com/googleapis/python-bigtable/commit/2f8ba7a4801b17b5afb6180a7ace1327a2d05a52)) -### [2.5.1](https://github.com/googleapis/python-bigtable/compare/v2.5.0...v2.5.1) (2022-02-17) +## [2.5.1](https://github.com/googleapis/python-bigtable/compare/v2.5.0...v2.5.1) (2022-02-17) ### Bug Fixes @@ -134,14 +134,14 @@ * add 'dict' annotation type to 'request' ([160bfd3](https://www.github.com/googleapis/python-bigtable/commit/160bfd317a83561821acc0212d3514701a031ac6)) -### [2.3.3](https://www.github.com/googleapis/python-bigtable/compare/v2.3.2...v2.3.3) (2021-07-24) +## [2.3.3](https://www.github.com/googleapis/python-bigtable/compare/v2.3.2...v2.3.3) (2021-07-24) ### Bug Fixes * enable self signed jwt for grpc ([#397](https://www.github.com/googleapis/python-bigtable/issues/397)) ([9d43a38](https://www.github.com/googleapis/python-bigtable/commit/9d43a388470746608d324ca8d72f41bb3a4492b7)) -### [2.3.2](https://www.github.com/googleapis/python-bigtable/compare/v2.3.1...v2.3.2) (2021-07-20) +## [2.3.2](https://www.github.com/googleapis/python-bigtable/compare/v2.3.1...v2.3.2) (2021-07-20) ### Bug Fixes @@ -149,7 +149,7 @@ * **deps:** pin 'google-{api,cloud}-core', 'google-auth' to allow 2.x versions ([#379](https://www.github.com/googleapis/python-bigtable/issues/379)) ([95b2e13](https://www.github.com/googleapis/python-bigtable/commit/95b2e13b776dca4a6998313c41aa960ffe2e47e9)) * directly append to pb for beter read row performance ([#382](https://www.github.com/googleapis/python-bigtable/issues/382)) ([7040e11](https://www.github.com/googleapis/python-bigtable/commit/7040e113b93bb2e0625c054486305235d8f14c2a)) -### [2.3.1](https://www.github.com/googleapis/python-bigtable/compare/v2.3.0...v2.3.1) (2021-07-13) +## [2.3.1](https://www.github.com/googleapis/python-bigtable/compare/v2.3.0...v2.3.1) (2021-07-13) ### Bug Fixes @@ -256,7 +256,7 @@ * update python contributing guide ([#206](https://www.github.com/googleapis/python-bigtable/issues/206)) ([e301ac3](https://www.github.com/googleapis/python-bigtable/commit/e301ac3b61364d779fdb50a57ae8e2cb9952df9e)) -### [1.6.1](https://www.github.com/googleapis/python-bigtable/compare/v1.6.0...v1.6.1) (2020-12-01) +## 
[1.6.1](https://www.github.com/googleapis/python-bigtable/compare/v1.6.0...v1.6.1) (2020-12-01) ### Documentation @@ -271,7 +271,7 @@ * add 'timeout' arg to 'Table.mutate_rows' ([#157](https://www.github.com/googleapis/python-bigtable/issues/157)) ([6d597a1](https://www.github.com/googleapis/python-bigtable/commit/6d597a1e5be05c993c9f86beca4c1486342caf94)), closes [/github.com/googleapis/python-bigtable/issues/7#issuecomment-715538708](https://www.github.com/googleapis//github.com/googleapis/python-bigtable/issues/7/issues/issuecomment-715538708) [#7](https://www.github.com/googleapis/python-bigtable/issues/7) * Backup Level IAM ([#160](https://www.github.com/googleapis/python-bigtable/issues/160)) ([44932cb](https://www.github.com/googleapis/python-bigtable/commit/44932cb8710e12279dbd4e9271577f8bee238980)) -### [1.5.1](https://www.github.com/googleapis/python-bigtable/compare/v1.5.0...v1.5.1) (2020-10-06) +## [1.5.1](https://www.github.com/googleapis/python-bigtable/compare/v1.5.0...v1.5.1) (2020-10-06) ### Bug Fixes From 276e7a6c0067a5cc1ee792ee014ce617070697b1 Mon Sep 17 00:00:00 2001 From: Anthonios Partheniou Date: Fri, 3 Jun 2022 14:36:20 -0400 Subject: [PATCH 622/892] chore: test minimum dependencies in python 3.7 (#599) --- .../testing/constraints-3.7.txt | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/packages/google-cloud-bigtable/testing/constraints-3.7.txt b/packages/google-cloud-bigtable/testing/constraints-3.7.txt index e69de29bb2d1..455fa9b7b015 100644 --- a/packages/google-cloud-bigtable/testing/constraints-3.7.txt +++ b/packages/google-cloud-bigtable/testing/constraints-3.7.txt @@ -0,0 +1,13 @@ +# This constraints file is used to check that lower bounds +# are correct in setup.py +# List *all* library dependencies and extras in this file. +# Pin the version to the lower bound. 
+# +# e.g., if setup.py has "foo >= 1.14.0, < 2.0.0dev", +# Then this file should have foo==1.14.0 +google-api-core==1.31.5 +google-cloud-core==1.4.1 +grpc-google-iam-v1==0.12.4 +proto-plus==1.18.0 +libcst==0.2.5 +protobuf==3.19.0 From 229b49208497bed3832961095f20ec9e9b590d1f Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Mon, 6 Jun 2022 11:29:46 -0400 Subject: [PATCH 623/892] chore(main): release 2.10.1 (#597) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- packages/google-cloud-bigtable/CHANGELOG.md | 12 ++++++++++++ packages/google-cloud-bigtable/setup.py | 2 +- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index e2d9730227d5..be1a397d818e 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,18 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.10.1](https://github.com/googleapis/python-bigtable/compare/v2.10.0...v2.10.1) (2022-06-03) + + +### Bug Fixes + +* **deps:** require protobuf <4.0.0dev ([#595](https://github.com/googleapis/python-bigtable/issues/595)) ([a4deaf7](https://github.com/googleapis/python-bigtable/commit/a4deaf7b1b5c4b7ce8f6dc5bb96d32ea8ff55c2d)) + + +### Documentation + +* fix changelog header to consistent size ([#596](https://github.com/googleapis/python-bigtable/issues/596)) ([51961c3](https://github.com/googleapis/python-bigtable/commit/51961c32686fe5851e957581b85adbe92a073e03)) + ## [2.10.0](https://github.com/googleapis/python-bigtable/compare/v2.9.0...v2.10.0) (2022-05-30) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 651d6f18f32c..f2ea88af575c 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -22,7 +22,7 @@ name = "google-cloud-bigtable" description = "Google Cloud Bigtable API client library" -version = "2.10.0" +version = "2.10.1" # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' From 87d6b31a9ecb8fffbe26fd339478f4b4f3ef9162 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Sun, 12 Jun 2022 11:00:04 -0400 Subject: [PATCH 624/892] chore: add prerelease nox session (#606) Source-Link: https://github.com/googleapis/synthtool/commit/050953d60f71b4ed4be563e032f03c192c50332f Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:65e656411895bff71cffcae97246966460160028f253c2e45b7a25d805a5b142 Co-authored-by: Owl Bot --- .../.github/.OwlBot.lock.yaml | 4 +- .../.kokoro/continuous/prerelease-deps.cfg | 7 ++ .../.kokoro/presubmit/prerelease-deps.cfg | 7 ++ packages/google-cloud-bigtable/noxfile.py | 64 +++++++++++++++++++ 4 files changed, 80 insertions(+), 2 deletions(-) create mode 100644 packages/google-cloud-bigtable/.kokoro/continuous/prerelease-deps.cfg create mode 100644 packages/google-cloud-bigtable/.kokoro/presubmit/prerelease-deps.cfg diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 757c9dca75ad..2185b591844c 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. 
docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:81ed5ecdfc7cac5b699ba4537376f3563f6f04122c4ec9e735d3b3dc1d43dd32 -# created: 2022-05-05T22:08:23.383410683Z + digest: sha256:65e656411895bff71cffcae97246966460160028f253c2e45b7a25d805a5b142 +# created: 2022-06-12T13:11:45.905884945Z diff --git a/packages/google-cloud-bigtable/.kokoro/continuous/prerelease-deps.cfg b/packages/google-cloud-bigtable/.kokoro/continuous/prerelease-deps.cfg new file mode 100644 index 000000000000..3595fb43f5c0 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/continuous/prerelease-deps.cfg @@ -0,0 +1,7 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Only run this nox session. +env_vars: { + key: "NOX_SESSION" + value: "prerelease_deps" +} diff --git a/packages/google-cloud-bigtable/.kokoro/presubmit/prerelease-deps.cfg b/packages/google-cloud-bigtable/.kokoro/presubmit/prerelease-deps.cfg new file mode 100644 index 000000000000..3595fb43f5c0 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/presubmit/prerelease-deps.cfg @@ -0,0 +1,7 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Only run this nox session. +env_vars: { + key: "NOX_SESSION" + value: "prerelease_deps" +} diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index 740697a02fac..cb022e423d84 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -363,3 +363,67 @@ def docfx(session): os.path.join("docs", ""), os.path.join("docs", "_build", "html", ""), ) + + +@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS) +def prerelease_deps(session): + """Run all tests with prerelease versions of dependencies installed.""" + + prerel_deps = [ + "protobuf", + "googleapis-common-protos", + "google-auth", + "grpcio", + "grpcio-status", + "google-api-core", + "proto-plus", + # dependencies of google-auth + "cryptography", + "pyasn1", + ] + + for dep in prerel_deps: + session.install("--pre", "--no-deps", "--upgrade", dep) + + # Remaining dependencies + other_deps = ["requests"] + session.install(*other_deps) + + session.install(*UNIT_TEST_STANDARD_DEPENDENCIES) + session.install(*SYSTEM_TEST_STANDARD_DEPENDENCIES) + + # Because we test minimum dependency versions on the minimum Python + # version, the first version we test with in the unit tests sessions has a + # constraints file containing all dependencies and extras. + with open( + CURRENT_DIRECTORY + / "testing" + / f"constraints-{UNIT_TEST_PYTHON_VERSIONS[0]}.txt", + encoding="utf-8", + ) as constraints_file: + constraints_text = constraints_file.read() + + # Ignore leading whitespace and comment lines. + deps = [ + match.group(1) + for match in re.finditer( + r"^\s*(\S+)(?===\S+)", constraints_text, flags=re.MULTILINE + ) + ] + + # Don't overwrite prerelease packages. + deps = [dep for dep in deps if dep not in prerel_deps] + # We use --no-deps to ensure that pre-release versions aren't overwritten + # by the version ranges in setup.py. 
+ session.install(*deps) + session.install("--no-deps", "-e", ".[all]") + + # Print out prerelease package versions + session.run( + "python", "-c", "import google.protobuf; print(google.protobuf.__version__)" + ) + session.run("python", "-c", "import grpc; print(grpc.__version__)") + + session.run("py.test", "tests/unit") + session.run("py.test", "tests/system") + session.run("py.test", "samples/snippets") From 8408e77dd847e1511ab81ba7e95f41b22e8f3fd2 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Mon, 13 Jun 2022 06:16:09 -0400 Subject: [PATCH 625/892] chore(python): add missing import for prerelease testing (#607) Source-Link: https://github.com/googleapis/synthtool/commit/d2871d98e1e767d4ad49a557ff979236d64361a1 Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:b2dc5f80edcf5d4486c39068c9fa11f7f851d9568eea4dcba130f994ea9b5e97 Co-authored-by: Owl Bot --- packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml | 4 ++-- packages/google-cloud-bigtable/noxfile.py | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 2185b591844c..50b29ffd2050 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:65e656411895bff71cffcae97246966460160028f253c2e45b7a25d805a5b142 -# created: 2022-06-12T13:11:45.905884945Z + digest: sha256:b2dc5f80edcf5d4486c39068c9fa11f7f851d9568eea4dcba130f994ea9b5e97 +# created: 2022-06-12T16:09:31.61859086Z diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index cb022e423d84..2b89e8705d65 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -19,6 +19,7 @@ from __future__ import absolute_import import os import pathlib +import re import shutil import warnings From 39bc02ade0b6ebc3ddf9e27fe0b4f219e3873241 Mon Sep 17 00:00:00 2001 From: Mariatta Wijaya Date: Tue, 21 Jun 2022 09:45:45 -0700 Subject: [PATCH 626/892] samples: Add BigTable delete samples (#590) * doc: Add BigTable delete samples - Add the deleteion snippets - Add test cases - Update the row_key_prefix value - Remove `_sample` from function names. - Refactor the snapshot match assertion. 
- Renamed column to column_family_obj Closes internal issue #213629071 Co-authored-by: Owl Bot --- .../samples/snippets/README.md | 6 +- .../snippets/deletes/deletes_snippets.py | 122 +++++++ .../samples/snippets/deletes/deletes_test.py | 139 ++++++++ .../samples/snippets/deletes/noxfile.py | 312 ++++++++++++++++++ .../snippets/deletes/requirements-test.txt | 1 + .../samples/snippets/deletes/requirements.txt | 2 + .../snippets/deletes/snapshots/__init__.py | 0 .../deletes/snapshots/snap_deletes_test.py | 24 ++ 8 files changed, 603 insertions(+), 3 deletions(-) create mode 100644 packages/google-cloud-bigtable/samples/snippets/deletes/deletes_snippets.py create mode 100644 packages/google-cloud-bigtable/samples/snippets/deletes/deletes_test.py create mode 100644 packages/google-cloud-bigtable/samples/snippets/deletes/noxfile.py create mode 100644 packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt create mode 100644 packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt create mode 100644 packages/google-cloud-bigtable/samples/snippets/deletes/snapshots/__init__.py create mode 100644 packages/google-cloud-bigtable/samples/snippets/deletes/snapshots/snap_deletes_test.py diff --git a/packages/google-cloud-bigtable/samples/snippets/README.md b/packages/google-cloud-bigtable/samples/snippets/README.md index 134b247329b5..7c0dd4463214 100644 --- a/packages/google-cloud-bigtable/samples/snippets/README.md +++ b/packages/google-cloud-bigtable/samples/snippets/README.md @@ -3,8 +3,8 @@ ## Python Samples for Cloud Bigtable -This directory contains samples for Cloud Bigtable, which may be used as a refererence for how to use this product. -Samples, quickstarts, and other documentation are available at cloud.google.com. +This directory contains samples for Cloud Bigtable, which may be used as a reference for how to use this product. +Samples, quickstarts, and other documentation are available at [cloud.google.com](https://cloud.google.com/bigtable). ### Snippets @@ -17,7 +17,7 @@ This folder contains snippets for Python Cloud Bigtable. ## Additional Information You can read the documentation for more details on API usage and use GitHub -to browse the source and [report issues][issues]. +to [browse the source](https://github.com/googleapis/python-bigtable) and [report issues][issues]. ### Contributing View the [contributing guidelines][contrib_guide], the [Python style guide][py_style] for more information. diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/deletes_snippets.py b/packages/google-cloud-bigtable/samples/snippets/deletes/deletes_snippets.py new file mode 100644 index 000000000000..4e89189db8f6 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/deletes_snippets.py @@ -0,0 +1,122 @@ +#!/usr/bin/env python + +# Copyright 2022, Google LLC +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from google.cloud import bigtable + +# Write your code here. 
+ + +# [START bigtable_delete_from_column] +def delete_from_column(project_id, instance_id, table_id): + client = bigtable.Client(project=project_id, admin=True) + instance = client.instance(instance_id) + table = instance.table(table_id) + row = table.row("phone#4c410523#20190501") + row.delete_cell(column_family_id="cell_plan", column="data_plan_01gb") + row.commit() + + +# [END bigtable_delete_from_column] + +# [START bigtable_delete_from_column_family] +def delete_from_column_family(project_id, instance_id, table_id): + client = bigtable.Client(project=project_id, admin=True) + instance = client.instance(instance_id) + table = instance.table(table_id) + row = table.row("phone#4c410523#20190501") + row.delete_cells( + column_family_id="cell_plan", columns=["data_plan_01gb", "data_plan_05gb"] + ) + row.commit() + + +# [END bigtable_delete_from_column_family] + + +# [START bigtable_delete_from_row] +def delete_from_row(project_id, instance_id, table_id): + client = bigtable.Client(project=project_id, admin=True) + instance = client.instance(instance_id) + table = instance.table(table_id) + row = table.row("phone#4c410523#20190501") + row.delete() + row.commit() + + +# [END bigtable_delete_from_row] + +# [START bigtable_streaming_and_batching] +def streaming_and_batching(project_id, instance_id, table_id): + client = bigtable.Client(project=project_id, admin=True) + instance = client.instance(instance_id) + table = instance.table(table_id) + batcher = table.mutations_batcher(flush_count=2) + rows = table.read_rows() + for row in rows: + row = table.row(row.row_key) + row.delete_cell(column_family_id="cell_plan", column="data_plan_01gb") + + batcher.mutate_rows(rows) + + +# [END bigtable_streaming_and_batching] + +# [START bigtable_check_and_mutate] +def check_and_mutate(project_id, instance_id, table_id): + client = bigtable.Client(project=project_id, admin=True) + instance = client.instance(instance_id) + table = instance.table(table_id) + row = table.row("phone#4c410523#20190501") + row.delete_cell(column_family_id="cell_plan", column="data_plan_01gb") + row.delete_cell(column_family_id="cell_plan", column="data_plan_05gb") + row.commit() + + +# [END bigtable_check_and_mutate] + + +# [START bigtable_drop_row_range] +def drop_row_range(project_id, instance_id, table_id): + client = bigtable.Client(project=project_id, admin=True) + instance = client.instance(instance_id) + table = instance.table(table_id) + row_key_prefix = "phone#4c410523" + table.drop_by_prefix(row_key_prefix, timeout=200) + + +# [END bigtable_drop_row_range] + +# [START bigtable_delete_column_family] +def delete_column_family(project_id, instance_id, table_id): + client = bigtable.Client(project=project_id, admin=True) + instance = client.instance(instance_id) + table = instance.table(table_id) + column_family_id = "stats_summary" + column_family_obj = table.column_family(column_family_id) + column_family_obj.delete() + + +# [END bigtable_delete_column_family] + +# [START bigtable_delete_table] +def delete_table(project_id, instance_id, table_id): + client = bigtable.Client(project=project_id, admin=True) + instance = client.instance(instance_id) + table = instance.table(table_id) + table.delete() + + +# [END bigtable_delete_table] diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/deletes_test.py b/packages/google-cloud-bigtable/samples/snippets/deletes/deletes_test.py new file mode 100644 index 000000000000..bf23daa5992d --- /dev/null +++ 
b/packages/google-cloud-bigtable/samples/snippets/deletes/deletes_test.py @@ -0,0 +1,139 @@ +# Copyright 2020, Google LLC +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import datetime +import os +import time +import uuid + +from google.cloud import bigtable +import pytest + +import deletes_snippets + +PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] +BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"] +TABLE_ID_PREFIX = "mobile-time-series-{}" + + +@pytest.fixture(scope="module", autouse=True) +def table_id(): + from google.cloud.bigtable.row_set import RowSet + + client = bigtable.Client(project=PROJECT, admin=True) + instance = client.instance(BIGTABLE_INSTANCE) + + table_id = TABLE_ID_PREFIX.format(str(uuid.uuid4())[:16]) + table = instance.table(table_id) + if table.exists(): + table.delete() + + table.create(column_families={"stats_summary": None, "cell_plan": None}) + + timestamp = datetime.datetime(2019, 5, 1) + timestamp_minus_hr = datetime.datetime(2019, 5, 1) - datetime.timedelta(hours=1) + + row_keys = [ + "phone#4c410523#20190501", + "phone#4c410523#20190502", + "phone#4c410523#20190505", + "phone#5c10102#20190501", + "phone#5c10102#20190502", + ] + + rows = [table.direct_row(row_key) for row_key in row_keys] + + rows[0].set_cell("stats_summary", "connected_cell", 1, timestamp) + rows[0].set_cell("stats_summary", "connected_wifi", 1, timestamp) + rows[0].set_cell("stats_summary", "os_build", "PQ2A.190405.003", timestamp) + rows[0].set_cell("cell_plan", "data_plan_01gb", "true", timestamp_minus_hr) + rows[0].set_cell("cell_plan", "data_plan_01gb", "false", timestamp) + rows[0].set_cell("cell_plan", "data_plan_05gb", "true", timestamp) + rows[1].set_cell("stats_summary", "connected_cell", 1, timestamp) + rows[1].set_cell("stats_summary", "connected_wifi", 1, timestamp) + rows[1].set_cell("stats_summary", "os_build", "PQ2A.190405.004", timestamp) + rows[1].set_cell("cell_plan", "data_plan_05gb", "true", timestamp) + rows[2].set_cell("stats_summary", "connected_cell", 0, timestamp) + rows[2].set_cell("stats_summary", "connected_wifi", 1, timestamp) + rows[2].set_cell("stats_summary", "os_build", "PQ2A.190406.000", timestamp) + rows[2].set_cell("cell_plan", "data_plan_05gb", "true", timestamp) + rows[3].set_cell("stats_summary", "connected_cell", 1, timestamp) + rows[3].set_cell("stats_summary", "connected_wifi", 1, timestamp) + rows[3].set_cell("stats_summary", "os_build", "PQ2A.190401.002", timestamp) + rows[3].set_cell("cell_plan", "data_plan_10gb", "true", timestamp) + rows[4].set_cell("stats_summary", "connected_cell", 1, timestamp) + rows[4].set_cell("stats_summary", "connected_wifi", 0, timestamp) + rows[4].set_cell("stats_summary", "os_build", "PQ2A.190406.000", timestamp) + rows[4].set_cell("cell_plan", "data_plan_10gb", "true", timestamp) + + table.mutate_rows(rows) + + # Ensure mutations have propagated. 
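+    # (The fixture re-reads the written row keys below and sleeps between
+    # attempts until every row is visible, so the delete tests never start
+    # against a partially populated table.)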
+ row_set = RowSet() + + for row_key in row_keys: + row_set.add_row_key(row_key) + + fetched = list(table.read_rows(row_set=row_set)) + + while len(fetched) < len(rows): + time.sleep(5) + fetched = list(table.read_rows(row_set=row_set)) + + yield table_id + + +def assert_snapshot_match(capsys, snapshot): + out, _ = capsys.readouterr() + snapshot.assert_match(out) + + +def test_delete_from_column(capsys, snapshot, table_id): + deletes_snippets.delete_from_column(PROJECT, BIGTABLE_INSTANCE, table_id) + assert_snapshot_match(capsys, snapshot) + + +def test_delete_from_column_family(capsys, snapshot, table_id): + deletes_snippets.delete_from_column_family(PROJECT, BIGTABLE_INSTANCE, table_id) + assert_snapshot_match(capsys, snapshot) + + +def test_delete_from_row(capsys, snapshot, table_id): + deletes_snippets.delete_from_row(PROJECT, BIGTABLE_INSTANCE, table_id) + assert_snapshot_match(capsys, snapshot) + + +def test_streaming_and_batching(capsys, snapshot, table_id): + deletes_snippets.streaming_and_batching(PROJECT, BIGTABLE_INSTANCE, table_id) + assert_snapshot_match(capsys, snapshot) + + +def test_check_and_mutate(capsys, snapshot, table_id): + deletes_snippets.check_and_mutate(PROJECT, BIGTABLE_INSTANCE, table_id) + assert_snapshot_match(capsys, snapshot) + + +def test_drop_row_range(capsys, snapshot, table_id): + deletes_snippets.drop_row_range(PROJECT, BIGTABLE_INSTANCE, table_id) + assert_snapshot_match(capsys, snapshot) + + +def test_delete_column_family(capsys, snapshot, table_id): + deletes_snippets.delete_column_family(PROJECT, BIGTABLE_INSTANCE, table_id) + assert_snapshot_match(capsys, snapshot) + + +def test_delete_table(capsys, snapshot, table_id): + deletes_snippets.delete_table(PROJECT, BIGTABLE_INSTANCE, table_id) + assert_snapshot_match(capsys, snapshot) diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/deletes/noxfile.py new file mode 100644 index 000000000000..38bb0a572b81 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/noxfile.py @@ -0,0 +1,312 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import glob +import os +from pathlib import Path +import sys +from typing import Callable, Dict, List, Optional + +import nox + + +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING +# DO NOT EDIT THIS FILE EVER! +# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING + +BLACK_VERSION = "black==22.3.0" +ISORT_VERSION = "isort==5.10.1" + +# Copy `noxfile_config.py` to your directory and modify it instead. + +# `TEST_CONFIG` dict is a configuration hook that allows users to +# modify the test configurations. The values here should be in sync +# with `noxfile_config.py`. Users will copy `noxfile_config.py` into +# their directory and modify it. 
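+# For example (a minimal sketch -- the keys shown are the ones used below and
+# the values are only illustrative), a sample directory's `noxfile_config.py`
+# could contain:
+#
+#     TEST_CONFIG_OVERRIDE = {
+#         "gcloud_project_env": "BUILD_SPECIFIC_GCLOUD_PROJECT",
+#         "envs": {},
+#     }
+#
+# Any keys defined there replace the matching defaults in TEST_CONFIG below.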
+ +TEST_CONFIG = { + # You can opt out from the test for specific Python versions. + "ignored_versions": [], + # Old samples are opted out of enforcing Python type hints + # All new samples should feature them + "enforce_type_hints": False, + # An envvar key for determining the project id to use. Change it + # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a + # build specific Cloud project. You can also use your own string + # to use your own Cloud project. + "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", + # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + # If you need to use a specific version of pip, + # change pip_version_override to the string representation + # of the version number, for example, "20.2.4" + "pip_version_override": None, + # A dictionary you want to inject into your test. Don't put any + # secrets here. These values will override predefined values. + "envs": {}, +} + + +try: + # Ensure we can import noxfile_config in the project's directory. + sys.path.append(".") + from noxfile_config import TEST_CONFIG_OVERRIDE +except ImportError as e: + print("No user noxfile_config found: detail: {}".format(e)) + TEST_CONFIG_OVERRIDE = {} + +# Update the TEST_CONFIG with the user supplied values. +TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) + + +def get_pytest_env_vars() -> Dict[str, str]: + """Returns a dict for pytest invocation.""" + ret = {} + + # Override the GCLOUD_PROJECT and the alias. + env_key = TEST_CONFIG["gcloud_project_env"] + # This should error out if not set. + ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] + + # Apply user supplied envs. + ret.update(TEST_CONFIG["envs"]) + return ret + + +# DO NOT EDIT - automatically generated. +# All versions used to test samples. +ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"] + +# Any default versions that should be ignored. +IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] + +TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) + +INSTALL_LIBRARY_FROM_SOURCE = os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False) in ( + "True", + "true", +) + +# Error if a python version is missing +nox.options.error_on_missing_interpreters = True + +# +# Style Checks +# + + +def _determine_local_import_names(start_dir: str) -> List[str]: + """Determines all import names that should be considered "local". + + This is used when running the linter to insure that import order is + properly checked. + """ + file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)] + return [ + basename + for basename, extension in file_ext_pairs + if extension == ".py" + or os.path.isdir(os.path.join(start_dir, basename)) + and basename not in ("__pycache__") + ] + + +# Linting with flake8. 
+# +# We ignore the following rules: +# E203: whitespace before ‘:’ +# E266: too many leading ‘#’ for block comment +# E501: line too long +# I202: Additional newline in a section of imports +# +# We also need to specify the rules which are ignored by default: +# ['E226', 'W504', 'E126', 'E123', 'W503', 'E24', 'E704', 'E121'] +FLAKE8_COMMON_ARGS = [ + "--show-source", + "--builtin=gettext", + "--max-complexity=20", + "--import-order-style=google", + "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py", + "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202", + "--max-line-length=88", +] + + +@nox.session +def lint(session: nox.sessions.Session) -> None: + if not TEST_CONFIG["enforce_type_hints"]: + session.install("flake8", "flake8-import-order") + else: + session.install("flake8", "flake8-import-order", "flake8-annotations") + + local_names = _determine_local_import_names(".") + args = FLAKE8_COMMON_ARGS + [ + "--application-import-names", + ",".join(local_names), + ".", + ] + session.run("flake8", *args) + + +# +# Black +# + + +@nox.session +def blacken(session: nox.sessions.Session) -> None: + """Run black. Format code to uniform standard.""" + session.install(BLACK_VERSION) + python_files = [path for path in os.listdir(".") if path.endswith(".py")] + + session.run("black", *python_files) + + +# +# format = isort + black +# + +@nox.session +def format(session: nox.sessions.Session) -> None: + """ + Run isort to sort imports. Then run black + to format code to uniform standard. + """ + session.install(BLACK_VERSION, ISORT_VERSION) + python_files = [path for path in os.listdir(".") if path.endswith(".py")] + + # Use the --fss option to sort imports using strict alphabetical order. + # See https://pycqa.github.io/isort/docs/configuration/options.html#force-sort-within-sections + session.run("isort", "--fss", *python_files) + session.run("black", *python_files) + + +# +# Sample Tests +# + + +PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] + + +def _session_tests( + session: nox.sessions.Session, post_install: Callable = None +) -> None: + # check for presence of tests + test_list = glob.glob("*_test.py") + glob.glob("test_*.py") + test_list.extend(glob.glob("tests")) + + if len(test_list) == 0: + print("No tests found, skipping directory.") + return + + if TEST_CONFIG["pip_version_override"]: + pip_version = TEST_CONFIG["pip_version_override"] + session.install(f"pip=={pip_version}") + """Runs py.test for a particular project.""" + concurrent_args = [] + if os.path.exists("requirements.txt"): + if os.path.exists("constraints.txt"): + session.install("-r", "requirements.txt", "-c", "constraints.txt") + else: + session.install("-r", "requirements.txt") + with open("requirements.txt") as rfile: + packages = rfile.read() + + if os.path.exists("requirements-test.txt"): + if os.path.exists("constraints-test.txt"): + session.install( + "-r", "requirements-test.txt", "-c", "constraints-test.txt" + ) + else: + session.install("-r", "requirements-test.txt") + with open("requirements-test.txt") as rtfile: + packages += rtfile.read() + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + if "pytest-parallel" in packages: + concurrent_args.extend(['--workers', 'auto', '--tests-per-worker', 'auto']) + elif "pytest-xdist" in packages: + concurrent_args.extend(['-n', 'auto']) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs + concurrent_args), + # Pytest will return 5 when no 
tests are collected. This can happen + # on travis where slow and flaky tests are excluded. + # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars(), + ) + + +@nox.session(python=ALL_VERSIONS) +def py(session: nox.sessions.Session) -> None: + """Runs py.test for a sample using the specified version of Python.""" + if session.python in TESTED_VERSIONS: + _session_tests(session) + else: + session.skip( + "SKIPPED: {} tests are disabled for this sample.".format(session.python) + ) + + +# +# Readmegen +# + + +def _get_repo_root() -> Optional[str]: + """ Returns the root folder of the project. """ + # Get root of this repository. Assume we don't have directories nested deeper than 10 items. + p = Path(os.getcwd()) + for i in range(10): + if p is None: + break + if Path(p / ".git").exists(): + return str(p) + # .git is not available in repos cloned via Cloud Build + # setup.py is always in the library's root, so use that instead + # https://github.com/googleapis/synthtool/issues/792 + if Path(p / "setup.py").exists(): + return str(p) + p = p.parent + raise Exception("Unable to detect repository root.") + + +GENERATED_READMES = sorted([x for x in Path(".").rglob("*.rst.in")]) + + +@nox.session +@nox.parametrize("path", GENERATED_READMES) +def readmegen(session: nox.sessions.Session, path: str) -> None: + """(Re-)generates the readme for a sample.""" + session.install("jinja2", "pyyaml") + dir_ = os.path.dirname(path) + + if os.path.exists(os.path.join(dir_, "requirements.txt")): + session.install("-r", os.path.join(dir_, "requirements.txt")) + + in_file = os.path.join(dir_, "README.rst.in") + session.run( + "python", _get_repo_root() + "/scripts/readme-gen/readme_gen.py", in_file + ) diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt new file mode 100644 index 000000000000..d00689e0623a --- /dev/null +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt @@ -0,0 +1 @@ +pytest==7.1.2 diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt new file mode 100644 index 000000000000..d74172ad3bda --- /dev/null +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt @@ -0,0 +1,2 @@ +google-cloud-bigtable==2.9.0 +snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/snapshots/__init__.py b/packages/google-cloud-bigtable/samples/snippets/deletes/snapshots/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/snapshots/snap_deletes_test.py b/packages/google-cloud-bigtable/samples/snippets/deletes/snapshots/snap_deletes_test.py new file mode 100644 index 000000000000..04a7db940deb --- /dev/null +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/snapshots/snap_deletes_test.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- +# snapshottest: v1 - https://goo.gl/zC4yUc +from __future__ import unicode_literals + +from snapshottest import Snapshot + + +snapshots = Snapshot() + +snapshots['test_check_and_mutate 1'] = '' + +snapshots['test_delete_column_family 1'] = '' + +snapshots['test_delete_from_column 1'] = '' + +snapshots['test_delete_from_column_family 1'] = '' + +snapshots['test_delete_from_row 1'] = '' + 
+snapshots['test_delete_table 1'] = '' + +snapshots['test_drop_row_range 1'] = '' + +snapshots['test_streaming_and_batching 1'] = '' From c3d21520aa617c601e30c562a3028aed6b662697 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Sun, 10 Jul 2022 13:05:58 -0400 Subject: [PATCH 627/892] fix: require python 3.7+ (#610) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore(python): drop python 3.6 Source-Link: https://github.com/googleapis/synthtool/commit/4f89b13af10d086458f9b379e56a614f9d6dab7b Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:e7bb19d47c13839fe8c147e50e02e8b6cf5da8edd1af8b82208cd6f66cc2829c * require python 3.7+ in setup.py * remove python 3.6 sample configs * remove python 3.6 from noxfile.py * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * exclude templated README Co-authored-by: Owl Bot Co-authored-by: Anthonios Partheniou --- .../.github/.OwlBot.lock.yaml | 4 +- .../.github/workflows/unittest.yml | 2 +- .../.kokoro/samples/python3.6/common.cfg | 40 --------- .../.kokoro/samples/python3.6/continuous.cfg | 7 -- .../samples/python3.6/periodic-head.cfg | 11 --- .../.kokoro/samples/python3.6/periodic.cfg | 6 -- .../.kokoro/samples/python3.6/presubmit.cfg | 6 -- .../.kokoro/test-samples-impl.sh | 4 +- .../google-cloud-bigtable/CONTRIBUTING.rst | 6 +- packages/google-cloud-bigtable/README.rst | 4 +- packages/google-cloud-bigtable/noxfile.py | 85 ++++++++++++------- packages/google-cloud-bigtable/owlbot.py | 3 +- .../samples/beam/noxfile.py | 2 +- .../samples/hello/noxfile.py | 2 +- .../samples/hello_happybase/noxfile.py | 2 +- .../samples/instanceadmin/noxfile.py | 2 +- .../samples/metricscaler/noxfile.py | 2 +- .../samples/quickstart/noxfile.py | 2 +- .../samples/quickstart_happybase/noxfile.py | 2 +- .../samples/snippets/deletes/noxfile.py | 2 +- .../samples/snippets/filters/noxfile.py | 2 +- .../samples/snippets/reads/noxfile.py | 2 +- .../samples/snippets/writes/noxfile.py | 2 +- .../samples/tableadmin/noxfile.py | 2 +- .../templates/install_deps.tmpl.rst | 2 +- packages/google-cloud-bigtable/setup.py | 3 +- 26 files changed, 79 insertions(+), 128 deletions(-) delete mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.6/common.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.6/continuous.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.6/periodic-head.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.6/periodic.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.6/presubmit.cfg diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 50b29ffd2050..1ce608523524 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. 
docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:b2dc5f80edcf5d4486c39068c9fa11f7f851d9568eea4dcba130f994ea9b5e97 -# created: 2022-06-12T16:09:31.61859086Z + digest: sha256:e7bb19d47c13839fe8c147e50e02e8b6cf5da8edd1af8b82208cd6f66cc2829c +# created: 2022-07-05T18:31:20.838186805Z diff --git a/packages/google-cloud-bigtable/.github/workflows/unittest.yml b/packages/google-cloud-bigtable/.github/workflows/unittest.yml index e5be6edbd54d..5531b0141297 100644 --- a/packages/google-cloud-bigtable/.github/workflows/unittest.yml +++ b/packages/google-cloud-bigtable/.github/workflows/unittest.yml @@ -8,7 +8,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python: ['3.6', '3.7', '3.8', '3.9', '3.10'] + python: ['3.7', '3.8', '3.9', '3.10'] steps: - name: Checkout uses: actions/checkout@v3 diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.6/common.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.6/common.cfg deleted file mode 100644 index 21e1885071d6..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.6/common.cfg +++ /dev/null @@ -1,40 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -# Build logs will be here -action { - define_artifacts { - regex: "**/*sponge_log.xml" - } -} - -# Specify which tests to run -env_vars: { - key: "RUN_TESTS_SESSION" - value: "py-3.6" -} - -# Declare build specific Cloud project. -env_vars: { - key: "BUILD_SPECIFIC_GCLOUD_PROJECT" - value: "python-docs-samples-tests-py36" -} - -env_vars: { - key: "TRAMPOLINE_BUILD_FILE" - value: "github/python-bigtable/.kokoro/test-samples.sh" -} - -# Configure the docker image for kokoro-trampoline. -env_vars: { - key: "TRAMPOLINE_IMAGE" - value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker" -} - -# Download secrets for samples -gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" - -# Download trampoline resources. -gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" - -# Use the trampoline script to run in docker. 
-build_file: "python-bigtable/.kokoro/trampoline_v2.sh" \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.6/continuous.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.6/continuous.cfg deleted file mode 100644 index 7218af1499e5..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.6/continuous.cfg +++ /dev/null @@ -1,7 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -env_vars: { - key: "INSTALL_LIBRARY_FROM_SOURCE" - value: "True" -} - diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.6/periodic-head.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.6/periodic-head.cfg deleted file mode 100644 index be25a34f9ad3..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.6/periodic-head.cfg +++ /dev/null @@ -1,11 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -env_vars: { - key: "INSTALL_LIBRARY_FROM_SOURCE" - value: "True" -} - -env_vars: { - key: "TRAMPOLINE_BUILD_FILE" - value: "github/python-bigtable/.kokoro/test-samples-against-head.sh" -} diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.6/periodic.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.6/periodic.cfg deleted file mode 100644 index 71cd1e597e38..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.6/periodic.cfg +++ /dev/null @@ -1,6 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -env_vars: { - key: "INSTALL_LIBRARY_FROM_SOURCE" - value: "False" -} diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.6/presubmit.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.6/presubmit.cfg deleted file mode 100644 index a1c8d9759c88..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.6/presubmit.cfg +++ /dev/null @@ -1,6 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -env_vars: { - key: "INSTALL_LIBRARY_FROM_SOURCE" - value: "True" -} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/test-samples-impl.sh b/packages/google-cloud-bigtable/.kokoro/test-samples-impl.sh index 8a324c9c7bc6..2c6500cae0b9 100755 --- a/packages/google-cloud-bigtable/.kokoro/test-samples-impl.sh +++ b/packages/google-cloud-bigtable/.kokoro/test-samples-impl.sh @@ -33,7 +33,7 @@ export PYTHONUNBUFFERED=1 env | grep KOKORO # Install nox -python3.6 -m pip install --upgrade --quiet nox +python3.9 -m pip install --upgrade --quiet nox # Use secrets acessor service account to get secrets if [[ -f "${KOKORO_GFILE_DIR}/secrets_viewer_service_account.json" ]]; then @@ -76,7 +76,7 @@ for file in samples/**/requirements.txt; do echo "------------------------------------------------------------" # Use nox to execute the tests for the project. - python3.6 -m nox -s "$RUN_TESTS_SESSION" + python3.9 -m nox -s "$RUN_TESTS_SESSION" EXIT=$? # If this is a periodic build, send the test log to the FlakyBot. diff --git a/packages/google-cloud-bigtable/CONTRIBUTING.rst b/packages/google-cloud-bigtable/CONTRIBUTING.rst index a15cf6527263..1579f6f6b3ce 100644 --- a/packages/google-cloud-bigtable/CONTRIBUTING.rst +++ b/packages/google-cloud-bigtable/CONTRIBUTING.rst @@ -22,7 +22,7 @@ In order to add a feature: documentation. - The feature must work fully on the following CPython versions: - 3.6, 3.7, 3.8, 3.9 and 3.10 on both UNIX and Windows. + 3.7, 3.8, 3.9 and 3.10 on both UNIX and Windows. 
- The feature must not add unnecessary dependencies (where "unnecessary" is of course subjective, but new dependencies should @@ -221,13 +221,11 @@ Supported Python Versions We support: -- `Python 3.6`_ - `Python 3.7`_ - `Python 3.8`_ - `Python 3.9`_ - `Python 3.10`_ -.. _Python 3.6: https://docs.python.org/3.6/ .. _Python 3.7: https://docs.python.org/3.7/ .. _Python 3.8: https://docs.python.org/3.8/ .. _Python 3.9: https://docs.python.org/3.9/ @@ -239,7 +237,7 @@ Supported versions can be found in our ``noxfile.py`` `config`_. .. _config: https://github.com/googleapis/python-bigtable/blob/main/noxfile.py -We also explicitly decided to support Python 3 beginning with version 3.6. +We also explicitly decided to support Python 3 beginning with version 3.7. Reasons for this include: - Encouraging use of newest versions of Python 3 diff --git a/packages/google-cloud-bigtable/README.rst b/packages/google-cloud-bigtable/README.rst index 28cc372dad52..5f7d5809d130 100644 --- a/packages/google-cloud-bigtable/README.rst +++ b/packages/google-cloud-bigtable/README.rst @@ -52,7 +52,7 @@ dependencies. Supported Python Versions ^^^^^^^^^^^^^^^^^^^^^^^^^ -Python >= 3.6 +Python >= 3.7 Deprecated Python Versions ^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -63,6 +63,8 @@ Deprecated Python Versions - Python 3.5: the last released version which supported Python 3.5 was version 1.7.0, released 2021-02-09. +- Python 3.6: the last released version which supported Python 3.6 was + version v2.10.1, released 2022-06-03. Mac/Linux ^^^^^^^^^ diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index 2b89e8705d65..6875c9b449d7 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -31,7 +31,7 @@ DEFAULT_PYTHON_VERSION = "3.8" -UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"] +UNIT_TEST_PYTHON_VERSIONS = ["3.7", "3.8", "3.9", "3.10"] UNIT_TEST_STANDARD_DEPENDENCIES = [ "mock", "asyncmock", @@ -370,28 +370,15 @@ def docfx(session): def prerelease_deps(session): """Run all tests with prerelease versions of dependencies installed.""" - prerel_deps = [ - "protobuf", - "googleapis-common-protos", - "google-auth", - "grpcio", - "grpcio-status", - "google-api-core", - "proto-plus", - # dependencies of google-auth - "cryptography", - "pyasn1", - ] - - for dep in prerel_deps: - session.install("--pre", "--no-deps", "--upgrade", dep) - - # Remaining dependencies - other_deps = ["requests"] - session.install(*other_deps) - + # Install all dependencies + session.install("-e", ".[all, tests, tracing]") session.install(*UNIT_TEST_STANDARD_DEPENDENCIES) - session.install(*SYSTEM_TEST_STANDARD_DEPENDENCIES) + system_deps_all = ( + SYSTEM_TEST_STANDARD_DEPENDENCIES + + SYSTEM_TEST_EXTERNAL_DEPENDENCIES + + SYSTEM_TEST_EXTRAS + ) + session.install(*system_deps_all) # Because we test minimum dependency versions on the minimum Python # version, the first version we test with in the unit tests sessions has a @@ -405,19 +392,44 @@ def prerelease_deps(session): constraints_text = constraints_file.read() # Ignore leading whitespace and comment lines. - deps = [ + constraints_deps = [ match.group(1) for match in re.finditer( r"^\s*(\S+)(?===\S+)", constraints_text, flags=re.MULTILINE ) ] - # Don't overwrite prerelease packages. - deps = [dep for dep in deps if dep not in prerel_deps] - # We use --no-deps to ensure that pre-release versions aren't overwritten - # by the version ranges in setup.py. 
- session.install(*deps) - session.install("--no-deps", "-e", ".[all]") + session.install(*constraints_deps) + + if os.path.exists("samples/snippets/requirements.txt"): + session.install("-r", "samples/snippets/requirements.txt") + + if os.path.exists("samples/snippets/requirements-test.txt"): + session.install("-r", "samples/snippets/requirements-test.txt") + + prerel_deps = [ + "protobuf", + # dependency of grpc + "six", + "googleapis-common-protos", + "grpcio", + "grpcio-status", + "google-api-core", + "proto-plus", + "google-cloud-testutils", + # dependencies of google-cloud-testutils" + "click", + ] + + for dep in prerel_deps: + session.install("--pre", "--no-deps", "--upgrade", dep) + + # Remaining dependencies + other_deps = [ + "requests", + "google-auth", + ] + session.install(*other_deps) # Print out prerelease package versions session.run( @@ -426,5 +438,16 @@ def prerelease_deps(session): session.run("python", "-c", "import grpc; print(grpc.__version__)") session.run("py.test", "tests/unit") - session.run("py.test", "tests/system") - session.run("py.test", "samples/snippets") + + system_test_path = os.path.join("tests", "system.py") + system_test_folder_path = os.path.join("tests", "system") + + # Only run system tests if found. + if os.path.exists(system_test_path) or os.path.exists(system_test_folder_path): + session.run("py.test", "tests/system") + + snippets_test_path = os.path.join("samples", "snippets") + + # Only run samples tests if found. + if os.path.exists(snippets_test_path): + session.run("py.test", "samples/snippets") diff --git a/packages/google-cloud-bigtable/owlbot.py b/packages/google-cloud-bigtable/owlbot.py index ba43cae949a0..7b35e1a9c29f 100644 --- a/packages/google-cloud-bigtable/owlbot.py +++ b/packages/google-cloud-bigtable/owlbot.py @@ -86,13 +86,12 @@ def get_staging_dirs( # ---------------------------------------------------------------------------- templated_files = common.py_library( samples=True, # set to True only if there are samples - unit_test_python_versions=["3.6", "3.7", "3.8", "3.9", "3.10"], split_system_tests=True, microgenerator=True, cov_level=100, ) -s.move(templated_files, excludes=[".coveragerc"]) +s.move(templated_files, excludes=[".coveragerc", "README.rst"]) # ---------------------------------------------------------------------------- # Customize noxfile.py diff --git a/packages/google-cloud-bigtable/samples/beam/noxfile.py b/packages/google-cloud-bigtable/samples/beam/noxfile.py index 960a011c2336..908dd1a499d5 100644 --- a/packages/google-cloud-bigtable/samples/beam/noxfile.py +++ b/packages/google-cloud-bigtable/samples/beam/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/packages/google-cloud-bigtable/samples/hello/noxfile.py b/packages/google-cloud-bigtable/samples/hello/noxfile.py index 38bb0a572b81..5fcb9d7461f2 100644 --- a/packages/google-cloud-bigtable/samples/hello/noxfile.py +++ b/packages/google-cloud-bigtable/samples/hello/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. 
-ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py b/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py index 38bb0a572b81..5fcb9d7461f2 100644 --- a/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py +++ b/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py b/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py index 38bb0a572b81..5fcb9d7461f2 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py +++ b/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py b/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py index 38bb0a572b81..5fcb9d7461f2 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py +++ b/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/packages/google-cloud-bigtable/samples/quickstart/noxfile.py b/packages/google-cloud-bigtable/samples/quickstart/noxfile.py index 38bb0a572b81..5fcb9d7461f2 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/noxfile.py +++ b/packages/google-cloud-bigtable/samples/quickstart/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py b/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py index 38bb0a572b81..5fcb9d7461f2 100644 --- a/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py +++ b/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10"] # Any default versions that should be ignored. 
IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/deletes/noxfile.py index 38bb0a572b81..5fcb9d7461f2 100644 --- a/packages/google-cloud-bigtable/samples/snippets/deletes/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py index 38bb0a572b81..5fcb9d7461f2 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py index 38bb0a572b81..5fcb9d7461f2 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py index 38bb0a572b81..5fcb9d7461f2 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py b/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py index 38bb0a572b81..5fcb9d7461f2 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py +++ b/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10"] # Any default versions that should be ignored. 
IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/packages/google-cloud-bigtable/scripts/readme-gen/templates/install_deps.tmpl.rst b/packages/google-cloud-bigtable/scripts/readme-gen/templates/install_deps.tmpl.rst index 275d649890d7..6f069c6c87a5 100644 --- a/packages/google-cloud-bigtable/scripts/readme-gen/templates/install_deps.tmpl.rst +++ b/packages/google-cloud-bigtable/scripts/readme-gen/templates/install_deps.tmpl.rst @@ -12,7 +12,7 @@ Install Dependencies .. _Python Development Environment Setup Guide: https://cloud.google.com/python/setup -#. Create a virtualenv. Samples are compatible with Python 3.6+. +#. Create a virtualenv. Samples are compatible with Python 3.7+. .. code-block:: bash diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index f2ea88af575c..dce4f591054a 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -80,7 +80,6 @@ "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Programming Language :: Python", - "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", @@ -97,7 +96,7 @@ "scripts/fixup_bigtable_v2_keywords.py", "scripts/fixup_bigtable_admin_v2_keywords.py", ], - python_requires=">=3.6", + python_requires=">=3.7", include_package_data=True, zip_safe=False, ) From fe9712e44afdbc68bb667e002a663e6bd6904b67 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Sun, 24 Jul 2022 13:29:18 +0200 Subject: [PATCH 628/892] chore(deps): update all dependencies (#601) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore(deps): update all dependencies * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * revert * attempt to fix flaky test * use build specific project for test * attempt to fix flaky test Co-authored-by: Owl Bot Co-authored-by: Anthonios Partheniou --- .../.github/workflows/mypy.yml | 2 +- .../.github/workflows/system_emulated.yml | 2 +- .../samples/instanceadmin/requirements.txt | 2 +- .../samples/metricscaler/metricscaler.py | 14 ++++--- .../samples/metricscaler/metricscaler_test.py | 8 +++- .../samples/metricscaler/noxfile_config.py | 39 +++++++++++++++++++ .../snippets/writes/requirements-test.txt | 2 +- .../samples/tableadmin/requirements-test.txt | 2 +- 8 files changed, 58 insertions(+), 13 deletions(-) create mode 100644 packages/google-cloud-bigtable/samples/metricscaler/noxfile_config.py diff --git a/packages/google-cloud-bigtable/.github/workflows/mypy.yml b/packages/google-cloud-bigtable/.github/workflows/mypy.yml index f9f07f4de171..c63242630acd 100644 --- a/packages/google-cloud-bigtable/.github/workflows/mypy.yml +++ b/packages/google-cloud-bigtable/.github/workflows/mypy.yml @@ -10,7 +10,7 @@ jobs: - name: Checkout uses: actions/checkout@v3 - name: Setup Python - uses: actions/setup-python@v3 + uses: actions/setup-python@v4 with: python-version: "3.8" - name: Install nox diff --git a/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml b/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml index 303d3672409e..48b8faa42a05 100644 --- a/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml +++ b/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml @@ -15,7 +15,7 @@ jobs: uses: actions/checkout@v3 - name: 
Setup Python - uses: actions/setup-python@v3 + uses: actions/setup-python@v4 with: python-version: '3.8' diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt index b11e1d430fc1..7df9cb242638 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt @@ -1,2 +1,2 @@ google-cloud-bigtable==2.10.0 -backoff==2.0.1 +backoff==2.1.2 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/metricscaler.py b/packages/google-cloud-bigtable/samples/metricscaler/metricscaler.py index d29e40a398c9..3ffa95a0002d 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/metricscaler.py +++ b/packages/google-cloud-bigtable/samples/metricscaler/metricscaler.py @@ -122,20 +122,22 @@ def scale_bigtable(bigtable_instance, bigtable_cluster, scale_up): if current_node_count < max_node_count: new_node_count = min(current_node_count + size_change_step, max_node_count) cluster.serve_nodes = new_node_count - cluster.update() + operation = cluster.update() + response = operation.result(60) logger.info( - "Scaled up from {} to {} nodes.".format( - current_node_count, new_node_count + "Scaled up from {} to {} nodes for {}.".format( + current_node_count, new_node_count, response.name ) ) else: if current_node_count > min_node_count: new_node_count = max(current_node_count - size_change_step, min_node_count) cluster.serve_nodes = new_node_count - cluster.update() + operation = cluster.update() + response = operation.result(60) logger.info( - "Scaled down from {} to {} nodes.".format( - current_node_count, new_node_count + "Scaled down from {} to {} nodes for {}.".format( + current_node_count, new_node_count, response.name ) ) # [END bigtable_scale] diff --git a/packages/google-cloud-bigtable/samples/metricscaler/metricscaler_test.py b/packages/google-cloud-bigtable/samples/metricscaler/metricscaler_test.py index 4420605ecee1..52a2498cc343 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/metricscaler_test.py +++ b/packages/google-cloud-bigtable/samples/metricscaler/metricscaler_test.py @@ -77,7 +77,9 @@ def instance(): serve_nodes=serve_nodes, default_storage_type=storage_type, ) - instance.create(clusters=[cluster]) + operation = instance.create(clusters=[cluster]) + response = operation.result(60) + print(f"Successfully created {response.name}") # Eventual consistency check retry_found = RetryResult(bool) @@ -105,7 +107,9 @@ def dev_instance(): cluster = instance.cluster( cluster_id, location_id=BIGTABLE_ZONE, default_storage_type=storage_type ) - instance.create(clusters=[cluster]) + operation = instance.create(clusters=[cluster]) + response = operation.result(60) + print(f"Successfully created {response.name}") # Eventual consistency check retry_found = RetryResult(bool) diff --git a/packages/google-cloud-bigtable/samples/metricscaler/noxfile_config.py b/packages/google-cloud-bigtable/samples/metricscaler/noxfile_config.py new file mode 100644 index 000000000000..8a2d55bea291 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/metricscaler/noxfile_config.py @@ -0,0 +1,39 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Default TEST_CONFIG_OVERRIDE for python repos. + +# You can copy this file into your directory, then it will be imported from +# the noxfile.py. + +# The source of truth: +# https://github.com/GoogleCloudPlatform/python-docs-samples/blob/main/noxfile_config.py + +TEST_CONFIG_OVERRIDE = { + # Old samples are opted out of enforcing Python type hints + # All new samples should feature them + "enforce_type_hints": False, + # An envvar key for determining the project id to use. Change it + # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a + # build specific Cloud project. You can also use your own string + # to use your own Cloud project. + "gcloud_project_env": "BUILD_SPECIFIC_GCLOUD_PROJECT", + # If you need to use a specific version of pip, + # change pip_version_override to the string representation + # of the version number, for example, "20.2.4" + "pip_version_override": None, + # A dictionary you want to inject into your test. Don't put any + # secrets here. These values will override predefined values. + "envs": {}, +} diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt index 4d92cc9aa9dd..ce161d15f1a4 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt @@ -1,2 +1,2 @@ -backoff==2.0.1 +backoff==2.1.2 pytest==7.1.2 diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt index 69a7581b653e..c497ac626420 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt @@ -1,2 +1,2 @@ pytest==7.1.2 -google-cloud-testutils==1.3.1 +google-cloud-testutils==1.3.2 From befa400a4040b7920869d76116522f723a0ae146 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Sun, 24 Jul 2022 08:02:23 -0400 Subject: [PATCH 629/892] fix(deps): require google-api-core>=1.32.0,>=2.8.0 (#608) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit feat: add audience parameter feat: Add storage_utilization_gib_per_node to Autoscaling target feat: Cloud Bigtable Undelete Table service and message proto files * feat: add audience parameter PiperOrigin-RevId: 456827138 Source-Link: https://github.com/googleapis/googleapis/commit/23f1a157189581734c7a77cddfeb7c5bc1e440ae Source-Link: https://github.com/googleapis/googleapis-gen/commit/4075a8514f676691ec156688a5bbf183aa9893ce Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiNDA3NWE4NTE0ZjY3NjY5MWVjMTU2Njg4YTViYmYxODNhYTk4OTNjZSJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * feat: Add storage_utilization_gib_per_node to Autoscaling target PiperOrigin-RevId: 457776307 Source-Link: 
https://github.com/googleapis/googleapis/commit/982bb695af9ea65d7c59764512585d8a0fc1f981 Source-Link: https://github.com/googleapis/googleapis-gen/commit/0aff3ebcc8fb3c10b671ee386b4d263012d3d013 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiMGFmZjNlYmNjOGZiM2MxMGI2NzFlZTM4NmI0ZDI2MzAxMmQzZDAxMyJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * feat: Cloud Bigtable Undelete Table service and message proto files PiperOrigin-RevId: 457778403 Source-Link: https://github.com/googleapis/googleapis/commit/2b0fe3befa5ed45db7eb2d95c71e8fe61c98190d Source-Link: https://github.com/googleapis/googleapis-gen/commit/ca2a2c55cafca4b876305b0b12f76ce0f166ddcc Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiY2EyYTJjNTVjYWZjYTRiODc2MzA1YjBiMTJmNzZjZTBmMTY2ZGRjYyJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * chore: use gapic-generator-python 1.1.1 PiperOrigin-RevId: 459095142 Source-Link: https://github.com/googleapis/googleapis/commit/4f1be992601ed740a581a32cedc4e7b6c6a27793 Source-Link: https://github.com/googleapis/googleapis-gen/commit/ae686d9cde4fc3e36d0ac02efb8643b15890c1ed Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiYWU2ODZkOWNkZTRmYzNlMzZkMGFjMDJlZmI4NjQzYjE1ODkwYzFlZCJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * Synchronize new proto/yaml changes. PiperOrigin-RevId: 459539123 Source-Link: https://github.com/googleapis/googleapis/commit/2e0497d3c875dec96549036bc2e3775e646c8cd5 Source-Link: https://github.com/googleapis/googleapis-gen/commit/d56d71bed8f8d7bc9771056f8c5f2671e7f114a3 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiZDU2ZDcxYmVkOGY4ZDdiYzk3NzEwNTZmOGM1ZjI2NzFlN2YxMTRhMyJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * fix(deps): require google-api-core>=1.32.0,>=2.8.0 * trigger ci Co-authored-by: Owl Bot Co-authored-by: Anthonios Partheniou --- .../cloud/bigtable_admin_v2/__init__.py | 4 + .../bigtable_admin_v2/gapic_metadata.json | 10 + .../bigtable_instance_admin/client.py | 1 + .../transports/base.py | 16 +- .../transports/grpc.py | 2 + .../transports/grpc_asyncio.py | 2 + .../bigtable_table_admin/async_client.py | 89 ++++++ .../services/bigtable_table_admin/client.py | 90 ++++++ .../bigtable_table_admin/transports/base.py | 30 +- .../bigtable_table_admin/transports/grpc.py | 31 ++ .../transports/grpc_asyncio.py | 31 ++ .../cloud/bigtable_admin_v2/types/__init__.py | 4 + .../types/bigtable_table_admin.py | 49 +++ .../cloud/bigtable_admin_v2/types/instance.py | 12 + .../google/cloud/bigtable_v2/__init__.py | 2 + .../bigtable_v2/services/bigtable/client.py | 1 + .../services/bigtable/transports/base.py | 16 +- .../services/bigtable/transports/grpc.py | 2 + .../bigtable/transports/grpc_asyncio.py | 2 + .../cloud/bigtable_v2/types/__init__.py | 4 + .../bigtable_v2/types/response_params.py | 57 ++++ .../fixup_bigtable_admin_v2_keywords.py | 1 + packages/google-cloud-bigtable/setup.py | 8 +- .../testing/constraints-3.6.txt | 13 - .../testing/constraints-3.7.txt | 3 +- .../test_bigtable_instance_admin.py | 52 ++++ .../test_bigtable_table_admin.py | 280 ++++++++++++++++++ .../unit/gapic/bigtable_v2/test_bigtable.py | 52 ++++ 28 files changed, 828 insertions(+), 36 deletions(-) create mode 100644 
packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/response_params.py delete mode 100644 packages/google-cloud-bigtable/testing/constraints-3.6.txt diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py index 3713dc1e8c4e..d77671d8d479 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py @@ -73,6 +73,8 @@ from .types.bigtable_table_admin import RestoreTableRequest from .types.bigtable_table_admin import SnapshotTableMetadata from .types.bigtable_table_admin import SnapshotTableRequest +from .types.bigtable_table_admin import UndeleteTableMetadata +from .types.bigtable_table_admin import UndeleteTableRequest from .types.bigtable_table_admin import UpdateBackupRequest from .types.common import OperationProgress from .types.common import StorageType @@ -164,6 +166,8 @@ "SnapshotTableRequest", "StorageType", "Table", + "UndeleteTableMetadata", + "UndeleteTableRequest", "UpdateAppProfileMetadata", "UpdateAppProfileRequest", "UpdateBackupRequest", diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_metadata.json b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_metadata.json index a843c42e0dcd..a9294167a2b6 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_metadata.json +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_metadata.json @@ -339,6 +339,11 @@ "test_iam_permissions" ] }, + "UndeleteTable": { + "methods": [ + "undelete_table" + ] + }, "UpdateBackup": { "methods": [ "update_backup" @@ -454,6 +459,11 @@ "test_iam_permissions" ] }, + "UndeleteTable": { + "methods": [ + "undelete_table" + ] + }, "UpdateBackup": { "methods": [ "update_backup" diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py index fe14b82beb14..fc602bf423f9 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -548,6 +548,7 @@ def __init__( quota_project_id=client_options.quota_project_id, client_info=client_info, always_use_jwt_access=True, + api_audience=client_options.api_audience, ) def create_instance( diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py index 32261ac7bdca..a5e1c40d463f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py @@ -68,6 +68,7 @@ def __init__( quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, **kwargs, ) -> None: """Instantiate the transport. @@ -95,11 +96,6 @@ def __init__( be used for service account credentials. """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. 
- if ":" not in host: - host += ":443" - self._host = host - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} # Save the scopes. @@ -120,6 +116,11 @@ def __init__( credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) + # Don't apply audience if the credentials file passed from user. + if hasattr(credentials, "with_gdch_audience"): + credentials = credentials.with_gdch_audience( + api_audience if api_audience else host + ) # If the credentials are service account credentials, then always try to use self signed JWT. if ( @@ -132,6 +133,11 @@ def __init__( # Save the credentials. self._credentials = credentials + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. self._wrapped_methods = { diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py index db7d0c1c4e6d..fcf0cd94fbd2 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py @@ -67,6 +67,7 @@ def __init__( quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, ) -> None: """Instantiate the transport. @@ -163,6 +164,7 @@ def __init__( quota_project_id=quota_project_id, client_info=client_info, always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, ) if not self._grpc_channel: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py index bf1b8a38e268..efcb3ed8066e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py @@ -112,6 +112,7 @@ def __init__( quota_project_id=None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, ) -> None: """Instantiate the transport. 
@@ -208,6 +209,7 @@ def __init__( quota_project_id=quota_project_id, client_info=client_info, always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, ) if not self._grpc_channel: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index 0752459ee5db..ff852e12fc4e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -715,6 +715,95 @@ async def delete_table( metadata=metadata, ) + async def undelete_table( + self, + request: Union[bigtable_table_admin.UndeleteTableRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Restores a specified table which was accidentally + deleted. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.UndeleteTableRequest, dict]): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable] + name (:class:`str`): + Required. The unique name of the table to be restored. + Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Table` A collection of user data indexed by row, column, and timestamp. + Each table is served using the resources of its + parent cluster. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_table_admin.UndeleteTableRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.undelete_table, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + table.Table, + metadata_type=bigtable_table_admin.UndeleteTableMetadata, + ) + + # Done; return the response. + return response + async def modify_column_families( self, request: Union[bigtable_table_admin.ModifyColumnFamiliesRequest, dict] = None, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index 3b1185e5358e..70f10e1de3da 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -550,6 +550,7 @@ def __init__( quota_project_id=client_options.quota_project_id, client_info=client_info, always_use_jwt_access=True, + api_audience=client_options.api_audience, ) def create_table( @@ -1019,6 +1020,95 @@ def delete_table( metadata=metadata, ) + def undelete_table( + self, + request: Union[bigtable_table_admin.UndeleteTableRequest, dict] = None, + *, + name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Restores a specified table which was accidentally + deleted. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.UndeleteTableRequest, dict]): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable] + name (str): + Required. The unique name of the table to be restored. + Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Table` A collection of user data indexed by row, column, and timestamp. + Each table is served using the resources of its + parent cluster. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.UndeleteTableRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.UndeleteTableRequest): + request = bigtable_table_admin.UndeleteTableRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.undelete_table] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + table.Table, + metadata_type=bigtable_table_admin.UndeleteTableMetadata, + ) + + # Done; return the response. + return response + def modify_column_families( self, request: Union[bigtable_table_admin.ModifyColumnFamiliesRequest, dict] = None, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py index 5370416a0802..db8222d8930e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py @@ -68,6 +68,7 @@ def __init__( quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, **kwargs, ) -> None: """Instantiate the transport. @@ -95,11 +96,6 @@ def __init__( be used for service account credentials. """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ":" not in host: - host += ":443" - self._host = host - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} # Save the scopes. @@ -120,6 +116,11 @@ def __init__( credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) + # Don't apply audience if the credentials file passed from user. + if hasattr(credentials, "with_gdch_audience"): + credentials = credentials.with_gdch_audience( + api_audience if api_audience else host + ) # If the credentials are service account credentials, then always try to use self signed JWT. if ( @@ -132,6 +133,11 @@ def __init__( # Save the credentials. self._credentials = credentials + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. 
self._wrapped_methods = { @@ -180,6 +186,11 @@ def _prep_wrapped_messages(self, client_info): default_timeout=60.0, client_info=client_info, ), + self.undelete_table: gapic_v1.method.wrap_method( + self.undelete_table, + default_timeout=None, + client_info=client_info, + ), self.modify_column_families: gapic_v1.method.wrap_method( self.modify_column_families, default_timeout=300.0, @@ -409,6 +420,15 @@ def delete_table( ]: raise NotImplementedError() + @property + def undelete_table( + self, + ) -> Callable[ + [bigtable_table_admin.UndeleteTableRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + @property def modify_column_families( self, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py index f6c2c478d771..4c8e85609bd3 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py @@ -69,6 +69,7 @@ def __init__( quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, ) -> None: """Instantiate the transport. @@ -165,6 +166,7 @@ def __init__( quota_project_id=quota_project_id, client_info=client_info, always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, ) if not self._grpc_channel: @@ -399,6 +401,35 @@ def delete_table( ) return self._stubs["delete_table"] + @property + def undelete_table( + self, + ) -> Callable[ + [bigtable_table_admin.UndeleteTableRequest], operations_pb2.Operation + ]: + r"""Return a callable for the undelete table method over gRPC. + + Restores a specified table which was accidentally + deleted. + + Returns: + Callable[[~.UndeleteTableRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "undelete_table" not in self._stubs: + self._stubs["undelete_table"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/UndeleteTable", + request_serializer=bigtable_table_admin.UndeleteTableRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["undelete_table"] + @property def modify_column_families( self, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py index 7d2077f236b0..349f810a839a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py @@ -114,6 +114,7 @@ def __init__( quota_project_id=None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, ) -> None: """Instantiate the transport. @@ -210,6 +211,7 @@ def __init__( quota_project_id=quota_project_id, client_info=client_info, always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, ) if not self._grpc_channel: @@ -409,6 +411,35 @@ def delete_table( ) return self._stubs["delete_table"] + @property + def undelete_table( + self, + ) -> Callable[ + [bigtable_table_admin.UndeleteTableRequest], Awaitable[operations_pb2.Operation] + ]: + r"""Return a callable for the undelete table method over gRPC. + + Restores a specified table which was accidentally + deleted. + + Returns: + Callable[[~.UndeleteTableRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "undelete_table" not in self._stubs: + self._stubs["undelete_table"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/UndeleteTable", + request_serializer=bigtable_table_admin.UndeleteTableRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["undelete_table"] + @property def modify_column_families( self, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py index f35f0f4ab5c0..31f4b712f3cb 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py @@ -70,6 +70,8 @@ RestoreTableRequest, SnapshotTableMetadata, SnapshotTableRequest, + UndeleteTableMetadata, + UndeleteTableRequest, UpdateBackupRequest, ) from .common import ( @@ -151,6 +153,8 @@ "RestoreTableRequest", "SnapshotTableMetadata", "SnapshotTableRequest", + "UndeleteTableMetadata", + "UndeleteTableRequest", "UpdateBackupRequest", "OperationProgress", "StorageType", diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py index 6a366a5e42bc..2078ce922c83 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py @@ -35,6 +35,8 @@ "ListTablesResponse", "GetTableRequest", "DeleteTableRequest", + "UndeleteTableRequest", + "UndeleteTableMetadata", "ModifyColumnFamiliesRequest", "GenerateConsistencyTokenRequest", "GenerateConsistencyTokenResponse", @@ -459,6 +461,53 @@ class DeleteTableRequest(proto.Message): ) +class UndeleteTableRequest(proto.Message): + r"""Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable] + + Attributes: + name (str): + Required. The unique name of the table to be restored. + Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + + +class UndeleteTableMetadata(proto.Message): + r"""Metadata type for the operation returned by + [google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable]. + + Attributes: + name (str): + The name of the table being restored. + start_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which this operation started. + end_time (google.protobuf.timestamp_pb2.Timestamp): + If set, the time at which this operation + finished or was cancelled. 
+ """ + + name = proto.Field( + proto.STRING, + number=1, + ) + start_time = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + end_time = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + + class ModifyColumnFamiliesRequest(proto.Message): r"""Request message for [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies] diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py index 1b2e4d615d02..06a2cf39dc70 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py @@ -125,12 +125,24 @@ class AutoscalingTargets(proto.Message): achieve. This number is on a scale from 0 (no utilization) to 100 (total utilization), and is limited between 10 and 80, otherwise it will return INVALID_ARGUMENT error. + storage_utilization_gib_per_node (int): + The storage utilization that the Autoscaler should be trying + to achieve. This number is limited between 2560 (2.5TiB) and + 5120 (5TiB) for a SSD cluster and between 8192 (8TiB) and + 16384 (16TiB) for an HDD cluster; otherwise it will return + INVALID_ARGUMENT error. If this value is set to 0, it will + be treated as if it were set to the default value: 2560 for + SSD, 8192 for HDD. """ cpu_utilization_percent = proto.Field( proto.INT32, number=2, ) + storage_utilization_gib_per_node = proto.Field( + proto.INT32, + number=3, + ) class AutoscalingLimits(proto.Message): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py index d744bd53f4df..5f2893c50863 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py @@ -43,6 +43,7 @@ from .types.data import RowSet from .types.data import TimestampRange from .types.data import ValueRange +from .types.response_params import ResponseParams __all__ = ( "BigtableAsyncClient", @@ -65,6 +66,7 @@ "ReadModifyWriteRule", "ReadRowsRequest", "ReadRowsResponse", + "ResponseParams", "Row", "RowFilter", "RowRange", diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py index a0dfdff4e82a..30dd2934fc8b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py @@ -441,6 +441,7 @@ def __init__( quota_project_id=client_options.quota_project_id, client_info=client_info, always_use_jwt_access=True, + api_audience=client_options.api_audience, ) def read_rows( diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py index 922068fd0a4f..1a6ed755493b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py @@ -61,6 +61,7 @@ def __init__( quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = 
DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, **kwargs, ) -> None: """Instantiate the transport. @@ -88,11 +89,6 @@ def __init__( be used for service account credentials. """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ":" not in host: - host += ":443" - self._host = host - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} # Save the scopes. @@ -113,6 +109,11 @@ def __init__( credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) + # Don't apply audience if the credentials file passed from user. + if hasattr(credentials, "with_gdch_audience"): + credentials = credentials.with_gdch_audience( + api_audience if api_audience else host + ) # If the credentials are service account credentials, then always try to use self signed JWT. if ( @@ -125,6 +126,11 @@ def __init__( # Save the credentials. self._credentials = credentials + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. self._wrapped_methods = { diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py index ce40cf33517c..b453d3bc021f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py @@ -59,6 +59,7 @@ def __init__( quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, ) -> None: """Instantiate the transport. @@ -154,6 +155,7 @@ def __init__( quota_project_id=quota_project_id, client_info=client_info, always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, ) if not self._grpc_channel: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py index 4099e7bd77d6..88081f30a1f1 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py @@ -104,6 +104,7 @@ def __init__( quota_project_id=None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, ) -> None: """Instantiate the transport. 
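
The instance.py hunk above adds a storage_utilization_gib_per_node field to AutoscalingTargets alongside the existing CPU target. A hedged construction sketch, with example values chosen inside the documented SSD bounds:

from google.cloud import bigtable_admin_v2

targets = bigtable_admin_v2.types.AutoscalingTargets(
    cpu_utilization_percent=60,
    storage_utilization_gib_per_node=2560,  # 2.5 TiB; 0 means "use the default" per the field docs
)
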
@@ -199,6 +200,7 @@ def __init__( quota_project_id=quota_project_id, client_info=client_info, always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, ) if not self._grpc_channel: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py index 401705715b51..ec6fbafd4064 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py @@ -43,6 +43,9 @@ TimestampRange, ValueRange, ) +from .response_params import ( + ResponseParams, +) __all__ = ( "CheckAndMutateRowRequest", @@ -71,4 +74,5 @@ "RowSet", "TimestampRange", "ValueRange", + "ResponseParams", ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/response_params.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/response_params.py new file mode 100644 index 000000000000..b11cffeefc79 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/response_params.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.bigtable.v2", + manifest={ + "ResponseParams", + }, +) + + +class ResponseParams(proto.Message): + r"""Response metadata proto This is an experimental feature that will be + used to get zone_id and cluster_id from response trailers to tag the + metrics. This should not be used by customers directly + + Attributes: + zone_id (str): + The cloud bigtable zone associated with the + cluster. + + This field is a member of `oneof`_ ``_zone_id``. + cluster_id (str): + Identifier for a cluster that represents set + of bigtable resources. + + This field is a member of `oneof`_ ``_cluster_id``. 
+ """ + + zone_id = proto.Field( + proto.STRING, + number=1, + optional=True, + ) + cluster_id = proto.Field( + proto.STRING, + number=2, + optional=True, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py b/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py index 72354ba161ad..9622469c4509 100644 --- a/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py +++ b/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py @@ -75,6 +75,7 @@ class bigtable_adminCallTransformer(cst.CSTTransformer): 'set_iam_policy': ('resource', 'policy', 'update_mask', ), 'snapshot_table': ('name', 'cluster', 'snapshot_id', 'ttl', 'description', ), 'test_iam_permissions': ('resource', 'permissions', ), + 'undelete_table': ('name', ), 'update_app_profile': ('app_profile', 'update_mask', 'ignore_warnings', ), 'update_backup': ('backup', 'update_mask', ), 'update_cluster': ('name', 'location', 'state', 'serve_nodes', 'cluster_config', 'default_storage_type', 'encryption_config', ), diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index dce4f591054a..110101e5e415 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -29,13 +29,7 @@ # 'Development Status :: 5 - Production/Stable' release_status = "Development Status :: 5 - Production/Stable" dependencies = [ - # NOTE: Maintainers, please do not require google-api-core>=2.x.x - # Until this issue is closed - # https://github.com/googleapis/google-cloud-python/issues/10566 - "google-api-core[grpc] >= 1.31.5, <3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0", - # NOTE: Maintainers, please do not require google-api-core>=2.x.x - # Until this issue is closed - # https://github.com/googleapis/google-cloud-python/issues/10566 + "google-api-core[grpc] >= 1.32.0, <3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*", "google-cloud-core >= 1.4.1, <3.0.0dev", "grpc-google-iam-v1 >= 0.12.4, <1.0.0dev", "proto-plus >= 1.18.0, <2.0.0dev", diff --git a/packages/google-cloud-bigtable/testing/constraints-3.6.txt b/packages/google-cloud-bigtable/testing/constraints-3.6.txt deleted file mode 100644 index 455fa9b7b015..000000000000 --- a/packages/google-cloud-bigtable/testing/constraints-3.6.txt +++ /dev/null @@ -1,13 +0,0 @@ -# This constraints file is used to check that lower bounds -# are correct in setup.py -# List *all* library dependencies and extras in this file. -# Pin the version to the lower bound. 
-# -# e.g., if setup.py has "foo >= 1.14.0, < 2.0.0dev", -# Then this file should have foo==1.14.0 -google-api-core==1.31.5 -google-cloud-core==1.4.1 -grpc-google-iam-v1==0.12.4 -proto-plus==1.18.0 -libcst==0.2.5 -protobuf==3.19.0 diff --git a/packages/google-cloud-bigtable/testing/constraints-3.7.txt b/packages/google-cloud-bigtable/testing/constraints-3.7.txt index 455fa9b7b015..4847b9e04b42 100644 --- a/packages/google-cloud-bigtable/testing/constraints-3.7.txt +++ b/packages/google-cloud-bigtable/testing/constraints-3.7.txt @@ -5,9 +5,10 @@ # # e.g., if setup.py has "foo >= 1.14.0, < 2.0.0dev", # Then this file should have foo==1.14.0 -google-api-core==1.31.5 +google-api-core==1.32.0 google-cloud-core==1.4.1 grpc-google-iam-v1==0.12.4 proto-plus==1.18.0 libcst==0.2.5 protobuf==3.19.0 + diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index 4e7fcf2a79f5..7d61a067c787 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -251,6 +251,7 @@ def test_bigtable_instance_admin_client_client_options( quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, + api_audience=None, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is @@ -268,6 +269,7 @@ def test_bigtable_instance_admin_client_client_options( quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, + api_audience=None, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is @@ -285,6 +287,7 @@ def test_bigtable_instance_admin_client_client_options( quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, + api_audience=None, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has @@ -314,6 +317,25 @@ def test_bigtable_instance_admin_client_client_options( quota_project_id="octopus", client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, + api_audience=None, + ) + # Check the case api_endpoint is provided + options = client_options.ClientOptions( + api_audience="https://language.googleapis.com" + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience="https://language.googleapis.com", ) @@ -391,6 +413,7 @@ def test_bigtable_instance_admin_client_mtls_env_auto( quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, + api_audience=None, ) # Check the case ADC client cert is provided. Whether client cert is used depends on @@ -425,6 +448,7 @@ def test_bigtable_instance_admin_client_mtls_env_auto( quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, + api_audience=None, ) # Check the case client_cert_source and ADC client cert are not provided. 
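
The client-options test added above verifies that an explicit api_audience reaches the transport. Application code would pass the option the same way; a hedged sketch, where the audience URL is illustrative only and anonymous credentials stand in for real ones:

from google.api_core.client_options import ClientOptions
from google.auth import credentials as ga_credentials
from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableInstanceAdminClient(
    credentials=ga_credentials.AnonymousCredentials(),
    client_options=ClientOptions(api_audience="https://bigtableadmin.example.com"),
)
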
@@ -447,6 +471,7 @@ def test_bigtable_instance_admin_client_mtls_env_auto( quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, + api_audience=None, ) @@ -565,6 +590,7 @@ def test_bigtable_instance_admin_client_client_options_scopes( quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, + api_audience=None, ) @@ -603,6 +629,7 @@ def test_bigtable_instance_admin_client_client_options_credentials_file( quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, + api_audience=None, ) @@ -623,6 +650,7 @@ def test_bigtable_instance_admin_client_client_options_from_dict(): quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, + api_audience=None, ) @@ -661,6 +689,7 @@ def test_bigtable_instance_admin_client_create_channel_credentials_file( quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, + api_audience=None, ) # test that the credentials from file are saved and used as the credentials. @@ -6330,6 +6359,28 @@ def test_bigtable_instance_admin_transport_auth_adc(transport_class): ) +@pytest.mark.parametrize( + "transport_class", + [ + transports.BigtableInstanceAdminGrpcTransport, + transports.BigtableInstanceAdminGrpcAsyncIOTransport, + ], +) +def test_bigtable_instance_admin_transport_auth_gdch_credentials(transport_class): + host = "https://language.com" + api_audience_tests = [None, "https://language2.com"] + api_audience_expect = [host, "https://language2.com"] + for t, e in zip(api_audience_tests, api_audience_expect): + with mock.patch.object(google.auth, "default", autospec=True) as adc: + gdch_mock = mock.MagicMock() + type(gdch_mock).with_gdch_audience = mock.PropertyMock( + return_value=gdch_mock + ) + adc.return_value = (gdch_mock, None) + transport_class(host=host, api_audience=t) + gdch_mock.with_gdch_audience.assert_called_once_with(e) + + @pytest.mark.parametrize( "transport_class,grpc_helpers", [ @@ -6988,4 +7039,5 @@ def test_api_key_credentials(client_class, transport_class): quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, + api_audience=None, ) diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index ff766dcd1cca..adcf50b1e5b3 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -249,6 +249,7 @@ def test_bigtable_table_admin_client_client_options( quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, + api_audience=None, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is @@ -266,6 +267,7 @@ def test_bigtable_table_admin_client_client_options( quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, + api_audience=None, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is @@ -283,6 +285,7 @@ def test_bigtable_table_admin_client_client_options( quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, + api_audience=None, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has 
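
The table-admin tests added below include coverage for the new UndeleteTable RPC by mocking the gRPC stub. For orientation, a typical application-level call is sketched here; the table path is a placeholder, and the snippet assumes application default credentials and a table that was recently deleted:

from google.cloud import bigtable_admin_v2

admin = bigtable_admin_v2.BigtableTableAdminClient()
operation = admin.undelete_table(
    name="projects/my-project/instances/my-instance/tables/my-table"
)
table = operation.result()  # blocks until the long-running restore completes
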
@@ -312,6 +315,25 @@ def test_bigtable_table_admin_client_client_options( quota_project_id="octopus", client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, + api_audience=None, + ) + # Check the case api_endpoint is provided + options = client_options.ClientOptions( + api_audience="https://language.googleapis.com" + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience="https://language.googleapis.com", ) @@ -389,6 +411,7 @@ def test_bigtable_table_admin_client_mtls_env_auto( quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, + api_audience=None, ) # Check the case ADC client cert is provided. Whether client cert is used depends on @@ -423,6 +446,7 @@ def test_bigtable_table_admin_client_mtls_env_auto( quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, + api_audience=None, ) # Check the case client_cert_source and ADC client cert are not provided. @@ -445,6 +469,7 @@ def test_bigtable_table_admin_client_mtls_env_auto( quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, + api_audience=None, ) @@ -559,6 +584,7 @@ def test_bigtable_table_admin_client_client_options_scopes( quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, + api_audience=None, ) @@ -597,6 +623,7 @@ def test_bigtable_table_admin_client_client_options_credentials_file( quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, + api_audience=None, ) @@ -617,6 +644,7 @@ def test_bigtable_table_admin_client_client_options_from_dict(): quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, + api_audience=None, ) @@ -655,6 +683,7 @@ def test_bigtable_table_admin_client_create_channel_credentials_file( quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, + api_audience=None, ) # test that the credentials from file are saved and used as the credentials. @@ -2080,6 +2109,233 @@ async def test_delete_table_flattened_error_async(): ) +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.UndeleteTableRequest, + dict, + ], +) +def test_undelete_table(request_type, transport: str = "grpc"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.undelete_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.undelete_table(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.UndeleteTableRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_undelete_table_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.undelete_table), "__call__") as call: + client.undelete_table() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.UndeleteTableRequest() + + +@pytest.mark.asyncio +async def test_undelete_table_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.UndeleteTableRequest, +): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.undelete_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.undelete_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.UndeleteTableRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_undelete_table_async_from_dict(): + await test_undelete_table_async(request_type=dict) + + +def test_undelete_table_field_headers(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.UndeleteTableRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.undelete_table), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.undelete_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_undelete_table_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.UndeleteTableRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.undelete_table), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.undelete_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_undelete_table_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.undelete_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.undelete_table( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_undelete_table_flattened_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.undelete_table( + bigtable_table_admin.UndeleteTableRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_undelete_table_flattened_async(): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.undelete_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.undelete_table( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_undelete_table_flattened_error_async(): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.undelete_table( + bigtable_table_admin.UndeleteTableRequest(), + name="name_value", + ) + + @pytest.mark.parametrize( "request_type", [ @@ -6584,6 +6840,7 @@ def test_bigtable_table_admin_base_transport(): "list_tables", "get_table", "delete_table", + "undelete_table", "modify_column_families", "drop_row_range", "generate_consistency_token", @@ -6708,6 +6965,28 @@ def test_bigtable_table_admin_transport_auth_adc(transport_class): ) +@pytest.mark.parametrize( + "transport_class", + [ + transports.BigtableTableAdminGrpcTransport, + transports.BigtableTableAdminGrpcAsyncIOTransport, + ], +) +def test_bigtable_table_admin_transport_auth_gdch_credentials(transport_class): + host = "https://language.com" + api_audience_tests = [None, "https://language2.com"] + api_audience_expect = [host, "https://language2.com"] + for t, e in zip(api_audience_tests, api_audience_expect): + with mock.patch.object(google.auth, "default", autospec=True) as adc: + gdch_mock = mock.MagicMock() + type(gdch_mock).with_gdch_audience = mock.PropertyMock( + return_value=gdch_mock + ) + adc.return_value = (gdch_mock, None) + transport_class(host=host, api_audience=t) + gdch_mock.with_gdch_audience.assert_called_once_with(e) + + @pytest.mark.parametrize( "transport_class,grpc_helpers", [ @@ -7365,4 +7644,5 @@ def test_api_key_credentials(client_class, transport_class): quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, + api_audience=None, ) diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py index 3ca5041c2506..644265d2bcc5 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py @@ -211,6 +211,7 @@ def test_bigtable_client_client_options(client_class, transport_class, transport quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, + api_audience=None, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is @@ -228,6 +229,7 @@ def test_bigtable_client_client_options(client_class, transport_class, transport quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, + api_audience=None, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is @@ -245,6 +247,7 @@ def test_bigtable_client_client_options(client_class, transport_class, transport quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, + api_audience=None, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has @@ -274,6 +277,25 @@ def test_bigtable_client_client_options(client_class, transport_class, transport quota_project_id="octopus", client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, + api_audience=None, + ) + # Check the case api_endpoint is provided + options = client_options.ClientOptions( + api_audience="https://language.googleapis.com" + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + 
client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience="https://language.googleapis.com", ) @@ -339,6 +361,7 @@ def test_bigtable_client_mtls_env_auto( quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, + api_audience=None, ) # Check the case ADC client cert is provided. Whether client cert is used depends on @@ -373,6 +396,7 @@ def test_bigtable_client_mtls_env_auto( quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, + api_audience=None, ) # Check the case client_cert_source and ADC client cert are not provided. @@ -395,6 +419,7 @@ def test_bigtable_client_mtls_env_auto( quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, + api_audience=None, ) @@ -501,6 +526,7 @@ def test_bigtable_client_client_options_scopes( quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, + api_audience=None, ) @@ -534,6 +560,7 @@ def test_bigtable_client_client_options_credentials_file( quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, + api_audience=None, ) @@ -552,6 +579,7 @@ def test_bigtable_client_client_options_from_dict(): quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, + api_audience=None, ) @@ -585,6 +613,7 @@ def test_bigtable_client_create_channel_credentials_file( quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, + api_audience=None, ) # test that the credentials from file are saved and used as the credentials. @@ -2650,6 +2679,28 @@ def test_bigtable_transport_auth_adc(transport_class): ) +@pytest.mark.parametrize( + "transport_class", + [ + transports.BigtableGrpcTransport, + transports.BigtableGrpcAsyncIOTransport, + ], +) +def test_bigtable_transport_auth_gdch_credentials(transport_class): + host = "https://language.com" + api_audience_tests = [None, "https://language2.com"] + api_audience_expect = [host, "https://language2.com"] + for t, e in zip(api_audience_tests, api_audience_expect): + with mock.patch.object(google.auth, "default", autospec=True) as adc: + gdch_mock = mock.MagicMock() + type(gdch_mock).with_gdch_audience = mock.PropertyMock( + return_value=gdch_mock + ) + adc.return_value = (gdch_mock, None) + transport_class(host=host, api_audience=t) + gdch_mock.with_gdch_audience.assert_called_once_with(e) + + @pytest.mark.parametrize( "transport_class,grpc_helpers", [ @@ -3137,4 +3188,5 @@ def test_api_key_credentials(client_class, transport_class): quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, + api_audience=None, ) From bb09bee5334ceea6ff45b95c9ff411f270abb625 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Mon, 25 Jul 2022 22:24:08 -0400 Subject: [PATCH 630/892] chore(python): fix prerelease session [autoapprove] (#613) Source-Link: https://github.com/googleapis/synthtool/commit/1b9ad7694e44ddb4d9844df55ff7af77b51a4435 Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:9db98b055a7f8bd82351238ccaacfd3cda58cdf73012ab58b8da146368330021 Co-authored-by: Owl Bot --- .../.github/.OwlBot.lock.yaml | 4 +-- packages/google-cloud-bigtable/noxfile.py | 33 ++++++++++--------- 2 files changed, 20 insertions(+), 17 deletions(-) diff --git 
a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 1ce608523524..0eb02fda4c09 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:e7bb19d47c13839fe8c147e50e02e8b6cf5da8edd1af8b82208cd6f66cc2829c -# created: 2022-07-05T18:31:20.838186805Z + digest: sha256:9db98b055a7f8bd82351238ccaacfd3cda58cdf73012ab58b8da146368330021 +# created: 2022-07-25T16:02:49.174178716Z diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index 6875c9b449d7..1c208b4c3f5d 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -372,7 +372,8 @@ def prerelease_deps(session): # Install all dependencies session.install("-e", ".[all, tests, tracing]") - session.install(*UNIT_TEST_STANDARD_DEPENDENCIES) + unit_deps_all = UNIT_TEST_STANDARD_DEPENDENCIES + UNIT_TEST_EXTERNAL_DEPENDENCIES + session.install(*unit_deps_all) system_deps_all = ( SYSTEM_TEST_STANDARD_DEPENDENCIES + SYSTEM_TEST_EXTERNAL_DEPENDENCIES @@ -401,12 +402,6 @@ def prerelease_deps(session): session.install(*constraints_deps) - if os.path.exists("samples/snippets/requirements.txt"): - session.install("-r", "samples/snippets/requirements.txt") - - if os.path.exists("samples/snippets/requirements-test.txt"): - session.install("-r", "samples/snippets/requirements-test.txt") - prerel_deps = [ "protobuf", # dependency of grpc @@ -443,11 +438,19 @@ def prerelease_deps(session): system_test_folder_path = os.path.join("tests", "system") # Only run system tests if found. - if os.path.exists(system_test_path) or os.path.exists(system_test_folder_path): - session.run("py.test", "tests/system") - - snippets_test_path = os.path.join("samples", "snippets") - - # Only run samples tests if found. 
- if os.path.exists(snippets_test_path): - session.run("py.test", "samples/snippets") + if os.path.exists(system_test_path): + session.run( + "py.test", + "--verbose", + f"--junitxml=system_{session.python}_sponge_log.xml", + system_test_path, + *session.posargs, + ) + if os.path.exists(system_test_folder_path): + session.run( + "py.test", + "--verbose", + f"--junitxml=system_{session.python}_sponge_log.xml", + system_test_folder_path, + *session.posargs, + ) From 34118ee0a9962f5946a89b0ff2f06f9c035158ef Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Wed, 27 Jul 2022 11:33:52 -0400 Subject: [PATCH 631/892] feat: add satisfies_pzs output only field (#614) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: Publish new fields PiperOrigin-RevId: 463378622 Source-Link: https://github.com/googleapis/googleapis/commit/8229ab4d3cee2d9c9da62805705599893971d5c2 Source-Link: https://github.com/googleapis/googleapis-gen/commit/bb82d0422b8f2f413158623a9d1e09422b2604b5 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiYmI4MmQwNDIyYjhmMmY0MTMxNTg2MjNhOWQxZTA5NDIyYjI2MDRiNSJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md Co-authored-by: Owl Bot --- .../google/cloud/bigtable_admin_v2/types/instance.py | 9 +++++++++ .../scripts/fixup_bigtable_admin_v2_keywords.py | 2 +- .../bigtable_admin_v2/test_bigtable_instance_admin.py | 8 ++++++++ 3 files changed, 18 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py index 06a2cf39dc70..12422930ef5b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py @@ -71,6 +71,10 @@ class Instance(proto.Message): this Instance was created. For instances created before this field was added (August 2021), this value is ``seconds: 0, nanos: 1``. + satisfies_pzs (bool): + Output only. Reserved for future use. + + This field is a member of `oneof`_ ``_satisfies_pzs``. 
""" class State(proto.Enum): @@ -113,6 +117,11 @@ class Type(proto.Enum): number=7, message=timestamp_pb2.Timestamp, ) + satisfies_pzs = proto.Field( + proto.BOOL, + number=8, + optional=True, + ) class AutoscalingTargets(proto.Message): diff --git a/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py b/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py index 9622469c4509..7623d5593f43 100644 --- a/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py +++ b/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py @@ -79,7 +79,7 @@ class bigtable_adminCallTransformer(cst.CSTTransformer): 'update_app_profile': ('app_profile', 'update_mask', 'ignore_warnings', ), 'update_backup': ('backup', 'update_mask', ), 'update_cluster': ('name', 'location', 'state', 'serve_nodes', 'cluster_config', 'default_storage_type', 'encryption_config', ), - 'update_instance': ('display_name', 'name', 'state', 'type_', 'labels', 'create_time', ), + 'update_instance': ('display_name', 'name', 'state', 'type_', 'labels', 'create_time', 'satisfies_pzs', ), } def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index 7d61a067c787..d4f52ecba256 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -1011,6 +1011,7 @@ def test_get_instance(request_type, transport: str = "grpc"): display_name="display_name_value", state=instance.Instance.State.READY, type_=instance.Instance.Type.PRODUCTION, + satisfies_pzs=True, ) response = client.get_instance(request) @@ -1025,6 +1026,7 @@ def test_get_instance(request_type, transport: str = "grpc"): assert response.display_name == "display_name_value" assert response.state == instance.Instance.State.READY assert response.type_ == instance.Instance.Type.PRODUCTION + assert response.satisfies_pzs is True def test_get_instance_empty_call(): @@ -1066,6 +1068,7 @@ async def test_get_instance_async( display_name="display_name_value", state=instance.Instance.State.READY, type_=instance.Instance.Type.PRODUCTION, + satisfies_pzs=True, ) ) response = await client.get_instance(request) @@ -1081,6 +1084,7 @@ async def test_get_instance_async( assert response.display_name == "display_name_value" assert response.state == instance.Instance.State.READY assert response.type_ == instance.Instance.Type.PRODUCTION + assert response.satisfies_pzs is True @pytest.mark.asyncio @@ -1490,6 +1494,7 @@ def test_update_instance(request_type, transport: str = "grpc"): display_name="display_name_value", state=instance.Instance.State.READY, type_=instance.Instance.Type.PRODUCTION, + satisfies_pzs=True, ) response = client.update_instance(request) @@ -1504,6 +1509,7 @@ def test_update_instance(request_type, transport: str = "grpc"): assert response.display_name == "display_name_value" assert response.state == instance.Instance.State.READY assert response.type_ == instance.Instance.Type.PRODUCTION + assert response.satisfies_pzs is True def test_update_instance_empty_call(): @@ -1544,6 +1550,7 @@ async def test_update_instance_async( display_name="display_name_value", state=instance.Instance.State.READY, 
type_=instance.Instance.Type.PRODUCTION, + satisfies_pzs=True, ) ) response = await client.update_instance(request) @@ -1559,6 +1566,7 @@ async def test_update_instance_async( assert response.display_name == "display_name_value" assert response.state == instance.Instance.State.READY assert response.type_ == instance.Instance.Type.PRODUCTION + assert response.satisfies_pzs is True @pytest.mark.asyncio From 94dcc2de5d8aa0b978160c1a6748120c1d52ca75 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 2 Aug 2022 14:57:02 +0200 Subject: [PATCH 632/892] chore(deps): update all dependencies (#616) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore(deps): update all dependencies * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * revert * chore: update timeout to cater for duration of LRO Co-authored-by: Owl Bot Co-authored-by: Anthonios Partheniou --- .../google-cloud-bigtable/samples/beam/requirements.txt | 6 +++--- .../google-cloud-bigtable/samples/hello/requirements.txt | 4 ++-- .../samples/instanceadmin/instanceadmin.py | 2 +- .../samples/instanceadmin/requirements.txt | 2 +- .../samples/metricscaler/requirements.txt | 4 ++-- .../samples/quickstart/requirements.txt | 2 +- .../samples/snippets/deletes/requirements.txt | 2 +- .../samples/snippets/filters/requirements.txt | 2 +- .../samples/snippets/reads/requirements.txt | 2 +- .../samples/snippets/writes/requirements.txt | 2 +- .../samples/tableadmin/requirements-test.txt | 2 +- .../samples/tableadmin/requirements.txt | 2 +- 12 files changed, 16 insertions(+), 16 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index d3294d0f21dc..52da23f84541 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ -apache-beam==2.39.0 -google-cloud-bigtable==2.10.0 -google-cloud-core==2.3.0 +apache-beam==2.40.0 +google-cloud-bigtable==2.10.1 +google-cloud-core==2.3.2 diff --git a/packages/google-cloud-bigtable/samples/hello/requirements.txt b/packages/google-cloud-bigtable/samples/hello/requirements.txt index 7d727111807c..b6b1a2001138 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.10.0 -google-cloud-core==2.3.0 +google-cloud-bigtable==2.10.1 +google-cloud-core==2.3.2 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/instanceadmin.py b/packages/google-cloud-bigtable/samples/instanceadmin/instanceadmin.py index 13234f6c77f4..239b1dbaef52 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/instanceadmin.py +++ b/packages/google-cloud-bigtable/samples/instanceadmin/instanceadmin.py @@ -159,7 +159,7 @@ def add_cluster(project_id, instance_id, cluster_id): else: operation = cluster.create() # Ensure the operation completes. 
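# A sketch of the pattern tuned just below: cluster.create() returns a
# google.api_core.operation.Operation, and result() blocks until the
# long-running operation finishes, raising concurrent.futures.TimeoutError
# if it has not completed within `timeout` seconds, e.g.:
#
#     operation = cluster.create()
#     operation.result(timeout=120)  # allow up to two minutes for the LRO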
- operation.result(timeout=60) + operation.result(timeout=120) print("\nCluster created: {}".format(cluster_id)) # [END bigtable_create_cluster] diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt index 7df9cb242638..e5d1d4ee6a2f 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.10.0 +google-cloud-bigtable==2.10.1 backoff==2.1.2 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index 565d944170d8..0e95d91b740d 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.10.0 -google-cloud-monitoring==2.9.1 +google-cloud-bigtable==2.10.1 +google-cloud-monitoring==2.10.1 diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt index 8d9f1299d473..d0504d16d195 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.10.0 +google-cloud-bigtable==2.10.1 diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt index d74172ad3bda..9e6d5e6f3a0d 100644 --- a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.9.0 +google-cloud-bigtable==2.10.1 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt index 0522dcf65258..9e6d5e6f3a0d 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.10.0 +google-cloud-bigtable==2.10.1 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt index 0522dcf65258..9e6d5e6f3a0d 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.10.0 +google-cloud-bigtable==2.10.1 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt index 53b03cadb8e8..fb0107cad75e 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.10.0 \ No newline at end of file +google-cloud-bigtable==2.10.1 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt 
b/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt index c497ac626420..7f627052ec2c 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt @@ -1,2 +1,2 @@ pytest==7.1.2 -google-cloud-testutils==1.3.2 +google-cloud-testutils==1.3.3 diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt index 8d9f1299d473..d0504d16d195 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.10.0 +google-cloud-bigtable==2.10.1 From a4911fda49cac10e664fed22d775f5d2f415f846 Mon Sep 17 00:00:00 2001 From: Igor Bernstein Date: Thu, 4 Aug 2022 17:16:12 -0400 Subject: [PATCH 633/892] perf: improve row merging (#619) The underlying GAPIC client uses protoplus for all requests and responses. However the underlying protos for ReadRowsResponse are never exposed to end users directly: the underlying chunks get merged into logic rows. The readability benefits provided by protoplus for ReadRows do not justify the costs. This change unwraps the protoplus messages and uses the raw protobuff message as input for row merging. This improves row merging performance by 10x. For 10k rows, each with 100 cells where each cell is 100 bytes and in groups of 100 rows per ReadRowsResponse, cProfile showed a 10x improvement: old: 124266037 function calls in 68.208 seconds new: 13042837 function calls in 7.787 seconds There are still a few more low hanging fruits to optimize performance and those will come in follow up PRs --- .../google/cloud/bigtable/row_data.py | 19 +++++++++++------- .../tests/unit/test_row_data.py | 20 ++++++++++++------- .../tests/unit/test_table.py | 10 ++++++---- 3 files changed, 31 insertions(+), 18 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py index 0c1565737428..ab0358285312 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py @@ -474,7 +474,11 @@ def _read_next(self): def _read_next_response(self): """Helper for :meth:`__iter__`.""" - return self.retry(self._read_next, on_error=self._on_error)() + resp_protoplus = self.retry(self._read_next, on_error=self._on_error)() + # unwrap the underlying protobuf, there is a significant amount of + # overhead that protoplus imposes for very little gain. The protos + # are not user visible, so we just use the raw protos for merging. + return data_messages_v2_pb2.ReadRowsResponse.pb(resp_protoplus) def __iter__(self): """Consume the ``ReadRowsResponse`` s from the stream. 
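The wrap/unwrap boundary this change exploits can be seen on a single CellChunk. A minimal sketch using the same proto-plus types the patch already imports (field values here are illustrative):

    from google.cloud.bigtable_v2.types import bigtable as messages_v2_pb2

    # proto-plus view: wrapper fields (StringValue/BytesValue) are marshalled
    # to plain Python values, and presence is tested with `in`.
    chunk_plus = messages_v2_pb2.ReadRowsResponse.CellChunk(
        row_key=b"RK", family_name="A", qualifier=b"C", value=b"value-VAL"
    )
    assert "family_name" in chunk_plus and chunk_plus.family_name == "A"

    # Raw protobuf view: .pb() returns the underlying generated message, so
    # field access skips the proto-plus marshalling layer; presence checks
    # become HasField() and wrapper fields expose .value instead.
    chunk_raw = messages_v2_pb2.ReadRowsResponse.CellChunk.pb(chunk_plus)
    assert chunk_raw.HasField("family_name")
    assert chunk_raw.family_name.value == "A"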
@@ -543,11 +547,12 @@ def _process_chunk(self, chunk): def _update_cell(self, chunk): if self._cell is None: qualifier = None - if "qualifier" in chunk: - qualifier = chunk.qualifier + if chunk.HasField("qualifier"): + qualifier = chunk.qualifier.value + family = None - if "family_name" in chunk: - family = chunk.family_name + if chunk.HasField("family_name"): + family = chunk.family_name.value self._cell = PartialCellData( chunk.row_key, @@ -577,8 +582,8 @@ def _validate_chunk_reset_row(self, chunk): # No reset with other keys _raise_if(chunk.row_key) - _raise_if("family_name" in chunk) - _raise_if("qualifier" in chunk) + _raise_if(chunk.HasField("family_name")) + _raise_if(chunk.HasField("qualifier")) _raise_if(chunk.timestamp_micros) _raise_if(chunk.labels) _raise_if(chunk.value_size) diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_data.py b/packages/google-cloud-bigtable/tests/unit/test_row_data.py index 9b329dc9f8de..94a90aa24914 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_data.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_data.py @@ -637,15 +637,15 @@ def test_partial_rows_data__copy_from_previous_filled(): def test_partial_rows_data_valid_last_scanned_row_key_on_start(): client = _Client() - response = _ReadRowsResponseV2(chunks=(), last_scanned_row_key="2.AFTER") + response = _ReadRowsResponseV2([], last_scanned_row_key=b"2.AFTER") iterator = _MockCancellableIterator(response) client._data_stub = mock.MagicMock() client._data_stub.read_rows.side_effect = [iterator] request = object() yrd = _make_partial_rows_data(client._data_stub.read_rows, request) - yrd.last_scanned_row_key = "1.BEFORE" + yrd.last_scanned_row_key = b"1.BEFORE" _partial_rows_data_consume_all(yrd) - assert yrd.last_scanned_row_key == "2.AFTER" + assert yrd.last_scanned_row_key == b"2.AFTER" def test_partial_rows_data_invalid_empty_chunk(): @@ -666,6 +666,7 @@ def test_partial_rows_data_invalid_empty_chunk(): def test_partial_rows_data_state_cell_in_progress(): from google.cloud.bigtable_v2.services.bigtable import BigtableClient + from google.cloud.bigtable_v2.types import bigtable as messages_v2_pb2 LABELS = ["L1", "L2"] @@ -682,6 +683,9 @@ def test_partial_rows_data_state_cell_in_progress(): value=VALUE, labels=LABELS, ) + # _update_cell expects to be called after the protoplus wrapper has been + # shucked + chunk = messages_v2_pb2.ReadRowsResponse.CellChunk.pb(chunk) yrd._update_cell(chunk) more_cell_data = _ReadRowsResponseCellChunkPB(value=VALUE) @@ -1455,10 +1459,12 @@ def __init__(self, **kw): self.__dict__.update(kw) -class _ReadRowsResponseV2(object): - def __init__(self, chunks, last_scanned_row_key=""): - self.chunks = chunks - self.last_scanned_row_key = last_scanned_row_key +def _ReadRowsResponseV2(chunks, last_scanned_row_key=b""): + from google.cloud.bigtable_v2.types import bigtable as messages_v2_pb2 + + return messages_v2_pb2.ReadRowsResponse( + chunks=chunks, last_scanned_row_key=last_scanned_row_key + ) def _generate_cell_chunks(chunk_text_pbs): diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index 883f713d8267..a89e02e8c261 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -2206,10 +2206,12 @@ def next(self): __next__ = next -class _ReadRowsResponseV2(object): - def __init__(self, chunks, last_scanned_row_key=""): - self.chunks = chunks - self.last_scanned_row_key = 
last_scanned_row_key +def _ReadRowsResponseV2(chunks, last_scanned_row_key=b""): + from google.cloud.bigtable_v2.types import bigtable as messages_v2_pb2 + + return messages_v2_pb2.ReadRowsResponse( + chunks=chunks, last_scanned_row_key=last_scanned_row_key + ) def _TablePB(*args, **kw): From a52f0f9c3d5a7ea9c8c3ce064bbe084b3842322e Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Thu, 4 Aug 2022 14:59:49 -0700 Subject: [PATCH 634/892] chore(main): release 2.11.0 (#611) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- packages/google-cloud-bigtable/CHANGELOG.md | 21 +++++++++++++++++++++ packages/google-cloud-bigtable/setup.py | 2 +- 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index be1a397d818e..38584bcbdb2f 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,27 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.11.0](https://github.com/googleapis/python-bigtable/compare/v2.10.1...v2.11.0) (2022-08-04) + + +### Features + +* add audience parameter ([a7a7699](https://github.com/googleapis/python-bigtable/commit/a7a76998fad3c12215527e4ebb517a1526cc152e)) +* add satisfies_pzs output only field ([#614](https://github.com/googleapis/python-bigtable/issues/614)) ([7dc1469](https://github.com/googleapis/python-bigtable/commit/7dc1469fef2dc38f1509b35a37e9c97381ab7601)) +* Add storage_utilization_gib_per_node to Autoscaling target ([a7a7699](https://github.com/googleapis/python-bigtable/commit/a7a76998fad3c12215527e4ebb517a1526cc152e)) +* Cloud Bigtable Undelete Table service and message proto files ([a7a7699](https://github.com/googleapis/python-bigtable/commit/a7a76998fad3c12215527e4ebb517a1526cc152e)) + + +### Bug Fixes + +* **deps:** require google-api-core>=1.32.0,>=2.8.0 ([a7a7699](https://github.com/googleapis/python-bigtable/commit/a7a76998fad3c12215527e4ebb517a1526cc152e)) +* require python 3.7+ ([#610](https://github.com/googleapis/python-bigtable/issues/610)) ([10d00f5](https://github.com/googleapis/python-bigtable/commit/10d00f5af5d5878c26529f5e48a5fb8d8385696d)) + + +### Performance Improvements + +* improve row merging ([#619](https://github.com/googleapis/python-bigtable/issues/619)) ([b4853e5](https://github.com/googleapis/python-bigtable/commit/b4853e59d0efd8a7b37f3fcb06b14dbd9f5d20a4)) + ## [2.10.1](https://github.com/googleapis/python-bigtable/compare/v2.10.0...v2.10.1) (2022-06-03) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 110101e5e415..8087dddb6618 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -22,7 +22,7 @@ name = "google-cloud-bigtable" description = "Google Cloud Bigtable API client library" -version = "2.10.1" +version = "2.11.0" # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' From ba10a5d0066a3481aa4f29c48428d0debef84158 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Fri, 5 Aug 2022 21:28:09 +0200 Subject: [PATCH 635/892] chore(deps): update all dependencies (#622) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore(deps): update all dependencies * 🦉 Updates from OwlBot post-processor See 
https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * Don't bump protobuf yet, it's not compatible Co-authored-by: Owl Bot Co-authored-by: Mariatta Wijaya --- packages/google-cloud-bigtable/samples/beam/requirements.txt | 2 +- packages/google-cloud-bigtable/samples/hello/requirements.txt | 2 +- .../samples/instanceadmin/requirements.txt | 2 +- .../google-cloud-bigtable/samples/metricscaler/requirements.txt | 2 +- .../google-cloud-bigtable/samples/quickstart/requirements.txt | 2 +- .../samples/snippets/deletes/requirements.txt | 2 +- .../samples/snippets/filters/requirements.txt | 2 +- .../samples/snippets/reads/requirements.txt | 2 +- .../samples/snippets/writes/requirements.txt | 2 +- .../google-cloud-bigtable/samples/tableadmin/requirements.txt | 2 +- 10 files changed, 10 insertions(+), 10 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index 52da23f84541..2ebc0a65e342 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ apache-beam==2.40.0 -google-cloud-bigtable==2.10.1 +google-cloud-bigtable==2.11.0 google-cloud-core==2.3.2 diff --git a/packages/google-cloud-bigtable/samples/hello/requirements.txt b/packages/google-cloud-bigtable/samples/hello/requirements.txt index b6b1a2001138..5e3ffcd7136f 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.10.1 +google-cloud-bigtable==2.11.0 google-cloud-core==2.3.2 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt index e5d1d4ee6a2f..834b19ce3ac5 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.10.1 +google-cloud-bigtable==2.11.0 backoff==2.1.2 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index 0e95d91b740d..e19a2bc6a67e 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.10.1 +google-cloud-bigtable==2.11.0 google-cloud-monitoring==2.10.1 diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt index d0504d16d195..e3d90808e56c 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.10.1 +google-cloud-bigtable==2.11.0 diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt index 9e6d5e6f3a0d..9dd0f2b92591 100644 --- a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.10.1 +google-cloud-bigtable==2.11.0 snapshottest==0.6.0 \ No newline at end of file diff --git 
a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt index 9e6d5e6f3a0d..9dd0f2b92591 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.10.1 +google-cloud-bigtable==2.11.0 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt index 9e6d5e6f3a0d..9dd0f2b92591 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.10.1 +google-cloud-bigtable==2.11.0 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt index fb0107cad75e..b22cdacc68d0 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.10.1 \ No newline at end of file +google-cloud-bigtable==2.11.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt index d0504d16d195..e3d90808e56c 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.10.1 +google-cloud-bigtable==2.11.0 From 81391d1756764188739cce9bedac9ffa8f4908c0 Mon Sep 17 00:00:00 2001 From: Igor Bernstein Date: Mon, 8 Aug 2022 12:31:12 -0400 Subject: [PATCH 636/892] test: improve the row merging tests (#623) * test: improve the row merging tests - extract read-rows-acceptance-test.json based tests into own file - update the json to match the latest available in https://github.com/googleapis/conformance-tests/tree/main/bigtable/v2 - use parameterized pytest test to run all of the scenarios (instead of creating a function for each json blob) - use json protobufs to parse the file I left a TODO to allow easy updates of the file, unfortunately its not straight forward as the canonical protos get renamed for python gapic Next PR will extract row merging functionality from row_data to make it easier to maintain * fix type annotation * fix lints * fix coverage * fix pytest warning --- .../tests/unit/read-rows-acceptance-test.json | 1932 ++++++++++------- .../tests/unit/test_row_data.py | 365 +--- .../tests/unit/test_row_merger.py | 78 + 3 files changed, 1319 insertions(+), 1056 deletions(-) create mode 100644 packages/google-cloud-bigtable/tests/unit/test_row_merger.py diff --git a/packages/google-cloud-bigtable/tests/unit/read-rows-acceptance-test.json b/packages/google-cloud-bigtable/tests/unit/read-rows-acceptance-test.json index cfa8a17f327b..011ace2b9aa7 100644 --- a/packages/google-cloud-bigtable/tests/unit/read-rows-acceptance-test.json +++ b/packages/google-cloud-bigtable/tests/unit/read-rows-acceptance-test.json @@ -1,1203 +1,1663 @@ { - "tests": [ + "readRowsTests": [ { - "name": "invalid - no commit", + "description": "invalid - no commit", "chunks": [ - "row_key: \"RK\"\nfamily_name: 
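In the new format every binary proto field (rowKey, qualifier, value) is carried as base64 in the JSON encoding, so a single parameterized test can load the file once and replay each scenario by its description. A rough sketch of that shape, assuming the JSON file sits next to the test module; it only decodes the chunks rather than driving the row merger, unlike the actual test_row_merger.py this commit adds:

    import base64
    import json
    import pathlib

    import pytest

    _SCENARIOS = json.loads(
        (pathlib.Path(__file__).parent / "read-rows-acceptance-test.json").read_text()
    )["readRowsTests"]

    @pytest.mark.parametrize(
        "scenario", _SCENARIOS, ids=[s["description"] for s in _SCENARIOS]
    )
    def test_read_rows_scenario(scenario):
        for chunk in scenario.get("chunks", []):
            # Binary fields arrive base64-encoded, e.g. "Uks=" -> b"RK",
            # "Qw==" -> b"C", "dmFsdWUtVkFM" -> b"value-VAL".
            row_key = base64.b64decode(chunk.get("rowKey", ""))
            value = base64.b64decode(chunk.get("value", ""))
            assert isinstance(row_key, bytes) and isinstance(value, bytes)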
\u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n" + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": false + } ], "results": [ { - "rk": "", - "fm": "", - "qual": "", - "ts": 0, - "value": "", - "label": "", "error": true } ] }, { - "name": "invalid - no cell key before commit", + "description": "invalid - no cell key before commit", "chunks": [ - "commit_row: true\n" + { + "commitRow": true + } ], "results": [ { - "rk": "", - "fm": "", - "qual": "", - "ts": 0, - "value": "", - "label": "", "error": true } ] }, { - "name": "invalid - no cell key before value", + "description": "invalid - no cell key before value", "chunks": [ - "timestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n" + { + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": false + } ], "results": [ { - "rk": "", - "fm": "", - "qual": "", - "ts": 0, - "value": "", - "label": "", "error": true } ] }, { - "name": "invalid - new col family must specify qualifier", + "description": "invalid - new col family must specify qualifier", "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", - "family_name: \u003c\n value: \"B\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "99", + "value": "dmFsdWUtVkFMXzE=", + "commitRow": false + }, + { + "familyName": "B", + "timestampMicros": "98", + "value": "dmFsdWUtVkFMXzI=", + "commitRow": true + } ], "results": [ { - "rk": "", - "fm": "", - "qual": "", - "ts": 0, - "value": "", - "label": "", "error": true } ] }, { - "name": "bare commit implies ts=0", + "description": "bare commit implies ts=0", "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", - "commit_row: true\n" + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": false + }, + { + "commitRow": true + } ], "results": [ { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 100, - "value": "value-VAL", - "label": "", - "error": false + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL" }, { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 0, - "value": "", - "label": "", - "error": false + "rowKey": "RK", + "familyName": "A", + "qualifier": "C" } ] }, { - "name": "simple row with timestamp", + "description": "simple row with timestamp", "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": true + } ], "results": [ { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 100, - "value": "value-VAL", - "label": "", - "error": false + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL" } ] }, { - "name": "missing timestamp, implied ts=0", + "description": "missing timestamp, implied ts=0", "chunks": [ - "row_key: 
\"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\nvalue: \"value-VAL\"\ncommit_row: true\n" + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "value": "dmFsdWUtVkFM", + "commitRow": true + } ], "results": [ { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 0, - "value": "value-VAL", - "label": "", - "error": false + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "value": "value-VAL" } ] }, { - "name": "empty cell value", + "description": "empty cell value", "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ncommit_row: true\n" + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "commitRow": true + } ], "results": [ { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 0, - "value": "", - "label": "", - "error": false + "rowKey": "RK", + "familyName": "A", + "qualifier": "C" } ] }, { - "name": "two unsplit cells", + "description": "two unsplit cells", "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", - "timestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "99", + "value": "dmFsdWUtVkFMXzE=", + "commitRow": false + }, + { + "timestampMicros": "98", + "value": "dmFsdWUtVkFMXzI=", + "commitRow": true + } ], "results": [ { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 101, - "value": "value-VAL_1", - "label": "", - "error": false + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "99", + "value": "value-VAL_1" }, { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 102, - "value": "value-VAL_2", - "label": "", - "error": false + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "98", + "value": "value-VAL_2" } ] }, { - "name": "two qualifiers", + "description": "two qualifiers", "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", - "qualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "99", + "value": "dmFsdWUtVkFMXzE=", + "commitRow": false + }, + { + "qualifier": "RA==", + "timestampMicros": "98", + "value": "dmFsdWUtVkFMXzI=", + "commitRow": true + } ], "results": [ { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 101, - "value": "value-VAL_1", - "label": "", - "error": false + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "99", + "value": "value-VAL_1" }, { - "rk": "RK", - "fm": "A", - "qual": "D", - "ts": 102, - "value": "value-VAL_2", - "label": "", - "error": false + "rowKey": "RK", + "familyName": "A", + "qualifier": "D", + "timestampMicros": "98", + "value": "value-VAL_2" } ] }, { - "name": "two families", + "description": "two families", "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", - "family_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"E\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + { + "rowKey": "Uks=", + "familyName": "A", + 
"qualifier": "Qw==", + "timestampMicros": "99", + "value": "dmFsdWUtVkFMXzE=", + "commitRow": false + }, + { + "familyName": "B", + "qualifier": "RQ==", + "timestampMicros": "98", + "value": "dmFsdWUtVkFMXzI=", + "commitRow": true + } ], "results": [ { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 101, - "value": "value-VAL_1", - "label": "", - "error": false + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "99", + "value": "value-VAL_1" }, { - "rk": "RK", - "fm": "B", - "qual": "E", - "ts": 102, - "value": "value-VAL_2", - "label": "", - "error": false + "rowKey": "RK", + "familyName": "B", + "qualifier": "E", + "timestampMicros": "98", + "value": "value-VAL_2" } ] }, { - "name": "with labels", + "description": "with labels", "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nlabels: \"L_1\"\nvalue: \"value-VAL_1\"\ncommit_row: false\n", - "timestamp_micros: 102\nlabels: \"L_2\"\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "99", + "labels": [ + "L_1" + ], + "value": "dmFsdWUtVkFMXzE=", + "commitRow": false + }, + { + "timestampMicros": "98", + "labels": [ + "L_2" + ], + "value": "dmFsdWUtVkFMXzI=", + "commitRow": true + } ], "results": [ { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 101, + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "99", "value": "value-VAL_1", - "label": "L_1", - "error": false + "label": "L_1" }, { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 102, + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "98", "value": "value-VAL_2", - "label": "L_2", - "error": false + "label": "L_2" } ] }, { - "name": "split cell, bare commit", + "description": "split cell, bare commit", "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", - "value: \"alue-VAL\"\ncommit_row: false\n", - "commit_row: true\n" + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dg==", + "valueSize": 9, + "commitRow": false + }, + { + "value": "YWx1ZS1WQUw=", + "commitRow": false + }, + { + "commitRow": true + } ], "results": [ { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 100, - "value": "value-VAL", - "label": "", - "error": false + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL" }, { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 0, - "value": "", - "label": "", - "error": false + "rowKey": "RK", + "familyName": "A", + "qualifier": "C" } ] }, { - "name": "split cell", + "description": "split cell", "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", - "value: \"alue-VAL\"\ncommit_row: true\n" + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dg==", + "valueSize": 9, + "commitRow": false + }, + { + "value": "YWx1ZS1WQUw=", + "commitRow": true + } ], "results": [ { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 100, - "value": "value-VAL", - "label": "", - "error": false + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": 
"value-VAL" } ] }, { - "name": "split four ways", + "description": "split four ways", "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nlabels: \"L\"\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", - "value: \"a\"\nvalue_size: 10\ncommit_row: false\n", - "value: \"l\"\nvalue_size: 10\ncommit_row: false\n", - "value: \"ue-VAL\"\ncommit_row: true\n" + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "labels": [ + "L" + ], + "value": "dg==", + "valueSize": 9, + "commitRow": false + }, + { + "value": "YQ==", + "valueSize": 9, + "commitRow": false + }, + { + "value": "bA==", + "valueSize": 9, + "commitRow": false + }, + { + "value": "dWUtVkFM", + "commitRow": true + } ], "results": [ { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 100, + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", "value": "value-VAL", - "label": "L", - "error": false + "label": "L" } ] }, { - "name": "two split cells", + "description": "two split cells", "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", - "value: \"alue-VAL_1\"\ncommit_row: false\n", - "timestamp_micros: 102\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", - "value: \"alue-VAL_2\"\ncommit_row: true\n" + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "99", + "value": "dg==", + "valueSize": 11, + "commitRow": false + }, + { + "value": "YWx1ZS1WQUxfMQ==", + "commitRow": false + }, + { + "timestampMicros": "98", + "value": "dg==", + "valueSize": 11, + "commitRow": false + }, + { + "value": "YWx1ZS1WQUxfMg==", + "commitRow": true + } ], "results": [ { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 101, - "value": "value-VAL_1", - "label": "", - "error": false + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "99", + "value": "value-VAL_1" }, { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 102, - "value": "value-VAL_2", - "label": "", - "error": false + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "98", + "value": "value-VAL_2" } ] }, { - "name": "multi-qualifier splits", + "description": "multi-qualifier splits", "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", - "value: \"alue-VAL_1\"\ncommit_row: false\n", - "qualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 102\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", - "value: \"alue-VAL_2\"\ncommit_row: true\n" + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "99", + "value": "dg==", + "valueSize": 11, + "commitRow": false + }, + { + "value": "YWx1ZS1WQUxfMQ==", + "commitRow": false + }, + { + "qualifier": "RA==", + "timestampMicros": "98", + "value": "dg==", + "valueSize": 11, + "commitRow": false + }, + { + "value": "YWx1ZS1WQUxfMg==", + "commitRow": true + } ], "results": [ { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 101, - "value": "value-VAL_1", - "label": "", - "error": false + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "99", + "value": "value-VAL_1" }, { - "rk": "RK", - "fm": "A", - "qual": "D", - "ts": 102, - "value": "value-VAL_2", - 
"label": "", - "error": false + "rowKey": "RK", + "familyName": "A", + "qualifier": "D", + "timestampMicros": "98", + "value": "value-VAL_2" } ] }, { - "name": "multi-qualifier multi-split", + "description": "multi-qualifier multi-split", "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", - "value: \"a\"\nvalue_size: 10\ncommit_row: false\n", - "value: \"lue-VAL_1\"\ncommit_row: false\n", - "qualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 102\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", - "value: \"a\"\nvalue_size: 10\ncommit_row: false\n", - "value: \"lue-VAL_2\"\ncommit_row: true\n" + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "99", + "value": "dg==", + "valueSize": 11, + "commitRow": false + }, + { + "value": "YQ==", + "valueSize": 11, + "commitRow": false + }, + { + "value": "bHVlLVZBTF8x", + "commitRow": false + }, + { + "qualifier": "RA==", + "timestampMicros": "98", + "value": "dg==", + "valueSize": 11, + "commitRow": false + }, + { + "value": "YQ==", + "valueSize": 11, + "commitRow": false + }, + { + "value": "bHVlLVZBTF8y", + "commitRow": true + } ], "results": [ { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 101, - "value": "value-VAL_1", - "label": "", - "error": false + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "99", + "value": "value-VAL_1" }, { - "rk": "RK", - "fm": "A", - "qual": "D", - "ts": 102, - "value": "value-VAL_2", - "label": "", - "error": false + "rowKey": "RK", + "familyName": "A", + "qualifier": "D", + "timestampMicros": "98", + "value": "value-VAL_2" } ] }, { - "name": "multi-family split", + "description": "multi-family split", "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", - "value: \"alue-VAL_1\"\ncommit_row: false\n", - "family_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"E\"\n\u003e\ntimestamp_micros: 102\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", - "value: \"alue-VAL_2\"\ncommit_row: true\n" + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "99", + "value": "dg==", + "valueSize": 11, + "commitRow": false + }, + { + "value": "YWx1ZS1WQUxfMQ==", + "commitRow": false + }, + { + "familyName": "B", + "qualifier": "RQ==", + "timestampMicros": "98", + "value": "dg==", + "valueSize": 11, + "commitRow": false + }, + { + "value": "YWx1ZS1WQUxfMg==", + "commitRow": true + } ], "results": [ { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 101, - "value": "value-VAL_1", - "label": "", - "error": false + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "99", + "value": "value-VAL_1" }, { - "rk": "RK", - "fm": "B", - "qual": "E", - "ts": 102, - "value": "value-VAL_2", - "label": "", - "error": false + "rowKey": "RK", + "familyName": "B", + "qualifier": "E", + "timestampMicros": "98", + "value": "value-VAL_2" } ] }, { - "name": "invalid - no commit between rows", + "description": "invalid - no commit between rows", "chunks": [ - "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", - "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n 
value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n" + { + "rowKey": "UktfMQ==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": false + }, + { + "rowKey": "UktfMg==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": false + } ], "results": [ { - "rk": "", - "fm": "", - "qual": "", - "ts": 0, - "value": "", - "label": "", "error": true } ] }, { - "name": "invalid - no commit after first row", + "description": "invalid - no commit after first row", "chunks": [ - "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", - "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + { + "rowKey": "UktfMQ==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": false + }, + { + "rowKey": "UktfMg==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": true + } ], "results": [ { - "rk": "", - "fm": "", - "qual": "", - "ts": 0, - "value": "", - "label": "", "error": true } ] }, { - "name": "invalid - last row missing commit", + "description": "invalid - last row missing commit", "chunks": [ - "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n", - "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n" + { + "rowKey": "UktfMQ==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": true + }, + { + "rowKey": "UktfMg==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": false + } ], "results": [ { - "rk": "RK_1", - "fm": "A", - "qual": "C", - "ts": 100, - "value": "value-VAL", - "label": "", - "error": false + "rowKey": "RK_1", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL" }, { - "rk": "", - "fm": "", - "qual": "", - "ts": 0, - "value": "", - "label": "", "error": true } ] }, { - "name": "invalid - duplicate row key", + "description": "invalid - duplicate row key", "chunks": [ - "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n", - "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + { + "rowKey": "UktfMQ==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": true + }, + { + "rowKey": "UktfMQ==", + "familyName": "B", + "qualifier": "RA==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": true + } ], "results": [ { - "rk": "RK_1", - "fm": "A", - "qual": "C", - "ts": 100, - "value": "value-VAL", - "label": "", - "error": false + "rowKey": "RK_1", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL" }, { - "rk": "", - "fm": "", - "qual": "", 
- "ts": 0, - "value": "", - "label": "", "error": true } ] }, { - "name": "invalid - new row missing row key", + "description": "invalid - new row missing row key", "chunks": [ - "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n", - "timestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + { + "rowKey": "UktfMQ==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": true + }, + { + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": true + } ], "results": [ { - "rk": "RK_1", - "fm": "A", - "qual": "C", - "ts": 100, - "value": "value-VAL", - "label": "", - "error": false + "rowKey": "RK_1", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL" }, { - "rk": "", - "fm": "", - "qual": "", - "ts": 0, - "value": "", - "label": "", "error": true } ] }, { - "name": "two rows", + "description": "two rows", "chunks": [ - "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n", - "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + { + "rowKey": "UktfMQ==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": true + }, + { + "rowKey": "UktfMg==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": true + } ], "results": [ { - "rk": "RK_1", - "fm": "A", - "qual": "C", - "ts": 100, - "value": "value-VAL", - "label": "", - "error": false + "rowKey": "RK_1", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL" }, { - "rk": "RK_2", - "fm": "A", - "qual": "C", - "ts": 100, - "value": "value-VAL", - "label": "", - "error": false + "rowKey": "RK_2", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL" } ] }, { - "name": "two rows implicit timestamp", + "description": "two rows implicit timestamp", "chunks": [ - "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\nvalue: \"value-VAL\"\ncommit_row: true\n", - "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + { + "rowKey": "UktfMQ==", + "familyName": "A", + "qualifier": "Qw==", + "value": "dmFsdWUtVkFM", + "commitRow": true + }, + { + "rowKey": "UktfMg==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": true + } ], "results": [ { - "rk": "RK_1", - "fm": "A", - "qual": "C", - "ts": 0, - "value": "value-VAL", - "label": "", - "error": false + "rowKey": "RK_1", + "familyName": "A", + "qualifier": "C", + "value": "value-VAL" }, { - "rk": "RK_2", - "fm": "A", - "qual": "C", - "ts": 100, - "value": "value-VAL", - "label": "", - "error": false + "rowKey": "RK_2", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL" } ] }, { - "name": "two rows empty value", + "description": "two rows empty value", "chunks": [ - "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: 
\"C\"\n\u003e\ncommit_row: true\n", - "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + { + "rowKey": "UktfMQ==", + "familyName": "A", + "qualifier": "Qw==", + "commitRow": true + }, + { + "rowKey": "UktfMg==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": true + } ], "results": [ { - "rk": "RK_1", - "fm": "A", - "qual": "C", - "ts": 0, - "value": "", - "label": "", - "error": false + "rowKey": "RK_1", + "familyName": "A", + "qualifier": "C" }, { - "rk": "RK_2", - "fm": "A", - "qual": "C", - "ts": 100, - "value": "value-VAL", - "label": "", - "error": false + "rowKey": "RK_2", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL" } ] }, { - "name": "two rows, one with multiple cells", + "description": "two rows, one with multiple cells", "chunks": [ - "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", - "timestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n", - "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: true\n" + { + "rowKey": "UktfMQ==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "99", + "value": "dmFsdWUtVkFMXzE=", + "commitRow": false + }, + { + "timestampMicros": "98", + "value": "dmFsdWUtVkFMXzI=", + "commitRow": true + }, + { + "rowKey": "UktfMg==", + "familyName": "B", + "qualifier": "RA==", + "timestampMicros": "97", + "value": "dmFsdWUtVkFMXzM=", + "commitRow": true + } ], "results": [ { - "rk": "RK_1", - "fm": "A", - "qual": "C", - "ts": 101, - "value": "value-VAL_1", - "label": "", - "error": false + "rowKey": "RK_1", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "99", + "value": "value-VAL_1" }, { - "rk": "RK_1", - "fm": "A", - "qual": "C", - "ts": 102, - "value": "value-VAL_2", - "label": "", - "error": false + "rowKey": "RK_1", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "98", + "value": "value-VAL_2" }, { - "rk": "RK_2", - "fm": "B", - "qual": "D", - "ts": 103, - "value": "value-VAL_3", - "label": "", - "error": false + "rowKey": "RK_2", + "familyName": "B", + "qualifier": "D", + "timestampMicros": "97", + "value": "value-VAL_3" } ] }, { - "name": "two rows, multiple cells", + "description": "two rows, multiple cells", "chunks": [ - "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", - "qualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n", - "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"E\"\n\u003e\ntimestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: false\n", - "qualifier: \u003c\n value: \"F\"\n\u003e\ntimestamp_micros: 104\nvalue: \"value-VAL_4\"\ncommit_row: true\n" + { + "rowKey": "UktfMQ==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "99", + "value": "dmFsdWUtVkFMXzE=", + "commitRow": false + }, + { + "qualifier": "RA==", + "timestampMicros": "98", + "value": "dmFsdWUtVkFMXzI=", + "commitRow": true + }, + { + "rowKey": "UktfMg==", + "familyName": "B", + "qualifier": "RQ==", + 
"timestampMicros": "97", + "value": "dmFsdWUtVkFMXzM=", + "commitRow": false + }, + { + "qualifier": "Rg==", + "timestampMicros": "96", + "value": "dmFsdWUtVkFMXzQ=", + "commitRow": true + } ], "results": [ { - "rk": "RK_1", - "fm": "A", - "qual": "C", - "ts": 101, - "value": "value-VAL_1", - "label": "", - "error": false + "rowKey": "RK_1", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "99", + "value": "value-VAL_1" }, { - "rk": "RK_1", - "fm": "A", - "qual": "D", - "ts": 102, - "value": "value-VAL_2", - "label": "", - "error": false + "rowKey": "RK_1", + "familyName": "A", + "qualifier": "D", + "timestampMicros": "98", + "value": "value-VAL_2" }, { - "rk": "RK_2", - "fm": "B", - "qual": "E", - "ts": 103, - "value": "value-VAL_3", - "label": "", - "error": false + "rowKey": "RK_2", + "familyName": "B", + "qualifier": "E", + "timestampMicros": "97", + "value": "value-VAL_3" }, { - "rk": "RK_2", - "fm": "B", - "qual": "F", - "ts": 104, - "value": "value-VAL_4", - "label": "", - "error": false + "rowKey": "RK_2", + "familyName": "B", + "qualifier": "F", + "timestampMicros": "96", + "value": "value-VAL_4" } ] }, { - "name": "two rows, multiple cells, multiple families", + "description": "two rows, multiple cells, multiple families", "chunks": [ - "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", - "family_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"E\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n", - "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"M\"\n\u003e\nqualifier: \u003c\n value: \"O\"\n\u003e\ntimestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: false\n", - "family_name: \u003c\n value: \"N\"\n\u003e\nqualifier: \u003c\n value: \"P\"\n\u003e\ntimestamp_micros: 104\nvalue: \"value-VAL_4\"\ncommit_row: true\n" + { + "rowKey": "UktfMQ==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "99", + "value": "dmFsdWUtVkFMXzE=", + "commitRow": false + }, + { + "familyName": "B", + "qualifier": "RQ==", + "timestampMicros": "98", + "value": "dmFsdWUtVkFMXzI=", + "commitRow": true + }, + { + "rowKey": "UktfMg==", + "familyName": "M", + "qualifier": "Tw==", + "timestampMicros": "97", + "value": "dmFsdWUtVkFMXzM=", + "commitRow": false + }, + { + "familyName": "N", + "qualifier": "UA==", + "timestampMicros": "96", + "value": "dmFsdWUtVkFMXzQ=", + "commitRow": true + } ], "results": [ { - "rk": "RK_1", - "fm": "A", - "qual": "C", - "ts": 101, - "value": "value-VAL_1", - "label": "", - "error": false + "rowKey": "RK_1", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "99", + "value": "value-VAL_1" }, { - "rk": "RK_1", - "fm": "B", - "qual": "E", - "ts": 102, - "value": "value-VAL_2", - "label": "", - "error": false + "rowKey": "RK_1", + "familyName": "B", + "qualifier": "E", + "timestampMicros": "98", + "value": "value-VAL_2" }, { - "rk": "RK_2", - "fm": "M", - "qual": "O", - "ts": 103, - "value": "value-VAL_3", - "label": "", - "error": false + "rowKey": "RK_2", + "familyName": "M", + "qualifier": "O", + "timestampMicros": "97", + "value": "value-VAL_3" }, { - "rk": "RK_2", - "fm": "N", - "qual": "P", - "ts": 104, - "value": "value-VAL_4", - "label": "", - "error": false + "rowKey": "RK_2", + "familyName": "N", + "qualifier": "P", + "timestampMicros": "96", + "value": "value-VAL_4" } ] }, { - "name": "two rows, four cells, 2 labels", + "description": "two rows, four 
cells, 2 labels", "chunks": [ - "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nlabels: \"L_1\"\nvalue: \"value-VAL_1\"\ncommit_row: false\n", - "timestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n", - "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 103\nlabels: \"L_3\"\nvalue: \"value-VAL_3\"\ncommit_row: false\n", - "timestamp_micros: 104\nvalue: \"value-VAL_4\"\ncommit_row: true\n" + { + "rowKey": "UktfMQ==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "99", + "labels": [ + "L_1" + ], + "value": "dmFsdWUtVkFMXzE=", + "commitRow": false + }, + { + "timestampMicros": "98", + "value": "dmFsdWUtVkFMXzI=", + "commitRow": true + }, + { + "rowKey": "UktfMg==", + "familyName": "B", + "qualifier": "RA==", + "timestampMicros": "97", + "labels": [ + "L_3" + ], + "value": "dmFsdWUtVkFMXzM=", + "commitRow": false + }, + { + "timestampMicros": "96", + "value": "dmFsdWUtVkFMXzQ=", + "commitRow": true + } ], "results": [ { - "rk": "RK_1", - "fm": "A", - "qual": "C", - "ts": 101, + "rowKey": "RK_1", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "99", "value": "value-VAL_1", - "label": "L_1", - "error": false + "label": "L_1" }, { - "rk": "RK_1", - "fm": "A", - "qual": "C", - "ts": 102, - "value": "value-VAL_2", - "label": "", - "error": false + "rowKey": "RK_1", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "98", + "value": "value-VAL_2" }, { - "rk": "RK_2", - "fm": "B", - "qual": "D", - "ts": 103, + "rowKey": "RK_2", + "familyName": "B", + "qualifier": "D", + "timestampMicros": "97", "value": "value-VAL_3", - "label": "L_3", - "error": false + "label": "L_3" }, { - "rk": "RK_2", - "fm": "B", - "qual": "D", - "ts": 104, - "value": "value-VAL_4", - "label": "", - "error": false + "rowKey": "RK_2", + "familyName": "B", + "qualifier": "D", + "timestampMicros": "96", + "value": "value-VAL_4" } ] }, { - "name": "two rows with splits, same timestamp", + "description": "two rows with splits, same timestamp", "chunks": [ - "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", - "value: \"alue-VAL_1\"\ncommit_row: true\n", - "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", - "value: \"alue-VAL_2\"\ncommit_row: true\n" + { + "rowKey": "UktfMQ==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dg==", + "valueSize": 11, + "commitRow": false + }, + { + "value": "YWx1ZS1WQUxfMQ==", + "commitRow": true + }, + { + "rowKey": "UktfMg==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dg==", + "valueSize": 11, + "commitRow": false + }, + { + "value": "YWx1ZS1WQUxfMg==", + "commitRow": true + } ], "results": [ { - "rk": "RK_1", - "fm": "A", - "qual": "C", - "ts": 100, - "value": "value-VAL_1", - "label": "", - "error": false + "rowKey": "RK_1", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL_1" }, { - "rk": "RK_2", - "fm": "A", - "qual": "C", - "ts": 100, - "value": "value-VAL_2", - "label": "", - "error": false + "rowKey": "RK_2", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL_2" } ] }, { - 
"name": "invalid - bare reset", + "description": "invalid - bare reset", "chunks": [ - "reset_row: true\n" + { + "resetRow": true + } ], "results": [ { - "rk": "", - "fm": "", - "qual": "", - "ts": 0, - "value": "", - "label": "", "error": true } ] }, { - "name": "invalid - bad reset, no commit", + "description": "invalid - bad reset, no commit", "chunks": [ - "reset_row: true\n", - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n" + { + "resetRow": true + }, + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": false + } ], "results": [ { - "rk": "", - "fm": "", - "qual": "", - "ts": 0, - "value": "", - "label": "", "error": true } ] }, { - "name": "invalid - missing key after reset", + "description": "invalid - missing key after reset", "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", - "reset_row: true\n", - "timestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": false + }, + { + "resetRow": true + }, + { + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": true + } ], "results": [ { - "rk": "", - "fm": "", - "qual": "", - "ts": 0, - "value": "", - "label": "", "error": true } ] }, { - "name": "no data after reset", + "description": "no data after reset", "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", - "reset_row: true\n" - ], - "results": null + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": false + }, + { + "resetRow": true + } + ] }, { - "name": "simple reset", + "description": "simple reset", "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", - "reset_row: true\n", - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": false + }, + { + "resetRow": true + }, + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": true + } ], "results": [ { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 100, - "value": "value-VAL", - "label": "", - "error": false + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL" } ] }, { - "name": "reset to new val", + "description": "reset to new val", "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", - "reset_row: true\n", - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: 
true\n" + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFMXzE=", + "commitRow": false + }, + { + "resetRow": true + }, + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFMXzI=", + "commitRow": true + } ], "results": [ { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 100, - "value": "value-VAL_2", - "label": "", - "error": false + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL_2" } ] }, { - "name": "reset to new qual", + "description": "reset to new qual", "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", - "reset_row: true\n", - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: true\n" + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFMXzE=", + "commitRow": false + }, + { + "resetRow": true + }, + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "RA==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFMXzE=", + "commitRow": true + } ], "results": [ { - "rk": "RK", - "fm": "A", - "qual": "D", - "ts": 100, - "value": "value-VAL_1", - "label": "", - "error": false + "rowKey": "RK", + "familyName": "A", + "qualifier": "D", + "timestampMicros": "100", + "value": "value-VAL_1" } ] }, { - "name": "reset with splits", + "description": "reset with splits", "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", - "timestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: false\n", - "reset_row: true\n", - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFMXzE=", + "commitRow": false + }, + { + "timestampMicros": "98", + "value": "dmFsdWUtVkFMXzI=", + "commitRow": false + }, + { + "resetRow": true + }, + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFMXzI=", + "commitRow": true + } ], "results": [ { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 100, - "value": "value-VAL_2", - "label": "", - "error": false + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL_2" } ] }, { - "name": "reset two cells", + "description": "reset two cells", "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", - "reset_row: true\n", - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: false\n", - "timestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: true\n" + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFMXzE=", + "commitRow": false + }, + { + "resetRow": true + }, + { + "rowKey": "Uks=", + 
"familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFMXzI=", + "commitRow": false + }, + { + "timestampMicros": "97", + "value": "dmFsdWUtVkFMXzM=", + "commitRow": true + } ], "results": [ { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 100, - "value": "value-VAL_2", - "label": "", - "error": false + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL_2" }, { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 103, - "value": "value-VAL_3", - "label": "", - "error": false + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "97", + "value": "value-VAL_3" } ] }, { - "name": "two resets", + "description": "two resets", "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", - "reset_row: true\n", - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: false\n", - "reset_row: true\n", - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_3\"\ncommit_row: true\n" + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFMXzE=", + "commitRow": false + }, + { + "resetRow": true + }, + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFMXzI=", + "commitRow": false + }, + { + "resetRow": true + }, + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFMXzM=", + "commitRow": true + } ], "results": [ { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 100, - "value": "value-VAL_3", - "label": "", - "error": false + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL_3" } ] }, { - "name": "reset then two cells", + "description": "reset then two cells", "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", - "reset_row: true\n", - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: false\n", - "qualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: true\n" + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFMXzE=", + "commitRow": false + }, + { + "resetRow": true + }, + { + "rowKey": "Uks=", + "familyName": "B", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFMXzI=", + "commitRow": false + }, + { + "qualifier": "RA==", + "timestampMicros": "97", + "value": "dmFsdWUtVkFMXzM=", + "commitRow": true + } ], "results": [ { - "rk": "RK", - "fm": "B", - "qual": "C", - "ts": 100, - "value": "value-VAL_2", - "label": "", - "error": false + "rowKey": "RK", + "familyName": "B", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL_2" }, { - "rk": "RK", - "fm": "B", - "qual": "D", - "ts": 103, - "value": "value-VAL_3", - "label": "", - "error": false + "rowKey": "RK", + "familyName": "B", + "qualifier": "D", + "timestampMicros": 
"97", + "value": "value-VAL_3" } ] }, { - "name": "reset to new row", + "description": "reset to new row", "chunks": [ - "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", - "reset_row: true\n", - "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + { + "rowKey": "UktfMQ==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFMXzE=", + "commitRow": false + }, + { + "resetRow": true + }, + { + "rowKey": "UktfMg==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFMXzI=", + "commitRow": true + } ], "results": [ { - "rk": "RK_2", - "fm": "A", - "qual": "C", - "ts": 100, - "value": "value-VAL_2", - "label": "", - "error": false + "rowKey": "RK_2", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL_2" } ] }, { - "name": "reset in between chunks", + "description": "reset in between chunks", "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nlabels: \"L\"\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", - "value: \"a\"\nvalue_size: 10\ncommit_row: false\n", - "reset_row: true\n", - "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: true\n" + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "labels": [ + "L" + ], + "value": "dg==", + "valueSize": 10, + "commitRow": false + }, + { + "value": "YQ==", + "valueSize": 10, + "commitRow": false + }, + { + "resetRow": true + }, + { + "rowKey": "UktfMQ==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFMXzE=", + "commitRow": true + } ], "results": [ { - "rk": "RK_1", - "fm": "A", - "qual": "C", - "ts": 100, - "value": "value-VAL_1", - "label": "", - "error": false + "rowKey": "RK_1", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL_1" } ] }, { - "name": "invalid - reset with chunk", + "description": "invalid - reset with chunk", "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nlabels: \"L\"\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", - "value: \"a\"\nvalue_size: 10\nreset_row: true\n" + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "labels": [ + "L" + ], + "value": "dg==", + "valueSize": 10, + "commitRow": false + }, + { + "value": "YQ==", + "valueSize": 10, + "resetRow": true + } ], "results": [ { - "rk": "", - "fm": "", - "qual": "", - "ts": 0, - "value": "", - "label": "", "error": true } ] }, { - "name": "invalid - commit with chunk", + "description": "invalid - commit with chunk", "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nlabels: \"L\"\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", - "value: \"a\"\nvalue_size: 10\ncommit_row: true\n" + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "labels": [ + "L" + ], + "value": "dg==", + "valueSize": 10, + 
"commitRow": false + }, + { + "value": "YQ==", + "valueSize": 10, + "commitRow": true + } ], "results": [ { - "rk": "", - "fm": "", - "qual": "", - "ts": 0, - "value": "", - "label": "", "error": true } ] }, { - "name": "empty cell chunk", + "description": "empty cell chunk", "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", - "commit_row: false\n", - "commit_row: true\n" - ], - "results": [ { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 100, - "value": "value-VAL", - "label": "", - "error": false + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": false }, { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 0, - "value": "", - "label": "", - "error": false + "commitRow": false }, { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 0, - "value": "", - "label": "", - "error": false + "commitRow": true } - ] - }, - { - "name": "empty second qualifier", - "chunks": [ - "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 99\nvalue: \"value-VAL_1\"\ncommit_row: false\n", - "qualifier: \u003c\n value: \"\"\n\u003e\ntimestamp_micros: 98\nvalue: \"value-VAL_2\"\ncommit_row: true\n" ], "results": [ { - "rk": "RK", - "fm": "A", - "qual": "C", - "ts": 99, - "value": "value-VAL_1", - "label": "", - "error": false + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL" }, { - "rk": "RK", - "fm": "A", - "qual": "", - "ts": 98, - "value": "value-VAL_2", - "label": "", - "error": false + "rowKey": "RK", + "familyName": "A", + "qualifier": "C" + }, + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C" } ] } diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_data.py b/packages/google-cloud-bigtable/tests/unit/test_row_data.py index 94a90aa24914..f087ff450ae8 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_data.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_data.py @@ -13,8 +13,6 @@ # limitations under the License. 
-import os - import mock import pytest @@ -24,7 +22,6 @@ ROW_KEY = b"row-key" FAMILY_NAME = "family" QUALIFIER = b"qualifier" -TIMESTAMP_MICROS = 100 VALUE = b"value" TABLE_NAME = "table_name" @@ -368,6 +365,51 @@ def test_partial_rows_data_constructor(): assert partial_rows_data.retry == DEFAULT_RETRY_READ_ROWS +def test_partial_rows_data_consume_all(): + resp = _ReadRowsResponseV2( + [ + _ReadRowsResponseCellChunkPB( + row_key=ROW_KEY, + family_name=FAMILY_NAME, + qualifier=QUALIFIER, + timestamp_micros=TIMESTAMP_MICROS, + value=VALUE, + commit_row=True, + ), + _ReadRowsResponseCellChunkPB( + row_key=ROW_KEY + b"2", + family_name=FAMILY_NAME, + qualifier=QUALIFIER, + timestamp_micros=TIMESTAMP_MICROS, + value=VALUE, + commit_row=True, + ), + ] + ) + + call_count = 0 + iterator = _MockCancellableIterator(resp) + + def fake_read(*args, **kwargs): + nonlocal call_count + call_count += 1 + return iterator + + partial_rows_data = _make_partial_rows_data(fake_read, None) + partial_rows_data.consume_all() + + row1 = _make_partial_row_data(ROW_KEY) + row1._cells[FAMILY_NAME] = { + QUALIFIER: [_make_cell(value=VALUE, timestamp_micros=TIMESTAMP_MICROS)] + } + row2 = _make_partial_row_data(ROW_KEY + b"2") + row2._cells[FAMILY_NAME] = { + QUALIFIER: [_make_cell(value=VALUE, timestamp_micros=TIMESTAMP_MICROS)] + } + + assert partial_rows_data.rows == {row1.row_key: row1, row2.row_key: row2} + + def test_partial_rows_data_constructor_with_retry(): from google.cloud.bigtable.row_data import DEFAULT_RETRY_READ_ROWS @@ -1122,304 +1164,6 @@ def test_RRRM_build_updated_request_row_ranges_valid(): assert len(updated_request.rows.row_ranges) > 0 -@pytest.fixture(scope="session") -def json_tests(): - dirname = os.path.dirname(__file__) - filename = os.path.join(dirname, "read-rows-acceptance-test.json") - raw = _parse_readrows_acceptance_tests(filename) - tests = {} - for (name, chunks, results) in raw: - tests[name] = chunks, results - - yield tests - - -# JSON Error cases: invalid chunks - - -def _fail_during_consume(json_tests, testcase_name): - from google.cloud.bigtable.row_data import InvalidChunk - - client = _Client() - chunks, results = json_tests[testcase_name] - response = _ReadRowsResponseV2(chunks) - iterator = _MockCancellableIterator(response) - client._data_stub = mock.MagicMock() - client._data_stub.ReadRows.side_effect = [iterator] - request = object() - prd = _make_partial_rows_data(client._data_stub.ReadRows, request) - with pytest.raises(InvalidChunk): - prd.consume_all() - expected_result = _sort_flattend_cells( - [result for result in results if not result["error"]] - ) - flattened = _sort_flattend_cells(_flatten_cells(prd)) - assert flattened == expected_result - - -def test_prd_json_accept_invalid_no_cell_key_before_commit(json_tests): - _fail_during_consume(json_tests, "invalid - no cell key before commit") - - -def test_prd_json_accept_invalid_no_cell_key_before_value(json_tests): - _fail_during_consume(json_tests, "invalid - no cell key before value") - - -def test_prd_json_accept_invalid_new_col_family_wo_qualifier(json_tests): - _fail_during_consume(json_tests, "invalid - new col family must specify qualifier") - - -def test_prd_json_accept_invalid_no_commit_between_rows(json_tests): - _fail_during_consume(json_tests, "invalid - no commit between rows") - - -def test_prd_json_accept_invalid_no_commit_after_first_row(json_tests): - _fail_during_consume(json_tests, "invalid - no commit after first row") - - -def test_prd_json_accept_invalid_duplicate_row_key(json_tests): - 
_fail_during_consume(json_tests, "invalid - duplicate row key") - - -def test_prd_json_accept_invalid_new_row_missing_row_key(json_tests): - _fail_during_consume(json_tests, "invalid - new row missing row key") - - -def test_prd_json_accept_invalid_bare_reset(json_tests): - _fail_during_consume(json_tests, "invalid - bare reset") - - -def test_prd_json_accept_invalid_bad_reset_no_commit(json_tests): - _fail_during_consume(json_tests, "invalid - bad reset, no commit") - - -def test_prd_json_accept_invalid_missing_key_after_reset(json_tests): - _fail_during_consume(json_tests, "invalid - missing key after reset") - - -def test_prd_json_accept_invalid_reset_with_chunk(json_tests): - _fail_during_consume(json_tests, "invalid - reset with chunk") - - -def test_prd_json_accept_invalid_commit_with_chunk(json_tests): - _fail_during_consume(json_tests, "invalid - commit with chunk") - - -# JSON Error cases: incomplete final row - - -def _sort_flattend_cells(flattened): - import operator - - key_func = operator.itemgetter("rk", "fm", "qual") - return sorted(flattened, key=key_func) - - -def _incomplete_final_row(json_tests, testcase_name): - client = _Client() - chunks, results = json_tests[testcase_name] - response = _ReadRowsResponseV2(chunks) - iterator = _MockCancellableIterator(response) - client._data_stub = mock.MagicMock() - client._data_stub.ReadRows.side_effect = [iterator] - request = object() - prd = _make_partial_rows_data(client._data_stub.ReadRows, request) - with pytest.raises(ValueError): - prd.consume_all() - assert prd.state == prd.ROW_IN_PROGRESS - expected_result = _sort_flattend_cells( - [result for result in results if not result["error"]] - ) - flattened = _sort_flattend_cells(_flatten_cells(prd)) - assert flattened == expected_result - - -def test_prd_json_accept_invalid_no_commit(json_tests): - _incomplete_final_row(json_tests, "invalid - no commit") - - -def test_prd_json_accept_invalid_last_row_missing_commit(json_tests): - _incomplete_final_row(json_tests, "invalid - last row missing commit") - - -# Non-error cases - -_marker = object() - - -def _match_results(json_tests, testcase_name, expected_result=_marker): - from google.cloud.bigtable_v2.services.bigtable import BigtableClient - - client = _Client() - chunks, results = json_tests[testcase_name] - response = _ReadRowsResponseV2(chunks) - iterator = _MockCancellableIterator(response) - data_api = mock.create_autospec(BigtableClient) - client._table_data_client = data_api - client._table_data_client.read_rows.side_effect = [iterator] - request = object() - prd = _make_partial_rows_data(client._table_data_client.read_rows, request) - prd.consume_all() - flattened = _sort_flattend_cells(_flatten_cells(prd)) - if expected_result is _marker: - expected_result = _sort_flattend_cells(results) - assert flattened == expected_result - - -def test_prd_json_accept_bare_commit_implies_ts_zero(json_tests): - _match_results(json_tests, "bare commit implies ts=0") - - -def test_prd_json_accept_simple_row_with_timestamp(json_tests): - _match_results(json_tests, "simple row with timestamp") - - -def test_prd_json_accept_missing_timestamp_implies_ts_zero(json_tests): - _match_results(json_tests, "missing timestamp, implied ts=0") - - -def test_prd_json_accept_empty_cell_value(json_tests): - _match_results(json_tests, "empty cell value") - - -def test_prd_json_accept_two_unsplit_cells(json_tests): - _match_results(json_tests, "two unsplit cells") - - -def test_prd_json_accept_two_qualifiers(json_tests): - _match_results(json_tests, "two 
qualifiers") - - -def test_prd_json_accept_two_families(json_tests): - _match_results(json_tests, "two families") - - -def test_prd_json_accept_with_labels(json_tests): - _match_results(json_tests, "with labels") - - -def test_prd_json_accept_split_cell_bare_commit(json_tests): - _match_results(json_tests, "split cell, bare commit") - - -def test_prd_json_accept_split_cell(json_tests): - _match_results(json_tests, "split cell") - - -def test_prd_json_accept_split_four_ways(json_tests): - _match_results(json_tests, "split four ways") - - -def test_prd_json_accept_two_split_cells(json_tests): - _match_results(json_tests, "two split cells") - - -def test_prd_json_accept_multi_qualifier_splits(json_tests): - _match_results(json_tests, "multi-qualifier splits") - - -def test_prd_json_accept_multi_qualifier_multi_split(json_tests): - _match_results(json_tests, "multi-qualifier multi-split") - - -def test_prd_json_accept_multi_family_split(json_tests): - _match_results(json_tests, "multi-family split") - - -def test_prd_json_accept_two_rows(json_tests): - _match_results(json_tests, "two rows") - - -def test_prd_json_accept_two_rows_implicit_timestamp(json_tests): - _match_results(json_tests, "two rows implicit timestamp") - - -def test_prd_json_accept_two_rows_empty_value(json_tests): - _match_results(json_tests, "two rows empty value") - - -def test_prd_json_accept_two_rows_one_with_multiple_cells(json_tests): - _match_results(json_tests, "two rows, one with multiple cells") - - -def test_prd_json_accept_two_rows_multiple_cells_multiple_families(json_tests): - _match_results(json_tests, "two rows, multiple cells, multiple families") - - -def test_prd_json_accept_two_rows_multiple_cells(json_tests): - _match_results(json_tests, "two rows, multiple cells") - - -def test_prd_json_accept_two_rows_four_cells_two_labels(json_tests): - _match_results(json_tests, "two rows, four cells, 2 labels") - - -def test_prd_json_accept_two_rows_with_splits_same_timestamp(json_tests): - _match_results(json_tests, "two rows with splits, same timestamp") - - -def test_prd_json_accept_no_data_after_reset(json_tests): - # JSON testcase has `"results": null` - _match_results(json_tests, "no data after reset", expected_result=[]) - - -def test_prd_json_accept_simple_reset(json_tests): - _match_results(json_tests, "simple reset") - - -def test_prd_json_accept_reset_to_new_val(json_tests): - _match_results(json_tests, "reset to new val") - - -def test_prd_json_accept_reset_to_new_qual(json_tests): - _match_results(json_tests, "reset to new qual") - - -def test_prd_json_accept_reset_with_splits(json_tests): - _match_results(json_tests, "reset with splits") - - -def test_prd_json_accept_two_resets(json_tests): - _match_results(json_tests, "two resets") - - -def test_prd_json_accept_reset_to_new_row(json_tests): - _match_results(json_tests, "reset to new row") - - -def test_prd_json_accept_reset_in_between_chunks(json_tests): - _match_results(json_tests, "reset in between chunks") - - -def test_prd_json_accept_empty_cell_chunk(json_tests): - _match_results(json_tests, "empty cell chunk") - - -def test_prd_json_accept_empty_second_qualifier(json_tests): - _match_results(json_tests, "empty second qualifier") - - -def _flatten_cells(prd): - # Match results format from JSON testcases. - # Doesn't handle error cases. 
- from google.cloud._helpers import _bytes_to_unicode - from google.cloud._helpers import _microseconds_from_datetime - - for row_key, row in prd.rows.items(): - for family_name, family in row.cells.items(): - for qualifier, column in family.items(): - for cell in column: - yield { - "rk": _bytes_to_unicode(row_key), - "fm": family_name, - "qual": _bytes_to_unicode(qualifier), - "ts": _microseconds_from_datetime(cell.timestamp), - "value": _bytes_to_unicode(cell.value), - "label": " ".join(cell.labels), - "error": False, - } - - class _MockCancellableIterator(object): cancel_calls = 0 @@ -1481,25 +1225,6 @@ def _generate_cell_chunks(chunk_text_pbs): return chunks -def _parse_readrows_acceptance_tests(filename): - """Parse acceptance tests from JSON - - See - https://github.com/googleapis/python-bigtable/blob/main/\ - tests/unit/read-rows-acceptance-test.json - """ - import json - - with open(filename) as json_file: - test_json = json.load(json_file) - - for test in test_json["tests"]: - name = test["name"] - chunks = _generate_cell_chunks(test["chunks"]) - results = test["results"] - yield name, chunks, results - - def _ReadRowsResponseCellChunkPB(*args, **kw): from google.cloud.bigtable_v2.types import bigtable as messages_v2_pb2 diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_merger.py b/packages/google-cloud-bigtable/tests/unit/test_row_merger.py new file mode 100644 index 000000000000..f336a82ffee5 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/test_row_merger.py @@ -0,0 +1,78 @@ +import os +from itertools import zip_longest +from typing import List + +import proto +import pytest + +from google.cloud.bigtable.row_data import PartialRowsData, PartialRowData, InvalidChunk +from google.cloud.bigtable_v2.types.bigtable import ReadRowsResponse + + +# TODO: autogenerate protos from +# https://github.com/googleapis/conformance-tests/blob/main/bigtable/v2/proto/google/cloud/conformance/bigtable/v2/tests.proto +class ReadRowsTest(proto.Message): + class Result(proto.Message): + row_key = proto.Field(proto.STRING, number=1) + family_name = proto.Field(proto.STRING, number=2) + qualifier = proto.Field(proto.STRING, number=3) + timestamp_micros = proto.Field(proto.INT64, number=4) + value = proto.Field(proto.STRING, number=5) + label = proto.Field(proto.STRING, number=6) + error = proto.Field(proto.BOOL, number=7) + + description = proto.Field(proto.STRING, number=1) + chunks = proto.RepeatedField( + proto.MESSAGE, number=2, message=ReadRowsResponse.CellChunk + ) + results = proto.RepeatedField(proto.MESSAGE, number=3, message=Result) + + +class TestFile(proto.Message): + __test__ = False + read_rows_tests = proto.RepeatedField(proto.MESSAGE, number=1, message=ReadRowsTest) + + +def parse_readrows_acceptance_tests(): + dirname = os.path.dirname(__file__) + filename = os.path.join(dirname, "read-rows-acceptance-test.json") + + with open(filename) as json_file: + test_json = TestFile.from_json(json_file.read()) + return test_json.read_rows_tests + + +def extract_results_from_row(row: PartialRowData): + results = [] + for family, col_dict in row.cells.items(): + for col, cells in col_dict.items(): + for cell in cells: + results.append( + ReadRowsTest.Result( + row_key=row.row_key, + family_name=family, + qualifier=col, + timestamp_micros=cell.timestamp_micros, + value=cell.value, + label=(cell.labels[0] if cell.labels else ""), + ) + ) + return results + + +@pytest.mark.parametrize( + "test_case", parse_readrows_acceptance_tests(), ids=lambda t: t.description +) +def 
test_scenario(test_case: ReadRowsTest): + def fake_read(*args, **kwargs): + return iter([ReadRowsResponse(chunks=test_case.chunks)]) + + actual_results: List[ReadRowsTest.Result] = [] + try: + for row in PartialRowsData(fake_read, request=None): + actual_results.extend(extract_results_from_row(row)) + except (InvalidChunk, ValueError): + actual_results.append(ReadRowsTest.Result(error=True)) + + for expected, actual in zip_longest(test_case.results, actual_results): + assert actual == expected From 61ac12ef6919e4cb805449940d5d8e92b70d9a15 Mon Sep 17 00:00:00 2001 From: Mariatta Wijaya Date: Mon, 8 Aug 2022 12:50:23 -0700 Subject: [PATCH 637/892] Fix: Retry the RST Stream error in mutate rows and read rows(#624) Fix: Retry the RST Stream error in mutate rows and read rows In mutate_rows and read_rows, Internal Server with RST Stream errors is considered transient, and should be retried. --- .../google/cloud/bigtable/row_data.py | 24 +++++++- .../google/cloud/bigtable/table.py | 25 +++++++-- .../tests/unit/test_row_data.py | 25 +++++++++ .../tests/unit/test_table.py | 55 ++++++++++++++++++- 4 files changed, 123 insertions(+), 6 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py index ab0358285312..62ef5a20179c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py @@ -332,10 +332,32 @@ class InvalidRetryRequest(RuntimeError): """Exception raised when retry request is invalid.""" +RETRYABLE_INTERNAL_ERROR_MESSAGES = ( + "rst_stream", + "rst stream", + "received unexpected eos on data frame from server", +) +"""Internal error messages that can be retried during read row and mutation.""" + + +def _retriable_internal_server_error(exc): + """ + Return True if the internal server error is retriable. 
+ """ + return isinstance(exc, exceptions.InternalServerError) and any( + retryable_message in exc.message.lower() + for retryable_message in RETRYABLE_INTERNAL_ERROR_MESSAGES + ) + + def _retry_read_rows_exception(exc): + """Return True if the exception is retriable for read row requests.""" if isinstance(exc, grpc.RpcError): exc = exceptions.from_grpc_error(exc) - return isinstance(exc, (exceptions.ServiceUnavailable, exceptions.DeadlineExceeded)) + + return _retriable_internal_server_error(exc) or isinstance( + exc, (exceptions.ServiceUnavailable, exceptions.DeadlineExceeded) + ) DEFAULT_RETRY_READ_ROWS = retry.Retry( diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index fddd04809925..8605992baf45 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -23,6 +23,7 @@ from google.api_core.exceptions import NotFound from google.api_core.exceptions import RetryError from google.api_core.exceptions import ServiceUnavailable +from google.api_core.exceptions import InternalServerError from google.api_core.gapic_v1.method import DEFAULT from google.api_core.retry import if_exception_type from google.api_core.retry import Retry @@ -37,7 +38,10 @@ from google.cloud.bigtable.row import AppendRow from google.cloud.bigtable.row import ConditionalRow from google.cloud.bigtable.row import DirectRow -from google.cloud.bigtable.row_data import PartialRowsData +from google.cloud.bigtable.row_data import ( + PartialRowsData, + _retriable_internal_server_error, +) from google.cloud.bigtable.row_data import DEFAULT_RETRY_READ_ROWS from google.cloud.bigtable.row_set import RowSet from google.cloud.bigtable.row_set import RowRange @@ -55,9 +59,15 @@ _MAX_BULK_MUTATIONS = 100000 VIEW_NAME_ONLY = enums.Table.View.NAME_ONLY -RETRYABLE_MUTATION_ERRORS = (Aborted, DeadlineExceeded, ServiceUnavailable) +RETRYABLE_MUTATION_ERRORS = ( + Aborted, + DeadlineExceeded, + ServiceUnavailable, + InternalServerError, +) """Errors which can be retried during row mutation.""" + RETRYABLE_CODES: Set[int] = set() for retryable in RETRYABLE_MUTATION_ERRORS: @@ -1130,11 +1140,18 @@ def _do_mutate_retryable_rows(self): retry=None, **kwargs ) - except RETRYABLE_MUTATION_ERRORS: + except RETRYABLE_MUTATION_ERRORS as exc: # If an exception, considered retryable by `RETRYABLE_MUTATION_ERRORS`, is # returned from the initial call, consider # it to be retryable. Wrap as a Bigtable Retryable Error. 
- raise _BigtableRetryableError + # For InternalServerError, it is only retriable if the message is related to RST Stream messages + if _retriable_internal_server_error(exc) or not isinstance( + exc, InternalServerError + ): + raise _BigtableRetryableError + else: + # re-raise the original exception + raise num_responses = 0 num_retryable_responses = 0 diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_data.py b/packages/google-cloud-bigtable/tests/unit/test_row_data.py index f087ff450ae8..9175bf479ef9 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_data.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_data.py @@ -310,6 +310,31 @@ def test__retry_read_rows_exception_deadline_exceeded(): assert _retry_read_rows_exception(exception) +def test__retry_read_rows_exception_internal_server_not_retriable(): + from google.api_core.exceptions import InternalServerError + from google.cloud.bigtable.row_data import ( + _retry_read_rows_exception, + RETRYABLE_INTERNAL_ERROR_MESSAGES, + ) + + err_message = "500 Error" + exception = InternalServerError(err_message) + assert err_message not in RETRYABLE_INTERNAL_ERROR_MESSAGES + assert not _retry_read_rows_exception(exception) + + +def test__retry_read_rows_exception_internal_server_retriable(): + from google.api_core.exceptions import InternalServerError + from google.cloud.bigtable.row_data import ( + _retry_read_rows_exception, + RETRYABLE_INTERNAL_ERROR_MESSAGES, + ) + + for err_message in RETRYABLE_INTERNAL_ERROR_MESSAGES: + exception = InternalServerError(err_message) + assert _retry_read_rows_exception(exception) + + def test__retry_read_rows_exception_miss_wrapped_in_grpc(): from google.api_core.exceptions import Conflict from google.cloud.bigtable.row_data import _retry_read_rows_exception diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index a89e02e8c261..e66a8f0f6c9c 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -47,6 +47,7 @@ RETRYABLE_3 = StatusCode.UNAVAILABLE.value[0] RETRYABLES = (RETRYABLE_1, RETRYABLE_2, RETRYABLE_3) NON_RETRYABLE = StatusCode.CANCELLED.value[0] +STATUS_INTERNAL = StatusCode.INTERNAL.value[0] @mock.patch("google.cloud.bigtable.table._MAX_BULK_MUTATIONS", new=3) @@ -1636,6 +1637,7 @@ def _do_mutate_retryable_rows_helper( raising_retry=False, retryable_error=False, timeout=None, + mutate_rows_side_effect=None, ): from google.api_core.exceptions import ServiceUnavailable from google.cloud.bigtable.row import DirectRow @@ -1664,8 +1666,13 @@ def _do_mutate_retryable_rows_helper( data_api = client._table_data_client = _make_data_api() if retryable_error: - data_api.mutate_rows.side_effect = ServiceUnavailable("testing") + if mutate_rows_side_effect is not None: + data_api.mutate_rows.side_effect = mutate_rows_side_effect + else: + data_api.mutate_rows.side_effect = ServiceUnavailable("testing") else: + if mutate_rows_side_effect is not None: + data_api.mutate_rows.side_effect = mutate_rows_side_effect data_api.mutate_rows.return_value = [response] worker = _make_worker(client, table.name, rows=rows) @@ -1785,6 +1792,52 @@ def test_rmrw_do_mutate_retryable_rows_w_retryable_error(): ) +def test_rmrw_do_mutate_retryable_rows_w_retryable_error_internal_rst_stream_error(): + # Mutate two rows + # Raise internal server error with RST STREAM error messages + # There should be no error raised and that the request is retried + from 
google.api_core.exceptions import InternalServerError + from google.cloud.bigtable.row_data import RETRYABLE_INTERNAL_ERROR_MESSAGES + + row_cells = [ + (b"row_key_1", ("cf", b"col", b"value1")), + (b"row_key_2", ("cf", b"col", b"value2")), + ] + responses = () + + for retryable_internal_error_message in RETRYABLE_INTERNAL_ERROR_MESSAGES: + for message in [ + retryable_internal_error_message, + retryable_internal_error_message.upper(), + ]: + _do_mutate_retryable_rows_helper( + row_cells, + responses, + retryable_error=True, + mutate_rows_side_effect=InternalServerError(message), + ) + + +def test_rmrw_do_mutate_rows_w_retryable_error_internal_not_retryable(): + # Mutate two rows + # Raise internal server error but not RST STREAM error messages + # mutate_rows should raise Internal Server Error + from google.api_core.exceptions import InternalServerError + + row_cells = [ + (b"row_key_1", ("cf", b"col", b"value1")), + (b"row_key_2", ("cf", b"col", b"value2")), + ] + responses = () + + with pytest.raises(InternalServerError): + _do_mutate_retryable_rows_helper( + row_cells, + responses, + mutate_rows_side_effect=InternalServerError("Error not retryable."), + ) + + def test_rmrw_do_mutate_retryable_rows_retry(): # # Setup: From 98a8d79703bf1eef7fdf7b832dedec8ee2e74479 Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Mon, 8 Aug 2022 13:28:59 -0700 Subject: [PATCH 638/892] chore(main): release 2.11.1 (#627) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- packages/google-cloud-bigtable/CHANGELOG.md | 7 +++++++ packages/google-cloud-bigtable/setup.py | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 38584bcbdb2f..66b8c072eae1 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,13 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.11.1](https://github.com/googleapis/python-bigtable/compare/v2.11.0...v2.11.1) (2022-08-08) + + +### Bug Fixes + +* Retry the RST Stream error in mutate rows and read rows([#624](https://github.com/googleapis/python-bigtable/issues/624)) ([d24574a](https://github.com/googleapis/python-bigtable/commit/d24574a722de61bdeffa6588bcb08f56e62ba3bd)) + ## [2.11.0](https://github.com/googleapis/python-bigtable/compare/v2.10.1...v2.11.0) (2022-08-04) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 8087dddb6618..e72d28c49f1d 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -22,7 +22,7 @@ name = "google-cloud-bigtable" description = "Google Cloud Bigtable API client library" -version = "2.11.0" +version = "2.11.1" # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' From 3364c3caa567b397071d6d2a0601757197a0c8dd Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 9 Aug 2022 13:31:13 +0200 Subject: [PATCH 639/892] chore(deps): update all dependencies (#626) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore(deps): update all dependencies * Keep protobuf <4 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md Co-authored-by: Mariatta Wijaya Co-authored-by: Owl Bot --- 
.../google-cloud-bigtable/samples/metricscaler/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index e19a2bc6a67e..5cb91e8a00ac 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ google-cloud-bigtable==2.11.0 -google-cloud-monitoring==2.10.1 +google-cloud-monitoring==2.11.0 From 82a087757db49d5f3830618c78b98f15474de443 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Tue, 9 Aug 2022 21:08:27 -0400 Subject: [PATCH 640/892] chore(deps): update actions/setup-python action to v4 [autoapprove] (#629) Source-Link: https://github.com/googleapis/synthtool/commit/8e55b327bae44b6640c7ab4be91df85fc4d6fe8a Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:c6c965a4bf40c19011b11f87dbc801a66d3a23fbc6704102be064ef31c51f1c3 Co-authored-by: Owl Bot --- packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml | 4 ++-- packages/google-cloud-bigtable/.github/workflows/docs.yml | 4 ++-- packages/google-cloud-bigtable/.github/workflows/lint.yml | 2 +- packages/google-cloud-bigtable/.github/workflows/unittest.yml | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 0eb02fda4c09..c701359fc58c 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. 
docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:9db98b055a7f8bd82351238ccaacfd3cda58cdf73012ab58b8da146368330021 -# created: 2022-07-25T16:02:49.174178716Z + digest: sha256:c6c965a4bf40c19011b11f87dbc801a66d3a23fbc6704102be064ef31c51f1c3 +# created: 2022-08-09T15:58:56.463048506Z diff --git a/packages/google-cloud-bigtable/.github/workflows/docs.yml b/packages/google-cloud-bigtable/.github/workflows/docs.yml index b46d7305d8cf..7092a139aed3 100644 --- a/packages/google-cloud-bigtable/.github/workflows/docs.yml +++ b/packages/google-cloud-bigtable/.github/workflows/docs.yml @@ -10,7 +10,7 @@ jobs: - name: Checkout uses: actions/checkout@v3 - name: Setup Python - uses: actions/setup-python@v3 + uses: actions/setup-python@v4 with: python-version: "3.10" - name: Install nox @@ -26,7 +26,7 @@ jobs: - name: Checkout uses: actions/checkout@v3 - name: Setup Python - uses: actions/setup-python@v3 + uses: actions/setup-python@v4 with: python-version: "3.10" - name: Install nox diff --git a/packages/google-cloud-bigtable/.github/workflows/lint.yml b/packages/google-cloud-bigtable/.github/workflows/lint.yml index f512a4960beb..d2aee5b7d8ec 100644 --- a/packages/google-cloud-bigtable/.github/workflows/lint.yml +++ b/packages/google-cloud-bigtable/.github/workflows/lint.yml @@ -10,7 +10,7 @@ jobs: - name: Checkout uses: actions/checkout@v3 - name: Setup Python - uses: actions/setup-python@v3 + uses: actions/setup-python@v4 with: python-version: "3.10" - name: Install nox diff --git a/packages/google-cloud-bigtable/.github/workflows/unittest.yml b/packages/google-cloud-bigtable/.github/workflows/unittest.yml index 5531b0141297..87ade4d54362 100644 --- a/packages/google-cloud-bigtable/.github/workflows/unittest.yml +++ b/packages/google-cloud-bigtable/.github/workflows/unittest.yml @@ -13,7 +13,7 @@ jobs: - name: Checkout uses: actions/checkout@v3 - name: Setup Python - uses: actions/setup-python@v3 + uses: actions/setup-python@v4 with: python-version: ${{ matrix.python }} - name: Install nox @@ -39,7 +39,7 @@ jobs: - name: Checkout uses: actions/checkout@v3 - name: Setup Python - uses: actions/setup-python@v3 + uses: actions/setup-python@v4 with: python-version: "3.10" - name: Install coverage From bed1431846516dff3879b21c3b65f6570eec66de Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Wed, 10 Aug 2022 18:57:30 +0200 Subject: [PATCH 641/892] chore(deps): update all dependencies to v2.11.1 (#630) * chore(deps): update all dependencies to v2.11.1 * revert Co-authored-by: Anthonios Partheniou --- packages/google-cloud-bigtable/samples/beam/requirements.txt | 2 +- packages/google-cloud-bigtable/samples/hello/requirements.txt | 2 +- .../samples/instanceadmin/requirements.txt | 2 +- .../google-cloud-bigtable/samples/metricscaler/requirements.txt | 2 +- .../google-cloud-bigtable/samples/quickstart/requirements.txt | 2 +- .../samples/snippets/deletes/requirements.txt | 2 +- .../samples/snippets/filters/requirements.txt | 2 +- .../samples/snippets/reads/requirements.txt | 2 +- .../samples/snippets/writes/requirements.txt | 2 +- .../google-cloud-bigtable/samples/tableadmin/requirements.txt | 2 +- 10 files changed, 10 insertions(+), 10 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index 2ebc0a65e342..51505470df1e 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 
+1,3 @@ apache-beam==2.40.0 -google-cloud-bigtable==2.11.0 +google-cloud-bigtable==2.11.1 google-cloud-core==2.3.2 diff --git a/packages/google-cloud-bigtable/samples/hello/requirements.txt b/packages/google-cloud-bigtable/samples/hello/requirements.txt index 5e3ffcd7136f..8ef6b6a24300 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.11.0 +google-cloud-bigtable==2.11.1 google-cloud-core==2.3.2 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt index 834b19ce3ac5..361a63138825 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.11.0 +google-cloud-bigtable==2.11.1 backoff==2.1.2 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index 5cb91e8a00ac..b4ba5b8d146d 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.11.0 +google-cloud-bigtable==2.11.1 google-cloud-monitoring==2.11.0 diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt index e3d90808e56c..0b646227b112 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.11.0 +google-cloud-bigtable==2.11.1 diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt index 9dd0f2b92591..a916dd4e508f 100644 --- a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.11.0 +google-cloud-bigtable==2.11.1 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt index 9dd0f2b92591..a916dd4e508f 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.11.0 +google-cloud-bigtable==2.11.1 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt index 9dd0f2b92591..a916dd4e508f 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.11.0 +google-cloud-bigtable==2.11.1 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt index b22cdacc68d0..d6cfeaa9d908 100644 --- 
a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.11.0 \ No newline at end of file +google-cloud-bigtable==2.11.1 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt index e3d90808e56c..0b646227b112 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.11.0 +google-cloud-bigtable==2.11.1 From 4116990fe83c93a64d38af09a53fdaf299dd74f0 Mon Sep 17 00:00:00 2001 From: Anthonios Partheniou Date: Thu, 11 Aug 2022 14:32:12 -0400 Subject: [PATCH 642/892] fix(deps): allow protobuf < 5.0.0 (#631) fix(deps): require proto-plus >= 1.22.0 --- packages/google-cloud-bigtable/setup.py | 4 ++-- packages/google-cloud-bigtable/testing/constraints-3.7.txt | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index e72d28c49f1d..2bb760746287 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -32,8 +32,8 @@ "google-api-core[grpc] >= 1.32.0, <3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*", "google-cloud-core >= 1.4.1, <3.0.0dev", "grpc-google-iam-v1 >= 0.12.4, <1.0.0dev", - "proto-plus >= 1.18.0, <2.0.0dev", - "protobuf >= 3.19.0, <4.0.0dev", + "proto-plus >= 1.22.0, <2.0.0dev", + "protobuf >= 3.19.0, <5.0.0dev", ] extras = {"libcst": "libcst >= 0.2.5"} diff --git a/packages/google-cloud-bigtable/testing/constraints-3.7.txt b/packages/google-cloud-bigtable/testing/constraints-3.7.txt index 4847b9e04b42..f9281ed96797 100644 --- a/packages/google-cloud-bigtable/testing/constraints-3.7.txt +++ b/packages/google-cloud-bigtable/testing/constraints-3.7.txt @@ -8,7 +8,7 @@ google-api-core==1.32.0 google-cloud-core==1.4.1 grpc-google-iam-v1==0.12.4 -proto-plus==1.18.0 +proto-plus==1.22.0 libcst==0.2.5 protobuf==3.19.0 From e3ffb92c44d701c64091b24b5c57d68f6225a605 Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Tue, 16 Aug 2022 14:13:38 -0400 Subject: [PATCH 643/892] chore(main): release 2.11.2 (#632) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- packages/google-cloud-bigtable/CHANGELOG.md | 8 ++++++++ packages/google-cloud-bigtable/setup.py | 2 +- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 66b8c072eae1..4b635e9c237c 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,14 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.11.2](https://github.com/googleapis/python-bigtable/compare/v2.11.1...v2.11.2) (2022-08-11) + + +### Bug Fixes + +* **deps:** allow protobuf < 5.0.0 ([#631](https://github.com/googleapis/python-bigtable/issues/631)) ([fd54fc6](https://github.com/googleapis/python-bigtable/commit/fd54fc63340a3e01fae1ccc4c648dd90900f8a94)) +* **deps:** require proto-plus >= 1.22.0 ([fd54fc6](https://github.com/googleapis/python-bigtable/commit/fd54fc63340a3e01fae1ccc4c648dd90900f8a94)) + ## 
[2.11.1](https://github.com/googleapis/python-bigtable/compare/v2.11.0...v2.11.1) (2022-08-08) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 2bb760746287..af5b7359bba1 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -22,7 +22,7 @@ name = "google-cloud-bigtable" description = "Google Cloud Bigtable API client library" -version = "2.11.1" +version = "2.11.2" # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' From 164d549627821c7f6f5eb1e33ff62487cb13dd44 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Wed, 17 Aug 2022 16:29:57 +0200 Subject: [PATCH 644/892] chore(deps): update dependency google-cloud-monitoring to v2.11.1 (#635) --- .../google-cloud-bigtable/samples/metricscaler/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index b4ba5b8d146d..dc8a48fb24f0 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ google-cloud-bigtable==2.11.1 -google-cloud-monitoring==2.11.0 +google-cloud-monitoring==2.11.1 From 8b186f2734e5ff3a4a1933c1aa8b1b7d7b4344c2 Mon Sep 17 00:00:00 2001 From: Igor Bernstein Date: Wed, 17 Aug 2022 13:48:32 -0400 Subject: [PATCH 645/892] chore: move row value classes out of row_data (#633) * chore: move row value classes out of row_data This is in preparation for extracting row merging into a separate class. See https://github.com/googleapis/python-bigtable/pull/628 Co-authored-by: Anthonios Partheniou --- .../google/cloud/bigtable/row.py | 252 +++++++++++++++++ .../google/cloud/bigtable/row_data.py | 257 +----------------- 2 files changed, 257 insertions(+), 252 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row.py index 9127a1aae695..752458a08a79 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row.py @@ -28,6 +28,15 @@ MAX_MUTATIONS = 100000 """The maximum number of mutations that a row can accumulate.""" +_MISSING_COLUMN_FAMILY = "Column family {} is not among the cells stored in this row." +_MISSING_COLUMN = ( + "Column {} is not among the cells stored in this row in the column family {}." +) +_MISSING_INDEX = ( + "Index {!r} is not valid for the cells stored in this row for column {} " + "in the column family {}. There are {} such cells." +) + class Row(object): """Base representation of a Google Cloud Bigtable Row. @@ -1013,3 +1022,246 @@ def _parse_family_pb(family_pb): cells.append(val_pair) return family_pb.name, result + + +class PartialRowData(object): + """Representation of partial row in a Google Cloud Bigtable Table. + + These are expected to be updated directly from a + :class:`._generated.bigtable_service_messages_pb2.ReadRowsResponse` + + :type row_key: bytes + :param row_key: The key for the row holding the (partial) data. 
+ """ + + def __init__(self, row_key): + self._row_key = row_key + self._cells = {} + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return other._row_key == self._row_key and other._cells == self._cells + + def __ne__(self, other): + return not self == other + + def to_dict(self): + """Convert the cells to a dictionary. + + This is intended to be used with HappyBase, so the column family and + column qualiers are combined (with ``:``). + + :rtype: dict + :returns: Dictionary containing all the data in the cells of this row. + """ + result = {} + for column_family_id, columns in self._cells.items(): + for column_qual, cells in columns.items(): + key = _to_bytes(column_family_id) + b":" + _to_bytes(column_qual) + result[key] = cells + return result + + @property + def cells(self): + """Property returning all the cells accumulated on this partial row. + + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_api_row_data_cells] + :end-before: [END bigtable_api_row_data_cells] + :dedent: 4 + + :rtype: dict + :returns: Dictionary of the :class:`Cell` objects accumulated. This + dictionary has two-levels of keys (first for column families + and second for column names/qualifiers within a family). For + a given column, a list of :class:`Cell` objects is stored. + """ + return self._cells + + @property + def row_key(self): + """Getter for the current (partial) row's key. + + :rtype: bytes + :returns: The current (partial) row's key. + """ + return self._row_key + + def find_cells(self, column_family_id, column): + """Get a time series of cells stored on this instance. + + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_api_row_find_cells] + :end-before: [END bigtable_api_row_find_cells] + :dedent: 4 + + Args: + column_family_id (str): The ID of the column family. Must be of the + form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + column (bytes): The column within the column family where the cells + are located. + + Returns: + List[~google.cloud.bigtable.row_data.Cell]: The cells stored in the + specified column. + + Raises: + KeyError: If ``column_family_id`` is not among the cells stored + in this row. + KeyError: If ``column`` is not among the cells stored in this row + for the given ``column_family_id``. + """ + try: + column_family = self._cells[column_family_id] + except KeyError: + raise KeyError(_MISSING_COLUMN_FAMILY.format(column_family_id)) + + try: + cells = column_family[column] + except KeyError: + raise KeyError(_MISSING_COLUMN.format(column, column_family_id)) + + return cells + + def cell_value(self, column_family_id, column, index=0): + """Get a single cell value stored on this instance. + + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_api_row_cell_value] + :end-before: [END bigtable_api_row_cell_value] + :dedent: 4 + + Args: + column_family_id (str): The ID of the column family. Must be of the + form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + column (bytes): The column within the column family where the cell + is located. + index (Optional[int]): The offset within the series of values. If + not specified, will return the first cell. + + Returns: + ~google.cloud.bigtable.row_data.Cell value: The cell value stored + in the specified column and specified index. + + Raises: + KeyError: If ``column_family_id`` is not among the cells stored + in this row. 
+ KeyError: If ``column`` is not among the cells stored in this row + for the given ``column_family_id``. + IndexError: If ``index`` cannot be found within the cells stored + in this row for the given ``column_family_id``, ``column`` + pair. + """ + cells = self.find_cells(column_family_id, column) + + try: + cell = cells[index] + except (TypeError, IndexError): + num_cells = len(cells) + msg = _MISSING_INDEX.format(index, column, column_family_id, num_cells) + raise IndexError(msg) + + return cell.value + + def cell_values(self, column_family_id, column, max_count=None): + """Get a time series of cells stored on this instance. + + For example: + + .. literalinclude:: snippets_table.py + :start-after: [START bigtable_api_row_cell_values] + :end-before: [END bigtable_api_row_cell_values] + :dedent: 4 + + Args: + column_family_id (str): The ID of the column family. Must be of the + form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + column (bytes): The column within the column family where the cells + are located. + max_count (int): The maximum number of cells to use. + + Returns: + A generator which provides: cell.value, cell.timestamp_micros + for each cell in the list of cells + + Raises: + KeyError: If ``column_family_id`` is not among the cells stored + in this row. + KeyError: If ``column`` is not among the cells stored in this row + for the given ``column_family_id``. + """ + cells = self.find_cells(column_family_id, column) + if max_count is None: + max_count = len(cells) + + for index, cell in enumerate(cells): + if index == max_count: + break + + yield cell.value, cell.timestamp_micros + + +class Cell(object): + """Representation of a Google Cloud Bigtable Cell. + + :type value: bytes + :param value: The value stored in the cell. + + :type timestamp_micros: int + :param timestamp_micros: The timestamp_micros when the cell was stored. + + :type labels: list + :param labels: (Optional) List of strings. Labels applied to the cell. + """ + + def __init__(self, value, timestamp_micros, labels=None): + self.value = value + self.timestamp_micros = timestamp_micros + self.labels = list(labels) if labels is not None else [] + + @classmethod + def from_pb(cls, cell_pb): + """Create a new cell from a Cell protobuf. + + :type cell_pb: :class:`._generated.data_pb2.Cell` + :param cell_pb: The protobuf to convert. + + :rtype: :class:`Cell` + :returns: The cell corresponding to the protobuf. 
+ """ + if cell_pb.labels: + return cls(cell_pb.value, cell_pb.timestamp_micros, labels=cell_pb.labels) + else: + return cls(cell_pb.value, cell_pb.timestamp_micros) + + @property + def timestamp(self): + return _datetime_from_microseconds(self.timestamp_micros) + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return ( + other.value == self.value + and other.timestamp_micros == self.timestamp_micros + and other.labels == self.labels + ) + + def __ne__(self, other): + return not self == other + + def __repr__(self): + return "<{name} value={value!r} timestamp={timestamp}>".format( + name=self.__class__.__name__, value=self.value, timestamp=self.timestamp + ) + + +class InvalidChunk(RuntimeError): + """Exception raised to invalid chunk data from back-end.""" diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py index 62ef5a20179c..e7d3d5bd4e81 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py @@ -21,74 +21,15 @@ from google.api_core import exceptions from google.api_core import retry -from google.cloud._helpers import _datetime_from_microseconds # type: ignore from google.cloud._helpers import _to_bytes # type: ignore from google.cloud.bigtable_v2.types import bigtable as data_messages_v2_pb2 from google.cloud.bigtable_v2.types import data as data_v2_pb2 +from google.cloud.bigtable.row import Cell, InvalidChunk, PartialRowData -_MISSING_COLUMN_FAMILY = "Column family {} is not among the cells stored in this row." -_MISSING_COLUMN = ( - "Column {} is not among the cells stored in this row in the " "column family {}." -) -_MISSING_INDEX = ( - "Index {!r} is not valid for the cells stored in this row for column {} " - "in the column family {}. There are {} such cells." -) - - -class Cell(object): - """Representation of a Google Cloud Bigtable Cell. - - :type value: bytes - :param value: The value stored in the cell. - - :type timestamp_micros: int - :param timestamp_micros: The timestamp_micros when the cell was stored. - - :type labels: list - :param labels: (Optional) List of strings. Labels applied to the cell. - """ - - def __init__(self, value, timestamp_micros, labels=None): - self.value = value - self.timestamp_micros = timestamp_micros - self.labels = list(labels) if labels is not None else [] - - @classmethod - def from_pb(cls, cell_pb): - """Create a new cell from a Cell protobuf. - - :type cell_pb: :class:`._generated.data_pb2.Cell` - :param cell_pb: The protobuf to convert. - - :rtype: :class:`Cell` - :returns: The cell corresponding to the protobuf. - """ - if cell_pb.labels: - return cls(cell_pb.value, cell_pb.timestamp_micros, labels=cell_pb.labels) - else: - return cls(cell_pb.value, cell_pb.timestamp_micros) - - @property - def timestamp(self): - return _datetime_from_microseconds(self.timestamp_micros) - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return NotImplemented - return ( - other.value == self.value - and other.timestamp_micros == self.timestamp_micros - and other.labels == self.labels - ) - - def __ne__(self, other): - return not self == other - - def __repr__(self): - return "<{name} value={value!r} timestamp={timestamp}>".format( - name=self.__class__.__name__, value=self.value, timestamp=self.timestamp - ) +# Some classes need to be re-exported here to keep backwards +# compatibility. 
Those classes were moved to row_merger, but we dont want to +# break enduser's imports. This hack, ensures they don't get marked as unused. +_ = (Cell, InvalidChunk, PartialRowData) class PartialCellData(object): @@ -136,198 +77,10 @@ def append_value(self, value): self.value += value -class PartialRowData(object): - """Representation of partial row in a Google Cloud Bigtable Table. - - These are expected to be updated directly from a - :class:`._generated.bigtable_service_messages_pb2.ReadRowsResponse` - - :type row_key: bytes - :param row_key: The key for the row holding the (partial) data. - """ - - def __init__(self, row_key): - self._row_key = row_key - self._cells = {} - - def __eq__(self, other): - if not isinstance(other, self.__class__): - return NotImplemented - return other._row_key == self._row_key and other._cells == self._cells - - def __ne__(self, other): - return not self == other - - def to_dict(self): - """Convert the cells to a dictionary. - - This is intended to be used with HappyBase, so the column family and - column qualiers are combined (with ``:``). - - :rtype: dict - :returns: Dictionary containing all the data in the cells of this row. - """ - result = {} - for column_family_id, columns in self._cells.items(): - for column_qual, cells in columns.items(): - key = _to_bytes(column_family_id) + b":" + _to_bytes(column_qual) - result[key] = cells - return result - - @property - def cells(self): - """Property returning all the cells accumulated on this partial row. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_api_row_data_cells] - :end-before: [END bigtable_api_row_data_cells] - :dedent: 4 - - :rtype: dict - :returns: Dictionary of the :class:`Cell` objects accumulated. This - dictionary has two-levels of keys (first for column families - and second for column names/qualifiers within a family). For - a given column, a list of :class:`Cell` objects is stored. - """ - return self._cells - - @property - def row_key(self): - """Getter for the current (partial) row's key. - - :rtype: bytes - :returns: The current (partial) row's key. - """ - return self._row_key - - def find_cells(self, column_family_id, column): - """Get a time series of cells stored on this instance. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_api_row_find_cells] - :end-before: [END bigtable_api_row_find_cells] - :dedent: 4 - - Args: - column_family_id (str): The ID of the column family. Must be of the - form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - column (bytes): The column within the column family where the cells - are located. - - Returns: - List[~google.cloud.bigtable.row_data.Cell]: The cells stored in the - specified column. - - Raises: - KeyError: If ``column_family_id`` is not among the cells stored - in this row. - KeyError: If ``column`` is not among the cells stored in this row - for the given ``column_family_id``. - """ - try: - column_family = self._cells[column_family_id] - except KeyError: - raise KeyError(_MISSING_COLUMN_FAMILY.format(column_family_id)) - - try: - cells = column_family[column] - except KeyError: - raise KeyError(_MISSING_COLUMN.format(column, column_family_id)) - - return cells - - def cell_value(self, column_family_id, column, index=0): - """Get a single cell value stored on this instance. - - For example: - - .. 
literalinclude:: snippets_table.py - :start-after: [START bigtable_api_row_cell_value] - :end-before: [END bigtable_api_row_cell_value] - :dedent: 4 - - Args: - column_family_id (str): The ID of the column family. Must be of the - form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - column (bytes): The column within the column family where the cell - is located. - index (Optional[int]): The offset within the series of values. If - not specified, will return the first cell. - - Returns: - ~google.cloud.bigtable.row_data.Cell value: The cell value stored - in the specified column and specified index. - - Raises: - KeyError: If ``column_family_id`` is not among the cells stored - in this row. - KeyError: If ``column`` is not among the cells stored in this row - for the given ``column_family_id``. - IndexError: If ``index`` cannot be found within the cells stored - in this row for the given ``column_family_id``, ``column`` - pair. - """ - cells = self.find_cells(column_family_id, column) - - try: - cell = cells[index] - except (TypeError, IndexError): - num_cells = len(cells) - msg = _MISSING_INDEX.format(index, column, column_family_id, num_cells) - raise IndexError(msg) - - return cell.value - - def cell_values(self, column_family_id, column, max_count=None): - """Get a time series of cells stored on this instance. - - For example: - - .. literalinclude:: snippets_table.py - :start-after: [START bigtable_api_row_cell_values] - :end-before: [END bigtable_api_row_cell_values] - :dedent: 4 - - Args: - column_family_id (str): The ID of the column family. Must be of the - form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - column (bytes): The column within the column family where the cells - are located. - max_count (int): The maximum number of cells to use. - - Returns: - A generator which provides: cell.value, cell.timestamp_micros - for each cell in the list of cells - - Raises: - KeyError: If ``column_family_id`` is not among the cells stored - in this row. - KeyError: If ``column`` is not among the cells stored in this row - for the given ``column_family_id``. - """ - cells = self.find_cells(column_family_id, column) - if max_count is None: - max_count = len(cells) - - for index, cell in enumerate(cells): - if index == max_count: - break - - yield cell.value, cell.timestamp_micros - - class InvalidReadRowsResponse(RuntimeError): """Exception raised to invalid response data from back-end.""" -class InvalidChunk(RuntimeError): - """Exception raised to invalid chunk data from back-end.""" - - class InvalidRetryRequest(RuntimeError): """Exception raised when retry request is invalid.""" From 4a203deca2290f39a1def93b413cec3eb159b2b4 Mon Sep 17 00:00:00 2001 From: Igor Bernstein Date: Wed, 17 Aug 2022 16:04:52 -0400 Subject: [PATCH 646/892] perf: optimize row merging (#628) This PR rewrites the row merging logic to be more correct and improve performance: - extract row merging logic into its own class to simplify complexity of ReadRows handling - Use OrderedDict instead of dict() for `{family: { qualifier: [] }}` data, this should maintain serverside ordering (family in creation order and qualifier in lexiographical). 
- define an explicit state machine with states implemented as methods - add various optimizations like: - __slots__ on hot objects to avoid dict lookups - avoiding dict lookups for contiguous family and qualifier keys Overall this improves performance by 20% and in my opinion is a lot more readable --- .../google/cloud/bigtable/row_data.py | 206 +++------------ .../google/cloud/bigtable/row_merger.py | 250 ++++++++++++++++++ .../tests/unit/test_row_data.py | 141 ++-------- .../tests/unit/test_row_merger.py | 152 +++++++++++ 4 files changed, 470 insertions(+), 279 deletions(-) create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/row_merger.py diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py index e7d3d5bd4e81..a50fab1ee325 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py @@ -18,45 +18,25 @@ import copy import grpc # type: ignore - +import warnings from google.api_core import exceptions from google.api_core import retry from google.cloud._helpers import _to_bytes # type: ignore + +from google.cloud.bigtable.row_merger import _RowMerger, _State from google.cloud.bigtable_v2.types import bigtable as data_messages_v2_pb2 from google.cloud.bigtable_v2.types import data as data_v2_pb2 from google.cloud.bigtable.row import Cell, InvalidChunk, PartialRowData + # Some classes need to be re-exported here to keep backwards # compatibility. Those classes were moved to row_merger, but we dont want to # break enduser's imports. This hack, ensures they don't get marked as unused. _ = (Cell, InvalidChunk, PartialRowData) -class PartialCellData(object): - """Representation of partial cell in a Google Cloud Bigtable Table. - - These are expected to be updated directly from a - :class:`._generated.bigtable_service_messages_pb2.ReadRowsResponse` - - :type row_key: bytes - :param row_key: The key for the row holding the (partial) cell. - - :type family_name: str - :param family_name: The family name of the (partial) cell. - - :type qualifier: bytes - :param qualifier: The column qualifier of the (partial) cell. - - :type timestamp_micros: int - :param timestamp_micros: The timestamp (in microsecods) of the - (partial) cell. - - :type labels: list of str - :param labels: labels assigned to the (partial) cell - - :type value: bytes - :param value: The (accumulated) value of the (partial) cell. - """ +class PartialCellData(object): # pragma: NO COVER + """This class is no longer used and will be removed in the future""" def __init__( self, row_key, family_name, qualifier, timestamp_micros, labels=(), value=b"" @@ -69,11 +49,6 @@ def __init__( self.value = value def append_value(self, value): - """Append bytes from a new chunk to value. 
- - :type value: bytes - :param value: bytes to append - """ self.value += value @@ -168,14 +143,7 @@ class PartialRowsData(object): def __init__(self, read_method, request, retry=DEFAULT_RETRY_READ_ROWS): # Counter for rows returned to the user self._counter = 0 - # In-progress row, unset until first response, after commit/reset - self._row = None - # Last complete row, unset until first commit - self._previous_row = None - # In-progress cell, unset until first response, after completion - self._cell = None - # Last complete cell, unset until first completion, after new row - self._previous_cell = None + self._row_merger = _RowMerger() # May be cached from previous response self.last_scanned_row_key = None @@ -192,20 +160,35 @@ def __init__(self, read_method, request, retry=DEFAULT_RETRY_READ_ROWS): self.response_iterator = read_method(request, timeout=self.retry._deadline + 1) self.rows = {} - self._state = self.STATE_NEW_ROW # Flag to stop iteration, for any reason not related to self.retry() self._cancelled = False @property - def state(self): - """State machine state. - - :rtype: str - :returns: name of state corresponding to current row / chunk - processing. + def state(self): # pragma: NO COVER + """ + DEPRECATED: this property is deprecated and will be removed in the + future. """ - return self.read_states[self._state] + warnings.warn( + "`PartialRowsData#state()` is deprecated and will be removed in the future", + DeprecationWarning, + stacklevel=2, + ) + + # Best effort: try to map internal RowMerger states to old strings for + # backwards compatibility + internal_state = self._row_merger.state + if internal_state == _State.ROW_START: + return self.NEW_ROW + # note: _State.CELL_START, _State.CELL_COMPLETE are transient states + # and will not be visible in between chunks + elif internal_state == _State.CELL_IN_PROGRESS: + return self.CELL_IN_PROGRESS + elif internal_state == _State.ROW_COMPLETE: + return self.NEW_ROW + else: + raise RuntimeError("unexpected internal state: " + self._) def cancel(self): """Cancels the iterator, closing the stream.""" @@ -241,6 +224,7 @@ def _on_error(self, exc): if self.last_scanned_row_key: retry_request = self._create_retry_request() + self._row_merger = _RowMerger(self._row_merger.last_seen_row_key) self.response_iterator = self.read_method(retry_request) def _read_next(self): @@ -266,125 +250,23 @@ def __iter__(self): try: response = self._read_next_response() except StopIteration: - if self.state != self.NEW_ROW: - raise ValueError("The row remains partial / is not committed.") + self._row_merger.finalize() break except InvalidRetryRequest: self._cancelled = True break - for chunk in response.chunks: + for row in self._row_merger.process_chunks(response): + self.last_scanned_row_key = self._row_merger.last_seen_row_key + self._counter += 1 + + yield row + if self._cancelled: break - self._process_chunk(chunk) - if chunk.commit_row: - self.last_scanned_row_key = self._previous_row.row_key - self._counter += 1 - yield self._previous_row - - resp_last_key = response.last_scanned_row_key - if resp_last_key and resp_last_key > self.last_scanned_row_key: - self.last_scanned_row_key = resp_last_key - - def _process_chunk(self, chunk): - if chunk.reset_row: - self._validate_chunk_reset_row(chunk) - self._row = None - self._cell = self._previous_cell = None - self._state = self.STATE_NEW_ROW - return - - self._update_cell(chunk) - - if self._row is None: - if ( - self._previous_row is not None - and self._cell.row_key <= self._previous_row.row_key - 
): - raise InvalidChunk() - self._row = PartialRowData(self._cell.row_key) - - if chunk.value_size == 0: - self._state = self.STATE_ROW_IN_PROGRESS - self._save_current_cell() - else: - self._state = self.STATE_CELL_IN_PROGRESS - - if chunk.commit_row: - if chunk.value_size > 0: - raise InvalidChunk() - - self._previous_row = self._row - self._row = None - self._previous_cell = None - self._state = self.STATE_NEW_ROW - - def _update_cell(self, chunk): - if self._cell is None: - qualifier = None - if chunk.HasField("qualifier"): - qualifier = chunk.qualifier.value - - family = None - if chunk.HasField("family_name"): - family = chunk.family_name.value - - self._cell = PartialCellData( - chunk.row_key, - family, - qualifier, - chunk.timestamp_micros, - chunk.labels, - chunk.value, - ) - self._copy_from_previous(self._cell) - self._validate_cell_data_new_cell() - else: - self._cell.append_value(chunk.value) - - def _validate_cell_data_new_cell(self): - cell = self._cell - if not cell.row_key or not cell.family_name or cell.qualifier is None: - raise InvalidChunk() - - prev = self._previous_cell - if prev and prev.row_key != cell.row_key: - raise InvalidChunk() - - def _validate_chunk_reset_row(self, chunk): - # No reset for new row - _raise_if(self._state == self.STATE_NEW_ROW) - - # No reset with other keys - _raise_if(chunk.row_key) - _raise_if(chunk.HasField("family_name")) - _raise_if(chunk.HasField("qualifier")) - _raise_if(chunk.timestamp_micros) - _raise_if(chunk.labels) - _raise_if(chunk.value_size) - _raise_if(chunk.value) - _raise_if(chunk.commit_row) - - def _save_current_cell(self): - """Helper for :meth:`consume_next`.""" - row, cell = self._row, self._cell - family = row._cells.setdefault(cell.family_name, {}) - qualified = family.setdefault(cell.qualifier, []) - complete = Cell.from_pb(cell) - qualified.append(complete) - self._cell, self._previous_cell = None, cell - - def _copy_from_previous(self, cell): - """Helper for :meth:`consume_next`.""" - previous = self._previous_cell - if previous is not None: - if not cell.row_key: - cell.row_key = previous.row_key - if not cell.family_name: - cell.family_name = previous.family_name - # NOTE: ``cell.qualifier`` **can** be empty string. - if cell.qualifier is None: - cell.qualifier = previous.qualifier + # The last response might not have generated any rows, but it + # could've updated last_scanned_row_key + self.last_scanned_row_key = self._row_merger.last_seen_row_key class _ReadRowsRequestManager(object): @@ -494,9 +376,3 @@ def _start_key_set(row_range): def _end_key_set(row_range): """Helper for :meth:`_filter_row_ranges`""" return row_range.end_key_open or row_range.end_key_closed - - -def _raise_if(predicate, *args): - """Helper for validation methods.""" - if predicate: - raise InvalidChunk(*args) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_merger.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_merger.py new file mode 100644 index 000000000000..515b91df7ef2 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_merger.py @@ -0,0 +1,250 @@ +from enum import Enum +from collections import OrderedDict +from google.cloud.bigtable.row import Cell, PartialRowData, InvalidChunk + +_MISSING_COLUMN_FAMILY = "Column family {} is not among the cells stored in this row." +_MISSING_COLUMN = ( + "Column {} is not among the cells stored in this row in the column family {}." 
+) +_MISSING_INDEX = ( + "Index {!r} is not valid for the cells stored in this row for column {} " + "in the column family {}. There are {} such cells." +) + + +class _State(Enum): + ROW_START = "ROW_START" + CELL_START = "CELL_START" + CELL_IN_PROGRESS = "CELL_IN_PROGRESS" + CELL_COMPLETE = "CELL_COMPLETE" + ROW_COMPLETE = "ROW_COMPLETE" + + +class _PartialRow(object): + __slots__ = [ + "row_key", + "cells", + "last_family", + "last_family_cells", + "last_qualifier", + "last_qualifier_cells", + "cell", + ] + + def __init__(self, row_key): + self.row_key = row_key + self.cells = OrderedDict() + + self.last_family = None + self.last_family_cells = OrderedDict() + self.last_qualifier = None + self.last_qualifier_cells = [] + + self.cell = None + + +class _PartialCell(object): + __slots__ = ["family", "qualifier", "timestamp", "labels", "value", "value_index"] + + def __init__(self): + self.family = None + self.qualifier = None + self.timestamp = None + self.labels = None + self.value = None + self.value_index = 0 + + +class _RowMerger(object): + """ + State machine to merge chunks from a response stream into logical rows. + + The implementation is a fairly linear state machine that is implemented as + a method for every state in the _State enum. In general the states flow + from top to bottom with some repetition. Each state handler will do some + sanity checks, update in progress data and set the next state. + + There can be multiple state transitions for each chunk, i.e. a single chunk + row will flow from ROW_START -> CELL_START -> CELL_COMPLETE -> ROW_COMPLETE + in a single iteration. + """ + + __slots__ = ["state", "last_seen_row_key", "row"] + + def __init__(self, last_seen_row=b""): + self.last_seen_row_key = last_seen_row + self.state = _State.ROW_START + self.row = None + + def process_chunks(self, response): + """ + Process the chunks in the given response and yield logical rows. + This class will maintain state across multiple response protos. 
+ """ + if response.last_scanned_row_key: + if self.last_seen_row_key >= response.last_scanned_row_key: + raise InvalidChunk("Last scanned row key is out of order") + self.last_seen_row_key = response.last_scanned_row_key + + for chunk in response.chunks: + if chunk.reset_row: + self._handle_reset(chunk) + continue + + if self.state == _State.ROW_START: + self._handle_row_start(chunk) + + if self.state == _State.CELL_START: + self._handle_cell_start(chunk) + + if self.state == _State.CELL_IN_PROGRESS: + self._handle_cell_in_progress(chunk) + + if self.state == _State.CELL_COMPLETE: + self._handle_cell_complete(chunk) + + if self.state == _State.ROW_COMPLETE: + yield self._handle_row_complete(chunk) + elif chunk.commit_row: + raise InvalidChunk( + f"Chunk tried to commit row in wrong state (${self.state})" + ) + + def _handle_reset(self, chunk): + if self.state == _State.ROW_START: + raise InvalidChunk("Bare reset") + if chunk.row_key: + raise InvalidChunk("Reset chunk has a row key") + if chunk.HasField("family_name"): + raise InvalidChunk("Reset chunk has family_name") + if chunk.HasField("qualifier"): + raise InvalidChunk("Reset chunk has qualifier") + if chunk.timestamp_micros: + raise InvalidChunk("Reset chunk has a timestamp") + if chunk.labels: + raise InvalidChunk("Reset chunk has labels") + if chunk.value: + raise InvalidChunk("Reset chunk has a value") + + self.state = _State.ROW_START + self.row = None + + def _handle_row_start(self, chunk): + if not chunk.row_key: + raise InvalidChunk("New row is missing a row key") + if self.last_seen_row_key and self.last_seen_row_key >= chunk.row_key: + raise InvalidChunk("Out of order row keys") + + self.row = _PartialRow(chunk.row_key) + self.state = _State.CELL_START + + def _handle_cell_start(self, chunk): + # Ensure that all chunks after the first one either are missing a row + # key or the row is the same + if self.row.cells and chunk.row_key and chunk.row_key != self.row.row_key: + raise InvalidChunk("row key changed mid row") + + if not self.row.cell: + self.row.cell = _PartialCell() + + # Cells can inherit family/qualifier from previous cells + # However if the family changes, then qualifier must be specified as well + if chunk.HasField("family_name"): + self.row.cell.family = chunk.family_name.value + self.row.cell.qualifier = None + if not self.row.cell.family: + raise InvalidChunk("missing family for a new cell") + + if chunk.HasField("qualifier"): + self.row.cell.qualifier = chunk.qualifier.value + if self.row.cell.qualifier is None: + raise InvalidChunk("missing qualifier for a new cell") + + self.row.cell.timestamp = chunk.timestamp_micros + self.row.cell.labels = chunk.labels + + if chunk.value_size > 0: + # explicitly avoid pre-allocation as it seems that bytearray + # concatenation performs better than slice copies. + self.row.cell.value = bytearray() + self.state = _State.CELL_IN_PROGRESS + else: + self.row.cell.value = chunk.value + self.state = _State.CELL_COMPLETE + + def _handle_cell_in_progress(self, chunk): + # if this isn't the first cell chunk, make sure that everything except + # the value stayed constant. 
+ if self.row.cell.value_index > 0: + if chunk.row_key: + raise InvalidChunk("found row key mid cell") + if chunk.HasField("family_name"): + raise InvalidChunk("In progress cell had a family name") + if chunk.HasField("qualifier"): + raise InvalidChunk("In progress cell had a qualifier") + if chunk.timestamp_micros: + raise InvalidChunk("In progress cell had a timestamp") + if chunk.labels: + raise InvalidChunk("In progress cell had labels") + + self.row.cell.value += chunk.value + self.row.cell.value_index += len(chunk.value) + + if chunk.value_size > 0: + self.state = _State.CELL_IN_PROGRESS + else: + self.row.cell.value = bytes(self.row.cell.value) + self.state = _State.CELL_COMPLETE + + def _handle_cell_complete(self, chunk): + # since we are guaranteed that all family & qualifier cells are + # contiguous, we can optimize away the dict lookup by caching the last + # family/qualifier and simply comparing and appending + family_changed = False + if self.row.last_family != self.row.cell.family: + family_changed = True + self.row.last_family = self.row.cell.family + self.row.cells[ + self.row.cell.family + ] = self.row.last_family_cells = OrderedDict() + + if family_changed or self.row.last_qualifier != self.row.cell.qualifier: + self.row.last_qualifier = self.row.cell.qualifier + self.row.last_family_cells[ + self.row.cell.qualifier + ] = self.row.last_qualifier_cells = [] + + self.row.last_qualifier_cells.append( + Cell( + self.row.cell.value, + self.row.cell.timestamp, + self.row.cell.labels, + ) + ) + + self.row.cell.timestamp = 0 + self.row.cell.value = None + self.row.cell.value_index = 0 + + if not chunk.commit_row: + self.state = _State.CELL_START + else: + self.state = _State.ROW_COMPLETE + + def _handle_row_complete(self, chunk): + new_row = PartialRowData(self.row.row_key) + new_row._cells = self.row.cells + + self.last_seen_row_key = new_row.row_key + self.row = None + self.state = _State.ROW_START + + return new_row + + def finalize(self): + """ + Must be called at the end of the stream to ensure there are no unmerged + rows. 
+ """ + if self.row or self.state != _State.ROW_START: + raise ValueError("The row remains partial / is not committed.") diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_data.py b/packages/google-cloud-bigtable/tests/unit/test_row_data.py index 9175bf479ef9..382a81ef1ddd 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_data.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_data.py @@ -630,78 +630,6 @@ def test_partial_rows_data_cancel_between_chunks(): assert list(yrd) == [] -# 'consume_next' tested via 'TestPartialRowsData_JSON_acceptance_tests' - - -def test_partial_rows_data__copy_from_previous_unset(): - client = _Client() - client._data_stub = mock.MagicMock() - request = object() - yrd = _make_partial_rows_data(client._data_stub.read_rows, request) - cell = _PartialCellData() - yrd._copy_from_previous(cell) - assert cell.row_key == b"" - assert cell.family_name == "" - assert cell.qualifier is None - assert cell.timestamp_micros == 0 - assert cell.labels == [] - - -def test_partial_rows_data__copy_from_previous_blank(): - ROW_KEY = "RK" - FAMILY_NAME = "A" - QUALIFIER = b"C" - TIMESTAMP_MICROS = 100 - LABELS = ["L1", "L2"] - client = _Client() - client._data_stub = mock.MagicMock() - request = object() - yrd = _make_partial_rows_data(client._data_stub.ReadRows, request) - cell = _PartialCellData( - row_key=ROW_KEY, - family_name=FAMILY_NAME, - qualifier=QUALIFIER, - timestamp_micros=TIMESTAMP_MICROS, - labels=LABELS, - ) - yrd._previous_cell = _PartialCellData() - yrd._copy_from_previous(cell) - assert cell.row_key == ROW_KEY - assert cell.family_name == FAMILY_NAME - assert cell.qualifier == QUALIFIER - assert cell.timestamp_micros == TIMESTAMP_MICROS - assert cell.labels == LABELS - - -def test_partial_rows_data__copy_from_previous_filled(): - from google.cloud.bigtable_v2.services.bigtable import BigtableClient - - ROW_KEY = "RK" - FAMILY_NAME = "A" - QUALIFIER = b"C" - TIMESTAMP_MICROS = 100 - LABELS = ["L1", "L2"] - client = _Client() - data_api = mock.create_autospec(BigtableClient) - client._data_stub = data_api - request = object() - yrd = _make_partial_rows_data(client._data_stub.read_rows, request) - yrd._previous_cell = _PartialCellData( - row_key=ROW_KEY, - family_name=FAMILY_NAME, - qualifier=QUALIFIER, - timestamp_micros=TIMESTAMP_MICROS, - labels=LABELS, - ) - cell = _PartialCellData() - yrd._copy_from_previous(cell) - assert cell.row_key == ROW_KEY - assert cell.family_name == FAMILY_NAME - assert cell.qualifier == QUALIFIER - assert cell.timestamp_micros == 0 - assert cell.labels == [] - - def test_partial_rows_data_valid_last_scanned_row_key_on_start(): client = _Client() response = _ReadRowsResponseV2([], last_scanned_row_key=b"2.AFTER") @@ -732,38 +660,36 @@ def test_partial_rows_data_invalid_empty_chunk(): def test_partial_rows_data_state_cell_in_progress(): - from google.cloud.bigtable_v2.services.bigtable import BigtableClient - from google.cloud.bigtable_v2.types import bigtable as messages_v2_pb2 - - LABELS = ["L1", "L2"] - - request = object() - client = _Client() - client._data_stub = mock.create_autospec(BigtableClient) - yrd = _make_partial_rows_data(client._data_stub.read_rows, request) - - chunk = _ReadRowsResponseCellChunkPB( - row_key=ROW_KEY, - family_name=FAMILY_NAME, - qualifier=QUALIFIER, - timestamp_micros=TIMESTAMP_MICROS, - value=VALUE, - labels=LABELS, + labels = ["L1", "L2"] + resp = _ReadRowsResponseV2( + [ + _ReadRowsResponseCellChunkPB( + row_key=ROW_KEY, + family_name=FAMILY_NAME, + 
qualifier=QUALIFIER, + timestamp_micros=TIMESTAMP_MICROS, + value=VALUE, + value_size=(2 * len(VALUE)), + labels=labels, + ), + _ReadRowsResponseCellChunkPB(value=VALUE, commit_row=True), + ] ) - # _update_cell expects to be called after the protoplus wrapper has been - # shucked - chunk = messages_v2_pb2.ReadRowsResponse.CellChunk.pb(chunk) - yrd._update_cell(chunk) - more_cell_data = _ReadRowsResponseCellChunkPB(value=VALUE) - yrd._update_cell(more_cell_data) + def fake_read(*args, **kwargs): + return iter([resp]) + + yrd = _make_partial_rows_data(fake_read, None) + yrd.consume_all() - assert yrd._cell.row_key == ROW_KEY - assert yrd._cell.family_name == FAMILY_NAME - assert yrd._cell.qualifier == QUALIFIER - assert yrd._cell.timestamp_micros == TIMESTAMP_MICROS - assert yrd._cell.labels == LABELS - assert yrd._cell.value == VALUE + VALUE + expected_row = _make_partial_row_data(ROW_KEY) + expected_row._cells = { + QUALIFIER: [ + _make_cell( + value=(VALUE + VALUE), timestamp_micros=TIMESTAMP_MICROS, labels=labels + ) + ] + } def test_partial_rows_data_yield_rows_data(): @@ -1215,19 +1141,6 @@ def next(self): __next__ = next -class _PartialCellData(object): - - row_key = b"" - family_name = "" - qualifier = None - timestamp_micros = 0 - last_scanned_row_key = "" - - def __init__(self, **kw): - self.labels = kw.pop("labels", []) - self.__dict__.update(kw) - - def _ReadRowsResponseV2(chunks, last_scanned_row_key=b""): from google.cloud.bigtable_v2.types import bigtable as messages_v2_pb2 diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_merger.py b/packages/google-cloud-bigtable/tests/unit/test_row_merger.py index f336a82ffee5..483c04536666 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_merger.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_merger.py @@ -7,6 +7,7 @@ from google.cloud.bigtable.row_data import PartialRowsData, PartialRowData, InvalidChunk from google.cloud.bigtable_v2.types.bigtable import ReadRowsResponse +from google.cloud.bigtable.row_merger import _RowMerger # TODO: autogenerate protos from @@ -76,3 +77,154 @@ def fake_read(*args, **kwargs): for expected, actual in zip_longest(test_case.results, actual_results): assert actual == expected + + +def test_out_of_order_rows(): + row_merger = _RowMerger(last_seen_row=b"z") + with pytest.raises(InvalidChunk): + list(row_merger.process_chunks(ReadRowsResponse(last_scanned_row_key=b"a"))) + + +def test_bare_reset(): + first_chunk = ReadRowsResponse.CellChunk( + ReadRowsResponse.CellChunk( + row_key=b"a", family_name="f", qualifier=b"q", value=b"v" + ) + ) + with pytest.raises(InvalidChunk): + _process_chunks( + first_chunk, + ReadRowsResponse.CellChunk( + ReadRowsResponse.CellChunk(reset_row=True, row_key=b"a") + ), + ) + with pytest.raises(InvalidChunk): + _process_chunks( + first_chunk, + ReadRowsResponse.CellChunk( + ReadRowsResponse.CellChunk(reset_row=True, family_name="f") + ), + ) + with pytest.raises(InvalidChunk): + _process_chunks( + first_chunk, + ReadRowsResponse.CellChunk( + ReadRowsResponse.CellChunk(reset_row=True, qualifier=b"q") + ), + ) + with pytest.raises(InvalidChunk): + _process_chunks( + first_chunk, + ReadRowsResponse.CellChunk( + ReadRowsResponse.CellChunk(reset_row=True, timestamp_micros=1000) + ), + ) + with pytest.raises(InvalidChunk): + _process_chunks( + first_chunk, + ReadRowsResponse.CellChunk( + ReadRowsResponse.CellChunk(reset_row=True, labels=["a"]) + ), + ) + with pytest.raises(InvalidChunk): + _process_chunks( + first_chunk, + 
ReadRowsResponse.CellChunk( + ReadRowsResponse.CellChunk(reset_row=True, value=b"v") + ), + ) + + +def test_missing_family(): + with pytest.raises(InvalidChunk): + _process_chunks( + ReadRowsResponse.CellChunk( + row_key=b"a", + qualifier=b"q", + timestamp_micros=1000, + value=b"v", + commit_row=True, + ) + ) + + +def test_mid_cell_row_key_change(): + with pytest.raises(InvalidChunk): + _process_chunks( + ReadRowsResponse.CellChunk( + row_key=b"a", + family_name="f", + qualifier=b"q", + timestamp_micros=1000, + value_size=2, + value=b"v", + ), + ReadRowsResponse.CellChunk(row_key=b"b", value=b"v", commit_row=True), + ) + + +def test_mid_cell_family_change(): + with pytest.raises(InvalidChunk): + _process_chunks( + ReadRowsResponse.CellChunk( + row_key=b"a", + family_name="f", + qualifier=b"q", + timestamp_micros=1000, + value_size=2, + value=b"v", + ), + ReadRowsResponse.CellChunk(family_name="f2", value=b"v", commit_row=True), + ) + + +def test_mid_cell_qualifier_change(): + with pytest.raises(InvalidChunk): + _process_chunks( + ReadRowsResponse.CellChunk( + row_key=b"a", + family_name="f", + qualifier=b"q", + timestamp_micros=1000, + value_size=2, + value=b"v", + ), + ReadRowsResponse.CellChunk(qualifier=b"q2", value=b"v", commit_row=True), + ) + + +def test_mid_cell_timestamp_change(): + with pytest.raises(InvalidChunk): + _process_chunks( + ReadRowsResponse.CellChunk( + row_key=b"a", + family_name="f", + qualifier=b"q", + timestamp_micros=1000, + value_size=2, + value=b"v", + ), + ReadRowsResponse.CellChunk( + timestamp_micros=2000, value=b"v", commit_row=True + ), + ) + + +def test_mid_cell_labels_change(): + with pytest.raises(InvalidChunk): + _process_chunks( + ReadRowsResponse.CellChunk( + row_key=b"a", + family_name="f", + qualifier=b"q", + timestamp_micros=1000, + value_size=2, + value=b"v", + ), + ReadRowsResponse.CellChunk(labels=["b"], value=b"v", commit_row=True), + ) + + +def _process_chunks(*chunks): + req = ReadRowsResponse.pb(ReadRowsResponse(chunks=chunks)) + return list(_RowMerger().process_chunks(req)) From 97df5b723880ac687dc92b6c497e0f6aa5493be6 Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Wed, 17 Aug 2022 13:50:26 -0700 Subject: [PATCH 647/892] chore(main): release 2.11.3 (#638) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- packages/google-cloud-bigtable/CHANGELOG.md | 7 +++++++ packages/google-cloud-bigtable/setup.py | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 4b635e9c237c..5783517a78b6 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,13 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.11.3](https://github.com/googleapis/python-bigtable/compare/v2.11.2...v2.11.3) (2022-08-17) + + +### Performance Improvements + +* optimize row merging ([#628](https://github.com/googleapis/python-bigtable/issues/628)) ([c71ec70](https://github.com/googleapis/python-bigtable/commit/c71ec70e55f6e236e46127870a9ed4717eee5da5)) + ## [2.11.2](https://github.com/googleapis/python-bigtable/compare/v2.11.1...v2.11.2) (2022-08-11) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index af5b7359bba1..2c98c154b8e8 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -22,7 +22,7 
@@ name = "google-cloud-bigtable" description = "Google Cloud Bigtable API client library" -version = "2.11.2" +version = "2.11.3" # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' From 7e717a42176640d74222f4206643fe4728b17d7f Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Fri, 19 Aug 2022 19:24:57 +0200 Subject: [PATCH 648/892] chore(deps): update dependency google-cloud-bigtable to v2.11.3 (#640) --- packages/google-cloud-bigtable/samples/beam/requirements.txt | 2 +- packages/google-cloud-bigtable/samples/hello/requirements.txt | 2 +- .../samples/instanceadmin/requirements.txt | 2 +- .../google-cloud-bigtable/samples/metricscaler/requirements.txt | 2 +- .../google-cloud-bigtable/samples/quickstart/requirements.txt | 2 +- .../samples/snippets/deletes/requirements.txt | 2 +- .../samples/snippets/filters/requirements.txt | 2 +- .../samples/snippets/reads/requirements.txt | 2 +- .../samples/snippets/writes/requirements.txt | 2 +- .../google-cloud-bigtable/samples/tableadmin/requirements.txt | 2 +- 10 files changed, 10 insertions(+), 10 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index 51505470df1e..d8ad5b20bef3 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ apache-beam==2.40.0 -google-cloud-bigtable==2.11.1 +google-cloud-bigtable==2.11.3 google-cloud-core==2.3.2 diff --git a/packages/google-cloud-bigtable/samples/hello/requirements.txt b/packages/google-cloud-bigtable/samples/hello/requirements.txt index 8ef6b6a24300..9e2138c78312 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.11.1 +google-cloud-bigtable==2.11.3 google-cloud-core==2.3.2 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt index 361a63138825..5a6a44405c57 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.11.1 +google-cloud-bigtable==2.11.3 backoff==2.1.2 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index dc8a48fb24f0..1b63203a9225 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.11.1 +google-cloud-bigtable==2.11.3 google-cloud-monitoring==2.11.1 diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt index 0b646227b112..101c6d532dac 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.11.1 +google-cloud-bigtable==2.11.3 diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt index a916dd4e508f..bcbe5975a1e2 100644 --- a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt +++ 
b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.11.1 +google-cloud-bigtable==2.11.3 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt index a916dd4e508f..bcbe5975a1e2 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.11.1 +google-cloud-bigtable==2.11.3 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt index a916dd4e508f..bcbe5975a1e2 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.11.1 +google-cloud-bigtable==2.11.3 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt index d6cfeaa9d908..c6a71aed23e7 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.11.1 \ No newline at end of file +google-cloud-bigtable==2.11.3 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt index 0b646227b112..101c6d532dac 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.11.1 +google-cloud-bigtable==2.11.3 From 1d04a186fabdad7b3b873af54aa5a8412efe265e Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Wed, 24 Aug 2022 21:39:16 +0200 Subject: [PATCH 649/892] chore(deps): update dependency apache-beam to v2.41.0 (#652) * chore(deps): update dependency apache-beam to v2.41.0 * fix flaky test * fix flaky test Co-authored-by: Anthonios Partheniou --- packages/google-cloud-bigtable/samples/beam/requirements.txt | 2 +- .../samples/metricscaler/metricscaler.py | 4 ++-- .../samples/metricscaler/metricscaler_test.py | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index d8ad5b20bef3..f6c955f47355 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ -apache-beam==2.40.0 +apache-beam==2.41.0 google-cloud-bigtable==2.11.3 google-cloud-core==2.3.2 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/metricscaler.py b/packages/google-cloud-bigtable/samples/metricscaler/metricscaler.py index 3ffa95a0002d..037df7f2109f 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/metricscaler.py +++ b/packages/google-cloud-bigtable/samples/metricscaler/metricscaler.py @@ -123,7 +123,7 @@ def scale_bigtable(bigtable_instance, bigtable_cluster, scale_up): new_node_count = min(current_node_count + size_change_step, 
max_node_count) cluster.serve_nodes = new_node_count operation = cluster.update() - response = operation.result(60) + response = operation.result(240) logger.info( "Scaled up from {} to {} nodes for {}.".format( current_node_count, new_node_count, response.name @@ -134,7 +134,7 @@ def scale_bigtable(bigtable_instance, bigtable_cluster, scale_up): new_node_count = max(current_node_count - size_change_step, min_node_count) cluster.serve_nodes = new_node_count operation = cluster.update() - response = operation.result(60) + response = operation.result(240) logger.info( "Scaled down from {} to {} nodes for {}.".format( current_node_count, new_node_count, response.name diff --git a/packages/google-cloud-bigtable/samples/metricscaler/metricscaler_test.py b/packages/google-cloud-bigtable/samples/metricscaler/metricscaler_test.py index 52a2498cc343..06b8fabe1061 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/metricscaler_test.py +++ b/packages/google-cloud-bigtable/samples/metricscaler/metricscaler_test.py @@ -78,7 +78,7 @@ def instance(): default_storage_type=storage_type, ) operation = instance.create(clusters=[cluster]) - response = operation.result(60) + response = operation.result(240) print(f"Successfully created {response.name}") # Eventual consistency check @@ -108,7 +108,7 @@ def dev_instance(): cluster_id, location_id=BIGTABLE_ZONE, default_storage_type=storage_type ) operation = instance.create(clusters=[cluster]) - response = operation.result(60) + response = operation.result(240) print(f"Successfully created {response.name}") # Eventual consistency check From 486b7724e3ec4c4005f6292c90e123fe122e64a9 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Fri, 26 Aug 2022 10:29:09 -0400 Subject: [PATCH 650/892] chore: remove 'pip install' statements from python_library templates [autoapprove] (#655) Source-Link: https://github.com/googleapis/synthtool/commit/48263378ad6010ec2fc4d480af7b5d08170338c8 Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:60a63eddf86c87395b4bb394fdddfe30f84a7726ee8fe0b758ea132c2106ac75 Co-authored-by: Owl Bot --- .../.github/.OwlBot.lock.yaml | 4 +- .../.kokoro/publish-docs.sh | 4 +- .../google-cloud-bigtable/.kokoro/release.sh | 5 +- .../.kokoro/requirements.in | 8 + .../.kokoro/requirements.txt | 464 ++++++++++++++++++ packages/google-cloud-bigtable/renovate.json | 2 +- 6 files changed, 477 insertions(+), 10 deletions(-) create mode 100644 packages/google-cloud-bigtable/.kokoro/requirements.in create mode 100644 packages/google-cloud-bigtable/.kokoro/requirements.txt diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index c701359fc58c..9ac200ab34c6 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. 
docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:c6c965a4bf40c19011b11f87dbc801a66d3a23fbc6704102be064ef31c51f1c3 -# created: 2022-08-09T15:58:56.463048506Z + digest: sha256:60a63eddf86c87395b4bb394fdddfe30f84a7726ee8fe0b758ea132c2106ac75 +# created: 2022-08-24T19:47:37.288818056Z diff --git a/packages/google-cloud-bigtable/.kokoro/publish-docs.sh b/packages/google-cloud-bigtable/.kokoro/publish-docs.sh index 8acb14e802b0..1c4d62370042 100755 --- a/packages/google-cloud-bigtable/.kokoro/publish-docs.sh +++ b/packages/google-cloud-bigtable/.kokoro/publish-docs.sh @@ -21,14 +21,12 @@ export PYTHONUNBUFFERED=1 export PATH="${HOME}/.local/bin:${PATH}" # Install nox -python3 -m pip install --user --upgrade --quiet nox +python3 -m pip install --require-hashes -r .kokoro/requirements.txt python3 -m nox --version # build docs nox -s docs -python3 -m pip install --user gcp-docuploader - # create metadata python3 -m docuploader create-metadata \ --name=$(jq --raw-output '.name // empty' .repo-metadata.json) \ diff --git a/packages/google-cloud-bigtable/.kokoro/release.sh b/packages/google-cloud-bigtable/.kokoro/release.sh index f0cb9d5db53f..253a4add83a0 100755 --- a/packages/google-cloud-bigtable/.kokoro/release.sh +++ b/packages/google-cloud-bigtable/.kokoro/release.sh @@ -16,12 +16,9 @@ set -eo pipefail # Start the releasetool reporter -python3 -m pip install gcp-releasetool +python3 -m pip install --require-hashes -r .kokoro/requirements.txt python3 -m releasetool publish-reporter-script > /tmp/publisher-script; source /tmp/publisher-script -# Ensure that we have the latest versions of Twine, Wheel, and Setuptools. -python3 -m pip install --upgrade twine wheel setuptools - # Disable buffering, so that the logs stream through. 
export PYTHONUNBUFFERED=1 diff --git a/packages/google-cloud-bigtable/.kokoro/requirements.in b/packages/google-cloud-bigtable/.kokoro/requirements.in new file mode 100644 index 000000000000..7718391a34d7 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/requirements.in @@ -0,0 +1,8 @@ +gcp-docuploader +gcp-releasetool +importlib-metadata +typing-extensions +twine +wheel +setuptools +nox \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/requirements.txt b/packages/google-cloud-bigtable/.kokoro/requirements.txt new file mode 100644 index 000000000000..c4b824f247e3 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/requirements.txt @@ -0,0 +1,464 @@ +# +# This file is autogenerated by pip-compile with python 3.10 +# To update, run: +# +# pip-compile --allow-unsafe --generate-hashes requirements.in +# +argcomplete==2.0.0 \ + --hash=sha256:6372ad78c89d662035101418ae253668445b391755cfe94ea52f1b9d22425b20 \ + --hash=sha256:cffa11ea77999bb0dd27bb25ff6dc142a6796142f68d45b1a26b11f58724561e + # via nox +attrs==22.1.0 \ + --hash=sha256:29adc2665447e5191d0e7c568fde78b21f9672d344281d0c6e1ab085429b22b6 \ + --hash=sha256:86efa402f67bf2df34f51a335487cf46b1ec130d02b8d39fd248abfd30da551c + # via gcp-releasetool +bleach==5.0.1 \ + --hash=sha256:085f7f33c15bd408dd9b17a4ad77c577db66d76203e5984b1bd59baeee948b2a \ + --hash=sha256:0d03255c47eb9bd2f26aa9bb7f2107732e7e8fe195ca2f64709fcf3b0a4a085c + # via readme-renderer +cachetools==5.2.0 \ + --hash=sha256:6a94c6402995a99c3970cc7e4884bb60b4a8639938157eeed436098bf9831757 \ + --hash=sha256:f9f17d2aec496a9aa6b76f53e3b614c965223c061982d434d160f930c698a9db + # via google-auth +certifi==2022.6.15 \ + --hash=sha256:84c85a9078b11105f04f3036a9482ae10e4621616db313fe045dd24743a0820d \ + --hash=sha256:fe86415d55e84719d75f8b69414f6438ac3547d2078ab91b67e779ef69378412 + # via requests +cffi==1.15.1 \ + --hash=sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5 \ + --hash=sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef \ + --hash=sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104 \ + --hash=sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426 \ + --hash=sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405 \ + --hash=sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375 \ + --hash=sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a \ + --hash=sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e \ + --hash=sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc \ + --hash=sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf \ + --hash=sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185 \ + --hash=sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497 \ + --hash=sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3 \ + --hash=sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35 \ + --hash=sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c \ + --hash=sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83 \ + --hash=sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21 \ + --hash=sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca \ + --hash=sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984 \ + --hash=sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac \ + 
--hash=sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd \ + --hash=sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee \ + --hash=sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a \ + --hash=sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2 \ + --hash=sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192 \ + --hash=sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7 \ + --hash=sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585 \ + --hash=sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f \ + --hash=sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e \ + --hash=sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27 \ + --hash=sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b \ + --hash=sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e \ + --hash=sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e \ + --hash=sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d \ + --hash=sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c \ + --hash=sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415 \ + --hash=sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82 \ + --hash=sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02 \ + --hash=sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314 \ + --hash=sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325 \ + --hash=sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c \ + --hash=sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3 \ + --hash=sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914 \ + --hash=sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045 \ + --hash=sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d \ + --hash=sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9 \ + --hash=sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5 \ + --hash=sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2 \ + --hash=sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c \ + --hash=sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3 \ + --hash=sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2 \ + --hash=sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8 \ + --hash=sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d \ + --hash=sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d \ + --hash=sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9 \ + --hash=sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162 \ + --hash=sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76 \ + --hash=sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4 \ + --hash=sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e \ + --hash=sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9 \ + --hash=sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6 \ + --hash=sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b \ + 
--hash=sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01 \ + --hash=sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0 + # via cryptography +charset-normalizer==2.1.1 \ + --hash=sha256:5a3d016c7c547f69d6f81fb0db9449ce888b418b5b9952cc5e6e66843e9dd845 \ + --hash=sha256:83e9a75d1911279afd89352c68b45348559d1fc0506b054b346651b5e7fee29f + # via requests +click==8.0.4 \ + --hash=sha256:6a7a62563bbfabfda3a38f3023a1db4a35978c0abd76f6c9605ecd6554d6d9b1 \ + --hash=sha256:8458d7b1287c5fb128c90e23381cf99dcde74beaf6c7ff6384ce84d6fe090adb + # via + # gcp-docuploader + # gcp-releasetool +colorlog==6.6.0 \ + --hash=sha256:344f73204009e4c83c5b6beb00b3c45dc70fcdae3c80db919e0a4171d006fde8 \ + --hash=sha256:351c51e866c86c3217f08e4b067a7974a678be78f07f85fc2d55b8babde6d94e + # via + # gcp-docuploader + # nox +commonmark==0.9.1 \ + --hash=sha256:452f9dc859be7f06631ddcb328b6919c67984aca654e5fefb3914d54691aed60 \ + --hash=sha256:da2f38c92590f83de410ba1a3cbceafbc74fee9def35f9251ba9a971d6d66fd9 + # via rich +cryptography==37.0.4 \ + --hash=sha256:190f82f3e87033821828f60787cfa42bff98404483577b591429ed99bed39d59 \ + --hash=sha256:2be53f9f5505673eeda5f2736bea736c40f051a739bfae2f92d18aed1eb54596 \ + --hash=sha256:30788e070800fec9bbcf9faa71ea6d8068f5136f60029759fd8c3efec3c9dcb3 \ + --hash=sha256:3d41b965b3380f10e4611dbae366f6dc3cefc7c9ac4e8842a806b9672ae9add5 \ + --hash=sha256:4c590ec31550a724ef893c50f9a97a0c14e9c851c85621c5650d699a7b88f7ab \ + --hash=sha256:549153378611c0cca1042f20fd9c5030d37a72f634c9326e225c9f666d472884 \ + --hash=sha256:63f9c17c0e2474ccbebc9302ce2f07b55b3b3fcb211ded18a42d5764f5c10a82 \ + --hash=sha256:6bc95ed67b6741b2607298f9ea4932ff157e570ef456ef7ff0ef4884a134cc4b \ + --hash=sha256:7099a8d55cd49b737ffc99c17de504f2257e3787e02abe6d1a6d136574873441 \ + --hash=sha256:75976c217f10d48a8b5a8de3d70c454c249e4b91851f6838a4e48b8f41eb71aa \ + --hash=sha256:7bc997818309f56c0038a33b8da5c0bfbb3f1f067f315f9abd6fc07ad359398d \ + --hash=sha256:80f49023dd13ba35f7c34072fa17f604d2f19bf0989f292cedf7ab5770b87a0b \ + --hash=sha256:91ce48d35f4e3d3f1d83e29ef4a9267246e6a3be51864a5b7d2247d5086fa99a \ + --hash=sha256:a958c52505c8adf0d3822703078580d2c0456dd1d27fabfb6f76fe63d2971cd6 \ + --hash=sha256:b62439d7cd1222f3da897e9a9fe53bbf5c104fff4d60893ad1355d4c14a24157 \ + --hash=sha256:b7f8dd0d4c1f21759695c05a5ec8536c12f31611541f8904083f3dc582604280 \ + --hash=sha256:d204833f3c8a33bbe11eda63a54b1aad7aa7456ed769a982f21ec599ba5fa282 \ + --hash=sha256:e007f052ed10cc316df59bc90fbb7ff7950d7e2919c9757fd42a2b8ecf8a5f67 \ + --hash=sha256:f2dcb0b3b63afb6df7fd94ec6fbddac81b5492513f7b0436210d390c14d46ee8 \ + --hash=sha256:f721d1885ecae9078c3f6bbe8a88bc0786b6e749bf32ccec1ef2b18929a05046 \ + --hash=sha256:f7a6de3e98771e183645181b3627e2563dcde3ce94a9e42a3f427d2255190327 \ + --hash=sha256:f8c0a6e9e1dd3eb0414ba320f85da6b0dcbd543126e30fcc546e7372a7fbf3b9 + # via + # gcp-releasetool + # secretstorage +distlib==0.3.5 \ + --hash=sha256:a7f75737c70be3b25e2bee06288cec4e4c221de18455b2dd037fe2a795cab2fe \ + --hash=sha256:b710088c59f06338ca514800ad795a132da19fda270e3ce4affc74abf955a26c + # via virtualenv +docutils==0.19 \ + --hash=sha256:33995a6753c30b7f577febfc2c50411fec6aac7f7ffeb7c4cfe5991072dcf9e6 \ + --hash=sha256:5e1de4d849fee02c63b040a4a3fd567f4ab104defd8a5511fbbc24a8a017efbc + # via readme-renderer +filelock==3.8.0 \ + --hash=sha256:55447caa666f2198c5b6b13a26d2084d26fa5b115c00d065664b2124680c4edc \ + --hash=sha256:617eb4e5eedc82fc5f47b6d61e4d11cb837c56cb4544e39081099fa17ad109d4 + # via virtualenv 
+gcp-docuploader==0.6.3 \ + --hash=sha256:ba8c9d76b3bbac54b0311c503a373b00edc2dc02d6d54ea9507045adb8e870f7 \ + --hash=sha256:c0f5aaa82ce1854a386197e4e359b120ad6d4e57ae2c812fce42219a3288026b + # via -r requirements.in +gcp-releasetool==1.8.6 \ + --hash=sha256:42e51ab8e2e789bc8e22a03c09352962cd3452951c801a2230d564816630304a \ + --hash=sha256:a3518b79d1b243c494eac392a01c7fd65187fd6d52602dcab9b529bc934d4da1 + # via -r requirements.in +google-api-core==2.8.2 \ + --hash=sha256:06f7244c640322b508b125903bb5701bebabce8832f85aba9335ec00b3d02edc \ + --hash=sha256:93c6a91ccac79079ac6bbf8b74ee75db970cc899278b97d53bc012f35908cf50 + # via + # google-cloud-core + # google-cloud-storage +google-auth==2.11.0 \ + --hash=sha256:be62acaae38d0049c21ca90f27a23847245c9f161ff54ede13af2cb6afecbac9 \ + --hash=sha256:ed65ecf9f681832298e29328e1ef0a3676e3732b2e56f41532d45f70a22de0fb + # via + # gcp-releasetool + # google-api-core + # google-cloud-core + # google-cloud-storage +google-cloud-core==2.3.2 \ + --hash=sha256:8417acf6466be2fa85123441696c4badda48db314c607cf1e5d543fa8bdc22fe \ + --hash=sha256:b9529ee7047fd8d4bf4a2182de619154240df17fbe60ead399078c1ae152af9a + # via google-cloud-storage +google-cloud-storage==2.5.0 \ + --hash=sha256:19a26c66c317ce542cea0830b7e787e8dac2588b6bfa4d3fd3b871ba16305ab0 \ + --hash=sha256:382f34b91de2212e3c2e7b40ec079d27ee2e3dbbae99b75b1bcd8c63063ce235 + # via gcp-docuploader +google-crc32c==1.3.0 \ + --hash=sha256:04e7c220798a72fd0f08242bc8d7a05986b2a08a0573396187fd32c1dcdd58b3 \ + --hash=sha256:05340b60bf05b574159e9bd940152a47d38af3fb43803ffe71f11d704b7696a6 \ + --hash=sha256:12674a4c3b56b706153a358eaa1018c4137a5a04635b92b4652440d3d7386206 \ + --hash=sha256:127f9cc3ac41b6a859bd9dc4321097b1a4f6aa7fdf71b4f9227b9e3ebffb4422 \ + --hash=sha256:13af315c3a0eec8bb8b8d80b8b128cb3fcd17d7e4edafc39647846345a3f003a \ + --hash=sha256:1926fd8de0acb9d15ee757175ce7242e235482a783cd4ec711cc999fc103c24e \ + --hash=sha256:226f2f9b8e128a6ca6a9af9b9e8384f7b53a801907425c9a292553a3a7218ce0 \ + --hash=sha256:276de6273eb074a35bc598f8efbc00c7869c5cf2e29c90748fccc8c898c244df \ + --hash=sha256:318f73f5484b5671f0c7f5f63741ab020a599504ed81d209b5c7129ee4667407 \ + --hash=sha256:3bbce1be3687bbfebe29abdb7631b83e6b25da3f4e1856a1611eb21854b689ea \ + --hash=sha256:42ae4781333e331a1743445931b08ebdad73e188fd554259e772556fc4937c48 \ + --hash=sha256:58be56ae0529c664cc04a9c76e68bb92b091e0194d6e3c50bea7e0f266f73713 \ + --hash=sha256:5da2c81575cc3ccf05d9830f9e8d3c70954819ca9a63828210498c0774fda1a3 \ + --hash=sha256:6311853aa2bba4064d0c28ca54e7b50c4d48e3de04f6770f6c60ebda1e975267 \ + --hash=sha256:650e2917660e696041ab3dcd7abac160b4121cd9a484c08406f24c5964099829 \ + --hash=sha256:6a4db36f9721fdf391646685ecffa404eb986cbe007a3289499020daf72e88a2 \ + --hash=sha256:779cbf1ce375b96111db98fca913c1f5ec11b1d870e529b1dc7354b2681a8c3a \ + --hash=sha256:7f6fe42536d9dcd3e2ffb9d3053f5d05221ae3bbcefbe472bdf2c71c793e3183 \ + --hash=sha256:891f712ce54e0d631370e1f4997b3f182f3368179198efc30d477c75d1f44942 \ + --hash=sha256:95c68a4b9b7828ba0428f8f7e3109c5d476ca44996ed9a5f8aac6269296e2d59 \ + --hash=sha256:96a8918a78d5d64e07c8ea4ed2bc44354e3f93f46a4866a40e8db934e4c0d74b \ + --hash=sha256:9c3cf890c3c0ecfe1510a452a165431b5831e24160c5fcf2071f0f85ca5a47cd \ + --hash=sha256:9f58099ad7affc0754ae42e6d87443299f15d739b0ce03c76f515153a5cda06c \ + --hash=sha256:a0b9e622c3b2b8d0ce32f77eba617ab0d6768b82836391e4f8f9e2074582bf02 \ + --hash=sha256:a7f9cbea4245ee36190f85fe1814e2d7b1e5f2186381b082f5d59f99b7f11328 \ + 
--hash=sha256:bab4aebd525218bab4ee615786c4581952eadc16b1ff031813a2fd51f0cc7b08 \ + --hash=sha256:c124b8c8779bf2d35d9b721e52d4adb41c9bfbde45e6a3f25f0820caa9aba73f \ + --hash=sha256:c9da0a39b53d2fab3e5467329ed50e951eb91386e9d0d5b12daf593973c3b168 \ + --hash=sha256:ca60076c388728d3b6ac3846842474f4250c91efbfe5afa872d3ffd69dd4b318 \ + --hash=sha256:cb6994fff247987c66a8a4e550ef374671c2b82e3c0d2115e689d21e511a652d \ + --hash=sha256:d1c1d6236feab51200272d79b3d3e0f12cf2cbb12b208c835b175a21efdb0a73 \ + --hash=sha256:dd7760a88a8d3d705ff562aa93f8445ead54f58fd482e4f9e2bafb7e177375d4 \ + --hash=sha256:dda4d8a3bb0b50f540f6ff4b6033f3a74e8bf0bd5320b70fab2c03e512a62812 \ + --hash=sha256:e0f1ff55dde0ebcfbef027edc21f71c205845585fffe30d4ec4979416613e9b3 \ + --hash=sha256:e7a539b9be7b9c00f11ef16b55486141bc2cdb0c54762f84e3c6fc091917436d \ + --hash=sha256:eb0b14523758e37802f27b7f8cd973f5f3d33be7613952c0df904b68c4842f0e \ + --hash=sha256:ed447680ff21c14aaceb6a9f99a5f639f583ccfe4ce1a5e1d48eb41c3d6b3217 \ + --hash=sha256:f52a4ad2568314ee713715b1e2d79ab55fab11e8b304fd1462ff5cccf4264b3e \ + --hash=sha256:fbd60c6aaa07c31d7754edbc2334aef50601b7f1ada67a96eb1eb57c7c72378f \ + --hash=sha256:fc28e0db232c62ca0c3600884933178f0825c99be4474cdd645e378a10588125 \ + --hash=sha256:fe31de3002e7b08eb20823b3735b97c86c5926dd0581c7710a680b418a8709d4 \ + --hash=sha256:fec221a051150eeddfdfcff162e6db92c65ecf46cb0f7bb1bf812a1520ec026b \ + --hash=sha256:ff71073ebf0e42258a42a0b34f2c09ec384977e7f6808999102eedd5b49920e3 + # via google-resumable-media +google-resumable-media==2.3.3 \ + --hash=sha256:27c52620bd364d1c8116eaac4ea2afcbfb81ae9139fb3199652fcac1724bfb6c \ + --hash=sha256:5b52774ea7a829a8cdaa8bd2d4c3d4bc660c91b30857ab2668d0eb830f4ea8c5 + # via google-cloud-storage +googleapis-common-protos==1.56.4 \ + --hash=sha256:8eb2cbc91b69feaf23e32452a7ae60e791e09967d81d4fcc7fc388182d1bd394 \ + --hash=sha256:c25873c47279387cfdcbdafa36149887901d36202cb645a0e4f29686bf6e4417 + # via google-api-core +idna==3.3 \ + --hash=sha256:84d9dd047ffa80596e0f246e2eab0b391788b0503584e8945f2368256d2735ff \ + --hash=sha256:9d643ff0a55b762d5cdb124b8eaa99c66322e2157b69160bc32796e824360e6d + # via requests +importlib-metadata==4.12.0 \ + --hash=sha256:637245b8bab2b6502fcbc752cc4b7a6f6243bb02b31c5c26156ad103d3d45670 \ + --hash=sha256:7401a975809ea1fdc658c3aa4f78cc2195a0e019c5cbc4c06122884e9ae80c23 + # via + # -r requirements.in + # twine +jeepney==0.8.0 \ + --hash=sha256:5efe48d255973902f6badc3ce55e2aa6c5c3b3bc642059ef3a91247bcfcc5806 \ + --hash=sha256:c0a454ad016ca575060802ee4d590dd912e35c122fa04e70306de3d076cce755 + # via + # keyring + # secretstorage +jinja2==3.1.2 \ + --hash=sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852 \ + --hash=sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61 + # via gcp-releasetool +keyring==23.8.2 \ + --hash=sha256:0d9973f8891850f1ade5f26aafd06bb16865fbbae3fc56b0defb6a14a2624003 \ + --hash=sha256:10d2a8639663fe2090705a00b8c47c687cacdf97598ea9c11456679fa974473a + # via + # gcp-releasetool + # twine +markupsafe==2.1.1 \ + --hash=sha256:0212a68688482dc52b2d45013df70d169f542b7394fc744c02a57374a4207003 \ + --hash=sha256:089cf3dbf0cd6c100f02945abeb18484bd1ee57a079aefd52cffd17fba910b88 \ + --hash=sha256:10c1bfff05d95783da83491be968e8fe789263689c02724e0c691933c52994f5 \ + --hash=sha256:33b74d289bd2f5e527beadcaa3f401e0df0a89927c1559c8566c066fa4248ab7 \ + --hash=sha256:3799351e2336dc91ea70b034983ee71cf2f9533cdff7c14c90ea126bfd95d65a \ + 
--hash=sha256:3ce11ee3f23f79dbd06fb3d63e2f6af7b12db1d46932fe7bd8afa259a5996603 \ + --hash=sha256:421be9fbf0ffe9ffd7a378aafebbf6f4602d564d34be190fc19a193232fd12b1 \ + --hash=sha256:43093fb83d8343aac0b1baa75516da6092f58f41200907ef92448ecab8825135 \ + --hash=sha256:46d00d6cfecdde84d40e572d63735ef81423ad31184100411e6e3388d405e247 \ + --hash=sha256:4a33dea2b688b3190ee12bd7cfa29d39c9ed176bda40bfa11099a3ce5d3a7ac6 \ + --hash=sha256:4b9fe39a2ccc108a4accc2676e77da025ce383c108593d65cc909add5c3bd601 \ + --hash=sha256:56442863ed2b06d19c37f94d999035e15ee982988920e12a5b4ba29b62ad1f77 \ + --hash=sha256:671cd1187ed5e62818414afe79ed29da836dde67166a9fac6d435873c44fdd02 \ + --hash=sha256:694deca8d702d5db21ec83983ce0bb4b26a578e71fbdbd4fdcd387daa90e4d5e \ + --hash=sha256:6a074d34ee7a5ce3effbc526b7083ec9731bb3cbf921bbe1d3005d4d2bdb3a63 \ + --hash=sha256:6d0072fea50feec76a4c418096652f2c3238eaa014b2f94aeb1d56a66b41403f \ + --hash=sha256:6fbf47b5d3728c6aea2abb0589b5d30459e369baa772e0f37a0320185e87c980 \ + --hash=sha256:7f91197cc9e48f989d12e4e6fbc46495c446636dfc81b9ccf50bb0ec74b91d4b \ + --hash=sha256:86b1f75c4e7c2ac2ccdaec2b9022845dbb81880ca318bb7a0a01fbf7813e3812 \ + --hash=sha256:8dc1c72a69aa7e082593c4a203dcf94ddb74bb5c8a731e4e1eb68d031e8498ff \ + --hash=sha256:8e3dcf21f367459434c18e71b2a9532d96547aef8a871872a5bd69a715c15f96 \ + --hash=sha256:8e576a51ad59e4bfaac456023a78f6b5e6e7651dcd383bcc3e18d06f9b55d6d1 \ + --hash=sha256:96e37a3dc86e80bf81758c152fe66dbf60ed5eca3d26305edf01892257049925 \ + --hash=sha256:97a68e6ada378df82bc9f16b800ab77cbf4b2fada0081794318520138c088e4a \ + --hash=sha256:99a2a507ed3ac881b975a2976d59f38c19386d128e7a9a18b7df6fff1fd4c1d6 \ + --hash=sha256:a49907dd8420c5685cfa064a1335b6754b74541bbb3706c259c02ed65b644b3e \ + --hash=sha256:b09bf97215625a311f669476f44b8b318b075847b49316d3e28c08e41a7a573f \ + --hash=sha256:b7bd98b796e2b6553da7225aeb61f447f80a1ca64f41d83612e6139ca5213aa4 \ + --hash=sha256:b87db4360013327109564f0e591bd2a3b318547bcef31b468a92ee504d07ae4f \ + --hash=sha256:bcb3ed405ed3222f9904899563d6fc492ff75cce56cba05e32eff40e6acbeaa3 \ + --hash=sha256:d4306c36ca495956b6d568d276ac11fdd9c30a36f1b6eb928070dc5360b22e1c \ + --hash=sha256:d5ee4f386140395a2c818d149221149c54849dfcfcb9f1debfe07a8b8bd63f9a \ + --hash=sha256:dda30ba7e87fbbb7eab1ec9f58678558fd9a6b8b853530e176eabd064da81417 \ + --hash=sha256:e04e26803c9c3851c931eac40c695602c6295b8d432cbe78609649ad9bd2da8a \ + --hash=sha256:e1c0b87e09fa55a220f058d1d49d3fb8df88fbfab58558f1198e08c1e1de842a \ + --hash=sha256:e72591e9ecd94d7feb70c1cbd7be7b3ebea3f548870aa91e2732960fa4d57a37 \ + --hash=sha256:e8c843bbcda3a2f1e3c2ab25913c80a3c5376cd00c6e8c4a86a89a28c8dc5452 \ + --hash=sha256:efc1913fd2ca4f334418481c7e595c00aad186563bbc1ec76067848c7ca0a933 \ + --hash=sha256:f121a1420d4e173a5d96e47e9a0c0dcff965afdf1626d28de1460815f7c4ee7a \ + --hash=sha256:fc7b548b17d238737688817ab67deebb30e8073c95749d55538ed473130ec0c7 + # via jinja2 +nox==2022.8.7 \ + --hash=sha256:1b894940551dc5c389f9271d197ca5d655d40bdc6ccf93ed6880e4042760a34b \ + --hash=sha256:96cca88779e08282a699d672258ec01eb7c792d35bbbf538c723172bce23212c + # via -r requirements.in +packaging==21.3 \ + --hash=sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb \ + --hash=sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522 + # via + # gcp-releasetool + # nox +pkginfo==1.8.3 \ + --hash=sha256:848865108ec99d4901b2f7e84058b6e7660aae8ae10164e015a6dcf5b242a594 \ + --hash=sha256:a84da4318dd86f870a9447a8c98340aa06216bfc6f2b7bdc4b8766984ae1867c + # via twine 
+platformdirs==2.5.2 \ + --hash=sha256:027d8e83a2d7de06bbac4e5ef7e023c02b863d7ea5d079477e722bb41ab25788 \ + --hash=sha256:58c8abb07dcb441e6ee4b11d8df0ac856038f944ab98b7be6b27b2a3c7feef19 + # via virtualenv +protobuf==3.20.1 \ + --hash=sha256:06059eb6953ff01e56a25cd02cca1a9649a75a7e65397b5b9b4e929ed71d10cf \ + --hash=sha256:097c5d8a9808302fb0da7e20edf0b8d4703274d140fd25c5edabddcde43e081f \ + --hash=sha256:284f86a6207c897542d7e956eb243a36bb8f9564c1742b253462386e96c6b78f \ + --hash=sha256:32ca378605b41fd180dfe4e14d3226386d8d1b002ab31c969c366549e66a2bb7 \ + --hash=sha256:3cc797c9d15d7689ed507b165cd05913acb992d78b379f6014e013f9ecb20996 \ + --hash=sha256:62f1b5c4cd6c5402b4e2d63804ba49a327e0c386c99b1675c8a0fefda23b2067 \ + --hash=sha256:69ccfdf3657ba59569c64295b7d51325f91af586f8d5793b734260dfe2e94e2c \ + --hash=sha256:6f50601512a3d23625d8a85b1638d914a0970f17920ff39cec63aaef80a93fb7 \ + --hash=sha256:7403941f6d0992d40161aa8bb23e12575637008a5a02283a930addc0508982f9 \ + --hash=sha256:755f3aee41354ae395e104d62119cb223339a8f3276a0cd009ffabfcdd46bb0c \ + --hash=sha256:77053d28427a29987ca9caf7b72ccafee011257561259faba8dd308fda9a8739 \ + --hash=sha256:7e371f10abe57cee5021797126c93479f59fccc9693dafd6bd5633ab67808a91 \ + --hash=sha256:9016d01c91e8e625141d24ec1b20fed584703e527d28512aa8c8707f105a683c \ + --hash=sha256:9be73ad47579abc26c12024239d3540e6b765182a91dbc88e23658ab71767153 \ + --hash=sha256:adc31566d027f45efe3f44eeb5b1f329da43891634d61c75a5944e9be6dd42c9 \ + --hash=sha256:adfc6cf69c7f8c50fd24c793964eef18f0ac321315439d94945820612849c388 \ + --hash=sha256:af0ebadc74e281a517141daad9d0f2c5d93ab78e9d455113719a45a49da9db4e \ + --hash=sha256:cb29edb9eab15742d791e1025dd7b6a8f6fcb53802ad2f6e3adcb102051063ab \ + --hash=sha256:cd68be2559e2a3b84f517fb029ee611546f7812b1fdd0aa2ecc9bc6ec0e4fdde \ + --hash=sha256:cdee09140e1cd184ba9324ec1df410e7147242b94b5f8b0c64fc89e38a8ba531 \ + --hash=sha256:db977c4ca738dd9ce508557d4fce0f5aebd105e158c725beec86feb1f6bc20d8 \ + --hash=sha256:dd5789b2948ca702c17027c84c2accb552fc30f4622a98ab5c51fcfe8c50d3e7 \ + --hash=sha256:e250a42f15bf9d5b09fe1b293bdba2801cd520a9f5ea2d7fb7536d4441811d20 \ + --hash=sha256:ff8d8fa42675249bb456f5db06c00de6c2f4c27a065955917b28c4f15978b9c3 + # via + # gcp-docuploader + # gcp-releasetool + # google-api-core +py==1.11.0 \ + --hash=sha256:51c75c4126074b472f746a24399ad32f6053d1b34b68d2fa41e558e6f4a98719 \ + --hash=sha256:607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378 + # via nox +pyasn1==0.4.8 \ + --hash=sha256:39c7e2ec30515947ff4e87fb6f456dfc6e84857d34be479c9d4a4ba4bf46aa5d \ + --hash=sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba + # via + # pyasn1-modules + # rsa +pyasn1-modules==0.2.8 \ + --hash=sha256:905f84c712230b2c592c19470d3ca8d552de726050d1d1716282a1f6146be65e \ + --hash=sha256:a50b808ffeb97cb3601dd25981f6b016cbb3d31fbf57a8b8a87428e6158d0c74 + # via google-auth +pycparser==2.21 \ + --hash=sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9 \ + --hash=sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206 + # via cffi +pygments==2.13.0 \ + --hash=sha256:56a8508ae95f98e2b9bdf93a6be5ae3f7d8af858b43e02c5a2ff083726be40c1 \ + --hash=sha256:f643f331ab57ba3c9d89212ee4a2dabc6e94f117cf4eefde99a0574720d14c42 + # via + # readme-renderer + # rich +pyjwt==2.4.0 \ + --hash=sha256:72d1d253f32dbd4f5c88eaf1fdc62f3a19f676ccbadb9dbc5d07e951b2b26daf \ + --hash=sha256:d42908208c699b3b973cbeb01a969ba6a96c821eefb1c5bfe4c390c01d67abba + # via gcp-releasetool +pyparsing==3.0.9 \ + 
--hash=sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb \ + --hash=sha256:5026bae9a10eeaefb61dab2f09052b9f4307d44aee4eda64b309723d8d206bbc + # via packaging +pyperclip==1.8.2 \ + --hash=sha256:105254a8b04934f0bc84e9c24eb360a591aaf6535c9def5f29d92af107a9bf57 + # via gcp-releasetool +python-dateutil==2.8.2 \ + --hash=sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86 \ + --hash=sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9 + # via gcp-releasetool +readme-renderer==37.0 \ + --hash=sha256:07b7ea234e03e58f77cc222e206e6abb8f4c0435becce5104794ee591f9301c5 \ + --hash=sha256:9fa416704703e509eeb900696751c908ddeb2011319d93700d8f18baff887a69 + # via twine +requests==2.28.1 \ + --hash=sha256:7c5599b102feddaa661c826c56ab4fee28bfd17f5abca1ebbe3e7f19d7c97983 \ + --hash=sha256:8fefa2a1a1365bf5520aac41836fbee479da67864514bdb821f31ce07ce65349 + # via + # gcp-releasetool + # google-api-core + # google-cloud-storage + # requests-toolbelt + # twine +requests-toolbelt==0.9.1 \ + --hash=sha256:380606e1d10dc85c3bd47bf5a6095f815ec007be7a8b69c878507068df059e6f \ + --hash=sha256:968089d4584ad4ad7c171454f0a5c6dac23971e9472521ea3b6d49d610aa6fc0 + # via twine +rfc3986==2.0.0 \ + --hash=sha256:50b1502b60e289cb37883f3dfd34532b8873c7de9f49bb546641ce9cbd256ebd \ + --hash=sha256:97aacf9dbd4bfd829baad6e6309fa6573aaf1be3f6fa735c8ab05e46cecb261c + # via twine +rich==12.5.1 \ + --hash=sha256:2eb4e6894cde1e017976d2975ac210ef515d7548bc595ba20e195fb9628acdeb \ + --hash=sha256:63a5c5ce3673d3d5fbbf23cd87e11ab84b6b451436f1b7f19ec54b6bc36ed7ca + # via twine +rsa==4.9 \ + --hash=sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7 \ + --hash=sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21 + # via google-auth +secretstorage==3.3.3 \ + --hash=sha256:2403533ef369eca6d2ba81718576c5e0f564d5cca1b58f73a8b23e7d4eeebd77 \ + --hash=sha256:f356e6628222568e3af06f2eba8df495efa13b3b63081dafd4f7d9a7b7bc9f99 + # via keyring +six==1.16.0 \ + --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ + --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 + # via + # bleach + # gcp-docuploader + # google-auth + # python-dateutil +twine==4.0.1 \ + --hash=sha256:42026c18e394eac3e06693ee52010baa5313e4811d5a11050e7d48436cf41b9e \ + --hash=sha256:96b1cf12f7ae611a4a40b6ae8e9570215daff0611828f5fe1f37a16255ab24a0 + # via -r requirements.in +typing-extensions==4.3.0 \ + --hash=sha256:25642c956049920a5aa49edcdd6ab1e06d7e5d467fc00e0506c44ac86fbfca02 \ + --hash=sha256:e6d2677a32f47fc7eb2795db1dd15c1f34eff616bcaf2cfb5e997f854fa1c4a6 + # via -r requirements.in +urllib3==1.26.12 \ + --hash=sha256:3fa96cf423e6987997fc326ae8df396db2a8b7c667747d47ddd8ecba91f4a74e \ + --hash=sha256:b930dd878d5a8afb066a637fbb35144fe7901e3b209d1cd4f524bd0e9deee997 + # via + # requests + # twine +virtualenv==20.16.3 \ + --hash=sha256:4193b7bc8a6cd23e4eb251ac64f29b4398ab2c233531e66e40b19a6b7b0d30c1 \ + --hash=sha256:d86ea0bb50e06252d79e6c241507cb904fcd66090c3271381372d6221a3970f9 + # via nox +webencodings==0.5.1 \ + --hash=sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78 \ + --hash=sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923 + # via bleach +wheel==0.37.1 \ + --hash=sha256:4bdcd7d840138086126cd09254dc6195fb4fc6f01c050a1d7236f2630db1d22a \ + --hash=sha256:e9a504e793efbca1b8e0e9cb979a249cf4a0a7b5b8c9e8b65a5e39d49529c1c4 + # via -r requirements.in +zipp==3.8.1 \ + 
--hash=sha256:05b45f1ee8f807d0cc928485ca40a07cb491cf092ff587c0df9cb1fd154848d2 \ + --hash=sha256:47c40d7fe183a6f21403a199b3e4192cca5774656965b0a4988ad2f8feb5f009 + # via importlib-metadata + +# The following packages are considered to be unsafe in a requirements file: +setuptools==65.2.0 \ + --hash=sha256:7f4bc85450898a09f76ebf28b72fa25bc7111f6c7d665d514a60bba9c75ef2a9 \ + --hash=sha256:a3ca5857c89f82f5c9410e8508cb32f4872a3bafd4aa7ae122a24ca33bccc750 + # via -r requirements.in diff --git a/packages/google-cloud-bigtable/renovate.json b/packages/google-cloud-bigtable/renovate.json index c21036d385e5..566a70f3cc3c 100644 --- a/packages/google-cloud-bigtable/renovate.json +++ b/packages/google-cloud-bigtable/renovate.json @@ -5,7 +5,7 @@ ":preserveSemverRanges", ":disableDependencyDashboard" ], - "ignorePaths": [".pre-commit-config.yaml"], + "ignorePaths": [".pre-commit-config.yaml", ".kokoro/requirements.txt"], "pip_requirements": { "fileMatch": ["requirements-test.txt", "samples/[\\S/]*constraints.txt", "samples/[\\S/]*constraints-test.txt"] } From 7f1706c78cd56e0fde555da8631132738b6ec7f9 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Tue, 6 Sep 2022 15:04:48 -0400 Subject: [PATCH 651/892] ci(python): fix path to requirements.txt in release script (#659) * ci(python): fix path to requirements.txt in release script Source-Link: https://github.com/googleapis/synthtool/commit/fdba3ed145bdb2f4f3eff434d4284b1d03b80d34 Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:1f0dbd02745fb7cf255563dab5968345989308544e52b7f460deadd5e78e63b0 * Increase the timeouts. * increase timeout * increase timeout * increase timeouts * increase timeouts * increase timeouts * increase timeouts * increase timeouts * increase timeouts Co-authored-by: Owl Bot Co-authored-by: Mariatta Wijaya Co-authored-by: Anthonios Partheniou --- .../.github/.OwlBot.lock.yaml | 3 +- .../google-cloud-bigtable/.kokoro/release.sh | 2 +- .../.kokoro/requirements.txt | 30 +++++++-------- packages/google-cloud-bigtable/noxfile.py | 7 +++- .../samples/instanceadmin/instanceadmin.py | 4 +- .../samples/metricscaler/metricscaler_test.py | 2 +- .../tests/system/conftest.py | 4 +- .../tests/system/test_instance_admin.py | 38 +++++++++---------- .../tests/system/test_table_admin.py | 6 +-- 9 files changed, 49 insertions(+), 47 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 9ac200ab34c6..0d9eb2af9352 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,5 +13,4 @@ # limitations under the License. 
docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:60a63eddf86c87395b4bb394fdddfe30f84a7726ee8fe0b758ea132c2106ac75 -# created: 2022-08-24T19:47:37.288818056Z + digest: sha256:1f0dbd02745fb7cf255563dab5968345989308544e52b7f460deadd5e78e63b0 diff --git a/packages/google-cloud-bigtable/.kokoro/release.sh b/packages/google-cloud-bigtable/.kokoro/release.sh index 253a4add83a0..6b594c813d66 100755 --- a/packages/google-cloud-bigtable/.kokoro/release.sh +++ b/packages/google-cloud-bigtable/.kokoro/release.sh @@ -16,7 +16,7 @@ set -eo pipefail # Start the releasetool reporter -python3 -m pip install --require-hashes -r .kokoro/requirements.txt +python3 -m pip install --require-hashes -r github/python-bigtable/.kokoro/requirements.txt python3 -m releasetool publish-reporter-script > /tmp/publisher-script; source /tmp/publisher-script # Disable buffering, so that the logs stream through. diff --git a/packages/google-cloud-bigtable/.kokoro/requirements.txt b/packages/google-cloud-bigtable/.kokoro/requirements.txt index c4b824f247e3..92b2f727e777 100644 --- a/packages/google-cloud-bigtable/.kokoro/requirements.txt +++ b/packages/google-cloud-bigtable/.kokoro/requirements.txt @@ -100,9 +100,9 @@ click==8.0.4 \ # via # gcp-docuploader # gcp-releasetool -colorlog==6.6.0 \ - --hash=sha256:344f73204009e4c83c5b6beb00b3c45dc70fcdae3c80db919e0a4171d006fde8 \ - --hash=sha256:351c51e866c86c3217f08e4b067a7974a678be78f07f85fc2d55b8babde6d94e +colorlog==6.7.0 \ + --hash=sha256:0d33ca236784a1ba3ff9c532d4964126d8a2c44f1f0cb1d2b0728196f512f662 \ + --hash=sha256:bd94bd21c1e13fac7bd3153f4bc3a7dc0eb0974b8bc2fdf1a989e474f6e582e5 # via # gcp-docuploader # nox @@ -136,9 +136,9 @@ cryptography==37.0.4 \ # via # gcp-releasetool # secretstorage -distlib==0.3.5 \ - --hash=sha256:a7f75737c70be3b25e2bee06288cec4e4c221de18455b2dd037fe2a795cab2fe \ - --hash=sha256:b710088c59f06338ca514800ad795a132da19fda270e3ce4affc74abf955a26c +distlib==0.3.6 \ + --hash=sha256:14bad2d9b04d3a36127ac97f30b12a19268f211063d8f8ee4f47108896e11b46 \ + --hash=sha256:f35c4b692542ca110de7ef0bea44d73981caeb34ca0b9b6b2e6d7790dda8f80e # via virtualenv docutils==0.19 \ --hash=sha256:33995a6753c30b7f577febfc2c50411fec6aac7f7ffeb7c4cfe5991072dcf9e6 \ @@ -152,9 +152,9 @@ gcp-docuploader==0.6.3 \ --hash=sha256:ba8c9d76b3bbac54b0311c503a373b00edc2dc02d6d54ea9507045adb8e870f7 \ --hash=sha256:c0f5aaa82ce1854a386197e4e359b120ad6d4e57ae2c812fce42219a3288026b # via -r requirements.in -gcp-releasetool==1.8.6 \ - --hash=sha256:42e51ab8e2e789bc8e22a03c09352962cd3452951c801a2230d564816630304a \ - --hash=sha256:a3518b79d1b243c494eac392a01c7fd65187fd6d52602dcab9b529bc934d4da1 +gcp-releasetool==1.8.7 \ + --hash=sha256:3d2a67c9db39322194afb3b427e9cb0476ce8f2a04033695f0aeb63979fc2b37 \ + --hash=sha256:5e4d28f66e90780d77f3ecf1e9155852b0c3b13cbccb08ab07e66b2357c8da8d # via -r requirements.in google-api-core==2.8.2 \ --hash=sha256:06f7244c640322b508b125903bb5701bebabce8832f85aba9335ec00b3d02edc \ @@ -251,9 +251,9 @@ jinja2==3.1.2 \ --hash=sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852 \ --hash=sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61 # via gcp-releasetool -keyring==23.8.2 \ - --hash=sha256:0d9973f8891850f1ade5f26aafd06bb16865fbbae3fc56b0defb6a14a2624003 \ - --hash=sha256:10d2a8639663fe2090705a00b8c47c687cacdf97598ea9c11456679fa974473a +keyring==23.9.0 \ + --hash=sha256:4c32a31174faaee48f43a7e2c7e9c3216ec5e95acf22a2bebfb4a1d05056ee44 \ + 
--hash=sha256:98f060ec95ada2ab910c195a2d4317be6ef87936a766b239c46aa3c7aac4f0db # via # gcp-releasetool # twine @@ -440,9 +440,9 @@ urllib3==1.26.12 \ # via # requests # twine -virtualenv==20.16.3 \ - --hash=sha256:4193b7bc8a6cd23e4eb251ac64f29b4398ab2c233531e66e40b19a6b7b0d30c1 \ - --hash=sha256:d86ea0bb50e06252d79e6c241507cb904fcd66090c3271381372d6221a3970f9 +virtualenv==20.16.4 \ + --hash=sha256:014f766e4134d0008dcaa1f95bafa0fb0f575795d07cae50b1bee514185d6782 \ + --hash=sha256:035ed57acce4ac35c82c9d8802202b0e71adac011a511ff650cbcf9635006a22 # via nox webencodings==0.5.1 \ --hash=sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78 \ diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index 1c208b4c3f5d..ae1810bcc69e 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -200,7 +200,9 @@ def unit(session): def install_systemtest_dependencies(session, *constraints): # Use pre-release gRPC for system tests. - session.install("--pre", "grpcio") + # Exclude version 1.49.0rc1 which has a known issue. + # See https://github.com/grpc/grpc/pull/30642 + session.install("--pre", "grpcio!=1.49.0rc1") session.install(*SYSTEM_TEST_STANDARD_DEPENDENCIES, *constraints) @@ -407,7 +409,8 @@ def prerelease_deps(session): # dependency of grpc "six", "googleapis-common-protos", - "grpcio", + # Exclude version 1.49.0rc1 which has a known issue. See https://github.com/grpc/grpc/pull/30642 + "grpcio!=1.49.0rc1", "grpcio-status", "google-api-core", "proto-plus", diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/instanceadmin.py b/packages/google-cloud-bigtable/samples/instanceadmin/instanceadmin.py index 239b1dbaef52..18fa3be503dc 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/instanceadmin.py +++ b/packages/google-cloud-bigtable/samples/instanceadmin/instanceadmin.py @@ -74,7 +74,7 @@ def run_instance_operations(project_id, instance_id, cluster_id): # Create instance with given options operation = instance.create(clusters=[cluster]) # Ensure the operation completes. - operation.result(timeout=60) + operation.result(timeout=240) print("\nCreated instance: {}".format(instance_id)) # [END bigtable_create_prod_instance] @@ -159,7 +159,7 @@ def add_cluster(project_id, instance_id, cluster_id): else: operation = cluster.create() # Ensure the operation completes. 
- operation.result(timeout=120) + operation.result(timeout=240) print("\nCluster created: {}".format(cluster_id)) # [END bigtable_create_cluster] diff --git a/packages/google-cloud-bigtable/samples/metricscaler/metricscaler_test.py b/packages/google-cloud-bigtable/samples/metricscaler/metricscaler_test.py index 06b8fabe1061..8d5ab38007c9 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/metricscaler_test.py +++ b/packages/google-cloud-bigtable/samples/metricscaler/metricscaler_test.py @@ -78,7 +78,7 @@ def instance(): default_storage_type=storage_type, ) operation = instance.create(clusters=[cluster]) - response = operation.result(240) + response = operation.result(480) print(f"Successfully created {response.name}") # Eventual consistency check diff --git a/packages/google-cloud-bigtable/tests/system/conftest.py b/packages/google-cloud-bigtable/tests/system/conftest.py index 23052f4f44bd..2c5a139ed7d6 100644 --- a/packages/google-cloud-bigtable/tests/system/conftest.py +++ b/packages/google-cloud-bigtable/tests/system/conftest.py @@ -133,7 +133,7 @@ def admin_instance_populated(admin_instance, admin_cluster, in_emulator): # See: https://cloud.google.com/bigtable/docs/emulator if not in_emulator: operation = admin_instance.create(clusters=[admin_cluster]) - operation.result(timeout=30) + operation.result(timeout=120) yield admin_instance @@ -176,7 +176,7 @@ def data_instance_populated( serve_nodes=serve_nodes, ) operation = instance.create(clusters=[cluster]) - operation.result(timeout=30) + operation.result(timeout=120) yield instance diff --git a/packages/google-cloud-bigtable/tests/system/test_instance_admin.py b/packages/google-cloud-bigtable/tests/system/test_instance_admin.py index a2ad229af828..53c0174dcc05 100644 --- a/packages/google-cloud-bigtable/tests/system/test_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/system/test_instance_admin.py @@ -84,7 +84,7 @@ def _modify_app_profile_helper( ) operation = app_profile.update(ignore_warnings=ignore_warnings) - operation.result(timeout=60) + operation.result(timeout=120) alt_profile = instance.app_profile(app_profile_id) alt_profile.reload() @@ -163,7 +163,7 @@ def test_instance_create_prod( operation = instance.create(clusters=[cluster]) instances_to_delete.append(instance) - operation.result(timeout=60) # Ensure the operation completes. + operation.result(timeout=240) # Ensure the operation completes. assert instance.type_ is None # Create a new instance instance and make sure it is the same. @@ -194,7 +194,7 @@ def test_instance_create_development( operation = instance.create(clusters=[cluster]) instances_to_delete.append(instance) - operation.result(timeout=60) # Ensure the operation completes. + operation.result(timeout=240) # Ensure the operation completes. # Create a new instance instance and make sure it is the same. instance_alt = admin_client.instance(alt_instance_id) @@ -244,7 +244,7 @@ def test_instance_create_w_two_clusters( ) operation = instance.create(clusters=[cluster_1, cluster_2]) instances_to_delete.append(instance) - operation.result(timeout=120) # Ensure the operation completes. + operation.result(timeout=240) # Ensure the operation completes. # Create a new instance instance and make sure it is the same. instance_alt = admin_client.instance(alt_instance_id) @@ -534,7 +534,7 @@ def test_instance_create_w_two_clusters_cmek( ) operation = instance.create(clusters=[cluster_1, cluster_2]) instances_to_delete.append(instance) - operation.result(timeout=120) # Ensure the operation completes. 
+ operation.result(timeout=240) # Ensure the operation completes. # Create a new instance instance and make sure it is the same. instance_alt = admin_client.instance(alt_instance_id) @@ -597,7 +597,7 @@ def test_instance_update_display_name_and_labels( admin_instance_populated.labels = new_labels operation = admin_instance_populated.update() - operation.result(timeout=60) # ensure the operation completes. + operation.result(timeout=240) # Ensure the operation completes. # Create a new instance instance and reload it. instance_alt = admin_client.instance(admin_instance_id, labels={}) @@ -614,7 +614,7 @@ def test_instance_update_display_name_and_labels( admin_instance_populated.display_name = old_display_name admin_instance_populated.labels = instance_labels operation = admin_instance_populated.update() - operation.result(timeout=60) # ensure the operation completes. + operation.result(timeout=240) # Ensure the operation completes. def test_instance_update_w_type( @@ -640,12 +640,12 @@ def test_instance_update_w_type( operation = instance.create(clusters=[cluster]) instances_to_delete.append(instance) - operation.result(timeout=60) # Ensure the operation completes. + operation.result(timeout=240) # Ensure the operation completes. instance.display_name = None instance.type_ = enums.Instance.Type.PRODUCTION operation = instance.update() - operation.result(timeout=60) # ensure the operation completes. + operation.result(timeout=240) # Ensure the operation completes. # Create a new instance instance and reload it. instance_alt = admin_client.instance(alt_instance_id) @@ -679,7 +679,7 @@ def test_cluster_create( default_storage_type=(enums.StorageType.SSD), ) operation = cluster_2.create() - operation.result(timeout=60) # Ensure the operation completes. + operation.result(timeout=240) # Ensure the operation completes. # Create a new object instance, reload and make sure it is the same. alt_cluster = admin_instance_populated.cluster(alt_cluster_id) @@ -717,7 +717,7 @@ def test_cluster_create_w_autoscaling( default_storage_type=(enums.StorageType.SSD), ) operation = cluster_2.create() - operation.result(timeout=60) # Ensure the operation completes. + operation.result(timeout=240) # Ensure the operation completes. # Create a new object instance, reload and make sure it is the same. alt_cluster = admin_instance_populated.cluster(alt_cluster_id) @@ -749,7 +749,7 @@ def test_cluster_update( admin_cluster.serve_nodes = new_serve_nodes operation = admin_cluster.update() - operation.result(timeout=60) # Ensure the operation completes. + operation.result(timeout=240) # Ensure the operation completes. # Create a new cluster instance and reload it. alt_cluster = admin_instance_populated.cluster(admin_cluster_id) @@ -759,7 +759,7 @@ def test_cluster_update( # Put the cluster back the way it was for the other test cases. admin_cluster.serve_nodes = serve_nodes operation = admin_cluster.update() - operation.result(timeout=60) # Ensure the operation completes. + operation.result(timeout=240) # Ensure the operation completes. def test_cluster_update_w_autoscaling( @@ -779,7 +779,7 @@ def test_cluster_update_w_autoscaling( admin_cluster_with_autoscaling.cpu_utilization_percent = new_cpu_utilization_percent operation = admin_cluster_with_autoscaling.update() - operation.result(timeout=60) # Ensure the operation completes. + operation.result(timeout=240) # Ensure the operation completes. # Create a new cluster instance and reload it. 
alt_cluster = admin_instance_populated.cluster(admin_cluster_id) @@ -793,7 +793,7 @@ def test_cluster_update_w_autoscaling( admin_cluster_with_autoscaling.max_serve_nodes = max_serve_nodes admin_cluster_with_autoscaling.cpu_utilization_percent = cpu_utilization_percent operation = admin_cluster_with_autoscaling.update() - operation.result(timeout=60) # Ensure the operation completes. + operation.result(timeout=240) # Ensure the operation completes. def test_cluster_update_w_autoscaling_partial( @@ -810,7 +810,7 @@ def test_cluster_update_w_autoscaling_partial( admin_cluster_with_autoscaling.min_serve_nodes = new_min_serve_nodes operation = admin_cluster_with_autoscaling.update() - operation.result(timeout=60) # Ensure the operation completes. + operation.result(timeout=240) # Ensure the operation completes. # Create a new cluster instance and reload it. alt_cluster = admin_instance_populated.cluster(admin_cluster_id) @@ -827,7 +827,7 @@ def test_cluster_update_w_autoscaling_partial( admin_cluster_with_autoscaling.max_serve_nodes = max_serve_nodes admin_cluster_with_autoscaling.cpu_utilization_percent = cpu_utilization_percent operation = admin_cluster_with_autoscaling.update() - operation.result(timeout=60) # Ensure the operation completes. + operation.result(timeout=240) # Ensure the operation completes. def test_cluster_disable_autoscaling( @@ -843,7 +843,7 @@ def test_cluster_disable_autoscaling( operation = admin_cluster_with_autoscaling.disable_autoscaling( serve_nodes=serve_nodes ) - operation.result(timeout=60) # Ensure the operation completes. + operation.result(timeout=240) # Ensure the operation completes. # Create a new cluster instance and reload it. alt_cluster = admin_instance_populated.cluster(admin_cluster_id) @@ -859,4 +859,4 @@ def test_cluster_disable_autoscaling( admin_cluster_with_autoscaling.cpu_utilization_percent = cpu_utilization_percent admin_cluster_with_autoscaling.serve_nodes = 0 operation = admin_cluster_with_autoscaling.update() - operation.result(timeout=60) # Ensure the operation completes. + operation.result(timeout=240) # Ensure the operation completes. diff --git a/packages/google-cloud-bigtable/tests/system/test_table_admin.py b/packages/google-cloud-bigtable/tests/system/test_table_admin.py index 92283d328365..c501890137a3 100644 --- a/packages/google-cloud-bigtable/tests/system/test_table_admin.py +++ b/packages/google-cloud-bigtable/tests/system/test_table_admin.py @@ -293,7 +293,7 @@ def test_table_backup( # Testing `Backup.create()` method backup_op = temp_backup.create() - backup_op.result(timeout=30) + backup_op.result(timeout=240) # Implicit testing of `Backup.delete()` method backups_to_delete.append(temp_backup) @@ -346,11 +346,11 @@ def test_table_backup( ) create_op = alt_instance.create(clusters=[alt_cluster]) instances_to_delete.append(alt_instance) - create_op.result(timeout=30) + create_op.result(timeout=240) # Testing `restore()`... 
restore_op = temp_backup.restore(restored_table_id, alt_instance_id) - restore_op.result(timeout=30) + restore_op.result(timeout=240) restored_table = alt_instance.table(restored_table_id) assert restored_table in alt_instance.list_tables() restored_table.delete() From 7b74c62873bf5b082805d13f1ac2662055132928 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Wed, 7 Sep 2022 18:20:07 +0200 Subject: [PATCH 652/892] chore(deps): update all dependencies (#666) * chore(deps): update all dependencies * revert * increase timeouts * increase timeouts Co-authored-by: Anthonios Partheniou --- .../samples/beam/requirements-test.txt | 2 +- .../samples/hello/requirements-test.txt | 2 +- .../hello_happybase/requirements-test.txt | 2 +- .../samples/instanceadmin/instanceadmin.py | 4 +- .../instanceadmin/requirements-test.txt | 2 +- .../samples/metricscaler/metricscaler.py | 4 +- .../samples/metricscaler/metricscaler_test.py | 2 +- .../metricscaler/requirements-test.txt | 2 +- .../samples/quickstart/requirements-test.txt | 2 +- .../requirements-test.txt | 2 +- .../snippets/deletes/requirements-test.txt | 2 +- .../snippets/filters/requirements-test.txt | 2 +- .../snippets/reads/requirements-test.txt | 2 +- .../snippets/writes/requirements-test.txt | 2 +- .../samples/tableadmin/requirements-test.txt | 2 +- .../tests/system/test_instance_admin.py | 38 +++++++++---------- 16 files changed, 36 insertions(+), 36 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements-test.txt b/packages/google-cloud-bigtable/samples/beam/requirements-test.txt index d00689e0623a..e07168502ea9 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements-test.txt @@ -1 +1 @@ -pytest==7.1.2 +pytest==7.1.3 diff --git a/packages/google-cloud-bigtable/samples/hello/requirements-test.txt b/packages/google-cloud-bigtable/samples/hello/requirements-test.txt index d00689e0623a..e07168502ea9 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements-test.txt @@ -1 +1 @@ -pytest==7.1.2 +pytest==7.1.3 diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt b/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt index d00689e0623a..e07168502ea9 100644 --- a/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt @@ -1 +1 @@ -pytest==7.1.2 +pytest==7.1.3 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/instanceadmin.py b/packages/google-cloud-bigtable/samples/instanceadmin/instanceadmin.py index 18fa3be503dc..7341bfc46f19 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/instanceadmin.py +++ b/packages/google-cloud-bigtable/samples/instanceadmin/instanceadmin.py @@ -74,7 +74,7 @@ def run_instance_operations(project_id, instance_id, cluster_id): # Create instance with given options operation = instance.create(clusters=[cluster]) # Ensure the operation completes. - operation.result(timeout=240) + operation.result(timeout=480) print("\nCreated instance: {}".format(instance_id)) # [END bigtable_create_prod_instance] @@ -159,7 +159,7 @@ def add_cluster(project_id, instance_id, cluster_id): else: operation = cluster.create() # Ensure the operation completes. 
- operation.result(timeout=240) + operation.result(timeout=480) print("\nCluster created: {}".format(cluster_id)) # [END bigtable_create_cluster] diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt index d00689e0623a..e07168502ea9 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt @@ -1 +1 @@ -pytest==7.1.2 +pytest==7.1.3 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/metricscaler.py b/packages/google-cloud-bigtable/samples/metricscaler/metricscaler.py index 037df7f2109f..f1fe80523dd8 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/metricscaler.py +++ b/packages/google-cloud-bigtable/samples/metricscaler/metricscaler.py @@ -123,7 +123,7 @@ def scale_bigtable(bigtable_instance, bigtable_cluster, scale_up): new_node_count = min(current_node_count + size_change_step, max_node_count) cluster.serve_nodes = new_node_count operation = cluster.update() - response = operation.result(240) + response = operation.result(480) logger.info( "Scaled up from {} to {} nodes for {}.".format( current_node_count, new_node_count, response.name @@ -134,7 +134,7 @@ def scale_bigtable(bigtable_instance, bigtable_cluster, scale_up): new_node_count = max(current_node_count - size_change_step, min_node_count) cluster.serve_nodes = new_node_count operation = cluster.update() - response = operation.result(240) + response = operation.result(480) logger.info( "Scaled down from {} to {} nodes for {}.".format( current_node_count, new_node_count, response.name diff --git a/packages/google-cloud-bigtable/samples/metricscaler/metricscaler_test.py b/packages/google-cloud-bigtable/samples/metricscaler/metricscaler_test.py index 8d5ab38007c9..47be38187f30 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/metricscaler_test.py +++ b/packages/google-cloud-bigtable/samples/metricscaler/metricscaler_test.py @@ -108,7 +108,7 @@ def dev_instance(): cluster_id, location_id=BIGTABLE_ZONE, default_storage_type=storage_type ) operation = instance.create(clusters=[cluster]) - response = operation.result(240) + response = operation.result(480) print(f"Successfully created {response.name}") # Eventual consistency check diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt index 7292f2245304..fa3e37e5dd4f 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt @@ -1,3 +1,3 @@ -pytest==7.1.2 +pytest==7.1.3 mock==4.0.3 google-cloud-testutils diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt index d00689e0623a..e07168502ea9 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt @@ -1 +1 @@ -pytest==7.1.2 +pytest==7.1.3 diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt b/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt index d00689e0623a..e07168502ea9 100644 --- a/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt +++ 
b/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt @@ -1 +1 @@ -pytest==7.1.2 +pytest==7.1.3 diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt index d00689e0623a..e07168502ea9 100644 --- a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt @@ -1 +1 @@ -pytest==7.1.2 +pytest==7.1.3 diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt index d00689e0623a..e07168502ea9 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt @@ -1 +1 @@ -pytest==7.1.2 +pytest==7.1.3 diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt index d00689e0623a..e07168502ea9 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt @@ -1 +1 @@ -pytest==7.1.2 +pytest==7.1.3 diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt index ce161d15f1a4..6759e75e03a0 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt @@ -1,2 +1,2 @@ backoff==2.1.2 -pytest==7.1.2 +pytest==7.1.3 diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt index 7f627052ec2c..5a9ba2b99f91 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt @@ -1,2 +1,2 @@ -pytest==7.1.2 +pytest==7.1.3 google-cloud-testutils==1.3.3 diff --git a/packages/google-cloud-bigtable/tests/system/test_instance_admin.py b/packages/google-cloud-bigtable/tests/system/test_instance_admin.py index 53c0174dcc05..644264aff619 100644 --- a/packages/google-cloud-bigtable/tests/system/test_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/system/test_instance_admin.py @@ -163,7 +163,7 @@ def test_instance_create_prod( operation = instance.create(clusters=[cluster]) instances_to_delete.append(instance) - operation.result(timeout=240) # Ensure the operation completes. + operation.result(timeout=480) # Ensure the operation completes. assert instance.type_ is None # Create a new instance instance and make sure it is the same. @@ -194,7 +194,7 @@ def test_instance_create_development( operation = instance.create(clusters=[cluster]) instances_to_delete.append(instance) - operation.result(timeout=240) # Ensure the operation completes. + operation.result(timeout=480) # Ensure the operation completes. # Create a new instance instance and make sure it is the same. 
instance_alt = admin_client.instance(alt_instance_id) @@ -244,7 +244,7 @@ def test_instance_create_w_two_clusters( ) operation = instance.create(clusters=[cluster_1, cluster_2]) instances_to_delete.append(instance) - operation.result(timeout=240) # Ensure the operation completes. + operation.result(timeout=480) # Ensure the operation completes. # Create a new instance instance and make sure it is the same. instance_alt = admin_client.instance(alt_instance_id) @@ -438,7 +438,7 @@ def test_instance_create_app_profile_create_with_multi_cluster_ids( ) operation = instance.create(clusters=[cluster_1, cluster_2]) instances_to_delete.append(instance) - operation.result(timeout=240) # Ensure the operation completes. + operation.result(timeout=480) # Ensure the operation completes. # Create a new instance and make sure it is the same. instance_alt = admin_client.instance(alt_instance_id) @@ -534,7 +534,7 @@ def test_instance_create_w_two_clusters_cmek( ) operation = instance.create(clusters=[cluster_1, cluster_2]) instances_to_delete.append(instance) - operation.result(timeout=240) # Ensure the operation completes. + operation.result(timeout=480) # Ensure the operation completes. # Create a new instance instance and make sure it is the same. instance_alt = admin_client.instance(alt_instance_id) @@ -597,7 +597,7 @@ def test_instance_update_display_name_and_labels( admin_instance_populated.labels = new_labels operation = admin_instance_populated.update() - operation.result(timeout=240) # Ensure the operation completes. + operation.result(timeout=480) # Ensure the operation completes. # Create a new instance instance and reload it. instance_alt = admin_client.instance(admin_instance_id, labels={}) @@ -614,7 +614,7 @@ def test_instance_update_display_name_and_labels( admin_instance_populated.display_name = old_display_name admin_instance_populated.labels = instance_labels operation = admin_instance_populated.update() - operation.result(timeout=240) # Ensure the operation completes. + operation.result(timeout=480) # Ensure the operation completes. def test_instance_update_w_type( @@ -640,12 +640,12 @@ def test_instance_update_w_type( operation = instance.create(clusters=[cluster]) instances_to_delete.append(instance) - operation.result(timeout=240) # Ensure the operation completes. + operation.result(timeout=480) # Ensure the operation completes. instance.display_name = None instance.type_ = enums.Instance.Type.PRODUCTION operation = instance.update() - operation.result(timeout=240) # Ensure the operation completes. + operation.result(timeout=480) # Ensure the operation completes. # Create a new instance instance and reload it. instance_alt = admin_client.instance(alt_instance_id) @@ -679,7 +679,7 @@ def test_cluster_create( default_storage_type=(enums.StorageType.SSD), ) operation = cluster_2.create() - operation.result(timeout=240) # Ensure the operation completes. + operation.result(timeout=480) # Ensure the operation completes. # Create a new object instance, reload and make sure it is the same. alt_cluster = admin_instance_populated.cluster(alt_cluster_id) @@ -717,7 +717,7 @@ def test_cluster_create_w_autoscaling( default_storage_type=(enums.StorageType.SSD), ) operation = cluster_2.create() - operation.result(timeout=240) # Ensure the operation completes. + operation.result(timeout=480) # Ensure the operation completes. # Create a new object instance, reload and make sure it is the same. 
alt_cluster = admin_instance_populated.cluster(alt_cluster_id) @@ -749,7 +749,7 @@ def test_cluster_update( admin_cluster.serve_nodes = new_serve_nodes operation = admin_cluster.update() - operation.result(timeout=240) # Ensure the operation completes. + operation.result(timeout=480) # Ensure the operation completes. # Create a new cluster instance and reload it. alt_cluster = admin_instance_populated.cluster(admin_cluster_id) @@ -759,7 +759,7 @@ def test_cluster_update( # Put the cluster back the way it was for the other test cases. admin_cluster.serve_nodes = serve_nodes operation = admin_cluster.update() - operation.result(timeout=240) # Ensure the operation completes. + operation.result(timeout=480) # Ensure the operation completes. def test_cluster_update_w_autoscaling( @@ -779,7 +779,7 @@ def test_cluster_update_w_autoscaling( admin_cluster_with_autoscaling.cpu_utilization_percent = new_cpu_utilization_percent operation = admin_cluster_with_autoscaling.update() - operation.result(timeout=240) # Ensure the operation completes. + operation.result(timeout=480) # Ensure the operation completes. # Create a new cluster instance and reload it. alt_cluster = admin_instance_populated.cluster(admin_cluster_id) @@ -793,7 +793,7 @@ def test_cluster_update_w_autoscaling( admin_cluster_with_autoscaling.max_serve_nodes = max_serve_nodes admin_cluster_with_autoscaling.cpu_utilization_percent = cpu_utilization_percent operation = admin_cluster_with_autoscaling.update() - operation.result(timeout=240) # Ensure the operation completes. + operation.result(timeout=480) # Ensure the operation completes. def test_cluster_update_w_autoscaling_partial( @@ -810,7 +810,7 @@ def test_cluster_update_w_autoscaling_partial( admin_cluster_with_autoscaling.min_serve_nodes = new_min_serve_nodes operation = admin_cluster_with_autoscaling.update() - operation.result(timeout=240) # Ensure the operation completes. + operation.result(timeout=480) # Ensure the operation completes. # Create a new cluster instance and reload it. alt_cluster = admin_instance_populated.cluster(admin_cluster_id) @@ -827,7 +827,7 @@ def test_cluster_update_w_autoscaling_partial( admin_cluster_with_autoscaling.max_serve_nodes = max_serve_nodes admin_cluster_with_autoscaling.cpu_utilization_percent = cpu_utilization_percent operation = admin_cluster_with_autoscaling.update() - operation.result(timeout=240) # Ensure the operation completes. + operation.result(timeout=480) # Ensure the operation completes. def test_cluster_disable_autoscaling( @@ -843,7 +843,7 @@ def test_cluster_disable_autoscaling( operation = admin_cluster_with_autoscaling.disable_autoscaling( serve_nodes=serve_nodes ) - operation.result(timeout=240) # Ensure the operation completes. + operation.result(timeout=480) # Ensure the operation completes. # Create a new cluster instance and reload it. alt_cluster = admin_instance_populated.cluster(admin_cluster_id) @@ -859,4 +859,4 @@ def test_cluster_disable_autoscaling( admin_cluster_with_autoscaling.cpu_utilization_percent = cpu_utilization_percent admin_cluster_with_autoscaling.serve_nodes = 0 operation = admin_cluster_with_autoscaling.update() - operation.result(timeout=240) # Ensure the operation completes. + operation.result(timeout=480) # Ensure the operation completes. 
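
Note on the timeout changes in the patch above: every edited call site follows the same pattern. The samples and system tests block on a Cloud Bigtable long-running admin operation via operation.result(timeout=...), and instance or cluster creation can take several minutes, so the deadlines were doubled (240 to 480 seconds here; 120 to 240 in the follow-up patch below). A minimal sketch of that pattern, assuming a hypothetical project, instance, and cluster ID (the identifiers below are illustrative and not taken from these patches):

    from google.cloud import bigtable
    from google.cloud.bigtable import enums

    # An admin client is required for instance and cluster management.
    client = bigtable.Client(project="my-project", admin=True)
    instance = client.instance(
        "my-instance",
        instance_type=enums.Instance.Type.PRODUCTION,
        labels={"env": "test"},
    )
    cluster = instance.cluster(
        "my-cluster", location_id="us-central1-f", serve_nodes=3
    )

    # create() returns a long-running operation; result() blocks until the
    # operation completes or raises a TimeoutError once the deadline passes.
    # Instance creation can take minutes, hence the generous timeouts used
    # in these tests.
    operation = instance.create(clusters=[cluster])
    operation.result(timeout=480)

The same result(timeout=...) idiom applies to cluster.create(), cluster.update(), instance.update(), and app_profile.update() in the hunks above; only the deadline value changes.
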
From 10375004c7b0e2cdfe3b819475186f69303a7ab2 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 8 Sep 2022 13:37:04 -0400 Subject: [PATCH 653/892] chore(python): exclude setup.py in renovate config (#665) * chore(python): exclude setup.py in renovate config Source-Link: https://github.com/googleapis/synthtool/commit/56da63e80c384a871356d1ea6640802017f213b4 Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:993a058718e84a82fda04c3177e58f0a43281a996c7c395e0a56ccc4d6d210d7 * increase timeouts Co-authored-by: Owl Bot Co-authored-by: Anthonios Partheniou --- packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml | 2 +- packages/google-cloud-bigtable/.kokoro/requirements.txt | 8 ++++++++ packages/google-cloud-bigtable/renovate.json | 2 +- packages/google-cloud-bigtable/tests/system/conftest.py | 4 ++-- .../tests/system/test_instance_admin.py | 2 +- 5 files changed, 13 insertions(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 0d9eb2af9352..b8dcb4a4af99 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,4 +13,4 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:1f0dbd02745fb7cf255563dab5968345989308544e52b7f460deadd5e78e63b0 + digest: sha256:993a058718e84a82fda04c3177e58f0a43281a996c7c395e0a56ccc4d6d210d7 diff --git a/packages/google-cloud-bigtable/.kokoro/requirements.txt b/packages/google-cloud-bigtable/.kokoro/requirements.txt index 92b2f727e777..385f2d4d6106 100644 --- a/packages/google-cloud-bigtable/.kokoro/requirements.txt +++ b/packages/google-cloud-bigtable/.kokoro/requirements.txt @@ -241,6 +241,10 @@ importlib-metadata==4.12.0 \ # via # -r requirements.in # twine +jaraco-classes==3.2.2 \ + --hash=sha256:6745f113b0b588239ceb49532aa09c3ebb947433ce311ef2f8e3ad64ebb74594 \ + --hash=sha256:e6ef6fd3fcf4579a7a019d87d1e56a883f4e4c35cfe925f86731abc58804e647 + # via keyring jeepney==0.8.0 \ --hash=sha256:5efe48d255973902f6badc3ce55e2aa6c5c3b3bc642059ef3a91247bcfcc5806 \ --hash=sha256:c0a454ad016ca575060802ee4d590dd912e35c122fa04e70306de3d076cce755 @@ -299,6 +303,10 @@ markupsafe==2.1.1 \ --hash=sha256:f121a1420d4e173a5d96e47e9a0c0dcff965afdf1626d28de1460815f7c4ee7a \ --hash=sha256:fc7b548b17d238737688817ab67deebb30e8073c95749d55538ed473130ec0c7 # via jinja2 +more-itertools==8.14.0 \ + --hash=sha256:1bc4f91ee5b1b31ac7ceacc17c09befe6a40a503907baf9c839c229b5095cfd2 \ + --hash=sha256:c09443cd3d5438b8dafccd867a6bc1cb0894389e90cb53d227456b0b0bccb750 + # via jaraco-classes nox==2022.8.7 \ --hash=sha256:1b894940551dc5c389f9271d197ca5d655d40bdc6ccf93ed6880e4042760a34b \ --hash=sha256:96cca88779e08282a699d672258ec01eb7c792d35bbbf538c723172bce23212c diff --git a/packages/google-cloud-bigtable/renovate.json b/packages/google-cloud-bigtable/renovate.json index 566a70f3cc3c..39b2a0ec9296 100644 --- a/packages/google-cloud-bigtable/renovate.json +++ b/packages/google-cloud-bigtable/renovate.json @@ -5,7 +5,7 @@ ":preserveSemverRanges", ":disableDependencyDashboard" ], - "ignorePaths": [".pre-commit-config.yaml", ".kokoro/requirements.txt"], + "ignorePaths": [".pre-commit-config.yaml", ".kokoro/requirements.txt", "setup.py"], "pip_requirements": { "fileMatch": ["requirements-test.txt", "samples/[\\S/]*constraints.txt", 
"samples/[\\S/]*constraints-test.txt"] } diff --git a/packages/google-cloud-bigtable/tests/system/conftest.py b/packages/google-cloud-bigtable/tests/system/conftest.py index 2c5a139ed7d6..f39fcba88962 100644 --- a/packages/google-cloud-bigtable/tests/system/conftest.py +++ b/packages/google-cloud-bigtable/tests/system/conftest.py @@ -133,7 +133,7 @@ def admin_instance_populated(admin_instance, admin_cluster, in_emulator): # See: https://cloud.google.com/bigtable/docs/emulator if not in_emulator: operation = admin_instance.create(clusters=[admin_cluster]) - operation.result(timeout=120) + operation.result(timeout=240) yield admin_instance @@ -176,7 +176,7 @@ def data_instance_populated( serve_nodes=serve_nodes, ) operation = instance.create(clusters=[cluster]) - operation.result(timeout=120) + operation.result(timeout=240) yield instance diff --git a/packages/google-cloud-bigtable/tests/system/test_instance_admin.py b/packages/google-cloud-bigtable/tests/system/test_instance_admin.py index 644264aff619..e5e311213d5b 100644 --- a/packages/google-cloud-bigtable/tests/system/test_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/system/test_instance_admin.py @@ -84,7 +84,7 @@ def _modify_app_profile_helper( ) operation = app_profile.update(ignore_warnings=ignore_warnings) - operation.result(timeout=120) + operation.result(timeout=240) alt_profile = instance.app_profile(app_profile_id) alt_profile.reload() From c0c9260d3e2c7a9729f3848b47dae94c2e8ac05a Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 8 Sep 2022 21:10:16 +0000 Subject: [PATCH 654/892] chore: use gapic-generator-python 1.3.1 (#668) - [ ] Regenerate this pull request now. PiperOrigin-RevId: 472772457 Source-Link: https://github.com/googleapis/googleapis/commit/855b74d203deeb0f7a0215f9454cdde62a1f9b86 Source-Link: https://github.com/googleapis/googleapis-gen/commit/b64b1e7da3e138f15ca361552ef0545e54891b4f Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiYjY0YjFlN2RhM2UxMzhmMTVjYTM2MTU1MmVmMDU0NWU1NDg5MWI0ZiJ9 --- .../gapic/bigtable_admin_v2/test_bigtable_instance_admin.py | 4 ++-- .../unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py | 4 ++-- .../tests/unit/gapic/bigtable_v2/test_bigtable.py | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index d4f52ecba256..b841a9e5630b 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -18,8 +18,8 @@ # try/except added for compatibility with python < 3.8 try: from unittest import mock - from unittest.mock import AsyncMock -except ImportError: + from unittest.mock import AsyncMock # pragma: NO COVER +except ImportError: # pragma: NO COVER import mock import grpc diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index adcf50b1e5b3..2f53bbe5f788 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -18,8 +18,8 @@ # try/except 
added for compatibility with python < 3.8 try: from unittest import mock - from unittest.mock import AsyncMock -except ImportError: + from unittest.mock import AsyncMock # pragma: NO COVER +except ImportError: # pragma: NO COVER import mock import grpc diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py index 644265d2bcc5..c52f124c7af7 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py @@ -18,8 +18,8 @@ # try/except added for compatibility with python < 3.8 try: from unittest import mock - from unittest.mock import AsyncMock -except ImportError: + from unittest.mock import AsyncMock # pragma: NO COVER +except ImportError: # pragma: NO COVER import mock import grpc From 6f7386fc8f8abc83e48825252404bcc147106949 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Tue, 13 Sep 2022 15:16:19 +0000 Subject: [PATCH 655/892] chore: use gapic generator python 1.4.1 (#669) - [ ] Regenerate this pull request now. PiperOrigin-RevId: 473833416 Source-Link: https://github.com/googleapis/googleapis/commit/565a5508869557a3228b871101e4e4ebd8f93d11 Source-Link: https://github.com/googleapis/googleapis-gen/commit/1ee1a06c6de3ca8b843572c1fde0548f84236989 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiMWVlMWEwNmM2ZGUzY2E4Yjg0MzU3MmMxZmRlMDU0OGY4NDIzNjk4OSJ9 --- .../gapic/bigtable_admin_v2/test_bigtable_instance_admin.py | 2 +- .../unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py | 2 +- .../tests/unit/gapic/bigtable_v2/test_bigtable.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index b841a9e5630b..c8c1fe356a8b 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -27,7 +27,7 @@ import math import pytest from proto.marshal.rules.dates import DurationRule, TimestampRule - +from proto.marshal.rules import wrappers from google.api_core import client_options from google.api_core import exceptions as core_exceptions diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index 2f53bbe5f788..71d3b998b12f 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -27,7 +27,7 @@ import math import pytest from proto.marshal.rules.dates import DurationRule, TimestampRule - +from proto.marshal.rules import wrappers from google.api_core import client_options from google.api_core import exceptions as core_exceptions diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py index c52f124c7af7..f3207869b83b 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py +++ 
b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py @@ -27,7 +27,7 @@ import math import pytest from proto.marshal.rules.dates import DurationRule, TimestampRule - +from proto.marshal.rules import wrappers from google.api_core import client_options from google.api_core import exceptions as core_exceptions From 4e1efadf0b47b669e6d3fff4950e10a7d12d9924 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Tue, 13 Sep 2022 19:04:15 +0000 Subject: [PATCH 656/892] chore: detect samples tests in nested directories (#671) Source-Link: https://github.com/googleapis/synthtool/commit/50db768f450a50d7c1fd62513c113c9bb96fd434 Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:e09366bdf0fd9c8976592988390b24d53583dd9f002d476934da43725adbb978 --- packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml | 2 +- packages/google-cloud-bigtable/samples/beam/noxfile.py | 4 ++-- packages/google-cloud-bigtable/samples/hello/noxfile.py | 4 ++-- .../google-cloud-bigtable/samples/hello_happybase/noxfile.py | 4 ++-- .../google-cloud-bigtable/samples/instanceadmin/noxfile.py | 4 ++-- .../google-cloud-bigtable/samples/metricscaler/noxfile.py | 4 ++-- packages/google-cloud-bigtable/samples/quickstart/noxfile.py | 4 ++-- .../samples/quickstart_happybase/noxfile.py | 4 ++-- .../google-cloud-bigtable/samples/snippets/deletes/noxfile.py | 4 ++-- .../google-cloud-bigtable/samples/snippets/filters/noxfile.py | 4 ++-- .../google-cloud-bigtable/samples/snippets/reads/noxfile.py | 4 ++-- .../google-cloud-bigtable/samples/snippets/writes/noxfile.py | 4 ++-- packages/google-cloud-bigtable/samples/tableadmin/noxfile.py | 4 ++-- 13 files changed, 25 insertions(+), 25 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index b8dcb4a4af99..aa547962eb0a 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,4 +13,4 @@ # limitations under the License. 
docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:993a058718e84a82fda04c3177e58f0a43281a996c7c395e0a56ccc4d6d210d7 + digest: sha256:e09366bdf0fd9c8976592988390b24d53583dd9f002d476934da43725adbb978 diff --git a/packages/google-cloud-bigtable/samples/beam/noxfile.py b/packages/google-cloud-bigtable/samples/beam/noxfile.py index 908dd1a499d5..c0e636500ebf 100644 --- a/packages/google-cloud-bigtable/samples/beam/noxfile.py +++ b/packages/google-cloud-bigtable/samples/beam/noxfile.py @@ -205,8 +205,8 @@ def _session_tests( session: nox.sessions.Session, post_install: Callable = None ) -> None: # check for presence of tests - test_list = glob.glob("*_test.py") + glob.glob("test_*.py") - test_list.extend(glob.glob("tests")) + test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob("**/test_*.py", recursive=True) + test_list.extend(glob.glob("**/tests", recursive=True)) if len(test_list) == 0: print("No tests found, skipping directory.") diff --git a/packages/google-cloud-bigtable/samples/hello/noxfile.py b/packages/google-cloud-bigtable/samples/hello/noxfile.py index 5fcb9d7461f2..0398d72ff690 100644 --- a/packages/google-cloud-bigtable/samples/hello/noxfile.py +++ b/packages/google-cloud-bigtable/samples/hello/noxfile.py @@ -207,8 +207,8 @@ def _session_tests( session: nox.sessions.Session, post_install: Callable = None ) -> None: # check for presence of tests - test_list = glob.glob("*_test.py") + glob.glob("test_*.py") - test_list.extend(glob.glob("tests")) + test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob("**/test_*.py", recursive=True) + test_list.extend(glob.glob("**/tests", recursive=True)) if len(test_list) == 0: print("No tests found, skipping directory.") diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py b/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py index 5fcb9d7461f2..0398d72ff690 100644 --- a/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py +++ b/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py @@ -207,8 +207,8 @@ def _session_tests( session: nox.sessions.Session, post_install: Callable = None ) -> None: # check for presence of tests - test_list = glob.glob("*_test.py") + glob.glob("test_*.py") - test_list.extend(glob.glob("tests")) + test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob("**/test_*.py", recursive=True) + test_list.extend(glob.glob("**/tests", recursive=True)) if len(test_list) == 0: print("No tests found, skipping directory.") diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py b/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py index 5fcb9d7461f2..0398d72ff690 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py +++ b/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py @@ -207,8 +207,8 @@ def _session_tests( session: nox.sessions.Session, post_install: Callable = None ) -> None: # check for presence of tests - test_list = glob.glob("*_test.py") + glob.glob("test_*.py") - test_list.extend(glob.glob("tests")) + test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob("**/test_*.py", recursive=True) + test_list.extend(glob.glob("**/tests", recursive=True)) if len(test_list) == 0: print("No tests found, skipping directory.") diff --git a/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py b/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py index 5fcb9d7461f2..0398d72ff690 100644 --- 
a/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py +++ b/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py @@ -207,8 +207,8 @@ def _session_tests( session: nox.sessions.Session, post_install: Callable = None ) -> None: # check for presence of tests - test_list = glob.glob("*_test.py") + glob.glob("test_*.py") - test_list.extend(glob.glob("tests")) + test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob("**/test_*.py", recursive=True) + test_list.extend(glob.glob("**/tests", recursive=True)) if len(test_list) == 0: print("No tests found, skipping directory.") diff --git a/packages/google-cloud-bigtable/samples/quickstart/noxfile.py b/packages/google-cloud-bigtable/samples/quickstart/noxfile.py index 5fcb9d7461f2..0398d72ff690 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/noxfile.py +++ b/packages/google-cloud-bigtable/samples/quickstart/noxfile.py @@ -207,8 +207,8 @@ def _session_tests( session: nox.sessions.Session, post_install: Callable = None ) -> None: # check for presence of tests - test_list = glob.glob("*_test.py") + glob.glob("test_*.py") - test_list.extend(glob.glob("tests")) + test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob("**/test_*.py", recursive=True) + test_list.extend(glob.glob("**/tests", recursive=True)) if len(test_list) == 0: print("No tests found, skipping directory.") diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py b/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py index 5fcb9d7461f2..0398d72ff690 100644 --- a/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py +++ b/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py @@ -207,8 +207,8 @@ def _session_tests( session: nox.sessions.Session, post_install: Callable = None ) -> None: # check for presence of tests - test_list = glob.glob("*_test.py") + glob.glob("test_*.py") - test_list.extend(glob.glob("tests")) + test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob("**/test_*.py", recursive=True) + test_list.extend(glob.glob("**/tests", recursive=True)) if len(test_list) == 0: print("No tests found, skipping directory.") diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/deletes/noxfile.py index 5fcb9d7461f2..0398d72ff690 100644 --- a/packages/google-cloud-bigtable/samples/snippets/deletes/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/noxfile.py @@ -207,8 +207,8 @@ def _session_tests( session: nox.sessions.Session, post_install: Callable = None ) -> None: # check for presence of tests - test_list = glob.glob("*_test.py") + glob.glob("test_*.py") - test_list.extend(glob.glob("tests")) + test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob("**/test_*.py", recursive=True) + test_list.extend(glob.glob("**/tests", recursive=True)) if len(test_list) == 0: print("No tests found, skipping directory.") diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py index 5fcb9d7461f2..0398d72ff690 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py @@ -207,8 +207,8 @@ def _session_tests( session: nox.sessions.Session, post_install: Callable = None ) -> None: # check for presence of tests - test_list = glob.glob("*_test.py") + glob.glob("test_*.py") - 
test_list.extend(glob.glob("tests")) + test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob("**/test_*.py", recursive=True) + test_list.extend(glob.glob("**/tests", recursive=True)) if len(test_list) == 0: print("No tests found, skipping directory.") diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py index 5fcb9d7461f2..0398d72ff690 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py @@ -207,8 +207,8 @@ def _session_tests( session: nox.sessions.Session, post_install: Callable = None ) -> None: # check for presence of tests - test_list = glob.glob("*_test.py") + glob.glob("test_*.py") - test_list.extend(glob.glob("tests")) + test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob("**/test_*.py", recursive=True) + test_list.extend(glob.glob("**/tests", recursive=True)) if len(test_list) == 0: print("No tests found, skipping directory.") diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py index 5fcb9d7461f2..0398d72ff690 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py @@ -207,8 +207,8 @@ def _session_tests( session: nox.sessions.Session, post_install: Callable = None ) -> None: # check for presence of tests - test_list = glob.glob("*_test.py") + glob.glob("test_*.py") - test_list.extend(glob.glob("tests")) + test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob("**/test_*.py", recursive=True) + test_list.extend(glob.glob("**/tests", recursive=True)) if len(test_list) == 0: print("No tests found, skipping directory.") diff --git a/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py b/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py index 5fcb9d7461f2..0398d72ff690 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py +++ b/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py @@ -207,8 +207,8 @@ def _session_tests( session: nox.sessions.Session, post_install: Callable = None ) -> None: # check for presence of tests - test_list = glob.glob("*_test.py") + glob.glob("test_*.py") - test_list.extend(glob.glob("tests")) + test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob("**/test_*.py", recursive=True) + test_list.extend(glob.glob("**/tests", recursive=True)) if len(test_list) == 0: print("No tests found, skipping directory.") From 396f8b084d7f9823d7fad121002870b663c66a6d Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Tue, 13 Sep 2022 12:51:31 -0700 Subject: [PATCH 657/892] feat: Publish CBT deletion_protection field in Table, UpdateTableRequest, and UpdateTable API (#670) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: Publish CBT deletion_protection field in Table, UpdateTableRequest, and UpdateTable API in **stable** proto to external customers PiperOrigin-RevId: 474010093 Source-Link: https://github.com/googleapis/googleapis/commit/e210283d7f45232b923d200e90ef57d41d0b3580 Source-Link: https://github.com/googleapis/googleapis-gen/commit/46d5c58b647ea9c050a00c3b6825a3cf316f8948 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiNDZkNWM1OGI2NDdlYTljMDUwYTAwYzNiNjgyNWEzY2YzMTZmODk0OCJ9 * 🦉 Updates from 
OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md Co-authored-by: Owl Bot --- .../cloud/bigtable_admin_v2/__init__.py | 4 + .../bigtable_admin_v2/gapic_metadata.json | 10 + .../bigtable_table_admin/async_client.py | 109 ++++++++ .../services/bigtable_table_admin/client.py | 109 ++++++++ .../bigtable_table_admin/transports/base.py | 14 + .../bigtable_table_admin/transports/grpc.py | 26 ++ .../transports/grpc_asyncio.py | 28 ++ .../cloud/bigtable_admin_v2/types/__init__.py | 4 + .../types/bigtable_table_admin.py | 65 +++++ .../cloud/bigtable_admin_v2/types/table.py | 13 + .../fixup_bigtable_admin_v2_keywords.py | 1 + .../test_bigtable_table_admin.py | 250 ++++++++++++++++++ 12 files changed, 633 insertions(+) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py index d77671d8d479..793e16a3f6ff 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py @@ -76,6 +76,8 @@ from .types.bigtable_table_admin import UndeleteTableMetadata from .types.bigtable_table_admin import UndeleteTableRequest from .types.bigtable_table_admin import UpdateBackupRequest +from .types.bigtable_table_admin import UpdateTableMetadata +from .types.bigtable_table_admin import UpdateTableRequest from .types.common import OperationProgress from .types.common import StorageType from .types.instance import AppProfile @@ -173,4 +175,6 @@ "UpdateBackupRequest", "UpdateClusterMetadata", "UpdateInstanceMetadata", + "UpdateTableMetadata", + "UpdateTableRequest", ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_metadata.json b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_metadata.json index a9294167a2b6..7fb6ddd95698 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_metadata.json +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_metadata.json @@ -348,6 +348,11 @@ "methods": [ "update_backup" ] + }, + "UpdateTable": { + "methods": [ + "update_table" + ] } } }, @@ -468,6 +473,11 @@ "methods": [ "update_backup" ] + }, + "UpdateTable": { + "methods": [ + "update_table" + ] } } } diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index ff852e12fc4e..7a4e67d074a6 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -646,6 +646,115 @@ async def get_table( # Done; return the response. return response + async def update_table( + self, + request: Union[bigtable_table_admin.UpdateTableRequest, dict] = None, + *, + table: gba_table.Table = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Updates a specified table. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.UpdateTableRequest, dict]): + The request object. The request for + [UpdateTable][google.bigtable.admin.v2.BigtableTableAdmin.UpdateTable]. 
+ table (:class:`google.cloud.bigtable_admin_v2.types.Table`): + Required. The table to update. The table's ``name`` + field is used to identify the table to update. Format: + ``projects/{project}/instances/{instance}/tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]*`` + + This corresponds to the ``table`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. The list of fields to update. A mask + specifying which fields (e.g. ``deletion_protection``) + in the ``table`` field should be updated. This mask is + relative to the ``table`` field, not to the request + message. The wildcard (*) path is currently not + supported. Currently UpdateTable is only supported for + the following field: + + - ``deletion_protection`` If ``column_families`` is set + in ``update_mask``, it will return an UNIMPLEMENTED + error. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Table` A collection of user data indexed by row, column, and timestamp. + Each table is served using the resources of its + parent cluster. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([table, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_table_admin.UpdateTableRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if table is not None: + request.table = table + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_table, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("table.name", request.table.name),) + ), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gba_table.Table, + metadata_type=bigtable_table_admin.UpdateTableMetadata, + ) + + # Done; return the response. 
+ return response + async def delete_table( self, request: Union[bigtable_table_admin.DeleteTableRequest, dict] = None, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index 70f10e1de3da..d70491c84f7f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -951,6 +951,115 @@ def get_table( # Done; return the response. return response + def update_table( + self, + request: Union[bigtable_table_admin.UpdateTableRequest, dict] = None, + *, + table: gba_table.Table = None, + update_mask: field_mask_pb2.FieldMask = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Updates a specified table. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.UpdateTableRequest, dict]): + The request object. The request for + [UpdateTable][google.bigtable.admin.v2.BigtableTableAdmin.UpdateTable]. + table (google.cloud.bigtable_admin_v2.types.Table): + Required. The table to update. The table's ``name`` + field is used to identify the table to update. Format: + ``projects/{project}/instances/{instance}/tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]*`` + + This corresponds to the ``table`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The list of fields to update. A mask + specifying which fields (e.g. ``deletion_protection``) + in the ``table`` field should be updated. This mask is + relative to the ``table`` field, not to the request + message. The wildcard (*) path is currently not + supported. Currently UpdateTable is only supported for + the following field: + + - ``deletion_protection`` If ``column_families`` is set + in ``update_mask``, it will return an UNIMPLEMENTED + error. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Table` A collection of user data indexed by row, column, and timestamp. + Each table is served using the resources of its + parent cluster. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([table, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.UpdateTableRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, bigtable_table_admin.UpdateTableRequest): + request = bigtable_table_admin.UpdateTableRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if table is not None: + request.table = table + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_table] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("table.name", request.table.name),) + ), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + gba_table.Table, + metadata_type=bigtable_table_admin.UpdateTableMetadata, + ) + + # Done; return the response. + return response + def delete_table( self, request: Union[bigtable_table_admin.DeleteTableRequest, dict] = None, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py index db8222d8930e..ebed352843d0 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py @@ -181,6 +181,11 @@ def _prep_wrapped_messages(self, client_info): default_timeout=60.0, client_info=client_info, ), + self.update_table: gapic_v1.method.wrap_method( + self.update_table, + default_timeout=None, + client_info=client_info, + ), self.delete_table: gapic_v1.method.wrap_method( self.delete_table, default_timeout=60.0, @@ -411,6 +416,15 @@ def get_table( ]: raise NotImplementedError() + @property + def update_table( + self, + ) -> Callable[ + [bigtable_table_admin.UpdateTableRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + @property def delete_table( self, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py index 4c8e85609bd3..fc5fbcee5c44 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py @@ -374,6 +374,32 @@ def get_table( ) return self._stubs["get_table"] + @property + def update_table( + self, + ) -> Callable[[bigtable_table_admin.UpdateTableRequest], operations_pb2.Operation]: + r"""Return a callable for the update table method over gRPC. + + Updates a specified table. + + Returns: + Callable[[~.UpdateTableRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "update_table" not in self._stubs: + self._stubs["update_table"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateTable", + request_serializer=bigtable_table_admin.UpdateTableRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["update_table"] + @property def delete_table( self, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py index 349f810a839a..f96770632592 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py @@ -382,6 +382,34 @@ def get_table( ) return self._stubs["get_table"] + @property + def update_table( + self, + ) -> Callable[ + [bigtable_table_admin.UpdateTableRequest], Awaitable[operations_pb2.Operation] + ]: + r"""Return a callable for the update table method over gRPC. + + Updates a specified table. + + Returns: + Callable[[~.UpdateTableRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_table" not in self._stubs: + self._stubs["update_table"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateTable", + request_serializer=bigtable_table_admin.UpdateTableRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["update_table"] + @property def delete_table( self, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py index 31f4b712f3cb..5a66ddf09760 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py @@ -73,6 +73,8 @@ UndeleteTableMetadata, UndeleteTableRequest, UpdateBackupRequest, + UpdateTableMetadata, + UpdateTableRequest, ) from .common import ( OperationProgress, @@ -156,6 +158,8 @@ "UndeleteTableMetadata", "UndeleteTableRequest", "UpdateBackupRequest", + "UpdateTableMetadata", + "UpdateTableRequest", "OperationProgress", "StorageType", "AppProfile", diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py index 2078ce922c83..fccde3eada8c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py @@ -34,6 +34,8 @@ "ListTablesRequest", "ListTablesResponse", "GetTableRequest", + "UpdateTableRequest", + "UpdateTableMetadata", "DeleteTableRequest", "UndeleteTableRequest", "UndeleteTableMetadata", @@ -444,6 +446,69 @@ class GetTableRequest(proto.Message): ) +class UpdateTableRequest(proto.Message): + r"""The request for + [UpdateTable][google.bigtable.admin.v2.BigtableTableAdmin.UpdateTable]. 
+ + Attributes: + table (google.cloud.bigtable_admin_v2.types.Table): + Required. The table to update. The table's ``name`` field is + used to identify the table to update. Format: + ``projects/{project}/instances/{instance}/tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]*`` + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The list of fields to update. A mask specifying + which fields (e.g. ``deletion_protection``) in the ``table`` + field should be updated. This mask is relative to the + ``table`` field, not to the request message. The wildcard + (*) path is currently not supported. Currently UpdateTable + is only supported for the following field: + + - ``deletion_protection`` If ``column_families`` is set in + ``update_mask``, it will return an UNIMPLEMENTED error. + """ + + table = proto.Field( + proto.MESSAGE, + number=1, + message=gba_table.Table, + ) + update_mask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class UpdateTableMetadata(proto.Message): + r"""Metadata type for the operation returned by + [UpdateTable][google.bigtable.admin.v2.BigtableTableAdmin.UpdateTable]. + + Attributes: + name (str): + The name of the table being updated. + start_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which this operation started. + end_time (google.protobuf.timestamp_pb2.Timestamp): + If set, the time at which this operation + finished or was canceled. + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + start_time = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + end_time = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + + class DeleteTableRequest(proto.Message): r"""Request message for [google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable] diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py index 183b8808639b..ef268548306b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py @@ -102,6 +102,15 @@ class Table(proto.Message): another data source (e.g. a backup), this field will be populated with information about the restore. + deletion_protection (bool): + Set to true to make the table protected + against data loss. i.e. deleting the following + resources through Admin APIs are prohibited: - + The table. + - The column families in the table. + - The instance containing the table. + Note one can still delete the data stored in the + table through Data APIs. 
""" class TimestampGranularity(proto.Enum): @@ -184,6 +193,10 @@ class ReplicationState(proto.Enum): number=6, message="RestoreInfo", ) + deletion_protection = proto.Field( + proto.BOOL, + number=9, + ) class ColumnFamily(proto.Message): diff --git a/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py b/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py index 7623d5593f43..17be56f2faa2 100644 --- a/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py +++ b/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py @@ -80,6 +80,7 @@ class bigtable_adminCallTransformer(cst.CSTTransformer): 'update_backup': ('backup', 'update_mask', ), 'update_cluster': ('name', 'location', 'state', 'serve_nodes', 'cluster_config', 'default_storage_type', 'encryption_config', ), 'update_instance': ('display_name', 'name', 'state', 'type_', 'labels', 'create_time', 'satisfies_pzs', ), + 'update_table': ('table', 'update_mask', ), } def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index 71d3b998b12f..5ba93c6ffa85 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -745,6 +745,7 @@ def test_create_table(request_type, transport: str = "grpc"): call.return_value = gba_table.Table( name="name_value", granularity=gba_table.Table.TimestampGranularity.MILLIS, + deletion_protection=True, ) response = client.create_table(request) @@ -757,6 +758,7 @@ def test_create_table(request_type, transport: str = "grpc"): assert isinstance(response, gba_table.Table) assert response.name == "name_value" assert response.granularity == gba_table.Table.TimestampGranularity.MILLIS + assert response.deletion_protection is True def test_create_table_empty_call(): @@ -796,6 +798,7 @@ async def test_create_table_async( gba_table.Table( name="name_value", granularity=gba_table.Table.TimestampGranularity.MILLIS, + deletion_protection=True, ) ) response = await client.create_table(request) @@ -809,6 +812,7 @@ async def test_create_table_async( assert isinstance(response, gba_table.Table) assert response.name == "name_value" assert response.granularity == gba_table.Table.TimestampGranularity.MILLIS + assert response.deletion_protection is True @pytest.mark.asyncio @@ -1679,6 +1683,7 @@ def test_get_table(request_type, transport: str = "grpc"): call.return_value = table.Table( name="name_value", granularity=table.Table.TimestampGranularity.MILLIS, + deletion_protection=True, ) response = client.get_table(request) @@ -1691,6 +1696,7 @@ def test_get_table(request_type, transport: str = "grpc"): assert isinstance(response, table.Table) assert response.name == "name_value" assert response.granularity == table.Table.TimestampGranularity.MILLIS + assert response.deletion_protection is True def test_get_table_empty_call(): @@ -1729,6 +1735,7 @@ async def test_get_table_async( table.Table( name="name_value", granularity=table.Table.TimestampGranularity.MILLIS, + deletion_protection=True, ) ) response = await client.get_table(request) @@ -1742,6 +1749,7 @@ async def test_get_table_async( assert isinstance(response, table.Table) assert response.name == "name_value" assert 
response.granularity == table.Table.TimestampGranularity.MILLIS + assert response.deletion_protection is True @pytest.mark.asyncio @@ -1888,6 +1896,243 @@ async def test_get_table_flattened_error_async(): ) +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.UpdateTableRequest, + dict, + ], +) +def test_update_table(request_type, transport: str = "grpc"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.update_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.UpdateTableRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_update_table_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_table), "__call__") as call: + client.update_table() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.UpdateTableRequest() + + +@pytest.mark.asyncio +async def test_update_table_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.UpdateTableRequest, +): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.update_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.UpdateTableRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_update_table_async_from_dict(): + await test_update_table_async(request_type=dict) + + +def test_update_table_field_headers(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = bigtable_table_admin.UpdateTableRequest() + + request.table.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_table), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.update_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "table.name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_update_table_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.UpdateTableRequest() + + request.table.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_table), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.update_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "table.name=name_value", + ) in kw["metadata"] + + +def test_update_table_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_table( + table=gba_table.Table(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].table + mock_val = gba_table.Table(name="name_value") + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + assert arg == mock_val + + +def test_update_table_flattened_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_table( + bigtable_table_admin.UpdateTableRequest(), + table=gba_table.Table(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.asyncio +async def test_update_table_flattened_async(): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.update_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_table( + table=gba_table.Table(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].table + mock_val = gba_table.Table(name="name_value") + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_update_table_flattened_error_async(): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_table( + bigtable_table_admin.UpdateTableRequest(), + table=gba_table.Table(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + @pytest.mark.parametrize( "request_type", [ @@ -2361,6 +2606,7 @@ def test_modify_column_families(request_type, transport: str = "grpc"): call.return_value = table.Table( name="name_value", granularity=table.Table.TimestampGranularity.MILLIS, + deletion_protection=True, ) response = client.modify_column_families(request) @@ -2373,6 +2619,7 @@ def test_modify_column_families(request_type, transport: str = "grpc"): assert isinstance(response, table.Table) assert response.name == "name_value" assert response.granularity == table.Table.TimestampGranularity.MILLIS + assert response.deletion_protection is True def test_modify_column_families_empty_call(): @@ -2416,6 +2663,7 @@ async def test_modify_column_families_async( table.Table( name="name_value", granularity=table.Table.TimestampGranularity.MILLIS, + deletion_protection=True, ) ) response = await client.modify_column_families(request) @@ -2429,6 +2677,7 @@ async def test_modify_column_families_async( assert isinstance(response, table.Table) assert response.name == "name_value" assert response.granularity == table.Table.TimestampGranularity.MILLIS + assert response.deletion_protection is True @pytest.mark.asyncio @@ -6839,6 +7088,7 @@ def test_bigtable_table_admin_base_transport(): "create_table_from_snapshot", "list_tables", "get_table", + "update_table", "delete_table", "undelete_table", "modify_column_families", From f010ea6c6b737ea928247f9d1a74e03e9e0e562c Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Mon, 19 Sep 2022 15:22:43 -0400 Subject: [PATCH 658/892] docs: Remove unnecessary comment (#674) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * docs: Remove unnecessary comment PiperOrigin-RevId: 474807836 Source-Link: https://github.com/googleapis/googleapis/commit/dfd68f74ddecc6d83ec50db6a69f695de9e304c3 Source-Link: https://github.com/googleapis/googleapis-gen/commit/fe305e0252bc9a88466e1b96c2f2464dd729b978 Copy-Tag: 
eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiZmUzMDVlMDI1MmJjOWE4ODQ2NmUxYjk2YzJmMjQ2NGRkNzI5Yjk3OCJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md Co-authored-by: Owl Bot --- .../services/bigtable_table_admin/async_client.py | 3 +-- .../bigtable_admin_v2/services/bigtable_table_admin/client.py | 3 +-- .../cloud/bigtable_admin_v2/types/bigtable_table_admin.py | 3 +-- 3 files changed, 3 insertions(+), 6 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index 7a4e67d074a6..39761c3b610a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -664,8 +664,7 @@ async def update_table( [UpdateTable][google.bigtable.admin.v2.BigtableTableAdmin.UpdateTable]. table (:class:`google.cloud.bigtable_admin_v2.types.Table`): Required. The table to update. The table's ``name`` - field is used to identify the table to update. Format: - ``projects/{project}/instances/{instance}/tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]*`` + field is used to identify the table to update. This corresponds to the ``table`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index d70491c84f7f..481e73d6502f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -969,8 +969,7 @@ def update_table( [UpdateTable][google.bigtable.admin.v2.BigtableTableAdmin.UpdateTable]. table (google.cloud.bigtable_admin_v2.types.Table): Required. The table to update. The table's ``name`` - field is used to identify the table to update. Format: - ``projects/{project}/instances/{instance}/tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]*`` + field is used to identify the table to update. This corresponds to the ``table`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py index fccde3eada8c..cbc3a1d8c354 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py @@ -453,8 +453,7 @@ class UpdateTableRequest(proto.Message): Attributes: table (google.cloud.bigtable_admin_v2.types.Table): Required. The table to update. The table's ``name`` field is - used to identify the table to update. Format: - ``projects/{project}/instances/{instance}/tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]*`` + used to identify the table to update. update_mask (google.protobuf.field_mask_pb2.FieldMask): Required. The list of fields to update. A mask specifying which fields (e.g. 
``deletion_protection``) in the ``table`` From fe544d82b8e8999720ab5b18e5551278adab6881 Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Wed, 21 Sep 2022 14:42:52 -0700 Subject: [PATCH 659/892] chore(main): release 2.12.0 (#673) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- packages/google-cloud-bigtable/CHANGELOG.md | 12 ++++++++++++ packages/google-cloud-bigtable/setup.py | 2 +- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 5783517a78b6..5ed69b363902 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,18 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.12.0](https://github.com/googleapis/python-bigtable/compare/v2.11.3...v2.12.0) (2022-09-19) + + +### Features + +* Publish CBT deletion_protection field in Table, UpdateTableRequest, and UpdateTable API ([#670](https://github.com/googleapis/python-bigtable/issues/670)) ([c57289c](https://github.com/googleapis/python-bigtable/commit/c57289c03335380694580202d746ca4f679dce9b)) + + +### Documentation + +* Remove unnecessary comment ([#674](https://github.com/googleapis/python-bigtable/issues/674)) ([9c62655](https://github.com/googleapis/python-bigtable/commit/9c62655de7fecd93ee7a1bb95b208d94798727cd)) + ## [2.11.3](https://github.com/googleapis/python-bigtable/compare/v2.11.2...v2.11.3) (2022-08-17) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 2c98c154b8e8..72fc4fd30da0 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -22,7 +22,7 @@ name = "google-cloud-bigtable" description = "Google Cloud Bigtable API client library" -version = "2.11.3" +version = "2.12.0" # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' From d39f28ff27af0cbad7e54923751ace8703652313 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Mon, 26 Sep 2022 10:37:47 -0400 Subject: [PATCH 660/892] feat: publish the RequestStats proto (#676) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: publish the RequestStats proto PiperOrigin-RevId: 476399682 Source-Link: https://github.com/googleapis/googleapis/commit/0c4e682e302268332468f304d0411048e24c85db Source-Link: https://github.com/googleapis/googleapis-gen/commit/ff87f69aad1f2ff06772f917eecbea649797d9d5 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiZmY4N2Y2OWFhZDFmMmZmMDY3NzJmOTE3ZWVjYmVhNjQ5Nzk3ZDlkNSJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md Co-authored-by: Owl Bot --- .../google/cloud/bigtable_v2/__init__.py | 10 + .../services/bigtable/async_client.py | 7 +- .../bigtable_v2/services/bigtable/client.py | 7 +- .../cloud/bigtable_v2/types/__init__.py | 12 + .../cloud/bigtable_v2/types/bigtable.py | 55 ++++- .../cloud/bigtable_v2/types/request_stats.py | 205 ++++++++++++++++++ .../scripts/fixup_bigtable_v2_keywords.py | 2 +- .../unit/gapic/bigtable_v2/test_bigtable.py | 1 + 8 files changed, 285 insertions(+), 14 deletions(-) create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/request_stats.py diff --git 
a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py index 5f2893c50863..6a880dfa42c9 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py @@ -43,10 +43,16 @@ from .types.data import RowSet from .types.data import TimestampRange from .types.data import ValueRange +from .types.request_stats import AllReadStats +from .types.request_stats import ReadEfficiencyStats +from .types.request_stats import ReadIteratorStats +from .types.request_stats import RequestLatencyStats +from .types.request_stats import RequestStats from .types.response_params import ResponseParams __all__ = ( "BigtableAsyncClient", + "AllReadStats", "BigtableClient", "Cell", "CheckAndMutateRowRequest", @@ -61,11 +67,15 @@ "Mutation", "PingAndWarmRequest", "PingAndWarmResponse", + "ReadEfficiencyStats", + "ReadIteratorStats", "ReadModifyWriteRowRequest", "ReadModifyWriteRowResponse", "ReadModifyWriteRule", "ReadRowsRequest", "ReadRowsResponse", + "RequestLatencyStats", + "RequestStats", "ResponseParams", "Row", "RowFilter", diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py index c5f673a7452c..a5becca8df2e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -43,6 +43,7 @@ from google.cloud.bigtable_v2.types import bigtable from google.cloud.bigtable_v2.types import data +from google.cloud.bigtable_v2.types import request_stats from .transports.base import BigtableTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import BigtableGrpcAsyncIOTransport from .client import BigtableClient @@ -238,10 +239,8 @@ def read_rows( on the ``request`` instance; if ``request`` is provided, this should not be set. app_profile_id (:class:`str`): - This value specifies routing for - replication. If not specified, the - "default" application profile will be - used. + This value specifies routing for replication. This API + only accepts the empty value of app_profile_id. This corresponds to the ``app_profile_id`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py index 30dd2934fc8b..df2341dbc13c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py @@ -36,6 +36,7 @@ from google.cloud.bigtable_v2.types import bigtable from google.cloud.bigtable_v2.types import data +from google.cloud.bigtable_v2.types import request_stats from .transports.base import BigtableTransport, DEFAULT_CLIENT_INFO from .transports.grpc import BigtableGrpcTransport from .transports.grpc_asyncio import BigtableGrpcAsyncIOTransport @@ -474,10 +475,8 @@ def read_rows( on the ``request`` instance; if ``request`` is provided, this should not be set. app_profile_id (str): - This value specifies routing for - replication. If not specified, the - "default" application profile will be - used. + This value specifies routing for replication. 
This API + only accepts the empty value of app_profile_id. This corresponds to the ``app_profile_id`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py index ec6fbafd4064..17bb66ae69e0 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py @@ -43,6 +43,13 @@ TimestampRange, ValueRange, ) +from .request_stats import ( + AllReadStats, + ReadEfficiencyStats, + ReadIteratorStats, + RequestLatencyStats, + RequestStats, +) from .response_params import ( ResponseParams, ) @@ -74,5 +81,10 @@ "RowSet", "TimestampRange", "ValueRange", + "AllReadStats", + "ReadEfficiencyStats", + "ReadIteratorStats", + "RequestLatencyStats", + "RequestStats", "ResponseParams", ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py index 72785c264b03..3082fe73204e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py @@ -16,6 +16,7 @@ import proto # type: ignore from google.cloud.bigtable_v2.types import data +from google.cloud.bigtable_v2.types import request_stats as gb_request_stats from google.protobuf import wrappers_pb2 # type: ignore from google.rpc import status_pb2 # type: ignore @@ -50,9 +51,8 @@ class ReadRowsRequest(proto.Message): Values are of the form ``projects//instances//tables/
``. app_profile_id (str): - This value specifies routing for replication. - If not specified, the "default" application - profile will be used. + This value specifies routing for replication. This API only + accepts the empty value of app_profile_id. rows (google.cloud.bigtable_v2.types.RowSet): The row keys and/or ranges to read sequentially. If not specified, reads from all @@ -65,8 +65,21 @@ class ReadRowsRequest(proto.Message): The read will stop after committing to N rows' worth of results. The default (zero) is to return all results. + request_stats_view (google.cloud.bigtable_v2.types.ReadRowsRequest.RequestStatsView): + The view into RequestStats, as described + above. """ + class RequestStatsView(proto.Enum): + r"""The desired view into RequestStats that should be returned in + the response. + See also: RequestStats message. + """ + REQUEST_STATS_VIEW_UNSPECIFIED = 0 + REQUEST_STATS_NONE = 1 + REQUEST_STATS_EFFICIENCY = 2 + REQUEST_STATS_FULL = 3 + table_name = proto.Field( proto.STRING, number=1, @@ -89,6 +102,11 @@ class ReadRowsRequest(proto.Message): proto.INT64, number=4, ) + request_stats_view = proto.Field( + proto.ENUM, + number=6, + enum=RequestStatsView, + ) class ReadRowsResponse(proto.Message): @@ -109,6 +127,28 @@ class ReadRowsResponse(proto.Message): that was filtered out since the last committed row key, allowing the client to skip that work on a retry. + request_stats (google.cloud.bigtable_v2.types.RequestStats): + If requested, provide enhanced query performance statistics. + The semantics dictate: + + - request_stats is empty on every (streamed) response, + except + - request_stats has non-empty information after all chunks + have been streamed, where the ReadRowsResponse message + only contains request_stats. + + - For example, if a read request would have returned an + empty response instead a single ReadRowsResponse is + streamed with empty chunks and request_stats filled. + + Visually, response messages will stream as follows: ... -> + {chunks: [...]} -> {chunks: [], request_stats: {...}} + \_\ **/ \_**\ \__________/ Primary response Trailer of + RequestStats info + + Or if the read did not return any values: {chunks: [], + request_stats: {...}} \________________________________/ + Trailer of RequestStats info """ class CellChunk(proto.Message): @@ -232,6 +272,11 @@ class CellChunk(proto.Message): proto.BYTES, number=2, ) + request_stats = proto.Field( + proto.MESSAGE, + number=3, + message=gb_request_stats.RequestStats, + ) class SampleRowKeysRequest(proto.Message): @@ -370,8 +415,8 @@ class Entry(proto.Message): Required. Changes to be atomically applied to the specified row. Mutations are applied in order, meaning that earlier mutations can be - masked by later ones. - You must specify at least one mutation. + masked by later ones. You must specify at least + one mutation. """ row_key = proto.Field( diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/request_stats.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/request_stats.py new file mode 100644 index 000000000000..d6f30c1c250f --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/request_stats.py @@ -0,0 +1,205 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto # type: ignore + +from google.protobuf import duration_pb2 # type: ignore + + +__protobuf__ = proto.module( + package="google.bigtable.v2", + manifest={ + "ReadIteratorStats", + "RequestLatencyStats", + "ReadEfficiencyStats", + "AllReadStats", + "RequestStats", + }, +) + + +class ReadIteratorStats(proto.Message): + r"""ReadIteratorStats captures information about the iteration of + rows or cells over the course of a read, e.g. how many results + were scanned in a read operation versus the results returned. + + Attributes: + rows_seen_count (int): + The rows seen (scanned) as part of the + request. This includes the count of rows + returned, as captured below. + rows_returned_count (int): + The rows returned as part of the request. + cells_seen_count (int): + The cells seen (scanned) as part of the + request. This includes the count of cells + returned, as captured below. + cells_returned_count (int): + The cells returned as part of the request. + deletes_seen_count (int): + The deletes seen as part of the request. + """ + + rows_seen_count = proto.Field( + proto.INT64, + number=1, + ) + rows_returned_count = proto.Field( + proto.INT64, + number=2, + ) + cells_seen_count = proto.Field( + proto.INT64, + number=3, + ) + cells_returned_count = proto.Field( + proto.INT64, + number=4, + ) + deletes_seen_count = proto.Field( + proto.INT64, + number=5, + ) + + +class RequestLatencyStats(proto.Message): + r"""RequestLatencyStats provides a measurement of the latency of + the request as it interacts with different systems over its + lifetime, e.g. how long the request took to execute within a + frontend server. + + Attributes: + frontend_server_latency (google.protobuf.duration_pb2.Duration): + The latency measured by the frontend server + handling this request, from when the request was + received, to when this value is sent back in the + response. For more context on the component that + is measuring this latency, see: + https://cloud.google.com/bigtable/docs/overview + Note: This value may be slightly shorter than + the value reported into aggregate latency + metrics in Monitoring for this request + (https://cloud.google.com/bigtable/docs/monitoring-instance) + as this value needs to be sent in the response + before the latency measurement including that + transmission is finalized. + """ + + frontend_server_latency = proto.Field( + proto.MESSAGE, + number=1, + message=duration_pb2.Duration, + ) + + +class ReadEfficiencyStats(proto.Message): + r"""ReadEfficiencyStats captures information about the efficiency + of a read. + + Attributes: + read_iterator_stats (google.cloud.bigtable_v2.types.ReadIteratorStats): + Iteration stats describe how efficient the + read is, e.g. comparing rows seen vs. rows + returned or cells seen vs cells returned can + provide an indication of read efficiency (the + higher the ratio of seen to retuned the better). + request_latency_stats (google.cloud.bigtable_v2.types.RequestLatencyStats): + Request latency stats describe the time taken + to complete a request, from the server side. 
+ """ + + read_iterator_stats = proto.Field( + proto.MESSAGE, + number=1, + message="ReadIteratorStats", + ) + request_latency_stats = proto.Field( + proto.MESSAGE, + number=2, + message="RequestLatencyStats", + ) + + +class AllReadStats(proto.Message): + r"""AllReadStats captures all known information about a read. + + Attributes: + read_iterator_stats (google.cloud.bigtable_v2.types.ReadIteratorStats): + Iteration stats describe how efficient the + read is, e.g. comparing rows seen vs. rows + returned or cells seen vs cells returned can + provide an indication of read efficiency (the + higher the ratio of seen to retuned the better). + request_latency_stats (google.cloud.bigtable_v2.types.RequestLatencyStats): + Request latency stats describe the time taken + to complete a request, from the server side. + """ + + read_iterator_stats = proto.Field( + proto.MESSAGE, + number=1, + message="ReadIteratorStats", + ) + request_latency_stats = proto.Field( + proto.MESSAGE, + number=2, + message="RequestLatencyStats", + ) + + +class RequestStats(proto.Message): + r"""RequestStats is the container for additional information pertaining + to a single request, helpful for evaluating the performance of the + sent request. Currently, there are the following supported methods: + + - google.bigtable.v2.ReadRows + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + read_efficiency_stats (google.cloud.bigtable_v2.types.ReadEfficiencyStats): + Available with the + ReadRowsRequest.RequestStatsView.REQUEST_STATS_EFFICIENCY + view, see package google.bigtable.v2. + + This field is a member of `oneof`_ ``stats``. + all_read_stats (google.cloud.bigtable_v2.types.AllReadStats): + Available with the + ReadRowsRequest.RequestStatsView.REQUEST_STATS_FULL view, + see package google.bigtable.v2. + + This field is a member of `oneof`_ ``stats``. 
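        A minimal sketch of how a caller might ask for these stats on a read
        (illustrative only; the table path below is an assumption)::

            from google.cloud import bigtable_v2

            client = bigtable_v2.BigtableClient()
            request = bigtable_v2.ReadRowsRequest(
                table_name="projects/my-project/instances/my-instance/tables/my-table",
                request_stats_view=(
                    bigtable_v2.ReadRowsRequest.RequestStatsView.REQUEST_STATS_FULL
                ),
            )
            response = None
            for response in client.read_rows(request):
                for chunk in response.chunks:
                    pass  # process cell chunks as usual
            # Per the semantics above, only the trailing response carries stats.
            if response is not None:
                print(response.request_stats)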
+ """ + + read_efficiency_stats = proto.Field( + proto.MESSAGE, + number=1, + oneof="stats", + message="ReadEfficiencyStats", + ) + all_read_stats = proto.Field( + proto.MESSAGE, + number=2, + oneof="stats", + message="AllReadStats", + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py b/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py index 7459d0806c48..4424228fd282 100644 --- a/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py +++ b/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py @@ -44,7 +44,7 @@ class bigtableCallTransformer(cst.CSTTransformer): 'mutate_rows': ('table_name', 'entries', 'app_profile_id', ), 'ping_and_warm': ('name', 'app_profile_id', ), 'read_modify_write_row': ('table_name', 'row_key', 'rules', 'app_profile_id', ), - 'read_rows': ('table_name', 'app_profile_id', 'rows', 'filter', 'rows_limit', ), + 'read_rows': ('table_name', 'app_profile_id', 'rows', 'filter', 'rows_limit', 'request_stats_view', ), 'sample_row_keys': ('table_name', 'app_profile_id', ), } diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py index f3207869b83b..38f1bbd80701 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py @@ -42,6 +42,7 @@ from google.cloud.bigtable_v2.services.bigtable import transports from google.cloud.bigtable_v2.types import bigtable from google.cloud.bigtable_v2.types import data +from google.cloud.bigtable_v2.types import request_stats from google.oauth2 import service_account import google.auth From 83fb2dc4312b6cf1fafbda2a302ab36a1a084797 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 29 Sep 2022 15:41:45 -0400 Subject: [PATCH 661/892] fix(deps): require protobuf >= 3.20.2 (#679) * chore: exclude requirements.txt file from renovate-bot Source-Link: https://github.com/googleapis/synthtool/commit/f58d3135a2fab20e225d98741dbc06d57459b816 Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:7a40313731a7cb1454eef6b33d3446ebb121836738dc3ab3d2d3ded5268c35b6 * update constraints files * fix(deps): require protobuf 3.20.2 Co-authored-by: Owl Bot Co-authored-by: Anthonios Partheniou --- .../.github/.OwlBot.lock.yaml | 2 +- .../.kokoro/requirements.txt | 49 +++++++++---------- packages/google-cloud-bigtable/setup.py | 2 +- .../testing/constraints-3.7.txt | 2 +- 4 files changed, 27 insertions(+), 28 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index aa547962eb0a..3815c983cb16 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,4 +13,4 @@ # limitations under the License. 
docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:e09366bdf0fd9c8976592988390b24d53583dd9f002d476934da43725adbb978 + digest: sha256:7a40313731a7cb1454eef6b33d3446ebb121836738dc3ab3d2d3ded5268c35b6 diff --git a/packages/google-cloud-bigtable/.kokoro/requirements.txt b/packages/google-cloud-bigtable/.kokoro/requirements.txt index 385f2d4d6106..d15994bac93c 100644 --- a/packages/google-cloud-bigtable/.kokoro/requirements.txt +++ b/packages/google-cloud-bigtable/.kokoro/requirements.txt @@ -325,31 +325,30 @@ platformdirs==2.5.2 \ --hash=sha256:027d8e83a2d7de06bbac4e5ef7e023c02b863d7ea5d079477e722bb41ab25788 \ --hash=sha256:58c8abb07dcb441e6ee4b11d8df0ac856038f944ab98b7be6b27b2a3c7feef19 # via virtualenv -protobuf==3.20.1 \ - --hash=sha256:06059eb6953ff01e56a25cd02cca1a9649a75a7e65397b5b9b4e929ed71d10cf \ - --hash=sha256:097c5d8a9808302fb0da7e20edf0b8d4703274d140fd25c5edabddcde43e081f \ - --hash=sha256:284f86a6207c897542d7e956eb243a36bb8f9564c1742b253462386e96c6b78f \ - --hash=sha256:32ca378605b41fd180dfe4e14d3226386d8d1b002ab31c969c366549e66a2bb7 \ - --hash=sha256:3cc797c9d15d7689ed507b165cd05913acb992d78b379f6014e013f9ecb20996 \ - --hash=sha256:62f1b5c4cd6c5402b4e2d63804ba49a327e0c386c99b1675c8a0fefda23b2067 \ - --hash=sha256:69ccfdf3657ba59569c64295b7d51325f91af586f8d5793b734260dfe2e94e2c \ - --hash=sha256:6f50601512a3d23625d8a85b1638d914a0970f17920ff39cec63aaef80a93fb7 \ - --hash=sha256:7403941f6d0992d40161aa8bb23e12575637008a5a02283a930addc0508982f9 \ - --hash=sha256:755f3aee41354ae395e104d62119cb223339a8f3276a0cd009ffabfcdd46bb0c \ - --hash=sha256:77053d28427a29987ca9caf7b72ccafee011257561259faba8dd308fda9a8739 \ - --hash=sha256:7e371f10abe57cee5021797126c93479f59fccc9693dafd6bd5633ab67808a91 \ - --hash=sha256:9016d01c91e8e625141d24ec1b20fed584703e527d28512aa8c8707f105a683c \ - --hash=sha256:9be73ad47579abc26c12024239d3540e6b765182a91dbc88e23658ab71767153 \ - --hash=sha256:adc31566d027f45efe3f44eeb5b1f329da43891634d61c75a5944e9be6dd42c9 \ - --hash=sha256:adfc6cf69c7f8c50fd24c793964eef18f0ac321315439d94945820612849c388 \ - --hash=sha256:af0ebadc74e281a517141daad9d0f2c5d93ab78e9d455113719a45a49da9db4e \ - --hash=sha256:cb29edb9eab15742d791e1025dd7b6a8f6fcb53802ad2f6e3adcb102051063ab \ - --hash=sha256:cd68be2559e2a3b84f517fb029ee611546f7812b1fdd0aa2ecc9bc6ec0e4fdde \ - --hash=sha256:cdee09140e1cd184ba9324ec1df410e7147242b94b5f8b0c64fc89e38a8ba531 \ - --hash=sha256:db977c4ca738dd9ce508557d4fce0f5aebd105e158c725beec86feb1f6bc20d8 \ - --hash=sha256:dd5789b2948ca702c17027c84c2accb552fc30f4622a98ab5c51fcfe8c50d3e7 \ - --hash=sha256:e250a42f15bf9d5b09fe1b293bdba2801cd520a9f5ea2d7fb7536d4441811d20 \ - --hash=sha256:ff8d8fa42675249bb456f5db06c00de6c2f4c27a065955917b28c4f15978b9c3 +protobuf==3.20.2 \ + --hash=sha256:03d76b7bd42ac4a6e109742a4edf81ffe26ffd87c5993126d894fe48a120396a \ + --hash=sha256:09e25909c4297d71d97612f04f41cea8fa8510096864f2835ad2f3b3df5a5559 \ + --hash=sha256:18e34a10ae10d458b027d7638a599c964b030c1739ebd035a1dfc0e22baa3bfe \ + --hash=sha256:291fb4307094bf5ccc29f424b42268640e00d5240bf0d9b86bf3079f7576474d \ + --hash=sha256:2c0b040d0b5d5d207936ca2d02f00f765906622c07d3fa19c23a16a8ca71873f \ + --hash=sha256:384164994727f274cc34b8abd41a9e7e0562801361ee77437099ff6dfedd024b \ + --hash=sha256:3cb608e5a0eb61b8e00fe641d9f0282cd0eedb603be372f91f163cbfbca0ded0 \ + --hash=sha256:5d9402bf27d11e37801d1743eada54372f986a372ec9679673bfcc5c60441151 \ + --hash=sha256:712dca319eee507a1e7df3591e639a2b112a2f4a62d40fe7832a16fd19151750 \ + 
--hash=sha256:7a5037af4e76c975b88c3becdf53922b5ffa3f2cddf657574a4920a3b33b80f3 \ + --hash=sha256:8228e56a865c27163d5d1d1771d94b98194aa6917bcfb6ce139cbfa8e3c27334 \ + --hash=sha256:84a1544252a933ef07bb0b5ef13afe7c36232a774affa673fc3636f7cee1db6c \ + --hash=sha256:84fe5953b18a383fd4495d375fe16e1e55e0a3afe7b4f7b4d01a3a0649fcda9d \ + --hash=sha256:9c673c8bfdf52f903081816b9e0e612186684f4eb4c17eeb729133022d6032e3 \ + --hash=sha256:9f876a69ca55aed879b43c295a328970306e8e80a263ec91cf6e9189243c613b \ + --hash=sha256:a9e5ae5a8e8985c67e8944c23035a0dff2c26b0f5070b2f55b217a1c33bbe8b1 \ + --hash=sha256:b4fdb29c5a7406e3f7ef176b2a7079baa68b5b854f364c21abe327bbeec01cdb \ + --hash=sha256:c184485e0dfba4dfd451c3bd348c2e685d6523543a0f91b9fd4ae90eb09e8422 \ + --hash=sha256:c9cdf251c582c16fd6a9f5e95836c90828d51b0069ad22f463761d27c6c19019 \ + --hash=sha256:e39cf61bb8582bda88cdfebc0db163b774e7e03364bbf9ce1ead13863e81e359 \ + --hash=sha256:e8fbc522303e09036c752a0afcc5c0603e917222d8bedc02813fd73b4b4ed804 \ + --hash=sha256:f34464ab1207114e73bba0794d1257c150a2b89b7a9faf504e00af7c9fd58978 \ + --hash=sha256:f52dabc96ca99ebd2169dadbe018824ebda08a795c7684a0b7d203a290f3adb0 # via # gcp-docuploader # gcp-releasetool diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 72fc4fd30da0..63ca7c6ec570 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -33,7 +33,7 @@ "google-cloud-core >= 1.4.1, <3.0.0dev", "grpc-google-iam-v1 >= 0.12.4, <1.0.0dev", "proto-plus >= 1.22.0, <2.0.0dev", - "protobuf >= 3.19.0, <5.0.0dev", + "protobuf >= 3.20.2, <5.0.0dev", ] extras = {"libcst": "libcst >= 0.2.5"} diff --git a/packages/google-cloud-bigtable/testing/constraints-3.7.txt b/packages/google-cloud-bigtable/testing/constraints-3.7.txt index f9281ed96797..8dacb7c6ceb8 100644 --- a/packages/google-cloud-bigtable/testing/constraints-3.7.txt +++ b/packages/google-cloud-bigtable/testing/constraints-3.7.txt @@ -10,5 +10,5 @@ google-cloud-core==1.4.1 grpc-google-iam-v1==0.12.4 proto-plus==1.22.0 libcst==0.2.5 -protobuf==3.19.0 +protobuf==3.20.2 From cad69712ec66c87fd3fb53072b2e5da5d9836c6f Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Mon, 3 Oct 2022 20:47:08 -0400 Subject: [PATCH 662/892] chore(main): release 2.13.0 (#678) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- packages/google-cloud-bigtable/CHANGELOG.md | 12 ++++++++++++ packages/google-cloud-bigtable/setup.py | 2 +- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 5ed69b363902..c2e894ae8c46 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,18 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.13.0](https://github.com/googleapis/python-bigtable/compare/v2.12.0...v2.13.0) (2022-09-29) + + +### Features + +* Publish the RequestStats proto ([#676](https://github.com/googleapis/python-bigtable/issues/676)) ([199949b](https://github.com/googleapis/python-bigtable/commit/199949b2a930706654680b91a93f2a903bf112bf)) + + +### Bug Fixes + +* **deps:** Require protobuf >= 3.20.2 ([#679](https://github.com/googleapis/python-bigtable/issues/679)) ([030ef38](https://github.com/googleapis/python-bigtable/commit/030ef3868c442a8a21c4b4d6217b99cab09a1be7)) + ## 
[2.12.0](https://github.com/googleapis/python-bigtable/compare/v2.11.3...v2.12.0) (2022-09-19) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 63ca7c6ec570..adcfa54177f0 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -22,7 +22,7 @@ name = "google-cloud-bigtable" description = "Google Cloud Bigtable API client library" -version = "2.12.0" +version = "2.13.0" # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' From b198c96247e7378b89476ab5080fe8f11d2abb41 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 4 Oct 2022 16:05:29 +0200 Subject: [PATCH 663/892] chore(deps): update all dependencies (#675) --- packages/google-cloud-bigtable/samples/beam/requirements.txt | 2 +- packages/google-cloud-bigtable/samples/hello/requirements.txt | 2 +- .../samples/instanceadmin/requirements.txt | 2 +- .../samples/metricscaler/requirements.txt | 4 ++-- .../google-cloud-bigtable/samples/quickstart/requirements.txt | 2 +- .../samples/snippets/deletes/requirements.txt | 2 +- .../samples/snippets/filters/requirements.txt | 2 +- .../samples/snippets/reads/requirements.txt | 2 +- .../samples/snippets/writes/requirements.txt | 2 +- .../google-cloud-bigtable/samples/tableadmin/requirements.txt | 2 +- 10 files changed, 11 insertions(+), 11 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index f6c955f47355..f3278b4d15cf 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ apache-beam==2.41.0 -google-cloud-bigtable==2.11.3 +google-cloud-bigtable==2.13.0 google-cloud-core==2.3.2 diff --git a/packages/google-cloud-bigtable/samples/hello/requirements.txt b/packages/google-cloud-bigtable/samples/hello/requirements.txt index 9e2138c78312..6b717a29e311 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.11.3 +google-cloud-bigtable==2.13.0 google-cloud-core==2.3.2 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt index 5a6a44405c57..40dcb9fb48e9 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.11.3 +google-cloud-bigtable==2.13.0 backoff==2.1.2 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index 1b63203a9225..5b85e133fafe 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.11.3 -google-cloud-monitoring==2.11.1 +google-cloud-bigtable==2.13.0 +google-cloud-monitoring==2.11.2 diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt index 101c6d532dac..aeeadefcb162 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt @@ -1 +1 @@ 
-google-cloud-bigtable==2.11.3 +google-cloud-bigtable==2.13.0 diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt index bcbe5975a1e2..4281da43f2b0 100644 --- a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.11.3 +google-cloud-bigtable==2.13.0 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt index bcbe5975a1e2..4281da43f2b0 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.11.3 +google-cloud-bigtable==2.13.0 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt index bcbe5975a1e2..4281da43f2b0 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.11.3 +google-cloud-bigtable==2.13.0 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt index c6a71aed23e7..af6eb72ac236 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.11.3 \ No newline at end of file +google-cloud-bigtable==2.13.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt index 101c6d532dac..aeeadefcb162 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.11.3 +google-cloud-bigtable==2.13.0 From 1dc930925689d19ba8f8c350d662b7e9bc9690b2 Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Thu, 6 Oct 2022 15:40:10 +0200 Subject: [PATCH 664/892] chore(deps): update dependency backoff to v2.2.1 (#681) --- .../samples/instanceadmin/requirements.txt | 2 +- .../samples/snippets/writes/requirements-test.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt index 40dcb9fb48e9..fe20918e48fa 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt @@ -1,2 +1,2 @@ google-cloud-bigtable==2.13.0 -backoff==2.1.2 +backoff==2.2.1 diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt index 6759e75e03a0..3bc76cee52db 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt +++ 
b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt @@ -1,2 +1,2 @@ -backoff==2.1.2 +backoff==2.2.1 pytest==7.1.3 From 5c2c653c6256df5dff438062185850c643b3c823 Mon Sep 17 00:00:00 2001 From: Anthonios Partheniou Date: Mon, 10 Oct 2022 13:23:06 -0400 Subject: [PATCH 665/892] fix(deps): allow protobuf 3.19.5 (#682) * fix(deps): allow protobuf 3.19.5 * explicitly exclude protobuf 4.21.0 --- packages/google-cloud-bigtable/setup.py | 2 +- packages/google-cloud-bigtable/testing/constraints-3.7.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index adcfa54177f0..1cab5d833cda 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -33,7 +33,7 @@ "google-cloud-core >= 1.4.1, <3.0.0dev", "grpc-google-iam-v1 >= 0.12.4, <1.0.0dev", "proto-plus >= 1.22.0, <2.0.0dev", - "protobuf >= 3.20.2, <5.0.0dev", + "protobuf>=3.19.5,<5.0.0dev,!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5", ] extras = {"libcst": "libcst >= 0.2.5"} diff --git a/packages/google-cloud-bigtable/testing/constraints-3.7.txt b/packages/google-cloud-bigtable/testing/constraints-3.7.txt index 8dacb7c6ceb8..fb3cf92d547c 100644 --- a/packages/google-cloud-bigtable/testing/constraints-3.7.txt +++ b/packages/google-cloud-bigtable/testing/constraints-3.7.txt @@ -10,5 +10,5 @@ google-cloud-core==1.4.1 grpc-google-iam-v1==0.12.4 proto-plus==1.22.0 libcst==0.2.5 -protobuf==3.20.2 +protobuf==3.19.5 From 291e3ed915a1ae328f8b41d39797c496c25a2976 Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Mon, 10 Oct 2022 14:36:03 -0400 Subject: [PATCH 666/892] chore(main): release 2.13.1 (#684) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- packages/google-cloud-bigtable/CHANGELOG.md | 7 +++++++ packages/google-cloud-bigtable/setup.py | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index c2e894ae8c46..7692190d4db6 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,13 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.13.1](https://github.com/googleapis/python-bigtable/compare/v2.13.0...v2.13.1) (2022-10-10) + + +### Bug Fixes + +* **deps:** Allow protobuf 3.19.5 ([#682](https://github.com/googleapis/python-bigtable/issues/682)) ([0bb3420](https://github.com/googleapis/python-bigtable/commit/0bb3420decac74058ee099d72f8932556409f2aa)) + ## [2.13.0](https://github.com/googleapis/python-bigtable/compare/v2.12.0...v2.13.0) (2022-09-29) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 1cab5d833cda..b8f40c363ff0 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -22,7 +22,7 @@ name = "google-cloud-bigtable" description = "Google Cloud Bigtable API client library" -version = "2.13.0" +version = "2.13.1" # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' From d17219b4d19ba2cdbe1acc6f6d0db5a7f6e154ca Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 18 Oct 2022 15:22:13 +0200 Subject: [PATCH 667/892] chore(deps): update all dependencies (#685) --- .../.github/workflows/system_emulated.yml | 2 +- 
packages/google-cloud-bigtable/samples/beam/requirements.txt | 4 ++-- packages/google-cloud-bigtable/samples/hello/requirements.txt | 2 +- .../samples/instanceadmin/requirements.txt | 2 +- .../samples/metricscaler/requirements.txt | 4 ++-- .../google-cloud-bigtable/samples/quickstart/requirements.txt | 2 +- .../samples/snippets/deletes/requirements.txt | 2 +- .../samples/snippets/filters/requirements.txt | 2 +- .../samples/snippets/reads/requirements.txt | 2 +- .../samples/snippets/writes/requirements.txt | 2 +- .../google-cloud-bigtable/samples/tableadmin/requirements.txt | 2 +- 11 files changed, 13 insertions(+), 13 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml b/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml index 48b8faa42a05..26b4894e52b8 100644 --- a/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml +++ b/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml @@ -20,7 +20,7 @@ jobs: python-version: '3.8' - name: Setup GCloud SDK - uses: google-github-actions/setup-gcloud@v0.6.0 + uses: google-github-actions/setup-gcloud@v0.6.2 - name: Install / run Nox run: | diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index f3278b4d15cf..4428b8447568 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ -apache-beam==2.41.0 -google-cloud-bigtable==2.13.0 +apache-beam==2.42.0 +google-cloud-bigtable==2.13.1 google-cloud-core==2.3.2 diff --git a/packages/google-cloud-bigtable/samples/hello/requirements.txt b/packages/google-cloud-bigtable/samples/hello/requirements.txt index 6b717a29e311..ecfdb47bb48c 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.13.0 +google-cloud-bigtable==2.13.1 google-cloud-core==2.3.2 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt index fe20918e48fa..b21dc525ca8b 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.13.0 +google-cloud-bigtable==2.13.1 backoff==2.2.1 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index 5b85e133fafe..a56bc538b58c 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.13.0 -google-cloud-monitoring==2.11.2 +google-cloud-bigtable==2.13.1 +google-cloud-monitoring==2.11.3 diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt index aeeadefcb162..438688b1dd77 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.13.0 +google-cloud-bigtable==2.13.1 diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt 
b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt index 4281da43f2b0..0a1c1704d919 100644 --- a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.13.0 +google-cloud-bigtable==2.13.1 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt index 4281da43f2b0..0a1c1704d919 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.13.0 +google-cloud-bigtable==2.13.1 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt index 4281da43f2b0..0a1c1704d919 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.13.0 +google-cloud-bigtable==2.13.1 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt index af6eb72ac236..e627360b7df7 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.13.0 \ No newline at end of file +google-cloud-bigtable==2.13.1 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt index aeeadefcb162..438688b1dd77 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.13.0 +google-cloud-bigtable==2.13.1 From afe758b9f45df6203973daac4f8f4d199c2b4ffe Mon Sep 17 00:00:00 2001 From: Igor Bernstein Date: Thu, 20 Oct 2022 16:31:32 -0400 Subject: [PATCH 668/892] fix: respect deadlines for column family operations (#687) * feat: allow end users to override the deadline for manipulating column families As a sideeffect this also fixes a bug where the default deadlines were not applied. * limit scope of change to just use the default timeout * lint --- .../google/cloud/bigtable/column_family.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/column_family.py b/packages/google-cloud-bigtable/google/cloud/bigtable/column_family.py index 4660119231b5..80232958d492 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/column_family.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/column_family.py @@ -20,6 +20,7 @@ from google.cloud.bigtable_admin_v2.types import ( bigtable_table_admin as table_admin_v2_pb2, ) +from google.api_core.gapic_v1.method import DEFAULT class GarbageCollectionRule(object): @@ -275,7 +276,8 @@ def create(self): # data it contains are the GC rule and the column family ID already # stored on this instance. 
client.table_admin_client.modify_column_families( - request={"name": self._table.name, "modifications": [modification]} + request={"name": self._table.name, "modifications": [modification]}, + timeout=DEFAULT, ) def update(self): @@ -302,7 +304,8 @@ def update(self): # data it contains are the GC rule and the column family ID already # stored on this instance. client.table_admin_client.modify_column_families( - request={"name": self._table.name, "modifications": [modification]} + request={"name": self._table.name, "modifications": [modification]}, + timeout=DEFAULT, ) def delete(self): @@ -324,7 +327,8 @@ def delete(self): # data it contains are the GC rule and the column family ID already # stored on this instance. client.table_admin_client.modify_column_families( - request={"name": self._table.name, "modifications": [modification]} + request={"name": self._table.name, "modifications": [modification]}, + timeout=DEFAULT, ) From 686df9b5a74a2b62645335cbb39dfcea35a1e4fd Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Thu, 20 Oct 2022 20:52:16 +0000 Subject: [PATCH 669/892] chore(main): release 2.13.2 (#688) :robot: I have created a release *beep* *boop* --- ## [2.13.2](https://togithub.com/googleapis/python-bigtable/compare/v2.13.1...v2.13.2) (2022-10-20) ### Bug Fixes * Respect deadlines for column family operations ([#687](https://togithub.com/googleapis/python-bigtable/issues/687)) ([df2e64a](https://togithub.com/googleapis/python-bigtable/commit/df2e64a79bbd8b28d0991706607af99d539320d1)) --- This PR was generated with [Release Please](https://togithub.com/googleapis/release-please). See [documentation](https://togithub.com/googleapis/release-please#release-please). --- packages/google-cloud-bigtable/CHANGELOG.md | 7 +++++++ packages/google-cloud-bigtable/setup.py | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 7692190d4db6..5d74f2061bb0 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,13 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.13.2](https://github.com/googleapis/python-bigtable/compare/v2.13.1...v2.13.2) (2022-10-20) + + +### Bug Fixes + +* Respect deadlines for column family operations ([#687](https://github.com/googleapis/python-bigtable/issues/687)) ([df2e64a](https://github.com/googleapis/python-bigtable/commit/df2e64a79bbd8b28d0991706607af99d539320d1)) + ## [2.13.1](https://github.com/googleapis/python-bigtable/compare/v2.13.0...v2.13.1) (2022-10-10) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index b8f40c363ff0..f30c8f204a92 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -22,7 +22,7 @@ name = "google-cloud-bigtable" description = "Google Cloud Bigtable API client library" -version = "2.13.1" +version = "2.13.2" # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' From 2fb05fc633bd5423d77e627e6649324d6bea3b9e Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Tue, 25 Oct 2022 02:42:03 +0200 Subject: [PATCH 670/892] chore(deps): update dependency google-cloud-bigtable to v2.13.2 (#689) --- packages/google-cloud-bigtable/samples/beam/requirements.txt | 2 +- packages/google-cloud-bigtable/samples/hello/requirements.txt | 2 +- 
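As an illustrative aside (not part of the patch series): the fix shipped in this release (#687) passes google.api_core's DEFAULT timeout sentinel to ModifyColumnFamilies, so, per the commit message above, the default deadlines for column family operations are actually applied. A minimal usage sketch of the affected helpers follows; the project, instance, table, and column family IDs are placeholders, not values taken from the patch.

import datetime

from google.cloud import bigtable
from google.cloud.bigtable import column_family

# Hypothetical identifiers for illustration only.
client = bigtable.Client(project="my-project", admin=True)
table = client.instance("my-instance").table("my-table")

# Keep cells for at most 30 days. With the change above, create() (and
# likewise update() and delete()) run under the default ModifyColumnFamilies
# deadline instead of an effectively unbounded call.
gc_rule = column_family.MaxAgeGCRule(datetime.timedelta(days=30))
cf = table.column_family("cf1", gc_rule=gc_rule)
cf.create()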
.../samples/instanceadmin/requirements.txt | 2 +- .../google-cloud-bigtable/samples/metricscaler/requirements.txt | 2 +- .../google-cloud-bigtable/samples/quickstart/requirements.txt | 2 +- .../samples/snippets/deletes/requirements.txt | 2 +- .../samples/snippets/filters/requirements.txt | 2 +- .../samples/snippets/reads/requirements.txt | 2 +- .../samples/snippets/writes/requirements.txt | 2 +- .../google-cloud-bigtable/samples/tableadmin/requirements.txt | 2 +- 10 files changed, 10 insertions(+), 10 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index 4428b8447568..28ced316a27f 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ apache-beam==2.42.0 -google-cloud-bigtable==2.13.1 +google-cloud-bigtable==2.13.2 google-cloud-core==2.3.2 diff --git a/packages/google-cloud-bigtable/samples/hello/requirements.txt b/packages/google-cloud-bigtable/samples/hello/requirements.txt index ecfdb47bb48c..cd2b6a519113 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.13.1 +google-cloud-bigtable==2.13.2 google-cloud-core==2.3.2 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt index b21dc525ca8b..486e35607438 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.13.1 +google-cloud-bigtable==2.13.2 backoff==2.2.1 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index a56bc538b58c..fe6c200a4d5c 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.13.1 +google-cloud-bigtable==2.13.2 google-cloud-monitoring==2.11.3 diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt index 438688b1dd77..95aea8cd5c3a 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.13.1 +google-cloud-bigtable==2.13.2 diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt index 0a1c1704d919..a270d6320583 100644 --- a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.13.1 +google-cloud-bigtable==2.13.2 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt index 0a1c1704d919..a270d6320583 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt @@ -1,2 +1,2 @@ 
-google-cloud-bigtable==2.13.1 +google-cloud-bigtable==2.13.2 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt index 0a1c1704d919..a270d6320583 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.13.1 +google-cloud-bigtable==2.13.2 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt index e627360b7df7..0af6c5ba55ed 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.13.1 \ No newline at end of file +google-cloud-bigtable==2.13.2 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt index 438688b1dd77..95aea8cd5c3a 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.13.1 +google-cloud-bigtable==2.13.2 From 352d523f79bfa20a5ce80d0cddd6350afae7205d Mon Sep 17 00:00:00 2001 From: WhiteSource Renovate Date: Wed, 26 Oct 2022 12:44:34 +0200 Subject: [PATCH 671/892] chore(deps): update dependency pytest to v7.2.0 (#690) --- .../google-cloud-bigtable/samples/beam/requirements-test.txt | 2 +- .../google-cloud-bigtable/samples/hello/requirements-test.txt | 2 +- .../samples/hello_happybase/requirements-test.txt | 2 +- .../samples/instanceadmin/requirements-test.txt | 2 +- .../samples/metricscaler/requirements-test.txt | 2 +- .../samples/quickstart/requirements-test.txt | 2 +- .../samples/quickstart_happybase/requirements-test.txt | 2 +- .../samples/snippets/deletes/requirements-test.txt | 2 +- .../samples/snippets/filters/requirements-test.txt | 2 +- .../samples/snippets/reads/requirements-test.txt | 2 +- .../samples/snippets/writes/requirements-test.txt | 2 +- .../samples/tableadmin/requirements-test.txt | 2 +- 12 files changed, 12 insertions(+), 12 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements-test.txt b/packages/google-cloud-bigtable/samples/beam/requirements-test.txt index e07168502ea9..49780e035690 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements-test.txt @@ -1 +1 @@ -pytest==7.1.3 +pytest==7.2.0 diff --git a/packages/google-cloud-bigtable/samples/hello/requirements-test.txt b/packages/google-cloud-bigtable/samples/hello/requirements-test.txt index e07168502ea9..49780e035690 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements-test.txt @@ -1 +1 @@ -pytest==7.1.3 +pytest==7.2.0 diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt b/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt index e07168502ea9..49780e035690 100644 --- a/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt +++ 
b/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt @@ -1 +1 @@ -pytest==7.1.3 +pytest==7.2.0 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt index e07168502ea9..49780e035690 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt @@ -1 +1 @@ -pytest==7.1.3 +pytest==7.2.0 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt index fa3e37e5dd4f..a5b1e47bc2ff 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt @@ -1,3 +1,3 @@ -pytest==7.1.3 +pytest==7.2.0 mock==4.0.3 google-cloud-testutils diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt index e07168502ea9..49780e035690 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt @@ -1 +1 @@ -pytest==7.1.3 +pytest==7.2.0 diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt b/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt index e07168502ea9..49780e035690 100644 --- a/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt @@ -1 +1 @@ -pytest==7.1.3 +pytest==7.2.0 diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt index e07168502ea9..49780e035690 100644 --- a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt @@ -1 +1 @@ -pytest==7.1.3 +pytest==7.2.0 diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt index e07168502ea9..49780e035690 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt @@ -1 +1 @@ -pytest==7.1.3 +pytest==7.2.0 diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt index e07168502ea9..49780e035690 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt @@ -1 +1 @@ -pytest==7.1.3 +pytest==7.2.0 diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt index 3bc76cee52db..b90fc387d015 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt @@ -1,2 +1,2 @@ backoff==2.2.1 -pytest==7.1.3 +pytest==7.2.0 diff --git 
a/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt index 5a9ba2b99f91..911d44b4e10b 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt @@ -1,2 +1,2 @@ -pytest==7.1.3 +pytest==7.2.0 google-cloud-testutils==1.3.3 From 164c37ec010791bf745071ed7cbb7d66f5058bc3 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Fri, 18 Nov 2022 11:04:49 -0800 Subject: [PATCH 672/892] chore: Update gapic-generator-python to v1.6.1 (#683) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: remove proto ReadEfficiencyStats feat: remove field RequestStats.read_efficiency_stats feat: rename proto AllReadStats to FullReadStatsView feat: rename field RequestStats.all_read_stats to full_read_stats_view feat: rename proto ReadIteratorStats to ReadIterationStats feat: remove enum value ReadRowsRequest.RequestStatsView.REQUEST_STATS_EFFICIENCY feat: remove field ReadIterationStats.deletes_seen PiperOrigin-RevId: 479370243 Source-Link: https://github.com/googleapis/googleapis/commit/959d789bef1cb2a7706c474f8db36a04dc0724de Source-Link: https://github.com/googleapis/googleapis-gen/commit/38b425264c03f3cdca279503c801c86604c8a67a Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiMzhiNDI1MjY0YzAzZjNjZGNhMjc5NTAzYzgwMWM4NjYwNGM4YTY3YSJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * chore: update to gapic-generator-python 1.5.0 feat: add support for `google.cloud..__version__` PiperOrigin-RevId: 484665853 Source-Link: https://github.com/googleapis/googleapis/commit/8eb249a19db926c2fbc4ecf1dc09c0e521a88b22 Source-Link: https://github.com/googleapis/googleapis-gen/commit/c8aa327b5f478865fc3fd91e3c2768e54e26ad44 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiYzhhYTMyN2I1ZjQ3ODg2NWZjM2ZkOTFlM2MyNzY4ZTU0ZTI2YWQ0NCJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * chore: Update to gapic-generator-python 1.6.0 feat(python): Add typing to proto.Message based class attributes feat(python): Snippetgen handling of repeated enum field PiperOrigin-RevId: 487326846 Source-Link: https://github.com/googleapis/googleapis/commit/da380c77bb87ba0f752baf07605dd1db30e1f7e1 Source-Link: https://github.com/googleapis/googleapis-gen/commit/61ef5762ee6731a0cbbfea22fd0eecee51ab1c8e Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiNjFlZjU3NjJlZTY3MzFhMGNiYmZlYTIyZmQwZWVjZWU1MWFiMWM4ZSJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * feat: new APIs added to reflect updates to the filestore service - Add ENTERPRISE Tier - Add snapshot APIs: RevertInstance, ListSnapshots, CreateSnapshot, DeleteSnapshot, UpdateSnapshot - Add multi-share APIs: ListShares, GetShare, CreateShare, DeleteShare, UpdateShare - Add ConnectMode to NetworkConfig (for Private Service Access support) - New status codes (SUSPENDED/SUSPENDING, REVERTING/RESUMING) - Add SuspensionReason (for KMS related suspension) - Add new fields to Instance information: max_capacity_gb, capacity_step_size_gb, max_share_count, capacity_gb, multi_share_enabled PiperOrigin-RevId: 487492758 Source-Link: 
https://github.com/googleapis/googleapis/commit/5be5981f50322cf0c7388595e0f31ac5d0693469 Source-Link: https://github.com/googleapis/googleapis-gen/commit/ab0e217f560cc2c1afc11441c2eab6b6950efd2b Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiYWIwZTIxN2Y1NjBjYzJjMWFmYzExNDQxYzJlYWI2YjY5NTBlZmQyYiJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * Add gapic_version to google.cloud.bigtable Manually adjust the import of gapic_version * chore: Update gapic-generator-python to v1.6.1 PiperOrigin-RevId: 488036204 Source-Link: https://github.com/googleapis/googleapis/commit/08f275f5c1c0d99056e1cb68376323414459ee19 Source-Link: https://github.com/googleapis/googleapis-gen/commit/555c0945e60649e38739ae64bc45719cdf72178f Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiNTU1YzA5NDVlNjA2NDllMzg3MzlhZTY0YmM0NTcxOWNkZjcyMTc4ZiJ9 * Fix the version * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * Remove unused import * Fix the import * Add release please config and manifest Add gapic version under bigtable_admin_v2 * Fix path to bigtable in setup.py * add google.cloud.bigtable_admin * hardcode bigtable version in owlbot.py * Remove unused gapic_version.py from bigtable_admin_v2 Co-authored-by: Owl Bot Co-authored-by: Mariatta Wijaya Co-authored-by: Mariatta Wijaya Co-authored-by: Anthonios Partheniou --- .../.github/release-please.yml | 1 + .../.release-please-config.json | 20 ++ .../.release-please-manifest.json | 3 + .../google/cloud/bigtable/__init__.py | 3 +- .../google/cloud/bigtable/gapic_version.py | 16 + .../google/cloud/bigtable_admin/__init__.py | 292 ++++++++++++++++++ .../cloud/bigtable_admin/gapic_version.py | 16 + .../google/cloud/bigtable_admin/py.typed | 2 + .../cloud/bigtable_admin_v2/__init__.py | 4 + .../bigtable_instance_admin/async_client.py | 238 ++++++++------ .../bigtable_instance_admin/client.py | 202 +++++++----- .../transports/base.py | 2 +- .../transports/grpc.py | 20 +- .../transports/grpc_asyncio.py | 16 +- .../bigtable_table_admin/async_client.py | 256 ++++++++------- .../services/bigtable_table_admin/client.py | 214 +++++++------ .../bigtable_table_admin/transports/base.py | 2 +- .../bigtable_table_admin/transports/grpc.py | 20 +- .../transports/grpc_asyncio.py | 16 +- .../types/bigtable_instance_admin.py | 154 ++++----- .../types/bigtable_table_admin.py | 184 +++++------ .../cloud/bigtable_admin_v2/types/common.py | 8 +- .../cloud/bigtable_admin_v2/types/instance.py | 80 ++--- .../cloud/bigtable_admin_v2/types/table.py | 90 +++--- .../google/cloud/bigtable_v2/__init__.py | 14 +- .../services/bigtable/async_client.py | 104 ++++--- .../bigtable_v2/services/bigtable/client.py | 107 ++++--- .../services/bigtable/transports/base.py | 2 +- .../services/bigtable/transports/grpc.py | 20 +- .../bigtable/transports/grpc_asyncio.py | 16 +- .../cloud/bigtable_v2/types/__init__.py | 10 +- .../cloud/bigtable_v2/types/bigtable.py | 123 ++++---- .../google/cloud/bigtable_v2/types/data.py | 150 ++++----- .../cloud/bigtable_v2/types/request_stats.py | 109 +++---- .../bigtable_v2/types/response_params.py | 6 +- packages/google-cloud-bigtable/owlbot.py | 9 +- packages/google-cloud-bigtable/setup.py | 10 +- .../unit/gapic/bigtable_v2/test_bigtable.py | 28 +- 38 files changed, 1524 insertions(+), 1043 deletions(-) create mode 100644 packages/google-cloud-bigtable/.release-please-config.json create mode 100644 
packages/google-cloud-bigtable/.release-please-manifest.json create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin/__init__.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin/py.typed diff --git a/packages/google-cloud-bigtable/.github/release-please.yml b/packages/google-cloud-bigtable/.github/release-please.yml index 29601ad4692c..593e83f9fea2 100644 --- a/packages/google-cloud-bigtable/.github/release-please.yml +++ b/packages/google-cloud-bigtable/.github/release-please.yml @@ -2,6 +2,7 @@ releaseType: python handleGHRelease: true # NOTE: this section is generated by synthtool.languages.python # See https://github.com/googleapis/synthtool/blob/master/synthtool/languages/python.py +manifest: true branches: - branch: v1 handleGHRelease: true diff --git a/packages/google-cloud-bigtable/.release-please-config.json b/packages/google-cloud-bigtable/.release-please-config.json new file mode 100644 index 000000000000..dcbfec72b1cf --- /dev/null +++ b/packages/google-cloud-bigtable/.release-please-config.json @@ -0,0 +1,20 @@ +{ + "$schema": +"https://raw.githubusercontent.com/googleapis/release-please/main/schemas/config.json", + "packages": { + ".": { + "release-type": "python", + "extra-files": [ + "google/cloud/bigtable/gapic_version.py", + "google/cloud/bigtable_admin/gapic_version.py" + ] + } + }, + "release-type": "python", + "plugins": [ + { + "type": "sentence-case" + } + ], + "initial-version": "2.13.2" +} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.release-please-manifest.json b/packages/google-cloud-bigtable/.release-please-manifest.json new file mode 100644 index 000000000000..bc12e128dd66 --- /dev/null +++ b/packages/google-cloud-bigtable/.release-please-manifest.json @@ -0,0 +1,3 @@ +{ + ".": "2.13.2" +} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable/__init__.py index a54096624c0c..4c6e3f3d8396 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/__init__.py @@ -14,12 +14,11 @@ """Google Cloud Bigtable API package.""" - from typing import Optional import pkg_resources - from google.cloud.bigtable.client import Client + __version__: Optional[str] try: __version__ = pkg_resources.get_distribution("google-cloud-bigtable").version diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py new file mode 100644 index 000000000000..c790ba98e3c2 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +__version__ = "2.13.2" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/__init__.py new file mode 100644 index 000000000000..6ddc6acb297b --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/__init__.py @@ -0,0 +1,292 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from google.cloud.bigtable_admin import gapic_version as package_version + +__version__ = package_version.__version__ + + +from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.client import ( + BigtableInstanceAdminClient, +) +from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.async_client import ( + BigtableInstanceAdminAsyncClient, +) +from google.cloud.bigtable_admin_v2.services.bigtable_table_admin.client import ( + BigtableTableAdminClient, +) +from google.cloud.bigtable_admin_v2.services.bigtable_table_admin.async_client import ( + BigtableTableAdminAsyncClient, +) + +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + CreateAppProfileRequest, +) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + CreateClusterMetadata, +) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + CreateClusterRequest, +) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + CreateInstanceMetadata, +) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + CreateInstanceRequest, +) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + DeleteAppProfileRequest, +) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + DeleteClusterRequest, +) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + DeleteInstanceRequest, +) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + GetAppProfileRequest, +) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + GetClusterRequest, +) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + GetInstanceRequest, +) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + ListAppProfilesRequest, +) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + ListAppProfilesResponse, +) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + ListClustersRequest, +) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + ListClustersResponse, +) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + ListHotTabletsRequest, +) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + ListHotTabletsResponse, +) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + 
ListInstancesRequest, +) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + ListInstancesResponse, +) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + PartialUpdateClusterMetadata, +) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + PartialUpdateClusterRequest, +) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + PartialUpdateInstanceRequest, +) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + UpdateAppProfileMetadata, +) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + UpdateAppProfileRequest, +) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + UpdateClusterMetadata, +) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + UpdateInstanceMetadata, +) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + CheckConsistencyRequest, +) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + CheckConsistencyResponse, +) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + CreateBackupMetadata, +) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + CreateBackupRequest, +) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + CreateTableFromSnapshotMetadata, +) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + CreateTableFromSnapshotRequest, +) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import CreateTableRequest +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + DeleteBackupRequest, +) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + DeleteSnapshotRequest, +) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import DeleteTableRequest +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + DropRowRangeRequest, +) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + GenerateConsistencyTokenRequest, +) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + GenerateConsistencyTokenResponse, +) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import GetBackupRequest +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import GetSnapshotRequest +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import GetTableRequest +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ListBackupsRequest +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + ListBackupsResponse, +) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + ListSnapshotsRequest, +) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + ListSnapshotsResponse, +) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ListTablesRequest +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ListTablesResponse +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + ModifyColumnFamiliesRequest, +) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + OptimizeRestoredTableMetadata, +) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + RestoreTableMetadata, +) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + RestoreTableRequest, +) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + 
SnapshotTableMetadata, +) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + SnapshotTableRequest, +) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + UndeleteTableMetadata, +) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + UndeleteTableRequest, +) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + UpdateBackupRequest, +) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + UpdateTableMetadata, +) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import UpdateTableRequest +from google.cloud.bigtable_admin_v2.types.common import OperationProgress +from google.cloud.bigtable_admin_v2.types.common import StorageType +from google.cloud.bigtable_admin_v2.types.instance import AppProfile +from google.cloud.bigtable_admin_v2.types.instance import AutoscalingLimits +from google.cloud.bigtable_admin_v2.types.instance import AutoscalingTargets +from google.cloud.bigtable_admin_v2.types.instance import Cluster +from google.cloud.bigtable_admin_v2.types.instance import HotTablet +from google.cloud.bigtable_admin_v2.types.instance import Instance +from google.cloud.bigtable_admin_v2.types.table import Backup +from google.cloud.bigtable_admin_v2.types.table import BackupInfo +from google.cloud.bigtable_admin_v2.types.table import ColumnFamily +from google.cloud.bigtable_admin_v2.types.table import EncryptionInfo +from google.cloud.bigtable_admin_v2.types.table import GcRule +from google.cloud.bigtable_admin_v2.types.table import RestoreInfo +from google.cloud.bigtable_admin_v2.types.table import Snapshot +from google.cloud.bigtable_admin_v2.types.table import Table +from google.cloud.bigtable_admin_v2.types.table import RestoreSourceType + +__all__ = ( + "BigtableInstanceAdminClient", + "BigtableInstanceAdminAsyncClient", + "BigtableTableAdminClient", + "BigtableTableAdminAsyncClient", + "CreateAppProfileRequest", + "CreateClusterMetadata", + "CreateClusterRequest", + "CreateInstanceMetadata", + "CreateInstanceRequest", + "DeleteAppProfileRequest", + "DeleteClusterRequest", + "DeleteInstanceRequest", + "GetAppProfileRequest", + "GetClusterRequest", + "GetInstanceRequest", + "ListAppProfilesRequest", + "ListAppProfilesResponse", + "ListClustersRequest", + "ListClustersResponse", + "ListHotTabletsRequest", + "ListHotTabletsResponse", + "ListInstancesRequest", + "ListInstancesResponse", + "PartialUpdateClusterMetadata", + "PartialUpdateClusterRequest", + "PartialUpdateInstanceRequest", + "UpdateAppProfileMetadata", + "UpdateAppProfileRequest", + "UpdateClusterMetadata", + "UpdateInstanceMetadata", + "CheckConsistencyRequest", + "CheckConsistencyResponse", + "CreateBackupMetadata", + "CreateBackupRequest", + "CreateTableFromSnapshotMetadata", + "CreateTableFromSnapshotRequest", + "CreateTableRequest", + "DeleteBackupRequest", + "DeleteSnapshotRequest", + "DeleteTableRequest", + "DropRowRangeRequest", + "GenerateConsistencyTokenRequest", + "GenerateConsistencyTokenResponse", + "GetBackupRequest", + "GetSnapshotRequest", + "GetTableRequest", + "ListBackupsRequest", + "ListBackupsResponse", + "ListSnapshotsRequest", + "ListSnapshotsResponse", + "ListTablesRequest", + "ListTablesResponse", + "ModifyColumnFamiliesRequest", + "OptimizeRestoredTableMetadata", + "RestoreTableMetadata", + "RestoreTableRequest", + "SnapshotTableMetadata", + "SnapshotTableRequest", + "UndeleteTableMetadata", + "UndeleteTableRequest", + "UpdateBackupRequest", + "UpdateTableMetadata", + 
"UpdateTableRequest", + "OperationProgress", + "StorageType", + "AppProfile", + "AutoscalingLimits", + "AutoscalingTargets", + "Cluster", + "HotTablet", + "Instance", + "Backup", + "BackupInfo", + "ColumnFamily", + "EncryptionInfo", + "GcRule", + "RestoreInfo", + "Snapshot", + "Table", + "RestoreSourceType", +) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py new file mode 100644 index 000000000000..c790ba98e3c2 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +__version__ = "2.13.2" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/py.typed b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/py.typed new file mode 100644 index 000000000000..bc26f20697c2 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-bigtable-admin package uses inline types. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py index 793e16a3f6ff..baf3403b27a2 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py @@ -13,6 +13,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# +from google.cloud.bigtable_admin import gapic_version as package_version + +__version__ = package_version.__version__ + from .services.bigtable_instance_admin import BigtableInstanceAdminClient from .services.bigtable_instance_admin import BigtableInstanceAdminAsyncClient diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py index 9853efdb451a..8bb629e176cf 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ -16,7 +16,17 @@ from collections import OrderedDict import functools import re -from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, +) import pkg_resources from google.api_core.client_options import ClientOptions @@ -190,9 +200,9 @@ def transport(self) -> BigtableInstanceAdminTransport: def __init__( self, *, - credentials: ga_credentials.Credentials = None, + credentials: Optional[ga_credentials.Credentials] = None, transport: Union[str, BigtableInstanceAdminTransport] = "grpc_asyncio", - client_options: ClientOptions = None, + client_options: Optional[ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiates the bigtable instance admin client. @@ -236,14 +246,16 @@ def __init__( async def create_instance( self, - request: Union[bigtable_instance_admin.CreateInstanceRequest, dict] = None, + request: Optional[ + Union[bigtable_instance_admin.CreateInstanceRequest, dict] + ] = None, *, - parent: str = None, - instance_id: str = None, - instance: gba_instance.Instance = None, - clusters: Mapping[str, gba_instance.Cluster] = None, + parent: Optional[str] = None, + instance_id: Optional[str] = None, + instance: Optional[gba_instance.Instance] = None, + clusters: Optional[MutableMapping[str, gba_instance.Cluster]] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Create an instance within a project. @@ -255,7 +267,7 @@ async def create_instance( non-empty, then autoscaling is enabled. Args: - request (Union[google.cloud.bigtable_admin_v2.types.CreateInstanceRequest, dict]): + request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateInstanceRequest, dict]]): The request object. Request message for BigtableInstanceAdmin.CreateInstance. parent (:class:`str`): @@ -281,7 +293,7 @@ async def create_instance( This corresponds to the ``instance`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - clusters (:class:`Mapping[str, google.cloud.bigtable_admin_v2.types.Cluster]`): + clusters (:class:`MutableMapping[str, google.cloud.bigtable_admin_v2.types.Cluster]`): Required. 
The clusters to be created within the instance, mapped by desired cluster ID, e.g., just ``mycluster`` rather than @@ -368,17 +380,19 @@ async def create_instance( async def get_instance( self, - request: Union[bigtable_instance_admin.GetInstanceRequest, dict] = None, + request: Optional[ + Union[bigtable_instance_admin.GetInstanceRequest, dict] + ] = None, *, - name: str = None, + name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> instance.Instance: r"""Gets information about an instance. Args: - request (Union[google.cloud.bigtable_admin_v2.types.GetInstanceRequest, dict]): + request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetInstanceRequest, dict]]): The request object. Request message for BigtableInstanceAdmin.GetInstance. name (:class:`str`): @@ -458,17 +472,19 @@ async def get_instance( async def list_instances( self, - request: Union[bigtable_instance_admin.ListInstancesRequest, dict] = None, + request: Optional[ + Union[bigtable_instance_admin.ListInstancesRequest, dict] + ] = None, *, - parent: str = None, + parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> bigtable_instance_admin.ListInstancesResponse: r"""Lists information about instances in a project. Args: - request (Union[google.cloud.bigtable_admin_v2.types.ListInstancesRequest, dict]): + request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListInstancesRequest, dict]]): The request object. Request message for BigtableInstanceAdmin.ListInstances. parent (:class:`str`): @@ -545,10 +561,10 @@ async def list_instances( async def update_instance( self, - request: Union[instance.Instance, dict] = None, + request: Optional[Union[instance.Instance, dict]] = None, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> instance.Instance: r"""Updates an instance within a project. This method @@ -557,7 +573,7 @@ async def update_instance( PartialUpdateInstance. Args: - request (Union[google.cloud.bigtable_admin_v2.types.Instance, dict]): + request (Optional[Union[google.cloud.bigtable_admin_v2.types.Instance, dict]]): The request object. A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and the resources that serve them. All tables in an instance are @@ -619,14 +635,14 @@ async def update_instance( async def partial_update_instance( self, - request: Union[ - bigtable_instance_admin.PartialUpdateInstanceRequest, dict + request: Optional[ + Union[bigtable_instance_admin.PartialUpdateInstanceRequest, dict] ] = None, *, - instance: gba_instance.Instance = None, - update_mask: field_mask_pb2.FieldMask = None, + instance: Optional[gba_instance.Instance] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Partially updates an instance within a project. This @@ -634,7 +650,7 @@ async def partial_update_instance( preferred way to update an Instance. 
Args: - request (Union[google.cloud.bigtable_admin_v2.types.PartialUpdateInstanceRequest, dict]): + request (Optional[Union[google.cloud.bigtable_admin_v2.types.PartialUpdateInstanceRequest, dict]]): The request object. Request message for BigtableInstanceAdmin.PartialUpdateInstance. instance (:class:`google.cloud.bigtable_admin_v2.types.Instance`): @@ -735,17 +751,19 @@ async def partial_update_instance( async def delete_instance( self, - request: Union[bigtable_instance_admin.DeleteInstanceRequest, dict] = None, + request: Optional[ + Union[bigtable_instance_admin.DeleteInstanceRequest, dict] + ] = None, *, - name: str = None, + name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Delete an instance from a project. Args: - request (Union[google.cloud.bigtable_admin_v2.types.DeleteInstanceRequest, dict]): + request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteInstanceRequest, dict]]): The request object. Request message for BigtableInstanceAdmin.DeleteInstance. name (:class:`str`): @@ -803,13 +821,15 @@ async def delete_instance( async def create_cluster( self, - request: Union[bigtable_instance_admin.CreateClusterRequest, dict] = None, + request: Optional[ + Union[bigtable_instance_admin.CreateClusterRequest, dict] + ] = None, *, - parent: str = None, - cluster_id: str = None, - cluster: instance.Cluster = None, + parent: Optional[str] = None, + cluster_id: Optional[str] = None, + cluster: Optional[instance.Cluster] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Creates a cluster within an instance. @@ -821,7 +841,7 @@ async def create_cluster( non-empty, then autoscaling is enabled. Args: - request (Union[google.cloud.bigtable_admin_v2.types.CreateClusterRequest, dict]): + request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateClusterRequest, dict]]): The request object. Request message for BigtableInstanceAdmin.CreateCluster. parent (:class:`str`): @@ -920,17 +940,19 @@ async def create_cluster( async def get_cluster( self, - request: Union[bigtable_instance_admin.GetClusterRequest, dict] = None, + request: Optional[ + Union[bigtable_instance_admin.GetClusterRequest, dict] + ] = None, *, - name: str = None, + name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> instance.Cluster: r"""Gets information about a cluster. Args: - request (Union[google.cloud.bigtable_admin_v2.types.GetClusterRequest, dict]): + request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetClusterRequest, dict]]): The request object. Request message for BigtableInstanceAdmin.GetCluster. name (:class:`str`): @@ -1009,17 +1031,19 @@ async def get_cluster( async def list_clusters( self, - request: Union[bigtable_instance_admin.ListClustersRequest, dict] = None, + request: Optional[ + Union[bigtable_instance_admin.ListClustersRequest, dict] + ] = None, *, - parent: str = None, + parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> bigtable_instance_admin.ListClustersResponse: r"""Lists information about clusters in an instance. 
Args: - request (Union[google.cloud.bigtable_admin_v2.types.ListClustersRequest, dict]): + request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListClustersRequest, dict]]): The request object. Request message for BigtableInstanceAdmin.ListClusters. parent (:class:`str`): @@ -1098,10 +1122,10 @@ async def list_clusters( async def update_cluster( self, - request: Union[instance.Cluster, dict] = None, + request: Optional[Union[instance.Cluster, dict]] = None, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Updates a cluster within an instance. @@ -1111,7 +1135,7 @@ async def update_cluster( it, you must use PartialUpdateCluster. Args: - request (Union[google.cloud.bigtable_admin_v2.types.Cluster, dict]): + request (Optional[Union[google.cloud.bigtable_admin_v2.types.Cluster, dict]]): The request object. A resizable group of nodes in a particular cloud location, capable of serving all [Tables][google.bigtable.admin.v2.Table] in the parent @@ -1180,14 +1204,14 @@ async def update_cluster( async def partial_update_cluster( self, - request: Union[ - bigtable_instance_admin.PartialUpdateClusterRequest, dict + request: Optional[ + Union[bigtable_instance_admin.PartialUpdateClusterRequest, dict] ] = None, *, - cluster: instance.Cluster = None, - update_mask: field_mask_pb2.FieldMask = None, + cluster: Optional[instance.Cluster] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Partially updates a cluster within a project. This method is the @@ -1206,7 +1230,7 @@ async def partial_update_cluster( serve_node count via the update_mask. Args: - request (Union[google.cloud.bigtable_admin_v2.types.PartialUpdateClusterRequest, dict]): + request (Optional[Union[google.cloud.bigtable_admin_v2.types.PartialUpdateClusterRequest, dict]]): The request object. Request message for BigtableInstanceAdmin.PartialUpdateCluster. cluster (:class:`google.cloud.bigtable_admin_v2.types.Cluster`): @@ -1295,17 +1319,19 @@ async def partial_update_cluster( async def delete_cluster( self, - request: Union[bigtable_instance_admin.DeleteClusterRequest, dict] = None, + request: Optional[ + Union[bigtable_instance_admin.DeleteClusterRequest, dict] + ] = None, *, - name: str = None, + name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes a cluster from an instance. Args: - request (Union[google.cloud.bigtable_admin_v2.types.DeleteClusterRequest, dict]): + request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteClusterRequest, dict]]): The request object. Request message for BigtableInstanceAdmin.DeleteCluster. 
name (:class:`str`): @@ -1363,19 +1389,21 @@ async def delete_cluster( async def create_app_profile( self, - request: Union[bigtable_instance_admin.CreateAppProfileRequest, dict] = None, + request: Optional[ + Union[bigtable_instance_admin.CreateAppProfileRequest, dict] + ] = None, *, - parent: str = None, - app_profile_id: str = None, - app_profile: instance.AppProfile = None, + parent: Optional[str] = None, + app_profile_id: Optional[str] = None, + app_profile: Optional[instance.AppProfile] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> instance.AppProfile: r"""Creates an app profile within an instance. Args: - request (Union[google.cloud.bigtable_admin_v2.types.CreateAppProfileRequest, dict]): + request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateAppProfileRequest, dict]]): The request object. Request message for BigtableInstanceAdmin.CreateAppProfile. parent (:class:`str`): @@ -1463,17 +1491,19 @@ async def create_app_profile( async def get_app_profile( self, - request: Union[bigtable_instance_admin.GetAppProfileRequest, dict] = None, + request: Optional[ + Union[bigtable_instance_admin.GetAppProfileRequest, dict] + ] = None, *, - name: str = None, + name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> instance.AppProfile: r"""Gets information about an app profile. Args: - request (Union[google.cloud.bigtable_admin_v2.types.GetAppProfileRequest, dict]): + request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetAppProfileRequest, dict]]): The request object. Request message for BigtableInstanceAdmin.GetAppProfile. name (:class:`str`): @@ -1551,17 +1581,19 @@ async def get_app_profile( async def list_app_profiles( self, - request: Union[bigtable_instance_admin.ListAppProfilesRequest, dict] = None, + request: Optional[ + Union[bigtable_instance_admin.ListAppProfilesRequest, dict] + ] = None, *, - parent: str = None, + parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListAppProfilesAsyncPager: r"""Lists information about app profiles in an instance. Args: - request (Union[google.cloud.bigtable_admin_v2.types.ListAppProfilesRequest, dict]): + request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListAppProfilesRequest, dict]]): The request object. Request message for BigtableInstanceAdmin.ListAppProfiles. parent (:class:`str`): @@ -1653,18 +1685,20 @@ async def list_app_profiles( async def update_app_profile( self, - request: Union[bigtable_instance_admin.UpdateAppProfileRequest, dict] = None, + request: Optional[ + Union[bigtable_instance_admin.UpdateAppProfileRequest, dict] + ] = None, *, - app_profile: instance.AppProfile = None, - update_mask: field_mask_pb2.FieldMask = None, + app_profile: Optional[instance.AppProfile] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Updates an app profile within an instance. 
Args: - request (Union[google.cloud.bigtable_admin_v2.types.UpdateAppProfileRequest, dict]): + request (Optional[Union[google.cloud.bigtable_admin_v2.types.UpdateAppProfileRequest, dict]]): The request object. Request message for BigtableInstanceAdmin.UpdateAppProfile. app_profile (:class:`google.cloud.bigtable_admin_v2.types.AppProfile`): @@ -1762,17 +1796,19 @@ async def update_app_profile( async def delete_app_profile( self, - request: Union[bigtable_instance_admin.DeleteAppProfileRequest, dict] = None, + request: Optional[ + Union[bigtable_instance_admin.DeleteAppProfileRequest, dict] + ] = None, *, - name: str = None, + name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes an app profile from an instance. Args: - request (Union[google.cloud.bigtable_admin_v2.types.DeleteAppProfileRequest, dict]): + request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteAppProfileRequest, dict]]): The request object. Request message for BigtableInstanceAdmin.DeleteAppProfile. name (:class:`str`): @@ -1830,11 +1866,11 @@ async def delete_app_profile( async def get_iam_policy( self, - request: Union[iam_policy_pb2.GetIamPolicyRequest, dict] = None, + request: Optional[Union[iam_policy_pb2.GetIamPolicyRequest, dict]] = None, *, - resource: str = None, + resource: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> policy_pb2.Policy: r"""Gets the access control policy for an instance @@ -1842,7 +1878,7 @@ async def get_iam_policy( but does not have a policy set. Args: - request (Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]): + request (Optional[Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]]): The request object. Request message for `GetIamPolicy` method. resource (:class:`str`): @@ -1981,18 +2017,18 @@ async def get_iam_policy( async def set_iam_policy( self, - request: Union[iam_policy_pb2.SetIamPolicyRequest, dict] = None, + request: Optional[Union[iam_policy_pb2.SetIamPolicyRequest, dict]] = None, *, - resource: str = None, + resource: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> policy_pb2.Policy: r"""Sets the access control policy on an instance resource. Replaces any existing policy. Args: - request (Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]): + request (Optional[Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]]): The request object. Request message for `SetIamPolicy` method. resource (:class:`str`): @@ -2121,19 +2157,19 @@ async def set_iam_policy( async def test_iam_permissions( self, - request: Union[iam_policy_pb2.TestIamPermissionsRequest, dict] = None, + request: Optional[Union[iam_policy_pb2.TestIamPermissionsRequest, dict]] = None, *, - resource: str = None, - permissions: Sequence[str] = None, + resource: Optional[str] = None, + permissions: Optional[MutableSequence[str]] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: r"""Returns permissions that the caller has on the specified instance resource. 
Args: - request (Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]): + request (Optional[Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]]): The request object. Request message for `TestIamPermissions` method. resource (:class:`str`): @@ -2145,7 +2181,7 @@ async def test_iam_permissions( This corresponds to the ``resource`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - permissions (:class:`Sequence[str]`): + permissions (:class:`MutableSequence[str]`): The set of permissions to check for the ``resource``. Permissions with wildcards (such as '*' or 'storage.*') are not allowed. For more information see `IAM @@ -2221,18 +2257,20 @@ async def test_iam_permissions( async def list_hot_tablets( self, - request: Union[bigtable_instance_admin.ListHotTabletsRequest, dict] = None, + request: Optional[ + Union[bigtable_instance_admin.ListHotTabletsRequest, dict] + ] = None, *, - parent: str = None, + parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListHotTabletsAsyncPager: r"""Lists hot tablets in a cluster, within the time range provided. Hot tablets are ordered based on CPU usage. Args: - request (Union[google.cloud.bigtable_admin_v2.types.ListHotTabletsRequest, dict]): + request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListHotTabletsRequest, dict]]): The request object. Request message for BigtableInstanceAdmin.ListHotTablets. parent (:class:`str`): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py index fc602bf423f9..23f4e55e0f68 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -16,7 +16,18 @@ from collections import OrderedDict import os import re -from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, + cast, +) import pkg_resources from google.api_core import client_options as client_options_lib @@ -66,7 +77,7 @@ class BigtableInstanceAdminClientMeta(type): def get_transport_class( cls, - label: str = None, + label: Optional[str] = None, ) -> Type[BigtableInstanceAdminTransport]: """Returns an appropriate transport class. @@ -456,8 +467,8 @@ def __init__( self, *, credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, BigtableInstanceAdminTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, + transport: Optional[Union[str, BigtableInstanceAdminTransport]] = None, + client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiates the bigtable instance admin client. @@ -471,7 +482,7 @@ def __init__( transport (Union[str, BigtableInstanceAdminTransport]): The transport to use. If set to None, a transport is chosen automatically. 
- client_options (google.api_core.client_options.ClientOptions): Custom options for the + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT @@ -501,6 +512,7 @@ def __init__( client_options = client_options_lib.from_dict(client_options) if client_options is None: client_options = client_options_lib.ClientOptions() + client_options = cast(client_options_lib.ClientOptions, client_options) api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source( client_options @@ -553,14 +565,16 @@ def __init__( def create_instance( self, - request: Union[bigtable_instance_admin.CreateInstanceRequest, dict] = None, + request: Optional[ + Union[bigtable_instance_admin.CreateInstanceRequest, dict] + ] = None, *, - parent: str = None, - instance_id: str = None, - instance: gba_instance.Instance = None, - clusters: Mapping[str, gba_instance.Cluster] = None, + parent: Optional[str] = None, + instance_id: Optional[str] = None, + instance: Optional[gba_instance.Instance] = None, + clusters: Optional[MutableMapping[str, gba_instance.Cluster]] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Create an instance within a project. @@ -598,7 +612,7 @@ def create_instance( This corresponds to the ``instance`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - clusters (Mapping[str, google.cloud.bigtable_admin_v2.types.Cluster]): + clusters (MutableMapping[str, google.cloud.bigtable_admin_v2.types.Cluster]): Required. The clusters to be created within the instance, mapped by desired cluster ID, e.g., just ``mycluster`` rather than @@ -684,11 +698,13 @@ def create_instance( def get_instance( self, - request: Union[bigtable_instance_admin.GetInstanceRequest, dict] = None, + request: Optional[ + Union[bigtable_instance_admin.GetInstanceRequest, dict] + ] = None, *, - name: str = None, + name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> instance.Instance: r"""Gets information about an instance. @@ -764,11 +780,13 @@ def get_instance( def list_instances( self, - request: Union[bigtable_instance_admin.ListInstancesRequest, dict] = None, + request: Optional[ + Union[bigtable_instance_admin.ListInstancesRequest, dict] + ] = None, *, - parent: str = None, + parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> bigtable_instance_admin.ListInstancesResponse: r"""Lists information about instances in a project. @@ -841,10 +859,10 @@ def list_instances( def update_instance( self, - request: Union[instance.Instance, dict] = None, + request: Optional[Union[instance.Instance, dict]] = None, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> instance.Instance: r"""Updates an instance within a project. 
This method @@ -906,14 +924,14 @@ def update_instance( def partial_update_instance( self, - request: Union[ - bigtable_instance_admin.PartialUpdateInstanceRequest, dict + request: Optional[ + Union[bigtable_instance_admin.PartialUpdateInstanceRequest, dict] ] = None, *, - instance: gba_instance.Instance = None, - update_mask: field_mask_pb2.FieldMask = None, + instance: Optional[gba_instance.Instance] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Partially updates an instance within a project. This @@ -1014,11 +1032,13 @@ def partial_update_instance( def delete_instance( self, - request: Union[bigtable_instance_admin.DeleteInstanceRequest, dict] = None, + request: Optional[ + Union[bigtable_instance_admin.DeleteInstanceRequest, dict] + ] = None, *, - name: str = None, + name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Delete an instance from a project. @@ -1082,13 +1102,15 @@ def delete_instance( def create_cluster( self, - request: Union[bigtable_instance_admin.CreateClusterRequest, dict] = None, + request: Optional[ + Union[bigtable_instance_admin.CreateClusterRequest, dict] + ] = None, *, - parent: str = None, - cluster_id: str = None, - cluster: instance.Cluster = None, + parent: Optional[str] = None, + cluster_id: Optional[str] = None, + cluster: Optional[instance.Cluster] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Creates a cluster within an instance. @@ -1199,11 +1221,13 @@ def create_cluster( def get_cluster( self, - request: Union[bigtable_instance_admin.GetClusterRequest, dict] = None, + request: Optional[ + Union[bigtable_instance_admin.GetClusterRequest, dict] + ] = None, *, - name: str = None, + name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> instance.Cluster: r"""Gets information about a cluster. @@ -1278,11 +1302,13 @@ def get_cluster( def list_clusters( self, - request: Union[bigtable_instance_admin.ListClustersRequest, dict] = None, + request: Optional[ + Union[bigtable_instance_admin.ListClustersRequest, dict] + ] = None, *, - parent: str = None, + parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> bigtable_instance_admin.ListClustersResponse: r"""Lists information about clusters in an instance. @@ -1357,10 +1383,10 @@ def list_clusters( def update_cluster( self, - request: Union[instance.Cluster, dict] = None, + request: Optional[Union[instance.Cluster, dict]] = None, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Updates a cluster within an instance. 
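As a rough usage sketch of the synchronous instance-admin methods touched above, assuming application-default credentials and hypothetical project, instance, and cluster IDs; with ``request`` now ``Optional`` and defaulting to ``None``, the flattened ``name`` argument can still be passed on its own:

    from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
        BigtableInstanceAdminClient,
    )

    client = BigtableInstanceAdminClient()  # uses application-default credentials

    # The request may be given as a typed message, a dict, or omitted entirely in
    # favor of the flattened ``name`` argument (all hypothetical IDs below).
    cluster = client.get_cluster(
        name="projects/my-project/instances/my-instance/clusters/my-cluster"
    )
    print(cluster.serve_nodes)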
@@ -1430,14 +1456,14 @@ def update_cluster( def partial_update_cluster( self, - request: Union[ - bigtable_instance_admin.PartialUpdateClusterRequest, dict + request: Optional[ + Union[bigtable_instance_admin.PartialUpdateClusterRequest, dict] ] = None, *, - cluster: instance.Cluster = None, - update_mask: field_mask_pb2.FieldMask = None, + cluster: Optional[instance.Cluster] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Partially updates a cluster within a project. This method is the @@ -1545,11 +1571,13 @@ def partial_update_cluster( def delete_cluster( self, - request: Union[bigtable_instance_admin.DeleteClusterRequest, dict] = None, + request: Optional[ + Union[bigtable_instance_admin.DeleteClusterRequest, dict] + ] = None, *, - name: str = None, + name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes a cluster from an instance. @@ -1613,13 +1641,15 @@ def delete_cluster( def create_app_profile( self, - request: Union[bigtable_instance_admin.CreateAppProfileRequest, dict] = None, + request: Optional[ + Union[bigtable_instance_admin.CreateAppProfileRequest, dict] + ] = None, *, - parent: str = None, - app_profile_id: str = None, - app_profile: instance.AppProfile = None, + parent: Optional[str] = None, + app_profile_id: Optional[str] = None, + app_profile: Optional[instance.AppProfile] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> instance.AppProfile: r"""Creates an app profile within an instance. @@ -1713,11 +1743,13 @@ def create_app_profile( def get_app_profile( self, - request: Union[bigtable_instance_admin.GetAppProfileRequest, dict] = None, + request: Optional[ + Union[bigtable_instance_admin.GetAppProfileRequest, dict] + ] = None, *, - name: str = None, + name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> instance.AppProfile: r"""Gets information about an app profile. @@ -1791,11 +1823,13 @@ def get_app_profile( def list_app_profiles( self, - request: Union[bigtable_instance_admin.ListAppProfilesRequest, dict] = None, + request: Optional[ + Union[bigtable_instance_admin.ListAppProfilesRequest, dict] + ] = None, *, - parent: str = None, + parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListAppProfilesPager: r"""Lists information about app profiles in an instance. 
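For the paginated methods such as ``list_app_profiles`` above, a minimal sketch (hypothetical project and instance names) showing that the returned pager is simply iterated; page fetching happens behind the scenes:

    from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
        BigtableInstanceAdminClient,
    )

    client = BigtableInstanceAdminClient()

    # ListAppProfilesPager is iterable and transparently requests further pages.
    for app_profile in client.list_app_profiles(
        parent="projects/my-project/instances/my-instance"
    ):
        print(app_profile.name)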
@@ -1883,12 +1917,14 @@ def list_app_profiles( def update_app_profile( self, - request: Union[bigtable_instance_admin.UpdateAppProfileRequest, dict] = None, + request: Optional[ + Union[bigtable_instance_admin.UpdateAppProfileRequest, dict] + ] = None, *, - app_profile: instance.AppProfile = None, - update_mask: field_mask_pb2.FieldMask = None, + app_profile: Optional[instance.AppProfile] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Updates an app profile within an instance. @@ -1982,11 +2018,13 @@ def update_app_profile( def delete_app_profile( self, - request: Union[bigtable_instance_admin.DeleteAppProfileRequest, dict] = None, + request: Optional[ + Union[bigtable_instance_admin.DeleteAppProfileRequest, dict] + ] = None, *, - name: str = None, + name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes an app profile from an instance. @@ -2050,11 +2088,11 @@ def delete_app_profile( def get_iam_policy( self, - request: Union[iam_policy_pb2.GetIamPolicyRequest, dict] = None, + request: Optional[Union[iam_policy_pb2.GetIamPolicyRequest, dict]] = None, *, - resource: str = None, + resource: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> policy_pb2.Policy: r"""Gets the access control policy for an instance @@ -2188,11 +2226,11 @@ def get_iam_policy( def set_iam_policy( self, - request: Union[iam_policy_pb2.SetIamPolicyRequest, dict] = None, + request: Optional[Union[iam_policy_pb2.SetIamPolicyRequest, dict]] = None, *, - resource: str = None, + resource: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> policy_pb2.Policy: r"""Sets the access control policy on an instance @@ -2325,12 +2363,12 @@ def set_iam_policy( def test_iam_permissions( self, - request: Union[iam_policy_pb2.TestIamPermissionsRequest, dict] = None, + request: Optional[Union[iam_policy_pb2.TestIamPermissionsRequest, dict]] = None, *, - resource: str = None, - permissions: Sequence[str] = None, + resource: Optional[str] = None, + permissions: Optional[MutableSequence[str]] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: r"""Returns permissions that the caller has on the @@ -2349,7 +2387,7 @@ def test_iam_permissions( This corresponds to the ``resource`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - permissions (Sequence[str]): + permissions (MutableSequence[str]): The set of permissions to check for the ``resource``. Permissions with wildcards (such as '*' or 'storage.*') are not allowed. 
For more information see `IAM @@ -2413,11 +2451,13 @@ def test_iam_permissions( def list_hot_tablets( self, - request: Union[bigtable_instance_admin.ListHotTabletsRequest, dict] = None, + request: Optional[ + Union[bigtable_instance_admin.ListHotTabletsRequest, dict] + ] = None, *, - parent: str = None, + parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListHotTabletsPager: r"""Lists hot tablets in a cluster, within the time range diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py index a5e1c40d463f..124db9eef26f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py @@ -62,7 +62,7 @@ def __init__( self, *, host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, + credentials: Optional[ga_credentials.Credentials] = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py index fcf0cd94fbd2..f037f5a44bf5 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py @@ -56,14 +56,14 @@ def __init__( self, *, host: str = "bigtableadmin.googleapis.com", - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[grpc.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool] = False, @@ -191,8 +191,8 @@ def __init__( def create_channel( cls, host: str = "bigtableadmin.googleapis.com", - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, **kwargs, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py 
b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py index efcb3ed8066e..82b03b0bbae8 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py @@ -58,7 +58,7 @@ class BigtableInstanceAdminGrpcAsyncIOTransport(BigtableInstanceAdminTransport): def create_channel( cls, host: str = "bigtableadmin.googleapis.com", - credentials: ga_credentials.Credentials = None, + credentials: Optional[ga_credentials.Credentials] = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, @@ -101,15 +101,15 @@ def __init__( self, *, host: str = "bigtableadmin.googleapis.com", - credentials: ga_credentials.Credentials = None, + credentials: Optional[ga_credentials.Credentials] = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, + channel: Optional[aio.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool] = False, api_audience: Optional[str] = None, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index 39761c3b610a..cc0a5bceebcd 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -16,7 +16,17 @@ from collections import OrderedDict import functools import re -from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, +) import pkg_resources from google.api_core.client_options import ClientOptions @@ -186,9 +196,9 @@ def transport(self) -> BigtableTableAdminTransport: def __init__( self, *, - credentials: ga_credentials.Credentials = None, + credentials: Optional[ga_credentials.Credentials] = None, transport: Union[str, BigtableTableAdminTransport] = "grpc_asyncio", - client_options: ClientOptions = None, + client_options: Optional[ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiates the bigtable table admin client. 
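A brief sketch of instantiating the async table-admin client whose constructor is retyped here, assuming application-default credentials and a hypothetical table path; the transport defaults to ``"grpc_asyncio"``:

    import asyncio

    from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
        BigtableTableAdminAsyncClient,
    )

    async def main() -> None:
        client = BigtableTableAdminAsyncClient()  # transport defaults to "grpc_asyncio"
        table = await client.get_table(
            name="projects/my-project/instances/my-instance/tables/my-table"
        )
        print(table.name)

    asyncio.run(main())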
@@ -232,13 +242,13 @@ def __init__( async def create_table( self, - request: Union[bigtable_table_admin.CreateTableRequest, dict] = None, + request: Optional[Union[bigtable_table_admin.CreateTableRequest, dict]] = None, *, - parent: str = None, - table_id: str = None, - table: gba_table.Table = None, + parent: Optional[str] = None, + table_id: Optional[str] = None, + table: Optional[gba_table.Table] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gba_table.Table: r"""Creates a new table in the specified instance. @@ -246,7 +256,7 @@ async def create_table( column families, specified in the request. Args: - request (Union[google.cloud.bigtable_admin_v2.types.CreateTableRequest, dict]): + request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateTableRequest, dict]]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] parent (:class:`str`): @@ -333,15 +343,15 @@ async def create_table( async def create_table_from_snapshot( self, - request: Union[ - bigtable_table_admin.CreateTableFromSnapshotRequest, dict + request: Optional[ + Union[bigtable_table_admin.CreateTableFromSnapshotRequest, dict] ] = None, *, - parent: str = None, - table_id: str = None, - source_snapshot: str = None, + parent: Optional[str] = None, + table_id: Optional[str] = None, + source_snapshot: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Creates a new table from the specified snapshot. The @@ -355,7 +365,7 @@ async def create_table_from_snapshot( SLA or deprecation policy. Args: - request (Union[google.cloud.bigtable_admin_v2.types.CreateTableFromSnapshotRequest, dict]): + request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateTableFromSnapshotRequest, dict]]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot] Note: This is a private alpha release of Cloud Bigtable @@ -460,17 +470,17 @@ async def create_table_from_snapshot( async def list_tables( self, - request: Union[bigtable_table_admin.ListTablesRequest, dict] = None, + request: Optional[Union[bigtable_table_admin.ListTablesRequest, dict]] = None, *, - parent: str = None, + parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListTablesAsyncPager: r"""Lists all tables served from a specified instance. Args: - request (Union[google.cloud.bigtable_admin_v2.types.ListTablesRequest, dict]): + request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListTablesRequest, dict]]): The request object. 
Request message for [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] parent (:class:`str`): @@ -559,17 +569,17 @@ async def list_tables( async def get_table( self, - request: Union[bigtable_table_admin.GetTableRequest, dict] = None, + request: Optional[Union[bigtable_table_admin.GetTableRequest, dict]] = None, *, - name: str = None, + name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> table.Table: r"""Gets metadata information about the specified table. Args: - request (Union[google.cloud.bigtable_admin_v2.types.GetTableRequest, dict]): + request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetTableRequest, dict]]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.GetTable][google.bigtable.admin.v2.BigtableTableAdmin.GetTable] name (:class:`str`): @@ -648,18 +658,18 @@ async def get_table( async def update_table( self, - request: Union[bigtable_table_admin.UpdateTableRequest, dict] = None, + request: Optional[Union[bigtable_table_admin.UpdateTableRequest, dict]] = None, *, - table: gba_table.Table = None, - update_mask: field_mask_pb2.FieldMask = None, + table: Optional[gba_table.Table] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Updates a specified table. Args: - request (Union[google.cloud.bigtable_admin_v2.types.UpdateTableRequest, dict]): + request (Optional[Union[google.cloud.bigtable_admin_v2.types.UpdateTableRequest, dict]]): The request object. The request for [UpdateTable][google.bigtable.admin.v2.BigtableTableAdmin.UpdateTable]. table (:class:`google.cloud.bigtable_admin_v2.types.Table`): @@ -756,18 +766,18 @@ async def update_table( async def delete_table( self, - request: Union[bigtable_table_admin.DeleteTableRequest, dict] = None, + request: Optional[Union[bigtable_table_admin.DeleteTableRequest, dict]] = None, *, - name: str = None, + name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Permanently deletes a specified table and all of its data. Args: - request (Union[google.cloud.bigtable_admin_v2.types.DeleteTableRequest, dict]): + request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteTableRequest, dict]]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable] name (:class:`str`): @@ -825,18 +835,20 @@ async def delete_table( async def undelete_table( self, - request: Union[bigtable_table_admin.UndeleteTableRequest, dict] = None, + request: Optional[ + Union[bigtable_table_admin.UndeleteTableRequest, dict] + ] = None, *, - name: str = None, + name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Restores a specified table which was accidentally deleted. 
Args: - request (Union[google.cloud.bigtable_admin_v2.types.UndeleteTableRequest, dict]): + request (Optional[Union[google.cloud.bigtable_admin_v2.types.UndeleteTableRequest, dict]]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable] name (:class:`str`): @@ -914,14 +926,18 @@ async def undelete_table( async def modify_column_families( self, - request: Union[bigtable_table_admin.ModifyColumnFamiliesRequest, dict] = None, + request: Optional[ + Union[bigtable_table_admin.ModifyColumnFamiliesRequest, dict] + ] = None, *, - name: str = None, - modifications: Sequence[ - bigtable_table_admin.ModifyColumnFamiliesRequest.Modification + name: Optional[str] = None, + modifications: Optional[ + MutableSequence[ + bigtable_table_admin.ModifyColumnFamiliesRequest.Modification + ] ] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> table.Table: r"""Performs a series of column family modifications on @@ -931,7 +947,7 @@ async def modify_column_families( table where only some modifications have taken effect. Args: - request (Union[google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest, dict]): + request (Optional[Union[google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest, dict]]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies] name (:class:`str`): @@ -942,7 +958,7 @@ async def modify_column_families( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - modifications (:class:`Sequence[google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest.Modification]`): + modifications (:class:`MutableSequence[google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest.Modification]`): Required. Modifications to be atomically applied to the specified table's families. Entries are applied in @@ -1014,10 +1030,10 @@ async def modify_column_families( async def drop_row_range( self, - request: Union[bigtable_table_admin.DropRowRangeRequest, dict] = None, + request: Optional[Union[bigtable_table_admin.DropRowRangeRequest, dict]] = None, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Permanently drop/delete a row range from a specified @@ -1026,7 +1042,7 @@ async def drop_row_range( prefix. Args: - request (Union[google.cloud.bigtable_admin_v2.types.DropRowRangeRequest, dict]): + request (Optional[Union[google.cloud.bigtable_admin_v2.types.DropRowRangeRequest, dict]]): The request object. 
Request message for [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] retry (google.api_core.retry.Retry): Designation of what errors, if any, @@ -1062,13 +1078,13 @@ async def drop_row_range( async def generate_consistency_token( self, - request: Union[ - bigtable_table_admin.GenerateConsistencyTokenRequest, dict + request: Optional[ + Union[bigtable_table_admin.GenerateConsistencyTokenRequest, dict] ] = None, *, - name: str = None, + name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> bigtable_table_admin.GenerateConsistencyTokenResponse: r"""Generates a consistency token for a Table, which can @@ -1078,7 +1094,7 @@ async def generate_consistency_token( days. Args: - request (Union[google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenRequest, dict]): + request (Optional[Union[google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenRequest, dict]]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] name (:class:`str`): @@ -1155,12 +1171,14 @@ async def generate_consistency_token( async def check_consistency( self, - request: Union[bigtable_table_admin.CheckConsistencyRequest, dict] = None, + request: Optional[ + Union[bigtable_table_admin.CheckConsistencyRequest, dict] + ] = None, *, - name: str = None, - consistency_token: str = None, + name: Optional[str] = None, + consistency_token: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> bigtable_table_admin.CheckConsistencyResponse: r"""Checks replication consistency based on a consistency @@ -1169,7 +1187,7 @@ async def check_consistency( request. Args: - request (Union[google.cloud.bigtable_admin_v2.types.CheckConsistencyRequest, dict]): + request (Optional[Union[google.cloud.bigtable_admin_v2.types.CheckConsistencyRequest, dict]]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] name (:class:`str`): @@ -1255,14 +1273,16 @@ async def check_consistency( async def snapshot_table( self, - request: Union[bigtable_table_admin.SnapshotTableRequest, dict] = None, + request: Optional[ + Union[bigtable_table_admin.SnapshotTableRequest, dict] + ] = None, *, - name: str = None, - cluster: str = None, - snapshot_id: str = None, - description: str = None, + name: Optional[str] = None, + cluster: Optional[str] = None, + snapshot_id: Optional[str] = None, + description: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Creates a new snapshot in the specified cluster from @@ -1276,7 +1296,7 @@ async def snapshot_table( SLA or deprecation policy. Args: - request (Union[google.cloud.bigtable_admin_v2.types.SnapshotTableRequest, dict]): + request (Optional[Union[google.cloud.bigtable_admin_v2.types.SnapshotTableRequest, dict]]): The request object. 
Request message for [google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable][google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable] Note: This is a private alpha release of Cloud Bigtable @@ -1396,11 +1416,11 @@ async def snapshot_table( async def get_snapshot( self, - request: Union[bigtable_table_admin.GetSnapshotRequest, dict] = None, + request: Optional[Union[bigtable_table_admin.GetSnapshotRequest, dict]] = None, *, - name: str = None, + name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> table.Snapshot: r"""Gets metadata information about the specified @@ -1413,7 +1433,7 @@ async def get_snapshot( SLA or deprecation policy. Args: - request (Union[google.cloud.bigtable_admin_v2.types.GetSnapshotRequest, dict]): + request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetSnapshotRequest, dict]]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot] Note: This is a private alpha release of Cloud Bigtable @@ -1506,11 +1526,13 @@ async def get_snapshot( async def list_snapshots( self, - request: Union[bigtable_table_admin.ListSnapshotsRequest, dict] = None, + request: Optional[ + Union[bigtable_table_admin.ListSnapshotsRequest, dict] + ] = None, *, - parent: str = None, + parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListSnapshotsAsyncPager: r"""Lists all snapshots associated with the specified @@ -1523,7 +1545,7 @@ async def list_snapshots( SLA or deprecation policy. Args: - request (Union[google.cloud.bigtable_admin_v2.types.ListSnapshotsRequest, dict]): + request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListSnapshotsRequest, dict]]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] Note: This is a private alpha release of Cloud Bigtable @@ -1628,11 +1650,13 @@ async def list_snapshots( async def delete_snapshot( self, - request: Union[bigtable_table_admin.DeleteSnapshotRequest, dict] = None, + request: Optional[ + Union[bigtable_table_admin.DeleteSnapshotRequest, dict] + ] = None, *, - name: str = None, + name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Permanently deletes the specified snapshot. @@ -1644,7 +1668,7 @@ async def delete_snapshot( SLA or deprecation policy. Args: - request (Union[google.cloud.bigtable_admin_v2.types.DeleteSnapshotRequest, dict]): + request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteSnapshotRequest, dict]]): The request object. 
Request message for [google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot] Note: This is a private alpha release of Cloud Bigtable @@ -1708,13 +1732,13 @@ async def delete_snapshot( async def create_backup( self, - request: Union[bigtable_table_admin.CreateBackupRequest, dict] = None, + request: Optional[Union[bigtable_table_admin.CreateBackupRequest, dict]] = None, *, - parent: str = None, - backup_id: str = None, - backup: table.Backup = None, + parent: Optional[str] = None, + backup_id: Optional[str] = None, + backup: Optional[table.Backup] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Starts creating a new Cloud Bigtable Backup. The returned backup @@ -1728,7 +1752,7 @@ async def create_backup( delete the backup. Args: - request (Union[google.cloud.bigtable_admin_v2.types.CreateBackupRequest, dict]): + request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateBackupRequest, dict]]): The request object. The request for [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup]. parent (:class:`str`): @@ -1828,18 +1852,18 @@ async def create_backup( async def get_backup( self, - request: Union[bigtable_table_admin.GetBackupRequest, dict] = None, + request: Optional[Union[bigtable_table_admin.GetBackupRequest, dict]] = None, *, - name: str = None, + name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> table.Backup: r"""Gets metadata on a pending or completed Cloud Bigtable Backup. Args: - request (Union[google.cloud.bigtable_admin_v2.types.GetBackupRequest, dict]): + request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetBackupRequest, dict]]): The request object. The request for [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup]. name (:class:`str`): @@ -1913,18 +1937,18 @@ async def get_backup( async def update_backup( self, - request: Union[bigtable_table_admin.UpdateBackupRequest, dict] = None, + request: Optional[Union[bigtable_table_admin.UpdateBackupRequest, dict]] = None, *, - backup: table.Backup = None, - update_mask: field_mask_pb2.FieldMask = None, + backup: Optional[table.Backup] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> table.Backup: r"""Updates a pending or completed Cloud Bigtable Backup. Args: - request (Union[google.cloud.bigtable_admin_v2.types.UpdateBackupRequest, dict]): + request (Optional[Union[google.cloud.bigtable_admin_v2.types.UpdateBackupRequest, dict]]): The request object. The request for [UpdateBackup][google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup]. 
backup (:class:`google.cloud.bigtable_admin_v2.types.Backup`): @@ -2008,17 +2032,17 @@ async def update_backup( async def delete_backup( self, - request: Union[bigtable_table_admin.DeleteBackupRequest, dict] = None, + request: Optional[Union[bigtable_table_admin.DeleteBackupRequest, dict]] = None, *, - name: str = None, + name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes a pending or completed Cloud Bigtable backup. Args: - request (Union[google.cloud.bigtable_admin_v2.types.DeleteBackupRequest, dict]): + request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteBackupRequest, dict]]): The request object. The request for [DeleteBackup][google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup]. name (:class:`str`): @@ -2076,18 +2100,18 @@ async def delete_backup( async def list_backups( self, - request: Union[bigtable_table_admin.ListBackupsRequest, dict] = None, + request: Optional[Union[bigtable_table_admin.ListBackupsRequest, dict]] = None, *, - parent: str = None, + parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListBackupsAsyncPager: r"""Lists Cloud Bigtable backups. Returns both completed and pending backups. Args: - request (Union[google.cloud.bigtable_admin_v2.types.ListBackupsRequest, dict]): + request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListBackupsRequest, dict]]): The request object. The request for [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. parent (:class:`str`): @@ -2179,10 +2203,10 @@ async def list_backups( async def restore_table( self, - request: Union[bigtable_table_admin.RestoreTableRequest, dict] = None, + request: Optional[Union[bigtable_table_admin.RestoreTableRequest, dict]] = None, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Create a new table by restoring from a completed backup. The new @@ -2196,7 +2220,7 @@ async def restore_table( [Table][google.bigtable.admin.v2.Table], if successful. Args: - request (Union[google.cloud.bigtable_admin_v2.types.RestoreTableRequest, dict]): + request (Optional[Union[google.cloud.bigtable_admin_v2.types.RestoreTableRequest, dict]]): The request object. The request for [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. retry (google.api_core.retry.Retry): Designation of what errors, if any, @@ -2252,11 +2276,11 @@ async def restore_table( async def get_iam_policy( self, - request: Union[iam_policy_pb2.GetIamPolicyRequest, dict] = None, + request: Optional[Union[iam_policy_pb2.GetIamPolicyRequest, dict]] = None, *, - resource: str = None, + resource: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> policy_pb2.Policy: r"""Gets the access control policy for a Table or Backup @@ -2264,7 +2288,7 @@ async def get_iam_policy( but does not have a policy set. Args: - request (Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]): + request (Optional[Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]]): The request object. Request message for `GetIamPolicy` method. 
resource (:class:`str`): @@ -2403,18 +2427,18 @@ async def get_iam_policy( async def set_iam_policy( self, - request: Union[iam_policy_pb2.SetIamPolicyRequest, dict] = None, + request: Optional[Union[iam_policy_pb2.SetIamPolicyRequest, dict]] = None, *, - resource: str = None, + resource: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> policy_pb2.Policy: r"""Sets the access control policy on a Table or Backup resource. Replaces any existing policy. Args: - request (Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]): + request (Optional[Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]]): The request object. Request message for `SetIamPolicy` method. resource (:class:`str`): @@ -2543,19 +2567,19 @@ async def set_iam_policy( async def test_iam_permissions( self, - request: Union[iam_policy_pb2.TestIamPermissionsRequest, dict] = None, + request: Optional[Union[iam_policy_pb2.TestIamPermissionsRequest, dict]] = None, *, - resource: str = None, - permissions: Sequence[str] = None, + resource: Optional[str] = None, + permissions: Optional[MutableSequence[str]] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: r"""Returns permissions that the caller has on the specified Table or Backup resource. Args: - request (Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]): + request (Optional[Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]]): The request object. Request message for `TestIamPermissions` method. resource (:class:`str`): @@ -2567,7 +2591,7 @@ async def test_iam_permissions( This corresponds to the ``resource`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - permissions (:class:`Sequence[str]`): + permissions (:class:`MutableSequence[str]`): The set of permissions to check for the ``resource``. Permissions with wildcards (such as '*' or 'storage.*') are not allowed. For more information see `IAM diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index 481e73d6502f..a77b9d0ee3f1 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -16,7 +16,18 @@ from collections import OrderedDict import os import re -from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, + cast, +) import pkg_resources from google.api_core import client_options as client_options_lib @@ -65,7 +76,7 @@ class BigtableTableAdminClientMeta(type): def get_transport_class( cls, - label: str = None, + label: Optional[str] = None, ) -> Type[BigtableTableAdminTransport]: """Returns an appropriate transport class. 
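A minimal sketch of ``test_iam_permissions`` with the ``permissions`` argument now annotated as ``MutableSequence[str]``; a plain list still satisfies the annotation. The resource path and permission string below are illustrative examples, not values taken from this patch:

    from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
        BigtableTableAdminClient,
    )

    client = BigtableTableAdminClient()

    # ``permissions`` accepts any mutable sequence of permission strings.
    response = client.test_iam_permissions(
        resource="projects/my-project/instances/my-instance/tables/my-table",
        permissions=["bigtable.tables.readRows"],  # example permission; adjust as needed
    )
    print(list(response.permissions))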
@@ -458,8 +469,8 @@ def __init__( self, *, credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, BigtableTableAdminTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, + transport: Optional[Union[str, BigtableTableAdminTransport]] = None, + client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiates the bigtable table admin client. @@ -473,7 +484,7 @@ def __init__( transport (Union[str, BigtableTableAdminTransport]): The transport to use. If set to None, a transport is chosen automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT @@ -503,6 +514,7 @@ def __init__( client_options = client_options_lib.from_dict(client_options) if client_options is None: client_options = client_options_lib.ClientOptions() + client_options = cast(client_options_lib.ClientOptions, client_options) api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source( client_options @@ -555,13 +567,13 @@ def __init__( def create_table( self, - request: Union[bigtable_table_admin.CreateTableRequest, dict] = None, + request: Optional[Union[bigtable_table_admin.CreateTableRequest, dict]] = None, *, - parent: str = None, - table_id: str = None, - table: gba_table.Table = None, + parent: Optional[str] = None, + table_id: Optional[str] = None, + table: Optional[gba_table.Table] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> gba_table.Table: r"""Creates a new table in the specified instance. @@ -656,15 +668,15 @@ def create_table( def create_table_from_snapshot( self, - request: Union[ - bigtable_table_admin.CreateTableFromSnapshotRequest, dict + request: Optional[ + Union[bigtable_table_admin.CreateTableFromSnapshotRequest, dict] ] = None, *, - parent: str = None, - table_id: str = None, - source_snapshot: str = None, + parent: Optional[str] = None, + table_id: Optional[str] = None, + source_snapshot: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Creates a new table from the specified snapshot. The @@ -785,11 +797,11 @@ def create_table_from_snapshot( def list_tables( self, - request: Union[bigtable_table_admin.ListTablesRequest, dict] = None, + request: Optional[Union[bigtable_table_admin.ListTablesRequest, dict]] = None, *, - parent: str = None, + parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListTablesPager: r"""Lists all tables served from a specified instance. 
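Since ``client_options`` is now typed as ``Optional[Union[ClientOptions, dict]]`` and normalized through ``client_options_lib.from_dict`` plus the added ``cast``, a plain dict can be passed directly. A short sketch with a hypothetical project and instance, using the default admin endpoint for illustration:

    from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
        BigtableTableAdminClient,
    )

    # A dict is accepted as well as a ClientOptions instance.
    client = BigtableTableAdminClient(
        client_options={"api_endpoint": "bigtableadmin.googleapis.com"}
    )

    for table in client.list_tables(parent="projects/my-project/instances/my-instance"):
        print(table.name)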
@@ -874,11 +886,11 @@ def list_tables( def get_table( self, - request: Union[bigtable_table_admin.GetTableRequest, dict] = None, + request: Optional[Union[bigtable_table_admin.GetTableRequest, dict]] = None, *, - name: str = None, + name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> table.Table: r"""Gets metadata information about the specified table. @@ -953,12 +965,12 @@ def get_table( def update_table( self, - request: Union[bigtable_table_admin.UpdateTableRequest, dict] = None, + request: Optional[Union[bigtable_table_admin.UpdateTableRequest, dict]] = None, *, - table: gba_table.Table = None, - update_mask: field_mask_pb2.FieldMask = None, + table: Optional[gba_table.Table] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Updates a specified table. @@ -1061,11 +1073,11 @@ def update_table( def delete_table( self, - request: Union[bigtable_table_admin.DeleteTableRequest, dict] = None, + request: Optional[Union[bigtable_table_admin.DeleteTableRequest, dict]] = None, *, - name: str = None, + name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Permanently deletes a specified table and all of its @@ -1130,11 +1142,13 @@ def delete_table( def undelete_table( self, - request: Union[bigtable_table_admin.UndeleteTableRequest, dict] = None, + request: Optional[ + Union[bigtable_table_admin.UndeleteTableRequest, dict] + ] = None, *, - name: str = None, + name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Restores a specified table which was accidentally @@ -1219,14 +1233,18 @@ def undelete_table( def modify_column_families( self, - request: Union[bigtable_table_admin.ModifyColumnFamiliesRequest, dict] = None, + request: Optional[ + Union[bigtable_table_admin.ModifyColumnFamiliesRequest, dict] + ] = None, *, - name: str = None, - modifications: Sequence[ - bigtable_table_admin.ModifyColumnFamiliesRequest.Modification + name: Optional[str] = None, + modifications: Optional[ + MutableSequence[ + bigtable_table_admin.ModifyColumnFamiliesRequest.Modification + ] ] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> table.Table: r"""Performs a series of column family modifications on @@ -1247,7 +1265,7 @@ def modify_column_families( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - modifications (Sequence[google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest.Modification]): + modifications (MutableSequence[google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest.Modification]): Required. Modifications to be atomically applied to the specified table's families. 
Entries are applied in @@ -1319,10 +1337,10 @@ def modify_column_families( def drop_row_range( self, - request: Union[bigtable_table_admin.DropRowRangeRequest, dict] = None, + request: Optional[Union[bigtable_table_admin.DropRowRangeRequest, dict]] = None, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Permanently drop/delete a row range from a specified @@ -1368,13 +1386,13 @@ def drop_row_range( def generate_consistency_token( self, - request: Union[ - bigtable_table_admin.GenerateConsistencyTokenRequest, dict + request: Optional[ + Union[bigtable_table_admin.GenerateConsistencyTokenRequest, dict] ] = None, *, - name: str = None, + name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> bigtable_table_admin.GenerateConsistencyTokenResponse: r"""Generates a consistency token for a Table, which can @@ -1455,12 +1473,14 @@ def generate_consistency_token( def check_consistency( self, - request: Union[bigtable_table_admin.CheckConsistencyRequest, dict] = None, + request: Optional[ + Union[bigtable_table_admin.CheckConsistencyRequest, dict] + ] = None, *, - name: str = None, - consistency_token: str = None, + name: Optional[str] = None, + consistency_token: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> bigtable_table_admin.CheckConsistencyResponse: r"""Checks replication consistency based on a consistency @@ -1545,14 +1565,16 @@ def check_consistency( def snapshot_table( self, - request: Union[bigtable_table_admin.SnapshotTableRequest, dict] = None, + request: Optional[ + Union[bigtable_table_admin.SnapshotTableRequest, dict] + ] = None, *, - name: str = None, - cluster: str = None, - snapshot_id: str = None, - description: str = None, + name: Optional[str] = None, + cluster: Optional[str] = None, + snapshot_id: Optional[str] = None, + description: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Creates a new snapshot in the specified cluster from @@ -1686,11 +1708,11 @@ def snapshot_table( def get_snapshot( self, - request: Union[bigtable_table_admin.GetSnapshotRequest, dict] = None, + request: Optional[Union[bigtable_table_admin.GetSnapshotRequest, dict]] = None, *, - name: str = None, + name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> table.Snapshot: r"""Gets metadata information about the specified @@ -1786,11 +1808,13 @@ def get_snapshot( def list_snapshots( self, - request: Union[bigtable_table_admin.ListSnapshotsRequest, dict] = None, + request: Optional[ + Union[bigtable_table_admin.ListSnapshotsRequest, dict] + ] = None, *, - parent: str = None, + parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListSnapshotsPager: r"""Lists all snapshots associated with the specified @@ -1898,11 +1922,13 @@ def list_snapshots( def delete_snapshot( self, - request: 
Union[bigtable_table_admin.DeleteSnapshotRequest, dict] = None, + request: Optional[ + Union[bigtable_table_admin.DeleteSnapshotRequest, dict] + ] = None, *, - name: str = None, + name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Permanently deletes the specified snapshot. @@ -1978,13 +2004,13 @@ def delete_snapshot( def create_backup( self, - request: Union[bigtable_table_admin.CreateBackupRequest, dict] = None, + request: Optional[Union[bigtable_table_admin.CreateBackupRequest, dict]] = None, *, - parent: str = None, - backup_id: str = None, - backup: table.Backup = None, + parent: Optional[str] = None, + backup_id: Optional[str] = None, + backup: Optional[table.Backup] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Starts creating a new Cloud Bigtable Backup. The returned backup @@ -2098,11 +2124,11 @@ def create_backup( def get_backup( self, - request: Union[bigtable_table_admin.GetBackupRequest, dict] = None, + request: Optional[Union[bigtable_table_admin.GetBackupRequest, dict]] = None, *, - name: str = None, + name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> table.Backup: r"""Gets metadata on a pending or completed Cloud @@ -2173,12 +2199,12 @@ def get_backup( def update_backup( self, - request: Union[bigtable_table_admin.UpdateBackupRequest, dict] = None, + request: Optional[Union[bigtable_table_admin.UpdateBackupRequest, dict]] = None, *, - backup: table.Backup = None, - update_mask: field_mask_pb2.FieldMask = None, + backup: Optional[table.Backup] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> table.Backup: r"""Updates a pending or completed Cloud Bigtable Backup. @@ -2268,11 +2294,11 @@ def update_backup( def delete_backup( self, - request: Union[bigtable_table_admin.DeleteBackupRequest, dict] = None, + request: Optional[Union[bigtable_table_admin.DeleteBackupRequest, dict]] = None, *, - name: str = None, + name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes a pending or completed Cloud Bigtable backup. @@ -2336,11 +2362,11 @@ def delete_backup( def list_backups( self, - request: Union[bigtable_table_admin.ListBackupsRequest, dict] = None, + request: Optional[Union[bigtable_table_admin.ListBackupsRequest, dict]] = None, *, - parent: str = None, + parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListBackupsPager: r"""Lists Cloud Bigtable backups. 
Returns both completed @@ -2429,10 +2455,10 @@ def list_backups( def restore_table( self, - request: Union[bigtable_table_admin.RestoreTableRequest, dict] = None, + request: Optional[Union[bigtable_table_admin.RestoreTableRequest, dict]] = None, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Create a new table by restoring from a completed backup. The new @@ -2503,11 +2529,11 @@ def restore_table( def get_iam_policy( self, - request: Union[iam_policy_pb2.GetIamPolicyRequest, dict] = None, + request: Optional[Union[iam_policy_pb2.GetIamPolicyRequest, dict]] = None, *, - resource: str = None, + resource: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> policy_pb2.Policy: r"""Gets the access control policy for a Table or Backup @@ -2641,11 +2667,11 @@ def get_iam_policy( def set_iam_policy( self, - request: Union[iam_policy_pb2.SetIamPolicyRequest, dict] = None, + request: Optional[Union[iam_policy_pb2.SetIamPolicyRequest, dict]] = None, *, - resource: str = None, + resource: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> policy_pb2.Policy: r"""Sets the access control policy on a Table or Backup @@ -2778,12 +2804,12 @@ def set_iam_policy( def test_iam_permissions( self, - request: Union[iam_policy_pb2.TestIamPermissionsRequest, dict] = None, + request: Optional[Union[iam_policy_pb2.TestIamPermissionsRequest, dict]] = None, *, - resource: str = None, - permissions: Sequence[str] = None, + resource: Optional[str] = None, + permissions: Optional[MutableSequence[str]] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: r"""Returns permissions that the caller has on the @@ -2802,7 +2828,7 @@ def test_iam_permissions( This corresponds to the ``resource`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - permissions (Sequence[str]): + permissions (MutableSequence[str]): The set of permissions to check for the ``resource``. Permissions with wildcards (such as '*' or 'storage.*') are not allowed. 
For more information see `IAM diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py index ebed352843d0..206a2e268203 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py @@ -62,7 +62,7 @@ def __init__( self, *, host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, + credentials: Optional[ga_credentials.Credentials] = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py index fc5fbcee5c44..f8cf9f83491a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py @@ -58,14 +58,14 @@ def __init__( self, *, host: str = "bigtableadmin.googleapis.com", - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[grpc.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool] = False, @@ -193,8 +193,8 @@ def __init__( def create_channel( cls, host: str = "bigtableadmin.googleapis.com", - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, **kwargs, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py index f96770632592..54eb7e524d03 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py @@ -60,7 +60,7 @@ class BigtableTableAdminGrpcAsyncIOTransport(BigtableTableAdminTransport): def create_channel( cls, host: str = "bigtableadmin.googleapis.com", - credentials: ga_credentials.Credentials = None, + 
credentials: Optional[ga_credentials.Credentials] = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, @@ -103,15 +103,15 @@ def __init__( self, *, host: str = "bigtableadmin.googleapis.com", - credentials: ga_credentials.Credentials = None, + credentials: Optional[ga_credentials.Credentials] = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, + channel: Optional[aio.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool] = False, api_audience: Optional[str] = None, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py index 36ad5dc9cffe..f5a3f1124ac0 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # +from typing import MutableMapping, MutableSequence + import proto # type: ignore from google.cloud.bigtable_admin_v2.types import instance as gba_instance @@ -68,7 +70,7 @@ class CreateInstanceRequest(proto.Message): instance (google.cloud.bigtable_admin_v2.types.Instance): Required. The instance to create. Fields marked ``OutputOnly`` must be left blank. - clusters (Mapping[str, google.cloud.bigtable_admin_v2.types.Cluster]): + clusters (MutableMapping[str, google.cloud.bigtable_admin_v2.types.Cluster]): Required. The clusters to be created within the instance, mapped by desired cluster ID, e.g., just ``mycluster`` rather than @@ -77,20 +79,20 @@ class CreateInstanceRequest(proto.Message): at most four clusters can be specified. """ - parent = proto.Field( + parent: str = proto.Field( proto.STRING, number=1, ) - instance_id = proto.Field( + instance_id: str = proto.Field( proto.STRING, number=2, ) - instance = proto.Field( + instance: gba_instance.Instance = proto.Field( proto.MESSAGE, number=3, message=gba_instance.Instance, ) - clusters = proto.MapField( + clusters: MutableMapping[str, gba_instance.Cluster] = proto.MapField( proto.STRING, proto.MESSAGE, number=4, @@ -107,7 +109,7 @@ class GetInstanceRequest(proto.Message): are of the form ``projects/{project}/instances/{instance}``. """ - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) @@ -125,11 +127,11 @@ class ListInstancesRequest(proto.Message): DEPRECATED: This field is unused and ignored. 
""" - parent = proto.Field( + parent: str = proto.Field( proto.STRING, number=1, ) - page_token = proto.Field( + page_token: str = proto.Field( proto.STRING, number=2, ) @@ -139,9 +141,9 @@ class ListInstancesResponse(proto.Message): r"""Response message for BigtableInstanceAdmin.ListInstances. Attributes: - instances (Sequence[google.cloud.bigtable_admin_v2.types.Instance]): + instances (MutableSequence[google.cloud.bigtable_admin_v2.types.Instance]): The list of requested instances. - failed_locations (Sequence[str]): + failed_locations (MutableSequence[str]): Locations from which Instance information could not be retrieved, due to an outage or some other transient condition. Instances whose Clusters are all in one of the @@ -157,16 +159,16 @@ class ListInstancesResponse(proto.Message): def raw_page(self): return self - instances = proto.RepeatedField( + instances: MutableSequence[gba_instance.Instance] = proto.RepeatedField( proto.MESSAGE, number=1, message=gba_instance.Instance, ) - failed_locations = proto.RepeatedField( + failed_locations: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=2, ) - next_page_token = proto.Field( + next_page_token: str = proto.Field( proto.STRING, number=3, ) @@ -185,12 +187,12 @@ class PartialUpdateInstanceRequest(proto.Message): should be replaced. Must be explicitly set. """ - instance = proto.Field( + instance: gba_instance.Instance = proto.Field( proto.MESSAGE, number=1, message=gba_instance.Instance, ) - update_mask = proto.Field( + update_mask: field_mask_pb2.FieldMask = proto.Field( proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask, @@ -207,7 +209,7 @@ class DeleteInstanceRequest(proto.Message): ``projects/{project}/instances/{instance}``. """ - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) @@ -231,15 +233,15 @@ class CreateClusterRequest(proto.Message): ``OutputOnly`` must be left blank. """ - parent = proto.Field( + parent: str = proto.Field( proto.STRING, number=1, ) - cluster_id = proto.Field( + cluster_id: str = proto.Field( proto.STRING, number=2, ) - cluster = proto.Field( + cluster: gba_instance.Cluster = proto.Field( proto.MESSAGE, number=3, message=gba_instance.Cluster, @@ -256,7 +258,7 @@ class GetClusterRequest(proto.Message): ``projects/{project}/instances/{instance}/clusters/{cluster}``. """ - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) @@ -276,11 +278,11 @@ class ListClustersRequest(proto.Message): DEPRECATED: This field is unused and ignored. """ - parent = proto.Field( + parent: str = proto.Field( proto.STRING, number=1, ) - page_token = proto.Field( + page_token: str = proto.Field( proto.STRING, number=2, ) @@ -290,9 +292,9 @@ class ListClustersResponse(proto.Message): r"""Response message for BigtableInstanceAdmin.ListClusters. Attributes: - clusters (Sequence[google.cloud.bigtable_admin_v2.types.Cluster]): + clusters (MutableSequence[google.cloud.bigtable_admin_v2.types.Cluster]): The list of requested clusters. - failed_locations (Sequence[str]): + failed_locations (MutableSequence[str]): Locations from which Cluster information could not be retrieved, due to an outage or some other transient condition. 
Clusters from these locations may be missing from @@ -307,16 +309,16 @@ class ListClustersResponse(proto.Message): def raw_page(self): return self - clusters = proto.RepeatedField( + clusters: MutableSequence[gba_instance.Cluster] = proto.RepeatedField( proto.MESSAGE, number=1, message=gba_instance.Cluster, ) - failed_locations = proto.RepeatedField( + failed_locations: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=2, ) - next_page_token = proto.Field( + next_page_token: str = proto.Field( proto.STRING, number=3, ) @@ -332,7 +334,7 @@ class DeleteClusterRequest(proto.Message): ``projects/{project}/instances/{instance}/clusters/{cluster}``. """ - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) @@ -353,17 +355,17 @@ class CreateInstanceMetadata(proto.Message): completed successfully. """ - original_request = proto.Field( + original_request: "CreateInstanceRequest" = proto.Field( proto.MESSAGE, number=1, message="CreateInstanceRequest", ) - request_time = proto.Field( + request_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp, ) - finish_time = proto.Field( + finish_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp, @@ -385,17 +387,17 @@ class UpdateInstanceMetadata(proto.Message): completed successfully. """ - original_request = proto.Field( + original_request: "PartialUpdateInstanceRequest" = proto.Field( proto.MESSAGE, number=1, message="PartialUpdateInstanceRequest", ) - request_time = proto.Field( + request_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp, ) - finish_time = proto.Field( + finish_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp, @@ -415,7 +417,7 @@ class CreateClusterMetadata(proto.Message): finish_time (google.protobuf.timestamp_pb2.Timestamp): The time at which the operation failed or was completed successfully. - tables (Mapping[str, google.cloud.bigtable_admin_v2.types.CreateClusterMetadata.TableProgress]): + tables (MutableMapping[str, google.cloud.bigtable_admin_v2.types.CreateClusterMetadata.TableProgress]): Keys: the full ``name`` of each table that existed in the instance when CreateCluster was first called, i.e. ``projects//instances//tables/
``. @@ -449,36 +451,36 @@ class State(proto.Enum): COMPLETED = 3 CANCELLED = 4 - estimated_size_bytes = proto.Field( + estimated_size_bytes: int = proto.Field( proto.INT64, number=2, ) - estimated_copied_bytes = proto.Field( + estimated_copied_bytes: int = proto.Field( proto.INT64, number=3, ) - state = proto.Field( + state: "CreateClusterMetadata.TableProgress.State" = proto.Field( proto.ENUM, number=4, enum="CreateClusterMetadata.TableProgress.State", ) - original_request = proto.Field( + original_request: "CreateClusterRequest" = proto.Field( proto.MESSAGE, number=1, message="CreateClusterRequest", ) - request_time = proto.Field( + request_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp, ) - finish_time = proto.Field( + finish_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp, ) - tables = proto.MapField( + tables: MutableMapping[str, TableProgress] = proto.MapField( proto.STRING, proto.MESSAGE, number=4, @@ -501,17 +503,17 @@ class UpdateClusterMetadata(proto.Message): completed successfully. """ - original_request = proto.Field( + original_request: gba_instance.Cluster = proto.Field( proto.MESSAGE, number=1, message=gba_instance.Cluster, ) - request_time = proto.Field( + request_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp, ) - finish_time = proto.Field( + finish_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp, @@ -534,17 +536,17 @@ class PartialUpdateClusterMetadata(proto.Message): PartialUpdateCluster. """ - request_time = proto.Field( + request_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=1, message=timestamp_pb2.Timestamp, ) - finish_time = proto.Field( + finish_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp, ) - original_request = proto.Field( + original_request: "PartialUpdateClusterRequest" = proto.Field( proto.MESSAGE, number=3, message="PartialUpdateClusterRequest", @@ -564,12 +566,12 @@ class PartialUpdateClusterRequest(proto.Message): should be replaced. """ - cluster = proto.Field( + cluster: gba_instance.Cluster = proto.Field( proto.MESSAGE, number=1, message=gba_instance.Cluster, ) - update_mask = proto.Field( + update_mask: field_mask_pb2.FieldMask = proto.Field( proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask, @@ -597,20 +599,20 @@ class CreateAppProfileRequest(proto.Message): the app profile. """ - parent = proto.Field( + parent: str = proto.Field( proto.STRING, number=1, ) - app_profile_id = proto.Field( + app_profile_id: str = proto.Field( proto.STRING, number=2, ) - app_profile = proto.Field( + app_profile: gba_instance.AppProfile = proto.Field( proto.MESSAGE, number=3, message=gba_instance.AppProfile, ) - ignore_warnings = proto.Field( + ignore_warnings: bool = proto.Field( proto.BOOL, number=4, ) @@ -626,7 +628,7 @@ class GetAppProfileRequest(proto.Message): ``projects/{project}/instances/{instance}/appProfiles/{app_profile}``. """ - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) @@ -659,15 +661,15 @@ class ListAppProfilesRequest(proto.Message): call. 
""" - parent = proto.Field( + parent: str = proto.Field( proto.STRING, number=1, ) - page_size = proto.Field( + page_size: int = proto.Field( proto.INT32, number=3, ) - page_token = proto.Field( + page_token: str = proto.Field( proto.STRING, number=2, ) @@ -677,13 +679,13 @@ class ListAppProfilesResponse(proto.Message): r"""Response message for BigtableInstanceAdmin.ListAppProfiles. Attributes: - app_profiles (Sequence[google.cloud.bigtable_admin_v2.types.AppProfile]): + app_profiles (MutableSequence[google.cloud.bigtable_admin_v2.types.AppProfile]): The list of requested app profiles. next_page_token (str): Set if not all app profiles could be returned in a single response. Pass this value to ``page_token`` in another request to get the next page of results. - failed_locations (Sequence[str]): + failed_locations (MutableSequence[str]): Locations from which AppProfile information could not be retrieved, due to an outage or some other transient condition. AppProfiles from these locations may be missing @@ -695,16 +697,16 @@ class ListAppProfilesResponse(proto.Message): def raw_page(self): return self - app_profiles = proto.RepeatedField( + app_profiles: MutableSequence[gba_instance.AppProfile] = proto.RepeatedField( proto.MESSAGE, number=1, message=gba_instance.AppProfile, ) - next_page_token = proto.Field( + next_page_token: str = proto.Field( proto.STRING, number=2, ) - failed_locations = proto.RepeatedField( + failed_locations: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=3, ) @@ -726,17 +728,17 @@ class UpdateAppProfileRequest(proto.Message): the app profile. """ - app_profile = proto.Field( + app_profile: gba_instance.AppProfile = proto.Field( proto.MESSAGE, number=1, message=gba_instance.AppProfile, ) - update_mask = proto.Field( + update_mask: field_mask_pb2.FieldMask = proto.Field( proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask, ) - ignore_warnings = proto.Field( + ignore_warnings: bool = proto.Field( proto.BOOL, number=3, ) @@ -755,11 +757,11 @@ class DeleteAppProfileRequest(proto.Message): deleting the app profile. """ - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) - ignore_warnings = proto.Field( + ignore_warnings: bool = proto.Field( proto.BOOL, number=2, ) @@ -807,25 +809,25 @@ class ListHotTabletsRequest(proto.Message): call. """ - parent = proto.Field( + parent: str = proto.Field( proto.STRING, number=1, ) - start_time = proto.Field( + start_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp, ) - end_time = proto.Field( + end_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp, ) - page_size = proto.Field( + page_size: int = proto.Field( proto.INT32, number=4, ) - page_token = proto.Field( + page_token: str = proto.Field( proto.STRING, number=5, ) @@ -835,7 +837,7 @@ class ListHotTabletsResponse(proto.Message): r"""Response message for BigtableInstanceAdmin.ListHotTablets. Attributes: - hot_tablets (Sequence[google.cloud.bigtable_admin_v2.types.HotTablet]): + hot_tablets (MutableSequence[google.cloud.bigtable_admin_v2.types.HotTablet]): List of hot tablets in the tables of the requested cluster that fall within the requested time range. 
Hot tablets are ordered by node cpu @@ -854,12 +856,12 @@ class ListHotTabletsResponse(proto.Message): def raw_page(self): return self - hot_tablets = proto.RepeatedField( + hot_tablets: MutableSequence[gba_instance.HotTablet] = proto.RepeatedField( proto.MESSAGE, number=1, message=gba_instance.HotTablet, ) - next_page_token = proto.Field( + next_page_token: str = proto.Field( proto.STRING, number=2, ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py index cbc3a1d8c354..52d2db32c858 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # +from typing import MutableMapping, MutableSequence + import proto # type: ignore from google.cloud.bigtable_admin_v2.types import common @@ -88,15 +90,15 @@ class RestoreTableRequest(proto.Message): This field is a member of `oneof`_ ``source``. """ - parent = proto.Field( + parent: str = proto.Field( proto.STRING, number=1, ) - table_id = proto.Field( + table_id: str = proto.Field( proto.STRING, number=2, ) - backup = proto.Field( + backup: str = proto.Field( proto.STRING, number=3, oneof="source", @@ -136,26 +138,26 @@ class RestoreTableMetadata(proto.Message): operation. """ - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) - source_type = proto.Field( + source_type: gba_table.RestoreSourceType = proto.Field( proto.ENUM, number=2, enum=gba_table.RestoreSourceType, ) - backup_info = proto.Field( + backup_info: gba_table.BackupInfo = proto.Field( proto.MESSAGE, number=3, oneof="source_info", message=gba_table.BackupInfo, ) - optimize_table_operation_name = proto.Field( + optimize_table_operation_name: str = proto.Field( proto.STRING, number=4, ) - progress = proto.Field( + progress: common.OperationProgress = proto.Field( proto.MESSAGE, number=5, message=common.OperationProgress, @@ -177,11 +179,11 @@ class OptimizeRestoredTableMetadata(proto.Message): optimizations. """ - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) - progress = proto.Field( + progress: common.OperationProgress = proto.Field( proto.MESSAGE, number=2, message=common.OperationProgress, @@ -203,7 +205,7 @@ class CreateTableRequest(proto.Message): ``{parent}/tables/foobar``. Maximum 50 characters. table (google.cloud.bigtable_admin_v2.types.Table): Required. The Table to create. - initial_splits (Sequence[google.cloud.bigtable_admin_v2.types.CreateTableRequest.Split]): + initial_splits (MutableSequence[google.cloud.bigtable_admin_v2.types.CreateTableRequest.Split]): The optional list of row keys that will be used to initially split the table into several tablets (tablets are similar to HBase regions). Given two split keys, ``s1`` and ``s2``, @@ -236,25 +238,25 @@ class Split(proto.Message): Row key to use as an initial tablet boundary. 
""" - key = proto.Field( + key: bytes = proto.Field( proto.BYTES, number=1, ) - parent = proto.Field( + parent: str = proto.Field( proto.STRING, number=1, ) - table_id = proto.Field( + table_id: str = proto.Field( proto.STRING, number=2, ) - table = proto.Field( + table: gba_table.Table = proto.Field( proto.MESSAGE, number=3, message=gba_table.Table, ) - initial_splits = proto.RepeatedField( + initial_splits: MutableSequence[Split] = proto.RepeatedField( proto.MESSAGE, number=4, message=Split, @@ -287,15 +289,15 @@ class CreateTableFromSnapshotRequest(proto.Message): ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. """ - parent = proto.Field( + parent: str = proto.Field( proto.STRING, number=1, ) - table_id = proto.Field( + table_id: str = proto.Field( proto.STRING, number=2, ) - source_snapshot = proto.Field( + source_snapshot: str = proto.Field( proto.STRING, number=3, ) @@ -329,16 +331,16 @@ class DropRowRangeRequest(proto.Message): This field is a member of `oneof`_ ``target``. """ - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) - row_key_prefix = proto.Field( + row_key_prefix: bytes = proto.Field( proto.BYTES, number=2, oneof="target", ) - delete_all_data_from_table = proto.Field( + delete_all_data_from_table: bool = proto.Field( proto.BOOL, number=3, oneof="target", @@ -374,20 +376,20 @@ class ListTablesRequest(proto.Message): call. """ - parent = proto.Field( + parent: str = proto.Field( proto.STRING, number=1, ) - view = proto.Field( + view: gba_table.Table.View = proto.Field( proto.ENUM, number=2, enum=gba_table.Table.View, ) - page_size = proto.Field( + page_size: int = proto.Field( proto.INT32, number=4, ) - page_token = proto.Field( + page_token: str = proto.Field( proto.STRING, number=3, ) @@ -398,7 +400,7 @@ class ListTablesResponse(proto.Message): [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] Attributes: - tables (Sequence[google.cloud.bigtable_admin_v2.types.Table]): + tables (MutableSequence[google.cloud.bigtable_admin_v2.types.Table]): The tables present in the requested instance. next_page_token (str): Set if not all tables could be returned in a single @@ -410,12 +412,12 @@ class ListTablesResponse(proto.Message): def raw_page(self): return self - tables = proto.RepeatedField( + tables: MutableSequence[gba_table.Table] = proto.RepeatedField( proto.MESSAGE, number=1, message=gba_table.Table, ) - next_page_token = proto.Field( + next_page_token: str = proto.Field( proto.STRING, number=2, ) @@ -435,11 +437,11 @@ class GetTableRequest(proto.Message): Defaults to ``SCHEMA_VIEW`` if unspecified. """ - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) - view = proto.Field( + view: gba_table.Table.View = proto.Field( proto.ENUM, number=2, enum=gba_table.Table.View, @@ -466,12 +468,12 @@ class UpdateTableRequest(proto.Message): ``update_mask``, it will return an UNIMPLEMENTED error. """ - table = proto.Field( + table: gba_table.Table = proto.Field( proto.MESSAGE, number=1, message=gba_table.Table, ) - update_mask = proto.Field( + update_mask: field_mask_pb2.FieldMask = proto.Field( proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask, @@ -492,16 +494,16 @@ class UpdateTableMetadata(proto.Message): finished or was canceled. 
""" - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) - start_time = proto.Field( + start_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp, ) - end_time = proto.Field( + end_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp, @@ -519,7 +521,7 @@ class DeleteTableRequest(proto.Message): ``projects/{project}/instances/{instance}/tables/{table}``. """ - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) @@ -536,7 +538,7 @@ class UndeleteTableRequest(proto.Message): ``projects/{project}/instances/{instance}/tables/{table}``. """ - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) @@ -556,16 +558,16 @@ class UndeleteTableMetadata(proto.Message): finished or was cancelled. """ - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) - start_time = proto.Field( + start_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp, ) - end_time = proto.Field( + end_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp, @@ -581,7 +583,7 @@ class ModifyColumnFamiliesRequest(proto.Message): Required. The unique name of the table whose families should be modified. Values are of the form ``projects/{project}/instances/{instance}/tables/{table}``. - modifications (Sequence[google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest.Modification]): + modifications (MutableSequence[google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest.Modification]): Required. Modifications to be atomically applied to the specified table's families. Entries are applied in order, meaning that @@ -622,33 +624,33 @@ class Modification(proto.Message): This field is a member of `oneof`_ ``mod``. """ - id = proto.Field( + id: str = proto.Field( proto.STRING, number=1, ) - create = proto.Field( + create: gba_table.ColumnFamily = proto.Field( proto.MESSAGE, number=2, oneof="mod", message=gba_table.ColumnFamily, ) - update = proto.Field( + update: gba_table.ColumnFamily = proto.Field( proto.MESSAGE, number=3, oneof="mod", message=gba_table.ColumnFamily, ) - drop = proto.Field( + drop: bool = proto.Field( proto.BOOL, number=4, oneof="mod", ) - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) - modifications = proto.RepeatedField( + modifications: MutableSequence[Modification] = proto.RepeatedField( proto.MESSAGE, number=2, message=Modification, @@ -666,7 +668,7 @@ class GenerateConsistencyTokenRequest(proto.Message): ``projects/{project}/instances/{instance}/tables/{table}``. """ - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) @@ -681,7 +683,7 @@ class GenerateConsistencyTokenResponse(proto.Message): The generated consistency token. """ - consistency_token = proto.Field( + consistency_token: str = proto.Field( proto.STRING, number=1, ) @@ -701,11 +703,11 @@ class CheckConsistencyRequest(proto.Message): GenerateConsistencyToken for the Table. """ - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) - consistency_token = proto.Field( + consistency_token: str = proto.Field( proto.STRING, number=2, ) @@ -722,7 +724,7 @@ class CheckConsistencyResponse(proto.Message): the restrictions specified in the request. 
""" - consistent = proto.Field( + consistent: bool = proto.Field( proto.BOOL, number=1, ) @@ -763,24 +765,24 @@ class SnapshotTableRequest(proto.Message): Description of the snapshot. """ - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) - cluster = proto.Field( + cluster: str = proto.Field( proto.STRING, number=2, ) - snapshot_id = proto.Field( + snapshot_id: str = proto.Field( proto.STRING, number=3, ) - ttl = proto.Field( + ttl: duration_pb2.Duration = proto.Field( proto.MESSAGE, number=4, message=duration_pb2.Duration, ) - description = proto.Field( + description: str = proto.Field( proto.STRING, number=5, ) @@ -803,7 +805,7 @@ class GetSnapshotRequest(proto.Message): ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. """ - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) @@ -835,15 +837,15 @@ class ListSnapshotsRequest(proto.Message): call. """ - parent = proto.Field( + parent: str = proto.Field( proto.STRING, number=1, ) - page_size = proto.Field( + page_size: int = proto.Field( proto.INT32, number=2, ) - page_token = proto.Field( + page_token: str = proto.Field( proto.STRING, number=3, ) @@ -860,7 +862,7 @@ class ListSnapshotsResponse(proto.Message): any SLA or deprecation policy. Attributes: - snapshots (Sequence[google.cloud.bigtable_admin_v2.types.Snapshot]): + snapshots (MutableSequence[google.cloud.bigtable_admin_v2.types.Snapshot]): The snapshots present in the requested cluster. next_page_token (str): @@ -873,12 +875,12 @@ class ListSnapshotsResponse(proto.Message): def raw_page(self): return self - snapshots = proto.RepeatedField( + snapshots: MutableSequence[gba_table.Snapshot] = proto.RepeatedField( proto.MESSAGE, number=1, message=gba_table.Snapshot, ) - next_page_token = proto.Field( + next_page_token: str = proto.Field( proto.STRING, number=2, ) @@ -901,7 +903,7 @@ class DeleteSnapshotRequest(proto.Message): ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. """ - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) @@ -927,17 +929,17 @@ class SnapshotTableMetadata(proto.Message): completed successfully. """ - original_request = proto.Field( + original_request: "SnapshotTableRequest" = proto.Field( proto.MESSAGE, number=1, message="SnapshotTableRequest", ) - request_time = proto.Field( + request_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp, ) - finish_time = proto.Field( + finish_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp, @@ -965,17 +967,17 @@ class CreateTableFromSnapshotMetadata(proto.Message): completed successfully. """ - original_request = proto.Field( + original_request: "CreateTableFromSnapshotRequest" = proto.Field( proto.MESSAGE, number=1, message="CreateTableFromSnapshotRequest", ) - request_time = proto.Field( + request_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp, ) - finish_time = proto.Field( + finish_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp, @@ -1004,15 +1006,15 @@ class CreateBackupRequest(proto.Message): Required. The backup to create. 
""" - parent = proto.Field( + parent: str = proto.Field( proto.STRING, number=1, ) - backup_id = proto.Field( + backup_id: str = proto.Field( proto.STRING, number=2, ) - backup = proto.Field( + backup: gba_table.Backup = proto.Field( proto.MESSAGE, number=3, message=gba_table.Backup, @@ -1036,20 +1038,20 @@ class CreateBackupMetadata(proto.Message): finished or was cancelled. """ - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) - source_table = proto.Field( + source_table: str = proto.Field( proto.STRING, number=2, ) - start_time = proto.Field( + start_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp, ) - end_time = proto.Field( + end_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp, @@ -1077,12 +1079,12 @@ class UpdateBackupRequest(proto.Message): accidentally by clients that do not know about them. """ - backup = proto.Field( + backup: gba_table.Backup = proto.Field( proto.MESSAGE, number=1, message=gba_table.Backup, ) - update_mask = proto.Field( + update_mask: field_mask_pb2.FieldMask = proto.Field( proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask, @@ -1099,7 +1101,7 @@ class GetBackupRequest(proto.Message): ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. """ - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) @@ -1116,7 +1118,7 @@ class DeleteBackupRequest(proto.Message): ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. """ - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) @@ -1208,23 +1210,23 @@ class ListBackupsRequest(proto.Message): to the same ``parent`` and with the same ``filter``. """ - parent = proto.Field( + parent: str = proto.Field( proto.STRING, number=1, ) - filter = proto.Field( + filter: str = proto.Field( proto.STRING, number=2, ) - order_by = proto.Field( + order_by: str = proto.Field( proto.STRING, number=3, ) - page_size = proto.Field( + page_size: int = proto.Field( proto.INT32, number=4, ) - page_token = proto.Field( + page_token: str = proto.Field( proto.STRING, number=5, ) @@ -1235,7 +1237,7 @@ class ListBackupsResponse(proto.Message): [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. Attributes: - backups (Sequence[google.cloud.bigtable_admin_v2.types.Backup]): + backups (MutableSequence[google.cloud.bigtable_admin_v2.types.Backup]): The list of matching backups. next_page_token (str): ``next_page_token`` can be sent in a subsequent @@ -1247,12 +1249,12 @@ class ListBackupsResponse(proto.Message): def raw_page(self): return self - backups = proto.RepeatedField( + backups: MutableSequence[gba_table.Backup] = proto.RepeatedField( proto.MESSAGE, number=1, message=gba_table.Backup, ) - next_page_token = proto.Field( + next_page_token: str = proto.Field( proto.STRING, number=2, ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/common.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/common.py index 704a07732339..533f9145db45 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/common.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/common.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# +from typing import MutableMapping, MutableSequence + import proto # type: ignore from google.protobuf import timestamp_pb2 # type: ignore @@ -49,16 +51,16 @@ class OperationProgress(proto.Message): failed or was completed successfully. """ - progress_percent = proto.Field( + progress_percent: int = proto.Field( proto.INT32, number=1, ) - start_time = proto.Field( + start_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp, ) - end_time = proto.Field( + end_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py index 12422930ef5b..a734fa91d901 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # +from typing import MutableMapping, MutableSequence + import proto # type: ignore from google.cloud.bigtable_admin_v2.types import common @@ -51,7 +53,7 @@ class Instance(proto.Message): (``OutputOnly``) The current state of the instance. type_ (google.cloud.bigtable_admin_v2.types.Instance.Type): The type of the instance. Defaults to ``PRODUCTION``. - labels (Mapping[str, str]): + labels (MutableMapping[str, str]): Labels are a flexible and lightweight mechanism for organizing cloud resources into groups that reflect a customer's organizational needs and deployment strategies. @@ -89,35 +91,35 @@ class Type(proto.Enum): PRODUCTION = 1 DEVELOPMENT = 2 - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) - display_name = proto.Field( + display_name: str = proto.Field( proto.STRING, number=2, ) - state = proto.Field( + state: State = proto.Field( proto.ENUM, number=3, enum=State, ) - type_ = proto.Field( + type_: Type = proto.Field( proto.ENUM, number=4, enum=Type, ) - labels = proto.MapField( + labels: MutableMapping[str, str] = proto.MapField( proto.STRING, proto.STRING, number=5, ) - create_time = proto.Field( + create_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=7, message=timestamp_pb2.Timestamp, ) - satisfies_pzs = proto.Field( + satisfies_pzs: bool = proto.Field( proto.BOOL, number=8, optional=True, @@ -144,11 +146,11 @@ class AutoscalingTargets(proto.Message): SSD, 8192 for HDD. """ - cpu_utilization_percent = proto.Field( + cpu_utilization_percent: int = proto.Field( proto.INT32, number=2, ) - storage_utilization_gib_per_node = proto.Field( + storage_utilization_gib_per_node: int = proto.Field( proto.INT32, number=3, ) @@ -167,11 +169,11 @@ class AutoscalingLimits(proto.Message): to. """ - min_serve_nodes = proto.Field( + min_serve_nodes: int = proto.Field( proto.INT32, number=1, ) - max_serve_nodes = proto.Field( + max_serve_nodes: int = proto.Field( proto.INT32, number=2, ) @@ -235,12 +237,12 @@ class ClusterAutoscalingConfig(proto.Message): cluster. 
""" - autoscaling_limits = proto.Field( + autoscaling_limits: "AutoscalingLimits" = proto.Field( proto.MESSAGE, number=1, message="AutoscalingLimits", ) - autoscaling_targets = proto.Field( + autoscaling_targets: "AutoscalingTargets" = proto.Field( proto.MESSAGE, number=2, message="AutoscalingTargets", @@ -254,7 +256,7 @@ class ClusterConfig(proto.Message): Autoscaling configuration for this cluster. """ - cluster_autoscaling_config = proto.Field( + cluster_autoscaling_config: "Cluster.ClusterAutoscalingConfig" = proto.Field( proto.MESSAGE, number=1, message="Cluster.ClusterAutoscalingConfig", @@ -281,40 +283,40 @@ class EncryptionConfig(proto.Message): ``projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{key}`` """ - kms_key_name = proto.Field( + kms_key_name: str = proto.Field( proto.STRING, number=1, ) - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) - location = proto.Field( + location: str = proto.Field( proto.STRING, number=2, ) - state = proto.Field( + state: State = proto.Field( proto.ENUM, number=3, enum=State, ) - serve_nodes = proto.Field( + serve_nodes: int = proto.Field( proto.INT32, number=4, ) - cluster_config = proto.Field( + cluster_config: ClusterConfig = proto.Field( proto.MESSAGE, number=7, oneof="config", message=ClusterConfig, ) - default_storage_type = proto.Field( + default_storage_type: common.StorageType = proto.Field( proto.ENUM, number=5, enum=common.StorageType, ) - encryption_config = proto.Field( + encryption_config: EncryptionConfig = proto.Field( proto.MESSAGE, number=6, message=EncryptionConfig, @@ -368,14 +370,14 @@ class MultiClusterRoutingUseAny(proto.Message): sacrifices read-your-writes consistency to improve availability. Attributes: - cluster_ids (Sequence[str]): + cluster_ids (MutableSequence[str]): The set of clusters to route to. The order is ignored; clusters will be tried in order of distance. If left empty, all clusters are eligible. """ - cluster_ids = proto.RepeatedField( + cluster_ids: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=1, ) @@ -396,34 +398,34 @@ class SingleClusterRouting(proto.Message): table/row/column in multiple clusters. """ - cluster_id = proto.Field( + cluster_id: str = proto.Field( proto.STRING, number=1, ) - allow_transactional_writes = proto.Field( + allow_transactional_writes: bool = proto.Field( proto.BOOL, number=2, ) - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) - etag = proto.Field( + etag: str = proto.Field( proto.STRING, number=2, ) - description = proto.Field( + description: str = proto.Field( proto.STRING, number=3, ) - multi_cluster_routing_use_any = proto.Field( + multi_cluster_routing_use_any: MultiClusterRoutingUseAny = proto.Field( proto.MESSAGE, number=5, oneof="routing_policy", message=MultiClusterRoutingUseAny, ) - single_cluster_routing = proto.Field( + single_cluster_routing: SingleClusterRouting = proto.Field( proto.MESSAGE, number=6, oneof="routing_policy", @@ -464,33 +466,33 @@ class HotTablet(proto.Message): (the node spent all cycles serving the hot tablet). 
""" - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) - table_name = proto.Field( + table_name: str = proto.Field( proto.STRING, number=2, ) - start_time = proto.Field( + start_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp, ) - end_time = proto.Field( + end_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp, ) - start_key = proto.Field( + start_key: str = proto.Field( proto.STRING, number=5, ) - end_key = proto.Field( + end_key: str = proto.Field( proto.STRING, number=6, ) - node_cpu_usage_percent = proto.Field( + node_cpu_usage_percent: float = proto.Field( proto.FLOAT, number=7, ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py index ef268548306b..1c7854d256d2 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # +from typing import MutableMapping, MutableSequence + import proto # type: ignore from google.protobuf import duration_pb2 # type: ignore @@ -57,12 +59,12 @@ class RestoreInfo(proto.Message): This field is a member of `oneof`_ ``source_info``. """ - source_type = proto.Field( + source_type: "RestoreSourceType" = proto.Field( proto.ENUM, number=1, enum="RestoreSourceType", ) - backup_info = proto.Field( + backup_info: "BackupInfo" = proto.Field( proto.MESSAGE, number=2, oneof="source_info", @@ -81,14 +83,14 @@ class Table(proto.Message): ``projects/{project}/instances/{instance}/tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. Views: ``NAME_ONLY``, ``SCHEMA_VIEW``, ``REPLICATION_VIEW``, ``FULL`` - cluster_states (Mapping[str, google.cloud.bigtable_admin_v2.types.Table.ClusterState]): + cluster_states (MutableMapping[str, google.cloud.bigtable_admin_v2.types.Table.ClusterState]): Output only. Map from cluster ID to per-cluster table state. If it could not be determined whether or not the table has data in a particular cluster (for example, if its zone is unavailable), then there will be an entry for the cluster with UNKNOWN ``replication_status``. Views: ``REPLICATION_VIEW``, ``ENCRYPTION_VIEW``, ``FULL`` - column_families (Mapping[str, google.cloud.bigtable_admin_v2.types.ColumnFamily]): + column_families (MutableMapping[str, google.cloud.bigtable_admin_v2.types.ColumnFamily]): The column families configured for this table, mapped by column family ID. Views: ``SCHEMA_VIEW``, ``FULL`` granularity (google.cloud.bigtable_admin_v2.types.Table.TimestampGranularity): @@ -136,7 +138,7 @@ class ClusterState(proto.Message): replication_state (google.cloud.bigtable_admin_v2.types.Table.ClusterState.ReplicationState): Output only. The state of replication for the table in this cluster. - encryption_info (Sequence[google.cloud.bigtable_admin_v2.types.EncryptionInfo]): + encryption_info (MutableSequence[google.cloud.bigtable_admin_v2.types.EncryptionInfo]): Output only. The encryption information for the table in this cluster. 
If the encryption key protecting this resource is customer managed, @@ -156,44 +158,44 @@ class ReplicationState(proto.Enum): READY = 4 READY_OPTIMIZING = 5 - replication_state = proto.Field( + replication_state: "Table.ClusterState.ReplicationState" = proto.Field( proto.ENUM, number=1, enum="Table.ClusterState.ReplicationState", ) - encryption_info = proto.RepeatedField( + encryption_info: MutableSequence["EncryptionInfo"] = proto.RepeatedField( proto.MESSAGE, number=2, message="EncryptionInfo", ) - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) - cluster_states = proto.MapField( + cluster_states: MutableMapping[str, ClusterState] = proto.MapField( proto.STRING, proto.MESSAGE, number=2, message=ClusterState, ) - column_families = proto.MapField( + column_families: MutableMapping[str, "ColumnFamily"] = proto.MapField( proto.STRING, proto.MESSAGE, number=3, message="ColumnFamily", ) - granularity = proto.Field( + granularity: TimestampGranularity = proto.Field( proto.ENUM, number=4, enum=TimestampGranularity, ) - restore_info = proto.Field( + restore_info: "RestoreInfo" = proto.Field( proto.MESSAGE, number=6, message="RestoreInfo", ) - deletion_protection = proto.Field( + deletion_protection: bool = proto.Field( proto.BOOL, number=9, ) @@ -213,7 +215,7 @@ class ColumnFamily(proto.Message): matches the active GC expression for its family. """ - gc_rule = proto.Field( + gc_rule: "GcRule" = proto.Field( proto.MESSAGE, number=1, message="GcRule", @@ -260,12 +262,12 @@ class Intersection(proto.Message): r"""A GcRule which deletes cells matching all of the given rules. Attributes: - rules (Sequence[google.cloud.bigtable_admin_v2.types.GcRule]): + rules (MutableSequence[google.cloud.bigtable_admin_v2.types.GcRule]): Only delete cells which would be deleted by every element of ``rules``. """ - rules = proto.RepeatedField( + rules: MutableSequence["GcRule"] = proto.RepeatedField( proto.MESSAGE, number=1, message="GcRule", @@ -275,35 +277,35 @@ class Union(proto.Message): r"""A GcRule which deletes cells matching any of the given rules. Attributes: - rules (Sequence[google.cloud.bigtable_admin_v2.types.GcRule]): + rules (MutableSequence[google.cloud.bigtable_admin_v2.types.GcRule]): Delete cells which would be deleted by any element of ``rules``. 
""" - rules = proto.RepeatedField( + rules: MutableSequence["GcRule"] = proto.RepeatedField( proto.MESSAGE, number=1, message="GcRule", ) - max_num_versions = proto.Field( + max_num_versions: int = proto.Field( proto.INT32, number=1, oneof="rule", ) - max_age = proto.Field( + max_age: duration_pb2.Duration = proto.Field( proto.MESSAGE, number=2, oneof="rule", message=duration_pb2.Duration, ) - intersection = proto.Field( + intersection: Intersection = proto.Field( proto.MESSAGE, number=3, oneof="rule", message=Intersection, ) - union = proto.Field( + union: Union = proto.Field( proto.MESSAGE, number=4, oneof="rule", @@ -338,17 +340,17 @@ class EncryptionType(proto.Enum): GOOGLE_DEFAULT_ENCRYPTION = 1 CUSTOMER_MANAGED_ENCRYPTION = 2 - encryption_type = proto.Field( + encryption_type: EncryptionType = proto.Field( proto.ENUM, number=3, enum=EncryptionType, ) - encryption_status = proto.Field( + encryption_status: status_pb2.Status = proto.Field( proto.MESSAGE, number=4, message=status_pb2.Status, ) - kms_key_version = proto.Field( + kms_key_version: str = proto.Field( proto.STRING, number=2, ) @@ -400,35 +402,35 @@ class State(proto.Enum): READY = 1 CREATING = 2 - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) - source_table = proto.Field( + source_table: "Table" = proto.Field( proto.MESSAGE, number=2, message="Table", ) - data_size_bytes = proto.Field( + data_size_bytes: int = proto.Field( proto.INT64, number=3, ) - create_time = proto.Field( + create_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp, ) - delete_time = proto.Field( + delete_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=5, message=timestamp_pb2.Timestamp, ) - state = proto.Field( + state: State = proto.Field( proto.ENUM, number=6, enum=State, ) - description = proto.Field( + description: str = proto.Field( proto.STRING, number=7, ) @@ -484,39 +486,39 @@ class State(proto.Enum): CREATING = 1 READY = 2 - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) - source_table = proto.Field( + source_table: str = proto.Field( proto.STRING, number=2, ) - expire_time = proto.Field( + expire_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp, ) - start_time = proto.Field( + start_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp, ) - end_time = proto.Field( + end_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=5, message=timestamp_pb2.Timestamp, ) - size_bytes = proto.Field( + size_bytes: int = proto.Field( proto.INT64, number=6, ) - state = proto.Field( + state: State = proto.Field( proto.ENUM, number=7, enum=State, ) - encryption_info = proto.Field( + encryption_info: "EncryptionInfo" = proto.Field( proto.MESSAGE, number=9, message="EncryptionInfo", @@ -542,21 +544,21 @@ class BackupInfo(proto.Message): created from. 
""" - backup = proto.Field( + backup: str = proto.Field( proto.STRING, number=1, ) - start_time = proto.Field( + start_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp, ) - end_time = proto.Field( + end_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp, ) - source_table = proto.Field( + source_table: str = proto.Field( proto.STRING, number=4, ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py index 6a880dfa42c9..86e5e138ea5a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py @@ -13,6 +13,10 @@ # See the License for the specific language governing permissions and # limitations under the License. # +from google.cloud.bigtable import gapic_version as package_version + +__version__ = package_version.__version__ + from .services.bigtable import BigtableClient from .services.bigtable import BigtableAsyncClient @@ -43,16 +47,14 @@ from .types.data import RowSet from .types.data import TimestampRange from .types.data import ValueRange -from .types.request_stats import AllReadStats -from .types.request_stats import ReadEfficiencyStats -from .types.request_stats import ReadIteratorStats +from .types.request_stats import FullReadStatsView +from .types.request_stats import ReadIterationStats from .types.request_stats import RequestLatencyStats from .types.request_stats import RequestStats from .types.response_params import ResponseParams __all__ = ( "BigtableAsyncClient", - "AllReadStats", "BigtableClient", "Cell", "CheckAndMutateRowRequest", @@ -60,6 +62,7 @@ "Column", "ColumnRange", "Family", + "FullReadStatsView", "MutateRowRequest", "MutateRowResponse", "MutateRowsRequest", @@ -67,8 +70,7 @@ "Mutation", "PingAndWarmRequest", "PingAndWarmResponse", - "ReadEfficiencyStats", - "ReadIteratorStats", + "ReadIterationStats", "ReadModifyWriteRowRequest", "ReadModifyWriteRowResponse", "ReadModifyWriteRule", diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py index a5becca8df2e..8ab2f1348560 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -19,6 +19,8 @@ from typing import ( Dict, Mapping, + MutableMapping, + MutableSequence, Optional, AsyncIterable, Awaitable, @@ -165,9 +167,9 @@ def transport(self) -> BigtableTransport: def __init__( self, *, - credentials: ga_credentials.Credentials = None, + credentials: Optional[ga_credentials.Credentials] = None, transport: Union[str, BigtableTransport] = "grpc_asyncio", - client_options: ClientOptions = None, + client_options: Optional[ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiates the bigtable client. 
@@ -211,12 +213,12 @@ def __init__( def read_rows( self, - request: Union[bigtable.ReadRowsRequest, dict] = None, + request: Optional[Union[bigtable.ReadRowsRequest, dict]] = None, *, - table_name: str = None, - app_profile_id: str = None, + table_name: Optional[str] = None, + app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> Awaitable[AsyncIterable[bigtable.ReadRowsResponse]]: r"""Streams back the contents of all requested rows in @@ -227,7 +229,7 @@ def read_rows( ReadRowsResponse documentation for details. Args: - request (Union[google.cloud.bigtable_v2.types.ReadRowsRequest, dict]): + request (Optional[Union[google.cloud.bigtable_v2.types.ReadRowsRequest, dict]]): The request object. Request message for Bigtable.ReadRows. table_name (:class:`str`): @@ -305,12 +307,12 @@ def read_rows( def sample_row_keys( self, - request: Union[bigtable.SampleRowKeysRequest, dict] = None, + request: Optional[Union[bigtable.SampleRowKeysRequest, dict]] = None, *, - table_name: str = None, - app_profile_id: str = None, + table_name: Optional[str] = None, + app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> Awaitable[AsyncIterable[bigtable.SampleRowKeysResponse]]: r"""Returns a sample of row keys in the table. The @@ -320,7 +322,7 @@ def sample_row_keys( mapreduces. Args: - request (Union[google.cloud.bigtable_v2.types.SampleRowKeysRequest, dict]): + request (Optional[Union[google.cloud.bigtable_v2.types.SampleRowKeysRequest, dict]]): The request object. Request message for Bigtable.SampleRowKeys. table_name (:class:`str`): @@ -400,21 +402,21 @@ def sample_row_keys( async def mutate_row( self, - request: Union[bigtable.MutateRowRequest, dict] = None, + request: Optional[Union[bigtable.MutateRowRequest, dict]] = None, *, - table_name: str = None, - row_key: bytes = None, - mutations: Sequence[data.Mutation] = None, - app_profile_id: str = None, + table_name: Optional[str] = None, + row_key: Optional[bytes] = None, + mutations: Optional[MutableSequence[data.Mutation]] = None, + app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> bigtable.MutateRowResponse: r"""Mutates a row atomically. Cells already present in the row are left unchanged unless explicitly changed by ``mutation``. Args: - request (Union[google.cloud.bigtable_v2.types.MutateRowRequest, dict]): + request (Optional[Union[google.cloud.bigtable_v2.types.MutateRowRequest, dict]]): The request object. Request message for Bigtable.MutateRow. table_name (:class:`str`): @@ -432,7 +434,7 @@ async def mutate_row( This corresponds to the ``row_key`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - mutations (:class:`Sequence[google.cloud.bigtable_v2.types.Mutation]`): + mutations (:class:`MutableSequence[google.cloud.bigtable_v2.types.Mutation]`): Required. Changes to be atomically applied to the specified row. 
Entries are applied in order, meaning that @@ -526,13 +528,13 @@ async def mutate_row( def mutate_rows( self, - request: Union[bigtable.MutateRowsRequest, dict] = None, + request: Optional[Union[bigtable.MutateRowsRequest, dict]] = None, *, - table_name: str = None, - entries: Sequence[bigtable.MutateRowsRequest.Entry] = None, - app_profile_id: str = None, + table_name: Optional[str] = None, + entries: Optional[MutableSequence[bigtable.MutateRowsRequest.Entry]] = None, + app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> Awaitable[AsyncIterable[bigtable.MutateRowsResponse]]: r"""Mutates multiple rows in a batch. Each individual row @@ -540,7 +542,7 @@ def mutate_rows( batch is not executed atomically. Args: - request (Union[google.cloud.bigtable_v2.types.MutateRowsRequest, dict]): + request (Optional[Union[google.cloud.bigtable_v2.types.MutateRowsRequest, dict]]): The request object. Request message for BigtableService.MutateRows. table_name (:class:`str`): @@ -551,7 +553,7 @@ def mutate_rows( This corresponds to the ``table_name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - entries (:class:`Sequence[google.cloud.bigtable_v2.types.MutateRowsRequest.Entry]`): + entries (:class:`MutableSequence[google.cloud.bigtable_v2.types.MutateRowsRequest.Entry]`): Required. The row keys and corresponding mutations to be applied in bulk. Each entry is applied as an atomic @@ -636,23 +638,23 @@ def mutate_rows( async def check_and_mutate_row( self, - request: Union[bigtable.CheckAndMutateRowRequest, dict] = None, + request: Optional[Union[bigtable.CheckAndMutateRowRequest, dict]] = None, *, - table_name: str = None, - row_key: bytes = None, - predicate_filter: data.RowFilter = None, - true_mutations: Sequence[data.Mutation] = None, - false_mutations: Sequence[data.Mutation] = None, - app_profile_id: str = None, + table_name: Optional[str] = None, + row_key: Optional[bytes] = None, + predicate_filter: Optional[data.RowFilter] = None, + true_mutations: Optional[MutableSequence[data.Mutation]] = None, + false_mutations: Optional[MutableSequence[data.Mutation]] = None, + app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> bigtable.CheckAndMutateRowResponse: r"""Mutates a row atomically based on the output of a predicate Reader filter. Args: - request (Union[google.cloud.bigtable_v2.types.CheckAndMutateRowRequest, dict]): + request (Optional[Union[google.cloud.bigtable_v2.types.CheckAndMutateRowRequest, dict]]): The request object. Request message for Bigtable.CheckAndMutateRow. table_name (:class:`str`): @@ -682,7 +684,7 @@ async def check_and_mutate_row( This corresponds to the ``predicate_filter`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - true_mutations (:class:`Sequence[google.cloud.bigtable_v2.types.Mutation]`): + true_mutations (:class:`MutableSequence[google.cloud.bigtable_v2.types.Mutation]`): Changes to be atomically applied to the specified row if ``predicate_filter`` yields at least one cell when applied to ``row_key``. Entries are applied in order, @@ -693,7 +695,7 @@ async def check_and_mutate_row( This corresponds to the ``true_mutations`` field on the ``request`` instance; if ``request`` is provided, this should not be set. 
- false_mutations (:class:`Sequence[google.cloud.bigtable_v2.types.Mutation]`): + false_mutations (:class:`MutableSequence[google.cloud.bigtable_v2.types.Mutation]`): Changes to be atomically applied to the specified row if ``predicate_filter`` does not yield any cells when applied to ``row_key``. Entries are applied in order, @@ -790,12 +792,12 @@ async def check_and_mutate_row( async def ping_and_warm( self, - request: Union[bigtable.PingAndWarmRequest, dict] = None, + request: Optional[Union[bigtable.PingAndWarmRequest, dict]] = None, *, - name: str = None, - app_profile_id: str = None, + name: Optional[str] = None, + app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> bigtable.PingAndWarmResponse: r"""Warm up associated instance metadata for this @@ -803,7 +805,7 @@ async def ping_and_warm( for connection keep-alive. Args: - request (Union[google.cloud.bigtable_v2.types.PingAndWarmRequest, dict]): + request (Optional[Union[google.cloud.bigtable_v2.types.PingAndWarmRequest, dict]]): The request object. Request message for client connection keep-alive and warming. name (:class:`str`): @@ -882,14 +884,14 @@ async def ping_and_warm( async def read_modify_write_row( self, - request: Union[bigtable.ReadModifyWriteRowRequest, dict] = None, + request: Optional[Union[bigtable.ReadModifyWriteRowRequest, dict]] = None, *, - table_name: str = None, - row_key: bytes = None, - rules: Sequence[data.ReadModifyWriteRule] = None, - app_profile_id: str = None, + table_name: Optional[str] = None, + row_key: Optional[bytes] = None, + rules: Optional[MutableSequence[data.ReadModifyWriteRule]] = None, + app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> bigtable.ReadModifyWriteRowResponse: r"""Modifies a row atomically on the server. The method @@ -901,7 +903,7 @@ async def read_modify_write_row( contents of all modified cells. Args: - request (Union[google.cloud.bigtable_v2.types.ReadModifyWriteRowRequest, dict]): + request (Optional[Union[google.cloud.bigtable_v2.types.ReadModifyWriteRowRequest, dict]]): The request object. Request message for Bigtable.ReadModifyWriteRow. table_name (:class:`str`): @@ -921,7 +923,7 @@ async def read_modify_write_row( This corresponds to the ``row_key`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - rules (:class:`Sequence[google.cloud.bigtable_v2.types.ReadModifyWriteRule]`): + rules (:class:`MutableSequence[google.cloud.bigtable_v2.types.ReadModifyWriteRule]`): Required. Rules specifying how the specified row's contents are to be transformed into writes. 
Entries are diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py index df2341dbc13c..2d7eabfaca74 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py @@ -16,7 +16,19 @@ from collections import OrderedDict import os import re -from typing import Dict, Mapping, Optional, Iterable, Sequence, Tuple, Type, Union +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Iterable, + Sequence, + Tuple, + Type, + Union, + cast, +) import pkg_resources from google.api_core import client_options as client_options_lib @@ -56,7 +68,7 @@ class BigtableClientMeta(type): def get_transport_class( cls, - label: str = None, + label: Optional[str] = None, ) -> Type[BigtableTransport]: """Returns an appropriate transport class. @@ -350,8 +362,8 @@ def __init__( self, *, credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, BigtableTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, + transport: Optional[Union[str, BigtableTransport]] = None, + client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiates the bigtable client. @@ -365,7 +377,7 @@ def __init__( transport (Union[str, BigtableTransport]): The transport to use. If set to None, a transport is chosen automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT @@ -395,6 +407,7 @@ def __init__( client_options = client_options_lib.from_dict(client_options) if client_options is None: client_options = client_options_lib.ClientOptions() + client_options = cast(client_options_lib.ClientOptions, client_options) api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source( client_options @@ -447,12 +460,12 @@ def __init__( def read_rows( self, - request: Union[bigtable.ReadRowsRequest, dict] = None, + request: Optional[Union[bigtable.ReadRowsRequest, dict]] = None, *, - table_name: str = None, - app_profile_id: str = None, + table_name: Optional[str] = None, + app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> Iterable[bigtable.ReadRowsResponse]: r"""Streams back the contents of all requested rows in @@ -550,12 +563,12 @@ def read_rows( def sample_row_keys( self, - request: Union[bigtable.SampleRowKeysRequest, dict] = None, + request: Optional[Union[bigtable.SampleRowKeysRequest, dict]] = None, *, - table_name: str = None, - app_profile_id: str = None, + table_name: Optional[str] = None, + app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> Iterable[bigtable.SampleRowKeysResponse]: r"""Returns a sample of row keys in the table. 
The @@ -654,14 +667,14 @@ def sample_row_keys( def mutate_row( self, - request: Union[bigtable.MutateRowRequest, dict] = None, + request: Optional[Union[bigtable.MutateRowRequest, dict]] = None, *, - table_name: str = None, - row_key: bytes = None, - mutations: Sequence[data.Mutation] = None, - app_profile_id: str = None, + table_name: Optional[str] = None, + row_key: Optional[bytes] = None, + mutations: Optional[MutableSequence[data.Mutation]] = None, + app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> bigtable.MutateRowResponse: r"""Mutates a row atomically. Cells already present in the row are @@ -686,7 +699,7 @@ def mutate_row( This corresponds to the ``row_key`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - mutations (Sequence[google.cloud.bigtable_v2.types.Mutation]): + mutations (MutableSequence[google.cloud.bigtable_v2.types.Mutation]): Required. Changes to be atomically applied to the specified row. Entries are applied in order, meaning that @@ -779,13 +792,13 @@ def mutate_row( def mutate_rows( self, - request: Union[bigtable.MutateRowsRequest, dict] = None, + request: Optional[Union[bigtable.MutateRowsRequest, dict]] = None, *, - table_name: str = None, - entries: Sequence[bigtable.MutateRowsRequest.Entry] = None, - app_profile_id: str = None, + table_name: Optional[str] = None, + entries: Optional[MutableSequence[bigtable.MutateRowsRequest.Entry]] = None, + app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> Iterable[bigtable.MutateRowsResponse]: r"""Mutates multiple rows in a batch. Each individual row @@ -804,7 +817,7 @@ def mutate_rows( This corresponds to the ``table_name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - entries (Sequence[google.cloud.bigtable_v2.types.MutateRowsRequest.Entry]): + entries (MutableSequence[google.cloud.bigtable_v2.types.MutateRowsRequest.Entry]): Required. The row keys and corresponding mutations to be applied in bulk. Each entry is applied as an atomic @@ -898,16 +911,16 @@ def mutate_rows( def check_and_mutate_row( self, - request: Union[bigtable.CheckAndMutateRowRequest, dict] = None, + request: Optional[Union[bigtable.CheckAndMutateRowRequest, dict]] = None, *, - table_name: str = None, - row_key: bytes = None, - predicate_filter: data.RowFilter = None, - true_mutations: Sequence[data.Mutation] = None, - false_mutations: Sequence[data.Mutation] = None, - app_profile_id: str = None, + table_name: Optional[str] = None, + row_key: Optional[bytes] = None, + predicate_filter: Optional[data.RowFilter] = None, + true_mutations: Optional[MutableSequence[data.Mutation]] = None, + false_mutations: Optional[MutableSequence[data.Mutation]] = None, + app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> bigtable.CheckAndMutateRowResponse: r"""Mutates a row atomically based on the output of a @@ -944,7 +957,7 @@ def check_and_mutate_row( This corresponds to the ``predicate_filter`` field on the ``request`` instance; if ``request`` is provided, this should not be set. 
- true_mutations (Sequence[google.cloud.bigtable_v2.types.Mutation]): + true_mutations (MutableSequence[google.cloud.bigtable_v2.types.Mutation]): Changes to be atomically applied to the specified row if ``predicate_filter`` yields at least one cell when applied to ``row_key``. Entries are applied in order, @@ -955,7 +968,7 @@ def check_and_mutate_row( This corresponds to the ``true_mutations`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - false_mutations (Sequence[google.cloud.bigtable_v2.types.Mutation]): + false_mutations (MutableSequence[google.cloud.bigtable_v2.types.Mutation]): Changes to be atomically applied to the specified row if ``predicate_filter`` does not yield any cells when applied to ``row_key``. Entries are applied in order, @@ -1061,12 +1074,12 @@ def check_and_mutate_row( def ping_and_warm( self, - request: Union[bigtable.PingAndWarmRequest, dict] = None, + request: Optional[Union[bigtable.PingAndWarmRequest, dict]] = None, *, - name: str = None, - app_profile_id: str = None, + name: Optional[str] = None, + app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> bigtable.PingAndWarmResponse: r"""Warm up associated instance metadata for this @@ -1162,14 +1175,14 @@ def ping_and_warm( def read_modify_write_row( self, - request: Union[bigtable.ReadModifyWriteRowRequest, dict] = None, + request: Optional[Union[bigtable.ReadModifyWriteRowRequest, dict]] = None, *, - table_name: str = None, - row_key: bytes = None, - rules: Sequence[data.ReadModifyWriteRule] = None, - app_profile_id: str = None, + table_name: Optional[str] = None, + row_key: Optional[bytes] = None, + rules: Optional[MutableSequence[data.ReadModifyWriteRule]] = None, + app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> bigtable.ReadModifyWriteRowResponse: r"""Modifies a row atomically on the server. The method @@ -1201,7 +1214,7 @@ def read_modify_write_row( This corresponds to the ``row_key`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - rules (Sequence[google.cloud.bigtable_v2.types.ReadModifyWriteRule]): + rules (MutableSequence[google.cloud.bigtable_v2.types.ReadModifyWriteRule]): Required. Rules specifying how the specified row's contents are to be transformed into writes. 
Entries are diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py index 1a6ed755493b..097ef1dc3726 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py @@ -55,7 +55,7 @@ def __init__( self, *, host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, + credentials: Optional[ga_credentials.Credentials] = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py index b453d3bc021f..cf6723678d2c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py @@ -48,14 +48,14 @@ def __init__( self, *, host: str = "bigtable.googleapis.com", - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[grpc.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool] = False, @@ -182,8 +182,8 @@ def __init__( def create_channel( cls, host: str = "bigtable.googleapis.com", - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, **kwargs, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py index 88081f30a1f1..26d89c847b24 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py @@ -50,7 +50,7 @@ class BigtableGrpcAsyncIOTransport(BigtableTransport): def create_channel( cls, host: str = "bigtable.googleapis.com", - credentials: ga_credentials.Credentials = None, + credentials: Optional[ga_credentials.Credentials] = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, @@ -93,15 +93,15 @@ def __init__( self, *, host: str = "bigtable.googleapis.com", - 
credentials: ga_credentials.Credentials = None, + credentials: Optional[ga_credentials.Credentials] = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, + channel: Optional[aio.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool] = False, api_audience: Optional[str] = None, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py index 17bb66ae69e0..3499cf5d14ec 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py @@ -44,9 +44,8 @@ ValueRange, ) from .request_stats import ( - AllReadStats, - ReadEfficiencyStats, - ReadIteratorStats, + FullReadStatsView, + ReadIterationStats, RequestLatencyStats, RequestStats, ) @@ -81,9 +80,8 @@ "RowSet", "TimestampRange", "ValueRange", - "AllReadStats", - "ReadEfficiencyStats", - "ReadIteratorStats", + "FullReadStatsView", + "ReadIterationStats", "RequestLatencyStats", "RequestStats", "ResponseParams", diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py index 3082fe73204e..09d371e9c98c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # +from typing import MutableMapping, MutableSequence + import proto # type: ignore from google.cloud.bigtable_v2.types import data @@ -77,32 +79,31 @@ class RequestStatsView(proto.Enum): """ REQUEST_STATS_VIEW_UNSPECIFIED = 0 REQUEST_STATS_NONE = 1 - REQUEST_STATS_EFFICIENCY = 2 - REQUEST_STATS_FULL = 3 + REQUEST_STATS_FULL = 2 - table_name = proto.Field( + table_name: str = proto.Field( proto.STRING, number=1, ) - app_profile_id = proto.Field( + app_profile_id: str = proto.Field( proto.STRING, number=5, ) - rows = proto.Field( + rows: data.RowSet = proto.Field( proto.MESSAGE, number=2, message=data.RowSet, ) - filter = proto.Field( + filter: data.RowFilter = proto.Field( proto.MESSAGE, number=3, message=data.RowFilter, ) - rows_limit = proto.Field( + rows_limit: int = proto.Field( proto.INT64, number=4, ) - request_stats_view = proto.Field( + request_stats_view: RequestStatsView = proto.Field( proto.ENUM, number=6, enum=RequestStatsView, @@ -113,7 +114,7 @@ class ReadRowsResponse(proto.Message): r"""Response message for Bigtable.ReadRows. Attributes: - chunks (Sequence[google.cloud.bigtable_v2.types.ReadRowsResponse.CellChunk]): + chunks (MutableSequence[google.cloud.bigtable_v2.types.ReadRowsResponse.CellChunk]): A collection of a row's contents as part of the read request. 
last_scanned_row_key (bytes): @@ -192,7 +193,7 @@ class CellChunk(proto.Message): will only allow values of ``timestamp_micros`` which are multiples of 1000. Timestamps are only set in the first CellChunk per cell (for cells split into multiple chunks). - labels (Sequence[str]): + labels (MutableSequence[str]): Labels applied to the cell by a [RowFilter][google.bigtable.v2.RowFilter]. Labels are only set on the first CellChunk per cell. @@ -222,57 +223,57 @@ class CellChunk(proto.Message): This field is a member of `oneof`_ ``row_status``. """ - row_key = proto.Field( + row_key: bytes = proto.Field( proto.BYTES, number=1, ) - family_name = proto.Field( + family_name: wrappers_pb2.StringValue = proto.Field( proto.MESSAGE, number=2, message=wrappers_pb2.StringValue, ) - qualifier = proto.Field( + qualifier: wrappers_pb2.BytesValue = proto.Field( proto.MESSAGE, number=3, message=wrappers_pb2.BytesValue, ) - timestamp_micros = proto.Field( + timestamp_micros: int = proto.Field( proto.INT64, number=4, ) - labels = proto.RepeatedField( + labels: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=5, ) - value = proto.Field( + value: bytes = proto.Field( proto.BYTES, number=6, ) - value_size = proto.Field( + value_size: int = proto.Field( proto.INT32, number=7, ) - reset_row = proto.Field( + reset_row: bool = proto.Field( proto.BOOL, number=8, oneof="row_status", ) - commit_row = proto.Field( + commit_row: bool = proto.Field( proto.BOOL, number=9, oneof="row_status", ) - chunks = proto.RepeatedField( + chunks: MutableSequence[CellChunk] = proto.RepeatedField( proto.MESSAGE, number=1, message=CellChunk, ) - last_scanned_row_key = proto.Field( + last_scanned_row_key: bytes = proto.Field( proto.BYTES, number=2, ) - request_stats = proto.Field( + request_stats: gb_request_stats.RequestStats = proto.Field( proto.MESSAGE, number=3, message=gb_request_stats.RequestStats, @@ -293,11 +294,11 @@ class SampleRowKeysRequest(proto.Message): profile will be used. """ - table_name = proto.Field( + table_name: str = proto.Field( proto.STRING, number=1, ) - app_profile_id = proto.Field( + app_profile_id: str = proto.Field( proto.STRING, number=2, ) @@ -327,11 +328,11 @@ class SampleRowKeysResponse(proto.Message): fields. """ - row_key = proto.Field( + row_key: bytes = proto.Field( proto.BYTES, number=1, ) - offset_bytes = proto.Field( + offset_bytes: int = proto.Field( proto.INT64, number=2, ) @@ -352,7 +353,7 @@ class MutateRowRequest(proto.Message): row_key (bytes): Required. The key of the row to which the mutation should be applied. - mutations (Sequence[google.cloud.bigtable_v2.types.Mutation]): + mutations (MutableSequence[google.cloud.bigtable_v2.types.Mutation]): Required. Changes to be atomically applied to the specified row. Entries are applied in order, meaning that earlier mutations can be masked by @@ -360,19 +361,19 @@ class MutateRowRequest(proto.Message): at most 100000. """ - table_name = proto.Field( + table_name: str = proto.Field( proto.STRING, number=1, ) - app_profile_id = proto.Field( + app_profile_id: str = proto.Field( proto.STRING, number=4, ) - row_key = proto.Field( + row_key: bytes = proto.Field( proto.BYTES, number=2, ) - mutations = proto.RepeatedField( + mutations: MutableSequence[data.Mutation] = proto.RepeatedField( proto.MESSAGE, number=3, message=data.Mutation, @@ -394,7 +395,7 @@ class MutateRowsRequest(proto.Message): This value specifies routing for replication. If not specified, the "default" application profile will be used. 
- entries (Sequence[google.cloud.bigtable_v2.types.MutateRowsRequest.Entry]): + entries (MutableSequence[google.cloud.bigtable_v2.types.MutateRowsRequest.Entry]): Required. The row keys and corresponding mutations to be applied in bulk. Each entry is applied as an atomic mutation, but the entries @@ -411,33 +412,33 @@ class Entry(proto.Message): row_key (bytes): The key of the row to which the ``mutations`` should be applied. - mutations (Sequence[google.cloud.bigtable_v2.types.Mutation]): + mutations (MutableSequence[google.cloud.bigtable_v2.types.Mutation]): Required. Changes to be atomically applied to the specified row. Mutations are applied in order, meaning that earlier mutations can be - masked by later ones. You must specify at least - one mutation. + masked by later ones. + You must specify at least one mutation. """ - row_key = proto.Field( + row_key: bytes = proto.Field( proto.BYTES, number=1, ) - mutations = proto.RepeatedField( + mutations: MutableSequence[data.Mutation] = proto.RepeatedField( proto.MESSAGE, number=2, message=data.Mutation, ) - table_name = proto.Field( + table_name: str = proto.Field( proto.STRING, number=1, ) - app_profile_id = proto.Field( + app_profile_id: str = proto.Field( proto.STRING, number=3, ) - entries = proto.RepeatedField( + entries: MutableSequence[Entry] = proto.RepeatedField( proto.MESSAGE, number=2, message=Entry, @@ -448,7 +449,7 @@ class MutateRowsResponse(proto.Message): r"""Response message for BigtableService.MutateRows. Attributes: - entries (Sequence[google.cloud.bigtable_v2.types.MutateRowsResponse.Entry]): + entries (MutableSequence[google.cloud.bigtable_v2.types.MutateRowsResponse.Entry]): One or more results for Entries from the batch request. """ @@ -469,17 +470,17 @@ class Entry(proto.Message): will be reported for both entries. """ - index = proto.Field( + index: int = proto.Field( proto.INT64, number=1, ) - status = proto.Field( + status: status_pb2.Status = proto.Field( proto.MESSAGE, number=2, message=status_pb2.Status, ) - entries = proto.RepeatedField( + entries: MutableSequence[Entry] = proto.RepeatedField( proto.MESSAGE, number=1, message=Entry, @@ -508,14 +509,14 @@ class CheckAndMutateRowRequest(proto.Message): either ``true_mutations`` or ``false_mutations`` will be executed. If unset, checks that the row contains any values at all. - true_mutations (Sequence[google.cloud.bigtable_v2.types.Mutation]): + true_mutations (MutableSequence[google.cloud.bigtable_v2.types.Mutation]): Changes to be atomically applied to the specified row if ``predicate_filter`` yields at least one cell when applied to ``row_key``. Entries are applied in order, meaning that earlier mutations can be masked by later ones. Must contain at least one entry if ``false_mutations`` is empty, and at most 100000. - false_mutations (Sequence[google.cloud.bigtable_v2.types.Mutation]): + false_mutations (MutableSequence[google.cloud.bigtable_v2.types.Mutation]): Changes to be atomically applied to the specified row if ``predicate_filter`` does not yield any cells when applied to ``row_key``. Entries are applied in order, meaning that @@ -524,29 +525,29 @@ class CheckAndMutateRowRequest(proto.Message): most 100000. 
""" - table_name = proto.Field( + table_name: str = proto.Field( proto.STRING, number=1, ) - app_profile_id = proto.Field( + app_profile_id: str = proto.Field( proto.STRING, number=7, ) - row_key = proto.Field( + row_key: bytes = proto.Field( proto.BYTES, number=2, ) - predicate_filter = proto.Field( + predicate_filter: data.RowFilter = proto.Field( proto.MESSAGE, number=6, message=data.RowFilter, ) - true_mutations = proto.RepeatedField( + true_mutations: MutableSequence[data.Mutation] = proto.RepeatedField( proto.MESSAGE, number=4, message=data.Mutation, ) - false_mutations = proto.RepeatedField( + false_mutations: MutableSequence[data.Mutation] = proto.RepeatedField( proto.MESSAGE, number=5, message=data.Mutation, @@ -562,7 +563,7 @@ class CheckAndMutateRowResponse(proto.Message): any results for the specified row. """ - predicate_matched = proto.Field( + predicate_matched: bool = proto.Field( proto.BOOL, number=1, ) @@ -582,11 +583,11 @@ class PingAndWarmRequest(proto.Message): profile will be used. """ - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) - app_profile_id = proto.Field( + app_profile_id: str = proto.Field( proto.STRING, number=2, ) @@ -615,7 +616,7 @@ class ReadModifyWriteRowRequest(proto.Message): row_key (bytes): Required. The key of the row to which the read/modify/write rules should be applied. - rules (Sequence[google.cloud.bigtable_v2.types.ReadModifyWriteRule]): + rules (MutableSequence[google.cloud.bigtable_v2.types.ReadModifyWriteRule]): Required. Rules specifying how the specified row's contents are to be transformed into writes. Entries are applied in order, meaning @@ -623,19 +624,19 @@ class ReadModifyWriteRowRequest(proto.Message): later ones. """ - table_name = proto.Field( + table_name: str = proto.Field( proto.STRING, number=1, ) - app_profile_id = proto.Field( + app_profile_id: str = proto.Field( proto.STRING, number=4, ) - row_key = proto.Field( + row_key: bytes = proto.Field( proto.BYTES, number=2, ) - rules = proto.RepeatedField( + rules: MutableSequence[data.ReadModifyWriteRule] = proto.RepeatedField( proto.MESSAGE, number=3, message=data.ReadModifyWriteRule, @@ -651,7 +652,7 @@ class ReadModifyWriteRowResponse(proto.Message): cells modified by the request. """ - row = proto.Field( + row: data.Row = proto.Field( proto.MESSAGE, number=1, message=data.Row, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py index bd45a62d3238..a3bec7274748 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # +from typing import MutableMapping, MutableSequence + import proto # type: ignore @@ -47,17 +49,17 @@ class Row(proto.Message): used to identify the row in, for example, a MutateRowRequest. May contain any non-empty byte string up to 4KiB in length. - families (Sequence[google.cloud.bigtable_v2.types.Family]): + families (MutableSequence[google.cloud.bigtable_v2.types.Family]): May be empty, but only if the entire row is empty. The mutual ordering of column families is not specified. 
""" - key = proto.Field( + key: bytes = proto.Field( proto.BYTES, number=1, ) - families = proto.RepeatedField( + families: MutableSequence["Family"] = proto.RepeatedField( proto.MESSAGE, number=2, message="Family", @@ -77,16 +79,16 @@ class Family(proto.Message): ``[-_.a-zA-Z0-9]+``, except that AggregatingRowProcessors may produce cells in a sentinel family with an empty name. Must be no greater than 64 characters in length. - columns (Sequence[google.cloud.bigtable_v2.types.Column]): + columns (MutableSequence[google.cloud.bigtable_v2.types.Column]): Must not be empty. Sorted in order of increasing "qualifier". """ - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) - columns = proto.RepeatedField( + columns: MutableSequence["Column"] = proto.RepeatedField( proto.MESSAGE, number=2, message="Column", @@ -105,16 +107,16 @@ class Column(proto.Message): ``column_qualifier_regex_filter`` field. May contain any byte string, including the empty string, up to 16kiB in length. - cells (Sequence[google.cloud.bigtable_v2.types.Cell]): + cells (MutableSequence[google.cloud.bigtable_v2.types.Cell]): Must not be empty. Sorted in order of decreasing "timestamp_micros". """ - qualifier = proto.Field( + qualifier: bytes = proto.Field( proto.BYTES, number=1, ) - cells = proto.RepeatedField( + cells: MutableSequence["Cell"] = proto.RepeatedField( proto.MESSAGE, number=2, message="Cell", @@ -138,20 +140,20 @@ class Cell(proto.Message): The value stored in the cell. May contain any byte string, including the empty string, up to 100MiB in length. - labels (Sequence[str]): + labels (MutableSequence[str]): Labels applied to the cell by a [RowFilter][google.bigtable.v2.RowFilter]. """ - timestamp_micros = proto.Field( + timestamp_micros: int = proto.Field( proto.INT64, number=1, ) - value = proto.Field( + value: bytes = proto.Field( proto.BYTES, number=2, ) - labels = proto.RepeatedField( + labels: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=3, ) @@ -190,22 +192,22 @@ class RowRange(proto.Message): This field is a member of `oneof`_ ``end_key``. """ - start_key_closed = proto.Field( + start_key_closed: bytes = proto.Field( proto.BYTES, number=1, oneof="start_key", ) - start_key_open = proto.Field( + start_key_open: bytes = proto.Field( proto.BYTES, number=2, oneof="start_key", ) - end_key_open = proto.Field( + end_key_open: bytes = proto.Field( proto.BYTES, number=3, oneof="end_key", ) - end_key_closed = proto.Field( + end_key_closed: bytes = proto.Field( proto.BYTES, number=4, oneof="end_key", @@ -216,17 +218,17 @@ class RowSet(proto.Message): r"""Specifies a non-contiguous set of rows. Attributes: - row_keys (Sequence[bytes]): + row_keys (MutableSequence[bytes]): Single rows included in the set. - row_ranges (Sequence[google.cloud.bigtable_v2.types.RowRange]): + row_ranges (MutableSequence[google.cloud.bigtable_v2.types.RowRange]): Contiguous row ranges included in the set. """ - row_keys = proto.RepeatedField( + row_keys: MutableSequence[bytes] = proto.RepeatedField( proto.BYTES, number=1, ) - row_ranges = proto.RepeatedField( + row_ranges: MutableSequence["RowRange"] = proto.RepeatedField( proto.MESSAGE, number=2, message="RowRange", @@ -272,26 +274,26 @@ class ColumnRange(proto.Message): This field is a member of `oneof`_ ``end_qualifier``. 
""" - family_name = proto.Field( + family_name: str = proto.Field( proto.STRING, number=1, ) - start_qualifier_closed = proto.Field( + start_qualifier_closed: bytes = proto.Field( proto.BYTES, number=2, oneof="start_qualifier", ) - start_qualifier_open = proto.Field( + start_qualifier_open: bytes = proto.Field( proto.BYTES, number=3, oneof="start_qualifier", ) - end_qualifier_closed = proto.Field( + end_qualifier_closed: bytes = proto.Field( proto.BYTES, number=4, oneof="end_qualifier", ) - end_qualifier_open = proto.Field( + end_qualifier_open: bytes = proto.Field( proto.BYTES, number=5, oneof="end_qualifier", @@ -310,11 +312,11 @@ class TimestampRange(proto.Message): interpreted as infinity. """ - start_timestamp_micros = proto.Field( + start_timestamp_micros: int = proto.Field( proto.INT64, number=1, ) - end_timestamp_micros = proto.Field( + end_timestamp_micros: int = proto.Field( proto.INT64, number=2, ) @@ -353,22 +355,22 @@ class ValueRange(proto.Message): This field is a member of `oneof`_ ``end_value``. """ - start_value_closed = proto.Field( + start_value_closed: bytes = proto.Field( proto.BYTES, number=1, oneof="start_value", ) - start_value_open = proto.Field( + start_value_open: bytes = proto.Field( proto.BYTES, number=2, oneof="start_value", ) - end_value_closed = proto.Field( + end_value_closed: bytes = proto.Field( proto.BYTES, number=3, oneof="end_value", ) - end_value_open = proto.Field( + end_value_open: bytes = proto.Field( proto.BYTES, number=4, oneof="end_value", @@ -630,7 +632,7 @@ class Chain(proto.Message): sequence. Attributes: - filters (Sequence[google.cloud.bigtable_v2.types.RowFilter]): + filters (MutableSequence[google.cloud.bigtable_v2.types.RowFilter]): The elements of "filters" are chained together to process the input row: in row -> f(0) -> intermediate row -> f(1) -> ... -> f(N) @@ -638,7 +640,7 @@ class Chain(proto.Message): atomically. """ - filters = proto.RepeatedField( + filters: MutableSequence["RowFilter"] = proto.RepeatedField( proto.MESSAGE, number=1, message="RowFilter", @@ -649,7 +651,7 @@ class Interleave(proto.Message): RowFilters and interleaves the results. Attributes: - filters (Sequence[google.cloud.bigtable_v2.types.RowFilter]): + filters (MutableSequence[google.cloud.bigtable_v2.types.RowFilter]): The elements of "filters" all process a copy of the input row, and the results are pooled, sorted, and combined into a single output row. If multiple cells are produced with the @@ -680,7 +682,7 @@ class Interleave(proto.Message): All interleaved filters are executed atomically. """ - filters = proto.RepeatedField( + filters: MutableSequence["RowFilter"] = proto.RepeatedField( proto.MESSAGE, number=1, message="RowFilter", @@ -711,119 +713,119 @@ class Condition(proto.Message): will be returned in the false case. 
""" - predicate_filter = proto.Field( + predicate_filter: "RowFilter" = proto.Field( proto.MESSAGE, number=1, message="RowFilter", ) - true_filter = proto.Field( + true_filter: "RowFilter" = proto.Field( proto.MESSAGE, number=2, message="RowFilter", ) - false_filter = proto.Field( + false_filter: "RowFilter" = proto.Field( proto.MESSAGE, number=3, message="RowFilter", ) - chain = proto.Field( + chain: Chain = proto.Field( proto.MESSAGE, number=1, oneof="filter", message=Chain, ) - interleave = proto.Field( + interleave: Interleave = proto.Field( proto.MESSAGE, number=2, oneof="filter", message=Interleave, ) - condition = proto.Field( + condition: Condition = proto.Field( proto.MESSAGE, number=3, oneof="filter", message=Condition, ) - sink = proto.Field( + sink: bool = proto.Field( proto.BOOL, number=16, oneof="filter", ) - pass_all_filter = proto.Field( + pass_all_filter: bool = proto.Field( proto.BOOL, number=17, oneof="filter", ) - block_all_filter = proto.Field( + block_all_filter: bool = proto.Field( proto.BOOL, number=18, oneof="filter", ) - row_key_regex_filter = proto.Field( + row_key_regex_filter: bytes = proto.Field( proto.BYTES, number=4, oneof="filter", ) - row_sample_filter = proto.Field( + row_sample_filter: float = proto.Field( proto.DOUBLE, number=14, oneof="filter", ) - family_name_regex_filter = proto.Field( + family_name_regex_filter: str = proto.Field( proto.STRING, number=5, oneof="filter", ) - column_qualifier_regex_filter = proto.Field( + column_qualifier_regex_filter: bytes = proto.Field( proto.BYTES, number=6, oneof="filter", ) - column_range_filter = proto.Field( + column_range_filter: "ColumnRange" = proto.Field( proto.MESSAGE, number=7, oneof="filter", message="ColumnRange", ) - timestamp_range_filter = proto.Field( + timestamp_range_filter: "TimestampRange" = proto.Field( proto.MESSAGE, number=8, oneof="filter", message="TimestampRange", ) - value_regex_filter = proto.Field( + value_regex_filter: bytes = proto.Field( proto.BYTES, number=9, oneof="filter", ) - value_range_filter = proto.Field( + value_range_filter: "ValueRange" = proto.Field( proto.MESSAGE, number=15, oneof="filter", message="ValueRange", ) - cells_per_row_offset_filter = proto.Field( + cells_per_row_offset_filter: int = proto.Field( proto.INT32, number=10, oneof="filter", ) - cells_per_row_limit_filter = proto.Field( + cells_per_row_limit_filter: int = proto.Field( proto.INT32, number=11, oneof="filter", ) - cells_per_column_limit_filter = proto.Field( + cells_per_column_limit_filter: int = proto.Field( proto.INT32, number=12, oneof="filter", ) - strip_value_transformer = proto.Field( + strip_value_transformer: bool = proto.Field( proto.BOOL, number=13, oneof="filter", ) - apply_label_transformer = proto.Field( + apply_label_transformer: str = proto.Field( proto.STRING, number=19, oneof="filter", @@ -884,19 +886,19 @@ class SetCell(proto.Message): cell. """ - family_name = proto.Field( + family_name: str = proto.Field( proto.STRING, number=1, ) - column_qualifier = proto.Field( + column_qualifier: bytes = proto.Field( proto.BYTES, number=2, ) - timestamp_micros = proto.Field( + timestamp_micros: int = proto.Field( proto.INT64, number=3, ) - value = proto.Field( + value: bytes = proto.Field( proto.BYTES, number=4, ) @@ -918,15 +920,15 @@ class DeleteFromColumn(proto.Message): should be deleted. 
""" - family_name = proto.Field( + family_name: str = proto.Field( proto.STRING, number=1, ) - column_qualifier = proto.Field( + column_qualifier: bytes = proto.Field( proto.BYTES, number=2, ) - time_range = proto.Field( + time_range: "TimestampRange" = proto.Field( proto.MESSAGE, number=3, message="TimestampRange", @@ -942,7 +944,7 @@ class DeleteFromFamily(proto.Message): Must match ``[-_.a-zA-Z0-9]+`` """ - family_name = proto.Field( + family_name: str = proto.Field( proto.STRING, number=1, ) @@ -950,25 +952,25 @@ class DeleteFromFamily(proto.Message): class DeleteFromRow(proto.Message): r"""A Mutation which deletes all cells from the containing row.""" - set_cell = proto.Field( + set_cell: SetCell = proto.Field( proto.MESSAGE, number=1, oneof="mutation", message=SetCell, ) - delete_from_column = proto.Field( + delete_from_column: DeleteFromColumn = proto.Field( proto.MESSAGE, number=2, oneof="mutation", message=DeleteFromColumn, ) - delete_from_family = proto.Field( + delete_from_family: DeleteFromFamily = proto.Field( proto.MESSAGE, number=3, oneof="mutation", message=DeleteFromFamily, ) - delete_from_row = proto.Field( + delete_from_row: DeleteFromRow = proto.Field( proto.MESSAGE, number=4, oneof="mutation", @@ -1012,20 +1014,20 @@ class ReadModifyWriteRule(proto.Message): This field is a member of `oneof`_ ``rule``. """ - family_name = proto.Field( + family_name: str = proto.Field( proto.STRING, number=1, ) - column_qualifier = proto.Field( + column_qualifier: bytes = proto.Field( proto.BYTES, number=2, ) - append_value = proto.Field( + append_value: bytes = proto.Field( proto.BYTES, number=3, oneof="rule", ) - increment_amount = proto.Field( + increment_amount: int = proto.Field( proto.INT64, number=4, oneof="rule", diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/request_stats.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/request_stats.py index d6f30c1c250f..621959a8c514 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/request_stats.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/request_stats.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # +from typing import MutableMapping, MutableSequence + import proto # type: ignore from google.protobuf import duration_pb2 # type: ignore @@ -21,19 +23,19 @@ __protobuf__ = proto.module( package="google.bigtable.v2", manifest={ - "ReadIteratorStats", + "ReadIterationStats", "RequestLatencyStats", - "ReadEfficiencyStats", - "AllReadStats", + "FullReadStatsView", "RequestStats", }, ) -class ReadIteratorStats(proto.Message): - r"""ReadIteratorStats captures information about the iteration of - rows or cells over the course of a read, e.g. how many results - were scanned in a read operation versus the results returned. +class ReadIterationStats(proto.Message): + r"""ReadIterationStats captures information about the iteration + of rows or cells over the course of a read, e.g. how many + results were scanned in a read operation versus the results + returned. Attributes: rows_seen_count (int): @@ -48,30 +50,24 @@ class ReadIteratorStats(proto.Message): returned, as captured below. cells_returned_count (int): The cells returned as part of the request. - deletes_seen_count (int): - The deletes seen as part of the request. 
""" - rows_seen_count = proto.Field( + rows_seen_count: int = proto.Field( proto.INT64, number=1, ) - rows_returned_count = proto.Field( + rows_returned_count: int = proto.Field( proto.INT64, number=2, ) - cells_seen_count = proto.Field( + cells_seen_count: int = proto.Field( proto.INT64, number=3, ) - cells_returned_count = proto.Field( + cells_returned_count: int = proto.Field( proto.INT64, number=4, ) - deletes_seen_count = proto.Field( - proto.INT64, - number=5, - ) class RequestLatencyStats(proto.Message): @@ -95,48 +91,31 @@ class RequestLatencyStats(proto.Message): as this value needs to be sent in the response before the latency measurement including that transmission is finalized. - """ - frontend_server_latency = proto.Field( - proto.MESSAGE, - number=1, - message=duration_pb2.Duration, - ) - - -class ReadEfficiencyStats(proto.Message): - r"""ReadEfficiencyStats captures information about the efficiency - of a read. - - Attributes: - read_iterator_stats (google.cloud.bigtable_v2.types.ReadIteratorStats): - Iteration stats describe how efficient the - read is, e.g. comparing rows seen vs. rows - returned or cells seen vs cells returned can - provide an indication of read efficiency (the - higher the ratio of seen to retuned the better). - request_latency_stats (google.cloud.bigtable_v2.types.RequestLatencyStats): - Request latency stats describe the time taken - to complete a request, from the server side. + Note: This value includes the end-to-end latency + of contacting nodes in the targeted cluster, + e.g. measuring from when the first byte arrives + at the frontend server, to when this value is + sent back as the last value in the response, + including any latency incurred by contacting + nodes, waiting for results from nodes, and + finally sending results from nodes back to the + caller. """ - read_iterator_stats = proto.Field( + frontend_server_latency: duration_pb2.Duration = proto.Field( proto.MESSAGE, number=1, - message="ReadIteratorStats", - ) - request_latency_stats = proto.Field( - proto.MESSAGE, - number=2, - message="RequestLatencyStats", + message=duration_pb2.Duration, ) -class AllReadStats(proto.Message): - r"""AllReadStats captures all known information about a read. +class FullReadStatsView(proto.Message): + r"""FullReadStatsView captures all known information about a + read. Attributes: - read_iterator_stats (google.cloud.bigtable_v2.types.ReadIteratorStats): + read_iteration_stats (google.cloud.bigtable_v2.types.ReadIterationStats): Iteration stats describe how efficient the read is, e.g. comparing rows seen vs. rows returned or cells seen vs cells returned can @@ -147,12 +126,12 @@ class AllReadStats(proto.Message): to complete a request, from the server side. """ - read_iterator_stats = proto.Field( + read_iteration_stats: "ReadIterationStats" = proto.Field( proto.MESSAGE, number=1, - message="ReadIteratorStats", + message="ReadIterationStats", ) - request_latency_stats = proto.Field( + request_latency_stats: "RequestLatencyStats" = proto.Field( proto.MESSAGE, number=2, message="RequestLatencyStats", @@ -166,39 +145,23 @@ class RequestStats(proto.Message): - google.bigtable.v2.ReadRows - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. .. 
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields Attributes: - read_efficiency_stats (google.cloud.bigtable_v2.types.ReadEfficiencyStats): - Available with the - ReadRowsRequest.RequestStatsView.REQUEST_STATS_EFFICIENCY - view, see package google.bigtable.v2. - - This field is a member of `oneof`_ ``stats``. - all_read_stats (google.cloud.bigtable_v2.types.AllReadStats): + full_read_stats_view (google.cloud.bigtable_v2.types.FullReadStatsView): Available with the ReadRowsRequest.RequestStatsView.REQUEST_STATS_FULL view, see package google.bigtable.v2. - This field is a member of `oneof`_ ``stats``. + This field is a member of `oneof`_ ``stats_view``. """ - read_efficiency_stats = proto.Field( + full_read_stats_view: "FullReadStatsView" = proto.Field( proto.MESSAGE, number=1, - oneof="stats", - message="ReadEfficiencyStats", - ) - all_read_stats = proto.Field( - proto.MESSAGE, - number=2, - oneof="stats", - message="AllReadStats", + oneof="stats_view", + message="FullReadStatsView", ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/response_params.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/response_params.py index b11cffeefc79..4e9233b88202 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/response_params.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/response_params.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # +from typing import MutableMapping, MutableSequence + import proto # type: ignore @@ -42,12 +44,12 @@ class ResponseParams(proto.Message): This field is a member of `oneof`_ ``_cluster_id``. """ - zone_id = proto.Field( + zone_id: str = proto.Field( proto.STRING, number=1, optional=True, ) - cluster_id = proto.Field( + cluster_id: str = proto.Field( proto.STRING, number=2, optional=True, diff --git a/packages/google-cloud-bigtable/owlbot.py b/packages/google-cloud-bigtable/owlbot.py index 7b35e1a9c29f..391f292aec22 100644 --- a/packages/google-cloud-bigtable/owlbot.py +++ b/packages/google-cloud-bigtable/owlbot.py @@ -70,12 +70,13 @@ def get_staging_dirs( bigtable_admin_default_version = "v2" for library in get_staging_dirs(bigtable_default_version, "bigtable"): - s.move(library / "google/cloud/bigtable_v*") + s.move(library / "google/cloud/bigtable_v2") s.move(library / "tests") s.move(library / "scripts") for library in get_staging_dirs(bigtable_admin_default_version, "bigtable_admin"): - s.move(library / "google/cloud/bigtable_admin_v*") + s.move(library / "google/cloud/bigtable_admin") + s.move(library / "google/cloud/bigtable_admin_v2") s.move(library / "tests") s.move(library / "scripts") @@ -91,7 +92,7 @@ def get_staging_dirs( cov_level=100, ) -s.move(templated_files, excludes=[".coveragerc", "README.rst"]) +s.move(templated_files, excludes=[".coveragerc", "README.rst", ".github/release-please.yml"]) # ---------------------------------------------------------------------------- # Customize noxfile.py @@ -218,8 +219,6 @@ def lint_setup_py(session): python.py_samples(skip_readmes=True) -python.configure_previous_major_version_branches() - s.replace( "samples/beam/noxfile.py", """INSTALL_LIBRARY_FROM_SOURCE \= os.environ.get\("INSTALL_LIBRARY_FROM_SOURCE", False\) in \( diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index f30c8f204a92..c564a3a0fc4d 100644 --- a/packages/google-cloud-bigtable/setup.py 
+++ b/packages/google-cloud-bigtable/setup.py @@ -18,11 +18,19 @@ import setuptools +package_root = os.path.abspath(os.path.dirname(__file__)) + # Package metadata. name = "google-cloud-bigtable" description = "Google Cloud Bigtable API client library" -version = "2.13.2" + +version = {} +with open(os.path.join(package_root, "google/cloud/bigtable/gapic_version.py")) as fp: + exec(fp.read(), version) +version = version["__version__"] + + # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta' diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py index 38f1bbd80701..e52fc1aba820 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py @@ -747,7 +747,7 @@ def test_read_rows_routing_parameters(): # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable.ReadRowsRequest( - {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} ) # Mock the actual call within the gRPC stub, and fake the request. @@ -765,7 +765,7 @@ def test_read_rows_routing_parameters(): assert kw["metadata"] # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable.ReadRowsRequest({"app_profile_id": "sample1"}) + request = bigtable.ReadRowsRequest(**{"app_profile_id": "sample1"}) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.read_rows), "__call__") as call: @@ -966,7 +966,7 @@ def test_sample_row_keys_routing_parameters(): # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable.SampleRowKeysRequest( - {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} ) # Mock the actual call within the gRPC stub, and fake the request. @@ -984,7 +984,7 @@ def test_sample_row_keys_routing_parameters(): assert kw["metadata"] # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable.SampleRowKeysRequest({"app_profile_id": "sample1"}) + request = bigtable.SampleRowKeysRequest(**{"app_profile_id": "sample1"}) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: @@ -1182,7 +1182,7 @@ def test_mutate_row_routing_parameters(): # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable.MutateRowRequest( - {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1200,7 +1200,7 @@ def test_mutate_row_routing_parameters(): assert kw["metadata"] # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. 
- request = bigtable.MutateRowRequest({"app_profile_id": "sample1"}) + request = bigtable.MutateRowRequest(**{"app_profile_id": "sample1"}) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: @@ -1447,7 +1447,7 @@ def test_mutate_rows_routing_parameters(): # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable.MutateRowsRequest( - {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1465,7 +1465,7 @@ def test_mutate_rows_routing_parameters(): assert kw["metadata"] # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable.MutateRowsRequest({"app_profile_id": "sample1"}) + request = bigtable.MutateRowsRequest(**{"app_profile_id": "sample1"}) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: @@ -1685,7 +1685,7 @@ def test_check_and_mutate_row_routing_parameters(): # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable.CheckAndMutateRowRequest( - {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1705,7 +1705,7 @@ def test_check_and_mutate_row_routing_parameters(): assert kw["metadata"] # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable.CheckAndMutateRowRequest({"app_profile_id": "sample1"}) + request = bigtable.CheckAndMutateRowRequest(**{"app_profile_id": "sample1"}) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -2055,7 +2055,7 @@ def test_ping_and_warm_routing_parameters(): # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable.PingAndWarmRequest( - {"name": "projects/sample1/instances/sample2"} + **{"name": "projects/sample1/instances/sample2"} ) # Mock the actual call within the gRPC stub, and fake the request. @@ -2073,7 +2073,7 @@ def test_ping_and_warm_routing_parameters(): assert kw["metadata"] # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable.PingAndWarmRequest({"app_profile_id": "sample1"}) + request = bigtable.PingAndWarmRequest(**{"app_profile_id": "sample1"}) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.ping_and_warm), "__call__") as call: @@ -2279,7 +2279,7 @@ def test_read_modify_write_row_routing_parameters(): # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = bigtable.ReadModifyWriteRowRequest( - {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -2299,7 +2299,7 @@ def test_read_modify_write_row_routing_parameters(): assert kw["metadata"] # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable.ReadModifyWriteRowRequest({"app_profile_id": "sample1"}) + request = bigtable.ReadModifyWriteRowRequest(**{"app_profile_id": "sample1"}) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( From 959e7d6c1493f76a1e22ebbd46a6b616f2bcb01b Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Fri, 18 Nov 2022 20:49:11 +0100 Subject: [PATCH 673/892] chore(deps): update all dependencies (#692) --- .../google-cloud-bigtable/.github/workflows/system_emulated.yml | 2 +- packages/google-cloud-bigtable/samples/beam/requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml b/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml index 26b4894e52b8..3fbc0ae10e5d 100644 --- a/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml +++ b/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml @@ -20,7 +20,7 @@ jobs: python-version: '3.8' - name: Setup GCloud SDK - uses: google-github-actions/setup-gcloud@v0.6.2 + uses: google-github-actions/setup-gcloud@v1.0.1 - name: Install / run Nox run: | diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index 28ced316a27f..785812001bcc 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ -apache-beam==2.42.0 +apache-beam==2.43.0 google-cloud-bigtable==2.13.2 google-cloud-core==2.3.2 From 199f1ceae3694f3130b6d5a9f4cc5d4dab51d081 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Fri, 18 Nov 2022 13:20:12 -0800 Subject: [PATCH 674/892] chore(python): update release script dependencies (#694) Source-Link: https://github.com/googleapis/synthtool/commit/25083af347468dd5f90f69627420f7d452b6c50e Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:e6cbd61f1838d9ff6a31436dfc13717f372a7482a82fc1863ca954ec47bff8c8 Co-authored-by: Owl Bot Co-authored-by: Anthonios Partheniou --- .../.github/.OwlBot.lock.yaml | 2 +- .../.github/workflows/docs.yml | 4 +- .../.github/workflows/lint.yml | 2 +- .../.github/workflows/unittest.yml | 2 +- .../.kokoro/docker/docs/Dockerfile | 12 +- .../.kokoro/requirements.in | 4 +- .../.kokoro/requirements.txt | 354 ++++++++++-------- packages/google-cloud-bigtable/noxfile.py | 15 +- 8 files changed, 218 insertions(+), 177 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 3815c983cb16..3f1ccc085ef7 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,4 +13,4 @@ # limitations under the License. 
docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:7a40313731a7cb1454eef6b33d3446ebb121836738dc3ab3d2d3ded5268c35b6 + digest: sha256:e6cbd61f1838d9ff6a31436dfc13717f372a7482a82fc1863ca954ec47bff8c8 diff --git a/packages/google-cloud-bigtable/.github/workflows/docs.yml b/packages/google-cloud-bigtable/.github/workflows/docs.yml index 7092a139aed3..e97d89e484c9 100644 --- a/packages/google-cloud-bigtable/.github/workflows/docs.yml +++ b/packages/google-cloud-bigtable/.github/workflows/docs.yml @@ -12,7 +12,7 @@ jobs: - name: Setup Python uses: actions/setup-python@v4 with: - python-version: "3.10" + python-version: "3.9" - name: Install nox run: | python -m pip install --upgrade setuptools pip wheel @@ -28,7 +28,7 @@ jobs: - name: Setup Python uses: actions/setup-python@v4 with: - python-version: "3.10" + python-version: "3.9" - name: Install nox run: | python -m pip install --upgrade setuptools pip wheel diff --git a/packages/google-cloud-bigtable/.github/workflows/lint.yml b/packages/google-cloud-bigtable/.github/workflows/lint.yml index d2aee5b7d8ec..16d5a9e90f6d 100644 --- a/packages/google-cloud-bigtable/.github/workflows/lint.yml +++ b/packages/google-cloud-bigtable/.github/workflows/lint.yml @@ -12,7 +12,7 @@ jobs: - name: Setup Python uses: actions/setup-python@v4 with: - python-version: "3.10" + python-version: "3.8" - name: Install nox run: | python -m pip install --upgrade setuptools pip wheel diff --git a/packages/google-cloud-bigtable/.github/workflows/unittest.yml b/packages/google-cloud-bigtable/.github/workflows/unittest.yml index 87ade4d54362..23000c05d9d8 100644 --- a/packages/google-cloud-bigtable/.github/workflows/unittest.yml +++ b/packages/google-cloud-bigtable/.github/workflows/unittest.yml @@ -41,7 +41,7 @@ jobs: - name: Setup Python uses: actions/setup-python@v4 with: - python-version: "3.10" + python-version: "3.8" - name: Install coverage run: | python -m pip install --upgrade setuptools pip wheel diff --git a/packages/google-cloud-bigtable/.kokoro/docker/docs/Dockerfile b/packages/google-cloud-bigtable/.kokoro/docker/docs/Dockerfile index 238b87b9d1c9..f8137d0ae497 100644 --- a/packages/google-cloud-bigtable/.kokoro/docker/docs/Dockerfile +++ b/packages/google-cloud-bigtable/.kokoro/docker/docs/Dockerfile @@ -60,16 +60,16 @@ RUN apt-get update \ && rm -rf /var/lib/apt/lists/* \ && rm -f /var/cache/apt/archives/*.deb -###################### Install python 3.8.11 +###################### Install python 3.9.13 -# Download python 3.8.11 -RUN wget https://www.python.org/ftp/python/3.8.11/Python-3.8.11.tgz +# Download python 3.9.13 +RUN wget https://www.python.org/ftp/python/3.9.13/Python-3.9.13.tgz # Extract files -RUN tar -xvf Python-3.8.11.tgz +RUN tar -xvf Python-3.9.13.tgz -# Install python 3.8.11 -RUN ./Python-3.8.11/configure --enable-optimizations +# Install python 3.9.13 +RUN ./Python-3.9.13/configure --enable-optimizations RUN make altinstall ###################### Install pip diff --git a/packages/google-cloud-bigtable/.kokoro/requirements.in b/packages/google-cloud-bigtable/.kokoro/requirements.in index 7718391a34d7..cbd7e77f44db 100644 --- a/packages/google-cloud-bigtable/.kokoro/requirements.in +++ b/packages/google-cloud-bigtable/.kokoro/requirements.in @@ -5,4 +5,6 @@ typing-extensions twine wheel setuptools -nox \ No newline at end of file +nox +charset-normalizer<3 +click<8.1.0 diff --git a/packages/google-cloud-bigtable/.kokoro/requirements.txt b/packages/google-cloud-bigtable/.kokoro/requirements.txt index 
d15994bac93c..9c1b9be34e6b 100644 --- a/packages/google-cloud-bigtable/.kokoro/requirements.txt +++ b/packages/google-cloud-bigtable/.kokoro/requirements.txt @@ -20,9 +20,9 @@ cachetools==5.2.0 \ --hash=sha256:6a94c6402995a99c3970cc7e4884bb60b4a8639938157eeed436098bf9831757 \ --hash=sha256:f9f17d2aec496a9aa6b76f53e3b614c965223c061982d434d160f930c698a9db # via google-auth -certifi==2022.6.15 \ - --hash=sha256:84c85a9078b11105f04f3036a9482ae10e4621616db313fe045dd24743a0820d \ - --hash=sha256:fe86415d55e84719d75f8b69414f6438ac3547d2078ab91b67e779ef69378412 +certifi==2022.9.24 \ + --hash=sha256:0d9c601124e5a6ba9712dbc60d9c53c21e34f5f641fe83002317394311bdce14 \ + --hash=sha256:90c1a32f1d68f940488354e36370f6cca89f0f106db09518524c88d6ed83f382 # via requests cffi==1.15.1 \ --hash=sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5 \ @@ -93,11 +93,14 @@ cffi==1.15.1 \ charset-normalizer==2.1.1 \ --hash=sha256:5a3d016c7c547f69d6f81fb0db9449ce888b418b5b9952cc5e6e66843e9dd845 \ --hash=sha256:83e9a75d1911279afd89352c68b45348559d1fc0506b054b346651b5e7fee29f - # via requests + # via + # -r requirements.in + # requests click==8.0.4 \ --hash=sha256:6a7a62563bbfabfda3a38f3023a1db4a35978c0abd76f6c9605ecd6554d6d9b1 \ --hash=sha256:8458d7b1287c5fb128c90e23381cf99dcde74beaf6c7ff6384ce84d6fe090adb # via + # -r requirements.in # gcp-docuploader # gcp-releasetool colorlog==6.7.0 \ @@ -110,29 +113,33 @@ commonmark==0.9.1 \ --hash=sha256:452f9dc859be7f06631ddcb328b6919c67984aca654e5fefb3914d54691aed60 \ --hash=sha256:da2f38c92590f83de410ba1a3cbceafbc74fee9def35f9251ba9a971d6d66fd9 # via rich -cryptography==37.0.4 \ - --hash=sha256:190f82f3e87033821828f60787cfa42bff98404483577b591429ed99bed39d59 \ - --hash=sha256:2be53f9f5505673eeda5f2736bea736c40f051a739bfae2f92d18aed1eb54596 \ - --hash=sha256:30788e070800fec9bbcf9faa71ea6d8068f5136f60029759fd8c3efec3c9dcb3 \ - --hash=sha256:3d41b965b3380f10e4611dbae366f6dc3cefc7c9ac4e8842a806b9672ae9add5 \ - --hash=sha256:4c590ec31550a724ef893c50f9a97a0c14e9c851c85621c5650d699a7b88f7ab \ - --hash=sha256:549153378611c0cca1042f20fd9c5030d37a72f634c9326e225c9f666d472884 \ - --hash=sha256:63f9c17c0e2474ccbebc9302ce2f07b55b3b3fcb211ded18a42d5764f5c10a82 \ - --hash=sha256:6bc95ed67b6741b2607298f9ea4932ff157e570ef456ef7ff0ef4884a134cc4b \ - --hash=sha256:7099a8d55cd49b737ffc99c17de504f2257e3787e02abe6d1a6d136574873441 \ - --hash=sha256:75976c217f10d48a8b5a8de3d70c454c249e4b91851f6838a4e48b8f41eb71aa \ - --hash=sha256:7bc997818309f56c0038a33b8da5c0bfbb3f1f067f315f9abd6fc07ad359398d \ - --hash=sha256:80f49023dd13ba35f7c34072fa17f604d2f19bf0989f292cedf7ab5770b87a0b \ - --hash=sha256:91ce48d35f4e3d3f1d83e29ef4a9267246e6a3be51864a5b7d2247d5086fa99a \ - --hash=sha256:a958c52505c8adf0d3822703078580d2c0456dd1d27fabfb6f76fe63d2971cd6 \ - --hash=sha256:b62439d7cd1222f3da897e9a9fe53bbf5c104fff4d60893ad1355d4c14a24157 \ - --hash=sha256:b7f8dd0d4c1f21759695c05a5ec8536c12f31611541f8904083f3dc582604280 \ - --hash=sha256:d204833f3c8a33bbe11eda63a54b1aad7aa7456ed769a982f21ec599ba5fa282 \ - --hash=sha256:e007f052ed10cc316df59bc90fbb7ff7950d7e2919c9757fd42a2b8ecf8a5f67 \ - --hash=sha256:f2dcb0b3b63afb6df7fd94ec6fbddac81b5492513f7b0436210d390c14d46ee8 \ - --hash=sha256:f721d1885ecae9078c3f6bbe8a88bc0786b6e749bf32ccec1ef2b18929a05046 \ - --hash=sha256:f7a6de3e98771e183645181b3627e2563dcde3ce94a9e42a3f427d2255190327 \ - --hash=sha256:f8c0a6e9e1dd3eb0414ba320f85da6b0dcbd543126e30fcc546e7372a7fbf3b9 +cryptography==38.0.3 \ + 
--hash=sha256:068147f32fa662c81aebab95c74679b401b12b57494872886eb5c1139250ec5d \ + --hash=sha256:06fc3cc7b6f6cca87bd56ec80a580c88f1da5306f505876a71c8cfa7050257dd \ + --hash=sha256:25c1d1f19729fb09d42e06b4bf9895212292cb27bb50229f5aa64d039ab29146 \ + --hash=sha256:402852a0aea73833d982cabb6d0c3bb582c15483d29fb7085ef2c42bfa7e38d7 \ + --hash=sha256:4e269dcd9b102c5a3d72be3c45d8ce20377b8076a43cbed6f660a1afe365e436 \ + --hash=sha256:5419a127426084933076132d317911e3c6eb77568a1ce23c3ac1e12d111e61e0 \ + --hash=sha256:554bec92ee7d1e9d10ded2f7e92a5d70c1f74ba9524947c0ba0c850c7b011828 \ + --hash=sha256:5e89468fbd2fcd733b5899333bc54d0d06c80e04cd23d8c6f3e0542358c6060b \ + --hash=sha256:65535bc550b70bd6271984d9863a37741352b4aad6fb1b3344a54e6950249b55 \ + --hash=sha256:6ab9516b85bebe7aa83f309bacc5f44a61eeb90d0b4ec125d2d003ce41932d36 \ + --hash=sha256:6addc3b6d593cd980989261dc1cce38263c76954d758c3c94de51f1e010c9a50 \ + --hash=sha256:728f2694fa743a996d7784a6194da430f197d5c58e2f4e278612b359f455e4a2 \ + --hash=sha256:785e4056b5a8b28f05a533fab69febf5004458e20dad7e2e13a3120d8ecec75a \ + --hash=sha256:78cf5eefac2b52c10398a42765bfa981ce2372cbc0457e6bf9658f41ec3c41d8 \ + --hash=sha256:7f836217000342d448e1c9a342e9163149e45d5b5eca76a30e84503a5a96cab0 \ + --hash=sha256:8d41a46251bf0634e21fac50ffd643216ccecfaf3701a063257fe0b2be1b6548 \ + --hash=sha256:984fe150f350a3c91e84de405fe49e688aa6092b3525f407a18b9646f6612320 \ + --hash=sha256:9b24bcff7853ed18a63cfb0c2b008936a9554af24af2fb146e16d8e1aed75748 \ + --hash=sha256:b1b35d9d3a65542ed2e9d90115dfd16bbc027b3f07ee3304fc83580f26e43249 \ + --hash=sha256:b1b52c9e5f8aa2b802d48bd693190341fae201ea51c7a167d69fc48b60e8a959 \ + --hash=sha256:bbf203f1a814007ce24bd4d51362991d5cb90ba0c177a9c08825f2cc304d871f \ + --hash=sha256:be243c7e2bfcf6cc4cb350c0d5cdf15ca6383bbcb2a8ef51d3c9411a9d4386f0 \ + --hash=sha256:bfbe6ee19615b07a98b1d2287d6a6073f734735b49ee45b11324d85efc4d5cbd \ + --hash=sha256:c46837ea467ed1efea562bbeb543994c2d1f6e800785bd5a2c98bc096f5cb220 \ + --hash=sha256:dfb4f4dd568de1b6af9f4cda334adf7d72cf5bc052516e1b2608b683375dd95c \ + --hash=sha256:ed7b00096790213e09eb11c97cc6e2b757f15f3d2f85833cd2d3ec3fe37c1722 # via # gcp-releasetool # secretstorage @@ -148,23 +155,23 @@ filelock==3.8.0 \ --hash=sha256:55447caa666f2198c5b6b13a26d2084d26fa5b115c00d065664b2124680c4edc \ --hash=sha256:617eb4e5eedc82fc5f47b6d61e4d11cb837c56cb4544e39081099fa17ad109d4 # via virtualenv -gcp-docuploader==0.6.3 \ - --hash=sha256:ba8c9d76b3bbac54b0311c503a373b00edc2dc02d6d54ea9507045adb8e870f7 \ - --hash=sha256:c0f5aaa82ce1854a386197e4e359b120ad6d4e57ae2c812fce42219a3288026b +gcp-docuploader==0.6.4 \ + --hash=sha256:01486419e24633af78fd0167db74a2763974765ee8078ca6eb6964d0ebd388af \ + --hash=sha256:70861190c123d907b3b067da896265ead2eeb9263969d6955c9e0bb091b5ccbf # via -r requirements.in -gcp-releasetool==1.8.7 \ - --hash=sha256:3d2a67c9db39322194afb3b427e9cb0476ce8f2a04033695f0aeb63979fc2b37 \ - --hash=sha256:5e4d28f66e90780d77f3ecf1e9155852b0c3b13cbccb08ab07e66b2357c8da8d +gcp-releasetool==1.10.0 \ + --hash=sha256:72a38ca91b59c24f7e699e9227c90cbe4dd71b789383cb0164b088abae294c83 \ + --hash=sha256:8c7c99320208383d4bb2b808c6880eb7a81424afe7cdba3c8d84b25f4f0e097d # via -r requirements.in -google-api-core==2.8.2 \ - --hash=sha256:06f7244c640322b508b125903bb5701bebabce8832f85aba9335ec00b3d02edc \ - --hash=sha256:93c6a91ccac79079ac6bbf8b74ee75db970cc899278b97d53bc012f35908cf50 +google-api-core==2.10.2 \ + --hash=sha256:10c06f7739fe57781f87523375e8e1a3a4674bf6392cd6131a3222182b971320 \ + 
--hash=sha256:34f24bd1d5f72a8c4519773d99ca6bf080a6c4e041b4e9f024fe230191dda62e # via # google-cloud-core # google-cloud-storage -google-auth==2.11.0 \ - --hash=sha256:be62acaae38d0049c21ca90f27a23847245c9f161ff54ede13af2cb6afecbac9 \ - --hash=sha256:ed65ecf9f681832298e29328e1ef0a3676e3732b2e56f41532d45f70a22de0fb +google-auth==2.14.1 \ + --hash=sha256:ccaa901f31ad5cbb562615eb8b664b3dd0bf5404a67618e642307f00613eda4d \ + --hash=sha256:f5d8701633bebc12e0deea4df8abd8aff31c28b355360597f7f2ee60f2e4d016 # via # gcp-releasetool # google-api-core @@ -174,76 +181,102 @@ google-cloud-core==2.3.2 \ --hash=sha256:8417acf6466be2fa85123441696c4badda48db314c607cf1e5d543fa8bdc22fe \ --hash=sha256:b9529ee7047fd8d4bf4a2182de619154240df17fbe60ead399078c1ae152af9a # via google-cloud-storage -google-cloud-storage==2.5.0 \ - --hash=sha256:19a26c66c317ce542cea0830b7e787e8dac2588b6bfa4d3fd3b871ba16305ab0 \ - --hash=sha256:382f34b91de2212e3c2e7b40ec079d27ee2e3dbbae99b75b1bcd8c63063ce235 +google-cloud-storage==2.6.0 \ + --hash=sha256:104ca28ae61243b637f2f01455cc8a05e8f15a2a18ced96cb587241cdd3820f5 \ + --hash=sha256:4ad0415ff61abdd8bb2ae81c1f8f7ec7d91a1011613f2db87c614c550f97bfe9 # via gcp-docuploader -google-crc32c==1.3.0 \ - --hash=sha256:04e7c220798a72fd0f08242bc8d7a05986b2a08a0573396187fd32c1dcdd58b3 \ - --hash=sha256:05340b60bf05b574159e9bd940152a47d38af3fb43803ffe71f11d704b7696a6 \ - --hash=sha256:12674a4c3b56b706153a358eaa1018c4137a5a04635b92b4652440d3d7386206 \ - --hash=sha256:127f9cc3ac41b6a859bd9dc4321097b1a4f6aa7fdf71b4f9227b9e3ebffb4422 \ - --hash=sha256:13af315c3a0eec8bb8b8d80b8b128cb3fcd17d7e4edafc39647846345a3f003a \ - --hash=sha256:1926fd8de0acb9d15ee757175ce7242e235482a783cd4ec711cc999fc103c24e \ - --hash=sha256:226f2f9b8e128a6ca6a9af9b9e8384f7b53a801907425c9a292553a3a7218ce0 \ - --hash=sha256:276de6273eb074a35bc598f8efbc00c7869c5cf2e29c90748fccc8c898c244df \ - --hash=sha256:318f73f5484b5671f0c7f5f63741ab020a599504ed81d209b5c7129ee4667407 \ - --hash=sha256:3bbce1be3687bbfebe29abdb7631b83e6b25da3f4e1856a1611eb21854b689ea \ - --hash=sha256:42ae4781333e331a1743445931b08ebdad73e188fd554259e772556fc4937c48 \ - --hash=sha256:58be56ae0529c664cc04a9c76e68bb92b091e0194d6e3c50bea7e0f266f73713 \ - --hash=sha256:5da2c81575cc3ccf05d9830f9e8d3c70954819ca9a63828210498c0774fda1a3 \ - --hash=sha256:6311853aa2bba4064d0c28ca54e7b50c4d48e3de04f6770f6c60ebda1e975267 \ - --hash=sha256:650e2917660e696041ab3dcd7abac160b4121cd9a484c08406f24c5964099829 \ - --hash=sha256:6a4db36f9721fdf391646685ecffa404eb986cbe007a3289499020daf72e88a2 \ - --hash=sha256:779cbf1ce375b96111db98fca913c1f5ec11b1d870e529b1dc7354b2681a8c3a \ - --hash=sha256:7f6fe42536d9dcd3e2ffb9d3053f5d05221ae3bbcefbe472bdf2c71c793e3183 \ - --hash=sha256:891f712ce54e0d631370e1f4997b3f182f3368179198efc30d477c75d1f44942 \ - --hash=sha256:95c68a4b9b7828ba0428f8f7e3109c5d476ca44996ed9a5f8aac6269296e2d59 \ - --hash=sha256:96a8918a78d5d64e07c8ea4ed2bc44354e3f93f46a4866a40e8db934e4c0d74b \ - --hash=sha256:9c3cf890c3c0ecfe1510a452a165431b5831e24160c5fcf2071f0f85ca5a47cd \ - --hash=sha256:9f58099ad7affc0754ae42e6d87443299f15d739b0ce03c76f515153a5cda06c \ - --hash=sha256:a0b9e622c3b2b8d0ce32f77eba617ab0d6768b82836391e4f8f9e2074582bf02 \ - --hash=sha256:a7f9cbea4245ee36190f85fe1814e2d7b1e5f2186381b082f5d59f99b7f11328 \ - --hash=sha256:bab4aebd525218bab4ee615786c4581952eadc16b1ff031813a2fd51f0cc7b08 \ - --hash=sha256:c124b8c8779bf2d35d9b721e52d4adb41c9bfbde45e6a3f25f0820caa9aba73f \ - --hash=sha256:c9da0a39b53d2fab3e5467329ed50e951eb91386e9d0d5b12daf593973c3b168 \ - 
--hash=sha256:ca60076c388728d3b6ac3846842474f4250c91efbfe5afa872d3ffd69dd4b318 \ - --hash=sha256:cb6994fff247987c66a8a4e550ef374671c2b82e3c0d2115e689d21e511a652d \ - --hash=sha256:d1c1d6236feab51200272d79b3d3e0f12cf2cbb12b208c835b175a21efdb0a73 \ - --hash=sha256:dd7760a88a8d3d705ff562aa93f8445ead54f58fd482e4f9e2bafb7e177375d4 \ - --hash=sha256:dda4d8a3bb0b50f540f6ff4b6033f3a74e8bf0bd5320b70fab2c03e512a62812 \ - --hash=sha256:e0f1ff55dde0ebcfbef027edc21f71c205845585fffe30d4ec4979416613e9b3 \ - --hash=sha256:e7a539b9be7b9c00f11ef16b55486141bc2cdb0c54762f84e3c6fc091917436d \ - --hash=sha256:eb0b14523758e37802f27b7f8cd973f5f3d33be7613952c0df904b68c4842f0e \ - --hash=sha256:ed447680ff21c14aaceb6a9f99a5f639f583ccfe4ce1a5e1d48eb41c3d6b3217 \ - --hash=sha256:f52a4ad2568314ee713715b1e2d79ab55fab11e8b304fd1462ff5cccf4264b3e \ - --hash=sha256:fbd60c6aaa07c31d7754edbc2334aef50601b7f1ada67a96eb1eb57c7c72378f \ - --hash=sha256:fc28e0db232c62ca0c3600884933178f0825c99be4474cdd645e378a10588125 \ - --hash=sha256:fe31de3002e7b08eb20823b3735b97c86c5926dd0581c7710a680b418a8709d4 \ - --hash=sha256:fec221a051150eeddfdfcff162e6db92c65ecf46cb0f7bb1bf812a1520ec026b \ - --hash=sha256:ff71073ebf0e42258a42a0b34f2c09ec384977e7f6808999102eedd5b49920e3 +google-crc32c==1.5.0 \ + --hash=sha256:024894d9d3cfbc5943f8f230e23950cd4906b2fe004c72e29b209420a1e6b05a \ + --hash=sha256:02c65b9817512edc6a4ae7c7e987fea799d2e0ee40c53ec573a692bee24de876 \ + --hash=sha256:02ebb8bf46c13e36998aeaad1de9b48f4caf545e91d14041270d9dca767b780c \ + --hash=sha256:07eb3c611ce363c51a933bf6bd7f8e3878a51d124acfc89452a75120bc436289 \ + --hash=sha256:1034d91442ead5a95b5aaef90dbfaca8633b0247d1e41621d1e9f9db88c36298 \ + --hash=sha256:116a7c3c616dd14a3de8c64a965828b197e5f2d121fedd2f8c5585c547e87b02 \ + --hash=sha256:19e0a019d2c4dcc5e598cd4a4bc7b008546b0358bd322537c74ad47a5386884f \ + --hash=sha256:1c7abdac90433b09bad6c43a43af253e688c9cfc1c86d332aed13f9a7c7f65e2 \ + --hash=sha256:1e986b206dae4476f41bcec1faa057851f3889503a70e1bdb2378d406223994a \ + --hash=sha256:272d3892a1e1a2dbc39cc5cde96834c236d5327e2122d3aaa19f6614531bb6eb \ + --hash=sha256:278d2ed7c16cfc075c91378c4f47924c0625f5fc84b2d50d921b18b7975bd210 \ + --hash=sha256:2ad40e31093a4af319dadf503b2467ccdc8f67c72e4bcba97f8c10cb078207b5 \ + --hash=sha256:2e920d506ec85eb4ba50cd4228c2bec05642894d4c73c59b3a2fe20346bd00ee \ + --hash=sha256:3359fc442a743e870f4588fcf5dcbc1bf929df1fad8fb9905cd94e5edb02e84c \ + --hash=sha256:37933ec6e693e51a5b07505bd05de57eee12f3e8c32b07da7e73669398e6630a \ + --hash=sha256:398af5e3ba9cf768787eef45c803ff9614cc3e22a5b2f7d7ae116df8b11e3314 \ + --hash=sha256:3b747a674c20a67343cb61d43fdd9207ce5da6a99f629c6e2541aa0e89215bcd \ + --hash=sha256:461665ff58895f508e2866824a47bdee72497b091c730071f2b7575d5762ab65 \ + --hash=sha256:4c6fdd4fccbec90cc8a01fc00773fcd5fa28db683c116ee3cb35cd5da9ef6c37 \ + --hash=sha256:5829b792bf5822fd0a6f6eb34c5f81dd074f01d570ed7f36aa101d6fc7a0a6e4 \ + --hash=sha256:596d1f98fc70232fcb6590c439f43b350cb762fb5d61ce7b0e9db4539654cc13 \ + --hash=sha256:5ae44e10a8e3407dbe138984f21e536583f2bba1be9491239f942c2464ac0894 \ + --hash=sha256:635f5d4dd18758a1fbd1049a8e8d2fee4ffed124462d837d1a02a0e009c3ab31 \ + --hash=sha256:64e52e2b3970bd891309c113b54cf0e4384762c934d5ae56e283f9a0afcd953e \ + --hash=sha256:66741ef4ee08ea0b2cc3c86916ab66b6aef03768525627fd6a1b34968b4e3709 \ + --hash=sha256:67b741654b851abafb7bc625b6d1cdd520a379074e64b6a128e3b688c3c04740 \ + --hash=sha256:6ac08d24c1f16bd2bf5eca8eaf8304812f44af5cfe5062006ec676e7e1d50afc \ + 
--hash=sha256:6f998db4e71b645350b9ac28a2167e6632c239963ca9da411523bb439c5c514d \ + --hash=sha256:72218785ce41b9cfd2fc1d6a017dc1ff7acfc4c17d01053265c41a2c0cc39b8c \ + --hash=sha256:74dea7751d98034887dbd821b7aae3e1d36eda111d6ca36c206c44478035709c \ + --hash=sha256:759ce4851a4bb15ecabae28f4d2e18983c244eddd767f560165563bf9aefbc8d \ + --hash=sha256:77e2fd3057c9d78e225fa0a2160f96b64a824de17840351b26825b0848022906 \ + --hash=sha256:7c074fece789b5034b9b1404a1f8208fc2d4c6ce9decdd16e8220c5a793e6f61 \ + --hash=sha256:7c42c70cd1d362284289c6273adda4c6af8039a8ae12dc451dcd61cdabb8ab57 \ + --hash=sha256:7f57f14606cd1dd0f0de396e1e53824c371e9544a822648cd76c034d209b559c \ + --hash=sha256:83c681c526a3439b5cf94f7420471705bbf96262f49a6fe546a6db5f687a3d4a \ + --hash=sha256:8485b340a6a9e76c62a7dce3c98e5f102c9219f4cfbf896a00cf48caf078d438 \ + --hash=sha256:84e6e8cd997930fc66d5bb4fde61e2b62ba19d62b7abd7a69920406f9ecca946 \ + --hash=sha256:89284716bc6a5a415d4eaa11b1726d2d60a0cd12aadf5439828353662ede9dd7 \ + --hash=sha256:8b87e1a59c38f275c0e3676fc2ab6d59eccecfd460be267ac360cc31f7bcde96 \ + --hash=sha256:8f24ed114432de109aa9fd317278518a5af2d31ac2ea6b952b2f7782b43da091 \ + --hash=sha256:98cb4d057f285bd80d8778ebc4fde6b4d509ac3f331758fb1528b733215443ae \ + --hash=sha256:998679bf62b7fb599d2878aa3ed06b9ce688b8974893e7223c60db155f26bd8d \ + --hash=sha256:9ba053c5f50430a3fcfd36f75aff9caeba0440b2d076afdb79a318d6ca245f88 \ + --hash=sha256:9c99616c853bb585301df6de07ca2cadad344fd1ada6d62bb30aec05219c45d2 \ + --hash=sha256:a1fd716e7a01f8e717490fbe2e431d2905ab8aa598b9b12f8d10abebb36b04dd \ + --hash=sha256:a2355cba1f4ad8b6988a4ca3feed5bff33f6af2d7f134852cf279c2aebfde541 \ + --hash=sha256:b1f8133c9a275df5613a451e73f36c2aea4fe13c5c8997e22cf355ebd7bd0728 \ + --hash=sha256:b8667b48e7a7ef66afba2c81e1094ef526388d35b873966d8a9a447974ed9178 \ + --hash=sha256:ba1eb1843304b1e5537e1fca632fa894d6f6deca8d6389636ee5b4797affb968 \ + --hash=sha256:be82c3c8cfb15b30f36768797a640e800513793d6ae1724aaaafe5bf86f8f346 \ + --hash=sha256:c02ec1c5856179f171e032a31d6f8bf84e5a75c45c33b2e20a3de353b266ebd8 \ + --hash=sha256:c672d99a345849301784604bfeaeba4db0c7aae50b95be04dd651fd2a7310b93 \ + --hash=sha256:c6c777a480337ac14f38564ac88ae82d4cd238bf293f0a22295b66eb89ffced7 \ + --hash=sha256:cae0274952c079886567f3f4f685bcaf5708f0a23a5f5216fdab71f81a6c0273 \ + --hash=sha256:cd67cf24a553339d5062eff51013780a00d6f97a39ca062781d06b3a73b15462 \ + --hash=sha256:d3515f198eaa2f0ed49f8819d5732d70698c3fa37384146079b3799b97667a94 \ + --hash=sha256:d5280312b9af0976231f9e317c20e4a61cd2f9629b7bfea6a693d1878a264ebd \ + --hash=sha256:de06adc872bcd8c2a4e0dc51250e9e65ef2ca91be023b9d13ebd67c2ba552e1e \ + --hash=sha256:e1674e4307fa3024fc897ca774e9c7562c957af85df55efe2988ed9056dc4e57 \ + --hash=sha256:e2096eddb4e7c7bdae4bd69ad364e55e07b8316653234a56552d9c988bd2d61b \ + --hash=sha256:e560628513ed34759456a416bf86b54b2476c59144a9138165c9a1575801d0d9 \ + --hash=sha256:edfedb64740750e1a3b16152620220f51d58ff1b4abceb339ca92e934775c27a \ + --hash=sha256:f13cae8cc389a440def0c8c52057f37359014ccbc9dc1f0827936bcd367c6100 \ + --hash=sha256:f314013e7dcd5cf45ab1945d92e713eec788166262ae8deb2cfacd53def27325 \ + --hash=sha256:f583edb943cf2e09c60441b910d6a20b4d9d626c75a36c8fcac01a6c96c01183 \ + --hash=sha256:fd8536e902db7e365f49e7d9029283403974ccf29b13fc7028b97e2295b33556 \ + --hash=sha256:fe70e325aa68fa4b5edf7d1a4b6f691eb04bbccac0ace68e34820d283b5f80d4 # via google-resumable-media -google-resumable-media==2.3.3 \ - --hash=sha256:27c52620bd364d1c8116eaac4ea2afcbfb81ae9139fb3199652fcac1724bfb6c \ - 
--hash=sha256:5b52774ea7a829a8cdaa8bd2d4c3d4bc660c91b30857ab2668d0eb830f4ea8c5 +google-resumable-media==2.4.0 \ + --hash=sha256:2aa004c16d295c8f6c33b2b4788ba59d366677c0a25ae7382436cb30f776deaa \ + --hash=sha256:8d5518502f92b9ecc84ac46779bd4f09694ecb3ba38a3e7ca737a86d15cbca1f # via google-cloud-storage -googleapis-common-protos==1.56.4 \ - --hash=sha256:8eb2cbc91b69feaf23e32452a7ae60e791e09967d81d4fcc7fc388182d1bd394 \ - --hash=sha256:c25873c47279387cfdcbdafa36149887901d36202cb645a0e4f29686bf6e4417 +googleapis-common-protos==1.57.0 \ + --hash=sha256:27a849d6205838fb6cc3c1c21cb9800707a661bb21c6ce7fb13e99eb1f8a0c46 \ + --hash=sha256:a9f4a1d7f6d9809657b7f1316a1aa527f6664891531bcfcc13b6696e685f443c # via google-api-core -idna==3.3 \ - --hash=sha256:84d9dd047ffa80596e0f246e2eab0b391788b0503584e8945f2368256d2735ff \ - --hash=sha256:9d643ff0a55b762d5cdb124b8eaa99c66322e2157b69160bc32796e824360e6d +idna==3.4 \ + --hash=sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4 \ + --hash=sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2 # via requests -importlib-metadata==4.12.0 \ - --hash=sha256:637245b8bab2b6502fcbc752cc4b7a6f6243bb02b31c5c26156ad103d3d45670 \ - --hash=sha256:7401a975809ea1fdc658c3aa4f78cc2195a0e019c5cbc4c06122884e9ae80c23 +importlib-metadata==5.0.0 \ + --hash=sha256:da31db32b304314d044d3c12c79bd59e307889b287ad12ff387b3500835fc2ab \ + --hash=sha256:ddb0e35065e8938f867ed4928d0ae5bf2a53b7773871bfe6bcc7e4fcdc7dea43 # via # -r requirements.in + # keyring # twine -jaraco-classes==3.2.2 \ - --hash=sha256:6745f113b0b588239ceb49532aa09c3ebb947433ce311ef2f8e3ad64ebb74594 \ - --hash=sha256:e6ef6fd3fcf4579a7a019d87d1e56a883f4e4c35cfe925f86731abc58804e647 +jaraco-classes==3.2.3 \ + --hash=sha256:2353de3288bc6b82120752201c6b1c1a14b058267fa424ed5ce5984e3b922158 \ + --hash=sha256:89559fa5c1d3c34eff6f631ad80bb21f378dbcbb35dd161fd2c6b93f5be2f98a # via keyring jeepney==0.8.0 \ --hash=sha256:5efe48d255973902f6badc3ce55e2aa6c5c3b3bc642059ef3a91247bcfcc5806 \ @@ -255,9 +288,9 @@ jinja2==3.1.2 \ --hash=sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852 \ --hash=sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61 # via gcp-releasetool -keyring==23.9.0 \ - --hash=sha256:4c32a31174faaee48f43a7e2c7e9c3216ec5e95acf22a2bebfb4a1d05056ee44 \ - --hash=sha256:98f060ec95ada2ab910c195a2d4317be6ef87936a766b239c46aa3c7aac4f0db +keyring==23.11.0 \ + --hash=sha256:3dd30011d555f1345dec2c262f0153f2f0ca6bca041fb1dc4588349bb4c0ac1e \ + --hash=sha256:ad192263e2cdd5f12875dedc2da13534359a7e760e77f8d04b50968a821c2361 # via # gcp-releasetool # twine @@ -303,9 +336,9 @@ markupsafe==2.1.1 \ --hash=sha256:f121a1420d4e173a5d96e47e9a0c0dcff965afdf1626d28de1460815f7c4ee7a \ --hash=sha256:fc7b548b17d238737688817ab67deebb30e8073c95749d55538ed473130ec0c7 # via jinja2 -more-itertools==8.14.0 \ - --hash=sha256:1bc4f91ee5b1b31ac7ceacc17c09befe6a40a503907baf9c839c229b5095cfd2 \ - --hash=sha256:c09443cd3d5438b8dafccd867a6bc1cb0894389e90cb53d227456b0b0bccb750 +more-itertools==9.0.0 \ + --hash=sha256:250e83d7e81d0c87ca6bd942e6aeab8cc9daa6096d12c5308f3f92fa5e5c1f41 \ + --hash=sha256:5a6257e40878ef0520b1803990e3e22303a41b5714006c32a3fd8304b26ea1ab # via jaraco-classes nox==2022.8.7 \ --hash=sha256:1b894940551dc5c389f9271d197ca5d655d40bdc6ccf93ed6880e4042760a34b \ @@ -321,34 +354,33 @@ pkginfo==1.8.3 \ --hash=sha256:848865108ec99d4901b2f7e84058b6e7660aae8ae10164e015a6dcf5b242a594 \ --hash=sha256:a84da4318dd86f870a9447a8c98340aa06216bfc6f2b7bdc4b8766984ae1867c # via 
twine -platformdirs==2.5.2 \ - --hash=sha256:027d8e83a2d7de06bbac4e5ef7e023c02b863d7ea5d079477e722bb41ab25788 \ - --hash=sha256:58c8abb07dcb441e6ee4b11d8df0ac856038f944ab98b7be6b27b2a3c7feef19 +platformdirs==2.5.4 \ + --hash=sha256:1006647646d80f16130f052404c6b901e80ee4ed6bef6792e1f238a8969106f7 \ + --hash=sha256:af0276409f9a02373d540bf8480021a048711d572745aef4b7842dad245eba10 # via virtualenv -protobuf==3.20.2 \ - --hash=sha256:03d76b7bd42ac4a6e109742a4edf81ffe26ffd87c5993126d894fe48a120396a \ - --hash=sha256:09e25909c4297d71d97612f04f41cea8fa8510096864f2835ad2f3b3df5a5559 \ - --hash=sha256:18e34a10ae10d458b027d7638a599c964b030c1739ebd035a1dfc0e22baa3bfe \ - --hash=sha256:291fb4307094bf5ccc29f424b42268640e00d5240bf0d9b86bf3079f7576474d \ - --hash=sha256:2c0b040d0b5d5d207936ca2d02f00f765906622c07d3fa19c23a16a8ca71873f \ - --hash=sha256:384164994727f274cc34b8abd41a9e7e0562801361ee77437099ff6dfedd024b \ - --hash=sha256:3cb608e5a0eb61b8e00fe641d9f0282cd0eedb603be372f91f163cbfbca0ded0 \ - --hash=sha256:5d9402bf27d11e37801d1743eada54372f986a372ec9679673bfcc5c60441151 \ - --hash=sha256:712dca319eee507a1e7df3591e639a2b112a2f4a62d40fe7832a16fd19151750 \ - --hash=sha256:7a5037af4e76c975b88c3becdf53922b5ffa3f2cddf657574a4920a3b33b80f3 \ - --hash=sha256:8228e56a865c27163d5d1d1771d94b98194aa6917bcfb6ce139cbfa8e3c27334 \ - --hash=sha256:84a1544252a933ef07bb0b5ef13afe7c36232a774affa673fc3636f7cee1db6c \ - --hash=sha256:84fe5953b18a383fd4495d375fe16e1e55e0a3afe7b4f7b4d01a3a0649fcda9d \ - --hash=sha256:9c673c8bfdf52f903081816b9e0e612186684f4eb4c17eeb729133022d6032e3 \ - --hash=sha256:9f876a69ca55aed879b43c295a328970306e8e80a263ec91cf6e9189243c613b \ - --hash=sha256:a9e5ae5a8e8985c67e8944c23035a0dff2c26b0f5070b2f55b217a1c33bbe8b1 \ - --hash=sha256:b4fdb29c5a7406e3f7ef176b2a7079baa68b5b854f364c21abe327bbeec01cdb \ - --hash=sha256:c184485e0dfba4dfd451c3bd348c2e685d6523543a0f91b9fd4ae90eb09e8422 \ - --hash=sha256:c9cdf251c582c16fd6a9f5e95836c90828d51b0069ad22f463761d27c6c19019 \ - --hash=sha256:e39cf61bb8582bda88cdfebc0db163b774e7e03364bbf9ce1ead13863e81e359 \ - --hash=sha256:e8fbc522303e09036c752a0afcc5c0603e917222d8bedc02813fd73b4b4ed804 \ - --hash=sha256:f34464ab1207114e73bba0794d1257c150a2b89b7a9faf504e00af7c9fd58978 \ - --hash=sha256:f52dabc96ca99ebd2169dadbe018824ebda08a795c7684a0b7d203a290f3adb0 +protobuf==3.20.3 \ + --hash=sha256:03038ac1cfbc41aa21f6afcbcd357281d7521b4157926f30ebecc8d4ea59dcb7 \ + --hash=sha256:28545383d61f55b57cf4df63eebd9827754fd2dc25f80c5253f9184235db242c \ + --hash=sha256:2e3427429c9cffebf259491be0af70189607f365c2f41c7c3764af6f337105f2 \ + --hash=sha256:398a9e0c3eaceb34ec1aee71894ca3299605fa8e761544934378bbc6c97de23b \ + --hash=sha256:44246bab5dd4b7fbd3c0c80b6f16686808fab0e4aca819ade6e8d294a29c7050 \ + --hash=sha256:447d43819997825d4e71bf5769d869b968ce96848b6479397e29fc24c4a5dfe9 \ + --hash=sha256:67a3598f0a2dcbc58d02dd1928544e7d88f764b47d4a286202913f0b2801c2e7 \ + --hash=sha256:74480f79a023f90dc6e18febbf7b8bac7508420f2006fabd512013c0c238f454 \ + --hash=sha256:819559cafa1a373b7096a482b504ae8a857c89593cf3a25af743ac9ecbd23480 \ + --hash=sha256:899dc660cd599d7352d6f10d83c95df430a38b410c1b66b407a6b29265d66469 \ + --hash=sha256:8c0c984a1b8fef4086329ff8dd19ac77576b384079247c770f29cc8ce3afa06c \ + --hash=sha256:9aae4406ea63d825636cc11ffb34ad3379335803216ee3a856787bcf5ccc751e \ + --hash=sha256:a7ca6d488aa8ff7f329d4c545b2dbad8ac31464f1d8b1c87ad1346717731e4db \ + --hash=sha256:b6cc7ba72a8850621bfec987cb72623e703b7fe2b9127a161ce61e61558ad905 \ + 
--hash=sha256:bf01b5720be110540be4286e791db73f84a2b721072a3711efff6c324cdf074b \ + --hash=sha256:c02ce36ec760252242a33967d51c289fd0e1c0e6e5cc9397e2279177716add86 \ + --hash=sha256:d9e4432ff660d67d775c66ac42a67cf2453c27cb4d738fc22cb53b5d84c135d4 \ + --hash=sha256:daa564862dd0d39c00f8086f88700fdbe8bc717e993a21e90711acfed02f2402 \ + --hash=sha256:de78575669dddf6099a8a0f46a27e82a1783c557ccc38ee620ed8cc96d3be7d7 \ + --hash=sha256:e64857f395505ebf3d2569935506ae0dfc4a15cb80dc25261176c784662cdcc4 \ + --hash=sha256:f4bd856d702e5b0d96a00ec6b307b0f51c1982c2bf9c0052cf9019e9a544ba99 \ + --hash=sha256:f4c42102bc82a51108e449cbb32b19b180022941c727bac0cfd50170341f16ee # via # gcp-docuploader # gcp-releasetool @@ -377,9 +409,9 @@ pygments==2.13.0 \ # via # readme-renderer # rich -pyjwt==2.4.0 \ - --hash=sha256:72d1d253f32dbd4f5c88eaf1fdc62f3a19f676ccbadb9dbc5d07e951b2b26daf \ - --hash=sha256:d42908208c699b3b973cbeb01a969ba6a96c821eefb1c5bfe4c390c01d67abba +pyjwt==2.6.0 \ + --hash=sha256:69285c7e31fc44f68a1feb309e948e0df53259d579295e6cfe2b1792329f05fd \ + --hash=sha256:d83c3d892a77bbb74d3e1a2cfa90afaadb60945205d1095d9221f04466f64c14 # via gcp-releasetool pyparsing==3.0.9 \ --hash=sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb \ @@ -392,9 +424,9 @@ python-dateutil==2.8.2 \ --hash=sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86 \ --hash=sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9 # via gcp-releasetool -readme-renderer==37.0 \ - --hash=sha256:07b7ea234e03e58f77cc222e206e6abb8f4c0435becce5104794ee591f9301c5 \ - --hash=sha256:9fa416704703e509eeb900696751c908ddeb2011319d93700d8f18baff887a69 +readme-renderer==37.3 \ + --hash=sha256:cd653186dfc73055656f090f227f5cb22a046d7f71a841dfa305f55c9a513273 \ + --hash=sha256:f67a16caedfa71eef48a31b39708637a6f4664c4394801a7b0d6432d13907343 # via twine requests==2.28.1 \ --hash=sha256:7c5599b102feddaa661c826c56ab4fee28bfd17f5abca1ebbe3e7f19d7c97983 \ @@ -405,17 +437,17 @@ requests==2.28.1 \ # google-cloud-storage # requests-toolbelt # twine -requests-toolbelt==0.9.1 \ - --hash=sha256:380606e1d10dc85c3bd47bf5a6095f815ec007be7a8b69c878507068df059e6f \ - --hash=sha256:968089d4584ad4ad7c171454f0a5c6dac23971e9472521ea3b6d49d610aa6fc0 +requests-toolbelt==0.10.1 \ + --hash=sha256:18565aa58116d9951ac39baa288d3adb5b3ff975c4f25eee78555d89e8f247f7 \ + --hash=sha256:62e09f7ff5ccbda92772a29f394a49c3ad6cb181d568b1337626b2abb628a63d # via twine rfc3986==2.0.0 \ --hash=sha256:50b1502b60e289cb37883f3dfd34532b8873c7de9f49bb546641ce9cbd256ebd \ --hash=sha256:97aacf9dbd4bfd829baad6e6309fa6573aaf1be3f6fa735c8ab05e46cecb261c # via twine -rich==12.5.1 \ - --hash=sha256:2eb4e6894cde1e017976d2975ac210ef515d7548bc595ba20e195fb9628acdeb \ - --hash=sha256:63a5c5ce3673d3d5fbbf23cd87e11ab84b6b451436f1b7f19ec54b6bc36ed7ca +rich==12.6.0 \ + --hash=sha256:a4eb26484f2c82589bd9a17c73d32a010b1e29d89f1604cd9bf3a2097b81bb5e \ + --hash=sha256:ba3a3775974105c221d31141f2c116f4fd65c5ceb0698657a11e9f295ec93fd0 # via twine rsa==4.9 \ --hash=sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7 \ @@ -437,9 +469,9 @@ twine==4.0.1 \ --hash=sha256:42026c18e394eac3e06693ee52010baa5313e4811d5a11050e7d48436cf41b9e \ --hash=sha256:96b1cf12f7ae611a4a40b6ae8e9570215daff0611828f5fe1f37a16255ab24a0 # via -r requirements.in -typing-extensions==4.3.0 \ - --hash=sha256:25642c956049920a5aa49edcdd6ab1e06d7e5d467fc00e0506c44ac86fbfca02 \ - --hash=sha256:e6d2677a32f47fc7eb2795db1dd15c1f34eff616bcaf2cfb5e997f854fa1c4a6 +typing-extensions==4.4.0 \ + 
--hash=sha256:1511434bb92bf8dd198c12b1cc812e800d4181cfcb867674e0f8279cc93087aa \ + --hash=sha256:16fa4864408f655d35ec496218b85f79b3437c829e93320c7c9215ccfd92489e # via -r requirements.in urllib3==1.26.12 \ --hash=sha256:3fa96cf423e6987997fc326ae8df396db2a8b7c667747d47ddd8ecba91f4a74e \ @@ -447,25 +479,25 @@ urllib3==1.26.12 \ # via # requests # twine -virtualenv==20.16.4 \ - --hash=sha256:014f766e4134d0008dcaa1f95bafa0fb0f575795d07cae50b1bee514185d6782 \ - --hash=sha256:035ed57acce4ac35c82c9d8802202b0e71adac011a511ff650cbcf9635006a22 +virtualenv==20.16.7 \ + --hash=sha256:8691e3ff9387f743e00f6bb20f70121f5e4f596cae754531f2b3b3a1b1ac696e \ + --hash=sha256:efd66b00386fdb7dbe4822d172303f40cd05e50e01740b19ea42425cbe653e29 # via nox webencodings==0.5.1 \ --hash=sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78 \ --hash=sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923 # via bleach -wheel==0.37.1 \ - --hash=sha256:4bdcd7d840138086126cd09254dc6195fb4fc6f01c050a1d7236f2630db1d22a \ - --hash=sha256:e9a504e793efbca1b8e0e9cb979a249cf4a0a7b5b8c9e8b65a5e39d49529c1c4 +wheel==0.38.4 \ + --hash=sha256:965f5259b566725405b05e7cf774052044b1ed30119b5d586b2703aafe8719ac \ + --hash=sha256:b60533f3f5d530e971d6737ca6d58681ee434818fab630c83a734bb10c083ce8 # via -r requirements.in -zipp==3.8.1 \ - --hash=sha256:05b45f1ee8f807d0cc928485ca40a07cb491cf092ff587c0df9cb1fd154848d2 \ - --hash=sha256:47c40d7fe183a6f21403a199b3e4192cca5774656965b0a4988ad2f8feb5f009 +zipp==3.10.0 \ + --hash=sha256:4fcb6f278987a6605757302a6e40e896257570d11c51628968ccb2a47e80c6c1 \ + --hash=sha256:7a7262fd930bd3e36c50b9a64897aec3fafff3dfdeec9623ae22b40e93f99bb8 # via importlib-metadata # The following packages are considered to be unsafe in a requirements file: -setuptools==65.2.0 \ - --hash=sha256:7f4bc85450898a09f76ebf28b72fa25bc7111f6c7d665d514a60bba9c75ef2a9 \ - --hash=sha256:a3ca5857c89f82f5c9410e8508cb32f4872a3bafd4aa7ae122a24ca33bccc750 +setuptools==65.5.1 \ + --hash=sha256:d0b9a8433464d5800cbe05094acf5c6d52a91bfac9b52bcfc4d41382be5d5d31 \ + --hash=sha256:e197a19aa8ec9722928f2206f8de752def0e4c9fc6953527360d1c36d94ddb2f # via -r requirements.in diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index ae1810bcc69e..4250d9f037bc 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -311,12 +311,16 @@ def cover(session): session.run("coverage", "erase") -@nox.session(python=DEFAULT_PYTHON_VERSION) +@nox.session(python="3.9") def docs(session): """Build the docs for this library.""" session.install("-e", ".") - session.install("sphinx==4.0.1", "alabaster", "recommonmark") + session.install( + "sphinx==4.0.1", + "alabaster", + "recommonmark", + ) shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) session.run( @@ -333,13 +337,16 @@ def docs(session): ) -@nox.session(python=DEFAULT_PYTHON_VERSION) +@nox.session(python="3.9") def docfx(session): """Build the docfx yaml files for this library.""" session.install("-e", ".") session.install( - "sphinx==4.0.1", "alabaster", "recommonmark", "gcp-sphinx-docfx-yaml" + "sphinx==4.0.1", + "alabaster", + "recommonmark", + "gcp-sphinx-docfx-yaml", ) shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) From 97d956daa6e37ab36040a4ab17da4f0a9af92cd1 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Fri, 25 Nov 2022 10:54:40 -0800 Subject: [PATCH 675/892] chore(python): drop 
flake8-import-order in samples noxfile (#704) Source-Link: https://github.com/googleapis/synthtool/commit/6ed3a831cb9ff69ef8a504c353e098ec0192ad93 Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:3abfa0f1886adaf0b83f07cb117b24a639ea1cb9cffe56d43280b977033563eb Co-authored-by: Owl Bot --- .../.github/.OwlBot.lock.yaml | 2 +- .../samples/beam/noxfile.py | 26 +++---------------- .../samples/hello/noxfile.py | 26 +++---------------- .../samples/hello_happybase/noxfile.py | 26 +++---------------- .../samples/instanceadmin/noxfile.py | 26 +++---------------- .../samples/metricscaler/noxfile.py | 26 +++---------------- .../samples/quickstart/noxfile.py | 26 +++---------------- .../samples/quickstart_happybase/noxfile.py | 26 +++---------------- .../samples/snippets/deletes/noxfile.py | 26 +++---------------- .../samples/snippets/filters/noxfile.py | 26 +++---------------- .../samples/snippets/reads/noxfile.py | 26 +++---------------- .../samples/snippets/writes/noxfile.py | 26 +++---------------- .../samples/tableadmin/noxfile.py | 26 +++---------------- 13 files changed, 37 insertions(+), 277 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 3f1ccc085ef7..bb21147e4c23 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,4 +13,4 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:e6cbd61f1838d9ff6a31436dfc13717f372a7482a82fc1863ca954ec47bff8c8 + digest: sha256:3abfa0f1886adaf0b83f07cb117b24a639ea1cb9cffe56d43280b977033563eb diff --git a/packages/google-cloud-bigtable/samples/beam/noxfile.py b/packages/google-cloud-bigtable/samples/beam/noxfile.py index c0e636500ebf..e17b8192dd68 100644 --- a/packages/google-cloud-bigtable/samples/beam/noxfile.py +++ b/packages/google-cloud-bigtable/samples/beam/noxfile.py @@ -18,7 +18,7 @@ import os from pathlib import Path import sys -from typing import Callable, Dict, List, Optional +from typing import Callable, Dict, Optional import nox @@ -107,22 +107,6 @@ def get_pytest_env_vars() -> Dict[str, str]: # -def _determine_local_import_names(start_dir: str) -> List[str]: - """Determines all import names that should be considered "local". - - This is used when running the linter to insure that import order is - properly checked. - """ - file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)] - return [ - basename - for basename, extension in file_ext_pairs - if extension == ".py" - or os.path.isdir(os.path.join(start_dir, basename)) - and basename not in ("__pycache__") - ] - - # Linting with flake8. 
# # We ignore the following rules: @@ -137,7 +121,6 @@ def _determine_local_import_names(start_dir: str) -> List[str]: "--show-source", "--builtin=gettext", "--max-complexity=20", - "--import-order-style=google", "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py", "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202", "--max-line-length=88", @@ -147,14 +130,11 @@ def _determine_local_import_names(start_dir: str) -> List[str]: @nox.session def lint(session: nox.sessions.Session) -> None: if not TEST_CONFIG["enforce_type_hints"]: - session.install("flake8", "flake8-import-order") + session.install("flake8") else: - session.install("flake8", "flake8-import-order", "flake8-annotations") + session.install("flake8", "flake8-annotations") - local_names = _determine_local_import_names(".") args = FLAKE8_COMMON_ARGS + [ - "--application-import-names", - ",".join(local_names), ".", ] session.run("flake8", *args) diff --git a/packages/google-cloud-bigtable/samples/hello/noxfile.py b/packages/google-cloud-bigtable/samples/hello/noxfile.py index 0398d72ff690..f5c32b22789b 100644 --- a/packages/google-cloud-bigtable/samples/hello/noxfile.py +++ b/packages/google-cloud-bigtable/samples/hello/noxfile.py @@ -18,7 +18,7 @@ import os from pathlib import Path import sys -from typing import Callable, Dict, List, Optional +from typing import Callable, Dict, Optional import nox @@ -109,22 +109,6 @@ def get_pytest_env_vars() -> Dict[str, str]: # -def _determine_local_import_names(start_dir: str) -> List[str]: - """Determines all import names that should be considered "local". - - This is used when running the linter to insure that import order is - properly checked. - """ - file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)] - return [ - basename - for basename, extension in file_ext_pairs - if extension == ".py" - or os.path.isdir(os.path.join(start_dir, basename)) - and basename not in ("__pycache__") - ] - - # Linting with flake8. 
# # We ignore the following rules: @@ -139,7 +123,6 @@ def _determine_local_import_names(start_dir: str) -> List[str]: "--show-source", "--builtin=gettext", "--max-complexity=20", - "--import-order-style=google", "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py", "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202", "--max-line-length=88", @@ -149,14 +132,11 @@ def _determine_local_import_names(start_dir: str) -> List[str]: @nox.session def lint(session: nox.sessions.Session) -> None: if not TEST_CONFIG["enforce_type_hints"]: - session.install("flake8", "flake8-import-order") + session.install("flake8") else: - session.install("flake8", "flake8-import-order", "flake8-annotations") + session.install("flake8", "flake8-annotations") - local_names = _determine_local_import_names(".") args = FLAKE8_COMMON_ARGS + [ - "--application-import-names", - ",".join(local_names), ".", ] session.run("flake8", *args) diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py b/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py index 0398d72ff690..f5c32b22789b 100644 --- a/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py +++ b/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py @@ -18,7 +18,7 @@ import os from pathlib import Path import sys -from typing import Callable, Dict, List, Optional +from typing import Callable, Dict, Optional import nox @@ -109,22 +109,6 @@ def get_pytest_env_vars() -> Dict[str, str]: # -def _determine_local_import_names(start_dir: str) -> List[str]: - """Determines all import names that should be considered "local". - - This is used when running the linter to insure that import order is - properly checked. - """ - file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)] - return [ - basename - for basename, extension in file_ext_pairs - if extension == ".py" - or os.path.isdir(os.path.join(start_dir, basename)) - and basename not in ("__pycache__") - ] - - # Linting with flake8. 
# # We ignore the following rules: @@ -139,7 +123,6 @@ def _determine_local_import_names(start_dir: str) -> List[str]: "--show-source", "--builtin=gettext", "--max-complexity=20", - "--import-order-style=google", "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py", "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202", "--max-line-length=88", @@ -149,14 +132,11 @@ def _determine_local_import_names(start_dir: str) -> List[str]: @nox.session def lint(session: nox.sessions.Session) -> None: if not TEST_CONFIG["enforce_type_hints"]: - session.install("flake8", "flake8-import-order") + session.install("flake8") else: - session.install("flake8", "flake8-import-order", "flake8-annotations") + session.install("flake8", "flake8-annotations") - local_names = _determine_local_import_names(".") args = FLAKE8_COMMON_ARGS + [ - "--application-import-names", - ",".join(local_names), ".", ] session.run("flake8", *args) diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py b/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py index 0398d72ff690..f5c32b22789b 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py +++ b/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py @@ -18,7 +18,7 @@ import os from pathlib import Path import sys -from typing import Callable, Dict, List, Optional +from typing import Callable, Dict, Optional import nox @@ -109,22 +109,6 @@ def get_pytest_env_vars() -> Dict[str, str]: # -def _determine_local_import_names(start_dir: str) -> List[str]: - """Determines all import names that should be considered "local". - - This is used when running the linter to insure that import order is - properly checked. - """ - file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)] - return [ - basename - for basename, extension in file_ext_pairs - if extension == ".py" - or os.path.isdir(os.path.join(start_dir, basename)) - and basename not in ("__pycache__") - ] - - # Linting with flake8. 
# # We ignore the following rules: @@ -139,7 +123,6 @@ def _determine_local_import_names(start_dir: str) -> List[str]: "--show-source", "--builtin=gettext", "--max-complexity=20", - "--import-order-style=google", "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py", "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202", "--max-line-length=88", @@ -149,14 +132,11 @@ def _determine_local_import_names(start_dir: str) -> List[str]: @nox.session def lint(session: nox.sessions.Session) -> None: if not TEST_CONFIG["enforce_type_hints"]: - session.install("flake8", "flake8-import-order") + session.install("flake8") else: - session.install("flake8", "flake8-import-order", "flake8-annotations") + session.install("flake8", "flake8-annotations") - local_names = _determine_local_import_names(".") args = FLAKE8_COMMON_ARGS + [ - "--application-import-names", - ",".join(local_names), ".", ] session.run("flake8", *args) diff --git a/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py b/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py index 0398d72ff690..f5c32b22789b 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py +++ b/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py @@ -18,7 +18,7 @@ import os from pathlib import Path import sys -from typing import Callable, Dict, List, Optional +from typing import Callable, Dict, Optional import nox @@ -109,22 +109,6 @@ def get_pytest_env_vars() -> Dict[str, str]: # -def _determine_local_import_names(start_dir: str) -> List[str]: - """Determines all import names that should be considered "local". - - This is used when running the linter to insure that import order is - properly checked. - """ - file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)] - return [ - basename - for basename, extension in file_ext_pairs - if extension == ".py" - or os.path.isdir(os.path.join(start_dir, basename)) - and basename not in ("__pycache__") - ] - - # Linting with flake8. 
# # We ignore the following rules: @@ -139,7 +123,6 @@ def _determine_local_import_names(start_dir: str) -> List[str]: "--show-source", "--builtin=gettext", "--max-complexity=20", - "--import-order-style=google", "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py", "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202", "--max-line-length=88", @@ -149,14 +132,11 @@ def _determine_local_import_names(start_dir: str) -> List[str]: @nox.session def lint(session: nox.sessions.Session) -> None: if not TEST_CONFIG["enforce_type_hints"]: - session.install("flake8", "flake8-import-order") + session.install("flake8") else: - session.install("flake8", "flake8-import-order", "flake8-annotations") + session.install("flake8", "flake8-annotations") - local_names = _determine_local_import_names(".") args = FLAKE8_COMMON_ARGS + [ - "--application-import-names", - ",".join(local_names), ".", ] session.run("flake8", *args) diff --git a/packages/google-cloud-bigtable/samples/quickstart/noxfile.py b/packages/google-cloud-bigtable/samples/quickstart/noxfile.py index 0398d72ff690..f5c32b22789b 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/noxfile.py +++ b/packages/google-cloud-bigtable/samples/quickstart/noxfile.py @@ -18,7 +18,7 @@ import os from pathlib import Path import sys -from typing import Callable, Dict, List, Optional +from typing import Callable, Dict, Optional import nox @@ -109,22 +109,6 @@ def get_pytest_env_vars() -> Dict[str, str]: # -def _determine_local_import_names(start_dir: str) -> List[str]: - """Determines all import names that should be considered "local". - - This is used when running the linter to insure that import order is - properly checked. - """ - file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)] - return [ - basename - for basename, extension in file_ext_pairs - if extension == ".py" - or os.path.isdir(os.path.join(start_dir, basename)) - and basename not in ("__pycache__") - ] - - # Linting with flake8. 
# # We ignore the following rules: @@ -139,7 +123,6 @@ def _determine_local_import_names(start_dir: str) -> List[str]: "--show-source", "--builtin=gettext", "--max-complexity=20", - "--import-order-style=google", "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py", "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202", "--max-line-length=88", @@ -149,14 +132,11 @@ def _determine_local_import_names(start_dir: str) -> List[str]: @nox.session def lint(session: nox.sessions.Session) -> None: if not TEST_CONFIG["enforce_type_hints"]: - session.install("flake8", "flake8-import-order") + session.install("flake8") else: - session.install("flake8", "flake8-import-order", "flake8-annotations") + session.install("flake8", "flake8-annotations") - local_names = _determine_local_import_names(".") args = FLAKE8_COMMON_ARGS + [ - "--application-import-names", - ",".join(local_names), ".", ] session.run("flake8", *args) diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py b/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py index 0398d72ff690..f5c32b22789b 100644 --- a/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py +++ b/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py @@ -18,7 +18,7 @@ import os from pathlib import Path import sys -from typing import Callable, Dict, List, Optional +from typing import Callable, Dict, Optional import nox @@ -109,22 +109,6 @@ def get_pytest_env_vars() -> Dict[str, str]: # -def _determine_local_import_names(start_dir: str) -> List[str]: - """Determines all import names that should be considered "local". - - This is used when running the linter to insure that import order is - properly checked. - """ - file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)] - return [ - basename - for basename, extension in file_ext_pairs - if extension == ".py" - or os.path.isdir(os.path.join(start_dir, basename)) - and basename not in ("__pycache__") - ] - - # Linting with flake8. 
# # We ignore the following rules: @@ -139,7 +123,6 @@ def _determine_local_import_names(start_dir: str) -> List[str]: "--show-source", "--builtin=gettext", "--max-complexity=20", - "--import-order-style=google", "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py", "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202", "--max-line-length=88", @@ -149,14 +132,11 @@ def _determine_local_import_names(start_dir: str) -> List[str]: @nox.session def lint(session: nox.sessions.Session) -> None: if not TEST_CONFIG["enforce_type_hints"]: - session.install("flake8", "flake8-import-order") + session.install("flake8") else: - session.install("flake8", "flake8-import-order", "flake8-annotations") + session.install("flake8", "flake8-annotations") - local_names = _determine_local_import_names(".") args = FLAKE8_COMMON_ARGS + [ - "--application-import-names", - ",".join(local_names), ".", ] session.run("flake8", *args) diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/deletes/noxfile.py index 0398d72ff690..f5c32b22789b 100644 --- a/packages/google-cloud-bigtable/samples/snippets/deletes/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/noxfile.py @@ -18,7 +18,7 @@ import os from pathlib import Path import sys -from typing import Callable, Dict, List, Optional +from typing import Callable, Dict, Optional import nox @@ -109,22 +109,6 @@ def get_pytest_env_vars() -> Dict[str, str]: # -def _determine_local_import_names(start_dir: str) -> List[str]: - """Determines all import names that should be considered "local". - - This is used when running the linter to insure that import order is - properly checked. - """ - file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)] - return [ - basename - for basename, extension in file_ext_pairs - if extension == ".py" - or os.path.isdir(os.path.join(start_dir, basename)) - and basename not in ("__pycache__") - ] - - # Linting with flake8. 
# # We ignore the following rules: @@ -139,7 +123,6 @@ def _determine_local_import_names(start_dir: str) -> List[str]: "--show-source", "--builtin=gettext", "--max-complexity=20", - "--import-order-style=google", "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py", "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202", "--max-line-length=88", @@ -149,14 +132,11 @@ def _determine_local_import_names(start_dir: str) -> List[str]: @nox.session def lint(session: nox.sessions.Session) -> None: if not TEST_CONFIG["enforce_type_hints"]: - session.install("flake8", "flake8-import-order") + session.install("flake8") else: - session.install("flake8", "flake8-import-order", "flake8-annotations") + session.install("flake8", "flake8-annotations") - local_names = _determine_local_import_names(".") args = FLAKE8_COMMON_ARGS + [ - "--application-import-names", - ",".join(local_names), ".", ] session.run("flake8", *args) diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py index 0398d72ff690..f5c32b22789b 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py @@ -18,7 +18,7 @@ import os from pathlib import Path import sys -from typing import Callable, Dict, List, Optional +from typing import Callable, Dict, Optional import nox @@ -109,22 +109,6 @@ def get_pytest_env_vars() -> Dict[str, str]: # -def _determine_local_import_names(start_dir: str) -> List[str]: - """Determines all import names that should be considered "local". - - This is used when running the linter to insure that import order is - properly checked. - """ - file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)] - return [ - basename - for basename, extension in file_ext_pairs - if extension == ".py" - or os.path.isdir(os.path.join(start_dir, basename)) - and basename not in ("__pycache__") - ] - - # Linting with flake8. 
# # We ignore the following rules: @@ -139,7 +123,6 @@ def _determine_local_import_names(start_dir: str) -> List[str]: "--show-source", "--builtin=gettext", "--max-complexity=20", - "--import-order-style=google", "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py", "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202", "--max-line-length=88", @@ -149,14 +132,11 @@ def _determine_local_import_names(start_dir: str) -> List[str]: @nox.session def lint(session: nox.sessions.Session) -> None: if not TEST_CONFIG["enforce_type_hints"]: - session.install("flake8", "flake8-import-order") + session.install("flake8") else: - session.install("flake8", "flake8-import-order", "flake8-annotations") + session.install("flake8", "flake8-annotations") - local_names = _determine_local_import_names(".") args = FLAKE8_COMMON_ARGS + [ - "--application-import-names", - ",".join(local_names), ".", ] session.run("flake8", *args) diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py index 0398d72ff690..f5c32b22789b 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py @@ -18,7 +18,7 @@ import os from pathlib import Path import sys -from typing import Callable, Dict, List, Optional +from typing import Callable, Dict, Optional import nox @@ -109,22 +109,6 @@ def get_pytest_env_vars() -> Dict[str, str]: # -def _determine_local_import_names(start_dir: str) -> List[str]: - """Determines all import names that should be considered "local". - - This is used when running the linter to insure that import order is - properly checked. - """ - file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)] - return [ - basename - for basename, extension in file_ext_pairs - if extension == ".py" - or os.path.isdir(os.path.join(start_dir, basename)) - and basename not in ("__pycache__") - ] - - # Linting with flake8. 
# # We ignore the following rules: @@ -139,7 +123,6 @@ def _determine_local_import_names(start_dir: str) -> List[str]: "--show-source", "--builtin=gettext", "--max-complexity=20", - "--import-order-style=google", "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py", "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202", "--max-line-length=88", @@ -149,14 +132,11 @@ def _determine_local_import_names(start_dir: str) -> List[str]: @nox.session def lint(session: nox.sessions.Session) -> None: if not TEST_CONFIG["enforce_type_hints"]: - session.install("flake8", "flake8-import-order") + session.install("flake8") else: - session.install("flake8", "flake8-import-order", "flake8-annotations") + session.install("flake8", "flake8-annotations") - local_names = _determine_local_import_names(".") args = FLAKE8_COMMON_ARGS + [ - "--application-import-names", - ",".join(local_names), ".", ] session.run("flake8", *args) diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py index 0398d72ff690..f5c32b22789b 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py @@ -18,7 +18,7 @@ import os from pathlib import Path import sys -from typing import Callable, Dict, List, Optional +from typing import Callable, Dict, Optional import nox @@ -109,22 +109,6 @@ def get_pytest_env_vars() -> Dict[str, str]: # -def _determine_local_import_names(start_dir: str) -> List[str]: - """Determines all import names that should be considered "local". - - This is used when running the linter to insure that import order is - properly checked. - """ - file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)] - return [ - basename - for basename, extension in file_ext_pairs - if extension == ".py" - or os.path.isdir(os.path.join(start_dir, basename)) - and basename not in ("__pycache__") - ] - - # Linting with flake8. 
# # We ignore the following rules: @@ -139,7 +123,6 @@ def _determine_local_import_names(start_dir: str) -> List[str]: "--show-source", "--builtin=gettext", "--max-complexity=20", - "--import-order-style=google", "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py", "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202", "--max-line-length=88", @@ -149,14 +132,11 @@ def _determine_local_import_names(start_dir: str) -> List[str]: @nox.session def lint(session: nox.sessions.Session) -> None: if not TEST_CONFIG["enforce_type_hints"]: - session.install("flake8", "flake8-import-order") + session.install("flake8") else: - session.install("flake8", "flake8-import-order", "flake8-annotations") + session.install("flake8", "flake8-annotations") - local_names = _determine_local_import_names(".") args = FLAKE8_COMMON_ARGS + [ - "--application-import-names", - ",".join(local_names), ".", ] session.run("flake8", *args) diff --git a/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py b/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py index 0398d72ff690..f5c32b22789b 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py +++ b/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py @@ -18,7 +18,7 @@ import os from pathlib import Path import sys -from typing import Callable, Dict, List, Optional +from typing import Callable, Dict, Optional import nox @@ -109,22 +109,6 @@ def get_pytest_env_vars() -> Dict[str, str]: # -def _determine_local_import_names(start_dir: str) -> List[str]: - """Determines all import names that should be considered "local". - - This is used when running the linter to insure that import order is - properly checked. - """ - file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)] - return [ - basename - for basename, extension in file_ext_pairs - if extension == ".py" - or os.path.isdir(os.path.join(start_dir, basename)) - and basename not in ("__pycache__") - ] - - # Linting with flake8. 
# # We ignore the following rules: @@ -139,7 +123,6 @@ def _determine_local_import_names(start_dir: str) -> List[str]: "--show-source", "--builtin=gettext", "--max-complexity=20", - "--import-order-style=google", "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py", "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202", "--max-line-length=88", @@ -149,14 +132,11 @@ def _determine_local_import_names(start_dir: str) -> List[str]: @nox.session def lint(session: nox.sessions.Session) -> None: if not TEST_CONFIG["enforce_type_hints"]: - session.install("flake8", "flake8-import-order") + session.install("flake8") else: - session.install("flake8", "flake8-import-order", "flake8-annotations") + session.install("flake8", "flake8-annotations") - local_names = _determine_local_import_names(".") args = FLAKE8_COMMON_ARGS + [ - "--application-import-names", - ",".join(local_names), ".", ] session.run("flake8", *args) From f47f8120e702681f58e10d1f55eec632285066e4 Mon Sep 17 00:00:00 2001 From: Mariatta Wijaya Date: Fri, 25 Nov 2022 11:16:20 -0800 Subject: [PATCH 676/892] chore: Remove the dot from release please config (#703) --- .../{.release-please-config.json => release-please-config.json} | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) rename packages/google-cloud-bigtable/{.release-please-config.json => release-please-config.json} (99%) diff --git a/packages/google-cloud-bigtable/.release-please-config.json b/packages/google-cloud-bigtable/release-please-config.json similarity index 99% rename from packages/google-cloud-bigtable/.release-please-config.json rename to packages/google-cloud-bigtable/release-please-config.json index dcbfec72b1cf..a53c32551d9e 100644 --- a/packages/google-cloud-bigtable/.release-please-config.json +++ b/packages/google-cloud-bigtable/release-please-config.json @@ -17,4 +17,4 @@ } ], "initial-version": "2.13.2" -} \ No newline at end of file +} From c45857160610ba486a0e0b8a3c41f03ac735c1c1 Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Thu, 1 Dec 2022 15:47:11 -0800 Subject: [PATCH 677/892] chore(main): release 2.14.0 (#706) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- .../.release-please-manifest.json | 2 +- packages/google-cloud-bigtable/CHANGELOG.md | 19 +++++++++++++++++++ .../google/cloud/bigtable/gapic_version.py | 2 +- .../cloud/bigtable_admin/gapic_version.py | 2 +- 4 files changed, 22 insertions(+), 3 deletions(-) diff --git a/packages/google-cloud-bigtable/.release-please-manifest.json b/packages/google-cloud-bigtable/.release-please-manifest.json index bc12e128dd66..851649e8452b 100644 --- a/packages/google-cloud-bigtable/.release-please-manifest.json +++ b/packages/google-cloud-bigtable/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "2.13.2" + ".": "2.14.0" } \ No newline at end of file diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 5d74f2061bb0..cafe6bf86843 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,25 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.14.0](https://github.com/googleapis/python-bigtable/compare/v2.13.2...v2.14.0) (2022-11-30) + + +### Features + +* Add typing to proto.Message based class attributes ([c1538d5](https://github.com/googleapis/python-bigtable/commit/c1538d5c5a001a9febb4b466d3d09fd1fd167f66)) +* remove enum value 
ReadRowsRequest.RequestStatsView.REQUEST_STATS_EFFICIENCY ([c1538d5](https://github.com/googleapis/python-bigtable/commit/c1538d5c5a001a9febb4b466d3d09fd1fd167f66)) +* remove field ReadIterationStats.deletes_seen ([c1538d5](https://github.com/googleapis/python-bigtable/commit/c1538d5c5a001a9febb4b466d3d09fd1fd167f66)) +* remove field RequestStats.read_efficiency_stats ([c1538d5](https://github.com/googleapis/python-bigtable/commit/c1538d5c5a001a9febb4b466d3d09fd1fd167f66)) +* remove proto ReadEfficiencyStats ([c1538d5](https://github.com/googleapis/python-bigtable/commit/c1538d5c5a001a9febb4b466d3d09fd1fd167f66)) +* rename field RequestStats.all_read_stats to full_read_stats_view ([c1538d5](https://github.com/googleapis/python-bigtable/commit/c1538d5c5a001a9febb4b466d3d09fd1fd167f66)) +* rename proto AllReadStats to FullReadStatsView ([c1538d5](https://github.com/googleapis/python-bigtable/commit/c1538d5c5a001a9febb4b466d3d09fd1fd167f66)) +* rename proto ReadIteratorStats to ReadIterationStats ([c1538d5](https://github.com/googleapis/python-bigtable/commit/c1538d5c5a001a9febb4b466d3d09fd1fd167f66)) + + +### Bug Fixes + +* Add dict typing for client_options ([c1538d5](https://github.com/googleapis/python-bigtable/commit/c1538d5c5a001a9febb4b466d3d09fd1fd167f66)) + ## [2.13.2](https://github.com/googleapis/python-bigtable/compare/v2.13.1...v2.13.2) (2022-10-20) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py index c790ba98e3c2..8be002907dd0 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.13.2" # {x-release-please-version} +__version__ = "2.14.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py index c790ba98e3c2..8be002907dd0 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "2.13.2" # {x-release-please-version} +__version__ = "2.14.0" # {x-release-please-version} From 793528ab392c0a8eb3d2ebdddb1272c57ecd2a2a Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Fri, 2 Dec 2022 20:49:15 +0100 Subject: [PATCH 678/892] chore(deps): update dependency google-cloud-bigtable to v2.14.0 (#707) --- packages/google-cloud-bigtable/samples/beam/requirements.txt | 2 +- packages/google-cloud-bigtable/samples/hello/requirements.txt | 2 +- .../samples/instanceadmin/requirements.txt | 2 +- .../google-cloud-bigtable/samples/metricscaler/requirements.txt | 2 +- .../google-cloud-bigtable/samples/quickstart/requirements.txt | 2 +- .../samples/snippets/deletes/requirements.txt | 2 +- .../samples/snippets/filters/requirements.txt | 2 +- .../samples/snippets/reads/requirements.txt | 2 +- .../samples/snippets/writes/requirements.txt | 2 +- .../google-cloud-bigtable/samples/tableadmin/requirements.txt | 2 +- 10 files changed, 10 insertions(+), 10 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index 785812001bcc..49731e86a691 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ apache-beam==2.43.0 -google-cloud-bigtable==2.13.2 +google-cloud-bigtable==2.14.0 google-cloud-core==2.3.2 diff --git a/packages/google-cloud-bigtable/samples/hello/requirements.txt b/packages/google-cloud-bigtable/samples/hello/requirements.txt index cd2b6a519113..02fc4ddcadaa 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.13.2 +google-cloud-bigtable==2.14.0 google-cloud-core==2.3.2 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt index 486e35607438..3e443bdbdee6 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.13.2 +google-cloud-bigtable==2.14.0 backoff==2.2.1 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index fe6c200a4d5c..f2e87d0e772d 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.13.2 +google-cloud-bigtable==2.14.0 google-cloud-monitoring==2.11.3 diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt index 95aea8cd5c3a..12450cfa5413 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.13.2 +google-cloud-bigtable==2.14.0 diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt index a270d6320583..93f521ed0d9c 100644 --- a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.13.2 
+google-cloud-bigtable==2.14.0 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt index a270d6320583..93f521ed0d9c 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.13.2 +google-cloud-bigtable==2.14.0 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt index a270d6320583..93f521ed0d9c 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.13.2 +google-cloud-bigtable==2.14.0 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt index 0af6c5ba55ed..65f3afdf3aeb 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.13.2 \ No newline at end of file +google-cloud-bigtable==2.14.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt index 95aea8cd5c3a..12450cfa5413 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.13.2 +google-cloud-bigtable==2.14.0 From dc12608d6358ea660102b058162d6283c671321b Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Tue, 6 Dec 2022 16:04:11 -0500 Subject: [PATCH 679/892] fix(deps): Require google-api-core >=1.34.0, >=2.11.0 (#708) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix(deps): Require google-api-core >=1.34.0, >=2.11.0 fix: Drop usage of pkg_resources fix: Fix timeout default values docs(samples): Snippetgen should call await on the operation coroutine before calling result PiperOrigin-RevId: 493260409 Source-Link: https://github.com/googleapis/googleapis/commit/fea43879f83a8d0dacc9353b3f75f8f46d37162f Source-Link: https://github.com/googleapis/googleapis-gen/commit/387b7344c7529ee44be84e613b19a820508c612b Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiMzg3YjczNDRjNzUyOWVlNDRiZTg0ZTYxM2IxOWE4MjA1MDhjNjEyYiJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * update release-please-config.json * drop pkg_resources * update the version in gapic_version.py * fix typo * lint Co-authored-by: Owl Bot Co-authored-by: Anthonios Partheniou --- .../google/cloud/bigtable/__init__.py | 10 +-- .../cloud/bigtable_admin_v2/gapic_version.py | 16 +++++ .../bigtable_instance_admin/async_client.py | 56 ++++++++--------- .../bigtable_instance_admin/client.py | 56 ++++++++--------- .../transports/base.py | 14 ++--- .../bigtable_table_admin/async_client.py | 62 +++++++++---------- .../services/bigtable_table_admin/client.py | 62 
+++++++++---------- .../bigtable_table_admin/transports/base.py | 14 ++--- .../google/cloud/bigtable_v2/gapic_version.py | 16 +++++ .../services/bigtable/async_client.py | 28 ++++----- .../bigtable_v2/services/bigtable/client.py | 28 ++++----- .../services/bigtable/transports/base.py | 14 ++--- packages/google-cloud-bigtable/owlbot.py | 6 +- .../release-please-config.json | 4 +- 14 files changed, 190 insertions(+), 196 deletions(-) create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable/__init__.py index 4c6e3f3d8396..7331ff24150c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/__init__.py @@ -14,16 +14,12 @@ """Google Cloud Bigtable API package.""" -from typing import Optional -import pkg_resources from google.cloud.bigtable.client import Client +from google.cloud.bigtable import gapic_version as package_version -__version__: Optional[str] -try: - __version__ = pkg_resources.get_distribution("google-cloud-bigtable").version -except pkg_resources.DistributionNotFound: - __version__ = None +__version__: str +__version__ = package_version.__version__ __all__ = ["__version__", "Client"] diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py new file mode 100644 index 000000000000..8be002907dd0 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +__version__ = "2.14.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py index 8bb629e176cf..eba5173480cd 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ -27,7 +27,8 @@ Type, Union, ) -import pkg_resources + +from google.cloud.bigtable_admin_v2 import gapic_version as package_version from google.api_core.client_options import ClientOptions from google.api_core import exceptions as core_exceptions @@ -255,7 +256,7 @@ async def create_instance( instance: Optional[gba_instance.Instance] = None, clusters: Optional[MutableMapping[str, gba_instance.Cluster]] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Create an instance within a project. @@ -386,7 +387,7 @@ async def get_instance( *, name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> instance.Instance: r"""Gets information about an instance. @@ -478,7 +479,7 @@ async def list_instances( *, parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> bigtable_instance_admin.ListInstancesResponse: r"""Lists information about instances in a project. @@ -564,7 +565,7 @@ async def update_instance( request: Optional[Union[instance.Instance, dict]] = None, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> instance.Instance: r"""Updates an instance within a project. This method @@ -642,7 +643,7 @@ async def partial_update_instance( instance: Optional[gba_instance.Instance] = None, update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Partially updates an instance within a project. This @@ -757,7 +758,7 @@ async def delete_instance( *, name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Delete an instance from a project. @@ -829,7 +830,7 @@ async def create_cluster( cluster_id: Optional[str] = None, cluster: Optional[instance.Cluster] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Creates a cluster within an instance. 
@@ -946,7 +947,7 @@ async def get_cluster( *, name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> instance.Cluster: r"""Gets information about a cluster. @@ -1037,7 +1038,7 @@ async def list_clusters( *, parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> bigtable_instance_admin.ListClustersResponse: r"""Lists information about clusters in an instance. @@ -1125,7 +1126,7 @@ async def update_cluster( request: Optional[Union[instance.Cluster, dict]] = None, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Updates a cluster within an instance. @@ -1211,7 +1212,7 @@ async def partial_update_cluster( cluster: Optional[instance.Cluster] = None, update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Partially updates a cluster within a project. This method is the @@ -1325,7 +1326,7 @@ async def delete_cluster( *, name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes a cluster from an instance. @@ -1397,7 +1398,7 @@ async def create_app_profile( app_profile_id: Optional[str] = None, app_profile: Optional[instance.AppProfile] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> instance.AppProfile: r"""Creates an app profile within an instance. @@ -1497,7 +1498,7 @@ async def get_app_profile( *, name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> instance.AppProfile: r"""Gets information about an app profile. @@ -1587,7 +1588,7 @@ async def list_app_profiles( *, parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListAppProfilesAsyncPager: r"""Lists information about app profiles in an instance. @@ -1692,7 +1693,7 @@ async def update_app_profile( app_profile: Optional[instance.AppProfile] = None, update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Updates an app profile within an instance. 
@@ -1802,7 +1803,7 @@ async def delete_app_profile( *, name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes an app profile from an instance. @@ -1870,7 +1871,7 @@ async def get_iam_policy( *, resource: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> policy_pb2.Policy: r"""Gets the access control policy for an instance @@ -2021,7 +2022,7 @@ async def set_iam_policy( *, resource: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> policy_pb2.Policy: r"""Sets the access control policy on an instance @@ -2162,7 +2163,7 @@ async def test_iam_permissions( resource: Optional[str] = None, permissions: Optional[MutableSequence[str]] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: r"""Returns permissions that the caller has on the @@ -2263,7 +2264,7 @@ async def list_hot_tablets( *, parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListHotTabletsAsyncPager: r"""Lists hot tablets in a cluster, within the time range @@ -2364,14 +2365,9 @@ async def __aexit__(self, exc_type, exc, tb): await self.transport.close() -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-bigtable-admin", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) __all__ = ("BigtableInstanceAdminAsyncClient",) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py index 23f4e55e0f68..1881cc567524 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -28,7 +28,8 @@ Union, cast, ) -import pkg_resources + +from google.cloud.bigtable_admin_v2 import gapic_version as package_version from google.api_core import client_options as client_options_lib from google.api_core import exceptions as core_exceptions @@ -574,7 +575,7 @@ def create_instance( instance: Optional[gba_instance.Instance] = None, clusters: Optional[MutableMapping[str, gba_instance.Cluster]] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Create an instance within a project. 
@@ -704,7 +705,7 @@ def get_instance( *, name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> instance.Instance: r"""Gets information about an instance. @@ -786,7 +787,7 @@ def list_instances( *, parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> bigtable_instance_admin.ListInstancesResponse: r"""Lists information about instances in a project. @@ -862,7 +863,7 @@ def update_instance( request: Optional[Union[instance.Instance, dict]] = None, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> instance.Instance: r"""Updates an instance within a project. This method @@ -931,7 +932,7 @@ def partial_update_instance( instance: Optional[gba_instance.Instance] = None, update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Partially updates an instance within a project. This @@ -1038,7 +1039,7 @@ def delete_instance( *, name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Delete an instance from a project. @@ -1110,7 +1111,7 @@ def create_cluster( cluster_id: Optional[str] = None, cluster: Optional[instance.Cluster] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Creates a cluster within an instance. @@ -1227,7 +1228,7 @@ def get_cluster( *, name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> instance.Cluster: r"""Gets information about a cluster. @@ -1308,7 +1309,7 @@ def list_clusters( *, parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> bigtable_instance_admin.ListClustersResponse: r"""Lists information about clusters in an instance. @@ -1386,7 +1387,7 @@ def update_cluster( request: Optional[Union[instance.Cluster, dict]] = None, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Updates a cluster within an instance. 
@@ -1463,7 +1464,7 @@ def partial_update_cluster( cluster: Optional[instance.Cluster] = None, update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Partially updates a cluster within a project. This method is the @@ -1577,7 +1578,7 @@ def delete_cluster( *, name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes a cluster from an instance. @@ -1649,7 +1650,7 @@ def create_app_profile( app_profile_id: Optional[str] = None, app_profile: Optional[instance.AppProfile] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> instance.AppProfile: r"""Creates an app profile within an instance. @@ -1749,7 +1750,7 @@ def get_app_profile( *, name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> instance.AppProfile: r"""Gets information about an app profile. @@ -1829,7 +1830,7 @@ def list_app_profiles( *, parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListAppProfilesPager: r"""Lists information about app profiles in an instance. @@ -1924,7 +1925,7 @@ def update_app_profile( app_profile: Optional[instance.AppProfile] = None, update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Updates an app profile within an instance. @@ -2024,7 +2025,7 @@ def delete_app_profile( *, name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes an app profile from an instance. 
@@ -2092,7 +2093,7 @@ def get_iam_policy( *, resource: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> policy_pb2.Policy: r"""Gets the access control policy for an instance @@ -2230,7 +2231,7 @@ def set_iam_policy( *, resource: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> policy_pb2.Policy: r"""Sets the access control policy on an instance @@ -2368,7 +2369,7 @@ def test_iam_permissions( resource: Optional[str] = None, permissions: Optional[MutableSequence[str]] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: r"""Returns permissions that the caller has on the @@ -2457,7 +2458,7 @@ def list_hot_tablets( *, parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListHotTabletsPager: r"""Lists hot tablets in a cluster, within the time range @@ -2555,14 +2556,9 @@ def __exit__(self, type, value, traceback): self.transport.close() -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-bigtable-admin", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) __all__ = ("BigtableInstanceAdminClient",) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py index 124db9eef26f..bd45f319ff72 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py @@ -15,7 +15,8 @@ # import abc from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import pkg_resources + +from google.cloud.bigtable_admin_v2 import gapic_version as package_version import google.auth # type: ignore import google.api_core @@ -33,14 +34,9 @@ from google.longrunning import operations_pb2 # type: ignore from google.protobuf import empty_pb2 # type: ignore -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-bigtable-admin", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) class BigtableInstanceAdminTransport(abc.ABC): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index cc0a5bceebcd..7d27ca846d09 100644 --- 
a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -27,7 +27,8 @@ Type, Union, ) -import pkg_resources + +from google.cloud.bigtable_admin_v2 import gapic_version as package_version from google.api_core.client_options import ClientOptions from google.api_core import exceptions as core_exceptions @@ -248,7 +249,7 @@ async def create_table( table_id: Optional[str] = None, table: Optional[gba_table.Table] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> gba_table.Table: r"""Creates a new table in the specified instance. @@ -351,7 +352,7 @@ async def create_table_from_snapshot( table_id: Optional[str] = None, source_snapshot: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Creates a new table from the specified snapshot. The @@ -474,7 +475,7 @@ async def list_tables( *, parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListTablesAsyncPager: r"""Lists all tables served from a specified instance. @@ -573,7 +574,7 @@ async def get_table( *, name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> table.Table: r"""Gets metadata information about the specified table. @@ -663,7 +664,7 @@ async def update_table( table: Optional[gba_table.Table] = None, update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Updates a specified table. 
@@ -770,7 +771,7 @@ async def delete_table( *, name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Permanently deletes a specified table and all of its @@ -841,7 +842,7 @@ async def undelete_table( *, name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Restores a specified table which was accidentally @@ -937,7 +938,7 @@ async def modify_column_families( ] ] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> table.Table: r"""Performs a series of column family modifications on @@ -1033,7 +1034,7 @@ async def drop_row_range( request: Optional[Union[bigtable_table_admin.DropRowRangeRequest, dict]] = None, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Permanently drop/delete a row range from a specified @@ -1084,7 +1085,7 @@ async def generate_consistency_token( *, name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> bigtable_table_admin.GenerateConsistencyTokenResponse: r"""Generates a consistency token for a Table, which can @@ -1178,7 +1179,7 @@ async def check_consistency( name: Optional[str] = None, consistency_token: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> bigtable_table_admin.CheckConsistencyResponse: r"""Checks replication consistency based on a consistency @@ -1282,7 +1283,7 @@ async def snapshot_table( snapshot_id: Optional[str] = None, description: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Creates a new snapshot in the specified cluster from @@ -1420,7 +1421,7 @@ async def get_snapshot( *, name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> table.Snapshot: r"""Gets metadata information about the specified @@ -1532,7 +1533,7 @@ async def list_snapshots( *, parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListSnapshotsAsyncPager: r"""Lists all snapshots associated with the specified @@ -1656,7 +1657,7 @@ async def delete_snapshot( *, name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> None: 
r"""Permanently deletes the specified snapshot. @@ -1738,7 +1739,7 @@ async def create_backup( backup_id: Optional[str] = None, backup: Optional[table.Backup] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Starts creating a new Cloud Bigtable Backup. The returned backup @@ -1856,7 +1857,7 @@ async def get_backup( *, name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> table.Backup: r"""Gets metadata on a pending or completed Cloud @@ -1942,7 +1943,7 @@ async def update_backup( backup: Optional[table.Backup] = None, update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> table.Backup: r"""Updates a pending or completed Cloud Bigtable Backup. @@ -2036,7 +2037,7 @@ async def delete_backup( *, name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes a pending or completed Cloud Bigtable backup. @@ -2104,7 +2105,7 @@ async def list_backups( *, parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListBackupsAsyncPager: r"""Lists Cloud Bigtable backups. Returns both completed @@ -2206,7 +2207,7 @@ async def restore_table( request: Optional[Union[bigtable_table_admin.RestoreTableRequest, dict]] = None, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Create a new table by restoring from a completed backup. 
The new @@ -2280,7 +2281,7 @@ async def get_iam_policy( *, resource: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> policy_pb2.Policy: r"""Gets the access control policy for a Table or Backup @@ -2431,7 +2432,7 @@ async def set_iam_policy( *, resource: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> policy_pb2.Policy: r"""Sets the access control policy on a Table or Backup @@ -2572,7 +2573,7 @@ async def test_iam_permissions( resource: Optional[str] = None, permissions: Optional[MutableSequence[str]] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: r"""Returns permissions that the caller has on the @@ -2672,14 +2673,9 @@ async def __aexit__(self, exc_type, exc, tb): await self.transport.close() -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-bigtable-admin", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) __all__ = ("BigtableTableAdminAsyncClient",) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index a77b9d0ee3f1..165103018d19 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -28,7 +28,8 @@ Union, cast, ) -import pkg_resources + +from google.cloud.bigtable_admin_v2 import gapic_version as package_version from google.api_core import client_options as client_options_lib from google.api_core import exceptions as core_exceptions @@ -573,7 +574,7 @@ def create_table( table_id: Optional[str] = None, table: Optional[gba_table.Table] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> gba_table.Table: r"""Creates a new table in the specified instance. @@ -676,7 +677,7 @@ def create_table_from_snapshot( table_id: Optional[str] = None, source_snapshot: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Creates a new table from the specified snapshot. The @@ -801,7 +802,7 @@ def list_tables( *, parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListTablesPager: r"""Lists all tables served from a specified instance. 
@@ -890,7 +891,7 @@ def get_table( *, name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> table.Table: r"""Gets metadata information about the specified table. @@ -970,7 +971,7 @@ def update_table( table: Optional[gba_table.Table] = None, update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Updates a specified table. @@ -1077,7 +1078,7 @@ def delete_table( *, name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Permanently deletes a specified table and all of its @@ -1148,7 +1149,7 @@ def undelete_table( *, name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Restores a specified table which was accidentally @@ -1244,7 +1245,7 @@ def modify_column_families( ] ] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> table.Table: r"""Performs a series of column family modifications on @@ -1340,7 +1341,7 @@ def drop_row_range( request: Optional[Union[bigtable_table_admin.DropRowRangeRequest, dict]] = None, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Permanently drop/delete a row range from a specified @@ -1392,7 +1393,7 @@ def generate_consistency_token( *, name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> bigtable_table_admin.GenerateConsistencyTokenResponse: r"""Generates a consistency token for a Table, which can @@ -1480,7 +1481,7 @@ def check_consistency( name: Optional[str] = None, consistency_token: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> bigtable_table_admin.CheckConsistencyResponse: r"""Checks replication consistency based on a consistency @@ -1574,7 +1575,7 @@ def snapshot_table( snapshot_id: Optional[str] = None, description: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Creates a new snapshot in the specified cluster from @@ -1712,7 +1713,7 @@ def get_snapshot( *, name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> table.Snapshot: r"""Gets metadata information about the 
specified @@ -1814,7 +1815,7 @@ def list_snapshots( *, parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListSnapshotsPager: r"""Lists all snapshots associated with the specified @@ -1928,7 +1929,7 @@ def delete_snapshot( *, name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Permanently deletes the specified snapshot. @@ -2010,7 +2011,7 @@ def create_backup( backup_id: Optional[str] = None, backup: Optional[table.Backup] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Starts creating a new Cloud Bigtable Backup. The returned backup @@ -2128,7 +2129,7 @@ def get_backup( *, name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> table.Backup: r"""Gets metadata on a pending or completed Cloud @@ -2204,7 +2205,7 @@ def update_backup( backup: Optional[table.Backup] = None, update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> table.Backup: r"""Updates a pending or completed Cloud Bigtable Backup. @@ -2298,7 +2299,7 @@ def delete_backup( *, name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes a pending or completed Cloud Bigtable backup. @@ -2366,7 +2367,7 @@ def list_backups( *, parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListBackupsPager: r"""Lists Cloud Bigtable backups. Returns both completed @@ -2458,7 +2459,7 @@ def restore_table( request: Optional[Union[bigtable_table_admin.RestoreTableRequest, dict]] = None, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Create a new table by restoring from a completed backup. 
The new @@ -2533,7 +2534,7 @@ def get_iam_policy( *, resource: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> policy_pb2.Policy: r"""Gets the access control policy for a Table or Backup @@ -2671,7 +2672,7 @@ def set_iam_policy( *, resource: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> policy_pb2.Policy: r"""Sets the access control policy on a Table or Backup @@ -2809,7 +2810,7 @@ def test_iam_permissions( resource: Optional[str] = None, permissions: Optional[MutableSequence[str]] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: r"""Returns permissions that the caller has on the @@ -2904,14 +2905,9 @@ def __exit__(self, type, value, traceback): self.transport.close() -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-bigtable-admin", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) __all__ = ("BigtableTableAdminClient",) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py index 206a2e268203..cade1335b605 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py @@ -15,7 +15,8 @@ # import abc from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import pkg_resources + +from google.cloud.bigtable_admin_v2 import gapic_version as package_version import google.auth # type: ignore import google.api_core @@ -34,14 +35,9 @@ from google.longrunning import operations_pb2 # type: ignore from google.protobuf import empty_pb2 # type: ignore -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-bigtable-admin", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) class BigtableTableAdminTransport(abc.ABC): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py new file mode 100644 index 000000000000..8be002907dd0 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +__version__ = "2.14.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py index 8ab2f1348560..3671d34c8dbd 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -29,7 +29,8 @@ Type, Union, ) -import pkg_resources + +from google.cloud.bigtable_v2 import gapic_version as package_version from google.api_core.client_options import ClientOptions from google.api_core import exceptions as core_exceptions @@ -218,7 +219,7 @@ def read_rows( table_name: Optional[str] = None, app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> Awaitable[AsyncIterable[bigtable.ReadRowsResponse]]: r"""Streams back the contents of all requested rows in @@ -312,7 +313,7 @@ def sample_row_keys( table_name: Optional[str] = None, app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> Awaitable[AsyncIterable[bigtable.SampleRowKeysResponse]]: r"""Returns a sample of row keys in the table. The @@ -409,7 +410,7 @@ async def mutate_row( mutations: Optional[MutableSequence[data.Mutation]] = None, app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> bigtable.MutateRowResponse: r"""Mutates a row atomically. Cells already present in the row are @@ -534,7 +535,7 @@ def mutate_rows( entries: Optional[MutableSequence[bigtable.MutateRowsRequest.Entry]] = None, app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> Awaitable[AsyncIterable[bigtable.MutateRowsResponse]]: r"""Mutates multiple rows in a batch. 
Each individual row @@ -647,7 +648,7 @@ async def check_and_mutate_row( false_mutations: Optional[MutableSequence[data.Mutation]] = None, app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> bigtable.CheckAndMutateRowResponse: r"""Mutates a row atomically based on the output of a @@ -797,7 +798,7 @@ async def ping_and_warm( name: Optional[str] = None, app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> bigtable.PingAndWarmResponse: r"""Warm up associated instance metadata for this @@ -891,7 +892,7 @@ async def read_modify_write_row( rules: Optional[MutableSequence[data.ReadModifyWriteRule]] = None, app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> bigtable.ReadModifyWriteRowResponse: r"""Modifies a row atomically on the server. The method @@ -1012,14 +1013,9 @@ async def __aexit__(self, exc_type, exc, tb): await self.transport.close() -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-bigtable", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) __all__ = ("BigtableAsyncClient",) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py index 2d7eabfaca74..30f7a488ddd4 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py @@ -29,7 +29,8 @@ Union, cast, ) -import pkg_resources + +from google.cloud.bigtable_v2 import gapic_version as package_version from google.api_core import client_options as client_options_lib from google.api_core import exceptions as core_exceptions @@ -465,7 +466,7 @@ def read_rows( table_name: Optional[str] = None, app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> Iterable[bigtable.ReadRowsResponse]: r"""Streams back the contents of all requested rows in @@ -568,7 +569,7 @@ def sample_row_keys( table_name: Optional[str] = None, app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> Iterable[bigtable.SampleRowKeysResponse]: r"""Returns a sample of row keys in the table. 
The @@ -674,7 +675,7 @@ def mutate_row( mutations: Optional[MutableSequence[data.Mutation]] = None, app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> bigtable.MutateRowResponse: r"""Mutates a row atomically. Cells already present in the row are @@ -798,7 +799,7 @@ def mutate_rows( entries: Optional[MutableSequence[bigtable.MutateRowsRequest.Entry]] = None, app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> Iterable[bigtable.MutateRowsResponse]: r"""Mutates multiple rows in a batch. Each individual row @@ -920,7 +921,7 @@ def check_and_mutate_row( false_mutations: Optional[MutableSequence[data.Mutation]] = None, app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> bigtable.CheckAndMutateRowResponse: r"""Mutates a row atomically based on the output of a @@ -1079,7 +1080,7 @@ def ping_and_warm( name: Optional[str] = None, app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> bigtable.PingAndWarmResponse: r"""Warm up associated instance metadata for this @@ -1182,7 +1183,7 @@ def read_modify_write_row( rules: Optional[MutableSequence[data.ReadModifyWriteRule]] = None, app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Optional[float] = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> bigtable.ReadModifyWriteRowResponse: r"""Modifies a row atomically on the server. 
The method @@ -1319,14 +1320,9 @@ def __exit__(self, type, value, traceback): self.transport.close() -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-bigtable", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) __all__ = ("BigtableClient",) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py index 097ef1dc3726..a32ea682b8f9 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py @@ -15,7 +15,8 @@ # import abc from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import pkg_resources + +from google.cloud.bigtable_v2 import gapic_version as package_version import google.auth # type: ignore import google.api_core @@ -27,14 +28,9 @@ from google.cloud.bigtable_v2.types import bigtable -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-bigtable", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) class BigtableTransport(abc.ABC): diff --git a/packages/google-cloud-bigtable/owlbot.py b/packages/google-cloud-bigtable/owlbot.py index 391f292aec22..b6aa2f8a297f 100644 --- a/packages/google-cloud-bigtable/owlbot.py +++ b/packages/google-cloud-bigtable/owlbot.py @@ -70,13 +70,13 @@ def get_staging_dirs( bigtable_admin_default_version = "v2" for library in get_staging_dirs(bigtable_default_version, "bigtable"): - s.move(library / "google/cloud/bigtable_v2") + s.move(library / "google/cloud/bigtable_v2", excludes=["**/gapic_version.py"]) s.move(library / "tests") s.move(library / "scripts") for library in get_staging_dirs(bigtable_admin_default_version, "bigtable_admin"): - s.move(library / "google/cloud/bigtable_admin") - s.move(library / "google/cloud/bigtable_admin_v2") + s.move(library / "google/cloud/bigtable_admin", excludes=["**/gapic_version.py"]) + s.move(library / "google/cloud/bigtable_admin_v2", excludes=["**/gapic_version.py"]) s.move(library / "tests") s.move(library / "scripts") diff --git a/packages/google-cloud-bigtable/release-please-config.json b/packages/google-cloud-bigtable/release-please-config.json index a53c32551d9e..33d5a7e21784 100644 --- a/packages/google-cloud-bigtable/release-please-config.json +++ b/packages/google-cloud-bigtable/release-please-config.json @@ -6,7 +6,9 @@ "release-type": "python", "extra-files": [ "google/cloud/bigtable/gapic_version.py", - "google/cloud/bigtable_admin/gapic_version.py" + "google/cloud/bigtable_admin/gapic_version.py", + "google/cloud/bigtable_v2/gapic_version.py", + "google/cloud/bigtable_admin_v2/gapic_version.py" ] } }, From 36e35a7791028a89e265da764c52520c41fa9245 Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Wed, 7 Dec 2022 08:12:03 -0800 Subject: [PATCH 680/892] chore(main): release 2.14.1 (#709) Co-authored-by: release-please[bot] 
<55107282+release-please[bot]@users.noreply.github.com> --- .../.release-please-manifest.json | 2 +- packages/google-cloud-bigtable/CHANGELOG.md | 14 ++++++++++++++ .../google/cloud/bigtable/gapic_version.py | 2 +- .../google/cloud/bigtable_admin/gapic_version.py | 2 +- .../cloud/bigtable_admin_v2/gapic_version.py | 2 +- .../google/cloud/bigtable_v2/gapic_version.py | 2 +- 6 files changed, 19 insertions(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/.release-please-manifest.json b/packages/google-cloud-bigtable/.release-please-manifest.json index 851649e8452b..b286ef0a0f1f 100644 --- a/packages/google-cloud-bigtable/.release-please-manifest.json +++ b/packages/google-cloud-bigtable/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "2.14.0" + ".": "2.14.1" } \ No newline at end of file diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index cafe6bf86843..a22d3a5fb3d3 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,20 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.14.1](https://github.com/googleapis/python-bigtable/compare/v2.14.0...v2.14.1) (2022-12-06) + + +### Bug Fixes + +* **deps:** Require google-api-core >=1.34.0, >=2.11.0 ([e5875cb](https://github.com/googleapis/python-bigtable/commit/e5875cbe8551329fbb64f273ca21d6b7ada641ec)) +* Drop usage of pkg_resources ([e5875cb](https://github.com/googleapis/python-bigtable/commit/e5875cbe8551329fbb64f273ca21d6b7ada641ec)) +* Fix timeout default values ([e5875cb](https://github.com/googleapis/python-bigtable/commit/e5875cbe8551329fbb64f273ca21d6b7ada641ec)) + + +### Documentation + +* **samples:** Snippetgen should call await on the operation coroutine before calling result ([e5875cb](https://github.com/googleapis/python-bigtable/commit/e5875cbe8551329fbb64f273ca21d6b7ada641ec)) + ## [2.14.0](https://github.com/googleapis/python-bigtable/compare/v2.13.2...v2.14.0) (2022-11-30) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py index 8be002907dd0..ef7c50064e79 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.14.0" # {x-release-please-version} +__version__ = "2.14.1" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py index 8be002907dd0..ef7c50064e79 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "2.14.0" # {x-release-please-version} +__version__ = "2.14.1" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py index 8be002907dd0..ef7c50064e79 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.14.0" # {x-release-please-version} +__version__ = "2.14.1" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py index 8be002907dd0..ef7c50064e79 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.14.0" # {x-release-please-version} +__version__ = "2.14.1" # {x-release-please-version} From 06f948871f4c1a67373cdc2b4c1f8f408b140348 Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Fri, 9 Dec 2022 00:39:19 +0100 Subject: [PATCH 681/892] chore(deps): update dependency google-cloud-bigtable to v2.14.1 (#710) --- packages/google-cloud-bigtable/samples/beam/requirements.txt | 2 +- packages/google-cloud-bigtable/samples/hello/requirements.txt | 2 +- .../samples/instanceadmin/requirements.txt | 2 +- .../google-cloud-bigtable/samples/metricscaler/requirements.txt | 2 +- .../google-cloud-bigtable/samples/quickstart/requirements.txt | 2 +- .../samples/snippets/deletes/requirements.txt | 2 +- .../samples/snippets/filters/requirements.txt | 2 +- .../samples/snippets/reads/requirements.txt | 2 +- .../samples/snippets/writes/requirements.txt | 2 +- .../google-cloud-bigtable/samples/tableadmin/requirements.txt | 2 +- 10 files changed, 10 insertions(+), 10 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index 49731e86a691..a5a3111324d5 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ apache-beam==2.43.0 -google-cloud-bigtable==2.14.0 +google-cloud-bigtable==2.14.1 google-cloud-core==2.3.2 diff --git a/packages/google-cloud-bigtable/samples/hello/requirements.txt b/packages/google-cloud-bigtable/samples/hello/requirements.txt index 02fc4ddcadaa..adb8a74cd08c 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.14.0 +google-cloud-bigtable==2.14.1 google-cloud-core==2.3.2 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt index 3e443bdbdee6..1467a01c57ef 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.14.0 +google-cloud-bigtable==2.14.1 backoff==2.2.1 diff --git 
a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index f2e87d0e772d..6e78993cd4b7 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.14.0 +google-cloud-bigtable==2.14.1 google-cloud-monitoring==2.11.3 diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt index 12450cfa5413..de3798932bc0 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.14.0 +google-cloud-bigtable==2.14.1 diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt index 93f521ed0d9c..93ad1ef1faca 100644 --- a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.14.0 +google-cloud-bigtable==2.14.1 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt index 93f521ed0d9c..93ad1ef1faca 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.14.0 +google-cloud-bigtable==2.14.1 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt index 93f521ed0d9c..93ad1ef1faca 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.14.0 +google-cloud-bigtable==2.14.1 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt index 65f3afdf3aeb..3a743acae777 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.14.0 \ No newline at end of file +google-cloud-bigtable==2.14.1 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt index 12450cfa5413..de3798932bc0 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.14.0 +google-cloud-bigtable==2.14.1 From 94f979c032ec75ff22856db86a4ac8ea30a99e89 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 8 Dec 2022 15:39:42 -0800 Subject: [PATCH 682/892] build(deps): bump certifi from 2022.9.24 to 2022.12.7 in /synthtool/gcp/templates/python_library/.kokoro (#711) Source-Link: 
https://github.com/googleapis/synthtool/commit/b4fe62efb5114b6738ad4b13d6f654f2bf4b7cc0 Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:3bf87e47c2173d7eed42714589dc4da2c07c3268610f1e47f8e1a30decbfc7f1 Co-authored-by: Owl Bot --- packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml | 2 +- packages/google-cloud-bigtable/.kokoro/requirements.txt | 6 +++--- packages/google-cloud-bigtable/.pre-commit-config.yaml | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index bb21147e4c23..fccaa8e84449 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,4 +13,4 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:3abfa0f1886adaf0b83f07cb117b24a639ea1cb9cffe56d43280b977033563eb + digest: sha256:3bf87e47c2173d7eed42714589dc4da2c07c3268610f1e47f8e1a30decbfc7f1 diff --git a/packages/google-cloud-bigtable/.kokoro/requirements.txt b/packages/google-cloud-bigtable/.kokoro/requirements.txt index 9c1b9be34e6b..05dc4672edaa 100644 --- a/packages/google-cloud-bigtable/.kokoro/requirements.txt +++ b/packages/google-cloud-bigtable/.kokoro/requirements.txt @@ -20,9 +20,9 @@ cachetools==5.2.0 \ --hash=sha256:6a94c6402995a99c3970cc7e4884bb60b4a8639938157eeed436098bf9831757 \ --hash=sha256:f9f17d2aec496a9aa6b76f53e3b614c965223c061982d434d160f930c698a9db # via google-auth -certifi==2022.9.24 \ - --hash=sha256:0d9c601124e5a6ba9712dbc60d9c53c21e34f5f641fe83002317394311bdce14 \ - --hash=sha256:90c1a32f1d68f940488354e36370f6cca89f0f106db09518524c88d6ed83f382 +certifi==2022.12.7 \ + --hash=sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3 \ + --hash=sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18 # via requests cffi==1.15.1 \ --hash=sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5 \ diff --git a/packages/google-cloud-bigtable/.pre-commit-config.yaml b/packages/google-cloud-bigtable/.pre-commit-config.yaml index 46d237160f6d..5405cc8ff1f3 100644 --- a/packages/google-cloud-bigtable/.pre-commit-config.yaml +++ b/packages/google-cloud-bigtable/.pre-commit-config.yaml @@ -25,7 +25,7 @@ repos: rev: 22.3.0 hooks: - id: black -- repo: https://gitlab.com/pycqa/flake8 +- repo: https://github.com/pycqa/flake8 rev: 3.9.2 hooks: - id: flake8 From b081e73fad368dccd48bbce6d9ddfa55fcb0637b Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Wed, 4 Jan 2023 20:54:14 +0100 Subject: [PATCH 683/892] chore(deps): update all dependencies (#713) --- .../samples/metricscaler/requirements-test.txt | 2 +- .../google-cloud-bigtable/samples/metricscaler/requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt index a5b1e47bc2ff..1d9fe051cf96 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt @@ -1,3 +1,3 @@ pytest==7.2.0 -mock==4.0.3 +mock==5.0.0 google-cloud-testutils diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index 6e78993cd4b7..bf7db7ddb08b 100644 --- 
a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ google-cloud-bigtable==2.14.1 -google-cloud-monitoring==2.11.3 +google-cloud-monitoring==2.12.0 From 1ef2c1c2dcda51b014f1173c67bdb28a78988995 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Fri, 6 Jan 2023 12:18:30 -0500 Subject: [PATCH 684/892] chore(python): add support for python 3.11 [autoapprove] (#714) * chore(python): add support for python 3.11 Source-Link: https://github.com/googleapis/synthtool/commit/7197a001ffb6d8ce7b0b9b11c280f0c536c1033a Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:c43f1d918bcf817d337aa29ff833439494a158a0831508fda4ec75dc4c0d0320 * add constraints file for python 3.11 Co-authored-by: Owl Bot Co-authored-by: Anthonios Partheniou --- .../.github/.OwlBot.lock.yaml | 4 +- .../.github/workflows/unittest.yml | 2 +- .../.kokoro/samples/python3.11/common.cfg | 40 +++++++++++++++++++ .../.kokoro/samples/python3.11/continuous.cfg | 6 +++ .../samples/python3.11/periodic-head.cfg | 11 +++++ .../.kokoro/samples/python3.11/periodic.cfg | 6 +++ .../.kokoro/samples/python3.11/presubmit.cfg | 6 +++ .../google-cloud-bigtable/CONTRIBUTING.rst | 6 ++- packages/google-cloud-bigtable/noxfile.py | 2 +- .../samples/beam/noxfile.py | 2 +- .../samples/hello/noxfile.py | 2 +- .../samples/hello_happybase/noxfile.py | 2 +- .../samples/instanceadmin/noxfile.py | 2 +- .../samples/metricscaler/noxfile.py | 2 +- .../samples/quickstart/noxfile.py | 2 +- .../samples/quickstart_happybase/noxfile.py | 2 +- .../samples/snippets/deletes/noxfile.py | 2 +- .../samples/snippets/filters/noxfile.py | 2 +- .../samples/snippets/reads/noxfile.py | 2 +- .../samples/snippets/writes/noxfile.py | 2 +- .../samples/tableadmin/noxfile.py | 2 +- .../testing/constraints-3.11.txt | 0 22 files changed, 89 insertions(+), 18 deletions(-) create mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.11/common.cfg create mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.11/continuous.cfg create mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.11/periodic-head.cfg create mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.11/periodic.cfg create mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.11/presubmit.cfg create mode 100644 packages/google-cloud-bigtable/testing/constraints-3.11.txt diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index fccaa8e84449..889f77dfa25d 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -1,4 +1,4 @@ -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,4 +13,4 @@ # limitations under the License. 
docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:3bf87e47c2173d7eed42714589dc4da2c07c3268610f1e47f8e1a30decbfc7f1 + digest: sha256:c43f1d918bcf817d337aa29ff833439494a158a0831508fda4ec75dc4c0d0320 diff --git a/packages/google-cloud-bigtable/.github/workflows/unittest.yml b/packages/google-cloud-bigtable/.github/workflows/unittest.yml index 23000c05d9d8..8057a7691b12 100644 --- a/packages/google-cloud-bigtable/.github/workflows/unittest.yml +++ b/packages/google-cloud-bigtable/.github/workflows/unittest.yml @@ -8,7 +8,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python: ['3.7', '3.8', '3.9', '3.10'] + python: ['3.7', '3.8', '3.9', '3.10', '3.11'] steps: - name: Checkout uses: actions/checkout@v3 diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.11/common.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.11/common.cfg new file mode 100644 index 000000000000..467d405ae833 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.11/common.cfg @@ -0,0 +1,40 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Specify which tests to run +env_vars: { + key: "RUN_TESTS_SESSION" + value: "py-3.11" +} + +# Declare build specific Cloud project. +env_vars: { + key: "BUILD_SPECIFIC_GCLOUD_PROJECT" + value: "python-docs-samples-tests-311" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-bigtable/.kokoro/test-samples.sh" +} + +# Configure the docker image for kokoro-trampoline. +env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker" +} + +# Download secrets for samples +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. 
+build_file: "python-bigtable/.kokoro/trampoline_v2.sh" \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.11/continuous.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.11/continuous.cfg new file mode 100644 index 000000000000..a1c8d9759c88 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.11/continuous.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.11/periodic-head.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.11/periodic-head.cfg new file mode 100644 index 000000000000..be25a34f9ad3 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.11/periodic-head.cfg @@ -0,0 +1,11 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-bigtable/.kokoro/test-samples-against-head.sh" +} diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.11/periodic.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.11/periodic.cfg new file mode 100644 index 000000000000..71cd1e597e38 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.11/periodic.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "False" +} diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.11/presubmit.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.11/presubmit.cfg new file mode 100644 index 000000000000..a1c8d9759c88 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.11/presubmit.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/CONTRIBUTING.rst b/packages/google-cloud-bigtable/CONTRIBUTING.rst index 1579f6f6b3ce..504fb3742e86 100644 --- a/packages/google-cloud-bigtable/CONTRIBUTING.rst +++ b/packages/google-cloud-bigtable/CONTRIBUTING.rst @@ -22,7 +22,7 @@ In order to add a feature: documentation. - The feature must work fully on the following CPython versions: - 3.7, 3.8, 3.9 and 3.10 on both UNIX and Windows. + 3.7, 3.8, 3.9, 3.10 and 3.11 on both UNIX and Windows. - The feature must not add unnecessary dependencies (where "unnecessary" is of course subjective, but new dependencies should @@ -72,7 +72,7 @@ We use `nox `__ to instrument our tests. - To run a single unit test:: - $ nox -s unit-3.10 -- -k + $ nox -s unit-3.11 -- -k .. note:: @@ -225,11 +225,13 @@ We support: - `Python 3.8`_ - `Python 3.9`_ - `Python 3.10`_ +- `Python 3.11`_ .. _Python 3.7: https://docs.python.org/3.7/ .. _Python 3.8: https://docs.python.org/3.8/ .. _Python 3.9: https://docs.python.org/3.9/ .. _Python 3.10: https://docs.python.org/3.10/ +.. _Python 3.11: https://docs.python.org/3.11/ Supported versions can be found in our ``noxfile.py`` `config`_. 
diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index 4250d9f037bc..5b401e74b9fe 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -31,7 +31,7 @@ DEFAULT_PYTHON_VERSION = "3.8" -UNIT_TEST_PYTHON_VERSIONS = ["3.7", "3.8", "3.9", "3.10"] +UNIT_TEST_PYTHON_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] UNIT_TEST_STANDARD_DEPENDENCIES = [ "mock", "asyncmock", diff --git a/packages/google-cloud-bigtable/samples/beam/noxfile.py b/packages/google-cloud-bigtable/samples/beam/noxfile.py index e17b8192dd68..3d4395024ccd 100644 --- a/packages/google-cloud-bigtable/samples/beam/noxfile.py +++ b/packages/google-cloud-bigtable/samples/beam/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/packages/google-cloud-bigtable/samples/hello/noxfile.py b/packages/google-cloud-bigtable/samples/hello/noxfile.py index f5c32b22789b..7c8a63994cbd 100644 --- a/packages/google-cloud-bigtable/samples/hello/noxfile.py +++ b/packages/google-cloud-bigtable/samples/hello/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py b/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py index f5c32b22789b..7c8a63994cbd 100644 --- a/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py +++ b/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py b/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py index f5c32b22789b..7c8a63994cbd 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py +++ b/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py b/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py index f5c32b22789b..7c8a63994cbd 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py +++ b/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. 
-ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/packages/google-cloud-bigtable/samples/quickstart/noxfile.py b/packages/google-cloud-bigtable/samples/quickstart/noxfile.py index f5c32b22789b..7c8a63994cbd 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/noxfile.py +++ b/packages/google-cloud-bigtable/samples/quickstart/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py b/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py index f5c32b22789b..7c8a63994cbd 100644 --- a/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py +++ b/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/deletes/noxfile.py index f5c32b22789b..7c8a63994cbd 100644 --- a/packages/google-cloud-bigtable/samples/snippets/deletes/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py index f5c32b22789b..7c8a63994cbd 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py index f5c32b22789b..7c8a63994cbd 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] # Any default versions that should be ignored. 
IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py index f5c32b22789b..7c8a63994cbd 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py b/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py index f5c32b22789b..7c8a63994cbd 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py +++ b/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/packages/google-cloud-bigtable/testing/constraints-3.11.txt b/packages/google-cloud-bigtable/testing/constraints-3.11.txt new file mode 100644 index 000000000000..e69de29bb2d1 From 32397afef4b18af9a63aa6d577b9b62d95699765 Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Mon, 9 Jan 2023 17:14:15 +0000 Subject: [PATCH 685/892] chore(deps): update dependency google-cloud-monitoring to v2.13.0 (#716) --- .../google-cloud-bigtable/samples/metricscaler/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index bf7db7ddb08b..afe321b08e4d 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ google-cloud-bigtable==2.14.1 -google-cloud-monitoring==2.12.0 +google-cloud-monitoring==2.13.0 From deccdc3f33c3b50eefb8c6e333d0baf7caf1d8e1 Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Mon, 9 Jan 2023 18:25:01 +0000 Subject: [PATCH 686/892] chore(deps): update dependency mock to v5.0.1 (#717) --- .../samples/metricscaler/requirements-test.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt index 1d9fe051cf96..846cd3eb6eda 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt @@ -1,3 +1,3 @@ pytest==7.2.0 -mock==5.0.0 +mock==5.0.1 google-cloud-testutils From 8647df6072335476617f1450268647b7e8111c4b Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Mon, 9 Jan 2023 23:17:16 -0500 Subject: [PATCH 687/892] feat: Add support for python 3.11 (#718) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: Add support for python 3.11 chore: Update gapic-generator-python to v1.8.0 
PiperOrigin-RevId: 500768693 Source-Link: https://github.com/googleapis/googleapis/commit/190b612e3d0ff8f025875a669e5d68a1446d43c1 Source-Link: https://github.com/googleapis/googleapis-gen/commit/7bf29a414b9ecac3170f0b65bdc2a95705c0ef1a Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiN2JmMjlhNDE0YjllY2FjMzE3MGYwYjY1YmRjMmE5NTcwNWMwZWYxYSJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * require proto-plus 1.22.2 for python 3.11 Co-authored-by: Owl Bot Co-authored-by: Anthonios Partheniou --- .../services/bigtable_instance_admin/async_client.py | 2 +- .../services/bigtable_instance_admin/client.py | 2 +- .../services/bigtable_table_admin/async_client.py | 2 +- .../bigtable_admin_v2/services/bigtable_table_admin/client.py | 2 +- .../google/cloud/bigtable_admin_v2/types/instance.py | 3 +++ .../google/cloud/bigtable_v2/services/bigtable/async_client.py | 2 +- .../google/cloud/bigtable_v2/services/bigtable/client.py | 2 +- .../google/cloud/bigtable_v2/types/response_params.py | 3 +++ packages/google-cloud-bigtable/setup.py | 2 ++ 9 files changed, 14 insertions(+), 6 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py index eba5173480cd..ddeaf979ae82 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ -164,7 +164,7 @@ def get_mtls_endpoint_and_cert_source( The API endpoint is determined in the following order: (1) if `client_options.api_endpoint` if provided, use the provided one. (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the - default mTLS endpoint; if the environment variabel is "never", use the default API + default mTLS endpoint; if the environment variable is "never", use the default API endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise use the default API endpoint. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py index 1881cc567524..728d44b97162 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -413,7 +413,7 @@ def get_mtls_endpoint_and_cert_source( The API endpoint is determined in the following order: (1) if `client_options.api_endpoint` if provided, use the provided one. (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the - default mTLS endpoint; if the environment variabel is "never", use the default API + default mTLS endpoint; if the environment variable is "never", use the default API endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise use the default API endpoint. 
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index 7d27ca846d09..bc85e5c5dfaa 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -160,7 +160,7 @@ def get_mtls_endpoint_and_cert_source( The API endpoint is determined in the following order: (1) if `client_options.api_endpoint` if provided, use the provided one. (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the - default mTLS endpoint; if the environment variabel is "never", use the default API + default mTLS endpoint; if the environment variable is "never", use the default API endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise use the default API endpoint. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index 165103018d19..0f0534839444 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -415,7 +415,7 @@ def get_mtls_endpoint_and_cert_source( The API endpoint is determined in the following order: (1) if `client_options.api_endpoint` if provided, use the provided one. (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the - default mTLS endpoint; if the environment variabel is "never", use the default API + default mTLS endpoint; if the environment variable is "never", use the default API endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise use the default API endpoint. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py index a734fa91d901..86006a54a8be 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py @@ -40,6 +40,9 @@ class Instance(proto.Message): served from all [Clusters][google.bigtable.admin.v2.Cluster] in the instance. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + Attributes: name (str): The unique name of the instance. Values are of the form diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py index 3671d34c8dbd..8a25fa3af6d6 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -132,7 +132,7 @@ def get_mtls_endpoint_and_cert_source( The API endpoint is determined in the following order: (1) if `client_options.api_endpoint` if provided, use the provided one. 
(2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the - default mTLS endpoint; if the environment variabel is "never", use the default API + default mTLS endpoint; if the environment variable is "never", use the default API endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise use the default API endpoint. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py index 30f7a488ddd4..68726f591a0e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py @@ -308,7 +308,7 @@ def get_mtls_endpoint_and_cert_source( The API endpoint is determined in the following order: (1) if `client_options.api_endpoint` if provided, use the provided one. (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the - default mTLS endpoint; if the environment variabel is "never", use the default API + default mTLS endpoint; if the environment variable is "never", use the default API endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise use the default API endpoint. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/response_params.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/response_params.py index 4e9233b88202..b95fc3d130c2 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/response_params.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/response_params.py @@ -31,6 +31,9 @@ class ResponseParams(proto.Message): used to get zone_id and cluster_id from response trailers to tag the metrics. This should not be used by customers directly + + .. 
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + Attributes: zone_id (str): The cloud bigtable zone associated with the diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index c564a3a0fc4d..884d212df3bf 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -41,6 +41,7 @@ "google-cloud-core >= 1.4.1, <3.0.0dev", "grpc-google-iam-v1 >= 0.12.4, <1.0.0dev", "proto-plus >= 1.22.0, <2.0.0dev", + "proto-plus >= 1.22.2, <2.0.0dev; python_version>='3.11'", "protobuf>=3.19.5,<5.0.0dev,!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5", ] extras = {"libcst": "libcst >= 0.2.5"} @@ -86,6 +87,7 @@ "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", "Operating System :: OS Independent", "Topic :: Internet", ], From a51db744cec90e14b4ec806f81e326f3a57b4839 Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Tue, 10 Jan 2023 14:06:38 -0500 Subject: [PATCH 688/892] chore(main): release 2.15.0 (#719) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- .../google-cloud-bigtable/.release-please-manifest.json | 2 +- packages/google-cloud-bigtable/CHANGELOG.md | 7 +++++++ .../google/cloud/bigtable/gapic_version.py | 2 +- .../google/cloud/bigtable_admin/gapic_version.py | 2 +- .../google/cloud/bigtable_admin_v2/gapic_version.py | 2 +- .../google/cloud/bigtable_v2/gapic_version.py | 2 +- 6 files changed, 12 insertions(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/.release-please-manifest.json b/packages/google-cloud-bigtable/.release-please-manifest.json index b286ef0a0f1f..a73bb826a85b 100644 --- a/packages/google-cloud-bigtable/.release-please-manifest.json +++ b/packages/google-cloud-bigtable/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "2.14.1" + ".": "2.15.0" } \ No newline at end of file diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index a22d3a5fb3d3..41f8e8c776dd 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,13 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.15.0](https://github.com/googleapis/python-bigtable/compare/v2.14.1...v2.15.0) (2023-01-10) + + +### Features + +* Add support for python 3.11 ([#718](https://github.com/googleapis/python-bigtable/issues/718)) ([803a15e](https://github.com/googleapis/python-bigtable/commit/803a15ef0cd3713411eeb5d21258c12bbe1dcab6)) + ## [2.14.1](https://github.com/googleapis/python-bigtable/compare/v2.14.0...v2.14.1) (2022-12-06) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py index ef7c50064e79..2788e5e55993 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "2.14.1" # {x-release-please-version} +__version__ = "2.15.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py index ef7c50064e79..2788e5e55993 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.14.1" # {x-release-please-version} +__version__ = "2.15.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py index ef7c50064e79..2788e5e55993 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.14.1" # {x-release-please-version} +__version__ = "2.15.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py index ef7c50064e79..2788e5e55993 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "2.14.1" # {x-release-please-version} +__version__ = "2.15.0" # {x-release-please-version} From 5fb694daf227c1b2fda6acfd3cb6cee5a41fabf7 Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Tue, 10 Jan 2023 20:04:38 +0000 Subject: [PATCH 689/892] chore(deps): update dependency google-cloud-monitoring to v2.14.0 (#720) --- .../google-cloud-bigtable/samples/metricscaler/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index afe321b08e4d..9dbe496c2a67 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ google-cloud-bigtable==2.14.1 -google-cloud-monitoring==2.13.0 +google-cloud-monitoring==2.14.0 From bc668392a3b38ed94ea796ad18a253fe0f6ed0e6 Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Tue, 10 Jan 2023 20:24:25 +0000 Subject: [PATCH 690/892] chore(deps): update dependency google-cloud-bigtable to v2.15.0 (#721) --- packages/google-cloud-bigtable/samples/beam/requirements.txt | 2 +- packages/google-cloud-bigtable/samples/hello/requirements.txt | 2 +- .../samples/instanceadmin/requirements.txt | 2 +- .../google-cloud-bigtable/samples/metricscaler/requirements.txt | 2 +- .../google-cloud-bigtable/samples/quickstart/requirements.txt | 2 +- .../samples/snippets/deletes/requirements.txt | 2 +- .../samples/snippets/filters/requirements.txt | 2 +- .../samples/snippets/reads/requirements.txt | 2 +- .../samples/snippets/writes/requirements.txt | 2 +- .../google-cloud-bigtable/samples/tableadmin/requirements.txt | 2 +- 10 files changed, 10 insertions(+), 10 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index a5a3111324d5..65ee6219b9d5 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ apache-beam==2.43.0 -google-cloud-bigtable==2.14.1 +google-cloud-bigtable==2.15.0 google-cloud-core==2.3.2 diff --git a/packages/google-cloud-bigtable/samples/hello/requirements.txt b/packages/google-cloud-bigtable/samples/hello/requirements.txt index adb8a74cd08c..b03a5b2c7569 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.14.1 +google-cloud-bigtable==2.15.0 google-cloud-core==2.3.2 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt index 1467a01c57ef..863d2d4b9fc8 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.14.1 +google-cloud-bigtable==2.15.0 backoff==2.2.1 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index 9dbe496c2a67..548d03c5eed1 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.14.1 +google-cloud-bigtable==2.15.0 google-cloud-monitoring==2.14.0 diff --git 
a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt index de3798932bc0..40e0f50e911b 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.14.1 +google-cloud-bigtable==2.15.0 diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt index 93ad1ef1faca..d82b8e6c7c45 100644 --- a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.14.1 +google-cloud-bigtable==2.15.0 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt index 93ad1ef1faca..d82b8e6c7c45 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.14.1 +google-cloud-bigtable==2.15.0 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt index 93ad1ef1faca..d82b8e6c7c45 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.14.1 +google-cloud-bigtable==2.15.0 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt index 3a743acae777..17131ab77e72 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.14.1 \ No newline at end of file +google-cloud-bigtable==2.15.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt index de3798932bc0..40e0f50e911b 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.14.1 +google-cloud-bigtable==2.15.0 From 13293bb15341f90ae360088fe436ba728199c5af Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Sat, 14 Jan 2023 11:37:37 +0000 Subject: [PATCH 691/892] chore(deps): update dependency apache-beam to v2.44.0 (#723) --- packages/google-cloud-bigtable/samples/beam/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index 65ee6219b9d5..081681a27f88 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ -apache-beam==2.43.0 +apache-beam==2.44.0 google-cloud-bigtable==2.15.0 google-cloud-core==2.3.2 From b394b2bb1060fb32287395a3292e8a2631c45e1f Mon Sep 17 
00:00:00 2001 From: Mend Renovate Date: Sat, 14 Jan 2023 18:10:01 +0000 Subject: [PATCH 692/892] chore(deps): update dependency pytest to v7.2.1 (#724) --- .../google-cloud-bigtable/samples/beam/requirements-test.txt | 2 +- .../google-cloud-bigtable/samples/hello/requirements-test.txt | 2 +- .../samples/hello_happybase/requirements-test.txt | 2 +- .../samples/instanceadmin/requirements-test.txt | 2 +- .../samples/metricscaler/requirements-test.txt | 2 +- .../samples/quickstart/requirements-test.txt | 2 +- .../samples/quickstart_happybase/requirements-test.txt | 2 +- .../samples/snippets/deletes/requirements-test.txt | 2 +- .../samples/snippets/filters/requirements-test.txt | 2 +- .../samples/snippets/reads/requirements-test.txt | 2 +- .../samples/snippets/writes/requirements-test.txt | 2 +- .../samples/tableadmin/requirements-test.txt | 2 +- 12 files changed, 12 insertions(+), 12 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements-test.txt b/packages/google-cloud-bigtable/samples/beam/requirements-test.txt index 49780e035690..805eb2a9f845 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements-test.txt @@ -1 +1 @@ -pytest==7.2.0 +pytest==7.2.1 diff --git a/packages/google-cloud-bigtable/samples/hello/requirements-test.txt b/packages/google-cloud-bigtable/samples/hello/requirements-test.txt index 49780e035690..805eb2a9f845 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements-test.txt @@ -1 +1 @@ -pytest==7.2.0 +pytest==7.2.1 diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt b/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt index 49780e035690..805eb2a9f845 100644 --- a/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt @@ -1 +1 @@ -pytest==7.2.0 +pytest==7.2.1 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt index 49780e035690..805eb2a9f845 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt @@ -1 +1 @@ -pytest==7.2.0 +pytest==7.2.1 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt index 846cd3eb6eda..b321a100e7ca 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt @@ -1,3 +1,3 @@ -pytest==7.2.0 +pytest==7.2.1 mock==5.0.1 google-cloud-testutils diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt index 49780e035690..805eb2a9f845 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt @@ -1 +1 @@ -pytest==7.2.0 +pytest==7.2.1 diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt b/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt index 49780e035690..805eb2a9f845 100644 --- 
a/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt @@ -1 +1 @@ -pytest==7.2.0 +pytest==7.2.1 diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt index 49780e035690..805eb2a9f845 100644 --- a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt @@ -1 +1 @@ -pytest==7.2.0 +pytest==7.2.1 diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt index 49780e035690..805eb2a9f845 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt @@ -1 +1 @@ -pytest==7.2.0 +pytest==7.2.1 diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt index 49780e035690..805eb2a9f845 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt @@ -1 +1 @@ -pytest==7.2.0 +pytest==7.2.1 diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt index b90fc387d015..9f013668bd64 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt @@ -1,2 +1,2 @@ backoff==2.2.1 -pytest==7.2.0 +pytest==7.2.1 diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt index 911d44b4e10b..6a16834f7ccd 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt @@ -1,2 +1,2 @@ -pytest==7.2.0 +pytest==7.2.1 google-cloud-testutils==1.3.3 From 779262875ac94fe5a7908bdc3966669cf1fde88b Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Fri, 20 Jan 2023 11:46:12 -0500 Subject: [PATCH 693/892] docs: Add documentation for enums (#725) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * docs: Add documentation for enums fix: Add context manager return types chore: Update gapic-generator-python to v1.8.1 PiperOrigin-RevId: 503210727 Source-Link: https://github.com/googleapis/googleapis/commit/a391fd1dac18dfdfa00c18c8404f2c3a6ff8e98e Source-Link: https://github.com/googleapis/googleapis-gen/commit/0080f830dec37c3384157082bce279e37079ea58 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiMDA4MGY4MzBkZWMzN2MzMzg0MTU3MDgyYmNlMjc5ZTM3MDc5ZWE1OCJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md Co-authored-by: Owl Bot --- .../bigtable_instance_admin/client.py | 2 +- .../services/bigtable_table_admin/client.py | 2 +- .../types/bigtable_instance_admin.py | 21 ++- .../cloud/bigtable_admin_v2/types/common.py | 11 +- .../cloud/bigtable_admin_v2/types/instance.py | 60 
++++++++- .../cloud/bigtable_admin_v2/types/table.py | 121 +++++++++++++++++- .../bigtable_v2/services/bigtable/client.py | 2 +- .../cloud/bigtable_v2/types/bigtable.py | 13 ++ 8 files changed, 218 insertions(+), 14 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py index 728d44b97162..d6553a2babc4 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -2542,7 +2542,7 @@ def list_hot_tablets( # Done; return the response. return response - def __enter__(self): + def __enter__(self) -> "BigtableInstanceAdminClient": return self def __exit__(self, type, value, traceback): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index 0f0534839444..a68c32f67e77 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -2891,7 +2891,7 @@ def test_iam_permissions( # Done; return the response. return response - def __enter__(self): + def __enter__(self) -> "BigtableTableAdminClient": return self def __exit__(self, type, value, traceback): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py index f5a3f1124ac0..c48297d63d20 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py @@ -444,7 +444,26 @@ class TableProgress(proto.Message): """ class State(proto.Enum): - r"""""" + r""" + + Values: + STATE_UNSPECIFIED (0): + + PENDING (1): + The table has not yet begun copying to the + new cluster. + COPYING (2): + The table is actively being copied to the new + cluster. + COMPLETED (3): + The table has been fully copied to the new + cluster. + CANCELLED (4): + The table was deleted before it finished + copying to the new cluster. Note that tables + deleted after completion will stay marked as + COMPLETED, not CANCELLED. + """ STATE_UNSPECIFIED = 0 PENDING = 1 COPYING = 2 diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/common.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/common.py index 533f9145db45..b98d94fe00ee 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/common.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/common.py @@ -30,7 +30,16 @@ class StorageType(proto.Enum): - r"""Storage media types for persisting Bigtable data.""" + r"""Storage media types for persisting Bigtable data. + + Values: + STORAGE_TYPE_UNSPECIFIED (0): + The user did not specify a storage type. + SSD (1): + Flash (SSD) storage should be used. + HDD (2): + Magnetic drive (HDD) storage should be used. 
+ """ STORAGE_TYPE_UNSPECIFIED = 0 SSD = 1 HDD = 2 diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py index 86006a54a8be..f60899bacecd 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py @@ -83,13 +83,41 @@ class Instance(proto.Message): """ class State(proto.Enum): - r"""Possible states of an instance.""" + r"""Possible states of an instance. + + Values: + STATE_NOT_KNOWN (0): + The state of the instance could not be + determined. + READY (1): + The instance has been successfully created + and can serve requests to its tables. + CREATING (2): + The instance is currently being created, and + may be destroyed if the creation process + encounters an error. + """ STATE_NOT_KNOWN = 0 READY = 1 CREATING = 2 class Type(proto.Enum): - r"""The type of the instance.""" + r"""The type of the instance. + + Values: + TYPE_UNSPECIFIED (0): + The type of the instance is unspecified. If set when + creating an instance, a ``PRODUCTION`` instance will be + created. If set when updating an instance, the type will be + left unchanged. + PRODUCTION (1): + An instance meant for production use. ``serve_nodes`` must + be set on the cluster. + DEVELOPMENT (2): + DEPRECATED: Prefer PRODUCTION for all use + cases, as it no longer enforces a higher minimum + node count than DEVELOPMENT. + """ TYPE_UNSPECIFIED = 0 PRODUCTION = 1 DEVELOPMENT = 2 @@ -221,7 +249,33 @@ class Cluster(proto.Message): """ class State(proto.Enum): - r"""Possible states of a cluster.""" + r"""Possible states of a cluster. + + Values: + STATE_NOT_KNOWN (0): + The state of the cluster could not be + determined. + READY (1): + The cluster has been successfully created and + is ready to serve requests. + CREATING (2): + The cluster is currently being created, and + may be destroyed if the creation process + encounters an error. A cluster may not be able + to serve requests while being created. + RESIZING (3): + The cluster is currently being resized, and + may revert to its previous node count if the + process encounters an error. A cluster is still + capable of serving requests while being resized, + but may exhibit performance as if its number of + allocated nodes is between the starting and + requested states. + DISABLED (4): + The cluster has no backing nodes. The data + (tables) still exist, but no operations can be + performed on the cluster. + """ STATE_NOT_KNOWN = 0 READY = 1 CREATING = 2 diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py index 1c7854d256d2..1f91d3d89ee5 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py @@ -39,7 +39,15 @@ class RestoreSourceType(proto.Enum): - r"""Indicates the type of the restore source.""" + r"""Indicates the type of the restore source. + + Values: + RESTORE_SOURCE_TYPE_UNSPECIFIED (0): + No restore associated. + BACKUP (1): + A backup was used as the source of the + restore. + """ RESTORE_SOURCE_TYPE_UNSPECIFIED = 0 BACKUP = 1 @@ -118,12 +126,40 @@ class Table(proto.Message): class TimestampGranularity(proto.Enum): r"""Possible timestamp granularities to use when keeping multiple versions of data in a table. 
+ + Values: + TIMESTAMP_GRANULARITY_UNSPECIFIED (0): + The user did not specify a granularity. + Should not be returned. When specified during + table creation, MILLIS will be used. + MILLIS (1): + The table keeps data versioned at a + granularity of 1ms. """ TIMESTAMP_GRANULARITY_UNSPECIFIED = 0 MILLIS = 1 class View(proto.Enum): - r"""Defines a view over a table's fields.""" + r"""Defines a view over a table's fields. + + Values: + VIEW_UNSPECIFIED (0): + Uses the default view for each method as + documented in its request. + NAME_ONLY (1): + Only populates ``name``. + SCHEMA_VIEW (2): + Only populates ``name`` and fields related to the table's + schema. + REPLICATION_VIEW (3): + Only populates ``name`` and fields related to the table's + replication state. + ENCRYPTION_VIEW (5): + Only populates ``name`` and fields related to the table's + encryption state. + FULL (4): + Populates all fields. + """ VIEW_UNSPECIFIED = 0 NAME_ONLY = 1 SCHEMA_VIEW = 2 @@ -150,7 +186,37 @@ class ClusterState(proto.Message): """ class ReplicationState(proto.Enum): - r"""Table replication states.""" + r"""Table replication states. + + Values: + STATE_NOT_KNOWN (0): + The replication state of the table is unknown + in this cluster. + INITIALIZING (1): + The cluster was recently created, and the + table must finish copying over pre-existing data + from other clusters before it can begin + receiving live replication updates and serving + Data API requests. + PLANNED_MAINTENANCE (2): + The table is temporarily unable to serve Data + API requests from this cluster due to planned + internal maintenance. + UNPLANNED_MAINTENANCE (3): + The table is temporarily unable to serve Data + API requests from this cluster due to unplanned + or emergency maintenance. + READY (4): + The table can serve Data API requests from + this cluster. Depending on replication delay, + reads may not immediately reflect the state of + the table in other clusters. + READY_OPTIMIZING (5): + The table is fully created and ready for use after a + restore, and is being optimized for performance. When + optimizations are complete, the table will transition to + ``READY`` state. + """ STATE_NOT_KNOWN = 0 INITIALIZING = 1 PLANNED_MAINTENANCE = 2 @@ -335,7 +401,26 @@ class EncryptionInfo(proto.Message): """ class EncryptionType(proto.Enum): - r"""Possible encryption types for a resource.""" + r"""Possible encryption types for a resource. + + Values: + ENCRYPTION_TYPE_UNSPECIFIED (0): + Encryption type was not specified, though + data at rest remains encrypted. + GOOGLE_DEFAULT_ENCRYPTION (1): + The data backing this resource is encrypted + at rest with a key that is fully managed by + Google. No key version or status will be + populated. This is the default state. + CUSTOMER_MANAGED_ENCRYPTION (2): + The data backing this resource is encrypted at rest with a + key that is managed by the customer. The in-use version of + the key and its status are populated for CMEK-protected + tables. CMEK-protected backups are pinned to the key version + that was in use at the time the backup was taken. This key + version is populated but its status is not tracked and is + reported as ``UNKNOWN``. + """ ENCRYPTION_TYPE_UNSPECIFIED = 0 GOOGLE_DEFAULT_ENCRYPTION = 1 CUSTOMER_MANAGED_ENCRYPTION = 2 @@ -397,7 +482,21 @@ class Snapshot(proto.Message): """ class State(proto.Enum): - r"""Possible states of a snapshot.""" + r"""Possible states of a snapshot. + + Values: + STATE_NOT_KNOWN (0): + The state of the snapshot could not be + determined. 
+ READY (1): + The snapshot has been successfully created + and can serve all requests. + CREATING (2): + The snapshot is currently being created, and + may be destroyed if the creation process + encounters an error. A snapshot may not be + restored to a table while it is being created. + """ STATE_NOT_KNOWN = 0 READY = 1 CREATING = 2 @@ -481,7 +580,17 @@ class Backup(proto.Message): """ class State(proto.Enum): - r"""Indicates the current state of the backup.""" + r"""Indicates the current state of the backup. + + Values: + STATE_UNSPECIFIED (0): + Not specified. + CREATING (1): + The pending backup is still being created. Operations on the + backup may fail with ``FAILED_PRECONDITION`` in this state. + READY (2): + The backup is complete and ready for use. + """ STATE_UNSPECIFIED = 0 CREATING = 1 READY = 2 diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py index 68726f591a0e..aaff4669fdaa 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py @@ -1306,7 +1306,7 @@ def read_modify_write_row( # Done; return the response. return response - def __enter__(self): + def __enter__(self) -> "BigtableClient": return self def __exit__(self, type, value, traceback): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py index 09d371e9c98c..8124cb7e387e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py @@ -76,6 +76,19 @@ class RequestStatsView(proto.Enum): r"""The desired view into RequestStats that should be returned in the response. See also: RequestStats message. + + Values: + REQUEST_STATS_VIEW_UNSPECIFIED (0): + The default / unset value. The API will + default to the NONE option below. + REQUEST_STATS_NONE (1): + Do not include any RequestStats in the + response. This will leave the RequestStats + embedded message unset in the response. + REQUEST_STATS_FULL (2): + Include the full set of available + RequestStats in the response, applicable to this + read. 
""" REQUEST_STATS_VIEW_UNSPECIFIED = 0 REQUEST_STATS_NONE = 1 From 2d83e6f4ff0f85b7a8c433391e88a2b7ae015679 Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Tue, 24 Jan 2023 15:08:21 +0000 Subject: [PATCH 694/892] chore(deps): update dependency google-cloud-monitoring to v2.14.1 (#727) --- .../google-cloud-bigtable/samples/metricscaler/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index 548d03c5eed1..7ac3eb922941 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ google-cloud-bigtable==2.15.0 -google-cloud-monitoring==2.14.0 +google-cloud-monitoring==2.14.1 From 2b2caf67c049062378ba9723ffdcc517cdff4daf Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Wed, 25 Jan 2023 11:35:16 -0500 Subject: [PATCH 695/892] chore: Update gapic-generator-python to v1.8.2 (#728) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore: Update gapic-generator-python to v1.8.2 PiperOrigin-RevId: 504289125 Source-Link: https://github.com/googleapis/googleapis/commit/38a48a44a44279e9cf9f2f864b588958a2d87491 Source-Link: https://github.com/googleapis/googleapis-gen/commit/b2dc22663dbe47a972c8d8c2f8a4df013dafdcbc Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiYjJkYzIyNjYzZGJlNDdhOTcyYzhkOGMyZjhhNGRmMDEzZGFmZGNiYyJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * omit gapic_version.py from coverage Co-authored-by: Owl Bot Co-authored-by: Anthonios Partheniou --- packages/google-cloud-bigtable/.coveragerc | 2 ++ .../google/cloud/bigtable_admin_v2/__init__.py | 2 +- .../cloud/bigtable_admin_v2/types/bigtable_instance_admin.py | 2 +- .../google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py | 2 +- 4 files changed, 5 insertions(+), 3 deletions(-) diff --git a/packages/google-cloud-bigtable/.coveragerc b/packages/google-cloud-bigtable/.coveragerc index 9b0751055ecc..3128ad99ec56 100644 --- a/packages/google-cloud-bigtable/.coveragerc +++ b/packages/google-cloud-bigtable/.coveragerc @@ -20,6 +20,8 @@ branch = True omit = google/cloud/__init__.py google/__init__.py + google/cloud/bigtable_admin/__init__.py + google/cloud/bigtable_admin/gapic_version.py [report] fail_under = 100 diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py index baf3403b27a2..282834fe7af4 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -from google.cloud.bigtable_admin import gapic_version as package_version +from google.cloud.bigtable_admin_v2 import gapic_version as package_version __version__ = package_version.__version__ diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py index c48297d63d20..87ff268e4cc4 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py @@ -448,7 +448,7 @@ class State(proto.Enum): Values: STATE_UNSPECIFIED (0): - + No description available. PENDING (1): The table has not yet begun copying to the new cluster. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py index 86e5e138ea5a..b343c985dbc2 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -from google.cloud.bigtable import gapic_version as package_version +from google.cloud.bigtable_v2 import gapic_version as package_version __version__ = package_version.__version__ From fb58acc8c85ebb2fd7032d91a950e69296e52f39 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Mon, 30 Jan 2023 16:50:13 +0000 Subject: [PATCH 696/892] chore: fix prerelease_deps nox session [autoapprove] (#729) Source-Link: https://togithub.com/googleapis/synthtool/commit/26c7505b2f76981ec1707b851e1595c8c06e90fc Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:f946c75373c2b0040e8e318c5e85d0cf46bc6e61d0a01f3ef94d8de974ac6790 --- .../.github/.OwlBot.lock.yaml | 2 +- packages/google-cloud-bigtable/noxfile.py | 14 ++++++-------- 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 889f77dfa25d..f0f3b24b20cd 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,4 +13,4 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:c43f1d918bcf817d337aa29ff833439494a158a0831508fda4ec75dc4c0d0320 + digest: sha256:f946c75373c2b0040e8e318c5e85d0cf46bc6e61d0a01f3ef94d8de974ac6790 diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index 5b401e74b9fe..47415385a13f 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -200,9 +200,9 @@ def unit(session): def install_systemtest_dependencies(session, *constraints): # Use pre-release gRPC for system tests. - # Exclude version 1.49.0rc1 which has a known issue. - # See https://github.com/grpc/grpc/pull/30642 - session.install("--pre", "grpcio!=1.49.0rc1") + # Exclude version 1.52.0rc1 which has a known issue. 
+ # See https://github.com/grpc/grpc/issues/32163 + session.install("--pre", "grpcio!=1.52.0rc1") session.install(*SYSTEM_TEST_STANDARD_DEPENDENCIES, *constraints) @@ -384,9 +384,7 @@ def prerelease_deps(session): unit_deps_all = UNIT_TEST_STANDARD_DEPENDENCIES + UNIT_TEST_EXTERNAL_DEPENDENCIES session.install(*unit_deps_all) system_deps_all = ( - SYSTEM_TEST_STANDARD_DEPENDENCIES - + SYSTEM_TEST_EXTERNAL_DEPENDENCIES - + SYSTEM_TEST_EXTRAS + SYSTEM_TEST_STANDARD_DEPENDENCIES + SYSTEM_TEST_EXTERNAL_DEPENDENCIES ) session.install(*system_deps_all) @@ -416,8 +414,8 @@ def prerelease_deps(session): # dependency of grpc "six", "googleapis-common-protos", - # Exclude version 1.49.0rc1 which has a known issue. See https://github.com/grpc/grpc/pull/30642 - "grpcio!=1.49.0rc1", + # Exclude version 1.52.0rc1 which has a known issue. See https://github.com/grpc/grpc/issues/32163 + "grpcio!=1.52.0rc1", "grpcio-status", "google-api-core", "proto-plus", From e01143af3e19767ba342ab88209ab11974a50ced Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Fri, 3 Feb 2023 11:17:19 +0000 Subject: [PATCH 697/892] chore(deps): update google-github-actions/setup-gcloud action to v1.1.0 (#730) --- .../google-cloud-bigtable/.github/workflows/system_emulated.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml b/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml index 3fbc0ae10e5d..e1f43fd40108 100644 --- a/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml +++ b/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml @@ -20,7 +20,7 @@ jobs: python-version: '3.8' - name: Setup GCloud SDK - uses: google-github-actions/setup-gcloud@v1.0.1 + uses: google-github-actions/setup-gcloud@v1.1.0 - name: Install / run Nox run: | From 42ea926af087d69d144aea2900cfee83fde4bc47 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Wed, 8 Feb 2023 15:10:12 +0000 Subject: [PATCH 698/892] build(deps): bump cryptography from 38.0.3 to 39.0.1 in /synthtool/gcp/templates/python_library/.kokoro (#733) Source-Link: https://togithub.com/googleapis/synthtool/commit/bb171351c3946d3c3c32e60f5f18cee8c464ec51 Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:f62c53736eccb0c4934a3ea9316e0d57696bb49c1a7c86c726e9bb8a2f87dadf --- .../.github/.OwlBot.lock.yaml | 2 +- .../.kokoro/requirements.txt | 49 +++++++++---------- 2 files changed, 23 insertions(+), 28 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index f0f3b24b20cd..894fb6bc9b47 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,4 +13,4 @@ # limitations under the License. 
docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:f946c75373c2b0040e8e318c5e85d0cf46bc6e61d0a01f3ef94d8de974ac6790 + digest: sha256:f62c53736eccb0c4934a3ea9316e0d57696bb49c1a7c86c726e9bb8a2f87dadf diff --git a/packages/google-cloud-bigtable/.kokoro/requirements.txt b/packages/google-cloud-bigtable/.kokoro/requirements.txt index 05dc4672edaa..096e4800a9ac 100644 --- a/packages/google-cloud-bigtable/.kokoro/requirements.txt +++ b/packages/google-cloud-bigtable/.kokoro/requirements.txt @@ -113,33 +113,28 @@ commonmark==0.9.1 \ --hash=sha256:452f9dc859be7f06631ddcb328b6919c67984aca654e5fefb3914d54691aed60 \ --hash=sha256:da2f38c92590f83de410ba1a3cbceafbc74fee9def35f9251ba9a971d6d66fd9 # via rich -cryptography==38.0.3 \ - --hash=sha256:068147f32fa662c81aebab95c74679b401b12b57494872886eb5c1139250ec5d \ - --hash=sha256:06fc3cc7b6f6cca87bd56ec80a580c88f1da5306f505876a71c8cfa7050257dd \ - --hash=sha256:25c1d1f19729fb09d42e06b4bf9895212292cb27bb50229f5aa64d039ab29146 \ - --hash=sha256:402852a0aea73833d982cabb6d0c3bb582c15483d29fb7085ef2c42bfa7e38d7 \ - --hash=sha256:4e269dcd9b102c5a3d72be3c45d8ce20377b8076a43cbed6f660a1afe365e436 \ - --hash=sha256:5419a127426084933076132d317911e3c6eb77568a1ce23c3ac1e12d111e61e0 \ - --hash=sha256:554bec92ee7d1e9d10ded2f7e92a5d70c1f74ba9524947c0ba0c850c7b011828 \ - --hash=sha256:5e89468fbd2fcd733b5899333bc54d0d06c80e04cd23d8c6f3e0542358c6060b \ - --hash=sha256:65535bc550b70bd6271984d9863a37741352b4aad6fb1b3344a54e6950249b55 \ - --hash=sha256:6ab9516b85bebe7aa83f309bacc5f44a61eeb90d0b4ec125d2d003ce41932d36 \ - --hash=sha256:6addc3b6d593cd980989261dc1cce38263c76954d758c3c94de51f1e010c9a50 \ - --hash=sha256:728f2694fa743a996d7784a6194da430f197d5c58e2f4e278612b359f455e4a2 \ - --hash=sha256:785e4056b5a8b28f05a533fab69febf5004458e20dad7e2e13a3120d8ecec75a \ - --hash=sha256:78cf5eefac2b52c10398a42765bfa981ce2372cbc0457e6bf9658f41ec3c41d8 \ - --hash=sha256:7f836217000342d448e1c9a342e9163149e45d5b5eca76a30e84503a5a96cab0 \ - --hash=sha256:8d41a46251bf0634e21fac50ffd643216ccecfaf3701a063257fe0b2be1b6548 \ - --hash=sha256:984fe150f350a3c91e84de405fe49e688aa6092b3525f407a18b9646f6612320 \ - --hash=sha256:9b24bcff7853ed18a63cfb0c2b008936a9554af24af2fb146e16d8e1aed75748 \ - --hash=sha256:b1b35d9d3a65542ed2e9d90115dfd16bbc027b3f07ee3304fc83580f26e43249 \ - --hash=sha256:b1b52c9e5f8aa2b802d48bd693190341fae201ea51c7a167d69fc48b60e8a959 \ - --hash=sha256:bbf203f1a814007ce24bd4d51362991d5cb90ba0c177a9c08825f2cc304d871f \ - --hash=sha256:be243c7e2bfcf6cc4cb350c0d5cdf15ca6383bbcb2a8ef51d3c9411a9d4386f0 \ - --hash=sha256:bfbe6ee19615b07a98b1d2287d6a6073f734735b49ee45b11324d85efc4d5cbd \ - --hash=sha256:c46837ea467ed1efea562bbeb543994c2d1f6e800785bd5a2c98bc096f5cb220 \ - --hash=sha256:dfb4f4dd568de1b6af9f4cda334adf7d72cf5bc052516e1b2608b683375dd95c \ - --hash=sha256:ed7b00096790213e09eb11c97cc6e2b757f15f3d2f85833cd2d3ec3fe37c1722 +cryptography==39.0.1 \ + --hash=sha256:0f8da300b5c8af9f98111ffd512910bc792b4c77392a9523624680f7956a99d4 \ + --hash=sha256:35f7c7d015d474f4011e859e93e789c87d21f6f4880ebdc29896a60403328f1f \ + --hash=sha256:5aa67414fcdfa22cf052e640cb5ddc461924a045cacf325cd164e65312d99502 \ + --hash=sha256:5d2d8b87a490bfcd407ed9d49093793d0f75198a35e6eb1a923ce1ee86c62b41 \ + --hash=sha256:6687ef6d0a6497e2b58e7c5b852b53f62142cfa7cd1555795758934da363a965 \ + --hash=sha256:6f8ba7f0328b79f08bdacc3e4e66fb4d7aab0c3584e0bd41328dce5262e26b2e \ + --hash=sha256:706843b48f9a3f9b9911979761c91541e3d90db1ca905fd63fee540a217698bc \ + 
--hash=sha256:807ce09d4434881ca3a7594733669bd834f5b2c6d5c7e36f8c00f691887042ad \ + --hash=sha256:83e17b26de248c33f3acffb922748151d71827d6021d98c70e6c1a25ddd78505 \ + --hash=sha256:96f1157a7c08b5b189b16b47bc9db2332269d6680a196341bf30046330d15388 \ + --hash=sha256:aec5a6c9864be7df2240c382740fcf3b96928c46604eaa7f3091f58b878c0bb6 \ + --hash=sha256:b0afd054cd42f3d213bf82c629efb1ee5f22eba35bf0eec88ea9ea7304f511a2 \ + --hash=sha256:ced4e447ae29ca194449a3f1ce132ded8fcab06971ef5f618605aacaa612beac \ + --hash=sha256:d1f6198ee6d9148405e49887803907fe8962a23e6c6f83ea7d98f1c0de375695 \ + --hash=sha256:e124352fd3db36a9d4a21c1aa27fd5d051e621845cb87fb851c08f4f75ce8be6 \ + --hash=sha256:e422abdec8b5fa8462aa016786680720d78bdce7a30c652b7fadf83a4ba35336 \ + --hash=sha256:ef8b72fa70b348724ff1218267e7f7375b8de4e8194d1636ee60510aae104cd0 \ + --hash=sha256:f0c64d1bd842ca2633e74a1a28033d139368ad959872533b1bab8c80e8240a0c \ + --hash=sha256:f24077a3b5298a5a06a8e0536e3ea9ec60e4c7ac486755e5fb6e6ea9b3500106 \ + --hash=sha256:fdd188c8a6ef8769f148f88f859884507b954cc64db6b52f66ef199bb9ad660a \ + --hash=sha256:fe913f20024eb2cb2f323e42a64bdf2911bb9738a15dba7d3cce48151034e3a8 # via # gcp-releasetool # secretstorage From e0f0c6facd69c30e49c49a19439d99b035e74a1f Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Fri, 17 Feb 2023 11:54:37 -0500 Subject: [PATCH 699/892] fix: Add service_yaml_parameters to py_gapic_library BUILD.bazel targets (#734) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: enable "rest" transport in Python for services supporting numeric enums PiperOrigin-RevId: 508143576 Source-Link: https://github.com/googleapis/googleapis/commit/7a702a989db3b413f39ff8994ca53fb38b6928c2 Source-Link: https://github.com/googleapis/googleapis-gen/commit/6ad1279c0e7aa787ac6b66c9fd4a210692edffcd Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiNmFkMTI3OWMwZTdhYTc4N2FjNmI2NmM5ZmQ0YTIxMDY5MmVkZmZjZCJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * feat: publish the Cloud Bigtable Change Streams PiperOrigin-RevId: 508465928 Source-Link: https://github.com/googleapis/googleapis/commit/3770af230796f0fe4347db68fb87199fa74d14dd Source-Link: https://github.com/googleapis/googleapis-gen/commit/0280f1f5b990d9013a1c4c555def241a31242376 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiMDI4MGYxZjViOTkwZDkwMTNhMWM0YzU1NWRlZjI0MWEzMTI0MjM3NiJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * fix(deps): require google-api-core>=1.34.0,>=2.11.0 * fix: Add service_yaml_parameters to py_gapic_library BUILD.bazel targets PiperOrigin-RevId: 510187992 Source-Link: https://github.com/googleapis/googleapis/commit/5edc23561778df80d5293f20132765f8757a6b2c Source-Link: https://github.com/googleapis/googleapis-gen/commit/b0bedb72e4765a3e0b674a28c50ea0f9a9b26a89 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiYjBiZWRiNzJlNDc2NWEzZTBiNjc0YTI4YzUwZWEwZjlhOWIyNmE4OSJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * run generator at main; fix mypy errors * remove obsolete replacements in owlbot.py * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot post-processor See 
https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot Co-authored-by: Anthonios Partheniou --- .../bigtable_admin_v2/gapic_metadata.json | 235 + .../bigtable_instance_admin/client.py | 2 + .../transports/__init__.py | 5 + .../transports/rest.py | 3160 ++++++++ .../services/bigtable_table_admin/client.py | 2 + .../transports/__init__.py | 5 + .../bigtable_table_admin/transports/rest.py | 3595 ++++++++ .../google/cloud/bigtable_v2/__init__.py | 14 + .../cloud/bigtable_v2/gapic_metadata.json | 70 + .../services/bigtable/async_client.py | 201 + .../bigtable_v2/services/bigtable/client.py | 205 + .../services/bigtable/transports/__init__.py | 5 + .../services/bigtable/transports/base.py | 34 + .../services/bigtable/transports/grpc.py | 66 + .../bigtable/transports/grpc_asyncio.py | 66 + .../services/bigtable/transports/rest.py | 1433 ++++ .../cloud/bigtable_v2/types/__init__.py | 14 + .../cloud/bigtable_v2/types/bigtable.py | 430 +- .../google/cloud/bigtable_v2/types/data.py | 61 + packages/google-cloud-bigtable/noxfile.py | 4 +- packages/google-cloud-bigtable/owlbot.py | 38 +- .../scripts/fixup_bigtable_v2_keywords.py | 2 + packages/google-cloud-bigtable/setup.py | 2 +- .../testing/constraints-3.7.txt | 2 +- .../test_bigtable_instance_admin.py | 6349 ++++++++++++++- .../test_bigtable_table_admin.py | 7220 ++++++++++++++++- .../unit/gapic/bigtable_v2/test_bigtable.py | 3578 +++++++- 27 files changed, 26340 insertions(+), 458 deletions(-) create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_metadata.json b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_metadata.json index 7fb6ddd95698..d797338cce46 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_metadata.json +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_metadata.json @@ -226,6 +226,116 @@ ] } } + }, + "rest": { + "libraryClient": "BigtableInstanceAdminClient", + "rpcs": { + "CreateAppProfile": { + "methods": [ + "create_app_profile" + ] + }, + "CreateCluster": { + "methods": [ + "create_cluster" + ] + }, + "CreateInstance": { + "methods": [ + "create_instance" + ] + }, + "DeleteAppProfile": { + "methods": [ + "delete_app_profile" + ] + }, + "DeleteCluster": { + "methods": [ + "delete_cluster" + ] + }, + "DeleteInstance": { + "methods": [ + "delete_instance" + ] + }, + "GetAppProfile": { + "methods": [ + "get_app_profile" + ] + }, + "GetCluster": { + "methods": [ + "get_cluster" + ] + }, + "GetIamPolicy": { + "methods": [ + "get_iam_policy" + ] + }, + "GetInstance": { + "methods": [ + "get_instance" + ] + }, + "ListAppProfiles": { + "methods": [ + "list_app_profiles" + ] + }, + "ListClusters": { + "methods": [ + "list_clusters" + ] + }, + "ListHotTablets": { + "methods": [ + "list_hot_tablets" + ] + }, + "ListInstances": { + "methods": [ + "list_instances" + ] + }, + "PartialUpdateCluster": { + "methods": [ + "partial_update_cluster" + ] + }, + "PartialUpdateInstance": { + "methods": [ + "partial_update_instance" + ] + }, + "SetIamPolicy": { + "methods": [ + "set_iam_policy" + ] + }, + 
"TestIamPermissions": { + "methods": [ + "test_iam_permissions" + ] + }, + "UpdateAppProfile": { + "methods": [ + "update_app_profile" + ] + }, + "UpdateCluster": { + "methods": [ + "update_cluster" + ] + }, + "UpdateInstance": { + "methods": [ + "update_instance" + ] + } + } } } }, @@ -480,6 +590,131 @@ ] } } + }, + "rest": { + "libraryClient": "BigtableTableAdminClient", + "rpcs": { + "CheckConsistency": { + "methods": [ + "check_consistency" + ] + }, + "CreateBackup": { + "methods": [ + "create_backup" + ] + }, + "CreateTable": { + "methods": [ + "create_table" + ] + }, + "CreateTableFromSnapshot": { + "methods": [ + "create_table_from_snapshot" + ] + }, + "DeleteBackup": { + "methods": [ + "delete_backup" + ] + }, + "DeleteSnapshot": { + "methods": [ + "delete_snapshot" + ] + }, + "DeleteTable": { + "methods": [ + "delete_table" + ] + }, + "DropRowRange": { + "methods": [ + "drop_row_range" + ] + }, + "GenerateConsistencyToken": { + "methods": [ + "generate_consistency_token" + ] + }, + "GetBackup": { + "methods": [ + "get_backup" + ] + }, + "GetIamPolicy": { + "methods": [ + "get_iam_policy" + ] + }, + "GetSnapshot": { + "methods": [ + "get_snapshot" + ] + }, + "GetTable": { + "methods": [ + "get_table" + ] + }, + "ListBackups": { + "methods": [ + "list_backups" + ] + }, + "ListSnapshots": { + "methods": [ + "list_snapshots" + ] + }, + "ListTables": { + "methods": [ + "list_tables" + ] + }, + "ModifyColumnFamilies": { + "methods": [ + "modify_column_families" + ] + }, + "RestoreTable": { + "methods": [ + "restore_table" + ] + }, + "SetIamPolicy": { + "methods": [ + "set_iam_policy" + ] + }, + "SnapshotTable": { + "methods": [ + "snapshot_table" + ] + }, + "TestIamPermissions": { + "methods": [ + "test_iam_permissions" + ] + }, + "UndeleteTable": { + "methods": [ + "undelete_table" + ] + }, + "UpdateBackup": { + "methods": [ + "update_backup" + ] + }, + "UpdateTable": { + "methods": [ + "update_table" + ] + } + } } } } diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py index d6553a2babc4..fcb767a3d642 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -60,6 +60,7 @@ from .transports.base import BigtableInstanceAdminTransport, DEFAULT_CLIENT_INFO from .transports.grpc import BigtableInstanceAdminGrpcTransport from .transports.grpc_asyncio import BigtableInstanceAdminGrpcAsyncIOTransport +from .transports.rest import BigtableInstanceAdminRestTransport class BigtableInstanceAdminClientMeta(type): @@ -75,6 +76,7 @@ class BigtableInstanceAdminClientMeta(type): ) # type: Dict[str, Type[BigtableInstanceAdminTransport]] _transport_registry["grpc"] = BigtableInstanceAdminGrpcTransport _transport_registry["grpc_asyncio"] = BigtableInstanceAdminGrpcAsyncIOTransport + _transport_registry["rest"] = BigtableInstanceAdminRestTransport def get_transport_class( cls, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py index bf207fb3a53b..e5637c0da2a5 100644 --- 
a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py @@ -19,6 +19,8 @@ from .base import BigtableInstanceAdminTransport from .grpc import BigtableInstanceAdminGrpcTransport from .grpc_asyncio import BigtableInstanceAdminGrpcAsyncIOTransport +from .rest import BigtableInstanceAdminRestTransport +from .rest import BigtableInstanceAdminRestInterceptor # Compile a registry of transports. @@ -27,9 +29,12 @@ ) # type: Dict[str, Type[BigtableInstanceAdminTransport]] _transport_registry["grpc"] = BigtableInstanceAdminGrpcTransport _transport_registry["grpc_asyncio"] = BigtableInstanceAdminGrpcAsyncIOTransport +_transport_registry["rest"] = BigtableInstanceAdminRestTransport __all__ = ( "BigtableInstanceAdminTransport", "BigtableInstanceAdminGrpcTransport", "BigtableInstanceAdminGrpcAsyncIOTransport", + "BigtableInstanceAdminRestTransport", + "BigtableInstanceAdminRestInterceptor", ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py new file mode 100644 index 000000000000..c95068666c35 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py @@ -0,0 +1,3160 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import rest_streaming +from google.api_core import path_template +from google.api_core import gapic_v1 + +from google.protobuf import json_format +from google.api_core import operations_v1 +from requests import __version__ as requests_version +import dataclasses +import re +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + + +from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin +from google.cloud.bigtable_admin_v2.types import instance +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore + +from .base import ( + BigtableInstanceAdminTransport, + DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO, +) + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + + +class BigtableInstanceAdminRestInterceptor: + """Interceptor for BigtableInstanceAdmin. + + Interceptors are used to manipulate requests, request metadata, and responses + in arbitrary ways. + Example use cases include: + * Logging + * Verifying requests according to service or custom semantics + * Stripping extraneous information from responses + + These use cases and more can be enabled by injecting an + instance of a custom subclass when constructing the BigtableInstanceAdminRestTransport. + + .. 
code-block:: python + class MyCustomBigtableInstanceAdminInterceptor(BigtableInstanceAdminRestInterceptor): + def pre_create_app_profile(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_create_app_profile(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_create_cluster(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_create_cluster(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_create_instance(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_create_instance(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_delete_app_profile(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def pre_delete_cluster(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def pre_delete_instance(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def pre_get_app_profile(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_app_profile(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_get_cluster(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_cluster(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_get_iam_policy(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_iam_policy(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_get_instance(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_instance(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_app_profiles(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_app_profiles(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_clusters(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_clusters(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_hot_tablets(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_hot_tablets(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_instances(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_instances(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_partial_update_cluster(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_partial_update_cluster(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_partial_update_instance(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_partial_update_instance(self, response): + 
logging.log(f"Received response: {response}") + return response + + def pre_set_iam_policy(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_set_iam_policy(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_test_iam_permissions(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_test_iam_permissions(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_update_app_profile(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_update_app_profile(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_update_cluster(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_update_cluster(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_update_instance(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_update_instance(self, response): + logging.log(f"Received response: {response}") + return response + + transport = BigtableInstanceAdminRestTransport(interceptor=MyCustomBigtableInstanceAdminInterceptor()) + client = BigtableInstanceAdminClient(transport=transport) + + + """ + + def pre_create_app_profile( + self, + request: bigtable_instance_admin.CreateAppProfileRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + bigtable_instance_admin.CreateAppProfileRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for create_app_profile + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def post_create_app_profile( + self, response: instance.AppProfile + ) -> instance.AppProfile: + """Post-rpc interceptor for create_app_profile + + Override in a subclass to manipulate the response + after it is returned by the BigtableInstanceAdmin server but before + it is returned to user code. + """ + return response + + def pre_create_cluster( + self, + request: bigtable_instance_admin.CreateClusterRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[bigtable_instance_admin.CreateClusterRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for create_cluster + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def post_create_cluster( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for create_cluster + + Override in a subclass to manipulate the response + after it is returned by the BigtableInstanceAdmin server but before + it is returned to user code. + """ + return response + + def pre_create_instance( + self, + request: bigtable_instance_admin.CreateInstanceRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + bigtable_instance_admin.CreateInstanceRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for create_instance + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. 
+ """ + return request, metadata + + def post_create_instance( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for create_instance + + Override in a subclass to manipulate the response + after it is returned by the BigtableInstanceAdmin server but before + it is returned to user code. + """ + return response + + def pre_delete_app_profile( + self, + request: bigtable_instance_admin.DeleteAppProfileRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + bigtable_instance_admin.DeleteAppProfileRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for delete_app_profile + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def pre_delete_cluster( + self, + request: bigtable_instance_admin.DeleteClusterRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[bigtable_instance_admin.DeleteClusterRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for delete_cluster + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def pre_delete_instance( + self, + request: bigtable_instance_admin.DeleteInstanceRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + bigtable_instance_admin.DeleteInstanceRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for delete_instance + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def pre_get_app_profile( + self, + request: bigtable_instance_admin.GetAppProfileRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[bigtable_instance_admin.GetAppProfileRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_app_profile + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def post_get_app_profile( + self, response: instance.AppProfile + ) -> instance.AppProfile: + """Post-rpc interceptor for get_app_profile + + Override in a subclass to manipulate the response + after it is returned by the BigtableInstanceAdmin server but before + it is returned to user code. + """ + return response + + def pre_get_cluster( + self, + request: bigtable_instance_admin.GetClusterRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[bigtable_instance_admin.GetClusterRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_cluster + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def post_get_cluster(self, response: instance.Cluster) -> instance.Cluster: + """Post-rpc interceptor for get_cluster + + Override in a subclass to manipulate the response + after it is returned by the BigtableInstanceAdmin server but before + it is returned to user code. + """ + return response + + def pre_get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[iam_policy_pb2.GetIamPolicyRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_iam_policy + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. 
+ """ + return request, metadata + + def post_get_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: + """Post-rpc interceptor for get_iam_policy + + Override in a subclass to manipulate the response + after it is returned by the BigtableInstanceAdmin server but before + it is returned to user code. + """ + return response + + def pre_get_instance( + self, + request: bigtable_instance_admin.GetInstanceRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[bigtable_instance_admin.GetInstanceRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_instance + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def post_get_instance(self, response: instance.Instance) -> instance.Instance: + """Post-rpc interceptor for get_instance + + Override in a subclass to manipulate the response + after it is returned by the BigtableInstanceAdmin server but before + it is returned to user code. + """ + return response + + def pre_list_app_profiles( + self, + request: bigtable_instance_admin.ListAppProfilesRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + bigtable_instance_admin.ListAppProfilesRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for list_app_profiles + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def post_list_app_profiles( + self, response: bigtable_instance_admin.ListAppProfilesResponse + ) -> bigtable_instance_admin.ListAppProfilesResponse: + """Post-rpc interceptor for list_app_profiles + + Override in a subclass to manipulate the response + after it is returned by the BigtableInstanceAdmin server but before + it is returned to user code. + """ + return response + + def pre_list_clusters( + self, + request: bigtable_instance_admin.ListClustersRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[bigtable_instance_admin.ListClustersRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_clusters + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def post_list_clusters( + self, response: bigtable_instance_admin.ListClustersResponse + ) -> bigtable_instance_admin.ListClustersResponse: + """Post-rpc interceptor for list_clusters + + Override in a subclass to manipulate the response + after it is returned by the BigtableInstanceAdmin server but before + it is returned to user code. + """ + return response + + def pre_list_hot_tablets( + self, + request: bigtable_instance_admin.ListHotTabletsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + bigtable_instance_admin.ListHotTabletsRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for list_hot_tablets + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def post_list_hot_tablets( + self, response: bigtable_instance_admin.ListHotTabletsResponse + ) -> bigtable_instance_admin.ListHotTabletsResponse: + """Post-rpc interceptor for list_hot_tablets + + Override in a subclass to manipulate the response + after it is returned by the BigtableInstanceAdmin server but before + it is returned to user code. 
+ """ + return response + + def pre_list_instances( + self, + request: bigtable_instance_admin.ListInstancesRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[bigtable_instance_admin.ListInstancesRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_instances + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def post_list_instances( + self, response: bigtable_instance_admin.ListInstancesResponse + ) -> bigtable_instance_admin.ListInstancesResponse: + """Post-rpc interceptor for list_instances + + Override in a subclass to manipulate the response + after it is returned by the BigtableInstanceAdmin server but before + it is returned to user code. + """ + return response + + def pre_partial_update_cluster( + self, + request: bigtable_instance_admin.PartialUpdateClusterRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + bigtable_instance_admin.PartialUpdateClusterRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for partial_update_cluster + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def post_partial_update_cluster( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for partial_update_cluster + + Override in a subclass to manipulate the response + after it is returned by the BigtableInstanceAdmin server but before + it is returned to user code. + """ + return response + + def pre_partial_update_instance( + self, + request: bigtable_instance_admin.PartialUpdateInstanceRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + bigtable_instance_admin.PartialUpdateInstanceRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for partial_update_instance + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def post_partial_update_instance( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for partial_update_instance + + Override in a subclass to manipulate the response + after it is returned by the BigtableInstanceAdmin server but before + it is returned to user code. + """ + return response + + def pre_set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[iam_policy_pb2.SetIamPolicyRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for set_iam_policy + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def post_set_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: + """Post-rpc interceptor for set_iam_policy + + Override in a subclass to manipulate the response + after it is returned by the BigtableInstanceAdmin server but before + it is returned to user code. + """ + return response + + def pre_test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[iam_policy_pb2.TestIamPermissionsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for test_iam_permissions + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. 
+ """ + return request, metadata + + def post_test_iam_permissions( + self, response: iam_policy_pb2.TestIamPermissionsResponse + ) -> iam_policy_pb2.TestIamPermissionsResponse: + """Post-rpc interceptor for test_iam_permissions + + Override in a subclass to manipulate the response + after it is returned by the BigtableInstanceAdmin server but before + it is returned to user code. + """ + return response + + def pre_update_app_profile( + self, + request: bigtable_instance_admin.UpdateAppProfileRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + bigtable_instance_admin.UpdateAppProfileRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for update_app_profile + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def post_update_app_profile( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for update_app_profile + + Override in a subclass to manipulate the response + after it is returned by the BigtableInstanceAdmin server but before + it is returned to user code. + """ + return response + + def pre_update_cluster( + self, request: instance.Cluster, metadata: Sequence[Tuple[str, str]] + ) -> Tuple[instance.Cluster, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for update_cluster + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def post_update_cluster( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for update_cluster + + Override in a subclass to manipulate the response + after it is returned by the BigtableInstanceAdmin server but before + it is returned to user code. + """ + return response + + def pre_update_instance( + self, request: instance.Instance, metadata: Sequence[Tuple[str, str]] + ) -> Tuple[instance.Instance, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for update_instance + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def post_update_instance(self, response: instance.Instance) -> instance.Instance: + """Post-rpc interceptor for update_instance + + Override in a subclass to manipulate the response + after it is returned by the BigtableInstanceAdmin server but before + it is returned to user code. + """ + return response + + +@dataclasses.dataclass +class BigtableInstanceAdminRestStub: + _session: AuthorizedSession + _host: str + _interceptor: BigtableInstanceAdminRestInterceptor + + +class BigtableInstanceAdminRestTransport(BigtableInstanceAdminTransport): + """REST backend transport for BigtableInstanceAdmin. + + Service for creating, configuring, and deleting Cloud + Bigtable Instances and Clusters. Provides access to the Instance + and Cluster schemas only, not the tables' metadata or data + stored in those tables. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. 
+ + It sends JSON representations of protocol buffers over HTTP/1.1 + + """ + + def __init__( + self, + *, + host: str = "bigtableadmin.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + url_scheme: str = "https", + interceptor: Optional[BigtableInstanceAdminRestInterceptor] = None, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host) + if maybe_url_match is None: + raise ValueError( + f"Unexpected hostname structure: {host}" + ) # pragma: NO COVER + + url_match_items = maybe_url_match.groupdict() + + host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host + + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST + ) + self._operations_client: Optional[operations_v1.AbstractOperationsClient] = None + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._interceptor = interceptor or BigtableInstanceAdminRestInterceptor() + self._prep_wrapped_messages(client_info) + + @property + def operations_client(self) -> operations_v1.AbstractOperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Only create a new client if we do not already have one.
+ if self._operations_client is None: + http_options: Dict[str, List[Dict[str, str]]] = { + "google.longrunning.Operations.CancelOperation": [ + { + "method": "post", + "uri": "/v2/{name=operations/**}:cancel", + }, + ], + "google.longrunning.Operations.DeleteOperation": [ + { + "method": "delete", + "uri": "/v2/{name=operations/**}", + }, + ], + "google.longrunning.Operations.GetOperation": [ + { + "method": "get", + "uri": "/v2/{name=operations/**}", + }, + ], + "google.longrunning.Operations.ListOperations": [ + { + "method": "get", + "uri": "/v2/{name=operations/projects/**}/operations", + }, + ], + } + + rest_transport = operations_v1.OperationsRestTransport( + host=self._host, + # use the credentials which are saved + credentials=self._credentials, + scopes=self._scopes, + http_options=http_options, + path_prefix="v2", + ) + + self._operations_client = operations_v1.AbstractOperationsClient( + transport=rest_transport + ) + + # Return the client from cache. + return self._operations_client + + class _CreateAppProfile(BigtableInstanceAdminRestStub): + def __hash__(self): + return hash("CreateAppProfile") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "appProfileId": "", + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable_instance_admin.CreateAppProfileRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> instance.AppProfile: + r"""Call the create app profile method over HTTP. + + Args: + request (~.bigtable_instance_admin.CreateAppProfileRequest): + The request object. Request message for + BigtableInstanceAdmin.CreateAppProfile. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.instance.AppProfile: + A configuration object describing how + Cloud Bigtable should treat traffic from + a particular end user application. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{parent=projects/*/instances/*}/appProfiles", + "body": "app_profile", + }, + ] + request, metadata = self._interceptor.pre_create_app_profile( + request, metadata + ) + pb_request = bigtable_instance_admin.CreateAppProfileRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = instance.AppProfile() + pb_resp = instance.AppProfile.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_app_profile(resp) + return resp + + class _CreateCluster(BigtableInstanceAdminRestStub): + def __hash__(self): + return hash("CreateCluster") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "clusterId": "", + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable_instance_admin.CreateClusterRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Call the create cluster method over HTTP. + + Args: + request (~.bigtable_instance_admin.CreateClusterRequest): + The request object. Request message for + BigtableInstanceAdmin.CreateCluster. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{parent=projects/*/instances/*}/clusters", + "body": "cluster", + }, + ] + request, metadata = self._interceptor.pre_create_cluster(request, metadata) + pb_request = bigtable_instance_admin.CreateClusterRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_cluster(resp) + return resp + + class _CreateInstance(BigtableInstanceAdminRestStub): + def __hash__(self): + return hash("CreateInstance") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable_instance_admin.CreateInstanceRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Call the create instance method over HTTP. + + Args: + request (~.bigtable_instance_admin.CreateInstanceRequest): + The request object. Request message for + BigtableInstanceAdmin.CreateInstance. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{parent=projects/*}/instances", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_create_instance(request, metadata) + pb_request = bigtable_instance_admin.CreateInstanceRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_instance(resp) + return resp + + class _DeleteAppProfile(BigtableInstanceAdminRestStub): + def __hash__(self): + return hash("DeleteAppProfile") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "ignoreWarnings": False, + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable_instance_admin.DeleteAppProfileRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ): + r"""Call the delete app profile method over HTTP. + + Args: + request (~.bigtable_instance_admin.DeleteAppProfileRequest): + The request object. Request message for + BigtableInstanceAdmin.DeleteAppProfile. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/v2/{name=projects/*/instances/*/appProfiles/*}", + }, + ] + request, metadata = self._interceptor.pre_delete_app_profile( + request, metadata + ) + pb_request = bigtable_instance_admin.DeleteAppProfileRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + class _DeleteCluster(BigtableInstanceAdminRestStub): + def __hash__(self): + return hash("DeleteCluster") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable_instance_admin.DeleteClusterRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ): + r"""Call the delete cluster method over HTTP. + + Args: + request (~.bigtable_instance_admin.DeleteClusterRequest): + The request object. Request message for + BigtableInstanceAdmin.DeleteCluster. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/v2/{name=projects/*/instances/*/clusters/*}", + }, + ] + request, metadata = self._interceptor.pre_delete_cluster(request, metadata) + pb_request = bigtable_instance_admin.DeleteClusterRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + class _DeleteInstance(BigtableInstanceAdminRestStub): + def __hash__(self): + return hash("DeleteInstance") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable_instance_admin.DeleteInstanceRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ): + r"""Call the delete instance method over HTTP. + + Args: + request (~.bigtable_instance_admin.DeleteInstanceRequest): + The request object. Request message for + BigtableInstanceAdmin.DeleteInstance. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/v2/{name=projects/*/instances/*}", + }, + ] + request, metadata = self._interceptor.pre_delete_instance(request, metadata) + pb_request = bigtable_instance_admin.DeleteInstanceRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + class _GetAppProfile(BigtableInstanceAdminRestStub): + def __hash__(self): + return hash("GetAppProfile") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable_instance_admin.GetAppProfileRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> instance.AppProfile: + r"""Call the get app profile method over HTTP. + + Args: + request (~.bigtable_instance_admin.GetAppProfileRequest): + The request object. Request message for + BigtableInstanceAdmin.GetAppProfile. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.instance.AppProfile: + A configuration object describing how + Cloud Bigtable should treat traffic from + a particular end user application. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{name=projects/*/instances/*/appProfiles/*}", + }, + ] + request, metadata = self._interceptor.pre_get_app_profile(request, metadata) + pb_request = bigtable_instance_admin.GetAppProfileRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = instance.AppProfile() + pb_resp = instance.AppProfile.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_app_profile(resp) + return resp + + class _GetCluster(BigtableInstanceAdminRestStub): + def __hash__(self): + return hash("GetCluster") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable_instance_admin.GetClusterRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> instance.Cluster: + r"""Call the get cluster method over HTTP. + + Args: + request (~.bigtable_instance_admin.GetClusterRequest): + The request object. Request message for + BigtableInstanceAdmin.GetCluster. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.instance.Cluster: + A resizable group of nodes in a particular cloud + location, capable of serving all + [Tables][google.bigtable.admin.v2.Table] in the parent + [Instance][google.bigtable.admin.v2.Instance]. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{name=projects/*/instances/*/clusters/*}", + }, + ] + request, metadata = self._interceptor.pre_get_cluster(request, metadata) + pb_request = bigtable_instance_admin.GetClusterRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = instance.Cluster() + pb_resp = instance.Cluster.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_cluster(resp) + return resp + + class _GetIamPolicy(BigtableInstanceAdminRestStub): + def __hash__(self): + return hash("GetIamPolicy") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: iam_policy_pb2.GetIamPolicyRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Call the get iam policy method over HTTP. + + Args: + request (~.iam_policy_pb2.GetIamPolicyRequest): + The request object. Request message for ``GetIamPolicy`` method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.policy_pb2.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. + + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members``, or + principals, to a single ``role``. Principals can be user + accounts, service accounts, Google groups, and domains + (such as G Suite). A ``role`` is a named list of + permissions; each ``role`` can be an IAM predefined role + or a user-created custom role. + + For some types of Google Cloud resources, a ``binding`` + can also specify a ``condition``, which is a logical + expression that allows access to a resource only if the + expression evaluates to ``true``. A condition can add + constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the `IAM + documentation `__. 
+ + **JSON example:** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": [ + "user:eve@example.com" + ], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", + } + } + ], + "etag": "BwWWja0YfJA=", + "version": 3 + } + + **YAML example:** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + etag: BwWWja0YfJA= + version: 3 + + For a description of IAM and its features, see the `IAM + documentation `__. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{resource=projects/*/instances/*}:getIamPolicy", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_get_iam_policy(request, metadata) + pb_request = request + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = policy_pb2.Policy() + pb_resp = resp + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_iam_policy(resp) + return resp + + class _GetInstance(BigtableInstanceAdminRestStub): + def __hash__(self): + return hash("GetInstance") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable_instance_admin.GetInstanceRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> instance.Instance: + r"""Call the get instance method over HTTP. 
+ + Args: + request (~.bigtable_instance_admin.GetInstanceRequest): + The request object. Request message for + BigtableInstanceAdmin.GetInstance. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.instance.Instance: + A collection of Bigtable + [Tables][google.bigtable.admin.v2.Table] and the + resources that serve them. All tables in an instance are + served from all + [Clusters][google.bigtable.admin.v2.Cluster] in the + instance. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{name=projects/*/instances/*}", + }, + ] + request, metadata = self._interceptor.pre_get_instance(request, metadata) + pb_request = bigtable_instance_admin.GetInstanceRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = instance.Instance() + pb_resp = instance.Instance.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_instance(resp) + return resp + + class _ListAppProfiles(BigtableInstanceAdminRestStub): + def __hash__(self): + return hash("ListAppProfiles") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable_instance_admin.ListAppProfilesRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> bigtable_instance_admin.ListAppProfilesResponse: + r"""Call the list app profiles method over HTTP. + + Args: + request (~.bigtable_instance_admin.ListAppProfilesRequest): + The request object. Request message for + BigtableInstanceAdmin.ListAppProfiles. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.bigtable_instance_admin.ListAppProfilesResponse: + Response message for + BigtableInstanceAdmin.ListAppProfiles. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{parent=projects/*/instances/*}/appProfiles", + }, + ] + request, metadata = self._interceptor.pre_list_app_profiles( + request, metadata + ) + pb_request = bigtable_instance_admin.ListAppProfilesRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = bigtable_instance_admin.ListAppProfilesResponse() + pb_resp = bigtable_instance_admin.ListAppProfilesResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_app_profiles(resp) + return resp + + class _ListClusters(BigtableInstanceAdminRestStub): + def __hash__(self): + return hash("ListClusters") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable_instance_admin.ListClustersRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> bigtable_instance_admin.ListClustersResponse: + r"""Call the list clusters method over HTTP. + + Args: + request (~.bigtable_instance_admin.ListClustersRequest): + The request object. Request message for + BigtableInstanceAdmin.ListClusters. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.bigtable_instance_admin.ListClustersResponse: + Response message for + BigtableInstanceAdmin.ListClusters. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{parent=projects/*/instances/*}/clusters", + }, + ] + request, metadata = self._interceptor.pre_list_clusters(request, metadata) + pb_request = bigtable_instance_admin.ListClustersRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = bigtable_instance_admin.ListClustersResponse() + pb_resp = bigtable_instance_admin.ListClustersResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_clusters(resp) + return resp + + class _ListHotTablets(BigtableInstanceAdminRestStub): + def __hash__(self): + return hash("ListHotTablets") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable_instance_admin.ListHotTabletsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> bigtable_instance_admin.ListHotTabletsResponse: + r"""Call the list hot tablets method over HTTP. + + Args: + request (~.bigtable_instance_admin.ListHotTabletsRequest): + The request object. Request message for + BigtableInstanceAdmin.ListHotTablets. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.bigtable_instance_admin.ListHotTabletsResponse: + Response message for + BigtableInstanceAdmin.ListHotTablets. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{parent=projects/*/instances/*/clusters/*}/hotTablets", + }, + ] + request, metadata = self._interceptor.pre_list_hot_tablets( + request, metadata + ) + pb_request = bigtable_instance_admin.ListHotTabletsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = bigtable_instance_admin.ListHotTabletsResponse() + pb_resp = bigtable_instance_admin.ListHotTabletsResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_hot_tablets(resp) + return resp + + class _ListInstances(BigtableInstanceAdminRestStub): + def __hash__(self): + return hash("ListInstances") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable_instance_admin.ListInstancesRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> bigtable_instance_admin.ListInstancesResponse: + r"""Call the list instances method over HTTP. + + Args: + request (~.bigtable_instance_admin.ListInstancesRequest): + The request object. Request message for + BigtableInstanceAdmin.ListInstances. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.bigtable_instance_admin.ListInstancesResponse: + Response message for + BigtableInstanceAdmin.ListInstances. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{parent=projects/*}/instances", + }, + ] + request, metadata = self._interceptor.pre_list_instances(request, metadata) + pb_request = bigtable_instance_admin.ListInstancesRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = bigtable_instance_admin.ListInstancesResponse() + pb_resp = bigtable_instance_admin.ListInstancesResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_instances(resp) + return resp + + class _PartialUpdateCluster(BigtableInstanceAdminRestStub): + def __hash__(self): + return hash("PartialUpdateCluster") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "updateMask": {}, + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable_instance_admin.PartialUpdateClusterRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Call the partial update cluster method over HTTP. + + Args: + request (~.bigtable_instance_admin.PartialUpdateClusterRequest): + The request object. Request message for + BigtableInstanceAdmin.PartialUpdateCluster. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "patch", + "uri": "/v2/{cluster.name=projects/*/instances/*/clusters/*}", + "body": "cluster", + }, + ] + request, metadata = self._interceptor.pre_partial_update_cluster( + request, metadata + ) + pb_request = bigtable_instance_admin.PartialUpdateClusterRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_partial_update_cluster(resp) + return resp + + class _PartialUpdateInstance(BigtableInstanceAdminRestStub): + def __hash__(self): + return hash("PartialUpdateInstance") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "updateMask": {}, + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable_instance_admin.PartialUpdateInstanceRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Call the partial update instance method over HTTP. + + Args: + request (~.bigtable_instance_admin.PartialUpdateInstanceRequest): + The request object. Request message for + BigtableInstanceAdmin.PartialUpdateInstance. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "patch", + "uri": "/v2/{instance.name=projects/*/instances/*}", + "body": "instance", + }, + ] + request, metadata = self._interceptor.pre_partial_update_instance( + request, metadata + ) + pb_request = bigtable_instance_admin.PartialUpdateInstanceRequest.pb( + request + ) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_partial_update_instance(resp) + return resp + + class _SetIamPolicy(BigtableInstanceAdminRestStub): + def __hash__(self): + return hash("SetIamPolicy") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: iam_policy_pb2.SetIamPolicyRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Call the set iam policy method over HTTP. + + Args: + request (~.iam_policy_pb2.SetIamPolicyRequest): + The request object. Request message for ``SetIamPolicy`` method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.policy_pb2.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. + + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members``, or + principals, to a single ``role``. Principals can be user + accounts, service accounts, Google groups, and domains + (such as G Suite). A ``role`` is a named list of + permissions; each ``role`` can be an IAM predefined role + or a user-created custom role. + + For some types of Google Cloud resources, a ``binding`` + can also specify a ``condition``, which is a logical + expression that allows access to a resource only if the + expression evaluates to ``true``. A condition can add + constraints based on attributes of the request, the + resource, or both. 
To learn which resources support + conditions in their IAM policies, see the `IAM + documentation `__. + + **JSON example:** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": [ + "user:eve@example.com" + ], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", + } + } + ], + "etag": "BwWWja0YfJA=", + "version": 3 + } + + **YAML example:** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + etag: BwWWja0YfJA= + version: 3 + + For a description of IAM and its features, see the `IAM + documentation `__. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{resource=projects/*/instances/*}:setIamPolicy", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_set_iam_policy(request, metadata) + pb_request = request + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
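+        # (from_http_response inspects the status code and returns the matching
+        # google.api_core.exceptions subclass, e.g. 403 -> PermissionDenied,
+        # 404 -> NotFound.)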
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = policy_pb2.Policy() + pb_resp = resp + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_set_iam_policy(resp) + return resp + + class _TestIamPermissions(BigtableInstanceAdminRestStub): + def __hash__(self): + return hash("TestIamPermissions") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: iam_policy_pb2.TestIamPermissionsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Call the test iam permissions method over HTTP. + + Args: + request (~.iam_policy_pb2.TestIamPermissionsRequest): + The request object. Request message for ``TestIamPermissions`` method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{resource=projects/*/instances/*}:testIamPermissions", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_test_iam_permissions( + request, metadata + ) + pb_request = request + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = iam_policy_pb2.TestIamPermissionsResponse() + pb_resp = resp + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_test_iam_permissions(resp) + return resp + + class _UpdateAppProfile(BigtableInstanceAdminRestStub): + def __hash__(self): + return hash("UpdateAppProfile") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "updateMask": {}, + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable_instance_admin.UpdateAppProfileRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Call the update app profile method over HTTP. + + Args: + request (~.bigtable_instance_admin.UpdateAppProfileRequest): + The request object. Request message for + BigtableInstanceAdmin.UpdateAppProfile. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "patch", + "uri": "/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}", + "body": "app_profile", + }, + ] + request, metadata = self._interceptor.pre_update_app_profile( + request, metadata + ) + pb_request = bigtable_instance_admin.UpdateAppProfileRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
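+        # (A 2xx response falls through to the parsing below, which returns the
+        # long-running Operation tracking the app profile update.)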
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_update_app_profile(resp) + return resp + + class _UpdateCluster(BigtableInstanceAdminRestStub): + def __hash__(self): + return hash("UpdateCluster") + + def __call__( + self, + request: instance.Cluster, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Call the update cluster method over HTTP. + + Args: + request (~.instance.Cluster): + The request object. A resizable group of nodes in a particular cloud + location, capable of serving all + [Tables][google.bigtable.admin.v2.Table] in the parent + [Instance][google.bigtable.admin.v2.Instance]. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "put", + "uri": "/v2/{name=projects/*/instances/*/clusters/*}", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_update_cluster(request, metadata) + pb_request = instance.Cluster.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_update_cluster(resp) + return resp + + class _UpdateInstance(BigtableInstanceAdminRestStub): + def __hash__(self): + return hash("UpdateInstance") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: instance.Instance, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> instance.Instance: + r"""Call the update instance method over HTTP. 
+ + Args: + request (~.instance.Instance): + The request object. A collection of Bigtable + [Tables][google.bigtable.admin.v2.Table] and the + resources that serve them. All tables in an instance are + served from all + [Clusters][google.bigtable.admin.v2.Cluster] in the + instance. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.instance.Instance: + A collection of Bigtable + [Tables][google.bigtable.admin.v2.Table] and the + resources that serve them. All tables in an instance are + served from all + [Clusters][google.bigtable.admin.v2.Cluster] in the + instance. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "put", + "uri": "/v2/{name=projects/*/instances/*}", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_update_instance(request, metadata) + pb_request = instance.Instance.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = instance.Instance() + pb_resp = instance.Instance.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_update_instance(resp) + return resp + + @property + def create_app_profile( + self, + ) -> Callable[ + [bigtable_instance_admin.CreateAppProfileRequest], instance.AppProfile + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._CreateAppProfile(self._session, self._host, self._interceptor) # type: ignore + + @property + def create_cluster( + self, + ) -> Callable[ + [bigtable_instance_admin.CreateClusterRequest], operations_pb2.Operation + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._CreateCluster(self._session, self._host, self._interceptor) # type: ignore + + @property + def create_instance( + self, + ) -> Callable[ + [bigtable_instance_admin.CreateInstanceRequest], operations_pb2.Operation + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._CreateInstance(self._session, self._host, self._interceptor) # type: ignore + + @property + def delete_app_profile( + self, + ) -> Callable[[bigtable_instance_admin.DeleteAppProfileRequest], empty_pb2.Empty]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._DeleteAppProfile(self._session, self._host, self._interceptor) # type: ignore + + @property + def delete_cluster( + self, + ) -> Callable[[bigtable_instance_admin.DeleteClusterRequest], empty_pb2.Empty]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._DeleteCluster(self._session, self._host, self._interceptor) # type: ignore + + @property + def delete_instance( + self, + ) -> Callable[[bigtable_instance_admin.DeleteInstanceRequest], empty_pb2.Empty]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._DeleteInstance(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_app_profile( + self, + ) -> Callable[[bigtable_instance_admin.GetAppProfileRequest], instance.AppProfile]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetAppProfile(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_cluster( + self, + ) -> Callable[[bigtable_instance_admin.GetClusterRequest], instance.Cluster]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetCluster(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetIamPolicy(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_instance( + self, + ) -> Callable[[bigtable_instance_admin.GetInstanceRequest], instance.Instance]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetInstance(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_app_profiles( + self, + ) -> Callable[ + [bigtable_instance_admin.ListAppProfilesRequest], + bigtable_instance_admin.ListAppProfilesResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListAppProfiles(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_clusters( + self, + ) -> Callable[ + [bigtable_instance_admin.ListClustersRequest], + bigtable_instance_admin.ListClustersResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._ListClusters(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_hot_tablets( + self, + ) -> Callable[ + [bigtable_instance_admin.ListHotTabletsRequest], + bigtable_instance_admin.ListHotTabletsResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListHotTablets(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_instances( + self, + ) -> Callable[ + [bigtable_instance_admin.ListInstancesRequest], + bigtable_instance_admin.ListInstancesResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListInstances(self._session, self._host, self._interceptor) # type: ignore + + @property + def partial_update_cluster( + self, + ) -> Callable[ + [bigtable_instance_admin.PartialUpdateClusterRequest], operations_pb2.Operation + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._PartialUpdateCluster(self._session, self._host, self._interceptor) # type: ignore + + @property + def partial_update_instance( + self, + ) -> Callable[ + [bigtable_instance_admin.PartialUpdateInstanceRequest], operations_pb2.Operation + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._PartialUpdateInstance(self._session, self._host, self._interceptor) # type: ignore + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._SetIamPolicy(self._session, self._host, self._interceptor) # type: ignore + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + iam_policy_pb2.TestIamPermissionsResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._TestIamPermissions(self._session, self._host, self._interceptor) # type: ignore + + @property + def update_app_profile( + self, + ) -> Callable[ + [bigtable_instance_admin.UpdateAppProfileRequest], operations_pb2.Operation + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._UpdateAppProfile(self._session, self._host, self._interceptor) # type: ignore + + @property + def update_cluster(self) -> Callable[[instance.Cluster], operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._UpdateCluster(self._session, self._host, self._interceptor) # type: ignore + + @property + def update_instance(self) -> Callable[[instance.Instance], instance.Instance]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._UpdateInstance(self._session, self._host, self._interceptor) # type: ignore + + @property + def kind(self) -> str: + return "rest" + + def close(self): + self._session.close() + + +__all__ = ("BigtableInstanceAdminRestTransport",) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index a68c32f67e77..aa7eaa197120 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -59,6 +59,7 @@ from .transports.base import BigtableTableAdminTransport, DEFAULT_CLIENT_INFO from .transports.grpc import BigtableTableAdminGrpcTransport from .transports.grpc_asyncio import BigtableTableAdminGrpcAsyncIOTransport +from .transports.rest import BigtableTableAdminRestTransport class BigtableTableAdminClientMeta(type): @@ -74,6 +75,7 @@ class BigtableTableAdminClientMeta(type): ) # type: Dict[str, Type[BigtableTableAdminTransport]] _transport_registry["grpc"] = BigtableTableAdminGrpcTransport _transport_registry["grpc_asyncio"] = BigtableTableAdminGrpcAsyncIOTransport + _transport_registry["rest"] = BigtableTableAdminRestTransport def get_transport_class( cls, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py index 78a7850e4598..585b4e437c6e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py @@ -19,6 +19,8 @@ from .base import BigtableTableAdminTransport from .grpc import BigtableTableAdminGrpcTransport from .grpc_asyncio import BigtableTableAdminGrpcAsyncIOTransport +from .rest import BigtableTableAdminRestTransport +from .rest import BigtableTableAdminRestInterceptor # Compile a registry of transports. @@ -27,9 +29,12 @@ ) # type: Dict[str, Type[BigtableTableAdminTransport]] _transport_registry["grpc"] = BigtableTableAdminGrpcTransport _transport_registry["grpc_asyncio"] = BigtableTableAdminGrpcAsyncIOTransport +_transport_registry["rest"] = BigtableTableAdminRestTransport __all__ = ( "BigtableTableAdminTransport", "BigtableTableAdminGrpcTransport", "BigtableTableAdminGrpcAsyncIOTransport", + "BigtableTableAdminRestTransport", + "BigtableTableAdminRestInterceptor", ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py new file mode 100644 index 000000000000..cf16261fd01d --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py @@ -0,0 +1,3595 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import rest_streaming +from google.api_core import path_template +from google.api_core import gapic_v1 + +from google.protobuf import json_format +from google.api_core import operations_v1 +from requests import __version__ as requests_version +import dataclasses +import re +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + + +from google.cloud.bigtable_admin_v2.types import bigtable_table_admin +from google.cloud.bigtable_admin_v2.types import table +from google.cloud.bigtable_admin_v2.types import table as gba_table +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore + +from .base import ( + BigtableTableAdminTransport, + DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO, +) + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + + +class BigtableTableAdminRestInterceptor: + """Interceptor for BigtableTableAdmin. + + Interceptors are used to manipulate requests, request metadata, and responses + in arbitrary ways. + Example use cases include: + * Logging + * Verifying requests according to service or custom semantics + * Stripping extraneous information from responses + + These use cases and more can be enabled by injecting an + instance of a custom subclass when constructing the BigtableTableAdminRestTransport. + + .. 
code-block:: python + class MyCustomBigtableTableAdminInterceptor(BigtableTableAdminRestInterceptor): + def pre_check_consistency(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_check_consistency(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_create_backup(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_create_backup(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_create_table(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_create_table(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_create_table_from_snapshot(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_create_table_from_snapshot(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_delete_backup(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def pre_delete_snapshot(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def pre_delete_table(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def pre_drop_row_range(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def pre_generate_consistency_token(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_generate_consistency_token(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_get_backup(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_backup(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_get_iam_policy(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_iam_policy(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_get_snapshot(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_snapshot(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_get_table(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_table(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_backups(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_backups(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_snapshots(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_snapshots(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_tables(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_tables(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_modify_column_families(self, request, metadata): + logging.log(f"Received 
request: {request}") + return request, metadata + + def post_modify_column_families(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_restore_table(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_restore_table(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_set_iam_policy(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_set_iam_policy(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_snapshot_table(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_snapshot_table(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_test_iam_permissions(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_test_iam_permissions(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_undelete_table(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_undelete_table(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_update_backup(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_update_backup(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_update_table(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_update_table(self, response): + logging.log(f"Received response: {response}") + return response + + transport = BigtableTableAdminRestTransport(interceptor=MyCustomBigtableTableAdminInterceptor()) + client = BigtableTableAdminClient(transport=transport) + + + """ + + def pre_check_consistency( + self, + request: bigtable_table_admin.CheckConsistencyRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[bigtable_table_admin.CheckConsistencyRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for check_consistency + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_check_consistency( + self, response: bigtable_table_admin.CheckConsistencyResponse + ) -> bigtable_table_admin.CheckConsistencyResponse: + """Post-rpc interceptor for check_consistency + + Override in a subclass to manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. + """ + return response + + def pre_create_backup( + self, + request: bigtable_table_admin.CreateBackupRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[bigtable_table_admin.CreateBackupRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for create_backup + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_create_backup( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for create_backup + + Override in a subclass to manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. 
+ """ + return response + + def pre_create_table( + self, + request: bigtable_table_admin.CreateTableRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[bigtable_table_admin.CreateTableRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for create_table + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_create_table(self, response: gba_table.Table) -> gba_table.Table: + """Post-rpc interceptor for create_table + + Override in a subclass to manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. + """ + return response + + def pre_create_table_from_snapshot( + self, + request: bigtable_table_admin.CreateTableFromSnapshotRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + bigtable_table_admin.CreateTableFromSnapshotRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for create_table_from_snapshot + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_create_table_from_snapshot( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for create_table_from_snapshot + + Override in a subclass to manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. + """ + return response + + def pre_delete_backup( + self, + request: bigtable_table_admin.DeleteBackupRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[bigtable_table_admin.DeleteBackupRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for delete_backup + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def pre_delete_snapshot( + self, + request: bigtable_table_admin.DeleteSnapshotRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[bigtable_table_admin.DeleteSnapshotRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for delete_snapshot + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def pre_delete_table( + self, + request: bigtable_table_admin.DeleteTableRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[bigtable_table_admin.DeleteTableRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for delete_table + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def pre_drop_row_range( + self, + request: bigtable_table_admin.DropRowRangeRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[bigtable_table_admin.DropRowRangeRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for drop_row_range + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. 
+ """ + return request, metadata + + def pre_generate_consistency_token( + self, + request: bigtable_table_admin.GenerateConsistencyTokenRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + bigtable_table_admin.GenerateConsistencyTokenRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for generate_consistency_token + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_generate_consistency_token( + self, response: bigtable_table_admin.GenerateConsistencyTokenResponse + ) -> bigtable_table_admin.GenerateConsistencyTokenResponse: + """Post-rpc interceptor for generate_consistency_token + + Override in a subclass to manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. + """ + return response + + def pre_get_backup( + self, + request: bigtable_table_admin.GetBackupRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[bigtable_table_admin.GetBackupRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_backup + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_get_backup(self, response: table.Backup) -> table.Backup: + """Post-rpc interceptor for get_backup + + Override in a subclass to manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. + """ + return response + + def pre_get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[iam_policy_pb2.GetIamPolicyRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_iam_policy + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_get_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: + """Post-rpc interceptor for get_iam_policy + + Override in a subclass to manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. + """ + return response + + def pre_get_snapshot( + self, + request: bigtable_table_admin.GetSnapshotRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[bigtable_table_admin.GetSnapshotRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_snapshot + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_get_snapshot(self, response: table.Snapshot) -> table.Snapshot: + """Post-rpc interceptor for get_snapshot + + Override in a subclass to manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. + """ + return response + + def pre_get_table( + self, + request: bigtable_table_admin.GetTableRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[bigtable_table_admin.GetTableRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_table + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. 
+ """ + return request, metadata + + def post_get_table(self, response: table.Table) -> table.Table: + """Post-rpc interceptor for get_table + + Override in a subclass to manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. + """ + return response + + def pre_list_backups( + self, + request: bigtable_table_admin.ListBackupsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[bigtable_table_admin.ListBackupsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_backups + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_list_backups( + self, response: bigtable_table_admin.ListBackupsResponse + ) -> bigtable_table_admin.ListBackupsResponse: + """Post-rpc interceptor for list_backups + + Override in a subclass to manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. + """ + return response + + def pre_list_snapshots( + self, + request: bigtable_table_admin.ListSnapshotsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[bigtable_table_admin.ListSnapshotsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_snapshots + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_list_snapshots( + self, response: bigtable_table_admin.ListSnapshotsResponse + ) -> bigtable_table_admin.ListSnapshotsResponse: + """Post-rpc interceptor for list_snapshots + + Override in a subclass to manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. + """ + return response + + def pre_list_tables( + self, + request: bigtable_table_admin.ListTablesRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[bigtable_table_admin.ListTablesRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_tables + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_list_tables( + self, response: bigtable_table_admin.ListTablesResponse + ) -> bigtable_table_admin.ListTablesResponse: + """Post-rpc interceptor for list_tables + + Override in a subclass to manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. + """ + return response + + def pre_modify_column_families( + self, + request: bigtable_table_admin.ModifyColumnFamiliesRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + bigtable_table_admin.ModifyColumnFamiliesRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for modify_column_families + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_modify_column_families(self, response: table.Table) -> table.Table: + """Post-rpc interceptor for modify_column_families + + Override in a subclass to manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. 
+ """ + return response + + def pre_restore_table( + self, + request: bigtable_table_admin.RestoreTableRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[bigtable_table_admin.RestoreTableRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for restore_table + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_restore_table( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for restore_table + + Override in a subclass to manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. + """ + return response + + def pre_set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[iam_policy_pb2.SetIamPolicyRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for set_iam_policy + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_set_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: + """Post-rpc interceptor for set_iam_policy + + Override in a subclass to manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. + """ + return response + + def pre_snapshot_table( + self, + request: bigtable_table_admin.SnapshotTableRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[bigtable_table_admin.SnapshotTableRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for snapshot_table + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_snapshot_table( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for snapshot_table + + Override in a subclass to manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. + """ + return response + + def pre_test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[iam_policy_pb2.TestIamPermissionsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for test_iam_permissions + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_test_iam_permissions( + self, response: iam_policy_pb2.TestIamPermissionsResponse + ) -> iam_policy_pb2.TestIamPermissionsResponse: + """Post-rpc interceptor for test_iam_permissions + + Override in a subclass to manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. + """ + return response + + def pre_undelete_table( + self, + request: bigtable_table_admin.UndeleteTableRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[bigtable_table_admin.UndeleteTableRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for undelete_table + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. 
+ """ + return request, metadata + + def post_undelete_table( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for undelete_table + + Override in a subclass to manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. + """ + return response + + def pre_update_backup( + self, + request: bigtable_table_admin.UpdateBackupRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[bigtable_table_admin.UpdateBackupRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for update_backup + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_update_backup(self, response: table.Backup) -> table.Backup: + """Post-rpc interceptor for update_backup + + Override in a subclass to manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. + """ + return response + + def pre_update_table( + self, + request: bigtable_table_admin.UpdateTableRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[bigtable_table_admin.UpdateTableRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for update_table + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_update_table( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for update_table + + Override in a subclass to manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. + """ + return response + + +@dataclasses.dataclass +class BigtableTableAdminRestStub: + _session: AuthorizedSession + _host: str + _interceptor: BigtableTableAdminRestInterceptor + + +class BigtableTableAdminRestTransport(BigtableTableAdminTransport): + """REST backend transport for BigtableTableAdmin. + + Service for creating, configuring, and deleting Cloud + Bigtable tables. + + Provides access to the table schemas only, not the data stored + within the tables. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + + """ + + def __init__( + self, + *, + host: str = "bigtableadmin.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + url_scheme: str = "https", + interceptor: Optional[BigtableTableAdminRestInterceptor] = None, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. 
+
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is ignored if ``channel`` is provided.
+ scopes (Optional(Sequence[str])): A list of scopes. This argument is
+ ignored if ``channel`` is provided.
+ client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client
+ certificate to configure mutual TLS HTTP channel. It is ignored
+ if ``channel`` is provided.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you are developing
+ your own client library.
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+ be used for service account credentials.
+ url_scheme: the protocol scheme for the API endpoint. Normally
+ "https", but for testing or local servers,
+ "http" can be specified.
+ """
+ # Run the base constructor
+ # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
+ # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
+ # credentials object
+ maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host)
+ if maybe_url_match is None:
+ raise ValueError(
+ f"Unexpected hostname structure: {host}"
+ ) # pragma: NO COVER
+
+ url_match_items = maybe_url_match.groupdict()
+
+ host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host
+
+ super().__init__(
+ host=host,
+ credentials=credentials,
+ client_info=client_info,
+ always_use_jwt_access=always_use_jwt_access,
+ api_audience=api_audience,
+ )
+ self._session = AuthorizedSession(
+ self._credentials, default_host=self.DEFAULT_HOST
+ )
+ self._operations_client: Optional[operations_v1.AbstractOperationsClient] = None
+ if client_cert_source_for_mtls:
+ self._session.configure_mtls_channel(client_cert_source_for_mtls)
+ self._interceptor = interceptor or BigtableTableAdminRestInterceptor()
+ self._prep_wrapped_messages(client_info)
+
+ @property
+ def operations_client(self) -> operations_v1.AbstractOperationsClient:
+ """Create the client designed to process long-running operations.
+
+ This property caches on the instance; repeated calls return the same
+ client.
+ """
+ # Only create a new client if we do not already have one.
+ if self._operations_client is None:
+ http_options: Dict[str, List[Dict[str, str]]] = {
+ "google.longrunning.Operations.CancelOperation": [
+ {
+ "method": "post",
+ "uri": "/v2/{name=operations/**}:cancel",
+ },
+ ],
+ "google.longrunning.Operations.DeleteOperation": [
+ {
+ "method": "delete",
+ "uri": "/v2/{name=operations/**}",
+ },
+ ],
+ "google.longrunning.Operations.GetOperation": [
+ {
+ "method": "get",
+ "uri": "/v2/{name=operations/**}",
+ },
+ ],
+ "google.longrunning.Operations.ListOperations": [
+ {
+ "method": "get",
+ "uri": "/v2/{name=operations/projects/**}/operations",
+ },
+ ],
+ }
+
+ rest_transport = operations_v1.OperationsRestTransport(
+ host=self._host,
+ # use the credentials which are saved
+ credentials=self._credentials,
+ scopes=self._scopes,
+ http_options=http_options,
+ path_prefix="v2",
+ )
+
+ self._operations_client = operations_v1.AbstractOperationsClient(
+ transport=rest_transport
+ )
+
+ # Return the client from cache.
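The interceptor hooks above are no-ops by default, so a subclass only overrides the RPCs it cares about, and the constructor just shown wires it in through the ``interceptor`` argument. A rough sketch of a logging interceptor, not part of the generated file; the import paths and the reliance on application default credentials are assumptions:

# Illustrative sketch only: a custom interceptor that logs create_table calls.
import logging

from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
    BigtableTableAdminClient,
)
from google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.rest import (
    BigtableTableAdminRestInterceptor,
    BigtableTableAdminRestTransport,
)


class LoggingInterceptor(BigtableTableAdminRestInterceptor):
    """Logs create_table traffic; every other RPC passes through unchanged."""

    def pre_create_table(self, request, metadata):
        logging.info("Creating table %r under %r", request.table_id, request.parent)
        return request, metadata

    def post_create_table(self, response):
        logging.info("Created table %s", response.name)
        return response


# Hand the interceptor to the REST transport, then the transport to the client
# (this assumes application default credentials are available).
transport = BigtableTableAdminRestTransport(interceptor=LoggingInterceptor())
client = BigtableTableAdminClient(transport=transport)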
+ return self._operations_client + + class _CheckConsistency(BigtableTableAdminRestStub): + def __hash__(self): + return hash("CheckConsistency") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable_table_admin.CheckConsistencyRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> bigtable_table_admin.CheckConsistencyResponse: + r"""Call the check consistency method over HTTP. + + Args: + request (~.bigtable_table_admin.CheckConsistencyRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.bigtable_table_admin.CheckConsistencyResponse: + Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{name=projects/*/instances/*/tables/*}:checkConsistency", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_check_consistency( + request, metadata + ) + pb_request = bigtable_table_admin.CheckConsistencyRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
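CheckConsistency is normally paired with GenerateConsistencyToken (defined further down in this module): a caller mints a token for a table and then polls until every write issued before the token has replicated. A minimal sketch using the higher-level client, assuming ``client`` is a BigtableTableAdminClient and ``table_name`` is a full table resource name:

# Sketch of the consistency workflow; names above are assumptions.
import time

token = client.generate_consistency_token(name=table_name).consistency_token

# Poll until writes issued before the token are readable from all replicas.
while not client.check_consistency(
    name=table_name, consistency_token=token
).consistent:
    time.sleep(1)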
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = bigtable_table_admin.CheckConsistencyResponse() + pb_resp = bigtable_table_admin.CheckConsistencyResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_check_consistency(resp) + return resp + + class _CreateBackup(BigtableTableAdminRestStub): + def __hash__(self): + return hash("CreateBackup") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "backupId": "", + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable_table_admin.CreateBackupRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Call the create backup method over HTTP. + + Args: + request (~.bigtable_table_admin.CreateBackupRequest): + The request object. The request for + [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup]. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{parent=projects/*/instances/*/clusters/*}/backups", + "body": "backup", + }, + ] + request, metadata = self._interceptor.pre_create_backup(request, metadata) + pb_request = bigtable_table_admin.CreateBackupRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
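At this layer create_backup returns a raw ``operations_pb2.Operation``; the GAPIC client normally wraps it in a future with ``.result()``, but the ``operations_client`` property defined above can also poll it directly. A sketch, assuming ``transport`` is the REST transport defined in this file and ``op`` is the Operation this stub returned:

# Sketch: polling a long-running backup operation at the transport level.
import time

while not op.done:
    time.sleep(5)
    op = transport.operations_client.get_operation(name=op.name)

if op.HasField("error"):
    raise RuntimeError(f"backup failed: {op.error.message}")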
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_backup(resp) + return resp + + class _CreateTable(BigtableTableAdminRestStub): + def __hash__(self): + return hash("CreateTable") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable_table_admin.CreateTableRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gba_table.Table: + r"""Call the create table method over HTTP. + + Args: + request (~.bigtable_table_admin.CreateTableRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.gba_table.Table: + A collection of user data indexed by + row, column, and timestamp. Each table + is served using the resources of its + parent cluster. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{parent=projects/*/instances/*}/tables", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_create_table(request, metadata) + pb_request = bigtable_table_admin.CreateTableRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
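Each stub carries a ``__REQUIRED_FIELDS_DEFAULT_VALUES`` map (for example ``_CreateBackup`` lists ``backupId``) because ``MessageToJson(..., including_default_value_fields=False)`` drops required fields that still hold their default value; ``_get_unset_required_fields`` adds them back so they always reach the server as query parameters. The same back-fill behaviour shown with plain dicts:

# Minimal illustration of the required-field back-fill the stubs perform.
REQUIRED_DEFAULTS = {"backupId": ""}


def get_unset_required_fields(message_dict):
    # Keep only the required keys that the serialized request left out.
    return {k: v for k, v in REQUIRED_DEFAULTS.items() if k not in message_dict}


query_params = {"parent": "projects/p/instances/i/clusters/c"}
query_params.update(get_unset_required_fields(query_params))
# query_params now also carries backupId="" so the server always sees it.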
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = gba_table.Table() + pb_resp = gba_table.Table.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_table(resp) + return resp + + class _CreateTableFromSnapshot(BigtableTableAdminRestStub): + def __hash__(self): + return hash("CreateTableFromSnapshot") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable_table_admin.CreateTableFromSnapshotRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Call the create table from + snapshot method over HTTP. + + Args: + request (~.bigtable_table_admin.CreateTableFromSnapshotRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot] + + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_create_table_from_snapshot( + request, metadata + ) + pb_request = bigtable_table_admin.CreateTableFromSnapshotRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
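``path_template.transcode`` matches the request against the ``http_options`` entries and splits its fields between the URI path, the HTTP body, and any leftover query parameters. Roughly, the result for this RPC looks like the sketch below; every value is invented, and the real ``body`` is a protobuf message that is serialized to JSON afterwards rather than a dict:

# Conceptual shape of a transcoded create_table_from_snapshot request.
transcoded_request = {
    "method": "post",
    # `parent` was folded into the URI template
    # "/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot".
    "uri": "/v2/projects/my-project/instances/my-instance/tables:createFromSnapshot",
    # body "*" sends the remaining fields as the JSON payload ...
    "body": {
        "tableId": "restored-table",
        "sourceSnapshot": (
            "projects/my-project/instances/my-instance/"
            "clusters/my-cluster/snapshots/my-snapshot"
        ),
    },
    # ... leaving nothing for the query string in this case.
    "query_params": {},
}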
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_table_from_snapshot(resp) + return resp + + class _DeleteBackup(BigtableTableAdminRestStub): + def __hash__(self): + return hash("DeleteBackup") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable_table_admin.DeleteBackupRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ): + r"""Call the delete backup method over HTTP. + + Args: + request (~.bigtable_table_admin.DeleteBackupRequest): + The request object. The request for + [DeleteBackup][google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup]. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/v2/{name=projects/*/instances/*/clusters/*/backups/*}", + }, + ] + request, metadata = self._interceptor.pre_delete_backup(request, metadata) + pb_request = bigtable_table_admin.DeleteBackupRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + class _DeleteSnapshot(BigtableTableAdminRestStub): + def __hash__(self): + return hash("DeleteSnapshot") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable_table_admin.DeleteSnapshotRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ): + r"""Call the delete snapshot method over HTTP. + + Args: + request (~.bigtable_table_admin.DeleteSnapshotRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot] + + Note: This is a private alpha release of Cloud Bigtable + snapshots. 
This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}", + }, + ] + request, metadata = self._interceptor.pre_delete_snapshot(request, metadata) + pb_request = bigtable_table_admin.DeleteSnapshotRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + class _DeleteTable(BigtableTableAdminRestStub): + def __hash__(self): + return hash("DeleteTable") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable_table_admin.DeleteTableRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ): + r"""Call the delete table method over HTTP. + + Args: + request (~.bigtable_table_admin.DeleteTableRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable] + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/v2/{name=projects/*/instances/*/tables/*}", + }, + ] + request, metadata = self._interceptor.pre_delete_table(request, metadata) + pb_request = bigtable_table_admin.DeleteTableRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + class _DropRowRange(BigtableTableAdminRestStub): + def __hash__(self): + return hash("DropRowRange") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable_table_admin.DropRowRangeRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ): + r"""Call the drop row range method over HTTP. + + Args: + request (~.bigtable_table_admin.DropRowRangeRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{name=projects/*/instances/*/tables/*}:dropRowRange", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_drop_row_range(request, metadata) + pb_request = bigtable_table_admin.DropRowRangeRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + class _GenerateConsistencyToken(BigtableTableAdminRestStub): + def __hash__(self): + return hash("GenerateConsistencyToken") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable_table_admin.GenerateConsistencyTokenRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> bigtable_table_admin.GenerateConsistencyTokenResponse: + r"""Call the generate consistency + token method over HTTP. + + Args: + request (~.bigtable_table_admin.GenerateConsistencyTokenRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.bigtable_table_admin.GenerateConsistencyTokenResponse: + Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_generate_consistency_token( + request, metadata + ) + pb_request = bigtable_table_admin.GenerateConsistencyTokenRequest.pb( + request + ) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = bigtable_table_admin.GenerateConsistencyTokenResponse() + pb_resp = bigtable_table_admin.GenerateConsistencyTokenResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_generate_consistency_token(resp) + return resp + + class _GetBackup(BigtableTableAdminRestStub): + def __hash__(self): + return hash("GetBackup") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable_table_admin.GetBackupRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> table.Backup: + r"""Call the get backup method over HTTP. + + Args: + request (~.bigtable_table_admin.GetBackupRequest): + The request object. The request for + [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup]. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.table.Backup: + A backup of a Cloud Bigtable table. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{name=projects/*/instances/*/clusters/*/backups/*}", + }, + ] + request, metadata = self._interceptor.pre_get_backup(request, metadata) + pb_request = bigtable_table_admin.GetBackupRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = table.Backup() + pb_resp = table.Backup.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_backup(resp) + return resp + + class _GetIamPolicy(BigtableTableAdminRestStub): + def __hash__(self): + return hash("GetIamPolicy") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: iam_policy_pb2.GetIamPolicyRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Call the get iam policy method over HTTP. + + Args: + request (~.iam_policy_pb2.GetIamPolicyRequest): + The request object. Request message for ``GetIamPolicy`` method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.policy_pb2.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. + + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members``, or + principals, to a single ``role``. Principals can be user + accounts, service accounts, Google groups, and domains + (such as G Suite). A ``role`` is a named list of + permissions; each ``role`` can be an IAM predefined role + or a user-created custom role. + + For some types of Google Cloud resources, a ``binding`` + can also specify a ``condition``, which is a logical + expression that allows access to a resource only if the + expression evaluates to ``true``. A condition can add + constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the `IAM + documentation `__. 
+ + **JSON example:** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": [ + "user:eve@example.com" + ], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", + } + } + ], + "etag": "BwWWja0YfJA=", + "version": 3 + } + + **YAML example:** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + etag: BwWWja0YfJA= + version: 3 + + For a description of IAM and its features, see the `IAM + documentation `__. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{resource=projects/*/instances/*/tables/*}:getIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:getIamPolicy", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_get_iam_policy(request, metadata) + pb_request = request + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
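The JSON and YAML samples in the docstring above correspond to ``google.iam.v1`` protobuf messages. A sketch of building the equivalent ``Policy`` in code and applying it through the higher-level client, where ``client`` and ``table_name`` are assumed to exist:

# Sketch: the Policy message behind the JSON example above.
from google.iam.v1 import iam_policy_pb2, policy_pb2

policy = policy_pb2.Policy(
    version=3,
    bindings=[
        policy_pb2.Binding(
            role="roles/resourcemanager.organizationAdmin",
            members=["user:mike@example.com", "group:admins@example.com"],
        ),
    ],
)

request = iam_policy_pb2.SetIamPolicyRequest(resource=table_name, policy=policy)
updated = client.set_iam_policy(request=request)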
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = policy_pb2.Policy() + pb_resp = resp + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_iam_policy(resp) + return resp + + class _GetSnapshot(BigtableTableAdminRestStub): + def __hash__(self): + return hash("GetSnapshot") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable_table_admin.GetSnapshotRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> table.Snapshot: + r"""Call the get snapshot method over HTTP. + + Args: + request (~.bigtable_table_admin.GetSnapshotRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot] + + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.table.Snapshot: + A snapshot of a table at a particular + time. A snapshot can be used as a + checkpoint for data restoration or a + data source for a new table. + Note: This is a private alpha release of + Cloud Bigtable snapshots. This feature + is not currently available to most Cloud + Bigtable customers. This feature might + be changed in backward-incompatible ways + and is not recommended for production + use. It is not subject to any SLA or + deprecation policy. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}", + }, + ] + request, metadata = self._interceptor.pre_get_snapshot(request, metadata) + pb_request = bigtable_table_admin.GetSnapshotRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = table.Snapshot() + pb_resp = table.Snapshot.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_snapshot(resp) + return resp + + class _GetTable(BigtableTableAdminRestStub): + def __hash__(self): + return hash("GetTable") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable_table_admin.GetTableRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> table.Table: + r"""Call the get table method over HTTP. + + Args: + request (~.bigtable_table_admin.GetTableRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.GetTable][google.bigtable.admin.v2.BigtableTableAdmin.GetTable] + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.table.Table: + A collection of user data indexed by + row, column, and timestamp. Each table + is served using the resources of its + parent cluster. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{name=projects/*/instances/*/tables/*}", + }, + ] + request, metadata = self._interceptor.pre_get_table(request, metadata) + pb_request = bigtable_table_admin.GetTableRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
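``core_exceptions.from_http_response`` maps non-2xx responses to typed ``google.api_core.exceptions`` subclasses (a 404 becomes ``NotFound``, a 403 ``PermissionDenied``, and so on), so callers can handle them selectively. A sketch at the client level, assuming ``client`` and ``table_name`` exist:

# Sketch of handling the typed exceptions raised via from_http_response.
from google.api_core import exceptions as core_exceptions

try:
    tbl = client.get_table(name=table_name)
except core_exceptions.NotFound:
    tbl = None  # a 404 from the REST call surfaces as NotFound
except core_exceptions.GoogleAPICallError:
    # Any other non-2xx status (403 PermissionDenied, 500 InternalServerError, ...)
    raise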
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = table.Table() + pb_resp = table.Table.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_table(resp) + return resp + + class _ListBackups(BigtableTableAdminRestStub): + def __hash__(self): + return hash("ListBackups") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable_table_admin.ListBackupsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> bigtable_table_admin.ListBackupsResponse: + r"""Call the list backups method over HTTP. + + Args: + request (~.bigtable_table_admin.ListBackupsRequest): + The request object. The request for + [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.bigtable_table_admin.ListBackupsResponse: + The response for + [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{parent=projects/*/instances/*/clusters/*}/backups", + }, + ] + request, metadata = self._interceptor.pre_list_backups(request, metadata) + pb_request = bigtable_table_admin.ListBackupsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = bigtable_table_admin.ListBackupsResponse() + pb_resp = bigtable_table_admin.ListBackupsResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_backups(resp) + return resp + + class _ListSnapshots(BigtableTableAdminRestStub): + def __hash__(self): + return hash("ListSnapshots") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable_table_admin.ListSnapshotsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> bigtable_table_admin.ListSnapshotsResponse: + r"""Call the list snapshots method over HTTP. + + Args: + request (~.bigtable_table_admin.ListSnapshotsRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] + + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.bigtable_table_admin.ListSnapshotsResponse: + Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] + + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{parent=projects/*/instances/*/clusters/*}/snapshots", + }, + ] + request, metadata = self._interceptor.pre_list_snapshots(request, metadata) + pb_request = bigtable_table_admin.ListSnapshotsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = bigtable_table_admin.ListSnapshotsResponse() + pb_resp = bigtable_table_admin.ListSnapshotsResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_snapshots(resp) + return resp + + class _ListTables(BigtableTableAdminRestStub): + def __hash__(self): + return hash("ListTables") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable_table_admin.ListTablesRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> bigtable_table_admin.ListTablesResponse: + r"""Call the list tables method over HTTP. + + Args: + request (~.bigtable_table_admin.ListTablesRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.bigtable_table_admin.ListTablesResponse: + Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{parent=projects/*/instances/*}/tables", + }, + ] + request, metadata = self._interceptor.pre_list_tables(request, metadata) + pb_request = bigtable_table_admin.ListTablesRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = bigtable_table_admin.ListTablesResponse() + pb_resp = bigtable_table_admin.ListTablesResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_tables(resp) + return resp + + class _ModifyColumnFamilies(BigtableTableAdminRestStub): + def __hash__(self): + return hash("ModifyColumnFamilies") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable_table_admin.ModifyColumnFamiliesRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> table.Table: + r"""Call the modify column families method over HTTP. + + Args: + request (~.bigtable_table_admin.ModifyColumnFamiliesRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies] + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.table.Table: + A collection of user data indexed by + row, column, and timestamp. Each table + is served using the resources of its + parent cluster. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_modify_column_families( + request, metadata + ) + pb_request = bigtable_table_admin.ModifyColumnFamiliesRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = table.Table() + pb_resp = table.Table.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_modify_column_families(resp) + return resp + + class _RestoreTable(BigtableTableAdminRestStub): + def __hash__(self): + return hash("RestoreTable") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable_table_admin.RestoreTableRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Call the restore table method over HTTP. + + Args: + request (~.bigtable_table_admin.RestoreTableRequest): + The request object. The request for + [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{parent=projects/*/instances/*}/tables:restore", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_restore_table(request, metadata) + pb_request = bigtable_table_admin.RestoreTableRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_restore_table(resp) + return resp + + class _SetIamPolicy(BigtableTableAdminRestStub): + def __hash__(self): + return hash("SetIamPolicy") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: iam_policy_pb2.SetIamPolicyRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Call the set iam policy method over HTTP. + + Args: + request (~.iam_policy_pb2.SetIamPolicyRequest): + The request object. Request message for ``SetIamPolicy`` method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.policy_pb2.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. + + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members``, or + principals, to a single ``role``. Principals can be user + accounts, service accounts, Google groups, and domains + (such as G Suite). A ``role`` is a named list of + permissions; each ``role`` can be an IAM predefined role + or a user-created custom role. + + For some types of Google Cloud resources, a ``binding`` + can also specify a ``condition``, which is a logical + expression that allows access to a resource only if the + expression evaluates to ``true``. A condition can add + constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the `IAM + documentation `__. + + **JSON example:** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": [ + "user:eve@example.com" + ], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", + } + } + ], + "etag": "BwWWja0YfJA=", + "version": 3 + } + + **YAML example:** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + etag: BwWWja0YfJA= + version: 3 + + For a description of IAM and its features, see the `IAM + documentation `__. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{resource=projects/*/instances/*/tables/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:setIamPolicy", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_set_iam_policy(request, metadata) + pb_request = request + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = policy_pb2.Policy() + pb_resp = resp + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_set_iam_policy(resp) + return resp + + class _SnapshotTable(BigtableTableAdminRestStub): + def __hash__(self): + return hash("SnapshotTable") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable_table_admin.SnapshotTableRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Call the snapshot table method over HTTP. + + Args: + request (~.bigtable_table_admin.SnapshotTableRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable][google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable] + + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{name=projects/*/instances/*/tables/*}:snapshot", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_snapshot_table(request, metadata) + pb_request = bigtable_table_admin.SnapshotTableRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_snapshot_table(resp) + return resp + + class _TestIamPermissions(BigtableTableAdminRestStub): + def __hash__(self): + return hash("TestIamPermissions") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: iam_policy_pb2.TestIamPermissionsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Call the test iam permissions method over HTTP. + + Args: + request (~.iam_policy_pb2.TestIamPermissionsRequest): + The request object. Request message for ``TestIamPermissions`` method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{resource=projects/*/instances/*/tables/*}:testIamPermissions", + "body": "*", + }, + { + "method": "post", + "uri": "/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:testIamPermissions", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_test_iam_permissions( + request, metadata + ) + pb_request = request + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = iam_policy_pb2.TestIamPermissionsResponse() + pb_resp = resp + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_test_iam_permissions(resp) + return resp + + class _UndeleteTable(BigtableTableAdminRestStub): + def __hash__(self): + return hash("UndeleteTable") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable_table_admin.UndeleteTableRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Call the undelete table method over HTTP. + + Args: + request (~.bigtable_table_admin.UndeleteTableRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable] + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{name=projects/*/instances/*/tables/*}:undelete", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_undelete_table(request, metadata) + pb_request = bigtable_table_admin.UndeleteTableRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_undelete_table(resp) + return resp + + class _UpdateBackup(BigtableTableAdminRestStub): + def __hash__(self): + return hash("UpdateBackup") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "updateMask": {}, + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable_table_admin.UpdateBackupRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> table.Backup: + r"""Call the update backup method over HTTP. + + Args: + request (~.bigtable_table_admin.UpdateBackupRequest): + The request object. The request for + [UpdateBackup][google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup]. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.table.Backup: + A backup of a Cloud Bigtable table. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "patch", + "uri": "/v2/{backup.name=projects/*/instances/*/clusters/*/backups/*}", + "body": "backup", + }, + ] + request, metadata = self._interceptor.pre_update_backup(request, metadata) + pb_request = bigtable_table_admin.UpdateBackupRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = table.Backup() + pb_resp = table.Backup.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_update_backup(resp) + return resp + + class _UpdateTable(BigtableTableAdminRestStub): + def __hash__(self): + return hash("UpdateTable") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "updateMask": {}, + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable_table_admin.UpdateTableRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Call the update table method over HTTP. + + Args: + request (~.bigtable_table_admin.UpdateTableRequest): + The request object. The request for + [UpdateTable][google.bigtable.admin.v2.BigtableTableAdmin.UpdateTable]. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "patch", + "uri": "/v2/{table.name=projects/*/instances/*/tables/*}", + "body": "table", + }, + ] + request, metadata = self._interceptor.pre_update_table(request, metadata) + pb_request = bigtable_table_admin.UpdateTableRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_update_table(resp) + return resp + + @property + def check_consistency( + self, + ) -> Callable[ + [bigtable_table_admin.CheckConsistencyRequest], + bigtable_table_admin.CheckConsistencyResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._CheckConsistency(self._session, self._host, self._interceptor) # type: ignore + + @property + def create_backup( + self, + ) -> Callable[[bigtable_table_admin.CreateBackupRequest], operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._CreateBackup(self._session, self._host, self._interceptor) # type: ignore + + @property + def create_table( + self, + ) -> Callable[[bigtable_table_admin.CreateTableRequest], gba_table.Table]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._CreateTable(self._session, self._host, self._interceptor) # type: ignore + + @property + def create_table_from_snapshot( + self, + ) -> Callable[ + [bigtable_table_admin.CreateTableFromSnapshotRequest], operations_pb2.Operation + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._CreateTableFromSnapshot(self._session, self._host, self._interceptor) # type: ignore + + @property + def delete_backup( + self, + ) -> Callable[[bigtable_table_admin.DeleteBackupRequest], empty_pb2.Empty]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._DeleteBackup(self._session, self._host, self._interceptor) # type: ignore + + @property + def delete_snapshot( + self, + ) -> Callable[[bigtable_table_admin.DeleteSnapshotRequest], empty_pb2.Empty]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._DeleteSnapshot(self._session, self._host, self._interceptor) # type: ignore + + @property + def delete_table( + self, + ) -> Callable[[bigtable_table_admin.DeleteTableRequest], empty_pb2.Empty]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._DeleteTable(self._session, self._host, self._interceptor) # type: ignore + + @property + def drop_row_range( + self, + ) -> Callable[[bigtable_table_admin.DropRowRangeRequest], empty_pb2.Empty]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._DropRowRange(self._session, self._host, self._interceptor) # type: ignore + + @property + def generate_consistency_token( + self, + ) -> Callable[ + [bigtable_table_admin.GenerateConsistencyTokenRequest], + bigtable_table_admin.GenerateConsistencyTokenResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GenerateConsistencyToken(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_backup( + self, + ) -> Callable[[bigtable_table_admin.GetBackupRequest], table.Backup]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetBackup(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetIamPolicy(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_snapshot( + self, + ) -> Callable[[bigtable_table_admin.GetSnapshotRequest], table.Snapshot]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetSnapshot(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_table( + self, + ) -> Callable[[bigtable_table_admin.GetTableRequest], table.Table]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetTable(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_backups( + self, + ) -> Callable[ + [bigtable_table_admin.ListBackupsRequest], + bigtable_table_admin.ListBackupsResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._ListBackups(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_snapshots( + self, + ) -> Callable[ + [bigtable_table_admin.ListSnapshotsRequest], + bigtable_table_admin.ListSnapshotsResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListSnapshots(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_tables( + self, + ) -> Callable[ + [bigtable_table_admin.ListTablesRequest], + bigtable_table_admin.ListTablesResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListTables(self._session, self._host, self._interceptor) # type: ignore + + @property + def modify_column_families( + self, + ) -> Callable[[bigtable_table_admin.ModifyColumnFamiliesRequest], table.Table]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ModifyColumnFamilies(self._session, self._host, self._interceptor) # type: ignore + + @property + def restore_table( + self, + ) -> Callable[[bigtable_table_admin.RestoreTableRequest], operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._RestoreTable(self._session, self._host, self._interceptor) # type: ignore + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._SetIamPolicy(self._session, self._host, self._interceptor) # type: ignore + + @property + def snapshot_table( + self, + ) -> Callable[ + [bigtable_table_admin.SnapshotTableRequest], operations_pb2.Operation + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._SnapshotTable(self._session, self._host, self._interceptor) # type: ignore + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + iam_policy_pb2.TestIamPermissionsResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._TestIamPermissions(self._session, self._host, self._interceptor) # type: ignore + + @property + def undelete_table( + self, + ) -> Callable[ + [bigtable_table_admin.UndeleteTableRequest], operations_pb2.Operation + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._UndeleteTable(self._session, self._host, self._interceptor) # type: ignore + + @property + def update_backup( + self, + ) -> Callable[[bigtable_table_admin.UpdateBackupRequest], table.Backup]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._UpdateBackup(self._session, self._host, self._interceptor) # type: ignore + + @property + def update_table( + self, + ) -> Callable[[bigtable_table_admin.UpdateTableRequest], operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._UpdateTable(self._session, self._host, self._interceptor) # type: ignore + + @property + def kind(self) -> str: + return "rest" + + def close(self): + self._session.close() + + +__all__ = ("BigtableTableAdminRestTransport",) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py index b343c985dbc2..342718dea85d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py @@ -23,12 +23,16 @@ from .types.bigtable import CheckAndMutateRowRequest from .types.bigtable import CheckAndMutateRowResponse +from .types.bigtable import GenerateInitialChangeStreamPartitionsRequest +from .types.bigtable import GenerateInitialChangeStreamPartitionsResponse from .types.bigtable import MutateRowRequest from .types.bigtable import MutateRowResponse from .types.bigtable import MutateRowsRequest from .types.bigtable import MutateRowsResponse from .types.bigtable import PingAndWarmRequest from .types.bigtable import PingAndWarmResponse +from .types.bigtable import ReadChangeStreamRequest +from .types.bigtable import ReadChangeStreamResponse from .types.bigtable import ReadModifyWriteRowRequest from .types.bigtable import ReadModifyWriteRowResponse from .types.bigtable import ReadRowsRequest @@ -45,6 +49,9 @@ from .types.data import RowFilter from .types.data import RowRange from .types.data import RowSet +from .types.data import StreamContinuationToken +from .types.data import StreamContinuationTokens +from .types.data import StreamPartition from .types.data import TimestampRange from .types.data import ValueRange from .types.request_stats import FullReadStatsView @@ -63,6 +70,8 @@ "ColumnRange", "Family", "FullReadStatsView", + "GenerateInitialChangeStreamPartitionsRequest", + "GenerateInitialChangeStreamPartitionsResponse", "MutateRowRequest", "MutateRowResponse", "MutateRowsRequest", @@ -70,6 +79,8 @@ "Mutation", "PingAndWarmRequest", "PingAndWarmResponse", + "ReadChangeStreamRequest", + "ReadChangeStreamResponse", "ReadIterationStats", "ReadModifyWriteRowRequest", "ReadModifyWriteRowResponse", @@ -85,6 +96,9 @@ "RowSet", "SampleRowKeysRequest", "SampleRowKeysResponse", + "StreamContinuationToken", + "StreamContinuationTokens", + "StreamPartition", "TimestampRange", "ValueRange", ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_metadata.json b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_metadata.json index 4ceadc15181f..181dc8ff57a2 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_metadata.json +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_metadata.json @@ -15,6 +15,11 @@ "check_and_mutate_row" ] }, + "GenerateInitialChangeStreamPartitions": { + "methods": [ + "generate_initial_change_stream_partitions" + ] + }, "MutateRow": { "methods": [ "mutate_row" @@ -30,6 +35,11 @@ "ping_and_warm" ] }, + "ReadChangeStream": { + "methods": [ + "read_change_stream" + ] + }, "ReadModifyWriteRow": { "methods": [ 
"read_modify_write_row" @@ -55,6 +65,11 @@ "check_and_mutate_row" ] }, + "GenerateInitialChangeStreamPartitions": { + "methods": [ + "generate_initial_change_stream_partitions" + ] + }, "MutateRow": { "methods": [ "mutate_row" @@ -70,6 +85,61 @@ "ping_and_warm" ] }, + "ReadChangeStream": { + "methods": [ + "read_change_stream" + ] + }, + "ReadModifyWriteRow": { + "methods": [ + "read_modify_write_row" + ] + }, + "ReadRows": { + "methods": [ + "read_rows" + ] + }, + "SampleRowKeys": { + "methods": [ + "sample_row_keys" + ] + } + } + }, + "rest": { + "libraryClient": "BigtableClient", + "rpcs": { + "CheckAndMutateRow": { + "methods": [ + "check_and_mutate_row" + ] + }, + "GenerateInitialChangeStreamPartitions": { + "methods": [ + "generate_initial_change_stream_partitions" + ] + }, + "MutateRow": { + "methods": [ + "mutate_row" + ] + }, + "MutateRows": { + "methods": [ + "mutate_rows" + ] + }, + "PingAndWarm": { + "methods": [ + "ping_and_warm" + ] + }, + "ReadChangeStream": { + "methods": [ + "read_change_stream" + ] + }, "ReadModifyWriteRow": { "methods": [ "read_modify_write_row" diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py index 8a25fa3af6d6..3465569b34cb 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -1006,6 +1006,207 @@ async def read_modify_write_row( # Done; return the response. return response + def generate_initial_change_stream_partitions( + self, + request: Optional[ + Union[bigtable.GenerateInitialChangeStreamPartitionsRequest, dict] + ] = None, + *, + table_name: Optional[str] = None, + app_profile_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Awaitable[ + AsyncIterable[bigtable.GenerateInitialChangeStreamPartitionsResponse] + ]: + r"""NOTE: This API is intended to be used by Apache Beam BigtableIO. + Returns the current list of partitions that make up the table's + change stream. The union of partitions will cover the entire + keyspace. Partitions can be read with ``ReadChangeStream``. + + Args: + request (Optional[Union[google.cloud.bigtable_v2.types.GenerateInitialChangeStreamPartitionsRequest, dict]]): + The request object. NOTE: This API is intended to be + used by Apache Beam BigtableIO. Request message for + Bigtable.GenerateInitialChangeStreamPartitions. + table_name (:class:`str`): + Required. The unique name of the table from which to get + change stream partitions. Values are of the form + ``projects//instances//tables/
``. + Change streaming must be enabled on the table. + + This corresponds to the ``table_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile_id (:class:`str`): + This value specifies routing for + replication. If not specified, the + "default" application profile will be + used. Single cluster routing must be + configured on the profile. + + This corresponds to the ``app_profile_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + AsyncIterable[google.cloud.bigtable_v2.types.GenerateInitialChangeStreamPartitionsResponse]: + NOTE: This API is intended to be used + by Apache Beam BigtableIO. Response + message for + Bigtable.GenerateInitialChangeStreamPartitions. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([table_name, app_profile_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable.GenerateInitialChangeStreamPartitionsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if table_name is not None: + request.table_name = table_name + if app_profile_id is not None: + request.app_profile_id = app_profile_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.generate_initial_change_stream_partitions, + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def read_change_stream( + self, + request: Optional[Union[bigtable.ReadChangeStreamRequest, dict]] = None, + *, + table_name: Optional[str] = None, + app_profile_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Awaitable[AsyncIterable[bigtable.ReadChangeStreamResponse]]: + r"""NOTE: This API is intended to be used by Apache Beam + BigtableIO. Reads changes from a table's change stream. + Changes will reflect both user-initiated mutations and + mutations that are caused by garbage collection. + + Args: + request (Optional[Union[google.cloud.bigtable_v2.types.ReadChangeStreamRequest, dict]]): + The request object. NOTE: This API is intended to be + used by Apache Beam BigtableIO. Request message for + Bigtable.ReadChangeStream. + table_name (:class:`str`): + Required. The unique name of the table from which to + read a change stream. Values are of the form + ``projects//instances//tables/
``. + Change streaming must be enabled on the table. + + This corresponds to the ``table_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile_id (:class:`str`): + This value specifies routing for + replication. If not specified, the + "default" application profile will be + used. Single cluster routing must be + configured on the profile. + + This corresponds to the ``app_profile_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + AsyncIterable[google.cloud.bigtable_v2.types.ReadChangeStreamResponse]: + NOTE: This API is intended to be used + by Apache Beam BigtableIO. Response + message for Bigtable.ReadChangeStream. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([table_name, app_profile_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable.ReadChangeStreamRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if table_name is not None: + request.table_name = table_name + if app_profile_id is not None: + request.app_profile_id = app_profile_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.read_change_stream, + default_timeout=43200.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + async def __aenter__(self): return self diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py index aaff4669fdaa..37ab65fe2a87 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py @@ -53,6 +53,7 @@ from .transports.base import BigtableTransport, DEFAULT_CLIENT_INFO from .transports.grpc import BigtableGrpcTransport from .transports.grpc_asyncio import BigtableGrpcAsyncIOTransport +from .transports.rest import BigtableRestTransport class BigtableClientMeta(type): @@ -66,6 +67,7 @@ class BigtableClientMeta(type): _transport_registry = OrderedDict() # type: Dict[str, Type[BigtableTransport]] _transport_registry["grpc"] = BigtableGrpcTransport _transport_registry["grpc_asyncio"] = BigtableGrpcAsyncIOTransport + _transport_registry["rest"] = BigtableRestTransport def get_transport_class( cls, @@ -1306,6 +1308,209 @@ def read_modify_write_row( # Done; return the response. 
return response + def generate_initial_change_stream_partitions( + self, + request: Optional[ + Union[bigtable.GenerateInitialChangeStreamPartitionsRequest, dict] + ] = None, + *, + table_name: Optional[str] = None, + app_profile_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Iterable[bigtable.GenerateInitialChangeStreamPartitionsResponse]: + r"""NOTE: This API is intended to be used by Apache Beam BigtableIO. + Returns the current list of partitions that make up the table's + change stream. The union of partitions will cover the entire + keyspace. Partitions can be read with ``ReadChangeStream``. + + Args: + request (Union[google.cloud.bigtable_v2.types.GenerateInitialChangeStreamPartitionsRequest, dict]): + The request object. NOTE: This API is intended to be + used by Apache Beam BigtableIO. Request message for + Bigtable.GenerateInitialChangeStreamPartitions. + table_name (str): + Required. The unique name of the table from which to get + change stream partitions. Values are of the form + ``projects//instances//tables/
``. + Change streaming must be enabled on the table. + + This corresponds to the ``table_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile_id (str): + This value specifies routing for + replication. If not specified, the + "default" application profile will be + used. Single cluster routing must be + configured on the profile. + + This corresponds to the ``app_profile_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + Iterable[google.cloud.bigtable_v2.types.GenerateInitialChangeStreamPartitionsResponse]: + NOTE: This API is intended to be used + by Apache Beam BigtableIO. Response + message for + Bigtable.GenerateInitialChangeStreamPartitions. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([table_name, app_profile_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable.GenerateInitialChangeStreamPartitionsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, bigtable.GenerateInitialChangeStreamPartitionsRequest + ): + request = bigtable.GenerateInitialChangeStreamPartitionsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if table_name is not None: + request.table_name = table_name + if app_profile_id is not None: + request.app_profile_id = app_profile_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.generate_initial_change_stream_partitions + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def read_change_stream( + self, + request: Optional[Union[bigtable.ReadChangeStreamRequest, dict]] = None, + *, + table_name: Optional[str] = None, + app_profile_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Iterable[bigtable.ReadChangeStreamResponse]: + r"""NOTE: This API is intended to be used by Apache Beam + BigtableIO. Reads changes from a table's change stream. + Changes will reflect both user-initiated mutations and + mutations that are caused by garbage collection. + + Args: + request (Union[google.cloud.bigtable_v2.types.ReadChangeStreamRequest, dict]): + The request object. NOTE: This API is intended to be + used by Apache Beam BigtableIO. Request message for + Bigtable.ReadChangeStream. + table_name (str): + Required. 
The unique name of the table from which to + read a change stream. Values are of the form + ``projects/<project>/instances/<instance>/tables/<table>
``. + Change streaming must be enabled on the table. + + This corresponds to the ``table_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile_id (str): + This value specifies routing for + replication. If not specified, the + "default" application profile will be + used. Single cluster routing must be + configured on the profile. + + This corresponds to the ``app_profile_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + Iterable[google.cloud.bigtable_v2.types.ReadChangeStreamResponse]: + NOTE: This API is intended to be used + by Apache Beam BigtableIO. Response + message for Bigtable.ReadChangeStream. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([table_name, app_profile_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable.ReadChangeStreamRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable.ReadChangeStreamRequest): + request = bigtable.ReadChangeStreamRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if table_name is not None: + request.table_name = table_name + if app_profile_id is not None: + request.app_profile_id = app_profile_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.read_change_stream] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + def __enter__(self) -> "BigtableClient": return self diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py index 67a9abdf9862..1b03919f6ce5 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py @@ -19,15 +19,20 @@ from .base import BigtableTransport from .grpc import BigtableGrpcTransport from .grpc_asyncio import BigtableGrpcAsyncIOTransport +from .rest import BigtableRestTransport +from .rest import BigtableRestInterceptor # Compile a registry of transports. 
_transport_registry = OrderedDict() # type: Dict[str, Type[BigtableTransport]] _transport_registry["grpc"] = BigtableGrpcTransport _transport_registry["grpc_asyncio"] = BigtableGrpcAsyncIOTransport +_transport_registry["rest"] = BigtableRestTransport __all__ = ( "BigtableTransport", "BigtableGrpcTransport", "BigtableGrpcAsyncIOTransport", + "BigtableRestTransport", + "BigtableRestInterceptor", ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py index a32ea682b8f9..5b4580c18f1c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py @@ -175,6 +175,16 @@ def _prep_wrapped_messages(self, client_info): default_timeout=20.0, client_info=client_info, ), + self.generate_initial_change_stream_partitions: gapic_v1.method.wrap_method( + self.generate_initial_change_stream_partitions, + default_timeout=60.0, + client_info=client_info, + ), + self.read_change_stream: gapic_v1.method.wrap_method( + self.read_change_stream, + default_timeout=43200.0, + client_info=client_info, + ), } def close(self): @@ -257,6 +267,30 @@ def read_modify_write_row( ]: raise NotImplementedError() + @property + def generate_initial_change_stream_partitions( + self, + ) -> Callable[ + [bigtable.GenerateInitialChangeStreamPartitionsRequest], + Union[ + bigtable.GenerateInitialChangeStreamPartitionsResponse, + Awaitable[bigtable.GenerateInitialChangeStreamPartitionsResponse], + ], + ]: + raise NotImplementedError() + + @property + def read_change_stream( + self, + ) -> Callable[ + [bigtable.ReadChangeStreamRequest], + Union[ + bigtable.ReadChangeStreamResponse, + Awaitable[bigtable.ReadChangeStreamResponse], + ], + ]: + raise NotImplementedError() + @property def kind(self) -> str: raise NotImplementedError() diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py index cf6723678d2c..b9e073e8a093 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py @@ -437,6 +437,72 @@ def read_modify_write_row( ) return self._stubs["read_modify_write_row"] + @property + def generate_initial_change_stream_partitions( + self, + ) -> Callable[ + [bigtable.GenerateInitialChangeStreamPartitionsRequest], + bigtable.GenerateInitialChangeStreamPartitionsResponse, + ]: + r"""Return a callable for the generate initial change stream + partitions method over gRPC. + + NOTE: This API is intended to be used by Apache Beam BigtableIO. + Returns the current list of partitions that make up the table's + change stream. The union of partitions will cover the entire + keyspace. Partitions can be read with ``ReadChangeStream``. + + Returns: + Callable[[~.GenerateInitialChangeStreamPartitionsRequest], + ~.GenerateInitialChangeStreamPartitionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "generate_initial_change_stream_partitions" not in self._stubs: + self._stubs[ + "generate_initial_change_stream_partitions" + ] = self.grpc_channel.unary_stream( + "/google.bigtable.v2.Bigtable/GenerateInitialChangeStreamPartitions", + request_serializer=bigtable.GenerateInitialChangeStreamPartitionsRequest.serialize, + response_deserializer=bigtable.GenerateInitialChangeStreamPartitionsResponse.deserialize, + ) + return self._stubs["generate_initial_change_stream_partitions"] + + @property + def read_change_stream( + self, + ) -> Callable[ + [bigtable.ReadChangeStreamRequest], bigtable.ReadChangeStreamResponse + ]: + r"""Return a callable for the read change stream method over gRPC. + + NOTE: This API is intended to be used by Apache Beam + BigtableIO. Reads changes from a table's change stream. + Changes will reflect both user-initiated mutations and + mutations that are caused by garbage collection. + + Returns: + Callable[[~.ReadChangeStreamRequest], + ~.ReadChangeStreamResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "read_change_stream" not in self._stubs: + self._stubs["read_change_stream"] = self.grpc_channel.unary_stream( + "/google.bigtable.v2.Bigtable/ReadChangeStream", + request_serializer=bigtable.ReadChangeStreamRequest.serialize, + response_deserializer=bigtable.ReadChangeStreamResponse.deserialize, + ) + return self._stubs["read_change_stream"] + def close(self): self.grpc_channel.close() diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py index 26d89c847b24..8bf02ce774b2 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py @@ -446,6 +446,72 @@ def read_modify_write_row( ) return self._stubs["read_modify_write_row"] + @property + def generate_initial_change_stream_partitions( + self, + ) -> Callable[ + [bigtable.GenerateInitialChangeStreamPartitionsRequest], + Awaitable[bigtable.GenerateInitialChangeStreamPartitionsResponse], + ]: + r"""Return a callable for the generate initial change stream + partitions method over gRPC. + + NOTE: This API is intended to be used by Apache Beam BigtableIO. + Returns the current list of partitions that make up the table's + change stream. The union of partitions will cover the entire + keyspace. Partitions can be read with ``ReadChangeStream``. + + Returns: + Callable[[~.GenerateInitialChangeStreamPartitionsRequest], + Awaitable[~.GenerateInitialChangeStreamPartitionsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "generate_initial_change_stream_partitions" not in self._stubs: + self._stubs[ + "generate_initial_change_stream_partitions" + ] = self.grpc_channel.unary_stream( + "/google.bigtable.v2.Bigtable/GenerateInitialChangeStreamPartitions", + request_serializer=bigtable.GenerateInitialChangeStreamPartitionsRequest.serialize, + response_deserializer=bigtable.GenerateInitialChangeStreamPartitionsResponse.deserialize, + ) + return self._stubs["generate_initial_change_stream_partitions"] + + @property + def read_change_stream( + self, + ) -> Callable[ + [bigtable.ReadChangeStreamRequest], Awaitable[bigtable.ReadChangeStreamResponse] + ]: + r"""Return a callable for the read change stream method over gRPC. + + NOTE: This API is intended to be used by Apache Beam + BigtableIO. Reads changes from a table's change stream. + Changes will reflect both user-initiated mutations and + mutations that are caused by garbage collection. + + Returns: + Callable[[~.ReadChangeStreamRequest], + Awaitable[~.ReadChangeStreamResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "read_change_stream" not in self._stubs: + self._stubs["read_change_stream"] = self.grpc_channel.unary_stream( + "/google.bigtable.v2.Bigtable/ReadChangeStream", + request_serializer=bigtable.ReadChangeStreamRequest.serialize, + response_deserializer=bigtable.ReadChangeStreamResponse.deserialize, + ) + return self._stubs["read_change_stream"] + def close(self): return self.grpc_channel.close() diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py new file mode 100644 index 000000000000..ee9cb046ff0c --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py @@ -0,0 +1,1433 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import rest_streaming +from google.api_core import path_template +from google.api_core import gapic_v1 + +from google.protobuf import json_format +from requests import __version__ as requests_version +import dataclasses +import re +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + + +from google.cloud.bigtable_v2.types import bigtable + +from .base import BigtableTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + + +class BigtableRestInterceptor: + """Interceptor for Bigtable. + + Interceptors are used to manipulate requests, request metadata, and responses + in arbitrary ways. + Example use cases include: + * Logging + * Verifying requests according to service or custom semantics + * Stripping extraneous information from responses + + These use cases and more can be enabled by injecting an + instance of a custom subclass when constructing the BigtableRestTransport. + + .. code-block:: python + class MyCustomBigtableInterceptor(BigtableRestInterceptor): + def pre_check_and_mutate_row(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_check_and_mutate_row(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_generate_initial_change_stream_partitions(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_generate_initial_change_stream_partitions(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_mutate_row(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_mutate_row(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_mutate_rows(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_mutate_rows(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_ping_and_warm(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_ping_and_warm(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_read_change_stream(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_read_change_stream(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_read_modify_write_row(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_read_modify_write_row(self, response): + logging.log(f"Received response: {response}") + return 
response + + def pre_read_rows(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_read_rows(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_sample_row_keys(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_sample_row_keys(self, response): + logging.log(f"Received response: {response}") + return response + + transport = BigtableRestTransport(interceptor=MyCustomBigtableInterceptor()) + client = BigtableClient(transport=transport) + + + """ + + def pre_check_and_mutate_row( + self, + request: bigtable.CheckAndMutateRowRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[bigtable.CheckAndMutateRowRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for check_and_mutate_row + + Override in a subclass to manipulate the request or metadata + before they are sent to the Bigtable server. + """ + return request, metadata + + def post_check_and_mutate_row( + self, response: bigtable.CheckAndMutateRowResponse + ) -> bigtable.CheckAndMutateRowResponse: + """Post-rpc interceptor for check_and_mutate_row + + Override in a subclass to manipulate the response + after it is returned by the Bigtable server but before + it is returned to user code. + """ + return response + + def pre_generate_initial_change_stream_partitions( + self, + request: bigtable.GenerateInitialChangeStreamPartitionsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + bigtable.GenerateInitialChangeStreamPartitionsRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for generate_initial_change_stream_partitions + + Override in a subclass to manipulate the request or metadata + before they are sent to the Bigtable server. + """ + return request, metadata + + def post_generate_initial_change_stream_partitions( + self, response: rest_streaming.ResponseIterator + ) -> rest_streaming.ResponseIterator: + """Post-rpc interceptor for generate_initial_change_stream_partitions + + Override in a subclass to manipulate the response + after it is returned by the Bigtable server but before + it is returned to user code. + """ + return response + + def pre_mutate_row( + self, request: bigtable.MutateRowRequest, metadata: Sequence[Tuple[str, str]] + ) -> Tuple[bigtable.MutateRowRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for mutate_row + + Override in a subclass to manipulate the request or metadata + before they are sent to the Bigtable server. + """ + return request, metadata + + def post_mutate_row( + self, response: bigtable.MutateRowResponse + ) -> bigtable.MutateRowResponse: + """Post-rpc interceptor for mutate_row + + Override in a subclass to manipulate the response + after it is returned by the Bigtable server but before + it is returned to user code. + """ + return response + + def pre_mutate_rows( + self, request: bigtable.MutateRowsRequest, metadata: Sequence[Tuple[str, str]] + ) -> Tuple[bigtable.MutateRowsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for mutate_rows + + Override in a subclass to manipulate the request or metadata + before they are sent to the Bigtable server. 
+ """ + return request, metadata + + def post_mutate_rows( + self, response: rest_streaming.ResponseIterator + ) -> rest_streaming.ResponseIterator: + """Post-rpc interceptor for mutate_rows + + Override in a subclass to manipulate the response + after it is returned by the Bigtable server but before + it is returned to user code. + """ + return response + + def pre_ping_and_warm( + self, request: bigtable.PingAndWarmRequest, metadata: Sequence[Tuple[str, str]] + ) -> Tuple[bigtable.PingAndWarmRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for ping_and_warm + + Override in a subclass to manipulate the request or metadata + before they are sent to the Bigtable server. + """ + return request, metadata + + def post_ping_and_warm( + self, response: bigtable.PingAndWarmResponse + ) -> bigtable.PingAndWarmResponse: + """Post-rpc interceptor for ping_and_warm + + Override in a subclass to manipulate the response + after it is returned by the Bigtable server but before + it is returned to user code. + """ + return response + + def pre_read_change_stream( + self, + request: bigtable.ReadChangeStreamRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[bigtable.ReadChangeStreamRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for read_change_stream + + Override in a subclass to manipulate the request or metadata + before they are sent to the Bigtable server. + """ + return request, metadata + + def post_read_change_stream( + self, response: rest_streaming.ResponseIterator + ) -> rest_streaming.ResponseIterator: + """Post-rpc interceptor for read_change_stream + + Override in a subclass to manipulate the response + after it is returned by the Bigtable server but before + it is returned to user code. + """ + return response + + def pre_read_modify_write_row( + self, + request: bigtable.ReadModifyWriteRowRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[bigtable.ReadModifyWriteRowRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for read_modify_write_row + + Override in a subclass to manipulate the request or metadata + before they are sent to the Bigtable server. + """ + return request, metadata + + def post_read_modify_write_row( + self, response: bigtable.ReadModifyWriteRowResponse + ) -> bigtable.ReadModifyWriteRowResponse: + """Post-rpc interceptor for read_modify_write_row + + Override in a subclass to manipulate the response + after it is returned by the Bigtable server but before + it is returned to user code. + """ + return response + + def pre_read_rows( + self, request: bigtable.ReadRowsRequest, metadata: Sequence[Tuple[str, str]] + ) -> Tuple[bigtable.ReadRowsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for read_rows + + Override in a subclass to manipulate the request or metadata + before they are sent to the Bigtable server. + """ + return request, metadata + + def post_read_rows( + self, response: rest_streaming.ResponseIterator + ) -> rest_streaming.ResponseIterator: + """Post-rpc interceptor for read_rows + + Override in a subclass to manipulate the response + after it is returned by the Bigtable server but before + it is returned to user code. + """ + return response + + def pre_sample_row_keys( + self, + request: bigtable.SampleRowKeysRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[bigtable.SampleRowKeysRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for sample_row_keys + + Override in a subclass to manipulate the request or metadata + before they are sent to the Bigtable server. 
+ """ + return request, metadata + + def post_sample_row_keys( + self, response: rest_streaming.ResponseIterator + ) -> rest_streaming.ResponseIterator: + """Post-rpc interceptor for sample_row_keys + + Override in a subclass to manipulate the response + after it is returned by the Bigtable server but before + it is returned to user code. + """ + return response + + +@dataclasses.dataclass +class BigtableRestStub: + _session: AuthorizedSession + _host: str + _interceptor: BigtableRestInterceptor + + +class BigtableRestTransport(BigtableTransport): + """REST backend transport for Bigtable. + + Service for reading from and writing to existing Bigtable + tables. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + + """ + + def __init__( + self, + *, + host: str = "bigtable.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + url_scheme: str = "https", + interceptor: Optional[BigtableRestInterceptor] = None, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. 
+ # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + maybe_url_match = re.match("^(?Phttp(?:s)?://)?(?P.*)$", host) + if maybe_url_match is None: + raise ValueError( + f"Unexpected hostname structure: {host}" + ) # pragma: NO COVER + + url_match_items = maybe_url_match.groupdict() + + host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host + + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST + ) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._interceptor = interceptor or BigtableRestInterceptor() + self._prep_wrapped_messages(client_info) + + class _CheckAndMutateRow(BigtableRestStub): + def __hash__(self): + return hash("CheckAndMutateRow") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable.CheckAndMutateRowRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> bigtable.CheckAndMutateRowResponse: + r"""Call the check and mutate row method over HTTP. + + Args: + request (~.bigtable.CheckAndMutateRowRequest): + The request object. Request message for + Bigtable.CheckAndMutateRow. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.bigtable.CheckAndMutateRowResponse: + Response message for + Bigtable.CheckAndMutateRow. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_check_and_mutate_row( + request, metadata + ) + pb_request = bigtable.CheckAndMutateRowRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = bigtable.CheckAndMutateRowResponse() + pb_resp = bigtable.CheckAndMutateRowResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_check_and_mutate_row(resp) + return resp + + class _GenerateInitialChangeStreamPartitions(BigtableRestStub): + def __hash__(self): + return hash("GenerateInitialChangeStreamPartitions") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable.GenerateInitialChangeStreamPartitionsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> rest_streaming.ResponseIterator: + r"""Call the generate initial change + stream partitions method over HTTP. + + Args: + request (~.bigtable.GenerateInitialChangeStreamPartitionsRequest): + The request object. NOTE: This API is intended to be used + by Apache Beam BigtableIO. Request + message for + Bigtable.GenerateInitialChangeStreamPartitions. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.bigtable.GenerateInitialChangeStreamPartitionsResponse: + NOTE: This API is intended to be used + by Apache Beam BigtableIO. Response + message for + Bigtable.GenerateInitialChangeStreamPartitions. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{table_name=projects/*/instances/*/tables/*}:generateInitialChangeStreamPartitions", + "body": "*", + }, + ] + ( + request, + metadata, + ) = self._interceptor.pre_generate_initial_change_stream_partitions( + request, metadata + ) + pb_request = bigtable.GenerateInitialChangeStreamPartitionsRequest.pb( + request + ) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = rest_streaming.ResponseIterator( + response, bigtable.GenerateInitialChangeStreamPartitionsResponse + ) + resp = self._interceptor.post_generate_initial_change_stream_partitions( + resp + ) + return resp + + class _MutateRow(BigtableRestStub): + def __hash__(self): + return hash("MutateRow") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable.MutateRowRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> bigtable.MutateRowResponse: + r"""Call the mutate row method over HTTP. + + Args: + request (~.bigtable.MutateRowRequest): + The request object. Request message for + Bigtable.MutateRow. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.bigtable.MutateRowResponse: + Response message for + Bigtable.MutateRow. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_mutate_row(request, metadata) + pb_request = bigtable.MutateRowRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = bigtable.MutateRowResponse() + pb_resp = bigtable.MutateRowResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_mutate_row(resp) + return resp + + class _MutateRows(BigtableRestStub): + def __hash__(self): + return hash("MutateRows") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable.MutateRowsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> rest_streaming.ResponseIterator: + r"""Call the mutate rows method over HTTP. + + Args: + request (~.bigtable.MutateRowsRequest): + The request object. Request message for + BigtableService.MutateRows. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.bigtable.MutateRowsResponse: + Response message for + BigtableService.MutateRows. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_mutate_rows(request, metadata) + pb_request = bigtable.MutateRowsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = rest_streaming.ResponseIterator( + response, bigtable.MutateRowsResponse + ) + resp = self._interceptor.post_mutate_rows(resp) + return resp + + class _PingAndWarm(BigtableRestStub): + def __hash__(self): + return hash("PingAndWarm") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable.PingAndWarmRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> bigtable.PingAndWarmResponse: + r"""Call the ping and warm method over HTTP. + + Args: + request (~.bigtable.PingAndWarmRequest): + The request object. Request message for client connection + keep-alive and warming. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.bigtable.PingAndWarmResponse: + Response message for + Bigtable.PingAndWarm connection + keepalive and warming. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{name=projects/*/instances/*}:ping", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_ping_and_warm(request, metadata) + pb_request = bigtable.PingAndWarmRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = bigtable.PingAndWarmResponse() + pb_resp = bigtable.PingAndWarmResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_ping_and_warm(resp) + return resp + + class _ReadChangeStream(BigtableRestStub): + def __hash__(self): + return hash("ReadChangeStream") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable.ReadChangeStreamRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> rest_streaming.ResponseIterator: + r"""Call the read change stream method over HTTP. + + Args: + request (~.bigtable.ReadChangeStreamRequest): + The request object. NOTE: This API is intended to be used + by Apache Beam BigtableIO. Request + message for Bigtable.ReadChangeStream. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.bigtable.ReadChangeStreamResponse: + NOTE: This API is intended to be used + by Apache Beam BigtableIO. Response + message for Bigtable.ReadChangeStream. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{table_name=projects/*/instances/*/tables/*}:readChangeStream", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_read_change_stream( + request, metadata + ) + pb_request = bigtable.ReadChangeStreamRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = rest_streaming.ResponseIterator( + response, bigtable.ReadChangeStreamResponse + ) + resp = self._interceptor.post_read_change_stream(resp) + return resp + + class _ReadModifyWriteRow(BigtableRestStub): + def __hash__(self): + return hash("ReadModifyWriteRow") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable.ReadModifyWriteRowRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> bigtable.ReadModifyWriteRowResponse: + r"""Call the read modify write row method over HTTP. + + Args: + request (~.bigtable.ReadModifyWriteRowRequest): + The request object. Request message for + Bigtable.ReadModifyWriteRow. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.bigtable.ReadModifyWriteRowResponse: + Response message for + Bigtable.ReadModifyWriteRow. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_read_modify_write_row( + request, metadata + ) + pb_request = bigtable.ReadModifyWriteRowRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = bigtable.ReadModifyWriteRowResponse() + pb_resp = bigtable.ReadModifyWriteRowResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_read_modify_write_row(resp) + return resp + + class _ReadRows(BigtableRestStub): + def __hash__(self): + return hash("ReadRows") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable.ReadRowsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> rest_streaming.ResponseIterator: + r"""Call the read rows method over HTTP. + + Args: + request (~.bigtable.ReadRowsRequest): + The request object. Request message for + Bigtable.ReadRows. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.bigtable.ReadRowsResponse: + Response message for + Bigtable.ReadRows. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{table_name=projects/*/instances/*/tables/*}:readRows", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_read_rows(request, metadata) + pb_request = bigtable.ReadRowsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = rest_streaming.ResponseIterator(response, bigtable.ReadRowsResponse) + resp = self._interceptor.post_read_rows(resp) + return resp + + class _SampleRowKeys(BigtableRestStub): + def __hash__(self): + return hash("SampleRowKeys") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable.SampleRowKeysRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> rest_streaming.ResponseIterator: + r"""Call the sample row keys method over HTTP. + + Args: + request (~.bigtable.SampleRowKeysRequest): + The request object. Request message for + Bigtable.SampleRowKeys. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.bigtable.SampleRowKeysResponse: + Response message for + Bigtable.SampleRowKeys. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys", + }, + ] + request, metadata = self._interceptor.pre_sample_row_keys(request, metadata) + pb_request = bigtable.SampleRowKeysRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = rest_streaming.ResponseIterator( + response, bigtable.SampleRowKeysResponse + ) + resp = self._interceptor.post_sample_row_keys(resp) + return resp + + @property + def check_and_mutate_row( + self, + ) -> Callable[ + [bigtable.CheckAndMutateRowRequest], bigtable.CheckAndMutateRowResponse + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._CheckAndMutateRow(self._session, self._host, self._interceptor) # type: ignore + + @property + def generate_initial_change_stream_partitions( + self, + ) -> Callable[ + [bigtable.GenerateInitialChangeStreamPartitionsRequest], + bigtable.GenerateInitialChangeStreamPartitionsResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._GenerateInitialChangeStreamPartitions(self._session, self._host, self._interceptor) # type: ignore + + @property + def mutate_row( + self, + ) -> Callable[[bigtable.MutateRowRequest], bigtable.MutateRowResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._MutateRow(self._session, self._host, self._interceptor) # type: ignore + + @property + def mutate_rows( + self, + ) -> Callable[[bigtable.MutateRowsRequest], bigtable.MutateRowsResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._MutateRows(self._session, self._host, self._interceptor) # type: ignore + + @property + def ping_and_warm( + self, + ) -> Callable[[bigtable.PingAndWarmRequest], bigtable.PingAndWarmResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._PingAndWarm(self._session, self._host, self._interceptor) # type: ignore + + @property + def read_change_stream( + self, + ) -> Callable[ + [bigtable.ReadChangeStreamRequest], bigtable.ReadChangeStreamResponse + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ReadChangeStream(self._session, self._host, self._interceptor) # type: ignore + + @property + def read_modify_write_row( + self, + ) -> Callable[ + [bigtable.ReadModifyWriteRowRequest], bigtable.ReadModifyWriteRowResponse + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ReadModifyWriteRow(self._session, self._host, self._interceptor) # type: ignore + + @property + def read_rows( + self, + ) -> Callable[[bigtable.ReadRowsRequest], bigtable.ReadRowsResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ReadRows(self._session, self._host, self._interceptor) # type: ignore + + @property + def sample_row_keys( + self, + ) -> Callable[[bigtable.SampleRowKeysRequest], bigtable.SampleRowKeysResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._SampleRowKeys(self._session, self._host, self._interceptor) # type: ignore + + @property + def kind(self) -> str: + return "rest" + + def close(self): + self._session.close() + + +__all__ = ("BigtableRestTransport",) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py index 3499cf5d14ec..bb2533e331b7 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py @@ -16,12 +16,16 @@ from .bigtable import ( CheckAndMutateRowRequest, CheckAndMutateRowResponse, + GenerateInitialChangeStreamPartitionsRequest, + GenerateInitialChangeStreamPartitionsResponse, MutateRowRequest, MutateRowResponse, MutateRowsRequest, MutateRowsResponse, PingAndWarmRequest, PingAndWarmResponse, + ReadChangeStreamRequest, + ReadChangeStreamResponse, ReadModifyWriteRowRequest, ReadModifyWriteRowResponse, ReadRowsRequest, @@ -40,6 +44,9 @@ RowFilter, RowRange, RowSet, + StreamContinuationToken, + StreamContinuationTokens, + StreamPartition, TimestampRange, ValueRange, ) @@ -56,12 +63,16 @@ __all__ = ( "CheckAndMutateRowRequest", "CheckAndMutateRowResponse", + "GenerateInitialChangeStreamPartitionsRequest", + "GenerateInitialChangeStreamPartitionsResponse", "MutateRowRequest", "MutateRowResponse", "MutateRowsRequest", "MutateRowsResponse", "PingAndWarmRequest", "PingAndWarmResponse", + "ReadChangeStreamRequest", + "ReadChangeStreamResponse", "ReadModifyWriteRowRequest", "ReadModifyWriteRowResponse", "ReadRowsRequest", @@ -78,6 +89,9 @@ "RowFilter", "RowRange", "RowSet", + "StreamContinuationToken", + "StreamContinuationTokens", + "StreamPartition", "TimestampRange", "ValueRange", "FullReadStatsView", diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py index 8124cb7e387e..bfe64bd122bc 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py @@ -19,6 +19,8 @@ from google.cloud.bigtable_v2.types import data from google.cloud.bigtable_v2.types import request_stats as gb_request_stats +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore from google.protobuf import wrappers_pb2 # type: ignore from google.rpc import status_pb2 # type: ignore @@ -40,6 +42,10 @@ "PingAndWarmResponse", "ReadModifyWriteRowRequest", "ReadModifyWriteRowResponse", + "GenerateInitialChangeStreamPartitionsRequest", + "GenerateInitialChangeStreamPartitionsResponse", + "ReadChangeStreamRequest", + "ReadChangeStreamResponse", }, ) @@ -429,8 +435,8 @@ class Entry(proto.Message): Required. Changes to be atomically applied to the specified row. Mutations are applied in order, meaning that earlier mutations can be - masked by later ones. - You must specify at least one mutation. + masked by later ones. You must specify at least + one mutation. """ row_key: bytes = proto.Field( @@ -672,4 +678,424 @@ class ReadModifyWriteRowResponse(proto.Message): ) +class GenerateInitialChangeStreamPartitionsRequest(proto.Message): + r"""NOTE: This API is intended to be used by Apache Beam + BigtableIO. Request message for + Bigtable.GenerateInitialChangeStreamPartitions. + + Attributes: + table_name (str): + Required. 
The unique name of the table from which to get + change stream partitions. Values are of the form + ``projects/<project>/instances/<instance>/tables/<table>
``. + Change streaming must be enabled on the table. + app_profile_id (str): + This value specifies routing for replication. + If not specified, the "default" application + profile will be used. Single cluster routing + must be configured on the profile. + """ + + table_name: str = proto.Field( + proto.STRING, + number=1, + ) + app_profile_id: str = proto.Field( + proto.STRING, + number=2, + ) + + +class GenerateInitialChangeStreamPartitionsResponse(proto.Message): + r"""NOTE: This API is intended to be used by Apache Beam + BigtableIO. Response message for + Bigtable.GenerateInitialChangeStreamPartitions. + + Attributes: + partition (google.cloud.bigtable_v2.types.StreamPartition): + A partition of the change stream. + """ + + partition: data.StreamPartition = proto.Field( + proto.MESSAGE, + number=1, + message=data.StreamPartition, + ) + + +class ReadChangeStreamRequest(proto.Message): + r"""NOTE: This API is intended to be used by Apache Beam + BigtableIO. Request message for Bigtable.ReadChangeStream. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + table_name (str): + Required. The unique name of the table from which to read a + change stream. Values are of the form + ``projects//instances//tables/
``. + Change streaming must be enabled on the table. + app_profile_id (str): + This value specifies routing for replication. + If not specified, the "default" application + profile will be used. Single cluster routing + must be configured on the profile. + partition (google.cloud.bigtable_v2.types.StreamPartition): + The partition to read changes from. + start_time (google.protobuf.timestamp_pb2.Timestamp): + Start reading the stream at the specified + timestamp. This timestamp must be within the + change stream retention period, less than or + equal to the current time, and after change + stream creation, whichever is greater. This + value is inclusive and will be truncated to + microsecond granularity. + + This field is a member of `oneof`_ ``start_from``. + continuation_tokens (google.cloud.bigtable_v2.types.StreamContinuationTokens): + Tokens that describe how to resume reading a stream where + reading previously left off. If specified, changes will be + read starting at the the position. Tokens are delivered on + the stream as part of ``Heartbeat`` and ``CloseStream`` + messages. + + If a single token is provided, the token’s partition must + exactly match the request’s partition. If multiple tokens + are provided, as in the case of a partition merge, the union + of the token partitions must exactly cover the request’s + partition. Otherwise, INVALID_ARGUMENT will be returned. + + This field is a member of `oneof`_ ``start_from``. + end_time (google.protobuf.timestamp_pb2.Timestamp): + If specified, OK will be returned when the + stream advances beyond this time. Otherwise, + changes will be continuously delivered on the + stream. This value is inclusive and will be + truncated to microsecond granularity. + heartbeat_duration (google.protobuf.duration_pb2.Duration): + If specified, the duration between ``Heartbeat`` messages on + the stream. Otherwise, defaults to 5 seconds. + """ + + table_name: str = proto.Field( + proto.STRING, + number=1, + ) + app_profile_id: str = proto.Field( + proto.STRING, + number=2, + ) + partition: data.StreamPartition = proto.Field( + proto.MESSAGE, + number=3, + message=data.StreamPartition, + ) + start_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=4, + oneof="start_from", + message=timestamp_pb2.Timestamp, + ) + continuation_tokens: data.StreamContinuationTokens = proto.Field( + proto.MESSAGE, + number=6, + oneof="start_from", + message=data.StreamContinuationTokens, + ) + end_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=5, + message=timestamp_pb2.Timestamp, + ) + heartbeat_duration: duration_pb2.Duration = proto.Field( + proto.MESSAGE, + number=7, + message=duration_pb2.Duration, + ) + + +class ReadChangeStreamResponse(proto.Message): + r"""NOTE: This API is intended to be used by Apache Beam + BigtableIO. Response message for Bigtable.ReadChangeStream. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + data_change (google.cloud.bigtable_v2.types.ReadChangeStreamResponse.DataChange): + A mutation to the partition. + + This field is a member of `oneof`_ ``stream_record``. + heartbeat (google.cloud.bigtable_v2.types.ReadChangeStreamResponse.Heartbeat): + A periodic heartbeat message. 
+ + This field is a member of `oneof`_ ``stream_record``. + close_stream (google.cloud.bigtable_v2.types.ReadChangeStreamResponse.CloseStream): + An indication that the stream should be + closed. + + This field is a member of `oneof`_ ``stream_record``. + """ + + class MutationChunk(proto.Message): + r"""A partial or complete mutation. + + Attributes: + chunk_info (google.cloud.bigtable_v2.types.ReadChangeStreamResponse.MutationChunk.ChunkInfo): + If set, then the mutation is a ``SetCell`` with a chunked + value across multiple messages. + mutation (google.cloud.bigtable_v2.types.Mutation): + If this is a continuation of a chunked message + (``chunked_value_offset`` > 0), ignore all fields except the + ``SetCell``'s value and merge it with the previous message + by concatenating the value fields. + """ + + class ChunkInfo(proto.Message): + r"""Information about the chunking of this mutation. Only ``SetCell`` + mutations can be chunked, and all chunks for a ``SetCell`` will be + delivered contiguously with no other mutation types interleaved. + + Attributes: + chunked_value_size (int): + The total value size of all the chunks that make up the + ``SetCell``. + chunked_value_offset (int): + The byte offset of this chunk into the total + value size of the mutation. + last_chunk (bool): + When true, this is the last chunk of a chunked ``SetCell``. + """ + + chunked_value_size: int = proto.Field( + proto.INT32, + number=1, + ) + chunked_value_offset: int = proto.Field( + proto.INT32, + number=2, + ) + last_chunk: bool = proto.Field( + proto.BOOL, + number=3, + ) + + chunk_info: "ReadChangeStreamResponse.MutationChunk.ChunkInfo" = proto.Field( + proto.MESSAGE, + number=1, + message="ReadChangeStreamResponse.MutationChunk.ChunkInfo", + ) + mutation: data.Mutation = proto.Field( + proto.MESSAGE, + number=2, + message=data.Mutation, + ) + + class DataChange(proto.Message): + r"""A message corresponding to one or more mutations to the partition + being streamed. A single logical ``DataChange`` message may also be + split across a sequence of multiple individual messages. Messages + other than the first in a sequence will only have the ``type`` and + ``chunks`` fields populated, with the final message in the sequence + also containing ``done`` set to true. + + Attributes: + type_ (google.cloud.bigtable_v2.types.ReadChangeStreamResponse.DataChange.Type): + The type of the mutation. + source_cluster_id (str): + The cluster where the mutation was applied. Not set when + ``type`` is ``GARBAGE_COLLECTION``. + row_key (bytes): + The row key for all mutations that are part of this + ``DataChange``. If the ``DataChange`` is chunked across + multiple messages, then this field will only be set for the + first message. + commit_timestamp (google.protobuf.timestamp_pb2.Timestamp): + The timestamp at which the mutation was + applied on the Bigtable server. + tiebreaker (int): + A value that lets stream consumers reconstruct Bigtable's + conflict resolution semantics. + https://cloud.google.com/bigtable/docs/writes#conflict-resolution + In the event that the same row key, column family, column + qualifier, timestamp are modified on different clusters at + the same ``commit_timestamp``, the mutation with the larger + ``tiebreaker`` will be the one chosen for the eventually + consistent state of the system. + chunks (MutableSequence[google.cloud.bigtable_v2.types.ReadChangeStreamResponse.MutationChunk]): + The mutations associated with this change to the partition. 
+ May contain complete mutations or chunks of a multi-message + chunked ``DataChange`` record. + done (bool): + When true, indicates that the entire ``DataChange`` has been + read and the client can safely process the message. + token (str): + An encoded position for this stream's + partition to restart reading from. This token is + for the StreamPartition from the request. + estimated_low_watermark (google.protobuf.timestamp_pb2.Timestamp): + An estimate of the commit timestamp that is + usually lower than or equal to any timestamp for + a record that will be delivered in the future on + the stream. It is possible that, under + particular circumstances that a future record + has a timestamp is is lower than a previously + seen timestamp. For an example usage see + https://beam.apache.org/documentation/basics/#watermarks + """ + + class Type(proto.Enum): + r"""The type of mutation. + + Values: + TYPE_UNSPECIFIED (0): + The type is unspecified. + USER (1): + A user-initiated mutation. + GARBAGE_COLLECTION (2): + A system-initiated mutation as part of + garbage collection. + https://cloud.google.com/bigtable/docs/garbage-collection + CONTINUATION (3): + This is a continuation of a multi-message + change. + """ + TYPE_UNSPECIFIED = 0 + USER = 1 + GARBAGE_COLLECTION = 2 + CONTINUATION = 3 + + type_: "ReadChangeStreamResponse.DataChange.Type" = proto.Field( + proto.ENUM, + number=1, + enum="ReadChangeStreamResponse.DataChange.Type", + ) + source_cluster_id: str = proto.Field( + proto.STRING, + number=2, + ) + row_key: bytes = proto.Field( + proto.BYTES, + number=3, + ) + commit_timestamp: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + tiebreaker: int = proto.Field( + proto.INT32, + number=5, + ) + chunks: MutableSequence[ + "ReadChangeStreamResponse.MutationChunk" + ] = proto.RepeatedField( + proto.MESSAGE, + number=6, + message="ReadChangeStreamResponse.MutationChunk", + ) + done: bool = proto.Field( + proto.BOOL, + number=8, + ) + token: str = proto.Field( + proto.STRING, + number=9, + ) + estimated_low_watermark: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=10, + message=timestamp_pb2.Timestamp, + ) + + class Heartbeat(proto.Message): + r"""A periodic message with information that can be used to + checkpoint the state of a stream. + + Attributes: + continuation_token (google.cloud.bigtable_v2.types.StreamContinuationToken): + A token that can be provided to a subsequent + ``ReadChangeStream`` call to pick up reading at the current + stream position. + estimated_low_watermark (google.protobuf.timestamp_pb2.Timestamp): + An estimate of the commit timestamp that is + usually lower than or equal to any timestamp for + a record that will be delivered in the future on + the stream. It is possible that, under + particular circumstances that a future record + has a timestamp is is lower than a previously + seen timestamp. For an example usage see + https://beam.apache.org/documentation/basics/#watermarks + """ + + continuation_token: data.StreamContinuationToken = proto.Field( + proto.MESSAGE, + number=1, + message=data.StreamContinuationToken, + ) + estimated_low_watermark: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + + class CloseStream(proto.Message): + r"""A message indicating that the client should stop reading from the + stream. 
If status is OK and ``continuation_tokens`` is empty, the + stream has finished (for example if there was an ``end_time`` + specified). If ``continuation_tokens`` is present, then a change in + partitioning requires the client to open a new stream for each token + to resume reading. + + Attributes: + status (google.rpc.status_pb2.Status): + The status of the stream. + continuation_tokens (MutableSequence[google.cloud.bigtable_v2.types.StreamContinuationToken]): + If non-empty, contains the information needed + to start reading the new partition(s) that + contain segments of this partition's row range. + """ + + status: status_pb2.Status = proto.Field( + proto.MESSAGE, + number=1, + message=status_pb2.Status, + ) + continuation_tokens: MutableSequence[ + data.StreamContinuationToken + ] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=data.StreamContinuationToken, + ) + + data_change: DataChange = proto.Field( + proto.MESSAGE, + number=1, + oneof="stream_record", + message=DataChange, + ) + heartbeat: Heartbeat = proto.Field( + proto.MESSAGE, + number=2, + oneof="stream_record", + message=Heartbeat, + ) + close_stream: CloseStream = proto.Field( + proto.MESSAGE, + number=3, + oneof="stream_record", + message=CloseStream, + ) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py index a3bec7274748..8b74c8c7059a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py @@ -33,6 +33,9 @@ "RowFilter", "Mutation", "ReadModifyWriteRule", + "StreamPartition", + "StreamContinuationTokens", + "StreamContinuationToken", }, ) @@ -1034,4 +1037,62 @@ class ReadModifyWriteRule(proto.Message): ) +class StreamPartition(proto.Message): + r"""NOTE: This API is intended to be used by Apache Beam + BigtableIO. A partition of a change stream. + + Attributes: + row_range (google.cloud.bigtable_v2.types.RowRange): + The row range covered by this partition and is specified by + [``start_key_closed``, ``end_key_open``). + """ + + row_range: "RowRange" = proto.Field( + proto.MESSAGE, + number=1, + message="RowRange", + ) + + +class StreamContinuationTokens(proto.Message): + r"""NOTE: This API is intended to be used by Apache Beam BigtableIO. The + information required to continue reading the data from multiple + ``StreamPartitions`` from where a previous read left off. + + Attributes: + tokens (MutableSequence[google.cloud.bigtable_v2.types.StreamContinuationToken]): + List of continuation tokens. + """ + + tokens: MutableSequence["StreamContinuationToken"] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="StreamContinuationToken", + ) + + +class StreamContinuationToken(proto.Message): + r"""NOTE: This API is intended to be used by Apache Beam BigtableIO. The + information required to continue reading the data from a + ``StreamPartition`` from where a previous read left off. + + Attributes: + partition (google.cloud.bigtable_v2.types.StreamPartition): + The partition that this token applies to. + token (str): + An encoded position in the stream to restart + reading from. 
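+
+    A minimal resume sketch (illustrative, assuming a ``token`` previously
+    received from a ``Heartbeat`` or ``CloseStream`` message and a
+    ``table_name`` already in scope): the token is passed back to the server
+    in ``ReadChangeStreamRequest.continuation_tokens``::
+
+        from google.cloud.bigtable_v2.types import (
+            ReadChangeStreamRequest,
+            StreamContinuationTokens,
+        )
+
+        request = ReadChangeStreamRequest(
+            table_name=table_name,
+            continuation_tokens=StreamContinuationTokens(tokens=[token]),
+        )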
+ """ + + partition: "StreamPartition" = proto.Field( + proto.MESSAGE, + number=1, + message="StreamPartition", + ) + token: str = proto.Field( + proto.STRING, + number=2, + ) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index 47415385a13f..18f489e1901a 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -126,7 +126,9 @@ def format(session): def mypy(session): """Verify type hints are mypy compatible.""" session.install("-e", ".") - session.install("mypy", "types-setuptools", "types-protobuf", "types-mock") + session.install( + "mypy", "types-setuptools", "types-protobuf", "types-mock", "types-requests" + ) session.install("google-cloud-testutils") # TODO: also verify types on tests, all of google package session.run("mypy", "google/", "tests/") diff --git a/packages/google-cloud-bigtable/owlbot.py b/packages/google-cloud-bigtable/owlbot.py index b6aa2f8a297f..78c6ca2f8223 100644 --- a/packages/google-cloud-bigtable/owlbot.py +++ b/packages/google-cloud-bigtable/owlbot.py @@ -166,7 +166,7 @@ def lint_setup_py\(session\): def mypy(session): """Verify type hints are mypy compatible.""" session.install("-e", ".") - session.install("mypy", "types-setuptools", "types-protobuf", "types-mock") + session.install("mypy", "types-setuptools", "types-protobuf", "types-mock", "types-requests") session.install("google-cloud-testutils") # TODO: also verify types on tests, all of google package session.run("mypy", "google/", "tests/") @@ -177,42 +177,6 @@ def lint_setup_py(session): ''', ) -# Work around https://github.com/googleapis/gapic-generator-python/issues/689 -bad_clusters_typing = r""" - clusters: Sequence\[ - bigtable_instance_admin\.CreateInstanceRequest\.ClustersEntry - \] \= None,""" - -good_clusters_typing = """ - clusters: Dict[str, gba_instance.Cluster] = None,""" - -s.replace( - "google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/*client.py", - bad_clusters_typing, - good_clusters_typing, -) - -bad_clusters_docstring_1 = re.escape(r""" - clusters (:class:`Sequence[google.cloud.bigtable_admin_v2.types.CreateInstanceRequest.ClustersEntry]`):""") - -bad_clusters_docstring_2 = re.escape(r""" - clusters (Sequence[google.cloud.bigtable_admin_v2.types.CreateInstanceRequest.ClustersEntry]):""") - -good_clusters_docstring = """ - clusters (Dict[str, gba_instance.Cluster]):""" - -s.replace( - "google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/*client.py", - bad_clusters_docstring_1, - good_clusters_docstring, -) - -s.replace( - "google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/*client.py", - bad_clusters_docstring_2, - good_clusters_docstring, -) - # ---------------------------------------------------------------------------- # Samples templates # ---------------------------------------------------------------------------- diff --git a/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py b/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py index 4424228fd282..11ffed53fb19 100644 --- a/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py +++ b/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py @@ -40,9 +40,11 @@ class bigtableCallTransformer(cst.CSTTransformer): CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { 'check_and_mutate_row': ('table_name', 'row_key', 'app_profile_id', 
'predicate_filter', 'true_mutations', 'false_mutations', ), + 'generate_initial_change_stream_partitions': ('table_name', 'app_profile_id', ), 'mutate_row': ('table_name', 'row_key', 'mutations', 'app_profile_id', ), 'mutate_rows': ('table_name', 'entries', 'app_profile_id', ), 'ping_and_warm': ('name', 'app_profile_id', ), + 'read_change_stream': ('table_name', 'app_profile_id', 'partition', 'start_time', 'continuation_tokens', 'end_time', 'heartbeat_duration', ), 'read_modify_write_row': ('table_name', 'row_key', 'rules', 'app_profile_id', ), 'read_rows': ('table_name', 'app_profile_id', 'rows', 'filter', 'rows_limit', 'request_stats_view', ), 'sample_row_keys': ('table_name', 'app_profile_id', ), diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 884d212df3bf..49bb10adcf77 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -37,7 +37,7 @@ # 'Development Status :: 5 - Production/Stable' release_status = "Development Status :: 5 - Production/Stable" dependencies = [ - "google-api-core[grpc] >= 1.32.0, <3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*", + "google-api-core[grpc] >= 1.34.0, <3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*,!=2.8.*,!=2.9.*,!=2.10.*", "google-cloud-core >= 1.4.1, <3.0.0dev", "grpc-google-iam-v1 >= 0.12.4, <1.0.0dev", "proto-plus >= 1.22.0, <2.0.0dev", diff --git a/packages/google-cloud-bigtable/testing/constraints-3.7.txt b/packages/google-cloud-bigtable/testing/constraints-3.7.txt index fb3cf92d547c..d14da7c0c6c8 100644 --- a/packages/google-cloud-bigtable/testing/constraints-3.7.txt +++ b/packages/google-cloud-bigtable/testing/constraints-3.7.txt @@ -5,7 +5,7 @@ # # e.g., if setup.py has "foo >= 1.14.0, < 2.0.0dev", # Then this file should have foo==1.14.0 -google-api-core==1.32.0 +google-api-core==1.34.0 google-cloud-core==1.4.1 grpc-google-iam-v1==0.12.4 proto-plus==1.22.0 diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index c8c1fe356a8b..76715f1ed73a 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -24,10 +24,17 @@ import grpc from grpc.experimental import aio +from collections.abc import Iterable +from google.protobuf import json_format +import json import math import pytest from proto.marshal.rules.dates import DurationRule, TimestampRule from proto.marshal.rules import wrappers +from requests import Response +from requests import Request, PreparedRequest +from requests.sessions import Session +from google.protobuf import json_format from google.api_core import client_options from google.api_core import exceptions as core_exceptions @@ -114,6 +121,7 @@ def test__get_default_mtls_endpoint(): [ (BigtableInstanceAdminClient, "grpc"), (BigtableInstanceAdminAsyncClient, "grpc_asyncio"), + (BigtableInstanceAdminClient, "rest"), ], ) def test_bigtable_instance_admin_client_from_service_account_info( @@ -129,7 +137,11 @@ def test_bigtable_instance_admin_client_from_service_account_info( assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == ("bigtableadmin.googleapis.com:443") + assert client.transport._host == ( + 
"bigtableadmin.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://bigtableadmin.googleapis.com" + ) @pytest.mark.parametrize( @@ -137,6 +149,7 @@ def test_bigtable_instance_admin_client_from_service_account_info( [ (transports.BigtableInstanceAdminGrpcTransport, "grpc"), (transports.BigtableInstanceAdminGrpcAsyncIOTransport, "grpc_asyncio"), + (transports.BigtableInstanceAdminRestTransport, "rest"), ], ) def test_bigtable_instance_admin_client_service_account_always_use_jwt( @@ -162,6 +175,7 @@ def test_bigtable_instance_admin_client_service_account_always_use_jwt( [ (BigtableInstanceAdminClient, "grpc"), (BigtableInstanceAdminAsyncClient, "grpc_asyncio"), + (BigtableInstanceAdminClient, "rest"), ], ) def test_bigtable_instance_admin_client_from_service_account_file( @@ -184,13 +198,18 @@ def test_bigtable_instance_admin_client_from_service_account_file( assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == ("bigtableadmin.googleapis.com:443") + assert client.transport._host == ( + "bigtableadmin.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://bigtableadmin.googleapis.com" + ) def test_bigtable_instance_admin_client_get_transport_class(): transport = BigtableInstanceAdminClient.get_transport_class() available_transports = [ transports.BigtableInstanceAdminGrpcTransport, + transports.BigtableInstanceAdminRestTransport, ] assert transport in available_transports @@ -211,6 +230,11 @@ def test_bigtable_instance_admin_client_get_transport_class(): transports.BigtableInstanceAdminGrpcAsyncIOTransport, "grpc_asyncio", ), + ( + BigtableInstanceAdminClient, + transports.BigtableInstanceAdminRestTransport, + "rest", + ), ], ) @mock.patch.object( @@ -366,6 +390,18 @@ def test_bigtable_instance_admin_client_client_options( "grpc_asyncio", "false", ), + ( + BigtableInstanceAdminClient, + transports.BigtableInstanceAdminRestTransport, + "rest", + "true", + ), + ( + BigtableInstanceAdminClient, + transports.BigtableInstanceAdminRestTransport, + "rest", + "false", + ), ], ) @mock.patch.object( @@ -569,6 +605,11 @@ def test_bigtable_instance_admin_client_get_mtls_endpoint_and_cert_source(client transports.BigtableInstanceAdminGrpcAsyncIOTransport, "grpc_asyncio", ), + ( + BigtableInstanceAdminClient, + transports.BigtableInstanceAdminRestTransport, + "rest", + ), ], ) def test_bigtable_instance_admin_client_client_options_scopes( @@ -609,6 +650,12 @@ def test_bigtable_instance_admin_client_client_options_scopes( "grpc_asyncio", grpc_helpers_async, ), + ( + BigtableInstanceAdminClient, + transports.BigtableInstanceAdminRestTransport, + "rest", + None, + ), ], ) def test_bigtable_instance_admin_client_client_options_credentials_file( @@ -6099,187 +6146,6090 @@ async def test_list_hot_tablets_async_pages(): assert page_.raw_page.next_page_token == token -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. 
- transport = transports.BigtableInstanceAdminGrpcTransport( +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.CreateInstanceRequest, + dict, + ], +) +def test_create_instance_rest(request_type): + client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - with pytest.raises(ValueError): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - # It is an error to provide a credentials file and a transport instance. - transport = transports.BigtableInstanceAdminGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = BigtableInstanceAdminClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.create_instance(request) + + # Establish that the response is the type that we expect. + assert response.operation.name == "operations/spam" + + +def test_create_instance_rest_required_fields( + request_type=bigtable_instance_admin.CreateInstanceRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request_init["instance_id"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, ) + ) - # It is an error to provide an api_key and a transport instance. - transport = transports.BigtableInstanceAdminGrpcTransport( + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_instance._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + jsonified_request["instanceId"] = "instance_id_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_instance._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + assert "instanceId" in jsonified_request + assert jsonified_request["instanceId"] == "instance_id_value" + + client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. 
+ return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.create_instance(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_create_instance_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials ) - options = client_options.ClientOptions() - options.api_key = "api_key" - with pytest.raises(ValueError): - client = BigtableInstanceAdminClient( - client_options=options, - transport=transport, - ) - # It is an error to provide an api_key and a credential. - options = mock.Mock() - options.api_key = "api_key" - with pytest.raises(ValueError): - client = BigtableInstanceAdminClient( - client_options=options, credentials=ga_credentials.AnonymousCredentials() + unset_fields = transport.create_instance._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "parent", + "instanceId", + "instance", + "clusters", + ) ) + ) - # It is an error to provide scopes and a transport instance. 
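+# An illustrative sketch of how the interceptor hooks exercised below would be
+# wired up in application code (the LoggingInterceptor name is hypothetical;
+# anonymous credentials are used here only to keep the sketch self-contained).
+def _example_rest_interceptor_usage():
+    class LoggingInterceptor(transports.BigtableInstanceAdminRestInterceptor):
+        def pre_create_instance(self, request, metadata):
+            # Inspect or modify the request/metadata before the HTTP call is made.
+            return request, metadata
+
+    transport = transports.BigtableInstanceAdminRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+        interceptor=LoggingInterceptor(),
+    )
+    return BigtableInstanceAdminClient(transport=transport)
+
+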
- transport = transports.BigtableInstanceAdminGrpcTransport( + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_create_instance_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), ) - with pytest.raises(ValueError): - client = BigtableInstanceAdminClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, + client = BigtableInstanceAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_create_instance" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_create_instance" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_instance_admin.CreateInstanceRequest.pb( + bigtable_instance_admin.CreateInstanceRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson( + operations_pb2.Operation() + ) + + request = bigtable_instance_admin.CreateInstanceRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.create_instance( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], ) + pre.assert_called_once() + post.assert_called_once() -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.BigtableInstanceAdminGrpcTransport( + +def test_create_instance_rest_bad_request( + transport: str = "rest", request_type=bigtable_instance_admin.CreateInstanceRequest +): + client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) - client = BigtableInstanceAdminClient(transport=transport) - assert client.transport is transport + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1"} + request = request_type(**request_init) -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.BigtableInstanceAdminGrpcTransport( + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.create_instance(request) + + +def test_create_instance_rest_flattened(): + client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - channel = transport.grpc_channel - assert channel - transport = transports.BigtableInstanceAdminGrpcAsyncIOTransport( + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + instance_id="instance_id_value", + instance=gba_instance.Instance(name="name_value"), + clusters={"key_value": gba_instance.Cluster(name="name_value")}, + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.create_instance(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*}/instances" % client.transport._host, args[1] + ) + + +def test_create_instance_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) - channel = transport.grpc_channel - assert channel + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_instance( + bigtable_instance_admin.CreateInstanceRequest(), + parent="parent_value", + instance_id="instance_id_value", + instance=gba_instance.Instance(name="name_value"), + clusters={"key_value": gba_instance.Cluster(name="name_value")}, + ) -@pytest.mark.parametrize( - "transport_class", - [ - transports.BigtableInstanceAdminGrpcTransport, - transports.BigtableInstanceAdminGrpcAsyncIOTransport, - ], -) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, "default") as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() + +def test_create_instance_rest_error(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) @pytest.mark.parametrize( - "transport_name", + "request_type", [ - "grpc", + bigtable_instance_admin.GetInstanceRequest, + dict, ], ) -def test_transport_kind(transport_name): - transport = BigtableInstanceAdminClient.get_transport_class(transport_name)( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert transport.kind == transport_name - - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. 
+def test_get_instance_rest(request_type): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.BigtableInstanceAdminGrpcTransport, + transport="rest", ) + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2"} + request = request_type(**request_init) -def test_bigtable_instance_admin_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.BigtableInstanceAdminTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json", + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = instance.Instance( + name="name_value", + display_name="display_name_value", + state=instance.Instance.State.READY, + type_=instance.Instance.Type.PRODUCTION, + satisfies_pzs=True, ) + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = instance.Instance.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) -def test_bigtable_instance_admin_base_transport(): - # Instantiate the base transport. - with mock.patch( - "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.BigtableInstanceAdminTransport.__init__" - ) as Transport: - Transport.return_value = None - transport = transports.BigtableInstanceAdminTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.get_instance(request) - # Every method on the transport should just blindly - # raise NotImplementedError. - methods = ( - "create_instance", - "get_instance", - "list_instances", - "update_instance", - "partial_update_instance", - "delete_instance", - "create_cluster", - "get_cluster", - "list_clusters", - "update_cluster", - "partial_update_cluster", - "delete_cluster", - "create_app_profile", - "get_app_profile", - "list_app_profiles", - "update_app_profile", - "delete_app_profile", - "get_iam_policy", - "set_iam_policy", - "test_iam_permissions", - "list_hot_tablets", + # Establish that the response is the type that we expect. 
+ assert isinstance(response, instance.Instance) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.state == instance.Instance.State.READY + assert response.type_ == instance.Instance.Type.PRODUCTION + assert response.satisfies_pzs is True + + +def test_get_instance_rest_required_fields( + request_type=bigtable_instance_admin.GetInstanceRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - with pytest.raises(NotImplementedError): - transport.close() + # verify fields with default values are dropped - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_instance._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) - # Catch all for all remaining methods and properties - remainder = [ - "kind", - ] - for r in remainder: - with pytest.raises(NotImplementedError): - getattr(transport, r)() + # verify required fields with default values are now present + jsonified_request["name"] = "name_value" -def test_bigtable_instance_admin_base_transport_with_credentials_file(): + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_instance._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = instance.Instance() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + pb_return_value = instance.Instance.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_instance(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_instance_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_instance._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_instance_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_get_instance" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_get_instance" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_instance_admin.GetInstanceRequest.pb( + bigtable_instance_admin.GetInstanceRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = instance.Instance.to_json(instance.Instance()) + + request = bigtable_instance_admin.GetInstanceRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = instance.Instance() + + client.get_instance( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_instance_rest_bad_request( + transport: str = "rest", request_type=bigtable_instance_admin.GetInstanceRequest +): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_instance(request) + + +def test_get_instance_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = instance.Instance() + + # get arguments that satisfy an http rule for this method + sample_request = {"name": "projects/sample1/instances/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = instance.Instance.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.get_instance(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*}" % client.transport._host, args[1] + ) + + +def test_get_instance_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_instance( + bigtable_instance_admin.GetInstanceRequest(), + name="name_value", + ) + + +def test_get_instance_rest_error(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.ListInstancesRequest, + dict, + ], +) +def test_list_instances_rest(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_instance_admin.ListInstancesResponse( + failed_locations=["failed_locations_value"], + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = bigtable_instance_admin.ListInstancesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.list_instances(request) + + assert response.raw_page is response + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, bigtable_instance_admin.ListInstancesResponse) + assert response.failed_locations == ["failed_locations_value"] + assert response.next_page_token == "next_page_token_value" + + +def test_list_instances_rest_required_fields( + request_type=bigtable_instance_admin.ListInstancesRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_instances._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_instances._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("page_token",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable_instance_admin.ListInstancesResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + pb_return_value = bigtable_instance_admin.ListInstancesResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.list_instances(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_instances_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_instances._get_unset_required_fields({}) + assert set(unset_fields) == (set(("pageToken",)) & set(("parent",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_instances_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_list_instances" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_list_instances" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_instance_admin.ListInstancesRequest.pb( + bigtable_instance_admin.ListInstancesRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = ( + bigtable_instance_admin.ListInstancesResponse.to_json( + bigtable_instance_admin.ListInstancesResponse() + ) + ) + + request = bigtable_instance_admin.ListInstancesRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable_instance_admin.ListInstancesResponse() + + client.list_instances( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_list_instances_rest_bad_request( + transport: str = "rest", request_type=bigtable_instance_admin.ListInstancesRequest +): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_instances(request) + + +def test_list_instances_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_instance_admin.ListInstancesResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = bigtable_instance_admin.ListInstancesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.list_instances(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*}/instances" % client.transport._host, args[1] + ) + + +def test_list_instances_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_instances( + bigtable_instance_admin.ListInstancesRequest(), + parent="parent_value", + ) + + +def test_list_instances_rest_error(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + instance.Instance, + dict, + ], +) +def test_update_instance_rest(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = instance.Instance( + name="name_value", + display_name="display_name_value", + state=instance.Instance.State.READY, + type_=instance.Instance.Type.PRODUCTION, + satisfies_pzs=True, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = instance.Instance.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.update_instance(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, instance.Instance) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.state == instance.Instance.State.READY + assert response.type_ == instance.Instance.Type.PRODUCTION + assert response.satisfies_pzs is True + + +def test_update_instance_rest_required_fields(request_type=instance.Instance): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["display_name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_instance._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["displayName"] = "display_name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_instance._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "displayName" in jsonified_request + assert jsonified_request["displayName"] == "display_name_value" + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = instance.Instance() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "put", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + pb_return_value = instance.Instance.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.update_instance(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_update_instance_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.update_instance._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("displayName",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_update_instance_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_update_instance" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_update_instance" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = instance.Instance.pb(instance.Instance()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = instance.Instance.to_json(instance.Instance()) + + request = instance.Instance() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = instance.Instance() + + client.update_instance( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_update_instance_rest_bad_request( + transport: str = "rest", request_type=instance.Instance +): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
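+    # An HTTP 400 from the mocked session should surface as
+    # core_exceptions.BadRequest (google.api_core's mapping for status 400),
+    # which is what the context manager below expects.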
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.update_instance(request) + + +def test_update_instance_rest_error(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.PartialUpdateInstanceRequest, + dict, + ], +) +def test_partial_update_instance_rest(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"instance": {"name": "projects/sample1/instances/sample2"}} + request_init["instance"] = { + "name": "projects/sample1/instances/sample2", + "display_name": "display_name_value", + "state": 1, + "type_": 1, + "labels": {}, + "create_time": {"seconds": 751, "nanos": 543}, + "satisfies_pzs": True, + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.partial_update_instance(request) + + # Establish that the response is the type that we expect. + assert response.operation.name == "operations/spam" + + +def test_partial_update_instance_rest_required_fields( + request_type=bigtable_instance_admin.PartialUpdateInstanceRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).partial_update_instance._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).partial_update_instance._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("update_mask",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "patch", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.partial_update_instance(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_partial_update_instance_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.partial_update_instance._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(("updateMask",)) + & set( + ( + "instance", + "updateMask", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_partial_update_instance_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_partial_update_instance" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_partial_update_instance" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_instance_admin.PartialUpdateInstanceRequest.pb( + bigtable_instance_admin.PartialUpdateInstanceRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson( + operations_pb2.Operation() + ) + + request = bigtable_instance_admin.PartialUpdateInstanceRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.partial_update_instance( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_partial_update_instance_rest_bad_request( + transport: str = "rest", + request_type=bigtable_instance_admin.PartialUpdateInstanceRequest, +): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding 
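+    # The nested "instance" resource name must match the
+    # "{instance.name=projects/*/instances/*}" pattern used by the PATCH http
+    # rule (see the flattened test below) so that transcoding succeeds.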
+ request_init = {"instance": {"name": "projects/sample1/instances/sample2"}} + request_init["instance"] = { + "name": "projects/sample1/instances/sample2", + "display_name": "display_name_value", + "state": 1, + "type_": 1, + "labels": {}, + "create_time": {"seconds": 751, "nanos": 543}, + "satisfies_pzs": True, + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.partial_update_instance(request) + + +def test_partial_update_instance_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = {"instance": {"name": "projects/sample1/instances/sample2"}} + + # get truthy value for each flattened field + mock_args = dict( + instance=gba_instance.Instance(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.partial_update_instance(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{instance.name=projects/*/instances/*}" % client.transport._host, + args[1], + ) + + +def test_partial_update_instance_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.partial_update_instance( + bigtable_instance_admin.PartialUpdateInstanceRequest(), + instance=gba_instance.Instance(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +def test_partial_update_instance_rest_error(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.DeleteInstanceRequest, + dict, + ], +) +def test_delete_instance_rest(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
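+        # DeleteInstance returns google.protobuf.Empty, so the client is
+        # expected to return None and the mocked response body can stay empty.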
+ return_value = None + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.delete_instance(request) + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_instance_rest_required_fields( + request_type=bigtable_instance_admin.DeleteInstanceRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_instance._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_instance._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = None + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "delete", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.delete_instance(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_delete_instance_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.delete_instance._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_delete_instance_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_delete_instance" + ) as pre: + pre.assert_not_called() + pb_message = bigtable_instance_admin.DeleteInstanceRequest.pb( + bigtable_instance_admin.DeleteInstanceRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + + request = bigtable_instance_admin.DeleteInstanceRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + + client.delete_instance( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + + +def test_delete_instance_rest_bad_request( + transport: str = "rest", request_type=bigtable_instance_admin.DeleteInstanceRequest +): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete_instance(request) + + +def test_delete_instance_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = None + + # get arguments that satisfy an http rule for this method + sample_request = {"name": "projects/sample1/instances/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.delete_instance(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*}" % client.transport._host, args[1] + ) + + +def test_delete_instance_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_instance( + bigtable_instance_admin.DeleteInstanceRequest(), + name="name_value", + ) + + +def test_delete_instance_rest_error(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.CreateClusterRequest, + dict, + ], +) +def test_create_cluster_rest(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} + request_init["cluster"] = { + "name": "name_value", + "location": "location_value", + "state": 1, + "serve_nodes": 1181, + "cluster_config": { + "cluster_autoscaling_config": { + "autoscaling_limits": { + "min_serve_nodes": 1600, + "max_serve_nodes": 1602, + }, + "autoscaling_targets": { + "cpu_utilization_percent": 2483, + "storage_utilization_gib_per_node": 3404, + }, + } + }, + "default_storage_type": 1, + "encryption_config": {"kms_key_name": "kms_key_name_value"}, + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.create_cluster(request) + + # Establish that the response is the type that we expect. 
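+    # CreateCluster is a long-running operation; the client wraps the raw
+    # operations_pb2.Operation in an operation future, so only the underlying
+    # proto's name is asserted here.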
+ assert response.operation.name == "operations/spam" + + +def test_create_cluster_rest_required_fields( + request_type=bigtable_instance_admin.CreateClusterRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request_init["cluster_id"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + assert "clusterId" not in jsonified_request + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_cluster._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + assert "clusterId" in jsonified_request + assert jsonified_request["clusterId"] == request_init["cluster_id"] + + jsonified_request["parent"] = "parent_value" + jsonified_request["clusterId"] = "cluster_id_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_cluster._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("cluster_id",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + assert "clusterId" in jsonified_request + assert jsonified_request["clusterId"] == "cluster_id_value" + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.create_cluster(request) + + expected_params = [ + ( + "clusterId", + "", + ), + ("$alt", "json;enum-encoding=int"), + ] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_create_cluster_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.create_cluster._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(("clusterId",)) + & set( + ( + "parent", + "clusterId", + "cluster", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_create_cluster_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_create_cluster" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_create_cluster" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_instance_admin.CreateClusterRequest.pb( + bigtable_instance_admin.CreateClusterRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson( + operations_pb2.Operation() + ) + + request = bigtable_instance_admin.CreateClusterRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.create_cluster( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_create_cluster_rest_bad_request( + transport: str = "rest", request_type=bigtable_instance_admin.CreateClusterRequest +): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} + request_init["cluster"] = { + "name": "name_value", + "location": "location_value", + "state": 1, + "serve_nodes": 1181, + "cluster_config": { + "cluster_autoscaling_config": { + "autoscaling_limits": { + "min_serve_nodes": 1600, + "max_serve_nodes": 1602, + }, + "autoscaling_targets": { + "cpu_utilization_percent": 2483, + "storage_utilization_gib_per_node": 3404, + }, + } + }, + 
"default_storage_type": 1, + "encryption_config": {"kms_key_name": "kms_key_name_value"}, + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.create_cluster(request) + + +def test_create_cluster_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/instances/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + cluster_id="cluster_id_value", + cluster=instance.Cluster(name="name_value"), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.create_cluster(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/instances/*}/clusters" % client.transport._host, + args[1], + ) + + +def test_create_cluster_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_cluster( + bigtable_instance_admin.CreateClusterRequest(), + parent="parent_value", + cluster_id="cluster_id_value", + cluster=instance.Cluster(name="name_value"), + ) + + +def test_create_cluster_rest_error(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.GetClusterRequest, + dict, + ], +) +def test_get_cluster_rest(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/clusters/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = instance.Cluster( + name="name_value", + location="location_value", + state=instance.Cluster.State.READY, + serve_nodes=1181, + default_storage_type=common.StorageType.SSD, + cluster_config=instance.Cluster.ClusterConfig( + cluster_autoscaling_config=instance.Cluster.ClusterAutoscalingConfig( + autoscaling_limits=instance.AutoscalingLimits(min_serve_nodes=1600) + ) + ), + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = instance.Cluster.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.get_cluster(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, instance.Cluster) + assert response.name == "name_value" + assert response.location == "location_value" + assert response.state == instance.Cluster.State.READY + assert response.serve_nodes == 1181 + assert response.default_storage_type == common.StorageType.SSD + + +def test_get_cluster_rest_required_fields( + request_type=bigtable_instance_admin.GetClusterRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_cluster._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_cluster._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = instance.Cluster() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + pb_return_value = instance.Cluster.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_cluster(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_cluster_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_cluster._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_cluster_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_get_cluster" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_get_cluster" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_instance_admin.GetClusterRequest.pb( + bigtable_instance_admin.GetClusterRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = instance.Cluster.to_json(instance.Cluster()) + + request = bigtable_instance_admin.GetClusterRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = instance.Cluster() + + client.get_cluster( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_cluster_rest_bad_request( + transport: str = "rest", request_type=bigtable_instance_admin.GetClusterRequest +): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/clusters/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_cluster(request) + + +def test_get_cluster_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = instance.Cluster() + + # get arguments that satisfy an http rule for this method + sample_request = {"name": "projects/sample1/instances/sample2/clusters/sample3"} + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = instance.Cluster.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.get_cluster(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/clusters/*}" % client.transport._host, + args[1], + ) + + +def test_get_cluster_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_cluster( + bigtable_instance_admin.GetClusterRequest(), + name="name_value", + ) + + +def test_get_cluster_rest_error(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.ListClustersRequest, + dict, + ], +) +def test_list_clusters_rest(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_instance_admin.ListClustersResponse( + failed_locations=["failed_locations_value"], + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = bigtable_instance_admin.ListClustersResponse.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.list_clusters(request) + + assert response.raw_page is response + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, bigtable_instance_admin.ListClustersResponse) + assert response.failed_locations == ["failed_locations_value"] + assert response.next_page_token == "next_page_token_value" + + +def test_list_clusters_rest_required_fields( + request_type=bigtable_instance_admin.ListClustersRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_clusters._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_clusters._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("page_token",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable_instance_admin.ListClustersResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + pb_return_value = bigtable_instance_admin.ListClustersResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.list_clusters(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_clusters_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_clusters._get_unset_required_fields({}) + assert set(unset_fields) == (set(("pageToken",)) & set(("parent",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_clusters_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_list_clusters" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_list_clusters" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_instance_admin.ListClustersRequest.pb( + bigtable_instance_admin.ListClustersRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = ( + bigtable_instance_admin.ListClustersResponse.to_json( + bigtable_instance_admin.ListClustersResponse() + ) + ) + + request = bigtable_instance_admin.ListClustersRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable_instance_admin.ListClustersResponse() + + client.list_clusters( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_list_clusters_rest_bad_request( + transport: str = "rest", request_type=bigtable_instance_admin.ListClustersRequest +): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_clusters(request) + + +def test_list_clusters_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_instance_admin.ListClustersResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/instances/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = bigtable_instance_admin.ListClustersResponse.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.list_clusters(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/instances/*}/clusters" % client.transport._host, + args[1], + ) + + +def test_list_clusters_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_clusters( + bigtable_instance_admin.ListClustersRequest(), + parent="parent_value", + ) + + +def test_list_clusters_rest_error(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + instance.Cluster, + dict, + ], +) +def test_update_cluster_rest(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/clusters/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.update_cluster(request) + + # Establish that the response is the type that we expect. 
+ assert response.operation.name == "operations/spam" + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_update_cluster_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_update_cluster" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_update_cluster" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = instance.Cluster.pb(instance.Cluster()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson( + operations_pb2.Operation() + ) + + request = instance.Cluster() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.update_cluster( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_update_cluster_rest_bad_request( + transport: str = "rest", request_type=instance.Cluster +): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/clusters/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.update_cluster(request) + + +def test_update_cluster_rest_error(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.PartialUpdateClusterRequest, + dict, + ], +) +def test_partial_update_cluster_rest(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = { + "cluster": {"name": "projects/sample1/instances/sample2/clusters/sample3"} + } + request_init["cluster"] = { + "name": "projects/sample1/instances/sample2/clusters/sample3", + "location": "location_value", + "state": 1, + "serve_nodes": 1181, + "cluster_config": { + "cluster_autoscaling_config": { + "autoscaling_limits": { + "min_serve_nodes": 1600, + "max_serve_nodes": 1602, + }, + "autoscaling_targets": { + "cpu_utilization_percent": 2483, + "storage_utilization_gib_per_node": 3404, + }, + } + }, + "default_storage_type": 1, + "encryption_config": {"kms_key_name": "kms_key_name_value"}, + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.partial_update_cluster(request) + + # Establish that the response is the type that we expect. + assert response.operation.name == "operations/spam" + + +def test_partial_update_cluster_rest_required_fields( + request_type=bigtable_instance_admin.PartialUpdateClusterRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).partial_update_cluster._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).partial_update_cluster._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. 
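+    # update_mask is carried as a query parameter (the cluster itself is the
+    # request body), so it is the only required field allowed to remain here.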
+ assert not set(unset_fields) - set(("update_mask",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "patch", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.partial_update_cluster(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_partial_update_cluster_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.partial_update_cluster._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(("updateMask",)) + & set( + ( + "cluster", + "updateMask", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_partial_update_cluster_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_partial_update_cluster" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_partial_update_cluster" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_instance_admin.PartialUpdateClusterRequest.pb( + bigtable_instance_admin.PartialUpdateClusterRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson( + operations_pb2.Operation() + ) + + request = bigtable_instance_admin.PartialUpdateClusterRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = 
operations_pb2.Operation() + + client.partial_update_cluster( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_partial_update_cluster_rest_bad_request( + transport: str = "rest", + request_type=bigtable_instance_admin.PartialUpdateClusterRequest, +): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = { + "cluster": {"name": "projects/sample1/instances/sample2/clusters/sample3"} + } + request_init["cluster"] = { + "name": "projects/sample1/instances/sample2/clusters/sample3", + "location": "location_value", + "state": 1, + "serve_nodes": 1181, + "cluster_config": { + "cluster_autoscaling_config": { + "autoscaling_limits": { + "min_serve_nodes": 1600, + "max_serve_nodes": 1602, + }, + "autoscaling_targets": { + "cpu_utilization_percent": 2483, + "storage_utilization_gib_per_node": 3404, + }, + } + }, + "default_storage_type": 1, + "encryption_config": {"kms_key_name": "kms_key_name_value"}, + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.partial_update_cluster(request) + + +def test_partial_update_cluster_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "cluster": {"name": "projects/sample1/instances/sample2/clusters/sample3"} + } + + # get truthy value for each flattened field + mock_args = dict( + cluster=instance.Cluster(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.partial_update_cluster(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{cluster.name=projects/*/instances/*/clusters/*}" + % client.transport._host, + args[1], + ) + + +def test_partial_update_cluster_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.partial_update_cluster( + bigtable_instance_admin.PartialUpdateClusterRequest(), + cluster=instance.Cluster(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +def test_partial_update_cluster_rest_error(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.DeleteClusterRequest, + dict, + ], +) +def test_delete_cluster_rest(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/clusters/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.delete_cluster(request) + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_cluster_rest_required_fields( + request_type=bigtable_instance_admin.DeleteClusterRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_cluster._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_cluster._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = None + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "delete", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.delete_cluster(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_delete_cluster_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.delete_cluster._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_delete_cluster_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_delete_cluster" + ) as pre: + pre.assert_not_called() + pb_message = bigtable_instance_admin.DeleteClusterRequest.pb( + bigtable_instance_admin.DeleteClusterRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + + request = bigtable_instance_admin.DeleteClusterRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + + client.delete_cluster( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + + +def test_delete_cluster_rest_bad_request( + transport: str = "rest", request_type=bigtable_instance_admin.DeleteClusterRequest +): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/clusters/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete_cluster(request) + + +def test_delete_cluster_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = None + + # get arguments that satisfy an http rule for this method + sample_request = {"name": "projects/sample1/instances/sample2/clusters/sample3"} + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.delete_cluster(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/clusters/*}" % client.transport._host, + args[1], + ) + + +def test_delete_cluster_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_cluster( + bigtable_instance_admin.DeleteClusterRequest(), + name="name_value", + ) + + +def test_delete_cluster_rest_error(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.CreateAppProfileRequest, + dict, + ], +) +def test_create_app_profile_rest(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} + request_init["app_profile"] = { + "name": "name_value", + "etag": "etag_value", + "description": "description_value", + "multi_cluster_routing_use_any": { + "cluster_ids": ["cluster_ids_value1", "cluster_ids_value2"] + }, + "single_cluster_routing": { + "cluster_id": "cluster_id_value", + "allow_transactional_writes": True, + }, + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = instance.AppProfile( + name="name_value", + etag="etag_value", + description="description_value", + multi_cluster_routing_use_any=instance.AppProfile.MultiClusterRoutingUseAny( + cluster_ids=["cluster_ids_value"] + ), + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = instance.AppProfile.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.create_app_profile(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, instance.AppProfile) + assert response.name == "name_value" + assert response.etag == "etag_value" + assert response.description == "description_value" + + +def test_create_app_profile_rest_required_fields( + request_type=bigtable_instance_admin.CreateAppProfileRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request_init["app_profile_id"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + assert "appProfileId" not in jsonified_request + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_app_profile._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + assert "appProfileId" in jsonified_request + assert jsonified_request["appProfileId"] == request_init["app_profile_id"] + + jsonified_request["parent"] = "parent_value" + jsonified_request["appProfileId"] = "app_profile_id_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_app_profile._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "app_profile_id", + "ignore_warnings", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + assert "appProfileId" in jsonified_request + assert jsonified_request["appProfileId"] == "app_profile_id_value" + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = instance.AppProfile() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + pb_return_value = instance.AppProfile.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.create_app_profile(request) + + expected_params = [ + ( + "appProfileId", + "", + ), + ("$alt", "json;enum-encoding=int"), + ] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_create_app_profile_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.create_app_profile._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "appProfileId", + "ignoreWarnings", + ) + ) + & set( + ( + "parent", + "appProfileId", + "appProfile", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_create_app_profile_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_create_app_profile" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_create_app_profile" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_instance_admin.CreateAppProfileRequest.pb( + bigtable_instance_admin.CreateAppProfileRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = instance.AppProfile.to_json(instance.AppProfile()) + + request = bigtable_instance_admin.CreateAppProfileRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = instance.AppProfile() + + client.create_app_profile( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_create_app_profile_rest_bad_request( + transport: str = "rest", + request_type=bigtable_instance_admin.CreateAppProfileRequest, +): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} + request_init["app_profile"] = { + "name": "name_value", + "etag": "etag_value", + "description": "description_value", + "multi_cluster_routing_use_any": { + "cluster_ids": ["cluster_ids_value1", "cluster_ids_value2"] + }, + "single_cluster_routing": { + "cluster_id": "cluster_id_value", + "allow_transactional_writes": True, + }, + } + 
request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.create_app_profile(request) + + +def test_create_app_profile_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = instance.AppProfile() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/instances/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + app_profile_id="app_profile_id_value", + app_profile=instance.AppProfile(name="name_value"), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = instance.AppProfile.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.create_app_profile(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/instances/*}/appProfiles" + % client.transport._host, + args[1], + ) + + +def test_create_app_profile_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_app_profile( + bigtable_instance_admin.CreateAppProfileRequest(), + parent="parent_value", + app_profile_id="app_profile_id_value", + app_profile=instance.AppProfile(name="name_value"), + ) + + +def test_create_app_profile_rest_error(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.GetAppProfileRequest, + dict, + ], +) +def test_get_app_profile_rest(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/appProfiles/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = instance.AppProfile( + name="name_value", + etag="etag_value", + description="description_value", + multi_cluster_routing_use_any=instance.AppProfile.MultiClusterRoutingUseAny( + cluster_ids=["cluster_ids_value"] + ), + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = instance.AppProfile.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.get_app_profile(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, instance.AppProfile) + assert response.name == "name_value" + assert response.etag == "etag_value" + assert response.description == "description_value" + + +def test_get_app_profile_rest_required_fields( + request_type=bigtable_instance_admin.GetAppProfileRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_app_profile._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_app_profile._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = instance.AppProfile() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + pb_return_value = instance.AppProfile.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_app_profile(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_app_profile_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_app_profile._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_app_profile_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_get_app_profile" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_get_app_profile" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_instance_admin.GetAppProfileRequest.pb( + bigtable_instance_admin.GetAppProfileRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = instance.AppProfile.to_json(instance.AppProfile()) + + request = bigtable_instance_admin.GetAppProfileRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = instance.AppProfile() + + client.get_app_profile( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_app_profile_rest_bad_request( + transport: str = "rest", request_type=bigtable_instance_admin.GetAppProfileRequest +): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/appProfiles/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_app_profile(request) + + +def test_get_app_profile_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = instance.AppProfile() + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/instances/sample2/appProfiles/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = instance.AppProfile.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.get_app_profile(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/appProfiles/*}" + % client.transport._host, + args[1], + ) + + +def test_get_app_profile_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_app_profile( + bigtable_instance_admin.GetAppProfileRequest(), + name="name_value", + ) + + +def test_get_app_profile_rest_error(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.ListAppProfilesRequest, + dict, + ], +) +def test_list_app_profiles_rest(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = bigtable_instance_admin.ListAppProfilesResponse( + next_page_token="next_page_token_value", + failed_locations=["failed_locations_value"], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = bigtable_instance_admin.ListAppProfilesResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.list_app_profiles(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListAppProfilesPager) + assert response.next_page_token == "next_page_token_value" + assert response.failed_locations == ["failed_locations_value"] + + +def test_list_app_profiles_rest_required_fields( + request_type=bigtable_instance_admin.ListAppProfilesRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_app_profiles._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_app_profiles._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "page_size", + "page_token", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable_instance_admin.ListAppProfilesResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + pb_return_value = bigtable_instance_admin.ListAppProfilesResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.list_app_profiles(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_app_profiles_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_app_profiles._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "pageSize", + "pageToken", + ) + ) + & set(("parent",)) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_app_profiles_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_list_app_profiles" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_list_app_profiles" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_instance_admin.ListAppProfilesRequest.pb( + bigtable_instance_admin.ListAppProfilesRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = ( + bigtable_instance_admin.ListAppProfilesResponse.to_json( + bigtable_instance_admin.ListAppProfilesResponse() + ) + ) + + request = bigtable_instance_admin.ListAppProfilesRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable_instance_admin.ListAppProfilesResponse() + + client.list_app_profiles( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_list_app_profiles_rest_bad_request( + transport: str = "rest", request_type=bigtable_instance_admin.ListAppProfilesRequest +): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_app_profiles(request) + + +def test_list_app_profiles_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_instance_admin.ListAppProfilesResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/instances/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = bigtable_instance_admin.ListAppProfilesResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(pb_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.list_app_profiles(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/instances/*}/appProfiles" + % client.transport._host, + args[1], + ) + + +def test_list_app_profiles_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_app_profiles( + bigtable_instance_admin.ListAppProfilesRequest(), + parent="parent_value", + ) + + +def test_list_app_profiles_rest_pager(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + bigtable_instance_admin.ListAppProfilesResponse( + app_profiles=[ + instance.AppProfile(), + instance.AppProfile(), + instance.AppProfile(), + ], + next_page_token="abc", + ), + bigtable_instance_admin.ListAppProfilesResponse( + app_profiles=[], + next_page_token="def", + ), + bigtable_instance_admin.ListAppProfilesResponse( + app_profiles=[ + instance.AppProfile(), + ], + next_page_token="ghi", + ), + bigtable_instance_admin.ListAppProfilesResponse( + app_profiles=[ + instance.AppProfile(), + instance.AppProfile(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + bigtable_instance_admin.ListAppProfilesResponse.to_json(x) for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"parent": "projects/sample1/instances/sample2"} + + pager = client.list_app_profiles(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, instance.AppProfile) for i in results) + + pages = list(client.list_app_profiles(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.UpdateAppProfileRequest, + dict, + ], +) +def test_update_app_profile_rest(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = { + "app_profile": { + "name": "projects/sample1/instances/sample2/appProfiles/sample3" + } + } + request_init["app_profile"] = { + "name": "projects/sample1/instances/sample2/appProfiles/sample3", + "etag": "etag_value", + "description": "description_value", + "multi_cluster_routing_use_any": { + "cluster_ids": ["cluster_ids_value1", "cluster_ids_value2"] + }, + "single_cluster_routing": { + "cluster_id": "cluster_id_value", + "allow_transactional_writes": True, + }, + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.update_app_profile(request) + + # Establish that the response is the type that we expect. 
+ assert response.operation.name == "operations/spam" + + +def test_update_app_profile_rest_required_fields( + request_type=bigtable_instance_admin.UpdateAppProfileRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_app_profile._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_app_profile._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "ignore_warnings", + "update_mask", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "patch", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.update_app_profile(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_update_app_profile_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.update_app_profile._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "ignoreWarnings", + "updateMask", + ) + ) + & set( + ( + "appProfile", + "updateMask", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_update_app_profile_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_update_app_profile" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_update_app_profile" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_instance_admin.UpdateAppProfileRequest.pb( + bigtable_instance_admin.UpdateAppProfileRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson( + operations_pb2.Operation() + ) + + request = bigtable_instance_admin.UpdateAppProfileRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.update_app_profile( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_update_app_profile_rest_bad_request( + transport: str = "rest", + request_type=bigtable_instance_admin.UpdateAppProfileRequest, +): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = { + "app_profile": { + "name": "projects/sample1/instances/sample2/appProfiles/sample3" + } + } + request_init["app_profile"] = { + "name": "projects/sample1/instances/sample2/appProfiles/sample3", + "etag": "etag_value", + "description": "description_value", + "multi_cluster_routing_use_any": { + "cluster_ids": ["cluster_ids_value1", "cluster_ids_value2"] + }, + "single_cluster_routing": { + "cluster_id": 
"cluster_id_value", + "allow_transactional_writes": True, + }, + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.update_app_profile(request) + + +def test_update_app_profile_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "app_profile": { + "name": "projects/sample1/instances/sample2/appProfiles/sample3" + } + } + + # get truthy value for each flattened field + mock_args = dict( + app_profile=instance.AppProfile(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.update_app_profile(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}" + % client.transport._host, + args[1], + ) + + +def test_update_app_profile_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_app_profile( + bigtable_instance_admin.UpdateAppProfileRequest(), + app_profile=instance.AppProfile(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +def test_update_app_profile_rest_error(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.DeleteAppProfileRequest, + dict, + ], +) +def test_delete_app_profile_rest(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/appProfiles/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = None + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.delete_app_profile(request) + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_app_profile_rest_required_fields( + request_type=bigtable_instance_admin.DeleteAppProfileRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["name"] = "" + request_init["ignore_warnings"] = False + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + assert "ignoreWarnings" not in jsonified_request + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_app_profile._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + assert "ignoreWarnings" in jsonified_request + assert jsonified_request["ignoreWarnings"] == request_init["ignore_warnings"] + + jsonified_request["name"] = "name_value" + jsonified_request["ignoreWarnings"] = True + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_app_profile._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("ignore_warnings",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + assert "ignoreWarnings" in jsonified_request + assert jsonified_request["ignoreWarnings"] == True + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = None + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "delete", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.delete_app_profile(request) + + expected_params = [ + ( + "ignoreWarnings", + str(False).lower(), + ), + ("$alt", "json;enum-encoding=int"), + ] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_delete_app_profile_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.delete_app_profile._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(("ignoreWarnings",)) + & set( + ( + "name", + "ignoreWarnings", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_delete_app_profile_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_delete_app_profile" + ) as pre: + pre.assert_not_called() + pb_message = bigtable_instance_admin.DeleteAppProfileRequest.pb( + bigtable_instance_admin.DeleteAppProfileRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + + request = bigtable_instance_admin.DeleteAppProfileRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + + client.delete_app_profile( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + + +def test_delete_app_profile_rest_bad_request( + transport: str = "rest", + request_type=bigtable_instance_admin.DeleteAppProfileRequest, +): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/appProfiles/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete_app_profile(request) + + +def test_delete_app_profile_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/instances/sample2/appProfiles/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.delete_app_profile(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/appProfiles/*}" + % client.transport._host, + args[1], + ) + + +def test_delete_app_profile_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_app_profile( + bigtable_instance_admin.DeleteAppProfileRequest(), + name="name_value", + ) + + +def test_delete_app_profile_rest_error(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.GetIamPolicyRequest, + dict, + ], +) +def test_get_iam_policy_rest(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"resource": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = return_value + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.get_iam_policy(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, policy_pb2.Policy) + assert response.version == 774 + assert response.etag == b"etag_blob" + + +def test_get_iam_policy_rest_required_fields( + request_type=iam_policy_pb2.GetIamPolicyRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["resource"] = "" + request = request_type(**request_init) + pb_request = request + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_iam_policy._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["resource"] = "resource_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_iam_policy._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "resource" in jsonified_request + assert jsonified_request["resource"] == "resource_value" + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + pb_return_value = return_value + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_iam_policy(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_iam_policy_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_iam_policy._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("resource",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_iam_policy_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_get_iam_policy" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_get_iam_policy" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = iam_policy_pb2.GetIamPolicyRequest() + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson(policy_pb2.Policy()) + + request = iam_policy_pb2.GetIamPolicyRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = policy_pb2.Policy() + + client.get_iam_policy( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_iam_policy_rest_bad_request( + transport: str = "rest", request_type=iam_policy_pb2.GetIamPolicyRequest +): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"resource": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
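+ # The mocked 400 response is expected to surface from client.get_iam_policy
+ # as core_exceptions.BadRequest; the assertion does not need to inspect a
+ # response body.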
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_iam_policy(request) + + +def test_get_iam_policy_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy() + + # get arguments that satisfy an http rule for this method + sample_request = {"resource": "projects/sample1/instances/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + resource="resource_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = return_value + json_return_value = json_format.MessageToJson(pb_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.get_iam_policy(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{resource=projects/*/instances/*}:getIamPolicy" + % client.transport._host, + args[1], + ) + + +def test_get_iam_policy_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_iam_policy( + iam_policy_pb2.GetIamPolicyRequest(), + resource="resource_value", + ) + + +def test_get_iam_policy_rest_error(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.SetIamPolicyRequest, + dict, + ], +) +def test_set_iam_policy_rest(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"resource": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = return_value + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.set_iam_policy(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, policy_pb2.Policy) + assert response.version == 774 + assert response.etag == b"etag_blob" + + +def test_set_iam_policy_rest_required_fields( + request_type=iam_policy_pb2.SetIamPolicyRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["resource"] = "" + request = request_type(**request_init) + pb_request = request + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).set_iam_policy._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["resource"] = "resource_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).set_iam_policy._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "resource" in jsonified_request + assert jsonified_request["resource"] == "resource_value" + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + pb_return_value = return_value + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.set_iam_policy(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_set_iam_policy_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.set_iam_policy._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "resource", + "policy", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_set_iam_policy_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_set_iam_policy" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_set_iam_policy" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = iam_policy_pb2.SetIamPolicyRequest() + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson(policy_pb2.Policy()) + + request = iam_policy_pb2.SetIamPolicyRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = policy_pb2.Policy() + + client.set_iam_policy( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_set_iam_policy_rest_bad_request( + transport: str = "rest", request_type=iam_policy_pb2.SetIamPolicyRequest +): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"resource": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_iam_policy(request) + + +def test_set_iam_policy_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy() + + # get arguments that satisfy an http rule for this method + sample_request = {"resource": "projects/sample1/instances/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + resource="resource_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = return_value + json_return_value = json_format.MessageToJson(pb_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.set_iam_policy(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{resource=projects/*/instances/*}:setIamPolicy" + % client.transport._host, + args[1], + ) + + +def test_set_iam_policy_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_iam_policy( + iam_policy_pb2.SetIamPolicyRequest(), + resource="resource_value", + ) + + +def test_set_iam_policy_rest_error(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.TestIamPermissionsRequest, + dict, + ], +) +def test_test_iam_permissions_rest(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"resource": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = return_value + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.test_iam_permissions(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + assert response.permissions == ["permissions_value"] + + +def test_test_iam_permissions_rest_required_fields( + request_type=iam_policy_pb2.TestIamPermissionsRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["resource"] = "" + request_init["permissions"] = "" + request = request_type(**request_init) + pb_request = request + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).test_iam_permissions._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["resource"] = "resource_value" + jsonified_request["permissions"] = "permissions_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).test_iam_permissions._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "resource" in jsonified_request + assert jsonified_request["resource"] == "resource_value" + assert "permissions" in jsonified_request + assert jsonified_request["permissions"] == "permissions_value" + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = iam_policy_pb2.TestIamPermissionsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + pb_return_value = return_value + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.test_iam_permissions(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_test_iam_permissions_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.test_iam_permissions._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "resource", + "permissions", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_test_iam_permissions_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_test_iam_permissions" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_test_iam_permissions" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = iam_policy_pb2.TestIamPermissionsRequest() + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + request = iam_policy_pb2.TestIamPermissionsRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + client.test_iam_permissions( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_test_iam_permissions_rest_bad_request( + transport: str = "rest", request_type=iam_policy_pb2.TestIamPermissionsRequest +): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"resource": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.test_iam_permissions(request) + + +def test_test_iam_permissions_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = iam_policy_pb2.TestIamPermissionsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"resource": "projects/sample1/instances/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + resource="resource_value", + permissions=["permissions_value"], + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = return_value + json_return_value = json_format.MessageToJson(pb_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.test_iam_permissions(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{resource=projects/*/instances/*}:testIamPermissions" + % client.transport._host, + args[1], + ) + + +def test_test_iam_permissions_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.test_iam_permissions( + iam_policy_pb2.TestIamPermissionsRequest(), + resource="resource_value", + permissions=["permissions_value"], + ) + + +def test_test_iam_permissions_rest_error(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.ListHotTabletsRequest, + dict, + ], +) +def test_list_hot_tablets_rest(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
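+ # ListHotTablets is a paginated method: the raw REST response carries a
+ # next_page_token, and the client wraps it in a ListHotTabletsPager (checked
+ # by the isinstance assertion below).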
+ return_value = bigtable_instance_admin.ListHotTabletsResponse( + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = bigtable_instance_admin.ListHotTabletsResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.list_hot_tablets(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListHotTabletsPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_hot_tablets_rest_required_fields( + request_type=bigtable_instance_admin.ListHotTabletsRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_hot_tablets._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_hot_tablets._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "end_time", + "page_size", + "page_token", + "start_time", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable_instance_admin.ListHotTabletsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + pb_return_value = bigtable_instance_admin.ListHotTabletsResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.list_hot_tablets(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_hot_tablets_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_hot_tablets._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "endTime", + "pageSize", + "pageToken", + "startTime", + ) + ) + & set(("parent",)) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_hot_tablets_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_list_hot_tablets" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_list_hot_tablets" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_instance_admin.ListHotTabletsRequest.pb( + bigtable_instance_admin.ListHotTabletsRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = ( + bigtable_instance_admin.ListHotTabletsResponse.to_json( + bigtable_instance_admin.ListHotTabletsResponse() + ) + ) + + request = bigtable_instance_admin.ListHotTabletsRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable_instance_admin.ListHotTabletsResponse() + + client.list_hot_tablets( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_list_hot_tablets_rest_bad_request( + transport: str = "rest", request_type=bigtable_instance_admin.ListHotTabletsRequest +): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_hot_tablets(request) + + +def test_list_hot_tablets_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_instance_admin.ListHotTabletsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = { + "parent": "projects/sample1/instances/sample2/clusters/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = bigtable_instance_admin.ListHotTabletsResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(pb_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.list_hot_tablets(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/instances/*/clusters/*}/hotTablets" + % client.transport._host, + args[1], + ) + + +def test_list_hot_tablets_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_hot_tablets( + bigtable_instance_admin.ListHotTabletsRequest(), + parent="parent_value", + ) + + +def test_list_hot_tablets_rest_pager(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
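+ # The pager test below fabricates four pages (3 + 0 + 1 + 2 hot tablets,
+ # chained by the tokens "abc", "def", "ghi" and then an empty token) and
+ # serves the sequence twice via req.side_effect: once for iterating results
+ # and once for iterating pages.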
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + bigtable_instance_admin.ListHotTabletsResponse( + hot_tablets=[ + instance.HotTablet(), + instance.HotTablet(), + instance.HotTablet(), + ], + next_page_token="abc", + ), + bigtable_instance_admin.ListHotTabletsResponse( + hot_tablets=[], + next_page_token="def", + ), + bigtable_instance_admin.ListHotTabletsResponse( + hot_tablets=[ + instance.HotTablet(), + ], + next_page_token="ghi", + ), + bigtable_instance_admin.ListHotTabletsResponse( + hot_tablets=[ + instance.HotTablet(), + instance.HotTablet(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + bigtable_instance_admin.ListHotTabletsResponse.to_json(x) for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = { + "parent": "projects/sample1/instances/sample2/clusters/sample3" + } + + pager = client.list_hot_tablets(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, instance.HotTablet) for i in results) + + pages = list(client.list_hot_tablets(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.BigtableInstanceAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.BigtableInstanceAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BigtableInstanceAdminClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.BigtableInstanceAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = BigtableInstanceAdminClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = mock.Mock() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = BigtableInstanceAdminClient( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.BigtableInstanceAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BigtableInstanceAdminClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.BigtableInstanceAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = BigtableInstanceAdminClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.BigtableInstanceAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.BigtableInstanceAdminGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.BigtableInstanceAdminGrpcTransport, + transports.BigtableInstanceAdminGrpcAsyncIOTransport, + transports.BigtableInstanceAdminRestTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "rest", + ], +) +def test_transport_kind(transport_name): + transport = BigtableInstanceAdminClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.BigtableInstanceAdminGrpcTransport, + ) + + +def test_bigtable_instance_admin_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.BigtableInstanceAdminTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_bigtable_instance_admin_base_transport(): + # Instantiate the base transport. + with mock.patch( + "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.BigtableInstanceAdminTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.BigtableInstanceAdminTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
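+ # The abstract base transport defines one stub per RPC; every entry in the
+ # tuple below is expected to raise NotImplementedError until a concrete
+ # transport (gRPC, gRPC asyncio, or REST) overrides it.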
+ methods = ( + "create_instance", + "get_instance", + "list_instances", + "update_instance", + "partial_update_instance", + "delete_instance", + "create_cluster", + "get_cluster", + "list_clusters", + "update_cluster", + "partial_update_cluster", + "delete_cluster", + "create_app_profile", + "get_app_profile", + "list_app_profiles", + "update_app_profile", + "delete_app_profile", + "get_iam_policy", + "set_iam_policy", + "test_iam_permissions", + "list_hot_tablets", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_bigtable_instance_admin_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file with mock.patch.object( google.auth, "load_credentials_from_file", autospec=True @@ -6372,6 +12322,7 @@ def test_bigtable_instance_admin_transport_auth_adc(transport_class): [ transports.BigtableInstanceAdminGrpcTransport, transports.BigtableInstanceAdminGrpcAsyncIOTransport, + transports.BigtableInstanceAdminRestTransport, ], ) def test_bigtable_instance_admin_transport_auth_gdch_credentials(transport_class): @@ -6481,11 +12432,40 @@ def test_bigtable_instance_admin_grpc_transport_client_cert_source_for_mtls( ) +def test_bigtable_instance_admin_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch( + "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel" + ) as mock_configure_mtls_channel: + transports.BigtableInstanceAdminRestTransport( + credentials=cred, client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_bigtable_instance_admin_rest_lro_client(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.AbstractOperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. 
+ assert transport.operations_client is transport.operations_client + + @pytest.mark.parametrize( "transport_name", [ "grpc", "grpc_asyncio", + "rest", ], ) def test_bigtable_instance_admin_host_no_port(transport_name): @@ -6496,7 +12476,11 @@ def test_bigtable_instance_admin_host_no_port(transport_name): ), transport=transport_name, ) - assert client.transport._host == ("bigtableadmin.googleapis.com:443") + assert client.transport._host == ( + "bigtableadmin.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://bigtableadmin.googleapis.com" + ) @pytest.mark.parametrize( @@ -6504,6 +12488,7 @@ def test_bigtable_instance_admin_host_no_port(transport_name): [ "grpc", "grpc_asyncio", + "rest", ], ) def test_bigtable_instance_admin_host_with_port(transport_name): @@ -6514,7 +12499,93 @@ def test_bigtable_instance_admin_host_with_port(transport_name): ), transport=transport_name, ) - assert client.transport._host == ("bigtableadmin.googleapis.com:8000") + assert client.transport._host == ( + "bigtableadmin.googleapis.com:8000" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://bigtableadmin.googleapis.com:8000" + ) + + +@pytest.mark.parametrize( + "transport_name", + [ + "rest", + ], +) +def test_bigtable_instance_admin_client_transport_session_collision(transport_name): + creds1 = ga_credentials.AnonymousCredentials() + creds2 = ga_credentials.AnonymousCredentials() + client1 = BigtableInstanceAdminClient( + credentials=creds1, + transport=transport_name, + ) + client2 = BigtableInstanceAdminClient( + credentials=creds2, + transport=transport_name, + ) + session1 = client1.transport.create_instance._session + session2 = client2.transport.create_instance._session + assert session1 != session2 + session1 = client1.transport.get_instance._session + session2 = client2.transport.get_instance._session + assert session1 != session2 + session1 = client1.transport.list_instances._session + session2 = client2.transport.list_instances._session + assert session1 != session2 + session1 = client1.transport.update_instance._session + session2 = client2.transport.update_instance._session + assert session1 != session2 + session1 = client1.transport.partial_update_instance._session + session2 = client2.transport.partial_update_instance._session + assert session1 != session2 + session1 = client1.transport.delete_instance._session + session2 = client2.transport.delete_instance._session + assert session1 != session2 + session1 = client1.transport.create_cluster._session + session2 = client2.transport.create_cluster._session + assert session1 != session2 + session1 = client1.transport.get_cluster._session + session2 = client2.transport.get_cluster._session + assert session1 != session2 + session1 = client1.transport.list_clusters._session + session2 = client2.transport.list_clusters._session + assert session1 != session2 + session1 = client1.transport.update_cluster._session + session2 = client2.transport.update_cluster._session + assert session1 != session2 + session1 = client1.transport.partial_update_cluster._session + session2 = client2.transport.partial_update_cluster._session + assert session1 != session2 + session1 = client1.transport.delete_cluster._session + session2 = client2.transport.delete_cluster._session + assert session1 != session2 + session1 = client1.transport.create_app_profile._session + session2 = client2.transport.create_app_profile._session + assert session1 != session2 + session1 = client1.transport.get_app_profile._session + session2 = 
client2.transport.get_app_profile._session + assert session1 != session2 + session1 = client1.transport.list_app_profiles._session + session2 = client2.transport.list_app_profiles._session + assert session1 != session2 + session1 = client1.transport.update_app_profile._session + session2 = client2.transport.update_app_profile._session + assert session1 != session2 + session1 = client1.transport.delete_app_profile._session + session2 = client2.transport.delete_app_profile._session + assert session1 != session2 + session1 = client1.transport.get_iam_policy._session + session2 = client2.transport.get_iam_policy._session + assert session1 != session2 + session1 = client1.transport.set_iam_policy._session + session2 = client2.transport.set_iam_policy._session + assert session1 != session2 + session1 = client1.transport.test_iam_permissions._session + session2 = client2.transport.test_iam_permissions._session + assert session1 != session2 + session1 = client1.transport.list_hot_tablets._session + session2 = client2.transport.list_hot_tablets._session + assert session1 != session2 def test_bigtable_instance_admin_grpc_transport_channel(): @@ -6986,6 +13057,7 @@ async def test_transport_close_async(): def test_transport_close(): transports = { + "rest": "_session", "grpc": "_grpc_channel", } @@ -7003,6 +13075,7 @@ def test_transport_close(): def test_client_ctx(): transports = [ + "rest", "grpc", ] for transport in transports: diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index 5ba93c6ffa85..8e4004ab169a 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -24,10 +24,17 @@ import grpc from grpc.experimental import aio +from collections.abc import Iterable +from google.protobuf import json_format +import json import math import pytest from proto.marshal.rules.dates import DurationRule, TimestampRule from proto.marshal.rules import wrappers +from requests import Response +from requests import Request, PreparedRequest +from requests.sessions import Session +from google.protobuf import json_format from google.api_core import client_options from google.api_core import exceptions as core_exceptions @@ -116,6 +123,7 @@ def test__get_default_mtls_endpoint(): [ (BigtableTableAdminClient, "grpc"), (BigtableTableAdminAsyncClient, "grpc_asyncio"), + (BigtableTableAdminClient, "rest"), ], ) def test_bigtable_table_admin_client_from_service_account_info( @@ -131,7 +139,11 @@ def test_bigtable_table_admin_client_from_service_account_info( assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == ("bigtableadmin.googleapis.com:443") + assert client.transport._host == ( + "bigtableadmin.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://bigtableadmin.googleapis.com" + ) @pytest.mark.parametrize( @@ -139,6 +151,7 @@ def test_bigtable_table_admin_client_from_service_account_info( [ (transports.BigtableTableAdminGrpcTransport, "grpc"), (transports.BigtableTableAdminGrpcAsyncIOTransport, "grpc_asyncio"), + (transports.BigtableTableAdminRestTransport, "rest"), ], ) def test_bigtable_table_admin_client_service_account_always_use_jwt( @@ -164,6 +177,7 @@ def 
test_bigtable_table_admin_client_service_account_always_use_jwt( [ (BigtableTableAdminClient, "grpc"), (BigtableTableAdminAsyncClient, "grpc_asyncio"), + (BigtableTableAdminClient, "rest"), ], ) def test_bigtable_table_admin_client_from_service_account_file( @@ -186,13 +200,18 @@ def test_bigtable_table_admin_client_from_service_account_file( assert client.transport._credentials == creds assert isinstance(client, client_class) - assert client.transport._host == ("bigtableadmin.googleapis.com:443") + assert client.transport._host == ( + "bigtableadmin.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://bigtableadmin.googleapis.com" + ) def test_bigtable_table_admin_client_get_transport_class(): transport = BigtableTableAdminClient.get_transport_class() available_transports = [ transports.BigtableTableAdminGrpcTransport, + transports.BigtableTableAdminRestTransport, ] assert transport in available_transports @@ -209,6 +228,7 @@ def test_bigtable_table_admin_client_get_transport_class(): transports.BigtableTableAdminGrpcAsyncIOTransport, "grpc_asyncio", ), + (BigtableTableAdminClient, transports.BigtableTableAdminRestTransport, "rest"), ], ) @mock.patch.object( @@ -364,6 +384,18 @@ def test_bigtable_table_admin_client_client_options( "grpc_asyncio", "false", ), + ( + BigtableTableAdminClient, + transports.BigtableTableAdminRestTransport, + "rest", + "true", + ), + ( + BigtableTableAdminClient, + transports.BigtableTableAdminRestTransport, + "rest", + "false", + ), ], ) @mock.patch.object( @@ -563,6 +595,7 @@ def test_bigtable_table_admin_client_get_mtls_endpoint_and_cert_source(client_cl transports.BigtableTableAdminGrpcAsyncIOTransport, "grpc_asyncio", ), + (BigtableTableAdminClient, transports.BigtableTableAdminRestTransport, "rest"), ], ) def test_bigtable_table_admin_client_client_options_scopes( @@ -603,6 +636,12 @@ def test_bigtable_table_admin_client_client_options_scopes( "grpc_asyncio", grpc_helpers_async, ), + ( + BigtableTableAdminClient, + transports.BigtableTableAdminRestTransport, + "rest", + None, + ), ], ) def test_bigtable_table_admin_client_client_options_credentials_file( @@ -6947,150 +6986,6999 @@ async def test_test_iam_permissions_flattened_error_async(): ) -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.BigtableTableAdminGrpcTransport( +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.CreateTableRequest, + dict, + ], +) +def test_create_table_rest(request_type): + client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - with pytest.raises(ValueError): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = gba_table.Table( + name="name_value", + granularity=gba_table.Table.TimestampGranularity.MILLIS, + deletion_protection=True, ) - # It is an error to provide a credentials file and a transport instance. 
- transport = transports.BigtableTableAdminGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = BigtableTableAdminClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = gba_table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.create_table(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, gba_table.Table) + assert response.name == "name_value" + assert response.granularity == gba_table.Table.TimestampGranularity.MILLIS + assert response.deletion_protection is True + + +def test_create_table_rest_required_fields( + request_type=bigtable_table_admin.CreateTableRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request_init["table_id"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, ) + ) - # It is an error to provide an api_key and a transport instance. - transport = transports.BigtableTableAdminGrpcTransport( + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_table._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + jsonified_request["tableId"] = "table_id_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_table._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + assert "tableId" in jsonified_request + assert jsonified_request["tableId"] == "table_id_value" + + client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - options = client_options.ClientOptions() - options.api_key = "api_key" - with pytest.raises(ValueError): - client = BigtableTableAdminClient( - client_options=options, - transport=transport, - ) + request = request_type(**request_init) - # It is an error to provide an api_key and a credential. - options = mock.Mock() - options.api_key = "api_key" - with pytest.raises(ValueError): - client = BigtableTableAdminClient( - client_options=options, credentials=ga_credentials.AnonymousCredentials() + # Designate an appropriate value for the returned response. + return_value = gba_table.Table() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. 
+ with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + pb_return_value = gba_table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.create_table(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_create_table_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.create_table._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "parent", + "tableId", + "table", + ) ) + ) - # It is an error to provide scopes and a transport instance. - transport = transports.BigtableTableAdminGrpcTransport( + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_create_table_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), ) - with pytest.raises(ValueError): - client = BigtableTableAdminClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, + client = BigtableTableAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_create_table" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_create_table" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_table_admin.CreateTableRequest.pb( + bigtable_table_admin.CreateTableRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = gba_table.Table.to_json(gba_table.Table()) + + request = bigtable_table_admin.CreateTableRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = gba_table.Table() + + client.create_table( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], ) + pre.assert_called_once() + post.assert_called_once() -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. 
- transport = transports.BigtableTableAdminGrpcTransport( + +def test_create_table_rest_bad_request( + transport: str = "rest", request_type=bigtable_table_admin.CreateTableRequest +): + client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) - client = BigtableTableAdminClient(transport=transport) - assert client.transport is transport + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} + request = request_type(**request_init) -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.BigtableTableAdminGrpcTransport( + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.create_table(request) + + +def test_create_table_rest_flattened(): + client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - channel = transport.grpc_channel - assert channel - transport = transports.BigtableTableAdminGrpcAsyncIOTransport( + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = gba_table.Table() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/instances/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + table_id="table_id_value", + table=gba_table.Table(name="name_value"), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = gba_table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.create_table(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/instances/*}/tables" % client.transport._host, + args[1], + ) + + +def test_create_table_rest_flattened_error(transport: str = "rest"): + client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) - channel = transport.grpc_channel - assert channel + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_table( + bigtable_table_admin.CreateTableRequest(), + parent="parent_value", + table_id="table_id_value", + table=gba_table.Table(name="name_value"), + ) -@pytest.mark.parametrize( - "transport_class", - [ - transports.BigtableTableAdminGrpcTransport, - transports.BigtableTableAdminGrpcAsyncIOTransport, - ], -) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. 
- with mock.patch.object(google.auth, "default") as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() + +def test_create_table_rest_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) @pytest.mark.parametrize( - "transport_name", + "request_type", [ - "grpc", + bigtable_table_admin.CreateTableFromSnapshotRequest, + dict, ], ) -def test_transport_kind(transport_name): - transport = BigtableTableAdminClient.get_transport_class(transport_name)( +def test_create_table_from_snapshot_rest(request_type): + client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - assert transport.kind == transport_name + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} + request = request_type(**request_init) -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.create_table_from_snapshot(request) + + # Establish that the response is the type that we expect. + assert response.operation.name == "operations/spam" + + +def test_create_table_from_snapshot_rest_required_fields( + request_type=bigtable_table_admin.CreateTableFromSnapshotRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request_init["table_id"] = "" + request_init["source_snapshot"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_table_from_snapshot._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + jsonified_request["tableId"] = "table_id_value" + jsonified_request["sourceSnapshot"] = "source_snapshot_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_table_from_snapshot._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + assert "tableId" in jsonified_request + assert jsonified_request["tableId"] == "table_id_value" + assert "sourceSnapshot" in jsonified_request + assert jsonified_request["sourceSnapshot"] == "source_snapshot_value" + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # 
Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.create_table_from_snapshot(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_create_table_from_snapshot_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.create_table_from_snapshot._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "parent", + "tableId", + "sourceSnapshot", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_create_table_from_snapshot_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_create_table_from_snapshot" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_create_table_from_snapshot" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_table_admin.CreateTableFromSnapshotRequest.pb( + bigtable_table_admin.CreateTableFromSnapshotRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson( + operations_pb2.Operation() + ) + + request = bigtable_table_admin.CreateTableFromSnapshotRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.create_table_from_snapshot( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_create_table_from_snapshot_rest_bad_request( + transport: str = "rest", + 
request_type=bigtable_table_admin.CreateTableFromSnapshotRequest, +): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) - assert isinstance( - client.transport, - transports.BigtableTableAdminGrpcTransport, + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.create_table_from_snapshot(request) + + +def test_create_table_from_snapshot_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") -def test_bigtable_table_admin_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.BigtableTableAdminTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json", + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/instances/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + table_id="table_id_value", + source_snapshot="source_snapshot_value", ) + mock_args.update(sample_request) + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value -def test_bigtable_table_admin_base_transport(): - # Instantiate the base transport. - with mock.patch( - "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.BigtableTableAdminTransport.__init__" - ) as Transport: - Transport.return_value = None - transport = transports.BigtableTableAdminTransport( - credentials=ga_credentials.AnonymousCredentials(), + client.create_table_from_snapshot(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot" + % client.transport._host, + args[1], ) - # Every method on the transport should just blindly - # raise NotImplementedError. - methods = ( - "create_table", - "create_table_from_snapshot", - "list_tables", - "get_table", - "update_table", - "delete_table", - "undelete_table", + +def test_create_table_from_snapshot_rest_flattened_error(transport: str = "rest"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
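# A minimal sketch (not part of the generated test module) of the rule the
# comment above describes: a call may use EITHER a request object OR the
# flattened keyword arguments, never both. Import paths are assumed from the
# module paths referenced elsewhere in these tests; resource names are
# placeholders.
from google.auth import credentials as ga_credentials
from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
    BigtableTableAdminClient,
)
from google.cloud.bigtable_admin_v2.types import bigtable_table_admin

sketch_client = BigtableTableAdminClient(
    credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)

# Style 1: one fully populated request object.
sketch_request = bigtable_table_admin.CreateTableFromSnapshotRequest(
    parent="projects/p/instances/i",
    table_id="restored-table",
    source_snapshot="projects/p/instances/i/clusters/c/snapshots/s",
)
# sketch_client.create_table_from_snapshot(request=sketch_request)

# Style 2: flattened keyword arguments only.
# sketch_client.create_table_from_snapshot(
#     parent="projects/p/instances/i",
#     table_id="restored-table",
#     source_snapshot="projects/p/instances/i/clusters/c/snapshots/s",
# )
# Supplying both styles at once raises ValueError, which is what the
# assertion below exercises.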
+ with pytest.raises(ValueError): + client.create_table_from_snapshot( + bigtable_table_admin.CreateTableFromSnapshotRequest(), + parent="parent_value", + table_id="table_id_value", + source_snapshot="source_snapshot_value", + ) + + +def test_create_table_from_snapshot_rest_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.ListTablesRequest, + dict, + ], +) +def test_list_tables_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.ListTablesResponse( + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = bigtable_table_admin.ListTablesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.list_tables(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListTablesPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_tables_rest_required_fields( + request_type=bigtable_table_admin.ListTablesRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_tables._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_tables._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "page_size", + "page_token", + "view", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.ListTablesResponse() + # Mock the http request call within the method and fake a response. 
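# A minimal sketch of the stubbing recipe this block is about to apply (the
# helper name below is hypothetical, not part of the generated code): patch
# requests.Session.request once, and every HTTP call issued by the REST
# transport receives the canned Response.
from google.protobuf import json_format
from requests import Response


def fake_rest_response(pb_message, status_code=200):
    # pb_message must be a raw protobuf message; proto-plus values are first
    # converted with .pb(), as the surrounding tests do.
    canned = Response()
    canned.status_code = status_code
    canned._content = json_format.MessageToJson(pb_message).encode("UTF-8")
    return canned


# Typical use, assuming a client constructed as above:
#     with mock.patch.object(Session, "request", return_value=fake_rest_response(pb)):
#         client.list_tables(request)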
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + pb_return_value = bigtable_table_admin.ListTablesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.list_tables(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_tables_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_tables._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "pageSize", + "pageToken", + "view", + ) + ) + & set(("parent",)) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_tables_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_list_tables" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_list_tables" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_table_admin.ListTablesRequest.pb( + bigtable_table_admin.ListTablesRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = bigtable_table_admin.ListTablesResponse.to_json( + bigtable_table_admin.ListTablesResponse() + ) + + request = bigtable_table_admin.ListTablesRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable_table_admin.ListTablesResponse() + + client.list_tables( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_list_tables_rest_bad_request( + transport: str = "rest", request_type=bigtable_table_admin.ListTablesRequest +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the 
method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_tables(request) + + +def test_list_tables_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.ListTablesResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/instances/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = bigtable_table_admin.ListTablesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.list_tables(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/instances/*}/tables" % client.transport._host, + args[1], + ) + + +def test_list_tables_rest_flattened_error(transport: str = "rest"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_tables( + bigtable_table_admin.ListTablesRequest(), + parent="parent_value", + ) + + +def test_list_tables_rest_pager(transport: str = "rest"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + bigtable_table_admin.ListTablesResponse( + tables=[ + table.Table(), + table.Table(), + table.Table(), + ], + next_page_token="abc", + ), + bigtable_table_admin.ListTablesResponse( + tables=[], + next_page_token="def", + ), + bigtable_table_admin.ListTablesResponse( + tables=[ + table.Table(), + ], + next_page_token="ghi", + ), + bigtable_table_admin.ListTablesResponse( + tables=[ + table.Table(), + table.Table(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + bigtable_table_admin.ListTablesResponse.to_json(x) for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"parent": "projects/sample1/instances/sample2"} + + pager = client.list_tables(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, table.Table) for i in results) + + pages = list(client.list_tables(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.GetTableRequest, + dict, + ], +) +def test_get_table_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = table.Table( + name="name_value", + granularity=table.Table.TimestampGranularity.MILLIS, + deletion_protection=True, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.get_table(request) + + # Establish that the response is the type that we expect. 
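# A small aside (import path assumed, mirroring the aliases used in this
# module): the attributes asserted just below are ordinary proto-plus fields,
# so the same values can be set and read back on a locally built Table.
from google.cloud.bigtable_admin_v2.types import table as sketch_table

sketch_tbl = sketch_table.Table(
    name="projects/p/instances/i/tables/t",
    granularity=sketch_table.Table.TimestampGranularity.MILLIS,
    deletion_protection=True,
)
assert sketch_tbl.granularity == sketch_table.Table.TimestampGranularity.MILLIS
assert sketch_tbl.deletion_protection is True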
+ assert isinstance(response, table.Table) + assert response.name == "name_value" + assert response.granularity == table.Table.TimestampGranularity.MILLIS + assert response.deletion_protection is True + + +def test_get_table_rest_required_fields( + request_type=bigtable_table_admin.GetTableRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_table._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_table._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("view",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = table.Table() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + pb_return_value = table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_table(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_table_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_table._get_unset_required_fields({}) + assert set(unset_fields) == (set(("view",)) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_table_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_get_table" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_get_table" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_table_admin.GetTableRequest.pb( + bigtable_table_admin.GetTableRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = table.Table.to_json(table.Table()) + + request = bigtable_table_admin.GetTableRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = table.Table() + + client.get_table( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_table_rest_bad_request( + transport: str = "rest", request_type=bigtable_table_admin.GetTableRequest +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_table(request) + + +def test_get_table_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
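# Before the mock below: the flattened tests finish by checking the expanded
# URI against the http rule with google.api_core.path_template. A small
# stand-alone sketch of that check; the host string matches the REST host
# asserted earlier in this file, and the resource names are placeholders.
from google.api_core import path_template

sketch_host = "https://bigtableadmin.googleapis.com"
sketch_uri = sketch_host + "/v2/projects/p/instances/i/tables/t"

# validate() is truthy only when every segment of the URI matches the pattern.
assert path_template.validate(
    "%s/v2/{name=projects/*/instances/*/tables/*}" % sketch_host, sketch_uri
)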
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = table.Table() + + # get arguments that satisfy an http rule for this method + sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.get_table(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/tables/*}" % client.transport._host, + args[1], + ) + + +def test_get_table_rest_flattened_error(transport: str = "rest"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_table( + bigtable_table_admin.GetTableRequest(), + name="name_value", + ) + + +def test_get_table_rest_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.UpdateTableRequest, + dict, + ], +) +def test_update_table_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = { + "table": {"name": "projects/sample1/instances/sample2/tables/sample3"} + } + request_init["table"] = { + "name": "projects/sample1/instances/sample2/tables/sample3", + "cluster_states": {}, + "column_families": {}, + "granularity": 1, + "restore_info": { + "source_type": 1, + "backup_info": { + "backup": "backup_value", + "start_time": {"seconds": 751, "nanos": 543}, + "end_time": {}, + "source_table": "source_table_value", + }, + }, + "deletion_protection": True, + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.update_table(request) + + # Establish that the response is the type that we expect. 
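# Context for the canned value above: UpdateTable is a long-running operation,
# so the stub returns a google.longrunning Operation document rather than a
# Table; the client wraps it in a future and the assertion below reads
# response.operation.name. A minimal round-trip of that payload:
from google.longrunning import operations_pb2
from google.protobuf import json_format

sketch_op = operations_pb2.Operation(name="operations/spam")
sketch_payload = json_format.MessageToJson(sketch_op)
assert json_format.Parse(sketch_payload, operations_pb2.Operation()).name == (
    "operations/spam"
)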
+ assert response.operation.name == "operations/spam" + + +def test_update_table_rest_required_fields( + request_type=bigtable_table_admin.UpdateTableRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_table._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_table._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("update_mask",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "patch", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.update_table(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_update_table_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.update_table._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(("updateMask",)) + & set( + ( + "table", + "updateMask", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_update_table_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_update_table" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_update_table" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_table_admin.UpdateTableRequest.pb( + bigtable_table_admin.UpdateTableRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson( + operations_pb2.Operation() + ) + + request = bigtable_table_admin.UpdateTableRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.update_table( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_update_table_rest_bad_request( + transport: str = "rest", request_type=bigtable_table_admin.UpdateTableRequest +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = { + "table": {"name": "projects/sample1/instances/sample2/tables/sample3"} + } + request_init["table"] = { + "name": "projects/sample1/instances/sample2/tables/sample3", + "cluster_states": {}, + "column_families": {}, + "granularity": 1, + "restore_info": { + "source_type": 1, + "backup_info": { + "backup": "backup_value", + "start_time": {"seconds": 751, "nanos": 543}, + "end_time": {}, + "source_table": "source_table_value", + }, + }, + "deletion_protection": True, + } + request = request_type(**request_init) + + # Mock the http request call within the 
method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.update_table(request) + + +def test_update_table_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "table": {"name": "projects/sample1/instances/sample2/tables/sample3"} + } + + # get truthy value for each flattened field + mock_args = dict( + table=gba_table.Table(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.update_table(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{table.name=projects/*/instances/*/tables/*}" + % client.transport._host, + args[1], + ) + + +def test_update_table_rest_flattened_error(transport: str = "rest"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_table( + bigtable_table_admin.UpdateTableRequest(), + table=gba_table.Table(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +def test_update_table_rest_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.DeleteTableRequest, + dict, + ], +) +def test_delete_table_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.delete_table(request) + + # Establish that the response is the type that we expect. 
+ assert response is None + + +def test_delete_table_rest_required_fields( + request_type=bigtable_table_admin.DeleteTableRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_table._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_table._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = None + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "delete", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.delete_table(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_delete_table_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.delete_table._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_delete_table_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_delete_table" + ) as pre: + pre.assert_not_called() + pb_message = bigtable_table_admin.DeleteTableRequest.pb( + bigtable_table_admin.DeleteTableRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + + request = bigtable_table_admin.DeleteTableRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + + client.delete_table( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + + +def test_delete_table_rest_bad_request( + transport: str = "rest", request_type=bigtable_table_admin.DeleteTableRequest +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete_table(request) + + +def test_delete_table_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = None + + # get arguments that satisfy an http rule for this method + sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.delete_table(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/tables/*}" % client.transport._host, + args[1], + ) + + +def test_delete_table_rest_flattened_error(transport: str = "rest"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_table( + bigtable_table_admin.DeleteTableRequest(), + name="name_value", + ) + + +def test_delete_table_rest_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.UndeleteTableRequest, + dict, + ], +) +def test_undelete_table_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.undelete_table(request) + + # Establish that the response is the type that we expect. 
+ assert response.operation.name == "operations/spam" + + +def test_undelete_table_rest_required_fields( + request_type=bigtable_table_admin.UndeleteTableRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).undelete_table._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).undelete_table._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.undelete_table(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_undelete_table_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.undelete_table._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_undelete_table_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_undelete_table" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_undelete_table" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_table_admin.UndeleteTableRequest.pb( + bigtable_table_admin.UndeleteTableRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson( + operations_pb2.Operation() + ) + + request = bigtable_table_admin.UndeleteTableRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.undelete_table( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_undelete_table_rest_bad_request( + transport: str = "rest", request_type=bigtable_table_admin.UndeleteTableRequest +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
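# Why BadRequest is the expected exception below: google.api_core maps HTTP
# status codes to exception classes, and status 400 corresponds to BadRequest.
# A one-line check of that mapping (the message text is a placeholder):
from google.api_core import exceptions as core_exceptions

sketch_exc = core_exceptions.from_http_status(400, "placeholder message")
assert isinstance(sketch_exc, core_exceptions.BadRequest)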
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.undelete_table(request) + + +def test_undelete_table_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.undelete_table(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/tables/*}:undelete" + % client.transport._host, + args[1], + ) + + +def test_undelete_table_rest_flattened_error(transport: str = "rest"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.undelete_table( + bigtable_table_admin.UndeleteTableRequest(), + name="name_value", + ) + + +def test_undelete_table_rest_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.ModifyColumnFamiliesRequest, + dict, + ], +) +def test_modify_column_families_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = table.Table( + name="name_value", + granularity=table.Table.TimestampGranularity.MILLIS, + deletion_protection=True, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.modify_column_families(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, table.Table) + assert response.name == "name_value" + assert response.granularity == table.Table.TimestampGranularity.MILLIS + assert response.deletion_protection is True + + +def test_modify_column_families_rest_required_fields( + request_type=bigtable_table_admin.ModifyColumnFamiliesRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).modify_column_families._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).modify_column_families._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = table.Table() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + pb_return_value = table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.modify_column_families(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_modify_column_families_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.modify_column_families._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "name", + "modifications", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_modify_column_families_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_modify_column_families" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_modify_column_families" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_table_admin.ModifyColumnFamiliesRequest.pb( + bigtable_table_admin.ModifyColumnFamiliesRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = table.Table.to_json(table.Table()) + + request = bigtable_table_admin.ModifyColumnFamiliesRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = table.Table() + + client.modify_column_families( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_modify_column_families_rest_bad_request( + transport: str = "rest", + request_type=bigtable_table_admin.ModifyColumnFamiliesRequest, +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.modify_column_families(request) + + +def test_modify_column_families_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = table.Table() + + # get arguments that satisfy an http rule for this method + sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + modifications=[ + bigtable_table_admin.ModifyColumnFamiliesRequest.Modification( + id="id_value" + ) + ], + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.modify_column_families(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies" + % client.transport._host, + args[1], + ) + + +def test_modify_column_families_rest_flattened_error(transport: str = "rest"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.modify_column_families( + bigtable_table_admin.ModifyColumnFamiliesRequest(), + name="name_value", + modifications=[ + bigtable_table_admin.ModifyColumnFamiliesRequest.Modification( + id="id_value" + ) + ], + ) + + +def test_modify_column_families_rest_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.DropRowRangeRequest, + dict, + ], +) +def test_drop_row_range_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.drop_row_range(request) + + # Establish that the response is the type that we expect. 
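+        # DropRowRange returns google.protobuf.Empty on the wire, so the client
+        # surfaces None here (matching the empty JSON body faked above).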
+ assert response is None + + +def test_drop_row_range_rest_required_fields( + request_type=bigtable_table_admin.DropRowRangeRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).drop_row_range._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).drop_row_range._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = None + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.drop_row_range(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_drop_row_range_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.drop_row_range._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_drop_row_range_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_drop_row_range" + ) as pre: + pre.assert_not_called() + pb_message = bigtable_table_admin.DropRowRangeRequest.pb( + bigtable_table_admin.DropRowRangeRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + + request = bigtable_table_admin.DropRowRangeRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + + client.drop_row_range( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + + +def test_drop_row_range_rest_bad_request( + transport: str = "rest", request_type=bigtable_table_admin.DropRowRangeRequest +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.drop_row_range(request) + + +def test_drop_row_range_rest_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.GenerateConsistencyTokenRequest, + dict, + ], +) +def test_generate_consistency_token_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.GenerateConsistencyTokenResponse( + consistency_token="consistency_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.generate_consistency_token(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable_table_admin.GenerateConsistencyTokenResponse) + assert response.consistency_token == "consistency_token_value" + + +def test_generate_consistency_token_rest_required_fields( + request_type=bigtable_table_admin.GenerateConsistencyTokenRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).generate_consistency_token._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).generate_consistency_token._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + pb_return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.generate_consistency_token(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_generate_consistency_token_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.generate_consistency_token._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_generate_consistency_token_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_generate_consistency_token" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_generate_consistency_token" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_table_admin.GenerateConsistencyTokenRequest.pb( + bigtable_table_admin.GenerateConsistencyTokenRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = ( + bigtable_table_admin.GenerateConsistencyTokenResponse.to_json( + bigtable_table_admin.GenerateConsistencyTokenResponse() + ) + ) + + request = bigtable_table_admin.GenerateConsistencyTokenRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() + + client.generate_consistency_token( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_generate_consistency_token_rest_bad_request( + transport: str = "rest", + request_type=bigtable_table_admin.GenerateConsistencyTokenRequest, +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), 
+ transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.generate_consistency_token(request) + + +def test_generate_consistency_token_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(pb_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.generate_consistency_token(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken" + % client.transport._host, + args[1], + ) + + +def test_generate_consistency_token_rest_flattened_error(transport: str = "rest"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.generate_consistency_token( + bigtable_table_admin.GenerateConsistencyTokenRequest(), + name="name_value", + ) + + +def test_generate_consistency_token_rest_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.CheckConsistencyRequest, + dict, + ], +) +def test_check_consistency_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = bigtable_table_admin.CheckConsistencyResponse( + consistent=True, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = bigtable_table_admin.CheckConsistencyResponse.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.check_consistency(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable_table_admin.CheckConsistencyResponse) + assert response.consistent is True + + +def test_check_consistency_rest_required_fields( + request_type=bigtable_table_admin.CheckConsistencyRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["name"] = "" + request_init["consistency_token"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).check_consistency._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + jsonified_request["consistencyToken"] = "consistency_token_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).check_consistency._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + assert "consistencyToken" in jsonified_request + assert jsonified_request["consistencyToken"] == "consistency_token_value" + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.CheckConsistencyResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + pb_return_value = bigtable_table_admin.CheckConsistencyResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.check_consistency(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_check_consistency_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.check_consistency._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "name", + "consistencyToken", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_check_consistency_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_check_consistency" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_check_consistency" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_table_admin.CheckConsistencyRequest.pb( + bigtable_table_admin.CheckConsistencyRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = ( + bigtable_table_admin.CheckConsistencyResponse.to_json( + bigtable_table_admin.CheckConsistencyResponse() + ) + ) + + request = bigtable_table_admin.CheckConsistencyRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable_table_admin.CheckConsistencyResponse() + + client.check_consistency( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_check_consistency_rest_bad_request( + transport: str = "rest", request_type=bigtable_table_admin.CheckConsistencyRequest +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.check_consistency(request) + + +def test_check_consistency_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.CheckConsistencyResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + consistency_token="consistency_token_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = bigtable_table_admin.CheckConsistencyResponse.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.check_consistency(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/tables/*}:checkConsistency" + % client.transport._host, + args[1], + ) + + +def test_check_consistency_rest_flattened_error(transport: str = "rest"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.check_consistency( + bigtable_table_admin.CheckConsistencyRequest(), + name="name_value", + consistency_token="consistency_token_value", + ) + + +def test_check_consistency_rest_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.SnapshotTableRequest, + dict, + ], +) +def test_snapshot_table_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.snapshot_table(request) + + # Establish that the response is the type that we expect. 
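+        # SnapshotTable is a long-running operation; the client wraps the raw
+        # operations_pb2.Operation in a long-running operation object, so the
+        # test reads the name back through response.operation.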
+ assert response.operation.name == "operations/spam" + + +def test_snapshot_table_rest_required_fields( + request_type=bigtable_table_admin.SnapshotTableRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["name"] = "" + request_init["cluster"] = "" + request_init["snapshot_id"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).snapshot_table._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + jsonified_request["cluster"] = "cluster_value" + jsonified_request["snapshotId"] = "snapshot_id_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).snapshot_table._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + assert "cluster" in jsonified_request + assert jsonified_request["cluster"] == "cluster_value" + assert "snapshotId" in jsonified_request + assert jsonified_request["snapshotId"] == "snapshot_id_value" + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.snapshot_table(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_snapshot_table_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.snapshot_table._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "name", + "cluster", + "snapshotId", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_snapshot_table_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_snapshot_table" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_snapshot_table" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_table_admin.SnapshotTableRequest.pb( + bigtable_table_admin.SnapshotTableRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson( + operations_pb2.Operation() + ) + + request = bigtable_table_admin.SnapshotTableRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.snapshot_table( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_snapshot_table_rest_bad_request( + transport: str = "rest", request_type=bigtable_table_admin.SnapshotTableRequest +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.snapshot_table(request) + + +def test_snapshot_table_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + cluster="cluster_value", + snapshot_id="snapshot_id_value", + description="description_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.snapshot_table(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/tables/*}:snapshot" + % client.transport._host, + args[1], + ) + + +def test_snapshot_table_rest_flattened_error(transport: str = "rest"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.snapshot_table( + bigtable_table_admin.SnapshotTableRequest(), + name="name_value", + cluster="cluster_value", + snapshot_id="snapshot_id_value", + description="description_value", + ) + + +def test_snapshot_table_rest_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.GetSnapshotRequest, + dict, + ], +) +def test_get_snapshot_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = table.Snapshot( + name="name_value", + data_size_bytes=1594, + state=table.Snapshot.State.READY, + description="description_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = table.Snapshot.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.get_snapshot(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, table.Snapshot) + assert response.name == "name_value" + assert response.data_size_bytes == 1594 + assert response.state == table.Snapshot.State.READY + assert response.description == "description_value" + + +def test_get_snapshot_rest_required_fields( + request_type=bigtable_table_admin.GetSnapshotRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_snapshot._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_snapshot._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = table.Snapshot() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + pb_return_value = table.Snapshot.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_snapshot(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_snapshot_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_snapshot._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_snapshot_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_get_snapshot" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_get_snapshot" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_table_admin.GetSnapshotRequest.pb( + bigtable_table_admin.GetSnapshotRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = table.Snapshot.to_json(table.Snapshot()) + + request = bigtable_table_admin.GetSnapshotRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = table.Snapshot() + + client.get_snapshot( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_snapshot_rest_bad_request( + transport: str = "rest", request_type=bigtable_table_admin.GetSnapshotRequest +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_snapshot(request) + + +def test_get_snapshot_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = table.Snapshot() + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = table.Snapshot.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.get_snapshot(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}" + % client.transport._host, + args[1], + ) + + +def test_get_snapshot_rest_flattened_error(transport: str = "rest"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_snapshot( + bigtable_table_admin.GetSnapshotRequest(), + name="name_value", + ) + + +def test_get_snapshot_rest_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.ListSnapshotsRequest, + dict, + ], +) +def test_list_snapshots_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.ListSnapshotsResponse( + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = bigtable_table_admin.ListSnapshotsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.list_snapshots(request) + + # Establish that the response is the type that we expect. 
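+        # list_snapshots returns a pager that wraps the raw response; attribute
+        # access such as next_page_token is forwarded to the underlying
+        # ListSnapshotsResponse.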
+ assert isinstance(response, pagers.ListSnapshotsPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_snapshots_rest_required_fields( + request_type=bigtable_table_admin.ListSnapshotsRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_snapshots._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_snapshots._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "page_size", + "page_token", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.ListSnapshotsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + pb_return_value = bigtable_table_admin.ListSnapshotsResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.list_snapshots(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_snapshots_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_snapshots._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "pageSize", + "pageToken", + ) + ) + & set(("parent",)) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_snapshots_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_list_snapshots" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_list_snapshots" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_table_admin.ListSnapshotsRequest.pb( + bigtable_table_admin.ListSnapshotsRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = bigtable_table_admin.ListSnapshotsResponse.to_json( + bigtable_table_admin.ListSnapshotsResponse() + ) + + request = bigtable_table_admin.ListSnapshotsRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable_table_admin.ListSnapshotsResponse() + + client.list_snapshots( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_list_snapshots_rest_bad_request( + transport: str = "rest", request_type=bigtable_table_admin.ListSnapshotsRequest +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_snapshots(request) + + +def test_list_snapshots_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.ListSnapshotsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = { + "parent": "projects/sample1/instances/sample2/clusters/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = bigtable_table_admin.ListSnapshotsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.list_snapshots(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/instances/*/clusters/*}/snapshots" + % client.transport._host, + args[1], + ) + + +def test_list_snapshots_rest_flattened_error(transport: str = "rest"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_snapshots( + bigtable_table_admin.ListSnapshotsRequest(), + parent="parent_value", + ) + + +def test_list_snapshots_rest_pager(transport: str = "rest"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[ + table.Snapshot(), + table.Snapshot(), + table.Snapshot(), + ], + next_page_token="abc", + ), + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[], + next_page_token="def", + ), + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[ + table.Snapshot(), + ], + next_page_token="ghi", + ), + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[ + table.Snapshot(), + table.Snapshot(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + bigtable_table_admin.ListSnapshotsResponse.to_json(x) for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = { + "parent": "projects/sample1/instances/sample2/clusters/sample3" + } + + pager = client.list_snapshots(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, table.Snapshot) for i in results) + + pages = list(client.list_snapshots(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.DeleteSnapshotRequest, + dict, + ], +) +def test_delete_snapshot_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.delete_snapshot(request) + + # Establish that the response is the type that we expect. 
+ assert response is None + + +def test_delete_snapshot_rest_required_fields( + request_type=bigtable_table_admin.DeleteSnapshotRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_snapshot._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_snapshot._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = None + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "delete", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.delete_snapshot(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_delete_snapshot_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.delete_snapshot._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_delete_snapshot_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_delete_snapshot" + ) as pre: + pre.assert_not_called() + pb_message = bigtable_table_admin.DeleteSnapshotRequest.pb( + bigtable_table_admin.DeleteSnapshotRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + + request = bigtable_table_admin.DeleteSnapshotRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + + client.delete_snapshot( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + + +def test_delete_snapshot_rest_bad_request( + transport: str = "rest", request_type=bigtable_table_admin.DeleteSnapshotRequest +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete_snapshot(request) + + +def test_delete_snapshot_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = None + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.delete_snapshot(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}" + % client.transport._host, + args[1], + ) + + +def test_delete_snapshot_rest_flattened_error(transport: str = "rest"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_snapshot( + bigtable_table_admin.DeleteSnapshotRequest(), + name="name_value", + ) + + +def test_delete_snapshot_rest_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.CreateBackupRequest, + dict, + ], +) +def test_create_backup_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} + request_init["backup"] = { + "name": "name_value", + "source_table": "source_table_value", + "expire_time": {"seconds": 751, "nanos": 543}, + "start_time": {}, + "end_time": {}, + "size_bytes": 1089, + "state": 1, + "encryption_info": { + "encryption_type": 1, + "encryption_status": { + "code": 411, + "message": "message_value", + "details": [ + { + "type_url": "type.googleapis.com/google.protobuf.Duration", + "value": b"\x08\x0c\x10\xdb\x07", + } + ], + }, + "kms_key_version": "kms_key_version_value", + }, + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.create_backup(request) + + # Establish that the response is the type that we expect. 
+ assert response.operation.name == "operations/spam" + + +def test_create_backup_rest_required_fields( + request_type=bigtable_table_admin.CreateBackupRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request_init["backup_id"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + assert "backupId" not in jsonified_request + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_backup._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + assert "backupId" in jsonified_request + assert jsonified_request["backupId"] == request_init["backup_id"] + + jsonified_request["parent"] = "parent_value" + jsonified_request["backupId"] = "backup_id_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_backup._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("backup_id",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + assert "backupId" in jsonified_request + assert jsonified_request["backupId"] == "backup_id_value" + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.create_backup(request) + + expected_params = [ + ( + "backupId", + "", + ), + ("$alt", "json;enum-encoding=int"), + ] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_create_backup_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.create_backup._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(("backupId",)) + & set( + ( + "parent", + "backupId", + "backup", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_create_backup_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_create_backup" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_create_backup" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_table_admin.CreateBackupRequest.pb( + bigtable_table_admin.CreateBackupRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson( + operations_pb2.Operation() + ) + + request = bigtable_table_admin.CreateBackupRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.create_backup( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_create_backup_rest_bad_request( + transport: str = "rest", request_type=bigtable_table_admin.CreateBackupRequest +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} + request_init["backup"] = { + "name": "name_value", + "source_table": "source_table_value", + "expire_time": {"seconds": 751, "nanos": 543}, + "start_time": {}, + "end_time": {}, + "size_bytes": 1089, + "state": 1, + "encryption_info": { + "encryption_type": 1, + "encryption_status": { + "code": 411, + "message": "message_value", + "details": [ + { + "type_url": "type.googleapis.com/google.protobuf.Duration", + "value": 
b"\x08\x0c\x10\xdb\x07", + } + ], + }, + "kms_key_version": "kms_key_version_value", + }, + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.create_backup(request) + + +def test_create_backup_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "parent": "projects/sample1/instances/sample2/clusters/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + backup_id="backup_id_value", + backup=table.Backup(name="name_value"), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.create_backup(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/instances/*/clusters/*}/backups" + % client.transport._host, + args[1], + ) + + +def test_create_backup_rest_flattened_error(transport: str = "rest"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_backup( + bigtable_table_admin.CreateBackupRequest(), + parent="parent_value", + backup_id="backup_id_value", + backup=table.Backup(name="name_value"), + ) + + +def test_create_backup_rest_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.GetBackupRequest, + dict, + ], +) +def test_get_backup_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = table.Backup( + name="name_value", + source_table="source_table_value", + size_bytes=1089, + state=table.Backup.State.CREATING, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = table.Backup.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.get_backup(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, table.Backup) + assert response.name == "name_value" + assert response.source_table == "source_table_value" + assert response.size_bytes == 1089 + assert response.state == table.Backup.State.CREATING + + +def test_get_backup_rest_required_fields( + request_type=bigtable_table_admin.GetBackupRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_backup._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_backup._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = table.Backup() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + pb_return_value = table.Backup.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_backup(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_backup_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_backup._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_backup_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_get_backup" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_get_backup" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_table_admin.GetBackupRequest.pb( + bigtable_table_admin.GetBackupRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = table.Backup.to_json(table.Backup()) + + request = bigtable_table_admin.GetBackupRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = table.Backup() + + client.get_backup( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_backup_rest_bad_request( + transport: str = "rest", request_type=bigtable_table_admin.GetBackupRequest +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_backup(request) + + +def test_get_backup_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = table.Backup() + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = table.Backup.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.get_backup(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/clusters/*/backups/*}" + % client.transport._host, + args[1], + ) + + +def test_get_backup_rest_flattened_error(transport: str = "rest"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_backup( + bigtable_table_admin.GetBackupRequest(), + name="name_value", + ) + + +def test_get_backup_rest_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.UpdateBackupRequest, + dict, + ], +) +def test_update_backup_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = { + "backup": { + "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" + } + } + request_init["backup"] = { + "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4", + "source_table": "source_table_value", + "expire_time": {"seconds": 751, "nanos": 543}, + "start_time": {}, + "end_time": {}, + "size_bytes": 1089, + "state": 1, + "encryption_info": { + "encryption_type": 1, + "encryption_status": { + "code": 411, + "message": "message_value", + "details": [ + { + "type_url": "type.googleapis.com/google.protobuf.Duration", + "value": b"\x08\x0c\x10\xdb\x07", + } + ], + }, + "kms_key_version": "kms_key_version_value", + }, + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = table.Backup( + name="name_value", + source_table="source_table_value", + size_bytes=1089, + state=table.Backup.State.CREATING, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = table.Backup.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.update_backup(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, table.Backup) + assert response.name == "name_value" + assert response.source_table == "source_table_value" + assert response.size_bytes == 1089 + assert response.state == table.Backup.State.CREATING + + +def test_update_backup_rest_required_fields( + request_type=bigtable_table_admin.UpdateBackupRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_backup._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_backup._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("update_mask",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = table.Backup() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "patch", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + pb_return_value = table.Backup.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.update_backup(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_update_backup_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.update_backup._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(("updateMask",)) + & set( + ( + "backup", + "updateMask", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_update_backup_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_update_backup" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_update_backup" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_table_admin.UpdateBackupRequest.pb( + bigtable_table_admin.UpdateBackupRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = table.Backup.to_json(table.Backup()) + + request = bigtable_table_admin.UpdateBackupRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = table.Backup() + + client.update_backup( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_update_backup_rest_bad_request( + transport: str = "rest", request_type=bigtable_table_admin.UpdateBackupRequest +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = { + "backup": { + "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" + } + } + request_init["backup"] = { + "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4", + "source_table": "source_table_value", + "expire_time": {"seconds": 751, "nanos": 543}, + "start_time": {}, + "end_time": {}, + "size_bytes": 1089, + "state": 1, + "encryption_info": { + "encryption_type": 1, + "encryption_status": { + "code": 411, + "message": "message_value", + "details": [ + { + "type_url": "type.googleapis.com/google.protobuf.Duration", + "value": 
b"\x08\x0c\x10\xdb\x07", + } + ], + }, + "kms_key_version": "kms_key_version_value", + }, + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.update_backup(request) + + +def test_update_backup_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = table.Backup() + + # get arguments that satisfy an http rule for this method + sample_request = { + "backup": { + "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" + } + } + + # get truthy value for each flattened field + mock_args = dict( + backup=table.Backup(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = table.Backup.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.update_backup(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{backup.name=projects/*/instances/*/clusters/*/backups/*}" + % client.transport._host, + args[1], + ) + + +def test_update_backup_rest_flattened_error(transport: str = "rest"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_backup( + bigtable_table_admin.UpdateBackupRequest(), + backup=table.Backup(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +def test_update_backup_rest_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.DeleteBackupRequest, + dict, + ], +) +def test_delete_backup_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = None + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.delete_backup(request) + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_backup_rest_required_fields( + request_type=bigtable_table_admin.DeleteBackupRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_backup._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_backup._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = None + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "delete", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.delete_backup(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_delete_backup_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.delete_backup._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_delete_backup_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_delete_backup" + ) as pre: + pre.assert_not_called() + pb_message = bigtable_table_admin.DeleteBackupRequest.pb( + bigtable_table_admin.DeleteBackupRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + + request = bigtable_table_admin.DeleteBackupRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + + client.delete_backup( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + + +def test_delete_backup_rest_bad_request( + transport: str = "rest", request_type=bigtable_table_admin.DeleteBackupRequest +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete_backup(request) + + +def test_delete_backup_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = None + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.delete_backup(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/clusters/*/backups/*}" + % client.transport._host, + args[1], + ) + + +def test_delete_backup_rest_flattened_error(transport: str = "rest"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_backup( + bigtable_table_admin.DeleteBackupRequest(), + name="name_value", + ) + + +def test_delete_backup_rest_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.ListBackupsRequest, + dict, + ], +) +def test_list_backups_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.ListBackupsResponse( + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.list_backups(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListBackupsPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_backups_rest_required_fields( + request_type=bigtable_table_admin.ListBackupsRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_backups._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_backups._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "filter", + "order_by", + "page_size", + "page_token", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.ListBackupsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + pb_return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.list_backups(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_backups_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_backups._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "filter", + "orderBy", + "pageSize", + "pageToken", + ) + ) + & set(("parent",)) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_backups_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_list_backups" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_list_backups" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_table_admin.ListBackupsRequest.pb( + bigtable_table_admin.ListBackupsRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = bigtable_table_admin.ListBackupsResponse.to_json( + bigtable_table_admin.ListBackupsResponse() + ) + + request = bigtable_table_admin.ListBackupsRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable_table_admin.ListBackupsResponse() + + client.list_backups( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_list_backups_rest_bad_request( + transport: str = "rest", request_type=bigtable_table_admin.ListBackupsRequest +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_backups(request) + + +def test_list_backups_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.ListBackupsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = { + "parent": "projects/sample1/instances/sample2/clusters/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.list_backups(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/instances/*/clusters/*}/backups" + % client.transport._host, + args[1], + ) + + +def test_list_backups_rest_flattened_error(transport: str = "rest"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_backups( + bigtable_table_admin.ListBackupsRequest(), + parent="parent_value", + ) + + +def test_list_backups_rest_pager(transport: str = "rest"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + bigtable_table_admin.ListBackupsResponse( + backups=[ + table.Backup(), + table.Backup(), + table.Backup(), + ], + next_page_token="abc", + ), + bigtable_table_admin.ListBackupsResponse( + backups=[], + next_page_token="def", + ), + bigtable_table_admin.ListBackupsResponse( + backups=[ + table.Backup(), + ], + next_page_token="ghi", + ), + bigtable_table_admin.ListBackupsResponse( + backups=[ + table.Backup(), + table.Backup(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + bigtable_table_admin.ListBackupsResponse.to_json(x) for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = { + "parent": "projects/sample1/instances/sample2/clusters/sample3" + } + + pager = client.list_backups(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, table.Backup) for i in results) + + pages = list(client.list_backups(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.RestoreTableRequest, + dict, + ], +) +def test_restore_table_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.restore_table(request) + + # Establish that the response is the type that we expect. 
+ assert response.operation.name == "operations/spam" + + +def test_restore_table_rest_required_fields( + request_type=bigtable_table_admin.RestoreTableRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request_init["table_id"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).restore_table._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + jsonified_request["tableId"] = "table_id_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).restore_table._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + assert "tableId" in jsonified_request + assert jsonified_request["tableId"] == "table_id_value" + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.restore_table(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_restore_table_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.restore_table._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "parent", + "tableId", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_restore_table_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_restore_table" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_restore_table" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_table_admin.RestoreTableRequest.pb( + bigtable_table_admin.RestoreTableRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson( + operations_pb2.Operation() + ) + + request = bigtable_table_admin.RestoreTableRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.restore_table( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_restore_table_rest_bad_request( + transport: str = "rest", request_type=bigtable_table_admin.RestoreTableRequest +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.restore_table(request) + + +def test_restore_table_rest_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.GetIamPolicyRequest, + dict, + ], +) +def test_get_iam_policy_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = return_value + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.get_iam_policy(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + assert response.version == 774 + assert response.etag == b"etag_blob" + + +def test_get_iam_policy_rest_required_fields( + request_type=iam_policy_pb2.GetIamPolicyRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["resource"] = "" + request = request_type(**request_init) + pb_request = request + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_iam_policy._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["resource"] = "resource_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_iam_policy._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "resource" in jsonified_request + assert jsonified_request["resource"] == "resource_value" + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. 
+ with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + pb_return_value = return_value + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_iam_policy(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_iam_policy_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_iam_policy._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("resource",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_iam_policy_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_get_iam_policy" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_get_iam_policy" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = iam_policy_pb2.GetIamPolicyRequest() + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson(policy_pb2.Policy()) + + request = iam_policy_pb2.GetIamPolicyRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = policy_pb2.Policy() + + client.get_iam_policy( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_iam_policy_rest_bad_request( + transport: str = "rest", request_type=iam_policy_pb2.GetIamPolicyRequest +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_iam_policy(request) + + +def test_get_iam_policy_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy() + + # get arguments that satisfy an http rule for this method + sample_request = { + "resource": "projects/sample1/instances/sample2/tables/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + resource="resource_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = return_value + json_return_value = json_format.MessageToJson(pb_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.get_iam_policy(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{resource=projects/*/instances/*/tables/*}:getIamPolicy" + % client.transport._host, + args[1], + ) + + +def test_get_iam_policy_rest_flattened_error(transport: str = "rest"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_iam_policy( + iam_policy_pb2.GetIamPolicyRequest(), + resource="resource_value", + ) + + +def test_get_iam_policy_rest_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.SetIamPolicyRequest, + dict, + ], +) +def test_set_iam_policy_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = return_value + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.set_iam_policy(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, policy_pb2.Policy) + assert response.version == 774 + assert response.etag == b"etag_blob" + + +def test_set_iam_policy_rest_required_fields( + request_type=iam_policy_pb2.SetIamPolicyRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["resource"] = "" + request = request_type(**request_init) + pb_request = request + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).set_iam_policy._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["resource"] = "resource_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).set_iam_policy._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "resource" in jsonified_request + assert jsonified_request["resource"] == "resource_value" + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + pb_return_value = return_value + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.set_iam_policy(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_set_iam_policy_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.set_iam_policy._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "resource", + "policy", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_set_iam_policy_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_set_iam_policy" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_set_iam_policy" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = iam_policy_pb2.SetIamPolicyRequest() + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson(policy_pb2.Policy()) + + request = iam_policy_pb2.SetIamPolicyRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = policy_pb2.Policy() + + client.set_iam_policy( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_set_iam_policy_rest_bad_request( + transport: str = "rest", request_type=iam_policy_pb2.SetIamPolicyRequest +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_iam_policy(request) + + +def test_set_iam_policy_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy() + + # get arguments that satisfy an http rule for this method + sample_request = { + "resource": "projects/sample1/instances/sample2/tables/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + resource="resource_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = return_value + json_return_value = json_format.MessageToJson(pb_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.set_iam_policy(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{resource=projects/*/instances/*/tables/*}:setIamPolicy" + % client.transport._host, + args[1], + ) + + +def test_set_iam_policy_rest_flattened_error(transport: str = "rest"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_iam_policy( + iam_policy_pb2.SetIamPolicyRequest(), + resource="resource_value", + ) + + +def test_set_iam_policy_rest_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.TestIamPermissionsRequest, + dict, + ], +) +def test_test_iam_permissions_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = return_value + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.test_iam_permissions(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + assert response.permissions == ["permissions_value"] + + +def test_test_iam_permissions_rest_required_fields( + request_type=iam_policy_pb2.TestIamPermissionsRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["resource"] = "" + request_init["permissions"] = "" + request = request_type(**request_init) + pb_request = request + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).test_iam_permissions._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["resource"] = "resource_value" + jsonified_request["permissions"] = "permissions_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).test_iam_permissions._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "resource" in jsonified_request + assert jsonified_request["resource"] == "resource_value" + assert "permissions" in jsonified_request + assert jsonified_request["permissions"] == "permissions_value" + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = iam_policy_pb2.TestIamPermissionsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + pb_return_value = return_value + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.test_iam_permissions(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_test_iam_permissions_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.test_iam_permissions._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "resource", + "permissions", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_test_iam_permissions_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_test_iam_permissions" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_test_iam_permissions" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = iam_policy_pb2.TestIamPermissionsRequest() + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + request = iam_policy_pb2.TestIamPermissionsRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + client.test_iam_permissions( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_test_iam_permissions_rest_bad_request( + transport: str = "rest", request_type=iam_policy_pb2.TestIamPermissionsRequest +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.test_iam_permissions(request) + + +def test_test_iam_permissions_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = iam_policy_pb2.TestIamPermissionsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = { + "resource": "projects/sample1/instances/sample2/tables/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + resource="resource_value", + permissions=["permissions_value"], + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = return_value + json_return_value = json_format.MessageToJson(pb_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.test_iam_permissions(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{resource=projects/*/instances/*/tables/*}:testIamPermissions" + % client.transport._host, + args[1], + ) + + +def test_test_iam_permissions_rest_flattened_error(transport: str = "rest"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.test_iam_permissions( + iam_policy_pb2.TestIamPermissionsRequest(), + resource="resource_value", + permissions=["permissions_value"], + ) + + +def test_test_iam_permissions_rest_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.BigtableTableAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.BigtableTableAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BigtableTableAdminClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. 
+ transport = transports.BigtableTableAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = BigtableTableAdminClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = mock.Mock() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = BigtableTableAdminClient( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.BigtableTableAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BigtableTableAdminClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.BigtableTableAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = BigtableTableAdminClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.BigtableTableAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.BigtableTableAdminGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.BigtableTableAdminGrpcTransport, + transports.BigtableTableAdminGrpcAsyncIOTransport, + transports.BigtableTableAdminRestTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "rest", + ], +) +def test_transport_kind(transport_name): + transport = BigtableTableAdminClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.BigtableTableAdminGrpcTransport, + ) + + +def test_bigtable_table_admin_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.BigtableTableAdminTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_bigtable_table_admin_base_transport(): + # Instantiate the base transport. + with mock.patch( + "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.BigtableTableAdminTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.BigtableTableAdminTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = (
+ "create_table",
+ "create_table_from_snapshot",
+ "list_tables",
+ "get_table",
+ "update_table",
+ "delete_table",
+ "undelete_table",
+ "modify_column_families",
+ "drop_row_range",
+ "generate_consistency_token",
@@ -7220,6 +14108,7 @@ def test_bigtable_table_admin_transport_auth_adc(transport_class):
 [
 transports.BigtableTableAdminGrpcTransport,
 transports.BigtableTableAdminGrpcAsyncIOTransport,
+ transports.BigtableTableAdminRestTransport,
 ],
 )
 def test_bigtable_table_admin_transport_auth_gdch_credentials(transport_class):
@@ -7326,11 +14215,40 @@ def test_bigtable_table_admin_grpc_transport_client_cert_source_for_mtls(
 )
+def test_bigtable_table_admin_http_transport_client_cert_source_for_mtls():
+ cred = ga_credentials.AnonymousCredentials()
+ with mock.patch(
+ "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel"
+ ) as mock_configure_mtls_channel:
+ transports.BigtableTableAdminRestTransport(
+ credentials=cred, client_cert_source_for_mtls=client_cert_source_callback
+ )
+ mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback)
+
+
+def test_bigtable_table_admin_rest_lro_client():
+ client = BigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+ transport = client.transport
+
+ # Ensure that we have a api-core operations client.
+ assert isinstance(
+ transport.operations_client,
+ operations_v1.AbstractOperationsClient,
+ )
+
+ # Ensure that subsequent calls to the property send the exact same object.
+ assert transport.operations_client is transport.operations_client
+
+
 @pytest.mark.parametrize(
 "transport_name",
 [
 "grpc",
 "grpc_asyncio",
+ "rest",
 ],
 )
 def test_bigtable_table_admin_host_no_port(transport_name):
@@ -7341,7 +14259,11 @@ def test_bigtable_table_admin_host_no_port(transport_name):
 ),
 transport=transport_name,
 )
- assert client.transport._host == ("bigtableadmin.googleapis.com:443")
+ assert client.transport._host == (
+ "bigtableadmin.googleapis.com:443"
+ if transport_name in ["grpc", "grpc_asyncio"]
+ else "https://bigtableadmin.googleapis.com"
+ )
 @pytest.mark.parametrize(
@@ -7349,6 +14271,7 @@ def test_bigtable_table_admin_host_no_port(transport_name):
 [
 "grpc",
 "grpc_asyncio",
+ "rest",
 ],
 )
 def test_bigtable_table_admin_host_with_port(transport_name):
@@ -7359,7 +14282,102 @@ def test_bigtable_table_admin_host_with_port(transport_name):
 ),
 transport=transport_name,
 )
- assert client.transport._host == ("bigtableadmin.googleapis.com:8000")
+ assert client.transport._host == (
+ "bigtableadmin.googleapis.com:8000"
+ if transport_name in ["grpc", "grpc_asyncio"]
+ else "https://bigtableadmin.googleapis.com:8000"
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_name",
+ [
+ "rest",
+ ],
+)
+def test_bigtable_table_admin_client_transport_session_collision(transport_name):
+ creds1 = ga_credentials.AnonymousCredentials()
+ creds2 = ga_credentials.AnonymousCredentials()
+ client1 = BigtableTableAdminClient(
+ credentials=creds1,
+ transport=transport_name,
+ )
+ client2 = BigtableTableAdminClient(
+ credentials=creds2,
+ transport=transport_name,
+ )
+ session1 = client1.transport.create_table._session
+ session2 = client2.transport.create_table._session
+ assert session1 != session2
+ session1 = client1.transport.create_table_from_snapshot._session
+ session2 = client2.transport.create_table_from_snapshot._session
+ assert session1 != session2
+ session1 = client1.transport.list_tables._session
+ session2 = client2.transport.list_tables._session
+ assert session1 != session2
+ session1 = client1.transport.get_table._session
+ session2 = client2.transport.get_table._session
+ assert session1 != session2
+ session1 = client1.transport.update_table._session
+ session2 = client2.transport.update_table._session
+ assert session1 != session2
+ session1 = client1.transport.delete_table._session
+ session2 = client2.transport.delete_table._session
+ assert session1 != session2
+ session1 = client1.transport.undelete_table._session
+ session2 = client2.transport.undelete_table._session
+ assert session1 != session2
+ session1 = client1.transport.modify_column_families._session
+ session2 = client2.transport.modify_column_families._session
+ assert session1 != session2
+ session1 = client1.transport.drop_row_range._session
+ session2 = client2.transport.drop_row_range._session
+ assert session1 != session2
+ session1 = client1.transport.generate_consistency_token._session
+ session2 = client2.transport.generate_consistency_token._session
+ assert session1 != session2
+ session1 = client1.transport.check_consistency._session
+ session2 = client2.transport.check_consistency._session
+ assert session1 != session2
+ session1 = client1.transport.snapshot_table._session
+ session2 = client2.transport.snapshot_table._session
+ assert session1 != session2
+ session1 = client1.transport.get_snapshot._session
+ session2 = client2.transport.get_snapshot._session
+ assert session1 != session2
+ session1 = client1.transport.list_snapshots._session
+ session2 = client2.transport.list_snapshots._session
+ assert session1 != session2
+ session1 = client1.transport.delete_snapshot._session
+ session2 = client2.transport.delete_snapshot._session
+ assert session1 != session2
+ session1 = client1.transport.create_backup._session
+ session2 = client2.transport.create_backup._session
+ assert session1 != session2
+ session1 = client1.transport.get_backup._session
+ session2 = client2.transport.get_backup._session
+ assert session1 != session2
+ session1 = client1.transport.update_backup._session
+ session2 = client2.transport.update_backup._session
+ assert session1 != session2
+ session1 = client1.transport.delete_backup._session
+ session2 = client2.transport.delete_backup._session
+ assert session1 != session2
+ session1 = client1.transport.list_backups._session
+ session2 = client2.transport.list_backups._session
+ assert session1 != session2
+ session1 = client1.transport.restore_table._session
+ session2 = client2.transport.restore_table._session
+ assert session1 != session2
+ session1 = client1.transport.get_iam_policy._session
+ session2 = client2.transport.get_iam_policy._session
+ assert session1 != session2
+ session1 = client1.transport.set_iam_policy._session
+ session2 = client2.transport.set_iam_policy._session
+ assert session1 != session2
+ session1 = client1.transport.test_iam_permissions._session
+ session2 = client2.transport.test_iam_permissions._session
+ assert session1 != session2
 def test_bigtable_table_admin_grpc_transport_channel():
@@ -7833,6 +14851,7 @@ async def test_transport_close_async():
 def test_transport_close():
 transports = {
+ "rest": "_session",
 "grpc": "_grpc_channel",
 }
@@ -7850,6 +14869,7 @@ def test_transport_close():
 def test_client_ctx():
 transports = [
+ "rest",
 "grpc",
 ]
 for transport in transports:
diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py
index e52fc1aba820..03ba3044f92b 100644
--- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py
+++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py
@@ -24,10 +24,17 @@ import grpc
 from grpc.experimental import aio
+from collections.abc import Iterable
+from google.protobuf import json_format
+import json
 import math
 import pytest
 from proto.marshal.rules.dates import DurationRule, TimestampRule
 from proto.marshal.rules import wrappers
+from requests import Response
+from requests import Request, PreparedRequest
+from requests.sessions import Session
+from google.protobuf import json_format
 from google.api_core import client_options
 from google.api_core import exceptions as core_exceptions
@@ -44,6 +51,8 @@ from google.cloud.bigtable_v2.types import data
 from google.cloud.bigtable_v2.types import request_stats
 from google.oauth2 import service_account
+from google.protobuf import duration_pb2 # type: ignore
+from google.protobuf import timestamp_pb2 # type: ignore
 import google.auth
@@ -91,6 +100,7 @@ def test__get_default_mtls_endpoint():
 [
 (BigtableClient, "grpc"),
 (BigtableAsyncClient, "grpc_asyncio"),
+ (BigtableClient, "rest"),
 ],
 )
 def test_bigtable_client_from_service_account_info(client_class, transport_name):
@@ -104,7 +114,11 @@ def test_bigtable_client_from_service_account_info(client_class, transport_name)
 assert client.transport._credentials == creds
 assert isinstance(client, client_class)
- assert client.transport._host == ("bigtable.googleapis.com:443")
+ assert client.transport._host == (
+ "bigtable.googleapis.com:443"
+ if transport_name in ["grpc", "grpc_asyncio"]
+ else "https://bigtable.googleapis.com"
+ )
 @pytest.mark.parametrize(
@@ -112,6 +126,7 @@ def test_bigtable_client_from_service_account_info(client_class, transport_name)
 [
 (transports.BigtableGrpcTransport, "grpc"),
 (transports.BigtableGrpcAsyncIOTransport, "grpc_asyncio"),
+ (transports.BigtableRestTransport, "rest"),
 ],
 )
 def test_bigtable_client_service_account_always_use_jwt(
@@ -137,6 +152,7 @@ def test_bigtable_client_service_account_always_use_jwt(
 [
 (BigtableClient, "grpc"),
 (BigtableAsyncClient, "grpc_asyncio"),
+ (BigtableClient, "rest"),
 ],
 )
 def test_bigtable_client_from_service_account_file(client_class, transport_name):
@@ -157,13 +173,18 @@ def test_bigtable_client_from_service_account_file(client_class, transport_name)
 assert client.transport._credentials == creds
 assert isinstance(client, client_class)
- assert client.transport._host == ("bigtable.googleapis.com:443")
+ assert client.transport._host == (
+ "bigtable.googleapis.com:443"
+ if transport_name in ["grpc", "grpc_asyncio"]
+ else "https://bigtable.googleapis.com"
+ )
 def test_bigtable_client_get_transport_class():
 transport = BigtableClient.get_transport_class()
 available_transports = [
 transports.BigtableGrpcTransport,
+ transports.BigtableRestTransport,
 ]
 assert transport in available_transports
@@ -176,6 +197,7 @@ def test_bigtable_client_get_transport_class():
 [
 (BigtableClient, transports.BigtableGrpcTransport, "grpc"),
 (BigtableAsyncClient, transports.BigtableGrpcAsyncIOTransport, "grpc_asyncio"),
+ (BigtableClient, transports.BigtableRestTransport, "rest"),
 ],
 )
 @mock.patch.object(
@@ -317,6 +339,8 @@ def test_bigtable_client_client_options(client_class, transport_class, transport
 "grpc_asyncio",
 "false",
 ),
+ (BigtableClient, transports.BigtableRestTransport, "rest", "true"),
+ (BigtableClient, transports.BigtableRestTransport, "rest", "false"),
 ],
 )
 @mock.patch.object(
@@ -506,6 +530,7 @@ def test_bigtable_client_get_mtls_endpoint_and_cert_source(client_class):
 [
 (BigtableClient, transports.BigtableGrpcTransport, "grpc"),
 (BigtableAsyncClient, transports.BigtableGrpcAsyncIOTransport, "grpc_asyncio"),
+ (BigtableClient, transports.BigtableRestTransport, "rest"),
 ],
 )
 def test_bigtable_client_client_options_scopes(
@@ -541,6 +566,7 @@ def test_bigtable_client_client_options_scopes(
 "grpc_asyncio",
 grpc_helpers_async,
 ),
+ (BigtableClient, transports.BigtableRestTransport, "rest", None),
 ],
 )
 def test_bigtable_client_client_options_credentials_file(
@@ -2434,222 +2460,3348 @@ async def test_read_modify_write_row_flattened_error_async():
 )
-def test_credentials_transport_error():
- # It is an error to provide credentials and a transport instance.
- transport = transports.BigtableGrpcTransport(
+@pytest.mark.parametrize(
+ "request_type",
+ [
+ bigtable.GenerateInitialChangeStreamPartitionsRequest,
+ dict,
+ ],
+)
+def test_generate_initial_change_stream_partitions(
+ request_type, transport: str = "grpc"
+):
+ client = BigtableClient(
 credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
 )
- with pytest.raises(ValueError):
- client = BigtableClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
- # It is an error to provide a credentials file and a transport instance.
- transport = transports.BigtableGrpcTransport(
- credentials=ga_credentials.AnonymousCredentials(),
- )
- with pytest.raises(ValueError):
- client = BigtableClient(
- client_options={"credentials_file": "credentials.json"},
- transport=transport,
- )
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
- # It is an error to provide an api_key and a transport instance.
- transport = transports.BigtableGrpcTransport(
- credentials=ga_credentials.AnonymousCredentials(),
- )
- options = client_options.ClientOptions()
- options.api_key = "api_key"
- with pytest.raises(ValueError):
- client = BigtableClient(
- client_options=options,
- transport=transport,
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.generate_initial_change_stream_partitions), "__call__"
+ ) as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = iter(
+ [bigtable.GenerateInitialChangeStreamPartitionsResponse()]
 )
+ response = client.generate_initial_change_stream_partitions(request)
- # It is an error to provide an api_key and a credential.
- options = mock.Mock()
- options.api_key = "api_key"
- with pytest.raises(ValueError):
- client = BigtableClient(
- client_options=options, credentials=ga_credentials.AnonymousCredentials()
- )
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == bigtable.GenerateInitialChangeStreamPartitionsRequest()
- # It is an error to provide scopes and a transport instance.
- transport = transports.BigtableGrpcTransport(
- credentials=ga_credentials.AnonymousCredentials(),
- )
- with pytest.raises(ValueError):
- client = BigtableClient(
- client_options={"scopes": ["1", "2"]},
- transport=transport,
+ # Establish that the response is the type that we expect.
+ for message in response: + assert isinstance( + message, bigtable.GenerateInitialChangeStreamPartitionsResponse ) -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.BigtableGrpcTransport( +def test_generate_initial_change_stream_partitions_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) - client = BigtableClient(transport=transport) - assert client.transport is transport + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.generate_initial_change_stream_partitions), "__call__" + ) as call: + client.generate_initial_change_stream_partitions() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable.GenerateInitialChangeStreamPartitionsRequest() -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.BigtableGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - transport = transports.BigtableGrpcAsyncIOTransport( +@pytest.mark.asyncio +async def test_generate_initial_change_stream_partitions_async( + transport: str = "grpc_asyncio", + request_type=bigtable.GenerateInitialChangeStreamPartitionsRequest, +): + client = BigtableAsyncClient( credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) - channel = transport.grpc_channel - assert channel + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() -@pytest.mark.parametrize( - "transport_class", - [ - transports.BigtableGrpcTransport, - transports.BigtableGrpcAsyncIOTransport, - ], -) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, "default") as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.generate_initial_change_stream_partitions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.GenerateInitialChangeStreamPartitionsResponse()] + ) + response = await client.generate_initial_change_stream_partitions(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable.GenerateInitialChangeStreamPartitionsRequest() -@pytest.mark.parametrize( - "transport_name", - [ - "grpc", - ], -) -def test_transport_kind(transport_name): - transport = BigtableClient.get_transport_class(transport_name)( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert transport.kind == transport_name + # Establish that the response is the type that we expect. + message = await response.read() + assert isinstance(message, bigtable.GenerateInitialChangeStreamPartitionsResponse) -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. 
+@pytest.mark.asyncio +async def test_generate_initial_change_stream_partitions_async_from_dict(): + await test_generate_initial_change_stream_partitions_async(request_type=dict) + + +def test_generate_initial_change_stream_partitions_field_headers(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), ) - assert isinstance( - client.transport, - transports.BigtableGrpcTransport, - ) + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable.GenerateInitialChangeStreamPartitionsRequest() -def test_bigtable_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.BigtableTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json", + request.table_name = "table_name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.generate_initial_change_stream_partitions), "__call__" + ) as call: + call.return_value = iter( + [bigtable.GenerateInitialChangeStreamPartitionsResponse()] ) + client.generate_initial_change_stream_partitions(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request -def test_bigtable_base_transport(): - # Instantiate the base transport. - with mock.patch( - "google.cloud.bigtable_v2.services.bigtable.transports.BigtableTransport.__init__" - ) as Transport: - Transport.return_value = None - transport = transports.BigtableTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "table_name=table_name_value", + ) in kw["metadata"] - # Every method on the transport should just blindly - # raise NotImplementedError. - methods = ( - "read_rows", - "sample_row_keys", - "mutate_row", - "mutate_rows", - "check_and_mutate_row", - "ping_and_warm", - "read_modify_write_row", - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - with pytest.raises(NotImplementedError): - transport.close() +@pytest.mark.asyncio +async def test_generate_initial_change_stream_partitions_field_headers_async(): + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) - # Catch all for all remaining methods and properties - remainder = [ - "kind", - ] - for r in remainder: - with pytest.raises(NotImplementedError): - getattr(transport, r)() + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable.GenerateInitialChangeStreamPartitionsRequest() + request.table_name = "table_name_value" -def test_bigtable_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file + # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object( - google.auth, "load_credentials_from_file", autospec=True - ) as load_creds, mock.patch( - "google.cloud.bigtable_v2.services.bigtable.transports.BigtableTransport._prep_wrapped_messages" - ) as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.BigtableTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with( - "credentials.json", - scopes=None, - default_scopes=( - "https://www.googleapis.com/auth/bigtable.data", - "https://www.googleapis.com/auth/bigtable.data.readonly", - "https://www.googleapis.com/auth/cloud-bigtable.data", - "https://www.googleapis.com/auth/cloud-bigtable.data.readonly", - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - ), - quota_project_id="octopus", + type(client.transport.generate_initial_change_stream_partitions), "__call__" + ) as call: + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.GenerateInitialChangeStreamPartitionsResponse()] ) + await client.generate_initial_change_stream_partitions(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request -def test_bigtable_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. - with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch( - "google.cloud.bigtable_v2.services.bigtable.transports.BigtableTransport._prep_wrapped_messages" - ) as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.BigtableTransport() - adc.assert_called_once() + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "table_name=table_name_value", + ) in kw["metadata"] -def test_bigtable_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - BigtableClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - "https://www.googleapis.com/auth/bigtable.data", - "https://www.googleapis.com/auth/bigtable.data.readonly", - "https://www.googleapis.com/auth/cloud-bigtable.data", - "https://www.googleapis.com/auth/cloud-bigtable.data.readonly", - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - ), - quota_project_id=None, +def test_generate_initial_change_stream_partitions_flattened(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.generate_initial_change_stream_partitions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iter( + [bigtable.GenerateInitialChangeStreamPartitionsResponse()] + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.generate_initial_change_stream_partitions( + table_name="table_name_value", + app_profile_id="app_profile_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].table_name + mock_val = "table_name_value" + assert arg == mock_val + arg = args[0].app_profile_id + mock_val = "app_profile_id_value" + assert arg == mock_val + + +def test_generate_initial_change_stream_partitions_flattened_error(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.generate_initial_change_stream_partitions( + bigtable.GenerateInitialChangeStreamPartitionsRequest(), + table_name="table_name_value", + app_profile_id="app_profile_id_value", + ) + + +@pytest.mark.asyncio +async def test_generate_initial_change_stream_partitions_flattened_async(): + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.generate_initial_change_stream_partitions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iter( + [bigtable.GenerateInitialChangeStreamPartitionsResponse()] + ) + + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.generate_initial_change_stream_partitions( + table_name="table_name_value", + app_profile_id="app_profile_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].table_name + mock_val = "table_name_value" + assert arg == mock_val + arg = args[0].app_profile_id + mock_val = "app_profile_id_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_generate_initial_change_stream_partitions_flattened_error_async(): + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.generate_initial_change_stream_partitions( + bigtable.GenerateInitialChangeStreamPartitionsRequest(), + table_name="table_name_value", + app_profile_id="app_profile_id_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable.ReadChangeStreamRequest, + dict, + ], +) +def test_read_change_stream(request_type, transport: str = "grpc"): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_change_stream), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iter([bigtable.ReadChangeStreamResponse()]) + response = client.read_change_stream(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable.ReadChangeStreamRequest() + + # Establish that the response is the type that we expect. + for message in response: + assert isinstance(message, bigtable.ReadChangeStreamResponse) + + +def test_read_change_stream_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_change_stream), "__call__" + ) as call: + client.read_change_stream() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable.ReadChangeStreamRequest() + + +@pytest.mark.asyncio +async def test_read_change_stream_async( + transport: str = "grpc_asyncio", request_type=bigtable.ReadChangeStreamRequest +): + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_change_stream), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.ReadChangeStreamResponse()] + ) + response = await client.read_change_stream(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable.ReadChangeStreamRequest() + + # Establish that the response is the type that we expect. + message = await response.read() + assert isinstance(message, bigtable.ReadChangeStreamResponse) + + +@pytest.mark.asyncio +async def test_read_change_stream_async_from_dict(): + await test_read_change_stream_async(request_type=dict) + + +def test_read_change_stream_field_headers(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable.ReadChangeStreamRequest() + + request.table_name = "table_name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_change_stream), "__call__" + ) as call: + call.return_value = iter([bigtable.ReadChangeStreamResponse()]) + client.read_change_stream(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "table_name=table_name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_read_change_stream_field_headers_async(): + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = bigtable.ReadChangeStreamRequest() + + request.table_name = "table_name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_change_stream), "__call__" + ) as call: + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.ReadChangeStreamResponse()] + ) + await client.read_change_stream(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "table_name=table_name_value", + ) in kw["metadata"] + + +def test_read_change_stream_flattened(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_change_stream), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iter([bigtable.ReadChangeStreamResponse()]) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.read_change_stream( + table_name="table_name_value", + app_profile_id="app_profile_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].table_name + mock_val = "table_name_value" + assert arg == mock_val + arg = args[0].app_profile_id + mock_val = "app_profile_id_value" + assert arg == mock_val + + +def test_read_change_stream_flattened_error(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.read_change_stream( + bigtable.ReadChangeStreamRequest(), + table_name="table_name_value", + app_profile_id="app_profile_id_value", + ) + + +@pytest.mark.asyncio +async def test_read_change_stream_flattened_async(): + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_change_stream), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iter([bigtable.ReadChangeStreamResponse()]) + + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.read_change_stream( + table_name="table_name_value", + app_profile_id="app_profile_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].table_name + mock_val = "table_name_value" + assert arg == mock_val + arg = args[0].app_profile_id + mock_val = "app_profile_id_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_read_change_stream_flattened_error_async(): + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.read_change_stream( + bigtable.ReadChangeStreamRequest(), + table_name="table_name_value", + app_profile_id="app_profile_id_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable.ReadRowsRequest, + dict, + ], +) +def test_read_rows_rest(request_type): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.ReadRowsResponse( + last_scanned_row_key=b"last_scanned_row_key_blob", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = bigtable.ReadRowsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + json_return_value = "[{}]".format(json_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + with mock.patch.object(response_value, "iter_content") as iter_content: + iter_content.return_value = iter(json_return_value) + response = client.read_rows(request) + + assert isinstance(response, Iterable) + response = next(response) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, bigtable.ReadRowsResponse) + assert response.last_scanned_row_key == b"last_scanned_row_key_blob" + + +def test_read_rows_rest_required_fields(request_type=bigtable.ReadRowsRequest): + transport_class = transports.BigtableRestTransport + + request_init = {} + request_init["table_name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).read_rows._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["tableName"] = "table_name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).read_rows._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "tableName" in jsonified_request + assert jsonified_request["tableName"] == "table_name_value" + + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable.ReadRowsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + pb_return_value = bigtable.ReadRowsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = "[{}]".format(json_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + with mock.patch.object(response_value, "iter_content") as iter_content: + iter_content.return_value = iter(json_return_value) + response = client.read_rows(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_read_rows_rest_unset_required_fields(): + transport = transports.BigtableRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.read_rows._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("tableName",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_read_rows_rest_interceptors(null_interceptor): + transport = transports.BigtableRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), + ) + client = BigtableClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableRestInterceptor, "post_read_rows" + ) as post, mock.patch.object( + transports.BigtableRestInterceptor, "pre_read_rows" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable.ReadRowsRequest.pb(bigtable.ReadRowsRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = bigtable.ReadRowsResponse.to_json( + bigtable.ReadRowsResponse() + ) + req.return_value._content = "[{}]".format(req.return_value._content) + + request = bigtable.ReadRowsRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable.ReadRowsResponse() + + client.read_rows( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_read_rows_rest_bad_request( + transport: str = "rest", request_type=bigtable.ReadRowsRequest +): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.read_rows(request) + + +def test_read_rows_rest_flattened(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.ReadRowsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = { + "table_name": "projects/sample1/instances/sample2/tables/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + table_name="table_name_value", + app_profile_id="app_profile_id_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = bigtable.ReadRowsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = "[{}]".format(json_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + with mock.patch.object(response_value, "iter_content") as iter_content: + iter_content.return_value = iter(json_return_value) + client.read_rows(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{table_name=projects/*/instances/*/tables/*}:readRows" + % client.transport._host, + args[1], + ) + + +def test_read_rows_rest_flattened_error(transport: str = "rest"): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.read_rows( + bigtable.ReadRowsRequest(), + table_name="table_name_value", + app_profile_id="app_profile_id_value", + ) + + +def test_read_rows_rest_error(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable.SampleRowKeysRequest, + dict, + ], +) +def test_sample_row_keys_rest(request_type): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = bigtable.SampleRowKeysResponse( + row_key=b"row_key_blob", + offset_bytes=1293, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = bigtable.SampleRowKeysResponse.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + json_return_value = "[{}]".format(json_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + with mock.patch.object(response_value, "iter_content") as iter_content: + iter_content.return_value = iter(json_return_value) + response = client.sample_row_keys(request) + + assert isinstance(response, Iterable) + response = next(response) + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable.SampleRowKeysResponse) + assert response.row_key == b"row_key_blob" + assert response.offset_bytes == 1293 + + +def test_sample_row_keys_rest_required_fields( + request_type=bigtable.SampleRowKeysRequest, +): + transport_class = transports.BigtableRestTransport + + request_init = {} + request_init["table_name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).sample_row_keys._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["tableName"] = "table_name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).sample_row_keys._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("app_profile_id",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "tableName" in jsonified_request + assert jsonified_request["tableName"] == "table_name_value" + + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable.SampleRowKeysResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + pb_return_value = bigtable.SampleRowKeysResponse.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = "[{}]".format(json_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + with mock.patch.object(response_value, "iter_content") as iter_content: + iter_content.return_value = iter(json_return_value) + response = client.sample_row_keys(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_sample_row_keys_rest_unset_required_fields(): + transport = transports.BigtableRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.sample_row_keys._get_unset_required_fields({}) + assert set(unset_fields) == (set(("appProfileId",)) & set(("tableName",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_sample_row_keys_rest_interceptors(null_interceptor): + transport = transports.BigtableRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), + ) + client = BigtableClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableRestInterceptor, "post_sample_row_keys" + ) as post, mock.patch.object( + transports.BigtableRestInterceptor, "pre_sample_row_keys" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable.SampleRowKeysRequest.pb(bigtable.SampleRowKeysRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = bigtable.SampleRowKeysResponse.to_json( + bigtable.SampleRowKeysResponse() + ) + req.return_value._content = "[{}]".format(req.return_value._content) + + request = bigtable.SampleRowKeysRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable.SampleRowKeysResponse() + + client.sample_row_keys( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_sample_row_keys_rest_bad_request( + transport: str = "rest", request_type=bigtable.SampleRowKeysRequest +): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.sample_row_keys(request) + + +def test_sample_row_keys_rest_flattened(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.SampleRowKeysResponse() + + # get arguments that satisfy an http rule for this method + sample_request = { + "table_name": "projects/sample1/instances/sample2/tables/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + table_name="table_name_value", + app_profile_id="app_profile_id_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = bigtable.SampleRowKeysResponse.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = "[{}]".format(json_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + with mock.patch.object(response_value, "iter_content") as iter_content: + iter_content.return_value = iter(json_return_value) + client.sample_row_keys(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys" + % client.transport._host, + args[1], + ) + + +def test_sample_row_keys_rest_flattened_error(transport: str = "rest"): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.sample_row_keys( + bigtable.SampleRowKeysRequest(), + table_name="table_name_value", + app_profile_id="app_profile_id_value", + ) + + +def test_sample_row_keys_rest_error(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable.MutateRowRequest, + dict, + ], +) +def test_mutate_row_rest(request_type): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = bigtable.MutateRowResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = bigtable.MutateRowResponse.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.mutate_row(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable.MutateRowResponse) + + +def test_mutate_row_rest_required_fields(request_type=bigtable.MutateRowRequest): + transport_class = transports.BigtableRestTransport + + request_init = {} + request_init["table_name"] = "" + request_init["row_key"] = b"" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).mutate_row._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["tableName"] = "table_name_value" + jsonified_request["rowKey"] = b"row_key_blob" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).mutate_row._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "tableName" in jsonified_request + assert jsonified_request["tableName"] == "table_name_value" + assert "rowKey" in jsonified_request + assert jsonified_request["rowKey"] == b"row_key_blob" + + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable.MutateRowResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + pb_return_value = bigtable.MutateRowResponse.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.mutate_row(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_mutate_row_rest_unset_required_fields(): + transport = transports.BigtableRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.mutate_row._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "tableName", + "rowKey", + "mutations", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_mutate_row_rest_interceptors(null_interceptor): + transport = transports.BigtableRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), + ) + client = BigtableClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableRestInterceptor, "post_mutate_row" + ) as post, mock.patch.object( + transports.BigtableRestInterceptor, "pre_mutate_row" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable.MutateRowRequest.pb(bigtable.MutateRowRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = bigtable.MutateRowResponse.to_json( + bigtable.MutateRowResponse() + ) + + request = bigtable.MutateRowRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable.MutateRowResponse() + + client.mutate_row( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_mutate_row_rest_bad_request( + transport: str = "rest", request_type=bigtable.MutateRowRequest +): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.mutate_row(request) + + +def test_mutate_row_rest_flattened(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.MutateRowResponse() + + # get arguments that satisfy an http rule for this method + sample_request = { + "table_name": "projects/sample1/instances/sample2/tables/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + table_name="table_name_value", + row_key=b"row_key_blob", + mutations=[ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ], + app_profile_id="app_profile_id_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = bigtable.MutateRowResponse.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.mutate_row(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow" + % client.transport._host, + args[1], + ) + + +def test_mutate_row_rest_flattened_error(transport: str = "rest"): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.mutate_row( + bigtable.MutateRowRequest(), + table_name="table_name_value", + row_key=b"row_key_blob", + mutations=[ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ], + app_profile_id="app_profile_id_value", + ) + + +def test_mutate_row_rest_error(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable.MutateRowsRequest, + dict, + ], +) +def test_mutate_rows_rest(request_type): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = bigtable.MutateRowsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = bigtable.MutateRowsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + json_return_value = "[{}]".format(json_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + with mock.patch.object(response_value, "iter_content") as iter_content: + iter_content.return_value = iter(json_return_value) + response = client.mutate_rows(request) + + assert isinstance(response, Iterable) + response = next(response) + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable.MutateRowsResponse) + + +def test_mutate_rows_rest_required_fields(request_type=bigtable.MutateRowsRequest): + transport_class = transports.BigtableRestTransport + + request_init = {} + request_init["table_name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).mutate_rows._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["tableName"] = "table_name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).mutate_rows._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "tableName" in jsonified_request + assert jsonified_request["tableName"] == "table_name_value" + + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable.MutateRowsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + pb_return_value = bigtable.MutateRowsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = "[{}]".format(json_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + with mock.patch.object(response_value, "iter_content") as iter_content: + iter_content.return_value = iter(json_return_value) + response = client.mutate_rows(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_mutate_rows_rest_unset_required_fields(): + transport = transports.BigtableRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.mutate_rows._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "tableName", + "entries", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_mutate_rows_rest_interceptors(null_interceptor): + transport = transports.BigtableRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), + ) + client = BigtableClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableRestInterceptor, "post_mutate_rows" + ) as post, mock.patch.object( + transports.BigtableRestInterceptor, "pre_mutate_rows" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable.MutateRowsRequest.pb(bigtable.MutateRowsRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = bigtable.MutateRowsResponse.to_json( + bigtable.MutateRowsResponse() + ) + req.return_value._content = "[{}]".format(req.return_value._content) + + request = bigtable.MutateRowsRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable.MutateRowsResponse() + + client.mutate_rows( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_mutate_rows_rest_bad_request( + transport: str = "rest", request_type=bigtable.MutateRowsRequest +): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.mutate_rows(request) + + +def test_mutate_rows_rest_flattened(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.MutateRowsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = { + "table_name": "projects/sample1/instances/sample2/tables/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + table_name="table_name_value", + entries=[bigtable.MutateRowsRequest.Entry(row_key=b"row_key_blob")], + app_profile_id="app_profile_id_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = bigtable.MutateRowsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = "[{}]".format(json_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + with mock.patch.object(response_value, "iter_content") as iter_content: + iter_content.return_value = iter(json_return_value) + client.mutate_rows(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows" + % client.transport._host, + args[1], + ) + + +def test_mutate_rows_rest_flattened_error(transport: str = "rest"): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.mutate_rows( + bigtable.MutateRowsRequest(), + table_name="table_name_value", + entries=[bigtable.MutateRowsRequest.Entry(row_key=b"row_key_blob")], + app_profile_id="app_profile_id_value", + ) + + +def test_mutate_rows_rest_error(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable.CheckAndMutateRowRequest, + dict, + ], +) +def test_check_and_mutate_row_rest(request_type): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = bigtable.CheckAndMutateRowResponse( + predicate_matched=True, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = bigtable.CheckAndMutateRowResponse.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.check_and_mutate_row(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable.CheckAndMutateRowResponse) + assert response.predicate_matched is True + + +def test_check_and_mutate_row_rest_required_fields( + request_type=bigtable.CheckAndMutateRowRequest, +): + transport_class = transports.BigtableRestTransport + + request_init = {} + request_init["table_name"] = "" + request_init["row_key"] = b"" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).check_and_mutate_row._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["tableName"] = "table_name_value" + jsonified_request["rowKey"] = b"row_key_blob" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).check_and_mutate_row._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "tableName" in jsonified_request + assert jsonified_request["tableName"] == "table_name_value" + assert "rowKey" in jsonified_request + assert jsonified_request["rowKey"] == b"row_key_blob" + + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable.CheckAndMutateRowResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + pb_return_value = bigtable.CheckAndMutateRowResponse.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.check_and_mutate_row(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_check_and_mutate_row_rest_unset_required_fields(): + transport = transports.BigtableRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.check_and_mutate_row._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "tableName", + "rowKey", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_check_and_mutate_row_rest_interceptors(null_interceptor): + transport = transports.BigtableRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), + ) + client = BigtableClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableRestInterceptor, "post_check_and_mutate_row" + ) as post, mock.patch.object( + transports.BigtableRestInterceptor, "pre_check_and_mutate_row" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable.CheckAndMutateRowRequest.pb( + bigtable.CheckAndMutateRowRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = bigtable.CheckAndMutateRowResponse.to_json( + bigtable.CheckAndMutateRowResponse() + ) + + request = bigtable.CheckAndMutateRowRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable.CheckAndMutateRowResponse() + + client.check_and_mutate_row( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_check_and_mutate_row_rest_bad_request( + transport: str = "rest", request_type=bigtable.CheckAndMutateRowRequest +): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.check_and_mutate_row(request) + + +def test_check_and_mutate_row_rest_flattened(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.CheckAndMutateRowResponse() + + # get arguments that satisfy an http rule for this method + sample_request = { + "table_name": "projects/sample1/instances/sample2/tables/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + table_name="table_name_value", + row_key=b"row_key_blob", + predicate_filter=data.RowFilter( + chain=data.RowFilter.Chain( + filters=[ + data.RowFilter( + chain=data.RowFilter.Chain( + filters=[data.RowFilter(chain=None)] + ) + ) + ] + ) + ), + true_mutations=[ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ], + false_mutations=[ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ], + app_profile_id="app_profile_id_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = bigtable.CheckAndMutateRowResponse.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.check_and_mutate_row(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow" + % client.transport._host, + args[1], + ) + + +def test_check_and_mutate_row_rest_flattened_error(transport: str = "rest"): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.check_and_mutate_row( + bigtable.CheckAndMutateRowRequest(), + table_name="table_name_value", + row_key=b"row_key_blob", + predicate_filter=data.RowFilter( + chain=data.RowFilter.Chain( + filters=[ + data.RowFilter( + chain=data.RowFilter.Chain( + filters=[data.RowFilter(chain=None)] + ) + ) + ] + ) + ), + true_mutations=[ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ], + false_mutations=[ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ], + app_profile_id="app_profile_id_value", + ) + + +def test_check_and_mutate_row_rest_error(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable.PingAndWarmRequest, + dict, + ], +) +def test_ping_and_warm_rest(request_type): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.PingAndWarmResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = bigtable.PingAndWarmResponse.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.ping_and_warm(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable.PingAndWarmResponse) + + +def test_ping_and_warm_rest_required_fields(request_type=bigtable.PingAndWarmRequest): + transport_class = transports.BigtableRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).ping_and_warm._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).ping_and_warm._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable.PingAndWarmResponse() + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + pb_return_value = bigtable.PingAndWarmResponse.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.ping_and_warm(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_ping_and_warm_rest_unset_required_fields(): + transport = transports.BigtableRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.ping_and_warm._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_ping_and_warm_rest_interceptors(null_interceptor): + transport = transports.BigtableRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), + ) + client = BigtableClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableRestInterceptor, "post_ping_and_warm" + ) as post, mock.patch.object( + transports.BigtableRestInterceptor, "pre_ping_and_warm" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable.PingAndWarmRequest.pb(bigtable.PingAndWarmRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = bigtable.PingAndWarmResponse.to_json( + bigtable.PingAndWarmResponse() + ) + + request = bigtable.PingAndWarmRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable.PingAndWarmResponse() + + client.ping_and_warm( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_ping_and_warm_rest_bad_request( + transport: str = "rest", request_type=bigtable.PingAndWarmRequest +): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.ping_and_warm(request) + + +def test_ping_and_warm_rest_flattened(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.PingAndWarmResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"name": "projects/sample1/instances/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + app_profile_id="app_profile_id_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = bigtable.PingAndWarmResponse.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.ping_and_warm(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*}:ping" % client.transport._host, args[1] + ) + + +def test_ping_and_warm_rest_flattened_error(transport: str = "rest"): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.ping_and_warm( + bigtable.PingAndWarmRequest(), + name="name_value", + app_profile_id="app_profile_id_value", + ) + + +def test_ping_and_warm_rest_error(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable.ReadModifyWriteRowRequest, + dict, + ], +) +def test_read_modify_write_row_rest(request_type): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.ReadModifyWriteRowResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = bigtable.ReadModifyWriteRowResponse.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.read_modify_write_row(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, bigtable.ReadModifyWriteRowResponse) + + +def test_read_modify_write_row_rest_required_fields( + request_type=bigtable.ReadModifyWriteRowRequest, +): + transport_class = transports.BigtableRestTransport + + request_init = {} + request_init["table_name"] = "" + request_init["row_key"] = b"" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).read_modify_write_row._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["tableName"] = "table_name_value" + jsonified_request["rowKey"] = b"row_key_blob" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).read_modify_write_row._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "tableName" in jsonified_request + assert jsonified_request["tableName"] == "table_name_value" + assert "rowKey" in jsonified_request + assert jsonified_request["rowKey"] == b"row_key_blob" + + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable.ReadModifyWriteRowResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + pb_return_value = bigtable.ReadModifyWriteRowResponse.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.read_modify_write_row(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_read_modify_write_row_rest_unset_required_fields(): + transport = transports.BigtableRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.read_modify_write_row._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "tableName", + "rowKey", + "rules", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_read_modify_write_row_rest_interceptors(null_interceptor): + transport = transports.BigtableRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), + ) + client = BigtableClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableRestInterceptor, "post_read_modify_write_row" + ) as post, mock.patch.object( + transports.BigtableRestInterceptor, "pre_read_modify_write_row" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable.ReadModifyWriteRowRequest.pb( + bigtable.ReadModifyWriteRowRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = bigtable.ReadModifyWriteRowResponse.to_json( + bigtable.ReadModifyWriteRowResponse() + ) + + request = bigtable.ReadModifyWriteRowRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable.ReadModifyWriteRowResponse() + + client.read_modify_write_row( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_read_modify_write_row_rest_bad_request( + transport: str = "rest", request_type=bigtable.ReadModifyWriteRowRequest +): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.read_modify_write_row(request) + + +def test_read_modify_write_row_rest_flattened(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.ReadModifyWriteRowResponse() + + # get arguments that satisfy an http rule for this method + sample_request = { + "table_name": "projects/sample1/instances/sample2/tables/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + table_name="table_name_value", + row_key=b"row_key_blob", + rules=[data.ReadModifyWriteRule(family_name="family_name_value")], + app_profile_id="app_profile_id_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = bigtable.ReadModifyWriteRowResponse.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.read_modify_write_row(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow" + % client.transport._host, + args[1], + ) + + +def test_read_modify_write_row_rest_flattened_error(transport: str = "rest"): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.read_modify_write_row( + bigtable.ReadModifyWriteRowRequest(), + table_name="table_name_value", + row_key=b"row_key_blob", + rules=[data.ReadModifyWriteRule(family_name="family_name_value")], + app_profile_id="app_profile_id_value", + ) + + +def test_read_modify_write_row_rest_error(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable.GenerateInitialChangeStreamPartitionsRequest, + dict, + ], +) +def test_generate_initial_change_stream_partitions_rest(request_type): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(pb_return_value) + + json_return_value = "[{}]".format(json_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + with mock.patch.object(response_value, "iter_content") as iter_content: + iter_content.return_value = iter(json_return_value) + response = client.generate_initial_change_stream_partitions(request) + + assert isinstance(response, Iterable) + response = next(response) + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable.GenerateInitialChangeStreamPartitionsResponse) + + +def test_generate_initial_change_stream_partitions_rest_required_fields( + request_type=bigtable.GenerateInitialChangeStreamPartitionsRequest, +): + transport_class = transports.BigtableRestTransport + + request_init = {} + request_init["table_name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).generate_initial_change_stream_partitions._get_unset_required_fields( + jsonified_request + ) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["tableName"] = "table_name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).generate_initial_change_stream_partitions._get_unset_required_fields( + jsonified_request + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "tableName" in jsonified_request + assert jsonified_request["tableName"] == "table_name_value" + + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + pb_return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = "[{}]".format(json_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + with mock.patch.object(response_value, "iter_content") as iter_content: + iter_content.return_value = iter(json_return_value) + response = client.generate_initial_change_stream_partitions(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_generate_initial_change_stream_partitions_rest_unset_required_fields(): + transport = transports.BigtableRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = ( + transport.generate_initial_change_stream_partitions._get_unset_required_fields( + {} + ) + ) + assert set(unset_fields) == (set(()) & set(("tableName",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_generate_initial_change_stream_partitions_rest_interceptors(null_interceptor): + transport = transports.BigtableRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), + ) + client = BigtableClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableRestInterceptor, + "post_generate_initial_change_stream_partitions", + ) as post, mock.patch.object( + transports.BigtableRestInterceptor, + "pre_generate_initial_change_stream_partitions", + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable.GenerateInitialChangeStreamPartitionsRequest.pb( + bigtable.GenerateInitialChangeStreamPartitionsRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = ( + bigtable.GenerateInitialChangeStreamPartitionsResponse.to_json( + bigtable.GenerateInitialChangeStreamPartitionsResponse() + ) + ) + req.return_value._content = "[{}]".format(req.return_value._content) + + request = bigtable.GenerateInitialChangeStreamPartitionsRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse() + + client.generate_initial_change_stream_partitions( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_generate_initial_change_stream_partitions_rest_bad_request( + transport: str = "rest", + request_type=bigtable.GenerateInitialChangeStreamPartitionsRequest, +): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + 
request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.generate_initial_change_stream_partitions(request) + + +def test_generate_initial_change_stream_partitions_rest_flattened(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = { + "table_name": "projects/sample1/instances/sample2/tables/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + table_name="table_name_value", + app_profile_id="app_profile_id_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = "[{}]".format(json_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + with mock.patch.object(response_value, "iter_content") as iter_content: + iter_content.return_value = iter(json_return_value) + client.generate_initial_change_stream_partitions(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{table_name=projects/*/instances/*/tables/*}:generateInitialChangeStreamPartitions" + % client.transport._host, + args[1], + ) + + +def test_generate_initial_change_stream_partitions_rest_flattened_error( + transport: str = "rest", +): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.generate_initial_change_stream_partitions( + bigtable.GenerateInitialChangeStreamPartitionsRequest(), + table_name="table_name_value", + app_profile_id="app_profile_id_value", + ) + + +def test_generate_initial_change_stream_partitions_rest_error(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable.ReadChangeStreamRequest, + dict, + ], +) +def test_read_change_stream_rest(request_type): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.ReadChangeStreamResponse( + data_change=bigtable.ReadChangeStreamResponse.DataChange( + type_=bigtable.ReadChangeStreamResponse.DataChange.Type.USER + ), + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = bigtable.ReadChangeStreamResponse.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + json_return_value = "[{}]".format(json_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + with mock.patch.object(response_value, "iter_content") as iter_content: + iter_content.return_value = iter(json_return_value) + response = client.read_change_stream(request) + + assert isinstance(response, Iterable) + response = next(response) + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable.ReadChangeStreamResponse) + + +def test_read_change_stream_rest_required_fields( + request_type=bigtable.ReadChangeStreamRequest, +): + transport_class = transports.BigtableRestTransport + + request_init = {} + request_init["table_name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).read_change_stream._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["tableName"] = "table_name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).read_change_stream._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "tableName" in jsonified_request + assert jsonified_request["tableName"] == "table_name_value" + + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable.ReadChangeStreamResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + pb_return_value = bigtable.ReadChangeStreamResponse.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = "[{}]".format(json_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + with mock.patch.object(response_value, "iter_content") as iter_content: + iter_content.return_value = iter(json_return_value) + response = client.read_change_stream(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_read_change_stream_rest_unset_required_fields(): + transport = transports.BigtableRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.read_change_stream._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("tableName",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_read_change_stream_rest_interceptors(null_interceptor): + transport = transports.BigtableRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), + ) + client = BigtableClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableRestInterceptor, "post_read_change_stream" + ) as post, mock.patch.object( + transports.BigtableRestInterceptor, "pre_read_change_stream" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable.ReadChangeStreamRequest.pb( + bigtable.ReadChangeStreamRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = bigtable.ReadChangeStreamResponse.to_json( + bigtable.ReadChangeStreamResponse() + ) + req.return_value._content = "[{}]".format(req.return_value._content) + + request = bigtable.ReadChangeStreamRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable.ReadChangeStreamResponse() + + client.read_change_stream( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_read_change_stream_rest_bad_request( + transport: str = "rest", request_type=bigtable.ReadChangeStreamRequest +): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.read_change_stream(request) + + +def test_read_change_stream_rest_flattened(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.ReadChangeStreamResponse() + + # get arguments that satisfy an http rule for this method + sample_request = { + "table_name": "projects/sample1/instances/sample2/tables/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + table_name="table_name_value", + app_profile_id="app_profile_id_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = bigtable.ReadChangeStreamResponse.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = "[{}]".format(json_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + with mock.patch.object(response_value, "iter_content") as iter_content: + iter_content.return_value = iter(json_return_value) + client.read_change_stream(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{table_name=projects/*/instances/*/tables/*}:readChangeStream" + % client.transport._host, + args[1], + ) + + +def test_read_change_stream_rest_flattened_error(transport: str = "rest"): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.read_change_stream( + bigtable.ReadChangeStreamRequest(), + table_name="table_name_value", + app_profile_id="app_profile_id_value", + ) + + +def test_read_change_stream_rest_error(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.BigtableGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.BigtableGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BigtableClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. 
+ transport = transports.BigtableGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = BigtableClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = mock.Mock() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = BigtableClient( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.BigtableGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BigtableClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.BigtableGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = BigtableClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.BigtableGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.BigtableGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.BigtableGrpcTransport, + transports.BigtableGrpcAsyncIOTransport, + transports.BigtableRestTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "rest", + ], +) +def test_transport_kind(transport_name): + transport = BigtableClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.BigtableGrpcTransport, + ) + + +def test_bigtable_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.BigtableTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_bigtable_base_transport(): + # Instantiate the base transport. + with mock.patch( + "google.cloud.bigtable_v2.services.bigtable.transports.BigtableTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.BigtableTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + "read_rows", + "sample_row_keys", + "mutate_row", + "mutate_rows", + "check_and_mutate_row", + "ping_and_warm", + "read_modify_write_row", + "generate_initial_change_stream_partitions", + "read_change_stream", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_bigtable_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch( + "google.cloud.bigtable_v2.services.bigtable.transports.BigtableTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.BigtableTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=None, + default_scopes=( + "https://www.googleapis.com/auth/bigtable.data", + "https://www.googleapis.com/auth/bigtable.data.readonly", + "https://www.googleapis.com/auth/cloud-bigtable.data", + "https://www.googleapis.com/auth/cloud-bigtable.data.readonly", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ), + quota_project_id="octopus", + ) + + +def test_bigtable_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch( + "google.cloud.bigtable_v2.services.bigtable.transports.BigtableTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.BigtableTransport() + adc.assert_called_once() + + +def test_bigtable_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + BigtableClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + "https://www.googleapis.com/auth/bigtable.data", + "https://www.googleapis.com/auth/bigtable.data.readonly", + "https://www.googleapis.com/auth/cloud-bigtable.data", + "https://www.googleapis.com/auth/cloud-bigtable.data.readonly", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + ), + quota_project_id=None, ) @@ -2685,6 +5837,7 @@ def test_bigtable_transport_auth_adc(transport_class): [ transports.BigtableGrpcTransport, transports.BigtableGrpcAsyncIOTransport, + transports.BigtableRestTransport, ], ) def test_bigtable_transport_auth_gdch_credentials(transport_class): @@ -2786,11 +5939,23 @@ def test_bigtable_grpc_transport_client_cert_source_for_mtls(transport_class): ) +def test_bigtable_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch( + "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel" + ) as mock_configure_mtls_channel: + transports.BigtableRestTransport( + credentials=cred, client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + @pytest.mark.parametrize( "transport_name", [ "grpc", "grpc_asyncio", + "rest", ], ) def test_bigtable_host_no_port(transport_name): @@ -2801,7 +5966,11 @@ def test_bigtable_host_no_port(transport_name): ), transport=transport_name, ) - assert client.transport._host == ("bigtable.googleapis.com:443") + assert client.transport._host == ( + "bigtable.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://bigtable.googleapis.com" + ) @pytest.mark.parametrize( @@ -2809,6 +5978,7 @@ def test_bigtable_host_no_port(transport_name): [ "grpc", "grpc_asyncio", + "rest", ], ) def test_bigtable_host_with_port(transport_name): @@ -2819,7 +5989,57 @@ def test_bigtable_host_with_port(transport_name): ), transport=transport_name, ) - assert client.transport._host == ("bigtable.googleapis.com:8000") + assert client.transport._host == ( + "bigtable.googleapis.com:8000" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://bigtable.googleapis.com:8000" + ) + + +@pytest.mark.parametrize( + "transport_name", + [ + "rest", + ], +) +def test_bigtable_client_transport_session_collision(transport_name): + creds1 = ga_credentials.AnonymousCredentials() + creds2 = ga_credentials.AnonymousCredentials() + client1 = BigtableClient( + credentials=creds1, + transport=transport_name, + ) + client2 = BigtableClient( + credentials=creds2, + transport=transport_name, + ) + session1 = client1.transport.read_rows._session + session2 = client2.transport.read_rows._session + assert session1 != session2 + session1 = client1.transport.sample_row_keys._session + session2 = client2.transport.sample_row_keys._session + assert session1 != session2 + session1 = client1.transport.mutate_row._session + session2 = client2.transport.mutate_row._session + assert session1 != session2 + session1 = client1.transport.mutate_rows._session + session2 = client2.transport.mutate_rows._session + assert session1 != session2 + session1 = client1.transport.check_and_mutate_row._session + session2 = client2.transport.check_and_mutate_row._session + assert session1 != session2 + session1 = 
client1.transport.ping_and_warm._session + session2 = client2.transport.ping_and_warm._session + assert session1 != session2 + session1 = client1.transport.read_modify_write_row._session + session2 = client2.transport.read_modify_write_row._session + assert session1 != session2 + session1 = client1.transport.generate_initial_change_stream_partitions._session + session2 = client2.transport.generate_initial_change_stream_partitions._session + assert session1 != session2 + session1 = client1.transport.read_change_stream._session + session2 = client2.transport.read_change_stream._session + assert session1 != session2 def test_bigtable_grpc_transport_channel(): @@ -3131,6 +6351,7 @@ async def test_transport_close_async(): def test_transport_close(): transports = { + "rest": "_session", "grpc": "_grpc_channel", } @@ -3148,6 +6369,7 @@ def test_transport_close(): def test_client_ctx(): transports = [ + "rest", "grpc", ] for transport in transports: From af8dc8c0386d0049bf083440674e8fd5161e41fb Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Wed, 22 Feb 2023 17:55:32 +0000 Subject: [PATCH 700/892] chore(deps): update dependency apache-beam to v2.45.0 (#735) --- packages/google-cloud-bigtable/samples/beam/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index 081681a27f88..88a960a14539 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ -apache-beam==2.44.0 +apache-beam==2.45.0 google-cloud-bigtable==2.15.0 google-cloud-core==2.3.2 From 612a7bb6a48110b6bf78551fbfd192a01012694f Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Mon, 27 Feb 2023 11:46:20 -0500 Subject: [PATCH 701/892] chore: Update gapic-generator-python to v1.8.5 (#737) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore: Update gapic-generator-python to v1.8.5 PiperOrigin-RevId: 511892190 Source-Link: https://github.com/googleapis/googleapis/commit/a45d9c09c1287ffdf938f4e8083e791046c0b23b Source-Link: https://github.com/googleapis/googleapis-gen/commit/1907294b1d8365ea24f8c5f2e059a64124c4ed3b Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiMTkwNzI5NGIxZDgzNjVlYTI0ZjhjNWYyZTA1OWE2NDEyNGM0ZWQzYiJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- .../cloud/bigtable_admin_v2/types/bigtable_instance_admin.py | 2 ++ .../cloud/bigtable_admin_v2/types/bigtable_table_admin.py | 2 ++ .../google/cloud/bigtable_admin_v2/types/common.py | 2 ++ .../google/cloud/bigtable_admin_v2/types/instance.py | 2 ++ .../google/cloud/bigtable_admin_v2/types/table.py | 2 ++ .../google/cloud/bigtable_v2/types/bigtable.py | 2 ++ .../google/cloud/bigtable_v2/types/data.py | 2 ++ .../google/cloud/bigtable_v2/types/request_stats.py | 2 ++ .../google/cloud/bigtable_v2/types/response_params.py | 2 ++ 9 files changed, 18 insertions(+) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py index 87ff268e4cc4..a2254335498d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py +++ 
b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # +from __future__ import annotations + from typing import MutableMapping, MutableSequence import proto # type: ignore diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py index 52d2db32c858..9b236fea96b1 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # +from __future__ import annotations + from typing import MutableMapping, MutableSequence import proto # type: ignore diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/common.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/common.py index b98d94fe00ee..2cc71fc43a6e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/common.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/common.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # +from __future__ import annotations + from typing import MutableMapping, MutableSequence import proto # type: ignore diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py index f60899bacecd..2b5d8163674b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # +from __future__ import annotations + from typing import MutableMapping, MutableSequence import proto # type: ignore diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py index 1f91d3d89ee5..fd936df63e00 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # +from __future__ import annotations + from typing import MutableMapping, MutableSequence import proto # type: ignore diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py index bfe64bd122bc..267055634e25 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# +from __future__ import annotations + from typing import MutableMapping, MutableSequence import proto # type: ignore diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py index 8b74c8c7059a..515e167dfdb6 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # +from __future__ import annotations + from typing import MutableMapping, MutableSequence import proto # type: ignore diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/request_stats.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/request_stats.py index 621959a8c514..d72ba8694653 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/request_stats.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/request_stats.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # +from __future__ import annotations + from typing import MutableMapping, MutableSequence import proto # type: ignore diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/response_params.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/response_params.py index b95fc3d130c2..2532e64e286a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/response_params.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/response_params.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # +from __future__ import annotations + from typing import MutableMapping, MutableSequence import proto # type: ignore From 2b1757947bca82ef57ca6b0c6a24e68a40fdd8d0 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Mon, 27 Feb 2023 13:10:21 -0500 Subject: [PATCH 702/892] chore(python): upgrade gcp-releasetool in .kokoro [autoapprove] (#738) Source-Link: https://github.com/googleapis/synthtool/commit/5f2a6089f73abf06238fe4310f6a14d6f6d1eed3 Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:8555f0e37e6261408f792bfd6635102d2da5ad73f8f09bcb24f25e6afb5fac97 Co-authored-by: Owl Bot --- packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml | 2 +- packages/google-cloud-bigtable/.kokoro/requirements.in | 2 +- packages/google-cloud-bigtable/.kokoro/requirements.txt | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 894fb6bc9b47..5fc5daa31783 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,4 +13,4 @@ # limitations under the License. 
docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:f62c53736eccb0c4934a3ea9316e0d57696bb49c1a7c86c726e9bb8a2f87dadf + digest: sha256:8555f0e37e6261408f792bfd6635102d2da5ad73f8f09bcb24f25e6afb5fac97 diff --git a/packages/google-cloud-bigtable/.kokoro/requirements.in b/packages/google-cloud-bigtable/.kokoro/requirements.in index cbd7e77f44db..882178ce6001 100644 --- a/packages/google-cloud-bigtable/.kokoro/requirements.in +++ b/packages/google-cloud-bigtable/.kokoro/requirements.in @@ -1,5 +1,5 @@ gcp-docuploader -gcp-releasetool +gcp-releasetool>=1.10.5 # required for compatibility with cryptography>=39.x importlib-metadata typing-extensions twine diff --git a/packages/google-cloud-bigtable/.kokoro/requirements.txt b/packages/google-cloud-bigtable/.kokoro/requirements.txt index 096e4800a9ac..fa99c12908f0 100644 --- a/packages/google-cloud-bigtable/.kokoro/requirements.txt +++ b/packages/google-cloud-bigtable/.kokoro/requirements.txt @@ -154,9 +154,9 @@ gcp-docuploader==0.6.4 \ --hash=sha256:01486419e24633af78fd0167db74a2763974765ee8078ca6eb6964d0ebd388af \ --hash=sha256:70861190c123d907b3b067da896265ead2eeb9263969d6955c9e0bb091b5ccbf # via -r requirements.in -gcp-releasetool==1.10.0 \ - --hash=sha256:72a38ca91b59c24f7e699e9227c90cbe4dd71b789383cb0164b088abae294c83 \ - --hash=sha256:8c7c99320208383d4bb2b808c6880eb7a81424afe7cdba3c8d84b25f4f0e097d +gcp-releasetool==1.10.5 \ + --hash=sha256:174b7b102d704b254f2a26a3eda2c684fd3543320ec239baf771542a2e58e109 \ + --hash=sha256:e29d29927fe2ca493105a82958c6873bb2b90d503acac56be2c229e74de0eec9 # via -r requirements.in google-api-core==2.10.2 \ --hash=sha256:10c06f7739fe57781f87523375e8e1a3a4674bf6392cd6131a3222182b971320 \ From 5e3f553bc7d6f81831868adeacdee359300fd9bc Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Tue, 28 Feb 2023 09:48:13 -0800 Subject: [PATCH 703/892] chore(main): release 2.16.0 (#726) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- .../.release-please-manifest.json | 2 +- packages/google-cloud-bigtable/CHANGELOG.md | 19 +++++++++++++++++++ .../google/cloud/bigtable/gapic_version.py | 2 +- .../cloud/bigtable_admin/gapic_version.py | 2 +- .../cloud/bigtable_admin_v2/gapic_version.py | 2 +- .../google/cloud/bigtable_v2/gapic_version.py | 2 +- 6 files changed, 24 insertions(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/.release-please-manifest.json b/packages/google-cloud-bigtable/.release-please-manifest.json index a73bb826a85b..7a15bc188562 100644 --- a/packages/google-cloud-bigtable/.release-please-manifest.json +++ b/packages/google-cloud-bigtable/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "2.15.0" + ".": "2.16.0" } \ No newline at end of file diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 41f8e8c776dd..ba43a89667d2 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,25 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.16.0](https://github.com/googleapis/python-bigtable/compare/v2.15.0...v2.16.0) (2023-02-27) + + +### Features + +* Enable "rest" transport in Python for services supporting numeric enums ([c5116e0](https://github.com/googleapis/python-bigtable/commit/c5116e097aacf9ddae249de57fab1849aff10d86)) +* Publish the Cloud Bigtable Change Streams 
([c5116e0](https://github.com/googleapis/python-bigtable/commit/c5116e097aacf9ddae249de57fab1849aff10d86)) + + +### Bug Fixes + +* Add context manager return types ([beb5bf3](https://github.com/googleapis/python-bigtable/commit/beb5bf3bca4b517d095de3faa17d20e4d89fb295)) +* **deps:** Require google-api-core>=1.34.0,>=2.11.0 ([c5116e0](https://github.com/googleapis/python-bigtable/commit/c5116e097aacf9ddae249de57fab1849aff10d86)) + + +### Documentation + +* Add documentation for enums ([beb5bf3](https://github.com/googleapis/python-bigtable/commit/beb5bf3bca4b517d095de3faa17d20e4d89fb295)) + ## [2.15.0](https://github.com/googleapis/python-bigtable/compare/v2.14.1...v2.15.0) (2023-01-10) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py index 2788e5e55993..a2303530d547 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.15.0" # {x-release-please-version} +__version__ = "2.16.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py index 2788e5e55993..a2303530d547 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.15.0" # {x-release-please-version} +__version__ = "2.16.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py index 2788e5e55993..a2303530d547 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.15.0" # {x-release-please-version} +__version__ = "2.16.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py index 2788e5e55993..a2303530d547 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "2.15.0" # {x-release-please-version} +__version__ = "2.16.0" # {x-release-please-version} From 398dd4fab88fd9e7c72617b0bb98c1593ba14b8f Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Wed, 1 Mar 2023 04:58:48 -0500 Subject: [PATCH 704/892] feat: add new_partitions field for CloseStream for Cloud Bigtable ChangeStream (#740) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: add new_partitions field for CloseStream for Cloud Bigtable ChangeStream PiperOrigin-RevId: 512957844 Source-Link: https://github.com/googleapis/googleapis/commit/c5650ae0426ef1a82de84e5144ba26fcc5bb8549 Source-Link: https://github.com/googleapis/googleapis-gen/commit/a71ec9cc0a871286587f1df1c5e434c5bc338f91 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiYTcxZWM5Y2MwYTg3MTI4NjU4N2YxZGYxYzVlNDM0YzViYzMzOGY5MSJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- .../cloud/bigtable_v2/types/bigtable.py | 28 ++++++++++++++----- 1 file changed, 21 insertions(+), 7 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py index 267055634e25..ea97588c20cb 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py @@ -1052,19 +1052,28 @@ class Heartbeat(proto.Message): class CloseStream(proto.Message): r"""A message indicating that the client should stop reading from the - stream. If status is OK and ``continuation_tokens`` is empty, the - stream has finished (for example if there was an ``end_time`` - specified). If ``continuation_tokens`` is present, then a change in - partitioning requires the client to open a new stream for each token - to resume reading. + stream. If status is OK and ``continuation_tokens`` & + ``new_partitions`` are empty, the stream has finished (for example + if there was an ``end_time`` specified). If ``continuation_tokens`` + & ``new_partitions`` are present, then a change in partitioning + requires the client to open a new stream for each token to resume + reading. Example: [B, D) ends \| v new_partitions: [A, C) [C, E) + continuation_tokens.partitions: [B,C) [C,D) ^---^ ^---^ ^ ^ \| \| \| + StreamContinuationToken 2 \| StreamContinuationToken 1 To read the + new partition [A,C), supply the continuation tokens whose ranges + cover the new partition, for example ContinuationToken[A,B) & + ContinuationToken[B,C). Attributes: status (google.rpc.status_pb2.Status): The status of the stream. continuation_tokens (MutableSequence[google.cloud.bigtable_v2.types.StreamContinuationToken]): If non-empty, contains the information needed - to start reading the new partition(s) that - contain segments of this partition's row range. + to resume reading their associated partitions. + new_partitions (MutableSequence[google.cloud.bigtable_v2.types.StreamPartition]): + If non-empty, contains the new partitions to start reading + from, which are related to but not necessarily identical to + the partitions for the above ``continuation_tokens``. 
""" status: status_pb2.Status = proto.Field( @@ -1079,6 +1088,11 @@ class CloseStream(proto.Message): number=2, message=data.StreamContinuationToken, ) + new_partitions: MutableSequence[data.StreamPartition] = proto.RepeatedField( + proto.MESSAGE, + number=3, + message=data.StreamPartition, + ) data_change: DataChange = proto.Field( proto.MESSAGE, From 3223561fcc81f003b105e79cafdfbf257a7fdef4 Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Wed, 1 Mar 2023 10:09:25 +0000 Subject: [PATCH 705/892] chore(deps): update dependency google-cloud-bigtable to v2.16.0 (#739) --- packages/google-cloud-bigtable/samples/beam/requirements.txt | 2 +- packages/google-cloud-bigtable/samples/hello/requirements.txt | 2 +- .../samples/instanceadmin/requirements.txt | 2 +- .../google-cloud-bigtable/samples/metricscaler/requirements.txt | 2 +- .../google-cloud-bigtable/samples/quickstart/requirements.txt | 2 +- .../samples/snippets/deletes/requirements.txt | 2 +- .../samples/snippets/filters/requirements.txt | 2 +- .../samples/snippets/reads/requirements.txt | 2 +- .../samples/snippets/writes/requirements.txt | 2 +- .../google-cloud-bigtable/samples/tableadmin/requirements.txt | 2 +- 10 files changed, 10 insertions(+), 10 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index 88a960a14539..ad5a58e01207 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ apache-beam==2.45.0 -google-cloud-bigtable==2.15.0 +google-cloud-bigtable==2.16.0 google-cloud-core==2.3.2 diff --git a/packages/google-cloud-bigtable/samples/hello/requirements.txt b/packages/google-cloud-bigtable/samples/hello/requirements.txt index b03a5b2c7569..02590e9b13fd 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.15.0 +google-cloud-bigtable==2.16.0 google-cloud-core==2.3.2 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt index 863d2d4b9fc8..ec2c5947099d 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.15.0 +google-cloud-bigtable==2.16.0 backoff==2.2.1 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index 7ac3eb922941..33cce3c62881 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.15.0 +google-cloud-bigtable==2.16.0 google-cloud-monitoring==2.14.1 diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt index 40e0f50e911b..a29a6c9604f1 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.15.0 +google-cloud-bigtable==2.16.0 diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt 
index d82b8e6c7c45..0eee1690de73 100644 --- a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.15.0 +google-cloud-bigtable==2.16.0 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt index d82b8e6c7c45..0eee1690de73 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.15.0 +google-cloud-bigtable==2.16.0 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt index d82b8e6c7c45..0eee1690de73 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.15.0 +google-cloud-bigtable==2.16.0 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt index 17131ab77e72..d3fbb6150d9f 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.15.0 \ No newline at end of file +google-cloud-bigtable==2.16.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt index 40e0f50e911b..a29a6c9604f1 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.15.0 +google-cloud-bigtable==2.16.0 From fc42418a7138dd68b90f8cf66a59eed54ed47ded Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Wed, 1 Mar 2023 13:29:46 -0500 Subject: [PATCH 706/892] chore(main): release 2.17.0 (#741) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- .../google-cloud-bigtable/.release-please-manifest.json | 2 +- packages/google-cloud-bigtable/CHANGELOG.md | 7 +++++++ .../google/cloud/bigtable/gapic_version.py | 2 +- .../google/cloud/bigtable_admin/gapic_version.py | 2 +- .../google/cloud/bigtable_admin_v2/gapic_version.py | 2 +- .../google/cloud/bigtable_v2/gapic_version.py | 2 +- 6 files changed, 12 insertions(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/.release-please-manifest.json b/packages/google-cloud-bigtable/.release-please-manifest.json index 7a15bc188562..882f663e6b84 100644 --- a/packages/google-cloud-bigtable/.release-please-manifest.json +++ b/packages/google-cloud-bigtable/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "2.16.0" + ".": "2.17.0" } \ No newline at end of file diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index ba43a89667d2..78b4d1b291e0 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ 
-4,6 +4,13 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.17.0](https://github.com/googleapis/python-bigtable/compare/v2.16.0...v2.17.0) (2023-03-01) + + +### Features + +* Add new_partitions field for CloseStream for Cloud Bigtable ChangeStream ([#740](https://github.com/googleapis/python-bigtable/issues/740)) ([1adcad4](https://github.com/googleapis/python-bigtable/commit/1adcad440368f6d7df6710a013e7fab076461aed)) + ## [2.16.0](https://github.com/googleapis/python-bigtable/compare/v2.15.0...v2.16.0) (2023-02-27) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py index a2303530d547..8d4f4cfb61d6 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.16.0" # {x-release-please-version} +__version__ = "2.17.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py index a2303530d547..8d4f4cfb61d6 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.16.0" # {x-release-please-version} +__version__ = "2.17.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py index a2303530d547..8d4f4cfb61d6 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.16.0" # {x-release-please-version} +__version__ = "2.17.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py index a2303530d547..8d4f4cfb61d6 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "2.16.0" # {x-release-please-version} +__version__ = "2.17.0" # {x-release-please-version} From 163e7ecf81c2ecbf0d74cd1f7ae0a659a78720d1 Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Wed, 1 Mar 2023 23:18:21 +0000 Subject: [PATCH 707/892] chore(deps): update dependency google-cloud-bigtable to v2.17.0 (#742) --- packages/google-cloud-bigtable/samples/beam/requirements.txt | 2 +- packages/google-cloud-bigtable/samples/hello/requirements.txt | 2 +- .../samples/instanceadmin/requirements.txt | 2 +- .../google-cloud-bigtable/samples/metricscaler/requirements.txt | 2 +- .../google-cloud-bigtable/samples/quickstart/requirements.txt | 2 +- .../samples/snippets/deletes/requirements.txt | 2 +- .../samples/snippets/filters/requirements.txt | 2 +- .../samples/snippets/reads/requirements.txt | 2 +- .../samples/snippets/writes/requirements.txt | 2 +- .../google-cloud-bigtable/samples/tableadmin/requirements.txt | 2 +- 10 files changed, 10 insertions(+), 10 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index ad5a58e01207..bcb270e725f7 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ apache-beam==2.45.0 -google-cloud-bigtable==2.16.0 +google-cloud-bigtable==2.17.0 google-cloud-core==2.3.2 diff --git a/packages/google-cloud-bigtable/samples/hello/requirements.txt b/packages/google-cloud-bigtable/samples/hello/requirements.txt index 02590e9b13fd..199541ffe66f 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.16.0 +google-cloud-bigtable==2.17.0 google-cloud-core==2.3.2 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt index ec2c5947099d..04e476254af8 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.16.0 +google-cloud-bigtable==2.17.0 backoff==2.2.1 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index 33cce3c62881..e9647809f95e 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.16.0 +google-cloud-bigtable==2.17.0 google-cloud-monitoring==2.14.1 diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt index a29a6c9604f1..909f8c365834 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.16.0 +google-cloud-bigtable==2.17.0 diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt index 0eee1690de73..2006656312be 100644 --- a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.16.0 
+google-cloud-bigtable==2.17.0 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt index 0eee1690de73..2006656312be 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.16.0 +google-cloud-bigtable==2.17.0 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt index 0eee1690de73..2006656312be 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.16.0 +google-cloud-bigtable==2.17.0 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt index d3fbb6150d9f..32cead029a58 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.16.0 \ No newline at end of file +google-cloud-bigtable==2.17.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt index a29a6c9604f1..909f8c365834 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.16.0 +google-cloud-bigtable==2.17.0 From 359d9eace0690a431db6353fd2a2209cea54191d Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Sat, 4 Mar 2023 11:27:40 +0000 Subject: [PATCH 708/892] chore(deps): update dependency pytest to v7.2.2 (#743) --- .../google-cloud-bigtable/samples/beam/requirements-test.txt | 2 +- .../google-cloud-bigtable/samples/hello/requirements-test.txt | 2 +- .../samples/hello_happybase/requirements-test.txt | 2 +- .../samples/instanceadmin/requirements-test.txt | 2 +- .../samples/metricscaler/requirements-test.txt | 2 +- .../samples/quickstart/requirements-test.txt | 2 +- .../samples/quickstart_happybase/requirements-test.txt | 2 +- .../samples/snippets/deletes/requirements-test.txt | 2 +- .../samples/snippets/filters/requirements-test.txt | 2 +- .../samples/snippets/reads/requirements-test.txt | 2 +- .../samples/snippets/writes/requirements-test.txt | 2 +- .../samples/tableadmin/requirements-test.txt | 2 +- 12 files changed, 12 insertions(+), 12 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements-test.txt b/packages/google-cloud-bigtable/samples/beam/requirements-test.txt index 805eb2a9f845..c021c5b5b702 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements-test.txt @@ -1 +1 @@ -pytest==7.2.1 +pytest==7.2.2 diff --git a/packages/google-cloud-bigtable/samples/hello/requirements-test.txt b/packages/google-cloud-bigtable/samples/hello/requirements-test.txt index 805eb2a9f845..c021c5b5b702 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements-test.txt +++ 
b/packages/google-cloud-bigtable/samples/hello/requirements-test.txt @@ -1 +1 @@ -pytest==7.2.1 +pytest==7.2.2 diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt b/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt index 805eb2a9f845..c021c5b5b702 100644 --- a/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt @@ -1 +1 @@ -pytest==7.2.1 +pytest==7.2.2 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt index 805eb2a9f845..c021c5b5b702 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt @@ -1 +1 @@ -pytest==7.2.1 +pytest==7.2.2 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt index b321a100e7ca..82f315c7fd63 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt @@ -1,3 +1,3 @@ -pytest==7.2.1 +pytest==7.2.2 mock==5.0.1 google-cloud-testutils diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt index 805eb2a9f845..c021c5b5b702 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt @@ -1 +1 @@ -pytest==7.2.1 +pytest==7.2.2 diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt b/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt index 805eb2a9f845..c021c5b5b702 100644 --- a/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt @@ -1 +1 @@ -pytest==7.2.1 +pytest==7.2.2 diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt index 805eb2a9f845..c021c5b5b702 100644 --- a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt @@ -1 +1 @@ -pytest==7.2.1 +pytest==7.2.2 diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt index 805eb2a9f845..c021c5b5b702 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt @@ -1 +1 @@ -pytest==7.2.1 +pytest==7.2.2 diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt index 805eb2a9f845..c021c5b5b702 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt @@ -1 +1 @@ -pytest==7.2.1 +pytest==7.2.2 diff --git 
a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt index 9f013668bd64..8d6117f168d6 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt @@ -1,2 +1,2 @@ backoff==2.2.1 -pytest==7.2.1 +pytest==7.2.2 diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt index 6a16834f7ccd..d3ddc990f2b6 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt @@ -1,2 +1,2 @@ -pytest==7.2.1 +pytest==7.2.2 google-cloud-testutils==1.3.3 From 86457145f381a8823ed005e8eabd6faa89795f6f Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Tue, 14 Mar 2023 17:39:49 -0400 Subject: [PATCH 709/892] chore: update Go import paths to match open source (#749) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore: update Go import paths to match open source chore: update gapic-generator-go version to 0.35.2 PiperOrigin-RevId: 516528222 Source-Link: https://github.com/googleapis/googleapis/commit/dcd99fd6f43c3306bcd8eaf1ae25e8bb5f7cb708 Source-Link: https://github.com/googleapis/googleapis-gen/commit/975f4eea69a75b965e2f103be1bc82972d4a35d7 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiOTc1ZjRlZWE2OWE3NWI5NjVlMmYxMDNiZTFiYzgyOTcyZDRhMzVkNyJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- .../services/bigtable_instance_admin/transports/rest.py | 6 ++++-- .../services/bigtable_table_admin/transports/rest.py | 6 ++++-- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py index c95068666c35..5ae9600a9ea8 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py @@ -1639,7 +1639,8 @@ def __call__( "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", - "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], @@ -2472,7 +2473,8 @@ def __call__( "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", - "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py index cf16261fd01d..5c25ac55624e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py +++ 
b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py @@ -1910,7 +1910,8 @@ def __call__( "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", - "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], @@ -2768,7 +2769,8 @@ def __call__( "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", - "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], From e3935d658e802a2cb3bb60331ae2583ce0740f7d Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 16 Mar 2023 08:01:31 -0400 Subject: [PATCH 710/892] chore(deps): Update nox in .kokoro/requirements.in [autoapprove] (#750) Source-Link: https://github.com/googleapis/synthtool/commit/92006bb3cdc84677aa93c7f5235424ec2b157146 Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:2e247c7bf5154df7f98cce087a20ca7605e236340c7d6d1a14447e5c06791bd6 Co-authored-by: Owl Bot --- .../.github/.OwlBot.lock.yaml | 2 +- .../google-cloud-bigtable/.kokoro/requirements.in | 2 +- .../google-cloud-bigtable/.kokoro/requirements.txt | 14 +++++--------- 3 files changed, 7 insertions(+), 11 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 5fc5daa31783..b8edda51cf46 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,4 +13,4 @@ # limitations under the License. 
docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:8555f0e37e6261408f792bfd6635102d2da5ad73f8f09bcb24f25e6afb5fac97 + digest: sha256:2e247c7bf5154df7f98cce087a20ca7605e236340c7d6d1a14447e5c06791bd6 diff --git a/packages/google-cloud-bigtable/.kokoro/requirements.in b/packages/google-cloud-bigtable/.kokoro/requirements.in index 882178ce6001..ec867d9fd65a 100644 --- a/packages/google-cloud-bigtable/.kokoro/requirements.in +++ b/packages/google-cloud-bigtable/.kokoro/requirements.in @@ -5,6 +5,6 @@ typing-extensions twine wheel setuptools -nox +nox>=2022.11.21 # required to remove dependency on py charset-normalizer<3 click<8.1.0 diff --git a/packages/google-cloud-bigtable/.kokoro/requirements.txt b/packages/google-cloud-bigtable/.kokoro/requirements.txt index fa99c12908f0..66a2172a76a8 100644 --- a/packages/google-cloud-bigtable/.kokoro/requirements.txt +++ b/packages/google-cloud-bigtable/.kokoro/requirements.txt @@ -1,6 +1,6 @@ # -# This file is autogenerated by pip-compile with python 3.10 -# To update, run: +# This file is autogenerated by pip-compile with Python 3.9 +# by the following command: # # pip-compile --allow-unsafe --generate-hashes requirements.in # @@ -335,9 +335,9 @@ more-itertools==9.0.0 \ --hash=sha256:250e83d7e81d0c87ca6bd942e6aeab8cc9daa6096d12c5308f3f92fa5e5c1f41 \ --hash=sha256:5a6257e40878ef0520b1803990e3e22303a41b5714006c32a3fd8304b26ea1ab # via jaraco-classes -nox==2022.8.7 \ - --hash=sha256:1b894940551dc5c389f9271d197ca5d655d40bdc6ccf93ed6880e4042760a34b \ - --hash=sha256:96cca88779e08282a699d672258ec01eb7c792d35bbbf538c723172bce23212c +nox==2022.11.21 \ + --hash=sha256:0e41a990e290e274cb205a976c4c97ee3c5234441a8132c8c3fd9ea3c22149eb \ + --hash=sha256:e21c31de0711d1274ca585a2c5fde36b1aa962005ba8e9322bf5eeed16dcd684 # via -r requirements.in packaging==21.3 \ --hash=sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb \ @@ -380,10 +380,6 @@ protobuf==3.20.3 \ # gcp-docuploader # gcp-releasetool # google-api-core -py==1.11.0 \ - --hash=sha256:51c75c4126074b472f746a24399ad32f6053d1b34b68d2fa41e558e6f4a98719 \ - --hash=sha256:607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378 - # via nox pyasn1==0.4.8 \ --hash=sha256:39c7e2ec30515947ff4e87fb6f456dfc6e84857d34be479c9d4a4ba4bf46aa5d \ --hash=sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba From 5e35b12ff99c213a46af4a0cd318485842be761e Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 23 Mar 2023 09:44:02 -0400 Subject: [PATCH 711/892] docs: Fix formatting of request arg in docstring (#756) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * docs: Fix formatting of request arg in docstring chore: Update gapic-generator-python to v1.9.1 PiperOrigin-RevId: 518604533 Source-Link: https://github.com/googleapis/googleapis/commit/8a085aeddfa010af5bcef090827aac5255383d7e Source-Link: https://github.com/googleapis/googleapis-gen/commit/b2ab4b0a0ae2907e812c209198a74e0898afcb04 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiYjJhYjRiMGEwYWUyOTA3ZTgxMmMyMDkxOThhNzRlMDg5OGFmY2IwNCJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- .../bigtable_instance_admin/async_client.py | 13 +++++------- .../bigtable_instance_admin/client.py | 13 +++++------- .../transports/rest.py | 18 ---------------- 
.../bigtable_table_admin/async_client.py | 14 +++++++------ .../services/bigtable_table_admin/client.py | 14 +++++++------ .../bigtable_table_admin/transports/rest.py | 21 ------------------- .../services/bigtable/async_client.py | 15 ++++++------- .../bigtable_v2/services/bigtable/client.py | 15 ++++++------- .../services/bigtable/transports/rest.py | 9 -------- 9 files changed, 42 insertions(+), 90 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py index ddeaf979ae82..12811bceae15 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ -1137,8 +1137,8 @@ async def update_cluster( Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.Cluster, dict]]): - The request object. A resizable group of nodes in a - particular cloud location, capable of serving all + The request object. A resizable group of nodes in a particular cloud + location, capable of serving all [Tables][google.bigtable.admin.v2.Table] in the parent [Instance][google.bigtable.admin.v2.Instance]. retry (google.api_core.retry.Retry): Designation of what errors, if any, @@ -1880,8 +1880,7 @@ async def get_iam_policy( Args: request (Optional[Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]]): - The request object. Request message for `GetIamPolicy` - method. + The request object. Request message for ``GetIamPolicy`` method. resource (:class:`str`): REQUIRED: The resource for which the policy is being requested. See the @@ -2030,8 +2029,7 @@ async def set_iam_policy( Args: request (Optional[Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]]): - The request object. Request message for `SetIamPolicy` - method. + The request object. Request message for ``SetIamPolicy`` method. resource (:class:`str`): REQUIRED: The resource for which the policy is being specified. See the @@ -2171,8 +2169,7 @@ async def test_iam_permissions( Args: request (Optional[Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]]): - The request object. Request message for - `TestIamPermissions` method. + The request object. Request message for ``TestIamPermissions`` method. resource (:class:`str`): REQUIRED: The resource for which the policy detail is being requested. See diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py index fcb767a3d642..ecc9bf1e2892 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -1400,8 +1400,8 @@ def update_cluster( Args: request (Union[google.cloud.bigtable_admin_v2.types.Cluster, dict]): - The request object. A resizable group of nodes in a - particular cloud location, capable of serving all + The request object. A resizable group of nodes in a particular cloud + location, capable of serving all [Tables][google.bigtable.admin.v2.Table] in the parent [Instance][google.bigtable.admin.v2.Instance]. 
retry (google.api_core.retry.Retry): Designation of what errors, if any, @@ -2104,8 +2104,7 @@ def get_iam_policy( Args: request (Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]): - The request object. Request message for `GetIamPolicy` - method. + The request object. Request message for ``GetIamPolicy`` method. resource (str): REQUIRED: The resource for which the policy is being requested. See the @@ -2241,8 +2240,7 @@ def set_iam_policy( Args: request (Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]): - The request object. Request message for `SetIamPolicy` - method. + The request object. Request message for ``SetIamPolicy`` method. resource (str): REQUIRED: The resource for which the policy is being specified. See the @@ -2379,8 +2377,7 @@ def test_iam_permissions( Args: request (Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]): - The request object. Request message for - `TestIamPermissions` method. + The request object. Request message for ``TestIamPermissions`` method. resource (str): REQUIRED: The resource for which the policy detail is being requested. See diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py index 5ae9600a9ea8..e9b94cf78fe9 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py @@ -874,7 +874,6 @@ def __call__( request (~.bigtable_instance_admin.CreateAppProfileRequest): The request object. Request message for BigtableInstanceAdmin.CreateAppProfile. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -978,7 +977,6 @@ def __call__( request (~.bigtable_instance_admin.CreateClusterRequest): The request object. Request message for BigtableInstanceAdmin.CreateCluster. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1076,7 +1074,6 @@ def __call__( request (~.bigtable_instance_admin.CreateInstanceRequest): The request object. Request message for BigtableInstanceAdmin.CreateInstance. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1176,7 +1173,6 @@ def __call__( request (~.bigtable_instance_admin.DeleteAppProfileRequest): The request object. Request message for BigtableInstanceAdmin.DeleteAppProfile. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1254,7 +1250,6 @@ def __call__( request (~.bigtable_instance_admin.DeleteClusterRequest): The request object. Request message for BigtableInstanceAdmin.DeleteCluster. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1330,7 +1325,6 @@ def __call__( request (~.bigtable_instance_admin.DeleteInstanceRequest): The request object. Request message for BigtableInstanceAdmin.DeleteInstance. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
@@ -1406,7 +1400,6 @@ def __call__( request (~.bigtable_instance_admin.GetAppProfileRequest): The request object. Request message for BigtableInstanceAdmin.GetAppProfile. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1497,7 +1490,6 @@ def __call__( request (~.bigtable_instance_admin.GetClusterRequest): The request object. Request message for BigtableInstanceAdmin.GetCluster. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1759,7 +1751,6 @@ def __call__( request (~.bigtable_instance_admin.GetInstanceRequest): The request object. Request message for BigtableInstanceAdmin.GetInstance. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1853,7 +1844,6 @@ def __call__( request (~.bigtable_instance_admin.ListAppProfilesRequest): The request object. Request message for BigtableInstanceAdmin.ListAppProfiles. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1945,7 +1935,6 @@ def __call__( request (~.bigtable_instance_admin.ListClustersRequest): The request object. Request message for BigtableInstanceAdmin.ListClusters. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2035,7 +2024,6 @@ def __call__( request (~.bigtable_instance_admin.ListHotTabletsRequest): The request object. Request message for BigtableInstanceAdmin.ListHotTablets. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2127,7 +2115,6 @@ def __call__( request (~.bigtable_instance_admin.ListInstancesRequest): The request object. Request message for BigtableInstanceAdmin.ListInstances. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2219,7 +2206,6 @@ def __call__( request (~.bigtable_instance_admin.PartialUpdateClusterRequest): The request object. Request message for BigtableInstanceAdmin.PartialUpdateCluster. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2321,7 +2307,6 @@ def __call__( request (~.bigtable_instance_admin.PartialUpdateInstanceRequest): The request object. Request message for BigtableInstanceAdmin.PartialUpdateInstance. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2692,7 +2677,6 @@ def __call__( request (~.bigtable_instance_admin.UpdateAppProfileRequest): The request object. Request message for BigtableInstanceAdmin.UpdateAppProfile. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2784,7 +2768,6 @@ def __call__( location, capable of serving all [Tables][google.bigtable.admin.v2.Table] in the parent [Instance][google.bigtable.admin.v2.Instance]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
@@ -2885,7 +2868,6 @@ def __call__( served from all [Clusters][google.bigtable.admin.v2.Cluster] in the instance. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index bc85e5c5dfaa..91f059f8b41d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -369,6 +369,7 @@ async def create_table_from_snapshot( request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateTableFromSnapshotRequest, dict]]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot] + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be @@ -1300,6 +1301,7 @@ async def snapshot_table( request (Optional[Union[google.cloud.bigtable_admin_v2.types.SnapshotTableRequest, dict]]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable][google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable] + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be @@ -1437,6 +1439,7 @@ async def get_snapshot( request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetSnapshotRequest, dict]]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot] + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be @@ -1549,6 +1552,7 @@ async def list_snapshots( request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListSnapshotsRequest, dict]]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be @@ -1672,6 +1676,7 @@ async def delete_snapshot( request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteSnapshotRequest, dict]]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot] + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be @@ -2290,8 +2295,7 @@ async def get_iam_policy( Args: request (Optional[Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]]): - The request object. Request message for `GetIamPolicy` - method. + The request object. Request message for ``GetIamPolicy`` method. resource (:class:`str`): REQUIRED: The resource for which the policy is being requested. 
See the @@ -2440,8 +2444,7 @@ async def set_iam_policy( Args: request (Optional[Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]]): - The request object. Request message for `SetIamPolicy` - method. + The request object. Request message for ``SetIamPolicy`` method. resource (:class:`str`): REQUIRED: The resource for which the policy is being specified. See the @@ -2581,8 +2584,7 @@ async def test_iam_permissions( Args: request (Optional[Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]]): - The request object. Request message for - `TestIamPermissions` method. + The request object. Request message for ``TestIamPermissions`` method. resource (:class:`str`): REQUIRED: The resource for which the policy detail is being requested. See diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index aa7eaa197120..efceae90a76d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -696,6 +696,7 @@ def create_table_from_snapshot( request (Union[google.cloud.bigtable_admin_v2.types.CreateTableFromSnapshotRequest, dict]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot] + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be @@ -1594,6 +1595,7 @@ def snapshot_table( request (Union[google.cloud.bigtable_admin_v2.types.SnapshotTableRequest, dict]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable][google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable] + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be @@ -1731,6 +1733,7 @@ def get_snapshot( request (Union[google.cloud.bigtable_admin_v2.types.GetSnapshotRequest, dict]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot] + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be @@ -1833,6 +1836,7 @@ def list_snapshots( request (Union[google.cloud.bigtable_admin_v2.types.ListSnapshotsRequest, dict]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be @@ -1946,6 +1950,7 @@ def delete_snapshot( request (Union[google.cloud.bigtable_admin_v2.types.DeleteSnapshotRequest, dict]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot] + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. 
This feature might be @@ -2545,8 +2550,7 @@ def get_iam_policy( Args: request (Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]): - The request object. Request message for `GetIamPolicy` - method. + The request object. Request message for ``GetIamPolicy`` method. resource (str): REQUIRED: The resource for which the policy is being requested. See the @@ -2682,8 +2686,7 @@ def set_iam_policy( Args: request (Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]): - The request object. Request message for `SetIamPolicy` - method. + The request object. Request message for ``SetIamPolicy`` method. resource (str): REQUIRED: The resource for which the policy is being specified. See the @@ -2820,8 +2823,7 @@ def test_iam_permissions( Args: request (Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]): - The request object. Request message for - `TestIamPermissions` method. + The request object. Request message for ``TestIamPermissions`` method. resource (str): REQUIRED: The resource for which the policy detail is being requested. See diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py index 5c25ac55624e..4d5b2ed1c0ae 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py @@ -938,7 +938,6 @@ def __call__( request (~.bigtable_table_admin.CheckConsistencyRequest): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1041,7 +1040,6 @@ def __call__( request (~.bigtable_table_admin.CreateBackupRequest): The request object. The request for [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1139,7 +1137,6 @@ def __call__( request (~.bigtable_table_admin.CreateTableRequest): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1248,7 +1245,6 @@ def __call__( changed in backward-incompatible ways and is not recommended for production use. It is not subject to any SLA or deprecation policy. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1348,7 +1344,6 @@ def __call__( request (~.bigtable_table_admin.DeleteBackupRequest): The request object. The request for [DeleteBackup][google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1431,7 +1426,6 @@ def __call__( changed in backward-incompatible ways and is not recommended for production use. It is not subject to any SLA or deprecation policy. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1507,7 +1501,6 @@ def __call__( request (~.bigtable_table_admin.DeleteTableRequest): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable] - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1583,7 +1576,6 @@ def __call__( request (~.bigtable_table_admin.DropRowRangeRequest): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1669,7 +1661,6 @@ def __call__( request (~.bigtable_table_admin.GenerateConsistencyTokenRequest): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1772,7 +1763,6 @@ def __call__( request (~.bigtable_table_admin.GetBackupRequest): The request object. The request for [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2042,7 +2032,6 @@ def __call__( changed in backward-incompatible ways and is not recommended for production use. It is not subject to any SLA or deprecation policy. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2142,7 +2131,6 @@ def __call__( request (~.bigtable_table_admin.GetTableRequest): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.GetTable][google.bigtable.admin.v2.BigtableTableAdmin.GetTable] - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2234,7 +2222,6 @@ def __call__( request (~.bigtable_table_admin.ListBackupsRequest): The request object. The request for [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2331,7 +2318,6 @@ def __call__( changed in backward-incompatible ways and is not recommended for production use. It is not subject to any SLA or deprecation policy. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2428,7 +2414,6 @@ def __call__( request (~.bigtable_table_admin.ListTablesRequest): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2518,7 +2503,6 @@ def __call__( request (~.bigtable_table_admin.ModifyColumnFamiliesRequest): The request object. 
Request message for [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies] - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2621,7 +2605,6 @@ def __call__( request (~.bigtable_table_admin.RestoreTableRequest): The request object. The request for [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2901,7 +2884,6 @@ def __call__( changed in backward-incompatible ways and is not recommended for production use. It is not subject to any SLA or deprecation policy. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -3101,7 +3083,6 @@ def __call__( request (~.bigtable_table_admin.UndeleteTableRequest): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable] - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -3201,7 +3182,6 @@ def __call__( request (~.bigtable_table_admin.UpdateBackupRequest): The request object. The request for [UpdateBackup][google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -3300,7 +3280,6 @@ def __call__( request (~.bigtable_table_admin.UpdateTableRequest): The request object. The request for [UpdateTable][google.bigtable.admin.v2.BigtableTableAdmin.UpdateTable]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py index 3465569b34cb..1233e128868b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -807,8 +807,8 @@ async def ping_and_warm( Args: request (Optional[Union[google.cloud.bigtable_v2.types.PingAndWarmRequest, dict]]): - The request object. Request message for client - connection keep-alive and warming. + The request object. Request message for client connection + keep-alive and warming. name (:class:`str`): Required. The unique name of the instance to check permissions for as well as respond. Values are of the @@ -1027,8 +1027,9 @@ def generate_initial_change_stream_partitions( Args: request (Optional[Union[google.cloud.bigtable_v2.types.GenerateInitialChangeStreamPartitionsRequest, dict]]): - The request object. NOTE: This API is intended to be - used by Apache Beam BigtableIO. Request message for + The request object. NOTE: This API is intended to be used + by Apache Beam BigtableIO. Request + message for Bigtable.GenerateInitialChangeStreamPartitions. table_name (:class:`str`): Required. 
The unique name of the table from which to get @@ -1126,9 +1127,9 @@ def read_change_stream( Args: request (Optional[Union[google.cloud.bigtable_v2.types.ReadChangeStreamRequest, dict]]): - The request object. NOTE: This API is intended to be - used by Apache Beam BigtableIO. Request message for - Bigtable.ReadChangeStream. + The request object. NOTE: This API is intended to be used + by Apache Beam BigtableIO. Request + message for Bigtable.ReadChangeStream. table_name (:class:`str`): Required. The unique name of the table from which to read a change stream. Values are of the form diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py index 37ab65fe2a87..38618fa31dfb 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py @@ -1091,8 +1091,8 @@ def ping_and_warm( Args: request (Union[google.cloud.bigtable_v2.types.PingAndWarmRequest, dict]): - The request object. Request message for client - connection keep-alive and warming. + The request object. Request message for client connection + keep-alive and warming. name (str): Required. The unique name of the instance to check permissions for as well as respond. Values are of the @@ -1327,8 +1327,9 @@ def generate_initial_change_stream_partitions( Args: request (Union[google.cloud.bigtable_v2.types.GenerateInitialChangeStreamPartitionsRequest, dict]): - The request object. NOTE: This API is intended to be - used by Apache Beam BigtableIO. Request message for + The request object. NOTE: This API is intended to be used + by Apache Beam BigtableIO. Request + message for Bigtable.GenerateInitialChangeStreamPartitions. table_name (str): Required. The unique name of the table from which to get @@ -1430,9 +1431,9 @@ def read_change_stream( Args: request (Union[google.cloud.bigtable_v2.types.ReadChangeStreamRequest, dict]): - The request object. NOTE: This API is intended to be - used by Apache Beam BigtableIO. Request message for - Bigtable.ReadChangeStream. + The request object. NOTE: This API is intended to be used + by Apache Beam BigtableIO. Request + message for Bigtable.ReadChangeStream. table_name (str): Required. The unique name of the table from which to read a change stream. Values are of the form diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py index ee9cb046ff0c..4343fbb900eb 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py @@ -471,7 +471,6 @@ def __call__( request (~.bigtable.CheckAndMutateRowRequest): The request object. Request message for Bigtable.CheckAndMutateRow. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -575,7 +574,6 @@ def __call__( by Apache Beam BigtableIO. Request message for Bigtable.GenerateInitialChangeStreamPartitions. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -684,7 +682,6 @@ def __call__( request (~.bigtable.MutateRowRequest): The request object. 
Request message for Bigtable.MutateRow. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -783,7 +780,6 @@ def __call__( request (~.bigtable.MutateRowsRequest): The request object. Request message for BigtableService.MutateRows. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -881,7 +877,6 @@ def __call__( request (~.bigtable.PingAndWarmRequest): The request object. Request message for client connection keep-alive and warming. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -982,7 +977,6 @@ def __call__( The request object. NOTE: This API is intended to be used by Apache Beam BigtableIO. Request message for Bigtable.ReadChangeStream. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1083,7 +1077,6 @@ def __call__( request (~.bigtable.ReadModifyWriteRowRequest): The request object. Request message for Bigtable.ReadModifyWriteRow. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1184,7 +1177,6 @@ def __call__( request (~.bigtable.ReadRowsRequest): The request object. Request message for Bigtable.ReadRows. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1280,7 +1272,6 @@ def __call__( request (~.bigtable.SampleRowKeysRequest): The request object. Request message for Bigtable.SampleRowKeys. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. From 459e8522358e9edde73e044045b62f19b1be53f6 Mon Sep 17 00:00:00 2001 From: Mariatta Wijaya Date: Thu, 30 Mar 2023 15:07:35 -0700 Subject: [PATCH 712/892] fix: Pass the "retry" when calling read_rows. (#759) --- .../google/cloud/bigtable/row_data.py | 4 +++- .../tests/unit/test_row_data.py | 4 +++- .../google-cloud-bigtable/tests/unit/test_table.py | 12 +++++++++--- 3 files changed, 15 insertions(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py index a50fab1ee325..e11379108c4f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_data.py @@ -157,7 +157,9 @@ def __init__(self, read_method, request, retry=DEFAULT_RETRY_READ_ROWS): # Otherwise there is a risk of entering an infinite loop that resets # the timeout counter just before it being triggered. The increment # by 1 second here is customary but should not be much less than that. 
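The practical effect of this commit (the hunk that follows) is that the Retry object held by PartialRowsData is forwarded to the underlying read_rows call instead of only padding its timeout. A minimal caller-side sketch, assuming placeholder project, instance, and table names and an arbitrary 120-second deadline:

    from google.cloud import bigtable
    from google.cloud.bigtable.row_data import DEFAULT_RETRY_READ_ROWS

    client = bigtable.Client(project="my-project")
    table = client.instance("my-instance").table("my-table")

    # With this fix, the retry supplied here travels down to the gRPC ReadRows
    # call, so retriable errors are retried there instead of surfacing
    # immediately to the caller.
    retry = DEFAULT_RETRY_READ_ROWS.with_deadline(120.0)
    for row in table.read_rows(retry=retry):
        print(row.row_key)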
- self.response_iterator = read_method(request, timeout=self.retry._deadline + 1) + self.response_iterator = read_method( + request, timeout=self.retry._deadline + 1, retry=self.retry + ) self.rows = {} diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_data.py b/packages/google-cloud-bigtable/tests/unit/test_row_data.py index 382a81ef1ddd..fba69ceba0c6 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_data.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_data.py @@ -446,7 +446,9 @@ def test_partial_rows_data_constructor_with_retry(): client._data_stub.ReadRows, request, retry ) partial_rows_data.read_method.assert_called_once_with( - request, timeout=DEFAULT_RETRY_READ_ROWS.deadline + 1 + request, + timeout=DEFAULT_RETRY_READ_ROWS.deadline + 1, + retry=DEFAULT_RETRY_READ_ROWS, ) assert partial_rows_data.request is request assert partial_rows_data.rows == {} diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index e66a8f0f6c9c..3d7d2e8eea2f 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -643,6 +643,7 @@ def _table_read_row_helper(chunks, expected_result, app_profile_id=None): from google.cloud.bigtable import table as MUT from google.cloud.bigtable.row_set import RowSet from google.cloud.bigtable.row_filters import RowSampleFilter + from google.cloud.bigtable.row_data import DEFAULT_RETRY_READ_ROWS credentials = _make_credentials() client = _make_client(project="project-id", credentials=credentials, admin=True) @@ -691,7 +692,9 @@ def mock_create_row_request(table_name, **kwargs): assert result == expected_result assert mock_created == expected_request - data_api.read_rows.assert_called_once_with(request_pb, timeout=61.0) + data_api.read_rows.assert_called_once_with( + request_pb, timeout=61.0, retry=DEFAULT_RETRY_READ_ROWS + ) def test_table_read_row_miss_no__responses(): @@ -906,7 +909,7 @@ def mock_create_row_request(table_name, **kwargs): } assert mock_created == [(table.name, created_kwargs)] - data_api.read_rows.assert_called_once_with(request_pb, timeout=61.0) + data_api.read_rows.assert_called_once_with(request_pb, timeout=61.0, retry=retry) def test_table_read_retry_rows(): @@ -1082,6 +1085,7 @@ def test_table_yield_rows_with_row_set(): from google.cloud.bigtable.row_set import RowSet from google.cloud.bigtable.row_set import RowRange from google.cloud.bigtable.table import _create_row_request + from google.cloud.bigtable.row_data import DEFAULT_RETRY_READ_ROWS credentials = _make_credentials() client = _make_client(project="project-id", credentials=credentials, admin=True) @@ -1149,7 +1153,9 @@ def test_table_yield_rows_with_row_set(): end_key=ROW_KEY_2, ) expected_request.rows.row_keys.append(ROW_KEY_3) - data_api.read_rows.assert_called_once_with(expected_request, timeout=61.0) + data_api.read_rows.assert_called_once_with( + expected_request, timeout=61.0, retry=DEFAULT_RETRY_READ_ROWS + ) def test_table_sample_row_keys(): From f1342eea1f4bedfcf8f3d5507403db76bd2118d7 Mon Sep 17 00:00:00 2001 From: Shweta Shetye-Sabharwal Date: Wed, 5 Apr 2023 18:34:04 +0000 Subject: [PATCH 713/892] chore(samples): Fixed a typo in the readme (#760) --- packages/google-cloud-bigtable/samples/hello/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/samples/hello/README.md b/packages/google-cloud-bigtable/samples/hello/README.md index 
0e1fc92f9289..b3779fb43b27 100644 --- a/packages/google-cloud-bigtable/samples/hello/README.md +++ b/packages/google-cloud-bigtable/samples/hello/README.md @@ -17,7 +17,7 @@ Demonstrates how to connect to Cloud Bigtable and run some basic operations. Mor To run this sample: -1. If this is your first time working with GCP products, you will need to set up [the Cloud SDK][cloud_sdk] or utilize [Google Cloud Shell][gcloud_shell]. This sample may [require authetication][authentication] and you will need to [enable billing][enable_billing]. +1. If this is your first time working with GCP products, you will need to set up [the Cloud SDK][cloud_sdk] or utilize [Google Cloud Shell][gcloud_shell]. This sample may [require authentication][authentication] and you will need to [enable billing][enable_billing]. 1. Make a fork of this repo and clone the branch locally, then navigate to the sample directory you want to use. From cf243e9c1dbba9f01249dcaaa940dcdf157f383d Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Thu, 6 Apr 2023 17:14:36 +0100 Subject: [PATCH 714/892] chore(deps): update all dependencies (#746) --- packages/google-cloud-bigtable/samples/beam/requirements.txt | 2 +- .../google-cloud-bigtable/samples/metricscaler/requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index bcb270e725f7..8be9b98e061c 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ -apache-beam==2.45.0 +apache-beam==2.46.0 google-cloud-bigtable==2.17.0 google-cloud-core==2.3.2 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index e9647809f95e..02e08b4c8536 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ google-cloud-bigtable==2.17.0 -google-cloud-monitoring==2.14.1 +google-cloud-monitoring==2.14.2 From 6184eac92312e1508c1b40e25b752496d184b2c6 Mon Sep 17 00:00:00 2001 From: Billy Jacobson Date: Fri, 14 Apr 2023 11:08:12 -0400 Subject: [PATCH 715/892] docs: fix delete from column family example (#764) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Thank you for opening a Pull Request! Before submitting your PR, there are a few things you can do to make sure it goes smoothly: - [ ] Make sure to open an issue as a [bug/issue](https://togithub.com/googleapis/python-bigtable/issues/new/choose) before writing your code! 
That way we can discuss the change, evaluate designs, and agree on the general idea - [ ] Ensure the tests and linter pass - [ ] Code coverage does not decrease (if any source code was changed) - [ ] Appropriate docs were updated (if necessary) Fixes # 🦕 --- .../samples/snippets/deletes/deletes_snippets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/deletes_snippets.py b/packages/google-cloud-bigtable/samples/snippets/deletes/deletes_snippets.py index 4e89189db8f6..8e78083bf8b7 100644 --- a/packages/google-cloud-bigtable/samples/snippets/deletes/deletes_snippets.py +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/deletes_snippets.py @@ -38,7 +38,7 @@ def delete_from_column_family(project_id, instance_id, table_id): table = instance.table(table_id) row = table.row("phone#4c410523#20190501") row.delete_cells( - column_family_id="cell_plan", columns=["data_plan_01gb", "data_plan_05gb"] + column_family_id="cell_plan", columns=row.ALL_COLUMNS ) row.commit() From c929cec73ba4e5fd5ffdce89cc9874960dced23e Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Tue, 18 Apr 2023 19:00:48 +0200 Subject: [PATCH 716/892] chore(deps): update all dependencies (#763) --- .../google-cloud-bigtable/samples/beam/requirements-test.txt | 2 +- .../google-cloud-bigtable/samples/hello/requirements-test.txt | 2 +- .../samples/hello_happybase/requirements-test.txt | 2 +- .../samples/instanceadmin/requirements-test.txt | 2 +- .../samples/metricscaler/requirements-test.txt | 4 ++-- .../samples/quickstart/requirements-test.txt | 2 +- .../samples/quickstart_happybase/requirements-test.txt | 2 +- .../samples/snippets/deletes/requirements-test.txt | 2 +- .../samples/snippets/filters/requirements-test.txt | 2 +- .../samples/snippets/reads/requirements-test.txt | 2 +- .../samples/snippets/writes/requirements-test.txt | 2 +- .../samples/tableadmin/requirements-test.txt | 2 +- 12 files changed, 13 insertions(+), 13 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements-test.txt b/packages/google-cloud-bigtable/samples/beam/requirements-test.txt index c021c5b5b702..c4d04a08d024 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements-test.txt @@ -1 +1 @@ -pytest==7.2.2 +pytest==7.3.1 diff --git a/packages/google-cloud-bigtable/samples/hello/requirements-test.txt b/packages/google-cloud-bigtable/samples/hello/requirements-test.txt index c021c5b5b702..c4d04a08d024 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements-test.txt @@ -1 +1 @@ -pytest==7.2.2 +pytest==7.3.1 diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt b/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt index c021c5b5b702..c4d04a08d024 100644 --- a/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt @@ -1 +1 @@ -pytest==7.2.2 +pytest==7.3.1 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt index c021c5b5b702..c4d04a08d024 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt @@ -1 +1 
@@ -pytest==7.2.2 +pytest==7.3.1 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt index 82f315c7fd63..761227068cfd 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt @@ -1,3 +1,3 @@ -pytest==7.2.2 -mock==5.0.1 +pytest==7.3.1 +mock==5.0.2 google-cloud-testutils diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt index c021c5b5b702..c4d04a08d024 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt @@ -1 +1 @@ -pytest==7.2.2 +pytest==7.3.1 diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt b/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt index c021c5b5b702..c4d04a08d024 100644 --- a/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt @@ -1 +1 @@ -pytest==7.2.2 +pytest==7.3.1 diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt index c021c5b5b702..c4d04a08d024 100644 --- a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt @@ -1 +1 @@ -pytest==7.2.2 +pytest==7.3.1 diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt index c021c5b5b702..c4d04a08d024 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt @@ -1 +1 @@ -pytest==7.2.2 +pytest==7.3.1 diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt index c021c5b5b702..c4d04a08d024 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt @@ -1 +1 @@ -pytest==7.2.2 +pytest==7.3.1 diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt index 8d6117f168d6..96aa71dab7f6 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt @@ -1,2 +1,2 @@ backoff==2.2.1 -pytest==7.2.2 +pytest==7.3.1 diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt index d3ddc990f2b6..ca1f33bd3f48 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt @@ -1,2 +1,2 @@ -pytest==7.2.2 +pytest==7.3.1 google-cloud-testutils==1.3.3 From 0e02a2ddb90e51a0b0aaf3b6330d98f4146f0d86 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" 
<78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 4 May 2023 10:42:05 -0700 Subject: [PATCH 717/892] feat: publish RateLimitInfo and FeatureFlag protos (#768) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: publish RateLimitInfo and FeatureFlag protos PiperOrigin-RevId: 527878708 Source-Link: https://github.com/googleapis/googleapis/commit/f129f486fa0f681456b99c5cc899bec889a3185c Source-Link: https://github.com/googleapis/googleapis-gen/commit/e02c87d9d0c9a77f2b17268a86f462b5a1d66bbd Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiZTAyYzg3ZDlkMGM5YTc3ZjJiMTcyNjhhODZmNDYyYjVhMWQ2NmJiZCJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * fix: Add feature flag proto to BUILD file PiperOrigin-RevId: 528468347 Source-Link: https://github.com/googleapis/googleapis/commit/38247e83e10ace50ec0022302e540e3b0d4be123 Source-Link: https://github.com/googleapis/googleapis-gen/commit/17e62a1ab5f22d7d537675a659157207e406e63d Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiMTdlNjJhMWFiNWYyMmQ3ZDUzNzY3NWE2NTkxNTcyMDdlNDA2ZTYzZCJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- .../google/cloud/bigtable_v2/__init__.py | 4 ++ .../services/bigtable/async_client.py | 6 +- .../bigtable_v2/services/bigtable/client.py | 6 +- .../cloud/bigtable_v2/types/__init__.py | 6 ++ .../cloud/bigtable_v2/types/bigtable.py | 59 ++++++++++++++++++- .../cloud/bigtable_v2/types/feature_flags.py | 54 +++++++++++++++++ 6 files changed, 129 insertions(+), 6 deletions(-) create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/feature_flags.py diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py index 342718dea85d..ee3bd8c0c984 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py @@ -31,6 +31,7 @@ from .types.bigtable import MutateRowsResponse from .types.bigtable import PingAndWarmRequest from .types.bigtable import PingAndWarmResponse +from .types.bigtable import RateLimitInfo from .types.bigtable import ReadChangeStreamRequest from .types.bigtable import ReadChangeStreamResponse from .types.bigtable import ReadModifyWriteRowRequest @@ -54,6 +55,7 @@ from .types.data import StreamPartition from .types.data import TimestampRange from .types.data import ValueRange +from .types.feature_flags import FeatureFlags from .types.request_stats import FullReadStatsView from .types.request_stats import ReadIterationStats from .types.request_stats import RequestLatencyStats @@ -69,6 +71,7 @@ "Column", "ColumnRange", "Family", + "FeatureFlags", "FullReadStatsView", "GenerateInitialChangeStreamPartitionsRequest", "GenerateInitialChangeStreamPartitionsResponse", @@ -79,6 +82,7 @@ "Mutation", "PingAndWarmRequest", "PingAndWarmResponse", + "RateLimitInfo", "ReadChangeStreamRequest", "ReadChangeStreamResponse", "ReadIterationStats", diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py index 1233e128868b..abd82d4d8fbb 100644 --- 
a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -242,8 +242,10 @@ def read_rows( on the ``request`` instance; if ``request`` is provided, this should not be set. app_profile_id (:class:`str`): - This value specifies routing for replication. This API - only accepts the empty value of app_profile_id. + This value specifies routing for + replication. If not specified, the + "default" application profile will be + used. This corresponds to the ``app_profile_id`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py index 38618fa31dfb..a778aff3c0e7 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py @@ -491,8 +491,10 @@ def read_rows( on the ``request`` instance; if ``request`` is provided, this should not be set. app_profile_id (str): - This value specifies routing for replication. This API - only accepts the empty value of app_profile_id. + This value specifies routing for + replication. If not specified, the + "default" application profile will be + used. This corresponds to the ``app_profile_id`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py index bb2533e331b7..9f15efaf5e36 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py @@ -24,6 +24,7 @@ MutateRowsResponse, PingAndWarmRequest, PingAndWarmResponse, + RateLimitInfo, ReadChangeStreamRequest, ReadChangeStreamResponse, ReadModifyWriteRowRequest, @@ -50,6 +51,9 @@ TimestampRange, ValueRange, ) +from .feature_flags import ( + FeatureFlags, +) from .request_stats import ( FullReadStatsView, ReadIterationStats, @@ -71,6 +75,7 @@ "MutateRowsResponse", "PingAndWarmRequest", "PingAndWarmResponse", + "RateLimitInfo", "ReadChangeStreamRequest", "ReadChangeStreamResponse", "ReadModifyWriteRowRequest", @@ -94,6 +99,7 @@ "StreamPartition", "TimestampRange", "ValueRange", + "FeatureFlags", "FullReadStatsView", "ReadIterationStats", "RequestLatencyStats", diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py index ea97588c20cb..13f6ac0db3f7 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py @@ -38,6 +38,7 @@ "MutateRowResponse", "MutateRowsRequest", "MutateRowsResponse", + "RateLimitInfo", "CheckAndMutateRowRequest", "CheckAndMutateRowResponse", "PingAndWarmRequest", @@ -61,8 +62,9 @@ class ReadRowsRequest(proto.Message): Values are of the form ``projects//instances//tables/
``. app_profile_id (str): - This value specifies routing for replication. This API only - accepts the empty value of app_profile_id. + This value specifies routing for replication. + If not specified, the "default" application + profile will be used. rows (google.cloud.bigtable_v2.types.RowSet): The row keys and/or ranges to read sequentially. If not specified, reads from all @@ -469,10 +471,19 @@ class Entry(proto.Message): class MutateRowsResponse(proto.Message): r"""Response message for BigtableService.MutateRows. + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + Attributes: entries (MutableSequence[google.cloud.bigtable_v2.types.MutateRowsResponse.Entry]): One or more results for Entries from the batch request. + rate_limit_info (google.cloud.bigtable_v2.types.RateLimitInfo): + Information about how client should limit the + rate (QPS). Primirily used by supported official + Cloud Bigtable clients. If unset, the rate limit + info is not provided by the server. + + This field is a member of `oneof`_ ``_rate_limit_info``. """ class Entry(proto.Message): @@ -506,6 +517,50 @@ class Entry(proto.Message): number=1, message=Entry, ) + rate_limit_info: "RateLimitInfo" = proto.Field( + proto.MESSAGE, + number=3, + optional=True, + message="RateLimitInfo", + ) + + +class RateLimitInfo(proto.Message): + r"""Information about how client should adjust the load to + Bigtable. + + Attributes: + period (google.protobuf.duration_pb2.Duration): + Time that clients should wait before + adjusting the target rate again. If clients + adjust rate too frequently, the impact of the + previous adjustment may not have been taken into + account and may over-throttle or under-throttle. + If clients adjust rate too slowly, they will not + be responsive to load changes on server side, + and may over-throttle or under-throttle. + factor (float): + If it has been at least one ``period`` since the last load + adjustment, the client should multiply the current load by + this value to get the new target load. For example, if the + current load is 100 and ``factor`` is 0.8, the new target + load should be 80. After adjusting, the client should ignore + ``factor`` until another ``period`` has passed. + + The client can measure its load using any unit that's + comparable over time For example, QPS can be used as long as + each request involves a similar amount of work. + """ + + period: duration_pb2.Duration = proto.Field( + proto.MESSAGE, + number=1, + message=duration_pb2.Duration, + ) + factor: float = proto.Field( + proto.DOUBLE, + number=2, + ) class CheckAndMutateRowRequest(proto.Message): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/feature_flags.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/feature_flags.py new file mode 100644 index 000000000000..1b5f76e2410c --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/feature_flags.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
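The period and factor semantics documented above amount to a simple client-side adjustment rule. The helper below is purely illustrative and is not part of this patch; only the RateLimitInfo message itself is:

    import time

    def adjust_target_load(current_load, rate_limit_info, last_adjustment):
        # Scale the load by ``factor`` at most once per ``period``.
        now = time.monotonic()
        period_s = (
            rate_limit_info.period.seconds + rate_limit_info.period.nanos / 1e9
        )
        if now - last_adjustment >= period_s:
            # Example from the docstring: current_load=100, factor=0.8 -> 80.
            return current_load * rate_limit_info.factor, now
        return current_load, last_adjustment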
+# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.bigtable.v2", + manifest={ + "FeatureFlags", + }, +) + + +class FeatureFlags(proto.Message): + r"""Feature flags supported by a client. This is intended to be sent as + part of request metadata to assure the server that certain behaviors + are safe to enable. This proto is meant to be serialized and + websafe-base64 encoded under the ``bigtable-features`` metadata key. + The value will remain constant for the lifetime of a client and due + to HTTP2's HPACK compression, the request overhead will be tiny. + This is an internal implementation detail and should not be used by + endusers directly. + + Attributes: + mutate_rows_rate_limit (bool): + Notify the server that the client enables + batch write flow control by requesting + RateLimitInfo from MutateRowsResponse. + """ + + mutate_rows_rate_limit: bool = proto.Field( + proto.BOOL, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) From fd62b63cdcb9bf94748074bb144e7dcac874e87d Mon Sep 17 00:00:00 2001 From: Mariatta Wijaya Date: Wed, 10 May 2023 15:42:01 -0700 Subject: [PATCH 718/892] Feat: Threaded MutationsBatcher (#722) - Batch mutations in a thread to allow concurrent batching - Flush the batch every second - Add flow control to control inflight requests Co-authored-by: Mattie Fu --- .../google-cloud-bigtable/docs/batcher.rst | 6 + packages/google-cloud-bigtable/docs/usage.rst | 1 + .../google/cloud/bigtable/batcher.py | 366 +++++++++++++++--- .../google/cloud/bigtable/table.py | 6 +- .../tests/unit/test_batcher.py | 218 ++++++++--- 5 files changed, 469 insertions(+), 128 deletions(-) create mode 100644 packages/google-cloud-bigtable/docs/batcher.rst diff --git a/packages/google-cloud-bigtable/docs/batcher.rst b/packages/google-cloud-bigtable/docs/batcher.rst new file mode 100644 index 000000000000..9ac335be1841 --- /dev/null +++ b/packages/google-cloud-bigtable/docs/batcher.rst @@ -0,0 +1,6 @@ +Mutations Batching +~~~~~~~~~~~~~~~~~~ + +.. automodule:: google.cloud.bigtable.batcher + :members: + :show-inheritance: diff --git a/packages/google-cloud-bigtable/docs/usage.rst b/packages/google-cloud-bigtable/docs/usage.rst index 33bf7bb7fd21..73a32b03938f 100644 --- a/packages/google-cloud-bigtable/docs/usage.rst +++ b/packages/google-cloud-bigtable/docs/usage.rst @@ -17,6 +17,7 @@ Using the API row-data row-filters row-set + batcher In the hierarchy of API concepts diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py b/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py index 3c23f44363ce..6b06ec060870 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py @@ -13,104 +13,251 @@ # limitations under the License. 
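A usage sketch of the threaded batcher this commit introduces, used as a context manager. The project, instance, table, and column family names are placeholders, and flush_count=100 merely mirrors the new default rather than being a recommendation:

    from google.cloud import bigtable
    from google.cloud.bigtable.row import DirectRow

    client = bigtable.Client(project="my-project")
    table = client.instance("my-instance").table("my-table")

    # Rows are queued and flushed asynchronously when the size thresholds are
    # hit or the flush-interval timer fires; leaving the block calls close(),
    # which flushes the remaining rows and raises MutationsBatchError if any
    # mutation in the batch failed.
    with table.mutations_batcher(flush_count=100) as batcher:
        for i in range(1000):
            row = DirectRow(row_key="row-{}".format(i).encode())
            row.set_cell("cf1", b"qualifier", b"value")
            batcher.mutate(row)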
"""User friendly container for Google Cloud Bigtable MutationBatcher.""" +import threading +import queue +import concurrent.futures +import atexit -FLUSH_COUNT = 1000 -MAX_MUTATIONS = 100000 -MAX_ROW_BYTES = 5242880 # 5MB +from google.api_core.exceptions import from_grpc_status +from dataclasses import dataclass -class MaxMutationsError(ValueError): - """The number of mutations for bulk request is too big.""" +FLUSH_COUNT = 100 # after this many elements, send out the batch + +MAX_MUTATION_SIZE = 20 * 1024 * 1024 # 20MB # after this many bytes, send out the batch + +MAX_OUTSTANDING_BYTES = 100 * 1024 * 1024 # 100MB # max inflight byte size. + +MAX_OUTSTANDING_ELEMENTS = 100000 # max inflight mutations. + + +class MutationsBatchError(Exception): + """Error in the batch request""" + + def __init__(self, message, exc): + self.exc = exc + self.message = message + super().__init__(self.message) + + +class _MutationsBatchQueue(object): + """Private Threadsafe Queue to hold rows for batching.""" + + def __init__(self, max_mutation_bytes=MAX_MUTATION_SIZE, flush_count=FLUSH_COUNT): + """Specify the queue constraints""" + self._queue = queue.Queue() + self.total_mutation_count = 0 + self.total_size = 0 + self.max_mutation_bytes = max_mutation_bytes + self.flush_count = flush_count + + def get(self): + """Retrieve an item from the queue. Recalculate queue size.""" + row = self._queue.get() + mutation_size = row.get_mutations_size() + self.total_mutation_count -= len(row._get_mutations()) + self.total_size -= mutation_size + return row + + def put(self, item): + """Insert an item to the queue. Recalculate queue size.""" + + mutation_count = len(item._get_mutations()) + + self._queue.put(item) + + self.total_size += item.get_mutations_size() + self.total_mutation_count += mutation_count + + def full(self): + """Check if the queue is full.""" + if ( + self.total_mutation_count >= self.flush_count + or self.total_size >= self.max_mutation_bytes + ): + return True + return False + + def empty(self): + return self._queue.empty() + + +@dataclass +class _BatchInfo: + """Keeping track of size of a batch""" + + mutations_count: int = 0 + rows_count: int = 0 + mutations_size: int = 0 + + +class _FlowControl(object): + def __init__( + self, + max_mutations=MAX_OUTSTANDING_ELEMENTS, + max_mutation_bytes=MAX_OUTSTANDING_BYTES, + ): + """Control the inflight requests. Keep track of the mutations, row bytes and row counts. + As requests to backend are being made, adjust the number of mutations being processed. + + If threshold is reached, block the flow. + Reopen the flow as requests are finished. + """ + self.max_mutations = max_mutations + self.max_mutation_bytes = max_mutation_bytes + self.inflight_mutations = 0 + self.inflight_size = 0 + self.event = threading.Event() + self.event.set() + + def is_blocked(self): + """Returns True if: + + - inflight mutations >= max_mutations, or + - inflight bytes size >= max_mutation_bytes, or + """ + + return ( + self.inflight_mutations >= self.max_mutations + or self.inflight_size >= self.max_mutation_bytes + ) + + def control_flow(self, batch_info): + """ + Calculate the resources used by this batch + """ + + self.inflight_mutations += batch_info.mutations_count + self.inflight_size += batch_info.mutations_size + self.set_flow_control_status() + + def wait(self): + """ + Wait until flow control pushback has been released. + It awakens as soon as `event` is set. + """ + self.event.wait() + + def set_flow_control_status(self): + """Check the inflight mutations and size. 
+ + If values exceed the allowed threshold, block the event. + """ + if self.is_blocked(): + self.event.clear() # sleep + else: + self.event.set() # awaken the threads + + def release(self, batch_info): + """ + Release the resources. + Decrement the row size to allow enqueued mutations to be run. + """ + self.inflight_mutations -= batch_info.mutations_count + self.inflight_size -= batch_info.mutations_size + self.set_flow_control_status() class MutationsBatcher(object): """A MutationsBatcher is used in batch cases where the number of mutations - is large or unknown. It will store DirectRows in memory until one of the - size limits is reached, or an explicit call to flush() is performed. When - a flush event occurs, the DirectRows in memory will be sent to Cloud + is large or unknown. It will store :class:`DirectRow` in memory until one of the + size limits is reached, or an explicit call to :func:`flush()` is performed. When + a flush event occurs, the :class:`DirectRow` in memory will be sent to Cloud Bigtable. Batching mutations is more efficient than sending individual request. This class is not suited for usage in systems where each mutation must be guaranteed to be sent, since calling mutate may only result in an - in-memory change. In a case of a system crash, any DirectRows remaining in + in-memory change. In a case of a system crash, any :class:`DirectRow` remaining in memory will not necessarily be sent to the service, even after the - completion of the mutate() method. + completion of the :func:`mutate()` method. - TODO: Performance would dramatically improve if this class had the - capability of asynchronous, parallel RPCs. + Note on thread safety: The same :class:`MutationBatcher` cannot be shared by multiple end-user threads. :type table: class :param table: class:`~google.cloud.bigtable.table.Table`. :type flush_count: int :param flush_count: (Optional) Max number of rows to flush. If it - reaches the max number of rows it calls finish_batch() to mutate the - current row batch. Default is FLUSH_COUNT (1000 rows). + reaches the max number of rows it calls finish_batch() to mutate the + current row batch. Default is FLUSH_COUNT (1000 rows). :type max_row_bytes: int :param max_row_bytes: (Optional) Max number of row mutations size to - flush. If it reaches the max number of row mutations size it calls - finish_batch() to mutate the current row batch. Default is MAX_ROW_BYTES - (5 MB). + flush. If it reaches the max number of row mutations size it calls + finish_batch() to mutate the current row batch. Default is MAX_ROW_BYTES + (5 MB). + + :type flush_interval: float + :param flush_interval: (Optional) The interval (in seconds) between asynchronous flush. + Default is 1 second. 
""" - def __init__(self, table, flush_count=FLUSH_COUNT, max_row_bytes=MAX_ROW_BYTES): - self.rows = [] - self.total_mutation_count = 0 - self.total_size = 0 + def __init__( + self, + table, + flush_count=FLUSH_COUNT, + max_row_bytes=MAX_MUTATION_SIZE, + flush_interval=1, + ): + self._rows = _MutationsBatchQueue( + max_mutation_bytes=max_row_bytes, flush_count=flush_count + ) self.table = table - self.flush_count = flush_count - self.max_row_bytes = max_row_bytes + self._executor = concurrent.futures.ThreadPoolExecutor() + atexit.register(self.close) + self._timer = threading.Timer(flush_interval, self.flush) + self._timer.start() + self.flow_control = _FlowControl( + max_mutations=MAX_OUTSTANDING_ELEMENTS, + max_mutation_bytes=MAX_OUTSTANDING_BYTES, + ) + self.futures_mapping = {} + self.exceptions = queue.Queue() + + @property + def flush_count(self): + return self._rows.flush_count + + @property + def max_row_bytes(self): + return self._rows.max_mutation_bytes + + def __enter__(self): + """Starting the MutationsBatcher as a context manager""" + return self def mutate(self, row): """Add a row to the batch. If the current batch meets one of the size - limits, the batch is sent synchronously. + limits, the batch is sent asynchronously. For example: - .. literalinclude:: snippets.py + .. literalinclude:: snippets_table.py :start-after: [START bigtable_api_batcher_mutate] :end-before: [END bigtable_api_batcher_mutate] :dedent: 4 :type row: class - :param row: class:`~google.cloud.bigtable.row.DirectRow`. + :param row: :class:`~google.cloud.bigtable.row.DirectRow`. :raises: One of the following: - * :exc:`~.table._BigtableRetryableError` if any - row returned a transient error. - * :exc:`RuntimeError` if the number of responses doesn't - match the number of rows that were retried - * :exc:`.batcher.MaxMutationsError` if any row exceeds max - mutations count. - """ - mutation_count = len(row._get_mutations()) - if mutation_count > MAX_MUTATIONS: - raise MaxMutationsError( - "The row key {} exceeds the number of mutations {}.".format( - row.row_key, mutation_count - ) - ) - - if (self.total_mutation_count + mutation_count) >= MAX_MUTATIONS: - self.flush() - - self.rows.append(row) - self.total_mutation_count += mutation_count - self.total_size += row.get_mutations_size() + * :exc:`~.table._BigtableRetryableError` if any row returned a transient error. + * :exc:`RuntimeError` if the number of responses doesn't match the number of rows that were retried + """ + self._rows.put(row) - if self.total_size >= self.max_row_bytes or len(self.rows) >= self.flush_count: - self.flush() + if self._rows.full(): + self._flush_async() def mutate_rows(self, rows): """Add multiple rows to the batch. If the current batch meets one of the size - limits, the batch is sent synchronously. + limits, the batch is sent asynchronously. For example: - .. literalinclude:: snippets.py + .. literalinclude:: snippets_table.py :start-after: [START bigtable_api_batcher_mutate_rows] :end-before: [END bigtable_api_batcher_mutate_rows] :dedent: 4 @@ -119,28 +266,119 @@ def mutate_rows(self, rows): :param rows: list:[`~google.cloud.bigtable.row.DirectRow`]. :raises: One of the following: - * :exc:`~.table._BigtableRetryableError` if any - row returned a transient error. - * :exc:`RuntimeError` if the number of responses doesn't - match the number of rows that were retried - * :exc:`.batcher.MaxMutationsError` if any row exceeds max - mutations count. + * :exc:`~.table._BigtableRetryableError` if any row returned a transient error. 
+ * :exc:`RuntimeError` if the number of responses doesn't match the number of rows that were retried """ for row in rows: self.mutate(row) def flush(self): - """Sends the current. batch to Cloud Bigtable. + """Sends the current batch to Cloud Bigtable synchronously. For example: - .. literalinclude:: snippets.py + .. literalinclude:: snippets_table.py :start-after: [START bigtable_api_batcher_flush] :end-before: [END bigtable_api_batcher_flush] :dedent: 4 + :raises: + * :exc:`.batcherMutationsBatchError` if there's any error in the mutations. + """ + rows_to_flush = [] + while not self._rows.empty(): + rows_to_flush.append(self._rows.get()) + response = self._flush_rows(rows_to_flush) + return response + + def _flush_async(self): + """Sends the current batch to Cloud Bigtable asynchronously. + + :raises: + * :exc:`.batcherMutationsBatchError` if there's any error in the mutations. + """ + + rows_to_flush = [] + mutations_count = 0 + mutations_size = 0 + rows_count = 0 + batch_info = _BatchInfo() + + while not self._rows.empty(): + row = self._rows.get() + mutations_count += len(row._get_mutations()) + mutations_size += row.get_mutations_size() + rows_count += 1 + rows_to_flush.append(row) + batch_info.mutations_count = mutations_count + batch_info.rows_count = rows_count + batch_info.mutations_size = mutations_size + + if ( + rows_count >= self.flush_count + or mutations_size >= self.max_row_bytes + or mutations_count >= self.flow_control.max_mutations + or mutations_size >= self.flow_control.max_mutation_bytes + or self._rows.empty() # submit when it reached the end of the queue + ): + # wait for resources to become available, before submitting any new batch + self.flow_control.wait() + # once unblocked, submit a batch + # event flag will be set by control_flow to block subsequent thread, but not blocking this one + self.flow_control.control_flow(batch_info) + future = self._executor.submit(self._flush_rows, rows_to_flush) + self.futures_mapping[future] = batch_info + future.add_done_callback(self._batch_completed_callback) + + # reset and start a new batch + rows_to_flush = [] + mutations_size = 0 + rows_count = 0 + mutations_count = 0 + batch_info = _BatchInfo() + + def _batch_completed_callback(self, future): + """Callback for when the mutation has finished. + + Raise exceptions if there's any. + Release the resources locked by the flow control and allow enqueued tasks to be run. + """ + + processed_rows = self.futures_mapping[future] + self.flow_control.release(processed_rows) + del self.futures_mapping[future] + + def _flush_rows(self, rows_to_flush): + """Mutate the specified rows. + + :raises: + * :exc:`.batcherMutationsBatchError` if there's any error in the mutations. + """ + responses = [] + if len(rows_to_flush) > 0: + response = self.table.mutate_rows(rows_to_flush) + + for result in response: + if result.code != 0: + exc = from_grpc_status(result.code, result.message) + self.exceptions.put(exc) + responses.append(result) + + return responses + + def __exit__(self, exc_type, exc_value, exc_traceback): + """Clean up resources. Flush and shutdown the ThreadPoolExecutor.""" + self.close() + + def close(self): + """Clean up resources. Flush and shutdown the ThreadPoolExecutor. + Any errors will be raised. + + :raises: + * :exc:`.batcherMutationsBatchError` if there's any error in the mutations. 
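
Note that mutate() itself no longer raises per-row errors: failed statuses are converted with from_grpc_status(), queued on self.exceptions, and re-raised as a single MutationsBatchError when the batcher is closed. A handling sketch (hedged example, assuming `table` as above):

    from google.cloud.bigtable.batcher import MutationsBatchError

    batcher = table.mutations_batcher()
    # ... batcher.mutate(...) calls ...
    try:
        batcher.close()                 # flushes, shuts the executor down, then re-raises
    except MutationsBatchError as err:
        for failure in err.exc:         # entries were built via from_grpc_status()
            print(failure.message)
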
""" - if len(self.rows) != 0: - self.table.mutate_rows(self.rows) - self.total_mutation_count = 0 - self.total_size = 0 - self.rows = [] + self.flush() + self._executor.shutdown(wait=True) + atexit.unregister(self.close) + if self.exceptions.qsize() > 0: + exc = list(self.exceptions.queue) + raise MutationsBatchError("Errors in batch mutations.", exc=exc) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index 8605992baf45..e3191a7297da 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -32,7 +32,7 @@ from google.cloud.bigtable.column_family import _gc_rule_from_pb from google.cloud.bigtable.column_family import ColumnFamily from google.cloud.bigtable.batcher import MutationsBatcher -from google.cloud.bigtable.batcher import FLUSH_COUNT, MAX_ROW_BYTES +from google.cloud.bigtable.batcher import FLUSH_COUNT, MAX_MUTATION_SIZE from google.cloud.bigtable.encryption_info import EncryptionInfo from google.cloud.bigtable.policy import Policy from google.cloud.bigtable.row import AppendRow @@ -844,7 +844,9 @@ def drop_by_prefix(self, row_key_prefix, timeout=None): request={"name": self.name, "row_key_prefix": _to_bytes(row_key_prefix)} ) - def mutations_batcher(self, flush_count=FLUSH_COUNT, max_row_bytes=MAX_ROW_BYTES): + def mutations_batcher( + self, flush_count=FLUSH_COUNT, max_row_bytes=MAX_MUTATION_SIZE + ): """Factory to create a mutation batcher associated with this instance. For example: diff --git a/packages/google-cloud-bigtable/tests/unit/test_batcher.py b/packages/google-cloud-bigtable/tests/unit/test_batcher.py index 9ae6ed175624..a238b2852363 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_batcher.py +++ b/packages/google-cloud-bigtable/tests/unit/test_batcher.py @@ -14,122 +14,118 @@ import mock +import time + import pytest from google.cloud.bigtable.row import DirectRow +from google.cloud.bigtable.batcher import ( + _FlowControl, + MutationsBatcher, + MutationsBatchError, +) TABLE_ID = "table-id" TABLE_NAME = "/tables/" + TABLE_ID -def _make_mutation_batcher(table, **kw): - from google.cloud.bigtable.batcher import MutationsBatcher - - return MutationsBatcher(table, **kw) - - def test_mutation_batcher_constructor(): table = _Table(TABLE_NAME) - - mutation_batcher = _make_mutation_batcher(table) - assert table is mutation_batcher.table + with MutationsBatcher(table) as mutation_batcher: + assert table is mutation_batcher.table def test_mutation_batcher_mutate_row(): table = _Table(TABLE_NAME) - mutation_batcher = _make_mutation_batcher(table=table) + with MutationsBatcher(table=table) as mutation_batcher: - rows = [ - DirectRow(row_key=b"row_key"), - DirectRow(row_key=b"row_key_2"), - DirectRow(row_key=b"row_key_3"), - DirectRow(row_key=b"row_key_4"), - ] + rows = [ + DirectRow(row_key=b"row_key"), + DirectRow(row_key=b"row_key_2"), + DirectRow(row_key=b"row_key_3"), + DirectRow(row_key=b"row_key_4"), + ] - mutation_batcher.mutate_rows(rows) - mutation_batcher.flush() + mutation_batcher.mutate_rows(rows) assert table.mutation_calls == 1 def test_mutation_batcher_mutate(): table = _Table(TABLE_NAME) - mutation_batcher = _make_mutation_batcher(table=table) + with MutationsBatcher(table=table) as mutation_batcher: - row = DirectRow(row_key=b"row_key") - row.set_cell("cf1", b"c1", 1) - row.set_cell("cf1", b"c2", 2) - row.set_cell("cf1", b"c3", 3) - row.set_cell("cf1", b"c4", 4) - - 
mutation_batcher.mutate(row) + row = DirectRow(row_key=b"row_key") + row.set_cell("cf1", b"c1", 1) + row.set_cell("cf1", b"c2", 2) + row.set_cell("cf1", b"c3", 3) + row.set_cell("cf1", b"c4", 4) - mutation_batcher.flush() + mutation_batcher.mutate(row) assert table.mutation_calls == 1 def test_mutation_batcher_flush_w_no_rows(): table = _Table(TABLE_NAME) - mutation_batcher = _make_mutation_batcher(table=table) - mutation_batcher.flush() + with MutationsBatcher(table=table) as mutation_batcher: + mutation_batcher.flush() assert table.mutation_calls == 0 def test_mutation_batcher_mutate_w_max_flush_count(): table = _Table(TABLE_NAME) - mutation_batcher = _make_mutation_batcher(table=table, flush_count=3) + with MutationsBatcher(table=table, flush_count=3) as mutation_batcher: - row_1 = DirectRow(row_key=b"row_key_1") - row_2 = DirectRow(row_key=b"row_key_2") - row_3 = DirectRow(row_key=b"row_key_3") + row_1 = DirectRow(row_key=b"row_key_1") + row_2 = DirectRow(row_key=b"row_key_2") + row_3 = DirectRow(row_key=b"row_key_3") - mutation_batcher.mutate(row_1) - mutation_batcher.mutate(row_2) - mutation_batcher.mutate(row_3) + mutation_batcher.mutate(row_1) + mutation_batcher.mutate(row_2) + mutation_batcher.mutate(row_3) assert table.mutation_calls == 1 -@mock.patch("google.cloud.bigtable.batcher.MAX_MUTATIONS", new=3) -def test_mutation_batcher_mutate_with_max_mutations_failure(): - from google.cloud.bigtable.batcher import MaxMutationsError - +@mock.patch("google.cloud.bigtable.batcher.MAX_OUTSTANDING_ELEMENTS", new=3) +def test_mutation_batcher_mutate_w_max_mutations(): table = _Table(TABLE_NAME) - mutation_batcher = _make_mutation_batcher(table=table) + with MutationsBatcher(table=table) as mutation_batcher: - row = DirectRow(row_key=b"row_key") - row.set_cell("cf1", b"c1", 1) - row.set_cell("cf1", b"c2", 2) - row.set_cell("cf1", b"c3", 3) - row.set_cell("cf1", b"c4", 4) + row = DirectRow(row_key=b"row_key") + row.set_cell("cf1", b"c1", 1) + row.set_cell("cf1", b"c2", 2) + row.set_cell("cf1", b"c3", 3) - with pytest.raises(MaxMutationsError): mutation_batcher.mutate(row) + assert table.mutation_calls == 1 + -@mock.patch("google.cloud.bigtable.batcher.MAX_MUTATIONS", new=3) -def test_mutation_batcher_mutate_w_max_mutations(): +def test_mutation_batcher_mutate_w_max_row_bytes(): table = _Table(TABLE_NAME) - mutation_batcher = _make_mutation_batcher(table=table) + with MutationsBatcher( + table=table, max_row_bytes=3 * 1024 * 1024 + ) as mutation_batcher: - row = DirectRow(row_key=b"row_key") - row.set_cell("cf1", b"c1", 1) - row.set_cell("cf1", b"c2", 2) - row.set_cell("cf1", b"c3", 3) + number_of_bytes = 1 * 1024 * 1024 + max_value = b"1" * number_of_bytes - mutation_batcher.mutate(row) - mutation_batcher.flush() + row = DirectRow(row_key=b"row_key") + row.set_cell("cf1", b"c1", max_value) + row.set_cell("cf1", b"c2", max_value) + row.set_cell("cf1", b"c3", max_value) + + mutation_batcher.mutate(row) assert table.mutation_calls == 1 -def test_mutation_batcher_mutate_w_max_row_bytes(): +def test_mutations_batcher_flushed_when_closed(): table = _Table(TABLE_NAME) - mutation_batcher = _make_mutation_batcher( - table=table, max_row_bytes=3 * 1024 * 1024 - ) + mutation_batcher = MutationsBatcher(table=table, max_row_bytes=3 * 1024 * 1024) number_of_bytes = 1 * 1024 * 1024 max_value = b"1" * number_of_bytes @@ -137,13 +133,108 @@ def test_mutation_batcher_mutate_w_max_row_bytes(): row = DirectRow(row_key=b"row_key") row.set_cell("cf1", b"c1", max_value) row.set_cell("cf1", b"c2", max_value) - 
row.set_cell("cf1", b"c3", max_value) mutation_batcher.mutate(row) + assert table.mutation_calls == 0 + + mutation_batcher.close() + + assert table.mutation_calls == 1 + + +def test_mutations_batcher_context_manager_flushed_when_closed(): + table = _Table(TABLE_NAME) + with MutationsBatcher( + table=table, max_row_bytes=3 * 1024 * 1024 + ) as mutation_batcher: + + number_of_bytes = 1 * 1024 * 1024 + max_value = b"1" * number_of_bytes + + row = DirectRow(row_key=b"row_key") + row.set_cell("cf1", b"c1", max_value) + row.set_cell("cf1", b"c2", max_value) + + mutation_batcher.mutate(row) assert table.mutation_calls == 1 +@mock.patch("google.cloud.bigtable.batcher.MutationsBatcher.flush") +def test_mutations_batcher_flush_interval(mocked_flush): + table = _Table(TABLE_NAME) + flush_interval = 0.5 + mutation_batcher = MutationsBatcher(table=table, flush_interval=flush_interval) + + assert mutation_batcher._timer.interval == flush_interval + mocked_flush.assert_not_called() + + time.sleep(0.4) + mocked_flush.assert_not_called() + + time.sleep(0.1) + mocked_flush.assert_called_once_with() + + mutation_batcher.close() + + +def test_mutations_batcher_response_with_error_codes(): + from google.rpc.status_pb2 import Status + + mocked_response = [Status(code=1), Status(code=5)] + + with mock.patch("tests.unit.test_batcher._Table") as mocked_table: + table = mocked_table.return_value + mutation_batcher = MutationsBatcher(table=table) + + row1 = DirectRow(row_key=b"row_key") + row2 = DirectRow(row_key=b"row_key") + table.mutate_rows.return_value = mocked_response + + mutation_batcher.mutate_rows([row1, row2]) + with pytest.raises(MutationsBatchError) as exc: + mutation_batcher.close() + assert exc.value.message == "Errors in batch mutations." + assert len(exc.value.exc) == 2 + + assert exc.value.exc[0].message == mocked_response[0].message + assert exc.value.exc[1].message == mocked_response[1].message + + +def test_flow_control_event_is_set_when_not_blocked(): + flow_control = _FlowControl() + + flow_control.set_flow_control_status() + assert flow_control.event.is_set() + + +def test_flow_control_event_is_not_set_when_blocked(): + flow_control = _FlowControl() + + flow_control.inflight_mutations = flow_control.max_mutations + flow_control.inflight_size = flow_control.max_mutation_bytes + + flow_control.set_flow_control_status() + assert not flow_control.event.is_set() + + +@mock.patch("concurrent.futures.ThreadPoolExecutor.submit") +def test_flush_async_batch_count(mocked_executor_submit): + table = _Table(TABLE_NAME) + mutation_batcher = MutationsBatcher(table=table, flush_count=2) + + number_of_bytes = 1 * 1024 * 1024 + max_value = b"1" * number_of_bytes + for index in range(5): + row = DirectRow(row_key=f"row_key_{index}") + row.set_cell("cf1", b"c1", max_value) + mutation_batcher.mutate(row) + mutation_batcher._flush_async() + + # 3 batches submitted. 2 batches of 2 items, and the last one a single item batch. 
+ assert mocked_executor_submit.call_count == 3 + + class _Instance(object): def __init__(self, client=None): self._client = client @@ -156,5 +247,8 @@ def __init__(self, name, client=None): self.mutation_calls = 0 def mutate_rows(self, rows): + from google.rpc.status_pb2 import Status + self.mutation_calls += 1 - return rows + + return [Status(code=0) for _ in rows] From fee45384bab17366dd7eb84155fe0e6279105449 Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Wed, 10 May 2023 16:13:17 -0700 Subject: [PATCH 719/892] chore(main): release 2.18.0 (#757) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- .../.release-please-manifest.json | 2 +- packages/google-cloud-bigtable/CHANGELOG.md | 19 +++++++++++++++++++ .../google/cloud/bigtable/gapic_version.py | 2 +- .../cloud/bigtable_admin/gapic_version.py | 2 +- .../cloud/bigtable_admin_v2/gapic_version.py | 2 +- .../google/cloud/bigtable_v2/gapic_version.py | 2 +- 6 files changed, 24 insertions(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/.release-please-manifest.json b/packages/google-cloud-bigtable/.release-please-manifest.json index 882f663e6b84..a627e662e002 100644 --- a/packages/google-cloud-bigtable/.release-please-manifest.json +++ b/packages/google-cloud-bigtable/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "2.17.0" + ".": "2.18.0" } \ No newline at end of file diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 78b4d1b291e0..2d7fe51413be 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,25 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.18.0](https://github.com/googleapis/python-bigtable/compare/v2.17.0...v2.18.0) (2023-05-10) + + +### Features + +* Publish RateLimitInfo and FeatureFlag protos ([#768](https://github.com/googleapis/python-bigtable/issues/768)) ([171fea6](https://github.com/googleapis/python-bigtable/commit/171fea6de57a47f92a2a56050f8bfe7518144df7)) +* Threaded MutationsBatcher ([#722](https://github.com/googleapis/python-bigtable/issues/722)) ([7521a61](https://github.com/googleapis/python-bigtable/commit/7521a617c121ead96a21ca47959a53b2db2da090)) + + +### Bug Fixes + +* Pass the "retry" when calling read_rows. 
([#759](https://github.com/googleapis/python-bigtable/issues/759)) ([505273b](https://github.com/googleapis/python-bigtable/commit/505273b72bf83d8f92d0e0a92d62f22bce96cc3d)) + + +### Documentation + +* Fix delete from column family example ([#764](https://github.com/googleapis/python-bigtable/issues/764)) ([128b4e1](https://github.com/googleapis/python-bigtable/commit/128b4e1f3eea2dad903d84c8f2933b17a5f0d226)) +* Fix formatting of request arg in docstring ([#756](https://github.com/googleapis/python-bigtable/issues/756)) ([45d3e43](https://github.com/googleapis/python-bigtable/commit/45d3e4308c4f494228c2e6e18a36285c557cb0c3)) + ## [2.17.0](https://github.com/googleapis/python-bigtable/compare/v2.16.0...v2.17.0) (2023-03-01) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py index 8d4f4cfb61d6..f09943f6bdf7 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.17.0" # {x-release-please-version} +__version__ = "2.18.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py index 8d4f4cfb61d6..f09943f6bdf7 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.17.0" # {x-release-please-version} +__version__ = "2.18.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py index 8d4f4cfb61d6..f09943f6bdf7 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.17.0" # {x-release-please-version} +__version__ = "2.18.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py index 8d4f4cfb61d6..f09943f6bdf7 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.17.0" # {x-release-please-version} +__version__ = "2.18.0" # {x-release-please-version} From cf3ee398dc5c3a6d9a5a1f773b064c8a1ab07d17 Mon Sep 17 00:00:00 2001 From: Mattie Fu Date: Thu, 11 May 2023 15:30:12 -0400 Subject: [PATCH 720/892] fix: Revert "Feat: Threaded MutationsBatcher" (#773) Reverts googleapis/python-bigtable#722 This PR caused beam bigtableio.py failures https://togithub.com/apache/beam/issues/26673 and is blocking beam release. We're unclear why it caused the failure. 
So will revert this change, cut another release so we can unblock beam and investigate separately. --- .../google-cloud-bigtable/docs/batcher.rst | 6 - packages/google-cloud-bigtable/docs/usage.rst | 1 - .../google/cloud/bigtable/batcher.py | 366 +++--------------- .../google/cloud/bigtable/table.py | 6 +- .../tests/unit/test_batcher.py | 218 +++-------- 5 files changed, 128 insertions(+), 469 deletions(-) delete mode 100644 packages/google-cloud-bigtable/docs/batcher.rst diff --git a/packages/google-cloud-bigtable/docs/batcher.rst b/packages/google-cloud-bigtable/docs/batcher.rst deleted file mode 100644 index 9ac335be1841..000000000000 --- a/packages/google-cloud-bigtable/docs/batcher.rst +++ /dev/null @@ -1,6 +0,0 @@ -Mutations Batching -~~~~~~~~~~~~~~~~~~ - -.. automodule:: google.cloud.bigtable.batcher - :members: - :show-inheritance: diff --git a/packages/google-cloud-bigtable/docs/usage.rst b/packages/google-cloud-bigtable/docs/usage.rst index 73a32b03938f..33bf7bb7fd21 100644 --- a/packages/google-cloud-bigtable/docs/usage.rst +++ b/packages/google-cloud-bigtable/docs/usage.rst @@ -17,7 +17,6 @@ Using the API row-data row-filters row-set - batcher In the hierarchy of API concepts diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py b/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py index 6b06ec060870..3c23f44363ce 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py @@ -13,251 +13,104 @@ # limitations under the License. """User friendly container for Google Cloud Bigtable MutationBatcher.""" -import threading -import queue -import concurrent.futures -import atexit -from google.api_core.exceptions import from_grpc_status -from dataclasses import dataclass +FLUSH_COUNT = 1000 +MAX_MUTATIONS = 100000 +MAX_ROW_BYTES = 5242880 # 5MB -FLUSH_COUNT = 100 # after this many elements, send out the batch - -MAX_MUTATION_SIZE = 20 * 1024 * 1024 # 20MB # after this many bytes, send out the batch - -MAX_OUTSTANDING_BYTES = 100 * 1024 * 1024 # 100MB # max inflight byte size. - -MAX_OUTSTANDING_ELEMENTS = 100000 # max inflight mutations. - - -class MutationsBatchError(Exception): - """Error in the batch request""" - - def __init__(self, message, exc): - self.exc = exc - self.message = message - super().__init__(self.message) - - -class _MutationsBatchQueue(object): - """Private Threadsafe Queue to hold rows for batching.""" - - def __init__(self, max_mutation_bytes=MAX_MUTATION_SIZE, flush_count=FLUSH_COUNT): - """Specify the queue constraints""" - self._queue = queue.Queue() - self.total_mutation_count = 0 - self.total_size = 0 - self.max_mutation_bytes = max_mutation_bytes - self.flush_count = flush_count - - def get(self): - """Retrieve an item from the queue. Recalculate queue size.""" - row = self._queue.get() - mutation_size = row.get_mutations_size() - self.total_mutation_count -= len(row._get_mutations()) - self.total_size -= mutation_size - return row - - def put(self, item): - """Insert an item to the queue. 
Recalculate queue size.""" - - mutation_count = len(item._get_mutations()) - - self._queue.put(item) - - self.total_size += item.get_mutations_size() - self.total_mutation_count += mutation_count - - def full(self): - """Check if the queue is full.""" - if ( - self.total_mutation_count >= self.flush_count - or self.total_size >= self.max_mutation_bytes - ): - return True - return False - - def empty(self): - return self._queue.empty() - - -@dataclass -class _BatchInfo: - """Keeping track of size of a batch""" - - mutations_count: int = 0 - rows_count: int = 0 - mutations_size: int = 0 - - -class _FlowControl(object): - def __init__( - self, - max_mutations=MAX_OUTSTANDING_ELEMENTS, - max_mutation_bytes=MAX_OUTSTANDING_BYTES, - ): - """Control the inflight requests. Keep track of the mutations, row bytes and row counts. - As requests to backend are being made, adjust the number of mutations being processed. - - If threshold is reached, block the flow. - Reopen the flow as requests are finished. - """ - self.max_mutations = max_mutations - self.max_mutation_bytes = max_mutation_bytes - self.inflight_mutations = 0 - self.inflight_size = 0 - self.event = threading.Event() - self.event.set() - - def is_blocked(self): - """Returns True if: - - - inflight mutations >= max_mutations, or - - inflight bytes size >= max_mutation_bytes, or - """ - - return ( - self.inflight_mutations >= self.max_mutations - or self.inflight_size >= self.max_mutation_bytes - ) - - def control_flow(self, batch_info): - """ - Calculate the resources used by this batch - """ - - self.inflight_mutations += batch_info.mutations_count - self.inflight_size += batch_info.mutations_size - self.set_flow_control_status() - - def wait(self): - """ - Wait until flow control pushback has been released. - It awakens as soon as `event` is set. - """ - self.event.wait() - - def set_flow_control_status(self): - """Check the inflight mutations and size. - - If values exceed the allowed threshold, block the event. - """ - if self.is_blocked(): - self.event.clear() # sleep - else: - self.event.set() # awaken the threads - - def release(self, batch_info): - """ - Release the resources. - Decrement the row size to allow enqueued mutations to be run. - """ - self.inflight_mutations -= batch_info.mutations_count - self.inflight_size -= batch_info.mutations_size - self.set_flow_control_status() +class MaxMutationsError(ValueError): + """The number of mutations for bulk request is too big.""" class MutationsBatcher(object): """A MutationsBatcher is used in batch cases where the number of mutations - is large or unknown. It will store :class:`DirectRow` in memory until one of the - size limits is reached, or an explicit call to :func:`flush()` is performed. When - a flush event occurs, the :class:`DirectRow` in memory will be sent to Cloud + is large or unknown. It will store DirectRows in memory until one of the + size limits is reached, or an explicit call to flush() is performed. When + a flush event occurs, the DirectRows in memory will be sent to Cloud Bigtable. Batching mutations is more efficient than sending individual request. This class is not suited for usage in systems where each mutation must be guaranteed to be sent, since calling mutate may only result in an - in-memory change. In a case of a system crash, any :class:`DirectRow` remaining in + in-memory change. In a case of a system crash, any DirectRows remaining in memory will not necessarily be sent to the service, even after the - completion of the :func:`mutate()` method. 
+ completion of the mutate() method. - Note on thread safety: The same :class:`MutationBatcher` cannot be shared by multiple end-user threads. + TODO: Performance would dramatically improve if this class had the + capability of asynchronous, parallel RPCs. :type table: class :param table: class:`~google.cloud.bigtable.table.Table`. :type flush_count: int :param flush_count: (Optional) Max number of rows to flush. If it - reaches the max number of rows it calls finish_batch() to mutate the - current row batch. Default is FLUSH_COUNT (1000 rows). + reaches the max number of rows it calls finish_batch() to mutate the + current row batch. Default is FLUSH_COUNT (1000 rows). :type max_row_bytes: int :param max_row_bytes: (Optional) Max number of row mutations size to - flush. If it reaches the max number of row mutations size it calls - finish_batch() to mutate the current row batch. Default is MAX_ROW_BYTES - (5 MB). - - :type flush_interval: float - :param flush_interval: (Optional) The interval (in seconds) between asynchronous flush. - Default is 1 second. + flush. If it reaches the max number of row mutations size it calls + finish_batch() to mutate the current row batch. Default is MAX_ROW_BYTES + (5 MB). """ - def __init__( - self, - table, - flush_count=FLUSH_COUNT, - max_row_bytes=MAX_MUTATION_SIZE, - flush_interval=1, - ): - self._rows = _MutationsBatchQueue( - max_mutation_bytes=max_row_bytes, flush_count=flush_count - ) + def __init__(self, table, flush_count=FLUSH_COUNT, max_row_bytes=MAX_ROW_BYTES): + self.rows = [] + self.total_mutation_count = 0 + self.total_size = 0 self.table = table - self._executor = concurrent.futures.ThreadPoolExecutor() - atexit.register(self.close) - self._timer = threading.Timer(flush_interval, self.flush) - self._timer.start() - self.flow_control = _FlowControl( - max_mutations=MAX_OUTSTANDING_ELEMENTS, - max_mutation_bytes=MAX_OUTSTANDING_BYTES, - ) - self.futures_mapping = {} - self.exceptions = queue.Queue() - - @property - def flush_count(self): - return self._rows.flush_count - - @property - def max_row_bytes(self): - return self._rows.max_mutation_bytes - - def __enter__(self): - """Starting the MutationsBatcher as a context manager""" - return self + self.flush_count = flush_count + self.max_row_bytes = max_row_bytes def mutate(self, row): """Add a row to the batch. If the current batch meets one of the size - limits, the batch is sent asynchronously. + limits, the batch is sent synchronously. For example: - .. literalinclude:: snippets_table.py + .. literalinclude:: snippets.py :start-after: [START bigtable_api_batcher_mutate] :end-before: [END bigtable_api_batcher_mutate] :dedent: 4 :type row: class - :param row: :class:`~google.cloud.bigtable.row.DirectRow`. + :param row: class:`~google.cloud.bigtable.row.DirectRow`. :raises: One of the following: - * :exc:`~.table._BigtableRetryableError` if any row returned a transient error. - * :exc:`RuntimeError` if the number of responses doesn't match the number of rows that were retried - """ - self._rows.put(row) + * :exc:`~.table._BigtableRetryableError` if any + row returned a transient error. + * :exc:`RuntimeError` if the number of responses doesn't + match the number of rows that were retried + * :exc:`.batcher.MaxMutationsError` if any row exceeds max + mutations count. 
+ """ + mutation_count = len(row._get_mutations()) + if mutation_count > MAX_MUTATIONS: + raise MaxMutationsError( + "The row key {} exceeds the number of mutations {}.".format( + row.row_key, mutation_count + ) + ) + + if (self.total_mutation_count + mutation_count) >= MAX_MUTATIONS: + self.flush() + + self.rows.append(row) + self.total_mutation_count += mutation_count + self.total_size += row.get_mutations_size() - if self._rows.full(): - self._flush_async() + if self.total_size >= self.max_row_bytes or len(self.rows) >= self.flush_count: + self.flush() def mutate_rows(self, rows): """Add multiple rows to the batch. If the current batch meets one of the size - limits, the batch is sent asynchronously. + limits, the batch is sent synchronously. For example: - .. literalinclude:: snippets_table.py + .. literalinclude:: snippets.py :start-after: [START bigtable_api_batcher_mutate_rows] :end-before: [END bigtable_api_batcher_mutate_rows] :dedent: 4 @@ -266,119 +119,28 @@ def mutate_rows(self, rows): :param rows: list:[`~google.cloud.bigtable.row.DirectRow`]. :raises: One of the following: - * :exc:`~.table._BigtableRetryableError` if any row returned a transient error. - * :exc:`RuntimeError` if the number of responses doesn't match the number of rows that were retried + * :exc:`~.table._BigtableRetryableError` if any + row returned a transient error. + * :exc:`RuntimeError` if the number of responses doesn't + match the number of rows that were retried + * :exc:`.batcher.MaxMutationsError` if any row exceeds max + mutations count. """ for row in rows: self.mutate(row) def flush(self): - """Sends the current batch to Cloud Bigtable synchronously. + """Sends the current. batch to Cloud Bigtable. For example: - .. literalinclude:: snippets_table.py + .. literalinclude:: snippets.py :start-after: [START bigtable_api_batcher_flush] :end-before: [END bigtable_api_batcher_flush] :dedent: 4 - :raises: - * :exc:`.batcherMutationsBatchError` if there's any error in the mutations. - """ - rows_to_flush = [] - while not self._rows.empty(): - rows_to_flush.append(self._rows.get()) - response = self._flush_rows(rows_to_flush) - return response - - def _flush_async(self): - """Sends the current batch to Cloud Bigtable asynchronously. - - :raises: - * :exc:`.batcherMutationsBatchError` if there's any error in the mutations. 
- """ - - rows_to_flush = [] - mutations_count = 0 - mutations_size = 0 - rows_count = 0 - batch_info = _BatchInfo() - - while not self._rows.empty(): - row = self._rows.get() - mutations_count += len(row._get_mutations()) - mutations_size += row.get_mutations_size() - rows_count += 1 - rows_to_flush.append(row) - batch_info.mutations_count = mutations_count - batch_info.rows_count = rows_count - batch_info.mutations_size = mutations_size - - if ( - rows_count >= self.flush_count - or mutations_size >= self.max_row_bytes - or mutations_count >= self.flow_control.max_mutations - or mutations_size >= self.flow_control.max_mutation_bytes - or self._rows.empty() # submit when it reached the end of the queue - ): - # wait for resources to become available, before submitting any new batch - self.flow_control.wait() - # once unblocked, submit a batch - # event flag will be set by control_flow to block subsequent thread, but not blocking this one - self.flow_control.control_flow(batch_info) - future = self._executor.submit(self._flush_rows, rows_to_flush) - self.futures_mapping[future] = batch_info - future.add_done_callback(self._batch_completed_callback) - - # reset and start a new batch - rows_to_flush = [] - mutations_size = 0 - rows_count = 0 - mutations_count = 0 - batch_info = _BatchInfo() - - def _batch_completed_callback(self, future): - """Callback for when the mutation has finished. - - Raise exceptions if there's any. - Release the resources locked by the flow control and allow enqueued tasks to be run. - """ - - processed_rows = self.futures_mapping[future] - self.flow_control.release(processed_rows) - del self.futures_mapping[future] - - def _flush_rows(self, rows_to_flush): - """Mutate the specified rows. - - :raises: - * :exc:`.batcherMutationsBatchError` if there's any error in the mutations. - """ - responses = [] - if len(rows_to_flush) > 0: - response = self.table.mutate_rows(rows_to_flush) - - for result in response: - if result.code != 0: - exc = from_grpc_status(result.code, result.message) - self.exceptions.put(exc) - responses.append(result) - - return responses - - def __exit__(self, exc_type, exc_value, exc_traceback): - """Clean up resources. Flush and shutdown the ThreadPoolExecutor.""" - self.close() - - def close(self): - """Clean up resources. Flush and shutdown the ThreadPoolExecutor. - Any errors will be raised. - - :raises: - * :exc:`.batcherMutationsBatchError` if there's any error in the mutations. 
""" - self.flush() - self._executor.shutdown(wait=True) - atexit.unregister(self.close) - if self.exceptions.qsize() > 0: - exc = list(self.exceptions.queue) - raise MutationsBatchError("Errors in batch mutations.", exc=exc) + if len(self.rows) != 0: + self.table.mutate_rows(self.rows) + self.total_mutation_count = 0 + self.total_size = 0 + self.rows = [] diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index e3191a7297da..8605992baf45 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -32,7 +32,7 @@ from google.cloud.bigtable.column_family import _gc_rule_from_pb from google.cloud.bigtable.column_family import ColumnFamily from google.cloud.bigtable.batcher import MutationsBatcher -from google.cloud.bigtable.batcher import FLUSH_COUNT, MAX_MUTATION_SIZE +from google.cloud.bigtable.batcher import FLUSH_COUNT, MAX_ROW_BYTES from google.cloud.bigtable.encryption_info import EncryptionInfo from google.cloud.bigtable.policy import Policy from google.cloud.bigtable.row import AppendRow @@ -844,9 +844,7 @@ def drop_by_prefix(self, row_key_prefix, timeout=None): request={"name": self.name, "row_key_prefix": _to_bytes(row_key_prefix)} ) - def mutations_batcher( - self, flush_count=FLUSH_COUNT, max_row_bytes=MAX_MUTATION_SIZE - ): + def mutations_batcher(self, flush_count=FLUSH_COUNT, max_row_bytes=MAX_ROW_BYTES): """Factory to create a mutation batcher associated with this instance. For example: diff --git a/packages/google-cloud-bigtable/tests/unit/test_batcher.py b/packages/google-cloud-bigtable/tests/unit/test_batcher.py index a238b2852363..9ae6ed175624 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_batcher.py +++ b/packages/google-cloud-bigtable/tests/unit/test_batcher.py @@ -14,118 +14,122 @@ import mock -import time - import pytest from google.cloud.bigtable.row import DirectRow -from google.cloud.bigtable.batcher import ( - _FlowControl, - MutationsBatcher, - MutationsBatchError, -) TABLE_ID = "table-id" TABLE_NAME = "/tables/" + TABLE_ID +def _make_mutation_batcher(table, **kw): + from google.cloud.bigtable.batcher import MutationsBatcher + + return MutationsBatcher(table, **kw) + + def test_mutation_batcher_constructor(): table = _Table(TABLE_NAME) - with MutationsBatcher(table) as mutation_batcher: - assert table is mutation_batcher.table + + mutation_batcher = _make_mutation_batcher(table) + assert table is mutation_batcher.table def test_mutation_batcher_mutate_row(): table = _Table(TABLE_NAME) - with MutationsBatcher(table=table) as mutation_batcher: + mutation_batcher = _make_mutation_batcher(table=table) - rows = [ - DirectRow(row_key=b"row_key"), - DirectRow(row_key=b"row_key_2"), - DirectRow(row_key=b"row_key_3"), - DirectRow(row_key=b"row_key_4"), - ] + rows = [ + DirectRow(row_key=b"row_key"), + DirectRow(row_key=b"row_key_2"), + DirectRow(row_key=b"row_key_3"), + DirectRow(row_key=b"row_key_4"), + ] - mutation_batcher.mutate_rows(rows) + mutation_batcher.mutate_rows(rows) + mutation_batcher.flush() assert table.mutation_calls == 1 def test_mutation_batcher_mutate(): table = _Table(TABLE_NAME) - with MutationsBatcher(table=table) as mutation_batcher: + mutation_batcher = _make_mutation_batcher(table=table) - row = DirectRow(row_key=b"row_key") - row.set_cell("cf1", b"c1", 1) - row.set_cell("cf1", b"c2", 2) - row.set_cell("cf1", b"c3", 3) - row.set_cell("cf1", b"c4", 4) + row = 
DirectRow(row_key=b"row_key") + row.set_cell("cf1", b"c1", 1) + row.set_cell("cf1", b"c2", 2) + row.set_cell("cf1", b"c3", 3) + row.set_cell("cf1", b"c4", 4) - mutation_batcher.mutate(row) + mutation_batcher.mutate(row) + + mutation_batcher.flush() assert table.mutation_calls == 1 def test_mutation_batcher_flush_w_no_rows(): table = _Table(TABLE_NAME) - with MutationsBatcher(table=table) as mutation_batcher: - mutation_batcher.flush() + mutation_batcher = _make_mutation_batcher(table=table) + mutation_batcher.flush() assert table.mutation_calls == 0 def test_mutation_batcher_mutate_w_max_flush_count(): table = _Table(TABLE_NAME) - with MutationsBatcher(table=table, flush_count=3) as mutation_batcher: + mutation_batcher = _make_mutation_batcher(table=table, flush_count=3) - row_1 = DirectRow(row_key=b"row_key_1") - row_2 = DirectRow(row_key=b"row_key_2") - row_3 = DirectRow(row_key=b"row_key_3") + row_1 = DirectRow(row_key=b"row_key_1") + row_2 = DirectRow(row_key=b"row_key_2") + row_3 = DirectRow(row_key=b"row_key_3") - mutation_batcher.mutate(row_1) - mutation_batcher.mutate(row_2) - mutation_batcher.mutate(row_3) + mutation_batcher.mutate(row_1) + mutation_batcher.mutate(row_2) + mutation_batcher.mutate(row_3) assert table.mutation_calls == 1 -@mock.patch("google.cloud.bigtable.batcher.MAX_OUTSTANDING_ELEMENTS", new=3) -def test_mutation_batcher_mutate_w_max_mutations(): +@mock.patch("google.cloud.bigtable.batcher.MAX_MUTATIONS", new=3) +def test_mutation_batcher_mutate_with_max_mutations_failure(): + from google.cloud.bigtable.batcher import MaxMutationsError + table = _Table(TABLE_NAME) - with MutationsBatcher(table=table) as mutation_batcher: + mutation_batcher = _make_mutation_batcher(table=table) - row = DirectRow(row_key=b"row_key") - row.set_cell("cf1", b"c1", 1) - row.set_cell("cf1", b"c2", 2) - row.set_cell("cf1", b"c3", 3) + row = DirectRow(row_key=b"row_key") + row.set_cell("cf1", b"c1", 1) + row.set_cell("cf1", b"c2", 2) + row.set_cell("cf1", b"c3", 3) + row.set_cell("cf1", b"c4", 4) + with pytest.raises(MaxMutationsError): mutation_batcher.mutate(row) - assert table.mutation_calls == 1 - -def test_mutation_batcher_mutate_w_max_row_bytes(): +@mock.patch("google.cloud.bigtable.batcher.MAX_MUTATIONS", new=3) +def test_mutation_batcher_mutate_w_max_mutations(): table = _Table(TABLE_NAME) - with MutationsBatcher( - table=table, max_row_bytes=3 * 1024 * 1024 - ) as mutation_batcher: + mutation_batcher = _make_mutation_batcher(table=table) - number_of_bytes = 1 * 1024 * 1024 - max_value = b"1" * number_of_bytes - - row = DirectRow(row_key=b"row_key") - row.set_cell("cf1", b"c1", max_value) - row.set_cell("cf1", b"c2", max_value) - row.set_cell("cf1", b"c3", max_value) + row = DirectRow(row_key=b"row_key") + row.set_cell("cf1", b"c1", 1) + row.set_cell("cf1", b"c2", 2) + row.set_cell("cf1", b"c3", 3) - mutation_batcher.mutate(row) + mutation_batcher.mutate(row) + mutation_batcher.flush() assert table.mutation_calls == 1 -def test_mutations_batcher_flushed_when_closed(): +def test_mutation_batcher_mutate_w_max_row_bytes(): table = _Table(TABLE_NAME) - mutation_batcher = MutationsBatcher(table=table, max_row_bytes=3 * 1024 * 1024) + mutation_batcher = _make_mutation_batcher( + table=table, max_row_bytes=3 * 1024 * 1024 + ) number_of_bytes = 1 * 1024 * 1024 max_value = b"1" * number_of_bytes @@ -133,108 +137,13 @@ def test_mutations_batcher_flushed_when_closed(): row = DirectRow(row_key=b"row_key") row.set_cell("cf1", b"c1", max_value) row.set_cell("cf1", b"c2", max_value) + 
row.set_cell("cf1", b"c3", max_value) mutation_batcher.mutate(row) - assert table.mutation_calls == 0 - - mutation_batcher.close() - - assert table.mutation_calls == 1 - - -def test_mutations_batcher_context_manager_flushed_when_closed(): - table = _Table(TABLE_NAME) - with MutationsBatcher( - table=table, max_row_bytes=3 * 1024 * 1024 - ) as mutation_batcher: - - number_of_bytes = 1 * 1024 * 1024 - max_value = b"1" * number_of_bytes - - row = DirectRow(row_key=b"row_key") - row.set_cell("cf1", b"c1", max_value) - row.set_cell("cf1", b"c2", max_value) - - mutation_batcher.mutate(row) assert table.mutation_calls == 1 -@mock.patch("google.cloud.bigtable.batcher.MutationsBatcher.flush") -def test_mutations_batcher_flush_interval(mocked_flush): - table = _Table(TABLE_NAME) - flush_interval = 0.5 - mutation_batcher = MutationsBatcher(table=table, flush_interval=flush_interval) - - assert mutation_batcher._timer.interval == flush_interval - mocked_flush.assert_not_called() - - time.sleep(0.4) - mocked_flush.assert_not_called() - - time.sleep(0.1) - mocked_flush.assert_called_once_with() - - mutation_batcher.close() - - -def test_mutations_batcher_response_with_error_codes(): - from google.rpc.status_pb2 import Status - - mocked_response = [Status(code=1), Status(code=5)] - - with mock.patch("tests.unit.test_batcher._Table") as mocked_table: - table = mocked_table.return_value - mutation_batcher = MutationsBatcher(table=table) - - row1 = DirectRow(row_key=b"row_key") - row2 = DirectRow(row_key=b"row_key") - table.mutate_rows.return_value = mocked_response - - mutation_batcher.mutate_rows([row1, row2]) - with pytest.raises(MutationsBatchError) as exc: - mutation_batcher.close() - assert exc.value.message == "Errors in batch mutations." - assert len(exc.value.exc) == 2 - - assert exc.value.exc[0].message == mocked_response[0].message - assert exc.value.exc[1].message == mocked_response[1].message - - -def test_flow_control_event_is_set_when_not_blocked(): - flow_control = _FlowControl() - - flow_control.set_flow_control_status() - assert flow_control.event.is_set() - - -def test_flow_control_event_is_not_set_when_blocked(): - flow_control = _FlowControl() - - flow_control.inflight_mutations = flow_control.max_mutations - flow_control.inflight_size = flow_control.max_mutation_bytes - - flow_control.set_flow_control_status() - assert not flow_control.event.is_set() - - -@mock.patch("concurrent.futures.ThreadPoolExecutor.submit") -def test_flush_async_batch_count(mocked_executor_submit): - table = _Table(TABLE_NAME) - mutation_batcher = MutationsBatcher(table=table, flush_count=2) - - number_of_bytes = 1 * 1024 * 1024 - max_value = b"1" * number_of_bytes - for index in range(5): - row = DirectRow(row_key=f"row_key_{index}") - row.set_cell("cf1", b"c1", max_value) - mutation_batcher.mutate(row) - mutation_batcher._flush_async() - - # 3 batches submitted. 2 batches of 2 items, and the last one a single item batch. 
- assert mocked_executor_submit.call_count == 3 - - class _Instance(object): def __init__(self, client=None): self._client = client @@ -247,8 +156,5 @@ def __init__(self, name, client=None): self.mutation_calls = 0 def mutate_rows(self, rows): - from google.rpc.status_pb2 import Status - self.mutation_calls += 1 - - return [Status(code=0) for _ in rows] + return rows From 7ccb2d30b663572f0d6c97eaf843960818ae4c2f Mon Sep 17 00:00:00 2001 From: Mattie Fu Date: Thu, 11 May 2023 15:59:32 -0400 Subject: [PATCH 721/892] Revert "fix: Revert "Feat: Threaded MutationsBatcher" (#773)" (#775) This reverts commit a767cff95d990994f85f5fd05cc10f952087b49d. --- .../google-cloud-bigtable/docs/batcher.rst | 6 + packages/google-cloud-bigtable/docs/usage.rst | 1 + .../google/cloud/bigtable/batcher.py | 366 +++++++++++++++--- .../google/cloud/bigtable/table.py | 6 +- .../tests/unit/test_batcher.py | 218 ++++++++--- 5 files changed, 469 insertions(+), 128 deletions(-) create mode 100644 packages/google-cloud-bigtable/docs/batcher.rst diff --git a/packages/google-cloud-bigtable/docs/batcher.rst b/packages/google-cloud-bigtable/docs/batcher.rst new file mode 100644 index 000000000000..9ac335be1841 --- /dev/null +++ b/packages/google-cloud-bigtable/docs/batcher.rst @@ -0,0 +1,6 @@ +Mutations Batching +~~~~~~~~~~~~~~~~~~ + +.. automodule:: google.cloud.bigtable.batcher + :members: + :show-inheritance: diff --git a/packages/google-cloud-bigtable/docs/usage.rst b/packages/google-cloud-bigtable/docs/usage.rst index 33bf7bb7fd21..73a32b03938f 100644 --- a/packages/google-cloud-bigtable/docs/usage.rst +++ b/packages/google-cloud-bigtable/docs/usage.rst @@ -17,6 +17,7 @@ Using the API row-data row-filters row-set + batcher In the hierarchy of API concepts diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py b/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py index 3c23f44363ce..6b06ec060870 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py @@ -13,104 +13,251 @@ # limitations under the License. """User friendly container for Google Cloud Bigtable MutationBatcher.""" +import threading +import queue +import concurrent.futures +import atexit -FLUSH_COUNT = 1000 -MAX_MUTATIONS = 100000 -MAX_ROW_BYTES = 5242880 # 5MB +from google.api_core.exceptions import from_grpc_status +from dataclasses import dataclass -class MaxMutationsError(ValueError): - """The number of mutations for bulk request is too big.""" +FLUSH_COUNT = 100 # after this many elements, send out the batch + +MAX_MUTATION_SIZE = 20 * 1024 * 1024 # 20MB # after this many bytes, send out the batch + +MAX_OUTSTANDING_BYTES = 100 * 1024 * 1024 # 100MB # max inflight byte size. + +MAX_OUTSTANDING_ELEMENTS = 100000 # max inflight mutations. + + +class MutationsBatchError(Exception): + """Error in the batch request""" + + def __init__(self, message, exc): + self.exc = exc + self.message = message + super().__init__(self.message) + + +class _MutationsBatchQueue(object): + """Private Threadsafe Queue to hold rows for batching.""" + + def __init__(self, max_mutation_bytes=MAX_MUTATION_SIZE, flush_count=FLUSH_COUNT): + """Specify the queue constraints""" + self._queue = queue.Queue() + self.total_mutation_count = 0 + self.total_size = 0 + self.max_mutation_bytes = max_mutation_bytes + self.flush_count = flush_count + + def get(self): + """Retrieve an item from the queue. 
Recalculate queue size.""" + row = self._queue.get() + mutation_size = row.get_mutations_size() + self.total_mutation_count -= len(row._get_mutations()) + self.total_size -= mutation_size + return row + + def put(self, item): + """Insert an item to the queue. Recalculate queue size.""" + + mutation_count = len(item._get_mutations()) + + self._queue.put(item) + + self.total_size += item.get_mutations_size() + self.total_mutation_count += mutation_count + + def full(self): + """Check if the queue is full.""" + if ( + self.total_mutation_count >= self.flush_count + or self.total_size >= self.max_mutation_bytes + ): + return True + return False + + def empty(self): + return self._queue.empty() + + +@dataclass +class _BatchInfo: + """Keeping track of size of a batch""" + + mutations_count: int = 0 + rows_count: int = 0 + mutations_size: int = 0 + + +class _FlowControl(object): + def __init__( + self, + max_mutations=MAX_OUTSTANDING_ELEMENTS, + max_mutation_bytes=MAX_OUTSTANDING_BYTES, + ): + """Control the inflight requests. Keep track of the mutations, row bytes and row counts. + As requests to backend are being made, adjust the number of mutations being processed. + + If threshold is reached, block the flow. + Reopen the flow as requests are finished. + """ + self.max_mutations = max_mutations + self.max_mutation_bytes = max_mutation_bytes + self.inflight_mutations = 0 + self.inflight_size = 0 + self.event = threading.Event() + self.event.set() + + def is_blocked(self): + """Returns True if: + + - inflight mutations >= max_mutations, or + - inflight bytes size >= max_mutation_bytes, or + """ + + return ( + self.inflight_mutations >= self.max_mutations + or self.inflight_size >= self.max_mutation_bytes + ) + + def control_flow(self, batch_info): + """ + Calculate the resources used by this batch + """ + + self.inflight_mutations += batch_info.mutations_count + self.inflight_size += batch_info.mutations_size + self.set_flow_control_status() + + def wait(self): + """ + Wait until flow control pushback has been released. + It awakens as soon as `event` is set. + """ + self.event.wait() + + def set_flow_control_status(self): + """Check the inflight mutations and size. + + If values exceed the allowed threshold, block the event. + """ + if self.is_blocked(): + self.event.clear() # sleep + else: + self.event.set() # awaken the threads + + def release(self, batch_info): + """ + Release the resources. + Decrement the row size to allow enqueued mutations to be run. + """ + self.inflight_mutations -= batch_info.mutations_count + self.inflight_size -= batch_info.mutations_size + self.set_flow_control_status() class MutationsBatcher(object): """A MutationsBatcher is used in batch cases where the number of mutations - is large or unknown. It will store DirectRows in memory until one of the - size limits is reached, or an explicit call to flush() is performed. When - a flush event occurs, the DirectRows in memory will be sent to Cloud + is large or unknown. It will store :class:`DirectRow` in memory until one of the + size limits is reached, or an explicit call to :func:`flush()` is performed. When + a flush event occurs, the :class:`DirectRow` in memory will be sent to Cloud Bigtable. Batching mutations is more efficient than sending individual request. This class is not suited for usage in systems where each mutation must be guaranteed to be sent, since calling mutate may only result in an - in-memory change. In a case of a system crash, any DirectRows remaining in + in-memory change. 
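
The queue wrapper re-introduced above tracks the running mutation count and byte size so the batcher can decide when to hand work to the executor. An illustrative sketch (not part of the patch; the limits are deliberately tiny):

    from google.cloud.bigtable.batcher import _MutationsBatchQueue
    from google.cloud.bigtable.row import DirectRow

    q = _MutationsBatchQueue(max_mutation_bytes=1024, flush_count=2)

    row_1 = DirectRow(row_key=b"row-1")
    row_1.set_cell("cf1", b"q", b"v")
    q.put(row_1)           # totals grow by the row's mutation count and serialized size
    assert not q.full()    # one mutation < flush_count, bytes well under the limit

    row_2 = DirectRow(row_key=b"row-2")
    row_2.set_cell("cf1", b"q", b"v")
    q.put(row_2)
    assert q.full()        # total mutation count reached flush_count -> batcher flushes async
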
In a case of a system crash, any :class:`DirectRow` remaining in memory will not necessarily be sent to the service, even after the - completion of the mutate() method. + completion of the :func:`mutate()` method. - TODO: Performance would dramatically improve if this class had the - capability of asynchronous, parallel RPCs. + Note on thread safety: The same :class:`MutationBatcher` cannot be shared by multiple end-user threads. :type table: class :param table: class:`~google.cloud.bigtable.table.Table`. :type flush_count: int :param flush_count: (Optional) Max number of rows to flush. If it - reaches the max number of rows it calls finish_batch() to mutate the - current row batch. Default is FLUSH_COUNT (1000 rows). + reaches the max number of rows it calls finish_batch() to mutate the + current row batch. Default is FLUSH_COUNT (1000 rows). :type max_row_bytes: int :param max_row_bytes: (Optional) Max number of row mutations size to - flush. If it reaches the max number of row mutations size it calls - finish_batch() to mutate the current row batch. Default is MAX_ROW_BYTES - (5 MB). + flush. If it reaches the max number of row mutations size it calls + finish_batch() to mutate the current row batch. Default is MAX_ROW_BYTES + (5 MB). + + :type flush_interval: float + :param flush_interval: (Optional) The interval (in seconds) between asynchronous flush. + Default is 1 second. """ - def __init__(self, table, flush_count=FLUSH_COUNT, max_row_bytes=MAX_ROW_BYTES): - self.rows = [] - self.total_mutation_count = 0 - self.total_size = 0 + def __init__( + self, + table, + flush_count=FLUSH_COUNT, + max_row_bytes=MAX_MUTATION_SIZE, + flush_interval=1, + ): + self._rows = _MutationsBatchQueue( + max_mutation_bytes=max_row_bytes, flush_count=flush_count + ) self.table = table - self.flush_count = flush_count - self.max_row_bytes = max_row_bytes + self._executor = concurrent.futures.ThreadPoolExecutor() + atexit.register(self.close) + self._timer = threading.Timer(flush_interval, self.flush) + self._timer.start() + self.flow_control = _FlowControl( + max_mutations=MAX_OUTSTANDING_ELEMENTS, + max_mutation_bytes=MAX_OUTSTANDING_BYTES, + ) + self.futures_mapping = {} + self.exceptions = queue.Queue() + + @property + def flush_count(self): + return self._rows.flush_count + + @property + def max_row_bytes(self): + return self._rows.max_mutation_bytes + + def __enter__(self): + """Starting the MutationsBatcher as a context manager""" + return self def mutate(self, row): """Add a row to the batch. If the current batch meets one of the size - limits, the batch is sent synchronously. + limits, the batch is sent asynchronously. For example: - .. literalinclude:: snippets.py + .. literalinclude:: snippets_table.py :start-after: [START bigtable_api_batcher_mutate] :end-before: [END bigtable_api_batcher_mutate] :dedent: 4 :type row: class - :param row: class:`~google.cloud.bigtable.row.DirectRow`. + :param row: :class:`~google.cloud.bigtable.row.DirectRow`. :raises: One of the following: - * :exc:`~.table._BigtableRetryableError` if any - row returned a transient error. - * :exc:`RuntimeError` if the number of responses doesn't - match the number of rows that were retried - * :exc:`.batcher.MaxMutationsError` if any row exceeds max - mutations count. 
- """ - mutation_count = len(row._get_mutations()) - if mutation_count > MAX_MUTATIONS: - raise MaxMutationsError( - "The row key {} exceeds the number of mutations {}.".format( - row.row_key, mutation_count - ) - ) - - if (self.total_mutation_count + mutation_count) >= MAX_MUTATIONS: - self.flush() - - self.rows.append(row) - self.total_mutation_count += mutation_count - self.total_size += row.get_mutations_size() + * :exc:`~.table._BigtableRetryableError` if any row returned a transient error. + * :exc:`RuntimeError` if the number of responses doesn't match the number of rows that were retried + """ + self._rows.put(row) - if self.total_size >= self.max_row_bytes or len(self.rows) >= self.flush_count: - self.flush() + if self._rows.full(): + self._flush_async() def mutate_rows(self, rows): """Add multiple rows to the batch. If the current batch meets one of the size - limits, the batch is sent synchronously. + limits, the batch is sent asynchronously. For example: - .. literalinclude:: snippets.py + .. literalinclude:: snippets_table.py :start-after: [START bigtable_api_batcher_mutate_rows] :end-before: [END bigtable_api_batcher_mutate_rows] :dedent: 4 @@ -119,28 +266,119 @@ def mutate_rows(self, rows): :param rows: list:[`~google.cloud.bigtable.row.DirectRow`]. :raises: One of the following: - * :exc:`~.table._BigtableRetryableError` if any - row returned a transient error. - * :exc:`RuntimeError` if the number of responses doesn't - match the number of rows that were retried - * :exc:`.batcher.MaxMutationsError` if any row exceeds max - mutations count. + * :exc:`~.table._BigtableRetryableError` if any row returned a transient error. + * :exc:`RuntimeError` if the number of responses doesn't match the number of rows that were retried """ for row in rows: self.mutate(row) def flush(self): - """Sends the current. batch to Cloud Bigtable. + """Sends the current batch to Cloud Bigtable synchronously. For example: - .. literalinclude:: snippets.py + .. literalinclude:: snippets_table.py :start-after: [START bigtable_api_batcher_flush] :end-before: [END bigtable_api_batcher_flush] :dedent: 4 + :raises: + * :exc:`.batcherMutationsBatchError` if there's any error in the mutations. + """ + rows_to_flush = [] + while not self._rows.empty(): + rows_to_flush.append(self._rows.get()) + response = self._flush_rows(rows_to_flush) + return response + + def _flush_async(self): + """Sends the current batch to Cloud Bigtable asynchronously. + + :raises: + * :exc:`.batcherMutationsBatchError` if there's any error in the mutations. 
+ """ + + rows_to_flush = [] + mutations_count = 0 + mutations_size = 0 + rows_count = 0 + batch_info = _BatchInfo() + + while not self._rows.empty(): + row = self._rows.get() + mutations_count += len(row._get_mutations()) + mutations_size += row.get_mutations_size() + rows_count += 1 + rows_to_flush.append(row) + batch_info.mutations_count = mutations_count + batch_info.rows_count = rows_count + batch_info.mutations_size = mutations_size + + if ( + rows_count >= self.flush_count + or mutations_size >= self.max_row_bytes + or mutations_count >= self.flow_control.max_mutations + or mutations_size >= self.flow_control.max_mutation_bytes + or self._rows.empty() # submit when it reached the end of the queue + ): + # wait for resources to become available, before submitting any new batch + self.flow_control.wait() + # once unblocked, submit a batch + # event flag will be set by control_flow to block subsequent thread, but not blocking this one + self.flow_control.control_flow(batch_info) + future = self._executor.submit(self._flush_rows, rows_to_flush) + self.futures_mapping[future] = batch_info + future.add_done_callback(self._batch_completed_callback) + + # reset and start a new batch + rows_to_flush = [] + mutations_size = 0 + rows_count = 0 + mutations_count = 0 + batch_info = _BatchInfo() + + def _batch_completed_callback(self, future): + """Callback for when the mutation has finished. + + Raise exceptions if there's any. + Release the resources locked by the flow control and allow enqueued tasks to be run. + """ + + processed_rows = self.futures_mapping[future] + self.flow_control.release(processed_rows) + del self.futures_mapping[future] + + def _flush_rows(self, rows_to_flush): + """Mutate the specified rows. + + :raises: + * :exc:`.batcherMutationsBatchError` if there's any error in the mutations. + """ + responses = [] + if len(rows_to_flush) > 0: + response = self.table.mutate_rows(rows_to_flush) + + for result in response: + if result.code != 0: + exc = from_grpc_status(result.code, result.message) + self.exceptions.put(exc) + responses.append(result) + + return responses + + def __exit__(self, exc_type, exc_value, exc_traceback): + """Clean up resources. Flush and shutdown the ThreadPoolExecutor.""" + self.close() + + def close(self): + """Clean up resources. Flush and shutdown the ThreadPoolExecutor. + Any errors will be raised. + + :raises: + * :exc:`.batcherMutationsBatchError` if there's any error in the mutations. 
""" - if len(self.rows) != 0: - self.table.mutate_rows(self.rows) - self.total_mutation_count = 0 - self.total_size = 0 - self.rows = [] + self.flush() + self._executor.shutdown(wait=True) + atexit.unregister(self.close) + if self.exceptions.qsize() > 0: + exc = list(self.exceptions.queue) + raise MutationsBatchError("Errors in batch mutations.", exc=exc) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index 8605992baf45..e3191a7297da 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -32,7 +32,7 @@ from google.cloud.bigtable.column_family import _gc_rule_from_pb from google.cloud.bigtable.column_family import ColumnFamily from google.cloud.bigtable.batcher import MutationsBatcher -from google.cloud.bigtable.batcher import FLUSH_COUNT, MAX_ROW_BYTES +from google.cloud.bigtable.batcher import FLUSH_COUNT, MAX_MUTATION_SIZE from google.cloud.bigtable.encryption_info import EncryptionInfo from google.cloud.bigtable.policy import Policy from google.cloud.bigtable.row import AppendRow @@ -844,7 +844,9 @@ def drop_by_prefix(self, row_key_prefix, timeout=None): request={"name": self.name, "row_key_prefix": _to_bytes(row_key_prefix)} ) - def mutations_batcher(self, flush_count=FLUSH_COUNT, max_row_bytes=MAX_ROW_BYTES): + def mutations_batcher( + self, flush_count=FLUSH_COUNT, max_row_bytes=MAX_MUTATION_SIZE + ): """Factory to create a mutation batcher associated with this instance. For example: diff --git a/packages/google-cloud-bigtable/tests/unit/test_batcher.py b/packages/google-cloud-bigtable/tests/unit/test_batcher.py index 9ae6ed175624..a238b2852363 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_batcher.py +++ b/packages/google-cloud-bigtable/tests/unit/test_batcher.py @@ -14,122 +14,118 @@ import mock +import time + import pytest from google.cloud.bigtable.row import DirectRow +from google.cloud.bigtable.batcher import ( + _FlowControl, + MutationsBatcher, + MutationsBatchError, +) TABLE_ID = "table-id" TABLE_NAME = "/tables/" + TABLE_ID -def _make_mutation_batcher(table, **kw): - from google.cloud.bigtable.batcher import MutationsBatcher - - return MutationsBatcher(table, **kw) - - def test_mutation_batcher_constructor(): table = _Table(TABLE_NAME) - - mutation_batcher = _make_mutation_batcher(table) - assert table is mutation_batcher.table + with MutationsBatcher(table) as mutation_batcher: + assert table is mutation_batcher.table def test_mutation_batcher_mutate_row(): table = _Table(TABLE_NAME) - mutation_batcher = _make_mutation_batcher(table=table) + with MutationsBatcher(table=table) as mutation_batcher: - rows = [ - DirectRow(row_key=b"row_key"), - DirectRow(row_key=b"row_key_2"), - DirectRow(row_key=b"row_key_3"), - DirectRow(row_key=b"row_key_4"), - ] + rows = [ + DirectRow(row_key=b"row_key"), + DirectRow(row_key=b"row_key_2"), + DirectRow(row_key=b"row_key_3"), + DirectRow(row_key=b"row_key_4"), + ] - mutation_batcher.mutate_rows(rows) - mutation_batcher.flush() + mutation_batcher.mutate_rows(rows) assert table.mutation_calls == 1 def test_mutation_batcher_mutate(): table = _Table(TABLE_NAME) - mutation_batcher = _make_mutation_batcher(table=table) + with MutationsBatcher(table=table) as mutation_batcher: - row = DirectRow(row_key=b"row_key") - row.set_cell("cf1", b"c1", 1) - row.set_cell("cf1", b"c2", 2) - row.set_cell("cf1", b"c3", 3) - row.set_cell("cf1", b"c4", 4) - - 
mutation_batcher.mutate(row) + row = DirectRow(row_key=b"row_key") + row.set_cell("cf1", b"c1", 1) + row.set_cell("cf1", b"c2", 2) + row.set_cell("cf1", b"c3", 3) + row.set_cell("cf1", b"c4", 4) - mutation_batcher.flush() + mutation_batcher.mutate(row) assert table.mutation_calls == 1 def test_mutation_batcher_flush_w_no_rows(): table = _Table(TABLE_NAME) - mutation_batcher = _make_mutation_batcher(table=table) - mutation_batcher.flush() + with MutationsBatcher(table=table) as mutation_batcher: + mutation_batcher.flush() assert table.mutation_calls == 0 def test_mutation_batcher_mutate_w_max_flush_count(): table = _Table(TABLE_NAME) - mutation_batcher = _make_mutation_batcher(table=table, flush_count=3) + with MutationsBatcher(table=table, flush_count=3) as mutation_batcher: - row_1 = DirectRow(row_key=b"row_key_1") - row_2 = DirectRow(row_key=b"row_key_2") - row_3 = DirectRow(row_key=b"row_key_3") + row_1 = DirectRow(row_key=b"row_key_1") + row_2 = DirectRow(row_key=b"row_key_2") + row_3 = DirectRow(row_key=b"row_key_3") - mutation_batcher.mutate(row_1) - mutation_batcher.mutate(row_2) - mutation_batcher.mutate(row_3) + mutation_batcher.mutate(row_1) + mutation_batcher.mutate(row_2) + mutation_batcher.mutate(row_3) assert table.mutation_calls == 1 -@mock.patch("google.cloud.bigtable.batcher.MAX_MUTATIONS", new=3) -def test_mutation_batcher_mutate_with_max_mutations_failure(): - from google.cloud.bigtable.batcher import MaxMutationsError - +@mock.patch("google.cloud.bigtable.batcher.MAX_OUTSTANDING_ELEMENTS", new=3) +def test_mutation_batcher_mutate_w_max_mutations(): table = _Table(TABLE_NAME) - mutation_batcher = _make_mutation_batcher(table=table) + with MutationsBatcher(table=table) as mutation_batcher: - row = DirectRow(row_key=b"row_key") - row.set_cell("cf1", b"c1", 1) - row.set_cell("cf1", b"c2", 2) - row.set_cell("cf1", b"c3", 3) - row.set_cell("cf1", b"c4", 4) + row = DirectRow(row_key=b"row_key") + row.set_cell("cf1", b"c1", 1) + row.set_cell("cf1", b"c2", 2) + row.set_cell("cf1", b"c3", 3) - with pytest.raises(MaxMutationsError): mutation_batcher.mutate(row) + assert table.mutation_calls == 1 + -@mock.patch("google.cloud.bigtable.batcher.MAX_MUTATIONS", new=3) -def test_mutation_batcher_mutate_w_max_mutations(): +def test_mutation_batcher_mutate_w_max_row_bytes(): table = _Table(TABLE_NAME) - mutation_batcher = _make_mutation_batcher(table=table) + with MutationsBatcher( + table=table, max_row_bytes=3 * 1024 * 1024 + ) as mutation_batcher: - row = DirectRow(row_key=b"row_key") - row.set_cell("cf1", b"c1", 1) - row.set_cell("cf1", b"c2", 2) - row.set_cell("cf1", b"c3", 3) + number_of_bytes = 1 * 1024 * 1024 + max_value = b"1" * number_of_bytes - mutation_batcher.mutate(row) - mutation_batcher.flush() + row = DirectRow(row_key=b"row_key") + row.set_cell("cf1", b"c1", max_value) + row.set_cell("cf1", b"c2", max_value) + row.set_cell("cf1", b"c3", max_value) + + mutation_batcher.mutate(row) assert table.mutation_calls == 1 -def test_mutation_batcher_mutate_w_max_row_bytes(): +def test_mutations_batcher_flushed_when_closed(): table = _Table(TABLE_NAME) - mutation_batcher = _make_mutation_batcher( - table=table, max_row_bytes=3 * 1024 * 1024 - ) + mutation_batcher = MutationsBatcher(table=table, max_row_bytes=3 * 1024 * 1024) number_of_bytes = 1 * 1024 * 1024 max_value = b"1" * number_of_bytes @@ -137,13 +133,108 @@ def test_mutation_batcher_mutate_w_max_row_bytes(): row = DirectRow(row_key=b"row_key") row.set_cell("cf1", b"c1", max_value) row.set_cell("cf1", b"c2", max_value) - 
row.set_cell("cf1", b"c3", max_value) mutation_batcher.mutate(row) + assert table.mutation_calls == 0 + + mutation_batcher.close() + + assert table.mutation_calls == 1 + + +def test_mutations_batcher_context_manager_flushed_when_closed(): + table = _Table(TABLE_NAME) + with MutationsBatcher( + table=table, max_row_bytes=3 * 1024 * 1024 + ) as mutation_batcher: + + number_of_bytes = 1 * 1024 * 1024 + max_value = b"1" * number_of_bytes + + row = DirectRow(row_key=b"row_key") + row.set_cell("cf1", b"c1", max_value) + row.set_cell("cf1", b"c2", max_value) + + mutation_batcher.mutate(row) assert table.mutation_calls == 1 +@mock.patch("google.cloud.bigtable.batcher.MutationsBatcher.flush") +def test_mutations_batcher_flush_interval(mocked_flush): + table = _Table(TABLE_NAME) + flush_interval = 0.5 + mutation_batcher = MutationsBatcher(table=table, flush_interval=flush_interval) + + assert mutation_batcher._timer.interval == flush_interval + mocked_flush.assert_not_called() + + time.sleep(0.4) + mocked_flush.assert_not_called() + + time.sleep(0.1) + mocked_flush.assert_called_once_with() + + mutation_batcher.close() + + +def test_mutations_batcher_response_with_error_codes(): + from google.rpc.status_pb2 import Status + + mocked_response = [Status(code=1), Status(code=5)] + + with mock.patch("tests.unit.test_batcher._Table") as mocked_table: + table = mocked_table.return_value + mutation_batcher = MutationsBatcher(table=table) + + row1 = DirectRow(row_key=b"row_key") + row2 = DirectRow(row_key=b"row_key") + table.mutate_rows.return_value = mocked_response + + mutation_batcher.mutate_rows([row1, row2]) + with pytest.raises(MutationsBatchError) as exc: + mutation_batcher.close() + assert exc.value.message == "Errors in batch mutations." + assert len(exc.value.exc) == 2 + + assert exc.value.exc[0].message == mocked_response[0].message + assert exc.value.exc[1].message == mocked_response[1].message + + +def test_flow_control_event_is_set_when_not_blocked(): + flow_control = _FlowControl() + + flow_control.set_flow_control_status() + assert flow_control.event.is_set() + + +def test_flow_control_event_is_not_set_when_blocked(): + flow_control = _FlowControl() + + flow_control.inflight_mutations = flow_control.max_mutations + flow_control.inflight_size = flow_control.max_mutation_bytes + + flow_control.set_flow_control_status() + assert not flow_control.event.is_set() + + +@mock.patch("concurrent.futures.ThreadPoolExecutor.submit") +def test_flush_async_batch_count(mocked_executor_submit): + table = _Table(TABLE_NAME) + mutation_batcher = MutationsBatcher(table=table, flush_count=2) + + number_of_bytes = 1 * 1024 * 1024 + max_value = b"1" * number_of_bytes + for index in range(5): + row = DirectRow(row_key=f"row_key_{index}") + row.set_cell("cf1", b"c1", max_value) + mutation_batcher.mutate(row) + mutation_batcher._flush_async() + + # 3 batches submitted. 2 batches of 2 items, and the last one a single item batch. 
+ assert mocked_executor_submit.call_count == 3 + + class _Instance(object): def __init__(self, client=None): self._client = client @@ -156,5 +247,8 @@ def __init__(self, name, client=None): self.mutation_calls = 0 def mutate_rows(self, rows): + from google.rpc.status_pb2 import Status + self.mutation_calls += 1 - return rows + + return [Status(code=0) for _ in rows] From f2c97b864d0d72f541f2d770ba78f4b360931589 Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Tue, 16 May 2023 11:56:46 +0200 Subject: [PATCH 722/892] chore(main): release 2.18.1 (#774) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- .../google-cloud-bigtable/.release-please-manifest.json | 2 +- packages/google-cloud-bigtable/CHANGELOG.md | 7 +++++++ .../google/cloud/bigtable/gapic_version.py | 2 +- .../google/cloud/bigtable_admin/gapic_version.py | 2 +- .../google/cloud/bigtable_admin_v2/gapic_version.py | 2 +- .../google/cloud/bigtable_v2/gapic_version.py | 2 +- 6 files changed, 12 insertions(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/.release-please-manifest.json b/packages/google-cloud-bigtable/.release-please-manifest.json index a627e662e002..e7a7a136bea6 100644 --- a/packages/google-cloud-bigtable/.release-please-manifest.json +++ b/packages/google-cloud-bigtable/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "2.18.0" + ".": "2.18.1" } \ No newline at end of file diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 2d7fe51413be..d56f02896f00 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,13 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.18.1](https://github.com/googleapis/python-bigtable/compare/v2.18.0...v2.18.1) (2023-05-11) + + +### Bug Fixes + +* Revert "Feat: Threaded MutationsBatcher" ([#773](https://github.com/googleapis/python-bigtable/issues/773)) ([a767cff](https://github.com/googleapis/python-bigtable/commit/a767cff95d990994f85f5fd05cc10f952087b49d)) + ## [2.18.0](https://github.com/googleapis/python-bigtable/compare/v2.17.0...v2.18.0) (2023-05-10) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py index f09943f6bdf7..e1b4da1deb3e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.18.0" # {x-release-please-version} +__version__ = "2.18.1" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py index f09943f6bdf7..e1b4da1deb3e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "2.18.0" # {x-release-please-version} +__version__ = "2.18.1" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py index f09943f6bdf7..e1b4da1deb3e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.18.0" # {x-release-please-version} +__version__ = "2.18.1" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py index f09943f6bdf7..e1b4da1deb3e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.18.0" # {x-release-please-version} +__version__ = "2.18.1" # {x-release-please-version} From 530f8ec5d60a6f20d35427bf02dcc8cf11521c97 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 25 May 2023 10:31:36 -0400 Subject: [PATCH 723/892] feat: add ChangeStreamConfig to CreateTable and UpdateTable (#786) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: add ChangeStreamConfig to CreateTable and UpdateTable PiperOrigin-RevId: 534836567 Source-Link: https://github.com/googleapis/googleapis/commit/eb2d1f1555df526abd00aa475e8fd5d014af6489 Source-Link: https://github.com/googleapis/googleapis-gen/commit/64cebcfc2765bff5afb19c140d4b1600dfdaebad Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiNjRjZWJjZmMyNzY1YmZmNWFmYjE5YzE0MGQ0YjE2MDBkZmRhZWJhZCJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- .../google/cloud/bigtable_admin/__init__.py | 2 ++ .../cloud/bigtable_admin_v2/__init__.py | 2 ++ .../bigtable_table_admin/async_client.py | 13 +++++--- .../services/bigtable_table_admin/client.py | 13 +++++--- .../cloud/bigtable_admin_v2/types/__init__.py | 2 ++ .../types/bigtable_table_admin.py | 20 +++++++----- .../cloud/bigtable_admin_v2/types/table.py | 31 +++++++++++++++++++ .../test_bigtable_table_admin.py | 2 ++ 8 files changed, 67 insertions(+), 18 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/__init__.py index 6ddc6acb297b..0ba93ec63679 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/__init__.py @@ -200,6 +200,7 @@ from google.cloud.bigtable_admin_v2.types.instance import Instance from google.cloud.bigtable_admin_v2.types.table import Backup from google.cloud.bigtable_admin_v2.types.table import BackupInfo +from google.cloud.bigtable_admin_v2.types.table import ChangeStreamConfig from google.cloud.bigtable_admin_v2.types.table import ColumnFamily from google.cloud.bigtable_admin_v2.types.table import EncryptionInfo from 
google.cloud.bigtable_admin_v2.types.table import GcRule @@ -282,6 +283,7 @@ "Instance", "Backup", "BackupInfo", + "ChangeStreamConfig", "ColumnFamily", "EncryptionInfo", "GcRule", diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py index 282834fe7af4..c030ec1bdb7d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py @@ -92,6 +92,7 @@ from .types.instance import Instance from .types.table import Backup from .types.table import BackupInfo +from .types.table import ChangeStreamConfig from .types.table import ColumnFamily from .types.table import EncryptionInfo from .types.table import GcRule @@ -110,6 +111,7 @@ "BackupInfo", "BigtableInstanceAdminClient", "BigtableTableAdminClient", + "ChangeStreamConfig", "CheckConsistencyRequest", "CheckConsistencyResponse", "Cluster", diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index 91f059f8b41d..1663c16eb8c3 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -683,16 +683,19 @@ async def update_table( should not be set. update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): Required. The list of fields to update. A mask - specifying which fields (e.g. ``deletion_protection``) + specifying which fields (e.g. ``change_stream_config``) in the ``table`` field should be updated. This mask is relative to the ``table`` field, not to the request message. The wildcard (*) path is currently not supported. Currently UpdateTable is only supported for - the following field: + the following fields: - - ``deletion_protection`` If ``column_families`` is set - in ``update_mask``, it will return an UNIMPLEMENTED - error. + - ``change_stream_config`` + - ``change_stream_config.retention_period`` + - ``deletion_protection`` + + If ``column_families`` is set in ``update_mask``, it + will return an UNIMPLEMENTED error. This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index efceae90a76d..e043aa224b95 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -992,16 +992,19 @@ def update_table( should not be set. update_mask (google.protobuf.field_mask_pb2.FieldMask): Required. The list of fields to update. A mask - specifying which fields (e.g. ``deletion_protection``) + specifying which fields (e.g. ``change_stream_config``) in the ``table`` field should be updated. This mask is relative to the ``table`` field, not to the request message. The wildcard (*) path is currently not supported. 
Currently UpdateTable is only supported for - the following field: + the following fields: - - ``deletion_protection`` If ``column_families`` is set - in ``update_mask``, it will return an UNIMPLEMENTED - error. + - ``change_stream_config`` + - ``change_stream_config.retention_period`` + - ``deletion_protection`` + + If ``column_families`` is set in ``update_mask``, it + will return an UNIMPLEMENTED error. This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py index 5a66ddf09760..69153c9fc3b8 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py @@ -91,6 +91,7 @@ from .table import ( Backup, BackupInfo, + ChangeStreamConfig, ColumnFamily, EncryptionInfo, GcRule, @@ -170,6 +171,7 @@ "Instance", "Backup", "BackupInfo", + "ChangeStreamConfig", "ColumnFamily", "EncryptionInfo", "GcRule", diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py index 9b236fea96b1..4c4b9e9e2197 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py @@ -460,14 +460,18 @@ class UpdateTableRequest(proto.Message): used to identify the table to update. update_mask (google.protobuf.field_mask_pb2.FieldMask): Required. The list of fields to update. A mask specifying - which fields (e.g. ``deletion_protection``) in the ``table`` - field should be updated. This mask is relative to the - ``table`` field, not to the request message. The wildcard - (*) path is currently not supported. Currently UpdateTable - is only supported for the following field: - - - ``deletion_protection`` If ``column_families`` is set in - ``update_mask``, it will return an UNIMPLEMENTED error. + which fields (e.g. ``change_stream_config``) in the + ``table`` field should be updated. This mask is relative to + the ``table`` field, not to the request message. The + wildcard (*) path is currently not supported. Currently + UpdateTable is only supported for the following fields: + + - ``change_stream_config`` + - ``change_stream_config.retention_period`` + - ``deletion_protection`` + + If ``column_families`` is set in ``update_mask``, it will + return an UNIMPLEMENTED error. """ table: gba_table.Table = proto.Field( diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py index fd936df63e00..16d136e168d2 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py @@ -29,6 +29,7 @@ manifest={ "RestoreSourceType", "RestoreInfo", + "ChangeStreamConfig", "Table", "ColumnFamily", "GcRule", @@ -82,6 +83,27 @@ class RestoreInfo(proto.Message): ) +class ChangeStreamConfig(proto.Message): + r"""Change stream configuration. + + Attributes: + retention_period (google.protobuf.duration_pb2.Duration): + How long the change stream should be + retained. 
Change stream data older than the + retention period will not be returned when + reading the change stream from the table. + Values must be at least 1 day and at most 7 + days, and will be truncated to microsecond + granularity. + """ + + retention_period: duration_pb2.Duration = proto.Field( + proto.MESSAGE, + number=1, + message=duration_pb2.Duration, + ) + + class Table(proto.Message): r"""A collection of user data indexed by row, column, and timestamp. Each table is served using the resources of its @@ -114,6 +136,10 @@ class Table(proto.Message): another data source (e.g. a backup), this field will be populated with information about the restore. + change_stream_config (google.cloud.bigtable_admin_v2.types.ChangeStreamConfig): + If specified, enable the change stream on + this table. Otherwise, the change stream is + disabled and the change stream is not retained. deletion_protection (bool): Set to true to make the table protected against data loss. i.e. deleting the following @@ -263,6 +289,11 @@ class ReplicationState(proto.Enum): number=6, message="RestoreInfo", ) + change_stream_config: "ChangeStreamConfig" = proto.Field( + proto.MESSAGE, + number=8, + message="ChangeStreamConfig", + ) deletion_protection: bool = proto.Field( proto.BOOL, number=9, diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index 8e4004ab169a..8498e4fa563f 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -8202,6 +8202,7 @@ def test_update_table_rest(request_type): "source_table": "source_table_value", }, }, + "change_stream_config": {"retention_period": {"seconds": 751, "nanos": 543}}, "deletion_protection": True, } request = request_type(**request_init) @@ -8399,6 +8400,7 @@ def test_update_table_rest_bad_request( "source_table": "source_table_value", }, }, + "change_stream_config": {"retention_period": {"seconds": 751, "nanos": 543}}, "deletion_protection": True, } request = request_type(**request_init) From dc7ce5049f4240dd43e5392905efbd9155d9bbf2 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 25 May 2023 12:38:11 -0400 Subject: [PATCH 724/892] build(deps): bump requests from 2.28.1 to 2.31.0 in /synthtool/gcp/templates/python_library/.kokoro (#790) Source-Link: https://github.com/googleapis/synthtool/commit/30bd01b4ab78bf1b2a425816e15b3e7e090993dd Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:9bc5fa3b62b091f60614c08a7fb4fd1d3e1678e326f34dd66ce1eefb5dc3267b Co-authored-by: Owl Bot --- packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml | 3 ++- packages/google-cloud-bigtable/.kokoro/requirements.txt | 6 +++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index b8edda51cf46..32b3c486591a 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,4 +13,5 @@ # limitations under the License. 
docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:2e247c7bf5154df7f98cce087a20ca7605e236340c7d6d1a14447e5c06791bd6 + digest: sha256:9bc5fa3b62b091f60614c08a7fb4fd1d3e1678e326f34dd66ce1eefb5dc3267b +# created: 2023-05-25T14:56:16.294623272Z diff --git a/packages/google-cloud-bigtable/.kokoro/requirements.txt b/packages/google-cloud-bigtable/.kokoro/requirements.txt index 66a2172a76a8..3b8d7ee81848 100644 --- a/packages/google-cloud-bigtable/.kokoro/requirements.txt +++ b/packages/google-cloud-bigtable/.kokoro/requirements.txt @@ -419,9 +419,9 @@ readme-renderer==37.3 \ --hash=sha256:cd653186dfc73055656f090f227f5cb22a046d7f71a841dfa305f55c9a513273 \ --hash=sha256:f67a16caedfa71eef48a31b39708637a6f4664c4394801a7b0d6432d13907343 # via twine -requests==2.28.1 \ - --hash=sha256:7c5599b102feddaa661c826c56ab4fee28bfd17f5abca1ebbe3e7f19d7c97983 \ - --hash=sha256:8fefa2a1a1365bf5520aac41836fbee479da67864514bdb821f31ce07ce65349 +requests==2.31.0 \ + --hash=sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f \ + --hash=sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1 # via # gcp-releasetool # google-api-core From d9c52b7fa8fd39eb86986a8b1ddb36fb97b7d94a Mon Sep 17 00:00:00 2001 From: Sita Lakshmi Sangameswaran Date: Thu, 25 May 2023 22:29:46 +0530 Subject: [PATCH 725/892] docs(samples): add region tags (#788) * docs(samples): add read table snippet * remove snippet as it already exists --- packages/google-cloud-bigtable/samples/hello/main.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/samples/hello/main.py b/packages/google-cloud-bigtable/samples/hello/main.py index 7b2b1764a7ab..5e47b4a38bdb 100644 --- a/packages/google-cloud-bigtable/samples/hello/main.py +++ b/packages/google-cloud-bigtable/samples/hello/main.py @@ -87,26 +87,30 @@ def main(project_id, instance_id, table_id): # [START bigtable_hw_create_filter] # Create a filter to only retrieve the most recent version of the cell - # for each column accross entire row. + # for each column across entire row. 
row_filter = row_filters.CellsColumnLimitFilter(1) # [END bigtable_hw_create_filter] # [START bigtable_hw_get_with_filter] + # [START bigtable_hw_get_by_key] print("Getting a single greeting by row key.") key = "greeting0".encode() row = table.read_row(key, row_filter) cell = row.cells[column_family_id][column][0] print(cell.value.decode("utf-8")) + # [END bigtable_hw_get_by_key] # [END bigtable_hw_get_with_filter] # [START bigtable_hw_scan_with_filter] + # [START bigtable_hw_scan_all] print("Scanning for all greetings:") partial_rows = table.read_rows(filter_=row_filter) for row in partial_rows: cell = row.cells[column_family_id][column][0] print(cell.value.decode("utf-8")) + # [END bigtable_hw_scan_all] # [END bigtable_hw_scan_with_filter] # [START bigtable_hw_delete_table] From 50d2ab3b11b30f7a303ffbd0165123de6fb83e66 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Sat, 3 Jun 2023 18:32:05 -0400 Subject: [PATCH 726/892] build(deps): bump cryptography from 39.0.1 to 41.0.0 in /synthtool/gcp/templates/python_library/.kokoro (#793) Source-Link: https://github.com/googleapis/synthtool/commit/d0f51a0c2a9a6bcca86911eabea9e484baadf64b Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:240b5bcc2bafd450912d2da2be15e62bc6de2cf839823ae4bf94d4f392b451dc Co-authored-by: Owl Bot --- .../.github/.OwlBot.lock.yaml | 4 +- .../.kokoro/requirements.txt | 42 +++++++++---------- 2 files changed, 22 insertions(+), 24 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 32b3c486591a..02a4dedced74 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. 
docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:9bc5fa3b62b091f60614c08a7fb4fd1d3e1678e326f34dd66ce1eefb5dc3267b -# created: 2023-05-25T14:56:16.294623272Z + digest: sha256:240b5bcc2bafd450912d2da2be15e62bc6de2cf839823ae4bf94d4f392b451dc +# created: 2023-06-03T21:25:37.968717478Z diff --git a/packages/google-cloud-bigtable/.kokoro/requirements.txt b/packages/google-cloud-bigtable/.kokoro/requirements.txt index 3b8d7ee81848..c7929db6d152 100644 --- a/packages/google-cloud-bigtable/.kokoro/requirements.txt +++ b/packages/google-cloud-bigtable/.kokoro/requirements.txt @@ -113,28 +113,26 @@ commonmark==0.9.1 \ --hash=sha256:452f9dc859be7f06631ddcb328b6919c67984aca654e5fefb3914d54691aed60 \ --hash=sha256:da2f38c92590f83de410ba1a3cbceafbc74fee9def35f9251ba9a971d6d66fd9 # via rich -cryptography==39.0.1 \ - --hash=sha256:0f8da300b5c8af9f98111ffd512910bc792b4c77392a9523624680f7956a99d4 \ - --hash=sha256:35f7c7d015d474f4011e859e93e789c87d21f6f4880ebdc29896a60403328f1f \ - --hash=sha256:5aa67414fcdfa22cf052e640cb5ddc461924a045cacf325cd164e65312d99502 \ - --hash=sha256:5d2d8b87a490bfcd407ed9d49093793d0f75198a35e6eb1a923ce1ee86c62b41 \ - --hash=sha256:6687ef6d0a6497e2b58e7c5b852b53f62142cfa7cd1555795758934da363a965 \ - --hash=sha256:6f8ba7f0328b79f08bdacc3e4e66fb4d7aab0c3584e0bd41328dce5262e26b2e \ - --hash=sha256:706843b48f9a3f9b9911979761c91541e3d90db1ca905fd63fee540a217698bc \ - --hash=sha256:807ce09d4434881ca3a7594733669bd834f5b2c6d5c7e36f8c00f691887042ad \ - --hash=sha256:83e17b26de248c33f3acffb922748151d71827d6021d98c70e6c1a25ddd78505 \ - --hash=sha256:96f1157a7c08b5b189b16b47bc9db2332269d6680a196341bf30046330d15388 \ - --hash=sha256:aec5a6c9864be7df2240c382740fcf3b96928c46604eaa7f3091f58b878c0bb6 \ - --hash=sha256:b0afd054cd42f3d213bf82c629efb1ee5f22eba35bf0eec88ea9ea7304f511a2 \ - --hash=sha256:ced4e447ae29ca194449a3f1ce132ded8fcab06971ef5f618605aacaa612beac \ - --hash=sha256:d1f6198ee6d9148405e49887803907fe8962a23e6c6f83ea7d98f1c0de375695 \ - --hash=sha256:e124352fd3db36a9d4a21c1aa27fd5d051e621845cb87fb851c08f4f75ce8be6 \ - --hash=sha256:e422abdec8b5fa8462aa016786680720d78bdce7a30c652b7fadf83a4ba35336 \ - --hash=sha256:ef8b72fa70b348724ff1218267e7f7375b8de4e8194d1636ee60510aae104cd0 \ - --hash=sha256:f0c64d1bd842ca2633e74a1a28033d139368ad959872533b1bab8c80e8240a0c \ - --hash=sha256:f24077a3b5298a5a06a8e0536e3ea9ec60e4c7ac486755e5fb6e6ea9b3500106 \ - --hash=sha256:fdd188c8a6ef8769f148f88f859884507b954cc64db6b52f66ef199bb9ad660a \ - --hash=sha256:fe913f20024eb2cb2f323e42a64bdf2911bb9738a15dba7d3cce48151034e3a8 +cryptography==41.0.0 \ + --hash=sha256:0ddaee209d1cf1f180f1efa338a68c4621154de0afaef92b89486f5f96047c55 \ + --hash=sha256:14754bcdae909d66ff24b7b5f166d69340ccc6cb15731670435efd5719294895 \ + --hash=sha256:344c6de9f8bda3c425b3a41b319522ba3208551b70c2ae00099c205f0d9fd3be \ + --hash=sha256:34d405ea69a8b34566ba3dfb0521379b210ea5d560fafedf9f800a9a94a41928 \ + --hash=sha256:3680248309d340fda9611498a5319b0193a8dbdb73586a1acf8109d06f25b92d \ + --hash=sha256:3c5ef25d060c80d6d9f7f9892e1d41bb1c79b78ce74805b8cb4aa373cb7d5ec8 \ + --hash=sha256:4ab14d567f7bbe7f1cdff1c53d5324ed4d3fc8bd17c481b395db224fb405c237 \ + --hash=sha256:5c1f7293c31ebc72163a9a0df246f890d65f66b4a40d9ec80081969ba8c78cc9 \ + --hash=sha256:6b71f64beeea341c9b4f963b48ee3b62d62d57ba93eb120e1196b31dc1025e78 \ + --hash=sha256:7d92f0248d38faa411d17f4107fc0bce0c42cae0b0ba5415505df72d751bf62d \ + --hash=sha256:8362565b3835ceacf4dc8f3b56471a2289cf51ac80946f9087e66dc283a810e0 \ + 
--hash=sha256:84a165379cb9d411d58ed739e4af3396e544eac190805a54ba2e0322feb55c46 \ + --hash=sha256:88ff107f211ea696455ea8d911389f6d2b276aabf3231bf72c8853d22db755c5 \ + --hash=sha256:9f65e842cb02550fac96536edb1d17f24c0a338fd84eaf582be25926e993dde4 \ + --hash=sha256:a4fc68d1c5b951cfb72dfd54702afdbbf0fb7acdc9b7dc4301bbf2225a27714d \ + --hash=sha256:b7f2f5c525a642cecad24ee8670443ba27ac1fab81bba4cc24c7b6b41f2d0c75 \ + --hash=sha256:b846d59a8d5a9ba87e2c3d757ca019fa576793e8758174d3868aecb88d6fc8eb \ + --hash=sha256:bf8fc66012ca857d62f6a347007e166ed59c0bc150cefa49f28376ebe7d992a2 \ + --hash=sha256:f5d0bf9b252f30a31664b6f64432b4730bb7038339bd18b1fafe129cfc2be9be # via # gcp-releasetool # secretstorage From bee44c763f3d0ef18768b32c5d23e7b3696946dd Mon Sep 17 00:00:00 2001 From: Mattie Fu Date: Thu, 8 Jun 2023 16:36:43 -0400 Subject: [PATCH 727/892] fix: add a callback function on flush_rows (#796) * fix: add a callback function on flush_rows * reformat * address comments * update doc * update names * add a test --- .../google/cloud/bigtable/batcher.py | 13 +++++++++++- .../tests/unit/test_batcher.py | 21 +++++++++++++++++++ 2 files changed, 33 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py b/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py index 6b06ec060870..a6eb806e92ba 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py @@ -192,6 +192,11 @@ class MutationsBatcher(object): :type flush_interval: float :param flush_interval: (Optional) The interval (in seconds) between asynchronous flush. Default is 1 second. + + :type batch_completed_callback: Callable[list:[`~google.rpc.status_pb2.Status`]] = None + :param batch_completed_callback: (Optional) A callable for handling responses + after the current batch is sent. The callable function expect a list of grpc + Status. """ def __init__( @@ -200,6 +205,7 @@ def __init__( flush_count=FLUSH_COUNT, max_row_bytes=MAX_MUTATION_SIZE, flush_interval=1, + batch_completed_callback=None, ): self._rows = _MutationsBatchQueue( max_mutation_bytes=max_row_bytes, flush_count=flush_count @@ -215,6 +221,7 @@ def __init__( ) self.futures_mapping = {} self.exceptions = queue.Queue() + self._user_batch_completed_callback = batch_completed_callback @property def flush_count(self): @@ -337,7 +344,8 @@ def _flush_async(self): batch_info = _BatchInfo() def _batch_completed_callback(self, future): - """Callback for when the mutation has finished. + """Callback for when the mutation has finished to clean up the current batch + and release items from the flow controller. Raise exceptions if there's any. Release the resources locked by the flow control and allow enqueued tasks to be run. 
@@ -357,6 +365,9 @@ def _flush_rows(self, rows_to_flush): if len(rows_to_flush) > 0: response = self.table.mutate_rows(rows_to_flush) + if self._user_batch_completed_callback: + self._user_batch_completed_callback(response) + for result in response: if result.code != 0: exc = from_grpc_status(result.code, result.message) diff --git a/packages/google-cloud-bigtable/tests/unit/test_batcher.py b/packages/google-cloud-bigtable/tests/unit/test_batcher.py index a238b2852363..9987481415e6 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_batcher.py +++ b/packages/google-cloud-bigtable/tests/unit/test_batcher.py @@ -35,6 +35,27 @@ def test_mutation_batcher_constructor(): assert table is mutation_batcher.table +def test_mutation_batcher_w_user_callback(): + table = _Table(TABLE_NAME) + + def callback_fn(response): + callback_fn.count = len(response) + + with MutationsBatcher( + table, flush_count=1, batch_completed_callback=callback_fn + ) as mutation_batcher: + rows = [ + DirectRow(row_key=b"row_key"), + DirectRow(row_key=b"row_key_2"), + DirectRow(row_key=b"row_key_3"), + DirectRow(row_key=b"row_key_4"), + ] + + mutation_batcher.mutate_rows(rows) + + assert callback_fn.count == 4 + + def test_mutation_batcher_mutate_row(): table = _Table(TABLE_NAME) with MutationsBatcher(table=table) as mutation_batcher: From 31684d67866061faa3749415b05256e6c7c87570 Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Thu, 8 Jun 2023 18:02:10 -0400 Subject: [PATCH 728/892] chore(main): release 2.19.0 (#789) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- .../.release-please-manifest.json | 2 +- packages/google-cloud-bigtable/CHANGELOG.md | 17 +++++++++++++++++ .../google/cloud/bigtable/gapic_version.py | 2 +- .../cloud/bigtable_admin/gapic_version.py | 2 +- .../cloud/bigtable_admin_v2/gapic_version.py | 2 +- .../google/cloud/bigtable_v2/gapic_version.py | 2 +- 6 files changed, 22 insertions(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/.release-please-manifest.json b/packages/google-cloud-bigtable/.release-please-manifest.json index e7a7a136bea6..b7f666a684a7 100644 --- a/packages/google-cloud-bigtable/.release-please-manifest.json +++ b/packages/google-cloud-bigtable/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "2.18.1" + ".": "2.19.0" } \ No newline at end of file diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index d56f02896f00..dc80386a4f9a 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,23 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.19.0](https://github.com/googleapis/python-bigtable/compare/v2.18.1...v2.19.0) (2023-06-08) + + +### Features + +* Add ChangeStreamConfig to CreateTable and UpdateTable ([#786](https://github.com/googleapis/python-bigtable/issues/786)) ([cef70f2](https://github.com/googleapis/python-bigtable/commit/cef70f243541820225f86a520e0b2abd3a7354f7)) + + +### Bug Fixes + +* Add a callback function on flush_rows ([#796](https://github.com/googleapis/python-bigtable/issues/796)) ([589aa5d](https://github.com/googleapis/python-bigtable/commit/589aa5d04f6b5a2bd310d0bf06aeb7058fb6fcd2)) + + +### Documentation + +* **samples:** Add region tags ([#788](https://github.com/googleapis/python-bigtable/issues/788)) 
([ecf539c](https://github.com/googleapis/python-bigtable/commit/ecf539c4c976fd9e5505b8abf0b697b218f09fef)) + ## [2.18.1](https://github.com/googleapis/python-bigtable/compare/v2.18.0...v2.18.1) (2023-05-11) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py index e1b4da1deb3e..0f1a446f3802 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.18.1" # {x-release-please-version} +__version__ = "2.19.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py index e1b4da1deb3e..0f1a446f3802 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.18.1" # {x-release-please-version} +__version__ = "2.19.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py index e1b4da1deb3e..0f1a446f3802 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.18.1" # {x-release-please-version} +__version__ = "2.19.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py index e1b4da1deb3e..0f1a446f3802 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "2.18.1" # {x-release-please-version} +__version__ = "2.19.0" # {x-release-please-version} From f8da71b7ec12cc86b1720bc9b4df05eb09111df0 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Wed, 28 Jun 2023 03:44:12 -0400 Subject: [PATCH 729/892] chore: remove pinned Sphinx version [autoapprove] (#818) Source-Link: https://github.com/googleapis/synthtool/commit/909573ce9da2819eeb835909c795d29aea5c724e Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:ddf4551385d566771dc713090feb7b4c1164fb8a698fe52bbe7670b24236565b Co-authored-by: Owl Bot --- packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml | 4 ++-- packages/google-cloud-bigtable/noxfile.py | 3 +-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 02a4dedced74..1b3cb6c52663 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:240b5bcc2bafd450912d2da2be15e62bc6de2cf839823ae4bf94d4f392b451dc -# created: 2023-06-03T21:25:37.968717478Z + digest: sha256:ddf4551385d566771dc713090feb7b4c1164fb8a698fe52bbe7670b24236565b +# created: 2023-06-27T13:04:21.96690344Z diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index 18f489e1901a..a33d64638804 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -345,10 +345,9 @@ def docfx(session): session.install("-e", ".") session.install( - "sphinx==4.0.1", + "gcp-sphinx-docfx-yaml", "alabaster", "recommonmark", - "gcp-sphinx-docfx-yaml", ) shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) From 7ba0a83e006097ef03dab63b1275a3b2215a6d6e Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 29 Jun 2023 12:26:20 -0400 Subject: [PATCH 730/892] chore: store artifacts in placer (#822) Source-Link: https://github.com/googleapis/synthtool/commit/cb960373d12d20f8dc38beee2bf884d49627165e Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:2d816f26f728ac8b24248741e7d4c461c09764ef9f7be3684d557c9632e46dbd Co-authored-by: Owl Bot --- packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml | 4 ++-- .../google-cloud-bigtable/.kokoro/release/common.cfg | 9 +++++++++ packages/google-cloud-bigtable/noxfile.py | 2 +- 3 files changed, 12 insertions(+), 3 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 1b3cb6c52663..98994f474104 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. 
docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:ddf4551385d566771dc713090feb7b4c1164fb8a698fe52bbe7670b24236565b -# created: 2023-06-27T13:04:21.96690344Z + digest: sha256:2d816f26f728ac8b24248741e7d4c461c09764ef9f7be3684d557c9632e46dbd +# created: 2023-06-28T17:03:33.371210701Z diff --git a/packages/google-cloud-bigtable/.kokoro/release/common.cfg b/packages/google-cloud-bigtable/.kokoro/release/common.cfg index 8477e4ca6dd5..2a8fd970c2af 100644 --- a/packages/google-cloud-bigtable/.kokoro/release/common.cfg +++ b/packages/google-cloud-bigtable/.kokoro/release/common.cfg @@ -38,3 +38,12 @@ env_vars: { key: "SECRET_MANAGER_KEYS" value: "releasetool-publish-reporter-app,releasetool-publish-reporter-googleapis-installation,releasetool-publish-reporter-pem" } + +# Store the packages we uploaded to PyPI. That way, we have a record of exactly +# what we published, which we can use to generate SBOMs and attestations. +action { + define_artifacts { + regex: "github/python-bigtable/**/*.tar.gz" + strip_prefix: "github/python-bigtable" + } +} diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index a33d64638804..fe9f07b4f6a1 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -419,6 +419,7 @@ def prerelease_deps(session): "grpcio!=1.52.0rc1", "grpcio-status", "google-api-core", + "google-auth", "proto-plus", "google-cloud-testutils", # dependencies of google-cloud-testutils" @@ -431,7 +432,6 @@ def prerelease_deps(session): # Remaining dependencies other_deps = [ "requests", - "google-auth", ] session.install(*other_deps) From 6e83b063e46c458e14ecb83eb7412369feb0827e Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Tue, 4 Jul 2023 08:37:40 -0400 Subject: [PATCH 731/892] feat: Increase the maximum retention period for a Cloud Bigtable backup from 30 days to 90 days (#817) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: add experimental reverse scan for public preview PiperOrigin-RevId: 543539118 Source-Link: https://github.com/googleapis/googleapis/commit/ae187063e3d8a43d85edb9b3084413d568ce7945 Source-Link: https://github.com/googleapis/googleapis-gen/commit/5d05516f84e53aaba63a4b8767ff955ac5bb4a87 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiNWQwNTUxNmY4NGU1M2FhYmE2M2E0Yjg3NjdmZjk1NWFjNWJiNGE4NyJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * feat: Increase the maximum retention period for a Cloud Bigtable backup from 30 days to 90 days PiperOrigin-RevId: 544356969 Source-Link: https://github.com/googleapis/googleapis/commit/c35889a0b917e22e26c53acafa5c27102a51d623 Source-Link: https://github.com/googleapis/googleapis-gen/commit/c00326ec78565b5d16f92c845ff0bb18f11ca05d Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiYzAwMzI2ZWM3ODU2NWI1ZDE2ZjkyYzg0NWZmMGJiMThmMTFjYTA1ZCJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- .../google/cloud/bigtable_admin_v2/types/table.py | 2 +- .../google/cloud/bigtable_v2/types/bigtable.py | 12 ++++++++++++ .../google/cloud/bigtable_v2/types/feature_flags.py | 9 +++++++++ .../scripts/fixup_bigtable_v2_keywords.py | 2 +- 4 files changed, 23 insertions(+), 2 deletions(-) diff --git 
a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py index 16d136e168d2..c7925460d19c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py @@ -590,7 +590,7 @@ class Backup(proto.Message): expire_time (google.protobuf.timestamp_pb2.Timestamp): Required. The expiration time of the backup, with microseconds granularity that must be at least 6 hours and - at most 30 days from the time the request is received. Once + at most 90 days from the time the request is received. Once the ``expire_time`` has passed, Cloud Bigtable will delete the backup and free the resources used by the backup. start_time (google.protobuf.timestamp_pb2.Timestamp): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py index 13f6ac0db3f7..c47e79d775de 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py @@ -80,6 +80,14 @@ class ReadRowsRequest(proto.Message): request_stats_view (google.cloud.bigtable_v2.types.ReadRowsRequest.RequestStatsView): The view into RequestStats, as described above. + reversed_ (bool): + Experimental API - Please note that this API is currently + experimental and can change in the future. + + Return rows in lexiographical descending order of the row + keys. The row contents will not be affected by this flag. + Example result set: [ {key: "k2", "f:col1": "v1", "f:col2": + "v1"}, {key: "k1", "f:col1": "v2", "f:col2": "v2"} ]. """ class RequestStatsView(proto.Enum): @@ -131,6 +139,10 @@ class RequestStatsView(proto.Enum): number=6, enum=RequestStatsView, ) + reversed_: bool = proto.Field( + proto.BOOL, + number=7, + ) class ReadRowsResponse(proto.Message): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/feature_flags.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/feature_flags.py index 1b5f76e2410c..073269fbbd91 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/feature_flags.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/feature_flags.py @@ -39,12 +39,21 @@ class FeatureFlags(proto.Message): endusers directly. Attributes: + reverse_scans (bool): + Notify the server that the client supports + reverse scans. The server will reject + ReadRowsRequests with the reverse bit set when + this is absent. mutate_rows_rate_limit (bool): Notify the server that the client enables batch write flow control by requesting RateLimitInfo from MutateRowsResponse. 
""" + reverse_scans: bool = proto.Field( + proto.BOOL, + number=1, + ) mutate_rows_rate_limit: bool = proto.Field( proto.BOOL, number=3, diff --git a/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py b/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py index 11ffed53fb19..3a833f5919e1 100644 --- a/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py +++ b/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py @@ -46,7 +46,7 @@ class bigtableCallTransformer(cst.CSTTransformer): 'ping_and_warm': ('name', 'app_profile_id', ), 'read_change_stream': ('table_name', 'app_profile_id', 'partition', 'start_time', 'continuation_tokens', 'end_time', 'heartbeat_duration', ), 'read_modify_write_row': ('table_name', 'row_key', 'rules', 'app_profile_id', ), - 'read_rows': ('table_name', 'app_profile_id', 'rows', 'filter', 'rows_limit', 'request_stats_view', ), + 'read_rows': ('table_name', 'app_profile_id', 'rows', 'filter', 'rows_limit', 'request_stats_view', 'reversed_', ), 'sample_row_keys': ('table_name', 'app_profile_id', ), } From a288db488babb1f4a31ecb4bbc96e90ae0f7ae75 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Tue, 4 Jul 2023 15:21:01 -0400 Subject: [PATCH 732/892] fix: Add async context manager return types (#828) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix: Add async context manager return types chore: Mock return_value should not populate oneof message fields chore: Support snippet generation for services that only support REST transport chore: Update gapic-generator-python to v1.11.0 PiperOrigin-RevId: 545430278 Source-Link: https://github.com/googleapis/googleapis/commit/601b5326107eeb74800b426d1f9933faa233258a Source-Link: https://github.com/googleapis/googleapis-gen/commit/b3f18d0f6560a855022fd058865e7620479d7af9 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiYjNmMThkMGY2NTYwYTg1NTAyMmZkMDU4ODY1ZTc2MjA0NzlkN2FmOSJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- .../bigtable_instance_admin/async_client.py | 2 +- .../bigtable_table_admin/async_client.py | 2 +- .../services/bigtable/async_client.py | 2 +- .../cloud/bigtable_v2/types/bigtable.py | 4 +-- .../scripts/fixup_bigtable_v2_keywords.py | 2 +- .../test_bigtable_instance_admin.py | 34 +++++-------------- .../test_bigtable_table_admin.py | 18 ++++++---- .../unit/gapic/bigtable_v2/test_bigtable.py | 6 +--- 8 files changed, 27 insertions(+), 43 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py index 12811bceae15..af4ceaf028ea 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ -2355,7 +2355,7 @@ async def list_hot_tablets( # Done; return the response. 
return response - async def __aenter__(self): + async def __aenter__(self) -> "BigtableInstanceAdminAsyncClient": return self async def __aexit__(self, exc_type, exc, tb): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index 1663c16eb8c3..995b8d150376 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -2671,7 +2671,7 @@ async def test_iam_permissions( # Done; return the response. return response - async def __aenter__(self): + async def __aenter__(self) -> "BigtableTableAdminAsyncClient": return self async def __aexit__(self, exc_type, exc, tb): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py index abd82d4d8fbb..06e5852a768f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -1210,7 +1210,7 @@ def read_change_stream( # Done; return the response. return response - async def __aenter__(self): + async def __aenter__(self) -> "BigtableAsyncClient": return self async def __aexit__(self, exc_type, exc, tb): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py index c47e79d775de..742606553551 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py @@ -80,7 +80,7 @@ class ReadRowsRequest(proto.Message): request_stats_view (google.cloud.bigtable_v2.types.ReadRowsRequest.RequestStatsView): The view into RequestStats, as described above. - reversed_ (bool): + reversed (bool): Experimental API - Please note that this API is currently experimental and can change in the future. 
@@ -139,7 +139,7 @@ class RequestStatsView(proto.Enum): number=6, enum=RequestStatsView, ) - reversed_: bool = proto.Field( + reversed: bool = proto.Field( proto.BOOL, number=7, ) diff --git a/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py b/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py index 3a833f5919e1..dcfe1ab1cb3f 100644 --- a/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py +++ b/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py @@ -46,7 +46,7 @@ class bigtableCallTransformer(cst.CSTTransformer): 'ping_and_warm': ('name', 'app_profile_id', ), 'read_change_stream': ('table_name', 'app_profile_id', 'partition', 'start_time', 'continuation_tokens', 'end_time', 'heartbeat_duration', ), 'read_modify_write_row': ('table_name', 'row_key', 'rules', 'app_profile_id', ), - 'read_rows': ('table_name', 'app_profile_id', 'rows', 'filter', 'rows_limit', 'request_stats_view', 'reversed_', ), + 'read_rows': ('table_name', 'app_profile_id', 'rows', 'filter', 'rows_limit', 'request_stats_view', 'reversed', ), 'sample_row_keys': ('table_name', 'app_profile_id', ), } diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index 76715f1ed73a..2cc636bbd4f8 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -2425,11 +2425,6 @@ def test_get_cluster(request_type, transport: str = "grpc"): state=instance.Cluster.State.READY, serve_nodes=1181, default_storage_type=common.StorageType.SSD, - cluster_config=instance.Cluster.ClusterConfig( - cluster_autoscaling_config=instance.Cluster.ClusterAutoscalingConfig( - autoscaling_limits=instance.AutoscalingLimits(min_serve_nodes=1600) - ) - ), ) response = client.get_cluster(request) @@ -3529,9 +3524,6 @@ def test_create_app_profile(request_type, transport: str = "grpc"): name="name_value", etag="etag_value", description="description_value", - multi_cluster_routing_use_any=instance.AppProfile.MultiClusterRoutingUseAny( - cluster_ids=["cluster_ids_value"] - ), ) response = client.create_app_profile(request) @@ -3801,9 +3793,6 @@ def test_get_app_profile(request_type, transport: str = "grpc"): name="name_value", etag="etag_value", description="description_value", - multi_cluster_routing_use_any=instance.AppProfile.MultiClusterRoutingUseAny( - cluster_ids=["cluster_ids_value"] - ), ) response = client.get_app_profile(request) @@ -4456,9 +4445,11 @@ async def test_list_app_profiles_async_pages(): RuntimeError, ) pages = [] - async for page_ in ( + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch await client.list_app_profiles(request={}) - ).pages: # pragma: no branch + ).pages: pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -6138,9 +6129,11 @@ async def test_list_hot_tablets_async_pages(): RuntimeError, ) pages = [] - async for page_ in ( + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See 
https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch await client.list_hot_tablets(request={}) - ).pages: # pragma: no branch + ).pages: pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -8088,11 +8081,6 @@ def test_get_cluster_rest(request_type): state=instance.Cluster.State.READY, serve_nodes=1181, default_storage_type=common.StorageType.SSD, - cluster_config=instance.Cluster.ClusterConfig( - cluster_autoscaling_config=instance.Cluster.ClusterAutoscalingConfig( - autoscaling_limits=instance.AutoscalingLimits(min_serve_nodes=1600) - ) - ), ) # Wrap the value into a proper Response obj @@ -9345,9 +9333,6 @@ def test_create_app_profile_rest(request_type): name="name_value", etag="etag_value", description="description_value", - multi_cluster_routing_use_any=instance.AppProfile.MultiClusterRoutingUseAny( - cluster_ids=["cluster_ids_value"] - ), ) # Wrap the value into a proper Response obj @@ -9670,9 +9655,6 @@ def test_get_app_profile_rest(request_type): name="name_value", etag="etag_value", description="description_value", - multi_cluster_routing_use_any=instance.AppProfile.MultiClusterRoutingUseAny( - cluster_ids=["cluster_ids_value"] - ), ) # Wrap the value into a proper Response obj diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index 8498e4fa563f..21cd1e5b3111 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -1691,9 +1691,11 @@ async def test_list_tables_async_pages(): RuntimeError, ) pages = [] - async for page_ in ( + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch await client.list_tables(request={}) - ).pages: # pragma: no branch + ).pages: pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -4457,9 +4459,11 @@ async def test_list_snapshots_async_pages(): RuntimeError, ) pages = [] - async for page_ in ( + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch await client.list_snapshots(request={}) - ).pages: # pragma: no branch + ).pages: pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -6058,9 +6062,11 @@ async def test_list_backups_async_pages(): RuntimeError, ) pages = [] - async for page_ in ( + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch await client.list_backups(request={}) - ).pages: # pragma: no branch + ).pages: pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py 
b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py index 03ba3044f92b..5b4c27f63190 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py @@ -5316,11 +5316,7 @@ def test_read_change_stream_rest(request_type): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = bigtable.ReadChangeStreamResponse( - data_change=bigtable.ReadChangeStreamResponse.DataChange( - type_=bigtable.ReadChangeStreamResponse.DataChange.Type.USER - ), - ) + return_value = bigtable.ReadChangeStreamResponse() # Wrap the value into a proper Response obj response_value = Response() From 8a08c63744e9950222bac9e865a960c3b83bcac1 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Mon, 10 Jul 2023 09:06:42 -0400 Subject: [PATCH 733/892] chore: Update gapic-generator-python to v1.11.2 (#829) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore: Update gapic-generator-python to v1.11.2 PiperOrigin-RevId: 546510849 Source-Link: https://github.com/googleapis/googleapis/commit/736073ad9a9763a170eceaaa54519bcc0ea55a5e Source-Link: https://github.com/googleapis/googleapis-gen/commit/deb64e8ec19d141e31089fe932b3a997ad541c4d Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiZGViNjRlOGVjMTlkMTQxZTMxMDg5ZmU5MzJiM2E5OTdhZDU0MWM0ZCJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- .../google/cloud/bigtable_admin/__init__.py | 2 +- .../google/cloud/bigtable_admin_v2/__init__.py | 2 +- .../google/cloud/bigtable_admin_v2/services/__init__.py | 2 +- .../services/bigtable_instance_admin/__init__.py | 2 +- .../services/bigtable_instance_admin/async_client.py | 2 +- .../services/bigtable_instance_admin/client.py | 2 +- .../services/bigtable_instance_admin/pagers.py | 2 +- .../services/bigtable_instance_admin/transports/__init__.py | 2 +- .../services/bigtable_instance_admin/transports/base.py | 2 +- .../services/bigtable_instance_admin/transports/grpc.py | 2 +- .../services/bigtable_instance_admin/transports/grpc_asyncio.py | 2 +- .../services/bigtable_instance_admin/transports/rest.py | 2 +- .../bigtable_admin_v2/services/bigtable_table_admin/__init__.py | 2 +- .../services/bigtable_table_admin/async_client.py | 2 +- .../bigtable_admin_v2/services/bigtable_table_admin/client.py | 2 +- .../bigtable_admin_v2/services/bigtable_table_admin/pagers.py | 2 +- .../services/bigtable_table_admin/transports/__init__.py | 2 +- .../services/bigtable_table_admin/transports/base.py | 2 +- .../services/bigtable_table_admin/transports/grpc.py | 2 +- .../services/bigtable_table_admin/transports/grpc_asyncio.py | 2 +- .../services/bigtable_table_admin/transports/rest.py | 2 +- .../google/cloud/bigtable_admin_v2/types/__init__.py | 2 +- .../cloud/bigtable_admin_v2/types/bigtable_instance_admin.py | 2 +- .../cloud/bigtable_admin_v2/types/bigtable_table_admin.py | 2 +- .../google/cloud/bigtable_admin_v2/types/common.py | 2 +- .../google/cloud/bigtable_admin_v2/types/instance.py | 2 +- .../google/cloud/bigtable_admin_v2/types/table.py | 2 +- .../google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py | 2 +- 
.../google/cloud/bigtable_v2/services/__init__.py | 2 +- .../google/cloud/bigtable_v2/services/bigtable/__init__.py | 2 +- .../google/cloud/bigtable_v2/services/bigtable/async_client.py | 2 +- .../google/cloud/bigtable_v2/services/bigtable/client.py | 2 +- .../cloud/bigtable_v2/services/bigtable/transports/__init__.py | 2 +- .../cloud/bigtable_v2/services/bigtable/transports/base.py | 2 +- .../cloud/bigtable_v2/services/bigtable/transports/grpc.py | 2 +- .../bigtable_v2/services/bigtable/transports/grpc_asyncio.py | 2 +- .../cloud/bigtable_v2/services/bigtable/transports/rest.py | 2 +- .../google/cloud/bigtable_v2/types/__init__.py | 2 +- .../google/cloud/bigtable_v2/types/bigtable.py | 2 +- .../google/cloud/bigtable_v2/types/data.py | 2 +- .../google/cloud/bigtable_v2/types/feature_flags.py | 2 +- .../google/cloud/bigtable_v2/types/request_stats.py | 2 +- .../google/cloud/bigtable_v2/types/response_params.py | 2 +- .../scripts/fixup_bigtable_admin_v2_keywords.py | 2 +- .../google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py | 2 +- packages/google-cloud-bigtable/tests/__init__.py | 2 +- packages/google-cloud-bigtable/tests/unit/__init__.py | 2 +- packages/google-cloud-bigtable/tests/unit/gapic/__init__.py | 2 +- .../tests/unit/gapic/bigtable_admin_v2/__init__.py | 2 +- .../gapic/bigtable_admin_v2/test_bigtable_instance_admin.py | 2 +- .../unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py | 2 +- .../tests/unit/gapic/bigtable_v2/__init__.py | 2 +- .../tests/unit/gapic/bigtable_v2/test_bigtable.py | 2 +- 53 files changed, 53 insertions(+), 53 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/__init__.py index 0ba93ec63679..43535ae208d7 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py index c030ec1bdb7d..8033a0af77cc 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/__init__.py index e8e1c3845db5..89a37dc92c5a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py index 1fb10736ec78..40631d1b4bae 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py index af4ceaf028ea..111b8cbf7963 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py index ecc9bf1e2892..33c8510b9841 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py index bfcbbf23d703..0d646a96e1c1 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py index e5637c0da2a5..62da28c88a1b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py index bd45f319ff72..d92d2545300d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py index f037f5a44bf5..eca37957dbbb 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py index 82b03b0bbae8..145aa427d852 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py index e9b94cf78fe9..228f5c02ce39 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py index 515696537b03..544649e90131 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index 995b8d150376..02dd0153dcb7 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index e043aa224b95..c83f3116fee2 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py index e639227df3c9..331647b4cefe 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py index 585b4e437c6e..be4aa8d2af55 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py index cade1335b605..591c6bcfeb27 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py index f8cf9f83491a..e18be126c124 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py index 54eb7e524d03..8f72e3fe89e5 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py index 4d5b2ed1c0ae..8179169778e8 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py index 69153c9fc3b8..c69e3129ba65 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py index a2254335498d..87332a351eaf 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py index 4c4b9e9e2197..dfa815dc2038 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/common.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/common.py index 2cc71fc43a6e..959b9deb1f76 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/common.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/common.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py index 2b5d8163674b..6ae9159d0ca4 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py index c7925460d19c..1dd0ff0b2270 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py index ee3bd8c0c984..80bd4ec09bf1 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/__init__.py index e8e1c3845db5..89a37dc92c5a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/__init__.py index cfce7b6b8340..f10a68e5bc68 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py index 06e5852a768f..07a782d0cf0d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py index a778aff3c0e7..db393faa7f94 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py index 1b03919f6ce5..c09443bc27e0 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py index 5b4580c18f1c..b580bbca77c6 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py index b9e073e8a093..8ba04e761db0 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py index 8bf02ce774b2..2c0cbdad64c0 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py index 4343fbb900eb..31d230f94fe4 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py index 9f15efaf5e36..f266becb9b11 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py index 742606553551..378f954804e3 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py index 515e167dfdb6..3fad8f87c0a8 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/feature_flags.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/feature_flags.py index 073269fbbd91..d1fd03ff6db4 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/feature_flags.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/feature_flags.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/request_stats.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/request_stats.py index d72ba8694653..27c2bb0284d1 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/request_stats.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/request_stats.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/response_params.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/response_params.py index 2532e64e286a..98e3a67db584 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/response_params.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/response_params.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py b/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py index 17be56f2faa2..58eab8bcf850 100644 --- a/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py +++ b/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py @@ -1,6 +1,6 @@ #! /usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py b/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py index dcfe1ab1cb3f..8d32e5b70438 100644 --- a/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py +++ b/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py @@ -1,6 +1,6 @@ #! 
/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/tests/__init__.py b/packages/google-cloud-bigtable/tests/__init__.py index e8e1c3845db5..89a37dc92c5a 100644 --- a/packages/google-cloud-bigtable/tests/__init__.py +++ b/packages/google-cloud-bigtable/tests/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/tests/unit/__init__.py b/packages/google-cloud-bigtable/tests/unit/__init__.py index e8e1c3845db5..89a37dc92c5a 100644 --- a/packages/google-cloud-bigtable/tests/unit/__init__.py +++ b/packages/google-cloud-bigtable/tests/unit/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/__init__.py b/packages/google-cloud-bigtable/tests/unit/gapic/__init__.py index e8e1c3845db5..89a37dc92c5a 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/__init__.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/__init__.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/__init__.py index e8e1c3845db5..89a37dc92c5a 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/__init__.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index 2cc636bbd4f8..f5f5fc514c5f 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index 21cd1e5b3111..0a0b3b671327 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/__init__.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/__init__.py index e8e1c3845db5..89a37dc92c5a 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/__init__.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py index 5b4c27f63190..597540d696f7 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
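Taken together, the reverse-scan field added to ReadRowsRequest earlier in this series (and renamed from ``reversed_`` to ``reversed`` in patch 732) and the typed ``__aenter__`` from patch 732 would be exercised roughly as in the sketch below. This is an illustrative sketch only, not part of any patch: the function name and table resource name are placeholders, Application Default Credentials and an existing table are assumed, and, per the FeatureFlags.reverse_scans description above, the server at this point rejected reversed requests from clients that did not advertise that feature flag, so a raw GAPIC call like this is for illustration rather than a guaranteed-working recipe.

import asyncio

from google.cloud import bigtable_v2


async def read_rows_reversed(table_name: str) -> None:
    # Patch 732 annotates __aenter__ as returning BigtableAsyncClient, so the
    # client can be used as a typed async context manager and static checkers
    # know the type of ``client`` inside the block.
    async with bigtable_v2.BigtableAsyncClient() as client:
        # ``reversed`` is the experimental flag from the ReadRows patch above;
        # it asks the server to return rows in descending lexicographic order
        # of row key. At the time of these patches the server also required
        # the reverse_scans feature flag to be advertised by the client.
        request = bigtable_v2.ReadRowsRequest(
            table_name=table_name,
            rows_limit=5,
            reversed=True,
        )
        stream = await client.read_rows(request=request)
        async for response in stream:
            for chunk in response.chunks:
                print(chunk.row_key, chunk.value)


# Placeholder resource name; a real project, instance, and table plus
# Application Default Credentials are needed for this to actually run.
asyncio.run(
    read_rows_reversed("projects/my-project/instances/my-instance/tables/my-table")
)

The typed ``__aenter__`` return value changes no runtime behavior; it only lets type checkers infer the concrete client class inside the ``async with`` block.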
From 67feb4e384bffbe675566c55466923f47c02de49 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Mon, 10 Jul 2023 16:40:12 -0400 Subject: [PATCH 734/892] chore: Update gapic-generator-python to v1.11.3 (#830) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore: Update gapic-generator-python to v1.11.3 PiperOrigin-RevId: 546899192 Source-Link: https://github.com/googleapis/googleapis/commit/e6b16918b98fe1a35f725b56537354f22b6cdc48 Source-Link: https://github.com/googleapis/googleapis-gen/commit/0b3917c421cbda7fcb67092e16c33f3ea46f4bc7 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiMGIzOTE3YzQyMWNiZGE3ZmNiNjcwOTJlMTZjMzNmM2VhNDZmNGJjNyJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- .../bigtable_instance_admin/async_client.py | 12 ++++----- .../bigtable_instance_admin/client.py | 12 ++++----- .../bigtable_table_admin/async_client.py | 12 ++++----- .../services/bigtable_table_admin/client.py | 12 ++++----- .../cloud/bigtable_admin_v2/types/instance.py | 4 +-- .../cloud/bigtable_admin_v2/types/table.py | 4 +-- .../services/bigtable/async_client.py | 25 +++++++++++-------- .../bigtable_v2/services/bigtable/client.py | 25 +++++++++++-------- .../services/bigtable/transports/grpc.py | 7 +++--- .../bigtable/transports/grpc_asyncio.py | 7 +++--- .../services/bigtable/transports/rest.py | 18 +++++++------ .../cloud/bigtable_v2/types/bigtable.py | 12 ++++++--- .../google/cloud/bigtable_v2/types/data.py | 11 ++++---- 13 files changed, 88 insertions(+), 73 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py index 111b8cbf7963..4b45774f06b9 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ -1883,8 +1883,8 @@ async def get_iam_policy( The request object. Request message for ``GetIamPolicy`` method. resource (:class:`str`): REQUIRED: The resource for which the - policy is being requested. See the - operation documentation for the + policy is being requested. + See the operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field @@ -2032,8 +2032,8 @@ async def set_iam_policy( The request object. Request message for ``SetIamPolicy`` method. resource (:class:`str`): REQUIRED: The resource for which the - policy is being specified. See the - operation documentation for the + policy is being specified. + See the operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field @@ -2172,8 +2172,8 @@ async def test_iam_permissions( The request object. Request message for ``TestIamPermissions`` method. resource (:class:`str`): REQUIRED: The resource for which the - policy detail is being requested. See - the operation documentation for the + policy detail is being requested. + See the operation documentation for the appropriate value for this field. 
This corresponds to the ``resource`` field diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py index 33c8510b9841..fb993b6511f6 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -2107,8 +2107,8 @@ def get_iam_policy( The request object. Request message for ``GetIamPolicy`` method. resource (str): REQUIRED: The resource for which the - policy is being requested. See the - operation documentation for the + policy is being requested. + See the operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field @@ -2243,8 +2243,8 @@ def set_iam_policy( The request object. Request message for ``SetIamPolicy`` method. resource (str): REQUIRED: The resource for which the - policy is being specified. See the - operation documentation for the + policy is being specified. + See the operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field @@ -2380,8 +2380,8 @@ def test_iam_permissions( The request object. Request message for ``TestIamPermissions`` method. resource (str): REQUIRED: The resource for which the - policy detail is being requested. See - the operation documentation for the + policy detail is being requested. + See the operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index 02dd0153dcb7..a7eed5d37828 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -2301,8 +2301,8 @@ async def get_iam_policy( The request object. Request message for ``GetIamPolicy`` method. resource (:class:`str`): REQUIRED: The resource for which the - policy is being requested. See the - operation documentation for the + policy is being requested. + See the operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field @@ -2450,8 +2450,8 @@ async def set_iam_policy( The request object. Request message for ``SetIamPolicy`` method. resource (:class:`str`): REQUIRED: The resource for which the - policy is being specified. See the - operation documentation for the + policy is being specified. + See the operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field @@ -2590,8 +2590,8 @@ async def test_iam_permissions( The request object. Request message for ``TestIamPermissions`` method. resource (:class:`str`): REQUIRED: The resource for which the - policy detail is being requested. See - the operation documentation for the + policy detail is being requested. + See the operation documentation for the appropriate value for this field. 
This corresponds to the ``resource`` field diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index c83f3116fee2..03746495472d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -2556,8 +2556,8 @@ def get_iam_policy( The request object. Request message for ``GetIamPolicy`` method. resource (str): REQUIRED: The resource for which the - policy is being requested. See the - operation documentation for the + policy is being requested. + See the operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field @@ -2692,8 +2692,8 @@ def set_iam_policy( The request object. Request message for ``SetIamPolicy`` method. resource (str): REQUIRED: The resource for which the - policy is being specified. See the - operation documentation for the + policy is being specified. + See the operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field @@ -2829,8 +2829,8 @@ def test_iam_permissions( The request object. Request message for ``TestIamPermissions`` method. resource (str): REQUIRED: The resource for which the - policy detail is being requested. See - the operation documentation for the + policy detail is being requested. + See the operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py index 6ae9159d0ca4..aa85eba1982f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py @@ -117,8 +117,8 @@ class Type(proto.Enum): be set on the cluster. DEVELOPMENT (2): DEPRECATED: Prefer PRODUCTION for all use - cases, as it no longer enforces a higher minimum - node count than DEVELOPMENT. + cases, as it no longer enforces + a higher minimum node count than DEVELOPMENT. """ TYPE_UNSPECIFIED = 0 PRODUCTION = 1 diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py index 1dd0ff0b2270..609d165f95ea 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py @@ -143,8 +143,8 @@ class Table(proto.Message): deletion_protection (bool): Set to true to make the table protected against data loss. i.e. deleting the following - resources through Admin APIs are prohibited: - - The table. + resources through Admin APIs are prohibited: + - The table. - The column families in the table. - The instance containing the table. 
Note one can still delete the data stored in the diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py index 07a782d0cf0d..441022ac735d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -1030,8 +1030,8 @@ def generate_initial_change_stream_partitions( Args: request (Optional[Union[google.cloud.bigtable_v2.types.GenerateInitialChangeStreamPartitionsRequest, dict]]): The request object. NOTE: This API is intended to be used - by Apache Beam BigtableIO. Request - message for + by Apache Beam BigtableIO. + Request message for Bigtable.GenerateInitialChangeStreamPartitions. table_name (:class:`str`): Required. The unique name of the table from which to get @@ -1061,8 +1061,8 @@ def generate_initial_change_stream_partitions( Returns: AsyncIterable[google.cloud.bigtable_v2.types.GenerateInitialChangeStreamPartitionsResponse]: NOTE: This API is intended to be used - by Apache Beam BigtableIO. Response - message for + by Apache Beam BigtableIO. + Response message for Bigtable.GenerateInitialChangeStreamPartitions. """ @@ -1123,15 +1123,17 @@ def read_change_stream( metadata: Sequence[Tuple[str, str]] = (), ) -> Awaitable[AsyncIterable[bigtable.ReadChangeStreamResponse]]: r"""NOTE: This API is intended to be used by Apache Beam - BigtableIO. Reads changes from a table's change stream. - Changes will reflect both user-initiated mutations and - mutations that are caused by garbage collection. + BigtableIO. + Reads changes from a table's change stream. Changes will + reflect both user-initiated mutations and mutations that + are caused by garbage collection. Args: request (Optional[Union[google.cloud.bigtable_v2.types.ReadChangeStreamRequest, dict]]): The request object. NOTE: This API is intended to be used - by Apache Beam BigtableIO. Request - message for Bigtable.ReadChangeStream. + by Apache Beam BigtableIO. + Request message for + Bigtable.ReadChangeStream. table_name (:class:`str`): Required. The unique name of the table from which to read a change stream. Values are of the form @@ -1160,8 +1162,9 @@ def read_change_stream( Returns: AsyncIterable[google.cloud.bigtable_v2.types.ReadChangeStreamResponse]: NOTE: This API is intended to be used - by Apache Beam BigtableIO. Response - message for Bigtable.ReadChangeStream. + by Apache Beam BigtableIO. + Response message for + Bigtable.ReadChangeStream. """ # Create or coerce a protobuf request object. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py index db393faa7f94..595310c88a64 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py @@ -1330,8 +1330,8 @@ def generate_initial_change_stream_partitions( Args: request (Union[google.cloud.bigtable_v2.types.GenerateInitialChangeStreamPartitionsRequest, dict]): The request object. NOTE: This API is intended to be used - by Apache Beam BigtableIO. Request - message for + by Apache Beam BigtableIO. + Request message for Bigtable.GenerateInitialChangeStreamPartitions. table_name (str): Required. 
The unique name of the table from which to get @@ -1361,8 +1361,8 @@ def generate_initial_change_stream_partitions( Returns: Iterable[google.cloud.bigtable_v2.types.GenerateInitialChangeStreamPartitionsResponse]: NOTE: This API is intended to be used - by Apache Beam BigtableIO. Response - message for + by Apache Beam BigtableIO. + Response message for Bigtable.GenerateInitialChangeStreamPartitions. """ @@ -1427,15 +1427,17 @@ def read_change_stream( metadata: Sequence[Tuple[str, str]] = (), ) -> Iterable[bigtable.ReadChangeStreamResponse]: r"""NOTE: This API is intended to be used by Apache Beam - BigtableIO. Reads changes from a table's change stream. - Changes will reflect both user-initiated mutations and - mutations that are caused by garbage collection. + BigtableIO. + Reads changes from a table's change stream. Changes will + reflect both user-initiated mutations and mutations that + are caused by garbage collection. Args: request (Union[google.cloud.bigtable_v2.types.ReadChangeStreamRequest, dict]): The request object. NOTE: This API is intended to be used - by Apache Beam BigtableIO. Request - message for Bigtable.ReadChangeStream. + by Apache Beam BigtableIO. + Request message for + Bigtable.ReadChangeStream. table_name (str): Required. The unique name of the table from which to read a change stream. Values are of the form @@ -1464,8 +1466,9 @@ def read_change_stream( Returns: Iterable[google.cloud.bigtable_v2.types.ReadChangeStreamResponse]: NOTE: This API is intended to be used - by Apache Beam BigtableIO. Response - message for Bigtable.ReadChangeStream. + by Apache Beam BigtableIO. + Response message for + Bigtable.ReadChangeStream. """ # Create or coerce a protobuf request object. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py index 8ba04e761db0..0e0666242177 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py @@ -481,9 +481,10 @@ def read_change_stream( r"""Return a callable for the read change stream method over gRPC. NOTE: This API is intended to be used by Apache Beam - BigtableIO. Reads changes from a table's change stream. - Changes will reflect both user-initiated mutations and - mutations that are caused by garbage collection. + BigtableIO. + Reads changes from a table's change stream. Changes will + reflect both user-initiated mutations and mutations that + are caused by garbage collection. Returns: Callable[[~.ReadChangeStreamRequest], diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py index 2c0cbdad64c0..49259969b046 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py @@ -490,9 +490,10 @@ def read_change_stream( r"""Return a callable for the read change stream method over gRPC. NOTE: This API is intended to be used by Apache Beam - BigtableIO. Reads changes from a table's change stream. - Changes will reflect both user-initiated mutations and - mutations that are caused by garbage collection. + BigtableIO. 
+ Reads changes from a table's change stream. Changes will + reflect both user-initiated mutations and mutations that + are caused by garbage collection. Returns: Callable[[~.ReadChangeStreamRequest], diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py index 31d230f94fe4..066b35e2ae51 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py @@ -571,8 +571,8 @@ def __call__( Args: request (~.bigtable.GenerateInitialChangeStreamPartitionsRequest): The request object. NOTE: This API is intended to be used - by Apache Beam BigtableIO. Request - message for + by Apache Beam BigtableIO. + Request message for Bigtable.GenerateInitialChangeStreamPartitions. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. @@ -583,8 +583,8 @@ def __call__( Returns: ~.bigtable.GenerateInitialChangeStreamPartitionsResponse: NOTE: This API is intended to be used - by Apache Beam BigtableIO. Response - message for + by Apache Beam BigtableIO. + Response message for Bigtable.GenerateInitialChangeStreamPartitions. """ @@ -975,8 +975,9 @@ def __call__( Args: request (~.bigtable.ReadChangeStreamRequest): The request object. NOTE: This API is intended to be used - by Apache Beam BigtableIO. Request - message for Bigtable.ReadChangeStream. + by Apache Beam BigtableIO. + Request message for + Bigtable.ReadChangeStream. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -986,8 +987,9 @@ def __call__( Returns: ~.bigtable.ReadChangeStreamResponse: NOTE: This API is intended to be used - by Apache Beam BigtableIO. Response - message for Bigtable.ReadChangeStream. + by Apache Beam BigtableIO. + Response message for + Bigtable.ReadChangeStream. """ diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py index 378f954804e3..ab5358194677 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py @@ -749,7 +749,8 @@ class ReadModifyWriteRowResponse(proto.Message): class GenerateInitialChangeStreamPartitionsRequest(proto.Message): r"""NOTE: This API is intended to be used by Apache Beam - BigtableIO. Request message for + BigtableIO. + Request message for Bigtable.GenerateInitialChangeStreamPartitions. Attributes: @@ -777,7 +778,8 @@ class GenerateInitialChangeStreamPartitionsRequest(proto.Message): class GenerateInitialChangeStreamPartitionsResponse(proto.Message): r"""NOTE: This API is intended to be used by Apache Beam - BigtableIO. Response message for + BigtableIO. + Response message for Bigtable.GenerateInitialChangeStreamPartitions. Attributes: @@ -794,7 +796,8 @@ class GenerateInitialChangeStreamPartitionsResponse(proto.Message): class ReadChangeStreamRequest(proto.Message): r"""NOTE: This API is intended to be used by Apache Beam - BigtableIO. Request message for Bigtable.ReadChangeStream. + BigtableIO. + Request message for Bigtable.ReadChangeStream. This message has `oneof`_ fields (mutually exclusive fields). For each oneof, at most one member field can be set at the same time. 
@@ -890,7 +893,8 @@ class ReadChangeStreamRequest(proto.Message): class ReadChangeStreamResponse(proto.Message): r"""NOTE: This API is intended to be used by Apache Beam - BigtableIO. Response message for Bigtable.ReadChangeStream. + BigtableIO. + Response message for Bigtable.ReadChangeStream. This message has `oneof`_ fields (mutually exclusive fields). For each oneof, at most one member field can be set at the same time. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py index 3fad8f87c0a8..9913d9ed0e4d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py @@ -639,10 +639,10 @@ class Chain(proto.Message): Attributes: filters (MutableSequence[google.cloud.bigtable_v2.types.RowFilter]): The elements of "filters" are chained - together to process the input row: in row -> - f(0) -> intermediate row -> f(1) -> ... -> f(N) - -> out row The full chain is executed - atomically. + together to process the input row: + in row -> f(0) -> intermediate row -> f(1) -> + ... -> f(N) -> out row The full chain is + executed atomically. """ filters: MutableSequence["RowFilter"] = proto.RepeatedField( @@ -1041,7 +1041,8 @@ class ReadModifyWriteRule(proto.Message): class StreamPartition(proto.Message): r"""NOTE: This API is intended to be used by Apache Beam - BigtableIO. A partition of a change stream. + BigtableIO. + A partition of a change stream. Attributes: row_range (google.cloud.bigtable_v2.types.RowRange): From b83943d2a1f43561bea217f154b7943df507af8f Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 13 Jul 2023 11:21:06 -0400 Subject: [PATCH 735/892] docs: fix formatting for reversed order field example (#831) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * docs: fix formatting for reversed order field example PiperOrigin-RevId: 547553954 Source-Link: https://github.com/googleapis/googleapis/commit/c4e6427fcefd1cd9a15a3008ae7ee8adca972276 Source-Link: https://github.com/googleapis/googleapis-gen/commit/f552269609d4183546543bfe3a022f544d4f5bdb Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiZjU1MjI2OTYwOWQ0MTgzNTQ2NTQzYmZlM2EwMjJmNTQ0ZDRmNWJkYiJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- .../google/cloud/bigtable_v2/types/bigtable.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py index ab5358194677..42740eee2367 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py @@ -86,8 +86,15 @@ class ReadRowsRequest(proto.Message): Return rows in lexiographical descending order of the row keys. The row contents will not be affected by this flag. - Example result set: [ {key: "k2", "f:col1": "v1", "f:col2": - "v1"}, {key: "k1", "f:col1": "v2", "f:col2": "v2"} ]. 
+ + Example result set: + + :: + + [ + {key: "k2", "f:col1": "v1", "f:col2": "v1"}, + {key: "k1", "f:col1": "v2", "f:col2": "v2"} + ] """ class RequestStatsView(proto.Enum): From 519c0066183ea99f70d6507caedd6349c633a61a Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Fri, 14 Jul 2023 10:14:17 -0400 Subject: [PATCH 736/892] chore: Update gapic-generator-python to v1.11.4 (#832) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore: Update gapic-generator-python to v1.11.4 PiperOrigin-RevId: 547897126 Source-Link: https://github.com/googleapis/googleapis/commit/c09c75e087d8f9a2d466b4aaad7dd2926b5ead5a Source-Link: https://github.com/googleapis/googleapis-gen/commit/45e0ec4343517cd0aa66b5ca64232a1802c2f945 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiNDVlMGVjNDM0MzUxN2NkMGFhNjZiNWNhNjQyMzJhMTgwMmMyZjk0NSJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- .../google/cloud/bigtable_admin_v2/types/table.py | 1 + .../google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py | 1 + 2 files changed, 2 insertions(+) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py index 609d165f95ea..e75ac00bb1da 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py @@ -144,6 +144,7 @@ class Table(proto.Message): Set to true to make the table protected against data loss. i.e. deleting the following resources through Admin APIs are prohibited: + - The table. - The column families in the table. - The instance containing the table. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py index 9913d9ed0e4d..0e9e0bfe5bf0 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py @@ -640,6 +640,7 @@ class Chain(proto.Message): filters (MutableSequence[google.cloud.bigtable_v2.types.RowFilter]): The elements of "filters" are chained together to process the input row: + in row -> f(0) -> intermediate row -> f(1) -> ... -> f(N) -> out row The full chain is executed atomically. 
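
The ``RowFilter.Chain`` and reversed-scan docstrings reformatted just above can be seen in request form. The following is only a minimal sketch, assuming the generated ``google.cloud.bigtable_v2`` protobuf types, a placeholder table name, and an arbitrary column family ``"f"``::

    from google.cloud.bigtable_v2 import types

    # Two filters chained left-to-right: keep cells in column family "f",
    # then keep only the most recent cell per column. Per the docstring,
    # the full chain is applied atomically to each input row.
    chain_filter = types.RowFilter(
        chain=types.RowFilter.Chain(
            filters=[
                types.RowFilter(family_name_regex_filter="f"),
                types.RowFilter(cells_per_column_limit_filter=1),
            ]
        )
    )

    # reversed=True returns rows in descending lexicographic key order, so a
    # table holding "k1" and "k2" yields "k2" first; cell contents are not
    # affected by the flag.
    request = types.ReadRowsRequest(
        table_name="projects/my-project/instances/my-instance/tables/my-table",
        filter=chain_filter,
        rows_limit=10,
        reversed=True,
    )
    # The request can then be passed to BigtableClient.read_rows(request=request).
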
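The change-stream docstrings touched in the patches above describe RPCs aimed at Apache Beam BigtableIO rather than direct application use. A rough sketch of how the two calls compose, assuming the generated ``google.cloud.bigtable_v2`` client, placeholder resource names, and none of the checkpointing a real connector would need::

    from google.cloud.bigtable_v2 import BigtableClient, types

    client = BigtableClient()
    # Placeholder name; substitute a real project/instance/table.
    table = "projects/my-project/instances/my-instance/tables/my-table"

    # Each response carries one StreamPartition covering a contiguous row range.
    partitions = [
        resp.partition
        for resp in client.generate_initial_change_stream_partitions(table_name=table)
    ]

    for partition in partitions:
        request = types.ReadChangeStreamRequest(table_name=table, partition=partition)
        for response in client.read_change_stream(request=request):
            # Responses interleave data_change, heartbeat and close_stream
            # messages; changes reflect both user mutations and
            # garbage-collection deletions.
            print(response)
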
From 5647fe1c7cbb86899a990c53e9208d15821a69fd Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Mon, 17 Jul 2023 11:52:31 -0400 Subject: [PATCH 737/892] build(deps): [autoapprove] bump cryptography from 41.0.0 to 41.0.2 (#834) Source-Link: https://github.com/googleapis/synthtool/commit/d6103f4a3540ba60f633a9e25c37ec5fe7e6286d Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:39f0f3f2be02ef036e297e376fe3b6256775576da8a6ccb1d5eeb80f4c8bf8fb Co-authored-by: Owl Bot --- packages/google-cloud-bigtable/.flake8 | 2 +- .../.github/.OwlBot.lock.yaml | 4 +- .../.github/auto-label.yaml | 2 +- .../google-cloud-bigtable/.kokoro/build.sh | 2 +- .../.kokoro/docker/docs/Dockerfile | 2 +- .../.kokoro/populate-secrets.sh | 2 +- .../.kokoro/publish-docs.sh | 2 +- .../google-cloud-bigtable/.kokoro/release.sh | 2 +- .../.kokoro/requirements.txt | 44 ++++++++++--------- .../.kokoro/test-samples-against-head.sh | 2 +- .../.kokoro/test-samples-impl.sh | 2 +- .../.kokoro/test-samples.sh | 2 +- .../.kokoro/trampoline.sh | 2 +- .../.kokoro/trampoline_v2.sh | 2 +- .../.pre-commit-config.yaml | 2 +- packages/google-cloud-bigtable/.trampolinerc | 4 +- packages/google-cloud-bigtable/MANIFEST.in | 2 +- packages/google-cloud-bigtable/docs/conf.py | 2 +- packages/google-cloud-bigtable/noxfile.py | 3 +- .../scripts/decrypt-secrets.sh | 2 +- .../scripts/readme-gen/readme_gen.py | 18 ++++---- packages/google-cloud-bigtable/setup.cfg | 2 +- 22 files changed, 55 insertions(+), 52 deletions(-) diff --git a/packages/google-cloud-bigtable/.flake8 b/packages/google-cloud-bigtable/.flake8 index 2e438749863d..87f6e408c47d 100644 --- a/packages/google-cloud-bigtable/.flake8 +++ b/packages/google-cloud-bigtable/.flake8 @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2020 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 98994f474104..ae4a522b9e5f 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:2d816f26f728ac8b24248741e7d4c461c09764ef9f7be3684d557c9632e46dbd -# created: 2023-06-28T17:03:33.371210701Z + digest: sha256:39f0f3f2be02ef036e297e376fe3b6256775576da8a6ccb1d5eeb80f4c8bf8fb +# created: 2023-07-17T15:20:13.819193964Z diff --git a/packages/google-cloud-bigtable/.github/auto-label.yaml b/packages/google-cloud-bigtable/.github/auto-label.yaml index 41bff0b5375a..b2016d119b40 100644 --- a/packages/google-cloud-bigtable/.github/auto-label.yaml +++ b/packages/google-cloud-bigtable/.github/auto-label.yaml @@ -1,4 +1,4 @@ -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/packages/google-cloud-bigtable/.kokoro/build.sh b/packages/google-cloud-bigtable/.kokoro/build.sh index 2ab1155b2e83..dec6b66a7872 100755 --- a/packages/google-cloud-bigtable/.kokoro/build.sh +++ b/packages/google-cloud-bigtable/.kokoro/build.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2018 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/.kokoro/docker/docs/Dockerfile b/packages/google-cloud-bigtable/.kokoro/docker/docs/Dockerfile index f8137d0ae497..8e39a2cc438d 100644 --- a/packages/google-cloud-bigtable/.kokoro/docker/docs/Dockerfile +++ b/packages/google-cloud-bigtable/.kokoro/docker/docs/Dockerfile @@ -1,4 +1,4 @@ -# Copyright 2020 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/.kokoro/populate-secrets.sh b/packages/google-cloud-bigtable/.kokoro/populate-secrets.sh index f52514257ef0..6f3972140e80 100755 --- a/packages/google-cloud-bigtable/.kokoro/populate-secrets.sh +++ b/packages/google-cloud-bigtable/.kokoro/populate-secrets.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2020 Google LLC. +# Copyright 2023 Google LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/.kokoro/publish-docs.sh b/packages/google-cloud-bigtable/.kokoro/publish-docs.sh index 1c4d62370042..9eafe0be3bba 100755 --- a/packages/google-cloud-bigtable/.kokoro/publish-docs.sh +++ b/packages/google-cloud-bigtable/.kokoro/publish-docs.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2020 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/.kokoro/release.sh b/packages/google-cloud-bigtable/.kokoro/release.sh index 6b594c813d66..2e1cbfa810ef 100755 --- a/packages/google-cloud-bigtable/.kokoro/release.sh +++ b/packages/google-cloud-bigtable/.kokoro/release.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2020 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/packages/google-cloud-bigtable/.kokoro/requirements.txt b/packages/google-cloud-bigtable/.kokoro/requirements.txt index c7929db6d152..67d70a110897 100644 --- a/packages/google-cloud-bigtable/.kokoro/requirements.txt +++ b/packages/google-cloud-bigtable/.kokoro/requirements.txt @@ -113,26 +113,30 @@ commonmark==0.9.1 \ --hash=sha256:452f9dc859be7f06631ddcb328b6919c67984aca654e5fefb3914d54691aed60 \ --hash=sha256:da2f38c92590f83de410ba1a3cbceafbc74fee9def35f9251ba9a971d6d66fd9 # via rich -cryptography==41.0.0 \ - --hash=sha256:0ddaee209d1cf1f180f1efa338a68c4621154de0afaef92b89486f5f96047c55 \ - --hash=sha256:14754bcdae909d66ff24b7b5f166d69340ccc6cb15731670435efd5719294895 \ - --hash=sha256:344c6de9f8bda3c425b3a41b319522ba3208551b70c2ae00099c205f0d9fd3be \ - --hash=sha256:34d405ea69a8b34566ba3dfb0521379b210ea5d560fafedf9f800a9a94a41928 \ - --hash=sha256:3680248309d340fda9611498a5319b0193a8dbdb73586a1acf8109d06f25b92d \ - --hash=sha256:3c5ef25d060c80d6d9f7f9892e1d41bb1c79b78ce74805b8cb4aa373cb7d5ec8 \ - --hash=sha256:4ab14d567f7bbe7f1cdff1c53d5324ed4d3fc8bd17c481b395db224fb405c237 \ - --hash=sha256:5c1f7293c31ebc72163a9a0df246f890d65f66b4a40d9ec80081969ba8c78cc9 \ - --hash=sha256:6b71f64beeea341c9b4f963b48ee3b62d62d57ba93eb120e1196b31dc1025e78 \ - --hash=sha256:7d92f0248d38faa411d17f4107fc0bce0c42cae0b0ba5415505df72d751bf62d \ - --hash=sha256:8362565b3835ceacf4dc8f3b56471a2289cf51ac80946f9087e66dc283a810e0 \ - --hash=sha256:84a165379cb9d411d58ed739e4af3396e544eac190805a54ba2e0322feb55c46 \ - --hash=sha256:88ff107f211ea696455ea8d911389f6d2b276aabf3231bf72c8853d22db755c5 \ - --hash=sha256:9f65e842cb02550fac96536edb1d17f24c0a338fd84eaf582be25926e993dde4 \ - --hash=sha256:a4fc68d1c5b951cfb72dfd54702afdbbf0fb7acdc9b7dc4301bbf2225a27714d \ - --hash=sha256:b7f2f5c525a642cecad24ee8670443ba27ac1fab81bba4cc24c7b6b41f2d0c75 \ - --hash=sha256:b846d59a8d5a9ba87e2c3d757ca019fa576793e8758174d3868aecb88d6fc8eb \ - --hash=sha256:bf8fc66012ca857d62f6a347007e166ed59c0bc150cefa49f28376ebe7d992a2 \ - --hash=sha256:f5d0bf9b252f30a31664b6f64432b4730bb7038339bd18b1fafe129cfc2be9be +cryptography==41.0.2 \ + --hash=sha256:01f1d9e537f9a15b037d5d9ee442b8c22e3ae11ce65ea1f3316a41c78756b711 \ + --hash=sha256:079347de771f9282fbfe0e0236c716686950c19dee1b76240ab09ce1624d76d7 \ + --hash=sha256:182be4171f9332b6741ee818ec27daff9fb00349f706629f5cbf417bd50e66fd \ + --hash=sha256:192255f539d7a89f2102d07d7375b1e0a81f7478925b3bc2e0549ebf739dae0e \ + --hash=sha256:2a034bf7d9ca894720f2ec1d8b7b5832d7e363571828037f9e0c4f18c1b58a58 \ + --hash=sha256:342f3767e25876751e14f8459ad85e77e660537ca0a066e10e75df9c9e9099f0 \ + --hash=sha256:439c3cc4c0d42fa999b83ded80a9a1fb54d53c58d6e59234cfe97f241e6c781d \ + --hash=sha256:49c3222bb8f8e800aead2e376cbef687bc9e3cb9b58b29a261210456a7783d83 \ + --hash=sha256:674b669d5daa64206c38e507808aae49904c988fa0a71c935e7006a3e1e83831 \ + --hash=sha256:7a9a3bced53b7f09da251685224d6a260c3cb291768f54954e28f03ef14e3766 \ + --hash=sha256:7af244b012711a26196450d34f483357e42aeddb04128885d95a69bd8b14b69b \ + --hash=sha256:7d230bf856164de164ecb615ccc14c7fc6de6906ddd5b491f3af90d3514c925c \ + --hash=sha256:84609ade00a6ec59a89729e87a503c6e36af98ddcd566d5f3be52e29ba993182 \ + --hash=sha256:9a6673c1828db6270b76b22cc696f40cde9043eb90373da5c2f8f2158957f42f \ + --hash=sha256:9b6d717393dbae53d4e52684ef4f022444fc1cce3c48c38cb74fca29e1f08eaa \ + --hash=sha256:9c3fe6534d59d071ee82081ca3d71eed3210f76ebd0361798c74abc2bcf347d4 \ + --hash=sha256:a719399b99377b218dac6cf547b6ec54e6ef20207b6165126a280b0ce97e0d2a \ + 
--hash=sha256:b332cba64d99a70c1e0836902720887fb4529ea49ea7f5462cf6640e095e11d2 \ + --hash=sha256:d124682c7a23c9764e54ca9ab5b308b14b18eba02722b8659fb238546de83a76 \ + --hash=sha256:d73f419a56d74fef257955f51b18d046f3506270a5fd2ac5febbfa259d6c0fa5 \ + --hash=sha256:f0dc40e6f7aa37af01aba07277d3d64d5a03dc66d682097541ec4da03cc140ee \ + --hash=sha256:f14ad275364c8b4e525d018f6716537ae7b6d369c094805cae45300847e0894f \ + --hash=sha256:f772610fe364372de33d76edcd313636a25684edb94cee53fd790195f5989d14 # via # gcp-releasetool # secretstorage diff --git a/packages/google-cloud-bigtable/.kokoro/test-samples-against-head.sh b/packages/google-cloud-bigtable/.kokoro/test-samples-against-head.sh index ba3a707b040c..63ac41dfae1d 100755 --- a/packages/google-cloud-bigtable/.kokoro/test-samples-against-head.sh +++ b/packages/google-cloud-bigtable/.kokoro/test-samples-against-head.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2020 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/.kokoro/test-samples-impl.sh b/packages/google-cloud-bigtable/.kokoro/test-samples-impl.sh index 2c6500cae0b9..5a0f5fab6a89 100755 --- a/packages/google-cloud-bigtable/.kokoro/test-samples-impl.sh +++ b/packages/google-cloud-bigtable/.kokoro/test-samples-impl.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2021 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/.kokoro/test-samples.sh b/packages/google-cloud-bigtable/.kokoro/test-samples.sh index 11c042d342d7..50b35a48c190 100755 --- a/packages/google-cloud-bigtable/.kokoro/test-samples.sh +++ b/packages/google-cloud-bigtable/.kokoro/test-samples.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2020 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/.kokoro/trampoline.sh b/packages/google-cloud-bigtable/.kokoro/trampoline.sh index f39236e943a8..d85b1f267693 100755 --- a/packages/google-cloud-bigtable/.kokoro/trampoline.sh +++ b/packages/google-cloud-bigtable/.kokoro/trampoline.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2017 Google Inc. +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/.kokoro/trampoline_v2.sh b/packages/google-cloud-bigtable/.kokoro/trampoline_v2.sh index 4af6cdc26dbc..59a7cf3a9373 100755 --- a/packages/google-cloud-bigtable/.kokoro/trampoline_v2.sh +++ b/packages/google-cloud-bigtable/.kokoro/trampoline_v2.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Copyright 2020 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/packages/google-cloud-bigtable/.pre-commit-config.yaml b/packages/google-cloud-bigtable/.pre-commit-config.yaml index 5405cc8ff1f3..9e3898fd1c12 100644 --- a/packages/google-cloud-bigtable/.pre-commit-config.yaml +++ b/packages/google-cloud-bigtable/.pre-commit-config.yaml @@ -1,4 +1,4 @@ -# Copyright 2021 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/.trampolinerc b/packages/google-cloud-bigtable/.trampolinerc index 0eee72ab62aa..a7dfeb42c6d0 100644 --- a/packages/google-cloud-bigtable/.trampolinerc +++ b/packages/google-cloud-bigtable/.trampolinerc @@ -1,4 +1,4 @@ -# Copyright 2020 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Template for .trampolinerc - # Add required env vars here. required_envvars+=( ) diff --git a/packages/google-cloud-bigtable/MANIFEST.in b/packages/google-cloud-bigtable/MANIFEST.in index e783f4c6209b..e0a66705318e 100644 --- a/packages/google-cloud-bigtable/MANIFEST.in +++ b/packages/google-cloud-bigtable/MANIFEST.in @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2020 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/docs/conf.py b/packages/google-cloud-bigtable/docs/conf.py index 34f3a4d08aba..b5a870f58002 100644 --- a/packages/google-cloud-bigtable/docs/conf.py +++ b/packages/google-cloud-bigtable/docs/conf.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2021 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index fe9f07b4f6a1..837b60887c92 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2018 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -440,6 +440,7 @@ def prerelease_deps(session): "python", "-c", "import google.protobuf; print(google.protobuf.__version__)" ) session.run("python", "-c", "import grpc; print(grpc.__version__)") + session.run("python", "-c", "import google.auth; print(google.auth.__version__)") session.run("py.test", "tests/unit") diff --git a/packages/google-cloud-bigtable/scripts/decrypt-secrets.sh b/packages/google-cloud-bigtable/scripts/decrypt-secrets.sh index 21f6d2a26d90..0018b421ddf8 100755 --- a/packages/google-cloud-bigtable/scripts/decrypt-secrets.sh +++ b/packages/google-cloud-bigtable/scripts/decrypt-secrets.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright 2015 Google Inc. All rights reserved. +# Copyright 2023 Google LLC All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/packages/google-cloud-bigtable/scripts/readme-gen/readme_gen.py b/packages/google-cloud-bigtable/scripts/readme-gen/readme_gen.py index 91b59676bfc7..1acc119835b5 100644 --- a/packages/google-cloud-bigtable/scripts/readme-gen/readme_gen.py +++ b/packages/google-cloud-bigtable/scripts/readme-gen/readme_gen.py @@ -1,6 +1,6 @@ #!/usr/bin/env python -# Copyright 2016 Google Inc +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -33,17 +33,17 @@ autoescape=True, ) -README_TMPL = jinja_env.get_template('README.tmpl.rst') +README_TMPL = jinja_env.get_template("README.tmpl.rst") def get_help(file): - return subprocess.check_output(['python', file, '--help']).decode() + return subprocess.check_output(["python", file, "--help"]).decode() def main(): parser = argparse.ArgumentParser() - parser.add_argument('source') - parser.add_argument('--destination', default='README.rst') + parser.add_argument("source") + parser.add_argument("--destination", default="README.rst") args = parser.parse_args() @@ -51,9 +51,9 @@ def main(): root = os.path.dirname(source) destination = os.path.join(root, args.destination) - jinja_env.globals['get_help'] = get_help + jinja_env.globals["get_help"] = get_help - with io.open(source, 'r') as f: + with io.open(source, "r") as f: config = yaml.load(f) # This allows get_help to execute in the right directory. @@ -61,9 +61,9 @@ def main(): output = README_TMPL.render(config) - with io.open(destination, 'w') as f: + with io.open(destination, "w") as f: f.write(output) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/packages/google-cloud-bigtable/setup.cfg b/packages/google-cloud-bigtable/setup.cfg index c3a2b39f6528..052350089505 100644 --- a/packages/google-cloud-bigtable/setup.cfg +++ b/packages/google-cloud-bigtable/setup.cfg @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2020 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
From ee58bde21bba61e91296e11b005e440ec703cc6a Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Wed, 19 Jul 2023 12:51:34 -0400 Subject: [PATCH 738/892] chore(main): release 2.20.0 (#827) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- .../.release-please-manifest.json | 2 +- packages/google-cloud-bigtable/CHANGELOG.md | 18 ++++++++++++++++++ .../google/cloud/bigtable/gapic_version.py | 2 +- .../cloud/bigtable_admin/gapic_version.py | 2 +- .../cloud/bigtable_admin_v2/gapic_version.py | 2 +- .../google/cloud/bigtable_v2/gapic_version.py | 2 +- 6 files changed, 23 insertions(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/.release-please-manifest.json b/packages/google-cloud-bigtable/.release-please-manifest.json index b7f666a684a7..ba3e06a78b90 100644 --- a/packages/google-cloud-bigtable/.release-please-manifest.json +++ b/packages/google-cloud-bigtable/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "2.19.0" + ".": "2.20.0" } \ No newline at end of file diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index dc80386a4f9a..2e86e4d43cc5 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,24 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.20.0](https://github.com/googleapis/python-bigtable/compare/v2.19.0...v2.20.0) (2023-07-17) + + +### Features + +* Add experimental reverse scan for public preview ([d5720f8](https://github.com/googleapis/python-bigtable/commit/d5720f8f5b5a81572f31d40051b3ec0f1d104304)) +* Increase the maximum retention period for a Cloud Bigtable backup from 30 days to 90 days ([d5720f8](https://github.com/googleapis/python-bigtable/commit/d5720f8f5b5a81572f31d40051b3ec0f1d104304)) + + +### Bug Fixes + +* Add async context manager return types ([#828](https://github.com/googleapis/python-bigtable/issues/828)) ([475a160](https://github.com/googleapis/python-bigtable/commit/475a16072f3ad41357bdb765fff608a39141ec00)) + + +### Documentation + +* Fix formatting for reversed order field example ([#831](https://github.com/googleapis/python-bigtable/issues/831)) ([fddd0ba](https://github.com/googleapis/python-bigtable/commit/fddd0ba97155e112af92a98fd8f20e59b139d177)) + ## [2.19.0](https://github.com/googleapis/python-bigtable/compare/v2.18.1...v2.19.0) (2023-06-08) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py index 0f1a446f3802..551f0d2ebacb 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "2.19.0" # {x-release-please-version} +__version__ = "2.20.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py index 0f1a446f3802..551f0d2ebacb 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.19.0" # {x-release-please-version} +__version__ = "2.20.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py index 0f1a446f3802..551f0d2ebacb 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.19.0" # {x-release-please-version} +__version__ = "2.20.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py index 0f1a446f3802..551f0d2ebacb 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.19.0" # {x-release-please-version} +__version__ = "2.20.0" # {x-release-please-version} From abd7bebd2598c3dbef63108100eb9de03f291d7a Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Fri, 21 Jul 2023 09:19:49 -0400 Subject: [PATCH 739/892] build(deps): [autoapprove] bump pygments from 2.13.0 to 2.15.0 (#841) Source-Link: https://github.com/googleapis/synthtool/commit/eaef28efd179e6eeb9f4e9bf697530d074a6f3b9 Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:f8ca7655fa8a449cadcabcbce4054f593dcbae7aeeab34aa3fcc8b5cf7a93c9e Co-authored-by: Owl Bot --- packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml | 4 ++-- packages/google-cloud-bigtable/.kokoro/requirements.txt | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index ae4a522b9e5f..17c21d96d654 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. 
docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:39f0f3f2be02ef036e297e376fe3b6256775576da8a6ccb1d5eeb80f4c8bf8fb -# created: 2023-07-17T15:20:13.819193964Z + digest: sha256:f8ca7655fa8a449cadcabcbce4054f593dcbae7aeeab34aa3fcc8b5cf7a93c9e +# created: 2023-07-21T02:12:46.49799314Z diff --git a/packages/google-cloud-bigtable/.kokoro/requirements.txt b/packages/google-cloud-bigtable/.kokoro/requirements.txt index 67d70a110897..b563eb284459 100644 --- a/packages/google-cloud-bigtable/.kokoro/requirements.txt +++ b/packages/google-cloud-bigtable/.kokoro/requirements.txt @@ -396,9 +396,9 @@ pycparser==2.21 \ --hash=sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9 \ --hash=sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206 # via cffi -pygments==2.13.0 \ - --hash=sha256:56a8508ae95f98e2b9bdf93a6be5ae3f7d8af858b43e02c5a2ff083726be40c1 \ - --hash=sha256:f643f331ab57ba3c9d89212ee4a2dabc6e94f117cf4eefde99a0574720d14c42 +pygments==2.15.0 \ + --hash=sha256:77a3299119af881904cd5ecd1ac6a66214b6e9bed1f2db16993b54adede64094 \ + --hash=sha256:f7e36cffc4c517fbc252861b9a6e4644ca0e5abadf9a113c72d1358ad09b9500 # via # readme-renderer # rich From edd6dcb90d83cfafcc5190e142a9c230f20e11f3 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Wed, 26 Jul 2023 15:02:13 -0400 Subject: [PATCH 740/892] feat: add last_scanned_row_responses to FeatureFlags (#845) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: add last_scanned_row_key feature PiperOrigin-RevId: 551191182 Source-Link: https://github.com/googleapis/googleapis/commit/51e04baa9eec3bee8b3e237bfd847eb06aa66d72 Source-Link: https://github.com/googleapis/googleapis-gen/commit/4b90e8ead4477eff96c31b9b0fdef36ed975b15f Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiNGI5MGU4ZWFkNDQ3N2VmZjk2YzMxYjliMGZkZWYzNmVkOTc1YjE1ZiJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- .../google/cloud/bigtable_v2/types/feature_flags.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/feature_flags.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/feature_flags.py index d1fd03ff6db4..c7a64db5e046 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/feature_flags.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/feature_flags.py @@ -48,6 +48,10 @@ class FeatureFlags(proto.Message): Notify the server that the client enables batch write flow control by requesting RateLimitInfo from MutateRowsResponse. + last_scanned_row_responses (bool): + Notify the server that the client supports the + last_scanned_row field in ReadRowsResponse for long-running + sparse scans. 
""" reverse_scans: bool = proto.Field( @@ -58,6 +62,10 @@ class FeatureFlags(proto.Message): proto.BOOL, number=3, ) + last_scanned_row_responses: bool = proto.Field( + proto.BOOL, + number=4, + ) __all__ = tuple(sorted(__protobuf__.manifest)) From 6932f952135f53792a86a1568be578de72379ac1 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 27 Jul 2023 05:54:30 -0400 Subject: [PATCH 741/892] build(deps): [autoapprove] bump certifi from 2022.12.7 to 2023.7.22 (#844) Source-Link: https://github.com/googleapis/synthtool/commit/395d53adeeacfca00b73abf197f65f3c17c8f1e9 Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:6c1cbc75c74b8bdd71dada2fa1677e9d6d78a889e9a70ee75b93d1d0543f96e1 Co-authored-by: Owl Bot --- packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml | 4 ++-- packages/google-cloud-bigtable/.kokoro/requirements.txt | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 17c21d96d654..0ddd0e4d1873 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:f8ca7655fa8a449cadcabcbce4054f593dcbae7aeeab34aa3fcc8b5cf7a93c9e -# created: 2023-07-21T02:12:46.49799314Z + digest: sha256:6c1cbc75c74b8bdd71dada2fa1677e9d6d78a889e9a70ee75b93d1d0543f96e1 +# created: 2023-07-25T21:01:10.396410762Z diff --git a/packages/google-cloud-bigtable/.kokoro/requirements.txt b/packages/google-cloud-bigtable/.kokoro/requirements.txt index b563eb284459..76d9bba0f7d0 100644 --- a/packages/google-cloud-bigtable/.kokoro/requirements.txt +++ b/packages/google-cloud-bigtable/.kokoro/requirements.txt @@ -20,9 +20,9 @@ cachetools==5.2.0 \ --hash=sha256:6a94c6402995a99c3970cc7e4884bb60b4a8639938157eeed436098bf9831757 \ --hash=sha256:f9f17d2aec496a9aa6b76f53e3b614c965223c061982d434d160f930c698a9db # via google-auth -certifi==2022.12.7 \ - --hash=sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3 \ - --hash=sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18 +certifi==2023.7.22 \ + --hash=sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082 \ + --hash=sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9 # via requests cffi==1.15.1 \ --hash=sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5 \ From 22f057725071489f50921748c342c146769a08aa Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Tue, 1 Aug 2023 16:18:12 -0400 Subject: [PATCH 742/892] chore: [autoapprove] Pin flake8 version (#847) Source-Link: https://github.com/googleapis/synthtool/commit/0ddbff8012e47cde4462fe3f9feab01fbc4cdfd6 Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:bced5ca77c4dda0fd2f5d845d4035fc3c5d3d6b81f245246a36aee114970082b Co-authored-by: Owl Bot --- packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml | 4 ++-- packages/google-cloud-bigtable/.pre-commit-config.yaml | 2 +- packages/google-cloud-bigtable/noxfile.py | 3 ++- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 
0ddd0e4d1873..d71329cc807d 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:6c1cbc75c74b8bdd71dada2fa1677e9d6d78a889e9a70ee75b93d1d0543f96e1 -# created: 2023-07-25T21:01:10.396410762Z + digest: sha256:bced5ca77c4dda0fd2f5d845d4035fc3c5d3d6b81f245246a36aee114970082b +# created: 2023-08-01T17:41:45.434027321Z diff --git a/packages/google-cloud-bigtable/.pre-commit-config.yaml b/packages/google-cloud-bigtable/.pre-commit-config.yaml index 9e3898fd1c12..19409cbd37a4 100644 --- a/packages/google-cloud-bigtable/.pre-commit-config.yaml +++ b/packages/google-cloud-bigtable/.pre-commit-config.yaml @@ -26,6 +26,6 @@ repos: hooks: - id: black - repo: https://github.com/pycqa/flake8 - rev: 3.9.2 + rev: 6.1.0 hooks: - id: flake8 diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index 837b60887c92..b2820b309e1a 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -25,6 +25,7 @@ import nox +FLAKE8_VERSION = "flake8==6.1.0" BLACK_VERSION = "black==22.3.0" ISORT_VERSION = "isort==5.10.1" LINT_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"] @@ -83,7 +84,7 @@ def lint(session): Returns a failure if the linters find linting errors or sufficiently serious code quality issues. """ - session.install("flake8", BLACK_VERSION) + session.install(FLAKE8_VERSION, BLACK_VERSION) session.run( "black", "--check", From 00e2be4b9945213ea3b747e34dc561415fd93dbf Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Wed, 2 Aug 2023 11:22:12 -0400 Subject: [PATCH 743/892] docs: Minor formatting (#851) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * docs: Minor formatting PiperOrigin-RevId: 553099804 Source-Link: https://github.com/googleapis/googleapis/commit/f48d1a329db8655ccf843c814026060436111161 Source-Link: https://github.com/googleapis/googleapis-gen/commit/9607990f4c3217bac6edd8131614cfcc71744a6f Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiOTYwNzk5MGY0YzMyMTdiYWM2ZWRkODEzMTYxNGNmY2M3MTc0NGE2ZiJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- .../bigtable_instance_admin/async_client.py | 70 +------ .../bigtable_instance_admin/client.py | 70 +------ .../transports/rest.py | 172 +++++++++--------- .../bigtable_table_admin/async_client.py | 70 +------ .../services/bigtable_table_admin/client.py | 70 +------ .../bigtable_table_admin/transports/rest.py | 172 +++++++++--------- 6 files changed, 188 insertions(+), 436 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py index 4b45774f06b9..31e68fe95128 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ -1919,42 +1919,11 @@ async def get_iam_policy( **JSON example:** - { - "bindings": [ - { - "role": - 
"roles/resourcemanager.organizationAdmin", - "members": [ "user:mike@example.com", - "group:admins@example.com", - "domain:google.com", - "serviceAccount:my-project-id@appspot.gserviceaccount.com" - ] - - }, { "role": - "roles/resourcemanager.organizationViewer", - "members": [ "user:eve@example.com" ], - "condition": { "title": "expirable access", - "description": "Does not grant access after - Sep 2020", "expression": "request.time < - timestamp('2020-10-01T00:00:00.000Z')", } } - - ], "etag": "BwWWja0YfJA=", "version": 3 - - } + :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` **YAML example:** - bindings: - members: - user:\ mike@example.com - - group:\ admins@example.com - domain:google.com - - serviceAccount:\ my-project-id@appspot.gserviceaccount.com - role: roles/resourcemanager.organizationAdmin - - members: - user:\ eve@example.com role: - roles/resourcemanager.organizationViewer - condition: title: expirable access description: - Does not grant access after Sep 2020 expression: - request.time < - timestamp('2020-10-01T00:00:00.000Z') etag: - BwWWja0YfJA= version: 3 + :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` For a description of IAM and its features, see the [IAM @@ -2068,42 +2037,11 @@ async def set_iam_policy( **JSON example:** - { - "bindings": [ - { - "role": - "roles/resourcemanager.organizationAdmin", - "members": [ "user:mike@example.com", - "group:admins@example.com", - "domain:google.com", - "serviceAccount:my-project-id@appspot.gserviceaccount.com" - ] - - }, { "role": - "roles/resourcemanager.organizationViewer", - "members": [ "user:eve@example.com" ], - "condition": { "title": "expirable access", - "description": "Does not grant access after - Sep 2020", "expression": "request.time < - timestamp('2020-10-01T00:00:00.000Z')", } } - - ], "etag": "BwWWja0YfJA=", "version": 3 - - } + :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` **YAML example:** - bindings: - members: - user:\ mike@example.com - - group:\ admins@example.com - domain:google.com - - serviceAccount:\ my-project-id@appspot.gserviceaccount.com - role: roles/resourcemanager.organizationAdmin - - members: 
- user:\ eve@example.com role: - roles/resourcemanager.organizationViewer - condition: title: expirable access description: - Does not grant access after Sep 2020 expression: - request.time < - timestamp('2020-10-01T00:00:00.000Z') etag: - BwWWja0YfJA= version: 3 + :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` For a description of IAM and its features, see the [IAM diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py index fb993b6511f6..d38105a2ac5c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -2143,42 +2143,11 @@ def get_iam_policy( **JSON example:** - { - "bindings": [ - { - "role": - "roles/resourcemanager.organizationAdmin", - "members": [ "user:mike@example.com", - "group:admins@example.com", - "domain:google.com", - "serviceAccount:my-project-id@appspot.gserviceaccount.com" - ] - - }, { "role": - "roles/resourcemanager.organizationViewer", - "members": [ "user:eve@example.com" ], - "condition": { "title": "expirable access", - "description": "Does not grant access after - Sep 2020", "expression": "request.time < - timestamp('2020-10-01T00:00:00.000Z')", } } - - ], "etag": "BwWWja0YfJA=", "version": 3 - - } + :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` **YAML example:** - bindings: - members: - user:\ mike@example.com - - group:\ admins@example.com - domain:google.com - - serviceAccount:\ my-project-id@appspot.gserviceaccount.com - role: roles/resourcemanager.organizationAdmin - - members: - user:\ eve@example.com role: - roles/resourcemanager.organizationViewer - condition: title: expirable access description: - Does not grant access after Sep 2020 expression: - request.time < - timestamp('2020-10-01T00:00:00.000Z') etag: - BwWWja0YfJA= version: 3 + :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` For a description of IAM and its features, see the [IAM @@ -2279,42 +2248,11 @@ def set_iam_policy( 
**JSON example:** - { - "bindings": [ - { - "role": - "roles/resourcemanager.organizationAdmin", - "members": [ "user:mike@example.com", - "group:admins@example.com", - "domain:google.com", - "serviceAccount:my-project-id@appspot.gserviceaccount.com" - ] - - }, { "role": - "roles/resourcemanager.organizationViewer", - "members": [ "user:eve@example.com" ], - "condition": { "title": "expirable access", - "description": "Does not grant access after - Sep 2020", "expression": "request.time < - timestamp('2020-10-01T00:00:00.000Z')", } } - - ], "etag": "BwWWja0YfJA=", "version": 3 - - } + :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` **YAML example:** - bindings: - members: - user:\ mike@example.com - - group:\ admins@example.com - domain:google.com - - serviceAccount:\ my-project-id@appspot.gserviceaccount.com - role: roles/resourcemanager.organizationAdmin - - members: - user:\ eve@example.com role: - roles/resourcemanager.organizationViewer - condition: title: expirable access description: - Does not grant access after Sep 2020 expression: - request.time < - timestamp('2020-10-01T00:00:00.000Z') etag: - BwWWja0YfJA= version: 3 + :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` For a description of IAM and its features, see the [IAM diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py index 228f5c02ce39..25bb0a0b98b5 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py @@ -1612,54 +1612,54 @@ def __call__( :: - { - "bindings": [ - { - "role": "roles/resourcemanager.organizationAdmin", - "members": [ - "user:mike@example.com", - "group:admins@example.com", - "domain:google.com", - "serviceAccount:my-project-id@appspot.gserviceaccount.com" - ] - }, - { - "role": "roles/resourcemanager.organizationViewer", - "members": [ - "user:eve@example.com" - ], - "condition": { - "title": "expirable access", - "description": "Does not grant access after Sep 2020", - "expression": "request.time < - timestamp('2020-10-01T00:00:00.000Z')", - } - } - ], - "etag": "BwWWja0YfJA=", - "version": 3 - } + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + 
"serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": [ + "user:eve@example.com" + ], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ], + "etag": "BwWWja0YfJA=", + "version": 3 + } **YAML example:** :: - bindings: - - members: - - user:mike@example.com - - group:admins@example.com - - domain:google.com - - serviceAccount:my-project-id@appspot.gserviceaccount.com - role: roles/resourcemanager.organizationAdmin - - members: - - user:eve@example.com - role: roles/resourcemanager.organizationViewer - condition: - title: expirable access - description: Does not grant access after Sep 2020 - expression: request.time < timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: 3 + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, see the `IAM documentation `__. @@ -2439,54 +2439,54 @@ def __call__( :: - { - "bindings": [ - { - "role": "roles/resourcemanager.organizationAdmin", - "members": [ - "user:mike@example.com", - "group:admins@example.com", - "domain:google.com", - "serviceAccount:my-project-id@appspot.gserviceaccount.com" - ] - }, - { - "role": "roles/resourcemanager.organizationViewer", - "members": [ - "user:eve@example.com" - ], - "condition": { - "title": "expirable access", - "description": "Does not grant access after Sep 2020", - "expression": "request.time < - timestamp('2020-10-01T00:00:00.000Z')", - } - } - ], - "etag": "BwWWja0YfJA=", - "version": 3 - } + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": [ + "user:eve@example.com" + ], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ], + "etag": "BwWWja0YfJA=", + "version": 3 + } **YAML example:** :: - bindings: - - members: - - user:mike@example.com - - group:admins@example.com - - domain:google.com - - serviceAccount:my-project-id@appspot.gserviceaccount.com - role: roles/resourcemanager.organizationAdmin - - members: - - user:eve@example.com - role: roles/resourcemanager.organizationViewer - condition: - title: expirable access - description: Does not grant access after Sep 2020 - expression: request.time < timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: 3 + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: 
Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, see the `IAM documentation `__. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index a7eed5d37828..31ddfac72a61 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -2337,42 +2337,11 @@ async def get_iam_policy( **JSON example:** - { - "bindings": [ - { - "role": - "roles/resourcemanager.organizationAdmin", - "members": [ "user:mike@example.com", - "group:admins@example.com", - "domain:google.com", - "serviceAccount:my-project-id@appspot.gserviceaccount.com" - ] - - }, { "role": - "roles/resourcemanager.organizationViewer", - "members": [ "user:eve@example.com" ], - "condition": { "title": "expirable access", - "description": "Does not grant access after - Sep 2020", "expression": "request.time < - timestamp('2020-10-01T00:00:00.000Z')", } } - - ], "etag": "BwWWja0YfJA=", "version": 3 - - } + :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` **YAML example:** - bindings: - members: - user:\ mike@example.com - - group:\ admins@example.com - domain:google.com - - serviceAccount:\ my-project-id@appspot.gserviceaccount.com - role: roles/resourcemanager.organizationAdmin - - members: - user:\ eve@example.com role: - roles/resourcemanager.organizationViewer - condition: title: expirable access description: - Does not grant access after Sep 2020 expression: - request.time < - timestamp('2020-10-01T00:00:00.000Z') etag: - BwWWja0YfJA= version: 3 + :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` For a description of IAM and its features, see the [IAM @@ -2486,42 +2455,11 @@ async def set_iam_policy( **JSON example:** - { - "bindings": [ - { - "role": - "roles/resourcemanager.organizationAdmin", - "members": [ "user:mike@example.com", - "group:admins@example.com", - "domain:google.com", - "serviceAccount:my-project-id@appspot.gserviceaccount.com" - ] - - }, { "role": - "roles/resourcemanager.organizationViewer", - "members": [ "user:eve@example.com" ], - "condition": { "title": "expirable access", - "description": "Does not grant access after - Sep 2020", "expression": "request.time < - timestamp('2020-10-01T00:00:00.000Z')", } } - - ], "etag": 
"BwWWja0YfJA=", "version": 3 - - } + :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` **YAML example:** - bindings: - members: - user:\ mike@example.com - - group:\ admins@example.com - domain:google.com - - serviceAccount:\ my-project-id@appspot.gserviceaccount.com - role: roles/resourcemanager.organizationAdmin - - members: - user:\ eve@example.com role: - roles/resourcemanager.organizationViewer - condition: title: expirable access description: - Does not grant access after Sep 2020 expression: - request.time < - timestamp('2020-10-01T00:00:00.000Z') etag: - BwWWja0YfJA= version: 3 + :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` For a description of IAM and its features, see the [IAM diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index 03746495472d..b7127c0327d1 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -2592,42 +2592,11 @@ def get_iam_policy( **JSON example:** - { - "bindings": [ - { - "role": - "roles/resourcemanager.organizationAdmin", - "members": [ "user:mike@example.com", - "group:admins@example.com", - "domain:google.com", - "serviceAccount:my-project-id@appspot.gserviceaccount.com" - ] - - }, { "role": - "roles/resourcemanager.organizationViewer", - "members": [ "user:eve@example.com" ], - "condition": { "title": "expirable access", - "description": "Does not grant access after - Sep 2020", "expression": "request.time < - timestamp('2020-10-01T00:00:00.000Z')", } } - - ], "etag": "BwWWja0YfJA=", "version": 3 - - } + :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` **YAML example:** - bindings: - members: - user:\ mike@example.com - - group:\ admins@example.com - domain:google.com - - serviceAccount:\ my-project-id@appspot.gserviceaccount.com - role: roles/resourcemanager.organizationAdmin - - members: - user:\ eve@example.com role: - 
roles/resourcemanager.organizationViewer - condition: title: expirable access description: - Does not grant access after Sep 2020 expression: - request.time < - timestamp('2020-10-01T00:00:00.000Z') etag: - BwWWja0YfJA= version: 3 + :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` For a description of IAM and its features, see the [IAM @@ -2728,42 +2697,11 @@ def set_iam_policy( **JSON example:** - { - "bindings": [ - { - "role": - "roles/resourcemanager.organizationAdmin", - "members": [ "user:mike@example.com", - "group:admins@example.com", - "domain:google.com", - "serviceAccount:my-project-id@appspot.gserviceaccount.com" - ] - - }, { "role": - "roles/resourcemanager.organizationViewer", - "members": [ "user:eve@example.com" ], - "condition": { "title": "expirable access", - "description": "Does not grant access after - Sep 2020", "expression": "request.time < - timestamp('2020-10-01T00:00:00.000Z')", } } - - ], "etag": "BwWWja0YfJA=", "version": 3 - - } + :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` **YAML example:** - bindings: - members: - user:\ mike@example.com - - group:\ admins@example.com - domain:google.com - - serviceAccount:\ my-project-id@appspot.gserviceaccount.com - role: roles/resourcemanager.organizationAdmin - - members: - user:\ eve@example.com role: - roles/resourcemanager.organizationViewer - condition: title: expirable access description: - Does not grant access after Sep 2020 expression: - request.time < - timestamp('2020-10-01T00:00:00.000Z') etag: - BwWWja0YfJA= version: 3 + :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` For a description of IAM and its features, see the [IAM diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py index 8179169778e8..37518c8da694 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py @@ -1881,54 +1881,54 @@ def __call__( :: - { - 
"bindings": [ - { - "role": "roles/resourcemanager.organizationAdmin", - "members": [ - "user:mike@example.com", - "group:admins@example.com", - "domain:google.com", - "serviceAccount:my-project-id@appspot.gserviceaccount.com" - ] - }, - { - "role": "roles/resourcemanager.organizationViewer", - "members": [ - "user:eve@example.com" - ], - "condition": { - "title": "expirable access", - "description": "Does not grant access after Sep 2020", - "expression": "request.time < - timestamp('2020-10-01T00:00:00.000Z')", - } - } - ], - "etag": "BwWWja0YfJA=", - "version": 3 - } + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": [ + "user:eve@example.com" + ], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ], + "etag": "BwWWja0YfJA=", + "version": 3 + } **YAML example:** :: - bindings: - - members: - - user:mike@example.com - - group:admins@example.com - - domain:google.com - - serviceAccount:my-project-id@appspot.gserviceaccount.com - role: roles/resourcemanager.organizationAdmin - - members: - - user:eve@example.com - role: roles/resourcemanager.organizationViewer - condition: - title: expirable access - description: Does not grant access after Sep 2020 - expression: request.time < timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: 3 + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, see the `IAM documentation `__. 
@@ -2733,54 +2733,54 @@ def __call__( :: - { - "bindings": [ - { - "role": "roles/resourcemanager.organizationAdmin", - "members": [ - "user:mike@example.com", - "group:admins@example.com", - "domain:google.com", - "serviceAccount:my-project-id@appspot.gserviceaccount.com" - ] - }, - { - "role": "roles/resourcemanager.organizationViewer", - "members": [ - "user:eve@example.com" - ], - "condition": { - "title": "expirable access", - "description": "Does not grant access after Sep 2020", - "expression": "request.time < - timestamp('2020-10-01T00:00:00.000Z')", - } - } - ], - "etag": "BwWWja0YfJA=", - "version": 3 - } + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": [ + "user:eve@example.com" + ], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ], + "etag": "BwWWja0YfJA=", + "version": 3 + } **YAML example:** :: - bindings: - - members: - - user:mike@example.com - - group:admins@example.com - - domain:google.com - - serviceAccount:my-project-id@appspot.gserviceaccount.com - role: roles/resourcemanager.organizationAdmin - - members: - - user:eve@example.com - role: roles/resourcemanager.organizationViewer - condition: - title: expirable access - description: Does not grant access after Sep 2020 - expression: request.time < timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: 3 + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, see the `IAM documentation `__. From a64ddbc03defb8ce13af3201439a09c86be582a2 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Wed, 2 Aug 2023 11:22:39 -0400 Subject: [PATCH 744/892] build: [autoapprove] bump cryptography from 41.0.2 to 41.0.3 (#850) Source-Link: https://github.com/googleapis/synthtool/commit/352b9d4c068ce7c05908172af128b294073bf53c Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:3e3800bb100af5d7f9e810d48212b37812c1856d20ffeafb99ebe66461b61fc7 Co-authored-by: Owl Bot --- .../.github/.OwlBot.lock.yaml | 4 +- .../.kokoro/requirements.txt | 48 +++++++++---------- 2 files changed, 26 insertions(+), 26 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index d71329cc807d..a3da1b0d4cd3 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. 
docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:bced5ca77c4dda0fd2f5d845d4035fc3c5d3d6b81f245246a36aee114970082b -# created: 2023-08-01T17:41:45.434027321Z + digest: sha256:3e3800bb100af5d7f9e810d48212b37812c1856d20ffeafb99ebe66461b61fc7 +# created: 2023-08-02T10:53:29.114535628Z diff --git a/packages/google-cloud-bigtable/.kokoro/requirements.txt b/packages/google-cloud-bigtable/.kokoro/requirements.txt index 76d9bba0f7d0..029bd342de94 100644 --- a/packages/google-cloud-bigtable/.kokoro/requirements.txt +++ b/packages/google-cloud-bigtable/.kokoro/requirements.txt @@ -113,30 +113,30 @@ commonmark==0.9.1 \ --hash=sha256:452f9dc859be7f06631ddcb328b6919c67984aca654e5fefb3914d54691aed60 \ --hash=sha256:da2f38c92590f83de410ba1a3cbceafbc74fee9def35f9251ba9a971d6d66fd9 # via rich -cryptography==41.0.2 \ - --hash=sha256:01f1d9e537f9a15b037d5d9ee442b8c22e3ae11ce65ea1f3316a41c78756b711 \ - --hash=sha256:079347de771f9282fbfe0e0236c716686950c19dee1b76240ab09ce1624d76d7 \ - --hash=sha256:182be4171f9332b6741ee818ec27daff9fb00349f706629f5cbf417bd50e66fd \ - --hash=sha256:192255f539d7a89f2102d07d7375b1e0a81f7478925b3bc2e0549ebf739dae0e \ - --hash=sha256:2a034bf7d9ca894720f2ec1d8b7b5832d7e363571828037f9e0c4f18c1b58a58 \ - --hash=sha256:342f3767e25876751e14f8459ad85e77e660537ca0a066e10e75df9c9e9099f0 \ - --hash=sha256:439c3cc4c0d42fa999b83ded80a9a1fb54d53c58d6e59234cfe97f241e6c781d \ - --hash=sha256:49c3222bb8f8e800aead2e376cbef687bc9e3cb9b58b29a261210456a7783d83 \ - --hash=sha256:674b669d5daa64206c38e507808aae49904c988fa0a71c935e7006a3e1e83831 \ - --hash=sha256:7a9a3bced53b7f09da251685224d6a260c3cb291768f54954e28f03ef14e3766 \ - --hash=sha256:7af244b012711a26196450d34f483357e42aeddb04128885d95a69bd8b14b69b \ - --hash=sha256:7d230bf856164de164ecb615ccc14c7fc6de6906ddd5b491f3af90d3514c925c \ - --hash=sha256:84609ade00a6ec59a89729e87a503c6e36af98ddcd566d5f3be52e29ba993182 \ - --hash=sha256:9a6673c1828db6270b76b22cc696f40cde9043eb90373da5c2f8f2158957f42f \ - --hash=sha256:9b6d717393dbae53d4e52684ef4f022444fc1cce3c48c38cb74fca29e1f08eaa \ - --hash=sha256:9c3fe6534d59d071ee82081ca3d71eed3210f76ebd0361798c74abc2bcf347d4 \ - --hash=sha256:a719399b99377b218dac6cf547b6ec54e6ef20207b6165126a280b0ce97e0d2a \ - --hash=sha256:b332cba64d99a70c1e0836902720887fb4529ea49ea7f5462cf6640e095e11d2 \ - --hash=sha256:d124682c7a23c9764e54ca9ab5b308b14b18eba02722b8659fb238546de83a76 \ - --hash=sha256:d73f419a56d74fef257955f51b18d046f3506270a5fd2ac5febbfa259d6c0fa5 \ - --hash=sha256:f0dc40e6f7aa37af01aba07277d3d64d5a03dc66d682097541ec4da03cc140ee \ - --hash=sha256:f14ad275364c8b4e525d018f6716537ae7b6d369c094805cae45300847e0894f \ - --hash=sha256:f772610fe364372de33d76edcd313636a25684edb94cee53fd790195f5989d14 +cryptography==41.0.3 \ + --hash=sha256:0d09fb5356f975974dbcb595ad2d178305e5050656affb7890a1583f5e02a306 \ + --hash=sha256:23c2d778cf829f7d0ae180600b17e9fceea3c2ef8b31a99e3c694cbbf3a24b84 \ + --hash=sha256:3fb248989b6363906827284cd20cca63bb1a757e0a2864d4c1682a985e3dca47 \ + --hash=sha256:41d7aa7cdfded09b3d73a47f429c298e80796c8e825ddfadc84c8a7f12df212d \ + --hash=sha256:42cb413e01a5d36da9929baa9d70ca90d90b969269e5a12d39c1e0d475010116 \ + --hash=sha256:4c2f0d35703d61002a2bbdcf15548ebb701cfdd83cdc12471d2bae80878a4207 \ + --hash=sha256:4fd871184321100fb400d759ad0cddddf284c4b696568204d281c902fc7b0d81 \ + --hash=sha256:5259cb659aa43005eb55a0e4ff2c825ca111a0da1814202c64d28a985d33b087 \ + --hash=sha256:57a51b89f954f216a81c9d057bf1a24e2f36e764a1ca9a501a6964eb4a6800dd \ + 
--hash=sha256:652627a055cb52a84f8c448185922241dd5217443ca194d5739b44612c5e6507 \ + --hash=sha256:67e120e9a577c64fe1f611e53b30b3e69744e5910ff3b6e97e935aeb96005858 \ + --hash=sha256:6af1c6387c531cd364b72c28daa29232162010d952ceb7e5ca8e2827526aceae \ + --hash=sha256:6d192741113ef5e30d89dcb5b956ef4e1578f304708701b8b73d38e3e1461f34 \ + --hash=sha256:7efe8041897fe7a50863e51b77789b657a133c75c3b094e51b5e4b5cec7bf906 \ + --hash=sha256:84537453d57f55a50a5b6835622ee405816999a7113267739a1b4581f83535bd \ + --hash=sha256:8f09daa483aedea50d249ef98ed500569841d6498aa9c9f4b0531b9964658922 \ + --hash=sha256:95dd7f261bb76948b52a5330ba5202b91a26fbac13ad0e9fc8a3ac04752058c7 \ + --hash=sha256:a74fbcdb2a0d46fe00504f571a2a540532f4c188e6ccf26f1f178480117b33c4 \ + --hash=sha256:a983e441a00a9d57a4d7c91b3116a37ae602907a7618b882c8013b5762e80574 \ + --hash=sha256:ab8de0d091acbf778f74286f4989cf3d1528336af1b59f3e5d2ebca8b5fe49e1 \ + --hash=sha256:aeb57c421b34af8f9fe830e1955bf493a86a7996cc1338fe41b30047d16e962c \ + --hash=sha256:ce785cf81a7bdade534297ef9e490ddff800d956625020ab2ec2780a556c313e \ + --hash=sha256:d0d651aa754ef58d75cec6edfbd21259d93810b73f6ec246436a21b7841908de # via # gcp-releasetool # secretstorage From 47e78f837c93429cdd6f3b64b340de71ee7e0482 Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Wed, 9 Aug 2023 09:35:39 -0400 Subject: [PATCH 745/892] chore(main): release 2.21.0 (#846) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- .../.release-please-manifest.json | 2 +- packages/google-cloud-bigtable/CHANGELOG.md | 12 ++++++++++++ .../google/cloud/bigtable/gapic_version.py | 2 +- .../google/cloud/bigtable_admin/gapic_version.py | 2 +- .../google/cloud/bigtable_admin_v2/gapic_version.py | 2 +- .../google/cloud/bigtable_v2/gapic_version.py | 2 +- 6 files changed, 17 insertions(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/.release-please-manifest.json b/packages/google-cloud-bigtable/.release-please-manifest.json index ba3e06a78b90..5be20145ac45 100644 --- a/packages/google-cloud-bigtable/.release-please-manifest.json +++ b/packages/google-cloud-bigtable/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "2.20.0" + ".": "2.21.0" } \ No newline at end of file diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 2e86e4d43cc5..1a2a6ad3a1f0 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,18 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.21.0](https://github.com/googleapis/python-bigtable/compare/v2.20.0...v2.21.0) (2023-08-02) + + +### Features + +* Add last_scanned_row_responses to FeatureFlags ([#845](https://github.com/googleapis/python-bigtable/issues/845)) ([14a6739](https://github.com/googleapis/python-bigtable/commit/14a673901f82fa247c8027730a0bba41e0ec4757)) + + +### Documentation + +* Minor formatting ([#851](https://github.com/googleapis/python-bigtable/issues/851)) ([5ebe231](https://github.com/googleapis/python-bigtable/commit/5ebe2312dab70210811fca68c6625d2546442afd)) + ## [2.20.0](https://github.com/googleapis/python-bigtable/compare/v2.19.0...v2.20.0) (2023-07-17) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py index 551f0d2ebacb..e546bae0531e 100644 --- 
a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.20.0" # {x-release-please-version} +__version__ = "2.21.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py index 551f0d2ebacb..e546bae0531e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.20.0" # {x-release-please-version} +__version__ = "2.21.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py index 551f0d2ebacb..e546bae0531e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.20.0" # {x-release-please-version} +__version__ = "2.21.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py index 551f0d2ebacb..e546bae0531e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "2.20.0" # {x-release-please-version} +__version__ = "2.21.0" # {x-release-please-version} From be510f1ffb89d48cadbe8794f28d91919241df28 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Wed, 23 Aug 2023 15:53:15 -0700 Subject: [PATCH 746/892] feat: publish CopyBackup protos to external customers (#855) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: publish CopyBackup protos to external customers PiperOrigin-RevId: 557192020 Source-Link: https://github.com/googleapis/googleapis/commit/b4c238feaa1097c53798ed77035bbfeb7fc72e96 Source-Link: https://github.com/googleapis/googleapis-gen/commit/feccb30e3177da8b7b7e68149ca4bb914f8faf2a Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiZmVjY2IzMGUzMTc3ZGE4YjdiN2U2ODE0OWNhNGJiOTE0ZjhmYWYyYSJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- .../google/cloud/bigtable_admin/__init__.py | 4 + .../cloud/bigtable_admin_v2/__init__.py | 4 + .../bigtable_admin_v2/gapic_metadata.json | 15 + .../bigtable_table_admin/async_client.py | 142 ++++- .../services/bigtable_table_admin/client.py | 142 ++++- .../bigtable_table_admin/transports/base.py | 14 + .../bigtable_table_admin/transports/grpc.py | 33 +- .../transports/grpc_asyncio.py | 35 +- .../bigtable_table_admin/transports/rest.py | 136 +++++ .../cloud/bigtable_admin_v2/types/__init__.py | 4 + .../types/bigtable_table_admin.py | 104 +++- .../cloud/bigtable_admin_v2/types/table.py | 56 +- .../fixup_bigtable_admin_v2_keywords.py | 1 + .../test_bigtable_table_admin.py | 568 ++++++++++++++++++ 14 files changed, 1221 insertions(+), 37 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/__init__.py index 43535ae208d7..d26d79b3c880 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/__init__.py @@ -115,6 +115,8 @@ from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( CheckConsistencyResponse, ) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import CopyBackupMetadata +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import CopyBackupRequest from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( CreateBackupMetadata, ) @@ -242,6 +244,8 @@ "UpdateInstanceMetadata", "CheckConsistencyRequest", "CheckConsistencyResponse", + "CopyBackupMetadata", + "CopyBackupRequest", "CreateBackupMetadata", "CreateBackupRequest", "CreateTableFromSnapshotMetadata", diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py index 8033a0af77cc..811b956e0344 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py @@ -51,6 +51,8 @@ from .types.bigtable_instance_admin import UpdateInstanceMetadata from .types.bigtable_table_admin import CheckConsistencyRequest from .types.bigtable_table_admin import CheckConsistencyResponse +from .types.bigtable_table_admin import CopyBackupMetadata +from .types.bigtable_table_admin import CopyBackupRequest from .types.bigtable_table_admin import CreateBackupMetadata from 
.types.bigtable_table_admin import CreateBackupRequest from .types.bigtable_table_admin import CreateTableFromSnapshotMetadata @@ -116,6 +118,8 @@ "CheckConsistencyResponse", "Cluster", "ColumnFamily", + "CopyBackupMetadata", + "CopyBackupRequest", "CreateAppProfileRequest", "CreateBackupMetadata", "CreateBackupRequest", diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_metadata.json b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_metadata.json index d797338cce46..9b3426470f6f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_metadata.json +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_metadata.json @@ -349,6 +349,11 @@ "check_consistency" ] }, + "CopyBackup": { + "methods": [ + "copy_backup" + ] + }, "CreateBackup": { "methods": [ "create_backup" @@ -474,6 +479,11 @@ "check_consistency" ] }, + "CopyBackup": { + "methods": [ + "copy_backup" + ] + }, "CreateBackup": { "methods": [ "create_backup" @@ -599,6 +609,11 @@ "check_consistency" ] }, + "CopyBackup": { + "methods": [ + "copy_backup" + ] + }, "CreateBackup": { "methods": [ "create_backup" diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index 31ddfac72a61..d1c1811a6056 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -2143,7 +2143,7 @@ async def list_backups( Returns: google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListBackupsAsyncPager: The response for - [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. + [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. Iterating over this object will yield results and resolve additional pages automatically. @@ -2218,9 +2218,8 @@ async def restore_table( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: - r"""Create a new table by restoring from a completed backup. The new - table must be in the same project as the instance containing the - backup. The returned table [long-running + r"""Create a new table by restoring from a completed backup. The + returned table [long-running operation][google.longrunning.Operation] can be used to track the progress of the operation, and to cancel it. The [metadata][google.longrunning.Operation.metadata] field type is @@ -2283,6 +2282,141 @@ async def restore_table( # Done; return the response. return response + async def copy_backup( + self, + request: Optional[Union[bigtable_table_admin.CopyBackupRequest, dict]] = None, + *, + parent: Optional[str] = None, + backup_id: Optional[str] = None, + source_backup: Optional[str] = None, + expire_time: Optional[timestamp_pb2.Timestamp] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Copy a Cloud Bigtable backup to a new backup in the + destination cluster located in the destination instance + and project. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.CopyBackupRequest, dict]]): + The request object. 
The request for + [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup]. + parent (:class:`str`): + Required. The name of the destination cluster that will + contain the backup copy. The cluster must already + exists. Values are of the form: + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + backup_id (:class:`str`): + Required. The id of the new backup. The ``backup_id`` + along with ``parent`` are combined as + {parent}/backups/{backup_id} to create the full backup + name, of the form: + ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``. + This string must be between 1 and 50 characters in + length and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]*. + + This corresponds to the ``backup_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + source_backup (:class:`str`): + Required. The source backup to be copied from. The + source backup needs to be in READY state for it to be + copied. Copying a copied backup is not allowed. Once + CopyBackup is in progress, the source backup cannot be + deleted or cleaned up on expiration until CopyBackup is + finished. Values are of the form: + ``projects//instances//clusters//backups/``. + + This corresponds to the ``source_backup`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + expire_time (:class:`google.protobuf.timestamp_pb2.Timestamp`): + Required. Required. The expiration time of the copied + backup with microsecond granularity that must be at + least 6 hours and at most 30 days from the time the + request is received. Once the ``expire_time`` has + passed, Cloud Bigtable will delete the backup and free + the resources used by the backup. + + This corresponds to the ``expire_time`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.bigtable_admin_v2.types.Backup` A + backup of a Cloud Bigtable table. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, backup_id, source_backup, expire_time]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_table_admin.CopyBackupRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if backup_id is not None: + request.backup_id = backup_id + if source_backup is not None: + request.source_backup = source_backup + if expire_time is not None: + request.expire_time = expire_time + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.copy_backup, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + table.Backup, + metadata_type=bigtable_table_admin.CopyBackupMetadata, + ) + + # Done; return the response. + return response + async def get_iam_policy( self, request: Optional[Union[iam_policy_pb2.GetIamPolicyRequest, dict]] = None, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index b7127c0327d1..80231fce9af7 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -2407,7 +2407,7 @@ def list_backups( Returns: google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListBackupsPager: The response for - [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. + [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. Iterating over this object will yield results and resolve additional pages automatically. @@ -2472,9 +2472,8 @@ def restore_table( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: - r"""Create a new table by restoring from a completed backup. The new - table must be in the same project as the instance containing the - backup. The returned table [long-running + r"""Create a new table by restoring from a completed backup. The + returned table [long-running operation][google.longrunning.Operation] can be used to track the progress of the operation, and to cancel it. The [metadata][google.longrunning.Operation.metadata] field type is @@ -2538,6 +2537,141 @@ def restore_table( # Done; return the response. return response + def copy_backup( + self, + request: Optional[Union[bigtable_table_admin.CopyBackupRequest, dict]] = None, + *, + parent: Optional[str] = None, + backup_id: Optional[str] = None, + source_backup: Optional[str] = None, + expire_time: Optional[timestamp_pb2.Timestamp] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Copy a Cloud Bigtable backup to a new backup in the + destination cluster located in the destination instance + and project. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.CopyBackupRequest, dict]): + The request object. The request for + [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup]. + parent (str): + Required. The name of the destination cluster that will + contain the backup copy. The cluster must already + exists. Values are of the form: + ``projects/{project}/instances/{instance}/clusters/{cluster}``. 
+ + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + backup_id (str): + Required. The id of the new backup. The ``backup_id`` + along with ``parent`` are combined as + {parent}/backups/{backup_id} to create the full backup + name, of the form: + ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``. + This string must be between 1 and 50 characters in + length and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]*. + + This corresponds to the ``backup_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + source_backup (str): + Required. The source backup to be copied from. The + source backup needs to be in READY state for it to be + copied. Copying a copied backup is not allowed. Once + CopyBackup is in progress, the source backup cannot be + deleted or cleaned up on expiration until CopyBackup is + finished. Values are of the form: + ``projects//instances//clusters//backups/``. + + This corresponds to the ``source_backup`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + expire_time (google.protobuf.timestamp_pb2.Timestamp): + Required. Required. The expiration time of the copied + backup with microsecond granularity that must be at + least 6 hours and at most 30 days from the time the + request is received. Once the ``expire_time`` has + passed, Cloud Bigtable will delete the backup and free + the resources used by the backup. + + This corresponds to the ``expire_time`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.bigtable_admin_v2.types.Backup` A + backup of a Cloud Bigtable table. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, backup_id, source_backup, expire_time]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.CopyBackupRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.CopyBackupRequest): + request = bigtable_table_admin.CopyBackupRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if backup_id is not None: + request.backup_id = backup_id + if source_backup is not None: + request.source_backup = source_backup + if expire_time is not None: + request.expire_time = expire_time + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.copy_backup] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + table.Backup, + metadata_type=bigtable_table_admin.CopyBackupMetadata, + ) + + # Done; return the response. + return response + def get_iam_policy( self, request: Optional[Union[iam_policy_pb2.GetIamPolicyRequest, dict]] = None, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py index 591c6bcfeb27..c3cf01a96eae 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py @@ -322,6 +322,11 @@ def _prep_wrapped_messages(self, client_info): default_timeout=60.0, client_info=client_info, ), + self.copy_backup: gapic_v1.method.wrap_method( + self.copy_backup, + default_timeout=None, + client_info=client_info, + ), self.get_iam_policy: gapic_v1.method.wrap_method( self.get_iam_policy, default_retry=retries.Retry( @@ -577,6 +582,15 @@ def restore_table( ]: raise NotImplementedError() + @property + def copy_backup( + self, + ) -> Callable[ + [bigtable_table_admin.CopyBackupRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + @property def get_iam_policy( self, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py index e18be126c124..34a1596fcc77 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py @@ -866,9 +866,8 @@ def restore_table( ) -> Callable[[bigtable_table_admin.RestoreTableRequest], operations_pb2.Operation]: r"""Return a callable for the restore table method over gRPC. - Create a new table by restoring from a completed backup. The new - table must be in the same project as the instance containing the - backup. The returned table [long-running + Create a new table by restoring from a completed backup. The + returned table [long-running operation][google.longrunning.Operation] can be used to track the progress of the operation, and to cancel it. The [metadata][google.longrunning.Operation.metadata] field type is @@ -894,6 +893,34 @@ def restore_table( ) return self._stubs["restore_table"] + @property + def copy_backup( + self, + ) -> Callable[[bigtable_table_admin.CopyBackupRequest], operations_pb2.Operation]: + r"""Return a callable for the copy backup method over gRPC. + + Copy a Cloud Bigtable backup to a new backup in the + destination cluster located in the destination instance + and project. + + Returns: + Callable[[~.CopyBackupRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "copy_backup" not in self._stubs: + self._stubs["copy_backup"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/CopyBackup", + request_serializer=bigtable_table_admin.CopyBackupRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["copy_backup"] + @property def get_iam_policy( self, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py index 8f72e3fe89e5..b19d4e7be82f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py @@ -890,9 +890,8 @@ def restore_table( ]: r"""Return a callable for the restore table method over gRPC. - Create a new table by restoring from a completed backup. The new - table must be in the same project as the instance containing the - backup. The returned table [long-running + Create a new table by restoring from a completed backup. The + returned table [long-running operation][google.longrunning.Operation] can be used to track the progress of the operation, and to cancel it. The [metadata][google.longrunning.Operation.metadata] field type is @@ -918,6 +917,36 @@ def restore_table( ) return self._stubs["restore_table"] + @property + def copy_backup( + self, + ) -> Callable[ + [bigtable_table_admin.CopyBackupRequest], Awaitable[operations_pb2.Operation] + ]: + r"""Return a callable for the copy backup method over gRPC. + + Copy a Cloud Bigtable backup to a new backup in the + destination cluster located in the destination instance + and project. + + Returns: + Callable[[~.CopyBackupRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "copy_backup" not in self._stubs: + self._stubs["copy_backup"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/CopyBackup", + request_serializer=bigtable_table_admin.CopyBackupRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["copy_backup"] + @property def get_iam_policy( self, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py index 37518c8da694..4b3a846dce37 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py @@ -84,6 +84,14 @@ def post_check_consistency(self, response): logging.log(f"Received response: {response}") return response + def pre_copy_backup(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_copy_backup(self, response): + logging.log(f"Received response: {response}") + return response + def pre_create_backup(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -281,6 +289,29 @@ def post_check_consistency( """ return response + def pre_copy_backup( + self, + request: bigtable_table_admin.CopyBackupRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[bigtable_table_admin.CopyBackupRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for copy_backup + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_copy_backup( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for copy_backup + + Override in a subclass to manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. + """ + return response + def pre_create_backup( self, request: bigtable_table_admin.CreateBackupRequest, @@ -1010,6 +1041,103 @@ def __call__( resp = self._interceptor.post_check_consistency(resp) return resp + class _CopyBackup(BigtableTableAdminRestStub): + def __hash__(self): + return hash("CopyBackup") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable_table_admin.CopyBackupRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Call the copy backup method over HTTP. + + Args: + request (~.bigtable_table_admin.CopyBackupRequest): + The request object. The request for + [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{parent=projects/*/instances/*/clusters/*}/backups:copy", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_copy_backup(request, metadata) + pb_request = bigtable_table_admin.CopyBackupRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_copy_backup(resp) + return resp + class _CreateBackup(BigtableTableAdminRestStub): def __hash__(self): return hash("CreateBackup") @@ -3360,6 +3488,14 @@ def check_consistency( # In C++ this would require a dynamic_cast return self._CheckConsistency(self._session, self._host, self._interceptor) # type: ignore + @property + def copy_backup( + self, + ) -> Callable[[bigtable_table_admin.CopyBackupRequest], operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._CopyBackup(self._session, self._host, self._interceptor) # type: ignore + @property def create_backup( self, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py index c69e3129ba65..a2fefffc8a54 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py @@ -44,6 +44,8 @@ from .bigtable_table_admin import ( CheckConsistencyRequest, CheckConsistencyResponse, + CopyBackupMetadata, + CopyBackupRequest, CreateBackupMetadata, CreateBackupRequest, CreateTableFromSnapshotMetadata, @@ -130,6 +132,8 @@ "UpdateInstanceMetadata", "CheckConsistencyRequest", "CheckConsistencyResponse", + "CopyBackupMetadata", + "CopyBackupRequest", "CreateBackupMetadata", "CreateBackupRequest", "CreateTableFromSnapshotMetadata", diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py index dfa815dc2038..2b108351a285 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py @@ -62,6 +62,8 @@ "DeleteBackupRequest", "ListBackupsRequest", "ListBackupsResponse", + "CopyBackupRequest", + "CopyBackupMetadata", }, ) @@ -76,8 +78,7 @@ class RestoreTableRequest(proto.Message): Attributes: parent (str): Required. The name of the instance in which to create the - restored table. This instance must be in the same project as - the source backup. Values are of the form + restored table. Values are of the form ``projects//instances/``. table_id (str): Required. The id of the table to create and restore to. This @@ -359,7 +360,7 @@ class ListTablesRequest(proto.Message): should be listed. Values are of the form ``projects/{project}/instances/{instance}``. view (google.cloud.bigtable_admin_v2.types.Table.View): - The view to be applied to the returned tables' fields. Only + The view to be applied to the returned tables' fields. NAME_ONLY view (default) and REPLICATION_VIEW are supported. page_size (int): Maximum number of results per page. @@ -1192,8 +1193,15 @@ class ListBackupsRequest(proto.Message): fields in [Backup][google.bigtable.admin.v2.Backup]. The full syntax is described at https://aip.dev/132#ordering. - Fields supported are: \* name \* source_table \* expire_time - \* start_time \* end_time \* size_bytes \* state + Fields supported are: + + - name + - source_table + - expire_time + - start_time + - end_time + - size_bytes + - state For example, "start_time". The default sorting order is ascending. To specify descending order for the field, a @@ -1266,4 +1274,90 @@ def raw_page(self): ) +class CopyBackupRequest(proto.Message): + r"""The request for + [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup]. + + Attributes: + parent (str): + Required. The name of the destination cluster that will + contain the backup copy. The cluster must already exists. + Values are of the form: + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + backup_id (str): + Required. The id of the new backup. 
The ``backup_id`` along + with ``parent`` are combined as {parent}/backups/{backup_id} + to create the full backup name, of the form: + ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``. + This string must be between 1 and 50 characters in length + and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]*. + source_backup (str): + Required. The source backup to be copied from. The source + backup needs to be in READY state for it to be copied. + Copying a copied backup is not allowed. Once CopyBackup is + in progress, the source backup cannot be deleted or cleaned + up on expiration until CopyBackup is finished. Values are of + the form: + ``projects//instances//clusters//backups/``. + expire_time (google.protobuf.timestamp_pb2.Timestamp): + Required. Required. The expiration time of the copied backup + with microsecond granularity that must be at least 6 hours + and at most 30 days from the time the request is received. + Once the ``expire_time`` has passed, Cloud Bigtable will + delete the backup and free the resources used by the backup. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + backup_id: str = proto.Field( + proto.STRING, + number=2, + ) + source_backup: str = proto.Field( + proto.STRING, + number=3, + ) + expire_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + + +class CopyBackupMetadata(proto.Message): + r"""Metadata type for the google.longrunning.Operation returned by + [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup]. + + Attributes: + name (str): + The name of the backup being created through the copy + operation. Values are of the form + ``projects//instances//clusters//backups/``. + source_backup_info (google.cloud.bigtable_admin_v2.types.BackupInfo): + Information about the source backup that is + being copied from. + progress (google.cloud.bigtable_admin_v2.types.OperationProgress): + The progress of the + [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup] + operation. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + source_backup_info: gba_table.BackupInfo = proto.Field( + proto.MESSAGE, + number=2, + message=gba_table.BackupInfo, + ) + progress: common.OperationProgress = proto.Field( + proto.MESSAGE, + number=3, + message=common.OperationProgress, + ) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py index e75ac00bb1da..6b885203d480 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py @@ -124,7 +124,8 @@ class Table(proto.Message): ``REPLICATION_VIEW``, ``ENCRYPTION_VIEW``, ``FULL`` column_families (MutableMapping[str, google.cloud.bigtable_admin_v2.types.ColumnFamily]): The column families configured for this table, mapped by - column family ID. Views: ``SCHEMA_VIEW``, ``FULL`` + column family ID. Views: ``SCHEMA_VIEW``, ``STATS_VIEW``, + ``FULL`` granularity (google.cloud.bigtable_admin_v2.types.Table.TimestampGranularity): Immutable. The granularity (i.e. ``MILLIS``) at which timestamps are stored in this table. Timestamps not matching @@ -141,15 +142,16 @@ class Table(proto.Message): this table. Otherwise, the change stream is disabled and the change stream is not retained. 
deletion_protection (bool): - Set to true to make the table protected - against data loss. i.e. deleting the following - resources through Admin APIs are prohibited: - - - The table. - - The column families in the table. - - The instance containing the table. - Note one can still delete the data stored in the - table through Data APIs. + Set to true to make the table protected against data loss. + i.e. deleting the following resources through Admin APIs are + prohibited: + + - The table. + - The column families in the table. + - The instance containing the table. + + Note one can still delete the data stored in the table + through Data APIs. """ class TimestampGranularity(proto.Enum): @@ -487,8 +489,7 @@ class Snapshot(proto.Message): Attributes: name (str): - Output only. The unique name of the snapshot. Values are of - the form + The unique name of the snapshot. Values are of the form ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. source_table (google.cloud.bigtable_admin_v2.types.Table): Output only. The source table at the time the @@ -503,16 +504,15 @@ class Snapshot(proto.Message): Output only. The time when the snapshot is created. delete_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. The time when the snapshot will - be deleted. The maximum amount of time a - snapshot can stay active is 365 days. If 'ttl' - is not specified, the default maximum of 365 - days will be used. + The time when the snapshot will be deleted. + The maximum amount of time a snapshot can stay + active is 365 days. If 'ttl' is not specified, + the default maximum of 365 days will be used. state (google.cloud.bigtable_admin_v2.types.Snapshot.State): Output only. The current state of the snapshot. description (str): - Output only. Description of the snapshot. + Description of the snapshot. """ class State(proto.Enum): @@ -588,6 +588,12 @@ class Backup(proto.Message): backup was created. This needs to be in the same instance as the backup. Values are of the form ``projects/{project}/instances/{instance}/tables/{source_table}``. + source_backup (str): + Output only. Name of the backup from which + this backup was copied. If a backup is not + created by copying a backup, this field will be + empty. Values are of the form: + projects//instances//backups/. expire_time (google.protobuf.timestamp_pb2.Timestamp): Required. The expiration time of the backup, with microseconds granularity that must be at least 6 hours and @@ -637,6 +643,10 @@ class State(proto.Enum): proto.STRING, number=2, ) + source_backup: str = proto.Field( + proto.STRING, + number=10, + ) expire_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=3, @@ -685,6 +695,12 @@ class BackupInfo(proto.Message): source_table (str): Output only. Name of the table the backup was created from. + source_backup (str): + Output only. Name of the backup from which + this backup was copied. If a backup is not + created by copying a backup, this field will be + empty. Values are of the form: + projects//instances//backups/. 
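        In practice the new ``source_backup`` field is what lets callers tell a
        copied backup apart from one created directly from a table. A minimal
        sketch (the project, instance, cluster and backup names below are
        placeholders, not taken from this change):

            from google.cloud import bigtable_admin_v2

            client = bigtable_admin_v2.BigtableTableAdminClient()
            backup = client.get_backup(
                name="projects/my-project/instances/my-instance"
                "/clusters/my-cluster/backups/my-backup"
            )
            if backup.source_backup:
                print("copied from", backup.source_backup)
            else:
                print("created directly from", backup.source_table)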
""" backup: str = proto.Field( @@ -705,6 +721,10 @@ class BackupInfo(proto.Message): proto.STRING, number=4, ) + source_backup: str = proto.Field( + proto.STRING, + number=10, + ) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py b/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py index 58eab8bcf850..6882feaf6050 100644 --- a/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py +++ b/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py @@ -40,6 +40,7 @@ class bigtable_adminCallTransformer(cst.CSTTransformer): CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { 'check_consistency': ('name', 'consistency_token', ), + 'copy_backup': ('parent', 'backup_id', 'source_backup', 'expire_time', ), 'create_app_profile': ('parent', 'app_profile_id', 'app_profile', 'ignore_warnings', ), 'create_backup': ('parent', 'backup_id', 'backup', ), 'create_cluster': ('parent', 'cluster_id', 'cluster', ), diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index 0a0b3b671327..a537cb9f065e 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -4960,6 +4960,7 @@ def test_get_backup(request_type, transport: str = "grpc"): call.return_value = table.Backup( name="name_value", source_table="source_table_value", + source_backup="source_backup_value", size_bytes=1089, state=table.Backup.State.CREATING, ) @@ -4974,6 +4975,7 @@ def test_get_backup(request_type, transport: str = "grpc"): assert isinstance(response, table.Backup) assert response.name == "name_value" assert response.source_table == "source_table_value" + assert response.source_backup == "source_backup_value" assert response.size_bytes == 1089 assert response.state == table.Backup.State.CREATING @@ -5014,6 +5016,7 @@ async def test_get_backup_async( table.Backup( name="name_value", source_table="source_table_value", + source_backup="source_backup_value", size_bytes=1089, state=table.Backup.State.CREATING, ) @@ -5029,6 +5032,7 @@ async def test_get_backup_async( assert isinstance(response, table.Backup) assert response.name == "name_value" assert response.source_table == "source_table_value" + assert response.source_backup == "source_backup_value" assert response.size_bytes == 1089 assert response.state == table.Backup.State.CREATING @@ -5200,6 +5204,7 @@ def test_update_backup(request_type, transport: str = "grpc"): call.return_value = table.Backup( name="name_value", source_table="source_table_value", + source_backup="source_backup_value", size_bytes=1089, state=table.Backup.State.CREATING, ) @@ -5214,6 +5219,7 @@ def test_update_backup(request_type, transport: str = "grpc"): assert isinstance(response, table.Backup) assert response.name == "name_value" assert response.source_table == "source_table_value" + assert response.source_backup == "source_backup_value" assert response.size_bytes == 1089 assert response.state == table.Backup.State.CREATING @@ -5255,6 +5261,7 @@ async def test_update_backup_async( table.Backup( name="name_value", source_table="source_table_value", + source_backup="source_backup_value", size_bytes=1089, 
state=table.Backup.State.CREATING, ) @@ -5270,6 +5277,7 @@ async def test_update_backup_async( assert isinstance(response, table.Backup) assert response.name == "name_value" assert response.source_table == "source_table_value" + assert response.source_backup == "source_backup_value" assert response.size_bytes == 1089 assert response.state == table.Backup.State.CREATING @@ -6217,6 +6225,262 @@ async def test_restore_table_field_headers_async(): ) in kw["metadata"] +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.CopyBackupRequest, + dict, + ], +) +def test_copy_backup(request_type, transport: str = "grpc"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.copy_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.CopyBackupRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_copy_backup_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + client.copy_backup() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.CopyBackupRequest() + + +@pytest.mark.asyncio +async def test_copy_backup_async( + transport: str = "grpc_asyncio", request_type=bigtable_table_admin.CopyBackupRequest +): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.copy_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.CopyBackupRequest() + + # Establish that the response is the type that we expect. 
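    # (The transport stub above was mocked to return a raw
    # ``operations_pb2.Operation``; the async client wraps that proto in an
    # operation future, which is why the expected type below is
    # ``future.Future`` rather than the raw message.)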
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_copy_backup_async_from_dict(): + await test_copy_backup_async(request_type=dict) + + +def test_copy_backup_field_headers(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.CopyBackupRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.copy_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_copy_backup_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.CopyBackupRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.copy_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_copy_backup_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.copy_backup( + parent="parent_value", + backup_id="backup_id_value", + source_backup="source_backup_value", + expire_time=timestamp_pb2.Timestamp(seconds=751), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].backup_id + mock_val = "backup_id_value" + assert arg == mock_val + arg = args[0].source_backup + mock_val = "source_backup_value" + assert arg == mock_val + assert TimestampRule().to_proto(args[0].expire_time) == timestamp_pb2.Timestamp( + seconds=751 + ) + + +def test_copy_backup_flattened_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
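    # For contrast, a correct call passes either a request object or the
    # flattened fields, never both. A minimal client-level sketch (the
    # resource names are placeholders, not taken from this change):
    #
    #     operation = client.copy_backup(
    #         parent="projects/p/instances/i/clusters/c",
    #         backup_id="copied-backup",
    #         source_backup="projects/p/instances/i/clusters/c/backups/src",
    #         expire_time=timestamp_pb2.Timestamp(seconds=1893456000),
    #     )
    #     backup = operation.result()  # blocks until the copy finishes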
+ with pytest.raises(ValueError): + client.copy_backup( + bigtable_table_admin.CopyBackupRequest(), + parent="parent_value", + backup_id="backup_id_value", + source_backup="source_backup_value", + expire_time=timestamp_pb2.Timestamp(seconds=751), + ) + + +@pytest.mark.asyncio +async def test_copy_backup_flattened_async(): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.copy_backup( + parent="parent_value", + backup_id="backup_id_value", + source_backup="source_backup_value", + expire_time=timestamp_pb2.Timestamp(seconds=751), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].backup_id + mock_val = "backup_id_value" + assert arg == mock_val + arg = args[0].source_backup + mock_val = "source_backup_value" + assert arg == mock_val + assert TimestampRule().to_proto(args[0].expire_time) == timestamp_pb2.Timestamp( + seconds=751 + ) + + +@pytest.mark.asyncio +async def test_copy_backup_flattened_error_async(): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.copy_backup( + bigtable_table_admin.CopyBackupRequest(), + parent="parent_value", + backup_id="backup_id_value", + source_backup="source_backup_value", + expire_time=timestamp_pb2.Timestamp(seconds=751), + ) + + @pytest.mark.parametrize( "request_type", [ @@ -8206,6 +8470,7 @@ def test_update_table_rest(request_type): "start_time": {"seconds": 751, "nanos": 543}, "end_time": {}, "source_table": "source_table_value", + "source_backup": "source_backup_value", }, }, "change_stream_config": {"retention_period": {"seconds": 751, "nanos": 543}}, @@ -8404,6 +8669,7 @@ def test_update_table_rest_bad_request( "start_time": {"seconds": 751, "nanos": 543}, "end_time": {}, "source_table": "source_table_value", + "source_backup": "source_backup_value", }, }, "change_stream_config": {"retention_period": {"seconds": 751, "nanos": 543}}, @@ -11246,6 +11512,7 @@ def test_create_backup_rest(request_type): request_init["backup"] = { "name": "name_value", "source_table": "source_table_value", + "source_backup": "source_backup_value", "expire_time": {"seconds": 751, "nanos": 543}, "start_time": {}, "end_time": {}, @@ -11467,6 +11734,7 @@ def test_create_backup_rest_bad_request( request_init["backup"] = { "name": "name_value", "source_table": "source_table_value", + "source_backup": "source_backup_value", "expire_time": {"seconds": 751, "nanos": 543}, "start_time": {}, "end_time": {}, @@ -11593,6 +11861,7 @@ def test_get_backup_rest(request_type): return_value = table.Backup( name="name_value", source_table="source_table_value", + source_backup="source_backup_value", size_bytes=1089, state=table.Backup.State.CREATING, ) @@ -11611,6 +11880,7 @@ def test_get_backup_rest(request_type): assert isinstance(response, table.Backup) assert response.name == "name_value" assert response.source_table == "source_table_value" + assert response.source_backup == "source_backup_value" assert response.size_bytes == 1089 assert response.state == table.Backup.State.CREATING @@ -11868,6 +12138,7 @@ def test_update_backup_rest(request_type): request_init["backup"] = { "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4", "source_table": "source_table_value", + "source_backup": "source_backup_value", "expire_time": {"seconds": 751, "nanos": 543}, "start_time": {}, "end_time": {}, @@ -11896,6 +12167,7 @@ def test_update_backup_rest(request_type): return_value = table.Backup( name="name_value", source_table="source_table_value", + source_backup="source_backup_value", size_bytes=1089, state=table.Backup.State.CREATING, ) @@ -11914,6 +12186,7 @@ def test_update_backup_rest(request_type): assert isinstance(response, table.Backup) assert response.name == "name_value" assert response.source_table == "source_table_value" + assert response.source_backup == "source_backup_value" assert response.size_bytes == 1089 assert response.state == table.Backup.State.CREATING @@ -12082,6 +12355,7 @@ def test_update_backup_rest_bad_request( request_init["backup"] = { "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4", "source_table": "source_table_value", + "source_backup": "source_backup_value", "expire_time": {"seconds": 751, "nanos": 543}, "start_time": {}, "end_time": {}, @@ -13012,6 +13286,296 @@ def test_restore_table_rest_error(): ) +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.CopyBackupRequest, + dict, + ], +) +def test_copy_backup_rest(request_type): + client = BigtableTableAdminClient( + 
credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.copy_backup(request) + + # Establish that the response is the type that we expect. + assert response.operation.name == "operations/spam" + + +def test_copy_backup_rest_required_fields( + request_type=bigtable_table_admin.CopyBackupRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request_init["backup_id"] = "" + request_init["source_backup"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).copy_backup._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + jsonified_request["backupId"] = "backup_id_value" + jsonified_request["sourceBackup"] = "source_backup_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).copy_backup._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + assert "backupId" in jsonified_request + assert jsonified_request["backupId"] == "backup_id_value" + assert "sourceBackup" in jsonified_request + assert jsonified_request["sourceBackup"] == "source_backup_value" + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.copy_backup(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_copy_backup_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.copy_backup._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "parent", + "backupId", + "sourceBackup", + "expireTime", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_copy_backup_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_copy_backup" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_copy_backup" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_table_admin.CopyBackupRequest.pb( + bigtable_table_admin.CopyBackupRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson( + operations_pb2.Operation() + ) + + request = bigtable_table_admin.CopyBackupRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.copy_backup( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_copy_backup_rest_bad_request( + transport: str = "rest", request_type=bigtable_table_admin.CopyBackupRequest +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.copy_backup(request) + + +def test_copy_backup_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "parent": "projects/sample1/instances/sample2/clusters/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + backup_id="backup_id_value", + source_backup="source_backup_value", + expire_time=timestamp_pb2.Timestamp(seconds=751), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.copy_backup(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/instances/*/clusters/*}/backups:copy" + % client.transport._host, + args[1], + ) + + +def test_copy_backup_rest_flattened_error(transport: str = "rest"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.copy_backup( + bigtable_table_admin.CopyBackupRequest(), + parent="parent_value", + backup_id="backup_id_value", + source_backup="source_backup_value", + expire_time=timestamp_pb2.Timestamp(seconds=751), + ) + + +def test_copy_backup_rest_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + @pytest.mark.parametrize( "request_type", [ @@ -14001,6 +14565,7 @@ def test_bigtable_table_admin_base_transport(): "delete_backup", "list_backups", "restore_table", + "copy_backup", "get_iam_policy", "set_iam_policy", "test_iam_permissions", @@ -14377,6 +14942,9 @@ def test_bigtable_table_admin_client_transport_session_collision(transport_name) session1 = client1.transport.restore_table._session session2 = client2.transport.restore_table._session assert session1 != session2 + session1 = client1.transport.copy_backup._session + session2 = client2.transport.copy_backup._session + assert session1 != session2 session1 = client1.transport.get_iam_policy._session session2 = client2.transport.get_iam_policy._session assert session1 != session2 From 242c324f2f1c9134b8cf546c8eb138aaeb9471da Mon Sep 17 00:00:00 2001 From: Anthonios Partheniou Date: Tue, 19 Sep 2023 20:59:35 -0400 Subject: [PATCH 747/892] fix: require google-cloud-core 1.4.4 (#866) * fix: require google-cloud-core 1.4.4 * Update sample to address missing dependency on `six` * Update sample to address missing dependency on `six` --- .../samples/hello_happybase/requirements.txt | 1 + .../samples/quickstart_happybase/requirements.txt | 1 + packages/google-cloud-bigtable/setup.py | 2 +- packages/google-cloud-bigtable/testing/constraints-3.7.txt | 2 +- 4 files changed, 4 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/requirements.txt b/packages/google-cloud-bigtable/samples/hello_happybase/requirements.txt index a144f03e1bc5..d3368cd0f872 100644 --- a/packages/google-cloud-bigtable/samples/hello_happybase/requirements.txt +++ b/packages/google-cloud-bigtable/samples/hello_happybase/requirements.txt @@ -1 +1,2 @@ google-cloud-happybase==0.33.0 +six==1.16.0 # See https://github.com/googleapis/google-cloud-python-happybase/issues/128 diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements.txt b/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements.txt index a144f03e1bc5..d3368cd0f872 100644 --- a/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements.txt +++ b/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements.txt @@ -1 +1,2 @@ google-cloud-happybase==0.33.0 +six==1.16.0 # See https://github.com/googleapis/google-cloud-python-happybase/issues/128 diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 49bb10adcf77..495730888561 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -38,7 +38,7 @@ release_status = "Development Status :: 5 - Production/Stable" dependencies = [ "google-api-core[grpc] >= 1.34.0, <3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*,!=2.8.*,!=2.9.*,!=2.10.*", - "google-cloud-core >= 1.4.1, <3.0.0dev", + "google-cloud-core >= 1.4.4, <3.0.0dev", "grpc-google-iam-v1 >= 0.12.4, <1.0.0dev", "proto-plus >= 1.22.0, <2.0.0dev", "proto-plus >= 1.22.2, <2.0.0dev; python_version>='3.11'", diff --git a/packages/google-cloud-bigtable/testing/constraints-3.7.txt 
b/packages/google-cloud-bigtable/testing/constraints-3.7.txt index d14da7c0c6c8..0718fa655352 100644 --- a/packages/google-cloud-bigtable/testing/constraints-3.7.txt +++ b/packages/google-cloud-bigtable/testing/constraints-3.7.txt @@ -6,7 +6,7 @@ # e.g., if setup.py has "foo >= 1.14.0, < 2.0.0dev", # Then this file should have foo==1.14.0 google-api-core==1.34.0 -google-cloud-core==1.4.1 +google-cloud-core==1.4.4 grpc-google-iam-v1==0.12.4 proto-plus==1.22.0 libcst==0.2.5 From e448fd75432d5db6815217e99b632b4b944b2dc6 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Wed, 20 Sep 2023 10:30:37 -0400 Subject: [PATCH 748/892] fix: add feature flag for improved mutate rows throttling (#862) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * docs: Minor formatting chore: Update gapic-generator-python to v1.11.5 build: Update rules_python to 0.24.0 PiperOrigin-RevId: 563436317 Source-Link: https://github.com/googleapis/googleapis/commit/42fd37b18d706f6f51f52f209973b3b2c28f509a Source-Link: https://github.com/googleapis/googleapis-gen/commit/280264ca02fb9316b4237a96d0af1a2343a81a56 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiMjgwMjY0Y2EwMmZiOTMxNmI0MjM3YTk2ZDBhZjFhMjM0M2E4MWE1NiJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * fix: add feature flag for improved mutate rows throttling PiperOrigin-RevId: 565090488 Source-Link: https://github.com/googleapis/googleapis/commit/e8a136feaca2547dd5566ef79841d28f76a80eb5 Source-Link: https://github.com/googleapis/googleapis-gen/commit/9a8dcca0fb2117628a1a6a6c3625a6aa32fc2f75 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiOWE4ZGNjYTBmYjIxMTc2MjhhMWE2YTZjMzYyNWE2YWEzMmZjMmY3NSJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot Co-authored-by: Anthonios Partheniou --- .../bigtable_instance_admin/async_client.py | 12 +++---- .../bigtable_instance_admin/client.py | 12 +++---- .../transports/rest.py | 2 +- .../bigtable_table_admin/async_client.py | 16 ++++++---- .../services/bigtable_table_admin/client.py | 16 ++++++---- .../bigtable_table_admin/transports/grpc.py | 3 ++ .../transports/grpc_asyncio.py | 3 ++ .../bigtable_table_admin/transports/rest.py | 3 +- .../types/bigtable_table_admin.py | 1 + .../cloud/bigtable_admin_v2/types/instance.py | 4 +-- .../cloud/bigtable_admin_v2/types/table.py | 2 ++ .../services/bigtable/async_client.py | 25 +++++++-------- .../bigtable_v2/services/bigtable/client.py | 25 +++++++-------- .../services/bigtable/transports/grpc.py | 7 ++--- .../bigtable/transports/grpc_asyncio.py | 7 ++--- .../services/bigtable/transports/rest.py | 18 +++++------ .../cloud/bigtable_v2/types/bigtable.py | 12 +++---- .../google/cloud/bigtable_v2/types/data.py | 13 ++++---- .../cloud/bigtable_v2/types/feature_flags.py | 31 +++++++++++++------ .../cloud/bigtable_v2/types/request_stats.py | 1 + .../test_bigtable_instance_admin.py | 2 +- .../test_bigtable_table_admin.py | 2 +- 22 files changed, 116 insertions(+), 101 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py index 31e68fe95128..3f67620c0488 100644 --- 
a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ -1883,8 +1883,8 @@ async def get_iam_policy( The request object. Request message for ``GetIamPolicy`` method. resource (:class:`str`): REQUIRED: The resource for which the - policy is being requested. - See the operation documentation for the + policy is being requested. See the + operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field @@ -2001,8 +2001,8 @@ async def set_iam_policy( The request object. Request message for ``SetIamPolicy`` method. resource (:class:`str`): REQUIRED: The resource for which the - policy is being specified. - See the operation documentation for the + policy is being specified. See the + operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field @@ -2110,8 +2110,8 @@ async def test_iam_permissions( The request object. Request message for ``TestIamPermissions`` method. resource (:class:`str`): REQUIRED: The resource for which the - policy detail is being requested. - See the operation documentation for the + policy detail is being requested. See + the operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py index d38105a2ac5c..52c61ea4f8d3 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -2107,8 +2107,8 @@ def get_iam_policy( The request object. Request message for ``GetIamPolicy`` method. resource (str): REQUIRED: The resource for which the - policy is being requested. - See the operation documentation for the + policy is being requested. See the + operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field @@ -2212,8 +2212,8 @@ def set_iam_policy( The request object. Request message for ``SetIamPolicy`` method. resource (str): REQUIRED: The resource for which the - policy is being specified. - See the operation documentation for the + policy is being specified. See the + operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field @@ -2318,8 +2318,8 @@ def test_iam_permissions( The request object. Request message for ``TestIamPermissions`` method. resource (str): REQUIRED: The resource for which the - policy detail is being requested. - See the operation documentation for the + policy detail is being requested. See + the operation documentation for the appropriate value for this field. 
This corresponds to the ``resource`` field diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py index 25bb0a0b98b5..9d5502b7eba8 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py @@ -44,8 +44,8 @@ from google.cloud.bigtable_admin_v2.types import instance from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore -from google.longrunning import operations_pb2 # type: ignore from google.protobuf import empty_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore from .base import ( BigtableInstanceAdminTransport, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index d1c1811a6056..d5edeb91db72 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -358,6 +358,7 @@ async def create_table_from_snapshot( r"""Creates a new table from the specified snapshot. The target table must not exist. The snapshot and the table must be in the same instance. + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be @@ -1293,6 +1294,7 @@ async def snapshot_table( r"""Creates a new snapshot in the specified cluster from the specified source table. The cluster and the table must be in the same instance. + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be @@ -1469,6 +1471,7 @@ async def get_snapshot( time. A snapshot can be used as a checkpoint for data restoration or a data source for a new table. + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud @@ -1668,6 +1671,7 @@ async def delete_snapshot( metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Permanently deletes the specified snapshot. + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be @@ -2435,8 +2439,8 @@ async def get_iam_policy( The request object. Request message for ``GetIamPolicy`` method. resource (:class:`str`): REQUIRED: The resource for which the - policy is being requested. - See the operation documentation for the + policy is being requested. See the + operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field @@ -2553,8 +2557,8 @@ async def set_iam_policy( The request object. Request message for ``SetIamPolicy`` method. resource (:class:`str`): REQUIRED: The resource for which the - policy is being specified. - See the operation documentation for the + policy is being specified. See the + operation documentation for the appropriate value for this field. 
This corresponds to the ``resource`` field @@ -2662,8 +2666,8 @@ async def test_iam_permissions( The request object. Request message for ``TestIamPermissions`` method. resource (:class:`str`): REQUIRED: The resource for which the - policy detail is being requested. - See the operation documentation for the + policy detail is being requested. See + the operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index 80231fce9af7..d0c04ed11416 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -685,6 +685,7 @@ def create_table_from_snapshot( r"""Creates a new table from the specified snapshot. The target table must not exist. The snapshot and the table must be in the same instance. + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be @@ -1587,6 +1588,7 @@ def snapshot_table( r"""Creates a new snapshot in the specified cluster from the specified source table. The cluster and the table must be in the same instance. + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be @@ -1763,6 +1765,7 @@ def get_snapshot( time. A snapshot can be used as a checkpoint for data restoration or a data source for a new table. + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud @@ -1942,6 +1945,7 @@ def delete_snapshot( metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Permanently deletes the specified snapshot. + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be @@ -2690,8 +2694,8 @@ def get_iam_policy( The request object. Request message for ``GetIamPolicy`` method. resource (str): REQUIRED: The resource for which the - policy is being requested. - See the operation documentation for the + policy is being requested. See the + operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field @@ -2795,8 +2799,8 @@ def set_iam_policy( The request object. Request message for ``SetIamPolicy`` method. resource (str): REQUIRED: The resource for which the - policy is being specified. - See the operation documentation for the + policy is being specified. See the + operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field @@ -2901,8 +2905,8 @@ def test_iam_permissions( The request object. Request message for ``TestIamPermissions`` method. resource (str): REQUIRED: The resource for which the - policy detail is being requested. - See the operation documentation for the + policy detail is being requested. See + the operation documentation for the appropriate value for this field. 
This corresponds to the ``resource`` field diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py index 34a1596fcc77..d765869cd7f6 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py @@ -294,6 +294,7 @@ def create_table_from_snapshot( Creates a new table from the specified snapshot. The target table must not exist. The snapshot and the table must be in the same instance. + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be @@ -591,6 +592,7 @@ def snapshot_table( Creates a new snapshot in the specified cluster from the specified source table. The cluster and the table must be in the same instance. + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be @@ -692,6 +694,7 @@ def delete_snapshot( r"""Return a callable for the delete snapshot method over gRPC. Permanently deletes the specified snapshot. + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py index b19d4e7be82f..b60a7351c7df 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py @@ -302,6 +302,7 @@ def create_table_from_snapshot( Creates a new table from the specified snapshot. The target table must not exist. The snapshot and the table must be in the same instance. + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be @@ -607,6 +608,7 @@ def snapshot_table( Creates a new snapshot in the specified cluster from the specified source table. The cluster and the table must be in the same instance. + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be @@ -710,6 +712,7 @@ def delete_snapshot( r"""Return a callable for the delete snapshot method over gRPC. Permanently deletes the specified snapshot. + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. 
This feature might be diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py index 4b3a846dce37..41b893eb7780 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py @@ -45,8 +45,8 @@ from google.cloud.bigtable_admin_v2.types import table as gba_table from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore -from google.longrunning import operations_pb2 # type: ignore from google.protobuf import empty_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore from .base import ( BigtableTableAdminTransport, @@ -2172,6 +2172,7 @@ def __call__( time. A snapshot can be used as a checkpoint for data restoration or a data source for a new table. + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py index 2b108351a285..6a3b31a1e394 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py @@ -918,6 +918,7 @@ class DeleteSnapshotRequest(proto.Message): class SnapshotTableMetadata(proto.Message): r"""The metadata for the Operation returned by SnapshotTable. + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be changed in diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py index aa85eba1982f..6ae9159d0ca4 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py @@ -117,8 +117,8 @@ class Type(proto.Enum): be set on the cluster. DEVELOPMENT (2): DEPRECATED: Prefer PRODUCTION for all use - cases, as it no longer enforces - a higher minimum node count than DEVELOPMENT. + cases, as it no longer enforces a higher minimum + node count than DEVELOPMENT. """ TYPE_UNSPECIFIED = 0 PRODUCTION = 1 diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py index 6b885203d480..57bd1b00f315 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py @@ -311,6 +311,7 @@ class ColumnFamily(proto.Message): gc_rule (google.cloud.bigtable_admin_v2.types.GcRule): Garbage collection rule specified as a protobuf. Must serialize to at most 500 bytes. + NOTE: Garbage collection executes opportunistically in the background, and so it's possible for reads to return a cell even if it @@ -481,6 +482,7 @@ class Snapshot(proto.Message): r"""A snapshot of a table at a particular time. 
A snapshot can be used as a checkpoint for data restoration or a data source for a new table. + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be changed in diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py index 441022ac735d..07a782d0cf0d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -1030,8 +1030,8 @@ def generate_initial_change_stream_partitions( Args: request (Optional[Union[google.cloud.bigtable_v2.types.GenerateInitialChangeStreamPartitionsRequest, dict]]): The request object. NOTE: This API is intended to be used - by Apache Beam BigtableIO. - Request message for + by Apache Beam BigtableIO. Request + message for Bigtable.GenerateInitialChangeStreamPartitions. table_name (:class:`str`): Required. The unique name of the table from which to get @@ -1061,8 +1061,8 @@ def generate_initial_change_stream_partitions( Returns: AsyncIterable[google.cloud.bigtable_v2.types.GenerateInitialChangeStreamPartitionsResponse]: NOTE: This API is intended to be used - by Apache Beam BigtableIO. - Response message for + by Apache Beam BigtableIO. Response + message for Bigtable.GenerateInitialChangeStreamPartitions. """ @@ -1123,17 +1123,15 @@ def read_change_stream( metadata: Sequence[Tuple[str, str]] = (), ) -> Awaitable[AsyncIterable[bigtable.ReadChangeStreamResponse]]: r"""NOTE: This API is intended to be used by Apache Beam - BigtableIO. - Reads changes from a table's change stream. Changes will - reflect both user-initiated mutations and mutations that - are caused by garbage collection. + BigtableIO. Reads changes from a table's change stream. + Changes will reflect both user-initiated mutations and + mutations that are caused by garbage collection. Args: request (Optional[Union[google.cloud.bigtable_v2.types.ReadChangeStreamRequest, dict]]): The request object. NOTE: This API is intended to be used - by Apache Beam BigtableIO. - Request message for - Bigtable.ReadChangeStream. + by Apache Beam BigtableIO. Request + message for Bigtable.ReadChangeStream. table_name (:class:`str`): Required. The unique name of the table from which to read a change stream. Values are of the form @@ -1162,9 +1160,8 @@ def read_change_stream( Returns: AsyncIterable[google.cloud.bigtable_v2.types.ReadChangeStreamResponse]: NOTE: This API is intended to be used - by Apache Beam BigtableIO. - Response message for - Bigtable.ReadChangeStream. + by Apache Beam BigtableIO. Response + message for Bigtable.ReadChangeStream. """ # Create or coerce a protobuf request object. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py index 595310c88a64..db393faa7f94 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py @@ -1330,8 +1330,8 @@ def generate_initial_change_stream_partitions( Args: request (Union[google.cloud.bigtable_v2.types.GenerateInitialChangeStreamPartitionsRequest, dict]): The request object. 
NOTE: This API is intended to be used - by Apache Beam BigtableIO. - Request message for + by Apache Beam BigtableIO. Request + message for Bigtable.GenerateInitialChangeStreamPartitions. table_name (str): Required. The unique name of the table from which to get @@ -1361,8 +1361,8 @@ def generate_initial_change_stream_partitions( Returns: Iterable[google.cloud.bigtable_v2.types.GenerateInitialChangeStreamPartitionsResponse]: NOTE: This API is intended to be used - by Apache Beam BigtableIO. - Response message for + by Apache Beam BigtableIO. Response + message for Bigtable.GenerateInitialChangeStreamPartitions. """ @@ -1427,17 +1427,15 @@ def read_change_stream( metadata: Sequence[Tuple[str, str]] = (), ) -> Iterable[bigtable.ReadChangeStreamResponse]: r"""NOTE: This API is intended to be used by Apache Beam - BigtableIO. - Reads changes from a table's change stream. Changes will - reflect both user-initiated mutations and mutations that - are caused by garbage collection. + BigtableIO. Reads changes from a table's change stream. + Changes will reflect both user-initiated mutations and + mutations that are caused by garbage collection. Args: request (Union[google.cloud.bigtable_v2.types.ReadChangeStreamRequest, dict]): The request object. NOTE: This API is intended to be used - by Apache Beam BigtableIO. - Request message for - Bigtable.ReadChangeStream. + by Apache Beam BigtableIO. Request + message for Bigtable.ReadChangeStream. table_name (str): Required. The unique name of the table from which to read a change stream. Values are of the form @@ -1466,9 +1464,8 @@ def read_change_stream( Returns: Iterable[google.cloud.bigtable_v2.types.ReadChangeStreamResponse]: NOTE: This API is intended to be used - by Apache Beam BigtableIO. - Response message for - Bigtable.ReadChangeStream. + by Apache Beam BigtableIO. Response + message for Bigtable.ReadChangeStream. """ # Create or coerce a protobuf request object. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py index 0e0666242177..8ba04e761db0 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py @@ -481,10 +481,9 @@ def read_change_stream( r"""Return a callable for the read change stream method over gRPC. NOTE: This API is intended to be used by Apache Beam - BigtableIO. - Reads changes from a table's change stream. Changes will - reflect both user-initiated mutations and mutations that - are caused by garbage collection. + BigtableIO. Reads changes from a table's change stream. + Changes will reflect both user-initiated mutations and + mutations that are caused by garbage collection. Returns: Callable[[~.ReadChangeStreamRequest], diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py index 49259969b046..2c0cbdad64c0 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py @@ -490,10 +490,9 @@ def read_change_stream( r"""Return a callable for the read change stream method over gRPC. 
NOTE: This API is intended to be used by Apache Beam - BigtableIO. - Reads changes from a table's change stream. Changes will - reflect both user-initiated mutations and mutations that - are caused by garbage collection. + BigtableIO. Reads changes from a table's change stream. + Changes will reflect both user-initiated mutations and + mutations that are caused by garbage collection. Returns: Callable[[~.ReadChangeStreamRequest], diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py index 066b35e2ae51..31d230f94fe4 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py @@ -571,8 +571,8 @@ def __call__( Args: request (~.bigtable.GenerateInitialChangeStreamPartitionsRequest): The request object. NOTE: This API is intended to be used - by Apache Beam BigtableIO. - Request message for + by Apache Beam BigtableIO. Request + message for Bigtable.GenerateInitialChangeStreamPartitions. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. @@ -583,8 +583,8 @@ def __call__( Returns: ~.bigtable.GenerateInitialChangeStreamPartitionsResponse: NOTE: This API is intended to be used - by Apache Beam BigtableIO. - Response message for + by Apache Beam BigtableIO. Response + message for Bigtable.GenerateInitialChangeStreamPartitions. """ @@ -975,9 +975,8 @@ def __call__( Args: request (~.bigtable.ReadChangeStreamRequest): The request object. NOTE: This API is intended to be used - by Apache Beam BigtableIO. - Request message for - Bigtable.ReadChangeStream. + by Apache Beam BigtableIO. Request + message for Bigtable.ReadChangeStream. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -987,9 +986,8 @@ def __call__( Returns: ~.bigtable.ReadChangeStreamResponse: NOTE: This API is intended to be used - by Apache Beam BigtableIO. - Response message for - Bigtable.ReadChangeStream. + by Apache Beam BigtableIO. Response + message for Bigtable.ReadChangeStream. """ diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py index 42740eee2367..57f8064085f4 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py @@ -756,8 +756,7 @@ class ReadModifyWriteRowResponse(proto.Message): class GenerateInitialChangeStreamPartitionsRequest(proto.Message): r"""NOTE: This API is intended to be used by Apache Beam - BigtableIO. - Request message for + BigtableIO. Request message for Bigtable.GenerateInitialChangeStreamPartitions. Attributes: @@ -785,8 +784,7 @@ class GenerateInitialChangeStreamPartitionsRequest(proto.Message): class GenerateInitialChangeStreamPartitionsResponse(proto.Message): r"""NOTE: This API is intended to be used by Apache Beam - BigtableIO. - Response message for + BigtableIO. Response message for Bigtable.GenerateInitialChangeStreamPartitions. Attributes: @@ -803,8 +801,7 @@ class GenerateInitialChangeStreamPartitionsResponse(proto.Message): class ReadChangeStreamRequest(proto.Message): r"""NOTE: This API is intended to be used by Apache Beam - BigtableIO. 
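(Illustrative sketch, not part of the patch above: how the two change-stream RPCs whose docstrings are reflowed here fit together when called from the generated data client. The table name is a placeholder; in practice these calls are issued by Apache Beam BigtableIO.)

    from google.cloud.bigtable_v2 import BigtableClient

    client = BigtableClient()
    table = "projects/p/instances/i/tables/t"

    # Step 1: enumerate the partitions of the table's change stream.
    partitions = [
        resp.partition
        for resp in client.generate_initial_change_stream_partitions(table_name=table)
    ]

    # Step 2: stream changes for the table.
    for resp in client.read_change_stream(table_name=table):
        ...  # each response carries a data_change, heartbeat, or close_stream record
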
- Request message for Bigtable.ReadChangeStream. + BigtableIO. Request message for Bigtable.ReadChangeStream. This message has `oneof`_ fields (mutually exclusive fields). For each oneof, at most one member field can be set at the same time. @@ -900,8 +897,7 @@ class ReadChangeStreamRequest(proto.Message): class ReadChangeStreamResponse(proto.Message): r"""NOTE: This API is intended to be used by Apache Beam - BigtableIO. - Response message for Bigtable.ReadChangeStream. + BigtableIO. Response message for Bigtable.ReadChangeStream. This message has `oneof`_ fields (mutually exclusive fields). For each oneof, at most one member field can be set at the same time. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py index 0e9e0bfe5bf0..e37644a761b7 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py @@ -639,11 +639,10 @@ class Chain(proto.Message): Attributes: filters (MutableSequence[google.cloud.bigtable_v2.types.RowFilter]): The elements of "filters" are chained - together to process the input row: - - in row -> f(0) -> intermediate row -> f(1) -> - ... -> f(N) -> out row The full chain is - executed atomically. + together to process the input row: in row -> + f(0) -> intermediate row -> f(1) -> ... -> f(N) + -> out row The full chain is executed + atomically. """ filters: MutableSequence["RowFilter"] = proto.RepeatedField( @@ -698,6 +697,7 @@ class Condition(proto.Message): r"""A RowFilter which evaluates one of two possible RowFilters, depending on whether or not a predicate RowFilter outputs any cells from the input row. + IMPORTANT NOTE: The predicate filter does not execute atomically with the true and false filters, which may lead to inconsistent or unexpected results. Additionally, Condition filters have poor @@ -1042,8 +1042,7 @@ class ReadModifyWriteRule(proto.Message): class StreamPartition(proto.Message): r"""NOTE: This API is intended to be used by Apache Beam - BigtableIO. - A partition of a change stream. + BigtableIO. A partition of a change stream. Attributes: row_range (google.cloud.bigtable_v2.types.RowRange): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/feature_flags.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/feature_flags.py index c7a64db5e046..92ac5023d863 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/feature_flags.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/feature_flags.py @@ -29,14 +29,14 @@ class FeatureFlags(proto.Message): - r"""Feature flags supported by a client. This is intended to be sent as - part of request metadata to assure the server that certain behaviors - are safe to enable. This proto is meant to be serialized and - websafe-base64 encoded under the ``bigtable-features`` metadata key. - The value will remain constant for the lifetime of a client and due - to HTTP2's HPACK compression, the request overhead will be tiny. - This is an internal implementation detail and should not be used by - endusers directly. + r"""Feature flags supported or enabled by a client. This is intended to + be sent as part of request metadata to assure the server that + certain behaviors are safe to enable. This proto is meant to be + serialized and websafe-base64 encoded under the + ``bigtable-features`` metadata key. 
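(Illustrative sketch, not part of the patch above: what the websafe-base64 encoding described in the FeatureFlags docstring might look like on the client side. The chosen flag values are placeholders.)

    import base64

    from google.cloud.bigtable_v2.types import FeatureFlags

    flags = FeatureFlags(
        reverse_scans=True,
        last_scanned_row_responses=True,
    )
    # Serialize the proto and websafe-base64 encode it, to be sent under the
    # ``bigtable-features`` metadata key with each request.
    token = base64.urlsafe_b64encode(FeatureFlags.serialize(flags)).decode("ascii")
    metadata = [("bigtable-features", token)]
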
The value will remain constant + for the lifetime of a client and due to HTTP2's HPACK compression, + the request overhead will be tiny. This is an internal + implementation detail and should not be used by end users directly. Attributes: reverse_scans (bool): @@ -47,11 +47,18 @@ class FeatureFlags(proto.Message): mutate_rows_rate_limit (bool): Notify the server that the client enables batch write flow control by requesting - RateLimitInfo from MutateRowsResponse. + RateLimitInfo from MutateRowsResponse. Due to + technical reasons, this disables partial + retries. + mutate_rows_rate_limit2 (bool): + Notify the server that the client enables + batch write flow control by requesting + RateLimitInfo from MutateRowsResponse. With + partial retries enabled. last_scanned_row_responses (bool): Notify the server that the client supports the last_scanned_row field in ReadRowsResponse for long-running - sparse scans. + scans. """ reverse_scans: bool = proto.Field( @@ -62,6 +69,10 @@ class FeatureFlags(proto.Message): proto.BOOL, number=3, ) + mutate_rows_rate_limit2: bool = proto.Field( + proto.BOOL, + number=5, + ) last_scanned_row_responses: bool = proto.Field( proto.BOOL, number=4, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/request_stats.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/request_stats.py index 27c2bb0284d1..61cce949135d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/request_stats.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/request_stats.py @@ -86,6 +86,7 @@ class RequestLatencyStats(proto.Message): response. For more context on the component that is measuring this latency, see: https://cloud.google.com/bigtable/docs/overview + Note: This value may be slightly shorter than the value reported into aggregate latency metrics in Monitoring for this request diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index f5f5fc514c5f..b2caa98bab8b 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -63,7 +63,7 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import options_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore -from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore from google.oauth2 import service_account from google.protobuf import field_mask_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index a537cb9f065e..aa717a3cb202 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -62,7 +62,7 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import options_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore -from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore from google.oauth2 
import service_account from google.protobuf import any_pb2 # type: ignore from google.protobuf import duration_pb2 # type: ignore From 54e266a0843c889fec20db3614d0c3d88e1b1915 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Fri, 6 Oct 2023 21:31:32 -0400 Subject: [PATCH 749/892] feat: Add support for Cloud Bigtable Request Priorities in App Profiles (#871) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: Add support for Cloud Bigtable Request Priorities in App Profiles PiperOrigin-RevId: 571158646 Source-Link: https://github.com/googleapis/googleapis/commit/bc3c83b41b1589cca21f713a500f179ef86a7e18 Source-Link: https://github.com/googleapis/googleapis-gen/commit/93366e84e4e6861e2e580eb000721d99bf54a0a4 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiOTMzNjZlODRlNGU2ODYxZTJlNTgwZWIwMDA3MjFkOTliZjU0YTBhNCJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- .../cloud/bigtable_admin_v2/types/instance.py | 65 ++++++++++++++++++- .../test_bigtable_instance_admin.py | 12 ++++ 2 files changed, 76 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py index 6ae9159d0ca4..78efd711bbc6 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py @@ -173,7 +173,7 @@ class AutoscalingTargets(proto.Message): The storage utilization that the Autoscaler should be trying to achieve. This number is limited between 2560 (2.5TiB) and 5120 (5TiB) for a SSD cluster and between 8192 (8TiB) and - 16384 (16TiB) for an HDD cluster; otherwise it will return + 16384 (16TiB) for an HDD cluster, otherwise it will return INVALID_ARGUMENT error. If this value is set to 0, it will be treated as if it were set to the default value: 2560 for SSD, 8192 for HDD. @@ -419,8 +419,43 @@ class AppProfile(proto.Message): Use a single-cluster routing policy. This field is a member of `oneof`_ ``routing_policy``. + priority (google.cloud.bigtable_admin_v2.types.AppProfile.Priority): + This field has been deprecated in favor of + ``standard_isolation.priority``. If you set this field, + ``standard_isolation.priority`` will be set instead. + + The priority of requests sent using this app profile. + + This field is a member of `oneof`_ ``isolation``. + standard_isolation (google.cloud.bigtable_admin_v2.types.AppProfile.StandardIsolation): + The standard options used for isolating this + app profile's traffic from other use cases. + + This field is a member of `oneof`_ ``isolation``. """ + class Priority(proto.Enum): + r"""Possible priorities for an app profile. Note that higher + priority writes can sometimes queue behind lower priority writes + to the same tablet, as writes must be strictly sequenced in the + durability log. + + Values: + PRIORITY_UNSPECIFIED (0): + Default value. Mapped to PRIORITY_HIGH (the legacy behavior) + on creation. + PRIORITY_LOW (1): + No description available. + PRIORITY_MEDIUM (2): + No description available. + PRIORITY_HIGH (3): + No description available. 
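(Illustrative sketch, not part of the patch above: creating an app profile that uses the new standard_isolation priority added by this commit. The project, instance, and profile IDs are placeholders.)

    from google.cloud.bigtable_admin_v2 import BigtableInstanceAdminClient
    from google.cloud.bigtable_admin_v2 import types

    client = BigtableInstanceAdminClient()

    profile = types.AppProfile(
        description="batch analytics traffic",
        multi_cluster_routing_use_any=types.AppProfile.MultiClusterRoutingUseAny(),
        # Prefer standard_isolation.priority over the deprecated top-level field.
        standard_isolation=types.AppProfile.StandardIsolation(
            priority=types.AppProfile.Priority.PRIORITY_LOW,
        ),
    )

    client.create_app_profile(
        parent="projects/p/instances/i",
        app_profile_id="batch-profile",
        app_profile=profile,
    )
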
+ """ + PRIORITY_UNSPECIFIED = 0 + PRIORITY_LOW = 1 + PRIORITY_MEDIUM = 2 + PRIORITY_HIGH = 3 + class MultiClusterRoutingUseAny(proto.Message): r"""Read/write requests are routed to the nearest cluster in the instance, and will fail over to the nearest cluster that is @@ -466,6 +501,22 @@ class SingleClusterRouting(proto.Message): number=2, ) + class StandardIsolation(proto.Message): + r"""Standard options for isolating this app profile's traffic + from other use cases. + + Attributes: + priority (google.cloud.bigtable_admin_v2.types.AppProfile.Priority): + The priority of requests sent using this app + profile. + """ + + priority: "AppProfile.Priority" = proto.Field( + proto.ENUM, + number=1, + enum="AppProfile.Priority", + ) + name: str = proto.Field( proto.STRING, number=1, @@ -490,6 +541,18 @@ class SingleClusterRouting(proto.Message): oneof="routing_policy", message=SingleClusterRouting, ) + priority: Priority = proto.Field( + proto.ENUM, + number=7, + oneof="isolation", + enum=Priority, + ) + standard_isolation: StandardIsolation = proto.Field( + proto.MESSAGE, + number=11, + oneof="isolation", + message=StandardIsolation, + ) class HotTablet(proto.Message): diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index b2caa98bab8b..b8508cab4888 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -3524,6 +3524,7 @@ def test_create_app_profile(request_type, transport: str = "grpc"): name="name_value", etag="etag_value", description="description_value", + priority=instance.AppProfile.Priority.PRIORITY_LOW, ) response = client.create_app_profile(request) @@ -3793,6 +3794,7 @@ def test_get_app_profile(request_type, transport: str = "grpc"): name="name_value", etag="etag_value", description="description_value", + priority=instance.AppProfile.Priority.PRIORITY_LOW, ) response = client.get_app_profile(request) @@ -9323,6 +9325,8 @@ def test_create_app_profile_rest(request_type): "cluster_id": "cluster_id_value", "allow_transactional_writes": True, }, + "priority": 1, + "standard_isolation": {"priority": 1}, } request = request_type(**request_init) @@ -9333,6 +9337,7 @@ def test_create_app_profile_rest(request_type): name="name_value", etag="etag_value", description="description_value", + priority=instance.AppProfile.Priority.PRIORITY_LOW, ) # Wrap the value into a proper Response obj @@ -9550,6 +9555,8 @@ def test_create_app_profile_rest_bad_request( "cluster_id": "cluster_id_value", "allow_transactional_writes": True, }, + "priority": 1, + "standard_isolation": {"priority": 1}, } request = request_type(**request_init) @@ -9655,6 +9662,7 @@ def test_get_app_profile_rest(request_type): name="name_value", etag="etag_value", description="description_value", + priority=instance.AppProfile.Priority.PRIORITY_LOW, ) # Wrap the value into a proper Response obj @@ -10283,6 +10291,8 @@ def test_update_app_profile_rest(request_type): "cluster_id": "cluster_id_value", "allow_transactional_writes": True, }, + "priority": 1, + "standard_isolation": {"priority": 1}, } request = request_type(**request_init) @@ -10489,6 +10499,8 @@ def test_update_app_profile_rest_bad_request( "cluster_id": "cluster_id_value", "allow_transactional_writes": True, }, + "priority": 1, + "standard_isolation": 
{"priority": 1}, } request = request_type(**request_init) From ef951ba3778ecfe2638ebbfa0144652dc6a0a442 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Fri, 6 Oct 2023 21:36:09 -0400 Subject: [PATCH 750/892] chore: [autoapprove] bump cryptography from 41.0.3 to 41.0.4 (#868) Source-Link: https://github.com/googleapis/synthtool/commit/dede53ff326079b457cfb1aae5bbdc82cbb51dc3 Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:fac304457974bb530cc5396abd4ab25d26a469cd3bc97cbfb18c8d4324c584eb Co-authored-by: Owl Bot --- .../.github/.OwlBot.lock.yaml | 4 +- packages/google-cloud-bigtable/.gitignore | 1 + .../.kokoro/requirements.txt | 49 ++++++++++--------- 3 files changed, 28 insertions(+), 26 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index a3da1b0d4cd3..a9bdb1b7ac0f 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:3e3800bb100af5d7f9e810d48212b37812c1856d20ffeafb99ebe66461b61fc7 -# created: 2023-08-02T10:53:29.114535628Z + digest: sha256:fac304457974bb530cc5396abd4ab25d26a469cd3bc97cbfb18c8d4324c584eb +# created: 2023-10-02T21:31:03.517640371Z diff --git a/packages/google-cloud-bigtable/.gitignore b/packages/google-cloud-bigtable/.gitignore index b4243ced74e4..d083ea1ddc3e 100644 --- a/packages/google-cloud-bigtable/.gitignore +++ b/packages/google-cloud-bigtable/.gitignore @@ -50,6 +50,7 @@ docs.metadata # Virtual environment env/ +venv/ # Test logs coverage.xml diff --git a/packages/google-cloud-bigtable/.kokoro/requirements.txt b/packages/google-cloud-bigtable/.kokoro/requirements.txt index 029bd342de94..96d593c8c82a 100644 --- a/packages/google-cloud-bigtable/.kokoro/requirements.txt +++ b/packages/google-cloud-bigtable/.kokoro/requirements.txt @@ -113,30 +113,30 @@ commonmark==0.9.1 \ --hash=sha256:452f9dc859be7f06631ddcb328b6919c67984aca654e5fefb3914d54691aed60 \ --hash=sha256:da2f38c92590f83de410ba1a3cbceafbc74fee9def35f9251ba9a971d6d66fd9 # via rich -cryptography==41.0.3 \ - --hash=sha256:0d09fb5356f975974dbcb595ad2d178305e5050656affb7890a1583f5e02a306 \ - --hash=sha256:23c2d778cf829f7d0ae180600b17e9fceea3c2ef8b31a99e3c694cbbf3a24b84 \ - --hash=sha256:3fb248989b6363906827284cd20cca63bb1a757e0a2864d4c1682a985e3dca47 \ - --hash=sha256:41d7aa7cdfded09b3d73a47f429c298e80796c8e825ddfadc84c8a7f12df212d \ - --hash=sha256:42cb413e01a5d36da9929baa9d70ca90d90b969269e5a12d39c1e0d475010116 \ - --hash=sha256:4c2f0d35703d61002a2bbdcf15548ebb701cfdd83cdc12471d2bae80878a4207 \ - --hash=sha256:4fd871184321100fb400d759ad0cddddf284c4b696568204d281c902fc7b0d81 \ - --hash=sha256:5259cb659aa43005eb55a0e4ff2c825ca111a0da1814202c64d28a985d33b087 \ - --hash=sha256:57a51b89f954f216a81c9d057bf1a24e2f36e764a1ca9a501a6964eb4a6800dd \ - --hash=sha256:652627a055cb52a84f8c448185922241dd5217443ca194d5739b44612c5e6507 \ - --hash=sha256:67e120e9a577c64fe1f611e53b30b3e69744e5910ff3b6e97e935aeb96005858 \ - --hash=sha256:6af1c6387c531cd364b72c28daa29232162010d952ceb7e5ca8e2827526aceae \ - --hash=sha256:6d192741113ef5e30d89dcb5b956ef4e1578f304708701b8b73d38e3e1461f34 \ - --hash=sha256:7efe8041897fe7a50863e51b77789b657a133c75c3b094e51b5e4b5cec7bf906 \ - 
--hash=sha256:84537453d57f55a50a5b6835622ee405816999a7113267739a1b4581f83535bd \ - --hash=sha256:8f09daa483aedea50d249ef98ed500569841d6498aa9c9f4b0531b9964658922 \ - --hash=sha256:95dd7f261bb76948b52a5330ba5202b91a26fbac13ad0e9fc8a3ac04752058c7 \ - --hash=sha256:a74fbcdb2a0d46fe00504f571a2a540532f4c188e6ccf26f1f178480117b33c4 \ - --hash=sha256:a983e441a00a9d57a4d7c91b3116a37ae602907a7618b882c8013b5762e80574 \ - --hash=sha256:ab8de0d091acbf778f74286f4989cf3d1528336af1b59f3e5d2ebca8b5fe49e1 \ - --hash=sha256:aeb57c421b34af8f9fe830e1955bf493a86a7996cc1338fe41b30047d16e962c \ - --hash=sha256:ce785cf81a7bdade534297ef9e490ddff800d956625020ab2ec2780a556c313e \ - --hash=sha256:d0d651aa754ef58d75cec6edfbd21259d93810b73f6ec246436a21b7841908de +cryptography==41.0.4 \ + --hash=sha256:004b6ccc95943f6a9ad3142cfabcc769d7ee38a3f60fb0dddbfb431f818c3a67 \ + --hash=sha256:047c4603aeb4bbd8db2756e38f5b8bd7e94318c047cfe4efeb5d715e08b49311 \ + --hash=sha256:0d9409894f495d465fe6fda92cb70e8323e9648af912d5b9141d616df40a87b8 \ + --hash=sha256:23a25c09dfd0d9f28da2352503b23e086f8e78096b9fd585d1d14eca01613e13 \ + --hash=sha256:2ed09183922d66c4ec5fdaa59b4d14e105c084dd0febd27452de8f6f74704143 \ + --hash=sha256:35c00f637cd0b9d5b6c6bd11b6c3359194a8eba9c46d4e875a3660e3b400005f \ + --hash=sha256:37480760ae08065437e6573d14be973112c9e6dcaf5f11d00147ee74f37a3829 \ + --hash=sha256:3b224890962a2d7b57cf5eeb16ccaafba6083f7b811829f00476309bce2fe0fd \ + --hash=sha256:5a0f09cefded00e648a127048119f77bc2b2ec61e736660b5789e638f43cc397 \ + --hash=sha256:5b72205a360f3b6176485a333256b9bcd48700fc755fef51c8e7e67c4b63e3ac \ + --hash=sha256:7e53db173370dea832190870e975a1e09c86a879b613948f09eb49324218c14d \ + --hash=sha256:7febc3094125fc126a7f6fb1f420d0da639f3f32cb15c8ff0dc3997c4549f51a \ + --hash=sha256:80907d3faa55dc5434a16579952ac6da800935cd98d14dbd62f6f042c7f5e839 \ + --hash=sha256:86defa8d248c3fa029da68ce61fe735432b047e32179883bdb1e79ed9bb8195e \ + --hash=sha256:8ac4f9ead4bbd0bc8ab2d318f97d85147167a488be0e08814a37eb2f439d5cf6 \ + --hash=sha256:93530900d14c37a46ce3d6c9e6fd35dbe5f5601bf6b3a5c325c7bffc030344d9 \ + --hash=sha256:9eeb77214afae972a00dee47382d2591abe77bdae166bda672fb1e24702a3860 \ + --hash=sha256:b5f4dfe950ff0479f1f00eda09c18798d4f49b98f4e2006d644b3301682ebdca \ + --hash=sha256:c3391bd8e6de35f6f1140e50aaeb3e2b3d6a9012536ca23ab0d9c35ec18c8a91 \ + --hash=sha256:c880eba5175f4307129784eca96f4e70b88e57aa3f680aeba3bab0e980b0f37d \ + --hash=sha256:cecfefa17042941f94ab54f769c8ce0fe14beff2694e9ac684176a2535bf9714 \ + --hash=sha256:e40211b4923ba5a6dc9769eab704bdb3fbb58d56c5b336d30996c24fcf12aadb \ + --hash=sha256:efc8ad4e6fc4f1752ebfb58aefece8b4e3c4cae940b0994d43649bdfce8d0d4f # via # gcp-releasetool # secretstorage @@ -382,6 +382,7 @@ protobuf==3.20.3 \ # gcp-docuploader # gcp-releasetool # google-api-core + # googleapis-common-protos pyasn1==0.4.8 \ --hash=sha256:39c7e2ec30515947ff4e87fb6f456dfc6e84857d34be479c9d4a4ba4bf46aa5d \ --hash=sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba From 44ec498c2d8c49f5034076ca29757b41c502d126 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Mon, 9 Oct 2023 11:05:02 -0400 Subject: [PATCH 751/892] chore: [autoapprove] Update `black` and `isort` to latest versions (#873) Source-Link: https://github.com/googleapis/synthtool/commit/0c7b0333f44b2b7075447f43a121a12d15a7b76a Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:08e34975760f002746b1d8c86fdc90660be45945ee6d9db914d1508acdf9a547 
Co-authored-by: Owl Bot --- .../.github/.OwlBot.lock.yaml | 4 +-- .../.kokoro/requirements.txt | 6 ++-- .../.pre-commit-config.yaml | 2 +- .../google-cloud-bigtable/docs/snippets.py | 1 - .../docs/snippets_table.py | 1 - packages/google-cloud-bigtable/noxfile.py | 36 ++++++++++--------- .../tests/system/test_instance_admin.py | 1 - .../tests/unit/test_batcher.py | 6 ---- .../tests/unit/test_cluster.py | 4 --- .../tests/unit/test_column_family.py | 1 - .../tests/unit/test_instance.py | 1 - .../tests/unit/test_row_data.py | 2 -- .../tests/unit/test_table.py | 1 - 13 files changed, 25 insertions(+), 41 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index a9bdb1b7ac0f..dd98abbdeebe 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:fac304457974bb530cc5396abd4ab25d26a469cd3bc97cbfb18c8d4324c584eb -# created: 2023-10-02T21:31:03.517640371Z + digest: sha256:08e34975760f002746b1d8c86fdc90660be45945ee6d9db914d1508acdf9a547 +# created: 2023-10-09T14:06:13.397766266Z diff --git a/packages/google-cloud-bigtable/.kokoro/requirements.txt b/packages/google-cloud-bigtable/.kokoro/requirements.txt index 96d593c8c82a..0332d3267e15 100644 --- a/packages/google-cloud-bigtable/.kokoro/requirements.txt +++ b/packages/google-cloud-bigtable/.kokoro/requirements.txt @@ -467,9 +467,9 @@ typing-extensions==4.4.0 \ --hash=sha256:1511434bb92bf8dd198c12b1cc812e800d4181cfcb867674e0f8279cc93087aa \ --hash=sha256:16fa4864408f655d35ec496218b85f79b3437c829e93320c7c9215ccfd92489e # via -r requirements.in -urllib3==1.26.12 \ - --hash=sha256:3fa96cf423e6987997fc326ae8df396db2a8b7c667747d47ddd8ecba91f4a74e \ - --hash=sha256:b930dd878d5a8afb066a637fbb35144fe7901e3b209d1cd4f524bd0e9deee997 +urllib3==1.26.17 \ + --hash=sha256:24d6a242c28d29af46c3fae832c36db3bbebcc533dd1bb549172cd739c82df21 \ + --hash=sha256:94a757d178c9be92ef5539b8840d48dc9cf1b2709c9d6b588232a055c524458b # via # requests # twine diff --git a/packages/google-cloud-bigtable/.pre-commit-config.yaml b/packages/google-cloud-bigtable/.pre-commit-config.yaml index 19409cbd37a4..6a8e16950664 100644 --- a/packages/google-cloud-bigtable/.pre-commit-config.yaml +++ b/packages/google-cloud-bigtable/.pre-commit-config.yaml @@ -22,7 +22,7 @@ repos: - id: end-of-file-fixer - id: check-yaml - repo: https://github.com/psf/black - rev: 22.3.0 + rev: 23.7.0 hooks: - id: black - repo: https://github.com/pycqa/flake8 diff --git a/packages/google-cloud-bigtable/docs/snippets.py b/packages/google-cloud-bigtable/docs/snippets.py index 1d93fdf124fb..fa3aa3627970 100644 --- a/packages/google-cloud-bigtable/docs/snippets.py +++ b/packages/google-cloud-bigtable/docs/snippets.py @@ -448,7 +448,6 @@ def test_bigtable_create_table(): def test_bigtable_list_tables(): - # [START bigtable_api_list_tables] from google.cloud.bigtable import Client diff --git a/packages/google-cloud-bigtable/docs/snippets_table.py b/packages/google-cloud-bigtable/docs/snippets_table.py index f27260425321..893135275f6d 100644 --- a/packages/google-cloud-bigtable/docs/snippets_table.py +++ b/packages/google-cloud-bigtable/docs/snippets_table.py @@ -964,7 +964,6 @@ def test_bigtable_create_family_gc_nested(): def test_bigtable_row_data_cells_cell_value_cell_values(): - value = b"value_in_col1" row = 
Config.TABLE.row(b"row_key_1") row.set_cell( diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index b2820b309e1a..456191016790 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -17,22 +17,24 @@ # Generated by synthtool. DO NOT EDIT! from __future__ import absolute_import + import os import pathlib import re import shutil +from typing import Dict, List import warnings import nox FLAKE8_VERSION = "flake8==6.1.0" -BLACK_VERSION = "black==22.3.0" -ISORT_VERSION = "isort==5.10.1" +BLACK_VERSION = "black[jupyter]==23.7.0" +ISORT_VERSION = "isort==5.11.0" LINT_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"] DEFAULT_PYTHON_VERSION = "3.8" -UNIT_TEST_PYTHON_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] +UNIT_TEST_PYTHON_VERSIONS: List[str] = ["3.7", "3.8", "3.9", "3.10", "3.11"] UNIT_TEST_STANDARD_DEPENDENCIES = [ "mock", "asyncmock", @@ -40,23 +42,23 @@ "pytest-cov", "pytest-asyncio", ] -UNIT_TEST_EXTERNAL_DEPENDENCIES = [] -UNIT_TEST_LOCAL_DEPENDENCIES = [] -UNIT_TEST_DEPENDENCIES = [] -UNIT_TEST_EXTRAS = [] -UNIT_TEST_EXTRAS_BY_PYTHON = {} - -SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"] -SYSTEM_TEST_STANDARD_DEPENDENCIES = [ +UNIT_TEST_EXTERNAL_DEPENDENCIES: List[str] = [] +UNIT_TEST_LOCAL_DEPENDENCIES: List[str] = [] +UNIT_TEST_DEPENDENCIES: List[str] = [] +UNIT_TEST_EXTRAS: List[str] = [] +UNIT_TEST_EXTRAS_BY_PYTHON: Dict[str, List[str]] = {} + +SYSTEM_TEST_PYTHON_VERSIONS: List[str] = ["3.8"] +SYSTEM_TEST_STANDARD_DEPENDENCIES: List[str] = [ "mock", "pytest", "google-cloud-testutils", ] -SYSTEM_TEST_EXTERNAL_DEPENDENCIES = [] -SYSTEM_TEST_LOCAL_DEPENDENCIES = [] -SYSTEM_TEST_DEPENDENCIES = [] -SYSTEM_TEST_EXTRAS = [] -SYSTEM_TEST_EXTRAS_BY_PYTHON = {} +SYSTEM_TEST_EXTERNAL_DEPENDENCIES: List[str] = [] +SYSTEM_TEST_LOCAL_DEPENDENCIES: List[str] = [] +SYSTEM_TEST_DEPENDENCIES: List[str] = [] +SYSTEM_TEST_EXTRAS: List[str] = [] +SYSTEM_TEST_EXTRAS_BY_PYTHON: Dict[str, List[str]] = {} CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute() @@ -71,6 +73,7 @@ "lint_setup_py", "blacken", "docs", + "format", ] # Error if a python version is missing @@ -201,7 +204,6 @@ def unit(session): def install_systemtest_dependencies(session, *constraints): - # Use pre-release gRPC for system tests. # Exclude version 1.52.0rc1 which has a known issue. 
# See https://github.com/grpc/grpc/issues/32163 diff --git a/packages/google-cloud-bigtable/tests/system/test_instance_admin.py b/packages/google-cloud-bigtable/tests/system/test_instance_admin.py index e5e311213d5b..bd5c7e9122ca 100644 --- a/packages/google-cloud-bigtable/tests/system/test_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/system/test_instance_admin.py @@ -28,7 +28,6 @@ def _create_app_profile_helper( allow_transactional_writes=None, ignore_warnings=None, ): - app_profile = instance.app_profile( app_profile_id=app_profile_id, routing_policy_type=routing_policy_type, diff --git a/packages/google-cloud-bigtable/tests/unit/test_batcher.py b/packages/google-cloud-bigtable/tests/unit/test_batcher.py index 9987481415e6..741d9f2825e8 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_batcher.py +++ b/packages/google-cloud-bigtable/tests/unit/test_batcher.py @@ -59,7 +59,6 @@ def callback_fn(response): def test_mutation_batcher_mutate_row(): table = _Table(TABLE_NAME) with MutationsBatcher(table=table) as mutation_batcher: - rows = [ DirectRow(row_key=b"row_key"), DirectRow(row_key=b"row_key_2"), @@ -75,7 +74,6 @@ def test_mutation_batcher_mutate_row(): def test_mutation_batcher_mutate(): table = _Table(TABLE_NAME) with MutationsBatcher(table=table) as mutation_batcher: - row = DirectRow(row_key=b"row_key") row.set_cell("cf1", b"c1", 1) row.set_cell("cf1", b"c2", 2) @@ -98,7 +96,6 @@ def test_mutation_batcher_flush_w_no_rows(): def test_mutation_batcher_mutate_w_max_flush_count(): table = _Table(TABLE_NAME) with MutationsBatcher(table=table, flush_count=3) as mutation_batcher: - row_1 = DirectRow(row_key=b"row_key_1") row_2 = DirectRow(row_key=b"row_key_2") row_3 = DirectRow(row_key=b"row_key_3") @@ -114,7 +111,6 @@ def test_mutation_batcher_mutate_w_max_flush_count(): def test_mutation_batcher_mutate_w_max_mutations(): table = _Table(TABLE_NAME) with MutationsBatcher(table=table) as mutation_batcher: - row = DirectRow(row_key=b"row_key") row.set_cell("cf1", b"c1", 1) row.set_cell("cf1", b"c2", 2) @@ -130,7 +126,6 @@ def test_mutation_batcher_mutate_w_max_row_bytes(): with MutationsBatcher( table=table, max_row_bytes=3 * 1024 * 1024 ) as mutation_batcher: - number_of_bytes = 1 * 1024 * 1024 max_value = b"1" * number_of_bytes @@ -168,7 +163,6 @@ def test_mutations_batcher_context_manager_flushed_when_closed(): with MutationsBatcher( table=table, max_row_bytes=3 * 1024 * 1024 ) as mutation_batcher: - number_of_bytes = 1 * 1024 * 1024 max_value = b"1" * number_of_bytes diff --git a/packages/google-cloud-bigtable/tests/unit/test_cluster.py b/packages/google-cloud-bigtable/tests/unit/test_cluster.py index cb0312b0c079..65ed47437289 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_cluster.py +++ b/packages/google-cloud-bigtable/tests/unit/test_cluster.py @@ -752,7 +752,6 @@ def test_cluster_update_w_partial_autoscaling_config(): }, ] for config in cluster_config: - cluster = _make_cluster( CLUSTER_ID, instance, @@ -927,7 +926,6 @@ def test_cluster_disable_autoscaling(): def test_create_cluster_with_both_manual_and_autoscaling(): - from google.cloud.bigtable.instance import Instance from google.cloud.bigtable.enums import StorageType @@ -955,7 +953,6 @@ def test_create_cluster_with_both_manual_and_autoscaling(): def test_create_cluster_with_partial_autoscaling_config(): - from google.cloud.bigtable.instance import Instance from google.cloud.bigtable.enums import StorageType @@ -996,7 +993,6 @@ def test_create_cluster_with_partial_autoscaling_config(): def 
test_create_cluster_with_no_scaling_config(): - from google.cloud.bigtable.instance import Instance from google.cloud.bigtable.enums import StorageType diff --git a/packages/google-cloud-bigtable/tests/unit/test_column_family.py b/packages/google-cloud-bigtable/tests/unit/test_column_family.py index b464024a740e..80b05d7443bc 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_column_family.py +++ b/packages/google-cloud-bigtable/tests/unit/test_column_family.py @@ -595,7 +595,6 @@ def test__gc_rule_from_pb_unknown_field_name(): from google.cloud.bigtable.column_family import _gc_rule_from_pb class MockProto(object): - names = [] _pb = {} diff --git a/packages/google-cloud-bigtable/tests/unit/test_instance.py b/packages/google-cloud-bigtable/tests/unit/test_instance.py index c577adca5895..797e4bd9c9d2 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_instance.py +++ b/packages/google-cloud-bigtable/tests/unit/test_instance.py @@ -67,7 +67,6 @@ def _make_instance(*args, **kwargs): def test_instance_constructor_defaults(): - client = object() instance = _make_instance(INSTANCE_ID, client) assert instance.instance_id == INSTANCE_ID diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_data.py b/packages/google-cloud-bigtable/tests/unit/test_row_data.py index fba69ceba0c6..9f2c40a545bb 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_data.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_data.py @@ -1118,7 +1118,6 @@ def test_RRRM_build_updated_request_row_ranges_valid(): class _MockCancellableIterator(object): - cancel_calls = 0 def __init__(self, *values): @@ -1199,5 +1198,4 @@ def _read_rows_retry_exception(exc): class _Client(object): - data_stub = None diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index 3d7d2e8eea2f..f2dc1448581f 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -1689,7 +1689,6 @@ def _do_mutate_retryable_rows_helper( expected_entries = [] for row, prior_status in zip(rows, worker.responses_statuses): - if prior_status is None or prior_status.code in RETRYABLES: mutations = row._get_mutations().copy() # row clears on success entry = data_messages_v2_pb2.MutateRowsRequest.Entry( From 5754b60a7e35417e68b4b5a2ece7f6f8a1b023d3 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Sat, 28 Oct 2023 00:15:28 +0000 Subject: [PATCH 752/892] chore: Update gapic-generator-python to v1.11.9 (#874) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore: Update gapic-generator-python to v1.11.7 PiperOrigin-RevId: 573230664 Source-Link: https://github.com/googleapis/googleapis/commit/93beed334607e70709cc60e6145be65fdc8ec386 Source-Link: https://github.com/googleapis/googleapis-gen/commit/f4a4edaa8057639fcf6adf9179872280d1a8f651 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiZjRhNGVkYWE4MDU3NjM5ZmNmNmFkZjkxNzk4NzIyODBkMWE4ZjY1MSJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * chore: Update gapic-generator-python to v1.11.8 PiperOrigin-RevId: 574178735 Source-Link: https://github.com/googleapis/googleapis/commit/7307199008ee2d57a4337066de29f9cd8c444bc6 Source-Link: https://github.com/googleapis/googleapis-gen/commit/ce3af21b7c559a87c2befc076be0e3aeda3a26f0 Copy-Tag: 
eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiY2UzYWYyMWI3YzU1OWE4N2MyYmVmYzA3NmJlMGUzYWVkYTNhMjZmMCJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * chore: Update gapic-generator-python to v1.11.9 PiperOrigin-RevId: 574520922 Source-Link: https://github.com/googleapis/googleapis/commit/5183984d611beb41e90f65f08609b9d926f779bd Source-Link: https://github.com/googleapis/googleapis-gen/commit/a59af19d4ac6509faedf1cc39029141b6a5b8968 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiYTU5YWYxOWQ0YWM2NTA5ZmFlZGYxY2MzOTAyOTE0MWI2YTViODk2OCJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- .../test_bigtable_instance_admin.py | 587 +++++++++++++----- .../test_bigtable_table_admin.py | 461 +++++++++----- .../unit/gapic/bigtable_v2/test_bigtable.py | 135 ++-- 3 files changed, 820 insertions(+), 363 deletions(-) diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index b8508cab4888..ddbf0032f531 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -6454,8 +6454,9 @@ def test_get_instance_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = instance.Instance.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = instance.Instance.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -6534,8 +6535,9 @@ def test_get_instance_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = instance.Instance.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = instance.Instance.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -6658,8 +6660,9 @@ def test_get_instance_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = instance.Instance.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = instance.Instance.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -6723,8 +6726,9 @@ def test_list_instances_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_instance_admin.ListInstancesResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListInstancesResponse.pb(return_value) + json_return_value = 
json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -6804,10 +6808,11 @@ def test_list_instances_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_instance_admin.ListInstancesResponse.pb( + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListInstancesResponse.pb( return_value ) - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -6934,8 +6939,9 @@ def test_list_instances_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_instance_admin.ListInstancesResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListInstancesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -7002,8 +7008,9 @@ def test_update_instance_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = instance.Instance.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = instance.Instance.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -7081,8 +7088,9 @@ def test_update_instance_rest_required_fields(request_type=instance.Instance): response_value = Response() response_value.status_code = 200 - pb_return_value = instance.Instance.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = instance.Instance.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -7210,6 +7218,75 @@ def test_partial_update_instance_rest(request_type): "create_time": {"seconds": 751, "nanos": 543}, "satisfies_pzs": True, } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_instance_admin.PartialUpdateInstanceRequest.meta.fields[ + "instance" + ] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["instance"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["instance"][field])): + del request_init["instance"][field][i][subfield] + else: + del request_init["instance"][field][subfield] request = request_type(**request_init) # Mock the http request call within the method and fake a response. @@ -7390,15 +7467,6 @@ def test_partial_update_instance_rest_bad_request( # send a request that will satisfy transcoding request_init = {"instance": {"name": "projects/sample1/instances/sample2"}} - request_init["instance"] = { - "name": "projects/sample1/instances/sample2", - "display_name": "display_name_value", - "state": 1, - "type_": 1, - "labels": {}, - "create_time": {"seconds": 751, "nanos": 543}, - "satisfies_pzs": True, - } request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -7761,6 +7829,73 @@ def test_create_cluster_rest(request_type): "default_storage_type": 1, "encryption_config": {"kms_key_name": "kms_key_name_value"}, } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_instance_admin.CreateClusterRequest.meta.fields["cluster"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["cluster"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["cluster"][field])): + del request_init["cluster"][field][i][subfield] + else: + del request_init["cluster"][field][subfield] request = request_type(**request_init) # Mock the http request call within the method and fake a response. @@ -7959,26 +8094,6 @@ def test_create_cluster_rest_bad_request( # send a request that will satisfy transcoding request_init = {"parent": "projects/sample1/instances/sample2"} - request_init["cluster"] = { - "name": "name_value", - "location": "location_value", - "state": 1, - "serve_nodes": 1181, - "cluster_config": { - "cluster_autoscaling_config": { - "autoscaling_limits": { - "min_serve_nodes": 1600, - "max_serve_nodes": 1602, - }, - "autoscaling_targets": { - "cpu_utilization_percent": 2483, - "storage_utilization_gib_per_node": 3404, - }, - } - }, - "default_storage_type": 1, - "encryption_config": {"kms_key_name": "kms_key_name_value"}, - } request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
@@ -8088,8 +8203,9 @@ def test_get_cluster_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = instance.Cluster.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = instance.Cluster.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -8168,8 +8284,9 @@ def test_get_cluster_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = instance.Cluster.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = instance.Cluster.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -8292,8 +8409,9 @@ def test_get_cluster_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = instance.Cluster.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = instance.Cluster.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -8358,8 +8476,9 @@ def test_list_clusters_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_instance_admin.ListClustersResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListClustersResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -8439,10 +8558,9 @@ def test_list_clusters_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_instance_admin.ListClustersResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListClustersResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -8569,8 +8687,9 @@ def test_list_clusters_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_instance_admin.ListClustersResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListClustersResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -8766,6 +8885,75 @@ def test_partial_update_cluster_rest(request_type): "default_storage_type": 1, "encryption_config": {"kms_key_name": "kms_key_name_value"}, } + # The version of a generated dependency at test runtime may differ from the 
version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_instance_admin.PartialUpdateClusterRequest.meta.fields[ + "cluster" + ] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. + message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["cluster"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["cluster"][field])): + del request_init["cluster"][field][i][subfield] + else: + del request_init["cluster"][field][subfield] request = request_type(**request_init) # Mock the http request call within the method and fake a response. @@ -8948,26 +9136,6 @@ def test_partial_update_cluster_rest_bad_request( request_init = { "cluster": {"name": "projects/sample1/instances/sample2/clusters/sample3"} } - request_init["cluster"] = { - "name": "projects/sample1/instances/sample2/clusters/sample3", - "location": "location_value", - "state": 1, - "serve_nodes": 1181, - "cluster_config": { - "cluster_autoscaling_config": { - "autoscaling_limits": { - "min_serve_nodes": 1600, - "max_serve_nodes": 1602, - }, - "autoscaling_targets": { - "cpu_utilization_percent": 2483, - "storage_utilization_gib_per_node": 3404, - }, - } - }, - "default_storage_type": 1, - "encryption_config": {"kms_key_name": "kms_key_name_value"}, - } request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
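# Illustrative sketch of the failure mode the pruning hunks above guard against (an
# assumed example, not taken from the patch): constructing a protobuf message from
# keyword arguments that include a field the installed runtime does not define raises
# ValueError, which is why stale subfields are stripped before request_type(**request_init).
from google.protobuf import duration_pb2

try:
    duration_pb2.Duration(seconds=751, nanos=543, field_added_after_generation=1)
except ValueError as exc:
    print(exc)  # e.g. Protocol message Duration has no "field_added_after_generation" field.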
@@ -9328,6 +9496,75 @@ def test_create_app_profile_rest(request_type): "priority": 1, "standard_isolation": {"priority": 1}, } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_instance_admin.CreateAppProfileRequest.meta.fields[ + "app_profile" + ] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. + message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["app_profile"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["app_profile"][field])): + del request_init["app_profile"][field][i][subfield] + else: + del request_init["app_profile"][field][subfield] request = request_type(**request_init) # Mock the http request call within the method and fake a response. 
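# Illustrative sketch of the response-mocking pattern in the hunks below (assumes the
# same imports these generated tests already use): a proto-plus return value is converted
# to its underlying protobuf message with the class-level .pb() helper so that
# json_format.MessageToJson can serialize it.
from google.cloud.bigtable_admin_v2.types import instance
from google.protobuf import json_format

return_value = instance.AppProfile(name="projects/p/instances/i/appProfiles/a")
pb_value = instance.AppProfile.pb(return_value)  # plain protobuf message
json_return_value = json_format.MessageToJson(pb_value)
print(json_return_value)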
@@ -9343,8 +9580,9 @@ def test_create_app_profile_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = instance.AppProfile.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = instance.AppProfile.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -9436,8 +9674,9 @@ def test_create_app_profile_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = instance.AppProfile.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = instance.AppProfile.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -9544,20 +9783,6 @@ def test_create_app_profile_rest_bad_request( # send a request that will satisfy transcoding request_init = {"parent": "projects/sample1/instances/sample2"} - request_init["app_profile"] = { - "name": "name_value", - "etag": "etag_value", - "description": "description_value", - "multi_cluster_routing_use_any": { - "cluster_ids": ["cluster_ids_value1", "cluster_ids_value2"] - }, - "single_cluster_routing": { - "cluster_id": "cluster_id_value", - "allow_transactional_writes": True, - }, - "priority": 1, - "standard_isolation": {"priority": 1}, - } request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -9597,8 +9822,9 @@ def test_create_app_profile_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = instance.AppProfile.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = instance.AppProfile.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -9668,8 +9894,9 @@ def test_get_app_profile_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = instance.AppProfile.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = instance.AppProfile.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -9746,8 +9973,9 @@ def test_get_app_profile_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = instance.AppProfile.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = instance.AppProfile.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -9872,8 +10100,9 @@ def test_get_app_profile_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = 
instance.AppProfile.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = instance.AppProfile.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -9939,10 +10168,9 @@ def test_list_app_profiles_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_instance_admin.ListAppProfilesResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListAppProfilesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -10025,10 +10253,11 @@ def test_list_app_profiles_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_instance_admin.ListAppProfilesResponse.pb( + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListAppProfilesResponse.pb( return_value ) - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -10163,10 +10392,9 @@ def test_list_app_profiles_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_instance_admin.ListAppProfilesResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListAppProfilesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -10294,6 +10522,75 @@ def test_update_app_profile_rest(request_type): "priority": 1, "standard_isolation": {"priority": 1}, } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_instance_admin.UpdateAppProfileRequest.meta.fields[ + "app_profile" + ] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["app_profile"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["app_profile"][field])): + del request_init["app_profile"][field][i][subfield] + else: + del request_init["app_profile"][field][subfield] request = request_type(**request_init) # Mock the http request call within the method and fake a response. @@ -10488,20 +10785,6 @@ def test_update_app_profile_rest_bad_request( "name": "projects/sample1/instances/sample2/appProfiles/sample3" } } - request_init["app_profile"] = { - "name": "projects/sample1/instances/sample2/appProfiles/sample3", - "etag": "etag_value", - "description": "description_value", - "multi_cluster_routing_use_any": { - "cluster_ids": ["cluster_ids_value1", "cluster_ids_value2"] - }, - "single_cluster_routing": { - "cluster_id": "cluster_id_value", - "allow_transactional_writes": True, - }, - "priority": 1, - "standard_isolation": {"priority": 1}, - } request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
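# Illustrative sketch for the IAM hunks below (an assumed example, not from the patch):
# google.iam.v1 Policy is already a plain protobuf message, so no .pb() conversion is
# needed and json_format.MessageToJson can take the return value directly, which is why
# the pb_return_value alias is dropped there.
from google.iam.v1 import policy_pb2
from google.protobuf import json_format

return_value = policy_pb2.Policy(version=774, etag=b"etag_blob")
print(json_format.MessageToJson(return_value))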
@@ -10890,8 +11173,7 @@ def test_get_iam_policy_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -10968,8 +11250,7 @@ def test_get_iam_policy_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -11090,8 +11371,7 @@ def test_get_iam_policy_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -11157,8 +11437,7 @@ def test_set_iam_policy_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -11235,8 +11514,7 @@ def test_set_iam_policy_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -11365,8 +11643,7 @@ def test_set_iam_policy_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -11431,8 +11708,7 @@ def test_test_iam_permissions_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -11512,8 +11788,7 @@ def test_test_iam_permissions_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -11645,8 +11920,7 @@ def test_test_iam_permissions_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = 
json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -11712,10 +11986,9 @@ def test_list_hot_tablets_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_instance_admin.ListHotTabletsResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListHotTabletsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -11799,10 +12072,11 @@ def test_list_hot_tablets_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_instance_admin.ListHotTabletsResponse.pb( + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListHotTabletsResponse.pb( return_value ) - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -11941,10 +12215,9 @@ def test_list_hot_tablets_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_instance_admin.ListHotTabletsResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListHotTabletsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index aa717a3cb202..b29dc5106c7a 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -7285,8 +7285,9 @@ def test_create_table_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = gba_table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = gba_table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -7368,8 +7369,9 @@ def test_create_table_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = gba_table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = gba_table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -7503,8 +7505,9 @@ def test_create_table_rest_flattened(): # Wrap the value into a proper Response obj 
response_value = Response() response_value.status_code = 200 - pb_return_value = gba_table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = gba_table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -7856,8 +7859,9 @@ def test_list_tables_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_table_admin.ListTablesResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListTablesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -7940,8 +7944,9 @@ def test_list_tables_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_table_admin.ListTablesResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListTablesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -8075,8 +8080,9 @@ def test_list_tables_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_table_admin.ListTablesResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListTablesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -8199,8 +8205,9 @@ def test_get_table_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -8279,8 +8286,9 @@ def test_get_table_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -8403,8 +8411,9 @@ def test_get_table_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = table.Table.pb(return_value) + json_return_value = 
json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -8476,6 +8485,73 @@ def test_update_table_rest(request_type): "change_stream_config": {"retention_period": {"seconds": 751, "nanos": 543}}, "deletion_protection": True, } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_table_admin.UpdateTableRequest.meta.fields["table"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. + message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["table"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["table"][field])): + del request_init["table"][field][i][subfield] + else: + del request_init["table"][field][subfield] request = request_type(**request_init) # Mock the http request call within the method and fake a response. 
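# Illustrative sketch of how runtime_nested_fields is derived: a simplified,
# descriptor-only variant of the get_message_fields helper above (the proto-plus branch
# walks field.message.meta.fields instead). google.protobuf.struct_pb2.Struct is used
# here only as a convenient example message.
from google.protobuf import struct_pb2

def nested_field_names(descriptor):
    # (field, subfield) pairs for every message-typed field of the descriptor.
    return [
        (field.name, nested.name)
        for field in descriptor.fields
        if field.message_type is not None
        for nested in field.message_type.fields
    ]

print(nested_field_names(struct_pb2.Struct.DESCRIPTOR))  # e.g. [('fields', 'key'), ('fields', 'value')]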
@@ -8657,24 +8733,6 @@ def test_update_table_rest_bad_request( request_init = { "table": {"name": "projects/sample1/instances/sample2/tables/sample3"} } - request_init["table"] = { - "name": "projects/sample1/instances/sample2/tables/sample3", - "cluster_states": {}, - "column_families": {}, - "granularity": 1, - "restore_info": { - "source_type": 1, - "backup_info": { - "backup": "backup_value", - "start_time": {"seconds": 751, "nanos": 543}, - "end_time": {}, - "source_table": "source_table_value", - "source_backup": "source_backup_value", - }, - }, - "change_stream_config": {"retention_period": {"seconds": 751, "nanos": 543}}, - "deletion_protection": True, - } request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -9299,8 +9357,9 @@ def test_modify_column_families_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -9378,8 +9437,9 @@ def test_modify_column_families_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -9516,8 +9576,9 @@ def test_modify_column_families_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -9786,10 +9847,11 @@ def test_generate_consistency_token_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb( + # Convert return value to protobuf type + return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb( return_value ) - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -9865,10 +9927,11 @@ def test_generate_consistency_token_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb( + # Convert return value to protobuf type + return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb( return_value ) - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ 
-9996,10 +10059,11 @@ def test_generate_consistency_token_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb( + # Convert return value to protobuf type + return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb( return_value ) - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -10064,8 +10128,9 @@ def test_check_consistency_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_table_admin.CheckConsistencyResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable_table_admin.CheckConsistencyResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -10145,10 +10210,11 @@ def test_check_consistency_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_table_admin.CheckConsistencyResponse.pb( + # Convert return value to protobuf type + return_value = bigtable_table_admin.CheckConsistencyResponse.pb( return_value ) - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -10284,8 +10350,9 @@ def test_check_consistency_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_table_admin.CheckConsistencyResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable_table_admin.CheckConsistencyResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -10643,8 +10710,9 @@ def test_get_snapshot_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = table.Snapshot.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = table.Snapshot.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -10722,8 +10790,9 @@ def test_get_snapshot_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = table.Snapshot.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = table.Snapshot.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -10850,8 +10919,9 @@ def test_get_snapshot_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() 
response_value.status_code = 200 - pb_return_value = table.Snapshot.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = table.Snapshot.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -10916,8 +10986,9 @@ def test_list_snapshots_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_table_admin.ListSnapshotsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListSnapshotsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -10999,10 +11070,9 @@ def test_list_snapshots_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_table_admin.ListSnapshotsResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListSnapshotsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -11137,8 +11207,9 @@ def test_list_snapshots_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_table_admin.ListSnapshotsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListSnapshotsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -11533,6 +11604,73 @@ def test_create_backup_rest(request_type): "kms_key_version": "kms_key_version_value", }, } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_table_admin.CreateBackupRequest.meta.fields["backup"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["backup"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["backup"][field])): + del request_init["backup"][field][i][subfield] + else: + del request_init["backup"][field][subfield] request = request_type(**request_init) # Mock the http request call within the method and fake a response. @@ -11731,30 +11869,6 @@ def test_create_backup_rest_bad_request( # send a request that will satisfy transcoding request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} - request_init["backup"] = { - "name": "name_value", - "source_table": "source_table_value", - "source_backup": "source_backup_value", - "expire_time": {"seconds": 751, "nanos": 543}, - "start_time": {}, - "end_time": {}, - "size_bytes": 1089, - "state": 1, - "encryption_info": { - "encryption_type": 1, - "encryption_status": { - "code": 411, - "message": "message_value", - "details": [ - { - "type_url": "type.googleapis.com/google.protobuf.Duration", - "value": b"\x08\x0c\x10\xdb\x07", - } - ], - }, - "kms_key_version": "kms_key_version_value", - }, - } request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
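# Illustrative sketch of the is_repeated branch above, with made-up field names: when
# the sample value is a list of dicts, the stale subfield is removed from every element.
request_init = {
    "backup": {
        "details": [
            {"type_url": "type.googleapis.com/google.protobuf.Duration", "stale_subfield": 1},
            {"type_url": "type.googleapis.com/google.protobuf.Duration", "stale_subfield": 2},
        ]
    }
}
for i in range(0, len(request_init["backup"]["details"])):
    del request_init["backup"]["details"][i]["stale_subfield"]

assert all("stale_subfield" not in item for item in request_init["backup"]["details"])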
@@ -11869,8 +11983,9 @@ def test_get_backup_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = table.Backup.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = table.Backup.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -11949,8 +12064,9 @@ def test_get_backup_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = table.Backup.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = table.Backup.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -12077,8 +12193,9 @@ def test_get_backup_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = table.Backup.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = table.Backup.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -12159,6 +12276,73 @@ def test_update_backup_rest(request_type): "kms_key_version": "kms_key_version_value", }, } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_table_admin.UpdateBackupRequest.meta.fields["backup"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["backup"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["backup"][field])): + del request_init["backup"][field][i][subfield] + else: + del request_init["backup"][field][subfield] request = request_type(**request_init) # Mock the http request call within the method and fake a response. 
@@ -12175,8 +12359,9 @@ def test_update_backup_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = table.Backup.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = table.Backup.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -12253,8 +12438,9 @@ def test_update_backup_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = table.Backup.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = table.Backup.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -12352,30 +12538,6 @@ def test_update_backup_rest_bad_request( "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" } } - request_init["backup"] = { - "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4", - "source_table": "source_table_value", - "source_backup": "source_backup_value", - "expire_time": {"seconds": 751, "nanos": 543}, - "start_time": {}, - "end_time": {}, - "size_bytes": 1089, - "state": 1, - "encryption_info": { - "encryption_type": 1, - "encryption_status": { - "code": 411, - "message": "message_value", - "details": [ - { - "type_url": "type.googleapis.com/google.protobuf.Duration", - "value": b"\x08\x0c\x10\xdb\x07", - } - ], - }, - "kms_key_version": "kms_key_version_value", - }, - } request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
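# Illustrative sketch of the server-streaming variant used in the test_bigtable.py hunks
# further below (assumes the imports those tests already use): the response is converted
# to protobuf, serialized to JSON, and the payload is wrapped in a JSON array so the
# mocked HTTP body reads like a streamed sequence of messages rather than a single object.
from google.cloud.bigtable_v2.types import bigtable
from google.protobuf import json_format

return_value = bigtable.SampleRowKeysResponse(row_key=b"row_key_blob", offset_bytes=1293)
json_return_value = json_format.MessageToJson(bigtable.SampleRowKeysResponse.pb(return_value))
json_return_value = "[{}]".format(json_return_value)
print(json_return_value)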
@@ -12418,8 +12580,9 @@ def test_update_backup_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = table.Backup.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = table.Backup.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -12744,8 +12907,9 @@ def test_list_backups_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -12829,8 +12993,9 @@ def test_list_backups_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -12967,8 +13132,9 @@ def test_list_backups_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -13604,8 +13770,7 @@ def test_get_iam_policy_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -13682,8 +13847,7 @@ def test_get_iam_policy_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -13806,8 +13970,7 @@ def test_get_iam_policy_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -13873,8 +14036,7 @@ def test_set_iam_policy_rest(request_type): # 
Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -13951,8 +14113,7 @@ def test_set_iam_policy_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -14083,8 +14244,7 @@ def test_set_iam_policy_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -14149,8 +14309,7 @@ def test_test_iam_permissions_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -14230,8 +14389,7 @@ def test_test_iam_permissions_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -14365,8 +14523,7 @@ def test_test_iam_permissions_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py index 597540d696f7..2319306d722b 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py @@ -3004,8 +3004,9 @@ def test_read_rows_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable.ReadRowsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable.ReadRowsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) json_return_value = "[{}]".format(json_return_value) @@ -3086,8 +3087,9 @@ def test_read_rows_rest_required_fields(request_type=bigtable.ReadRowsRequest): response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable.ReadRowsResponse.pb(return_value) - json_return_value 
= json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable.ReadRowsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) json_return_value = "[{}]".format(json_return_value) response_value._content = json_return_value.encode("UTF-8") @@ -3215,8 +3217,9 @@ def test_read_rows_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable.ReadRowsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable.ReadRowsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) json_return_value = "[{}]".format(json_return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -3286,8 +3289,9 @@ def test_sample_row_keys_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable.SampleRowKeysResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable.SampleRowKeysResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) json_return_value = "[{}]".format(json_return_value) @@ -3372,8 +3376,9 @@ def test_sample_row_keys_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable.SampleRowKeysResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable.SampleRowKeysResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) json_return_value = "[{}]".format(json_return_value) response_value._content = json_return_value.encode("UTF-8") @@ -3501,8 +3506,9 @@ def test_sample_row_keys_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable.SampleRowKeysResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable.SampleRowKeysResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) json_return_value = "[{}]".format(json_return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -3569,8 +3575,9 @@ def test_mutate_row_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable.MutateRowResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable.MutateRowResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -3647,8 +3654,9 @@ def test_mutate_row_rest_required_fields(request_type=bigtable.MutateRowRequest) response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable.MutateRowResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = 
bigtable.MutateRowResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -3787,8 +3795,9 @@ def test_mutate_row_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable.MutateRowResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable.MutateRowResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -3858,8 +3867,9 @@ def test_mutate_rows_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable.MutateRowsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable.MutateRowsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) json_return_value = "[{}]".format(json_return_value) @@ -3939,8 +3949,9 @@ def test_mutate_rows_rest_required_fields(request_type=bigtable.MutateRowsReques response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable.MutateRowsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable.MutateRowsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) json_return_value = "[{}]".format(json_return_value) response_value._content = json_return_value.encode("UTF-8") @@ -4077,8 +4088,9 @@ def test_mutate_rows_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable.MutateRowsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable.MutateRowsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) json_return_value = "[{}]".format(json_return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -4148,8 +4160,9 @@ def test_check_and_mutate_row_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable.CheckAndMutateRowResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable.CheckAndMutateRowResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -4229,8 +4242,9 @@ def test_check_and_mutate_row_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable.CheckAndMutateRowResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable.CheckAndMutateRowResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value 
= response_value @@ -4386,8 +4400,9 @@ def test_check_and_mutate_row_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable.CheckAndMutateRowResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable.CheckAndMutateRowResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -4473,8 +4488,9 @@ def test_ping_and_warm_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable.PingAndWarmResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable.PingAndWarmResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -4547,8 +4563,9 @@ def test_ping_and_warm_rest_required_fields(request_type=bigtable.PingAndWarmReq response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable.PingAndWarmResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable.PingAndWarmResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -4670,8 +4687,9 @@ def test_ping_and_warm_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable.PingAndWarmResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable.PingAndWarmResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -4733,8 +4751,9 @@ def test_read_modify_write_row_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable.ReadModifyWriteRowResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable.ReadModifyWriteRowResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -4813,8 +4832,9 @@ def test_read_modify_write_row_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable.ReadModifyWriteRowResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable.ReadModifyWriteRowResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -4951,8 +4971,9 @@ def test_read_modify_write_row_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() 
response_value.status_code = 200 - pb_return_value = bigtable.ReadModifyWriteRowResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable.ReadModifyWriteRowResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -5018,10 +5039,11 @@ def test_generate_initial_change_stream_partitions_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse.pb( + # Convert return value to protobuf type + return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse.pb( return_value ) - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) json_return_value = "[{}]".format(json_return_value) @@ -5107,10 +5129,11 @@ def test_generate_initial_change_stream_partitions_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse.pb( + # Convert return value to protobuf type + return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse.pb( return_value ) - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) json_return_value = "[{}]".format(json_return_value) response_value._content = json_return_value.encode("UTF-8") @@ -5249,10 +5272,11 @@ def test_generate_initial_change_stream_partitions_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse.pb( + # Convert return value to protobuf type + return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse.pb( return_value ) - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) json_return_value = "[{}]".format(json_return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -5321,8 +5345,9 @@ def test_read_change_stream_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable.ReadChangeStreamResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable.ReadChangeStreamResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) json_return_value = "[{}]".format(json_return_value) @@ -5404,8 +5429,9 @@ def test_read_change_stream_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable.ReadChangeStreamResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable.ReadChangeStreamResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) json_return_value = "[{}]".format(json_return_value) response_value._content = json_return_value.encode("UTF-8") @@ -5535,8 +5561,9 @@ def test_read_change_stream_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() 
response_value.status_code = 200 - pb_return_value = bigtable.ReadChangeStreamResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable.ReadChangeStreamResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) json_return_value = "[{}]".format(json_return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value From 340f563486be5d9bb8ba471d43185c987d7486a3 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Mon, 30 Oct 2023 07:26:59 -0400 Subject: [PATCH 753/892] chore: rename rst files to avoid conflict with service names (#877) Source-Link: https://github.com/googleapis/synthtool/commit/d52e638b37b091054c869bfa6f5a9fedaba9e0dd Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:4f9b3b106ad0beafc2c8a415e3f62c1a0cc23cabea115dbe841b848f581cfe99 Co-authored-by: Owl Bot --- packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml | 4 ++-- packages/google-cloud-bigtable/.kokoro/requirements.txt | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index dd98abbdeebe..7f291dbd5f9b 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:08e34975760f002746b1d8c86fdc90660be45945ee6d9db914d1508acdf9a547 -# created: 2023-10-09T14:06:13.397766266Z + digest: sha256:4f9b3b106ad0beafc2c8a415e3f62c1a0cc23cabea115dbe841b848f581cfe99 +# created: 2023-10-18T20:26:37.410353675Z diff --git a/packages/google-cloud-bigtable/.kokoro/requirements.txt b/packages/google-cloud-bigtable/.kokoro/requirements.txt index 0332d3267e15..16170d0ca7b8 100644 --- a/packages/google-cloud-bigtable/.kokoro/requirements.txt +++ b/packages/google-cloud-bigtable/.kokoro/requirements.txt @@ -467,9 +467,9 @@ typing-extensions==4.4.0 \ --hash=sha256:1511434bb92bf8dd198c12b1cc812e800d4181cfcb867674e0f8279cc93087aa \ --hash=sha256:16fa4864408f655d35ec496218b85f79b3437c829e93320c7c9215ccfd92489e # via -r requirements.in -urllib3==1.26.17 \ - --hash=sha256:24d6a242c28d29af46c3fae832c36db3bbebcc533dd1bb549172cd739c82df21 \ - --hash=sha256:94a757d178c9be92ef5539b8840d48dc9cf1b2709c9d6b588232a055c524458b +urllib3==1.26.18 \ + --hash=sha256:34b97092d7e0a3a8cf7cd10e386f401b3737364026c45e622aa02903dffe0f07 \ + --hash=sha256:f8ecc1bba5667413457c529ab955bf8c67b45db799d159066261719e328580a0 # via # requests # twine From 1cb84dd9a2698215abeacae0b900f5230c0a1da7 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 2 Nov 2023 21:20:47 -0400 Subject: [PATCH 754/892] chore: update docfx minimum Python version (#884) Source-Link: https://github.com/googleapis/synthtool/commit/bc07fd415c39853b382bcf8315f8eeacdf334055 Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:30470597773378105e239b59fce8eb27cc97375580d592699206d17d117143d0 Co-authored-by: Owl Bot --- packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml | 4 ++-- packages/google-cloud-bigtable/.github/workflows/docs.yml | 2 +- packages/google-cloud-bigtable/noxfile.py | 2 +- 3 files changed, 4 
insertions(+), 4 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 7f291dbd5f9b..ec696b558c35 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:4f9b3b106ad0beafc2c8a415e3f62c1a0cc23cabea115dbe841b848f581cfe99 -# created: 2023-10-18T20:26:37.410353675Z + digest: sha256:30470597773378105e239b59fce8eb27cc97375580d592699206d17d117143d0 +# created: 2023-11-03T00:57:07.335914631Z diff --git a/packages/google-cloud-bigtable/.github/workflows/docs.yml b/packages/google-cloud-bigtable/.github/workflows/docs.yml index e97d89e484c9..221806cedf58 100644 --- a/packages/google-cloud-bigtable/.github/workflows/docs.yml +++ b/packages/google-cloud-bigtable/.github/workflows/docs.yml @@ -28,7 +28,7 @@ jobs: - name: Setup Python uses: actions/setup-python@v4 with: - python-version: "3.9" + python-version: "3.10" - name: Install nox run: | python -m pip install --upgrade setuptools pip wheel diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index 456191016790..fafee6ac45c9 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -342,7 +342,7 @@ def docs(session): ) -@nox.session(python="3.9") +@nox.session(python="3.10") def docfx(session): """Build the docfx yaml files for this library.""" From e06fca110dd1bcc8a2946afa7ec538c751c1c48f Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Wed, 15 Nov 2023 02:32:20 -0500 Subject: [PATCH 755/892] chore: bump urllib3 from 1.26.12 to 1.26.18 (#885) Source-Link: https://github.com/googleapis/synthtool/commit/febacccc98d6d224aff9d0bd0373bb5a4cd5969c Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:caffe0a9277daeccc4d1de5c9b55ebba0901b57c2f713ec9c876b0d4ec064f61 Co-authored-by: Owl Bot --- .../.github/.OwlBot.lock.yaml | 4 +- .../.kokoro/requirements.txt | 532 +++++++++--------- 2 files changed, 277 insertions(+), 259 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index ec696b558c35..453b540c1e58 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. 
docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:30470597773378105e239b59fce8eb27cc97375580d592699206d17d117143d0 -# created: 2023-11-03T00:57:07.335914631Z + digest: sha256:caffe0a9277daeccc4d1de5c9b55ebba0901b57c2f713ec9c876b0d4ec064f61 +# created: 2023-11-08T19:46:45.022803742Z diff --git a/packages/google-cloud-bigtable/.kokoro/requirements.txt b/packages/google-cloud-bigtable/.kokoro/requirements.txt index 16170d0ca7b8..8957e21104e2 100644 --- a/packages/google-cloud-bigtable/.kokoro/requirements.txt +++ b/packages/google-cloud-bigtable/.kokoro/requirements.txt @@ -4,91 +4,75 @@ # # pip-compile --allow-unsafe --generate-hashes requirements.in # -argcomplete==2.0.0 \ - --hash=sha256:6372ad78c89d662035101418ae253668445b391755cfe94ea52f1b9d22425b20 \ - --hash=sha256:cffa11ea77999bb0dd27bb25ff6dc142a6796142f68d45b1a26b11f58724561e +argcomplete==3.1.4 \ + --hash=sha256:72558ba729e4c468572609817226fb0a6e7e9a0a7d477b882be168c0b4a62b94 \ + --hash=sha256:fbe56f8cda08aa9a04b307d8482ea703e96a6a801611acb4be9bf3942017989f # via nox -attrs==22.1.0 \ - --hash=sha256:29adc2665447e5191d0e7c568fde78b21f9672d344281d0c6e1ab085429b22b6 \ - --hash=sha256:86efa402f67bf2df34f51a335487cf46b1ec130d02b8d39fd248abfd30da551c +attrs==23.1.0 \ + --hash=sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04 \ + --hash=sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015 # via gcp-releasetool -bleach==5.0.1 \ - --hash=sha256:085f7f33c15bd408dd9b17a4ad77c577db66d76203e5984b1bd59baeee948b2a \ - --hash=sha256:0d03255c47eb9bd2f26aa9bb7f2107732e7e8fe195ca2f64709fcf3b0a4a085c - # via readme-renderer -cachetools==5.2.0 \ - --hash=sha256:6a94c6402995a99c3970cc7e4884bb60b4a8639938157eeed436098bf9831757 \ - --hash=sha256:f9f17d2aec496a9aa6b76f53e3b614c965223c061982d434d160f930c698a9db +cachetools==5.3.2 \ + --hash=sha256:086ee420196f7b2ab9ca2db2520aca326318b68fe5ba8bc4d49cca91add450f2 \ + --hash=sha256:861f35a13a451f94e301ce2bec7cac63e881232ccce7ed67fab9b5df4d3beaa1 # via google-auth certifi==2023.7.22 \ --hash=sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082 \ --hash=sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9 # via requests -cffi==1.15.1 \ - --hash=sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5 \ - --hash=sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef \ - --hash=sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104 \ - --hash=sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426 \ - --hash=sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405 \ - --hash=sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375 \ - --hash=sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a \ - --hash=sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e \ - --hash=sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc \ - --hash=sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf \ - --hash=sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185 \ - --hash=sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497 \ - --hash=sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3 \ - --hash=sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35 \ - --hash=sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c \ - 
--hash=sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83 \ - --hash=sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21 \ - --hash=sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca \ - --hash=sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984 \ - --hash=sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac \ - --hash=sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd \ - --hash=sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee \ - --hash=sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a \ - --hash=sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2 \ - --hash=sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192 \ - --hash=sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7 \ - --hash=sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585 \ - --hash=sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f \ - --hash=sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e \ - --hash=sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27 \ - --hash=sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b \ - --hash=sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e \ - --hash=sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e \ - --hash=sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d \ - --hash=sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c \ - --hash=sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415 \ - --hash=sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82 \ - --hash=sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02 \ - --hash=sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314 \ - --hash=sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325 \ - --hash=sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c \ - --hash=sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3 \ - --hash=sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914 \ - --hash=sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045 \ - --hash=sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d \ - --hash=sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9 \ - --hash=sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5 \ - --hash=sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2 \ - --hash=sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c \ - --hash=sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3 \ - --hash=sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2 \ - --hash=sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8 \ - --hash=sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d \ - --hash=sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d \ - --hash=sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9 \ - --hash=sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162 \ - --hash=sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76 \ - 
--hash=sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4 \ - --hash=sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e \ - --hash=sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9 \ - --hash=sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6 \ - --hash=sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b \ - --hash=sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01 \ - --hash=sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0 +cffi==1.16.0 \ + --hash=sha256:0c9ef6ff37e974b73c25eecc13952c55bceed9112be2d9d938ded8e856138bcc \ + --hash=sha256:131fd094d1065b19540c3d72594260f118b231090295d8c34e19a7bbcf2e860a \ + --hash=sha256:1b8ebc27c014c59692bb2664c7d13ce7a6e9a629be20e54e7271fa696ff2b417 \ + --hash=sha256:2c56b361916f390cd758a57f2e16233eb4f64bcbeee88a4881ea90fca14dc6ab \ + --hash=sha256:2d92b25dbf6cae33f65005baf472d2c245c050b1ce709cc4588cdcdd5495b520 \ + --hash=sha256:31d13b0f99e0836b7ff893d37af07366ebc90b678b6664c955b54561fc36ef36 \ + --hash=sha256:32c68ef735dbe5857c810328cb2481e24722a59a2003018885514d4c09af9743 \ + --hash=sha256:3686dffb02459559c74dd3d81748269ffb0eb027c39a6fc99502de37d501faa8 \ + --hash=sha256:582215a0e9adbe0e379761260553ba11c58943e4bbe9c36430c4ca6ac74b15ed \ + --hash=sha256:5b50bf3f55561dac5438f8e70bfcdfd74543fd60df5fa5f62d94e5867deca684 \ + --hash=sha256:5bf44d66cdf9e893637896c7faa22298baebcd18d1ddb6d2626a6e39793a1d56 \ + --hash=sha256:6602bc8dc6f3a9e02b6c22c4fc1e47aa50f8f8e6d3f78a5e16ac33ef5fefa324 \ + --hash=sha256:673739cb539f8cdaa07d92d02efa93c9ccf87e345b9a0b556e3ecc666718468d \ + --hash=sha256:68678abf380b42ce21a5f2abde8efee05c114c2fdb2e9eef2efdb0257fba1235 \ + --hash=sha256:68e7c44931cc171c54ccb702482e9fc723192e88d25a0e133edd7aff8fcd1f6e \ + --hash=sha256:6b3d6606d369fc1da4fd8c357d026317fbb9c9b75d36dc16e90e84c26854b088 \ + --hash=sha256:748dcd1e3d3d7cd5443ef03ce8685043294ad6bd7c02a38d1bd367cfd968e000 \ + --hash=sha256:7651c50c8c5ef7bdb41108b7b8c5a83013bfaa8a935590c5d74627c047a583c7 \ + --hash=sha256:7b78010e7b97fef4bee1e896df8a4bbb6712b7f05b7ef630f9d1da00f6444d2e \ + --hash=sha256:7e61e3e4fa664a8588aa25c883eab612a188c725755afff6289454d6362b9673 \ + --hash=sha256:80876338e19c951fdfed6198e70bc88f1c9758b94578d5a7c4c91a87af3cf31c \ + --hash=sha256:8895613bcc094d4a1b2dbe179d88d7fb4a15cee43c052e8885783fac397d91fe \ + --hash=sha256:88e2b3c14bdb32e440be531ade29d3c50a1a59cd4e51b1dd8b0865c54ea5d2e2 \ + --hash=sha256:8f8e709127c6c77446a8c0a8c8bf3c8ee706a06cd44b1e827c3e6a2ee6b8c098 \ + --hash=sha256:9cb4a35b3642fc5c005a6755a5d17c6c8b6bcb6981baf81cea8bfbc8903e8ba8 \ + --hash=sha256:9f90389693731ff1f659e55c7d1640e2ec43ff725cc61b04b2f9c6d8d017df6a \ + --hash=sha256:a09582f178759ee8128d9270cd1344154fd473bb77d94ce0aeb2a93ebf0feaf0 \ + --hash=sha256:a6a14b17d7e17fa0d207ac08642c8820f84f25ce17a442fd15e27ea18d67c59b \ + --hash=sha256:a72e8961a86d19bdb45851d8f1f08b041ea37d2bd8d4fd19903bc3083d80c896 \ + --hash=sha256:abd808f9c129ba2beda4cfc53bde801e5bcf9d6e0f22f095e45327c038bfe68e \ + --hash=sha256:ac0f5edd2360eea2f1daa9e26a41db02dd4b0451b48f7c318e217ee092a213e9 \ + --hash=sha256:b29ebffcf550f9da55bec9e02ad430c992a87e5f512cd63388abb76f1036d8d2 \ + --hash=sha256:b2ca4e77f9f47c55c194982e10f058db063937845bb2b7a86c84a6cfe0aefa8b \ + --hash=sha256:b7be2d771cdba2942e13215c4e340bfd76398e9227ad10402a8767ab1865d2e6 \ + --hash=sha256:b84834d0cf97e7d27dd5b7f3aca7b6e9263c56308ab9dc8aae9784abb774d404 \ + 
--hash=sha256:b86851a328eedc692acf81fb05444bdf1891747c25af7529e39ddafaf68a4f3f \ + --hash=sha256:bcb3ef43e58665bbda2fb198698fcae6776483e0c4a631aa5647806c25e02cc0 \ + --hash=sha256:c0f31130ebc2d37cdd8e44605fb5fa7ad59049298b3f745c74fa74c62fbfcfc4 \ + --hash=sha256:c6a164aa47843fb1b01e941d385aab7215563bb8816d80ff3a363a9f8448a8dc \ + --hash=sha256:d8a9d3ebe49f084ad71f9269834ceccbf398253c9fac910c4fd7053ff1386936 \ + --hash=sha256:db8e577c19c0fda0beb7e0d4e09e0ba74b1e4c092e0e40bfa12fe05b6f6d75ba \ + --hash=sha256:dc9b18bf40cc75f66f40a7379f6a9513244fe33c0e8aa72e2d56b0196a7ef872 \ + --hash=sha256:e09f3ff613345df5e8c3667da1d918f9149bd623cd9070c983c013792a9a62eb \ + --hash=sha256:e4108df7fe9b707191e55f33efbcb2d81928e10cea45527879a4749cbe472614 \ + --hash=sha256:e6024675e67af929088fda399b2094574609396b1decb609c55fa58b028a32a1 \ + --hash=sha256:e70f54f1796669ef691ca07d046cd81a29cb4deb1e5f942003f401c0c4a2695d \ + --hash=sha256:e715596e683d2ce000574bae5d07bd522c781a822866c20495e52520564f0969 \ + --hash=sha256:e760191dd42581e023a68b758769e2da259b5d52e3103c6060ddc02c9edb8d7b \ + --hash=sha256:ed86a35631f7bfbb28e108dd96773b9d5a6ce4811cf6ea468bb6a359b256b1e4 \ + --hash=sha256:ee07e47c12890ef248766a6e55bd38ebfb2bb8edd4142d56db91b21ea68b7627 \ + --hash=sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956 \ + --hash=sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357 # via cryptography charset-normalizer==2.1.1 \ --hash=sha256:5a3d016c7c547f69d6f81fb0db9449ce888b418b5b9952cc5e6e66843e9dd845 \ @@ -109,78 +93,74 @@ colorlog==6.7.0 \ # via # gcp-docuploader # nox -commonmark==0.9.1 \ - --hash=sha256:452f9dc859be7f06631ddcb328b6919c67984aca654e5fefb3914d54691aed60 \ - --hash=sha256:da2f38c92590f83de410ba1a3cbceafbc74fee9def35f9251ba9a971d6d66fd9 - # via rich -cryptography==41.0.4 \ - --hash=sha256:004b6ccc95943f6a9ad3142cfabcc769d7ee38a3f60fb0dddbfb431f818c3a67 \ - --hash=sha256:047c4603aeb4bbd8db2756e38f5b8bd7e94318c047cfe4efeb5d715e08b49311 \ - --hash=sha256:0d9409894f495d465fe6fda92cb70e8323e9648af912d5b9141d616df40a87b8 \ - --hash=sha256:23a25c09dfd0d9f28da2352503b23e086f8e78096b9fd585d1d14eca01613e13 \ - --hash=sha256:2ed09183922d66c4ec5fdaa59b4d14e105c084dd0febd27452de8f6f74704143 \ - --hash=sha256:35c00f637cd0b9d5b6c6bd11b6c3359194a8eba9c46d4e875a3660e3b400005f \ - --hash=sha256:37480760ae08065437e6573d14be973112c9e6dcaf5f11d00147ee74f37a3829 \ - --hash=sha256:3b224890962a2d7b57cf5eeb16ccaafba6083f7b811829f00476309bce2fe0fd \ - --hash=sha256:5a0f09cefded00e648a127048119f77bc2b2ec61e736660b5789e638f43cc397 \ - --hash=sha256:5b72205a360f3b6176485a333256b9bcd48700fc755fef51c8e7e67c4b63e3ac \ - --hash=sha256:7e53db173370dea832190870e975a1e09c86a879b613948f09eb49324218c14d \ - --hash=sha256:7febc3094125fc126a7f6fb1f420d0da639f3f32cb15c8ff0dc3997c4549f51a \ - --hash=sha256:80907d3faa55dc5434a16579952ac6da800935cd98d14dbd62f6f042c7f5e839 \ - --hash=sha256:86defa8d248c3fa029da68ce61fe735432b047e32179883bdb1e79ed9bb8195e \ - --hash=sha256:8ac4f9ead4bbd0bc8ab2d318f97d85147167a488be0e08814a37eb2f439d5cf6 \ - --hash=sha256:93530900d14c37a46ce3d6c9e6fd35dbe5f5601bf6b3a5c325c7bffc030344d9 \ - --hash=sha256:9eeb77214afae972a00dee47382d2591abe77bdae166bda672fb1e24702a3860 \ - --hash=sha256:b5f4dfe950ff0479f1f00eda09c18798d4f49b98f4e2006d644b3301682ebdca \ - --hash=sha256:c3391bd8e6de35f6f1140e50aaeb3e2b3d6a9012536ca23ab0d9c35ec18c8a91 \ - --hash=sha256:c880eba5175f4307129784eca96f4e70b88e57aa3f680aeba3bab0e980b0f37d \ - 
--hash=sha256:cecfefa17042941f94ab54f769c8ce0fe14beff2694e9ac684176a2535bf9714 \ - --hash=sha256:e40211b4923ba5a6dc9769eab704bdb3fbb58d56c5b336d30996c24fcf12aadb \ - --hash=sha256:efc8ad4e6fc4f1752ebfb58aefece8b4e3c4cae940b0994d43649bdfce8d0d4f +cryptography==41.0.5 \ + --hash=sha256:0c327cac00f082013c7c9fb6c46b7cc9fa3c288ca702c74773968173bda421bf \ + --hash=sha256:0d2a6a598847c46e3e321a7aef8af1436f11c27f1254933746304ff014664d84 \ + --hash=sha256:227ec057cd32a41c6651701abc0328135e472ed450f47c2766f23267b792a88e \ + --hash=sha256:22892cc830d8b2c89ea60148227631bb96a7da0c1b722f2aac8824b1b7c0b6b8 \ + --hash=sha256:392cb88b597247177172e02da6b7a63deeff1937fa6fec3bbf902ebd75d97ec7 \ + --hash=sha256:3be3ca726e1572517d2bef99a818378bbcf7d7799d5372a46c79c29eb8d166c1 \ + --hash=sha256:573eb7128cbca75f9157dcde974781209463ce56b5804983e11a1c462f0f4e88 \ + --hash=sha256:580afc7b7216deeb87a098ef0674d6ee34ab55993140838b14c9b83312b37b86 \ + --hash=sha256:5a70187954ba7292c7876734183e810b728b4f3965fbe571421cb2434d279179 \ + --hash=sha256:73801ac9736741f220e20435f84ecec75ed70eda90f781a148f1bad546963d81 \ + --hash=sha256:7d208c21e47940369accfc9e85f0de7693d9a5d843c2509b3846b2db170dfd20 \ + --hash=sha256:8254962e6ba1f4d2090c44daf50a547cd5f0bf446dc658a8e5f8156cae0d8548 \ + --hash=sha256:88417bff20162f635f24f849ab182b092697922088b477a7abd6664ddd82291d \ + --hash=sha256:a48e74dad1fb349f3dc1d449ed88e0017d792997a7ad2ec9587ed17405667e6d \ + --hash=sha256:b948e09fe5fb18517d99994184854ebd50b57248736fd4c720ad540560174ec5 \ + --hash=sha256:c707f7afd813478e2019ae32a7c49cd932dd60ab2d2a93e796f68236b7e1fbf1 \ + --hash=sha256:d38e6031e113b7421db1de0c1b1f7739564a88f1684c6b89234fbf6c11b75147 \ + --hash=sha256:d3977f0e276f6f5bf245c403156673db103283266601405376f075c849a0b936 \ + --hash=sha256:da6a0ff8f1016ccc7477e6339e1d50ce5f59b88905585f77193ebd5068f1e797 \ + --hash=sha256:e270c04f4d9b5671ebcc792b3ba5d4488bf7c42c3c241a3748e2599776f29696 \ + --hash=sha256:e886098619d3815e0ad5790c973afeee2c0e6e04b4da90b88e6bd06e2a0b1b72 \ + --hash=sha256:ec3b055ff8f1dce8e6ef28f626e0972981475173d7973d63f271b29c8a2897da \ + --hash=sha256:fba1e91467c65fe64a82c689dc6cf58151158993b13eb7a7f3f4b7f395636723 # via # gcp-releasetool # secretstorage -distlib==0.3.6 \ - --hash=sha256:14bad2d9b04d3a36127ac97f30b12a19268f211063d8f8ee4f47108896e11b46 \ - --hash=sha256:f35c4b692542ca110de7ef0bea44d73981caeb34ca0b9b6b2e6d7790dda8f80e +distlib==0.3.7 \ + --hash=sha256:2e24928bc811348f0feb63014e97aaae3037f2cf48712d51ae61df7fd6075057 \ + --hash=sha256:9dafe54b34a028eafd95039d5e5d4851a13734540f1331060d31c9916e7147a8 # via virtualenv -docutils==0.19 \ - --hash=sha256:33995a6753c30b7f577febfc2c50411fec6aac7f7ffeb7c4cfe5991072dcf9e6 \ - --hash=sha256:5e1de4d849fee02c63b040a4a3fd567f4ab104defd8a5511fbbc24a8a017efbc +docutils==0.20.1 \ + --hash=sha256:96f387a2c5562db4476f09f13bbab2192e764cac08ebbf3a34a95d9b1e4a59d6 \ + --hash=sha256:f08a4e276c3a1583a86dce3e34aba3fe04d02bba2dd51ed16106244e8a923e3b # via readme-renderer -filelock==3.8.0 \ - --hash=sha256:55447caa666f2198c5b6b13a26d2084d26fa5b115c00d065664b2124680c4edc \ - --hash=sha256:617eb4e5eedc82fc5f47b6d61e4d11cb837c56cb4544e39081099fa17ad109d4 +filelock==3.13.1 \ + --hash=sha256:521f5f56c50f8426f5e03ad3b281b490a87ef15bc6c526f168290f0c7148d44e \ + --hash=sha256:57dbda9b35157b05fb3e58ee91448612eb674172fab98ee235ccb0b5bee19a1c # via virtualenv -gcp-docuploader==0.6.4 \ - --hash=sha256:01486419e24633af78fd0167db74a2763974765ee8078ca6eb6964d0ebd388af \ - 
--hash=sha256:70861190c123d907b3b067da896265ead2eeb9263969d6955c9e0bb091b5ccbf +gcp-docuploader==0.6.5 \ + --hash=sha256:30221d4ac3e5a2b9c69aa52fdbef68cc3f27d0e6d0d90e220fc024584b8d2318 \ + --hash=sha256:b7458ef93f605b9d46a4bf3a8dc1755dad1f31d030c8679edf304e343b347eea # via -r requirements.in -gcp-releasetool==1.10.5 \ - --hash=sha256:174b7b102d704b254f2a26a3eda2c684fd3543320ec239baf771542a2e58e109 \ - --hash=sha256:e29d29927fe2ca493105a82958c6873bb2b90d503acac56be2c229e74de0eec9 +gcp-releasetool==1.16.0 \ + --hash=sha256:27bf19d2e87aaa884096ff941aa3c592c482be3d6a2bfe6f06afafa6af2353e3 \ + --hash=sha256:a316b197a543fd036209d0caba7a8eb4d236d8e65381c80cbc6d7efaa7606d63 # via -r requirements.in -google-api-core==2.10.2 \ - --hash=sha256:10c06f7739fe57781f87523375e8e1a3a4674bf6392cd6131a3222182b971320 \ - --hash=sha256:34f24bd1d5f72a8c4519773d99ca6bf080a6c4e041b4e9f024fe230191dda62e +google-api-core==2.12.0 \ + --hash=sha256:c22e01b1e3c4dcd90998494879612c38d0a3411d1f7b679eb89e2abe3ce1f553 \ + --hash=sha256:ec6054f7d64ad13b41e43d96f735acbd763b0f3b695dabaa2d579673f6a6e160 # via # google-cloud-core # google-cloud-storage -google-auth==2.14.1 \ - --hash=sha256:ccaa901f31ad5cbb562615eb8b664b3dd0bf5404a67618e642307f00613eda4d \ - --hash=sha256:f5d8701633bebc12e0deea4df8abd8aff31c28b355360597f7f2ee60f2e4d016 +google-auth==2.23.4 \ + --hash=sha256:79905d6b1652187def79d491d6e23d0cbb3a21d3c7ba0dbaa9c8a01906b13ff3 \ + --hash=sha256:d4bbc92fe4b8bfd2f3e8d88e5ba7085935da208ee38a134fc280e7ce682a05f2 # via # gcp-releasetool # google-api-core # google-cloud-core # google-cloud-storage -google-cloud-core==2.3.2 \ - --hash=sha256:8417acf6466be2fa85123441696c4badda48db314c607cf1e5d543fa8bdc22fe \ - --hash=sha256:b9529ee7047fd8d4bf4a2182de619154240df17fbe60ead399078c1ae152af9a +google-cloud-core==2.3.3 \ + --hash=sha256:37b80273c8d7eee1ae816b3a20ae43585ea50506cb0e60f3cf5be5f87f1373cb \ + --hash=sha256:fbd11cad3e98a7e5b0343dc07cb1039a5ffd7a5bb96e1f1e27cee4bda4a90863 # via google-cloud-storage -google-cloud-storage==2.6.0 \ - --hash=sha256:104ca28ae61243b637f2f01455cc8a05e8f15a2a18ced96cb587241cdd3820f5 \ - --hash=sha256:4ad0415ff61abdd8bb2ae81c1f8f7ec7d91a1011613f2db87c614c550f97bfe9 +google-cloud-storage==2.13.0 \ + --hash=sha256:ab0bf2e1780a1b74cf17fccb13788070b729f50c252f0c94ada2aae0ca95437d \ + --hash=sha256:f62dc4c7b6cd4360d072e3deb28035fbdad491ac3d9b0b1815a12daea10f37c7 # via gcp-docuploader google-crc32c==1.5.0 \ --hash=sha256:024894d9d3cfbc5943f8f230e23950cd4906b2fe004c72e29b209420a1e6b05a \ @@ -251,29 +231,31 @@ google-crc32c==1.5.0 \ --hash=sha256:f583edb943cf2e09c60441b910d6a20b4d9d626c75a36c8fcac01a6c96c01183 \ --hash=sha256:fd8536e902db7e365f49e7d9029283403974ccf29b13fc7028b97e2295b33556 \ --hash=sha256:fe70e325aa68fa4b5edf7d1a4b6f691eb04bbccac0ace68e34820d283b5f80d4 - # via google-resumable-media -google-resumable-media==2.4.0 \ - --hash=sha256:2aa004c16d295c8f6c33b2b4788ba59d366677c0a25ae7382436cb30f776deaa \ - --hash=sha256:8d5518502f92b9ecc84ac46779bd4f09694ecb3ba38a3e7ca737a86d15cbca1f + # via + # google-cloud-storage + # google-resumable-media +google-resumable-media==2.6.0 \ + --hash=sha256:972852f6c65f933e15a4a210c2b96930763b47197cdf4aa5f5bea435efb626e7 \ + --hash=sha256:fc03d344381970f79eebb632a3c18bb1828593a2dc5572b5f90115ef7d11e81b # via google-cloud-storage -googleapis-common-protos==1.57.0 \ - --hash=sha256:27a849d6205838fb6cc3c1c21cb9800707a661bb21c6ce7fb13e99eb1f8a0c46 \ - --hash=sha256:a9f4a1d7f6d9809657b7f1316a1aa527f6664891531bcfcc13b6696e685f443c +googleapis-common-protos==1.61.0 \ + 
--hash=sha256:22f1915393bb3245343f6efe87f6fe868532efc12aa26b391b15132e1279f1c0 \ + --hash=sha256:8a64866a97f6304a7179873a465d6eee97b7a24ec6cfd78e0f575e96b821240b # via google-api-core idna==3.4 \ --hash=sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4 \ --hash=sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2 # via requests -importlib-metadata==5.0.0 \ - --hash=sha256:da31db32b304314d044d3c12c79bd59e307889b287ad12ff387b3500835fc2ab \ - --hash=sha256:ddb0e35065e8938f867ed4928d0ae5bf2a53b7773871bfe6bcc7e4fcdc7dea43 +importlib-metadata==6.8.0 \ + --hash=sha256:3ebb78df84a805d7698245025b975d9d67053cd94c79245ba4b3eb694abe68bb \ + --hash=sha256:dbace7892d8c0c4ac1ad096662232f831d4e64f4c4545bd53016a3e9d4654743 # via # -r requirements.in # keyring # twine -jaraco-classes==3.2.3 \ - --hash=sha256:2353de3288bc6b82120752201c6b1c1a14b058267fa424ed5ce5984e3b922158 \ - --hash=sha256:89559fa5c1d3c34eff6f631ad80bb21f378dbcbb35dd161fd2c6b93f5be2f98a +jaraco-classes==3.3.0 \ + --hash=sha256:10afa92b6743f25c0cf5f37c6bb6e18e2c5bb84a16527ccfc0040ea377e7aaeb \ + --hash=sha256:c063dd08e89217cee02c8d5e5ec560f2c8ce6cdc2fcdc2e68f7b2e5547ed3621 # via keyring jeepney==0.8.0 \ --hash=sha256:5efe48d255973902f6badc3ce55e2aa6c5c3b3bc642059ef3a91247bcfcc5806 \ @@ -285,75 +267,121 @@ jinja2==3.1.2 \ --hash=sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852 \ --hash=sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61 # via gcp-releasetool -keyring==23.11.0 \ - --hash=sha256:3dd30011d555f1345dec2c262f0153f2f0ca6bca041fb1dc4588349bb4c0ac1e \ - --hash=sha256:ad192263e2cdd5f12875dedc2da13534359a7e760e77f8d04b50968a821c2361 +keyring==24.2.0 \ + --hash=sha256:4901caaf597bfd3bbd78c9a0c7c4c29fcd8310dab2cffefe749e916b6527acd6 \ + --hash=sha256:ca0746a19ec421219f4d713f848fa297a661a8a8c1504867e55bfb5e09091509 # via # gcp-releasetool # twine -markupsafe==2.1.1 \ - --hash=sha256:0212a68688482dc52b2d45013df70d169f542b7394fc744c02a57374a4207003 \ - --hash=sha256:089cf3dbf0cd6c100f02945abeb18484bd1ee57a079aefd52cffd17fba910b88 \ - --hash=sha256:10c1bfff05d95783da83491be968e8fe789263689c02724e0c691933c52994f5 \ - --hash=sha256:33b74d289bd2f5e527beadcaa3f401e0df0a89927c1559c8566c066fa4248ab7 \ - --hash=sha256:3799351e2336dc91ea70b034983ee71cf2f9533cdff7c14c90ea126bfd95d65a \ - --hash=sha256:3ce11ee3f23f79dbd06fb3d63e2f6af7b12db1d46932fe7bd8afa259a5996603 \ - --hash=sha256:421be9fbf0ffe9ffd7a378aafebbf6f4602d564d34be190fc19a193232fd12b1 \ - --hash=sha256:43093fb83d8343aac0b1baa75516da6092f58f41200907ef92448ecab8825135 \ - --hash=sha256:46d00d6cfecdde84d40e572d63735ef81423ad31184100411e6e3388d405e247 \ - --hash=sha256:4a33dea2b688b3190ee12bd7cfa29d39c9ed176bda40bfa11099a3ce5d3a7ac6 \ - --hash=sha256:4b9fe39a2ccc108a4accc2676e77da025ce383c108593d65cc909add5c3bd601 \ - --hash=sha256:56442863ed2b06d19c37f94d999035e15ee982988920e12a5b4ba29b62ad1f77 \ - --hash=sha256:671cd1187ed5e62818414afe79ed29da836dde67166a9fac6d435873c44fdd02 \ - --hash=sha256:694deca8d702d5db21ec83983ce0bb4b26a578e71fbdbd4fdcd387daa90e4d5e \ - --hash=sha256:6a074d34ee7a5ce3effbc526b7083ec9731bb3cbf921bbe1d3005d4d2bdb3a63 \ - --hash=sha256:6d0072fea50feec76a4c418096652f2c3238eaa014b2f94aeb1d56a66b41403f \ - --hash=sha256:6fbf47b5d3728c6aea2abb0589b5d30459e369baa772e0f37a0320185e87c980 \ - --hash=sha256:7f91197cc9e48f989d12e4e6fbc46495c446636dfc81b9ccf50bb0ec74b91d4b \ - --hash=sha256:86b1f75c4e7c2ac2ccdaec2b9022845dbb81880ca318bb7a0a01fbf7813e3812 \ - 
--hash=sha256:8dc1c72a69aa7e082593c4a203dcf94ddb74bb5c8a731e4e1eb68d031e8498ff \ - --hash=sha256:8e3dcf21f367459434c18e71b2a9532d96547aef8a871872a5bd69a715c15f96 \ - --hash=sha256:8e576a51ad59e4bfaac456023a78f6b5e6e7651dcd383bcc3e18d06f9b55d6d1 \ - --hash=sha256:96e37a3dc86e80bf81758c152fe66dbf60ed5eca3d26305edf01892257049925 \ - --hash=sha256:97a68e6ada378df82bc9f16b800ab77cbf4b2fada0081794318520138c088e4a \ - --hash=sha256:99a2a507ed3ac881b975a2976d59f38c19386d128e7a9a18b7df6fff1fd4c1d6 \ - --hash=sha256:a49907dd8420c5685cfa064a1335b6754b74541bbb3706c259c02ed65b644b3e \ - --hash=sha256:b09bf97215625a311f669476f44b8b318b075847b49316d3e28c08e41a7a573f \ - --hash=sha256:b7bd98b796e2b6553da7225aeb61f447f80a1ca64f41d83612e6139ca5213aa4 \ - --hash=sha256:b87db4360013327109564f0e591bd2a3b318547bcef31b468a92ee504d07ae4f \ - --hash=sha256:bcb3ed405ed3222f9904899563d6fc492ff75cce56cba05e32eff40e6acbeaa3 \ - --hash=sha256:d4306c36ca495956b6d568d276ac11fdd9c30a36f1b6eb928070dc5360b22e1c \ - --hash=sha256:d5ee4f386140395a2c818d149221149c54849dfcfcb9f1debfe07a8b8bd63f9a \ - --hash=sha256:dda30ba7e87fbbb7eab1ec9f58678558fd9a6b8b853530e176eabd064da81417 \ - --hash=sha256:e04e26803c9c3851c931eac40c695602c6295b8d432cbe78609649ad9bd2da8a \ - --hash=sha256:e1c0b87e09fa55a220f058d1d49d3fb8df88fbfab58558f1198e08c1e1de842a \ - --hash=sha256:e72591e9ecd94d7feb70c1cbd7be7b3ebea3f548870aa91e2732960fa4d57a37 \ - --hash=sha256:e8c843bbcda3a2f1e3c2ab25913c80a3c5376cd00c6e8c4a86a89a28c8dc5452 \ - --hash=sha256:efc1913fd2ca4f334418481c7e595c00aad186563bbc1ec76067848c7ca0a933 \ - --hash=sha256:f121a1420d4e173a5d96e47e9a0c0dcff965afdf1626d28de1460815f7c4ee7a \ - --hash=sha256:fc7b548b17d238737688817ab67deebb30e8073c95749d55538ed473130ec0c7 +markdown-it-py==3.0.0 \ + --hash=sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1 \ + --hash=sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb + # via rich +markupsafe==2.1.3 \ + --hash=sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e \ + --hash=sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e \ + --hash=sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431 \ + --hash=sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686 \ + --hash=sha256:14ff806850827afd6b07a5f32bd917fb7f45b046ba40c57abdb636674a8b559c \ + --hash=sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559 \ + --hash=sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc \ + --hash=sha256:1b8dd8c3fd14349433c79fa8abeb573a55fc0fdd769133baac1f5e07abf54aeb \ + --hash=sha256:1f67c7038d560d92149c060157d623c542173016c4babc0c1913cca0564b9939 \ + --hash=sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c \ + --hash=sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0 \ + --hash=sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4 \ + --hash=sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9 \ + --hash=sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575 \ + --hash=sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba \ + --hash=sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d \ + --hash=sha256:47d4f1c5f80fc62fdd7777d0d40a2e9dda0a05883ab11374334f6c4de38adffd \ + --hash=sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3 \ + --hash=sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00 \ + 
--hash=sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155 \ + --hash=sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac \ + --hash=sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52 \ + --hash=sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f \ + --hash=sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8 \ + --hash=sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b \ + --hash=sha256:715d3562f79d540f251b99ebd6d8baa547118974341db04f5ad06d5ea3eb8007 \ + --hash=sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24 \ + --hash=sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea \ + --hash=sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198 \ + --hash=sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0 \ + --hash=sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee \ + --hash=sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be \ + --hash=sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2 \ + --hash=sha256:8f9293864fe09b8149f0cc42ce56e3f0e54de883a9de90cd427f191c346eb2e1 \ + --hash=sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707 \ + --hash=sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6 \ + --hash=sha256:9aad3c1755095ce347e26488214ef77e0485a3c34a50c5a5e2471dff60b9dd9c \ + --hash=sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58 \ + --hash=sha256:aa57bd9cf8ae831a362185ee444e15a93ecb2e344c8e52e4d721ea3ab6ef1823 \ + --hash=sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779 \ + --hash=sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636 \ + --hash=sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c \ + --hash=sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad \ + --hash=sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee \ + --hash=sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc \ + --hash=sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2 \ + --hash=sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48 \ + --hash=sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7 \ + --hash=sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e \ + --hash=sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b \ + --hash=sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa \ + --hash=sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5 \ + --hash=sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e \ + --hash=sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb \ + --hash=sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9 \ + --hash=sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57 \ + --hash=sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc \ + --hash=sha256:f698de3fd0c4e6972b92290a45bd9b1536bffe8c6759c62471efaa8acb4c37bc \ + --hash=sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2 \ + --hash=sha256:ffcc3f7c66b5f5b7931a5aa68fc9cecc51e685ef90282f4a82f0f5e9b704ad11 # via jinja2 -more-itertools==9.0.0 \ - --hash=sha256:250e83d7e81d0c87ca6bd942e6aeab8cc9daa6096d12c5308f3f92fa5e5c1f41 \ - 
--hash=sha256:5a6257e40878ef0520b1803990e3e22303a41b5714006c32a3fd8304b26ea1ab +mdurl==0.1.2 \ + --hash=sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8 \ + --hash=sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba + # via markdown-it-py +more-itertools==10.1.0 \ + --hash=sha256:626c369fa0eb37bac0291bce8259b332fd59ac792fa5497b59837309cd5b114a \ + --hash=sha256:64e0735fcfdc6f3464ea133afe8ea4483b1c5fe3a3d69852e6503b43a0b222e6 # via jaraco-classes -nox==2022.11.21 \ - --hash=sha256:0e41a990e290e274cb205a976c4c97ee3c5234441a8132c8c3fd9ea3c22149eb \ - --hash=sha256:e21c31de0711d1274ca585a2c5fde36b1aa962005ba8e9322bf5eeed16dcd684 +nh3==0.2.14 \ + --hash=sha256:116c9515937f94f0057ef50ebcbcc10600860065953ba56f14473ff706371873 \ + --hash=sha256:18415df36db9b001f71a42a3a5395db79cf23d556996090d293764436e98e8ad \ + --hash=sha256:203cac86e313cf6486704d0ec620a992c8bc164c86d3a4fd3d761dd552d839b5 \ + --hash=sha256:2b0be5c792bd43d0abef8ca39dd8acb3c0611052ce466d0401d51ea0d9aa7525 \ + --hash=sha256:377aaf6a9e7c63962f367158d808c6a1344e2b4f83d071c43fbd631b75c4f0b2 \ + --hash=sha256:525846c56c2bcd376f5eaee76063ebf33cf1e620c1498b2a40107f60cfc6054e \ + --hash=sha256:5529a3bf99402c34056576d80ae5547123f1078da76aa99e8ed79e44fa67282d \ + --hash=sha256:7771d43222b639a4cd9e341f870cee336b9d886de1ad9bec8dddab22fe1de450 \ + --hash=sha256:88c753efbcdfc2644a5012938c6b9753f1c64a5723a67f0301ca43e7b85dcf0e \ + --hash=sha256:93a943cfd3e33bd03f77b97baa11990148687877b74193bf777956b67054dcc6 \ + --hash=sha256:9be2f68fb9a40d8440cbf34cbf40758aa7f6093160bfc7fb018cce8e424f0c3a \ + --hash=sha256:a0c509894fd4dccdff557068e5074999ae3b75f4c5a2d6fb5415e782e25679c4 \ + --hash=sha256:ac8056e937f264995a82bf0053ca898a1cb1c9efc7cd68fa07fe0060734df7e4 \ + --hash=sha256:aed56a86daa43966dd790ba86d4b810b219f75b4bb737461b6886ce2bde38fd6 \ + --hash=sha256:e8986f1dd3221d1e741fda0a12eaa4a273f1d80a35e31a1ffe579e7c621d069e \ + --hash=sha256:f99212a81c62b5f22f9e7c3e347aa00491114a5647e1f13bbebd79c3e5f08d75 + # via readme-renderer +nox==2023.4.22 \ + --hash=sha256:0b1adc619c58ab4fa57d6ab2e7823fe47a32e70202f287d78474adcc7bda1891 \ + --hash=sha256:46c0560b0dc609d7d967dc99e22cb463d3c4caf54a5fda735d6c11b5177e3a9f # via -r requirements.in -packaging==21.3 \ - --hash=sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb \ - --hash=sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522 +packaging==23.2 \ + --hash=sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5 \ + --hash=sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7 # via # gcp-releasetool # nox -pkginfo==1.8.3 \ - --hash=sha256:848865108ec99d4901b2f7e84058b6e7660aae8ae10164e015a6dcf5b242a594 \ - --hash=sha256:a84da4318dd86f870a9447a8c98340aa06216bfc6f2b7bdc4b8766984ae1867c +pkginfo==1.9.6 \ + --hash=sha256:4b7a555a6d5a22169fcc9cf7bfd78d296b0361adad412a346c1226849af5e546 \ + --hash=sha256:8fd5896e8718a4372f0ea9cc9d96f6417c9b986e23a4d116dda26b62cc29d046 # via twine -platformdirs==2.5.4 \ - --hash=sha256:1006647646d80f16130f052404c6b901e80ee4ed6bef6792e1f238a8969106f7 \ - --hash=sha256:af0276409f9a02373d540bf8480021a048711d572745aef4b7842dad245eba10 +platformdirs==3.11.0 \ + --hash=sha256:cf8ee52a3afdb965072dcc652433e0c7e3e40cf5ea1477cd4b3b1d2eb75495b3 \ + --hash=sha256:e9d171d00af68be50e9202731309c4e658fd8bc76f55c11c7dd760d023bda68e # via virtualenv protobuf==3.20.3 \ --hash=sha256:03038ac1cfbc41aa21f6afcbcd357281d7521b4157926f30ebecc8d4ea59dcb7 \ @@ -383,34 +411,30 @@ protobuf==3.20.3 \ # 
gcp-releasetool # google-api-core # googleapis-common-protos -pyasn1==0.4.8 \ - --hash=sha256:39c7e2ec30515947ff4e87fb6f456dfc6e84857d34be479c9d4a4ba4bf46aa5d \ - --hash=sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba +pyasn1==0.5.0 \ + --hash=sha256:87a2121042a1ac9358cabcaf1d07680ff97ee6404333bacca15f76aa8ad01a57 \ + --hash=sha256:97b7290ca68e62a832558ec3976f15cbf911bf5d7c7039d8b861c2a0ece69fde # via # pyasn1-modules # rsa -pyasn1-modules==0.2.8 \ - --hash=sha256:905f84c712230b2c592c19470d3ca8d552de726050d1d1716282a1f6146be65e \ - --hash=sha256:a50b808ffeb97cb3601dd25981f6b016cbb3d31fbf57a8b8a87428e6158d0c74 +pyasn1-modules==0.3.0 \ + --hash=sha256:5bd01446b736eb9d31512a30d46c1ac3395d676c6f3cafa4c03eb54b9925631c \ + --hash=sha256:d3ccd6ed470d9ffbc716be08bd90efbd44d0734bc9303818f7336070984a162d # via google-auth pycparser==2.21 \ --hash=sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9 \ --hash=sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206 # via cffi -pygments==2.15.0 \ - --hash=sha256:77a3299119af881904cd5ecd1ac6a66214b6e9bed1f2db16993b54adede64094 \ - --hash=sha256:f7e36cffc4c517fbc252861b9a6e4644ca0e5abadf9a113c72d1358ad09b9500 +pygments==2.16.1 \ + --hash=sha256:13fc09fa63bc8d8671a6d247e1eb303c4b343eaee81d861f3404db2935653692 \ + --hash=sha256:1daff0494820c69bc8941e407aa20f577374ee88364ee10a98fdbe0aece96e29 # via # readme-renderer # rich -pyjwt==2.6.0 \ - --hash=sha256:69285c7e31fc44f68a1feb309e948e0df53259d579295e6cfe2b1792329f05fd \ - --hash=sha256:d83c3d892a77bbb74d3e1a2cfa90afaadb60945205d1095d9221f04466f64c14 +pyjwt==2.8.0 \ + --hash=sha256:57e28d156e3d5c10088e0c68abb90bfac3df82b40a71bd0daa20c65ccd5c23de \ + --hash=sha256:59127c392cc44c2da5bb3192169a91f429924e17aff6534d70fdc02ab3e04320 # via gcp-releasetool -pyparsing==3.0.9 \ - --hash=sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb \ - --hash=sha256:5026bae9a10eeaefb61dab2f09052b9f4307d44aee4eda64b309723d8d206bbc - # via packaging pyperclip==1.8.2 \ --hash=sha256:105254a8b04934f0bc84e9c24eb360a591aaf6535c9def5f29d92af107a9bf57 # via gcp-releasetool @@ -418,9 +442,9 @@ python-dateutil==2.8.2 \ --hash=sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86 \ --hash=sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9 # via gcp-releasetool -readme-renderer==37.3 \ - --hash=sha256:cd653186dfc73055656f090f227f5cb22a046d7f71a841dfa305f55c9a513273 \ - --hash=sha256:f67a16caedfa71eef48a31b39708637a6f4664c4394801a7b0d6432d13907343 +readme-renderer==42.0 \ + --hash=sha256:13d039515c1f24de668e2c93f2e877b9dbe6c6c32328b90a40a49d8b2b85f36d \ + --hash=sha256:2d55489f83be4992fe4454939d1a051c33edbab778e82761d060c9fc6b308cd1 # via twine requests==2.31.0 \ --hash=sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f \ @@ -431,17 +455,17 @@ requests==2.31.0 \ # google-cloud-storage # requests-toolbelt # twine -requests-toolbelt==0.10.1 \ - --hash=sha256:18565aa58116d9951ac39baa288d3adb5b3ff975c4f25eee78555d89e8f247f7 \ - --hash=sha256:62e09f7ff5ccbda92772a29f394a49c3ad6cb181d568b1337626b2abb628a63d +requests-toolbelt==1.0.0 \ + --hash=sha256:7681a0a3d047012b5bdc0ee37d7f8f07ebe76ab08caeccfc3921ce23c88d5bc6 \ + --hash=sha256:cccfdd665f0a24fcf4726e690f65639d272bb0637b9b92dfd91a5568ccf6bd06 # via twine rfc3986==2.0.0 \ --hash=sha256:50b1502b60e289cb37883f3dfd34532b8873c7de9f49bb546641ce9cbd256ebd \ --hash=sha256:97aacf9dbd4bfd829baad6e6309fa6573aaf1be3f6fa735c8ab05e46cecb261c # via twine -rich==12.6.0 \ - 
--hash=sha256:a4eb26484f2c82589bd9a17c73d32a010b1e29d89f1604cd9bf3a2097b81bb5e \ - --hash=sha256:ba3a3775974105c221d31141f2c116f4fd65c5ceb0698657a11e9f295ec93fd0 +rich==13.6.0 \ + --hash=sha256:2b38e2fe9ca72c9a00170a1a2d20c63c790d0e10ef1fe35eba76e1e7b1d7d245 \ + --hash=sha256:5c14d22737e6d5084ef4771b62d5d4363165b403455a30a1c8ca39dc7b644bef # via twine rsa==4.9 \ --hash=sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7 \ @@ -455,43 +479,37 @@ six==1.16.0 \ --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 # via - # bleach # gcp-docuploader - # google-auth # python-dateutil -twine==4.0.1 \ - --hash=sha256:42026c18e394eac3e06693ee52010baa5313e4811d5a11050e7d48436cf41b9e \ - --hash=sha256:96b1cf12f7ae611a4a40b6ae8e9570215daff0611828f5fe1f37a16255ab24a0 +twine==4.0.2 \ + --hash=sha256:929bc3c280033347a00f847236564d1c52a3e61b1ac2516c97c48f3ceab756d8 \ + --hash=sha256:9e102ef5fdd5a20661eb88fad46338806c3bd32cf1db729603fe3697b1bc83c8 # via -r requirements.in -typing-extensions==4.4.0 \ - --hash=sha256:1511434bb92bf8dd198c12b1cc812e800d4181cfcb867674e0f8279cc93087aa \ - --hash=sha256:16fa4864408f655d35ec496218b85f79b3437c829e93320c7c9215ccfd92489e +typing-extensions==4.8.0 \ + --hash=sha256:8f92fc8806f9a6b641eaa5318da32b44d401efaac0f6678c9bc448ba3605faa0 \ + --hash=sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef # via -r requirements.in -urllib3==1.26.18 \ - --hash=sha256:34b97092d7e0a3a8cf7cd10e386f401b3737364026c45e622aa02903dffe0f07 \ - --hash=sha256:f8ecc1bba5667413457c529ab955bf8c67b45db799d159066261719e328580a0 +urllib3==2.0.7 \ + --hash=sha256:c97dfde1f7bd43a71c8d2a58e369e9b2bf692d1334ea9f9cae55add7d0dd0f84 \ + --hash=sha256:fdb6d215c776278489906c2f8916e6e7d4f5a9b602ccbcfdf7f016fc8da0596e # via # requests # twine -virtualenv==20.16.7 \ - --hash=sha256:8691e3ff9387f743e00f6bb20f70121f5e4f596cae754531f2b3b3a1b1ac696e \ - --hash=sha256:efd66b00386fdb7dbe4822d172303f40cd05e50e01740b19ea42425cbe653e29 +virtualenv==20.24.6 \ + --hash=sha256:02ece4f56fbf939dbbc33c0715159951d6bf14aaf5457b092e4548e1382455af \ + --hash=sha256:520d056652454c5098a00c0f073611ccbea4c79089331f60bf9d7ba247bb7381 # via nox -webencodings==0.5.1 \ - --hash=sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78 \ - --hash=sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923 - # via bleach -wheel==0.38.4 \ - --hash=sha256:965f5259b566725405b05e7cf774052044b1ed30119b5d586b2703aafe8719ac \ - --hash=sha256:b60533f3f5d530e971d6737ca6d58681ee434818fab630c83a734bb10c083ce8 +wheel==0.41.3 \ + --hash=sha256:488609bc63a29322326e05560731bf7bfea8e48ad646e1f5e40d366607de0942 \ + --hash=sha256:4d4987ce51a49370ea65c0bfd2234e8ce80a12780820d9dc462597a6e60d0841 # via -r requirements.in -zipp==3.10.0 \ - --hash=sha256:4fcb6f278987a6605757302a6e40e896257570d11c51628968ccb2a47e80c6c1 \ - --hash=sha256:7a7262fd930bd3e36c50b9a64897aec3fafff3dfdeec9623ae22b40e93f99bb8 +zipp==3.17.0 \ + --hash=sha256:0e923e726174922dce09c53c59ad483ff7bbb8e572e00c7f7c46b88556409f31 \ + --hash=sha256:84e64a1c28cf7e91ed2078bb8cc8c259cb19b76942096c8d7b84947690cabaf0 # via importlib-metadata # The following packages are considered to be unsafe in a requirements file: -setuptools==65.5.1 \ - --hash=sha256:d0b9a8433464d5800cbe05094acf5c6d52a91bfac9b52bcfc4d41382be5d5d31 \ - --hash=sha256:e197a19aa8ec9722928f2206f8de752def0e4c9fc6953527360d1c36d94ddb2f +setuptools==68.2.2 \ + 
--hash=sha256:4ac1475276d2f1c48684874089fefcd83bd7162ddaafb81fac866ba0db282a87 \ + --hash=sha256:b454a35605876da60632df1a60f736524eb73cc47bbc9f3f1ef1b644de74fd2a # via -r requirements.in From 363ff93d9ac1e460027a2edd2763c3c9b1e0baee Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Wed, 29 Nov 2023 15:49:59 -0800 Subject: [PATCH 756/892] chore: Update gapic-generator-python to v1.12.0 (#891) --- .../bigtable_instance_admin/async_client.py | 74 +++++++++--------- .../bigtable_table_admin/async_client.py | 76 +++++++++---------- .../services/bigtable/async_client.py | 26 +++---- 3 files changed, 88 insertions(+), 88 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py index 3f67620c0488..e4c4639af412 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ -33,14 +33,14 @@ from google.api_core.client_options import ClientOptions from google.api_core import exceptions as core_exceptions from google.api_core import gapic_v1 -from google.api_core import retry as retries +from google.api_core import retry_async as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] + OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault] except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore + OptionalRetry = Union[retries.AsyncRetry, object] # type: ignore from google.api_core import operation # type: ignore from google.api_core import operation_async # type: ignore @@ -305,7 +305,7 @@ async def create_instance( This corresponds to the ``clusters`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -404,7 +404,7 @@ async def get_instance( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -440,7 +440,7 @@ async def get_instance( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_instance, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=60.0, multiplier=2, @@ -496,7 +496,7 @@ async def list_instances( This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -529,7 +529,7 @@ async def list_instances( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_instances, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=60.0, multiplier=2, @@ -581,7 +581,7 @@ async def update_instance( served from all [Clusters][google.bigtable.admin.v2.Cluster] in the instance. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -603,7 +603,7 @@ async def update_instance( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.update_instance, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=60.0, multiplier=2, @@ -669,7 +669,7 @@ async def partial_update_instance( This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -709,7 +709,7 @@ async def partial_update_instance( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.partial_update_instance, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=60.0, multiplier=2, @@ -775,7 +775,7 @@ async def delete_instance( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -869,7 +869,7 @@ async def create_cluster( This corresponds to the ``cluster`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -964,7 +964,7 @@ async def get_cluster( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -999,7 +999,7 @@ async def get_cluster( # and friendly error handling. 
rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_cluster, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=60.0, multiplier=2, @@ -1057,7 +1057,7 @@ async def list_clusters( This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1090,7 +1090,7 @@ async def list_clusters( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_clusters, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=60.0, multiplier=2, @@ -1141,7 +1141,7 @@ async def update_cluster( location, capable of serving all [Tables][google.bigtable.admin.v2.Table] in the parent [Instance][google.bigtable.admin.v2.Instance]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1164,7 +1164,7 @@ async def update_cluster( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.update_cluster, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=60.0, multiplier=2, @@ -1248,7 +1248,7 @@ async def partial_update_cluster( This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1343,7 +1343,7 @@ async def delete_cluster( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1431,7 +1431,7 @@ async def create_app_profile( This corresponds to the ``app_profile`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1515,7 +1515,7 @@ async def get_app_profile( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1549,7 +1549,7 @@ async def get_app_profile( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_app_profile, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=60.0, multiplier=2, @@ -1608,7 +1608,7 @@ async def list_app_profiles( This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1644,7 +1644,7 @@ async def list_app_profiles( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_app_profiles, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=60.0, multiplier=2, @@ -1717,7 +1717,7 @@ async def update_app_profile( This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1754,7 +1754,7 @@ async def update_app_profile( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.update_app_profile, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=60.0, multiplier=2, @@ -1820,7 +1820,7 @@ async def delete_app_profile( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1890,7 +1890,7 @@ async def get_iam_policy( This corresponds to the ``resource`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1953,7 +1953,7 @@ async def get_iam_policy( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_iam_policy, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=60.0, multiplier=2, @@ -2008,7 +2008,7 @@ async def set_iam_policy( This corresponds to the ``resource`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -2126,7 +2126,7 @@ async def test_iam_permissions( This corresponds to the ``permissions`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -2160,7 +2160,7 @@ async def test_iam_permissions( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.test_iam_permissions, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=60.0, multiplier=2, @@ -2217,7 +2217,7 @@ async def list_hot_tablets( This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -2253,7 +2253,7 @@ async def list_hot_tablets( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_hot_tablets, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=60.0, multiplier=2, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index d5edeb91db72..5a4435bde9fb 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -33,14 +33,14 @@ from google.api_core.client_options import ClientOptions from google.api_core import exceptions as core_exceptions from google.api_core import gapic_v1 -from google.api_core import retry as retries +from google.api_core import retry_async as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] + OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault] except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore + OptionalRetry = Union[retries.AsyncRetry, object] # type: ignore from google.api_core import operation # type: ignore from google.api_core import operation_async # type: ignore @@ -282,7 +282,7 @@ async def create_table( This corresponds to the ``table`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -402,7 +402,7 @@ async def create_table_from_snapshot( This corresponds to the ``source_snapshot`` field on the ``request`` instance; if ``request`` is provided, this should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -494,7 +494,7 @@ async def list_tables( This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -530,7 +530,7 @@ async def list_tables( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_tables, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=60.0, multiplier=2, @@ -593,7 +593,7 @@ async def get_table( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -628,7 +628,7 @@ async def get_table( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_table, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=60.0, multiplier=2, @@ -701,7 +701,7 @@ async def update_table( This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -794,7 +794,7 @@ async def delete_table( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -865,7 +865,7 @@ async def undelete_table( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -976,7 +976,7 @@ async def modify_column_families( This corresponds to the ``modifications`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1051,7 +1051,7 @@ async def drop_row_range( request (Optional[Union[google.cloud.bigtable_admin_v2.types.DropRowRangeRequest, dict]]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1111,7 +1111,7 @@ async def generate_consistency_token( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1144,7 +1144,7 @@ async def generate_consistency_token( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.generate_consistency_token, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=60.0, multiplier=2, @@ -1211,7 +1211,7 @@ async def check_consistency( This corresponds to the ``consistency_token`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1246,7 +1246,7 @@ async def check_consistency( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.check_consistency, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=60.0, multiplier=2, @@ -1344,7 +1344,7 @@ async def snapshot_table( This corresponds to the ``description`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1459,7 +1459,7 @@ async def get_snapshot( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1503,7 +1503,7 @@ async def get_snapshot( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_snapshot, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=60.0, multiplier=2, @@ -1576,7 +1576,7 @@ async def list_snapshots( This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1619,7 +1619,7 @@ async def list_snapshots( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_snapshots, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=60.0, multiplier=2, @@ -1698,7 +1698,7 @@ async def delete_snapshot( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1794,7 +1794,7 @@ async def create_backup( This corresponds to the ``backup`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1886,7 +1886,7 @@ async def get_backup( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1917,7 +1917,7 @@ async def get_backup( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_backup, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=60.0, multiplier=2, @@ -1987,7 +1987,7 @@ async def update_backup( This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -2066,7 +2066,7 @@ async def delete_backup( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -2138,7 +2138,7 @@ async def list_backups( This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -2174,7 +2174,7 @@ async def list_backups( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_backups, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=60.0, multiplier=2, @@ -2235,7 +2235,7 @@ async def restore_table( request (Optional[Union[google.cloud.bigtable_admin_v2.types.RestoreTableRequest, dict]]): The request object. The request for [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -2350,7 +2350,7 @@ async def copy_backup( This corresponds to the ``expire_time`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -2446,7 +2446,7 @@ async def get_iam_policy( This corresponds to the ``resource`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -2509,7 +2509,7 @@ async def get_iam_policy( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_iam_policy, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=60.0, multiplier=2, @@ -2564,7 +2564,7 @@ async def set_iam_policy( This corresponds to the ``resource`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -2682,7 +2682,7 @@ async def test_iam_permissions( This corresponds to the ``permissions`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -2716,7 +2716,7 @@ async def test_iam_permissions( # and friendly error handling. 
rpc = gapic_v1.method_async.wrap_method( self._client._transport.test_iam_permissions, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=60.0, multiplier=2, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py index 07a782d0cf0d..33686a4a8618 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -35,14 +35,14 @@ from google.api_core.client_options import ClientOptions from google.api_core import exceptions as core_exceptions from google.api_core import gapic_v1 -from google.api_core import retry as retries +from google.api_core import retry_async as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] + OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault] except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore + OptionalRetry = Union[retries.AsyncRetry, object] # type: ignore from google.cloud.bigtable_v2.types import bigtable from google.cloud.bigtable_v2.types import data @@ -250,7 +250,7 @@ def read_rows( This corresponds to the ``app_profile_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -345,7 +345,7 @@ def sample_row_keys( This corresponds to the ``app_profile_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -457,7 +457,7 @@ async def mutate_row( This corresponds to the ``app_profile_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -496,7 +496,7 @@ async def mutate_row( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.mutate_row, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=0.01, maximum=60.0, multiplier=2, @@ -579,7 +579,7 @@ def mutate_rows( This corresponds to the ``app_profile_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -718,7 +718,7 @@ async def check_and_mutate_row( This corresponds to the ``app_profile_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -828,7 +828,7 @@ async def ping_and_warm( This corresponds to the ``app_profile_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -946,7 +946,7 @@ async def read_modify_write_row( This corresponds to the ``app_profile_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1052,7 +1052,7 @@ def generate_initial_change_stream_partitions( This corresponds to the ``app_profile_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1151,7 +1151,7 @@ def read_change_stream( This corresponds to the ``app_profile_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
metadata (Sequence[Tuple[str, str]]): Strings which should be From 1b7c136c9d591372bd214fd883b68459f56cab2e Mon Sep 17 00:00:00 2001 From: Anthonios Partheniou Date: Fri, 1 Dec 2023 19:42:27 -0500 Subject: [PATCH 757/892] feat: Introduce compatibility with native namespace packages (#893) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: Introduce compatibility with native namespace packages * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- .../google-cloud-bigtable/google/__init__.py | 6 --- .../google/cloud/__init__.py | 6 --- packages/google-cloud-bigtable/mypy.ini | 2 +- packages/google-cloud-bigtable/noxfile.py | 2 +- packages/google-cloud-bigtable/owlbot.py | 2 +- packages/google-cloud-bigtable/setup.py | 9 +---- .../tests/unit/test_packaging.py | 37 +++++++++++++++++++ 7 files changed, 41 insertions(+), 23 deletions(-) delete mode 100644 packages/google-cloud-bigtable/google/__init__.py delete mode 100644 packages/google-cloud-bigtable/google/cloud/__init__.py create mode 100644 packages/google-cloud-bigtable/tests/unit/test_packaging.py diff --git a/packages/google-cloud-bigtable/google/__init__.py b/packages/google-cloud-bigtable/google/__init__.py deleted file mode 100644 index a5ba8065626d..000000000000 --- a/packages/google-cloud-bigtable/google/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -try: - import pkg_resources - - pkg_resources.declare_namespace(__name__) -except ImportError: - pass diff --git a/packages/google-cloud-bigtable/google/cloud/__init__.py b/packages/google-cloud-bigtable/google/cloud/__init__.py deleted file mode 100644 index a5ba8065626d..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -try: - import pkg_resources - - pkg_resources.declare_namespace(__name__) -except ImportError: - pass diff --git a/packages/google-cloud-bigtable/mypy.ini b/packages/google-cloud-bigtable/mypy.ini index f12ed46fc21a..31cc24223c7a 100644 --- a/packages/google-cloud-bigtable/mypy.ini +++ b/packages/google-cloud-bigtable/mypy.ini @@ -1,5 +1,5 @@ [mypy] -python_version = 3.6 +python_version = 3.8 namespace_packages = True exclude = tests/unit/gapic/ diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index fafee6ac45c9..63a169cbb6fd 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -135,7 +135,7 @@ def mypy(session): ) session.install("google-cloud-testutils") # TODO: also verify types on tests, all of google package - session.run("mypy", "google/", "tests/") + session.run("mypy", "-p", "google", "-p", "tests") @nox.session(python=DEFAULT_PYTHON_VERSION) diff --git a/packages/google-cloud-bigtable/owlbot.py b/packages/google-cloud-bigtable/owlbot.py index 78c6ca2f8223..4b06aea7766d 100644 --- a/packages/google-cloud-bigtable/owlbot.py +++ b/packages/google-cloud-bigtable/owlbot.py @@ -169,7 +169,7 @@ def mypy(session): session.install("mypy", "types-setuptools", "types-protobuf", "types-mock", "types-requests") session.install("google-cloud-testutils") # TODO: also verify types on tests, all of google package - session.run("mypy", "google/", "tests/") + session.run("mypy", "-p", "google", "-p", "tests") 
@nox.session(python=DEFAULT_PYTHON_VERSION) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 495730888561..617ec77dd762 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -59,16 +59,10 @@ # benchmarks, etc. packages = [ package - for package in setuptools.PEP420PackageFinder.find() + for package in setuptools.find_namespace_packages() if package.startswith("google") ] -# Determine which namespaces are needed. -namespaces = ["google"] -if "google.cloud" in packages: - namespaces.append("google.cloud") - - setuptools.setup( name=name, version=version, @@ -93,7 +87,6 @@ ], platforms="Posix; MacOS X; Windows", packages=packages, - namespace_packages=namespaces, install_requires=dependencies, extras_require=extras, scripts=[ diff --git a/packages/google-cloud-bigtable/tests/unit/test_packaging.py b/packages/google-cloud-bigtable/tests/unit/test_packaging.py new file mode 100644 index 000000000000..93fa4d1c3881 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/test_packaging.py @@ -0,0 +1,37 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import subprocess +import sys + + +def test_namespace_package_compat(tmp_path): + # The ``google`` namespace package should not be masked + # by the presence of ``google-cloud-bigtable``. + google = tmp_path / "google" + google.mkdir() + google.joinpath("othermod.py").write_text("") + env = dict(os.environ, PYTHONPATH=str(tmp_path)) + cmd = [sys.executable, "-m", "google.othermod"] + subprocess.check_call(cmd, env=env) + + # The ``google.cloud`` namespace package should not be masked + # by the presence of ``google-cloud-bigtable``. 
+ google_cloud = tmp_path / "google" / "cloud" + google_cloud.mkdir() + google_cloud.joinpath("othermod.py").write_text("") + env = dict(os.environ, PYTHONPATH=str(tmp_path)) + cmd = [sys.executable, "-m", "google.cloud.othermod"] + subprocess.check_call(cmd, env=env) From b77b4531c323ad7d7a53a041a26a9f11cfc9be5a Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Tue, 12 Dec 2023 16:57:13 +0100 Subject: [PATCH 758/892] chore(deps): update all dependencies (#771) * chore(deps): update all dependencies * Pin apache-beam for python 3.7 * Pin apache-beam for python 3.7 * Testing earlier version of apache-beam for python 3.7 * revert * revert --------- Co-authored-by: Anthonios Partheniou --- .../.github/workflows/system_emulated.yml | 4 ++-- .../google-cloud-bigtable/samples/beam/requirements-test.txt | 2 +- packages/google-cloud-bigtable/samples/beam/requirements.txt | 2 +- .../google-cloud-bigtable/samples/hello/requirements-test.txt | 2 +- packages/google-cloud-bigtable/samples/hello/requirements.txt | 4 ++-- .../samples/hello_happybase/requirements-test.txt | 2 +- .../samples/instanceadmin/requirements-test.txt | 2 +- .../samples/instanceadmin/requirements.txt | 2 +- .../samples/metricscaler/requirements-test.txt | 4 ++-- .../samples/metricscaler/requirements.txt | 4 ++-- .../samples/quickstart/requirements-test.txt | 2 +- .../google-cloud-bigtable/samples/quickstart/requirements.txt | 2 +- .../samples/quickstart_happybase/requirements-test.txt | 2 +- .../samples/snippets/deletes/requirements-test.txt | 2 +- .../samples/snippets/deletes/requirements.txt | 2 +- .../samples/snippets/filters/requirements-test.txt | 2 +- .../samples/snippets/filters/requirements.txt | 2 +- .../samples/snippets/reads/requirements-test.txt | 2 +- .../samples/snippets/reads/requirements.txt | 2 +- .../samples/snippets/writes/requirements-test.txt | 2 +- .../samples/snippets/writes/requirements.txt | 2 +- .../samples/tableadmin/requirements-test.txt | 2 +- .../google-cloud-bigtable/samples/tableadmin/requirements.txt | 2 +- 23 files changed, 27 insertions(+), 27 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml b/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml index e1f43fd40108..f1aa7e87c216 100644 --- a/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml +++ b/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml @@ -7,7 +7,7 @@ on: jobs: run-systests: - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 steps: @@ -20,7 +20,7 @@ jobs: python-version: '3.8' - name: Setup GCloud SDK - uses: google-github-actions/setup-gcloud@v1.1.0 + uses: google-github-actions/setup-gcloud@v1.1.1 - name: Install / run Nox run: | diff --git a/packages/google-cloud-bigtable/samples/beam/requirements-test.txt b/packages/google-cloud-bigtable/samples/beam/requirements-test.txt index c4d04a08d024..70613be0cfe4 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements-test.txt @@ -1 +1 @@ -pytest==7.3.1 +pytest==7.4.0 diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index 8be9b98e061c..9b95d0b5281f 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ apache-beam==2.46.0 google-cloud-bigtable==2.17.0 -google-cloud-core==2.3.2 +google-cloud-core==2.3.3 diff --git 
a/packages/google-cloud-bigtable/samples/hello/requirements-test.txt b/packages/google-cloud-bigtable/samples/hello/requirements-test.txt index c4d04a08d024..70613be0cfe4 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements-test.txt @@ -1 +1 @@ -pytest==7.3.1 +pytest==7.4.0 diff --git a/packages/google-cloud-bigtable/samples/hello/requirements.txt b/packages/google-cloud-bigtable/samples/hello/requirements.txt index 199541ffe66f..a76d144e6d5c 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.17.0 -google-cloud-core==2.3.2 +google-cloud-bigtable==2.20.0 +google-cloud-core==2.3.3 diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt b/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt index c4d04a08d024..70613be0cfe4 100644 --- a/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt @@ -1 +1 @@ -pytest==7.3.1 +pytest==7.4.0 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt index c4d04a08d024..70613be0cfe4 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt @@ -1 +1 @@ -pytest==7.3.1 +pytest==7.4.0 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt index 04e476254af8..bba9ed8cf870 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.17.0 +google-cloud-bigtable==2.20.0 backoff==2.2.1 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt index 761227068cfd..d8ae088dd0b6 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt @@ -1,3 +1,3 @@ -pytest==7.3.1 -mock==5.0.2 +pytest==7.4.0 +mock==5.1.0 google-cloud-testutils diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index 02e08b4c8536..c0fce2294067 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.17.0 -google-cloud-monitoring==2.14.2 +google-cloud-bigtable==2.20.0 +google-cloud-monitoring==2.15.1 diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt index c4d04a08d024..70613be0cfe4 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt @@ -1 +1 @@ -pytest==7.3.1 +pytest==7.4.0 diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt 
b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt index 909f8c365834..83e37754ee7e 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.17.0 +google-cloud-bigtable==2.20.0 diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt b/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt index c4d04a08d024..70613be0cfe4 100644 --- a/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt @@ -1 +1 @@ -pytest==7.3.1 +pytest==7.4.0 diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt index c4d04a08d024..70613be0cfe4 100644 --- a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt @@ -1 +1 @@ -pytest==7.3.1 +pytest==7.4.0 diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt index 2006656312be..85b4e786f473 100644 --- a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.17.0 +google-cloud-bigtable==2.20.0 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt index c4d04a08d024..70613be0cfe4 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt @@ -1 +1 @@ -pytest==7.3.1 +pytest==7.4.0 diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt index 2006656312be..85b4e786f473 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.17.0 +google-cloud-bigtable==2.20.0 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt index c4d04a08d024..70613be0cfe4 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt @@ -1 +1 @@ -pytest==7.3.1 +pytest==7.4.0 diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt index 2006656312be..85b4e786f473 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.17.0 +google-cloud-bigtable==2.20.0 snapshottest==0.6.0 \ No newline at end of file diff --git 
a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt index 96aa71dab7f6..cbd0a47def45 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt @@ -1,2 +1,2 @@ backoff==2.2.1 -pytest==7.3.1 +pytest==7.4.0 diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt index 32cead029a58..90fa5577c016 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.17.0 \ No newline at end of file +google-cloud-bigtable==2.20.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt index ca1f33bd3f48..b4ead9993662 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt @@ -1,2 +1,2 @@ -pytest==7.3.1 +pytest==7.4.0 google-cloud-testutils==1.3.3 diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt index 909f8c365834..83e37754ee7e 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.17.0 +google-cloud-bigtable==2.20.0 From 688dd1f80445ab337cf7f507b52bf26cb6005e0a Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Tue, 12 Dec 2023 21:25:35 +0000 Subject: [PATCH 759/892] feat: Add support for Python 3.12 (#888) --- .../.github/.OwlBot.lock.yaml | 4 +- .../.github/workflows/unittest.yml | 2 +- .../.kokoro/samples/python3.12/common.cfg | 40 +++++++++++++++++++ .../.kokoro/samples/python3.12/continuous.cfg | 6 +++ .../samples/python3.12/periodic-head.cfg | 11 +++++ .../.kokoro/samples/python3.12/periodic.cfg | 6 +++ .../.kokoro/samples/python3.12/presubmit.cfg | 6 +++ .../google-cloud-bigtable/CONTRIBUTING.rst | 6 ++- packages/google-cloud-bigtable/noxfile.py | 2 +- .../samples/beam/noxfile.py | 2 +- .../samples/hello/noxfile.py | 2 +- .../samples/hello_happybase/noxfile.py | 2 +- .../samples/instanceadmin/noxfile.py | 2 +- .../samples/metricscaler/noxfile.py | 2 +- .../samples/quickstart/noxfile.py | 2 +- .../samples/quickstart_happybase/noxfile.py | 2 +- .../samples/snippets/deletes/noxfile.py | 2 +- .../samples/snippets/filters/noxfile.py | 2 +- .../samples/snippets/reads/noxfile.py | 2 +- .../samples/snippets/writes/noxfile.py | 2 +- .../samples/tableadmin/noxfile.py | 2 +- packages/google-cloud-bigtable/setup.py | 1 + .../testing/constraints-3.12.txt | 0 .../tests/unit/test_table.py | 8 ++-- 24 files changed, 94 insertions(+), 22 deletions(-) create mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.12/common.cfg create mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.12/continuous.cfg create mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.12/periodic-head.cfg create mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.12/periodic.cfg create mode 100644 
packages/google-cloud-bigtable/.kokoro/samples/python3.12/presubmit.cfg create mode 100644 packages/google-cloud-bigtable/testing/constraints-3.12.txt diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 453b540c1e58..eb4d9f794dc1 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:caffe0a9277daeccc4d1de5c9b55ebba0901b57c2f713ec9c876b0d4ec064f61 -# created: 2023-11-08T19:46:45.022803742Z + digest: sha256:bacc3af03bff793a03add584537b36b5644342931ad989e3ba1171d3bd5399f5 +# created: 2023-11-23T18:17:28.105124211Z diff --git a/packages/google-cloud-bigtable/.github/workflows/unittest.yml b/packages/google-cloud-bigtable/.github/workflows/unittest.yml index 8057a7691b12..a32027b49bc2 100644 --- a/packages/google-cloud-bigtable/.github/workflows/unittest.yml +++ b/packages/google-cloud-bigtable/.github/workflows/unittest.yml @@ -8,7 +8,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python: ['3.7', '3.8', '3.9', '3.10', '3.11'] + python: ['3.7', '3.8', '3.9', '3.10', '3.11', '3.12'] steps: - name: Checkout uses: actions/checkout@v3 diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.12/common.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.12/common.cfg new file mode 100644 index 000000000000..34e0a95f3cf4 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.12/common.cfg @@ -0,0 +1,40 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Specify which tests to run +env_vars: { + key: "RUN_TESTS_SESSION" + value: "py-3.12" +} + +# Declare build specific Cloud project. +env_vars: { + key: "BUILD_SPECIFIC_GCLOUD_PROJECT" + value: "python-docs-samples-tests-312" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-bigtable/.kokoro/test-samples.sh" +} + +# Configure the docker image for kokoro-trampoline. +env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker" +} + +# Download secrets for samples +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. 
+build_file: "python-bigtable/.kokoro/trampoline_v2.sh" \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.12/continuous.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.12/continuous.cfg new file mode 100644 index 000000000000..a1c8d9759c88 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.12/continuous.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.12/periodic-head.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.12/periodic-head.cfg new file mode 100644 index 000000000000..be25a34f9ad3 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.12/periodic-head.cfg @@ -0,0 +1,11 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-bigtable/.kokoro/test-samples-against-head.sh" +} diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.12/periodic.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.12/periodic.cfg new file mode 100644 index 000000000000..71cd1e597e38 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.12/periodic.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "False" +} diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.12/presubmit.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.12/presubmit.cfg new file mode 100644 index 000000000000..a1c8d9759c88 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.12/presubmit.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/CONTRIBUTING.rst b/packages/google-cloud-bigtable/CONTRIBUTING.rst index 504fb3742e86..947c129b765e 100644 --- a/packages/google-cloud-bigtable/CONTRIBUTING.rst +++ b/packages/google-cloud-bigtable/CONTRIBUTING.rst @@ -22,7 +22,7 @@ In order to add a feature: documentation. - The feature must work fully on the following CPython versions: - 3.7, 3.8, 3.9, 3.10 and 3.11 on both UNIX and Windows. + 3.7, 3.8, 3.9, 3.10, 3.11 and 3.12 on both UNIX and Windows. - The feature must not add unnecessary dependencies (where "unnecessary" is of course subjective, but new dependencies should @@ -72,7 +72,7 @@ We use `nox `__ to instrument our tests. - To run a single unit test:: - $ nox -s unit-3.11 -- -k + $ nox -s unit-3.12 -- -k .. note:: @@ -226,12 +226,14 @@ We support: - `Python 3.9`_ - `Python 3.10`_ - `Python 3.11`_ +- `Python 3.12`_ .. _Python 3.7: https://docs.python.org/3.7/ .. _Python 3.8: https://docs.python.org/3.8/ .. _Python 3.9: https://docs.python.org/3.9/ .. _Python 3.10: https://docs.python.org/3.10/ .. _Python 3.11: https://docs.python.org/3.11/ +.. _Python 3.12: https://docs.python.org/3.12/ Supported versions can be found in our ``noxfile.py`` `config`_. 
diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index 63a169cbb6fd..a6fb7d6f3f51 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -34,7 +34,7 @@ DEFAULT_PYTHON_VERSION = "3.8" -UNIT_TEST_PYTHON_VERSIONS: List[str] = ["3.7", "3.8", "3.9", "3.10", "3.11"] +UNIT_TEST_PYTHON_VERSIONS: List[str] = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] UNIT_TEST_STANDARD_DEPENDENCIES = [ "mock", "asyncmock", diff --git a/packages/google-cloud-bigtable/samples/beam/noxfile.py b/packages/google-cloud-bigtable/samples/beam/noxfile.py index 3d4395024ccd..80ffdb178317 100644 --- a/packages/google-cloud-bigtable/samples/beam/noxfile.py +++ b/packages/google-cloud-bigtable/samples/beam/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/packages/google-cloud-bigtable/samples/hello/noxfile.py b/packages/google-cloud-bigtable/samples/hello/noxfile.py index 7c8a63994cbd..483b55901791 100644 --- a/packages/google-cloud-bigtable/samples/hello/noxfile.py +++ b/packages/google-cloud-bigtable/samples/hello/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py b/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py index 7c8a63994cbd..483b55901791 100644 --- a/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py +++ b/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py b/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py index 7c8a63994cbd..483b55901791 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py +++ b/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] # Any default versions that should be ignored. 
IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py b/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py index 7c8a63994cbd..483b55901791 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py +++ b/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/packages/google-cloud-bigtable/samples/quickstart/noxfile.py b/packages/google-cloud-bigtable/samples/quickstart/noxfile.py index 7c8a63994cbd..483b55901791 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/noxfile.py +++ b/packages/google-cloud-bigtable/samples/quickstart/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py b/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py index 7c8a63994cbd..483b55901791 100644 --- a/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py +++ b/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/deletes/noxfile.py index 7c8a63994cbd..483b55901791 100644 --- a/packages/google-cloud-bigtable/samples/snippets/deletes/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py index 7c8a63994cbd..483b55901791 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] # Any default versions that should be ignored. 
IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py index 7c8a63994cbd..483b55901791 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py index 7c8a63994cbd..483b55901791 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py b/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py index 7c8a63994cbd..483b55901791 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py +++ b/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] # Any default versions that should be ignored. 
IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 617ec77dd762..e9bce0960720 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -82,6 +82,7 @@ "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", "Operating System :: OS Independent", "Topic :: Internet", ], diff --git a/packages/google-cloud-bigtable/testing/constraints-3.12.txt b/packages/google-cloud-bigtable/testing/constraints-3.12.txt new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/test_table.py index f2dc1448581f..032363bd70a7 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/test_table.py @@ -1067,8 +1067,8 @@ def test_table_yield_retry_rows(): for row in table.yield_rows(start_key=ROW_KEY_1, end_key=ROW_KEY_2): rows.append(row) - assert len(warned) == 1 - assert warned[0].category is DeprecationWarning + assert len(warned) >= 1 + assert DeprecationWarning in [w.category for w in warned] result = rows[1] assert result.row_key == ROW_KEY_2 @@ -1140,8 +1140,8 @@ def test_table_yield_rows_with_row_set(): for row in table.yield_rows(row_set=row_set): rows.append(row) - assert len(warned) == 1 - assert warned[0].category is DeprecationWarning + assert len(warned) >= 1 + assert DeprecationWarning in [w.category for w in warned] assert rows[0].row_key == ROW_KEY_1 assert rows[1].row_key == ROW_KEY_2 From 2fd8559e3af1cf4241d2af4003a31c4fc09eeb9f Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Tue, 12 Dec 2023 13:58:06 -0800 Subject: [PATCH 760/892] fix: mutations batcher race condition (#896) --- .../google/cloud/bigtable/batcher.py | 118 ++++++++++-------- .../tests/system/conftest.py | 2 +- .../tests/system/test_data_api.py | 36 ++++++ 3 files changed, 104 insertions(+), 52 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py b/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py index a6eb806e92ba..8f0cabaddd45 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py @@ -53,12 +53,19 @@ def __init__(self, max_mutation_bytes=MAX_MUTATION_SIZE, flush_count=FLUSH_COUNT self.flush_count = flush_count def get(self): - """Retrieve an item from the queue. Recalculate queue size.""" - row = self._queue.get() - mutation_size = row.get_mutations_size() - self.total_mutation_count -= len(row._get_mutations()) - self.total_size -= mutation_size - return row + """ + Retrieve an item from the queue. Recalculate queue size. + + If the queue is empty, return None. + """ + try: + row = self._queue.get_nowait() + mutation_size = row.get_mutations_size() + self.total_mutation_count -= len(row._get_mutations()) + self.total_size -= mutation_size + return row + except queue.Empty: + return None def put(self, item): """Insert an item to the queue. Recalculate queue size.""" @@ -79,9 +86,6 @@ def full(self): return True return False - def empty(self): - return self._queue.empty() - @dataclass class _BatchInfo: @@ -292,8 +296,10 @@ def flush(self): * :exc:`.batcherMutationsBatchError` if there's any error in the mutations. 
""" rows_to_flush = [] - while not self._rows.empty(): - rows_to_flush.append(self._rows.get()) + row = self._rows.get() + while row is not None: + rows_to_flush.append(row) + row = self._rows.get() response = self._flush_rows(rows_to_flush) return response @@ -303,58 +309,68 @@ def _flush_async(self): :raises: * :exc:`.batcherMutationsBatchError` if there's any error in the mutations. """ - - rows_to_flush = [] - mutations_count = 0 - mutations_size = 0 - rows_count = 0 - batch_info = _BatchInfo() - - while not self._rows.empty(): - row = self._rows.get() - mutations_count += len(row._get_mutations()) - mutations_size += row.get_mutations_size() - rows_count += 1 - rows_to_flush.append(row) - batch_info.mutations_count = mutations_count - batch_info.rows_count = rows_count - batch_info.mutations_size = mutations_size - - if ( - rows_count >= self.flush_count - or mutations_size >= self.max_row_bytes - or mutations_count >= self.flow_control.max_mutations - or mutations_size >= self.flow_control.max_mutation_bytes - or self._rows.empty() # submit when it reached the end of the queue + next_row = self._rows.get() + while next_row is not None: + # start a new batch + rows_to_flush = [next_row] + batch_info = _BatchInfo( + mutations_count=len(next_row._get_mutations()), + rows_count=1, + mutations_size=next_row.get_mutations_size(), + ) + # fill up batch with rows + next_row = self._rows.get() + while next_row is not None and self._row_fits_in_batch( + next_row, batch_info ): - # wait for resources to become available, before submitting any new batch - self.flow_control.wait() - # once unblocked, submit a batch - # event flag will be set by control_flow to block subsequent thread, but not blocking this one - self.flow_control.control_flow(batch_info) - future = self._executor.submit(self._flush_rows, rows_to_flush) - self.futures_mapping[future] = batch_info - future.add_done_callback(self._batch_completed_callback) - - # reset and start a new batch - rows_to_flush = [] - mutations_size = 0 - rows_count = 0 - mutations_count = 0 - batch_info = _BatchInfo() + rows_to_flush.append(next_row) + batch_info.mutations_count += len(next_row._get_mutations()) + batch_info.rows_count += 1 + batch_info.mutations_size += next_row.get_mutations_size() + next_row = self._rows.get() + # send batch over network + # wait for resources to become available + self.flow_control.wait() + # once unblocked, submit the batch + # event flag will be set by control_flow to block subsequent thread, but not blocking this one + self.flow_control.control_flow(batch_info) + future = self._executor.submit(self._flush_rows, rows_to_flush) + # schedule release of resources from flow control + self.futures_mapping[future] = batch_info + future.add_done_callback(self._batch_completed_callback) def _batch_completed_callback(self, future): """Callback for when the mutation has finished to clean up the current batch and release items from the flow controller. - Raise exceptions if there's any. Release the resources locked by the flow control and allow enqueued tasks to be run. """ - processed_rows = self.futures_mapping[future] self.flow_control.release(processed_rows) del self.futures_mapping[future] + def _row_fits_in_batch(self, row, batch_info): + """Checks if a row can fit in the current batch. + + :type row: class + :param row: :class:`~google.cloud.bigtable.row.DirectRow`. + + :type batch_info: :class:`_BatchInfo` + :param batch_info: Information about the current batch. 
+ + :rtype: bool + :returns: True if the row can fit in the current batch. + """ + new_rows_count = batch_info.rows_count + 1 + new_mutations_count = batch_info.mutations_count + len(row._get_mutations()) + new_mutations_size = batch_info.mutations_size + row.get_mutations_size() + return ( + new_rows_count <= self.flush_count + and new_mutations_size <= self.max_row_bytes + and new_mutations_count <= self.flow_control.max_mutations + and new_mutations_size <= self.flow_control.max_mutation_bytes + ) + def _flush_rows(self, rows_to_flush): """Mutate the specified rows. diff --git a/packages/google-cloud-bigtable/tests/system/conftest.py b/packages/google-cloud-bigtable/tests/system/conftest.py index f39fcba88962..910c20970c34 100644 --- a/packages/google-cloud-bigtable/tests/system/conftest.py +++ b/packages/google-cloud-bigtable/tests/system/conftest.py @@ -58,7 +58,7 @@ def location_id(): @pytest.fixture(scope="session") def serve_nodes(): - return 3 + return 1 @pytest.fixture(scope="session") diff --git a/packages/google-cloud-bigtable/tests/system/test_data_api.py b/packages/google-cloud-bigtable/tests/system/test_data_api.py index 2ca7e150479a..579837e34176 100644 --- a/packages/google-cloud-bigtable/tests/system/test_data_api.py +++ b/packages/google-cloud-bigtable/tests/system/test_data_api.py @@ -381,3 +381,39 @@ def test_access_with_non_admin_client(data_client, data_instance_id, data_table_ instance = data_client.instance(data_instance_id) table = instance.table(data_table_id) assert table.read_row("nonesuch") is None # no raise + + +def test_mutations_batcher_threading(data_table, rows_to_delete): + """ + Test the mutations batcher by sending a bunch of mutations using different + flush methods + """ + import mock + import time + from google.cloud.bigtable.batcher import MutationsBatcher + + num_sent = 20 + all_results = [] + + def callback(results): + all_results.extend(results) + + # override flow control max elements + with mock.patch("google.cloud.bigtable.batcher.MAX_OUTSTANDING_ELEMENTS", 2): + with MutationsBatcher( + data_table, + flush_count=5, + flush_interval=0.07, + batch_completed_callback=callback, + ) as batcher: + # send mutations in a way that timed flushes and count flushes interleave + for i in range(num_sent): + row = data_table.direct_row("row{}".format(i)) + row.set_cell( + COLUMN_FAMILY_ID1, COL_NAME1, "val{}".format(i).encode("utf-8") + ) + rows_to_delete.append(row) + batcher.mutate(row) + time.sleep(0.01) + # ensure all mutations were sent + assert len(all_results) == num_sent From d4da10adc3618d556797b38d746d0bebe8deccf9 Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Tue, 12 Dec 2023 14:24:57 -0800 Subject: [PATCH 761/892] fix: add lock to flow control (#899) --- .../google/cloud/bigtable/batcher.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py b/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py index 8f0cabaddd45..f9b85386d827 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/batcher.py @@ -114,6 +114,7 @@ def __init__( self.inflight_size = 0 self.event = threading.Event() self.event.set() + self._lock = threading.Lock() def is_blocked(self): """Returns True if: @@ -132,8 +133,9 @@ def control_flow(self, batch_info): Calculate the resources used by this batch """ - self.inflight_mutations += batch_info.mutations_count - self.inflight_size += 
batch_info.mutations_size + with self._lock: + self.inflight_mutations += batch_info.mutations_count + self.inflight_size += batch_info.mutations_size self.set_flow_control_status() def wait(self): @@ -158,8 +160,9 @@ def release(self, batch_info): Release the resources. Decrement the row size to allow enqueued mutations to be run. """ - self.inflight_mutations -= batch_info.mutations_count - self.inflight_size -= batch_info.mutations_size + with self._lock: + self.inflight_mutations -= batch_info.mutations_count + self.inflight_size -= batch_info.mutations_size self.set_flow_control_status() From 893d25934dda8bc5cdc9afd3c3c1623c82041cab Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Tue, 12 Dec 2023 22:59:34 +0000 Subject: [PATCH 762/892] chore(main): release 2.22.0 (#861) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- .../.release-please-manifest.json | 2 +- packages/google-cloud-bigtable/CHANGELOG.md | 24 +++++++++++++++++++ .../google/cloud/bigtable/gapic_version.py | 2 +- .../cloud/bigtable_admin/gapic_version.py | 2 +- .../cloud/bigtable_admin_v2/gapic_version.py | 2 +- .../google/cloud/bigtable_v2/gapic_version.py | 2 +- 6 files changed, 29 insertions(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/.release-please-manifest.json b/packages/google-cloud-bigtable/.release-please-manifest.json index 5be20145ac45..a5ab48803e32 100644 --- a/packages/google-cloud-bigtable/.release-please-manifest.json +++ b/packages/google-cloud-bigtable/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "2.21.0" + ".": "2.22.0" } \ No newline at end of file diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 1a2a6ad3a1f0..5f86fdd88968 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,30 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.22.0](https://github.com/googleapis/python-bigtable/compare/v2.21.0...v2.22.0) (2023-12-12) + + +### Features + +* Add support for Cloud Bigtable Request Priorities in App Profiles ([#871](https://github.com/googleapis/python-bigtable/issues/871)) ([a4d551e](https://github.com/googleapis/python-bigtable/commit/a4d551e34006202ee96a395a2107d7acdc5881de)) +* Add support for Python 3.12 ([#888](https://github.com/googleapis/python-bigtable/issues/888)) ([4f050aa](https://github.com/googleapis/python-bigtable/commit/4f050aa5aed9a9dcf209779d5c10e5de8e2ff19e)) +* Introduce compatibility with native namespace packages ([#893](https://github.com/googleapis/python-bigtable/issues/893)) ([d218f4e](https://github.com/googleapis/python-bigtable/commit/d218f4ebd4ed6705721dca9318df955b40b0d0ac)) +* Publish CopyBackup protos to external customers ([#855](https://github.com/googleapis/python-bigtable/issues/855)) ([4105df7](https://github.com/googleapis/python-bigtable/commit/4105df762f1318c49bba030063897f0c50e4daee)) + + +### Bug Fixes + +* Add feature flag for improved mutate rows throttling ([e5af359](https://github.com/googleapis/python-bigtable/commit/e5af3597f45fc4c094c59abca876374f5a866c1b)) +* Add lock to flow control ([#899](https://github.com/googleapis/python-bigtable/issues/899)) ([e4e63c7](https://github.com/googleapis/python-bigtable/commit/e4e63c7b5b91273b3aae04fda59cc5a21c848de2)) +* Mutations batcher race condition ([#896](https://github.com/googleapis/python-bigtable/issues/896)) 
([fe58f61](https://github.com/googleapis/python-bigtable/commit/fe58f617c7364d7e99e2ec50abd5f080852bf033)) +* Require google-cloud-core 1.4.4 ([#866](https://github.com/googleapis/python-bigtable/issues/866)) ([09f8a46](https://github.com/googleapis/python-bigtable/commit/09f8a4667d8b68a9f2048ba1aa57db4f775a2c03)) +* Use `retry_async` instead of `retry` in async client ([597efd1](https://github.com/googleapis/python-bigtable/commit/597efd11d15f20549010b4301be4d9768326e6a2)) + + +### Documentation + +* Minor formatting ([e5af359](https://github.com/googleapis/python-bigtable/commit/e5af3597f45fc4c094c59abca876374f5a866c1b)) + ## [2.21.0](https://github.com/googleapis/python-bigtable/compare/v2.20.0...v2.21.0) (2023-08-02) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py index e546bae0531e..03d6d0200b82 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.21.0" # {x-release-please-version} +__version__ = "2.22.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py index e546bae0531e..03d6d0200b82 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.21.0" # {x-release-please-version} +__version__ = "2.22.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py index e546bae0531e..03d6d0200b82 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.21.0" # {x-release-please-version} +__version__ = "2.22.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py index e546bae0531e..03d6d0200b82 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "2.21.0" # {x-release-please-version} +__version__ = "2.22.0" # {x-release-please-version} From 315056fe131f10fc9f2716188d1704451c2eb797 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 14 Dec 2023 12:07:44 -0500 Subject: [PATCH 763/892] build: update actions/checkout and actions/setup-python (#895) Source-Link: https://github.com/googleapis/synthtool/commit/3551acd1261fd8f616cbfd054cda9bd6d6ac75f4 Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:230f7fe8a0d2ed81a519cfc15c6bb11c5b46b9fb449b8b1219b3771bcb520ad2 Co-authored-by: Owl Bot Co-authored-by: Daniel Sanche --- .../.github/.OwlBot.lock.yaml | 4 +- .../.github/workflows/docs.yml | 8 ++-- .../.github/workflows/lint.yml | 4 +- .../.github/workflows/unittest.yml | 8 ++-- .../.kokoro/requirements.txt | 48 +++++++++---------- 5 files changed, 36 insertions(+), 36 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index eb4d9f794dc1..40bf99731959 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:bacc3af03bff793a03add584537b36b5644342931ad989e3ba1171d3bd5399f5 -# created: 2023-11-23T18:17:28.105124211Z + digest: sha256:230f7fe8a0d2ed81a519cfc15c6bb11c5b46b9fb449b8b1219b3771bcb520ad2 +# created: 2023-12-09T15:16:25.430769578Z diff --git a/packages/google-cloud-bigtable/.github/workflows/docs.yml b/packages/google-cloud-bigtable/.github/workflows/docs.yml index 221806cedf58..698fbc5c94da 100644 --- a/packages/google-cloud-bigtable/.github/workflows/docs.yml +++ b/packages/google-cloud-bigtable/.github/workflows/docs.yml @@ -8,9 +8,9 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: "3.9" - name: Install nox @@ -24,9 +24,9 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install nox diff --git a/packages/google-cloud-bigtable/.github/workflows/lint.yml b/packages/google-cloud-bigtable/.github/workflows/lint.yml index 16d5a9e90f6d..4866193af2a9 100644 --- a/packages/google-cloud-bigtable/.github/workflows/lint.yml +++ b/packages/google-cloud-bigtable/.github/workflows/lint.yml @@ -8,9 +8,9 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: "3.8" - name: Install nox diff --git a/packages/google-cloud-bigtable/.github/workflows/unittest.yml b/packages/google-cloud-bigtable/.github/workflows/unittest.yml index a32027b49bc2..d6ca65627c2d 100644 --- a/packages/google-cloud-bigtable/.github/workflows/unittest.yml +++ b/packages/google-cloud-bigtable/.github/workflows/unittest.yml @@ -11,9 +11,9 @@ jobs: python: ['3.7', '3.8', '3.9', '3.10', '3.11', '3.12'] steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 
with: python-version: ${{ matrix.python }} - name: Install nox @@ -37,9 +37,9 @@ jobs: - unit steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: "3.8" - name: Install coverage diff --git a/packages/google-cloud-bigtable/.kokoro/requirements.txt b/packages/google-cloud-bigtable/.kokoro/requirements.txt index 8957e21104e2..e5c1ffca94b7 100644 --- a/packages/google-cloud-bigtable/.kokoro/requirements.txt +++ b/packages/google-cloud-bigtable/.kokoro/requirements.txt @@ -93,30 +93,30 @@ colorlog==6.7.0 \ # via # gcp-docuploader # nox -cryptography==41.0.5 \ - --hash=sha256:0c327cac00f082013c7c9fb6c46b7cc9fa3c288ca702c74773968173bda421bf \ - --hash=sha256:0d2a6a598847c46e3e321a7aef8af1436f11c27f1254933746304ff014664d84 \ - --hash=sha256:227ec057cd32a41c6651701abc0328135e472ed450f47c2766f23267b792a88e \ - --hash=sha256:22892cc830d8b2c89ea60148227631bb96a7da0c1b722f2aac8824b1b7c0b6b8 \ - --hash=sha256:392cb88b597247177172e02da6b7a63deeff1937fa6fec3bbf902ebd75d97ec7 \ - --hash=sha256:3be3ca726e1572517d2bef99a818378bbcf7d7799d5372a46c79c29eb8d166c1 \ - --hash=sha256:573eb7128cbca75f9157dcde974781209463ce56b5804983e11a1c462f0f4e88 \ - --hash=sha256:580afc7b7216deeb87a098ef0674d6ee34ab55993140838b14c9b83312b37b86 \ - --hash=sha256:5a70187954ba7292c7876734183e810b728b4f3965fbe571421cb2434d279179 \ - --hash=sha256:73801ac9736741f220e20435f84ecec75ed70eda90f781a148f1bad546963d81 \ - --hash=sha256:7d208c21e47940369accfc9e85f0de7693d9a5d843c2509b3846b2db170dfd20 \ - --hash=sha256:8254962e6ba1f4d2090c44daf50a547cd5f0bf446dc658a8e5f8156cae0d8548 \ - --hash=sha256:88417bff20162f635f24f849ab182b092697922088b477a7abd6664ddd82291d \ - --hash=sha256:a48e74dad1fb349f3dc1d449ed88e0017d792997a7ad2ec9587ed17405667e6d \ - --hash=sha256:b948e09fe5fb18517d99994184854ebd50b57248736fd4c720ad540560174ec5 \ - --hash=sha256:c707f7afd813478e2019ae32a7c49cd932dd60ab2d2a93e796f68236b7e1fbf1 \ - --hash=sha256:d38e6031e113b7421db1de0c1b1f7739564a88f1684c6b89234fbf6c11b75147 \ - --hash=sha256:d3977f0e276f6f5bf245c403156673db103283266601405376f075c849a0b936 \ - --hash=sha256:da6a0ff8f1016ccc7477e6339e1d50ce5f59b88905585f77193ebd5068f1e797 \ - --hash=sha256:e270c04f4d9b5671ebcc792b3ba5d4488bf7c42c3c241a3748e2599776f29696 \ - --hash=sha256:e886098619d3815e0ad5790c973afeee2c0e6e04b4da90b88e6bd06e2a0b1b72 \ - --hash=sha256:ec3b055ff8f1dce8e6ef28f626e0972981475173d7973d63f271b29c8a2897da \ - --hash=sha256:fba1e91467c65fe64a82c689dc6cf58151158993b13eb7a7f3f4b7f395636723 +cryptography==41.0.6 \ + --hash=sha256:068bc551698c234742c40049e46840843f3d98ad7ce265fd2bd4ec0d11306596 \ + --hash=sha256:0f27acb55a4e77b9be8d550d762b0513ef3fc658cd3eb15110ebbcbd626db12c \ + --hash=sha256:2132d5865eea673fe6712c2ed5fb4fa49dba10768bb4cc798345748380ee3660 \ + --hash=sha256:3288acccef021e3c3c10d58933f44e8602cf04dba96d9796d70d537bb2f4bbc4 \ + --hash=sha256:35f3f288e83c3f6f10752467c48919a7a94b7d88cc00b0668372a0d2ad4f8ead \ + --hash=sha256:398ae1fc711b5eb78e977daa3cbf47cec20f2c08c5da129b7a296055fbb22aed \ + --hash=sha256:422e3e31d63743855e43e5a6fcc8b4acab860f560f9321b0ee6269cc7ed70cc3 \ + --hash=sha256:48783b7e2bef51224020efb61b42704207dde583d7e371ef8fc2a5fb6c0aabc7 \ + --hash=sha256:4d03186af98b1c01a4eda396b137f29e4e3fb0173e30f885e27acec8823c1b09 \ + --hash=sha256:5daeb18e7886a358064a68dbcaf441c036cbdb7da52ae744e7b9207b04d3908c \ + --hash=sha256:60e746b11b937911dc70d164060d28d273e31853bb359e2b2033c9e93e6f3c43 \ + 
--hash=sha256:742ae5e9a2310e9dade7932f9576606836ed174da3c7d26bc3d3ab4bd49b9f65 \ + --hash=sha256:7e00fb556bda398b99b0da289ce7053639d33b572847181d6483ad89835115f6 \ + --hash=sha256:85abd057699b98fce40b41737afb234fef05c67e116f6f3650782c10862c43da \ + --hash=sha256:8efb2af8d4ba9dbc9c9dd8f04d19a7abb5b49eab1f3694e7b5a16a5fc2856f5c \ + --hash=sha256:ae236bb8760c1e55b7a39b6d4d32d2279bc6c7c8500b7d5a13b6fb9fc97be35b \ + --hash=sha256:afda76d84b053923c27ede5edc1ed7d53e3c9f475ebaf63c68e69f1403c405a8 \ + --hash=sha256:b27a7fd4229abef715e064269d98a7e2909ebf92eb6912a9603c7e14c181928c \ + --hash=sha256:b648fe2a45e426aaee684ddca2632f62ec4613ef362f4d681a9a6283d10e079d \ + --hash=sha256:c5a550dc7a3b50b116323e3d376241829fd326ac47bc195e04eb33a8170902a9 \ + --hash=sha256:da46e2b5df770070412c46f87bac0849b8d685c5f2679771de277a422c7d0b86 \ + --hash=sha256:f39812f70fc5c71a15aa3c97b2bbe213c3f2a460b79bd21c40d033bb34a9bf36 \ + --hash=sha256:ff369dd19e8fe0528b02e8df9f2aeb2479f89b1270d90f96a63500afe9af5cae # via # gcp-releasetool # secretstorage From 27ba8c1c8722e583553b92ae7a96cedda7ff0af2 Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Fri, 15 Dec 2023 02:20:19 -0800 Subject: [PATCH 764/892] chore: add test for partial cell data (#908) --- .../tests/unit/test_row_data.py | 24 +++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_data.py b/packages/google-cloud-bigtable/tests/unit/test_row_data.py index 9f2c40a545bb..7c2987b56d18 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row_data.py +++ b/packages/google-cloud-bigtable/tests/unit/test_row_data.py @@ -362,6 +362,30 @@ def test__retry_read_rows_exception_deadline_exceeded_wrapped_in_grpc(): assert _retry_read_rows_exception(exception) +def test_partial_cell_data(): + from google.cloud.bigtable.row_data import PartialCellData + + expected_key = b"row-key" + expected_family_name = b"family-name" + expected_qualifier = b"qualifier" + expected_timestamp = 1234 + instance = PartialCellData( + expected_key, expected_family_name, expected_qualifier, expected_timestamp + ) + assert instance.row_key == expected_key + assert instance.family_name == expected_family_name + assert instance.qualifier == expected_qualifier + assert instance.timestamp_micros == expected_timestamp + assert instance.value == b"" + assert instance.labels == () + # test updating value + added_value = b"added-value" + instance.append_value(added_value) + assert instance.value == added_value + instance.append_value(added_value) + assert instance.value == added_value + added_value + + def _make_partial_rows_data(*args, **kwargs): from google.cloud.bigtable.row_data import PartialRowsData From 08806b71e171a84b62d38c54359988aa97167c9e Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Fri, 15 Dec 2023 18:39:57 +0100 Subject: [PATCH 765/892] chore(deps): update all dependencies (#898) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore(deps): update all dependencies * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot Co-authored-by: Anthonios Partheniou --- packages/google-cloud-bigtable/.github/workflows/mypy.yml | 4 ++-- .../.github/workflows/system_emulated.yml | 6 +++--- .../samples/beam/requirements-test.txt | 2 +- .../google-cloud-bigtable/samples/beam/requirements.txt | 6 +++--- .../samples/hello/requirements-test.txt | 2 +- 
.../google-cloud-bigtable/samples/hello/requirements.txt | 4 ++-- .../samples/hello_happybase/requirements-test.txt | 2 +- .../samples/instanceadmin/requirements-test.txt | 2 +- .../samples/instanceadmin/requirements.txt | 2 +- .../samples/metricscaler/requirements-test.txt | 2 +- .../samples/metricscaler/requirements.txt | 4 ++-- .../samples/quickstart/requirements-test.txt | 2 +- .../samples/quickstart/requirements.txt | 2 +- .../samples/quickstart_happybase/requirements-test.txt | 2 +- .../samples/snippets/deletes/requirements-test.txt | 2 +- .../samples/snippets/deletes/requirements.txt | 2 +- .../samples/snippets/filters/requirements-test.txt | 2 +- .../samples/snippets/filters/requirements.txt | 2 +- .../samples/snippets/reads/requirements-test.txt | 2 +- .../samples/snippets/reads/requirements.txt | 2 +- .../samples/snippets/writes/requirements-test.txt | 2 +- .../samples/snippets/writes/requirements.txt | 2 +- .../samples/tableadmin/requirements-test.txt | 4 ++-- .../samples/tableadmin/requirements.txt | 2 +- 24 files changed, 32 insertions(+), 32 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/workflows/mypy.yml b/packages/google-cloud-bigtable/.github/workflows/mypy.yml index c63242630acd..3915cddd3d1c 100644 --- a/packages/google-cloud-bigtable/.github/workflows/mypy.yml +++ b/packages/google-cloud-bigtable/.github/workflows/mypy.yml @@ -8,9 +8,9 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: "3.8" - name: Install nox diff --git a/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml b/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml index f1aa7e87c216..7669901c94d4 100644 --- a/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml +++ b/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml @@ -12,15 +12,15 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: '3.8' - name: Setup GCloud SDK - uses: google-github-actions/setup-gcloud@v1.1.1 + uses: google-github-actions/setup-gcloud@v2.0.0 - name: Install / run Nox run: | diff --git a/packages/google-cloud-bigtable/samples/beam/requirements-test.txt b/packages/google-cloud-bigtable/samples/beam/requirements-test.txt index 70613be0cfe4..f9708e4b7cf1 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements-test.txt @@ -1 +1 @@ -pytest==7.4.0 +pytest==7.4.3 diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index 9b95d0b5281f..813fc8d2bd93 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ -apache-beam==2.46.0 -google-cloud-bigtable==2.17.0 -google-cloud-core==2.3.3 +apache-beam==2.52.0 +google-cloud-bigtable==2.22.0 +google-cloud-core==2.4.1 diff --git a/packages/google-cloud-bigtable/samples/hello/requirements-test.txt b/packages/google-cloud-bigtable/samples/hello/requirements-test.txt index 70613be0cfe4..f9708e4b7cf1 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements-test.txt 
@@ -1 +1 @@ -pytest==7.4.0 +pytest==7.4.3 diff --git a/packages/google-cloud-bigtable/samples/hello/requirements.txt b/packages/google-cloud-bigtable/samples/hello/requirements.txt index a76d144e6d5c..68419fbcb794 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.20.0 -google-cloud-core==2.3.3 +google-cloud-bigtable==2.22.0 +google-cloud-core==2.4.1 diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt b/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt index 70613be0cfe4..f9708e4b7cf1 100644 --- a/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt @@ -1 +1 @@ -pytest==7.4.0 +pytest==7.4.3 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt index 70613be0cfe4..f9708e4b7cf1 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt @@ -1 +1 @@ -pytest==7.4.0 +pytest==7.4.3 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt index bba9ed8cf870..a01a0943c28e 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.20.0 +google-cloud-bigtable==2.22.0 backoff==2.2.1 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt index d8ae088dd0b6..80ef7d3d0272 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt @@ -1,3 +1,3 @@ -pytest==7.4.0 +pytest==7.4.3 mock==5.1.0 google-cloud-testutils diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index c0fce2294067..38c355ce349e 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.20.0 -google-cloud-monitoring==2.15.1 +google-cloud-bigtable==2.22.0 +google-cloud-monitoring==2.18.0 diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt index 70613be0cfe4..f9708e4b7cf1 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt @@ -1 +1 @@ -pytest==7.4.0 +pytest==7.4.3 diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt index 83e37754ee7e..6dc98589311e 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.20.0 +google-cloud-bigtable==2.22.0 diff --git 
a/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt b/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt index 70613be0cfe4..f9708e4b7cf1 100644 --- a/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt @@ -1 +1 @@ -pytest==7.4.0 +pytest==7.4.3 diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt index 70613be0cfe4..f9708e4b7cf1 100644 --- a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt @@ -1 +1 @@ -pytest==7.4.0 +pytest==7.4.3 diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt index 85b4e786f473..ae10593d2852 100644 --- a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.20.0 +google-cloud-bigtable==2.22.0 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt index 70613be0cfe4..f9708e4b7cf1 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt @@ -1 +1 @@ -pytest==7.4.0 +pytest==7.4.3 diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt index 85b4e786f473..ae10593d2852 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.20.0 +google-cloud-bigtable==2.22.0 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt index 70613be0cfe4..f9708e4b7cf1 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt @@ -1 +1 @@ -pytest==7.4.0 +pytest==7.4.3 diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt index 85b4e786f473..ae10593d2852 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.20.0 +google-cloud-bigtable==2.22.0 snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt index cbd0a47def45..908e344b5dde 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt @@ -1,2 +1,2 @@ backoff==2.2.1 -pytest==7.4.0 
+pytest==7.4.3 diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt index 90fa5577c016..07b0a191d101 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.20.0 \ No newline at end of file +google-cloud-bigtable==2.22.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt index b4ead9993662..39d590005bda 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt @@ -1,2 +1,2 @@ -pytest==7.4.0 -google-cloud-testutils==1.3.3 +pytest==7.4.3 +google-cloud-testutils==1.4.0 diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt index 83e37754ee7e..6dc98589311e 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.20.0 +google-cloud-bigtable==2.22.0 From 496822eac564112fd0776890c7ddbf75e5170382 Mon Sep 17 00:00:00 2001 From: Anthonios Partheniou Date: Fri, 15 Dec 2023 12:53:54 -0500 Subject: [PATCH 766/892] chore: remove obsolete lines in .coveragerc (#902) --- packages/google-cloud-bigtable/.coveragerc | 8 -------- 1 file changed, 8 deletions(-) diff --git a/packages/google-cloud-bigtable/.coveragerc b/packages/google-cloud-bigtable/.coveragerc index 3128ad99ec56..24e7b7e4d313 100644 --- a/packages/google-cloud-bigtable/.coveragerc +++ b/packages/google-cloud-bigtable/.coveragerc @@ -18,8 +18,6 @@ [run] branch = True omit = - google/cloud/__init__.py - google/__init__.py google/cloud/bigtable_admin/__init__.py google/cloud/bigtable_admin/gapic_version.py @@ -33,11 +31,5 @@ exclude_lines = def __repr__ # Ignore abstract methods raise NotImplementedError - # Ignore setuptools-less fallback - except pkg_resources.DistributionNotFound: omit = - */gapic/*.py - */proto/*.py - */core/*.py */site-packages/*.py - google/cloud/__init__.py From ba789e82677c597e15ca69e24a8fde4265a32020 Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Thu, 21 Dec 2023 12:50:32 +0100 Subject: [PATCH 767/892] chore(deps): update all dependencies (#910) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore(deps): update all dependencies * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- .../google-cloud-bigtable/.github/workflows/system_emulated.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml b/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml index 7669901c94d4..ceb4e0c4d774 100644 --- a/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml +++ b/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml @@ -20,7 +20,7 @@ jobs: python-version: '3.8' - name: Setup GCloud SDK - uses: google-github-actions/setup-gcloud@v2.0.0 + uses: google-github-actions/setup-gcloud@v2.0.1 - name: Install / run Nox run: | From 
e07eacd7dfdd46cc48ffb799f3a10ef68a3d6666 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Mon, 8 Jan 2024 10:42:14 -0500 Subject: [PATCH 768/892] build: update actions/upload-artifact and actions/download-artifact (#907) Source-Link: https://github.com/googleapis/synthtool/commit/280ddaed417057dfe5b1395731de07b7d09f5058 Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:346ab2efb51649c5dde7756cbbdc60dd394852ba83b9bbffc292a63549f33c17 Co-authored-by: Owl Bot Co-authored-by: Anthonios Partheniou --- .../google-cloud-bigtable/.github/.OwlBot.lock.yaml | 4 ++-- .../.github/workflows/unittest.yml | 10 +++++----- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 40bf99731959..9bee24097165 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:230f7fe8a0d2ed81a519cfc15c6bb11c5b46b9fb449b8b1219b3771bcb520ad2 -# created: 2023-12-09T15:16:25.430769578Z + digest: sha256:346ab2efb51649c5dde7756cbbdc60dd394852ba83b9bbffc292a63549f33c17 +# created: 2023-12-14T22:17:57.611773021Z diff --git a/packages/google-cloud-bigtable/.github/workflows/unittest.yml b/packages/google-cloud-bigtable/.github/workflows/unittest.yml index d6ca65627c2d..f4a337c496a0 100644 --- a/packages/google-cloud-bigtable/.github/workflows/unittest.yml +++ b/packages/google-cloud-bigtable/.github/workflows/unittest.yml @@ -26,9 +26,9 @@ jobs: run: | nox -s unit-${{ matrix.python }} - name: Upload coverage results - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: - name: coverage-artifacts + name: coverage-artifact-${{ matrix.python }} path: .coverage-${{ matrix.python }} cover: @@ -47,11 +47,11 @@ jobs: python -m pip install --upgrade setuptools pip wheel python -m pip install coverage - name: Download coverage results - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: - name: coverage-artifacts path: .coverage-results/ - name: Report coverage results run: | - coverage combine .coverage-results/.coverage* + find .coverage-results -type f -name '*.zip' -exec unzip {} \; + coverage combine .coverage-results/**/.coverage* coverage report --show-missing --fail-under=100 From 0522e8cdb24e9073ee30a832e888b0554b592054 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 11 Jan 2024 12:47:18 -0800 Subject: [PATCH 769/892] feat: Adding feature flags for routing cookie and retry info (#905) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: Modify ModifyColumnFamiliesRequest proto to expose ignore_warnings field PiperOrigin-RevId: 590940407 Source-Link: https://github.com/googleapis/googleapis/commit/fb027c893ce1536d6a485748d4036d97092fb812 Source-Link: https://github.com/googleapis/googleapis-gen/commit/f0728cda227b38835822c4e5519e568ce8d2b5ac Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiZjA3MjhjZGEyMjdiMzg4MzU4MjJjNGU1NTE5ZTU2OGNlOGQyYjVhYyJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * feat: Adding feature flags for routing cookie and retry info 
PiperOrigin-RevId: 591912877 Source-Link: https://github.com/googleapis/googleapis/commit/f6505fe8d0daac2426c22be985ad3b745a4b5485 Source-Link: https://github.com/googleapis/googleapis-gen/commit/7499187415f8d405ef0d46dd6ff608b125c53c8f Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiNzQ5OTE4NzQxNWY4ZDQwNWVmMGQ0NmRkNmZmNjA4YjEyNWM1M2M4ZiJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- .../types/bigtable_table_admin.py | 7 +++++++ .../cloud/bigtable_v2/types/feature_flags.py | 16 ++++++++++++++++ .../scripts/fixup_bigtable_admin_v2_keywords.py | 2 +- 3 files changed, 24 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py index 6a3b31a1e394..c21ac4d5a031 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py @@ -597,6 +597,9 @@ class ModifyColumnFamiliesRequest(proto.Message): earlier modifications can be masked by later ones (in the case of repeated updates to the same family, for example). + ignore_warnings (bool): + Optional. If true, ignore safety checks when + modifying the column families. """ class Modification(proto.Message): @@ -662,6 +665,10 @@ class Modification(proto.Message): number=2, message=Modification, ) + ignore_warnings: bool = proto.Field( + proto.BOOL, + number=3, + ) class GenerateConsistencyTokenRequest(proto.Message): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/feature_flags.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/feature_flags.py index 92ac5023d863..45e673f750bb 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/feature_flags.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/feature_flags.py @@ -59,6 +59,14 @@ class FeatureFlags(proto.Message): Notify the server that the client supports the last_scanned_row field in ReadRowsResponse for long-running scans. + routing_cookie (bool): + Notify the server that the client supports + using encoded routing cookie strings to retry + requests with. + retry_info (bool): + Notify the server that the client supports + using retry info back off durations to retry + requests with. 
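For illustration only (a sketch, not text from the generated module): the two new capabilities are plain boolean fields on the FeatureFlags message, declared just below, so a client advertising support for both would populate something like:

    from google.cloud.bigtable_v2.types import FeatureFlags

    flags = FeatureFlags(
        reverse_scans=True,   # pre-existing flag, shown here for context
        routing_cookie=True,  # new: client can echo routing cookies when retrying
        retry_info=True,      # new: client honors server-provided retry back-off info
    )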
""" reverse_scans: bool = proto.Field( @@ -77,6 +85,14 @@ class FeatureFlags(proto.Message): proto.BOOL, number=4, ) + routing_cookie: bool = proto.Field( + proto.BOOL, + number=6, + ) + retry_info: bool = proto.Field( + proto.BOOL, + number=7, + ) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py b/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py index 6882feaf6050..8c3efea109a0 100644 --- a/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py +++ b/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py @@ -69,7 +69,7 @@ class bigtable_adminCallTransformer(cst.CSTTransformer): 'list_instances': ('parent', 'page_token', ), 'list_snapshots': ('parent', 'page_size', 'page_token', ), 'list_tables': ('parent', 'view', 'page_size', 'page_token', ), - 'modify_column_families': ('name', 'modifications', ), + 'modify_column_families': ('name', 'modifications', 'ignore_warnings', ), 'partial_update_cluster': ('cluster', 'update_mask', ), 'partial_update_instance': ('instance', 'update_mask', ), 'restore_table': ('parent', 'table_id', 'backup', ), From 56f21289ce690f899ee1499a098f825b6c68c740 Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Thu, 18 Jan 2024 01:25:56 +0100 Subject: [PATCH 770/892] chore(deps): update all dependencies (#911) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore(deps): update all dependencies * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- .../google-cloud-bigtable/samples/beam/requirements-test.txt | 2 +- .../google-cloud-bigtable/samples/hello/requirements-test.txt | 2 +- .../samples/hello_happybase/requirements-test.txt | 2 +- .../samples/instanceadmin/requirements-test.txt | 2 +- .../samples/metricscaler/requirements-test.txt | 2 +- .../samples/quickstart/requirements-test.txt | 2 +- .../samples/quickstart_happybase/requirements-test.txt | 2 +- .../samples/snippets/deletes/requirements-test.txt | 2 +- .../samples/snippets/filters/requirements-test.txt | 2 +- .../samples/snippets/reads/requirements-test.txt | 2 +- .../samples/snippets/writes/requirements-test.txt | 2 +- .../samples/tableadmin/requirements-test.txt | 2 +- 12 files changed, 12 insertions(+), 12 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements-test.txt b/packages/google-cloud-bigtable/samples/beam/requirements-test.txt index f9708e4b7cf1..cb87efc0ff71 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements-test.txt @@ -1 +1 @@ -pytest==7.4.3 +pytest==7.4.4 diff --git a/packages/google-cloud-bigtable/samples/hello/requirements-test.txt b/packages/google-cloud-bigtable/samples/hello/requirements-test.txt index f9708e4b7cf1..cb87efc0ff71 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements-test.txt @@ -1 +1 @@ -pytest==7.4.3 +pytest==7.4.4 diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt b/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt index f9708e4b7cf1..cb87efc0ff71 100644 --- a/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt +++ 
b/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt @@ -1 +1 @@ -pytest==7.4.3 +pytest==7.4.4 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt index f9708e4b7cf1..cb87efc0ff71 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt @@ -1 +1 @@ -pytest==7.4.3 +pytest==7.4.4 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt index 80ef7d3d0272..c0d4f70035bc 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt @@ -1,3 +1,3 @@ -pytest==7.4.3 +pytest==7.4.4 mock==5.1.0 google-cloud-testutils diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt index f9708e4b7cf1..cb87efc0ff71 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt @@ -1 +1 @@ -pytest==7.4.3 +pytest==7.4.4 diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt b/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt index f9708e4b7cf1..cb87efc0ff71 100644 --- a/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt @@ -1 +1 @@ -pytest==7.4.3 +pytest==7.4.4 diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt index f9708e4b7cf1..cb87efc0ff71 100644 --- a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt @@ -1 +1 @@ -pytest==7.4.3 +pytest==7.4.4 diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt index f9708e4b7cf1..cb87efc0ff71 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt @@ -1 +1 @@ -pytest==7.4.3 +pytest==7.4.4 diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt index f9708e4b7cf1..cb87efc0ff71 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt @@ -1 +1 @@ -pytest==7.4.3 +pytest==7.4.4 diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt index 908e344b5dde..43b02e724796 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt @@ -1,2 +1,2 @@ backoff==2.2.1 -pytest==7.4.3 +pytest==7.4.4 diff --git 
a/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt index 39d590005bda..aa143f59dfbe 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt @@ -1,2 +1,2 @@ -pytest==7.4.3 +pytest==7.4.4 google-cloud-testutils==1.4.0 From eccf4488d16281204cf193afad22609362e6bcbe Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 18 Jan 2024 00:36:31 +0000 Subject: [PATCH 771/892] build(python): fix `docs` and `docfx` builds (#917) Source-Link: https://github.com/googleapis/synthtool/commit/fac8444edd5f5526e804c306b766a271772a3e2f Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:5ea6d0ab82c956b50962f91d94e206d3921537ae5fe1549ec5326381d8905cfa Co-authored-by: Owl Bot --- .../.github/.OwlBot.lock.yaml | 6 +++--- .../.kokoro/requirements.txt | 6 +++--- packages/google-cloud-bigtable/noxfile.py | 20 ++++++++++++++++++- 3 files changed, 25 insertions(+), 7 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 9bee24097165..d8a1bbca7179 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -1,4 +1,4 @@ -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,5 +13,5 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:346ab2efb51649c5dde7756cbbdc60dd394852ba83b9bbffc292a63549f33c17 -# created: 2023-12-14T22:17:57.611773021Z + digest: sha256:5ea6d0ab82c956b50962f91d94e206d3921537ae5fe1549ec5326381d8905cfa +# created: 2024-01-15T16:32:08.142785673Z diff --git a/packages/google-cloud-bigtable/.kokoro/requirements.txt b/packages/google-cloud-bigtable/.kokoro/requirements.txt index e5c1ffca94b7..bb3d6ca38b14 100644 --- a/packages/google-cloud-bigtable/.kokoro/requirements.txt +++ b/packages/google-cloud-bigtable/.kokoro/requirements.txt @@ -263,9 +263,9 @@ jeepney==0.8.0 \ # via # keyring # secretstorage -jinja2==3.1.2 \ - --hash=sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852 \ - --hash=sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61 +jinja2==3.1.3 \ + --hash=sha256:7d6d50dd97d52cbc355597bd845fabfbac3f551e1f99619e39a35ce8c370b5fa \ + --hash=sha256:ac8bd6544d4bb2c9792bf3a159e80bba8fda7f07e81bc3aed565432d5925ba90 # via gcp-releasetool keyring==24.2.0 \ --hash=sha256:4901caaf597bfd3bbd78c9a0c7c4c29fcd8310dab2cffefe749e916b6527acd6 \ diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index a6fb7d6f3f51..8550a2b795ff 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -322,7 +322,16 @@ def docs(session): session.install("-e", ".") session.install( - "sphinx==4.0.1", + # We need to pin to specific versions of the `sphinxcontrib-*` packages + # which still support sphinx 4.x. + # See https://github.com/googleapis/sphinx-docfx-yaml/issues/344 + # and https://github.com/googleapis/sphinx-docfx-yaml/issues/345. 
+ "sphinxcontrib-applehelp==1.0.4", + "sphinxcontrib-devhelp==1.0.2", + "sphinxcontrib-htmlhelp==2.0.1", + "sphinxcontrib-qthelp==1.0.3", + "sphinxcontrib-serializinghtml==1.1.5", + "sphinx==4.5.0", "alabaster", "recommonmark", ) @@ -348,6 +357,15 @@ def docfx(session): session.install("-e", ".") session.install( + # We need to pin to specific versions of the `sphinxcontrib-*` packages + # which still support sphinx 4.x. + # See https://github.com/googleapis/sphinx-docfx-yaml/issues/344 + # and https://github.com/googleapis/sphinx-docfx-yaml/issues/345. + "sphinxcontrib-applehelp==1.0.4", + "sphinxcontrib-devhelp==1.0.2", + "sphinxcontrib-htmlhelp==2.0.1", + "sphinxcontrib-qthelp==1.0.3", + "sphinxcontrib-serializinghtml==1.1.5", "gcp-sphinx-docfx-yaml", "alabaster", "recommonmark", From 89cd8744e9c2cf51ca98277d1a1d6532eb3bfad8 Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Wed, 24 Jan 2024 04:01:14 -0800 Subject: [PATCH 772/892] chore: Update .repo-metadata.json and CODEOWNERS (#922) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore: update CODEOWNERS update CODEOWNERS file to allow reviews from partner team * Update .repo-metadata.json * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- packages/google-cloud-bigtable/.github/CODEOWNERS | 8 ++++---- packages/google-cloud-bigtable/.repo-metadata.json | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/CODEOWNERS b/packages/google-cloud-bigtable/.github/CODEOWNERS index 2f1fee90455c..8e8f088b7f44 100644 --- a/packages/google-cloud-bigtable/.github/CODEOWNERS +++ b/packages/google-cloud-bigtable/.github/CODEOWNERS @@ -5,8 +5,8 @@ # https://help.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners#codeowners-syntax # Note: This file is autogenerated. To make changes to the codeowner team, please update .repo-metadata.json. 
-# @googleapis/yoshi-python @googleapis/api-bigtable are the default owners for changes in this repo -* @googleapis/yoshi-python @googleapis/api-bigtable +# @googleapis/yoshi-python @googleapis/api-bigtable @googleapis/api-bigtable-partners are the default owners for changes in this repo +* @googleapis/yoshi-python @googleapis/api-bigtable @googleapis/api-bigtable-partners -# @googleapis/python-samples-reviewers @googleapis/api-bigtable are the default owners for samples changes -/samples/ @googleapis/python-samples-reviewers @googleapis/api-bigtable +# @googleapis/python-samples-reviewers @googleapis/api-bigtable @googleapis/api-bigtable-partners are the default owners for samples changes +/samples/ @googleapis/python-samples-reviewers @googleapis/api-bigtable @googleapis/api-bigtable-partners diff --git a/packages/google-cloud-bigtable/.repo-metadata.json b/packages/google-cloud-bigtable/.repo-metadata.json index 3c65ac669823..9de4b5f92bf5 100644 --- a/packages/google-cloud-bigtable/.repo-metadata.json +++ b/packages/google-cloud-bigtable/.repo-metadata.json @@ -75,6 +75,6 @@ } ], "default_version": "v2", - "codeowner_team": "@googleapis/api-bigtable", + "codeowner_team": "@googleapis/api-bigtable @googleapis/api-bigtable-partners", "api_shortname": "bigtable" } From 0a7f30459848093e6b27e6d017d3e71b3a0355db Mon Sep 17 00:00:00 2001 From: Cindy Peng <148148319+cindy-peng@users.noreply.github.com> Date: Mon, 29 Jan 2024 12:36:36 -0800 Subject: [PATCH 773/892] chore: create flakybot.yaml to change default issue priority (#928) * chore: create flakybot.yaml to change default issue priority * add google copyright license --------- Co-authored-by: cindy-peng --- .../google-cloud-bigtable/.github/flakybot.yaml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 packages/google-cloud-bigtable/.github/flakybot.yaml diff --git a/packages/google-cloud-bigtable/.github/flakybot.yaml b/packages/google-cloud-bigtable/.github/flakybot.yaml new file mode 100644 index 000000000000..2159a1bca569 --- /dev/null +++ b/packages/google-cloud-bigtable/.github/flakybot.yaml @@ -0,0 +1,15 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +issuePriority: p2 \ No newline at end of file From 07c1344dd983215e6c58911613fc316fc279ce93 Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Mon, 5 Feb 2024 10:33:54 -0800 Subject: [PATCH 774/892] chore: add experimental async data client (#920) * feat: add new v3.0.0 API skeleton (#745) * feat: improve rows filters (#751) * feat: read rows query model class (#752) * feat: implement row and cell model classes (#753) * feat: add pooled grpc transport (#748) * feat: implement read_rows (#762) * feat: implement mutate rows (#769) * feat: literal value filter (#767) * feat: row_exists and read_row (#778) * feat: read_modify_write and check_and_mutate_row (#780) * feat: sharded read rows (#766) * feat: ping and warm with metadata (#810) * feat: mutate rows batching (#770) * chore: restructure module paths (#816) * feat: improve timeout structure (#819) * fix: api errors apply to all bulk mutations * chore: reduce public api surface (#820) * feat: improve error group tracebacks on < py11 (#825) * feat: optimize read_rows (#852) * chore: add user agent suffix (#842) * feat: optimize retries (#854) * feat: add test proxy (#836) * chore(tests): add conformance tests to CI for v3 (#870) * chore(tests): turn off fast fail for conformance tets (#882) * feat: add TABLE_DEFAULTS enum for table method arguments (#880) * fix: pass None for retry in gapic calls (#881) * feat: replace internal dictionaries with protos in gapic calls (#875) * chore: optimize gapic calls (#863) * feat: expose retryable error codes to users (#879) * chore: update api_core submodule (#897) * chore: merge main into experimental_v3 (#900) * chore: pin conformance tests to v0.0.2 (#903) * fix: bulk mutation eventual success (#909) --------- Co-authored-by: Owl Bot --- gapic-generator-fork | 1 + packages/google-cloud-bigtable/.coveragerc | 2 +- .../.github/sync-repo-settings.yaml | 18 + .../.github/workflows/conformance.yaml | 56 + .../.github/workflows/system_emulated.yml | 2 +- .../.github/workflows/unittest.yml | 2 +- packages/google-cloud-bigtable/.gitmodules | 6 + .../.kokoro/conformance.sh | 52 + .../.kokoro/presubmit/conformance.cfg | 6 + .../google/cloud/bigtable/data/__init__.py | 73 + .../cloud/bigtable/data/_async/__init__.py | 25 + .../bigtable/data/_async/_mutate_rows.py | 226 ++ .../cloud/bigtable/data/_async/_read_rows.py | 343 ++ .../cloud/bigtable/data/_async/client.py | 1228 +++++++ .../bigtable/data/_async/mutations_batcher.py | 501 +++ .../google/cloud/bigtable/data/_helpers.py | 220 ++ .../google/cloud/bigtable/data/exceptions.py | 307 ++ .../google/cloud/bigtable/data/mutations.py | 256 ++ .../bigtable/data/read_modify_write_rules.py | 77 + .../cloud/bigtable/data/read_rows_query.py | 476 +++ .../google/cloud/bigtable/data/row.py | 450 +++ .../google/cloud/bigtable/data/row_filters.py | 968 ++++++ .../google/cloud/bigtable/py.typed | 2 - .../services/bigtable/async_client.py | 102 +- .../bigtable_v2/services/bigtable/client.py | 6 +- .../services/bigtable/transports/__init__.py | 3 + .../bigtable/transports/grpc_asyncio.py | 62 + .../transports/pooled_grpc_asyncio.py | 426 +++ packages/google-cloud-bigtable/noxfile.py | 38 +- packages/google-cloud-bigtable/owlbot.py | 49 +- packages/google-cloud-bigtable/setup.py | 2 +- .../test_proxy/README.md | 60 + .../handlers/client_handler_data.py | 214 ++ .../handlers/client_handler_legacy.py | 235 ++ .../test_proxy/handlers/grpc_handler.py | 148 + .../test_proxy/noxfile.py | 80 + .../test_proxy/protos/bigtable_pb2.py | 145 + 
.../test_proxy/protos/bigtable_pb2_grpc.py | 363 ++ .../test_proxy/protos/data_pb2.py | 68 + .../test_proxy/protos/data_pb2_grpc.py | 4 + .../test_proxy/protos/request_stats_pb2.py | 33 + .../protos/request_stats_pb2_grpc.py | 4 + .../test_proxy/protos/test_proxy_pb2.py | 71 + .../test_proxy/protos/test_proxy_pb2_grpc.py | 433 +++ .../test_proxy/run_tests.sh | 47 + .../test_proxy/test_proxy.py | 193 ++ .../testing/constraints-3.7.txt | 4 +- .../testing/constraints-3.8.txt | 14 + .../tests/system/__init__.py | 2 +- .../tests/system/conftest.py | 204 +- .../tests/system/data/__init__.py | 15 + .../tests/system/data/setup_fixtures.py | 171 + .../tests/system/data/test_system.py | 943 ++++++ .../tests/system/v2_client/__init__.py | 15 + .../tests/system/{ => v2_client}/_helpers.py | 0 .../tests/system/v2_client/conftest.py | 209 ++ .../system/{ => v2_client}/test_data_api.py | 0 .../{ => v2_client}/test_instance_admin.py | 0 .../{ => v2_client}/test_table_admin.py | 0 .../tests/unit/data/__init__.py | 15 + .../unit/data/_async/test__mutate_rows.py | 378 +++ .../tests/unit/data/_async/test__read_rows.py | 391 +++ .../tests/unit/data/_async/test_client.py | 2957 +++++++++++++++++ .../data/_async/test_mutations_batcher.py | 1184 +++++++ .../{ => data}/read-rows-acceptance-test.json | 0 .../tests/unit/data/test__helpers.py | 248 ++ .../tests/unit/data/test_exceptions.py | 533 +++ .../tests/unit/data/test_mutations.py | 708 ++++ .../unit/data/test_read_modify_write_rules.py | 186 ++ .../unit/data/test_read_rows_acceptance.py | 331 ++ .../tests/unit/data/test_read_rows_query.py | 589 ++++ .../tests/unit/data/test_row.py | 718 ++++ .../tests/unit/data/test_row_filters.py | 2039 ++++++++++++ .../tests/unit/v2_client/__init__.py | 15 + .../tests/unit/{ => v2_client}/_testing.py | 0 .../v2_client/read-rows-acceptance-test.json | 1665 ++++++++++ .../unit/{ => v2_client}/test_app_profile.py | 0 .../tests/unit/{ => v2_client}/test_backup.py | 0 .../unit/{ => v2_client}/test_batcher.py | 2 +- .../tests/unit/{ => v2_client}/test_client.py | 0 .../unit/{ => v2_client}/test_cluster.py | 0 .../{ => v2_client}/test_column_family.py | 6 +- .../{ => v2_client}/test_encryption_info.py | 0 .../tests/unit/{ => v2_client}/test_error.py | 0 .../unit/{ => v2_client}/test_instance.py | 0 .../tests/unit/{ => v2_client}/test_policy.py | 0 .../tests/unit/{ => v2_client}/test_row.py | 4 +- .../unit/{ => v2_client}/test_row_data.py | 0 .../unit/{ => v2_client}/test_row_filters.py | 0 .../unit/{ => v2_client}/test_row_merger.py | 0 .../unit/{ => v2_client}/test_row_set.py | 0 .../tests/unit/{ => v2_client}/test_table.py | 0 python-api-core | 1 + 93 files changed, 21353 insertions(+), 277 deletions(-) create mode 160000 gapic-generator-fork create mode 100644 packages/google-cloud-bigtable/.github/workflows/conformance.yaml create mode 100644 packages/google-cloud-bigtable/.gitmodules create mode 100644 packages/google-cloud-bigtable/.kokoro/conformance.sh create mode 100644 packages/google-cloud-bigtable/.kokoro/presubmit/conformance.cfg create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/data/__init__.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/__init__.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_mutate_rows.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_read_rows.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py create mode 100644 
packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/mutations_batcher.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/data/_helpers.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/data/exceptions.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/data/mutations.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/data/read_modify_write_rules.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/data/read_rows_query.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/data/row.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/data/row_filters.py delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/py.typed create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/pooled_grpc_asyncio.py create mode 100644 packages/google-cloud-bigtable/test_proxy/README.md create mode 100644 packages/google-cloud-bigtable/test_proxy/handlers/client_handler_data.py create mode 100644 packages/google-cloud-bigtable/test_proxy/handlers/client_handler_legacy.py create mode 100644 packages/google-cloud-bigtable/test_proxy/handlers/grpc_handler.py create mode 100644 packages/google-cloud-bigtable/test_proxy/noxfile.py create mode 100644 packages/google-cloud-bigtable/test_proxy/protos/bigtable_pb2.py create mode 100644 packages/google-cloud-bigtable/test_proxy/protos/bigtable_pb2_grpc.py create mode 100644 packages/google-cloud-bigtable/test_proxy/protos/data_pb2.py create mode 100644 packages/google-cloud-bigtable/test_proxy/protos/data_pb2_grpc.py create mode 100644 packages/google-cloud-bigtable/test_proxy/protos/request_stats_pb2.py create mode 100644 packages/google-cloud-bigtable/test_proxy/protos/request_stats_pb2_grpc.py create mode 100644 packages/google-cloud-bigtable/test_proxy/protos/test_proxy_pb2.py create mode 100644 packages/google-cloud-bigtable/test_proxy/protos/test_proxy_pb2_grpc.py create mode 100755 packages/google-cloud-bigtable/test_proxy/run_tests.sh create mode 100644 packages/google-cloud-bigtable/test_proxy/test_proxy.py create mode 100644 packages/google-cloud-bigtable/tests/system/data/__init__.py create mode 100644 packages/google-cloud-bigtable/tests/system/data/setup_fixtures.py create mode 100644 packages/google-cloud-bigtable/tests/system/data/test_system.py create mode 100644 packages/google-cloud-bigtable/tests/system/v2_client/__init__.py rename packages/google-cloud-bigtable/tests/system/{ => v2_client}/_helpers.py (100%) create mode 100644 packages/google-cloud-bigtable/tests/system/v2_client/conftest.py rename packages/google-cloud-bigtable/tests/system/{ => v2_client}/test_data_api.py (100%) rename packages/google-cloud-bigtable/tests/system/{ => v2_client}/test_instance_admin.py (100%) rename packages/google-cloud-bigtable/tests/system/{ => v2_client}/test_table_admin.py (100%) create mode 100644 packages/google-cloud-bigtable/tests/unit/data/__init__.py create mode 100644 packages/google-cloud-bigtable/tests/unit/data/_async/test__mutate_rows.py create mode 100644 packages/google-cloud-bigtable/tests/unit/data/_async/test__read_rows.py create mode 100644 packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py create mode 100644 packages/google-cloud-bigtable/tests/unit/data/_async/test_mutations_batcher.py rename packages/google-cloud-bigtable/tests/unit/{ => data}/read-rows-acceptance-test.json (100%) create mode 
100644 packages/google-cloud-bigtable/tests/unit/data/test__helpers.py create mode 100644 packages/google-cloud-bigtable/tests/unit/data/test_exceptions.py create mode 100644 packages/google-cloud-bigtable/tests/unit/data/test_mutations.py create mode 100644 packages/google-cloud-bigtable/tests/unit/data/test_read_modify_write_rules.py create mode 100644 packages/google-cloud-bigtable/tests/unit/data/test_read_rows_acceptance.py create mode 100644 packages/google-cloud-bigtable/tests/unit/data/test_read_rows_query.py create mode 100644 packages/google-cloud-bigtable/tests/unit/data/test_row.py create mode 100644 packages/google-cloud-bigtable/tests/unit/data/test_row_filters.py create mode 100644 packages/google-cloud-bigtable/tests/unit/v2_client/__init__.py rename packages/google-cloud-bigtable/tests/unit/{ => v2_client}/_testing.py (100%) create mode 100644 packages/google-cloud-bigtable/tests/unit/v2_client/read-rows-acceptance-test.json rename packages/google-cloud-bigtable/tests/unit/{ => v2_client}/test_app_profile.py (100%) rename packages/google-cloud-bigtable/tests/unit/{ => v2_client}/test_backup.py (100%) rename packages/google-cloud-bigtable/tests/unit/{ => v2_client}/test_batcher.py (98%) rename packages/google-cloud-bigtable/tests/unit/{ => v2_client}/test_client.py (100%) rename packages/google-cloud-bigtable/tests/unit/{ => v2_client}/test_cluster.py (100%) rename packages/google-cloud-bigtable/tests/unit/{ => v2_client}/test_column_family.py (99%) rename packages/google-cloud-bigtable/tests/unit/{ => v2_client}/test_encryption_info.py (100%) rename packages/google-cloud-bigtable/tests/unit/{ => v2_client}/test_error.py (100%) rename packages/google-cloud-bigtable/tests/unit/{ => v2_client}/test_instance.py (100%) rename packages/google-cloud-bigtable/tests/unit/{ => v2_client}/test_policy.py (100%) rename packages/google-cloud-bigtable/tests/unit/{ => v2_client}/test_row.py (99%) rename packages/google-cloud-bigtable/tests/unit/{ => v2_client}/test_row_data.py (100%) rename packages/google-cloud-bigtable/tests/unit/{ => v2_client}/test_row_filters.py (100%) rename packages/google-cloud-bigtable/tests/unit/{ => v2_client}/test_row_merger.py (100%) rename packages/google-cloud-bigtable/tests/unit/{ => v2_client}/test_row_set.py (100%) rename packages/google-cloud-bigtable/tests/unit/{ => v2_client}/test_table.py (100%) create mode 160000 python-api-core diff --git a/gapic-generator-fork b/gapic-generator-fork new file mode 160000 index 000000000000..b26cda7d163d --- /dev/null +++ b/gapic-generator-fork @@ -0,0 +1 @@ +Subproject commit b26cda7d163d6e0d45c9684f328ca32fb49b799a diff --git a/packages/google-cloud-bigtable/.coveragerc b/packages/google-cloud-bigtable/.coveragerc index 24e7b7e4d313..f12d4dc21a9f 100644 --- a/packages/google-cloud-bigtable/.coveragerc +++ b/packages/google-cloud-bigtable/.coveragerc @@ -22,7 +22,7 @@ omit = google/cloud/bigtable_admin/gapic_version.py [report] -fail_under = 100 +fail_under = 99 show_missing = True exclude_lines = # Re-enable the standard pragma diff --git a/packages/google-cloud-bigtable/.github/sync-repo-settings.yaml b/packages/google-cloud-bigtable/.github/sync-repo-settings.yaml index a0d3362c94af..a8cc5b33b8c3 100644 --- a/packages/google-cloud-bigtable/.github/sync-repo-settings.yaml +++ b/packages/google-cloud-bigtable/.github/sync-repo-settings.yaml @@ -31,6 +31,24 @@ branchProtectionRules: - 'Kokoro' - 'Kokoro system-3.8' - 'cla/google' +- pattern: experimental_v3 + # Can admins overwrite branch protection. 
+ # Defaults to `true` + isAdminEnforced: false + # Number of approving reviews required to update matching branches. + # Defaults to `1` + requiredApprovingReviewCount: 1 + # Are reviews from code owners required to update matching branches. + # Defaults to `false` + requiresCodeOwnerReviews: false + # Require up to date branches + requiresStrictStatusChecks: false + # List of required status check contexts that must pass for commits to be accepted to matching branches. + requiredStatusCheckContexts: + - 'Kokoro' + - 'Kokoro system-3.8' + - 'cla/google' + - 'Conformance / Async v3 Client / Python 3.8' # List of explicit permissions to add (additive only) permissionRules: # Team slug to add to repository permissions diff --git a/packages/google-cloud-bigtable/.github/workflows/conformance.yaml b/packages/google-cloud-bigtable/.github/workflows/conformance.yaml new file mode 100644 index 000000000000..63023d162033 --- /dev/null +++ b/packages/google-cloud-bigtable/.github/workflows/conformance.yaml @@ -0,0 +1,56 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# Github action job to test core java library features on +# downstream client libraries before they are released. +on: + push: + branches: + - main + pull_request: +name: Conformance +jobs: + conformance: + runs-on: ubuntu-latest + strategy: + matrix: + test-version: [ "v0.0.2" ] + py-version: [ 3.8 ] + client-type: [ "Async v3", "Legacy" ] + fail-fast: false + name: "${{ matrix.client-type }} Client / Python ${{ matrix.py-version }} / Test Tag ${{ matrix.test-version }}" + steps: + - uses: actions/checkout@v3 + name: "Checkout python-bigtable" + - uses: actions/checkout@v3 + name: "Checkout conformance tests" + with: + repository: googleapis/cloud-bigtable-clients-test + ref: ${{ matrix.test-version }} + path: cloud-bigtable-clients-test + - uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.py-version }} + - uses: actions/setup-go@v4 + with: + go-version: '>=1.20.2' + - run: chmod +x .kokoro/conformance.sh + - run: pip install -e . 
+ name: "Install python-bigtable from HEAD" + - run: go version + - run: .kokoro/conformance.sh + name: "Run tests" + env: + CLIENT_TYPE: ${{ matrix.client-type }} + PYTHONUNBUFFERED: 1 + diff --git a/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml b/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml index ceb4e0c4d774..7669901c94d4 100644 --- a/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml +++ b/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml @@ -20,7 +20,7 @@ jobs: python-version: '3.8' - name: Setup GCloud SDK - uses: google-github-actions/setup-gcloud@v2.0.1 + uses: google-github-actions/setup-gcloud@v2.0.0 - name: Install / run Nox run: | diff --git a/packages/google-cloud-bigtable/.github/workflows/unittest.yml b/packages/google-cloud-bigtable/.github/workflows/unittest.yml index f4a337c496a0..87d08602f194 100644 --- a/packages/google-cloud-bigtable/.github/workflows/unittest.yml +++ b/packages/google-cloud-bigtable/.github/workflows/unittest.yml @@ -54,4 +54,4 @@ jobs: run: | find .coverage-results -type f -name '*.zip' -exec unzip {} \; coverage combine .coverage-results/**/.coverage* - coverage report --show-missing --fail-under=100 + coverage report --show-missing --fail-under=99 diff --git a/packages/google-cloud-bigtable/.gitmodules b/packages/google-cloud-bigtable/.gitmodules new file mode 100644 index 000000000000..5fa9b1ed5c25 --- /dev/null +++ b/packages/google-cloud-bigtable/.gitmodules @@ -0,0 +1,6 @@ +[submodule "python-api-core"] + path = python-api-core + url = git@github.com:googleapis/python-api-core.git +[submodule "gapic-generator-fork"] + path = gapic-generator-fork + url = git@github.com:googleapis/gapic-generator-python.git diff --git a/packages/google-cloud-bigtable/.kokoro/conformance.sh b/packages/google-cloud-bigtable/.kokoro/conformance.sh new file mode 100644 index 000000000000..1c0b3ee0d876 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/conformance.sh @@ -0,0 +1,52 @@ +#!/bin/bash + +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eo pipefail + +## cd to the parent directory, i.e. the root of the git repo +cd $(dirname $0)/.. + +PROXY_ARGS="" +TEST_ARGS="" +if [[ "${CLIENT_TYPE^^}" == "LEGACY" ]]; then + echo "Using legacy client" + PROXY_ARGS="--legacy-client" + # legacy client does not expose mutate_row. Disable those tests + TEST_ARGS="-skip TestMutateRow_" +fi + +# Build and start the proxy in a separate process +PROXY_PORT=9999 +pushd test_proxy +nohup python test_proxy.py --port $PROXY_PORT $PROXY_ARGS & +proxyPID=$! +popd + +# Kill proxy on exit +function cleanup() { + echo "Cleanup testbench"; + kill $proxyPID +} +trap cleanup EXIT + +# Run the conformance test +pushd cloud-bigtable-clients-test/tests +eval "go test -v -proxy_addr=:$PROXY_PORT $TEST_ARGS" +RETURN_CODE=$? 
+popd + +echo "exiting with ${RETURN_CODE}" +exit ${RETURN_CODE} diff --git a/packages/google-cloud-bigtable/.kokoro/presubmit/conformance.cfg b/packages/google-cloud-bigtable/.kokoro/presubmit/conformance.cfg new file mode 100644 index 000000000000..4f44e8a78df0 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/presubmit/conformance.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "NOX_SESSION" + value: "conformance" +} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/__init__.py new file mode 100644 index 000000000000..5229f8021139 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/__init__.py @@ -0,0 +1,73 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from google.cloud.bigtable import gapic_version as package_version + +from google.cloud.bigtable.data._async.client import BigtableDataClientAsync +from google.cloud.bigtable.data._async.client import TableAsync + +from google.cloud.bigtable.data._async.mutations_batcher import MutationsBatcherAsync + +from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery +from google.cloud.bigtable.data.read_rows_query import RowRange +from google.cloud.bigtable.data.row import Row +from google.cloud.bigtable.data.row import Cell + +from google.cloud.bigtable.data.mutations import Mutation +from google.cloud.bigtable.data.mutations import RowMutationEntry +from google.cloud.bigtable.data.mutations import SetCell +from google.cloud.bigtable.data.mutations import DeleteRangeFromColumn +from google.cloud.bigtable.data.mutations import DeleteAllFromFamily +from google.cloud.bigtable.data.mutations import DeleteAllFromRow + +from google.cloud.bigtable.data.exceptions import InvalidChunk +from google.cloud.bigtable.data.exceptions import FailedMutationEntryError +from google.cloud.bigtable.data.exceptions import FailedQueryShardError + +from google.cloud.bigtable.data.exceptions import RetryExceptionGroup +from google.cloud.bigtable.data.exceptions import MutationsExceptionGroup +from google.cloud.bigtable.data.exceptions import ShardedReadRowsExceptionGroup + +from google.cloud.bigtable.data._helpers import TABLE_DEFAULT +from google.cloud.bigtable.data._helpers import RowKeySamples +from google.cloud.bigtable.data._helpers import ShardedQuery + + +__version__: str = package_version.__version__ + +__all__ = ( + "BigtableDataClientAsync", + "TableAsync", + "RowKeySamples", + "ReadRowsQuery", + "RowRange", + "MutationsBatcherAsync", + "Mutation", + "RowMutationEntry", + "SetCell", + "DeleteRangeFromColumn", + "DeleteAllFromFamily", + "DeleteAllFromRow", + "Row", + "Cell", + "InvalidChunk", + "FailedMutationEntryError", + "FailedQueryShardError", + "RetryExceptionGroup", + "MutationsExceptionGroup", + "ShardedReadRowsExceptionGroup", + "ShardedQuery", + "TABLE_DEFAULT", +) diff --git 
a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/__init__.py new file mode 100644 index 000000000000..e13c9acb7c0e --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/__init__.py @@ -0,0 +1,25 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from google.cloud.bigtable.data._async.client import BigtableDataClientAsync +from google.cloud.bigtable.data._async.client import TableAsync + +from google.cloud.bigtable.data._async.mutations_batcher import MutationsBatcherAsync + + +__all__ = [ + "BigtableDataClientAsync", + "TableAsync", + "MutationsBatcherAsync", +] diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_mutate_rows.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_mutate_rows.py new file mode 100644 index 000000000000..7d11445532c4 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_mutate_rows.py @@ -0,0 +1,226 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import Sequence, TYPE_CHECKING +from dataclasses import dataclass +import functools + +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +import google.cloud.bigtable_v2.types.bigtable as types_pb +import google.cloud.bigtable.data.exceptions as bt_exceptions +from google.cloud.bigtable.data._helpers import _make_metadata +from google.cloud.bigtable.data._helpers import _attempt_timeout_generator +from google.cloud.bigtable.data._helpers import _retry_exception_factory + +# mutate_rows requests are limited to this number of mutations +from google.cloud.bigtable.data.mutations import _MUTATE_ROWS_REQUEST_MUTATION_LIMIT + +if TYPE_CHECKING: + from google.cloud.bigtable_v2.services.bigtable.async_client import ( + BigtableAsyncClient, + ) + from google.cloud.bigtable.data.mutations import RowMutationEntry + from google.cloud.bigtable.data._async.client import TableAsync + + +@dataclass +class _EntryWithProto: + """ + A dataclass to hold a RowMutationEntry and its corresponding proto representation. + """ + + entry: RowMutationEntry + proto: types_pb.MutateRowsRequest.Entry + + +class _MutateRowsOperationAsync: + """ + MutateRowsOperation manages the logic of sending a set of row mutations, + and retrying on failed entries. 
It manages this using the _run_attempt + function, which attempts to mutate all outstanding entries, and raises + _MutateRowsIncomplete if any retryable errors are encountered. + + Errors are exposed as a MutationsExceptionGroup, which contains a list of + exceptions organized by the related failed mutation entries. + """ + + def __init__( + self, + gapic_client: "BigtableAsyncClient", + table: "TableAsync", + mutation_entries: list["RowMutationEntry"], + operation_timeout: float, + attempt_timeout: float | None, + retryable_exceptions: Sequence[type[Exception]] = (), + ): + """ + Args: + - gapic_client: the client to use for the mutate_rows call + - table: the table associated with the request + - mutation_entries: a list of RowMutationEntry objects to send to the server + - operation_timeout: the timeout to use for the entire operation, in seconds. + - attempt_timeout: the timeout to use for each mutate_rows attempt, in seconds. + If not specified, the request will run until operation_timeout is reached. + """ + # check that mutations are within limits + total_mutations = sum(len(entry.mutations) for entry in mutation_entries) + if total_mutations > _MUTATE_ROWS_REQUEST_MUTATION_LIMIT: + raise ValueError( + "mutate_rows requests can contain at most " + f"{_MUTATE_ROWS_REQUEST_MUTATION_LIMIT} mutations across " + f"all entries. Found {total_mutations}." + ) + # create partial function to pass to trigger rpc call + metadata = _make_metadata(table.table_name, table.app_profile_id) + self._gapic_fn = functools.partial( + gapic_client.mutate_rows, + table_name=table.table_name, + app_profile_id=table.app_profile_id, + metadata=metadata, + retry=None, + ) + # create predicate for determining which errors are retryable + self.is_retryable = retries.if_exception_type( + # RPC level errors + *retryable_exceptions, + # Entry level errors + bt_exceptions._MutateRowsIncomplete, + ) + sleep_generator = retries.exponential_sleep_generator(0.01, 2, 60) + self._operation = retries.retry_target_async( + self._run_attempt, + self.is_retryable, + sleep_generator, + operation_timeout, + exception_factory=_retry_exception_factory, + ) + # initialize state + self.timeout_generator = _attempt_timeout_generator( + attempt_timeout, operation_timeout + ) + self.mutations = [_EntryWithProto(m, m._to_pb()) for m in mutation_entries] + self.remaining_indices = list(range(len(self.mutations))) + self.errors: dict[int, list[Exception]] = {} + + async def start(self): + """ + Start the operation, and run until completion + + Raises: + - MutationsExceptionGroup: if any mutations failed + """ + try: + # trigger mutate_rows + await self._operation + except Exception as exc: + # exceptions raised by retryable are added to the list of exceptions for all unfinalized mutations + incomplete_indices = self.remaining_indices.copy() + for idx in incomplete_indices: + self._handle_entry_error(idx, exc) + finally: + # raise exception detailing incomplete mutations + all_errors: list[Exception] = [] + for idx, exc_list in self.errors.items(): + if len(exc_list) == 0: + raise core_exceptions.ClientError( + f"Mutation {idx} failed with no associated errors" + ) + elif len(exc_list) == 1: + cause_exc = exc_list[0] + else: + cause_exc = bt_exceptions.RetryExceptionGroup(exc_list) + entry = self.mutations[idx].entry + all_errors.append( + bt_exceptions.FailedMutationEntryError(idx, entry, cause_exc) + ) + if all_errors: + raise bt_exceptions.MutationsExceptionGroup( + all_errors, len(self.mutations) + ) + + async def _run_attempt(self): 
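        # _run_attempt is the callable handed to retries.retry_target_async in
        # __init__ above: raising _MutateRowsIncomplete when retryable entries
        # remain makes the wrapper sleep and call this method again, until every
        # entry succeeds or operation_timeout expires. Per-entry causes accumulate
        # in self.errors and are surfaced by start() as a MutationsExceptionGroup.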
+ """ + Run a single attempt of the mutate_rows rpc. + + Raises: + - _MutateRowsIncomplete: if there are failed mutations eligible for + retry after the attempt is complete + - GoogleAPICallError: if the gapic rpc fails + """ + request_entries = [self.mutations[idx].proto for idx in self.remaining_indices] + # track mutations in this request that have not been finalized yet + active_request_indices = { + req_idx: orig_idx for req_idx, orig_idx in enumerate(self.remaining_indices) + } + self.remaining_indices = [] + if not request_entries: + # no more mutations. return early + return + # make gapic request + try: + result_generator = await self._gapic_fn( + timeout=next(self.timeout_generator), + entries=request_entries, + retry=None, + ) + async for result_list in result_generator: + for result in result_list.entries: + # convert sub-request index to global index + orig_idx = active_request_indices[result.index] + entry_error = core_exceptions.from_grpc_status( + result.status.code, + result.status.message, + details=result.status.details, + ) + if result.status.code != 0: + # mutation failed; update error list (and remaining_indices if retryable) + self._handle_entry_error(orig_idx, entry_error) + elif orig_idx in self.errors: + # mutation succeeded; remove from error list + del self.errors[orig_idx] + # remove processed entry from active list + del active_request_indices[result.index] + except Exception as exc: + # add this exception to list for each mutation that wasn't + # already handled, and update remaining_indices if mutation is retryable + for idx in active_request_indices.values(): + self._handle_entry_error(idx, exc) + # bubble up exception to be handled by retry wrapper + raise + # check if attempt succeeded, or needs to be retried + if self.remaining_indices: + # unfinished work; raise exception to trigger retry + raise bt_exceptions._MutateRowsIncomplete + + def _handle_entry_error(self, idx: int, exc: Exception): + """ + Add an exception to the list of exceptions for a given mutation index, + and add the index to the list of remaining indices if the exception is + retryable. + + Args: + - idx: the index of the mutation that failed + - exc: the exception to add to the list + """ + entry = self.mutations[idx].entry + self.errors.setdefault(idx, []).append(exc) + if ( + entry.is_idempotent() + and self.is_retryable(exc) + and idx not in self.remaining_indices + ): + self.remaining_indices.append(idx) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_read_rows.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_read_rows.py new file mode 100644 index 000000000000..9e0fd78e1e10 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_read_rows.py @@ -0,0 +1,343 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + AsyncGenerator, + AsyncIterable, + Awaitable, + Sequence, +) + +from google.cloud.bigtable_v2.types import ReadRowsRequest as ReadRowsRequestPB +from google.cloud.bigtable_v2.types import ReadRowsResponse as ReadRowsResponsePB +from google.cloud.bigtable_v2.types import RowSet as RowSetPB +from google.cloud.bigtable_v2.types import RowRange as RowRangePB + +from google.cloud.bigtable.data.row import Row, Cell +from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery +from google.cloud.bigtable.data.exceptions import InvalidChunk +from google.cloud.bigtable.data.exceptions import _RowSetComplete +from google.cloud.bigtable.data._helpers import _attempt_timeout_generator +from google.cloud.bigtable.data._helpers import _make_metadata +from google.cloud.bigtable.data._helpers import _retry_exception_factory + +from google.api_core import retry as retries +from google.api_core.retry import exponential_sleep_generator + +if TYPE_CHECKING: + from google.cloud.bigtable.data._async.client import TableAsync + + +class _ResetRow(Exception): + def __init__(self, chunk): + self.chunk = chunk + + +class _ReadRowsOperationAsync: + """ + ReadRowsOperation handles the logic of merging chunks from a ReadRowsResponse stream + into a stream of Row objects. + + ReadRowsOperation.merge_row_response_stream takes in a stream of ReadRowsResponse + and turns them into a stream of Row objects using an internal + StateMachine. + + ReadRowsOperation(request, client) handles row merging logic end-to-end, including + performing retries on stream errors. + """ + + __slots__ = ( + "attempt_timeout_gen", + "operation_timeout", + "request", + "table", + "_predicate", + "_metadata", + "_last_yielded_row_key", + "_remaining_count", + ) + + def __init__( + self, + query: ReadRowsQuery, + table: "TableAsync", + operation_timeout: float, + attempt_timeout: float, + retryable_exceptions: Sequence[type[Exception]] = (), + ): + self.attempt_timeout_gen = _attempt_timeout_generator( + attempt_timeout, operation_timeout + ) + self.operation_timeout = operation_timeout + if isinstance(query, dict): + self.request = ReadRowsRequestPB( + **query, + table_name=table.table_name, + app_profile_id=table.app_profile_id, + ) + else: + self.request = query._to_pb(table) + self.table = table + self._predicate = retries.if_exception_type(*retryable_exceptions) + self._metadata = _make_metadata( + table.table_name, + table.app_profile_id, + ) + self._last_yielded_row_key: bytes | None = None + self._remaining_count: int | None = self.request.rows_limit or None + + def start_operation(self) -> AsyncGenerator[Row, None]: + """ + Start the read_rows operation, retrying on retryable errors. + """ + return retries.retry_target_stream_async( + self._read_rows_attempt, + self._predicate, + exponential_sleep_generator(0.01, 60, multiplier=2), + self.operation_timeout, + exception_factory=_retry_exception_factory, + ) + + def _read_rows_attempt(self) -> AsyncGenerator[Row, None]: + """ + Attempt a single read_rows rpc call. + This function is intended to be wrapped by retry logic, + which will call this function until it succeeds or + a non-retryable error is raised. 
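        On each retry this method first trims the request via
        _revise_request_rowset so rows at or before _last_yielded_row_key are
        not re-requested, and resets rows_limit to the number of rows still
        outstanding before opening a new gapic read_rows stream.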
+ """ + # revise request keys and ranges between attempts + if self._last_yielded_row_key is not None: + # if this is a retry, try to trim down the request to avoid ones we've already processed + try: + self.request.rows = self._revise_request_rowset( + row_set=self.request.rows, + last_seen_row_key=self._last_yielded_row_key, + ) + except _RowSetComplete: + # if we've already seen all the rows, we're done + return self.merge_rows(None) + # revise the limit based on number of rows already yielded + if self._remaining_count is not None: + self.request.rows_limit = self._remaining_count + if self._remaining_count == 0: + return self.merge_rows(None) + # create and return a new row merger + gapic_stream = self.table.client._gapic_client.read_rows( + self.request, + timeout=next(self.attempt_timeout_gen), + metadata=self._metadata, + retry=None, + ) + chunked_stream = self.chunk_stream(gapic_stream) + return self.merge_rows(chunked_stream) + + async def chunk_stream( + self, stream: Awaitable[AsyncIterable[ReadRowsResponsePB]] + ) -> AsyncGenerator[ReadRowsResponsePB.CellChunk, None]: + """ + process chunks out of raw read_rows stream + """ + async for resp in await stream: + # extract proto from proto-plus wrapper + resp = resp._pb + + # handle last_scanned_row_key packets, sent when server + # has scanned past the end of the row range + if resp.last_scanned_row_key: + if ( + self._last_yielded_row_key is not None + and resp.last_scanned_row_key <= self._last_yielded_row_key + ): + raise InvalidChunk("last scanned out of order") + self._last_yielded_row_key = resp.last_scanned_row_key + + current_key = None + # process each chunk in the response + for c in resp.chunks: + if current_key is None: + current_key = c.row_key + if current_key is None: + raise InvalidChunk("first chunk is missing a row key") + elif ( + self._last_yielded_row_key + and current_key <= self._last_yielded_row_key + ): + raise InvalidChunk("row keys should be strictly increasing") + + yield c + + if c.reset_row: + current_key = None + elif c.commit_row: + # update row state after each commit + self._last_yielded_row_key = current_key + if self._remaining_count is not None: + self._remaining_count -= 1 + if self._remaining_count < 0: + raise InvalidChunk("emit count exceeds row limit") + current_key = None + + @staticmethod + async def merge_rows( + chunks: AsyncGenerator[ReadRowsResponsePB.CellChunk, None] | None + ): + """ + Merge chunks into rows + """ + if chunks is None: + return + it = chunks.__aiter__() + # For each row + while True: + try: + c = await it.__anext__() + except StopAsyncIteration: + # stream complete + return + row_key = c.row_key + + if not row_key: + raise InvalidChunk("first row chunk is missing key") + + cells = [] + + # shared per cell storage + family: str | None = None + qualifier: bytes | None = None + + try: + # for each cell + while True: + if c.reset_row: + raise _ResetRow(c) + k = c.row_key + f = c.family_name.value + q = c.qualifier.value if c.HasField("qualifier") else None + if k and k != row_key: + raise InvalidChunk("unexpected new row key") + if f: + family = f + if q is not None: + qualifier = q + else: + raise InvalidChunk("new family without qualifier") + elif family is None: + raise InvalidChunk("missing family") + elif q is not None: + if family is None: + raise InvalidChunk("new qualifier without family") + qualifier = q + elif qualifier is None: + raise InvalidChunk("missing qualifier") + + ts = c.timestamp_micros + labels = c.labels if c.labels else [] + value = c.value + + 
# merge split cells + if c.value_size > 0: + buffer = [value] + while c.value_size > 0: + # throws when premature end + c = await it.__anext__() + + t = c.timestamp_micros + cl = c.labels + k = c.row_key + if ( + c.HasField("family_name") + and c.family_name.value != family + ): + raise InvalidChunk("family changed mid cell") + if ( + c.HasField("qualifier") + and c.qualifier.value != qualifier + ): + raise InvalidChunk("qualifier changed mid cell") + if t and t != ts: + raise InvalidChunk("timestamp changed mid cell") + if cl and cl != labels: + raise InvalidChunk("labels changed mid cell") + if k and k != row_key: + raise InvalidChunk("row key changed mid cell") + + if c.reset_row: + raise _ResetRow(c) + buffer.append(c.value) + value = b"".join(buffer) + cells.append( + Cell(value, row_key, family, qualifier, ts, list(labels)) + ) + if c.commit_row: + yield Row(row_key, cells) + break + c = await it.__anext__() + except _ResetRow as e: + c = e.chunk + if ( + c.row_key + or c.HasField("family_name") + or c.HasField("qualifier") + or c.timestamp_micros + or c.labels + or c.value + ): + raise InvalidChunk("reset row with data") + continue + except StopAsyncIteration: + raise InvalidChunk("premature end of stream") + + @staticmethod + def _revise_request_rowset( + row_set: RowSetPB, + last_seen_row_key: bytes, + ) -> RowSetPB: + """ + Revise the rows in the request to avoid ones we've already processed. + + Args: + - row_set: the row set from the request + - last_seen_row_key: the last row key encountered + Raises: + - _RowSetComplete: if there are no rows left to process after the revision + """ + # if user is doing a whole table scan, start a new one with the last seen key + if row_set is None or (not row_set.row_ranges and row_set.row_keys is not None): + last_seen = last_seen_row_key + return RowSetPB(row_ranges=[RowRangePB(start_key_open=last_seen)]) + # remove seen keys from user-specific key list + adjusted_keys: list[bytes] = [ + k for k in row_set.row_keys if k > last_seen_row_key + ] + # adjust ranges to ignore keys before last seen + adjusted_ranges: list[RowRangePB] = [] + for row_range in row_set.row_ranges: + end_key = row_range.end_key_closed or row_range.end_key_open or None + if end_key is None or end_key > last_seen_row_key: + # end range is after last seen key + new_range = RowRangePB(row_range) + start_key = row_range.start_key_closed or row_range.start_key_open + if start_key is None or start_key <= last_seen_row_key: + # replace start key with last seen + new_range.start_key_open = last_seen_row_key + adjusted_ranges.append(new_range) + if len(adjusted_keys) == 0 and len(adjusted_ranges) == 0: + # if the query is empty after revision, raise an exception + # this will avoid an unwanted full table scan + raise _RowSetComplete() + return RowSetPB(row_keys=adjusted_keys, row_ranges=adjusted_ranges) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py new file mode 100644 index 000000000000..da54b37cb29c --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py @@ -0,0 +1,1228 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from __future__ import annotations + +from typing import ( + cast, + Any, + AsyncIterable, + Optional, + Set, + Sequence, + TYPE_CHECKING, +) + +import asyncio +import grpc +import time +import warnings +import sys +import random +import os + +from functools import partial + +from google.cloud.bigtable_v2.services.bigtable.client import BigtableClientMeta +from google.cloud.bigtable_v2.services.bigtable.async_client import BigtableAsyncClient +from google.cloud.bigtable_v2.services.bigtable.async_client import DEFAULT_CLIENT_INFO +from google.cloud.bigtable_v2.services.bigtable.transports.pooled_grpc_asyncio import ( + PooledBigtableGrpcAsyncIOTransport, + PooledChannel, +) +from google.cloud.bigtable_v2.types.bigtable import PingAndWarmRequest +from google.cloud.client import ClientWithProject +from google.cloud.environment_vars import BIGTABLE_EMULATOR # type: ignore +from google.api_core import retry as retries +from google.api_core.exceptions import DeadlineExceeded +from google.api_core.exceptions import ServiceUnavailable +from google.api_core.exceptions import Aborted +from google.cloud.bigtable.data._async._read_rows import _ReadRowsOperationAsync + +import google.auth.credentials +import google.auth._default +from google.api_core import client_options as client_options_lib +from google.cloud.bigtable.client import _DEFAULT_BIGTABLE_EMULATOR_CLIENT +from google.cloud.bigtable.data.row import Row +from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery +from google.cloud.bigtable.data.exceptions import FailedQueryShardError +from google.cloud.bigtable.data.exceptions import ShardedReadRowsExceptionGroup + +from google.cloud.bigtable.data.mutations import Mutation, RowMutationEntry +from google.cloud.bigtable.data._async._mutate_rows import _MutateRowsOperationAsync +from google.cloud.bigtable.data._helpers import TABLE_DEFAULT +from google.cloud.bigtable.data._helpers import _WarmedInstanceKey +from google.cloud.bigtable.data._helpers import _CONCURRENCY_LIMIT +from google.cloud.bigtable.data._helpers import _make_metadata +from google.cloud.bigtable.data._helpers import _retry_exception_factory +from google.cloud.bigtable.data._helpers import _validate_timeouts +from google.cloud.bigtable.data._helpers import _get_retryable_errors +from google.cloud.bigtable.data._helpers import _get_timeouts +from google.cloud.bigtable.data._helpers import _attempt_timeout_generator +from google.cloud.bigtable.data._async.mutations_batcher import MutationsBatcherAsync +from google.cloud.bigtable.data._async.mutations_batcher import _MB_SIZE +from google.cloud.bigtable.data.read_modify_write_rules import ReadModifyWriteRule +from google.cloud.bigtable.data.row_filters import RowFilter +from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter +from google.cloud.bigtable.data.row_filters import CellsRowLimitFilter +from google.cloud.bigtable.data.row_filters import RowFilterChain + + +if TYPE_CHECKING: + from google.cloud.bigtable.data._helpers import RowKeySamples + from google.cloud.bigtable.data._helpers import ShardedQuery + + +class 
BigtableDataClientAsync(ClientWithProject):
+    def __init__(
+        self,
+        *,
+        project: str | None = None,
+        pool_size: int = 3,
+        credentials: google.auth.credentials.Credentials | None = None,
+        client_options: dict[str, Any]
+        | "google.api_core.client_options.ClientOptions"
+        | None = None,
+    ):
+        """
+        Create a client instance for the Bigtable Data API
+
+        Client should be created within an async context (running event loop)
+
+        Args:
+            project: the project which the client acts on behalf of.
+                If not passed, falls back to the default inferred
+                from the environment.
+            pool_size: The number of grpc channels to maintain
+                in the internal channel pool.
+            credentials:
+                The OAuth2 Credentials to use for this
+                client. If not passed (and if no ``_http`` object is
+                passed), falls back to the default inferred from the
+                environment.
+            client_options (Optional[Union[dict, google.api_core.client_options.ClientOptions]]):
+                Client options used to set user options
+                on the client. API Endpoint should be set through client_options.
+        Raises:
+            - RuntimeError if called outside of an async context (no running event loop)
+            - ValueError if pool_size is less than 1
+        """
+        # set up transport in registry
+        transport_str = f"pooled_grpc_asyncio_{pool_size}"
+        transport = PooledBigtableGrpcAsyncIOTransport.with_fixed_size(pool_size)
+        BigtableClientMeta._transport_registry[transport_str] = transport
+        # set up client info headers for veneer library
+        client_info = DEFAULT_CLIENT_INFO
+        client_info.client_library_version = self._client_version()
+        # parse client options
+        if type(client_options) is dict:
+            client_options = client_options_lib.from_dict(client_options)
+        client_options = cast(
+            Optional[client_options_lib.ClientOptions], client_options
+        )
+        self._emulator_host = os.getenv(BIGTABLE_EMULATOR)
+        if self._emulator_host is not None:
+            # use insecure channel if emulator is set
+            if credentials is None:
+                credentials = google.auth.credentials.AnonymousCredentials()
+            if project is None:
+                project = _DEFAULT_BIGTABLE_EMULATOR_CLIENT
+        # initialize client
+        ClientWithProject.__init__(
+            self,
+            credentials=credentials,
+            project=project,
+            client_options=client_options,
+        )
+        self._gapic_client = BigtableAsyncClient(
+            transport=transport_str,
+            credentials=credentials,
+            client_options=client_options,
+            client_info=client_info,
+        )
+        self.transport = cast(
+            PooledBigtableGrpcAsyncIOTransport, self._gapic_client.transport
+        )
+        # keep track of active instances for warmup on channel refresh
+        self._active_instances: Set[_WarmedInstanceKey] = set()
+        # keep track of table objects associated with each instance
+        # only remove instance from _active_instances when all associated tables remove it
+        self._instance_owners: dict[_WarmedInstanceKey, Set[int]] = {}
+        self._channel_init_time = time.monotonic()
+        self._channel_refresh_tasks: list[asyncio.Task[None]] = []
+        if self._emulator_host is not None:
+            # connect to an emulator host
+            warnings.warn(
+                "Connecting to Bigtable emulator at {}".format(self._emulator_host),
+                RuntimeWarning,
+                stacklevel=2,
+            )
+            self.transport._grpc_channel = PooledChannel(
+                pool_size=pool_size,
+                host=self._emulator_host,
+                insecure=True,
+            )
+            # refresh cached stubs to use emulator pool
+            self.transport._stubs = {}
+            self.transport._prep_wrapped_messages(client_info)
+        else:
+            # attempt to start background channel refresh tasks
+            try:
+                self._start_background_channel_refresh()
+            except RuntimeError:
+                warnings.warn(
+                    f"{self.__class__.__name__} should be started in
an " + "asyncio event loop. Channel refresh will not be started", + RuntimeWarning, + stacklevel=2, + ) + + @staticmethod + def _client_version() -> str: + """ + Helper function to return the client version string for this client + """ + return f"{google.cloud.bigtable.__version__}-data-async" + + def _start_background_channel_refresh(self) -> None: + """ + Starts a background task to ping and warm each channel in the pool + Raises: + - RuntimeError if not called in an asyncio event loop + """ + if not self._channel_refresh_tasks and not self._emulator_host: + # raise RuntimeError if there is no event loop + asyncio.get_running_loop() + for channel_idx in range(self.transport.pool_size): + refresh_task = asyncio.create_task(self._manage_channel(channel_idx)) + if sys.version_info >= (3, 8): + # task names supported in Python 3.8+ + refresh_task.set_name( + f"{self.__class__.__name__} channel refresh {channel_idx}" + ) + self._channel_refresh_tasks.append(refresh_task) + + async def close(self, timeout: float = 2.0): + """ + Cancel all background tasks + """ + for task in self._channel_refresh_tasks: + task.cancel() + group = asyncio.gather(*self._channel_refresh_tasks, return_exceptions=True) + await asyncio.wait_for(group, timeout=timeout) + await self.transport.close() + self._channel_refresh_tasks = [] + + async def _ping_and_warm_instances( + self, channel: grpc.aio.Channel, instance_key: _WarmedInstanceKey | None = None + ) -> list[BaseException | None]: + """ + Prepares the backend for requests on a channel + + Pings each Bigtable instance registered in `_active_instances` on the client + + Args: + - channel: grpc channel to warm + - instance_key: if provided, only warm the instance associated with the key + Returns: + - sequence of results or exceptions from the ping requests + """ + instance_list = ( + [instance_key] if instance_key is not None else self._active_instances + ) + ping_rpc = channel.unary_unary( + "/google.bigtable.v2.Bigtable/PingAndWarm", + request_serializer=PingAndWarmRequest.serialize, + ) + # prepare list of coroutines to run + tasks = [ + ping_rpc( + request={"name": instance_name, "app_profile_id": app_profile_id}, + metadata=[ + ( + "x-goog-request-params", + f"name={instance_name}&app_profile_id={app_profile_id}", + ) + ], + wait_for_ready=True, + ) + for (instance_name, table_name, app_profile_id) in instance_list + ] + # execute coroutines in parallel + result_list = await asyncio.gather(*tasks, return_exceptions=True) + # return None in place of empty successful responses + return [r or None for r in result_list] + + async def _manage_channel( + self, + channel_idx: int, + refresh_interval_min: float = 60 * 35, + refresh_interval_max: float = 60 * 45, + grace_period: float = 60 * 10, + ) -> None: + """ + Background coroutine that periodically refreshes and warms a grpc channel + + The backend will automatically close channels after 60 minutes, so + `refresh_interval` + `grace_period` should be < 60 minutes + + Runs continuously until the client is closed + + Args: + channel_idx: index of the channel in the transport's channel pool + refresh_interval_min: minimum interval before initiating refresh + process in seconds. Actual interval will be a random value + between `refresh_interval_min` and `refresh_interval_max` + refresh_interval_max: maximum interval before initiating refresh + process in seconds. 
Actual interval will be a random value + between `refresh_interval_min` and `refresh_interval_max` + grace_period: time to allow previous channel to serve existing + requests before closing, in seconds + """ + first_refresh = self._channel_init_time + random.uniform( + refresh_interval_min, refresh_interval_max + ) + next_sleep = max(first_refresh - time.monotonic(), 0) + if next_sleep > 0: + # warm the current channel immediately + channel = self.transport.channels[channel_idx] + await self._ping_and_warm_instances(channel) + # continuously refresh the channel every `refresh_interval` seconds + while True: + await asyncio.sleep(next_sleep) + # prepare new channel for use + new_channel = self.transport.grpc_channel._create_channel() + await self._ping_and_warm_instances(new_channel) + # cycle channel out of use, with long grace window before closure + start_timestamp = time.time() + await self.transport.replace_channel( + channel_idx, grace=grace_period, swap_sleep=10, new_channel=new_channel + ) + # subtract the time spent waiting for the channel to be replaced + next_refresh = random.uniform(refresh_interval_min, refresh_interval_max) + next_sleep = next_refresh - (time.time() - start_timestamp) + + async def _register_instance(self, instance_id: str, owner: TableAsync) -> None: + """ + Registers an instance with the client, and warms the channel pool + for the instance + The client will periodically refresh grpc channel pool used to make + requests, and new channels will be warmed for each registered instance + Channels will not be refreshed unless at least one instance is registered + + Args: + - instance_id: id of the instance to register. + - owner: table that owns the instance. Owners will be tracked in + _instance_owners, and instances will only be unregistered when all + owners call _remove_instance_registration + """ + instance_name = self._gapic_client.instance_path(self.project, instance_id) + instance_key = _WarmedInstanceKey( + instance_name, owner.table_name, owner.app_profile_id + ) + self._instance_owners.setdefault(instance_key, set()).add(id(owner)) + if instance_name not in self._active_instances: + self._active_instances.add(instance_key) + if self._channel_refresh_tasks: + # refresh tasks already running + # call ping and warm on all existing channels + for channel in self.transport.channels: + await self._ping_and_warm_instances(channel, instance_key) + else: + # refresh tasks aren't active. start them as background tasks + self._start_background_channel_refresh() + + async def _remove_instance_registration( + self, instance_id: str, owner: TableAsync + ) -> bool: + """ + Removes an instance from the client's registered instances, to prevent + warming new channels for the instance + + If instance_id is not registered, or is still in use by other tables, returns False + + Args: + - instance_id: id of the instance to remove + - owner: table that owns the instance. 
Owners will be tracked in + _instance_owners, and instances will only be unregistered when all + owners call _remove_instance_registration + Returns: + - True if instance was removed + """ + instance_name = self._gapic_client.instance_path(self.project, instance_id) + instance_key = _WarmedInstanceKey( + instance_name, owner.table_name, owner.app_profile_id + ) + owner_list = self._instance_owners.get(instance_key, set()) + try: + owner_list.remove(id(owner)) + if len(owner_list) == 0: + self._active_instances.remove(instance_key) + return True + except KeyError: + return False + + def get_table(self, instance_id: str, table_id: str, *args, **kwargs) -> TableAsync: + """ + Returns a table instance for making data API requests. All arguments are passed + directly to the TableAsync constructor. + + Args: + instance_id: The Bigtable instance ID to associate with this client. + instance_id is combined with the client's project to fully + specify the instance + table_id: The ID of the table. table_id is combined with the + instance_id and the client's project to fully specify the table + app_profile_id: The app profile to associate with requests. + https://cloud.google.com/bigtable/docs/app-profiles + default_read_rows_operation_timeout: The default timeout for read rows + operations, in seconds. If not set, defaults to 600 seconds (10 minutes) + default_read_rows_attempt_timeout: The default timeout for individual + read rows rpc requests, in seconds. If not set, defaults to 20 seconds + default_mutate_rows_operation_timeout: The default timeout for mutate rows + operations, in seconds. If not set, defaults to 600 seconds (10 minutes) + default_mutate_rows_attempt_timeout: The default timeout for individual + mutate rows rpc requests, in seconds. If not set, defaults to 60 seconds + default_operation_timeout: The default timeout for all other operations, in + seconds. If not set, defaults to 60 seconds + default_attempt_timeout: The default timeout for all other individual rpc + requests, in seconds. If not set, defaults to 20 seconds + default_read_rows_retryable_errors: a list of errors that will be retried + if encountered during read_rows and related operations. + Defaults to 4 (DeadlineExceeded), 14 (ServiceUnavailable), and 10 (Aborted) + default_mutate_rows_retryable_errors: a list of errors that will be retried + if encountered during mutate_rows and related operations. + Defaults to 4 (DeadlineExceeded) and 14 (ServiceUnavailable) + default_retryable_errors: a list of errors that will be retried if + encountered during all other operations. 
+ Defaults to 4 (DeadlineExceeded) and 14 (ServiceUnavailable) + """ + return TableAsync(self, instance_id, table_id, *args, **kwargs) + + async def __aenter__(self): + self._start_background_channel_refresh() + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + await self.close() + await self._gapic_client.__aexit__(exc_type, exc_val, exc_tb) + + +class TableAsync: + """ + Main Data API surface + + Table object maintains table_id, and app_profile_id context, and passes them with + each call + """ + + def __init__( + self, + client: BigtableDataClientAsync, + instance_id: str, + table_id: str, + app_profile_id: str | None = None, + *, + default_read_rows_operation_timeout: float = 600, + default_read_rows_attempt_timeout: float | None = 20, + default_mutate_rows_operation_timeout: float = 600, + default_mutate_rows_attempt_timeout: float | None = 60, + default_operation_timeout: float = 60, + default_attempt_timeout: float | None = 20, + default_read_rows_retryable_errors: Sequence[type[Exception]] = ( + DeadlineExceeded, + ServiceUnavailable, + Aborted, + ), + default_mutate_rows_retryable_errors: Sequence[type[Exception]] = ( + DeadlineExceeded, + ServiceUnavailable, + ), + default_retryable_errors: Sequence[type[Exception]] = ( + DeadlineExceeded, + ServiceUnavailable, + ), + ): + """ + Initialize a Table instance + + Must be created within an async context (running event loop) + + Args: + instance_id: The Bigtable instance ID to associate with this client. + instance_id is combined with the client's project to fully + specify the instance + table_id: The ID of the table. table_id is combined with the + instance_id and the client's project to fully specify the table + app_profile_id: The app profile to associate with requests. + https://cloud.google.com/bigtable/docs/app-profiles + default_read_rows_operation_timeout: The default timeout for read rows + operations, in seconds. If not set, defaults to 600 seconds (10 minutes) + default_read_rows_attempt_timeout: The default timeout for individual + read rows rpc requests, in seconds. If not set, defaults to 20 seconds + default_mutate_rows_operation_timeout: The default timeout for mutate rows + operations, in seconds. If not set, defaults to 600 seconds (10 minutes) + default_mutate_rows_attempt_timeout: The default timeout for individual + mutate rows rpc requests, in seconds. If not set, defaults to 60 seconds + default_operation_timeout: The default timeout for all other operations, in + seconds. If not set, defaults to 60 seconds + default_attempt_timeout: The default timeout for all other individual rpc + requests, in seconds. If not set, defaults to 20 seconds + default_read_rows_retryable_errors: a list of errors that will be retried + if encountered during read_rows and related operations. + Defaults to 4 (DeadlineExceeded), 14 (ServiceUnavailable), and 10 (Aborted) + default_mutate_rows_retryable_errors: a list of errors that will be retried + if encountered during mutate_rows and related operations. + Defaults to 4 (DeadlineExceeded) and 14 (ServiceUnavailable) + default_retryable_errors: a list of errors that will be retried if + encountered during all other operations. 
+ Defaults to 4 (DeadlineExceeded) and 14 (ServiceUnavailable) + Raises: + - RuntimeError if called outside of an async context (no running event loop) + """ + # NOTE: any changes to the signature of this method should also be reflected + # in client.get_table() + # validate timeouts + _validate_timeouts( + default_operation_timeout, default_attempt_timeout, allow_none=True + ) + _validate_timeouts( + default_read_rows_operation_timeout, + default_read_rows_attempt_timeout, + allow_none=True, + ) + _validate_timeouts( + default_mutate_rows_operation_timeout, + default_mutate_rows_attempt_timeout, + allow_none=True, + ) + + self.client = client + self.instance_id = instance_id + self.instance_name = self.client._gapic_client.instance_path( + self.client.project, instance_id + ) + self.table_id = table_id + self.table_name = self.client._gapic_client.table_path( + self.client.project, instance_id, table_id + ) + self.app_profile_id = app_profile_id + + self.default_operation_timeout = default_operation_timeout + self.default_attempt_timeout = default_attempt_timeout + self.default_read_rows_operation_timeout = default_read_rows_operation_timeout + self.default_read_rows_attempt_timeout = default_read_rows_attempt_timeout + self.default_mutate_rows_operation_timeout = ( + default_mutate_rows_operation_timeout + ) + self.default_mutate_rows_attempt_timeout = default_mutate_rows_attempt_timeout + + self.default_read_rows_retryable_errors = ( + default_read_rows_retryable_errors or () + ) + self.default_mutate_rows_retryable_errors = ( + default_mutate_rows_retryable_errors or () + ) + self.default_retryable_errors = default_retryable_errors or () + + # raises RuntimeError if called outside of an async context (no running event loop) + try: + self._register_instance_task = asyncio.create_task( + self.client._register_instance(instance_id, self) + ) + except RuntimeError as e: + raise RuntimeError( + f"{self.__class__.__name__} must be created within an async event loop context." + ) from e + + async def read_rows_stream( + self, + query: ReadRowsQuery, + *, + operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS, + attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS, + retryable_errors: Sequence[type[Exception]] + | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS, + ) -> AsyncIterable[Row]: + """ + Read a set of rows from the table, based on the specified query. + Returns an iterator to asynchronously stream back row data. + + Failed requests within operation_timeout will be retried based on the + retryable_errors list until operation_timeout is reached. + + Args: + - query: contains details about which rows to return + - operation_timeout: the time budget for the entire operation, in seconds. + Failed requests will be retried within the budget. + Defaults to the Table's default_read_rows_operation_timeout + - attempt_timeout: the time budget for an individual network request, in seconds. + If it takes longer than this time to complete, the request will be cancelled with + a DeadlineExceeded exception, and a retry will be attempted. + Defaults to the Table's default_read_rows_attempt_timeout. + If None, defaults to operation_timeout. + - retryable_errors: a list of errors that will be retried if encountered. 
+                Defaults to the Table's default_read_rows_retryable_errors
+        Returns:
+            - an asynchronous iterator that yields rows returned by the query
+        Raises:
+            - DeadlineExceeded: raised after operation timeout
+                will be chained with a RetryExceptionGroup containing GoogleAPIError exceptions
+                from any retries that failed
+            - GoogleAPIError: raised if the request encounters an unrecoverable error
+        """
+        operation_timeout, attempt_timeout = _get_timeouts(
+            operation_timeout, attempt_timeout, self
+        )
+        retryable_excs = _get_retryable_errors(retryable_errors, self)
+
+        row_merger = _ReadRowsOperationAsync(
+            query,
+            self,
+            operation_timeout=operation_timeout,
+            attempt_timeout=attempt_timeout,
+            retryable_exceptions=retryable_excs,
+        )
+        return row_merger.start_operation()
+
+    async def read_rows(
+        self,
+        query: ReadRowsQuery,
+        *,
+        operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+        attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+        retryable_errors: Sequence[type[Exception]]
+        | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+    ) -> list[Row]:
+        """
+        Read a set of rows from the table, based on the specified query.
+        Returns results as a list of Row objects when the request is complete.
+        For streamed results, use read_rows_stream.
+
+        Failed requests within operation_timeout will be retried based on the
+        retryable_errors list until operation_timeout is reached.
+
+        Args:
+            - query: contains details about which rows to return
+            - operation_timeout: the time budget for the entire operation, in seconds.
+                Failed requests will be retried within the budget.
+                Defaults to the Table's default_read_rows_operation_timeout
+            - attempt_timeout: the time budget for an individual network request, in seconds.
+                If it takes longer than this time to complete, the request will be cancelled with
+                a DeadlineExceeded exception, and a retry will be attempted.
+                Defaults to the Table's default_read_rows_attempt_timeout.
+                If None, defaults to operation_timeout.
+            - retryable_errors: a list of errors that will be retried if encountered.
+                Defaults to the Table's default_read_rows_retryable_errors.
+        Returns:
+            - a list of Rows returned by the query
+        Raises:
+            - DeadlineExceeded: raised after operation timeout
+                will be chained with a RetryExceptionGroup containing GoogleAPIError exceptions
+                from any retries that failed
+            - GoogleAPIError: raised if the request encounters an unrecoverable error
+        """
+        row_generator = await self.read_rows_stream(
+            query,
+            operation_timeout=operation_timeout,
+            attempt_timeout=attempt_timeout,
+            retryable_errors=retryable_errors,
+        )
+        return [row async for row in row_generator]
+
+    async def read_row(
+        self,
+        row_key: str | bytes,
+        *,
+        row_filter: RowFilter | None = None,
+        operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+        attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+        retryable_errors: Sequence[type[Exception]]
+        | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS,
+    ) -> Row | None:
+        """
+        Read a single row from the table, based on the specified key.
+
+        Failed requests within operation_timeout will be retried based on the
+        retryable_errors list until operation_timeout is reached.
+
+        Args:
+            - row_key: the key of the row to read
+            - row_filter: a filter to apply to the contents of the row
+            - operation_timeout: the time budget for the entire operation, in seconds.
+                Failed requests will be retried within the budget.
+ Defaults to the Table's default_read_rows_operation_timeout + - attempt_timeout: the time budget for an individual network request, in seconds. + If it takes longer than this time to complete, the request will be cancelled with + a DeadlineExceeded exception, and a retry will be attempted. + Defaults to the Table's default_read_rows_attempt_timeout. + If None, defaults to operation_timeout. + - retryable_errors: a list of errors that will be retried if encountered. + Defaults to the Table's default_read_rows_retryable_errors. + Returns: + - a Row object if the row exists, otherwise None + Raises: + - DeadlineExceeded: raised after operation timeout + will be chained with a RetryExceptionGroup containing GoogleAPIError exceptions + from any retries that failed + - GoogleAPIError: raised if the request encounters an unrecoverable error + """ + if row_key is None: + raise ValueError("row_key must be string or bytes") + query = ReadRowsQuery(row_keys=row_key, row_filter=row_filter, limit=1) + results = await self.read_rows( + query, + operation_timeout=operation_timeout, + attempt_timeout=attempt_timeout, + retryable_errors=retryable_errors, + ) + if len(results) == 0: + return None + return results[0] + + async def read_rows_sharded( + self, + sharded_query: ShardedQuery, + *, + operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS, + attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS, + retryable_errors: Sequence[type[Exception]] + | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS, + ) -> list[Row]: + """ + Runs a sharded query in parallel, then return the results in a single list. + Results will be returned in the order of the input queries. + + This function is intended to be run on the results on a query.shard() call: + + ``` + table_shard_keys = await table.sample_row_keys() + query = ReadRowsQuery(...) + shard_queries = query.shard(table_shard_keys) + results = await table.read_rows_sharded(shard_queries) + ``` + + Args: + - sharded_query: a sharded query to execute + - operation_timeout: the time budget for the entire operation, in seconds. + Failed requests will be retried within the budget. + Defaults to the Table's default_read_rows_operation_timeout + - attempt_timeout: the time budget for an individual network request, in seconds. + If it takes longer than this time to complete, the request will be cancelled with + a DeadlineExceeded exception, and a retry will be attempted. + Defaults to the Table's default_read_rows_attempt_timeout. + If None, defaults to operation_timeout. + - retryable_errors: a list of errors that will be retried if encountered. + Defaults to the Table's default_read_rows_retryable_errors. 
+ Raises: + - ShardedReadRowsExceptionGroup: if any of the queries failed + - ValueError: if the query_list is empty + """ + if not sharded_query: + raise ValueError("empty sharded_query") + # reduce operation_timeout between batches + operation_timeout, attempt_timeout = _get_timeouts( + operation_timeout, attempt_timeout, self + ) + timeout_generator = _attempt_timeout_generator( + operation_timeout, operation_timeout + ) + # submit shards in batches if the number of shards goes over _CONCURRENCY_LIMIT + batched_queries = [ + sharded_query[i : i + _CONCURRENCY_LIMIT] + for i in range(0, len(sharded_query), _CONCURRENCY_LIMIT) + ] + # run batches and collect results + results_list = [] + error_dict = {} + shard_idx = 0 + for batch in batched_queries: + batch_operation_timeout = next(timeout_generator) + routine_list = [ + self.read_rows( + query, + operation_timeout=batch_operation_timeout, + attempt_timeout=min(attempt_timeout, batch_operation_timeout), + retryable_errors=retryable_errors, + ) + for query in batch + ] + batch_result = await asyncio.gather(*routine_list, return_exceptions=True) + for result in batch_result: + if isinstance(result, Exception): + error_dict[shard_idx] = result + elif isinstance(result, BaseException): + # BaseException not expected; raise immediately + raise result + else: + results_list.extend(result) + shard_idx += 1 + if error_dict: + # if any sub-request failed, raise an exception instead of returning results + raise ShardedReadRowsExceptionGroup( + [ + FailedQueryShardError(idx, sharded_query[idx], e) + for idx, e in error_dict.items() + ], + results_list, + len(sharded_query), + ) + return results_list + + async def row_exists( + self, + row_key: str | bytes, + *, + operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS, + attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS, + retryable_errors: Sequence[type[Exception]] + | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS, + ) -> bool: + """ + Return a boolean indicating whether the specified row exists in the table. + uses the filters: chain(limit cells per row = 1, strip value) + + Args: + - row_key: the key of the row to check + - operation_timeout: the time budget for the entire operation, in seconds. + Failed requests will be retried within the budget. + Defaults to the Table's default_read_rows_operation_timeout + - attempt_timeout: the time budget for an individual network request, in seconds. + If it takes longer than this time to complete, the request will be cancelled with + a DeadlineExceeded exception, and a retry will be attempted. + Defaults to the Table's default_read_rows_attempt_timeout. + If None, defaults to operation_timeout. + - retryable_errors: a list of errors that will be retried if encountered. + Defaults to the Table's default_read_rows_retryable_errors. 
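+        Example (illustrative sketch; assumes an existing ``table`` created in an
+        async context, with a hypothetical row key):
+
+        ```
+        if await table.row_exists(b"user#1234"):
+            print("row found")
+        ```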
+ Returns: + - a bool indicating whether the row exists + Raises: + - DeadlineExceeded: raised after operation timeout + will be chained with a RetryExceptionGroup containing GoogleAPIError exceptions + from any retries that failed + - GoogleAPIError: raised if the request encounters an unrecoverable error + """ + if row_key is None: + raise ValueError("row_key must be string or bytes") + + strip_filter = StripValueTransformerFilter(flag=True) + limit_filter = CellsRowLimitFilter(1) + chain_filter = RowFilterChain(filters=[limit_filter, strip_filter]) + query = ReadRowsQuery(row_keys=row_key, limit=1, row_filter=chain_filter) + results = await self.read_rows( + query, + operation_timeout=operation_timeout, + attempt_timeout=attempt_timeout, + retryable_errors=retryable_errors, + ) + return len(results) > 0 + + async def sample_row_keys( + self, + *, + operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.DEFAULT, + attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.DEFAULT, + retryable_errors: Sequence[type[Exception]] + | TABLE_DEFAULT = TABLE_DEFAULT.DEFAULT, + ) -> RowKeySamples: + """ + Return a set of RowKeySamples that delimit contiguous sections of the table of + approximately equal size + + RowKeySamples output can be used with ReadRowsQuery.shard() to create a sharded query that + can be parallelized across multiple backend nodes read_rows and read_rows_stream + requests will call sample_row_keys internally for this purpose when sharding is enabled + + RowKeySamples is simply a type alias for list[tuple[bytes, int]]; a list of + row_keys, along with offset positions in the table + + Args: + - operation_timeout: the time budget for the entire operation, in seconds. + Failed requests will be retried within the budget.i + Defaults to the Table's default_operation_timeout + - attempt_timeout: the time budget for an individual network request, in seconds. + If it takes longer than this time to complete, the request will be cancelled with + a DeadlineExceeded exception, and a retry will be attempted. + Defaults to the Table's default_attempt_timeout. + If None, defaults to operation_timeout. + - retryable_errors: a list of errors that will be retried if encountered. + Defaults to the Table's default_retryable_errors. 
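+        Example (illustrative sketch; assumes an existing ``table`` in an async context):
+
+        ```
+        samples = await table.sample_row_keys()
+        for row_key, offset_bytes in samples:
+            print(row_key, offset_bytes)
+        ```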
+ Returns: + - a set of RowKeySamples the delimit contiguous sections of the table + Raises: + - DeadlineExceeded: raised after operation timeout + will be chained with a RetryExceptionGroup containing GoogleAPIError exceptions + from any retries that failed + - GoogleAPIError: raised if the request encounters an unrecoverable error + """ + # prepare timeouts + operation_timeout, attempt_timeout = _get_timeouts( + operation_timeout, attempt_timeout, self + ) + attempt_timeout_gen = _attempt_timeout_generator( + attempt_timeout, operation_timeout + ) + # prepare retryable + retryable_excs = _get_retryable_errors(retryable_errors, self) + predicate = retries.if_exception_type(*retryable_excs) + + sleep_generator = retries.exponential_sleep_generator(0.01, 2, 60) + + # prepare request + metadata = _make_metadata(self.table_name, self.app_profile_id) + + async def execute_rpc(): + results = await self.client._gapic_client.sample_row_keys( + table_name=self.table_name, + app_profile_id=self.app_profile_id, + timeout=next(attempt_timeout_gen), + metadata=metadata, + retry=None, + ) + return [(s.row_key, s.offset_bytes) async for s in results] + + return await retries.retry_target_async( + execute_rpc, + predicate, + sleep_generator, + operation_timeout, + exception_factory=_retry_exception_factory, + ) + + def mutations_batcher( + self, + *, + flush_interval: float | None = 5, + flush_limit_mutation_count: int | None = 1000, + flush_limit_bytes: int = 20 * _MB_SIZE, + flow_control_max_mutation_count: int = 100_000, + flow_control_max_bytes: int = 100 * _MB_SIZE, + batch_operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS, + batch_attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS, + batch_retryable_errors: Sequence[type[Exception]] + | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS, + ) -> MutationsBatcherAsync: + """ + Returns a new mutations batcher instance. + + Can be used to iteratively add mutations that are flushed as a group, + to avoid excess network calls + + Args: + - flush_interval: Automatically flush every flush_interval seconds. If None, + a table default will be used + - flush_limit_mutation_count: Flush immediately after flush_limit_mutation_count + mutations are added across all entries. If None, this limit is ignored. + - flush_limit_bytes: Flush immediately after flush_limit_bytes bytes are added. + - flow_control_max_mutation_count: Maximum number of inflight mutations. + - flow_control_max_bytes: Maximum number of inflight bytes. + - batch_operation_timeout: timeout for each mutate_rows operation, in seconds. + Defaults to the Table's default_mutate_rows_operation_timeout + - batch_attempt_timeout: timeout for each individual request, in seconds. + Defaults to the Table's default_mutate_rows_attempt_timeout. + If None, defaults to batch_operation_timeout. + - batch_retryable_errors: a list of errors that will be retried if encountered. + Defaults to the Table's default_mutate_rows_retryable_errors. 
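+        Example (illustrative sketch; assumes an existing ``table`` and a prepared
+        ``RowMutationEntry`` named ``entry``):
+
+        ```
+        async with table.mutations_batcher() as batcher:
+            await batcher.append(entry)
+        ```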
+ Returns: + - a MutationsBatcherAsync context manager that can batch requests + """ + return MutationsBatcherAsync( + self, + flush_interval=flush_interval, + flush_limit_mutation_count=flush_limit_mutation_count, + flush_limit_bytes=flush_limit_bytes, + flow_control_max_mutation_count=flow_control_max_mutation_count, + flow_control_max_bytes=flow_control_max_bytes, + batch_operation_timeout=batch_operation_timeout, + batch_attempt_timeout=batch_attempt_timeout, + batch_retryable_errors=batch_retryable_errors, + ) + + async def mutate_row( + self, + row_key: str | bytes, + mutations: list[Mutation] | Mutation, + *, + operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.DEFAULT, + attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.DEFAULT, + retryable_errors: Sequence[type[Exception]] + | TABLE_DEFAULT = TABLE_DEFAULT.DEFAULT, + ): + """ + Mutates a row atomically. + + Cells already present in the row are left unchanged unless explicitly changed + by ``mutation``. + + Idempotent operations (i.e, all mutations have an explicit timestamp) will be + retried on server failure. Non-idempotent operations will not. + + Args: + - row_key: the row to apply mutations to + - mutations: the set of mutations to apply to the row + - operation_timeout: the time budget for the entire operation, in seconds. + Failed requests will be retried within the budget. + Defaults to the Table's default_operation_timeout + - attempt_timeout: the time budget for an individual network request, in seconds. + If it takes longer than this time to complete, the request will be cancelled with + a DeadlineExceeded exception, and a retry will be attempted. + Defaults to the Table's default_attempt_timeout. + If None, defaults to operation_timeout. + - retryable_errors: a list of errors that will be retried if encountered. + Only idempotent mutations will be retried. Defaults to the Table's + default_retryable_errors. + Raises: + - DeadlineExceeded: raised after operation timeout + will be chained with a RetryExceptionGroup containing all + GoogleAPIError exceptions from any retries that failed + - GoogleAPIError: raised on non-idempotent operations that cannot be + safely retried. 
+            - ValueError if invalid arguments are provided
+        """
+        operation_timeout, attempt_timeout = _get_timeouts(
+            operation_timeout, attempt_timeout, self
+        )
+
+        if not mutations:
+            raise ValueError("No mutations provided")
+        mutations_list = mutations if isinstance(mutations, list) else [mutations]
+
+        if all(mutation.is_idempotent() for mutation in mutations_list):
+            # mutations are all idempotent and safe to retry
+            predicate = retries.if_exception_type(
+                *_get_retryable_errors(retryable_errors, self)
+            )
+        else:
+            # mutations should not be retried
+            predicate = retries.if_exception_type()
+
+        sleep_generator = retries.exponential_sleep_generator(0.01, 2, 60)
+
+        target = partial(
+            self.client._gapic_client.mutate_row,
+            row_key=row_key.encode("utf-8") if isinstance(row_key, str) else row_key,
+            mutations=[mutation._to_pb() for mutation in mutations_list],
+            table_name=self.table_name,
+            app_profile_id=self.app_profile_id,
+            timeout=attempt_timeout,
+            metadata=_make_metadata(self.table_name, self.app_profile_id),
+            retry=None,
+        )
+        return await retries.retry_target_async(
+            target,
+            predicate,
+            sleep_generator,
+            operation_timeout,
+            exception_factory=_retry_exception_factory,
+        )
+
+    async def bulk_mutate_rows(
+        self,
+        mutation_entries: list[RowMutationEntry],
+        *,
+        operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS,
+        attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS,
+        retryable_errors: Sequence[type[Exception]]
+        | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS,
+    ):
+        """
+        Applies mutations for multiple rows in a single batched request.
+
+        Each individual RowMutationEntry is applied atomically, but separate entries
+        may be applied in arbitrary order (even for entries targeting the same row).
+        In total, mutation_entries can contain at most 100000 individual mutations
+        across all entries
+
+        Idempotent entries (i.e., entries with mutations with explicit timestamps)
+        will be retried on failure. Non-idempotent entries will not, and will be
+        reported in a raised exception group
+
+        Args:
+            - mutation_entries: the batches of mutations to apply
+                Each entry will be applied atomically, but entries will be applied
+                in arbitrary order
+            - operation_timeout: the time budget for the entire operation, in seconds.
+                Failed requests will be retried within the budget.
+                Defaults to the Table's default_mutate_rows_operation_timeout
+            - attempt_timeout: the time budget for an individual network request, in seconds.
+                If it takes longer than this time to complete, the request will be cancelled with
+                a DeadlineExceeded exception, and a retry will be attempted.
+                Defaults to the Table's default_mutate_rows_attempt_timeout.
+                If None, defaults to operation_timeout.
+            - retryable_errors: a list of errors that will be retried if encountered.
+ Defaults to the Table's default_mutate_rows_retryable_errors + Raises: + - MutationsExceptionGroup if one or more mutations fails + Contains details about any failed entries in .exceptions + - ValueError if invalid arguments are provided + """ + operation_timeout, attempt_timeout = _get_timeouts( + operation_timeout, attempt_timeout, self + ) + retryable_excs = _get_retryable_errors(retryable_errors, self) + + operation = _MutateRowsOperationAsync( + self.client._gapic_client, + self, + mutation_entries, + operation_timeout, + attempt_timeout, + retryable_exceptions=retryable_excs, + ) + await operation.start() + + async def check_and_mutate_row( + self, + row_key: str | bytes, + predicate: RowFilter | None, + *, + true_case_mutations: Mutation | list[Mutation] | None = None, + false_case_mutations: Mutation | list[Mutation] | None = None, + operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.DEFAULT, + ) -> bool: + """ + Mutates a row atomically based on the output of a predicate filter + + Non-idempotent operation: will not be retried + + Args: + - row_key: the key of the row to mutate + - predicate: the filter to be applied to the contents of the specified row. + Depending on whether or not any results are yielded, + either true_case_mutations or false_case_mutations will be executed. + If None, checks that the row contains any values at all. + - true_case_mutations: + Changes to be atomically applied to the specified row if + predicate yields at least one cell when + applied to row_key. Entries are applied in order, + meaning that earlier mutations can be masked by later + ones. Must contain at least one entry if + false_case_mutations is empty, and at most 100000. + - false_case_mutations: + Changes to be atomically applied to the specified row if + predicate_filter does not yield any cells when + applied to row_key. Entries are applied in order, + meaning that earlier mutations can be masked by later + ones. Must contain at least one entry if + `true_case_mutations is empty, and at most 100000. + - operation_timeout: the time budget for the entire operation, in seconds. + Failed requests will not be retried. 
Defaults to the Table's default_operation_timeout + Returns: + - bool indicating whether the predicate was true or false + Raises: + - GoogleAPIError exceptions from grpc call + """ + operation_timeout, _ = _get_timeouts(operation_timeout, None, self) + if true_case_mutations is not None and not isinstance( + true_case_mutations, list + ): + true_case_mutations = [true_case_mutations] + true_case_list = [m._to_pb() for m in true_case_mutations or []] + if false_case_mutations is not None and not isinstance( + false_case_mutations, list + ): + false_case_mutations = [false_case_mutations] + false_case_list = [m._to_pb() for m in false_case_mutations or []] + metadata = _make_metadata(self.table_name, self.app_profile_id) + result = await self.client._gapic_client.check_and_mutate_row( + true_mutations=true_case_list, + false_mutations=false_case_list, + predicate_filter=predicate._to_pb() if predicate is not None else None, + row_key=row_key.encode("utf-8") if isinstance(row_key, str) else row_key, + table_name=self.table_name, + app_profile_id=self.app_profile_id, + metadata=metadata, + timeout=operation_timeout, + retry=None, + ) + return result.predicate_matched + + async def read_modify_write_row( + self, + row_key: str | bytes, + rules: ReadModifyWriteRule | list[ReadModifyWriteRule], + *, + operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.DEFAULT, + ) -> Row: + """ + Reads and modifies a row atomically according to input ReadModifyWriteRules, + and returns the contents of all modified cells + + The new value for the timestamp is the greater of the existing timestamp or + the current server time. + + Non-idempotent operation: will not be retried + + Args: + - row_key: the key of the row to apply read/modify/write rules to + - rules: A rule or set of rules to apply to the row. + Rules are applied in order, meaning that earlier rules will affect the + results of later ones. + - operation_timeout: the time budget for the entire operation, in seconds. + Failed requests will not be retried. + Defaults to the Table's default_operation_timeout. + Returns: + - Row: containing cell data that was modified as part of the + operation + Raises: + - GoogleAPIError exceptions from grpc call + - ValueError if invalid arguments are provided + """ + operation_timeout, _ = _get_timeouts(operation_timeout, None, self) + if operation_timeout <= 0: + raise ValueError("operation_timeout must be greater than 0") + if rules is not None and not isinstance(rules, list): + rules = [rules] + if not rules: + raise ValueError("rules must contain at least one item") + metadata = _make_metadata(self.table_name, self.app_profile_id) + result = await self.client._gapic_client.read_modify_write_row( + rules=[rule._to_pb() for rule in rules], + row_key=row_key.encode("utf-8") if isinstance(row_key, str) else row_key, + table_name=self.table_name, + app_profile_id=self.app_profile_id, + metadata=metadata, + timeout=operation_timeout, + retry=None, + ) + # construct Row from result + return Row._from_pb(result.row) + + async def close(self): + """ + Called to close the Table instance and release any resources held by it. 
+ """ + self._register_instance_task.cancel() + await self.client._remove_instance_registration(self.instance_id, self) + + async def __aenter__(self): + """ + Implement async context manager protocol + + Ensure registration task has time to run, so that + grpc channels will be warmed for the specified instance + """ + await self._register_instance_task + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + """ + Implement async context manager protocol + + Unregister this instance with the client, so that + grpc channels will no longer be warmed + """ + await self.close() diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/mutations_batcher.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/mutations_batcher.py new file mode 100644 index 000000000000..5d5dd535ee45 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/mutations_batcher.py @@ -0,0 +1,501 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import Any, Sequence, TYPE_CHECKING +import asyncio +import atexit +import warnings +from collections import deque + +from google.cloud.bigtable.data.mutations import RowMutationEntry +from google.cloud.bigtable.data.exceptions import MutationsExceptionGroup +from google.cloud.bigtable.data.exceptions import FailedMutationEntryError +from google.cloud.bigtable.data._helpers import _get_retryable_errors +from google.cloud.bigtable.data._helpers import _get_timeouts +from google.cloud.bigtable.data._helpers import TABLE_DEFAULT + +from google.cloud.bigtable.data._async._mutate_rows import _MutateRowsOperationAsync +from google.cloud.bigtable.data._async._mutate_rows import ( + _MUTATE_ROWS_REQUEST_MUTATION_LIMIT, +) +from google.cloud.bigtable.data.mutations import Mutation + +if TYPE_CHECKING: + from google.cloud.bigtable.data._async.client import TableAsync + +# used to make more readable default values +_MB_SIZE = 1024 * 1024 + + +class _FlowControlAsync: + """ + Manages flow control for batched mutations. Mutations are registered against + the FlowControl object before being sent, which will block if size or count + limits have reached capacity. As mutations completed, they are removed from + the FlowControl object, which will notify any blocked requests that there + is additional capacity. + + Flow limits are not hard limits. If a single mutation exceeds the configured + limits, it will be allowed as a single batch when the capacity is available. + """ + + def __init__( + self, + max_mutation_count: int, + max_mutation_bytes: int, + ): + """ + Args: + - max_mutation_count: maximum number of mutations to send in a single rpc. + This corresponds to individual mutations in a single RowMutationEntry. + - max_mutation_bytes: maximum number of bytes to send in a single rpc. 
+ """ + self._max_mutation_count = max_mutation_count + self._max_mutation_bytes = max_mutation_bytes + if self._max_mutation_count < 1: + raise ValueError("max_mutation_count must be greater than 0") + if self._max_mutation_bytes < 1: + raise ValueError("max_mutation_bytes must be greater than 0") + self._capacity_condition = asyncio.Condition() + self._in_flight_mutation_count = 0 + self._in_flight_mutation_bytes = 0 + + def _has_capacity(self, additional_count: int, additional_size: int) -> bool: + """ + Checks if there is capacity to send a new entry with the given size and count + + FlowControl limits are not hard limits. If a single mutation exceeds + the configured flow limits, it will be sent in a single batch when + previous batches have completed. + + Args: + - additional_count: number of mutations in the pending entry + - additional_size: size of the pending entry + Returns: + - True if there is capacity to send the pending entry, False otherwise + """ + # adjust limits to allow overly large mutations + acceptable_size = max(self._max_mutation_bytes, additional_size) + acceptable_count = max(self._max_mutation_count, additional_count) + # check if we have capacity for new mutation + new_size = self._in_flight_mutation_bytes + additional_size + new_count = self._in_flight_mutation_count + additional_count + return new_size <= acceptable_size and new_count <= acceptable_count + + async def remove_from_flow( + self, mutations: RowMutationEntry | list[RowMutationEntry] + ) -> None: + """ + Removes mutations from flow control. This method should be called once + for each mutation that was sent to add_to_flow, after the corresponding + operation is complete. + + Args: + - mutations: mutation or list of mutations to remove from flow control + """ + if not isinstance(mutations, list): + mutations = [mutations] + total_count = sum(len(entry.mutations) for entry in mutations) + total_size = sum(entry.size() for entry in mutations) + self._in_flight_mutation_count -= total_count + self._in_flight_mutation_bytes -= total_size + # notify any blocked requests that there is additional capacity + async with self._capacity_condition: + self._capacity_condition.notify_all() + + async def add_to_flow(self, mutations: RowMutationEntry | list[RowMutationEntry]): + """ + Generator function that registers mutations with flow control. As mutations + are accepted into the flow control, they are yielded back to the caller, + to be sent in a batch. If the flow control is at capacity, the generator + will block until there is capacity available. + + Args: + - mutations: list mutations to break up into batches + Yields: + - list of mutations that have reserved space in the flow control. + Each batch contains at least one mutation. 
+ """ + if not isinstance(mutations, list): + mutations = [mutations] + start_idx = 0 + end_idx = 0 + while end_idx < len(mutations): + start_idx = end_idx + batch_mutation_count = 0 + # fill up batch until we hit capacity + async with self._capacity_condition: + while end_idx < len(mutations): + next_entry = mutations[end_idx] + next_size = next_entry.size() + next_count = len(next_entry.mutations) + if ( + self._has_capacity(next_count, next_size) + # make sure not to exceed per-request mutation count limits + and (batch_mutation_count + next_count) + <= _MUTATE_ROWS_REQUEST_MUTATION_LIMIT + ): + # room for new mutation; add to batch + end_idx += 1 + batch_mutation_count += next_count + self._in_flight_mutation_bytes += next_size + self._in_flight_mutation_count += next_count + elif start_idx != end_idx: + # we have at least one mutation in the batch, so send it + break + else: + # batch is empty. Block until we have capacity + await self._capacity_condition.wait_for( + lambda: self._has_capacity(next_count, next_size) + ) + yield mutations[start_idx:end_idx] + + +class MutationsBatcherAsync: + """ + Allows users to send batches using context manager API: + + Runs mutate_row, mutate_rows, and check_and_mutate_row internally, combining + to use as few network requests as required + + Flushes: + - every flush_interval seconds + - after queue reaches flush_count in quantity + - after queue reaches flush_size_bytes in storage size + - when batcher is closed or destroyed + + async with table.mutations_batcher() as batcher: + for i in range(10): + batcher.add(row, mut) + """ + + def __init__( + self, + table: "TableAsync", + *, + flush_interval: float | None = 5, + flush_limit_mutation_count: int | None = 1000, + flush_limit_bytes: int = 20 * _MB_SIZE, + flow_control_max_mutation_count: int = 100_000, + flow_control_max_bytes: int = 100 * _MB_SIZE, + batch_operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS, + batch_attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS, + batch_retryable_errors: Sequence[type[Exception]] + | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS, + ): + """ + Args: + - table: Table to preform rpc calls + - flush_interval: Automatically flush every flush_interval seconds. + If None, no time-based flushing is performed. + - flush_limit_mutation_count: Flush immediately after flush_limit_mutation_count + mutations are added across all entries. If None, this limit is ignored. + - flush_limit_bytes: Flush immediately after flush_limit_bytes bytes are added. + - flow_control_max_mutation_count: Maximum number of inflight mutations. + - flow_control_max_bytes: Maximum number of inflight bytes. + - batch_operation_timeout: timeout for each mutate_rows operation, in seconds. + If TABLE_DEFAULT, defaults to the Table's default_mutate_rows_operation_timeout. + - batch_attempt_timeout: timeout for each individual request, in seconds. + If TABLE_DEFAULT, defaults to the Table's default_mutate_rows_attempt_timeout. + If None, defaults to batch_operation_timeout. + - batch_retryable_errors: a list of errors that will be retried if encountered. + Defaults to the Table's default_mutate_rows_retryable_errors. 
+ """ + self._operation_timeout, self._attempt_timeout = _get_timeouts( + batch_operation_timeout, batch_attempt_timeout, table + ) + self._retryable_errors: list[type[Exception]] = _get_retryable_errors( + batch_retryable_errors, table + ) + + self.closed: bool = False + self._table = table + self._staged_entries: list[RowMutationEntry] = [] + self._staged_count, self._staged_bytes = 0, 0 + self._flow_control = _FlowControlAsync( + flow_control_max_mutation_count, flow_control_max_bytes + ) + self._flush_limit_bytes = flush_limit_bytes + self._flush_limit_count = ( + flush_limit_mutation_count + if flush_limit_mutation_count is not None + else float("inf") + ) + self._flush_timer = self._start_flush_timer(flush_interval) + self._flush_jobs: set[asyncio.Future[None]] = set() + # MutationExceptionGroup reports number of successful entries along with failures + self._entries_processed_since_last_raise: int = 0 + self._exceptions_since_last_raise: int = 0 + # keep track of the first and last _exception_list_limit exceptions + self._exception_list_limit: int = 10 + self._oldest_exceptions: list[Exception] = [] + self._newest_exceptions: deque[Exception] = deque( + maxlen=self._exception_list_limit + ) + # clean up on program exit + atexit.register(self._on_exit) + + def _start_flush_timer(self, interval: float | None) -> asyncio.Future[None]: + """ + Set up a background task to flush the batcher every interval seconds + + If interval is None, an empty future is returned + + Args: + - flush_interval: Automatically flush every flush_interval seconds. + If None, no time-based flushing is performed. + Returns: + - asyncio.Future that represents the background task + """ + if interval is None or self.closed: + empty_future: asyncio.Future[None] = asyncio.Future() + empty_future.set_result(None) + return empty_future + + async def timer_routine(self, interval: float): + """ + Triggers new flush tasks every `interval` seconds + """ + while not self.closed: + await asyncio.sleep(interval) + # add new flush task to list + if not self.closed and self._staged_entries: + self._schedule_flush() + + timer_task = asyncio.create_task(timer_routine(self, interval)) + return timer_task + + async def append(self, mutation_entry: RowMutationEntry): + """ + Add a new set of mutations to the internal queue + + TODO: return a future to track completion of this entry + + Args: + - mutation_entry: new entry to add to flush queue + Raises: + - RuntimeError if batcher is closed + - ValueError if an invalid mutation type is added + """ + if self.closed: + raise RuntimeError("Cannot append to closed MutationsBatcher") + if isinstance(mutation_entry, Mutation): # type: ignore + raise ValueError( + f"invalid mutation type: {type(mutation_entry).__name__}. 
Only RowMutationEntry objects are supported by batcher" + ) + self._staged_entries.append(mutation_entry) + # start a new flush task if limits exceeded + self._staged_count += len(mutation_entry.mutations) + self._staged_bytes += mutation_entry.size() + if ( + self._staged_count >= self._flush_limit_count + or self._staged_bytes >= self._flush_limit_bytes + ): + self._schedule_flush() + # yield to the event loop to allow flush to run + await asyncio.sleep(0) + + def _schedule_flush(self) -> asyncio.Future[None] | None: + """Update the flush task to include the latest staged entries""" + if self._staged_entries: + entries, self._staged_entries = self._staged_entries, [] + self._staged_count, self._staged_bytes = 0, 0 + new_task = self._create_bg_task(self._flush_internal, entries) + new_task.add_done_callback(self._flush_jobs.remove) + self._flush_jobs.add(new_task) + return new_task + return None + + async def _flush_internal(self, new_entries: list[RowMutationEntry]): + """ + Flushes a set of mutations to the server, and updates internal state + + Args: + - new_entries: list of RowMutationEntry objects to flush + """ + # flush new entries + in_process_requests: list[asyncio.Future[list[FailedMutationEntryError]]] = [] + async for batch in self._flow_control.add_to_flow(new_entries): + batch_task = self._create_bg_task(self._execute_mutate_rows, batch) + in_process_requests.append(batch_task) + # wait for all inflight requests to complete + found_exceptions = await self._wait_for_batch_results(*in_process_requests) + # update exception data to reflect any new errors + self._entries_processed_since_last_raise += len(new_entries) + self._add_exceptions(found_exceptions) + + async def _execute_mutate_rows( + self, batch: list[RowMutationEntry] + ) -> list[FailedMutationEntryError]: + """ + Helper to execute mutation operation on a batch + + Args: + - batch: list of RowMutationEntry objects to send to server + - timeout: timeout in seconds. Used as operation_timeout and attempt_timeout. + If not given, will use table defaults + Returns: + - list of FailedMutationEntryError objects for mutations that failed. + FailedMutationEntryError objects will not contain index information + """ + try: + operation = _MutateRowsOperationAsync( + self._table.client._gapic_client, + self._table, + batch, + operation_timeout=self._operation_timeout, + attempt_timeout=self._attempt_timeout, + retryable_exceptions=self._retryable_errors, + ) + await operation.start() + except MutationsExceptionGroup as e: + # strip index information from exceptions, since it is not useful in a batch context + for subexc in e.exceptions: + subexc.index = None + return list(e.exceptions) + finally: + # mark batch as complete in flow control + await self._flow_control.remove_from_flow(batch) + return [] + + def _add_exceptions(self, excs: list[Exception]): + """ + Add new list of exceptions to internal store. To avoid unbounded memory, + the batcher will store the first and last _exception_list_limit exceptions, + and discard any in between. 
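+
+        For example, with the default _exception_list_limit of 10, if 25
+        exceptions accumulate between raises, _oldest_exceptions holds the first
+        10, _newest_exceptions holds the most recent 10, and the 5 in between are
+        reflected only in _exceptions_since_last_raise.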
+ """ + self._exceptions_since_last_raise += len(excs) + if excs and len(self._oldest_exceptions) < self._exception_list_limit: + # populate oldest_exceptions with found_exceptions + addition_count = self._exception_list_limit - len(self._oldest_exceptions) + self._oldest_exceptions.extend(excs[:addition_count]) + excs = excs[addition_count:] + if excs: + # populate newest_exceptions with remaining found_exceptions + self._newest_exceptions.extend(excs[-self._exception_list_limit :]) + + def _raise_exceptions(self): + """ + Raise any unreported exceptions from background flush operations + + Raises: + - MutationsExceptionGroup with all unreported exceptions + """ + if self._oldest_exceptions or self._newest_exceptions: + oldest, self._oldest_exceptions = self._oldest_exceptions, [] + newest = list(self._newest_exceptions) + self._newest_exceptions.clear() + entry_count, self._entries_processed_since_last_raise = ( + self._entries_processed_since_last_raise, + 0, + ) + exc_count, self._exceptions_since_last_raise = ( + self._exceptions_since_last_raise, + 0, + ) + raise MutationsExceptionGroup.from_truncated_lists( + first_list=oldest, + last_list=newest, + total_excs=exc_count, + entry_count=entry_count, + ) + + async def __aenter__(self): + """For context manager API""" + return self + + async def __aexit__(self, exc_type, exc, tb): + """For context manager API""" + await self.close() + + async def close(self): + """ + Flush queue and clean up resources + """ + self.closed = True + self._flush_timer.cancel() + self._schedule_flush() + if self._flush_jobs: + await asyncio.gather(*self._flush_jobs, return_exceptions=True) + try: + await self._flush_timer + except asyncio.CancelledError: + pass + atexit.unregister(self._on_exit) + # raise unreported exceptions + self._raise_exceptions() + + def _on_exit(self): + """ + Called when program is exited. Raises warning if unflushed mutations remain + """ + if not self.closed and self._staged_entries: + warnings.warn( + f"MutationsBatcher for table {self._table.table_name} was not closed. " + f"{len(self._staged_entries)} Unflushed mutations will not be sent to the server." + ) + + @staticmethod + def _create_bg_task(func, *args, **kwargs) -> asyncio.Future[Any]: + """ + Create a new background task, and return a future + + This method wraps asyncio to make it easier to maintain subclasses + with different concurrency models. + + Args: + - func: function to execute in background task + - *args: positional arguments to pass to func + - **kwargs: keyword arguments to pass to func + Returns: + - Future object representing the background task + """ + return asyncio.create_task(func(*args, **kwargs)) + + @staticmethod + async def _wait_for_batch_results( + *tasks: asyncio.Future[list[FailedMutationEntryError]] | asyncio.Future[None], + ) -> list[Exception]: + """ + Takes in a list of futures representing _execute_mutate_rows tasks, + waits for them to complete, and returns a list of errors encountered. + + Args: + - *tasks: futures representing _execute_mutate_rows or _flush_internal tasks + Returns: + - list of Exceptions encountered by any of the tasks. Errors are expected + to be FailedMutationEntryError, representing a failed mutation operation. + If a task fails with a different exception, it will be included in the + output list. Successful tasks will not be represented in the output list. 
+ """ + if not tasks: + return [] + all_results = await asyncio.gather(*tasks, return_exceptions=True) + found_errors = [] + for result in all_results: + if isinstance(result, Exception): + # will receive direct Exception objects if request task fails + found_errors.append(result) + elif isinstance(result, BaseException): + # BaseException not expected from grpc calls. Raise immediately + raise result + elif result: + # completed requests will return a list of FailedMutationEntryError + for e in result: + # strip index information + e.index = None + found_errors.extend(result) + return found_errors diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_helpers.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_helpers.py new file mode 100644 index 000000000000..a0b13cbaf0a8 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_helpers.py @@ -0,0 +1,220 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +""" +Helper functions used in various places in the library. +""" +from __future__ import annotations + +from typing import Sequence, List, Tuple, TYPE_CHECKING +import time +import enum +from collections import namedtuple +from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery + +from google.api_core import exceptions as core_exceptions +from google.api_core.retry import RetryFailureReason +from google.cloud.bigtable.data.exceptions import RetryExceptionGroup + +if TYPE_CHECKING: + import grpc + from google.cloud.bigtable.data import TableAsync + +""" +Helper functions used in various places in the library. +""" + +# Type alias for the output of sample_keys +RowKeySamples = List[Tuple[bytes, int]] + +# type alias for the output of query.shard() +ShardedQuery = List[ReadRowsQuery] + +# used by read_rows_sharded to limit how many requests are attempted in parallel +_CONCURRENCY_LIMIT = 10 + +# used to register instance data with the client for channel warming +_WarmedInstanceKey = namedtuple( + "_WarmedInstanceKey", ["instance_name", "table_name", "app_profile_id"] +) + + +# enum used on method calls when table defaults should be used +class TABLE_DEFAULT(enum.Enum): + # default for mutate_row, sample_row_keys, check_and_mutate_row, and read_modify_write_row + DEFAULT = "DEFAULT" + # default for read_rows, read_rows_stream, read_rows_sharded, row_exists, and read_row + READ_ROWS = "READ_ROWS_DEFAULT" + # default for bulk_mutate_rows and mutations_batcher + MUTATE_ROWS = "MUTATE_ROWS_DEFAULT" + + +def _make_metadata( + table_name: str, app_profile_id: str | None +) -> list[tuple[str, str]]: + """ + Create properly formatted gRPC metadata for requests. 
+ """ + params = [] + params.append(f"table_name={table_name}") + if app_profile_id is not None: + params.append(f"app_profile_id={app_profile_id}") + params_str = "&".join(params) + return [("x-goog-request-params", params_str)] + + +def _attempt_timeout_generator( + per_request_timeout: float | None, operation_timeout: float +): + """ + Generator that yields the timeout value for each attempt of a retry loop. + + Will return per_request_timeout until the operation_timeout is approached, + at which point it will return the remaining time in the operation_timeout. + + Args: + - per_request_timeout: The timeout value to use for each request, in seconds. + If None, the operation_timeout will be used for each request. + - operation_timeout: The timeout value to use for the entire operationm in seconds. + Yields: + - The timeout value to use for the next request, in seonds + """ + per_request_timeout = ( + per_request_timeout if per_request_timeout is not None else operation_timeout + ) + deadline = operation_timeout + time.monotonic() + while True: + yield max(0, min(per_request_timeout, deadline - time.monotonic())) + + +def _retry_exception_factory( + exc_list: list[Exception], + reason: RetryFailureReason, + timeout_val: float | None, +) -> tuple[Exception, Exception | None]: + """ + Build retry error based on exceptions encountered during operation + + Args: + - exc_list: list of exceptions encountered during operation + - is_timeout: whether the operation failed due to timeout + - timeout_val: the operation timeout value in seconds, for constructing + the error message + Returns: + - tuple of the exception to raise, and a cause exception if applicable + """ + if reason == RetryFailureReason.TIMEOUT: + timeout_val_str = f"of {timeout_val:0.1f}s " if timeout_val is not None else "" + # if failed due to timeout, raise deadline exceeded as primary exception + source_exc: Exception = core_exceptions.DeadlineExceeded( + f"operation_timeout{timeout_val_str} exceeded" + ) + elif exc_list: + # otherwise, raise non-retryable error as primary exception + source_exc = exc_list.pop() + else: + source_exc = RuntimeError("failed with unspecified exception") + # use the retry exception group as the cause of the exception + cause_exc: Exception | None = RetryExceptionGroup(exc_list) if exc_list else None + source_exc.__cause__ = cause_exc + return source_exc, cause_exc + + +def _get_timeouts( + operation: float | TABLE_DEFAULT, + attempt: float | None | TABLE_DEFAULT, + table: "TableAsync", +) -> tuple[float, float]: + """ + Convert passed in timeout values to floats, using table defaults if necessary. + + attempt will use operation value if None, or if larger than operation. + + Will call _validate_timeouts on the outputs, and raise ValueError if the + resulting timeouts are invalid. + + Args: + - operation: The timeout value to use for the entire operation, in seconds. + - attempt: The timeout value to use for each attempt, in seconds. + - table: The table to use for default values. 
+ Returns: + - A tuple of (operation_timeout, attempt_timeout) + """ + # load table defaults if necessary + if operation == TABLE_DEFAULT.DEFAULT: + final_operation = table.default_operation_timeout + elif operation == TABLE_DEFAULT.READ_ROWS: + final_operation = table.default_read_rows_operation_timeout + elif operation == TABLE_DEFAULT.MUTATE_ROWS: + final_operation = table.default_mutate_rows_operation_timeout + else: + final_operation = operation + if attempt == TABLE_DEFAULT.DEFAULT: + attempt = table.default_attempt_timeout + elif attempt == TABLE_DEFAULT.READ_ROWS: + attempt = table.default_read_rows_attempt_timeout + elif attempt == TABLE_DEFAULT.MUTATE_ROWS: + attempt = table.default_mutate_rows_attempt_timeout + + if attempt is None: + # no timeout specified, use operation timeout for both + final_attempt = final_operation + else: + # cap attempt timeout at operation timeout + final_attempt = min(attempt, final_operation) if final_operation else attempt + + _validate_timeouts(final_operation, final_attempt, allow_none=False) + return final_operation, final_attempt + + +def _validate_timeouts( + operation_timeout: float, attempt_timeout: float | None, allow_none: bool = False +): + """ + Helper function that will verify that timeout values are valid, and raise + an exception if they are not. + + Args: + - operation_timeout: The timeout value to use for the entire operation, in seconds. + - attempt_timeout: The timeout value to use for each attempt, in seconds. + - allow_none: If True, attempt_timeout can be None. If False, None values will raise an exception. + Raises: + - ValueError if operation_timeout or attempt_timeout are invalid. + """ + if operation_timeout is None: + raise ValueError("operation_timeout cannot be None") + if operation_timeout <= 0: + raise ValueError("operation_timeout must be greater than 0") + if not allow_none and attempt_timeout is None: + raise ValueError("attempt_timeout must not be None") + elif attempt_timeout is not None: + if attempt_timeout <= 0: + raise ValueError("attempt_timeout must be greater than 0") + + +def _get_retryable_errors( + call_codes: Sequence["grpc.StatusCode" | int | type[Exception]] | TABLE_DEFAULT, + table: "TableAsync", +) -> list[type[Exception]]: + # load table defaults if necessary + if call_codes == TABLE_DEFAULT.DEFAULT: + call_codes = table.default_retryable_errors + elif call_codes == TABLE_DEFAULT.READ_ROWS: + call_codes = table.default_read_rows_retryable_errors + elif call_codes == TABLE_DEFAULT.MUTATE_ROWS: + call_codes = table.default_mutate_rows_retryable_errors + + return [ + e if isinstance(e, type) else type(core_exceptions.from_grpc_status(e, "")) + for e in call_codes + ] diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/exceptions.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/exceptions.py new file mode 100644 index 000000000000..3c73ec4e9338 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/exceptions.py @@ -0,0 +1,307 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +import sys + +from typing import Any, TYPE_CHECKING + +from google.api_core import exceptions as core_exceptions +from google.cloud.bigtable.data.row import Row + +is_311_plus = sys.version_info >= (3, 11) + +if TYPE_CHECKING: + from google.cloud.bigtable.data.mutations import RowMutationEntry + from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery + + +class InvalidChunk(core_exceptions.GoogleAPICallError): + """Exception raised to invalid chunk data from back-end.""" + + +class _RowSetComplete(Exception): + """ + Internal exception for _ReadRowsOperation + Raised in revise_request_rowset when there are no rows left to process when starting a retry attempt + """ + + pass + + +class _MutateRowsIncomplete(RuntimeError): + """ + Exception raised when a mutate_rows call has unfinished work. + """ + + pass + + +class _BigtableExceptionGroup(ExceptionGroup if is_311_plus else Exception): # type: ignore # noqa: F821 + """ + Represents one or more exceptions that occur during a bulk Bigtable operation + + In Python 3.11+, this is an unmodified exception group. In < 3.10, it is a + custom exception with some exception group functionality backported, but does + Not implement the full API + """ + + def __init__(self, message, excs): + if is_311_plus: + super().__init__(message, excs) + else: + if len(excs) == 0: + raise ValueError("exceptions must be a non-empty sequence") + self.exceptions = tuple(excs) + # simulate an exception group in Python < 3.11 by adding exception info + # to the message + first_line = "--+---------------- 1 ----------------" + last_line = "+------------------------------------" + message_parts = [message + "\n" + first_line] + # print error info for each exception in the group + for idx, e in enumerate(excs[:15]): + # apply index header + if idx != 0: + message_parts.append( + f"+---------------- {str(idx+1).rjust(2)} ----------------" + ) + cause = e.__cause__ + # if this exception was had a cause, print the cause first + # used to display root causes of FailedMutationEntryError and FailedQueryShardError + # format matches the error output of Python 3.11+ + if cause is not None: + message_parts.extend( + f"| {type(cause).__name__}: {cause}".splitlines() + ) + message_parts.append("| ") + message_parts.append( + "| The above exception was the direct cause of the following exception:" + ) + message_parts.append("| ") + # attach error message for this sub-exception + # if the subexception is also a _BigtableExceptionGroup, + # error messages will be nested + message_parts.extend(f"| {type(e).__name__}: {e}".splitlines()) + # truncate the message if there are more than 15 exceptions + if len(excs) > 15: + message_parts.append("+---------------- ... 
---------------") + message_parts.append(f"| and {len(excs) - 15} more") + if last_line not in message_parts[-1]: + # in the case of nested _BigtableExceptionGroups, the last line + # does not need to be added, since one was added by the final sub-exception + message_parts.append(last_line) + super().__init__("\n ".join(message_parts)) + + def __new__(cls, message, excs): + if is_311_plus: + return super().__new__(cls, message, excs) + else: + return super().__new__(cls) + + def __str__(self): + if is_311_plus: + # don't return built-in sub-exception message + return self.args[0] + return super().__str__() + + def __repr__(self): + """ + repr representation should strip out sub-exception details + """ + if is_311_plus: + return super().__repr__() + message = self.args[0].split("\n")[0] + return f"{self.__class__.__name__}({message!r}, {self.exceptions!r})" + + +class MutationsExceptionGroup(_BigtableExceptionGroup): + """ + Represents one or more exceptions that occur during a bulk mutation operation + + Exceptions will typically be of type FailedMutationEntryError, but other exceptions may + be included if they are raised during the mutation operation + """ + + @staticmethod + def _format_message( + excs: list[Exception], total_entries: int, exc_count: int | None = None + ) -> str: + """ + Format a message for the exception group + + Args: + - excs: the exceptions in the group + - total_entries: the total number of entries attempted, successful or not + - exc_count: the number of exceptions associated with the request + if None, this will be len(excs) + """ + exc_count = exc_count if exc_count is not None else len(excs) + entry_str = "entry" if exc_count == 1 else "entries" + return f"{exc_count} failed {entry_str} from {total_entries} attempted." + + def __init__( + self, excs: list[Exception], total_entries: int, message: str | None = None + ): + """ + Args: + - excs: the exceptions in the group + - total_entries: the total number of entries attempted, successful or not + - message: the message for the exception group. If None, a default message + will be generated + """ + message = ( + message + if message is not None + else self._format_message(excs, total_entries) + ) + super().__init__(message, excs) + self.total_entries_attempted = total_entries + + def __new__( + cls, excs: list[Exception], total_entries: int, message: str | None = None + ): + """ + Args: + - excs: the exceptions in the group + - total_entries: the total number of entries attempted, successful or not + - message: the message for the exception group. If None, a default message + """ + message = ( + message if message is not None else cls._format_message(excs, total_entries) + ) + instance = super().__new__(cls, message, excs) + instance.total_entries_attempted = total_entries + return instance + + @classmethod + def from_truncated_lists( + cls, + first_list: list[Exception], + last_list: list[Exception], + total_excs: int, + entry_count: int, + ) -> MutationsExceptionGroup: + """ + Create a MutationsExceptionGroup from two lists of exceptions, representing + a larger set that has been truncated. The MutationsExceptionGroup will + contain the union of the two lists as sub-exceptions, and the error message + describe the number of exceptions that were truncated. 
+ + Args: + - first_list: the set of oldest exceptions to add to the ExceptionGroup + - last_list: the set of newest exceptions to add to the ExceptionGroup + - total_excs: the total number of exceptions associated with the request + Should be len(first_list) + len(last_list) + number of dropped exceptions + in the middle + - entry_count: the total number of entries attempted, successful or not + """ + first_count, last_count = len(first_list), len(last_list) + if first_count + last_count >= total_excs: + # no exceptions were dropped + return cls(first_list + last_list, entry_count) + excs = first_list + last_list + truncation_count = total_excs - (first_count + last_count) + base_message = cls._format_message(excs, entry_count, total_excs) + first_message = f"first {first_count}" if first_count else "" + last_message = f"last {last_count}" if last_count else "" + conjunction = " and " if first_message and last_message else "" + message = f"{base_message} ({first_message}{conjunction}{last_message} attached as sub-exceptions; {truncation_count} truncated)" + return cls(excs, entry_count, message) + + +class FailedMutationEntryError(Exception): + """ + Represents a single failed RowMutationEntry in a bulk_mutate_rows request. + A collection of FailedMutationEntryErrors will be raised in a MutationsExceptionGroup + """ + + def __init__( + self, + failed_idx: int | None, + failed_mutation_entry: "RowMutationEntry", + cause: Exception, + ): + idempotent_msg = ( + "idempotent" if failed_mutation_entry.is_idempotent() else "non-idempotent" + ) + index_msg = f" at index {failed_idx}" if failed_idx is not None else "" + message = f"Failed {idempotent_msg} mutation entry{index_msg}" + super().__init__(message) + self.__cause__ = cause + self.index = failed_idx + self.entry = failed_mutation_entry + + +class RetryExceptionGroup(_BigtableExceptionGroup): + """Represents one or more exceptions that occur during a retryable operation""" + + @staticmethod + def _format_message(excs: list[Exception]): + if len(excs) == 0: + return "No exceptions" + plural = "s" if len(excs) > 1 else "" + return f"{len(excs)} failed attempt{plural}" + + def __init__(self, excs: list[Exception]): + super().__init__(self._format_message(excs), excs) + + def __new__(cls, excs: list[Exception]): + return super().__new__(cls, cls._format_message(excs), excs) + + +class ShardedReadRowsExceptionGroup(_BigtableExceptionGroup): + """ + Represents one or more exceptions that occur during a sharded read rows operation + """ + + @staticmethod + def _format_message(excs: list[FailedQueryShardError], total_queries: int): + query_str = "query" if total_queries == 1 else "queries" + plural_str = "" if len(excs) == 1 else "s" + return f"{len(excs)} sub-exception{plural_str} (from {total_queries} {query_str} attempted)" + + def __init__( + self, + excs: list[FailedQueryShardError], + succeeded: list[Row], + total_queries: int, + ): + super().__init__(self._format_message(excs, total_queries), excs) + self.successful_rows = succeeded + + def __new__( + cls, excs: list[FailedQueryShardError], succeeded: list[Row], total_queries: int + ): + instance = super().__new__(cls, cls._format_message(excs, total_queries), excs) + instance.successful_rows = succeeded + return instance + + +class FailedQueryShardError(Exception): + """ + Represents an individual failed query in a sharded read rows operation + """ + + def __init__( + self, + failed_index: int, + failed_query: "ReadRowsQuery" | dict[str, Any], + cause: Exception, + ): + message = f"Failed 
query at index {failed_index}" + super().__init__(message) + self.__cause__ = cause + self.index = failed_index + self.query = failed_query diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/mutations.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/mutations.py new file mode 100644 index 000000000000..b5729d25e6b3 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/mutations.py @@ -0,0 +1,256 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations +from typing import Any +import time +from dataclasses import dataclass +from abc import ABC, abstractmethod +from sys import getsizeof + +import google.cloud.bigtable_v2.types.bigtable as types_pb +import google.cloud.bigtable_v2.types.data as data_pb + +from google.cloud.bigtable.data.read_modify_write_rules import _MAX_INCREMENT_VALUE + + +# special value for SetCell mutation timestamps. If set, server will assign a timestamp +_SERVER_SIDE_TIMESTAMP = -1 + +# mutation entries above this should be rejected +_MUTATE_ROWS_REQUEST_MUTATION_LIMIT = 100_000 + + +class Mutation(ABC): + """Model class for mutations""" + + @abstractmethod + def _to_dict(self) -> dict[str, Any]: + raise NotImplementedError + + def _to_pb(self) -> data_pb.Mutation: + """ + Convert the mutation to protobuf + """ + return data_pb.Mutation(**self._to_dict()) + + def is_idempotent(self) -> bool: + """ + Check if the mutation is idempotent + If false, the mutation will not be retried + """ + return True + + def __str__(self) -> str: + return str(self._to_dict()) + + def size(self) -> int: + """ + Get the size of the mutation in bytes + """ + return getsizeof(self._to_dict()) + + @classmethod + def _from_dict(cls, input_dict: dict[str, Any]) -> Mutation: + instance: Mutation | None = None + try: + if "set_cell" in input_dict: + details = input_dict["set_cell"] + instance = SetCell( + details["family_name"], + details["column_qualifier"], + details["value"], + details["timestamp_micros"], + ) + elif "delete_from_column" in input_dict: + details = input_dict["delete_from_column"] + time_range = details.get("time_range", {}) + start = time_range.get("start_timestamp_micros", None) + end = time_range.get("end_timestamp_micros", None) + instance = DeleteRangeFromColumn( + details["family_name"], details["column_qualifier"], start, end + ) + elif "delete_from_family" in input_dict: + details = input_dict["delete_from_family"] + instance = DeleteAllFromFamily(details["family_name"]) + elif "delete_from_row" in input_dict: + instance = DeleteAllFromRow() + except KeyError as e: + raise ValueError("Invalid mutation dictionary") from e + if instance is None: + raise ValueError("No valid mutation found") + if not issubclass(instance.__class__, cls): + raise ValueError("Mutation type mismatch") + return instance + + +class SetCell(Mutation): + def __init__( + self, + family: str, + qualifier: bytes | str, + new_value: bytes | str | int, + timestamp_micros: int | None = 
None, + ): + """ + Mutation to set the value of a cell + + Args: + - family: The name of the column family to which the new cell belongs. + - qualifier: The column qualifier of the new cell. + - new_value: The value of the new cell. str or int input will be converted to bytes + - timestamp_micros: The timestamp of the new cell. If None, the current timestamp will be used. + Timestamps will be sent with milisecond-percision. Extra precision will be truncated. + If -1, the server will assign a timestamp. Note that SetCell mutations with server-side + timestamps are non-idempotent operations and will not be retried. + """ + qualifier = qualifier.encode() if isinstance(qualifier, str) else qualifier + if not isinstance(qualifier, bytes): + raise TypeError("qualifier must be bytes or str") + if isinstance(new_value, str): + new_value = new_value.encode() + elif isinstance(new_value, int): + if abs(new_value) > _MAX_INCREMENT_VALUE: + raise ValueError( + "int values must be between -2**63 and 2**63 (64-bit signed int)" + ) + new_value = new_value.to_bytes(8, "big", signed=True) + if not isinstance(new_value, bytes): + raise TypeError("new_value must be bytes, str, or int") + if timestamp_micros is None: + # use current timestamp, with milisecond precision + timestamp_micros = time.time_ns() // 1000 + timestamp_micros = timestamp_micros - (timestamp_micros % 1000) + if timestamp_micros < _SERVER_SIDE_TIMESTAMP: + raise ValueError( + f"timestamp_micros must be positive (or {_SERVER_SIDE_TIMESTAMP} for server-side timestamp)" + ) + self.family = family + self.qualifier = qualifier + self.new_value = new_value + self.timestamp_micros = timestamp_micros + + def _to_dict(self) -> dict[str, Any]: + """Convert the mutation to a dictionary representation""" + return { + "set_cell": { + "family_name": self.family, + "column_qualifier": self.qualifier, + "timestamp_micros": self.timestamp_micros, + "value": self.new_value, + } + } + + def is_idempotent(self) -> bool: + """Check if the mutation is idempotent""" + return self.timestamp_micros != _SERVER_SIDE_TIMESTAMP + + +@dataclass +class DeleteRangeFromColumn(Mutation): + family: str + qualifier: bytes + # None represents 0 + start_timestamp_micros: int | None = None + # None represents infinity + end_timestamp_micros: int | None = None + + def __post_init__(self): + if ( + self.start_timestamp_micros is not None + and self.end_timestamp_micros is not None + and self.start_timestamp_micros > self.end_timestamp_micros + ): + raise ValueError("start_timestamp_micros must be <= end_timestamp_micros") + + def _to_dict(self) -> dict[str, Any]: + timestamp_range = {} + if self.start_timestamp_micros is not None: + timestamp_range["start_timestamp_micros"] = self.start_timestamp_micros + if self.end_timestamp_micros is not None: + timestamp_range["end_timestamp_micros"] = self.end_timestamp_micros + return { + "delete_from_column": { + "family_name": self.family, + "column_qualifier": self.qualifier, + "time_range": timestamp_range, + } + } + + +@dataclass +class DeleteAllFromFamily(Mutation): + family_to_delete: str + + def _to_dict(self) -> dict[str, Any]: + return { + "delete_from_family": { + "family_name": self.family_to_delete, + } + } + + +@dataclass +class DeleteAllFromRow(Mutation): + def _to_dict(self) -> dict[str, Any]: + return { + "delete_from_row": {}, + } + + +class RowMutationEntry: + def __init__(self, row_key: bytes | str, mutations: Mutation | list[Mutation]): + if isinstance(row_key, str): + row_key = row_key.encode("utf-8") + if 
isinstance(mutations, Mutation): + mutations = [mutations] + if len(mutations) == 0: + raise ValueError("mutations must not be empty") + elif len(mutations) > _MUTATE_ROWS_REQUEST_MUTATION_LIMIT: + raise ValueError( + f"entries must have <= {_MUTATE_ROWS_REQUEST_MUTATION_LIMIT} mutations" + ) + self.row_key = row_key + self.mutations = tuple(mutations) + + def _to_dict(self) -> dict[str, Any]: + return { + "row_key": self.row_key, + "mutations": [mutation._to_dict() for mutation in self.mutations], + } + + def _to_pb(self) -> types_pb.MutateRowsRequest.Entry: + return types_pb.MutateRowsRequest.Entry( + row_key=self.row_key, + mutations=[mutation._to_pb() for mutation in self.mutations], + ) + + def is_idempotent(self) -> bool: + """Check if the mutation is idempotent""" + return all(mutation.is_idempotent() for mutation in self.mutations) + + def size(self) -> int: + """ + Get the size of the mutation in bytes + """ + return getsizeof(self._to_dict()) + + @classmethod + def _from_dict(cls, input_dict: dict[str, Any]) -> RowMutationEntry: + return RowMutationEntry( + row_key=input_dict["row_key"], + mutations=[ + Mutation._from_dict(mutation) for mutation in input_dict["mutations"] + ], + ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/read_modify_write_rules.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/read_modify_write_rules.py new file mode 100644 index 000000000000..f43dbe79f175 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/read_modify_write_rules.py @@ -0,0 +1,77 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from __future__ import annotations + +import abc + +import google.cloud.bigtable_v2.types.data as data_pb + +# value must fit in 64-bit signed integer +_MAX_INCREMENT_VALUE = (1 << 63) - 1 + + +class ReadModifyWriteRule(abc.ABC): + def __init__(self, family: str, qualifier: bytes | str): + qualifier = ( + qualifier if isinstance(qualifier, bytes) else qualifier.encode("utf-8") + ) + self.family = family + self.qualifier = qualifier + + @abc.abstractmethod + def _to_dict(self) -> dict[str, str | bytes | int]: + raise NotImplementedError + + def _to_pb(self) -> data_pb.ReadModifyWriteRule: + return data_pb.ReadModifyWriteRule(**self._to_dict()) + + +class IncrementRule(ReadModifyWriteRule): + def __init__(self, family: str, qualifier: bytes | str, increment_amount: int = 1): + if not isinstance(increment_amount, int): + raise TypeError("increment_amount must be an integer") + if abs(increment_amount) > _MAX_INCREMENT_VALUE: + raise ValueError( + "increment_amount must be between -2**63 and 2**63 (64-bit signed int)" + ) + super().__init__(family, qualifier) + self.increment_amount = increment_amount + + def _to_dict(self) -> dict[str, str | bytes | int]: + return { + "family_name": self.family, + "column_qualifier": self.qualifier, + "increment_amount": self.increment_amount, + } + + +class AppendValueRule(ReadModifyWriteRule): + def __init__(self, family: str, qualifier: bytes | str, append_value: bytes | str): + append_value = ( + append_value.encode("utf-8") + if isinstance(append_value, str) + else append_value + ) + if not isinstance(append_value, bytes): + raise TypeError("append_value must be bytes or str") + super().__init__(family, qualifier) + self.append_value = append_value + + def _to_dict(self) -> dict[str, str | bytes | int]: + return { + "family_name": self.family, + "column_qualifier": self.qualifier, + "append_value": self.append_value, + } diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/read_rows_query.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/read_rows_query.py new file mode 100644 index 000000000000..362f54c3ebfa --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/read_rows_query.py @@ -0,0 +1,476 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from __future__ import annotations +from typing import TYPE_CHECKING, Any +from bisect import bisect_left +from bisect import bisect_right +from collections import defaultdict +from google.cloud.bigtable.data.row_filters import RowFilter + +from google.cloud.bigtable_v2.types import RowRange as RowRangePB +from google.cloud.bigtable_v2.types import RowSet as RowSetPB +from google.cloud.bigtable_v2.types import ReadRowsRequest as ReadRowsRequestPB + +if TYPE_CHECKING: + from google.cloud.bigtable.data import RowKeySamples + from google.cloud.bigtable.data import ShardedQuery + + +class RowRange: + """ + Represents a range of keys in a ReadRowsQuery + """ + + __slots__ = ("_pb",) + + def __init__( + self, + start_key: str | bytes | None = None, + end_key: str | bytes | None = None, + start_is_inclusive: bool | None = None, + end_is_inclusive: bool | None = None, + ): + """ + Args: + - start_key: The start key of the range. If empty, the range is unbounded on the left. + - end_key: The end key of the range. If empty, the range is unbounded on the right. + - start_is_inclusive: Whether the start key is inclusive. If None, the start key is + inclusive. + - end_is_inclusive: Whether the end key is inclusive. If None, the end key is not inclusive. + Raises: + - ValueError: if start_key is greater than end_key, or start_is_inclusive, + or end_is_inclusive is set when the corresponding key is None, + or start_key or end_key is not a string or bytes. + """ + # convert empty key inputs to None for consistency + start_key = None if not start_key else start_key + end_key = None if not end_key else end_key + # check for invalid combinations of arguments + if start_is_inclusive is None: + start_is_inclusive = True + + if end_is_inclusive is None: + end_is_inclusive = False + # ensure that start_key and end_key are bytes + if isinstance(start_key, str): + start_key = start_key.encode() + elif start_key is not None and not isinstance(start_key, bytes): + raise ValueError("start_key must be a string or bytes") + if isinstance(end_key, str): + end_key = end_key.encode() + elif end_key is not None and not isinstance(end_key, bytes): + raise ValueError("end_key must be a string or bytes") + # ensure that start_key is less than or equal to end_key + if start_key is not None and end_key is not None and start_key > end_key: + raise ValueError("start_key must be less than or equal to end_key") + + init_dict = {} + if start_key is not None: + if start_is_inclusive: + init_dict["start_key_closed"] = start_key + else: + init_dict["start_key_open"] = start_key + if end_key is not None: + if end_is_inclusive: + init_dict["end_key_closed"] = end_key + else: + init_dict["end_key_open"] = end_key + self._pb = RowRangePB(**init_dict) + + @property + def start_key(self) -> bytes | None: + """ + Returns the start key of the range. If None, the range is unbounded on the left. + """ + return self._pb.start_key_closed or self._pb.start_key_open or None + + @property + def end_key(self) -> bytes | None: + """ + Returns the end key of the range. If None, the range is unbounded on the right. + """ + return self._pb.end_key_closed or self._pb.end_key_open or None + + @property + def start_is_inclusive(self) -> bool: + """ + Returns whether the range is inclusive of the start key. + Returns True if the range is unbounded on the left. + """ + return not bool(self._pb.start_key_open) + + @property + def end_is_inclusive(self) -> bool: + """ + Returns whether the range is inclusive of the end key. 
+ Returns True if the range is unbounded on the right. + """ + return not bool(self._pb.end_key_open) + + def _to_pb(self) -> RowRangePB: + """Converts this object to a protobuf""" + return self._pb + + @classmethod + def _from_pb(cls, data: RowRangePB) -> RowRange: + """Creates a RowRange from a protobuf""" + instance = cls() + instance._pb = data + return instance + + @classmethod + def _from_dict(cls, data: dict[str, bytes | str]) -> RowRange: + """Creates a RowRange from a protobuf""" + formatted_data = { + k: v.encode() if isinstance(v, str) else v for k, v in data.items() + } + instance = cls() + instance._pb = RowRangePB(**formatted_data) + return instance + + def __bool__(self) -> bool: + """ + Empty RowRanges (representing a full table scan) are falsy, because + they can be substituted with None. Non-empty RowRanges are truthy. + """ + return bool( + self._pb.start_key_closed + or self._pb.start_key_open + or self._pb.end_key_closed + or self._pb.end_key_open + ) + + def __eq__(self, other: Any) -> bool: + if not isinstance(other, RowRange): + return NotImplemented + return self._pb == other._pb + + def __str__(self) -> str: + """ + Represent range as a string, e.g. "[b'a', b'z)" + Unbounded start or end keys are represented as "-inf" or "+inf" + """ + left = "[" if self.start_is_inclusive else "(" + right = "]" if self.end_is_inclusive else ")" + start = repr(self.start_key) if self.start_key is not None else "-inf" + end = repr(self.end_key) if self.end_key is not None else "+inf" + return f"{left}{start}, {end}{right}" + + def __repr__(self) -> str: + args_list = [] + args_list.append(f"start_key={self.start_key!r}") + args_list.append(f"end_key={self.end_key!r}") + if self.start_is_inclusive is False: + # only show start_is_inclusive if it is different from the default + args_list.append(f"start_is_inclusive={self.start_is_inclusive}") + if self.end_is_inclusive is True and self.end_key is not None: + # only show end_is_inclusive if it is different from the default + args_list.append(f"end_is_inclusive={self.end_is_inclusive}") + return f"RowRange({', '.join(args_list)})" + + +class ReadRowsQuery: + """ + Class to encapsulate details of a read row request + """ + + slots = ("_limit", "_filter", "_row_set") + + def __init__( + self, + row_keys: list[str | bytes] | str | bytes | None = None, + row_ranges: list[RowRange] | RowRange | None = None, + limit: int | None = None, + row_filter: RowFilter | None = None, + ): + """ + Create a new ReadRowsQuery + + Args: + - row_keys: row keys to include in the query + a query can contain multiple keys, but ranges should be preferred + - row_ranges: ranges of rows to include in the query + - limit: the maximum number of rows to return. 
None or 0 means no limit + default: None (no limit) + - row_filter: a RowFilter to apply to the query + """ + if row_keys is None: + row_keys = [] + if row_ranges is None: + row_ranges = [] + if not isinstance(row_ranges, list): + row_ranges = [row_ranges] + if not isinstance(row_keys, list): + row_keys = [row_keys] + row_keys = [key.encode() if isinstance(key, str) else key for key in row_keys] + self._row_set = RowSetPB( + row_keys=row_keys, row_ranges=[r._pb for r in row_ranges] + ) + self.limit = limit or None + self.filter = row_filter + + @property + def row_keys(self) -> list[bytes]: + return list(self._row_set.row_keys) + + @property + def row_ranges(self) -> list[RowRange]: + return [RowRange._from_pb(r) for r in self._row_set.row_ranges] + + @property + def limit(self) -> int | None: + return self._limit or None + + @limit.setter + def limit(self, new_limit: int | None): + """ + Set the maximum number of rows to return by this query. + + None or 0 means no limit + + Args: + - new_limit: the new limit to apply to this query + Returns: + - a reference to this query for chaining + Raises: + - ValueError if new_limit is < 0 + """ + if new_limit is not None and new_limit < 0: + raise ValueError("limit must be >= 0") + self._limit = new_limit + + @property + def filter(self) -> RowFilter | None: + return self._filter + + @filter.setter + def filter(self, row_filter: RowFilter | None): + """ + Set a RowFilter to apply to this query + + Args: + - row_filter: a RowFilter to apply to this query + Returns: + - a reference to this query for chaining + """ + self._filter = row_filter + + def add_key(self, row_key: str | bytes): + """ + Add a row key to this query + + A query can contain multiple keys, but ranges should be preferred + + Args: + - row_key: a key to add to this query + Returns: + - a reference to this query for chaining + Raises: + - ValueError if an input is not a string or bytes + """ + if isinstance(row_key, str): + row_key = row_key.encode() + elif not isinstance(row_key, bytes): + raise ValueError("row_key must be string or bytes") + if row_key not in self._row_set.row_keys: + self._row_set.row_keys.append(row_key) + + def add_range( + self, + row_range: RowRange, + ): + """ + Add a range of row keys to this query. 
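+
+        For example (illustrative keys)::
+
+            query.add_range(RowRange(start_key=b"row-a", end_key=b"row-z"))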
+ + Args: + - row_range: a range of row keys to add to this query + """ + if row_range not in self.row_ranges: + self._row_set.row_ranges.append(row_range._pb) + + def shard(self, shard_keys: RowKeySamples) -> ShardedQuery: + """ + Split this query into multiple queries that can be evenly distributed + across nodes and run in parallel + + Returns: + - a ShardedQuery that can be used in sharded_read_rows calls + Raises: + - AttributeError if the query contains a limit + """ + if self.limit is not None: + raise AttributeError("Cannot shard query with a limit") + if len(self.row_keys) == 0 and len(self.row_ranges) == 0: + # empty query represents full scan + # ensure that we have at least one key or range + full_scan_query = ReadRowsQuery( + row_ranges=RowRange(), row_filter=self.filter + ) + return full_scan_query.shard(shard_keys) + + sharded_queries: dict[int, ReadRowsQuery] = defaultdict( + lambda: ReadRowsQuery(row_filter=self.filter) + ) + # the split_points divde our key space into segments + # each split_point defines last key that belongs to a segment + # our goal is to break up the query into subqueries that each operate in a single segment + split_points = [sample[0] for sample in shard_keys if sample[0]] + + # handle row_keys + # use binary search to find the segment that each key belongs to + for this_key in list(self.row_keys): + # bisect_left: in case of exact match, pick left side (keys are inclusive ends) + segment_index = bisect_left(split_points, this_key) + sharded_queries[segment_index].add_key(this_key) + + # handle row_ranges + for this_range in self.row_ranges: + # defer to _shard_range helper + for segment_index, added_range in self._shard_range( + this_range, split_points + ): + sharded_queries[segment_index].add_range(added_range) + # return list of queries ordered by segment index + # pull populated segments out of sharded_queries dict + keys = sorted(list(sharded_queries.keys())) + # return list of queries + return [sharded_queries[k] for k in keys] + + @staticmethod + def _shard_range( + orig_range: RowRange, split_points: list[bytes] + ) -> list[tuple[int, RowRange]]: + """ + Helper function for sharding row_range into subranges that fit into + segments of the key-space, determined by split_points + + Args: + - orig_range: a row range to split + - split_points: a list of row keys that define the boundaries of segments. + each point represents the inclusive end of a segment + Returns: + - a list of tuples, containing a segment index and a new sub-range. + """ + # 1. find the index of the segment the start key belongs to + if orig_range.start_key is None: + # if range is open on the left, include first segment + start_segment = 0 + else: + # use binary search to find the segment the start key belongs to + # bisect method determines how we break ties when the start key matches a split point + # if inclusive, bisect_left to the left segment, otherwise bisect_right + bisect = bisect_left if orig_range.start_is_inclusive else bisect_right + start_segment = bisect(split_points, orig_range.start_key) + + # 2. find the index of the segment the end key belongs to + if orig_range.end_key is None: + # if range is open on the right, include final segment + end_segment = len(split_points) + else: + # use binary search to find the segment the end key belongs to. 
+ end_segment = bisect_left( + split_points, orig_range.end_key, lo=start_segment + ) + # note: end_segment will always bisect_left, because split points represent inclusive ends + # whether the end_key is includes the split point or not, the result is the same segment + # 3. create new range definitions for each segment this_range spans + if start_segment == end_segment: + # this_range is contained in a single segment. + # Add this_range to that segment's query only + return [(start_segment, orig_range)] + else: + results: list[tuple[int, RowRange]] = [] + # this_range spans multiple segments. Create a new range for each segment's query + # 3a. add new range for first segment this_range spans + # first range spans from start_key to the split_point representing the last key in the segment + last_key_in_first_segment = split_points[start_segment] + start_range = RowRange( + start_key=orig_range.start_key, + start_is_inclusive=orig_range.start_is_inclusive, + end_key=last_key_in_first_segment, + end_is_inclusive=True, + ) + results.append((start_segment, start_range)) + # 3b. add new range for last segment this_range spans + # we start the final range using the end key from of the previous segment, with is_inclusive=False + previous_segment = end_segment - 1 + last_key_before_segment = split_points[previous_segment] + end_range = RowRange( + start_key=last_key_before_segment, + start_is_inclusive=False, + end_key=orig_range.end_key, + end_is_inclusive=orig_range.end_is_inclusive, + ) + results.append((end_segment, end_range)) + # 3c. add new spanning range to all segments other than the first and last + for this_segment in range(start_segment + 1, end_segment): + prev_segment = this_segment - 1 + prev_end_key = split_points[prev_segment] + this_end_key = split_points[prev_segment + 1] + new_range = RowRange( + start_key=prev_end_key, + start_is_inclusive=False, + end_key=this_end_key, + end_is_inclusive=True, + ) + results.append((this_segment, new_range)) + return results + + def _to_pb(self, table) -> ReadRowsRequestPB: + """ + Convert this query into a dictionary that can be used to construct a + ReadRowsRequest protobuf + """ + return ReadRowsRequestPB( + table_name=table.table_name, + app_profile_id=table.app_profile_id, + filter=self.filter._to_pb() if self.filter else None, + rows_limit=self.limit or 0, + rows=self._row_set, + ) + + def __eq__(self, other): + """ + RowRanges are equal if they have the same row keys, row ranges, + filter and limit, or if they both represent a full scan with the + same filter and limit + """ + if not isinstance(other, ReadRowsQuery): + return False + # empty queries are equal + if len(self.row_keys) == 0 and len(other.row_keys) == 0: + this_range_empty = len(self.row_ranges) == 0 or all( + [bool(r) is False for r in self.row_ranges] + ) + other_range_empty = len(other.row_ranges) == 0 or all( + [bool(r) is False for r in other.row_ranges] + ) + if this_range_empty and other_range_empty: + return self.filter == other.filter and self.limit == other.limit + # otherwise, sets should have same sizes + if len(self.row_keys) != len(other.row_keys): + return False + if len(self.row_ranges) != len(other.row_ranges): + return False + ranges_match = all([row in other.row_ranges for row in self.row_ranges]) + return ( + self.row_keys == other.row_keys + and ranges_match + and self.filter == other.filter + and self.limit == other.limit + ) + + def __repr__(self): + return f"ReadRowsQuery(row_keys={list(self.row_keys)}, row_ranges={list(self.row_ranges)}, 
row_filter={self.filter}, limit={self.limit})" diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/row.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/row.py new file mode 100644 index 000000000000..ecf9cea663e3 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/row.py @@ -0,0 +1,450 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from collections import OrderedDict +from typing import Generator, overload, Any +from functools import total_ordering + +from google.cloud.bigtable_v2.types import Row as RowPB + +# Type aliases used internally for readability. +_family_type = str +_qualifier_type = bytes + + +class Row: + """ + Model class for row data returned from server + + Does not represent all data contained in the row, only data returned by a + query. + Expected to be read-only to users, and written by backend + + Can be indexed: + cells = row["family", "qualifier"] + """ + + __slots__ = ("row_key", "cells", "_index_data") + + def __init__( + self, + key: bytes, + cells: list[Cell], + ): + """ + Initializes a Row object + + Row objects are not intended to be created by users. + They are returned by the Bigtable backend. + """ + self.row_key = key + self.cells: list[Cell] = cells + # index is lazily created when needed + self._index_data: OrderedDict[ + _family_type, OrderedDict[_qualifier_type, list[Cell]] + ] | None = None + + @property + def _index( + self, + ) -> OrderedDict[_family_type, OrderedDict[_qualifier_type, list[Cell]]]: + """ + Returns an index of cells associated with each family and qualifier. + + The index is lazily created when needed + """ + if self._index_data is None: + self._index_data = OrderedDict() + for cell in self.cells: + self._index_data.setdefault(cell.family, OrderedDict()).setdefault( + cell.qualifier, [] + ).append(cell) + return self._index_data + + @classmethod + def _from_pb(cls, row_pb: RowPB) -> Row: + """ + Creates a row from a protobuf representation + + Row objects are not intended to be created by users. + They are returned by the Bigtable backend. 
+ """ + row_key: bytes = row_pb.key + cell_list: list[Cell] = [] + for family in row_pb.families: + for column in family.columns: + for cell in column.cells: + new_cell = Cell( + value=cell.value, + row_key=row_key, + family=family.name, + qualifier=column.qualifier, + timestamp_micros=cell.timestamp_micros, + labels=list(cell.labels) if cell.labels else None, + ) + cell_list.append(new_cell) + return cls(row_key, cells=cell_list) + + def get_cells( + self, family: str | None = None, qualifier: str | bytes | None = None + ) -> list[Cell]: + """ + Returns cells sorted in Bigtable native order: + - Family lexicographically ascending + - Qualifier ascending + - Timestamp in reverse chronological order + + If family or qualifier not passed, will include all + + Can also be accessed through indexing: + cells = row["family", "qualifier"] + cells = row["family"] + """ + if family is None: + if qualifier is not None: + # get_cells(None, "qualifier") is not allowed + raise ValueError("Qualifier passed without family") + else: + # return all cells on get_cells() + return self.cells + if qualifier is None: + # return all cells in family on get_cells(family) + return list(self._get_all_from_family(family)) + if isinstance(qualifier, str): + qualifier = qualifier.encode("utf-8") + # return cells in family and qualifier on get_cells(family, qualifier) + if family not in self._index: + raise ValueError(f"Family '{family}' not found in row '{self.row_key!r}'") + if qualifier not in self._index[family]: + raise ValueError( + f"Qualifier '{qualifier!r}' not found in family '{family}' in row '{self.row_key!r}'" + ) + return self._index[family][qualifier] + + def _get_all_from_family(self, family: str) -> Generator[Cell, None, None]: + """ + Returns all cells in the row for the family_id + """ + if family not in self._index: + raise ValueError(f"Family '{family}' not found in row '{self.row_key!r}'") + for qualifier in self._index[family]: + yield from self._index[family][qualifier] + + def __str__(self) -> str: + """ + Human-readable string representation + + { + (family='fam', qualifier=b'col'): [b'value', (+1 more),], + (family='fam', qualifier=b'col2'): [b'other'], + } + """ + output = ["{"] + for family, qualifier in self._get_column_components(): + cell_list = self[family, qualifier] + line = [f" (family={family!r}, qualifier={qualifier!r}): "] + if len(cell_list) == 0: + line.append("[],") + elif len(cell_list) == 1: + line.append(f"[{cell_list[0]}],") + else: + line.append(f"[{cell_list[0]}, (+{len(cell_list)-1} more)],") + output.append("".join(line)) + output.append("}") + return "\n".join(output) + + def __repr__(self): + cell_str_buffer = ["{"] + for family, qualifier in self._get_column_components(): + cell_list = self[family, qualifier] + repr_list = [cell._to_dict() for cell in cell_list] + cell_str_buffer.append(f" ('{family}', {qualifier!r}): {repr_list},") + cell_str_buffer.append("}") + cell_str = "\n".join(cell_str_buffer) + output = f"Row(key={self.row_key!r}, cells={cell_str})" + return output + + def _to_dict(self) -> dict[str, Any]: + """ + Returns a dictionary representation of the cell in the Bigtable Row + proto format + + https://cloud.google.com/bigtable/docs/reference/data/rpc/google.bigtable.v2#row + """ + family_list = [] + for family_name, qualifier_dict in self._index.items(): + qualifier_list = [] + for qualifier_name, cell_list in qualifier_dict.items(): + cell_dicts = [cell._to_dict() for cell in cell_list] + qualifier_list.append( + {"qualifier": qualifier_name, "cells": 
cell_dicts} + ) + family_list.append({"name": family_name, "columns": qualifier_list}) + return {"key": self.row_key, "families": family_list} + + # Sequence and Mapping methods + def __iter__(self): + """ + Allow iterating over all cells in the row + """ + return iter(self.cells) + + def __contains__(self, item): + """ + Implements `in` operator + + Works for both cells in the internal list, and `family` or + `(family, qualifier)` pairs associated with the cells + """ + if isinstance(item, _family_type): + return item in self._index + elif ( + isinstance(item, tuple) + and isinstance(item[0], _family_type) + and isinstance(item[1], (bytes, str)) + ): + q = item[1] if isinstance(item[1], bytes) else item[1].encode("utf-8") + return item[0] in self._index and q in self._index[item[0]] + # check if Cell is in Row + return item in self.cells + + @overload + def __getitem__( + self, + index: str | tuple[str, bytes | str], + ) -> list[Cell]: + # overload signature for type checking + pass + + @overload + def __getitem__(self, index: int) -> Cell: + # overload signature for type checking + pass + + @overload + def __getitem__(self, index: slice) -> list[Cell]: + # overload signature for type checking + pass + + def __getitem__(self, index): + """ + Implements [] indexing + + Supports indexing by family, (family, qualifier) pair, + numerical index, and index slicing + """ + if isinstance(index, _family_type): + return self.get_cells(family=index) + elif ( + isinstance(index, tuple) + and isinstance(index[0], _family_type) + and isinstance(index[1], (bytes, str)) + ): + return self.get_cells(family=index[0], qualifier=index[1]) + elif isinstance(index, int) or isinstance(index, slice): + # index is int or slice + return self.cells[index] + else: + raise TypeError( + "Index must be family_id, (family_id, qualifier), int, or slice" + ) + + def __len__(self): + """ + Implements `len()` operator + """ + return len(self.cells) + + def _get_column_components(self) -> list[tuple[str, bytes]]: + """ + Returns a list of (family, qualifier) pairs associated with the cells + + Pairs can be used for indexing + """ + return [(f, q) for f in self._index for q in self._index[f]] + + def __eq__(self, other): + """ + Implements `==` operator + """ + # for performance reasons, check row metadata + # before checking individual cells + if not isinstance(other, Row): + return False + if self.row_key != other.row_key: + return False + if len(self.cells) != len(other.cells): + return False + components = self._get_column_components() + other_components = other._get_column_components() + if len(components) != len(other_components): + return False + if components != other_components: + return False + for family, qualifier in components: + if len(self[family, qualifier]) != len(other[family, qualifier]): + return False + # compare individual cell lists + if self.cells != other.cells: + return False + return True + + def __ne__(self, other) -> bool: + """ + Implements `!=` operator + """ + return not self == other + + +@total_ordering +class Cell: + """ + Model class for cell data + + Does not represent all data contained in the cell, only data returned by a + query. 
+ Expected to be read-only to users, and written by backend + """ + + __slots__ = ( + "value", + "row_key", + "family", + "qualifier", + "timestamp_micros", + "labels", + ) + + def __init__( + self, + value: bytes, + row_key: bytes, + family: str, + qualifier: bytes | str, + timestamp_micros: int, + labels: list[str] | None = None, + ): + """ + Cell constructor + + Cell objects are not intended to be constructed by users. + They are returned by the Bigtable backend. + """ + self.value = value + self.row_key = row_key + self.family = family + if isinstance(qualifier, str): + qualifier = qualifier.encode() + self.qualifier = qualifier + self.timestamp_micros = timestamp_micros + self.labels = labels if labels is not None else [] + + def __int__(self) -> int: + """ + Allows casting cell to int + Interprets value as a 64-bit big-endian signed integer, as expected by + ReadModifyWrite increment rule + """ + return int.from_bytes(self.value, byteorder="big", signed=True) + + def _to_dict(self) -> dict[str, Any]: + """ + Returns a dictionary representation of the cell in the Bigtable Cell + proto format + + https://cloud.google.com/bigtable/docs/reference/data/rpc/google.bigtable.v2#cell + """ + cell_dict: dict[str, Any] = { + "value": self.value, + } + cell_dict["timestamp_micros"] = self.timestamp_micros + if self.labels: + cell_dict["labels"] = self.labels + return cell_dict + + def __str__(self) -> str: + """ + Allows casting cell to str + Prints encoded byte string, same as printing value directly. + """ + return str(self.value) + + def __repr__(self): + """ + Returns a string representation of the cell + """ + return f"Cell(value={self.value!r}, row_key={self.row_key!r}, family='{self.family}', qualifier={self.qualifier!r}, timestamp_micros={self.timestamp_micros}, labels={self.labels})" + + """For Bigtable native ordering""" + + def __lt__(self, other) -> bool: + """ + Implements `<` operator + """ + if not isinstance(other, Cell): + return NotImplemented + this_ordering = ( + self.family, + self.qualifier, + -self.timestamp_micros, + self.value, + self.labels, + ) + other_ordering = ( + other.family, + other.qualifier, + -other.timestamp_micros, + other.value, + other.labels, + ) + return this_ordering < other_ordering + + def __eq__(self, other) -> bool: + """ + Implements `==` operator + """ + if not isinstance(other, Cell): + return NotImplemented + return ( + self.row_key == other.row_key + and self.family == other.family + and self.qualifier == other.qualifier + and self.value == other.value + and self.timestamp_micros == other.timestamp_micros + and len(self.labels) == len(other.labels) + and all([label in other.labels for label in self.labels]) + ) + + def __ne__(self, other) -> bool: + """ + Implements `!=` operator + """ + return not self == other + + def __hash__(self): + """ + Implements `hash()` function to fingerprint cell + """ + return hash( + ( + self.row_key, + self.family, + self.qualifier, + self.value, + self.timestamp_micros, + tuple(self.labels), + ) + ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/row_filters.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/row_filters.py new file mode 100644 index 000000000000..9f09133d533d --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/row_filters.py @@ -0,0 +1,968 @@ +# Copyright 2016 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Filters for Google Cloud Bigtable Row classes.""" +from __future__ import annotations + +import struct + +from typing import Any, Sequence, TYPE_CHECKING, overload +from abc import ABC, abstractmethod + +from google.cloud._helpers import _microseconds_from_datetime # type: ignore +from google.cloud._helpers import _to_bytes # type: ignore +from google.cloud.bigtable_v2.types import data as data_v2_pb2 + +if TYPE_CHECKING: + # import dependencies when type checking + from datetime import datetime + +_PACK_I64 = struct.Struct(">q").pack + + +class RowFilter(ABC): + """Basic filter to apply to cells in a row. + + These values can be combined via :class:`RowFilterChain`, + :class:`RowFilterUnion` and :class:`ConditionalRowFilter`. + + .. note:: + + This class is a do-nothing base class for all row filters. + """ + + def _to_pb(self) -> data_v2_pb2.RowFilter: + """Converts the row filter to a protobuf. + + Returns: The converted current object. + """ + return data_v2_pb2.RowFilter(**self._to_dict()) + + @abstractmethod + def _to_dict(self) -> dict[str, Any]: + """Converts the row filter to a dict representation.""" + pass + + def __repr__(self) -> str: + return f"{self.__class__.__name__}()" + + +class _BoolFilter(RowFilter, ABC): + """Row filter that uses a boolean flag. + + :type flag: bool + :param flag: An indicator if a setting is turned on or off. + """ + + def __init__(self, flag: bool): + self.flag = flag + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return other.flag == self.flag + + def __ne__(self, other): + return not self == other + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(flag={self.flag})" + + +class SinkFilter(_BoolFilter): + """Advanced row filter to skip parent filters. + + :type flag: bool + :param flag: ADVANCED USE ONLY. Hook for introspection into the row filter. + Outputs all cells directly to the output of the read rather + than to any parent filter. Cannot be used within the + ``predicate_filter``, ``true_filter``, or ``false_filter`` + of a :class:`ConditionalRowFilter`. + """ + + def _to_dict(self) -> dict[str, Any]: + """Converts the row filter to a dict representation.""" + return {"sink": self.flag} + + +class PassAllFilter(_BoolFilter): + """Row filter equivalent to not filtering at all. + + :type flag: bool + :param flag: Matches all cells, regardless of input. Functionally + equivalent to leaving ``filter`` unset, but included for + completeness. + """ + + def _to_dict(self) -> dict[str, Any]: + """Converts the row filter to a dict representation.""" + return {"pass_all_filter": self.flag} + + +class BlockAllFilter(_BoolFilter): + """Row filter that doesn't match any cells. + + :type flag: bool + :param flag: Does not match any cells, regardless of input. Useful for + temporarily disabling just part of a filter. + """ + + def _to_dict(self) -> dict[str, Any]: + """Converts the row filter to a dict representation.""" + return {"block_all_filter": self.flag} + + +class _RegexFilter(RowFilter, ABC): + """Row filter that uses a regular expression. 
+ + The ``regex`` must be valid RE2 patterns. See Google's + `RE2 reference`_ for the accepted syntax. + + .. _RE2 reference: https://github.com/google/re2/wiki/Syntax + + :type regex: bytes or str + :param regex: + A regular expression (RE2) for some row filter. String values + will be encoded as ASCII. + """ + + def __init__(self, regex: str | bytes): + self.regex: bytes = _to_bytes(regex) + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return other.regex == self.regex + + def __ne__(self, other): + return not self == other + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(regex={self.regex!r})" + + +class RowKeyRegexFilter(_RegexFilter): + """Row filter for a row key regular expression. + + The ``regex`` must be valid RE2 patterns. See Google's + `RE2 reference`_ for the accepted syntax. + + .. _RE2 reference: https://github.com/google/re2/wiki/Syntax + + .. note:: + + Special care need be used with the expression used. Since + each of these properties can contain arbitrary bytes, the ``\\C`` + escape sequence must be used if a true wildcard is desired. The ``.`` + character will not match the new line character ``\\n``, which may be + present in a binary value. + + :type regex: bytes + :param regex: A regular expression (RE2) to match cells from rows with row + keys that satisfy this regex. For a + ``CheckAndMutateRowRequest``, this filter is unnecessary + since the row key is already specified. + """ + + def _to_dict(self) -> dict[str, Any]: + """Converts the row filter to a dict representation.""" + return {"row_key_regex_filter": self.regex} + + +class RowSampleFilter(RowFilter): + """Matches all cells from a row with probability p. + + :type sample: float + :param sample: The probability of matching a cell (must be in the + interval ``(0, 1)`` The end points are excluded). + """ + + def __init__(self, sample: float): + self.sample: float = sample + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return other.sample == self.sample + + def __ne__(self, other): + return not self == other + + def _to_dict(self) -> dict[str, Any]: + """Converts the row filter to a dict representation.""" + return {"row_sample_filter": self.sample} + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(sample={self.sample})" + + +class FamilyNameRegexFilter(_RegexFilter): + """Row filter for a family name regular expression. + + The ``regex`` must be valid RE2 patterns. See Google's + `RE2 reference`_ for the accepted syntax. + + .. _RE2 reference: https://github.com/google/re2/wiki/Syntax + + :type regex: str + :param regex: A regular expression (RE2) to match cells from columns in a + given column family. For technical reasons, the regex must + not contain the ``':'`` character, even if it is not being + used as a literal. + """ + + def _to_dict(self) -> dict[str, Any]: + """Converts the row filter to a dict representation.""" + return {"family_name_regex_filter": self.regex} + + +class ColumnQualifierRegexFilter(_RegexFilter): + """Row filter for a column qualifier regular expression. + + The ``regex`` must be valid RE2 patterns. See Google's + `RE2 reference`_ for the accepted syntax. + + .. _RE2 reference: https://github.com/google/re2/wiki/Syntax + + .. note:: + + Special care need be used with the expression used. Since + each of these properties can contain arbitrary bytes, the ``\\C`` + escape sequence must be used if a true wildcard is desired. 
The ``.`` + character will not match the new line character ``\\n``, which may be + present in a binary value. + + :type regex: bytes + :param regex: A regular expression (RE2) to match cells from column that + match this regex (irrespective of column family). + """ + + def _to_dict(self) -> dict[str, Any]: + """Converts the row filter to a dict representation.""" + return {"column_qualifier_regex_filter": self.regex} + + +class TimestampRange(object): + """Range of time with inclusive lower and exclusive upper bounds. + + :type start: :class:`datetime.datetime` + :param start: (Optional) The (inclusive) lower bound of the timestamp + range. If omitted, defaults to Unix epoch. + + :type end: :class:`datetime.datetime` + :param end: (Optional) The (exclusive) upper bound of the timestamp + range. If omitted, no upper bound is used. + """ + + def __init__(self, start: "datetime" | None = None, end: "datetime" | None = None): + self.start: "datetime" | None = start + self.end: "datetime" | None = end + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return other.start == self.start and other.end == self.end + + def __ne__(self, other): + return not self == other + + def _to_pb(self) -> data_v2_pb2.TimestampRange: + """Converts the :class:`TimestampRange` to a protobuf. + + Returns: The converted current object. + """ + return data_v2_pb2.TimestampRange(**self._to_dict()) + + def _to_dict(self) -> dict[str, int]: + """Converts the timestamp range to a dict representation.""" + timestamp_range_kwargs = {} + if self.start is not None: + start_time = _microseconds_from_datetime(self.start) // 1000 * 1000 + timestamp_range_kwargs["start_timestamp_micros"] = start_time + if self.end is not None: + end_time = _microseconds_from_datetime(self.end) + if end_time % 1000 != 0: + # if not a whole milisecond value, round up + end_time = end_time // 1000 * 1000 + 1000 + timestamp_range_kwargs["end_timestamp_micros"] = end_time + return timestamp_range_kwargs + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(start={self.start}, end={self.end})" + + +class TimestampRangeFilter(RowFilter): + """Row filter that limits cells to a range of time. + + :type range_: :class:`TimestampRange` + :param range_: Range of time that cells should match against. + """ + + def __init__(self, start: "datetime" | None = None, end: "datetime" | None = None): + self.range_: TimestampRange = TimestampRange(start, end) + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return other.range_ == self.range_ + + def __ne__(self, other): + return not self == other + + def _to_pb(self) -> data_v2_pb2.RowFilter: + """Converts the row filter to a protobuf. + + First converts the ``range_`` on the current object to a protobuf and + then uses it in the ``timestamp_range_filter`` field. + + Returns: The converted current object. + """ + return data_v2_pb2.RowFilter(timestamp_range_filter=self.range_._to_pb()) + + def _to_dict(self) -> dict[str, Any]: + """Converts the row filter to a dict representation.""" + return {"timestamp_range_filter": self.range_._to_dict()} + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(start={self.range_.start!r}, end={self.range_.end!r})" + + +class ColumnRangeFilter(RowFilter): + """A row filter to restrict to a range of columns. + + Both the start and end column can be included or excluded in the range. 
+ By default, we include them both, but this can be changed with optional + flags. + + :type family_id: str + :param family_id: The column family that contains the columns. Must + be of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + + :type start_qualifier: bytes + :param start_qualifier: The start of the range of columns. If no value is + used, the backend applies no upper bound to the + values. + + :type end_qualifier: bytes + :param end_qualifier: The end of the range of columns. If no value is used, + the backend applies no upper bound to the values. + + :type inclusive_start: bool + :param inclusive_start: Boolean indicating if the start column should be + included in the range (or excluded). Defaults + to :data:`True` if ``start_qualifier`` is passed and + no ``inclusive_start`` was given. + + :type inclusive_end: bool + :param inclusive_end: Boolean indicating if the end column should be + included in the range (or excluded). Defaults + to :data:`True` if ``end_qualifier`` is passed and + no ``inclusive_end`` was given. + + :raises: :class:`ValueError ` if ``inclusive_start`` + is set but no ``start_qualifier`` is given or if ``inclusive_end`` + is set but no ``end_qualifier`` is given + """ + + def __init__( + self, + family_id: str, + start_qualifier: bytes | None = None, + end_qualifier: bytes | None = None, + inclusive_start: bool | None = None, + inclusive_end: bool | None = None, + ): + if inclusive_start is None: + inclusive_start = True + elif start_qualifier is None: + raise ValueError( + "inclusive_start was specified but no start_qualifier was given." + ) + if inclusive_end is None: + inclusive_end = True + elif end_qualifier is None: + raise ValueError( + "inclusive_end was specified but no end_qualifier was given." + ) + + self.family_id = family_id + + self.start_qualifier = start_qualifier + self.inclusive_start = inclusive_start + + self.end_qualifier = end_qualifier + self.inclusive_end = inclusive_end + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return ( + other.family_id == self.family_id + and other.start_qualifier == self.start_qualifier + and other.end_qualifier == self.end_qualifier + and other.inclusive_start == self.inclusive_start + and other.inclusive_end == self.inclusive_end + ) + + def __ne__(self, other): + return not self == other + + def _to_pb(self) -> data_v2_pb2.RowFilter: + """Converts the row filter to a protobuf. + + First converts to a :class:`.data_v2_pb2.ColumnRange` and then uses it + in the ``column_range_filter`` field. + + Returns: The converted current object. 
+ """ + column_range = data_v2_pb2.ColumnRange(**self._range_to_dict()) + return data_v2_pb2.RowFilter(column_range_filter=column_range) + + def _range_to_dict(self) -> dict[str, str | bytes]: + """Converts the column range range to a dict representation.""" + column_range_kwargs: dict[str, str | bytes] = {} + column_range_kwargs["family_name"] = self.family_id + if self.start_qualifier is not None: + if self.inclusive_start: + key = "start_qualifier_closed" + else: + key = "start_qualifier_open" + column_range_kwargs[key] = _to_bytes(self.start_qualifier) + if self.end_qualifier is not None: + if self.inclusive_end: + key = "end_qualifier_closed" + else: + key = "end_qualifier_open" + column_range_kwargs[key] = _to_bytes(self.end_qualifier) + return column_range_kwargs + + def _to_dict(self) -> dict[str, Any]: + """Converts the row filter to a dict representation.""" + return {"column_range_filter": self._range_to_dict()} + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(family_id='{self.family_id}', start_qualifier={self.start_qualifier!r}, end_qualifier={self.end_qualifier!r}, inclusive_start={self.inclusive_start}, inclusive_end={self.inclusive_end})" + + +class ValueRegexFilter(_RegexFilter): + """Row filter for a value regular expression. + + The ``regex`` must be valid RE2 patterns. See Google's + `RE2 reference`_ for the accepted syntax. + + .. _RE2 reference: https://github.com/google/re2/wiki/Syntax + + .. note:: + + Special care need be used with the expression used. Since + each of these properties can contain arbitrary bytes, the ``\\C`` + escape sequence must be used if a true wildcard is desired. The ``.`` + character will not match the new line character ``\\n``, which may be + present in a binary value. + + :type regex: bytes or str + :param regex: A regular expression (RE2) to match cells with values that + match this regex. String values will be encoded as ASCII. + """ + + def _to_dict(self) -> dict[str, bytes]: + """Converts the row filter to a dict representation.""" + return {"value_regex_filter": self.regex} + + +class LiteralValueFilter(ValueRegexFilter): + """Row filter for an exact value. + + + :type value: bytes or str or int + :param value: + a literal string, integer, or the equivalent bytes. + Integer values will be packed into signed 8-bytes. + """ + + def __init__(self, value: bytes | str | int): + if isinstance(value, int): + value = _PACK_I64(value) + elif isinstance(value, str): + value = value.encode("utf-8") + value = self._write_literal_regex(value) + super(LiteralValueFilter, self).__init__(value) + + @staticmethod + def _write_literal_regex(input_bytes: bytes) -> bytes: + """ + Escape re2 special characters from literal bytes. + + Extracted from: re2 QuoteMeta: + https://github.com/google/re2/blob/70f66454c255080a54a8da806c52d1f618707f8a/re2/re2.cc#L456 + """ + result = bytearray() + for byte in input_bytes: + # If this is the part of a UTF8 or Latin1 character, we need \ + # to copy this byte without escaping. Experimentally this is \ + # what works correctly with the regexp library. \ + utf8_latin1_check = (byte & 128) == 0 + if ( + (byte < ord("a") or byte > ord("z")) + and (byte < ord("A") or byte > ord("Z")) + and (byte < ord("0") or byte > ord("9")) + and byte != ord("_") + and utf8_latin1_check + ): + if byte == 0: + # Special handling for null chars. + # Note that this special handling is not strictly required for RE2, + # but this quoting is required for other regexp libraries such as + # PCRE. 
+ # Can't use "\\0" since the next character might be a digit. + result.extend([ord("\\"), ord("x"), ord("0"), ord("0")]) + continue + result.append(ord(b"\\")) + result.append(byte) + return bytes(result) + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(value={self.regex!r})" + + +class ValueRangeFilter(RowFilter): + """A range of values to restrict to in a row filter. + + Will only match cells that have values in this range. + + Both the start and end value can be included or excluded in the range. + By default, we include them both, but this can be changed with optional + flags. + + :type start_value: bytes + :param start_value: The start of the range of values. If no value is used, + the backend applies no lower bound to the values. + + :type end_value: bytes + :param end_value: The end of the range of values. If no value is used, + the backend applies no upper bound to the values. + + :type inclusive_start: bool + :param inclusive_start: Boolean indicating if the start value should be + included in the range (or excluded). Defaults + to :data:`True` if ``start_value`` is passed and + no ``inclusive_start`` was given. + + :type inclusive_end: bool + :param inclusive_end: Boolean indicating if the end value should be + included in the range (or excluded). Defaults + to :data:`True` if ``end_value`` is passed and + no ``inclusive_end`` was given. + + :raises: :class:`ValueError ` if ``inclusive_start`` + is set but no ``start_value`` is given or if ``inclusive_end`` + is set but no ``end_value`` is given + """ + + def __init__( + self, + start_value: bytes | int | None = None, + end_value: bytes | int | None = None, + inclusive_start: bool | None = None, + inclusive_end: bool | None = None, + ): + if inclusive_start is None: + inclusive_start = True + elif start_value is None: + raise ValueError( + "inclusive_start was specified but no start_value was given." + ) + if inclusive_end is None: + inclusive_end = True + elif end_value is None: + raise ValueError( + "inclusive_end was specified but no end_qualifier was given." + ) + if isinstance(start_value, int): + start_value = _PACK_I64(start_value) + self.start_value = start_value + self.inclusive_start = inclusive_start + + if isinstance(end_value, int): + end_value = _PACK_I64(end_value) + self.end_value = end_value + self.inclusive_end = inclusive_end + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return ( + other.start_value == self.start_value + and other.end_value == self.end_value + and other.inclusive_start == self.inclusive_start + and other.inclusive_end == self.inclusive_end + ) + + def __ne__(self, other): + return not self == other + + def _to_pb(self) -> data_v2_pb2.RowFilter: + """Converts the row filter to a protobuf. + + First converts to a :class:`.data_v2_pb2.ValueRange` and then uses + it to create a row filter protobuf. + + Returns: The converted current object. 
+ """ + value_range = data_v2_pb2.ValueRange(**self._range_to_dict()) + return data_v2_pb2.RowFilter(value_range_filter=value_range) + + def _range_to_dict(self) -> dict[str, bytes]: + """Converts the value range range to a dict representation.""" + value_range_kwargs = {} + if self.start_value is not None: + if self.inclusive_start: + key = "start_value_closed" + else: + key = "start_value_open" + value_range_kwargs[key] = _to_bytes(self.start_value) + if self.end_value is not None: + if self.inclusive_end: + key = "end_value_closed" + else: + key = "end_value_open" + value_range_kwargs[key] = _to_bytes(self.end_value) + return value_range_kwargs + + def _to_dict(self) -> dict[str, Any]: + """Converts the row filter to a dict representation.""" + return {"value_range_filter": self._range_to_dict()} + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(start_value={self.start_value!r}, end_value={self.end_value!r}, inclusive_start={self.inclusive_start}, inclusive_end={self.inclusive_end})" + + +class _CellCountFilter(RowFilter, ABC): + """Row filter that uses an integer count of cells. + + The cell count is used as an offset or a limit for the number + of results returned. + + :type num_cells: int + :param num_cells: An integer count / offset / limit. + """ + + def __init__(self, num_cells: int): + self.num_cells = num_cells + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return other.num_cells == self.num_cells + + def __ne__(self, other): + return not self == other + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(num_cells={self.num_cells})" + + +class CellsRowOffsetFilter(_CellCountFilter): + """Row filter to skip cells in a row. + + :type num_cells: int + :param num_cells: Skips the first N cells of the row. + """ + + def _to_dict(self) -> dict[str, int]: + """Converts the row filter to a dict representation.""" + return {"cells_per_row_offset_filter": self.num_cells} + + +class CellsRowLimitFilter(_CellCountFilter): + """Row filter to limit cells in a row. + + :type num_cells: int + :param num_cells: Matches only the first N cells of the row. + """ + + def _to_dict(self) -> dict[str, int]: + """Converts the row filter to a dict representation.""" + return {"cells_per_row_limit_filter": self.num_cells} + + +class CellsColumnLimitFilter(_CellCountFilter): + """Row filter to limit cells in a column. + + :type num_cells: int + :param num_cells: Matches only the most recent N cells within each column. + This filters a (family name, column) pair, based on + timestamps of each cell. + """ + + def _to_dict(self) -> dict[str, int]: + """Converts the row filter to a dict representation.""" + return {"cells_per_column_limit_filter": self.num_cells} + + +class StripValueTransformerFilter(_BoolFilter): + """Row filter that transforms cells into empty string (0 bytes). + + :type flag: bool + :param flag: If :data:`True`, replaces each cell's value with the empty + string. As the name indicates, this is more useful as a + transformer than a generic query / filter. + """ + + def _to_dict(self) -> dict[str, Any]: + """Converts the row filter to a dict representation.""" + return {"strip_value_transformer": self.flag} + + +class ApplyLabelFilter(RowFilter): + """Filter to apply labels to cells. + + Intended to be used as an intermediate filter on a pre-existing filtered + result set. 
This way if two sets are combined, the label can tell where + the cell(s) originated.This allows the client to determine which results + were produced from which part of the filter. + + .. note:: + + Due to a technical limitation of the backend, it is not currently + possible to apply multiple labels to a cell. + + :type label: str + :param label: Label to apply to cells in the output row. Values must be + at most 15 characters long, and match the pattern + ``[a-z0-9\\-]+``. + """ + + def __init__(self, label: str): + self.label = label + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return other.label == self.label + + def __ne__(self, other): + return not self == other + + def _to_dict(self) -> dict[str, str]: + """Converts the row filter to a dict representation.""" + return {"apply_label_transformer": self.label} + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(label={self.label})" + + +class _FilterCombination(RowFilter, Sequence[RowFilter], ABC): + """Chain of row filters. + + Sends rows through several filters in sequence. The filters are "chained" + together to process a row. After the first filter is applied, the second + is applied to the filtered output and so on for subsequent filters. + + :type filters: list + :param filters: List of :class:`RowFilter` + """ + + def __init__(self, filters: list[RowFilter] | None = None): + if filters is None: + filters = [] + self.filters: list[RowFilter] = filters + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return other.filters == self.filters + + def __ne__(self, other): + return not self == other + + def __len__(self) -> int: + return len(self.filters) + + @overload + def __getitem__(self, index: int) -> RowFilter: + # overload signature for type checking + pass + + @overload + def __getitem__(self, index: slice) -> list[RowFilter]: + # overload signature for type checking + pass + + def __getitem__(self, index): + return self.filters[index] + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(filters={self.filters})" + + def __str__(self) -> str: + """ + Returns a string representation of the filter chain. + + Adds line breaks between each sub-filter for readability. + """ + output = [f"{self.__class__.__name__}(["] + for filter_ in self.filters: + filter_lines = f"{filter_},".splitlines() + output.extend([f" {line}" for line in filter_lines]) + output.append("])") + return "\n".join(output) + + +class RowFilterChain(_FilterCombination): + """Chain of row filters. + + Sends rows through several filters in sequence. The filters are "chained" + together to process a row. After the first filter is applied, the second + is applied to the filtered output and so on for subsequent filters. + + :type filters: list + :param filters: List of :class:`RowFilter` + """ + + def _to_pb(self) -> data_v2_pb2.RowFilter: + """Converts the row filter to a protobuf. + + Returns: The converted current object. + """ + chain = data_v2_pb2.RowFilter.Chain( + filters=[row_filter._to_pb() for row_filter in self.filters] + ) + return data_v2_pb2.RowFilter(chain=chain) + + def _to_dict(self) -> dict[str, Any]: + """Converts the row filter to a dict representation.""" + return {"chain": {"filters": [f._to_dict() for f in self.filters]}} + + +class RowFilterUnion(_FilterCombination): + """Union of row filters. + + Sends rows through several filters simultaneously, then + merges / interleaves all the filtered results together. 
+ + If multiple cells are produced with the same column and timestamp, + they will all appear in the output row in an unspecified mutual order. + + :type filters: list + :param filters: List of :class:`RowFilter` + """ + + def _to_pb(self) -> data_v2_pb2.RowFilter: + """Converts the row filter to a protobuf. + + Returns: The converted current object. + """ + interleave = data_v2_pb2.RowFilter.Interleave( + filters=[row_filter._to_pb() for row_filter in self.filters] + ) + return data_v2_pb2.RowFilter(interleave=interleave) + + def _to_dict(self) -> dict[str, Any]: + """Converts the row filter to a dict representation.""" + return {"interleave": {"filters": [f._to_dict() for f in self.filters]}} + + +class ConditionalRowFilter(RowFilter): + """Conditional row filter which exhibits ternary behavior. + + Executes one of two filters based on another filter. If the ``predicate_filter`` + returns any cells in the row, then ``true_filter`` is executed. If not, + then ``false_filter`` is executed. + + .. note:: + + The ``predicate_filter`` does not execute atomically with the true and false + filters, which may lead to inconsistent or unexpected results. + + Additionally, executing a :class:`ConditionalRowFilter` has poor + performance on the server, especially when ``false_filter`` is set. + + :type predicate_filter: :class:`RowFilter` + :param predicate_filter: The filter to condition on before executing the + true/false filters. + + :type true_filter: :class:`RowFilter` + :param true_filter: (Optional) The filter to execute if there are any cells + matching ``predicate_filter``. If not provided, no results + will be returned in the true case. + + :type false_filter: :class:`RowFilter` + :param false_filter: (Optional) The filter to execute if there are no cells + matching ``predicate_filter``. If not provided, no results + will be returned in the false case. + """ + + def __init__( + self, + predicate_filter: RowFilter, + true_filter: RowFilter | None = None, + false_filter: RowFilter | None = None, + ): + self.predicate_filter = predicate_filter + self.true_filter = true_filter + self.false_filter = false_filter + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return ( + other.predicate_filter == self.predicate_filter + and other.true_filter == self.true_filter + and other.false_filter == self.false_filter + ) + + def __ne__(self, other): + return not self == other + + def _to_pb(self) -> data_v2_pb2.RowFilter: + """Converts the row filter to a protobuf. + + Returns: The converted current object. 
+ """ + condition_kwargs = {"predicate_filter": self.predicate_filter._to_pb()} + if self.true_filter is not None: + condition_kwargs["true_filter"] = self.true_filter._to_pb() + if self.false_filter is not None: + condition_kwargs["false_filter"] = self.false_filter._to_pb() + condition = data_v2_pb2.RowFilter.Condition(**condition_kwargs) + return data_v2_pb2.RowFilter(condition=condition) + + def _condition_to_dict(self) -> dict[str, Any]: + """Converts the condition to a dict representation.""" + condition_kwargs = {"predicate_filter": self.predicate_filter._to_dict()} + if self.true_filter is not None: + condition_kwargs["true_filter"] = self.true_filter._to_dict() + if self.false_filter is not None: + condition_kwargs["false_filter"] = self.false_filter._to_dict() + return condition_kwargs + + def _to_dict(self) -> dict[str, Any]: + """Converts the row filter to a dict representation.""" + return {"condition": self._condition_to_dict()} + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(predicate_filter={self.predicate_filter!r}, true_filter={self.true_filter!r}, false_filter={self.false_filter!r})" + + def __str__(self) -> str: + output = [f"{self.__class__.__name__}("] + for filter_type in ("predicate_filter", "true_filter", "false_filter"): + filter_ = getattr(self, filter_type) + if filter_ is None: + continue + # add the new filter set, adding indentations for readability + filter_lines = f"{filter_type}={filter_},".splitlines() + output.extend(f" {line}" for line in filter_lines) + output.append(")") + return "\n".join(output) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/py.typed b/packages/google-cloud-bigtable/google/cloud/bigtable/py.typed deleted file mode 100644 index 7bd4705d4d9f..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-bigtable package uses inline types. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py index 33686a4a8618..df5d7e0de495 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # +import functools from collections import OrderedDict import functools import re @@ -40,9 +41,9 @@ from google.oauth2 import service_account # type: ignore try: - OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault] + OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.AsyncRetry, object] # type: ignore + OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore from google.cloud.bigtable_v2.types import bigtable from google.cloud.bigtable_v2.types import data @@ -272,7 +273,8 @@ def read_rows( "the individual field arguments should be set." ) - request = bigtable.ReadRowsRequest(request) + if not isinstance(request, bigtable.ReadRowsRequest): + request = bigtable.ReadRowsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. 
@@ -283,12 +285,9 @@ def read_rows( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.read_rows, - default_timeout=43200.0, - client_info=DEFAULT_CLIENT_INFO, - ) - + rpc = self._client._transport._wrapped_methods[ + self._client._transport.read_rows + ] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( @@ -367,7 +366,8 @@ def sample_row_keys( "the individual field arguments should be set." ) - request = bigtable.SampleRowKeysRequest(request) + if not isinstance(request, bigtable.SampleRowKeysRequest): + request = bigtable.SampleRowKeysRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -378,12 +378,9 @@ def sample_row_keys( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.sample_row_keys, - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) - + rpc = self._client._transport._wrapped_methods[ + self._client._transport.sample_row_keys + ] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( @@ -479,7 +476,8 @@ async def mutate_row( "the individual field arguments should be set." ) - request = bigtable.MutateRowRequest(request) + if not isinstance(request, bigtable.MutateRowRequest): + request = bigtable.MutateRowRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -494,21 +492,9 @@ async def mutate_row( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.mutate_row, - default_retry=retries.AsyncRetry( - initial=0.01, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.mutate_row + ] # Certain fields should be provided within the metadata header; # add these here. @@ -601,7 +587,8 @@ def mutate_rows( "the individual field arguments should be set." ) - request = bigtable.MutateRowsRequest(request) + if not isinstance(request, bigtable.MutateRowsRequest): + request = bigtable.MutateRowsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -614,11 +601,9 @@ def mutate_rows( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.mutate_rows, - default_timeout=600.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.mutate_rows + ] # Certain fields should be provided within the metadata header; # add these here. @@ -749,7 +734,8 @@ async def check_and_mutate_row( "the individual field arguments should be set." ) - request = bigtable.CheckAndMutateRowRequest(request) + if not isinstance(request, bigtable.CheckAndMutateRowRequest): + request = bigtable.CheckAndMutateRowRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. 
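For reference, the read-only Row/Cell model introduced in data/row.py earlier in this patch supports family and (family, qualifier) indexing, positional indexing, and membership checks. A small sketch with a hand-built Row purely for illustration; in practice rows are produced by the library via Row._from_pb on ReadRows responses:

    from google.cloud.bigtable.data.row import Cell, Row

    cells = [
        Cell(value=b"v2", row_key=b"r1", family="fam", qualifier=b"col", timestamp_micros=200),
        Cell(value=b"v1", row_key=b"r1", family="fam", qualifier=b"col", timestamp_micros=100),
    ]
    row = Row(key=b"r1", cells=cells)

    pair_cells = row["fam", "col"]    # cells for one (family, qualifier) pair
    family_cells = row["fam"]         # all cells in a family
    first_cell = row[0]               # positional access into the flat cell list
    assert ("fam", b"col") in row     # membership by (family, qualifier)
    assert len(row) == 2              # number of cells in the row
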
@@ -768,11 +754,9 @@ async def check_and_mutate_row( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.check_and_mutate_row, - default_timeout=20.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.check_and_mutate_row + ] # Certain fields should be provided within the metadata header; # add these here. @@ -851,7 +835,8 @@ async def ping_and_warm( "the individual field arguments should be set." ) - request = bigtable.PingAndWarmRequest(request) + if not isinstance(request, bigtable.PingAndWarmRequest): + request = bigtable.PingAndWarmRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -862,11 +847,9 @@ async def ping_and_warm( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.ping_and_warm, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.ping_and_warm + ] # Certain fields should be provided within the metadata header; # add these here. @@ -968,7 +951,8 @@ async def read_modify_write_row( "the individual field arguments should be set." ) - request = bigtable.ReadModifyWriteRowRequest(request) + if not isinstance(request, bigtable.ReadModifyWriteRowRequest): + request = bigtable.ReadModifyWriteRowRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -983,11 +967,9 @@ async def read_modify_write_row( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.read_modify_write_row, - default_timeout=20.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.read_modify_write_row + ] # Certain fields should be provided within the metadata header; # add these here. @@ -1076,7 +1058,10 @@ def generate_initial_change_stream_partitions( "the individual field arguments should be set." ) - request = bigtable.GenerateInitialChangeStreamPartitionsRequest(request) + if not isinstance( + request, bigtable.GenerateInitialChangeStreamPartitionsRequest + ): + request = bigtable.GenerateInitialChangeStreamPartitionsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -1174,7 +1159,8 @@ def read_change_stream( "the individual field arguments should be set." ) - request = bigtable.ReadChangeStreamRequest(request) + if not isinstance(request, bigtable.ReadChangeStreamRequest): + request = bigtable.ReadChangeStreamRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. 
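The async_client.py hunks above stop wrapping each RPC inline with gapic_v1.method_async.wrap_method and instead look up callables that the transport pre-wraps once in _prep_wrapped_messages (added to grpc_asyncio.py later in this patch). A reduced, stand-alone sketch of that pattern; ExampleTransport and its single RPC are hypothetical stand-ins, not code from the patch:

    from google.api_core import gapic_v1


    class ExampleTransport:
        """Hypothetical stand-in for the Bigtable gRPC transport."""

        async def read_rows(self, request, **kwargs):
            ...  # the real transport property returns the gRPC stub callable

        def _prep_wrapped_messages(self, client_info):
            # wrap each RPC once with its default timeout/retry, keyed by the raw method
            self._wrapped_methods = {
                self.read_rows: gapic_v1.method_async.wrap_method(
                    self.read_rows,
                    default_timeout=43200.0,
                    client_info=client_info,
                ),
            }


    transport = ExampleTransport()
    transport._prep_wrapped_messages(gapic_v1.client_info.ClientInfo())
    # client-side dispatch, mirroring the hunks above
    rpc = transport._wrapped_methods[transport.read_rows]
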
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py index db393faa7f94..54ba6af4353b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py @@ -43,9 +43,9 @@ from google.oauth2 import service_account # type: ignore try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore + OptionalRetry = Union[retries.Retry, object, None] # type: ignore from google.cloud.bigtable_v2.types import bigtable from google.cloud.bigtable_v2.types import data @@ -53,6 +53,7 @@ from .transports.base import BigtableTransport, DEFAULT_CLIENT_INFO from .transports.grpc import BigtableGrpcTransport from .transports.grpc_asyncio import BigtableGrpcAsyncIOTransport +from .transports.pooled_grpc_asyncio import PooledBigtableGrpcAsyncIOTransport from .transports.rest import BigtableRestTransport @@ -67,6 +68,7 @@ class BigtableClientMeta(type): _transport_registry = OrderedDict() # type: Dict[str, Type[BigtableTransport]] _transport_registry["grpc"] = BigtableGrpcTransport _transport_registry["grpc_asyncio"] = BigtableGrpcAsyncIOTransport + _transport_registry["pooled_grpc_asyncio"] = PooledBigtableGrpcAsyncIOTransport _transport_registry["rest"] = BigtableRestTransport def get_transport_class( diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py index c09443bc27e0..6a9eb0e5888b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py @@ -19,6 +19,7 @@ from .base import BigtableTransport from .grpc import BigtableGrpcTransport from .grpc_asyncio import BigtableGrpcAsyncIOTransport +from .pooled_grpc_asyncio import PooledBigtableGrpcAsyncIOTransport from .rest import BigtableRestTransport from .rest import BigtableRestInterceptor @@ -27,12 +28,14 @@ _transport_registry = OrderedDict() # type: Dict[str, Type[BigtableTransport]] _transport_registry["grpc"] = BigtableGrpcTransport _transport_registry["grpc_asyncio"] = BigtableGrpcAsyncIOTransport +_transport_registry["pooled_grpc_asyncio"] = PooledBigtableGrpcAsyncIOTransport _transport_registry["rest"] = BigtableRestTransport __all__ = ( "BigtableTransport", "BigtableGrpcTransport", "BigtableGrpcAsyncIOTransport", + "PooledBigtableGrpcAsyncIOTransport", "BigtableRestTransport", "BigtableRestInterceptor", ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py index 2c0cbdad64c0..1d0a2bc4cf3f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py @@ -18,6 +18,8 @@ from google.api_core import gapic_v1 from google.api_core import grpc_helpers_async +from google.api_core import exceptions as 
core_exceptions +from google.api_core import retry as retries from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore @@ -512,6 +514,66 @@ def read_change_stream( ) return self._stubs["read_change_stream"] + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.read_rows: gapic_v1.method_async.wrap_method( + self.read_rows, + default_timeout=43200.0, + client_info=client_info, + ), + self.sample_row_keys: gapic_v1.method_async.wrap_method( + self.sample_row_keys, + default_timeout=60.0, + client_info=client_info, + ), + self.mutate_row: gapic_v1.method_async.wrap_method( + self.mutate_row, + default_retry=retries.Retry( + initial=0.01, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.mutate_rows: gapic_v1.method_async.wrap_method( + self.mutate_rows, + default_timeout=600.0, + client_info=client_info, + ), + self.check_and_mutate_row: gapic_v1.method_async.wrap_method( + self.check_and_mutate_row, + default_timeout=20.0, + client_info=client_info, + ), + self.ping_and_warm: gapic_v1.method_async.wrap_method( + self.ping_and_warm, + default_timeout=None, + client_info=client_info, + ), + self.read_modify_write_row: gapic_v1.method_async.wrap_method( + self.read_modify_write_row, + default_timeout=20.0, + client_info=client_info, + ), + self.generate_initial_change_stream_partitions: gapic_v1.method_async.wrap_method( + self.generate_initial_change_stream_partitions, + default_timeout=60.0, + client_info=client_info, + ), + self.read_change_stream: gapic_v1.method_async.wrap_method( + self.read_change_stream, + default_timeout=43200.0, + client_info=client_info, + ), + } + def close(self): return self.grpc_channel.close() diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/pooled_grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/pooled_grpc_asyncio.py new file mode 100644 index 000000000000..372e5796d6bc --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/pooled_grpc_asyncio.py @@ -0,0 +1,426 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import asyncio +import warnings +from functools import partialmethod +from functools import partial +from typing import ( + Awaitable, + Callable, + Dict, + Optional, + Sequence, + Tuple, + Union, + List, + Type, +) + +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.bigtable_v2.types import bigtable +from .base import BigtableTransport, DEFAULT_CLIENT_INFO +from .grpc_asyncio import BigtableGrpcAsyncIOTransport + + +class PooledMultiCallable: + def __init__(self, channel_pool: "PooledChannel", *args, **kwargs): + self._init_args = args + self._init_kwargs = kwargs + self.next_channel_fn = channel_pool.next_channel + + +class PooledUnaryUnaryMultiCallable(PooledMultiCallable, aio.UnaryUnaryMultiCallable): + def __call__(self, *args, **kwargs) -> aio.UnaryUnaryCall: + return self.next_channel_fn().unary_unary( + *self._init_args, **self._init_kwargs + )(*args, **kwargs) + + +class PooledUnaryStreamMultiCallable(PooledMultiCallable, aio.UnaryStreamMultiCallable): + def __call__(self, *args, **kwargs) -> aio.UnaryStreamCall: + return self.next_channel_fn().unary_stream( + *self._init_args, **self._init_kwargs + )(*args, **kwargs) + + +class PooledStreamUnaryMultiCallable(PooledMultiCallable, aio.StreamUnaryMultiCallable): + def __call__(self, *args, **kwargs) -> aio.StreamUnaryCall: + return self.next_channel_fn().stream_unary( + *self._init_args, **self._init_kwargs + )(*args, **kwargs) + + +class PooledStreamStreamMultiCallable( + PooledMultiCallable, aio.StreamStreamMultiCallable +): + def __call__(self, *args, **kwargs) -> aio.StreamStreamCall: + return self.next_channel_fn().stream_stream( + *self._init_args, **self._init_kwargs + )(*args, **kwargs) + + +class PooledChannel(aio.Channel): + def __init__( + self, + pool_size: int = 3, + host: str = "bigtable.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + quota_project_id: Optional[str] = None, + default_scopes: Optional[Sequence[str]] = None, + scopes: Optional[Sequence[str]] = None, + default_host: Optional[str] = None, + insecure: bool = False, + **kwargs, + ): + self._pool: List[aio.Channel] = [] + self._next_idx = 0 + if insecure: + self._create_channel = partial(aio.insecure_channel, host) + else: + self._create_channel = partial( + grpc_helpers_async.create_channel, + target=host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=default_scopes, + scopes=scopes, + default_host=default_host, + **kwargs, + ) + for i in range(pool_size): + self._pool.append(self._create_channel()) + + def next_channel(self) -> aio.Channel: + channel = self._pool[self._next_idx] + self._next_idx = (self._next_idx + 1) % len(self._pool) + return channel + + def unary_unary(self, *args, **kwargs) -> grpc.aio.UnaryUnaryMultiCallable: + return PooledUnaryUnaryMultiCallable(self, *args, **kwargs) + + def unary_stream(self, *args, **kwargs) -> grpc.aio.UnaryStreamMultiCallable: + return PooledUnaryStreamMultiCallable(self, *args, **kwargs) + + def stream_unary(self, *args, **kwargs) -> grpc.aio.StreamUnaryMultiCallable: + return PooledStreamUnaryMultiCallable(self, *args, **kwargs) + + def stream_stream(self, *args, **kwargs) -> 
grpc.aio.StreamStreamMultiCallable: + return PooledStreamStreamMultiCallable(self, *args, **kwargs) + + async def close(self, grace=None): + close_fns = [channel.close(grace=grace) for channel in self._pool] + return await asyncio.gather(*close_fns) + + async def channel_ready(self): + ready_fns = [channel.channel_ready() for channel in self._pool] + return asyncio.gather(*ready_fns) + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + await self.close() + + def get_state(self, try_to_connect: bool = False) -> grpc.ChannelConnectivity: + raise NotImplementedError() + + async def wait_for_state_change(self, last_observed_state): + raise NotImplementedError() + + async def replace_channel( + self, channel_idx, grace=None, swap_sleep=1, new_channel=None + ) -> aio.Channel: + """ + Replaces a channel in the pool with a fresh one. + + The `new_channel` will start processing new requests immidiately, + but the old channel will continue serving existing clients for `grace` seconds + + Args: + channel_idx(int): the channel index in the pool to replace + grace(Optional[float]): The time to wait until all active RPCs are + finished. If a grace period is not specified (by passing None for + grace), all existing RPCs are cancelled immediately. + swap_sleep(Optional[float]): The number of seconds to sleep in between + replacing channels and closing the old one + new_channel(grpc.aio.Channel): a new channel to insert into the pool + at `channel_idx`. If `None`, a new channel will be created. + """ + if channel_idx >= len(self._pool) or channel_idx < 0: + raise ValueError( + f"invalid channel_idx {channel_idx} for pool size {len(self._pool)}" + ) + if new_channel is None: + new_channel = self._create_channel() + old_channel = self._pool[channel_idx] + self._pool[channel_idx] = new_channel + await asyncio.sleep(swap_sleep) + await old_channel.close(grace=grace) + return new_channel + + +class PooledBigtableGrpcAsyncIOTransport(BigtableGrpcAsyncIOTransport): + """Pooled gRPC AsyncIO backend transport for Bigtable. + + Service for reading from and writing to existing Bigtable + tables. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + + This class allows channel pooling, so multiple channels can be used concurrently + when making requests. Channels are rotated in a round-robin fashion. + """ + + @classmethod + def with_fixed_size(cls, pool_size) -> Type["PooledBigtableGrpcAsyncIOTransport"]: + """ + Creates a new class with a fixed channel pool size. + + A fixed channel pool makes compatibility with other transports easier, + as the initializer signature is the same. 
+ """ + + class PooledTransportFixed(cls): + __init__ = partialmethod(cls.__init__, pool_size=pool_size) + + PooledTransportFixed.__name__ = f"{cls.__name__}_{pool_size}" + PooledTransportFixed.__qualname__ = PooledTransportFixed.__name__ + return PooledTransportFixed + + @classmethod + def create_channel( + cls, + pool_size: int = 3, + host: str = "bigtable.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a PooledChannel object, representing a pool of gRPC AsyncIO channels + Args: + pool_size (int): The number of channels in the pool. + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + PooledChannel: a channel pool object + """ + + return PooledChannel( + pool_size, + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + def __init__( + self, + *, + pool_size: int = 3, + host: str = "bigtable.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + pool_size (int): the number of grpc channels to maintain in a pool + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. 
+ api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + ValueError: if ``pool_size`` <= 0 + """ + if pool_size <= 0: + raise ValueError(f"invalid pool_size: {pool_size}") + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + BigtableTransport.__init__( + self, + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + self._quota_project_id = quota_project_id + self._grpc_channel = type(self).create_channel( + pool_size, + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. 
+ credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=self._quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def pool_size(self) -> int: + """The number of grpc channels in the pool.""" + return len(self._grpc_channel._pool) + + @property + def channels(self) -> List[grpc.Channel]: + """Acccess the internal list of grpc channels.""" + return self._grpc_channel._pool + + async def replace_channel( + self, channel_idx, grace=None, swap_sleep=1, new_channel=None + ) -> aio.Channel: + """ + Replaces a channel in the pool with a fresh one. + + The `new_channel` will start processing new requests immidiately, + but the old channel will continue serving existing clients for `grace` seconds + + Args: + channel_idx(int): the channel index in the pool to replace + grace(Optional[float]): The time to wait until all active RPCs are + finished. If a grace period is not specified (by passing None for + grace), all existing RPCs are cancelled immediately. + swap_sleep(Optional[float]): The number of seconds to sleep in between + replacing channels and closing the old one + new_channel(grpc.aio.Channel): a new channel to insert into the pool + at `channel_idx`. If `None`, a new channel will be created. + """ + return await self._grpc_channel.replace_channel( + channel_idx, grace, swap_sleep, new_channel + ) + + +__all__ = ("PooledBigtableGrpcAsyncIOTransport",) diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index 8550a2b795ff..daf730a9a77f 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -54,7 +54,9 @@ "pytest", "google-cloud-testutils", ] -SYSTEM_TEST_EXTERNAL_DEPENDENCIES: List[str] = [] +SYSTEM_TEST_EXTERNAL_DEPENDENCIES: List[str] = [ + "pytest-asyncio", +] SYSTEM_TEST_LOCAL_DEPENDENCIES: List[str] = [] SYSTEM_TEST_DEPENDENCIES: List[str] = [] SYSTEM_TEST_EXTRAS: List[str] = [] @@ -134,8 +136,18 @@ def mypy(session): "mypy", "types-setuptools", "types-protobuf", "types-mock", "types-requests" ) session.install("google-cloud-testutils") - # TODO: also verify types on tests, all of google package - session.run("mypy", "-p", "google", "-p", "tests") + session.run( + "mypy", + "-p", + "google.cloud.bigtable.data", + "--check-untyped-defs", + "--warn-unreachable", + "--disallow-any-generics", + "--exclude", + "tests/system/v2_client", + "--exclude", + "tests/unit/v2_client", + ) @nox.session(python=DEFAULT_PYTHON_VERSION) @@ -260,6 +272,24 @@ def system_emulated(session): os.killpg(os.getpgid(p.pid), signal.SIGKILL) +@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS) +def conformance(session): + TEST_REPO_URL = "https://github.com/googleapis/cloud-bigtable-clients-test.git" + CLONE_REPO_DIR = "cloud-bigtable-clients-test" + # install dependencies + constraints_path = str( + CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt" + ) + install_unittest_dependencies(session, "-c", constraints_path) + with session.chdir("test_proxy"): + # download the conformance test suite + clone_dir = os.path.join(CURRENT_DIRECTORY, CLONE_REPO_DIR) + if not os.path.exists(clone_dir): + print("downloading copy of test repo") + session.run("git", "clone", TEST_REPO_URL, CLONE_REPO_DIR, external=True) + session.run("bash", "-e", "run_tests.sh", 
external=True) + + @nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS) def system(session): """Run the system test suite.""" @@ -311,7 +341,7 @@ def cover(session): test runs (not system test runs), and then erases coverage data. """ session.install("coverage", "pytest-cov") - session.run("coverage", "report", "--show-missing", "--fail-under=100") + session.run("coverage", "report", "--show-missing", "--fail-under=99") session.run("coverage", "erase") diff --git a/packages/google-cloud-bigtable/owlbot.py b/packages/google-cloud-bigtable/owlbot.py index 4b06aea7766d..3fb079396a0c 100644 --- a/packages/google-cloud-bigtable/owlbot.py +++ b/packages/google-cloud-bigtable/owlbot.py @@ -89,7 +89,10 @@ def get_staging_dirs( samples=True, # set to True only if there are samples split_system_tests=True, microgenerator=True, - cov_level=100, + cov_level=99, + system_test_external_dependencies=[ + "pytest-asyncio", + ], ) s.move(templated_files, excludes=[".coveragerc", "README.rst", ".github/release-please.yml"]) @@ -142,7 +145,35 @@ def system_emulated(session): escape="()" ) -# add system_emulated nox session +conformance_session = """ +@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS) +def conformance(session): + TEST_REPO_URL = "https://github.com/googleapis/cloud-bigtable-clients-test.git" + CLONE_REPO_DIR = "cloud-bigtable-clients-test" + # install dependencies + constraints_path = str( + CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt" + ) + install_unittest_dependencies(session, "-c", constraints_path) + with session.chdir("test_proxy"): + # download the conformance test suite + clone_dir = os.path.join(CURRENT_DIRECTORY, CLONE_REPO_DIR) + if not os.path.exists(clone_dir): + print("downloading copy of test repo") + session.run("git", "clone", TEST_REPO_URL, CLONE_REPO_DIR, external=True) + session.run("bash", "-e", "run_tests.sh", external=True) + +""" + +place_before( + "noxfile.py", + "@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)\n" + "def system(session):", + conformance_session, + escape="()" +) + +# add system_emulated and mypy and conformance to nox session s.replace("noxfile.py", """nox.options.sessions = \[ "unit", @@ -168,8 +199,18 @@ def mypy(session): session.install("-e", ".") session.install("mypy", "types-setuptools", "types-protobuf", "types-mock", "types-requests") session.install("google-cloud-testutils") - # TODO: also verify types on tests, all of google package - session.run("mypy", "-p", "google", "-p", "tests") + session.run( + "mypy", + "-p", + "google.cloud.bigtable.data", + "--check-untyped-defs", + "--warn-unreachable", + "--disallow-any-generics", + "--exclude", + "tests/system/v2_client", + "--exclude", + "tests/unit/v2_client", + ) @nox.session(python=DEFAULT_PYTHON_VERSION) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index e9bce0960720..8b698a35b26a 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -37,7 +37,7 @@ # 'Development Status :: 5 - Production/Stable' release_status = "Development Status :: 5 - Production/Stable" dependencies = [ - "google-api-core[grpc] >= 1.34.0, <3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*,!=2.8.*,!=2.9.*,!=2.10.*", + "google-api-core[grpc] >= 2.16.0, <3.0.0dev", "google-cloud-core >= 1.4.4, <3.0.0dev", "grpc-google-iam-v1 >= 0.12.4, <1.0.0dev", "proto-plus >= 1.22.0, <2.0.0dev", diff --git a/packages/google-cloud-bigtable/test_proxy/README.md 
b/packages/google-cloud-bigtable/test_proxy/README.md new file mode 100644 index 000000000000..08741fd5d673 --- /dev/null +++ b/packages/google-cloud-bigtable/test_proxy/README.md @@ -0,0 +1,60 @@ +# CBT Python Test Proxy + +The CBT test proxy is intended for running conformance tests for Cloud Bigtable Python Client. + +## Option 1: Run Tests with Nox + +You can run the conformance tests in a single line by calling `nox -s conformance` from the repo root + + +``` +cd python-bigtable/test_proxy +nox -s conformance +``` + +## Option 2: Run processes manually + +### Start test proxy + +You can use `test_proxy.py` to launch a new test proxy process directly + +``` +cd python-bigtable/test_proxy +python test_proxy.py +``` + +The port can be set by passing in an extra positional argument + +``` +cd python-bigtable/test_proxy +python test_proxy.py --port 8080 +``` + +You can run the test proxy against the previous `v2` client by running it with the `--legacy-client` flag: + +``` +python test_proxy.py --legacy-client +``` + +### Run the test cases + +Prerequisites: +- If you have not already done so, [install golang](https://go.dev/doc/install). +- Before running tests, [launch an instance of the test proxy](#start-test-proxy) +in a separate shell session, and make note of the port + + +Clone and navigate to the go test library: + +``` +git clone https://github.com/googleapis/cloud-bigtable-clients-test.git +cd cloud-bigtable-clients-test/tests +``` + + +Launch the tests + +``` +go test -v -proxy_addr=:50055 +``` + diff --git a/packages/google-cloud-bigtable/test_proxy/handlers/client_handler_data.py b/packages/google-cloud-bigtable/test_proxy/handlers/client_handler_data.py new file mode 100644 index 000000000000..43ff5d634901 --- /dev/null +++ b/packages/google-cloud-bigtable/test_proxy/handlers/client_handler_data.py @@ -0,0 +1,214 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +This module contains the client handler process for proxy_server.py. 
+""" +import os + +from google.cloud.environment_vars import BIGTABLE_EMULATOR +from google.cloud.bigtable.data import BigtableDataClientAsync + + +def error_safe(func): + """ + Catch and pass errors back to the grpc_server_process + Also check if client is closed before processing requests + """ + async def wrapper(self, *args, **kwargs): + try: + if self.closed: + raise RuntimeError("client is closed") + return await func(self, *args, **kwargs) + except (Exception, NotImplementedError) as e: + # exceptions should be raised in grpc_server_process + return encode_exception(e) + + return wrapper + + +def encode_exception(exc): + """ + Encode an exception or chain of exceptions to pass back to grpc_handler + """ + from google.api_core.exceptions import GoogleAPICallError + error_msg = f"{type(exc).__name__}: {exc}" + result = {"error": error_msg} + if exc.__cause__: + result["cause"] = encode_exception(exc.__cause__) + if hasattr(exc, "exceptions"): + result["subexceptions"] = [encode_exception(e) for e in exc.exceptions] + if hasattr(exc, "index"): + result["index"] = exc.index + if isinstance(exc, GoogleAPICallError): + if exc.grpc_status_code is not None: + result["code"] = exc.grpc_status_code.value[0] + elif exc.code is not None: + result["code"] = int(exc.code) + else: + result["code"] = -1 + elif result.get("cause", {}).get("code", None): + # look for code code in cause + result["code"] = result["cause"]["code"] + elif result.get("subexceptions", None): + # look for code in subexceptions + for subexc in result["subexceptions"]: + if subexc.get("code", None): + result["code"] = subexc["code"] + return result + + +class TestProxyClientHandler: + """ + Implements the same methods as the grpc server, but handles the client + library side of the request. + + Requests received in TestProxyGrpcServer are converted to a dictionary, + and supplied to the TestProxyClientHandler methods as kwargs. 
+ The client response is then returned back to the TestProxyGrpcServer + """ + + def __init__( + self, + data_target=None, + project_id=None, + instance_id=None, + app_profile_id=None, + per_operation_timeout=None, + **kwargs, + ): + self.closed = False + # use emulator + os.environ[BIGTABLE_EMULATOR] = data_target + self.client = BigtableDataClientAsync(project=project_id) + self.instance_id = instance_id + self.app_profile_id = app_profile_id + self.per_operation_timeout = per_operation_timeout + + def close(self): + # TODO: call self.client.close() + self.closed = True + + @error_safe + async def ReadRows(self, request, **kwargs): + table_id = request.pop("table_name").split("/")[-1] + app_profile_id = self.app_profile_id or request.get("app_profile_id", None) + table = self.client.get_table(self.instance_id, table_id, app_profile_id) + kwargs["operation_timeout"] = kwargs.get("operation_timeout", self.per_operation_timeout) or 20 + result_list = await table.read_rows(request, **kwargs) + # pack results back into protobuf-parsable format + serialized_response = [row._to_dict() for row in result_list] + return serialized_response + + @error_safe + async def ReadRow(self, row_key, **kwargs): + table_id = kwargs.pop("table_name").split("/")[-1] + app_profile_id = self.app_profile_id or kwargs.get("app_profile_id", None) + table = self.client.get_table(self.instance_id, table_id, app_profile_id) + kwargs["operation_timeout"] = kwargs.get("operation_timeout", self.per_operation_timeout) or 20 + result_row = await table.read_row(row_key, **kwargs) + # pack results back into protobuf-parsable format + if result_row: + return result_row._to_dict() + else: + return "None" + + @error_safe + async def MutateRow(self, request, **kwargs): + from google.cloud.bigtable.data.mutations import Mutation + table_id = request["table_name"].split("/")[-1] + app_profile_id = self.app_profile_id or request.get("app_profile_id", None) + table = self.client.get_table(self.instance_id, table_id, app_profile_id) + kwargs["operation_timeout"] = kwargs.get("operation_timeout", self.per_operation_timeout) or 20 + row_key = request["row_key"] + mutations = [Mutation._from_dict(d) for d in request["mutations"]] + await table.mutate_row(row_key, mutations, **kwargs) + return "OK" + + @error_safe + async def BulkMutateRows(self, request, **kwargs): + from google.cloud.bigtable.data.mutations import RowMutationEntry + table_id = request["table_name"].split("/")[-1] + app_profile_id = self.app_profile_id or request.get("app_profile_id", None) + table = self.client.get_table(self.instance_id, table_id, app_profile_id) + kwargs["operation_timeout"] = kwargs.get("operation_timeout", self.per_operation_timeout) or 20 + entry_list = [RowMutationEntry._from_dict(entry) for entry in request["entries"]] + await table.bulk_mutate_rows(entry_list, **kwargs) + return "OK" + + @error_safe + async def CheckAndMutateRow(self, request, **kwargs): + from google.cloud.bigtable.data.mutations import Mutation, SetCell + table_id = request["table_name"].split("/")[-1] + app_profile_id = self.app_profile_id or request.get("app_profile_id", None) + table = self.client.get_table(self.instance_id, table_id, app_profile_id) + kwargs["operation_timeout"] = kwargs.get("operation_timeout", self.per_operation_timeout) or 20 + row_key = request["row_key"] + # add default values for incomplete dicts, so they can still be parsed to objects + true_mutations = [] + for mut_dict in request.get("true_mutations", []): + try: + 
true_mutations.append(Mutation._from_dict(mut_dict)) + except ValueError: + # invalid mutation type. Conformance test may be sending generic empty request + mutation = SetCell("", "", "", 0) + true_mutations.append(mutation) + false_mutations = [] + for mut_dict in request.get("false_mutations", []): + try: + false_mutations.append(Mutation._from_dict(mut_dict)) + except ValueError: + # invalid mutation type. Conformance test may be sending generic empty request + false_mutations.append(SetCell("", "", "", 0)) + predicate_filter = request.get("predicate_filter", None) + result = await table.check_and_mutate_row( + row_key, + predicate_filter, + true_case_mutations=true_mutations, + false_case_mutations=false_mutations, + **kwargs, + ) + return result + + @error_safe + async def ReadModifyWriteRow(self, request, **kwargs): + from google.cloud.bigtable.data.read_modify_write_rules import IncrementRule + from google.cloud.bigtable.data.read_modify_write_rules import AppendValueRule + table_id = request["table_name"].split("/")[-1] + app_profile_id = self.app_profile_id or request.get("app_profile_id", None) + table = self.client.get_table(self.instance_id, table_id, app_profile_id) + kwargs["operation_timeout"] = kwargs.get("operation_timeout", self.per_operation_timeout) or 20 + row_key = request["row_key"] + rules = [] + for rule_dict in request.get("rules", []): + qualifier = rule_dict["column_qualifier"] + if "append_value" in rule_dict: + new_rule = AppendValueRule(rule_dict["family_name"], qualifier, rule_dict["append_value"]) + else: + new_rule = IncrementRule(rule_dict["family_name"], qualifier, rule_dict["increment_amount"]) + rules.append(new_rule) + result = await table.read_modify_write_row(row_key, rules, **kwargs) + # pack results back into protobuf-parsable format + if result: + return result._to_dict() + else: + return "None" + + @error_safe + async def SampleRowKeys(self, request, **kwargs): + table_id = request["table_name"].split("/")[-1] + app_profile_id = self.app_profile_id or request.get("app_profile_id", None) + table = self.client.get_table(self.instance_id, table_id, app_profile_id) + kwargs["operation_timeout"] = kwargs.get("operation_timeout", self.per_operation_timeout) or 20 + result = await table.sample_row_keys(**kwargs) + return result diff --git a/packages/google-cloud-bigtable/test_proxy/handlers/client_handler_legacy.py b/packages/google-cloud-bigtable/test_proxy/handlers/client_handler_legacy.py new file mode 100644 index 000000000000..400f618b514a --- /dev/null +++ b/packages/google-cloud-bigtable/test_proxy/handlers/client_handler_legacy.py @@ -0,0 +1,235 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +This module contains the client handler process for proxy_server.py. 
+""" +import os + +from google.cloud.environment_vars import BIGTABLE_EMULATOR +from google.cloud.bigtable.client import Client + +import client_handler_data as client_handler + +import warnings +warnings.filterwarnings("ignore", category=DeprecationWarning) + + +class LegacyTestProxyClientHandler(client_handler.TestProxyClientHandler): + + def __init__( + self, + data_target=None, + project_id=None, + instance_id=None, + app_profile_id=None, + per_operation_timeout=None, + **kwargs, + ): + self.closed = False + # use emulator + os.environ[BIGTABLE_EMULATOR] = data_target + self.client = Client(project=project_id) + self.instance_id = instance_id + self.app_profile_id = app_profile_id + self.per_operation_timeout = per_operation_timeout + + def close(self): + self.closed = True + + @client_handler.error_safe + async def ReadRows(self, request, **kwargs): + table_id = request["table_name"].split("/")[-1] + # app_profile_id = self.app_profile_id or request.get("app_profile_id", None) + instance = self.client.instance(self.instance_id) + table = instance.table(table_id) + + limit = request.get("rows_limit", None) + start_key = request.get("rows", {}).get("row_keys", [None])[0] + end_key = request.get("rows", {}).get("row_keys", [None])[-1] + end_inclusive = request.get("rows", {}).get("row_ranges", [{}])[-1].get("end_key_closed", True) + + row_list = [] + for row in table.read_rows(start_key=start_key, end_key=end_key, limit=limit, end_inclusive=end_inclusive): + # parse results into proto formatted dict + dict_val = {"row_key": row.row_key} + for family, family_cells in row.cells.items(): + family_dict = {"name": family} + for qualifier, qualifier_cells in family_cells.items(): + column_dict = {"qualifier": qualifier} + for cell in qualifier_cells: + cell_dict = { + "value": cell.value, + "timestamp_micros": cell.timestamp.timestamp() * 1000000, + "labels": cell.labels, + } + column_dict.setdefault("cells", []).append(cell_dict) + family_dict.setdefault("columns", []).append(column_dict) + dict_val.setdefault("families", []).append(family_dict) + row_list.append(dict_val) + return row_list + + @client_handler.error_safe + async def ReadRow(self, row_key, **kwargs): + table_id = kwargs["table_name"].split("/")[-1] + instance = self.client.instance(self.instance_id) + table = instance.table(table_id) + + row = table.read_row(row_key) + # parse results into proto formatted dict + dict_val = {"row_key": row.row_key} + for family, family_cells in row.cells.items(): + family_dict = {"name": family} + for qualifier, qualifier_cells in family_cells.items(): + column_dict = {"qualifier": qualifier} + for cell in qualifier_cells: + cell_dict = { + "value": cell.value, + "timestamp_micros": cell.timestamp.timestamp() * 1000000, + "labels": cell.labels, + } + column_dict.setdefault("cells", []).append(cell_dict) + family_dict.setdefault("columns", []).append(column_dict) + dict_val.setdefault("families", []).append(family_dict) + return dict_val + + @client_handler.error_safe + async def MutateRow(self, request, **kwargs): + from datetime import datetime + from google.cloud.bigtable.row import DirectRow + table_id = request["table_name"].split("/")[-1] + instance = self.client.instance(self.instance_id) + table = instance.table(table_id) + row_key = request["row_key"] + new_row = DirectRow(row_key, table) + for m_dict in request.get("mutations", []): + details = m_dict.get("set_cell") or m_dict.get("delete_from_column") or m_dict.get("delete_from_family") or m_dict.get("delete_from_row") + timestamp = 
datetime.fromtimestamp(details.get("timestamp_micros")) if details.get("timestamp_micros") else None + if m_dict.get("set_cell"): + new_row.set_cell(details["family_name"], details["column_qualifier"], details["value"], timestamp=timestamp) + elif m_dict.get("delete_from_column"): + new_row.delete_cell(details["family_name"], details["column_qualifier"], timestamp=timestamp) + elif m_dict.get("delete_from_family"): + new_row.delete_cells(details["family_name"], timestamp=timestamp) + elif m_dict.get("delete_from_row"): + new_row.delete() + table.mutate_rows([new_row]) + return "OK" + + @client_handler.error_safe + async def BulkMutateRows(self, request, **kwargs): + from google.cloud.bigtable.row import DirectRow + from datetime import datetime + table_id = request["table_name"].split("/")[-1] + instance = self.client.instance(self.instance_id) + table = instance.table(table_id) + rows = [] + for entry in request.get("entries", []): + row_key = entry["row_key"] + new_row = DirectRow(row_key, table) + for m_dict in entry.get("mutations"): + details = m_dict.get("set_cell") or m_dict.get("delete_from_column") or m_dict.get("delete_from_family") or m_dict.get("delete_from_row") + timestamp = datetime.fromtimestamp(details.get("timestamp_micros")) if details.get("timestamp_micros") else None + if m_dict.get("set_cell"): + new_row.set_cell(details["family_name"], details["column_qualifier"], details["value"], timestamp=timestamp) + elif m_dict.get("delete_from_column"): + new_row.delete_cell(details["family_name"], details["column_qualifier"], timestamp=timestamp) + elif m_dict.get("delete_from_family"): + new_row.delete_cells(details["family_name"], timestamp=timestamp) + elif m_dict.get("delete_from_row"): + new_row.delete() + rows.append(new_row) + table.mutate_rows(rows) + return "OK" + + @client_handler.error_safe + async def CheckAndMutateRow(self, request, **kwargs): + from google.cloud.bigtable.row import ConditionalRow + from google.cloud.bigtable.row_filters import PassAllFilter + table_id = request["table_name"].split("/")[-1] + instance = self.client.instance(self.instance_id) + table = instance.table(table_id) + + predicate_filter = request.get("predicate_filter", PassAllFilter(True)) + new_row = ConditionalRow(request["row_key"], table, predicate_filter) + + combined_mutations = [{"state": True, **m} for m in request.get("true_mutations", [])] + combined_mutations.extend([{"state": False, **m} for m in request.get("false_mutations", [])]) + for mut_dict in combined_mutations: + if "set_cell" in mut_dict: + details = mut_dict["set_cell"] + new_row.set_cell( + details.get("family_name", ""), + details.get("column_qualifier", ""), + details.get("value", ""), + timestamp=details.get("timestamp_micros", None), + state=mut_dict["state"], + ) + elif "delete_from_column" in mut_dict: + details = mut_dict["delete_from_column"] + new_row.delete_cell( + details.get("family_name", ""), + details.get("column_qualifier", ""), + timestamp=details.get("timestamp_micros", None), + state=mut_dict["state"], + ) + elif "delete_from_family" in mut_dict: + details = mut_dict["delete_from_family"] + new_row.delete_cells( + details.get("family_name", ""), + timestamp=details.get("timestamp_micros", None), + state=mut_dict["state"], + ) + elif "delete_from_row" in mut_dict: + new_row.delete(state=mut_dict["state"]) + else: + raise RuntimeError(f"Unknown mutation type: {mut_dict}") + return new_row.commit() + + @client_handler.error_safe + async def ReadModifyWriteRow(self, request, **kwargs): + from 
google.cloud.bigtable.row import AppendRow + from google.cloud._helpers import _microseconds_from_datetime + table_id = request["table_name"].split("/")[-1] + instance = self.client.instance(self.instance_id) + table = instance.table(table_id) + row_key = request["row_key"] + new_row = AppendRow(row_key, table) + for rule_dict in request.get("rules", []): + qualifier = rule_dict["column_qualifier"] + family = rule_dict["family_name"] + if "append_value" in rule_dict: + new_row.append_cell_value(family, qualifier, rule_dict["append_value"]) + else: + new_row.increment_cell_value(family, qualifier, rule_dict["increment_amount"]) + raw_result = new_row.commit() + result_families = [] + for family, column_dict in raw_result.items(): + result_columns = [] + for column, cell_list in column_dict.items(): + result_cells = [] + for cell_tuple in cell_list: + cell_dict = {"value": cell_tuple[0], "timestamp_micros": _microseconds_from_datetime(cell_tuple[1])} + result_cells.append(cell_dict) + result_columns.append({"qualifier": column, "cells": result_cells}) + result_families.append({"name": family, "columns": result_columns}) + return {"key": row_key, "families": result_families} + + @client_handler.error_safe + async def SampleRowKeys(self, request, **kwargs): + table_id = request["table_name"].split("/")[-1] + instance = self.client.instance(self.instance_id) + table = instance.table(table_id) + response = list(table.sample_row_keys()) + tuple_response = [(s.row_key, s.offset_bytes) for s in response] + return tuple_response diff --git a/packages/google-cloud-bigtable/test_proxy/handlers/grpc_handler.py b/packages/google-cloud-bigtable/test_proxy/handlers/grpc_handler.py new file mode 100644 index 000000000000..2c70778ddedd --- /dev/null +++ b/packages/google-cloud-bigtable/test_proxy/handlers/grpc_handler.py @@ -0,0 +1,148 @@ + +import time + +import test_proxy_pb2 +import test_proxy_pb2_grpc +import data_pb2 +import bigtable_pb2 +from google.rpc.status_pb2 import Status +from google.protobuf import json_format + + +class TestProxyGrpcServer(test_proxy_pb2_grpc.CloudBigtableV2TestProxyServicer): + """ + Implements a grpc server that proxies conformance test requests to the client library + + Due to issues with using protoc-compiled protos and client-library + proto-plus objects in the same process, this server defers requests to + matching methods in a TestProxyClientHandler instance in a separate + process. 
+ This happens invisbly in the decorator @delegate_to_client_handler, with the + results attached to each request as a client_response kwarg + """ + + def __init__(self, request_q, queue_pool): + self.open_queues = list(range(len(queue_pool))) + self.queue_pool = queue_pool + self.request_q = request_q + + def delegate_to_client_handler(func, timeout_seconds=300): + """ + Decorator that transparently passes a request to the client + handler process, and then attaches the resonse to the wrapped call + """ + + def wrapper(self, request, context, **kwargs): + deadline = time.time() + timeout_seconds + json_dict = json_format.MessageToDict(request) + out_idx = self.open_queues.pop() + json_dict["proxy_request"] = func.__name__ + json_dict["response_queue_idx"] = out_idx + out_q = self.queue_pool[out_idx] + self.request_q.put(json_dict) + # wait for response + while time.time() < deadline: + if not out_q.empty(): + response = out_q.get() + self.open_queues.append(out_idx) + if isinstance(response, Exception): + raise response + else: + return func( + self, + request, + context, + client_response=response, + **kwargs, + ) + time.sleep(1e-4) + + return wrapper + + + @delegate_to_client_handler + def CreateClient(self, request, context, client_response=None): + return test_proxy_pb2.CreateClientResponse() + + @delegate_to_client_handler + def CloseClient(self, request, context, client_response=None): + return test_proxy_pb2.CloseClientResponse() + + @delegate_to_client_handler + def RemoveClient(self, request, context, client_response=None): + return test_proxy_pb2.RemoveClientResponse() + + @delegate_to_client_handler + def ReadRows(self, request, context, client_response=None): + status = Status() + rows = [] + if isinstance(client_response, dict) and "error" in client_response: + status = Status(code=5, message=client_response["error"]) + else: + rows = [data_pb2.Row(**d) for d in client_response] + result = test_proxy_pb2.RowsResult(row=rows, status=status) + return result + + @delegate_to_client_handler + def ReadRow(self, request, context, client_response=None): + status = Status() + row = None + if isinstance(client_response, dict) and "error" in client_response: + status=Status(code=client_response.get("code", 5), message=client_response.get("error")) + elif client_response != "None": + row = data_pb2.Row(**client_response) + result = test_proxy_pb2.RowResult(row=row, status=status) + return result + + @delegate_to_client_handler + def MutateRow(self, request, context, client_response=None): + status = Status() + if isinstance(client_response, dict) and "error" in client_response: + status = Status(code=client_response.get("code", 5), message=client_response["error"]) + return test_proxy_pb2.MutateRowResult(status=status) + + @delegate_to_client_handler + def BulkMutateRows(self, request, context, client_response=None): + status = Status() + entries = [] + if isinstance(client_response, dict) and "error" in client_response: + entries = [bigtable_pb2.MutateRowsResponse.Entry(index=exc_dict.get("index",1), status=Status(code=exc_dict.get("code", 5))) for exc_dict in client_response.get("subexceptions", [])] + if not entries: + # only return failure on the overall request if there are failed entries + status = Status(code=client_response.get("code", 5), message=client_response["error"]) + # TODO: protos were updated. 
entry is now entries: https://github.com/googleapis/cndb-client-testing-protos/commit/e6205a2bba04acc10d12421a1402870b4a525fb3 + response = test_proxy_pb2.MutateRowsResult(status=status, entry=entries) + return response + + @delegate_to_client_handler + def CheckAndMutateRow(self, request, context, client_response=None): + if isinstance(client_response, dict) and "error" in client_response: + status = Status(code=client_response.get("code", 5), message=client_response["error"]) + response = test_proxy_pb2.CheckAndMutateRowResult(status=status) + else: + result = bigtable_pb2.CheckAndMutateRowResponse(predicate_matched=client_response) + response = test_proxy_pb2.CheckAndMutateRowResult(result=result, status=Status()) + return response + + @delegate_to_client_handler + def ReadModifyWriteRow(self, request, context, client_response=None): + status = Status() + row = None + if isinstance(client_response, dict) and "error" in client_response: + status = Status(code=client_response.get("code", 5), message=client_response.get("error")) + elif client_response != "None": + row = data_pb2.Row(**client_response) + result = test_proxy_pb2.RowResult(row=row, status=status) + return result + + @delegate_to_client_handler + def SampleRowKeys(self, request, context, client_response=None): + status = Status() + sample_list = [] + if isinstance(client_response, dict) and "error" in client_response: + status = Status(code=client_response.get("code", 5), message=client_response.get("error")) + else: + for sample in client_response: + sample_list.append(bigtable_pb2.SampleRowKeysResponse(offset_bytes=sample[1], row_key=sample[0])) + # TODO: protos were updated. sample is now samples: https://github.com/googleapis/cndb-client-testing-protos/commit/e6205a2bba04acc10d12421a1402870b4a525fb3 + return test_proxy_pb2.SampleRowKeysResult(status=status, sample=sample_list) diff --git a/packages/google-cloud-bigtable/test_proxy/noxfile.py b/packages/google-cloud-bigtable/test_proxy/noxfile.py new file mode 100644 index 000000000000..bebf247b70e6 --- /dev/null +++ b/packages/google-cloud-bigtable/test_proxy/noxfile.py @@ -0,0 +1,80 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
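The `grpc_handler.py` servicer above never calls the client library directly: `delegate_to_client_handler` serializes each request to a dict, pushes it onto a shared request queue, and polls a reserved per-call response queue until the client-handler process replies. The following is a rough, self-contained sketch of that queue round trip (a sketch only, not part of the patch; `client_handler_process` and the echoed payload are made up for illustration).

```
# Minimal sketch (assumption: not part of this patch) of the queue-based
# hand-off used by delegate_to_client_handler in grpc_handler.py above.
import multiprocessing
import time


def client_handler_process(request_q, response_queues):
    # Stand-in for the process hosting the client handler: pop one request,
    # "handle" it, and reply on the queue index the caller reserved.
    request = request_q.get()
    out_idx = request["response_queue_idx"]
    response_queues[out_idx].put({"echo": request["proxy_request"]})


if __name__ == "__main__":
    request_q = multiprocessing.Queue()
    queue_pool = [multiprocessing.Queue() for _ in range(2)]
    worker = multiprocessing.Process(
        target=client_handler_process, args=(request_q, queue_pool)
    )
    worker.start()

    # Servicer side: reserve an output queue, send the request, then poll
    # for the response, just as the decorated grpc methods do.
    out_idx = 0
    request_q.put({"proxy_request": "ReadRows", "response_queue_idx": out_idx})
    deadline = time.time() + 5
    while time.time() < deadline:
        if not queue_pool[out_idx].empty():
            print(queue_pool[out_idx].get())  # {'echo': 'ReadRows'}
            break
        time.sleep(1e-4)
    worker.join()
```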
+ +from __future__ import absolute_import +import os +import pathlib +import re +from colorlog.escape_codes import parse_colors + +import nox + + +DEFAULT_PYTHON_VERSION = "3.10" + +PROXY_SERVER_PORT=os.environ.get("PROXY_SERVER_PORT", "50055") +PROXY_CLIENT_VERSION=os.environ.get("PROXY_CLIENT_VERSION", None) + +CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute() +REPO_ROOT_DIRECTORY = CURRENT_DIRECTORY.parent + +nox.options.sessions = ["run_proxy", "conformance_tests"] + +TEST_REPO_URL = "https://github.com/googleapis/cloud-bigtable-clients-test.git" +CLONE_REPO_DIR = "cloud-bigtable-clients-test" + +# Error if a python version is missing +nox.options.error_on_missing_interpreters = True + + +def default(session): + """ + if nox is run directly, run the test_proxy session + """ + test_proxy(session) + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def conformance_tests(session): + """ + download and run the conformance test suite against the test proxy + """ + import subprocess + import time + # download the conformance test suite + clone_dir = os.path.join(CURRENT_DIRECTORY, CLONE_REPO_DIR) + if not os.path.exists(clone_dir): + print("downloading copy of test repo") + session.run("git", "clone", TEST_REPO_URL, CLONE_REPO_DIR) + # start tests + with session.chdir(f"{clone_dir}/tests"): + session.run("go", "test", "-v", f"-proxy_addr=:{PROXY_SERVER_PORT}") + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def test_proxy(session): + """Start up the test proxy""" + # Install all dependencies, then install this package into the + # virtualenv's dist-packages. + # session.install( + # "grpcio", + # ) + if PROXY_CLIENT_VERSION is not None: + # install released version of the library + session.install(f"python-bigtable=={PROXY_CLIENT_VERSION}") + else: + # install the library from the source + session.install("-e", str(REPO_ROOT_DIRECTORY)) + session.install("-e", str(REPO_ROOT_DIRECTORY / "python-api-core")) + + session.run("python", "test_proxy.py", "--port", PROXY_SERVER_PORT, *session.posargs,) diff --git a/packages/google-cloud-bigtable/test_proxy/protos/bigtable_pb2.py b/packages/google-cloud-bigtable/test_proxy/protos/bigtable_pb2.py new file mode 100644 index 000000000000..936a4ed55332 --- /dev/null +++ b/packages/google-cloud-bigtable/test_proxy/protos/bigtable_pb2.py @@ -0,0 +1,145 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/bigtable/v2/bigtable.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 +from google.api import client_pb2 as google_dot_api_dot_client__pb2 +from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 +from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 +from google.api import routing_pb2 as google_dot_api_dot_routing__pb2 +import data_pb2 as google_dot_bigtable_dot_v2_dot_data__pb2 +import request_stats_pb2 as google_dot_bigtable_dot_v2_dot_request__stats__pb2 +from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 +from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 +from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2 +from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n!google/bigtable/v2/bigtable.proto\x12\x12google.bigtable.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x18google/api/routing.proto\x1a\x1dgoogle/bigtable/v2/data.proto\x1a&google/bigtable/v2/request_stats.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x17google/rpc/status.proto\"\x90\x03\n\x0fReadRowsRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x05 \x01(\t\x12(\n\x04rows\x18\x02 \x01(\x0b\x32\x1a.google.bigtable.v2.RowSet\x12-\n\x06\x66ilter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x12\n\nrows_limit\x18\x04 \x01(\x03\x12P\n\x12request_stats_view\x18\x06 \x01(\x0e\x32\x34.google.bigtable.v2.ReadRowsRequest.RequestStatsView\"f\n\x10RequestStatsView\x12\"\n\x1eREQUEST_STATS_VIEW_UNSPECIFIED\x10\x00\x12\x16\n\x12REQUEST_STATS_NONE\x10\x01\x12\x16\n\x12REQUEST_STATS_FULL\x10\x02\"\xb1\x03\n\x10ReadRowsResponse\x12>\n\x06\x63hunks\x18\x01 \x03(\x0b\x32..google.bigtable.v2.ReadRowsResponse.CellChunk\x12\x1c\n\x14last_scanned_row_key\x18\x02 \x01(\x0c\x12\x37\n\rrequest_stats\x18\x03 \x01(\x0b\x32 .google.bigtable.v2.RequestStats\x1a\x85\x02\n\tCellChunk\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x31\n\x0b\x66\x61mily_name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\tqualifier\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.BytesValue\x12\x18\n\x10timestamp_micros\x18\x04 \x01(\x03\x12\x0e\n\x06labels\x18\x05 \x03(\t\x12\r\n\x05value\x18\x06 \x01(\x0c\x12\x12\n\nvalue_size\x18\x07 \x01(\x05\x12\x13\n\treset_row\x18\x08 \x01(\x08H\x00\x12\x14\n\ncommit_row\x18\t \x01(\x08H\x00\x42\x0c\n\nrow_status\"n\n\x14SampleRowKeysRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t\">\n\x15SampleRowKeysResponse\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x14\n\x0coffset_bytes\x18\x02 \x01(\x03\"\xb6\x01\n\x10MutateRowRequest\x12>\n\ntable_name\x18\x01 
\x01(\tB*\xe0\x41\x02\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x04 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12\x34\n\tmutations\x18\x03 \x03(\x0b\x32\x1c.google.bigtable.v2.MutationB\x03\xe0\x41\x02\"\x13\n\x11MutateRowResponse\"\xfe\x01\n\x11MutateRowsRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x03 \x01(\t\x12\x41\n\x07\x65ntries\x18\x02 \x03(\x0b\x32+.google.bigtable.v2.MutateRowsRequest.EntryB\x03\xe0\x41\x02\x1aN\n\x05\x45ntry\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x34\n\tmutations\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.MutationB\x03\xe0\x41\x02\"\x8f\x01\n\x12MutateRowsResponse\x12=\n\x07\x65ntries\x18\x01 \x03(\x0b\x32,.google.bigtable.v2.MutateRowsResponse.Entry\x1a:\n\x05\x45ntry\x12\r\n\x05index\x18\x01 \x01(\x03\x12\"\n\x06status\x18\x02 \x01(\x0b\x32\x12.google.rpc.Status\"\xae\x02\n\x18\x43heckAndMutateRowRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x07 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12\x37\n\x10predicate_filter\x18\x06 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x34\n\x0etrue_mutations\x18\x04 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\x12\x35\n\x0f\x66\x61lse_mutations\x18\x05 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\"6\n\x19\x43heckAndMutateRowResponse\x12\x19\n\x11predicate_matched\x18\x01 \x01(\x08\"i\n\x12PingAndWarmRequest\x12;\n\x04name\x18\x01 \x01(\tB-\xe0\x41\x02\xfa\x41\'\n%bigtableadmin.googleapis.com/Instance\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t\"\x15\n\x13PingAndWarmResponse\"\xc6\x01\n\x19ReadModifyWriteRowRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x04 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12;\n\x05rules\x18\x03 \x03(\x0b\x32\'.google.bigtable.v2.ReadModifyWriteRuleB\x03\xe0\x41\x02\"B\n\x1aReadModifyWriteRowResponse\x12$\n\x03row\x18\x01 \x01(\x0b\x32\x17.google.bigtable.v2.Row\"\x86\x01\n,GenerateInitialChangeStreamPartitionsRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t\"g\n-GenerateInitialChangeStreamPartitionsResponse\x12\x36\n\tpartition\x18\x01 \x01(\x0b\x32#.google.bigtable.v2.StreamPartition\"\x9b\x03\n\x17ReadChangeStreamRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t\x12\x36\n\tpartition\x18\x03 \x01(\x0b\x32#.google.bigtable.v2.StreamPartition\x12\x30\n\nstart_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x12K\n\x13\x63ontinuation_tokens\x18\x06 \x01(\x0b\x32,.google.bigtable.v2.StreamContinuationTokensH\x00\x12,\n\x08\x65nd_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x35\n\x12heartbeat_duration\x18\x07 \x01(\x0b\x32\x19.google.protobuf.DurationB\x0c\n\nstart_from\"\xeb\t\n\x18ReadChangeStreamResponse\x12N\n\x0b\x64\x61ta_change\x18\x01 \x01(\x0b\x32\x37.google.bigtable.v2.ReadChangeStreamResponse.DataChangeH\x00\x12K\n\theartbeat\x18\x02 \x01(\x0b\x32\x36.google.bigtable.v2.ReadChangeStreamResponse.HeartbeatH\x00\x12P\n\x0c\x63lose_stream\x18\x03 
\x01(\x0b\x32\x38.google.bigtable.v2.ReadChangeStreamResponse.CloseStreamH\x00\x1a\xf4\x01\n\rMutationChunk\x12X\n\nchunk_info\x18\x01 \x01(\x0b\x32\x44.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo\x12.\n\x08mutation\x18\x02 \x01(\x0b\x32\x1c.google.bigtable.v2.Mutation\x1aY\n\tChunkInfo\x12\x1a\n\x12\x63hunked_value_size\x18\x01 \x01(\x05\x12\x1c\n\x14\x63hunked_value_offset\x18\x02 \x01(\x05\x12\x12\n\nlast_chunk\x18\x03 \x01(\x08\x1a\xc6\x03\n\nDataChange\x12J\n\x04type\x18\x01 \x01(\x0e\x32<.google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type\x12\x19\n\x11source_cluster_id\x18\x02 \x01(\t\x12\x0f\n\x07row_key\x18\x03 \x01(\x0c\x12\x34\n\x10\x63ommit_timestamp\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x12\n\ntiebreaker\x18\x05 \x01(\x05\x12J\n\x06\x63hunks\x18\x06 \x03(\x0b\x32:.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk\x12\x0c\n\x04\x64one\x18\x08 \x01(\x08\x12\r\n\x05token\x18\t \x01(\t\x12;\n\x17\x65stimated_low_watermark\x18\n \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"P\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x08\n\x04USER\x10\x01\x12\x16\n\x12GARBAGE_COLLECTION\x10\x02\x12\x10\n\x0c\x43ONTINUATION\x10\x03\x1a\x91\x01\n\tHeartbeat\x12G\n\x12\x63ontinuation_token\x18\x01 \x01(\x0b\x32+.google.bigtable.v2.StreamContinuationToken\x12;\n\x17\x65stimated_low_watermark\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x1a{\n\x0b\x43loseStream\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12H\n\x13\x63ontinuation_tokens\x18\x02 \x03(\x0b\x32+.google.bigtable.v2.StreamContinuationTokenB\x0f\n\rstream_record2\xd7\x18\n\x08\x42igtable\x12\x9b\x02\n\x08ReadRows\x12#.google.bigtable.v2.ReadRowsRequest\x1a$.google.bigtable.v2.ReadRowsResponse\"\xc1\x01\x82\xd3\xe4\x93\x02>\"9/v2/{table_name=projects/*/instances/*/tables/*}:readRows:\x01*\x8a\xd3\xe4\x93\x02N\x12:\n\ntable_name\x12,{table_name=projects/*/instances/*/tables/*}\x12\x10\n\x0e\x61pp_profile_id\xda\x41\ntable_name\xda\x41\x19table_name,app_profile_id0\x01\x12\xac\x02\n\rSampleRowKeys\x12(.google.bigtable.v2.SampleRowKeysRequest\x1a).google.bigtable.v2.SampleRowKeysResponse\"\xc3\x01\x82\xd3\xe4\x93\x02@\x12>/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys\x8a\xd3\xe4\x93\x02N\x12:\n\ntable_name\x12,{table_name=projects/*/instances/*/tables/*}\x12\x10\n\x0e\x61pp_profile_id\xda\x41\ntable_name\xda\x41\x19table_name,app_profile_id0\x01\x12\xc1\x02\n\tMutateRow\x12$.google.bigtable.v2.MutateRowRequest\x1a%.google.bigtable.v2.MutateRowResponse\"\xe6\x01\x82\xd3\xe4\x93\x02?\":/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow:\x01*\x8a\xd3\xe4\x93\x02N\x12:\n\ntable_name\x12,{table_name=projects/*/instances/*/tables/*}\x12\x10\n\x0e\x61pp_profile_id\xda\x41\x1ctable_name,row_key,mutations\xda\x41+table_name,row_key,mutations,app_profile_id\x12\xb3\x02\n\nMutateRows\x12%.google.bigtable.v2.MutateRowsRequest\x1a&.google.bigtable.v2.MutateRowsResponse\"\xd3\x01\x82\xd3\xe4\x93\x02@\";/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows:\x01*\x8a\xd3\xe4\x93\x02N\x12:\n\ntable_name\x12,{table_name=projects/*/instances/*/tables/*}\x12\x10\n\x0e\x61pp_profile_id\xda\x41\x12table_name,entries\xda\x41!table_name,entries,app_profile_id0\x01\x12\xad\x03\n\x11\x43heckAndMutateRow\x12,.google.bigtable.v2.CheckAndMutateRowRequest\x1a-.google.bigtable.v2.CheckAndMutateRowResponse\"\xba\x02\x82\xd3\xe4\x93\x02G\"B/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow:\x01*\x8a\xd3\xe4\x93\x02N\x12:\n\ntable_name\x12,{tab
le_name=projects/*/instances/*/tables/*}\x12\x10\n\x0e\x61pp_profile_id\xda\x41\x42table_name,row_key,predicate_filter,true_mutations,false_mutations\xda\x41Qtable_name,row_key,predicate_filter,true_mutations,false_mutations,app_profile_id\x12\xee\x01\n\x0bPingAndWarm\x12&.google.bigtable.v2.PingAndWarmRequest\x1a\'.google.bigtable.v2.PingAndWarmResponse\"\x8d\x01\x82\xd3\xe4\x93\x02+\"&/v2/{name=projects/*/instances/*}:ping:\x01*\x8a\xd3\xe4\x93\x02\x39\x12%\n\x04name\x12\x1d{name=projects/*/instances/*}\x12\x10\n\x0e\x61pp_profile_id\xda\x41\x04name\xda\x41\x13name,app_profile_id\x12\xdd\x02\n\x12ReadModifyWriteRow\x12-.google.bigtable.v2.ReadModifyWriteRowRequest\x1a..google.bigtable.v2.ReadModifyWriteRowResponse\"\xe7\x01\x82\xd3\xe4\x93\x02H\"C/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow:\x01*\x8a\xd3\xe4\x93\x02N\x12:\n\ntable_name\x12,{table_name=projects/*/instances/*/tables/*}\x12\x10\n\x0e\x61pp_profile_id\xda\x41\x18table_name,row_key,rules\xda\x41\'table_name,row_key,rules,app_profile_id\x12\xbb\x02\n%GenerateInitialChangeStreamPartitions\x12@.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest\x1a\x41.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse\"\x8a\x01\x82\xd3\xe4\x93\x02[\"V/v2/{table_name=projects/*/instances/*/tables/*}:generateInitialChangeStreamPartitions:\x01*\xda\x41\ntable_name\xda\x41\x19table_name,app_profile_id0\x01\x12\xe6\x01\n\x10ReadChangeStream\x12+.google.bigtable.v2.ReadChangeStreamRequest\x1a,.google.bigtable.v2.ReadChangeStreamResponse\"u\x82\xd3\xe4\x93\x02\x46\"A/v2/{table_name=projects/*/instances/*/tables/*}:readChangeStream:\x01*\xda\x41\ntable_name\xda\x41\x19table_name,app_profile_id0\x01\x1a\xdb\x02\xca\x41\x17\x62igtable.googleapis.com\xd2\x41\xbd\x02https://www.googleapis.com/auth/bigtable.data,https://www.googleapis.com/auth/bigtable.data.readonly,https://www.googleapis.com/auth/cloud-bigtable.data,https://www.googleapis.com/auth/cloud-bigtable.data.readonly,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-onlyB\xeb\x02\n\x16\x63om.google.bigtable.v2B\rBigtableProtoP\x01Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2\xea\x02\x1bGoogle::Cloud::Bigtable::V2\xea\x41P\n%bigtableadmin.googleapis.com/Instance\x12\'projects/{project}/instances/{instance}\xea\x41\\\n\"bigtableadmin.googleapis.com/Table\x12\x36projects/{project}/instances/{instance}/tables/{table}b\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.bigtable.v2.bigtable_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\026com.google.bigtable.v2B\rBigtableProtoP\001Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2\352\002\033Google::Cloud::Bigtable::V2\352AP\n%bigtableadmin.googleapis.com/Instance\022\'projects/{project}/instances/{instance}\352A\\\n\"bigtableadmin.googleapis.com/Table\0226projects/{project}/instances/{instance}/tables/{table}' + _READROWSREQUEST.fields_by_name['table_name']._options = None + _READROWSREQUEST.fields_by_name['table_name']._serialized_options = b'\340A\002\372A$\n\"bigtableadmin.googleapis.com/Table' + _SAMPLEROWKEYSREQUEST.fields_by_name['table_name']._options = None + 
_SAMPLEROWKEYSREQUEST.fields_by_name['table_name']._serialized_options = b'\340A\002\372A$\n\"bigtableadmin.googleapis.com/Table' + _MUTATEROWREQUEST.fields_by_name['table_name']._options = None + _MUTATEROWREQUEST.fields_by_name['table_name']._serialized_options = b'\340A\002\372A$\n\"bigtableadmin.googleapis.com/Table' + _MUTATEROWREQUEST.fields_by_name['row_key']._options = None + _MUTATEROWREQUEST.fields_by_name['row_key']._serialized_options = b'\340A\002' + _MUTATEROWREQUEST.fields_by_name['mutations']._options = None + _MUTATEROWREQUEST.fields_by_name['mutations']._serialized_options = b'\340A\002' + _MUTATEROWSREQUEST_ENTRY.fields_by_name['mutations']._options = None + _MUTATEROWSREQUEST_ENTRY.fields_by_name['mutations']._serialized_options = b'\340A\002' + _MUTATEROWSREQUEST.fields_by_name['table_name']._options = None + _MUTATEROWSREQUEST.fields_by_name['table_name']._serialized_options = b'\340A\002\372A$\n\"bigtableadmin.googleapis.com/Table' + _MUTATEROWSREQUEST.fields_by_name['entries']._options = None + _MUTATEROWSREQUEST.fields_by_name['entries']._serialized_options = b'\340A\002' + _CHECKANDMUTATEROWREQUEST.fields_by_name['table_name']._options = None + _CHECKANDMUTATEROWREQUEST.fields_by_name['table_name']._serialized_options = b'\340A\002\372A$\n\"bigtableadmin.googleapis.com/Table' + _CHECKANDMUTATEROWREQUEST.fields_by_name['row_key']._options = None + _CHECKANDMUTATEROWREQUEST.fields_by_name['row_key']._serialized_options = b'\340A\002' + _PINGANDWARMREQUEST.fields_by_name['name']._options = None + _PINGANDWARMREQUEST.fields_by_name['name']._serialized_options = b'\340A\002\372A\'\n%bigtableadmin.googleapis.com/Instance' + _READMODIFYWRITEROWREQUEST.fields_by_name['table_name']._options = None + _READMODIFYWRITEROWREQUEST.fields_by_name['table_name']._serialized_options = b'\340A\002\372A$\n\"bigtableadmin.googleapis.com/Table' + _READMODIFYWRITEROWREQUEST.fields_by_name['row_key']._options = None + _READMODIFYWRITEROWREQUEST.fields_by_name['row_key']._serialized_options = b'\340A\002' + _READMODIFYWRITEROWREQUEST.fields_by_name['rules']._options = None + _READMODIFYWRITEROWREQUEST.fields_by_name['rules']._serialized_options = b'\340A\002' + _GENERATEINITIALCHANGESTREAMPARTITIONSREQUEST.fields_by_name['table_name']._options = None + _GENERATEINITIALCHANGESTREAMPARTITIONSREQUEST.fields_by_name['table_name']._serialized_options = b'\340A\002\372A$\n\"bigtableadmin.googleapis.com/Table' + _READCHANGESTREAMREQUEST.fields_by_name['table_name']._options = None + _READCHANGESTREAMREQUEST.fields_by_name['table_name']._serialized_options = b'\340A\002\372A$\n\"bigtableadmin.googleapis.com/Table' + _BIGTABLE._options = None + _BIGTABLE._serialized_options = b'\312A\027bigtable.googleapis.com\322A\275\002https://www.googleapis.com/auth/bigtable.data,https://www.googleapis.com/auth/bigtable.data.readonly,https://www.googleapis.com/auth/cloud-bigtable.data,https://www.googleapis.com/auth/cloud-bigtable.data.readonly,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-only' + _BIGTABLE.methods_by_name['ReadRows']._options = None + _BIGTABLE.methods_by_name['ReadRows']._serialized_options = b'\202\323\344\223\002>\"9/v2/{table_name=projects/*/instances/*/tables/*}:readRows:\001*\212\323\344\223\002N\022:\n\ntable_name\022,{table_name=projects/*/instances/*/tables/*}\022\020\n\016app_profile_id\332A\ntable_name\332A\031table_name,app_profile_id' + _BIGTABLE.methods_by_name['SampleRowKeys']._options = None + 
_BIGTABLE.methods_by_name['SampleRowKeys']._serialized_options = b'\202\323\344\223\002@\022>/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys\212\323\344\223\002N\022:\n\ntable_name\022,{table_name=projects/*/instances/*/tables/*}\022\020\n\016app_profile_id\332A\ntable_name\332A\031table_name,app_profile_id' + _BIGTABLE.methods_by_name['MutateRow']._options = None + _BIGTABLE.methods_by_name['MutateRow']._serialized_options = b'\202\323\344\223\002?\":/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow:\001*\212\323\344\223\002N\022:\n\ntable_name\022,{table_name=projects/*/instances/*/tables/*}\022\020\n\016app_profile_id\332A\034table_name,row_key,mutations\332A+table_name,row_key,mutations,app_profile_id' + _BIGTABLE.methods_by_name['MutateRows']._options = None + _BIGTABLE.methods_by_name['MutateRows']._serialized_options = b'\202\323\344\223\002@\";/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows:\001*\212\323\344\223\002N\022:\n\ntable_name\022,{table_name=projects/*/instances/*/tables/*}\022\020\n\016app_profile_id\332A\022table_name,entries\332A!table_name,entries,app_profile_id' + _BIGTABLE.methods_by_name['CheckAndMutateRow']._options = None + _BIGTABLE.methods_by_name['CheckAndMutateRow']._serialized_options = b'\202\323\344\223\002G\"B/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow:\001*\212\323\344\223\002N\022:\n\ntable_name\022,{table_name=projects/*/instances/*/tables/*}\022\020\n\016app_profile_id\332ABtable_name,row_key,predicate_filter,true_mutations,false_mutations\332AQtable_name,row_key,predicate_filter,true_mutations,false_mutations,app_profile_id' + _BIGTABLE.methods_by_name['PingAndWarm']._options = None + _BIGTABLE.methods_by_name['PingAndWarm']._serialized_options = b'\202\323\344\223\002+\"&/v2/{name=projects/*/instances/*}:ping:\001*\212\323\344\223\0029\022%\n\004name\022\035{name=projects/*/instances/*}\022\020\n\016app_profile_id\332A\004name\332A\023name,app_profile_id' + _BIGTABLE.methods_by_name['ReadModifyWriteRow']._options = None + _BIGTABLE.methods_by_name['ReadModifyWriteRow']._serialized_options = b'\202\323\344\223\002H\"C/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow:\001*\212\323\344\223\002N\022:\n\ntable_name\022,{table_name=projects/*/instances/*/tables/*}\022\020\n\016app_profile_id\332A\030table_name,row_key,rules\332A\'table_name,row_key,rules,app_profile_id' + _BIGTABLE.methods_by_name['GenerateInitialChangeStreamPartitions']._options = None + _BIGTABLE.methods_by_name['GenerateInitialChangeStreamPartitions']._serialized_options = b'\202\323\344\223\002[\"V/v2/{table_name=projects/*/instances/*/tables/*}:generateInitialChangeStreamPartitions:\001*\332A\ntable_name\332A\031table_name,app_profile_id' + _BIGTABLE.methods_by_name['ReadChangeStream']._options = None + _BIGTABLE.methods_by_name['ReadChangeStream']._serialized_options = b'\202\323\344\223\002F\"A/v2/{table_name=projects/*/instances/*/tables/*}:readChangeStream:\001*\332A\ntable_name\332A\031table_name,app_profile_id' + _READROWSREQUEST._serialized_start=392 + _READROWSREQUEST._serialized_end=792 + _READROWSREQUEST_REQUESTSTATSVIEW._serialized_start=690 + _READROWSREQUEST_REQUESTSTATSVIEW._serialized_end=792 + _READROWSRESPONSE._serialized_start=795 + _READROWSRESPONSE._serialized_end=1228 + _READROWSRESPONSE_CELLCHUNK._serialized_start=967 + _READROWSRESPONSE_CELLCHUNK._serialized_end=1228 + _SAMPLEROWKEYSREQUEST._serialized_start=1230 + _SAMPLEROWKEYSREQUEST._serialized_end=1340 + 
_SAMPLEROWKEYSRESPONSE._serialized_start=1342 + _SAMPLEROWKEYSRESPONSE._serialized_end=1404 + _MUTATEROWREQUEST._serialized_start=1407 + _MUTATEROWREQUEST._serialized_end=1589 + _MUTATEROWRESPONSE._serialized_start=1591 + _MUTATEROWRESPONSE._serialized_end=1610 + _MUTATEROWSREQUEST._serialized_start=1613 + _MUTATEROWSREQUEST._serialized_end=1867 + _MUTATEROWSREQUEST_ENTRY._serialized_start=1789 + _MUTATEROWSREQUEST_ENTRY._serialized_end=1867 + _MUTATEROWSRESPONSE._serialized_start=1870 + _MUTATEROWSRESPONSE._serialized_end=2013 + _MUTATEROWSRESPONSE_ENTRY._serialized_start=1955 + _MUTATEROWSRESPONSE_ENTRY._serialized_end=2013 + _CHECKANDMUTATEROWREQUEST._serialized_start=2016 + _CHECKANDMUTATEROWREQUEST._serialized_end=2318 + _CHECKANDMUTATEROWRESPONSE._serialized_start=2320 + _CHECKANDMUTATEROWRESPONSE._serialized_end=2374 + _PINGANDWARMREQUEST._serialized_start=2376 + _PINGANDWARMREQUEST._serialized_end=2481 + _PINGANDWARMRESPONSE._serialized_start=2483 + _PINGANDWARMRESPONSE._serialized_end=2504 + _READMODIFYWRITEROWREQUEST._serialized_start=2507 + _READMODIFYWRITEROWREQUEST._serialized_end=2705 + _READMODIFYWRITEROWRESPONSE._serialized_start=2707 + _READMODIFYWRITEROWRESPONSE._serialized_end=2773 + _GENERATEINITIALCHANGESTREAMPARTITIONSREQUEST._serialized_start=2776 + _GENERATEINITIALCHANGESTREAMPARTITIONSREQUEST._serialized_end=2910 + _GENERATEINITIALCHANGESTREAMPARTITIONSRESPONSE._serialized_start=2912 + _GENERATEINITIALCHANGESTREAMPARTITIONSRESPONSE._serialized_end=3015 + _READCHANGESTREAMREQUEST._serialized_start=3018 + _READCHANGESTREAMREQUEST._serialized_end=3429 + _READCHANGESTREAMRESPONSE._serialized_start=3432 + _READCHANGESTREAMRESPONSE._serialized_end=4691 + _READCHANGESTREAMRESPONSE_MUTATIONCHUNK._serialized_start=3700 + _READCHANGESTREAMRESPONSE_MUTATIONCHUNK._serialized_end=3944 + _READCHANGESTREAMRESPONSE_MUTATIONCHUNK_CHUNKINFO._serialized_start=3855 + _READCHANGESTREAMRESPONSE_MUTATIONCHUNK_CHUNKINFO._serialized_end=3944 + _READCHANGESTREAMRESPONSE_DATACHANGE._serialized_start=3947 + _READCHANGESTREAMRESPONSE_DATACHANGE._serialized_end=4401 + _READCHANGESTREAMRESPONSE_DATACHANGE_TYPE._serialized_start=4321 + _READCHANGESTREAMRESPONSE_DATACHANGE_TYPE._serialized_end=4401 + _READCHANGESTREAMRESPONSE_HEARTBEAT._serialized_start=4404 + _READCHANGESTREAMRESPONSE_HEARTBEAT._serialized_end=4549 + _READCHANGESTREAMRESPONSE_CLOSESTREAM._serialized_start=4551 + _READCHANGESTREAMRESPONSE_CLOSESTREAM._serialized_end=4674 + _BIGTABLE._serialized_start=4694 + _BIGTABLE._serialized_end=7853 +# @@protoc_insertion_point(module_scope) diff --git a/packages/google-cloud-bigtable/test_proxy/protos/bigtable_pb2_grpc.py b/packages/google-cloud-bigtable/test_proxy/protos/bigtable_pb2_grpc.py new file mode 100644 index 000000000000..9ce87d8696fc --- /dev/null +++ b/packages/google-cloud-bigtable/test_proxy/protos/bigtable_pb2_grpc.py @@ -0,0 +1,363 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + +import bigtable_pb2 as google_dot_bigtable_dot_v2_dot_bigtable__pb2 + + +class BigtableStub(object): + """Service for reading from and writing to existing Bigtable tables. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. 
+ """ + self.ReadRows = channel.unary_stream( + '/google.bigtable.v2.Bigtable/ReadRows', + request_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadRowsRequest.SerializeToString, + response_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadRowsResponse.FromString, + ) + self.SampleRowKeys = channel.unary_stream( + '/google.bigtable.v2.Bigtable/SampleRowKeys', + request_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.SampleRowKeysRequest.SerializeToString, + response_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.SampleRowKeysResponse.FromString, + ) + self.MutateRow = channel.unary_unary( + '/google.bigtable.v2.Bigtable/MutateRow', + request_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.MutateRowRequest.SerializeToString, + response_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.MutateRowResponse.FromString, + ) + self.MutateRows = channel.unary_stream( + '/google.bigtable.v2.Bigtable/MutateRows', + request_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.MutateRowsRequest.SerializeToString, + response_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.MutateRowsResponse.FromString, + ) + self.CheckAndMutateRow = channel.unary_unary( + '/google.bigtable.v2.Bigtable/CheckAndMutateRow', + request_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.CheckAndMutateRowRequest.SerializeToString, + response_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.CheckAndMutateRowResponse.FromString, + ) + self.PingAndWarm = channel.unary_unary( + '/google.bigtable.v2.Bigtable/PingAndWarm', + request_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.PingAndWarmRequest.SerializeToString, + response_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.PingAndWarmResponse.FromString, + ) + self.ReadModifyWriteRow = channel.unary_unary( + '/google.bigtable.v2.Bigtable/ReadModifyWriteRow', + request_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadModifyWriteRowRequest.SerializeToString, + response_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadModifyWriteRowResponse.FromString, + ) + self.GenerateInitialChangeStreamPartitions = channel.unary_stream( + '/google.bigtable.v2.Bigtable/GenerateInitialChangeStreamPartitions', + request_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.GenerateInitialChangeStreamPartitionsRequest.SerializeToString, + response_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.GenerateInitialChangeStreamPartitionsResponse.FromString, + ) + self.ReadChangeStream = channel.unary_stream( + '/google.bigtable.v2.Bigtable/ReadChangeStream', + request_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadChangeStreamRequest.SerializeToString, + response_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadChangeStreamResponse.FromString, + ) + + +class BigtableServicer(object): + """Service for reading from and writing to existing Bigtable tables. + """ + + def ReadRows(self, request, context): + """Streams back the contents of all requested rows in key order, optionally + applying the same Reader filter to each. Depending on their size, + rows and cells may be broken up across multiple responses, but + atomicity of each row will still be preserved. See the + ReadRowsResponse documentation for details. 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def SampleRowKeys(self, request, context): + """Returns a sample of row keys in the table. The returned row keys will + delimit contiguous sections of the table of approximately equal size, + which can be used to break up the data for distributed tasks like + mapreduces. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def MutateRow(self, request, context): + """Mutates a row atomically. Cells already present in the row are left + unchanged unless explicitly changed by `mutation`. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def MutateRows(self, request, context): + """Mutates multiple rows in a batch. Each individual row is mutated + atomically as in MutateRow, but the entire batch is not executed + atomically. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def CheckAndMutateRow(self, request, context): + """Mutates a row atomically based on the output of a predicate Reader filter. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def PingAndWarm(self, request, context): + """Warm up associated instance metadata for this connection. + This call is not required but may be useful for connection keep-alive. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ReadModifyWriteRow(self, request, context): + """Modifies a row atomically on the server. The method reads the latest + existing timestamp and value from the specified columns and writes a new + entry based on pre-defined read/modify/write rules. The new value for the + timestamp is the greater of the existing timestamp or the current server + time. The method returns the new contents of all modified cells. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GenerateInitialChangeStreamPartitions(self, request, context): + """NOTE: This API is intended to be used by Apache Beam BigtableIO. + Returns the current list of partitions that make up the table's + change stream. The union of partitions will cover the entire keyspace. + Partitions can be read with `ReadChangeStream`. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ReadChangeStream(self, request, context): + """NOTE: This API is intended to be used by Apache Beam BigtableIO. + Reads changes from a table's change stream. Changes will + reflect both user-initiated mutations and mutations that are caused by + garbage collection. 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_BigtableServicer_to_server(servicer, server): + rpc_method_handlers = { + 'ReadRows': grpc.unary_stream_rpc_method_handler( + servicer.ReadRows, + request_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadRowsRequest.FromString, + response_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadRowsResponse.SerializeToString, + ), + 'SampleRowKeys': grpc.unary_stream_rpc_method_handler( + servicer.SampleRowKeys, + request_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.SampleRowKeysRequest.FromString, + response_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.SampleRowKeysResponse.SerializeToString, + ), + 'MutateRow': grpc.unary_unary_rpc_method_handler( + servicer.MutateRow, + request_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.MutateRowRequest.FromString, + response_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.MutateRowResponse.SerializeToString, + ), + 'MutateRows': grpc.unary_stream_rpc_method_handler( + servicer.MutateRows, + request_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.MutateRowsRequest.FromString, + response_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.MutateRowsResponse.SerializeToString, + ), + 'CheckAndMutateRow': grpc.unary_unary_rpc_method_handler( + servicer.CheckAndMutateRow, + request_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.CheckAndMutateRowRequest.FromString, + response_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.CheckAndMutateRowResponse.SerializeToString, + ), + 'PingAndWarm': grpc.unary_unary_rpc_method_handler( + servicer.PingAndWarm, + request_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.PingAndWarmRequest.FromString, + response_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.PingAndWarmResponse.SerializeToString, + ), + 'ReadModifyWriteRow': grpc.unary_unary_rpc_method_handler( + servicer.ReadModifyWriteRow, + request_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadModifyWriteRowRequest.FromString, + response_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadModifyWriteRowResponse.SerializeToString, + ), + 'GenerateInitialChangeStreamPartitions': grpc.unary_stream_rpc_method_handler( + servicer.GenerateInitialChangeStreamPartitions, + request_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.GenerateInitialChangeStreamPartitionsRequest.FromString, + response_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.GenerateInitialChangeStreamPartitionsResponse.SerializeToString, + ), + 'ReadChangeStream': grpc.unary_stream_rpc_method_handler( + servicer.ReadChangeStream, + request_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadChangeStreamRequest.FromString, + response_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadChangeStreamResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'google.bigtable.v2.Bigtable', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + + # This class is part of an EXPERIMENTAL API. +class Bigtable(object): + """Service for reading from and writing to existing Bigtable tables. 
+ """ + + @staticmethod + def ReadRows(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_stream(request, target, '/google.bigtable.v2.Bigtable/ReadRows', + google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadRowsRequest.SerializeToString, + google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadRowsResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def SampleRowKeys(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_stream(request, target, '/google.bigtable.v2.Bigtable/SampleRowKeys', + google_dot_bigtable_dot_v2_dot_bigtable__pb2.SampleRowKeysRequest.SerializeToString, + google_dot_bigtable_dot_v2_dot_bigtable__pb2.SampleRowKeysResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def MutateRow(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/google.bigtable.v2.Bigtable/MutateRow', + google_dot_bigtable_dot_v2_dot_bigtable__pb2.MutateRowRequest.SerializeToString, + google_dot_bigtable_dot_v2_dot_bigtable__pb2.MutateRowResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def MutateRows(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_stream(request, target, '/google.bigtable.v2.Bigtable/MutateRows', + google_dot_bigtable_dot_v2_dot_bigtable__pb2.MutateRowsRequest.SerializeToString, + google_dot_bigtable_dot_v2_dot_bigtable__pb2.MutateRowsResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def CheckAndMutateRow(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/google.bigtable.v2.Bigtable/CheckAndMutateRow', + google_dot_bigtable_dot_v2_dot_bigtable__pb2.CheckAndMutateRowRequest.SerializeToString, + google_dot_bigtable_dot_v2_dot_bigtable__pb2.CheckAndMutateRowResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def PingAndWarm(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/google.bigtable.v2.Bigtable/PingAndWarm', + google_dot_bigtable_dot_v2_dot_bigtable__pb2.PingAndWarmRequest.SerializeToString, + google_dot_bigtable_dot_v2_dot_bigtable__pb2.PingAndWarmResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, 
metadata) + + @staticmethod + def ReadModifyWriteRow(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/google.bigtable.v2.Bigtable/ReadModifyWriteRow', + google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadModifyWriteRowRequest.SerializeToString, + google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadModifyWriteRowResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def GenerateInitialChangeStreamPartitions(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_stream(request, target, '/google.bigtable.v2.Bigtable/GenerateInitialChangeStreamPartitions', + google_dot_bigtable_dot_v2_dot_bigtable__pb2.GenerateInitialChangeStreamPartitionsRequest.SerializeToString, + google_dot_bigtable_dot_v2_dot_bigtable__pb2.GenerateInitialChangeStreamPartitionsResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def ReadChangeStream(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_stream(request, target, '/google.bigtable.v2.Bigtable/ReadChangeStream', + google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadChangeStreamRequest.SerializeToString, + google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadChangeStreamResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) diff --git a/packages/google-cloud-bigtable/test_proxy/protos/data_pb2.py b/packages/google-cloud-bigtable/test_proxy/protos/data_pb2.py new file mode 100644 index 000000000000..fff2120347b4 --- /dev/null +++ b/packages/google-cloud-bigtable/test_proxy/protos/data_pb2.py @@ -0,0 +1,68 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/bigtable/v2/data.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1dgoogle/bigtable/v2/data.proto\x12\x12google.bigtable.v2\"@\n\x03Row\x12\x0b\n\x03key\x18\x01 \x01(\x0c\x12,\n\x08\x66\x61milies\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v2.Family\"C\n\x06\x46\x61mily\x12\x0c\n\x04name\x18\x01 \x01(\t\x12+\n\x07\x63olumns\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v2.Column\"D\n\x06\x43olumn\x12\x11\n\tqualifier\x18\x01 \x01(\x0c\x12\'\n\x05\x63\x65lls\x18\x02 \x03(\x0b\x32\x18.google.bigtable.v2.Cell\"?\n\x04\x43\x65ll\x12\x18\n\x10timestamp_micros\x18\x01 \x01(\x03\x12\r\n\x05value\x18\x02 \x01(\x0c\x12\x0e\n\x06labels\x18\x03 \x03(\t\"\x8a\x01\n\x08RowRange\x12\x1a\n\x10start_key_closed\x18\x01 \x01(\x0cH\x00\x12\x18\n\x0estart_key_open\x18\x02 \x01(\x0cH\x00\x12\x16\n\x0c\x65nd_key_open\x18\x03 \x01(\x0cH\x01\x12\x18\n\x0e\x65nd_key_closed\x18\x04 \x01(\x0cH\x01\x42\x0b\n\tstart_keyB\t\n\x07\x65nd_key\"L\n\x06RowSet\x12\x10\n\x08row_keys\x18\x01 \x03(\x0c\x12\x30\n\nrow_ranges\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.RowRange\"\xc6\x01\n\x0b\x43olumnRange\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12 \n\x16start_qualifier_closed\x18\x02 \x01(\x0cH\x00\x12\x1e\n\x14start_qualifier_open\x18\x03 \x01(\x0cH\x00\x12\x1e\n\x14\x65nd_qualifier_closed\x18\x04 \x01(\x0cH\x01\x12\x1c\n\x12\x65nd_qualifier_open\x18\x05 \x01(\x0cH\x01\x42\x11\n\x0fstart_qualifierB\x0f\n\rend_qualifier\"N\n\x0eTimestampRange\x12\x1e\n\x16start_timestamp_micros\x18\x01 \x01(\x03\x12\x1c\n\x14\x65nd_timestamp_micros\x18\x02 \x01(\x03\"\x98\x01\n\nValueRange\x12\x1c\n\x12start_value_closed\x18\x01 \x01(\x0cH\x00\x12\x1a\n\x10start_value_open\x18\x02 \x01(\x0cH\x00\x12\x1a\n\x10\x65nd_value_closed\x18\x03 \x01(\x0cH\x01\x12\x18\n\x0e\x65nd_value_open\x18\x04 \x01(\x0cH\x01\x42\r\n\x0bstart_valueB\x0b\n\tend_value\"\xdf\x08\n\tRowFilter\x12\x34\n\x05\x63hain\x18\x01 \x01(\x0b\x32#.google.bigtable.v2.RowFilter.ChainH\x00\x12>\n\ninterleave\x18\x02 \x01(\x0b\x32(.google.bigtable.v2.RowFilter.InterleaveH\x00\x12<\n\tcondition\x18\x03 \x01(\x0b\x32\'.google.bigtable.v2.RowFilter.ConditionH\x00\x12\x0e\n\x04sink\x18\x10 \x01(\x08H\x00\x12\x19\n\x0fpass_all_filter\x18\x11 \x01(\x08H\x00\x12\x1a\n\x10\x62lock_all_filter\x18\x12 \x01(\x08H\x00\x12\x1e\n\x14row_key_regex_filter\x18\x04 \x01(\x0cH\x00\x12\x1b\n\x11row_sample_filter\x18\x0e \x01(\x01H\x00\x12\"\n\x18\x66\x61mily_name_regex_filter\x18\x05 \x01(\tH\x00\x12\'\n\x1d\x63olumn_qualifier_regex_filter\x18\x06 \x01(\x0cH\x00\x12>\n\x13\x63olumn_range_filter\x18\x07 \x01(\x0b\x32\x1f.google.bigtable.v2.ColumnRangeH\x00\x12\x44\n\x16timestamp_range_filter\x18\x08 \x01(\x0b\x32\".google.bigtable.v2.TimestampRangeH\x00\x12\x1c\n\x12value_regex_filter\x18\t \x01(\x0cH\x00\x12<\n\x12value_range_filter\x18\x0f \x01(\x0b\x32\x1e.google.bigtable.v2.ValueRangeH\x00\x12%\n\x1b\x63\x65lls_per_row_offset_filter\x18\n \x01(\x05H\x00\x12$\n\x1a\x63\x65lls_per_row_limit_filter\x18\x0b \x01(\x05H\x00\x12\'\n\x1d\x63\x65lls_per_column_limit_filter\x18\x0c \x01(\x05H\x00\x12!\n\x17strip_value_transformer\x18\r \x01(\x08H\x00\x12!\n\x17\x61pply_label_transformer\x18\x13 
\x01(\tH\x00\x1a\x37\n\x05\x43hain\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x1a<\n\nInterleave\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x1a\xad\x01\n\tCondition\x12\x37\n\x10predicate_filter\x18\x01 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x32\n\x0btrue_filter\x18\x02 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x33\n\x0c\x66\x61lse_filter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilterB\x08\n\x06\x66ilter\"\xc9\x04\n\x08Mutation\x12\x38\n\x08set_cell\x18\x01 \x01(\x0b\x32$.google.bigtable.v2.Mutation.SetCellH\x00\x12K\n\x12\x64\x65lete_from_column\x18\x02 \x01(\x0b\x32-.google.bigtable.v2.Mutation.DeleteFromColumnH\x00\x12K\n\x12\x64\x65lete_from_family\x18\x03 \x01(\x0b\x32-.google.bigtable.v2.Mutation.DeleteFromFamilyH\x00\x12\x45\n\x0f\x64\x65lete_from_row\x18\x04 \x01(\x0b\x32*.google.bigtable.v2.Mutation.DeleteFromRowH\x00\x1a\x61\n\x07SetCell\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x18\n\x10timestamp_micros\x18\x03 \x01(\x03\x12\r\n\x05value\x18\x04 \x01(\x0c\x1ay\n\x10\x44\x65leteFromColumn\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x36\n\ntime_range\x18\x03 \x01(\x0b\x32\".google.bigtable.v2.TimestampRange\x1a\'\n\x10\x44\x65leteFromFamily\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x1a\x0f\n\rDeleteFromRowB\n\n\x08mutation\"\x80\x01\n\x13ReadModifyWriteRule\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x16\n\x0c\x61ppend_value\x18\x03 \x01(\x0cH\x00\x12\x1a\n\x10increment_amount\x18\x04 \x01(\x03H\x00\x42\x06\n\x04rule\"B\n\x0fStreamPartition\x12/\n\trow_range\x18\x01 \x01(\x0b\x32\x1c.google.bigtable.v2.RowRange\"W\n\x18StreamContinuationTokens\x12;\n\x06tokens\x18\x01 \x03(\x0b\x32+.google.bigtable.v2.StreamContinuationToken\"`\n\x17StreamContinuationToken\x12\x36\n\tpartition\x18\x01 \x01(\x0b\x32#.google.bigtable.v2.StreamPartition\x12\r\n\x05token\x18\x02 \x01(\tB\xb5\x01\n\x16\x63om.google.bigtable.v2B\tDataProtoP\x01Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2\xea\x02\x1bGoogle::Cloud::Bigtable::V2b\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.bigtable.v2.data_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\026com.google.bigtable.v2B\tDataProtoP\001Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2\352\002\033Google::Cloud::Bigtable::V2' + _ROW._serialized_start=53 + _ROW._serialized_end=117 + _FAMILY._serialized_start=119 + _FAMILY._serialized_end=186 + _COLUMN._serialized_start=188 + _COLUMN._serialized_end=256 + _CELL._serialized_start=258 + _CELL._serialized_end=321 + _ROWRANGE._serialized_start=324 + _ROWRANGE._serialized_end=462 + _ROWSET._serialized_start=464 + _ROWSET._serialized_end=540 + _COLUMNRANGE._serialized_start=543 + _COLUMNRANGE._serialized_end=741 + _TIMESTAMPRANGE._serialized_start=743 + _TIMESTAMPRANGE._serialized_end=821 + _VALUERANGE._serialized_start=824 + _VALUERANGE._serialized_end=976 + _ROWFILTER._serialized_start=979 + _ROWFILTER._serialized_end=2098 + _ROWFILTER_CHAIN._serialized_start=1795 + 
_ROWFILTER_CHAIN._serialized_end=1850 + _ROWFILTER_INTERLEAVE._serialized_start=1852 + _ROWFILTER_INTERLEAVE._serialized_end=1912 + _ROWFILTER_CONDITION._serialized_start=1915 + _ROWFILTER_CONDITION._serialized_end=2088 + _MUTATION._serialized_start=2101 + _MUTATION._serialized_end=2686 + _MUTATION_SETCELL._serialized_start=2396 + _MUTATION_SETCELL._serialized_end=2493 + _MUTATION_DELETEFROMCOLUMN._serialized_start=2495 + _MUTATION_DELETEFROMCOLUMN._serialized_end=2616 + _MUTATION_DELETEFROMFAMILY._serialized_start=2618 + _MUTATION_DELETEFROMFAMILY._serialized_end=2657 + _MUTATION_DELETEFROMROW._serialized_start=2659 + _MUTATION_DELETEFROMROW._serialized_end=2674 + _READMODIFYWRITERULE._serialized_start=2689 + _READMODIFYWRITERULE._serialized_end=2817 + _STREAMPARTITION._serialized_start=2819 + _STREAMPARTITION._serialized_end=2885 + _STREAMCONTINUATIONTOKENS._serialized_start=2887 + _STREAMCONTINUATIONTOKENS._serialized_end=2974 + _STREAMCONTINUATIONTOKEN._serialized_start=2976 + _STREAMCONTINUATIONTOKEN._serialized_end=3072 +# @@protoc_insertion_point(module_scope) diff --git a/packages/google-cloud-bigtable/test_proxy/protos/data_pb2_grpc.py b/packages/google-cloud-bigtable/test_proxy/protos/data_pb2_grpc.py new file mode 100644 index 000000000000..2daafffebfc8 --- /dev/null +++ b/packages/google-cloud-bigtable/test_proxy/protos/data_pb2_grpc.py @@ -0,0 +1,4 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + diff --git a/packages/google-cloud-bigtable/test_proxy/protos/request_stats_pb2.py b/packages/google-cloud-bigtable/test_proxy/protos/request_stats_pb2.py new file mode 100644 index 000000000000..95fcc6e0f8f2 --- /dev/null +++ b/packages/google-cloud-bigtable/test_proxy/protos/request_stats_pb2.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/bigtable/v2/request_stats.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n&google/bigtable/v2/request_stats.proto\x12\x12google.bigtable.v2\x1a\x1egoogle/protobuf/duration.proto\"\x82\x01\n\x12ReadIterationStats\x12\x17\n\x0frows_seen_count\x18\x01 \x01(\x03\x12\x1b\n\x13rows_returned_count\x18\x02 \x01(\x03\x12\x18\n\x10\x63\x65lls_seen_count\x18\x03 \x01(\x03\x12\x1c\n\x14\x63\x65lls_returned_count\x18\x04 \x01(\x03\"Q\n\x13RequestLatencyStats\x12:\n\x17\x66rontend_server_latency\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\"\xa1\x01\n\x11\x46ullReadStatsView\x12\x44\n\x14read_iteration_stats\x18\x01 \x01(\x0b\x32&.google.bigtable.v2.ReadIterationStats\x12\x46\n\x15request_latency_stats\x18\x02 \x01(\x0b\x32\'.google.bigtable.v2.RequestLatencyStats\"c\n\x0cRequestStats\x12\x45\n\x14\x66ull_read_stats_view\x18\x01 \x01(\x0b\x32%.google.bigtable.v2.FullReadStatsViewH\x00\x42\x0c\n\nstats_viewB\xbd\x01\n\x16\x63om.google.bigtable.v2B\x11RequestStatsProtoP\x01Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2\xea\x02\x1bGoogle::Cloud::Bigtable::V2b\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.bigtable.v2.request_stats_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\026com.google.bigtable.v2B\021RequestStatsProtoP\001Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2\352\002\033Google::Cloud::Bigtable::V2' + _READITERATIONSTATS._serialized_start=95 + _READITERATIONSTATS._serialized_end=225 + _REQUESTLATENCYSTATS._serialized_start=227 + _REQUESTLATENCYSTATS._serialized_end=308 + _FULLREADSTATSVIEW._serialized_start=311 + _FULLREADSTATSVIEW._serialized_end=472 + _REQUESTSTATS._serialized_start=474 + _REQUESTSTATS._serialized_end=573 +# @@protoc_insertion_point(module_scope) diff --git a/packages/google-cloud-bigtable/test_proxy/protos/request_stats_pb2_grpc.py b/packages/google-cloud-bigtable/test_proxy/protos/request_stats_pb2_grpc.py new file mode 100644 index 000000000000..2daafffebfc8 --- /dev/null +++ b/packages/google-cloud-bigtable/test_proxy/protos/request_stats_pb2_grpc.py @@ -0,0 +1,4 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + diff --git a/packages/google-cloud-bigtable/test_proxy/protos/test_proxy_pb2.py b/packages/google-cloud-bigtable/test_proxy/protos/test_proxy_pb2.py new file mode 100644 index 000000000000..8c7817b14259 --- /dev/null +++ b/packages/google-cloud-bigtable/test_proxy/protos/test_proxy_pb2.py @@ -0,0 +1,71 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: test_proxy.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.api import client_pb2 as google_dot_api_dot_client__pb2 +import bigtable_pb2 as google_dot_bigtable_dot_v2_dot_bigtable__pb2 +import data_pb2 as google_dot_bigtable_dot_v2_dot_data__pb2 +from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 +from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x10test_proxy.proto\x12\x19google.bigtable.testproxy\x1a\x17google/api/client.proto\x1a!google/bigtable/v2/bigtable.proto\x1a\x1dgoogle/bigtable/v2/data.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x17google/rpc/status.proto\"\xb8\x01\n\x13\x43reateClientRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x61ta_target\x18\x02 \x01(\t\x12\x12\n\nproject_id\x18\x03 \x01(\t\x12\x13\n\x0binstance_id\x18\x04 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x05 \x01(\t\x12\x38\n\x15per_operation_timeout\x18\x06 \x01(\x0b\x32\x19.google.protobuf.Duration\"\x16\n\x14\x43reateClientResponse\"\'\n\x12\x43loseClientRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\"\x15\n\x13\x43loseClientResponse\"(\n\x13RemoveClientRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\"\x16\n\x14RemoveClientResponse\"w\n\x0eReadRowRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x12\n\ntable_name\x18\x04 \x01(\t\x12\x0f\n\x07row_key\x18\x02 \x01(\t\x12-\n\x06\x66ilter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\"U\n\tRowResult\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12$\n\x03row\x18\x02 \x01(\x0b\x32\x17.google.bigtable.v2.Row\"u\n\x0fReadRowsRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x34\n\x07request\x18\x02 \x01(\x0b\x32#.google.bigtable.v2.ReadRowsRequest\x12\x19\n\x11\x63\x61ncel_after_rows\x18\x03 \x01(\x05\"V\n\nRowsResult\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12$\n\x03row\x18\x02 \x03(\x0b\x32\x17.google.bigtable.v2.Row\"\\\n\x10MutateRowRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x35\n\x07request\x18\x02 \x01(\x0b\x32$.google.bigtable.v2.MutateRowRequest\"5\n\x0fMutateRowResult\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\"^\n\x11MutateRowsRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x36\n\x07request\x18\x02 \x01(\x0b\x32%.google.bigtable.v2.MutateRowsRequest\"s\n\x10MutateRowsResult\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12;\n\x05\x65ntry\x18\x02 \x03(\x0b\x32,.google.bigtable.v2.MutateRowsResponse.Entry\"l\n\x18\x43heckAndMutateRowRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12=\n\x07request\x18\x02 \x01(\x0b\x32,.google.bigtable.v2.CheckAndMutateRowRequest\"|\n\x17\x43heckAndMutateRowResult\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12=\n\x06result\x18\x02 \x01(\x0b\x32-.google.bigtable.v2.CheckAndMutateRowResponse\"d\n\x14SampleRowKeysRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x39\n\x07request\x18\x02 \x01(\x0b\x32(.google.bigtable.v2.SampleRowKeysRequest\"t\n\x13SampleRowKeysResult\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12\x39\n\x06sample\x18\x02 
\x03(\x0b\x32).google.bigtable.v2.SampleRowKeysResponse\"n\n\x19ReadModifyWriteRowRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12>\n\x07request\x18\x02 \x01(\x0b\x32-.google.bigtable.v2.ReadModifyWriteRowRequest2\xa4\t\n\x18\x43loudBigtableV2TestProxy\x12q\n\x0c\x43reateClient\x12..google.bigtable.testproxy.CreateClientRequest\x1a/.google.bigtable.testproxy.CreateClientResponse\"\x00\x12n\n\x0b\x43loseClient\x12-.google.bigtable.testproxy.CloseClientRequest\x1a..google.bigtable.testproxy.CloseClientResponse\"\x00\x12q\n\x0cRemoveClient\x12..google.bigtable.testproxy.RemoveClientRequest\x1a/.google.bigtable.testproxy.RemoveClientResponse\"\x00\x12\\\n\x07ReadRow\x12).google.bigtable.testproxy.ReadRowRequest\x1a$.google.bigtable.testproxy.RowResult\"\x00\x12_\n\x08ReadRows\x12*.google.bigtable.testproxy.ReadRowsRequest\x1a%.google.bigtable.testproxy.RowsResult\"\x00\x12\x66\n\tMutateRow\x12+.google.bigtable.testproxy.MutateRowRequest\x1a*.google.bigtable.testproxy.MutateRowResult\"\x00\x12m\n\x0e\x42ulkMutateRows\x12,.google.bigtable.testproxy.MutateRowsRequest\x1a+.google.bigtable.testproxy.MutateRowsResult\"\x00\x12~\n\x11\x43heckAndMutateRow\x12\x33.google.bigtable.testproxy.CheckAndMutateRowRequest\x1a\x32.google.bigtable.testproxy.CheckAndMutateRowResult\"\x00\x12r\n\rSampleRowKeys\x12/.google.bigtable.testproxy.SampleRowKeysRequest\x1a..google.bigtable.testproxy.SampleRowKeysResult\"\x00\x12r\n\x12ReadModifyWriteRow\x12\x34.google.bigtable.testproxy.ReadModifyWriteRowRequest\x1a$.google.bigtable.testproxy.RowResult\"\x00\x1a\x34\xca\x41\x31\x62igtable-test-proxy-not-accessible.googleapis.comB6\n#com.google.cloud.bigtable.testproxyP\x01Z\r./testproxypbb\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'test_proxy_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n#com.google.cloud.bigtable.testproxyP\001Z\r./testproxypb' + _CLOUDBIGTABLEV2TESTPROXY._options = None + _CLOUDBIGTABLEV2TESTPROXY._serialized_options = b'\312A1bigtable-test-proxy-not-accessible.googleapis.com' + _CREATECLIENTREQUEST._serialized_start=196 + _CREATECLIENTREQUEST._serialized_end=380 + _CREATECLIENTRESPONSE._serialized_start=382 + _CREATECLIENTRESPONSE._serialized_end=404 + _CLOSECLIENTREQUEST._serialized_start=406 + _CLOSECLIENTREQUEST._serialized_end=445 + _CLOSECLIENTRESPONSE._serialized_start=447 + _CLOSECLIENTRESPONSE._serialized_end=468 + _REMOVECLIENTREQUEST._serialized_start=470 + _REMOVECLIENTREQUEST._serialized_end=510 + _REMOVECLIENTRESPONSE._serialized_start=512 + _REMOVECLIENTRESPONSE._serialized_end=534 + _READROWREQUEST._serialized_start=536 + _READROWREQUEST._serialized_end=655 + _ROWRESULT._serialized_start=657 + _ROWRESULT._serialized_end=742 + _READROWSREQUEST._serialized_start=744 + _READROWSREQUEST._serialized_end=861 + _ROWSRESULT._serialized_start=863 + _ROWSRESULT._serialized_end=949 + _MUTATEROWREQUEST._serialized_start=951 + _MUTATEROWREQUEST._serialized_end=1043 + _MUTATEROWRESULT._serialized_start=1045 + _MUTATEROWRESULT._serialized_end=1098 + _MUTATEROWSREQUEST._serialized_start=1100 + _MUTATEROWSREQUEST._serialized_end=1194 + _MUTATEROWSRESULT._serialized_start=1196 + _MUTATEROWSRESULT._serialized_end=1311 + _CHECKANDMUTATEROWREQUEST._serialized_start=1313 + _CHECKANDMUTATEROWREQUEST._serialized_end=1421 + _CHECKANDMUTATEROWRESULT._serialized_start=1423 + _CHECKANDMUTATEROWRESULT._serialized_end=1547 + 
_SAMPLEROWKEYSREQUEST._serialized_start=1549 + _SAMPLEROWKEYSREQUEST._serialized_end=1649 + _SAMPLEROWKEYSRESULT._serialized_start=1651 + _SAMPLEROWKEYSRESULT._serialized_end=1767 + _READMODIFYWRITEROWREQUEST._serialized_start=1769 + _READMODIFYWRITEROWREQUEST._serialized_end=1879 + _CLOUDBIGTABLEV2TESTPROXY._serialized_start=1882 + _CLOUDBIGTABLEV2TESTPROXY._serialized_end=3070 +# @@protoc_insertion_point(module_scope) diff --git a/packages/google-cloud-bigtable/test_proxy/protos/test_proxy_pb2_grpc.py b/packages/google-cloud-bigtable/test_proxy/protos/test_proxy_pb2_grpc.py new file mode 100644 index 000000000000..60214a5848eb --- /dev/null +++ b/packages/google-cloud-bigtable/test_proxy/protos/test_proxy_pb2_grpc.py @@ -0,0 +1,433 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + +import test_proxy_pb2 as test__proxy__pb2 + + +class CloudBigtableV2TestProxyStub(object): + """Note that all RPCs are unary, even when the equivalent client binding call + may be streaming. This is an intentional simplification. + + Most methods have sync (default) and async variants. For async variants, + the proxy is expected to perform the async operation, then wait for results + before delivering them back to the driver client. + + Operations that may have interesting concurrency characteristics are + represented explicitly in the API (see ReadRowsRequest.cancel_after_rows). + We include such operations only when they can be meaningfully performed + through client bindings. + + Users should generally avoid setting deadlines for requests to the Proxy + because operations are not cancelable. If the deadline is set anyway, please + understand that the underlying operation will continue to be executed even + after the deadline expires. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. 
+ """ + self.CreateClient = channel.unary_unary( + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/CreateClient', + request_serializer=test__proxy__pb2.CreateClientRequest.SerializeToString, + response_deserializer=test__proxy__pb2.CreateClientResponse.FromString, + ) + self.CloseClient = channel.unary_unary( + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/CloseClient', + request_serializer=test__proxy__pb2.CloseClientRequest.SerializeToString, + response_deserializer=test__proxy__pb2.CloseClientResponse.FromString, + ) + self.RemoveClient = channel.unary_unary( + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/RemoveClient', + request_serializer=test__proxy__pb2.RemoveClientRequest.SerializeToString, + response_deserializer=test__proxy__pb2.RemoveClientResponse.FromString, + ) + self.ReadRow = channel.unary_unary( + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/ReadRow', + request_serializer=test__proxy__pb2.ReadRowRequest.SerializeToString, + response_deserializer=test__proxy__pb2.RowResult.FromString, + ) + self.ReadRows = channel.unary_unary( + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/ReadRows', + request_serializer=test__proxy__pb2.ReadRowsRequest.SerializeToString, + response_deserializer=test__proxy__pb2.RowsResult.FromString, + ) + self.MutateRow = channel.unary_unary( + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/MutateRow', + request_serializer=test__proxy__pb2.MutateRowRequest.SerializeToString, + response_deserializer=test__proxy__pb2.MutateRowResult.FromString, + ) + self.BulkMutateRows = channel.unary_unary( + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/BulkMutateRows', + request_serializer=test__proxy__pb2.MutateRowsRequest.SerializeToString, + response_deserializer=test__proxy__pb2.MutateRowsResult.FromString, + ) + self.CheckAndMutateRow = channel.unary_unary( + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/CheckAndMutateRow', + request_serializer=test__proxy__pb2.CheckAndMutateRowRequest.SerializeToString, + response_deserializer=test__proxy__pb2.CheckAndMutateRowResult.FromString, + ) + self.SampleRowKeys = channel.unary_unary( + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/SampleRowKeys', + request_serializer=test__proxy__pb2.SampleRowKeysRequest.SerializeToString, + response_deserializer=test__proxy__pb2.SampleRowKeysResult.FromString, + ) + self.ReadModifyWriteRow = channel.unary_unary( + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/ReadModifyWriteRow', + request_serializer=test__proxy__pb2.ReadModifyWriteRowRequest.SerializeToString, + response_deserializer=test__proxy__pb2.RowResult.FromString, + ) + + +class CloudBigtableV2TestProxyServicer(object): + """Note that all RPCs are unary, even when the equivalent client binding call + may be streaming. This is an intentional simplification. + + Most methods have sync (default) and async variants. For async variants, + the proxy is expected to perform the async operation, then wait for results + before delivering them back to the driver client. + + Operations that may have interesting concurrency characteristics are + represented explicitly in the API (see ReadRowsRequest.cancel_after_rows). + We include such operations only when they can be meaningfully performed + through client bindings. + + Users should generally avoid setting deadlines for requests to the Proxy + because operations are not cancelable. 
If the deadline is set anyway, please + understand that the underlying operation will continue to be executed even + after the deadline expires. + """ + + def CreateClient(self, request, context): + """Client management: + + Creates a client in the proxy. + Each client has its own dedicated channel(s), and can be used concurrently + and independently with other clients. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def CloseClient(self, request, context): + """Closes a client in the proxy, making it not accept new requests. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def RemoveClient(self, request, context): + """Removes a client in the proxy, making it inaccessible. Client closing + should be done by CloseClient() separately. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ReadRow(self, request, context): + """Bigtable operations: for each operation, you should use the synchronous or + asynchronous variant of the client method based on the `use_async_method` + setting of the client instance. For starters, you can choose to implement + one variant, and return UNIMPLEMENTED status for the other. + + Reads a row with the client instance. + The result row may not be present in the response. + Callers should check for it (e.g. calling has_row() in C++). + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ReadRows(self, request, context): + """Reads rows with the client instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def MutateRow(self, request, context): + """Writes a row with the client instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def BulkMutateRows(self, request, context): + """Writes multiple rows with the client instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def CheckAndMutateRow(self, request, context): + """Performs a check-and-mutate-row operation with the client instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def SampleRowKeys(self, request, context): + """Obtains a row key sampling with the client instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ReadModifyWriteRow(self, request, context): + """Performs a read-modify-write operation with the client. 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_CloudBigtableV2TestProxyServicer_to_server(servicer, server): + rpc_method_handlers = { + 'CreateClient': grpc.unary_unary_rpc_method_handler( + servicer.CreateClient, + request_deserializer=test__proxy__pb2.CreateClientRequest.FromString, + response_serializer=test__proxy__pb2.CreateClientResponse.SerializeToString, + ), + 'CloseClient': grpc.unary_unary_rpc_method_handler( + servicer.CloseClient, + request_deserializer=test__proxy__pb2.CloseClientRequest.FromString, + response_serializer=test__proxy__pb2.CloseClientResponse.SerializeToString, + ), + 'RemoveClient': grpc.unary_unary_rpc_method_handler( + servicer.RemoveClient, + request_deserializer=test__proxy__pb2.RemoveClientRequest.FromString, + response_serializer=test__proxy__pb2.RemoveClientResponse.SerializeToString, + ), + 'ReadRow': grpc.unary_unary_rpc_method_handler( + servicer.ReadRow, + request_deserializer=test__proxy__pb2.ReadRowRequest.FromString, + response_serializer=test__proxy__pb2.RowResult.SerializeToString, + ), + 'ReadRows': grpc.unary_unary_rpc_method_handler( + servicer.ReadRows, + request_deserializer=test__proxy__pb2.ReadRowsRequest.FromString, + response_serializer=test__proxy__pb2.RowsResult.SerializeToString, + ), + 'MutateRow': grpc.unary_unary_rpc_method_handler( + servicer.MutateRow, + request_deserializer=test__proxy__pb2.MutateRowRequest.FromString, + response_serializer=test__proxy__pb2.MutateRowResult.SerializeToString, + ), + 'BulkMutateRows': grpc.unary_unary_rpc_method_handler( + servicer.BulkMutateRows, + request_deserializer=test__proxy__pb2.MutateRowsRequest.FromString, + response_serializer=test__proxy__pb2.MutateRowsResult.SerializeToString, + ), + 'CheckAndMutateRow': grpc.unary_unary_rpc_method_handler( + servicer.CheckAndMutateRow, + request_deserializer=test__proxy__pb2.CheckAndMutateRowRequest.FromString, + response_serializer=test__proxy__pb2.CheckAndMutateRowResult.SerializeToString, + ), + 'SampleRowKeys': grpc.unary_unary_rpc_method_handler( + servicer.SampleRowKeys, + request_deserializer=test__proxy__pb2.SampleRowKeysRequest.FromString, + response_serializer=test__proxy__pb2.SampleRowKeysResult.SerializeToString, + ), + 'ReadModifyWriteRow': grpc.unary_unary_rpc_method_handler( + servicer.ReadModifyWriteRow, + request_deserializer=test__proxy__pb2.ReadModifyWriteRowRequest.FromString, + response_serializer=test__proxy__pb2.RowResult.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'google.bigtable.testproxy.CloudBigtableV2TestProxy', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + + # This class is part of an EXPERIMENTAL API. +class CloudBigtableV2TestProxy(object): + """Note that all RPCs are unary, even when the equivalent client binding call + may be streaming. This is an intentional simplification. + + Most methods have sync (default) and async variants. For async variants, + the proxy is expected to perform the async operation, then wait for results + before delivering them back to the driver client. + + Operations that may have interesting concurrency characteristics are + represented explicitly in the API (see ReadRowsRequest.cancel_after_rows). + We include such operations only when they can be meaningfully performed + through client bindings. 
+ + Users should generally avoid setting deadlines for requests to the Proxy + because operations are not cancelable. If the deadline is set anyway, please + understand that the underlying operation will continue to be executed even + after the deadline expires. + """ + + @staticmethod + def CreateClient(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/google.bigtable.testproxy.CloudBigtableV2TestProxy/CreateClient', + test__proxy__pb2.CreateClientRequest.SerializeToString, + test__proxy__pb2.CreateClientResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def CloseClient(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/google.bigtable.testproxy.CloudBigtableV2TestProxy/CloseClient', + test__proxy__pb2.CloseClientRequest.SerializeToString, + test__proxy__pb2.CloseClientResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def RemoveClient(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/google.bigtable.testproxy.CloudBigtableV2TestProxy/RemoveClient', + test__proxy__pb2.RemoveClientRequest.SerializeToString, + test__proxy__pb2.RemoveClientResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def ReadRow(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/google.bigtable.testproxy.CloudBigtableV2TestProxy/ReadRow', + test__proxy__pb2.ReadRowRequest.SerializeToString, + test__proxy__pb2.RowResult.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def ReadRows(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/google.bigtable.testproxy.CloudBigtableV2TestProxy/ReadRows', + test__proxy__pb2.ReadRowsRequest.SerializeToString, + test__proxy__pb2.RowsResult.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def MutateRow(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/google.bigtable.testproxy.CloudBigtableV2TestProxy/MutateRow', + test__proxy__pb2.MutateRowRequest.SerializeToString, + test__proxy__pb2.MutateRowResult.FromString, + options, channel_credentials, + insecure, call_credentials, compression, 
wait_for_ready, timeout, metadata) + + @staticmethod + def BulkMutateRows(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/google.bigtable.testproxy.CloudBigtableV2TestProxy/BulkMutateRows', + test__proxy__pb2.MutateRowsRequest.SerializeToString, + test__proxy__pb2.MutateRowsResult.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def CheckAndMutateRow(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/google.bigtable.testproxy.CloudBigtableV2TestProxy/CheckAndMutateRow', + test__proxy__pb2.CheckAndMutateRowRequest.SerializeToString, + test__proxy__pb2.CheckAndMutateRowResult.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def SampleRowKeys(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/google.bigtable.testproxy.CloudBigtableV2TestProxy/SampleRowKeys', + test__proxy__pb2.SampleRowKeysRequest.SerializeToString, + test__proxy__pb2.SampleRowKeysResult.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def ReadModifyWriteRow(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/google.bigtable.testproxy.CloudBigtableV2TestProxy/ReadModifyWriteRow', + test__proxy__pb2.ReadModifyWriteRowRequest.SerializeToString, + test__proxy__pb2.RowResult.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) diff --git a/packages/google-cloud-bigtable/test_proxy/run_tests.sh b/packages/google-cloud-bigtable/test_proxy/run_tests.sh new file mode 100755 index 000000000000..15b146b0365e --- /dev/null +++ b/packages/google-cloud-bigtable/test_proxy/run_tests.sh @@ -0,0 +1,47 @@ +#!/bin/bash +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and + +# attempt download golang if not found +if [[ ! -x "$(command -v go)" ]]; then + echo "Downloading golang..." 
+ wget https://go.dev/dl/go1.20.2.linux-amd64.tar.gz + tar -xzf go1.20.2.linux-amd64.tar.gz + export GOROOT=$(pwd)/go + export PATH=$GOROOT/bin:$PATH + export GOPATH=$HOME/go + go version +fi + +# ensure the working dir is the script's folder +SCRIPT_DIR=$(realpath $(dirname "$0")) +cd $SCRIPT_DIR + +export PROXY_SERVER_PORT=50055 + +# download test suite +if [ ! -d "cloud-bigtable-clients-test" ]; then + git clone https://github.com/googleapis/cloud-bigtable-clients-test.git +fi + +# start proxy +python test_proxy.py --port $PROXY_SERVER_PORT & +PROXY_PID=$! +function finish { + kill $PROXY_PID +} +trap finish EXIT + +# run tests +pushd cloud-bigtable-clients-test/tests +go test -v -proxy_addr=:$PROXY_SERVER_PORT diff --git a/packages/google-cloud-bigtable/test_proxy/test_proxy.py b/packages/google-cloud-bigtable/test_proxy/test_proxy.py new file mode 100644 index 000000000000..a0cf2f1f0c8d --- /dev/null +++ b/packages/google-cloud-bigtable/test_proxy/test_proxy.py @@ -0,0 +1,193 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +The Python implementation of the `cloud-bigtable-clients-test` proxy server. + +https://github.com/googleapis/cloud-bigtable-clients-test + +This server is intended to be used to test the correctness of Bigtable +clients across languages. + +Contributor Note: the proxy implementation is split across TestProxyClientHandler +and TestProxyGrpcServer. This is due to the fact that generated protos and proto-plus +objects cannot be used in the same process, so we had to make use of the +multiprocessing module to allow them to work together. 
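(Editor's aside, not part of the patch: the process split described above reduces to a queue-based handoff — the gRPC-facing process puts plain-Python request payloads on one multiprocessing queue and the handler process posts results back on another. The sketch below shows only that pattern with made-up payloads and hypothetical names; it does not use the real proxy classes.)

    # Hypothetical sketch of the cross-process request/response handoff.
    import multiprocessing

    def handler_loop(requests, responses):
        # Runs in the second process: pull a plain-dict request, post a result.
        while True:
            call = requests.get()
            if call is None:  # sentinel: shut down
                break
            responses.put({"id": call["id"], "result": "ok"})

    if __name__ == "__main__":
        request_q = multiprocessing.Queue()   # gRPC process -> handler process
        response_q = multiprocessing.Queue()  # handler process -> gRPC process
        worker = multiprocessing.Process(target=handler_loop, args=(request_q, response_q))
        worker.start()
        request_q.put({"id": 1, "method": "ReadRow", "payload": {}})
        print(response_q.get())  # {'id': 1, 'result': 'ok'}
        request_q.put(None)
        worker.join()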
+""" + +import multiprocessing +import argparse +import sys +import os +sys.path.append("handlers") + + +def grpc_server_process(request_q, queue_pool, port=50055): + """ + Defines a process that hosts a grpc server + proxies requests to a client_handler_process + """ + sys.path.append("protos") + from concurrent import futures + + import grpc + import test_proxy_pb2_grpc + import grpc_handler + + # Start gRPC server + server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) + test_proxy_pb2_grpc.add_CloudBigtableV2TestProxyServicer_to_server( + grpc_handler.TestProxyGrpcServer(request_q, queue_pool), server + ) + server.add_insecure_port("[::]:" + port) + server.start() + print("grpc_server_process started, listening on " + port) + server.wait_for_termination() + + +async def client_handler_process_async(request_q, queue_pool, use_legacy_client=False): + """ + Defines a process that recives Bigtable requests from a grpc_server_process, + and runs the request using a client library instance + """ + import base64 + import re + import asyncio + import warnings + import client_handler_data + import client_handler_legacy + warnings.filterwarnings("ignore", category=RuntimeWarning, message=".*Bigtable emulator.*") + + def camel_to_snake(str): + return re.sub(r"(?= 1.14.0, < 2.0.0dev", # Then this file should have foo==1.14.0 -google-api-core==1.34.0 -google-cloud-core==1.4.4 +google-api-core==2.16.0 +google-cloud-core==2.0.0 grpc-google-iam-v1==0.12.4 proto-plus==1.22.0 libcst==0.2.5 diff --git a/packages/google-cloud-bigtable/testing/constraints-3.8.txt b/packages/google-cloud-bigtable/testing/constraints-3.8.txt index e69de29bb2d1..ee858c3ecf4f 100644 --- a/packages/google-cloud-bigtable/testing/constraints-3.8.txt +++ b/packages/google-cloud-bigtable/testing/constraints-3.8.txt @@ -0,0 +1,14 @@ +# This constraints file is used to check that lower bounds +# are correct in setup.py +# List *all* library dependencies and extras in this file. +# Pin the version to the lower bound. +# +# e.g., if setup.py has "foo >= 1.14.0, < 2.0.0dev", +# Then this file should have foo==1.14.0 +google-api-core==2.16.0 +google-cloud-core==2.0.0 +grpc-google-iam-v1==0.12.4 +proto-plus==1.22.0 +libcst==0.2.5 +protobuf==3.19.5 +pytest-asyncio==0.21.1 diff --git a/packages/google-cloud-bigtable/tests/system/__init__.py b/packages/google-cloud-bigtable/tests/system/__init__.py index 4de65971c238..89a37dc92c5a 100644 --- a/packages/google-cloud-bigtable/tests/system/__init__.py +++ b/packages/google-cloud-bigtable/tests/system/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2020 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/tests/system/conftest.py b/packages/google-cloud-bigtable/tests/system/conftest.py index 910c20970c34..b8862ea4bc6f 100644 --- a/packages/google-cloud-bigtable/tests/system/conftest.py +++ b/packages/google-cloud-bigtable/tests/system/conftest.py @@ -1,4 +1,4 @@ -# Copyright 2011 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,199 +11,15 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- +""" +Import pytest fixtures for setting up table for data client system tests +""" +import sys import os -import pytest -from test_utils.system import unique_resource_id - -from google.cloud.bigtable.client import Client -from google.cloud.environment_vars import BIGTABLE_EMULATOR - -from . import _helpers - - -@pytest.fixture(scope="session") -def in_emulator(): - return os.getenv(BIGTABLE_EMULATOR) is not None - - -@pytest.fixture(scope="session") -def kms_key_name(): - return os.getenv("KMS_KEY_NAME") - - -@pytest.fixture(scope="session") -def with_kms_key_name(kms_key_name): - if kms_key_name is None: - pytest.skip("Test requires KMS_KEY_NAME environment variable") - return kms_key_name - - -@pytest.fixture(scope="session") -def skip_on_emulator(in_emulator): - if in_emulator: - pytest.skip("Emulator does not support this feature") - - -@pytest.fixture(scope="session") -def unique_suffix(): - return unique_resource_id("-") - - -@pytest.fixture(scope="session") -def location_id(): - return "us-central1-c" - - -@pytest.fixture(scope="session") -def serve_nodes(): - return 1 - - -@pytest.fixture(scope="session") -def label_key(): - return "python-system" - - -@pytest.fixture(scope="session") -def instance_labels(label_key): - return {label_key: _helpers.label_stamp()} - - -@pytest.fixture(scope="session") -def admin_client(): - return Client(admin=True) - - -@pytest.fixture(scope="session") -def service_account(admin_client): - from google.oauth2.service_account import Credentials - - if not isinstance(admin_client._credentials, Credentials): - pytest.skip("These tests require a service account credential") - return admin_client._credentials - - -@pytest.fixture(scope="session") -def admin_instance_id(unique_suffix): - return f"g-c-p{unique_suffix}" - - -@pytest.fixture(scope="session") -def admin_cluster_id(admin_instance_id): - return f"{admin_instance_id}-cluster" - - -@pytest.fixture(scope="session") -def admin_instance(admin_client, admin_instance_id, instance_labels): - return admin_client.instance(admin_instance_id, labels=instance_labels) - - -@pytest.fixture(scope="session") -def admin_cluster(admin_instance, admin_cluster_id, location_id, serve_nodes): - return admin_instance.cluster( - admin_cluster_id, - location_id=location_id, - serve_nodes=serve_nodes, - ) - - -@pytest.fixture(scope="session") -def admin_cluster_with_autoscaling( - admin_instance, - admin_cluster_id, - location_id, - min_serve_nodes, - max_serve_nodes, - cpu_utilization_percent, -): - return admin_instance.cluster( - admin_cluster_id, - location_id=location_id, - min_serve_nodes=min_serve_nodes, - max_serve_nodes=max_serve_nodes, - cpu_utilization_percent=cpu_utilization_percent, - ) - - -@pytest.fixture(scope="session") -def admin_instance_populated(admin_instance, admin_cluster, in_emulator): - # Emulator does not support instance admin operations (create / delete). 
- # See: https://cloud.google.com/bigtable/docs/emulator - if not in_emulator: - operation = admin_instance.create(clusters=[admin_cluster]) - operation.result(timeout=240) - - yield admin_instance - - if not in_emulator: - _helpers.retry_429(admin_instance.delete)() - - -@pytest.fixture(scope="session") -def data_client(): - return Client(admin=False) - - -@pytest.fixture(scope="session") -def data_instance_id(unique_suffix): - return f"g-c-p-d{unique_suffix}" - - -@pytest.fixture(scope="session") -def data_cluster_id(data_instance_id): - return f"{data_instance_id}-cluster" - - -@pytest.fixture(scope="session") -def data_instance_populated( - admin_client, - data_instance_id, - instance_labels, - data_cluster_id, - location_id, - serve_nodes, - in_emulator, -): - instance = admin_client.instance(data_instance_id, labels=instance_labels) - # Emulator does not support instance admin operations (create / delete). - # See: https://cloud.google.com/bigtable/docs/emulator - if not in_emulator: - cluster = instance.cluster( - data_cluster_id, - location_id=location_id, - serve_nodes=serve_nodes, - ) - operation = instance.create(clusters=[cluster]) - operation.result(timeout=240) - - yield instance - - if not in_emulator: - _helpers.retry_429(instance.delete)() - - -@pytest.fixture(scope="function") -def instances_to_delete(): - instances_to_delete = [] - - yield instances_to_delete - - for instance in instances_to_delete: - _helpers.retry_429(instance.delete)() - - -@pytest.fixture(scope="session") -def min_serve_nodes(in_emulator): - return 1 - - -@pytest.fixture(scope="session") -def max_serve_nodes(in_emulator): - return 8 - +script_path = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(script_path) -@pytest.fixture(scope="session") -def cpu_utilization_percent(in_emulator): - return 10 +pytest_plugins = [ + "data.setup_fixtures", +] diff --git a/packages/google-cloud-bigtable/tests/system/data/__init__.py b/packages/google-cloud-bigtable/tests/system/data/__init__.py new file mode 100644 index 000000000000..89a37dc92c5a --- /dev/null +++ b/packages/google-cloud-bigtable/tests/system/data/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/packages/google-cloud-bigtable/tests/system/data/setup_fixtures.py b/packages/google-cloud-bigtable/tests/system/data/setup_fixtures.py new file mode 100644 index 000000000000..77086b7f3e51 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/system/data/setup_fixtures.py @@ -0,0 +1,171 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Contains a set of pytest fixtures for setting up and populating a +Bigtable database for testing purposes. +""" + +import pytest +import pytest_asyncio +import os +import asyncio +import uuid + + +@pytest.fixture(scope="session") +def event_loop(): + loop = asyncio.get_event_loop() + yield loop + loop.stop() + loop.close() + + +@pytest.fixture(scope="session") +def admin_client(): + """ + Client for interacting with Table and Instance admin APIs + """ + from google.cloud.bigtable.client import Client + + client = Client(admin=True) + yield client + + +@pytest.fixture(scope="session") +def instance_id(admin_client, project_id, cluster_config): + """ + Returns BIGTABLE_TEST_INSTANCE if set, otherwise creates a new temporary instance for the test session + """ + from google.cloud.bigtable_admin_v2 import types + from google.api_core import exceptions + from google.cloud.environment_vars import BIGTABLE_EMULATOR + + # use user-specified instance if available + user_specified_instance = os.getenv("BIGTABLE_TEST_INSTANCE") + if user_specified_instance: + print("Using user-specified instance: {}".format(user_specified_instance)) + yield user_specified_instance + return + + # create a new temporary test instance + instance_id = f"python-bigtable-tests-{uuid.uuid4().hex[:6]}" + if os.getenv(BIGTABLE_EMULATOR): + # don't create instance if in emulator mode + yield instance_id + else: + try: + operation = admin_client.instance_admin_client.create_instance( + parent=f"projects/{project_id}", + instance_id=instance_id, + instance=types.Instance( + display_name="Test Instance", + # labels={"python-system-test": "true"}, + ), + clusters=cluster_config, + ) + operation.result(timeout=240) + except exceptions.AlreadyExists: + pass + yield instance_id + admin_client.instance_admin_client.delete_instance( + name=f"projects/{project_id}/instances/{instance_id}" + ) + + +@pytest.fixture(scope="session") +def column_split_config(): + """ + specify initial splits to create when creating a new test table + """ + return [(num * 1000).to_bytes(8, "big") for num in range(1, 10)] + + +@pytest.fixture(scope="session") +def table_id( + admin_client, + project_id, + instance_id, + column_family_config, + init_table_id, + column_split_config, +): + """ + Returns BIGTABLE_TEST_TABLE if set, otherwise creates a new temporary table for the test session + + Args: + - admin_client: Client for interacting with the Table Admin API. Supplied by the admin_client fixture. + - project_id: The project ID of the GCP project to test against. Supplied by the project_id fixture. + - instance_id: The ID of the Bigtable instance to test against. Supplied by the instance_id fixture. + - init_column_families: A list of column families to initialize the table with, if pre-initialized table is not given with BIGTABLE_TEST_TABLE. + Supplied by the init_column_families fixture. + - init_table_id: The table ID to give to the test table, if pre-initialized table is not given with BIGTABLE_TEST_TABLE. + Supplied by the init_table_id fixture. 
+ - column_split_config: A list of row keys to use as initial splits when creating the test table. + """ + from google.api_core import exceptions + from google.api_core import retry + + # use user-specified instance if available + user_specified_table = os.getenv("BIGTABLE_TEST_TABLE") + if user_specified_table: + print("Using user-specified table: {}".format(user_specified_table)) + yield user_specified_table + return + + retry = retry.Retry( + predicate=retry.if_exception_type(exceptions.FailedPrecondition) + ) + try: + parent_path = f"projects/{project_id}/instances/{instance_id}" + print(f"Creating table: {parent_path}/tables/{init_table_id}") + admin_client.table_admin_client.create_table( + request={ + "parent": parent_path, + "table_id": init_table_id, + "table": {"column_families": column_family_config}, + "initial_splits": [{"key": key} for key in column_split_config], + }, + retry=retry, + ) + except exceptions.AlreadyExists: + pass + yield init_table_id + print(f"Deleting table: {parent_path}/tables/{init_table_id}") + try: + admin_client.table_admin_client.delete_table( + name=f"{parent_path}/tables/{init_table_id}" + ) + except exceptions.NotFound: + print(f"Table {init_table_id} not found, skipping deletion") + + +@pytest_asyncio.fixture(scope="session") +async def client(): + from google.cloud.bigtable.data import BigtableDataClientAsync + + project = os.getenv("GOOGLE_CLOUD_PROJECT") or None + async with BigtableDataClientAsync(project=project, pool_size=4) as client: + yield client + + +@pytest.fixture(scope="session") +def project_id(client): + """Returns the project ID from the client.""" + yield client.project + + +@pytest_asyncio.fixture(scope="session") +async def table(client, table_id, instance_id): + async with client.get_table(instance_id, table_id) as table: + yield table diff --git a/packages/google-cloud-bigtable/tests/system/data/test_system.py b/packages/google-cloud-bigtable/tests/system/data/test_system.py new file mode 100644 index 000000000000..aeb08fc1ac05 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/system/data/test_system.py @@ -0,0 +1,943 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
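(Editor's aside, not part of the patch: outside pytest, the same setup performed by the client and table fixtures above can be written directly against the async data client. The instance and table ids below are placeholders, and GOOGLE_CLOUD_PROJECT is read the same way the fixture does.)

    # Hypothetical standalone equivalent of the `client`/`table` fixtures.
    import asyncio
    import os

    from google.cloud.bigtable.data import BigtableDataClientAsync

    async def main():
        project = os.getenv("GOOGLE_CLOUD_PROJECT") or None
        async with BigtableDataClientAsync(project=project) as client:
            async with client.get_table("my-instance", "my-table") as table:
                row = await table.read_row(b"some-row-key")
                print(row)

    if __name__ == "__main__":
        asyncio.run(main())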
+ +import pytest +import pytest_asyncio +import asyncio +import uuid +import os +from google.api_core import retry +from google.api_core.exceptions import ClientError + +from google.cloud.bigtable.data.read_modify_write_rules import _MAX_INCREMENT_VALUE +from google.cloud.environment_vars import BIGTABLE_EMULATOR + +TEST_FAMILY = "test-family" +TEST_FAMILY_2 = "test-family-2" + + +@pytest.fixture(scope="session") +def column_family_config(): + """ + specify column families to create when creating a new test table + """ + from google.cloud.bigtable_admin_v2 import types + + return {TEST_FAMILY: types.ColumnFamily(), TEST_FAMILY_2: types.ColumnFamily()} + + +@pytest.fixture(scope="session") +def init_table_id(): + """ + The table_id to use when creating a new test table + """ + return f"test-table-{uuid.uuid4().hex}" + + +@pytest.fixture(scope="session") +def cluster_config(project_id): + """ + Configuration for the clusters to use when creating a new instance + """ + from google.cloud.bigtable_admin_v2 import types + + cluster = { + "test-cluster": types.Cluster( + location=f"projects/{project_id}/locations/us-central1-b", + serve_nodes=1, + ) + } + return cluster + + +class TempRowBuilder: + """ + Used to add rows to a table for testing purposes. + """ + + def __init__(self, table): + self.rows = [] + self.table = table + + async def add_row( + self, row_key, *, family=TEST_FAMILY, qualifier=b"q", value=b"test-value" + ): + if isinstance(value, str): + value = value.encode("utf-8") + elif isinstance(value, int): + value = value.to_bytes(8, byteorder="big", signed=True) + request = { + "table_name": self.table.table_name, + "row_key": row_key, + "mutations": [ + { + "set_cell": { + "family_name": family, + "column_qualifier": qualifier, + "value": value, + } + } + ], + } + await self.table.client._gapic_client.mutate_row(request) + self.rows.append(row_key) + + async def delete_rows(self): + if self.rows: + request = { + "table_name": self.table.table_name, + "entries": [ + {"row_key": row, "mutations": [{"delete_from_row": {}}]} + for row in self.rows + ], + } + await self.table.client._gapic_client.mutate_rows(request) + + +@pytest.mark.usefixtures("table") +async def _retrieve_cell_value(table, row_key): + """ + Helper to read an individual row + """ + from google.cloud.bigtable.data import ReadRowsQuery + + row_list = await table.read_rows(ReadRowsQuery(row_keys=row_key)) + assert len(row_list) == 1 + row = row_list[0] + cell = row.cells[0] + return cell.value + + +async def _create_row_and_mutation( + table, temp_rows, *, start_value=b"start", new_value=b"new_value" +): + """ + Helper to create a new row, and a sample set_cell mutation to change its value + """ + from google.cloud.bigtable.data.mutations import SetCell + + row_key = uuid.uuid4().hex.encode() + family = TEST_FAMILY + qualifier = b"test-qualifier" + await temp_rows.add_row( + row_key, family=family, qualifier=qualifier, value=start_value + ) + # ensure cell is initialized + assert (await _retrieve_cell_value(table, row_key)) == start_value + + mutation = SetCell(family=TEST_FAMILY, qualifier=qualifier, new_value=new_value) + return row_key, mutation + + +@pytest.mark.usefixtures("table") +@pytest_asyncio.fixture(scope="function") +async def temp_rows(table): + builder = TempRowBuilder(table) + yield builder + await builder.delete_rows() + + +@pytest.mark.usefixtures("table") +@pytest.mark.usefixtures("client") +@retry.AsyncRetry(predicate=retry.if_exception_type(ClientError), initial=1, maximum=10) +@pytest.mark.asyncio 
+async def test_ping_and_warm_gapic(client, table): + """ + Simple ping rpc test + This test ensures channels are able to authenticate with backend + """ + request = {"name": table.instance_name} + await client._gapic_client.ping_and_warm(request) + + +@pytest.mark.usefixtures("table") +@pytest.mark.usefixtures("client") +@retry.AsyncRetry(predicate=retry.if_exception_type(ClientError), initial=1, maximum=5) +@pytest.mark.asyncio +async def test_ping_and_warm(client, table): + """ + Test ping and warm from handwritten client + """ + try: + channel = client.transport._grpc_channel.pool[0] + except Exception: + # for sync client + channel = client.transport._grpc_channel + results = await client._ping_and_warm_instances(channel) + assert len(results) == 1 + assert results[0] is None + + +@pytest.mark.asyncio +@pytest.mark.usefixtures("table") +@retry.AsyncRetry(predicate=retry.if_exception_type(ClientError), initial=1, maximum=5) +async def test_mutation_set_cell(table, temp_rows): + """ + Ensure cells can be set properly + """ + row_key = b"bulk_mutate" + new_value = uuid.uuid4().hex.encode() + row_key, mutation = await _create_row_and_mutation( + table, temp_rows, new_value=new_value + ) + await table.mutate_row(row_key, mutation) + + # ensure cell is updated + assert (await _retrieve_cell_value(table, row_key)) == new_value + + +@pytest.mark.skipif( + bool(os.environ.get(BIGTABLE_EMULATOR)), reason="emulator doesn't use splits" +) +@pytest.mark.usefixtures("client") +@pytest.mark.usefixtures("table") +@retry.AsyncRetry(predicate=retry.if_exception_type(ClientError), initial=1, maximum=5) +@pytest.mark.asyncio +async def test_sample_row_keys(client, table, temp_rows, column_split_config): + """ + Sample keys should return a single sample in small test tables + """ + await temp_rows.add_row(b"row_key_1") + await temp_rows.add_row(b"row_key_2") + + results = await table.sample_row_keys() + assert len(results) == len(column_split_config) + 1 + # first keys should match the split config + for idx in range(len(column_split_config)): + assert results[idx][0] == column_split_config[idx] + assert isinstance(results[idx][1], int) + # last sample should be empty key + assert results[-1][0] == b"" + assert isinstance(results[-1][1], int) + + +@pytest.mark.usefixtures("client") +@pytest.mark.usefixtures("table") +@pytest.mark.asyncio +async def test_bulk_mutations_set_cell(client, table, temp_rows): + """ + Ensure cells can be set properly + """ + from google.cloud.bigtable.data.mutations import RowMutationEntry + + new_value = uuid.uuid4().hex.encode() + row_key, mutation = await _create_row_and_mutation( + table, temp_rows, new_value=new_value + ) + bulk_mutation = RowMutationEntry(row_key, [mutation]) + + await table.bulk_mutate_rows([bulk_mutation]) + + # ensure cell is updated + assert (await _retrieve_cell_value(table, row_key)) == new_value + + +@pytest.mark.asyncio +async def test_bulk_mutations_raise_exception(client, table): + """ + If an invalid mutation is passed, an exception should be raised + """ + from google.cloud.bigtable.data.mutations import RowMutationEntry, SetCell + from google.cloud.bigtable.data.exceptions import MutationsExceptionGroup + from google.cloud.bigtable.data.exceptions import FailedMutationEntryError + + row_key = uuid.uuid4().hex.encode() + mutation = SetCell(family="nonexistent", qualifier=b"test-qualifier", new_value=b"") + bulk_mutation = RowMutationEntry(row_key, [mutation]) + + with pytest.raises(MutationsExceptionGroup) as exc: + await 
table.bulk_mutate_rows([bulk_mutation]) + assert len(exc.value.exceptions) == 1 + entry_error = exc.value.exceptions[0] + assert isinstance(entry_error, FailedMutationEntryError) + assert entry_error.index == 0 + assert entry_error.entry == bulk_mutation + + +@pytest.mark.usefixtures("client") +@pytest.mark.usefixtures("table") +@retry.AsyncRetry(predicate=retry.if_exception_type(ClientError), initial=1, maximum=5) +@pytest.mark.asyncio +async def test_mutations_batcher_context_manager(client, table, temp_rows): + """ + test batcher with context manager. Should flush on exit + """ + from google.cloud.bigtable.data.mutations import RowMutationEntry + + new_value, new_value2 = [uuid.uuid4().hex.encode() for _ in range(2)] + row_key, mutation = await _create_row_and_mutation( + table, temp_rows, new_value=new_value + ) + row_key2, mutation2 = await _create_row_and_mutation( + table, temp_rows, new_value=new_value2 + ) + bulk_mutation = RowMutationEntry(row_key, [mutation]) + bulk_mutation2 = RowMutationEntry(row_key2, [mutation2]) + + async with table.mutations_batcher() as batcher: + await batcher.append(bulk_mutation) + await batcher.append(bulk_mutation2) + # ensure cell is updated + assert (await _retrieve_cell_value(table, row_key)) == new_value + assert len(batcher._staged_entries) == 0 + + +@pytest.mark.usefixtures("client") +@pytest.mark.usefixtures("table") +@retry.AsyncRetry(predicate=retry.if_exception_type(ClientError), initial=1, maximum=5) +@pytest.mark.asyncio +async def test_mutations_batcher_timer_flush(client, table, temp_rows): + """ + batch should occur after flush_interval seconds + """ + from google.cloud.bigtable.data.mutations import RowMutationEntry + + new_value = uuid.uuid4().hex.encode() + row_key, mutation = await _create_row_and_mutation( + table, temp_rows, new_value=new_value + ) + bulk_mutation = RowMutationEntry(row_key, [mutation]) + flush_interval = 0.1 + async with table.mutations_batcher(flush_interval=flush_interval) as batcher: + await batcher.append(bulk_mutation) + await asyncio.sleep(0) + assert len(batcher._staged_entries) == 1 + await asyncio.sleep(flush_interval + 0.1) + assert len(batcher._staged_entries) == 0 + # ensure cell is updated + assert (await _retrieve_cell_value(table, row_key)) == new_value + + +@pytest.mark.usefixtures("client") +@pytest.mark.usefixtures("table") +@retry.AsyncRetry(predicate=retry.if_exception_type(ClientError), initial=1, maximum=5) +@pytest.mark.asyncio +async def test_mutations_batcher_count_flush(client, table, temp_rows): + """ + batch should flush after flush_limit_mutation_count mutations + """ + from google.cloud.bigtable.data.mutations import RowMutationEntry + + new_value, new_value2 = [uuid.uuid4().hex.encode() for _ in range(2)] + row_key, mutation = await _create_row_and_mutation( + table, temp_rows, new_value=new_value + ) + bulk_mutation = RowMutationEntry(row_key, [mutation]) + row_key2, mutation2 = await _create_row_and_mutation( + table, temp_rows, new_value=new_value2 + ) + bulk_mutation2 = RowMutationEntry(row_key2, [mutation2]) + + async with table.mutations_batcher(flush_limit_mutation_count=2) as batcher: + await batcher.append(bulk_mutation) + assert len(batcher._flush_jobs) == 0 + # should be noop; flush not scheduled + assert len(batcher._staged_entries) == 1 + await batcher.append(bulk_mutation2) + # task should now be scheduled + assert len(batcher._flush_jobs) == 1 + await asyncio.gather(*batcher._flush_jobs) + assert len(batcher._staged_entries) == 0 + assert len(batcher._flush_jobs) == 
0 + # ensure cells were updated + assert (await _retrieve_cell_value(table, row_key)) == new_value + assert (await _retrieve_cell_value(table, row_key2)) == new_value2 + + +@pytest.mark.usefixtures("client") +@pytest.mark.usefixtures("table") +@retry.AsyncRetry(predicate=retry.if_exception_type(ClientError), initial=1, maximum=5) +@pytest.mark.asyncio +async def test_mutations_batcher_bytes_flush(client, table, temp_rows): + """ + batch should flush after flush_limit_bytes bytes + """ + from google.cloud.bigtable.data.mutations import RowMutationEntry + + new_value, new_value2 = [uuid.uuid4().hex.encode() for _ in range(2)] + row_key, mutation = await _create_row_and_mutation( + table, temp_rows, new_value=new_value + ) + bulk_mutation = RowMutationEntry(row_key, [mutation]) + row_key2, mutation2 = await _create_row_and_mutation( + table, temp_rows, new_value=new_value2 + ) + bulk_mutation2 = RowMutationEntry(row_key2, [mutation2]) + + flush_limit = bulk_mutation.size() + bulk_mutation2.size() - 1 + + async with table.mutations_batcher(flush_limit_bytes=flush_limit) as batcher: + await batcher.append(bulk_mutation) + assert len(batcher._flush_jobs) == 0 + assert len(batcher._staged_entries) == 1 + await batcher.append(bulk_mutation2) + # task should now be scheduled + assert len(batcher._flush_jobs) == 1 + assert len(batcher._staged_entries) == 0 + # let flush complete + await asyncio.gather(*batcher._flush_jobs) + # ensure cells were updated + assert (await _retrieve_cell_value(table, row_key)) == new_value + assert (await _retrieve_cell_value(table, row_key2)) == new_value2 + + +@pytest.mark.usefixtures("client") +@pytest.mark.usefixtures("table") +@pytest.mark.asyncio +async def test_mutations_batcher_no_flush(client, table, temp_rows): + """ + test with no flush requirements met + """ + from google.cloud.bigtable.data.mutations import RowMutationEntry + + new_value = uuid.uuid4().hex.encode() + start_value = b"unchanged" + row_key, mutation = await _create_row_and_mutation( + table, temp_rows, start_value=start_value, new_value=new_value + ) + bulk_mutation = RowMutationEntry(row_key, [mutation]) + row_key2, mutation2 = await _create_row_and_mutation( + table, temp_rows, start_value=start_value, new_value=new_value + ) + bulk_mutation2 = RowMutationEntry(row_key2, [mutation2]) + + size_limit = bulk_mutation.size() + bulk_mutation2.size() + 1 + async with table.mutations_batcher( + flush_limit_bytes=size_limit, flush_limit_mutation_count=3, flush_interval=1 + ) as batcher: + await batcher.append(bulk_mutation) + assert len(batcher._staged_entries) == 1 + await batcher.append(bulk_mutation2) + # flush not scheduled + assert len(batcher._flush_jobs) == 0 + await asyncio.sleep(0.01) + assert len(batcher._staged_entries) == 2 + assert len(batcher._flush_jobs) == 0 + # ensure cells were not updated + assert (await _retrieve_cell_value(table, row_key)) == start_value + assert (await _retrieve_cell_value(table, row_key2)) == start_value + + +@pytest.mark.usefixtures("client") +@pytest.mark.usefixtures("table") +@pytest.mark.parametrize( + "start,increment,expected", + [ + (0, 0, 0), + (0, 1, 1), + (0, -1, -1), + (1, 0, 1), + (0, -100, -100), + (0, 3000, 3000), + (10, 4, 14), + (_MAX_INCREMENT_VALUE, -_MAX_INCREMENT_VALUE, 0), + (_MAX_INCREMENT_VALUE, 2, -_MAX_INCREMENT_VALUE), + (-_MAX_INCREMENT_VALUE, -2, _MAX_INCREMENT_VALUE), + ], +) +@pytest.mark.asyncio +async def test_read_modify_write_row_increment( + client, table, temp_rows, start, increment, expected +): + """ + test 
read_modify_write_row + """ + from google.cloud.bigtable.data.read_modify_write_rules import IncrementRule + + row_key = b"test-row-key" + family = TEST_FAMILY + qualifier = b"test-qualifier" + await temp_rows.add_row(row_key, value=start, family=family, qualifier=qualifier) + + rule = IncrementRule(family, qualifier, increment) + result = await table.read_modify_write_row(row_key, rule) + assert result.row_key == row_key + assert len(result) == 1 + assert result[0].family == family + assert result[0].qualifier == qualifier + assert int(result[0]) == expected + # ensure that reading from server gives same value + assert (await _retrieve_cell_value(table, row_key)) == result[0].value + + +@pytest.mark.usefixtures("client") +@pytest.mark.usefixtures("table") +@pytest.mark.parametrize( + "start,append,expected", + [ + (b"", b"", b""), + ("", "", b""), + (b"abc", b"123", b"abc123"), + (b"abc", "123", b"abc123"), + ("", b"1", b"1"), + (b"abc", "", b"abc"), + (b"hello", b"world", b"helloworld"), + ], +) +@pytest.mark.asyncio +async def test_read_modify_write_row_append( + client, table, temp_rows, start, append, expected +): + """ + test read_modify_write_row + """ + from google.cloud.bigtable.data.read_modify_write_rules import AppendValueRule + + row_key = b"test-row-key" + family = TEST_FAMILY + qualifier = b"test-qualifier" + await temp_rows.add_row(row_key, value=start, family=family, qualifier=qualifier) + + rule = AppendValueRule(family, qualifier, append) + result = await table.read_modify_write_row(row_key, rule) + assert result.row_key == row_key + assert len(result) == 1 + assert result[0].family == family + assert result[0].qualifier == qualifier + assert result[0].value == expected + # ensure that reading from server gives same value + assert (await _retrieve_cell_value(table, row_key)) == result[0].value + + +@pytest.mark.usefixtures("client") +@pytest.mark.usefixtures("table") +@pytest.mark.asyncio +async def test_read_modify_write_row_chained(client, table, temp_rows): + """ + test read_modify_write_row with multiple rules + """ + from google.cloud.bigtable.data.read_modify_write_rules import AppendValueRule + from google.cloud.bigtable.data.read_modify_write_rules import IncrementRule + + row_key = b"test-row-key" + family = TEST_FAMILY + qualifier = b"test-qualifier" + start_amount = 1 + increment_amount = 10 + await temp_rows.add_row( + row_key, value=start_amount, family=family, qualifier=qualifier + ) + rule = [ + IncrementRule(family, qualifier, increment_amount), + AppendValueRule(family, qualifier, "hello"), + AppendValueRule(family, qualifier, "world"), + AppendValueRule(family, qualifier, "!"), + ] + result = await table.read_modify_write_row(row_key, rule) + assert result.row_key == row_key + assert result[0].family == family + assert result[0].qualifier == qualifier + # result should be a bytes number string for the IncrementRules, followed by the AppendValueRule values + assert ( + result[0].value + == (start_amount + increment_amount).to_bytes(8, "big", signed=True) + + b"helloworld!" 
+ ) + # ensure that reading from server gives same value + assert (await _retrieve_cell_value(table, row_key)) == result[0].value + + +@pytest.mark.usefixtures("client") +@pytest.mark.usefixtures("table") +@pytest.mark.parametrize( + "start_val,predicate_range,expected_result", + [ + (1, (0, 2), True), + (-1, (0, 2), False), + ], +) +@pytest.mark.asyncio +async def test_check_and_mutate( + client, table, temp_rows, start_val, predicate_range, expected_result +): + """ + test that check_and_mutate_row works applies the right mutations, and returns the right result + """ + from google.cloud.bigtable.data.mutations import SetCell + from google.cloud.bigtable.data.row_filters import ValueRangeFilter + + row_key = b"test-row-key" + family = TEST_FAMILY + qualifier = b"test-qualifier" + + await temp_rows.add_row( + row_key, value=start_val, family=family, qualifier=qualifier + ) + + false_mutation_value = b"false-mutation-value" + false_mutation = SetCell( + family=TEST_FAMILY, qualifier=qualifier, new_value=false_mutation_value + ) + true_mutation_value = b"true-mutation-value" + true_mutation = SetCell( + family=TEST_FAMILY, qualifier=qualifier, new_value=true_mutation_value + ) + predicate = ValueRangeFilter(predicate_range[0], predicate_range[1]) + result = await table.check_and_mutate_row( + row_key, + predicate, + true_case_mutations=true_mutation, + false_case_mutations=false_mutation, + ) + assert result == expected_result + # ensure cell is updated + expected_value = true_mutation_value if expected_result else false_mutation_value + assert (await _retrieve_cell_value(table, row_key)) == expected_value + + +@pytest.mark.skipif( + bool(os.environ.get(BIGTABLE_EMULATOR)), + reason="emulator doesn't raise InvalidArgument", +) +@pytest.mark.usefixtures("client") +@pytest.mark.usefixtures("table") +@pytest.mark.asyncio +async def test_check_and_mutate_empty_request(client, table): + """ + check_and_mutate with no true or fale mutations should raise an error + """ + from google.api_core import exceptions + + with pytest.raises(exceptions.InvalidArgument) as e: + await table.check_and_mutate_row( + b"row_key", None, true_case_mutations=None, false_case_mutations=None + ) + assert "No mutations provided" in str(e.value) + + +@pytest.mark.usefixtures("table") +@retry.AsyncRetry(predicate=retry.if_exception_type(ClientError), initial=1, maximum=5) +@pytest.mark.asyncio +async def test_read_rows_stream(table, temp_rows): + """ + Ensure that the read_rows_stream method works + """ + await temp_rows.add_row(b"row_key_1") + await temp_rows.add_row(b"row_key_2") + + # full table scan + generator = await table.read_rows_stream({}) + first_row = await generator.__anext__() + second_row = await generator.__anext__() + assert first_row.row_key == b"row_key_1" + assert second_row.row_key == b"row_key_2" + with pytest.raises(StopAsyncIteration): + await generator.__anext__() + + +@pytest.mark.usefixtures("table") +@retry.AsyncRetry(predicate=retry.if_exception_type(ClientError), initial=1, maximum=5) +@pytest.mark.asyncio +async def test_read_rows(table, temp_rows): + """ + Ensure that the read_rows method works + """ + await temp_rows.add_row(b"row_key_1") + await temp_rows.add_row(b"row_key_2") + # full table scan + row_list = await table.read_rows({}) + assert len(row_list) == 2 + assert row_list[0].row_key == b"row_key_1" + assert row_list[1].row_key == b"row_key_2" + + +@pytest.mark.usefixtures("table") +@retry.AsyncRetry(predicate=retry.if_exception_type(ClientError), initial=1, maximum=5) 
+@pytest.mark.asyncio +async def test_read_rows_sharded_simple(table, temp_rows): + """ + Test read rows sharded with two queries + """ + from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery + + await temp_rows.add_row(b"a") + await temp_rows.add_row(b"b") + await temp_rows.add_row(b"c") + await temp_rows.add_row(b"d") + query1 = ReadRowsQuery(row_keys=[b"a", b"c"]) + query2 = ReadRowsQuery(row_keys=[b"b", b"d"]) + row_list = await table.read_rows_sharded([query1, query2]) + assert len(row_list) == 4 + assert row_list[0].row_key == b"a" + assert row_list[1].row_key == b"c" + assert row_list[2].row_key == b"b" + assert row_list[3].row_key == b"d" + + +@pytest.mark.usefixtures("table") +@retry.AsyncRetry(predicate=retry.if_exception_type(ClientError), initial=1, maximum=5) +@pytest.mark.asyncio +async def test_read_rows_sharded_from_sample(table, temp_rows): + """ + Test end-to-end sharding + """ + from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery + from google.cloud.bigtable.data.read_rows_query import RowRange + + await temp_rows.add_row(b"a") + await temp_rows.add_row(b"b") + await temp_rows.add_row(b"c") + await temp_rows.add_row(b"d") + + table_shard_keys = await table.sample_row_keys() + query = ReadRowsQuery(row_ranges=[RowRange(start_key=b"b", end_key=b"z")]) + shard_queries = query.shard(table_shard_keys) + row_list = await table.read_rows_sharded(shard_queries) + assert len(row_list) == 3 + assert row_list[0].row_key == b"b" + assert row_list[1].row_key == b"c" + assert row_list[2].row_key == b"d" + + +@pytest.mark.usefixtures("table") +@retry.AsyncRetry(predicate=retry.if_exception_type(ClientError), initial=1, maximum=5) +@pytest.mark.asyncio +async def test_read_rows_sharded_filters_limits(table, temp_rows): + """ + Test read rows sharded with filters and limits + """ + from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery + from google.cloud.bigtable.data.row_filters import ApplyLabelFilter + + await temp_rows.add_row(b"a") + await temp_rows.add_row(b"b") + await temp_rows.add_row(b"c") + await temp_rows.add_row(b"d") + + label_filter1 = ApplyLabelFilter("first") + label_filter2 = ApplyLabelFilter("second") + query1 = ReadRowsQuery(row_keys=[b"a", b"c"], limit=1, row_filter=label_filter1) + query2 = ReadRowsQuery(row_keys=[b"b", b"d"], row_filter=label_filter2) + row_list = await table.read_rows_sharded([query1, query2]) + assert len(row_list) == 3 + assert row_list[0].row_key == b"a" + assert row_list[1].row_key == b"b" + assert row_list[2].row_key == b"d" + assert row_list[0][0].labels == ["first"] + assert row_list[1][0].labels == ["second"] + assert row_list[2][0].labels == ["second"] + + +@pytest.mark.usefixtures("table") +@retry.AsyncRetry(predicate=retry.if_exception_type(ClientError), initial=1, maximum=5) +@pytest.mark.asyncio +async def test_read_rows_range_query(table, temp_rows): + """ + Ensure that the read_rows method works + """ + from google.cloud.bigtable.data import ReadRowsQuery + from google.cloud.bigtable.data import RowRange + + await temp_rows.add_row(b"a") + await temp_rows.add_row(b"b") + await temp_rows.add_row(b"c") + await temp_rows.add_row(b"d") + # full table scan + query = ReadRowsQuery(row_ranges=RowRange(start_key=b"b", end_key=b"d")) + row_list = await table.read_rows(query) + assert len(row_list) == 2 + assert row_list[0].row_key == b"b" + assert row_list[1].row_key == b"c" + + +@pytest.mark.usefixtures("table") +@retry.AsyncRetry(predicate=retry.if_exception_type(ClientError), initial=1, 
maximum=5) +@pytest.mark.asyncio +async def test_read_rows_single_key_query(table, temp_rows): + """ + Ensure that the read_rows method works with specified query + """ + from google.cloud.bigtable.data import ReadRowsQuery + + await temp_rows.add_row(b"a") + await temp_rows.add_row(b"b") + await temp_rows.add_row(b"c") + await temp_rows.add_row(b"d") + # retrieve specific keys + query = ReadRowsQuery(row_keys=[b"a", b"c"]) + row_list = await table.read_rows(query) + assert len(row_list) == 2 + assert row_list[0].row_key == b"a" + assert row_list[1].row_key == b"c" + + +@pytest.mark.usefixtures("table") +@retry.AsyncRetry(predicate=retry.if_exception_type(ClientError), initial=1, maximum=5) +@pytest.mark.asyncio +async def test_read_rows_with_filter(table, temp_rows): + """ + ensure filters are applied + """ + from google.cloud.bigtable.data import ReadRowsQuery + from google.cloud.bigtable.data.row_filters import ApplyLabelFilter + + await temp_rows.add_row(b"a") + await temp_rows.add_row(b"b") + await temp_rows.add_row(b"c") + await temp_rows.add_row(b"d") + # retrieve keys with filter + expected_label = "test-label" + row_filter = ApplyLabelFilter(expected_label) + query = ReadRowsQuery(row_filter=row_filter) + row_list = await table.read_rows(query) + assert len(row_list) == 4 + for row in row_list: + assert row[0].labels == [expected_label] + + +@pytest.mark.usefixtures("table") +@pytest.mark.asyncio +async def test_read_rows_stream_close(table, temp_rows): + """ + Ensure that the read_rows_stream can be closed + """ + from google.cloud.bigtable.data import ReadRowsQuery + + await temp_rows.add_row(b"row_key_1") + await temp_rows.add_row(b"row_key_2") + # full table scan + query = ReadRowsQuery() + generator = await table.read_rows_stream(query) + # grab first row + first_row = await generator.__anext__() + assert first_row.row_key == b"row_key_1" + # close stream early + await generator.aclose() + with pytest.raises(StopAsyncIteration): + await generator.__anext__() + + +@pytest.mark.usefixtures("table") +@pytest.mark.asyncio +async def test_read_row(table, temp_rows): + """ + Test read_row (single row helper) + """ + from google.cloud.bigtable.data import Row + + await temp_rows.add_row(b"row_key_1", value=b"value") + row = await table.read_row(b"row_key_1") + assert isinstance(row, Row) + assert row.row_key == b"row_key_1" + assert row.cells[0].value == b"value" + + +@pytest.mark.skipif( + bool(os.environ.get(BIGTABLE_EMULATOR)), + reason="emulator doesn't raise InvalidArgument", +) +@pytest.mark.usefixtures("table") +@pytest.mark.asyncio +async def test_read_row_missing(table): + """ + Test read_row when row does not exist + """ + from google.api_core import exceptions + + row_key = "row_key_not_exist" + result = await table.read_row(row_key) + assert result is None + with pytest.raises(exceptions.InvalidArgument) as e: + await table.read_row("") + assert "Row keys must be non-empty" in str(e) + + +@pytest.mark.usefixtures("table") +@pytest.mark.asyncio +async def test_read_row_w_filter(table, temp_rows): + """ + Test read_row (single row helper) + """ + from google.cloud.bigtable.data import Row + from google.cloud.bigtable.data.row_filters import ApplyLabelFilter + + await temp_rows.add_row(b"row_key_1", value=b"value") + expected_label = "test-label" + label_filter = ApplyLabelFilter(expected_label) + row = await table.read_row(b"row_key_1", row_filter=label_filter) + assert isinstance(row, Row) + assert row.row_key == b"row_key_1" + assert row.cells[0].value == b"value" + 
assert row.cells[0].labels == [expected_label] + + +@pytest.mark.skipif( + bool(os.environ.get(BIGTABLE_EMULATOR)), + reason="emulator doesn't raise InvalidArgument", +) +@pytest.mark.usefixtures("table") +@pytest.mark.asyncio +async def test_row_exists(table, temp_rows): + from google.api_core import exceptions + + """Test row_exists with rows that exist and don't exist""" + assert await table.row_exists(b"row_key_1") is False + await temp_rows.add_row(b"row_key_1") + assert await table.row_exists(b"row_key_1") is True + assert await table.row_exists("row_key_1") is True + assert await table.row_exists(b"row_key_2") is False + assert await table.row_exists("row_key_2") is False + assert await table.row_exists("3") is False + await temp_rows.add_row(b"3") + assert await table.row_exists(b"3") is True + with pytest.raises(exceptions.InvalidArgument) as e: + await table.row_exists("") + assert "Row keys must be non-empty" in str(e) + + +@pytest.mark.usefixtures("table") +@retry.AsyncRetry(predicate=retry.if_exception_type(ClientError), initial=1, maximum=5) +@pytest.mark.parametrize( + "cell_value,filter_input,expect_match", + [ + (b"abc", b"abc", True), + (b"abc", "abc", True), + (b".", ".", True), + (".*", ".*", True), + (".*", b".*", True), + ("a", ".*", False), + (b".*", b".*", True), + (r"\a", r"\a", True), + (b"\xe2\x98\x83", "☃", True), + ("☃", "☃", True), + (r"\C☃", r"\C☃", True), + (1, 1, True), + (2, 1, False), + (68, 68, True), + ("D", 68, False), + (68, "D", False), + (-1, -1, True), + (2852126720, 2852126720, True), + (-1431655766, -1431655766, True), + (-1431655766, -1, False), + ], +) +@pytest.mark.asyncio +async def test_literal_value_filter( + table, temp_rows, cell_value, filter_input, expect_match +): + """ + Literal value filter does complex escaping on re2 strings. + Make sure inputs are properly interpreted by the server + """ + from google.cloud.bigtable.data.row_filters import LiteralValueFilter + from google.cloud.bigtable.data import ReadRowsQuery + + f = LiteralValueFilter(filter_input) + await temp_rows.add_row(b"row_key_1", value=cell_value) + query = ReadRowsQuery(row_filter=f) + row_list = await table.read_rows(query) + assert len(row_list) == bool( + expect_match + ), f"row {type(cell_value)}({cell_value}) not found with {type(filter_input)}({filter_input}) filter" diff --git a/packages/google-cloud-bigtable/tests/system/v2_client/__init__.py b/packages/google-cloud-bigtable/tests/system/v2_client/__init__.py new file mode 100644 index 000000000000..4de65971c238 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/system/v2_client/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
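(Editor's aside, not part of the patch: the integer rows in the parametrize table above behave as they do because integer cell values are written as 64-bit big-endian signed bytes — the conversion TempRowBuilder.add_row applies — and the filter input appears to be encoded the same way. That reading of the filter is inferred from the expected results, not from documented behavior.)

    # Why (68, "D") is expected not to match even though chr(68) == "D":
    encoded = (68).to_bytes(8, "big", signed=True)
    print(encoded)          # b'\x00\x00\x00\x00\x00\x00\x00D'
    print(encoded == b"D")  # False: the full 8-byte encoding is compared
    print((-1).to_bytes(8, "big", signed=True))  # b'\xff\xff\xff\xff\xff\xff\xff\xff'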
+# diff --git a/packages/google-cloud-bigtable/tests/system/_helpers.py b/packages/google-cloud-bigtable/tests/system/v2_client/_helpers.py similarity index 100% rename from packages/google-cloud-bigtable/tests/system/_helpers.py rename to packages/google-cloud-bigtable/tests/system/v2_client/_helpers.py diff --git a/packages/google-cloud-bigtable/tests/system/v2_client/conftest.py b/packages/google-cloud-bigtable/tests/system/v2_client/conftest.py new file mode 100644 index 000000000000..f39fcba88962 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/system/v2_client/conftest.py @@ -0,0 +1,209 @@ +# Copyright 2011 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os + +import pytest +from test_utils.system import unique_resource_id + +from google.cloud.bigtable.client import Client +from google.cloud.environment_vars import BIGTABLE_EMULATOR + +from . import _helpers + + +@pytest.fixture(scope="session") +def in_emulator(): + return os.getenv(BIGTABLE_EMULATOR) is not None + + +@pytest.fixture(scope="session") +def kms_key_name(): + return os.getenv("KMS_KEY_NAME") + + +@pytest.fixture(scope="session") +def with_kms_key_name(kms_key_name): + if kms_key_name is None: + pytest.skip("Test requires KMS_KEY_NAME environment variable") + return kms_key_name + + +@pytest.fixture(scope="session") +def skip_on_emulator(in_emulator): + if in_emulator: + pytest.skip("Emulator does not support this feature") + + +@pytest.fixture(scope="session") +def unique_suffix(): + return unique_resource_id("-") + + +@pytest.fixture(scope="session") +def location_id(): + return "us-central1-c" + + +@pytest.fixture(scope="session") +def serve_nodes(): + return 3 + + +@pytest.fixture(scope="session") +def label_key(): + return "python-system" + + +@pytest.fixture(scope="session") +def instance_labels(label_key): + return {label_key: _helpers.label_stamp()} + + +@pytest.fixture(scope="session") +def admin_client(): + return Client(admin=True) + + +@pytest.fixture(scope="session") +def service_account(admin_client): + from google.oauth2.service_account import Credentials + + if not isinstance(admin_client._credentials, Credentials): + pytest.skip("These tests require a service account credential") + return admin_client._credentials + + +@pytest.fixture(scope="session") +def admin_instance_id(unique_suffix): + return f"g-c-p{unique_suffix}" + + +@pytest.fixture(scope="session") +def admin_cluster_id(admin_instance_id): + return f"{admin_instance_id}-cluster" + + +@pytest.fixture(scope="session") +def admin_instance(admin_client, admin_instance_id, instance_labels): + return admin_client.instance(admin_instance_id, labels=instance_labels) + + +@pytest.fixture(scope="session") +def admin_cluster(admin_instance, admin_cluster_id, location_id, serve_nodes): + return admin_instance.cluster( + admin_cluster_id, + location_id=location_id, + serve_nodes=serve_nodes, + ) + + +@pytest.fixture(scope="session") +def admin_cluster_with_autoscaling( + admin_instance, + admin_cluster_id, + location_id, + 
min_serve_nodes, + max_serve_nodes, + cpu_utilization_percent, +): + return admin_instance.cluster( + admin_cluster_id, + location_id=location_id, + min_serve_nodes=min_serve_nodes, + max_serve_nodes=max_serve_nodes, + cpu_utilization_percent=cpu_utilization_percent, + ) + + +@pytest.fixture(scope="session") +def admin_instance_populated(admin_instance, admin_cluster, in_emulator): + # Emulator does not support instance admin operations (create / delete). + # See: https://cloud.google.com/bigtable/docs/emulator + if not in_emulator: + operation = admin_instance.create(clusters=[admin_cluster]) + operation.result(timeout=240) + + yield admin_instance + + if not in_emulator: + _helpers.retry_429(admin_instance.delete)() + + +@pytest.fixture(scope="session") +def data_client(): + return Client(admin=False) + + +@pytest.fixture(scope="session") +def data_instance_id(unique_suffix): + return f"g-c-p-d{unique_suffix}" + + +@pytest.fixture(scope="session") +def data_cluster_id(data_instance_id): + return f"{data_instance_id}-cluster" + + +@pytest.fixture(scope="session") +def data_instance_populated( + admin_client, + data_instance_id, + instance_labels, + data_cluster_id, + location_id, + serve_nodes, + in_emulator, +): + instance = admin_client.instance(data_instance_id, labels=instance_labels) + # Emulator does not support instance admin operations (create / delete). + # See: https://cloud.google.com/bigtable/docs/emulator + if not in_emulator: + cluster = instance.cluster( + data_cluster_id, + location_id=location_id, + serve_nodes=serve_nodes, + ) + operation = instance.create(clusters=[cluster]) + operation.result(timeout=240) + + yield instance + + if not in_emulator: + _helpers.retry_429(instance.delete)() + + +@pytest.fixture(scope="function") +def instances_to_delete(): + instances_to_delete = [] + + yield instances_to_delete + + for instance in instances_to_delete: + _helpers.retry_429(instance.delete)() + + +@pytest.fixture(scope="session") +def min_serve_nodes(in_emulator): + return 1 + + +@pytest.fixture(scope="session") +def max_serve_nodes(in_emulator): + return 8 + + +@pytest.fixture(scope="session") +def cpu_utilization_percent(in_emulator): + return 10 diff --git a/packages/google-cloud-bigtable/tests/system/test_data_api.py b/packages/google-cloud-bigtable/tests/system/v2_client/test_data_api.py similarity index 100% rename from packages/google-cloud-bigtable/tests/system/test_data_api.py rename to packages/google-cloud-bigtable/tests/system/v2_client/test_data_api.py diff --git a/packages/google-cloud-bigtable/tests/system/test_instance_admin.py b/packages/google-cloud-bigtable/tests/system/v2_client/test_instance_admin.py similarity index 100% rename from packages/google-cloud-bigtable/tests/system/test_instance_admin.py rename to packages/google-cloud-bigtable/tests/system/v2_client/test_instance_admin.py diff --git a/packages/google-cloud-bigtable/tests/system/test_table_admin.py b/packages/google-cloud-bigtable/tests/system/v2_client/test_table_admin.py similarity index 100% rename from packages/google-cloud-bigtable/tests/system/test_table_admin.py rename to packages/google-cloud-bigtable/tests/system/v2_client/test_table_admin.py diff --git a/packages/google-cloud-bigtable/tests/unit/data/__init__.py b/packages/google-cloud-bigtable/tests/unit/data/__init__.py new file mode 100644 index 000000000000..89a37dc92c5a --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/data/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# 
Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/packages/google-cloud-bigtable/tests/unit/data/_async/test__mutate_rows.py b/packages/google-cloud-bigtable/tests/unit/data/_async/test__mutate_rows.py new file mode 100644 index 000000000000..e03028c45257 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/data/_async/test__mutate_rows.py @@ -0,0 +1,378 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest + +from google.cloud.bigtable_v2.types import MutateRowsResponse +from google.rpc import status_pb2 +import google.api_core.exceptions as core_exceptions + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock # type: ignore +except ImportError: # pragma: NO COVER + import mock # type: ignore + from mock import AsyncMock # type: ignore + + +def _make_mutation(count=1, size=1): + mutation = mock.Mock() + mutation.size.return_value = size + mutation.mutations = [mock.Mock()] * count + return mutation + + +class TestMutateRowsOperation: + def _target_class(self): + from google.cloud.bigtable.data._async._mutate_rows import ( + _MutateRowsOperationAsync, + ) + + return _MutateRowsOperationAsync + + def _make_one(self, *args, **kwargs): + if not args: + kwargs["gapic_client"] = kwargs.pop("gapic_client", mock.Mock()) + kwargs["table"] = kwargs.pop("table", AsyncMock()) + kwargs["operation_timeout"] = kwargs.pop("operation_timeout", 5) + kwargs["attempt_timeout"] = kwargs.pop("attempt_timeout", 0.1) + kwargs["retryable_exceptions"] = kwargs.pop("retryable_exceptions", ()) + kwargs["mutation_entries"] = kwargs.pop("mutation_entries", []) + return self._target_class()(*args, **kwargs) + + async def _mock_stream(self, mutation_list, error_dict): + for idx, entry in enumerate(mutation_list): + code = error_dict.get(idx, 0) + yield MutateRowsResponse( + entries=[ + MutateRowsResponse.Entry( + index=idx, status=status_pb2.Status(code=code) + ) + ] + ) + + def _make_mock_gapic(self, mutation_list, error_dict=None): + mock_fn = AsyncMock() + if error_dict is None: + error_dict = {} + mock_fn.side_effect = lambda *args, **kwargs: self._mock_stream( + mutation_list, error_dict + ) + return mock_fn + + def test_ctor(self): + """ + test that constructor sets all the attributes correctly + """ + from google.cloud.bigtable.data._async._mutate_rows import _EntryWithProto + from google.cloud.bigtable.data.exceptions import _MutateRowsIncomplete + from google.api_core.exceptions import 
DeadlineExceeded + from google.api_core.exceptions import Aborted + + client = mock.Mock() + table = mock.Mock() + entries = [_make_mutation(), _make_mutation()] + operation_timeout = 0.05 + attempt_timeout = 0.01 + retryable_exceptions = () + instance = self._make_one( + client, + table, + entries, + operation_timeout, + attempt_timeout, + retryable_exceptions, + ) + # running gapic_fn should trigger a client call + assert client.mutate_rows.call_count == 0 + instance._gapic_fn() + assert client.mutate_rows.call_count == 1 + # gapic_fn should call with table details + inner_kwargs = client.mutate_rows.call_args[1] + assert len(inner_kwargs) == 4 + assert inner_kwargs["table_name"] == table.table_name + assert inner_kwargs["app_profile_id"] == table.app_profile_id + assert inner_kwargs["retry"] is None + metadata = inner_kwargs["metadata"] + assert len(metadata) == 1 + assert metadata[0][0] == "x-goog-request-params" + assert str(table.table_name) in metadata[0][1] + assert str(table.app_profile_id) in metadata[0][1] + # entries should be passed down + entries_w_pb = [_EntryWithProto(e, e._to_pb()) for e in entries] + assert instance.mutations == entries_w_pb + # timeout_gen should generate per-attempt timeout + assert next(instance.timeout_generator) == attempt_timeout + # ensure predicate is set + assert instance.is_retryable is not None + assert instance.is_retryable(DeadlineExceeded("")) is False + assert instance.is_retryable(Aborted("")) is False + assert instance.is_retryable(_MutateRowsIncomplete("")) is True + assert instance.is_retryable(RuntimeError("")) is False + assert instance.remaining_indices == list(range(len(entries))) + assert instance.errors == {} + + def test_ctor_too_many_entries(self): + """ + should raise an error if an operation is created with more than 100,000 entries + """ + from google.cloud.bigtable.data._async._mutate_rows import ( + _MUTATE_ROWS_REQUEST_MUTATION_LIMIT, + ) + + assert _MUTATE_ROWS_REQUEST_MUTATION_LIMIT == 100_000 + + client = mock.Mock() + table = mock.Mock() + entries = [_make_mutation()] * _MUTATE_ROWS_REQUEST_MUTATION_LIMIT + operation_timeout = 0.05 + attempt_timeout = 0.01 + # no errors if at limit + self._make_one(client, table, entries, operation_timeout, attempt_timeout) + # raise error after crossing + with pytest.raises(ValueError) as e: + self._make_one( + client, + table, + entries + [_make_mutation()], + operation_timeout, + attempt_timeout, + ) + assert "mutate_rows requests can contain at most 100000 mutations" in str( + e.value + ) + assert "Found 100001" in str(e.value) + + @pytest.mark.asyncio + async def test_mutate_rows_operation(self): + """ + Test successful case of mutate_rows_operation + """ + client = mock.Mock() + table = mock.Mock() + entries = [_make_mutation(), _make_mutation()] + operation_timeout = 0.05 + cls = self._target_class() + with mock.patch( + f"{cls.__module__}.{cls.__name__}._run_attempt", AsyncMock() + ) as attempt_mock: + instance = self._make_one( + client, table, entries, operation_timeout, operation_timeout + ) + await instance.start() + assert attempt_mock.call_count == 1 + + @pytest.mark.parametrize( + "exc_type", [RuntimeError, ZeroDivisionError, core_exceptions.Forbidden] + ) + @pytest.mark.asyncio + async def test_mutate_rows_attempt_exception(self, exc_type): + """ + exceptions raised from attempt should be raised in MutationsExceptionGroup + """ + client = AsyncMock() + table = mock.Mock() + entries = [_make_mutation(), _make_mutation()] + operation_timeout = 0.05 + expected_exception = 
exc_type("test") + client.mutate_rows.side_effect = expected_exception + found_exc = None + try: + instance = self._make_one( + client, table, entries, operation_timeout, operation_timeout + ) + await instance._run_attempt() + except Exception as e: + found_exc = e + assert client.mutate_rows.call_count == 1 + assert type(found_exc) is exc_type + assert found_exc == expected_exception + assert len(instance.errors) == 2 + assert len(instance.remaining_indices) == 0 + + @pytest.mark.parametrize( + "exc_type", [RuntimeError, ZeroDivisionError, core_exceptions.Forbidden] + ) + @pytest.mark.asyncio + async def test_mutate_rows_exception(self, exc_type): + """ + exceptions raised from retryable should be raised in MutationsExceptionGroup + """ + from google.cloud.bigtable.data.exceptions import MutationsExceptionGroup + from google.cloud.bigtable.data.exceptions import FailedMutationEntryError + + client = mock.Mock() + table = mock.Mock() + entries = [_make_mutation(), _make_mutation()] + operation_timeout = 0.05 + expected_cause = exc_type("abort") + with mock.patch.object( + self._target_class(), + "_run_attempt", + AsyncMock(), + ) as attempt_mock: + attempt_mock.side_effect = expected_cause + found_exc = None + try: + instance = self._make_one( + client, table, entries, operation_timeout, operation_timeout + ) + await instance.start() + except MutationsExceptionGroup as e: + found_exc = e + assert attempt_mock.call_count == 1 + assert len(found_exc.exceptions) == 2 + assert isinstance(found_exc.exceptions[0], FailedMutationEntryError) + assert isinstance(found_exc.exceptions[1], FailedMutationEntryError) + assert found_exc.exceptions[0].__cause__ == expected_cause + assert found_exc.exceptions[1].__cause__ == expected_cause + + @pytest.mark.parametrize( + "exc_type", + [core_exceptions.DeadlineExceeded, RuntimeError], + ) + @pytest.mark.asyncio + async def test_mutate_rows_exception_retryable_eventually_pass(self, exc_type): + """ + If an exception fails but eventually passes, it should not raise an exception + """ + from google.cloud.bigtable.data._async._mutate_rows import ( + _MutateRowsOperationAsync, + ) + + client = mock.Mock() + table = mock.Mock() + entries = [_make_mutation()] + operation_timeout = 1 + expected_cause = exc_type("retry") + num_retries = 2 + with mock.patch.object( + _MutateRowsOperationAsync, + "_run_attempt", + AsyncMock(), + ) as attempt_mock: + attempt_mock.side_effect = [expected_cause] * num_retries + [None] + instance = self._make_one( + client, + table, + entries, + operation_timeout, + operation_timeout, + retryable_exceptions=(exc_type,), + ) + await instance.start() + assert attempt_mock.call_count == num_retries + 1 + + @pytest.mark.asyncio + async def test_mutate_rows_incomplete_ignored(self): + """ + MutateRowsIncomplete exceptions should not be added to error list + """ + from google.cloud.bigtable.data.exceptions import _MutateRowsIncomplete + from google.cloud.bigtable.data.exceptions import MutationsExceptionGroup + from google.api_core.exceptions import DeadlineExceeded + + client = mock.Mock() + table = mock.Mock() + entries = [_make_mutation()] + operation_timeout = 0.05 + with mock.patch.object( + self._target_class(), + "_run_attempt", + AsyncMock(), + ) as attempt_mock: + attempt_mock.side_effect = _MutateRowsIncomplete("ignored") + found_exc = None + try: + instance = self._make_one( + client, table, entries, operation_timeout, operation_timeout + ) + await instance.start() + except MutationsExceptionGroup as e: + found_exc = e + assert 
attempt_mock.call_count > 0 + assert len(found_exc.exceptions) == 1 + assert isinstance(found_exc.exceptions[0].__cause__, DeadlineExceeded) + + @pytest.mark.asyncio + async def test_run_attempt_single_entry_success(self): + """Test mutating a single entry""" + mutation = _make_mutation() + expected_timeout = 1.3 + mock_gapic_fn = self._make_mock_gapic({0: mutation}) + instance = self._make_one( + mutation_entries=[mutation], + attempt_timeout=expected_timeout, + ) + with mock.patch.object(instance, "_gapic_fn", mock_gapic_fn): + await instance._run_attempt() + assert len(instance.remaining_indices) == 0 + assert mock_gapic_fn.call_count == 1 + _, kwargs = mock_gapic_fn.call_args + assert kwargs["timeout"] == expected_timeout + assert kwargs["entries"] == [mutation._to_pb()] + + @pytest.mark.asyncio + async def test_run_attempt_empty_request(self): + """Calling with no mutations should result in no API calls""" + mock_gapic_fn = self._make_mock_gapic([]) + instance = self._make_one( + mutation_entries=[], + ) + await instance._run_attempt() + assert mock_gapic_fn.call_count == 0 + + @pytest.mark.asyncio + async def test_run_attempt_partial_success_retryable(self): + """Some entries succeed, but one fails. Should report the proper index, and raise incomplete exception""" + from google.cloud.bigtable.data.exceptions import _MutateRowsIncomplete + + success_mutation = _make_mutation() + success_mutation_2 = _make_mutation() + failure_mutation = _make_mutation() + mutations = [success_mutation, failure_mutation, success_mutation_2] + mock_gapic_fn = self._make_mock_gapic(mutations, error_dict={1: 300}) + instance = self._make_one( + mutation_entries=mutations, + ) + instance.is_retryable = lambda x: True + with mock.patch.object(instance, "_gapic_fn", mock_gapic_fn): + with pytest.raises(_MutateRowsIncomplete): + await instance._run_attempt() + assert instance.remaining_indices == [1] + assert 0 not in instance.errors + assert len(instance.errors[1]) == 1 + assert instance.errors[1][0].grpc_status_code == 300 + assert 2 not in instance.errors + + @pytest.mark.asyncio + async def test_run_attempt_partial_success_non_retryable(self): + """Some entries succeed, but one fails. Exception marked as non-retryable. Do not raise incomplete error""" + success_mutation = _make_mutation() + success_mutation_2 = _make_mutation() + failure_mutation = _make_mutation() + mutations = [success_mutation, failure_mutation, success_mutation_2] + mock_gapic_fn = self._make_mock_gapic(mutations, error_dict={1: 300}) + instance = self._make_one( + mutation_entries=mutations, + ) + instance.is_retryable = lambda x: False + with mock.patch.object(instance, "_gapic_fn", mock_gapic_fn): + await instance._run_attempt() + assert instance.remaining_indices == [] + assert 0 not in instance.errors + assert len(instance.errors[1]) == 1 + assert instance.errors[1][0].grpc_status_code == 300 + assert 2 not in instance.errors diff --git a/packages/google-cloud-bigtable/tests/unit/data/_async/test__read_rows.py b/packages/google-cloud-bigtable/tests/unit/data/_async/test__read_rows.py new file mode 100644 index 000000000000..4e7797c6d7c2 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/data/_async/test__read_rows.py @@ -0,0 +1,391 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
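
The _MutateRowsOperationAsync tests above cover the internal retry machinery behind bulk mutations. For context, a minimal sketch of the public call path that machinery serves, assuming the data API's RowMutationEntry, SetCell, and bulk_mutate_rows helpers (the resource ids are placeholders, and the exact call shapes are stated as assumptions rather than taken from this patch):

    from google.cloud.bigtable.data._async.client import BigtableDataClientAsync
    from google.cloud.bigtable.data.mutations import RowMutationEntry, SetCell
    from google.cloud.bigtable.data.exceptions import MutationsExceptionGroup

    async def write_one_row():
        async with BigtableDataClientAsync(project="my-project") as client:
            async with client.get_table("my-instance", "my-table") as table:
                entry = RowMutationEntry(
                    b"row-key-1", [SetCell("family", b"qualifier", b"new-value")]
                )
                try:
                    # A single request may carry at most 100,000 mutations
                    # (see the limit test above).
                    await table.bulk_mutate_rows([entry])
                except MutationsExceptionGroup as group:
                    # Each failed entry surfaces as a FailedMutationEntryError
                    # inside the group.
                    for failed in group.exceptions:
                        print(failed)
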
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest + +from google.cloud.bigtable.data._async._read_rows import _ReadRowsOperationAsync + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock # type: ignore +except ImportError: # pragma: NO COVER + import mock # type: ignore + from mock import AsyncMock # type: ignore # noqa F401 + +TEST_FAMILY = "family_name" +TEST_QUALIFIER = b"qualifier" +TEST_TIMESTAMP = 123456789 +TEST_LABELS = ["label1", "label2"] + + +class TestReadRowsOperation: + """ + Tests helper functions in the ReadRowsOperation class + in-depth merging logic in merge_row_response_stream and _read_rows_retryable_attempt + is tested in test_read_rows_acceptance test_client_read_rows, and conformance tests + """ + + @staticmethod + def _get_target_class(): + from google.cloud.bigtable.data._async._read_rows import _ReadRowsOperationAsync + + return _ReadRowsOperationAsync + + def _make_one(self, *args, **kwargs): + return self._get_target_class()(*args, **kwargs) + + def test_ctor(self): + from google.cloud.bigtable.data import ReadRowsQuery + + row_limit = 91 + query = ReadRowsQuery(limit=row_limit) + client = mock.Mock() + client.read_rows = mock.Mock() + client.read_rows.return_value = None + table = mock.Mock() + table._client = client + table.table_name = "test_table" + table.app_profile_id = "test_profile" + expected_operation_timeout = 42 + expected_request_timeout = 44 + time_gen_mock = mock.Mock() + with mock.patch( + "google.cloud.bigtable.data._async._read_rows._attempt_timeout_generator", + time_gen_mock, + ): + instance = self._make_one( + query, + table, + operation_timeout=expected_operation_timeout, + attempt_timeout=expected_request_timeout, + ) + assert time_gen_mock.call_count == 1 + time_gen_mock.assert_called_once_with( + expected_request_timeout, expected_operation_timeout + ) + assert instance._last_yielded_row_key is None + assert instance._remaining_count == row_limit + assert instance.operation_timeout == expected_operation_timeout + assert client.read_rows.call_count == 0 + assert instance._metadata == [ + ( + "x-goog-request-params", + "table_name=test_table&app_profile_id=test_profile", + ) + ] + assert instance.request.table_name == table.table_name + assert instance.request.app_profile_id == table.app_profile_id + assert instance.request.rows_limit == row_limit + + @pytest.mark.parametrize( + "in_keys,last_key,expected", + [ + (["b", "c", "d"], "a", ["b", "c", "d"]), + (["a", "b", "c"], "b", ["c"]), + (["a", "b", "c"], "c", []), + (["a", "b", "c"], "d", []), + (["d", "c", "b", "a"], "b", ["d", "c"]), + ], + ) + def test_revise_request_rowset_keys(self, in_keys, last_key, expected): + from google.cloud.bigtable_v2.types import RowSet as RowSetPB + from google.cloud.bigtable_v2.types import RowRange as RowRangePB + + in_keys = [key.encode("utf-8") for key in in_keys] + expected = [key.encode("utf-8") for key in expected] + last_key = last_key.encode("utf-8") + + sample_range = RowRangePB(start_key_open=last_key) + row_set = RowSetPB(row_keys=in_keys, row_ranges=[sample_range]) + revised = 
self._get_target_class()._revise_request_rowset(row_set, last_key) + assert revised.row_keys == expected + assert revised.row_ranges == [sample_range] + + @pytest.mark.parametrize( + "in_ranges,last_key,expected", + [ + ( + [{"start_key_open": "b", "end_key_closed": "d"}], + "a", + [{"start_key_open": "b", "end_key_closed": "d"}], + ), + ( + [{"start_key_closed": "b", "end_key_closed": "d"}], + "a", + [{"start_key_closed": "b", "end_key_closed": "d"}], + ), + ( + [{"start_key_open": "a", "end_key_closed": "d"}], + "b", + [{"start_key_open": "b", "end_key_closed": "d"}], + ), + ( + [{"start_key_closed": "a", "end_key_open": "d"}], + "b", + [{"start_key_open": "b", "end_key_open": "d"}], + ), + ( + [{"start_key_closed": "b", "end_key_closed": "d"}], + "b", + [{"start_key_open": "b", "end_key_closed": "d"}], + ), + ([{"start_key_closed": "b", "end_key_closed": "d"}], "d", []), + ([{"start_key_closed": "b", "end_key_open": "d"}], "d", []), + ([{"start_key_closed": "b", "end_key_closed": "d"}], "e", []), + ([{"start_key_closed": "b"}], "z", [{"start_key_open": "z"}]), + ([{"start_key_closed": "b"}], "a", [{"start_key_closed": "b"}]), + ( + [{"end_key_closed": "z"}], + "a", + [{"start_key_open": "a", "end_key_closed": "z"}], + ), + ( + [{"end_key_open": "z"}], + "a", + [{"start_key_open": "a", "end_key_open": "z"}], + ), + ], + ) + def test_revise_request_rowset_ranges(self, in_ranges, last_key, expected): + from google.cloud.bigtable_v2.types import RowSet as RowSetPB + from google.cloud.bigtable_v2.types import RowRange as RowRangePB + + # convert to protobuf + next_key = (last_key + "a").encode("utf-8") + last_key = last_key.encode("utf-8") + in_ranges = [ + RowRangePB(**{k: v.encode("utf-8") for k, v in r.items()}) + for r in in_ranges + ] + expected = [ + RowRangePB(**{k: v.encode("utf-8") for k, v in r.items()}) for r in expected + ] + + row_set = RowSetPB(row_ranges=in_ranges, row_keys=[next_key]) + revised = self._get_target_class()._revise_request_rowset(row_set, last_key) + assert revised.row_keys == [next_key] + assert revised.row_ranges == expected + + @pytest.mark.parametrize("last_key", ["a", "b", "c"]) + def test_revise_request_full_table(self, last_key): + from google.cloud.bigtable_v2.types import RowSet as RowSetPB + from google.cloud.bigtable_v2.types import RowRange as RowRangePB + + # convert to protobuf + last_key = last_key.encode("utf-8") + row_set = RowSetPB() + for selected_set in [row_set, None]: + revised = self._get_target_class()._revise_request_rowset( + selected_set, last_key + ) + assert revised.row_keys == [] + assert len(revised.row_ranges) == 1 + assert revised.row_ranges[0] == RowRangePB(start_key_open=last_key) + + def test_revise_to_empty_rowset(self): + """revising to an empty rowset should raise error""" + from google.cloud.bigtable.data.exceptions import _RowSetComplete + from google.cloud.bigtable_v2.types import RowSet as RowSetPB + from google.cloud.bigtable_v2.types import RowRange as RowRangePB + + row_keys = [b"a", b"b", b"c"] + row_range = RowRangePB(end_key_open=b"c") + row_set = RowSetPB(row_keys=row_keys, row_ranges=[row_range]) + with pytest.raises(_RowSetComplete): + self._get_target_class()._revise_request_rowset(row_set, b"d") + + @pytest.mark.parametrize( + "start_limit,emit_num,expected_limit", + [ + (10, 0, 10), + (10, 1, 9), + (10, 10, 0), + (None, 10, None), + (None, 0, None), + (4, 2, 2), + ], + ) + @pytest.mark.asyncio + async def test_revise_limit(self, start_limit, emit_num, expected_limit): + """ + revise_limit should revise the 
request's limit field + - if limit is 0 (unlimited), it should never be revised + - if start_limit-emit_num == 0, the request should end early + - if the number emitted exceeds the new limit, an exception should + should be raised (tested in test_revise_limit_over_limit) + """ + from google.cloud.bigtable.data import ReadRowsQuery + from google.cloud.bigtable_v2.types import ReadRowsResponse + + async def awaitable_stream(): + async def mock_stream(): + for i in range(emit_num): + yield ReadRowsResponse( + chunks=[ + ReadRowsResponse.CellChunk( + row_key=str(i).encode(), + family_name="b", + qualifier=b"c", + value=b"d", + commit_row=True, + ) + ] + ) + + return mock_stream() + + query = ReadRowsQuery(limit=start_limit) + table = mock.Mock() + table.table_name = "table_name" + table.app_profile_id = "app_profile_id" + instance = self._make_one(query, table, 10, 10) + assert instance._remaining_count == start_limit + # read emit_num rows + async for val in instance.chunk_stream(awaitable_stream()): + pass + assert instance._remaining_count == expected_limit + + @pytest.mark.parametrize("start_limit,emit_num", [(5, 10), (3, 9), (1, 10)]) + @pytest.mark.asyncio + async def test_revise_limit_over_limit(self, start_limit, emit_num): + """ + Should raise runtime error if we get in state where emit_num > start_num + (unless start_num == 0, which represents unlimited) + """ + from google.cloud.bigtable.data import ReadRowsQuery + from google.cloud.bigtable_v2.types import ReadRowsResponse + from google.cloud.bigtable.data.exceptions import InvalidChunk + + async def awaitable_stream(): + async def mock_stream(): + for i in range(emit_num): + yield ReadRowsResponse( + chunks=[ + ReadRowsResponse.CellChunk( + row_key=str(i).encode(), + family_name="b", + qualifier=b"c", + value=b"d", + commit_row=True, + ) + ] + ) + + return mock_stream() + + query = ReadRowsQuery(limit=start_limit) + table = mock.Mock() + table.table_name = "table_name" + table.app_profile_id = "app_profile_id" + instance = self._make_one(query, table, 10, 10) + assert instance._remaining_count == start_limit + with pytest.raises(InvalidChunk) as e: + # read emit_num rows + async for val in instance.chunk_stream(awaitable_stream()): + pass + assert "emit count exceeds row limit" in str(e.value) + + @pytest.mark.asyncio + async def test_aclose(self): + """ + should be able to close a stream safely with aclose. 
+ Closed generators should raise StopAsyncIteration on next yield + """ + + async def mock_stream(): + while True: + yield 1 + + with mock.patch.object( + _ReadRowsOperationAsync, "_read_rows_attempt" + ) as mock_attempt: + instance = self._make_one(mock.Mock(), mock.Mock(), 1, 1) + wrapped_gen = mock_stream() + mock_attempt.return_value = wrapped_gen + gen = instance.start_operation() + # read one row + await gen.__anext__() + await gen.aclose() + with pytest.raises(StopAsyncIteration): + await gen.__anext__() + # try calling a second time + await gen.aclose() + # ensure close was propagated to wrapped generator + with pytest.raises(StopAsyncIteration): + await wrapped_gen.__anext__() + + @pytest.mark.asyncio + async def test_retryable_ignore_repeated_rows(self): + """ + Duplicate rows should cause an invalid chunk error + """ + from google.cloud.bigtable.data._async._read_rows import _ReadRowsOperationAsync + from google.cloud.bigtable.data.exceptions import InvalidChunk + from google.cloud.bigtable_v2.types import ReadRowsResponse + + row_key = b"duplicate" + + async def mock_awaitable_stream(): + async def mock_stream(): + while True: + yield ReadRowsResponse( + chunks=[ + ReadRowsResponse.CellChunk(row_key=row_key, commit_row=True) + ] + ) + yield ReadRowsResponse( + chunks=[ + ReadRowsResponse.CellChunk(row_key=row_key, commit_row=True) + ] + ) + + return mock_stream() + + instance = mock.Mock() + instance._last_yielded_row_key = None + instance._remaining_count = None + stream = _ReadRowsOperationAsync.chunk_stream(instance, mock_awaitable_stream()) + await stream.__anext__() + with pytest.raises(InvalidChunk) as exc: + await stream.__anext__() + assert "row keys should be strictly increasing" in str(exc.value) + + +class MockStream(_ReadRowsOperationAsync): + """ + Mock a _ReadRowsOperationAsync stream for testing + """ + + def __init__(self, items=None, errors=None, operation_timeout=None): + self.transient_errors = errors + self.operation_timeout = operation_timeout + self.next_idx = 0 + if items is None: + items = list(range(10)) + self.items = items + + def __aiter__(self): + return self + + async def __anext__(self): + if self.next_idx >= len(self.items): + raise StopAsyncIteration + item = self.items[self.next_idx] + self.next_idx += 1 + if isinstance(item, Exception): + raise item + return item + + async def aclose(self): + pass diff --git a/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py b/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py new file mode 100644 index 000000000000..a0019947dc83 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py @@ -0,0 +1,2957 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from __future__ import annotations + +import grpc +import asyncio +import re +import sys + +import pytest + +from google.cloud.bigtable.data import mutations +from google.auth.credentials import AnonymousCredentials +from google.cloud.bigtable_v2.types import ReadRowsResponse +from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery +from google.api_core import exceptions as core_exceptions +from google.cloud.bigtable.data.exceptions import InvalidChunk +from google.cloud.bigtable.data.exceptions import _MutateRowsIncomplete +from google.cloud.bigtable.data import TABLE_DEFAULT + +from google.cloud.bigtable.data.read_modify_write_rules import IncrementRule +from google.cloud.bigtable.data.read_modify_write_rules import AppendValueRule + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock # type: ignore +except ImportError: # pragma: NO COVER + import mock # type: ignore + from mock import AsyncMock # type: ignore + +VENEER_HEADER_REGEX = re.compile( + r"gapic\/[0-9]+\.[\w.-]+ gax\/[0-9]+\.[\w.-]+ gccl\/[0-9]+\.[\w.-]+-data-async gl-python\/[0-9]+\.[\w.-]+ grpc\/[0-9]+\.[\w.-]+" +) + + +def _make_client(*args, use_emulator=True, **kwargs): + import os + from google.cloud.bigtable.data._async.client import BigtableDataClientAsync + + env_mask = {} + # by default, use emulator mode to avoid auth issues in CI + # emulator mode must be disabled by tests that check channel pooling/refresh background tasks + if use_emulator: + env_mask["BIGTABLE_EMULATOR_HOST"] = "localhost" + else: + # set some default values + kwargs["credentials"] = kwargs.get("credentials", AnonymousCredentials()) + kwargs["project"] = kwargs.get("project", "project-id") + with mock.patch.dict(os.environ, env_mask): + return BigtableDataClientAsync(*args, **kwargs) + + +class TestBigtableDataClientAsync: + def _get_target_class(self): + from google.cloud.bigtable.data._async.client import BigtableDataClientAsync + + return BigtableDataClientAsync + + def _make_one(self, *args, **kwargs): + return _make_client(*args, **kwargs) + + @pytest.mark.asyncio + async def test_ctor(self): + expected_project = "project-id" + expected_pool_size = 11 + expected_credentials = AnonymousCredentials() + client = self._make_one( + project="project-id", + pool_size=expected_pool_size, + credentials=expected_credentials, + use_emulator=False, + ) + await asyncio.sleep(0) + assert client.project == expected_project + assert len(client.transport._grpc_channel._pool) == expected_pool_size + assert not client._active_instances + assert len(client._channel_refresh_tasks) == expected_pool_size + assert client.transport._credentials == expected_credentials + await client.close() + + @pytest.mark.asyncio + async def test_ctor_super_inits(self): + from google.cloud.bigtable_v2.services.bigtable.async_client import ( + BigtableAsyncClient, + ) + from google.cloud.client import ClientWithProject + from google.api_core import client_options as client_options_lib + + project = "project-id" + pool_size = 11 + credentials = AnonymousCredentials() + client_options = {"api_endpoint": "foo.bar:1234"} + options_parsed = client_options_lib.from_dict(client_options) + transport_str = f"pooled_grpc_asyncio_{pool_size}" + with mock.patch.object(BigtableAsyncClient, "__init__") as bigtable_client_init: + bigtable_client_init.return_value = None + with mock.patch.object( + ClientWithProject, "__init__" + ) as client_project_init: + client_project_init.return_value = None + try: + 
self._make_one( + project=project, + pool_size=pool_size, + credentials=credentials, + client_options=options_parsed, + use_emulator=False, + ) + except AttributeError: + pass + # test gapic superclass init was called + assert bigtable_client_init.call_count == 1 + kwargs = bigtable_client_init.call_args[1] + assert kwargs["transport"] == transport_str + assert kwargs["credentials"] == credentials + assert kwargs["client_options"] == options_parsed + # test mixin superclass init was called + assert client_project_init.call_count == 1 + kwargs = client_project_init.call_args[1] + assert kwargs["project"] == project + assert kwargs["credentials"] == credentials + assert kwargs["client_options"] == options_parsed + + @pytest.mark.asyncio + async def test_ctor_dict_options(self): + from google.cloud.bigtable_v2.services.bigtable.async_client import ( + BigtableAsyncClient, + ) + from google.api_core.client_options import ClientOptions + + client_options = {"api_endpoint": "foo.bar:1234"} + with mock.patch.object(BigtableAsyncClient, "__init__") as bigtable_client_init: + try: + self._make_one(client_options=client_options) + except TypeError: + pass + bigtable_client_init.assert_called_once() + kwargs = bigtable_client_init.call_args[1] + called_options = kwargs["client_options"] + assert called_options.api_endpoint == "foo.bar:1234" + assert isinstance(called_options, ClientOptions) + with mock.patch.object( + self._get_target_class(), "_start_background_channel_refresh" + ) as start_background_refresh: + client = self._make_one(client_options=client_options, use_emulator=False) + start_background_refresh.assert_called_once() + await client.close() + + @pytest.mark.asyncio + async def test_veneer_grpc_headers(self): + # client_info should be populated with headers to + # detect as a veneer client + patch = mock.patch("google.api_core.gapic_v1.method_async.wrap_method") + with patch as gapic_mock: + client = self._make_one(project="project-id") + wrapped_call_list = gapic_mock.call_args_list + assert len(wrapped_call_list) > 0 + # each wrapped call should have veneer headers + for call in wrapped_call_list: + client_info = call.kwargs["client_info"] + assert client_info is not None, f"{call} has no client_info" + wrapped_user_agent_sorted = " ".join( + sorted(client_info.to_user_agent().split(" ")) + ) + assert VENEER_HEADER_REGEX.match( + wrapped_user_agent_sorted + ), f"'{wrapped_user_agent_sorted}' does not match {VENEER_HEADER_REGEX}" + await client.close() + + @pytest.mark.asyncio + async def test_channel_pool_creation(self): + pool_size = 14 + with mock.patch( + "google.api_core.grpc_helpers_async.create_channel" + ) as create_channel: + create_channel.return_value = AsyncMock() + client = self._make_one(project="project-id", pool_size=pool_size) + assert create_channel.call_count == pool_size + await client.close() + # channels should be unique + client = self._make_one(project="project-id", pool_size=pool_size) + pool_list = list(client.transport._grpc_channel._pool) + pool_set = set(client.transport._grpc_channel._pool) + assert len(pool_list) == len(pool_set) + await client.close() + + @pytest.mark.asyncio + async def test_channel_pool_rotation(self): + from google.cloud.bigtable_v2.services.bigtable.transports.pooled_grpc_asyncio import ( + PooledChannel, + ) + + pool_size = 7 + + with mock.patch.object(PooledChannel, "next_channel") as next_channel: + client = self._make_one(project="project-id", pool_size=pool_size) + assert len(client.transport._grpc_channel._pool) == pool_size + 
next_channel.reset_mock() + with mock.patch.object( + type(client.transport._grpc_channel._pool[0]), "unary_unary" + ) as unary_unary: + # calling an rpc `pool_size` times should use a different channel each time + channel_next = None + for i in range(pool_size): + channel_last = channel_next + channel_next = client.transport.grpc_channel._pool[i] + assert channel_last != channel_next + next_channel.return_value = channel_next + client.transport.ping_and_warm() + assert next_channel.call_count == i + 1 + unary_unary.assert_called_once() + unary_unary.reset_mock() + await client.close() + + @pytest.mark.asyncio + async def test_channel_pool_replace(self): + with mock.patch.object(asyncio, "sleep"): + pool_size = 7 + client = self._make_one(project="project-id", pool_size=pool_size) + for replace_idx in range(pool_size): + start_pool = [ + channel for channel in client.transport._grpc_channel._pool + ] + grace_period = 9 + with mock.patch.object( + type(client.transport._grpc_channel._pool[0]), "close" + ) as close: + new_channel = grpc.aio.insecure_channel("localhost:8080") + await client.transport.replace_channel( + replace_idx, grace=grace_period, new_channel=new_channel + ) + close.assert_called_once_with(grace=grace_period) + close.assert_awaited_once() + assert client.transport._grpc_channel._pool[replace_idx] == new_channel + for i in range(pool_size): + if i != replace_idx: + assert client.transport._grpc_channel._pool[i] == start_pool[i] + else: + assert client.transport._grpc_channel._pool[i] != start_pool[i] + await client.close() + + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + def test__start_background_channel_refresh_sync(self): + # should raise RuntimeError if called in a sync context + client = self._make_one(project="project-id", use_emulator=False) + with pytest.raises(RuntimeError): + client._start_background_channel_refresh() + + @pytest.mark.asyncio + async def test__start_background_channel_refresh_tasks_exist(self): + # if tasks exist, should do nothing + client = self._make_one(project="project-id", use_emulator=False) + assert len(client._channel_refresh_tasks) > 0 + with mock.patch.object(asyncio, "create_task") as create_task: + client._start_background_channel_refresh() + create_task.assert_not_called() + await client.close() + + @pytest.mark.asyncio + @pytest.mark.parametrize("pool_size", [1, 3, 7]) + async def test__start_background_channel_refresh(self, pool_size): + # should create background tasks for each channel + client = self._make_one( + project="project-id", pool_size=pool_size, use_emulator=False + ) + ping_and_warm = AsyncMock() + client._ping_and_warm_instances = ping_and_warm + client._start_background_channel_refresh() + assert len(client._channel_refresh_tasks) == pool_size + for task in client._channel_refresh_tasks: + assert isinstance(task, asyncio.Task) + await asyncio.sleep(0.1) + assert ping_and_warm.call_count == pool_size + for channel in client.transport._grpc_channel._pool: + ping_and_warm.assert_any_call(channel) + await client.close() + + @pytest.mark.asyncio + @pytest.mark.skipif( + sys.version_info < (3, 8), reason="Task.name requires python3.8 or higher" + ) + async def test__start_background_channel_refresh_tasks_names(self): + # if tasks exist, should do nothing + pool_size = 3 + client = self._make_one( + project="project-id", pool_size=pool_size, use_emulator=False + ) + for i in range(pool_size): + name = client._channel_refresh_tasks[i].get_name() + assert str(i) in name + assert "BigtableDataClientAsync channel 
refresh " in name + await client.close() + + @pytest.mark.asyncio + async def test__ping_and_warm_instances(self): + """ + test ping and warm with mocked asyncio.gather + """ + client_mock = mock.Mock() + with mock.patch.object(asyncio, "gather", AsyncMock()) as gather: + # simulate gather by returning the same number of items as passed in + gather.side_effect = lambda *args, **kwargs: [None for _ in args] + channel = mock.Mock() + # test with no instances + client_mock._active_instances = [] + result = await self._get_target_class()._ping_and_warm_instances( + client_mock, channel + ) + assert len(result) == 0 + gather.assert_called_once() + gather.assert_awaited_once() + assert not gather.call_args.args + assert gather.call_args.kwargs == {"return_exceptions": True} + # test with instances + client_mock._active_instances = [ + (mock.Mock(), mock.Mock(), mock.Mock()) + ] * 4 + gather.reset_mock() + channel.reset_mock() + result = await self._get_target_class()._ping_and_warm_instances( + client_mock, channel + ) + assert len(result) == 4 + gather.assert_called_once() + gather.assert_awaited_once() + assert len(gather.call_args.args) == 4 + # check grpc call arguments + grpc_call_args = channel.unary_unary().call_args_list + for idx, (_, kwargs) in enumerate(grpc_call_args): + ( + expected_instance, + expected_table, + expected_app_profile, + ) = client_mock._active_instances[idx] + request = kwargs["request"] + assert request["name"] == expected_instance + assert request["app_profile_id"] == expected_app_profile + metadata = kwargs["metadata"] + assert len(metadata) == 1 + assert metadata[0][0] == "x-goog-request-params" + assert ( + metadata[0][1] + == f"name={expected_instance}&app_profile_id={expected_app_profile}" + ) + + @pytest.mark.asyncio + async def test__ping_and_warm_single_instance(self): + """ + should be able to call ping and warm with single instance + """ + client_mock = mock.Mock() + with mock.patch.object(asyncio, "gather", AsyncMock()) as gather: + # simulate gather by returning the same number of items as passed in + gather.side_effect = lambda *args, **kwargs: [None for _ in args] + channel = mock.Mock() + # test with large set of instances + client_mock._active_instances = [mock.Mock()] * 100 + test_key = ("test-instance", "test-table", "test-app-profile") + result = await self._get_target_class()._ping_and_warm_instances( + client_mock, channel, test_key + ) + # should only have been called with test instance + assert len(result) == 1 + # check grpc call arguments + grpc_call_args = channel.unary_unary().call_args_list + assert len(grpc_call_args) == 1 + kwargs = grpc_call_args[0][1] + request = kwargs["request"] + assert request["name"] == "test-instance" + assert request["app_profile_id"] == "test-app-profile" + metadata = kwargs["metadata"] + assert len(metadata) == 1 + assert metadata[0][0] == "x-goog-request-params" + assert ( + metadata[0][1] == "name=test-instance&app_profile_id=test-app-profile" + ) + + @pytest.mark.asyncio + @pytest.mark.parametrize( + "refresh_interval, wait_time, expected_sleep", + [ + (0, 0, 0), + (0, 1, 0), + (10, 0, 10), + (10, 5, 5), + (10, 10, 0), + (10, 15, 0), + ], + ) + async def test__manage_channel_first_sleep( + self, refresh_interval, wait_time, expected_sleep + ): + # first sleep time should be `refresh_interval` seconds after client init + import time + + with mock.patch.object(time, "monotonic") as time: + time.return_value = 0 + with mock.patch.object(asyncio, "sleep") as sleep: + sleep.side_effect = asyncio.CancelledError 
+ try: + client = self._make_one(project="project-id") + client._channel_init_time = -wait_time + await client._manage_channel(0, refresh_interval, refresh_interval) + except asyncio.CancelledError: + pass + sleep.assert_called_once() + call_time = sleep.call_args[0][0] + assert ( + abs(call_time - expected_sleep) < 0.1 + ), f"refresh_interval: {refresh_interval}, wait_time: {wait_time}, expected_sleep: {expected_sleep}" + await client.close() + + @pytest.mark.asyncio + async def test__manage_channel_ping_and_warm(self): + """ + _manage channel should call ping and warm internally + """ + import time + + client_mock = mock.Mock() + client_mock._channel_init_time = time.monotonic() + channel_list = [mock.Mock(), mock.Mock()] + client_mock.transport.channels = channel_list + new_channel = mock.Mock() + client_mock.transport.grpc_channel._create_channel.return_value = new_channel + # should ping an warm all new channels, and old channels if sleeping + with mock.patch.object(asyncio, "sleep"): + # stop process after replace_channel is called + client_mock.transport.replace_channel.side_effect = asyncio.CancelledError + ping_and_warm = client_mock._ping_and_warm_instances = AsyncMock() + # should ping and warm old channel then new if sleep > 0 + try: + channel_idx = 1 + await self._get_target_class()._manage_channel( + client_mock, channel_idx, 10 + ) + except asyncio.CancelledError: + pass + # should have called at loop start, and after replacement + assert ping_and_warm.call_count == 2 + # should have replaced channel once + assert client_mock.transport.replace_channel.call_count == 1 + # make sure new and old channels were warmed + old_channel = channel_list[channel_idx] + assert old_channel != new_channel + called_with = [call[0][0] for call in ping_and_warm.call_args_list] + assert old_channel in called_with + assert new_channel in called_with + # should ping and warm instantly new channel only if not sleeping + ping_and_warm.reset_mock() + try: + await self._get_target_class()._manage_channel(client_mock, 0, 0, 0) + except asyncio.CancelledError: + pass + ping_and_warm.assert_called_once_with(new_channel) + + @pytest.mark.asyncio + @pytest.mark.parametrize( + "refresh_interval, num_cycles, expected_sleep", + [ + (None, 1, 60 * 35), + (10, 10, 100), + (10, 1, 10), + ], + ) + async def test__manage_channel_sleeps( + self, refresh_interval, num_cycles, expected_sleep + ): + # make sure that sleeps work as expected + import time + import random + + channel_idx = 1 + with mock.patch.object(random, "uniform") as uniform: + uniform.side_effect = lambda min_, max_: min_ + with mock.patch.object(time, "time") as time: + time.return_value = 0 + with mock.patch.object(asyncio, "sleep") as sleep: + sleep.side_effect = [None for i in range(num_cycles - 1)] + [ + asyncio.CancelledError + ] + try: + client = self._make_one(project="project-id") + if refresh_interval is not None: + await client._manage_channel( + channel_idx, refresh_interval, refresh_interval + ) + else: + await client._manage_channel(channel_idx) + except asyncio.CancelledError: + pass + assert sleep.call_count == num_cycles + total_sleep = sum([call[0][0] for call in sleep.call_args_list]) + assert ( + abs(total_sleep - expected_sleep) < 0.1 + ), f"refresh_interval={refresh_interval}, num_cycles={num_cycles}, expected_sleep={expected_sleep}" + await client.close() + + @pytest.mark.asyncio + async def test__manage_channel_random(self): + import random + + with mock.patch.object(asyncio, "sleep") as sleep: + with 
mock.patch.object(random, "uniform") as uniform: + uniform.return_value = 0 + try: + uniform.side_effect = asyncio.CancelledError + client = self._make_one(project="project-id", pool_size=1) + except asyncio.CancelledError: + uniform.side_effect = None + uniform.reset_mock() + sleep.reset_mock() + min_val = 200 + max_val = 205 + uniform.side_effect = lambda min_, max_: min_ + sleep.side_effect = [None, None, asyncio.CancelledError] + try: + await client._manage_channel(0, min_val, max_val) + except asyncio.CancelledError: + pass + assert uniform.call_count == 2 + uniform_args = [call[0] for call in uniform.call_args_list] + for found_min, found_max in uniform_args: + assert found_min == min_val + assert found_max == max_val + + @pytest.mark.asyncio + @pytest.mark.parametrize("num_cycles", [0, 1, 10, 100]) + async def test__manage_channel_refresh(self, num_cycles): + # make sure that channels are properly refreshed + from google.cloud.bigtable_v2.services.bigtable.transports.pooled_grpc_asyncio import ( + PooledBigtableGrpcAsyncIOTransport, + ) + from google.api_core import grpc_helpers_async + + expected_grace = 9 + expected_refresh = 0.5 + channel_idx = 1 + new_channel = grpc.aio.insecure_channel("localhost:8080") + + with mock.patch.object( + PooledBigtableGrpcAsyncIOTransport, "replace_channel" + ) as replace_channel: + with mock.patch.object(asyncio, "sleep") as sleep: + sleep.side_effect = [None for i in range(num_cycles)] + [ + asyncio.CancelledError + ] + with mock.patch.object( + grpc_helpers_async, "create_channel" + ) as create_channel: + create_channel.return_value = new_channel + client = self._make_one(project="project-id", use_emulator=False) + create_channel.reset_mock() + try: + await client._manage_channel( + channel_idx, + refresh_interval_min=expected_refresh, + refresh_interval_max=expected_refresh, + grace_period=expected_grace, + ) + except asyncio.CancelledError: + pass + assert sleep.call_count == num_cycles + 1 + assert create_channel.call_count == num_cycles + assert replace_channel.call_count == num_cycles + for call in replace_channel.call_args_list: + args, kwargs = call + assert args[0] == channel_idx + assert kwargs["grace"] == expected_grace + assert kwargs["new_channel"] == new_channel + await client.close() + + @pytest.mark.asyncio + async def test__register_instance(self): + """ + test instance registration + """ + # set up mock client + client_mock = mock.Mock() + client_mock._gapic_client.instance_path.side_effect = lambda a, b: f"prefix/{b}" + active_instances = set() + instance_owners = {} + client_mock._active_instances = active_instances + client_mock._instance_owners = instance_owners + client_mock._channel_refresh_tasks = [] + client_mock._start_background_channel_refresh.side_effect = ( + lambda: client_mock._channel_refresh_tasks.append(mock.Mock) + ) + mock_channels = [mock.Mock() for i in range(5)] + client_mock.transport.channels = mock_channels + client_mock._ping_and_warm_instances = AsyncMock() + table_mock = mock.Mock() + await self._get_target_class()._register_instance( + client_mock, "instance-1", table_mock + ) + # first call should start background refresh + assert client_mock._start_background_channel_refresh.call_count == 1 + # ensure active_instances and instance_owners were updated properly + expected_key = ( + "prefix/instance-1", + table_mock.table_name, + table_mock.app_profile_id, + ) + assert len(active_instances) == 1 + assert expected_key == tuple(list(active_instances)[0]) + assert len(instance_owners) == 1 + assert 
expected_key == tuple(list(instance_owners)[0]) + # should be a new task set + assert client_mock._channel_refresh_tasks + # next call should not call _start_background_channel_refresh again + table_mock2 = mock.Mock() + await self._get_target_class()._register_instance( + client_mock, "instance-2", table_mock2 + ) + assert client_mock._start_background_channel_refresh.call_count == 1 + # but it should call ping and warm with new instance key + assert client_mock._ping_and_warm_instances.call_count == len(mock_channels) + for channel in mock_channels: + assert channel in [ + call[0][0] + for call in client_mock._ping_and_warm_instances.call_args_list + ] + # check for updated lists + assert len(active_instances) == 2 + assert len(instance_owners) == 2 + expected_key2 = ( + "prefix/instance-2", + table_mock2.table_name, + table_mock2.app_profile_id, + ) + assert any( + [ + expected_key2 == tuple(list(active_instances)[i]) + for i in range(len(active_instances)) + ] + ) + assert any( + [ + expected_key2 == tuple(list(instance_owners)[i]) + for i in range(len(instance_owners)) + ] + ) + + @pytest.mark.asyncio + @pytest.mark.parametrize( + "insert_instances,expected_active,expected_owner_keys", + [ + ([("i", "t", None)], [("i", "t", None)], [("i", "t", None)]), + ([("i", "t", "p")], [("i", "t", "p")], [("i", "t", "p")]), + ([("1", "t", "p"), ("1", "t", "p")], [("1", "t", "p")], [("1", "t", "p")]), + ( + [("1", "t", "p"), ("2", "t", "p")], + [("1", "t", "p"), ("2", "t", "p")], + [("1", "t", "p"), ("2", "t", "p")], + ), + ], + ) + async def test__register_instance_state( + self, insert_instances, expected_active, expected_owner_keys + ): + """ + test that active_instances and instance_owners are updated as expected + """ + # set up mock client + client_mock = mock.Mock() + client_mock._gapic_client.instance_path.side_effect = lambda a, b: b + active_instances = set() + instance_owners = {} + client_mock._active_instances = active_instances + client_mock._instance_owners = instance_owners + client_mock._channel_refresh_tasks = [] + client_mock._start_background_channel_refresh.side_effect = ( + lambda: client_mock._channel_refresh_tasks.append(mock.Mock) + ) + mock_channels = [mock.Mock() for i in range(5)] + client_mock.transport.channels = mock_channels + client_mock._ping_and_warm_instances = AsyncMock() + table_mock = mock.Mock() + # register instances + for instance, table, profile in insert_instances: + table_mock.table_name = table + table_mock.app_profile_id = profile + await self._get_target_class()._register_instance( + client_mock, instance, table_mock + ) + assert len(active_instances) == len(expected_active) + assert len(instance_owners) == len(expected_owner_keys) + for expected in expected_active: + assert any( + [ + expected == tuple(list(active_instances)[i]) + for i in range(len(active_instances)) + ] + ) + for expected in expected_owner_keys: + assert any( + [ + expected == tuple(list(instance_owners)[i]) + for i in range(len(instance_owners)) + ] + ) + + @pytest.mark.asyncio + async def test__remove_instance_registration(self): + client = self._make_one(project="project-id") + table = mock.Mock() + await client._register_instance("instance-1", table) + await client._register_instance("instance-2", table) + assert len(client._active_instances) == 2 + assert len(client._instance_owners.keys()) == 2 + instance_1_path = client._gapic_client.instance_path( + client.project, "instance-1" + ) + instance_1_key = (instance_1_path, table.table_name, table.app_profile_id) + 
instance_2_path = client._gapic_client.instance_path( + client.project, "instance-2" + ) + instance_2_key = (instance_2_path, table.table_name, table.app_profile_id) + assert len(client._instance_owners[instance_1_key]) == 1 + assert list(client._instance_owners[instance_1_key])[0] == id(table) + assert len(client._instance_owners[instance_2_key]) == 1 + assert list(client._instance_owners[instance_2_key])[0] == id(table) + success = await client._remove_instance_registration("instance-1", table) + assert success + assert len(client._active_instances) == 1 + assert len(client._instance_owners[instance_1_key]) == 0 + assert len(client._instance_owners[instance_2_key]) == 1 + assert client._active_instances == {instance_2_key} + success = await client._remove_instance_registration("fake-key", table) + assert not success + assert len(client._active_instances) == 1 + await client.close() + + @pytest.mark.asyncio + async def test__multiple_table_registration(self): + """ + registering with multiple tables with the same key should + add multiple owners to instance_owners, but only keep one copy + of shared key in active_instances + """ + from google.cloud.bigtable.data._async.client import _WarmedInstanceKey + + async with self._make_one(project="project-id") as client: + async with client.get_table("instance_1", "table_1") as table_1: + instance_1_path = client._gapic_client.instance_path( + client.project, "instance_1" + ) + instance_1_key = _WarmedInstanceKey( + instance_1_path, table_1.table_name, table_1.app_profile_id + ) + assert len(client._instance_owners[instance_1_key]) == 1 + assert len(client._active_instances) == 1 + assert id(table_1) in client._instance_owners[instance_1_key] + # duplicate table should register in instance_owners under same key + async with client.get_table("instance_1", "table_1") as table_2: + assert len(client._instance_owners[instance_1_key]) == 2 + assert len(client._active_instances) == 1 + assert id(table_1) in client._instance_owners[instance_1_key] + assert id(table_2) in client._instance_owners[instance_1_key] + # unique table should register in instance_owners and active_instances + async with client.get_table("instance_1", "table_3") as table_3: + instance_3_path = client._gapic_client.instance_path( + client.project, "instance_1" + ) + instance_3_key = _WarmedInstanceKey( + instance_3_path, table_3.table_name, table_3.app_profile_id + ) + assert len(client._instance_owners[instance_1_key]) == 2 + assert len(client._instance_owners[instance_3_key]) == 1 + assert len(client._active_instances) == 2 + assert id(table_1) in client._instance_owners[instance_1_key] + assert id(table_2) in client._instance_owners[instance_1_key] + assert id(table_3) in client._instance_owners[instance_3_key] + # sub-tables should be unregistered, but instance should still be active + assert len(client._active_instances) == 1 + assert instance_1_key in client._active_instances + assert id(table_2) not in client._instance_owners[instance_1_key] + # both tables are gone. 
instance should be unregistered + assert len(client._active_instances) == 0 + assert instance_1_key not in client._active_instances + assert len(client._instance_owners[instance_1_key]) == 0 + + @pytest.mark.asyncio + async def test__multiple_instance_registration(self): + """ + registering with multiple instance keys should update the key + in instance_owners and active_instances + """ + from google.cloud.bigtable.data._async.client import _WarmedInstanceKey + + async with self._make_one(project="project-id") as client: + async with client.get_table("instance_1", "table_1") as table_1: + async with client.get_table("instance_2", "table_2") as table_2: + instance_1_path = client._gapic_client.instance_path( + client.project, "instance_1" + ) + instance_1_key = _WarmedInstanceKey( + instance_1_path, table_1.table_name, table_1.app_profile_id + ) + instance_2_path = client._gapic_client.instance_path( + client.project, "instance_2" + ) + instance_2_key = _WarmedInstanceKey( + instance_2_path, table_2.table_name, table_2.app_profile_id + ) + assert len(client._instance_owners[instance_1_key]) == 1 + assert len(client._instance_owners[instance_2_key]) == 1 + assert len(client._active_instances) == 2 + assert id(table_1) in client._instance_owners[instance_1_key] + assert id(table_2) in client._instance_owners[instance_2_key] + # instance2 should be unregistered, but instance1 should still be active + assert len(client._active_instances) == 1 + assert instance_1_key in client._active_instances + assert len(client._instance_owners[instance_2_key]) == 0 + assert len(client._instance_owners[instance_1_key]) == 1 + assert id(table_1) in client._instance_owners[instance_1_key] + # both tables are gone. instances should both be unregistered + assert len(client._active_instances) == 0 + assert len(client._instance_owners[instance_1_key]) == 0 + assert len(client._instance_owners[instance_2_key]) == 0 + + @pytest.mark.asyncio + async def test_get_table(self): + from google.cloud.bigtable.data._async.client import TableAsync + from google.cloud.bigtable.data._async.client import _WarmedInstanceKey + + client = self._make_one(project="project-id") + assert not client._active_instances + expected_table_id = "table-id" + expected_instance_id = "instance-id" + expected_app_profile_id = "app-profile-id" + table = client.get_table( + expected_instance_id, + expected_table_id, + expected_app_profile_id, + ) + await asyncio.sleep(0) + assert isinstance(table, TableAsync) + assert table.table_id == expected_table_id + assert ( + table.table_name + == f"projects/{client.project}/instances/{expected_instance_id}/tables/{expected_table_id}" + ) + assert table.instance_id == expected_instance_id + assert ( + table.instance_name + == f"projects/{client.project}/instances/{expected_instance_id}" + ) + assert table.app_profile_id == expected_app_profile_id + assert table.client is client + instance_key = _WarmedInstanceKey( + table.instance_name, table.table_name, table.app_profile_id + ) + assert instance_key in client._active_instances + assert client._instance_owners[instance_key] == {id(table)} + await client.close() + + @pytest.mark.asyncio + async def test_get_table_arg_passthrough(self): + """ + All arguments passed in get_table should be sent to constructor + """ + async with self._make_one(project="project-id") as client: + with mock.patch( + "google.cloud.bigtable.data._async.client.TableAsync.__init__", + ) as mock_constructor: + mock_constructor.return_value = None + assert not client._active_instances + 
expected_table_id = "table-id" + expected_instance_id = "instance-id" + expected_app_profile_id = "app-profile-id" + expected_args = (1, "test", {"test": 2}) + expected_kwargs = {"hello": "world", "test": 2} + + client.get_table( + expected_instance_id, + expected_table_id, + expected_app_profile_id, + *expected_args, + **expected_kwargs, + ) + mock_constructor.assert_called_once_with( + client, + expected_instance_id, + expected_table_id, + expected_app_profile_id, + *expected_args, + **expected_kwargs, + ) + + @pytest.mark.asyncio + async def test_get_table_context_manager(self): + from google.cloud.bigtable.data._async.client import TableAsync + from google.cloud.bigtable.data._async.client import _WarmedInstanceKey + + expected_table_id = "table-id" + expected_instance_id = "instance-id" + expected_app_profile_id = "app-profile-id" + expected_project_id = "project-id" + + with mock.patch.object(TableAsync, "close") as close_mock: + async with self._make_one(project=expected_project_id) as client: + async with client.get_table( + expected_instance_id, + expected_table_id, + expected_app_profile_id, + ) as table: + await asyncio.sleep(0) + assert isinstance(table, TableAsync) + assert table.table_id == expected_table_id + assert ( + table.table_name + == f"projects/{expected_project_id}/instances/{expected_instance_id}/tables/{expected_table_id}" + ) + assert table.instance_id == expected_instance_id + assert ( + table.instance_name + == f"projects/{expected_project_id}/instances/{expected_instance_id}" + ) + assert table.app_profile_id == expected_app_profile_id + assert table.client is client + instance_key = _WarmedInstanceKey( + table.instance_name, table.table_name, table.app_profile_id + ) + assert instance_key in client._active_instances + assert client._instance_owners[instance_key] == {id(table)} + assert close_mock.call_count == 1 + + @pytest.mark.asyncio + async def test_multiple_pool_sizes(self): + # should be able to create multiple clients with different pool sizes without issue + pool_sizes = [1, 2, 4, 8, 16, 32, 64, 128, 256] + for pool_size in pool_sizes: + client = self._make_one( + project="project-id", pool_size=pool_size, use_emulator=False + ) + assert len(client._channel_refresh_tasks) == pool_size + client_duplicate = self._make_one( + project="project-id", pool_size=pool_size, use_emulator=False + ) + assert len(client_duplicate._channel_refresh_tasks) == pool_size + assert str(pool_size) in str(client.transport) + await client.close() + await client_duplicate.close() + + @pytest.mark.asyncio + async def test_close(self): + from google.cloud.bigtable_v2.services.bigtable.transports.pooled_grpc_asyncio import ( + PooledBigtableGrpcAsyncIOTransport, + ) + + pool_size = 7 + client = self._make_one( + project="project-id", pool_size=pool_size, use_emulator=False + ) + assert len(client._channel_refresh_tasks) == pool_size + tasks_list = list(client._channel_refresh_tasks) + for task in client._channel_refresh_tasks: + assert not task.done() + with mock.patch.object( + PooledBigtableGrpcAsyncIOTransport, "close", AsyncMock() + ) as close_mock: + await client.close() + close_mock.assert_called_once() + close_mock.assert_awaited() + for task in tasks_list: + assert task.done() + assert task.cancelled() + assert client._channel_refresh_tasks == [] + + @pytest.mark.asyncio + async def test_close_with_timeout(self): + pool_size = 7 + expected_timeout = 19 + client = self._make_one(project="project-id", pool_size=pool_size) + tasks = list(client._channel_refresh_tasks) + 
with mock.patch.object(asyncio, "wait_for", AsyncMock()) as wait_for_mock: + await client.close(timeout=expected_timeout) + wait_for_mock.assert_called_once() + wait_for_mock.assert_awaited() + assert wait_for_mock.call_args[1]["timeout"] == expected_timeout + client._channel_refresh_tasks = tasks + await client.close() + + @pytest.mark.asyncio + async def test_context_manager(self): + # context manager should close the client cleanly + close_mock = AsyncMock() + true_close = None + async with self._make_one(project="project-id") as client: + true_close = client.close() + client.close = close_mock + for task in client._channel_refresh_tasks: + assert not task.done() + assert client.project == "project-id" + assert client._active_instances == set() + close_mock.assert_not_called() + close_mock.assert_called_once() + close_mock.assert_awaited() + # actually close the client + await true_close + + def test_client_ctor_sync(self): + # initializing client in a sync context should raise RuntimeError + + with pytest.warns(RuntimeWarning) as warnings: + client = _make_client(project="project-id", use_emulator=False) + expected_warning = [w for w in warnings if "client.py" in w.filename] + assert len(expected_warning) == 1 + assert ( + "BigtableDataClientAsync should be started in an asyncio event loop." + in str(expected_warning[0].message) + ) + assert client.project == "project-id" + assert client._channel_refresh_tasks == [] + + +class TestTableAsync: + @pytest.mark.asyncio + async def test_table_ctor(self): + from google.cloud.bigtable.data._async.client import TableAsync + from google.cloud.bigtable.data._async.client import _WarmedInstanceKey + + expected_table_id = "table-id" + expected_instance_id = "instance-id" + expected_app_profile_id = "app-profile-id" + expected_operation_timeout = 123 + expected_attempt_timeout = 12 + expected_read_rows_operation_timeout = 1.5 + expected_read_rows_attempt_timeout = 0.5 + expected_mutate_rows_operation_timeout = 2.5 + expected_mutate_rows_attempt_timeout = 0.75 + client = _make_client() + assert not client._active_instances + + table = TableAsync( + client, + expected_instance_id, + expected_table_id, + expected_app_profile_id, + default_operation_timeout=expected_operation_timeout, + default_attempt_timeout=expected_attempt_timeout, + default_read_rows_operation_timeout=expected_read_rows_operation_timeout, + default_read_rows_attempt_timeout=expected_read_rows_attempt_timeout, + default_mutate_rows_operation_timeout=expected_mutate_rows_operation_timeout, + default_mutate_rows_attempt_timeout=expected_mutate_rows_attempt_timeout, + ) + await asyncio.sleep(0) + assert table.table_id == expected_table_id + assert table.instance_id == expected_instance_id + assert table.app_profile_id == expected_app_profile_id + assert table.client is client + instance_key = _WarmedInstanceKey( + table.instance_name, table.table_name, table.app_profile_id + ) + assert instance_key in client._active_instances + assert client._instance_owners[instance_key] == {id(table)} + assert table.default_operation_timeout == expected_operation_timeout + assert table.default_attempt_timeout == expected_attempt_timeout + assert ( + table.default_read_rows_operation_timeout + == expected_read_rows_operation_timeout + ) + assert ( + table.default_read_rows_attempt_timeout + == expected_read_rows_attempt_timeout + ) + assert ( + table.default_mutate_rows_operation_timeout + == expected_mutate_rows_operation_timeout + ) + assert ( + table.default_mutate_rows_attempt_timeout + == 
expected_mutate_rows_attempt_timeout + ) + # ensure task reaches completion + await table._register_instance_task + assert table._register_instance_task.done() + assert not table._register_instance_task.cancelled() + assert table._register_instance_task.exception() is None + await client.close() + + @pytest.mark.asyncio + async def test_table_ctor_defaults(self): + """ + should provide default timeout values and app_profile_id + """ + from google.cloud.bigtable.data._async.client import TableAsync + + expected_table_id = "table-id" + expected_instance_id = "instance-id" + client = _make_client() + assert not client._active_instances + + table = TableAsync( + client, + expected_instance_id, + expected_table_id, + ) + await asyncio.sleep(0) + assert table.table_id == expected_table_id + assert table.instance_id == expected_instance_id + assert table.app_profile_id is None + assert table.client is client + assert table.default_operation_timeout == 60 + assert table.default_read_rows_operation_timeout == 600 + assert table.default_mutate_rows_operation_timeout == 600 + assert table.default_attempt_timeout == 20 + assert table.default_read_rows_attempt_timeout == 20 + assert table.default_mutate_rows_attempt_timeout == 60 + await client.close() + + @pytest.mark.asyncio + async def test_table_ctor_invalid_timeout_values(self): + """ + bad timeout values should raise ValueError + """ + from google.cloud.bigtable.data._async.client import TableAsync + + client = _make_client() + + timeout_pairs = [ + ("default_operation_timeout", "default_attempt_timeout"), + ( + "default_read_rows_operation_timeout", + "default_read_rows_attempt_timeout", + ), + ( + "default_mutate_rows_operation_timeout", + "default_mutate_rows_attempt_timeout", + ), + ] + for operation_timeout, attempt_timeout in timeout_pairs: + with pytest.raises(ValueError) as e: + TableAsync(client, "", "", **{attempt_timeout: -1}) + assert "attempt_timeout must be greater than 0" in str(e.value) + with pytest.raises(ValueError) as e: + TableAsync(client, "", "", **{operation_timeout: -1}) + assert "operation_timeout must be greater than 0" in str(e.value) + await client.close() + + def test_table_ctor_sync(self): + # initializing client in a sync context should raise RuntimeError + from google.cloud.bigtable.data._async.client import TableAsync + + client = mock.Mock() + with pytest.raises(RuntimeError) as e: + TableAsync(client, "instance-id", "table-id") + assert e.match("TableAsync must be created within an async event loop context.") + + @pytest.mark.asyncio + # iterate over all retryable rpcs + @pytest.mark.parametrize( + "fn_name,fn_args,retry_fn_path,extra_retryables", + [ + ( + "read_rows_stream", + (ReadRowsQuery(),), + "google.api_core.retry.retry_target_stream_async", + (), + ), + ( + "read_rows", + (ReadRowsQuery(),), + "google.api_core.retry.retry_target_stream_async", + (), + ), + ( + "read_row", + (b"row_key",), + "google.api_core.retry.retry_target_stream_async", + (), + ), + ( + "read_rows_sharded", + ([ReadRowsQuery()],), + "google.api_core.retry.retry_target_stream_async", + (), + ), + ( + "row_exists", + (b"row_key",), + "google.api_core.retry.retry_target_stream_async", + (), + ), + ("sample_row_keys", (), "google.api_core.retry.retry_target_async", ()), + ( + "mutate_row", + (b"row_key", [mock.Mock()]), + "google.api_core.retry.retry_target_async", + (), + ), + ( + "bulk_mutate_rows", + ([mutations.RowMutationEntry(b"key", [mock.Mock()])],), + "google.api_core.retry.retry_target_async", + (_MutateRowsIncomplete,), + 
), + ], + ) + # test different inputs for retryable exceptions + @pytest.mark.parametrize( + "input_retryables,expected_retryables", + [ + ( + TABLE_DEFAULT.READ_ROWS, + [ + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + core_exceptions.Aborted, + ], + ), + ( + TABLE_DEFAULT.DEFAULT, + [core_exceptions.DeadlineExceeded, core_exceptions.ServiceUnavailable], + ), + ( + TABLE_DEFAULT.MUTATE_ROWS, + [core_exceptions.DeadlineExceeded, core_exceptions.ServiceUnavailable], + ), + ([], []), + ([4], [core_exceptions.DeadlineExceeded]), + ], + ) + async def test_customizable_retryable_errors( + self, + input_retryables, + expected_retryables, + fn_name, + fn_args, + retry_fn_path, + extra_retryables, + ): + """ + Test that retryable functions support user-configurable arguments, and that the configured retryables are passed + down to the gapic layer. + """ + with mock.patch(retry_fn_path) as retry_fn_mock: + async with _make_client() as client: + table = client.get_table("instance-id", "table-id") + expected_predicate = lambda a: a in expected_retryables # noqa + retry_fn_mock.side_effect = RuntimeError("stop early") + with mock.patch( + "google.api_core.retry.if_exception_type" + ) as predicate_builder_mock: + predicate_builder_mock.return_value = expected_predicate + with pytest.raises(Exception): + # we expect an exception from attempting to call the mock + test_fn = table.__getattribute__(fn_name) + await test_fn(*fn_args, retryable_errors=input_retryables) + # passed in errors should be used to build the predicate + predicate_builder_mock.assert_called_once_with( + *expected_retryables, *extra_retryables + ) + retry_call_args = retry_fn_mock.call_args_list[0].args + # output of if_exception_type should be sent in to retry constructor + assert retry_call_args[1] is expected_predicate + + @pytest.mark.parametrize( + "fn_name,fn_args,gapic_fn", + [ + ("read_rows_stream", (ReadRowsQuery(),), "read_rows"), + ("read_rows", (ReadRowsQuery(),), "read_rows"), + ("read_row", (b"row_key",), "read_rows"), + ("read_rows_sharded", ([ReadRowsQuery()],), "read_rows"), + ("row_exists", (b"row_key",), "read_rows"), + ("sample_row_keys", (), "sample_row_keys"), + ("mutate_row", (b"row_key", [mock.Mock()]), "mutate_row"), + ( + "bulk_mutate_rows", + ([mutations.RowMutationEntry(b"key", [mutations.DeleteAllFromRow()])],), + "mutate_rows", + ), + ("check_and_mutate_row", (b"row_key", None), "check_and_mutate_row"), + ( + "read_modify_write_row", + (b"row_key", mock.Mock()), + "read_modify_write_row", + ), + ], + ) + @pytest.mark.parametrize("include_app_profile", [True, False]) + @pytest.mark.asyncio + async def test_call_metadata(self, include_app_profile, fn_name, fn_args, gapic_fn): + """check that all requests attach proper metadata headers""" + from google.cloud.bigtable.data import TableAsync + + profile = "profile" if include_app_profile else None + with mock.patch( + f"google.cloud.bigtable_v2.BigtableAsyncClient.{gapic_fn}", mock.AsyncMock() + ) as gapic_mock: + gapic_mock.side_effect = RuntimeError("stop early") + async with _make_client() as client: + table = TableAsync(client, "instance-id", "table-id", profile) + try: + test_fn = table.__getattribute__(fn_name) + maybe_stream = await test_fn(*fn_args) + [i async for i in maybe_stream] + except Exception: + # we expect an exception from attempting to call the mock + pass + kwargs = gapic_mock.call_args_list[0].kwargs + metadata = kwargs["metadata"] + goog_metadata = None + for key, value in metadata: + if key == 
"x-goog-request-params": + goog_metadata = value + assert goog_metadata is not None, "x-goog-request-params not found" + assert "table_name=" + table.table_name in goog_metadata + if include_app_profile: + assert "app_profile_id=profile" in goog_metadata + else: + assert "app_profile_id=" not in goog_metadata + + +class TestReadRows: + """ + Tests for table.read_rows and related methods. + """ + + def _make_table(self, *args, **kwargs): + from google.cloud.bigtable.data._async.client import TableAsync + + client_mock = mock.Mock() + client_mock._register_instance.side_effect = ( + lambda *args, **kwargs: asyncio.sleep(0) + ) + client_mock._remove_instance_registration.side_effect = ( + lambda *args, **kwargs: asyncio.sleep(0) + ) + kwargs["instance_id"] = kwargs.get( + "instance_id", args[0] if args else "instance" + ) + kwargs["table_id"] = kwargs.get( + "table_id", args[1] if len(args) > 1 else "table" + ) + client_mock._gapic_client.table_path.return_value = kwargs["table_id"] + client_mock._gapic_client.instance_path.return_value = kwargs["instance_id"] + return TableAsync(client_mock, *args, **kwargs) + + def _make_stats(self): + from google.cloud.bigtable_v2.types import RequestStats + from google.cloud.bigtable_v2.types import FullReadStatsView + from google.cloud.bigtable_v2.types import ReadIterationStats + + return RequestStats( + full_read_stats_view=FullReadStatsView( + read_iteration_stats=ReadIterationStats( + rows_seen_count=1, + rows_returned_count=2, + cells_seen_count=3, + cells_returned_count=4, + ) + ) + ) + + @staticmethod + def _make_chunk(*args, **kwargs): + from google.cloud.bigtable_v2 import ReadRowsResponse + + kwargs["row_key"] = kwargs.get("row_key", b"row_key") + kwargs["family_name"] = kwargs.get("family_name", "family_name") + kwargs["qualifier"] = kwargs.get("qualifier", b"qualifier") + kwargs["value"] = kwargs.get("value", b"value") + kwargs["commit_row"] = kwargs.get("commit_row", True) + + return ReadRowsResponse.CellChunk(*args, **kwargs) + + @staticmethod + async def _make_gapic_stream( + chunk_list: list[ReadRowsResponse.CellChunk | Exception], + sleep_time=0, + ): + from google.cloud.bigtable_v2 import ReadRowsResponse + + class mock_stream: + def __init__(self, chunk_list, sleep_time): + self.chunk_list = chunk_list + self.idx = -1 + self.sleep_time = sleep_time + + def __aiter__(self): + return self + + async def __anext__(self): + self.idx += 1 + if len(self.chunk_list) > self.idx: + if sleep_time: + await asyncio.sleep(self.sleep_time) + chunk = self.chunk_list[self.idx] + if isinstance(chunk, Exception): + raise chunk + else: + return ReadRowsResponse(chunks=[chunk]) + raise StopAsyncIteration + + def cancel(self): + pass + + return mock_stream(chunk_list, sleep_time) + + async def execute_fn(self, table, *args, **kwargs): + return await table.read_rows(*args, **kwargs) + + @pytest.mark.asyncio + async def test_read_rows(self): + query = ReadRowsQuery() + chunks = [ + self._make_chunk(row_key=b"test_1"), + self._make_chunk(row_key=b"test_2"), + ] + async with self._make_table() as table: + read_rows = table.client._gapic_client.read_rows + read_rows.side_effect = lambda *args, **kwargs: self._make_gapic_stream( + chunks + ) + results = await self.execute_fn(table, query, operation_timeout=3) + assert len(results) == 2 + assert results[0].row_key == b"test_1" + assert results[1].row_key == b"test_2" + + @pytest.mark.asyncio + async def test_read_rows_stream(self): + query = ReadRowsQuery() + chunks = [ + self._make_chunk(row_key=b"test_1"), + 
self._make_chunk(row_key=b"test_2"), + ] + async with self._make_table() as table: + read_rows = table.client._gapic_client.read_rows + read_rows.side_effect = lambda *args, **kwargs: self._make_gapic_stream( + chunks + ) + gen = await table.read_rows_stream(query, operation_timeout=3) + results = [row async for row in gen] + assert len(results) == 2 + assert results[0].row_key == b"test_1" + assert results[1].row_key == b"test_2" + + @pytest.mark.parametrize("include_app_profile", [True, False]) + @pytest.mark.asyncio + async def test_read_rows_query_matches_request(self, include_app_profile): + from google.cloud.bigtable.data import RowRange + from google.cloud.bigtable.data.row_filters import PassAllFilter + + app_profile_id = "app_profile_id" if include_app_profile else None + async with self._make_table(app_profile_id=app_profile_id) as table: + read_rows = table.client._gapic_client.read_rows + read_rows.side_effect = lambda *args, **kwargs: self._make_gapic_stream([]) + row_keys = [b"test_1", "test_2"] + row_ranges = RowRange("1start", "2end") + filter_ = PassAllFilter(True) + limit = 99 + query = ReadRowsQuery( + row_keys=row_keys, + row_ranges=row_ranges, + row_filter=filter_, + limit=limit, + ) + + results = await table.read_rows(query, operation_timeout=3) + assert len(results) == 0 + call_request = read_rows.call_args_list[0][0][0] + query_pb = query._to_pb(table) + assert call_request == query_pb + + @pytest.mark.parametrize("operation_timeout", [0.001, 0.023, 0.1]) + @pytest.mark.asyncio + async def test_read_rows_timeout(self, operation_timeout): + async with self._make_table() as table: + read_rows = table.client._gapic_client.read_rows + query = ReadRowsQuery() + chunks = [self._make_chunk(row_key=b"test_1")] + read_rows.side_effect = lambda *args, **kwargs: self._make_gapic_stream( + chunks, sleep_time=1 + ) + try: + await table.read_rows(query, operation_timeout=operation_timeout) + except core_exceptions.DeadlineExceeded as e: + assert ( + e.message + == f"operation_timeout of {operation_timeout:0.1f}s exceeded" + ) + + @pytest.mark.parametrize( + "per_request_t, operation_t, expected_num", + [ + (0.05, 0.08, 2), + (0.05, 0.54, 11), + (0.05, 0.14, 3), + (0.05, 0.24, 5), + ], + ) + @pytest.mark.asyncio + async def test_read_rows_attempt_timeout( + self, per_request_t, operation_t, expected_num + ): + """ + Ensures that the attempt_timeout is respected and that the number of + requests is as expected. + + operation_timeout does not cancel the request, so we expect the number of + requests to be the ceiling of operation_timeout / attempt_timeout. 
+ """ + from google.cloud.bigtable.data.exceptions import RetryExceptionGroup + + expected_last_timeout = operation_t - (expected_num - 1) * per_request_t + + # mocking uniform ensures there are no sleeps between retries + with mock.patch("random.uniform", side_effect=lambda a, b: 0): + async with self._make_table() as table: + read_rows = table.client._gapic_client.read_rows + read_rows.side_effect = lambda *args, **kwargs: self._make_gapic_stream( + chunks, sleep_time=per_request_t + ) + query = ReadRowsQuery() + chunks = [core_exceptions.DeadlineExceeded("mock deadline")] + + try: + await table.read_rows( + query, + operation_timeout=operation_t, + attempt_timeout=per_request_t, + ) + except core_exceptions.DeadlineExceeded as e: + retry_exc = e.__cause__ + if expected_num == 0: + assert retry_exc is None + else: + assert type(retry_exc) is RetryExceptionGroup + assert f"{expected_num} failed attempts" in str(retry_exc) + assert len(retry_exc.exceptions) == expected_num + for sub_exc in retry_exc.exceptions: + assert sub_exc.message == "mock deadline" + assert read_rows.call_count == expected_num + # check timeouts + for _, call_kwargs in read_rows.call_args_list[:-1]: + assert call_kwargs["timeout"] == per_request_t + assert call_kwargs["retry"] is None + # last timeout should be adjusted to account for the time spent + assert ( + abs( + read_rows.call_args_list[-1][1]["timeout"] + - expected_last_timeout + ) + < 0.05 + ) + + @pytest.mark.parametrize( + "exc_type", + [ + core_exceptions.Aborted, + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ], + ) + @pytest.mark.asyncio + async def test_read_rows_retryable_error(self, exc_type): + async with self._make_table() as table: + read_rows = table.client._gapic_client.read_rows + read_rows.side_effect = lambda *args, **kwargs: self._make_gapic_stream( + [expected_error] + ) + query = ReadRowsQuery() + expected_error = exc_type("mock error") + try: + await table.read_rows(query, operation_timeout=0.1) + except core_exceptions.DeadlineExceeded as e: + retry_exc = e.__cause__ + root_cause = retry_exc.exceptions[0] + assert type(root_cause) is exc_type + assert root_cause == expected_error + + @pytest.mark.parametrize( + "exc_type", + [ + core_exceptions.Cancelled, + core_exceptions.PreconditionFailed, + core_exceptions.NotFound, + core_exceptions.PermissionDenied, + core_exceptions.Conflict, + core_exceptions.InternalServerError, + core_exceptions.TooManyRequests, + core_exceptions.ResourceExhausted, + InvalidChunk, + ], + ) + @pytest.mark.asyncio + async def test_read_rows_non_retryable_error(self, exc_type): + async with self._make_table() as table: + read_rows = table.client._gapic_client.read_rows + read_rows.side_effect = lambda *args, **kwargs: self._make_gapic_stream( + [expected_error] + ) + query = ReadRowsQuery() + expected_error = exc_type("mock error") + try: + await table.read_rows(query, operation_timeout=0.1) + except exc_type as e: + assert e == expected_error + + @pytest.mark.asyncio + async def test_read_rows_revise_request(self): + """ + Ensure that _revise_request is called between retries + """ + from google.cloud.bigtable.data._async._read_rows import _ReadRowsOperationAsync + from google.cloud.bigtable.data.exceptions import InvalidChunk + from google.cloud.bigtable_v2.types import RowSet + + return_val = RowSet() + with mock.patch.object( + _ReadRowsOperationAsync, "_revise_request_rowset" + ) as revise_rowset: + revise_rowset.return_value = return_val + async with self._make_table() as table: 
+ read_rows = table.client._gapic_client.read_rows + read_rows.side_effect = lambda *args, **kwargs: self._make_gapic_stream( + chunks + ) + row_keys = [b"test_1", b"test_2", b"test_3"] + query = ReadRowsQuery(row_keys=row_keys) + chunks = [ + self._make_chunk(row_key=b"test_1"), + core_exceptions.Aborted("mock retryable error"), + ] + try: + await table.read_rows(query) + except InvalidChunk: + revise_rowset.assert_called() + first_call_kwargs = revise_rowset.call_args_list[0].kwargs + assert first_call_kwargs["row_set"] == query._to_pb(table).rows + assert first_call_kwargs["last_seen_row_key"] == b"test_1" + revised_call = read_rows.call_args_list[1].args[0] + assert revised_call.rows == return_val + + @pytest.mark.asyncio + async def test_read_rows_default_timeouts(self): + """ + Ensure that the default timeouts are set on the read rows operation when not overridden + """ + from google.cloud.bigtable.data._async._read_rows import _ReadRowsOperationAsync + + operation_timeout = 8 + attempt_timeout = 4 + with mock.patch.object(_ReadRowsOperationAsync, "__init__") as mock_op: + mock_op.side_effect = RuntimeError("mock error") + async with self._make_table( + default_read_rows_operation_timeout=operation_timeout, + default_read_rows_attempt_timeout=attempt_timeout, + ) as table: + try: + await table.read_rows(ReadRowsQuery()) + except RuntimeError: + pass + kwargs = mock_op.call_args_list[0].kwargs + assert kwargs["operation_timeout"] == operation_timeout + assert kwargs["attempt_timeout"] == attempt_timeout + + @pytest.mark.asyncio + async def test_read_rows_default_timeout_override(self): + """ + When timeouts are passed, they overwrite default values + """ + from google.cloud.bigtable.data._async._read_rows import _ReadRowsOperationAsync + + operation_timeout = 8 + attempt_timeout = 4 + with mock.patch.object(_ReadRowsOperationAsync, "__init__") as mock_op: + mock_op.side_effect = RuntimeError("mock error") + async with self._make_table( + default_operation_timeout=99, default_attempt_timeout=97 + ) as table: + try: + await table.read_rows( + ReadRowsQuery(), + operation_timeout=operation_timeout, + attempt_timeout=attempt_timeout, + ) + except RuntimeError: + pass + kwargs = mock_op.call_args_list[0].kwargs + assert kwargs["operation_timeout"] == operation_timeout + assert kwargs["attempt_timeout"] == attempt_timeout + + @pytest.mark.asyncio + async def test_read_row(self): + """Test reading a single row""" + async with _make_client() as client: + table = client.get_table("instance", "table") + row_key = b"test_1" + with mock.patch.object(table, "read_rows") as read_rows: + expected_result = object() + read_rows.side_effect = lambda *args, **kwargs: [expected_result] + expected_op_timeout = 8 + expected_req_timeout = 4 + row = await table.read_row( + row_key, + operation_timeout=expected_op_timeout, + attempt_timeout=expected_req_timeout, + ) + assert row == expected_result + assert read_rows.call_count == 1 + args, kwargs = read_rows.call_args_list[0] + assert kwargs["operation_timeout"] == expected_op_timeout + assert kwargs["attempt_timeout"] == expected_req_timeout + assert len(args) == 1 + assert isinstance(args[0], ReadRowsQuery) + query = args[0] + assert query.row_keys == [row_key] + assert query.row_ranges == [] + assert query.limit == 1 + + @pytest.mark.asyncio + async def test_read_row_w_filter(self): + """Test reading a single row with an added filter""" + async with _make_client() as client: + table = client.get_table("instance", "table") + row_key = b"test_1" + with 
mock.patch.object(table, "read_rows") as read_rows: + expected_result = object() + read_rows.side_effect = lambda *args, **kwargs: [expected_result] + expected_op_timeout = 8 + expected_req_timeout = 4 + mock_filter = mock.Mock() + expected_filter = {"filter": "mock filter"} + mock_filter._to_dict.return_value = expected_filter + row = await table.read_row( + row_key, + operation_timeout=expected_op_timeout, + attempt_timeout=expected_req_timeout, + row_filter=expected_filter, + ) + assert row == expected_result + assert read_rows.call_count == 1 + args, kwargs = read_rows.call_args_list[0] + assert kwargs["operation_timeout"] == expected_op_timeout + assert kwargs["attempt_timeout"] == expected_req_timeout + assert len(args) == 1 + assert isinstance(args[0], ReadRowsQuery) + query = args[0] + assert query.row_keys == [row_key] + assert query.row_ranges == [] + assert query.limit == 1 + assert query.filter == expected_filter + + @pytest.mark.asyncio + async def test_read_row_no_response(self): + """should return None if row does not exist""" + async with _make_client() as client: + table = client.get_table("instance", "table") + row_key = b"test_1" + with mock.patch.object(table, "read_rows") as read_rows: + # return no rows + read_rows.side_effect = lambda *args, **kwargs: [] + expected_op_timeout = 8 + expected_req_timeout = 4 + result = await table.read_row( + row_key, + operation_timeout=expected_op_timeout, + attempt_timeout=expected_req_timeout, + ) + assert result is None + assert read_rows.call_count == 1 + args, kwargs = read_rows.call_args_list[0] + assert kwargs["operation_timeout"] == expected_op_timeout + assert kwargs["attempt_timeout"] == expected_req_timeout + assert isinstance(args[0], ReadRowsQuery) + query = args[0] + assert query.row_keys == [row_key] + assert query.row_ranges == [] + assert query.limit == 1 + + @pytest.mark.parametrize( + "return_value,expected_result", + [ + ([], False), + ([object()], True), + ([object(), object()], True), + ], + ) + @pytest.mark.asyncio + async def test_row_exists(self, return_value, expected_result): + """Test checking for row existence""" + async with _make_client() as client: + table = client.get_table("instance", "table") + row_key = b"test_1" + with mock.patch.object(table, "read_rows") as read_rows: + # return no rows + read_rows.side_effect = lambda *args, **kwargs: return_value + expected_op_timeout = 1 + expected_req_timeout = 2 + result = await table.row_exists( + row_key, + operation_timeout=expected_op_timeout, + attempt_timeout=expected_req_timeout, + ) + assert expected_result == result + assert read_rows.call_count == 1 + args, kwargs = read_rows.call_args_list[0] + assert kwargs["operation_timeout"] == expected_op_timeout + assert kwargs["attempt_timeout"] == expected_req_timeout + assert isinstance(args[0], ReadRowsQuery) + expected_filter = { + "chain": { + "filters": [ + {"cells_per_row_limit_filter": 1}, + {"strip_value_transformer": True}, + ] + } + } + query = args[0] + assert query.row_keys == [row_key] + assert query.row_ranges == [] + assert query.limit == 1 + assert query.filter._to_dict() == expected_filter + + +class TestReadRowsSharded: + @pytest.mark.asyncio + async def test_read_rows_sharded_empty_query(self): + async with _make_client() as client: + async with client.get_table("instance", "table") as table: + with pytest.raises(ValueError) as exc: + await table.read_rows_sharded([]) + assert "empty sharded_query" in str(exc.value) + + @pytest.mark.asyncio + async def 
test_read_rows_sharded_multiple_queries(self): + """ + Test with multiple queries. Should return results from both + """ + async with _make_client() as client: + async with client.get_table("instance", "table") as table: + with mock.patch.object( + table.client._gapic_client, "read_rows" + ) as read_rows: + read_rows.side_effect = ( + lambda *args, **kwargs: TestReadRows._make_gapic_stream( + [ + TestReadRows._make_chunk(row_key=k) + for k in args[0].rows.row_keys + ] + ) + ) + query_1 = ReadRowsQuery(b"test_1") + query_2 = ReadRowsQuery(b"test_2") + result = await table.read_rows_sharded([query_1, query_2]) + assert len(result) == 2 + assert result[0].row_key == b"test_1" + assert result[1].row_key == b"test_2" + + @pytest.mark.parametrize("n_queries", [1, 2, 5, 11, 24]) + @pytest.mark.asyncio + async def test_read_rows_sharded_multiple_queries_calls(self, n_queries): + """ + Each query should trigger a separate read_rows call + """ + async with _make_client() as client: + async with client.get_table("instance", "table") as table: + with mock.patch.object(table, "read_rows") as read_rows: + query_list = [ReadRowsQuery() for _ in range(n_queries)] + await table.read_rows_sharded(query_list) + assert read_rows.call_count == n_queries + + @pytest.mark.asyncio + async def test_read_rows_sharded_errors(self): + """ + Errors should be exposed as ShardedReadRowsExceptionGroups + """ + from google.cloud.bigtable.data.exceptions import ShardedReadRowsExceptionGroup + from google.cloud.bigtable.data.exceptions import FailedQueryShardError + + async with _make_client() as client: + async with client.get_table("instance", "table") as table: + with mock.patch.object(table, "read_rows") as read_rows: + read_rows.side_effect = RuntimeError("mock error") + query_1 = ReadRowsQuery(b"test_1") + query_2 = ReadRowsQuery(b"test_2") + with pytest.raises(ShardedReadRowsExceptionGroup) as exc: + await table.read_rows_sharded([query_1, query_2]) + exc_group = exc.value + assert isinstance(exc_group, ShardedReadRowsExceptionGroup) + assert len(exc.value.exceptions) == 2 + assert isinstance(exc.value.exceptions[0], FailedQueryShardError) + assert isinstance(exc.value.exceptions[0].__cause__, RuntimeError) + assert exc.value.exceptions[0].index == 0 + assert exc.value.exceptions[0].query == query_1 + assert isinstance(exc.value.exceptions[1], FailedQueryShardError) + assert isinstance(exc.value.exceptions[1].__cause__, RuntimeError) + assert exc.value.exceptions[1].index == 1 + assert exc.value.exceptions[1].query == query_2 + + @pytest.mark.asyncio + async def test_read_rows_sharded_concurrent(self): + """ + Ensure sharded requests are concurrent + """ + import time + + async def mock_call(*args, **kwargs): + await asyncio.sleep(0.1) + return [mock.Mock()] + + async with _make_client() as client: + async with client.get_table("instance", "table") as table: + with mock.patch.object(table, "read_rows") as read_rows: + read_rows.side_effect = mock_call + queries = [ReadRowsQuery() for _ in range(10)] + start_time = time.monotonic() + result = await table.read_rows_sharded(queries) + call_time = time.monotonic() - start_time + assert read_rows.call_count == 10 + assert len(result) == 10 + # if run in sequence, we would expect this to take 1 second + assert call_time < 0.2 + + @pytest.mark.asyncio + async def test_read_rows_sharded_batching(self): + """ + Large queries should be processed in batches to limit concurrency + operation timeout should change between batches + """ + from 
google.cloud.bigtable.data._async.client import TableAsync + from google.cloud.bigtable.data._async.client import _CONCURRENCY_LIMIT + + assert _CONCURRENCY_LIMIT == 10 # change this test if this changes + + n_queries = 90 + expected_num_batches = n_queries // _CONCURRENCY_LIMIT + query_list = [ReadRowsQuery() for _ in range(n_queries)] + + table_mock = AsyncMock() + start_operation_timeout = 10 + start_attempt_timeout = 3 + table_mock.default_read_rows_operation_timeout = start_operation_timeout + table_mock.default_read_rows_attempt_timeout = start_attempt_timeout + # clock ticks one second on each check + with mock.patch("time.monotonic", side_effect=range(0, 100000)): + with mock.patch("asyncio.gather", AsyncMock()) as gather_mock: + await TableAsync.read_rows_sharded(table_mock, query_list) + # should have individual calls for each query + assert table_mock.read_rows.call_count == n_queries + # should have single gather call for each batch + assert gather_mock.call_count == expected_num_batches + # ensure that timeouts decrease over time + kwargs = [ + table_mock.read_rows.call_args_list[idx][1] + for idx in range(n_queries) + ] + for batch_idx in range(expected_num_batches): + batch_kwargs = kwargs[ + batch_idx + * _CONCURRENCY_LIMIT : (batch_idx + 1) + * _CONCURRENCY_LIMIT + ] + for req_kwargs in batch_kwargs: + # each batch should have the same operation_timeout, and it should decrease in each batch + expected_operation_timeout = start_operation_timeout - ( + batch_idx + 1 + ) + assert ( + req_kwargs["operation_timeout"] + == expected_operation_timeout + ) + # each attempt_timeout should start with default value, but decrease when operation_timeout reaches it + expected_attempt_timeout = min( + start_attempt_timeout, expected_operation_timeout + ) + assert req_kwargs["attempt_timeout"] == expected_attempt_timeout + # await all created coroutines to avoid warnings + for i in range(len(gather_mock.call_args_list)): + for j in range(len(gather_mock.call_args_list[i][0])): + await gather_mock.call_args_list[i][0][j] + + +class TestSampleRowKeys: + async def _make_gapic_stream(self, sample_list: list[tuple[bytes, int]]): + from google.cloud.bigtable_v2.types import SampleRowKeysResponse + + for value in sample_list: + yield SampleRowKeysResponse(row_key=value[0], offset_bytes=value[1]) + + @pytest.mark.asyncio + async def test_sample_row_keys(self): + """ + Test that method returns the expected key samples + """ + samples = [ + (b"test_1", 0), + (b"test_2", 100), + (b"test_3", 200), + ] + async with _make_client() as client: + async with client.get_table("instance", "table") as table: + with mock.patch.object( + table.client._gapic_client, "sample_row_keys", AsyncMock() + ) as sample_row_keys: + sample_row_keys.return_value = self._make_gapic_stream(samples) + result = await table.sample_row_keys() + assert len(result) == 3 + assert all(isinstance(r, tuple) for r in result) + assert all(isinstance(r[0], bytes) for r in result) + assert all(isinstance(r[1], int) for r in result) + assert result[0] == samples[0] + assert result[1] == samples[1] + assert result[2] == samples[2] + + @pytest.mark.asyncio + async def test_sample_row_keys_bad_timeout(self): + """ + should raise error if timeout is negative + """ + async with _make_client() as client: + async with client.get_table("instance", "table") as table: + with pytest.raises(ValueError) as e: + await table.sample_row_keys(operation_timeout=-1) + assert "operation_timeout must be greater than 0" in str(e.value) + with 
pytest.raises(ValueError) as e: + await table.sample_row_keys(attempt_timeout=-1) + assert "attempt_timeout must be greater than 0" in str(e.value) + + @pytest.mark.asyncio + async def test_sample_row_keys_default_timeout(self): + """Should fallback to using table default operation_timeout""" + expected_timeout = 99 + async with _make_client() as client: + async with client.get_table( + "i", + "t", + default_operation_timeout=expected_timeout, + default_attempt_timeout=expected_timeout, + ) as table: + with mock.patch.object( + table.client._gapic_client, "sample_row_keys", AsyncMock() + ) as sample_row_keys: + sample_row_keys.return_value = self._make_gapic_stream([]) + result = await table.sample_row_keys() + _, kwargs = sample_row_keys.call_args + assert abs(kwargs["timeout"] - expected_timeout) < 0.1 + assert result == [] + assert kwargs["retry"] is None + + @pytest.mark.asyncio + async def test_sample_row_keys_gapic_params(self): + """ + make sure arguments are propagated to gapic call as expected + """ + expected_timeout = 10 + expected_profile = "test1" + instance = "instance_name" + table_id = "my_table" + async with _make_client() as client: + async with client.get_table( + instance, table_id, app_profile_id=expected_profile + ) as table: + with mock.patch.object( + table.client._gapic_client, "sample_row_keys", AsyncMock() + ) as sample_row_keys: + sample_row_keys.return_value = self._make_gapic_stream([]) + await table.sample_row_keys(attempt_timeout=expected_timeout) + args, kwargs = sample_row_keys.call_args + assert len(args) == 0 + assert len(kwargs) == 5 + assert kwargs["timeout"] == expected_timeout + assert kwargs["app_profile_id"] == expected_profile + assert kwargs["table_name"] == table.table_name + assert kwargs["metadata"] is not None + assert kwargs["retry"] is None + + @pytest.mark.parametrize( + "retryable_exception", + [ + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ], + ) + @pytest.mark.asyncio + async def test_sample_row_keys_retryable_errors(self, retryable_exception): + """ + retryable errors should be retried until timeout + """ + from google.api_core.exceptions import DeadlineExceeded + from google.cloud.bigtable.data.exceptions import RetryExceptionGroup + + async with _make_client() as client: + async with client.get_table("instance", "table") as table: + with mock.patch.object( + table.client._gapic_client, "sample_row_keys", AsyncMock() + ) as sample_row_keys: + sample_row_keys.side_effect = retryable_exception("mock") + with pytest.raises(DeadlineExceeded) as e: + await table.sample_row_keys(operation_timeout=0.05) + cause = e.value.__cause__ + assert isinstance(cause, RetryExceptionGroup) + assert len(cause.exceptions) > 0 + assert isinstance(cause.exceptions[0], retryable_exception) + + @pytest.mark.parametrize( + "non_retryable_exception", + [ + core_exceptions.OutOfRange, + core_exceptions.NotFound, + core_exceptions.FailedPrecondition, + RuntimeError, + ValueError, + core_exceptions.Aborted, + ], + ) + @pytest.mark.asyncio + async def test_sample_row_keys_non_retryable_errors(self, non_retryable_exception): + """ + non-retryable errors should cause a raise + """ + async with _make_client() as client: + async with client.get_table("instance", "table") as table: + with mock.patch.object( + table.client._gapic_client, "sample_row_keys", AsyncMock() + ) as sample_row_keys: + sample_row_keys.side_effect = non_retryable_exception("mock") + with pytest.raises(non_retryable_exception): + await table.sample_row_keys() + + 
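+# Note: the following class covers TableAsync.mutate_row, based on the tests visible below:
+# argument passthrough to the gapic mutate_row call, retry behavior for idempotent vs
+# non-idempotent mutations, retryable vs non-retryable errors, metadata headers, and
+# validation of empty mutation lists.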
+class TestMutateRow: + @pytest.mark.asyncio + @pytest.mark.parametrize( + "mutation_arg", + [ + mutations.SetCell("family", b"qualifier", b"value"), + mutations.SetCell( + "family", b"qualifier", b"value", timestamp_micros=1234567890 + ), + mutations.DeleteRangeFromColumn("family", b"qualifier"), + mutations.DeleteAllFromFamily("family"), + mutations.DeleteAllFromRow(), + [mutations.SetCell("family", b"qualifier", b"value")], + [ + mutations.DeleteRangeFromColumn("family", b"qualifier"), + mutations.DeleteAllFromRow(), + ], + ], + ) + async def test_mutate_row(self, mutation_arg): + """Test mutations with no errors""" + expected_attempt_timeout = 19 + async with _make_client(project="project") as client: + async with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "mutate_row" + ) as mock_gapic: + mock_gapic.return_value = None + await table.mutate_row( + "row_key", + mutation_arg, + attempt_timeout=expected_attempt_timeout, + ) + assert mock_gapic.call_count == 1 + kwargs = mock_gapic.call_args_list[0].kwargs + assert ( + kwargs["table_name"] + == "projects/project/instances/instance/tables/table" + ) + assert kwargs["row_key"] == b"row_key" + formatted_mutations = ( + [mutation._to_pb() for mutation in mutation_arg] + if isinstance(mutation_arg, list) + else [mutation_arg._to_pb()] + ) + assert kwargs["mutations"] == formatted_mutations + assert kwargs["timeout"] == expected_attempt_timeout + # make sure gapic layer is not retrying + assert kwargs["retry"] is None + + @pytest.mark.parametrize( + "retryable_exception", + [ + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ], + ) + @pytest.mark.asyncio + async def test_mutate_row_retryable_errors(self, retryable_exception): + from google.api_core.exceptions import DeadlineExceeded + from google.cloud.bigtable.data.exceptions import RetryExceptionGroup + + async with _make_client(project="project") as client: + async with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "mutate_row" + ) as mock_gapic: + mock_gapic.side_effect = retryable_exception("mock") + with pytest.raises(DeadlineExceeded) as e: + mutation = mutations.DeleteAllFromRow() + assert mutation.is_idempotent() is True + await table.mutate_row( + "row_key", mutation, operation_timeout=0.01 + ) + cause = e.value.__cause__ + assert isinstance(cause, RetryExceptionGroup) + assert isinstance(cause.exceptions[0], retryable_exception) + + @pytest.mark.parametrize( + "retryable_exception", + [ + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ], + ) + @pytest.mark.asyncio + async def test_mutate_row_non_idempotent_retryable_errors( + self, retryable_exception + ): + """ + Non-idempotent mutations should not be retried + """ + async with _make_client(project="project") as client: + async with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "mutate_row" + ) as mock_gapic: + mock_gapic.side_effect = retryable_exception("mock") + with pytest.raises(retryable_exception): + mutation = mutations.SetCell( + "family", b"qualifier", b"value", -1 + ) + assert mutation.is_idempotent() is False + await table.mutate_row( + "row_key", mutation, operation_timeout=0.2 + ) + + @pytest.mark.parametrize( + "non_retryable_exception", + [ + core_exceptions.OutOfRange, + core_exceptions.NotFound, + core_exceptions.FailedPrecondition, + RuntimeError, + ValueError, + core_exceptions.Aborted, + ], + ) + 
@pytest.mark.asyncio + async def test_mutate_row_non_retryable_errors(self, non_retryable_exception): + async with _make_client(project="project") as client: + async with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "mutate_row" + ) as mock_gapic: + mock_gapic.side_effect = non_retryable_exception("mock") + with pytest.raises(non_retryable_exception): + mutation = mutations.SetCell( + "family", + b"qualifier", + b"value", + timestamp_micros=1234567890, + ) + assert mutation.is_idempotent() is True + await table.mutate_row( + "row_key", mutation, operation_timeout=0.2 + ) + + @pytest.mark.parametrize("include_app_profile", [True, False]) + @pytest.mark.asyncio + async def test_mutate_row_metadata(self, include_app_profile): + """request should attach metadata headers""" + profile = "profile" if include_app_profile else None + async with _make_client() as client: + async with client.get_table("i", "t", app_profile_id=profile) as table: + with mock.patch.object( + client._gapic_client, "mutate_row", AsyncMock() + ) as read_rows: + await table.mutate_row("rk", mock.Mock()) + kwargs = read_rows.call_args_list[0].kwargs + metadata = kwargs["metadata"] + goog_metadata = None + for key, value in metadata: + if key == "x-goog-request-params": + goog_metadata = value + assert goog_metadata is not None, "x-goog-request-params not found" + assert "table_name=" + table.table_name in goog_metadata + if include_app_profile: + assert "app_profile_id=profile" in goog_metadata + else: + assert "app_profile_id=" not in goog_metadata + + @pytest.mark.parametrize("mutations", [[], None]) + @pytest.mark.asyncio + async def test_mutate_row_no_mutations(self, mutations): + async with _make_client() as client: + async with client.get_table("instance", "table") as table: + with pytest.raises(ValueError) as e: + await table.mutate_row("key", mutations=mutations) + assert e.value.args[0] == "No mutations provided" + + +class TestBulkMutateRows: + async def _mock_response(self, response_list): + from google.cloud.bigtable_v2.types import MutateRowsResponse + from google.rpc import status_pb2 + + statuses = [] + for response in response_list: + if isinstance(response, core_exceptions.GoogleAPICallError): + statuses.append( + status_pb2.Status( + message=str(response), code=response.grpc_status_code.value[0] + ) + ) + else: + statuses.append(status_pb2.Status(code=0)) + entries = [ + MutateRowsResponse.Entry(index=i, status=statuses[i]) + for i in range(len(response_list)) + ] + + async def generator(): + yield MutateRowsResponse(entries=entries) + + return generator() + + @pytest.mark.asyncio + @pytest.mark.asyncio + @pytest.mark.parametrize( + "mutation_arg", + [ + [mutations.SetCell("family", b"qualifier", b"value")], + [ + mutations.SetCell( + "family", b"qualifier", b"value", timestamp_micros=1234567890 + ) + ], + [mutations.DeleteRangeFromColumn("family", b"qualifier")], + [mutations.DeleteAllFromFamily("family")], + [mutations.DeleteAllFromRow()], + [mutations.SetCell("family", b"qualifier", b"value")], + [ + mutations.DeleteRangeFromColumn("family", b"qualifier"), + mutations.DeleteAllFromRow(), + ], + ], + ) + async def test_bulk_mutate_rows(self, mutation_arg): + """Test mutations with no errors""" + expected_attempt_timeout = 19 + async with _make_client(project="project") as client: + async with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "mutate_rows" + ) as mock_gapic: + mock_gapic.return_value = 
self._mock_response([None]) + bulk_mutation = mutations.RowMutationEntry(b"row_key", mutation_arg) + await table.bulk_mutate_rows( + [bulk_mutation], + attempt_timeout=expected_attempt_timeout, + ) + assert mock_gapic.call_count == 1 + kwargs = mock_gapic.call_args[1] + assert ( + kwargs["table_name"] + == "projects/project/instances/instance/tables/table" + ) + assert kwargs["entries"] == [bulk_mutation._to_pb()] + assert kwargs["timeout"] == expected_attempt_timeout + assert kwargs["retry"] is None + + @pytest.mark.asyncio + async def test_bulk_mutate_rows_multiple_entries(self): + """Test mutations with no errors""" + async with _make_client(project="project") as client: + async with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "mutate_rows" + ) as mock_gapic: + mock_gapic.return_value = self._mock_response([None, None]) + mutation_list = [mutations.DeleteAllFromRow()] + entry_1 = mutations.RowMutationEntry(b"row_key_1", mutation_list) + entry_2 = mutations.RowMutationEntry(b"row_key_2", mutation_list) + await table.bulk_mutate_rows( + [entry_1, entry_2], + ) + assert mock_gapic.call_count == 1 + kwargs = mock_gapic.call_args[1] + assert ( + kwargs["table_name"] + == "projects/project/instances/instance/tables/table" + ) + assert kwargs["entries"][0] == entry_1._to_pb() + assert kwargs["entries"][1] == entry_2._to_pb() + + @pytest.mark.asyncio + @pytest.mark.parametrize( + "exception", + [ + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ], + ) + async def test_bulk_mutate_rows_idempotent_mutation_error_retryable( + self, exception + ): + """ + Individual idempotent mutations should be retried if they fail with a retryable error + """ + from google.cloud.bigtable.data.exceptions import ( + RetryExceptionGroup, + FailedMutationEntryError, + MutationsExceptionGroup, + ) + + async with _make_client(project="project") as client: + async with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "mutate_rows" + ) as mock_gapic: + mock_gapic.side_effect = lambda *a, **k: self._mock_response( + [exception("mock")] + ) + with pytest.raises(MutationsExceptionGroup) as e: + mutation = mutations.DeleteAllFromRow() + entry = mutations.RowMutationEntry(b"row_key", [mutation]) + assert mutation.is_idempotent() is True + await table.bulk_mutate_rows([entry], operation_timeout=0.05) + assert len(e.value.exceptions) == 1 + failed_exception = e.value.exceptions[0] + assert "non-idempotent" not in str(failed_exception) + assert isinstance(failed_exception, FailedMutationEntryError) + cause = failed_exception.__cause__ + assert isinstance(cause, RetryExceptionGroup) + assert isinstance(cause.exceptions[0], exception) + # last exception should be due to retry timeout + assert isinstance( + cause.exceptions[-1], core_exceptions.DeadlineExceeded + ) + + @pytest.mark.asyncio + @pytest.mark.parametrize( + "exception", + [ + core_exceptions.OutOfRange, + core_exceptions.NotFound, + core_exceptions.FailedPrecondition, + core_exceptions.Aborted, + ], + ) + async def test_bulk_mutate_rows_idempotent_mutation_error_non_retryable( + self, exception + ): + """ + Individual idempotent mutations should not be retried if they fail with a non-retryable error + """ + from google.cloud.bigtable.data.exceptions import ( + FailedMutationEntryError, + MutationsExceptionGroup, + ) + + async with _make_client(project="project") as client: + async with client.get_table("instance", "table") as table: + with 
mock.patch.object( + client._gapic_client, "mutate_rows" + ) as mock_gapic: + mock_gapic.side_effect = lambda *a, **k: self._mock_response( + [exception("mock")] + ) + with pytest.raises(MutationsExceptionGroup) as e: + mutation = mutations.DeleteAllFromRow() + entry = mutations.RowMutationEntry(b"row_key", [mutation]) + assert mutation.is_idempotent() is True + await table.bulk_mutate_rows([entry], operation_timeout=0.05) + assert len(e.value.exceptions) == 1 + failed_exception = e.value.exceptions[0] + assert "non-idempotent" not in str(failed_exception) + assert isinstance(failed_exception, FailedMutationEntryError) + cause = failed_exception.__cause__ + assert isinstance(cause, exception) + + @pytest.mark.parametrize( + "retryable_exception", + [ + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ], + ) + @pytest.mark.asyncio + async def test_bulk_mutate_idempotent_retryable_request_errors( + self, retryable_exception + ): + """ + Individual idempotent mutations should be retried if the request fails with a retryable error + """ + from google.cloud.bigtable.data.exceptions import ( + RetryExceptionGroup, + FailedMutationEntryError, + MutationsExceptionGroup, + ) + + async with _make_client(project="project") as client: + async with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "mutate_rows" + ) as mock_gapic: + mock_gapic.side_effect = retryable_exception("mock") + with pytest.raises(MutationsExceptionGroup) as e: + mutation = mutations.SetCell( + "family", b"qualifier", b"value", timestamp_micros=123 + ) + entry = mutations.RowMutationEntry(b"row_key", [mutation]) + assert mutation.is_idempotent() is True + await table.bulk_mutate_rows([entry], operation_timeout=0.05) + assert len(e.value.exceptions) == 1 + failed_exception = e.value.exceptions[0] + assert isinstance(failed_exception, FailedMutationEntryError) + assert "non-idempotent" not in str(failed_exception) + cause = failed_exception.__cause__ + assert isinstance(cause, RetryExceptionGroup) + assert isinstance(cause.exceptions[0], retryable_exception) + + @pytest.mark.asyncio + @pytest.mark.parametrize( + "retryable_exception", + [ + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ], + ) + async def test_bulk_mutate_rows_non_idempotent_retryable_errors( + self, retryable_exception + ): + """Non-Idempotent mutations should never be retried""" + from google.cloud.bigtable.data.exceptions import ( + FailedMutationEntryError, + MutationsExceptionGroup, + ) + + async with _make_client(project="project") as client: + async with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "mutate_rows" + ) as mock_gapic: + mock_gapic.side_effect = lambda *a, **k: self._mock_response( + [retryable_exception("mock")] + ) + with pytest.raises(MutationsExceptionGroup) as e: + mutation = mutations.SetCell( + "family", b"qualifier", b"value", -1 + ) + entry = mutations.RowMutationEntry(b"row_key", [mutation]) + assert mutation.is_idempotent() is False + await table.bulk_mutate_rows([entry], operation_timeout=0.2) + assert len(e.value.exceptions) == 1 + failed_exception = e.value.exceptions[0] + assert isinstance(failed_exception, FailedMutationEntryError) + assert "non-idempotent" in str(failed_exception) + cause = failed_exception.__cause__ + assert isinstance(cause, retryable_exception) + + @pytest.mark.parametrize( + "non_retryable_exception", + [ + core_exceptions.OutOfRange, + 
core_exceptions.NotFound, + core_exceptions.FailedPrecondition, + RuntimeError, + ValueError, + ], + ) + @pytest.mark.asyncio + async def test_bulk_mutate_rows_non_retryable_errors(self, non_retryable_exception): + """ + If the request fails with a non-retryable error, mutations should not be retried + """ + from google.cloud.bigtable.data.exceptions import ( + FailedMutationEntryError, + MutationsExceptionGroup, + ) + + async with _make_client(project="project") as client: + async with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "mutate_rows" + ) as mock_gapic: + mock_gapic.side_effect = non_retryable_exception("mock") + with pytest.raises(MutationsExceptionGroup) as e: + mutation = mutations.SetCell( + "family", b"qualifier", b"value", timestamp_micros=123 + ) + entry = mutations.RowMutationEntry(b"row_key", [mutation]) + assert mutation.is_idempotent() is True + await table.bulk_mutate_rows([entry], operation_timeout=0.2) + assert len(e.value.exceptions) == 1 + failed_exception = e.value.exceptions[0] + assert isinstance(failed_exception, FailedMutationEntryError) + assert "non-idempotent" not in str(failed_exception) + cause = failed_exception.__cause__ + assert isinstance(cause, non_retryable_exception) + + @pytest.mark.asyncio + async def test_bulk_mutate_error_index(self): + """ + Test partial failure, partial success. Errors should be associated with the correct index + """ + from google.api_core.exceptions import ( + DeadlineExceeded, + ServiceUnavailable, + FailedPrecondition, + ) + from google.cloud.bigtable.data.exceptions import ( + RetryExceptionGroup, + FailedMutationEntryError, + MutationsExceptionGroup, + ) + + async with _make_client(project="project") as client: + async with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "mutate_rows" + ) as mock_gapic: + # fail with retryable errors, then a non-retryable one + mock_gapic.side_effect = [ + self._mock_response([None, ServiceUnavailable("mock"), None]), + self._mock_response([DeadlineExceeded("mock")]), + self._mock_response([FailedPrecondition("final")]), + ] + with pytest.raises(MutationsExceptionGroup) as e: + mutation = mutations.SetCell( + "family", b"qualifier", b"value", timestamp_micros=123 + ) + entries = [ + mutations.RowMutationEntry( + (f"row_key_{i}").encode(), [mutation] + ) + for i in range(3) + ] + assert mutation.is_idempotent() is True + await table.bulk_mutate_rows(entries, operation_timeout=1000) + assert len(e.value.exceptions) == 1 + failed = e.value.exceptions[0] + assert isinstance(failed, FailedMutationEntryError) + assert failed.index == 1 + assert failed.entry == entries[1] + cause = failed.__cause__ + assert isinstance(cause, RetryExceptionGroup) + assert len(cause.exceptions) == 3 + assert isinstance(cause.exceptions[0], ServiceUnavailable) + assert isinstance(cause.exceptions[1], DeadlineExceeded) + assert isinstance(cause.exceptions[2], FailedPrecondition) + + @pytest.mark.asyncio + async def test_bulk_mutate_error_recovery(self): + """ + If an error occurs, then resolves, no exception should be raised + """ + from google.api_core.exceptions import DeadlineExceeded + + async with _make_client(project="project") as client: + table = client.get_table("instance", "table") + with mock.patch.object(client._gapic_client, "mutate_rows") as mock_gapic: + # fail with a retryable error, then succeed + mock_gapic.side_effect = [ + self._mock_response([DeadlineExceeded("mock")]), +
self._mock_response([None]), + ] + mutation = mutations.SetCell( + "family", b"qualifier", b"value", timestamp_micros=123 + ) + entries = [ + mutations.RowMutationEntry((f"row_key_{i}").encode(), [mutation]) + for i in range(3) + ] + await table.bulk_mutate_rows(entries, operation_timeout=1000) + + +class TestCheckAndMutateRow: + @pytest.mark.parametrize("gapic_result", [True, False]) + @pytest.mark.asyncio + async def test_check_and_mutate(self, gapic_result): + from google.cloud.bigtable_v2.types import CheckAndMutateRowResponse + + app_profile = "app_profile_id" + async with _make_client() as client: + async with client.get_table( + "instance", "table", app_profile_id=app_profile + ) as table: + with mock.patch.object( + client._gapic_client, "check_and_mutate_row" + ) as mock_gapic: + mock_gapic.return_value = CheckAndMutateRowResponse( + predicate_matched=gapic_result + ) + row_key = b"row_key" + predicate = None + true_mutations = [mock.Mock()] + false_mutations = [mock.Mock(), mock.Mock()] + operation_timeout = 0.2 + found = await table.check_and_mutate_row( + row_key, + predicate, + true_case_mutations=true_mutations, + false_case_mutations=false_mutations, + operation_timeout=operation_timeout, + ) + assert found == gapic_result + kwargs = mock_gapic.call_args[1] + assert kwargs["table_name"] == table.table_name + assert kwargs["row_key"] == row_key + assert kwargs["predicate_filter"] == predicate + assert kwargs["true_mutations"] == [ + m._to_pb() for m in true_mutations + ] + assert kwargs["false_mutations"] == [ + m._to_pb() for m in false_mutations + ] + assert kwargs["app_profile_id"] == app_profile + assert kwargs["timeout"] == operation_timeout + assert kwargs["retry"] is None + + @pytest.mark.asyncio + async def test_check_and_mutate_bad_timeout(self): + """Should raise error if operation_timeout < 0""" + async with _make_client() as client: + async with client.get_table("instance", "table") as table: + with pytest.raises(ValueError) as e: + await table.check_and_mutate_row( + b"row_key", + None, + true_case_mutations=[mock.Mock()], + false_case_mutations=[], + operation_timeout=-1, + ) + assert str(e.value) == "operation_timeout must be greater than 0" + + @pytest.mark.asyncio + async def test_check_and_mutate_single_mutations(self): + """if single mutations are passed, they should be internally wrapped in a list""" + from google.cloud.bigtable.data.mutations import SetCell + from google.cloud.bigtable_v2.types import CheckAndMutateRowResponse + + async with _make_client() as client: + async with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "check_and_mutate_row" + ) as mock_gapic: + mock_gapic.return_value = CheckAndMutateRowResponse( + predicate_matched=True + ) + true_mutation = SetCell("family", b"qualifier", b"value") + false_mutation = SetCell("family", b"qualifier", b"value") + await table.check_and_mutate_row( + b"row_key", + None, + true_case_mutations=true_mutation, + false_case_mutations=false_mutation, + ) + kwargs = mock_gapic.call_args[1] + assert kwargs["true_mutations"] == [true_mutation._to_pb()] + assert kwargs["false_mutations"] == [false_mutation._to_pb()] + + @pytest.mark.asyncio + async def test_check_and_mutate_predicate_object(self): + """predicate filter should be passed to gapic request""" + from google.cloud.bigtable_v2.types import CheckAndMutateRowResponse + + mock_predicate = mock.Mock() + predicate_pb = {"predicate": "dict"} + mock_predicate._to_pb.return_value = predicate_pb + async with 
_make_client() as client: + async with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "check_and_mutate_row" + ) as mock_gapic: + mock_gapic.return_value = CheckAndMutateRowResponse( + predicate_matched=True + ) + await table.check_and_mutate_row( + b"row_key", + mock_predicate, + false_case_mutations=[mock.Mock()], + ) + kwargs = mock_gapic.call_args[1] + assert kwargs["predicate_filter"] == predicate_pb + assert mock_predicate._to_pb.call_count == 1 + assert kwargs["retry"] is None + + @pytest.mark.asyncio + async def test_check_and_mutate_mutations_parsing(self): + """mutations objects should be converted to protos""" + from google.cloud.bigtable_v2.types import CheckAndMutateRowResponse + from google.cloud.bigtable.data.mutations import DeleteAllFromRow + + mutations = [mock.Mock() for _ in range(5)] + for idx, mutation in enumerate(mutations): + mutation._to_pb.return_value = f"fake {idx}" + mutations.append(DeleteAllFromRow()) + async with _make_client() as client: + async with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "check_and_mutate_row" + ) as mock_gapic: + mock_gapic.return_value = CheckAndMutateRowResponse( + predicate_matched=True + ) + await table.check_and_mutate_row( + b"row_key", + None, + true_case_mutations=mutations[0:2], + false_case_mutations=mutations[2:], + ) + kwargs = mock_gapic.call_args[1] + assert kwargs["true_mutations"] == ["fake 0", "fake 1"] + assert kwargs["false_mutations"] == [ + "fake 2", + "fake 3", + "fake 4", + DeleteAllFromRow()._to_pb(), + ] + assert all( + mutation._to_pb.call_count == 1 for mutation in mutations[:5] + ) + + +class TestReadModifyWriteRow: + @pytest.mark.parametrize( + "call_rules,expected_rules", + [ + ( + AppendValueRule("f", "c", b"1"), + [AppendValueRule("f", "c", b"1")._to_pb()], + ), + ( + [AppendValueRule("f", "c", b"1")], + [AppendValueRule("f", "c", b"1")._to_pb()], + ), + (IncrementRule("f", "c", 1), [IncrementRule("f", "c", 1)._to_pb()]), + ( + [AppendValueRule("f", "c", b"1"), IncrementRule("f", "c", 1)], + [ + AppendValueRule("f", "c", b"1")._to_pb(), + IncrementRule("f", "c", 1)._to_pb(), + ], + ), + ], + ) + @pytest.mark.asyncio + async def test_read_modify_write_call_rule_args(self, call_rules, expected_rules): + """ + Test that the gapic call is called with given rules + """ + async with _make_client() as client: + async with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "read_modify_write_row" + ) as mock_gapic: + await table.read_modify_write_row("key", call_rules) + assert mock_gapic.call_count == 1 + found_kwargs = mock_gapic.call_args_list[0][1] + assert found_kwargs["rules"] == expected_rules + assert found_kwargs["retry"] is None + + @pytest.mark.parametrize("rules", [[], None]) + @pytest.mark.asyncio + async def test_read_modify_write_no_rules(self, rules): + async with _make_client() as client: + async with client.get_table("instance", "table") as table: + with pytest.raises(ValueError) as e: + await table.read_modify_write_row("key", rules=rules) + assert e.value.args[0] == "rules must contain at least one item" + + @pytest.mark.asyncio + async def test_read_modify_write_call_defaults(self): + instance = "instance1" + table_id = "table1" + project = "project1" + row_key = "row_key1" + async with _make_client(project=project) as client: + async with client.get_table(instance, table_id) as table: + with mock.patch.object( + client._gapic_client, 
"read_modify_write_row" + ) as mock_gapic: + await table.read_modify_write_row(row_key, mock.Mock()) + assert mock_gapic.call_count == 1 + kwargs = mock_gapic.call_args_list[0][1] + assert ( + kwargs["table_name"] + == f"projects/{project}/instances/{instance}/tables/{table_id}" + ) + assert kwargs["app_profile_id"] is None + assert kwargs["row_key"] == row_key.encode() + assert kwargs["timeout"] > 1 + + @pytest.mark.asyncio + async def test_read_modify_write_call_overrides(self): + row_key = b"row_key1" + expected_timeout = 12345 + profile_id = "profile1" + async with _make_client() as client: + async with client.get_table( + "instance", "table_id", app_profile_id=profile_id + ) as table: + with mock.patch.object( + client._gapic_client, "read_modify_write_row" + ) as mock_gapic: + await table.read_modify_write_row( + row_key, + mock.Mock(), + operation_timeout=expected_timeout, + ) + assert mock_gapic.call_count == 1 + kwargs = mock_gapic.call_args_list[0][1] + assert kwargs["app_profile_id"] is profile_id + assert kwargs["row_key"] == row_key + assert kwargs["timeout"] == expected_timeout + + @pytest.mark.asyncio + async def test_read_modify_write_string_key(self): + row_key = "string_row_key1" + async with _make_client() as client: + async with client.get_table("instance", "table_id") as table: + with mock.patch.object( + client._gapic_client, "read_modify_write_row" + ) as mock_gapic: + await table.read_modify_write_row(row_key, mock.Mock()) + assert mock_gapic.call_count == 1 + kwargs = mock_gapic.call_args_list[0][1] + assert kwargs["row_key"] == row_key.encode() + + @pytest.mark.asyncio + async def test_read_modify_write_row_building(self): + """ + results from gapic call should be used to construct row + """ + from google.cloud.bigtable.data.row import Row + from google.cloud.bigtable_v2.types import ReadModifyWriteRowResponse + from google.cloud.bigtable_v2.types import Row as RowPB + + mock_response = ReadModifyWriteRowResponse(row=RowPB()) + async with _make_client() as client: + async with client.get_table("instance", "table_id") as table: + with mock.patch.object( + client._gapic_client, "read_modify_write_row" + ) as mock_gapic: + with mock.patch.object(Row, "_from_pb") as constructor_mock: + mock_gapic.return_value = mock_response + await table.read_modify_write_row("key", mock.Mock()) + assert constructor_mock.call_count == 1 + constructor_mock.assert_called_once_with(mock_response.row) diff --git a/packages/google-cloud-bigtable/tests/unit/data/_async/test_mutations_batcher.py b/packages/google-cloud-bigtable/tests/unit/data/_async/test_mutations_batcher.py new file mode 100644 index 000000000000..cca7c982443d --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/data/_async/test_mutations_batcher.py @@ -0,0 +1,1184 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import pytest +import asyncio +import google.api_core.exceptions as core_exceptions +from google.cloud.bigtable.data.exceptions import _MutateRowsIncomplete +from google.cloud.bigtable.data import TABLE_DEFAULT + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock +except ImportError: # pragma: NO COVER + import mock # type: ignore + from mock import AsyncMock # type: ignore + + +def _make_mutation(count=1, size=1): + mutation = mock.Mock() + mutation.size.return_value = size + mutation.mutations = [mock.Mock()] * count + return mutation + + +class Test_FlowControl: + def _make_one(self, max_mutation_count=10, max_mutation_bytes=100): + from google.cloud.bigtable.data._async.mutations_batcher import ( + _FlowControlAsync, + ) + + return _FlowControlAsync(max_mutation_count, max_mutation_bytes) + + def test_ctor(self): + max_mutation_count = 9 + max_mutation_bytes = 19 + instance = self._make_one(max_mutation_count, max_mutation_bytes) + assert instance._max_mutation_count == max_mutation_count + assert instance._max_mutation_bytes == max_mutation_bytes + assert instance._in_flight_mutation_count == 0 + assert instance._in_flight_mutation_bytes == 0 + assert isinstance(instance._capacity_condition, asyncio.Condition) + + def test_ctor_invalid_values(self): + """Test that values are positive, and fit within expected limits""" + with pytest.raises(ValueError) as e: + self._make_one(0, 1) + assert "max_mutation_count must be greater than 0" in str(e.value) + with pytest.raises(ValueError) as e: + self._make_one(1, 0) + assert "max_mutation_bytes must be greater than 0" in str(e.value) + + @pytest.mark.parametrize( + "max_count,max_size,existing_count,existing_size,new_count,new_size,expected", + [ + (1, 1, 0, 0, 0, 0, True), + (1, 1, 1, 1, 1, 1, False), + (10, 10, 0, 0, 0, 0, True), + (10, 10, 0, 0, 9, 9, True), + (10, 10, 0, 0, 11, 9, True), + (10, 10, 0, 1, 11, 9, True), + (10, 10, 1, 0, 11, 9, False), + (10, 10, 0, 0, 9, 11, True), + (10, 10, 1, 0, 9, 11, True), + (10, 10, 0, 1, 9, 11, False), + (10, 1, 0, 0, 1, 0, True), + (1, 10, 0, 0, 0, 8, True), + (float("inf"), float("inf"), 0, 0, 1e10, 1e10, True), + (8, 8, 0, 0, 1e10, 1e10, True), + (12, 12, 6, 6, 5, 5, True), + (12, 12, 5, 5, 6, 6, True), + (12, 12, 6, 6, 6, 6, True), + (12, 12, 6, 6, 7, 7, False), + # allow capacity check if new_count or new_size exceeds limits + (12, 12, 0, 0, 13, 13, True), + (12, 12, 12, 0, 0, 13, True), + (12, 12, 0, 12, 13, 0, True), + # but not if there's already values in flight + (12, 12, 1, 1, 13, 13, False), + (12, 12, 1, 1, 0, 13, False), + (12, 12, 1, 1, 13, 0, False), + ], + ) + def test__has_capacity( + self, + max_count, + max_size, + existing_count, + existing_size, + new_count, + new_size, + expected, + ): + """ + _has_capacity should return True if the new mutation will will not exceed the max count or size + """ + instance = self._make_one(max_count, max_size) + instance._in_flight_mutation_count = existing_count + instance._in_flight_mutation_bytes = existing_size + assert instance._has_capacity(new_count, new_size) == expected + + @pytest.mark.asyncio + @pytest.mark.parametrize( + "existing_count,existing_size,added_count,added_size,new_count,new_size", + [ + (0, 0, 0, 0, 0, 0), + (2, 2, 1, 1, 1, 1), + (2, 0, 1, 0, 1, 0), + (0, 2, 0, 1, 0, 1), + (10, 10, 0, 0, 10, 10), + (10, 10, 5, 5, 5, 5), + (0, 0, 1, 1, -1, -1), + ], + ) + async def test_remove_from_flow_value_update( + self, + existing_count, + existing_size, + 
added_count, + added_size, + new_count, + new_size, + ): + """ + completed mutations should lower the inflight values + """ + instance = self._make_one() + instance._in_flight_mutation_count = existing_count + instance._in_flight_mutation_bytes = existing_size + mutation = _make_mutation(added_count, added_size) + await instance.remove_from_flow(mutation) + assert instance._in_flight_mutation_count == new_count + assert instance._in_flight_mutation_bytes == new_size + + @pytest.mark.asyncio + async def test__remove_from_flow_unlock(self): + """capacity condition should notify after mutation is complete""" + instance = self._make_one(10, 10) + instance._in_flight_mutation_count = 10 + instance._in_flight_mutation_bytes = 10 + + async def task_routine(): + async with instance._capacity_condition: + await instance._capacity_condition.wait_for( + lambda: instance._has_capacity(1, 1) + ) + + task = asyncio.create_task(task_routine()) + await asyncio.sleep(0.05) + # should be blocked due to capacity + assert task.done() is False + # try changing size + mutation = _make_mutation(count=0, size=5) + await instance.remove_from_flow([mutation]) + await asyncio.sleep(0.05) + assert instance._in_flight_mutation_count == 10 + assert instance._in_flight_mutation_bytes == 5 + assert task.done() is False + # try changing count + instance._in_flight_mutation_bytes = 10 + mutation = _make_mutation(count=5, size=0) + await instance.remove_from_flow([mutation]) + await asyncio.sleep(0.05) + assert instance._in_flight_mutation_count == 5 + assert instance._in_flight_mutation_bytes == 10 + assert task.done() is False + # try changing both + instance._in_flight_mutation_count = 10 + mutation = _make_mutation(count=5, size=5) + await instance.remove_from_flow([mutation]) + await asyncio.sleep(0.05) + assert instance._in_flight_mutation_count == 5 + assert instance._in_flight_mutation_bytes == 5 + # task should be complete + assert task.done() is True + + @pytest.mark.asyncio + @pytest.mark.parametrize( + "mutations,count_cap,size_cap,expected_results", + [ + # high capacity results in no batching + ([(5, 5), (1, 1), (1, 1)], 10, 10, [[(5, 5), (1, 1), (1, 1)]]), + # low capacity splits up into batches + ([(1, 1), (1, 1), (1, 1)], 1, 1, [[(1, 1)], [(1, 1)], [(1, 1)]]), + # test count as limiting factor + ([(1, 1), (1, 1), (1, 1)], 2, 10, [[(1, 1), (1, 1)], [(1, 1)]]), + # test size as limiting factor + ([(1, 1), (1, 1), (1, 1)], 10, 2, [[(1, 1), (1, 1)], [(1, 1)]]), + # test with some bloackages and some flows + ( + [(1, 1), (5, 5), (4, 1), (1, 4), (1, 1)], + 5, + 5, + [[(1, 1)], [(5, 5)], [(4, 1), (1, 4)], [(1, 1)]], + ), + ], + ) + async def test_add_to_flow(self, mutations, count_cap, size_cap, expected_results): + """ + Test batching with various flow control settings + """ + mutation_objs = [_make_mutation(count=m[0], size=m[1]) for m in mutations] + instance = self._make_one(count_cap, size_cap) + i = 0 + async for batch in instance.add_to_flow(mutation_objs): + expected_batch = expected_results[i] + assert len(batch) == len(expected_batch) + for j in range(len(expected_batch)): + # check counts + assert len(batch[j].mutations) == expected_batch[j][0] + # check sizes + assert batch[j].size() == expected_batch[j][1] + # update lock + await instance.remove_from_flow(batch) + i += 1 + assert i == len(expected_results) + + @pytest.mark.asyncio + @pytest.mark.parametrize( + "mutations,max_limit,expected_results", + [ + ([(1, 1)] * 11, 10, [[(1, 1)] * 10, [(1, 1)]]), + ([(1, 1)] * 10, 1, [[(1, 1)] for _ in 
range(10)]), + ([(1, 1)] * 10, 2, [[(1, 1), (1, 1)] for _ in range(5)]), + ], + ) + async def test_add_to_flow_max_mutation_limits( + self, mutations, max_limit, expected_results + ): + """ + Test flow control running up against the max API limit + Should submit request early, even if the flow control has room for more + """ + with mock.patch( + "google.cloud.bigtable.data._async.mutations_batcher._MUTATE_ROWS_REQUEST_MUTATION_LIMIT", + max_limit, + ): + mutation_objs = [_make_mutation(count=m[0], size=m[1]) for m in mutations] + # flow control has no limits except API restrictions + instance = self._make_one(float("inf"), float("inf")) + i = 0 + async for batch in instance.add_to_flow(mutation_objs): + expected_batch = expected_results[i] + assert len(batch) == len(expected_batch) + for j in range(len(expected_batch)): + # check counts + assert len(batch[j].mutations) == expected_batch[j][0] + # check sizes + assert batch[j].size() == expected_batch[j][1] + # update lock + await instance.remove_from_flow(batch) + i += 1 + assert i == len(expected_results) + + @pytest.mark.asyncio + async def test_add_to_flow_oversize(self): + """ + mutations over the flow control limits should still be accepted + """ + instance = self._make_one(2, 3) + large_size_mutation = _make_mutation(count=1, size=10) + large_count_mutation = _make_mutation(count=10, size=1) + results = [out async for out in instance.add_to_flow([large_size_mutation])] + assert len(results) == 1 + await instance.remove_from_flow(results[0]) + count_results = [ + out async for out in instance.add_to_flow(large_count_mutation) + ] + assert len(count_results) == 1 + + +class TestMutationsBatcherAsync: + def _get_target_class(self): + from google.cloud.bigtable.data._async.mutations_batcher import ( + MutationsBatcherAsync, + ) + + return MutationsBatcherAsync + + def _make_one(self, table=None, **kwargs): + from google.api_core.exceptions import DeadlineExceeded + from google.api_core.exceptions import ServiceUnavailable + + if table is None: + table = mock.Mock() + table.default_mutate_rows_operation_timeout = 10 + table.default_mutate_rows_attempt_timeout = 10 + table.default_mutate_rows_retryable_errors = ( + DeadlineExceeded, + ServiceUnavailable, + ) + + return self._get_target_class()(table, **kwargs) + + @mock.patch( + "google.cloud.bigtable.data._async.mutations_batcher.MutationsBatcherAsync._start_flush_timer" + ) + @pytest.mark.asyncio + async def test_ctor_defaults(self, flush_timer_mock): + flush_timer_mock.return_value = asyncio.create_task(asyncio.sleep(0)) + table = mock.Mock() + table.default_mutate_rows_operation_timeout = 10 + table.default_mutate_rows_attempt_timeout = 8 + table.default_mutate_rows_retryable_errors = [Exception] + async with self._make_one(table) as instance: + assert instance._table == table + assert instance.closed is False + assert instance._flush_jobs == set() + assert len(instance._staged_entries) == 0 + assert len(instance._oldest_exceptions) == 0 + assert len(instance._newest_exceptions) == 0 + assert instance._exception_list_limit == 10 + assert instance._exceptions_since_last_raise == 0 + assert instance._flow_control._max_mutation_count == 100000 + assert instance._flow_control._max_mutation_bytes == 104857600 + assert instance._flow_control._in_flight_mutation_count == 0 + assert instance._flow_control._in_flight_mutation_bytes == 0 + assert instance._entries_processed_since_last_raise == 0 + assert ( + instance._operation_timeout + == table.default_mutate_rows_operation_timeout + ) + 
assert ( + instance._attempt_timeout == table.default_mutate_rows_attempt_timeout + ) + assert ( + instance._retryable_errors == table.default_mutate_rows_retryable_errors + ) + await asyncio.sleep(0) + assert flush_timer_mock.call_count == 1 + assert flush_timer_mock.call_args[0][0] == 5 + assert isinstance(instance._flush_timer, asyncio.Future) + + @mock.patch( + "google.cloud.bigtable.data._async.mutations_batcher.MutationsBatcherAsync._start_flush_timer", + ) + @pytest.mark.asyncio + async def test_ctor_explicit(self, flush_timer_mock): + """Test with explicit parameters""" + flush_timer_mock.return_value = asyncio.create_task(asyncio.sleep(0)) + table = mock.Mock() + flush_interval = 20 + flush_limit_count = 17 + flush_limit_bytes = 19 + flow_control_max_mutation_count = 1001 + flow_control_max_bytes = 12 + operation_timeout = 11 + attempt_timeout = 2 + retryable_errors = [Exception] + async with self._make_one( + table, + flush_interval=flush_interval, + flush_limit_mutation_count=flush_limit_count, + flush_limit_bytes=flush_limit_bytes, + flow_control_max_mutation_count=flow_control_max_mutation_count, + flow_control_max_bytes=flow_control_max_bytes, + batch_operation_timeout=operation_timeout, + batch_attempt_timeout=attempt_timeout, + batch_retryable_errors=retryable_errors, + ) as instance: + assert instance._table == table + assert instance.closed is False + assert instance._flush_jobs == set() + assert len(instance._staged_entries) == 0 + assert len(instance._oldest_exceptions) == 0 + assert len(instance._newest_exceptions) == 0 + assert instance._exception_list_limit == 10 + assert instance._exceptions_since_last_raise == 0 + assert ( + instance._flow_control._max_mutation_count + == flow_control_max_mutation_count + ) + assert instance._flow_control._max_mutation_bytes == flow_control_max_bytes + assert instance._flow_control._in_flight_mutation_count == 0 + assert instance._flow_control._in_flight_mutation_bytes == 0 + assert instance._entries_processed_since_last_raise == 0 + assert instance._operation_timeout == operation_timeout + assert instance._attempt_timeout == attempt_timeout + assert instance._retryable_errors == retryable_errors + await asyncio.sleep(0) + assert flush_timer_mock.call_count == 1 + assert flush_timer_mock.call_args[0][0] == flush_interval + assert isinstance(instance._flush_timer, asyncio.Future) + + @mock.patch( + "google.cloud.bigtable.data._async.mutations_batcher.MutationsBatcherAsync._start_flush_timer" + ) + @pytest.mark.asyncio + async def test_ctor_no_flush_limits(self, flush_timer_mock): + """Test with None for flush limits""" + flush_timer_mock.return_value = asyncio.create_task(asyncio.sleep(0)) + table = mock.Mock() + table.default_mutate_rows_operation_timeout = 10 + table.default_mutate_rows_attempt_timeout = 8 + table.default_mutate_rows_retryable_errors = () + flush_interval = None + flush_limit_count = None + flush_limit_bytes = None + async with self._make_one( + table, + flush_interval=flush_interval, + flush_limit_mutation_count=flush_limit_count, + flush_limit_bytes=flush_limit_bytes, + ) as instance: + assert instance._table == table + assert instance.closed is False + assert instance._staged_entries == [] + assert len(instance._oldest_exceptions) == 0 + assert len(instance._newest_exceptions) == 0 + assert instance._exception_list_limit == 10 + assert instance._exceptions_since_last_raise == 0 + assert instance._flow_control._in_flight_mutation_count == 0 + assert instance._flow_control._in_flight_mutation_bytes == 0 + assert 
instance._entries_processed_since_last_raise == 0 + await asyncio.sleep(0) + assert flush_timer_mock.call_count == 1 + assert flush_timer_mock.call_args[0][0] is None + assert isinstance(instance._flush_timer, asyncio.Future) + + @pytest.mark.asyncio + async def test_ctor_invalid_values(self): + """Test that timeout values are positive, and fit within expected limits""" + with pytest.raises(ValueError) as e: + self._make_one(batch_operation_timeout=-1) + assert "operation_timeout must be greater than 0" in str(e.value) + with pytest.raises(ValueError) as e: + self._make_one(batch_attempt_timeout=-1) + assert "attempt_timeout must be greater than 0" in str(e.value) + + def test_default_argument_consistency(self): + """ + We supply default arguments in MutationsBatcherAsync.__init__, and in + table.mutations_batcher. Make sure any changes to defaults are applied to + both places + """ + from google.cloud.bigtable.data._async.client import TableAsync + from google.cloud.bigtable.data._async.mutations_batcher import ( + MutationsBatcherAsync, + ) + import inspect + + get_batcher_signature = dict( + inspect.signature(TableAsync.mutations_batcher).parameters + ) + get_batcher_signature.pop("self") + batcher_init_signature = dict( + inspect.signature(MutationsBatcherAsync).parameters + ) + batcher_init_signature.pop("table") + # both should have same number of arguments + assert len(get_batcher_signature.keys()) == len(batcher_init_signature.keys()) + assert len(get_batcher_signature) == 8 # update if expected params change + # both should have same argument names + assert set(get_batcher_signature.keys()) == set(batcher_init_signature.keys()) + # both should have same default values + for arg_name in get_batcher_signature.keys(): + assert ( + get_batcher_signature[arg_name].default + == batcher_init_signature[arg_name].default + ) + + @mock.patch( + "google.cloud.bigtable.data._async.mutations_batcher.MutationsBatcherAsync._schedule_flush" + ) + @pytest.mark.asyncio + async def test__start_flush_timer_w_None(self, flush_mock): + """Empty timer should return immediately""" + async with self._make_one() as instance: + with mock.patch("asyncio.sleep") as sleep_mock: + await instance._start_flush_timer(None) + assert sleep_mock.call_count == 0 + assert flush_mock.call_count == 0 + + @mock.patch( + "google.cloud.bigtable.data._async.mutations_batcher.MutationsBatcherAsync._schedule_flush" + ) + @pytest.mark.asyncio + async def test__start_flush_timer_call_when_closed(self, flush_mock): + """closed batcher's timer should return immediately""" + async with self._make_one() as instance: + await instance.close() + flush_mock.reset_mock() + with mock.patch("asyncio.sleep") as sleep_mock: + await instance._start_flush_timer(1) + assert sleep_mock.call_count == 0 + assert flush_mock.call_count == 0 + + @mock.patch( + "google.cloud.bigtable.data._async.mutations_batcher.MutationsBatcherAsync._schedule_flush" + ) + @pytest.mark.asyncio + async def test__flush_timer(self, flush_mock): + """Timer should continue to call _schedule_flush in a loop""" + expected_sleep = 12 + async with self._make_one(flush_interval=expected_sleep) as instance: + instance._staged_entries = [mock.Mock()] + loop_num = 3 + with mock.patch("asyncio.sleep") as sleep_mock: + sleep_mock.side_effect = [None] * loop_num + [asyncio.CancelledError()] + try: + await instance._flush_timer + except asyncio.CancelledError: + pass + assert sleep_mock.call_count == loop_num + 1 + sleep_mock.assert_called_with(expected_sleep) + assert 
flush_mock.call_count == loop_num + + @mock.patch( + "google.cloud.bigtable.data._async.mutations_batcher.MutationsBatcherAsync._schedule_flush" + ) + @pytest.mark.asyncio + async def test__flush_timer_no_mutations(self, flush_mock): + """Timer should not flush if no new mutations have been staged""" + expected_sleep = 12 + async with self._make_one(flush_interval=expected_sleep) as instance: + loop_num = 3 + with mock.patch("asyncio.sleep") as sleep_mock: + sleep_mock.side_effect = [None] * loop_num + [asyncio.CancelledError()] + try: + await instance._flush_timer + except asyncio.CancelledError: + pass + assert sleep_mock.call_count == loop_num + 1 + sleep_mock.assert_called_with(expected_sleep) + assert flush_mock.call_count == 0 + + @mock.patch( + "google.cloud.bigtable.data._async.mutations_batcher.MutationsBatcherAsync._schedule_flush" + ) + @pytest.mark.asyncio + async def test__flush_timer_close(self, flush_mock): + """Timer should terminate after close""" + async with self._make_one() as instance: + with mock.patch("asyncio.sleep"): + # let task run in background + await asyncio.sleep(0.5) + assert instance._flush_timer.done() is False + # close the batcher + await instance.close() + await asyncio.sleep(0.1) + # task should be complete + assert instance._flush_timer.done() is True + + @pytest.mark.asyncio + async def test_append_closed(self): + """Should raise exception""" + with pytest.raises(RuntimeError): + instance = self._make_one() + await instance.close() + await instance.append(mock.Mock()) + + @pytest.mark.asyncio + async def test_append_wrong_mutation(self): + """ + Mutation objects should raise an exception. + Only support RowMutationEntry + """ + from google.cloud.bigtable.data.mutations import DeleteAllFromRow + + async with self._make_one() as instance: + expected_error = "invalid mutation type: DeleteAllFromRow. 
Only RowMutationEntry objects are supported by batcher" + with pytest.raises(ValueError) as e: + await instance.append(DeleteAllFromRow()) + assert str(e.value) == expected_error + + @pytest.mark.asyncio + async def test_append_outside_flow_limits(self): + """entries larger than mutation limits are still processed""" + async with self._make_one( + flow_control_max_mutation_count=1, flow_control_max_bytes=1 + ) as instance: + oversized_entry = _make_mutation(count=0, size=2) + await instance.append(oversized_entry) + assert instance._staged_entries == [oversized_entry] + assert instance._staged_count == 0 + assert instance._staged_bytes == 2 + instance._staged_entries = [] + async with self._make_one( + flow_control_max_mutation_count=1, flow_control_max_bytes=1 + ) as instance: + overcount_entry = _make_mutation(count=2, size=0) + await instance.append(overcount_entry) + assert instance._staged_entries == [overcount_entry] + assert instance._staged_count == 2 + assert instance._staged_bytes == 0 + instance._staged_entries = [] + + @pytest.mark.asyncio + async def test_append_flush_runs_after_limit_hit(self): + """ + If the user appends a bunch of entries above the flush limits back-to-back, + it should still flush in a single task + """ + from google.cloud.bigtable.data._async.mutations_batcher import ( + MutationsBatcherAsync, + ) + + with mock.patch.object( + MutationsBatcherAsync, "_execute_mutate_rows" + ) as op_mock: + async with self._make_one(flush_limit_bytes=100) as instance: + # mock network calls + async def mock_call(*args, **kwargs): + return [] + + op_mock.side_effect = mock_call + # append a mutation just under the size limit + await instance.append(_make_mutation(size=99)) + # append a bunch of entries back-to-back in a loop + num_entries = 10 + for _ in range(num_entries): + await instance.append(_make_mutation(size=1)) + # let any flush jobs finish + await asyncio.gather(*instance._flush_jobs) + # should have only flushed once, with large mutation and first mutation in loop + assert op_mock.call_count == 1 + sent_batch = op_mock.call_args[0][0] + assert len(sent_batch) == 2 + # others should still be pending + assert len(instance._staged_entries) == num_entries - 1 + + @pytest.mark.parametrize( + "flush_count,flush_bytes,mutation_count,mutation_bytes,expect_flush", + [ + (10, 10, 1, 1, False), + (10, 10, 9, 9, False), + (10, 10, 10, 1, True), + (10, 10, 1, 10, True), + (10, 10, 10, 10, True), + (1, 1, 10, 10, True), + (1, 1, 0, 0, False), + ], + ) + @pytest.mark.asyncio + async def test_append( + self, flush_count, flush_bytes, mutation_count, mutation_bytes, expect_flush + ): + """test appending different mutations, and checking if it causes a flush""" + async with self._make_one( + flush_limit_mutation_count=flush_count, flush_limit_bytes=flush_bytes + ) as instance: + assert instance._staged_count == 0 + assert instance._staged_bytes == 0 + assert instance._staged_entries == [] + mutation = _make_mutation(count=mutation_count, size=mutation_bytes) + with mock.patch.object(instance, "_schedule_flush") as flush_mock: + await instance.append(mutation) + assert flush_mock.call_count == bool(expect_flush) + assert instance._staged_count == mutation_count + assert instance._staged_bytes == mutation_bytes + assert instance._staged_entries == [mutation] + instance._staged_entries = [] + + @pytest.mark.asyncio + async def test_append_multiple_sequentially(self): + """Append multiple mutations""" + async with self._make_one( + flush_limit_mutation_count=8, flush_limit_bytes=8 + 
) as instance: + assert instance._staged_count == 0 + assert instance._staged_bytes == 0 + assert instance._staged_entries == [] + mutation = _make_mutation(count=2, size=3) + with mock.patch.object(instance, "_schedule_flush") as flush_mock: + await instance.append(mutation) + assert flush_mock.call_count == 0 + assert instance._staged_count == 2 + assert instance._staged_bytes == 3 + assert len(instance._staged_entries) == 1 + await instance.append(mutation) + assert flush_mock.call_count == 0 + assert instance._staged_count == 4 + assert instance._staged_bytes == 6 + assert len(instance._staged_entries) == 2 + await instance.append(mutation) + assert flush_mock.call_count == 1 + assert instance._staged_count == 6 + assert instance._staged_bytes == 9 + assert len(instance._staged_entries) == 3 + instance._staged_entries = [] + + @pytest.mark.asyncio + async def test_flush_flow_control_concurrent_requests(self): + """ + requests should happen in parallel if flow control breaks up single flush into batches + """ + import time + + num_calls = 10 + fake_mutations = [_make_mutation(count=1) for _ in range(num_calls)] + async with self._make_one(flow_control_max_mutation_count=1) as instance: + with mock.patch.object( + instance, "_execute_mutate_rows", AsyncMock() + ) as op_mock: + # mock network calls + async def mock_call(*args, **kwargs): + await asyncio.sleep(0.1) + return [] + + op_mock.side_effect = mock_call + start_time = time.monotonic() + # flush one large batch, that will be broken up into smaller batches + instance._staged_entries = fake_mutations + instance._schedule_flush() + await asyncio.sleep(0.01) + # make room for new mutations + for i in range(num_calls): + await instance._flow_control.remove_from_flow( + [_make_mutation(count=1)] + ) + await asyncio.sleep(0.01) + # allow flushes to complete + await asyncio.gather(*instance._flush_jobs) + duration = time.monotonic() - start_time + assert len(instance._oldest_exceptions) == 0 + assert len(instance._newest_exceptions) == 0 + # if flushes were sequential, total duration would be 1s + assert duration < 0.5 + assert op_mock.call_count == num_calls + + @pytest.mark.asyncio + async def test_schedule_flush_no_mutations(self): + """schedule flush should return None if no staged mutations""" + async with self._make_one() as instance: + with mock.patch.object(instance, "_flush_internal") as flush_mock: + for i in range(3): + assert instance._schedule_flush() is None + assert flush_mock.call_count == 0 + + @pytest.mark.asyncio + async def test_schedule_flush_with_mutations(self): + """if new mutations exist, should add a new flush task to _flush_jobs""" + async with self._make_one() as instance: + with mock.patch.object(instance, "_flush_internal") as flush_mock: + for i in range(1, 4): + mutation = mock.Mock() + instance._staged_entries = [mutation] + instance._schedule_flush() + assert instance._staged_entries == [] + # let flush task run + await asyncio.sleep(0) + assert instance._staged_entries == [] + assert instance._staged_count == 0 + assert instance._staged_bytes == 0 + assert flush_mock.call_count == i + + @pytest.mark.asyncio + async def test__flush_internal(self): + """ + _flush_internal should: + - await previous flush call + - delegate batching to _flow_control + - call _execute_mutate_rows on each batch + - update self.exceptions and self._entries_processed_since_last_raise + """ + num_entries = 10 + async with self._make_one() as instance: + with mock.patch.object(instance, "_execute_mutate_rows") as execute_mock: + 
with mock.patch.object( + instance._flow_control, "add_to_flow" + ) as flow_mock: + # mock flow control to always return a single batch + async def gen(x): + yield x + + flow_mock.side_effect = lambda x: gen(x) + mutations = [_make_mutation(count=1, size=1)] * num_entries + await instance._flush_internal(mutations) + assert instance._entries_processed_since_last_raise == num_entries + assert execute_mock.call_count == 1 + assert flow_mock.call_count == 1 + instance._oldest_exceptions.clear() + instance._newest_exceptions.clear() + + @pytest.mark.asyncio + async def test_flush_clears_job_list(self): + """ + a job should be added to _flush_jobs when _schedule_flush is called, + and removed when it completes + """ + async with self._make_one() as instance: + with mock.patch.object(instance, "_flush_internal", AsyncMock()): + mutations = [_make_mutation(count=1, size=1)] + instance._staged_entries = mutations + assert instance._flush_jobs == set() + new_job = instance._schedule_flush() + assert instance._flush_jobs == {new_job} + await new_job + assert instance._flush_jobs == set() + + @pytest.mark.parametrize( + "num_starting,num_new_errors,expected_total_errors", + [ + (0, 0, 0), + (0, 1, 1), + (0, 2, 2), + (1, 0, 1), + (1, 1, 2), + (10, 2, 12), + (10, 20, 20), # should cap at 20 + ], + ) + @pytest.mark.asyncio + async def test__flush_internal_with_errors( + self, num_starting, num_new_errors, expected_total_errors + ): + """ + errors returned from _execute_mutate_rows should be added to internal exceptions + """ + from google.cloud.bigtable.data import exceptions + + num_entries = 10 + expected_errors = [ + exceptions.FailedMutationEntryError(mock.Mock(), mock.Mock(), ValueError()) + ] * num_new_errors + async with self._make_one() as instance: + instance._oldest_exceptions = [mock.Mock()] * num_starting + with mock.patch.object(instance, "_execute_mutate_rows") as execute_mock: + execute_mock.return_value = expected_errors + with mock.patch.object( + instance._flow_control, "add_to_flow" + ) as flow_mock: + # mock flow control to always return a single batch + async def gen(x): + yield x + + flow_mock.side_effect = lambda x: gen(x) + mutations = [_make_mutation(count=1, size=1)] * num_entries + await instance._flush_internal(mutations) + assert instance._entries_processed_since_last_raise == num_entries + assert execute_mock.call_count == 1 + assert flow_mock.call_count == 1 + found_exceptions = instance._oldest_exceptions + list( + instance._newest_exceptions + ) + assert len(found_exceptions) == expected_total_errors + for i in range(num_starting, expected_total_errors): + assert found_exceptions[i] == expected_errors[i - num_starting] + # errors should have index stripped + assert found_exceptions[i].index is None + # clear out exceptions + instance._oldest_exceptions.clear() + instance._newest_exceptions.clear() + + async def _mock_gapic_return(self, num=5): + from google.cloud.bigtable_v2.types import MutateRowsResponse + from google.rpc import status_pb2 + + async def gen(num): + for i in range(num): + entry = MutateRowsResponse.Entry( + index=i, status=status_pb2.Status(code=0) + ) + yield MutateRowsResponse(entries=[entry]) + + return gen(num) + + @pytest.mark.asyncio + async def test_timer_flush_end_to_end(self): + """Flush should automatically trigger after flush_interval""" + num_nutations = 10 + mutations = [_make_mutation(count=2, size=2)] * num_nutations + + async with self._make_one(flush_interval=0.05) as instance: + instance._table.default_operation_timeout = 10 + 
instance._table.default_attempt_timeout = 9 + with mock.patch.object( + instance._table.client._gapic_client, "mutate_rows" + ) as gapic_mock: + gapic_mock.side_effect = ( + lambda *args, **kwargs: self._mock_gapic_return(num_nutations) + ) + for m in mutations: + await instance.append(m) + assert instance._entries_processed_since_last_raise == 0 + # let flush trigger due to timer + await asyncio.sleep(0.1) + assert instance._entries_processed_since_last_raise == num_nutations + + @pytest.mark.asyncio + @mock.patch( + "google.cloud.bigtable.data._async.mutations_batcher._MutateRowsOperationAsync", + ) + async def test__execute_mutate_rows(self, mutate_rows): + mutate_rows.return_value = AsyncMock() + start_operation = mutate_rows().start + table = mock.Mock() + table.table_name = "test-table" + table.app_profile_id = "test-app-profile" + table.default_mutate_rows_operation_timeout = 17 + table.default_mutate_rows_attempt_timeout = 13 + table.default_mutate_rows_retryable_errors = () + async with self._make_one(table) as instance: + batch = [_make_mutation()] + result = await instance._execute_mutate_rows(batch) + assert start_operation.call_count == 1 + args, kwargs = mutate_rows.call_args + assert args[0] == table.client._gapic_client + assert args[1] == table + assert args[2] == batch + assert kwargs["operation_timeout"] == 17 + assert kwargs["attempt_timeout"] == 13 + assert result == [] + + @pytest.mark.asyncio + @mock.patch( + "google.cloud.bigtable.data._async.mutations_batcher._MutateRowsOperationAsync.start" + ) + async def test__execute_mutate_rows_returns_errors(self, mutate_rows): + """Errors from operation should be returned as list""" + from google.cloud.bigtable.data.exceptions import ( + MutationsExceptionGroup, + FailedMutationEntryError, + ) + + err1 = FailedMutationEntryError(0, mock.Mock(), RuntimeError("test error")) + err2 = FailedMutationEntryError(1, mock.Mock(), RuntimeError("test error")) + mutate_rows.side_effect = MutationsExceptionGroup([err1, err2], 10) + table = mock.Mock() + table.default_mutate_rows_operation_timeout = 17 + table.default_mutate_rows_attempt_timeout = 13 + table.default_mutate_rows_retryable_errors = () + async with self._make_one(table) as instance: + batch = [_make_mutation()] + result = await instance._execute_mutate_rows(batch) + assert len(result) == 2 + assert result[0] == err1 + assert result[1] == err2 + # indices should be set to None + assert result[0].index is None + assert result[1].index is None + + @pytest.mark.asyncio + async def test__raise_exceptions(self): + """Raise exceptions and reset error state""" + from google.cloud.bigtable.data import exceptions + + expected_total = 1201 + expected_exceptions = [RuntimeError("mock")] * 3 + async with self._make_one() as instance: + instance._oldest_exceptions = expected_exceptions + instance._entries_processed_since_last_raise = expected_total + try: + instance._raise_exceptions() + except exceptions.MutationsExceptionGroup as exc: + assert list(exc.exceptions) == expected_exceptions + assert str(expected_total) in str(exc) + assert instance._entries_processed_since_last_raise == 0 + instance._oldest_exceptions, instance._newest_exceptions = ([], []) + # try calling again + instance._raise_exceptions() + + @pytest.mark.asyncio + async def test___aenter__(self): + """Should return self""" + async with self._make_one() as instance: + assert await instance.__aenter__() == instance + + @pytest.mark.asyncio + async def test___aexit__(self): + """aexit should call close""" + async with self._make_one() 
as instance: + with mock.patch.object(instance, "close") as close_mock: + await instance.__aexit__(None, None, None) + assert close_mock.call_count == 1 + + @pytest.mark.asyncio + async def test_close(self): + """Should clean up all resources""" + async with self._make_one() as instance: + with mock.patch.object(instance, "_schedule_flush") as flush_mock: + with mock.patch.object(instance, "_raise_exceptions") as raise_mock: + await instance.close() + assert instance.closed is True + assert instance._flush_timer.done() is True + assert instance._flush_jobs == set() + assert flush_mock.call_count == 1 + assert raise_mock.call_count == 1 + + @pytest.mark.asyncio + async def test_close_w_exceptions(self): + """Raise exceptions on close""" + from google.cloud.bigtable.data import exceptions + + expected_total = 10 + expected_exceptions = [RuntimeError("mock")] + async with self._make_one() as instance: + instance._oldest_exceptions = expected_exceptions + instance._entries_processed_since_last_raise = expected_total + try: + await instance.close() + except exceptions.MutationsExceptionGroup as exc: + assert list(exc.exceptions) == expected_exceptions + assert str(expected_total) in str(exc) + assert instance._entries_processed_since_last_raise == 0 + # clear out exceptions + instance._oldest_exceptions, instance._newest_exceptions = ([], []) + + @pytest.mark.asyncio + async def test__on_exit(self, recwarn): + """Should raise warnings if unflushed mutations exist""" + async with self._make_one() as instance: + # calling without mutations is noop + instance._on_exit() + assert len(recwarn) == 0 + # calling with existing mutations should raise warning + num_left = 4 + instance._staged_entries = [mock.Mock()] * num_left + with pytest.warns(UserWarning) as w: + instance._on_exit() + assert len(w) == 1 + assert "unflushed mutations" in str(w[0].message).lower() + assert str(num_left) in str(w[0].message) + # calling while closed is noop + instance.closed = True + instance._on_exit() + assert len(recwarn) == 0 + # reset staged mutations for cleanup + instance._staged_entries = [] + + @pytest.mark.asyncio + async def test_atexit_registration(self): + """Should run _on_exit on program termination""" + import atexit + + with mock.patch.object(atexit, "register") as register_mock: + assert register_mock.call_count == 0 + async with self._make_one(): + assert register_mock.call_count == 1 + + @pytest.mark.asyncio + @mock.patch( + "google.cloud.bigtable.data._async.mutations_batcher._MutateRowsOperationAsync", + ) + async def test_timeout_args_passed(self, mutate_rows): + """ + batch_operation_timeout and batch_attempt_timeout should be used + in api calls + """ + mutate_rows.return_value = AsyncMock() + expected_operation_timeout = 17 + expected_attempt_timeout = 13 + async with self._make_one( + batch_operation_timeout=expected_operation_timeout, + batch_attempt_timeout=expected_attempt_timeout, + ) as instance: + assert instance._operation_timeout == expected_operation_timeout + assert instance._attempt_timeout == expected_attempt_timeout + # make simulated gapic call + await instance._execute_mutate_rows([_make_mutation()]) + assert mutate_rows.call_count == 1 + kwargs = mutate_rows.call_args[1] + assert kwargs["operation_timeout"] == expected_operation_timeout + assert kwargs["attempt_timeout"] == expected_attempt_timeout + + @pytest.mark.parametrize( + "limit,in_e,start_e,end_e", + [ + (10, 0, (10, 0), (10, 0)), + (1, 10, (0, 0), (1, 1)), + (10, 1, (0, 0), (1, 0)), + (10, 10, (0, 0), (10, 0)), + (10, 
11, (0, 0), (10, 1)), + (3, 20, (0, 0), (3, 3)), + (10, 20, (0, 0), (10, 10)), + (10, 21, (0, 0), (10, 10)), + (2, 1, (2, 0), (2, 1)), + (2, 1, (1, 0), (2, 0)), + (2, 2, (1, 0), (2, 1)), + (3, 1, (3, 1), (3, 2)), + (3, 3, (3, 1), (3, 3)), + (1000, 5, (999, 0), (1000, 4)), + (1000, 5, (0, 0), (5, 0)), + (1000, 5, (1000, 0), (1000, 5)), + ], + ) + def test__add_exceptions(self, limit, in_e, start_e, end_e): + """ + Test that the _add_exceptions function properly updates the + _oldest_exceptions and _newest_exceptions lists + Args: + - limit: the _exception_list_limit representing the max size of either list + - in_e: size of list of exceptions to send to _add_exceptions + - start_e: a tuple of ints representing the initial sizes of _oldest_exceptions and _newest_exceptions + - end_e: a tuple of ints representing the expected sizes of _oldest_exceptions and _newest_exceptions + """ + from collections import deque + + input_list = [RuntimeError(f"mock {i}") for i in range(in_e)] + mock_batcher = mock.Mock() + mock_batcher._oldest_exceptions = [ + RuntimeError(f"starting mock {i}") for i in range(start_e[0]) + ] + mock_batcher._newest_exceptions = deque( + [RuntimeError(f"starting mock {i}") for i in range(start_e[1])], + maxlen=limit, + ) + mock_batcher._exception_list_limit = limit + mock_batcher._exceptions_since_last_raise = 0 + self._get_target_class()._add_exceptions(mock_batcher, input_list) + assert len(mock_batcher._oldest_exceptions) == end_e[0] + assert len(mock_batcher._newest_exceptions) == end_e[1] + assert mock_batcher._exceptions_since_last_raise == in_e + # make sure that the right items ended up in the right spots + # should fill the oldest slots first + oldest_list_diff = end_e[0] - start_e[0] + # new items should be added on top of the starting list + newest_list_diff = min(max(in_e - oldest_list_diff, 0), limit) + for i in range(oldest_list_diff): + assert mock_batcher._oldest_exceptions[i + start_e[0]] == input_list[i] + # then, the newest slots should be filled with the last items of the input list + for i in range(1, newest_list_diff + 1): + assert mock_batcher._newest_exceptions[-i] == input_list[-i] + + @pytest.mark.asyncio + # test different inputs for retryable exceptions + @pytest.mark.parametrize( + "input_retryables,expected_retryables", + [ + ( + TABLE_DEFAULT.READ_ROWS, + [ + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + core_exceptions.Aborted, + ], + ), + ( + TABLE_DEFAULT.DEFAULT, + [core_exceptions.DeadlineExceeded, core_exceptions.ServiceUnavailable], + ), + ( + TABLE_DEFAULT.MUTATE_ROWS, + [core_exceptions.DeadlineExceeded, core_exceptions.ServiceUnavailable], + ), + ([], []), + ([4], [core_exceptions.DeadlineExceeded]), + ], + ) + async def test_customizable_retryable_errors( + self, input_retryables, expected_retryables + ): + """ + Test that retryable functions support user-configurable arguments, and that the configured retryables are passed + down to the gapic layer. 
+ """ + from google.cloud.bigtable.data._async.client import TableAsync + + with mock.patch( + "google.api_core.retry.if_exception_type" + ) as predicate_builder_mock: + with mock.patch( + "google.api_core.retry.retry_target_async" + ) as retry_fn_mock: + table = None + with mock.patch("asyncio.create_task"): + table = TableAsync(mock.Mock(), "instance", "table") + async with self._make_one( + table, batch_retryable_errors=input_retryables + ) as instance: + assert instance._retryable_errors == expected_retryables + expected_predicate = lambda a: a in expected_retryables # noqa + predicate_builder_mock.return_value = expected_predicate + retry_fn_mock.side_effect = RuntimeError("stop early") + mutation = _make_mutation(count=1, size=1) + await instance._execute_mutate_rows([mutation]) + # passed in errors should be used to build the predicate + predicate_builder_mock.assert_called_once_with( + *expected_retryables, _MutateRowsIncomplete + ) + retry_call_args = retry_fn_mock.call_args_list[0].args + # output of if_exception_type should be sent in to retry constructor + assert retry_call_args[1] is expected_predicate diff --git a/packages/google-cloud-bigtable/tests/unit/read-rows-acceptance-test.json b/packages/google-cloud-bigtable/tests/unit/data/read-rows-acceptance-test.json similarity index 100% rename from packages/google-cloud-bigtable/tests/unit/read-rows-acceptance-test.json rename to packages/google-cloud-bigtable/tests/unit/data/read-rows-acceptance-test.json diff --git a/packages/google-cloud-bigtable/tests/unit/data/test__helpers.py b/packages/google-cloud-bigtable/tests/unit/data/test__helpers.py new file mode 100644 index 000000000000..5a9c500ed28b --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/data/test__helpers.py @@ -0,0 +1,248 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import pytest +import grpc +from google.api_core import exceptions as core_exceptions +import google.cloud.bigtable.data._helpers as _helpers +from google.cloud.bigtable.data._helpers import TABLE_DEFAULT + +import mock + + +class TestMakeMetadata: + @pytest.mark.parametrize( + "table,profile,expected", + [ + ("table", "profile", "table_name=table&app_profile_id=profile"), + ("table", None, "table_name=table"), + ], + ) + def test__make_metadata(self, table, profile, expected): + metadata = _helpers._make_metadata(table, profile) + assert metadata == [("x-goog-request-params", expected)] + + +class TestAttemptTimeoutGenerator: + @pytest.mark.parametrize( + "request_t,operation_t,expected_list", + [ + (1, 3.5, [1, 1, 1, 0.5, 0, 0]), + (None, 3.5, [3.5, 2.5, 1.5, 0.5, 0, 0]), + (10, 5, [5, 4, 3, 2, 1, 0, 0]), + (3, 3, [3, 2, 1, 0, 0, 0, 0]), + (0, 3, [0, 0, 0]), + (3, 0, [0, 0, 0]), + (-1, 3, [0, 0, 0]), + (3, -1, [0, 0, 0]), + ], + ) + def test_attempt_timeout_generator(self, request_t, operation_t, expected_list): + """ + test different values for timeouts. 
Clock is incremented by 1 second for each item in expected_list + """ + timestamp_start = 123 + with mock.patch("time.monotonic") as mock_monotonic: + mock_monotonic.return_value = timestamp_start + generator = _helpers._attempt_timeout_generator(request_t, operation_t) + for val in expected_list: + mock_monotonic.return_value += 1 + assert next(generator) == val + + @pytest.mark.parametrize( + "request_t,operation_t,expected", + [ + (1, 3.5, 1), + (None, 3.5, 3.5), + (10, 5, 5), + (5, 10, 5), + (3, 3, 3), + (0, 3, 0), + (3, 0, 0), + (-1, 3, 0), + (3, -1, 0), + ], + ) + def test_attempt_timeout_frozen_time(self, request_t, operation_t, expected): + """test with time.monotonic frozen""" + timestamp_start = 123 + with mock.patch("time.monotonic") as mock_monotonic: + mock_monotonic.return_value = timestamp_start + generator = _helpers._attempt_timeout_generator(request_t, operation_t) + assert next(generator) == expected + # value should not change without time.monotonic changing + assert next(generator) == expected + + def test_attempt_timeout_w_sleeps(self): + """use real sleep values to make sure it matches expectations""" + from time import sleep + + operation_timeout = 1 + generator = _helpers._attempt_timeout_generator(None, operation_timeout) + expected_value = operation_timeout + sleep_time = 0.1 + for i in range(3): + found_value = next(generator) + assert abs(found_value - expected_value) < 0.001 + sleep(sleep_time) + expected_value -= sleep_time + + +class TestValidateTimeouts: + def test_validate_timeouts_error_messages(self): + with pytest.raises(ValueError) as e: + _helpers._validate_timeouts(operation_timeout=1, attempt_timeout=-1) + assert "attempt_timeout must be greater than 0" in str(e.value) + with pytest.raises(ValueError) as e: + _helpers._validate_timeouts(operation_timeout=-1, attempt_timeout=1) + assert "operation_timeout must be greater than 0" in str(e.value) + + @pytest.mark.parametrize( + "args,expected", + [ + ([1, None, False], False), + ([1, None, True], True), + ([1, 1, False], True), + ([1, 1, True], True), + ([1, 1], True), + ([1, None], False), + ([2, 1], True), + ([0, 1], False), + ([1, 0], False), + ([60, None], False), + ([600, None], False), + ([600, 600], True), + ], + ) + def test_validate_with_inputs(self, args, expected): + """ + test whether an exception is thrown with different inputs + """ + success = False + try: + _helpers._validate_timeouts(*args) + success = True + except ValueError: + pass + assert success == expected + + +class TestGetTimeouts: + @pytest.mark.parametrize( + "input_times,input_table,expected", + [ + ((2, 1), {}, (2, 1)), + ((2, 4), {}, (2, 2)), + ((2, None), {}, (2, 2)), + ( + (TABLE_DEFAULT.DEFAULT, TABLE_DEFAULT.DEFAULT), + {"operation": 3, "attempt": 2}, + (3, 2), + ), + ( + (TABLE_DEFAULT.READ_ROWS, TABLE_DEFAULT.READ_ROWS), + {"read_rows_operation": 3, "read_rows_attempt": 2}, + (3, 2), + ), + ( + (TABLE_DEFAULT.MUTATE_ROWS, TABLE_DEFAULT.MUTATE_ROWS), + {"mutate_rows_operation": 3, "mutate_rows_attempt": 2}, + (3, 2), + ), + ((10, TABLE_DEFAULT.DEFAULT), {"attempt": None}, (10, 10)), + ((10, TABLE_DEFAULT.DEFAULT), {"attempt": 5}, (10, 5)), + ((10, TABLE_DEFAULT.DEFAULT), {"attempt": 100}, (10, 10)), + ((TABLE_DEFAULT.DEFAULT, 10), {"operation": 12}, (12, 10)), + ((TABLE_DEFAULT.DEFAULT, 10), {"operation": 3}, (3, 3)), + ], + ) + def test_get_timeouts(self, input_times, input_table, expected): + """ + test input/output mappings for a variety of valid inputs + """ + fake_table = mock.Mock() + for key in 
input_table.keys(): + # set the default fields in our fake table mock + setattr(fake_table, f"default_{key}_timeout", input_table[key]) + t1, t2 = _helpers._get_timeouts(input_times[0], input_times[1], fake_table) + assert t1 == expected[0] + assert t2 == expected[1] + + @pytest.mark.parametrize( + "input_times,input_table", + [ + ([0, 1], {}), + ([1, 0], {}), + ([None, 1], {}), + ([TABLE_DEFAULT.DEFAULT, 1], {"operation": None}), + ([TABLE_DEFAULT.DEFAULT, 1], {"operation": 0}), + ([1, TABLE_DEFAULT.DEFAULT], {"attempt": 0}), + ], + ) + def test_get_timeouts_invalid(self, input_times, input_table): + """ + test with inputs that should raise error during validation step + """ + fake_table = mock.Mock() + for key in input_table.keys(): + # set the default fields in our fake table mock + setattr(fake_table, f"default_{key}_timeout", input_table[key]) + with pytest.raises(ValueError): + _helpers._get_timeouts(input_times[0], input_times[1], fake_table) + + +class TestGetRetryableErrors: + @pytest.mark.parametrize( + "input_codes,input_table,expected", + [ + ((), {}, []), + ((Exception,), {}, [Exception]), + (TABLE_DEFAULT.DEFAULT, {"default": [Exception]}, [Exception]), + ( + TABLE_DEFAULT.READ_ROWS, + {"default_read_rows": (RuntimeError, ValueError)}, + [RuntimeError, ValueError], + ), + ( + TABLE_DEFAULT.MUTATE_ROWS, + {"default_mutate_rows": (ValueError,)}, + [ValueError], + ), + ((4,), {}, [core_exceptions.DeadlineExceeded]), + ( + [grpc.StatusCode.DEADLINE_EXCEEDED], + {}, + [core_exceptions.DeadlineExceeded], + ), + ( + (14, grpc.StatusCode.ABORTED, RuntimeError), + {}, + [ + core_exceptions.ServiceUnavailable, + core_exceptions.Aborted, + RuntimeError, + ], + ), + ], + ) + def test_get_retryable_errors(self, input_codes, input_table, expected): + """ + test input/output mappings for a variety of valid inputs + """ + fake_table = mock.Mock() + for key in input_table.keys(): + # set the default fields in our fake table mock + setattr(fake_table, f"{key}_retryable_errors", input_table[key]) + result = _helpers._get_retryable_errors(input_codes, fake_table) + assert result == expected diff --git a/packages/google-cloud-bigtable/tests/unit/data/test_exceptions.py b/packages/google-cloud-bigtable/tests/unit/data/test_exceptions.py new file mode 100644 index 000000000000..bc921717e596 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/data/test_exceptions.py @@ -0,0 +1,533 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
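+
+# Unit tests for google.cloud.bigtable.data.exceptions: the
+# _BigtableExceptionGroup base class and its subclasses
+# (MutationsExceptionGroup, RetryExceptionGroup,
+# ShardedReadRowsExceptionGroup), traceback rendering on Python 3.10 and
+# 3.11+, and the per-item FailedMutationEntryError and
+# FailedQueryShardError wrappers.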
+ +import unittest +import pytest +import sys + +import google.cloud.bigtable.data.exceptions as bigtable_exceptions + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock +except ImportError: # pragma: NO COVER + import mock # type: ignore + + +class TracebackTests311: + """ + Provides a set of tests that should be run on python 3.11 and above, + to verify that the exception traceback looks as expected + """ + + @pytest.mark.skipif( + sys.version_info < (3, 11), reason="requires python3.11 or higher" + ) + def test_311_traceback(self): + """ + Exception customizations should not break rich exception group traceback in python 3.11 + """ + import traceback + + sub_exc1 = RuntimeError("first sub exception") + sub_exc2 = ZeroDivisionError("second sub exception") + sub_group = self._make_one(excs=[sub_exc2]) + exc_group = self._make_one(excs=[sub_exc1, sub_group]) + + expected_traceback = ( + f" | google.cloud.bigtable.data.exceptions.{type(exc_group).__name__}: {str(exc_group)}", + " +-+---------------- 1 ----------------", + " | RuntimeError: first sub exception", + " +---------------- 2 ----------------", + f" | google.cloud.bigtable.data.exceptions.{type(sub_group).__name__}: {str(sub_group)}", + " +-+---------------- 1 ----------------", + " | ZeroDivisionError: second sub exception", + " +------------------------------------", + ) + exception_caught = False + try: + raise exc_group + except self._get_class(): + exception_caught = True + tb = traceback.format_exc() + tb_relevant_lines = tuple(tb.splitlines()[3:]) + assert expected_traceback == tb_relevant_lines + assert exception_caught + + @pytest.mark.skipif( + sys.version_info < (3, 11), reason="requires python3.11 or higher" + ) + def test_311_traceback_with_cause(self): + """ + traceback should display nicely with sub-exceptions with __cause__ set + """ + import traceback + + sub_exc1 = RuntimeError("first sub exception") + cause_exc = ImportError("cause exception") + sub_exc1.__cause__ = cause_exc + sub_exc2 = ZeroDivisionError("second sub exception") + exc_group = self._make_one(excs=[sub_exc1, sub_exc2]) + + expected_traceback = ( + f" | google.cloud.bigtable.data.exceptions.{type(exc_group).__name__}: {str(exc_group)}", + " +-+---------------- 1 ----------------", + " | ImportError: cause exception", + " | ", + " | The above exception was the direct cause of the following exception:", + " | ", + " | RuntimeError: first sub exception", + " +---------------- 2 ----------------", + " | ZeroDivisionError: second sub exception", + " +------------------------------------", + ) + exception_caught = False + try: + raise exc_group + except self._get_class(): + exception_caught = True + tb = traceback.format_exc() + tb_relevant_lines = tuple(tb.splitlines()[3:]) + assert expected_traceback == tb_relevant_lines + assert exception_caught + + @pytest.mark.skipif( + sys.version_info < (3, 11), reason="requires python3.11 or higher" + ) + def test_311_exception_group(self): + """ + Python 3.11+ should handle exepctions as native exception groups + """ + exceptions = [RuntimeError("mock"), ValueError("mock")] + instance = self._make_one(excs=exceptions) + # ensure split works as expected + runtime_error, others = instance.split(lambda e: isinstance(e, RuntimeError)) + assert runtime_error.exceptions[0] == exceptions[0] + assert others.exceptions[0] == exceptions[1] + + +class TracebackTests310: + """ + Provides a set of tests that should be run on python 3.10 and under, + to verify that the exception 
traceback looks as expected + """ + + @pytest.mark.skipif( + sys.version_info >= (3, 11), reason="requires python3.10 or lower" + ) + def test_310_traceback(self): + """ + Exception customizations should not break rich exception group traceback in python 3.10 + """ + import traceback + + sub_exc1 = RuntimeError("first sub exception") + sub_exc2 = ZeroDivisionError("second sub exception") + sub_group = self._make_one(excs=[sub_exc2]) + exc_group = self._make_one(excs=[sub_exc1, sub_group]) + found_message = str(exc_group).splitlines()[0] + found_sub_message = str(sub_group).splitlines()[0] + + expected_traceback = ( + f"google.cloud.bigtable.data.exceptions.{type(exc_group).__name__}: {found_message}", + "--+---------------- 1 ----------------", + " | RuntimeError: first sub exception", + " +---------------- 2 ----------------", + f" | {type(sub_group).__name__}: {found_sub_message}", + " --+---------------- 1 ----------------", + " | ZeroDivisionError: second sub exception", + " +------------------------------------", + ) + exception_caught = False + try: + raise exc_group + except self._get_class(): + exception_caught = True + tb = traceback.format_exc() + tb_relevant_lines = tuple(tb.splitlines()[3:]) + assert expected_traceback == tb_relevant_lines + assert exception_caught + + @pytest.mark.skipif( + sys.version_info >= (3, 11), reason="requires python3.10 or lower" + ) + def test_310_traceback_with_cause(self): + """ + traceback should display nicely with sub-exceptions with __cause__ set + """ + import traceback + + sub_exc1 = RuntimeError("first sub exception") + cause_exc = ImportError("cause exception") + sub_exc1.__cause__ = cause_exc + sub_exc2 = ZeroDivisionError("second sub exception") + exc_group = self._make_one(excs=[sub_exc1, sub_exc2]) + found_message = str(exc_group).splitlines()[0] + + expected_traceback = ( + f"google.cloud.bigtable.data.exceptions.{type(exc_group).__name__}: {found_message}", + "--+---------------- 1 ----------------", + " | ImportError: cause exception", + " | ", + " | The above exception was the direct cause of the following exception:", + " | ", + " | RuntimeError: first sub exception", + " +---------------- 2 ----------------", + " | ZeroDivisionError: second sub exception", + " +------------------------------------", + ) + exception_caught = False + try: + raise exc_group + except self._get_class(): + exception_caught = True + tb = traceback.format_exc() + tb_relevant_lines = tuple(tb.splitlines()[3:]) + assert expected_traceback == tb_relevant_lines + assert exception_caught + + +class TestBigtableExceptionGroup(TracebackTests311, TracebackTests310): + """ + Subclass for MutationsExceptionGroup, RetryExceptionGroup, and ShardedReadRowsExceptionGroup + """ + + def _get_class(self): + from google.cloud.bigtable.data.exceptions import _BigtableExceptionGroup + + return _BigtableExceptionGroup + + def _make_one(self, message="test_message", excs=None): + if excs is None: + excs = [RuntimeError("mock")] + + return self._get_class()(message, excs=excs) + + def test_raise(self): + """ + Create exception in raise statement, which calls __new__ and __init__ + """ + test_msg = "test message" + test_excs = [Exception(test_msg)] + with pytest.raises(self._get_class()) as e: + raise self._get_class()(test_msg, test_excs) + found_message = str(e.value).splitlines()[ + 0 + ] # added to prase out subexceptions in <3.11 + assert found_message == test_msg + assert list(e.value.exceptions) == test_excs + + def test_raise_empty_list(self): + """ + Empty exception 
lists are not supported + """ + with pytest.raises(ValueError) as e: + raise self._make_one(excs=[]) + assert "non-empty sequence" in str(e.value) + + def test_exception_handling(self): + """ + All versions should inherit from exception + and support tranditional exception handling + """ + instance = self._make_one() + assert isinstance(instance, Exception) + try: + raise instance + except Exception as e: + assert isinstance(e, Exception) + assert e == instance + was_raised = True + assert was_raised + + +class TestMutationsExceptionGroup(TestBigtableExceptionGroup): + def _get_class(self): + from google.cloud.bigtable.data.exceptions import MutationsExceptionGroup + + return MutationsExceptionGroup + + def _make_one(self, excs=None, num_entries=3): + if excs is None: + excs = [RuntimeError("mock")] + + return self._get_class()(excs, num_entries) + + @pytest.mark.parametrize( + "exception_list,total_entries,expected_message", + [ + ([Exception()], 1, "1 failed entry from 1 attempted."), + ([Exception()], 2, "1 failed entry from 2 attempted."), + ( + [Exception(), RuntimeError()], + 2, + "2 failed entries from 2 attempted.", + ), + ], + ) + def test_raise(self, exception_list, total_entries, expected_message): + """ + Create exception in raise statement, which calls __new__ and __init__ + """ + with pytest.raises(self._get_class()) as e: + raise self._get_class()(exception_list, total_entries) + found_message = str(e.value).splitlines()[ + 0 + ] # added to prase out subexceptions in <3.11 + assert found_message == expected_message + assert list(e.value.exceptions) == exception_list + + def test_raise_custom_message(self): + """ + should be able to set a custom error message + """ + custom_message = "custom message" + exception_list = [Exception()] + with pytest.raises(self._get_class()) as e: + raise self._get_class()(exception_list, 5, message=custom_message) + found_message = str(e.value).splitlines()[ + 0 + ] # added to prase out subexceptions in <3.11 + assert found_message == custom_message + assert list(e.value.exceptions) == exception_list + + @pytest.mark.parametrize( + "first_list_len,second_list_len,total_excs,entry_count,expected_message", + [ + (3, 0, 3, 4, "3 failed entries from 4 attempted."), + (1, 0, 1, 2, "1 failed entry from 2 attempted."), + (0, 1, 1, 2, "1 failed entry from 2 attempted."), + (2, 2, 4, 4, "4 failed entries from 4 attempted."), + ( + 1, + 1, + 3, + 2, + "3 failed entries from 2 attempted. (first 1 and last 1 attached as sub-exceptions; 1 truncated)", + ), + ( + 1, + 2, + 100, + 2, + "100 failed entries from 2 attempted. (first 1 and last 2 attached as sub-exceptions; 97 truncated)", + ), + ( + 2, + 1, + 4, + 9, + "4 failed entries from 9 attempted. (first 2 and last 1 attached as sub-exceptions; 1 truncated)", + ), + ( + 3, + 0, + 10, + 10, + "10 failed entries from 10 attempted. (first 3 attached as sub-exceptions; 7 truncated)", + ), + ( + 0, + 3, + 10, + 10, + "10 failed entries from 10 attempted. 
(last 3 attached as sub-exceptions; 7 truncated)", + ), + ], + ) + def test_from_truncated_lists( + self, first_list_len, second_list_len, total_excs, entry_count, expected_message + ): + """ + Should be able to make MutationsExceptionGroup using a pair of + lists representing a larger truncated list of exceptions + """ + first_list = [Exception()] * first_list_len + second_list = [Exception()] * second_list_len + with pytest.raises(self._get_class()) as e: + raise self._get_class().from_truncated_lists( + first_list, second_list, total_excs, entry_count + ) + found_message = str(e.value).splitlines()[ + 0 + ] # added to prase out subexceptions in <3.11 + assert found_message == expected_message + assert list(e.value.exceptions) == first_list + second_list + + +class TestRetryExceptionGroup(TestBigtableExceptionGroup): + def _get_class(self): + from google.cloud.bigtable.data.exceptions import RetryExceptionGroup + + return RetryExceptionGroup + + def _make_one(self, excs=None): + if excs is None: + excs = [RuntimeError("mock")] + + return self._get_class()(excs=excs) + + @pytest.mark.parametrize( + "exception_list,expected_message", + [ + ([Exception()], "1 failed attempt"), + ([Exception(), RuntimeError()], "2 failed attempts"), + ( + [Exception(), ValueError("test")], + "2 failed attempts", + ), + ( + [ + bigtable_exceptions.RetryExceptionGroup( + [Exception(), ValueError("test")] + ) + ], + "1 failed attempt", + ), + ], + ) + def test_raise(self, exception_list, expected_message): + """ + Create exception in raise statement, which calls __new__ and __init__ + """ + with pytest.raises(self._get_class()) as e: + raise self._get_class()(exception_list) + found_message = str(e.value).splitlines()[ + 0 + ] # added to prase out subexceptions in <3.11 + assert found_message == expected_message + assert list(e.value.exceptions) == exception_list + + +class TestShardedReadRowsExceptionGroup(TestBigtableExceptionGroup): + def _get_class(self): + from google.cloud.bigtable.data.exceptions import ShardedReadRowsExceptionGroup + + return ShardedReadRowsExceptionGroup + + def _make_one(self, excs=None, succeeded=None, num_entries=3): + if excs is None: + excs = [RuntimeError("mock")] + succeeded = succeeded or [] + + return self._get_class()(excs, succeeded, num_entries) + + @pytest.mark.parametrize( + "exception_list,succeeded,total_entries,expected_message", + [ + ([Exception()], [], 1, "1 sub-exception (from 1 query attempted)"), + ([Exception()], [1], 2, "1 sub-exception (from 2 queries attempted)"), + ( + [Exception(), RuntimeError()], + [0, 1], + 2, + "2 sub-exceptions (from 2 queries attempted)", + ), + ], + ) + def test_raise(self, exception_list, succeeded, total_entries, expected_message): + """ + Create exception in raise statement, which calls __new__ and __init__ + """ + with pytest.raises(self._get_class()) as e: + raise self._get_class()(exception_list, succeeded, total_entries) + found_message = str(e.value).splitlines()[ + 0 + ] # added to prase out subexceptions in <3.11 + assert found_message == expected_message + assert list(e.value.exceptions) == exception_list + assert e.value.successful_rows == succeeded + + +class TestFailedMutationEntryError: + def _get_class(self): + from google.cloud.bigtable.data.exceptions import FailedMutationEntryError + + return FailedMutationEntryError + + def _make_one(self, idx=9, entry=mock.Mock(), cause=RuntimeError("mock")): + return self._get_class()(idx, entry, cause) + + def test_raise(self): + """ + Create exception in raise statement, which 
calls __new__ and __init__ + """ + test_idx = 2 + test_entry = mock.Mock() + test_exc = ValueError("test") + with pytest.raises(self._get_class()) as e: + raise self._get_class()(test_idx, test_entry, test_exc) + assert str(e.value) == "Failed idempotent mutation entry at index 2" + assert e.value.index == test_idx + assert e.value.entry == test_entry + assert e.value.__cause__ == test_exc + assert isinstance(e.value, Exception) + assert test_entry.is_idempotent.call_count == 1 + + def test_raise_idempotent(self): + """ + Test raise with non idempotent entry + """ + test_idx = 2 + test_entry = unittest.mock.Mock() + test_entry.is_idempotent.return_value = False + test_exc = ValueError("test") + with pytest.raises(self._get_class()) as e: + raise self._get_class()(test_idx, test_entry, test_exc) + assert str(e.value) == "Failed non-idempotent mutation entry at index 2" + assert e.value.index == test_idx + assert e.value.entry == test_entry + assert e.value.__cause__ == test_exc + assert test_entry.is_idempotent.call_count == 1 + + def test_no_index(self): + """ + Instances without an index should display different error string + """ + test_idx = None + test_entry = unittest.mock.Mock() + test_exc = ValueError("test") + with pytest.raises(self._get_class()) as e: + raise self._get_class()(test_idx, test_entry, test_exc) + assert str(e.value) == "Failed idempotent mutation entry" + assert e.value.index == test_idx + assert e.value.entry == test_entry + assert e.value.__cause__ == test_exc + assert isinstance(e.value, Exception) + assert test_entry.is_idempotent.call_count == 1 + + +class TestFailedQueryShardError: + def _get_class(self): + from google.cloud.bigtable.data.exceptions import FailedQueryShardError + + return FailedQueryShardError + + def _make_one(self, idx=9, query=mock.Mock(), cause=RuntimeError("mock")): + return self._get_class()(idx, query, cause) + + def test_raise(self): + """ + Create exception in raise statement, which calls __new__ and __init__ + """ + test_idx = 2 + test_query = mock.Mock() + test_exc = ValueError("test") + with pytest.raises(self._get_class()) as e: + raise self._get_class()(test_idx, test_query, test_exc) + assert str(e.value) == "Failed query at index 2" + assert e.value.index == test_idx + assert e.value.query == test_query + assert e.value.__cause__ == test_exc + assert isinstance(e.value, Exception) diff --git a/packages/google-cloud-bigtable/tests/unit/data/test_mutations.py b/packages/google-cloud-bigtable/tests/unit/data/test_mutations.py new file mode 100644 index 000000000000..485c86e42e3f --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/data/test_mutations.py @@ -0,0 +1,708 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
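+
+# Unit tests for the mutation model classes in
+# google.cloud.bigtable.data.mutations: Mutation, SetCell,
+# DeleteRangeFromColumn, DeleteAllFromFamily, DeleteAllFromRow, and
+# RowMutationEntry, covering their dict and protobuf representations and
+# idempotency semantics.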
+ +import pytest + +import google.cloud.bigtable.data.mutations as mutations + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock +except ImportError: # pragma: NO COVER + import mock # type: ignore + + +class TestBaseMutation: + def _target_class(self): + from google.cloud.bigtable.data.mutations import Mutation + + return Mutation + + def test__to_dict(self): + """Should be unimplemented in the base class""" + with pytest.raises(NotImplementedError): + self._target_class()._to_dict(mock.Mock()) + + def test_is_idempotent(self): + """is_idempotent should assume True""" + assert self._target_class().is_idempotent(mock.Mock()) + + def test___str__(self): + """Str representation of mutations should be to_dict""" + self_mock = mock.Mock() + str_value = self._target_class().__str__(self_mock) + assert self_mock._to_dict.called + assert str_value == str(self_mock._to_dict.return_value) + + @pytest.mark.parametrize("test_dict", [{}, {"key": "value"}]) + def test_size(self, test_dict): + from sys import getsizeof + + """Size should return size of dict representation""" + self_mock = mock.Mock() + self_mock._to_dict.return_value = test_dict + size_value = self._target_class().size(self_mock) + assert size_value == getsizeof(test_dict) + + @pytest.mark.parametrize( + "expected_class,input_dict", + [ + ( + mutations.SetCell, + { + "set_cell": { + "family_name": "foo", + "column_qualifier": b"bar", + "value": b"test", + "timestamp_micros": 12345, + } + }, + ), + ( + mutations.DeleteRangeFromColumn, + { + "delete_from_column": { + "family_name": "foo", + "column_qualifier": b"bar", + "time_range": {}, + } + }, + ), + ( + mutations.DeleteRangeFromColumn, + { + "delete_from_column": { + "family_name": "foo", + "column_qualifier": b"bar", + "time_range": {"start_timestamp_micros": 123456789}, + } + }, + ), + ( + mutations.DeleteRangeFromColumn, + { + "delete_from_column": { + "family_name": "foo", + "column_qualifier": b"bar", + "time_range": {"end_timestamp_micros": 123456789}, + } + }, + ), + ( + mutations.DeleteRangeFromColumn, + { + "delete_from_column": { + "family_name": "foo", + "column_qualifier": b"bar", + "time_range": { + "start_timestamp_micros": 123, + "end_timestamp_micros": 123456789, + }, + } + }, + ), + ( + mutations.DeleteAllFromFamily, + {"delete_from_family": {"family_name": "foo"}}, + ), + (mutations.DeleteAllFromRow, {"delete_from_row": {}}), + ], + ) + def test__from_dict(self, expected_class, input_dict): + """Should be able to create instance from dict""" + instance = self._target_class()._from_dict(input_dict) + assert isinstance(instance, expected_class) + found_dict = instance._to_dict() + assert found_dict == input_dict + + @pytest.mark.parametrize( + "input_dict", + [ + {"set_cell": {}}, + { + "set_cell": { + "column_qualifier": b"bar", + "value": b"test", + "timestamp_micros": 12345, + } + }, + { + "set_cell": { + "family_name": "f", + "column_qualifier": b"bar", + "value": b"test", + } + }, + {"delete_from_family": {}}, + {"delete_from_column": {}}, + {"fake-type"}, + {}, + ], + ) + def test__from_dict_missing_fields(self, input_dict): + """If dict is malformed or fields are missing, should raise ValueError""" + with pytest.raises(ValueError): + self._target_class()._from_dict(input_dict) + + def test__from_dict_wrong_subclass(self): + """You shouldn't be able to instantiate one mutation type using the dict of another""" + subclasses = [ + mutations.SetCell("foo", b"bar", b"test"), + mutations.DeleteRangeFromColumn("foo", b"bar"), + 
mutations.DeleteAllFromFamily("foo"), + mutations.DeleteAllFromRow(), + ] + for instance in subclasses: + others = [other for other in subclasses if other != instance] + for other in others: + with pytest.raises(ValueError) as e: + type(other)._from_dict(instance._to_dict()) + assert "Mutation type mismatch" in str(e.value) + + +class TestSetCell: + def _target_class(self): + from google.cloud.bigtable.data.mutations import SetCell + + return SetCell + + def _make_one(self, *args, **kwargs): + return self._target_class()(*args, **kwargs) + + @pytest.mark.parametrize("input_val", [2**64, -(2**64)]) + def test_ctor_large_int(self, input_val): + with pytest.raises(ValueError) as e: + self._make_one(family="f", qualifier=b"b", new_value=input_val) + assert "int values must be between" in str(e.value) + + @pytest.mark.parametrize("input_val", ["", "a", "abc", "hello world!"]) + def test_ctor_str_value(self, input_val): + found = self._make_one(family="f", qualifier=b"b", new_value=input_val) + assert found.new_value == input_val.encode("utf-8") + + def test_ctor(self): + """Ensure constructor sets expected values""" + expected_family = "test-family" + expected_qualifier = b"test-qualifier" + expected_value = b"test-value" + expected_timestamp = 1234567890 + instance = self._make_one( + expected_family, expected_qualifier, expected_value, expected_timestamp + ) + assert instance.family == expected_family + assert instance.qualifier == expected_qualifier + assert instance.new_value == expected_value + assert instance.timestamp_micros == expected_timestamp + + def test_ctor_str_inputs(self): + """Test with string qualifier and value""" + expected_family = "test-family" + expected_qualifier = b"test-qualifier" + expected_value = b"test-value" + instance = self._make_one(expected_family, "test-qualifier", "test-value") + assert instance.family == expected_family + assert instance.qualifier == expected_qualifier + assert instance.new_value == expected_value + + @pytest.mark.parametrize("input_val", [-20, -1, 0, 1, 100, int(2**60)]) + def test_ctor_int_value(self, input_val): + found = self._make_one(family="f", qualifier=b"b", new_value=input_val) + assert found.new_value == input_val.to_bytes(8, "big", signed=True) + + @pytest.mark.parametrize( + "int_value,expected_bytes", + [ + (-42, b"\xff\xff\xff\xff\xff\xff\xff\xd6"), + (-2, b"\xff\xff\xff\xff\xff\xff\xff\xfe"), + (-1, b"\xff\xff\xff\xff\xff\xff\xff\xff"), + (0, b"\x00\x00\x00\x00\x00\x00\x00\x00"), + (1, b"\x00\x00\x00\x00\x00\x00\x00\x01"), + (2, b"\x00\x00\x00\x00\x00\x00\x00\x02"), + (100, b"\x00\x00\x00\x00\x00\x00\x00d"), + ], + ) + def test_ctor_int_value_bytes(self, int_value, expected_bytes): + """Test with int value""" + expected_family = "test-family" + expected_qualifier = b"test-qualifier" + instance = self._make_one(expected_family, expected_qualifier, int_value) + assert instance.family == expected_family + assert instance.qualifier == expected_qualifier + assert instance.new_value == expected_bytes + + def test_ctor_negative_timestamp(self): + """Only positive or -1 timestamps are valid""" + with pytest.raises(ValueError) as e: + self._make_one("test-family", b"test-qualifier", b"test-value", -2) + assert ( + "timestamp_micros must be positive (or -1 for server-side timestamp)" + in str(e.value) + ) + + @pytest.mark.parametrize( + "timestamp_ns,expected_timestamp_micros", + [ + (0, 0), + (1, 0), + (123, 0), + (999, 0), + (999_999, 0), + (1_000_000, 1000), + (1_234_567, 1000), + (1_999_999, 1000), + (2_000_000, 2000), + 
(1_234_567_890_123, 1_234_567_000), + ], + ) + def test_ctor_no_timestamp(self, timestamp_ns, expected_timestamp_micros): + """If no timestamp is given, should use current time with millisecond precision""" + with mock.patch("time.time_ns", return_value=timestamp_ns): + instance = self._make_one("test-family", b"test-qualifier", b"test-value") + assert instance.timestamp_micros == expected_timestamp_micros + + def test__to_dict(self): + """ensure dict representation is as expected""" + expected_family = "test-family" + expected_qualifier = b"test-qualifier" + expected_value = b"test-value" + expected_timestamp = 123456789 + instance = self._make_one( + expected_family, expected_qualifier, expected_value, expected_timestamp + ) + got_dict = instance._to_dict() + assert list(got_dict.keys()) == ["set_cell"] + got_inner_dict = got_dict["set_cell"] + assert got_inner_dict["family_name"] == expected_family + assert got_inner_dict["column_qualifier"] == expected_qualifier + assert got_inner_dict["timestamp_micros"] == expected_timestamp + assert got_inner_dict["value"] == expected_value + assert len(got_inner_dict.keys()) == 4 + + def test__to_dict_server_timestamp(self): + """test with server side timestamp -1 value""" + expected_family = "test-family" + expected_qualifier = b"test-qualifier" + expected_value = b"test-value" + expected_timestamp = -1 + instance = self._make_one( + expected_family, expected_qualifier, expected_value, expected_timestamp + ) + got_dict = instance._to_dict() + assert list(got_dict.keys()) == ["set_cell"] + got_inner_dict = got_dict["set_cell"] + assert got_inner_dict["family_name"] == expected_family + assert got_inner_dict["column_qualifier"] == expected_qualifier + assert got_inner_dict["timestamp_micros"] == expected_timestamp + assert got_inner_dict["value"] == expected_value + assert len(got_inner_dict.keys()) == 4 + + def test__to_pb(self): + """ensure proto representation is as expected""" + import google.cloud.bigtable_v2.types.data as data_pb + + expected_family = "test-family" + expected_qualifier = b"test-qualifier" + expected_value = b"test-value" + expected_timestamp = 123456789 + instance = self._make_one( + expected_family, expected_qualifier, expected_value, expected_timestamp + ) + got_pb = instance._to_pb() + assert isinstance(got_pb, data_pb.Mutation) + assert got_pb.set_cell.family_name == expected_family + assert got_pb.set_cell.column_qualifier == expected_qualifier + assert got_pb.set_cell.timestamp_micros == expected_timestamp + assert got_pb.set_cell.value == expected_value + + def test__to_pb_server_timestamp(self): + """test with server side timestamp -1 value""" + import google.cloud.bigtable_v2.types.data as data_pb + + expected_family = "test-family" + expected_qualifier = b"test-qualifier" + expected_value = b"test-value" + expected_timestamp = -1 + instance = self._make_one( + expected_family, expected_qualifier, expected_value, expected_timestamp + ) + got_pb = instance._to_pb() + assert isinstance(got_pb, data_pb.Mutation) + assert got_pb.set_cell.family_name == expected_family + assert got_pb.set_cell.column_qualifier == expected_qualifier + assert got_pb.set_cell.timestamp_micros == expected_timestamp + assert got_pb.set_cell.value == expected_value + + @pytest.mark.parametrize( + "timestamp,expected_value", + [ + (1234567890, True), + (1, True), + (0, True), + (-1, False), + (None, True), + ], + ) + def test_is_idempotent(self, timestamp, expected_value): + """is_idempotent is based on whether an explicit timestamp is set""" + 
instance = self._make_one( + "test-family", b"test-qualifier", b"test-value", timestamp + ) + assert instance.is_idempotent() is expected_value + + def test___str__(self): + """Str representation of mutations should be to_dict""" + instance = self._make_one( + "test-family", b"test-qualifier", b"test-value", 1234567890 + ) + str_value = instance.__str__() + dict_value = instance._to_dict() + assert str_value == str(dict_value) + + +class TestDeleteRangeFromColumn: + def _target_class(self): + from google.cloud.bigtable.data.mutations import DeleteRangeFromColumn + + return DeleteRangeFromColumn + + def _make_one(self, *args, **kwargs): + return self._target_class()(*args, **kwargs) + + def test_ctor(self): + expected_family = "test-family" + expected_qualifier = b"test-qualifier" + expected_start = 1234567890 + expected_end = 1234567891 + instance = self._make_one( + expected_family, expected_qualifier, expected_start, expected_end + ) + assert instance.family == expected_family + assert instance.qualifier == expected_qualifier + assert instance.start_timestamp_micros == expected_start + assert instance.end_timestamp_micros == expected_end + + def test_ctor_no_timestamps(self): + expected_family = "test-family" + expected_qualifier = b"test-qualifier" + instance = self._make_one(expected_family, expected_qualifier) + assert instance.family == expected_family + assert instance.qualifier == expected_qualifier + assert instance.start_timestamp_micros is None + assert instance.end_timestamp_micros is None + + def test_ctor_timestamps_out_of_order(self): + expected_family = "test-family" + expected_qualifier = b"test-qualifier" + expected_start = 10 + expected_end = 1 + with pytest.raises(ValueError) as excinfo: + self._make_one( + expected_family, expected_qualifier, expected_start, expected_end + ) + assert "start_timestamp_micros must be <= end_timestamp_micros" in str( + excinfo.value + ) + + @pytest.mark.parametrize( + "start,end", + [ + (0, 1), + (None, 1), + (0, None), + ], + ) + def test__to_dict(self, start, end): + """Should be unimplemented in the base class""" + expected_family = "test-family" + expected_qualifier = b"test-qualifier" + + instance = self._make_one(expected_family, expected_qualifier, start, end) + got_dict = instance._to_dict() + assert list(got_dict.keys()) == ["delete_from_column"] + got_inner_dict = got_dict["delete_from_column"] + assert len(got_inner_dict.keys()) == 3 + assert got_inner_dict["family_name"] == expected_family + assert got_inner_dict["column_qualifier"] == expected_qualifier + time_range_dict = got_inner_dict["time_range"] + expected_len = int(isinstance(start, int)) + int(isinstance(end, int)) + assert len(time_range_dict.keys()) == expected_len + if start is not None: + assert time_range_dict["start_timestamp_micros"] == start + if end is not None: + assert time_range_dict["end_timestamp_micros"] == end + + def test__to_pb(self): + """ensure proto representation is as expected""" + import google.cloud.bigtable_v2.types.data as data_pb + + expected_family = "test-family" + expected_qualifier = b"test-qualifier" + instance = self._make_one(expected_family, expected_qualifier) + got_pb = instance._to_pb() + assert isinstance(got_pb, data_pb.Mutation) + assert got_pb.delete_from_column.family_name == expected_family + assert got_pb.delete_from_column.column_qualifier == expected_qualifier + + def test_is_idempotent(self): + """is_idempotent is always true""" + instance = self._make_one( + "test-family", b"test-qualifier", 1234567890, 1234567891 + ) 
+ assert instance.is_idempotent() is True + + def test___str__(self): + """Str representation of mutations should be to_dict""" + instance = self._make_one("test-family", b"test-qualifier") + str_value = instance.__str__() + dict_value = instance._to_dict() + assert str_value == str(dict_value) + + +class TestDeleteAllFromFamily: + def _target_class(self): + from google.cloud.bigtable.data.mutations import DeleteAllFromFamily + + return DeleteAllFromFamily + + def _make_one(self, *args, **kwargs): + return self._target_class()(*args, **kwargs) + + def test_ctor(self): + expected_family = "test-family" + instance = self._make_one(expected_family) + assert instance.family_to_delete == expected_family + + def test__to_dict(self): + """Should be unimplemented in the base class""" + expected_family = "test-family" + instance = self._make_one(expected_family) + got_dict = instance._to_dict() + assert list(got_dict.keys()) == ["delete_from_family"] + got_inner_dict = got_dict["delete_from_family"] + assert len(got_inner_dict.keys()) == 1 + assert got_inner_dict["family_name"] == expected_family + + def test__to_pb(self): + """ensure proto representation is as expected""" + import google.cloud.bigtable_v2.types.data as data_pb + + expected_family = "test-family" + instance = self._make_one(expected_family) + got_pb = instance._to_pb() + assert isinstance(got_pb, data_pb.Mutation) + assert got_pb.delete_from_family.family_name == expected_family + + def test_is_idempotent(self): + """is_idempotent is always true""" + instance = self._make_one("test-family") + assert instance.is_idempotent() is True + + def test___str__(self): + """Str representation of mutations should be to_dict""" + instance = self._make_one("test-family") + str_value = instance.__str__() + dict_value = instance._to_dict() + assert str_value == str(dict_value) + + +class TestDeleteFromRow: + def _target_class(self): + from google.cloud.bigtable.data.mutations import DeleteAllFromRow + + return DeleteAllFromRow + + def _make_one(self, *args, **kwargs): + return self._target_class()(*args, **kwargs) + + def test_ctor(self): + self._make_one() + + def test__to_dict(self): + """Should be unimplemented in the base class""" + instance = self._make_one() + got_dict = instance._to_dict() + assert list(got_dict.keys()) == ["delete_from_row"] + assert len(got_dict["delete_from_row"].keys()) == 0 + + def test__to_pb(self): + """ensure proto representation is as expected""" + import google.cloud.bigtable_v2.types.data as data_pb + + instance = self._make_one() + got_pb = instance._to_pb() + assert isinstance(got_pb, data_pb.Mutation) + assert "delete_from_row" in str(got_pb) + + def test_is_idempotent(self): + """is_idempotent is always true""" + instance = self._make_one() + assert instance.is_idempotent() is True + + def test___str__(self): + """Str representation of mutations should be to_dict""" + instance = self._make_one() + assert instance.__str__() == "{'delete_from_row': {}}" + + +class TestRowMutationEntry: + def _target_class(self): + from google.cloud.bigtable.data.mutations import RowMutationEntry + + return RowMutationEntry + + def _make_one(self, row_key, mutations): + return self._target_class()(row_key, mutations) + + def test_ctor(self): + expected_key = b"row_key" + expected_mutations = [mock.Mock()] + instance = self._make_one(expected_key, expected_mutations) + assert instance.row_key == expected_key + assert list(instance.mutations) == expected_mutations + + def test_ctor_over_limit(self): + """Should raise error if 
mutations exceed MAX_MUTATIONS_PER_ENTRY""" + from google.cloud.bigtable.data.mutations import ( + _MUTATE_ROWS_REQUEST_MUTATION_LIMIT, + ) + + assert _MUTATE_ROWS_REQUEST_MUTATION_LIMIT == 100_000 + # no errors at limit + expected_mutations = [None for _ in range(_MUTATE_ROWS_REQUEST_MUTATION_LIMIT)] + self._make_one(b"row_key", expected_mutations) + # error if over limit + with pytest.raises(ValueError) as e: + self._make_one("key", expected_mutations + [mock.Mock()]) + assert "entries must have <= 100000 mutations" in str(e.value) + + def test_ctor_str_key(self): + expected_key = "row_key" + expected_mutations = [mock.Mock(), mock.Mock()] + instance = self._make_one(expected_key, expected_mutations) + assert instance.row_key == b"row_key" + assert list(instance.mutations) == expected_mutations + + def test_ctor_single_mutation(self): + from google.cloud.bigtable.data.mutations import DeleteAllFromRow + + expected_key = b"row_key" + expected_mutations = DeleteAllFromRow() + instance = self._make_one(expected_key, expected_mutations) + assert instance.row_key == expected_key + assert instance.mutations == (expected_mutations,) + + def test__to_dict(self): + expected_key = "row_key" + mutation_mock = mock.Mock() + n_mutations = 3 + expected_mutations = [mutation_mock for i in range(n_mutations)] + for mock_mutations in expected_mutations: + mock_mutations._to_dict.return_value = {"test": "data"} + instance = self._make_one(expected_key, expected_mutations) + expected_result = { + "row_key": b"row_key", + "mutations": [{"test": "data"}] * n_mutations, + } + assert instance._to_dict() == expected_result + assert mutation_mock._to_dict.call_count == n_mutations + + def test__to_pb(self): + from google.cloud.bigtable_v2.types.bigtable import MutateRowsRequest + from google.cloud.bigtable_v2.types.data import Mutation + + expected_key = "row_key" + mutation_mock = mock.Mock() + n_mutations = 3 + expected_mutations = [mutation_mock for i in range(n_mutations)] + for mock_mutations in expected_mutations: + mock_mutations._to_pb.return_value = Mutation() + instance = self._make_one(expected_key, expected_mutations) + pb_result = instance._to_pb() + assert isinstance(pb_result, MutateRowsRequest.Entry) + assert pb_result.row_key == b"row_key" + assert pb_result.mutations == [Mutation()] * n_mutations + assert mutation_mock._to_pb.call_count == n_mutations + + @pytest.mark.parametrize( + "mutations,result", + [ + ([mock.Mock(is_idempotent=lambda: True)], True), + ([mock.Mock(is_idempotent=lambda: False)], False), + ( + [ + mock.Mock(is_idempotent=lambda: True), + mock.Mock(is_idempotent=lambda: False), + ], + False, + ), + ( + [ + mock.Mock(is_idempotent=lambda: True), + mock.Mock(is_idempotent=lambda: True), + ], + True, + ), + ], + ) + def test_is_idempotent(self, mutations, result): + instance = self._make_one("row_key", mutations) + assert instance.is_idempotent() == result + + def test_empty_mutations(self): + with pytest.raises(ValueError) as e: + self._make_one("row_key", []) + assert "must not be empty" in str(e.value) + + @pytest.mark.parametrize("test_dict", [{}, {"key": "value"}]) + def test_size(self, test_dict): + from sys import getsizeof + + """Size should return size of dict representation""" + self_mock = mock.Mock() + self_mock._to_dict.return_value = test_dict + size_value = self._target_class().size(self_mock) + assert size_value == getsizeof(test_dict) + + def test__from_dict_mock(self): + """ + test creating instance from entry dict, with mocked mutation._from_dict + """ + 
expected_key = b"row_key" + expected_mutations = [mock.Mock(), mock.Mock()] + input_dict = { + "row_key": expected_key, + "mutations": [{"test": "data"}, {"another": "data"}], + } + with mock.patch.object(mutations.Mutation, "_from_dict") as inner_from_dict: + inner_from_dict.side_effect = expected_mutations + instance = self._target_class()._from_dict(input_dict) + assert instance.row_key == b"row_key" + assert inner_from_dict.call_count == 2 + assert len(instance.mutations) == 2 + assert instance.mutations[0] == expected_mutations[0] + assert instance.mutations[1] == expected_mutations[1] + + def test__from_dict(self): + """ + test creating end-to-end with a real mutation instance + """ + input_dict = { + "row_key": b"row_key", + "mutations": [{"delete_from_family": {"family_name": "test_family"}}], + } + instance = self._target_class()._from_dict(input_dict) + assert instance.row_key == b"row_key" + assert len(instance.mutations) == 1 + assert isinstance(instance.mutations[0], mutations.DeleteAllFromFamily) + assert instance.mutations[0].family_to_delete == "test_family" diff --git a/packages/google-cloud-bigtable/tests/unit/data/test_read_modify_write_rules.py b/packages/google-cloud-bigtable/tests/unit/data/test_read_modify_write_rules.py new file mode 100644 index 000000000000..1f67da13b170 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/data/test_read_modify_write_rules.py @@ -0,0 +1,186 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
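+
+# Unit tests for the read-modify-write rule classes
+# (ReadModifyWriteRule, IncrementRule, AppendValueRule): constructor
+# validation and their dict and protobuf representations.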
+ + +import pytest + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock +except ImportError: # pragma: NO COVER + import mock # type: ignore + + +class TestBaseReadModifyWriteRule: + def _target_class(self): + from google.cloud.bigtable.data.read_modify_write_rules import ( + ReadModifyWriteRule, + ) + + return ReadModifyWriteRule + + def test_abstract(self): + """should not be able to instantiate""" + with pytest.raises(TypeError): + self._target_class()(family="foo", qualifier=b"bar") + + def test__to_dict(self): + """ + to_dict not implemented in base class + """ + with pytest.raises(NotImplementedError): + self._target_class()._to_dict(mock.Mock()) + + +class TestIncrementRule: + def _target_class(self): + from google.cloud.bigtable.data.read_modify_write_rules import IncrementRule + + return IncrementRule + + @pytest.mark.parametrize( + "args,expected", + [ + (("fam", b"qual", 1), ("fam", b"qual", 1)), + (("fam", b"qual", -12), ("fam", b"qual", -12)), + (("fam", "qual", 1), ("fam", b"qual", 1)), + (("fam", "qual", 0), ("fam", b"qual", 0)), + (("", "", 0), ("", b"", 0)), + (("f", b"q"), ("f", b"q", 1)), + ], + ) + def test_ctor(self, args, expected): + instance = self._target_class()(*args) + assert instance.family == expected[0] + assert instance.qualifier == expected[1] + assert instance.increment_amount == expected[2] + + @pytest.mark.parametrize("input_amount", [1.1, None, "1", object(), "", b"", b"1"]) + def test_ctor_bad_input(self, input_amount): + with pytest.raises(TypeError) as e: + self._target_class()("fam", b"qual", input_amount) + assert "increment_amount must be an integer" in str(e.value) + + @pytest.mark.parametrize( + "large_value", [2**64, 2**64 + 1, -(2**64), -(2**64) - 1] + ) + def test_ctor_large_values(self, large_value): + with pytest.raises(ValueError) as e: + self._target_class()("fam", b"qual", large_value) + assert "too large" in str(e.value) + + @pytest.mark.parametrize( + "args,expected", + [ + (("fam", b"qual", 1), ("fam", b"qual", 1)), + (("fam", b"qual", -12), ("fam", b"qual", -12)), + (("fam", "qual", 1), ("fam", b"qual", 1)), + (("fam", "qual", 0), ("fam", b"qual", 0)), + (("", "", 0), ("", b"", 0)), + (("f", b"q"), ("f", b"q", 1)), + ], + ) + def test__to_dict(self, args, expected): + instance = self._target_class()(*args) + expected = { + "family_name": expected[0], + "column_qualifier": expected[1], + "increment_amount": expected[2], + } + assert instance._to_dict() == expected + + @pytest.mark.parametrize( + "args,expected", + [ + (("fam", b"qual", 1), ("fam", b"qual", 1)), + (("fam", b"qual", -12), ("fam", b"qual", -12)), + (("fam", "qual", 1), ("fam", b"qual", 1)), + (("fam", "qual", 0), ("fam", b"qual", 0)), + (("", "", 0), ("", b"", 0)), + (("f", b"q"), ("f", b"q", 1)), + ], + ) + def test__to_pb(self, args, expected): + import google.cloud.bigtable_v2.types.data as data_pb + + instance = self._target_class()(*args) + pb_result = instance._to_pb() + assert isinstance(pb_result, data_pb.ReadModifyWriteRule) + assert pb_result.family_name == expected[0] + assert pb_result.column_qualifier == expected[1] + assert pb_result.increment_amount == expected[2] + + +class TestAppendValueRule: + def _target_class(self): + from google.cloud.bigtable.data.read_modify_write_rules import AppendValueRule + + return AppendValueRule + + @pytest.mark.parametrize( + "args,expected", + [ + (("fam", b"qual", b"val"), ("fam", b"qual", b"val")), + (("fam", "qual", b"val"), ("fam", b"qual", b"val")), + (("", "", b""), ("", b"", 
b"")), + (("f", "q", "str_val"), ("f", b"q", b"str_val")), + (("f", "q", ""), ("f", b"q", b"")), + ], + ) + def test_ctor(self, args, expected): + instance = self._target_class()(*args) + assert instance.family == expected[0] + assert instance.qualifier == expected[1] + assert instance.append_value == expected[2] + + @pytest.mark.parametrize("input_val", [5, 1.1, None, object()]) + def test_ctor_bad_input(self, input_val): + with pytest.raises(TypeError) as e: + self._target_class()("fam", b"qual", input_val) + assert "append_value must be bytes or str" in str(e.value) + + @pytest.mark.parametrize( + "args,expected", + [ + (("fam", b"qual", b"val"), ("fam", b"qual", b"val")), + (("fam", "qual", b"val"), ("fam", b"qual", b"val")), + (("", "", b""), ("", b"", b"")), + ], + ) + def test__to_dict(self, args, expected): + instance = self._target_class()(*args) + expected = { + "family_name": expected[0], + "column_qualifier": expected[1], + "append_value": expected[2], + } + assert instance._to_dict() == expected + + @pytest.mark.parametrize( + "args,expected", + [ + (("fam", b"qual", b"val"), ("fam", b"qual", b"val")), + (("fam", "qual", b"val"), ("fam", b"qual", b"val")), + (("", "", b""), ("", b"", b"")), + ], + ) + def test__to_pb(self, args, expected): + import google.cloud.bigtable_v2.types.data as data_pb + + instance = self._target_class()(*args) + pb_result = instance._to_pb() + assert isinstance(pb_result, data_pb.ReadModifyWriteRule) + assert pb_result.family_name == expected[0] + assert pb_result.column_qualifier == expected[1] + assert pb_result.append_value == expected[2] diff --git a/packages/google-cloud-bigtable/tests/unit/data/test_read_rows_acceptance.py b/packages/google-cloud-bigtable/tests/unit/data/test_read_rows_acceptance.py new file mode 100644 index 000000000000..7cb3c08dc27d --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/data/test_read_rows_acceptance.py @@ -0,0 +1,331 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from __future__ import annotations + +import os +from itertools import zip_longest + +import pytest +import mock + +from google.cloud.bigtable_v2 import ReadRowsResponse + +from google.cloud.bigtable.data._async.client import BigtableDataClientAsync +from google.cloud.bigtable.data.exceptions import InvalidChunk +from google.cloud.bigtable.data._async._read_rows import _ReadRowsOperationAsync +from google.cloud.bigtable.data.row import Row + +from ..v2_client.test_row_merger import ReadRowsTest, TestFile + + +def parse_readrows_acceptance_tests(): + dirname = os.path.dirname(__file__) + filename = os.path.join(dirname, "./read-rows-acceptance-test.json") + + with open(filename) as json_file: + test_json = TestFile.from_json(json_file.read()) + return test_json.read_rows_tests + + +def extract_results_from_row(row: Row): + results = [] + for family, col, cells in row.items(): + for cell in cells: + results.append( + ReadRowsTest.Result( + row_key=row.row_key, + family_name=family, + qualifier=col, + timestamp_micros=cell.timestamp_ns // 1000, + value=cell.value, + label=(cell.labels[0] if cell.labels else ""), + ) + ) + return results + + +@pytest.mark.parametrize( + "test_case", parse_readrows_acceptance_tests(), ids=lambda t: t.description +) +@pytest.mark.asyncio +async def test_row_merger_scenario(test_case: ReadRowsTest): + async def _scenerio_stream(): + for chunk in test_case.chunks: + yield ReadRowsResponse(chunks=[chunk]) + + try: + results = [] + instance = mock.Mock() + instance._last_yielded_row_key = None + instance._remaining_count = None + chunker = _ReadRowsOperationAsync.chunk_stream( + instance, _coro_wrapper(_scenerio_stream()) + ) + merger = _ReadRowsOperationAsync.merge_rows(chunker) + async for row in merger: + for cell in row: + cell_result = ReadRowsTest.Result( + row_key=cell.row_key, + family_name=cell.family, + qualifier=cell.qualifier, + timestamp_micros=cell.timestamp_micros, + value=cell.value, + label=cell.labels[0] if cell.labels else "", + ) + results.append(cell_result) + except InvalidChunk: + results.append(ReadRowsTest.Result(error=True)) + for expected, actual in zip_longest(test_case.results, results): + assert actual == expected + + +@pytest.mark.parametrize( + "test_case", parse_readrows_acceptance_tests(), ids=lambda t: t.description +) +@pytest.mark.asyncio +async def test_read_rows_scenario(test_case: ReadRowsTest): + async def _make_gapic_stream(chunk_list: list[ReadRowsResponse]): + from google.cloud.bigtable_v2 import ReadRowsResponse + + class mock_stream: + def __init__(self, chunk_list): + self.chunk_list = chunk_list + self.idx = -1 + + def __aiter__(self): + return self + + async def __anext__(self): + self.idx += 1 + if len(self.chunk_list) > self.idx: + chunk = self.chunk_list[self.idx] + return ReadRowsResponse(chunks=[chunk]) + raise StopAsyncIteration + + def cancel(self): + pass + + return mock_stream(chunk_list) + + try: + with mock.patch.dict(os.environ, {"BIGTABLE_EMULATOR_HOST": "localhost"}): + # use emulator mode to avoid auth issues in CI + client = BigtableDataClientAsync() + table = client.get_table("instance", "table") + results = [] + with mock.patch.object(table.client._gapic_client, "read_rows") as read_rows: + # run once, then return error on retry + read_rows.return_value = _make_gapic_stream(test_case.chunks) + async for row in await table.read_rows_stream(query={}): + for cell in row: + cell_result = ReadRowsTest.Result( + row_key=cell.row_key, + family_name=cell.family, + qualifier=cell.qualifier, + 
timestamp_micros=cell.timestamp_micros, + value=cell.value, + label=cell.labels[0] if cell.labels else "", + ) + results.append(cell_result) + except InvalidChunk: + results.append(ReadRowsTest.Result(error=True)) + finally: + await client.close() + for expected, actual in zip_longest(test_case.results, results): + assert actual == expected + + +@pytest.mark.asyncio +async def test_out_of_order_rows(): + async def _row_stream(): + yield ReadRowsResponse(last_scanned_row_key=b"a") + + instance = mock.Mock() + instance._remaining_count = None + instance._last_yielded_row_key = b"b" + chunker = _ReadRowsOperationAsync.chunk_stream( + instance, _coro_wrapper(_row_stream()) + ) + merger = _ReadRowsOperationAsync.merge_rows(chunker) + with pytest.raises(InvalidChunk): + async for _ in merger: + pass + + +@pytest.mark.asyncio +async def test_bare_reset(): + first_chunk = ReadRowsResponse.CellChunk( + ReadRowsResponse.CellChunk( + row_key=b"a", family_name="f", qualifier=b"q", value=b"v" + ) + ) + with pytest.raises(InvalidChunk): + await _process_chunks( + first_chunk, + ReadRowsResponse.CellChunk( + ReadRowsResponse.CellChunk(reset_row=True, row_key=b"a") + ), + ) + with pytest.raises(InvalidChunk): + await _process_chunks( + first_chunk, + ReadRowsResponse.CellChunk( + ReadRowsResponse.CellChunk(reset_row=True, family_name="f") + ), + ) + with pytest.raises(InvalidChunk): + await _process_chunks( + first_chunk, + ReadRowsResponse.CellChunk( + ReadRowsResponse.CellChunk(reset_row=True, qualifier=b"q") + ), + ) + with pytest.raises(InvalidChunk): + await _process_chunks( + first_chunk, + ReadRowsResponse.CellChunk( + ReadRowsResponse.CellChunk(reset_row=True, timestamp_micros=1000) + ), + ) + with pytest.raises(InvalidChunk): + await _process_chunks( + first_chunk, + ReadRowsResponse.CellChunk( + ReadRowsResponse.CellChunk(reset_row=True, labels=["a"]) + ), + ) + with pytest.raises(InvalidChunk): + await _process_chunks( + first_chunk, + ReadRowsResponse.CellChunk( + ReadRowsResponse.CellChunk(reset_row=True, value=b"v") + ), + ) + + +@pytest.mark.asyncio +async def test_missing_family(): + with pytest.raises(InvalidChunk): + await _process_chunks( + ReadRowsResponse.CellChunk( + row_key=b"a", + qualifier=b"q", + timestamp_micros=1000, + value=b"v", + commit_row=True, + ) + ) + + +@pytest.mark.asyncio +async def test_mid_cell_row_key_change(): + with pytest.raises(InvalidChunk): + await _process_chunks( + ReadRowsResponse.CellChunk( + row_key=b"a", + family_name="f", + qualifier=b"q", + timestamp_micros=1000, + value_size=2, + value=b"v", + ), + ReadRowsResponse.CellChunk(row_key=b"b", value=b"v", commit_row=True), + ) + + +@pytest.mark.asyncio +async def test_mid_cell_family_change(): + with pytest.raises(InvalidChunk): + await _process_chunks( + ReadRowsResponse.CellChunk( + row_key=b"a", + family_name="f", + qualifier=b"q", + timestamp_micros=1000, + value_size=2, + value=b"v", + ), + ReadRowsResponse.CellChunk(family_name="f2", value=b"v", commit_row=True), + ) + + +@pytest.mark.asyncio +async def test_mid_cell_qualifier_change(): + with pytest.raises(InvalidChunk): + await _process_chunks( + ReadRowsResponse.CellChunk( + row_key=b"a", + family_name="f", + qualifier=b"q", + timestamp_micros=1000, + value_size=2, + value=b"v", + ), + ReadRowsResponse.CellChunk(qualifier=b"q2", value=b"v", commit_row=True), + ) + + +@pytest.mark.asyncio +async def test_mid_cell_timestamp_change(): + with pytest.raises(InvalidChunk): + await _process_chunks( + ReadRowsResponse.CellChunk( + row_key=b"a", + 
family_name="f", + qualifier=b"q", + timestamp_micros=1000, + value_size=2, + value=b"v", + ), + ReadRowsResponse.CellChunk( + timestamp_micros=2000, value=b"v", commit_row=True + ), + ) + + +@pytest.mark.asyncio +async def test_mid_cell_labels_change(): + with pytest.raises(InvalidChunk): + await _process_chunks( + ReadRowsResponse.CellChunk( + row_key=b"a", + family_name="f", + qualifier=b"q", + timestamp_micros=1000, + value_size=2, + value=b"v", + ), + ReadRowsResponse.CellChunk(labels=["b"], value=b"v", commit_row=True), + ) + + +async def _coro_wrapper(stream): + return stream + + +async def _process_chunks(*chunks): + async def _row_stream(): + yield ReadRowsResponse(chunks=chunks) + + instance = mock.Mock() + instance._remaining_count = None + instance._last_yielded_row_key = None + chunker = _ReadRowsOperationAsync.chunk_stream( + instance, _coro_wrapper(_row_stream()) + ) + merger = _ReadRowsOperationAsync.merge_rows(chunker) + results = [] + async for row in merger: + results.append(row) + return results diff --git a/packages/google-cloud-bigtable/tests/unit/data/test_read_rows_query.py b/packages/google-cloud-bigtable/tests/unit/data/test_read_rows_query.py new file mode 100644 index 000000000000..ba3b0468bbb8 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/data/test_read_rows_query.py @@ -0,0 +1,589 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import pytest + +TEST_ROWS = [ + "row_key_1", + b"row_key_2", +] + + +class TestRowRange: + @staticmethod + def _get_target_class(): + from google.cloud.bigtable.data.read_rows_query import RowRange + + return RowRange + + def _make_one(self, *args, **kwargs): + return self._get_target_class()(*args, **kwargs) + + def test_ctor_start_end(self): + row_range = self._make_one("test_row", "test_row2") + assert row_range.start_key == "test_row".encode() + assert row_range.end_key == "test_row2".encode() + assert row_range.start_is_inclusive is True + assert row_range.end_is_inclusive is False + + def test_ctor_start_only(self): + row_range = self._make_one("test_row3") + assert row_range.start_key == "test_row3".encode() + assert row_range.start_is_inclusive is True + assert row_range.end_key is None + assert row_range.end_is_inclusive is True + + def test_ctor_end_only(self): + row_range = self._make_one(end_key="test_row4") + assert row_range.end_key == "test_row4".encode() + assert row_range.end_is_inclusive is False + assert row_range.start_key is None + assert row_range.start_is_inclusive is True + + def test_ctor_empty_strings(self): + """ + empty strings should be treated as None + """ + row_range = self._make_one("", "") + assert row_range.start_key is None + assert row_range.end_key is None + assert row_range.start_is_inclusive is True + assert row_range.end_is_inclusive is True + + def test_ctor_inclusive_flags(self): + row_range = self._make_one("test_row5", "test_row6", False, True) + assert row_range.start_key == "test_row5".encode() + assert row_range.end_key == "test_row6".encode() + assert row_range.start_is_inclusive is False + assert row_range.end_is_inclusive is True + + def test_ctor_defaults(self): + row_range = self._make_one() + assert row_range.start_key is None + assert row_range.end_key is None + + def test_ctor_invalid_keys(self): + # test with invalid keys + with pytest.raises(ValueError) as exc: + self._make_one(1, "2") + assert str(exc.value) == "start_key must be a string or bytes" + with pytest.raises(ValueError) as exc: + self._make_one("1", 2) + assert str(exc.value) == "end_key must be a string or bytes" + with pytest.raises(ValueError) as exc: + self._make_one("2", "1") + assert str(exc.value) == "start_key must be less than or equal to end_key" + + @pytest.mark.parametrize( + "dict_repr,expected", + [ + ({"start_key_closed": "test_row", "end_key_open": "test_row2"}, True), + ({"start_key_closed": b"test_row", "end_key_open": b"test_row2"}, True), + ({"start_key_open": "test_row", "end_key_closed": "test_row2"}, True), + ({"start_key_open": b"a"}, True), + ({"end_key_closed": b"b"}, True), + ({"start_key_closed": "a"}, True), + ({"end_key_open": b"b"}, True), + ({}, False), + ], + ) + def test___bool__(self, dict_repr, expected): + """ + Only row range with both points empty should be falsy + """ + from google.cloud.bigtable.data.read_rows_query import RowRange + + row_range = RowRange._from_dict(dict_repr) + assert bool(row_range) is expected + + def test__eq__(self): + """ + test that row ranges can be compared for equality + """ + from google.cloud.bigtable.data.read_rows_query import RowRange + + range1 = RowRange("1", "2") + range1_dup = RowRange("1", "2") + range2 = RowRange("1", "3") + range_w_empty = RowRange(None, "2") + assert range1 == range1_dup + assert range1 != range2 + assert range1 != range_w_empty + range_1_w_inclusive_start = RowRange("1", "2", start_is_inclusive=True) + range_1_w_exclusive_start = RowRange("1", "2", 
start_is_inclusive=False) + range_1_w_inclusive_end = RowRange("1", "2", end_is_inclusive=True) + range_1_w_exclusive_end = RowRange("1", "2", end_is_inclusive=False) + assert range1 == range_1_w_inclusive_start + assert range1 == range_1_w_exclusive_end + assert range1 != range_1_w_exclusive_start + assert range1 != range_1_w_inclusive_end + + @pytest.mark.parametrize( + "dict_repr,expected", + [ + ( + {"start_key_closed": "test_row", "end_key_open": "test_row2"}, + "[b'test_row', b'test_row2')", + ), + ( + {"start_key_open": "test_row", "end_key_closed": "test_row2"}, + "(b'test_row', b'test_row2']", + ), + ({"start_key_open": b"a"}, "(b'a', +inf]"), + ({"end_key_closed": b"b"}, "[-inf, b'b']"), + ({"end_key_open": b"b"}, "[-inf, b'b')"), + ({}, "[-inf, +inf]"), + ], + ) + def test___str__(self, dict_repr, expected): + """ + test string representations of row ranges + """ + from google.cloud.bigtable.data.read_rows_query import RowRange + + row_range = RowRange._from_dict(dict_repr) + assert str(row_range) == expected + + @pytest.mark.parametrize( + "dict_repr,expected", + [ + ( + {"start_key_closed": "test_row", "end_key_open": "test_row2"}, + "RowRange(start_key=b'test_row', end_key=b'test_row2')", + ), + ( + {"start_key_open": "test_row", "end_key_closed": "test_row2"}, + "RowRange(start_key=b'test_row', end_key=b'test_row2', start_is_inclusive=False, end_is_inclusive=True)", + ), + ( + {"start_key_open": b"a"}, + "RowRange(start_key=b'a', end_key=None, start_is_inclusive=False)", + ), + ( + {"end_key_closed": b"b"}, + "RowRange(start_key=None, end_key=b'b', end_is_inclusive=True)", + ), + ({"end_key_open": b"b"}, "RowRange(start_key=None, end_key=b'b')"), + ({}, "RowRange(start_key=None, end_key=None)"), + ], + ) + def test___repr__(self, dict_repr, expected): + """ + test repr representations of row ranges + """ + from google.cloud.bigtable.data.read_rows_query import RowRange + + row_range = RowRange._from_dict(dict_repr) + assert repr(row_range) == expected + + +class TestReadRowsQuery: + @staticmethod + def _get_target_class(): + from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery + + return ReadRowsQuery + + def _make_one(self, *args, **kwargs): + return self._get_target_class()(*args, **kwargs) + + def test_ctor_defaults(self): + query = self._make_one() + assert query.row_keys == list() + assert query.row_ranges == list() + assert query.filter is None + assert query.limit is None + + def test_ctor_explicit(self): + from google.cloud.bigtable.data.row_filters import RowFilterChain + from google.cloud.bigtable.data.read_rows_query import RowRange + + filter_ = RowFilterChain() + query = self._make_one( + ["row_key_1", "row_key_2"], + row_ranges=[RowRange("row_key_3", "row_key_4")], + limit=10, + row_filter=filter_, + ) + assert len(query.row_keys) == 2 + assert "row_key_1".encode() in query.row_keys + assert "row_key_2".encode() in query.row_keys + assert len(query.row_ranges) == 1 + assert RowRange("row_key_3", "row_key_4") in query.row_ranges + assert query.filter == filter_ + assert query.limit == 10 + + def test_ctor_invalid_limit(self): + with pytest.raises(ValueError) as exc: + self._make_one(limit=-1) + assert str(exc.value) == "limit must be >= 0" + + def test_set_filter(self): + from google.cloud.bigtable.data.row_filters import RowFilterChain + + filter1 = RowFilterChain() + query = self._make_one() + assert query.filter is None + query.filter = filter1 + assert query.filter == filter1 + filter2 = RowFilterChain() + query.filter = filter2 + assert 
query.filter == filter2 + query.filter = None + assert query.filter is None + query.filter = RowFilterChain() + assert query.filter == RowFilterChain() + + def test_set_limit(self): + query = self._make_one() + assert query.limit is None + query.limit = 10 + assert query.limit == 10 + query.limit = 9 + assert query.limit == 9 + query.limit = 0 + assert query.limit is None + with pytest.raises(ValueError) as exc: + query.limit = -1 + assert str(exc.value) == "limit must be >= 0" + with pytest.raises(ValueError) as exc: + query.limit = -100 + assert str(exc.value) == "limit must be >= 0" + + def test_add_key_str(self): + query = self._make_one() + assert query.row_keys == list() + input_str = "test_row" + query.add_key(input_str) + assert len(query.row_keys) == 1 + assert input_str.encode() in query.row_keys + input_str2 = "test_row2" + query.add_key(input_str2) + assert len(query.row_keys) == 2 + assert input_str.encode() in query.row_keys + assert input_str2.encode() in query.row_keys + + def test_add_key_bytes(self): + query = self._make_one() + assert query.row_keys == list() + input_bytes = b"test_row" + query.add_key(input_bytes) + assert len(query.row_keys) == 1 + assert input_bytes in query.row_keys + input_bytes2 = b"test_row2" + query.add_key(input_bytes2) + assert len(query.row_keys) == 2 + assert input_bytes in query.row_keys + assert input_bytes2 in query.row_keys + + def test_add_rows_batch(self): + query = self._make_one() + assert query.row_keys == list() + input_batch = ["test_row", b"test_row2", "test_row3"] + for k in input_batch: + query.add_key(k) + assert len(query.row_keys) == 3 + assert b"test_row" in query.row_keys + assert b"test_row2" in query.row_keys + assert b"test_row3" in query.row_keys + # test adding another batch + for k in ["test_row4", b"test_row5"]: + query.add_key(k) + assert len(query.row_keys) == 5 + assert input_batch[0].encode() in query.row_keys + assert input_batch[1] in query.row_keys + assert input_batch[2].encode() in query.row_keys + assert b"test_row4" in query.row_keys + assert b"test_row5" in query.row_keys + + def test_add_key_invalid(self): + query = self._make_one() + with pytest.raises(ValueError) as exc: + query.add_key(1) + assert str(exc.value) == "row_key must be string or bytes" + with pytest.raises(ValueError) as exc: + query.add_key(["s"]) + assert str(exc.value) == "row_key must be string or bytes" + + def test_add_range(self): + from google.cloud.bigtable.data.read_rows_query import RowRange + + query = self._make_one() + assert query.row_ranges == list() + input_range = RowRange(start_key=b"test_row") + query.add_range(input_range) + assert len(query.row_ranges) == 1 + assert input_range in query.row_ranges + input_range2 = RowRange(start_key=b"test_row2") + query.add_range(input_range2) + assert len(query.row_ranges) == 2 + assert input_range in query.row_ranges + assert input_range2 in query.row_ranges + + def _parse_query_string(self, query_string): + from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery, RowRange + + query = ReadRowsQuery() + segments = query_string.split(",") + for segment in segments: + if "-" in segment: + start, end = segment.split("-") + s_open, e_open = True, True + if start == "": + start = None + s_open = None + else: + if start[0] == "(": + s_open = False + start = start[1:] + if end == "": + end = None + e_open = None + else: + if end[-1] == ")": + e_open = False + end = end[:-1] + query.add_range(RowRange(start, end, s_open, e_open)) + else: + query.add_key(segment) + return query 
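+
+    # The query-string notation accepted by _parse_query_string() above is a
+    # comma-separated list of segments: a bare segment adds a row key, while
+    # a segment containing "-" adds a RowRange.  A leading "(" makes the
+    # start exclusive, a trailing ")" makes the end exclusive, and an empty
+    # side leaves that bound of the range open (unbounded).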
+ + @pytest.mark.parametrize( + "query_string,shard_points", + [ + ("a,[p-q)", []), + ("0_key,[1_range_start-2_range_end)", ["3_split"]), + ("0_key,[1_range_start-2_range_end)", ["2_range_end"]), + ("0_key,[1_range_start-2_range_end]", ["2_range_end"]), + ("-1_range_end)", ["5_split"]), + ("8_key,(1_range_start-2_range_end]", ["1_range_start"]), + ("9_row_key,(5_range_start-7_range_end)", ["3_split"]), + ("3_row_key,(5_range_start-7_range_end)", ["2_row_key"]), + ("4_split,4_split,(3_split-5_split]", ["3_split", "5_split"]), + ("(3_split-", ["3_split"]), + ], + ) + def test_shard_no_split(self, query_string, shard_points): + """ + Test sharding with a set of queries that should not result in any splits. + """ + initial_query = self._parse_query_string(query_string) + row_samples = [(point.encode(), None) for point in shard_points] + sharded_queries = initial_query.shard(row_samples) + assert len(sharded_queries) == 1 + assert initial_query == sharded_queries[0] + + def test_shard_full_table_scan_empty_split(self): + """ + Sharding a full table scan with no split should return another full table scan. + """ + from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery + + full_scan_query = ReadRowsQuery() + split_points = [] + sharded_queries = full_scan_query.shard(split_points) + assert len(sharded_queries) == 1 + result_query = sharded_queries[0] + assert result_query == full_scan_query + + def test_shard_full_table_scan_with_split(self): + """ + Test splitting a full table scan into two queries + """ + from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery + + full_scan_query = ReadRowsQuery() + split_points = [(b"a", None)] + sharded_queries = full_scan_query.shard(split_points) + assert len(sharded_queries) == 2 + assert sharded_queries[0] == self._parse_query_string("-a]") + assert sharded_queries[1] == self._parse_query_string("(a-") + + def test_shard_full_table_scan_with_multiple_split(self): + """ + Test splitting a full table scan into three queries + """ + from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery + + full_scan_query = ReadRowsQuery() + split_points = [(b"a", None), (b"z", None)] + sharded_queries = full_scan_query.shard(split_points) + assert len(sharded_queries) == 3 + assert sharded_queries[0] == self._parse_query_string("-a]") + assert sharded_queries[1] == self._parse_query_string("(a-z]") + assert sharded_queries[2] == self._parse_query_string("(z-") + + def test_shard_multiple_keys(self): + """ + Test splitting multiple individual keys into separate queries + """ + initial_query = self._parse_query_string("1_beforeSplit,2_onSplit,3_afterSplit") + split_points = [(b"2_onSplit", None)] + sharded_queries = initial_query.shard(split_points) + assert len(sharded_queries) == 2 + assert sharded_queries[0] == self._parse_query_string("1_beforeSplit,2_onSplit") + assert sharded_queries[1] == self._parse_query_string("3_afterSplit") + + def test_shard_keys_empty_left(self): + """ + Test with the left-most split point empty + """ + initial_query = self._parse_query_string("5_test,8_test") + split_points = [(b"0_split", None), (b"6_split", None)] + sharded_queries = initial_query.shard(split_points) + assert len(sharded_queries) == 2 + assert sharded_queries[0] == self._parse_query_string("5_test") + assert sharded_queries[1] == self._parse_query_string("8_test") + + def test_shard_keys_empty_right(self): + """ + Test with the right-most split point empty + """ + initial_query = self._parse_query_string("0_test,2_test") + 
split_points = [(b"1_split", None), (b"5_split", None)] + sharded_queries = initial_query.shard(split_points) + assert len(sharded_queries) == 2 + assert sharded_queries[0] == self._parse_query_string("0_test") + assert sharded_queries[1] == self._parse_query_string("2_test") + + def test_shard_mixed_split(self): + """ + Test splitting a complex query with multiple split points + """ + initial_query = self._parse_query_string("0,a,c,-a],-b],(c-e],(d-f],(m-") + split_points = [(s.encode(), None) for s in ["a", "d", "j", "o"]] + sharded_queries = initial_query.shard(split_points) + assert len(sharded_queries) == 5 + assert sharded_queries[0] == self._parse_query_string("0,a,-a]") + assert sharded_queries[1] == self._parse_query_string("c,(a-b],(c-d]") + assert sharded_queries[2] == self._parse_query_string("(d-e],(d-f]") + assert sharded_queries[3] == self._parse_query_string("(m-o]") + assert sharded_queries[4] == self._parse_query_string("(o-") + + def test_shard_unsorted_request(self): + """ + Test with a query that contains rows and queries in a random order + """ + initial_query = self._parse_query_string( + "7_row_key_1,2_row_key_2,[8_range_1_start-9_range_1_end),[3_range_2_start-4_range_2_end)" + ) + split_points = [(b"5-split", None)] + sharded_queries = initial_query.shard(split_points) + assert len(sharded_queries) == 2 + assert sharded_queries[0] == self._parse_query_string( + "2_row_key_2,[3_range_2_start-4_range_2_end)" + ) + assert sharded_queries[1] == self._parse_query_string( + "7_row_key_1,[8_range_1_start-9_range_1_end)" + ) + + @pytest.mark.parametrize( + "query_string,shard_points", + [ + ("a,[p-q)", []), + ("0_key,[1_range_start-2_range_end)", ["3_split"]), + ("-1_range_end)", ["5_split"]), + ("0_key,[1_range_start-2_range_end)", ["2_range_end"]), + ("9_row_key,(5_range_start-7_range_end)", ["3_split"]), + ("(5_range_start-", ["3_split"]), + ("3_split,[3_split-5_split)", ["3_split", "5_split"]), + ("[3_split-", ["3_split"]), + ("", []), + ("", ["3_split"]), + ("", ["3_split", "5_split"]), + ("1,2,3,4,5,6,7,8,9", ["3_split"]), + ], + ) + def test_shard_keeps_filter(self, query_string, shard_points): + """ + sharded queries should keep the filter from the original query + """ + initial_query = self._parse_query_string(query_string) + expected_filter = {"test": "filter"} + initial_query.filter = expected_filter + row_samples = [(point.encode(), None) for point in shard_points] + sharded_queries = initial_query.shard(row_samples) + assert len(sharded_queries) > 0 + for query in sharded_queries: + assert query.filter == expected_filter + + def test_shard_limit_exception(self): + """ + queries with a limit should raise an exception when a shard is attempted + """ + from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery + + query = ReadRowsQuery(limit=10) + with pytest.raises(AttributeError) as e: + query.shard([]) + assert "Cannot shard query with a limit" in str(e.value) + + @pytest.mark.parametrize( + "first_args,second_args,expected", + [ + ((), (), True), + ((), ("a",), False), + (("a",), (), False), + (("a",), ("a",), True), + ((["a"],), (["a", "b"],), False), + ((["a", "b"],), (["a", "b"],), True), + ((["a", b"b"],), ([b"a", "b"],), True), + (("a",), (b"a",), True), + (("a",), ("b",), False), + (("a",), ("a", ["b"]), False), + (("a", "b"), ("a", ["b"]), True), + (("a", ["b"]), ("a", ["b", "c"]), False), + (("a", ["b", "c"]), ("a", [b"b", "c"]), True), + (("a", ["b", "c"], 1), ("a", ["b", b"c"], 1), True), + (("a", ["b"], 1), ("a", ["b"], 2), False), + 
(("a", ["b"], 1, {"a": "b"}), ("a", ["b"], 1, {"a": "b"}), True), + (("a", ["b"], 1, {"a": "b"}), ("a", ["b"], 1), False), + ( + (), + (None, [None], None, None), + True, + ), # empty query is equal to empty row range + ((), (None, [None], 1, None), False), + ((), (None, [None], None, {"a": "b"}), False), + ], + ) + def test___eq__(self, first_args, second_args, expected): + from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery + from google.cloud.bigtable.data.read_rows_query import RowRange + + # replace row_range placeholders with a RowRange object + if len(first_args) > 1: + first_args = list(first_args) + first_args[1] = [RowRange(c) for c in first_args[1]] + if len(second_args) > 1: + second_args = list(second_args) + second_args[1] = [RowRange(c) for c in second_args[1]] + first = ReadRowsQuery(*first_args) + second = ReadRowsQuery(*second_args) + assert (first == second) == expected + + def test___repr__(self): + from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery + + instance = self._make_one(row_keys=["a", "b"], row_filter={}, limit=10) + # should be able to recreate the instance from the repr + repr_str = repr(instance) + recreated = eval(repr_str) + assert isinstance(recreated, ReadRowsQuery) + assert recreated == instance + + def test_empty_row_set(self): + """Empty strings should be treated as keys inputs""" + query = self._make_one(row_keys="") + assert query.row_keys == [b""] diff --git a/packages/google-cloud-bigtable/tests/unit/data/test_row.py b/packages/google-cloud-bigtable/tests/unit/data/test_row.py new file mode 100644 index 000000000000..10b5bdb2316f --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/data/test_row.py @@ -0,0 +1,718 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import time + +TEST_VALUE = b"1234" +TEST_ROW_KEY = b"row" +TEST_FAMILY_ID = "cf1" +TEST_QUALIFIER = b"col" +TEST_TIMESTAMP = time.time_ns() // 1000 +TEST_LABELS = ["label1", "label2"] + + +class TestRow(unittest.TestCase): + @staticmethod + def _get_target_class(): + from google.cloud.bigtable.data.row import Row + + return Row + + def _make_one(self, *args, **kwargs): + if len(args) == 0: + args = (TEST_ROW_KEY, [self._make_cell()]) + return self._get_target_class()(*args, **kwargs) + + def _make_cell( + self, + value=TEST_VALUE, + row_key=TEST_ROW_KEY, + family_id=TEST_FAMILY_ID, + qualifier=TEST_QUALIFIER, + timestamp=TEST_TIMESTAMP, + labels=TEST_LABELS, + ): + from google.cloud.bigtable.data.row import Cell + + return Cell(value, row_key, family_id, qualifier, timestamp, labels) + + def test_ctor(self): + cells = [self._make_cell(), self._make_cell()] + row_response = self._make_one(TEST_ROW_KEY, cells) + self.assertEqual(list(row_response), cells) + self.assertEqual(row_response.row_key, TEST_ROW_KEY) + + def test__from_pb(self): + """ + Construct from protobuf. 
+ """ + from google.cloud.bigtable_v2.types import Row as RowPB + from google.cloud.bigtable_v2.types import Family as FamilyPB + from google.cloud.bigtable_v2.types import Column as ColumnPB + from google.cloud.bigtable_v2.types import Cell as CellPB + + row_key = b"row_key" + cells = [ + CellPB( + value=str(i).encode(), + timestamp_micros=TEST_TIMESTAMP, + labels=TEST_LABELS, + ) + for i in range(2) + ] + column = ColumnPB(qualifier=TEST_QUALIFIER, cells=cells) + families_pb = [FamilyPB(name=TEST_FAMILY_ID, columns=[column])] + row_pb = RowPB(key=row_key, families=families_pb) + output = self._get_target_class()._from_pb(row_pb) + self.assertEqual(output.row_key, row_key) + self.assertEqual(len(output), 2) + self.assertEqual(output[0].value, b"0") + self.assertEqual(output[1].value, b"1") + self.assertEqual(output[0].timestamp_micros, TEST_TIMESTAMP) + self.assertEqual(output[0].labels, TEST_LABELS) + assert output[0].row_key == row_key + assert output[0].family == TEST_FAMILY_ID + assert output[0].qualifier == TEST_QUALIFIER + + def test__from_pb_sparse(self): + """ + Construct from minimal protobuf. + """ + from google.cloud.bigtable_v2.types import Row as RowPB + + row_key = b"row_key" + row_pb = RowPB(key=row_key) + output = self._get_target_class()._from_pb(row_pb) + self.assertEqual(output.row_key, row_key) + self.assertEqual(len(output), 0) + + def test_get_cells(self): + cell_list = [] + for family_id in ["1", "2"]: + for qualifier in [b"a", b"b"]: + cell = self._make_cell(family_id=family_id, qualifier=qualifier) + cell_list.append(cell) + # test getting all cells + row_response = self._make_one(TEST_ROW_KEY, cell_list) + self.assertEqual(row_response.get_cells(), cell_list) + # test getting cells in a family + output = row_response.get_cells(family="1") + self.assertEqual(len(output), 2) + self.assertEqual(output[0].family, "1") + self.assertEqual(output[1].family, "1") + self.assertEqual(output[0], cell_list[0]) + # test getting cells in a family/qualifier + # should accept bytes or str for qualifier + for q in [b"a", "a"]: + output = row_response.get_cells(family="1", qualifier=q) + self.assertEqual(len(output), 1) + self.assertEqual(output[0].family, "1") + self.assertEqual(output[0].qualifier, b"a") + self.assertEqual(output[0], cell_list[0]) + # calling with just qualifier should raise an error + with self.assertRaises(ValueError): + row_response.get_cells(qualifier=b"a") + # test calling with bad family or qualifier + with self.assertRaises(ValueError): + row_response.get_cells(family="3", qualifier=b"a") + with self.assertRaises(ValueError): + row_response.get_cells(family="3") + with self.assertRaises(ValueError): + row_response.get_cells(family="1", qualifier=b"c") + + def test___repr__(self): + cell_str = ( + "{'value': b'1234', 'timestamp_micros': %d, 'labels': ['label1', 'label2']}" + % (TEST_TIMESTAMP) + ) + expected_prefix = "Row(key=b'row', cells=" + row = self._make_one(TEST_ROW_KEY, [self._make_cell()]) + self.assertIn(expected_prefix, repr(row)) + self.assertIn(cell_str, repr(row)) + expected_full = ( + "Row(key=b'row', cells={\n ('cf1', b'col'): [{'value': b'1234', 'timestamp_micros': %d, 'labels': ['label1', 'label2']}],\n})" + % (TEST_TIMESTAMP) + ) + self.assertEqual(expected_full, repr(row)) + # try with multiple cells + row = self._make_one(TEST_ROW_KEY, [self._make_cell(), self._make_cell()]) + self.assertIn(expected_prefix, repr(row)) + self.assertIn(cell_str, repr(row)) + + def test___str__(self): + cells = [ + self._make_cell(value=b"1234", 
family_id="1", qualifier=b"col"), + self._make_cell(value=b"5678", family_id="3", qualifier=b"col"), + self._make_cell(value=b"1", family_id="3", qualifier=b"col"), + self._make_cell(value=b"2", family_id="3", qualifier=b"col"), + ] + + row_response = self._make_one(TEST_ROW_KEY, cells) + expected = ( + "{\n" + + " (family='1', qualifier=b'col'): [b'1234'],\n" + + " (family='3', qualifier=b'col'): [b'5678', (+2 more)],\n" + + "}" + ) + self.assertEqual(expected, str(row_response)) + + def test_to_dict(self): + from google.cloud.bigtable_v2.types import Row + + cell1 = self._make_cell() + cell2 = self._make_cell() + cell2.value = b"other" + row = self._make_one(TEST_ROW_KEY, [cell1, cell2]) + row_dict = row._to_dict() + expected_dict = { + "key": TEST_ROW_KEY, + "families": [ + { + "name": TEST_FAMILY_ID, + "columns": [ + { + "qualifier": TEST_QUALIFIER, + "cells": [ + { + "value": TEST_VALUE, + "timestamp_micros": TEST_TIMESTAMP, + "labels": TEST_LABELS, + }, + { + "value": b"other", + "timestamp_micros": TEST_TIMESTAMP, + "labels": TEST_LABELS, + }, + ], + } + ], + }, + ], + } + self.assertEqual(len(row_dict), len(expected_dict)) + for key, value in expected_dict.items(): + self.assertEqual(row_dict[key], value) + # should be able to construct a Cell proto from the dict + row_proto = Row(**row_dict) + self.assertEqual(row_proto.key, TEST_ROW_KEY) + self.assertEqual(len(row_proto.families), 1) + family = row_proto.families[0] + self.assertEqual(family.name, TEST_FAMILY_ID) + self.assertEqual(len(family.columns), 1) + column = family.columns[0] + self.assertEqual(column.qualifier, TEST_QUALIFIER) + self.assertEqual(len(column.cells), 2) + self.assertEqual(column.cells[0].value, TEST_VALUE) + self.assertEqual(column.cells[0].timestamp_micros, TEST_TIMESTAMP) + self.assertEqual(column.cells[0].labels, TEST_LABELS) + self.assertEqual(column.cells[1].value, cell2.value) + self.assertEqual(column.cells[1].timestamp_micros, TEST_TIMESTAMP) + self.assertEqual(column.cells[1].labels, TEST_LABELS) + + def test_iteration(self): + from google.cloud.bigtable.data.row import Cell + + # should be able to iterate over the Row as a list + cell1 = self._make_cell(value=b"1") + cell2 = self._make_cell(value=b"2") + cell3 = self._make_cell(value=b"3") + row_response = self._make_one(TEST_ROW_KEY, [cell1, cell2, cell3]) + self.assertEqual(len(row_response), 3) + result_list = list(row_response) + self.assertEqual(len(result_list), 3) + # should be able to iterate over all cells + idx = 0 + for cell in row_response: + self.assertIsInstance(cell, Cell) + self.assertEqual(cell.value, result_list[idx].value) + self.assertEqual(cell.value, str(idx + 1).encode()) + idx += 1 + + def test_contains_cell(self): + cell3 = self._make_cell(value=b"3") + cell1 = self._make_cell(value=b"1") + cell2 = self._make_cell(value=b"2") + cell4 = self._make_cell(value=b"4") + row_response = self._make_one(TEST_ROW_KEY, [cell3, cell1, cell2]) + self.assertIn(cell1, row_response) + self.assertIn(cell2, row_response) + self.assertNotIn(cell4, row_response) + cell3_copy = self._make_cell(value=b"3") + self.assertIn(cell3_copy, row_response) + + def test_contains_family_id(self): + new_family_id = "new_family_id" + cell = self._make_cell( + TEST_VALUE, + TEST_ROW_KEY, + TEST_FAMILY_ID, + TEST_QUALIFIER, + TEST_TIMESTAMP, + TEST_LABELS, + ) + cell2 = self._make_cell( + TEST_VALUE, + TEST_ROW_KEY, + new_family_id, + TEST_QUALIFIER, + TEST_TIMESTAMP, + TEST_LABELS, + ) + row_response = self._make_one(TEST_ROW_KEY, [cell, cell2]) + 
self.assertIn(TEST_FAMILY_ID, row_response) + self.assertIn("new_family_id", row_response) + self.assertIn(new_family_id, row_response) + self.assertNotIn("not_a_family_id", row_response) + self.assertNotIn(None, row_response) + + def test_contains_family_qualifier_tuple(self): + new_family_id = "new_family_id" + new_qualifier = b"new_qualifier" + cell = self._make_cell( + TEST_VALUE, + TEST_ROW_KEY, + TEST_FAMILY_ID, + TEST_QUALIFIER, + TEST_TIMESTAMP, + TEST_LABELS, + ) + cell2 = self._make_cell( + TEST_VALUE, + TEST_ROW_KEY, + new_family_id, + new_qualifier, + TEST_TIMESTAMP, + TEST_LABELS, + ) + row_response = self._make_one(TEST_ROW_KEY, [cell, cell2]) + self.assertIn((TEST_FAMILY_ID, TEST_QUALIFIER), row_response) + self.assertIn(("new_family_id", "new_qualifier"), row_response) + self.assertIn(("new_family_id", b"new_qualifier"), row_response) + self.assertIn((new_family_id, new_qualifier), row_response) + + self.assertNotIn(("not_a_family_id", TEST_QUALIFIER), row_response) + self.assertNotIn((TEST_FAMILY_ID, "not_a_qualifier"), row_response) + self.assertNotIn((TEST_FAMILY_ID, new_qualifier), row_response) + self.assertNotIn(("not_a_family_id", "not_a_qualifier"), row_response) + self.assertNotIn((None, None), row_response) + self.assertNotIn(None, row_response) + + def test_int_indexing(self): + # should be able to index into underlying list with an index number directly + cell_list = [self._make_cell(value=str(i).encode()) for i in range(10)] + sorted(cell_list) + row_response = self._make_one(TEST_ROW_KEY, cell_list) + self.assertEqual(len(row_response), 10) + for i in range(10): + self.assertEqual(row_response[i].value, str(i).encode()) + # backwards indexing should work + self.assertEqual(row_response[-i - 1].value, str(9 - i).encode()) + with self.assertRaises(IndexError): + row_response[10] + with self.assertRaises(IndexError): + row_response[-11] + + def test_slice_indexing(self): + # should be able to index with a range of indices + cell_list = [self._make_cell(value=str(i).encode()) for i in range(10)] + sorted(cell_list) + row_response = self._make_one(TEST_ROW_KEY, cell_list) + self.assertEqual(len(row_response), 10) + self.assertEqual(len(row_response[0:10]), 10) + self.assertEqual(row_response[0:10], cell_list) + self.assertEqual(len(row_response[0:]), 10) + self.assertEqual(row_response[0:], cell_list) + self.assertEqual(len(row_response[:10]), 10) + self.assertEqual(row_response[:10], cell_list) + self.assertEqual(len(row_response[0:10:1]), 10) + self.assertEqual(row_response[0:10:1], cell_list) + self.assertEqual(len(row_response[0:10:2]), 5) + self.assertEqual(row_response[0:10:2], [cell_list[i] for i in range(0, 10, 2)]) + self.assertEqual(len(row_response[0:10:3]), 4) + self.assertEqual(row_response[0:10:3], [cell_list[i] for i in range(0, 10, 3)]) + self.assertEqual(len(row_response[10:0:-1]), 9) + self.assertEqual(len(row_response[10:0:-2]), 5) + self.assertEqual(row_response[10:0:-3], cell_list[10:0:-3]) + self.assertEqual(len(row_response[0:100]), 10) + + def test_family_indexing(self): + # should be able to retrieve cells in a family + new_family_id = "new_family_id" + cell = self._make_cell( + TEST_VALUE, + TEST_ROW_KEY, + TEST_FAMILY_ID, + TEST_QUALIFIER, + TEST_TIMESTAMP, + TEST_LABELS, + ) + cell2 = self._make_cell( + TEST_VALUE, + TEST_ROW_KEY, + TEST_FAMILY_ID, + TEST_QUALIFIER, + TEST_TIMESTAMP, + TEST_LABELS, + ) + cell3 = self._make_cell( + TEST_VALUE, + TEST_ROW_KEY, + new_family_id, + TEST_QUALIFIER, + TEST_TIMESTAMP, + TEST_LABELS, + ) + 
row_response = self._make_one(TEST_ROW_KEY, [cell, cell2, cell3]) + + self.assertEqual(len(row_response[TEST_FAMILY_ID]), 2) + self.assertEqual(row_response[TEST_FAMILY_ID][0], cell) + self.assertEqual(row_response[TEST_FAMILY_ID][1], cell2) + self.assertEqual(len(row_response[new_family_id]), 1) + self.assertEqual(row_response[new_family_id][0], cell3) + with self.assertRaises(ValueError): + row_response["not_a_family_id"] + with self.assertRaises(TypeError): + row_response[None] + with self.assertRaises(TypeError): + row_response[b"new_family_id"] + + def test_family_qualifier_indexing(self): + # should be able to retrieve cells in a family/qualifier tuplw + new_family_id = "new_family_id" + new_qualifier = b"new_qualifier" + cell = self._make_cell( + TEST_VALUE, + TEST_ROW_KEY, + TEST_FAMILY_ID, + TEST_QUALIFIER, + TEST_TIMESTAMP, + TEST_LABELS, + ) + cell2 = self._make_cell( + TEST_VALUE, + TEST_ROW_KEY, + TEST_FAMILY_ID, + TEST_QUALIFIER, + TEST_TIMESTAMP, + TEST_LABELS, + ) + cell3 = self._make_cell( + TEST_VALUE, + TEST_ROW_KEY, + new_family_id, + new_qualifier, + TEST_TIMESTAMP, + TEST_LABELS, + ) + row_response = self._make_one(TEST_ROW_KEY, [cell, cell2, cell3]) + + self.assertEqual(len(row_response[TEST_FAMILY_ID, TEST_QUALIFIER]), 2) + self.assertEqual(row_response[TEST_FAMILY_ID, TEST_QUALIFIER][0], cell) + self.assertEqual(row_response[TEST_FAMILY_ID, TEST_QUALIFIER][1], cell2) + self.assertEqual(len(row_response[new_family_id, new_qualifier]), 1) + self.assertEqual(row_response[new_family_id, new_qualifier][0], cell3) + self.assertEqual(len(row_response["new_family_id", "new_qualifier"]), 1) + self.assertEqual(len(row_response["new_family_id", b"new_qualifier"]), 1) + with self.assertRaises(ValueError): + row_response[new_family_id, "not_a_qualifier"] + with self.assertRaises(ValueError): + row_response["not_a_family_id", new_qualifier] + with self.assertRaises(TypeError): + row_response[None, None] + with self.assertRaises(TypeError): + row_response[b"new_family_id", b"new_qualifier"] + + def test_get_column_components(self): + # should be able to retrieve (family,qualifier) tuples as keys + new_family_id = "new_family_id" + new_qualifier = b"new_qualifier" + cell = self._make_cell( + TEST_VALUE, + TEST_ROW_KEY, + TEST_FAMILY_ID, + TEST_QUALIFIER, + TEST_TIMESTAMP, + TEST_LABELS, + ) + cell2 = self._make_cell( + TEST_VALUE, + TEST_ROW_KEY, + TEST_FAMILY_ID, + TEST_QUALIFIER, + TEST_TIMESTAMP, + TEST_LABELS, + ) + cell3 = self._make_cell( + TEST_VALUE, + TEST_ROW_KEY, + new_family_id, + new_qualifier, + TEST_TIMESTAMP, + TEST_LABELS, + ) + row_response = self._make_one(TEST_ROW_KEY, [cell, cell2, cell3]) + + self.assertEqual(len(row_response._get_column_components()), 2) + self.assertEqual( + row_response._get_column_components(), + [(TEST_FAMILY_ID, TEST_QUALIFIER), (new_family_id, new_qualifier)], + ) + + row_response = self._make_one(TEST_ROW_KEY, []) + self.assertEqual(len(row_response._get_column_components()), 0) + self.assertEqual(row_response._get_column_components(), []) + + row_response = self._make_one(TEST_ROW_KEY, [cell]) + self.assertEqual(len(row_response._get_column_components()), 1) + self.assertEqual( + row_response._get_column_components(), [(TEST_FAMILY_ID, TEST_QUALIFIER)] + ) + + +class TestCell(unittest.TestCase): + @staticmethod + def _get_target_class(): + from google.cloud.bigtable.data.row import Cell + + return Cell + + def _make_one(self, *args, **kwargs): + if len(args) == 0: + args = ( + TEST_VALUE, + TEST_ROW_KEY, + TEST_FAMILY_ID, + 
TEST_QUALIFIER, + TEST_TIMESTAMP, + TEST_LABELS, + ) + return self._get_target_class()(*args, **kwargs) + + def test_ctor(self): + cell = self._make_one( + TEST_VALUE, + TEST_ROW_KEY, + TEST_FAMILY_ID, + TEST_QUALIFIER, + TEST_TIMESTAMP, + TEST_LABELS, + ) + self.assertEqual(cell.value, TEST_VALUE) + self.assertEqual(cell.row_key, TEST_ROW_KEY) + self.assertEqual(cell.family, TEST_FAMILY_ID) + self.assertEqual(cell.qualifier, TEST_QUALIFIER) + self.assertEqual(cell.timestamp_micros, TEST_TIMESTAMP) + self.assertEqual(cell.labels, TEST_LABELS) + + def test_to_dict(self): + from google.cloud.bigtable_v2.types import Cell + + cell = self._make_one() + cell_dict = cell._to_dict() + expected_dict = { + "value": TEST_VALUE, + "timestamp_micros": TEST_TIMESTAMP, + "labels": TEST_LABELS, + } + self.assertEqual(len(cell_dict), len(expected_dict)) + for key, value in expected_dict.items(): + self.assertEqual(cell_dict[key], value) + # should be able to construct a Cell proto from the dict + cell_proto = Cell(**cell_dict) + self.assertEqual(cell_proto.value, TEST_VALUE) + self.assertEqual(cell_proto.timestamp_micros, TEST_TIMESTAMP) + self.assertEqual(cell_proto.labels, TEST_LABELS) + + def test_to_dict_no_labels(self): + from google.cloud.bigtable_v2.types import Cell + + cell_no_labels = self._make_one( + TEST_VALUE, + TEST_ROW_KEY, + TEST_FAMILY_ID, + TEST_QUALIFIER, + TEST_TIMESTAMP, + None, + ) + cell_dict = cell_no_labels._to_dict() + expected_dict = { + "value": TEST_VALUE, + "timestamp_micros": TEST_TIMESTAMP, + } + self.assertEqual(len(cell_dict), len(expected_dict)) + for key, value in expected_dict.items(): + self.assertEqual(cell_dict[key], value) + # should be able to construct a Cell proto from the dict + cell_proto = Cell(**cell_dict) + self.assertEqual(cell_proto.value, TEST_VALUE) + self.assertEqual(cell_proto.timestamp_micros, TEST_TIMESTAMP) + self.assertEqual(cell_proto.labels, []) + + def test_int_value(self): + test_int = 1234 + bytes_value = test_int.to_bytes(4, "big", signed=True) + cell = self._make_one( + bytes_value, + TEST_ROW_KEY, + TEST_FAMILY_ID, + TEST_QUALIFIER, + TEST_TIMESTAMP, + TEST_LABELS, + ) + self.assertEqual(int(cell), test_int) + # ensure string formatting works + formatted = "%d" % cell + self.assertEqual(formatted, str(test_int)) + self.assertEqual(int(formatted), test_int) + + def test_int_value_negative(self): + test_int = -99999 + bytes_value = test_int.to_bytes(4, "big", signed=True) + cell = self._make_one( + bytes_value, + TEST_ROW_KEY, + TEST_FAMILY_ID, + TEST_QUALIFIER, + TEST_TIMESTAMP, + TEST_LABELS, + ) + self.assertEqual(int(cell), test_int) + # ensure string formatting works + formatted = "%d" % cell + self.assertEqual(formatted, str(test_int)) + self.assertEqual(int(formatted), test_int) + + def test___str__(self): + test_value = b"helloworld" + cell = self._make_one( + test_value, + TEST_ROW_KEY, + TEST_FAMILY_ID, + TEST_QUALIFIER, + TEST_TIMESTAMP, + TEST_LABELS, + ) + self.assertEqual(str(cell), "b'helloworld'") + self.assertEqual(str(cell), str(test_value)) + + def test___repr__(self): + from google.cloud.bigtable.data.row import Cell # type: ignore # noqa: F401 + + cell = self._make_one() + expected = ( + "Cell(value=b'1234', row_key=b'row', " + + "family='cf1', qualifier=b'col', " + + f"timestamp_micros={TEST_TIMESTAMP}, labels=['label1', 'label2'])" + ) + self.assertEqual(repr(cell), expected) + # should be able to construct instance from __repr__ + result = eval(repr(cell)) + self.assertEqual(result, cell) + + def 
test___repr___no_labels(self): + from google.cloud.bigtable.data.row import Cell # type: ignore # noqa: F401 + + cell_no_labels = self._make_one( + TEST_VALUE, + TEST_ROW_KEY, + TEST_FAMILY_ID, + TEST_QUALIFIER, + TEST_TIMESTAMP, + None, + ) + expected = ( + "Cell(value=b'1234', row_key=b'row', " + + "family='cf1', qualifier=b'col', " + + f"timestamp_micros={TEST_TIMESTAMP}, labels=[])" + ) + self.assertEqual(repr(cell_no_labels), expected) + # should be able to construct instance from __repr__ + result = eval(repr(cell_no_labels)) + self.assertEqual(result, cell_no_labels) + + def test_equality(self): + cell1 = self._make_one() + cell2 = self._make_one() + self.assertEqual(cell1, cell2) + self.assertTrue(cell1 == cell2) + args = ( + TEST_VALUE, + TEST_ROW_KEY, + TEST_FAMILY_ID, + TEST_QUALIFIER, + TEST_TIMESTAMP, + TEST_LABELS, + ) + for i in range(0, len(args)): + # try changing each argument + modified_cell = self._make_one(*args[:i], args[i] + args[i], *args[i + 1 :]) + self.assertNotEqual(cell1, modified_cell) + self.assertFalse(cell1 == modified_cell) + self.assertTrue(cell1 != modified_cell) + + def test_hash(self): + # class should be hashable + cell1 = self._make_one() + d = {cell1: 1} + cell2 = self._make_one() + self.assertEqual(d[cell2], 1) + + args = ( + TEST_VALUE, + TEST_ROW_KEY, + TEST_FAMILY_ID, + TEST_QUALIFIER, + TEST_TIMESTAMP, + TEST_LABELS, + ) + for i in range(0, len(args)): + # try changing each argument + modified_cell = self._make_one(*args[:i], args[i] + args[i], *args[i + 1 :]) + with self.assertRaises(KeyError): + d[modified_cell] + + def test_ordering(self): + # create cell list in order from lowest to highest + higher_cells = [] + i = 0 + # families; alphebetical order + for family in ["z", "y", "x"]: + # qualifiers; lowest byte value first + for qualifier in [b"z", b"y", b"x"]: + # timestamps; newest first + for timestamp in [ + TEST_TIMESTAMP, + TEST_TIMESTAMP + 1, + TEST_TIMESTAMP + 2, + ]: + cell = self._make_one( + TEST_VALUE, + TEST_ROW_KEY, + family, + qualifier, + timestamp, + TEST_LABELS, + ) + # cell should be the highest priority encountered so far + self.assertEqual(i, len(higher_cells)) + i += 1 + for other in higher_cells: + self.assertLess(cell, other) + higher_cells.append(cell) + # final order should be reverse of sorted order + expected_order = higher_cells + expected_order.reverse() + self.assertEqual(expected_order, sorted(higher_cells)) diff --git a/packages/google-cloud-bigtable/tests/unit/data/test_row_filters.py b/packages/google-cloud-bigtable/tests/unit/data/test_row_filters.py new file mode 100644 index 000000000000..e90b6f270a61 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/data/test_row_filters.py @@ -0,0 +1,2039 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
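+
+# These tests rely on small module-level helpers (_get_bool_filters(),
+# _get_regex_filters(), _RowFilterPB(), _TimestampRangePB()) that are defined
+# later in this module; they supply the filter classes under test and build
+# the expected protobuf messages used in the assertions.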
+ + +import pytest + + +def test_abstract_class_constructors(): + from google.cloud.bigtable.data.row_filters import RowFilter + from google.cloud.bigtable.data.row_filters import _BoolFilter + from google.cloud.bigtable.data.row_filters import _FilterCombination + from google.cloud.bigtable.data.row_filters import _CellCountFilter + + with pytest.raises(TypeError): + RowFilter() + with pytest.raises(TypeError): + _BoolFilter(False) + with pytest.raises(TypeError): + _FilterCombination([]) + with pytest.raises(TypeError): + _CellCountFilter(0) + + +def test_bool_filter_constructor(): + for FilterType in _get_bool_filters(): + flag = True + row_filter = FilterType(flag) + assert row_filter.flag is flag + + +def test_bool_filter___eq__type_differ(): + for FilterType in _get_bool_filters(): + flag = object() + row_filter1 = FilterType(flag) + row_filter2 = object() + assert not (row_filter1 == row_filter2) + + +def test_bool_filter___eq__same_value(): + for FilterType in _get_bool_filters(): + flag = object() + row_filter1 = FilterType(flag) + row_filter2 = FilterType(flag) + assert row_filter1 == row_filter2 + + +def test_bool_filter___ne__same_value(): + for FilterType in _get_bool_filters(): + flag = object() + row_filter1 = FilterType(flag) + row_filter2 = FilterType(flag) + assert not (row_filter1 != row_filter2) + + +def test_sink_filter_to_pb(): + from google.cloud.bigtable.data.row_filters import SinkFilter + + flag = True + row_filter = SinkFilter(flag) + pb_val = row_filter._to_pb() + expected_pb = _RowFilterPB(sink=flag) + assert pb_val == expected_pb + + +def test_sink_filter_to_dict(): + from google.cloud.bigtable.data.row_filters import SinkFilter + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + flag = True + row_filter = SinkFilter(flag) + expected_dict = {"sink": flag} + assert row_filter._to_dict() == expected_dict + expected_pb_value = row_filter._to_pb() + assert data_v2_pb2.RowFilter(**expected_dict) == expected_pb_value + + +def test_sink_filter___repr__(): + from google.cloud.bigtable.data.row_filters import SinkFilter + + flag = True + row_filter = SinkFilter(flag) + assert repr(row_filter) == "SinkFilter(flag={})".format(flag) + assert repr(row_filter) == str(row_filter) + assert eval(repr(row_filter)) == row_filter + + +def test_pass_all_filter_to_pb(): + from google.cloud.bigtable.data.row_filters import PassAllFilter + + flag = True + row_filter = PassAllFilter(flag) + pb_val = row_filter._to_pb() + expected_pb = _RowFilterPB(pass_all_filter=flag) + assert pb_val == expected_pb + + +def test_pass_all_filter_to_dict(): + from google.cloud.bigtable.data.row_filters import PassAllFilter + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + flag = True + row_filter = PassAllFilter(flag) + expected_dict = {"pass_all_filter": flag} + assert row_filter._to_dict() == expected_dict + expected_pb_value = row_filter._to_pb() + assert data_v2_pb2.RowFilter(**expected_dict) == expected_pb_value + + +def test_pass_all_filter___repr__(): + from google.cloud.bigtable.data.row_filters import PassAllFilter + + flag = True + row_filter = PassAllFilter(flag) + assert repr(row_filter) == "PassAllFilter(flag={})".format(flag) + assert repr(row_filter) == str(row_filter) + assert eval(repr(row_filter)) == row_filter + + +def test_block_all_filter_to_pb(): + from google.cloud.bigtable.data.row_filters import BlockAllFilter + + flag = True + row_filter = BlockAllFilter(flag) + pb_val = row_filter._to_pb() + expected_pb = _RowFilterPB(block_all_filter=flag) 
+ assert pb_val == expected_pb + + +def test_block_all_filter_to_dict(): + from google.cloud.bigtable.data.row_filters import BlockAllFilter + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + flag = True + row_filter = BlockAllFilter(flag) + expected_dict = {"block_all_filter": flag} + assert row_filter._to_dict() == expected_dict + expected_pb_value = row_filter._to_pb() + assert data_v2_pb2.RowFilter(**expected_dict) == expected_pb_value + + +def test_block_all_filter___repr__(): + from google.cloud.bigtable.data.row_filters import BlockAllFilter + + flag = True + row_filter = BlockAllFilter(flag) + assert repr(row_filter) == "BlockAllFilter(flag={})".format(flag) + assert repr(row_filter) == str(row_filter) + assert eval(repr(row_filter)) == row_filter + + +def test_regex_filterconstructor(): + for FilterType in _get_regex_filters(): + regex = b"abc" + row_filter = FilterType(regex) + assert row_filter.regex == regex + + +def test_regex_filterconstructor_non_bytes(): + for FilterType in _get_regex_filters(): + regex = "abc" + row_filter = FilterType(regex) + assert row_filter.regex == b"abc" + + +def test_regex_filter__eq__type_differ(): + for FilterType in _get_regex_filters(): + regex = b"def-rgx" + row_filter1 = FilterType(regex) + row_filter2 = object() + assert not (row_filter1 == row_filter2) + + +def test_regex_filter__eq__same_value(): + for FilterType in _get_regex_filters(): + regex = b"trex-regex" + row_filter1 = FilterType(regex) + row_filter2 = FilterType(regex) + assert row_filter1 == row_filter2 + + +def test_regex_filter__ne__same_value(): + for FilterType in _get_regex_filters(): + regex = b"abc" + row_filter1 = FilterType(regex) + row_filter2 = FilterType(regex) + assert not (row_filter1 != row_filter2) + + +def test_row_key_regex_filter_to_pb(): + from google.cloud.bigtable.data.row_filters import RowKeyRegexFilter + + regex = b"row-key-regex" + row_filter = RowKeyRegexFilter(regex) + pb_val = row_filter._to_pb() + expected_pb = _RowFilterPB(row_key_regex_filter=regex) + assert pb_val == expected_pb + + +def test_row_key_regex_filter_to_dict(): + from google.cloud.bigtable.data.row_filters import RowKeyRegexFilter + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + regex = b"row-key-regex" + row_filter = RowKeyRegexFilter(regex) + expected_dict = {"row_key_regex_filter": regex} + assert row_filter._to_dict() == expected_dict + expected_pb_value = row_filter._to_pb() + assert data_v2_pb2.RowFilter(**expected_dict) == expected_pb_value + + +def test_row_key_regex_filter___repr__(): + from google.cloud.bigtable.data.row_filters import RowKeyRegexFilter + + regex = b"row-key-regex" + row_filter = RowKeyRegexFilter(regex) + assert repr(row_filter) == "RowKeyRegexFilter(regex={})".format(regex) + assert repr(row_filter) == str(row_filter) + assert eval(repr(row_filter)) == row_filter + + +def test_row_sample_filter_constructor(): + from google.cloud.bigtable.data.row_filters import RowSampleFilter + + sample = object() + row_filter = RowSampleFilter(sample) + assert row_filter.sample is sample + + +def test_row_sample_filter___eq__type_differ(): + from google.cloud.bigtable.data.row_filters import RowSampleFilter + + sample = object() + row_filter1 = RowSampleFilter(sample) + row_filter2 = object() + assert not (row_filter1 == row_filter2) + + +def test_row_sample_filter___eq__same_value(): + from google.cloud.bigtable.data.row_filters import RowSampleFilter + + sample = object() + row_filter1 = RowSampleFilter(sample) + row_filter2 = 
RowSampleFilter(sample) + assert row_filter1 == row_filter2 + + +def test_row_sample_filter___ne__(): + from google.cloud.bigtable.data.row_filters import RowSampleFilter + + sample = object() + other_sample = object() + row_filter1 = RowSampleFilter(sample) + row_filter2 = RowSampleFilter(other_sample) + assert row_filter1 != row_filter2 + + +def test_row_sample_filter_to_pb(): + from google.cloud.bigtable.data.row_filters import RowSampleFilter + + sample = 0.25 + row_filter = RowSampleFilter(sample) + pb_val = row_filter._to_pb() + expected_pb = _RowFilterPB(row_sample_filter=sample) + assert pb_val == expected_pb + + +def test_row_sample_filter___repr__(): + from google.cloud.bigtable.data.row_filters import RowSampleFilter + + sample = 0.25 + row_filter = RowSampleFilter(sample) + assert repr(row_filter) == "RowSampleFilter(sample={})".format(sample) + assert repr(row_filter) == str(row_filter) + assert eval(repr(row_filter)) == row_filter + + +def test_family_name_regex_filter_to_pb(): + from google.cloud.bigtable.data.row_filters import FamilyNameRegexFilter + + regex = "family-regex" + row_filter = FamilyNameRegexFilter(regex) + pb_val = row_filter._to_pb() + expected_pb = _RowFilterPB(family_name_regex_filter=regex) + assert pb_val == expected_pb + + +def test_family_name_regex_filter_to_dict(): + from google.cloud.bigtable.data.row_filters import FamilyNameRegexFilter + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + regex = "family-regex" + row_filter = FamilyNameRegexFilter(regex) + expected_dict = {"family_name_regex_filter": regex.encode()} + assert row_filter._to_dict() == expected_dict + expected_pb_value = row_filter._to_pb() + assert data_v2_pb2.RowFilter(**expected_dict) == expected_pb_value + + +def test_family_name_regex_filter___repr__(): + from google.cloud.bigtable.data.row_filters import FamilyNameRegexFilter + + regex = "family-regex" + row_filter = FamilyNameRegexFilter(regex) + expected = "FamilyNameRegexFilter(regex=b'family-regex')" + assert repr(row_filter) == expected + assert repr(row_filter) == str(row_filter) + assert eval(repr(row_filter)) == row_filter + + +def test_column_qualifier_regex_filter_to_pb(): + from google.cloud.bigtable.data.row_filters import ColumnQualifierRegexFilter + + regex = b"column-regex" + row_filter = ColumnQualifierRegexFilter(regex) + pb_val = row_filter._to_pb() + expected_pb = _RowFilterPB(column_qualifier_regex_filter=regex) + assert pb_val == expected_pb + + +def test_column_qualifier_regex_filter_to_dict(): + from google.cloud.bigtable.data.row_filters import ColumnQualifierRegexFilter + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + regex = b"column-regex" + row_filter = ColumnQualifierRegexFilter(regex) + expected_dict = {"column_qualifier_regex_filter": regex} + assert row_filter._to_dict() == expected_dict + expected_pb_value = row_filter._to_pb() + assert data_v2_pb2.RowFilter(**expected_dict) == expected_pb_value + + +def test_column_qualifier_regex_filter___repr__(): + from google.cloud.bigtable.data.row_filters import ColumnQualifierRegexFilter + + regex = b"column-regex" + row_filter = ColumnQualifierRegexFilter(regex) + assert repr(row_filter) == "ColumnQualifierRegexFilter(regex={})".format(regex) + assert repr(row_filter) == str(row_filter) + assert eval(repr(row_filter)) == row_filter + + +def test_timestamp_range_constructor(): + from google.cloud.bigtable.data.row_filters import TimestampRange + + start = object() + end = object() + time_range = 
TimestampRange(start=start, end=end) + assert time_range.start is start + assert time_range.end is end + + +def test_timestamp_range___eq__(): + from google.cloud.bigtable.data.row_filters import TimestampRange + + start = object() + end = object() + time_range1 = TimestampRange(start=start, end=end) + time_range2 = TimestampRange(start=start, end=end) + assert time_range1 == time_range2 + + +def test_timestamp_range___eq__type_differ(): + from google.cloud.bigtable.data.row_filters import TimestampRange + + start = object() + end = object() + time_range1 = TimestampRange(start=start, end=end) + time_range2 = object() + assert not (time_range1 == time_range2) + + +def test_timestamp_range___ne__same_value(): + from google.cloud.bigtable.data.row_filters import TimestampRange + + start = object() + end = object() + time_range1 = TimestampRange(start=start, end=end) + time_range2 = TimestampRange(start=start, end=end) + assert not (time_range1 != time_range2) + + +def _timestamp_range_to_pb_helper(pb_kwargs, start=None, end=None): + import datetime + from google.cloud._helpers import _EPOCH + from google.cloud.bigtable.data.row_filters import TimestampRange + + if start is not None: + start = _EPOCH + datetime.timedelta(microseconds=start) + if end is not None: + end = _EPOCH + datetime.timedelta(microseconds=end) + time_range = TimestampRange(start=start, end=end) + expected_pb = _TimestampRangePB(**pb_kwargs) + time_pb = time_range._to_pb() + assert time_pb.start_timestamp_micros == expected_pb.start_timestamp_micros + assert time_pb.end_timestamp_micros == expected_pb.end_timestamp_micros + assert time_pb == expected_pb + + +def test_timestamp_range_to_pb(): + start_micros = 30871234 + end_micros = 12939371234 + start_millis = start_micros // 1000 * 1000 + assert start_millis == 30871000 + end_millis = end_micros // 1000 * 1000 + 1000 + assert end_millis == 12939372000 + pb_kwargs = {} + pb_kwargs["start_timestamp_micros"] = start_millis + pb_kwargs["end_timestamp_micros"] = end_millis + _timestamp_range_to_pb_helper(pb_kwargs, start=start_micros, end=end_micros) + + +def test_timestamp_range_to_dict(): + from google.cloud.bigtable.data.row_filters import TimestampRange + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + import datetime + + row_filter = TimestampRange( + start=datetime.datetime(2019, 1, 1), end=datetime.datetime(2019, 1, 2) + ) + expected_dict = { + "start_timestamp_micros": 1546300800000000, + "end_timestamp_micros": 1546387200000000, + } + assert row_filter._to_dict() == expected_dict + expected_pb_value = row_filter._to_pb() + assert data_v2_pb2.TimestampRange(**expected_dict) == expected_pb_value + + +def test_timestamp_range_to_pb_start_only(): + # Makes sure already milliseconds granularity + start_micros = 30871000 + start_millis = start_micros // 1000 * 1000 + assert start_millis == 30871000 + pb_kwargs = {} + pb_kwargs["start_timestamp_micros"] = start_millis + _timestamp_range_to_pb_helper(pb_kwargs, start=start_micros, end=None) + + +def test_timestamp_range_to_dict_start_only(): + from google.cloud.bigtable.data.row_filters import TimestampRange + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + import datetime + + row_filter = TimestampRange(start=datetime.datetime(2019, 1, 1)) + expected_dict = {"start_timestamp_micros": 1546300800000000} + assert row_filter._to_dict() == expected_dict + expected_pb_value = row_filter._to_pb() + assert data_v2_pb2.TimestampRange(**expected_dict) == expected_pb_value + + +def 
test_timestamp_range_to_pb_end_only(): + # Makes sure already milliseconds granularity + end_micros = 12939371000 + end_millis = end_micros // 1000 * 1000 + assert end_millis == 12939371000 + pb_kwargs = {} + pb_kwargs["end_timestamp_micros"] = end_millis + _timestamp_range_to_pb_helper(pb_kwargs, start=None, end=end_micros) + + +def test_timestamp_range_to_dict_end_only(): + from google.cloud.bigtable.data.row_filters import TimestampRange + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + import datetime + + row_filter = TimestampRange(end=datetime.datetime(2019, 1, 2)) + expected_dict = {"end_timestamp_micros": 1546387200000000} + assert row_filter._to_dict() == expected_dict + expected_pb_value = row_filter._to_pb() + assert data_v2_pb2.TimestampRange(**expected_dict) == expected_pb_value + + +def test_timestamp_range___repr__(): + from google.cloud.bigtable.data.row_filters import TimestampRange + + start = object() + end = object() + time_range = TimestampRange(start=start, end=end) + assert repr(time_range) == "TimestampRange(start={}, end={})".format(start, end) + assert repr(time_range) == str(time_range) + assert eval(repr(time_range)) == time_range + + +def test_timestamp_range_filter___eq__type_differ(): + from google.cloud.bigtable.data.row_filters import TimestampRangeFilter + + range_ = object() + row_filter1 = TimestampRangeFilter(range_) + row_filter2 = object() + assert not (row_filter1 == row_filter2) + + +def test_timestamp_range_filter___eq__same_value(): + from google.cloud.bigtable.data.row_filters import TimestampRangeFilter + + range_ = object() + row_filter1 = TimestampRangeFilter(range_) + row_filter2 = TimestampRangeFilter(range_) + assert row_filter1 == row_filter2 + + +def test_timestamp_range_filter___ne__(): + from google.cloud.bigtable.data.row_filters import TimestampRangeFilter + + range_ = object() + other_range_ = object() + row_filter1 = TimestampRangeFilter(range_) + row_filter2 = TimestampRangeFilter(other_range_) + assert row_filter1 != row_filter2 + + +def test_timestamp_range_filter_to_pb(): + from google.cloud.bigtable.data.row_filters import TimestampRangeFilter + + row_filter = TimestampRangeFilter() + pb_val = row_filter._to_pb() + expected_pb = _RowFilterPB(timestamp_range_filter=_TimestampRangePB()) + assert pb_val == expected_pb + + +def test_timestamp_range_filter_to_dict(): + from google.cloud.bigtable.data.row_filters import TimestampRangeFilter + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + import datetime + + row_filter = TimestampRangeFilter( + start=datetime.datetime(2019, 1, 1), end=datetime.datetime(2019, 1, 2) + ) + expected_dict = { + "timestamp_range_filter": { + "start_timestamp_micros": 1546300800000000, + "end_timestamp_micros": 1546387200000000, + } + } + assert row_filter._to_dict() == expected_dict + expected_pb_value = row_filter._to_pb() + assert data_v2_pb2.RowFilter(**expected_dict) == expected_pb_value + + +def test_timestamp_range_filter_empty_to_dict(): + from google.cloud.bigtable.data.row_filters import TimestampRangeFilter + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + row_filter = TimestampRangeFilter() + expected_dict = {"timestamp_range_filter": {}} + assert row_filter._to_dict() == expected_dict + expected_pb_value = row_filter._to_pb() + assert data_v2_pb2.RowFilter(**expected_dict) == expected_pb_value + + +def test_timestamp_range_filter___repr__(): + from google.cloud.bigtable.data.row_filters import TimestampRangeFilter + import datetime + + start = 
datetime.datetime(2019, 1, 1) + end = datetime.datetime(2019, 1, 2) + row_filter = TimestampRangeFilter(start, end) + assert ( + repr(row_filter) + == f"TimestampRangeFilter(start={repr(start)}, end={repr(end)})" + ) + assert repr(row_filter) == str(row_filter) + assert eval(repr(row_filter)) == row_filter + + +def test_column_range_filter_constructor_defaults(): + from google.cloud.bigtable.data.row_filters import ColumnRangeFilter + + family_id = object() + row_filter = ColumnRangeFilter(family_id) + assert row_filter.family_id is family_id + assert row_filter.start_qualifier is None + assert row_filter.end_qualifier is None + assert row_filter.inclusive_start + assert row_filter.inclusive_end + + +def test_column_range_filter_constructor_explicit(): + from google.cloud.bigtable.data.row_filters import ColumnRangeFilter + + family_id = object() + start_qualifier = object() + end_qualifier = object() + inclusive_start = object() + inclusive_end = object() + row_filter = ColumnRangeFilter( + family_id, + start_qualifier=start_qualifier, + end_qualifier=end_qualifier, + inclusive_start=inclusive_start, + inclusive_end=inclusive_end, + ) + assert row_filter.family_id is family_id + assert row_filter.start_qualifier is start_qualifier + assert row_filter.end_qualifier is end_qualifier + assert row_filter.inclusive_start is inclusive_start + assert row_filter.inclusive_end is inclusive_end + + +def test_column_range_filter_constructor_bad_start(): + from google.cloud.bigtable.data.row_filters import ColumnRangeFilter + + family_id = object() + with pytest.raises(ValueError): + ColumnRangeFilter(family_id, inclusive_start=True) + + +def test_column_range_filter_constructor_bad_end(): + from google.cloud.bigtable.data.row_filters import ColumnRangeFilter + + family_id = object() + with pytest.raises(ValueError): + ColumnRangeFilter(family_id, inclusive_end=True) + + +def test_column_range_filter___eq__(): + from google.cloud.bigtable.data.row_filters import ColumnRangeFilter + + family_id = object() + start_qualifier = object() + end_qualifier = object() + inclusive_start = object() + inclusive_end = object() + row_filter1 = ColumnRangeFilter( + family_id, + start_qualifier=start_qualifier, + end_qualifier=end_qualifier, + inclusive_start=inclusive_start, + inclusive_end=inclusive_end, + ) + row_filter2 = ColumnRangeFilter( + family_id, + start_qualifier=start_qualifier, + end_qualifier=end_qualifier, + inclusive_start=inclusive_start, + inclusive_end=inclusive_end, + ) + assert row_filter1 == row_filter2 + + +def test_column_range_filter___eq__type_differ(): + from google.cloud.bigtable.data.row_filters import ColumnRangeFilter + + family_id = object() + row_filter1 = ColumnRangeFilter(family_id) + row_filter2 = object() + assert not (row_filter1 == row_filter2) + + +def test_column_range_filter___ne__(): + from google.cloud.bigtable.data.row_filters import ColumnRangeFilter + + family_id = object() + other_family_id = object() + start_qualifier = object() + end_qualifier = object() + inclusive_start = object() + inclusive_end = object() + row_filter1 = ColumnRangeFilter( + family_id, + start_qualifier=start_qualifier, + end_qualifier=end_qualifier, + inclusive_start=inclusive_start, + inclusive_end=inclusive_end, + ) + row_filter2 = ColumnRangeFilter( + other_family_id, + start_qualifier=start_qualifier, + end_qualifier=end_qualifier, + inclusive_start=inclusive_start, + inclusive_end=inclusive_end, + ) + assert row_filter1 != row_filter2 + + +def test_column_range_filter_to_pb(): + from 
google.cloud.bigtable.data.row_filters import ColumnRangeFilter + + family_id = "column-family-id" + row_filter = ColumnRangeFilter(family_id) + col_range_pb = _ColumnRangePB(family_name=family_id) + expected_pb = _RowFilterPB(column_range_filter=col_range_pb) + assert row_filter._to_pb() == expected_pb + + +def test_column_range_filter_to_dict(): + from google.cloud.bigtable.data.row_filters import ColumnRangeFilter + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + family_id = "column-family-id" + row_filter = ColumnRangeFilter(family_id) + expected_dict = {"column_range_filter": {"family_name": family_id}} + assert row_filter._to_dict() == expected_dict + expected_pb_value = row_filter._to_pb() + assert data_v2_pb2.RowFilter(**expected_dict) == expected_pb_value + + +def test_column_range_filter_to_pb_inclusive_start(): + from google.cloud.bigtable.data.row_filters import ColumnRangeFilter + + family_id = "column-family-id" + column = b"column" + row_filter = ColumnRangeFilter(family_id, start_qualifier=column) + col_range_pb = _ColumnRangePB(family_name=family_id, start_qualifier_closed=column) + expected_pb = _RowFilterPB(column_range_filter=col_range_pb) + assert row_filter._to_pb() == expected_pb + + +def test_column_range_filter_to_pb_exclusive_start(): + from google.cloud.bigtable.data.row_filters import ColumnRangeFilter + + family_id = "column-family-id" + column = b"column" + row_filter = ColumnRangeFilter( + family_id, start_qualifier=column, inclusive_start=False + ) + col_range_pb = _ColumnRangePB(family_name=family_id, start_qualifier_open=column) + expected_pb = _RowFilterPB(column_range_filter=col_range_pb) + assert row_filter._to_pb() == expected_pb + + +def test_column_range_filter_to_pb_inclusive_end(): + from google.cloud.bigtable.data.row_filters import ColumnRangeFilter + + family_id = "column-family-id" + column = b"column" + row_filter = ColumnRangeFilter(family_id, end_qualifier=column) + col_range_pb = _ColumnRangePB(family_name=family_id, end_qualifier_closed=column) + expected_pb = _RowFilterPB(column_range_filter=col_range_pb) + assert row_filter._to_pb() == expected_pb + + +def test_column_range_filter_to_pb_exclusive_end(): + from google.cloud.bigtable.data.row_filters import ColumnRangeFilter + + family_id = "column-family-id" + column = b"column" + row_filter = ColumnRangeFilter(family_id, end_qualifier=column, inclusive_end=False) + col_range_pb = _ColumnRangePB(family_name=family_id, end_qualifier_open=column) + expected_pb = _RowFilterPB(column_range_filter=col_range_pb) + assert row_filter._to_pb() == expected_pb + + +def test_column_range_filter___repr__(): + from google.cloud.bigtable.data.row_filters import ColumnRangeFilter + + family_id = "column-family-id" + start_qualifier = b"column" + end_qualifier = b"column2" + row_filter = ColumnRangeFilter(family_id, start_qualifier, end_qualifier) + expected = "ColumnRangeFilter(family_id='column-family-id', start_qualifier=b'column', end_qualifier=b'column2', inclusive_start=True, inclusive_end=True)" + assert repr(row_filter) == expected + assert repr(row_filter) == str(row_filter) + assert eval(repr(row_filter)) == row_filter + + +def test_value_regex_filter_to_pb_w_bytes(): + from google.cloud.bigtable.data.row_filters import ValueRegexFilter + + value = regex = b"value-regex" + row_filter = ValueRegexFilter(value) + pb_val = row_filter._to_pb() + expected_pb = _RowFilterPB(value_regex_filter=regex) + assert pb_val == expected_pb + + +def test_value_regex_filter_to_dict_w_bytes(): + 
from google.cloud.bigtable.data.row_filters import ValueRegexFilter + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + value = regex = b"value-regex" + row_filter = ValueRegexFilter(value) + expected_dict = {"value_regex_filter": regex} + assert row_filter._to_dict() == expected_dict + expected_pb_value = row_filter._to_pb() + assert data_v2_pb2.RowFilter(**expected_dict) == expected_pb_value + + +def test_value_regex_filter_to_pb_w_str(): + from google.cloud.bigtable.data.row_filters import ValueRegexFilter + + value = "value-regex" + regex = value.encode("ascii") + row_filter = ValueRegexFilter(value) + pb_val = row_filter._to_pb() + expected_pb = _RowFilterPB(value_regex_filter=regex) + assert pb_val == expected_pb + + +def test_value_regex_filter_to_dict_w_str(): + from google.cloud.bigtable.data.row_filters import ValueRegexFilter + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + value = "value-regex" + regex = value.encode("ascii") + row_filter = ValueRegexFilter(value) + expected_dict = {"value_regex_filter": regex} + assert row_filter._to_dict() == expected_dict + expected_pb_value = row_filter._to_pb() + assert data_v2_pb2.RowFilter(**expected_dict) == expected_pb_value + + +def test_value_regex_filter___repr__(): + from google.cloud.bigtable.data.row_filters import ValueRegexFilter + + value = "value-regex" + row_filter = ValueRegexFilter(value) + expected = "ValueRegexFilter(regex=b'value-regex')" + assert repr(row_filter) == expected + assert repr(row_filter) == str(row_filter) + assert eval(repr(row_filter)) == row_filter + + +def test_literal_value_filter_to_pb_w_bytes(): + from google.cloud.bigtable.data.row_filters import LiteralValueFilter + + value = regex = b"value_regex" + row_filter = LiteralValueFilter(value) + pb_val = row_filter._to_pb() + expected_pb = _RowFilterPB(value_regex_filter=regex) + assert pb_val == expected_pb + + +def test_literal_value_filter_to_dict_w_bytes(): + from google.cloud.bigtable.data.row_filters import LiteralValueFilter + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + value = regex = b"value_regex" + row_filter = LiteralValueFilter(value) + expected_dict = {"value_regex_filter": regex} + assert row_filter._to_dict() == expected_dict + expected_pb_value = row_filter._to_pb() + assert data_v2_pb2.RowFilter(**expected_dict) == expected_pb_value + + +def test_literal_value_filter_to_pb_w_str(): + from google.cloud.bigtable.data.row_filters import LiteralValueFilter + + value = "value_regex" + regex = value.encode("ascii") + row_filter = LiteralValueFilter(value) + pb_val = row_filter._to_pb() + expected_pb = _RowFilterPB(value_regex_filter=regex) + assert pb_val == expected_pb + + +def test_literal_value_filter_to_dict_w_str(): + from google.cloud.bigtable.data.row_filters import LiteralValueFilter + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + value = "value_regex" + regex = value.encode("ascii") + row_filter = LiteralValueFilter(value) + expected_dict = {"value_regex_filter": regex} + assert row_filter._to_dict() == expected_dict + expected_pb_value = row_filter._to_pb() + assert data_v2_pb2.RowFilter(**expected_dict) == expected_pb_value + + +@pytest.mark.parametrize( + "value,expected_byte_string", + [ + # null bytes are encoded as "\x00" in ascii characters + # others are just prefixed with "\" + (0, b"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00"), + (1, b"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\\x01"), + ( + 68, + b"\\x00\\x00\\x00\\x00\\x00\\x00\\x00D", + ), # bytes 
that encode to alphanum are not escaped + (570, b"\\x00\\x00\\x00\\x00\\x00\\x00\\\x02\\\x3a"), + (2852126720, b"\\x00\\x00\\x00\\x00\xaa\\x00\\x00\\x00"), + (-1, b"\xff\xff\xff\xff\xff\xff\xff\xff"), + (-1096642724096, b"\xff\xff\xff\\x00\xaa\xff\xff\\x00"), + ], +) +def test_literal_value_filter_w_int(value, expected_byte_string): + from google.cloud.bigtable.data.row_filters import LiteralValueFilter + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + row_filter = LiteralValueFilter(value) + # test pb + pb_val = row_filter._to_pb() + expected_pb = _RowFilterPB(value_regex_filter=expected_byte_string) + assert pb_val == expected_pb + # test dict + expected_dict = {"value_regex_filter": expected_byte_string} + assert row_filter._to_dict() == expected_dict + assert data_v2_pb2.RowFilter(**expected_dict) == pb_val + + +def test_literal_value_filter___repr__(): + from google.cloud.bigtable.data.row_filters import LiteralValueFilter + + value = "value_regex" + row_filter = LiteralValueFilter(value) + expected = "LiteralValueFilter(value=b'value_regex')" + assert repr(row_filter) == expected + assert repr(row_filter) == str(row_filter) + assert eval(repr(row_filter)) == row_filter + + +def test_value_range_filter_constructor_defaults(): + from google.cloud.bigtable.data.row_filters import ValueRangeFilter + + row_filter = ValueRangeFilter() + + assert row_filter.start_value is None + assert row_filter.end_value is None + assert row_filter.inclusive_start + assert row_filter.inclusive_end + + +def test_value_range_filter_constructor_explicit(): + from google.cloud.bigtable.data.row_filters import ValueRangeFilter + + start_value = object() + end_value = object() + inclusive_start = object() + inclusive_end = object() + + row_filter = ValueRangeFilter( + start_value=start_value, + end_value=end_value, + inclusive_start=inclusive_start, + inclusive_end=inclusive_end, + ) + + assert row_filter.start_value is start_value + assert row_filter.end_value is end_value + assert row_filter.inclusive_start is inclusive_start + assert row_filter.inclusive_end is inclusive_end + + +def test_value_range_filter_constructor_w_int_values(): + from google.cloud.bigtable.data.row_filters import ValueRangeFilter + import struct + + start_value = 1 + end_value = 10 + + row_filter = ValueRangeFilter(start_value=start_value, end_value=end_value) + + expected_start_value = struct.Struct(">q").pack(start_value) + expected_end_value = struct.Struct(">q").pack(end_value) + + assert row_filter.start_value == expected_start_value + assert row_filter.end_value == expected_end_value + assert row_filter.inclusive_start + assert row_filter.inclusive_end + + +def test_value_range_filter_constructor_bad_start(): + from google.cloud.bigtable.data.row_filters import ValueRangeFilter + + with pytest.raises(ValueError): + ValueRangeFilter(inclusive_start=True) + + +def test_value_range_filter_constructor_bad_end(): + from google.cloud.bigtable.data.row_filters import ValueRangeFilter + + with pytest.raises(ValueError): + ValueRangeFilter(inclusive_end=True) + + +def test_value_range_filter___eq__(): + from google.cloud.bigtable.data.row_filters import ValueRangeFilter + + start_value = object() + end_value = object() + inclusive_start = object() + inclusive_end = object() + row_filter1 = ValueRangeFilter( + start_value=start_value, + end_value=end_value, + inclusive_start=inclusive_start, + inclusive_end=inclusive_end, + ) + row_filter2 = ValueRangeFilter( + start_value=start_value, + end_value=end_value, + 
inclusive_start=inclusive_start, + inclusive_end=inclusive_end, + ) + assert row_filter1 == row_filter2 + + +def test_value_range_filter___eq__type_differ(): + from google.cloud.bigtable.data.row_filters import ValueRangeFilter + + row_filter1 = ValueRangeFilter() + row_filter2 = object() + assert not (row_filter1 == row_filter2) + + +def test_value_range_filter___ne__(): + from google.cloud.bigtable.data.row_filters import ValueRangeFilter + + start_value = object() + other_start_value = object() + end_value = object() + inclusive_start = object() + inclusive_end = object() + row_filter1 = ValueRangeFilter( + start_value=start_value, + end_value=end_value, + inclusive_start=inclusive_start, + inclusive_end=inclusive_end, + ) + row_filter2 = ValueRangeFilter( + start_value=other_start_value, + end_value=end_value, + inclusive_start=inclusive_start, + inclusive_end=inclusive_end, + ) + assert row_filter1 != row_filter2 + + +def test_value_range_filter_to_pb(): + from google.cloud.bigtable.data.row_filters import ValueRangeFilter + + row_filter = ValueRangeFilter() + expected_pb = _RowFilterPB(value_range_filter=_ValueRangePB()) + assert row_filter._to_pb() == expected_pb + + +def test_value_range_filter_to_dict(): + from google.cloud.bigtable.data.row_filters import ValueRangeFilter + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + row_filter = ValueRangeFilter() + expected_dict = {"value_range_filter": {}} + assert row_filter._to_dict() == expected_dict + expected_pb_value = row_filter._to_pb() + assert data_v2_pb2.RowFilter(**expected_dict) == expected_pb_value + + +def test_value_range_filter_to_pb_inclusive_start(): + from google.cloud.bigtable.data.row_filters import ValueRangeFilter + + value = b"some-value" + row_filter = ValueRangeFilter(start_value=value) + val_range_pb = _ValueRangePB(start_value_closed=value) + expected_pb = _RowFilterPB(value_range_filter=val_range_pb) + assert row_filter._to_pb() == expected_pb + + +def test_value_range_filter_to_pb_exclusive_start(): + from google.cloud.bigtable.data.row_filters import ValueRangeFilter + + value = b"some-value" + row_filter = ValueRangeFilter(start_value=value, inclusive_start=False) + val_range_pb = _ValueRangePB(start_value_open=value) + expected_pb = _RowFilterPB(value_range_filter=val_range_pb) + assert row_filter._to_pb() == expected_pb + + +def test_value_range_filter_to_pb_inclusive_end(): + from google.cloud.bigtable.data.row_filters import ValueRangeFilter + + value = b"some-value" + row_filter = ValueRangeFilter(end_value=value) + val_range_pb = _ValueRangePB(end_value_closed=value) + expected_pb = _RowFilterPB(value_range_filter=val_range_pb) + assert row_filter._to_pb() == expected_pb + + +def test_value_range_filter_to_pb_exclusive_end(): + from google.cloud.bigtable.data.row_filters import ValueRangeFilter + + value = b"some-value" + row_filter = ValueRangeFilter(end_value=value, inclusive_end=False) + val_range_pb = _ValueRangePB(end_value_open=value) + expected_pb = _RowFilterPB(value_range_filter=val_range_pb) + assert row_filter._to_pb() == expected_pb + + +def test_value_range_filter___repr__(): + from google.cloud.bigtable.data.row_filters import ValueRangeFilter + + start_value = b"some-value" + end_value = b"some-other-value" + row_filter = ValueRangeFilter( + start_value=start_value, end_value=end_value, inclusive_end=False + ) + expected = "ValueRangeFilter(start_value=b'some-value', end_value=b'some-other-value', inclusive_start=True, inclusive_end=False)" + assert repr(row_filter) 
== expected + assert repr(row_filter) == str(row_filter) + assert eval(repr(row_filter)) == row_filter + + +def test_cell_count_constructor(): + for FilerType in _get_cell_count_filters(): + num_cells = object() + row_filter = FilerType(num_cells) + assert row_filter.num_cells is num_cells + + +def test_cell_count___eq__type_differ(): + for FilerType in _get_cell_count_filters(): + num_cells = object() + row_filter1 = FilerType(num_cells) + row_filter2 = object() + assert not (row_filter1 == row_filter2) + + +def test_cell_count___eq__same_value(): + for FilerType in _get_cell_count_filters(): + num_cells = object() + row_filter1 = FilerType(num_cells) + row_filter2 = FilerType(num_cells) + assert row_filter1 == row_filter2 + + +def test_cell_count___ne__same_value(): + for FilerType in _get_cell_count_filters(): + num_cells = object() + row_filter1 = FilerType(num_cells) + row_filter2 = FilerType(num_cells) + assert not (row_filter1 != row_filter2) + + +def test_cells_row_offset_filter_to_pb(): + from google.cloud.bigtable.data.row_filters import CellsRowOffsetFilter + + num_cells = 76 + row_filter = CellsRowOffsetFilter(num_cells) + pb_val = row_filter._to_pb() + expected_pb = _RowFilterPB(cells_per_row_offset_filter=num_cells) + assert pb_val == expected_pb + + +def test_cells_row_offset_filter_to_dict(): + from google.cloud.bigtable.data.row_filters import CellsRowOffsetFilter + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + num_cells = 76 + row_filter = CellsRowOffsetFilter(num_cells) + expected_dict = {"cells_per_row_offset_filter": num_cells} + assert row_filter._to_dict() == expected_dict + expected_pb_value = row_filter._to_pb() + assert data_v2_pb2.RowFilter(**expected_dict) == expected_pb_value + + +def test_cells_row_offset_filter___repr__(): + from google.cloud.bigtable.data.row_filters import CellsRowOffsetFilter + + num_cells = 76 + row_filter = CellsRowOffsetFilter(num_cells) + expected = "CellsRowOffsetFilter(num_cells={})".format(num_cells) + assert repr(row_filter) == expected + assert repr(row_filter) == str(row_filter) + assert eval(repr(row_filter)) == row_filter + + +def test_cells_row_limit_filter_to_pb(): + from google.cloud.bigtable.data.row_filters import CellsRowLimitFilter + + num_cells = 189 + row_filter = CellsRowLimitFilter(num_cells) + pb_val = row_filter._to_pb() + expected_pb = _RowFilterPB(cells_per_row_limit_filter=num_cells) + assert pb_val == expected_pb + + +def test_cells_row_limit_filter_to_dict(): + from google.cloud.bigtable.data.row_filters import CellsRowLimitFilter + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + num_cells = 189 + row_filter = CellsRowLimitFilter(num_cells) + expected_dict = {"cells_per_row_limit_filter": num_cells} + assert row_filter._to_dict() == expected_dict + expected_pb_value = row_filter._to_pb() + assert data_v2_pb2.RowFilter(**expected_dict) == expected_pb_value + + +def test_cells_row_limit_filter___repr__(): + from google.cloud.bigtable.data.row_filters import CellsRowLimitFilter + + num_cells = 189 + row_filter = CellsRowLimitFilter(num_cells) + expected = "CellsRowLimitFilter(num_cells={})".format(num_cells) + assert repr(row_filter) == expected + assert repr(row_filter) == str(row_filter) + assert eval(repr(row_filter)) == row_filter + + +def test_cells_column_limit_filter_to_pb(): + from google.cloud.bigtable.data.row_filters import CellsColumnLimitFilter + + num_cells = 10 + row_filter = CellsColumnLimitFilter(num_cells) + pb_val = row_filter._to_pb() + expected_pb = 
_RowFilterPB(cells_per_column_limit_filter=num_cells) + assert pb_val == expected_pb + + +def test_cells_column_limit_filter_to_dict(): + from google.cloud.bigtable.data.row_filters import CellsColumnLimitFilter + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + num_cells = 10 + row_filter = CellsColumnLimitFilter(num_cells) + expected_dict = {"cells_per_column_limit_filter": num_cells} + assert row_filter._to_dict() == expected_dict + expected_pb_value = row_filter._to_pb() + assert data_v2_pb2.RowFilter(**expected_dict) == expected_pb_value + + +def test_cells_column_limit_filter___repr__(): + from google.cloud.bigtable.data.row_filters import CellsColumnLimitFilter + + num_cells = 10 + row_filter = CellsColumnLimitFilter(num_cells) + expected = "CellsColumnLimitFilter(num_cells={})".format(num_cells) + assert repr(row_filter) == expected + assert repr(row_filter) == str(row_filter) + assert eval(repr(row_filter)) == row_filter + + +def test_strip_value_transformer_filter_to_pb(): + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter + + flag = True + row_filter = StripValueTransformerFilter(flag) + pb_val = row_filter._to_pb() + expected_pb = _RowFilterPB(strip_value_transformer=flag) + assert pb_val == expected_pb + + +def test_strip_value_transformer_filter_to_dict(): + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + flag = True + row_filter = StripValueTransformerFilter(flag) + expected_dict = {"strip_value_transformer": flag} + assert row_filter._to_dict() == expected_dict + expected_pb_value = row_filter._to_pb() + assert data_v2_pb2.RowFilter(**expected_dict) == expected_pb_value + + +def test_strip_value_transformer_filter___repr__(): + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter + + flag = True + row_filter = StripValueTransformerFilter(flag) + expected = "StripValueTransformerFilter(flag={})".format(flag) + assert repr(row_filter) == expected + assert repr(row_filter) == str(row_filter) + assert eval(repr(row_filter)) == row_filter + + +def test_apply_label_filter_constructor(): + from google.cloud.bigtable.data.row_filters import ApplyLabelFilter + + label = object() + row_filter = ApplyLabelFilter(label) + assert row_filter.label is label + + +def test_apply_label_filter___eq__type_differ(): + from google.cloud.bigtable.data.row_filters import ApplyLabelFilter + + label = object() + row_filter1 = ApplyLabelFilter(label) + row_filter2 = object() + assert not (row_filter1 == row_filter2) + + +def test_apply_label_filter___eq__same_value(): + from google.cloud.bigtable.data.row_filters import ApplyLabelFilter + + label = object() + row_filter1 = ApplyLabelFilter(label) + row_filter2 = ApplyLabelFilter(label) + assert row_filter1 == row_filter2 + + +def test_apply_label_filter___ne__(): + from google.cloud.bigtable.data.row_filters import ApplyLabelFilter + + label = object() + other_label = object() + row_filter1 = ApplyLabelFilter(label) + row_filter2 = ApplyLabelFilter(other_label) + assert row_filter1 != row_filter2 + + +def test_apply_label_filter_to_pb(): + from google.cloud.bigtable.data.row_filters import ApplyLabelFilter + + label = "label" + row_filter = ApplyLabelFilter(label) + pb_val = row_filter._to_pb() + expected_pb = _RowFilterPB(apply_label_transformer=label) + assert pb_val == expected_pb + + +def test_apply_label_filter_to_dict(): + from google.cloud.bigtable.data.row_filters import 
ApplyLabelFilter + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + label = "label" + row_filter = ApplyLabelFilter(label) + expected_dict = {"apply_label_transformer": label} + assert row_filter._to_dict() == expected_dict + expected_pb_value = row_filter._to_pb() + assert data_v2_pb2.RowFilter(**expected_dict) == expected_pb_value + + +def test_apply_label_filter___repr__(): + from google.cloud.bigtable.data.row_filters import ApplyLabelFilter + + label = "label" + row_filter = ApplyLabelFilter(label) + expected = "ApplyLabelFilter(label={})".format(label) + assert repr(row_filter) == expected + assert repr(row_filter) == str(row_filter) + assert eval(repr(row_filter)) == row_filter + + +def test_filter_combination_constructor_defaults(): + for FilterType in _get_filter_combination_filters(): + row_filter = FilterType() + assert row_filter.filters == [] + + +def test_filter_combination_constructor_explicit(): + for FilterType in _get_filter_combination_filters(): + filters = object() + row_filter = FilterType(filters=filters) + assert row_filter.filters is filters + + +def test_filter_combination___eq__(): + for FilterType in _get_filter_combination_filters(): + filters = object() + row_filter1 = FilterType(filters=filters) + row_filter2 = FilterType(filters=filters) + assert row_filter1 == row_filter2 + + +def test_filter_combination___eq__type_differ(): + for FilterType in _get_filter_combination_filters(): + filters = object() + row_filter1 = FilterType(filters=filters) + row_filter2 = object() + assert not (row_filter1 == row_filter2) + + +def test_filter_combination___ne__(): + for FilterType in _get_filter_combination_filters(): + filters = object() + other_filters = object() + row_filter1 = FilterType(filters=filters) + row_filter2 = FilterType(filters=other_filters) + assert row_filter1 != row_filter2 + + +def test_filter_combination_len(): + for FilterType in _get_filter_combination_filters(): + filters = [object(), object()] + row_filter = FilterType(filters=filters) + assert len(row_filter) == len(filters) + + +def test_filter_combination_iter(): + for FilterType in _get_filter_combination_filters(): + filters = [object(), object()] + row_filter = FilterType(filters=filters) + assert list(iter(row_filter)) == filters + for filter_, expected in zip(row_filter, filters): + assert filter_ is expected + + +def test_filter_combination___getitem__(): + for FilterType in _get_filter_combination_filters(): + filters = [object(), object()] + row_filter = FilterType(filters=filters) + assert row_filter[0] is filters[0] + assert row_filter[1] is filters[1] + with pytest.raises(IndexError): + row_filter[2] + assert row_filter[:] == filters[:] + + +def test_filter_combination___str__(): + from google.cloud.bigtable.data.row_filters import PassAllFilter + + for FilterType in _get_filter_combination_filters(): + filters = [PassAllFilter(True), PassAllFilter(False)] + row_filter = FilterType(filters=filters) + expected = ( + "([\n PassAllFilter(flag=True),\n PassAllFilter(flag=False),\n])" + ) + assert expected in str(row_filter) + + +def test_row_filter_chain_to_pb(): + from google.cloud.bigtable.data.row_filters import RowFilterChain + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter + + row_filter1 = StripValueTransformerFilter(True) + row_filter1_pb = row_filter1._to_pb() + + row_filter2 = RowSampleFilter(0.25) + row_filter2_pb = row_filter2._to_pb() + + row_filter3 = 
RowFilterChain(filters=[row_filter1, row_filter2]) + filter_pb = row_filter3._to_pb() + + expected_pb = _RowFilterPB( + chain=_RowFilterChainPB(filters=[row_filter1_pb, row_filter2_pb]) + ) + assert filter_pb == expected_pb + + +def test_row_filter_chain_to_dict(): + from google.cloud.bigtable.data.row_filters import RowFilterChain + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + row_filter1 = StripValueTransformerFilter(True) + row_filter1_dict = row_filter1._to_dict() + + row_filter2 = RowSampleFilter(0.25) + row_filter2_dict = row_filter2._to_dict() + + row_filter3 = RowFilterChain(filters=[row_filter1, row_filter2]) + filter_dict = row_filter3._to_dict() + + expected_dict = {"chain": {"filters": [row_filter1_dict, row_filter2_dict]}} + assert filter_dict == expected_dict + expected_pb_value = row_filter3._to_pb() + assert data_v2_pb2.RowFilter(**expected_dict) == expected_pb_value + + +def test_row_filter_chain_to_pb_nested(): + from google.cloud.bigtable.data.row_filters import CellsRowLimitFilter + from google.cloud.bigtable.data.row_filters import RowFilterChain + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter + + row_filter1 = StripValueTransformerFilter(True) + row_filter2 = RowSampleFilter(0.25) + + row_filter3 = RowFilterChain(filters=[row_filter1, row_filter2]) + row_filter3_pb = row_filter3._to_pb() + + row_filter4 = CellsRowLimitFilter(11) + row_filter4_pb = row_filter4._to_pb() + + row_filter5 = RowFilterChain(filters=[row_filter3, row_filter4]) + filter_pb = row_filter5._to_pb() + + expected_pb = _RowFilterPB( + chain=_RowFilterChainPB(filters=[row_filter3_pb, row_filter4_pb]) + ) + assert filter_pb == expected_pb + + +def test_row_filter_chain_to_dict_nested(): + from google.cloud.bigtable.data.row_filters import CellsRowLimitFilter + from google.cloud.bigtable.data.row_filters import RowFilterChain + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + row_filter1 = StripValueTransformerFilter(True) + + row_filter2 = RowSampleFilter(0.25) + + row_filter3 = RowFilterChain(filters=[row_filter1, row_filter2]) + row_filter3_dict = row_filter3._to_dict() + + row_filter4 = CellsRowLimitFilter(11) + row_filter4_dict = row_filter4._to_dict() + + row_filter5 = RowFilterChain(filters=[row_filter3, row_filter4]) + filter_dict = row_filter5._to_dict() + + expected_dict = {"chain": {"filters": [row_filter3_dict, row_filter4_dict]}} + assert filter_dict == expected_dict + expected_pb_value = row_filter5._to_pb() + assert data_v2_pb2.RowFilter(**expected_dict) == expected_pb_value + + +def test_row_filter_chain___repr__(): + from google.cloud.bigtable.data.row_filters import RowFilterChain + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter + + row_filter1 = StripValueTransformerFilter(True) + row_filter2 = RowSampleFilter(0.25) + + row_filter3 = RowFilterChain(filters=[row_filter1, row_filter2]) + expected = f"RowFilterChain(filters={[row_filter1, row_filter2]})" + assert repr(row_filter3) == expected + assert eval(repr(row_filter3)) == row_filter3 + + +def 
test_row_filter_chain___str__(): + from google.cloud.bigtable.data.row_filters import RowFilterChain + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter + + row_filter1 = StripValueTransformerFilter(True) + row_filter2 = RowSampleFilter(0.25) + + row_filter3 = RowFilterChain(filters=[row_filter1, row_filter2]) + expected = "RowFilterChain([\n StripValueTransformerFilter(flag=True),\n RowSampleFilter(sample=0.25),\n])" + assert str(row_filter3) == expected + # test nested + row_filter4 = RowFilterChain(filters=[row_filter3]) + expected = "RowFilterChain([\n RowFilterChain([\n StripValueTransformerFilter(flag=True),\n RowSampleFilter(sample=0.25),\n ]),\n])" + assert str(row_filter4) == expected + + +def test_row_filter_union_to_pb(): + from google.cloud.bigtable.data.row_filters import RowFilterUnion + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter + + row_filter1 = StripValueTransformerFilter(True) + row_filter1_pb = row_filter1._to_pb() + + row_filter2 = RowSampleFilter(0.25) + row_filter2_pb = row_filter2._to_pb() + + row_filter3 = RowFilterUnion(filters=[row_filter1, row_filter2]) + filter_pb = row_filter3._to_pb() + + expected_pb = _RowFilterPB( + interleave=_RowFilterInterleavePB(filters=[row_filter1_pb, row_filter2_pb]) + ) + assert filter_pb == expected_pb + + +def test_row_filter_union_to_dict(): + from google.cloud.bigtable.data.row_filters import RowFilterUnion + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + row_filter1 = StripValueTransformerFilter(True) + row_filter1_dict = row_filter1._to_dict() + + row_filter2 = RowSampleFilter(0.25) + row_filter2_dict = row_filter2._to_dict() + + row_filter3 = RowFilterUnion(filters=[row_filter1, row_filter2]) + filter_dict = row_filter3._to_dict() + + expected_dict = {"interleave": {"filters": [row_filter1_dict, row_filter2_dict]}} + assert filter_dict == expected_dict + expected_pb_value = row_filter3._to_pb() + assert data_v2_pb2.RowFilter(**expected_dict) == expected_pb_value + + +def test_row_filter_union_to_pb_nested(): + from google.cloud.bigtable.data.row_filters import CellsRowLimitFilter + from google.cloud.bigtable.data.row_filters import RowFilterUnion + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter + + row_filter1 = StripValueTransformerFilter(True) + row_filter2 = RowSampleFilter(0.25) + + row_filter3 = RowFilterUnion(filters=[row_filter1, row_filter2]) + row_filter3_pb = row_filter3._to_pb() + + row_filter4 = CellsRowLimitFilter(11) + row_filter4_pb = row_filter4._to_pb() + + row_filter5 = RowFilterUnion(filters=[row_filter3, row_filter4]) + filter_pb = row_filter5._to_pb() + + expected_pb = _RowFilterPB( + interleave=_RowFilterInterleavePB(filters=[row_filter3_pb, row_filter4_pb]) + ) + assert filter_pb == expected_pb + + +def test_row_filter_union_to_dict_nested(): + from google.cloud.bigtable.data.row_filters import CellsRowLimitFilter + from google.cloud.bigtable.data.row_filters import RowFilterUnion + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter + from 
google.cloud.bigtable_v2.types import data as data_v2_pb2 + + row_filter1 = StripValueTransformerFilter(True) + + row_filter2 = RowSampleFilter(0.25) + + row_filter3 = RowFilterUnion(filters=[row_filter1, row_filter2]) + row_filter3_dict = row_filter3._to_dict() + + row_filter4 = CellsRowLimitFilter(11) + row_filter4_dict = row_filter4._to_dict() + + row_filter5 = RowFilterUnion(filters=[row_filter3, row_filter4]) + filter_dict = row_filter5._to_dict() + + expected_dict = {"interleave": {"filters": [row_filter3_dict, row_filter4_dict]}} + assert filter_dict == expected_dict + expected_pb_value = row_filter5._to_pb() + assert data_v2_pb2.RowFilter(**expected_dict) == expected_pb_value + + +def test_row_filter_union___repr__(): + from google.cloud.bigtable.data.row_filters import RowFilterUnion + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter + + row_filter1 = StripValueTransformerFilter(True) + row_filter2 = RowSampleFilter(0.25) + + row_filter3 = RowFilterUnion(filters=[row_filter1, row_filter2]) + expected = "RowFilterUnion(filters=[StripValueTransformerFilter(flag=True), RowSampleFilter(sample=0.25)])" + assert repr(row_filter3) == expected + assert eval(repr(row_filter3)) == row_filter3 + + +def test_row_filter_union___str__(): + from google.cloud.bigtable.data.row_filters import RowFilterUnion + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter + + row_filter1 = StripValueTransformerFilter(True) + row_filter2 = RowSampleFilter(0.25) + + row_filter3 = RowFilterUnion(filters=[row_filter1, row_filter2]) + expected = "RowFilterUnion([\n StripValueTransformerFilter(flag=True),\n RowSampleFilter(sample=0.25),\n])" + assert str(row_filter3) == expected + # test nested + row_filter4 = RowFilterUnion(filters=[row_filter3]) + expected = "RowFilterUnion([\n RowFilterUnion([\n StripValueTransformerFilter(flag=True),\n RowSampleFilter(sample=0.25),\n ]),\n])" + assert str(row_filter4) == expected + + +def test_conditional_row_filter_constructor(): + from google.cloud.bigtable.data.row_filters import ConditionalRowFilter + + predicate_filter = object() + true_filter = object() + false_filter = object() + cond_filter = ConditionalRowFilter( + predicate_filter, true_filter=true_filter, false_filter=false_filter + ) + assert cond_filter.predicate_filter is predicate_filter + assert cond_filter.true_filter is true_filter + assert cond_filter.false_filter is false_filter + + +def test_conditional_row_filter___eq__(): + from google.cloud.bigtable.data.row_filters import ConditionalRowFilter + + predicate_filter = object() + true_filter = object() + false_filter = object() + cond_filter1 = ConditionalRowFilter( + predicate_filter, true_filter=true_filter, false_filter=false_filter + ) + cond_filter2 = ConditionalRowFilter( + predicate_filter, true_filter=true_filter, false_filter=false_filter + ) + assert cond_filter1 == cond_filter2 + + +def test_conditional_row_filter___eq__type_differ(): + from google.cloud.bigtable.data.row_filters import ConditionalRowFilter + + predicate_filter = object() + true_filter = object() + false_filter = object() + cond_filter1 = ConditionalRowFilter( + predicate_filter, true_filter=true_filter, false_filter=false_filter + ) + cond_filter2 = object() + assert not (cond_filter1 == cond_filter2) + + +def test_conditional_row_filter___ne__(): + from 
google.cloud.bigtable.data.row_filters import ConditionalRowFilter + + predicate_filter = object() + other_predicate_filter = object() + true_filter = object() + false_filter = object() + cond_filter1 = ConditionalRowFilter( + predicate_filter, true_filter=true_filter, false_filter=false_filter + ) + cond_filter2 = ConditionalRowFilter( + other_predicate_filter, true_filter=true_filter, false_filter=false_filter + ) + assert cond_filter1 != cond_filter2 + + +def test_conditional_row_filter_to_pb(): + from google.cloud.bigtable.data.row_filters import ConditionalRowFilter + from google.cloud.bigtable.data.row_filters import CellsRowOffsetFilter + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter + + row_filter1 = StripValueTransformerFilter(True) + row_filter1_pb = row_filter1._to_pb() + + row_filter2 = RowSampleFilter(0.25) + row_filter2_pb = row_filter2._to_pb() + + row_filter3 = CellsRowOffsetFilter(11) + row_filter3_pb = row_filter3._to_pb() + + row_filter4 = ConditionalRowFilter( + row_filter1, true_filter=row_filter2, false_filter=row_filter3 + ) + filter_pb = row_filter4._to_pb() + + expected_pb = _RowFilterPB( + condition=_RowFilterConditionPB( + predicate_filter=row_filter1_pb, + true_filter=row_filter2_pb, + false_filter=row_filter3_pb, + ) + ) + assert filter_pb == expected_pb + + +def test_conditional_row_filter_to_dict(): + from google.cloud.bigtable.data.row_filters import ConditionalRowFilter + from google.cloud.bigtable.data.row_filters import CellsRowOffsetFilter + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + row_filter1 = StripValueTransformerFilter(True) + row_filter1_dict = row_filter1._to_dict() + + row_filter2 = RowSampleFilter(0.25) + row_filter2_dict = row_filter2._to_dict() + + row_filter3 = CellsRowOffsetFilter(11) + row_filter3_dict = row_filter3._to_dict() + + row_filter4 = ConditionalRowFilter( + row_filter1, true_filter=row_filter2, false_filter=row_filter3 + ) + filter_dict = row_filter4._to_dict() + + expected_dict = { + "condition": { + "predicate_filter": row_filter1_dict, + "true_filter": row_filter2_dict, + "false_filter": row_filter3_dict, + } + } + assert filter_dict == expected_dict + expected_pb_value = row_filter4._to_pb() + assert data_v2_pb2.RowFilter(**expected_dict) == expected_pb_value + + +def test_conditional_row_filter_to_pb_true_only(): + from google.cloud.bigtable.data.row_filters import ConditionalRowFilter + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter + + row_filter1 = StripValueTransformerFilter(True) + row_filter1_pb = row_filter1._to_pb() + + row_filter2 = RowSampleFilter(0.25) + row_filter2_pb = row_filter2._to_pb() + + row_filter3 = ConditionalRowFilter(row_filter1, true_filter=row_filter2) + filter_pb = row_filter3._to_pb() + + expected_pb = _RowFilterPB( + condition=_RowFilterConditionPB( + predicate_filter=row_filter1_pb, true_filter=row_filter2_pb + ) + ) + assert filter_pb == expected_pb + + +def test_conditional_row_filter_to_dict_true_only(): + from google.cloud.bigtable.data.row_filters import ConditionalRowFilter + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import 
StripValueTransformerFilter + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + row_filter1 = StripValueTransformerFilter(True) + row_filter1_dict = row_filter1._to_dict() + + row_filter2 = RowSampleFilter(0.25) + row_filter2_dict = row_filter2._to_dict() + + row_filter3 = ConditionalRowFilter(row_filter1, true_filter=row_filter2) + filter_dict = row_filter3._to_dict() + + expected_dict = { + "condition": { + "predicate_filter": row_filter1_dict, + "true_filter": row_filter2_dict, + } + } + assert filter_dict == expected_dict + expected_pb_value = row_filter3._to_pb() + assert data_v2_pb2.RowFilter(**expected_dict) == expected_pb_value + + +def test_conditional_row_filter_to_pb_false_only(): + from google.cloud.bigtable.data.row_filters import ConditionalRowFilter + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter + + row_filter1 = StripValueTransformerFilter(True) + row_filter1_pb = row_filter1._to_pb() + + row_filter2 = RowSampleFilter(0.25) + row_filter2_pb = row_filter2._to_pb() + + row_filter3 = ConditionalRowFilter(row_filter1, false_filter=row_filter2) + filter_pb = row_filter3._to_pb() + + expected_pb = _RowFilterPB( + condition=_RowFilterConditionPB( + predicate_filter=row_filter1_pb, false_filter=row_filter2_pb + ) + ) + assert filter_pb == expected_pb + + +def test_conditional_row_filter_to_dict_false_only(): + from google.cloud.bigtable.data.row_filters import ConditionalRowFilter + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + row_filter1 = StripValueTransformerFilter(True) + row_filter1_dict = row_filter1._to_dict() + + row_filter2 = RowSampleFilter(0.25) + row_filter2_dict = row_filter2._to_dict() + + row_filter3 = ConditionalRowFilter(row_filter1, false_filter=row_filter2) + filter_dict = row_filter3._to_dict() + + expected_dict = { + "condition": { + "predicate_filter": row_filter1_dict, + "false_filter": row_filter2_dict, + } + } + assert filter_dict == expected_dict + expected_pb_value = row_filter3._to_pb() + assert data_v2_pb2.RowFilter(**expected_dict) == expected_pb_value + + +def test_conditional_row_filter___repr__(): + from google.cloud.bigtable.data.row_filters import ConditionalRowFilter + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter + + row_filter1 = StripValueTransformerFilter(True) + row_filter2 = RowSampleFilter(0.25) + row_filter3 = ConditionalRowFilter(row_filter1, true_filter=row_filter2) + expected = ( + "ConditionalRowFilter(predicate_filter=StripValueTransformerFilter(" + "flag=True), true_filter=RowSampleFilter(sample=0.25), false_filter=None)" + ) + assert repr(row_filter3) == expected + assert eval(repr(row_filter3)) == row_filter3 + # test nested + row_filter4 = ConditionalRowFilter(row_filter3, true_filter=row_filter2) + expected = "ConditionalRowFilter(predicate_filter=ConditionalRowFilter(predicate_filter=StripValueTransformerFilter(flag=True), true_filter=RowSampleFilter(sample=0.25), false_filter=None), true_filter=RowSampleFilter(sample=0.25), false_filter=None)" + assert repr(row_filter4) == expected + assert eval(repr(row_filter4)) == row_filter4 + + +def test_conditional_row_filter___str__(): + from google.cloud.bigtable.data.row_filters import 
ConditionalRowFilter + from google.cloud.bigtable.data.row_filters import RowSampleFilter + from google.cloud.bigtable.data.row_filters import RowFilterUnion + from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter + + row_filter1 = StripValueTransformerFilter(True) + row_filter2 = RowSampleFilter(0.25) + row_filter3 = ConditionalRowFilter(row_filter1, true_filter=row_filter2) + expected = "ConditionalRowFilter(\n predicate_filter=StripValueTransformerFilter(flag=True),\n true_filter=RowSampleFilter(sample=0.25),\n)" + assert str(row_filter3) == expected + # test nested + row_filter4 = ConditionalRowFilter( + row_filter3, + true_filter=row_filter2, + false_filter=RowFilterUnion([row_filter1, row_filter2]), + ) + expected = "ConditionalRowFilter(\n predicate_filter=ConditionalRowFilter(\n predicate_filter=StripValueTransformerFilter(flag=True),\n true_filter=RowSampleFilter(sample=0.25),\n ),\n true_filter=RowSampleFilter(sample=0.25),\n false_filter=RowFilterUnion([\n StripValueTransformerFilter(flag=True),\n RowSampleFilter(sample=0.25),\n ]),\n)" + assert str(row_filter4) == expected + + +@pytest.mark.parametrize( + "input_arg, expected_bytes", + [ + (b"abc", b"abc"), + ("abc", b"abc"), + (1, b"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\\x01"), # null bytes are ascii + (b"*", b"\\*"), + (".", b"\\."), + (b"\\", b"\\\\"), + (b"h.*i", b"h\\.\\*i"), + (b'""', b'\\"\\"'), + (b"[xyz]", b"\\[xyz\\]"), + (b"\xe2\x98\xba\xef\xb8\x8f", b"\xe2\x98\xba\xef\xb8\x8f"), + ("☃", b"\xe2\x98\x83"), + (r"\C☃", b"\\\\C\xe2\x98\x83"), + ], +) +def test_literal_value__write_literal_regex(input_arg, expected_bytes): + from google.cloud.bigtable.data.row_filters import LiteralValueFilter + + filter_ = LiteralValueFilter(input_arg) + assert filter_.regex == expected_bytes + + +def _ColumnRangePB(*args, **kw): + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + return data_v2_pb2.ColumnRange(*args, **kw) + + +def _RowFilterPB(*args, **kw): + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + return data_v2_pb2.RowFilter(*args, **kw) + + +def _RowFilterChainPB(*args, **kw): + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + return data_v2_pb2.RowFilter.Chain(*args, **kw) + + +def _RowFilterConditionPB(*args, **kw): + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + return data_v2_pb2.RowFilter.Condition(*args, **kw) + + +def _RowFilterInterleavePB(*args, **kw): + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + return data_v2_pb2.RowFilter.Interleave(*args, **kw) + + +def _TimestampRangePB(*args, **kw): + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + return data_v2_pb2.TimestampRange(*args, **kw) + + +def _ValueRangePB(*args, **kw): + from google.cloud.bigtable_v2.types import data as data_v2_pb2 + + return data_v2_pb2.ValueRange(*args, **kw) + + +def _get_regex_filters(): + from google.cloud.bigtable.data.row_filters import ( + RowKeyRegexFilter, + FamilyNameRegexFilter, + ColumnQualifierRegexFilter, + ValueRegexFilter, + LiteralValueFilter, + ) + + return [ + RowKeyRegexFilter, + FamilyNameRegexFilter, + ColumnQualifierRegexFilter, + ValueRegexFilter, + LiteralValueFilter, + ] + + +def _get_bool_filters(): + from google.cloud.bigtable.data.row_filters import ( + SinkFilter, + PassAllFilter, + BlockAllFilter, + StripValueTransformerFilter, + ) + + return [ + SinkFilter, + PassAllFilter, + BlockAllFilter, + StripValueTransformerFilter, + ] + + +def _get_cell_count_filters(): + from 
google.cloud.bigtable.data.row_filters import ( + CellsRowLimitFilter, + CellsRowOffsetFilter, + CellsColumnLimitFilter, + ) + + return [ + CellsRowLimitFilter, + CellsRowOffsetFilter, + CellsColumnLimitFilter, + ] + + +def _get_filter_combination_filters(): + from google.cloud.bigtable.data.row_filters import ( + RowFilterChain, + RowFilterUnion, + ) + + return [ + RowFilterChain, + RowFilterUnion, + ] diff --git a/packages/google-cloud-bigtable/tests/unit/v2_client/__init__.py b/packages/google-cloud-bigtable/tests/unit/v2_client/__init__.py new file mode 100644 index 000000000000..e8e1c3845db5 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/v2_client/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/packages/google-cloud-bigtable/tests/unit/_testing.py b/packages/google-cloud-bigtable/tests/unit/v2_client/_testing.py similarity index 100% rename from packages/google-cloud-bigtable/tests/unit/_testing.py rename to packages/google-cloud-bigtable/tests/unit/v2_client/_testing.py diff --git a/packages/google-cloud-bigtable/tests/unit/v2_client/read-rows-acceptance-test.json b/packages/google-cloud-bigtable/tests/unit/v2_client/read-rows-acceptance-test.json new file mode 100644 index 000000000000..011ace2b9aa7 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/v2_client/read-rows-acceptance-test.json @@ -0,0 +1,1665 @@ +{ + "readRowsTests": [ + { + "description": "invalid - no commit", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": false + } + ], + "results": [ + { + "error": true + } + ] + }, + { + "description": "invalid - no cell key before commit", + "chunks": [ + { + "commitRow": true + } + ], + "results": [ + { + "error": true + } + ] + }, + { + "description": "invalid - no cell key before value", + "chunks": [ + { + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": false + } + ], + "results": [ + { + "error": true + } + ] + }, + { + "description": "invalid - new col family must specify qualifier", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "99", + "value": "dmFsdWUtVkFMXzE=", + "commitRow": false + }, + { + "familyName": "B", + "timestampMicros": "98", + "value": "dmFsdWUtVkFMXzI=", + "commitRow": true + } + ], + "results": [ + { + "error": true + } + ] + }, + { + "description": "bare commit implies ts=0", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": false + }, + { + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL" + }, + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C" + } + ] + }, + { + "description": "simple row with timestamp", + "chunks": [ + { + "rowKey": 
"Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL" + } + ] + }, + { + "description": "missing timestamp, implied ts=0", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "value": "dmFsdWUtVkFM", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "value": "value-VAL" + } + ] + }, + { + "description": "empty cell value", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C" + } + ] + }, + { + "description": "two unsplit cells", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "99", + "value": "dmFsdWUtVkFMXzE=", + "commitRow": false + }, + { + "timestampMicros": "98", + "value": "dmFsdWUtVkFMXzI=", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "99", + "value": "value-VAL_1" + }, + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "98", + "value": "value-VAL_2" + } + ] + }, + { + "description": "two qualifiers", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "99", + "value": "dmFsdWUtVkFMXzE=", + "commitRow": false + }, + { + "qualifier": "RA==", + "timestampMicros": "98", + "value": "dmFsdWUtVkFMXzI=", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "99", + "value": "value-VAL_1" + }, + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "D", + "timestampMicros": "98", + "value": "value-VAL_2" + } + ] + }, + { + "description": "two families", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "99", + "value": "dmFsdWUtVkFMXzE=", + "commitRow": false + }, + { + "familyName": "B", + "qualifier": "RQ==", + "timestampMicros": "98", + "value": "dmFsdWUtVkFMXzI=", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "99", + "value": "value-VAL_1" + }, + { + "rowKey": "RK", + "familyName": "B", + "qualifier": "E", + "timestampMicros": "98", + "value": "value-VAL_2" + } + ] + }, + { + "description": "with labels", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "99", + "labels": [ + "L_1" + ], + "value": "dmFsdWUtVkFMXzE=", + "commitRow": false + }, + { + "timestampMicros": "98", + "labels": [ + "L_2" + ], + "value": "dmFsdWUtVkFMXzI=", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "99", + "value": "value-VAL_1", + "label": "L_1" + }, + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "98", + "value": "value-VAL_2", + "label": "L_2" + } + ] + }, + { + "description": "split cell, bare commit", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dg==", + "valueSize": 9, + "commitRow": false + }, + { + "value": "YWx1ZS1WQUw=", + "commitRow": false + }, + { + "commitRow": true + } + ], + "results": [ + { + 
"rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL" + }, + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C" + } + ] + }, + { + "description": "split cell", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dg==", + "valueSize": 9, + "commitRow": false + }, + { + "value": "YWx1ZS1WQUw=", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL" + } + ] + }, + { + "description": "split four ways", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "labels": [ + "L" + ], + "value": "dg==", + "valueSize": 9, + "commitRow": false + }, + { + "value": "YQ==", + "valueSize": 9, + "commitRow": false + }, + { + "value": "bA==", + "valueSize": 9, + "commitRow": false + }, + { + "value": "dWUtVkFM", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL", + "label": "L" + } + ] + }, + { + "description": "two split cells", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "99", + "value": "dg==", + "valueSize": 11, + "commitRow": false + }, + { + "value": "YWx1ZS1WQUxfMQ==", + "commitRow": false + }, + { + "timestampMicros": "98", + "value": "dg==", + "valueSize": 11, + "commitRow": false + }, + { + "value": "YWx1ZS1WQUxfMg==", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "99", + "value": "value-VAL_1" + }, + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "98", + "value": "value-VAL_2" + } + ] + }, + { + "description": "multi-qualifier splits", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "99", + "value": "dg==", + "valueSize": 11, + "commitRow": false + }, + { + "value": "YWx1ZS1WQUxfMQ==", + "commitRow": false + }, + { + "qualifier": "RA==", + "timestampMicros": "98", + "value": "dg==", + "valueSize": 11, + "commitRow": false + }, + { + "value": "YWx1ZS1WQUxfMg==", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "99", + "value": "value-VAL_1" + }, + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "D", + "timestampMicros": "98", + "value": "value-VAL_2" + } + ] + }, + { + "description": "multi-qualifier multi-split", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "99", + "value": "dg==", + "valueSize": 11, + "commitRow": false + }, + { + "value": "YQ==", + "valueSize": 11, + "commitRow": false + }, + { + "value": "bHVlLVZBTF8x", + "commitRow": false + }, + { + "qualifier": "RA==", + "timestampMicros": "98", + "value": "dg==", + "valueSize": 11, + "commitRow": false + }, + { + "value": "YQ==", + "valueSize": 11, + "commitRow": false + }, + { + "value": "bHVlLVZBTF8y", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "99", + "value": "value-VAL_1" + }, + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "D", + "timestampMicros": "98", + "value": "value-VAL_2" + } + ] + }, + { + "description": "multi-family split", + "chunks": [ + { + "rowKey": "Uks=", + 
"familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "99", + "value": "dg==", + "valueSize": 11, + "commitRow": false + }, + { + "value": "YWx1ZS1WQUxfMQ==", + "commitRow": false + }, + { + "familyName": "B", + "qualifier": "RQ==", + "timestampMicros": "98", + "value": "dg==", + "valueSize": 11, + "commitRow": false + }, + { + "value": "YWx1ZS1WQUxfMg==", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "99", + "value": "value-VAL_1" + }, + { + "rowKey": "RK", + "familyName": "B", + "qualifier": "E", + "timestampMicros": "98", + "value": "value-VAL_2" + } + ] + }, + { + "description": "invalid - no commit between rows", + "chunks": [ + { + "rowKey": "UktfMQ==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": false + }, + { + "rowKey": "UktfMg==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": false + } + ], + "results": [ + { + "error": true + } + ] + }, + { + "description": "invalid - no commit after first row", + "chunks": [ + { + "rowKey": "UktfMQ==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": false + }, + { + "rowKey": "UktfMg==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": true + } + ], + "results": [ + { + "error": true + } + ] + }, + { + "description": "invalid - last row missing commit", + "chunks": [ + { + "rowKey": "UktfMQ==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": true + }, + { + "rowKey": "UktfMg==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": false + } + ], + "results": [ + { + "rowKey": "RK_1", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL" + }, + { + "error": true + } + ] + }, + { + "description": "invalid - duplicate row key", + "chunks": [ + { + "rowKey": "UktfMQ==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": true + }, + { + "rowKey": "UktfMQ==", + "familyName": "B", + "qualifier": "RA==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK_1", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL" + }, + { + "error": true + } + ] + }, + { + "description": "invalid - new row missing row key", + "chunks": [ + { + "rowKey": "UktfMQ==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": true + }, + { + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK_1", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL" + }, + { + "error": true + } + ] + }, + { + "description": "two rows", + "chunks": [ + { + "rowKey": "UktfMQ==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": true + }, + { + "rowKey": "UktfMg==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK_1", + "familyName": "A", + "qualifier": "C", + "timestampMicros": 
"100", + "value": "value-VAL" + }, + { + "rowKey": "RK_2", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL" + } + ] + }, + { + "description": "two rows implicit timestamp", + "chunks": [ + { + "rowKey": "UktfMQ==", + "familyName": "A", + "qualifier": "Qw==", + "value": "dmFsdWUtVkFM", + "commitRow": true + }, + { + "rowKey": "UktfMg==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK_1", + "familyName": "A", + "qualifier": "C", + "value": "value-VAL" + }, + { + "rowKey": "RK_2", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL" + } + ] + }, + { + "description": "two rows empty value", + "chunks": [ + { + "rowKey": "UktfMQ==", + "familyName": "A", + "qualifier": "Qw==", + "commitRow": true + }, + { + "rowKey": "UktfMg==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK_1", + "familyName": "A", + "qualifier": "C" + }, + { + "rowKey": "RK_2", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL" + } + ] + }, + { + "description": "two rows, one with multiple cells", + "chunks": [ + { + "rowKey": "UktfMQ==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "99", + "value": "dmFsdWUtVkFMXzE=", + "commitRow": false + }, + { + "timestampMicros": "98", + "value": "dmFsdWUtVkFMXzI=", + "commitRow": true + }, + { + "rowKey": "UktfMg==", + "familyName": "B", + "qualifier": "RA==", + "timestampMicros": "97", + "value": "dmFsdWUtVkFMXzM=", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK_1", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "99", + "value": "value-VAL_1" + }, + { + "rowKey": "RK_1", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "98", + "value": "value-VAL_2" + }, + { + "rowKey": "RK_2", + "familyName": "B", + "qualifier": "D", + "timestampMicros": "97", + "value": "value-VAL_3" + } + ] + }, + { + "description": "two rows, multiple cells", + "chunks": [ + { + "rowKey": "UktfMQ==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "99", + "value": "dmFsdWUtVkFMXzE=", + "commitRow": false + }, + { + "qualifier": "RA==", + "timestampMicros": "98", + "value": "dmFsdWUtVkFMXzI=", + "commitRow": true + }, + { + "rowKey": "UktfMg==", + "familyName": "B", + "qualifier": "RQ==", + "timestampMicros": "97", + "value": "dmFsdWUtVkFMXzM=", + "commitRow": false + }, + { + "qualifier": "Rg==", + "timestampMicros": "96", + "value": "dmFsdWUtVkFMXzQ=", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK_1", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "99", + "value": "value-VAL_1" + }, + { + "rowKey": "RK_1", + "familyName": "A", + "qualifier": "D", + "timestampMicros": "98", + "value": "value-VAL_2" + }, + { + "rowKey": "RK_2", + "familyName": "B", + "qualifier": "E", + "timestampMicros": "97", + "value": "value-VAL_3" + }, + { + "rowKey": "RK_2", + "familyName": "B", + "qualifier": "F", + "timestampMicros": "96", + "value": "value-VAL_4" + } + ] + }, + { + "description": "two rows, multiple cells, multiple families", + "chunks": [ + { + "rowKey": "UktfMQ==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "99", + "value": "dmFsdWUtVkFMXzE=", + "commitRow": false + }, + { + "familyName": "B", + "qualifier": "RQ==", + "timestampMicros": 
"98", + "value": "dmFsdWUtVkFMXzI=", + "commitRow": true + }, + { + "rowKey": "UktfMg==", + "familyName": "M", + "qualifier": "Tw==", + "timestampMicros": "97", + "value": "dmFsdWUtVkFMXzM=", + "commitRow": false + }, + { + "familyName": "N", + "qualifier": "UA==", + "timestampMicros": "96", + "value": "dmFsdWUtVkFMXzQ=", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK_1", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "99", + "value": "value-VAL_1" + }, + { + "rowKey": "RK_1", + "familyName": "B", + "qualifier": "E", + "timestampMicros": "98", + "value": "value-VAL_2" + }, + { + "rowKey": "RK_2", + "familyName": "M", + "qualifier": "O", + "timestampMicros": "97", + "value": "value-VAL_3" + }, + { + "rowKey": "RK_2", + "familyName": "N", + "qualifier": "P", + "timestampMicros": "96", + "value": "value-VAL_4" + } + ] + }, + { + "description": "two rows, four cells, 2 labels", + "chunks": [ + { + "rowKey": "UktfMQ==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "99", + "labels": [ + "L_1" + ], + "value": "dmFsdWUtVkFMXzE=", + "commitRow": false + }, + { + "timestampMicros": "98", + "value": "dmFsdWUtVkFMXzI=", + "commitRow": true + }, + { + "rowKey": "UktfMg==", + "familyName": "B", + "qualifier": "RA==", + "timestampMicros": "97", + "labels": [ + "L_3" + ], + "value": "dmFsdWUtVkFMXzM=", + "commitRow": false + }, + { + "timestampMicros": "96", + "value": "dmFsdWUtVkFMXzQ=", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK_1", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "99", + "value": "value-VAL_1", + "label": "L_1" + }, + { + "rowKey": "RK_1", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "98", + "value": "value-VAL_2" + }, + { + "rowKey": "RK_2", + "familyName": "B", + "qualifier": "D", + "timestampMicros": "97", + "value": "value-VAL_3", + "label": "L_3" + }, + { + "rowKey": "RK_2", + "familyName": "B", + "qualifier": "D", + "timestampMicros": "96", + "value": "value-VAL_4" + } + ] + }, + { + "description": "two rows with splits, same timestamp", + "chunks": [ + { + "rowKey": "UktfMQ==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dg==", + "valueSize": 11, + "commitRow": false + }, + { + "value": "YWx1ZS1WQUxfMQ==", + "commitRow": true + }, + { + "rowKey": "UktfMg==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dg==", + "valueSize": 11, + "commitRow": false + }, + { + "value": "YWx1ZS1WQUxfMg==", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK_1", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL_1" + }, + { + "rowKey": "RK_2", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL_2" + } + ] + }, + { + "description": "invalid - bare reset", + "chunks": [ + { + "resetRow": true + } + ], + "results": [ + { + "error": true + } + ] + }, + { + "description": "invalid - bad reset, no commit", + "chunks": [ + { + "resetRow": true + }, + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": false + } + ], + "results": [ + { + "error": true + } + ] + }, + { + "description": "invalid - missing key after reset", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": false + }, + { + "resetRow": true + }, + { + "timestampMicros": "100", + "value": 
"dmFsdWUtVkFM", + "commitRow": true + } + ], + "results": [ + { + "error": true + } + ] + }, + { + "description": "no data after reset", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": false + }, + { + "resetRow": true + } + ] + }, + { + "description": "simple reset", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": false + }, + { + "resetRow": true + }, + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL" + } + ] + }, + { + "description": "reset to new val", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFMXzE=", + "commitRow": false + }, + { + "resetRow": true + }, + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFMXzI=", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL_2" + } + ] + }, + { + "description": "reset to new qual", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFMXzE=", + "commitRow": false + }, + { + "resetRow": true + }, + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "RA==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFMXzE=", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "D", + "timestampMicros": "100", + "value": "value-VAL_1" + } + ] + }, + { + "description": "reset with splits", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFMXzE=", + "commitRow": false + }, + { + "timestampMicros": "98", + "value": "dmFsdWUtVkFMXzI=", + "commitRow": false + }, + { + "resetRow": true + }, + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFMXzI=", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL_2" + } + ] + }, + { + "description": "reset two cells", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFMXzE=", + "commitRow": false + }, + { + "resetRow": true + }, + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFMXzI=", + "commitRow": false + }, + { + "timestampMicros": "97", + "value": "dmFsdWUtVkFMXzM=", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL_2" + }, + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "97", + "value": "value-VAL_3" + } + ] + }, + { + "description": "two resets", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFMXzE=", + "commitRow": false + }, + { + "resetRow": true + }, + { + "rowKey": "Uks=", + 
"familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFMXzI=", + "commitRow": false + }, + { + "resetRow": true + }, + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFMXzM=", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL_3" + } + ] + }, + { + "description": "reset then two cells", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFMXzE=", + "commitRow": false + }, + { + "resetRow": true + }, + { + "rowKey": "Uks=", + "familyName": "B", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFMXzI=", + "commitRow": false + }, + { + "qualifier": "RA==", + "timestampMicros": "97", + "value": "dmFsdWUtVkFMXzM=", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK", + "familyName": "B", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL_2" + }, + { + "rowKey": "RK", + "familyName": "B", + "qualifier": "D", + "timestampMicros": "97", + "value": "value-VAL_3" + } + ] + }, + { + "description": "reset to new row", + "chunks": [ + { + "rowKey": "UktfMQ==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFMXzE=", + "commitRow": false + }, + { + "resetRow": true + }, + { + "rowKey": "UktfMg==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFMXzI=", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK_2", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL_2" + } + ] + }, + { + "description": "reset in between chunks", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "labels": [ + "L" + ], + "value": "dg==", + "valueSize": 10, + "commitRow": false + }, + { + "value": "YQ==", + "valueSize": 10, + "commitRow": false + }, + { + "resetRow": true + }, + { + "rowKey": "UktfMQ==", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFMXzE=", + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK_1", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL_1" + } + ] + }, + { + "description": "invalid - reset with chunk", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "labels": [ + "L" + ], + "value": "dg==", + "valueSize": 10, + "commitRow": false + }, + { + "value": "YQ==", + "valueSize": 10, + "resetRow": true + } + ], + "results": [ + { + "error": true + } + ] + }, + { + "description": "invalid - commit with chunk", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "labels": [ + "L" + ], + "value": "dg==", + "valueSize": 10, + "commitRow": false + }, + { + "value": "YQ==", + "valueSize": 10, + "commitRow": true + } + ], + "results": [ + { + "error": true + } + ] + }, + { + "description": "empty cell chunk", + "chunks": [ + { + "rowKey": "Uks=", + "familyName": "A", + "qualifier": "Qw==", + "timestampMicros": "100", + "value": "dmFsdWUtVkFM", + "commitRow": false + }, + { + "commitRow": false + }, + { + "commitRow": true + } + ], + "results": [ + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C", + "timestampMicros": "100", + "value": "value-VAL" 
+ }, + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C" + }, + { + "rowKey": "RK", + "familyName": "A", + "qualifier": "C" + } + ] + } + ] +} diff --git a/packages/google-cloud-bigtable/tests/unit/test_app_profile.py b/packages/google-cloud-bigtable/tests/unit/v2_client/test_app_profile.py similarity index 100% rename from packages/google-cloud-bigtable/tests/unit/test_app_profile.py rename to packages/google-cloud-bigtable/tests/unit/v2_client/test_app_profile.py diff --git a/packages/google-cloud-bigtable/tests/unit/test_backup.py b/packages/google-cloud-bigtable/tests/unit/v2_client/test_backup.py similarity index 100% rename from packages/google-cloud-bigtable/tests/unit/test_backup.py rename to packages/google-cloud-bigtable/tests/unit/v2_client/test_backup.py diff --git a/packages/google-cloud-bigtable/tests/unit/test_batcher.py b/packages/google-cloud-bigtable/tests/unit/v2_client/test_batcher.py similarity index 98% rename from packages/google-cloud-bigtable/tests/unit/test_batcher.py rename to packages/google-cloud-bigtable/tests/unit/v2_client/test_batcher.py index 741d9f2825e8..fcf6069725fc 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_batcher.py +++ b/packages/google-cloud-bigtable/tests/unit/v2_client/test_batcher.py @@ -198,7 +198,7 @@ def test_mutations_batcher_response_with_error_codes(): mocked_response = [Status(code=1), Status(code=5)] - with mock.patch("tests.unit.test_batcher._Table") as mocked_table: + with mock.patch("tests.unit.v2_client.test_batcher._Table") as mocked_table: table = mocked_table.return_value mutation_batcher = MutationsBatcher(table=table) diff --git a/packages/google-cloud-bigtable/tests/unit/test_client.py b/packages/google-cloud-bigtable/tests/unit/v2_client/test_client.py similarity index 100% rename from packages/google-cloud-bigtable/tests/unit/test_client.py rename to packages/google-cloud-bigtable/tests/unit/v2_client/test_client.py diff --git a/packages/google-cloud-bigtable/tests/unit/test_cluster.py b/packages/google-cloud-bigtable/tests/unit/v2_client/test_cluster.py similarity index 100% rename from packages/google-cloud-bigtable/tests/unit/test_cluster.py rename to packages/google-cloud-bigtable/tests/unit/v2_client/test_cluster.py diff --git a/packages/google-cloud-bigtable/tests/unit/test_column_family.py b/packages/google-cloud-bigtable/tests/unit/v2_client/test_column_family.py similarity index 99% rename from packages/google-cloud-bigtable/tests/unit/test_column_family.py rename to packages/google-cloud-bigtable/tests/unit/v2_client/test_column_family.py index 80b05d7443bc..e4f74e26463b 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_column_family.py +++ b/packages/google-cloud-bigtable/tests/unit/v2_client/test_column_family.py @@ -336,7 +336,7 @@ def _create_test_helper(gc_rule=None): from google.cloud.bigtable_admin_v2.types import ( bigtable_table_admin as table_admin_v2_pb2, ) - from tests.unit._testing import _FakeStub + from ._testing import _FakeStub from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( BigtableTableAdminClient, ) @@ -404,7 +404,7 @@ def test_column_family_create_with_gc_rule(): def _update_test_helper(gc_rule=None): - from tests.unit._testing import _FakeStub + from ._testing import _FakeStub from google.cloud.bigtable_admin_v2.types import ( bigtable_table_admin as table_admin_v2_pb2, ) @@ -478,7 +478,7 @@ def test_column_family_delete(): from google.cloud.bigtable_admin_v2.types import ( bigtable_table_admin as table_admin_v2_pb2, ) - from 
tests.unit._testing import _FakeStub + from ._testing import _FakeStub from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( BigtableTableAdminClient, ) diff --git a/packages/google-cloud-bigtable/tests/unit/test_encryption_info.py b/packages/google-cloud-bigtable/tests/unit/v2_client/test_encryption_info.py similarity index 100% rename from packages/google-cloud-bigtable/tests/unit/test_encryption_info.py rename to packages/google-cloud-bigtable/tests/unit/v2_client/test_encryption_info.py diff --git a/packages/google-cloud-bigtable/tests/unit/test_error.py b/packages/google-cloud-bigtable/tests/unit/v2_client/test_error.py similarity index 100% rename from packages/google-cloud-bigtable/tests/unit/test_error.py rename to packages/google-cloud-bigtable/tests/unit/v2_client/test_error.py diff --git a/packages/google-cloud-bigtable/tests/unit/test_instance.py b/packages/google-cloud-bigtable/tests/unit/v2_client/test_instance.py similarity index 100% rename from packages/google-cloud-bigtable/tests/unit/test_instance.py rename to packages/google-cloud-bigtable/tests/unit/v2_client/test_instance.py diff --git a/packages/google-cloud-bigtable/tests/unit/test_policy.py b/packages/google-cloud-bigtable/tests/unit/v2_client/test_policy.py similarity index 100% rename from packages/google-cloud-bigtable/tests/unit/test_policy.py rename to packages/google-cloud-bigtable/tests/unit/v2_client/test_policy.py diff --git a/packages/google-cloud-bigtable/tests/unit/test_row.py b/packages/google-cloud-bigtable/tests/unit/v2_client/test_row.py similarity index 99% rename from packages/google-cloud-bigtable/tests/unit/test_row.py rename to packages/google-cloud-bigtable/tests/unit/v2_client/test_row.py index 49bbfc45ce79..f04802f5cc07 100644 --- a/packages/google-cloud-bigtable/tests/unit/test_row.py +++ b/packages/google-cloud-bigtable/tests/unit/v2_client/test_row.py @@ -480,7 +480,7 @@ def test_conditional_row_commit_too_many_mutations(): def test_conditional_row_commit_no_mutations(): - from tests.unit._testing import _FakeStub + from ._testing import _FakeStub project_id = "project-id" row_key = b"row_key" @@ -607,7 +607,7 @@ def mock_parse_rmw_row_response(row_response): def test_append_row_commit_no_rules(): - from tests.unit._testing import _FakeStub + from ._testing import _FakeStub project_id = "project-id" row_key = b"row_key" diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_data.py b/packages/google-cloud-bigtable/tests/unit/v2_client/test_row_data.py similarity index 100% rename from packages/google-cloud-bigtable/tests/unit/test_row_data.py rename to packages/google-cloud-bigtable/tests/unit/v2_client/test_row_data.py diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_filters.py b/packages/google-cloud-bigtable/tests/unit/v2_client/test_row_filters.py similarity index 100% rename from packages/google-cloud-bigtable/tests/unit/test_row_filters.py rename to packages/google-cloud-bigtable/tests/unit/v2_client/test_row_filters.py diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_merger.py b/packages/google-cloud-bigtable/tests/unit/v2_client/test_row_merger.py similarity index 100% rename from packages/google-cloud-bigtable/tests/unit/test_row_merger.py rename to packages/google-cloud-bigtable/tests/unit/v2_client/test_row_merger.py diff --git a/packages/google-cloud-bigtable/tests/unit/test_row_set.py b/packages/google-cloud-bigtable/tests/unit/v2_client/test_row_set.py similarity index 100% rename from 
packages/google-cloud-bigtable/tests/unit/test_row_set.py rename to packages/google-cloud-bigtable/tests/unit/v2_client/test_row_set.py diff --git a/packages/google-cloud-bigtable/tests/unit/test_table.py b/packages/google-cloud-bigtable/tests/unit/v2_client/test_table.py similarity index 100% rename from packages/google-cloud-bigtable/tests/unit/test_table.py rename to packages/google-cloud-bigtable/tests/unit/v2_client/test_table.py diff --git a/python-api-core b/python-api-core new file mode 160000 index 000000000000..17ff5f1d83a9 --- /dev/null +++ b/python-api-core @@ -0,0 +1 @@ +Subproject commit 17ff5f1d83a9a6f50a0226fb0e794634bd584f17 From 88fe624028d6e5d8d1de567388df29d4f58383f0 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Tue, 6 Feb 2024 11:17:36 -0800 Subject: [PATCH 775/892] build(deps): bump cryptography from 41.0.6 to 42.0.0 in /synthtool/gcp/templates/python_library/.kokoro (#933) Source-Link: https://github.com/googleapis/synthtool/commit/e13b22b1f660c80e4c3e735a9177d2f16c4b8bdc Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:97b671488ad548ef783a452a9e1276ac10f144d5ae56d98cc4bf77ba504082b4 Co-authored-by: Owl Bot --- .../.github/.OwlBot.lock.yaml | 4 +- .../.kokoro/requirements.txt | 57 +++++++++++-------- 2 files changed, 35 insertions(+), 26 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index d8a1bbca7179..2aefd0e91175 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:5ea6d0ab82c956b50962f91d94e206d3921537ae5fe1549ec5326381d8905cfa -# created: 2024-01-15T16:32:08.142785673Z + digest: sha256:97b671488ad548ef783a452a9e1276ac10f144d5ae56d98cc4bf77ba504082b4 +# created: 2024-02-06T03:20:16.660474034Z diff --git a/packages/google-cloud-bigtable/.kokoro/requirements.txt b/packages/google-cloud-bigtable/.kokoro/requirements.txt index bb3d6ca38b14..8c11c9f3e9b6 100644 --- a/packages/google-cloud-bigtable/.kokoro/requirements.txt +++ b/packages/google-cloud-bigtable/.kokoro/requirements.txt @@ -93,30 +93,39 @@ colorlog==6.7.0 \ # via # gcp-docuploader # nox -cryptography==41.0.6 \ - --hash=sha256:068bc551698c234742c40049e46840843f3d98ad7ce265fd2bd4ec0d11306596 \ - --hash=sha256:0f27acb55a4e77b9be8d550d762b0513ef3fc658cd3eb15110ebbcbd626db12c \ - --hash=sha256:2132d5865eea673fe6712c2ed5fb4fa49dba10768bb4cc798345748380ee3660 \ - --hash=sha256:3288acccef021e3c3c10d58933f44e8602cf04dba96d9796d70d537bb2f4bbc4 \ - --hash=sha256:35f3f288e83c3f6f10752467c48919a7a94b7d88cc00b0668372a0d2ad4f8ead \ - --hash=sha256:398ae1fc711b5eb78e977daa3cbf47cec20f2c08c5da129b7a296055fbb22aed \ - --hash=sha256:422e3e31d63743855e43e5a6fcc8b4acab860f560f9321b0ee6269cc7ed70cc3 \ - --hash=sha256:48783b7e2bef51224020efb61b42704207dde583d7e371ef8fc2a5fb6c0aabc7 \ - --hash=sha256:4d03186af98b1c01a4eda396b137f29e4e3fb0173e30f885e27acec8823c1b09 \ - --hash=sha256:5daeb18e7886a358064a68dbcaf441c036cbdb7da52ae744e7b9207b04d3908c \ - --hash=sha256:60e746b11b937911dc70d164060d28d273e31853bb359e2b2033c9e93e6f3c43 \ - --hash=sha256:742ae5e9a2310e9dade7932f9576606836ed174da3c7d26bc3d3ab4bd49b9f65 \ - --hash=sha256:7e00fb556bda398b99b0da289ce7053639d33b572847181d6483ad89835115f6 \ - 
--hash=sha256:85abd057699b98fce40b41737afb234fef05c67e116f6f3650782c10862c43da \ - --hash=sha256:8efb2af8d4ba9dbc9c9dd8f04d19a7abb5b49eab1f3694e7b5a16a5fc2856f5c \ - --hash=sha256:ae236bb8760c1e55b7a39b6d4d32d2279bc6c7c8500b7d5a13b6fb9fc97be35b \ - --hash=sha256:afda76d84b053923c27ede5edc1ed7d53e3c9f475ebaf63c68e69f1403c405a8 \ - --hash=sha256:b27a7fd4229abef715e064269d98a7e2909ebf92eb6912a9603c7e14c181928c \ - --hash=sha256:b648fe2a45e426aaee684ddca2632f62ec4613ef362f4d681a9a6283d10e079d \ - --hash=sha256:c5a550dc7a3b50b116323e3d376241829fd326ac47bc195e04eb33a8170902a9 \ - --hash=sha256:da46e2b5df770070412c46f87bac0849b8d685c5f2679771de277a422c7d0b86 \ - --hash=sha256:f39812f70fc5c71a15aa3c97b2bbe213c3f2a460b79bd21c40d033bb34a9bf36 \ - --hash=sha256:ff369dd19e8fe0528b02e8df9f2aeb2479f89b1270d90f96a63500afe9af5cae +cryptography==42.0.0 \ + --hash=sha256:0a68bfcf57a6887818307600c3c0ebc3f62fbb6ccad2240aa21887cda1f8df1b \ + --hash=sha256:146e971e92a6dd042214b537a726c9750496128453146ab0ee8971a0299dc9bd \ + --hash=sha256:14e4b909373bc5bf1095311fa0f7fcabf2d1a160ca13f1e9e467be1ac4cbdf94 \ + --hash=sha256:206aaf42e031b93f86ad60f9f5d9da1b09164f25488238ac1dc488334eb5e221 \ + --hash=sha256:3005166a39b70c8b94455fdbe78d87a444da31ff70de3331cdec2c568cf25b7e \ + --hash=sha256:324721d93b998cb7367f1e6897370644751e5580ff9b370c0a50dc60a2003513 \ + --hash=sha256:33588310b5c886dfb87dba5f013b8d27df7ffd31dc753775342a1e5ab139e59d \ + --hash=sha256:35cf6ed4c38f054478a9df14f03c1169bb14bd98f0b1705751079b25e1cb58bc \ + --hash=sha256:3ca482ea80626048975360c8e62be3ceb0f11803180b73163acd24bf014133a0 \ + --hash=sha256:56ce0c106d5c3fec1038c3cca3d55ac320a5be1b44bf15116732d0bc716979a2 \ + --hash=sha256:5a217bca51f3b91971400890905a9323ad805838ca3fa1e202a01844f485ee87 \ + --hash=sha256:678cfa0d1e72ef41d48993a7be75a76b0725d29b820ff3cfd606a5b2b33fda01 \ + --hash=sha256:69fd009a325cad6fbfd5b04c711a4da563c6c4854fc4c9544bff3088387c77c0 \ + --hash=sha256:6cf9b76d6e93c62114bd19485e5cb003115c134cf9ce91f8ac924c44f8c8c3f4 \ + --hash=sha256:74f18a4c8ca04134d2052a140322002fef535c99cdbc2a6afc18a8024d5c9d5b \ + --hash=sha256:85f759ed59ffd1d0baad296e72780aa62ff8a71f94dc1ab340386a1207d0ea81 \ + --hash=sha256:87086eae86a700307b544625e3ba11cc600c3c0ef8ab97b0fda0705d6db3d4e3 \ + --hash=sha256:8814722cffcfd1fbd91edd9f3451b88a8f26a5fd41b28c1c9193949d1c689dc4 \ + --hash=sha256:8fedec73d590fd30c4e3f0d0f4bc961aeca8390c72f3eaa1a0874d180e868ddf \ + --hash=sha256:9515ea7f596c8092fdc9902627e51b23a75daa2c7815ed5aa8cf4f07469212ec \ + --hash=sha256:988b738f56c665366b1e4bfd9045c3efae89ee366ca3839cd5af53eaa1401bce \ + --hash=sha256:a2a8d873667e4fd2f34aedab02ba500b824692c6542e017075a2efc38f60a4c0 \ + --hash=sha256:bd7cf7a8d9f34cc67220f1195884151426ce616fdc8285df9054bfa10135925f \ + --hash=sha256:bdce70e562c69bb089523e75ef1d9625b7417c6297a76ac27b1b8b1eb51b7d0f \ + --hash=sha256:be14b31eb3a293fc6e6aa2807c8a3224c71426f7c4e3639ccf1a2f3ffd6df8c3 \ + --hash=sha256:be41b0c7366e5549265adf2145135dca107718fa44b6e418dc7499cfff6b4689 \ + --hash=sha256:c310767268d88803b653fffe6d6f2f17bb9d49ffceb8d70aed50ad45ea49ab08 \ + --hash=sha256:c58115384bdcfe9c7f644c72f10f6f42bed7cf59f7b52fe1bf7ae0a622b3a139 \ + --hash=sha256:c640b0ef54138fde761ec99a6c7dc4ce05e80420262c20fa239e694ca371d434 \ + --hash=sha256:ca20550bb590db16223eb9ccc5852335b48b8f597e2f6f0878bbfd9e7314eb17 \ + --hash=sha256:d97aae66b7de41cdf5b12087b5509e4e9805ed6f562406dfcf60e8481a9a28f8 \ + --hash=sha256:e9326ca78111e4c645f7e49cbce4ed2f3f85e17b61a563328c85a5208cf34440 # via # gcp-releasetool # secretstorage From 
d72ea4ca7860e31ea4c92dfb8bb2c2919d707566 Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Tue, 6 Feb 2024 20:18:30 +0100 Subject: [PATCH 776/892] chore(deps): update all dependencies (#921) --- .../.github/workflows/conformance.yaml | 8 ++++---- .../.github/workflows/system_emulated.yml | 2 +- .../samples/beam/requirements-test.txt | 2 +- .../google-cloud-bigtable/samples/beam/requirements.txt | 2 +- .../samples/hello/requirements-test.txt | 2 +- .../samples/hello_happybase/requirements-test.txt | 2 +- .../samples/instanceadmin/requirements-test.txt | 2 +- .../samples/metricscaler/requirements-test.txt | 2 +- .../samples/metricscaler/requirements.txt | 2 +- .../samples/quickstart/requirements-test.txt | 2 +- .../samples/quickstart_happybase/requirements-test.txt | 2 +- .../samples/snippets/deletes/requirements-test.txt | 2 +- .../samples/snippets/filters/requirements-test.txt | 2 +- .../samples/snippets/reads/requirements-test.txt | 2 +- .../samples/snippets/writes/requirements-test.txt | 2 +- .../samples/tableadmin/requirements-test.txt | 2 +- 16 files changed, 19 insertions(+), 19 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/workflows/conformance.yaml b/packages/google-cloud-bigtable/.github/workflows/conformance.yaml index 63023d162033..68545cbec2ec 100644 --- a/packages/google-cloud-bigtable/.github/workflows/conformance.yaml +++ b/packages/google-cloud-bigtable/.github/workflows/conformance.yaml @@ -30,18 +30,18 @@ jobs: fail-fast: false name: "${{ matrix.client-type }} Client / Python ${{ matrix.py-version }} / Test Tag ${{ matrix.test-version }}" steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 name: "Checkout python-bigtable" - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 name: "Checkout conformance tests" with: repository: googleapis/cloud-bigtable-clients-test ref: ${{ matrix.test-version }} path: cloud-bigtable-clients-test - - uses: actions/setup-python@v4 + - uses: actions/setup-python@v5 with: python-version: ${{ matrix.py-version }} - - uses: actions/setup-go@v4 + - uses: actions/setup-go@v5 with: go-version: '>=1.20.2' - run: chmod +x .kokoro/conformance.sh diff --git a/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml b/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml index 7669901c94d4..fa5ef15af276 100644 --- a/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml +++ b/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml @@ -20,7 +20,7 @@ jobs: python-version: '3.8' - name: Setup GCloud SDK - uses: google-github-actions/setup-gcloud@v2.0.0 + uses: google-github-actions/setup-gcloud@v2.1.0 - name: Install / run Nox run: | diff --git a/packages/google-cloud-bigtable/samples/beam/requirements-test.txt b/packages/google-cloud-bigtable/samples/beam/requirements-test.txt index cb87efc0ff71..8075a1ec560e 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements-test.txt @@ -1 +1 @@ -pytest==7.4.4 +pytest==8.0.0 diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index 813fc8d2bd93..70b1371ae9fc 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ -apache-beam==2.52.0 +apache-beam==2.53.0 google-cloud-bigtable==2.22.0 google-cloud-core==2.4.1 diff --git 
a/packages/google-cloud-bigtable/samples/hello/requirements-test.txt b/packages/google-cloud-bigtable/samples/hello/requirements-test.txt index cb87efc0ff71..8075a1ec560e 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements-test.txt @@ -1 +1 @@ -pytest==7.4.4 +pytest==8.0.0 diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt b/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt index cb87efc0ff71..8075a1ec560e 100644 --- a/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt @@ -1 +1 @@ -pytest==7.4.4 +pytest==8.0.0 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt index cb87efc0ff71..8075a1ec560e 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt @@ -1 +1 @@ -pytest==7.4.4 +pytest==8.0.0 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt index c0d4f70035bc..8b8270b6c5ba 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt @@ -1,3 +1,3 @@ -pytest==7.4.4 +pytest==8.0.0 mock==5.1.0 google-cloud-testutils diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index 38c355ce349e..be3b2b2223e5 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ google-cloud-bigtable==2.22.0 -google-cloud-monitoring==2.18.0 +google-cloud-monitoring==2.19.0 diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt index cb87efc0ff71..8075a1ec560e 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt @@ -1 +1 @@ -pytest==7.4.4 +pytest==8.0.0 diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt b/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt index cb87efc0ff71..8075a1ec560e 100644 --- a/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt @@ -1 +1 @@ -pytest==7.4.4 +pytest==8.0.0 diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt index cb87efc0ff71..8075a1ec560e 100644 --- a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt @@ -1 +1 @@ -pytest==7.4.4 +pytest==8.0.0 diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt index cb87efc0ff71..8075a1ec560e 
100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt @@ -1 +1 @@ -pytest==7.4.4 +pytest==8.0.0 diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt index cb87efc0ff71..8075a1ec560e 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt @@ -1 +1 @@ -pytest==7.4.4 +pytest==8.0.0 diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt index 43b02e724796..aaa563abc833 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt @@ -1,2 +1,2 @@ backoff==2.2.1 -pytest==7.4.4 +pytest==8.0.0 diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt index aa143f59dfbe..b4d30f50557b 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt @@ -1,2 +1,2 @@ -pytest==7.4.4 +pytest==8.0.0 google-cloud-testutils==1.4.0 From 54370f52331f5926a75ac94e688480c78e695f16 Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Wed, 7 Feb 2024 11:26:24 -0600 Subject: [PATCH 777/892] chore(docs): add preview docstrings to v3 client (#926) --- packages/google-cloud-bigtable/README.rst | 29 ++++--- .../google/cloud/bigtable/data/README.rst | 11 +++ .../cloud/bigtable/data/_async/client.py | 86 +++++++++++++------ 3 files changed, 90 insertions(+), 36 deletions(-) create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/data/README.rst diff --git a/packages/google-cloud-bigtable/README.rst b/packages/google-cloud-bigtable/README.rst index 5f7d5809d130..2bc151e957cd 100644 --- a/packages/google-cloud-bigtable/README.rst +++ b/packages/google-cloud-bigtable/README.rst @@ -20,6 +20,24 @@ Analytics, Maps, and Gmail. .. _Client Library Documentation: https://googleapis.dev/python/bigtable/latest .. _Product Documentation: https://cloud.google.com/bigtable/docs + +Preview Async Data Client +------------------------- + +:code:`v2.23.0` includes a preview release of the new :code:`BigtableDataClientAsync` client, accessible at the import path +:code:`google.cloud.bigtable.data`. + +The new client brings a simplified API and increased performance using asyncio, with a corresponding synchronous surface +coming soon. The new client is focused on the data API (i.e. reading and writing Bigtable data), with admin operations +remaining in the existing client. + +:code:`BigtableDataClientAsync` is currently in preview, and is not recommended for production use. + +Feedback and bug reports are welcome at cbt-python-client-v3-feedback@google.com, +or through the Github `issue tracker`_. + +.. _issue tracker: https://github.com/googleapis/python-bigtable/issues + Quick Start ----------- @@ -94,14 +112,3 @@ Next Steps to see other available methods on the client. - Read the `Product documentation`_ to learn more about the product and see How-to Guides. 
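For orientation, a minimal sketch of what the preview surface described above looks like in use. The project, instance, and table names are placeholders, error handling is omitted, and the calls shown (``get_table``, ``read_rows``, ``close``) are taken from the docstrings added later in this patch:

```
import asyncio

from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery


async def main():
    # The client must be created inside a running event loop.
    client = BigtableDataClientAsync(project="my-project")
    table = client.get_table("my-instance", "my-table")
    try:
        # read_rows runs the query to completion and returns a list of Rows.
        rows = await table.read_rows(ReadRowsQuery(limit=10))
        for row in rows:
            print(row.row_key)
    finally:
        await client.close()


asyncio.run(main())
```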
- -``google-cloud-happybase`` --------------------------- - -In addition to the core ``google-cloud-bigtable``, we provide a -`google-cloud-happybase -`__ library -with the same interface as the popular `HappyBase -`__ library. Unlike HappyBase, -``google-cloud-happybase`` uses ``google-cloud-bigtable`` under the covers, -rather than Apache HBase. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/README.rst b/packages/google-cloud-bigtable/google/cloud/bigtable/data/README.rst new file mode 100644 index 000000000000..7a05cf913f65 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/README.rst @@ -0,0 +1,11 @@ +Async Data Client Preview +========================= + +This new client is currently in preview, and is not recommended for production use. + +Synchronous API surface and usage examples coming soon + +Feedback and bug reports are welcome at cbt-python-client-v3-feedback@google.com, +or through the Github `issue tracker`_. + +.. _issue tracker: https://github.com/googleapis/python-bigtable/issues diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py index da54b37cb29c..ed14c618d836 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py @@ -101,6 +101,9 @@ def __init__( Client should be created within an async context (running event loop) + Warning: BigtableDataClientAsync is currently in preview, and is not + yet recommended for production use. + Args: project: the project which the client acts on behalf of. If not passed, falls back to the default inferred @@ -563,6 +566,9 @@ async def read_rows_stream( Failed requests within operation_timeout will be retried based on the retryable_errors list until operation_timeout is reached. + Warning: BigtableDataClientAsync is currently in preview, and is not + yet recommended for production use. + Args: - query: contains details about which rows to return - operation_timeout: the time budget for the entire operation, in seconds. @@ -614,6 +620,9 @@ async def read_rows( Failed requests within operation_timeout will be retried based on the retryable_errors list until operation_timeout is reached. + Warning: BigtableDataClientAsync is currently in preview, and is not + yet recommended for production use. + Args: - query: contains details about which rows to return - operation_timeout: the time budget for the entire operation, in seconds. @@ -660,6 +669,9 @@ async def read_row( Failed requests within operation_timeout will be retried based on the retryable_errors list until operation_timeout is reached. + Warning: BigtableDataClientAsync is currently in preview, and is not + yet recommended for production use. + Args: - query: contains details about which rows to return - operation_timeout: the time budget for the entire operation, in seconds. @@ -715,6 +727,9 @@ async def read_rows_sharded( results = await table.read_rows_sharded(shard_queries) ``` + Warning: BigtableDataClientAsync is currently in preview, and is not + yet recommended for production use. + Args: - sharded_query: a sharded query to execute - operation_timeout: the time budget for the entire operation, in seconds. @@ -795,6 +810,9 @@ async def row_exists( Return a boolean indicating whether the specified row exists in the table. 
uses the filters: chain(limit cells per row = 1, strip value) + Warning: BigtableDataClientAsync is currently in preview, and is not + yet recommended for production use. + Args: - row_key: the key of the row to check - operation_timeout: the time budget for the entire operation, in seconds. @@ -849,6 +867,9 @@ async def sample_row_keys( RowKeySamples is simply a type alias for list[tuple[bytes, int]]; a list of row_keys, along with offset positions in the table + Warning: BigtableDataClientAsync is currently in preview, and is not + yet recommended for production use. + Args: - operation_timeout: the time budget for the entire operation, in seconds. Failed requests will be retried within the budget.i @@ -921,6 +942,9 @@ def mutations_batcher( Can be used to iteratively add mutations that are flushed as a group, to avoid excess network calls + Warning: BigtableDataClientAsync is currently in preview, and is not + yet recommended for production use. + Args: - flush_interval: Automatically flush every flush_interval seconds. If None, a table default will be used @@ -962,35 +986,38 @@ async def mutate_row( | TABLE_DEFAULT = TABLE_DEFAULT.DEFAULT, ): """ - Mutates a row atomically. + Mutates a row atomically. - Cells already present in the row are left unchanged unless explicitly changed - by ``mutation``. + Cells already present in the row are left unchanged unless explicitly changed + by ``mutation``. - Idempotent operations (i.e, all mutations have an explicit timestamp) will be - retried on server failure. Non-idempotent operations will not. + Idempotent operations (i.e, all mutations have an explicit timestamp) will be + retried on server failure. Non-idempotent operations will not. - Args: - - row_key: the row to apply mutations to - - mutations: the set of mutations to apply to the row - - operation_timeout: the time budget for the entire operation, in seconds. - Failed requests will be retried within the budget. - Defaults to the Table's default_operation_timeout - - attempt_timeout: the time budget for an individual network request, in seconds. - If it takes longer than this time to complete, the request will be cancelled with - a DeadlineExceeded exception, and a retry will be attempted. - Defaults to the Table's default_attempt_timeout. - If None, defaults to operation_timeout. - - retryable_errors: a list of errors that will be retried if encountered. - Only idempotent mutations will be retried. Defaults to the Table's - default_retryable_errors. + Warning: BigtableDataClientAsync is currently in preview, and is not + yet recommended for production use. + + Args: + - row_key: the row to apply mutations to + - mutations: the set of mutations to apply to the row + - operation_timeout: the time budget for the entire operation, in seconds. + Failed requests will be retried within the budget. + Defaults to the Table's default_operation_timeout + - attempt_timeout: the time budget for an individual network request, in seconds. + If it takes longer than this time to complete, the request will be cancelled with + a DeadlineExceeded exception, and a retry will be attempted. + Defaults to the Table's default_attempt_timeout. + If None, defaults to operation_timeout. + - retryable_errors: a list of errors that will be retried if encountered. + Only idempotent mutations will be retried. Defaults to the Table's + default_retryable_errors. 
Raises: - - DeadlineExceeded: raised after operation timeout - will be chained with a RetryExceptionGroup containing all - GoogleAPIError exceptions from any retries that failed - - GoogleAPIError: raised on non-idempotent operations that cannot be - safely retried. - - ValueError if invalid arguments are provided + - DeadlineExceeded: raised after operation timeout + will be chained with a RetryExceptionGroup containing all + GoogleAPIError exceptions from any retries that failed + - GoogleAPIError: raised on non-idempotent operations that cannot be + safely retried. + - ValueError if invalid arguments are provided """ operation_timeout, attempt_timeout = _get_timeouts( operation_timeout, attempt_timeout, self @@ -1050,6 +1077,9 @@ async def bulk_mutate_rows( will be retried on failure. Non-idempotent will not, and will reported in a raised exception group + Warning: BigtableDataClientAsync is currently in preview, and is not + yet recommended for production use. + Args: - mutation_entries: the batches of mutations to apply Each entry will be applied atomically, but entries will be applied @@ -1098,6 +1128,9 @@ async def check_and_mutate_row( Non-idempotent operation: will not be retried + Warning: BigtableDataClientAsync is currently in preview, and is not + yet recommended for production use. + Args: - row_key: the key of the row to mutate - predicate: the filter to be applied to the contents of the specified row. @@ -1166,6 +1199,9 @@ async def read_modify_write_row( Non-idempotent operation: will not be retried + Warning: BigtableDataClientAsync is currently in preview, and is not + yet recommended for production use. + Args: - row_key: the key of the row to apply read/modify/write rules to - rules: A rule or set of rules to apply to the row. From 1138a6f6969d9373627a4364c262ff29111a882a Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Wed, 7 Feb 2024 11:22:06 -0800 Subject: [PATCH 778/892] fix: fix `ValueError` in `test__validate_universe_domain` (#929) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: Allow users to explicitly configure universe domain chore: Update gapic-generator-python to v1.14.0 PiperOrigin-RevId: 603108274 Source-Link: https://github.com/googleapis/googleapis/commit/3d83e3652f689ab51c3f95f876458c6faef619bf Source-Link: https://github.com/googleapis/googleapis-gen/commit/baf5e9bbb14a768b2b4c9eae9feb78f18f1757fa Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiYmFmNWU5YmJiMTRhNzY4YjJiNGM5ZWFlOWZlYjc4ZjE4ZjE3NTdmYSJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * fix: Resolve AttributeError 'Credentials' object has no attribute 'universe_domain' fix: Add google-auth as a direct dependency fix: Add staticmethod decorator to methods added in v1.14.0 chore: Update gapic-generator-python to v1.14.1 PiperOrigin-RevId: 603728206 Source-Link: https://github.com/googleapis/googleapis/commit/9063da8b4d45339db4e2d7d92a27c6708620e694 Source-Link: https://github.com/googleapis/googleapis-gen/commit/891c67d0a855b08085eb301dabb14064ef4b2c6d Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiODkxYzY3ZDBhODU1YjA4MDg1ZWIzMDFkYWJiMTQwNjRlZjRiMmM2ZCJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * fix: fix `ValueError` in `test__validate_universe_domain` PiperOrigin-RevId: 604699565 Source-Link: 
https://github.com/googleapis/googleapis/commit/cd3eabf5968bbc91685e2eae8efb099e4d55bb5c Source-Link: https://github.com/googleapis/googleapis-gen/commit/01f69ba7a13d59e6f45e243359b91a6e896221f8 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiMDFmNjliYTdhMTNkNTllNmY0NWUyNDMzNTliOTFhNmU4OTYyMjFmOCJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- .../bigtable_instance_admin/async_client.py | 129 ++++- .../bigtable_instance_admin/client.py | 355 +++++++++++-- .../transports/base.py | 6 +- .../transports/grpc.py | 2 +- .../transports/grpc_asyncio.py | 2 +- .../transports/rest.py | 6 +- .../bigtable_table_admin/async_client.py | 141 ++++- .../services/bigtable_table_admin/client.py | 365 +++++++++++-- .../bigtable_table_admin/transports/base.py | 6 +- .../bigtable_table_admin/transports/grpc.py | 2 +- .../transports/grpc_asyncio.py | 2 +- .../bigtable_table_admin/transports/rest.py | 6 +- .../services/bigtable/async_client.py | 89 +++- .../bigtable_v2/services/bigtable/client.py | 310 +++++++++-- .../services/bigtable/transports/base.py | 6 +- .../services/bigtable/transports/grpc.py | 2 +- .../bigtable/transports/grpc_asyncio.py | 2 +- .../services/bigtable/transports/rest.py | 6 +- .../test_bigtable_instance_admin.py | 499 +++++++++++++++++- .../test_bigtable_table_admin.py | 495 +++++++++++++++-- .../unit/gapic/bigtable_v2/test_bigtable.py | 434 ++++++++++++++- 21 files changed, 2626 insertions(+), 239 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py index e4c4639af412..ab14ddaedc73 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ -38,9 +38,9 @@ from google.oauth2 import service_account # type: ignore try: - OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault] + OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.AsyncRetry, object] # type: ignore + OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore from google.api_core import operation # type: ignore from google.api_core import operation_async # type: ignore @@ -67,8 +67,12 @@ class BigtableInstanceAdminAsyncClient: _client: BigtableInstanceAdminClient + # Copy defaults from the synchronous client for use here. + # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead. DEFAULT_ENDPOINT = BigtableInstanceAdminClient.DEFAULT_ENDPOINT DEFAULT_MTLS_ENDPOINT = BigtableInstanceAdminClient.DEFAULT_MTLS_ENDPOINT + _DEFAULT_ENDPOINT_TEMPLATE = BigtableInstanceAdminClient._DEFAULT_ENDPOINT_TEMPLATE + _DEFAULT_UNIVERSE = BigtableInstanceAdminClient._DEFAULT_UNIVERSE app_profile_path = staticmethod(BigtableInstanceAdminClient.app_profile_path) parse_app_profile_path = staticmethod( @@ -193,6 +197,25 @@ def transport(self) -> BigtableInstanceAdminTransport: """ return self._client.transport + @property + def api_endpoint(self): + """Return the API endpoint used by the client instance. + + Returns: + str: The API endpoint used by the client instance. 
+ """ + return self._client._api_endpoint + + @property + def universe_domain(self) -> str: + """Return the universe domain used by the client instance. + + Returns: + str: The universe domain used + by the client instance. + """ + return self._client._universe_domain + get_transport_class = functools.partial( type(BigtableInstanceAdminClient).get_transport_class, type(BigtableInstanceAdminClient), @@ -206,7 +229,7 @@ def __init__( client_options: Optional[ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiates the bigtable instance admin client. + """Instantiates the bigtable instance admin async client. Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -217,23 +240,38 @@ def __init__( transport (Union[str, ~.BigtableInstanceAdminTransport]): The transport to use. If set to None, a transport is chosen automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): + Custom options for the client. + + 1. The ``api_endpoint`` property can be used to override the + default endpoint provided by the client when ``transport`` is + not explicitly provided. Only if this property is not set and + ``transport`` was not explicitly provided, the endpoint is + determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment + variable, which have one of the following values: "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + use the default regular endpoint) and "auto" (auto-switch to the + default mTLS endpoint if client certificate is present; this is + the default value). + + 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If + to provide a client certificate for mTLS transport. If not provided, the default SSL client certificate will be used if present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not set, no client certificate will be used. + 3. The ``universe_domain`` property can be used to override the + default "googleapis.com" universe. Note that ``api_endpoint`` + property still takes precedence; and ``universe_domain`` is + currently not supported for mTLS. + + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + Raises: google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport creation failed for any reason. @@ -360,6 +398,9 @@ async def create_instance( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. 
response = await rpc( request, @@ -460,6 +501,9 @@ async def get_instance( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -549,6 +593,9 @@ async def list_instances( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -623,6 +670,9 @@ async def update_instance( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -731,6 +781,9 @@ async def partial_update_instance( ), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -812,6 +865,9 @@ async def delete_instance( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. await rpc( request, @@ -920,6 +976,9 @@ async def create_cluster( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -1019,6 +1078,9 @@ async def get_cluster( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -1110,6 +1172,9 @@ async def list_clusters( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -1184,6 +1249,9 @@ async def update_cluster( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -1299,6 +1367,9 @@ async def partial_update_cluster( ), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -1380,6 +1451,9 @@ async def delete_cluster( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. await rpc( request, @@ -1479,6 +1553,9 @@ async def create_app_profile( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -1569,6 +1646,9 @@ async def get_app_profile( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -1664,6 +1744,9 @@ async def list_app_profiles( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -1776,6 +1859,9 @@ async def update_app_profile( ), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. 
response = await rpc( request, @@ -1857,6 +1943,9 @@ async def delete_app_profile( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. await rpc( request, @@ -1973,6 +2062,9 @@ async def get_iam_policy( gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -2081,6 +2173,9 @@ async def set_iam_policy( gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -2180,6 +2275,9 @@ async def test_iam_permissions( gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -2273,6 +2371,9 @@ async def list_hot_tablets( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py index 52c61ea4f8d3..4c2c2998e3d4 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -28,6 +28,7 @@ Union, cast, ) +import warnings from google.cloud.bigtable_admin_v2 import gapic_version as package_version @@ -42,9 +43,9 @@ from google.oauth2 import service_account # type: ignore try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore + OptionalRetry = Union[retries.Retry, object, None] # type: ignore from google.api_core import operation # type: ignore from google.api_core import operation_async # type: ignore @@ -137,11 +138,15 @@ def _get_default_mtls_endpoint(api_endpoint): return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead. DEFAULT_ENDPOINT = "bigtableadmin.googleapis.com" DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore DEFAULT_ENDPOINT ) + _DEFAULT_ENDPOINT_TEMPLATE = "bigtableadmin.{UNIVERSE_DOMAIN}" + _DEFAULT_UNIVERSE = "googleapis.com" + @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials @@ -403,7 +408,7 @@ def parse_common_location_path(path: str) -> Dict[str, str]: def get_mtls_endpoint_and_cert_source( cls, client_options: Optional[client_options_lib.ClientOptions] = None ): - """Return the API endpoint and client cert source for mutual TLS. + """Deprecated. Return the API endpoint and client cert source for mutual TLS. 
The client cert source is determined in the following order: (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the @@ -433,6 +438,11 @@ def get_mtls_endpoint_and_cert_source( Raises: google.auth.exceptions.MutualTLSChannelError: If any errors happen. """ + + warnings.warn( + "get_mtls_endpoint_and_cert_source is deprecated. Use the api_endpoint property instead.", + DeprecationWarning, + ) if client_options is None: client_options = client_options_lib.ClientOptions() use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") @@ -466,6 +476,180 @@ def get_mtls_endpoint_and_cert_source( return api_endpoint, client_cert_source + @staticmethod + def _read_environment_variables(): + """Returns the environment variables used by the client. + + Returns: + Tuple[bool, str, str]: returns the GOOGLE_API_USE_CLIENT_CERTIFICATE, + GOOGLE_API_USE_MTLS_ENDPOINT, and GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variables. + + Raises: + ValueError: If GOOGLE_API_USE_CLIENT_CERTIFICATE is not + any of ["true", "false"]. + google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT + is not any of ["auto", "never", "always"]. + """ + use_client_cert = os.getenv( + "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" + ).lower() + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower() + universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN") + if use_client_cert not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError( + "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + return use_client_cert == "true", use_mtls_endpoint, universe_domain_env + + @staticmethod + def _get_client_cert_source(provided_cert_source, use_cert_flag): + """Return the client cert source to be used by the client. + + Args: + provided_cert_source (bytes): The client certificate source provided. + use_cert_flag (bool): A flag indicating whether to use the client certificate. + + Returns: + bytes or None: The client cert source to be used by the client. + """ + client_cert_source = None + if use_cert_flag: + if provided_cert_source: + client_cert_source = provided_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + return client_cert_source + + @staticmethod + def _get_api_endpoint( + api_override, client_cert_source, universe_domain, use_mtls_endpoint + ): + """Return the API endpoint used by the client. + + Args: + api_override (str): The API endpoint override. If specified, this is always + the return value of this function and the other arguments are not used. + client_cert_source (bytes): The client certificate source used by the client. + universe_domain (str): The universe domain used by the client. + use_mtls_endpoint (str): How to use the mTLS endpoint, which depends also on the other parameters. + Possible values are "always", "auto", or "never". + + Returns: + str: The API endpoint to be used by the client. 
+ """ + if api_override is not None: + api_endpoint = api_override + elif use_mtls_endpoint == "always" or ( + use_mtls_endpoint == "auto" and client_cert_source + ): + _default_universe = BigtableInstanceAdminClient._DEFAULT_UNIVERSE + if universe_domain != _default_universe: + raise MutualTLSChannelError( + f"mTLS is not supported in any universe other than {_default_universe}." + ) + api_endpoint = BigtableInstanceAdminClient.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = ( + BigtableInstanceAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=universe_domain + ) + ) + return api_endpoint + + @staticmethod + def _get_universe_domain( + client_universe_domain: Optional[str], universe_domain_env: Optional[str] + ) -> str: + """Return the universe domain used by the client. + + Args: + client_universe_domain (Optional[str]): The universe domain configured via the client options. + universe_domain_env (Optional[str]): The universe domain configured via the "GOOGLE_CLOUD_UNIVERSE_DOMAIN" environment variable. + + Returns: + str: The universe domain to be used by the client. + + Raises: + ValueError: If the universe domain is an empty string. + """ + universe_domain = BigtableInstanceAdminClient._DEFAULT_UNIVERSE + if client_universe_domain is not None: + universe_domain = client_universe_domain + elif universe_domain_env is not None: + universe_domain = universe_domain_env + if len(universe_domain.strip()) == 0: + raise ValueError("Universe Domain cannot be an empty string.") + return universe_domain + + @staticmethod + def _compare_universes( + client_universe: str, credentials: ga_credentials.Credentials + ) -> bool: + """Returns True iff the universe domains used by the client and credentials match. + + Args: + client_universe (str): The universe domain configured via the client options. + credentials (ga_credentials.Credentials): The credentials being used in the client. + + Returns: + bool: True iff client_universe matches the universe in credentials. + + Raises: + ValueError: when client_universe does not match the universe in credentials. + """ + + default_universe = BigtableInstanceAdminClient._DEFAULT_UNIVERSE + credentials_universe = getattr(credentials, "universe_domain", default_universe) + + if client_universe != credentials_universe: + raise ValueError( + "The configured universe domain " + f"({client_universe}) does not match the universe domain " + f"found in the credentials ({credentials_universe}). " + "If you haven't configured the universe domain explicitly, " + f"`{default_universe}` is the default." + ) + return True + + def _validate_universe_domain(self): + """Validates client's and credentials' universe domains are consistent. + + Returns: + bool: True iff the configured universe domain is valid. + + Raises: + ValueError: If the configured universe domain is not valid. + """ + self._is_universe_domain_valid = ( + self._is_universe_domain_valid + or BigtableInstanceAdminClient._compare_universes( + self.universe_domain, self.transport._credentials + ) + ) + return self._is_universe_domain_valid + + @property + def api_endpoint(self): + """Return the API endpoint used by the client instance. + + Returns: + str: The API endpoint used by the client instance. + """ + return self._api_endpoint + + @property + def universe_domain(self) -> str: + """Return the universe domain used by the client instance. + + Returns: + str: The universe domain used by the client instance. 
+ """ + return self._universe_domain + def __init__( self, *, @@ -485,22 +669,32 @@ def __init__( transport (Union[str, BigtableInstanceAdminTransport]): The transport to use. If set to None, a transport is chosen automatically. - client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): + Custom options for the client. + + 1. The ``api_endpoint`` property can be used to override the + default endpoint provided by the client when ``transport`` is + not explicitly provided. Only if this property is not set and + ``transport`` was not explicitly provided, the endpoint is + determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment + variable, which have one of the following values: "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + use the default regular endpoint) and "auto" (auto-switch to the + default mTLS endpoint if client certificate is present; this is + the default value). + + 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If + to provide a client certificate for mTLS transport. If not provided, the default SSL client certificate will be used if present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not set, no client certificate will be used. + + 3. The ``universe_domain`` property can be used to override the + default "googleapis.com" universe. Note that the ``api_endpoint`` + property still takes precedence; and ``universe_domain`` is + currently not supported for mTLS. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. @@ -511,17 +705,34 @@ def __init__( google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. 
""" - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - client_options = cast(client_options_lib.ClientOptions, client_options) + self._client_options = client_options + if isinstance(self._client_options, dict): + self._client_options = client_options_lib.from_dict(self._client_options) + if self._client_options is None: + self._client_options = client_options_lib.ClientOptions() + self._client_options = cast( + client_options_lib.ClientOptions, self._client_options + ) + + universe_domain_opt = getattr(self._client_options, "universe_domain", None) - api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source( - client_options + ( + self._use_client_cert, + self._use_mtls_endpoint, + self._universe_domain_env, + ) = BigtableInstanceAdminClient._read_environment_variables() + self._client_cert_source = BigtableInstanceAdminClient._get_client_cert_source( + self._client_options.client_cert_source, self._use_client_cert ) + self._universe_domain = BigtableInstanceAdminClient._get_universe_domain( + universe_domain_opt, self._universe_domain_env + ) + self._api_endpoint = None # updated below, depending on `transport` + + # Initialize the universe domain validation. + self._is_universe_domain_valid = False - api_key_value = getattr(client_options, "api_key", None) + api_key_value = getattr(self._client_options, "api_key", None) if api_key_value and credentials: raise ValueError( "client_options.api_key and credentials are mutually exclusive" @@ -530,20 +741,33 @@ def __init__( # Save or instantiate the transport. # Ordinarily, we provide the transport, but allowing a custom transport # instance provides an extensibility point for unusual situations. - if isinstance(transport, BigtableInstanceAdminTransport): + transport_provided = isinstance(transport, BigtableInstanceAdminTransport) + if transport_provided: # transport is a BigtableInstanceAdminTransport instance. - if credentials or client_options.credentials_file or api_key_value: + if credentials or self._client_options.credentials_file or api_key_value: raise ValueError( "When providing a transport instance, " "provide its credentials directly." ) - if client_options.scopes: + if self._client_options.scopes: raise ValueError( "When providing a transport instance, provide its scopes " "directly." 
) - self._transport = transport - else: + self._transport = cast(BigtableInstanceAdminTransport, transport) + self._api_endpoint = self._transport.host + + self._api_endpoint = ( + self._api_endpoint + or BigtableInstanceAdminClient._get_api_endpoint( + self._client_options.api_endpoint, + self._client_cert_source, + self._universe_domain, + self._use_mtls_endpoint, + ) + ) + + if not transport_provided: import google.auth._default # type: ignore if api_key_value and hasattr( @@ -553,17 +777,17 @@ def __init__( api_key_value ) - Transport = type(self).get_transport_class(transport) + Transport = type(self).get_transport_class(cast(str, transport)) self._transport = Transport( credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, + credentials_file=self._client_options.credentials_file, + host=self._api_endpoint, + scopes=self._client_options.scopes, + client_cert_source_for_mtls=self._client_cert_source, + quota_project_id=self._client_options.quota_project_id, client_info=client_info, always_use_jwt_access=True, - api_audience=client_options.api_audience, + api_audience=self._client_options.api_audience, ) def create_instance( @@ -680,6 +904,9 @@ def create_instance( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -770,6 +997,9 @@ def get_instance( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -849,6 +1079,9 @@ def list_instances( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -914,6 +1147,9 @@ def update_instance( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1014,6 +1250,9 @@ def partial_update_instance( ), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1095,6 +1334,9 @@ def delete_instance( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. rpc( request, @@ -1203,6 +1445,9 @@ def create_cluster( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1292,6 +1537,9 @@ def get_cluster( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1373,6 +1621,9 @@ def list_clusters( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1438,6 +1689,9 @@ def update_cluster( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. 
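        # Illustrative sketch (assumed usage, not part of this changeset): with the
        # endpoint machinery added above, a client for a non-default universe could
        # be constructed along these lines:
        #
        #   from google.api_core.client_options import ClientOptions
        #   client = BigtableInstanceAdminClient(
        #       client_options=ClientOptions(universe_domain="example-universe.net"),
        #   )
        #
        # The endpoint then resolves through _DEFAULT_ENDPOINT_TEMPLATE to
        # "bigtableadmin.example-universe.net", and _validate_universe_domain()
        # checks that domain against the credentials before each request is sent.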
response = rpc( request, @@ -1553,6 +1807,9 @@ def partial_update_cluster( ), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1634,6 +1891,9 @@ def delete_cluster( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. rpc( request, @@ -1733,6 +1993,9 @@ def create_app_profile( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1813,6 +2076,9 @@ def get_app_profile( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1898,6 +2164,9 @@ def list_app_profiles( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -2000,6 +2269,9 @@ def update_app_profile( ), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -2081,6 +2353,9 @@ def delete_app_profile( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. rpc( request, @@ -2184,6 +2459,9 @@ def get_iam_policy( gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -2289,6 +2567,9 @@ def set_iam_policy( gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -2376,6 +2657,9 @@ def test_iam_permissions( gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -2459,6 +2743,9 @@ def list_hot_tablets( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py index d92d2545300d..aeb07556cce4 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py @@ -71,7 +71,7 @@ def __init__( Args: host (Optional[str]): - The hostname to connect to. + The hostname to connect to (default: 'bigtableadmin.googleapis.com'). credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none @@ -134,6 +134,10 @@ def __init__( host += ":443" self._host = host + @property + def host(self): + return self._host + def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. 
self._wrapped_methods = { diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py index eca37957dbbb..c47db6ba5ed5 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py @@ -73,7 +73,7 @@ def __init__( Args: host (Optional[str]): - The hostname to connect to. + The hostname to connect to (default: 'bigtableadmin.googleapis.com'). credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py index 145aa427d852..cbd77b381f23 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py @@ -118,7 +118,7 @@ def __init__( Args: host (Optional[str]): - The hostname to connect to. + The hostname to connect to (default: 'bigtableadmin.googleapis.com'). credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py index 9d5502b7eba8..61f425953168 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py @@ -35,9 +35,9 @@ import warnings try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore + OptionalRetry = Union[retries.Retry, object, None] # type: ignore from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin @@ -734,7 +734,7 @@ def __init__( Args: host (Optional[str]): - The hostname to connect to. + The hostname to connect to (default: 'bigtableadmin.googleapis.com'). credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. 
These credentials identify the application to the service; if none diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index 5a4435bde9fb..124b3ef097ef 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -38,9 +38,9 @@ from google.oauth2 import service_account # type: ignore try: - OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault] + OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.AsyncRetry, object] # type: ignore + OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore from google.api_core import operation # type: ignore from google.api_core import operation_async # type: ignore @@ -67,8 +67,12 @@ class BigtableTableAdminAsyncClient: _client: BigtableTableAdminClient + # Copy defaults from the synchronous client for use here. + # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead. DEFAULT_ENDPOINT = BigtableTableAdminClient.DEFAULT_ENDPOINT DEFAULT_MTLS_ENDPOINT = BigtableTableAdminClient.DEFAULT_MTLS_ENDPOINT + _DEFAULT_ENDPOINT_TEMPLATE = BigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE + _DEFAULT_UNIVERSE = BigtableTableAdminClient._DEFAULT_UNIVERSE backup_path = staticmethod(BigtableTableAdminClient.backup_path) parse_backup_path = staticmethod(BigtableTableAdminClient.parse_backup_path) @@ -189,6 +193,25 @@ def transport(self) -> BigtableTableAdminTransport: """ return self._client.transport + @property + def api_endpoint(self): + """Return the API endpoint used by the client instance. + + Returns: + str: The API endpoint used by the client instance. + """ + return self._client._api_endpoint + + @property + def universe_domain(self) -> str: + """Return the universe domain used by the client instance. + + Returns: + str: The universe domain used + by the client instance. + """ + return self._client._universe_domain + get_transport_class = functools.partial( type(BigtableTableAdminClient).get_transport_class, type(BigtableTableAdminClient), @@ -202,7 +225,7 @@ def __init__( client_options: Optional[ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiates the bigtable table admin client. + """Instantiates the bigtable table admin async client. Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -213,23 +236,38 @@ def __init__( transport (Union[str, ~.BigtableTableAdminTransport]): The transport to use. If set to None, a transport is chosen automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): + Custom options for the client. + + 1. The ``api_endpoint`` property can be used to override the + default endpoint provided by the client when ``transport`` is + not explicitly provided. 
Only if this property is not set and + ``transport`` was not explicitly provided, the endpoint is + determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment + variable, which have one of the following values: "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + use the default regular endpoint) and "auto" (auto-switch to the + default mTLS endpoint if client certificate is present; this is + the default value). + + 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If + to provide a client certificate for mTLS transport. If not provided, the default SSL client certificate will be used if present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not set, no client certificate will be used. + 3. The ``universe_domain`` property can be used to override the + default "googleapis.com" universe. Note that ``api_endpoint`` + property still takes precedence; and ``universe_domain`` is + currently not supported for mTLS. + + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + Raises: google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport creation failed for any reason. @@ -331,6 +369,9 @@ async def create_table( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -452,6 +493,9 @@ async def create_table_from_snapshot( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -550,6 +594,9 @@ async def list_tables( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -648,6 +695,9 @@ async def get_table( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -751,6 +801,9 @@ async def update_table( ), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -831,6 +884,9 @@ async def delete_table( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. await rpc( request, @@ -911,6 +967,9 @@ async def undelete_table( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. 
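        # _validate_universe_domain() compares the client's configured universe
        # domain with the one reported by the credentials and raises ValueError on
        # a mismatch; after the first successful check, _is_universe_domain_valid
        # caches the result so subsequent calls skip the comparison.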
response = await rpc( request, @@ -1023,6 +1082,9 @@ async def modify_column_families( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -1074,6 +1136,9 @@ async def drop_row_range( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. await rpc( request, @@ -1164,6 +1229,9 @@ async def generate_consistency_token( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -1266,6 +1334,9 @@ async def check_consistency( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -1403,6 +1474,9 @@ async def snapshot_table( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -1523,6 +1597,9 @@ async def get_snapshot( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -1639,6 +1716,9 @@ async def list_snapshots( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -1735,6 +1815,9 @@ async def delete_snapshot( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. await rpc( request, @@ -1844,6 +1927,9 @@ async def create_backup( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -1937,6 +2023,9 @@ async def get_backup( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -2032,6 +2121,9 @@ async def update_backup( ), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -2103,6 +2195,9 @@ async def delete_backup( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. await rpc( request, @@ -2194,6 +2289,9 @@ async def list_backups( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -2267,6 +2365,9 @@ async def restore_table( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. 
response = await rpc( request, @@ -2402,6 +2503,9 @@ async def copy_backup( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -2529,6 +2633,9 @@ async def get_iam_policy( gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -2637,6 +2744,9 @@ async def set_iam_policy( gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -2736,6 +2846,9 @@ async def test_iam_permissions( gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index d0c04ed11416..09a67e696069 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -28,6 +28,7 @@ Union, cast, ) +import warnings from google.cloud.bigtable_admin_v2 import gapic_version as package_version @@ -42,9 +43,9 @@ from google.oauth2 import service_account # type: ignore try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore + OptionalRetry = Union[retries.Retry, object, None] # type: ignore from google.api_core import operation # type: ignore from google.api_core import operation_async # type: ignore @@ -137,11 +138,15 @@ def _get_default_mtls_endpoint(api_endpoint): return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead. DEFAULT_ENDPOINT = "bigtableadmin.googleapis.com" DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore DEFAULT_ENDPOINT ) + _DEFAULT_ENDPOINT_TEMPLATE = "bigtableadmin.{UNIVERSE_DOMAIN}" + _DEFAULT_UNIVERSE = "googleapis.com" + @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials @@ -405,7 +410,7 @@ def parse_common_location_path(path: str) -> Dict[str, str]: def get_mtls_endpoint_and_cert_source( cls, client_options: Optional[client_options_lib.ClientOptions] = None ): - """Return the API endpoint and client cert source for mutual TLS. + """Deprecated. Return the API endpoint and client cert source for mutual TLS. The client cert source is determined in the following order: (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the @@ -435,6 +440,11 @@ def get_mtls_endpoint_and_cert_source( Raises: google.auth.exceptions.MutualTLSChannelError: If any errors happen. """ + + warnings.warn( + "get_mtls_endpoint_and_cert_source is deprecated. 
Use the api_endpoint property instead.", + DeprecationWarning, + ) if client_options is None: client_options = client_options_lib.ClientOptions() use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") @@ -468,6 +478,178 @@ def get_mtls_endpoint_and_cert_source( return api_endpoint, client_cert_source + @staticmethod + def _read_environment_variables(): + """Returns the environment variables used by the client. + + Returns: + Tuple[bool, str, str]: returns the GOOGLE_API_USE_CLIENT_CERTIFICATE, + GOOGLE_API_USE_MTLS_ENDPOINT, and GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variables. + + Raises: + ValueError: If GOOGLE_API_USE_CLIENT_CERTIFICATE is not + any of ["true", "false"]. + google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT + is not any of ["auto", "never", "always"]. + """ + use_client_cert = os.getenv( + "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" + ).lower() + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower() + universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN") + if use_client_cert not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError( + "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + return use_client_cert == "true", use_mtls_endpoint, universe_domain_env + + @staticmethod + def _get_client_cert_source(provided_cert_source, use_cert_flag): + """Return the client cert source to be used by the client. + + Args: + provided_cert_source (bytes): The client certificate source provided. + use_cert_flag (bool): A flag indicating whether to use the client certificate. + + Returns: + bytes or None: The client cert source to be used by the client. + """ + client_cert_source = None + if use_cert_flag: + if provided_cert_source: + client_cert_source = provided_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + return client_cert_source + + @staticmethod + def _get_api_endpoint( + api_override, client_cert_source, universe_domain, use_mtls_endpoint + ): + """Return the API endpoint used by the client. + + Args: + api_override (str): The API endpoint override. If specified, this is always + the return value of this function and the other arguments are not used. + client_cert_source (bytes): The client certificate source used by the client. + universe_domain (str): The universe domain used by the client. + use_mtls_endpoint (str): How to use the mTLS endpoint, which depends also on the other parameters. + Possible values are "always", "auto", or "never". + + Returns: + str: The API endpoint to be used by the client. + """ + if api_override is not None: + api_endpoint = api_override + elif use_mtls_endpoint == "always" or ( + use_mtls_endpoint == "auto" and client_cert_source + ): + _default_universe = BigtableTableAdminClient._DEFAULT_UNIVERSE + if universe_domain != _default_universe: + raise MutualTLSChannelError( + f"mTLS is not supported in any universe other than {_default_universe}." 
+ ) + api_endpoint = BigtableTableAdminClient.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = BigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=universe_domain + ) + return api_endpoint + + @staticmethod + def _get_universe_domain( + client_universe_domain: Optional[str], universe_domain_env: Optional[str] + ) -> str: + """Return the universe domain used by the client. + + Args: + client_universe_domain (Optional[str]): The universe domain configured via the client options. + universe_domain_env (Optional[str]): The universe domain configured via the "GOOGLE_CLOUD_UNIVERSE_DOMAIN" environment variable. + + Returns: + str: The universe domain to be used by the client. + + Raises: + ValueError: If the universe domain is an empty string. + """ + universe_domain = BigtableTableAdminClient._DEFAULT_UNIVERSE + if client_universe_domain is not None: + universe_domain = client_universe_domain + elif universe_domain_env is not None: + universe_domain = universe_domain_env + if len(universe_domain.strip()) == 0: + raise ValueError("Universe Domain cannot be an empty string.") + return universe_domain + + @staticmethod + def _compare_universes( + client_universe: str, credentials: ga_credentials.Credentials + ) -> bool: + """Returns True iff the universe domains used by the client and credentials match. + + Args: + client_universe (str): The universe domain configured via the client options. + credentials (ga_credentials.Credentials): The credentials being used in the client. + + Returns: + bool: True iff client_universe matches the universe in credentials. + + Raises: + ValueError: when client_universe does not match the universe in credentials. + """ + + default_universe = BigtableTableAdminClient._DEFAULT_UNIVERSE + credentials_universe = getattr(credentials, "universe_domain", default_universe) + + if client_universe != credentials_universe: + raise ValueError( + "The configured universe domain " + f"({client_universe}) does not match the universe domain " + f"found in the credentials ({credentials_universe}). " + "If you haven't configured the universe domain explicitly, " + f"`{default_universe}` is the default." + ) + return True + + def _validate_universe_domain(self): + """Validates client's and credentials' universe domains are consistent. + + Returns: + bool: True iff the configured universe domain is valid. + + Raises: + ValueError: If the configured universe domain is not valid. + """ + self._is_universe_domain_valid = ( + self._is_universe_domain_valid + or BigtableTableAdminClient._compare_universes( + self.universe_domain, self.transport._credentials + ) + ) + return self._is_universe_domain_valid + + @property + def api_endpoint(self): + """Return the API endpoint used by the client instance. + + Returns: + str: The API endpoint used by the client instance. + """ + return self._api_endpoint + + @property + def universe_domain(self) -> str: + """Return the universe domain used by the client instance. + + Returns: + str: The universe domain used by the client instance. + """ + return self._universe_domain + def __init__( self, *, @@ -487,22 +669,32 @@ def __init__( transport (Union[str, BigtableTableAdminTransport]): The transport to use. If set to None, a transport is chosen automatically. - client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. 
- (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): + Custom options for the client. + + 1. The ``api_endpoint`` property can be used to override the + default endpoint provided by the client when ``transport`` is + not explicitly provided. Only if this property is not set and + ``transport`` was not explicitly provided, the endpoint is + determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment + variable, which have one of the following values: "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + use the default regular endpoint) and "auto" (auto-switch to the + default mTLS endpoint if client certificate is present; this is + the default value). + + 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If + to provide a client certificate for mTLS transport. If not provided, the default SSL client certificate will be used if present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not set, no client certificate will be used. + + 3. The ``universe_domain`` property can be used to override the + default "googleapis.com" universe. Note that the ``api_endpoint`` + property still takes precedence; and ``universe_domain`` is + currently not supported for mTLS. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. @@ -513,17 +705,34 @@ def __init__( google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. 
""" - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - client_options = cast(client_options_lib.ClientOptions, client_options) + self._client_options = client_options + if isinstance(self._client_options, dict): + self._client_options = client_options_lib.from_dict(self._client_options) + if self._client_options is None: + self._client_options = client_options_lib.ClientOptions() + self._client_options = cast( + client_options_lib.ClientOptions, self._client_options + ) + + universe_domain_opt = getattr(self._client_options, "universe_domain", None) - api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source( - client_options + ( + self._use_client_cert, + self._use_mtls_endpoint, + self._universe_domain_env, + ) = BigtableTableAdminClient._read_environment_variables() + self._client_cert_source = BigtableTableAdminClient._get_client_cert_source( + self._client_options.client_cert_source, self._use_client_cert + ) + self._universe_domain = BigtableTableAdminClient._get_universe_domain( + universe_domain_opt, self._universe_domain_env ) + self._api_endpoint = None # updated below, depending on `transport` - api_key_value = getattr(client_options, "api_key", None) + # Initialize the universe domain validation. + self._is_universe_domain_valid = False + + api_key_value = getattr(self._client_options, "api_key", None) if api_key_value and credentials: raise ValueError( "client_options.api_key and credentials are mutually exclusive" @@ -532,20 +741,33 @@ def __init__( # Save or instantiate the transport. # Ordinarily, we provide the transport, but allowing a custom transport # instance provides an extensibility point for unusual situations. - if isinstance(transport, BigtableTableAdminTransport): + transport_provided = isinstance(transport, BigtableTableAdminTransport) + if transport_provided: # transport is a BigtableTableAdminTransport instance. - if credentials or client_options.credentials_file or api_key_value: + if credentials or self._client_options.credentials_file or api_key_value: raise ValueError( "When providing a transport instance, " "provide its credentials directly." ) - if client_options.scopes: + if self._client_options.scopes: raise ValueError( "When providing a transport instance, provide its scopes " "directly." 
) - self._transport = transport - else: + self._transport = cast(BigtableTableAdminTransport, transport) + self._api_endpoint = self._transport.host + + self._api_endpoint = ( + self._api_endpoint + or BigtableTableAdminClient._get_api_endpoint( + self._client_options.api_endpoint, + self._client_cert_source, + self._universe_domain, + self._use_mtls_endpoint, + ) + ) + + if not transport_provided: import google.auth._default # type: ignore if api_key_value and hasattr( @@ -555,17 +777,17 @@ def __init__( api_key_value ) - Transport = type(self).get_transport_class(transport) + Transport = type(self).get_transport_class(cast(str, transport)) self._transport = Transport( credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, + credentials_file=self._client_options.credentials_file, + host=self._api_endpoint, + scopes=self._client_options.scopes, + client_cert_source_for_mtls=self._client_cert_source, + quota_project_id=self._client_options.quota_project_id, client_info=client_info, always_use_jwt_access=True, - api_audience=client_options.api_audience, + api_audience=self._client_options.api_audience, ) def create_table( @@ -658,6 +880,9 @@ def create_table( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -781,6 +1006,9 @@ def create_table_from_snapshot( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -869,6 +1097,9 @@ def list_tables( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -957,6 +1188,9 @@ def get_table( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1060,6 +1294,9 @@ def update_table( ), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1140,6 +1377,9 @@ def delete_table( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. rpc( request, @@ -1220,6 +1460,9 @@ def undelete_table( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1332,6 +1575,9 @@ def modify_column_families( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1384,6 +1630,9 @@ def drop_row_range( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. rpc( request, @@ -1468,6 +1717,9 @@ def generate_consistency_token( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. 
response = rpc( request, @@ -1560,6 +1812,9 @@ def check_consistency( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1697,6 +1952,9 @@ def snapshot_table( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1807,6 +2065,9 @@ def get_snapshot( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1913,6 +2174,9 @@ def list_snapshots( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -2009,6 +2273,9 @@ def delete_snapshot( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. rpc( request, @@ -2118,6 +2385,9 @@ def create_backup( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -2201,6 +2471,9 @@ def get_backup( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -2296,6 +2569,9 @@ def update_backup( ), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -2367,6 +2643,9 @@ def delete_backup( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. rpc( request, @@ -2448,6 +2727,9 @@ def list_backups( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -2522,6 +2804,9 @@ def restore_table( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -2657,6 +2942,9 @@ def copy_backup( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -2771,6 +3059,9 @@ def get_iam_policy( gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -2876,6 +3167,9 @@ def set_iam_policy( gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -2963,6 +3257,9 @@ def test_iam_permissions( gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. 
response = rpc( request, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py index c3cf01a96eae..e0313a9467f0 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py @@ -71,7 +71,7 @@ def __init__( Args: host (Optional[str]): - The hostname to connect to. + The hostname to connect to (default: 'bigtableadmin.googleapis.com'). credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none @@ -134,6 +134,10 @@ def __init__( host += ":443" self._host = host + @property + def host(self): + return self._host + def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. self._wrapped_methods = { diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py index d765869cd7f6..b0c33eca9e46 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py @@ -75,7 +75,7 @@ def __init__( Args: host (Optional[str]): - The hostname to connect to. + The hostname to connect to (default: 'bigtableadmin.googleapis.com'). credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py index b60a7351c7df..3ae66f84f446 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py @@ -120,7 +120,7 @@ def __init__( Args: host (Optional[str]): - The hostname to connect to. + The hostname to connect to (default: 'bigtableadmin.googleapis.com'). credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. 
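A small sketch of why the new ``host`` property matters (hostnames are placeholders; the admin client is assumed to expose the same ``api_endpoint`` property that the other clients gain in this patch):

    from google.auth.credentials import AnonymousCredentials
    from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient
    from google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports import (
        BigtableTableAdminGrpcTransport,
    )

    transport = BigtableTableAdminGrpcTransport(
        credentials=AnonymousCredentials(),
        host="bigtableadmin.example-universe.com",  # ":443" is appended when no port is given
    )
    client = BigtableTableAdminClient(transport=transport)

    # The client no longer recomputes the endpoint when a transport instance is
    # provided; it reads it back through the new ``host`` property.
    assert client.api_endpoint == transport.host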
These credentials identify the application to the service; if none diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py index 41b893eb7780..ad171d8f361f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py @@ -35,9 +35,9 @@ import warnings try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore + OptionalRetry = Union[retries.Retry, object, None] # type: ignore from google.cloud.bigtable_admin_v2.types import bigtable_table_admin @@ -831,7 +831,7 @@ def __init__( Args: host (Optional[str]): - The hostname to connect to. + The hostname to connect to (default: 'bigtableadmin.googleapis.com'). credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py index df5d7e0de495..0421e19bcd2f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -60,8 +60,12 @@ class BigtableAsyncClient: _client: BigtableClient + # Copy defaults from the synchronous client for use here. + # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead. DEFAULT_ENDPOINT = BigtableClient.DEFAULT_ENDPOINT DEFAULT_MTLS_ENDPOINT = BigtableClient.DEFAULT_MTLS_ENDPOINT + _DEFAULT_ENDPOINT_TEMPLATE = BigtableClient._DEFAULT_ENDPOINT_TEMPLATE + _DEFAULT_UNIVERSE = BigtableClient._DEFAULT_UNIVERSE instance_path = staticmethod(BigtableClient.instance_path) parse_instance_path = staticmethod(BigtableClient.parse_instance_path) @@ -162,6 +166,25 @@ def transport(self) -> BigtableTransport: """ return self._client.transport + @property + def api_endpoint(self): + """Return the API endpoint used by the client instance. + + Returns: + str: The API endpoint used by the client instance. + """ + return self._client._api_endpoint + + @property + def universe_domain(self) -> str: + """Return the universe domain used by the client instance. + + Returns: + str: The universe domain used + by the client instance. + """ + return self._client._universe_domain + get_transport_class = functools.partial( type(BigtableClient).get_transport_class, type(BigtableClient) ) @@ -174,7 +197,7 @@ def __init__( client_options: Optional[ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiates the bigtable client. + """Instantiates the bigtable async client. Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -185,23 +208,38 @@ def __init__( transport (Union[str, ~.BigtableTransport]): The transport to use. If set to None, a transport is chosen automatically. - client_options (ClientOptions): Custom options for the client. 
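For the two properties just added to the async client, a minimal read-only usage sketch (no RPCs are made; the anonymous credentials are only there to avoid an ADC lookup):

    from google.auth.credentials import AnonymousCredentials
    from google.cloud.bigtable_v2 import BigtableAsyncClient

    client = BigtableAsyncClient(credentials=AnonymousCredentials())
    print(client.api_endpoint)     # "bigtable.googleapis.com" unless overridden
    print(client.universe_domain)  # "googleapis.com" by default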
It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): + Custom options for the client. + + 1. The ``api_endpoint`` property can be used to override the + default endpoint provided by the client when ``transport`` is + not explicitly provided. Only if this property is not set and + ``transport`` was not explicitly provided, the endpoint is + determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment + variable, which have one of the following values: "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + use the default regular endpoint) and "auto" (auto-switch to the + default mTLS endpoint if client certificate is present; this is + the default value). + + 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If + to provide a client certificate for mTLS transport. If not provided, the default SSL client certificate will be used if present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not set, no client certificate will be used. + 3. The ``universe_domain`` property can be used to override the + default "googleapis.com" universe. Note that ``api_endpoint`` + property still takes precedence; and ``universe_domain`` is + currently not supported for mTLS. + + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + Raises: google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport creation failed for any reason. @@ -296,6 +334,9 @@ def read_rows( ), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = rpc( request, @@ -389,6 +430,9 @@ def sample_row_keys( ), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = rpc( request, @@ -504,6 +548,9 @@ async def mutate_row( ), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -613,6 +660,9 @@ def mutate_rows( ), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = rpc( request, @@ -766,6 +816,9 @@ async def check_and_mutate_row( ), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -857,6 +910,9 @@ async def ping_and_warm( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -979,6 +1035,9 @@ async def read_modify_write_row( ), ) + # Validate the universe domain. 
+ self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -1086,6 +1145,9 @@ def generate_initial_change_stream_partitions( ), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1185,6 +1247,9 @@ def read_change_stream( ), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = rpc( request, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py index 54ba6af4353b..f53f25e90884 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py @@ -29,6 +29,7 @@ Union, cast, ) +import warnings from google.cloud.bigtable_v2 import gapic_version as package_version @@ -128,11 +129,15 @@ def _get_default_mtls_endpoint(api_endpoint): return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead. DEFAULT_ENDPOINT = "bigtable.googleapis.com" DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore DEFAULT_ENDPOINT ) + _DEFAULT_ENDPOINT_TEMPLATE = "bigtable.{UNIVERSE_DOMAIN}" + _DEFAULT_UNIVERSE = "googleapis.com" + @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials @@ -300,7 +305,7 @@ def parse_common_location_path(path: str) -> Dict[str, str]: def get_mtls_endpoint_and_cert_source( cls, client_options: Optional[client_options_lib.ClientOptions] = None ): - """Return the API endpoint and client cert source for mutual TLS. + """Deprecated. Return the API endpoint and client cert source for mutual TLS. The client cert source is determined in the following order: (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the @@ -330,6 +335,11 @@ def get_mtls_endpoint_and_cert_source( Raises: google.auth.exceptions.MutualTLSChannelError: If any errors happen. """ + + warnings.warn( + "get_mtls_endpoint_and_cert_source is deprecated. Use the api_endpoint property instead.", + DeprecationWarning, + ) if client_options is None: client_options = client_options_lib.ClientOptions() use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") @@ -363,6 +373,178 @@ def get_mtls_endpoint_and_cert_source( return api_endpoint, client_cert_source + @staticmethod + def _read_environment_variables(): + """Returns the environment variables used by the client. + + Returns: + Tuple[bool, str, str]: returns the GOOGLE_API_USE_CLIENT_CERTIFICATE, + GOOGLE_API_USE_MTLS_ENDPOINT, and GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variables. + + Raises: + ValueError: If GOOGLE_API_USE_CLIENT_CERTIFICATE is not + any of ["true", "false"]. + google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT + is not any of ["auto", "never", "always"]. 
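To make the environment-variable handling concrete, a sketch under the assumption that the process environment is set before the client is created (the domain is a placeholder; client options and the ``GOOGLE_API_USE_*`` settings take the precedence described above):

    import os
    from google.auth.credentials import AnonymousCredentials
    from google.cloud.bigtable_v2 import BigtableClient

    os.environ["GOOGLE_CLOUD_UNIVERSE_DOMAIN"] = "example-universe.com"
    os.environ["GOOGLE_API_USE_MTLS_ENDPOINT"] = "never"  # mTLS is unsupported outside googleapis.com

    client = BigtableClient(credentials=AnonymousCredentials())
    # _DEFAULT_ENDPOINT_TEMPLATE ("bigtable.{UNIVERSE_DOMAIN}") is filled with
    # the universe read from the environment.
    print(client.api_endpoint)     # bigtable.example-universe.com
    print(client.universe_domain)  # example-universe.com

    # Unsupported values fail fast: GOOGLE_API_USE_CLIENT_CERTIFICATE must be
    # "true"/"false" and GOOGLE_API_USE_MTLS_ENDPOINT must be "always"/"never"/"auto".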
+ """ + use_client_cert = os.getenv( + "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" + ).lower() + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower() + universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN") + if use_client_cert not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError( + "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + return use_client_cert == "true", use_mtls_endpoint, universe_domain_env + + @staticmethod + def _get_client_cert_source(provided_cert_source, use_cert_flag): + """Return the client cert source to be used by the client. + + Args: + provided_cert_source (bytes): The client certificate source provided. + use_cert_flag (bool): A flag indicating whether to use the client certificate. + + Returns: + bytes or None: The client cert source to be used by the client. + """ + client_cert_source = None + if use_cert_flag: + if provided_cert_source: + client_cert_source = provided_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + return client_cert_source + + @staticmethod + def _get_api_endpoint( + api_override, client_cert_source, universe_domain, use_mtls_endpoint + ): + """Return the API endpoint used by the client. + + Args: + api_override (str): The API endpoint override. If specified, this is always + the return value of this function and the other arguments are not used. + client_cert_source (bytes): The client certificate source used by the client. + universe_domain (str): The universe domain used by the client. + use_mtls_endpoint (str): How to use the mTLS endpoint, which depends also on the other parameters. + Possible values are "always", "auto", or "never". + + Returns: + str: The API endpoint to be used by the client. + """ + if api_override is not None: + api_endpoint = api_override + elif use_mtls_endpoint == "always" or ( + use_mtls_endpoint == "auto" and client_cert_source + ): + _default_universe = BigtableClient._DEFAULT_UNIVERSE + if universe_domain != _default_universe: + raise MutualTLSChannelError( + f"mTLS is not supported in any universe other than {_default_universe}." + ) + api_endpoint = BigtableClient.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = BigtableClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=universe_domain + ) + return api_endpoint + + @staticmethod + def _get_universe_domain( + client_universe_domain: Optional[str], universe_domain_env: Optional[str] + ) -> str: + """Return the universe domain used by the client. + + Args: + client_universe_domain (Optional[str]): The universe domain configured via the client options. + universe_domain_env (Optional[str]): The universe domain configured via the "GOOGLE_CLOUD_UNIVERSE_DOMAIN" environment variable. + + Returns: + str: The universe domain to be used by the client. + + Raises: + ValueError: If the universe domain is an empty string. 
+ """ + universe_domain = BigtableClient._DEFAULT_UNIVERSE + if client_universe_domain is not None: + universe_domain = client_universe_domain + elif universe_domain_env is not None: + universe_domain = universe_domain_env + if len(universe_domain.strip()) == 0: + raise ValueError("Universe Domain cannot be an empty string.") + return universe_domain + + @staticmethod + def _compare_universes( + client_universe: str, credentials: ga_credentials.Credentials + ) -> bool: + """Returns True iff the universe domains used by the client and credentials match. + + Args: + client_universe (str): The universe domain configured via the client options. + credentials (ga_credentials.Credentials): The credentials being used in the client. + + Returns: + bool: True iff client_universe matches the universe in credentials. + + Raises: + ValueError: when client_universe does not match the universe in credentials. + """ + + default_universe = BigtableClient._DEFAULT_UNIVERSE + credentials_universe = getattr(credentials, "universe_domain", default_universe) + + if client_universe != credentials_universe: + raise ValueError( + "The configured universe domain " + f"({client_universe}) does not match the universe domain " + f"found in the credentials ({credentials_universe}). " + "If you haven't configured the universe domain explicitly, " + f"`{default_universe}` is the default." + ) + return True + + def _validate_universe_domain(self): + """Validates client's and credentials' universe domains are consistent. + + Returns: + bool: True iff the configured universe domain is valid. + + Raises: + ValueError: If the configured universe domain is not valid. + """ + self._is_universe_domain_valid = ( + self._is_universe_domain_valid + or BigtableClient._compare_universes( + self.universe_domain, self.transport._credentials + ) + ) + return self._is_universe_domain_valid + + @property + def api_endpoint(self): + """Return the API endpoint used by the client instance. + + Returns: + str: The API endpoint used by the client instance. + """ + return self._api_endpoint + + @property + def universe_domain(self) -> str: + """Return the universe domain used by the client instance. + + Returns: + str: The universe domain used by the client instance. + """ + return self._universe_domain + def __init__( self, *, @@ -382,22 +564,32 @@ def __init__( transport (Union[str, BigtableTransport]): The transport to use. If set to None, a transport is chosen automatically. - client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): + Custom options for the client. + + 1. The ``api_endpoint`` property can be used to override the + default endpoint provided by the client when ``transport`` is + not explicitly provided. 
Only if this property is not set and + ``transport`` was not explicitly provided, the endpoint is + determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment + variable, which have one of the following values: "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + use the default regular endpoint) and "auto" (auto-switch to the + default mTLS endpoint if client certificate is present; this is + the default value). + + 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If + to provide a client certificate for mTLS transport. If not provided, the default SSL client certificate will be used if present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not set, no client certificate will be used. + + 3. The ``universe_domain`` property can be used to override the + default "googleapis.com" universe. Note that the ``api_endpoint`` + property still takes precedence; and ``universe_domain`` is + currently not supported for mTLS. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. @@ -408,17 +600,34 @@ def __init__( google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - client_options = cast(client_options_lib.ClientOptions, client_options) + self._client_options = client_options + if isinstance(self._client_options, dict): + self._client_options = client_options_lib.from_dict(self._client_options) + if self._client_options is None: + self._client_options = client_options_lib.ClientOptions() + self._client_options = cast( + client_options_lib.ClientOptions, self._client_options + ) + + universe_domain_opt = getattr(self._client_options, "universe_domain", None) - api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source( - client_options + ( + self._use_client_cert, + self._use_mtls_endpoint, + self._universe_domain_env, + ) = BigtableClient._read_environment_variables() + self._client_cert_source = BigtableClient._get_client_cert_source( + self._client_options.client_cert_source, self._use_client_cert + ) + self._universe_domain = BigtableClient._get_universe_domain( + universe_domain_opt, self._universe_domain_env ) + self._api_endpoint = None # updated below, depending on `transport` - api_key_value = getattr(client_options, "api_key", None) + # Initialize the universe domain validation. + self._is_universe_domain_valid = False + + api_key_value = getattr(self._client_options, "api_key", None) if api_key_value and credentials: raise ValueError( "client_options.api_key and credentials are mutually exclusive" @@ -427,20 +636,30 @@ def __init__( # Save or instantiate the transport. # Ordinarily, we provide the transport, but allowing a custom transport # instance provides an extensibility point for unusual situations. 
- if isinstance(transport, BigtableTransport): + transport_provided = isinstance(transport, BigtableTransport) + if transport_provided: # transport is a BigtableTransport instance. - if credentials or client_options.credentials_file or api_key_value: + if credentials or self._client_options.credentials_file or api_key_value: raise ValueError( "When providing a transport instance, " "provide its credentials directly." ) - if client_options.scopes: + if self._client_options.scopes: raise ValueError( "When providing a transport instance, provide its scopes " "directly." ) - self._transport = transport - else: + self._transport = cast(BigtableTransport, transport) + self._api_endpoint = self._transport.host + + self._api_endpoint = self._api_endpoint or BigtableClient._get_api_endpoint( + self._client_options.api_endpoint, + self._client_cert_source, + self._universe_domain, + self._use_mtls_endpoint, + ) + + if not transport_provided: import google.auth._default # type: ignore if api_key_value and hasattr( @@ -450,17 +669,17 @@ def __init__( api_key_value ) - Transport = type(self).get_transport_class(transport) + Transport = type(self).get_transport_class(cast(str, transport)) self._transport = Transport( credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, + credentials_file=self._client_options.credentials_file, + host=self._api_endpoint, + scopes=self._client_options.scopes, + client_cert_source_for_mtls=self._client_cert_source, + quota_project_id=self._client_options.quota_project_id, client_info=client_info, always_use_jwt_access=True, - api_audience=client_options.api_audience, + api_audience=self._client_options.api_audience, ) def read_rows( @@ -557,6 +776,9 @@ def read_rows( gapic_v1.routing_header.to_grpc_metadata(header_params), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -661,6 +883,9 @@ def sample_row_keys( gapic_v1.routing_header.to_grpc_metadata(header_params), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -786,6 +1011,9 @@ def mutate_row( gapic_v1.routing_header.to_grpc_metadata(header_params), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -905,6 +1133,9 @@ def mutate_rows( gapic_v1.routing_header.to_grpc_metadata(header_params), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1068,6 +1299,9 @@ def check_and_mutate_row( gapic_v1.routing_header.to_grpc_metadata(header_params), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1169,6 +1403,9 @@ def ping_and_warm( gapic_v1.routing_header.to_grpc_metadata(header_params), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1301,6 +1538,9 @@ def read_modify_write_row( gapic_v1.routing_header.to_grpc_metadata(header_params), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1407,6 +1647,9 @@ def generate_initial_change_stream_partitions( ), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. 
response = rpc( request, @@ -1505,6 +1748,9 @@ def read_change_stream( ), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py index b580bbca77c6..7d1475eb9fbc 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py @@ -64,7 +64,7 @@ def __init__( Args: host (Optional[str]): - The hostname to connect to. + The hostname to connect to (default: 'bigtable.googleapis.com'). credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none @@ -127,6 +127,10 @@ def __init__( host += ":443" self._host = host + @property + def host(self): + return self._host + def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. self._wrapped_methods = { diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py index 8ba04e761db0..bec9c85f110a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py @@ -65,7 +65,7 @@ def __init__( Args: host (Optional[str]): - The hostname to connect to. + The hostname to connect to (default: 'bigtable.googleapis.com'). credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py index 1d0a2bc4cf3f..7765ecce81ff 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py @@ -112,7 +112,7 @@ def __init__( Args: host (Optional[str]): - The hostname to connect to. + The hostname to connect to (default: 'bigtable.googleapis.com'). credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. 
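Tying the pieces together, a sketch of the failure mode that ``_compare_universes`` and ``_validate_universe_domain`` (added to the client above) guard against; the mismatch is detected on the first RPC rather than at construction time, project and instance names are placeholders, and passing ``universe_domain`` assumes a google-api-core release that supports it:

    from google.api_core.client_options import ClientOptions
    from google.auth.credentials import AnonymousCredentials
    from google.cloud.bigtable_v2 import BigtableClient

    # Credentials default to the "googleapis.com" universe, but the client is
    # configured for a different one.
    client = BigtableClient(
        credentials=AnonymousCredentials(),
        client_options=ClientOptions(universe_domain="example-universe.com"),
    )

    try:
        client.ping_and_warm(name="projects/my-project/instances/my-instance")
    except ValueError as exc:
        # "The configured universe domain (example-universe.com) does not match
        #  the universe domain found in the credentials (googleapis.com). ..."
        print(exc)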
These credentials identify the application to the service; if none diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py index 31d230f94fe4..17b47cb1cde7 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py @@ -34,9 +34,9 @@ import warnings try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore + OptionalRetry = Union[retries.Retry, object, None] # type: ignore from google.cloud.bigtable_v2.types import bigtable @@ -386,7 +386,7 @@ def __init__( Args: host (Optional[str]): - The hostname to connect to. + The hostname to connect to (default: 'bigtable.googleapis.com'). credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index ddbf0032f531..7a24cab5422c 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -29,6 +29,7 @@ import json import math import pytest +from google.api_core import api_core_version from proto.marshal.rules.dates import DurationRule, TimestampRule from proto.marshal.rules import wrappers from requests import Response @@ -86,6 +87,17 @@ def modify_default_endpoint(client): ) +# If default endpoint template is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint template so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint_template(client): + return ( + "test.{UNIVERSE_DOMAIN}" + if ("localhost" in client._DEFAULT_ENDPOINT_TEMPLATE) + else client._DEFAULT_ENDPOINT_TEMPLATE + ) + + def test__get_default_mtls_endpoint(): api_endpoint = "example.googleapis.com" api_mtls_endpoint = "example.mtls.googleapis.com" @@ -116,6 +128,298 @@ def test__get_default_mtls_endpoint(): ) +def test__read_environment_variables(): + assert BigtableInstanceAdminClient._read_environment_variables() == ( + False, + "auto", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + assert BigtableInstanceAdminClient._read_environment_variables() == ( + True, + "auto", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + assert BigtableInstanceAdminClient._read_environment_variables() == ( + False, + "auto", + None, + ) + + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError) as excinfo: + BigtableInstanceAdminClient._read_environment_variables() + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + assert BigtableInstanceAdminClient._read_environment_variables() == ( + False, + "never", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + assert BigtableInstanceAdminClient._read_environment_variables() == ( + False, + "always", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}): + assert BigtableInstanceAdminClient._read_environment_variables() == ( + False, + "auto", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + BigtableInstanceAdminClient._read_environment_variables() + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + with mock.patch.dict(os.environ, {"GOOGLE_CLOUD_UNIVERSE_DOMAIN": "foo.com"}): + assert BigtableInstanceAdminClient._read_environment_variables() == ( + False, + "auto", + "foo.com", + ) + + +def test__get_client_cert_source(): + mock_provided_cert_source = mock.Mock() + mock_default_cert_source = mock.Mock() + + assert BigtableInstanceAdminClient._get_client_cert_source(None, False) is None + assert ( + BigtableInstanceAdminClient._get_client_cert_source( + mock_provided_cert_source, False + ) + is None + ) + assert ( + BigtableInstanceAdminClient._get_client_cert_source( + mock_provided_cert_source, True + ) + == mock_provided_cert_source + ) + + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", return_value=True + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=mock_default_cert_source, + ): + assert ( + BigtableInstanceAdminClient._get_client_cert_source(None, True) + is mock_default_cert_source + ) + assert ( + BigtableInstanceAdminClient._get_client_cert_source( + mock_provided_cert_source, "true" + ) + is mock_provided_cert_source + ) + + +@mock.patch.object( + BigtableInstanceAdminClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableInstanceAdminClient), +) +@mock.patch.object( + BigtableInstanceAdminAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + 
modify_default_endpoint_template(BigtableInstanceAdminAsyncClient), +) +def test__get_api_endpoint(): + api_override = "foo.com" + mock_client_cert_source = mock.Mock() + default_universe = BigtableInstanceAdminClient._DEFAULT_UNIVERSE + default_endpoint = BigtableInstanceAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=default_universe + ) + mock_universe = "bar.com" + mock_endpoint = BigtableInstanceAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=mock_universe + ) + + assert ( + BigtableInstanceAdminClient._get_api_endpoint( + api_override, mock_client_cert_source, default_universe, "always" + ) + == api_override + ) + assert ( + BigtableInstanceAdminClient._get_api_endpoint( + None, mock_client_cert_source, default_universe, "auto" + ) + == BigtableInstanceAdminClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + BigtableInstanceAdminClient._get_api_endpoint( + None, None, default_universe, "auto" + ) + == default_endpoint + ) + assert ( + BigtableInstanceAdminClient._get_api_endpoint( + None, None, default_universe, "always" + ) + == BigtableInstanceAdminClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + BigtableInstanceAdminClient._get_api_endpoint( + None, mock_client_cert_source, default_universe, "always" + ) + == BigtableInstanceAdminClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + BigtableInstanceAdminClient._get_api_endpoint( + None, None, mock_universe, "never" + ) + == mock_endpoint + ) + assert ( + BigtableInstanceAdminClient._get_api_endpoint( + None, None, default_universe, "never" + ) + == default_endpoint + ) + + with pytest.raises(MutualTLSChannelError) as excinfo: + BigtableInstanceAdminClient._get_api_endpoint( + None, mock_client_cert_source, mock_universe, "auto" + ) + assert ( + str(excinfo.value) + == "mTLS is not supported in any universe other than googleapis.com." + ) + + +def test__get_universe_domain(): + client_universe_domain = "foo.com" + universe_domain_env = "bar.com" + + assert ( + BigtableInstanceAdminClient._get_universe_domain( + client_universe_domain, universe_domain_env + ) + == client_universe_domain + ) + assert ( + BigtableInstanceAdminClient._get_universe_domain(None, universe_domain_env) + == universe_domain_env + ) + assert ( + BigtableInstanceAdminClient._get_universe_domain(None, None) + == BigtableInstanceAdminClient._DEFAULT_UNIVERSE + ) + + with pytest.raises(ValueError) as excinfo: + BigtableInstanceAdminClient._get_universe_domain("", None) + assert str(excinfo.value) == "Universe Domain cannot be an empty string." + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + BigtableInstanceAdminClient, + transports.BigtableInstanceAdminGrpcTransport, + "grpc", + ), + ( + BigtableInstanceAdminClient, + transports.BigtableInstanceAdminRestTransport, + "rest", + ), + ], +) +def test__validate_universe_domain(client_class, transport_class, transport_name): + client = client_class( + transport=transport_class(credentials=ga_credentials.AnonymousCredentials()) + ) + assert client._validate_universe_domain() == True + + # Test the case when universe is already validated. + assert client._validate_universe_domain() == True + + if transport_name == "grpc": + # Test the case where credentials are provided by the + # `local_channel_credentials`. The default universes in both match. 
+ channel = grpc.secure_channel( + "http://localhost/", grpc.local_channel_credentials() + ) + client = client_class(transport=transport_class(channel=channel)) + assert client._validate_universe_domain() == True + + # Test the case where credentials do not exist: e.g. a transport is provided + # with no credentials. Validation should still succeed because there is no + # mismatch with non-existent credentials. + channel = grpc.secure_channel( + "http://localhost/", grpc.local_channel_credentials() + ) + transport = transport_class(channel=channel) + transport._credentials = None + client = client_class(transport=transport) + assert client._validate_universe_domain() == True + + # TODO: This is needed to cater for older versions of google-auth + # Make this test unconditional once the minimum supported version of + # google-auth becomes 2.23.0 or higher. + google_auth_major, google_auth_minor = [ + int(part) for part in google.auth.__version__.split(".")[0:2] + ] + if google_auth_major > 2 or (google_auth_major == 2 and google_auth_minor >= 23): + credentials = ga_credentials.AnonymousCredentials() + credentials._universe_domain = "foo.com" + # Test the case when there is a universe mismatch from the credentials. + client = client_class(transport=transport_class(credentials=credentials)) + with pytest.raises(ValueError) as excinfo: + client._validate_universe_domain() + assert ( + str(excinfo.value) + == "The configured universe domain (googleapis.com) does not match the universe domain found in the credentials (foo.com). If you haven't configured the universe domain explicitly, `googleapis.com` is the default." + ) + + # Test the case when there is a universe mismatch from the client. + # + # TODO: Make this test unconditional once the minimum supported version of + # google-api-core becomes 2.15.0 or higher. + api_core_major, api_core_minor = [ + int(part) for part in api_core_version.__version__.split(".")[0:2] + ] + if api_core_major > 2 or (api_core_major == 2 and api_core_minor >= 15): + client = client_class( + client_options={"universe_domain": "bar.com"}, + transport=transport_class( + credentials=ga_credentials.AnonymousCredentials(), + ), + ) + with pytest.raises(ValueError) as excinfo: + client._validate_universe_domain() + assert ( + str(excinfo.value) + == "The configured universe domain (bar.com) does not match the universe domain found in the credentials (googleapis.com). If you haven't configured the universe domain explicitly, `googleapis.com` is the default." 
+ ) + + # Test that ValueError is raised if universe_domain is provided via client options and credentials is None + with pytest.raises(ValueError): + client._compare_universes("foo.bar", None) + + @pytest.mark.parametrize( "client_class,transport_name", [ @@ -239,13 +543,13 @@ def test_bigtable_instance_admin_client_get_transport_class(): ) @mock.patch.object( BigtableInstanceAdminClient, - "DEFAULT_ENDPOINT", - modify_default_endpoint(BigtableInstanceAdminClient), + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableInstanceAdminClient), ) @mock.patch.object( BigtableInstanceAdminAsyncClient, - "DEFAULT_ENDPOINT", - modify_default_endpoint(BigtableInstanceAdminAsyncClient), + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableInstanceAdminAsyncClient), ) def test_bigtable_instance_admin_client_client_options( client_class, transport_class, transport_name @@ -287,7 +591,9 @@ def test_bigtable_instance_admin_client_client_options( patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, @@ -317,15 +623,23 @@ def test_bigtable_instance_admin_client_client_options( # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has # unsupported value. with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): + with pytest.raises(MutualTLSChannelError) as excinfo: client = client_class(transport=transport_name) + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} ): - with pytest.raises(ValueError): + with pytest.raises(ValueError) as excinfo: client = client_class(transport=transport_name) + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) # Check the case quota_project_id is provided options = client_options.ClientOptions(quota_project_id="octopus") @@ -335,7 +649,9 @@ def test_bigtable_instance_admin_client_client_options( patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id="octopus", @@ -353,7 +669,9 @@ def test_bigtable_instance_admin_client_client_options( patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, @@ -406,13 +724,13 @@ def test_bigtable_instance_admin_client_client_options( ) @mock.patch.object( BigtableInstanceAdminClient, - "DEFAULT_ENDPOINT", - modify_default_endpoint(BigtableInstanceAdminClient), + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableInstanceAdminClient), ) @mock.patch.object( BigtableInstanceAdminAsyncClient, - "DEFAULT_ENDPOINT", - modify_default_endpoint(BigtableInstanceAdminAsyncClient), + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableInstanceAdminAsyncClient), ) @mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) def test_bigtable_instance_admin_client_mtls_env_auto( @@ -435,7 +753,9 @@ def test_bigtable_instance_admin_client_mtls_env_auto( if use_client_cert_env == "false": expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT + expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ) else: expected_client_cert_source = client_cert_source_callback expected_host = client.DEFAULT_MTLS_ENDPOINT @@ -467,7 +787,9 @@ def test_bigtable_instance_admin_client_mtls_env_auto( return_value=client_cert_source_callback, ): if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT + expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ) expected_client_cert_source = None else: expected_host = client.DEFAULT_MTLS_ENDPOINT @@ -501,7 +823,9 @@ def test_bigtable_instance_admin_client_mtls_env_auto( patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, @@ -591,6 +915,115 @@ def test_bigtable_instance_admin_client_get_mtls_endpoint_and_cert_source(client assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + client_class.get_mtls_endpoint_and_cert_source() + + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError) as excinfo: + client_class.get_mtls_endpoint_and_cert_source() + + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + + +@pytest.mark.parametrize( + "client_class", [BigtableInstanceAdminClient, BigtableInstanceAdminAsyncClient] +) +@mock.patch.object( + BigtableInstanceAdminClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableInstanceAdminClient), +) +@mock.patch.object( + BigtableInstanceAdminAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableInstanceAdminAsyncClient), +) +def test_bigtable_instance_admin_client_client_api_endpoint(client_class): + mock_client_cert_source = client_cert_source_callback + api_override = "foo.com" + default_universe = BigtableInstanceAdminClient._DEFAULT_UNIVERSE + default_endpoint = BigtableInstanceAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=default_universe + ) + mock_universe = "bar.com" + mock_endpoint = BigtableInstanceAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=mock_universe + ) + + # If ClientOptions.api_endpoint is set and GOOGLE_API_USE_CLIENT_CERTIFICATE="true", + # use ClientOptions.api_endpoint as the api endpoint regardless. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel" + ): + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=api_override + ) + client = client_class( + client_options=options, + credentials=ga_credentials.AnonymousCredentials(), + ) + assert client.api_endpoint == api_override + + # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="never", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + client = client_class(credentials=ga_credentials.AnonymousCredentials()) + assert client.api_endpoint == default_endpoint + + # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="always", + # use the DEFAULT_MTLS_ENDPOINT as the api endpoint. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + client = client_class(credentials=ga_credentials.AnonymousCredentials()) + assert client.api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + + # If ClientOptions.api_endpoint is not set, GOOGLE_API_USE_MTLS_ENDPOINT="auto" (default), + # GOOGLE_API_USE_CLIENT_CERTIFICATE="false" (default), default cert source doesn't exist, + # and ClientOptions.universe_domain="bar.com", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with universe domain as the api endpoint. 
+ options = client_options.ClientOptions() + universe_exists = hasattr(options, "universe_domain") + if universe_exists: + options = client_options.ClientOptions(universe_domain=mock_universe) + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + else: + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + assert client.api_endpoint == ( + mock_endpoint if universe_exists else default_endpoint + ) + assert client.universe_domain == ( + mock_universe if universe_exists else default_universe + ) + + # If ClientOptions does not have a universe domain attribute and GOOGLE_API_USE_MTLS_ENDPOINT="never", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. + options = client_options.ClientOptions() + if hasattr(options, "universe_domain"): + delattr(options, "universe_domain") + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + assert client.api_endpoint == default_endpoint + @pytest.mark.parametrize( "client_class,transport_class,transport_name", @@ -625,7 +1058,9 @@ def test_bigtable_instance_admin_client_client_options_scopes( patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=["1", "2"], client_cert_source_for_mtls=None, quota_project_id=None, @@ -670,7 +1105,9 @@ def test_bigtable_instance_admin_client_client_options_credentials_file( patched.assert_called_once_with( credentials=None, credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, @@ -730,7 +1167,9 @@ def test_bigtable_instance_admin_client_create_channel_credentials_file( patched.assert_called_once_with( credentials=None, credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, @@ -4261,7 +4700,7 @@ async def test_list_app_profiles_flattened_error_async(): def test_list_app_profiles_pager(transport_name: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials, + credentials=ga_credentials.AnonymousCredentials(), transport=transport_name, ) @@ -4313,7 +4752,7 @@ def test_list_app_profiles_pager(transport_name: str = "grpc"): def test_list_app_profiles_pages(transport_name: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials, + credentials=ga_credentials.AnonymousCredentials(), transport=transport_name, ) @@ -4357,7 +4796,7 @@ def test_list_app_profiles_pages(transport_name: str = "grpc"): @pytest.mark.asyncio async def test_list_app_profiles_async_pager(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials, + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -4409,7 +4848,7 @@ async def test_list_app_profiles_async_pager(): @pytest.mark.asyncio async def test_list_app_profiles_async_pages(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials, + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -5953,7 +6392,7 @@ async def test_list_hot_tablets_flattened_error_async(): def test_list_hot_tablets_pager(transport_name: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials, + credentials=ga_credentials.AnonymousCredentials(), transport=transport_name, ) @@ -6003,7 +6442,7 @@ def test_list_hot_tablets_pager(transport_name: str = "grpc"): def test_list_hot_tablets_pages(transport_name: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials, + credentials=ga_credentials.AnonymousCredentials(), transport=transport_name, ) @@ -6045,7 +6484,7 @@ def test_list_hot_tablets_pages(transport_name: str = "grpc"): @pytest.mark.asyncio async def test_list_hot_tablets_async_pager(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials, + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -6095,7 +6534,7 @@ async def test_list_hot_tablets_async_pager(): @pytest.mark.asyncio async def test_list_hot_tablets_async_pages(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials, + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -12348,7 +12787,7 @@ def test_credentials_transport_error(): ) # It is an error to provide an api_key and a credential. - options = mock.Mock() + options = client_options.ClientOptions() options.api_key = "api_key" with pytest.raises(ValueError): client = BigtableInstanceAdminClient( @@ -13381,7 +13820,9 @@ def test_api_key_credentials(client_class, transport_class): patched.assert_called_once_with( credentials=mock_cred, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index b29dc5106c7a..b52ad06065a4 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -29,6 +29,7 @@ import json import math import pytest +from google.api_core import api_core_version from proto.marshal.rules.dates import DurationRule, TimestampRule from proto.marshal.rules import wrappers from requests import Response @@ -88,6 +89,17 @@ def modify_default_endpoint(client): ) +# If default endpoint template is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint template so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint_template(client): + return ( + "test.{UNIVERSE_DOMAIN}" + if ("localhost" in client._DEFAULT_ENDPOINT_TEMPLATE) + else client._DEFAULT_ENDPOINT_TEMPLATE + ) + + def test__get_default_mtls_endpoint(): api_endpoint = "example.googleapis.com" api_mtls_endpoint = "example.mtls.googleapis.com" @@ -118,6 +130,286 @@ def test__get_default_mtls_endpoint(): ) +def test__read_environment_variables(): + assert BigtableTableAdminClient._read_environment_variables() == ( + False, + "auto", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + assert BigtableTableAdminClient._read_environment_variables() == ( + True, + "auto", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + assert BigtableTableAdminClient._read_environment_variables() == ( + False, + "auto", + None, + ) + + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError) as excinfo: + BigtableTableAdminClient._read_environment_variables() + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + assert BigtableTableAdminClient._read_environment_variables() == ( + False, + "never", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + assert BigtableTableAdminClient._read_environment_variables() == ( + False, + "always", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}): + assert BigtableTableAdminClient._read_environment_variables() == ( + False, + "auto", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + BigtableTableAdminClient._read_environment_variables() + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + with mock.patch.dict(os.environ, {"GOOGLE_CLOUD_UNIVERSE_DOMAIN": "foo.com"}): + assert BigtableTableAdminClient._read_environment_variables() == ( + False, + "auto", + "foo.com", + ) + + +def test__get_client_cert_source(): + mock_provided_cert_source = mock.Mock() + mock_default_cert_source = mock.Mock() + + assert BigtableTableAdminClient._get_client_cert_source(None, False) is None + assert ( + BigtableTableAdminClient._get_client_cert_source( + mock_provided_cert_source, False + ) + is None + ) + assert ( + BigtableTableAdminClient._get_client_cert_source( + mock_provided_cert_source, True + ) + == mock_provided_cert_source + ) + + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", return_value=True + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=mock_default_cert_source, + ): + assert ( + BigtableTableAdminClient._get_client_cert_source(None, True) + is mock_default_cert_source + ) + assert ( + BigtableTableAdminClient._get_client_cert_source( + mock_provided_cert_source, "true" + ) + is mock_provided_cert_source + ) + + +@mock.patch.object( + BigtableTableAdminClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableTableAdminClient), +) +@mock.patch.object( + BigtableTableAdminAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableTableAdminAsyncClient), +) +def 
test__get_api_endpoint(): + api_override = "foo.com" + mock_client_cert_source = mock.Mock() + default_universe = BigtableTableAdminClient._DEFAULT_UNIVERSE + default_endpoint = BigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=default_universe + ) + mock_universe = "bar.com" + mock_endpoint = BigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=mock_universe + ) + + assert ( + BigtableTableAdminClient._get_api_endpoint( + api_override, mock_client_cert_source, default_universe, "always" + ) + == api_override + ) + assert ( + BigtableTableAdminClient._get_api_endpoint( + None, mock_client_cert_source, default_universe, "auto" + ) + == BigtableTableAdminClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + BigtableTableAdminClient._get_api_endpoint(None, None, default_universe, "auto") + == default_endpoint + ) + assert ( + BigtableTableAdminClient._get_api_endpoint( + None, None, default_universe, "always" + ) + == BigtableTableAdminClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + BigtableTableAdminClient._get_api_endpoint( + None, mock_client_cert_source, default_universe, "always" + ) + == BigtableTableAdminClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + BigtableTableAdminClient._get_api_endpoint(None, None, mock_universe, "never") + == mock_endpoint + ) + assert ( + BigtableTableAdminClient._get_api_endpoint( + None, None, default_universe, "never" + ) + == default_endpoint + ) + + with pytest.raises(MutualTLSChannelError) as excinfo: + BigtableTableAdminClient._get_api_endpoint( + None, mock_client_cert_source, mock_universe, "auto" + ) + assert ( + str(excinfo.value) + == "mTLS is not supported in any universe other than googleapis.com." + ) + + +def test__get_universe_domain(): + client_universe_domain = "foo.com" + universe_domain_env = "bar.com" + + assert ( + BigtableTableAdminClient._get_universe_domain( + client_universe_domain, universe_domain_env + ) + == client_universe_domain + ) + assert ( + BigtableTableAdminClient._get_universe_domain(None, universe_domain_env) + == universe_domain_env + ) + assert ( + BigtableTableAdminClient._get_universe_domain(None, None) + == BigtableTableAdminClient._DEFAULT_UNIVERSE + ) + + with pytest.raises(ValueError) as excinfo: + BigtableTableAdminClient._get_universe_domain("", None) + assert str(excinfo.value) == "Universe Domain cannot be an empty string." + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (BigtableTableAdminClient, transports.BigtableTableAdminGrpcTransport, "grpc"), + (BigtableTableAdminClient, transports.BigtableTableAdminRestTransport, "rest"), + ], +) +def test__validate_universe_domain(client_class, transport_class, transport_name): + client = client_class( + transport=transport_class(credentials=ga_credentials.AnonymousCredentials()) + ) + assert client._validate_universe_domain() == True + + # Test the case when universe is already validated. + assert client._validate_universe_domain() == True + + if transport_name == "grpc": + # Test the case where credentials are provided by the + # `local_channel_credentials`. The default universes in both match. + channel = grpc.secure_channel( + "http://localhost/", grpc.local_channel_credentials() + ) + client = client_class(transport=transport_class(channel=channel)) + assert client._validate_universe_domain() == True + + # Test the case where credentials do not exist: e.g. a transport is provided + # with no credentials. 
Validation should still succeed because there is no + # mismatch with non-existent credentials. + channel = grpc.secure_channel( + "http://localhost/", grpc.local_channel_credentials() + ) + transport = transport_class(channel=channel) + transport._credentials = None + client = client_class(transport=transport) + assert client._validate_universe_domain() == True + + # TODO: This is needed to cater for older versions of google-auth + # Make this test unconditional once the minimum supported version of + # google-auth becomes 2.23.0 or higher. + google_auth_major, google_auth_minor = [ + int(part) for part in google.auth.__version__.split(".")[0:2] + ] + if google_auth_major > 2 or (google_auth_major == 2 and google_auth_minor >= 23): + credentials = ga_credentials.AnonymousCredentials() + credentials._universe_domain = "foo.com" + # Test the case when there is a universe mismatch from the credentials. + client = client_class(transport=transport_class(credentials=credentials)) + with pytest.raises(ValueError) as excinfo: + client._validate_universe_domain() + assert ( + str(excinfo.value) + == "The configured universe domain (googleapis.com) does not match the universe domain found in the credentials (foo.com). If you haven't configured the universe domain explicitly, `googleapis.com` is the default." + ) + + # Test the case when there is a universe mismatch from the client. + # + # TODO: Make this test unconditional once the minimum supported version of + # google-api-core becomes 2.15.0 or higher. + api_core_major, api_core_minor = [ + int(part) for part in api_core_version.__version__.split(".")[0:2] + ] + if api_core_major > 2 or (api_core_major == 2 and api_core_minor >= 15): + client = client_class( + client_options={"universe_domain": "bar.com"}, + transport=transport_class( + credentials=ga_credentials.AnonymousCredentials(), + ), + ) + with pytest.raises(ValueError) as excinfo: + client._validate_universe_domain() + assert ( + str(excinfo.value) + == "The configured universe domain (bar.com) does not match the universe domain found in the credentials (googleapis.com). If you haven't configured the universe domain explicitly, `googleapis.com` is the default." 
+ ) + + # Test that ValueError is raised if universe_domain is provided via client options and credentials is None + with pytest.raises(ValueError): + client._compare_universes("foo.bar", None) + + @pytest.mark.parametrize( "client_class,transport_name", [ @@ -233,13 +525,13 @@ def test_bigtable_table_admin_client_get_transport_class(): ) @mock.patch.object( BigtableTableAdminClient, - "DEFAULT_ENDPOINT", - modify_default_endpoint(BigtableTableAdminClient), + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableTableAdminClient), ) @mock.patch.object( BigtableTableAdminAsyncClient, - "DEFAULT_ENDPOINT", - modify_default_endpoint(BigtableTableAdminAsyncClient), + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableTableAdminAsyncClient), ) def test_bigtable_table_admin_client_client_options( client_class, transport_class, transport_name @@ -281,7 +573,9 @@ def test_bigtable_table_admin_client_client_options( patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, @@ -311,15 +605,23 @@ def test_bigtable_table_admin_client_client_options( # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has # unsupported value. with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): + with pytest.raises(MutualTLSChannelError) as excinfo: client = client_class(transport=transport_name) + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} ): - with pytest.raises(ValueError): + with pytest.raises(ValueError) as excinfo: client = client_class(transport=transport_name) + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) # Check the case quota_project_id is provided options = client_options.ClientOptions(quota_project_id="octopus") @@ -329,7 +631,9 @@ def test_bigtable_table_admin_client_client_options( patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id="octopus", @@ -347,7 +651,9 @@ def test_bigtable_table_admin_client_client_options( patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, @@ -400,13 +706,13 @@ def test_bigtable_table_admin_client_client_options( ) @mock.patch.object( BigtableTableAdminClient, - "DEFAULT_ENDPOINT", - modify_default_endpoint(BigtableTableAdminClient), + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableTableAdminClient), ) @mock.patch.object( BigtableTableAdminAsyncClient, - "DEFAULT_ENDPOINT", - modify_default_endpoint(BigtableTableAdminAsyncClient), + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableTableAdminAsyncClient), ) @mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) def test_bigtable_table_admin_client_mtls_env_auto( @@ -429,7 +735,9 @@ def test_bigtable_table_admin_client_mtls_env_auto( if use_client_cert_env == "false": expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT + expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ) else: expected_client_cert_source = client_cert_source_callback expected_host = client.DEFAULT_MTLS_ENDPOINT @@ -461,7 +769,9 @@ def test_bigtable_table_admin_client_mtls_env_auto( return_value=client_cert_source_callback, ): if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT + expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ) expected_client_cert_source = None else: expected_host = client.DEFAULT_MTLS_ENDPOINT @@ -495,7 +805,9 @@ def test_bigtable_table_admin_client_mtls_env_auto( patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, @@ -585,6 +897,115 @@ def test_bigtable_table_admin_client_get_mtls_endpoint_and_cert_source(client_cl assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + client_class.get_mtls_endpoint_and_cert_source() + + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError) as excinfo: + client_class.get_mtls_endpoint_and_cert_source() + + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + + +@pytest.mark.parametrize( + "client_class", [BigtableTableAdminClient, BigtableTableAdminAsyncClient] +) +@mock.patch.object( + BigtableTableAdminClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableTableAdminClient), +) +@mock.patch.object( + BigtableTableAdminAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableTableAdminAsyncClient), +) +def test_bigtable_table_admin_client_client_api_endpoint(client_class): + mock_client_cert_source = client_cert_source_callback + api_override = "foo.com" + default_universe = BigtableTableAdminClient._DEFAULT_UNIVERSE + default_endpoint = BigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=default_universe + ) + mock_universe = "bar.com" + mock_endpoint = BigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=mock_universe + ) + + # If ClientOptions.api_endpoint is set and GOOGLE_API_USE_CLIENT_CERTIFICATE="true", + # use ClientOptions.api_endpoint as the api endpoint regardless. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel" + ): + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=api_override + ) + client = client_class( + client_options=options, + credentials=ga_credentials.AnonymousCredentials(), + ) + assert client.api_endpoint == api_override + + # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="never", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + client = client_class(credentials=ga_credentials.AnonymousCredentials()) + assert client.api_endpoint == default_endpoint + + # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="always", + # use the DEFAULT_MTLS_ENDPOINT as the api endpoint. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + client = client_class(credentials=ga_credentials.AnonymousCredentials()) + assert client.api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + + # If ClientOptions.api_endpoint is not set, GOOGLE_API_USE_MTLS_ENDPOINT="auto" (default), + # GOOGLE_API_USE_CLIENT_CERTIFICATE="false" (default), default cert source doesn't exist, + # and ClientOptions.universe_domain="bar.com", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with universe domain as the api endpoint. 
+ options = client_options.ClientOptions() + universe_exists = hasattr(options, "universe_domain") + if universe_exists: + options = client_options.ClientOptions(universe_domain=mock_universe) + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + else: + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + assert client.api_endpoint == ( + mock_endpoint if universe_exists else default_endpoint + ) + assert client.universe_domain == ( + mock_universe if universe_exists else default_universe + ) + + # If ClientOptions does not have a universe domain attribute and GOOGLE_API_USE_MTLS_ENDPOINT="never", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. + options = client_options.ClientOptions() + if hasattr(options, "universe_domain"): + delattr(options, "universe_domain") + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + assert client.api_endpoint == default_endpoint + @pytest.mark.parametrize( "client_class,transport_class,transport_name", @@ -611,7 +1032,9 @@ def test_bigtable_table_admin_client_client_options_scopes( patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=["1", "2"], client_cert_source_for_mtls=None, quota_project_id=None, @@ -656,7 +1079,9 @@ def test_bigtable_table_admin_client_client_options_credentials_file( patched.assert_called_once_with( credentials=None, credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, @@ -716,7 +1141,9 @@ def test_bigtable_table_admin_client_create_channel_credentials_file( patched.assert_called_once_with( credentials=None, credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, @@ -1513,7 +1940,7 @@ async def test_list_tables_flattened_error_async(): def test_list_tables_pager(transport_name: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials, + credentials=ga_credentials.AnonymousCredentials(), transport=transport_name, ) @@ -1563,7 +1990,7 @@ def test_list_tables_pager(transport_name: str = "grpc"): def test_list_tables_pages(transport_name: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials, + credentials=ga_credentials.AnonymousCredentials(), transport=transport_name, ) @@ -1605,7 +2032,7 @@ def test_list_tables_pages(transport_name: str = "grpc"): @pytest.mark.asyncio async def test_list_tables_async_pager(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials, + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -1655,7 +2082,7 @@ async def test_list_tables_async_pager(): @pytest.mark.asyncio async def test_list_tables_async_pages(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials, + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -4281,7 +4708,7 @@ async def test_list_snapshots_flattened_error_async(): def test_list_snapshots_pager(transport_name: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials, + credentials=ga_credentials.AnonymousCredentials(), transport=transport_name, ) @@ -4331,7 +4758,7 @@ def test_list_snapshots_pager(transport_name: str = "grpc"): def test_list_snapshots_pages(transport_name: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials, + credentials=ga_credentials.AnonymousCredentials(), transport=transport_name, ) @@ -4373,7 +4800,7 @@ def test_list_snapshots_pages(transport_name: str = "grpc"): @pytest.mark.asyncio async def test_list_snapshots_async_pager(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials, + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -4423,7 +4850,7 @@ async def test_list_snapshots_async_pager(): @pytest.mark.asyncio async def test_list_snapshots_async_pages(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials, + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -5892,7 +6319,7 @@ async def test_list_backups_flattened_error_async(): def test_list_backups_pager(transport_name: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials, + credentials=ga_credentials.AnonymousCredentials(), transport=transport_name, ) @@ -5942,7 +6369,7 @@ def test_list_backups_pager(transport_name: str = "grpc"): def test_list_backups_pages(transport_name: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials, + credentials=ga_credentials.AnonymousCredentials(), transport=transport_name, ) @@ -5984,7 +6411,7 @@ def test_list_backups_pages(transport_name: str = "grpc"): @pytest.mark.asyncio async def test_list_backups_async_pager(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials, + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -6034,7 +6461,7 @@ async def test_list_backups_async_pager(): @pytest.mark.asyncio async def test_list_backups_async_pages(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials, + credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -14596,7 +15023,7 @@ def test_credentials_transport_error(): ) # It is an error to provide an api_key and a credential. 
- options = mock.Mock() + options = client_options.ClientOptions() options.api_key = "api_key" with pytest.raises(ValueError): client = BigtableTableAdminClient( @@ -15641,7 +16068,9 @@ def test_api_key_credentials(client_class, transport_class): patched.assert_called_once_with( credentials=mock_cred, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py index 2319306d722b..ab05af42621a 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py @@ -29,6 +29,7 @@ import json import math import pytest +from google.api_core import api_core_version from proto.marshal.rules.dates import DurationRule, TimestampRule from proto.marshal.rules import wrappers from requests import Response @@ -71,6 +72,17 @@ def modify_default_endpoint(client): ) +# If default endpoint template is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint template so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint_template(client): + return ( + "test.{UNIVERSE_DOMAIN}" + if ("localhost" in client._DEFAULT_ENDPOINT_TEMPLATE) + else client._DEFAULT_ENDPOINT_TEMPLATE + ) + + def test__get_default_mtls_endpoint(): api_endpoint = "example.googleapis.com" api_mtls_endpoint = "example.mtls.googleapis.com" @@ -95,6 +107,251 @@ def test__get_default_mtls_endpoint(): assert BigtableClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi +def test__read_environment_variables(): + assert BigtableClient._read_environment_variables() == (False, "auto", None) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + assert BigtableClient._read_environment_variables() == (True, "auto", None) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + assert BigtableClient._read_environment_variables() == (False, "auto", None) + + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError) as excinfo: + BigtableClient._read_environment_variables() + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + assert BigtableClient._read_environment_variables() == (False, "never", None) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + assert BigtableClient._read_environment_variables() == (False, "always", None) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}): + assert BigtableClient._read_environment_variables() == (False, "auto", None) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + BigtableClient._read_environment_variables() + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + with mock.patch.dict(os.environ, {"GOOGLE_CLOUD_UNIVERSE_DOMAIN": "foo.com"}): + assert 
BigtableClient._read_environment_variables() == ( + False, + "auto", + "foo.com", + ) + + +def test__get_client_cert_source(): + mock_provided_cert_source = mock.Mock() + mock_default_cert_source = mock.Mock() + + assert BigtableClient._get_client_cert_source(None, False) is None + assert ( + BigtableClient._get_client_cert_source(mock_provided_cert_source, False) is None + ) + assert ( + BigtableClient._get_client_cert_source(mock_provided_cert_source, True) + == mock_provided_cert_source + ) + + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", return_value=True + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=mock_default_cert_source, + ): + assert ( + BigtableClient._get_client_cert_source(None, True) + is mock_default_cert_source + ) + assert ( + BigtableClient._get_client_cert_source( + mock_provided_cert_source, "true" + ) + is mock_provided_cert_source + ) + + +@mock.patch.object( + BigtableClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableClient), +) +@mock.patch.object( + BigtableAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableAsyncClient), +) +def test__get_api_endpoint(): + api_override = "foo.com" + mock_client_cert_source = mock.Mock() + default_universe = BigtableClient._DEFAULT_UNIVERSE + default_endpoint = BigtableClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=default_universe + ) + mock_universe = "bar.com" + mock_endpoint = BigtableClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=mock_universe + ) + + assert ( + BigtableClient._get_api_endpoint( + api_override, mock_client_cert_source, default_universe, "always" + ) + == api_override + ) + assert ( + BigtableClient._get_api_endpoint( + None, mock_client_cert_source, default_universe, "auto" + ) + == BigtableClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + BigtableClient._get_api_endpoint(None, None, default_universe, "auto") + == default_endpoint + ) + assert ( + BigtableClient._get_api_endpoint(None, None, default_universe, "always") + == BigtableClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + BigtableClient._get_api_endpoint( + None, mock_client_cert_source, default_universe, "always" + ) + == BigtableClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + BigtableClient._get_api_endpoint(None, None, mock_universe, "never") + == mock_endpoint + ) + assert ( + BigtableClient._get_api_endpoint(None, None, default_universe, "never") + == default_endpoint + ) + + with pytest.raises(MutualTLSChannelError) as excinfo: + BigtableClient._get_api_endpoint( + None, mock_client_cert_source, mock_universe, "auto" + ) + assert ( + str(excinfo.value) + == "mTLS is not supported in any universe other than googleapis.com." + ) + + +def test__get_universe_domain(): + client_universe_domain = "foo.com" + universe_domain_env = "bar.com" + + assert ( + BigtableClient._get_universe_domain(client_universe_domain, universe_domain_env) + == client_universe_domain + ) + assert ( + BigtableClient._get_universe_domain(None, universe_domain_env) + == universe_domain_env + ) + assert ( + BigtableClient._get_universe_domain(None, None) + == BigtableClient._DEFAULT_UNIVERSE + ) + + with pytest.raises(ValueError) as excinfo: + BigtableClient._get_universe_domain("", None) + assert str(excinfo.value) == "Universe Domain cannot be an empty string." 
+ + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (BigtableClient, transports.BigtableGrpcTransport, "grpc"), + (BigtableClient, transports.BigtableRestTransport, "rest"), + ], +) +def test__validate_universe_domain(client_class, transport_class, transport_name): + client = client_class( + transport=transport_class(credentials=ga_credentials.AnonymousCredentials()) + ) + assert client._validate_universe_domain() == True + + # Test the case when universe is already validated. + assert client._validate_universe_domain() == True + + if transport_name == "grpc": + # Test the case where credentials are provided by the + # `local_channel_credentials`. The default universes in both match. + channel = grpc.secure_channel( + "http://localhost/", grpc.local_channel_credentials() + ) + client = client_class(transport=transport_class(channel=channel)) + assert client._validate_universe_domain() == True + + # Test the case where credentials do not exist: e.g. a transport is provided + # with no credentials. Validation should still succeed because there is no + # mismatch with non-existent credentials. + channel = grpc.secure_channel( + "http://localhost/", grpc.local_channel_credentials() + ) + transport = transport_class(channel=channel) + transport._credentials = None + client = client_class(transport=transport) + assert client._validate_universe_domain() == True + + # TODO: This is needed to cater for older versions of google-auth + # Make this test unconditional once the minimum supported version of + # google-auth becomes 2.23.0 or higher. + google_auth_major, google_auth_minor = [ + int(part) for part in google.auth.__version__.split(".")[0:2] + ] + if google_auth_major > 2 or (google_auth_major == 2 and google_auth_minor >= 23): + credentials = ga_credentials.AnonymousCredentials() + credentials._universe_domain = "foo.com" + # Test the case when there is a universe mismatch from the credentials. + client = client_class(transport=transport_class(credentials=credentials)) + with pytest.raises(ValueError) as excinfo: + client._validate_universe_domain() + assert ( + str(excinfo.value) + == "The configured universe domain (googleapis.com) does not match the universe domain found in the credentials (foo.com). If you haven't configured the universe domain explicitly, `googleapis.com` is the default." + ) + + # Test the case when there is a universe mismatch from the client. + # + # TODO: Make this test unconditional once the minimum supported version of + # google-api-core becomes 2.15.0 or higher. + api_core_major, api_core_minor = [ + int(part) for part in api_core_version.__version__.split(".")[0:2] + ] + if api_core_major > 2 or (api_core_major == 2 and api_core_minor >= 15): + client = client_class( + client_options={"universe_domain": "bar.com"}, + transport=transport_class( + credentials=ga_credentials.AnonymousCredentials(), + ), + ) + with pytest.raises(ValueError) as excinfo: + client._validate_universe_domain() + assert ( + str(excinfo.value) + == "The configured universe domain (bar.com) does not match the universe domain found in the credentials (googleapis.com). If you haven't configured the universe domain explicitly, `googleapis.com` is the default." 
+ ) + + # Test that ValueError is raised if universe_domain is provided via client options and credentials is None + with pytest.raises(ValueError): + client._compare_universes("foo.bar", None) + + @pytest.mark.parametrize( "client_class,transport_name", [ @@ -201,12 +458,14 @@ def test_bigtable_client_get_transport_class(): ], ) @mock.patch.object( - BigtableClient, "DEFAULT_ENDPOINT", modify_default_endpoint(BigtableClient) + BigtableClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableClient), ) @mock.patch.object( BigtableAsyncClient, - "DEFAULT_ENDPOINT", - modify_default_endpoint(BigtableAsyncClient), + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableAsyncClient), ) def test_bigtable_client_client_options(client_class, transport_class, transport_name): # Check that if channel is provided we won't create a new one. @@ -246,7 +505,9 @@ def test_bigtable_client_client_options(client_class, transport_class, transport patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, @@ -276,15 +537,23 @@ def test_bigtable_client_client_options(client_class, transport_class, transport # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has # unsupported value. with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): + with pytest.raises(MutualTLSChannelError) as excinfo: client = client_class(transport=transport_name) + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} ): - with pytest.raises(ValueError): + with pytest.raises(ValueError) as excinfo: client = client_class(transport=transport_name) + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) # Check the case quota_project_id is provided options = client_options.ClientOptions(quota_project_id="octopus") @@ -294,7 +563,9 @@ def test_bigtable_client_client_options(client_class, transport_class, transport patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id="octopus", @@ -312,7 +583,9 @@ def test_bigtable_client_client_options(client_class, transport_class, transport patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, @@ -344,12 +617,14 @@ def test_bigtable_client_client_options(client_class, transport_class, transport ], ) @mock.patch.object( - BigtableClient, "DEFAULT_ENDPOINT", modify_default_endpoint(BigtableClient) + BigtableClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableClient), ) @mock.patch.object( BigtableAsyncClient, - "DEFAULT_ENDPOINT", - modify_default_endpoint(BigtableAsyncClient), + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableAsyncClient), ) @mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) def test_bigtable_client_mtls_env_auto( @@ -372,7 +647,9 @@ def test_bigtable_client_mtls_env_auto( if use_client_cert_env == "false": expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT + expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ) else: expected_client_cert_source = client_cert_source_callback expected_host = client.DEFAULT_MTLS_ENDPOINT @@ -404,7 +681,9 @@ def test_bigtable_client_mtls_env_auto( return_value=client_cert_source_callback, ): if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT + expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ) expected_client_cert_source = None else: expected_host = client.DEFAULT_MTLS_ENDPOINT @@ -438,7 +717,9 @@ def test_bigtable_client_mtls_env_auto( patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, @@ -524,6 +805,113 @@ def test_bigtable_client_get_mtls_endpoint_and_cert_source(client_class): assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + client_class.get_mtls_endpoint_and_cert_source() + + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError) as excinfo: + client_class.get_mtls_endpoint_and_cert_source() + + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + + +@pytest.mark.parametrize("client_class", [BigtableClient, BigtableAsyncClient]) +@mock.patch.object( + BigtableClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableClient), +) +@mock.patch.object( + BigtableAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableAsyncClient), +) +def test_bigtable_client_client_api_endpoint(client_class): + mock_client_cert_source = client_cert_source_callback + api_override = "foo.com" + default_universe = BigtableClient._DEFAULT_UNIVERSE + default_endpoint = BigtableClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=default_universe + ) + mock_universe = "bar.com" + mock_endpoint = BigtableClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=mock_universe + ) + + # If ClientOptions.api_endpoint is set and GOOGLE_API_USE_CLIENT_CERTIFICATE="true", + # use ClientOptions.api_endpoint as the api endpoint regardless. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel" + ): + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=api_override + ) + client = client_class( + client_options=options, + credentials=ga_credentials.AnonymousCredentials(), + ) + assert client.api_endpoint == api_override + + # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="never", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + client = client_class(credentials=ga_credentials.AnonymousCredentials()) + assert client.api_endpoint == default_endpoint + + # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="always", + # use the DEFAULT_MTLS_ENDPOINT as the api endpoint. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + client = client_class(credentials=ga_credentials.AnonymousCredentials()) + assert client.api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + + # If ClientOptions.api_endpoint is not set, GOOGLE_API_USE_MTLS_ENDPOINT="auto" (default), + # GOOGLE_API_USE_CLIENT_CERTIFICATE="false" (default), default cert source doesn't exist, + # and ClientOptions.universe_domain="bar.com", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with universe domain as the api endpoint. 
+ options = client_options.ClientOptions() + universe_exists = hasattr(options, "universe_domain") + if universe_exists: + options = client_options.ClientOptions(universe_domain=mock_universe) + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + else: + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + assert client.api_endpoint == ( + mock_endpoint if universe_exists else default_endpoint + ) + assert client.universe_domain == ( + mock_universe if universe_exists else default_universe + ) + + # If ClientOptions does not have a universe domain attribute and GOOGLE_API_USE_MTLS_ENDPOINT="never", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. + options = client_options.ClientOptions() + if hasattr(options, "universe_domain"): + delattr(options, "universe_domain") + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + assert client.api_endpoint == default_endpoint + @pytest.mark.parametrize( "client_class,transport_class,transport_name", @@ -546,7 +934,9 @@ def test_bigtable_client_client_options_scopes( patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=["1", "2"], client_cert_source_for_mtls=None, quota_project_id=None, @@ -581,7 +971,9 @@ def test_bigtable_client_client_options_credentials_file( patched.assert_called_once_with( credentials=None, credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, @@ -634,7 +1026,9 @@ def test_bigtable_client_create_channel_credentials_file( patched.assert_called_once_with( credentials=None, credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, @@ -5639,7 +6033,7 @@ def test_credentials_transport_error(): ) # It is an error to provide an api_key and a credential. 
- options = mock.Mock() + options = client_options.ClientOptions() options.api_key = "api_key" with pytest.raises(ValueError): client = BigtableClient( @@ -6428,7 +6822,9 @@ def test_api_key_credentials(client_class, transport_class): patched.assert_called_once_with( credentials=mock_cred, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, From ecb0a5eafa2d455e8dcc16e68680b169d663f2cf Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Wed, 7 Feb 2024 13:28:16 -0800 Subject: [PATCH 779/892] chore(main): release 2.23.0 (#916) --- .../.release-please-manifest.json | 2 +- packages/google-cloud-bigtable/CHANGELOG.md | 13 +++++++++++++ .../google/cloud/bigtable/gapic_version.py | 2 +- .../google/cloud/bigtable_admin/gapic_version.py | 2 +- .../google/cloud/bigtable_admin_v2/gapic_version.py | 2 +- .../google/cloud/bigtable_v2/gapic_version.py | 2 +- 6 files changed, 18 insertions(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/.release-please-manifest.json b/packages/google-cloud-bigtable/.release-please-manifest.json index a5ab48803e32..b94f3df9f05d 100644 --- a/packages/google-cloud-bigtable/.release-please-manifest.json +++ b/packages/google-cloud-bigtable/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "2.22.0" + ".": "2.23.0" } \ No newline at end of file diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 5f86fdd88968..ea8a8525da5c 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,19 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.23.0](https://github.com/googleapis/python-bigtable/compare/v2.22.0...v2.23.0) (2024-02-07) + + +### Features + +* Add async data client preview ([7088e39](https://github.com/googleapis/python-bigtable/commit/7088e39c6bac10e5f830e8fa68e181412910ec5a)) +* Adding feature flags for routing cookie and retry info ([#905](https://github.com/googleapis/python-bigtable/issues/905)) ([1859e67](https://github.com/googleapis/python-bigtable/commit/1859e67961629663a8749eea849b5b005fcbc09f)) + + +### Bug Fixes + +* Fix `ValueError` in `test__validate_universe_domain` ([#929](https://github.com/googleapis/python-bigtable/issues/929)) ([aa76a5a](https://github.com/googleapis/python-bigtable/commit/aa76a5aaa349386d5972d96e1255389e30df8764)) + ## [2.22.0](https://github.com/googleapis/python-bigtable/compare/v2.21.0...v2.22.0) (2023-12-12) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py index 03d6d0200b82..f01e1d3a583c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "2.22.0" # {x-release-please-version} +__version__ = "2.23.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py index 03d6d0200b82..f01e1d3a583c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.22.0" # {x-release-please-version} +__version__ = "2.23.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py index 03d6d0200b82..f01e1d3a583c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.22.0" # {x-release-please-version} +__version__ = "2.23.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py index 03d6d0200b82..f01e1d3a583c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.22.0" # {x-release-please-version} +__version__ = "2.23.0" # {x-release-please-version} From fce101109646aa51e7aeea48db4e95216557b568 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Mon, 4 Mar 2024 16:07:41 -0800 Subject: [PATCH 780/892] build(deps): bump cryptography from 42.0.2 to 42.0.4 in .kokoro (#939) Source-Link: https://github.com/googleapis/synthtool/commit/d895aec3679ad22aa120481f746bf9f2f325f26f Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:98f3afd11308259de6e828e37376d18867fd321aba07826e29e4f8d9cab56bad Co-authored-by: Owl Bot --- .../.github/.OwlBot.lock.yaml | 4 +- .../.kokoro/requirements.txt | 66 +++++++++---------- 2 files changed, 35 insertions(+), 35 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 2aefd0e91175..e4e943e0259a 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. 
docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:97b671488ad548ef783a452a9e1276ac10f144d5ae56d98cc4bf77ba504082b4 -# created: 2024-02-06T03:20:16.660474034Z + digest: sha256:98f3afd11308259de6e828e37376d18867fd321aba07826e29e4f8d9cab56bad +# created: 2024-02-27T15:56:18.442440378Z diff --git a/packages/google-cloud-bigtable/.kokoro/requirements.txt b/packages/google-cloud-bigtable/.kokoro/requirements.txt index 8c11c9f3e9b6..bda8e38c4f31 100644 --- a/packages/google-cloud-bigtable/.kokoro/requirements.txt +++ b/packages/google-cloud-bigtable/.kokoro/requirements.txt @@ -93,39 +93,39 @@ colorlog==6.7.0 \ # via # gcp-docuploader # nox -cryptography==42.0.0 \ - --hash=sha256:0a68bfcf57a6887818307600c3c0ebc3f62fbb6ccad2240aa21887cda1f8df1b \ - --hash=sha256:146e971e92a6dd042214b537a726c9750496128453146ab0ee8971a0299dc9bd \ - --hash=sha256:14e4b909373bc5bf1095311fa0f7fcabf2d1a160ca13f1e9e467be1ac4cbdf94 \ - --hash=sha256:206aaf42e031b93f86ad60f9f5d9da1b09164f25488238ac1dc488334eb5e221 \ - --hash=sha256:3005166a39b70c8b94455fdbe78d87a444da31ff70de3331cdec2c568cf25b7e \ - --hash=sha256:324721d93b998cb7367f1e6897370644751e5580ff9b370c0a50dc60a2003513 \ - --hash=sha256:33588310b5c886dfb87dba5f013b8d27df7ffd31dc753775342a1e5ab139e59d \ - --hash=sha256:35cf6ed4c38f054478a9df14f03c1169bb14bd98f0b1705751079b25e1cb58bc \ - --hash=sha256:3ca482ea80626048975360c8e62be3ceb0f11803180b73163acd24bf014133a0 \ - --hash=sha256:56ce0c106d5c3fec1038c3cca3d55ac320a5be1b44bf15116732d0bc716979a2 \ - --hash=sha256:5a217bca51f3b91971400890905a9323ad805838ca3fa1e202a01844f485ee87 \ - --hash=sha256:678cfa0d1e72ef41d48993a7be75a76b0725d29b820ff3cfd606a5b2b33fda01 \ - --hash=sha256:69fd009a325cad6fbfd5b04c711a4da563c6c4854fc4c9544bff3088387c77c0 \ - --hash=sha256:6cf9b76d6e93c62114bd19485e5cb003115c134cf9ce91f8ac924c44f8c8c3f4 \ - --hash=sha256:74f18a4c8ca04134d2052a140322002fef535c99cdbc2a6afc18a8024d5c9d5b \ - --hash=sha256:85f759ed59ffd1d0baad296e72780aa62ff8a71f94dc1ab340386a1207d0ea81 \ - --hash=sha256:87086eae86a700307b544625e3ba11cc600c3c0ef8ab97b0fda0705d6db3d4e3 \ - --hash=sha256:8814722cffcfd1fbd91edd9f3451b88a8f26a5fd41b28c1c9193949d1c689dc4 \ - --hash=sha256:8fedec73d590fd30c4e3f0d0f4bc961aeca8390c72f3eaa1a0874d180e868ddf \ - --hash=sha256:9515ea7f596c8092fdc9902627e51b23a75daa2c7815ed5aa8cf4f07469212ec \ - --hash=sha256:988b738f56c665366b1e4bfd9045c3efae89ee366ca3839cd5af53eaa1401bce \ - --hash=sha256:a2a8d873667e4fd2f34aedab02ba500b824692c6542e017075a2efc38f60a4c0 \ - --hash=sha256:bd7cf7a8d9f34cc67220f1195884151426ce616fdc8285df9054bfa10135925f \ - --hash=sha256:bdce70e562c69bb089523e75ef1d9625b7417c6297a76ac27b1b8b1eb51b7d0f \ - --hash=sha256:be14b31eb3a293fc6e6aa2807c8a3224c71426f7c4e3639ccf1a2f3ffd6df8c3 \ - --hash=sha256:be41b0c7366e5549265adf2145135dca107718fa44b6e418dc7499cfff6b4689 \ - --hash=sha256:c310767268d88803b653fffe6d6f2f17bb9d49ffceb8d70aed50ad45ea49ab08 \ - --hash=sha256:c58115384bdcfe9c7f644c72f10f6f42bed7cf59f7b52fe1bf7ae0a622b3a139 \ - --hash=sha256:c640b0ef54138fde761ec99a6c7dc4ce05e80420262c20fa239e694ca371d434 \ - --hash=sha256:ca20550bb590db16223eb9ccc5852335b48b8f597e2f6f0878bbfd9e7314eb17 \ - --hash=sha256:d97aae66b7de41cdf5b12087b5509e4e9805ed6f562406dfcf60e8481a9a28f8 \ - --hash=sha256:e9326ca78111e4c645f7e49cbce4ed2f3f85e17b61a563328c85a5208cf34440 +cryptography==42.0.4 \ + --hash=sha256:01911714117642a3f1792c7f376db572aadadbafcd8d75bb527166009c9f1d1b \ + --hash=sha256:0e89f7b84f421c56e7ff69f11c441ebda73b8a8e6488d322ef71746224c20fce \ + 
--hash=sha256:12d341bd42cdb7d4937b0cabbdf2a94f949413ac4504904d0cdbdce4a22cbf88 \ + --hash=sha256:15a1fb843c48b4a604663fa30af60818cd28f895572386e5f9b8a665874c26e7 \ + --hash=sha256:1cdcdbd117681c88d717437ada72bdd5be9de117f96e3f4d50dab3f59fd9ab20 \ + --hash=sha256:1df6fcbf60560d2113b5ed90f072dc0b108d64750d4cbd46a21ec882c7aefce9 \ + --hash=sha256:3c6048f217533d89f2f8f4f0fe3044bf0b2090453b7b73d0b77db47b80af8dff \ + --hash=sha256:3e970a2119507d0b104f0a8e281521ad28fc26f2820687b3436b8c9a5fcf20d1 \ + --hash=sha256:44a64043f743485925d3bcac548d05df0f9bb445c5fcca6681889c7c3ab12764 \ + --hash=sha256:4e36685cb634af55e0677d435d425043967ac2f3790ec652b2b88ad03b85c27b \ + --hash=sha256:5f8907fcf57392cd917892ae83708761c6ff3c37a8e835d7246ff0ad251d9298 \ + --hash=sha256:69b22ab6506a3fe483d67d1ed878e1602bdd5912a134e6202c1ec672233241c1 \ + --hash=sha256:6bfadd884e7280df24d26f2186e4e07556a05d37393b0f220a840b083dc6a824 \ + --hash=sha256:6d0fbe73728c44ca3a241eff9aefe6496ab2656d6e7a4ea2459865f2e8613257 \ + --hash=sha256:6ffb03d419edcab93b4b19c22ee80c007fb2d708429cecebf1dd3258956a563a \ + --hash=sha256:810bcf151caefc03e51a3d61e53335cd5c7316c0a105cc695f0959f2c638b129 \ + --hash=sha256:831a4b37accef30cccd34fcb916a5d7b5be3cbbe27268a02832c3e450aea39cb \ + --hash=sha256:887623fe0d70f48ab3f5e4dbf234986b1329a64c066d719432d0698522749929 \ + --hash=sha256:a0298bdc6e98ca21382afe914c642620370ce0470a01e1bef6dd9b5354c36854 \ + --hash=sha256:a1327f280c824ff7885bdeef8578f74690e9079267c1c8bd7dc5cc5aa065ae52 \ + --hash=sha256:c1f25b252d2c87088abc8bbc4f1ecbf7c919e05508a7e8628e6875c40bc70923 \ + --hash=sha256:c3a5cbc620e1e17009f30dd34cb0d85c987afd21c41a74352d1719be33380885 \ + --hash=sha256:ce8613beaffc7c14f091497346ef117c1798c202b01153a8cc7b8e2ebaaf41c0 \ + --hash=sha256:d2a27aca5597c8a71abbe10209184e1a8e91c1fd470b5070a2ea60cafec35bcd \ + --hash=sha256:dad9c385ba8ee025bb0d856714f71d7840020fe176ae0229de618f14dae7a6e2 \ + --hash=sha256:db4b65b02f59035037fde0998974d84244a64c3265bdef32a827ab9b63d61b18 \ + --hash=sha256:e09469a2cec88fb7b078e16d4adec594414397e8879a4341c6ace96013463d5b \ + --hash=sha256:e53dc41cda40b248ebc40b83b31516487f7db95ab8ceac1f042626bc43a2f992 \ + --hash=sha256:f1e85a178384bf19e36779d91ff35c7617c885da487d689b05c1366f9933ad74 \ + --hash=sha256:f47be41843200f7faec0683ad751e5ef11b9a56a220d57f300376cd8aba81660 \ + --hash=sha256:fb0cef872d8193e487fc6bdb08559c3aa41b659a7d9be48b2e10747f47863925 \ + --hash=sha256:ffc73996c4fca3d2b6c1c8c12bfd3ad00def8621da24f547626bf06441400449 # via # gcp-releasetool # secretstorage From 99bef6e9162ce6c12749b5942ed26af965fbf01a Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Mon, 18 Mar 2024 13:47:48 -0700 Subject: [PATCH 781/892] chore(docs): add basic samples for async data client (#940) --- .../samples/beam/noxfile_config.py | 4 +- .../samples/beam/requirements-test.txt | 2 +- .../samples/beam/requirements.txt | 2 +- .../samples/hello/async_main.py | 140 +++++++++ .../samples/hello/async_main_test.py | 39 +++ .../samples/hello/main.py | 4 +- .../samples/hello/noxfile.py | 15 +- .../samples/hello/requirements-test.txt | 2 +- .../samples/hello/requirements.txt | 2 +- .../hello_happybase/requirements-test.txt | 2 +- .../instanceadmin/requirements-test.txt | 2 +- .../metricscaler/requirements-test.txt | 2 +- .../samples/quickstart/requirements-test.txt | 2 +- .../requirements-test.txt | 2 +- .../data_client/data_client_snippets_async.py | 234 ++++++++++++++ .../data_client_snippets_async_test.py | 103 ++++++ .../samples/snippets/data_client/noxfile.py | 293 ++++++++++++++++++ 
.../data_client/requirements-test.txt | 2 + .../snippets/data_client/requirements.txt | 1 + .../snippets/deletes/deletes_snippets.py | 4 +- .../samples/snippets/deletes/deletes_test.py | 37 +-- .../samples/snippets/deletes/noxfile.py | 15 +- .../snippets/deletes/requirements-test.txt | 2 +- .../samples/snippets/deletes/requirements.txt | 1 - .../deletes/snapshots/snap_deletes_test.py | 24 -- .../snapshots => filters}/__init__.py | 0 .../samples/snippets/filters/filters_test.py | 91 +++--- .../samples/snippets/filters/noxfile.py | 15 +- .../snippets/filters/requirements-test.txt | 2 +- .../samples/snippets/filters/requirements.txt | 1 - .../filters/snapshots/snap_filters_test.py | 42 +-- .../samples/snippets/reads/__init__.py | 0 .../samples/snippets/reads/noxfile.py | 15 +- .../samples/snippets/reads/reads_test.py | 39 ++- .../snippets/reads/requirements-test.txt | 2 +- .../samples/snippets/reads/requirements.txt | 1 - .../reads/snapshots/snap_reads_test.py | 23 +- .../snippets/writes/requirements-test.txt | 2 +- .../samples/snippets/writes/write_batch.py | 27 +- .../samples/tableadmin/requirements-test.txt | 2 +- 40 files changed, 1006 insertions(+), 192 deletions(-) create mode 100644 packages/google-cloud-bigtable/samples/hello/async_main.py create mode 100644 packages/google-cloud-bigtable/samples/hello/async_main_test.py create mode 100644 packages/google-cloud-bigtable/samples/snippets/data_client/data_client_snippets_async.py create mode 100644 packages/google-cloud-bigtable/samples/snippets/data_client/data_client_snippets_async_test.py create mode 100644 packages/google-cloud-bigtable/samples/snippets/data_client/noxfile.py create mode 100644 packages/google-cloud-bigtable/samples/snippets/data_client/requirements-test.txt create mode 100644 packages/google-cloud-bigtable/samples/snippets/data_client/requirements.txt delete mode 100644 packages/google-cloud-bigtable/samples/snippets/deletes/snapshots/snap_deletes_test.py rename packages/google-cloud-bigtable/samples/snippets/{deletes/snapshots => filters}/__init__.py (100%) create mode 100644 packages/google-cloud-bigtable/samples/snippets/reads/__init__.py diff --git a/packages/google-cloud-bigtable/samples/beam/noxfile_config.py b/packages/google-cloud-bigtable/samples/beam/noxfile_config.py index eb01435a0579..66d7bc5aca17 100644 --- a/packages/google-cloud-bigtable/samples/beam/noxfile_config.py +++ b/packages/google-cloud-bigtable/samples/beam/noxfile_config.py @@ -23,8 +23,8 @@ TEST_CONFIG_OVERRIDE = { # You can opt out from the test for specific Python versions. 
"ignored_versions": [ - "2.7", # not supported - "3.10", # Beam wheels not yet released for Python 3.10 + "3.7", # Beam no longer supports Python 3.7 for new releases + "3.12", # Beam not yet supported for Python 3.12 ], # Old samples are opted out of enforcing Python type hints # All new samples should feature them diff --git a/packages/google-cloud-bigtable/samples/beam/requirements-test.txt b/packages/google-cloud-bigtable/samples/beam/requirements-test.txt index 8075a1ec560e..cb87efc0ff71 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements-test.txt @@ -1 +1 @@ -pytest==8.0.0 +pytest==7.4.4 diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index 70b1371ae9fc..86e305c224cd 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ -apache-beam==2.53.0 +apache-beam==2.54.0 google-cloud-bigtable==2.22.0 google-cloud-core==2.4.1 diff --git a/packages/google-cloud-bigtable/samples/hello/async_main.py b/packages/google-cloud-bigtable/samples/hello/async_main.py new file mode 100644 index 000000000000..d608bb073c70 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/hello/async_main.py @@ -0,0 +1,140 @@ +#!/usr/bin/env python + +# Copyright 2024 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Demonstrates how to connect to Cloud Bigtable and run some basic operations with the async APIs + +Prerequisites: + +- Create a Cloud Bigtable instance. + https://cloud.google.com/bigtable/docs/creating-instance +- Set your Google Application Default Credentials. + https://developers.google.com/identity/protocols/application-default-credentials +""" + +import argparse +import asyncio + +# [START bigtable_async_hw_imports] +from google.cloud import bigtable +from google.cloud.bigtable.data import row_filters +from google.cloud.bigtable.data import RowMutationEntry +from google.cloud.bigtable.data import SetCell +from google.cloud.bigtable.data import ReadRowsQuery + +# [END bigtable_async_hw_imports] + + +async def main(project_id, instance_id, table_id): + # [START bigtable_async_hw_connect] + client = bigtable.data.BigtableDataClientAsync(project=project_id) + table = client.get_table(instance_id, table_id) + # [END bigtable_async_hw_connect] + + # [START bigtable_async_hw_create_table] + from google.cloud.bigtable import column_family + + # the async client only supports the data API. 
Table creation is an admin operation,
+    # so use the admin client to create the table
+    print("Creating the {} table.".format(table_id))
+    admin_client = bigtable.Client(project=project_id, admin=True)
+    admin_instance = admin_client.instance(instance_id)
+    admin_table = admin_instance.table(table_id)
+
+    print("Creating column family cf1 with Max Version GC rule...")
+    # Create a column family with GC policy : most recent N versions
+    # Define the GC policy to retain only the most recent 2 versions
+    max_versions_rule = column_family.MaxVersionsGCRule(2)
+    column_family_id = "cf1"
+    column_families = {column_family_id: max_versions_rule}
+    if not admin_table.exists():
+        admin_table.create(column_families=column_families)
+    else:
+        print("Table {} already exists.".format(table_id))
+    # [END bigtable_async_hw_create_table]
+
+    # [START bigtable_async_hw_write_rows]
+    print("Writing some greetings to the table.")
+    greetings = ["Hello World!", "Hello Cloud Bigtable!", "Hello Python!"]
+    mutations = []
+    column = "greeting"
+    for i, value in enumerate(greetings):
+        # Note: This example uses sequential numeric IDs for simplicity,
+        # but this can result in poor performance in a production
+        # application. Since rows are stored in sorted order by key,
+        # sequential keys can result in poor distribution of operations
+        # across nodes.
+        #
+        # For more information about how to design a Bigtable schema for
+        # the best performance, see the documentation:
+        #
+        # https://cloud.google.com/bigtable/docs/schema-design
+        row_key = "greeting{}".format(i).encode()
+        row_mutation = RowMutationEntry(
+            row_key, SetCell(column_family_id, column, value)
+        )
+        mutations.append(row_mutation)
+    await table.bulk_mutate_rows(mutations)
+    # [END bigtable_async_hw_write_rows]
+
+    # [START bigtable_async_hw_create_filter]
+    # Create a filter to only retrieve the most recent version of the cell
+    # for each column across the entire row.
+    row_filter = row_filters.CellsColumnLimitFilter(1)
+    # [END bigtable_async_hw_create_filter]
+
+    # [START bigtable_async_hw_get_with_filter]
+    # [START bigtable_async_hw_get_by_key]
+    print("Getting a single greeting by row key.")
+    key = "greeting0".encode()
+
+    row = await table.read_row(key, row_filter=row_filter)
+    cell = row.cells[0]
+    print(cell.value.decode("utf-8"))
+    # [END bigtable_async_hw_get_by_key]
+    # [END bigtable_async_hw_get_with_filter]
+
+    # [START bigtable_async_hw_scan_with_filter]
+    # [START bigtable_async_hw_scan_all]
+    print("Scanning for all greetings:")
+    query = ReadRowsQuery(row_filter=row_filter)
+    async for row in await table.read_rows_stream(query):
+        cell = row.cells[0]
+        print(cell.value.decode("utf-8"))
+    # [END bigtable_async_hw_scan_all]
+    # [END bigtable_async_hw_scan_with_filter]
+
+    # [START bigtable_async_hw_delete_table]
+    # the async client only supports the data API. Table deletion is an admin operation,
+    # so use the admin client to delete the table
+    print("Deleting the {} table.".format(table_id))
+    admin_table.delete()
+    # [END bigtable_async_hw_delete_table]
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(
+        description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter
+    )
+    parser.add_argument("project_id", help="Your Cloud Platform project ID.")
+    parser.add_argument(
+        "instance_id", help="ID of the Cloud Bigtable instance to connect to."
+ ) + parser.add_argument( + "--table", help="Table to create and destroy.", default="Hello-Bigtable" + ) + + args = parser.parse_args() + asyncio.run(main(args.project_id, args.instance_id, args.table)) diff --git a/packages/google-cloud-bigtable/samples/hello/async_main_test.py b/packages/google-cloud-bigtable/samples/hello/async_main_test.py new file mode 100644 index 000000000000..a47ac2d3331e --- /dev/null +++ b/packages/google-cloud-bigtable/samples/hello/async_main_test.py @@ -0,0 +1,39 @@ +# Copyright 2024 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import random +import asyncio + +from async_main import main + +PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] +BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"] +TABLE_NAME_FORMAT = "hello-world-test-{}" +TABLE_NAME_RANGE = 10000 + + +def test_async_main(capsys): + table_name = TABLE_NAME_FORMAT.format(random.randrange(TABLE_NAME_RANGE)) + + asyncio.run(main(PROJECT, BIGTABLE_INSTANCE, table_name)) + + out, _ = capsys.readouterr() + assert "Creating the {} table.".format(table_name) in out + assert "Writing some greetings to the table." in out + assert "Getting a single greeting by row key." in out + assert "Hello World!" in out + assert "Scanning for all greetings" in out + assert "Hello Cloud Bigtable!" in out + assert "Deleting the {} table.".format(table_name) in out diff --git a/packages/google-cloud-bigtable/samples/hello/main.py b/packages/google-cloud-bigtable/samples/hello/main.py index 5e47b4a38bdb..3b7de34b0627 100644 --- a/packages/google-cloud-bigtable/samples/hello/main.py +++ b/packages/google-cloud-bigtable/samples/hello/main.py @@ -18,8 +18,8 @@ Prerequisites: -- Create a Cloud Bigtable cluster. - https://cloud.google.com/bigtable/docs/creating-cluster +- Create a Cloud Bigtable instance. + https://cloud.google.com/bigtable/docs/creating-instance - Set your Google Application Default Credentials. 
https://developers.google.com/identity/protocols/application-default-credentials """ diff --git a/packages/google-cloud-bigtable/samples/hello/noxfile.py b/packages/google-cloud-bigtable/samples/hello/noxfile.py index 483b55901791..3b7135946fd5 100644 --- a/packages/google-cloud-bigtable/samples/hello/noxfile.py +++ b/packages/google-cloud-bigtable/samples/hello/noxfile.py @@ -160,6 +160,7 @@ def blacken(session: nox.sessions.Session) -> None: # format = isort + black # + @nox.session def format(session: nox.sessions.Session) -> None: """ @@ -187,7 +188,9 @@ def _session_tests( session: nox.sessions.Session, post_install: Callable = None ) -> None: # check for presence of tests - test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob("**/test_*.py", recursive=True) + test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob( + "**/test_*.py", recursive=True + ) test_list.extend(glob.glob("**/tests", recursive=True)) if len(test_list) == 0: @@ -209,9 +212,7 @@ def _session_tests( if os.path.exists("requirements-test.txt"): if os.path.exists("constraints-test.txt"): - session.install( - "-r", "requirements-test.txt", "-c", "constraints-test.txt" - ) + session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") else: session.install("-r", "requirements-test.txt") with open("requirements-test.txt") as rtfile: @@ -224,9 +225,9 @@ def _session_tests( post_install(session) if "pytest-parallel" in packages: - concurrent_args.extend(['--workers', 'auto', '--tests-per-worker', 'auto']) + concurrent_args.extend(["--workers", "auto", "--tests-per-worker", "auto"]) elif "pytest-xdist" in packages: - concurrent_args.extend(['-n', 'auto']) + concurrent_args.extend(["-n", "auto"]) session.run( "pytest", @@ -256,7 +257,7 @@ def py(session: nox.sessions.Session) -> None: def _get_repo_root() -> Optional[str]: - """ Returns the root folder of the project. """ + """Returns the root folder of the project.""" # Get root of this repository. Assume we don't have directories nested deeper than 10 items. 
p = Path(os.getcwd()) for i in range(10): diff --git a/packages/google-cloud-bigtable/samples/hello/requirements-test.txt b/packages/google-cloud-bigtable/samples/hello/requirements-test.txt index 8075a1ec560e..cb87efc0ff71 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements-test.txt @@ -1 +1 @@ -pytest==8.0.0 +pytest==7.4.4 diff --git a/packages/google-cloud-bigtable/samples/hello/requirements.txt b/packages/google-cloud-bigtable/samples/hello/requirements.txt index 68419fbcb794..dd4fc1fb3241 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.22.0 +google-cloud-bigtable==2.23.0 google-cloud-core==2.4.1 diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt b/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt index 8075a1ec560e..cb87efc0ff71 100644 --- a/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt @@ -1 +1 @@ -pytest==8.0.0 +pytest==7.4.4 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt index 8075a1ec560e..cb87efc0ff71 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt @@ -1 +1 @@ -pytest==8.0.0 +pytest==7.4.4 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt index 8b8270b6c5ba..c0d4f70035bc 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt @@ -1,3 +1,3 @@ -pytest==8.0.0 +pytest==7.4.4 mock==5.1.0 google-cloud-testutils diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt index 8075a1ec560e..cb87efc0ff71 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt @@ -1 +1 @@ -pytest==8.0.0 +pytest==7.4.4 diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt b/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt index 8075a1ec560e..cb87efc0ff71 100644 --- a/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt @@ -1 +1 @@ -pytest==8.0.0 +pytest==7.4.4 diff --git a/packages/google-cloud-bigtable/samples/snippets/data_client/data_client_snippets_async.py b/packages/google-cloud-bigtable/samples/snippets/data_client/data_client_snippets_async.py new file mode 100644 index 000000000000..cb51bdc78743 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/snippets/data_client/data_client_snippets_async.py @@ -0,0 +1,234 @@ +#!/usr/bin/env python + +# Copyright 2024, Google LLC +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +async def write_simple(table): + # [START bigtable_async_write_simple] + from google.cloud.bigtable.data import BigtableDataClientAsync + from google.cloud.bigtable.data import SetCell + + async def write_simple(project_id, instance_id, table_id): + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + family_id = "stats_summary" + row_key = b"phone#4c410523#20190501" + + cell_mutation = SetCell(family_id, "connected_cell", 1) + wifi_mutation = SetCell(family_id, "connected_wifi", 1) + os_mutation = SetCell(family_id, "os_build", "PQ2A.190405.003") + + await table.mutate_row(row_key, cell_mutation) + await table.mutate_row(row_key, wifi_mutation) + await table.mutate_row(row_key, os_mutation) + + # [END bigtable_async_write_simple] + await write_simple(table.client.project, table.instance_id, table.table_id) + + +async def write_batch(table): + # [START bigtable_async_writes_batch] + from google.cloud.bigtable.data import BigtableDataClientAsync + from google.cloud.bigtable.data.mutations import SetCell + from google.cloud.bigtable.data.mutations import RowMutationEntry + + async def write_batch(project_id, instance_id, table_id): + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + family_id = "stats_summary" + + async with table.mutations_batcher() as batcher: + mutation_list = [ + SetCell(family_id, "connected_cell", 1), + SetCell(family_id, "connected_wifi", 1), + SetCell(family_id, "os_build", "12155.0.0-rc1"), + ] + batcher.append( + RowMutationEntry("tablet#a0b81f74#20190501", mutation_list) + ) + batcher.append( + RowMutationEntry("tablet#a0b81f74#20190502", mutation_list) + ) + # [END bigtable_async_writes_batch] + await write_batch(table.client.project, table.instance_id, table.table_id) + + +async def write_increment(table): + # [START bigtable_async_write_increment] + from google.cloud.bigtable.data import BigtableDataClientAsync + from google.cloud.bigtable.data.read_modify_write_rules import IncrementRule + + async def write_increment(project_id, instance_id, table_id): + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + family_id = "stats_summary" + row_key = "phone#4c410523#20190501" + + # Decrement the connected_wifi value by 1. 
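+                # read_modify_write_row applies this rule as a single atomic
+                # operation on the server; a negative increment_amount, as used
+                # here, decrements the stored counter value.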
+ increment_rule = IncrementRule( + family_id, "connected_wifi", increment_amount=-1 + ) + result_row = await table.read_modify_write_row(row_key, increment_rule) + + # check result + cell = result_row[0] + print(f"{cell.row_key} value: {int(cell)}") + # [END bigtable_async_write_increment] + await write_increment(table.client.project, table.instance_id, table.table_id) + + +async def write_conditional(table): + # [START bigtable_async_writes_conditional] + from google.cloud.bigtable.data import BigtableDataClientAsync + from google.cloud.bigtable.data import row_filters + from google.cloud.bigtable.data import SetCell + + async def write_conditional(project_id, instance_id, table_id): + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + family_id = "stats_summary" + row_key = "phone#4c410523#20190501" + + row_filter = row_filters.RowFilterChain( + filters=[ + row_filters.FamilyNameRegexFilter(family_id), + row_filters.ColumnQualifierRegexFilter("os_build"), + row_filters.ValueRegexFilter("PQ2A\\..*"), + ] + ) + + if_true = SetCell(family_id, "os_name", "android") + result = await table.check_and_mutate_row( + row_key, + row_filter, + true_case_mutations=if_true, + false_case_mutations=None, + ) + if result is True: + print("The row os_name was set to android") + # [END bigtable_async_writes_conditional] + await write_conditional(table.client.project, table.instance_id, table.table_id) + + +async def read_row(table): + # [START bigtable_async_reads_row] + from google.cloud.bigtable.data import BigtableDataClientAsync + + async def read_row(project_id, instance_id, table_id): + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + row_key = "phone#4c410523#20190501" + row = await table.read_row(row_key) + print(row) + # [END bigtable_async_reads_row] + await read_row(table.client.project, table.instance_id, table.table_id) + + +async def read_row_partial(table): + # [START bigtable_async_reads_row_partial] + from google.cloud.bigtable.data import BigtableDataClientAsync + from google.cloud.bigtable.data import row_filters + + async def read_row_partial(project_id, instance_id, table_id): + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + row_key = "phone#4c410523#20190501" + col_filter = row_filters.ColumnQualifierRegexFilter(b"os_build") + + row = await table.read_row(row_key, row_filter=col_filter) + print(row) + # [END bigtable_async_reads_row_partial] + await read_row_partial(table.client.project, table.instance_id, table.table_id) + + +async def read_rows_multiple(table): + # [START bigtable_async_reads_rows] + from google.cloud.bigtable.data import BigtableDataClientAsync + from google.cloud.bigtable.data import ReadRowsQuery + + async def read_rows(project_id, instance_id, table_id): + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + + query = ReadRowsQuery(row_keys=[ + b"phone#4c410523#20190501", + b"phone#4c410523#20190502" + ]) + async for row in await table.read_rows_stream(query): + print(row) + + # [END bigtable_async_reads_rows] + await read_rows(table.client.project, table.instance_id, table.table_id) + + +async def read_row_range(table): + # [START bigtable_async_reads_row_range] + from google.cloud.bigtable.data import BigtableDataClientAsync + from 
google.cloud.bigtable.data import ReadRowsQuery + from google.cloud.bigtable.data import RowRange + + async def read_row_range(project_id, instance_id, table_id): + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + + row_range = RowRange( + start_key=b"phone#4c410523#20190501", + end_key=b"phone#4c410523#201906201" + ) + query = ReadRowsQuery(row_ranges=[row_range]) + + async for row in await table.read_rows_stream(query): + print(row) + # [END bigtable_async_reads_row_range] + await read_row_range(table.client.project, table.instance_id, table.table_id) + + +async def read_with_prefix(table): + # [START bigtable_async_reads_prefix] + from google.cloud.bigtable.data import BigtableDataClientAsync + from google.cloud.bigtable.data import ReadRowsQuery + from google.cloud.bigtable.data import RowRange + + async def read_prefix(project_id, instance_id, table_id): + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + + prefix = "phone#" + end_key = prefix[:-1] + chr(ord(prefix[-1]) + 1) + prefix_range = RowRange(start_key=prefix, end_key=end_key) + query = ReadRowsQuery(row_ranges=[prefix_range]) + + async for row in await table.read_rows_stream(query): + print(row) + # [END bigtable_async_reads_prefix] + await read_prefix(table.client.project, table.instance_id, table.table_id) + + +async def read_with_filter(table): + # [START bigtable_async_reads_filter] + from google.cloud.bigtable.data import BigtableDataClientAsync + from google.cloud.bigtable.data import ReadRowsQuery + from google.cloud.bigtable.data import row_filters + + async def read_with_filter(project_id, instance_id, table_id): + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + + row_filter = row_filters.ValueRegexFilter(b"PQ2A.*$") + query = ReadRowsQuery(row_filter=row_filter) + + async for row in await table.read_rows_stream(query): + print(row) + # [END bigtable_async_reads_filter] + await read_with_filter(table.client.project, table.instance_id, table.table_id) diff --git a/packages/google-cloud-bigtable/samples/snippets/data_client/data_client_snippets_async_test.py b/packages/google-cloud-bigtable/samples/snippets/data_client/data_client_snippets_async_test.py new file mode 100644 index 000000000000..d9968e6dc6b7 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/snippets/data_client/data_client_snippets_async_test.py @@ -0,0 +1,103 @@ +# Copyright 2024, Google LLC +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
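+
+# These tests exercise the snippets in data_client_snippets_async.py against a
+# live Bigtable table. They expect the GOOGLE_CLOUD_PROJECT and BIGTABLE_INSTANCE
+# environment variables to be set, and they rely on pytest-asyncio to run the
+# async test cases.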
+import pytest
+import pytest_asyncio
+import uuid
+import os
+
+import data_client_snippets_async as data_snippets
+
+
+PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"]
+BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"]
+TABLE_ID_STATIC = os.getenv(
+    "BIGTABLE_TABLE", None
+)  # if not set, a temporary table will be generated
+
+
+@pytest.fixture(scope="session")
+def table_id():
+    from google.cloud import bigtable
+
+    client = bigtable.Client(project=PROJECT, admin=True)
+    instance = client.instance(BIGTABLE_INSTANCE)
+    table_id = TABLE_ID_STATIC or f"data-client-{str(uuid.uuid4())[:16]}"
+
+    admin_table = instance.table(table_id)
+    if not admin_table.exists():
+        admin_table.create(column_families={"family": None, "stats_summary": None})
+
+    yield table_id
+
+    if not table_id == TABLE_ID_STATIC:
+        # clean up table when finished
+        admin_table.delete()
+
+
+@pytest_asyncio.fixture
+async def table(table_id):
+    from google.cloud.bigtable.data import BigtableDataClientAsync
+
+    async with BigtableDataClientAsync(project=PROJECT) as client:
+        async with client.get_table(BIGTABLE_INSTANCE, table_id) as table:
+            yield table
+
+
+@pytest.mark.asyncio
+async def test_write_simple(table):
+    await data_snippets.write_simple(table)
+
+
+@pytest.mark.asyncio
+async def test_write_batch(table):
+    await data_snippets.write_batch(table)
+
+
+@pytest.mark.asyncio
+async def test_write_increment(table):
+    await data_snippets.write_increment(table)
+
+
+@pytest.mark.asyncio
+async def test_write_conditional(table):
+    await data_snippets.write_conditional(table)
+
+
+@pytest.mark.asyncio
+async def test_read_row(table):
+    await data_snippets.read_row(table)
+
+
+@pytest.mark.asyncio
+async def test_read_row_partial(table):
+    await data_snippets.read_row_partial(table)
+
+
+@pytest.mark.asyncio
+async def test_read_rows_multiple(table):
+    await data_snippets.read_rows_multiple(table)
+
+
+@pytest.mark.asyncio
+async def test_read_row_range(table):
+    await data_snippets.read_row_range(table)
+
+
+@pytest.mark.asyncio
+async def test_read_with_prefix(table):
+    await data_snippets.read_with_prefix(table)
+
+
+@pytest.mark.asyncio
+async def test_read_with_filter(table):
+    await data_snippets.read_with_filter(table)
diff --git a/packages/google-cloud-bigtable/samples/snippets/data_client/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/data_client/noxfile.py
new file mode 100644
index 000000000000..6967925a838a
--- /dev/null
+++ b/packages/google-cloud-bigtable/samples/snippets/data_client/noxfile.py
@@ -0,0 +1,293 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import glob
+import os
+from pathlib import Path
+import sys
+from typing import Callable, Dict, Optional
+
+import nox
+
+
+# WARNING - WARNING - WARNING - WARNING - WARNING
+# WARNING - WARNING - WARNING - WARNING - WARNING
+# DO NOT EDIT THIS FILE EVER!
+# WARNING - WARNING - WARNING - WARNING - WARNING +# WARNING - WARNING - WARNING - WARNING - WARNING + +BLACK_VERSION = "black==22.3.0" +ISORT_VERSION = "isort==5.10.1" + +# Copy `noxfile_config.py` to your directory and modify it instead. + +# `TEST_CONFIG` dict is a configuration hook that allows users to +# modify the test configurations. The values here should be in sync +# with `noxfile_config.py`. Users will copy `noxfile_config.py` into +# their directory and modify it. + +TEST_CONFIG = { + # You can opt out from the test for specific Python versions. + "ignored_versions": [], + # Old samples are opted out of enforcing Python type hints + # All new samples should feature them + "enforce_type_hints": False, + # An envvar key for determining the project id to use. Change it + # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a + # build specific Cloud project. You can also use your own string + # to use your own Cloud project. + "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", + # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + # If you need to use a specific version of pip, + # change pip_version_override to the string representation + # of the version number, for example, "20.2.4" + "pip_version_override": None, + # A dictionary you want to inject into your test. Don't put any + # secrets here. These values will override predefined values. + "envs": {}, +} + + +try: + # Ensure we can import noxfile_config in the project's directory. + sys.path.append(".") + from noxfile_config import TEST_CONFIG_OVERRIDE +except ImportError as e: + print("No user noxfile_config found: detail: {}".format(e)) + TEST_CONFIG_OVERRIDE = {} + +# Update the TEST_CONFIG with the user supplied values. +TEST_CONFIG.update(TEST_CONFIG_OVERRIDE) + + +def get_pytest_env_vars() -> Dict[str, str]: + """Returns a dict for pytest invocation.""" + ret = {} + + # Override the GCLOUD_PROJECT and the alias. + env_key = TEST_CONFIG["gcloud_project_env"] + # This should error out if not set. + ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] + + # Apply user supplied envs. + ret.update(TEST_CONFIG["envs"]) + return ret + + +# DO NOT EDIT - automatically generated. +# All versions used to test samples. +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] + +# Any default versions that should be ignored. +IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] + +TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) + +INSTALL_LIBRARY_FROM_SOURCE = os.environ.get("INSTALL_LIBRARY_FROM_SOURCE", False) in ( + "True", + "true", +) + +# Error if a python version is missing +nox.options.error_on_missing_interpreters = True + +# +# Style Checks +# + + +# Linting with flake8. 
+# +# We ignore the following rules: +# E203: whitespace before ‘:’ +# E266: too many leading ‘#’ for block comment +# E501: line too long +# I202: Additional newline in a section of imports +# +# We also need to specify the rules which are ignored by default: +# ['E226', 'W504', 'E126', 'E123', 'W503', 'E24', 'E704', 'E121'] +FLAKE8_COMMON_ARGS = [ + "--show-source", + "--builtin=gettext", + "--max-complexity=20", + "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py", + "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202", + "--max-line-length=88", +] + + +@nox.session +def lint(session: nox.sessions.Session) -> None: + if not TEST_CONFIG["enforce_type_hints"]: + session.install("flake8") + else: + session.install("flake8", "flake8-annotations") + + args = FLAKE8_COMMON_ARGS + [ + ".", + ] + session.run("flake8", *args) + + +# +# Black +# + + +@nox.session +def blacken(session: nox.sessions.Session) -> None: + """Run black. Format code to uniform standard.""" + session.install(BLACK_VERSION) + python_files = [path for path in os.listdir(".") if path.endswith(".py")] + + session.run("black", *python_files) + + +# +# format = isort + black +# + + +@nox.session +def format(session: nox.sessions.Session) -> None: + """ + Run isort to sort imports. Then run black + to format code to uniform standard. + """ + session.install(BLACK_VERSION, ISORT_VERSION) + python_files = [path for path in os.listdir(".") if path.endswith(".py")] + + # Use the --fss option to sort imports using strict alphabetical order. + # See https://pycqa.github.io/isort/docs/configuration/options.html#force-sort-within-sections + session.run("isort", "--fss", *python_files) + session.run("black", *python_files) + + +# +# Sample Tests +# + + +PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] + + +def _session_tests( + session: nox.sessions.Session, post_install: Callable = None +) -> None: + # check for presence of tests + test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob( + "**/test_*.py", recursive=True + ) + test_list.extend(glob.glob("**/tests", recursive=True)) + + if len(test_list) == 0: + print("No tests found, skipping directory.") + return + + if TEST_CONFIG["pip_version_override"]: + pip_version = TEST_CONFIG["pip_version_override"] + session.install(f"pip=={pip_version}") + """Runs py.test for a particular project.""" + concurrent_args = [] + if os.path.exists("requirements.txt"): + if os.path.exists("constraints.txt"): + session.install("-r", "requirements.txt", "-c", "constraints.txt") + else: + session.install("-r", "requirements.txt") + with open("requirements.txt") as rfile: + packages = rfile.read() + + if os.path.exists("requirements-test.txt"): + if os.path.exists("constraints-test.txt"): + session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") + else: + session.install("-r", "requirements-test.txt") + with open("requirements-test.txt") as rtfile: + packages += rtfile.read() + + if INSTALL_LIBRARY_FROM_SOURCE: + session.install("-e", _get_repo_root()) + + if post_install: + post_install(session) + + if "pytest-parallel" in packages: + concurrent_args.extend(["--workers", "auto", "--tests-per-worker", "auto"]) + elif "pytest-xdist" in packages: + concurrent_args.extend(["-n", "auto"]) + + session.run( + "pytest", + *(PYTEST_COMMON_ARGS + session.posargs + concurrent_args), + # Pytest will return 5 when no tests are collected. This can happen + # on travis where slow and flaky tests are excluded. 
+ # See http://doc.pytest.org/en/latest/_modules/_pytest/main.html + success_codes=[0, 5], + env=get_pytest_env_vars(), + ) + + +@nox.session(python=ALL_VERSIONS) +def py(session: nox.sessions.Session) -> None: + """Runs py.test for a sample using the specified version of Python.""" + if session.python in TESTED_VERSIONS: + _session_tests(session) + else: + session.skip( + "SKIPPED: {} tests are disabled for this sample.".format(session.python) + ) + + +# +# Readmegen +# + + +def _get_repo_root() -> Optional[str]: + """Returns the root folder of the project.""" + # Get root of this repository. Assume we don't have directories nested deeper than 10 items. + p = Path(os.getcwd()) + for i in range(10): + if p is None: + break + if Path(p / ".git").exists(): + return str(p) + # .git is not available in repos cloned via Cloud Build + # setup.py is always in the library's root, so use that instead + # https://github.com/googleapis/synthtool/issues/792 + if Path(p / "setup.py").exists(): + return str(p) + p = p.parent + raise Exception("Unable to detect repository root.") + + +GENERATED_READMES = sorted([x for x in Path(".").rglob("*.rst.in")]) + + +@nox.session +@nox.parametrize("path", GENERATED_READMES) +def readmegen(session: nox.sessions.Session, path: str) -> None: + """(Re-)generates the readme for a sample.""" + session.install("jinja2", "pyyaml") + dir_ = os.path.dirname(path) + + if os.path.exists(os.path.join(dir_, "requirements.txt")): + session.install("-r", os.path.join(dir_, "requirements.txt")) + + in_file = os.path.join(dir_, "README.rst.in") + session.run( + "python", _get_repo_root() + "/scripts/readme-gen/readme_gen.py", in_file + ) diff --git a/packages/google-cloud-bigtable/samples/snippets/data_client/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/data_client/requirements-test.txt new file mode 100644 index 000000000000..5cb431d92b98 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/snippets/data_client/requirements-test.txt @@ -0,0 +1,2 @@ +pytest==7.4.4 +pytest-asyncio diff --git a/packages/google-cloud-bigtable/samples/snippets/data_client/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/data_client/requirements.txt new file mode 100644 index 000000000000..835e1bc780c5 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/snippets/data_client/requirements.txt @@ -0,0 +1 @@ +google-cloud-bigtable==2.23.0 diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/deletes_snippets.py b/packages/google-cloud-bigtable/samples/snippets/deletes/deletes_snippets.py index 8e78083bf8b7..72f812ca2bd5 100644 --- a/packages/google-cloud-bigtable/samples/snippets/deletes/deletes_snippets.py +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/deletes_snippets.py @@ -37,9 +37,7 @@ def delete_from_column_family(project_id, instance_id, table_id): instance = client.instance(instance_id) table = instance.table(table_id) row = table.row("phone#4c410523#20190501") - row.delete_cells( - column_family_id="cell_plan", columns=row.ALL_COLUMNS - ) + row.delete_cells(column_family_id="cell_plan", columns=row.ALL_COLUMNS) row.commit() diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/deletes_test.py b/packages/google-cloud-bigtable/samples/snippets/deletes/deletes_test.py index bf23daa5992d..bebaabafb767 100644 --- a/packages/google-cloud-bigtable/samples/snippets/deletes/deletes_test.py +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/deletes_test.py @@ -1,4 +1,5 @@ # Copyright 2020, Google 
LLC + # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -94,46 +95,46 @@ def table_id(): yield table_id -def assert_snapshot_match(capsys, snapshot): +def assert_output_match(capsys, expected): out, _ = capsys.readouterr() - snapshot.assert_match(out) + assert out == expected -def test_delete_from_column(capsys, snapshot, table_id): +def test_delete_from_column(capsys, table_id): deletes_snippets.delete_from_column(PROJECT, BIGTABLE_INSTANCE, table_id) - assert_snapshot_match(capsys, snapshot) + assert_output_match(capsys, "") -def test_delete_from_column_family(capsys, snapshot, table_id): +def test_delete_from_column_family(capsys, table_id): deletes_snippets.delete_from_column_family(PROJECT, BIGTABLE_INSTANCE, table_id) - assert_snapshot_match(capsys, snapshot) + assert_output_match(capsys, "") -def test_delete_from_row(capsys, snapshot, table_id): +def test_delete_from_row(capsys, table_id): deletes_snippets.delete_from_row(PROJECT, BIGTABLE_INSTANCE, table_id) - assert_snapshot_match(capsys, snapshot) + assert_output_match(capsys, "") -def test_streaming_and_batching(capsys, snapshot, table_id): +def test_streaming_and_batching(capsys, table_id): deletes_snippets.streaming_and_batching(PROJECT, BIGTABLE_INSTANCE, table_id) - assert_snapshot_match(capsys, snapshot) + assert_output_match(capsys, "") -def test_check_and_mutate(capsys, snapshot, table_id): +def test_check_and_mutate(capsys, table_id): deletes_snippets.check_and_mutate(PROJECT, BIGTABLE_INSTANCE, table_id) - assert_snapshot_match(capsys, snapshot) + assert_output_match(capsys, "") -def test_drop_row_range(capsys, snapshot, table_id): +def test_drop_row_range(capsys, table_id): deletes_snippets.drop_row_range(PROJECT, BIGTABLE_INSTANCE, table_id) - assert_snapshot_match(capsys, snapshot) + assert_output_match(capsys, "") -def test_delete_column_family(capsys, snapshot, table_id): +def test_delete_column_family(capsys, table_id): deletes_snippets.delete_column_family(PROJECT, BIGTABLE_INSTANCE, table_id) - assert_snapshot_match(capsys, snapshot) + assert_output_match(capsys, "") -def test_delete_table(capsys, snapshot, table_id): +def test_delete_table(capsys, table_id): deletes_snippets.delete_table(PROJECT, BIGTABLE_INSTANCE, table_id) - assert_snapshot_match(capsys, snapshot) + assert_output_match(capsys, "") diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/deletes/noxfile.py index 483b55901791..3b7135946fd5 100644 --- a/packages/google-cloud-bigtable/samples/snippets/deletes/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/noxfile.py @@ -160,6 +160,7 @@ def blacken(session: nox.sessions.Session) -> None: # format = isort + black # + @nox.session def format(session: nox.sessions.Session) -> None: """ @@ -187,7 +188,9 @@ def _session_tests( session: nox.sessions.Session, post_install: Callable = None ) -> None: # check for presence of tests - test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob("**/test_*.py", recursive=True) + test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob( + "**/test_*.py", recursive=True + ) test_list.extend(glob.glob("**/tests", recursive=True)) if len(test_list) == 0: @@ -209,9 +212,7 @@ def _session_tests( if os.path.exists("requirements-test.txt"): if os.path.exists("constraints-test.txt"): - session.install( - "-r", 
"requirements-test.txt", "-c", "constraints-test.txt" - ) + session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") else: session.install("-r", "requirements-test.txt") with open("requirements-test.txt") as rtfile: @@ -224,9 +225,9 @@ def _session_tests( post_install(session) if "pytest-parallel" in packages: - concurrent_args.extend(['--workers', 'auto', '--tests-per-worker', 'auto']) + concurrent_args.extend(["--workers", "auto", "--tests-per-worker", "auto"]) elif "pytest-xdist" in packages: - concurrent_args.extend(['-n', 'auto']) + concurrent_args.extend(["-n", "auto"]) session.run( "pytest", @@ -256,7 +257,7 @@ def py(session: nox.sessions.Session) -> None: def _get_repo_root() -> Optional[str]: - """ Returns the root folder of the project. """ + """Returns the root folder of the project.""" # Get root of this repository. Assume we don't have directories nested deeper than 10 items. p = Path(os.getcwd()) for i in range(10): diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt index 8075a1ec560e..cb87efc0ff71 100644 --- a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt @@ -1 +1 @@ -pytest==8.0.0 +pytest==7.4.4 diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt index ae10593d2852..6dc98589311e 100644 --- a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt @@ -1,2 +1 @@ google-cloud-bigtable==2.22.0 -snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/snapshots/snap_deletes_test.py b/packages/google-cloud-bigtable/samples/snippets/deletes/snapshots/snap_deletes_test.py deleted file mode 100644 index 04a7db940deb..000000000000 --- a/packages/google-cloud-bigtable/samples/snippets/deletes/snapshots/snap_deletes_test.py +++ /dev/null @@ -1,24 +0,0 @@ -# -*- coding: utf-8 -*- -# snapshottest: v1 - https://goo.gl/zC4yUc -from __future__ import unicode_literals - -from snapshottest import Snapshot - - -snapshots = Snapshot() - -snapshots['test_check_and_mutate 1'] = '' - -snapshots['test_delete_column_family 1'] = '' - -snapshots['test_delete_from_column 1'] = '' - -snapshots['test_delete_from_column_family 1'] = '' - -snapshots['test_delete_from_row 1'] = '' - -snapshots['test_delete_table 1'] = '' - -snapshots['test_drop_row_range 1'] = '' - -snapshots['test_streaming_and_batching 1'] = '' diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/snapshots/__init__.py b/packages/google-cloud-bigtable/samples/snippets/filters/__init__.py similarity index 100% rename from packages/google-cloud-bigtable/samples/snippets/deletes/snapshots/__init__.py rename to packages/google-cloud-bigtable/samples/snippets/filters/__init__.py diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/filters_test.py b/packages/google-cloud-bigtable/samples/snippets/filters/filters_test.py index 35cf62ff0eaa..aedd8f08d9ba 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/filters_test.py +++ b/packages/google-cloud-bigtable/samples/snippets/filters/filters_test.py @@ -16,11 +16,13 @@ import os import time import uuid +import inspect from google.cloud 
import bigtable import pytest +from .snapshots.snap_filters_test import snapshots -import filter_snippets +from . import filter_snippets PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] @@ -97,131 +99,148 @@ def table_id(): table.delete() -def test_filter_limit_row_sample(capsys, snapshot, table_id): +def test_filter_limit_row_sample(capsys, table_id): filter_snippets.filter_limit_row_sample(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() assert "Reading data for" in out -def test_filter_limit_row_regex(capsys, snapshot, table_id): +def test_filter_limit_row_regex(capsys, table_id): filter_snippets.filter_limit_row_regex(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() - snapshot.assert_match(out) + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected -def test_filter_limit_cells_per_col(capsys, snapshot, table_id): +def test_filter_limit_cells_per_col(capsys, table_id): filter_snippets.filter_limit_cells_per_col(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() - snapshot.assert_match(out) + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected -def test_filter_limit_cells_per_row(capsys, snapshot, table_id): +def test_filter_limit_cells_per_row(capsys, table_id): filter_snippets.filter_limit_cells_per_row(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() - snapshot.assert_match(out) + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected -def test_filter_limit_cells_per_row_offset(capsys, snapshot, table_id): +def test_filter_limit_cells_per_row_offset(capsys, table_id): filter_snippets.filter_limit_cells_per_row_offset( PROJECT, BIGTABLE_INSTANCE, table_id ) out, _ = capsys.readouterr() - snapshot.assert_match(out) + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected -def test_filter_limit_col_family_regex(capsys, snapshot, table_id): +def test_filter_limit_col_family_regex(capsys, table_id): filter_snippets.filter_limit_col_family_regex(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() - snapshot.assert_match(out) + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected -def test_filter_limit_col_qualifier_regex(capsys, snapshot, table_id): +def test_filter_limit_col_qualifier_regex(capsys, table_id): filter_snippets.filter_limit_col_qualifier_regex( PROJECT, BIGTABLE_INSTANCE, table_id ) out, _ = capsys.readouterr() - snapshot.assert_match(out) + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected -def test_filter_limit_col_range(capsys, snapshot, table_id): +def test_filter_limit_col_range(capsys, table_id): filter_snippets.filter_limit_col_range(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() - snapshot.assert_match(out) + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected -def test_filter_limit_value_range(capsys, snapshot, table_id): +def test_filter_limit_value_range(capsys, table_id): filter_snippets.filter_limit_value_range(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() - snapshot.assert_match(out) + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected -def test_filter_limit_value_regex(capsys, snapshot, table_id): +def test_filter_limit_value_regex(capsys, table_id): filter_snippets.filter_limit_value_regex(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() - snapshot.assert_match(out) + 
expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected -def test_filter_limit_timestamp_range(capsys, snapshot, table_id): +def test_filter_limit_timestamp_range(capsys, table_id): filter_snippets.filter_limit_timestamp_range(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() - snapshot.assert_match(out) + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected -def test_filter_limit_block_all(capsys, snapshot, table_id): +def test_filter_limit_block_all(capsys, table_id): filter_snippets.filter_limit_block_all(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() - snapshot.assert_match(out) + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected -def test_filter_limit_pass_all(capsys, snapshot, table_id): +def test_filter_limit_pass_all(capsys, table_id): filter_snippets.filter_limit_pass_all(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() - snapshot.assert_match(out) + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected -def test_filter_modify_strip_value(capsys, snapshot, table_id): +def test_filter_modify_strip_value(capsys, table_id): filter_snippets.filter_modify_strip_value(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() - snapshot.assert_match(out) + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected -def test_filter_modify_apply_label(capsys, snapshot, table_id): +def test_filter_modify_apply_label(capsys, table_id): filter_snippets.filter_modify_apply_label(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() - snapshot.assert_match(out) + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected -def test_filter_composing_chain(capsys, snapshot, table_id): +def test_filter_composing_chain(capsys, table_id): filter_snippets.filter_composing_chain(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() - snapshot.assert_match(out) + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected -def test_filter_composing_interleave(capsys, snapshot, table_id): +def test_filter_composing_interleave(capsys, table_id): filter_snippets.filter_composing_interleave(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() - snapshot.assert_match(out) + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected -def test_filter_composing_condition(capsys, snapshot, table_id): +def test_filter_composing_condition(capsys, table_id): filter_snippets.filter_composing_condition(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() - snapshot.assert_match(out) + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py index 483b55901791..3b7135946fd5 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py @@ -160,6 +160,7 @@ def blacken(session: nox.sessions.Session) -> None: # format = isort + black # + @nox.session def format(session: nox.sessions.Session) -> None: """ @@ -187,7 +188,9 @@ def _session_tests( session: nox.sessions.Session, post_install: Callable = None ) -> None: # check for presence of tests - test_list = glob.glob("**/*_test.py", recursive=True) + 
glob.glob("**/test_*.py", recursive=True) + test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob( + "**/test_*.py", recursive=True + ) test_list.extend(glob.glob("**/tests", recursive=True)) if len(test_list) == 0: @@ -209,9 +212,7 @@ def _session_tests( if os.path.exists("requirements-test.txt"): if os.path.exists("constraints-test.txt"): - session.install( - "-r", "requirements-test.txt", "-c", "constraints-test.txt" - ) + session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") else: session.install("-r", "requirements-test.txt") with open("requirements-test.txt") as rtfile: @@ -224,9 +225,9 @@ def _session_tests( post_install(session) if "pytest-parallel" in packages: - concurrent_args.extend(['--workers', 'auto', '--tests-per-worker', 'auto']) + concurrent_args.extend(["--workers", "auto", "--tests-per-worker", "auto"]) elif "pytest-xdist" in packages: - concurrent_args.extend(['-n', 'auto']) + concurrent_args.extend(["-n", "auto"]) session.run( "pytest", @@ -256,7 +257,7 @@ def py(session: nox.sessions.Session) -> None: def _get_repo_root() -> Optional[str]: - """ Returns the root folder of the project. """ + """Returns the root folder of the project.""" # Get root of this repository. Assume we don't have directories nested deeper than 10 items. p = Path(os.getcwd()) for i in range(10): diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt index 8075a1ec560e..cb87efc0ff71 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt @@ -1 +1 @@ -pytest==8.0.0 +pytest==7.4.4 diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt index ae10593d2852..6dc98589311e 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt @@ -1,2 +1 @@ google-cloud-bigtable==2.22.0 -snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/snapshots/snap_filters_test.py b/packages/google-cloud-bigtable/samples/snippets/filters/snapshots/snap_filters_test.py index a0580f565990..2331c93bc1b6 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/snapshots/snap_filters_test.py +++ b/packages/google-cloud-bigtable/samples/snippets/filters/snapshots/snap_filters_test.py @@ -1,13 +1,13 @@ # -*- coding: utf-8 -*- -# snapshottest: v1 - https://goo.gl/zC4yUc -# flake8: noqa +# this was previously implemented using the `snapshottest` package (https://goo.gl/zC4yUc), +# which is not compatible with Python 3.12. 
So we moved to a standard dictionary storing +# expected outputs for each test from __future__ import unicode_literals -from snapshottest import Snapshot -snapshots = Snapshot() +snapshots = {} -snapshots['test_filter_limit_row_regex 1'] = '''Reading data for phone#4c410523#20190501: +snapshots['test_filter_limit_row_regex'] = '''Reading data for phone#4c410523#20190501: Column Family cell_plan \tdata_plan_01gb: false @2019-05-01 00:00:00+00:00 \tdata_plan_01gb: true @2019-04-30 23:00:00+00:00 @@ -27,7 +27,7 @@ ''' -snapshots['test_filter_limit_cells_per_col 1'] = '''Reading data for phone#4c410523#20190501: +snapshots['test_filter_limit_cells_per_col'] = '''Reading data for phone#4c410523#20190501: Column Family cell_plan \tdata_plan_01gb: false @2019-05-01 00:00:00+00:00 \tdata_plan_01gb: true @2019-04-30 23:00:00+00:00 @@ -71,7 +71,7 @@ ''' -snapshots['test_filter_limit_cells_per_row 1'] = '''Reading data for phone#4c410523#20190501: +snapshots['test_filter_limit_cells_per_row'] = '''Reading data for phone#4c410523#20190501: Column Family cell_plan \tdata_plan_01gb: false @2019-05-01 00:00:00+00:00 \tdata_plan_01gb: true @2019-04-30 23:00:00+00:00 @@ -102,7 +102,7 @@ ''' -snapshots['test_filter_limit_cells_per_row_offset 1'] = '''Reading data for phone#4c410523#20190501: +snapshots['test_filter_limit_cells_per_row_offset'] = '''Reading data for phone#4c410523#20190501: Column Family cell_plan \tdata_plan_05gb: true @2019-05-01 00:00:00+00:00 Column Family stats_summary @@ -132,7 +132,7 @@ ''' -snapshots['test_filter_limit_col_family_regex 1'] = '''Reading data for phone#4c410523#20190501: +snapshots['test_filter_limit_col_family_regex'] = '''Reading data for phone#4c410523#20190501: Column Family stats_summary \tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 \tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 @@ -164,7 +164,7 @@ ''' -snapshots['test_filter_limit_col_qualifier_regex 1'] = '''Reading data for phone#4c410523#20190501: +snapshots['test_filter_limit_col_qualifier_regex'] = '''Reading data for phone#4c410523#20190501: Column Family stats_summary \tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 \tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 @@ -191,7 +191,7 @@ ''' -snapshots['test_filter_limit_col_range 1'] = '''Reading data for phone#4c410523#20190501: +snapshots['test_filter_limit_col_range'] = '''Reading data for phone#4c410523#20190501: Column Family cell_plan \tdata_plan_01gb: false @2019-05-01 00:00:00+00:00 \tdata_plan_01gb: true @2019-04-30 23:00:00+00:00 @@ -207,7 +207,7 @@ ''' -snapshots['test_filter_limit_value_range 1'] = '''Reading data for phone#4c410523#20190501: +snapshots['test_filter_limit_value_range'] = '''Reading data for phone#4c410523#20190501: Column Family stats_summary \tos_build: PQ2A.190405.003 @2019-05-01 00:00:00+00:00 @@ -217,7 +217,7 @@ ''' -snapshots['test_filter_limit_value_regex 1'] = '''Reading data for phone#4c410523#20190501: +snapshots['test_filter_limit_value_regex'] = '''Reading data for phone#4c410523#20190501: Column Family stats_summary \tos_build: PQ2A.190405.003 @2019-05-01 00:00:00+00:00 @@ -239,15 +239,15 @@ ''' -snapshots['test_filter_limit_timestamp_range 1'] = '''Reading data for phone#4c410523#20190501: +snapshots['test_filter_limit_timestamp_range'] = '''Reading data for phone#4c410523#20190501: Column Family cell_plan \tdata_plan_01gb: true @2019-04-30 23:00:00+00:00 ''' -snapshots['test_filter_limit_block_all 
1'] = '' +snapshots['test_filter_limit_block_all'] = '' -snapshots['test_filter_limit_pass_all 1'] = '''Reading data for phone#4c410523#20190501: +snapshots['test_filter_limit_pass_all'] = '''Reading data for phone#4c410523#20190501: Column Family cell_plan \tdata_plan_01gb: false @2019-05-01 00:00:00+00:00 \tdata_plan_01gb: true @2019-04-30 23:00:00+00:00 @@ -291,7 +291,7 @@ ''' -snapshots['test_filter_modify_strip_value 1'] = '''Reading data for phone#4c410523#20190501: +snapshots['test_filter_modify_strip_value'] = '''Reading data for phone#4c410523#20190501: Column Family cell_plan \tdata_plan_01gb: @2019-05-01 00:00:00+00:00 \tdata_plan_01gb: @2019-04-30 23:00:00+00:00 @@ -335,7 +335,7 @@ ''' -snapshots['test_filter_modify_apply_label 1'] = '''Reading data for phone#4c410523#20190501: +snapshots['test_filter_modify_apply_label'] = '''Reading data for phone#4c410523#20190501: Column Family cell_plan \tdata_plan_01gb: false @2019-05-01 00:00:00+00:00 [labelled] \tdata_plan_01gb: true @2019-04-30 23:00:00+00:00 [labelled] @@ -379,7 +379,7 @@ ''' -snapshots['test_filter_composing_chain 1'] = '''Reading data for phone#4c410523#20190501: +snapshots['test_filter_composing_chain'] = '''Reading data for phone#4c410523#20190501: Column Family cell_plan \tdata_plan_01gb: false @2019-05-01 00:00:00+00:00 \tdata_plan_05gb: true @2019-05-01 00:00:00+00:00 @@ -402,7 +402,7 @@ ''' -snapshots['test_filter_composing_interleave 1'] = '''Reading data for phone#4c410523#20190501: +snapshots['test_filter_composing_interleave'] = '''Reading data for phone#4c410523#20190501: Column Family cell_plan \tdata_plan_01gb: true @2019-04-30 23:00:00+00:00 \tdata_plan_05gb: true @2019-05-01 00:00:00+00:00 @@ -435,7 +435,7 @@ ''' -snapshots['test_filter_composing_condition 1'] = '''Reading data for phone#4c410523#20190501: +snapshots['test_filter_composing_condition'] = '''Reading data for phone#4c410523#20190501: Column Family cell_plan \tdata_plan_01gb: false @2019-05-01 00:00:00+00:00 [filtered-out] \tdata_plan_01gb: true @2019-04-30 23:00:00+00:00 [filtered-out] diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/__init__.py b/packages/google-cloud-bigtable/samples/snippets/reads/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py index 483b55901791..3b7135946fd5 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py @@ -160,6 +160,7 @@ def blacken(session: nox.sessions.Session) -> None: # format = isort + black # + @nox.session def format(session: nox.sessions.Session) -> None: """ @@ -187,7 +188,9 @@ def _session_tests( session: nox.sessions.Session, post_install: Callable = None ) -> None: # check for presence of tests - test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob("**/test_*.py", recursive=True) + test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob( + "**/test_*.py", recursive=True + ) test_list.extend(glob.glob("**/tests", recursive=True)) if len(test_list) == 0: @@ -209,9 +212,7 @@ def _session_tests( if os.path.exists("requirements-test.txt"): if os.path.exists("constraints-test.txt"): - session.install( - "-r", "requirements-test.txt", "-c", "constraints-test.txt" - ) + session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") else: session.install("-r", "requirements-test.txt") with 
open("requirements-test.txt") as rtfile: @@ -224,9 +225,9 @@ def _session_tests( post_install(session) if "pytest-parallel" in packages: - concurrent_args.extend(['--workers', 'auto', '--tests-per-worker', 'auto']) + concurrent_args.extend(["--workers", "auto", "--tests-per-worker", "auto"]) elif "pytest-xdist" in packages: - concurrent_args.extend(['-n', 'auto']) + concurrent_args.extend(["-n", "auto"]) session.run( "pytest", @@ -256,7 +257,7 @@ def py(session: nox.sessions.Session) -> None: def _get_repo_root() -> Optional[str]: - """ Returns the root folder of the project. """ + """Returns the root folder of the project.""" # Get root of this repository. Assume we don't have directories nested deeper than 10 items. p = Path(os.getcwd()) for i in range(10): diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/reads_test.py b/packages/google-cloud-bigtable/samples/snippets/reads/reads_test.py index 0b61e341f7ed..da826d6fb347 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/reads_test.py +++ b/packages/google-cloud-bigtable/samples/snippets/reads/reads_test.py @@ -14,11 +14,13 @@ import datetime import os import uuid +import inspect from google.cloud import bigtable import pytest -import read_snippets +from .snapshots.snap_reads_test import snapshots +from . import read_snippets PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] @@ -72,50 +74,57 @@ def table_id(): table.delete() -def test_read_row(capsys, snapshot, table_id): +def test_read_row(capsys, table_id): read_snippets.read_row(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() - snapshot.assert_match(out) + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected -def test_read_row_partial(capsys, snapshot, table_id): +def test_read_row_partial(capsys, table_id): read_snippets.read_row_partial(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() - snapshot.assert_match(out) + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected -def test_read_rows(capsys, snapshot, table_id): +def test_read_rows(capsys, table_id): read_snippets.read_rows(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() - snapshot.assert_match(out) + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected -def test_read_row_range(capsys, snapshot, table_id): +def test_read_row_range(capsys, table_id): read_snippets.read_row_range(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() - snapshot.assert_match(out) + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected -def test_read_row_ranges(capsys, snapshot, table_id): +def test_read_row_ranges(capsys, table_id): read_snippets.read_row_ranges(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() - snapshot.assert_match(out) + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected -def test_read_prefix(capsys, snapshot, table_id): +def test_read_prefix(capsys, table_id): read_snippets.read_prefix(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() - snapshot.assert_match(out) + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected -def test_read_filter(capsys, snapshot, table_id): +def test_read_filter(capsys, table_id): read_snippets.read_filter(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() - snapshot.assert_match(out) + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == 
expected diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt index 8075a1ec560e..cb87efc0ff71 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt @@ -1 +1 @@ -pytest==8.0.0 +pytest==7.4.4 diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt index ae10593d2852..6dc98589311e 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt @@ -1,2 +1 @@ google-cloud-bigtable==2.22.0 -snapshottest==0.6.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/snapshots/snap_reads_test.py b/packages/google-cloud-bigtable/samples/snippets/reads/snapshots/snap_reads_test.py index f45e98f2e57c..564a4df7eecc 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/snapshots/snap_reads_test.py +++ b/packages/google-cloud-bigtable/samples/snippets/reads/snapshots/snap_reads_test.py @@ -1,19 +1,18 @@ # -*- coding: utf-8 -*- -# snapshottest: v1 - https://goo.gl/zC4yUc +# this was previously implemented using the `snapshottest` package (https://goo.gl/zC4yUc), +# which is not compatible with Python 3.12. So we moved to a standard dictionary storing +# expected outputs for each test from __future__ import unicode_literals -from snapshottest import Snapshot +snapshots = {} - -snapshots = Snapshot() - -snapshots['test_read_row_partial 1'] = '''Reading data for phone#4c410523#20190501: +snapshots['test_read_row_partial'] = '''Reading data for phone#4c410523#20190501: Column Family stats_summary \tos_build: PQ2A.190405.003 @2019-05-01 00:00:00+00:00 ''' -snapshots['test_read_rows 1'] = '''Reading data for phone#4c410523#20190501: +snapshots['test_read_rows'] = '''Reading data for phone#4c410523#20190501: Column Family stats_summary \tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 \tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 @@ -27,7 +26,7 @@ ''' -snapshots['test_read_row_range 1'] = '''Reading data for phone#4c410523#20190501: +snapshots['test_read_row_range'] = '''Reading data for phone#4c410523#20190501: Column Family stats_summary \tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 \tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 @@ -47,7 +46,7 @@ ''' -snapshots['test_read_row_ranges 1'] = '''Reading data for phone#4c410523#20190501: +snapshots['test_read_row_ranges'] = '''Reading data for phone#4c410523#20190501: Column Family stats_summary \tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 \tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 @@ -79,7 +78,7 @@ ''' -snapshots['test_read_prefix 1'] = '''Reading data for phone#4c410523#20190501: +snapshots['test_read_prefix'] = '''Reading data for phone#4c410523#20190501: Column Family stats_summary \tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 \tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 @@ -111,7 +110,7 @@ ''' -snapshots['test_read_filter 1'] = '''Reading data for phone#4c410523#20190501: +snapshots['test_read_filter'] = '''Reading data for 
phone#4c410523#20190501: Column Family stats_summary \tos_build: PQ2A.190405.003 @2019-05-01 00:00:00+00:00 @@ -133,7 +132,7 @@ ''' -snapshots['test_read_row 1'] = '''Reading data for phone#4c410523#20190501: +snapshots['test_read_row'] = '''Reading data for phone#4c410523#20190501: Column Family stats_summary \tconnected_cell: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 \tconnected_wifi: \x00\x00\x00\x00\x00\x00\x00\x01 @2019-05-01 00:00:00+00:00 diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt index aaa563abc833..43b02e724796 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt @@ -1,2 +1,2 @@ backoff==2.2.1 -pytest==8.0.0 +pytest==7.4.4 diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/write_batch.py b/packages/google-cloud-bigtable/samples/snippets/writes/write_batch.py index fd51172420b2..8ad4b07a558a 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/write_batch.py +++ b/packages/google-cloud-bigtable/samples/snippets/writes/write_batch.py @@ -16,6 +16,7 @@ import datetime from google.cloud import bigtable +from google.cloud.bigtable.batcher import MutationsBatcher def write_batch(project_id, instance_id, table_id): @@ -23,23 +24,21 @@ def write_batch(project_id, instance_id, table_id): instance = client.instance(instance_id) table = instance.table(table_id) - timestamp = datetime.datetime.utcnow() - column_family_id = "stats_summary" + with MutationsBatcher(table=table) as batcher: + timestamp = datetime.datetime.utcnow() + column_family_id = "stats_summary" - rows = [ - table.direct_row("tablet#a0b81f74#20190501"), - table.direct_row("tablet#a0b81f74#20190502"), - ] + rows = [ + table.direct_row("tablet#a0b81f74#20190501"), + table.direct_row("tablet#a0b81f74#20190502"), + ] - rows[0].set_cell(column_family_id, "connected_wifi", 1, timestamp) - rows[0].set_cell(column_family_id, "os_build", "12155.0.0-rc1", timestamp) - rows[1].set_cell(column_family_id, "connected_wifi", 1, timestamp) - rows[1].set_cell(column_family_id, "os_build", "12145.0.0-rc6", timestamp) + rows[0].set_cell(column_family_id, "connected_wifi", 1, timestamp) + rows[0].set_cell(column_family_id, "os_build", "12155.0.0-rc1", timestamp) + rows[1].set_cell(column_family_id, "connected_wifi", 1, timestamp) + rows[1].set_cell(column_family_id, "os_build", "12145.0.0-rc6", timestamp) - response = table.mutate_rows(rows) - for i, status in enumerate(response): - if status.code != 0: - print("Error writing row: {}".format(status.message)) + batcher.mutate_rows(rows) print("Successfully wrote 2 rows.") diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt index b4d30f50557b..aa143f59dfbe 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt @@ -1,2 +1,2 @@ -pytest==8.0.0 +pytest==7.4.4 google-cloud-testutils==1.4.0 From 7a1477df59d7e31b016b749dafbbed39c73c3361 Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Wed, 20 Mar 2024 16:43:45 -0700 Subject: [PATCH 782/892] feat: added generated docs for async client (#947) --- .../docs/async_data_client.rst | 6 ++ .../docs/async_data_exceptions.rst | 6 ++ 
.../docs/async_data_mutations.rst | 6 ++ .../docs/async_data_mutations_batcher.rst | 6 ++ .../async_data_read_modify_write_rules.rst | 6 ++ .../docs/async_data_read_rows_query.rst | 6 ++ .../docs/async_data_row.rst | 6 ++ .../docs/async_data_row_filters.rst | 62 +++++++++++++++++++ .../docs/async_data_usage.rst | 14 +++++ packages/google-cloud-bigtable/docs/index.rst | 1 + packages/google-cloud-bigtable/docs/usage.rst | 4 +- .../cloud/bigtable/data/_async/client.py | 2 +- .../google/cloud/bigtable/data/row.py | 10 +-- 13 files changed, 128 insertions(+), 7 deletions(-) create mode 100644 packages/google-cloud-bigtable/docs/async_data_client.rst create mode 100644 packages/google-cloud-bigtable/docs/async_data_exceptions.rst create mode 100644 packages/google-cloud-bigtable/docs/async_data_mutations.rst create mode 100644 packages/google-cloud-bigtable/docs/async_data_mutations_batcher.rst create mode 100644 packages/google-cloud-bigtable/docs/async_data_read_modify_write_rules.rst create mode 100644 packages/google-cloud-bigtable/docs/async_data_read_rows_query.rst create mode 100644 packages/google-cloud-bigtable/docs/async_data_row.rst create mode 100644 packages/google-cloud-bigtable/docs/async_data_row_filters.rst create mode 100644 packages/google-cloud-bigtable/docs/async_data_usage.rst diff --git a/packages/google-cloud-bigtable/docs/async_data_client.rst b/packages/google-cloud-bigtable/docs/async_data_client.rst new file mode 100644 index 000000000000..7d2901de41db --- /dev/null +++ b/packages/google-cloud-bigtable/docs/async_data_client.rst @@ -0,0 +1,6 @@ +Bigtable Data Client Async +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. automodule:: google.cloud.bigtable.data._async.client + :members: + :show-inheritance: diff --git a/packages/google-cloud-bigtable/docs/async_data_exceptions.rst b/packages/google-cloud-bigtable/docs/async_data_exceptions.rst new file mode 100644 index 000000000000..6180ef222f37 --- /dev/null +++ b/packages/google-cloud-bigtable/docs/async_data_exceptions.rst @@ -0,0 +1,6 @@ +Custom Exceptions +~~~~~~~~~~~~~~~~~ + +.. automodule:: google.cloud.bigtable.data.exceptions + :members: + :show-inheritance: diff --git a/packages/google-cloud-bigtable/docs/async_data_mutations.rst b/packages/google-cloud-bigtable/docs/async_data_mutations.rst new file mode 100644 index 000000000000..9d7a9eab2e3f --- /dev/null +++ b/packages/google-cloud-bigtable/docs/async_data_mutations.rst @@ -0,0 +1,6 @@ +Mutations +~~~~~~~~~ + +.. automodule:: google.cloud.bigtable.data.mutations + :members: + :show-inheritance: diff --git a/packages/google-cloud-bigtable/docs/async_data_mutations_batcher.rst b/packages/google-cloud-bigtable/docs/async_data_mutations_batcher.rst new file mode 100644 index 000000000000..3e81f885a338 --- /dev/null +++ b/packages/google-cloud-bigtable/docs/async_data_mutations_batcher.rst @@ -0,0 +1,6 @@ +Mutations Batcher Async +~~~~~~~~~~~~~~~~~~~~~~~ + +.. automodule:: google.cloud.bigtable.data._async.mutations_batcher + :members: + :show-inheritance: diff --git a/packages/google-cloud-bigtable/docs/async_data_read_modify_write_rules.rst b/packages/google-cloud-bigtable/docs/async_data_read_modify_write_rules.rst new file mode 100644 index 000000000000..2f28ddf3f723 --- /dev/null +++ b/packages/google-cloud-bigtable/docs/async_data_read_modify_write_rules.rst @@ -0,0 +1,6 @@ +Read Modify Write Rules +~~~~~~~~~~~~~~~~~~~~~~~ + +.. 
automodule:: google.cloud.bigtable.data.read_modify_write_rules + :members: + :show-inheritance: diff --git a/packages/google-cloud-bigtable/docs/async_data_read_rows_query.rst b/packages/google-cloud-bigtable/docs/async_data_read_rows_query.rst new file mode 100644 index 000000000000..4e3e796d9fd8 --- /dev/null +++ b/packages/google-cloud-bigtable/docs/async_data_read_rows_query.rst @@ -0,0 +1,6 @@ +Read Rows Query +~~~~~~~~~~~~~~~ + +.. automodule:: google.cloud.bigtable.data.read_rows_query + :members: + :show-inheritance: diff --git a/packages/google-cloud-bigtable/docs/async_data_row.rst b/packages/google-cloud-bigtable/docs/async_data_row.rst new file mode 100644 index 000000000000..63bc711434f4 --- /dev/null +++ b/packages/google-cloud-bigtable/docs/async_data_row.rst @@ -0,0 +1,6 @@ +Rows and Cells +~~~~~~~~~~~~~~ + +.. automodule:: google.cloud.bigtable.data.row + :members: + :show-inheritance: diff --git a/packages/google-cloud-bigtable/docs/async_data_row_filters.rst b/packages/google-cloud-bigtable/docs/async_data_row_filters.rst new file mode 100644 index 000000000000..22bda8a26131 --- /dev/null +++ b/packages/google-cloud-bigtable/docs/async_data_row_filters.rst @@ -0,0 +1,62 @@ +Bigtable Row Filters +==================== + +It is possible to use a +:class:`RowFilter ` +when constructing a :class:`ReadRowsQuery ` + +The following basic filters +are provided: + +* :class:`SinkFilter <.data.row_filters.SinkFilter>` +* :class:`PassAllFilter <.data.row_filters.PassAllFilter>` +* :class:`BlockAllFilter <.data.row_filters.BlockAllFilter>` +* :class:`RowKeyRegexFilter <.data.row_filters.RowKeyRegexFilter>` +* :class:`RowSampleFilter <.data.row_filters.RowSampleFilter>` +* :class:`FamilyNameRegexFilter <.data.row_filters.FamilyNameRegexFilter>` +* :class:`ColumnQualifierRegexFilter <.data.row_filters.ColumnQualifierRegexFilter>` +* :class:`TimestampRangeFilter <.data.row_filters.TimestampRangeFilter>` +* :class:`ColumnRangeFilter <.data.row_filters.ColumnRangeFilter>` +* :class:`ValueRegexFilter <.data.row_filters.ValueRegexFilter>` +* :class:`ValueRangeFilter <.data.row_filters.ValueRangeFilter>` +* :class:`CellsRowOffsetFilter <.data.row_filters.CellsRowOffsetFilter>` +* :class:`CellsRowLimitFilter <.data.row_filters.CellsRowLimitFilter>` +* :class:`CellsColumnLimitFilter <.data.row_filters.CellsColumnLimitFilter>` +* :class:`StripValueTransformerFilter <.data.row_filters.StripValueTransformerFilter>` +* :class:`ApplyLabelFilter <.data.row_filters.ApplyLabelFilter>` + +In addition, these filters can be combined into composite filters with + +* :class:`RowFilterChain <.data.row_filters.RowFilterChain>` +* :class:`RowFilterUnion <.data.row_filters.RowFilterUnion>` +* :class:`ConditionalRowFilter <.data.row_filters.ConditionalRowFilter>` + +These rules can be nested arbitrarily, with a basic filter at the lowest +level. For example: + +.. code:: python + + # Filter in a specified column (matching any column family). + col1_filter = ColumnQualifierRegexFilter(b'columnbia') + + # Create a filter to label results. + label1 = u'label-red' + label1_filter = ApplyLabelFilter(label1) + + # Combine the filters to label all the cells in columnbia. + chain1 = RowFilterChain(filters=[col1_filter, label1_filter]) + + # Create a similar filter to label cells blue. + col2_filter = ColumnQualifierRegexFilter(b'columnseeya') + label2 = u'label-blue' + label2_filter = ApplyLabelFilter(label2) + chain2 = RowFilterChain(filters=[col2_filter, label2_filter]) + + # Bring our two labeled columns together. 
+ row_filter = RowFilterUnion(filters=[chain1, chain2]) + +---- + +.. automodule:: google.cloud.bigtable.data.row_filters + :members: + :show-inheritance: diff --git a/packages/google-cloud-bigtable/docs/async_data_usage.rst b/packages/google-cloud-bigtable/docs/async_data_usage.rst new file mode 100644 index 000000000000..c436c5988f47 --- /dev/null +++ b/packages/google-cloud-bigtable/docs/async_data_usage.rst @@ -0,0 +1,14 @@ +Using the Async Data Client +=========================== + +.. toctree:: + :maxdepth: 2 + + async_data_client + async_data_mutations_batcher + async_data_read_rows_query + async_data_row + async_data_row_filters + async_data_mutations + async_data_read_modify_write_rules + async_data_exceptions diff --git a/packages/google-cloud-bigtable/docs/index.rst b/packages/google-cloud-bigtable/docs/index.rst index b1c8f0574073..826d8604672a 100644 --- a/packages/google-cloud-bigtable/docs/index.rst +++ b/packages/google-cloud-bigtable/docs/index.rst @@ -8,6 +8,7 @@ Using the API :maxdepth: 2 usage + async_data_usage API Reference diff --git a/packages/google-cloud-bigtable/docs/usage.rst b/packages/google-cloud-bigtable/docs/usage.rst index 73a32b03938f..de0abac9c3c8 100644 --- a/packages/google-cloud-bigtable/docs/usage.rst +++ b/packages/google-cloud-bigtable/docs/usage.rst @@ -1,5 +1,5 @@ -Using the API -============= +Using the Sync Client +===================== .. toctree:: :maxdepth: 2 diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py index ed14c618d836..b702cce0d97e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py @@ -1150,7 +1150,7 @@ async def check_and_mutate_row( applied to row_key. Entries are applied in order, meaning that earlier mutations can be masked by later ones. Must contain at least one entry if - `true_case_mutations is empty, and at most 100000. + `true_case_mutations` is empty, and at most 100000. - operation_timeout: the time budget for the entire operation, in seconds. Failed requests will not be retried. Defaults to the Table's default_operation_timeout Returns: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/row.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/row.py index ecf9cea663e3..13019cbdd57d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/row.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/row.py @@ -147,10 +147,12 @@ def __str__(self) -> str: """ Human-readable string representation - { - (family='fam', qualifier=b'col'): [b'value', (+1 more),], - (family='fam', qualifier=b'col2'): [b'other'], - } + .. 
code-block:: python + + { + (family='fam', qualifier=b'col'): [b'value', (+1 more),], + (family='fam', qualifier=b'col2'): [b'other'], + } """ output = ["{"] for family, qualifier in self._get_column_components(): From eb635031d5b6204598b16d898148944ef49e6648 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Tue, 26 Mar 2024 07:15:47 -0400 Subject: [PATCH 783/892] chore(python): update dependencies in /.kokoro (#943) Source-Link: https://github.com/googleapis/synthtool/commit/db94845da69ccdfefd7ce55c84e6cfa74829747e Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:a8a80fc6456e433df53fc2a0d72ca0345db0ddefb409f1b75b118dfd1babd952 Co-authored-by: Owl Bot --- .../.github/.OwlBot.lock.yaml | 4 +- .../google-cloud-bigtable/.kokoro/build.sh | 7 -- .../.kokoro/docker/docs/Dockerfile | 4 + .../.kokoro/docker/docs/requirements.in | 1 + .../.kokoro/docker/docs/requirements.txt | 38 ++++++ .../.kokoro/requirements.in | 3 +- .../.kokoro/requirements.txt | 114 ++++++++---------- 7 files changed, 99 insertions(+), 72 deletions(-) create mode 100644 packages/google-cloud-bigtable/.kokoro/docker/docs/requirements.in create mode 100644 packages/google-cloud-bigtable/.kokoro/docker/docs/requirements.txt diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index e4e943e0259a..4bdeef3904e2 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:98f3afd11308259de6e828e37376d18867fd321aba07826e29e4f8d9cab56bad -# created: 2024-02-27T15:56:18.442440378Z + digest: sha256:a8a80fc6456e433df53fc2a0d72ca0345db0ddefb409f1b75b118dfd1babd952 +# created: 2024-03-15T16:25:47.905264637Z diff --git a/packages/google-cloud-bigtable/.kokoro/build.sh b/packages/google-cloud-bigtable/.kokoro/build.sh index dec6b66a7872..b2212fce8f47 100755 --- a/packages/google-cloud-bigtable/.kokoro/build.sh +++ b/packages/google-cloud-bigtable/.kokoro/build.sh @@ -33,13 +33,6 @@ export GOOGLE_APPLICATION_CREDENTIALS=${KOKORO_GFILE_DIR}/service-account.json # Setup project id. export PROJECT_ID=$(cat "${KOKORO_GFILE_DIR}/project-id.json") -# Remove old nox -python3 -m pip uninstall --yes --quiet nox-automation - -# Install nox -python3 -m pip install --upgrade --quiet nox -python3 -m nox --version - # If this is a continuous build, send the test log to the FlakyBot. # See https://github.com/googleapis/repo-automation-bots/tree/main/packages/flakybot. 
if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"continuous"* ]]; then diff --git a/packages/google-cloud-bigtable/.kokoro/docker/docs/Dockerfile b/packages/google-cloud-bigtable/.kokoro/docker/docs/Dockerfile index 8e39a2cc438d..bdaf39fe22d0 100644 --- a/packages/google-cloud-bigtable/.kokoro/docker/docs/Dockerfile +++ b/packages/google-cloud-bigtable/.kokoro/docker/docs/Dockerfile @@ -80,4 +80,8 @@ RUN wget -O /tmp/get-pip.py 'https://bootstrap.pypa.io/get-pip.py' \ # Test pip RUN python3 -m pip +# Install build requirements +COPY requirements.txt /requirements.txt +RUN python3 -m pip install --require-hashes -r requirements.txt + CMD ["python3.8"] diff --git a/packages/google-cloud-bigtable/.kokoro/docker/docs/requirements.in b/packages/google-cloud-bigtable/.kokoro/docker/docs/requirements.in new file mode 100644 index 000000000000..816817c672a1 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/docker/docs/requirements.in @@ -0,0 +1 @@ +nox diff --git a/packages/google-cloud-bigtable/.kokoro/docker/docs/requirements.txt b/packages/google-cloud-bigtable/.kokoro/docker/docs/requirements.txt new file mode 100644 index 000000000000..0e5d70f20f83 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/docker/docs/requirements.txt @@ -0,0 +1,38 @@ +# +# This file is autogenerated by pip-compile with Python 3.9 +# by the following command: +# +# pip-compile --allow-unsafe --generate-hashes requirements.in +# +argcomplete==3.2.3 \ + --hash=sha256:bf7900329262e481be5a15f56f19736b376df6f82ed27576fa893652c5de6c23 \ + --hash=sha256:c12355e0494c76a2a7b73e3a59b09024ca0ba1e279fb9ed6c1b82d5b74b6a70c + # via nox +colorlog==6.8.2 \ + --hash=sha256:3e3e079a41feb5a1b64f978b5ea4f46040a94f11f0e8bbb8261e3dbbeca64d44 \ + --hash=sha256:4dcbb62368e2800cb3c5abd348da7e53f6c362dda502ec27c560b2e58a66bd33 + # via nox +distlib==0.3.8 \ + --hash=sha256:034db59a0b96f8ca18035f36290806a9a6e6bd9d1ff91e45a7f172eb17e51784 \ + --hash=sha256:1530ea13e350031b6312d8580ddb6b27a104275a31106523b8f123787f494f64 + # via virtualenv +filelock==3.13.1 \ + --hash=sha256:521f5f56c50f8426f5e03ad3b281b490a87ef15bc6c526f168290f0c7148d44e \ + --hash=sha256:57dbda9b35157b05fb3e58ee91448612eb674172fab98ee235ccb0b5bee19a1c + # via virtualenv +nox==2024.3.2 \ + --hash=sha256:e53514173ac0b98dd47585096a55572fe504fecede58ced708979184d05440be \ + --hash=sha256:f521ae08a15adbf5e11f16cb34e8d0e6ea521e0b92868f684e91677deb974553 + # via -r requirements.in +packaging==24.0 \ + --hash=sha256:2ddfb553fdf02fb784c234c7ba6ccc288296ceabec964ad2eae3777778130bc5 \ + --hash=sha256:eb82c5e3e56209074766e6885bb04b8c38a0c015d0a30036ebe7ece34c9989e9 + # via nox +platformdirs==4.2.0 \ + --hash=sha256:0614df2a2f37e1a662acbd8e2b25b92ccf8632929bc6d43467e17fe89c75e068 \ + --hash=sha256:ef0cc731df711022c174543cb70a9b5bd22e5a9337c8624ef2c2ceb8ddad8768 + # via virtualenv +virtualenv==20.25.1 \ + --hash=sha256:961c026ac520bac5f69acb8ea063e8a4f071bcc9457b9c1f28f6b085c511583a \ + --hash=sha256:e08e13ecdca7a0bd53798f356d5831434afa5b07b93f0abdf0797b7a06ffe197 + # via nox diff --git a/packages/google-cloud-bigtable/.kokoro/requirements.in b/packages/google-cloud-bigtable/.kokoro/requirements.in index ec867d9fd65a..fff4d9ce0d0a 100644 --- a/packages/google-cloud-bigtable/.kokoro/requirements.in +++ b/packages/google-cloud-bigtable/.kokoro/requirements.in @@ -1,5 +1,5 @@ gcp-docuploader -gcp-releasetool>=1.10.5 # required for compatibility with cryptography>=39.x +gcp-releasetool>=2 # required for compatibility with cryptography>=42.x importlib-metadata typing-extensions twine @@ 
-8,3 +8,4 @@ setuptools nox>=2022.11.21 # required to remove dependency on py charset-normalizer<3 click<8.1.0 +cryptography>=42.0.5 diff --git a/packages/google-cloud-bigtable/.kokoro/requirements.txt b/packages/google-cloud-bigtable/.kokoro/requirements.txt index bda8e38c4f31..dd61f5f32018 100644 --- a/packages/google-cloud-bigtable/.kokoro/requirements.txt +++ b/packages/google-cloud-bigtable/.kokoro/requirements.txt @@ -93,40 +93,41 @@ colorlog==6.7.0 \ # via # gcp-docuploader # nox -cryptography==42.0.4 \ - --hash=sha256:01911714117642a3f1792c7f376db572aadadbafcd8d75bb527166009c9f1d1b \ - --hash=sha256:0e89f7b84f421c56e7ff69f11c441ebda73b8a8e6488d322ef71746224c20fce \ - --hash=sha256:12d341bd42cdb7d4937b0cabbdf2a94f949413ac4504904d0cdbdce4a22cbf88 \ - --hash=sha256:15a1fb843c48b4a604663fa30af60818cd28f895572386e5f9b8a665874c26e7 \ - --hash=sha256:1cdcdbd117681c88d717437ada72bdd5be9de117f96e3f4d50dab3f59fd9ab20 \ - --hash=sha256:1df6fcbf60560d2113b5ed90f072dc0b108d64750d4cbd46a21ec882c7aefce9 \ - --hash=sha256:3c6048f217533d89f2f8f4f0fe3044bf0b2090453b7b73d0b77db47b80af8dff \ - --hash=sha256:3e970a2119507d0b104f0a8e281521ad28fc26f2820687b3436b8c9a5fcf20d1 \ - --hash=sha256:44a64043f743485925d3bcac548d05df0f9bb445c5fcca6681889c7c3ab12764 \ - --hash=sha256:4e36685cb634af55e0677d435d425043967ac2f3790ec652b2b88ad03b85c27b \ - --hash=sha256:5f8907fcf57392cd917892ae83708761c6ff3c37a8e835d7246ff0ad251d9298 \ - --hash=sha256:69b22ab6506a3fe483d67d1ed878e1602bdd5912a134e6202c1ec672233241c1 \ - --hash=sha256:6bfadd884e7280df24d26f2186e4e07556a05d37393b0f220a840b083dc6a824 \ - --hash=sha256:6d0fbe73728c44ca3a241eff9aefe6496ab2656d6e7a4ea2459865f2e8613257 \ - --hash=sha256:6ffb03d419edcab93b4b19c22ee80c007fb2d708429cecebf1dd3258956a563a \ - --hash=sha256:810bcf151caefc03e51a3d61e53335cd5c7316c0a105cc695f0959f2c638b129 \ - --hash=sha256:831a4b37accef30cccd34fcb916a5d7b5be3cbbe27268a02832c3e450aea39cb \ - --hash=sha256:887623fe0d70f48ab3f5e4dbf234986b1329a64c066d719432d0698522749929 \ - --hash=sha256:a0298bdc6e98ca21382afe914c642620370ce0470a01e1bef6dd9b5354c36854 \ - --hash=sha256:a1327f280c824ff7885bdeef8578f74690e9079267c1c8bd7dc5cc5aa065ae52 \ - --hash=sha256:c1f25b252d2c87088abc8bbc4f1ecbf7c919e05508a7e8628e6875c40bc70923 \ - --hash=sha256:c3a5cbc620e1e17009f30dd34cb0d85c987afd21c41a74352d1719be33380885 \ - --hash=sha256:ce8613beaffc7c14f091497346ef117c1798c202b01153a8cc7b8e2ebaaf41c0 \ - --hash=sha256:d2a27aca5597c8a71abbe10209184e1a8e91c1fd470b5070a2ea60cafec35bcd \ - --hash=sha256:dad9c385ba8ee025bb0d856714f71d7840020fe176ae0229de618f14dae7a6e2 \ - --hash=sha256:db4b65b02f59035037fde0998974d84244a64c3265bdef32a827ab9b63d61b18 \ - --hash=sha256:e09469a2cec88fb7b078e16d4adec594414397e8879a4341c6ace96013463d5b \ - --hash=sha256:e53dc41cda40b248ebc40b83b31516487f7db95ab8ceac1f042626bc43a2f992 \ - --hash=sha256:f1e85a178384bf19e36779d91ff35c7617c885da487d689b05c1366f9933ad74 \ - --hash=sha256:f47be41843200f7faec0683ad751e5ef11b9a56a220d57f300376cd8aba81660 \ - --hash=sha256:fb0cef872d8193e487fc6bdb08559c3aa41b659a7d9be48b2e10747f47863925 \ - --hash=sha256:ffc73996c4fca3d2b6c1c8c12bfd3ad00def8621da24f547626bf06441400449 +cryptography==42.0.5 \ + --hash=sha256:0270572b8bd2c833c3981724b8ee9747b3ec96f699a9665470018594301439ee \ + --hash=sha256:111a0d8553afcf8eb02a4fea6ca4f59d48ddb34497aa8706a6cf536f1a5ec576 \ + --hash=sha256:16a48c23a62a2f4a285699dba2e4ff2d1cff3115b9df052cdd976a18856d8e3d \ + --hash=sha256:1b95b98b0d2af784078fa69f637135e3c317091b615cd0905f8b8a087e86fa30 \ + 
--hash=sha256:1f71c10d1e88467126f0efd484bd44bca5e14c664ec2ede64c32f20875c0d413 \ + --hash=sha256:2424ff4c4ac7f6b8177b53c17ed5d8fa74ae5955656867f5a8affaca36a27abb \ + --hash=sha256:2bce03af1ce5a5567ab89bd90d11e7bbdff56b8af3acbbec1faded8f44cb06da \ + --hash=sha256:329906dcc7b20ff3cad13c069a78124ed8247adcac44b10bea1130e36caae0b4 \ + --hash=sha256:37dd623507659e08be98eec89323469e8c7b4c1407c85112634ae3dbdb926fdd \ + --hash=sha256:3eaafe47ec0d0ffcc9349e1708be2aaea4c6dd4978d76bf6eb0cb2c13636c6fc \ + --hash=sha256:5e6275c09d2badf57aea3afa80d975444f4be8d3bc58f7f80d2a484c6f9485c8 \ + --hash=sha256:6fe07eec95dfd477eb9530aef5bead34fec819b3aaf6c5bd6d20565da607bfe1 \ + --hash=sha256:7367d7b2eca6513681127ebad53b2582911d1736dc2ffc19f2c3ae49997496bc \ + --hash=sha256:7cde5f38e614f55e28d831754e8a3bacf9ace5d1566235e39d91b35502d6936e \ + --hash=sha256:9481ffe3cf013b71b2428b905c4f7a9a4f76ec03065b05ff499bb5682a8d9ad8 \ + --hash=sha256:98d8dc6d012b82287f2c3d26ce1d2dd130ec200c8679b6213b3c73c08b2b7940 \ + --hash=sha256:a011a644f6d7d03736214d38832e030d8268bcff4a41f728e6030325fea3e400 \ + --hash=sha256:a2913c5375154b6ef2e91c10b5720ea6e21007412f6437504ffea2109b5a33d7 \ + --hash=sha256:a30596bae9403a342c978fb47d9b0ee277699fa53bbafad14706af51fe543d16 \ + --hash=sha256:b03c2ae5d2f0fc05f9a2c0c997e1bc18c8229f392234e8a0194f202169ccd278 \ + --hash=sha256:b6cd2203306b63e41acdf39aa93b86fb566049aeb6dc489b70e34bcd07adca74 \ + --hash=sha256:b7ffe927ee6531c78f81aa17e684e2ff617daeba7f189f911065b2ea2d526dec \ + --hash=sha256:b8cac287fafc4ad485b8a9b67d0ee80c66bf3574f655d3b97ef2e1082360faf1 \ + --hash=sha256:ba334e6e4b1d92442b75ddacc615c5476d4ad55cc29b15d590cc6b86efa487e2 \ + --hash=sha256:ba3e4a42397c25b7ff88cdec6e2a16c2be18720f317506ee25210f6d31925f9c \ + --hash=sha256:c41fb5e6a5fe9ebcd58ca3abfeb51dffb5d83d6775405305bfa8715b76521922 \ + --hash=sha256:cd2030f6650c089aeb304cf093f3244d34745ce0cfcc39f20c6fbfe030102e2a \ + --hash=sha256:cd65d75953847815962c84a4654a84850b2bb4aed3f26fadcc1c13892e1e29f6 \ + --hash=sha256:e4985a790f921508f36f81831817cbc03b102d643b5fcb81cd33df3fa291a1a1 \ + --hash=sha256:e807b3188f9eb0eaa7bbb579b462c5ace579f1cedb28107ce8b48a9f7ad3679e \ + --hash=sha256:f12764b8fffc7a123f641d7d049d382b73f96a34117e0b637b80643169cec8ac \ + --hash=sha256:f8837fe1d6ac4a8052a9a8ddab256bc006242696f03368a4009be7ee3075cdb7 # via + # -r requirements.in # gcp-releasetool # secretstorage distlib==0.3.7 \ @@ -145,9 +146,9 @@ gcp-docuploader==0.6.5 \ --hash=sha256:30221d4ac3e5a2b9c69aa52fdbef68cc3f27d0e6d0d90e220fc024584b8d2318 \ --hash=sha256:b7458ef93f605b9d46a4bf3a8dc1755dad1f31d030c8679edf304e343b347eea # via -r requirements.in -gcp-releasetool==1.16.0 \ - --hash=sha256:27bf19d2e87aaa884096ff941aa3c592c482be3d6a2bfe6f06afafa6af2353e3 \ - --hash=sha256:a316b197a543fd036209d0caba7a8eb4d236d8e65381c80cbc6d7efaa7606d63 +gcp-releasetool==2.0.0 \ + --hash=sha256:3d73480b50ba243f22d7c7ec08b115a30e1c7817c4899781840c26f9c55b8277 \ + --hash=sha256:7aa9fd935ec61e581eb8458ad00823786d91756c25e492f372b2b30962f3c28f # via -r requirements.in google-api-core==2.12.0 \ --hash=sha256:c22e01b1e3c4dcd90998494879612c38d0a3411d1f7b679eb89e2abe3ce1f553 \ @@ -392,29 +393,18 @@ platformdirs==3.11.0 \ --hash=sha256:cf8ee52a3afdb965072dcc652433e0c7e3e40cf5ea1477cd4b3b1d2eb75495b3 \ --hash=sha256:e9d171d00af68be50e9202731309c4e658fd8bc76f55c11c7dd760d023bda68e # via virtualenv -protobuf==3.20.3 \ - --hash=sha256:03038ac1cfbc41aa21f6afcbcd357281d7521b4157926f30ebecc8d4ea59dcb7 \ - --hash=sha256:28545383d61f55b57cf4df63eebd9827754fd2dc25f80c5253f9184235db242c \ - 
--hash=sha256:2e3427429c9cffebf259491be0af70189607f365c2f41c7c3764af6f337105f2 \ - --hash=sha256:398a9e0c3eaceb34ec1aee71894ca3299605fa8e761544934378bbc6c97de23b \ - --hash=sha256:44246bab5dd4b7fbd3c0c80b6f16686808fab0e4aca819ade6e8d294a29c7050 \ - --hash=sha256:447d43819997825d4e71bf5769d869b968ce96848b6479397e29fc24c4a5dfe9 \ - --hash=sha256:67a3598f0a2dcbc58d02dd1928544e7d88f764b47d4a286202913f0b2801c2e7 \ - --hash=sha256:74480f79a023f90dc6e18febbf7b8bac7508420f2006fabd512013c0c238f454 \ - --hash=sha256:819559cafa1a373b7096a482b504ae8a857c89593cf3a25af743ac9ecbd23480 \ - --hash=sha256:899dc660cd599d7352d6f10d83c95df430a38b410c1b66b407a6b29265d66469 \ - --hash=sha256:8c0c984a1b8fef4086329ff8dd19ac77576b384079247c770f29cc8ce3afa06c \ - --hash=sha256:9aae4406ea63d825636cc11ffb34ad3379335803216ee3a856787bcf5ccc751e \ - --hash=sha256:a7ca6d488aa8ff7f329d4c545b2dbad8ac31464f1d8b1c87ad1346717731e4db \ - --hash=sha256:b6cc7ba72a8850621bfec987cb72623e703b7fe2b9127a161ce61e61558ad905 \ - --hash=sha256:bf01b5720be110540be4286e791db73f84a2b721072a3711efff6c324cdf074b \ - --hash=sha256:c02ce36ec760252242a33967d51c289fd0e1c0e6e5cc9397e2279177716add86 \ - --hash=sha256:d9e4432ff660d67d775c66ac42a67cf2453c27cb4d738fc22cb53b5d84c135d4 \ - --hash=sha256:daa564862dd0d39c00f8086f88700fdbe8bc717e993a21e90711acfed02f2402 \ - --hash=sha256:de78575669dddf6099a8a0f46a27e82a1783c557ccc38ee620ed8cc96d3be7d7 \ - --hash=sha256:e64857f395505ebf3d2569935506ae0dfc4a15cb80dc25261176c784662cdcc4 \ - --hash=sha256:f4bd856d702e5b0d96a00ec6b307b0f51c1982c2bf9c0052cf9019e9a544ba99 \ - --hash=sha256:f4c42102bc82a51108e449cbb32b19b180022941c727bac0cfd50170341f16ee +protobuf==4.25.3 \ + --hash=sha256:19b270aeaa0099f16d3ca02628546b8baefe2955bbe23224aaf856134eccf1e4 \ + --hash=sha256:209ba4cc916bab46f64e56b85b090607a676f66b473e6b762e6f1d9d591eb2e8 \ + --hash=sha256:25b5d0b42fd000320bd7830b349e3b696435f3b329810427a6bcce6a5492cc5c \ + --hash=sha256:7c8daa26095f82482307bc717364e7c13f4f1c99659be82890dcfc215194554d \ + --hash=sha256:c053062984e61144385022e53678fbded7aea14ebb3e0305ae3592fb219ccfa4 \ + --hash=sha256:d4198877797a83cbfe9bffa3803602bbe1625dc30d8a097365dbc762e5790faa \ + --hash=sha256:e3c97a1555fd6388f857770ff8b9703083de6bf1f9274a002a332d65fbb56c8c \ + --hash=sha256:e7cb0ae90dd83727f0c0718634ed56837bfeeee29a5f82a7514c03ee1364c019 \ + --hash=sha256:f0700d54bcf45424477e46a9f0944155b46fb0639d69728739c0e47bab83f2b9 \ + --hash=sha256:f1279ab38ecbfae7e456a108c5c0681e4956d5b1090027c1de0f934dfdb4b35c \ + --hash=sha256:f4f118245c4a087776e0a8408be33cf09f6c547442c00395fbfb116fac2f8ac2 # via # gcp-docuploader # gcp-releasetool @@ -518,7 +508,7 @@ zipp==3.17.0 \ # via importlib-metadata # The following packages are considered to be unsafe in a requirements file: -setuptools==68.2.2 \ - --hash=sha256:4ac1475276d2f1c48684874089fefcd83bd7162ddaafb81fac866ba0db282a87 \ - --hash=sha256:b454a35605876da60632df1a60f736524eb73cc47bbc9f3f1ef1b644de74fd2a +setuptools==69.2.0 \ + --hash=sha256:0ff4183f8f42cd8fa3acea16c45205521a4ef28f73c6391d8a25e92893134f2e \ + --hash=sha256:c21c49fb1042386df081cb5d86759792ab89efca84cf114889191cd09aacc80c # via -r requirements.in From 2ca3b78496c72bce73a9c6a2af7a291de9b1d403 Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Thu, 28 Mar 2024 11:28:56 -0700 Subject: [PATCH 784/892] chore(docs): remove preview warning for async data client (#945) * remove preview from READMEs * removed preview warning from docstrings * added note to data_api page --- packages/google-cloud-bigtable/README.rst | 6 ++-- 
.../google-cloud-bigtable/docs/data-api.rst | 7 ++++ .../google/cloud/bigtable/data/README.rst | 6 ++-- .../cloud/bigtable/data/_async/client.py | 36 ------------------- 4 files changed, 11 insertions(+), 44 deletions(-) diff --git a/packages/google-cloud-bigtable/README.rst b/packages/google-cloud-bigtable/README.rst index 2bc151e957cd..69856e05bb12 100644 --- a/packages/google-cloud-bigtable/README.rst +++ b/packages/google-cloud-bigtable/README.rst @@ -21,18 +21,16 @@ Analytics, Maps, and Gmail. .. _Product Documentation: https://cloud.google.com/bigtable/docs -Preview Async Data Client +Async Data Client ------------------------- -:code:`v2.23.0` includes a preview release of the new :code:`BigtableDataClientAsync` client, accessible at the import path +:code:`v2.23.0` includes a release of the new :code:`BigtableDataClientAsync` client, accessible at the import path :code:`google.cloud.bigtable.data`. The new client brings a simplified API and increased performance using asyncio, with a corresponding synchronous surface coming soon. The new client is focused on the data API (i.e. reading and writing Bigtable data), with admin operations remaining in the existing client. -:code:`BigtableDataClientAsync` is currently in preview, and is not recommended for production use. - Feedback and bug reports are welcome at cbt-python-client-v3-feedback@google.com, or through the Github `issue tracker`_. diff --git a/packages/google-cloud-bigtable/docs/data-api.rst b/packages/google-cloud-bigtable/docs/data-api.rst index 01a49178fd4d..9b50e9ec9a8e 100644 --- a/packages/google-cloud-bigtable/docs/data-api.rst +++ b/packages/google-cloud-bigtable/docs/data-api.rst @@ -1,6 +1,13 @@ Data API ======== +.. note:: + This page describes how to use the Data API with the synchronous Bigtable client. + Examples for using the Data API with the async client can be found in the + `Getting Started Guide`_. + +.. _Getting Started Guide: https://cloud.google.com/bigtable/docs/samples-python-hello + After creating a :class:`Table ` and some column families, you are ready to store and retrieve data. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/README.rst b/packages/google-cloud-bigtable/google/cloud/bigtable/data/README.rst index 7a05cf913f65..8142cc34d9c8 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/README.rst +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/README.rst @@ -1,7 +1,5 @@ -Async Data Client Preview -========================= - -This new client is currently in preview, and is not recommended for production use. +Async Data Client +================= Synchronous API surface and usage examples coming soon diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py index b702cce0d97e..e385ecde783a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py @@ -101,9 +101,6 @@ def __init__( Client should be created within an async context (running event loop) - Warning: BigtableDataClientAsync is currently in preview, and is not - yet recommended for production use. - Args: project: the project which the client acts on behalf of. 
If not passed, falls back to the default inferred @@ -566,9 +563,6 @@ async def read_rows_stream( Failed requests within operation_timeout will be retried based on the retryable_errors list until operation_timeout is reached. - Warning: BigtableDataClientAsync is currently in preview, and is not - yet recommended for production use. - Args: - query: contains details about which rows to return - operation_timeout: the time budget for the entire operation, in seconds. @@ -620,9 +614,6 @@ async def read_rows( Failed requests within operation_timeout will be retried based on the retryable_errors list until operation_timeout is reached. - Warning: BigtableDataClientAsync is currently in preview, and is not - yet recommended for production use. - Args: - query: contains details about which rows to return - operation_timeout: the time budget for the entire operation, in seconds. @@ -669,9 +660,6 @@ async def read_row( Failed requests within operation_timeout will be retried based on the retryable_errors list until operation_timeout is reached. - Warning: BigtableDataClientAsync is currently in preview, and is not - yet recommended for production use. - Args: - query: contains details about which rows to return - operation_timeout: the time budget for the entire operation, in seconds. @@ -727,9 +715,6 @@ async def read_rows_sharded( results = await table.read_rows_sharded(shard_queries) ``` - Warning: BigtableDataClientAsync is currently in preview, and is not - yet recommended for production use. - Args: - sharded_query: a sharded query to execute - operation_timeout: the time budget for the entire operation, in seconds. @@ -810,9 +795,6 @@ async def row_exists( Return a boolean indicating whether the specified row exists in the table. uses the filters: chain(limit cells per row = 1, strip value) - Warning: BigtableDataClientAsync is currently in preview, and is not - yet recommended for production use. - Args: - row_key: the key of the row to check - operation_timeout: the time budget for the entire operation, in seconds. @@ -867,9 +849,6 @@ async def sample_row_keys( RowKeySamples is simply a type alias for list[tuple[bytes, int]]; a list of row_keys, along with offset positions in the table - Warning: BigtableDataClientAsync is currently in preview, and is not - yet recommended for production use. - Args: - operation_timeout: the time budget for the entire operation, in seconds. Failed requests will be retried within the budget.i @@ -942,9 +921,6 @@ def mutations_batcher( Can be used to iteratively add mutations that are flushed as a group, to avoid excess network calls - Warning: BigtableDataClientAsync is currently in preview, and is not - yet recommended for production use. - Args: - flush_interval: Automatically flush every flush_interval seconds. If None, a table default will be used @@ -994,9 +970,6 @@ async def mutate_row( Idempotent operations (i.e, all mutations have an explicit timestamp) will be retried on server failure. Non-idempotent operations will not. - Warning: BigtableDataClientAsync is currently in preview, and is not - yet recommended for production use. - Args: - row_key: the row to apply mutations to - mutations: the set of mutations to apply to the row @@ -1077,9 +1050,6 @@ async def bulk_mutate_rows( will be retried on failure. Non-idempotent will not, and will reported in a raised exception group - Warning: BigtableDataClientAsync is currently in preview, and is not - yet recommended for production use. 
- Args: - mutation_entries: the batches of mutations to apply Each entry will be applied atomically, but entries will be applied @@ -1128,9 +1098,6 @@ async def check_and_mutate_row( Non-idempotent operation: will not be retried - Warning: BigtableDataClientAsync is currently in preview, and is not - yet recommended for production use. - Args: - row_key: the key of the row to mutate - predicate: the filter to be applied to the contents of the specified row. @@ -1199,9 +1166,6 @@ async def read_modify_write_row( Non-idempotent operation: will not be retried - Warning: BigtableDataClientAsync is currently in preview, and is not - yet recommended for production use. - Args: - row_key: the key of the row to apply read/modify/write rules to - rules: A rule or set of rules to apply to the row. From 424398e08f993c8c7c360081b9b03712b29d9c53 Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Thu, 11 Apr 2024 11:58:47 -0700 Subject: [PATCH 785/892] fix: use insecure grpc channel with emulator (#946) --- .../google/cloud/bigtable/client.py | 52 +++--------------- .../tests/unit/v2_client/test_client.py | 54 ++----------------- 2 files changed, 12 insertions(+), 94 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py index c82a268c63fa..0c89ea562097 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py @@ -32,7 +32,6 @@ import grpc # type: ignore from google.api_core.gapic_v1 import client_info as client_info_lib -import google.auth # type: ignore from google.auth.credentials import AnonymousCredentials # type: ignore from google.cloud import bigtable_v2 @@ -215,58 +214,21 @@ def _get_scopes(self): return scopes def _emulator_channel(self, transport, options): - """Create a channel using self._credentials + """Create a channel for use with the Bigtable emulator. - Works in a similar way to ``grpc.secure_channel`` but using - ``grpc.local_channel_credentials`` rather than - ``grpc.ssh_channel_credentials`` to allow easy connection to a - local emulator. + Insecure channels are used for the emulator as secure channels + cannot be used to communicate on some environments. + https://github.com/googleapis/python-firestore/issues/359 Returns: grpc.Channel or grpc.aio.Channel """ - # TODO: Implement a special credentials type for emulator and use - # "transport.create_channel" to create gRPC channels once google-auth - # extends it's allowed credentials types. # Note: this code also exists in the firestore client. if "GrpcAsyncIOTransport" in str(transport.__name__): - return grpc.aio.secure_channel( - self._emulator_host, - self._local_composite_credentials(), - options=options, - ) + channel_fn = grpc.aio.insecure_channel else: - return grpc.secure_channel( - self._emulator_host, - self._local_composite_credentials(), - options=options, - ) - - def _local_composite_credentials(self): - """Create credentials for the local emulator channel. - - :return: grpc.ChannelCredentials - """ - credentials = google.auth.credentials.with_scopes_if_required( - self._credentials, None - ) - request = google.auth.transport.requests.Request() - - # Create the metadata plugin for inserting the authorization header. - metadata_plugin = google.auth.transport.grpc.AuthMetadataPlugin( - credentials, request - ) - - # Create a set of grpc.CallCredentials using the metadata plugin. 
- google_auth_credentials = grpc.metadata_call_credentials(metadata_plugin) - - # Using the local_credentials to allow connection to emulator - local_credentials = grpc.local_channel_credentials() - - # Combine the local credentials and the authorization credentials. - return grpc.composite_channel_credentials( - local_credentials, google_auth_credentials - ) + channel_fn = grpc.insecure_channel + return channel_fn(self._emulator_host, options=options) def _create_gapic_client_channel(self, client_class, grpc_transport): if self._emulator_host is not None: diff --git a/packages/google-cloud-bigtable/tests/unit/v2_client/test_client.py b/packages/google-cloud-bigtable/tests/unit/v2_client/test_client.py index 5944c58a3701..b6eb6ac96162 100644 --- a/packages/google-cloud-bigtable/tests/unit/v2_client/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/v2_client/test_client.py @@ -176,7 +176,7 @@ def test_client_constructor_w_emulator_host(): emulator_host = "localhost:8081" with mock.patch("os.environ", {BIGTABLE_EMULATOR: emulator_host}): - with mock.patch("grpc.secure_channel") as factory: + with mock.patch("grpc.insecure_channel") as factory: client = _make_client() # don't test local_composite_credentials # client._local_composite_credentials = lambda: credentials @@ -188,7 +188,6 @@ def test_client_constructor_w_emulator_host(): assert client.project == _DEFAULT_BIGTABLE_EMULATOR_CLIENT factory.assert_called_once_with( emulator_host, - mock.ANY, # test of creds wrapping in '_emulator_host' below options=_GRPC_CHANNEL_OPTIONS, ) @@ -199,7 +198,7 @@ def test_client_constructor_w_emulator_host_w_project(): emulator_host = "localhost:8081" with mock.patch("os.environ", {BIGTABLE_EMULATOR: emulator_host}): - with mock.patch("grpc.secure_channel") as factory: + with mock.patch("grpc.insecure_channel") as factory: client = _make_client(project=PROJECT) # channels are formed when needed, so access a client # create a gapic channel @@ -209,7 +208,6 @@ def test_client_constructor_w_emulator_host_w_project(): assert client.project == PROJECT factory.assert_called_once_with( emulator_host, - mock.ANY, # test of creds wrapping in '_emulator_host' below options=_GRPC_CHANNEL_OPTIONS, ) @@ -222,7 +220,7 @@ def test_client_constructor_w_emulator_host_w_credentials(): emulator_host = "localhost:8081" credentials = _make_credentials() with mock.patch("os.environ", {BIGTABLE_EMULATOR: emulator_host}): - with mock.patch("grpc.secure_channel") as factory: + with mock.patch("grpc.insecure_channel") as factory: client = _make_client(credentials=credentials) # channels are formed when needed, so access a client # create a gapic channel @@ -232,7 +230,6 @@ def test_client_constructor_w_emulator_host_w_credentials(): assert client.project == _DEFAULT_BIGTABLE_EMULATOR_CLIENT factory.assert_called_once_with( emulator_host, - mock.ANY, # test of creds wrapping in '_emulator_host' below options=_GRPC_CHANNEL_OPTIONS, ) @@ -271,15 +268,13 @@ def test_client__emulator_channel_w_sync(): project=PROJECT, credentials=_make_credentials(), read_only=True ) client._emulator_host = emulator_host - lcc = client._local_composite_credentials = mock.Mock(spec=[]) - with mock.patch("grpc.secure_channel") as patched: + with mock.patch("grpc.insecure_channel") as patched: channel = client._emulator_channel(transport, options) assert channel is patched.return_value patched.assert_called_once_with( emulator_host, - lcc.return_value, options=options, ) @@ -293,56 +288,17 @@ def test_client__emulator_channel_w_async(): 
project=PROJECT, credentials=_make_credentials(), read_only=True ) client._emulator_host = emulator_host - lcc = client._local_composite_credentials = mock.Mock(spec=[]) - with mock.patch("grpc.aio.secure_channel") as patched: + with mock.patch("grpc.aio.insecure_channel") as patched: channel = client._emulator_channel(transport, options) assert channel is patched.return_value patched.assert_called_once_with( emulator_host, - lcc.return_value, options=options, ) -def test_client__local_composite_credentials(): - client = _make_client( - project=PROJECT, credentials=_make_credentials(), read_only=True - ) - - wsir_patch = mock.patch("google.auth.credentials.with_scopes_if_required") - request_patch = mock.patch("google.auth.transport.requests.Request") - amp_patch = mock.patch("google.auth.transport.grpc.AuthMetadataPlugin") - grpc_patches = mock.patch.multiple( - "grpc", - metadata_call_credentials=mock.DEFAULT, - local_channel_credentials=mock.DEFAULT, - composite_channel_credentials=mock.DEFAULT, - ) - with wsir_patch as wsir_patched: - with request_patch as request_patched: - with amp_patch as amp_patched: - with grpc_patches as grpc_patched: - credentials = client._local_composite_credentials() - - grpc_mcc = grpc_patched["metadata_call_credentials"] - grpc_lcc = grpc_patched["local_channel_credentials"] - grpc_ccc = grpc_patched["composite_channel_credentials"] - - assert credentials is grpc_ccc.return_value - - wsir_patched.assert_called_once_with(client._credentials, None) - request_patched.assert_called_once_with() - amp_patched.assert_called_once_with( - wsir_patched.return_value, - request_patched.return_value, - ) - grpc_mcc.assert_called_once_with(amp_patched.return_value) - grpc_lcc.assert_called_once_with() - grpc_ccc.assert_called_once_with(grpc_lcc.return_value, grpc_mcc.return_value) - - def _create_gapic_client_channel_helper(endpoint=None, emulator_host=None): from google.cloud.bigtable.client import _GRPC_CHANNEL_OPTIONS From 596509061e376f0fde7fc661fbffbbd8fedf4388 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Fri, 12 Apr 2024 17:38:46 -0400 Subject: [PATCH 786/892] chore(python): bump idna from 3.4 to 3.7 in .kokoro (#954) * chore(python): bump idna from 3.4 to 3.7 in .kokoro Source-Link: https://github.com/googleapis/synthtool/commit/d50980e704793a2d3310bfb3664f3a82f24b5796 Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:5a4c19d17e597b92d786e569be101e636c9c2817731f80a5adec56b2aa8fe070 * Apply changes from googleapis/synthtool#1950 --------- Co-authored-by: Owl Bot Co-authored-by: Anthonios Partheniou --- .../.github/.OwlBot.lock.yaml | 4 ++-- .../.github/auto-label.yaml | 5 +++++ .../.github/blunderbuss.yml | 20 +++++++++++++++++ .../.kokoro/requirements.txt | 6 ++--- packages/google-cloud-bigtable/docs/index.rst | 5 +++++ .../docs/summary_overview.md | 22 +++++++++++++++++++ .../samples/hello/noxfile.py | 15 ++++++------- .../samples/snippets/data_client/noxfile.py | 17 +++++++------- .../samples/snippets/deletes/noxfile.py | 15 ++++++------- .../samples/snippets/filters/noxfile.py | 15 ++++++------- .../samples/snippets/reads/noxfile.py | 15 ++++++------- 11 files changed, 93 insertions(+), 46 deletions(-) create mode 100644 packages/google-cloud-bigtable/.github/blunderbuss.yml create mode 100644 packages/google-cloud-bigtable/docs/summary_overview.md diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml 
b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 4bdeef3904e2..81f87c56917d 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:a8a80fc6456e433df53fc2a0d72ca0345db0ddefb409f1b75b118dfd1babd952 -# created: 2024-03-15T16:25:47.905264637Z + digest: sha256:5a4c19d17e597b92d786e569be101e636c9c2817731f80a5adec56b2aa8fe070 +# created: 2024-04-12T11:35:58.922854369Z diff --git a/packages/google-cloud-bigtable/.github/auto-label.yaml b/packages/google-cloud-bigtable/.github/auto-label.yaml index b2016d119b40..8b37ee89711f 100644 --- a/packages/google-cloud-bigtable/.github/auto-label.yaml +++ b/packages/google-cloud-bigtable/.github/auto-label.yaml @@ -13,3 +13,8 @@ # limitations under the License. requestsize: enabled: true + +path: + pullrequest: true + paths: + samples: "samples" diff --git a/packages/google-cloud-bigtable/.github/blunderbuss.yml b/packages/google-cloud-bigtable/.github/blunderbuss.yml new file mode 100644 index 000000000000..1e27e789aaa0 --- /dev/null +++ b/packages/google-cloud-bigtable/.github/blunderbuss.yml @@ -0,0 +1,20 @@ +# Blunderbuss config +# +# This file controls who is assigned for pull requests and issues. +# Note: This file is autogenerated. To make changes to the assignee +# team, please update `codeowner_team` in `.repo-metadata.json`. +assign_issues: + - googleapis/api-bigtable + - googleapis/api-bigtable-partners + +assign_issues_by: + - labels: + - "samples" + to: + - googleapis/python-samples-reviewers + - googleapis/api-bigtable + - googleapis/api-bigtable-partners + +assign_prs: + - googleapis/api-bigtable + - googleapis/api-bigtable-partners diff --git a/packages/google-cloud-bigtable/.kokoro/requirements.txt b/packages/google-cloud-bigtable/.kokoro/requirements.txt index dd61f5f32018..51f92b8e12f1 100644 --- a/packages/google-cloud-bigtable/.kokoro/requirements.txt +++ b/packages/google-cloud-bigtable/.kokoro/requirements.txt @@ -252,9 +252,9 @@ googleapis-common-protos==1.61.0 \ --hash=sha256:22f1915393bb3245343f6efe87f6fe868532efc12aa26b391b15132e1279f1c0 \ --hash=sha256:8a64866a97f6304a7179873a465d6eee97b7a24ec6cfd78e0f575e96b821240b # via google-api-core -idna==3.4 \ - --hash=sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4 \ - --hash=sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2 +idna==3.7 \ + --hash=sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc \ + --hash=sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0 # via requests importlib-metadata==6.8.0 \ --hash=sha256:3ebb78df84a805d7698245025b975d9d67053cd94c79245ba4b3eb694abe68bb \ diff --git a/packages/google-cloud-bigtable/docs/index.rst b/packages/google-cloud-bigtable/docs/index.rst index 826d8604672a..0f04542cc57c 100644 --- a/packages/google-cloud-bigtable/docs/index.rst +++ b/packages/google-cloud-bigtable/docs/index.rst @@ -30,3 +30,8 @@ For a list of all ``google-cloud-datastore`` releases: :maxdepth: 2 changelog + +.. toctree:: + :hidden: + + summary_overview.md diff --git a/packages/google-cloud-bigtable/docs/summary_overview.md b/packages/google-cloud-bigtable/docs/summary_overview.md new file mode 100644 index 000000000000..2379e8b6bc1f --- /dev/null +++ b/packages/google-cloud-bigtable/docs/summary_overview.md @@ -0,0 +1,22 @@ +[ +This is a templated file. 
Adding content to this file may result in it being +reverted. Instead, if you want to place additional content, create an +"overview_content.md" file in `docs/` directory. The Sphinx tool will +pick up on the content and merge the content. +]: # + +# Cloud Bigtable API + +Overview of the APIs available for Cloud Bigtable API. + +## All entries + +Classes, methods and properties & attributes for +Cloud Bigtable API. + +[classes](https://cloud.google.com/python/docs/reference/bigtable/latest/summary_class.html) + +[methods](https://cloud.google.com/python/docs/reference/bigtable/latest/summary_method.html) + +[properties and +attributes](https://cloud.google.com/python/docs/reference/bigtable/latest/summary_property.html) diff --git a/packages/google-cloud-bigtable/samples/hello/noxfile.py b/packages/google-cloud-bigtable/samples/hello/noxfile.py index 3b7135946fd5..483b55901791 100644 --- a/packages/google-cloud-bigtable/samples/hello/noxfile.py +++ b/packages/google-cloud-bigtable/samples/hello/noxfile.py @@ -160,7 +160,6 @@ def blacken(session: nox.sessions.Session) -> None: # format = isort + black # - @nox.session def format(session: nox.sessions.Session) -> None: """ @@ -188,9 +187,7 @@ def _session_tests( session: nox.sessions.Session, post_install: Callable = None ) -> None: # check for presence of tests - test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob( - "**/test_*.py", recursive=True - ) + test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob("**/test_*.py", recursive=True) test_list.extend(glob.glob("**/tests", recursive=True)) if len(test_list) == 0: @@ -212,7 +209,9 @@ def _session_tests( if os.path.exists("requirements-test.txt"): if os.path.exists("constraints-test.txt"): - session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") + session.install( + "-r", "requirements-test.txt", "-c", "constraints-test.txt" + ) else: session.install("-r", "requirements-test.txt") with open("requirements-test.txt") as rtfile: @@ -225,9 +224,9 @@ def _session_tests( post_install(session) if "pytest-parallel" in packages: - concurrent_args.extend(["--workers", "auto", "--tests-per-worker", "auto"]) + concurrent_args.extend(['--workers', 'auto', '--tests-per-worker', 'auto']) elif "pytest-xdist" in packages: - concurrent_args.extend(["-n", "auto"]) + concurrent_args.extend(['-n', 'auto']) session.run( "pytest", @@ -257,7 +256,7 @@ def py(session: nox.sessions.Session) -> None: def _get_repo_root() -> Optional[str]: - """Returns the root folder of the project.""" + """ Returns the root folder of the project. """ # Get root of this repository. Assume we don't have directories nested deeper than 10 items. p = Path(os.getcwd()) for i in range(10): diff --git a/packages/google-cloud-bigtable/samples/snippets/data_client/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/data_client/noxfile.py index 6967925a838a..483b55901791 100644 --- a/packages/google-cloud-bigtable/samples/snippets/data_client/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/data_client/noxfile.py @@ -1,4 +1,4 @@ -# Copyright 2024 Google LLC +# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -160,7 +160,6 @@ def blacken(session: nox.sessions.Session) -> None: # format = isort + black # - @nox.session def format(session: nox.sessions.Session) -> None: """ @@ -188,9 +187,7 @@ def _session_tests( session: nox.sessions.Session, post_install: Callable = None ) -> None: # check for presence of tests - test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob( - "**/test_*.py", recursive=True - ) + test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob("**/test_*.py", recursive=True) test_list.extend(glob.glob("**/tests", recursive=True)) if len(test_list) == 0: @@ -212,7 +209,9 @@ def _session_tests( if os.path.exists("requirements-test.txt"): if os.path.exists("constraints-test.txt"): - session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") + session.install( + "-r", "requirements-test.txt", "-c", "constraints-test.txt" + ) else: session.install("-r", "requirements-test.txt") with open("requirements-test.txt") as rtfile: @@ -225,9 +224,9 @@ def _session_tests( post_install(session) if "pytest-parallel" in packages: - concurrent_args.extend(["--workers", "auto", "--tests-per-worker", "auto"]) + concurrent_args.extend(['--workers', 'auto', '--tests-per-worker', 'auto']) elif "pytest-xdist" in packages: - concurrent_args.extend(["-n", "auto"]) + concurrent_args.extend(['-n', 'auto']) session.run( "pytest", @@ -257,7 +256,7 @@ def py(session: nox.sessions.Session) -> None: def _get_repo_root() -> Optional[str]: - """Returns the root folder of the project.""" + """ Returns the root folder of the project. """ # Get root of this repository. Assume we don't have directories nested deeper than 10 items. p = Path(os.getcwd()) for i in range(10): diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/deletes/noxfile.py index 3b7135946fd5..483b55901791 100644 --- a/packages/google-cloud-bigtable/samples/snippets/deletes/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/noxfile.py @@ -160,7 +160,6 @@ def blacken(session: nox.sessions.Session) -> None: # format = isort + black # - @nox.session def format(session: nox.sessions.Session) -> None: """ @@ -188,9 +187,7 @@ def _session_tests( session: nox.sessions.Session, post_install: Callable = None ) -> None: # check for presence of tests - test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob( - "**/test_*.py", recursive=True - ) + test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob("**/test_*.py", recursive=True) test_list.extend(glob.glob("**/tests", recursive=True)) if len(test_list) == 0: @@ -212,7 +209,9 @@ def _session_tests( if os.path.exists("requirements-test.txt"): if os.path.exists("constraints-test.txt"): - session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") + session.install( + "-r", "requirements-test.txt", "-c", "constraints-test.txt" + ) else: session.install("-r", "requirements-test.txt") with open("requirements-test.txt") as rtfile: @@ -225,9 +224,9 @@ def _session_tests( post_install(session) if "pytest-parallel" in packages: - concurrent_args.extend(["--workers", "auto", "--tests-per-worker", "auto"]) + concurrent_args.extend(['--workers', 'auto', '--tests-per-worker', 'auto']) elif "pytest-xdist" in packages: - concurrent_args.extend(["-n", "auto"]) + concurrent_args.extend(['-n', 'auto']) session.run( "pytest", @@ -257,7 +256,7 @@ def py(session: nox.sessions.Session) -> None: def _get_repo_root() -> Optional[str]: - """Returns the 
root folder of the project.""" + """ Returns the root folder of the project. """ # Get root of this repository. Assume we don't have directories nested deeper than 10 items. p = Path(os.getcwd()) for i in range(10): diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py index 3b7135946fd5..483b55901791 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py @@ -160,7 +160,6 @@ def blacken(session: nox.sessions.Session) -> None: # format = isort + black # - @nox.session def format(session: nox.sessions.Session) -> None: """ @@ -188,9 +187,7 @@ def _session_tests( session: nox.sessions.Session, post_install: Callable = None ) -> None: # check for presence of tests - test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob( - "**/test_*.py", recursive=True - ) + test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob("**/test_*.py", recursive=True) test_list.extend(glob.glob("**/tests", recursive=True)) if len(test_list) == 0: @@ -212,7 +209,9 @@ def _session_tests( if os.path.exists("requirements-test.txt"): if os.path.exists("constraints-test.txt"): - session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") + session.install( + "-r", "requirements-test.txt", "-c", "constraints-test.txt" + ) else: session.install("-r", "requirements-test.txt") with open("requirements-test.txt") as rtfile: @@ -225,9 +224,9 @@ def _session_tests( post_install(session) if "pytest-parallel" in packages: - concurrent_args.extend(["--workers", "auto", "--tests-per-worker", "auto"]) + concurrent_args.extend(['--workers', 'auto', '--tests-per-worker', 'auto']) elif "pytest-xdist" in packages: - concurrent_args.extend(["-n", "auto"]) + concurrent_args.extend(['-n', 'auto']) session.run( "pytest", @@ -257,7 +256,7 @@ def py(session: nox.sessions.Session) -> None: def _get_repo_root() -> Optional[str]: - """Returns the root folder of the project.""" + """ Returns the root folder of the project. """ # Get root of this repository. Assume we don't have directories nested deeper than 10 items. 
p = Path(os.getcwd()) for i in range(10): diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py index 3b7135946fd5..483b55901791 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py @@ -160,7 +160,6 @@ def blacken(session: nox.sessions.Session) -> None: # format = isort + black # - @nox.session def format(session: nox.sessions.Session) -> None: """ @@ -188,9 +187,7 @@ def _session_tests( session: nox.sessions.Session, post_install: Callable = None ) -> None: # check for presence of tests - test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob( - "**/test_*.py", recursive=True - ) + test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob("**/test_*.py", recursive=True) test_list.extend(glob.glob("**/tests", recursive=True)) if len(test_list) == 0: @@ -212,7 +209,9 @@ def _session_tests( if os.path.exists("requirements-test.txt"): if os.path.exists("constraints-test.txt"): - session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") + session.install( + "-r", "requirements-test.txt", "-c", "constraints-test.txt" + ) else: session.install("-r", "requirements-test.txt") with open("requirements-test.txt") as rtfile: @@ -225,9 +224,9 @@ def _session_tests( post_install(session) if "pytest-parallel" in packages: - concurrent_args.extend(["--workers", "auto", "--tests-per-worker", "auto"]) + concurrent_args.extend(['--workers', 'auto', '--tests-per-worker', 'auto']) elif "pytest-xdist" in packages: - concurrent_args.extend(["-n", "auto"]) + concurrent_args.extend(['-n', 'auto']) session.run( "pytest", @@ -257,7 +256,7 @@ def py(session: nox.sessions.Session) -> None: def _get_repo_root() -> Optional[str]: - """Returns the root folder of the project.""" + """ Returns the root folder of the project. """ # Get root of this repository. Assume we don't have directories nested deeper than 10 items. p = Path(os.getcwd()) for i in range(10): From ccd878165afd1dad419de3cde69176e2d0fee63f Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Mon, 15 Apr 2024 10:30:03 -0700 Subject: [PATCH 787/892] chore: fix prerelease_deps (#955) --- .../transports/rest.py | 64 +++---------- .../bigtable_table_admin/transports/rest.py | 91 ++++--------------- .../services/bigtable/transports/rest.py | 43 ++------- .../test_bigtable_instance_admin.py | 20 ---- .../test_bigtable_table_admin.py | 25 ----- .../unit/gapic/bigtable_v2/test_bigtable.py | 9 -- 6 files changed, 37 insertions(+), 215 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py index 61f425953168..879702e864fb 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -905,7 +905,6 @@ def __call__( body = json_format.MessageToJson( transcoded_request["body"], - including_default_value_fields=False, use_integers_for_enums=True, ) uri = transcoded_request["uri"] @@ -915,7 +914,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, use_integers_for_enums=True, ) ) @@ -1005,9 +1003,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -1016,7 +1012,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, use_integers_for_enums=True, ) ) @@ -1102,9 +1097,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -1113,7 +1106,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, use_integers_for_enums=True, ) ) @@ -1199,7 +1191,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, use_integers_for_enums=True, ) ) @@ -1274,7 +1265,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, use_integers_for_enums=True, ) ) @@ -1349,7 +1339,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, use_integers_for_enums=True, ) ) @@ -1431,7 +1420,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, use_integers_for_enums=True, ) ) @@ -1522,7 +1510,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, use_integers_for_enums=True, ) ) @@ -1680,9 +1667,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -1691,7 +1676,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, use_integers_for_enums=True, ) ) @@ -1785,7 +1769,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, use_integers_for_enums=True, ) ) @@ -1876,7 +1859,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, use_integers_for_enums=True, ) ) @@ -1965,7 +1947,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, 
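[Illustrative note, not part of the patch.] These hunks all make the same mechanical change: json_format.MessageToJson is now called without including_default_value_fields, a keyword that newer protobuf releases no longer accept. A minimal sketch of the remaining call shape; FieldDescriptorProto is just a convenient compiled message to demonstrate with.

    from google.protobuf import json_format
    from google.protobuf import descriptor_pb2

    # Any compiled protobuf message works; this one has an enum field, which
    # use_integers_for_enums renders as an integer instead of the enum name.
    field = descriptor_pb2.FieldDescriptorProto(
        name="row_key", type=descriptor_pb2.FieldDescriptorProto.TYPE_BYTES
    )
    print(json_format.MessageToJson(field, use_integers_for_enums=True))
    # pretty-printed JSON with "type": 12 rather than "type": "TYPE_BYTES"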
use_integers_for_enums=True, ) ) @@ -2056,7 +2037,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, use_integers_for_enums=True, ) ) @@ -2145,7 +2125,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, use_integers_for_enums=True, ) ) @@ -2236,9 +2215,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2247,7 +2224,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, use_integers_for_enums=True, ) ) @@ -2339,9 +2315,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2350,7 +2324,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, use_integers_for_enums=True, ) ) @@ -2507,9 +2480,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2518,7 +2489,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, use_integers_for_enums=True, ) ) @@ -2604,9 +2574,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2615,7 +2583,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, use_integers_for_enums=True, ) ) @@ -2707,9 +2674,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2718,7 +2683,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, use_integers_for_enums=True, ) ) @@ -2796,9 +2760,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2807,7 +2769,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - 
including_default_value_fields=False, use_integers_for_enums=True, ) ) @@ -2899,9 +2860,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2910,7 +2869,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, use_integers_for_enums=True, ) ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py index ad171d8f361f..49bc756e177d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -998,9 +998,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -1009,7 +1007,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, use_integers_for_enums=True, ) ) @@ -1097,9 +1094,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -1108,7 +1103,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, use_integers_for_enums=True, ) ) @@ -1196,9 +1190,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -1207,7 +1199,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, use_integers_for_enums=True, ) ) @@ -1294,9 +1285,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -1305,7 +1294,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, use_integers_for_enums=True, ) ) @@ -1403,9 +1391,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - 
transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -1414,7 +1400,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, use_integers_for_enums=True, ) ) @@ -1496,7 +1481,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, use_integers_for_enums=True, ) ) @@ -1578,7 +1562,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, use_integers_for_enums=True, ) ) @@ -1653,7 +1636,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, use_integers_for_enums=True, ) ) @@ -1725,9 +1707,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -1736,7 +1716,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, use_integers_for_enums=True, ) ) @@ -1820,9 +1799,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -1831,7 +1808,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, use_integers_for_enums=True, ) ) @@ -1919,7 +1895,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, use_integers_for_enums=True, ) ) @@ -2082,9 +2057,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2093,7 +2066,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, use_integers_for_enums=True, ) ) @@ -2201,7 +2173,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, use_integers_for_enums=True, ) ) @@ -2292,7 +2263,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, use_integers_for_enums=True, ) ) @@ -2381,7 +2351,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, use_integers_for_enums=True, ) ) @@ -2484,7 +2453,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - 
including_default_value_fields=False, use_integers_for_enums=True, ) ) @@ -2573,7 +2541,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, use_integers_for_enums=True, ) ) @@ -2663,9 +2630,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2674,7 +2639,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, use_integers_for_enums=True, ) ) @@ -2762,9 +2726,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2773,7 +2735,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, use_integers_for_enums=True, ) ) @@ -2935,9 +2896,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -2946,7 +2905,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, use_integers_for_enums=True, ) ) @@ -3041,9 +2999,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3052,7 +3008,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, use_integers_for_enums=True, ) ) @@ -3141,9 +3096,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3152,7 +3105,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, use_integers_for_enums=True, ) ) @@ -3240,9 +3192,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3251,7 +3201,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, use_integers_for_enums=True, ) ) @@ -3336,9 +3285,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - 
transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3347,7 +3294,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, use_integers_for_enums=True, ) ) @@ -3437,9 +3383,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -3448,7 +3392,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, use_integers_for_enums=True, ) ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py index 17b47cb1cde7..d77291a650ea 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -500,9 +500,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -511,7 +509,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, use_integers_for_enums=True, ) ) @@ -610,9 +607,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -621,7 +616,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, use_integers_for_enums=True, ) ) @@ -709,9 +703,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -720,7 +712,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, use_integers_for_enums=True, ) ) @@ -807,9 +798,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -818,7 +807,6 @@ def 
__call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, use_integers_for_enums=True, ) ) @@ -905,9 +893,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -916,7 +902,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, use_integers_for_enums=True, ) ) @@ -1007,9 +992,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -1018,7 +1001,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, use_integers_for_enums=True, ) ) @@ -1106,9 +1088,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -1117,7 +1097,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, use_integers_for_enums=True, ) ) @@ -1204,9 +1183,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], - including_default_value_fields=False, - use_integers_for_enums=True, + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -1215,7 +1192,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, use_integers_for_enums=True, ) ) @@ -1302,7 +1278,6 @@ def __call__( query_params = json.loads( json_format.MessageToJson( transcoded_request["query_params"], - including_default_value_fields=False, use_integers_for_enums=True, ) ) diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index 7a24cab5422c..10e9d101b736 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -6628,7 +6628,6 @@ def test_create_instance_rest_required_fields( jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) ) @@ -6922,7 +6921,6 @@ def test_get_instance_rest_required_fields( jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) ) @@ -7193,7 +7191,6 @@ def test_list_instances_rest_required_fields( jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, 
use_integers_for_enums=False, ) ) @@ -7474,7 +7471,6 @@ def test_update_instance_rest_required_fields(request_type=instance.Instance): jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) ) @@ -7757,7 +7753,6 @@ def test_partial_update_instance_rest_required_fields( jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) ) @@ -8029,7 +8024,6 @@ def test_delete_instance_rest_required_fields( jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) ) @@ -8368,7 +8362,6 @@ def test_create_cluster_rest_required_fields( jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) ) @@ -8671,7 +8664,6 @@ def test_get_cluster_rest_required_fields( jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) ) @@ -8943,7 +8935,6 @@ def test_list_clusters_rest_required_fields( jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) ) @@ -9424,7 +9415,6 @@ def test_partial_update_cluster_rest_required_fields( jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) ) @@ -9701,7 +9691,6 @@ def test_delete_cluster_rest_required_fields( jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) ) @@ -10047,7 +10036,6 @@ def test_create_app_profile_rest_required_fields( jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) ) @@ -10360,7 +10348,6 @@ def test_get_app_profile_rest_required_fields( jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) ) @@ -10633,7 +10620,6 @@ def test_list_app_profiles_rest_required_fields( jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) ) @@ -11061,7 +11047,6 @@ def test_update_app_profile_rest_required_fields( jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) ) @@ -11353,7 +11338,6 @@ def test_delete_app_profile_rest_required_fields( jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) ) @@ -11636,7 +11620,6 @@ def test_get_iam_policy_rest_required_fields( jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) ) @@ -11900,7 +11883,6 @@ def test_set_iam_policy_rest_required_fields( jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) ) @@ -12171,7 +12153,6 @@ def test_test_iam_permissions_rest_required_fields( jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) ) @@ -12450,7 +12431,6 @@ def 
test_list_hot_tablets_rest_required_fields( jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) ) diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index b52ad06065a4..67f02f9ce032 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -7740,7 +7740,6 @@ def test_create_table_rest_required_fields( jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) ) @@ -8022,7 +8021,6 @@ def test_create_table_from_snapshot_rest_required_fields( jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) ) @@ -8311,7 +8309,6 @@ def test_list_tables_rest_required_fields( jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) ) @@ -8659,7 +8656,6 @@ def test_get_table_rest_required_fields( jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) ) @@ -9010,7 +9006,6 @@ def test_update_table_rest_required_fields( jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) ) @@ -9286,7 +9281,6 @@ def test_delete_table_rest_required_fields( jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) ) @@ -9538,7 +9532,6 @@ def test_undelete_table_rest_required_fields( jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) ) @@ -9811,7 +9804,6 @@ def test_modify_column_families_rest_required_fields( jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) ) @@ -10095,7 +10087,6 @@ def test_drop_row_range_rest_required_fields( jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) ) @@ -10301,7 +10292,6 @@ def test_generate_consistency_token_rest_required_fields( jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) ) @@ -10581,7 +10571,6 @@ def test_check_consistency_rest_required_fields( jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) ) @@ -10867,7 +10856,6 @@ def test_snapshot_table_rest_required_fields( jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) ) @@ -11165,7 +11153,6 @@ def test_get_snapshot_rest_required_fields( jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) ) @@ -11438,7 +11425,6 @@ def test_list_snapshots_rest_required_fields( jsonified_request = json.loads( 
json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) ) @@ -11782,7 +11768,6 @@ def test_delete_snapshot_rest_required_fields( jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) ) @@ -12131,7 +12116,6 @@ def test_create_backup_rest_required_fields( jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) ) @@ -12439,7 +12423,6 @@ def test_get_backup_rest_required_fields( jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) ) @@ -12814,7 +12797,6 @@ def test_update_backup_rest_required_fields( jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) ) @@ -13097,7 +13079,6 @@ def test_delete_backup_rest_required_fields( jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) ) @@ -13359,7 +13340,6 @@ def test_list_backups_rest_required_fields( jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) ) @@ -13706,7 +13686,6 @@ def test_restore_table_rest_required_fields( jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) ) @@ -13928,7 +13907,6 @@ def test_copy_backup_rest_required_fields( jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) ) @@ -14221,7 +14199,6 @@ def test_get_iam_policy_rest_required_fields( jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) ) @@ -14487,7 +14464,6 @@ def test_set_iam_policy_rest_required_fields( jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) ) @@ -14760,7 +14736,6 @@ def test_test_iam_permissions_rest_required_fields( jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) ) diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py index ab05af42621a..105f9e49ef1c 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py @@ -3428,7 +3428,6 @@ def test_read_rows_rest_required_fields(request_type=bigtable.ReadRowsRequest): jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) ) @@ -3716,7 +3715,6 @@ def test_sample_row_keys_rest_required_fields( jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) ) @@ -3992,7 +3990,6 @@ def test_mutate_row_rest_required_fields(request_type=bigtable.MutateRowRequest) jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) 
) @@ -4290,7 +4287,6 @@ def test_mutate_rows_rest_required_fields(request_type=bigtable.MutateRowsReques jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) ) @@ -4580,7 +4576,6 @@ def test_check_and_mutate_row_rest_required_fields( jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) ) @@ -4904,7 +4899,6 @@ def test_ping_and_warm_rest_required_fields(request_type=bigtable.PingAndWarmReq jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) ) @@ -5170,7 +5164,6 @@ def test_read_modify_write_row_rest_required_fields( jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) ) @@ -5466,7 +5459,6 @@ def test_generate_initial_change_stream_partitions_rest_required_fields( jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) ) @@ -5770,7 +5762,6 @@ def test_read_change_stream_rest_required_fields( jsonified_request = json.loads( json_format.MessageToJson( pb_request, - including_default_value_fields=False, use_integers_for_enums=False, ) ) From d74140848da6782b0313f0d2f812640d7106a30c Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Mon, 15 Apr 2024 13:53:42 -0400 Subject: [PATCH 788/892] chore(main): release 2.23.1 (#948) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- .../google-cloud-bigtable/.release-please-manifest.json | 2 +- packages/google-cloud-bigtable/CHANGELOG.md | 7 +++++++ .../google/cloud/bigtable/gapic_version.py | 2 +- .../google/cloud/bigtable_admin/gapic_version.py | 2 +- .../google/cloud/bigtable_admin_v2/gapic_version.py | 2 +- .../google/cloud/bigtable_v2/gapic_version.py | 2 +- 6 files changed, 12 insertions(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/.release-please-manifest.json b/packages/google-cloud-bigtable/.release-please-manifest.json index b94f3df9f05d..ab46db83efc2 100644 --- a/packages/google-cloud-bigtable/.release-please-manifest.json +++ b/packages/google-cloud-bigtable/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "2.23.0" + ".": "2.23.1" } \ No newline at end of file diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index ea8a8525da5c..0731c14a3eb8 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,13 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.23.1](https://github.com/googleapis/python-bigtable/compare/v2.23.0...v2.23.1) (2024-04-15) + + +### Bug Fixes + +* Use insecure grpc channel with emulator ([#946](https://github.com/googleapis/python-bigtable/issues/946)) ([aa31706](https://github.com/googleapis/python-bigtable/commit/aa3170663f9bd09d70c99d4e76c07f7f293ad935)) + ## [2.23.0](https://github.com/googleapis/python-bigtable/compare/v2.22.0...v2.23.0) (2024-02-07) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py index f01e1d3a583c..008f4dd36be8 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py +++ 
b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.23.0" # {x-release-please-version} +__version__ = "2.23.1" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py index f01e1d3a583c..008f4dd36be8 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.23.0" # {x-release-please-version} +__version__ = "2.23.1" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py index f01e1d3a583c..008f4dd36be8 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.23.0" # {x-release-please-version} +__version__ = "2.23.1" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py index f01e1d3a583c..008f4dd36be8 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
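The 2.23.1 changelog entry above ("Use insecure grpc channel with emulator") concerns client construction against a local Bigtable emulator. A rough sketch of exercising that path, assuming the emulator is still advertised through the BIGTABLE_EMULATOR_HOST environment variable as in earlier releases (endpoint and project are hypothetical, and exact behavior depends on the client surface in use):

    import os
    from google.cloud import bigtable

    # Hypothetical local emulator endpoint; with the variable set, the fixed
    # client is expected to build an insecure gRPC channel instead of
    # requiring TLS credentials.
    os.environ["BIGTABLE_EMULATOR_HOST"] = "localhost:8086"
    client = bigtable.Client(project="test-project", admin=True)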
# -__version__ = "2.23.0" # {x-release-please-version} +__version__ = "2.23.1" # {x-release-please-version} From d31e366be8b78a64c026839d012ae98ddc294abb Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Fri, 10 May 2024 13:14:01 -0700 Subject: [PATCH 789/892] chore: Update gapic-generator-python to v1.17.1 (#936) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix: extend timeouts for deleting snapshots, backups and tables PiperOrigin-RevId: 605388988 Source-Link: https://github.com/googleapis/googleapis/commit/fbcfef09510b842774530989889ed1584a8b5acb Source-Link: https://github.com/googleapis/googleapis-gen/commit/716b6e6a6a0e8c87a48a86e31272a2826f2df38c Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiNzE2YjZlNmE2YTBlOGM4N2E0OGE4NmUzMTI3MmEyODI2ZjJkZjM4YyJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * fix(deps): Require `google-api-core>=1.34.1` fix: Resolve issue with missing import for certain enums in `**/types/…` PiperOrigin-RevId: 607041732 Source-Link: https://github.com/googleapis/googleapis/commit/b4532678459355676c95c00e39866776b7f40b2e Source-Link: https://github.com/googleapis/googleapis-gen/commit/cd796416f0f54cb22b2c44fb2d486960e693a346 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiY2Q3OTY0MTZmMGY1NGNiMjJiMmM0NGZiMmQ0ODY5NjBlNjkzYTM0NiJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * fix(deps): Exclude google-auth 2.24.0 and 2.25.0 chore: Update gapic-generator-python to v1.14.4 PiperOrigin-RevId: 611561820 Source-Link: https://github.com/googleapis/googleapis/commit/87ef1fe57feede1f23b523f3c7fc4c3f2b92d6d2 Source-Link: https://github.com/googleapis/googleapis-gen/commit/197316137594aafad94dea31226528fbcc39310c Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiMTk3MzE2MTM3NTk0YWFmYWQ5NGRlYTMxMjI2NTI4ZmJjYzM5MzEwYyJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * feat: Cloud Bigtable Authorized Views admin APIs protos PiperOrigin-RevId: 612537460 Source-Link: https://github.com/googleapis/googleapis/commit/b98fe7ff808454e9d11a83946f40259ea9c6a63b Source-Link: https://github.com/googleapis/googleapis-gen/commit/03d9b5c5517cf9123f120461180ebdd387a47bcc Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiMDNkOWI1YzU1MTdjZjkxMjNmMTIwNDYxMTgwZWJkZDM4N2E0N2JjYyJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * feat: Add authorized view bindings to Cloud Bigtable data APIs and messages docs: The field `table_name` in message `.google.bigtable.v2.ReadRowsRequest` is changed from required to optional docs: The field `table_name` in message `.google.bigtable.v2.SampleRowKeysRequest` is changed from required to optional docs: The field `table_name` in message `.google.bigtable.v2.MutateRowRequest` is changed from required to optional docs: The field `table_name` in message `.google.bigtable.v2.MutateRowsRequest` is changed from required to optional docs: The field `table_name` in message `.google.bigtable.v2.CheckAndMutateRowRequest` is changed from required to optional docs: The field `table_name` in message `.google.bigtable.v2.ReadModifyWriteRowRequest` is changed from required to optional PiperOrigin-RevId: 612537984 
Source-Link: https://github.com/googleapis/googleapis/commit/6465963c92930626473457717ff697aeb1bf4a12 Source-Link: https://github.com/googleapis/googleapis-gen/commit/f4a996071801f559bb6f4d0c99bb9a3c0ecf4844 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiZjRhOTk2MDcxODAxZjU1OWJiNmY0ZDBjOTliYjlhM2MwZWNmNDg0NCJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * feat: Add include_recaptcha_script for as a new action in firewall policies PiperOrigin-RevId: 612851792 Source-Link: https://github.com/googleapis/googleapis/commit/49ea2c0fc42dd48996b833f05a258ad7e8590d3d Source-Link: https://github.com/googleapis/googleapis-gen/commit/460fdcbbbe00f35b1c591b1f3ef0c77ebd3ce277 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiNDYwZmRjYmJiZTAwZjM1YjFjNTkxYjFmM2VmMGM3N2ViZDNjZTI3NyJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * feat: Publish new bigtable APIs for types and aggregates Bigtable aggregates will allow users to configure column families whose cells accumulate values via an aggregation function rather than simply overwrite them PiperOrigin-RevId: 613716423 Source-Link: https://github.com/googleapis/googleapis/commit/66fc31d257cabb2d4462ce3149da9e3a232b3ad1 Source-Link: https://github.com/googleapis/googleapis-gen/commit/b983c8f87e6643d9a74d7b8183d66349943b436e Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiYjk4M2M4Zjg3ZTY2NDNkOWE3NGQ3YjgxODNkNjYzNDk5NDNiNDM2ZSJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * added logic to keep pooled transport to owlbot.py * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * fixed insert paths * fixed bad format * fixed bad format * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * added escape to last insert * fixed indentation * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * feat: add Data Boost configurations to admin API PiperOrigin-RevId: 617925342 Source-Link: https://github.com/googleapis/googleapis/commit/6f289d775912966eb0cf04bda91e5e355c998d30 Source-Link: https://github.com/googleapis/googleapis-gen/commit/92da6d5d435af533f726a97bcfff3c717832c877 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiOTJkYTZkNWQ0MzVhZjUzM2Y3MjZhOTdiY2ZmZjNjNzE3ODMyYzg3NyJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * chore: Update gapic-generator-python to v1.16.1 PiperOrigin-RevId: 618243632 Source-Link: https://github.com/googleapis/googleapis/commit/078a38bd240827be8e69a5b62993380d1b047994 Source-Link: https://github.com/googleapis/googleapis-gen/commit/7af768c3f8ce58994482350f7401173329950a31 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiN2FmNzY4YzNmOGNlNTg5OTQ0ODIzNTBmNzQwMTE3MzMyOTk1MGEzMSJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot post-processor See 
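The notes above describe adding authorized view bindings to the data API, with `table_name` relaxed from required to optional in the affected request messages. A hedged sketch of what targeting an authorized view could look like, assuming the new field is named `authorized_view_name` as in the v2 protos (resource names are hypothetical):

    from google.cloud import bigtable_v2

    # Either table_name or authorized_view_name identifies the target; here
    # the request is scoped to an authorized view rather than the whole table.
    request = bigtable_v2.ReadRowsRequest(
        authorized_view_name=(
            "projects/my-project/instances/my-instance"
            "/tables/my-table/authorizedViews/my-view"
        ),
    )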
https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * feat: add feature flag for client side metrics PiperOrigin-RevId: 619540187 Source-Link: https://github.com/googleapis/googleapis/commit/cbe62016a4eb24e71186899b79b9a4736f858653 Source-Link: https://github.com/googleapis/googleapis-gen/commit/1587174866b7ab761aed1dbfb9588f5b36ee1590 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiMTU4NzE3NDg2NmI3YWI3NjFhZWQxZGJmYjk1ODhmNWIzNmVlMTU5MCJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * feat: publish Automated Backups protos PiperOrigin-RevId: 620381983 Source-Link: https://github.com/googleapis/googleapis/commit/a70aa2c04ddad801a518be4f5b67345cf758a6ba Source-Link: https://github.com/googleapis/googleapis-gen/commit/e3fb57f9dd4a10b6c20359ec92a72e87631991b8 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiZTNmYjU3ZjlkZDRhMTBiNmMyMDM1OWVjOTJhNzJlODc2MzE5OTFiOCJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * chore: Update gapic-generator-python to v1.17.0 PiperOrigin-RevId: 626992299 Source-Link: https://github.com/googleapis/googleapis/commit/e495ff587351369637ecee17bfd260d2e76a41f7 Source-Link: https://github.com/googleapis/googleapis-gen/commit/2463c3c27110a92d1fab175109ef94bfe5967168 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiMjQ2M2MzYzI3MTEwYTkyZDFmYWIxNzUxMDllZjk0YmZlNTk2NzE2OCJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * chore: Update gapic-generator-python to v1.17.0 PiperOrigin-RevId: 627075268 Source-Link: https://github.com/googleapis/googleapis/commit/b0a5b9d2b7021525100441756e3914ed3d616cb6 Source-Link: https://github.com/googleapis/googleapis-gen/commit/56b44dca0ceea3ad2afe9ce4a9aeadf9bdf1b445 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiNTZiNDRkY2EwY2VlYTNhZDJhZmU5Y2U0YTlhZWFkZjliZGYxYjQ0NSJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * chore: Update gapic-generator-python to v1.17.1 PiperOrigin-RevId: 629071173 Source-Link: https://github.com/googleapis/googleapis/commit/4afa392105cc62e965631d15b772ff68454ecf1c Source-Link: https://github.com/googleapis/googleapis-gen/commit/16dbbb4d0457db5e61ac9f99b0d52a46154455ac Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiMTZkYmJiNGQwNDU3ZGI1ZTYxYWM5Zjk5YjBkNTJhNDYxNTQ0NTVhYyJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * fixed test error * fixed broken test * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 
Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * removed constraint on pytest-asyncio * added back constraints * moved constraint into SYSTEM_TEST_EXTERNAL_DEPENDENCIES --------- Co-authored-by: Owl Bot Co-authored-by: Daniel Sanche --- .../google/cloud/bigtable_admin/__init__.py | 46 +- .../cloud/bigtable_admin_v2/__init__.py | 26 +- .../bigtable_admin_v2/gapic_metadata.json | 75 + .../bigtable_admin_v2/services/__init__.py | 2 +- .../bigtable_instance_admin/__init__.py | 2 +- .../bigtable_instance_admin/async_client.py | 508 +- .../bigtable_instance_admin/client.py | 227 +- .../bigtable_instance_admin/pagers.py | 2 +- .../transports/__init__.py | 2 +- .../transports/base.py | 2 +- .../transports/grpc.py | 27 +- .../transports/grpc_asyncio.py | 268 +- .../services/bigtable_table_admin/__init__.py | 2 +- .../bigtable_table_admin/async_client.py | 1038 +- .../services/bigtable_table_admin/client.py | 764 +- .../services/bigtable_table_admin/pagers.py | 134 +- .../transports/__init__.py | 2 +- .../bigtable_table_admin/transports/base.py | 81 +- .../bigtable_table_admin/transports/grpc.py | 166 +- .../transports/grpc_asyncio.py | 426 +- .../bigtable_table_admin/transports/rest.py | 729 +- .../cloud/bigtable_admin_v2/types/__init__.py | 28 +- .../types/bigtable_instance_admin.py | 2 +- .../types/bigtable_table_admin.py | 338 +- .../cloud/bigtable_admin_v2/types/common.py | 2 +- .../cloud/bigtable_admin_v2/types/instance.py | 61 +- .../cloud/bigtable_admin_v2/types/table.py | 188 +- .../cloud/bigtable_admin_v2/types/types.py | 267 + .../google/cloud/bigtable_v2/__init__.py | 4 +- .../cloud/bigtable_v2/services/__init__.py | 2 +- .../bigtable_v2/services/bigtable/__init__.py | 2 +- .../services/bigtable/async_client.py | 130 +- .../bigtable_v2/services/bigtable/client.py | 234 +- .../services/bigtable/transports/__init__.py | 2 +- .../services/bigtable/transports/base.py | 2 +- .../services/bigtable/transports/grpc.py | 27 +- .../bigtable/transports/grpc_asyncio.py | 32 +- .../services/bigtable/transports/rest.py | 51 +- .../cloud/bigtable_v2/types/__init__.py | 4 +- .../cloud/bigtable_v2/types/bigtable.py | 98 +- .../google/cloud/bigtable_v2/types/data.py | 103 +- .../cloud/bigtable_v2/types/feature_flags.py | 9 +- .../cloud/bigtable_v2/types/request_stats.py | 2 +- .../bigtable_v2/types/response_params.py | 2 +- packages/google-cloud-bigtable/noxfile.py | 2 +- packages/google-cloud-bigtable/owlbot.py | 50 +- .../fixup_bigtable_admin_v2_keywords.py | 9 +- .../scripts/fixup_bigtable_v2_keywords.py | 14 +- .../testing/constraints-3.8.txt | 1 - .../google-cloud-bigtable/tests/__init__.py | 2 +- .../tests/system/data/test_system.py | 1 - .../tests/unit/__init__.py | 2 +- .../tests/unit/gapic/__init__.py | 2 +- .../unit/gapic/bigtable_admin_v2/__init__.py | 2 +- .../test_bigtable_instance_admin.py | 3974 ++++- .../test_bigtable_table_admin.py | 13451 +++++++++++++--- .../tests/unit/gapic/bigtable_v2/__init__.py | 2 +- .../unit/gapic/bigtable_v2/test_bigtable.py | 2091 ++- .../tests/unit/v2_client/test_client.py | 13 +- 59 files changed, 21724 insertions(+), 4011 deletions(-) create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/types.py diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/__init__.py 
b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/__init__.py index d26d79b3c880..2884a96ab748 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -117,6 +117,12 @@ ) from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import CopyBackupMetadata from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import CopyBackupRequest +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + CreateAuthorizedViewMetadata, +) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + CreateAuthorizedViewRequest, +) from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( CreateBackupMetadata, ) @@ -130,6 +136,12 @@ CreateTableFromSnapshotRequest, ) from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import CreateTableRequest +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + DataBoostReadLocalWrites, +) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + DeleteAuthorizedViewRequest, +) from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( DeleteBackupRequest, ) @@ -146,9 +158,18 @@ from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( GenerateConsistencyTokenResponse, ) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + GetAuthorizedViewRequest, +) from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import GetBackupRequest from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import GetSnapshotRequest from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import GetTableRequest +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + ListAuthorizedViewsRequest, +) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + ListAuthorizedViewsResponse, +) from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ListBackupsRequest from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( ListBackupsResponse, @@ -179,12 +200,21 @@ from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( SnapshotTableRequest, ) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + StandardReadRemoteWrites, +) from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( UndeleteTableMetadata, ) from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( UndeleteTableRequest, ) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + UpdateAuthorizedViewMetadata, +) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + UpdateAuthorizedViewRequest, +) from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( UpdateBackupRequest, ) @@ -200,6 +230,7 @@ from google.cloud.bigtable_admin_v2.types.instance import Cluster from google.cloud.bigtable_admin_v2.types.instance import HotTablet from google.cloud.bigtable_admin_v2.types.instance import Instance +from google.cloud.bigtable_admin_v2.types.table import AuthorizedView from google.cloud.bigtable_admin_v2.types.table import Backup from google.cloud.bigtable_admin_v2.types.table import BackupInfo from google.cloud.bigtable_admin_v2.types.table import 
ChangeStreamConfig @@ -210,6 +241,7 @@ from google.cloud.bigtable_admin_v2.types.table import Snapshot from google.cloud.bigtable_admin_v2.types.table import Table from google.cloud.bigtable_admin_v2.types.table import RestoreSourceType +from google.cloud.bigtable_admin_v2.types.types import Type __all__ = ( "BigtableInstanceAdminClient", @@ -246,20 +278,27 @@ "CheckConsistencyResponse", "CopyBackupMetadata", "CopyBackupRequest", + "CreateAuthorizedViewMetadata", + "CreateAuthorizedViewRequest", "CreateBackupMetadata", "CreateBackupRequest", "CreateTableFromSnapshotMetadata", "CreateTableFromSnapshotRequest", "CreateTableRequest", + "DataBoostReadLocalWrites", + "DeleteAuthorizedViewRequest", "DeleteBackupRequest", "DeleteSnapshotRequest", "DeleteTableRequest", "DropRowRangeRequest", "GenerateConsistencyTokenRequest", "GenerateConsistencyTokenResponse", + "GetAuthorizedViewRequest", "GetBackupRequest", "GetSnapshotRequest", "GetTableRequest", + "ListAuthorizedViewsRequest", + "ListAuthorizedViewsResponse", "ListBackupsRequest", "ListBackupsResponse", "ListSnapshotsRequest", @@ -272,8 +311,11 @@ "RestoreTableRequest", "SnapshotTableMetadata", "SnapshotTableRequest", + "StandardReadRemoteWrites", "UndeleteTableMetadata", "UndeleteTableRequest", + "UpdateAuthorizedViewMetadata", + "UpdateAuthorizedViewRequest", "UpdateBackupRequest", "UpdateTableMetadata", "UpdateTableRequest", @@ -285,6 +327,7 @@ "Cluster", "HotTablet", "Instance", + "AuthorizedView", "Backup", "BackupInfo", "ChangeStreamConfig", @@ -295,4 +338,5 @@ "Snapshot", "Table", "RestoreSourceType", + "Type", ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py index 811b956e0344..f2aea1667712 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
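The export changes above surface the new AuthorizedView request and response types from google.cloud.bigtable_admin. A minimal, hypothetical sketch of the corresponding admin call, assuming the flattened parameters follow the usual GAPIC pattern reflected in the gapic_metadata.json entries further down:

    from google.cloud import bigtable_admin_v2

    client = bigtable_admin_v2.BigtableTableAdminClient()
    parent = "projects/my-project/instances/my-instance/tables/my-table"

    # Iterate the authorized views defined on a table; list_authorized_views
    # is expected to return a pager over AuthorizedView messages.
    for view in client.list_authorized_views(parent=parent):
        print(view.name)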
@@ -53,20 +53,27 @@ from .types.bigtable_table_admin import CheckConsistencyResponse from .types.bigtable_table_admin import CopyBackupMetadata from .types.bigtable_table_admin import CopyBackupRequest +from .types.bigtable_table_admin import CreateAuthorizedViewMetadata +from .types.bigtable_table_admin import CreateAuthorizedViewRequest from .types.bigtable_table_admin import CreateBackupMetadata from .types.bigtable_table_admin import CreateBackupRequest from .types.bigtable_table_admin import CreateTableFromSnapshotMetadata from .types.bigtable_table_admin import CreateTableFromSnapshotRequest from .types.bigtable_table_admin import CreateTableRequest +from .types.bigtable_table_admin import DataBoostReadLocalWrites +from .types.bigtable_table_admin import DeleteAuthorizedViewRequest from .types.bigtable_table_admin import DeleteBackupRequest from .types.bigtable_table_admin import DeleteSnapshotRequest from .types.bigtable_table_admin import DeleteTableRequest from .types.bigtable_table_admin import DropRowRangeRequest from .types.bigtable_table_admin import GenerateConsistencyTokenRequest from .types.bigtable_table_admin import GenerateConsistencyTokenResponse +from .types.bigtable_table_admin import GetAuthorizedViewRequest from .types.bigtable_table_admin import GetBackupRequest from .types.bigtable_table_admin import GetSnapshotRequest from .types.bigtable_table_admin import GetTableRequest +from .types.bigtable_table_admin import ListAuthorizedViewsRequest +from .types.bigtable_table_admin import ListAuthorizedViewsResponse from .types.bigtable_table_admin import ListBackupsRequest from .types.bigtable_table_admin import ListBackupsResponse from .types.bigtable_table_admin import ListSnapshotsRequest @@ -79,8 +86,11 @@ from .types.bigtable_table_admin import RestoreTableRequest from .types.bigtable_table_admin import SnapshotTableMetadata from .types.bigtable_table_admin import SnapshotTableRequest +from .types.bigtable_table_admin import StandardReadRemoteWrites from .types.bigtable_table_admin import UndeleteTableMetadata from .types.bigtable_table_admin import UndeleteTableRequest +from .types.bigtable_table_admin import UpdateAuthorizedViewMetadata +from .types.bigtable_table_admin import UpdateAuthorizedViewRequest from .types.bigtable_table_admin import UpdateBackupRequest from .types.bigtable_table_admin import UpdateTableMetadata from .types.bigtable_table_admin import UpdateTableRequest @@ -92,6 +102,7 @@ from .types.instance import Cluster from .types.instance import HotTablet from .types.instance import Instance +from .types.table import AuthorizedView from .types.table import Backup from .types.table import BackupInfo from .types.table import ChangeStreamConfig @@ -102,11 +113,13 @@ from .types.table import Snapshot from .types.table import Table from .types.table import RestoreSourceType +from .types.types import Type __all__ = ( "BigtableInstanceAdminAsyncClient", "BigtableTableAdminAsyncClient", "AppProfile", + "AuthorizedView", "AutoscalingLimits", "AutoscalingTargets", "Backup", @@ -121,6 +134,8 @@ "CopyBackupMetadata", "CopyBackupRequest", "CreateAppProfileRequest", + "CreateAuthorizedViewMetadata", + "CreateAuthorizedViewRequest", "CreateBackupMetadata", "CreateBackupRequest", "CreateClusterMetadata", @@ -130,7 +145,9 @@ "CreateTableFromSnapshotMetadata", "CreateTableFromSnapshotRequest", "CreateTableRequest", + "DataBoostReadLocalWrites", "DeleteAppProfileRequest", + "DeleteAuthorizedViewRequest", "DeleteBackupRequest", "DeleteClusterRequest", 
"DeleteInstanceRequest", @@ -142,6 +159,7 @@ "GenerateConsistencyTokenRequest", "GenerateConsistencyTokenResponse", "GetAppProfileRequest", + "GetAuthorizedViewRequest", "GetBackupRequest", "GetClusterRequest", "GetInstanceRequest", @@ -151,6 +169,8 @@ "Instance", "ListAppProfilesRequest", "ListAppProfilesResponse", + "ListAuthorizedViewsRequest", + "ListAuthorizedViewsResponse", "ListBackupsRequest", "ListBackupsResponse", "ListClustersRequest", @@ -176,12 +196,16 @@ "Snapshot", "SnapshotTableMetadata", "SnapshotTableRequest", + "StandardReadRemoteWrites", "StorageType", "Table", + "Type", "UndeleteTableMetadata", "UndeleteTableRequest", "UpdateAppProfileMetadata", "UpdateAppProfileRequest", + "UpdateAuthorizedViewMetadata", + "UpdateAuthorizedViewRequest", "UpdateBackupRequest", "UpdateClusterMetadata", "UpdateInstanceMetadata", diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_metadata.json b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_metadata.json index 9b3426470f6f..7cd09c43b60f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_metadata.json +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_metadata.json @@ -354,6 +354,11 @@ "copy_backup" ] }, + "CreateAuthorizedView": { + "methods": [ + "create_authorized_view" + ] + }, "CreateBackup": { "methods": [ "create_backup" @@ -369,6 +374,11 @@ "create_table_from_snapshot" ] }, + "DeleteAuthorizedView": { + "methods": [ + "delete_authorized_view" + ] + }, "DeleteBackup": { "methods": [ "delete_backup" @@ -394,6 +404,11 @@ "generate_consistency_token" ] }, + "GetAuthorizedView": { + "methods": [ + "get_authorized_view" + ] + }, "GetBackup": { "methods": [ "get_backup" @@ -414,6 +429,11 @@ "get_table" ] }, + "ListAuthorizedViews": { + "methods": [ + "list_authorized_views" + ] + }, "ListBackups": { "methods": [ "list_backups" @@ -459,6 +479,11 @@ "undelete_table" ] }, + "UpdateAuthorizedView": { + "methods": [ + "update_authorized_view" + ] + }, "UpdateBackup": { "methods": [ "update_backup" @@ -484,6 +509,11 @@ "copy_backup" ] }, + "CreateAuthorizedView": { + "methods": [ + "create_authorized_view" + ] + }, "CreateBackup": { "methods": [ "create_backup" @@ -499,6 +529,11 @@ "create_table_from_snapshot" ] }, + "DeleteAuthorizedView": { + "methods": [ + "delete_authorized_view" + ] + }, "DeleteBackup": { "methods": [ "delete_backup" @@ -524,6 +559,11 @@ "generate_consistency_token" ] }, + "GetAuthorizedView": { + "methods": [ + "get_authorized_view" + ] + }, "GetBackup": { "methods": [ "get_backup" @@ -544,6 +584,11 @@ "get_table" ] }, + "ListAuthorizedViews": { + "methods": [ + "list_authorized_views" + ] + }, "ListBackups": { "methods": [ "list_backups" @@ -589,6 +634,11 @@ "undelete_table" ] }, + "UpdateAuthorizedView": { + "methods": [ + "update_authorized_view" + ] + }, "UpdateBackup": { "methods": [ "update_backup" @@ -614,6 +664,11 @@ "copy_backup" ] }, + "CreateAuthorizedView": { + "methods": [ + "create_authorized_view" + ] + }, "CreateBackup": { "methods": [ "create_backup" @@ -629,6 +684,11 @@ "create_table_from_snapshot" ] }, + "DeleteAuthorizedView": { + "methods": [ + "delete_authorized_view" + ] + }, "DeleteBackup": { "methods": [ "delete_backup" @@ -654,6 +714,11 @@ "generate_consistency_token" ] }, + "GetAuthorizedView": { + "methods": [ + "get_authorized_view" + ] + }, "GetBackup": { "methods": [ "get_backup" @@ -674,6 +739,11 @@ "get_table" ] }, + "ListAuthorizedViews": { + "methods": [ + "list_authorized_views" + ] 
+ }, "ListBackups": { "methods": [ "list_backups" @@ -719,6 +789,11 @@ "undelete_table" ] }, + "UpdateAuthorizedView": { + "methods": [ + "update_authorized_view" + ] + }, "UpdateBackup": { "methods": [ "update_backup" diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/__init__.py index 89a37dc92c5a..8f6cf068242c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py index 40631d1b4bae..09a827f872e5 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py index ab14ddaedc73..52c537260c79 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -18,6 +18,7 @@ import re from typing import ( Dict, + Callable, Mapping, MutableMapping, MutableSequence, @@ -225,7 +226,13 @@ def __init__( self, *, credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, BigtableInstanceAdminTransport] = "grpc_asyncio", + transport: Optional[ + Union[ + str, + BigtableInstanceAdminTransport, + Callable[..., BigtableInstanceAdminTransport], + ] + ] = "grpc_asyncio", client_options: Optional[ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: @@ -237,9 +244,11 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - transport (Union[str, ~.BigtableInstanceAdminTransport]): The - transport to use. If set to None, a transport is chosen - automatically. + transport (Optional[Union[str,BigtableInstanceAdminTransport,Callable[..., BigtableInstanceAdminTransport]]]): + The transport to use, or a Callable that constructs and returns a new transport to use. 
+ If a Callable is given, it will be called with the same set of initialization + arguments as used in the BigtableInstanceAdminTransport constructor. + If set to None, a transport is chosen automatically. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. @@ -361,8 +370,8 @@ async def create_instance( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, instance_id, instance, clusters]) if request is not None and has_flattened_params: raise ValueError( @@ -370,7 +379,10 @@ async def create_instance( "the individual field arguments should be set." ) - request = bigtable_instance_admin.CreateInstanceRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_instance_admin.CreateInstanceRequest): + request = bigtable_instance_admin.CreateInstanceRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -386,11 +398,9 @@ async def create_instance( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_instance, - default_timeout=300.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.create_instance + ] # Certain fields should be provided within the metadata header; # add these here. @@ -461,8 +471,8 @@ async def get_instance( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( @@ -470,7 +480,10 @@ async def get_instance( "the individual field arguments should be set." ) - request = bigtable_instance_admin.GetInstanceRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_instance_admin.GetInstanceRequest): + request = bigtable_instance_admin.GetInstanceRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -479,21 +492,9 @@ async def get_instance( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_instance, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.get_instance + ] # Certain fields should be provided within the metadata header; # add these here. @@ -553,8 +554,8 @@ async def list_instances( """ # Create or coerce a protobuf request object. 
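The widened transport annotation above means the async client now accepts a factory callable in place of a transport instance or transport name. A hedged sketch of that usage; the concrete transport class name is assumed from the transports subpackage:

    from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
        BigtableInstanceAdminAsyncClient,
    )
    from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports import (
        BigtableInstanceAdminGrpcAsyncIOTransport,
    )

    # The callable receives the same initialization arguments the transport
    # constructor would, and must return a transport instance.
    def make_transport(**kwargs):
        return BigtableInstanceAdminGrpcAsyncIOTransport(**kwargs)

    client = BigtableInstanceAdminAsyncClient(transport=make_transport)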
- # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([parent]) if request is not None and has_flattened_params: raise ValueError( @@ -562,7 +563,10 @@ async def list_instances( "the individual field arguments should be set." ) - request = bigtable_instance_admin.ListInstancesRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_instance_admin.ListInstancesRequest): + request = bigtable_instance_admin.ListInstancesRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -571,21 +575,9 @@ async def list_instances( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_instances, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.list_instances + ] # Certain fields should be provided within the metadata header; # add these here. @@ -644,25 +636,16 @@ async def update_instance( """ # Create or coerce a protobuf request object. - request = instance.Instance(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, instance.Instance): + request = instance.Instance(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_instance, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.update_instance + ] # Certain fields should be provided within the metadata header; # add these here. @@ -737,8 +720,8 @@ async def partial_update_instance( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([instance, update_mask]) if request is not None and has_flattened_params: raise ValueError( @@ -746,7 +729,12 @@ async def partial_update_instance( "the individual field arguments should be set." ) - request = bigtable_instance_admin.PartialUpdateInstanceRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. 
+ if not isinstance( + request, bigtable_instance_admin.PartialUpdateInstanceRequest + ): + request = bigtable_instance_admin.PartialUpdateInstanceRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -757,21 +745,9 @@ async def partial_update_instance( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.partial_update_instance, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.partial_update_instance + ] # Certain fields should be provided within the metadata header; # add these here. @@ -835,8 +811,8 @@ async def delete_instance( sent along with the request as metadata. """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( @@ -844,7 +820,10 @@ async def delete_instance( "the individual field arguments should be set." ) - request = bigtable_instance_admin.DeleteInstanceRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_instance_admin.DeleteInstanceRequest): + request = bigtable_instance_admin.DeleteInstanceRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -853,11 +832,9 @@ async def delete_instance( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_instance, - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.delete_instance + ] # Certain fields should be provided within the metadata header; # add these here. @@ -942,8 +919,8 @@ async def create_cluster( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, cluster_id, cluster]) if request is not None and has_flattened_params: raise ValueError( @@ -951,7 +928,10 @@ async def create_cluster( "the individual field arguments should be set." ) - request = bigtable_instance_admin.CreateClusterRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_instance_admin.CreateClusterRequest): + request = bigtable_instance_admin.CreateClusterRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. 
@@ -964,11 +944,9 @@ async def create_cluster( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_cluster, - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.create_cluster + ] # Certain fields should be provided within the metadata header; # add these here. @@ -1038,8 +1016,8 @@ async def get_cluster( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( @@ -1047,7 +1025,10 @@ async def get_cluster( "the individual field arguments should be set." ) - request = bigtable_instance_admin.GetClusterRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_instance_admin.GetClusterRequest): + request = bigtable_instance_admin.GetClusterRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -1056,21 +1037,9 @@ async def get_cluster( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_cluster, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.get_cluster + ] # Certain fields should be provided within the metadata header; # add these here. @@ -1132,8 +1101,8 @@ async def list_clusters( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([parent]) if request is not None and has_flattened_params: raise ValueError( @@ -1141,7 +1110,10 @@ async def list_clusters( "the individual field arguments should be set." ) - request = bigtable_instance_admin.ListClustersRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_instance_admin.ListClustersRequest): + request = bigtable_instance_admin.ListClustersRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -1150,21 +1122,9 @@ async def list_clusters( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_clusters, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.list_clusters + ] # Certain fields should be provided within the metadata header; # add these here. @@ -1223,25 +1183,16 @@ async def update_cluster( """ # Create or coerce a protobuf request object. - request = instance.Cluster(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, instance.Cluster): + request = instance.Cluster(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_cluster, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.update_cluster + ] # Certain fields should be provided within the metadata header; # add these here. @@ -1333,8 +1284,8 @@ async def partial_update_cluster( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([cluster, update_mask]) if request is not None and has_flattened_params: raise ValueError( @@ -1342,7 +1293,10 @@ async def partial_update_cluster( "the individual field arguments should be set." ) - request = bigtable_instance_admin.PartialUpdateClusterRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_instance_admin.PartialUpdateClusterRequest): + request = bigtable_instance_admin.PartialUpdateClusterRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -1353,11 +1307,9 @@ async def partial_update_cluster( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.partial_update_cluster, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.partial_update_cluster + ] # Certain fields should be provided within the metadata header; # add these here. @@ -1421,8 +1373,8 @@ async def delete_cluster( sent along with the request as metadata. """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( @@ -1430,7 +1382,10 @@ async def delete_cluster( "the individual field arguments should be set." ) - request = bigtable_instance_admin.DeleteClusterRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_instance_admin.DeleteClusterRequest): + request = bigtable_instance_admin.DeleteClusterRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -1439,11 +1394,9 @@ async def delete_cluster( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_cluster, - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.delete_cluster + ] # Certain fields should be provided within the metadata header; # add these here. @@ -1519,8 +1472,8 @@ async def create_app_profile( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, app_profile_id, app_profile]) if request is not None and has_flattened_params: raise ValueError( @@ -1528,7 +1481,10 @@ async def create_app_profile( "the individual field arguments should be set." ) - request = bigtable_instance_admin.CreateAppProfileRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_instance_admin.CreateAppProfileRequest): + request = bigtable_instance_admin.CreateAppProfileRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -1541,11 +1497,9 @@ async def create_app_profile( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_app_profile, - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.create_app_profile + ] # Certain fields should be provided within the metadata header; # add these here. @@ -1606,8 +1560,8 @@ async def get_app_profile( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( @@ -1615,7 +1569,10 @@ async def get_app_profile( "the individual field arguments should be set." ) - request = bigtable_instance_admin.GetAppProfileRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. 
+ if not isinstance(request, bigtable_instance_admin.GetAppProfileRequest): + request = bigtable_instance_admin.GetAppProfileRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -1624,21 +1581,9 @@ async def get_app_profile( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_app_profile, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.get_app_profile + ] # Certain fields should be provided within the metadata header; # add these here. @@ -1704,8 +1649,8 @@ async def list_app_profiles( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([parent]) if request is not None and has_flattened_params: raise ValueError( @@ -1713,7 +1658,10 @@ async def list_app_profiles( "the individual field arguments should be set." ) - request = bigtable_instance_admin.ListAppProfilesRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_instance_admin.ListAppProfilesRequest): + request = bigtable_instance_admin.ListAppProfilesRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -1722,21 +1670,9 @@ async def list_app_profiles( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_app_profiles, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.list_app_profiles + ] # Certain fields should be provided within the metadata header; # add these here. @@ -1815,8 +1751,8 @@ async def update_app_profile( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([app_profile, update_mask]) if request is not None and has_flattened_params: raise ValueError( @@ -1824,7 +1760,10 @@ async def update_app_profile( "the individual field arguments should be set." ) - request = bigtable_instance_admin.UpdateAppProfileRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. 
+ if not isinstance(request, bigtable_instance_admin.UpdateAppProfileRequest): + request = bigtable_instance_admin.UpdateAppProfileRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -1835,21 +1774,9 @@ async def update_app_profile( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_app_profile, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.update_app_profile + ] # Certain fields should be provided within the metadata header; # add these here. @@ -1913,8 +1840,8 @@ async def delete_app_profile( sent along with the request as metadata. """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( @@ -1922,7 +1849,10 @@ async def delete_app_profile( "the individual field arguments should be set." ) - request = bigtable_instance_admin.DeleteAppProfileRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_instance_admin.DeleteAppProfileRequest): + request = bigtable_instance_admin.DeleteAppProfileRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -1931,11 +1861,9 @@ async def delete_app_profile( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_app_profile, - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.delete_app_profile + ] # Certain fields should be provided within the metadata header; # add these here. @@ -2020,8 +1948,8 @@ async def get_iam_policy( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([resource]) if request is not None and has_flattened_params: raise ValueError( @@ -2029,32 +1957,18 @@ async def get_iam_policy( "the individual field arguments should be set." ) - # The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. + # - The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. if isinstance(request, dict): request = iam_policy_pb2.GetIamPolicyRequest(**request) elif not request: - request = iam_policy_pb2.GetIamPolicyRequest( - resource=resource, - ) + request = iam_policy_pb2.GetIamPolicyRequest(resource=resource) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_iam_policy, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.get_iam_policy + ] # Certain fields should be provided within the metadata header; # add these here. @@ -2141,8 +2055,8 @@ async def set_iam_policy( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([resource]) if request is not None and has_flattened_params: raise ValueError( @@ -2150,22 +2064,18 @@ async def set_iam_policy( "the individual field arguments should be set." ) - # The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. + # - The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. if isinstance(request, dict): request = iam_policy_pb2.SetIamPolicyRequest(**request) elif not request: - request = iam_policy_pb2.SetIamPolicyRequest( - resource=resource, - ) + request = iam_policy_pb2.SetIamPolicyRequest(resource=resource) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.set_iam_policy, - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.set_iam_policy + ] # Certain fields should be provided within the metadata header; # add these here. @@ -2232,8 +2142,8 @@ async def test_iam_permissions( Response message for TestIamPermissions method. """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([resource, permissions]) if request is not None and has_flattened_params: raise ValueError( @@ -2241,33 +2151,20 @@ async def test_iam_permissions( "the individual field arguments should be set." ) - # The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. + # - The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. if isinstance(request, dict): request = iam_policy_pb2.TestIamPermissionsRequest(**request) elif not request: request = iam_policy_pb2.TestIamPermissionsRequest( - resource=resource, - permissions=permissions, + resource=resource, permissions=permissions ) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.test_iam_permissions, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.test_iam_permissions + ] # Certain fields should be provided within the metadata header; # add these here. @@ -2331,8 +2228,8 @@ async def list_hot_tablets( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([parent]) if request is not None and has_flattened_params: raise ValueError( @@ -2340,7 +2237,10 @@ async def list_hot_tablets( "the individual field arguments should be set." ) - request = bigtable_instance_admin.ListHotTabletsRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_instance_admin.ListHotTabletsRequest): + request = bigtable_instance_admin.ListHotTabletsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -2349,21 +2249,9 @@ async def list_hot_tablets( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_hot_tablets, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.list_hot_tablets + ] # Certain fields should be provided within the metadata header; # add these here. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py index 4c2c2998e3d4..550bcb1e7b7f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
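The hunks above swap every inline gapic_v1.method_async.wrap_method(...) call for a lookup in the transport's precomputed _wrapped_methods table. A minimal, self-contained sketch of that pattern follows; the names are illustrative stand-ins rather than the real GAPIC classes, and in the real transport each table value is produced by wrap_method with the default retry/timeout policy added later in this patch.

import asyncio


class FakeTransport:
    """Hypothetical stand-in for a GAPIC transport."""

    def __init__(self):
        self._prep_wrapped_messages()

    async def delete_cluster(self, request):
        # Stand-in for the raw gRPC stub call.
        return "deleted " + request

    def _prep_wrapped_messages(self):
        # Precompute once; the real transport stores
        # gapic_v1.method_async.wrap_method(<rpc>, default_timeout=..., ...).
        self._wrapped_methods = {self.delete_cluster: self.delete_cluster}


async def demo():
    transport = FakeTransport()
    # Per-call lookup, mirroring the rewritten client methods above.
    rpc = transport._wrapped_methods[transport.delete_cluster]
    print(await rpc("projects/p/instances/i/clusters/c"))


asyncio.run(demo())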
@@ -18,6 +18,7 @@ import re from typing import ( Dict, + Callable, Mapping, MutableMapping, MutableSequence, @@ -654,7 +655,13 @@ def __init__( self, *, credentials: Optional[ga_credentials.Credentials] = None, - transport: Optional[Union[str, BigtableInstanceAdminTransport]] = None, + transport: Optional[ + Union[ + str, + BigtableInstanceAdminTransport, + Callable[..., BigtableInstanceAdminTransport], + ] + ] = None, client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: @@ -666,9 +673,11 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - transport (Union[str, BigtableInstanceAdminTransport]): The - transport to use. If set to None, a transport is chosen - automatically. + transport (Optional[Union[str,BigtableInstanceAdminTransport,Callable[..., BigtableInstanceAdminTransport]]]): + The transport to use, or a Callable that constructs and returns a new transport. + If a Callable is given, it will be called with the same set of initialization + arguments as used in the BigtableInstanceAdminTransport constructor. + If set to None, a transport is chosen automatically. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. @@ -777,8 +786,16 @@ def __init__( api_key_value ) - Transport = type(self).get_transport_class(cast(str, transport)) - self._transport = Transport( + transport_init: Union[ + Type[BigtableInstanceAdminTransport], + Callable[..., BigtableInstanceAdminTransport], + ] = ( + type(self).get_transport_class(transport) + if isinstance(transport, str) or transport is None + else cast(Callable[..., BigtableInstanceAdminTransport], transport) + ) + # initialize with the provided callable or the passed in class + self._transport = transport_init( credentials=credentials, credentials_file=self._client_options.credentials_file, host=self._api_endpoint, @@ -868,8 +885,8 @@ def create_instance( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, instance_id, instance, clusters]) if request is not None and has_flattened_params: raise ValueError( @@ -877,10 +894,8 @@ def create_instance( "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_instance_admin.CreateInstanceRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_instance_admin.CreateInstanceRequest): request = bigtable_instance_admin.CreateInstanceRequest(request) # If we have keyword arguments corresponding to fields on the @@ -967,8 +982,8 @@ def get_instance( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( @@ -976,10 +991,8 @@ def get_instance( "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_instance_admin.GetInstanceRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_instance_admin.GetInstanceRequest): request = bigtable_instance_admin.GetInstanceRequest(request) # If we have keyword arguments corresponding to fields on the @@ -1049,8 +1062,8 @@ def list_instances( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([parent]) if request is not None and has_flattened_params: raise ValueError( @@ -1058,10 +1071,8 @@ def list_instances( "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_instance_admin.ListInstancesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_instance_admin.ListInstancesRequest): request = bigtable_instance_admin.ListInstancesRequest(request) # If we have keyword arguments corresponding to fields on the @@ -1130,10 +1141,8 @@ def update_instance( """ # Create or coerce a protobuf request object. - # Minor optimization to avoid making a copy if the user passes - # in a instance.Instance. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, instance.Instance): request = instance.Instance(request) @@ -1214,8 +1223,8 @@ def partial_update_instance( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([instance, update_mask]) if request is not None and has_flattened_params: raise ValueError( @@ -1223,10 +1232,8 @@ def partial_update_instance( "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_instance_admin.PartialUpdateInstanceRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance( request, bigtable_instance_admin.PartialUpdateInstanceRequest ): @@ -1304,8 +1311,8 @@ def delete_instance( sent along with the request as metadata. """ # Create or coerce a protobuf request object. 
- # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( @@ -1313,10 +1320,8 @@ def delete_instance( "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_instance_admin.DeleteInstanceRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_instance_admin.DeleteInstanceRequest): request = bigtable_instance_admin.DeleteInstanceRequest(request) # If we have keyword arguments corresponding to fields on the @@ -1411,8 +1416,8 @@ def create_cluster( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, cluster_id, cluster]) if request is not None and has_flattened_params: raise ValueError( @@ -1420,10 +1425,8 @@ def create_cluster( "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_instance_admin.CreateClusterRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_instance_admin.CreateClusterRequest): request = bigtable_instance_admin.CreateClusterRequest(request) # If we have keyword arguments corresponding to fields on the @@ -1507,8 +1510,8 @@ def get_cluster( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( @@ -1516,10 +1519,8 @@ def get_cluster( "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_instance_admin.GetClusterRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_instance_admin.GetClusterRequest): request = bigtable_instance_admin.GetClusterRequest(request) # If we have keyword arguments corresponding to fields on the @@ -1591,8 +1592,8 @@ def list_clusters( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
has_flattened_params = any([parent]) if request is not None and has_flattened_params: raise ValueError( @@ -1600,10 +1601,8 @@ def list_clusters( "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_instance_admin.ListClustersRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_instance_admin.ListClustersRequest): request = bigtable_instance_admin.ListClustersRequest(request) # If we have keyword arguments corresponding to fields on the @@ -1672,10 +1671,8 @@ def update_cluster( """ # Create or coerce a protobuf request object. - # Minor optimization to avoid making a copy if the user passes - # in a instance.Cluster. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, instance.Cluster): request = instance.Cluster(request) @@ -1773,8 +1770,8 @@ def partial_update_cluster( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([cluster, update_mask]) if request is not None and has_flattened_params: raise ValueError( @@ -1782,10 +1779,8 @@ def partial_update_cluster( "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_instance_admin.PartialUpdateClusterRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_instance_admin.PartialUpdateClusterRequest): request = bigtable_instance_admin.PartialUpdateClusterRequest(request) # If we have keyword arguments corresponding to fields on the @@ -1861,8 +1856,8 @@ def delete_cluster( sent along with the request as metadata. """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( @@ -1870,10 +1865,8 @@ def delete_cluster( "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_instance_admin.DeleteClusterRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. 
if not isinstance(request, bigtable_instance_admin.DeleteClusterRequest): request = bigtable_instance_admin.DeleteClusterRequest(request) # If we have keyword arguments corresponding to fields on the @@ -1959,8 +1952,8 @@ def create_app_profile( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, app_profile_id, app_profile]) if request is not None and has_flattened_params: raise ValueError( @@ -1968,10 +1961,8 @@ def create_app_profile( "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_instance_admin.CreateAppProfileRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_instance_admin.CreateAppProfileRequest): request = bigtable_instance_admin.CreateAppProfileRequest(request) # If we have keyword arguments corresponding to fields on the @@ -2046,8 +2037,8 @@ def get_app_profile( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( @@ -2055,10 +2046,8 @@ def get_app_profile( "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_instance_admin.GetAppProfileRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_instance_admin.GetAppProfileRequest): request = bigtable_instance_admin.GetAppProfileRequest(request) # If we have keyword arguments corresponding to fields on the @@ -2134,8 +2123,8 @@ def list_app_profiles( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([parent]) if request is not None and has_flattened_params: raise ValueError( @@ -2143,10 +2132,8 @@ def list_app_profiles( "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_instance_admin.ListAppProfilesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. 
if not isinstance(request, bigtable_instance_admin.ListAppProfilesRequest): request = bigtable_instance_admin.ListAppProfilesRequest(request) # If we have keyword arguments corresponding to fields on the @@ -2235,8 +2222,8 @@ def update_app_profile( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([app_profile, update_mask]) if request is not None and has_flattened_params: raise ValueError( @@ -2244,10 +2231,8 @@ def update_app_profile( "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_instance_admin.UpdateAppProfileRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_instance_admin.UpdateAppProfileRequest): request = bigtable_instance_admin.UpdateAppProfileRequest(request) # If we have keyword arguments corresponding to fields on the @@ -2323,8 +2308,8 @@ def delete_app_profile( sent along with the request as metadata. """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( @@ -2332,10 +2317,8 @@ def delete_app_profile( "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_instance_admin.DeleteAppProfileRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_instance_admin.DeleteAppProfileRequest): request = bigtable_instance_admin.DeleteAppProfileRequest(request) # If we have keyword arguments corresponding to fields on the @@ -2430,8 +2413,8 @@ def get_iam_policy( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([resource]) if request is not None and has_flattened_params: raise ValueError( @@ -2440,8 +2423,8 @@ def get_iam_policy( ) if isinstance(request, dict): - # The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. + # - The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. request = iam_policy_pb2.GetIamPolicyRequest(**request) elif not request: # Null request, just make one. @@ -2538,8 +2521,8 @@ def set_iam_policy( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
+ # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([resource]) if request is not None and has_flattened_params: raise ValueError( @@ -2548,8 +2531,8 @@ def set_iam_policy( ) if isinstance(request, dict): - # The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. + # - The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. request = iam_policy_pb2.SetIamPolicyRequest(**request) elif not request: # Null request, just make one. @@ -2626,8 +2609,8 @@ def test_iam_permissions( Response message for TestIamPermissions method. """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([resource, permissions]) if request is not None and has_flattened_params: raise ValueError( @@ -2636,8 +2619,8 @@ def test_iam_permissions( ) if isinstance(request, dict): - # The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. + # - The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. request = iam_policy_pb2.TestIamPermissionsRequest(**request) elif not request: # Null request, just make one. @@ -2713,8 +2696,8 @@ def list_hot_tablets( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([parent]) if request is not None and has_flattened_params: raise ValueError( @@ -2722,10 +2705,8 @@ def list_hot_tablets( "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_instance_admin.ListHotTabletsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_instance_admin.ListHotTabletsRequest): request = bigtable_instance_admin.ListHotTabletsRequest(request) # If we have keyword arguments corresponding to fields on the diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py index 0d646a96e1c1..f76da7622bd7 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
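The client.py hunks above also widen the constructor's transport argument so that a callable which builds the transport may be passed; per the new docstring it is invoked with the same initialization arguments as the transport constructor. A hedged usage sketch, where the import paths follow the module paths in the diff headers, the gRPC transport class name and the anonymous credentials are assumptions, and transport_factory is a hypothetical hook:

from google.auth.credentials import AnonymousCredentials
from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
    BigtableInstanceAdminClient,
)
from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports import (
    BigtableInstanceAdminGrpcTransport,
)


def transport_factory(**kwargs):
    # Hypothetical hook: inspect or adjust the transport kwargs (host,
    # credentials, scopes, ...) before constructing the transport.
    return BigtableInstanceAdminGrpcTransport(**kwargs)


# Anonymous credentials keep the sketch runnable without ADC configured.
client = BigtableInstanceAdminClient(
    credentials=AnonymousCredentials(),
    transport=transport_factory,
)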
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py index 62da28c88a1b..45cf579fbeb5 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py index aeb07556cce4..fc346c9bbe97 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py index c47db6ba5ed5..49a1b9e113de 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -59,7 +59,7 @@ def __init__( credentials: Optional[ga_credentials.Credentials] = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, - channel: Optional[grpc.Channel] = None, + channel: Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]] = None, api_mtls_endpoint: Optional[str] = None, client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, @@ -79,14 +79,17 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - This argument is ignored if ``channel`` is provided. + This argument is ignored if a ``channel`` instance is provided. credentials_file (Optional[str]): A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. + This argument is ignored if a ``channel`` instance is provided. scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. 
+ ignored if a ``channel`` instance is provided. + channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]): + A ``Channel`` instance through which to make calls, or a Callable + that constructs and returns one. If set to None, ``self.create_channel`` + is used to create the channel. If a Callable is given, it will be called + with the same arguments as used in ``self.create_channel``. api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. If provided, it overrides the ``host`` argument and tries to create a mutual TLS channel with client SSL credentials from @@ -96,11 +99,11 @@ def __init__( private key bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. + for the grpc channel. It is ignored if a ``channel`` instance is provided. client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): A callback to provide client certificate bytes and private key bytes, both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + ignored if a ``channel`` instance or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. client_info (google.api_core.gapic_v1.client_info.ClientInfo): @@ -127,7 +130,7 @@ def __init__( if client_cert_source: warnings.warn("client_cert_source is deprecated", DeprecationWarning) - if channel: + if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. credentials = False # If a channel was explicitly provided, set it. @@ -168,7 +171,9 @@ def __init__( ) if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( + # initialize with the provided callable or the default channel + channel_init = channel or type(self).create_channel + self._grpc_channel = channel_init( self._host, # use the credentials which are saved credentials=self._credentials, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py index cbd77b381f23..b85a696d9b96 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -18,6 +18,8 @@ from google.api_core import gapic_v1 from google.api_core import grpc_helpers_async +from google.api_core import exceptions as core_exceptions +from google.api_core import retry_async as retries from google.api_core import operations_v1 from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore @@ -74,7 +76,6 @@ def create_channel( the credentials from the environment. credentials_file (Optional[str]): A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. 
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -104,7 +105,7 @@ def __init__( credentials: Optional[ga_credentials.Credentials] = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, - channel: Optional[aio.Channel] = None, + channel: Optional[Union[aio.Channel, Callable[..., aio.Channel]]] = None, api_mtls_endpoint: Optional[str] = None, client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, @@ -124,15 +125,18 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - This argument is ignored if ``channel`` is provided. + This argument is ignored if a ``channel`` instance is provided. credentials_file (Optional[str]): A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. + This argument is ignored if a ``channel`` instance is provided. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. + channel (Optional[Union[aio.Channel, Callable[..., aio.Channel]]]): + A ``Channel`` instance through which to make calls, or a Callable + that constructs and returns one. If set to None, ``self.create_channel`` + is used to create the channel. If a Callable is given, it will be called + with the same arguments as used in ``self.create_channel``. api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. If provided, it overrides the ``host`` argument and tries to create a mutual TLS channel with client SSL credentials from @@ -142,11 +146,11 @@ def __init__( private key bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. + for the grpc channel. It is ignored if a ``channel`` instance is provided. client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): A callback to provide client certificate bytes and private key bytes, both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + ignored if a ``channel`` instance or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. client_info (google.api_core.gapic_v1.client_info.ClientInfo): @@ -173,7 +177,7 @@ def __init__( if client_cert_source: warnings.warn("client_cert_source is deprecated", DeprecationWarning) - if channel: + if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. credentials = False # If a channel was explicitly provided, set it. 
@@ -213,7 +217,9 @@ def __init__( ) if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( + # initialize with the provided callable or the default channel + channel_init = channel or type(self).create_channel + self._grpc_channel = channel_init( self._host, # use the credentials which are saved credentials=self._credentials, @@ -888,6 +894,246 @@ def list_hot_tablets( ) return self._stubs["list_hot_tablets"] + def _prep_wrapped_messages(self, client_info): + """Precompute the wrapped methods, overriding the base class method to use async wrappers.""" + self._wrapped_methods = { + self.create_instance: gapic_v1.method_async.wrap_method( + self.create_instance, + default_timeout=300.0, + client_info=client_info, + ), + self.get_instance: gapic_v1.method_async.wrap_method( + self.get_instance, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_instances: gapic_v1.method_async.wrap_method( + self.list_instances, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.update_instance: gapic_v1.method_async.wrap_method( + self.update_instance, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.partial_update_instance: gapic_v1.method_async.wrap_method( + self.partial_update_instance, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.delete_instance: gapic_v1.method_async.wrap_method( + self.delete_instance, + default_timeout=60.0, + client_info=client_info, + ), + self.create_cluster: gapic_v1.method_async.wrap_method( + self.create_cluster, + default_timeout=60.0, + client_info=client_info, + ), + self.get_cluster: gapic_v1.method_async.wrap_method( + self.get_cluster, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_clusters: gapic_v1.method_async.wrap_method( + self.list_clusters, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.update_cluster: gapic_v1.method_async.wrap_method( + self.update_cluster, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + 
self.partial_update_cluster: gapic_v1.method_async.wrap_method( + self.partial_update_cluster, + default_timeout=None, + client_info=client_info, + ), + self.delete_cluster: gapic_v1.method_async.wrap_method( + self.delete_cluster, + default_timeout=60.0, + client_info=client_info, + ), + self.create_app_profile: gapic_v1.method_async.wrap_method( + self.create_app_profile, + default_timeout=60.0, + client_info=client_info, + ), + self.get_app_profile: gapic_v1.method_async.wrap_method( + self.get_app_profile, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_app_profiles: gapic_v1.method_async.wrap_method( + self.list_app_profiles, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.update_app_profile: gapic_v1.method_async.wrap_method( + self.update_app_profile, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.delete_app_profile: gapic_v1.method_async.wrap_method( + self.delete_app_profile, + default_timeout=60.0, + client_info=client_info, + ), + self.get_iam_policy: gapic_v1.method_async.wrap_method( + self.get_iam_policy, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.set_iam_policy: gapic_v1.method_async.wrap_method( + self.set_iam_policy, + default_timeout=60.0, + client_info=client_info, + ), + self.test_iam_permissions: gapic_v1.method_async.wrap_method( + self.test_iam_permissions, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_hot_tablets: gapic_v1.method_async.wrap_method( + self.list_hot_tablets, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + } + def close(self): return self.grpc_channel.close() diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py index 544649e90131..7fdf89eb6356 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # 
Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index 124b3ef097ef..2747e403726f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -18,6 +18,7 @@ import re from typing import ( Dict, + Callable, Mapping, MutableMapping, MutableSequence, @@ -74,6 +75,10 @@ class BigtableTableAdminAsyncClient: _DEFAULT_ENDPOINT_TEMPLATE = BigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE _DEFAULT_UNIVERSE = BigtableTableAdminClient._DEFAULT_UNIVERSE + authorized_view_path = staticmethod(BigtableTableAdminClient.authorized_view_path) + parse_authorized_view_path = staticmethod( + BigtableTableAdminClient.parse_authorized_view_path + ) backup_path = staticmethod(BigtableTableAdminClient.backup_path) parse_backup_path = staticmethod(BigtableTableAdminClient.parse_backup_path) cluster_path = staticmethod(BigtableTableAdminClient.cluster_path) @@ -221,7 +226,13 @@ def __init__( self, *, credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, BigtableTableAdminTransport] = "grpc_asyncio", + transport: Optional[ + Union[ + str, + BigtableTableAdminTransport, + Callable[..., BigtableTableAdminTransport], + ] + ] = "grpc_asyncio", client_options: Optional[ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: @@ -233,9 +244,11 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - transport (Union[str, ~.BigtableTableAdminTransport]): The - transport to use. If set to None, a transport is chosen - automatically. + transport (Optional[Union[str,BigtableTableAdminTransport,Callable[..., BigtableTableAdminTransport]]]): + The transport to use, or a Callable that constructs and returns a new transport to use. + If a Callable is given, it will be called with the same set of initialization + arguments as used in the BigtableTableAdminTransport constructor. + If set to None, a transport is chosen automatically. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. @@ -335,8 +348,8 @@ async def create_table( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, table_id, table]) if request is not None and has_flattened_params: raise ValueError( @@ -344,7 +357,10 @@ async def create_table( "the individual field arguments should be set." 
) - request = bigtable_table_admin.CreateTableRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.CreateTableRequest): + request = bigtable_table_admin.CreateTableRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -357,11 +373,9 @@ async def create_table( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_table, - default_timeout=300.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.create_table + ] # Certain fields should be provided within the metadata header; # add these here. @@ -459,8 +473,8 @@ async def create_table_from_snapshot( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, table_id, source_snapshot]) if request is not None and has_flattened_params: raise ValueError( @@ -468,7 +482,10 @@ async def create_table_from_snapshot( "the individual field arguments should be set." ) - request = bigtable_table_admin.CreateTableFromSnapshotRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.CreateTableFromSnapshotRequest): + request = bigtable_table_admin.CreateTableFromSnapshotRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -481,11 +498,9 @@ async def create_table_from_snapshot( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_table_from_snapshot, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.create_table_from_snapshot + ] # Certain fields should be provided within the metadata header; # add these here. @@ -554,8 +569,8 @@ async def list_tables( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([parent]) if request is not None and has_flattened_params: raise ValueError( @@ -563,7 +578,10 @@ async def list_tables( "the individual field arguments should be set." ) - request = bigtable_table_admin.ListTablesRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.ListTablesRequest): + request = bigtable_table_admin.ListTablesRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -572,21 +590,9 @@ async def list_tables( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_tables, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.list_tables + ] # Certain fields should be provided within the metadata header; # add these here. @@ -655,8 +661,8 @@ async def get_table( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( @@ -664,7 +670,10 @@ async def get_table( "the individual field arguments should be set." ) - request = bigtable_table_admin.GetTableRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.GetTableRequest): + request = bigtable_table_admin.GetTableRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -673,21 +682,9 @@ async def get_table( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_table, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.get_table + ] # Certain fields should be provided within the metadata header; # add these here. @@ -767,8 +764,8 @@ async def update_table( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([table, update_mask]) if request is not None and has_flattened_params: raise ValueError( @@ -776,7 +773,10 @@ async def update_table( "the individual field arguments should be set." ) - request = bigtable_table_admin.UpdateTableRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.UpdateTableRequest): + request = bigtable_table_admin.UpdateTableRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -787,11 +787,9 @@ async def update_table( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. 
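The retry and timeout defaults being removed from these call sites (initial=1.0, maximum=60.0, multiplier=2, retrying DeadlineExceeded/ServiceUnavailable) now come from the transport's pre-wrapped methods; a caller that needs a specific policy can still pass one per call. A short sketch, where ``client`` and ``table_name`` are assumed to exist:

from google.api_core import exceptions as core_exceptions
from google.api_core import retry_async


async def fetch_table(client, table_name):
    # Explicit per-call retry/timeout override, mirroring the old defaults.
    retry = retry_async.AsyncRetry(
        initial=1.0,
        maximum=60.0,
        multiplier=2,
        predicate=retry_async.if_exception_type(
            core_exceptions.DeadlineExceeded,
            core_exceptions.ServiceUnavailable,
        ),
    )
    return await client.get_table(name=table_name, retry=retry, timeout=60.0)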
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_table, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.update_table + ] # Certain fields should be provided within the metadata header; # add these here. @@ -854,8 +852,8 @@ async def delete_table( sent along with the request as metadata. """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( @@ -863,7 +861,10 @@ async def delete_table( "the individual field arguments should be set." ) - request = bigtable_table_admin.DeleteTableRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.DeleteTableRequest): + request = bigtable_table_admin.DeleteTableRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -872,11 +873,9 @@ async def delete_table( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_table, - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.delete_table + ] # Certain fields should be provided within the metadata header; # add these here. @@ -937,8 +936,8 @@ async def undelete_table( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( @@ -946,7 +945,10 @@ async def undelete_table( "the individual field arguments should be set." ) - request = bigtable_table_admin.UndeleteTableRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.UndeleteTableRequest): + request = bigtable_table_admin.UndeleteTableRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -955,11 +957,9 @@ async def undelete_table( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.undelete_table, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.undelete_table + ] # Certain fields should be provided within the metadata header; # add these here. @@ -989,6 +989,494 @@ async def undelete_table( # Done; return the response. 
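For orientation, the ``_wrapped_methods`` lookups introduced throughout this file follow the pattern of wrapping each RPC once on the transport and keying the wrapped callable by the bound method. The sketch below is illustrative only, not the library's actual transport code; it just shows that shape:

from google.api_core import gapic_v1


class ExampleTransport:
    """Illustrative transport that pre-wraps its RPCs once."""

    def __init__(self):
        client_info = gapic_v1.client_info.ClientInfo()
        # One wrap per RPC, keyed by the bound method, so client call sites
        # reduce to: rpc = transport._wrapped_methods[transport.create_table]
        self._wrapped_methods = {
            self.create_table: gapic_v1.method.wrap_method(
                self.create_table,
                default_timeout=300.0,
                client_info=client_info,
            ),
        }

    def create_table(self, request, *, retry=None, timeout=None, metadata=()):
        ...  # a real transport would send the RPC here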
return response + async def create_authorized_view( + self, + request: Optional[ + Union[bigtable_table_admin.CreateAuthorizedViewRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + authorized_view: Optional[table.AuthorizedView] = None, + authorized_view_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a new AuthorizedView in a table. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateAuthorizedViewRequest, dict]]): + The request object. The request for + [CreateAuthorizedView][google.bigtable.admin.v2.BigtableTableAdmin.CreateAuthorizedView] + parent (:class:`str`): + Required. This is the name of the table the + AuthorizedView belongs to. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + authorized_view (:class:`google.cloud.bigtable_admin_v2.types.AuthorizedView`): + Required. The AuthorizedView to + create. + + This corresponds to the ``authorized_view`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + authorized_view_id (:class:`str`): + Required. The id of the AuthorizedView to create. This + AuthorizedView must not already exist. The + ``authorized_view_id`` appended to ``parent`` forms the + full AuthorizedView name of the form + ``projects/{project}/instances/{instance}/tables/{table}/authorizedView/{authorized_view}``. + + This corresponds to the ``authorized_view_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.AuthorizedView` AuthorizedViews represent subsets of a particular Cloud Bigtable table. Users + can configure access to each Authorized View + independently from the table and use the existing + Data APIs to access the subset of data. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, authorized_view, authorized_view_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.CreateAuthorizedViewRequest): + request = bigtable_table_admin.CreateAuthorizedViewRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + if authorized_view is not None: + request.authorized_view = authorized_view + if authorized_view_id is not None: + request.authorized_view_id = authorized_view_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.create_authorized_view + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + table.AuthorizedView, + metadata_type=bigtable_table_admin.CreateAuthorizedViewMetadata, + ) + + # Done; return the response. + return response + + async def list_authorized_views( + self, + request: Optional[ + Union[bigtable_table_admin.ListAuthorizedViewsRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListAuthorizedViewsAsyncPager: + r"""Lists all AuthorizedViews from a specific table. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListAuthorizedViewsRequest, dict]]): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews][google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews] + parent (:class:`str`): + Required. The unique name of the table for which + AuthorizedViews should be listed. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListAuthorizedViewsAsyncPager: + Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews][google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. 
+ if not isinstance(request, bigtable_table_admin.ListAuthorizedViewsRequest): + request = bigtable_table_admin.ListAuthorizedViewsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.list_authorized_views + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListAuthorizedViewsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_authorized_view( + self, + request: Optional[ + Union[bigtable_table_admin.GetAuthorizedViewRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> table.AuthorizedView: + r"""Gets information from a specified AuthorizedView. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetAuthorizedViewRequest, dict]]): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.GetAuthorizedView][google.bigtable.admin.v2.BigtableTableAdmin.GetAuthorizedView] + name (:class:`str`): + Required. The unique name of the requested + AuthorizedView. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}/authorizedViews/{authorized_view}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.AuthorizedView: + AuthorizedViews represent subsets of + a particular Cloud Bigtable table. Users + can configure access to each Authorized + View independently from the table and + use the existing Data APIs to access the + subset of data. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.GetAuthorizedViewRequest): + request = bigtable_table_admin.GetAuthorizedViewRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.get_authorized_view + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_authorized_view( + self, + request: Optional[ + Union[bigtable_table_admin.UpdateAuthorizedViewRequest, dict] + ] = None, + *, + authorized_view: Optional[table.AuthorizedView] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Updates an AuthorizedView in a table. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.UpdateAuthorizedViewRequest, dict]]): + The request object. The request for + [UpdateAuthorizedView][google.bigtable.admin.v2.BigtableTableAdmin.UpdateAuthorizedView]. + authorized_view (:class:`google.cloud.bigtable_admin_v2.types.AuthorizedView`): + Required. The AuthorizedView to update. The ``name`` in + ``authorized_view`` is used to identify the + AuthorizedView. AuthorizedView name must in this format + projects//instances//tables//authorizedViews/ + + This corresponds to the ``authorized_view`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Optional. The list of fields to update. A mask + specifying which fields in the AuthorizedView resource + should be updated. This mask is relative to the + AuthorizedView resource, not to the request message. A + field will be overwritten if it is in the mask. If + empty, all fields set in the request will be + overwritten. A special value ``*`` means to overwrite + all fields (including fields not set in the request). + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.AuthorizedView` AuthorizedViews represent subsets of a particular Cloud Bigtable table. Users + can configure access to each Authorized View + independently from the table and use the existing + Data APIs to access the subset of data. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([authorized_view, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.UpdateAuthorizedViewRequest): + request = bigtable_table_admin.UpdateAuthorizedViewRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if authorized_view is not None: + request.authorized_view = authorized_view + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.update_authorized_view + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("authorized_view.name", request.authorized_view.name),) + ), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + table.AuthorizedView, + metadata_type=bigtable_table_admin.UpdateAuthorizedViewMetadata, + ) + + # Done; return the response. + return response + + async def delete_authorized_view( + self, + request: Optional[ + Union[bigtable_table_admin.DeleteAuthorizedViewRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Permanently deletes a specified AuthorizedView. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteAuthorizedViewRequest, dict]]): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.DeleteAuthorizedView][google.bigtable.admin.v2.BigtableTableAdmin.DeleteAuthorizedView] + name (:class:`str`): + Required. The unique name of the AuthorizedView to be + deleted. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}/authorizedViews/{authorized_view}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. 
+ if not isinstance(request, bigtable_table_admin.DeleteAuthorizedViewRequest): + request = bigtable_table_admin.DeleteAuthorizedViewRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.delete_authorized_view + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + async def modify_column_families( self, request: Optional[ @@ -1050,8 +1538,8 @@ async def modify_column_families( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([name, modifications]) if request is not None and has_flattened_params: raise ValueError( @@ -1059,7 +1547,10 @@ async def modify_column_families( "the individual field arguments should be set." ) - request = bigtable_table_admin.ModifyColumnFamiliesRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.ModifyColumnFamiliesRequest): + request = bigtable_table_admin.ModifyColumnFamiliesRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -1070,11 +1561,9 @@ async def modify_column_families( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.modify_column_families, - default_timeout=300.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.modify_column_families + ] # Certain fields should be provided within the metadata header; # add these here. @@ -1120,15 +1609,16 @@ async def drop_row_range( sent along with the request as metadata. """ # Create or coerce a protobuf request object. - request = bigtable_table_admin.DropRowRangeRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.DropRowRangeRequest): + request = bigtable_table_admin.DropRowRangeRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.drop_row_range, - default_timeout=3600.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.drop_row_range + ] # Certain fields should be provided within the metadata header; # add these here. @@ -1189,8 +1679,8 @@ async def generate_consistency_token( """ # Create or coerce a protobuf request object. 
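Taken together, the new AuthorizedView methods above follow the usual client surface: ``create_authorized_view`` returns a long-running operation, ``list_authorized_views`` returns an async pager, and ``delete_authorized_view`` returns nothing. A usage sketch under assumed resource names and Application Default Credentials (the empty ``AuthorizedView`` message is a placeholder; real calls would populate its fields):

import asyncio

from google.cloud.bigtable_admin_v2 import types
from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
    BigtableTableAdminAsyncClient,
)


async def demo(parent: str) -> None:
    client = BigtableTableAdminAsyncClient()

    # Create an AuthorizedView; the RPC returns a long-running operation.
    operation = await client.create_authorized_view(
        parent=parent,
        authorized_view_id="my-view",
        authorized_view=types.AuthorizedView(),
    )
    view = await operation.result()

    # List the table's AuthorizedViews; the pager resolves pages lazily.
    pager = await client.list_authorized_views(parent=parent)
    async for item in pager:
        print(item.name)

    # Clean up.
    await client.delete_authorized_view(name=view.name)


asyncio.run(demo("projects/my-project/instances/my-instance/tables/my-table"))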
- # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( @@ -1198,7 +1688,12 @@ async def generate_consistency_token( "the individual field arguments should be set." ) - request = bigtable_table_admin.GenerateConsistencyTokenRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance( + request, bigtable_table_admin.GenerateConsistencyTokenRequest + ): + request = bigtable_table_admin.GenerateConsistencyTokenRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -1207,21 +1702,9 @@ async def generate_consistency_token( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.generate_consistency_token, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.generate_consistency_token + ] # Certain fields should be provided within the metadata header; # add these here. @@ -1292,8 +1775,8 @@ async def check_consistency( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([name, consistency_token]) if request is not None and has_flattened_params: raise ValueError( @@ -1301,7 +1784,10 @@ async def check_consistency( "the individual field arguments should be set." ) - request = bigtable_table_admin.CheckConsistencyRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.CheckConsistencyRequest): + request = bigtable_table_admin.CheckConsistencyRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -1312,21 +1798,9 @@ async def check_consistency( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.check_consistency, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.check_consistency + ] # Certain fields should be provided within the metadata header; # add these here. @@ -1438,8 +1912,8 @@ async def snapshot_table( """ # Create or coerce a protobuf request object. 
- # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([name, cluster, snapshot_id, description]) if request is not None and has_flattened_params: raise ValueError( @@ -1447,7 +1921,10 @@ async def snapshot_table( "the individual field arguments should be set." ) - request = bigtable_table_admin.SnapshotTableRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.SnapshotTableRequest): + request = bigtable_table_admin.SnapshotTableRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -1462,11 +1939,9 @@ async def snapshot_table( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.snapshot_table, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.snapshot_table + ] # Certain fields should be provided within the metadata header; # add these here. @@ -1557,8 +2032,8 @@ async def get_snapshot( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( @@ -1566,7 +2041,10 @@ async def get_snapshot( "the individual field arguments should be set." ) - request = bigtable_table_admin.GetSnapshotRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.GetSnapshotRequest): + request = bigtable_table_admin.GetSnapshotRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -1575,21 +2053,9 @@ async def get_snapshot( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_snapshot, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.get_snapshot + ] # Certain fields should be provided within the metadata header; # add these here. @@ -1676,8 +2142,8 @@ async def list_snapshots( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
has_flattened_params = any([parent]) if request is not None and has_flattened_params: raise ValueError( @@ -1685,7 +2151,10 @@ async def list_snapshots( "the individual field arguments should be set." ) - request = bigtable_table_admin.ListSnapshotsRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.ListSnapshotsRequest): + request = bigtable_table_admin.ListSnapshotsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -1694,21 +2163,9 @@ async def list_snapshots( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_snapshots, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.list_snapshots + ] # Certain fields should be provided within the metadata header; # add these here. @@ -1785,8 +2242,8 @@ async def delete_snapshot( sent along with the request as metadata. """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( @@ -1794,7 +2251,10 @@ async def delete_snapshot( "the individual field arguments should be set." ) - request = bigtable_table_admin.DeleteSnapshotRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.DeleteSnapshotRequest): + request = bigtable_table_admin.DeleteSnapshotRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -1803,11 +2263,9 @@ async def delete_snapshot( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_snapshot, - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.delete_snapshot + ] # Certain fields should be provided within the metadata header; # add these here. @@ -1893,8 +2351,8 @@ async def create_backup( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, backup_id, backup]) if request is not None and has_flattened_params: raise ValueError( @@ -1902,7 +2360,10 @@ async def create_backup( "the individual field arguments should be set." 
) - request = bigtable_table_admin.CreateBackupRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.CreateBackupRequest): + request = bigtable_table_admin.CreateBackupRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -1915,11 +2376,9 @@ async def create_backup( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_backup, - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.create_backup + ] # Certain fields should be provided within the metadata header; # add these here. @@ -1983,8 +2442,8 @@ async def get_backup( A backup of a Cloud Bigtable table. """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( @@ -1992,7 +2451,10 @@ async def get_backup( "the individual field arguments should be set." ) - request = bigtable_table_admin.GetBackupRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.GetBackupRequest): + request = bigtable_table_admin.GetBackupRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -2001,21 +2463,9 @@ async def get_backup( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_backup, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.get_backup + ] # Certain fields should be provided within the metadata header; # add these here. @@ -2087,8 +2537,8 @@ async def update_backup( A backup of a Cloud Bigtable table. """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([backup, update_mask]) if request is not None and has_flattened_params: raise ValueError( @@ -2096,7 +2546,10 @@ async def update_backup( "the individual field arguments should be set." ) - request = bigtable_table_admin.UpdateBackupRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. 
+ if not isinstance(request, bigtable_table_admin.UpdateBackupRequest): + request = bigtable_table_admin.UpdateBackupRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -2107,11 +2560,9 @@ async def update_backup( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_backup, - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.update_backup + ] # Certain fields should be provided within the metadata header; # add these here. @@ -2165,8 +2616,8 @@ async def delete_backup( sent along with the request as metadata. """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( @@ -2174,7 +2625,10 @@ async def delete_backup( "the individual field arguments should be set." ) - request = bigtable_table_admin.DeleteBackupRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.DeleteBackupRequest): + request = bigtable_table_admin.DeleteBackupRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -2183,11 +2637,9 @@ async def delete_backup( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_backup, - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.delete_backup + ] # Certain fields should be provided within the metadata header; # add these here. @@ -2249,8 +2701,8 @@ async def list_backups( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([parent]) if request is not None and has_flattened_params: raise ValueError( @@ -2258,7 +2710,10 @@ async def list_backups( "the individual field arguments should be set." ) - request = bigtable_table_admin.ListBackupsRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.ListBackupsRequest): + request = bigtable_table_admin.ListBackupsRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -2267,21 +2722,9 @@ async def list_backups( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_backups, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.list_backups + ] # Certain fields should be provided within the metadata header; # add these here. @@ -2349,15 +2792,16 @@ async def restore_table( """ # Create or coerce a protobuf request object. - request = bigtable_table_admin.RestoreTableRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.RestoreTableRequest): + request = bigtable_table_admin.RestoreTableRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.restore_table, - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.restore_table + ] # Certain fields should be provided within the metadata header; # add these here. @@ -2467,8 +2911,8 @@ async def copy_backup( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, backup_id, source_backup, expire_time]) if request is not None and has_flattened_params: raise ValueError( @@ -2476,7 +2920,10 @@ async def copy_backup( "the individual field arguments should be set." ) - request = bigtable_table_admin.CopyBackupRequest(request) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.CopyBackupRequest): + request = bigtable_table_admin.CopyBackupRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. @@ -2491,11 +2938,9 @@ async def copy_backup( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.copy_backup, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.copy_backup + ] # Certain fields should be provided within the metadata header; # add these here. @@ -2591,8 +3036,8 @@ async def get_iam_policy( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([resource]) if request is not None and has_flattened_params: raise ValueError( @@ -2600,32 +3045,18 @@ async def get_iam_policy( "the individual field arguments should be set." ) - # The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. 
+ # - The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. if isinstance(request, dict): request = iam_policy_pb2.GetIamPolicyRequest(**request) elif not request: - request = iam_policy_pb2.GetIamPolicyRequest( - resource=resource, - ) + request = iam_policy_pb2.GetIamPolicyRequest(resource=resource) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_iam_policy, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.get_iam_policy + ] # Certain fields should be provided within the metadata header; # add these here. @@ -2712,8 +3143,8 @@ async def set_iam_policy( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([resource]) if request is not None and has_flattened_params: raise ValueError( @@ -2721,22 +3152,18 @@ async def set_iam_policy( "the individual field arguments should be set." ) - # The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. + # - The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. if isinstance(request, dict): request = iam_policy_pb2.SetIamPolicyRequest(**request) elif not request: - request = iam_policy_pb2.SetIamPolicyRequest( - resource=resource, - ) + request = iam_policy_pb2.SetIamPolicyRequest(resource=resource) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.set_iam_policy, - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.set_iam_policy + ] # Certain fields should be provided within the metadata header; # add these here. @@ -2803,8 +3230,8 @@ async def test_iam_permissions( Response message for TestIamPermissions method. """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([resource, permissions]) if request is not None and has_flattened_params: raise ValueError( @@ -2812,33 +3239,20 @@ async def test_iam_permissions( "the individual field arguments should be set." ) - # The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. + # - The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
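The reworded comment above ("The request isn't a proto-plus wrapped type, so it must be constructed via keyword expansion") reflects that the IAM requests are plain protobuf messages: unlike the proto-plus request types elsewhere in this file, they do not accept another message or a dict positionally, so a dict has to be expanded into keyword arguments. For example:

from google.iam.v1 import iam_policy_pb2

# Plain protobuf message: keyword arguments work, a positional dict does not.
request = iam_policy_pb2.GetIamPolicyRequest(
    resource="projects/my-project/instances/my-instance/tables/my-table"
)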
if isinstance(request, dict): request = iam_policy_pb2.TestIamPermissionsRequest(**request) elif not request: request = iam_policy_pb2.TestIamPermissionsRequest( - resource=resource, - permissions=permissions, + resource=resource, permissions=permissions ) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.test_iam_permissions, - default_retry=retries.AsyncRetry( - initial=1.0, - maximum=60.0, - multiplier=2, - predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.test_iam_permissions + ] # Certain fields should be provided within the metadata header; # add these here. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index 09a67e696069..e9b06965c02e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -18,6 +18,7 @@ import re from typing import ( Dict, + Callable, Mapping, MutableMapping, MutableSequence, @@ -194,6 +195,30 @@ def transport(self) -> BigtableTableAdminTransport: """ return self._transport + @staticmethod + def authorized_view_path( + project: str, + instance: str, + table: str, + authorized_view: str, + ) -> str: + """Returns a fully-qualified authorized_view string.""" + return "projects/{project}/instances/{instance}/tables/{table}/authorizedViews/{authorized_view}".format( + project=project, + instance=instance, + table=table, + authorized_view=authorized_view, + ) + + @staticmethod + def parse_authorized_view_path(path: str) -> Dict[str, str]: + """Parses a authorized_view path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/instances/(?P.+?)/tables/(?P
.+?)/authorizedViews/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def backup_path( project: str, @@ -654,7 +679,13 @@ def __init__( self, *, credentials: Optional[ga_credentials.Credentials] = None, - transport: Optional[Union[str, BigtableTableAdminTransport]] = None, + transport: Optional[ + Union[ + str, + BigtableTableAdminTransport, + Callable[..., BigtableTableAdminTransport], + ] + ] = None, client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: @@ -666,9 +697,11 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - transport (Union[str, BigtableTableAdminTransport]): The - transport to use. If set to None, a transport is chosen - automatically. + transport (Optional[Union[str,BigtableTableAdminTransport,Callable[..., BigtableTableAdminTransport]]]): + The transport to use, or a Callable that constructs and returns a new transport. + If a Callable is given, it will be called with the same set of initialization + arguments as used in the BigtableTableAdminTransport constructor. + If set to None, a transport is chosen automatically. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. @@ -777,8 +810,16 @@ def __init__( api_key_value ) - Transport = type(self).get_transport_class(cast(str, transport)) - self._transport = Transport( + transport_init: Union[ + Type[BigtableTableAdminTransport], + Callable[..., BigtableTableAdminTransport], + ] = ( + type(self).get_transport_class(transport) + if isinstance(transport, str) or transport is None + else cast(Callable[..., BigtableTableAdminTransport], transport) + ) + # initialize with the provided callable or the passed in class + self._transport = transport_init( credentials=credentials, credentials_file=self._client_options.credentials_file, host=self._api_endpoint, @@ -846,8 +887,8 @@ def create_table( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, table_id, table]) if request is not None and has_flattened_params: raise ValueError( @@ -855,10 +896,8 @@ def create_table( "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.CreateTableRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_table_admin.CreateTableRequest): request = bigtable_table_admin.CreateTableRequest(request) # If we have keyword arguments corresponding to fields on the @@ -970,8 +1009,8 @@ def create_table_from_snapshot( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
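The authorized_view path helpers added above are the standard resource-path pair: ``authorized_view_path`` fills the format string shown, and ``parse_authorized_view_path`` splits a full name back into components keyed by the same placeholders (project, instance, table, authorized_view). A quick sketch with made-up identifiers:

from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
    BigtableTableAdminClient,
)

name = BigtableTableAdminClient.authorized_view_path(
    "my-project", "my-instance", "my-table", "my-view"
)
# "projects/my-project/instances/my-instance/tables/my-table/authorizedViews/my-view"

parts = BigtableTableAdminClient.parse_authorized_view_path(name)
# {"project": "my-project", "instance": "my-instance",
#  "table": "my-table", "authorized_view": "my-view"}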
has_flattened_params = any([parent, table_id, source_snapshot]) if request is not None and has_flattened_params: raise ValueError( @@ -979,10 +1018,8 @@ def create_table_from_snapshot( "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.CreateTableFromSnapshotRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_table_admin.CreateTableFromSnapshotRequest): request = bigtable_table_admin.CreateTableFromSnapshotRequest(request) # If we have keyword arguments corresponding to fields on the @@ -1067,8 +1104,8 @@ def list_tables( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([parent]) if request is not None and has_flattened_params: raise ValueError( @@ -1076,10 +1113,8 @@ def list_tables( "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.ListTablesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_table_admin.ListTablesRequest): request = bigtable_table_admin.ListTablesRequest(request) # If we have keyword arguments corresponding to fields on the @@ -1158,8 +1193,8 @@ def get_table( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( @@ -1167,10 +1202,8 @@ def get_table( "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.GetTableRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_table_admin.GetTableRequest): request = bigtable_table_admin.GetTableRequest(request) # If we have keyword arguments corresponding to fields on the @@ -1260,8 +1293,8 @@ def update_table( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([table, update_mask]) if request is not None and has_flattened_params: raise ValueError( @@ -1269,10 +1302,8 @@ def update_table( "the individual field arguments should be set." 
) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.UpdateTableRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_table_admin.UpdateTableRequest): request = bigtable_table_admin.UpdateTableRequest(request) # If we have keyword arguments corresponding to fields on the @@ -1347,8 +1378,8 @@ def delete_table( sent along with the request as metadata. """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( @@ -1356,10 +1387,8 @@ def delete_table( "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.DeleteTableRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_table_admin.DeleteTableRequest): request = bigtable_table_admin.DeleteTableRequest(request) # If we have keyword arguments corresponding to fields on the @@ -1430,8 +1459,8 @@ def undelete_table( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( @@ -1439,10 +1468,8 @@ def undelete_table( "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.UndeleteTableRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_table_admin.UndeleteTableRequest): request = bigtable_table_admin.UndeleteTableRequest(request) # If we have keyword arguments corresponding to fields on the @@ -1482,6 +1509,479 @@ def undelete_table( # Done; return the response. return response + def create_authorized_view( + self, + request: Optional[ + Union[bigtable_table_admin.CreateAuthorizedViewRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + authorized_view: Optional[table.AuthorizedView] = None, + authorized_view_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Creates a new AuthorizedView in a table. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.CreateAuthorizedViewRequest, dict]): + The request object. 
The request for + [CreateAuthorizedView][google.bigtable.admin.v2.BigtableTableAdmin.CreateAuthorizedView] + parent (str): + Required. This is the name of the table the + AuthorizedView belongs to. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + authorized_view (google.cloud.bigtable_admin_v2.types.AuthorizedView): + Required. The AuthorizedView to + create. + + This corresponds to the ``authorized_view`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + authorized_view_id (str): + Required. The id of the AuthorizedView to create. This + AuthorizedView must not already exist. The + ``authorized_view_id`` appended to ``parent`` forms the + full AuthorizedView name of the form + ``projects/{project}/instances/{instance}/tables/{table}/authorizedView/{authorized_view}``. + + This corresponds to the ``authorized_view_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.AuthorizedView` AuthorizedViews represent subsets of a particular Cloud Bigtable table. Users + can configure access to each Authorized View + independently from the table and use the existing + Data APIs to access the subset of data. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, authorized_view, authorized_view_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.CreateAuthorizedViewRequest): + request = bigtable_table_admin.CreateAuthorizedViewRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if authorized_view is not None: + request.authorized_view = authorized_view + if authorized_view_id is not None: + request.authorized_view_id = authorized_view_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_authorized_view] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = operation.from_gapic( + response, + self._transport.operations_client, + table.AuthorizedView, + metadata_type=bigtable_table_admin.CreateAuthorizedViewMetadata, + ) + + # Done; return the response. + return response + + def list_authorized_views( + self, + request: Optional[ + Union[bigtable_table_admin.ListAuthorizedViewsRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListAuthorizedViewsPager: + r"""Lists all AuthorizedViews from a specific table. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.ListAuthorizedViewsRequest, dict]): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews][google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews] + parent (str): + Required. The unique name of the table for which + AuthorizedViews should be listed. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListAuthorizedViewsPager: + Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews][google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.ListAuthorizedViewsRequest): + request = bigtable_table_admin.ListAuthorizedViewsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_authorized_views] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListAuthorizedViewsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def get_authorized_view( + self, + request: Optional[ + Union[bigtable_table_admin.GetAuthorizedViewRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> table.AuthorizedView: + r"""Gets information from a specified AuthorizedView. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.GetAuthorizedViewRequest, dict]): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.GetAuthorizedView][google.bigtable.admin.v2.BigtableTableAdmin.GetAuthorizedView] + name (str): + Required. The unique name of the requested + AuthorizedView. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}/authorizedViews/{authorized_view}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.AuthorizedView: + AuthorizedViews represent subsets of + a particular Cloud Bigtable table. Users + can configure access to each Authorized + View independently from the table and + use the existing Data APIs to access the + subset of data. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.GetAuthorizedViewRequest): + request = bigtable_table_admin.GetAuthorizedViewRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_authorized_view] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_authorized_view( + self, + request: Optional[ + Union[bigtable_table_admin.UpdateAuthorizedViewRequest, dict] + ] = None, + *, + authorized_view: Optional[table.AuthorizedView] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Updates an AuthorizedView in a table. 
+ + Args: + request (Union[google.cloud.bigtable_admin_v2.types.UpdateAuthorizedViewRequest, dict]): + The request object. The request for + [UpdateAuthorizedView][google.bigtable.admin.v2.BigtableTableAdmin.UpdateAuthorizedView]. + authorized_view (google.cloud.bigtable_admin_v2.types.AuthorizedView): + Required. The AuthorizedView to update. The ``name`` in + ``authorized_view`` is used to identify the + AuthorizedView. AuthorizedView name must in this format + projects//instances//tables//authorizedViews/ + + This corresponds to the ``authorized_view`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Optional. The list of fields to update. A mask + specifying which fields in the AuthorizedView resource + should be updated. This mask is relative to the + AuthorizedView resource, not to the request message. A + field will be overwritten if it is in the mask. If + empty, all fields set in the request will be + overwritten. A special value ``*`` means to overwrite + all fields (including fields not set in the request). + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.AuthorizedView` AuthorizedViews represent subsets of a particular Cloud Bigtable table. Users + can configure access to each Authorized View + independently from the table and use the existing + Data APIs to access the subset of data. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([authorized_view, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.UpdateAuthorizedViewRequest): + request = bigtable_table_admin.UpdateAuthorizedViewRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if authorized_view is not None: + request.authorized_view = authorized_view + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_authorized_view] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("authorized_view.name", request.authorized_view.name),) + ), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = operation.from_gapic( + response, + self._transport.operations_client, + table.AuthorizedView, + metadata_type=bigtable_table_admin.UpdateAuthorizedViewMetadata, + ) + + # Done; return the response. + return response + + def delete_authorized_view( + self, + request: Optional[ + Union[bigtable_table_admin.DeleteAuthorizedViewRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Permanently deletes a specified AuthorizedView. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.DeleteAuthorizedViewRequest, dict]): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.DeleteAuthorizedView][google.bigtable.admin.v2.BigtableTableAdmin.DeleteAuthorizedView] + name (str): + Required. The unique name of the AuthorizedView to be + deleted. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}/authorizedViews/{authorized_view}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.DeleteAuthorizedViewRequest): + request = bigtable_table_admin.DeleteAuthorizedViewRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_authorized_view] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + def modify_column_families( self, request: Optional[ @@ -1543,8 +2043,8 @@ def modify_column_families( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([name, modifications]) if request is not None and has_flattened_params: raise ValueError( @@ -1552,10 +2052,8 @@ def modify_column_families( "the individual field arguments should be set." 
) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.ModifyColumnFamiliesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_table_admin.ModifyColumnFamiliesRequest): request = bigtable_table_admin.ModifyColumnFamiliesRequest(request) # If we have keyword arguments corresponding to fields on the @@ -1613,10 +2111,8 @@ def drop_row_range( sent along with the request as metadata. """ # Create or coerce a protobuf request object. - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.DropRowRangeRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_table_admin.DropRowRangeRequest): request = bigtable_table_admin.DropRowRangeRequest(request) @@ -1683,8 +2179,8 @@ def generate_consistency_token( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( @@ -1692,10 +2188,8 @@ def generate_consistency_token( "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.GenerateConsistencyTokenRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance( request, bigtable_table_admin.GenerateConsistencyTokenRequest ): @@ -1780,8 +2274,8 @@ def check_consistency( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([name, consistency_token]) if request is not None and has_flattened_params: raise ValueError( @@ -1789,10 +2283,8 @@ def check_consistency( "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.CheckConsistencyRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_table_admin.CheckConsistencyRequest): request = bigtable_table_admin.CheckConsistencyRequest(request) # If we have keyword arguments corresponding to fields on the @@ -1916,8 +2408,8 @@ def snapshot_table( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
+ # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([name, cluster, snapshot_id, description]) if request is not None and has_flattened_params: raise ValueError( @@ -1925,10 +2417,8 @@ def snapshot_table( "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.SnapshotTableRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_table_admin.SnapshotTableRequest): request = bigtable_table_admin.SnapshotTableRequest(request) # If we have keyword arguments corresponding to fields on the @@ -2035,8 +2525,8 @@ def get_snapshot( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( @@ -2044,10 +2534,8 @@ def get_snapshot( "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.GetSnapshotRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_table_admin.GetSnapshotRequest): request = bigtable_table_admin.GetSnapshotRequest(request) # If we have keyword arguments corresponding to fields on the @@ -2144,8 +2632,8 @@ def list_snapshots( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([parent]) if request is not None and has_flattened_params: raise ValueError( @@ -2153,10 +2641,8 @@ def list_snapshots( "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.ListSnapshotsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_table_admin.ListSnapshotsRequest): request = bigtable_table_admin.ListSnapshotsRequest(request) # If we have keyword arguments corresponding to fields on the @@ -2243,8 +2729,8 @@ def delete_snapshot( sent along with the request as metadata. """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( @@ -2252,10 +2738,8 @@ def delete_snapshot( "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.DeleteSnapshotRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_table_admin.DeleteSnapshotRequest): request = bigtable_table_admin.DeleteSnapshotRequest(request) # If we have keyword arguments corresponding to fields on the @@ -2351,8 +2835,8 @@ def create_backup( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, backup_id, backup]) if request is not None and has_flattened_params: raise ValueError( @@ -2360,10 +2844,8 @@ def create_backup( "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.CreateBackupRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_table_admin.CreateBackupRequest): request = bigtable_table_admin.CreateBackupRequest(request) # If we have keyword arguments corresponding to fields on the @@ -2441,8 +2923,8 @@ def get_backup( A backup of a Cloud Bigtable table. """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( @@ -2450,10 +2932,8 @@ def get_backup( "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.GetBackupRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_table_admin.GetBackupRequest): request = bigtable_table_admin.GetBackupRequest(request) # If we have keyword arguments corresponding to fields on the @@ -2535,8 +3015,8 @@ def update_backup( A backup of a Cloud Bigtable table. """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([backup, update_mask]) if request is not None and has_flattened_params: raise ValueError( @@ -2544,10 +3024,8 @@ def update_backup( "the individual field arguments should be set." 
) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.UpdateBackupRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_table_admin.UpdateBackupRequest): request = bigtable_table_admin.UpdateBackupRequest(request) # If we have keyword arguments corresponding to fields on the @@ -2613,8 +3091,8 @@ def delete_backup( sent along with the request as metadata. """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( @@ -2622,10 +3100,8 @@ def delete_backup( "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.DeleteBackupRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_table_admin.DeleteBackupRequest): request = bigtable_table_admin.DeleteBackupRequest(request) # If we have keyword arguments corresponding to fields on the @@ -2697,8 +3173,8 @@ def list_backups( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([parent]) if request is not None and has_flattened_params: raise ValueError( @@ -2706,10 +3182,8 @@ def list_backups( "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.ListBackupsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_table_admin.ListBackupsRequest): request = bigtable_table_admin.ListBackupsRequest(request) # If we have keyword arguments corresponding to fields on the @@ -2787,10 +3261,8 @@ def restore_table( """ # Create or coerce a protobuf request object. - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.RestoreTableRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_table_admin.RestoreTableRequest): request = bigtable_table_admin.RestoreTableRequest(request) @@ -2906,8 +3378,8 @@ def copy_backup( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
+ # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([parent, backup_id, source_backup, expire_time]) if request is not None and has_flattened_params: raise ValueError( @@ -2915,10 +3387,8 @@ def copy_backup( "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.CopyBackupRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable_table_admin.CopyBackupRequest): request = bigtable_table_admin.CopyBackupRequest(request) # If we have keyword arguments corresponding to fields on the @@ -3030,8 +3500,8 @@ def get_iam_policy( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([resource]) if request is not None and has_flattened_params: raise ValueError( @@ -3040,8 +3510,8 @@ def get_iam_policy( ) if isinstance(request, dict): - # The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. + # - The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. request = iam_policy_pb2.GetIamPolicyRequest(**request) elif not request: # Null request, just make one. @@ -3138,8 +3608,8 @@ def set_iam_policy( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([resource]) if request is not None and has_flattened_params: raise ValueError( @@ -3148,8 +3618,8 @@ def set_iam_policy( ) if isinstance(request, dict): - # The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. + # - The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. request = iam_policy_pb2.SetIamPolicyRequest(**request) elif not request: # Null request, just make one. @@ -3226,8 +3696,8 @@ def test_iam_permissions( Response message for TestIamPermissions method. """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([resource, permissions]) if request is not None and has_flattened_params: raise ValueError( @@ -3236,8 +3706,8 @@ def test_iam_permissions( ) if isinstance(request, dict): - # The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. + # - The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. request = iam_policy_pb2.TestIamPermissionsRequest(**request) elif not request: # Null request, just make one. 
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py index 331647b4cefe..d6277bce2ce9 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -156,6 +156,138 @@ def __repr__(self) -> str: return "{0}<{1!r}>".format(self.__class__.__name__, self._response) +class ListAuthorizedViewsPager: + """A pager for iterating through ``list_authorized_views`` requests. + + This class thinly wraps an initial + :class:`google.cloud.bigtable_admin_v2.types.ListAuthorizedViewsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``authorized_views`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListAuthorizedViews`` requests and continue to iterate + through the ``authorized_views`` field on the + corresponding responses. + + All the usual :class:`google.cloud.bigtable_admin_v2.types.ListAuthorizedViewsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., bigtable_table_admin.ListAuthorizedViewsResponse], + request: bigtable_table_admin.ListAuthorizedViewsRequest, + response: bigtable_table_admin.ListAuthorizedViewsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.bigtable_admin_v2.types.ListAuthorizedViewsRequest): + The initial request object. + response (google.cloud.bigtable_admin_v2.types.ListAuthorizedViewsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = bigtable_table_admin.ListAuthorizedViewsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[bigtable_table_admin.ListAuthorizedViewsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[table.AuthorizedView]: + for page in self.pages: + yield from page.authorized_views + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListAuthorizedViewsAsyncPager: + """A pager for iterating through ``list_authorized_views`` requests. + + This class thinly wraps an initial + :class:`google.cloud.bigtable_admin_v2.types.ListAuthorizedViewsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``authorized_views`` field. 
+ + If there are more pages, the ``__aiter__`` method will make additional + ``ListAuthorizedViews`` requests and continue to iterate + through the ``authorized_views`` field on the + corresponding responses. + + All the usual :class:`google.cloud.bigtable_admin_v2.types.ListAuthorizedViewsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[ + ..., Awaitable[bigtable_table_admin.ListAuthorizedViewsResponse] + ], + request: bigtable_table_admin.ListAuthorizedViewsRequest, + response: bigtable_table_admin.ListAuthorizedViewsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.bigtable_admin_v2.types.ListAuthorizedViewsRequest): + The initial request object. + response (google.cloud.bigtable_admin_v2.types.ListAuthorizedViewsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = bigtable_table_admin.ListAuthorizedViewsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterator[bigtable_table_admin.ListAuthorizedViewsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterator[table.AuthorizedView]: + async def async_generator(): + async for page in self.pages: + for response in page.authorized_views: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + class ListSnapshotsPager: """A pager for iterating through ``list_snapshots`` requests. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py index be4aa8d2af55..11a7f83292bf 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py index e0313a9467f0..1ec3be85ef5b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -188,7 +188,7 @@ def _prep_wrapped_messages(self, client_info): ), self.delete_table: gapic_v1.method.wrap_method( self.delete_table, - default_timeout=60.0, + default_timeout=300.0, client_info=client_info, ), self.undelete_table: gapic_v1.method.wrap_method( @@ -196,6 +196,31 @@ def _prep_wrapped_messages(self, client_info): default_timeout=None, client_info=client_info, ), + self.create_authorized_view: gapic_v1.method.wrap_method( + self.create_authorized_view, + default_timeout=None, + client_info=client_info, + ), + self.list_authorized_views: gapic_v1.method.wrap_method( + self.list_authorized_views, + default_timeout=None, + client_info=client_info, + ), + self.get_authorized_view: gapic_v1.method.wrap_method( + self.get_authorized_view, + default_timeout=None, + client_info=client_info, + ), + self.update_authorized_view: gapic_v1.method.wrap_method( + self.update_authorized_view, + default_timeout=None, + client_info=client_info, + ), + self.delete_authorized_view: gapic_v1.method.wrap_method( + self.delete_authorized_view, + default_timeout=None, + client_info=client_info, + ), self.modify_column_families: gapic_v1.method.wrap_method( self.modify_column_families, default_timeout=300.0, @@ -273,7 +298,7 @@ def _prep_wrapped_messages(self, client_info): ), self.delete_snapshot: gapic_v1.method.wrap_method( self.delete_snapshot, - default_timeout=60.0, + default_timeout=300.0, client_info=client_info, ), self.create_backup: gapic_v1.method.wrap_method( @@ -303,7 +328,7 @@ def _prep_wrapped_messages(self, client_info): ), self.delete_backup: gapic_v1.method.wrap_method( self.delete_backup, - default_timeout=60.0, + default_timeout=300.0, client_info=client_info, ), self.list_backups: gapic_v1.method.wrap_method( @@ -448,6 +473,54 @@ def undelete_table( ]: raise NotImplementedError() + @property + def create_authorized_view( + self, + ) -> Callable[ + [bigtable_table_admin.CreateAuthorizedViewRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def list_authorized_views( + self, + ) -> Callable[ + [bigtable_table_admin.ListAuthorizedViewsRequest], + Union[ + bigtable_table_admin.ListAuthorizedViewsResponse, + Awaitable[bigtable_table_admin.ListAuthorizedViewsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_authorized_view( + self, + ) -> Callable[ + [bigtable_table_admin.GetAuthorizedViewRequest], + Union[table.AuthorizedView, Awaitable[table.AuthorizedView]], + ]: + raise NotImplementedError() + + @property + def update_authorized_view( + self, + ) -> Callable[ + [bigtable_table_admin.UpdateAuthorizedViewRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def 
delete_authorized_view( + self, + ) -> Callable[ + [bigtable_table_admin.DeleteAuthorizedViewRequest], + Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]], + ]: + raise NotImplementedError() + @property def modify_column_families( self, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py index b0c33eca9e46..01cec4e0b1e9 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -61,7 +61,7 @@ def __init__( credentials: Optional[ga_credentials.Credentials] = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, - channel: Optional[grpc.Channel] = None, + channel: Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]] = None, api_mtls_endpoint: Optional[str] = None, client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, @@ -81,14 +81,17 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - This argument is ignored if ``channel`` is provided. + This argument is ignored if a ``channel`` instance is provided. credentials_file (Optional[str]): A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. + This argument is ignored if a ``channel`` instance is provided. scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. + ignored if a ``channel`` instance is provided. + channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]): + A ``Channel`` instance through which to make calls, or a Callable + that constructs and returns one. If set to None, ``self.create_channel`` + is used to create the channel. If a Callable is given, it will be called + with the same arguments as used in ``self.create_channel``. api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. If provided, it overrides the ``host`` argument and tries to create a mutual TLS channel with client SSL credentials from @@ -98,11 +101,11 @@ def __init__( private key bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. + for the grpc channel. It is ignored if a ``channel`` instance is provided. client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): A callback to provide client certificate bytes and private key bytes, both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + ignored if a ``channel`` instance or ``ssl_channel_credentials`` is provided. 
quota_project_id (Optional[str]): An optional project to use for billing and quota. client_info (google.api_core.gapic_v1.client_info.ClientInfo): @@ -129,7 +132,7 @@ def __init__( if client_cert_source: warnings.warn("client_cert_source is deprecated", DeprecationWarning) - if channel: + if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. credentials = False # If a channel was explicitly provided, set it. @@ -170,7 +173,9 @@ def __init__( ) if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( + # initialize with the provided callable or the default channel + channel_init = channel or type(self).create_channel + self._grpc_channel = channel_init( self._host, # use the credentials which are saved credentials=self._credentials, @@ -457,6 +462,145 @@ def undelete_table( ) return self._stubs["undelete_table"] + @property + def create_authorized_view( + self, + ) -> Callable[ + [bigtable_table_admin.CreateAuthorizedViewRequest], operations_pb2.Operation + ]: + r"""Return a callable for the create authorized view method over gRPC. + + Creates a new AuthorizedView in a table. + + Returns: + Callable[[~.CreateAuthorizedViewRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_authorized_view" not in self._stubs: + self._stubs["create_authorized_view"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/CreateAuthorizedView", + request_serializer=bigtable_table_admin.CreateAuthorizedViewRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_authorized_view"] + + @property + def list_authorized_views( + self, + ) -> Callable[ + [bigtable_table_admin.ListAuthorizedViewsRequest], + bigtable_table_admin.ListAuthorizedViewsResponse, + ]: + r"""Return a callable for the list authorized views method over gRPC. + + Lists all AuthorizedViews from a specific table. + + Returns: + Callable[[~.ListAuthorizedViewsRequest], + ~.ListAuthorizedViewsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_authorized_views" not in self._stubs: + self._stubs["list_authorized_views"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/ListAuthorizedViews", + request_serializer=bigtable_table_admin.ListAuthorizedViewsRequest.serialize, + response_deserializer=bigtable_table_admin.ListAuthorizedViewsResponse.deserialize, + ) + return self._stubs["list_authorized_views"] + + @property + def get_authorized_view( + self, + ) -> Callable[ + [bigtable_table_admin.GetAuthorizedViewRequest], table.AuthorizedView + ]: + r"""Return a callable for the get authorized view method over gRPC. + + Gets information from a specified AuthorizedView. + + Returns: + Callable[[~.GetAuthorizedViewRequest], + ~.AuthorizedView]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_authorized_view" not in self._stubs: + self._stubs["get_authorized_view"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/GetAuthorizedView", + request_serializer=bigtable_table_admin.GetAuthorizedViewRequest.serialize, + response_deserializer=table.AuthorizedView.deserialize, + ) + return self._stubs["get_authorized_view"] + + @property + def update_authorized_view( + self, + ) -> Callable[ + [bigtable_table_admin.UpdateAuthorizedViewRequest], operations_pb2.Operation + ]: + r"""Return a callable for the update authorized view method over gRPC. + + Updates an AuthorizedView in a table. + + Returns: + Callable[[~.UpdateAuthorizedViewRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_authorized_view" not in self._stubs: + self._stubs["update_authorized_view"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateAuthorizedView", + request_serializer=bigtable_table_admin.UpdateAuthorizedViewRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["update_authorized_view"] + + @property + def delete_authorized_view( + self, + ) -> Callable[[bigtable_table_admin.DeleteAuthorizedViewRequest], empty_pb2.Empty]: + r"""Return a callable for the delete authorized view method over gRPC. + + Permanently deletes a specified AuthorizedView. + + Returns: + Callable[[~.DeleteAuthorizedViewRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_authorized_view" not in self._stubs: + self._stubs["delete_authorized_view"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteAuthorizedView", + request_serializer=bigtable_table_admin.DeleteAuthorizedViewRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs["delete_authorized_view"] + @property def modify_column_families( self, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py index 3ae66f84f446..f20ed0a494b1 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -18,6 +18,8 @@ from google.api_core import gapic_v1 from google.api_core import grpc_helpers_async +from google.api_core import exceptions as core_exceptions +from google.api_core import retry_async as retries from google.api_core import operations_v1 from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore @@ -76,7 +78,6 @@ def create_channel( the credentials from the environment. credentials_file (Optional[str]): A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -106,7 +107,7 @@ def __init__( credentials: Optional[ga_credentials.Credentials] = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, - channel: Optional[aio.Channel] = None, + channel: Optional[Union[aio.Channel, Callable[..., aio.Channel]]] = None, api_mtls_endpoint: Optional[str] = None, client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, @@ -126,15 +127,18 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - This argument is ignored if ``channel`` is provided. + This argument is ignored if a ``channel`` instance is provided. credentials_file (Optional[str]): A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. + This argument is ignored if a ``channel`` instance is provided. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. + channel (Optional[Union[aio.Channel, Callable[..., aio.Channel]]]): + A ``Channel`` instance through which to make calls, or a Callable + that constructs and returns one. If set to None, ``self.create_channel`` + is used to create the channel. If a Callable is given, it will be called + with the same arguments as used in ``self.create_channel``. api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. If provided, it overrides the ``host`` argument and tries to create a mutual TLS channel with client SSL credentials from @@ -144,11 +148,11 @@ def __init__( private key bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. + for the grpc channel. It is ignored if a ``channel`` instance is provided. client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): A callback to provide client certificate bytes and private key bytes, both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + ignored if a ``channel`` instance or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. 
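# A minimal sketch of the channel option documented above: ``channel`` may be an
# ``aio.Channel`` instance or a callable that constructs one. The callable is
# invoked with the same arguments as ``create_channel`` (host first, then keyword
# arguments). The transport class name in the trailing comment is assumed from
# this module's generated surface.
from grpc.experimental import aio


def make_channel(host: str, **kwargs) -> aio.Channel:
    # The transport forwards credentials, scopes, options, etc. as keyword
    # arguments; this toy factory ignores them and opens a plaintext channel.
    return aio.insecure_channel(host)


# transport = BigtableTableAdminGrpcAsyncIOTransport(channel=make_channel)
# Passing a ready-made ``aio.Channel`` instead still works, but credentials,
# credentials_file and ssl_channel_credentials are then ignored, as noted above.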
client_info (google.api_core.gapic_v1.client_info.ClientInfo): @@ -175,7 +179,7 @@ def __init__( if client_cert_source: warnings.warn("client_cert_source is deprecated", DeprecationWarning) - if channel: + if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. credentials = False # If a channel was explicitly provided, set it. @@ -215,7 +219,9 @@ def __init__( ) if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( + # initialize with the provided callable or the default channel + channel_init = channel or type(self).create_channel + self._grpc_channel = channel_init( self._host, # use the credentials which are saved credentials=self._credentials, @@ -469,6 +475,149 @@ def undelete_table( ) return self._stubs["undelete_table"] + @property + def create_authorized_view( + self, + ) -> Callable[ + [bigtable_table_admin.CreateAuthorizedViewRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the create authorized view method over gRPC. + + Creates a new AuthorizedView in a table. + + Returns: + Callable[[~.CreateAuthorizedViewRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_authorized_view" not in self._stubs: + self._stubs["create_authorized_view"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/CreateAuthorizedView", + request_serializer=bigtable_table_admin.CreateAuthorizedViewRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_authorized_view"] + + @property + def list_authorized_views( + self, + ) -> Callable[ + [bigtable_table_admin.ListAuthorizedViewsRequest], + Awaitable[bigtable_table_admin.ListAuthorizedViewsResponse], + ]: + r"""Return a callable for the list authorized views method over gRPC. + + Lists all AuthorizedViews from a specific table. + + Returns: + Callable[[~.ListAuthorizedViewsRequest], + Awaitable[~.ListAuthorizedViewsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_authorized_views" not in self._stubs: + self._stubs["list_authorized_views"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/ListAuthorizedViews", + request_serializer=bigtable_table_admin.ListAuthorizedViewsRequest.serialize, + response_deserializer=bigtable_table_admin.ListAuthorizedViewsResponse.deserialize, + ) + return self._stubs["list_authorized_views"] + + @property + def get_authorized_view( + self, + ) -> Callable[ + [bigtable_table_admin.GetAuthorizedViewRequest], Awaitable[table.AuthorizedView] + ]: + r"""Return a callable for the get authorized view method over gRPC. + + Gets information from a specified AuthorizedView. + + Returns: + Callable[[~.GetAuthorizedViewRequest], + Awaitable[~.AuthorizedView]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_authorized_view" not in self._stubs: + self._stubs["get_authorized_view"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/GetAuthorizedView", + request_serializer=bigtable_table_admin.GetAuthorizedViewRequest.serialize, + response_deserializer=table.AuthorizedView.deserialize, + ) + return self._stubs["get_authorized_view"] + + @property + def update_authorized_view( + self, + ) -> Callable[ + [bigtable_table_admin.UpdateAuthorizedViewRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the update authorized view method over gRPC. + + Updates an AuthorizedView in a table. + + Returns: + Callable[[~.UpdateAuthorizedViewRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_authorized_view" not in self._stubs: + self._stubs["update_authorized_view"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateAuthorizedView", + request_serializer=bigtable_table_admin.UpdateAuthorizedViewRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["update_authorized_view"] + + @property + def delete_authorized_view( + self, + ) -> Callable[ + [bigtable_table_admin.DeleteAuthorizedViewRequest], Awaitable[empty_pb2.Empty] + ]: + r"""Return a callable for the delete authorized view method over gRPC. + + Permanently deletes a specified AuthorizedView. + + Returns: + Callable[[~.DeleteAuthorizedViewRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_authorized_view" not in self._stubs: + self._stubs["delete_authorized_view"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteAuthorizedView", + request_serializer=bigtable_table_admin.DeleteAuthorizedViewRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs["delete_authorized_view"] + @property def modify_column_families( self, @@ -1035,6 +1184,261 @@ def test_iam_permissions( ) return self._stubs["test_iam_permissions"] + def _prep_wrapped_messages(self, client_info): + """Precompute the wrapped methods, overriding the base class method to use async wrappers.""" + self._wrapped_methods = { + self.create_table: gapic_v1.method_async.wrap_method( + self.create_table, + default_timeout=300.0, + client_info=client_info, + ), + self.create_table_from_snapshot: gapic_v1.method_async.wrap_method( + self.create_table_from_snapshot, + default_timeout=None, + client_info=client_info, + ), + self.list_tables: gapic_v1.method_async.wrap_method( + self.list_tables, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.get_table: gapic_v1.method_async.wrap_method( + self.get_table, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.update_table: gapic_v1.method_async.wrap_method( + self.update_table, + default_timeout=None, + client_info=client_info, + ), + self.delete_table: gapic_v1.method_async.wrap_method( + self.delete_table, + default_timeout=300.0, + client_info=client_info, + ), + self.undelete_table: gapic_v1.method_async.wrap_method( + self.undelete_table, + default_timeout=None, + client_info=client_info, + ), + self.create_authorized_view: gapic_v1.method_async.wrap_method( + self.create_authorized_view, + default_timeout=None, + client_info=client_info, + ), + self.list_authorized_views: gapic_v1.method_async.wrap_method( + self.list_authorized_views, + default_timeout=None, + client_info=client_info, + ), + self.get_authorized_view: gapic_v1.method_async.wrap_method( + self.get_authorized_view, + default_timeout=None, + client_info=client_info, + ), + self.update_authorized_view: gapic_v1.method_async.wrap_method( + self.update_authorized_view, + default_timeout=None, + client_info=client_info, + ), + self.delete_authorized_view: gapic_v1.method_async.wrap_method( + self.delete_authorized_view, + default_timeout=None, + client_info=client_info, + ), + self.modify_column_families: gapic_v1.method_async.wrap_method( + self.modify_column_families, + default_timeout=300.0, + client_info=client_info, + ), + self.drop_row_range: gapic_v1.method_async.wrap_method( + self.drop_row_range, + default_timeout=3600.0, + client_info=client_info, + ), + self.generate_consistency_token: gapic_v1.method_async.wrap_method( + self.generate_consistency_token, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.check_consistency: 
gapic_v1.method_async.wrap_method( + self.check_consistency, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.snapshot_table: gapic_v1.method_async.wrap_method( + self.snapshot_table, + default_timeout=None, + client_info=client_info, + ), + self.get_snapshot: gapic_v1.method_async.wrap_method( + self.get_snapshot, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_snapshots: gapic_v1.method_async.wrap_method( + self.list_snapshots, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.delete_snapshot: gapic_v1.method_async.wrap_method( + self.delete_snapshot, + default_timeout=300.0, + client_info=client_info, + ), + self.create_backup: gapic_v1.method_async.wrap_method( + self.create_backup, + default_timeout=60.0, + client_info=client_info, + ), + self.get_backup: gapic_v1.method_async.wrap_method( + self.get_backup, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.update_backup: gapic_v1.method_async.wrap_method( + self.update_backup, + default_timeout=60.0, + client_info=client_info, + ), + self.delete_backup: gapic_v1.method_async.wrap_method( + self.delete_backup, + default_timeout=300.0, + client_info=client_info, + ), + self.list_backups: gapic_v1.method_async.wrap_method( + self.list_backups, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.restore_table: gapic_v1.method_async.wrap_method( + self.restore_table, + default_timeout=60.0, + client_info=client_info, + ), + self.copy_backup: gapic_v1.method_async.wrap_method( + self.copy_backup, + default_timeout=None, + client_info=client_info, + ), + self.get_iam_policy: gapic_v1.method_async.wrap_method( + self.get_iam_policy, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.set_iam_policy: gapic_v1.method_async.wrap_method( + self.set_iam_policy, + default_timeout=60.0, + client_info=client_info, + ), + self.test_iam_permissions: gapic_v1.method_async.wrap_method( + self.test_iam_permissions, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + 
client_info=client_info, + ), + } + def close(self): return self.grpc_channel.close() diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py index 49bc756e177d..230b13a43e7c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py @@ -92,6 +92,14 @@ def post_copy_backup(self, response): logging.log(f"Received response: {response}") return response + def pre_create_authorized_view(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_create_authorized_view(self, response): + logging.log(f"Received response: {response}") + return response + def pre_create_backup(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -116,6 +124,10 @@ def post_create_table_from_snapshot(self, response): logging.log(f"Received response: {response}") return response + def pre_delete_authorized_view(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + def pre_delete_backup(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -140,6 +152,14 @@ def post_generate_consistency_token(self, response): logging.log(f"Received response: {response}") return response + def pre_get_authorized_view(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_authorized_view(self, response): + logging.log(f"Received response: {response}") + return response + def pre_get_backup(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -172,6 +192,14 @@ def post_get_table(self, response): logging.log(f"Received response: {response}") return response + def pre_list_authorized_views(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_authorized_views(self, response): + logging.log(f"Received response: {response}") + return response + def pre_list_backups(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -244,6 +272,14 @@ def post_undelete_table(self, response): logging.log(f"Received response: {response}") return response + def pre_update_authorized_view(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_update_authorized_view(self, response): + logging.log(f"Received response: {response}") + return response + def pre_update_backup(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -312,6 +348,31 @@ def post_copy_backup( """ return response + def pre_create_authorized_view( + self, + request: bigtable_table_admin.CreateAuthorizedViewRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + bigtable_table_admin.CreateAuthorizedViewRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for create_authorized_view + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. 
+ """ + return request, metadata + + def post_create_authorized_view( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for create_authorized_view + + Override in a subclass to manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. + """ + return response + def pre_create_backup( self, request: bigtable_table_admin.CreateBackupRequest, @@ -381,6 +442,20 @@ def post_create_table_from_snapshot( """ return response + def pre_delete_authorized_view( + self, + request: bigtable_table_admin.DeleteAuthorizedViewRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + bigtable_table_admin.DeleteAuthorizedViewRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for delete_authorized_view + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + def pre_delete_backup( self, request: bigtable_table_admin.DeleteBackupRequest, @@ -454,6 +529,31 @@ def post_generate_consistency_token( """ return response + def pre_get_authorized_view( + self, + request: bigtable_table_admin.GetAuthorizedViewRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + bigtable_table_admin.GetAuthorizedViewRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for get_authorized_view + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_get_authorized_view( + self, response: table.AuthorizedView + ) -> table.AuthorizedView: + """Post-rpc interceptor for get_authorized_view + + Override in a subclass to manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. + """ + return response + def pre_get_backup( self, request: bigtable_table_admin.GetBackupRequest, @@ -538,6 +638,31 @@ def post_get_table(self, response: table.Table) -> table.Table: """ return response + def pre_list_authorized_views( + self, + request: bigtable_table_admin.ListAuthorizedViewsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + bigtable_table_admin.ListAuthorizedViewsRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for list_authorized_views + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_list_authorized_views( + self, response: bigtable_table_admin.ListAuthorizedViewsResponse + ) -> bigtable_table_admin.ListAuthorizedViewsResponse: + """Post-rpc interceptor for list_authorized_views + + Override in a subclass to manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. + """ + return response + def pre_list_backups( self, request: bigtable_table_admin.ListBackupsRequest, @@ -743,6 +868,31 @@ def post_undelete_table( """ return response + def pre_update_authorized_view( + self, + request: bigtable_table_admin.UpdateAuthorizedViewRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + bigtable_table_admin.UpdateAuthorizedViewRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for update_authorized_view + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. 
+ """ + return request, metadata + + def post_update_authorized_view( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for update_authorized_view + + Override in a subclass to manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. + """ + return response + def pre_update_backup( self, request: bigtable_table_admin.UpdateBackupRequest, @@ -1132,6 +1282,104 @@ def __call__( resp = self._interceptor.post_copy_backup(resp) return resp + class _CreateAuthorizedView(BigtableTableAdminRestStub): + def __hash__(self): + return hash("CreateAuthorizedView") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "authorizedViewId": "", + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable_table_admin.CreateAuthorizedViewRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Call the create authorized view method over HTTP. + + Args: + request (~.bigtable_table_admin.CreateAuthorizedViewRequest): + The request object. The request for + [CreateAuthorizedView][google.bigtable.admin.v2.BigtableTableAdmin.CreateAuthorizedView] + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{parent=projects/*/instances/*/tables/*}/authorizedViews", + "body": "authorized_view", + }, + ] + request, metadata = self._interceptor.pre_create_authorized_view( + request, metadata + ) + pb_request = bigtable_table_admin.CreateAuthorizedViewRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_authorized_view(resp) + return resp + class _CreateBackup(BigtableTableAdminRestStub): def __hash__(self): return hash("CreateBackup") @@ -1429,6 +1677,82 @@ def __call__( resp = self._interceptor.post_create_table_from_snapshot(resp) return resp + class _DeleteAuthorizedView(BigtableTableAdminRestStub): + def __hash__(self): + return hash("DeleteAuthorizedView") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable_table_admin.DeleteAuthorizedViewRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ): + r"""Call the delete authorized view method over HTTP. + + Args: + request (~.bigtable_table_admin.DeleteAuthorizedViewRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.DeleteAuthorizedView][google.bigtable.admin.v2.BigtableTableAdmin.DeleteAuthorizedView] + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/v2/{name=projects/*/instances/*/tables/*/authorizedViews/*}", + }, + ] + request, metadata = self._interceptor.pre_delete_authorized_view( + request, metadata + ) + pb_request = bigtable_table_admin.DeleteAuthorizedViewRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
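# Sketch of the request transcoding used by the REST stubs above: the
# ``http_options`` rule plus the protobuf form of the request yield the HTTP
# method, the expanded URI, and the leftover query parameters. The resource name
# below is a placeholder.
from google.api_core import path_template
from google.cloud.bigtable_admin_v2.types import bigtable_table_admin

http_options = [
    {
        "method": "delete",
        "uri": "/v2/{name=projects/*/instances/*/tables/*/authorizedViews/*}",
    },
]
request = bigtable_table_admin.DeleteAuthorizedViewRequest(
    name="projects/p/instances/i/tables/t/authorizedViews/v",
)
transcoded = path_template.transcode(
    http_options, bigtable_table_admin.DeleteAuthorizedViewRequest.pb(request)
)
# transcoded["method"] -> "delete"
# transcoded["uri"]    -> "/v2/projects/p/instances/i/tables/t/authorizedViews/v"
# transcoded["query_params"] carries the remaining request fields (e.g. ``etag``)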
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + class _DeleteBackup(BigtableTableAdminRestStub): def __hash__(self): return hash("DeleteBackup") @@ -1696,12 +2020,104 @@ def __call__( http_options: List[Dict[str, str]] = [ { "method": "post", - "uri": "/v2/{name=projects/*/instances/*/tables/*}:dropRowRange", + "uri": "/v2/{name=projects/*/instances/*/tables/*}:dropRowRange", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_drop_row_range(request, metadata) + pb_request = bigtable_table_admin.DropRowRangeRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + class _GenerateConsistencyToken(BigtableTableAdminRestStub): + def __hash__(self): + return hash("GenerateConsistencyToken") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable_table_admin.GenerateConsistencyTokenRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> bigtable_table_admin.GenerateConsistencyTokenResponse: + r"""Call the generate consistency + token method over HTTP. + + Args: + request (~.bigtable_table_admin.GenerateConsistencyTokenRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.bigtable_table_admin.GenerateConsistencyTokenResponse: + Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken", "body": "*", }, ] - request, metadata = self._interceptor.pre_drop_row_range(request, metadata) - pb_request = bigtable_table_admin.DropRowRangeRequest.pb(request) + request, metadata = self._interceptor.pre_generate_consistency_token( + request, metadata + ) + pb_request = bigtable_table_admin.GenerateConsistencyTokenRequest.pb( + request + ) transcoded_request = path_template.transcode(http_options, pb_request) # Jsonify the request body @@ -1739,9 +2155,17 @@ def __call__( if response.status_code >= 400: raise core_exceptions.from_http_response(response) - class _GenerateConsistencyToken(BigtableTableAdminRestStub): + # Return the response + resp = bigtable_table_admin.GenerateConsistencyTokenResponse() + pb_resp = bigtable_table_admin.GenerateConsistencyTokenResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_generate_consistency_token(resp) + return resp + + class _GetAuthorizedView(BigtableTableAdminRestStub): def __hash__(self): - return hash("GenerateConsistencyToken") + return hash("GetAuthorizedView") __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} @@ -1755,52 +2179,47 @@ def _get_unset_required_fields(cls, message_dict): def __call__( self, - request: bigtable_table_admin.GenerateConsistencyTokenRequest, + request: bigtable_table_admin.GetAuthorizedViewRequest, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> bigtable_table_admin.GenerateConsistencyTokenResponse: - r"""Call the generate consistency - token method over HTTP. + ) -> table.AuthorizedView: + r"""Call the get authorized view method over HTTP. - Args: - request (~.bigtable_table_admin.GenerateConsistencyTokenRequest): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + Args: + request (~.bigtable_table_admin.GetAuthorizedViewRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.GetAuthorizedView][google.bigtable.admin.v2.BigtableTableAdmin.GetAuthorizedView] + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. - Returns: - ~.bigtable_table_admin.GenerateConsistencyTokenResponse: - Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] + Returns: + ~.table.AuthorizedView: + AuthorizedViews represent subsets of + a particular Cloud Bigtable table. 
Users + can configure access to each Authorized + View independently from the table and + use the existing Data APIs to access the + subset of data. """ http_options: List[Dict[str, str]] = [ { - "method": "post", - "uri": "/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken", - "body": "*", + "method": "get", + "uri": "/v2/{name=projects/*/instances/*/tables/*/authorizedViews/*}", }, ] - request, metadata = self._interceptor.pre_generate_consistency_token( + request, metadata = self._interceptor.pre_get_authorized_view( request, metadata ) - pb_request = bigtable_table_admin.GenerateConsistencyTokenRequest.pb( - request - ) + pb_request = bigtable_table_admin.GetAuthorizedViewRequest.pb(request) transcoded_request = path_template.transcode(http_options, pb_request) - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True - ) uri = transcoded_request["uri"] method = transcoded_request["method"] @@ -1823,7 +2242,6 @@ def __call__( timeout=timeout, headers=headers, params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -1832,11 +2250,11 @@ def __call__( raise core_exceptions.from_http_response(response) # Return the response - resp = bigtable_table_admin.GenerateConsistencyTokenResponse() - pb_resp = bigtable_table_admin.GenerateConsistencyTokenResponse.pb(resp) + resp = table.AuthorizedView() + pb_resp = table.AuthorizedView.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_generate_consistency_token(resp) + resp = self._interceptor.post_get_authorized_view(resp) return resp class _GetBackup(BigtableTableAdminRestStub): @@ -2293,6 +2711,96 @@ def __call__( resp = self._interceptor.post_get_table(resp) return resp + class _ListAuthorizedViews(BigtableTableAdminRestStub): + def __hash__(self): + return hash("ListAuthorizedViews") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable_table_admin.ListAuthorizedViewsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> bigtable_table_admin.ListAuthorizedViewsResponse: + r"""Call the list authorized views method over HTTP. + + Args: + request (~.bigtable_table_admin.ListAuthorizedViewsRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews][google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews] + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.bigtable_table_admin.ListAuthorizedViewsResponse: + Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews][google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews] + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{parent=projects/*/instances/*/tables/*}/authorizedViews", + }, + ] + request, metadata = self._interceptor.pre_list_authorized_views( + request, metadata + ) + pb_request = bigtable_table_admin.ListAuthorizedViewsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = bigtable_table_admin.ListAuthorizedViewsResponse() + pb_resp = bigtable_table_admin.ListAuthorizedViewsResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_authorized_views(resp) + return resp + class _ListBackups(BigtableTableAdminRestStub): def __hash__(self): return hash("ListBackups") @@ -3230,6 +3738,102 @@ def __call__( resp = self._interceptor.post_undelete_table(resp) return resp + class _UpdateAuthorizedView(BigtableTableAdminRestStub): + def __hash__(self): + return hash("UpdateAuthorizedView") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable_table_admin.UpdateAuthorizedViewRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Call the update authorized view method over HTTP. + + Args: + request (~.bigtable_table_admin.UpdateAuthorizedViewRequest): + The request object. The request for + [UpdateAuthorizedView][google.bigtable.admin.v2.BigtableTableAdmin.UpdateAuthorizedView]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "patch", + "uri": "/v2/{authorized_view.name=projects/*/instances/*/tables/*/authorizedViews/*}", + "body": "authorized_view", + }, + ] + request, metadata = self._interceptor.pre_update_authorized_view( + request, metadata + ) + pb_request = bigtable_table_admin.UpdateAuthorizedViewRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_update_authorized_view(resp) + return resp + class _UpdateBackup(BigtableTableAdminRestStub): def __hash__(self): return hash("UpdateBackup") @@ -3440,6 +4044,16 @@ def copy_backup( # In C++ this would require a dynamic_cast return self._CopyBackup(self._session, self._host, self._interceptor) # type: ignore + @property + def create_authorized_view( + self, + ) -> Callable[ + [bigtable_table_admin.CreateAuthorizedViewRequest], operations_pb2.Operation + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._CreateAuthorizedView(self._session, self._host, self._interceptor) # type: ignore + @property def create_backup( self, @@ -3466,6 +4080,14 @@ def create_table_from_snapshot( # In C++ this would require a dynamic_cast return self._CreateTableFromSnapshot(self._session, self._host, self._interceptor) # type: ignore + @property + def delete_authorized_view( + self, + ) -> Callable[[bigtable_table_admin.DeleteAuthorizedViewRequest], empty_pb2.Empty]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._DeleteAuthorizedView(self._session, self._host, self._interceptor) # type: ignore + @property def delete_backup( self, @@ -3509,6 +4131,16 @@ def generate_consistency_token( # In C++ this would require a dynamic_cast return self._GenerateConsistencyToken(self._session, self._host, self._interceptor) # type: ignore + @property + def get_authorized_view( + self, + ) -> Callable[ + [bigtable_table_admin.GetAuthorizedViewRequest], table.AuthorizedView + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._GetAuthorizedView(self._session, self._host, self._interceptor) # type: ignore + @property def get_backup( self, @@ -3541,6 +4173,17 @@ def get_table( # In C++ this would require a dynamic_cast return self._GetTable(self._session, self._host, self._interceptor) # type: ignore + @property + def list_authorized_views( + self, + ) -> Callable[ + [bigtable_table_admin.ListAuthorizedViewsRequest], + bigtable_table_admin.ListAuthorizedViewsResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListAuthorizedViews(self._session, self._host, self._interceptor) # type: ignore + @property def list_backups( self, @@ -3629,6 +4272,16 @@ def undelete_table( # In C++ this would require a dynamic_cast return self._UndeleteTable(self._session, self._host, self._interceptor) # type: ignore + @property + def update_authorized_view( + self, + ) -> Callable[ + [bigtable_table_admin.UpdateAuthorizedViewRequest], operations_pb2.Operation + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._UpdateAuthorizedView(self._session, self._host, self._interceptor) # type: ignore + @property def update_backup( self, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py index a2fefffc8a54..3ff9075d21a7 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -46,20 +46,27 @@ CheckConsistencyResponse, CopyBackupMetadata, CopyBackupRequest, + CreateAuthorizedViewMetadata, + CreateAuthorizedViewRequest, CreateBackupMetadata, CreateBackupRequest, CreateTableFromSnapshotMetadata, CreateTableFromSnapshotRequest, CreateTableRequest, + DataBoostReadLocalWrites, + DeleteAuthorizedViewRequest, DeleteBackupRequest, DeleteSnapshotRequest, DeleteTableRequest, DropRowRangeRequest, GenerateConsistencyTokenRequest, GenerateConsistencyTokenResponse, + GetAuthorizedViewRequest, GetBackupRequest, GetSnapshotRequest, GetTableRequest, + ListAuthorizedViewsRequest, + ListAuthorizedViewsResponse, ListBackupsRequest, ListBackupsResponse, ListSnapshotsRequest, @@ -72,8 +79,11 @@ RestoreTableRequest, SnapshotTableMetadata, SnapshotTableRequest, + StandardReadRemoteWrites, UndeleteTableMetadata, UndeleteTableRequest, + UpdateAuthorizedViewMetadata, + UpdateAuthorizedViewRequest, UpdateBackupRequest, UpdateTableMetadata, UpdateTableRequest, @@ -91,6 +101,7 @@ Instance, ) from .table import ( + AuthorizedView, Backup, BackupInfo, ChangeStreamConfig, @@ -102,6 +113,9 @@ Table, RestoreSourceType, ) +from .types import ( + Type, +) __all__ = ( "CreateAppProfileRequest", @@ -134,20 +148,27 @@ "CheckConsistencyResponse", "CopyBackupMetadata", "CopyBackupRequest", + "CreateAuthorizedViewMetadata", + "CreateAuthorizedViewRequest", "CreateBackupMetadata", "CreateBackupRequest", "CreateTableFromSnapshotMetadata", "CreateTableFromSnapshotRequest", "CreateTableRequest", + "DataBoostReadLocalWrites", + "DeleteAuthorizedViewRequest", "DeleteBackupRequest", "DeleteSnapshotRequest", "DeleteTableRequest", "DropRowRangeRequest", "GenerateConsistencyTokenRequest", "GenerateConsistencyTokenResponse", + "GetAuthorizedViewRequest", "GetBackupRequest", "GetSnapshotRequest", "GetTableRequest", + "ListAuthorizedViewsRequest", + "ListAuthorizedViewsResponse", "ListBackupsRequest", "ListBackupsResponse", "ListSnapshotsRequest", @@ -160,8 +181,11 @@ "RestoreTableRequest", "SnapshotTableMetadata", "SnapshotTableRequest", + "StandardReadRemoteWrites", "UndeleteTableMetadata", "UndeleteTableRequest", + "UpdateAuthorizedViewMetadata", + "UpdateAuthorizedViewRequest", "UpdateBackupRequest", "UpdateTableMetadata", "UpdateTableRequest", @@ -173,6 +197,7 @@ "Cluster", "HotTablet", "Instance", + "AuthorizedView", "Backup", "BackupInfo", "ChangeStreamConfig", @@ -183,4 +208,5 @@ "Snapshot", "Table", "RestoreSourceType", + "Type", ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py index 87332a351eaf..4e5ddfd6efa0 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py index c21ac4d5a031..0bc3b6b81d11 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -47,6 +47,8 @@ "GenerateConsistencyTokenRequest", "GenerateConsistencyTokenResponse", "CheckConsistencyRequest", + "StandardReadRemoteWrites", + "DataBoostReadLocalWrites", "CheckConsistencyResponse", "SnapshotTableRequest", "GetSnapshotRequest", @@ -64,6 +66,14 @@ "ListBackupsResponse", "CopyBackupRequest", "CopyBackupMetadata", + "CreateAuthorizedViewRequest", + "CreateAuthorizedViewMetadata", + "ListAuthorizedViewsRequest", + "ListAuthorizedViewsResponse", + "GetAuthorizedViewRequest", + "UpdateAuthorizedViewRequest", + "UpdateAuthorizedViewMetadata", + "DeleteAuthorizedViewRequest", }, ) @@ -632,6 +642,11 @@ class Modification(proto.Message): given ID, or fail if no such family exists. This field is a member of `oneof`_ ``mod``. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Optional. A mask specifying which fields (e.g. ``gc_rule``) + in the ``update`` mod should be updated, ignored for other + modification types. If unset or empty, we treat it as + updating ``gc_rule`` to be backward compatible. """ id: str = proto.Field( @@ -655,6 +670,11 @@ class Modification(proto.Message): number=4, oneof="mod", ) + update_mask: field_mask_pb2.FieldMask = proto.Field( + proto.MESSAGE, + number=6, + message=field_mask_pb2.FieldMask, + ) name: str = proto.Field( proto.STRING, @@ -707,6 +727,13 @@ class CheckConsistencyRequest(proto.Message): r"""Request message for [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + Attributes: name (str): Required. The unique name of the Table for which to check @@ -715,6 +742,20 @@ class CheckConsistencyRequest(proto.Message): consistency_token (str): Required. The token created using GenerateConsistencyToken for the Table. + standard_read_remote_writes (google.cloud.bigtable_admin_v2.types.StandardReadRemoteWrites): + Checks that reads using an app profile with + ``StandardIsolation`` can see all writes committed before + the token was created, even if the read and write target + different clusters. + + This field is a member of `oneof`_ ``mode``. + data_boost_read_local_writes (google.cloud.bigtable_admin_v2.types.DataBoostReadLocalWrites): + Checks that reads using an app profile with + ``DataBoostIsolationReadOnly`` can see all writes committed + before the token was created, but only if the read and write + target the same cluster. + + This field is a member of `oneof`_ ``mode``. 
""" name: str = proto.Field( @@ -725,6 +766,32 @@ class CheckConsistencyRequest(proto.Message): proto.STRING, number=2, ) + standard_read_remote_writes: "StandardReadRemoteWrites" = proto.Field( + proto.MESSAGE, + number=3, + oneof="mode", + message="StandardReadRemoteWrites", + ) + data_boost_read_local_writes: "DataBoostReadLocalWrites" = proto.Field( + proto.MESSAGE, + number=4, + oneof="mode", + message="DataBoostReadLocalWrites", + ) + + +class StandardReadRemoteWrites(proto.Message): + r"""Checks that all writes before the consistency token was + generated are replicated in every cluster and readable. + + """ + + +class DataBoostReadLocalWrites(proto.Message): + r"""Checks that all writes before the consistency token was + generated in the same cluster are readable by Databoost. + + """ class CheckConsistencyResponse(proto.Message): @@ -1368,4 +1435,273 @@ class CopyBackupMetadata(proto.Message): ) +class CreateAuthorizedViewRequest(proto.Message): + r"""The request for + [CreateAuthorizedView][google.bigtable.admin.v2.BigtableTableAdmin.CreateAuthorizedView] + + Attributes: + parent (str): + Required. This is the name of the table the AuthorizedView + belongs to. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + authorized_view_id (str): + Required. The id of the AuthorizedView to create. This + AuthorizedView must not already exist. The + ``authorized_view_id`` appended to ``parent`` forms the full + AuthorizedView name of the form + ``projects/{project}/instances/{instance}/tables/{table}/authorizedView/{authorized_view}``. + authorized_view (google.cloud.bigtable_admin_v2.types.AuthorizedView): + Required. The AuthorizedView to create. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + authorized_view_id: str = proto.Field( + proto.STRING, + number=2, + ) + authorized_view: gba_table.AuthorizedView = proto.Field( + proto.MESSAGE, + number=3, + message=gba_table.AuthorizedView, + ) + + +class CreateAuthorizedViewMetadata(proto.Message): + r"""The metadata for the Operation returned by + CreateAuthorizedView. + + Attributes: + original_request (google.cloud.bigtable_admin_v2.types.CreateAuthorizedViewRequest): + The request that prompted the initiation of + this CreateInstance operation. + request_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the original request was + received. + finish_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the operation failed or was + completed successfully. + """ + + original_request: "CreateAuthorizedViewRequest" = proto.Field( + proto.MESSAGE, + number=1, + message="CreateAuthorizedViewRequest", + ) + request_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + finish_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + + +class ListAuthorizedViewsRequest(proto.Message): + r"""Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews][google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews] + + Attributes: + parent (str): + Required. The unique name of the table for which + AuthorizedViews should be listed. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + page_size (int): + Optional. Maximum number of results per page. + + A page_size of zero lets the server choose the number of + items to return. 
A page_size which is strictly positive will + return at most that many items. A negative page_size will + cause an error. + + Following the first request, subsequent paginated calls are + not required to pass a page_size. If a page_size is set in + subsequent calls, it must match the page_size given in the + first request. + page_token (str): + Optional. The value of ``next_page_token`` returned by a + previous call. + view (google.cloud.bigtable_admin_v2.types.AuthorizedView.ResponseView): + Optional. The resource_view to be applied to the returned + views' fields. Default to NAME_ONLY. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + page_size: int = proto.Field( + proto.INT32, + number=2, + ) + page_token: str = proto.Field( + proto.STRING, + number=3, + ) + view: gba_table.AuthorizedView.ResponseView = proto.Field( + proto.ENUM, + number=4, + enum=gba_table.AuthorizedView.ResponseView, + ) + + +class ListAuthorizedViewsResponse(proto.Message): + r"""Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews][google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews] + + Attributes: + authorized_views (MutableSequence[google.cloud.bigtable_admin_v2.types.AuthorizedView]): + The AuthorizedViews present in the requested + table. + next_page_token (str): + Set if not all tables could be returned in a single + response. Pass this value to ``page_token`` in another + request to get the next page of results. + """ + + @property + def raw_page(self): + return self + + authorized_views: MutableSequence[gba_table.AuthorizedView] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gba_table.AuthorizedView, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +class GetAuthorizedViewRequest(proto.Message): + r"""Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.GetAuthorizedView][google.bigtable.admin.v2.BigtableTableAdmin.GetAuthorizedView] + + Attributes: + name (str): + Required. The unique name of the requested AuthorizedView. + Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}/authorizedViews/{authorized_view}``. + view (google.cloud.bigtable_admin_v2.types.AuthorizedView.ResponseView): + Optional. The resource_view to be applied to the returned + AuthorizedView's fields. Default to BASIC. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + view: gba_table.AuthorizedView.ResponseView = proto.Field( + proto.ENUM, + number=2, + enum=gba_table.AuthorizedView.ResponseView, + ) + + +class UpdateAuthorizedViewRequest(proto.Message): + r"""The request for + [UpdateAuthorizedView][google.bigtable.admin.v2.BigtableTableAdmin.UpdateAuthorizedView]. + + Attributes: + authorized_view (google.cloud.bigtable_admin_v2.types.AuthorizedView): + Required. The AuthorizedView to update. The ``name`` in + ``authorized_view`` is used to identify the AuthorizedView. + AuthorizedView name must in this format + projects//instances//tables//authorizedViews/ + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Optional. The list of fields to update. A mask specifying + which fields in the AuthorizedView resource should be + updated. This mask is relative to the AuthorizedView + resource, not to the request message. A field will be + overwritten if it is in the mask. If empty, all fields set + in the request will be overwritten. A special value ``*`` + means to overwrite all fields (including fields not set in + the request). 
+ ignore_warnings (bool): + Optional. If true, ignore the safety checks + when updating the AuthorizedView. + """ + + authorized_view: gba_table.AuthorizedView = proto.Field( + proto.MESSAGE, + number=1, + message=gba_table.AuthorizedView, + ) + update_mask: field_mask_pb2.FieldMask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + ignore_warnings: bool = proto.Field( + proto.BOOL, + number=3, + ) + + +class UpdateAuthorizedViewMetadata(proto.Message): + r"""Metadata for the google.longrunning.Operation returned by + [UpdateAuthorizedView][google.bigtable.admin.v2.BigtableTableAdmin.UpdateAuthorizedView]. + + Attributes: + original_request (google.cloud.bigtable_admin_v2.types.UpdateAuthorizedViewRequest): + The request that prompted the initiation of + this UpdateAuthorizedView operation. + request_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the original request was + received. + finish_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the operation failed or was + completed successfully. + """ + + original_request: "UpdateAuthorizedViewRequest" = proto.Field( + proto.MESSAGE, + number=1, + message="UpdateAuthorizedViewRequest", + ) + request_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + finish_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + + +class DeleteAuthorizedViewRequest(proto.Message): + r"""Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.DeleteAuthorizedView][google.bigtable.admin.v2.BigtableTableAdmin.DeleteAuthorizedView] + + Attributes: + name (str): + Required. The unique name of the AuthorizedView to be + deleted. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}/authorizedViews/{authorized_view}``. + etag (str): + Optional. The current etag of the + AuthorizedView. If an etag is provided and does + not match the current etag of the + AuthorizedView, deletion will be blocked and an + ABORTED error will be returned. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + etag: str = proto.Field( + proto.STRING, + number=2, + ) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/common.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/common.py index 959b9deb1f76..1ab52a0e3c69 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/common.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/common.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py index 78efd711bbc6..f7916d44b830 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
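
For orientation, here is a minimal sketch (not part of this patch) of how the new table-admin messages introduced above might be constructed with proto-plus: a CheckConsistencyRequest using the new Data Boost mode, and a CreateAuthorizedViewRequest carrying a SubsetView. The resource names are placeholders, and only message construction is shown; the corresponding client RPCs are assumed to exist elsewhere in the package.

# Sketch only: constructs the new admin request messages defined in this patch.
# The project/instance/table names below are placeholders.
from google.cloud.bigtable_admin_v2.types import bigtable_table_admin
from google.cloud.bigtable_admin_v2.types import table as gba_table

# CheckConsistency with the new Data Boost mode (oneof "mode").
consistency_request = bigtable_table_admin.CheckConsistencyRequest(
    name="projects/my-project/instances/my-instance/tables/my-table",
    consistency_token="example-token",
    data_boost_read_local_writes=bigtable_table_admin.DataBoostReadLocalWrites(),
)

# CreateAuthorizedView exposing only rows prefixed with b"user#" and every
# qualifier in the "stats" column family.
create_view_request = bigtable_table_admin.CreateAuthorizedViewRequest(
    parent="projects/my-project/instances/my-instance/tables/my-table",
    authorized_view_id="my-view",
    authorized_view=gba_table.AuthorizedView(
        subset_view=gba_table.AuthorizedView.SubsetView(
            row_prefixes=[b"user#"],
            family_subsets={
                "stats": gba_table.AuthorizedView.FamilySubsets(
                    qualifier_prefixes=[b""],
                )
            },
        ),
        deletion_protection=False,
    ),
)
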
@@ -431,6 +431,11 @@ class AppProfile(proto.Message): The standard options used for isolating this app profile's traffic from other use cases. + This field is a member of `oneof`_ ``isolation``. + data_boost_isolation_read_only (google.cloud.bigtable_admin_v2.types.AppProfile.DataBoostIsolationReadOnly): + Specifies that this app profile is intended + for read-only usage via the Data Boost feature. + This field is a member of `oneof`_ ``isolation``. """ @@ -517,6 +522,54 @@ class StandardIsolation(proto.Message): enum="AppProfile.Priority", ) + class DataBoostIsolationReadOnly(proto.Message): + r"""Data Boost is a serverless compute capability that lets you + run high-throughput read jobs on your Bigtable data, without + impacting the performance of the clusters that handle your + application traffic. Currently, Data Boost exclusively supports + read-only use-cases with single-cluster routing. + + Data Boost reads are only guaranteed to see the results of + writes that were written at least 30 minutes ago. This means + newly written values may not become visible for up to 30m, and + also means that old values may remain visible for up to 30m + after being deleted or overwritten. To mitigate the staleness of + the data, users may either wait 30m, or use CheckConsistency. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + compute_billing_owner (google.cloud.bigtable_admin_v2.types.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner): + The Compute Billing Owner for this Data Boost + App Profile. + + This field is a member of `oneof`_ ``_compute_billing_owner``. + """ + + class ComputeBillingOwner(proto.Enum): + r"""Compute Billing Owner specifies how usage should be accounted + when using Data Boost. Compute Billing Owner also configures + which Cloud Project is charged for relevant quota. + + Values: + COMPUTE_BILLING_OWNER_UNSPECIFIED (0): + Unspecified value. + HOST_PAYS (1): + The host Cloud Project containing the + targeted Bigtable Instance / Table pays for + compute. + """ + COMPUTE_BILLING_OWNER_UNSPECIFIED = 0 + HOST_PAYS = 1 + + compute_billing_owner: "AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner" = proto.Field( + proto.ENUM, + number=1, + optional=True, + enum="AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner", + ) + name: str = proto.Field( proto.STRING, number=1, @@ -553,6 +606,12 @@ class StandardIsolation(proto.Message): oneof="isolation", message=StandardIsolation, ) + data_boost_isolation_read_only: DataBoostIsolationReadOnly = proto.Field( + proto.MESSAGE, + number=10, + oneof="isolation", + message=DataBoostIsolationReadOnly, + ) class HotTablet(proto.Message): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py index 57bd1b00f315..ef162bee1fde 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
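
As a hedged illustration of the Data Boost isolation added just above (not part of this patch): an AppProfile combining single-cluster routing, which the docstring notes Data Boost currently requires, with the new read-only isolation and HOST_PAYS billing. All resource names are placeholders.

# Sketch only: an AppProfile using the new data_boost_isolation_read_only oneof.
from google.cloud.bigtable_admin_v2.types import instance as gba_instance

data_boost_profile = gba_instance.AppProfile(
    name="projects/my-project/instances/my-instance/appProfiles/data-boost",
    description="Read-only Data Boost traffic",
    single_cluster_routing=gba_instance.AppProfile.SingleClusterRouting(
        cluster_id="my-cluster",
    ),
    data_boost_isolation_read_only=gba_instance.AppProfile.DataBoostIsolationReadOnly(
        compute_billing_owner=(
            gba_instance.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner.HOST_PAYS
        ),
    ),
)
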
@@ -19,6 +19,7 @@ import proto # type: ignore +from google.cloud.bigtable_admin_v2.types import types from google.protobuf import duration_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore from google.rpc import status_pb2 # type: ignore @@ -31,6 +32,7 @@ "RestoreInfo", "ChangeStreamConfig", "Table", + "AuthorizedView", "ColumnFamily", "GcRule", "EncryptionInfo", @@ -109,6 +111,9 @@ class Table(proto.Message): timestamp. Each table is served using the resources of its parent cluster. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + Attributes: name (str): The unique name of the table. Values are of the form @@ -152,6 +157,12 @@ class Table(proto.Message): Note one can still delete the data stored in the table through Data APIs. + automated_backup_policy (google.cloud.bigtable_admin_v2.types.Table.AutomatedBackupPolicy): + If specified, automated backups are enabled + for this table. Otherwise, automated backups are + disabled. + + This field is a member of `oneof`_ ``automated_backup_config``. """ class TimestampGranularity(proto.Enum): @@ -266,6 +277,31 @@ class ReplicationState(proto.Enum): message="EncryptionInfo", ) + class AutomatedBackupPolicy(proto.Message): + r"""Defines an automated backup policy for a table + + Attributes: + retention_period (google.protobuf.duration_pb2.Duration): + Required. How long the automated backups + should be retained. The only supported value at + this time is 3 days. + frequency (google.protobuf.duration_pb2.Duration): + Required. How frequently automated backups + should occur. The only supported value at this + time is 24 hours. + """ + + retention_period: duration_pb2.Duration = proto.Field( + proto.MESSAGE, + number=1, + message=duration_pb2.Duration, + ) + frequency: duration_pb2.Duration = proto.Field( + proto.MESSAGE, + number=2, + message=duration_pb2.Duration, + ) + name: str = proto.Field( proto.STRING, number=1, @@ -301,6 +337,137 @@ class ReplicationState(proto.Enum): proto.BOOL, number=9, ) + automated_backup_policy: AutomatedBackupPolicy = proto.Field( + proto.MESSAGE, + number=13, + oneof="automated_backup_config", + message=AutomatedBackupPolicy, + ) + + +class AuthorizedView(proto.Message): + r"""AuthorizedViews represent subsets of a particular Cloud + Bigtable table. Users can configure access to each Authorized + View independently from the table and use the existing Data APIs + to access the subset of data. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + name (str): + Identifier. The name of this AuthorizedView. Values are of + the form + ``projects/{project}/instances/{instance}/tables/{table}/authorizedViews/{authorized_view}`` + subset_view (google.cloud.bigtable_admin_v2.types.AuthorizedView.SubsetView): + An AuthorizedView permitting access to an + explicit subset of a Table. + + This field is a member of `oneof`_ ``authorized_view``. + etag (str): + The etag for this AuthorizedView. + If this is provided on update, it must match the + server's etag. The server returns ABORTED error + on a mismatched etag. + deletion_protection (bool): + Set to true to make the AuthorizedView + protected against deletion. The parent Table and + containing Instance cannot be deleted if an + AuthorizedView has this bit set. + """ + + class ResponseView(proto.Enum): + r"""Defines a subset of an AuthorizedView's fields. 
+ + Values: + RESPONSE_VIEW_UNSPECIFIED (0): + Uses the default view for each method as + documented in the request. + NAME_ONLY (1): + Only populates ``name``. + BASIC (2): + Only populates the AuthorizedView's basic metadata. This + includes: name, deletion_protection, etag. + FULL (3): + Populates every fields. + """ + RESPONSE_VIEW_UNSPECIFIED = 0 + NAME_ONLY = 1 + BASIC = 2 + FULL = 3 + + class FamilySubsets(proto.Message): + r"""Subsets of a column family that are included in this + AuthorizedView. + + Attributes: + qualifiers (MutableSequence[bytes]): + Individual exact column qualifiers to be + included in the AuthorizedView. + qualifier_prefixes (MutableSequence[bytes]): + Prefixes for qualifiers to be included in the + AuthorizedView. Every qualifier starting with + one of these prefixes is included in the + AuthorizedView. To provide access to all + qualifiers, include the empty string as a prefix + (""). + """ + + qualifiers: MutableSequence[bytes] = proto.RepeatedField( + proto.BYTES, + number=1, + ) + qualifier_prefixes: MutableSequence[bytes] = proto.RepeatedField( + proto.BYTES, + number=2, + ) + + class SubsetView(proto.Message): + r"""Defines a simple AuthorizedView that is a subset of the + underlying Table. + + Attributes: + row_prefixes (MutableSequence[bytes]): + Row prefixes to be included in the + AuthorizedView. To provide access to all rows, + include the empty string as a prefix (""). + family_subsets (MutableMapping[str, google.cloud.bigtable_admin_v2.types.AuthorizedView.FamilySubsets]): + Map from column family name to the columns in + this family to be included in the + AuthorizedView. + """ + + row_prefixes: MutableSequence[bytes] = proto.RepeatedField( + proto.BYTES, + number=1, + ) + family_subsets: MutableMapping[ + str, "AuthorizedView.FamilySubsets" + ] = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=2, + message="AuthorizedView.FamilySubsets", + ) + + name: str = proto.Field( + proto.STRING, + number=1, + ) + subset_view: SubsetView = proto.Field( + proto.MESSAGE, + number=2, + oneof="authorized_view", + message=SubsetView, + ) + etag: str = proto.Field( + proto.STRING, + number=3, + ) + deletion_protection: bool = proto.Field( + proto.BOOL, + number=4, + ) class ColumnFamily(proto.Message): @@ -316,6 +483,20 @@ class ColumnFamily(proto.Message): opportunistically in the background, and so it's possible for reads to return a cell even if it matches the active GC expression for its family. + value_type (google.cloud.bigtable_admin_v2.types.Type): + The type of data stored in each of this family's cell + values, including its full encoding. If omitted, the family + only serves raw untyped bytes. + + For now, only the ``Aggregate`` type is supported. + + ``Aggregate`` can only be set at family creation and is + immutable afterwards. 
+ + If ``value_type`` is ``Aggregate``, written data must be + compatible with: + + - ``value_type.input_type`` for ``AddInput`` mutations """ gc_rule: "GcRule" = proto.Field( @@ -323,6 +504,11 @@ class ColumnFamily(proto.Message): number=1, message="GcRule", ) + value_type: types.Type = proto.Field( + proto.MESSAGE, + number=3, + message=types.Type, + ) class GcRule(proto.Message): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/types.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/types.py new file mode 100644 index 000000000000..d57d1cdf3e79 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/types.py @@ -0,0 +1,267 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.bigtable.admin.v2", + manifest={ + "Type", + }, +) + + +class Type(proto.Message): + r"""``Type`` represents the type of data that is written to, read from, + or stored in Bigtable. It is heavily based on the GoogleSQL standard + to help maintain familiarity and consistency across products and + features. + + For compatibility with Bigtable's existing untyped APIs, each + ``Type`` includes an ``Encoding`` which describes how to convert + to/from the underlying data. This might involve composing a series + of steps into an "encoding chain," for example to convert from INT64 + -> STRING -> raw bytes. In most cases, a "link" in the encoding + chain will be based an on existing GoogleSQL conversion function + like ``CAST``. + + Each link in the encoding chain also defines the following + properties: + + - Natural sort: Does the encoded value sort consistently with the + original typed value? Note that Bigtable will always sort data + based on the raw encoded value, *not* the decoded type. + + - Example: STRING values sort in the same order as their UTF-8 + encodings. + - Counterexample: Encoding INT64 to a fixed-width STRING does + *not* preserve sort order when dealing with negative numbers. + INT64(1) > INT64(-1), but STRING("-00001") > STRING("00001). + - The overall encoding chain sorts naturally if *every* link + does. + + - Self-delimiting: If we concatenate two encoded values, can we + always tell where the first one ends and the second one begins? + + - Example: If we encode INT64s to fixed-width STRINGs, the first + value will always contain exactly N digits, possibly preceded + by a sign. + - Counterexample: If we concatenate two UTF-8 encoded STRINGs, + we have no way to tell where the first one ends. + - The overall encoding chain is self-delimiting if *any* link + is. + + - Compatibility: Which other systems have matching encoding + schemes? For example, does this encoding have a GoogleSQL + equivalent? HBase? Java? + + This message has `oneof`_ fields (mutually exclusive fields). 
+ For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + bytes_type (google.cloud.bigtable_admin_v2.types.Type.Bytes): + Bytes + + This field is a member of `oneof`_ ``kind``. + int64_type (google.cloud.bigtable_admin_v2.types.Type.Int64): + Int64 + + This field is a member of `oneof`_ ``kind``. + aggregate_type (google.cloud.bigtable_admin_v2.types.Type.Aggregate): + Aggregate + + This field is a member of `oneof`_ ``kind``. + """ + + class Bytes(proto.Message): + r"""Bytes Values of type ``Bytes`` are stored in ``Value.bytes_value``. + + Attributes: + encoding (google.cloud.bigtable_admin_v2.types.Type.Bytes.Encoding): + The encoding to use when converting to/from + lower level types. + """ + + class Encoding(proto.Message): + r"""Rules used to convert to/from lower level types. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + raw (google.cloud.bigtable_admin_v2.types.Type.Bytes.Encoding.Raw): + Use ``Raw`` encoding. + + This field is a member of `oneof`_ ``encoding``. + """ + + class Raw(proto.Message): + r"""Leaves the value "as-is" + + - Natural sort? Yes + - Self-delimiting? No + - Compatibility? N/A + + """ + + raw: "Type.Bytes.Encoding.Raw" = proto.Field( + proto.MESSAGE, + number=1, + oneof="encoding", + message="Type.Bytes.Encoding.Raw", + ) + + encoding: "Type.Bytes.Encoding" = proto.Field( + proto.MESSAGE, + number=1, + message="Type.Bytes.Encoding", + ) + + class Int64(proto.Message): + r"""Int64 Values of type ``Int64`` are stored in ``Value.int_value``. + + Attributes: + encoding (google.cloud.bigtable_admin_v2.types.Type.Int64.Encoding): + The encoding to use when converting to/from + lower level types. + """ + + class Encoding(proto.Message): + r"""Rules used to convert to/from lower level types. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + big_endian_bytes (google.cloud.bigtable_admin_v2.types.Type.Int64.Encoding.BigEndianBytes): + Use ``BigEndianBytes`` encoding. + + This field is a member of `oneof`_ ``encoding``. + """ + + class BigEndianBytes(proto.Message): + r"""Encodes the value as an 8-byte big endian twos complement ``Bytes`` + value. + + - Natural sort? No (positive values only) + - Self-delimiting? Yes + - Compatibility? + + - BigQuery Federation ``BINARY`` encoding + - HBase ``Bytes.toBytes`` + - Java ``ByteBuffer.putLong()`` with ``ByteOrder.BIG_ENDIAN`` + + Attributes: + bytes_type (google.cloud.bigtable_admin_v2.types.Type.Bytes): + The underlying ``Bytes`` type, which may be able to encode + further. + """ + + bytes_type: "Type.Bytes" = proto.Field( + proto.MESSAGE, + number=1, + message="Type.Bytes", + ) + + big_endian_bytes: "Type.Int64.Encoding.BigEndianBytes" = proto.Field( + proto.MESSAGE, + number=1, + oneof="encoding", + message="Type.Int64.Encoding.BigEndianBytes", + ) + + encoding: "Type.Int64.Encoding" = proto.Field( + proto.MESSAGE, + number=1, + message="Type.Int64.Encoding", + ) + + class Aggregate(proto.Message): + r"""A value that combines incremental updates into a summarized value. + + Data is never directly written or read using type ``Aggregate``. 
+ Writes will provide either the ``input_type`` or ``state_type``, and + reads will always return the ``state_type`` . + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + input_type (google.cloud.bigtable_admin_v2.types.Type): + Type of the inputs that are accumulated by this + ``Aggregate``, which must specify a full encoding. Use + ``AddInput`` mutations to accumulate new inputs. + state_type (google.cloud.bigtable_admin_v2.types.Type): + Output only. Type that holds the internal accumulator state + for the ``Aggregate``. This is a function of the + ``input_type`` and ``aggregator`` chosen, and will always + specify a full encoding. + sum (google.cloud.bigtable_admin_v2.types.Type.Aggregate.Sum): + Sum aggregator. + + This field is a member of `oneof`_ ``aggregator``. + """ + + class Sum(proto.Message): + r"""Computes the sum of the input values. Allowed input: ``Int64`` + State: same as input + + """ + + input_type: "Type" = proto.Field( + proto.MESSAGE, + number=1, + message="Type", + ) + state_type: "Type" = proto.Field( + proto.MESSAGE, + number=2, + message="Type", + ) + sum: "Type.Aggregate.Sum" = proto.Field( + proto.MESSAGE, + number=4, + oneof="aggregator", + message="Type.Aggregate.Sum", + ) + + bytes_type: Bytes = proto.Field( + proto.MESSAGE, + number=1, + oneof="kind", + message=Bytes, + ) + int64_type: Int64 = proto.Field( + proto.MESSAGE, + number=5, + oneof="kind", + message=Int64, + ) + aggregate_type: Aggregate = proto.Field( + proto.MESSAGE, + number=6, + oneof="kind", + message=Aggregate, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py index 80bd4ec09bf1..56748d882ba5 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -54,6 +54,7 @@ from .types.data import StreamContinuationTokens from .types.data import StreamPartition from .types.data import TimestampRange +from .types.data import Value from .types.data import ValueRange from .types.feature_flags import FeatureFlags from .types.request_stats import FullReadStatsView @@ -104,5 +105,6 @@ "StreamContinuationTokens", "StreamPartition", "TimestampRange", + "Value", "ValueRange", ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/__init__.py index 89a37dc92c5a..8f6cf068242c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
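
To make the new Type and ColumnFamily.value_type plumbing above concrete, here is a small sketch (not part of this patch) that declares a SUM-aggregate column family with Int64 inputs encoded as big-endian bytes, following the encoding chain described in types.py. The family is only a local message here; attaching it to a table is left out.

# Sketch only: a ColumnFamily whose value_type is a SUM Aggregate over Int64
# values encoded as 8-byte big-endian Bytes (the encoding chain defined above).
from google.cloud.bigtable_admin_v2.types import table as gba_table
from google.cloud.bigtable_admin_v2.types import types as gba_types

int64_big_endian = gba_types.Type(
    int64_type=gba_types.Type.Int64(
        encoding=gba_types.Type.Int64.Encoding(
            big_endian_bytes=gba_types.Type.Int64.Encoding.BigEndianBytes(
                bytes_type=gba_types.Type.Bytes(
                    encoding=gba_types.Type.Bytes.Encoding(
                        raw=gba_types.Type.Bytes.Encoding.Raw(),
                    ),
                ),
            ),
        ),
    ),
)

# Per the ColumnFamily docstring above, only Aggregate value_types are
# currently supported, and they must be set at family creation time.
counter_family = gba_table.ColumnFamily(
    value_type=gba_types.Type(
        aggregate_type=gba_types.Type.Aggregate(
            input_type=int64_big_endian,
            sum=gba_types.Type.Aggregate.Sum(),
        ),
    ),
)
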
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/__init__.py index f10a68e5bc68..191b24851d83 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py index 0421e19bcd2f..70daa63e3e75 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,12 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. # -import functools from collections import OrderedDict import functools import re from typing import ( Dict, + Callable, Mapping, MutableMapping, MutableSequence, @@ -67,6 +67,8 @@ class BigtableAsyncClient: _DEFAULT_ENDPOINT_TEMPLATE = BigtableClient._DEFAULT_ENDPOINT_TEMPLATE _DEFAULT_UNIVERSE = BigtableClient._DEFAULT_UNIVERSE + authorized_view_path = staticmethod(BigtableClient.authorized_view_path) + parse_authorized_view_path = staticmethod(BigtableClient.parse_authorized_view_path) instance_path = staticmethod(BigtableClient.instance_path) parse_instance_path = staticmethod(BigtableClient.parse_instance_path) table_path = staticmethod(BigtableClient.table_path) @@ -193,7 +195,9 @@ def __init__( self, *, credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, BigtableTransport] = "grpc_asyncio", + transport: Optional[ + Union[str, BigtableTransport, Callable[..., BigtableTransport]] + ] = "grpc_asyncio", client_options: Optional[ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: @@ -205,9 +209,11 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - transport (Union[str, ~.BigtableTransport]): The - transport to use. If set to None, a transport is chosen - automatically. + transport (Optional[Union[str,BigtableTransport,Callable[..., BigtableTransport]]]): + The transport to use, or a Callable that constructs and returns a new transport to use. + If a Callable is given, it will be called with the same set of initialization + arguments as used in the BigtableTransport constructor. + If set to None, a transport is chosen automatically. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. @@ -273,8 +279,10 @@ def read_rows( The request object. Request message for Bigtable.ReadRows. table_name (:class:`str`): - Required. The unique name of the table from which to - read. Values are of the form + Optional. 
The unique name of the table from which to
+                read.
+
+                Values are of the form
                ``projects/<project>/instances/<instance>/tables/<table>
``. This corresponds to the ``table_name`` field @@ -302,8 +310,8 @@ def read_rows( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([table_name, app_profile_id]) if request is not None and has_flattened_params: raise ValueError( @@ -311,6 +319,8 @@ def read_rows( "the individual field arguments should be set." ) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable.ReadRowsRequest): request = bigtable.ReadRowsRequest(request) @@ -326,6 +336,7 @@ def read_rows( rpc = self._client._transport._wrapped_methods[ self._client._transport.read_rows ] + # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( @@ -369,8 +380,10 @@ def sample_row_keys( The request object. Request message for Bigtable.SampleRowKeys. table_name (:class:`str`): - Required. The unique name of the table from which to - sample row keys. Values are of the form + Optional. The unique name of the table from which to + sample row keys. + + Values are of the form ``projects//instances//tables/
``. This corresponds to the ``table_name`` field @@ -398,8 +411,8 @@ def sample_row_keys( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([table_name, app_profile_id]) if request is not None and has_flattened_params: raise ValueError( @@ -407,6 +420,8 @@ def sample_row_keys( "the individual field arguments should be set." ) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable.SampleRowKeysRequest): request = bigtable.SampleRowKeysRequest(request) @@ -422,6 +437,7 @@ def sample_row_keys( rpc = self._client._transport._wrapped_methods[ self._client._transport.sample_row_keys ] + # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( @@ -464,8 +480,10 @@ async def mutate_row( The request object. Request message for Bigtable.MutateRow. table_name (:class:`str`): - Required. The unique name of the table to which the - mutation should be applied. Values are of the form + Optional. The unique name of the table to which the + mutation should be applied. + + Values are of the form ``projects//instances//tables/
``. This corresponds to the ``table_name`` field @@ -511,8 +529,8 @@ async def mutate_row( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([table_name, row_key, mutations, app_profile_id]) if request is not None and has_flattened_params: raise ValueError( @@ -520,6 +538,8 @@ async def mutate_row( "the individual field arguments should be set." ) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable.MutateRowRequest): request = bigtable.MutateRowRequest(request) @@ -582,9 +602,11 @@ def mutate_rows( The request object. Request message for BigtableService.MutateRows. table_name (:class:`str`): - Required. The unique name of the - table to which the mutations should be - applied. + Optional. The unique name of the table to which the + mutations should be applied. + + Values are of the form + ``projects//instances//tables/
``. This corresponds to the ``table_name`` field on the ``request`` instance; if ``request`` is provided, this @@ -625,8 +647,8 @@ def mutate_rows( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([table_name, entries, app_profile_id]) if request is not None and has_flattened_params: raise ValueError( @@ -634,6 +656,8 @@ def mutate_rows( "the individual field arguments should be set." ) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable.MutateRowsRequest): request = bigtable.MutateRowsRequest(request) @@ -696,9 +720,10 @@ async def check_and_mutate_row( The request object. Request message for Bigtable.CheckAndMutateRow. table_name (:class:`str`): - Required. The unique name of the table to which the - conditional mutation should be applied. Values are of - the form + Optional. The unique name of the table to which the + conditional mutation should be applied. + + Values are of the form ``projects//instances//tables/
``. This corresponds to the ``table_name`` field @@ -766,8 +791,8 @@ async def check_and_mutate_row( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any( [ table_name, @@ -784,6 +809,8 @@ async def check_and_mutate_row( "the individual field arguments should be set." ) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable.CheckAndMutateRowRequest): request = bigtable.CheckAndMutateRowRequest(request) @@ -879,8 +906,8 @@ async def ping_and_warm( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([name, app_profile_id]) if request is not None and has_flattened_params: raise ValueError( @@ -888,6 +915,8 @@ async def ping_and_warm( "the individual field arguments should be set." ) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable.PingAndWarmRequest): request = bigtable.PingAndWarmRequest(request) @@ -949,9 +978,10 @@ async def read_modify_write_row( The request object. Request message for Bigtable.ReadModifyWriteRow. table_name (:class:`str`): - Required. The unique name of the table to which the - read/modify/write rules should be applied. Values are of - the form + Optional. The unique name of the table to which the + read/modify/write rules should be applied. + + Values are of the form ``projects//instances//tables/
``. This corresponds to the ``table_name`` field @@ -998,8 +1028,8 @@ async def read_modify_write_row( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([table_name, row_key, rules, app_profile_id]) if request is not None and has_flattened_params: raise ValueError( @@ -1007,6 +1037,8 @@ async def read_modify_write_row( "the individual field arguments should be set." ) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable.ReadModifyWriteRowRequest): request = bigtable.ReadModifyWriteRowRequest(request) @@ -1108,8 +1140,8 @@ def generate_initial_change_stream_partitions( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([table_name, app_profile_id]) if request is not None and has_flattened_params: raise ValueError( @@ -1117,6 +1149,8 @@ def generate_initial_change_stream_partitions( "the individual field arguments should be set." ) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance( request, bigtable.GenerateInitialChangeStreamPartitionsRequest ): @@ -1131,11 +1165,9 @@ def generate_initial_change_stream_partitions( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.generate_initial_change_stream_partitions, - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.generate_initial_change_stream_partitions + ] # Certain fields should be provided within the metadata header; # add these here. @@ -1212,8 +1244,8 @@ def read_change_stream( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([table_name, app_profile_id]) if request is not None and has_flattened_params: raise ValueError( @@ -1221,6 +1253,8 @@ def read_change_stream( "the individual field arguments should be set." ) + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable.ReadChangeStreamRequest): request = bigtable.ReadChangeStreamRequest(request) @@ -1233,11 +1267,9 @@ def read_change_stream( # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.read_change_stream, - default_timeout=43200.0, - client_info=DEFAULT_CLIENT_INFO, - ) + rpc = self._client._transport._wrapped_methods[ + self._client._transport.read_change_stream + ] # Certain fields should be provided within the metadata header; # add these here. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py index f53f25e90884..7eda705b9382 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -18,6 +18,7 @@ import re from typing import ( Dict, + Callable, Mapping, MutableMapping, MutableSequence, @@ -185,6 +186,30 @@ def transport(self) -> BigtableTransport: """ return self._transport + @staticmethod + def authorized_view_path( + project: str, + instance: str, + table: str, + authorized_view: str, + ) -> str: + """Returns a fully-qualified authorized_view string.""" + return "projects/{project}/instances/{instance}/tables/{table}/authorizedViews/{authorized_view}".format( + project=project, + instance=instance, + table=table, + authorized_view=authorized_view, + ) + + @staticmethod + def parse_authorized_view_path(path: str) -> Dict[str, str]: + """Parses a authorized_view path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/instances/(?P.+?)/tables/(?P
.+?)/authorizedViews/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def instance_path( project: str, @@ -549,7 +574,9 @@ def __init__( self, *, credentials: Optional[ga_credentials.Credentials] = None, - transport: Optional[Union[str, BigtableTransport]] = None, + transport: Optional[ + Union[str, BigtableTransport, Callable[..., BigtableTransport]] + ] = None, client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: @@ -561,9 +588,11 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - transport (Union[str, BigtableTransport]): The - transport to use. If set to None, a transport is chosen - automatically. + transport (Optional[Union[str,BigtableTransport,Callable[..., BigtableTransport]]]): + The transport to use, or a Callable that constructs and returns a new transport. + If a Callable is given, it will be called with the same set of initialization + arguments as used in the BigtableTransport constructor. + If set to None, a transport is chosen automatically. client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. @@ -669,8 +698,15 @@ def __init__( api_key_value ) - Transport = type(self).get_transport_class(cast(str, transport)) - self._transport = Transport( + transport_init: Union[ + Type[BigtableTransport], Callable[..., BigtableTransport] + ] = ( + type(self).get_transport_class(transport) + if isinstance(transport, str) or transport is None + else cast(Callable[..., BigtableTransport], transport) + ) + # initialize with the provided callable or the passed in class + self._transport = transport_init( credentials=credentials, credentials_file=self._client_options.credentials_file, host=self._api_endpoint, @@ -704,8 +740,10 @@ def read_rows( The request object. Request message for Bigtable.ReadRows. table_name (str): - Required. The unique name of the table from which to - read. Values are of the form + Optional. The unique name of the table from which to + read. + + Values are of the form ``projects//instances//tables/
``. This corresponds to the ``table_name`` field @@ -733,8 +771,8 @@ def read_rows( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([table_name, app_profile_id]) if request is not None and has_flattened_params: raise ValueError( @@ -742,10 +780,8 @@ def read_rows( "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable.ReadRowsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable.ReadRowsRequest): request = bigtable.ReadRowsRequest(request) # If we have keyword arguments corresponding to fields on the @@ -771,6 +807,15 @@ def read_rows( if request.app_profile_id: header_params["app_profile_id"] = request.app_profile_id + routing_param_regex = re.compile( + "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+/authorizedViews/[^/]+)$" + ) + regex_match = routing_param_regex.match(request.authorized_view_name) + if regex_match and regex_match.group("authorized_view_name"): + header_params["authorized_view_name"] = regex_match.group( + "authorized_view_name" + ) + if header_params: metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata(header_params), @@ -811,8 +856,10 @@ def sample_row_keys( The request object. Request message for Bigtable.SampleRowKeys. table_name (str): - Required. The unique name of the table from which to - sample row keys. Values are of the form + Optional. The unique name of the table from which to + sample row keys. + + Values are of the form ``projects//instances//tables/
``. This corresponds to the ``table_name`` field @@ -840,8 +887,8 @@ def sample_row_keys( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([table_name, app_profile_id]) if request is not None and has_flattened_params: raise ValueError( @@ -849,10 +896,8 @@ def sample_row_keys( "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable.SampleRowKeysRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable.SampleRowKeysRequest): request = bigtable.SampleRowKeysRequest(request) # If we have keyword arguments corresponding to fields on the @@ -878,6 +923,15 @@ def sample_row_keys( if request.app_profile_id: header_params["app_profile_id"] = request.app_profile_id + routing_param_regex = re.compile( + "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+/authorizedViews/[^/]+)$" + ) + regex_match = routing_param_regex.match(request.authorized_view_name) + if regex_match and regex_match.group("authorized_view_name"): + header_params["authorized_view_name"] = regex_match.group( + "authorized_view_name" + ) + if header_params: metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata(header_params), @@ -917,8 +971,10 @@ def mutate_row( The request object. Request message for Bigtable.MutateRow. table_name (str): - Required. The unique name of the table to which the - mutation should be applied. Values are of the form + Optional. The unique name of the table to which the + mutation should be applied. + + Values are of the form ``projects//instances//tables/
``. This corresponds to the ``table_name`` field @@ -964,8 +1020,8 @@ def mutate_row( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([table_name, row_key, mutations, app_profile_id]) if request is not None and has_flattened_params: raise ValueError( @@ -973,10 +1029,8 @@ def mutate_row( "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable.MutateRowRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable.MutateRowRequest): request = bigtable.MutateRowRequest(request) # If we have keyword arguments corresponding to fields on the @@ -1006,6 +1060,15 @@ def mutate_row( if request.app_profile_id: header_params["app_profile_id"] = request.app_profile_id + routing_param_regex = re.compile( + "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+/authorizedViews/[^/]+)$" + ) + regex_match = routing_param_regex.match(request.authorized_view_name) + if regex_match and regex_match.group("authorized_view_name"): + header_params["authorized_view_name"] = regex_match.group( + "authorized_view_name" + ) + if header_params: metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata(header_params), @@ -1045,9 +1108,11 @@ def mutate_rows( The request object. Request message for BigtableService.MutateRows. table_name (str): - Required. The unique name of the - table to which the mutations should be - applied. + Optional. The unique name of the table to which the + mutations should be applied. + + Values are of the form + ``projects//instances//tables/
``. This corresponds to the ``table_name`` field on the ``request`` instance; if ``request`` is provided, this @@ -1088,8 +1153,8 @@ def mutate_rows( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([table_name, entries, app_profile_id]) if request is not None and has_flattened_params: raise ValueError( @@ -1097,10 +1162,8 @@ def mutate_rows( "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable.MutateRowsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable.MutateRowsRequest): request = bigtable.MutateRowsRequest(request) # If we have keyword arguments corresponding to fields on the @@ -1128,6 +1191,15 @@ def mutate_rows( if request.app_profile_id: header_params["app_profile_id"] = request.app_profile_id + routing_param_regex = re.compile( + "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+/authorizedViews/[^/]+)$" + ) + regex_match = routing_param_regex.match(request.authorized_view_name) + if regex_match and regex_match.group("authorized_view_name"): + header_params["authorized_view_name"] = regex_match.group( + "authorized_view_name" + ) + if header_params: metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata(header_params), @@ -1169,9 +1241,10 @@ def check_and_mutate_row( The request object. Request message for Bigtable.CheckAndMutateRow. table_name (str): - Required. The unique name of the table to which the - conditional mutation should be applied. Values are of - the form + Optional. The unique name of the table to which the + conditional mutation should be applied. + + Values are of the form ``projects//instances//tables/
``. This corresponds to the ``table_name`` field @@ -1239,8 +1312,8 @@ def check_and_mutate_row( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any( [ table_name, @@ -1257,10 +1330,8 @@ def check_and_mutate_row( "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable.CheckAndMutateRowRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable.CheckAndMutateRowRequest): request = bigtable.CheckAndMutateRowRequest(request) # If we have keyword arguments corresponding to fields on the @@ -1294,6 +1365,15 @@ def check_and_mutate_row( if request.app_profile_id: header_params["app_profile_id"] = request.app_profile_id + routing_param_regex = re.compile( + "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+/authorizedViews/[^/]+)$" + ) + regex_match = routing_param_regex.match(request.authorized_view_name) + if regex_match and regex_match.group("authorized_view_name"): + header_params["authorized_view_name"] = regex_match.group( + "authorized_view_name" + ) + if header_params: metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata(header_params), @@ -1362,8 +1442,8 @@ def ping_and_warm( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([name, app_profile_id]) if request is not None and has_flattened_params: raise ValueError( @@ -1371,10 +1451,8 @@ def ping_and_warm( "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable.PingAndWarmRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable.PingAndWarmRequest): request = bigtable.PingAndWarmRequest(request) # If we have keyword arguments corresponding to fields on the @@ -1442,9 +1520,10 @@ def read_modify_write_row( The request object. Request message for Bigtable.ReadModifyWriteRow. table_name (str): - Required. The unique name of the table to which the - read/modify/write rules should be applied. Values are of - the form + Optional. The unique name of the table to which the + read/modify/write rules should be applied. + + Values are of the form ``projects//instances//tables/
``. This corresponds to the ``table_name`` field @@ -1491,8 +1570,8 @@ def read_modify_write_row( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([table_name, row_key, rules, app_profile_id]) if request is not None and has_flattened_params: raise ValueError( @@ -1500,10 +1579,8 @@ def read_modify_write_row( "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable.ReadModifyWriteRowRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable.ReadModifyWriteRowRequest): request = bigtable.ReadModifyWriteRowRequest(request) # If we have keyword arguments corresponding to fields on the @@ -1533,6 +1610,15 @@ def read_modify_write_row( if request.app_profile_id: header_params["app_profile_id"] = request.app_profile_id + routing_param_regex = re.compile( + "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+/authorizedViews/[^/]+)$" + ) + regex_match = routing_param_regex.match(request.authorized_view_name) + if regex_match and regex_match.group("authorized_view_name"): + header_params["authorized_view_name"] = regex_match.group( + "authorized_view_name" + ) + if header_params: metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata(header_params), @@ -1609,8 +1695,8 @@ def generate_initial_change_stream_partitions( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([table_name, app_profile_id]) if request is not None and has_flattened_params: raise ValueError( @@ -1618,10 +1704,8 @@ def generate_initial_change_stream_partitions( "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable.GenerateInitialChangeStreamPartitionsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance( request, bigtable.GenerateInitialChangeStreamPartitionsRequest ): @@ -1714,8 +1798,8 @@ def read_change_stream( """ # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. has_flattened_params = any([table_name, app_profile_id]) if request is not None and has_flattened_params: raise ValueError( @@ -1723,10 +1807,8 @@ def read_change_stream( "the individual field arguments should be set." ) - # Minor optimization to avoid making a copy if the user passes - # in a bigtable.ReadChangeStreamRequest. 
- # There's no risk of modifying the input as we've already verified - # there are no flattened fields. + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. if not isinstance(request, bigtable.ReadChangeStreamRequest): request = bigtable.ReadChangeStreamRequest(request) # If we have keyword arguments corresponding to fields on the diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py index 6a9eb0e5888b..ae5c1cf7281b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py index 7d1475eb9fbc..d93379723af0 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py index bec9c85f110a..2a1a9a284f0c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -51,7 +51,7 @@ def __init__( credentials: Optional[ga_credentials.Credentials] = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, - channel: Optional[grpc.Channel] = None, + channel: Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]] = None, api_mtls_endpoint: Optional[str] = None, client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, @@ -71,14 +71,17 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - This argument is ignored if ``channel`` is provided. + This argument is ignored if a ``channel`` instance is provided. credentials_file (Optional[str]): A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. + This argument is ignored if a ``channel`` instance is provided. scopes (Optional(Sequence[str])): A list of scopes. 
This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. + ignored if a ``channel`` instance is provided. + channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]): + A ``Channel`` instance through which to make calls, or a Callable + that constructs and returns one. If set to None, ``self.create_channel`` + is used to create the channel. If a Callable is given, it will be called + with the same arguments as used in ``self.create_channel``. api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. If provided, it overrides the ``host`` argument and tries to create a mutual TLS channel with client SSL credentials from @@ -88,11 +91,11 @@ def __init__( private key bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. + for the grpc channel. It is ignored if a ``channel`` instance is provided. client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): A callback to provide client certificate bytes and private key bytes, both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + ignored if a ``channel`` instance or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. client_info (google.api_core.gapic_v1.client_info.ClientInfo): @@ -118,7 +121,7 @@ def __init__( if client_cert_source: warnings.warn("client_cert_source is deprecated", DeprecationWarning) - if channel: + if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. credentials = False # If a channel was explicitly provided, set it. @@ -159,7 +162,9 @@ def __init__( ) if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( + # initialize with the provided callable or the default channel + channel_init = channel or type(self).create_channel + self._grpc_channel = channel_init( self._host, # use the credentials which are saved credentials=self._credentials, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py index 7765ecce81ff..2d04f79af2c8 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -19,7 +19,7 @@ from google.api_core import gapic_v1 from google.api_core import grpc_helpers_async from google.api_core import exceptions as core_exceptions -from google.api_core import retry as retries +from google.api_core import retry_async as retries from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore @@ -68,7 +68,6 @@ def create_channel( the credentials from the environment. credentials_file (Optional[str]): A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. 
- This argument is ignored if ``channel`` is provided. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -98,7 +97,7 @@ def __init__( credentials: Optional[ga_credentials.Credentials] = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, - channel: Optional[aio.Channel] = None, + channel: Optional[Union[aio.Channel, Callable[..., aio.Channel]]] = None, api_mtls_endpoint: Optional[str] = None, client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, @@ -118,15 +117,18 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - This argument is ignored if ``channel`` is provided. + This argument is ignored if a ``channel`` instance is provided. credentials_file (Optional[str]): A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. + This argument is ignored if a ``channel`` instance is provided. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. + channel (Optional[Union[aio.Channel, Callable[..., aio.Channel]]]): + A ``Channel`` instance through which to make calls, or a Callable + that constructs and returns one. If set to None, ``self.create_channel`` + is used to create the channel. If a Callable is given, it will be called + with the same arguments as used in ``self.create_channel``. api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. If provided, it overrides the ``host`` argument and tries to create a mutual TLS channel with client SSL credentials from @@ -136,11 +138,11 @@ def __init__( private key bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. + for the grpc channel. It is ignored if a ``channel`` instance is provided. client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): A callback to provide client certificate bytes and private key bytes, both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + ignored if a ``channel`` instance or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. client_info (google.api_core.gapic_v1.client_info.ClientInfo): @@ -166,7 +168,7 @@ def __init__( if client_cert_source: warnings.warn("client_cert_source is deprecated", DeprecationWarning) - if channel: + if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. credentials = False # If a channel was explicitly provided, set it. 
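Both the sync and asyncio transports above now accept either a ready-made channel or a factory callable: a ``Channel`` instance short-circuits credential handling, while a callable (or the default ``create_channel``) is invoked with the transport's own arguments. The sketch below isolates that dispatch; the helper name and the use of ``grpc.insecure_channel`` as a stand-in factory are illustrative and not part of this patch.

```python
# Minimal sketch of the channel-or-callable dispatch, assuming plain grpc.
from typing import Callable, Optional, Union

import grpc


def make_transport_channel(
    host: str,
    channel: Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]] = None,
) -> grpc.Channel:
    """Return a usable channel, mirroring the transports' new behavior."""
    if isinstance(channel, grpc.Channel):
        # A concrete channel was supplied; credential arguments would be ignored.
        return channel
    # Otherwise call the provided factory, falling back to a default one.
    factory = channel or grpc.insecure_channel
    return factory(host)


# Usage: pass a factory so the channel is created with the caller's arguments.
ch = make_transport_channel("bigtable.googleapis.com:443", channel=grpc.insecure_channel)
```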
@@ -206,7 +208,9 @@ def __init__( ) if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( + # initialize with the provided callable or the default channel + channel_init = channel or type(self).create_channel + self._grpc_channel = channel_init( self._host, # use the credentials which are saved credentials=self._credentials, @@ -515,7 +519,7 @@ def read_change_stream( return self._stubs["read_change_stream"] def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. + """Precompute the wrapped methods, overriding the base class method to use async wrappers.""" self._wrapped_methods = { self.read_rows: gapic_v1.method_async.wrap_method( self.read_rows, @@ -529,7 +533,7 @@ def _prep_wrapped_messages(self, client_info): ), self.mutate_row: gapic_v1.method_async.wrap_method( self.mutate_row, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=0.01, maximum=60.0, multiplier=2, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py index d77291a650ea..a4d8e0ce9681 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py @@ -490,6 +490,11 @@ def __call__( "uri": "/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow", "body": "*", }, + { + "method": "post", + "uri": "/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:checkAndMutateRow", + "body": "*", + }, ] request, metadata = self._interceptor.pre_check_and_mutate_row( request, metadata @@ -695,6 +700,11 @@ def __call__( "uri": "/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow", "body": "*", }, + { + "method": "post", + "uri": "/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:mutateRow", + "body": "*", + }, ] request, metadata = self._interceptor.pre_mutate_row(request, metadata) pb_request = bigtable.MutateRowRequest.pb(request) @@ -790,6 +800,11 @@ def __call__( "uri": "/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows", "body": "*", }, + { + "method": "post", + "uri": "/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:mutateRows", + "body": "*", + }, ] request, metadata = self._interceptor.pre_mutate_rows(request, metadata) pb_request = bigtable.MutateRowsRequest.pb(request) @@ -1078,6 +1093,11 @@ def __call__( "uri": "/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow", "body": "*", }, + { + "method": "post", + "uri": "/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:readModifyWriteRow", + "body": "*", + }, ] request, metadata = self._interceptor.pre_read_modify_write_row( request, metadata @@ -1132,16 +1152,6 @@ class _ReadRows(BigtableRestStub): def __hash__(self): return hash("ReadRows") - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - def __call__( self, request: bigtable.ReadRowsRequest, @@ -1175,6 +1185,11 @@ def __call__( "uri": "/v2/{table_name=projects/*/instances/*/tables/*}:readRows", "body": "*", }, + { + "method": "post", + "uri": "/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:readRows", + "body": 
"*", + }, ] request, metadata = self._interceptor.pre_read_rows(request, metadata) pb_request = bigtable.ReadRowsRequest.pb(request) @@ -1195,7 +1210,6 @@ def __call__( use_integers_for_enums=True, ) ) - query_params.update(self._get_unset_required_fields(query_params)) query_params["$alt"] = "json;enum-encoding=int" @@ -1224,16 +1238,6 @@ class _SampleRowKeys(BigtableRestStub): def __hash__(self): return hash("SampleRowKeys") - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } - def __call__( self, request: bigtable.SampleRowKeysRequest, @@ -1266,6 +1270,10 @@ def __call__( "method": "get", "uri": "/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys", }, + { + "method": "get", + "uri": "/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:sampleRowKeys", + }, ] request, metadata = self._interceptor.pre_sample_row_keys(request, metadata) pb_request = bigtable.SampleRowKeysRequest.pb(request) @@ -1281,7 +1289,6 @@ def __call__( use_integers_for_enums=True, ) ) - query_params.update(self._get_unset_required_fields(query_params)) query_params["$alt"] = "json;enum-encoding=int" diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py index f266becb9b11..a7961a9107af 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -49,6 +49,7 @@ StreamContinuationTokens, StreamPartition, TimestampRange, + Value, ValueRange, ) from .feature_flags import ( @@ -98,6 +99,7 @@ "StreamContinuationTokens", "StreamPartition", "TimestampRange", + "Value", "ValueRange", "FeatureFlags", "FullReadStatsView", diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py index 57f8064085f4..fa6c566a2348 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -58,9 +58,16 @@ class ReadRowsRequest(proto.Message): Attributes: table_name (str): - Required. The unique name of the table from which to read. + Optional. The unique name of the table from which to read. + Values are of the form ``projects//instances//tables/
``. + authorized_view_name (str): + Optional. The unique name of the AuthorizedView from which + to read. + + Values are of the form + ``projects//instances//tables/
/authorizedViews/``. app_profile_id (str): This value specifies routing for replication. If not specified, the "default" application @@ -123,6 +130,10 @@ class RequestStatsView(proto.Enum): proto.STRING, number=1, ) + authorized_view_name: str = proto.Field( + proto.STRING, + number=9, + ) app_profile_id: str = proto.Field( proto.STRING, number=5, @@ -327,9 +338,17 @@ class SampleRowKeysRequest(proto.Message): Attributes: table_name (str): - Required. The unique name of the table from which to sample - row keys. Values are of the form + Optional. The unique name of the table from which to sample + row keys. + + Values are of the form ``projects//instances//tables/
``. + authorized_view_name (str): + Optional. The unique name of the AuthorizedView from which + to sample row keys. + + Values are of the form + ``projects//instances//tables/
/authorizedViews/``. app_profile_id (str): This value specifies routing for replication. If not specified, the "default" application @@ -340,6 +359,10 @@ class SampleRowKeysRequest(proto.Message): proto.STRING, number=1, ) + authorized_view_name: str = proto.Field( + proto.STRING, + number=4, + ) app_profile_id: str = proto.Field( proto.STRING, number=2, @@ -385,9 +408,17 @@ class MutateRowRequest(proto.Message): Attributes: table_name (str): - Required. The unique name of the table to which the mutation - should be applied. Values are of the form + Optional. The unique name of the table to which the mutation + should be applied. + + Values are of the form ``projects//instances//tables/
``. + authorized_view_name (str): + Optional. The unique name of the AuthorizedView to which the + mutation should be applied. + + Values are of the form + ``projects//instances//tables/
/authorizedViews/``. app_profile_id (str): This value specifies routing for replication. If not specified, the "default" application @@ -407,6 +438,10 @@ class MutateRowRequest(proto.Message): proto.STRING, number=1, ) + authorized_view_name: str = proto.Field( + proto.STRING, + number=6, + ) app_profile_id: str = proto.Field( proto.STRING, number=4, @@ -431,8 +466,17 @@ class MutateRowsRequest(proto.Message): Attributes: table_name (str): - Required. The unique name of the table to - which the mutations should be applied. + Optional. The unique name of the table to which the + mutations should be applied. + + Values are of the form + ``projects//instances//tables/
``. + authorized_view_name (str): + Optional. The unique name of the AuthorizedView to which the + mutations should be applied. + + Values are of the form + ``projects//instances//tables/
/authorizedViews/``. app_profile_id (str): This value specifies routing for replication. If not specified, the "default" application @@ -476,6 +520,10 @@ class Entry(proto.Message): proto.STRING, number=1, ) + authorized_view_name: str = proto.Field( + proto.STRING, + number=5, + ) app_profile_id: str = proto.Field( proto.STRING, number=3, @@ -587,10 +635,17 @@ class CheckAndMutateRowRequest(proto.Message): Attributes: table_name (str): - Required. The unique name of the table to which the - conditional mutation should be applied. Values are of the - form + Optional. The unique name of the table to which the + conditional mutation should be applied. + + Values are of the form ``projects//instances//tables/
``. + authorized_view_name (str): + Optional. The unique name of the AuthorizedView to which the + conditional mutation should be applied. + + Values are of the form + ``projects//instances//tables/
/authorizedViews/``. app_profile_id (str): This value specifies routing for replication. If not specified, the "default" application @@ -624,6 +679,10 @@ class CheckAndMutateRowRequest(proto.Message): proto.STRING, number=1, ) + authorized_view_name: str = proto.Field( + proto.STRING, + number=9, + ) app_profile_id: str = proto.Field( proto.STRING, number=7, @@ -700,10 +759,17 @@ class ReadModifyWriteRowRequest(proto.Message): Attributes: table_name (str): - Required. The unique name of the table to which the - read/modify/write rules should be applied. Values are of the - form + Optional. The unique name of the table to which the + read/modify/write rules should be applied. + + Values are of the form ``projects//instances//tables/
``. + authorized_view_name (str): + Optional. The unique name of the AuthorizedView to which the + read/modify/write rules should be applied. + + Values are of the form + ``projects//instances//tables/
/authorizedViews/``. app_profile_id (str): This value specifies routing for replication. If not specified, the "default" application @@ -723,6 +789,10 @@ class ReadModifyWriteRowRequest(proto.Message): proto.STRING, number=1, ) + authorized_view_name: str = proto.Field( + proto.STRING, + number=6, + ) app_profile_id: str = proto.Field( proto.STRING, number=4, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py index e37644a761b7..b2b853c64d80 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -27,6 +27,7 @@ "Family", "Column", "Cell", + "Value", "RowRange", "RowSet", "ColumnRange", @@ -164,6 +165,54 @@ class Cell(proto.Message): ) +class Value(proto.Message): + r"""``Value`` represents a dynamically typed value. The typed fields in + ``Value`` are used as a transport encoding for the actual value + (which may be of a more complex type). See the documentation of the + ``Type`` message for more details. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + raw_value (bytes): + Represents a raw byte sequence with no type information. The + ``type`` field must be omitted. + + This field is a member of `oneof`_ ``kind``. + raw_timestamp_micros (int): + Represents a raw cell timestamp with no type information. + The ``type`` field must be omitted. + + This field is a member of `oneof`_ ``kind``. + int_value (int): + Represents a typed value transported as an integer. Default + type for writes: ``Int64`` + + This field is a member of `oneof`_ ``kind``. + """ + + raw_value: bytes = proto.Field( + proto.BYTES, + number=8, + oneof="kind", + ) + raw_timestamp_micros: int = proto.Field( + proto.INT64, + number=9, + oneof="kind", + ) + int_value: int = proto.Field( + proto.INT64, + number=6, + oneof="kind", + ) + + class RowRange(proto.Message): r"""Specifies a contiguous range of rows. @@ -853,6 +902,10 @@ class Mutation(proto.Message): set_cell (google.cloud.bigtable_v2.types.Mutation.SetCell): Set a cell's value. + This field is a member of `oneof`_ ``mutation``. + add_to_cell (google.cloud.bigtable_v2.types.Mutation.AddToCell): + Incrementally updates an ``Aggregate`` cell. + This field is a member of `oneof`_ ``mutation``. delete_from_column (google.cloud.bigtable_v2.types.Mutation.DeleteFromColumn): Deletes cells from a column. @@ -909,6 +962,48 @@ class SetCell(proto.Message): number=4, ) + class AddToCell(proto.Message): + r"""A Mutation which incrementally updates a cell in an ``Aggregate`` + family. + + Attributes: + family_name (str): + The name of the ``Aggregate`` family into which new data + should be added. This must be a family with a ``value_type`` + of ``Aggregate``. Format: ``[-_.a-zA-Z0-9]+`` + column_qualifier (google.cloud.bigtable_v2.types.Value): + The qualifier of the column into which new data should be + added. This must be a ``raw_value``. 
+ timestamp (google.cloud.bigtable_v2.types.Value): + The timestamp of the cell to which new data should be added. + This must be a ``raw_timestamp_micros`` that matches the + table's ``granularity``. + input (google.cloud.bigtable_v2.types.Value): + The input value to be accumulated into the specified cell. + This must be compatible with the family's + ``value_type.input_type``. + """ + + family_name: str = proto.Field( + proto.STRING, + number=1, + ) + column_qualifier: "Value" = proto.Field( + proto.MESSAGE, + number=2, + message="Value", + ) + timestamp: "Value" = proto.Field( + proto.MESSAGE, + number=3, + message="Value", + ) + input: "Value" = proto.Field( + proto.MESSAGE, + number=4, + message="Value", + ) + class DeleteFromColumn(proto.Message): r"""A Mutation which deletes cells from the specified column, optionally restricting the deletions to a given timestamp range. @@ -964,6 +1059,12 @@ class DeleteFromRow(proto.Message): oneof="mutation", message=SetCell, ) + add_to_cell: AddToCell = proto.Field( + proto.MESSAGE, + number=5, + oneof="mutation", + message=AddToCell, + ) delete_from_column: DeleteFromColumn = proto.Field( proto.MESSAGE, number=2, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/feature_flags.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/feature_flags.py index 45e673f750bb..bad6c163b1c9 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/feature_flags.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/feature_flags.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -67,6 +67,9 @@ class FeatureFlags(proto.Message): Notify the server that the client supports using retry info back off durations to retry requests with. + client_side_metrics_enabled (bool): + Notify the server that the client has client + side metrics enabled. """ reverse_scans: bool = proto.Field( @@ -93,6 +96,10 @@ class FeatureFlags(proto.Message): proto.BOOL, number=7, ) + client_side_metrics_enabled: bool = proto.Field( + proto.BOOL, + number=8, + ) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/request_stats.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/request_stats.py index 61cce949135d..115f76af5835 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/request_stats.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/request_stats.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/response_params.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/response_params.py index 98e3a67db584..3bbf3163ffe8 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/response_params.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/response_params.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
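The new ``Value`` and ``Mutation.AddToCell`` messages above are ordinary proto-plus types, so an aggregate update can be assembled directly. A minimal sketch, assuming a library version that includes this patch; the family name, qualifier, and input value are placeholders.

```python
from google.cloud.bigtable_v2.types import data

# Increment an Aggregate cell by 1: the qualifier and timestamp travel as raw
# Values, the accumulated input as a typed (Int64) Value.
mutation = data.Mutation(
    add_to_cell=data.Mutation.AddToCell(
        family_name="counters",                        # placeholder Aggregate family
        column_qualifier=data.Value(raw_value=b"clicks"),
        timestamp=data.Value(raw_timestamp_micros=0),  # must match the table's granularity
        input=data.Value(int_value=1),
    )
)
print(mutation)
```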
diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index daf730a9a77f..f175c66da084 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -55,7 +55,7 @@ "google-cloud-testutils", ] SYSTEM_TEST_EXTERNAL_DEPENDENCIES: List[str] = [ - "pytest-asyncio", + "pytest-asyncio==0.21.2", ] SYSTEM_TEST_LOCAL_DEPENDENCIES: List[str] = [] SYSTEM_TEST_DEPENDENCIES: List[str] = [] diff --git a/packages/google-cloud-bigtable/owlbot.py b/packages/google-cloud-bigtable/owlbot.py index 3fb079396a0c..cde9fce64947 100644 --- a/packages/google-cloud-bigtable/owlbot.py +++ b/packages/google-cloud-bigtable/owlbot.py @@ -91,7 +91,7 @@ def get_staging_dirs( microgenerator=True, cov_level=99, system_test_external_dependencies=[ - "pytest-asyncio", + "pytest-asyncio==0.21.2", ], ) @@ -218,6 +218,54 @@ def lint_setup_py(session): ''', ) + +# ---------------------------------------------------------------------------- +# Customize gapics to include PooledBigtableGrpcAsyncIOTransport +# ---------------------------------------------------------------------------- +def insert(file, before_line, insert_line, after_line, escape=None): + target = before_line + "\n" + after_line + if escape: + for c in escape: + target = target.replace(c, '\\' + c) + replacement = before_line + "\n" + insert_line + "\n" + after_line + s.replace(file, target, replacement) + + +insert( + "google/cloud/bigtable_v2/services/bigtable/client.py", + "from .transports.grpc_asyncio import BigtableGrpcAsyncIOTransport", + "from .transports.pooled_grpc_asyncio import PooledBigtableGrpcAsyncIOTransport", + "from .transports.rest import BigtableRestTransport" +) +insert( + "google/cloud/bigtable_v2/services/bigtable/client.py", + ' _transport_registry["grpc_asyncio"] = BigtableGrpcAsyncIOTransport', + ' _transport_registry["pooled_grpc_asyncio"] = PooledBigtableGrpcAsyncIOTransport', + ' _transport_registry["rest"] = BigtableRestTransport', + escape='[]"' +) +insert( + "google/cloud/bigtable_v2/services/bigtable/transports/__init__.py", + '_transport_registry["grpc_asyncio"] = BigtableGrpcAsyncIOTransport', + '_transport_registry["pooled_grpc_asyncio"] = PooledBigtableGrpcAsyncIOTransport', + '_transport_registry["rest"] = BigtableRestTransport', + escape='[]"' +) +insert( + "google/cloud/bigtable_v2/services/bigtable/transports/__init__.py", + "from .grpc_asyncio import BigtableGrpcAsyncIOTransport", + "from .pooled_grpc_asyncio import PooledBigtableGrpcAsyncIOTransport", + "from .rest import BigtableRestTransport" +) +insert( + "google/cloud/bigtable_v2/services/bigtable/transports/__init__.py", + ' "BigtableGrpcAsyncIOTransport",', + ' "PooledBigtableGrpcAsyncIOTransport",', + ' "BigtableRestTransport",', + escape='"' +) + + # ---------------------------------------------------------------------------- # Samples templates # ---------------------------------------------------------------------------- diff --git a/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py b/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py index 8c3efea109a0..073b1ad00237 100644 --- a/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py +++ b/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py @@ -1,6 +1,6 @@ #! 
/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -39,15 +39,17 @@ def partition( class bigtable_adminCallTransformer(cst.CSTTransformer): CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { - 'check_consistency': ('name', 'consistency_token', ), + 'check_consistency': ('name', 'consistency_token', 'standard_read_remote_writes', 'data_boost_read_local_writes', ), 'copy_backup': ('parent', 'backup_id', 'source_backup', 'expire_time', ), 'create_app_profile': ('parent', 'app_profile_id', 'app_profile', 'ignore_warnings', ), + 'create_authorized_view': ('parent', 'authorized_view_id', 'authorized_view', ), 'create_backup': ('parent', 'backup_id', 'backup', ), 'create_cluster': ('parent', 'cluster_id', 'cluster', ), 'create_instance': ('parent', 'instance_id', 'instance', 'clusters', ), 'create_table': ('parent', 'table_id', 'table', 'initial_splits', ), 'create_table_from_snapshot': ('parent', 'table_id', 'source_snapshot', ), 'delete_app_profile': ('name', 'ignore_warnings', ), + 'delete_authorized_view': ('name', 'etag', ), 'delete_backup': ('name', ), 'delete_cluster': ('name', ), 'delete_instance': ('name', ), @@ -56,6 +58,7 @@ class bigtable_adminCallTransformer(cst.CSTTransformer): 'drop_row_range': ('name', 'row_key_prefix', 'delete_all_data_from_table', ), 'generate_consistency_token': ('name', ), 'get_app_profile': ('name', ), + 'get_authorized_view': ('name', 'view', ), 'get_backup': ('name', ), 'get_cluster': ('name', ), 'get_iam_policy': ('resource', 'options', ), @@ -63,6 +66,7 @@ class bigtable_adminCallTransformer(cst.CSTTransformer): 'get_snapshot': ('name', ), 'get_table': ('name', 'view', ), 'list_app_profiles': ('parent', 'page_size', 'page_token', ), + 'list_authorized_views': ('parent', 'page_size', 'page_token', 'view', ), 'list_backups': ('parent', 'filter', 'order_by', 'page_size', 'page_token', ), 'list_clusters': ('parent', 'page_token', ), 'list_hot_tablets': ('parent', 'start_time', 'end_time', 'page_size', 'page_token', ), @@ -78,6 +82,7 @@ class bigtable_adminCallTransformer(cst.CSTTransformer): 'test_iam_permissions': ('resource', 'permissions', ), 'undelete_table': ('name', ), 'update_app_profile': ('app_profile', 'update_mask', 'ignore_warnings', ), + 'update_authorized_view': ('authorized_view', 'update_mask', 'ignore_warnings', ), 'update_backup': ('backup', 'update_mask', ), 'update_cluster': ('name', 'location', 'state', 'serve_nodes', 'cluster_config', 'default_storage_type', 'encryption_config', ), 'update_instance': ('display_name', 'name', 'state', 'type_', 'labels', 'create_time', 'satisfies_pzs', ), diff --git a/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py b/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py index 8d32e5b70438..3d1381c49463 100644 --- a/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py +++ b/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py @@ -1,6 +1,6 @@ #! /usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
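The ``METHOD_TO_PARAMS`` table drives the keyword fixup: positional arguments are rewritten, in the order listed, into a single ``request`` mapping. The standalone sketch below only approximates that folding for the ``check_consistency`` entry shown above; ``to_request_call`` is an illustrative helper, not part of the script, which actually rewrites call sites with libcst.

```python
# Conceptual sketch of the argument folding, using one entry from the patch.
METHOD_TO_PARAMS = {
    "check_consistency": (
        "name", "consistency_token",
        "standard_read_remote_writes", "data_boost_read_local_writes",
    ),
}


def to_request_call(method: str, *args, **kwargs) -> dict:
    """Fold positional and keyword arguments into a single request dict."""
    params = METHOD_TO_PARAMS[method]
    request = dict(zip(params, args))
    request.update(kwargs)
    return {"request": request}


print(to_request_call("check_consistency", "my-table", consistency_token="token-123"))
# {'request': {'name': 'my-table', 'consistency_token': 'token-123'}}
```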
@@ -39,15 +39,15 @@ def partition( class bigtableCallTransformer(cst.CSTTransformer): CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { - 'check_and_mutate_row': ('table_name', 'row_key', 'app_profile_id', 'predicate_filter', 'true_mutations', 'false_mutations', ), + 'check_and_mutate_row': ('row_key', 'table_name', 'authorized_view_name', 'app_profile_id', 'predicate_filter', 'true_mutations', 'false_mutations', ), 'generate_initial_change_stream_partitions': ('table_name', 'app_profile_id', ), - 'mutate_row': ('table_name', 'row_key', 'mutations', 'app_profile_id', ), - 'mutate_rows': ('table_name', 'entries', 'app_profile_id', ), + 'mutate_row': ('row_key', 'mutations', 'table_name', 'authorized_view_name', 'app_profile_id', ), + 'mutate_rows': ('entries', 'table_name', 'authorized_view_name', 'app_profile_id', ), 'ping_and_warm': ('name', 'app_profile_id', ), 'read_change_stream': ('table_name', 'app_profile_id', 'partition', 'start_time', 'continuation_tokens', 'end_time', 'heartbeat_duration', ), - 'read_modify_write_row': ('table_name', 'row_key', 'rules', 'app_profile_id', ), - 'read_rows': ('table_name', 'app_profile_id', 'rows', 'filter', 'rows_limit', 'request_stats_view', 'reversed', ), - 'sample_row_keys': ('table_name', 'app_profile_id', ), + 'read_modify_write_row': ('row_key', 'rules', 'table_name', 'authorized_view_name', 'app_profile_id', ), + 'read_rows': ('table_name', 'authorized_view_name', 'app_profile_id', 'rows', 'filter', 'rows_limit', 'request_stats_view', 'reversed', ), + 'sample_row_keys': ('table_name', 'authorized_view_name', 'app_profile_id', ), } def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: diff --git a/packages/google-cloud-bigtable/testing/constraints-3.8.txt b/packages/google-cloud-bigtable/testing/constraints-3.8.txt index ee858c3ecf4f..d96846bb5a6b 100644 --- a/packages/google-cloud-bigtable/testing/constraints-3.8.txt +++ b/packages/google-cloud-bigtable/testing/constraints-3.8.txt @@ -11,4 +11,3 @@ grpc-google-iam-v1==0.12.4 proto-plus==1.22.0 libcst==0.2.5 protobuf==3.19.5 -pytest-asyncio==0.21.1 diff --git a/packages/google-cloud-bigtable/tests/__init__.py b/packages/google-cloud-bigtable/tests/__init__.py index 89a37dc92c5a..8f6cf068242c 100644 --- a/packages/google-cloud-bigtable/tests/__init__.py +++ b/packages/google-cloud-bigtable/tests/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
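With ``authorized_view_name`` now accepted alongside ``table_name`` in the data-plane methods above, a request can target an AuthorizedView instead of a whole table. A hedged sketch of the request shape, assuming a library version that includes this patch; the resource names and cell contents are placeholders, and the client call is left commented out because it requires real credentials.

```python
from google.cloud.bigtable_v2 import BigtableClient, MutateRowRequest
from google.cloud.bigtable_v2.types import data

request = MutateRowRequest(
    authorized_view_name=(
        "projects/my-project/instances/my-instance"
        "/tables/my-table/authorizedViews/my-view"
    ),
    row_key=b"row-1",
    mutations=[
        data.Mutation(
            set_cell=data.Mutation.SetCell(
                family_name="cf", column_qualifier=b"col", value=b"value"
            )
        )
    ],
)
# client = BigtableClient()
# client.mutate_row(request=request)
```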
diff --git a/packages/google-cloud-bigtable/tests/system/data/test_system.py b/packages/google-cloud-bigtable/tests/system/data/test_system.py index aeb08fc1ac05..9fe208551d78 100644 --- a/packages/google-cloud-bigtable/tests/system/data/test_system.py +++ b/packages/google-cloud-bigtable/tests/system/data/test_system.py @@ -140,7 +140,6 @@ async def _create_row_and_mutation( return row_key, mutation -@pytest.mark.usefixtures("table") @pytest_asyncio.fixture(scope="function") async def temp_rows(table): builder = TempRowBuilder(table) diff --git a/packages/google-cloud-bigtable/tests/unit/__init__.py b/packages/google-cloud-bigtable/tests/unit/__init__.py index 89a37dc92c5a..8f6cf068242c 100644 --- a/packages/google-cloud-bigtable/tests/unit/__init__.py +++ b/packages/google-cloud-bigtable/tests/unit/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/__init__.py b/packages/google-cloud-bigtable/tests/unit/gapic/__init__.py index 89a37dc92c5a..8f6cf068242c 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/__init__.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/__init__.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/__init__.py index 89a37dc92c5a..8f6cf068242c 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/__init__.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index 10e9d101b736..9a418047fa9e 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -1241,7 +1241,8 @@ def test_create_instance(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.CreateInstanceRequest() + request = bigtable_instance_admin.CreateInstanceRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, future.Future) @@ -1257,12 +1258,155 @@ def test_create_instance_empty_call(): # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object(type(client.transport.create_instance), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) client.create_instance() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == bigtable_instance_admin.CreateInstanceRequest() +def test_create_instance_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_instance_admin.CreateInstanceRequest( + parent="parent_value", + instance_id="instance_id_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_instance), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.create_instance(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.CreateInstanceRequest( + parent="parent_value", + instance_id="instance_id_value", + ) + + +def test_create_instance_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.create_instance in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.create_instance] = mock_rpc + request = {} + client.create_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.create_instance(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_create_instance_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_instance), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.create_instance() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.CreateInstanceRequest() + + +@pytest.mark.asyncio +async def test_create_instance_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.create_instance + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.create_instance + ] = mock_object + + request = {} + await client.create_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.create_instance(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + @pytest.mark.asyncio async def test_create_instance_async( transport: str = "grpc_asyncio", @@ -1288,7 +1432,8 @@ async def test_create_instance_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.CreateInstanceRequest() + request = bigtable_instance_admin.CreateInstanceRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, future.Future) @@ -1504,7 +1649,8 @@ def test_get_instance(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.GetInstanceRequest() + request = bigtable_instance_admin.GetInstanceRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, instance.Instance) @@ -1525,12 +1671,151 @@ def test_get_instance_empty_call(): # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_instance), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) client.get_instance() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == bigtable_instance_admin.GetInstanceRequest() +def test_get_instance_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. 
+ client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_instance_admin.GetInstanceRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_instance), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.get_instance(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.GetInstanceRequest( + name="name_value", + ) + + +def test_get_instance_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.get_instance in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.get_instance] = mock_rpc + request = {} + client.get_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_instance(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_get_instance_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_instance), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + instance.Instance( + name="name_value", + display_name="display_name_value", + state=instance.Instance.State.READY, + type_=instance.Instance.Type.PRODUCTION, + satisfies_pzs=True, + ) + ) + response = await client.get_instance() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.GetInstanceRequest() + + +@pytest.mark.asyncio +async def test_get_instance_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.get_instance + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.get_instance + ] = mock_object + + request = {} + await client.get_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + await client.get_instance(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + @pytest.mark.asyncio async def test_get_instance_async( transport: str = "grpc_asyncio", @@ -1562,7 +1847,8 @@ async def test_get_instance_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.GetInstanceRequest() + request = bigtable_instance_admin.GetInstanceRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, instance.Instance) @@ -1746,7 +2032,8 @@ def test_list_instances(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListInstancesRequest() + request = bigtable_instance_admin.ListInstancesRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert response.raw_page is response @@ -1765,12 +2052,150 @@ def test_list_instances_empty_call(): # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_instances), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) client.list_instances() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == bigtable_instance_admin.ListInstancesRequest() +def test_list_instances_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. 
+ client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_instance_admin.ListInstancesRequest( + parent="parent_value", + page_token="page_token_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_instances), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.list_instances(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.ListInstancesRequest( + parent="parent_value", + page_token="page_token_value", + ) + + +def test_list_instances_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.list_instances in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.list_instances] = mock_rpc + request = {} + client.list_instances(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.list_instances(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_list_instances_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_instances), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_instance_admin.ListInstancesResponse( + failed_locations=["failed_locations_value"], + next_page_token="next_page_token_value", + ) + ) + response = await client.list_instances() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.ListInstancesRequest() + + +@pytest.mark.asyncio +async def test_list_instances_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.list_instances + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.list_instances + ] = mock_object + + request = {} + await client.list_instances(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + await client.list_instances(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + @pytest.mark.asyncio async def test_list_instances_async( transport: str = "grpc_asyncio", @@ -1799,7 +2224,8 @@ async def test_list_instances_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListInstancesRequest() + request = bigtable_instance_admin.ListInstancesRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, bigtable_instance_admin.ListInstancesResponse) @@ -1987,7 +2413,8 @@ def test_update_instance(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == instance.Instance() + request = instance.Instance() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, instance.Instance) @@ -2008,17 +2435,158 @@ def test_update_instance_empty_call(): # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_instance), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) client.update_instance() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == instance.Instance() -@pytest.mark.asyncio -async def test_update_instance_async( - transport: str = "grpc_asyncio", request_type=instance.Instance -): - client = BigtableInstanceAdminAsyncClient( +def test_update_instance_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. 
+ client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = instance.Instance( + name="name_value", + display_name="display_name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_instance), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.update_instance(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == instance.Instance( + name="name_value", + display_name="display_name_value", + ) + + +def test_update_instance_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.update_instance in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.update_instance] = mock_rpc + request = {} + client.update_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.update_instance(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_update_instance_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_instance), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + instance.Instance( + name="name_value", + display_name="display_name_value", + state=instance.Instance.State.READY, + type_=instance.Instance.Type.PRODUCTION, + satisfies_pzs=True, + ) + ) + response = await client.update_instance() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == instance.Instance() + + +@pytest.mark.asyncio +async def test_update_instance_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.update_instance + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.update_instance + ] = mock_object + + request = {} + await client.update_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + await client.update_instance(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + +@pytest.mark.asyncio +async def test_update_instance_async( + transport: str = "grpc_asyncio", request_type=instance.Instance +): + client = BigtableInstanceAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -2044,7 +2612,8 @@ async def test_update_instance_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == instance.Instance() + request = instance.Instance() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, instance.Instance) @@ -2147,7 +2716,8 @@ def test_partial_update_instance(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.PartialUpdateInstanceRequest() + request = bigtable_instance_admin.PartialUpdateInstanceRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, future.Future) @@ -2165,12 +2735,158 @@ def test_partial_update_instance_empty_call(): with mock.patch.object( type(client.transport.partial_update_instance), "__call__" ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) client.partial_update_instance() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == bigtable_instance_admin.PartialUpdateInstanceRequest() +def test_partial_update_instance_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. 
+ client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_instance_admin.PartialUpdateInstanceRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.partial_update_instance), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.partial_update_instance(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.PartialUpdateInstanceRequest() + + +def test_partial_update_instance_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.partial_update_instance + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.partial_update_instance + ] = mock_rpc + request = {} + client.partial_update_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.partial_update_instance(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_partial_update_instance_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.partial_update_instance), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.partial_update_instance() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.PartialUpdateInstanceRequest() + + +@pytest.mark.asyncio +async def test_partial_update_instance_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.partial_update_instance + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.partial_update_instance + ] = mock_object + + request = {} + await client.partial_update_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.partial_update_instance(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + @pytest.mark.asyncio async def test_partial_update_instance_async( transport: str = "grpc_asyncio", @@ -2198,7 +2914,8 @@ async def test_partial_update_instance_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.PartialUpdateInstanceRequest() + request = bigtable_instance_admin.PartialUpdateInstanceRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, future.Future) @@ -2396,7 +3113,8 @@ def test_delete_instance(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.DeleteInstanceRequest() + request = bigtable_instance_admin.DeleteInstanceRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert response is None @@ -2412,12 +3130,143 @@ def test_delete_instance_empty_call(): # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_instance), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) client.delete_instance() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == bigtable_instance_admin.DeleteInstanceRequest() +def test_delete_instance_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. 
+ client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_instance_admin.DeleteInstanceRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_instance), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.delete_instance(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.DeleteInstanceRequest( + name="name_value", + ) + + +def test_delete_instance_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.delete_instance in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.delete_instance] = mock_rpc + request = {} + client.delete_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.delete_instance(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_delete_instance_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_instance), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_instance() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.DeleteInstanceRequest() + + +@pytest.mark.asyncio +async def test_delete_instance_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.delete_instance + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.delete_instance + ] = mock_object + + request = {} + await client.delete_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + await client.delete_instance(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + @pytest.mark.asyncio async def test_delete_instance_async( transport: str = "grpc_asyncio", @@ -2441,7 +3290,8 @@ async def test_delete_instance_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.DeleteInstanceRequest() + request = bigtable_instance_admin.DeleteInstanceRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert response is None @@ -2617,7 +3467,8 @@ def test_create_cluster(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.CreateClusterRequest() + request = bigtable_instance_admin.CreateClusterRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, future.Future) @@ -2633,18 +3484,161 @@ def test_create_cluster_empty_call(): # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_cluster), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) client.create_cluster() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == bigtable_instance_admin.CreateClusterRequest() -@pytest.mark.asyncio -async def test_create_cluster_async( - transport: str = "grpc_asyncio", - request_type=bigtable_instance_admin.CreateClusterRequest, -): - client = BigtableInstanceAdminAsyncClient( +def test_create_cluster_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. 
+ client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_instance_admin.CreateClusterRequest( + parent="parent_value", + cluster_id="cluster_id_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_cluster), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.create_cluster(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.CreateClusterRequest( + parent="parent_value", + cluster_id="cluster_id_value", + ) + + +def test_create_cluster_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.create_cluster in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.create_cluster] = mock_rpc + request = {} + client.create_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.create_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_create_cluster_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_cluster), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.create_cluster() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.CreateClusterRequest() + + +@pytest.mark.asyncio +async def test_create_cluster_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.create_cluster + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.create_cluster + ] = mock_object + + request = {} + await client.create_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.create_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + +@pytest.mark.asyncio +async def test_create_cluster_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.CreateClusterRequest, +): + client = BigtableInstanceAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -2664,7 +3658,8 @@ async def test_create_cluster_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.CreateClusterRequest() + request = bigtable_instance_admin.CreateClusterRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, future.Future) @@ -2870,7 +3865,8 @@ def test_get_cluster(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.GetClusterRequest() + request = bigtable_instance_admin.GetClusterRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, instance.Cluster) @@ -2891,12 +3887,151 @@ def test_get_cluster_empty_call(): # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_cluster), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) client.get_cluster() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == bigtable_instance_admin.GetClusterRequest() +def test_get_cluster_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_instance_admin.GetClusterRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_cluster), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.get_cluster(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.GetClusterRequest( + name="name_value", + ) + + +def test_get_cluster_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.get_cluster in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.get_cluster] = mock_rpc + request = {} + client.get_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_get_cluster_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_cluster), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + instance.Cluster( + name="name_value", + location="location_value", + state=instance.Cluster.State.READY, + serve_nodes=1181, + default_storage_type=common.StorageType.SSD, + ) + ) + response = await client.get_cluster() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.GetClusterRequest() + + +@pytest.mark.asyncio +async def test_get_cluster_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.get_cluster + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.get_cluster + ] = mock_object + + request = {} + await client.get_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + await client.get_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + @pytest.mark.asyncio async def test_get_cluster_async( transport: str = "grpc_asyncio", @@ -2928,7 +4063,8 @@ async def test_get_cluster_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.GetClusterRequest() + request = bigtable_instance_admin.GetClusterRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, instance.Cluster) @@ -3112,7 +4248,8 @@ def test_list_clusters(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListClustersRequest() + request = bigtable_instance_admin.ListClustersRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert response.raw_page is response @@ -3131,12 +4268,150 @@ def test_list_clusters_empty_call(): # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) client.list_clusters() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == bigtable_instance_admin.ListClustersRequest() +def test_list_clusters_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. 
+ client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_instance_admin.ListClustersRequest( + parent="parent_value", + page_token="page_token_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.list_clusters(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.ListClustersRequest( + parent="parent_value", + page_token="page_token_value", + ) + + +def test_list_clusters_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.list_clusters in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.list_clusters] = mock_rpc + request = {} + client.list_clusters(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.list_clusters(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_list_clusters_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_instance_admin.ListClustersResponse( + failed_locations=["failed_locations_value"], + next_page_token="next_page_token_value", + ) + ) + response = await client.list_clusters() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.ListClustersRequest() + + +@pytest.mark.asyncio +async def test_list_clusters_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.list_clusters + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.list_clusters + ] = mock_object + + request = {} + await client.list_clusters(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + await client.list_clusters(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + @pytest.mark.asyncio async def test_list_clusters_async( transport: str = "grpc_asyncio", @@ -3165,7 +4440,8 @@ async def test_list_clusters_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListClustersRequest() + request = bigtable_instance_admin.ListClustersRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, bigtable_instance_admin.ListClustersResponse) @@ -3347,7 +4623,8 @@ def test_update_cluster(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == instance.Cluster() + request = instance.Cluster() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, future.Future) @@ -3363,12 +4640,155 @@ def test_update_cluster_empty_call(): # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_cluster), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) client.update_cluster() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == instance.Cluster() +def test_update_cluster_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. 
+ client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = instance.Cluster( + name="name_value", + location="location_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_cluster), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.update_cluster(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == instance.Cluster( + name="name_value", + location="location_value", + ) + + +def test_update_cluster_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.update_cluster in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.update_cluster] = mock_rpc + request = {} + client.update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.update_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_update_cluster_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_cluster), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.update_cluster() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == instance.Cluster() + + +@pytest.mark.asyncio +async def test_update_cluster_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.update_cluster + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.update_cluster + ] = mock_object + + request = {} + await client.update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.update_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + @pytest.mark.asyncio async def test_update_cluster_async( transport: str = "grpc_asyncio", request_type=instance.Cluster @@ -3393,7 +4813,8 @@ async def test_update_cluster_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == instance.Cluster() + request = instance.Cluster() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, future.Future) @@ -3493,30 +4914,177 @@ def test_partial_update_cluster(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + request = bigtable_instance_admin.PartialUpdateClusterRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_partial_update_cluster_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.partial_update_cluster), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.partial_update_cluster() + call.assert_called() + _, args, _ = call.mock_calls[0] assert args[0] == bigtable_instance_admin.PartialUpdateClusterRequest() - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) +def test_partial_update_cluster_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_instance_admin.PartialUpdateClusterRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.partial_update_cluster), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.partial_update_cluster(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.PartialUpdateClusterRequest() + + +def test_partial_update_cluster_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.partial_update_cluster + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.partial_update_cluster + ] = mock_rpc + request = {} + client.partial_update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.partial_update_cluster(request) -def test_partial_update_cluster_empty_call(): + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_partial_update_cluster_empty_call_async(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminClient( + client = BigtableInstanceAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", + transport="grpc_asyncio", ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.partial_update_cluster), "__call__" ) as call: - client.partial_update_cluster() + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.partial_update_cluster() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == bigtable_instance_admin.PartialUpdateClusterRequest() +@pytest.mark.asyncio +async def test_partial_update_cluster_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.partial_update_cluster + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.partial_update_cluster + ] = mock_object + + request = {} + await client.partial_update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.partial_update_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + @pytest.mark.asyncio async def test_partial_update_cluster_async( transport: str = "grpc_asyncio", @@ -3544,7 +5112,8 @@ async def test_partial_update_cluster_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.PartialUpdateClusterRequest() + request = bigtable_instance_admin.PartialUpdateClusterRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, future.Future) @@ -3742,7 +5311,8 @@ def test_delete_cluster(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.DeleteClusterRequest() + request = bigtable_instance_admin.DeleteClusterRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert response is None @@ -3758,12 +5328,143 @@ def test_delete_cluster_empty_call(): # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) client.delete_cluster() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == bigtable_instance_admin.DeleteClusterRequest() +def test_delete_cluster_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. 
+ client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_instance_admin.DeleteClusterRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.delete_cluster(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.DeleteClusterRequest( + name="name_value", + ) + + +def test_delete_cluster_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.delete_cluster in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.delete_cluster] = mock_rpc + request = {} + client.delete_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.delete_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_delete_cluster_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_cluster() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.DeleteClusterRequest() + + +@pytest.mark.asyncio +async def test_delete_cluster_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.delete_cluster + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.delete_cluster + ] = mock_object + + request = {} + await client.delete_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + await client.delete_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + @pytest.mark.asyncio async def test_delete_cluster_async( transport: str = "grpc_asyncio", @@ -3787,7 +5488,8 @@ async def test_delete_cluster_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.DeleteClusterRequest() + request = bigtable_instance_admin.DeleteClusterRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert response is None @@ -3970,7 +5672,8 @@ def test_create_app_profile(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.CreateAppProfileRequest() + request = bigtable_instance_admin.CreateAppProfileRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, instance.AppProfile) @@ -3991,12 +5694,159 @@ def test_create_app_profile_empty_call(): with mock.patch.object( type(client.transport.create_app_profile), "__call__" ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) client.create_app_profile() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == bigtable_instance_admin.CreateAppProfileRequest() +def test_create_app_profile_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. 
+ request = bigtable_instance_admin.CreateAppProfileRequest( + parent="parent_value", + app_profile_id="app_profile_id_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_app_profile), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.create_app_profile(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.CreateAppProfileRequest( + parent="parent_value", + app_profile_id="app_profile_id_value", + ) + + +def test_create_app_profile_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.create_app_profile in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.create_app_profile + ] = mock_rpc + request = {} + client.create_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.create_app_profile(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_create_app_profile_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_app_profile), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + instance.AppProfile( + name="name_value", + etag="etag_value", + description="description_value", + ) + ) + response = await client.create_app_profile() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.CreateAppProfileRequest() + + +@pytest.mark.asyncio +async def test_create_app_profile_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.create_app_profile + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.create_app_profile + ] = mock_object + + request = {} + await client.create_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + await client.create_app_profile(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + @pytest.mark.asyncio async def test_create_app_profile_async( transport: str = "grpc_asyncio", @@ -4028,7 +5878,8 @@ async def test_create_app_profile_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.CreateAppProfileRequest() + request = bigtable_instance_admin.CreateAppProfileRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, instance.AppProfile) @@ -4240,7 +6091,8 @@ def test_get_app_profile(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.GetAppProfileRequest() + request = bigtable_instance_admin.GetAppProfileRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, instance.AppProfile) @@ -4254,17 +6106,154 @@ def test_get_app_profile_empty_call(): # i.e. request == None and no flattened fields passed, work. client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_app_profile), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.get_app_profile() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.GetAppProfileRequest() + + +def test_get_app_profile_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_instance_admin.GetAppProfileRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_app_profile), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.get_app_profile(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.GetAppProfileRequest( + name="name_value", + ) + + +def test_get_app_profile_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.get_app_profile in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.get_app_profile] = mock_rpc + request = {} + client.get_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_app_profile(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_get_app_profile_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_app_profile), "__call__") as call: - client.get_app_profile() + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + instance.AppProfile( + name="name_value", + etag="etag_value", + description="description_value", + ) + ) + response = await client.get_app_profile() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == bigtable_instance_admin.GetAppProfileRequest() +@pytest.mark.asyncio +async def test_get_app_profile_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.get_app_profile + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.get_app_profile + ] = mock_object + + request = {} + await client.get_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + await client.get_app_profile(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + @pytest.mark.asyncio async def test_get_app_profile_async( transport: str = "grpc_asyncio", @@ -4294,7 +6283,8 @@ async def test_get_app_profile_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.GetAppProfileRequest() + request = bigtable_instance_admin.GetAppProfileRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, instance.AppProfile) @@ -4478,7 +6468,8 @@ def test_list_app_profiles(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListAppProfilesRequest() + request = bigtable_instance_admin.ListAppProfilesRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, pagers.ListAppProfilesPager) @@ -4498,12 +6489,156 @@ def test_list_app_profiles_empty_call(): with mock.patch.object( type(client.transport.list_app_profiles), "__call__" ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) client.list_app_profiles() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == bigtable_instance_admin.ListAppProfilesRequest() +def test_list_app_profiles_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. 
+ client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_instance_admin.ListAppProfilesRequest( + parent="parent_value", + page_token="page_token_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_app_profiles), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.list_app_profiles(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.ListAppProfilesRequest( + parent="parent_value", + page_token="page_token_value", + ) + + +def test_list_app_profiles_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.list_app_profiles in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.list_app_profiles + ] = mock_rpc + request = {} + client.list_app_profiles(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.list_app_profiles(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_list_app_profiles_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_app_profiles), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_instance_admin.ListAppProfilesResponse( + next_page_token="next_page_token_value", + failed_locations=["failed_locations_value"], + ) + ) + response = await client.list_app_profiles() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.ListAppProfilesRequest() + + +@pytest.mark.asyncio +async def test_list_app_profiles_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.list_app_profiles + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.list_app_profiles + ] = mock_object + + request = {} + await client.list_app_profiles(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + await client.list_app_profiles(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + @pytest.mark.asyncio async def test_list_app_profiles_async( transport: str = "grpc_asyncio", @@ -4534,7 +6669,8 @@ async def test_list_app_profiles_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListAppProfilesRequest() + request = bigtable_instance_admin.ListAppProfilesRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, pagers.ListAppProfilesAsyncPager) @@ -4924,7 +7060,8 @@ def test_update_app_profile(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.UpdateAppProfileRequest() + request = bigtable_instance_admin.UpdateAppProfileRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, future.Future) @@ -4942,12 +7079,157 @@ def test_update_app_profile_empty_call(): with mock.patch.object( type(client.transport.update_app_profile), "__call__" ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) client.update_app_profile() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == bigtable_instance_admin.UpdateAppProfileRequest() +def test_update_app_profile_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. 
+ client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_instance_admin.UpdateAppProfileRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_app_profile), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.update_app_profile(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.UpdateAppProfileRequest() + + +def test_update_app_profile_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.update_app_profile in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.update_app_profile + ] = mock_rpc + request = {} + client.update_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.update_app_profile(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_update_app_profile_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_app_profile), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
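
# Note: UpdateAppProfile is a long-running method, so the faked response just
# below is an operations_pb2.Operation (name "operations/spam") rather than a
# resource message, and the corresponding *_async test only checks that the
# client hands back a future (isinstance(response, future.Future)). This is
# also why the *_use_cached_wrapped_rpc tests for operation methods call
# wrapper_fn.reset_mock() again after the first RPC: per the test comments, a
# cached operations wrapper is built on that first call, and only subsequent
# calls are expected to leave wrap_method untouched.
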
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.update_app_profile() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.UpdateAppProfileRequest() + + +@pytest.mark.asyncio +async def test_update_app_profile_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.update_app_profile + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.update_app_profile + ] = mock_object + + request = {} + await client.update_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.update_app_profile(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + @pytest.mark.asyncio async def test_update_app_profile_async( transport: str = "grpc_asyncio", @@ -4975,7 +7257,8 @@ async def test_update_app_profile_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.UpdateAppProfileRequest() + request = bigtable_instance_admin.UpdateAppProfileRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, future.Future) @@ -5173,32 +7456,172 @@ def test_delete_app_profile(request_type, transport: str = "grpc"): response = client.delete_app_profile(request) # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.DeleteAppProfileRequest() + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = bigtable_instance_admin.DeleteAppProfileRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_app_profile_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_app_profile), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.delete_app_profile() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.DeleteAppProfileRequest() + + +def test_delete_app_profile_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_instance_admin.DeleteAppProfileRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_app_profile), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.delete_app_profile(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.DeleteAppProfileRequest( + name="name_value", + ) + + +def test_delete_app_profile_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.delete_app_profile in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.delete_app_profile + ] = mock_rpc + request = {} + client.delete_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 - # Establish that the response is the type that we expect. - assert response is None + client.delete_app_profile(request) + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 -def test_delete_app_profile_empty_call(): + +@pytest.mark.asyncio +async def test_delete_app_profile_empty_call_async(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminClient( + client = BigtableInstanceAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", + transport="grpc_asyncio", ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.delete_app_profile), "__call__" ) as call: - client.delete_app_profile() + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_app_profile() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == bigtable_instance_admin.DeleteAppProfileRequest() +@pytest.mark.asyncio +async def test_delete_app_profile_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.delete_app_profile + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.delete_app_profile + ] = mock_object + + request = {} + await client.delete_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + await client.delete_app_profile(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + @pytest.mark.asyncio async def test_delete_app_profile_async( transport: str = "grpc_asyncio", @@ -5224,7 +7647,8 @@ async def test_delete_app_profile_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.DeleteAppProfileRequest() + request = bigtable_instance_admin.DeleteAppProfileRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert response is None @@ -5411,7 +7835,8 @@ def test_get_iam_policy(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.GetIamPolicyRequest() + request = iam_policy_pb2.GetIamPolicyRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, policy_pb2.Policy) @@ -5429,12 +7854,148 @@ def test_get_iam_policy_empty_call(): # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) client.get_iam_policy() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == iam_policy_pb2.GetIamPolicyRequest() +def test_get_iam_policy_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. 
+ request = iam_policy_pb2.GetIamPolicyRequest( + resource="resource_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.get_iam_policy(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == iam_policy_pb2.GetIamPolicyRequest( + resource="resource_value", + ) + + +def test_get_iam_policy_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.get_iam_policy in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.get_iam_policy] = mock_rpc + request = {} + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_iam_policy(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_get_iam_policy_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + ) + response = await client.get_iam_policy() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == iam_policy_pb2.GetIamPolicyRequest() + + +@pytest.mark.asyncio +async def test_get_iam_policy_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.get_iam_policy + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.get_iam_policy + ] = mock_object + + request = {} + await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + await client.get_iam_policy(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + @pytest.mark.asyncio async def test_get_iam_policy_async( transport: str = "grpc_asyncio", request_type=iam_policy_pb2.GetIamPolicyRequest @@ -5462,7 +8023,8 @@ async def test_get_iam_policy_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.GetIamPolicyRequest() + request = iam_policy_pb2.GetIamPolicyRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, policy_pb2.Policy) @@ -5660,7 +8222,8 @@ def test_set_iam_policy(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.SetIamPolicyRequest() + request = iam_policy_pb2.SetIamPolicyRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, policy_pb2.Policy) @@ -5678,12 +8241,148 @@ def test_set_iam_policy_empty_call(): # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) client.set_iam_policy() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == iam_policy_pb2.SetIamPolicyRequest() +def test_set_iam_policy_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. 
+ client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = iam_policy_pb2.SetIamPolicyRequest( + resource="resource_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.set_iam_policy(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == iam_policy_pb2.SetIamPolicyRequest( + resource="resource_value", + ) + + +def test_set_iam_policy_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.set_iam_policy in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.set_iam_policy] = mock_rpc + request = {} + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.set_iam_policy(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_set_iam_policy_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + ) + response = await client.set_iam_policy() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == iam_policy_pb2.SetIamPolicyRequest() + + +@pytest.mark.asyncio +async def test_set_iam_policy_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.set_iam_policy + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.set_iam_policy + ] = mock_object + + request = {} + await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + await client.set_iam_policy(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + @pytest.mark.asyncio async def test_set_iam_policy_async( transport: str = "grpc_asyncio", request_type=iam_policy_pb2.SetIamPolicyRequest @@ -5711,7 +8410,8 @@ async def test_set_iam_policy_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.SetIamPolicyRequest() + request = iam_policy_pb2.SetIamPolicyRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, policy_pb2.Policy) @@ -5891,51 +8591,195 @@ async def test_set_iam_policy_flattened_error_async(): def test_test_iam_permissions(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + response = client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = iam_policy_pb2.TestIamPermissionsRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. 
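
# Note: the IAM surface (GetIamPolicy / SetIamPolicy / TestIamPermissions)
# reuses the shared google.iam.v1 protos, so these tests build
# iam_policy_pb2 requests and expect policy_pb2.Policy /
# TestIamPermissionsResponse messages rather than Bigtable admin types.
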
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + assert response.permissions == ["permissions_value"] + + +def test_test_iam_permissions_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.test_iam_permissions() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == iam_policy_pb2.TestIamPermissionsRequest() + + +def test_test_iam_permissions_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = iam_policy_pb2.TestIamPermissionsRequest( + resource="resource_value", + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.test_iam_permissions), "__call__" ) as call: - # Designate an appropriate return value for the call. - call.return_value = iam_policy_pb2.TestIamPermissionsResponse( - permissions=["permissions_value"], + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. ) - response = client.test_iam_permissions(request) + client.test_iam_permissions(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == iam_policy_pb2.TestIamPermissionsRequest( + resource="resource_value", + ) + + +def test_test_iam_permissions_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.test_iam_permissions in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.test_iam_permissions + ] = mock_rpc + request = {} + client.test_iam_permissions(request) # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.TestIamPermissionsRequest() + assert mock_rpc.call_count == 1 - # Establish that the response is the type that we expect. - assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) - assert response.permissions == ["permissions_value"] + client.test_iam_permissions(request) + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 -def test_test_iam_permissions_empty_call(): + +@pytest.mark.asyncio +async def test_test_iam_permissions_empty_call_async(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminClient( + client = BigtableInstanceAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", + transport="grpc_asyncio", ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.test_iam_permissions), "__call__" ) as call: - client.test_iam_permissions() + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + ) + response = await client.test_iam_permissions() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == iam_policy_pb2.TestIamPermissionsRequest() +@pytest.mark.asyncio +async def test_test_iam_permissions_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.test_iam_permissions + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.test_iam_permissions + ] = mock_object + + request = {} + await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + await client.test_iam_permissions(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + @pytest.mark.asyncio async def test_test_iam_permissions_async( transport: str = "grpc_asyncio", @@ -5965,7 +8809,8 @@ async def test_test_iam_permissions_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.TestIamPermissionsRequest() + request = iam_policy_pb2.TestIamPermissionsRequest() + assert args[0] == request # Establish that the response is the type that we expect. 
assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) @@ -6185,7 +9030,8 @@ def test_list_hot_tablets(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListHotTabletsRequest() + request = bigtable_instance_admin.ListHotTabletsRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, pagers.ListHotTabletsPager) @@ -6202,12 +9048,151 @@ def test_list_hot_tablets_empty_call(): # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_hot_tablets), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) client.list_hot_tablets() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == bigtable_instance_admin.ListHotTabletsRequest() +def test_list_hot_tablets_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_instance_admin.ListHotTabletsRequest( + parent="parent_value", + page_token="page_token_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_hot_tablets), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.list_hot_tablets(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.ListHotTabletsRequest( + parent="parent_value", + page_token="page_token_value", + ) + + +def test_list_hot_tablets_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.list_hot_tablets in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.list_hot_tablets + ] = mock_rpc + request = {} + client.list_hot_tablets(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.list_hot_tablets(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_list_hot_tablets_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. 
request == None and no flattened fields passed, work. + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_hot_tablets), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_instance_admin.ListHotTabletsResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_hot_tablets() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.ListHotTabletsRequest() + + +@pytest.mark.asyncio +async def test_list_hot_tablets_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.list_hot_tablets + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.list_hot_tablets + ] = mock_object + + request = {} + await client.list_hot_tablets(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + await client.list_hot_tablets(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + @pytest.mark.asyncio async def test_list_hot_tablets_async( transport: str = "grpc_asyncio", @@ -6235,7 +9220,8 @@ async def test_list_hot_tablets_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListHotTabletsRequest() + request = bigtable_instance_admin.ListHotTabletsRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, pagers.ListHotTabletsAsyncPager) @@ -6615,6 +9601,46 @@ def test_create_instance_rest(request_type): assert response.operation.name == "operations/spam" +def test_create_instance_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.create_instance in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client._transport._wrapped_methods[client._transport.create_instance] = mock_rpc + + request = {} + client.create_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.create_instance(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + def test_create_instance_rest_required_fields( request_type=bigtable_instance_admin.CreateInstanceRequest, ): @@ -6909,6 +9935,42 @@ def test_get_instance_rest(request_type): assert response.satisfies_pzs is True +def test_get_instance_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.get_instance in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.get_instance] = mock_rpc + + request = {} + client.get_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_instance(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + def test_get_instance_rest_required_fields( request_type=bigtable_instance_admin.GetInstanceRequest, ): @@ -7179,6 +10241,42 @@ def test_list_instances_rest(request_type): assert response.next_page_token == "next_page_token_value" +def test_list_instances_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.list_instances in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.list_instances] = mock_rpc + + request = {} + client.list_instances(request) + + # Establish that the underlying gRPC stub method was called. 
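
# Note on the caching check below: _prep_wrapped_messages runs once at client
# construction and stores one wrapped callable per RPC in
# _transport._wrapped_methods, so after the post-construction
# wrapper_fn.reset_mock() the later calls must not invoke
# google.api_core.gapic_v1.method.wrap_method again (wrapper_fn.call_count
# stays 0) while the mocked stub's call_count keeps climbing. A rough
# standalone sketch of the caching idea (hypothetical names, not the generated
# transport's API):
#
#     wrapped_methods = {}
#
#     def get_wrapped(rpc, wrap):
#         if rpc not in wrapped_methods:      # wrap once, at client creation
#             wrapped_methods[rpc] = wrap(rpc)
#         return wrapped_methods[rpc]         # reuse on every later call
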
+ assert mock_rpc.call_count == 1 + + client.list_instances(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + def test_list_instances_rest_required_fields( request_type=bigtable_instance_admin.ListInstancesRequest, ): @@ -7461,6 +10559,42 @@ def test_update_instance_rest(request_type): assert response.satisfies_pzs is True +def test_update_instance_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.update_instance in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.update_instance] = mock_rpc + + request = {} + client.update_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.update_instance(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + def test_update_instance_rest_required_fields(request_type=instance.Instance): transport_class = transports.BigtableInstanceAdminRestTransport @@ -7742,6 +10876,51 @@ def get_message_fields(field): assert response.operation.name == "operations/spam" +def test_partial_update_instance_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.partial_update_instance + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.partial_update_instance + ] = mock_rpc + + request = {} + client.partial_update_instance(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.partial_update_instance(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + def test_partial_update_instance_rest_required_fields( request_type=bigtable_instance_admin.PartialUpdateInstanceRequest, ): @@ -8008,8 +11187,44 @@ def test_delete_instance_rest(request_type): req.return_value = response_value response = client.delete_instance(request) - # Establish that the response is the type that we expect. - assert response is None + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_instance_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.delete_instance in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.delete_instance] = mock_rpc + + request = {} + client.delete_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.delete_instance(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 def test_delete_instance_rest_required_fields( @@ -8349,6 +11564,46 @@ def get_message_fields(field): assert response.operation.name == "operations/spam" +def test_create_cluster_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.create_cluster in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.create_cluster] = mock_rpc + + request = {} + client.create_cluster(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.create_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + def test_create_cluster_rest_required_fields( request_type=bigtable_instance_admin.CreateClusterRequest, ): @@ -8652,6 +11907,42 @@ def test_get_cluster_rest(request_type): assert response.default_storage_type == common.StorageType.SSD +def test_get_cluster_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.get_cluster in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.get_cluster] = mock_rpc + + request = {} + client.get_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + def test_get_cluster_rest_required_fields( request_type=bigtable_instance_admin.GetClusterRequest, ): @@ -8923,6 +12214,42 @@ def test_list_clusters_rest(request_type): assert response.next_page_token == "next_page_token_value" +def test_list_clusters_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.list_clusters in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.list_clusters] = mock_rpc + + request = {} + client.list_clusters(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.list_clusters(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + def test_list_clusters_rest_required_fields( request_type=bigtable_instance_admin.ListClustersRequest, ): @@ -9191,6 +12518,46 @@ def test_update_cluster_rest(request_type): assert response.operation.name == "operations/spam" +def test_update_cluster_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.update_cluster in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.update_cluster] = mock_rpc + + request = {} + client.update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.update_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + @pytest.mark.parametrize("null_interceptor", [True, False]) def test_update_cluster_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( @@ -9404,6 +12771,51 @@ def get_message_fields(field): assert response.operation.name == "operations/spam" +def test_partial_update_cluster_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.partial_update_cluster + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.partial_update_cluster + ] = mock_rpc + + request = {} + client.partial_update_cluster(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.partial_update_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + def test_partial_update_cluster_rest_required_fields( request_type=bigtable_instance_admin.PartialUpdateClusterRequest, ): @@ -9679,6 +13091,42 @@ def test_delete_cluster_rest(request_type): assert response is None +def test_delete_cluster_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.delete_cluster in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.delete_cluster] = mock_rpc + + request = {} + client.delete_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.delete_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + def test_delete_cluster_rest_required_fields( request_type=bigtable_instance_admin.DeleteClusterRequest, ): @@ -9923,6 +13371,7 @@ def test_create_app_profile_rest(request_type): }, "priority": 1, "standard_isolation": {"priority": 1}, + "data_boost_isolation_read_only": {"compute_billing_owner": 1}, } # The version of a generated dependency at test runtime may differ from the version used during generation. # Delete any fields which are not present in the current runtime dependency @@ -10023,6 +13472,46 @@ def get_message_fields(field): assert response.description == "description_value" +def test_create_app_profile_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.create_app_profile in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.create_app_profile + ] = mock_rpc + + request = {} + client.create_app_profile(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.create_app_profile(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + def test_create_app_profile_rest_required_fields( request_type=bigtable_instance_admin.CreateAppProfileRequest, ): @@ -10336,6 +13825,42 @@ def test_get_app_profile_rest(request_type): assert response.description == "description_value" +def test_get_app_profile_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.get_app_profile in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.get_app_profile] = mock_rpc + + request = {} + client.get_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_app_profile(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + def test_get_app_profile_rest_required_fields( request_type=bigtable_instance_admin.GetAppProfileRequest, ): @@ -10608,6 +14133,44 @@ def test_list_app_profiles_rest(request_type): assert response.failed_locations == ["failed_locations_value"] +def test_list_app_profiles_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.list_app_profiles in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.list_app_profiles + ] = mock_rpc + + request = {} + client.list_app_profiles(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.list_app_profiles(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + def test_list_app_profiles_rest_required_fields( request_type=bigtable_instance_admin.ListAppProfilesRequest, ): @@ -10946,6 +14509,7 @@ def test_update_app_profile_rest(request_type): }, "priority": 1, "standard_isolation": {"priority": 1}, + "data_boost_isolation_read_only": {"compute_billing_owner": 1}, } # The version of a generated dependency at test runtime may differ from the version used during generation. 
# Delete any fields which are not present in the current runtime dependency @@ -11036,6 +14600,50 @@ def get_message_fields(field): assert response.operation.name == "operations/spam" +def test_update_app_profile_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.update_app_profile in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.update_app_profile + ] = mock_rpc + + request = {} + client.update_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.update_app_profile(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + def test_update_app_profile_rest_required_fields( request_type=bigtable_instance_admin.UpdateAppProfileRequest, ): @@ -11325,6 +14933,46 @@ def test_delete_app_profile_rest(request_type): assert response is None +def test_delete_app_profile_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.delete_app_profile in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.delete_app_profile + ] = mock_rpc + + request = {} + client.delete_app_profile(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.delete_app_profile(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + def test_delete_app_profile_rest_required_fields( request_type=bigtable_instance_admin.DeleteAppProfileRequest, ): @@ -11608,6 +15256,42 @@ def test_get_iam_policy_rest(request_type): assert response.etag == b"etag_blob" +def test_get_iam_policy_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.get_iam_policy in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.get_iam_policy] = mock_rpc + + request = {} + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_iam_policy(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + def test_get_iam_policy_rest_required_fields( request_type=iam_policy_pb2.GetIamPolicyRequest, ): @@ -11871,6 +15555,42 @@ def test_set_iam_policy_rest(request_type): assert response.etag == b"etag_blob" +def test_set_iam_policy_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.set_iam_policy in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.set_iam_policy] = mock_rpc + + request = {} + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.set_iam_policy(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + def test_set_iam_policy_rest_required_fields( request_type=iam_policy_pb2.SetIamPolicyRequest, ): @@ -12140,6 +15860,46 @@ def test_test_iam_permissions_rest(request_type): assert response.permissions == ["permissions_value"] +def test_test_iam_permissions_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.test_iam_permissions in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.test_iam_permissions + ] = mock_rpc + + request = {} + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.test_iam_permissions(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + def test_test_iam_permissions_rest_required_fields( request_type=iam_policy_pb2.TestIamPermissionsRequest, ): @@ -12419,6 +16179,44 @@ def test_list_hot_tablets_rest(request_type): assert response.next_page_token == "next_page_token_value" +def test_list_hot_tablets_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.list_hot_tablets in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.list_hot_tablets + ] = mock_rpc + + request = {} + client.list_hot_tablets(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.list_hot_tablets(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + def test_list_hot_tablets_rest_required_fields( request_type=bigtable_instance_admin.ListHotTabletsRequest, ): diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index 67f02f9ce032..455ec88d8fbd 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -60,6 +60,7 @@ from google.cloud.bigtable_admin_v2.types import bigtable_table_admin from google.cloud.bigtable_admin_v2.types import table from google.cloud.bigtable_admin_v2.types import table as gba_table +from google.cloud.bigtable_admin_v2.types import types from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import options_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore @@ -1218,7 +1219,8 @@ def test_create_table(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CreateTableRequest() + request = bigtable_table_admin.CreateTableRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, gba_table.Table) @@ -1237,12 +1239,151 @@ def test_create_table_empty_call(): # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.create_table), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) client.create_table() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == bigtable_table_admin.CreateTableRequest() +def test_create_table_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.CreateTableRequest( + parent="parent_value", + table_id="table_id_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_table), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.create_table(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.CreateTableRequest( + parent="parent_value", + table_id="table_id_value", + ) + + +def test_create_table_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.create_table in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.create_table] = mock_rpc + request = {} + client.create_table(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.create_table(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_create_table_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gba_table.Table( + name="name_value", + granularity=gba_table.Table.TimestampGranularity.MILLIS, + deletion_protection=True, + ) + ) + response = await client.create_table() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.CreateTableRequest() + + +@pytest.mark.asyncio +async def test_create_table_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.create_table + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.create_table + ] = mock_object + + request = {} + await client.create_table(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_object.call_count == 1 + + await client.create_table(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + @pytest.mark.asyncio async def test_create_table_async( transport: str = "grpc_asyncio", @@ -1272,7 +1413,8 @@ async def test_create_table_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CreateTableRequest() + request = bigtable_table_admin.CreateTableRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, gba_table.Table) @@ -1473,7 +1615,8 @@ def test_create_table_from_snapshot(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CreateTableFromSnapshotRequest() + request = bigtable_table_admin.CreateTableFromSnapshotRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, future.Future) @@ -1491,12 +1634,166 @@ def test_create_table_from_snapshot_empty_call(): with mock.patch.object( type(client.transport.create_table_from_snapshot), "__call__" ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) client.create_table_from_snapshot() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == bigtable_table_admin.CreateTableFromSnapshotRequest() +def test_create_table_from_snapshot_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.CreateTableFromSnapshotRequest( + parent="parent_value", + table_id="table_id_value", + source_snapshot="source_snapshot_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_table_from_snapshot), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.create_table_from_snapshot(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.CreateTableFromSnapshotRequest( + parent="parent_value", + table_id="table_id_value", + source_snapshot="source_snapshot_value", + ) + + +def test_create_table_from_snapshot_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.create_table_from_snapshot + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.create_table_from_snapshot + ] = mock_rpc + request = {} + client.create_table_from_snapshot(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.create_table_from_snapshot(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_create_table_from_snapshot_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_table_from_snapshot), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.create_table_from_snapshot() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.CreateTableFromSnapshotRequest() + + +@pytest.mark.asyncio +async def test_create_table_from_snapshot_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.create_table_from_snapshot + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.create_table_from_snapshot + ] = mock_object + + request = {} + await client.create_table_from_snapshot(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.create_table_from_snapshot(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + @pytest.mark.asyncio async def test_create_table_from_snapshot_async( transport: str = "grpc_asyncio", @@ -1524,7 +1821,8 @@ async def test_create_table_from_snapshot_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CreateTableFromSnapshotRequest() + request = bigtable_table_admin.CreateTableFromSnapshotRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, future.Future) @@ -1734,7 +2032,8 @@ def test_list_tables(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ListTablesRequest() + request = bigtable_table_admin.ListTablesRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, pagers.ListTablesPager) @@ -1751,12 +2050,149 @@ def test_list_tables_empty_call(): # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.list_tables), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) client.list_tables() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == bigtable_table_admin.ListTablesRequest() +def test_list_tables_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. 
+ client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.ListTablesRequest( + parent="parent_value", + page_token="page_token_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_tables), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.list_tables(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.ListTablesRequest( + parent="parent_value", + page_token="page_token_value", + ) + + +def test_list_tables_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.list_tables in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.list_tables] = mock_rpc + request = {} + client.list_tables(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.list_tables(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_list_tables_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_tables), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.ListTablesResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_tables() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.ListTablesRequest() + + +@pytest.mark.asyncio +async def test_list_tables_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.list_tables + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.list_tables + ] = mock_object + + request = {} + await client.list_tables(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + await client.list_tables(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + @pytest.mark.asyncio async def test_list_tables_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.ListTablesRequest @@ -1783,7 +2219,8 @@ async def test_list_tables_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ListTablesRequest() + request = bigtable_table_admin.ListTablesRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, pagers.ListTablesAsyncPager) @@ -2158,7 +2595,8 @@ def test_get_table(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GetTableRequest() + request = bigtable_table_admin.GetTableRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, table.Table) @@ -2177,13 +2615,148 @@ def test_get_table_empty_call(): # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_table), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) client.get_table() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == bigtable_table_admin.GetTableRequest() -@pytest.mark.asyncio +def test_get_table_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. 
+ client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.GetTableRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_table), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.get_table(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.GetTableRequest( + name="name_value", + ) + + +def test_get_table_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.get_table in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.get_table] = mock_rpc + request = {} + client.get_table(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_table(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_get_table_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_table), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + table.Table( + name="name_value", + granularity=table.Table.TimestampGranularity.MILLIS, + deletion_protection=True, + ) + ) + response = await client.get_table() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.GetTableRequest() + + +@pytest.mark.asyncio +async def test_get_table_async_use_cached_wrapped_rpc(transport: str = "grpc_asyncio"): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.get_table + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.get_table + ] = mock_object + + request = {} + await client.get_table(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + await client.get_table(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + +@pytest.mark.asyncio async def test_get_table_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.GetTableRequest ): @@ -2211,7 +2784,8 @@ async def test_get_table_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GetTableRequest() + request = bigtable_table_admin.GetTableRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, table.Table) @@ -2390,7 +2964,8 @@ def test_update_table(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.UpdateTableRequest() + request = bigtable_table_admin.UpdateTableRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, future.Future) @@ -2406,12 +2981,149 @@ def test_update_table_empty_call(): # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.update_table), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) client.update_table() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == bigtable_table_admin.UpdateTableRequest() +def test_update_table_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. 
+ client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.UpdateTableRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_table), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.update_table(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.UpdateTableRequest() + + +def test_update_table_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.update_table in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.update_table] = mock_rpc + request = {} + client.update_table(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.update_table(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_update_table_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_table), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.update_table() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.UpdateTableRequest() + + +@pytest.mark.asyncio +async def test_update_table_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.update_table + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.update_table + ] = mock_object + + request = {} + await client.update_table(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.update_table(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + @pytest.mark.asyncio async def test_update_table_async( transport: str = "grpc_asyncio", @@ -2437,7 +3149,8 @@ async def test_update_table_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.UpdateTableRequest() + request = bigtable_table_admin.UpdateTableRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, future.Future) @@ -2627,7 +3340,8 @@ def test_delete_table(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DeleteTableRequest() + request = bigtable_table_admin.DeleteTableRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert response is None @@ -2643,12 +3357,143 @@ def test_delete_table_empty_call(): # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.delete_table), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) client.delete_table() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == bigtable_table_admin.DeleteTableRequest() +def test_delete_table_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. 
+ client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.DeleteTableRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_table), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.delete_table(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.DeleteTableRequest( + name="name_value", + ) + + +def test_delete_table_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.delete_table in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.delete_table] = mock_rpc + request = {} + client.delete_table(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.delete_table(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_delete_table_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_table), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_table() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.DeleteTableRequest() + + +@pytest.mark.asyncio +async def test_delete_table_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.delete_table + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.delete_table + ] = mock_object + + request = {} + await client.delete_table(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + await client.delete_table(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + @pytest.mark.asyncio async def test_delete_table_async( transport: str = "grpc_asyncio", @@ -2672,7 +3517,8 @@ async def test_delete_table_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DeleteTableRequest() + request = bigtable_table_admin.DeleteTableRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert response is None @@ -2848,7 +3694,8 @@ def test_undelete_table(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.UndeleteTableRequest() + request = bigtable_table_admin.UndeleteTableRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, future.Future) @@ -2864,39 +3711,181 @@ def test_undelete_table_empty_call(): # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.undelete_table), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) client.undelete_table() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == bigtable_table_admin.UndeleteTableRequest() -@pytest.mark.asyncio -async def test_undelete_table_async( - transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.UndeleteTableRequest, -): - client = BigtableTableAdminAsyncClient( +def test_undelete_table_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. 
+ client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + transport="grpc", ) - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.UndeleteTableRequest( + name="name_value", + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.undelete_table), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. ) - response = await client.undelete_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) + client.undelete_table(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.UndeleteTableRequest( + name="name_value", + ) + + +def test_undelete_table_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.undelete_table in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.undelete_table] = mock_rpc + request = {} + client.undelete_table(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.undelete_table(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_undelete_table_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.undelete_table), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.undelete_table() + call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == bigtable_table_admin.UndeleteTableRequest() + +@pytest.mark.asyncio +async def test_undelete_table_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.undelete_table + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.undelete_table + ] = mock_object + + request = {} + await client.undelete_table(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.undelete_table(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + +@pytest.mark.asyncio +async def test_undelete_table_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.UndeleteTableRequest, +): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.undelete_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.undelete_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = bigtable_table_admin.UndeleteTableRequest() + assert args[0] == request + # Establish that the response is the type that we expect. 
assert isinstance(response, future.Future) @@ -3052,11 +4041,11 @@ async def test_undelete_table_flattened_error_async(): @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.ModifyColumnFamiliesRequest, + bigtable_table_admin.CreateAuthorizedViewRequest, dict, ], ) -def test_modify_column_families(request_type, transport: str = "grpc"): +def test_create_authorized_view(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -3068,29 +4057,23 @@ def test_modify_column_families(request_type, transport: str = "grpc"): # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.modify_column_families), "__call__" + type(client.transport.create_authorized_view), "__call__" ) as call: # Designate an appropriate return value for the call. - call.return_value = table.Table( - name="name_value", - granularity=table.Table.TimestampGranularity.MILLIS, - deletion_protection=True, - ) - response = client.modify_column_families(request) + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.create_authorized_view(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ModifyColumnFamiliesRequest() + request = bigtable_table_admin.CreateAuthorizedViewRequest() + assert args[0] == request # Establish that the response is the type that we expect. - assert isinstance(response, table.Table) - assert response.name == "name_value" - assert response.granularity == table.Table.TimestampGranularity.MILLIS - assert response.deletion_protection is True + assert isinstance(response, future.Future) -def test_modify_column_families_empty_call(): +def test_create_authorized_view_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( @@ -3100,18 +4083,170 @@ def test_modify_column_families_empty_call(): # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.modify_column_families), "__call__" + type(client.transport.create_authorized_view), "__call__" ) as call: - client.modify_column_families() + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.create_authorized_view() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ModifyColumnFamiliesRequest() + assert args[0] == bigtable_table_admin.CreateAuthorizedViewRequest() + + +def test_create_authorized_view_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.CreateAuthorizedViewRequest( + parent="parent_value", + authorized_view_id="authorized_view_id_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_authorized_view), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.create_authorized_view(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.CreateAuthorizedViewRequest( + parent="parent_value", + authorized_view_id="authorized_view_id_value", + ) + + +def test_create_authorized_view_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.create_authorized_view + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.create_authorized_view + ] = mock_rpc + request = {} + client.create_authorized_view(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.create_authorized_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio -async def test_modify_column_families_async( +async def test_create_authorized_view_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_authorized_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.create_authorized_view() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.CreateAuthorizedViewRequest() + + +@pytest.mark.asyncio +async def test_create_authorized_view_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.ModifyColumnFamiliesRequest, +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.create_authorized_view + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.create_authorized_view + ] = mock_object + + request = {} + await client.create_authorized_view(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.create_authorized_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + +@pytest.mark.asyncio +async def test_create_authorized_view_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.CreateAuthorizedViewRequest, ): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), @@ -3124,52 +4259,46 @@ async def test_modify_column_families_async( # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.modify_column_families), "__call__" + type(client.transport.create_authorized_view), "__call__" ) as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - table.Table( - name="name_value", - granularity=table.Table.TimestampGranularity.MILLIS, - deletion_protection=True, - ) + operations_pb2.Operation(name="operations/spam") ) - response = await client.modify_column_families(request) + response = await client.create_authorized_view(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ModifyColumnFamiliesRequest() + request = bigtable_table_admin.CreateAuthorizedViewRequest() + assert args[0] == request # Establish that the response is the type that we expect. 
- assert isinstance(response, table.Table) - assert response.name == "name_value" - assert response.granularity == table.Table.TimestampGranularity.MILLIS - assert response.deletion_protection is True + assert isinstance(response, future.Future) @pytest.mark.asyncio -async def test_modify_column_families_async_from_dict(): - await test_modify_column_families_async(request_type=dict) +async def test_create_authorized_view_async_from_dict(): + await test_create_authorized_view_async(request_type=dict) -def test_modify_column_families_field_headers(): +def test_create_authorized_view_field_headers(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable_table_admin.ModifyColumnFamiliesRequest() + request = bigtable_table_admin.CreateAuthorizedViewRequest() - request.name = "name_value" + request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.modify_column_families), "__call__" + type(client.transport.create_authorized_view), "__call__" ) as call: - call.return_value = table.Table() - client.modify_column_families(request) + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_authorized_view(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 @@ -3180,28 +4309,30 @@ def test_modify_column_families_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "name=name_value", + "parent=parent_value", ) in kw["metadata"] @pytest.mark.asyncio -async def test_modify_column_families_field_headers_async(): +async def test_create_authorized_view_field_headers_async(): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable_table_admin.ModifyColumnFamiliesRequest() + request = bigtable_table_admin.CreateAuthorizedViewRequest() - request.name = "name_value" + request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.modify_column_families), "__call__" + type(client.transport.create_authorized_view), "__call__" ) as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Table()) - await client.modify_column_families(request) + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.create_authorized_view(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) @@ -3212,47 +4343,45 @@ async def test_modify_column_families_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "name=name_value", + "parent=parent_value", ) in kw["metadata"] -def test_modify_column_families_flattened(): +def test_create_authorized_view_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.modify_column_families), "__call__" + type(client.transport.create_authorized_view), "__call__" ) as call: # Designate an appropriate return value for the call. 
- call.return_value = table.Table() + call.return_value = operations_pb2.Operation(name="operations/op") # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - client.modify_column_families( - name="name_value", - modifications=[ - bigtable_table_admin.ModifyColumnFamiliesRequest.Modification( - id="id_value" - ) - ], + client.create_authorized_view( + parent="parent_value", + authorized_view=table.AuthorizedView(name="name_value"), + authorized_view_id="authorized_view_id_value", ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" + arg = args[0].parent + mock_val = "parent_value" assert arg == mock_val - arg = args[0].modifications - mock_val = [ - bigtable_table_admin.ModifyColumnFamiliesRequest.Modification(id="id_value") - ] + arg = args[0].authorized_view + mock_val = table.AuthorizedView(name="name_value") + assert arg == mock_val + arg = args[0].authorized_view_id + mock_val = "authorized_view_id_value" assert arg == mock_val -def test_modify_column_families_flattened_error(): +def test_create_authorized_view_flattened_error(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -3260,58 +4389,55 @@ def test_modify_column_families_flattened_error(): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.modify_column_families( - bigtable_table_admin.ModifyColumnFamiliesRequest(), - name="name_value", - modifications=[ - bigtable_table_admin.ModifyColumnFamiliesRequest.Modification( - id="id_value" - ) - ], + client.create_authorized_view( + bigtable_table_admin.CreateAuthorizedViewRequest(), + parent="parent_value", + authorized_view=table.AuthorizedView(name="name_value"), + authorized_view_id="authorized_view_id_value", ) @pytest.mark.asyncio -async def test_modify_column_families_flattened_async(): +async def test_create_authorized_view_flattened_async(): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.modify_column_families), "__call__" + type(client.transport.create_authorized_view), "__call__" ) as call: # Designate an appropriate return value for the call. - call.return_value = table.Table() + call.return_value = operations_pb2.Operation(name="operations/op") - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Table()) + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.modify_column_families( - name="name_value", - modifications=[ - bigtable_table_admin.ModifyColumnFamiliesRequest.Modification( - id="id_value" - ) - ], + response = await client.create_authorized_view( + parent="parent_value", + authorized_view=table.AuthorizedView(name="name_value"), + authorized_view_id="authorized_view_id_value", ) # Establish that the underlying call was made with the expected # request object values. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" + arg = args[0].parent + mock_val = "parent_value" assert arg == mock_val - arg = args[0].modifications - mock_val = [ - bigtable_table_admin.ModifyColumnFamiliesRequest.Modification(id="id_value") - ] + arg = args[0].authorized_view + mock_val = table.AuthorizedView(name="name_value") + assert arg == mock_val + arg = args[0].authorized_view_id + mock_val = "authorized_view_id_value" assert arg == mock_val @pytest.mark.asyncio -async def test_modify_column_families_flattened_error_async(): +async def test_create_authorized_view_flattened_error_async(): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -3319,25 +4445,22 @@ async def test_modify_column_families_flattened_error_async(): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - await client.modify_column_families( - bigtable_table_admin.ModifyColumnFamiliesRequest(), - name="name_value", - modifications=[ - bigtable_table_admin.ModifyColumnFamiliesRequest.Modification( - id="id_value" - ) - ], + await client.create_authorized_view( + bigtable_table_admin.CreateAuthorizedViewRequest(), + parent="parent_value", + authorized_view=table.AuthorizedView(name="name_value"), + authorized_view_id="authorized_view_id_value", ) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.DropRowRangeRequest, + bigtable_table_admin.ListAuthorizedViewsRequest, dict, ], ) -def test_drop_row_range(request_type, transport: str = "grpc"): +def test_list_authorized_views(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -3348,21 +4471,27 @@ def test_drop_row_range(request_type, transport: str = "grpc"): request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: + with mock.patch.object( + type(client.transport.list_authorized_views), "__call__" + ) as call: # Designate an appropriate return value for the call. - call.return_value = None - response = client.drop_row_range(request) + call.return_value = bigtable_table_admin.ListAuthorizedViewsResponse( + next_page_token="next_page_token_value", + ) + response = client.list_authorized_views(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DropRowRangeRequest() + request = bigtable_table_admin.ListAuthorizedViewsRequest() + assert args[0] == request # Establish that the response is the type that we expect. - assert response is None + assert isinstance(response, pagers.ListAuthorizedViewsPager) + assert response.next_page_token == "next_page_token_value" -def test_drop_row_range_empty_call(): +def test_list_authorized_views_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( @@ -3371,165 +4500,165 @@ def test_drop_row_range_empty_call(): ) # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: - client.drop_row_range() + with mock.patch.object( + type(client.transport.list_authorized_views), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.list_authorized_views() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DropRowRangeRequest() + assert args[0] == bigtable_table_admin.ListAuthorizedViewsRequest() -@pytest.mark.asyncio -async def test_drop_row_range_async( - transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.DropRowRangeRequest, -): - client = BigtableTableAdminAsyncClient( +def test_list_authorized_views_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + transport="grpc", ) - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.ListAuthorizedViewsRequest( + parent="parent_value", + page_token="page_token_value", + ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.drop_row_range(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) + with mock.patch.object( + type(client.transport.list_authorized_views), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.list_authorized_views(request=request) + call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DropRowRangeRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.asyncio -async def test_drop_row_range_async_from_dict(): - await test_drop_row_range_async(request_type=dict) + assert args[0] == bigtable_table_admin.ListAuthorizedViewsRequest( + parent="parent_value", + page_token="page_token_value", + ) -def test_drop_row_range_field_headers(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) +def test_list_authorized_views_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = bigtable_table_admin.DropRowRangeRequest() + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - request.name = "name_value" + # Ensure method has been cached + assert ( + client._transport.list_authorized_views + in client._transport._wrapped_methods + ) - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: - call.return_value = None - client.drop_row_range(request) + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.list_authorized_views + ] = mock_rpc + request = {} + client.list_authorized_views(request) # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request + assert mock_rpc.call_count == 1 - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] + client.list_authorized_views(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio -async def test_drop_row_range_field_headers_async(): +async def test_list_authorized_views_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", ) - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.DropRowRangeRequest() - - request.name = "name_value" - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.drop_row_range(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) + with mock.patch.object( + type(client.transport.list_authorized_views), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.ListAuthorizedViewsResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_authorized_views() + call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "name=name_value", - ) in kw["metadata"] + assert args[0] == bigtable_table_admin.ListAuthorizedViewsRequest() -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.GenerateConsistencyTokenRequest, - dict, - ], -) -def test_generate_consistency_token(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) +@pytest.mark.asyncio +async def test_list_authorized_views_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.generate_consistency_token), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse( - consistency_token="consistency_token_value", + # Ensure method has been cached + assert ( + client._client._transport.list_authorized_views + in client._client._transport._wrapped_methods ) - response = client.generate_consistency_token(request) - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GenerateConsistencyTokenRequest() + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) - # Establish that the response is the type that we expect. - assert isinstance(response, bigtable_table_admin.GenerateConsistencyTokenResponse) - assert response.consistency_token == "consistency_token_value" + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.list_authorized_views + ] = mock_object + request = {} + await client.list_authorized_views(request) -def test_generate_consistency_token_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.generate_consistency_token), "__call__" - ) as call: - client.generate_consistency_token() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GenerateConsistencyTokenRequest() + await client.list_authorized_views(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 @pytest.mark.asyncio -async def test_generate_consistency_token_async( +async def test_list_authorized_views_async( transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.GenerateConsistencyTokenRequest, + request_type=bigtable_table_admin.ListAuthorizedViewsRequest, ): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), @@ -3542,48 +4671,49 @@ async def test_generate_consistency_token_async( # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.generate_consistency_token), "__call__" + type(client.transport.list_authorized_views), "__call__" ) as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.GenerateConsistencyTokenResponse( - consistency_token="consistency_token_value", + bigtable_table_admin.ListAuthorizedViewsResponse( + next_page_token="next_page_token_value", ) ) - response = await client.generate_consistency_token(request) + response = await client.list_authorized_views(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GenerateConsistencyTokenRequest() + request = bigtable_table_admin.ListAuthorizedViewsRequest() + assert args[0] == request # Establish that the response is the type that we expect. - assert isinstance(response, bigtable_table_admin.GenerateConsistencyTokenResponse) - assert response.consistency_token == "consistency_token_value" + assert isinstance(response, pagers.ListAuthorizedViewsAsyncPager) + assert response.next_page_token == "next_page_token_value" @pytest.mark.asyncio -async def test_generate_consistency_token_async_from_dict(): - await test_generate_consistency_token_async(request_type=dict) +async def test_list_authorized_views_async_from_dict(): + await test_list_authorized_views_async(request_type=dict) -def test_generate_consistency_token_field_headers(): +def test_list_authorized_views_field_headers(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable_table_admin.GenerateConsistencyTokenRequest() + request = bigtable_table_admin.ListAuthorizedViewsRequest() - request.name = "name_value" + request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.generate_consistency_token), "__call__" + type(client.transport.list_authorized_views), "__call__" ) as call: - call.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() - client.generate_consistency_token(request) + call.return_value = bigtable_table_admin.ListAuthorizedViewsResponse() + client.list_authorized_views(request) # Establish that the underlying gRPC stub method was called. 
assert len(call.mock_calls) == 1 @@ -3594,30 +4724,30 @@ def test_generate_consistency_token_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "name=name_value", + "parent=parent_value", ) in kw["metadata"] @pytest.mark.asyncio -async def test_generate_consistency_token_field_headers_async(): +async def test_list_authorized_views_field_headers_async(): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable_table_admin.GenerateConsistencyTokenRequest() + request = bigtable_table_admin.ListAuthorizedViewsRequest() - request.name = "name_value" + request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.generate_consistency_token), "__call__" + type(client.transport.list_authorized_views), "__call__" ) as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.GenerateConsistencyTokenResponse() + bigtable_table_admin.ListAuthorizedViewsResponse() ) - await client.generate_consistency_token(request) + await client.list_authorized_views(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) @@ -3628,37 +4758,37 @@ async def test_generate_consistency_token_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "name=name_value", + "parent=parent_value", ) in kw["metadata"] -def test_generate_consistency_token_flattened(): +def test_list_authorized_views_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.generate_consistency_token), "__call__" + type(client.transport.list_authorized_views), "__call__" ) as call: # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() + call.return_value = bigtable_table_admin.ListAuthorizedViewsResponse() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - client.generate_consistency_token( - name="name_value", + client.list_authorized_views( + parent="parent_value", ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" + arg = args[0].parent + mock_val = "parent_value" assert arg == mock_val -def test_generate_consistency_token_flattened_error(): +def test_list_authorized_views_flattened_error(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -3666,45 +4796,45 @@ def test_generate_consistency_token_flattened_error(): # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): - client.generate_consistency_token( - bigtable_table_admin.GenerateConsistencyTokenRequest(), - name="name_value", + client.list_authorized_views( + bigtable_table_admin.ListAuthorizedViewsRequest(), + parent="parent_value", ) @pytest.mark.asyncio -async def test_generate_consistency_token_flattened_async(): +async def test_list_authorized_views_flattened_async(): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.generate_consistency_token), "__call__" + type(client.transport.list_authorized_views), "__call__" ) as call: # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() + call.return_value = bigtable_table_admin.ListAuthorizedViewsResponse() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.GenerateConsistencyTokenResponse() + bigtable_table_admin.ListAuthorizedViewsResponse() ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.generate_consistency_token( - name="name_value", + response = await client.list_authorized_views( + parent="parent_value", ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" + arg = args[0].parent + mock_val = "parent_value" assert arg == mock_val @pytest.mark.asyncio -async def test_generate_consistency_token_flattened_error_async(): +async def test_list_authorized_views_flattened_error_async(): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -3712,20 +4842,218 @@ async def test_generate_consistency_token_flattened_error_async(): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - await client.generate_consistency_token( - bigtable_table_admin.GenerateConsistencyTokenRequest(), - name="name_value", + await client.list_authorized_views( + bigtable_table_admin.ListAuthorizedViewsRequest(), + parent="parent_value", + ) + + +def test_list_authorized_views_pager(transport_name: str = "grpc"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_authorized_views), "__call__" + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + bigtable_table_admin.ListAuthorizedViewsResponse( + authorized_views=[ + table.AuthorizedView(), + table.AuthorizedView(), + table.AuthorizedView(), + ], + next_page_token="abc", + ), + bigtable_table_admin.ListAuthorizedViewsResponse( + authorized_views=[], + next_page_token="def", + ), + bigtable_table_admin.ListAuthorizedViewsResponse( + authorized_views=[ + table.AuthorizedView(), + ], + next_page_token="ghi", + ), + bigtable_table_admin.ListAuthorizedViewsResponse( + authorized_views=[ + table.AuthorizedView(), + table.AuthorizedView(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_authorized_views(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, table.AuthorizedView) for i in results) + + +def test_list_authorized_views_pages(transport_name: str = "grpc"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_authorized_views), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + bigtable_table_admin.ListAuthorizedViewsResponse( + authorized_views=[ + table.AuthorizedView(), + table.AuthorizedView(), + table.AuthorizedView(), + ], + next_page_token="abc", + ), + bigtable_table_admin.ListAuthorizedViewsResponse( + authorized_views=[], + next_page_token="def", + ), + bigtable_table_admin.ListAuthorizedViewsResponse( + authorized_views=[ + table.AuthorizedView(), + ], + next_page_token="ghi", + ), + bigtable_table_admin.ListAuthorizedViewsResponse( + authorized_views=[ + table.AuthorizedView(), + table.AuthorizedView(), + ], + ), + RuntimeError, + ) + pages = list(client.list_authorized_views(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_authorized_views_async_pager(): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_authorized_views), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + bigtable_table_admin.ListAuthorizedViewsResponse( + authorized_views=[ + table.AuthorizedView(), + table.AuthorizedView(), + table.AuthorizedView(), + ], + next_page_token="abc", + ), + bigtable_table_admin.ListAuthorizedViewsResponse( + authorized_views=[], + next_page_token="def", + ), + bigtable_table_admin.ListAuthorizedViewsResponse( + authorized_views=[ + table.AuthorizedView(), + ], + next_page_token="ghi", + ), + bigtable_table_admin.ListAuthorizedViewsResponse( + authorized_views=[ + table.AuthorizedView(), + table.AuthorizedView(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_authorized_views( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, table.AuthorizedView) for i in responses) + + +@pytest.mark.asyncio +async def test_list_authorized_views_async_pages(): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_authorized_views), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + bigtable_table_admin.ListAuthorizedViewsResponse( + authorized_views=[ + table.AuthorizedView(), + table.AuthorizedView(), + table.AuthorizedView(), + ], + next_page_token="abc", + ), + bigtable_table_admin.ListAuthorizedViewsResponse( + authorized_views=[], + next_page_token="def", + ), + bigtable_table_admin.ListAuthorizedViewsResponse( + authorized_views=[ + table.AuthorizedView(), + ], + next_page_token="ghi", + ), + bigtable_table_admin.ListAuthorizedViewsResponse( + authorized_views=[ + table.AuthorizedView(), + table.AuthorizedView(), + ], + ), + RuntimeError, ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_authorized_views(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.CheckConsistencyRequest, + bigtable_table_admin.GetAuthorizedViewRequest, dict, ], ) -def test_check_consistency(request_type, transport: str = "grpc"): +def test_get_authorized_view(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -3737,25 +5065,30 @@ def test_check_consistency(request_type, transport: str = "grpc"): # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.check_consistency), "__call__" + type(client.transport.get_authorized_view), "__call__" ) as call: # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.CheckConsistencyResponse( - consistent=True, + call.return_value = table.AuthorizedView( + name="name_value", + etag="etag_value", + deletion_protection=True, ) - response = client.check_consistency(request) + response = client.get_authorized_view(request) # Establish that the underlying gRPC stub method was called. 
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CheckConsistencyRequest() + request = bigtable_table_admin.GetAuthorizedViewRequest() + assert args[0] == request # Establish that the response is the type that we expect. - assert isinstance(response, bigtable_table_admin.CheckConsistencyResponse) - assert response.consistent is True + assert isinstance(response, table.AuthorizedView) + assert response.name == "name_value" + assert response.etag == "etag_value" + assert response.deletion_protection is True -def test_check_consistency_empty_call(): +def test_get_authorized_view_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( @@ -3765,18 +5098,163 @@ def test_check_consistency_empty_call(): # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.check_consistency), "__call__" + type(client.transport.get_authorized_view), "__call__" ) as call: - client.check_consistency() + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.get_authorized_view() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CheckConsistencyRequest() + assert args[0] == bigtable_table_admin.GetAuthorizedViewRequest() + + +def test_get_authorized_view_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.GetAuthorizedViewRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_authorized_view), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.get_authorized_view(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.GetAuthorizedViewRequest( + name="name_value", + ) + + +def test_get_authorized_view_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.get_authorized_view in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.get_authorized_view + ] = mock_rpc + request = {} + client.get_authorized_view(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.get_authorized_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio -async def test_check_consistency_async( +async def test_get_authorized_view_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_authorized_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + table.AuthorizedView( + name="name_value", + etag="etag_value", + deletion_protection=True, + ) + ) + response = await client.get_authorized_view() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.GetAuthorizedViewRequest() + + +@pytest.mark.asyncio +async def test_get_authorized_view_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.CheckConsistencyRequest, +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.get_authorized_view + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.get_authorized_view + ] = mock_object + + request = {} + await client.get_authorized_view(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + await client.get_authorized_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + +@pytest.mark.asyncio +async def test_get_authorized_view_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.GetAuthorizedViewRequest, ): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), @@ -3789,48 +5267,53 @@ async def test_check_consistency_async( # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.check_consistency), "__call__" + type(client.transport.get_authorized_view), "__call__" ) as call: # Designate an appropriate return value for the call. 
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.CheckConsistencyResponse( - consistent=True, + table.AuthorizedView( + name="name_value", + etag="etag_value", + deletion_protection=True, ) ) - response = await client.check_consistency(request) + response = await client.get_authorized_view(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CheckConsistencyRequest() + request = bigtable_table_admin.GetAuthorizedViewRequest() + assert args[0] == request # Establish that the response is the type that we expect. - assert isinstance(response, bigtable_table_admin.CheckConsistencyResponse) - assert response.consistent is True + assert isinstance(response, table.AuthorizedView) + assert response.name == "name_value" + assert response.etag == "etag_value" + assert response.deletion_protection is True @pytest.mark.asyncio -async def test_check_consistency_async_from_dict(): - await test_check_consistency_async(request_type=dict) +async def test_get_authorized_view_async_from_dict(): + await test_get_authorized_view_async(request_type=dict) -def test_check_consistency_field_headers(): +def test_get_authorized_view_field_headers(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable_table_admin.CheckConsistencyRequest() + request = bigtable_table_admin.GetAuthorizedViewRequest() request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.check_consistency), "__call__" + type(client.transport.get_authorized_view), "__call__" ) as call: - call.return_value = bigtable_table_admin.CheckConsistencyResponse() - client.check_consistency(request) + call.return_value = table.AuthorizedView() + client.get_authorized_view(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 @@ -3846,25 +5329,25 @@ def test_check_consistency_field_headers(): @pytest.mark.asyncio -async def test_check_consistency_field_headers_async(): +async def test_get_authorized_view_field_headers_async(): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable_table_admin.CheckConsistencyRequest() + request = bigtable_table_admin.GetAuthorizedViewRequest() request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.check_consistency), "__call__" + type(client.transport.get_authorized_view), "__call__" ) as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.CheckConsistencyResponse() + table.AuthorizedView() ) - await client.check_consistency(request) + await client.get_authorized_view(request) # Establish that the underlying gRPC stub method was called. 
assert len(call.mock_calls) @@ -3879,22 +5362,21 @@ async def test_check_consistency_field_headers_async(): ) in kw["metadata"] -def test_check_consistency_flattened(): +def test_get_authorized_view_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.check_consistency), "__call__" + type(client.transport.get_authorized_view), "__call__" ) as call: # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.CheckConsistencyResponse() + call.return_value = table.AuthorizedView() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - client.check_consistency( + client.get_authorized_view( name="name_value", - consistency_token="consistency_token_value", ) # Establish that the underlying call was made with the expected @@ -3904,12 +5386,9 @@ def test_check_consistency_flattened(): arg = args[0].name mock_val = "name_value" assert arg == mock_val - arg = args[0].consistency_token - mock_val = "consistency_token_value" - assert arg == mock_val -def test_check_consistency_flattened_error(): +def test_get_authorized_view_flattened_error(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -3917,34 +5396,32 @@ def test_check_consistency_flattened_error(): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.check_consistency( - bigtable_table_admin.CheckConsistencyRequest(), + client.get_authorized_view( + bigtable_table_admin.GetAuthorizedViewRequest(), name="name_value", - consistency_token="consistency_token_value", ) @pytest.mark.asyncio -async def test_check_consistency_flattened_async(): +async def test_get_authorized_view_flattened_async(): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.check_consistency), "__call__" + type(client.transport.get_authorized_view), "__call__" ) as call: # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.CheckConsistencyResponse() + call.return_value = table.AuthorizedView() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.CheckConsistencyResponse() + table.AuthorizedView() ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.check_consistency( + response = await client.get_authorized_view( name="name_value", - consistency_token="consistency_token_value", ) # Establish that the underlying call was made with the expected @@ -3954,13 +5431,10 @@ async def test_check_consistency_flattened_async(): arg = args[0].name mock_val = "name_value" assert arg == mock_val - arg = args[0].consistency_token - mock_val = "consistency_token_value" - assert arg == mock_val @pytest.mark.asyncio -async def test_check_consistency_flattened_error_async(): +async def test_get_authorized_view_flattened_error_async(): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -3968,21 +5442,20 @@ async def test_check_consistency_flattened_error_async(): # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): - await client.check_consistency( - bigtable_table_admin.CheckConsistencyRequest(), + await client.get_authorized_view( + bigtable_table_admin.GetAuthorizedViewRequest(), name="name_value", - consistency_token="consistency_token_value", ) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.SnapshotTableRequest, + bigtable_table_admin.UpdateAuthorizedViewRequest, dict, ], ) -def test_snapshot_table(request_type, transport: str = "grpc"): +def test_update_authorized_view(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -3993,21 +5466,24 @@ def test_snapshot_table(request_type, transport: str = "grpc"): request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: + with mock.patch.object( + type(client.transport.update_authorized_view), "__call__" + ) as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/spam") - response = client.snapshot_table(request) + response = client.update_authorized_view(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.SnapshotTableRequest() + request = bigtable_table_admin.UpdateAuthorizedViewRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, future.Future) -def test_snapshot_table_empty_call(): +def test_update_authorized_view_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( @@ -4016,17 +5492,165 @@ def test_snapshot_table_empty_call(): ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: - client.snapshot_table() + with mock.patch.object( + type(client.transport.update_authorized_view), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.update_authorized_view() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.SnapshotTableRequest() + assert args[0] == bigtable_table_admin.UpdateAuthorizedViewRequest() + + +def test_update_authorized_view_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.UpdateAuthorizedViewRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_authorized_view), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.update_authorized_view(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.UpdateAuthorizedViewRequest() + + +def test_update_authorized_view_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.update_authorized_view + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.update_authorized_view + ] = mock_rpc + request = {} + client.update_authorized_view(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.update_authorized_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio -async def test_snapshot_table_async( +async def test_update_authorized_view_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_authorized_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.update_authorized_view() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.UpdateAuthorizedViewRequest() + + +@pytest.mark.asyncio +async def test_update_authorized_view_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.SnapshotTableRequest, +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.update_authorized_view + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.update_authorized_view + ] = mock_object + + request = {} + await client.update_authorized_view(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.update_authorized_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + +@pytest.mark.asyncio +async def test_update_authorized_view_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.UpdateAuthorizedViewRequest, ): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), @@ -4038,42 +5662,47 @@ async def test_snapshot_table_async( request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: + with mock.patch.object( + type(client.transport.update_authorized_view), "__call__" + ) as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/spam") ) - response = await client.snapshot_table(request) + response = await client.update_authorized_view(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.SnapshotTableRequest() + request = bigtable_table_admin.UpdateAuthorizedViewRequest() + assert args[0] == request # Establish that the response is the type that we expect. 
assert isinstance(response, future.Future) @pytest.mark.asyncio -async def test_snapshot_table_async_from_dict(): - await test_snapshot_table_async(request_type=dict) +async def test_update_authorized_view_async_from_dict(): + await test_update_authorized_view_async(request_type=dict) -def test_snapshot_table_field_headers(): +def test_update_authorized_view_field_headers(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable_table_admin.SnapshotTableRequest() + request = bigtable_table_admin.UpdateAuthorizedViewRequest() - request.name = "name_value" + request.authorized_view.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: + with mock.patch.object( + type(client.transport.update_authorized_view), "__call__" + ) as call: call.return_value = operations_pb2.Operation(name="operations/op") - client.snapshot_table(request) + client.update_authorized_view(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 @@ -4084,28 +5713,30 @@ def test_snapshot_table_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "name=name_value", + "authorized_view.name=name_value", ) in kw["metadata"] @pytest.mark.asyncio -async def test_snapshot_table_field_headers_async(): +async def test_update_authorized_view_field_headers_async(): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable_table_admin.SnapshotTableRequest() + request = bigtable_table_admin.UpdateAuthorizedViewRequest() - request.name = "name_value" + request.authorized_view.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: + with mock.patch.object( + type(client.transport.update_authorized_view), "__call__" + ) as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/op") ) - await client.snapshot_table(request) + await client.update_authorized_view(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) @@ -4116,47 +5747,41 @@ async def test_snapshot_table_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "name=name_value", + "authorized_view.name=name_value", ) in kw["metadata"] -def test_snapshot_table_flattened(): +def test_update_authorized_view_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: + with mock.patch.object( + type(client.transport.update_authorized_view), "__call__" + ) as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/op") # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. 
- client.snapshot_table( - name="name_value", - cluster="cluster_value", - snapshot_id="snapshot_id_value", - description="description_value", + client.update_authorized_view( + authorized_view=table.AuthorizedView(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - arg = args[0].cluster - mock_val = "cluster_value" + arg = args[0].authorized_view + mock_val = table.AuthorizedView(name="name_value") assert arg == mock_val - arg = args[0].snapshot_id - mock_val = "snapshot_id_value" - assert arg == mock_val - arg = args[0].description - mock_val = "description_value" + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) assert arg == mock_val -def test_snapshot_table_flattened_error(): +def test_update_authorized_view_flattened_error(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -4164,23 +5789,23 @@ def test_snapshot_table_flattened_error(): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.snapshot_table( - bigtable_table_admin.SnapshotTableRequest(), - name="name_value", - cluster="cluster_value", - snapshot_id="snapshot_id_value", - description="description_value", + client.update_authorized_view( + bigtable_table_admin.UpdateAuthorizedViewRequest(), + authorized_view=table.AuthorizedView(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) @pytest.mark.asyncio -async def test_snapshot_table_flattened_async(): +async def test_update_authorized_view_flattened_async(): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: + with mock.patch.object( + type(client.transport.update_authorized_view), "__call__" + ) as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/op") @@ -4189,33 +5814,25 @@ async def test_snapshot_table_flattened_async(): ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.snapshot_table( - name="name_value", - cluster="cluster_value", - snapshot_id="snapshot_id_value", - description="description_value", + response = await client.update_authorized_view( + authorized_view=table.AuthorizedView(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) # Establish that the underlying call was made with the expected # request object values. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - arg = args[0].cluster - mock_val = "cluster_value" - assert arg == mock_val - arg = args[0].snapshot_id - mock_val = "snapshot_id_value" + arg = args[0].authorized_view + mock_val = table.AuthorizedView(name="name_value") assert arg == mock_val - arg = args[0].description - mock_val = "description_value" + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) assert arg == mock_val @pytest.mark.asyncio -async def test_snapshot_table_flattened_error_async(): +async def test_update_authorized_view_flattened_error_async(): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -4223,23 +5840,21 @@ async def test_snapshot_table_flattened_error_async(): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - await client.snapshot_table( - bigtable_table_admin.SnapshotTableRequest(), - name="name_value", - cluster="cluster_value", - snapshot_id="snapshot_id_value", - description="description_value", + await client.update_authorized_view( + bigtable_table_admin.UpdateAuthorizedViewRequest(), + authorized_view=table.AuthorizedView(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.GetSnapshotRequest, + bigtable_table_admin.DeleteAuthorizedViewRequest, dict, ], ) -def test_get_snapshot(request_type, transport: str = "grpc"): +def test_delete_authorized_view(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -4250,30 +5865,24 @@ def test_get_snapshot(request_type, transport: str = "grpc"): request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: + with mock.patch.object( + type(client.transport.delete_authorized_view), "__call__" + ) as call: # Designate an appropriate return value for the call. - call.return_value = table.Snapshot( - name="name_value", - data_size_bytes=1594, - state=table.Snapshot.State.READY, - description="description_value", - ) - response = client.get_snapshot(request) + call.return_value = None + response = client.delete_authorized_view(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GetSnapshotRequest() + request = bigtable_table_admin.DeleteAuthorizedViewRequest() + assert args[0] == request # Establish that the response is the type that we expect. - assert isinstance(response, table.Snapshot) - assert response.name == "name_value" - assert response.data_size_bytes == 1594 - assert response.state == table.Snapshot.State.READY - assert response.description == "description_value" + assert response is None -def test_get_snapshot_empty_call(): +def test_delete_authorized_view_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( @@ -4282,73 +5891,211 @@ def test_get_snapshot_empty_call(): ) # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: - client.get_snapshot() + with mock.patch.object( + type(client.transport.delete_authorized_view), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.delete_authorized_view() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GetSnapshotRequest() + assert args[0] == bigtable_table_admin.DeleteAuthorizedViewRequest() -@pytest.mark.asyncio -async def test_get_snapshot_async( - transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.GetSnapshotRequest, -): - client = BigtableTableAdminAsyncClient( +def test_delete_authorized_view_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + transport="grpc", ) - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.DeleteAuthorizedViewRequest( + name="name_value", + etag="etag_value", + ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - table.Snapshot( - name="name_value", - data_size_bytes=1594, - state=table.Snapshot.State.READY, - description="description_value", - ) + with mock.patch.object( + type(client.transport.delete_authorized_view), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. ) - response = await client.get_snapshot(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) + client.delete_authorized_view(request=request) + call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GetSnapshotRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, table.Snapshot) - assert response.name == "name_value" - assert response.data_size_bytes == 1594 - assert response.state == table.Snapshot.State.READY - assert response.description == "description_value" + assert args[0] == bigtable_table_admin.DeleteAuthorizedViewRequest( + name="name_value", + etag="etag_value", + ) + + +def test_delete_authorized_view_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.delete_authorized_view + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.delete_authorized_view + ] = mock_rpc + request = {} + client.delete_authorized_view(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.delete_authorized_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio -async def test_get_snapshot_async_from_dict(): - await test_get_snapshot_async(request_type=dict) +async def test_delete_authorized_view_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_authorized_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_authorized_view() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.DeleteAuthorizedViewRequest() -def test_get_snapshot_field_headers(): +@pytest.mark.asyncio +async def test_delete_authorized_view_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.delete_authorized_view + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.delete_authorized_view + ] = mock_object + + request = {} + await client.delete_authorized_view(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + await client.delete_authorized_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + +@pytest.mark.asyncio +async def test_delete_authorized_view_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.DeleteAuthorizedViewRequest, +): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_authorized_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_authorized_view(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = bigtable_table_admin.DeleteAuthorizedViewRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_authorized_view_async_from_dict(): + await test_delete_authorized_view_async(request_type=dict) + + +def test_delete_authorized_view_field_headers(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable_table_admin.GetSnapshotRequest() + request = bigtable_table_admin.DeleteAuthorizedViewRequest() request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: - call.return_value = table.Snapshot() - client.get_snapshot(request) + with mock.patch.object( + type(client.transport.delete_authorized_view), "__call__" + ) as call: + call.return_value = None + client.delete_authorized_view(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 @@ -4364,21 +6111,23 @@ def test_get_snapshot_field_headers(): @pytest.mark.asyncio -async def test_get_snapshot_field_headers_async(): +async def test_delete_authorized_view_field_headers_async(): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable_table_admin.GetSnapshotRequest() + request = bigtable_table_admin.DeleteAuthorizedViewRequest() request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Snapshot()) - await client.get_snapshot(request) + with mock.patch.object( + type(client.transport.delete_authorized_view), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_authorized_view(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) @@ -4393,18 +6142,20 @@ async def test_get_snapshot_field_headers_async(): ) in kw["metadata"] -def test_get_snapshot_flattened(): +def test_delete_authorized_view_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: + with mock.patch.object( + type(client.transport.delete_authorized_view), "__call__" + ) as call: # Designate an appropriate return value for the call. - call.return_value = table.Snapshot() + call.return_value = None # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - client.get_snapshot( + client.delete_authorized_view( name="name_value", ) @@ -4417,7 +6168,7 @@ def test_get_snapshot_flattened(): assert arg == mock_val -def test_get_snapshot_flattened_error(): +def test_delete_authorized_view_flattened_error(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -4425,27 +6176,29 @@ def test_get_snapshot_flattened_error(): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.get_snapshot( - bigtable_table_admin.GetSnapshotRequest(), + client.delete_authorized_view( + bigtable_table_admin.DeleteAuthorizedViewRequest(), name="name_value", ) @pytest.mark.asyncio -async def test_get_snapshot_flattened_async(): +async def test_delete_authorized_view_flattened_async(): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: + with mock.patch.object( + type(client.transport.delete_authorized_view), "__call__" + ) as call: # Designate an appropriate return value for the call. 
- call.return_value = table.Snapshot() + call.return_value = None - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Snapshot()) + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.get_snapshot( + response = await client.delete_authorized_view( name="name_value", ) @@ -4459,7 +6212,7 @@ async def test_get_snapshot_flattened_async(): @pytest.mark.asyncio -async def test_get_snapshot_flattened_error_async(): +async def test_delete_authorized_view_flattened_error_async(): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -4467,8 +6220,8 @@ async def test_get_snapshot_flattened_error_async(): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - await client.get_snapshot( - bigtable_table_admin.GetSnapshotRequest(), + await client.delete_authorized_view( + bigtable_table_admin.DeleteAuthorizedViewRequest(), name="name_value", ) @@ -4476,11 +6229,11 @@ async def test_get_snapshot_flattened_error_async(): @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.ListSnapshotsRequest, + bigtable_table_admin.ModifyColumnFamiliesRequest, dict, ], ) -def test_list_snapshots(request_type, transport: str = "grpc"): +def test_modify_column_families(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -4491,24 +6244,31 @@ def test_list_snapshots(request_type, transport: str = "grpc"): request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: + with mock.patch.object( + type(client.transport.modify_column_families), "__call__" + ) as call: # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.ListSnapshotsResponse( - next_page_token="next_page_token_value", + call.return_value = table.Table( + name="name_value", + granularity=table.Table.TimestampGranularity.MILLIS, + deletion_protection=True, ) - response = client.list_snapshots(request) + response = client.modify_column_families(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ListSnapshotsRequest() + request = bigtable_table_admin.ModifyColumnFamiliesRequest() + assert args[0] == request # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListSnapshotsPager) - assert response.next_page_token == "next_page_token_value" + assert isinstance(response, table.Table) + assert response.name == "name_value" + assert response.granularity == table.Table.TimestampGranularity.MILLIS + assert response.deletion_protection is True -def test_list_snapshots_empty_call(): +def test_modify_column_families_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( @@ -4517,138 +6277,307 @@ def test_list_snapshots_empty_call(): ) # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: - client.list_snapshots() + with mock.patch.object( + type(client.transport.modify_column_families), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.modify_column_families() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ListSnapshotsRequest() + assert args[0] == bigtable_table_admin.ModifyColumnFamiliesRequest() -@pytest.mark.asyncio -async def test_list_snapshots_async( - transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.ListSnapshotsRequest, -): - client = BigtableTableAdminAsyncClient( +def test_modify_column_families_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + transport="grpc", ) - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.ModifyColumnFamiliesRequest( + name="name_value", + ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.ListSnapshotsResponse( - next_page_token="next_page_token_value", - ) + with mock.patch.object( + type(client.transport.modify_column_families), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. ) - response = await client.list_snapshots(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) + client.modify_column_families(request=request) + call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ListSnapshotsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListSnapshotsAsyncPager) - assert response.next_page_token == "next_page_token_value" - - -@pytest.mark.asyncio -async def test_list_snapshots_async_from_dict(): - await test_list_snapshots_async(request_type=dict) + assert args[0] == bigtable_table_admin.ModifyColumnFamiliesRequest( + name="name_value", + ) -def test_list_snapshots_field_headers(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) +def test_modify_column_families_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = bigtable_table_admin.ListSnapshotsRequest() + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - request.parent = "parent_value" + # Ensure method has been cached + assert ( + client._transport.modify_column_families + in client._transport._wrapped_methods + ) - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: - call.return_value = bigtable_table_admin.ListSnapshotsResponse() - client.list_snapshots(request) + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.modify_column_families + ] = mock_rpc + request = {} + client.modify_column_families(request) # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request + assert mock_rpc.call_count == 1 - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "parent=parent_value", - ) in kw["metadata"] + client.modify_column_families(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio -async def test_list_snapshots_field_headers_async(): +async def test_modify_column_families_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", ) - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.ListSnapshotsRequest() - - request.parent = "parent_value" - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: + with mock.patch.object( + type(client.transport.modify_column_families), "__call__" + ) as call: + # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.ListSnapshotsResponse() + table.Table( + name="name_value", + granularity=table.Table.TimestampGranularity.MILLIS, + deletion_protection=True, + ) ) - await client.list_snapshots(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) + response = await client.modify_column_families() + call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == bigtable_table_admin.ModifyColumnFamiliesRequest() - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "parent=parent_value", + +@pytest.mark.asyncio +async def test_modify_column_families_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.modify_column_families + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.modify_column_families + ] = mock_object + + request = {} + await client.modify_column_families(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + await client.modify_column_families(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + +@pytest.mark.asyncio +async def test_modify_column_families_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.ModifyColumnFamiliesRequest, +): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.modify_column_families), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + table.Table( + name="name_value", + granularity=table.Table.TimestampGranularity.MILLIS, + deletion_protection=True, + ) + ) + response = await client.modify_column_families(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = bigtable_table_admin.ModifyColumnFamiliesRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, table.Table) + assert response.name == "name_value" + assert response.granularity == table.Table.TimestampGranularity.MILLIS + assert response.deletion_protection is True + + +@pytest.mark.asyncio +async def test_modify_column_families_async_from_dict(): + await test_modify_column_families_async(request_type=dict) + + +def test_modify_column_families_field_headers(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.ModifyColumnFamiliesRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.modify_column_families), "__call__" + ) as call: + call.return_value = table.Table() + client.modify_column_families(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", ) in kw["metadata"] -def test_list_snapshots_flattened(): +@pytest.mark.asyncio +async def test_modify_column_families_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.ModifyColumnFamiliesRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.modify_column_families), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Table()) + await client.modify_column_families(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_modify_column_families_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: + with mock.patch.object( + type(client.transport.modify_column_families), "__call__" + ) as call: # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.ListSnapshotsResponse() + call.return_value = table.Table() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - client.list_snapshots( - parent="parent_value", + client.modify_column_families( + name="name_value", + modifications=[ + bigtable_table_admin.ModifyColumnFamiliesRequest.Modification( + id="id_value" + ) + ], ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + arg = args[0].modifications + mock_val = [ + bigtable_table_admin.ModifyColumnFamiliesRequest.Modification(id="id_value") + ] assert arg == mock_val -def test_list_snapshots_flattened_error(): +def test_modify_column_families_flattened_error(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -4656,43 +6585,58 @@ def test_list_snapshots_flattened_error(): # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): - client.list_snapshots( - bigtable_table_admin.ListSnapshotsRequest(), - parent="parent_value", + client.modify_column_families( + bigtable_table_admin.ModifyColumnFamiliesRequest(), + name="name_value", + modifications=[ + bigtable_table_admin.ModifyColumnFamiliesRequest.Modification( + id="id_value" + ) + ], ) @pytest.mark.asyncio -async def test_list_snapshots_flattened_async(): +async def test_modify_column_families_flattened_async(): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: + with mock.patch.object( + type(client.transport.modify_column_families), "__call__" + ) as call: # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.ListSnapshotsResponse() + call.return_value = table.Table() - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.ListSnapshotsResponse() - ) + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Table()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.list_snapshots( - parent="parent_value", + response = await client.modify_column_families( + name="name_value", + modifications=[ + bigtable_table_admin.ModifyColumnFamiliesRequest.Modification( + id="id_value" + ) + ], ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + arg = args[0].modifications + mock_val = [ + bigtable_table_admin.ModifyColumnFamiliesRequest.Modification(id="id_value") + ] assert arg == mock_val @pytest.mark.asyncio -async def test_list_snapshots_flattened_error_async(): +async def test_modify_column_families_flattened_error_async(): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -4700,254 +6644,201 @@ async def test_list_snapshots_flattened_error_async(): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - await client.list_snapshots( - bigtable_table_admin.ListSnapshotsRequest(), - parent="parent_value", + await client.modify_column_families( + bigtable_table_admin.ModifyColumnFamiliesRequest(), + name="name_value", + modifications=[ + bigtable_table_admin.ModifyColumnFamiliesRequest.Modification( + id="id_value" + ) + ], ) -def test_list_snapshots_pager(transport_name: str = "grpc"): +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.DropRowRangeRequest, + dict, + ], +) +def test_drop_row_range(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport_name, + transport=transport, ) - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: - # Set the response to a series of pages. 
- call.side_effect = ( - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - table.Snapshot(), - table.Snapshot(), - ], - next_page_token="abc", - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[], - next_page_token="def", - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - ], - next_page_token="ghi", - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - table.Snapshot(), - ], - ), - RuntimeError, - ) + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), - ) - pager = client.list_snapshots(request={}) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.drop_row_range(request) - assert pager._metadata == metadata + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = bigtable_table_admin.DropRowRangeRequest() + assert args[0] == request - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, table.Snapshot) for i in results) + # Establish that the response is the type that we expect. + assert response is None -def test_list_snapshots_pages(transport_name: str = "grpc"): +def test_drop_row_range_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport_name, + transport="grpc", ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: - # Set the response to a series of pages. - call.side_effect = ( - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - table.Snapshot(), - table.Snapshot(), - ], - next_page_token="abc", - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[], - next_page_token="def", - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - ], - next_page_token="ghi", - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - table.Snapshot(), - ], - ), - RuntimeError, + with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. ) - pages = list(client.list_snapshots(request={}).pages) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token + client.drop_row_range() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.DropRowRangeRequest() -@pytest.mark.asyncio -async def test_list_snapshots_async_pager(): - client = BigtableTableAdminAsyncClient( +def test_drop_row_range_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. 
+ client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.DropRowRangeRequest( + name="name_value", ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_snapshots), "__call__", new_callable=mock.AsyncMock - ) as call: - # Set the response to a series of pages. - call.side_effect = ( - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - table.Snapshot(), - table.Snapshot(), - ], - next_page_token="abc", - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[], - next_page_token="def", - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - ], - next_page_token="ghi", - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - table.Snapshot(), - ], - ), - RuntimeError, + with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. ) - async_pager = await client.list_snapshots( - request={}, + client.drop_row_range(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.DropRowRangeRequest( + name="name_value", ) - assert async_pager.next_page_token == "abc" - responses = [] - async for response in async_pager: # pragma: no branch - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, table.Snapshot) for i in responses) - -@pytest.mark.asyncio -async def test_list_snapshots_async_pages(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_snapshots), "__call__", new_callable=mock.AsyncMock - ) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - table.Snapshot(), - table.Snapshot(), - ], - next_page_token="abc", - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[], - next_page_token="def", - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - ], - next_page_token="ghi", - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - table.Snapshot(), - ], - ), - RuntimeError, +def test_drop_row_range_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) - pages = [] - # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` - # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 - async for page_ in ( # pragma: no branch - await client.list_snapshots(request={}) - ).pages: - pages.append(page_) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.DeleteSnapshotRequest, - dict, - ], -) -def test_delete_snapshot(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() + # Ensure method has been cached + assert client._transport.drop_row_range in client._transport._wrapped_methods - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.delete_snapshot(request) + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.drop_row_range] = mock_rpc + request = {} + client.drop_row_range(request) # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DeleteSnapshotRequest() + assert mock_rpc.call_count == 1 - # Establish that the response is the type that we expect. - assert response is None + client.drop_row_range(request) + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 -def test_delete_snapshot_empty_call(): + +@pytest.mark.asyncio +async def test_drop_row_range_empty_call_async(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
- client = BigtableTableAdminClient( + client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", + transport="grpc_asyncio", ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: - client.delete_snapshot() + with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.drop_row_range() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DeleteSnapshotRequest() + assert args[0] == bigtable_table_admin.DropRowRangeRequest() @pytest.mark.asyncio -async def test_delete_snapshot_async( +async def test_drop_row_range_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.DeleteSnapshotRequest, +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.drop_row_range + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.drop_row_range + ] = mock_object + + request = {} + await client.drop_row_range(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + await client.drop_row_range(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + +@pytest.mark.asyncio +async def test_drop_row_range_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.DropRowRangeRequest, ): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), @@ -4959,40 +6850,41 @@ async def test_delete_snapshot_async( request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: + with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.delete_snapshot(request) + response = await client.drop_row_range(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DeleteSnapshotRequest() + request = bigtable_table_admin.DropRowRangeRequest() + assert args[0] == request # Establish that the response is the type that we expect. 
assert response is None @pytest.mark.asyncio -async def test_delete_snapshot_async_from_dict(): - await test_delete_snapshot_async(request_type=dict) +async def test_drop_row_range_async_from_dict(): + await test_drop_row_range_async(request_type=dict) -def test_delete_snapshot_field_headers(): +def test_drop_row_range_field_headers(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable_table_admin.DeleteSnapshotRequest() + request = bigtable_table_admin.DropRowRangeRequest() request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: + with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: call.return_value = None - client.delete_snapshot(request) + client.drop_row_range(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 @@ -5008,21 +6900,21 @@ def test_delete_snapshot_field_headers(): @pytest.mark.asyncio -async def test_delete_snapshot_field_headers_async(): +async def test_drop_row_range_field_headers_async(): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable_table_admin.DeleteSnapshotRequest() + request = bigtable_table_admin.DropRowRangeRequest() request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: + with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.delete_snapshot(request) + await client.drop_row_range(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) @@ -5037,138 +6929,210 @@ async def test_delete_snapshot_field_headers_async(): ) in kw["metadata"] -def test_delete_snapshot_flattened(): - client = BigtableTableAdminClient( +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.GenerateConsistencyTokenRequest, + dict, + ], +) +def test_generate_consistency_token(request_type, transport: str = "grpc"): + client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: + with mock.patch.object( + type(client.transport.generate_consistency_token), "__call__" + ) as call: # Designate an appropriate return value for the call. - call.return_value = None - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_snapshot( - name="name_value", + call.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse( + consistency_token="consistency_token_value", ) + response = client.generate_consistency_token(request) - # Establish that the underlying call was made with the expected - # request object values. 
+ # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val + request = bigtable_table_admin.GenerateConsistencyTokenRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable_table_admin.GenerateConsistencyTokenResponse) + assert response.consistency_token == "consistency_token_value" -def test_delete_snapshot_flattened_error(): +def test_generate_consistency_token_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_snapshot( - bigtable_table_admin.DeleteSnapshotRequest(), - name="name_value", + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.generate_consistency_token), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. ) + client.generate_consistency_token() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.GenerateConsistencyTokenRequest() -@pytest.mark.asyncio -async def test_delete_snapshot_flattened_async(): - client = BigtableTableAdminAsyncClient( +def test_generate_consistency_token_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", ) - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = None + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.GenerateConsistencyTokenRequest( + name="name_value", + ) - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_snapshot( - name="name_value", + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.generate_consistency_token), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) + client.generate_consistency_token(request=request) + call.assert_called() _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = "name_value" - assert arg == mock_val - - -@pytest.mark.asyncio -async def test_delete_snapshot_flattened_error_async(): - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_snapshot( - bigtable_table_admin.DeleteSnapshotRequest(), + assert args[0] == bigtable_table_admin.GenerateConsistencyTokenRequest( name="name_value", ) -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.CreateBackupRequest, - dict, - ], -) -def test_create_backup(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) +def test_generate_consistency_token_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.create_backup), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/spam") - response = client.create_backup(request) + # Ensure method has been cached + assert ( + client._transport.generate_consistency_token + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.generate_consistency_token + ] = mock_rpc + request = {} + client.generate_consistency_token(request) # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CreateBackupRequest() + assert mock_rpc.call_count == 1 - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) + client.generate_consistency_token(request) + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 -def test_create_backup_empty_call(): + +@pytest.mark.asyncio +async def test_generate_consistency_token_empty_call_async(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( + client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", + transport="grpc_asyncio", ) # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(type(client.transport.create_backup), "__call__") as call: - client.create_backup() + with mock.patch.object( + type(client.transport.generate_consistency_token), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.GenerateConsistencyTokenResponse( + consistency_token="consistency_token_value", + ) + ) + response = await client.generate_consistency_token() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CreateBackupRequest() + assert args[0] == bigtable_table_admin.GenerateConsistencyTokenRequest() @pytest.mark.asyncio -async def test_create_backup_async( +async def test_generate_consistency_token_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.CreateBackupRequest, +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.generate_consistency_token + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.generate_consistency_token + ] = mock_object + + request = {} + await client.generate_consistency_token(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + await client.generate_consistency_token(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + +@pytest.mark.asyncio +async def test_generate_consistency_token_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.GenerateConsistencyTokenRequest, ): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), @@ -5180,42 +7144,50 @@ async def test_create_backup_async( request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.create_backup), "__call__") as call: + with mock.patch.object( + type(client.transport.generate_consistency_token), "__call__" + ) as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") + bigtable_table_admin.GenerateConsistencyTokenResponse( + consistency_token="consistency_token_value", + ) ) - response = await client.create_backup(request) + response = await client.generate_consistency_token(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CreateBackupRequest() + request = bigtable_table_admin.GenerateConsistencyTokenRequest() + assert args[0] == request # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) + assert isinstance(response, bigtable_table_admin.GenerateConsistencyTokenResponse) + assert response.consistency_token == "consistency_token_value" @pytest.mark.asyncio -async def test_create_backup_async_from_dict(): - await test_create_backup_async(request_type=dict) +async def test_generate_consistency_token_async_from_dict(): + await test_generate_consistency_token_async(request_type=dict) -def test_create_backup_field_headers(): +def test_generate_consistency_token_field_headers(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable_table_admin.CreateBackupRequest() + request = bigtable_table_admin.GenerateConsistencyTokenRequest() - request.parent = "parent_value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.create_backup), "__call__") as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.create_backup(request) + with mock.patch.object( + type(client.transport.generate_consistency_token), "__call__" + ) as call: + call.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() + client.generate_consistency_token(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 @@ -5226,28 +7198,30 @@ def test_create_backup_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "parent=parent_value", + "name=name_value", ) in kw["metadata"] @pytest.mark.asyncio -async def test_create_backup_field_headers_async(): +async def test_generate_consistency_token_field_headers_async(): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable_table_admin.CreateBackupRequest() + request = bigtable_table_admin.GenerateConsistencyTokenRequest() - request.parent = "parent_value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.create_backup), "__call__") as call: + with mock.patch.object( + type(client.transport.generate_consistency_token), "__call__" + ) as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/op") + bigtable_table_admin.GenerateConsistencyTokenResponse() ) - await client.create_backup(request) + await client.generate_consistency_token(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) @@ -5258,43 +7232,37 @@ async def test_create_backup_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "parent=parent_value", + "name=name_value", ) in kw["metadata"] -def test_create_backup_flattened(): +def test_generate_consistency_token_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.create_backup), "__call__") as call: + with mock.patch.object( + type(client.transport.generate_consistency_token), "__call__" + ) as call: # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name="operations/op") + call.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - client.create_backup( - parent="parent_value", - backup_id="backup_id_value", - backup=table.Backup(name="name_value"), + client.generate_consistency_token( + name="name_value", ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" - assert arg == mock_val - arg = args[0].backup_id - mock_val = "backup_id_value" - assert arg == mock_val - arg = args[0].backup - mock_val = table.Backup(name="name_value") + arg = args[0].name + mock_val = "name_value" assert arg == mock_val -def test_create_backup_flattened_error(): +def test_generate_consistency_token_flattened_error(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -5302,53 +7270,45 @@ def test_create_backup_flattened_error(): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.create_backup( - bigtable_table_admin.CreateBackupRequest(), - parent="parent_value", - backup_id="backup_id_value", - backup=table.Backup(name="name_value"), + client.generate_consistency_token( + bigtable_table_admin.GenerateConsistencyTokenRequest(), + name="name_value", ) @pytest.mark.asyncio -async def test_create_backup_flattened_async(): +async def test_generate_consistency_token_flattened_async(): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.create_backup), "__call__") as call: + with mock.patch.object( + type(client.transport.generate_consistency_token), "__call__" + ) as call: # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/op") + call.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") + bigtable_table_admin.GenerateConsistencyTokenResponse() ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.create_backup( - parent="parent_value", - backup_id="backup_id_value", - backup=table.Backup(name="name_value"), + response = await client.generate_consistency_token( + name="name_value", ) # Establish that the underlying call was made with the expected # request object values. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" - assert arg == mock_val - arg = args[0].backup_id - mock_val = "backup_id_value" - assert arg == mock_val - arg = args[0].backup - mock_val = table.Backup(name="name_value") + arg = args[0].name + mock_val = "name_value" assert arg == mock_val @pytest.mark.asyncio -async def test_create_backup_flattened_error_async(): +async def test_generate_consistency_token_flattened_error_async(): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -5356,22 +7316,20 @@ async def test_create_backup_flattened_error_async(): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - await client.create_backup( - bigtable_table_admin.CreateBackupRequest(), - parent="parent_value", - backup_id="backup_id_value", - backup=table.Backup(name="name_value"), + await client.generate_consistency_token( + bigtable_table_admin.GenerateConsistencyTokenRequest(), + name="name_value", ) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.GetBackupRequest, + bigtable_table_admin.CheckConsistencyRequest, dict, ], ) -def test_get_backup(request_type, transport: str = "grpc"): +def test_check_consistency(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -5382,32 +7340,27 @@ def test_get_backup(request_type, transport: str = "grpc"): request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_backup), "__call__") as call: + with mock.patch.object( + type(client.transport.check_consistency), "__call__" + ) as call: # Designate an appropriate return value for the call. - call.return_value = table.Backup( - name="name_value", - source_table="source_table_value", - source_backup="source_backup_value", - size_bytes=1089, - state=table.Backup.State.CREATING, + call.return_value = bigtable_table_admin.CheckConsistencyResponse( + consistent=True, ) - response = client.get_backup(request) + response = client.check_consistency(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GetBackupRequest() + request = bigtable_table_admin.CheckConsistencyRequest() + assert args[0] == request # Establish that the response is the type that we expect. - assert isinstance(response, table.Backup) - assert response.name == "name_value" - assert response.source_table == "source_table_value" - assert response.source_backup == "source_backup_value" - assert response.size_bytes == 1089 - assert response.state == table.Backup.State.CREATING + assert isinstance(response, bigtable_table_admin.CheckConsistencyResponse) + assert response.consistent is True -def test_get_backup_empty_call(): +def test_check_consistency_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( @@ -5416,16 +7369,162 @@ def test_get_backup_empty_call(): ) # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(type(client.transport.get_backup), "__call__") as call: - client.get_backup() + with mock.patch.object( + type(client.transport.check_consistency), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.check_consistency() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GetBackupRequest() + assert args[0] == bigtable_table_admin.CheckConsistencyRequest() + + +def test_check_consistency_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.CheckConsistencyRequest( + name="name_value", + consistency_token="consistency_token_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.check_consistency), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.check_consistency(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.CheckConsistencyRequest( + name="name_value", + consistency_token="consistency_token_value", + ) + + +def test_check_consistency_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.check_consistency in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.check_consistency + ] = mock_rpc + request = {} + client.check_consistency(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.check_consistency(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio -async def test_get_backup_async( - transport: str = "grpc_asyncio", request_type=bigtable_table_admin.GetBackupRequest +async def test_check_consistency_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.check_consistency), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.CheckConsistencyResponse( + consistent=True, + ) + ) + response = await client.check_consistency() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.CheckConsistencyRequest() + + +@pytest.mark.asyncio +async def test_check_consistency_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.check_consistency + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.check_consistency + ] = mock_object + + request = {} + await client.check_consistency(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + await client.check_consistency(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + +@pytest.mark.asyncio +async def test_check_consistency_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.CheckConsistencyRequest, ): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), @@ -5437,53 +7536,50 @@ async def test_get_backup_async( request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_backup), "__call__") as call: + with mock.patch.object( + type(client.transport.check_consistency), "__call__" + ) as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - table.Backup( - name="name_value", - source_table="source_table_value", - source_backup="source_backup_value", - size_bytes=1089, - state=table.Backup.State.CREATING, + bigtable_table_admin.CheckConsistencyResponse( + consistent=True, ) ) - response = await client.get_backup(request) + response = await client.check_consistency(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GetBackupRequest() + request = bigtable_table_admin.CheckConsistencyRequest() + assert args[0] == request # Establish that the response is the type that we expect. 
- assert isinstance(response, table.Backup) - assert response.name == "name_value" - assert response.source_table == "source_table_value" - assert response.source_backup == "source_backup_value" - assert response.size_bytes == 1089 - assert response.state == table.Backup.State.CREATING + assert isinstance(response, bigtable_table_admin.CheckConsistencyResponse) + assert response.consistent is True @pytest.mark.asyncio -async def test_get_backup_async_from_dict(): - await test_get_backup_async(request_type=dict) +async def test_check_consistency_async_from_dict(): + await test_check_consistency_async(request_type=dict) -def test_get_backup_field_headers(): +def test_check_consistency_field_headers(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable_table_admin.GetBackupRequest() + request = bigtable_table_admin.CheckConsistencyRequest() request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_backup), "__call__") as call: - call.return_value = table.Backup() - client.get_backup(request) + with mock.patch.object( + type(client.transport.check_consistency), "__call__" + ) as call: + call.return_value = bigtable_table_admin.CheckConsistencyResponse() + client.check_consistency(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 @@ -5499,21 +7595,25 @@ def test_get_backup_field_headers(): @pytest.mark.asyncio -async def test_get_backup_field_headers_async(): +async def test_check_consistency_field_headers_async(): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable_table_admin.GetBackupRequest() + request = bigtable_table_admin.CheckConsistencyRequest() request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_backup), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Backup()) - await client.get_backup(request) + with mock.patch.object( + type(client.transport.check_consistency), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.CheckConsistencyResponse() + ) + await client.check_consistency(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) @@ -5528,19 +7628,22 @@ async def test_get_backup_field_headers_async(): ) in kw["metadata"] -def test_get_backup_flattened(): +def test_check_consistency_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_backup), "__call__") as call: + with mock.patch.object( + type(client.transport.check_consistency), "__call__" + ) as call: # Designate an appropriate return value for the call. - call.return_value = table.Backup() + call.return_value = bigtable_table_admin.CheckConsistencyResponse() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. 
- client.get_backup( + client.check_consistency( name="name_value", + consistency_token="consistency_token_value", ) # Establish that the underlying call was made with the expected @@ -5550,9 +7653,12 @@ def test_get_backup_flattened(): arg = args[0].name mock_val = "name_value" assert arg == mock_val + arg = args[0].consistency_token + mock_val = "consistency_token_value" + assert arg == mock_val -def test_get_backup_flattened_error(): +def test_check_consistency_flattened_error(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -5560,28 +7666,34 @@ def test_get_backup_flattened_error(): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.get_backup( - bigtable_table_admin.GetBackupRequest(), + client.check_consistency( + bigtable_table_admin.CheckConsistencyRequest(), name="name_value", + consistency_token="consistency_token_value", ) @pytest.mark.asyncio -async def test_get_backup_flattened_async(): +async def test_check_consistency_flattened_async(): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_backup), "__call__") as call: + with mock.patch.object( + type(client.transport.check_consistency), "__call__" + ) as call: # Designate an appropriate return value for the call. - call.return_value = table.Backup() + call.return_value = bigtable_table_admin.CheckConsistencyResponse() - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Backup()) + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.CheckConsistencyResponse() + ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.get_backup( + response = await client.check_consistency( name="name_value", + consistency_token="consistency_token_value", ) # Establish that the underlying call was made with the expected @@ -5591,10 +7703,13 @@ async def test_get_backup_flattened_async(): arg = args[0].name mock_val = "name_value" assert arg == mock_val + arg = args[0].consistency_token + mock_val = "consistency_token_value" + assert arg == mock_val @pytest.mark.asyncio -async def test_get_backup_flattened_error_async(): +async def test_check_consistency_flattened_error_async(): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -5602,20 +7717,21 @@ async def test_get_backup_flattened_error_async(): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - await client.get_backup( - bigtable_table_admin.GetBackupRequest(), + await client.check_consistency( + bigtable_table_admin.CheckConsistencyRequest(), name="name_value", + consistency_token="consistency_token_value", ) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.UpdateBackupRequest, + bigtable_table_admin.SnapshotTableRequest, dict, ], ) -def test_update_backup(request_type, transport: str = "grpc"): +def test_snapshot_table(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -5626,32 +7742,22 @@ def test_update_backup(request_type, transport: str = "grpc"): request = request_type() # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(type(client.transport.update_backup), "__call__") as call: + with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = table.Backup( - name="name_value", - source_table="source_table_value", - source_backup="source_backup_value", - size_bytes=1089, - state=table.Backup.State.CREATING, - ) - response = client.update_backup(request) + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.snapshot_table(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.UpdateBackupRequest() + request = bigtable_table_admin.SnapshotTableRequest() + assert args[0] == request # Establish that the response is the type that we expect. - assert isinstance(response, table.Backup) - assert response.name == "name_value" - assert response.source_table == "source_table_value" - assert response.source_backup == "source_backup_value" - assert response.size_bytes == 1089 - assert response.state == table.Backup.State.CREATING + assert isinstance(response, future.Future) -def test_update_backup_empty_call(): +def test_snapshot_table_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( @@ -5660,17 +7766,164 @@ def test_update_backup_empty_call(): ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.update_backup), "__call__") as call: - client.update_backup() + with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.snapshot_table() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.UpdateBackupRequest() + assert args[0] == bigtable_table_admin.SnapshotTableRequest() + + +def test_snapshot_table_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.SnapshotTableRequest( + name="name_value", + cluster="cluster_value", + snapshot_id="snapshot_id_value", + description="description_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.snapshot_table(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.SnapshotTableRequest( + name="name_value", + cluster="cluster_value", + snapshot_id="snapshot_id_value", + description="description_value", + ) + + +def test_snapshot_table_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.snapshot_table in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.snapshot_table] = mock_rpc + request = {} + client.snapshot_table(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.snapshot_table(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio -async def test_update_backup_async( +async def test_snapshot_table_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.snapshot_table() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.SnapshotTableRequest() + + +@pytest.mark.asyncio +async def test_snapshot_table_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.UpdateBackupRequest, +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.snapshot_table + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.snapshot_table + ] = mock_object + + request = {} + await client.snapshot_table(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.snapshot_table(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + +@pytest.mark.asyncio +async def test_snapshot_table_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.SnapshotTableRequest, ): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), @@ -5682,53 +7935,43 @@ async def test_update_backup_async( request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.update_backup), "__call__") as call: + with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - table.Backup( - name="name_value", - source_table="source_table_value", - source_backup="source_backup_value", - size_bytes=1089, - state=table.Backup.State.CREATING, - ) + operations_pb2.Operation(name="operations/spam") ) - response = await client.update_backup(request) + response = await client.snapshot_table(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.UpdateBackupRequest() + request = bigtable_table_admin.SnapshotTableRequest() + assert args[0] == request # Establish that the response is the type that we expect. 
- assert isinstance(response, table.Backup) - assert response.name == "name_value" - assert response.source_table == "source_table_value" - assert response.source_backup == "source_backup_value" - assert response.size_bytes == 1089 - assert response.state == table.Backup.State.CREATING + assert isinstance(response, future.Future) @pytest.mark.asyncio -async def test_update_backup_async_from_dict(): - await test_update_backup_async(request_type=dict) +async def test_snapshot_table_async_from_dict(): + await test_snapshot_table_async(request_type=dict) -def test_update_backup_field_headers(): +def test_snapshot_table_field_headers(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable_table_admin.UpdateBackupRequest() + request = bigtable_table_admin.SnapshotTableRequest() - request.backup.name = "name_value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.update_backup), "__call__") as call: - call.return_value = table.Backup() - client.update_backup(request) + with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.snapshot_table(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 @@ -5739,26 +7982,28 @@ def test_update_backup_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "backup.name=name_value", + "name=name_value", ) in kw["metadata"] @pytest.mark.asyncio -async def test_update_backup_field_headers_async(): +async def test_snapshot_table_field_headers_async(): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable_table_admin.UpdateBackupRequest() + request = bigtable_table_admin.SnapshotTableRequest() - request.backup.name = "name_value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.update_backup), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Backup()) - await client.update_backup(request) + with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.snapshot_table(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) @@ -5769,39 +8014,47 @@ async def test_update_backup_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "backup.name=name_value", + "name=name_value", ) in kw["metadata"] -def test_update_backup_flattened(): +def test_snapshot_table_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.update_backup), "__call__") as call: + with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: # Designate an appropriate return value for the call. 
- call.return_value = table.Backup() + call.return_value = operations_pb2.Operation(name="operations/op") # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - client.update_backup( - backup=table.Backup(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + client.snapshot_table( + name="name_value", + cluster="cluster_value", + snapshot_id="snapshot_id_value", + description="description_value", ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - arg = args[0].backup - mock_val = table.Backup(name="name_value") + arg = args[0].name + mock_val = "name_value" assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + arg = args[0].cluster + mock_val = "cluster_value" + assert arg == mock_val + arg = args[0].snapshot_id + mock_val = "snapshot_id_value" + assert arg == mock_val + arg = args[0].description + mock_val = "description_value" assert arg == mock_val -def test_update_backup_flattened_error(): +def test_snapshot_table_flattened_error(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -5809,46 +8062,58 @@ def test_update_backup_flattened_error(): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.update_backup( - bigtable_table_admin.UpdateBackupRequest(), - backup=table.Backup(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + client.snapshot_table( + bigtable_table_admin.SnapshotTableRequest(), + name="name_value", + cluster="cluster_value", + snapshot_id="snapshot_id_value", + description="description_value", ) @pytest.mark.asyncio -async def test_update_backup_flattened_async(): +async def test_snapshot_table_flattened_async(): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.update_backup), "__call__") as call: + with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = table.Backup() + call.return_value = operations_pb2.Operation(name="operations/op") - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Backup()) + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.update_backup( - backup=table.Backup(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + response = await client.snapshot_table( + name="name_value", + cluster="cluster_value", + snapshot_id="snapshot_id_value", + description="description_value", ) # Establish that the underlying call was made with the expected # request object values. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - arg = args[0].backup - mock_val = table.Backup(name="name_value") + arg = args[0].name + mock_val = "name_value" assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + arg = args[0].cluster + mock_val = "cluster_value" + assert arg == mock_val + arg = args[0].snapshot_id + mock_val = "snapshot_id_value" + assert arg == mock_val + arg = args[0].description + mock_val = "description_value" assert arg == mock_val @pytest.mark.asyncio -async def test_update_backup_flattened_error_async(): +async def test_snapshot_table_flattened_error_async(): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -5856,21 +8121,23 @@ async def test_update_backup_flattened_error_async(): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - await client.update_backup( - bigtable_table_admin.UpdateBackupRequest(), - backup=table.Backup(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + await client.snapshot_table( + bigtable_table_admin.SnapshotTableRequest(), + name="name_value", + cluster="cluster_value", + snapshot_id="snapshot_id_value", + description="description_value", ) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.DeleteBackupRequest, + bigtable_table_admin.GetSnapshotRequest, dict, ], ) -def test_delete_backup(request_type, transport: str = "grpc"): +def test_get_snapshot(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -5881,21 +8148,31 @@ def test_delete_backup(request_type, transport: str = "grpc"): request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: + with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = None - response = client.delete_backup(request) + call.return_value = table.Snapshot( + name="name_value", + data_size_bytes=1594, + state=table.Snapshot.State.READY, + description="description_value", + ) + response = client.get_snapshot(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DeleteBackupRequest() + request = bigtable_table_admin.GetSnapshotRequest() + assert args[0] == request # Establish that the response is the type that we expect. - assert response is None + assert isinstance(response, table.Snapshot) + assert response.name == "name_value" + assert response.data_size_bytes == 1594 + assert response.state == table.Snapshot.State.READY + assert response.description == "description_value" -def test_delete_backup_empty_call(): +def test_get_snapshot_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( @@ -5904,17 +8181,155 @@ def test_delete_backup_empty_call(): ) # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: - client.delete_backup() + with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.get_snapshot() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DeleteBackupRequest() + assert args[0] == bigtable_table_admin.GetSnapshotRequest() + + +def test_get_snapshot_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.GetSnapshotRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.get_snapshot(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.GetSnapshotRequest( + name="name_value", + ) + + +def test_get_snapshot_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.get_snapshot in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.get_snapshot] = mock_rpc + request = {} + client.get_snapshot(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_snapshot(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio -async def test_delete_backup_async( +async def test_get_snapshot_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + table.Snapshot( + name="name_value", + data_size_bytes=1594, + state=table.Snapshot.State.READY, + description="description_value", + ) + ) + response = await client.get_snapshot() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.GetSnapshotRequest() + + +@pytest.mark.asyncio +async def test_get_snapshot_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.DeleteBackupRequest, +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.get_snapshot + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.get_snapshot + ] = mock_object + + request = {} + await client.get_snapshot(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + await client.get_snapshot(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + +@pytest.mark.asyncio +async def test_get_snapshot_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.GetSnapshotRequest, ): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), @@ -5926,40 +8341,52 @@ async def test_delete_backup_async( request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: + with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.delete_backup(request) + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + table.Snapshot( + name="name_value", + data_size_bytes=1594, + state=table.Snapshot.State.READY, + description="description_value", + ) + ) + response = await client.get_snapshot(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DeleteBackupRequest() + request = bigtable_table_admin.GetSnapshotRequest() + assert args[0] == request # Establish that the response is the type that we expect. 
- assert response is None + assert isinstance(response, table.Snapshot) + assert response.name == "name_value" + assert response.data_size_bytes == 1594 + assert response.state == table.Snapshot.State.READY + assert response.description == "description_value" @pytest.mark.asyncio -async def test_delete_backup_async_from_dict(): - await test_delete_backup_async(request_type=dict) +async def test_get_snapshot_async_from_dict(): + await test_get_snapshot_async(request_type=dict) -def test_delete_backup_field_headers(): +def test_get_snapshot_field_headers(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable_table_admin.DeleteBackupRequest() + request = bigtable_table_admin.GetSnapshotRequest() request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: - call.return_value = None - client.delete_backup(request) + with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: + call.return_value = table.Snapshot() + client.get_snapshot(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 @@ -5975,21 +8402,21 @@ def test_delete_backup_field_headers(): @pytest.mark.asyncio -async def test_delete_backup_field_headers_async(): +async def test_get_snapshot_field_headers_async(): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable_table_admin.DeleteBackupRequest() + request = bigtable_table_admin.GetSnapshotRequest() request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.delete_backup(request) + with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Snapshot()) + await client.get_snapshot(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) @@ -6004,18 +8431,18 @@ async def test_delete_backup_field_headers_async(): ) in kw["metadata"] -def test_delete_backup_flattened(): +def test_get_snapshot_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: + with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = None + call.return_value = table.Snapshot() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. 
- client.delete_backup( + client.get_snapshot( name="name_value", ) @@ -6028,7 +8455,7 @@ def test_delete_backup_flattened(): assert arg == mock_val -def test_delete_backup_flattened_error(): +def test_get_snapshot_flattened_error(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -6036,27 +8463,27 @@ def test_delete_backup_flattened_error(): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.delete_backup( - bigtable_table_admin.DeleteBackupRequest(), + client.get_snapshot( + bigtable_table_admin.GetSnapshotRequest(), name="name_value", ) @pytest.mark.asyncio -async def test_delete_backup_flattened_async(): +async def test_get_snapshot_flattened_async(): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: + with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = None + call.return_value = table.Snapshot() - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Snapshot()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.delete_backup( + response = await client.get_snapshot( name="name_value", ) @@ -6070,7 +8497,7 @@ async def test_delete_backup_flattened_async(): @pytest.mark.asyncio -async def test_delete_backup_flattened_error_async(): +async def test_get_snapshot_flattened_error_async(): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -6078,8 +8505,8 @@ async def test_delete_backup_flattened_error_async(): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - await client.delete_backup( - bigtable_table_admin.DeleteBackupRequest(), + await client.get_snapshot( + bigtable_table_admin.GetSnapshotRequest(), name="name_value", ) @@ -6087,11 +8514,11 @@ async def test_delete_backup_flattened_error_async(): @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.ListBackupsRequest, + bigtable_table_admin.ListSnapshotsRequest, dict, ], ) -def test_list_backups(request_type, transport: str = "grpc"): +def test_list_snapshots(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -6102,24 +8529,25 @@ def test_list_backups(request_type, transport: str = "grpc"): request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.ListBackupsResponse( + call.return_value = bigtable_table_admin.ListSnapshotsResponse( next_page_token="next_page_token_value", ) - response = client.list_backups(request) + response = client.list_snapshots(request) # Establish that the underlying gRPC stub method was called. 
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ListBackupsRequest() + request = bigtable_table_admin.ListSnapshotsRequest() + assert args[0] == request # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListBackupsPager) + assert isinstance(response, pagers.ListSnapshotsPager) assert response.next_page_token == "next_page_token_value" -def test_list_backups_empty_call(): +def test_list_snapshots_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( @@ -6128,67 +8556,205 @@ def test_list_backups_empty_call(): ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_backups), "__call__") as call: - client.list_backups() + with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.list_snapshots() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ListBackupsRequest() + assert args[0] == bigtable_table_admin.ListSnapshotsRequest() -@pytest.mark.asyncio -async def test_list_backups_async( - transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.ListBackupsRequest, -): - client = BigtableTableAdminAsyncClient( +def test_list_snapshots_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + transport="grpc", ) - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.ListSnapshotsRequest( + parent="parent_value", + page_token="page_token_value", + ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_backups), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.ListBackupsResponse( - next_page_token="next_page_token_value", - ) + with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. ) - response = await client.list_backups(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) + client.list_snapshots(request=request) + call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ListBackupsRequest() + assert args[0] == bigtable_table_admin.ListSnapshotsRequest( + parent="parent_value", + page_token="page_token_value", + ) - # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListBackupsAsyncPager) - assert response.next_page_token == "next_page_token_value" + +def test_list_snapshots_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.list_snapshots in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.list_snapshots] = mock_rpc + request = {} + client.list_snapshots(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.list_snapshots(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio -async def test_list_backups_async_from_dict(): - await test_list_backups_async(request_type=dict) +async def test_list_snapshots_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.ListSnapshotsResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_snapshots() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.ListSnapshotsRequest() -def test_list_backups_field_headers(): +@pytest.mark.asyncio +async def test_list_snapshots_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.list_snapshots + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.list_snapshots + ] = mock_object + + request = {} + await client.list_snapshots(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_object.call_count == 1 + + await client.list_snapshots(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + +@pytest.mark.asyncio +async def test_list_snapshots_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.ListSnapshotsRequest, +): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.ListSnapshotsResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_snapshots(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = bigtable_table_admin.ListSnapshotsRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListSnapshotsAsyncPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_snapshots_async_from_dict(): + await test_list_snapshots_async(request_type=dict) + + +def test_list_snapshots_field_headers(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable_table_admin.ListBackupsRequest() + request = bigtable_table_admin.ListSnapshotsRequest() request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_backups), "__call__") as call: - call.return_value = bigtable_table_admin.ListBackupsResponse() - client.list_backups(request) + with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: + call.return_value = bigtable_table_admin.ListSnapshotsResponse() + client.list_snapshots(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 @@ -6204,23 +8770,23 @@ def test_list_backups_field_headers(): @pytest.mark.asyncio -async def test_list_backups_field_headers_async(): +async def test_list_snapshots_field_headers_async(): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable_table_admin.ListBackupsRequest() + request = bigtable_table_admin.ListSnapshotsRequest() request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.ListBackupsResponse() + bigtable_table_admin.ListSnapshotsResponse() ) - await client.list_backups(request) + await client.list_snapshots(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) @@ -6235,18 +8801,18 @@ async def test_list_backups_field_headers_async(): ) in kw["metadata"] -def test_list_backups_flattened(): +def test_list_snapshots_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.ListBackupsResponse() + call.return_value = bigtable_table_admin.ListSnapshotsResponse() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - client.list_backups( + client.list_snapshots( parent="parent_value", ) @@ -6259,7 +8825,7 @@ def test_list_backups_flattened(): assert arg == mock_val -def test_list_backups_flattened_error(): +def test_list_snapshots_flattened_error(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -6267,29 +8833,29 @@ def test_list_backups_flattened_error(): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.list_backups( - bigtable_table_admin.ListBackupsRequest(), + client.list_snapshots( + bigtable_table_admin.ListSnapshotsRequest(), parent="parent_value", ) @pytest.mark.asyncio -async def test_list_backups_flattened_async(): +async def test_list_snapshots_flattened_async(): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.ListBackupsResponse() + call.return_value = bigtable_table_admin.ListSnapshotsResponse() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.ListBackupsResponse() + bigtable_table_admin.ListSnapshotsResponse() ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.list_backups( + response = await client.list_snapshots( parent="parent_value", ) @@ -6303,7 +8869,7 @@ async def test_list_backups_flattened_async(): @pytest.mark.asyncio -async def test_list_backups_flattened_error_async(): +async def test_list_snapshots_flattened_error_async(): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -6311,44 +8877,44 @@ async def test_list_backups_flattened_error_async(): # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): - await client.list_backups( - bigtable_table_admin.ListBackupsRequest(), + await client.list_snapshots( + bigtable_table_admin.ListSnapshotsRequest(), parent="parent_value", ) -def test_list_backups_pager(transport_name: str = "grpc"): +def test_list_snapshots_pager(transport_name: str = "grpc"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport_name, ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: # Set the response to a series of pages. call.side_effect = ( - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - table.Backup(), - table.Backup(), + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[ + table.Snapshot(), + table.Snapshot(), + table.Snapshot(), ], next_page_token="abc", ), - bigtable_table_admin.ListBackupsResponse( - backups=[], + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[], next_page_token="def", ), - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[ + table.Snapshot(), ], next_page_token="ghi", ), - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - table.Backup(), + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[ + table.Snapshot(), + table.Snapshot(), ], ), RuntimeError, @@ -6358,95 +8924,95 @@ def test_list_backups_pager(transport_name: str = "grpc"): metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) - pager = client.list_backups(request={}) + pager = client.list_snapshots(request={}) assert pager._metadata == metadata results = list(pager) assert len(results) == 6 - assert all(isinstance(i, table.Backup) for i in results) + assert all(isinstance(i, table.Snapshot) for i in results) -def test_list_backups_pages(transport_name: str = "grpc"): +def test_list_snapshots_pages(transport_name: str = "grpc"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport_name, ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: # Set the response to a series of pages. 
call.side_effect = ( - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - table.Backup(), - table.Backup(), + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[ + table.Snapshot(), + table.Snapshot(), + table.Snapshot(), ], next_page_token="abc", ), - bigtable_table_admin.ListBackupsResponse( - backups=[], + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[], next_page_token="def", ), - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[ + table.Snapshot(), ], next_page_token="ghi", ), - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - table.Backup(), + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[ + table.Snapshot(), + table.Snapshot(), ], ), RuntimeError, ) - pages = list(client.list_backups(request={}).pages) + pages = list(client.list_snapshots(request={}).pages) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @pytest.mark.asyncio -async def test_list_backups_async_pager(): +async def test_list_snapshots_async_pager(): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.list_backups), "__call__", new_callable=mock.AsyncMock + type(client.transport.list_snapshots), "__call__", new_callable=mock.AsyncMock ) as call: # Set the response to a series of pages. call.side_effect = ( - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - table.Backup(), - table.Backup(), + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[ + table.Snapshot(), + table.Snapshot(), + table.Snapshot(), ], next_page_token="abc", ), - bigtable_table_admin.ListBackupsResponse( - backups=[], + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[], next_page_token="def", ), - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[ + table.Snapshot(), ], next_page_token="ghi", ), - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - table.Backup(), + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[ + table.Snapshot(), + table.Snapshot(), ], ), RuntimeError, ) - async_pager = await client.list_backups( + async_pager = await client.list_snapshots( request={}, ) assert async_pager.next_page_token == "abc" @@ -6455,43 +9021,43 @@ async def test_list_backups_async_pager(): responses.append(response) assert len(responses) == 6 - assert all(isinstance(i, table.Backup) for i in responses) + assert all(isinstance(i, table.Snapshot) for i in responses) @pytest.mark.asyncio -async def test_list_backups_async_pages(): +async def test_list_snapshots_async_pages(): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.list_backups), "__call__", new_callable=mock.AsyncMock + type(client.transport.list_snapshots), "__call__", new_callable=mock.AsyncMock ) as call: # Set the response to a series of pages. 
call.side_effect = ( - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - table.Backup(), - table.Backup(), + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[ + table.Snapshot(), + table.Snapshot(), + table.Snapshot(), ], next_page_token="abc", ), - bigtable_table_admin.ListBackupsResponse( - backups=[], + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[], next_page_token="def", ), - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[ + table.Snapshot(), ], next_page_token="ghi", ), - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - table.Backup(), + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[ + table.Snapshot(), + table.Snapshot(), ], ), RuntimeError, @@ -6500,7 +9066,7 @@ async def test_list_backups_async_pages(): # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 async for page_ in ( # pragma: no branch - await client.list_backups(request={}) + await client.list_snapshots(request={}) ).pages: pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): @@ -6510,11 +9076,11 @@ async def test_list_backups_async_pages(): @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.RestoreTableRequest, + bigtable_table_admin.DeleteSnapshotRequest, dict, ], ) -def test_restore_table(request_type, transport: str = "grpc"): +def test_delete_snapshot(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -6525,21 +9091,22 @@ def test_restore_table(request_type, transport: str = "grpc"): request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.restore_table), "__call__") as call: + with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/spam") - response = client.restore_table(request) + call.return_value = None + response = client.delete_snapshot(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.RestoreTableRequest() + request = bigtable_table_admin.DeleteSnapshotRequest() + assert args[0] == request # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) + assert response is None -def test_restore_table_empty_call(): +def test_delete_snapshot_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( @@ -6548,161 +9115,148 @@ def test_restore_table_empty_call(): ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.restore_table), "__call__") as call: - client.restore_table() + with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.delete_snapshot() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.RestoreTableRequest() + assert args[0] == bigtable_table_admin.DeleteSnapshotRequest() -@pytest.mark.asyncio -async def test_restore_table_async( - transport: str = "grpc_asyncio", - request_type=bigtable_table_admin.RestoreTableRequest, -): - client = BigtableTableAdminAsyncClient( +def test_delete_snapshot_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + transport="grpc", ) - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.DeleteSnapshotRequest( + name="name_value", + ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.restore_table), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") + with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. ) - response = await client.restore_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) + client.delete_snapshot(request=request) + call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.RestoreTableRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_restore_table_async_from_dict(): - await test_restore_table_async(request_type=dict) + assert args[0] == bigtable_table_admin.DeleteSnapshotRequest( + name="name_value", + ) -def test_restore_table_field_headers(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) +def test_delete_snapshot_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.RestoreTableRequest() + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - request.parent = "parent_value" + # Ensure method has been cached + assert client._transport.delete_snapshot in client._transport._wrapped_methods - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(type(client.transport.restore_table), "__call__") as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.restore_table(request) + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.delete_snapshot] = mock_rpc + request = {} + client.delete_snapshot(request) # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request + assert mock_rpc.call_count == 1 - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "parent=parent_value", - ) in kw["metadata"] + client.delete_snapshot(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio -async def test_restore_table_field_headers_async(): +async def test_delete_snapshot_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", ) - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.RestoreTableRequest() - - request.parent = "parent_value" - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.restore_table), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/op") - ) - await client.restore_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) + with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_snapshot() + call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == bigtable_table_admin.DeleteSnapshotRequest() - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - "x-goog-request-params", - "parent=parent_value", - ) in kw["metadata"] +@pytest.mark.asyncio +async def test_delete_snapshot_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.CopyBackupRequest, - dict, - ], -) -def test_copy_backup(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() + # Ensure method has been cached + assert ( + client._client._transport.delete_snapshot + in client._client._transport._wrapped_methods + ) - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/spam") - response = client.copy_backup(request) + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CopyBackupRequest() + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.delete_snapshot + ] = mock_object - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) + request = {} + await client.delete_snapshot(request) + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 -def test_copy_backup_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) + await client.delete_snapshot(request) - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: - client.copy_backup() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CopyBackupRequest() + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 @pytest.mark.asyncio -async def test_copy_backup_async( - transport: str = "grpc_asyncio", request_type=bigtable_table_admin.CopyBackupRequest +async def test_delete_snapshot_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.DeleteSnapshotRequest, ): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), @@ -6714,42 +9268,41 @@ async def test_copy_backup_async( request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - response = await client.copy_backup(request) + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_snapshot(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CopyBackupRequest() + request = bigtable_table_admin.DeleteSnapshotRequest() + assert args[0] == request # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) + assert response is None @pytest.mark.asyncio -async def test_copy_backup_async_from_dict(): - await test_copy_backup_async(request_type=dict) +async def test_delete_snapshot_async_from_dict(): + await test_delete_snapshot_async(request_type=dict) -def test_copy_backup_field_headers(): +def test_delete_snapshot_field_headers(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable_table_admin.CopyBackupRequest() + request = bigtable_table_admin.DeleteSnapshotRequest() - request.parent = "parent_value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.copy_backup(request) + with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: + call.return_value = None + client.delete_snapshot(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 @@ -6760,28 +9313,26 @@ def test_copy_backup_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "parent=parent_value", + "name=name_value", ) in kw["metadata"] @pytest.mark.asyncio -async def test_copy_backup_field_headers_async(): +async def test_delete_snapshot_field_headers_async(): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. 
- request = bigtable_table_admin.CopyBackupRequest() + request = bigtable_table_admin.DeleteSnapshotRequest() - request.parent = "parent_value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/op") - ) - await client.copy_backup(request) + with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_snapshot(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) @@ -6792,47 +9343,35 @@ async def test_copy_backup_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "parent=parent_value", + "name=name_value", ) in kw["metadata"] -def test_copy_backup_flattened(): +def test_delete_snapshot_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/op") + call.return_value = None # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - client.copy_backup( - parent="parent_value", - backup_id="backup_id_value", - source_backup="source_backup_value", - expire_time=timestamp_pb2.Timestamp(seconds=751), - ) + client.delete_snapshot( + name="name_value", + ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" - assert arg == mock_val - arg = args[0].backup_id - mock_val = "backup_id_value" - assert arg == mock_val - arg = args[0].source_backup - mock_val = "source_backup_value" + arg = args[0].name + mock_val = "name_value" assert arg == mock_val - assert TimestampRule().to_proto(args[0].expire_time) == timestamp_pb2.Timestamp( - seconds=751 - ) -def test_copy_backup_flattened_error(): +def test_delete_snapshot_flattened_error(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -6840,58 +9379,41 @@ def test_copy_backup_flattened_error(): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.copy_backup( - bigtable_table_admin.CopyBackupRequest(), - parent="parent_value", - backup_id="backup_id_value", - source_backup="source_backup_value", - expire_time=timestamp_pb2.Timestamp(seconds=751), + client.delete_snapshot( + bigtable_table_admin.DeleteSnapshotRequest(), + name="name_value", ) @pytest.mark.asyncio -async def test_copy_backup_flattened_async(): +async def test_delete_snapshot_flattened_async(): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. 
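# A small, self-contained sketch of what the *_field_headers tests are checking:
# routing fields from the request are flattened into one "x-goog-request-params"
# gRPC metadata entry. The helper below only illustrates the shape of that entry;
# the real clients build it through google.api_core's routing-header support.
from urllib.parse import quote

def request_params_metadata(**fields):
    # e.g. request_params_metadata(name="name_value") -> ("x-goog-request-params", "name=name_value")
    value = "&".join(f"{key}={quote(str(val))}" for key, val in fields.items())
    return ("x-goog-request-params", value)

assert request_params_metadata(name="name_value") == (
    "x-goog-request-params",
    "name=name_value",
)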
- with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name="operations/op") + call.return_value = None - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.copy_backup( - parent="parent_value", - backup_id="backup_id_value", - source_backup="source_backup_value", - expire_time=timestamp_pb2.Timestamp(seconds=751), + response = await client.delete_snapshot( + name="name_value", ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" - assert arg == mock_val - arg = args[0].backup_id - mock_val = "backup_id_value" - assert arg == mock_val - arg = args[0].source_backup - mock_val = "source_backup_value" + arg = args[0].name + mock_val = "name_value" assert arg == mock_val - assert TimestampRule().to_proto(args[0].expire_time) == timestamp_pb2.Timestamp( - seconds=751 - ) @pytest.mark.asyncio -async def test_copy_backup_flattened_error_async(): +async def test_delete_snapshot_flattened_error_async(): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -6899,23 +9421,20 @@ async def test_copy_backup_flattened_error_async(): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - await client.copy_backup( - bigtable_table_admin.CopyBackupRequest(), - parent="parent_value", - backup_id="backup_id_value", - source_backup="source_backup_value", - expire_time=timestamp_pb2.Timestamp(seconds=751), + await client.delete_snapshot( + bigtable_table_admin.DeleteSnapshotRequest(), + name="name_value", ) @pytest.mark.parametrize( "request_type", [ - iam_policy_pb2.GetIamPolicyRequest, + bigtable_table_admin.CreateBackupRequest, dict, ], ) -def test_get_iam_policy(request_type, transport: str = "grpc"): +def test_create_backup(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -6926,26 +9445,22 @@ def test_get_iam_policy(request_type, transport: str = "grpc"): request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + with mock.patch.object(type(client.transport.create_backup), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = policy_pb2.Policy( - version=774, - etag=b"etag_blob", - ) - response = client.get_iam_policy(request) + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.create_backup(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.GetIamPolicyRequest() + request = bigtable_table_admin.CreateBackupRequest() + assert args[0] == request # Establish that the response is the type that we expect. 
- assert isinstance(response, policy_pb2.Policy) - assert response.version == 774 - assert response.etag == b"etag_blob" + assert isinstance(response, future.Future) -def test_get_iam_policy_empty_call(): +def test_create_backup_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( @@ -6954,16 +9469,160 @@ def test_get_iam_policy_empty_call(): ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: - client.get_iam_policy() + with mock.patch.object(type(client.transport.create_backup), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.create_backup() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.GetIamPolicyRequest() + assert args[0] == bigtable_table_admin.CreateBackupRequest() + + +def test_create_backup_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.CreateBackupRequest( + parent="parent_value", + backup_id="backup_id_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_backup), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.create_backup(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.CreateBackupRequest( + parent="parent_value", + backup_id="backup_id_value", + ) + + +def test_create_backup_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.create_backup in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.create_backup] = mock_rpc + request = {} + client.create_backup(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.create_backup(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio -async def test_get_iam_policy_async( - transport: str = "grpc_asyncio", request_type=iam_policy_pb2.GetIamPolicyRequest +async def test_create_backup_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.create_backup() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.CreateBackupRequest() + + +@pytest.mark.asyncio +async def test_create_backup_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.create_backup + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.create_backup + ] = mock_object + + request = {} + await client.create_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.create_backup(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + +@pytest.mark.asyncio +async def test_create_backup_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.CreateBackupRequest, ): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), @@ -6975,47 +9634,43 @@ async def test_get_iam_policy_async( request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + with mock.patch.object(type(client.transport.create_backup), "__call__") as call: # Designate an appropriate return value for the call. 
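# A hedged sketch of the synchronous cached-RPC check used for operation-returning
# methods such as create_backup: the cached wrapper is replaced with a plain Mock whose
# return value exposes a string `.name` (the LRO plumbing reads it), and wrap_method is
# reset after the first call because that call may lazily wrap the operations-polling
# RPC. `client`, `method_name`, and `wrapper_fn` are assumed to come from a surrounding
# test like the ones above; this is illustrative, not the generated test itself.
from unittest import mock

def check_lro_rpc_is_cached(client, method_name, wrapper_fn):
    transport = client._transport
    mock_rpc = mock.Mock()
    mock_rpc.return_value.name = "operations/fake"    # LRO callers expect a string name
    transport._wrapped_methods[getattr(transport, method_name)] = mock_rpc

    getattr(client, method_name)({})                  # may build the operations wrapper
    wrapper_fn.reset_mock()
    getattr(client, method_name)({})                  # must reuse every cached wrapper
    assert wrapper_fn.call_count == 0
    assert mock_rpc.call_count == 2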
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - policy_pb2.Policy( - version=774, - etag=b"etag_blob", - ) + operations_pb2.Operation(name="operations/spam") ) - response = await client.get_iam_policy(request) + response = await client.create_backup(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.GetIamPolicyRequest() + request = bigtable_table_admin.CreateBackupRequest() + assert args[0] == request # Establish that the response is the type that we expect. - assert isinstance(response, policy_pb2.Policy) - assert response.version == 774 - assert response.etag == b"etag_blob" + assert isinstance(response, future.Future) @pytest.mark.asyncio -async def test_get_iam_policy_async_from_dict(): - await test_get_iam_policy_async(request_type=dict) +async def test_create_backup_async_from_dict(): + await test_create_backup_async(request_type=dict) -def test_get_iam_policy_field_headers(): +def test_create_backup_field_headers(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = iam_policy_pb2.GetIamPolicyRequest() + request = bigtable_table_admin.CreateBackupRequest() - request.resource = "resource_value" + request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: - call.return_value = policy_pb2.Policy() - client.get_iam_policy(request) + with mock.patch.object(type(client.transport.create_backup), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_backup(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 @@ -7026,26 +9681,28 @@ def test_get_iam_policy_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "resource=resource_value", + "parent=parent_value", ) in kw["metadata"] @pytest.mark.asyncio -async def test_get_iam_policy_field_headers_async(): +async def test_create_backup_field_headers_async(): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = iam_policy_pb2.GetIamPolicyRequest() + request = bigtable_table_admin.CreateBackupRequest() - request.resource = "resource_value" + request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) - await client.get_iam_policy(request) + with mock.patch.object(type(client.transport.create_backup), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.create_backup(request) # Establish that the underlying gRPC stub method was called. 
assert len(call.mock_calls) @@ -7056,52 +9713,43 @@ async def test_get_iam_policy_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "resource=resource_value", + "parent=parent_value", ) in kw["metadata"] -def test_get_iam_policy_from_dict_foreign(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = policy_pb2.Policy() - response = client.get_iam_policy( - request={ - "resource": "resource_value", - "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), - } - ) - call.assert_called() - - -def test_get_iam_policy_flattened(): +def test_create_backup_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + with mock.patch.object(type(client.transport.create_backup), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = policy_pb2.Policy() + call.return_value = operations_pb2.Operation(name="operations/op") # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - client.get_iam_policy( - resource="resource_value", + client.create_backup( + parent="parent_value", + backup_id="backup_id_value", + backup=table.Backup(name="name_value"), ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - arg = args[0].resource - mock_val = "resource_value" + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].backup_id + mock_val = "backup_id_value" + assert arg == mock_val + arg = args[0].backup + mock_val = table.Backup(name="name_value") assert arg == mock_val -def test_get_iam_policy_flattened_error(): +def test_create_backup_flattened_error(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -7109,41 +9757,53 @@ def test_get_iam_policy_flattened_error(): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.get_iam_policy( - iam_policy_pb2.GetIamPolicyRequest(), - resource="resource_value", + client.create_backup( + bigtable_table_admin.CreateBackupRequest(), + parent="parent_value", + backup_id="backup_id_value", + backup=table.Backup(name="name_value"), ) @pytest.mark.asyncio -async def test_get_iam_policy_flattened_async(): +async def test_create_backup_flattened_async(): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + with mock.patch.object(type(client.transport.create_backup), "__call__") as call: # Designate an appropriate return value for the call. 
- call.return_value = policy_pb2.Policy() + call.return_value = operations_pb2.Operation(name="operations/op") - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.get_iam_policy( - resource="resource_value", + response = await client.create_backup( + parent="parent_value", + backup_id="backup_id_value", + backup=table.Backup(name="name_value"), ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - arg = args[0].resource - mock_val = "resource_value" + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].backup_id + mock_val = "backup_id_value" + assert arg == mock_val + arg = args[0].backup + mock_val = table.Backup(name="name_value") assert arg == mock_val @pytest.mark.asyncio -async def test_get_iam_policy_flattened_error_async(): +async def test_create_backup_flattened_error_async(): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -7151,20 +9811,22 @@ async def test_get_iam_policy_flattened_error_async(): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - await client.get_iam_policy( - iam_policy_pb2.GetIamPolicyRequest(), - resource="resource_value", + await client.create_backup( + bigtable_table_admin.CreateBackupRequest(), + parent="parent_value", + backup_id="backup_id_value", + backup=table.Backup(name="name_value"), ) @pytest.mark.parametrize( "request_type", [ - iam_policy_pb2.SetIamPolicyRequest, + bigtable_table_admin.GetBackupRequest, dict, ], ) -def test_set_iam_policy(request_type, transport: str = "grpc"): +def test_get_backup(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -7175,26 +9837,33 @@ def test_set_iam_policy(request_type, transport: str = "grpc"): request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + with mock.patch.object(type(client.transport.get_backup), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = policy_pb2.Policy( - version=774, - etag=b"etag_blob", + call.return_value = table.Backup( + name="name_value", + source_table="source_table_value", + source_backup="source_backup_value", + size_bytes=1089, + state=table.Backup.State.CREATING, ) - response = client.set_iam_policy(request) + response = client.get_backup(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.SetIamPolicyRequest() + request = bigtable_table_admin.GetBackupRequest() + assert args[0] == request # Establish that the response is the type that we expect. 
- assert isinstance(response, policy_pb2.Policy) - assert response.version == 774 - assert response.etag == b"etag_blob" + assert isinstance(response, table.Backup) + assert response.name == "name_value" + assert response.source_table == "source_table_value" + assert response.source_backup == "source_backup_value" + assert response.size_bytes == 1089 + assert response.state == table.Backup.State.CREATING -def test_set_iam_policy_empty_call(): +def test_get_backup_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( @@ -7203,16 +9872,153 @@ def test_set_iam_policy_empty_call(): ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: - client.set_iam_policy() + with mock.patch.object(type(client.transport.get_backup), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.get_backup() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.SetIamPolicyRequest() + assert args[0] == bigtable_table_admin.GetBackupRequest() + + +def test_get_backup_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.GetBackupRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_backup), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.get_backup(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.GetBackupRequest( + name="name_value", + ) + + +def test_get_backup_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.get_backup in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.get_backup] = mock_rpc + request = {} + client.get_backup(request) + + # Establish that the underlying gRPC stub method was called. 
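# A hedged illustration of what the *_non_empty_request_with_auto_populated_field tests
# guard: per AIP-4235, request fields marked for auto-population are filled with a UUID4
# by the client layer only when the caller leaves them empty, while caller-set fields
# such as `name` pass through untouched. The `request_id` field below is a generic
# example, not a field of these Bigtable admin requests.
import uuid

def auto_populate(request: dict, field: str = "request_id") -> dict:
    if not request.get(field):
        request[field] = str(uuid.uuid4())
    return request

req = auto_populate({"name": "name_value"})
assert req["name"] == "name_value"      # explicitly-set fields are preserved
assert len(req["request_id"]) == 36     # a canonical UUID4 string was injected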
+ assert mock_rpc.call_count == 1 + + client.get_backup(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio -async def test_set_iam_policy_async( - transport: str = "grpc_asyncio", request_type=iam_policy_pb2.SetIamPolicyRequest +async def test_get_backup_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + table.Backup( + name="name_value", + source_table="source_table_value", + source_backup="source_backup_value", + size_bytes=1089, + state=table.Backup.State.CREATING, + ) + ) + response = await client.get_backup() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.GetBackupRequest() + + +@pytest.mark.asyncio +async def test_get_backup_async_use_cached_wrapped_rpc(transport: str = "grpc_asyncio"): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.get_backup + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.get_backup + ] = mock_object + + request = {} + await client.get_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + await client.get_backup(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + +@pytest.mark.asyncio +async def test_get_backup_async( + transport: str = "grpc_asyncio", request_type=bigtable_table_admin.GetBackupRequest ): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), @@ -7224,47 +10030,54 @@ async def test_set_iam_policy_async( request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + with mock.patch.object(type(client.transport.get_backup), "__call__") as call: # Designate an appropriate return value for the call. 
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - policy_pb2.Policy( - version=774, - etag=b"etag_blob", + table.Backup( + name="name_value", + source_table="source_table_value", + source_backup="source_backup_value", + size_bytes=1089, + state=table.Backup.State.CREATING, ) ) - response = await client.set_iam_policy(request) + response = await client.get_backup(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.SetIamPolicyRequest() + request = bigtable_table_admin.GetBackupRequest() + assert args[0] == request # Establish that the response is the type that we expect. - assert isinstance(response, policy_pb2.Policy) - assert response.version == 774 - assert response.etag == b"etag_blob" + assert isinstance(response, table.Backup) + assert response.name == "name_value" + assert response.source_table == "source_table_value" + assert response.source_backup == "source_backup_value" + assert response.size_bytes == 1089 + assert response.state == table.Backup.State.CREATING @pytest.mark.asyncio -async def test_set_iam_policy_async_from_dict(): - await test_set_iam_policy_async(request_type=dict) +async def test_get_backup_async_from_dict(): + await test_get_backup_async(request_type=dict) -def test_set_iam_policy_field_headers(): +def test_get_backup_field_headers(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = iam_policy_pb2.SetIamPolicyRequest() + request = bigtable_table_admin.GetBackupRequest() - request.resource = "resource_value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: - call.return_value = policy_pb2.Policy() - client.set_iam_policy(request) + with mock.patch.object(type(client.transport.get_backup), "__call__") as call: + call.return_value = table.Backup() + client.get_backup(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 @@ -7275,26 +10088,26 @@ def test_set_iam_policy_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "resource=resource_value", + "name=name_value", ) in kw["metadata"] @pytest.mark.asyncio -async def test_set_iam_policy_field_headers_async(): +async def test_get_backup_field_headers_async(): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = iam_policy_pb2.SetIamPolicyRequest() + request = bigtable_table_admin.GetBackupRequest() - request.resource = "resource_value" + request.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) - await client.set_iam_policy(request) + with mock.patch.object(type(client.transport.get_backup), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Backup()) + await client.get_backup(request) # Establish that the underlying gRPC stub method was called. 
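# The async tests wrap their canned responses in grpc_helpers_async.FakeUnaryUnaryCall
# because the async client awaits whatever the mocked stub returns. Below is a minimal
# stand-in with the same behavior, purely illustrative and not the api_core class.
import asyncio

class FakeCall:
    def __init__(self, response=None):
        self._response = response

    def __await__(self):
        async def _resolve():
            return self._response
        return _resolve().__await__()

async def _demo():
    # Awaiting the fake call resolves to the canned response, just as the tests above
    # await a FakeUnaryUnaryCall wrapping a table.Backup message.
    return await FakeCall("backup-placeholder")

assert asyncio.run(_demo()) == "backup-placeholder"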
assert len(call.mock_calls) @@ -7305,53 +10118,35 @@ async def test_set_iam_policy_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "resource=resource_value", + "name=name_value", ) in kw["metadata"] -def test_set_iam_policy_from_dict_foreign(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = policy_pb2.Policy() - response = client.set_iam_policy( - request={ - "resource": "resource_value", - "policy": policy_pb2.Policy(version=774), - "update_mask": field_mask_pb2.FieldMask(paths=["paths_value"]), - } - ) - call.assert_called() - - -def test_set_iam_policy_flattened(): +def test_get_backup_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + with mock.patch.object(type(client.transport.get_backup), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = policy_pb2.Policy() + call.return_value = table.Backup() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - client.set_iam_policy( - resource="resource_value", - ) + client.get_backup( + name="name_value", + ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - arg = args[0].resource - mock_val = "resource_value" + arg = args[0].name + mock_val = "name_value" assert arg == mock_val -def test_set_iam_policy_flattened_error(): +def test_get_backup_flattened_error(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -7359,41 +10154,41 @@ def test_set_iam_policy_flattened_error(): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.set_iam_policy( - iam_policy_pb2.SetIamPolicyRequest(), - resource="resource_value", + client.get_backup( + bigtable_table_admin.GetBackupRequest(), + name="name_value", ) @pytest.mark.asyncio -async def test_set_iam_policy_flattened_async(): +async def test_get_backup_flattened_async(): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + with mock.patch.object(type(client.transport.get_backup), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = policy_pb2.Policy() + call.return_value = table.Backup() - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Backup()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.set_iam_policy( - resource="resource_value", + response = await client.get_backup( + name="name_value", ) # Establish that the underlying call was made with the expected # request object values. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - arg = args[0].resource - mock_val = "resource_value" + arg = args[0].name + mock_val = "name_value" assert arg == mock_val @pytest.mark.asyncio -async def test_set_iam_policy_flattened_error_async(): +async def test_get_backup_flattened_error_async(): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -7401,20 +10196,20 @@ async def test_set_iam_policy_flattened_error_async(): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - await client.set_iam_policy( - iam_policy_pb2.SetIamPolicyRequest(), - resource="resource_value", + await client.get_backup( + bigtable_table_admin.GetBackupRequest(), + name="name_value", ) @pytest.mark.parametrize( "request_type", [ - iam_policy_pb2.TestIamPermissionsRequest, + bigtable_table_admin.UpdateBackupRequest, dict, ], ) -def test_test_iam_permissions(request_type, transport: str = "grpc"): +def test_update_backup(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -7425,26 +10220,33 @@ def test_test_iam_permissions(request_type, transport: str = "grpc"): request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.test_iam_permissions), "__call__" - ) as call: + with mock.patch.object(type(client.transport.update_backup), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = iam_policy_pb2.TestIamPermissionsResponse( - permissions=["permissions_value"], + call.return_value = table.Backup( + name="name_value", + source_table="source_table_value", + source_backup="source_backup_value", + size_bytes=1089, + state=table.Backup.State.CREATING, ) - response = client.test_iam_permissions(request) + response = client.update_backup(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.TestIamPermissionsRequest() + request = bigtable_table_admin.UpdateBackupRequest() + assert args[0] == request # Establish that the response is the type that we expect. - assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) - assert response.permissions == ["permissions_value"] + assert isinstance(response, table.Backup) + assert response.name == "name_value" + assert response.source_table == "source_table_value" + assert response.source_backup == "source_backup_value" + assert response.size_bytes == 1089 + assert response.state == table.Backup.State.CREATING -def test_test_iam_permissions_empty_call(): +def test_update_backup_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( @@ -7453,19 +10255,152 @@ def test_test_iam_permissions_empty_call(): ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.test_iam_permissions), "__call__" - ) as call: - client.test_iam_permissions() + with mock.patch.object(type(client.transport.update_backup), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.update_backup() call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.TestIamPermissionsRequest() + assert args[0] == bigtable_table_admin.UpdateBackupRequest() + + +def test_update_backup_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.UpdateBackupRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_backup), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.update_backup(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.UpdateBackupRequest() + + +def test_update_backup_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.update_backup in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.update_backup] = mock_rpc + request = {} + client.update_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.update_backup(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio -async def test_test_iam_permissions_async( +async def test_update_backup_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_backup), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + table.Backup( + name="name_value", + source_table="source_table_value", + source_backup="source_backup_value", + size_bytes=1089, + state=table.Backup.State.CREATING, + ) + ) + response = await client.update_backup() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.UpdateBackupRequest() + + +@pytest.mark.asyncio +async def test_update_backup_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", - request_type=iam_policy_pb2.TestIamPermissionsRequest, +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.update_backup + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.update_backup + ] = mock_object + + request = {} + await client.update_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + await client.update_backup(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + +@pytest.mark.asyncio +async def test_update_backup_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.UpdateBackupRequest, ): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), @@ -7477,49 +10412,54 @@ async def test_test_iam_permissions_async( request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.test_iam_permissions), "__call__" - ) as call: + with mock.patch.object(type(client.transport.update_backup), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - iam_policy_pb2.TestIamPermissionsResponse( - permissions=["permissions_value"], + table.Backup( + name="name_value", + source_table="source_table_value", + source_backup="source_backup_value", + size_bytes=1089, + state=table.Backup.State.CREATING, ) ) - response = await client.test_iam_permissions(request) + response = await client.update_backup(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.TestIamPermissionsRequest() + request = bigtable_table_admin.UpdateBackupRequest() + assert args[0] == request # Establish that the response is the type that we expect. 
- assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) - assert response.permissions == ["permissions_value"] + assert isinstance(response, table.Backup) + assert response.name == "name_value" + assert response.source_table == "source_table_value" + assert response.source_backup == "source_backup_value" + assert response.size_bytes == 1089 + assert response.state == table.Backup.State.CREATING @pytest.mark.asyncio -async def test_test_iam_permissions_async_from_dict(): - await test_test_iam_permissions_async(request_type=dict) +async def test_update_backup_async_from_dict(): + await test_update_backup_async(request_type=dict) -def test_test_iam_permissions_field_headers(): +def test_update_backup_field_headers(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = iam_policy_pb2.TestIamPermissionsRequest() + request = bigtable_table_admin.UpdateBackupRequest() - request.resource = "resource_value" + request.backup.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.test_iam_permissions), "__call__" - ) as call: - call.return_value = iam_policy_pb2.TestIamPermissionsResponse() - client.test_iam_permissions(request) + with mock.patch.object(type(client.transport.update_backup), "__call__") as call: + call.return_value = table.Backup() + client.update_backup(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 @@ -7530,30 +10470,26 @@ def test_test_iam_permissions_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "resource=resource_value", + "backup.name=name_value", ) in kw["metadata"] @pytest.mark.asyncio -async def test_test_iam_permissions_field_headers_async(): +async def test_update_backup_field_headers_async(): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = iam_policy_pb2.TestIamPermissionsRequest() + request = bigtable_table_admin.UpdateBackupRequest() - request.resource = "resource_value" + request.backup.name = "name_value" # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.test_iam_permissions), "__call__" - ) as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - iam_policy_pb2.TestIamPermissionsResponse() - ) - await client.test_iam_permissions(request) + with mock.patch.object(type(client.transport.update_backup), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Backup()) + await client.update_backup(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) @@ -7564,60 +10500,39 @@ async def test_test_iam_permissions_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "resource=resource_value", + "backup.name=name_value", ) in kw["metadata"] -def test_test_iam_permissions_from_dict_foreign(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.test_iam_permissions), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = iam_policy_pb2.TestIamPermissionsResponse() - response = client.test_iam_permissions( - request={ - "resource": "resource_value", - "permissions": ["permissions_value"], - } - ) - call.assert_called() - - -def test_test_iam_permissions_flattened(): +def test_update_backup_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.test_iam_permissions), "__call__" - ) as call: + with mock.patch.object(type(client.transport.update_backup), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + call.return_value = table.Backup() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - client.test_iam_permissions( - resource="resource_value", - permissions=["permissions_value"], + client.update_backup( + backup=table.Backup(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - arg = args[0].resource - mock_val = "resource_value" + arg = args[0].backup + mock_val = table.Backup(name="name_value") assert arg == mock_val - arg = args[0].permissions - mock_val = ["permissions_value"] + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) assert arg == mock_val -def test_test_iam_permissions_flattened_error(): +def test_update_backup_flattened_error(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -7625,116 +10540,4694 @@ def test_test_iam_permissions_flattened_error(): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.test_iam_permissions( - iam_policy_pb2.TestIamPermissionsRequest(), - resource="resource_value", - permissions=["permissions_value"], + client.update_backup( + bigtable_table_admin.UpdateBackupRequest(), + backup=table.Backup(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) @pytest.mark.asyncio -async def test_test_iam_permissions_flattened_async(): +async def test_update_backup_flattened_async(): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.test_iam_permissions), "__call__" - ) as call: + with mock.patch.object(type(client.transport.update_backup), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + call.return_value = table.Backup() - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - iam_policy_pb2.TestIamPermissionsResponse() - ) + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Backup()) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. 
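# A short sketch of the convention the *_flattened_error tests enforce: a GAPIC method
# accepts either a full request object or individual flattened keyword arguments, never
# both at once. The function below is illustrative only; the real check lives inside the
# generated client methods, and the error wording here is an approximation.
def validate_call(request=None, **flattened):
    has_flattened = any(value is not None for value in flattened.values())
    if request is not None and has_flattened:
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )

validate_call(request=None, backup="backup_value")         # ok: flattened fields only
try:
    validate_call(request={"backup": {}}, backup="backup_value")
except ValueError:
    pass                                                    # ok: mixing both is rejected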
- response = await client.test_iam_permissions( - resource="resource_value", - permissions=["permissions_value"], + response = await client.update_backup( + backup=table.Backup(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) # Establish that the underlying call was made with the expected # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - arg = args[0].resource - mock_val = "resource_value" + arg = args[0].backup + mock_val = table.Backup(name="name_value") assert arg == mock_val - arg = args[0].permissions - mock_val = ["permissions_value"] + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) assert arg == mock_val @pytest.mark.asyncio -async def test_test_iam_permissions_flattened_error_async(): +async def test_update_backup_flattened_error_async(): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_backup( + bigtable_table_admin.UpdateBackupRequest(), + backup=table.Backup(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.DeleteBackupRequest, + dict, + ], +) +def test_delete_backup(request_type, transport: str = "grpc"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = bigtable_table_admin.DeleteBackupRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_backup_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.delete_backup() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.DeleteBackupRequest() + + +def test_delete_backup_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. 
+ client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.DeleteBackupRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.delete_backup(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.DeleteBackupRequest( + name="name_value", + ) + + +def test_delete_backup_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.delete_backup in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.delete_backup] = mock_rpc + request = {} + client.delete_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.delete_backup(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_delete_backup_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_backup() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.DeleteBackupRequest() + + +@pytest.mark.asyncio +async def test_delete_backup_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.delete_backup + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.delete_backup + ] = mock_object + + request = {} + await client.delete_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + await client.delete_backup(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + +@pytest.mark.asyncio +async def test_delete_backup_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.DeleteBackupRequest, +): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = bigtable_table_admin.DeleteBackupRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_backup_async_from_dict(): + await test_delete_backup_async(request_type=dict) + + +def test_delete_backup_field_headers(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.DeleteBackupRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: + call.return_value = None + client.delete_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_backup_field_headers_async(): client = BigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.DeleteBackupRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_delete_backup_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_backup( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_delete_backup_flattened_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_backup( + bigtable_table_admin.DeleteBackupRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_backup_flattened_async(): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_backup( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_delete_backup_flattened_error_async(): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.delete_backup( + bigtable_table_admin.DeleteBackupRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.ListBackupsRequest, + dict, + ], +) +def test_list_backups(request_type, transport: str = "grpc"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_table_admin.ListBackupsResponse( + next_page_token="next_page_token_value", + ) + response = client.list_backups(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = bigtable_table_admin.ListBackupsRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListBackupsPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_backups_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.list_backups() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.ListBackupsRequest() + + +def test_list_backups_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.ListBackupsRequest( + parent="parent_value", + filter="filter_value", + order_by="order_by_value", + page_token="page_token_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.list_backups(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.ListBackupsRequest( + parent="parent_value", + filter="filter_value", + order_by="order_by_value", + page_token="page_token_value", + ) + + +def test_list_backups_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.list_backups in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.list_backups] = mock_rpc + request = {} + client.list_backups(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.list_backups(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_list_backups_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.ListBackupsResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_backups() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.ListBackupsRequest() + + +@pytest.mark.asyncio +async def test_list_backups_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.list_backups + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.list_backups + ] = mock_object + + request = {} + await client.list_backups(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_object.call_count == 1 + + await client.list_backups(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + +@pytest.mark.asyncio +async def test_list_backups_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.ListBackupsRequest, +): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.ListBackupsResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_backups(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = bigtable_table_admin.ListBackupsRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListBackupsAsyncPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_backups_async_from_dict(): + await test_list_backups_async(request_type=dict) + + +def test_list_backups_field_headers(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.ListBackupsRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + call.return_value = bigtable_table_admin.ListBackupsResponse() + client.list_backups(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_backups_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.ListBackupsRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.ListBackupsResponse() + ) + await client.list_backups(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_list_backups_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_table_admin.ListBackupsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_backups( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +def test_list_backups_flattened_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_backups( + bigtable_table_admin.ListBackupsRequest(), + parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_backups_flattened_async(): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_table_admin.ListBackupsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.ListBackupsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_backups( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_list_backups_flattened_error_async(): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_backups( + bigtable_table_admin.ListBackupsRequest(), + parent="parent_value", + ) + + +def test_list_backups_pager(transport_name: str = "grpc"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + bigtable_table_admin.ListBackupsResponse( + backups=[ + table.Backup(), + table.Backup(), + table.Backup(), + ], + next_page_token="abc", + ), + bigtable_table_admin.ListBackupsResponse( + backups=[], + next_page_token="def", + ), + bigtable_table_admin.ListBackupsResponse( + backups=[ + table.Backup(), + ], + next_page_token="ghi", + ), + bigtable_table_admin.ListBackupsResponse( + backups=[ + table.Backup(), + table.Backup(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_backups(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, table.Backup) for i in results) + + +def test_list_backups_pages(transport_name: str = "grpc"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + # Set the response to a series of pages. + call.side_effect = ( + bigtable_table_admin.ListBackupsResponse( + backups=[ + table.Backup(), + table.Backup(), + table.Backup(), + ], + next_page_token="abc", + ), + bigtable_table_admin.ListBackupsResponse( + backups=[], + next_page_token="def", + ), + bigtable_table_admin.ListBackupsResponse( + backups=[ + table.Backup(), + ], + next_page_token="ghi", + ), + bigtable_table_admin.ListBackupsResponse( + backups=[ + table.Backup(), + table.Backup(), + ], + ), + RuntimeError, + ) + pages = list(client.list_backups(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_backups_async_pager(): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_backups), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + bigtable_table_admin.ListBackupsResponse( + backups=[ + table.Backup(), + table.Backup(), + table.Backup(), + ], + next_page_token="abc", + ), + bigtable_table_admin.ListBackupsResponse( + backups=[], + next_page_token="def", + ), + bigtable_table_admin.ListBackupsResponse( + backups=[ + table.Backup(), + ], + next_page_token="ghi", + ), + bigtable_table_admin.ListBackupsResponse( + backups=[ + table.Backup(), + table.Backup(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_backups( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, table.Backup) for i in responses) + + +@pytest.mark.asyncio +async def test_list_backups_async_pages(): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_backups), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + bigtable_table_admin.ListBackupsResponse( + backups=[ + table.Backup(), + table.Backup(), + table.Backup(), + ], + next_page_token="abc", + ), + bigtable_table_admin.ListBackupsResponse( + backups=[], + next_page_token="def", + ), + bigtable_table_admin.ListBackupsResponse( + backups=[ + table.Backup(), + ], + next_page_token="ghi", + ), + bigtable_table_admin.ListBackupsResponse( + backups=[ + table.Backup(), + table.Backup(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_backups(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.RestoreTableRequest, + dict, + ], +) +def test_restore_table(request_type, transport: str = "grpc"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.restore_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.restore_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = bigtable_table_admin.RestoreTableRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_restore_table_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.restore_table), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.restore_table() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.RestoreTableRequest() + + +def test_restore_table_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.RestoreTableRequest( + parent="parent_value", + table_id="table_id_value", + backup="backup_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.restore_table), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.restore_table(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.RestoreTableRequest( + parent="parent_value", + table_id="table_id_value", + backup="backup_value", + ) + + +def test_restore_table_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.restore_table in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.restore_table] = mock_rpc + request = {} + client.restore_table(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.restore_table(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_restore_table_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.restore_table), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.restore_table() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.RestoreTableRequest() + + +@pytest.mark.asyncio +async def test_restore_table_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.restore_table + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.restore_table + ] = mock_object + + request = {} + await client.restore_table(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.restore_table(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + +@pytest.mark.asyncio +async def test_restore_table_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.RestoreTableRequest, +): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.restore_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.restore_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = bigtable_table_admin.RestoreTableRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_restore_table_async_from_dict(): + await test_restore_table_async(request_type=dict) + + +def test_restore_table_field_headers(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.RestoreTableRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.restore_table), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.restore_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_restore_table_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.RestoreTableRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.restore_table), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.restore_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.CopyBackupRequest, + dict, + ], +) +def test_copy_backup(request_type, transport: str = "grpc"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.copy_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = bigtable_table_admin.CopyBackupRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_copy_backup_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.copy_backup() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.CopyBackupRequest() + + +def test_copy_backup_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.CopyBackupRequest( + parent="parent_value", + backup_id="backup_id_value", + source_backup="source_backup_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.copy_backup(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.CopyBackupRequest( + parent="parent_value", + backup_id="backup_id_value", + source_backup="source_backup_value", + ) + + +def test_copy_backup_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.copy_backup in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.copy_backup] = mock_rpc + request = {} + client.copy_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.copy_backup(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_copy_backup_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.copy_backup() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.CopyBackupRequest() + + +@pytest.mark.asyncio +async def test_copy_backup_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.copy_backup + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.copy_backup + ] = mock_object + + request = {} + await client.copy_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.copy_backup(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + +@pytest.mark.asyncio +async def test_copy_backup_async( + transport: str = "grpc_asyncio", request_type=bigtable_table_admin.CopyBackupRequest +): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.copy_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = bigtable_table_admin.CopyBackupRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_copy_backup_async_from_dict(): + await test_copy_backup_async(request_type=dict) + + +def test_copy_backup_field_headers(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.CopyBackupRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.copy_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_copy_backup_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.CopyBackupRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.copy_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_copy_backup_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.copy_backup( + parent="parent_value", + backup_id="backup_id_value", + source_backup="source_backup_value", + expire_time=timestamp_pb2.Timestamp(seconds=751), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].backup_id + mock_val = "backup_id_value" + assert arg == mock_val + arg = args[0].source_backup + mock_val = "source_backup_value" + assert arg == mock_val + assert TimestampRule().to_proto(args[0].expire_time) == timestamp_pb2.Timestamp( + seconds=751 + ) + + +def test_copy_backup_flattened_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.copy_backup( + bigtable_table_admin.CopyBackupRequest(), + parent="parent_value", + backup_id="backup_id_value", + source_backup="source_backup_value", + expire_time=timestamp_pb2.Timestamp(seconds=751), + ) + + +@pytest.mark.asyncio +async def test_copy_backup_flattened_async(): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.copy_backup( + parent="parent_value", + backup_id="backup_id_value", + source_backup="source_backup_value", + expire_time=timestamp_pb2.Timestamp(seconds=751), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].backup_id + mock_val = "backup_id_value" + assert arg == mock_val + arg = args[0].source_backup + mock_val = "source_backup_value" + assert arg == mock_val + assert TimestampRule().to_proto(args[0].expire_time) == timestamp_pb2.Timestamp( + seconds=751 + ) + + +@pytest.mark.asyncio +async def test_copy_backup_flattened_error_async(): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.copy_backup( + bigtable_table_admin.CopyBackupRequest(), + parent="parent_value", + backup_id="backup_id_value", + source_backup="source_backup_value", + expire_time=timestamp_pb2.Timestamp(seconds=751), + ) + + +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.GetIamPolicyRequest, + dict, + ], +) +def test_get_iam_policy(request_type, transport: str = "grpc"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + response = client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = iam_policy_pb2.GetIamPolicyRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + assert response.version == 774 + assert response.etag == b"etag_blob" + + +def test_get_iam_policy_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.get_iam_policy() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == iam_policy_pb2.GetIamPolicyRequest() + + +def test_get_iam_policy_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = iam_policy_pb2.GetIamPolicyRequest( + resource="resource_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.get_iam_policy(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == iam_policy_pb2.GetIamPolicyRequest( + resource="resource_value", + ) + + +def test_get_iam_policy_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.get_iam_policy in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.get_iam_policy] = mock_rpc + request = {} + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_iam_policy(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_get_iam_policy_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + ) + response = await client.get_iam_policy() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == iam_policy_pb2.GetIamPolicyRequest() + + +@pytest.mark.asyncio +async def test_get_iam_policy_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.get_iam_policy + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.get_iam_policy + ] = mock_object + + request = {} + await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + await client.get_iam_policy(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + +@pytest.mark.asyncio +async def test_get_iam_policy_async( + transport: str = "grpc_asyncio", request_type=iam_policy_pb2.GetIamPolicyRequest +): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + ) + response = await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = iam_policy_pb2.GetIamPolicyRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + assert response.version == 774 + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_get_iam_policy_async_from_dict(): + await test_get_iam_policy_async(request_type=dict) + + +def test_get_iam_policy_field_headers(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + + request.resource = "resource_value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_iam_policy_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + + request.resource = "resource_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource_value", + ) in kw["metadata"] + + +def test_get_iam_policy_from_dict_foreign(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy() + response = client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + + +def test_get_iam_policy_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_iam_policy( + resource="resource_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].resource + mock_val = "resource_value" + assert arg == mock_val + + +def test_get_iam_policy_flattened_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_iam_policy( + iam_policy_pb2.GetIamPolicyRequest(), + resource="resource_value", + ) + + +@pytest.mark.asyncio +async def test_get_iam_policy_flattened_async(): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_iam_policy( + resource="resource_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].resource + mock_val = "resource_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_get_iam_policy_flattened_error_async(): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_iam_policy( + iam_policy_pb2.GetIamPolicyRequest(), + resource="resource_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.SetIamPolicyRequest, + dict, + ], +) +def test_set_iam_policy(request_type, transport: str = "grpc"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + response = client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = iam_policy_pb2.SetIamPolicyRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + assert response.version == 774 + assert response.etag == b"etag_blob" + + +def test_set_iam_policy_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.set_iam_policy() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == iam_policy_pb2.SetIamPolicyRequest() + + +def test_set_iam_policy_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. 
+ request = iam_policy_pb2.SetIamPolicyRequest( + resource="resource_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.set_iam_policy(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == iam_policy_pb2.SetIamPolicyRequest( + resource="resource_value", + ) + + +def test_set_iam_policy_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.set_iam_policy in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.set_iam_policy] = mock_rpc + request = {} + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.set_iam_policy(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_set_iam_policy_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + ) + response = await client.set_iam_policy() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == iam_policy_pb2.SetIamPolicyRequest() + + +@pytest.mark.asyncio +async def test_set_iam_policy_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.set_iam_policy + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.set_iam_policy + ] = mock_object + + request = {} + await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + await client.set_iam_policy(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + +@pytest.mark.asyncio +async def test_set_iam_policy_async( + transport: str = "grpc_asyncio", request_type=iam_policy_pb2.SetIamPolicyRequest +): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + ) + response = await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = iam_policy_pb2.SetIamPolicyRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + assert response.version == 774 + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_set_iam_policy_async_from_dict(): + await test_set_iam_policy_async(request_type=dict) + + +def test_set_iam_policy_field_headers(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + + request.resource = "resource_value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_set_iam_policy_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + + request.resource = "resource_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource_value", + ) in kw["metadata"] + + +def test_set_iam_policy_from_dict_foreign(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy() + response = client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + "update_mask": field_mask_pb2.FieldMask(paths=["paths_value"]), + } + ) + call.assert_called() + + +def test_set_iam_policy_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.set_iam_policy( + resource="resource_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].resource + mock_val = "resource_value" + assert arg == mock_val + + +def test_set_iam_policy_flattened_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_iam_policy( + iam_policy_pb2.SetIamPolicyRequest(), + resource="resource_value", + ) + + +@pytest.mark.asyncio +async def test_set_iam_policy_flattened_async(): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.set_iam_policy( + resource="resource_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].resource + mock_val = "resource_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_set_iam_policy_flattened_error_async(): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.set_iam_policy( + iam_policy_pb2.SetIamPolicyRequest(), + resource="resource_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.TestIamPermissionsRequest, + dict, + ], +) +def test_test_iam_permissions(request_type, transport: str = "grpc"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + response = client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = iam_policy_pb2.TestIamPermissionsRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + assert response.permissions == ["permissions_value"] + + +def test_test_iam_permissions_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.test_iam_permissions() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == iam_policy_pb2.TestIamPermissionsRequest() + + +def test_test_iam_permissions_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. 
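+    # Auto-population only applies to UUID4-formatted fields the caller leaves
+    # unset; explicitly provided strings (the `resource` value set below) must
+    # reach the request unchanged, which the assertion at the end verifies.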
+ client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = iam_policy_pb2.TestIamPermissionsRequest( + resource="resource_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.test_iam_permissions(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == iam_policy_pb2.TestIamPermissionsRequest( + resource="resource_value", + ) + + +def test_test_iam_permissions_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.test_iam_permissions in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.test_iam_permissions + ] = mock_rpc + request = {} + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.test_iam_permissions(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_test_iam_permissions_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + ) + response = await client.test_iam_permissions() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == iam_policy_pb2.TestIamPermissionsRequest() + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.test_iam_permissions + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.test_iam_permissions + ] = mock_object + + request = {} + await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + await client.test_iam_permissions(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async( + transport: str = "grpc_asyncio", + request_type=iam_policy_pb2.TestIamPermissionsRequest, +): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + ) + response = await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = iam_policy_pb2.TestIamPermissionsRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + assert response.permissions == ["permissions_value"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async_from_dict(): + await test_test_iam_permissions_async(request_type=dict) + + +def test_test_iam_permissions_field_headers(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
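+    # The client is expected to copy such fields into the
+    # "x-goog-request-params" request metadata (here "resource=resource_value")
+    # so the backend can route the call; the assertion below checks for it.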
+ request = iam_policy_pb2.TestIamPermissionsRequest() + + request.resource = "resource_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + + request.resource = "resource_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource_value", + ) in kw["metadata"] + + +def test_test_iam_permissions_from_dict_foreign(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + response = client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + + +def test_test_iam_permissions_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.test_iam_permissions( + resource="resource_value", + permissions=["permissions_value"], + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].resource + mock_val = "resource_value" + assert arg == mock_val + arg = args[0].permissions + mock_val = ["permissions_value"] + assert arg == mock_val + + +def test_test_iam_permissions_flattened_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.test_iam_permissions( + iam_policy_pb2.TestIamPermissionsRequest(), + resource="resource_value", + permissions=["permissions_value"], + ) + + +@pytest.mark.asyncio +async def test_test_iam_permissions_flattened_async(): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.test_iam_permissions( + resource="resource_value", + permissions=["permissions_value"], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].resource + mock_val = "resource_value" + assert arg == mock_val + arg = args[0].permissions + mock_val = ["permissions_value"] + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_test_iam_permissions_flattened_error_async(): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.test_iam_permissions( + iam_policy_pb2.TestIamPermissionsRequest(), + resource="resource_value", + permissions=["permissions_value"], + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.CreateTableRequest, + dict, + ], +) +def test_create_table_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = gba_table.Table( + name="name_value", + granularity=gba_table.Table.TimestampGranularity.MILLIS, + deletion_protection=True, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = gba_table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.create_table(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gba_table.Table) + assert response.name == "name_value" + assert response.granularity == gba_table.Table.TimestampGranularity.MILLIS + assert response.deletion_protection is True + + +def test_create_table_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.create_table in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.create_table] = mock_rpc + + request = {} + client.create_table(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.create_table(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_create_table_rest_required_fields( + request_type=bigtable_table_admin.CreateTableRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request_init["table_id"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_table._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + jsonified_request["tableId"] = "table_id_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_table._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + assert "tableId" in jsonified_request + assert jsonified_request["tableId"] == "table_id_value" + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = gba_table.Table() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
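+            # transcode() is what maps the proto request onto an HTTP verb, URI,
+            # body and query string, so faking its result here keeps the default
+            # field values from tripping the real URI templates.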
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = gba_table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.create_table(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_create_table_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.create_table._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "parent", + "tableId", + "table", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_create_table_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_create_table" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_create_table" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_table_admin.CreateTableRequest.pb( + bigtable_table_admin.CreateTableRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = gba_table.Table.to_json(gba_table.Table()) + + request = bigtable_table_admin.CreateTableRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = gba_table.Table() + + client.create_table( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_create_table_rest_bad_request( + transport: str = "rest", request_type=bigtable_table_admin.CreateTableRequest +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.create_table(request) + + +def test_create_table_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = gba_table.Table() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/instances/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + table_id="table_id_value", + table=gba_table.Table(name="name_value"), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = gba_table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.create_table(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/instances/*}/tables" % client.transport._host, + args[1], + ) + + +def test_create_table_rest_flattened_error(transport: str = "rest"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_table( + bigtable_table_admin.CreateTableRequest(), + parent="parent_value", + table_id="table_id_value", + table=gba_table.Table(name="name_value"), + ) + + +def test_create_table_rest_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.CreateTableFromSnapshotRequest, + dict, + ], +) +def test_create_table_from_snapshot_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.create_table_from_snapshot(request) + + # Establish that the response is the type that we expect. 
+ assert response.operation.name == "operations/spam" + + +def test_create_table_from_snapshot_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.create_table_from_snapshot + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.create_table_from_snapshot + ] = mock_rpc + + request = {} + client.create_table_from_snapshot(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.create_table_from_snapshot(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_create_table_from_snapshot_rest_required_fields( + request_type=bigtable_table_admin.CreateTableFromSnapshotRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request_init["table_id"] = "" + request_init["source_snapshot"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_table_from_snapshot._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + jsonified_request["tableId"] = "table_id_value" + jsonified_request["sourceSnapshot"] = "source_snapshot_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_table_from_snapshot._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + assert "tableId" in jsonified_request + assert jsonified_request["tableId"] == "table_id_value" + assert "sourceSnapshot" in jsonified_request + assert jsonified_request["sourceSnapshot"] == "source_snapshot_value" + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.create_table_from_snapshot(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_create_table_from_snapshot_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.create_table_from_snapshot._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "parent", + "tableId", + "sourceSnapshot", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_create_table_from_snapshot_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_create_table_from_snapshot" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_create_table_from_snapshot" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_table_admin.CreateTableFromSnapshotRequest.pb( + bigtable_table_admin.CreateTableFromSnapshotRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson( + operations_pb2.Operation() + ) + + request = bigtable_table_admin.CreateTableFromSnapshotRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.create_table_from_snapshot( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_create_table_from_snapshot_rest_bad_request( + transport: str = "rest", + request_type=bigtable_table_admin.CreateTableFromSnapshotRequest, +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding 
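+    # i.e. a parent matching the "projects/*/instances/*" segment of the REST
+    # path template, so the request can be mapped onto a concrete URL.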
+ request_init = {"parent": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.create_table_from_snapshot(request) + + +def test_create_table_from_snapshot_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/instances/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + table_id="table_id_value", + source_snapshot="source_snapshot_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.create_table_from_snapshot(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot" + % client.transport._host, + args[1], + ) + + +def test_create_table_from_snapshot_rest_flattened_error(transport: str = "rest"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_table_from_snapshot( + bigtable_table_admin.CreateTableFromSnapshotRequest(), + parent="parent_value", + table_id="table_id_value", + source_snapshot="source_snapshot_value", + ) + + +def test_create_table_from_snapshot_rest_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.ListTablesRequest, + dict, + ], +) +def test_list_tables_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = bigtable_table_admin.ListTablesResponse( + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListTablesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.list_tables(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListTablesPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_tables_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.list_tables in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.list_tables] = mock_rpc + + request = {} + client.list_tables(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.list_tables(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_list_tables_rest_required_fields( + request_type=bigtable_table_admin.ListTablesRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_tables._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_tables._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "page_size", + "page_token", + "view", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.ListTablesResponse() + # Mock the http request call within the method and fake a response. 
+    with mock.patch.object(Session, "request") as req:
+        # We need to mock transcode() because providing default values
+        # for required fields will fail the real version if the http_options
+        # expect actual values for those fields.
+        with mock.patch.object(path_template, "transcode") as transcode:
+            # A uri without fields and an empty body will force all the
+            # request fields to show up in the query_params.
+            pb_request = request_type.pb(request)
+            transcode_result = {
+                "uri": "v1/sample_method",
+                "method": "get",
+                "query_params": pb_request,
+            }
+            transcode.return_value = transcode_result
+
+            response_value = Response()
+            response_value.status_code = 200
+
+            # Convert return value to protobuf type
+            return_value = bigtable_table_admin.ListTablesResponse.pb(return_value)
+            json_return_value = json_format.MessageToJson(return_value)
+
+            response_value._content = json_return_value.encode("UTF-8")
+            req.return_value = response_value
+
+            response = client.list_tables(request)
+
+            expected_params = [("$alt", "json;enum-encoding=int")]
+            actual_params = req.call_args.kwargs["params"]
+            assert expected_params == actual_params
+
+
+def test_list_tables_rest_unset_required_fields():
+    transport = transports.BigtableTableAdminRestTransport(
+        credentials=ga_credentials.AnonymousCredentials
+    )
+
+    unset_fields = transport.list_tables._get_unset_required_fields({})
+    assert set(unset_fields) == (
+        set(
+            (
+                "pageSize",
+                "pageToken",
+                "view",
+            )
+        )
+        & set(("parent",))
+    )
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_list_tables_rest_interceptors(null_interceptor):
+    transport = transports.BigtableTableAdminRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+        interceptor=None
+        if null_interceptor
+        else transports.BigtableTableAdminRestInterceptor(),
+    )
+    client = BigtableTableAdminClient(transport=transport)
+    with mock.patch.object(
+        type(client.transport._session), "request"
+    ) as req, mock.patch.object(
+        path_template, "transcode"
+    ) as transcode, mock.patch.object(
+        transports.BigtableTableAdminRestInterceptor, "post_list_tables"
+    ) as post, mock.patch.object(
+        transports.BigtableTableAdminRestInterceptor, "pre_list_tables"
+    ) as pre:
+        pre.assert_not_called()
+        post.assert_not_called()
+        pb_message = bigtable_table_admin.ListTablesRequest.pb(
+            bigtable_table_admin.ListTablesRequest()
+        )
+        transcode.return_value = {
+            "method": "post",
+            "uri": "my_uri",
+            "body": pb_message,
+            "query_params": pb_message,
+        }
+
+        req.return_value = Response()
+        req.return_value.status_code = 200
+        req.return_value.request = PreparedRequest()
+        req.return_value._content = bigtable_table_admin.ListTablesResponse.to_json(
+            bigtable_table_admin.ListTablesResponse()
+        )
+
+        request = bigtable_table_admin.ListTablesRequest()
+        metadata = [
+            ("key", "val"),
+            ("cephalopod", "squid"),
+        ]
+        pre.return_value = request, metadata
+        post.return_value = bigtable_table_admin.ListTablesResponse()
+
+        client.list_tables(
+            request,
+            metadata=[
+                ("key", "val"),
+                ("cephalopod", "squid"),
+            ],
+        )
+
+        pre.assert_called_once()
+        post.assert_called_once()
+
+
+def test_list_tables_rest_bad_request(
+    transport: str = "rest", request_type=bigtable_table_admin.ListTablesRequest
+):
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {"parent": "projects/sample1/instances/sample2"}
+    request = request_type(**request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+    with mock.patch.object(Session, "request") as req, pytest.raises(
+        core_exceptions.BadRequest
+    ):
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 400
+        response_value.request = Request()
+        req.return_value = response_value
+        client.list_tables(request)
+
+
+def test_list_tables_rest_flattened():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport="rest",
+    )
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(type(client.transport._session), "request") as req:
+        # Designate an appropriate value for the returned response.
+        return_value = bigtable_table_admin.ListTablesResponse()
+
+        # get arguments that satisfy an http rule for this method
+        sample_request = {"parent": "projects/sample1/instances/sample2"}
+
+        # get truthy value for each flattened field
+        mock_args = dict(
+            parent="parent_value",
+        )
+        mock_args.update(sample_request)
+
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 200
+        # Convert return value to protobuf type
+        return_value = bigtable_table_admin.ListTablesResponse.pb(return_value)
+        json_return_value = json_format.MessageToJson(return_value)
+        response_value._content = json_return_value.encode("UTF-8")
+        req.return_value = response_value
+
+        client.list_tables(**mock_args)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(req.mock_calls) == 1
+        _, args, _ = req.mock_calls[0]
+        assert path_template.validate(
+            "%s/v2/{parent=projects/*/instances/*}/tables" % client.transport._host,
+            args[1],
+        )
+
+
+def test_list_tables_rest_flattened_error(transport: str = "rest"):
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.list_tables(
+            bigtable_table_admin.ListTablesRequest(),
+            parent="parent_value",
+        )
+
+
+def test_list_tables_rest_pager(transport: str = "rest"):
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(Session, "request") as req:
+        # TODO(kbandes): remove this mock unless there's a good reason for it.
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + bigtable_table_admin.ListTablesResponse( + tables=[ + table.Table(), + table.Table(), + table.Table(), + ], + next_page_token="abc", + ), + bigtable_table_admin.ListTablesResponse( + tables=[], + next_page_token="def", + ), + bigtable_table_admin.ListTablesResponse( + tables=[ + table.Table(), + ], + next_page_token="ghi", + ), + bigtable_table_admin.ListTablesResponse( + tables=[ + table.Table(), + table.Table(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + bigtable_table_admin.ListTablesResponse.to_json(x) for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"parent": "projects/sample1/instances/sample2"} + + pager = client.list_tables(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, table.Table) for i in results) + + pages = list(client.list_tables(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.GetTableRequest, + dict, + ], +) +def test_get_table_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = table.Table( + name="name_value", + granularity=table.Table.TimestampGranularity.MILLIS, + deletion_protection=True, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.get_table(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, table.Table) + assert response.name == "name_value" + assert response.granularity == table.Table.TimestampGranularity.MILLIS + assert response.deletion_protection is True + + +def test_get_table_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.get_table in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.get_table] = mock_rpc + + request = {} + client.get_table(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_table(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_get_table_rest_required_fields( + request_type=bigtable_table_admin.GetTableRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_table._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_table._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("view",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = table.Table() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_table(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_table_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_table._get_unset_required_fields({}) + assert set(unset_fields) == (set(("view",)) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_table_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_get_table" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_get_table" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_table_admin.GetTableRequest.pb( + bigtable_table_admin.GetTableRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = table.Table.to_json(table.Table()) + + request = bigtable_table_admin.GetTableRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = table.Table() + + client.get_table( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_table_rest_bad_request( + transport: str = "rest", request_type=bigtable_table_admin.GetTableRequest +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_table(request) + + +def test_get_table_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = table.Table() + + # get arguments that satisfy an http rule for this method + sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.get_table(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/tables/*}" % client.transport._host, + args[1], + ) + + +def test_get_table_rest_flattened_error(transport: str = "rest"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_table( + bigtable_table_admin.GetTableRequest(), + name="name_value", + ) + + +def test_get_table_rest_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.UpdateTableRequest, + dict, + ], +) +def test_update_table_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = { + "table": {"name": "projects/sample1/instances/sample2/tables/sample3"} + } + request_init["table"] = { + "name": "projects/sample1/instances/sample2/tables/sample3", + "cluster_states": {}, + "column_families": {}, + "granularity": 1, + "restore_info": { + "source_type": 1, + "backup_info": { + "backup": "backup_value", + "start_time": {"seconds": 751, "nanos": 543}, + "end_time": {}, + "source_table": "source_table_value", + "source_backup": "source_backup_value", + }, + }, + "change_stream_config": {"retention_period": {"seconds": 751, "nanos": 543}}, + "deletion_protection": True, + "automated_backup_policy": {"retention_period": {}, "frequency": {}}, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. 
+ # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_table_admin.UpdateTableRequest.meta.fields["table"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. + message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["table"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["table"][field])): + del request_init["table"][field][i][subfield] + else: + del request_init["table"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.update_table(request) + + # Establish that the response is the type that we expect. 
+ assert response.operation.name == "operations/spam" + + +def test_update_table_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.update_table in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.update_table] = mock_rpc + + request = {} + client.update_table(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.update_table(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_update_table_rest_required_fields( + request_type=bigtable_table_admin.UpdateTableRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_table._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_table._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("update_mask",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "patch", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.update_table(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_update_table_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.update_table._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(("updateMask",)) + & set( + ( + "table", + "updateMask", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_update_table_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_update_table" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_update_table" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_table_admin.UpdateTableRequest.pb( + bigtable_table_admin.UpdateTableRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson( + operations_pb2.Operation() + ) + + request = bigtable_table_admin.UpdateTableRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.update_table( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_update_table_rest_bad_request( + transport: str = "rest", request_type=bigtable_table_admin.UpdateTableRequest +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = { + "table": {"name": "projects/sample1/instances/sample2/tables/sample3"} + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.update_table(request) + + +def test_update_table_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "table": {"name": "projects/sample1/instances/sample2/tables/sample3"} + } + + # get truthy value for each flattened field + mock_args = dict( + table=gba_table.Table(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.update_table(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{table.name=projects/*/instances/*/tables/*}" + % client.transport._host, + args[1], + ) + + +def test_update_table_rest_flattened_error(transport: str = "rest"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - await client.test_iam_permissions( - iam_policy_pb2.TestIamPermissionsRequest(), - resource="resource_value", - permissions=["permissions_value"], + client.update_table( + bigtable_table_admin.UpdateTableRequest(), + table=gba_table.Table(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) +def test_update_table_rest_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.CreateTableRequest, + bigtable_table_admin.DeleteTableRequest, dict, ], ) -def test_create_table_rest(request_type): +def test_delete_table_rest(request_type): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. 
- return_value = gba_table.Table( - name="name_value", - granularity=gba_table.Table.TimestampGranularity.MILLIS, - deletion_protection=True, - ) + return_value = None # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - # Convert return value to protobuf type - return_value = gba_table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) + json_return_value = "" response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.create_table(request) + response = client.delete_table(request) # Establish that the response is the type that we expect. - assert isinstance(response, gba_table.Table) - assert response.name == "name_value" - assert response.granularity == gba_table.Table.TimestampGranularity.MILLIS - assert response.deletion_protection is True + assert response is None -def test_create_table_rest_required_fields( - request_type=bigtable_table_admin.CreateTableRequest, +def test_delete_table_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.delete_table in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.delete_table] = mock_rpc + + request = {} + client.delete_table(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.delete_table(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_delete_table_rest_required_fields( + request_type=bigtable_table_admin.DeleteTableRequest, ): transport_class = transports.BigtableTableAdminRestTransport request_init = {} - request_init["parent"] = "" - request_init["table_id"] = "" + request_init["name"] = "" request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( @@ -7748,24 +15241,21 @@ def test_create_table_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).create_table._get_unset_required_fields(jsonified_request) + ).delete_table._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present - jsonified_request["parent"] = "parent_value" - jsonified_request["tableId"] = "table_id_value" + jsonified_request["name"] = "name_value" unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).create_table._get_unset_required_fields(jsonified_request) + ).delete_table._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - assert "tableId" in jsonified_request - assert jsonified_request["tableId"] == "table_id_value" + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -7774,7 +15264,7 @@ def test_create_table_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = gba_table.Table() + return_value = None # Mock the http request call within the method and fake a response. 
with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -7786,49 +15276,36 @@ def test_create_table_rest_required_fields( pb_request = request_type.pb(request) transcode_result = { "uri": "v1/sample_method", - "method": "post", + "method": "delete", "query_params": pb_request, } - transcode_result["body"] = pb_request transcode.return_value = transcode_result response_value = Response() response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = gba_table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) + json_return_value = "" response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.create_table(request) + response = client.delete_table(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_create_table_rest_unset_required_fields(): +def test_delete_table_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.create_table._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(()) - & set( - ( - "parent", - "tableId", - "table", - ) - ) - ) + unset_fields = transport.delete_table._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_create_table_rest_interceptors(null_interceptor): +def test_delete_table_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -7841,14 +15318,11 @@ def test_create_table_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_create_table" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_create_table" + transports.BigtableTableAdminRestInterceptor, "pre_delete_table" ) as pre: pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_table_admin.CreateTableRequest.pb( - bigtable_table_admin.CreateTableRequest() + pb_message = bigtable_table_admin.DeleteTableRequest.pb( + bigtable_table_admin.DeleteTableRequest() ) transcode.return_value = { "method": "post", @@ -7860,17 +15334,15 @@ def test_create_table_rest_interceptors(null_interceptor): req.return_value = Response() req.return_value.status_code = 200 req.return_value.request = PreparedRequest() - req.return_value._content = gba_table.Table.to_json(gba_table.Table()) - request = bigtable_table_admin.CreateTableRequest() + request = bigtable_table_admin.DeleteTableRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = gba_table.Table() - client.create_table( + client.delete_table( request, metadata=[ ("key", "val"), @@ -7879,11 +15351,10 @@ def test_create_table_rest_interceptors(null_interceptor): ) pre.assert_called_once() - post.assert_called_once() -def test_create_table_rest_bad_request( - transport: str = "rest", request_type=bigtable_table_admin.CreateTableRequest +def test_delete_table_rest_bad_request( + transport: str = "rest", request_type=bigtable_table_admin.DeleteTableRequest ): client = BigtableTableAdminClient( 
credentials=ga_credentials.AnonymousCredentials(), @@ -7891,7 +15362,7 @@ def test_create_table_rest_bad_request( ) # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -7903,10 +15374,10 @@ def test_create_table_rest_bad_request( response_value.status_code = 400 response_value.request = Request() req.return_value = response_value - client.create_table(request) + client.delete_table(request) -def test_create_table_rest_flattened(): +def test_delete_table_rest_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -7915,41 +15386,37 @@ def test_create_table_rest_flattened(): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = gba_table.Table() + return_value = None # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/instances/sample2"} + sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} # get truthy value for each flattened field mock_args = dict( - parent="parent_value", - table_id="table_id_value", - table=gba_table.Table(name="name_value"), + name="name_value", ) mock_args.update(sample_request) # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - # Convert return value to protobuf type - return_value = gba_table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) + json_return_value = "" response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - client.create_table(**mock_args) + client.delete_table(**mock_args) # Establish that the underlying call was made with the expected # request object values. assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*}/tables" % client.transport._host, + "%s/v2/{name=projects/*/instances/*/tables/*}" % client.transport._host, args[1], ) -def test_create_table_rest_flattened_error(transport: str = "rest"): +def test_delete_table_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -7958,15 +15425,13 @@ def test_create_table_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): - client.create_table( - bigtable_table_admin.CreateTableRequest(), - parent="parent_value", - table_id="table_id_value", - table=gba_table.Table(name="name_value"), + client.delete_table( + bigtable_table_admin.DeleteTableRequest(), + name="name_value", ) -def test_create_table_rest_error(): +def test_delete_table_rest_error(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -7975,18 +15440,18 @@ def test_create_table_rest_error(): @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.CreateTableFromSnapshotRequest, + bigtable_table_admin.UndeleteTableRequest, dict, ], ) -def test_create_table_from_snapshot_rest(request_type): +def test_undelete_table_rest(request_type): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. @@ -8001,21 +15466,59 @@ def test_create_table_from_snapshot_rest(request_type): response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.create_table_from_snapshot(request) + response = client.undelete_table(request) # Establish that the response is the type that we expect. assert response.operation.name == "operations/spam" -def test_create_table_from_snapshot_rest_required_fields( - request_type=bigtable_table_admin.CreateTableFromSnapshotRequest, +def test_undelete_table_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.undelete_table in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.undelete_table] = mock_rpc + + request = {} + client.undelete_table(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.undelete_table(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_undelete_table_rest_required_fields( + request_type=bigtable_table_admin.UndeleteTableRequest, ): transport_class = transports.BigtableTableAdminRestTransport request_init = {} - request_init["parent"] = "" - request_init["table_id"] = "" - request_init["source_snapshot"] = "" + request_init["name"] = "" request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( @@ -8029,27 +15532,21 @@ def test_create_table_from_snapshot_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).create_table_from_snapshot._get_unset_required_fields(jsonified_request) + ).undelete_table._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present - jsonified_request["parent"] = "parent_value" - jsonified_request["tableId"] = "table_id_value" - jsonified_request["sourceSnapshot"] = "source_snapshot_value" + jsonified_request["name"] = "name_value" unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).create_table_from_snapshot._get_unset_required_fields(jsonified_request) + ).undelete_table._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - assert "tableId" in jsonified_request - assert jsonified_request["tableId"] == "table_id_value" - assert "sourceSnapshot" in jsonified_request - assert jsonified_request["sourceSnapshot"] == "source_snapshot_value" + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -8083,33 +15580,24 @@ def test_create_table_from_snapshot_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.create_table_from_snapshot(request) + response = client.undelete_table(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_create_table_from_snapshot_rest_unset_required_fields(): +def test_undelete_table_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.create_table_from_snapshot._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(()) - & set( - ( - "parent", - "tableId", - "sourceSnapshot", - ) - ) - ) + unset_fields = transport.undelete_table._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_create_table_from_snapshot_rest_interceptors(null_interceptor): +def test_undelete_table_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -8124,14 +15612,14 @@ def 
test_create_table_from_snapshot_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( operation.Operation, "_set_result_from_operation" ), mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_create_table_from_snapshot" + transports.BigtableTableAdminRestInterceptor, "post_undelete_table" ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_create_table_from_snapshot" + transports.BigtableTableAdminRestInterceptor, "pre_undelete_table" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = bigtable_table_admin.CreateTableFromSnapshotRequest.pb( - bigtable_table_admin.CreateTableFromSnapshotRequest() + pb_message = bigtable_table_admin.UndeleteTableRequest.pb( + bigtable_table_admin.UndeleteTableRequest() ) transcode.return_value = { "method": "post", @@ -8147,7 +15635,7 @@ def test_create_table_from_snapshot_rest_interceptors(null_interceptor): operations_pb2.Operation() ) - request = bigtable_table_admin.CreateTableFromSnapshotRequest() + request = bigtable_table_admin.UndeleteTableRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), @@ -8155,7 +15643,7 @@ def test_create_table_from_snapshot_rest_interceptors(null_interceptor): pre.return_value = request, metadata post.return_value = operations_pb2.Operation() - client.create_table_from_snapshot( + client.undelete_table( request, metadata=[ ("key", "val"), @@ -8167,9 +15655,8 @@ def test_create_table_from_snapshot_rest_interceptors(null_interceptor): post.assert_called_once() -def test_create_table_from_snapshot_rest_bad_request( - transport: str = "rest", - request_type=bigtable_table_admin.CreateTableFromSnapshotRequest, +def test_undelete_table_rest_bad_request( + transport: str = "rest", request_type=bigtable_table_admin.UndeleteTableRequest ): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -8177,7 +15664,7 @@ def test_create_table_from_snapshot_rest_bad_request( ) # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
@@ -8189,10 +15676,10 @@ def test_create_table_from_snapshot_rest_bad_request( response_value.status_code = 400 response_value.request = Request() req.return_value = response_value - client.create_table_from_snapshot(request) + client.undelete_table(request) -def test_create_table_from_snapshot_rest_flattened(): +def test_undelete_table_rest_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -8204,13 +15691,11 @@ def test_create_table_from_snapshot_rest_flattened(): return_value = operations_pb2.Operation(name="operations/spam") # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/instances/sample2"} + sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} # get truthy value for each flattened field mock_args = dict( - parent="parent_value", - table_id="table_id_value", - source_snapshot="source_snapshot_value", + name="name_value", ) mock_args.update(sample_request) @@ -8221,20 +15706,20 @@ def test_create_table_from_snapshot_rest_flattened(): response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - client.create_table_from_snapshot(**mock_args) + client.undelete_table(**mock_args) # Establish that the underlying call was made with the expected # request object values. assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot" + "%s/v2/{name=projects/*/instances/*/tables/*}:undelete" % client.transport._host, args[1], ) -def test_create_table_from_snapshot_rest_flattened_error(transport: str = "rest"): +def test_undelete_table_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -8243,15 +15728,13 @@ def test_create_table_from_snapshot_rest_flattened_error(transport: str = "rest" # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): - client.create_table_from_snapshot( - bigtable_table_admin.CreateTableFromSnapshotRequest(), - parent="parent_value", - table_id="table_id_value", - source_snapshot="source_snapshot_value", + client.undelete_table( + bigtable_table_admin.UndeleteTableRequest(), + name="name_value", ) -def test_create_table_from_snapshot_rest_error(): +def test_undelete_table_rest_error(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -8260,86 +15743,202 @@ def test_create_table_from_snapshot_rest_error(): @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.ListTablesRequest, + bigtable_table_admin.CreateAuthorizedViewRequest, dict, ], ) -def test_list_tables_rest(request_type): +def test_create_authorized_view_rest(request_type): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} + request_init = {"parent": "projects/sample1/instances/sample2/tables/sample3"} + request_init["authorized_view"] = { + "name": "name_value", + "subset_view": { + "row_prefixes": [b"row_prefixes_blob1", b"row_prefixes_blob2"], + "family_subsets": {}, + }, + "etag": "etag_value", + "deletion_protection": True, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_table_admin.CreateAuthorizedViewRequest.meta.fields[ + "authorized_view" + ] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["authorized_view"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["authorized_view"][field])): + del request_init["authorized_view"][field][i][subfield] + else: + del request_init["authorized_view"][field][subfield] request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.ListTablesResponse( - next_page_token="next_page_token_value", - ) + return_value = operations_pb2.Operation(name="operations/spam") # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_table_admin.ListTablesResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.list_tables(request) + response = client.create_authorized_view(request) # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListTablesPager) - assert response.next_page_token == "next_page_token_value" + assert response.operation.name == "operations/spam" -def test_list_tables_rest_required_fields( - request_type=bigtable_table_admin.ListTablesRequest, +def test_create_authorized_view_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.create_authorized_view + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.create_authorized_view + ] = mock_rpc + + request = {} + client.create_authorized_view(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.create_authorized_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_create_authorized_view_rest_required_fields( + request_type=bigtable_table_admin.CreateAuthorizedViewRequest, ): transport_class = transports.BigtableTableAdminRestTransport request_init = {} request_init["parent"] = "" + request_init["authorized_view_id"] = "" request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped + assert "authorizedViewId" not in jsonified_request unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).list_tables._get_unset_required_fields(jsonified_request) + ).create_authorized_view._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present + assert "authorizedViewId" in jsonified_request + assert jsonified_request["authorizedViewId"] == request_init["authorized_view_id"] jsonified_request["parent"] = "parent_value" + jsonified_request["authorizedViewId"] = "authorized_view_id_value" unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).list_tables._get_unset_required_fields(jsonified_request) + ).create_authorized_view._get_unset_required_fields(jsonified_request) # Check that path parameters and body parameters are not mixing in. 
- assert not set(unset_fields) - set( - ( - "page_size", - "page_token", - "view", - ) - ) + assert not set(unset_fields) - set(("authorized_view_id",)) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone assert "parent" in jsonified_request assert jsonified_request["parent"] == "parent_value" + assert "authorizedViewId" in jsonified_request + assert jsonified_request["authorizedViewId"] == "authorized_view_id_value" client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -8348,7 +15947,7 @@ def test_list_tables_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.ListTablesResponse() + return_value = operations_pb2.Operation(name="operations/spam") # Mock the http request call within the method and fake a response. with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -8360,48 +15959,52 @@ def test_list_tables_rest_required_fields( pb_request = request_type.pb(request) transcode_result = { "uri": "v1/sample_method", - "method": "get", + "method": "post", "query_params": pb_request, } + transcode_result["body"] = pb_request transcode.return_value = transcode_result response_value = Response() response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable_table_admin.ListTablesResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.list_tables(request) + response = client.create_authorized_view(request) - expected_params = [("$alt", "json;enum-encoding=int")] + expected_params = [ + ( + "authorizedViewId", + "", + ), + ("$alt", "json;enum-encoding=int"), + ] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_list_tables_rest_unset_required_fields(): +def test_create_authorized_view_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.list_tables._get_unset_required_fields({}) + unset_fields = transport.create_authorized_view._get_unset_required_fields({}) assert set(unset_fields) == ( - set( + set(("authorizedViewId",)) + & set( ( - "pageSize", - "pageToken", - "view", + "parent", + "authorizedViewId", + "authorizedView", ) ) - & set(("parent",)) ) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_tables_rest_interceptors(null_interceptor): +def test_create_authorized_view_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -8414,14 +16017,16 @@ def test_list_tables_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_list_tables" + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_create_authorized_view" ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_list_tables" + transports.BigtableTableAdminRestInterceptor, "pre_create_authorized_view" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = 
bigtable_table_admin.ListTablesRequest.pb( - bigtable_table_admin.ListTablesRequest() + pb_message = bigtable_table_admin.CreateAuthorizedViewRequest.pb( + bigtable_table_admin.CreateAuthorizedViewRequest() ) transcode.return_value = { "method": "post", @@ -8433,19 +16038,19 @@ def test_list_tables_rest_interceptors(null_interceptor): req.return_value = Response() req.return_value.status_code = 200 req.return_value.request = PreparedRequest() - req.return_value._content = bigtable_table_admin.ListTablesResponse.to_json( - bigtable_table_admin.ListTablesResponse() + req.return_value._content = json_format.MessageToJson( + operations_pb2.Operation() ) - request = bigtable_table_admin.ListTablesRequest() + request = bigtable_table_admin.CreateAuthorizedViewRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = bigtable_table_admin.ListTablesResponse() + post.return_value = operations_pb2.Operation() - client.list_tables( + client.create_authorized_view( request, metadata=[ ("key", "val"), @@ -8457,8 +16062,9 @@ def test_list_tables_rest_interceptors(null_interceptor): post.assert_called_once() -def test_list_tables_rest_bad_request( - transport: str = "rest", request_type=bigtable_table_admin.ListTablesRequest +def test_create_authorized_view_rest_bad_request( + transport: str = "rest", + request_type=bigtable_table_admin.CreateAuthorizedViewRequest, ): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -8466,7 +16072,7 @@ def test_list_tables_rest_bad_request( ) # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} + request_init = {"parent": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -8478,10 +16084,10 @@ def test_list_tables_rest_bad_request( response_value.status_code = 400 response_value.request = Request() req.return_value = response_value - client.list_tables(request) + client.create_authorized_view(request) -def test_list_tables_rest_flattened(): +def test_create_authorized_view_rest_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -8490,197 +16096,183 @@ def test_list_tables_rest_flattened(): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. 
- return_value = bigtable_table_admin.ListTablesResponse() + return_value = operations_pb2.Operation(name="operations/spam") # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/instances/sample2"} + sample_request = {"parent": "projects/sample1/instances/sample2/tables/sample3"} # get truthy value for each flattened field mock_args = dict( parent="parent_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_table_admin.ListTablesResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - - client.list_tables(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*}/tables" % client.transport._host, - args[1], - ) - - -def test_list_tables_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_tables( - bigtable_table_admin.ListTablesRequest(), - parent="parent_value", - ) - - -def test_list_tables_rest_pager(transport: str = "rest"): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # TODO(kbandes): remove this mock unless there's a good reason for it. - # with mock.patch.object(path_template, 'transcode') as transcode: - # Set the response as a series of pages - response = ( - bigtable_table_admin.ListTablesResponse( - tables=[ - table.Table(), - table.Table(), - table.Table(), - ], - next_page_token="abc", - ), - bigtable_table_admin.ListTablesResponse( - tables=[], - next_page_token="def", - ), - bigtable_table_admin.ListTablesResponse( - tables=[ - table.Table(), - ], - next_page_token="ghi", - ), - bigtable_table_admin.ListTablesResponse( - tables=[ - table.Table(), - table.Table(), - ], - ), - ) - # Two responses for two calls - response = response + response + authorized_view=table.AuthorizedView(name="name_value"), + authorized_view_id="authorized_view_id_value", + ) + mock_args.update(sample_request) - # Wrap the values into proper Response objs - response = tuple( - bigtable_table_admin.ListTablesResponse.to_json(x) for x in response + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.create_authorized_view(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/instances/*/tables/*}/authorizedViews" + % client.transport._host, + args[1], ) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = response_val.encode("UTF-8") - return_val.status_code = 200 - req.side_effect = return_values - sample_request = {"parent": "projects/sample1/instances/sample2"} - pager = client.list_tables(request=sample_request) +def test_create_authorized_view_rest_flattened_error(transport: str = "rest"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, table.Table) for i in results) + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_authorized_view( + bigtable_table_admin.CreateAuthorizedViewRequest(), + parent="parent_value", + authorized_view=table.AuthorizedView(name="name_value"), + authorized_view_id="authorized_view_id_value", + ) - pages = list(client.list_tables(request=sample_request).pages) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token + +def test_create_authorized_view_rest_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.GetTableRequest, + bigtable_table_admin.ListAuthorizedViewsRequest, dict, ], ) -def test_get_table_rest(request_type): +def test_list_authorized_views_rest(request_type): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request_init = {"parent": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = table.Table( - name="name_value", - granularity=table.Table.TimestampGranularity.MILLIS, - deletion_protection=True, + return_value = bigtable_table_admin.ListAuthorizedViewsResponse( + next_page_token="next_page_token_value", ) # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 # Convert return value to protobuf type - return_value = table.Table.pb(return_value) + return_value = bigtable_table_admin.ListAuthorizedViewsResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.get_table(request) + response = client.list_authorized_views(request) # Establish that the response is the type that we expect. 
- assert isinstance(response, table.Table) - assert response.name == "name_value" - assert response.granularity == table.Table.TimestampGranularity.MILLIS - assert response.deletion_protection is True + assert isinstance(response, pagers.ListAuthorizedViewsPager) + assert response.next_page_token == "next_page_token_value" -def test_get_table_rest_required_fields( - request_type=bigtable_table_admin.GetTableRequest, +def test_list_authorized_views_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.list_authorized_views + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.list_authorized_views + ] = mock_rpc + + request = {} + client.list_authorized_views(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.list_authorized_views(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_list_authorized_views_rest_required_fields( + request_type=bigtable_table_admin.ListAuthorizedViewsRequest, ): transport_class = transports.BigtableTableAdminRestTransport request_init = {} - request_init["name"] = "" + request_init["parent"] = "" request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).get_table._get_unset_required_fields(jsonified_request) + ).list_authorized_views._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present - jsonified_request["name"] = "name_value" + jsonified_request["parent"] = "parent_value" unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).get_table._get_unset_required_fields(jsonified_request) + ).list_authorized_views._get_unset_required_fields(jsonified_request) # Check that path parameters and body parameters are not mixing in. 
- assert not set(unset_fields) - set(("view",)) + assert not set(unset_fields) - set( + ( + "page_size", + "page_token", + "view", + ) + ) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -8689,7 +16281,7 @@ def test_get_table_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = table.Table() + return_value = bigtable_table_admin.ListAuthorizedViewsResponse() # Mock the http request call within the method and fake a response. with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -8710,30 +16302,41 @@ def test_get_table_rest_required_fields( response_value.status_code = 200 # Convert return value to protobuf type - return_value = table.Table.pb(return_value) + return_value = bigtable_table_admin.ListAuthorizedViewsResponse.pb( + return_value + ) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.get_table(request) + response = client.list_authorized_views(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_get_table_rest_unset_required_fields(): +def test_list_authorized_views_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.get_table._get_unset_required_fields({}) - assert set(unset_fields) == (set(("view",)) & set(("name",))) + unset_fields = transport.list_authorized_views._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "pageSize", + "pageToken", + "view", + ) + ) + & set(("parent",)) + ) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_table_rest_interceptors(null_interceptor): +def test_list_authorized_views_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -8746,14 +16349,14 @@ def test_get_table_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_get_table" + transports.BigtableTableAdminRestInterceptor, "post_list_authorized_views" ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_get_table" + transports.BigtableTableAdminRestInterceptor, "pre_list_authorized_views" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = bigtable_table_admin.GetTableRequest.pb( - bigtable_table_admin.GetTableRequest() + pb_message = bigtable_table_admin.ListAuthorizedViewsRequest.pb( + bigtable_table_admin.ListAuthorizedViewsRequest() ) transcode.return_value = { "method": "post", @@ -8765,17 +16368,21 @@ def test_get_table_rest_interceptors(null_interceptor): req.return_value = Response() req.return_value.status_code = 200 req.return_value.request = PreparedRequest() - req.return_value._content = table.Table.to_json(table.Table()) + 
req.return_value._content = ( + bigtable_table_admin.ListAuthorizedViewsResponse.to_json( + bigtable_table_admin.ListAuthorizedViewsResponse() + ) + ) - request = bigtable_table_admin.GetTableRequest() + request = bigtable_table_admin.ListAuthorizedViewsRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = table.Table() + post.return_value = bigtable_table_admin.ListAuthorizedViewsResponse() - client.get_table( + client.list_authorized_views( request, metadata=[ ("key", "val"), @@ -8787,8 +16394,9 @@ def test_get_table_rest_interceptors(null_interceptor): post.assert_called_once() -def test_get_table_rest_bad_request( - transport: str = "rest", request_type=bigtable_table_admin.GetTableRequest +def test_list_authorized_views_rest_bad_request( + transport: str = "rest", + request_type=bigtable_table_admin.ListAuthorizedViewsRequest, ): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -8796,7 +16404,7 @@ def test_get_table_rest_bad_request( ) # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request_init = {"parent": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -8808,10 +16416,10 @@ def test_get_table_rest_bad_request( response_value.status_code = 400 response_value.request = Request() req.return_value = response_value - client.get_table(request) + client.list_authorized_views(request) -def test_get_table_rest_flattened(): +def test_list_authorized_views_rest_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -8820,14 +16428,14 @@ def test_get_table_rest_flattened(): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = table.Table() + return_value = bigtable_table_admin.ListAuthorizedViewsResponse() # get arguments that satisfy an http rule for this method - sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} + sample_request = {"parent": "projects/sample1/instances/sample2/tables/sample3"} # get truthy value for each flattened field mock_args = dict( - name="name_value", + parent="parent_value", ) mock_args.update(sample_request) @@ -8835,24 +16443,25 @@ def test_get_table_rest_flattened(): response_value = Response() response_value.status_code = 200 # Convert return value to protobuf type - return_value = table.Table.pb(return_value) + return_value = bigtable_table_admin.ListAuthorizedViewsResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - client.get_table(**mock_args) + client.list_authorized_views(**mock_args) # Establish that the underlying call was made with the expected # request object values. 
assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/tables/*}" % client.transport._host, + "%s/v2/{parent=projects/*/instances/*/tables/*}/authorizedViews" + % client.transport._host, args[1], ) -def test_get_table_rest_flattened_error(transport: str = "rest"): +def test_list_authorized_views_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -8861,172 +16470,196 @@ def test_get_table_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.get_table( - bigtable_table_admin.GetTableRequest(), - name="name_value", + client.list_authorized_views( + bigtable_table_admin.ListAuthorizedViewsRequest(), + parent="parent_value", ) -def test_get_table_rest_error(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.UpdateTableRequest, - dict, - ], -) -def test_update_table_rest(request_type): +def test_list_authorized_views_rest_pager(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport=transport, ) - # send a request that will satisfy transcoding - request_init = { - "table": {"name": "projects/sample1/instances/sample2/tables/sample3"} - } - request_init["table"] = { - "name": "projects/sample1/instances/sample2/tables/sample3", - "cluster_states": {}, - "column_families": {}, - "granularity": 1, - "restore_info": { - "source_type": 1, - "backup_info": { - "backup": "backup_value", - "start_time": {"seconds": 751, "nanos": 543}, - "end_time": {}, - "source_table": "source_table_value", - "source_backup": "source_backup_value", - }, - }, - "change_stream_config": {"retention_period": {"seconds": 751, "nanos": 543}}, - "deletion_protection": True, - } - # The version of a generated dependency at test runtime may differ from the version used during generation. - # Delete any fields which are not present in the current runtime dependency - # See https://github.com/googleapis/gapic-generator-python/issues/1748 - - # Determine if the message type is proto-plus or protobuf - test_field = bigtable_table_admin.UpdateTableRequest.meta.fields["table"] - - def get_message_fields(field): - # Given a field which is a message (composite type), return a list with - # all the fields of the message. - # If the field is not a composite type, return an empty list. - message_fields = [] + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + bigtable_table_admin.ListAuthorizedViewsResponse( + authorized_views=[ + table.AuthorizedView(), + table.AuthorizedView(), + table.AuthorizedView(), + ], + next_page_token="abc", + ), + bigtable_table_admin.ListAuthorizedViewsResponse( + authorized_views=[], + next_page_token="def", + ), + bigtable_table_admin.ListAuthorizedViewsResponse( + authorized_views=[ + table.AuthorizedView(), + ], + next_page_token="ghi", + ), + bigtable_table_admin.ListAuthorizedViewsResponse( + authorized_views=[ + table.AuthorizedView(), + table.AuthorizedView(), + ], + ), + ) + # Two responses for two calls + response = response + response - if hasattr(field, "message") and field.message: - is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + # Wrap the values into proper Response objs + response = tuple( + bigtable_table_admin.ListAuthorizedViewsResponse.to_json(x) + for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values - if is_field_type_proto_plus_type: - message_fields = field.message.meta.fields.values() - # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types - else: # pragma: NO COVER - message_fields = field.message.DESCRIPTOR.fields - return message_fields + sample_request = {"parent": "projects/sample1/instances/sample2/tables/sample3"} - runtime_nested_fields = [ - (field.name, nested_field.name) - for field in get_message_fields(test_field) - for nested_field in get_message_fields(field) - ] + pager = client.list_authorized_views(request=sample_request) - subfields_not_in_runtime = [] + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, table.AuthorizedView) for i in results) - # For each item in the sample request, create a list of sub fields which are not present at runtime - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["table"].items(): # pragma: NO COVER - result = None - is_repeated = False - # For repeated fields - if isinstance(value, list) and len(value): - is_repeated = True - result = value[0] - # For fields where the type is another message - if isinstance(value, dict): - result = value + pages = list(client.list_authorized_views(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token - if result and hasattr(result, "keys"): - for subfield in result.keys(): - if (field, subfield) not in runtime_nested_fields: - subfields_not_in_runtime.append( - { - "field": field, - "subfield": subfield, - "is_repeated": is_repeated, - } - ) - # Remove fields from the sample request which are not present in the runtime version of the dependency - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER - field = subfield_to_delete.get("field") - field_repeated = subfield_to_delete.get("is_repeated") - subfield = subfield_to_delete.get("subfield") - if subfield: - if field_repeated: - for i in range(0, len(request_init["table"][field])): - del request_init["table"][field][i][subfield] - else: - del 
request_init["table"][field][subfield] +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.GetAuthorizedViewRequest, + dict, + ], +) +def test_get_authorized_view_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") + return_value = table.AuthorizedView( + name="name_value", + etag="etag_value", + deletion_protection=True, + ) # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 + # Convert return value to protobuf type + return_value = table.AuthorizedView.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.update_table(request) + response = client.get_authorized_view(request) # Establish that the response is the type that we expect. - assert response.operation.name == "operations/spam" + assert isinstance(response, table.AuthorizedView) + assert response.name == "name_value" + assert response.etag == "etag_value" + assert response.deletion_protection is True -def test_update_table_rest_required_fields( - request_type=bigtable_table_admin.UpdateTableRequest, +def test_get_authorized_view_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.get_authorized_view in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.get_authorized_view + ] = mock_rpc + + request = {} + client.get_authorized_view(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.get_authorized_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_get_authorized_view_rest_required_fields( + request_type=bigtable_table_admin.GetAuthorizedViewRequest, ): transport_class = transports.BigtableTableAdminRestTransport request_init = {} + request_init["name"] = "" request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).update_table._get_unset_required_fields(jsonified_request) + ).get_authorized_view._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present + jsonified_request["name"] = "name_value" + unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).update_table._get_unset_required_fields(jsonified_request) + ).get_authorized_view._get_unset_required_fields(jsonified_request) # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("update_mask",)) + assert not set(unset_fields) - set(("view",)) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -9035,7 +16668,7 @@ def test_update_table_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") + return_value = table.AuthorizedView() # Mock the http request call within the method and fake a response. 
with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -9047,45 +16680,39 @@ def test_update_table_rest_required_fields( pb_request = request_type.pb(request) transcode_result = { "uri": "v1/sample_method", - "method": "patch", + "method": "get", "query_params": pb_request, } - transcode_result["body"] = pb_request transcode.return_value = transcode_result response_value = Response() response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = table.AuthorizedView.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.update_table(request) + response = client.get_authorized_view(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_update_table_rest_unset_required_fields(): +def test_get_authorized_view_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.update_table._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(("updateMask",)) - & set( - ( - "table", - "updateMask", - ) - ) - ) + unset_fields = transport.get_authorized_view._get_unset_required_fields({}) + assert set(unset_fields) == (set(("view",)) & set(("name",))) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_update_table_rest_interceptors(null_interceptor): +def test_get_authorized_view_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -9098,16 +16725,14 @@ def test_update_table_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_update_table" + transports.BigtableTableAdminRestInterceptor, "post_get_authorized_view" ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_update_table" + transports.BigtableTableAdminRestInterceptor, "pre_get_authorized_view" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = bigtable_table_admin.UpdateTableRequest.pb( - bigtable_table_admin.UpdateTableRequest() + pb_message = bigtable_table_admin.GetAuthorizedViewRequest.pb( + bigtable_table_admin.GetAuthorizedViewRequest() ) transcode.return_value = { "method": "post", @@ -9119,19 +16744,17 @@ def test_update_table_rest_interceptors(null_interceptor): req.return_value = Response() req.return_value.status_code = 200 req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson( - operations_pb2.Operation() - ) + req.return_value._content = table.AuthorizedView.to_json(table.AuthorizedView()) - request = bigtable_table_admin.UpdateTableRequest() + request = bigtable_table_admin.GetAuthorizedViewRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() + post.return_value = table.AuthorizedView() - client.update_table( + client.get_authorized_view( request, metadata=[ ("key", "val"), @@ -9143,8 +16766,8 @@ def test_update_table_rest_interceptors(null_interceptor): 
post.assert_called_once() -def test_update_table_rest_bad_request( - transport: str = "rest", request_type=bigtable_table_admin.UpdateTableRequest +def test_get_authorized_view_rest_bad_request( + transport: str = "rest", request_type=bigtable_table_admin.GetAuthorizedViewRequest ): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -9153,7 +16776,7 @@ def test_update_table_rest_bad_request( # send a request that will satisfy transcoding request_init = { - "table": {"name": "projects/sample1/instances/sample2/tables/sample3"} + "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" } request = request_type(**request_init) @@ -9166,10 +16789,10 @@ def test_update_table_rest_bad_request( response_value.status_code = 400 response_value.request = Request() req.return_value = response_value - client.update_table(request) + client.get_authorized_view(request) -def test_update_table_rest_flattened(): +def test_get_authorized_view_rest_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -9178,41 +16801,42 @@ def test_update_table_rest_flattened(): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") + return_value = table.AuthorizedView() # get arguments that satisfy an http rule for this method sample_request = { - "table": {"name": "projects/sample1/instances/sample2/tables/sample3"} + "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" } # get truthy value for each flattened field mock_args = dict( - table=gba_table.Table(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + name="name_value", ) mock_args.update(sample_request) # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 + # Convert return value to protobuf type + return_value = table.AuthorizedView.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - client.update_table(**mock_args) + client.get_authorized_view(**mock_args) # Establish that the underlying call was made with the expected # request object values. assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{table.name=projects/*/instances/*/tables/*}" + "%s/v2/{name=projects/*/instances/*/tables/*/authorizedViews/*}" % client.transport._host, args[1], ) -def test_update_table_rest_flattened_error(transport: str = "rest"): +def test_get_authorized_view_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -9221,14 +16845,13 @@ def test_update_table_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): - client.update_table( - bigtable_table_admin.UpdateTableRequest(), - table=gba_table.Table(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + client.get_authorized_view( + bigtable_table_admin.GetAuthorizedViewRequest(), + name="name_value", ) -def test_update_table_rest_error(): +def test_get_authorized_view_rest_error(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -9237,73 +16860,199 @@ def test_update_table_rest_error(): @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.DeleteTableRequest, + bigtable_table_admin.UpdateAuthorizedViewRequest, dict, ], ) -def test_delete_table_rest(request_type): +def test_update_authorized_view_rest(request_type): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request_init = { + "authorized_view": { + "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + } + request_init["authorized_view"] = { + "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4", + "subset_view": { + "row_prefixes": [b"row_prefixes_blob1", b"row_prefixes_blob2"], + "family_subsets": {}, + }, + "etag": "etag_value", + "deletion_protection": True, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_table_admin.UpdateAuthorizedViewRequest.meta.fields[ + "authorized_view" + ] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["authorized_view"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["authorized_view"][field])): + del request_init["authorized_view"][field][i][subfield] + else: + del request_init["authorized_view"][field][subfield] request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = None + return_value = operations_pb2.Operation(name="operations/spam") # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - json_return_value = "" + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.delete_table(request) + response = client.update_authorized_view(request) # Establish that the response is the type that we expect. 
- assert response is None + assert response.operation.name == "operations/spam" -def test_delete_table_rest_required_fields( - request_type=bigtable_table_admin.DeleteTableRequest, +def test_update_authorized_view_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.update_authorized_view + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.update_authorized_view + ] = mock_rpc + + request = {} + client.update_authorized_view(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.update_authorized_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_update_authorized_view_rest_required_fields( + request_type=bigtable_table_admin.UpdateAuthorizedViewRequest, ): transport_class = transports.BigtableTableAdminRestTransport request_init = {} - request_init["name"] = "" request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).delete_table._get_unset_required_fields(jsonified_request) + ).update_authorized_view._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present - jsonified_request["name"] = "name_value" - unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).delete_table._get_unset_required_fields(jsonified_request) + ).update_authorized_view._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "ignore_warnings", + "update_mask", + ) + ) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -9312,7 +17061,7 @@ def test_delete_table_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = None + return_value = operations_pb2.Operation(name="operations/spam") # Mock the http request call within the method and fake a response. 
with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -9324,36 +17073,45 @@ def test_delete_table_rest_required_fields( pb_request = request_type.pb(request) transcode_result = { "uri": "v1/sample_method", - "method": "delete", + "method": "patch", "query_params": pb_request, } + transcode_result["body"] = pb_request transcode.return_value = transcode_result response_value = Response() response_value.status_code = 200 - json_return_value = "" + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.delete_table(request) + response = client.update_authorized_view(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_delete_table_rest_unset_required_fields(): +def test_update_authorized_view_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.delete_table._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) - + unset_fields = transport.update_authorized_view._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "ignoreWarnings", + "updateMask", + ) + ) + & set(("authorizedView",)) + ) + @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_delete_table_rest_interceptors(null_interceptor): +def test_update_authorized_view_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -9366,11 +17124,16 @@ def test_delete_table_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_delete_table" + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_update_authorized_view" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_update_authorized_view" ) as pre: pre.assert_not_called() - pb_message = bigtable_table_admin.DeleteTableRequest.pb( - bigtable_table_admin.DeleteTableRequest() + post.assert_not_called() + pb_message = bigtable_table_admin.UpdateAuthorizedViewRequest.pb( + bigtable_table_admin.UpdateAuthorizedViewRequest() ) transcode.return_value = { "method": "post", @@ -9382,15 +17145,19 @@ def test_delete_table_rest_interceptors(null_interceptor): req.return_value = Response() req.return_value.status_code = 200 req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson( + operations_pb2.Operation() + ) - request = bigtable_table_admin.DeleteTableRequest() + request = bigtable_table_admin.UpdateAuthorizedViewRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() - client.delete_table( + client.update_authorized_view( request, metadata=[ ("key", "val"), @@ -9399,10 +17166,12 @@ def test_delete_table_rest_interceptors(null_interceptor): ) pre.assert_called_once() + post.assert_called_once() -def test_delete_table_rest_bad_request( - transport: str = "rest", request_type=bigtable_table_admin.DeleteTableRequest +def test_update_authorized_view_rest_bad_request( 
+ transport: str = "rest", + request_type=bigtable_table_admin.UpdateAuthorizedViewRequest, ): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -9410,7 +17179,11 @@ def test_delete_table_rest_bad_request( ) # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request_init = { + "authorized_view": { + "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + } request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -9422,10 +17195,10 @@ def test_delete_table_rest_bad_request( response_value.status_code = 400 response_value.request = Request() req.return_value = response_value - client.delete_table(request) + client.update_authorized_view(request) -def test_delete_table_rest_flattened(): +def test_update_authorized_view_rest_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -9434,37 +17207,43 @@ def test_delete_table_rest_flattened(): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = None + return_value = operations_pb2.Operation(name="operations/spam") # get arguments that satisfy an http rule for this method - sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} + sample_request = { + "authorized_view": { + "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + } # get truthy value for each flattened field mock_args = dict( - name="name_value", + authorized_view=table.AuthorizedView(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) mock_args.update(sample_request) # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - json_return_value = "" + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - client.delete_table(**mock_args) + client.update_authorized_view(**mock_args) # Establish that the underlying call was made with the expected # request object values. assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/tables/*}" % client.transport._host, + "%s/v2/{authorized_view.name=projects/*/instances/*/tables/*/authorizedViews/*}" + % client.transport._host, args[1], ) -def test_delete_table_rest_flattened_error(transport: str = "rest"): +def test_update_authorized_view_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -9473,13 +17252,14 @@ def test_delete_table_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): - client.delete_table( - bigtable_table_admin.DeleteTableRequest(), - name="name_value", + client.update_authorized_view( + bigtable_table_admin.UpdateAuthorizedViewRequest(), + authorized_view=table.AuthorizedView(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) -def test_delete_table_rest_error(): +def test_update_authorized_view_rest_error(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -9488,40 +17268,83 @@ def test_delete_table_rest_error(): @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.UndeleteTableRequest, + bigtable_table_admin.DeleteAuthorizedViewRequest, dict, ], ) -def test_undelete_table_rest(request_type): +def test_delete_authorized_view_rest(request_type): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request_init = { + "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") + return_value = None # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) + json_return_value = "" response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.undelete_table(request) + response = client.delete_authorized_view(request) # Establish that the response is the type that we expect. - assert response.operation.name == "operations/spam" + assert response is None -def test_undelete_table_rest_required_fields( - request_type=bigtable_table_admin.UndeleteTableRequest, +def test_delete_authorized_view_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.delete_authorized_view + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.delete_authorized_view + ] = mock_rpc + + request = {} + client.delete_authorized_view(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.delete_authorized_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_delete_authorized_view_rest_required_fields( + request_type=bigtable_table_admin.DeleteAuthorizedViewRequest, ): transport_class = transports.BigtableTableAdminRestTransport @@ -9530,17 +17353,14 @@ def test_undelete_table_rest_required_fields( request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).undelete_table._get_unset_required_fields(jsonified_request) + ).delete_authorized_view._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present @@ -9549,7 +17369,9 @@ def test_undelete_table_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).undelete_table._get_unset_required_fields(jsonified_request) + ).delete_authorized_view._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("etag",)) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone @@ -9563,7 +17385,7 @@ def test_undelete_table_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") + return_value = None # Mock the http request call within the method and fake a response. 
with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -9575,37 +17397,36 @@ def test_undelete_table_rest_required_fields( pb_request = request_type.pb(request) transcode_result = { "uri": "v1/sample_method", - "method": "post", + "method": "delete", "query_params": pb_request, } - transcode_result["body"] = pb_request transcode.return_value = transcode_result response_value = Response() response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) + json_return_value = "" response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.undelete_table(request) + response = client.delete_authorized_view(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_undelete_table_rest_unset_required_fields(): +def test_delete_authorized_view_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.undelete_table._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) + unset_fields = transport.delete_authorized_view._get_unset_required_fields({}) + assert set(unset_fields) == (set(("etag",)) & set(("name",))) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_undelete_table_rest_interceptors(null_interceptor): +def test_delete_authorized_view_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -9618,16 +17439,11 @@ def test_undelete_table_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_undelete_table" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_undelete_table" + transports.BigtableTableAdminRestInterceptor, "pre_delete_authorized_view" ) as pre: pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_table_admin.UndeleteTableRequest.pb( - bigtable_table_admin.UndeleteTableRequest() + pb_message = bigtable_table_admin.DeleteAuthorizedViewRequest.pb( + bigtable_table_admin.DeleteAuthorizedViewRequest() ) transcode.return_value = { "method": "post", @@ -9639,19 +17455,15 @@ def test_undelete_table_rest_interceptors(null_interceptor): req.return_value = Response() req.return_value.status_code = 200 req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson( - operations_pb2.Operation() - ) - request = bigtable_table_admin.UndeleteTableRequest() + request = bigtable_table_admin.DeleteAuthorizedViewRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - client.undelete_table( + client.delete_authorized_view( request, metadata=[ ("key", "val"), @@ -9660,11 +17472,11 @@ def test_undelete_table_rest_interceptors(null_interceptor): ) pre.assert_called_once() - post.assert_called_once() -def test_undelete_table_rest_bad_request( - transport: str = "rest", request_type=bigtable_table_admin.UndeleteTableRequest +def test_delete_authorized_view_rest_bad_request( + transport: str = "rest", + 
request_type=bigtable_table_admin.DeleteAuthorizedViewRequest, ): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -9672,7 +17484,9 @@ def test_undelete_table_rest_bad_request( ) # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request_init = { + "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -9684,10 +17498,10 @@ def test_undelete_table_rest_bad_request( response_value.status_code = 400 response_value.request = Request() req.return_value = response_value - client.undelete_table(request) + client.delete_authorized_view(request) -def test_undelete_table_rest_flattened(): +def test_delete_authorized_view_rest_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -9696,10 +17510,12 @@ def test_undelete_table_rest_flattened(): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") + return_value = None # get arguments that satisfy an http rule for this method - sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} + sample_request = { + "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } # get truthy value for each flattened field mock_args = dict( @@ -9710,24 +17526,24 @@ def test_undelete_table_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) + json_return_value = "" response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - client.undelete_table(**mock_args) + client.delete_authorized_view(**mock_args) # Establish that the underlying call was made with the expected # request object values. assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/tables/*}:undelete" + "%s/v2/{name=projects/*/instances/*/tables/*/authorizedViews/*}" % client.transport._host, args[1], ) -def test_undelete_table_rest_flattened_error(transport: str = "rest"): +def test_delete_authorized_view_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -9736,13 +17552,13 @@ def test_undelete_table_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): - client.undelete_table( - bigtable_table_admin.UndeleteTableRequest(), + client.delete_authorized_view( + bigtable_table_admin.DeleteAuthorizedViewRequest(), name="name_value", ) -def test_undelete_table_rest_error(): +def test_delete_authorized_view_rest_error(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -9792,6 +17608,47 @@ def test_modify_column_families_rest(request_type): assert response.deletion_protection is True +def test_modify_column_families_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.modify_column_families + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.modify_column_families + ] = mock_rpc + + request = {} + client.modify_column_families(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.modify_column_families(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + def test_modify_column_families_rest_required_fields( request_type=bigtable_table_admin.ModifyColumnFamiliesRequest, ): @@ -10075,6 +17932,42 @@ def test_drop_row_range_rest(request_type): assert response is None +def test_drop_row_range_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.drop_row_range in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.drop_row_range] = mock_rpc + + request = {} + client.drop_row_range(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.drop_row_range(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + def test_drop_row_range_rest_required_fields( request_type=bigtable_table_admin.DropRowRangeRequest, ): @@ -10280,6 +18173,47 @@ def test_generate_consistency_token_rest(request_type): assert response.consistency_token == "consistency_token_value" +def test_generate_consistency_token_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.generate_consistency_token + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.generate_consistency_token + ] = mock_rpc + + request = {} + client.generate_consistency_token(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.generate_consistency_token(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + def test_generate_consistency_token_rest_required_fields( request_type=bigtable_table_admin.GenerateConsistencyTokenRequest, ): @@ -10558,6 +18492,44 @@ def test_check_consistency_rest(request_type): assert response.consistent is True +def test_check_consistency_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.check_consistency in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.check_consistency + ] = mock_rpc + + request = {} + client.check_consistency(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.check_consistency(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + def test_check_consistency_rest_required_fields( request_type=bigtable_table_admin.CheckConsistencyRequest, ): @@ -10842,6 +18814,46 @@ def test_snapshot_table_rest(request_type): assert response.operation.name == "operations/spam" +def test_snapshot_table_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.snapshot_table in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.snapshot_table] = mock_rpc + + request = {} + client.snapshot_table(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.snapshot_table(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + def test_snapshot_table_rest_required_fields( request_type=bigtable_table_admin.SnapshotTableRequest, ): @@ -11141,6 +19153,42 @@ def test_get_snapshot_rest(request_type): assert response.description == "description_value" +def test_get_snapshot_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.get_snapshot in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.get_snapshot] = mock_rpc + + request = {} + client.get_snapshot(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.get_snapshot(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + def test_get_snapshot_rest_required_fields( request_type=bigtable_table_admin.GetSnapshotRequest, ): @@ -11404,13 +19452,49 @@ def test_list_snapshots_rest(request_type): return_value = bigtable_table_admin.ListSnapshotsResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.list_snapshots(request) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.list_snapshots(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListSnapshotsPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_snapshots_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.list_snapshots in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.list_snapshots] = mock_rpc + + request = {} + client.list_snapshots(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.list_snapshots(request) - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListSnapshotsPager) - assert response.next_page_token == "next_page_token_value" + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 def test_list_snapshots_rest_required_fields( @@ -11756,6 +19840,42 @@ def test_delete_snapshot_rest(request_type): assert response is None +def test_delete_snapshot_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.delete_snapshot in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.delete_snapshot] = mock_rpc + + request = {} + client.delete_snapshot(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.delete_snapshot(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + def test_delete_snapshot_rest_required_fields( request_type=bigtable_table_admin.DeleteSnapshotRequest, ): @@ -12103,6 +20223,46 @@ def get_message_fields(field): assert response.operation.name == "operations/spam" +def test_create_backup_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.create_backup in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.create_backup] = mock_rpc + + request = {} + client.create_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.create_backup(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + def test_create_backup_rest_required_fields( request_type=bigtable_table_admin.CreateBackupRequest, ): @@ -12411,6 +20571,42 @@ def test_get_backup_rest(request_type): assert response.state == table.Backup.State.CREATING +def test_get_backup_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.get_backup in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.get_backup] = mock_rpc + + request = {} + client.get_backup(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.get_backup(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + def test_get_backup_rest_required_fields( request_type=bigtable_table_admin.GetBackupRequest, ): @@ -12786,6 +20982,42 @@ def get_message_fields(field): assert response.state == table.Backup.State.CREATING +def test_update_backup_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.update_backup in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.update_backup] = mock_rpc + + request = {} + client.update_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.update_backup(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + def test_update_backup_rest_required_fields( request_type=bigtable_table_admin.UpdateBackupRequest, ): @@ -13067,6 +21299,42 @@ def test_delete_backup_rest(request_type): assert response is None +def test_delete_backup_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.delete_backup in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.delete_backup] = mock_rpc + + request = {} + client.delete_backup(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.delete_backup(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + def test_delete_backup_rest_required_fields( request_type=bigtable_table_admin.DeleteBackupRequest, ): @@ -13328,6 +21596,42 @@ def test_list_backups_rest(request_type): assert response.next_page_token == "next_page_token_value" +def test_list_backups_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.list_backups in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.list_backups] = mock_rpc + + request = {} + client.list_backups(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.list_backups(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + def test_list_backups_rest_required_fields( request_type=bigtable_table_admin.ListBackupsRequest, ): @@ -13673,6 +21977,46 @@ def test_restore_table_rest(request_type): assert response.operation.name == "operations/spam" +def test_restore_table_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.restore_table in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.restore_table] = mock_rpc + + request = {} + client.restore_table(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.restore_table(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + def test_restore_table_rest_required_fields( request_type=bigtable_table_admin.RestoreTableRequest, ): @@ -13893,6 +22237,46 @@ def test_copy_backup_rest(request_type): assert response.operation.name == "operations/spam" +def test_copy_backup_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.copy_backup in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.copy_backup] = mock_rpc + + request = {} + client.copy_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.copy_backup(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + def test_copy_backup_rest_required_fields( request_type=bigtable_table_admin.CopyBackupRequest, ): @@ -14187,6 +22571,42 @@ def test_get_iam_policy_rest(request_type): assert response.etag == b"etag_blob" +def test_get_iam_policy_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.get_iam_policy in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.get_iam_policy] = mock_rpc + + request = {} + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.get_iam_policy(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + def test_get_iam_policy_rest_required_fields( request_type=iam_policy_pb2.GetIamPolicyRequest, ): @@ -14452,6 +22872,42 @@ def test_set_iam_policy_rest(request_type): assert response.etag == b"etag_blob" +def test_set_iam_policy_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.set_iam_policy in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.set_iam_policy] = mock_rpc + + request = {} + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.set_iam_policy(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + def test_set_iam_policy_rest_required_fields( request_type=iam_policy_pb2.SetIamPolicyRequest, ): @@ -14723,6 +23179,46 @@ def test_test_iam_permissions_rest(request_type): assert response.permissions == ["permissions_value"] +def test_test_iam_permissions_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.test_iam_permissions in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.test_iam_permissions + ] = mock_rpc + + request = {} + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.test_iam_permissions(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + def test_test_iam_permissions_rest_required_fields( request_type=iam_policy_pb2.TestIamPermissionsRequest, ): @@ -15110,6 +23606,11 @@ def test_bigtable_table_admin_base_transport(): "update_table", "delete_table", "undelete_table", + "create_authorized_view", + "list_authorized_views", + "get_authorized_view", + "update_authorized_view", + "delete_authorized_view", "modify_column_families", "drop_row_range", "generate_consistency_token", @@ -15459,6 +23960,21 @@ def test_bigtable_table_admin_client_transport_session_collision(transport_name) session1 = client1.transport.undelete_table._session session2 = client2.transport.undelete_table._session assert session1 != session2 + session1 = client1.transport.create_authorized_view._session + session2 = client2.transport.create_authorized_view._session + assert session1 != session2 + session1 = client1.transport.list_authorized_views._session + session2 = client2.transport.list_authorized_views._session + assert session1 != session2 + session1 = client1.transport.get_authorized_view._session + session2 = client2.transport.get_authorized_view._session + assert session1 != session2 + session1 = client1.transport.update_authorized_view._session + session2 = client2.transport.update_authorized_view._session + assert session1 != session2 + session1 = client1.transport.delete_authorized_view._session + session2 = client2.transport.delete_authorized_view._session + assert session1 != session2 session1 = client1.transport.modify_column_families._session session2 = client2.transport.modify_column_families._session assert session1 != session2 @@ -15675,11 +24191,42 @@ def test_bigtable_table_admin_grpc_lro_async_client(): assert transport.operations_client is transport.operations_client -def test_backup_path(): +def test_authorized_view_path(): project = "squid" instance = "clam" - cluster = "whelk" - backup = "octopus" + table = "whelk" + authorized_view = "octopus" + expected = "projects/{project}/instances/{instance}/tables/{table}/authorizedViews/{authorized_view}".format( + project=project, + instance=instance, + table=table, + authorized_view=authorized_view, + ) + actual = BigtableTableAdminClient.authorized_view_path( + project, instance, table, authorized_view + ) + assert expected == actual + + +def test_parse_authorized_view_path(): + expected = { + "project": "oyster", + "instance": "nudibranch", + "table": "cuttlefish", + "authorized_view": "mussel", + } + path = BigtableTableAdminClient.authorized_view_path(**expected) + + # Check that the path construction is reversible. 
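test_authorized_view_path and test_parse_authorized_view_path pin down the new resource-name format, projects/*/instances/*/tables/*/authorizedViews/*. A sketch of the build/parse round trip they cover; the real helpers are classmethods on BigtableTableAdminClient, and the regex below is only an approximation used for illustration:

import re

def authorized_view_path(project, instance, table, authorized_view):
    return (
        f"projects/{project}/instances/{instance}"
        f"/tables/{table}/authorizedViews/{authorized_view}"
    )

_AUTHORIZED_VIEW_PATH_RE = re.compile(
    r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)"
    r"/tables/(?P<table>.+?)/authorizedViews/(?P<authorized_view>.+?)$"
)

def parse_authorized_view_path(path):
    match = _AUTHORIZED_VIEW_PATH_RE.match(path)
    return match.groupdict() if match else {}

# Round trip: parsing the built path recovers the original components.
expected = {"project": "squid", "instance": "clam", "table": "whelk", "authorized_view": "octopus"}
assert parse_authorized_view_path(authorized_view_path(**expected)) == expected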
+ actual = BigtableTableAdminClient.parse_authorized_view_path(path) + assert expected == actual + + +def test_backup_path(): + project = "winkle" + instance = "nautilus" + cluster = "scallop" + backup = "abalone" expected = "projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}".format( project=project, instance=instance, @@ -15692,10 +24239,10 @@ def test_backup_path(): def test_parse_backup_path(): expected = { - "project": "oyster", - "instance": "nudibranch", - "cluster": "cuttlefish", - "backup": "mussel", + "project": "squid", + "instance": "clam", + "cluster": "whelk", + "backup": "octopus", } path = BigtableTableAdminClient.backup_path(**expected) @@ -15705,9 +24252,9 @@ def test_parse_backup_path(): def test_cluster_path(): - project = "winkle" - instance = "nautilus" - cluster = "scallop" + project = "oyster" + instance = "nudibranch" + cluster = "cuttlefish" expected = "projects/{project}/instances/{instance}/clusters/{cluster}".format( project=project, instance=instance, @@ -15719,9 +24266,9 @@ def test_cluster_path(): def test_parse_cluster_path(): expected = { - "project": "abalone", - "instance": "squid", - "cluster": "clam", + "project": "mussel", + "instance": "winkle", + "cluster": "nautilus", } path = BigtableTableAdminClient.cluster_path(**expected) @@ -15731,11 +24278,11 @@ def test_parse_cluster_path(): def test_crypto_key_version_path(): - project = "whelk" - location = "octopus" - key_ring = "oyster" - crypto_key = "nudibranch" - crypto_key_version = "cuttlefish" + project = "scallop" + location = "abalone" + key_ring = "squid" + crypto_key = "clam" + crypto_key_version = "whelk" expected = "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}/cryptoKeyVersions/{crypto_key_version}".format( project=project, location=location, @@ -15751,11 +24298,11 @@ def test_crypto_key_version_path(): def test_parse_crypto_key_version_path(): expected = { - "project": "mussel", - "location": "winkle", - "key_ring": "nautilus", - "crypto_key": "scallop", - "crypto_key_version": "abalone", + "project": "octopus", + "location": "oyster", + "key_ring": "nudibranch", + "crypto_key": "cuttlefish", + "crypto_key_version": "mussel", } path = BigtableTableAdminClient.crypto_key_version_path(**expected) @@ -15765,8 +24312,8 @@ def test_parse_crypto_key_version_path(): def test_instance_path(): - project = "squid" - instance = "clam" + project = "winkle" + instance = "nautilus" expected = "projects/{project}/instances/{instance}".format( project=project, instance=instance, @@ -15777,8 +24324,8 @@ def test_instance_path(): def test_parse_instance_path(): expected = { - "project": "whelk", - "instance": "octopus", + "project": "scallop", + "instance": "abalone", } path = BigtableTableAdminClient.instance_path(**expected) @@ -15788,10 +24335,10 @@ def test_parse_instance_path(): def test_snapshot_path(): - project = "oyster" - instance = "nudibranch" - cluster = "cuttlefish" - snapshot = "mussel" + project = "squid" + instance = "clam" + cluster = "whelk" + snapshot = "octopus" expected = "projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}".format( project=project, instance=instance, @@ -15806,10 +24353,10 @@ def test_snapshot_path(): def test_parse_snapshot_path(): expected = { - "project": "winkle", - "instance": "nautilus", - "cluster": "scallop", - "snapshot": "abalone", + "project": "oyster", + "instance": "nudibranch", + "cluster": "cuttlefish", + "snapshot": "mussel", } path = 
BigtableTableAdminClient.snapshot_path(**expected) @@ -15819,9 +24366,9 @@ def test_parse_snapshot_path(): def test_table_path(): - project = "squid" - instance = "clam" - table = "whelk" + project = "winkle" + instance = "nautilus" + table = "scallop" expected = "projects/{project}/instances/{instance}/tables/{table}".format( project=project, instance=instance, @@ -15833,9 +24380,9 @@ def test_table_path(): def test_parse_table_path(): expected = { - "project": "octopus", - "instance": "oyster", - "table": "nudibranch", + "project": "abalone", + "instance": "squid", + "table": "clam", } path = BigtableTableAdminClient.table_path(**expected) @@ -15845,7 +24392,7 @@ def test_parse_table_path(): def test_common_billing_account_path(): - billing_account = "cuttlefish" + billing_account = "whelk" expected = "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -15855,7 +24402,7 @@ def test_common_billing_account_path(): def test_parse_common_billing_account_path(): expected = { - "billing_account": "mussel", + "billing_account": "octopus", } path = BigtableTableAdminClient.common_billing_account_path(**expected) @@ -15865,7 +24412,7 @@ def test_parse_common_billing_account_path(): def test_common_folder_path(): - folder = "winkle" + folder = "oyster" expected = "folders/{folder}".format( folder=folder, ) @@ -15875,7 +24422,7 @@ def test_common_folder_path(): def test_parse_common_folder_path(): expected = { - "folder": "nautilus", + "folder": "nudibranch", } path = BigtableTableAdminClient.common_folder_path(**expected) @@ -15885,7 +24432,7 @@ def test_parse_common_folder_path(): def test_common_organization_path(): - organization = "scallop" + organization = "cuttlefish" expected = "organizations/{organization}".format( organization=organization, ) @@ -15895,7 +24442,7 @@ def test_common_organization_path(): def test_parse_common_organization_path(): expected = { - "organization": "abalone", + "organization": "mussel", } path = BigtableTableAdminClient.common_organization_path(**expected) @@ -15905,7 +24452,7 @@ def test_parse_common_organization_path(): def test_common_project_path(): - project = "squid" + project = "winkle" expected = "projects/{project}".format( project=project, ) @@ -15915,7 +24462,7 @@ def test_common_project_path(): def test_parse_common_project_path(): expected = { - "project": "clam", + "project": "nautilus", } path = BigtableTableAdminClient.common_project_path(**expected) @@ -15925,8 +24472,8 @@ def test_parse_common_project_path(): def test_common_location_path(): - project = "whelk" - location = "octopus" + project = "scallop" + location = "abalone" expected = "projects/{project}/locations/{location}".format( project=project, location=location, @@ -15937,8 +24484,8 @@ def test_common_location_path(): def test_parse_common_location_path(): expected = { - "project": "oyster", - "location": "nudibranch", + "project": "squid", + "location": "clam", } path = BigtableTableAdminClient.common_location_path(**expected) diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/__init__.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/__init__.py index 89a37dc92c5a..8f6cf068242c 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/__init__.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not 
use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py index 105f9e49ef1c..5a62b3dfaa80 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -1099,7 +1099,8 @@ def test_read_rows(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.ReadRowsRequest() + request = bigtable.ReadRowsRequest() + assert args[0] == request # Establish that the response is the type that we expect. for message in response: @@ -1116,12 +1117,148 @@ def test_read_rows_empty_call(): # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.read_rows), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) client.read_rows() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == bigtable.ReadRowsRequest() +def test_read_rows_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable.ReadRowsRequest( + table_name="table_name_value", + authorized_view_name="authorized_view_name_value", + app_profile_id="app_profile_id_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.read_rows), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.read_rows(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable.ReadRowsRequest( + table_name="table_name_value", + authorized_view_name="authorized_view_name_value", + app_profile_id="app_profile_id_value", + ) + + +def test_read_rows_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.read_rows in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client._transport._wrapped_methods[client._transport.read_rows] = mock_rpc + request = {} + client.read_rows(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.read_rows(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_read_rows_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.read_rows), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.ReadRowsResponse()] + ) + response = await client.read_rows() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable.ReadRowsRequest() + + +@pytest.mark.asyncio +async def test_read_rows_async_use_cached_wrapped_rpc(transport: str = "grpc_asyncio"): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.read_rows + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.read_rows + ] = mock_object + + request = {} + await client.read_rows(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + await client.read_rows(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + @pytest.mark.asyncio async def test_read_rows_async( transport: str = "grpc_asyncio", request_type=bigtable.ReadRowsRequest @@ -1147,7 +1284,8 @@ async def test_read_rows_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.ReadRowsRequest() + request = bigtable.ReadRowsRequest() + assert args[0] == request # Establish that the response is the type that we expect. message = await response.read() @@ -1200,6 +1338,27 @@ def test_read_rows_routing_parameters(): _, _, kw = call.mock_calls[0] # This test doesn't assert anything useful. assert kw["metadata"] + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable.ReadRowsRequest( + **{ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
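The new routing-parameters hunks (read_rows, sample_row_keys, mutate_row) send a request whose authorized_view_name matches the URI pattern and then only assert that kw["metadata"] is non-empty; the idea being exercised is that the client derives routing metadata from such fields. A hedged sketch of that derivation under assumed field names, not the generated client code:

from urllib.parse import quote

def routing_metadata(request: dict) -> list:
    """Illustrative: build routing metadata from whichever routing field is set."""
    params = []
    for field in ("table_name", "authorized_view_name", "app_profile_id"):
        value = request.get(field)
        if value:
            params.append(f"{field}={quote(value, safe='')}")
    return [("x-goog-request-params", "&".join(params))] if params else []

metadata = routing_metadata(
    {"authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"}
)
assert metadata and metadata[0][0] == "x-goog-request-params"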
+ with mock.patch.object(type(client.transport.read_rows), "__call__") as call: + call.return_value = iter([bigtable.ReadRowsResponse()]) + client.read_rows(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + _, _, kw = call.mock_calls[0] + # This test doesn't assert anything useful. + assert kw["metadata"] def test_read_rows_flattened(): @@ -1318,7 +1477,8 @@ def test_sample_row_keys(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.SampleRowKeysRequest() + request = bigtable.SampleRowKeysRequest() + assert args[0] == request # Establish that the response is the type that we expect. for message in response: @@ -1335,12 +1495,150 @@ def test_sample_row_keys_empty_call(): # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) client.sample_row_keys() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == bigtable.SampleRowKeysRequest() +def test_sample_row_keys_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable.SampleRowKeysRequest( + table_name="table_name_value", + authorized_view_name="authorized_view_name_value", + app_profile_id="app_profile_id_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.sample_row_keys(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable.SampleRowKeysRequest( + table_name="table_name_value", + authorized_view_name="authorized_view_name_value", + app_profile_id="app_profile_id_value", + ) + + +def test_sample_row_keys_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.sample_row_keys in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.sample_row_keys] = mock_rpc + request = {} + client.sample_row_keys(request) + + # Establish that the underlying gRPC stub method was called. 
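The *_non_empty_request_with_auto_populated_field tests are coverage failsafes for AIP-4235 auto-population: string fields marked for auto-population receive a UUID4 when left unset, while fields the caller sets explicitly (as these tests do) pass through unchanged. The requests exercised here do not appear to define such a field, so the sketch below uses a hypothetical request_id purely for illustration:

import uuid

AUTO_POPULATED_FIELDS = ("request_id",)  # hypothetical; not a field of these Bigtable requests

def auto_populate(request: dict) -> dict:
    populated = dict(request)
    for field in AUTO_POPULATED_FIELDS:
        if not populated.get(field):
            populated[field] = str(uuid.uuid4())
    return populated

req = auto_populate({"table_name": "table_name_value", "app_profile_id": "app_profile_id_value"})
assert req["table_name"] == "table_name_value"   # explicitly set fields are untouched
assert len(req["request_id"]) == 36              # unset field filled with a UUID4 string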
+ assert mock_rpc.call_count == 1 + + client.sample_row_keys(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_sample_row_keys_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.SampleRowKeysResponse()] + ) + response = await client.sample_row_keys() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable.SampleRowKeysRequest() + + +@pytest.mark.asyncio +async def test_sample_row_keys_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.sample_row_keys + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.sample_row_keys + ] = mock_object + + request = {} + await client.sample_row_keys(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + await client.sample_row_keys(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + @pytest.mark.asyncio async def test_sample_row_keys_async( transport: str = "grpc_asyncio", request_type=bigtable.SampleRowKeysRequest @@ -1366,7 +1664,8 @@ async def test_sample_row_keys_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.SampleRowKeysRequest() + request = bigtable.SampleRowKeysRequest() + assert args[0] == request # Establish that the response is the type that we expect. message = await response.read() @@ -1419,6 +1718,27 @@ def test_sample_row_keys_routing_parameters(): _, _, kw = call.mock_calls[0] # This test doesn't assert anything useful. assert kw["metadata"] + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable.SampleRowKeysRequest( + **{ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: + call.return_value = iter([bigtable.SampleRowKeysResponse()]) + client.sample_row_keys(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + _, _, kw = call.mock_calls[0] + # This test doesn't assert anything useful. + assert kw["metadata"] def test_sample_row_keys_flattened(): @@ -1537,7 +1857,8 @@ def test_mutate_row(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.MutateRowRequest() + request = bigtable.MutateRowRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, bigtable.MutateRowResponse) @@ -1553,12 +1874,147 @@ def test_mutate_row_empty_call(): # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) client.mutate_row() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == bigtable.MutateRowRequest() +def test_mutate_row_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable.MutateRowRequest( + table_name="table_name_value", + authorized_view_name="authorized_view_name_value", + app_profile_id="app_profile_id_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.mutate_row(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable.MutateRowRequest( + table_name="table_name_value", + authorized_view_name="authorized_view_name_value", + app_profile_id="app_profile_id_value", + ) + + +def test_mutate_row_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.mutate_row in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.mutate_row] = mock_rpc + request = {} + client.mutate_row(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.mutate_row(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_mutate_row_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.MutateRowResponse() + ) + response = await client.mutate_row() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable.MutateRowRequest() + + +@pytest.mark.asyncio +async def test_mutate_row_async_use_cached_wrapped_rpc(transport: str = "grpc_asyncio"): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.mutate_row + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.mutate_row + ] = mock_object + + request = {} + await client.mutate_row(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + await client.mutate_row(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + @pytest.mark.asyncio async def test_mutate_row_async( transport: str = "grpc_asyncio", request_type=bigtable.MutateRowRequest @@ -1583,7 +2039,8 @@ async def test_mutate_row_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.MutateRowRequest() + request = bigtable.MutateRowRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, bigtable.MutateRowResponse) @@ -1635,6 +2092,27 @@ def test_mutate_row_routing_parameters(): _, _, kw = call.mock_calls[0] # This test doesn't assert anything useful. assert kw["metadata"] + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable.MutateRowRequest( + **{ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: + call.return_value = bigtable.MutateRowResponse() + client.mutate_row(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + _, _, kw = call.mock_calls[0] + # This test doesn't assert anything useful. + assert kw["metadata"] def test_mutate_row_flattened(): @@ -1799,7 +2277,8 @@ def test_mutate_rows(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.MutateRowsRequest() + request = bigtable.MutateRowsRequest() + assert args[0] == request # Establish that the response is the type that we expect. for message in response: @@ -1816,12 +2295,150 @@ def test_mutate_rows_empty_call(): # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) client.mutate_rows() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == bigtable.MutateRowsRequest() +def test_mutate_rows_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable.MutateRowsRequest( + table_name="table_name_value", + authorized_view_name="authorized_view_name_value", + app_profile_id="app_profile_id_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.mutate_rows(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable.MutateRowsRequest( + table_name="table_name_value", + authorized_view_name="authorized_view_name_value", + app_profile_id="app_profile_id_value", + ) + + +def test_mutate_rows_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.mutate_rows in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.mutate_rows] = mock_rpc + request = {} + client.mutate_rows(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.mutate_rows(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_mutate_rows_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.MutateRowsResponse()] + ) + response = await client.mutate_rows() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable.MutateRowsRequest() + + +@pytest.mark.asyncio +async def test_mutate_rows_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.mutate_rows + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.mutate_rows + ] = mock_object + + request = {} + await client.mutate_rows(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + await client.mutate_rows(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + @pytest.mark.asyncio async def test_mutate_rows_async( transport: str = "grpc_asyncio", request_type=bigtable.MutateRowsRequest @@ -1847,7 +2464,8 @@ async def test_mutate_rows_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.MutateRowsRequest() + request = bigtable.MutateRowsRequest() + assert args[0] == request # Establish that the response is the type that we expect. message = await response.read() @@ -1900,6 +2518,27 @@ def test_mutate_rows_routing_parameters(): _, _, kw = call.mock_calls[0] # This test doesn't assert anything useful. assert kw["metadata"] + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable.MutateRowsRequest( + **{ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: + call.return_value = iter([bigtable.MutateRowsResponse()]) + client.mutate_rows(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + _, _, kw = call.mock_calls[0] + # This test doesn't assert anything useful. + assert kw["metadata"] def test_mutate_rows_flattened(): @@ -2015,48 +2654,196 @@ def test_check_and_mutate_row(request_type, transport: str = "grpc"): transport=transport, ) - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.check_and_mutate_row), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable.CheckAndMutateRowResponse( + predicate_matched=True, + ) + response = client.check_and_mutate_row(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = bigtable.CheckAndMutateRowRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable.CheckAndMutateRowResponse) + assert response.predicate_matched is True + + +def test_check_and_mutate_row_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.check_and_mutate_row), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.check_and_mutate_row() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable.CheckAndMutateRowRequest() + + +def test_check_and_mutate_row_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable.CheckAndMutateRowRequest( + table_name="table_name_value", + authorized_view_name="authorized_view_name_value", + app_profile_id="app_profile_id_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.check_and_mutate_row), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.check_and_mutate_row(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable.CheckAndMutateRowRequest( + table_name="table_name_value", + authorized_view_name="authorized_view_name_value", + app_profile_id="app_profile_id_value", + ) + + +def test_check_and_mutate_row_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.check_and_mutate_row), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable.CheckAndMutateRowResponse( - predicate_matched=True, + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.check_and_mutate_row in client._transport._wrapped_methods ) - response = client.check_and_mutate_row(request) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.check_and_mutate_row + ] = mock_rpc + request = {} + client.check_and_mutate_row(request) # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.CheckAndMutateRowRequest() + assert mock_rpc.call_count == 1 - # Establish that the response is the type that we expect. - assert isinstance(response, bigtable.CheckAndMutateRowResponse) - assert response.predicate_matched is True + client.check_and_mutate_row(request) + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 -def test_check_and_mutate_row_empty_call(): + +@pytest.mark.asyncio +async def test_check_and_mutate_row_empty_call_async(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. - client = BigtableClient( + client = BigtableAsyncClient( credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", + transport="grpc_asyncio", ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.check_and_mutate_row), "__call__" ) as call: - client.check_and_mutate_row() + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.CheckAndMutateRowResponse( + predicate_matched=True, + ) + ) + response = await client.check_and_mutate_row() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == bigtable.CheckAndMutateRowRequest() +@pytest.mark.asyncio +async def test_check_and_mutate_row_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.check_and_mutate_row + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.check_and_mutate_row + ] = mock_object + + request = {} + await client.check_and_mutate_row(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + await client.check_and_mutate_row(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + @pytest.mark.asyncio async def test_check_and_mutate_row_async( transport: str = "grpc_asyncio", request_type=bigtable.CheckAndMutateRowRequest @@ -2085,7 +2872,8 @@ async def test_check_and_mutate_row_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.CheckAndMutateRowRequest() + request = bigtable.CheckAndMutateRowRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, bigtable.CheckAndMutateRowResponse) @@ -2142,6 +2930,29 @@ def test_check_and_mutate_row_routing_parameters(): _, _, kw = call.mock_calls[0] # This test doesn't assert anything useful. assert kw["metadata"] + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable.CheckAndMutateRowRequest( + **{ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.check_and_mutate_row), "__call__" + ) as call: + call.return_value = bigtable.CheckAndMutateRowResponse() + client.check_and_mutate_row(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + _, _, kw = call.mock_calls[0] + # This test doesn't assert anything useful. + assert kw["metadata"] def test_check_and_mutate_row_flattened(): @@ -2410,7 +3221,8 @@ def test_ping_and_warm(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. 
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.PingAndWarmRequest() + request = bigtable.PingAndWarmRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, bigtable.PingAndWarmResponse) @@ -2426,12 +3238,147 @@ def test_ping_and_warm_empty_call(): # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.ping_and_warm), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) client.ping_and_warm() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == bigtable.PingAndWarmRequest() +def test_ping_and_warm_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable.PingAndWarmRequest( + name="name_value", + app_profile_id="app_profile_id_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.ping_and_warm), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.ping_and_warm(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable.PingAndWarmRequest( + name="name_value", + app_profile_id="app_profile_id_value", + ) + + +def test_ping_and_warm_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.ping_and_warm in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.ping_and_warm] = mock_rpc + request = {} + client.ping_and_warm(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.ping_and_warm(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_ping_and_warm_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.ping_and_warm), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.PingAndWarmResponse() + ) + response = await client.ping_and_warm() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable.PingAndWarmRequest() + + +@pytest.mark.asyncio +async def test_ping_and_warm_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.ping_and_warm + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.ping_and_warm + ] = mock_object + + request = {} + await client.ping_and_warm(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + await client.ping_and_warm(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + @pytest.mark.asyncio async def test_ping_and_warm_async( transport: str = "grpc_asyncio", request_type=bigtable.PingAndWarmRequest @@ -2456,7 +3403,8 @@ async def test_ping_and_warm_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.PingAndWarmRequest() + request = bigtable.PingAndWarmRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, bigtable.PingAndWarmResponse) @@ -2630,7 +3578,8 @@ def test_read_modify_write_row(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.ReadModifyWriteRowRequest() + request = bigtable.ReadModifyWriteRowRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, bigtable.ReadModifyWriteRowResponse) @@ -2648,12 +3597,158 @@ def test_read_modify_write_row_empty_call(): with mock.patch.object( type(client.transport.read_modify_write_row), "__call__" ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) client.read_modify_write_row() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == bigtable.ReadModifyWriteRowRequest() +def test_read_modify_write_row_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. 
+ request = bigtable.ReadModifyWriteRowRequest( + table_name="table_name_value", + authorized_view_name="authorized_view_name_value", + app_profile_id="app_profile_id_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_modify_write_row), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.read_modify_write_row(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable.ReadModifyWriteRowRequest( + table_name="table_name_value", + authorized_view_name="authorized_view_name_value", + app_profile_id="app_profile_id_value", + ) + + +def test_read_modify_write_row_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.read_modify_write_row + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.read_modify_write_row + ] = mock_rpc + request = {} + client.read_modify_write_row(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.read_modify_write_row(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_read_modify_write_row_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_modify_write_row), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.ReadModifyWriteRowResponse() + ) + response = await client.read_modify_write_row() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable.ReadModifyWriteRowRequest() + + +@pytest.mark.asyncio +async def test_read_modify_write_row_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.read_modify_write_row + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.read_modify_write_row + ] = mock_object + + request = {} + await client.read_modify_write_row(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + await client.read_modify_write_row(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + @pytest.mark.asyncio async def test_read_modify_write_row_async( transport: str = "grpc_asyncio", request_type=bigtable.ReadModifyWriteRowRequest @@ -2680,7 +3775,8 @@ async def test_read_modify_write_row_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.ReadModifyWriteRowRequest() + request = bigtable.ReadModifyWriteRowRequest() + assert args[0] == request # Establish that the response is the type that we expect. assert isinstance(response, bigtable.ReadModifyWriteRowResponse) @@ -2698,9 +3794,28 @@ def test_read_modify_write_row_routing_parameters(): # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable.ReadModifyWriteRowRequest( - **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} - ) + request = bigtable.ReadModifyWriteRowRequest( + **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_modify_write_row), "__call__" + ) as call: + call.return_value = bigtable.ReadModifyWriteRowResponse() + client.read_modify_write_row(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + _, _, kw = call.mock_calls[0] + # This test doesn't assert anything useful. + assert kw["metadata"] + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable.ReadModifyWriteRowRequest(**{"app_profile_id": "sample1"}) # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object( @@ -2719,7 +3834,11 @@ def test_read_modify_write_row_routing_parameters(): assert kw["metadata"] # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = bigtable.ReadModifyWriteRowRequest(**{"app_profile_id": "sample1"}) + request = bigtable.ReadModifyWriteRowRequest( + **{ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -2886,7 +4005,8 @@ def test_generate_initial_change_stream_partitions( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.GenerateInitialChangeStreamPartitionsRequest() + request = bigtable.GenerateInitialChangeStreamPartitionsRequest() + assert args[0] == request # Establish that the response is the type that we expect. for message in response: @@ -2907,12 +4027,157 @@ def test_generate_initial_change_stream_partitions_empty_call(): with mock.patch.object( type(client.transport.generate_initial_change_stream_partitions), "__call__" ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) client.generate_initial_change_stream_partitions() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == bigtable.GenerateInitialChangeStreamPartitionsRequest() +def test_generate_initial_change_stream_partitions_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable.GenerateInitialChangeStreamPartitionsRequest( + table_name="table_name_value", + app_profile_id="app_profile_id_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.generate_initial_change_stream_partitions), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.generate_initial_change_stream_partitions(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable.GenerateInitialChangeStreamPartitionsRequest( + table_name="table_name_value", + app_profile_id="app_profile_id_value", + ) + + +def test_generate_initial_change_stream_partitions_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.generate_initial_change_stream_partitions + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.generate_initial_change_stream_partitions + ] = mock_rpc + request = {} + client.generate_initial_change_stream_partitions(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.generate_initial_change_stream_partitions(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_generate_initial_change_stream_partitions_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.generate_initial_change_stream_partitions), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.GenerateInitialChangeStreamPartitionsResponse()] + ) + response = await client.generate_initial_change_stream_partitions() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable.GenerateInitialChangeStreamPartitionsRequest() + + +@pytest.mark.asyncio +async def test_generate_initial_change_stream_partitions_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.generate_initial_change_stream_partitions + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.generate_initial_change_stream_partitions + ] = mock_object + + request = {} + await client.generate_initial_change_stream_partitions(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + await client.generate_initial_change_stream_partitions(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + @pytest.mark.asyncio async def test_generate_initial_change_stream_partitions_async( transport: str = "grpc_asyncio", @@ -2941,7 +4206,8 @@ async def test_generate_initial_change_stream_partitions_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.GenerateInitialChangeStreamPartitionsRequest() + request = bigtable.GenerateInitialChangeStreamPartitionsRequest() + assert args[0] == request # Establish that the response is the type that we expect. message = await response.read() @@ -3147,7 +4413,8 @@ def test_read_change_stream(request_type, transport: str = "grpc"): # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.ReadChangeStreamRequest() + request = bigtable.ReadChangeStreamRequest() + assert args[0] == request # Establish that the response is the type that we expect. for message in response: @@ -3166,12 +4433,156 @@ def test_read_change_stream_empty_call(): with mock.patch.object( type(client.transport.read_change_stream), "__call__" ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) client.read_change_stream() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == bigtable.ReadChangeStreamRequest() +def test_read_change_stream_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. 
+ client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable.ReadChangeStreamRequest( + table_name="table_name_value", + app_profile_id="app_profile_id_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_change_stream), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.read_change_stream(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable.ReadChangeStreamRequest( + table_name="table_name_value", + app_profile_id="app_profile_id_value", + ) + + +def test_read_change_stream_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.read_change_stream in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.read_change_stream + ] = mock_rpc + request = {} + client.read_change_stream(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.read_change_stream(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_read_change_stream_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_change_stream), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.ReadChangeStreamResponse()] + ) + response = await client.read_change_stream() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable.ReadChangeStreamRequest() + + +@pytest.mark.asyncio +async def test_read_change_stream_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.read_change_stream + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + class AwaitableMock(mock.AsyncMock): + def __await__(self): + self.await_count += 1 + return iter([]) + + mock_object = AwaitableMock() + client._client._transport._wrapped_methods[ + client._client._transport.read_change_stream + ] = mock_object + + request = {} + await client.read_change_stream(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + + await client.read_change_stream(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + @pytest.mark.asyncio async def test_read_change_stream_async( transport: str = "grpc_asyncio", request_type=bigtable.ReadChangeStreamRequest @@ -3199,7 +4610,8 @@ async def test_read_change_stream_async( # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.ReadChangeStreamRequest() + request = bigtable.ReadChangeStreamRequest() + assert args[0] == request # Establish that the response is the type that we expect. 
message = await response.read() @@ -3418,92 +4830,40 @@ def test_read_rows_rest(request_type): assert response.last_scanned_row_key == b"last_scanned_row_key_blob" -def test_read_rows_rest_required_fields(request_type=bigtable.ReadRowsRequest): - transport_class = transports.BigtableRestTransport - - request_init = {} - request_init["table_name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, +def test_read_rows_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).read_rows._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["tableName"] = "table_name_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).read_rows._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "tableName" in jsonified_request - assert jsonified_request["tableName"] == "table_name_value" - - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = bigtable.ReadRowsResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable.ReadRowsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - json_return_value = "[{}]".format(json_return_value) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value + # Ensure method has been cached + assert client._transport.read_rows in client._transport._wrapped_methods - with mock.patch.object(response_value, "iter_content") as iter_content: - iter_content.return_value = iter(json_return_value) - response = client.read_rows(request) + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client._transport._wrapped_methods[client._transport.read_rows] = mock_rpc - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + request = {} + client.read_rows(request) + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 -def test_read_rows_rest_unset_required_fields(): - transport = transports.BigtableRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) + client.read_rows(request) - unset_fields = transport.read_rows._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("tableName",))) + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.parametrize("null_interceptor", [True, False]) @@ -3703,95 +5063,40 @@ def test_sample_row_keys_rest(request_type): assert response.offset_bytes == 1293 -def test_sample_row_keys_rest_required_fields( - request_type=bigtable.SampleRowKeysRequest, -): - transport_class = transports.BigtableRestTransport - - request_init = {} - request_init["table_name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, +def test_sample_row_keys_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).sample_row_keys._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["tableName"] = "table_name_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).sample_row_keys._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("app_profile_id",)) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "tableName" in jsonified_request - assert jsonified_request["tableName"] == "table_name_value" - - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = bigtable.SampleRowKeysResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable.SampleRowKeysResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - json_return_value = "[{}]".format(json_return_value) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value + # Ensure method has been cached + assert client._transport.sample_row_keys in client._transport._wrapped_methods - with mock.patch.object(response_value, "iter_content") as iter_content: - iter_content.return_value = iter(json_return_value) - response = client.sample_row_keys(request) + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.sample_row_keys] = mock_rpc - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + request = {} + client.sample_row_keys(request) + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 -def test_sample_row_keys_rest_unset_required_fields(): - transport = transports.BigtableRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) + client.sample_row_keys(request) - unset_fields = transport.sample_row_keys._get_unset_required_fields({}) - assert set(unset_fields) == (set(("appProfileId",)) & set(("tableName",))) + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 @pytest.mark.parametrize("null_interceptor", [True, False]) @@ -3979,11 +5284,46 @@ def test_mutate_row_rest(request_type): assert isinstance(response, bigtable.MutateRowResponse) +def test_mutate_row_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.mutate_row in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.mutate_row] = mock_rpc + + request = {} + client.mutate_row(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.mutate_row(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + def test_mutate_row_rest_required_fields(request_type=bigtable.MutateRowRequest): transport_class = transports.BigtableRestTransport request_init = {} - request_init["table_name"] = "" request_init["row_key"] = b"" request = request_type(**request_init) pb_request = request_type.pb(request) @@ -4003,7 +5343,6 @@ def test_mutate_row_rest_required_fields(request_type=bigtable.MutateRowRequest) # verify required fields with default values are now present - jsonified_request["tableName"] = "table_name_value" jsonified_request["rowKey"] = b"row_key_blob" unset_fields = transport_class( @@ -4012,8 +5351,6 @@ def test_mutate_row_rest_required_fields(request_type=bigtable.MutateRowRequest) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone - assert "tableName" in jsonified_request - assert jsonified_request["tableName"] == "table_name_value" assert "rowKey" in jsonified_request assert jsonified_request["rowKey"] == b"row_key_blob" @@ -4069,7 +5406,6 @@ def test_mutate_row_rest_unset_required_fields(): set(()) & set( ( - "tableName", "rowKey", "mutations", ) @@ -4277,11 +5613,46 @@ def test_mutate_rows_rest(request_type): assert isinstance(response, bigtable.MutateRowsResponse) +def test_mutate_rows_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.mutate_rows in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.mutate_rows] = mock_rpc + + request = {} + client.mutate_rows(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.mutate_rows(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + def test_mutate_rows_rest_required_fields(request_type=bigtable.MutateRowsRequest): transport_class = transports.BigtableRestTransport request_init = {} - request_init["table_name"] = "" request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( @@ -4300,16 +5671,12 @@ def test_mutate_rows_rest_required_fields(request_type=bigtable.MutateRowsReques # verify required fields with default values are now present - jsonified_request["tableName"] = "table_name_value" - unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() ).mutate_rows._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone - assert "tableName" in jsonified_request - assert jsonified_request["tableName"] == "table_name_value" client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), @@ -4362,15 +5729,7 @@ def test_mutate_rows_rest_unset_required_fields(): ) unset_fields = transport.mutate_rows._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(()) - & set( - ( - "tableName", - "entries", - ) - ) - ) + assert set(unset_fields) == (set(()) & set(("entries",))) @pytest.mark.parametrize("null_interceptor", [True, False]) @@ -4563,13 +5922,52 @@ def test_check_and_mutate_row_rest(request_type): assert response.predicate_matched is True +def test_check_and_mutate_row_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.check_and_mutate_row in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.check_and_mutate_row + ] = mock_rpc + + request = {} + client.check_and_mutate_row(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.check_and_mutate_row(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + def test_check_and_mutate_row_rest_required_fields( request_type=bigtable.CheckAndMutateRowRequest, ): transport_class = transports.BigtableRestTransport request_init = {} - request_init["table_name"] = "" request_init["row_key"] = b"" request = request_type(**request_init) pb_request = request_type.pb(request) @@ -4589,7 +5987,6 @@ def test_check_and_mutate_row_rest_required_fields( # verify required fields with default values are now present - jsonified_request["tableName"] = "table_name_value" jsonified_request["rowKey"] = b"row_key_blob" unset_fields = transport_class( @@ -4598,8 +5995,6 @@ def test_check_and_mutate_row_rest_required_fields( jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone - assert "tableName" in jsonified_request - assert jsonified_request["tableName"] == "table_name_value" assert "rowKey" in jsonified_request assert jsonified_request["rowKey"] == b"row_key_blob" @@ -4651,15 +6046,7 @@ def test_check_and_mutate_row_rest_unset_required_fields(): ) unset_fields = transport.check_and_mutate_row._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(()) - & set( - ( - "tableName", - "rowKey", - ) - ) - ) + assert set(unset_fields) == (set(()) & set(("rowKey",))) @pytest.mark.parametrize("null_interceptor", [True, False]) @@ -4889,6 +6276,42 @@ def test_ping_and_warm_rest(request_type): assert isinstance(response, bigtable.PingAndWarmResponse) +def test_ping_and_warm_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.ping_and_warm in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.ping_and_warm] = mock_rpc + + request = {} + client.ping_and_warm(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.ping_and_warm(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + def test_ping_and_warm_rest_required_fields(request_type=bigtable.PingAndWarmRequest): transport_class = transports.BigtableRestTransport @@ -5151,13 +6574,53 @@ def test_read_modify_write_row_rest(request_type): assert isinstance(response, bigtable.ReadModifyWriteRowResponse) +def test_read_modify_write_row_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.read_modify_write_row + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.read_modify_write_row + ] = mock_rpc + + request = {} + client.read_modify_write_row(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.read_modify_write_row(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + def test_read_modify_write_row_rest_required_fields( request_type=bigtable.ReadModifyWriteRowRequest, ): transport_class = transports.BigtableRestTransport request_init = {} - request_init["table_name"] = "" request_init["row_key"] = b"" request = request_type(**request_init) pb_request = request_type.pb(request) @@ -5177,7 +6640,6 @@ def test_read_modify_write_row_rest_required_fields( # verify required fields with default values are now present - jsonified_request["tableName"] = "table_name_value" jsonified_request["rowKey"] = b"row_key_blob" unset_fields = transport_class( @@ -5186,8 +6648,6 @@ def test_read_modify_write_row_rest_required_fields( jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone - assert "tableName" in jsonified_request - assert jsonified_request["tableName"] == "table_name_value" assert "rowKey" in jsonified_request assert jsonified_request["rowKey"] == b"row_key_blob" @@ -5243,7 +6703,6 @@ def test_read_modify_write_row_rest_unset_required_fields(): set(()) & set( ( - "tableName", "rowKey", "rules", ) @@ -5447,6 +6906,47 @@ def test_generate_initial_change_stream_partitions_rest(request_type): assert isinstance(response, bigtable.GenerateInitialChangeStreamPartitionsResponse) +def test_generate_initial_change_stream_partitions_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + 
client._transport.generate_initial_change_stream_partitions + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.generate_initial_change_stream_partitions + ] = mock_rpc + + request = {} + client.generate_initial_change_stream_partitions(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.generate_initial_change_stream_partitions(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + def test_generate_initial_change_stream_partitions_rest_required_fields( request_type=bigtable.GenerateInitialChangeStreamPartitionsRequest, ): @@ -5750,6 +7250,46 @@ def test_read_change_stream_rest(request_type): assert isinstance(response, bigtable.ReadChangeStreamResponse) +def test_read_change_stream_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.read_change_stream in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.read_change_stream + ] = mock_rpc + + request = {} + client.read_change_stream(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.read_change_stream(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + def test_read_change_stream_rest_required_fields( request_type=bigtable.ReadChangeStreamRequest, ): @@ -6568,9 +8108,40 @@ def test_bigtable_transport_channel_mtls_with_adc(transport_class): assert transport.grpc_channel == mock_grpc_channel -def test_instance_path(): +def test_authorized_view_path(): project = "squid" instance = "clam" + table = "whelk" + authorized_view = "octopus" + expected = "projects/{project}/instances/{instance}/tables/{table}/authorizedViews/{authorized_view}".format( + project=project, + instance=instance, + table=table, + authorized_view=authorized_view, + ) + actual = BigtableClient.authorized_view_path( + project, instance, table, authorized_view + ) + assert expected == actual + + +def test_parse_authorized_view_path(): + expected = { + "project": "oyster", + "instance": "nudibranch", + "table": "cuttlefish", + "authorized_view": "mussel", + } + path = BigtableClient.authorized_view_path(**expected) + + # Check that the path construction is reversible. 
+ actual = BigtableClient.parse_authorized_view_path(path) + assert expected == actual + + +def test_instance_path(): + project = "winkle" + instance = "nautilus" expected = "projects/{project}/instances/{instance}".format( project=project, instance=instance, @@ -6581,8 +8152,8 @@ def test_instance_path(): def test_parse_instance_path(): expected = { - "project": "whelk", - "instance": "octopus", + "project": "scallop", + "instance": "abalone", } path = BigtableClient.instance_path(**expected) @@ -6592,9 +8163,9 @@ def test_parse_instance_path(): def test_table_path(): - project = "oyster" - instance = "nudibranch" - table = "cuttlefish" + project = "squid" + instance = "clam" + table = "whelk" expected = "projects/{project}/instances/{instance}/tables/{table}".format( project=project, instance=instance, @@ -6606,9 +8177,9 @@ def test_table_path(): def test_parse_table_path(): expected = { - "project": "mussel", - "instance": "winkle", - "table": "nautilus", + "project": "octopus", + "instance": "oyster", + "table": "nudibranch", } path = BigtableClient.table_path(**expected) @@ -6618,7 +8189,7 @@ def test_parse_table_path(): def test_common_billing_account_path(): - billing_account = "scallop" + billing_account = "cuttlefish" expected = "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -6628,7 +8199,7 @@ def test_common_billing_account_path(): def test_parse_common_billing_account_path(): expected = { - "billing_account": "abalone", + "billing_account": "mussel", } path = BigtableClient.common_billing_account_path(**expected) @@ -6638,7 +8209,7 @@ def test_parse_common_billing_account_path(): def test_common_folder_path(): - folder = "squid" + folder = "winkle" expected = "folders/{folder}".format( folder=folder, ) @@ -6648,7 +8219,7 @@ def test_common_folder_path(): def test_parse_common_folder_path(): expected = { - "folder": "clam", + "folder": "nautilus", } path = BigtableClient.common_folder_path(**expected) @@ -6658,7 +8229,7 @@ def test_parse_common_folder_path(): def test_common_organization_path(): - organization = "whelk" + organization = "scallop" expected = "organizations/{organization}".format( organization=organization, ) @@ -6668,7 +8239,7 @@ def test_common_organization_path(): def test_parse_common_organization_path(): expected = { - "organization": "octopus", + "organization": "abalone", } path = BigtableClient.common_organization_path(**expected) @@ -6678,7 +8249,7 @@ def test_parse_common_organization_path(): def test_common_project_path(): - project = "oyster" + project = "squid" expected = "projects/{project}".format( project=project, ) @@ -6688,7 +8259,7 @@ def test_common_project_path(): def test_parse_common_project_path(): expected = { - "project": "nudibranch", + "project": "clam", } path = BigtableClient.common_project_path(**expected) @@ -6698,8 +8269,8 @@ def test_parse_common_project_path(): def test_common_location_path(): - project = "cuttlefish" - location = "mussel" + project = "whelk" + location = "octopus" expected = "projects/{project}/locations/{location}".format( project=project, location=location, @@ -6710,8 +8281,8 @@ def test_common_location_path(): def test_parse_common_location_path(): expected = { - "project": "winkle", - "location": "nautilus", + "project": "oyster", + "location": "nudibranch", } path = BigtableClient.common_location_path(**expected) diff --git a/packages/google-cloud-bigtable/tests/unit/v2_client/test_client.py b/packages/google-cloud-bigtable/tests/unit/v2_client/test_client.py index 
b6eb6ac96162..4338f8553014 100644 --- a/packages/google-cloud-bigtable/tests/unit/v2_client/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/v2_client/test_client.py @@ -173,10 +173,13 @@ def test_client_constructor_w_emulator_host(): from google.cloud.environment_vars import BIGTABLE_EMULATOR from google.cloud.bigtable.client import _DEFAULT_BIGTABLE_EMULATOR_CLIENT from google.cloud.bigtable.client import _GRPC_CHANNEL_OPTIONS + import grpc emulator_host = "localhost:8081" with mock.patch("os.environ", {BIGTABLE_EMULATOR: emulator_host}): - with mock.patch("grpc.insecure_channel") as factory: + channel = grpc.insecure_channel("no-host") + with mock.patch("grpc.insecure_channel", return_value=channel) as factory: + factory.return_value = channel client = _make_client() # don't test local_composite_credentials # client._local_composite_credentials = lambda: credentials @@ -195,10 +198,12 @@ def test_client_constructor_w_emulator_host(): def test_client_constructor_w_emulator_host_w_project(): from google.cloud.environment_vars import BIGTABLE_EMULATOR from google.cloud.bigtable.client import _GRPC_CHANNEL_OPTIONS + import grpc emulator_host = "localhost:8081" with mock.patch("os.environ", {BIGTABLE_EMULATOR: emulator_host}): - with mock.patch("grpc.insecure_channel") as factory: + channel = grpc.insecure_channel("no-host") + with mock.patch("grpc.insecure_channel", return_value=channel) as factory: client = _make_client(project=PROJECT) # channels are formed when needed, so access a client # create a gapic channel @@ -216,11 +221,13 @@ def test_client_constructor_w_emulator_host_w_credentials(): from google.cloud.environment_vars import BIGTABLE_EMULATOR from google.cloud.bigtable.client import _DEFAULT_BIGTABLE_EMULATOR_CLIENT from google.cloud.bigtable.client import _GRPC_CHANNEL_OPTIONS + import grpc emulator_host = "localhost:8081" credentials = _make_credentials() with mock.patch("os.environ", {BIGTABLE_EMULATOR: emulator_host}): - with mock.patch("grpc.insecure_channel") as factory: + channel = grpc.insecure_channel("no-host") + with mock.patch("grpc.insecure_channel", return_value=channel) as factory: client = _make_client(credentials=credentials) # channels are formed when needed, so access a client # create a gapic channel From 2ae115e51e32f3249140786b6e2c62bdb863da71 Mon Sep 17 00:00:00 2001 From: Igor Bernstein Date: Wed, 29 May 2024 10:31:26 -0400 Subject: [PATCH 790/892] doc: add samples for filtering using async apis (#961) * doc: add samples for filtering using async apis * format * suffix snippets --- .../snippets/filters/filter_snippets_async.py | 337 +++++++++++++ .../filters/filter_snippets_async_test.py | 463 ++++++++++++++++++ 2 files changed, 800 insertions(+) create mode 100644 packages/google-cloud-bigtable/samples/snippets/filters/filter_snippets_async.py create mode 100644 packages/google-cloud-bigtable/samples/snippets/filters/filter_snippets_async_test.py diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/filter_snippets_async.py b/packages/google-cloud-bigtable/samples/snippets/filters/filter_snippets_async.py new file mode 100644 index 000000000000..72dac824dc2e --- /dev/null +++ b/packages/google-cloud-bigtable/samples/snippets/filters/filter_snippets_async.py @@ -0,0 +1,337 @@ +# Copyright 2024, Google LLC +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import datetime +from google.cloud.bigtable.data import Row +from google.cloud._helpers import _datetime_from_microseconds + + +# [START bigtable_filters_limit_row_sample_asyncio] +async def filter_limit_row_sample(project_id, instance_id, table_id): + from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery + from google.cloud.bigtable.data import row_filters + + query = ReadRowsQuery(row_filter=row_filters.RowSampleFilter(0.75)) + + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) + + +# [END bigtable_filters_limit_row_sample_asyncio] +# [START bigtable_filters_limit_row_regex_asyncio] +async def filter_limit_row_regex(project_id, instance_id, table_id): + from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery + from google.cloud.bigtable.data import row_filters + + query = ReadRowsQuery( + row_filter=row_filters.RowKeyRegexFilter(".*#20190501$".encode("utf-8")) + ) + + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) + + +# [END bigtable_filters_limit_row_regex_asyncio] +# [START bigtable_filters_limit_cells_per_col_asyncio] +async def filter_limit_cells_per_col(project_id, instance_id, table_id): + from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery + from google.cloud.bigtable.data import row_filters + + query = ReadRowsQuery(row_filter=row_filters.CellsColumnLimitFilter(2)) + + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) + + +# [END bigtable_filters_limit_cells_per_col_asyncio] +# [START bigtable_filters_limit_cells_per_row_asyncio] +async def filter_limit_cells_per_row(project_id, instance_id, table_id): + from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery + from google.cloud.bigtable.data import row_filters + + query = ReadRowsQuery(row_filter=row_filters.CellsRowLimitFilter(2)) + + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) + + +# [END bigtable_filters_limit_cells_per_row_asyncio] +# [START bigtable_filters_limit_cells_per_row_offset_asyncio] +async def filter_limit_cells_per_row_offset(project_id, instance_id, table_id): + from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery + from google.cloud.bigtable.data import row_filters + + query = ReadRowsQuery(row_filter=row_filters.CellsRowOffsetFilter(2)) + + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) + + +# [END bigtable_filters_limit_cells_per_row_offset_asyncio] +# [START 
bigtable_filters_limit_col_family_regex_asyncio] +async def filter_limit_col_family_regex(project_id, instance_id, table_id): + from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery + from google.cloud.bigtable.data import row_filters + + query = ReadRowsQuery( + row_filter=row_filters.FamilyNameRegexFilter("stats_.*$".encode("utf-8")) + ) + + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) + + +# [END bigtable_filters_limit_col_family_regex_asyncio] +# [START bigtable_filters_limit_col_qualifier_regex_asyncio] +async def filter_limit_col_qualifier_regex(project_id, instance_id, table_id): + from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery + from google.cloud.bigtable.data import row_filters + + query = ReadRowsQuery( + row_filter=row_filters.ColumnQualifierRegexFilter( + "connected_.*$".encode("utf-8") + ) + ) + + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) + + +# [END bigtable_filters_limit_col_qualifier_regex_asyncio] +# [START bigtable_filters_limit_col_range_asyncio] +async def filter_limit_col_range(project_id, instance_id, table_id): + from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery + from google.cloud.bigtable.data import row_filters + + query = ReadRowsQuery( + row_filter=row_filters.ColumnRangeFilter( + "cell_plan", b"data_plan_01gb", b"data_plan_10gb", inclusive_end=False + ) + ) + + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) + + +# [END bigtable_filters_limit_col_range_asyncio] +# [START bigtable_filters_limit_value_range_asyncio] +async def filter_limit_value_range(project_id, instance_id, table_id): + from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery + from google.cloud.bigtable.data import row_filters + + query = ReadRowsQuery( + row_filter=row_filters.ValueRangeFilter(b"PQ2A.190405", b"PQ2A.190406") + ) + + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) + + +# [END bigtable_filters_limit_value_range_asyncio] +# [START bigtable_filters_limit_value_regex_asyncio] + + +async def filter_limit_value_regex(project_id, instance_id, table_id): + from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery + from google.cloud.bigtable.data import row_filters + + query = ReadRowsQuery( + row_filter=row_filters.ValueRegexFilter("PQ2A.*$".encode("utf-8")) + ) + + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) + + +# [END bigtable_filters_limit_value_regex_asyncio] +# [START bigtable_filters_limit_timestamp_range_asyncio] +async def filter_limit_timestamp_range(project_id, instance_id, table_id): + import datetime + from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery + from google.cloud.bigtable.data import row_filters + + end = datetime.datetime(2019, 5, 1) + + query = 
ReadRowsQuery(row_filter=row_filters.TimestampRangeFilter(end=end)) + + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) + + +# [END bigtable_filters_limit_timestamp_range_asyncio] +# [START bigtable_filters_limit_block_all_asyncio] +async def filter_limit_block_all(project_id, instance_id, table_id): + from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery + from google.cloud.bigtable.data import row_filters + + query = ReadRowsQuery(row_filter=row_filters.BlockAllFilter(True)) + + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) + + +# [END bigtable_filters_limit_block_all_asyncio] +# [START bigtable_filters_limit_pass_all_asyncio] +async def filter_limit_pass_all(project_id, instance_id, table_id): + from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery + from google.cloud.bigtable.data import row_filters + + query = ReadRowsQuery(row_filter=row_filters.PassAllFilter(True)) + + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) + + +# [END bigtable_filters_limit_pass_all_asyncio] +# [START bigtable_filters_modify_strip_value_asyncio] +async def filter_modify_strip_value(project_id, instance_id, table_id): + from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery + from google.cloud.bigtable.data import row_filters + + query = ReadRowsQuery(row_filter=row_filters.StripValueTransformerFilter(True)) + + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) + + +# [END bigtable_filters_modify_strip_value_asyncio] +# [START bigtable_filters_modify_apply_label_asyncio] +async def filter_modify_apply_label(project_id, instance_id, table_id): + from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery + from google.cloud.bigtable.data import row_filters + + query = ReadRowsQuery(row_filter=row_filters.ApplyLabelFilter(label="labelled")) + + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) + + +# [END bigtable_filters_modify_apply_label_asyncio] +# [START bigtable_filters_composing_chain_asyncio] +async def filter_composing_chain(project_id, instance_id, table_id): + from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery + from google.cloud.bigtable.data import row_filters + + query = ReadRowsQuery( + row_filter=row_filters.RowFilterChain( + filters=[ + row_filters.CellsColumnLimitFilter(1), + row_filters.FamilyNameRegexFilter("cell_plan"), + ] + ) + ) + + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) + + +# [END bigtable_filters_composing_chain_asyncio] +# [START bigtable_filters_composing_interleave_asyncio] +async def filter_composing_interleave(project_id, instance_id, table_id): + from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery + 
from google.cloud.bigtable.data import row_filters + + query = ReadRowsQuery( + row_filter=row_filters.RowFilterUnion( + filters=[ + row_filters.ValueRegexFilter("true"), + row_filters.ColumnQualifierRegexFilter("os_build"), + ] + ) + ) + + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) + + +# [END bigtable_filters_composing_interleave_asyncio] +# [START bigtable_filters_composing_condition_asyncio] +async def filter_composing_condition(project_id, instance_id, table_id): + from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery + from google.cloud.bigtable.data import row_filters + + query = ReadRowsQuery( + row_filter=row_filters.ConditionalRowFilter( + predicate_filter=row_filters.RowFilterChain( + filters=[ + row_filters.ColumnQualifierRegexFilter("data_plan_10gb"), + row_filters.ValueRegexFilter("true"), + ] + ), + true_filter=row_filters.ApplyLabelFilter(label="passed-filter"), + false_filter=row_filters.ApplyLabelFilter(label="filtered-out"), + ) + ) + + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) + + +# [END bigtable_filters_composing_condition_asyncio] +# [END_EXCLUDE] + + +def print_row(row: Row): + print("Reading data for {}:".format(row.row_key.decode("utf-8"))) + last_family = None + for cell in row.cells: + if last_family != cell.family: + print("Column Family {}".format(cell.family)) + last_family = cell.family + + labels = " [{}]".format(",".join(cell.labels)) if len(cell.labels) else "" + print( + "\t{}: {} @{}{}".format( + cell.qualifier.decode("utf-8"), + cell.value.decode("utf-8"), + _datetime_from_microseconds(cell.timestamp_micros), + labels, + ) + ) + print("") diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/filter_snippets_async_test.py b/packages/google-cloud-bigtable/samples/snippets/filters/filter_snippets_async_test.py new file mode 100644 index 000000000000..18c93102d17e --- /dev/null +++ b/packages/google-cloud-bigtable/samples/snippets/filters/filter_snippets_async_test.py @@ -0,0 +1,463 @@ +# Copyright 2020, Google LLC +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import datetime +import os +import time + +import inspect +from typing import AsyncGenerator + +import pytest +import pytest_asyncio +from .snapshots.snap_filters_test import snapshots + +from . 
import filter_snippets_async +from google.cloud._helpers import ( + _microseconds_from_datetime, + _datetime_from_microseconds, +) + +PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] +BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"] +TABLE_ID_PREFIX = "mobile-time-series-{}" + + +@pytest_asyncio.fixture +async def table_id() -> AsyncGenerator[str, None]: + table_id = _create_table() + await _populate_table(table_id) + yield table_id + _delete_table(table_id) + + +def _create_table(): + from google.cloud import bigtable + import uuid + + client = bigtable.Client(project=PROJECT, admin=True) + instance = client.instance(BIGTABLE_INSTANCE) + + table_id = TABLE_ID_PREFIX.format(str(uuid.uuid4())[:16]) + table = instance.table(table_id) + if table.exists(): + table.delete() + + table.create(column_families={"stats_summary": None, "cell_plan": None}) + return table_id + + +def _delete_table(table_id: str): + from google.cloud import bigtable + + client = bigtable.Client(project=PROJECT, admin=True) + instance = client.instance(BIGTABLE_INSTANCE) + table = instance.table(table_id) + table.delete() + + +async def _populate_table(table_id): + from google.cloud.bigtable.data import ( + BigtableDataClientAsync, + RowMutationEntry, + SetCell, + ) + + timestamp = datetime.datetime(2019, 5, 1) + timestamp_minus_hr = timestamp - datetime.timedelta(hours=1) + + async with (BigtableDataClientAsync(project=PROJECT) as client): + async with client.get_table(BIGTABLE_INSTANCE, table_id) as table: + async with table.mutations_batcher() as batcher: + await batcher.append( + RowMutationEntry( + "phone#4c410523#20190501", + [ + SetCell( + "stats_summary", + "connected_cell", + 1, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "connected_cell", + 1, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "connected_wifi", + 1, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "os_build", + "PQ2A.190405.003", + _microseconds_from_datetime(timestamp), + ), + SetCell( + "cell_plan", + "data_plan_01gb", + "true", + _microseconds_from_datetime(timestamp_minus_hr), + ), + SetCell( + "cell_plan", + "data_plan_01gb", + "false", + _microseconds_from_datetime(timestamp), + ), + SetCell( + "cell_plan", + "data_plan_05gb", + "true", + _microseconds_from_datetime(timestamp), + ), + ], + ) + ) + await batcher.append( + RowMutationEntry( + "phone#4c410523#20190502", + [ + SetCell( + "stats_summary", + "connected_cell", + 1, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "connected_wifi", + 1, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "os_build", + "PQ2A.190405.004", + _microseconds_from_datetime(timestamp), + ), + SetCell( + "cell_plan", + "data_plan_05gb", + "true", + _microseconds_from_datetime(timestamp), + ), + ], + ) + ) + await batcher.append( + RowMutationEntry( + "phone#4c410523#20190505", + [ + SetCell( + "stats_summary", + "connected_cell", + 0, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "connected_wifi", + 1, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "os_build", + "PQ2A.190406.000", + _microseconds_from_datetime(timestamp), + ), + SetCell( + "cell_plan", + "data_plan_05gb", + "true", + _microseconds_from_datetime(timestamp), + ), + ], + ) + ) + await batcher.append( + RowMutationEntry( + "phone#5c10102#20190501", + [ + SetCell( + "stats_summary", + "connected_cell", + 1, + 
_microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "connected_wifi", + 1, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "os_build", + "PQ2A.190401.002", + _microseconds_from_datetime(timestamp), + ), + SetCell( + "cell_plan", + "data_plan_10gb", + "true", + _microseconds_from_datetime(timestamp), + ), + ], + ) + ) + await batcher.append( + RowMutationEntry( + "phone#5c10102#20190502", + [ + SetCell( + "stats_summary", + "connected_cell", + 1, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "connected_wifi", + 0, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "os_build", + "PQ2A.190406.000", + _microseconds_from_datetime(timestamp), + ), + SetCell( + "cell_plan", + "data_plan_10gb", + "true", + _microseconds_from_datetime(timestamp), + ), + ], + ) + ) + + +def _datetime_to_micros(value: datetime.datetime) -> int: + """Uses the same conversion rules as the old client in""" + if not value.tzinfo: + value = value.replace(tzinfo=datetime.timezone.utc) + # Regardless of what timezone is on the value, convert it to UTC. + value = value.astimezone(datetime.timezone.utc) + # Convert the datetime to a microsecond timestamp. + return int(calendar.timegm(value.timetuple()) * 1e6) + value.microsecond + return int(dt.timestamp() * 1000 * 1000) + + +@pytest.mark.asyncio +async def test_filter_limit_row_sample(capsys, table_id): + await filter_snippets_async.filter_limit_row_sample( + PROJECT, BIGTABLE_INSTANCE, table_id + ) + + out, _ = capsys.readouterr() + assert "Reading data for" in out + + +@pytest.mark.asyncio +async def test_filter_limit_row_regex(capsys, table_id): + await filter_snippets_async.filter_limit_row_regex( + PROJECT, BIGTABLE_INSTANCE, table_id + ) + + out, _ = capsys.readouterr() + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected + + +@pytest.mark.asyncio +async def test_filter_limit_cells_per_col(capsys, table_id): + await filter_snippets_async.filter_limit_cells_per_col( + PROJECT, BIGTABLE_INSTANCE, table_id + ) + + out, _ = capsys.readouterr() + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected + + +@pytest.mark.asyncio +async def test_filter_limit_cells_per_row(capsys, table_id): + await filter_snippets_async.filter_limit_cells_per_row( + PROJECT, BIGTABLE_INSTANCE, table_id + ) + + out, _ = capsys.readouterr() + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected + + +@pytest.mark.asyncio +async def test_filter_limit_cells_per_row_offset(capsys, table_id): + await filter_snippets_async.filter_limit_cells_per_row_offset( + PROJECT, BIGTABLE_INSTANCE, table_id + ) + + out, _ = capsys.readouterr() + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected + + +@pytest.mark.asyncio +async def test_filter_limit_col_family_regex(capsys, table_id): + await filter_snippets_async.filter_limit_col_family_regex( + PROJECT, BIGTABLE_INSTANCE, table_id + ) + + out, _ = capsys.readouterr() + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected + + +@pytest.mark.asyncio +async def test_filter_limit_col_qualifier_regex(capsys, table_id): + await filter_snippets_async.filter_limit_col_qualifier_regex( + PROJECT, BIGTABLE_INSTANCE, table_id + ) + + out, _ = capsys.readouterr() + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected + + +@pytest.mark.asyncio +async def 
test_filter_limit_col_range(capsys, table_id): + await filter_snippets_async.filter_limit_col_range( + PROJECT, BIGTABLE_INSTANCE, table_id + ) + + out, _ = capsys.readouterr() + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected + + +@pytest.mark.asyncio +async def test_filter_limit_value_range(capsys, table_id): + await filter_snippets_async.filter_limit_value_range( + PROJECT, BIGTABLE_INSTANCE, table_id + ) + + out, _ = capsys.readouterr() + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected + + +@pytest.mark.asyncio +async def test_filter_limit_value_regex(capsys, table_id): + await filter_snippets_async.filter_limit_value_regex( + PROJECT, BIGTABLE_INSTANCE, table_id + ) + + out, _ = capsys.readouterr() + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected + + +@pytest.mark.asyncio +async def test_filter_limit_timestamp_range(capsys, table_id): + await filter_snippets_async.filter_limit_timestamp_range( + PROJECT, BIGTABLE_INSTANCE, table_id + ) + + out, _ = capsys.readouterr() + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected + + +@pytest.mark.asyncio +async def test_filter_limit_block_all(capsys, table_id): + await filter_snippets_async.filter_limit_block_all( + PROJECT, BIGTABLE_INSTANCE, table_id + ) + + out, _ = capsys.readouterr() + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected + + +@pytest.mark.asyncio +async def test_filter_limit_pass_all(capsys, table_id): + await filter_snippets_async.filter_limit_pass_all( + PROJECT, BIGTABLE_INSTANCE, table_id + ) + + out, _ = capsys.readouterr() + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected + + +@pytest.mark.asyncio +async def test_filter_modify_strip_value(capsys, table_id): + await filter_snippets_async.filter_modify_strip_value( + PROJECT, BIGTABLE_INSTANCE, table_id + ) + + out, _ = capsys.readouterr() + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected + + +@pytest.mark.asyncio +async def test_filter_modify_apply_label(capsys, table_id): + await filter_snippets_async.filter_modify_apply_label( + PROJECT, BIGTABLE_INSTANCE, table_id + ) + + out, _ = capsys.readouterr() + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected + + +@pytest.mark.asyncio +async def test_filter_composing_chain(capsys, table_id): + await filter_snippets_async.filter_composing_chain( + PROJECT, BIGTABLE_INSTANCE, table_id + ) + + out, _ = capsys.readouterr() + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected + + +@pytest.mark.asyncio +async def test_filter_composing_interleave(capsys, table_id): + await filter_snippets_async.filter_composing_interleave( + PROJECT, BIGTABLE_INSTANCE, table_id + ) + + out, _ = capsys.readouterr() + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected + + +@pytest.mark.asyncio +async def test_filter_composing_condition(capsys, table_id): + await filter_snippets_async.filter_composing_condition( + PROJECT, BIGTABLE_INSTANCE, table_id + ) + + out, _ = capsys.readouterr() + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected From 703e597196bef49fb86eaf836c326f5f18ca81c5 Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Wed, 29 May 2024 11:52:32 -0700 Subject: [PATCH 791/892] chore(docs): improve docstrings in async classes (#964) --- 
.../bigtable/data/_async/_mutate_rows.py | 29 ++- .../cloud/bigtable/data/_async/_read_rows.py | 33 ++- .../cloud/bigtable/data/_async/client.py | 212 +++++++++--------- .../bigtable/data/_async/mutations_batcher.py | 145 ++++++------ .../google/cloud/bigtable/data/_helpers.py | 41 ++-- .../google/cloud/bigtable/data/exceptions.py | 40 ++-- .../google/cloud/bigtable/data/mutations.py | 153 +++++++++++-- .../bigtable/data/read_modify_write_rules.py | 35 +++ .../cloud/bigtable/data/read_rows_query.py | 134 ++++++++--- .../google/cloud/bigtable/data/row.py | 104 ++++++++- .../tests/unit/data/_async/test_client.py | 1 - 11 files changed, 642 insertions(+), 285 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_mutate_rows.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_mutate_rows.py index 7d11445532c4..99b9944cd943 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_mutate_rows.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_mutate_rows.py @@ -56,6 +56,14 @@ class _MutateRowsOperationAsync: Errors are exposed as a MutationsExceptionGroup, which contains a list of exceptions organized by the related failed mutation entries. + + Args: + gapic_client: the client to use for the mutate_rows call + table: the table associated with the request + mutation_entries: a list of RowMutationEntry objects to send to the server + operation_timeout: the timeout to use for the entire operation, in seconds. + attempt_timeout: the timeout to use for each mutate_rows attempt, in seconds. + If not specified, the request will run until operation_timeout is reached. """ def __init__( @@ -67,15 +75,6 @@ def __init__( attempt_timeout: float | None, retryable_exceptions: Sequence[type[Exception]] = (), ): - """ - Args: - - gapic_client: the client to use for the mutate_rows call - - table: the table associated with the request - - mutation_entries: a list of RowMutationEntry objects to send to the server - - operation_timeout: the timeout to use for the entire operation, in seconds. - - attempt_timeout: the timeout to use for each mutate_rows attempt, in seconds. - If not specified, the request will run until operation_timeout is reached. - """ # check that mutations are within limits total_mutations = sum(len(entry.mutations) for entry in mutation_entries) if total_mutations > _MUTATE_ROWS_REQUEST_MUTATION_LIMIT: @@ -121,7 +120,7 @@ async def start(self): Start the operation, and run until completion Raises: - - MutationsExceptionGroup: if any mutations failed + MutationsExceptionGroup: if any mutations failed """ try: # trigger mutate_rows @@ -157,9 +156,9 @@ async def _run_attempt(self): Run a single attempt of the mutate_rows rpc. Raises: - - _MutateRowsIncomplete: if there are failed mutations eligible for - retry after the attempt is complete - - GoogleAPICallError: if the gapic rpc fails + _MutateRowsIncomplete: if there are failed mutations eligible for + retry after the attempt is complete + GoogleAPICallError: if the gapic rpc fails """ request_entries = [self.mutations[idx].proto for idx in self.remaining_indices] # track mutations in this request that have not been finalized yet @@ -213,8 +212,8 @@ def _handle_entry_error(self, idx: int, exc: Exception): retryable. 
Args: - - idx: the index of the mutation that failed - - exc: the exception to add to the list + idx: the index of the mutation that failed + exc: the exception to add to the list """ entry = self.mutations[idx].entry self.errors.setdefault(idx, []).append(exc) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_read_rows.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_read_rows.py index 9e0fd78e1e10..7f6e8e507aa3 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_read_rows.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_read_rows.py @@ -59,6 +59,13 @@ class _ReadRowsOperationAsync: ReadRowsOperation(request, client) handles row merging logic end-to-end, including performing retries on stream errors. + + Args: + query: The query to execute + table: The table to send the request to + operation_timeout: The total time to allow for the operation, in seconds + attempt_timeout: The time to allow for each individual attempt, in seconds + retryable_exceptions: A list of exceptions that should trigger a retry """ __slots__ = ( @@ -104,6 +111,9 @@ def __init__( def start_operation(self) -> AsyncGenerator[Row, None]: """ Start the read_rows operation, retrying on retryable errors. + + Yields: + Row: The next row in the stream """ return retries.retry_target_stream_async( self._read_rows_attempt, @@ -119,6 +129,9 @@ def _read_rows_attempt(self) -> AsyncGenerator[Row, None]: This function is intended to be wrapped by retry logic, which will call this function until it succeeds or a non-retryable error is raised. + + Yields: + Row: The next row in the stream """ # revise request keys and ranges between attempts if self._last_yielded_row_key is not None: @@ -151,6 +164,11 @@ async def chunk_stream( ) -> AsyncGenerator[ReadRowsResponsePB.CellChunk, None]: """ process chunks out of raw read_rows stream + + Args: + stream: the raw read_rows stream from the gapic client + Yields: + ReadRowsResponsePB.CellChunk: the next chunk in the stream """ async for resp in await stream: # extract proto from proto-plus wrapper @@ -195,9 +213,14 @@ async def chunk_stream( @staticmethod async def merge_rows( chunks: AsyncGenerator[ReadRowsResponsePB.CellChunk, None] | None - ): + ) -> AsyncGenerator[Row, None]: """ Merge chunks into rows + + Args: + chunks: the chunk stream to merge + Yields: + Row: the next row in the stream """ if chunks is None: return @@ -311,10 +334,12 @@ def _revise_request_rowset( Revise the rows in the request to avoid ones we've already processed. 
Args: - - row_set: the row set from the request - - last_seen_row_key: the last row key encountered + row_set: the row set from the request + last_seen_row_key: the last row key encountered + Returns: + RowSetPB: the new rowset after adusting for the last seen key Raises: - - _RowSetComplete: if there are no rows left to process after the revision + _RowSetComplete: if there are no rows left to process after the revision """ # if user is doing a whole table scan, start a new one with the last seen key if row_set is None or (not row_set.row_ranges and row_set.row_keys is not None): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py index e385ecde783a..7d75fab0079a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py @@ -116,8 +116,8 @@ def __init__( Client options used to set user options on the client. API Endpoint should be set through client_options. Raises: - - RuntimeError if called outside of an async context (no running event loop) - - ValueError if pool_size is less than 1 + RuntimeError: if called outside of an async context (no running event loop) + ValueError: if pool_size is less than 1 """ # set up transport in registry transport_str = f"pooled_grpc_asyncio_{pool_size}" @@ -199,8 +199,9 @@ def _client_version() -> str: def _start_background_channel_refresh(self) -> None: """ Starts a background task to ping and warm each channel in the pool + Raises: - - RuntimeError if not called in an asyncio event loop + RuntimeError: if not called in an asyncio event loop """ if not self._channel_refresh_tasks and not self._emulator_host: # raise RuntimeError if there is no event loop @@ -234,10 +235,10 @@ async def _ping_and_warm_instances( Pings each Bigtable instance registered in `_active_instances` on the client Args: - - channel: grpc channel to warm - - instance_key: if provided, only warm the instance associated with the key + channel: grpc channel to warm + instance_key: if provided, only warm the instance associated with the key Returns: - - sequence of results or exceptions from the ping requests + list[BaseException | None]: sequence of results or exceptions from the ping requests """ instance_list = ( [instance_key] if instance_key is not None else self._active_instances @@ -323,10 +324,10 @@ async def _register_instance(self, instance_id: str, owner: TableAsync) -> None: Channels will not be refreshed unless at least one instance is registered Args: - - instance_id: id of the instance to register. - - owner: table that owns the instance. Owners will be tracked in - _instance_owners, and instances will only be unregistered when all - owners call _remove_instance_registration + instance_id: id of the instance to register. + owner: table that owns the instance. Owners will be tracked in + _instance_owners, and instances will only be unregistered when all + owners call _remove_instance_registration """ instance_name = self._gapic_client.instance_path(self.project, instance_id) instance_key = _WarmedInstanceKey( @@ -354,12 +355,12 @@ async def _remove_instance_registration( If instance_id is not registered, or is still in use by other tables, returns False Args: - - instance_id: id of the instance to remove - - owner: table that owns the instance. Owners will be tracked in + instance_id: id of the instance to remove + owner: table that owns the instance. 
Owners will be tracked in _instance_owners, and instances will only be unregistered when all owners call _remove_instance_registration Returns: - - True if instance was removed + bool: True if instance was removed, else False """ instance_name = self._gapic_client.instance_path(self.project, instance_id) instance_key = _WarmedInstanceKey( @@ -408,6 +409,10 @@ def get_table(self, instance_id: str, table_id: str, *args, **kwargs) -> TableAs default_retryable_errors: a list of errors that will be retried if encountered during all other operations. Defaults to 4 (DeadlineExceeded) and 14 (ServiceUnavailable) + Returns: + TableAsync: a table instance for making data API requests + Raises: + RuntimeError: if called outside of an async context (no running event loop) """ return TableAsync(self, instance_id, table_id, *args, **kwargs) @@ -490,7 +495,7 @@ def __init__( encountered during all other operations. Defaults to 4 (DeadlineExceeded) and 14 (ServiceUnavailable) Raises: - - RuntimeError if called outside of an async context (no running event loop) + RuntimeError: if called outside of an async context (no running event loop) """ # NOTE: any changes to the signature of this method should also be reflected # in client.get_table() @@ -564,24 +569,24 @@ async def read_rows_stream( retryable_errors list until operation_timeout is reached. Args: - - query: contains details about which rows to return - - operation_timeout: the time budget for the entire operation, in seconds. + query: contains details about which rows to return + operation_timeout: the time budget for the entire operation, in seconds. Failed requests will be retried within the budget. Defaults to the Table's default_read_rows_operation_timeout - - attempt_timeout: the time budget for an individual network request, in seconds. + attempt_timeout: the time budget for an individual network request, in seconds. If it takes longer than this time to complete, the request will be cancelled with a DeadlineExceeded exception, and a retry will be attempted. Defaults to the Table's default_read_rows_attempt_timeout. If None, defaults to operation_timeout. - - retryable_errors: a list of errors that will be retried if encountered. + retryable_errors: a list of errors that will be retried if encountered. Defaults to the Table's default_read_rows_retryable_errors Returns: - - an asynchronous iterator that yields rows returned by the query + AsyncIterable[Row]: an asynchronous iterator that yields rows returned by the query Raises: - - DeadlineExceeded: raised after operation timeout + google.api_core.exceptions.DeadlineExceeded: raised after operation timeout will be chained with a RetryExceptionGroup containing GoogleAPIError exceptions from any retries that failed - - GoogleAPIError: raised if the request encounters an unrecoverable error + google.api_core.exceptions.GoogleAPIError: raised if the request encounters an unrecoverable error """ operation_timeout, attempt_timeout = _get_timeouts( operation_timeout, attempt_timeout, self @@ -615,26 +620,26 @@ async def read_rows( retryable_errors list until operation_timeout is reached. Args: - - query: contains details about which rows to return - - operation_timeout: the time budget for the entire operation, in seconds. + query: contains details about which rows to return + operation_timeout: the time budget for the entire operation, in seconds. Failed requests will be retried within the budget. 
Defaults to the Table's default_read_rows_operation_timeout - - attempt_timeout: the time budget for an individual network request, in seconds. + attempt_timeout: the time budget for an individual network request, in seconds. If it takes longer than this time to complete, the request will be cancelled with a DeadlineExceeded exception, and a retry will be attempted. Defaults to the Table's default_read_rows_attempt_timeout. If None, defaults to operation_timeout. If None, defaults to the Table's default_read_rows_attempt_timeout, or the operation_timeout if that is also None. - - retryable_errors: a list of errors that will be retried if encountered. + retryable_errors: a list of errors that will be retried if encountered. Defaults to the Table's default_read_rows_retryable_errors. Returns: - - a list of Rows returned by the query + list[Row]: a list of Rows returned by the query Raises: - - DeadlineExceeded: raised after operation timeout + google.api_core.exceptions.DeadlineExceeded: raised after operation timeout will be chained with a RetryExceptionGroup containing GoogleAPIError exceptions from any retries that failed - - GoogleAPIError: raised if the request encounters an unrecoverable error + google.api_core.exceptions.GoogleAPIError: raised if the request encounters an unrecoverable error """ row_generator = await self.read_rows_stream( query, @@ -661,24 +666,24 @@ async def read_row( retryable_errors list until operation_timeout is reached. Args: - - query: contains details about which rows to return - - operation_timeout: the time budget for the entire operation, in seconds. + query: contains details about which rows to return + operation_timeout: the time budget for the entire operation, in seconds. Failed requests will be retried within the budget. Defaults to the Table's default_read_rows_operation_timeout - - attempt_timeout: the time budget for an individual network request, in seconds. + attempt_timeout: the time budget for an individual network request, in seconds. If it takes longer than this time to complete, the request will be cancelled with a DeadlineExceeded exception, and a retry will be attempted. Defaults to the Table's default_read_rows_attempt_timeout. If None, defaults to operation_timeout. - - retryable_errors: a list of errors that will be retried if encountered. + retryable_errors: a list of errors that will be retried if encountered. Defaults to the Table's default_read_rows_retryable_errors. Returns: - - a Row object if the row exists, otherwise None + Row | None: a Row object if the row exists, otherwise None Raises: - - DeadlineExceeded: raised after operation timeout + google.api_core.exceptions.DeadlineExceeded: raised after operation timeout will be chained with a RetryExceptionGroup containing GoogleAPIError exceptions from any retries that failed - - GoogleAPIError: raised if the request encounters an unrecoverable error + google.api_core.exceptions.GoogleAPIError: raised if the request encounters an unrecoverable error """ if row_key is None: raise ValueError("row_key must be string or bytes") @@ -716,20 +721,22 @@ async def read_rows_sharded( ``` Args: - - sharded_query: a sharded query to execute - - operation_timeout: the time budget for the entire operation, in seconds. + sharded_query: a sharded query to execute + operation_timeout: the time budget for the entire operation, in seconds. Failed requests will be retried within the budget. 
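Continuing the sketch above for the buffered variants (again illustrative only, with placeholder ids and row keys): read_rows collects the full result set in memory, while read_row fetches a single key and returns None when it is absent.

import asyncio

from google.cloud.bigtable.data import BigtableDataClientAsync
from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery


async def point_reads(project_id, instance_id, table_id):
    async with BigtableDataClientAsync(project=project_id) as client:
        async with client.get_table(instance_id, table_id) as table:
            # buffers every matching row before returning
            rows = await table.read_rows(ReadRowsQuery(row_keys=[b"key-1", b"key-2"]))
            # single-row convenience wrapper; None means the row does not exist
            maybe_row = await table.read_row(b"key-1", operation_timeout=20)
            print(len(rows), maybe_row is not None)


asyncio.run(point_reads("my-project", "my-instance", "my-table"))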
Defaults to the Table's default_read_rows_operation_timeout - - attempt_timeout: the time budget for an individual network request, in seconds. + attempt_timeout: the time budget for an individual network request, in seconds. If it takes longer than this time to complete, the request will be cancelled with a DeadlineExceeded exception, and a retry will be attempted. Defaults to the Table's default_read_rows_attempt_timeout. If None, defaults to operation_timeout. - - retryable_errors: a list of errors that will be retried if encountered. + retryable_errors: a list of errors that will be retried if encountered. Defaults to the Table's default_read_rows_retryable_errors. + Returns: + list[Row]: a list of Rows returned by the query Raises: - - ShardedReadRowsExceptionGroup: if any of the queries failed - - ValueError: if the query_list is empty + ShardedReadRowsExceptionGroup: if any of the queries failed + ValueError: if the query_list is empty """ if not sharded_query: raise ValueError("empty sharded_query") @@ -796,24 +803,24 @@ async def row_exists( uses the filters: chain(limit cells per row = 1, strip value) Args: - - row_key: the key of the row to check - - operation_timeout: the time budget for the entire operation, in seconds. + row_key: the key of the row to check + operation_timeout: the time budget for the entire operation, in seconds. Failed requests will be retried within the budget. Defaults to the Table's default_read_rows_operation_timeout - - attempt_timeout: the time budget for an individual network request, in seconds. + attempt_timeout: the time budget for an individual network request, in seconds. If it takes longer than this time to complete, the request will be cancelled with a DeadlineExceeded exception, and a retry will be attempted. Defaults to the Table's default_read_rows_attempt_timeout. If None, defaults to operation_timeout. - - retryable_errors: a list of errors that will be retried if encountered. + retryable_errors: a list of errors that will be retried if encountered. Defaults to the Table's default_read_rows_retryable_errors. Returns: - - a bool indicating whether the row exists + bool: a bool indicating whether the row exists Raises: - - DeadlineExceeded: raised after operation timeout + google.api_core.exceptions.DeadlineExceeded: raised after operation timeout will be chained with a RetryExceptionGroup containing GoogleAPIError exceptions from any retries that failed - - GoogleAPIError: raised if the request encounters an unrecoverable error + google.api_core.exceptions.GoogleAPIError: raised if the request encounters an unrecoverable error """ if row_key is None: raise ValueError("row_key must be string or bytes") @@ -847,26 +854,26 @@ async def sample_row_keys( requests will call sample_row_keys internally for this purpose when sharding is enabled RowKeySamples is simply a type alias for list[tuple[bytes, int]]; a list of - row_keys, along with offset positions in the table + row_keys, along with offset positions in the table Args: - - operation_timeout: the time budget for the entire operation, in seconds. + operation_timeout: the time budget for the entire operation, in seconds. Failed requests will be retried within the budget.i Defaults to the Table's default_operation_timeout - - attempt_timeout: the time budget for an individual network request, in seconds. + attempt_timeout: the time budget for an individual network request, in seconds. 
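A hedged sketch of the existence-check and sharded-read surfaces described above; ids and keys are placeholders. sample_row_keys supplies the split points that shard() uses to build the ShardedQuery.

import asyncio

from google.cloud.bigtable.data import BigtableDataClientAsync
from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery


async def sharded_scan(project_id, instance_id, table_id):
    async with BigtableDataClientAsync(project=project_id) as client:
        async with client.get_table(instance_id, table_id) as table:
            if await table.row_exists(b"key-1"):
                print("row is present")
            # split a full-table query along sampled boundaries and read in parallel
            samples = await table.sample_row_keys()
            sharded_query = ReadRowsQuery().shard(samples)
            rows = await table.read_rows_sharded(sharded_query)
            print(len(rows))


asyncio.run(sharded_scan("my-project", "my-instance", "my-table"))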
If it takes longer than this time to complete, the request will be cancelled with a DeadlineExceeded exception, and a retry will be attempted. Defaults to the Table's default_attempt_timeout. If None, defaults to operation_timeout. - - retryable_errors: a list of errors that will be retried if encountered. + retryable_errors: a list of errors that will be retried if encountered. Defaults to the Table's default_retryable_errors. Returns: - - a set of RowKeySamples the delimit contiguous sections of the table + RowKeySamples: a set of RowKeySamples the delimit contiguous sections of the table Raises: - - DeadlineExceeded: raised after operation timeout + google.api_core.exceptions.DeadlineExceeded: raised after operation timeout will be chained with a RetryExceptionGroup containing GoogleAPIError exceptions from any retries that failed - - GoogleAPIError: raised if the request encounters an unrecoverable error + google.api_core.exceptions.GoogleAPIError: raised if the request encounters an unrecoverable error """ # prepare timeouts operation_timeout, attempt_timeout = _get_timeouts( @@ -922,22 +929,22 @@ def mutations_batcher( to avoid excess network calls Args: - - flush_interval: Automatically flush every flush_interval seconds. If None, + flush_interval: Automatically flush every flush_interval seconds. If None, a table default will be used - - flush_limit_mutation_count: Flush immediately after flush_limit_mutation_count + flush_limit_mutation_count: Flush immediately after flush_limit_mutation_count mutations are added across all entries. If None, this limit is ignored. - - flush_limit_bytes: Flush immediately after flush_limit_bytes bytes are added. - - flow_control_max_mutation_count: Maximum number of inflight mutations. - - flow_control_max_bytes: Maximum number of inflight bytes. - - batch_operation_timeout: timeout for each mutate_rows operation, in seconds. + flush_limit_bytes: Flush immediately after flush_limit_bytes bytes are added. + flow_control_max_mutation_count: Maximum number of inflight mutations. + flow_control_max_bytes: Maximum number of inflight bytes. + batch_operation_timeout: timeout for each mutate_rows operation, in seconds. Defaults to the Table's default_mutate_rows_operation_timeout - - batch_attempt_timeout: timeout for each individual request, in seconds. + batch_attempt_timeout: timeout for each individual request, in seconds. Defaults to the Table's default_mutate_rows_attempt_timeout. If None, defaults to batch_operation_timeout. - - batch_retryable_errors: a list of errors that will be retried if encountered. + batch_retryable_errors: a list of errors that will be retried if encountered. Defaults to the Table's default_mutate_rows_retryable_errors. Returns: - - a MutationsBatcherAsync context manager that can batch requests + MutationsBatcherAsync: a MutationsBatcherAsync context manager that can batch requests """ return MutationsBatcherAsync( self, @@ -971,26 +978,26 @@ async def mutate_row( retried on server failure. Non-idempotent operations will not. Args: - - row_key: the row to apply mutations to - - mutations: the set of mutations to apply to the row - - operation_timeout: the time budget for the entire operation, in seconds. - Failed requests will be retried within the budget. - Defaults to the Table's default_operation_timeout - - attempt_timeout: the time budget for an individual network request, in seconds. 
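The batching surface documented above is typically used as an async context manager so that the final flush happens on close. A sketch under the same placeholder assumptions; the "stats" column family is illustrative and must already exist in the target table.

import asyncio

from google.cloud.bigtable.data import BigtableDataClientAsync
from google.cloud.bigtable.data.mutations import RowMutationEntry, SetCell


async def batched_writes(project_id, instance_id, table_id):
    async with BigtableDataClientAsync(project=project_id) as client:
        async with client.get_table(instance_id, table_id) as table:
            # entries are flushed on the interval and size limits, and on close
            async with table.mutations_batcher(
                flush_interval=5, flush_limit_mutation_count=500
            ) as batcher:
                for i in range(10):
                    entry = RowMutationEntry(
                        f"row-{i}".encode(), SetCell("stats", "count", i)
                    )
                    await batcher.append(entry)


asyncio.run(batched_writes("my-project", "my-instance", "my-table"))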
- If it takes longer than this time to complete, the request will be cancelled with - a DeadlineExceeded exception, and a retry will be attempted. - Defaults to the Table's default_attempt_timeout. - If None, defaults to operation_timeout. - - retryable_errors: a list of errors that will be retried if encountered. - Only idempotent mutations will be retried. Defaults to the Table's - default_retryable_errors. + row_key: the row to apply mutations to + mutations: the set of mutations to apply to the row + operation_timeout: the time budget for the entire operation, in seconds. + Failed requests will be retried within the budget. + Defaults to the Table's default_operation_timeout + attempt_timeout: the time budget for an individual network request, in seconds. + If it takes longer than this time to complete, the request will be cancelled with + a DeadlineExceeded exception, and a retry will be attempted. + Defaults to the Table's default_attempt_timeout. + If None, defaults to operation_timeout. + retryable_errors: a list of errors that will be retried if encountered. + Only idempotent mutations will be retried. Defaults to the Table's + default_retryable_errors. Raises: - - DeadlineExceeded: raised after operation timeout - will be chained with a RetryExceptionGroup containing all - GoogleAPIError exceptions from any retries that failed - - GoogleAPIError: raised on non-idempotent operations that cannot be - safely retried. - - ValueError if invalid arguments are provided + google.api_core.exceptions.DeadlineExceeded: raised after operation timeout + will be chained with a RetryExceptionGroup containing all + GoogleAPIError exceptions from any retries that failed + google.api_core.exceptions.GoogleAPIError: raised on non-idempotent operations that cannot be + safely retried. + ValueError: if invalid arguments are provided """ operation_timeout, attempt_timeout = _get_timeouts( operation_timeout, attempt_timeout, self @@ -1051,23 +1058,23 @@ async def bulk_mutate_rows( raised exception group Args: - - mutation_entries: the batches of mutations to apply + mutation_entries: the batches of mutations to apply Each entry will be applied atomically, but entries will be applied in arbitrary order - - operation_timeout: the time budget for the entire operation, in seconds. + operation_timeout: the time budget for the entire operation, in seconds. Failed requests will be retried within the budget. Defaults to the Table's default_mutate_rows_operation_timeout - - attempt_timeout: the time budget for an individual network request, in seconds. + attempt_timeout: the time budget for an individual network request, in seconds. If it takes longer than this time to complete, the request will be cancelled with a DeadlineExceeded exception, and a retry will be attempted. Defaults to the Table's default_mutate_rows_attempt_timeout. If None, defaults to operation_timeout. - - retryable_errors: a list of errors that will be retried if encountered. + retryable_errors: a list of errors that will be retried if encountered. 
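For the direct mutation entry points documented above, a minimal sketch (placeholder ids, keys, and column families): mutate_row applies one atomic change set to a single row, while bulk_mutate_rows applies each RowMutationEntry atomically but independently.

import asyncio

from google.cloud.bigtable.data import BigtableDataClientAsync
from google.cloud.bigtable.data.mutations import (
    DeleteAllFromFamily,
    RowMutationEntry,
    SetCell,
)


async def direct_writes(project_id, instance_id, table_id):
    async with BigtableDataClientAsync(project=project_id) as client:
        async with client.get_table(instance_id, table_id) as table:
            # idempotent mutations are retried on transient failures
            await table.mutate_row(
                b"key-1",
                [SetCell("stats", "count", 1), DeleteAllFromFamily("stale")],
                operation_timeout=60,
            )
            # each entry is applied atomically, in arbitrary order
            entries = [
                RowMutationEntry(f"key-{i}".encode(), SetCell("stats", "count", i))
                for i in range(3)
            ]
            await table.bulk_mutate_rows(entries)


asyncio.run(direct_writes("my-project", "my-instance", "my-table"))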
Defaults to the Table's default_mutate_rows_retryable_errors Raises: - - MutationsExceptionGroup if one or more mutations fails + MutationsExceptionGroup: if one or more mutations fails Contains details about any failed entries in .exceptions - - ValueError if invalid arguments are provided + ValueError: if invalid arguments are provided """ operation_timeout, attempt_timeout = _get_timeouts( operation_timeout, attempt_timeout, self @@ -1099,31 +1106,31 @@ async def check_and_mutate_row( Non-idempotent operation: will not be retried Args: - - row_key: the key of the row to mutate - - predicate: the filter to be applied to the contents of the specified row. + row_key: the key of the row to mutate + predicate: the filter to be applied to the contents of the specified row. Depending on whether or not any results are yielded, either true_case_mutations or false_case_mutations will be executed. If None, checks that the row contains any values at all. - - true_case_mutations: + true_case_mutations: Changes to be atomically applied to the specified row if predicate yields at least one cell when applied to row_key. Entries are applied in order, meaning that earlier mutations can be masked by later ones. Must contain at least one entry if false_case_mutations is empty, and at most 100000. - - false_case_mutations: + false_case_mutations: Changes to be atomically applied to the specified row if predicate_filter does not yield any cells when applied to row_key. Entries are applied in order, meaning that earlier mutations can be masked by later ones. Must contain at least one entry if `true_case_mutations` is empty, and at most 100000. - - operation_timeout: the time budget for the entire operation, in seconds. + operation_timeout: the time budget for the entire operation, in seconds. Failed requests will not be retried. Defaults to the Table's default_operation_timeout Returns: - - bool indicating whether the predicate was true or false + bool indicating whether the predicate was true or false Raises: - - GoogleAPIError exceptions from grpc call + google.api_core.exceptions.GoogleAPIError: exceptions from grpc call """ operation_timeout, _ = _get_timeouts(operation_timeout, None, self) if true_case_mutations is not None and not isinstance( @@ -1167,19 +1174,18 @@ async def read_modify_write_row( Non-idempotent operation: will not be retried Args: - - row_key: the key of the row to apply read/modify/write rules to - - rules: A rule or set of rules to apply to the row. + row_key: the key of the row to apply read/modify/write rules to + rules: A rule or set of rules to apply to the row. Rules are applied in order, meaning that earlier rules will affect the results of later ones. - - operation_timeout: the time budget for the entire operation, in seconds. + operation_timeout: the time budget for the entire operation, in seconds. Failed requests will not be retried. Defaults to the Table's default_operation_timeout. 
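check_and_mutate_row, documented above, picks one of two mutation lists based on a predicate filter. A sketch with the same placeholder assumptions; passing None as the predicate uses the documented "row contains any values" check.

import asyncio

from google.cloud.bigtable.data import BigtableDataClientAsync
from google.cloud.bigtable.data.mutations import SetCell


async def conditional_write(project_id, instance_id, table_id):
    async with BigtableDataClientAsync(project=project_id) as client:
        async with client.get_table(instance_id, table_id) as table:
            # non-idempotent: this call is never retried
            predicate_matched = await table.check_and_mutate_row(
                b"key-1",
                None,
                true_case_mutations=[SetCell("stats", "seen", b"1")],
                false_case_mutations=[SetCell("stats", "seen", b"0")],
            )
            print("predicate matched:", predicate_matched)


asyncio.run(conditional_write("my-project", "my-instance", "my-table"))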
Returns: - - Row: containing cell data that was modified as part of the - operation + Row: a Row containing cell data that was modified as part of the operation Raises: - - GoogleAPIError exceptions from grpc call - - ValueError if invalid arguments are provided + google.api_core.exceptions.GoogleAPIError: exceptions from grpc call + ValueError: if invalid arguments are provided """ operation_timeout, _ = _get_timeouts(operation_timeout, None, self) if operation_timeout <= 0: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/mutations_batcher.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/mutations_batcher.py index 5d5dd535ee45..76d13f00bf83 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/mutations_batcher.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/mutations_batcher.py @@ -50,6 +50,13 @@ class _FlowControlAsync: Flow limits are not hard limits. If a single mutation exceeds the configured limits, it will be allowed as a single batch when the capacity is available. + + Args: + max_mutation_count: maximum number of mutations to send in a single rpc. + This corresponds to individual mutations in a single RowMutationEntry. + max_mutation_bytes: maximum number of bytes to send in a single rpc. + Raises: + ValueError: if max_mutation_count or max_mutation_bytes is less than 0 """ def __init__( @@ -57,12 +64,6 @@ def __init__( max_mutation_count: int, max_mutation_bytes: int, ): - """ - Args: - - max_mutation_count: maximum number of mutations to send in a single rpc. - This corresponds to individual mutations in a single RowMutationEntry. - - max_mutation_bytes: maximum number of bytes to send in a single rpc. - """ self._max_mutation_count = max_mutation_count self._max_mutation_bytes = max_mutation_bytes if self._max_mutation_count < 1: @@ -82,10 +83,10 @@ def _has_capacity(self, additional_count: int, additional_size: int) -> bool: previous batches have completed. Args: - - additional_count: number of mutations in the pending entry - - additional_size: size of the pending entry + additional_count: number of mutations in the pending entry + additional_size: size of the pending entry Returns: - - True if there is capacity to send the pending entry, False otherwise + bool: True if there is capacity to send the pending entry, False otherwise """ # adjust limits to allow overly large mutations acceptable_size = max(self._max_mutation_bytes, additional_size) @@ -104,7 +105,7 @@ async def remove_from_flow( operation is complete. Args: - - mutations: mutation or list of mutations to remove from flow control + mutations: mutation or list of mutations to remove from flow control """ if not isinstance(mutations, list): mutations = [mutations] @@ -124,10 +125,11 @@ async def add_to_flow(self, mutations: RowMutationEntry | list[RowMutationEntry] will block until there is capacity available. Args: - - mutations: list mutations to break up into batches + mutations: list mutations to break up into batches Yields: - - list of mutations that have reserved space in the flow control. - Each batch contains at least one mutation. + list[RowMutationEntry]: + list of mutations that have reserved space in the flow control. + Each batch contains at least one mutation. 
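The capacity rule described for _FlowControlAsync above can be reduced to a small standalone check: an entry larger than the configured limits is still admitted, but only once it can run as the sole in-flight batch. The function below is an illustrative model, not the class's actual method.

def has_capacity(in_flight_count, in_flight_bytes,
                 pending_count, pending_bytes,
                 max_count, max_bytes):
    # stretch the limits so an oversized entry can eventually be admitted alone
    acceptable_bytes = max(max_bytes, pending_bytes)
    acceptable_count = max(max_count, pending_count)
    new_bytes = in_flight_bytes + pending_bytes
    new_count = in_flight_count + pending_count
    return new_bytes <= acceptable_bytes and new_count <= acceptable_count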
""" if not isinstance(mutations, list): mutations = [mutations] @@ -171,15 +173,28 @@ class MutationsBatcherAsync: Runs mutate_row, mutate_rows, and check_and_mutate_row internally, combining to use as few network requests as required - Flushes: - - every flush_interval seconds - - after queue reaches flush_count in quantity - - after queue reaches flush_size_bytes in storage size - - when batcher is closed or destroyed - - async with table.mutations_batcher() as batcher: - for i in range(10): - batcher.add(row, mut) + Will automatically flush the batcher: + - every flush_interval seconds + - after queue size reaches flush_limit_mutation_count + - after queue reaches flush_limit_bytes + - when batcher is closed or destroyed + + Args: + table: Table to preform rpc calls + flush_interval: Automatically flush every flush_interval seconds. + If None, no time-based flushing is performed. + flush_limit_mutation_count: Flush immediately after flush_limit_mutation_count + mutations are added across all entries. If None, this limit is ignored. + flush_limit_bytes: Flush immediately after flush_limit_bytes bytes are added. + flow_control_max_mutation_count: Maximum number of inflight mutations. + flow_control_max_bytes: Maximum number of inflight bytes. + batch_operation_timeout: timeout for each mutate_rows operation, in seconds. + If TABLE_DEFAULT, defaults to the Table's default_mutate_rows_operation_timeout. + batch_attempt_timeout: timeout for each individual request, in seconds. + If TABLE_DEFAULT, defaults to the Table's default_mutate_rows_attempt_timeout. + If None, defaults to batch_operation_timeout. + batch_retryable_errors: a list of errors that will be retried if encountered. + Defaults to the Table's default_mutate_rows_retryable_errors. """ def __init__( @@ -196,24 +211,6 @@ def __init__( batch_retryable_errors: Sequence[type[Exception]] | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS, ): - """ - Args: - - table: Table to preform rpc calls - - flush_interval: Automatically flush every flush_interval seconds. - If None, no time-based flushing is performed. - - flush_limit_mutation_count: Flush immediately after flush_limit_mutation_count - mutations are added across all entries. If None, this limit is ignored. - - flush_limit_bytes: Flush immediately after flush_limit_bytes bytes are added. - - flow_control_max_mutation_count: Maximum number of inflight mutations. - - flow_control_max_bytes: Maximum number of inflight bytes. - - batch_operation_timeout: timeout for each mutate_rows operation, in seconds. - If TABLE_DEFAULT, defaults to the Table's default_mutate_rows_operation_timeout. - - batch_attempt_timeout: timeout for each individual request, in seconds. - If TABLE_DEFAULT, defaults to the Table's default_mutate_rows_attempt_timeout. - If None, defaults to batch_operation_timeout. - - batch_retryable_errors: a list of errors that will be retried if encountered. - Defaults to the Table's default_mutate_rows_retryable_errors. - """ self._operation_timeout, self._attempt_timeout = _get_timeouts( batch_operation_timeout, batch_attempt_timeout, table ) @@ -255,10 +252,10 @@ def _start_flush_timer(self, interval: float | None) -> asyncio.Future[None]: If interval is None, an empty future is returned Args: - - flush_interval: Automatically flush every flush_interval seconds. - If None, no time-based flushing is performed. + flush_interval: Automatically flush every flush_interval seconds. + If None, no time-based flushing is performed. 
Returns: - - asyncio.Future that represents the background task + asyncio.Future[None]: future representing the background task """ if interval is None or self.closed: empty_future: asyncio.Future[None] = asyncio.Future() @@ -282,14 +279,13 @@ async def append(self, mutation_entry: RowMutationEntry): """ Add a new set of mutations to the internal queue - TODO: return a future to track completion of this entry - Args: - - mutation_entry: new entry to add to flush queue + mutation_entry: new entry to add to flush queue Raises: - - RuntimeError if batcher is closed - - ValueError if an invalid mutation type is added + RuntimeError: if batcher is closed + ValueError: if an invalid mutation type is added """ + # TODO: return a future to track completion of this entry if self.closed: raise RuntimeError("Cannot append to closed MutationsBatcher") if isinstance(mutation_entry, Mutation): # type: ignore @@ -309,7 +305,13 @@ async def append(self, mutation_entry: RowMutationEntry): await asyncio.sleep(0) def _schedule_flush(self) -> asyncio.Future[None] | None: - """Update the flush task to include the latest staged entries""" + """ + Update the flush task to include the latest staged entries + + Returns: + asyncio.Future[None] | None: + future representing the background task, if started + """ if self._staged_entries: entries, self._staged_entries = self._staged_entries, [] self._staged_count, self._staged_bytes = 0, 0 @@ -324,7 +326,7 @@ async def _flush_internal(self, new_entries: list[RowMutationEntry]): Flushes a set of mutations to the server, and updates internal state Args: - - new_entries: list of RowMutationEntry objects to flush + new_entries list of RowMutationEntry objects to flush """ # flush new entries in_process_requests: list[asyncio.Future[list[FailedMutationEntryError]]] = [] @@ -344,12 +346,13 @@ async def _execute_mutate_rows( Helper to execute mutation operation on a batch Args: - - batch: list of RowMutationEntry objects to send to server - - timeout: timeout in seconds. Used as operation_timeout and attempt_timeout. - If not given, will use table defaults + batch: list of RowMutationEntry objects to send to server + timeout: timeout in seconds. Used as operation_timeout and attempt_timeout. + If not given, will use table defaults Returns: - - list of FailedMutationEntryError objects for mutations that failed. - FailedMutationEntryError objects will not contain index information + list[FailedMutationEntryError]: + list of FailedMutationEntryError objects for mutations that failed. + FailedMutationEntryError objects will not contain index information """ try: operation = _MutateRowsOperationAsync( @@ -376,6 +379,9 @@ def _add_exceptions(self, excs: list[Exception]): Add new list of exceptions to internal store. To avoid unbounded memory, the batcher will store the first and last _exception_list_limit exceptions, and discard any in between. 
+ + Args: + excs: list of exceptions to add to the internal store """ self._exceptions_since_last_raise += len(excs) if excs and len(self._oldest_exceptions) < self._exception_list_limit: @@ -392,7 +398,7 @@ def _raise_exceptions(self): Raise any unreported exceptions from background flush operations Raises: - - MutationsExceptionGroup with all unreported exceptions + MutationsExceptionGroup: exception group with all unreported exceptions """ if self._oldest_exceptions or self._newest_exceptions: oldest, self._oldest_exceptions = self._oldest_exceptions, [] @@ -414,11 +420,15 @@ def _raise_exceptions(self): ) async def __aenter__(self): - """For context manager API""" + """Allow use of context manager API""" return self async def __aexit__(self, exc_type, exc, tb): - """For context manager API""" + """ + Allow use of context manager API. + + Flushes the batcher and cleans up resources. + """ await self.close() async def close(self): @@ -457,11 +467,11 @@ def _create_bg_task(func, *args, **kwargs) -> asyncio.Future[Any]: with different concurrency models. Args: - - func: function to execute in background task - - *args: positional arguments to pass to func - - **kwargs: keyword arguments to pass to func + func: function to execute in background task + *args: positional arguments to pass to func + **kwargs: keyword arguments to pass to func Returns: - - Future object representing the background task + asyncio.Future: Future object representing the background task """ return asyncio.create_task(func(*args, **kwargs)) @@ -474,12 +484,13 @@ async def _wait_for_batch_results( waits for them to complete, and returns a list of errors encountered. Args: - - *tasks: futures representing _execute_mutate_rows or _flush_internal tasks + *tasks: futures representing _execute_mutate_rows or _flush_internal tasks Returns: - - list of Exceptions encountered by any of the tasks. Errors are expected - to be FailedMutationEntryError, representing a failed mutation operation. - If a task fails with a different exception, it will be included in the - output list. Successful tasks will not be represented in the output list. + list[Exception]: + list of Exceptions encountered by any of the tasks. Errors are expected + to be FailedMutationEntryError, representing a failed mutation operation. + If a task fails with a different exception, it will be included in the + output list. Successful tasks will not be represented in the output list. """ if not tasks: return [] diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_helpers.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_helpers.py index a0b13cbaf0a8..a8fba9ef109f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_helpers.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_helpers.py @@ -83,11 +83,11 @@ def _attempt_timeout_generator( at which point it will return the remaining time in the operation_timeout. Args: - - per_request_timeout: The timeout value to use for each request, in seconds. + per_request_timeout: The timeout value to use for each request, in seconds. If None, the operation_timeout will be used for each request. - - operation_timeout: The timeout value to use for the entire operationm in seconds. + operation_timeout: The timeout value to use for the entire operationm in seconds. 
Yields: - - The timeout value to use for the next request, in seonds + float: The timeout value to use for the next request, in seonds """ per_request_timeout = ( per_request_timeout if per_request_timeout is not None else operation_timeout @@ -106,12 +106,13 @@ def _retry_exception_factory( Build retry error based on exceptions encountered during operation Args: - - exc_list: list of exceptions encountered during operation - - is_timeout: whether the operation failed due to timeout - - timeout_val: the operation timeout value in seconds, for constructing + exc_list: list of exceptions encountered during operation + is_timeout: whether the operation failed due to timeout + timeout_val: the operation timeout value in seconds, for constructing the error message Returns: - - tuple of the exception to raise, and a cause exception if applicable + tuple[Exception, Exception|None]: + tuple of the exception to raise, and a cause exception if applicable """ if reason == RetryFailureReason.TIMEOUT: timeout_val_str = f"of {timeout_val:0.1f}s " if timeout_val is not None else "" @@ -144,11 +145,11 @@ def _get_timeouts( resulting timeouts are invalid. Args: - - operation: The timeout value to use for the entire operation, in seconds. - - attempt: The timeout value to use for each attempt, in seconds. - - table: The table to use for default values. + operation: The timeout value to use for the entire operation, in seconds. + attempt: The timeout value to use for each attempt, in seconds. + table: The table to use for default values. Returns: - - A tuple of (operation_timeout, attempt_timeout) + typle[float, float]: A tuple of (operation_timeout, attempt_timeout) """ # load table defaults if necessary if operation == TABLE_DEFAULT.DEFAULT: @@ -185,11 +186,11 @@ def _validate_timeouts( an exception if they are not. Args: - - operation_timeout: The timeout value to use for the entire operation, in seconds. - - attempt_timeout: The timeout value to use for each attempt, in seconds. - - allow_none: If True, attempt_timeout can be None. If False, None values will raise an exception. + operation_timeout: The timeout value to use for the entire operation, in seconds. + attempt_timeout: The timeout value to use for each attempt, in seconds. + allow_none: If True, attempt_timeout can be None. If False, None values will raise an exception. Raises: - - ValueError if operation_timeout or attempt_timeout are invalid. + ValueError: if operation_timeout or attempt_timeout are invalid. """ if operation_timeout is None: raise ValueError("operation_timeout cannot be None") @@ -206,6 +207,16 @@ def _get_retryable_errors( call_codes: Sequence["grpc.StatusCode" | int | type[Exception]] | TABLE_DEFAULT, table: "TableAsync", ) -> list[type[Exception]]: + """ + Convert passed in retryable error codes to a list of exception types. + + Args: + call_codes: The error codes to convert. Can be a list of grpc.StatusCode values, + int values, or Exception types, or a TABLE_DEFAULT value. + table: The table to use for default values. + Returns: + list[type[Exception]]: A list of exception types to retry on. 
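The timeout helper documented above behaves like a generator that hands each attempt its own budget, clamped to whatever remains of the overall operation budget. A standalone sketch of that behaviour (illustrative, not the module's actual code):

import time


def attempt_timeout_generator(per_request_timeout, operation_timeout):
    # None means "use the operation budget for every attempt"
    if per_request_timeout is None:
        per_request_timeout = operation_timeout
    deadline = time.monotonic() + operation_timeout
    while True:
        # never hand an attempt more time than is left in the operation
        yield max(0, min(per_request_timeout, deadline - time.monotonic()))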
+ """ # load table defaults if necessary if call_codes == TABLE_DEFAULT.DEFAULT: call_codes = table.default_retryable_errors diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/exceptions.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/exceptions.py index 3c73ec4e9338..8d97640aa6d8 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/exceptions.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/exceptions.py @@ -142,10 +142,12 @@ def _format_message( Format a message for the exception group Args: - - excs: the exceptions in the group - - total_entries: the total number of entries attempted, successful or not - - exc_count: the number of exceptions associated with the request - if None, this will be len(excs) + excs: the exceptions in the group + total_entries: the total number of entries attempted, successful or not + exc_count: the number of exceptions associated with the request + if None, this will be len(excs) + Returns: + str: the formatted message """ exc_count = exc_count if exc_count is not None else len(excs) entry_str = "entry" if exc_count == 1 else "entries" @@ -156,10 +158,10 @@ def __init__( ): """ Args: - - excs: the exceptions in the group - - total_entries: the total number of entries attempted, successful or not - - message: the message for the exception group. If None, a default message - will be generated + excs: the exceptions in the group + total_entries: the total number of entries attempted, successful or not + message: the message for the exception group. If None, a default message + will be generated """ message = ( message @@ -174,9 +176,11 @@ def __new__( ): """ Args: - - excs: the exceptions in the group - - total_entries: the total number of entries attempted, successful or not - - message: the message for the exception group. If None, a default message + excs: the exceptions in the group + total_entries: the total number of entries attempted, successful or not + message: the message for the exception group. If None, a default message + Returns: + MutationsExceptionGroup: the new instance """ message = ( message if message is not None else cls._format_message(excs, total_entries) @@ -200,12 +204,14 @@ def from_truncated_lists( describe the number of exceptions that were truncated. 
Args: - - first_list: the set of oldest exceptions to add to the ExceptionGroup - - last_list: the set of newest exceptions to add to the ExceptionGroup - - total_excs: the total number of exceptions associated with the request - Should be len(first_list) + len(last_list) + number of dropped exceptions - in the middle - - entry_count: the total number of entries attempted, successful or not + first_list: the set of oldest exceptions to add to the ExceptionGroup + last_list: the set of newest exceptions to add to the ExceptionGroup + total_excs: the total number of exceptions associated with the request + Should be len(first_list) + len(last_list) + number of dropped exceptions + in the middle + entry_count: the total number of entries attempted, successful or not + Returns: + MutationsExceptionGroup: the new instance """ first_count, last_count = len(first_list), len(last_list) if first_count + last_count >= total_excs: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/mutations.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/mutations.py index b5729d25e6b3..fd9b2c24e057 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/mutations.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/mutations.py @@ -33,36 +33,75 @@ class Mutation(ABC): - """Model class for mutations""" + """ + Abstract base class for mutations. + + This class defines the interface for different types of mutations that can be + applied to Bigtable rows. + """ @abstractmethod def _to_dict(self) -> dict[str, Any]: + """ + Convert the mutation to a dictionary representation. + + Returns: + dict[str, Any]: A dictionary representation of the mutation. + """ raise NotImplementedError def _to_pb(self) -> data_pb.Mutation: """ - Convert the mutation to protobuf + Convert the mutation to a protobuf representation. + + Returns: + Mutation: A protobuf representation of the mutation. """ return data_pb.Mutation(**self._to_dict()) def is_idempotent(self) -> bool: """ Check if the mutation is idempotent - If false, the mutation will not be retried + + Idempotent mutations can be safely retried on failure. + + Returns: + bool: True if the mutation is idempotent, False otherwise. """ return True def __str__(self) -> str: + """ + Return a string representation of the mutation. + + Returns: + str: A string representation of the mutation. + """ return str(self._to_dict()) def size(self) -> int: """ Get the size of the mutation in bytes + + Returns: + int: The size of the mutation in bytes. """ return getsizeof(self._to_dict()) @classmethod def _from_dict(cls, input_dict: dict[str, Any]) -> Mutation: + """ + Create a `Mutation` instance from a dictionary representation. + + Args: + input_dict (dict[str, Any]): A dictionary representation of the mutation. + + Returns: + Mutation: A Mutation instance created from the dictionary. + + Raises: + ValueError: If the input dictionary is invalid or does not represent a valid mutation type. + """ instance: Mutation | None = None try: if "set_cell" in input_dict: @@ -96,6 +135,25 @@ def _from_dict(cls, input_dict: dict[str, Any]) -> Mutation: class SetCell(Mutation): + """ + Mutation to set the value of a cell. + + Args: + family (str): The name of the column family to which the new cell belongs. + qualifier (bytes | str): The column qualifier of the new cell. + new_value (bytes | str | int): The value of the new cell. + timestamp_micros (int | None): The timestamp of the new cell. If `None`, + the current timestamp will be used. 
Timestamps will be sent with + millisecond precision. Extra precision will be truncated. If -1, the + server will assign a timestamp. Note that `SetCell` mutations with + server-side timestamps are non-idempotent operations and will not be retried. + + Raises: + TypeError: If `qualifier` is not `bytes` or `str`. + TypeError: If `new_value` is not `bytes`, `str`, or `int`. + ValueError: If `timestamp_micros` is less than `_SERVER_SIDE_TIMESTAMP`. + """ + def __init__( self, family: str, @@ -103,18 +161,6 @@ def __init__( new_value: bytes | str | int, timestamp_micros: int | None = None, ): - """ - Mutation to set the value of a cell - - Args: - - family: The name of the column family to which the new cell belongs. - - qualifier: The column qualifier of the new cell. - - new_value: The value of the new cell. str or int input will be converted to bytes - - timestamp_micros: The timestamp of the new cell. If None, the current timestamp will be used. - Timestamps will be sent with milisecond-percision. Extra precision will be truncated. - If -1, the server will assign a timestamp. Note that SetCell mutations with server-side - timestamps are non-idempotent operations and will not be retried. - """ qualifier = qualifier.encode() if isinstance(qualifier, str) else qualifier if not isinstance(qualifier, bytes): raise TypeError("qualifier must be bytes or str") @@ -142,7 +188,6 @@ def __init__( self.timestamp_micros = timestamp_micros def _to_dict(self) -> dict[str, Any]: - """Convert the mutation to a dictionary representation""" return { "set_cell": { "family_name": self.family, @@ -153,12 +198,26 @@ def _to_dict(self) -> dict[str, Any]: } def is_idempotent(self) -> bool: - """Check if the mutation is idempotent""" return self.timestamp_micros != _SERVER_SIDE_TIMESTAMP @dataclass class DeleteRangeFromColumn(Mutation): + """ + Mutation to delete a range of cells from a column. + + Args: + family (str): The name of the column family. + qualifier (bytes): The column qualifier. + start_timestamp_micros (int | None): The start timestamp of the range to + delete. `None` represents 0. Defaults to `None`. + end_timestamp_micros (int | None): The end timestamp of the range to + delete. `None` represents infinity. Defaults to `None`. + + Raises: + ValueError: If `start_timestamp_micros` is greater than `end_timestamp_micros`. + """ + family: str qualifier: bytes # None represents 0 @@ -191,6 +250,13 @@ def _to_dict(self) -> dict[str, Any]: @dataclass class DeleteAllFromFamily(Mutation): + """ + Mutation to delete all cells from a column family. + + Args: + family_to_delete (str): The name of the column family to delete. + """ + family_to_delete: str def _to_dict(self) -> dict[str, Any]: @@ -203,6 +269,10 @@ def _to_dict(self) -> dict[str, Any]: @dataclass class DeleteAllFromRow(Mutation): + """ + Mutation to delete all cells from a row. + """ + def _to_dict(self) -> dict[str, Any]: return { "delete_from_row": {}, @@ -210,6 +280,22 @@ def _to_dict(self) -> dict[str, Any]: class RowMutationEntry: + """ + A single entry in a `MutateRows` request. + + This class represents a set of mutations to apply to a specific row in a + Bigtable table. + + Args: + row_key (bytes | str): The key of the row to mutate. + mutations (Mutation | list[Mutation]): The mutation or list of mutations to apply + to the row. + + Raises: + ValueError: If `mutations` is empty or contains more than + `_MUTATE_ROWS_REQUEST_MUTATION_LIMIT` mutations. 
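Taken together, the mutation classes documented above compose into a RowMutationEntry like so (family and qualifier names are placeholders). Note how a server-assigned timestamp flips SetCell to non-idempotent; an entry containing such a mutation is not retried.

from google.cloud.bigtable.data.mutations import (
    DeleteAllFromFamily,
    DeleteAllFromRow,
    DeleteRangeFromColumn,
    RowMutationEntry,
    SetCell,
)

# timestamp_micros=-1 asks the server to assign the timestamp: not idempotent
server_timestamped = SetCell("stats", "count", b"1", timestamp_micros=-1)
print(server_timestamped.is_idempotent())  # False

entry = RowMutationEntry(
    b"key-1",
    [
        SetCell("stats", "count", 42),
        DeleteRangeFromColumn("stats", b"old-col"),
        DeleteAllFromFamily("stale"),
        DeleteAllFromRow(),
    ],
)
print(entry.is_idempotent())  # True only when every mutation is idempotent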
+ """ + def __init__(self, row_key: bytes | str, mutations: Mutation | list[Mutation]): if isinstance(row_key, str): row_key = row_key.encode("utf-8") @@ -225,29 +311,58 @@ def __init__(self, row_key: bytes | str, mutations: Mutation | list[Mutation]): self.mutations = tuple(mutations) def _to_dict(self) -> dict[str, Any]: + """ + Convert the mutation entry to a dictionary representation. + + Returns: + dict[str, Any]: A dictionary representation of the mutation entry + """ return { "row_key": self.row_key, "mutations": [mutation._to_dict() for mutation in self.mutations], } def _to_pb(self) -> types_pb.MutateRowsRequest.Entry: + """ + Convert the mutation entry to a protobuf representation. + + Returns: + MutateRowsRequest.Entry: A protobuf representation of the mutation entry. + """ return types_pb.MutateRowsRequest.Entry( row_key=self.row_key, mutations=[mutation._to_pb() for mutation in self.mutations], ) def is_idempotent(self) -> bool: - """Check if the mutation is idempotent""" + """ + Check if all mutations in the entry are idempotent. + + Returns: + bool: True if all mutations in the entry are idempotent, False otherwise. + """ return all(mutation.is_idempotent() for mutation in self.mutations) def size(self) -> int: """ - Get the size of the mutation in bytes + Get the size of the mutation entry in bytes. + + Returns: + int: The size of the mutation entry in bytes. """ return getsizeof(self._to_dict()) @classmethod def _from_dict(cls, input_dict: dict[str, Any]) -> RowMutationEntry: + """ + Create a `RowMutationEntry` instance from a dictionary representation. + + Args: + input_dict (dict[str, Any]): A dictionary representation of the mutation entry. + + Returns: + RowMutationEntry: A RowMutationEntry instance created from the dictionary. + """ return RowMutationEntry( row_key=input_dict["row_key"], mutations=[ diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/read_modify_write_rules.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/read_modify_write_rules.py index f43dbe79f175..e2d3b9f4f354 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/read_modify_write_rules.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/read_modify_write_rules.py @@ -23,6 +23,10 @@ class ReadModifyWriteRule(abc.ABC): + """ + Abstract base class for read-modify-write rules. + """ + def __init__(self, family: str, qualifier: bytes | str): qualifier = ( qualifier if isinstance(qualifier, bytes) else qualifier.encode("utf-8") @@ -39,6 +43,23 @@ def _to_pb(self) -> data_pb.ReadModifyWriteRule: class IncrementRule(ReadModifyWriteRule): + """ + Rule to increment a cell's value. + + Args: + family (str): + The family name of the cell to increment. + qualifier (bytes | str): + The qualifier of the cell to increment. + increment_amount (int): + The amount to increment the cell's value. Must be between -2**63 and 2**63 (64-bit signed int). + Raises: + TypeError: + If increment_amount is not an integer. + ValueError: + If increment_amount is not between -2**63 and 2**63 (64-bit signed int). + """ + def __init__(self, family: str, qualifier: bytes | str, increment_amount: int = 1): if not isinstance(increment_amount, int): raise TypeError("increment_amount must be an integer") @@ -58,6 +79,20 @@ def _to_dict(self) -> dict[str, str | bytes | int]: class AppendValueRule(ReadModifyWriteRule): + """ + Rule to append a value to a cell's value. + + Args: + family (str): + The family name of the cell to append to. 
+ qualifier (bytes | str): + The qualifier of the cell to append to. + append_value (bytes | str): + The value to append to the cell's value. + Raises: + TypeError: If append_value is not bytes or str. + """ + def __init__(self, family: str, qualifier: bytes | str, append_value: bytes | str): append_value = ( append_value.encode("utf-8") diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/read_rows_query.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/read_rows_query.py index 362f54c3ebfa..5e414391ce40 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/read_rows_query.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/read_rows_query.py @@ -44,15 +44,15 @@ def __init__( ): """ Args: - - start_key: The start key of the range. If empty, the range is unbounded on the left. - - end_key: The end key of the range. If empty, the range is unbounded on the right. - - start_is_inclusive: Whether the start key is inclusive. If None, the start key is + start_key: The start key of the range. If empty, the range is unbounded on the left. + end_key: The end key of the range. If empty, the range is unbounded on the right. + start_is_inclusive: Whether the start key is inclusive. If None, the start key is inclusive. - - end_is_inclusive: Whether the end key is inclusive. If None, the end key is not inclusive. + end_is_inclusive: Whether the end key is inclusive. If None, the end key is not inclusive. Raises: - - ValueError: if start_key is greater than end_key, or start_is_inclusive, - or end_is_inclusive is set when the corresponding key is None, - or start_key or end_key is not a string or bytes. + ValueError: if start_key is greater than end_key, or start_is_inclusive + ValueError: if end_is_inclusive is set when the corresponding key is None + ValueError: if start_key or end_key is not a string or bytes. """ # convert empty key inputs to None for consistency start_key = None if not start_key else start_key @@ -100,39 +100,69 @@ def start_key(self) -> bytes | None: def end_key(self) -> bytes | None: """ Returns the end key of the range. If None, the range is unbounded on the right. + + Returns: + bytes | None: The end key of the range, or None if the range is unbounded on the right. """ return self._pb.end_key_closed or self._pb.end_key_open or None @property def start_is_inclusive(self) -> bool: """ - Returns whether the range is inclusive of the start key. - Returns True if the range is unbounded on the left. + Indicates if the range is inclusive of the start key. + + If the range is unbounded on the left, this will return True. + + Returns: + bool: Whether the range is inclusive of the start key. """ return not bool(self._pb.start_key_open) @property def end_is_inclusive(self) -> bool: """ - Returns whether the range is inclusive of the end key. - Returns True if the range is unbounded on the right. + Indicates if the range is inclusive of the end key. + + If the range is unbounded on the right, this will return True. + + Returns: + bool: Whether the range is inclusive of the end key. 
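The read-modify-write rules documented above are passed to TableAsync.read_modify_write_row, which applies them in order and returns the modified cells. A sketch with placeholder ids, keys, and families:

import asyncio

from google.cloud.bigtable.data import BigtableDataClientAsync
from google.cloud.bigtable.data.read_modify_write_rules import (
    AppendValueRule,
    IncrementRule,
)


async def atomic_update(project_id, instance_id, table_id):
    async with BigtableDataClientAsync(project=project_id) as client:
        async with client.get_table(instance_id, table_id) as table:
            # non-idempotent, so never retried; rules are applied in order
            result_row = await table.read_modify_write_row(
                b"key-1",
                [
                    IncrementRule("stats", "counter", 1),
                    AppendValueRule("stats", "log", b"|visited"),
                ],
            )
            for cell in result_row:
                print(cell.family, cell.qualifier, cell.value)


asyncio.run(atomic_update("my-project", "my-instance", "my-table"))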
""" return not bool(self._pb.end_key_open) def _to_pb(self) -> RowRangePB: - """Converts this object to a protobuf""" + """ + Converts this object to a protobuf + + Returns: + RowRangePB: The protobuf representation of this object + """ return self._pb @classmethod def _from_pb(cls, data: RowRangePB) -> RowRange: - """Creates a RowRange from a protobuf""" + """ + Creates a RowRange from a protobuf + + Args: + data (RowRangePB): The protobuf to convert + Returns: + RowRange: The converted RowRange + """ instance = cls() instance._pb = data return instance @classmethod def _from_dict(cls, data: dict[str, bytes | str]) -> RowRange: - """Creates a RowRange from a protobuf""" + """ + Creates a RowRange from a protobuf + + Args: + data (dict[str, bytes | str]): The dictionary to convert + Returns: + RowRange: The converted RowRange + """ formatted_data = { k: v.encode() if isinstance(v, str) else v for k, v in data.items() } @@ -144,6 +174,9 @@ def __bool__(self) -> bool: """ Empty RowRanges (representing a full table scan) are falsy, because they can be substituted with None. Non-empty RowRanges are truthy. + + Returns: + bool: True if the RowRange is not empty, False otherwise """ return bool( self._pb.start_key_closed @@ -160,7 +193,11 @@ def __eq__(self, other: Any) -> bool: def __str__(self) -> str: """ Represent range as a string, e.g. "[b'a', b'z)" + Unbounded start or end keys are represented as "-inf" or "+inf" + + Returns: + str: The string representation of the range """ left = "[" if self.start_is_inclusive else "(" right = "]" if self.end_is_inclusive else ")" @@ -199,12 +236,12 @@ def __init__( Create a new ReadRowsQuery Args: - - row_keys: row keys to include in the query + row_keys: row keys to include in the query a query can contain multiple keys, but ranges should be preferred - - row_ranges: ranges of rows to include in the query - - limit: the maximum number of rows to return. None or 0 means no limit + row_ranges: ranges of rows to include in the query + limit: the maximum number of rows to return. 
None or 0 means no limit default: None (no limit) - - row_filter: a RowFilter to apply to the query + row_filter: a RowFilter to apply to the query """ if row_keys is None: row_keys = [] @@ -223,14 +260,34 @@ def __init__( @property def row_keys(self) -> list[bytes]: + """ + Return the row keys in this query + + Returns: + list[bytes]: the row keys in this query + """ return list(self._row_set.row_keys) @property def row_ranges(self) -> list[RowRange]: + """ + Return the row ranges in this query + + Returns: + list[RowRange]: the row ranges in this query + """ return [RowRange._from_pb(r) for r in self._row_set.row_ranges] @property def limit(self) -> int | None: + """ + Return the maximum number of rows to return by this query + + None or 0 means no limit + + Returns: + int | None: the maximum number of rows to return by this query + """ return self._limit or None @limit.setter @@ -241,11 +298,9 @@ def limit(self, new_limit: int | None): None or 0 means no limit Args: - - new_limit: the new limit to apply to this query - Returns: - - a reference to this query for chaining + new_limit: the new limit to apply to this query Raises: - - ValueError if new_limit is < 0 + ValueError: if new_limit is < 0 """ if new_limit is not None and new_limit < 0: raise ValueError("limit must be >= 0") @@ -253,6 +308,12 @@ def limit(self, new_limit: int | None): @property def filter(self) -> RowFilter | None: + """ + Return the RowFilter applied to this query + + Returns: + RowFilter | None: the RowFilter applied to this query + """ return self._filter @filter.setter @@ -261,9 +322,7 @@ def filter(self, row_filter: RowFilter | None): Set a RowFilter to apply to this query Args: - - row_filter: a RowFilter to apply to this query - Returns: - - a reference to this query for chaining + row_filter: a RowFilter to apply to this query """ self._filter = row_filter @@ -274,11 +333,9 @@ def add_key(self, row_key: str | bytes): A query can contain multiple keys, but ranges should be preferred Args: - - row_key: a key to add to this query - Returns: - - a reference to this query for chaining + row_key: a key to add to this query Raises: - - ValueError if an input is not a string or bytes + ValueError: if an input is not a string or bytes """ if isinstance(row_key, str): row_key = row_key.encode() @@ -295,7 +352,7 @@ def add_range( Add a range of row keys to this query. Args: - - row_range: a range of row keys to add to this query + row_range: a range of row keys to add to this query """ if row_range not in self.row_ranges: self._row_set.row_ranges.append(row_range._pb) @@ -305,10 +362,12 @@ def shard(self, shard_keys: RowKeySamples) -> ShardedQuery: Split this query into multiple queries that can be evenly distributed across nodes and run in parallel + Args: + shard_keys: a list of row keys that define the boundaries of segments. Returns: - - a ShardedQuery that can be used in sharded_read_rows calls + ShardedQuery: a ShardedQuery that can be used in sharded_read_rows calls Raises: - - AttributeError if the query contains a limit + AttributeError: if the query contains a limit """ if self.limit is not None: raise AttributeError("Cannot shard query with a limit") @@ -357,11 +416,11 @@ def _shard_range( segments of the key-space, determined by split_points Args: - - orig_range: a row range to split - - split_points: a list of row keys that define the boundaries of segments. + orig_range: a row range to split + split_points: a list of row keys that define the boundaries of segments. 
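The query-construction surface described above can be combined as follows; keys and range bounds are placeholders. Ranges are preferred over long key lists, and a query with a limit cannot be sharded.

from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery, RowRange

query = ReadRowsQuery(
    row_ranges=[RowRange(start_key=b"user-0000", end_key=b"user-9999")],
    limit=100,
)
query.add_key(b"meta-row")
query.add_range(RowRange(start_key=b"admin-"))  # unbounded on the right
print(query.row_keys, [str(r) for r in query.row_ranges], query.limit)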
each point represents the inclusive end of a segment Returns: - - a list of tuples, containing a segment index and a new sub-range. + list[tuple[int, RowRange]]: a list of tuples, containing a segment index and a new sub-range. """ # 1. find the index of the segment the start key belongs to if orig_range.start_key is None: @@ -446,6 +505,11 @@ def __eq__(self, other): RowRanges are equal if they have the same row keys, row ranges, filter and limit, or if they both represent a full scan with the same filter and limit + + Args: + other: the object to compare to + Returns: + bool: True if the objects are equal, False otherwise """ if not isinstance(other, ReadRowsQuery): return False diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/row.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/row.py index 13019cbdd57d..28f0260a9747 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/row.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/row.py @@ -49,6 +49,10 @@ def __init__( Row objects are not intended to be created by users. They are returned by the Bigtable backend. + + Args: + key (bytes): Row key + cells (list[Cell]): List of cells in the row """ self.row_key = key self.cells: list[Cell] = cells @@ -65,6 +69,9 @@ def _index( Returns an index of cells associated with each family and qualifier. The index is lazily created when needed + + Returns: + OrderedDict: Index of cells """ if self._index_data is None: self._index_data = OrderedDict() @@ -81,6 +88,11 @@ def _from_pb(cls, row_pb: RowPB) -> Row: Row objects are not intended to be created by users. They are returned by the Bigtable backend. + + Args: + row_pb (RowPB): Protobuf representation of the row + Returns: + Row: Row object created from the protobuf representation """ row_key: bytes = row_pb.key cell_list: list[Cell] = [] @@ -112,6 +124,14 @@ def get_cells( Can also be accessed through indexing: cells = row["family", "qualifier"] cells = row["family"] + + Args: + family: family to filter cells by + qualifier: qualifier to filter cells by + Returns: + list[Cell]: List of cells in the row matching the filter + Raises: + ValueError: If family or qualifier is not found in the row """ if family is None: if qualifier is not None: @@ -137,6 +157,13 @@ def get_cells( def _get_all_from_family(self, family: str) -> Generator[Cell, None, None]: """ Returns all cells in the row for the family_id + + Args: + family: family to filter cells by + Yields: + Cell: cells in the row for the family_id + Raises: + ValueError: If family is not found in the row """ if family not in self._index: raise ValueError(f"Family '{family}' not found in row '{self.row_key!r}'") @@ -153,6 +180,9 @@ def __str__(self) -> str: (family='fam', qualifier=b'col'): [b'value', (+1 more),], (family='fam', qualifier=b'col2'): [b'other'], } + + Returns: + str: Human-readable string representation of the row """ output = ["{"] for family, qualifier in self._get_column_components(): @@ -201,6 +231,9 @@ def _to_dict(self) -> dict[str, Any]: def __iter__(self): """ Allow iterating over all cells in the row + + Returns: + Iterator: Iterator over the cells in the row """ return iter(self.cells) @@ -210,6 +243,11 @@ def __contains__(self, item): Works for both cells in the internal list, and `family` or `(family, qualifier)` pairs associated with the cells + + Args: + item: item to check for in the row + Returns: + bool: True if item is in the row, False otherwise """ if isinstance(item, _family_type): return item in 
self._index @@ -266,7 +304,10 @@ def __getitem__(self, index): def __len__(self): """ - Implements `len()` operator + Returns the number of cells in the row + + Returns: + int: Number of cells in the row """ return len(self.cells) @@ -275,12 +316,18 @@ def _get_column_components(self) -> list[tuple[str, bytes]]: Returns a list of (family, qualifier) pairs associated with the cells Pairs can be used for indexing + + Returns: + list[tuple[str, bytes]]: List of (family, qualifier) pairs """ return [(f, q) for f in self._index for q in self._index[f]] def __eq__(self, other): """ Implements `==` operator + + Returns: + bool: True if rows are equal, False otherwise """ # for performance reasons, check row metadata # before checking individual cells @@ -307,6 +354,9 @@ def __eq__(self, other): def __ne__(self, other) -> bool: """ Implements `!=` operator + + Returns: + bool: True if rows are not equal, False otherwise """ return not self == other @@ -319,6 +369,14 @@ class Cell: Does not represent all data contained in the cell, only data returned by a query. Expected to be read-only to users, and written by backend + + Args: + value: the byte string value of the cell + row_key: the row key of the cell + family: the family associated with the cell + qualifier: the column qualifier associated with the cell + timestamp_micros: the timestamp of the cell in microseconds + labels: the list of labels associated with the cell """ __slots__ = ( @@ -339,12 +397,8 @@ def __init__( timestamp_micros: int, labels: list[str] | None = None, ): - """ - Cell constructor - - Cell objects are not intended to be constructed by users. - They are returned by the Bigtable backend. - """ + # Cell objects are not intended to be constructed by users. + # They are returned by the Bigtable backend. self.value = value self.row_key = row_key self.family = family @@ -359,6 +413,9 @@ def __int__(self) -> int: Allows casting cell to int Interprets value as a 64-bit big-endian signed integer, as expected by ReadModifyWrite increment rule + + Returns: + int: Value of the cell as a 64-bit big-endian signed integer """ return int.from_bytes(self.value, byteorder="big", signed=True) @@ -368,6 +425,9 @@ def _to_dict(self) -> dict[str, Any]: proto format https://cloud.google.com/bigtable/docs/reference/data/rpc/google.bigtable.v2#cell + + Returns: + dict: Dictionary representation of the cell """ cell_dict: dict[str, Any] = { "value": self.value, @@ -381,12 +441,18 @@ def __str__(self) -> str: """ Allows casting cell to str Prints encoded byte string, same as printing value directly. 
+ + Returns: + str: Encoded byte string of the value """ return str(self.value) def __repr__(self): """ Returns a string representation of the cell + + Returns: + str: String representation of the cell """ return f"Cell(value={self.value!r}, row_key={self.row_key!r}, family='{self.family}', qualifier={self.qualifier!r}, timestamp_micros={self.timestamp_micros}, labels={self.labels})" @@ -395,9 +461,16 @@ def __repr__(self): def __lt__(self, other) -> bool: """ Implements `<` operator + + Args: + other: Cell to compare with + Returns: + bool: True if this cell is less than the other cell, False otherwise + Raises: + NotImplementedError: If other is not a Cell """ if not isinstance(other, Cell): - return NotImplemented + raise NotImplementedError this_ordering = ( self.family, self.qualifier, @@ -417,9 +490,14 @@ def __lt__(self, other) -> bool: def __eq__(self, other) -> bool: """ Implements `==` operator + + Args: + other: Cell to compare with + Returns: + bool: True if cells are equal, False otherwise """ if not isinstance(other, Cell): - return NotImplemented + return False return ( self.row_key == other.row_key and self.family == other.family @@ -433,12 +511,20 @@ def __eq__(self, other) -> bool: def __ne__(self, other) -> bool: """ Implements `!=` operator + + Args: + other: Cell to compare with + Returns: + bool: True if cells are not equal, False otherwise """ return not self == other def __hash__(self): """ Implements `hash()` function to fingerprint cell + + Returns: + int: hash value of the cell """ return hash( ( diff --git a/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py b/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py index a0019947dc83..7593572d86c4 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py @@ -1498,7 +1498,6 @@ async def test_read_rows_timeout(self, operation_timeout): "per_request_t, operation_t, expected_num", [ (0.05, 0.08, 2), - (0.05, 0.54, 11), (0.05, 0.14, 3), (0.05, 0.24, 5), ], From 4c42301189f4b23dc3c7b586d6afe1b7874d82c2 Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Wed, 29 May 2024 16:25:40 -0700 Subject: [PATCH 792/892] chore(docs): improve write_batch async sample (#966) --- .../data_client/data_client_snippets_async.py | 36 ++++++++++++------- 1 file changed, 23 insertions(+), 13 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/snippets/data_client/data_client_snippets_async.py b/packages/google-cloud-bigtable/samples/snippets/data_client/data_client_snippets_async.py index cb51bdc78743..742e7cb8e7e2 100644 --- a/packages/google-cloud-bigtable/samples/snippets/data_client/data_client_snippets_async.py +++ b/packages/google-cloud-bigtable/samples/snippets/data_client/data_client_snippets_async.py @@ -42,24 +42,34 @@ async def write_batch(table): from google.cloud.bigtable.data import BigtableDataClientAsync from google.cloud.bigtable.data.mutations import SetCell from google.cloud.bigtable.data.mutations import RowMutationEntry + from google.cloud.bigtable.data.exceptions import MutationsExceptionGroup async def write_batch(project_id, instance_id, table_id): async with BigtableDataClientAsync(project=project_id) as client: async with client.get_table(instance_id, table_id) as table: family_id = "stats_summary" - - async with table.mutations_batcher() as batcher: - mutation_list = [ - SetCell(family_id, "connected_cell", 1), - SetCell(family_id, "connected_wifi", 1), - 
SetCell(family_id, "os_build", "12155.0.0-rc1"), - ] - batcher.append( - RowMutationEntry("tablet#a0b81f74#20190501", mutation_list) - ) - batcher.append( - RowMutationEntry("tablet#a0b81f74#20190502", mutation_list) - ) + try: + async with table.mutations_batcher() as batcher: + mutation_list = [ + SetCell(family_id, "connected_cell", 1), + SetCell(family_id, "connected_wifi", 1), + SetCell(family_id, "os_build", "12155.0.0-rc1"), + ] + # awaiting the batcher.append method adds the RowMutationEntry + # to the batcher's queue to be written in the next flush. + await batcher.append( + RowMutationEntry("tablet#a0b81f74#20190501", mutation_list) + ) + await batcher.append( + RowMutationEntry("tablet#a0b81f74#20190502", mutation_list) + ) + except MutationsExceptionGroup as e: + # MutationsExceptionGroup contains a FailedMutationEntryError for + # each mutation that failed. + for sub_exception in e.exceptions: + failed_entry: RowMutationEntry = sub_exception.entry + cause: Exception = sub_exception.__cause__ + print(f"Failed mutation: {failed_entry.row_key} with error: {cause!r}") # [END bigtable_async_writes_batch] await write_batch(table.client.project, table.instance_id, table.table_id) From b565d5f07fb85e0ad6c45a0c0f57dab1e190e51b Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Wed, 29 May 2024 16:33:14 -0700 Subject: [PATCH 793/892] chore(docs): improve devsite structure (#962) --- .../async_data_client.rst | 2 +- .../async_data_exceptions.rst | 0 .../async_data_mutations.rst | 0 .../async_data_mutations_batcher.rst | 0 .../async_data_read_modify_write_rules.rst | 0 .../async_data_read_rows_query.rst | 0 .../async_data_row.rst | 0 .../async_data_row_filters.rst | 0 .../async_data_client/async_data_table.rst | 6 + .../async_data_usage.rst | 5 +- .../docs/{ => classic_client}/app-profile.rst | 0 .../docs/{ => classic_client}/backup.rst | 0 .../docs/{ => classic_client}/batcher.rst | 0 .../{ => classic_client}/client-intro.rst | 0 .../docs/{ => classic_client}/client.rst | 0 .../docs/{ => classic_client}/cluster.rst | 0 .../{ => classic_client}/column-family.rst | 0 .../docs/{ => classic_client}/data-api.rst | 0 .../{ => classic_client}/encryption-info.rst | 0 .../{ => classic_client}/instance-api.rst | 0 .../docs/{ => classic_client}/instance.rst | 0 .../docs/{ => classic_client}/row-data.rst | 0 .../docs/{ => classic_client}/row-filters.rst | 0 .../docs/{ => classic_client}/row-set.rst | 0 .../docs/{ => classic_client}/row.rst | 0 .../docs/{ => classic_client}/snippets.py | 0 .../{ => classic_client}/snippets_table.py | 0 .../docs/{ => classic_client}/table-api.rst | 0 .../docs/{ => classic_client}/table.rst | 0 .../docs/{ => classic_client}/usage.rst | 9 +- packages/google-cloud-bigtable/docs/index.rst | 18 +- .../docs/scripts/patch_devsite_toc.py | 201 ++++++++++++++++++ packages/google-cloud-bigtable/noxfile.py | 3 + packages/google-cloud-bigtable/owlbot.py | 16 ++ 34 files changed, 241 insertions(+), 19 deletions(-) rename packages/google-cloud-bigtable/docs/{ => async_data_client}/async_data_client.rst (52%) rename packages/google-cloud-bigtable/docs/{ => async_data_client}/async_data_exceptions.rst (100%) rename packages/google-cloud-bigtable/docs/{ => async_data_client}/async_data_mutations.rst (100%) rename packages/google-cloud-bigtable/docs/{ => async_data_client}/async_data_mutations_batcher.rst (100%) rename packages/google-cloud-bigtable/docs/{ => async_data_client}/async_data_read_modify_write_rules.rst (100%) rename packages/google-cloud-bigtable/docs/{ => 
async_data_client}/async_data_read_rows_query.rst (100%) rename packages/google-cloud-bigtable/docs/{ => async_data_client}/async_data_row.rst (100%) rename packages/google-cloud-bigtable/docs/{ => async_data_client}/async_data_row_filters.rst (100%) create mode 100644 packages/google-cloud-bigtable/docs/async_data_client/async_data_table.rst rename packages/google-cloud-bigtable/docs/{ => async_data_client}/async_data_usage.rst (80%) rename packages/google-cloud-bigtable/docs/{ => classic_client}/app-profile.rst (100%) rename packages/google-cloud-bigtable/docs/{ => classic_client}/backup.rst (100%) rename packages/google-cloud-bigtable/docs/{ => classic_client}/batcher.rst (100%) rename packages/google-cloud-bigtable/docs/{ => classic_client}/client-intro.rst (100%) rename packages/google-cloud-bigtable/docs/{ => classic_client}/client.rst (100%) rename packages/google-cloud-bigtable/docs/{ => classic_client}/cluster.rst (100%) rename packages/google-cloud-bigtable/docs/{ => classic_client}/column-family.rst (100%) rename packages/google-cloud-bigtable/docs/{ => classic_client}/data-api.rst (100%) rename packages/google-cloud-bigtable/docs/{ => classic_client}/encryption-info.rst (100%) rename packages/google-cloud-bigtable/docs/{ => classic_client}/instance-api.rst (100%) rename packages/google-cloud-bigtable/docs/{ => classic_client}/instance.rst (100%) rename packages/google-cloud-bigtable/docs/{ => classic_client}/row-data.rst (100%) rename packages/google-cloud-bigtable/docs/{ => classic_client}/row-filters.rst (100%) rename packages/google-cloud-bigtable/docs/{ => classic_client}/row-set.rst (100%) rename packages/google-cloud-bigtable/docs/{ => classic_client}/row.rst (100%) rename packages/google-cloud-bigtable/docs/{ => classic_client}/snippets.py (100%) rename packages/google-cloud-bigtable/docs/{ => classic_client}/snippets_table.py (100%) rename packages/google-cloud-bigtable/docs/{ => classic_client}/table-api.rst (100%) rename packages/google-cloud-bigtable/docs/{ => classic_client}/table.rst (100%) rename packages/google-cloud-bigtable/docs/{ => classic_client}/usage.rst (91%) create mode 100644 packages/google-cloud-bigtable/docs/scripts/patch_devsite_toc.py diff --git a/packages/google-cloud-bigtable/docs/async_data_client.rst b/packages/google-cloud-bigtable/docs/async_data_client/async_data_client.rst similarity index 52% rename from packages/google-cloud-bigtable/docs/async_data_client.rst rename to packages/google-cloud-bigtable/docs/async_data_client/async_data_client.rst index 7d2901de41db..c5cc7074098b 100644 --- a/packages/google-cloud-bigtable/docs/async_data_client.rst +++ b/packages/google-cloud-bigtable/docs/async_data_client/async_data_client.rst @@ -1,6 +1,6 @@ Bigtable Data Client Async ~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. automodule:: google.cloud.bigtable.data._async.client +.. 
autoclass:: google.cloud.bigtable.data._async.client.BigtableDataClientAsync :members: :show-inheritance: diff --git a/packages/google-cloud-bigtable/docs/async_data_exceptions.rst b/packages/google-cloud-bigtable/docs/async_data_client/async_data_exceptions.rst similarity index 100% rename from packages/google-cloud-bigtable/docs/async_data_exceptions.rst rename to packages/google-cloud-bigtable/docs/async_data_client/async_data_exceptions.rst diff --git a/packages/google-cloud-bigtable/docs/async_data_mutations.rst b/packages/google-cloud-bigtable/docs/async_data_client/async_data_mutations.rst similarity index 100% rename from packages/google-cloud-bigtable/docs/async_data_mutations.rst rename to packages/google-cloud-bigtable/docs/async_data_client/async_data_mutations.rst diff --git a/packages/google-cloud-bigtable/docs/async_data_mutations_batcher.rst b/packages/google-cloud-bigtable/docs/async_data_client/async_data_mutations_batcher.rst similarity index 100% rename from packages/google-cloud-bigtable/docs/async_data_mutations_batcher.rst rename to packages/google-cloud-bigtable/docs/async_data_client/async_data_mutations_batcher.rst diff --git a/packages/google-cloud-bigtable/docs/async_data_read_modify_write_rules.rst b/packages/google-cloud-bigtable/docs/async_data_client/async_data_read_modify_write_rules.rst similarity index 100% rename from packages/google-cloud-bigtable/docs/async_data_read_modify_write_rules.rst rename to packages/google-cloud-bigtable/docs/async_data_client/async_data_read_modify_write_rules.rst diff --git a/packages/google-cloud-bigtable/docs/async_data_read_rows_query.rst b/packages/google-cloud-bigtable/docs/async_data_client/async_data_read_rows_query.rst similarity index 100% rename from packages/google-cloud-bigtable/docs/async_data_read_rows_query.rst rename to packages/google-cloud-bigtable/docs/async_data_client/async_data_read_rows_query.rst diff --git a/packages/google-cloud-bigtable/docs/async_data_row.rst b/packages/google-cloud-bigtable/docs/async_data_client/async_data_row.rst similarity index 100% rename from packages/google-cloud-bigtable/docs/async_data_row.rst rename to packages/google-cloud-bigtable/docs/async_data_client/async_data_row.rst diff --git a/packages/google-cloud-bigtable/docs/async_data_row_filters.rst b/packages/google-cloud-bigtable/docs/async_data_client/async_data_row_filters.rst similarity index 100% rename from packages/google-cloud-bigtable/docs/async_data_row_filters.rst rename to packages/google-cloud-bigtable/docs/async_data_client/async_data_row_filters.rst diff --git a/packages/google-cloud-bigtable/docs/async_data_client/async_data_table.rst b/packages/google-cloud-bigtable/docs/async_data_client/async_data_table.rst new file mode 100644 index 000000000000..a977beb6a4e3 --- /dev/null +++ b/packages/google-cloud-bigtable/docs/async_data_client/async_data_table.rst @@ -0,0 +1,6 @@ +Table Async +~~~~~~~~~~~ + +.. 
autoclass:: google.cloud.bigtable.data._async.client.TableAsync + :members: + :show-inheritance: diff --git a/packages/google-cloud-bigtable/docs/async_data_usage.rst b/packages/google-cloud-bigtable/docs/async_data_client/async_data_usage.rst similarity index 80% rename from packages/google-cloud-bigtable/docs/async_data_usage.rst rename to packages/google-cloud-bigtable/docs/async_data_client/async_data_usage.rst index c436c5988f47..8843b506bc82 100644 --- a/packages/google-cloud-bigtable/docs/async_data_usage.rst +++ b/packages/google-cloud-bigtable/docs/async_data_client/async_data_usage.rst @@ -1,10 +1,11 @@ -Using the Async Data Client -=========================== +Async Data Client +================= .. toctree:: :maxdepth: 2 async_data_client + async_data_table async_data_mutations_batcher async_data_read_rows_query async_data_row diff --git a/packages/google-cloud-bigtable/docs/app-profile.rst b/packages/google-cloud-bigtable/docs/classic_client/app-profile.rst similarity index 100% rename from packages/google-cloud-bigtable/docs/app-profile.rst rename to packages/google-cloud-bigtable/docs/classic_client/app-profile.rst diff --git a/packages/google-cloud-bigtable/docs/backup.rst b/packages/google-cloud-bigtable/docs/classic_client/backup.rst similarity index 100% rename from packages/google-cloud-bigtable/docs/backup.rst rename to packages/google-cloud-bigtable/docs/classic_client/backup.rst diff --git a/packages/google-cloud-bigtable/docs/batcher.rst b/packages/google-cloud-bigtable/docs/classic_client/batcher.rst similarity index 100% rename from packages/google-cloud-bigtable/docs/batcher.rst rename to packages/google-cloud-bigtable/docs/classic_client/batcher.rst diff --git a/packages/google-cloud-bigtable/docs/client-intro.rst b/packages/google-cloud-bigtable/docs/classic_client/client-intro.rst similarity index 100% rename from packages/google-cloud-bigtable/docs/client-intro.rst rename to packages/google-cloud-bigtable/docs/classic_client/client-intro.rst diff --git a/packages/google-cloud-bigtable/docs/client.rst b/packages/google-cloud-bigtable/docs/classic_client/client.rst similarity index 100% rename from packages/google-cloud-bigtable/docs/client.rst rename to packages/google-cloud-bigtable/docs/classic_client/client.rst diff --git a/packages/google-cloud-bigtable/docs/cluster.rst b/packages/google-cloud-bigtable/docs/classic_client/cluster.rst similarity index 100% rename from packages/google-cloud-bigtable/docs/cluster.rst rename to packages/google-cloud-bigtable/docs/classic_client/cluster.rst diff --git a/packages/google-cloud-bigtable/docs/column-family.rst b/packages/google-cloud-bigtable/docs/classic_client/column-family.rst similarity index 100% rename from packages/google-cloud-bigtable/docs/column-family.rst rename to packages/google-cloud-bigtable/docs/classic_client/column-family.rst diff --git a/packages/google-cloud-bigtable/docs/data-api.rst b/packages/google-cloud-bigtable/docs/classic_client/data-api.rst similarity index 100% rename from packages/google-cloud-bigtable/docs/data-api.rst rename to packages/google-cloud-bigtable/docs/classic_client/data-api.rst diff --git a/packages/google-cloud-bigtable/docs/encryption-info.rst b/packages/google-cloud-bigtable/docs/classic_client/encryption-info.rst similarity index 100% rename from packages/google-cloud-bigtable/docs/encryption-info.rst rename to packages/google-cloud-bigtable/docs/classic_client/encryption-info.rst diff --git a/packages/google-cloud-bigtable/docs/instance-api.rst 
b/packages/google-cloud-bigtable/docs/classic_client/instance-api.rst similarity index 100% rename from packages/google-cloud-bigtable/docs/instance-api.rst rename to packages/google-cloud-bigtable/docs/classic_client/instance-api.rst diff --git a/packages/google-cloud-bigtable/docs/instance.rst b/packages/google-cloud-bigtable/docs/classic_client/instance.rst similarity index 100% rename from packages/google-cloud-bigtable/docs/instance.rst rename to packages/google-cloud-bigtable/docs/classic_client/instance.rst diff --git a/packages/google-cloud-bigtable/docs/row-data.rst b/packages/google-cloud-bigtable/docs/classic_client/row-data.rst similarity index 100% rename from packages/google-cloud-bigtable/docs/row-data.rst rename to packages/google-cloud-bigtable/docs/classic_client/row-data.rst diff --git a/packages/google-cloud-bigtable/docs/row-filters.rst b/packages/google-cloud-bigtable/docs/classic_client/row-filters.rst similarity index 100% rename from packages/google-cloud-bigtable/docs/row-filters.rst rename to packages/google-cloud-bigtable/docs/classic_client/row-filters.rst diff --git a/packages/google-cloud-bigtable/docs/row-set.rst b/packages/google-cloud-bigtable/docs/classic_client/row-set.rst similarity index 100% rename from packages/google-cloud-bigtable/docs/row-set.rst rename to packages/google-cloud-bigtable/docs/classic_client/row-set.rst diff --git a/packages/google-cloud-bigtable/docs/row.rst b/packages/google-cloud-bigtable/docs/classic_client/row.rst similarity index 100% rename from packages/google-cloud-bigtable/docs/row.rst rename to packages/google-cloud-bigtable/docs/classic_client/row.rst diff --git a/packages/google-cloud-bigtable/docs/snippets.py b/packages/google-cloud-bigtable/docs/classic_client/snippets.py similarity index 100% rename from packages/google-cloud-bigtable/docs/snippets.py rename to packages/google-cloud-bigtable/docs/classic_client/snippets.py diff --git a/packages/google-cloud-bigtable/docs/snippets_table.py b/packages/google-cloud-bigtable/docs/classic_client/snippets_table.py similarity index 100% rename from packages/google-cloud-bigtable/docs/snippets_table.py rename to packages/google-cloud-bigtable/docs/classic_client/snippets_table.py diff --git a/packages/google-cloud-bigtable/docs/table-api.rst b/packages/google-cloud-bigtable/docs/classic_client/table-api.rst similarity index 100% rename from packages/google-cloud-bigtable/docs/table-api.rst rename to packages/google-cloud-bigtable/docs/classic_client/table-api.rst diff --git a/packages/google-cloud-bigtable/docs/table.rst b/packages/google-cloud-bigtable/docs/classic_client/table.rst similarity index 100% rename from packages/google-cloud-bigtable/docs/table.rst rename to packages/google-cloud-bigtable/docs/classic_client/table.rst diff --git a/packages/google-cloud-bigtable/docs/usage.rst b/packages/google-cloud-bigtable/docs/classic_client/usage.rst similarity index 91% rename from packages/google-cloud-bigtable/docs/usage.rst rename to packages/google-cloud-bigtable/docs/classic_client/usage.rst index de0abac9c3c8..7a47f4d4a418 100644 --- a/packages/google-cloud-bigtable/docs/usage.rst +++ b/packages/google-cloud-bigtable/docs/classic_client/usage.rst @@ -1,10 +1,15 @@ -Using the Sync Client -===================== +Classic Client +============== .. 
toctree:: :maxdepth: 2 client-intro + + instance-api + table-api + data-api + client cluster instance diff --git a/packages/google-cloud-bigtable/docs/index.rst b/packages/google-cloud-bigtable/docs/index.rst index 0f04542cc57c..4204e981d1f7 100644 --- a/packages/google-cloud-bigtable/docs/index.rst +++ b/packages/google-cloud-bigtable/docs/index.rst @@ -2,29 +2,19 @@ .. include:: multiprocessing.rst -Using the API +Client Types ------------- .. toctree:: :maxdepth: 2 - usage - async_data_usage - - -API Reference -------------- -.. toctree:: - :maxdepth: 2 - - instance-api - table-api - data-api + classic_client/usage + async_data_client/async_data_usage Changelog --------- -For a list of all ``google-cloud-datastore`` releases: +For a list of all ``google-cloud-bigtable`` releases: .. toctree:: :maxdepth: 2 diff --git a/packages/google-cloud-bigtable/docs/scripts/patch_devsite_toc.py b/packages/google-cloud-bigtable/docs/scripts/patch_devsite_toc.py new file mode 100644 index 000000000000..6338128ddb8e --- /dev/null +++ b/packages/google-cloud-bigtable/docs/scripts/patch_devsite_toc.py @@ -0,0 +1,201 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +This script will run after ``nox -s docfx`` is run. docfx is the api doc format used by +google cloud. It is described here: https://github.com/googleapis/docuploader?tab=readme-ov-file#requirements-for-docfx-yaml-tarballs. + +One of the file used by docfx is toc.yml which is used to generate the table of contents sidebar. +This script will patch file to create subfolders for each of the clients +""" + + +import yaml +import os +import shutil + +# set working directory to /docs +os.chdir(f"{os.path.dirname(os.path.abspath(__file__))}/{os.pardir}") + + +def add_sections(toc_file_path, section_list, output_file_path=None): + """ + Add new sections to the autogenerated docfx table of contents file + + Takes in a list of TocSection objects, which should point to a directory of rst files + within the main /docs directory, which represents a self-contained section of content + + :param toc_file_path: path to the autogenerated toc file + :param section_list: list of TocSection objects to add + :param output_file_path: path to save the updated toc file. 
If None, save to the input file + """ + # remove any sections that are already in the toc + remove_sections(toc_file_path, [section.title for section in section_list]) + # add new sections + current_toc = yaml.safe_load(open(toc_file_path, "r")) + for section in section_list: + print(f"Adding section {section.title}...") + current_toc[0]["items"].insert(-1, section.to_dict()) + section.copy_markdown() + # save file + if output_file_path is None: + output_file_path = toc_file_path + with open(output_file_path, "w") as f: + yaml.dump(current_toc, f) + + +def remove_sections(toc_file_path, section_list, output_file_path=None): + """ + Remove sections from the autogenerated docfx table of contents file + + Takes in a list of string section names to remove from the toc file + + :param toc_file_path: path to the autogenerated toc file + :param section_list: list of section names to remove + :param output_file_path: path to save the updated toc file. If None, save to the input file + """ + current_toc = yaml.safe_load(open(toc_file_path, "r")) + print(f"Removing sections {section_list}...") + new_items = [d for d in current_toc[0]["items"] if d["name"] not in section_list] + current_toc[0]["items"] = new_items + # save file + if output_file_path is None: + output_file_path = toc_file_path + with open(output_file_path, "w") as f: + yaml.dump(current_toc, f) + + +class TocSection: + def __init__(self, dir_name, index_file_name): + """ + :param dir_name: name of the directory containing the rst files + :param index_file_name: name of an index file within dir_name. This file + will not be included in the table of contents, but provides an ordered + list of the other files which should be included + """ + self.dir_name = dir_name + self.index_file_name = index_file_name + index_file_path = os.path.join(dir_name, index_file_name) + # find set of files referenced by the index file + with open(index_file_path, "r") as f: + self.title = f.readline().strip() + in_toc = False + self.items = [] + for line in f: + # ignore empty lines + if not line.strip(): + continue + if line.startswith(".. 
toctree::"): + in_toc = True + continue + # ignore directives + if ":" in line: + continue + if not in_toc: + continue + # bail when toc indented block is done + if not line.startswith(" ") and not line.startswith("\t"): + break + # extract entries + self.items.append(self.extract_toc_entry(line.strip())) + + def extract_toc_entry(self, file_name): + """ + Given the name of a file, extract the title and href for the toc entry, + and return as a dictionary + """ + # load the file to get the title + with open(f"{self.dir_name}/{file_name}.rst", "r") as f2: + file_title = f2.readline().strip() + return {"name": file_title, "href": f"{file_name}.md"} + + def to_dict(self): + """ + Convert the TocSection object to a dictionary that can be written to a yaml file + """ + return {"name": self.title, "items": self.items} + + def copy_markdown(self): + """ + Copy markdown files from _build/markdown/dir_name to _build/html/docfx_yaml + + This is necessary because the markdown files in sub-directories + are not copied over by the docfx build by default + """ + for file in os.listdir("_build/markdown/" + self.dir_name): + shutil.copy( + f"_build/markdown/{self.dir_name}/{file}", + f"_build/html/docfx_yaml", + ) + + +def validate_toc(toc_file_path, expected_section_list, added_sections): + current_toc = yaml.safe_load(open(toc_file_path, "r")) + # make sure the set of sections matches what we expect + found_sections = [d["name"] for d in current_toc[0]["items"]] + assert found_sections == expected_section_list + # make sure each customs ection is in the toc + for section in added_sections: + assert section.title in found_sections + # make sure each rst file in each custom section dir is listed in the toc + for section in added_sections: + items_in_toc = [ + d["items"] + for d in current_toc[0]["items"] + if d["name"] == section.title and ".rst" + ][0] + items_in_dir = [f for f in os.listdir(section.dir_name) if f.endswith(".rst")] + # subtract 1 for index + assert len(items_in_toc) == len(items_in_dir) - 1 + for file in items_in_dir: + if file != section.index_file_name: + base_name, _ = os.path.splitext(file) + assert any(d["href"] == f"{base_name}.md" for d in items_in_toc) + # make sure the markdown files are present in the docfx_yaml directory + for section in added_sections: + items_in_toc = [ + d["items"] + for d in current_toc[0]["items"] + if d["name"] == section.title and ".rst" + ][0] + md_files = [d["href"] for d in items_in_toc] + for file in md_files: + assert os.path.exists(f"_build/html/docfx_yaml/{file}") + print("Toc validation passed") + + +if __name__ == "__main__": + # Add secrtions for the async_data_client and classic_client directories + toc_path = "_build/html/docfx_yaml/toc.yml" + custom_sections = [ + TocSection( + dir_name="async_data_client", index_file_name="async_data_usage.rst" + ), + TocSection(dir_name="classic_client", index_file_name="usage.rst"), + ] + add_sections(toc_path, custom_sections) + # Remove the Bigtable section, since it has duplicated data + remove_sections(toc_path, ["Bigtable"]) + # run validation to make sure yaml is structured as we expect + validate_toc( + toc_file_path=toc_path, + expected_section_list=[ + "Overview", + "bigtable APIs", + "Changelog", + "Multiprocessing", + "Async Data Client", + "Classic Client", + ], + added_sections=custom_sections, + ) diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index f175c66da084..3ea12c187a77 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ 
b/packages/google-cloud-bigtable/noxfile.py @@ -425,6 +425,9 @@ def docfx(session): os.path.join("docs", ""), os.path.join("docs", "_build", "html", ""), ) + # Customization: Add extra sections to the table of contents for the Classic vs Async clients + session.install("pyyaml") + session.run("python", "docs/scripts/patch_devsite_toc.py") @nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS) diff --git a/packages/google-cloud-bigtable/owlbot.py b/packages/google-cloud-bigtable/owlbot.py index cde9fce64947..170bc08d4dca 100644 --- a/packages/google-cloud-bigtable/owlbot.py +++ b/packages/google-cloud-bigtable/owlbot.py @@ -213,6 +213,22 @@ def mypy(session): ) +# add customization to docfx +docfx_postprocess = """ + # Customization: Add extra sections to the table of contents for the Classic vs Async clients + session.install("pyyaml") + session.run("python", "docs/scripts/patch_devsite_toc.py") +""" + +place_before( + "noxfile.py", + "@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)\n" + "def prerelease_deps(session):", + docfx_postprocess, + escape="()" +) + + @nox.session(python=DEFAULT_PYTHON_VERSION) def lint_setup_py(session): ''', From 2e1f7ffe3491b8c31d54e398a5c0519778444de6 Mon Sep 17 00:00:00 2001 From: Amit Matsil Date: Thu, 30 May 2024 02:55:18 +0300 Subject: [PATCH 794/892] fix(backup): backup name regex (#970) Change the regular expression to match the format specified in the Bigtable Admin API documentation: https://cloud.google.com/bigtable/docs/reference/admin/rest/v2/projects.instances.clusters.backups/create --- packages/google-cloud-bigtable/google/cloud/bigtable/backup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/backup.py b/packages/google-cloud-bigtable/google/cloud/bigtable/backup.py index 6986d730a791..5b2cafc543e9 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/backup.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/backup.py @@ -28,7 +28,7 @@ r"^projects/(?P[^/]+)/" r"instances/(?P[a-z][-a-z0-9]*)/" r"clusters/(?P[a-z][-a-z0-9]*)/" - r"backups/(?P[a-z][a-z0-9_\-]*[a-z0-9])$" + r"backups/(?P[_a-zA-Z0-9][-_.a-zA-Z0-9]*)$" ) _TABLE_NAME_RE = re.compile( From e6f7f5db21931b5fadd97feb6f3a06e37b0d5f2e Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Wed, 29 May 2024 17:06:05 -0700 Subject: [PATCH 795/892] feat: Add String type with Utf8Raw encoding to Bigtable API (#968) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: Add String type with Utf8Raw encoding to Bigtable API Bigtable will allow users to configure the type of a column family with string type PiperOrigin-RevId: 636631633 Source-Link: https://github.com/googleapis/googleapis/commit/89a836483eaf7e3f8f41bde6c56831bca4b46e26 Source-Link: https://github.com/googleapis/googleapis-gen/commit/d7767007eae0fe87755b21cfe569b8779f02151c Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiZDc3NjcwMDdlYWUwZmU4Nzc1NWIyMWNmZTU2OWI4Nzc5ZjAyMTUxYyJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- .../transports/rest.py | 3 +- .../cloud/bigtable_admin_v2/types/types.py | 66 ++++++++- .../test_bigtable_instance_admin.py | 100 +++----------- 
.../test_bigtable_table_admin.py | 125 ++++-------------- .../unit/gapic/bigtable_v2/test_bigtable.py | 35 +---- 5 files changed, 115 insertions(+), 214 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py index 879702e864fb..e1737add138f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py @@ -904,8 +904,7 @@ def __call__( # Jsonify the request body body = json_format.MessageToJson( - transcoded_request["body"], - use_integers_for_enums=True, + transcoded_request["body"], use_integers_for_enums=True ) uri = transcoded_request["uri"] method = transcoded_request["method"] diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/types.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/types.py index d57d1cdf3e79..362effbabb87 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/types.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/types.py @@ -49,12 +49,12 @@ class Type(proto.Message): original typed value? Note that Bigtable will always sort data based on the raw encoded value, *not* the decoded type. - - Example: STRING values sort in the same order as their UTF-8 + - Example: BYTES values sort in the same order as their raw encodings. - Counterexample: Encoding INT64 to a fixed-width STRING does *not* preserve sort order when dealing with negative numbers. INT64(1) > INT64(-1), but STRING("-00001") > STRING("00001). - - The overall encoding chain sorts naturally if *every* link + - The overall encoding chain has this property if *every* link does. - Self-delimiting: If we concatenate two encoded values, can we @@ -65,8 +65,8 @@ class Type(proto.Message): by a sign. - Counterexample: If we concatenate two UTF-8 encoded STRINGs, we have no way to tell where the first one ends. - - The overall encoding chain is self-delimiting if *any* link - is. + - The overall encoding chain has this property if *any* link + does. - Compatibility: Which other systems have matching encoding schemes? For example, does this encoding have a GoogleSQL @@ -83,6 +83,10 @@ class Type(proto.Message): bytes_type (google.cloud.bigtable_admin_v2.types.Type.Bytes): Bytes + This field is a member of `oneof`_ ``kind``. + string_type (google.cloud.bigtable_admin_v2.types.Type.String): + String + This field is a member of `oneof`_ ``kind``. int64_type (google.cloud.bigtable_admin_v2.types.Type.Int64): Int64 @@ -137,6 +141,54 @@ class Raw(proto.Message): message="Type.Bytes.Encoding", ) + class String(proto.Message): + r"""String Values of type ``String`` are stored in + ``Value.string_value``. + + Attributes: + encoding (google.cloud.bigtable_admin_v2.types.Type.String.Encoding): + The encoding to use when converting to/from + lower level types. + """ + + class Encoding(proto.Message): + r"""Rules used to convert to/from lower level types. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + utf8_raw (google.cloud.bigtable_admin_v2.types.Type.String.Encoding.Utf8Raw): + Use ``Utf8Raw`` encoding. + + This field is a member of `oneof`_ ``encoding``. 
+ """ + + class Utf8Raw(proto.Message): + r"""UTF-8 encoding + + - Natural sort? No (ASCII characters only) + - Self-delimiting? No + - Compatibility? + + - BigQuery Federation ``TEXT`` encoding + - HBase ``Bytes.toBytes`` + - Java ``String#getBytes(StandardCharsets.UTF_8)`` + + """ + + utf8_raw: "Type.String.Encoding.Utf8Raw" = proto.Field( + proto.MESSAGE, + number=1, + oneof="encoding", + message="Type.String.Encoding.Utf8Raw", + ) + + encoding: "Type.String.Encoding" = proto.Field( + proto.MESSAGE, + number=1, + message="Type.String.Encoding", + ) + class Int64(proto.Message): r"""Int64 Values of type ``Int64`` are stored in ``Value.int_value``. @@ -250,6 +302,12 @@ class Sum(proto.Message): oneof="kind", message=Bytes, ) + string_type: String = proto.Field( + proto.MESSAGE, + number=2, + oneof="kind", + message=String, + ) int64_type: Int64 = proto.Field( proto.MESSAGE, number=5, diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index 9a418047fa9e..e0de275cc153 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -9652,10 +9652,7 @@ def test_create_instance_rest_required_fields( request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped @@ -9981,10 +9978,7 @@ def test_get_instance_rest_required_fields( request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped @@ -10287,10 +10281,7 @@ def test_list_instances_rest_required_fields( request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped @@ -10603,10 +10594,7 @@ def test_update_instance_rest_required_fields(request_type=instance.Instance): request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped @@ -10930,10 +10918,7 @@ def test_partial_update_instance_rest_required_fields( request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped @@ -11237,10 +11222,7 @@ def test_delete_instance_rest_required_fields( request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + 
json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped @@ -11615,10 +11597,7 @@ def test_create_cluster_rest_required_fields( request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped @@ -11953,10 +11932,7 @@ def test_get_cluster_rest_required_fields( request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped @@ -12260,10 +12236,7 @@ def test_list_clusters_rest_required_fields( request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped @@ -12825,10 +12798,7 @@ def test_partial_update_cluster_rest_required_fields( request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped @@ -13137,10 +13107,7 @@ def test_delete_cluster_rest_required_fields( request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped @@ -13523,10 +13490,7 @@ def test_create_app_profile_rest_required_fields( request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped @@ -13871,10 +13835,7 @@ def test_get_app_profile_rest_required_fields( request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped @@ -14181,10 +14142,7 @@ def test_list_app_profiles_rest_required_fields( request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped @@ -14653,10 +14611,7 @@ def test_update_app_profile_rest_required_fields( request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped @@ 
-14984,10 +14939,7 @@ def test_delete_app_profile_rest_required_fields( request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped @@ -15302,10 +15254,7 @@ def test_get_iam_policy_rest_required_fields( request = request_type(**request_init) pb_request = request jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped @@ -15601,10 +15550,7 @@ def test_set_iam_policy_rest_required_fields( request = request_type(**request_init) pb_request = request jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped @@ -15911,10 +15857,7 @@ def test_test_iam_permissions_rest_required_fields( request = request_type(**request_init) pb_request = request jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped @@ -16227,10 +16170,7 @@ def test_list_hot_tablets_rest_required_fields( request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index 455ec88d8fbd..9676ce4fa6a8 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -13496,10 +13496,7 @@ def test_create_table_rest_required_fields( request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped @@ -13822,10 +13819,7 @@ def test_create_table_from_snapshot_rest_required_fields( request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped @@ -14146,10 +14140,7 @@ def test_list_tables_rest_required_fields( request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped @@ -14529,10 +14520,7 @@ def test_get_table_rest_required_fields( request = request_type(**request_init) 
pb_request = request_type.pb(request) jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped @@ -14920,10 +14908,7 @@ def test_update_table_rest_required_fields( request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped @@ -15231,10 +15216,7 @@ def test_delete_table_rest_required_fields( request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped @@ -15522,10 +15504,7 @@ def test_undelete_table_rest_required_fields( request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped @@ -17659,10 +17638,7 @@ def test_modify_column_families_rest_required_fields( request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped @@ -17978,10 +17954,7 @@ def test_drop_row_range_rest_required_fields( request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped @@ -18224,10 +18197,7 @@ def test_generate_consistency_token_rest_required_fields( request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped @@ -18541,10 +18511,7 @@ def test_check_consistency_rest_required_fields( request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped @@ -18866,10 +18833,7 @@ def test_snapshot_table_rest_required_fields( request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped @@ -19199,10 +19163,7 @@ def test_get_snapshot_rest_required_fields( request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - 
use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped @@ -19507,10 +19468,7 @@ def test_list_snapshots_rest_required_fields( request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped @@ -19886,10 +19844,7 @@ def test_delete_snapshot_rest_required_fields( request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped @@ -20274,10 +20229,7 @@ def test_create_backup_rest_required_fields( request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped @@ -20617,10 +20569,7 @@ def test_get_backup_rest_required_fields( request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped @@ -21027,10 +20976,7 @@ def test_update_backup_rest_required_fields( request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped @@ -21345,10 +21291,7 @@ def test_delete_backup_rest_required_fields( request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped @@ -21642,10 +21585,7 @@ def test_list_backups_rest_required_fields( request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped @@ -22028,10 +21968,7 @@ def test_restore_table_rest_required_fields( request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped @@ -22289,10 +22226,7 @@ def test_copy_backup_rest_required_fields( request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped 
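The substantive change in this commit is the new Type.String message with its Utf8Raw encoding, added in types.py above; per the commit message, it is what lets a column family be configured to store UTF-8 string values. A minimal sketch of how the nested proto-plus messages fit together, assuming Type is re-exported from google.cloud.bigtable_admin_v2.types as the docstrings reference it:

    # Illustrative sketch: construct the new string value type from the
    # nested messages defined above (Type -> String -> Encoding -> Utf8Raw).
    from google.cloud.bigtable_admin_v2.types import Type

    string_type = Type(
        string_type=Type.String(
            encoding=Type.String.Encoding(
                utf8_raw=Type.String.Encoding.Utf8Raw(),
            ),
        ),
    )

The surrounding test hunks, like the ones above and below, only collapse the json_format.MessageToJson call back onto a single line.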
@@ -22617,10 +22551,7 @@ def test_get_iam_policy_rest_required_fields( request = request_type(**request_init) pb_request = request jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped @@ -22918,10 +22849,7 @@ def test_set_iam_policy_rest_required_fields( request = request_type(**request_init) pb_request = request jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped @@ -23230,10 +23158,7 @@ def test_test_iam_permissions_rest_required_fields( request = request_type(**request_init) pb_request = request jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py index 5a62b3dfaa80..4d8a6ec6b8a8 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py @@ -5328,10 +5328,7 @@ def test_mutate_row_rest_required_fields(request_type=bigtable.MutateRowRequest) request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped @@ -5656,10 +5653,7 @@ def test_mutate_rows_rest_required_fields(request_type=bigtable.MutateRowsReques request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped @@ -5972,10 +5966,7 @@ def test_check_and_mutate_row_rest_required_fields( request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped @@ -6320,10 +6311,7 @@ def test_ping_and_warm_rest_required_fields(request_type=bigtable.PingAndWarmReq request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped @@ -6625,10 +6613,7 @@ def test_read_modify_write_row_rest_required_fields( request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped @@ -6957,10 +6942,7 @@ def 
test_generate_initial_change_stream_partitions_rest_required_fields( request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped @@ -7300,10 +7282,7 @@ def test_read_change_stream_rest_required_fields( request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( - json_format.MessageToJson( - pb_request, - use_integers_for_enums=False, - ) + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) # verify fields with default values are dropped From dab1ae61efb6c8d223a956aeee8bf030e7a0194f Mon Sep 17 00:00:00 2001 From: Igor Bernstein Date: Tue, 4 Jun 2024 10:05:47 -0400 Subject: [PATCH 796/892] chore(docs): add missing quickstart samples for asyncio (#974) --- .../samples/quickstart/main_async.py | 61 +++++++++++++++ .../samples/quickstart/main_async_test.py | 78 +++++++++++++++++++ 2 files changed, 139 insertions(+) create mode 100644 packages/google-cloud-bigtable/samples/quickstart/main_async.py create mode 100644 packages/google-cloud-bigtable/samples/quickstart/main_async_test.py diff --git a/packages/google-cloud-bigtable/samples/quickstart/main_async.py b/packages/google-cloud-bigtable/samples/quickstart/main_async.py new file mode 100644 index 000000000000..c38985592e42 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/quickstart/main_async.py @@ -0,0 +1,61 @@ +#!/usr/bin/env python + +# Copyright 2024 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# [START bigtable_quickstart_asyncio] +import argparse +import asyncio + +from google.cloud.bigtable.data import BigtableDataClientAsync + + +async def main(project_id="project-id", instance_id="instance-id", table_id="my-table"): + # Create a Cloud Bigtable client. + client = BigtableDataClientAsync(project=project_id) + + # Open an existing table. + table = client.get_table(instance_id, table_id) + + row_key = "r1" + row = await table.read_row(row_key) + + column_family_id = "cf1" + column_id = b"c1" + value = row.get_cells(column_family_id, column_id)[0].value.decode("utf-8") + + await table.close() + await client.close() + + print("Row key: {}\nData: {}".format(row_key, value)) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + parser.add_argument("project_id", help="Your Cloud Platform project ID.") + parser.add_argument( + "instance_id", help="ID of the Cloud Bigtable instance to connect to." 
+ ) + parser.add_argument( + "--table", help="Existing table used in the quickstart.", default="my-table" + ) + + args = parser.parse_args() + asyncio.get_event_loop().run_until_complete( + main(args.project_id, args.instance_id, args.table) + ) + +# [END bigtable_quickstart_asyncio] diff --git a/packages/google-cloud-bigtable/samples/quickstart/main_async_test.py b/packages/google-cloud-bigtable/samples/quickstart/main_async_test.py new file mode 100644 index 000000000000..26a09b1f1fd2 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/quickstart/main_async_test.py @@ -0,0 +1,78 @@ +# Copyright 2024 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +from typing import AsyncGenerator + +from google.cloud.bigtable.data import BigtableDataClientAsync, SetCell +import pytest, pytest_asyncio + +from main_async import main + + +PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] +BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"] +TABLE_ID_FORMAT = "quickstart-test-{}" + + +@pytest_asyncio.fixture +async def table_id() -> AsyncGenerator[str, None]: + table_id = _create_table() + await _populate_table(table_id) + + yield table_id + + _delete_table(table_id) + + +def _create_table(): + from google.cloud import bigtable + import uuid + + client = bigtable.Client(project=PROJECT, admin=True) + instance = client.instance(BIGTABLE_INSTANCE) + + table_id = TABLE_ID_FORMAT.format(uuid.uuid4().hex[:8]) + table = instance.table(table_id) + if table.exists(): + table.delete() + + table.create(column_families={"cf1": None}) + + client.close() + return table_id + + +async def _populate_table(table_id: str): + async with BigtableDataClientAsync(project=PROJECT) as client: + async with client.get_table(BIGTABLE_INSTANCE, table_id) as table: + await table.mutate_row("r1", SetCell("cf1", "c1", "test-value")) + + +def _delete_table(table_id: str): + from google.cloud import bigtable + + client = bigtable.Client(project=PROJECT, admin=True) + instance = client.instance(BIGTABLE_INSTANCE) + table = instance.table(table_id) + table.delete() + client.close() + + +@pytest.mark.asyncio +async def test_main(capsys, table_id): + await main(PROJECT, BIGTABLE_INSTANCE, table_id) + + out, _ = capsys.readouterr() + assert "Row key: r1\nData: test-value\n" in out From e5d779492be614f935cf07040f4dbb82a1b05703 Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Tue, 4 Jun 2024 07:06:43 -0700 Subject: [PATCH 797/892] chore(tests): fix async filter sample tests (#976) --- .../snippets/filters/filter_snippets_async.py | 130 +++++++++++++----- .../filters/filter_snippets_async_test.py | 6 +- .../samples/snippets/filters/filters_test.py | 5 +- .../samples/snippets/filters/noxfile.py | 16 +-- .../snippets/filters/requirements-test.txt | 1 + .../samples/snippets/filters/requirements.txt | 2 +- 6 files changed, 107 insertions(+), 53 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/filter_snippets_async.py 
b/packages/google-cloud-bigtable/samples/snippets/filters/filter_snippets_async.py index 72dac824dc2e..e47bbb3fb2fd 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/filter_snippets_async.py +++ b/packages/google-cloud-bigtable/samples/snippets/filters/filter_snippets_async.py @@ -11,15 +11,17 @@ # See the License for the specific language governing permissions and # limitations under the License. -import datetime -from google.cloud.bigtable.data import Row from google.cloud._helpers import _datetime_from_microseconds +from google.cloud.bigtable.data import Row # [START bigtable_filters_limit_row_sample_asyncio] async def filter_limit_row_sample(project_id, instance_id, table_id): - from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery - from google.cloud.bigtable.data import row_filters + from google.cloud.bigtable.data import ( + BigtableDataClientAsync, + ReadRowsQuery, + row_filters, + ) query = ReadRowsQuery(row_filter=row_filters.RowSampleFilter(0.75)) @@ -32,8 +34,11 @@ async def filter_limit_row_sample(project_id, instance_id, table_id): # [END bigtable_filters_limit_row_sample_asyncio] # [START bigtable_filters_limit_row_regex_asyncio] async def filter_limit_row_regex(project_id, instance_id, table_id): - from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery - from google.cloud.bigtable.data import row_filters + from google.cloud.bigtable.data import ( + BigtableDataClientAsync, + ReadRowsQuery, + row_filters, + ) query = ReadRowsQuery( row_filter=row_filters.RowKeyRegexFilter(".*#20190501$".encode("utf-8")) @@ -48,8 +53,11 @@ async def filter_limit_row_regex(project_id, instance_id, table_id): # [END bigtable_filters_limit_row_regex_asyncio] # [START bigtable_filters_limit_cells_per_col_asyncio] async def filter_limit_cells_per_col(project_id, instance_id, table_id): - from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery - from google.cloud.bigtable.data import row_filters + from google.cloud.bigtable.data import ( + BigtableDataClientAsync, + ReadRowsQuery, + row_filters, + ) query = ReadRowsQuery(row_filter=row_filters.CellsColumnLimitFilter(2)) @@ -62,8 +70,11 @@ async def filter_limit_cells_per_col(project_id, instance_id, table_id): # [END bigtable_filters_limit_cells_per_col_asyncio] # [START bigtable_filters_limit_cells_per_row_asyncio] async def filter_limit_cells_per_row(project_id, instance_id, table_id): - from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery - from google.cloud.bigtable.data import row_filters + from google.cloud.bigtable.data import ( + BigtableDataClientAsync, + ReadRowsQuery, + row_filters, + ) query = ReadRowsQuery(row_filter=row_filters.CellsRowLimitFilter(2)) @@ -76,8 +87,11 @@ async def filter_limit_cells_per_row(project_id, instance_id, table_id): # [END bigtable_filters_limit_cells_per_row_asyncio] # [START bigtable_filters_limit_cells_per_row_offset_asyncio] async def filter_limit_cells_per_row_offset(project_id, instance_id, table_id): - from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery - from google.cloud.bigtable.data import row_filters + from google.cloud.bigtable.data import ( + BigtableDataClientAsync, + ReadRowsQuery, + row_filters, + ) query = ReadRowsQuery(row_filter=row_filters.CellsRowOffsetFilter(2)) @@ -90,8 +104,11 @@ async def filter_limit_cells_per_row_offset(project_id, instance_id, table_id): # [END bigtable_filters_limit_cells_per_row_offset_asyncio] # [START 
bigtable_filters_limit_col_family_regex_asyncio] async def filter_limit_col_family_regex(project_id, instance_id, table_id): - from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery - from google.cloud.bigtable.data import row_filters + from google.cloud.bigtable.data import ( + BigtableDataClientAsync, + ReadRowsQuery, + row_filters, + ) query = ReadRowsQuery( row_filter=row_filters.FamilyNameRegexFilter("stats_.*$".encode("utf-8")) @@ -106,8 +123,11 @@ async def filter_limit_col_family_regex(project_id, instance_id, table_id): # [END bigtable_filters_limit_col_family_regex_asyncio] # [START bigtable_filters_limit_col_qualifier_regex_asyncio] async def filter_limit_col_qualifier_regex(project_id, instance_id, table_id): - from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery - from google.cloud.bigtable.data import row_filters + from google.cloud.bigtable.data import ( + BigtableDataClientAsync, + ReadRowsQuery, + row_filters, + ) query = ReadRowsQuery( row_filter=row_filters.ColumnQualifierRegexFilter( @@ -124,8 +144,11 @@ async def filter_limit_col_qualifier_regex(project_id, instance_id, table_id): # [END bigtable_filters_limit_col_qualifier_regex_asyncio] # [START bigtable_filters_limit_col_range_asyncio] async def filter_limit_col_range(project_id, instance_id, table_id): - from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery - from google.cloud.bigtable.data import row_filters + from google.cloud.bigtable.data import ( + BigtableDataClientAsync, + ReadRowsQuery, + row_filters, + ) query = ReadRowsQuery( row_filter=row_filters.ColumnRangeFilter( @@ -142,8 +165,11 @@ async def filter_limit_col_range(project_id, instance_id, table_id): # [END bigtable_filters_limit_col_range_asyncio] # [START bigtable_filters_limit_value_range_asyncio] async def filter_limit_value_range(project_id, instance_id, table_id): - from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery - from google.cloud.bigtable.data import row_filters + from google.cloud.bigtable.data import ( + BigtableDataClientAsync, + ReadRowsQuery, + row_filters, + ) query = ReadRowsQuery( row_filter=row_filters.ValueRangeFilter(b"PQ2A.190405", b"PQ2A.190406") @@ -160,8 +186,11 @@ async def filter_limit_value_range(project_id, instance_id, table_id): async def filter_limit_value_regex(project_id, instance_id, table_id): - from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery - from google.cloud.bigtable.data import row_filters + from google.cloud.bigtable.data import ( + BigtableDataClientAsync, + ReadRowsQuery, + row_filters, + ) query = ReadRowsQuery( row_filter=row_filters.ValueRegexFilter("PQ2A.*$".encode("utf-8")) @@ -177,8 +206,12 @@ async def filter_limit_value_regex(project_id, instance_id, table_id): # [START bigtable_filters_limit_timestamp_range_asyncio] async def filter_limit_timestamp_range(project_id, instance_id, table_id): import datetime - from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery - from google.cloud.bigtable.data import row_filters + + from google.cloud.bigtable.data import ( + BigtableDataClientAsync, + ReadRowsQuery, + row_filters, + ) end = datetime.datetime(2019, 5, 1) @@ -193,8 +226,11 @@ async def filter_limit_timestamp_range(project_id, instance_id, table_id): # [END bigtable_filters_limit_timestamp_range_asyncio] # [START bigtable_filters_limit_block_all_asyncio] async def filter_limit_block_all(project_id, instance_id, table_id): - from 
google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery - from google.cloud.bigtable.data import row_filters + from google.cloud.bigtable.data import ( + BigtableDataClientAsync, + ReadRowsQuery, + row_filters, + ) query = ReadRowsQuery(row_filter=row_filters.BlockAllFilter(True)) @@ -207,8 +243,11 @@ async def filter_limit_block_all(project_id, instance_id, table_id): # [END bigtable_filters_limit_block_all_asyncio] # [START bigtable_filters_limit_pass_all_asyncio] async def filter_limit_pass_all(project_id, instance_id, table_id): - from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery - from google.cloud.bigtable.data import row_filters + from google.cloud.bigtable.data import ( + BigtableDataClientAsync, + ReadRowsQuery, + row_filters, + ) query = ReadRowsQuery(row_filter=row_filters.PassAllFilter(True)) @@ -221,8 +260,11 @@ async def filter_limit_pass_all(project_id, instance_id, table_id): # [END bigtable_filters_limit_pass_all_asyncio] # [START bigtable_filters_modify_strip_value_asyncio] async def filter_modify_strip_value(project_id, instance_id, table_id): - from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery - from google.cloud.bigtable.data import row_filters + from google.cloud.bigtable.data import ( + BigtableDataClientAsync, + ReadRowsQuery, + row_filters, + ) query = ReadRowsQuery(row_filter=row_filters.StripValueTransformerFilter(True)) @@ -235,8 +277,11 @@ async def filter_modify_strip_value(project_id, instance_id, table_id): # [END bigtable_filters_modify_strip_value_asyncio] # [START bigtable_filters_modify_apply_label_asyncio] async def filter_modify_apply_label(project_id, instance_id, table_id): - from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery - from google.cloud.bigtable.data import row_filters + from google.cloud.bigtable.data import ( + BigtableDataClientAsync, + ReadRowsQuery, + row_filters, + ) query = ReadRowsQuery(row_filter=row_filters.ApplyLabelFilter(label="labelled")) @@ -249,8 +294,11 @@ async def filter_modify_apply_label(project_id, instance_id, table_id): # [END bigtable_filters_modify_apply_label_asyncio] # [START bigtable_filters_composing_chain_asyncio] async def filter_composing_chain(project_id, instance_id, table_id): - from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery - from google.cloud.bigtable.data import row_filters + from google.cloud.bigtable.data import ( + BigtableDataClientAsync, + ReadRowsQuery, + row_filters, + ) query = ReadRowsQuery( row_filter=row_filters.RowFilterChain( @@ -270,8 +318,11 @@ async def filter_composing_chain(project_id, instance_id, table_id): # [END bigtable_filters_composing_chain_asyncio] # [START bigtable_filters_composing_interleave_asyncio] async def filter_composing_interleave(project_id, instance_id, table_id): - from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery - from google.cloud.bigtable.data import row_filters + from google.cloud.bigtable.data import ( + BigtableDataClientAsync, + ReadRowsQuery, + row_filters, + ) query = ReadRowsQuery( row_filter=row_filters.RowFilterUnion( @@ -291,8 +342,11 @@ async def filter_composing_interleave(project_id, instance_id, table_id): # [END bigtable_filters_composing_interleave_asyncio] # [START bigtable_filters_composing_condition_asyncio] async def filter_composing_condition(project_id, instance_id, table_id): - from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery - from 
google.cloud.bigtable.data import row_filters + from google.cloud.bigtable.data import ( + BigtableDataClientAsync, + ReadRowsQuery, + row_filters, + ) query = ReadRowsQuery( row_filter=row_filters.ConditionalRowFilter( diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/filter_snippets_async_test.py b/packages/google-cloud-bigtable/samples/snippets/filters/filter_snippets_async_test.py index 18c93102d17e..76751feafc61 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/filter_snippets_async_test.py +++ b/packages/google-cloud-bigtable/samples/snippets/filters/filter_snippets_async_test.py @@ -14,7 +14,6 @@ import datetime import os -import time import inspect from typing import AsyncGenerator @@ -26,7 +25,6 @@ from . import filter_snippets_async from google.cloud._helpers import ( _microseconds_from_datetime, - _datetime_from_microseconds, ) PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] @@ -77,7 +75,7 @@ async def _populate_table(table_id): timestamp = datetime.datetime(2019, 5, 1) timestamp_minus_hr = timestamp - datetime.timedelta(hours=1) - async with (BigtableDataClientAsync(project=PROJECT) as client): + async with BigtableDataClientAsync(project=PROJECT) as client: async with client.get_table(BIGTABLE_INSTANCE, table_id) as table: async with table.mutations_batcher() as batcher: await batcher.append( @@ -257,6 +255,8 @@ async def _populate_table(table_id): def _datetime_to_micros(value: datetime.datetime) -> int: """Uses the same conversion rules as the old client in""" + import calendar + import datetime as dt if not value.tzinfo: value = value.replace(tzinfo=datetime.timezone.utc) # Regardless of what timezone is on the value, convert it to UTC. diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/filters_test.py b/packages/google-cloud-bigtable/samples/snippets/filters/filters_test.py index aedd8f08d9ba..a849320395aa 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/filters_test.py +++ b/packages/google-cloud-bigtable/samples/snippets/filters/filters_test.py @@ -13,17 +13,16 @@ import datetime +import inspect import os import time import uuid -import inspect from google.cloud import bigtable import pytest -from .snapshots.snap_filters_test import snapshots from . import filter_snippets - +from .snapshots.snap_filters_test import snapshots PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"] diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py index 483b55901791..c36d5f2d81f3 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py @@ -22,7 +22,6 @@ import nox - # WARNING - WARNING - WARNING - WARNING - WARNING # WARNING - WARNING - WARNING - WARNING - WARNING # DO NOT EDIT THIS FILE EVER! 
@@ -160,6 +159,7 @@ def blacken(session: nox.sessions.Session) -> None: # format = isort + black # + @nox.session def format(session: nox.sessions.Session) -> None: """ @@ -187,7 +187,9 @@ def _session_tests( session: nox.sessions.Session, post_install: Callable = None ) -> None: # check for presence of tests - test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob("**/test_*.py", recursive=True) + test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob( + "**/test_*.py", recursive=True + ) test_list.extend(glob.glob("**/tests", recursive=True)) if len(test_list) == 0: @@ -209,9 +211,7 @@ def _session_tests( if os.path.exists("requirements-test.txt"): if os.path.exists("constraints-test.txt"): - session.install( - "-r", "requirements-test.txt", "-c", "constraints-test.txt" - ) + session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") else: session.install("-r", "requirements-test.txt") with open("requirements-test.txt") as rtfile: @@ -224,9 +224,9 @@ def _session_tests( post_install(session) if "pytest-parallel" in packages: - concurrent_args.extend(['--workers', 'auto', '--tests-per-worker', 'auto']) + concurrent_args.extend(["--workers", "auto", "--tests-per-worker", "auto"]) elif "pytest-xdist" in packages: - concurrent_args.extend(['-n', 'auto']) + concurrent_args.extend(["-n", "auto"]) session.run( "pytest", @@ -256,7 +256,7 @@ def py(session: nox.sessions.Session) -> None: def _get_repo_root() -> Optional[str]: - """ Returns the root folder of the project. """ + """Returns the root folder of the project.""" # Get root of this repository. Assume we don't have directories nested deeper than 10 items. p = Path(os.getcwd()) for i in range(10): diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt index cb87efc0ff71..5cb431d92b98 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt @@ -1 +1,2 @@ pytest==7.4.4 +pytest-asyncio diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt index 6dc98589311e..835e1bc780c5 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.22.0 +google-cloud-bigtable==2.23.0 From 31d96c9d85a4ba518fffa871dfc0a03d612d1393 Mon Sep 17 00:00:00 2001 From: Igor Bernstein Date: Tue, 4 Jun 2024 10:07:35 -0400 Subject: [PATCH 798/892] chore(docs): add missing samples for async deletes (#973) --- .../snippets/deletes/deletes_async_test.py | 291 ++++++++++++++++++ .../deletes/deletes_snippets_async.py | 113 +++++++ 2 files changed, 404 insertions(+) create mode 100644 packages/google-cloud-bigtable/samples/snippets/deletes/deletes_async_test.py create mode 100644 packages/google-cloud-bigtable/samples/snippets/deletes/deletes_snippets_async.py diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/deletes_async_test.py b/packages/google-cloud-bigtable/samples/snippets/deletes/deletes_async_test.py new file mode 100644 index 000000000000..8770733b4b74 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/deletes_async_test.py @@ -0,0 +1,291 @@ +# Copyright 2024, Google LLC + +# Licensed under the Apache License, 
Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import datetime +import os +from typing import AsyncGenerator + +from google.cloud._helpers import _microseconds_from_datetime +import pytest, pytest_asyncio + +import deletes_snippets_async + +PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] +BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"] +TABLE_ID_PREFIX = "mobile-time-series-{}" + + +@pytest_asyncio.fixture +async def table_id() -> AsyncGenerator[str, None]: + table_id = _create_table() + await _populate_table(table_id) + yield table_id + _delete_table(table_id) + + +def _create_table(): + from google.cloud import bigtable + import uuid + + client = bigtable.Client(project=PROJECT, admin=True) + instance = client.instance(BIGTABLE_INSTANCE) + + table_id = TABLE_ID_PREFIX.format(str(uuid.uuid4())[:16]) + table = instance.table(table_id) + if table.exists(): + table.delete() + + table.create(column_families={"stats_summary": None, "cell_plan": None}) + client.close() + return table_id + + +def _delete_table(table_id: str): + from google.cloud import bigtable + + client = bigtable.Client(project=PROJECT, admin=True) + instance = client.instance(BIGTABLE_INSTANCE) + table = instance.table(table_id) + table.delete() + client.close() + + +async def _populate_table(table_id): + from google.cloud.bigtable.data import ( + BigtableDataClientAsync, + RowMutationEntry, + SetCell, + ) + + timestamp = datetime.datetime(2019, 5, 1) + timestamp_minus_hr = timestamp - datetime.timedelta(hours=1) + + async with (BigtableDataClientAsync(project=PROJECT) as client): + async with client.get_table(BIGTABLE_INSTANCE, table_id) as table: + async with table.mutations_batcher() as batcher: + await batcher.append( + RowMutationEntry( + "phone#4c410523#20190501", + [ + SetCell( + "stats_summary", + "connected_cell", + 1, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "connected_cell", + 1, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "connected_wifi", + 1, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "os_build", + "PQ2A.190405.003", + _microseconds_from_datetime(timestamp), + ), + SetCell( + "cell_plan", + "data_plan_01gb", + "true", + _microseconds_from_datetime(timestamp_minus_hr), + ), + SetCell( + "cell_plan", + "data_plan_01gb", + "false", + _microseconds_from_datetime(timestamp), + ), + SetCell( + "cell_plan", + "data_plan_05gb", + "true", + _microseconds_from_datetime(timestamp), + ), + ], + ) + ) + await batcher.append( + RowMutationEntry( + "phone#4c410523#20190502", + [ + SetCell( + "stats_summary", + "connected_cell", + 1, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "connected_wifi", + 1, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "os_build", + "PQ2A.190405.004", + _microseconds_from_datetime(timestamp), + ), + SetCell( + "cell_plan", + "data_plan_05gb", + "true", + _microseconds_from_datetime(timestamp), + ), + ], + ) + ) + await batcher.append( + RowMutationEntry( + 
"phone#4c410523#20190505", + [ + SetCell( + "stats_summary", + "connected_cell", + 0, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "connected_wifi", + 1, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "os_build", + "PQ2A.190406.000", + _microseconds_from_datetime(timestamp), + ), + SetCell( + "cell_plan", + "data_plan_05gb", + "true", + _microseconds_from_datetime(timestamp), + ), + ], + ) + ) + await batcher.append( + RowMutationEntry( + "phone#5c10102#20190501", + [ + SetCell( + "stats_summary", + "connected_cell", + 1, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "connected_wifi", + 1, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "os_build", + "PQ2A.190401.002", + _microseconds_from_datetime(timestamp), + ), + SetCell( + "cell_plan", + "data_plan_10gb", + "true", + _microseconds_from_datetime(timestamp), + ), + ], + ) + ) + await batcher.append( + RowMutationEntry( + "phone#5c10102#20190502", + [ + SetCell( + "stats_summary", + "connected_cell", + 1, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "connected_wifi", + 0, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "os_build", + "PQ2A.190406.000", + _microseconds_from_datetime(timestamp), + ), + SetCell( + "cell_plan", + "data_plan_10gb", + "true", + _microseconds_from_datetime(timestamp), + ), + ], + ) + ) + + +def assert_output_match(capsys, expected): + out, _ = capsys.readouterr() + assert out == expected + + +@pytest.mark.asyncio +async def test_delete_from_column(capsys, table_id): + await deletes_snippets_async.delete_from_column( + PROJECT, BIGTABLE_INSTANCE, table_id + ) + assert_output_match(capsys, "") + + +@pytest.mark.asyncio +async def test_delete_from_column_family(capsys, table_id): + await deletes_snippets_async.delete_from_column_family( + PROJECT, BIGTABLE_INSTANCE, table_id + ) + assert_output_match(capsys, "") + + +@pytest.mark.asyncio +async def test_delete_from_row(capsys, table_id): + await deletes_snippets_async.delete_from_row(PROJECT, BIGTABLE_INSTANCE, table_id) + assert_output_match(capsys, "") + + +@pytest.mark.asyncio +async def test_streaming_and_batching(capsys, table_id): + await deletes_snippets_async.streaming_and_batching( + PROJECT, BIGTABLE_INSTANCE, table_id + ) + assert_output_match(capsys, "") + + +@pytest.mark.asyncio +async def test_check_and_mutate(capsys, table_id): + await deletes_snippets_async.check_and_mutate(PROJECT, BIGTABLE_INSTANCE, table_id) + assert_output_match(capsys, "") diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/deletes_snippets_async.py b/packages/google-cloud-bigtable/samples/snippets/deletes/deletes_snippets_async.py new file mode 100644 index 000000000000..8f3711e0649c --- /dev/null +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/deletes_snippets_async.py @@ -0,0 +1,113 @@ +#!/usr/bin/env python + +# Copyright 2024, Google LLC +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +from google.cloud.bigtable.data import ( + BigtableDataClientAsync, + DeleteRangeFromColumn, + DeleteAllFromFamily, + DeleteAllFromRow, + RowMutationEntry, + row_filters, + ReadRowsQuery, +) + + +# Write your code here. + + +# [START bigtable_delete_from_column_asyncio] +async def delete_from_column(project_id, instance_id, table_id): + client = BigtableDataClientAsync(project=project_id) + table = client.get_table(instance_id, table_id) + + await table.mutate_row( + "phone#4c410523#20190501", + DeleteRangeFromColumn(family="cell_plan", qualifier=b"data_plan_01gb"), + ) + + await table.close() + await client.close() + + +# [END bigtable_delete_from_column_asyncio] + +# [START bigtable_delete_from_column_family_asyncio] +async def delete_from_column_family(project_id, instance_id, table_id): + client = BigtableDataClientAsync(project=project_id) + table = client.get_table(instance_id, table_id) + + await table.mutate_row("phone#4c410523#20190501", DeleteAllFromFamily("cell_plan")) + + await table.close() + await client.close() + + +# [END bigtable_delete_from_column_family_asyncio] + + +# [START bigtable_delete_from_row_asyncio] +async def delete_from_row(project_id, instance_id, table_id): + client = BigtableDataClientAsync(project=project_id) + table = client.get_table(instance_id, table_id) + + await table.mutate_row("phone#4c410523#20190501", DeleteAllFromRow()) + + await table.close() + await client.close() + + +# [END bigtable_delete_from_row_asyncio] + +# [START bigtable_streaming_and_batching_asyncio] +async def streaming_and_batching(project_id, instance_id, table_id): + client = BigtableDataClientAsync(project=project_id) + table = client.get_table(instance_id, table_id) + + async with table.mutations_batcher() as batcher: + async for row in await table.read_rows_stream(ReadRowsQuery(limit=10)): + await batcher.append( + RowMutationEntry( + row.row_key, + DeleteRangeFromColumn( + family="cell_plan", qualifier=b"data_plan_01gb" + ), + ) + ) + + await table.close() + await client.close() + + +# [END bigtable_streaming_and_batching_asyncio] + +# [START bigtable_check_and_mutate_asyncio] +async def check_and_mutate(project_id, instance_id, table_id): + client = BigtableDataClientAsync(project=project_id) + table = client.get_table(instance_id, table_id) + + await table.check_and_mutate_row( + "phone#4c410523#20190501", + predicate=row_filters.LiteralValueFilter("PQ2A.190405.003"), + true_case_mutations=DeleteRangeFromColumn( + family="cell_plan", qualifier=b"data_plan_01gb" + ), + ) + + await table.close() + await client.close() + + +# [END bigtable_check_and_mutate_asyncio] From da75e46b54539013be7c081ccfa21a7516ff3ea4 Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Wed, 5 Jun 2024 11:14:09 -0700 Subject: [PATCH 799/892] chore(docs): update async data content in README (#975) --- packages/google-cloud-bigtable/README.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/google-cloud-bigtable/README.rst b/packages/google-cloud-bigtable/README.rst index 69856e05bb12..63c50591c366 100644 --- a/packages/google-cloud-bigtable/README.rst +++ b/packages/google-cloud-bigtable/README.rst @@ -27,9 +27,9 @@ Async Data Client :code:`v2.23.0` includes a release of the new :code:`BigtableDataClientAsync` client, accessible at the import path :code:`google.cloud.bigtable.data`. 
-The new client brings a simplified API and increased performance using asyncio, with a corresponding synchronous surface -coming soon. The new client is focused on the data API (i.e. reading and writing Bigtable data), with admin operations -remaining in the existing client. +The new client brings a simplified API and increased performance using asyncio. +The new client is focused on the data API (i.e. reading and writing Bigtable data), with admin operations +remaining exclusively in the existing synchronous client. Feedback and bug reports are welcome at cbt-python-client-v3-feedback@google.com, or through the Github `issue tracker`_. From 48605dd40f341b5ab252a72ae17ae81bf0f1a795 Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Fri, 7 Jun 2024 16:36:20 -0700 Subject: [PATCH 800/892] fix: improve rowset revision (#979) --- .../cloud/bigtable/data/_async/_read_rows.py | 2 +- .../tests/unit/data/_async/test__read_rows.py | 48 ++++++++++++++----- 2 files changed, 38 insertions(+), 12 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_read_rows.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_read_rows.py index 7f6e8e507aa3..78cb7a991f66 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_read_rows.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_read_rows.py @@ -342,7 +342,7 @@ def _revise_request_rowset( _RowSetComplete: if there are no rows left to process after the revision """ # if user is doing a whole table scan, start a new one with the last seen key - if row_set is None or (not row_set.row_ranges and row_set.row_keys is not None): + if row_set is None or (not row_set.row_ranges and not row_set.row_keys): last_seen = last_seen_row_key return RowSetPB(row_ranges=[RowRangePB(start_key_open=last_seen)]) # remove seen keys from user-specific key list diff --git a/packages/google-cloud-bigtable/tests/unit/data/_async/test__read_rows.py b/packages/google-cloud-bigtable/tests/unit/data/_async/test__read_rows.py index 4e7797c6d7c2..2bf8688fd396 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/_async/test__read_rows.py +++ b/packages/google-cloud-bigtable/tests/unit/data/_async/test__read_rows.py @@ -98,19 +98,31 @@ def test_ctor(self): (["d", "c", "b", "a"], "b", ["d", "c"]), ], ) - def test_revise_request_rowset_keys(self, in_keys, last_key, expected): + @pytest.mark.parametrize("with_range", [True, False]) + def test_revise_request_rowset_keys_with_range( + self, in_keys, last_key, expected, with_range + ): from google.cloud.bigtable_v2.types import RowSet as RowSetPB from google.cloud.bigtable_v2.types import RowRange as RowRangePB + from google.cloud.bigtable.data.exceptions import _RowSetComplete in_keys = [key.encode("utf-8") for key in in_keys] expected = [key.encode("utf-8") for key in expected] last_key = last_key.encode("utf-8") - sample_range = RowRangePB(start_key_open=last_key) - row_set = RowSetPB(row_keys=in_keys, row_ranges=[sample_range]) - revised = self._get_target_class()._revise_request_rowset(row_set, last_key) - assert revised.row_keys == expected - assert revised.row_ranges == [sample_range] + if with_range: + sample_range = [RowRangePB(start_key_open=last_key)] + else: + sample_range = [] + row_set = RowSetPB(row_keys=in_keys, row_ranges=sample_range) + if not with_range and expected == []: + # expect exception if we are revising to an empty rowset + with pytest.raises(_RowSetComplete): + 
self._get_target_class()._revise_request_rowset(row_set, last_key) + else: + revised = self._get_target_class()._revise_request_rowset(row_set, last_key) + assert revised.row_keys == expected + assert revised.row_ranges == sample_range @pytest.mark.parametrize( "in_ranges,last_key,expected", @@ -157,9 +169,13 @@ def test_revise_request_rowset_keys(self, in_keys, last_key, expected): ), ], ) - def test_revise_request_rowset_ranges(self, in_ranges, last_key, expected): + @pytest.mark.parametrize("with_key", [True, False]) + def test_revise_request_rowset_ranges( + self, in_ranges, last_key, expected, with_key + ): from google.cloud.bigtable_v2.types import RowSet as RowSetPB from google.cloud.bigtable_v2.types import RowRange as RowRangePB + from google.cloud.bigtable.data.exceptions import _RowSetComplete # convert to protobuf next_key = (last_key + "a").encode("utf-8") @@ -172,10 +188,20 @@ def test_revise_request_rowset_ranges(self, in_ranges, last_key, expected): RowRangePB(**{k: v.encode("utf-8") for k, v in r.items()}) for r in expected ] - row_set = RowSetPB(row_ranges=in_ranges, row_keys=[next_key]) - revised = self._get_target_class()._revise_request_rowset(row_set, last_key) - assert revised.row_keys == [next_key] - assert revised.row_ranges == expected + if with_key: + row_keys = [next_key] + else: + row_keys = [] + + row_set = RowSetPB(row_ranges=in_ranges, row_keys=row_keys) + if not with_key and expected == []: + # expect exception if we are revising to an empty rowset + with pytest.raises(_RowSetComplete): + self._get_target_class()._revise_request_rowset(row_set, last_key) + else: + revised = self._get_target_class()._revise_request_rowset(row_set, last_key) + assert revised.row_keys == row_keys + assert revised.row_ranges == expected @pytest.mark.parametrize("last_key", ["a", "b", "c"]) def test_revise_request_full_table(self, last_key): From 37064052d685c9b1b20c5b7daabcde646579842e Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Mon, 10 Jun 2024 11:03:11 -0700 Subject: [PATCH 801/892] chore(docs): improve async docstrings (#978) --- .../cloud/bigtable/data/_async/client.py | 19 ++++----- .../google/cloud/bigtable/data/mutations.py | 29 ++++++------- .../bigtable/data/read_modify_write_rules.py | 12 +++--- .../cloud/bigtable/data/read_rows_query.py | 42 +++++++++---------- .../google/cloud/bigtable/data/row.py | 25 +++++------ .../samples/quickstart/main_async_test.py | 3 +- .../samples/quickstart/requirements-test.txt | 1 + .../samples/quickstart/requirements.txt | 2 +- .../snippets/deletes/deletes_async_test.py | 5 ++- .../snippets/deletes/requirements-test.txt | 1 + .../samples/snippets/deletes/requirements.txt | 2 +- 11 files changed, 67 insertions(+), 74 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py index 7d75fab0079a..f8f5feceacc0 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py @@ -112,12 +112,12 @@ def __init__( client. If not passed (and if no ``_http`` object is passed), falls back to the default inferred from the environment. - client_options (Optional[Union[dict, google.api_core.client_options.ClientOptions]]): + client_options: Client options used to set user options on the client. API Endpoint should be set through client_options. 
Raises: - RuntimeError: if called outside of an async context (no running event loop) - ValueError: if pool_size is less than 1 + RuntimeError: if called outside of an async context (no running event loop) + ValueError: if pool_size is less than 1 """ # set up transport in registry transport_str = f"pooled_grpc_asyncio_{pool_size}" @@ -711,14 +711,13 @@ async def read_rows_sharded( Runs a sharded query in parallel, then return the results in a single list. Results will be returned in the order of the input queries. - This function is intended to be run on the results on a query.shard() call: + This function is intended to be run on the results on a query.shard() call. + For example:: - ``` - table_shard_keys = await table.sample_row_keys() - query = ReadRowsQuery(...) - shard_queries = query.shard(table_shard_keys) - results = await table.read_rows_sharded(shard_queries) - ``` + table_shard_keys = await table.sample_row_keys() + query = ReadRowsQuery(...) + shard_queries = query.shard(table_shard_keys) + results = await table.read_rows_sharded(shard_queries) Args: sharded_query: a sharded query to execute diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/mutations.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/mutations.py index fd9b2c24e057..335a15e12f01 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/mutations.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/mutations.py @@ -94,11 +94,9 @@ def _from_dict(cls, input_dict: dict[str, Any]) -> Mutation: Create a `Mutation` instance from a dictionary representation. Args: - input_dict (dict[str, Any]): A dictionary representation of the mutation. - + input_dict: A dictionary representation of the mutation. Returns: Mutation: A Mutation instance created from the dictionary. - Raises: ValueError: If the input dictionary is invalid or does not represent a valid mutation type. """ @@ -139,10 +137,10 @@ class SetCell(Mutation): Mutation to set the value of a cell. Args: - family (str): The name of the column family to which the new cell belongs. - qualifier (bytes | str): The column qualifier of the new cell. - new_value (bytes | str | int): The value of the new cell. - timestamp_micros (int | None): The timestamp of the new cell. If `None`, + family: The name of the column family to which the new cell belongs. + qualifier: The column qualifier of the new cell. + new_value: The value of the new cell. + timestamp_micros: The timestamp of the new cell. If `None`, the current timestamp will be used. Timestamps will be sent with millisecond precision. Extra precision will be truncated. If -1, the server will assign a timestamp. Note that `SetCell` mutations with @@ -207,13 +205,12 @@ class DeleteRangeFromColumn(Mutation): Mutation to delete a range of cells from a column. Args: - family (str): The name of the column family. - qualifier (bytes): The column qualifier. - start_timestamp_micros (int | None): The start timestamp of the range to + family: The name of the column family. + qualifier: The column qualifier. + start_timestamp_micros: The start timestamp of the range to delete. `None` represents 0. Defaults to `None`. - end_timestamp_micros (int | None): The end timestamp of the range to + end_timestamp_micros: The end timestamp of the range to delete. `None` represents infinity. Defaults to `None`. - Raises: ValueError: If `start_timestamp_micros` is greater than `end_timestamp_micros`. 
""" @@ -254,7 +251,7 @@ class DeleteAllFromFamily(Mutation): Mutation to delete all cells from a column family. Args: - family_to_delete (str): The name of the column family to delete. + family_to_delete: The name of the column family to delete. """ family_to_delete: str @@ -287,8 +284,8 @@ class RowMutationEntry: Bigtable table. Args: - row_key (bytes | str): The key of the row to mutate. - mutations (Mutation | list[Mutation]): The mutation or list of mutations to apply + row_key: The key of the row to mutate. + mutations: The mutation or list of mutations to apply to the row. Raises: @@ -358,7 +355,7 @@ def _from_dict(cls, input_dict: dict[str, Any]) -> RowMutationEntry: Create a `RowMutationEntry` instance from a dictionary representation. Args: - input_dict (dict[str, Any]): A dictionary representation of the mutation entry. + input_dict: A dictionary representation of the mutation entry. Returns: RowMutationEntry: A RowMutationEntry instance created from the dictionary. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/read_modify_write_rules.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/read_modify_write_rules.py index e2d3b9f4f354..e4446f755c00 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/read_modify_write_rules.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/read_modify_write_rules.py @@ -47,11 +47,11 @@ class IncrementRule(ReadModifyWriteRule): Rule to increment a cell's value. Args: - family (str): + family: The family name of the cell to increment. - qualifier (bytes | str): + qualifier: The qualifier of the cell to increment. - increment_amount (int): + increment_amount: The amount to increment the cell's value. Must be between -2**63 and 2**63 (64-bit signed int). Raises: TypeError: @@ -83,11 +83,11 @@ class AppendValueRule(ReadModifyWriteRule): Rule to append a value to a cell's value. Args: - family (str): + family: The family name of the cell to append to. - qualifier (bytes | str): + qualifier: The qualifier of the cell to append to. - append_value (bytes | str): + append_value: The value to append to the cell's value. Raises: TypeError: If append_value is not bytes or str. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/read_rows_query.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/read_rows_query.py index 5e414391ce40..e0839a2af7be 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/read_rows_query.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/read_rows_query.py @@ -31,6 +31,17 @@ class RowRange: """ Represents a range of keys in a ReadRowsQuery + + Args: + start_key: The start key of the range. If empty, the range is unbounded on the left. + end_key: The end key of the range. If empty, the range is unbounded on the right. + start_is_inclusive: Whether the start key is inclusive. If None, the start key is + inclusive. + end_is_inclusive: Whether the end key is inclusive. If None, the end key is not inclusive. + Raises: + ValueError: if start_key is greater than end_key, or start_is_inclusive + ValueError: if end_is_inclusive is set when the corresponding key is None + ValueError: if start_key or end_key is not a string or bytes. """ __slots__ = ("_pb",) @@ -42,18 +53,6 @@ def __init__( start_is_inclusive: bool | None = None, end_is_inclusive: bool | None = None, ): - """ - Args: - start_key: The start key of the range. If empty, the range is unbounded on the left. - end_key: The end key of the range. 
If empty, the range is unbounded on the right. - start_is_inclusive: Whether the start key is inclusive. If None, the start key is - inclusive. - end_is_inclusive: Whether the end key is inclusive. If None, the end key is not inclusive. - Raises: - ValueError: if start_key is greater than end_key, or start_is_inclusive - ValueError: if end_is_inclusive is set when the corresponding key is None - ValueError: if start_key or end_key is not a string or bytes. - """ # convert empty key inputs to None for consistency start_key = None if not start_key else start_key end_key = None if not end_key else end_key @@ -221,6 +220,14 @@ def __repr__(self) -> str: class ReadRowsQuery: """ Class to encapsulate details of a read row request + + Args: + row_keys: row keys to include in the query + a query can contain multiple keys, but ranges should be preferred + row_ranges: ranges of rows to include in the query + limit: the maximum number of rows to return. None or 0 means no limit + default: None (no limit) + row_filter: a RowFilter to apply to the query """ slots = ("_limit", "_filter", "_row_set") @@ -232,17 +239,6 @@ def __init__( limit: int | None = None, row_filter: RowFilter | None = None, ): - """ - Create a new ReadRowsQuery - - Args: - row_keys: row keys to include in the query - a query can contain multiple keys, but ranges should be preferred - row_ranges: ranges of rows to include in the query - limit: the maximum number of rows to return. None or 0 means no limit - default: None (no limit) - row_filter: a RowFilter to apply to the query - """ if row_keys is None: row_keys = [] if row_ranges is None: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/row.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/row.py index 28f0260a9747..a5575b83ac2b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/row.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/row.py @@ -33,8 +33,13 @@ class Row: query. Expected to be read-only to users, and written by backend - Can be indexed: - cells = row["family", "qualifier"] + Can be indexed by family and qualifier to get cells in the row:: + + cells = row["family", "qualifier"] + + Args: + key: Row key + cells: List of cells in the row """ __slots__ = ("row_key", "cells", "_index_data") @@ -45,14 +50,8 @@ def __init__( cells: list[Cell], ): """ - Initializes a Row object - Row objects are not intended to be created by users. They are returned by the Bigtable backend. - - Args: - key (bytes): Row key - cells (list[Cell]): List of cells in the row """ self.row_key = key self.cells: list[Cell] = cells @@ -121,9 +120,9 @@ def get_cells( If family or qualifier not passed, will include all - Can also be accessed through indexing: - cells = row["family", "qualifier"] - cells = row["family"] + Can also be accessed through indexing:: + cells = row["family", "qualifier"] + cells = row["family"] Args: family: family to filter cells by @@ -172,9 +171,7 @@ def _get_all_from_family(self, family: str) -> Generator[Cell, None, None]: def __str__(self) -> str: """ - Human-readable string representation - - .. 
code-block:: python + Human-readable string representation:: { (family='fam', qualifier=b'col'): [b'value', (+1 more),], diff --git a/packages/google-cloud-bigtable/samples/quickstart/main_async_test.py b/packages/google-cloud-bigtable/samples/quickstart/main_async_test.py index 26a09b1f1fd2..841cfc18025c 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/main_async_test.py +++ b/packages/google-cloud-bigtable/samples/quickstart/main_async_test.py @@ -16,7 +16,8 @@ from typing import AsyncGenerator from google.cloud.bigtable.data import BigtableDataClientAsync, SetCell -import pytest, pytest_asyncio +import pytest +import pytest_asyncio from main_async import main diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt index cb87efc0ff71..5cb431d92b98 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt @@ -1 +1,2 @@ pytest==7.4.4 +pytest-asyncio diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt index 6dc98589311e..835e1bc780c5 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.22.0 +google-cloud-bigtable==2.23.0 diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/deletes_async_test.py b/packages/google-cloud-bigtable/samples/snippets/deletes/deletes_async_test.py index 8770733b4b74..b708bd52e3fe 100644 --- a/packages/google-cloud-bigtable/samples/snippets/deletes/deletes_async_test.py +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/deletes_async_test.py @@ -18,7 +18,8 @@ from typing import AsyncGenerator from google.cloud._helpers import _microseconds_from_datetime -import pytest, pytest_asyncio +import pytest +import pytest_asyncio import deletes_snippets_async @@ -72,7 +73,7 @@ async def _populate_table(table_id): timestamp = datetime.datetime(2019, 5, 1) timestamp_minus_hr = timestamp - datetime.timedelta(hours=1) - async with (BigtableDataClientAsync(project=PROJECT) as client): + async with BigtableDataClientAsync(project=PROJECT) as client: async with client.get_table(BIGTABLE_INSTANCE, table_id) as table: async with table.mutations_batcher() as batcher: await batcher.append( diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt index cb87efc0ff71..5cb431d92b98 100644 --- a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt @@ -1 +1,2 @@ pytest==7.4.4 +pytest-asyncio diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt index 6dc98589311e..835e1bc780c5 100644 --- a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.22.0 +google-cloud-bigtable==2.23.0 From 9c359f81390839df6aeb19ebaf391e8d4db2d7b1 Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Tue, 11 Jun 2024 13:26:22 -0700 Subject: [PATCH 802/892] feat: improve async sharding 
(#977) --- .../cloud/bigtable/data/_async/client.py | 63 +++---- .../tests/unit/data/_async/test_client.py | 155 ++++++++++++------ 2 files changed, 141 insertions(+), 77 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py index f8f5feceacc0..34fdf847a2d1 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py @@ -739,43 +739,48 @@ async def read_rows_sharded( """ if not sharded_query: raise ValueError("empty sharded_query") - # reduce operation_timeout between batches operation_timeout, attempt_timeout = _get_timeouts( operation_timeout, attempt_timeout, self ) - timeout_generator = _attempt_timeout_generator( + # make sure each rpc stays within overall operation timeout + rpc_timeout_generator = _attempt_timeout_generator( operation_timeout, operation_timeout ) - # submit shards in batches if the number of shards goes over _CONCURRENCY_LIMIT - batched_queries = [ - sharded_query[i : i + _CONCURRENCY_LIMIT] - for i in range(0, len(sharded_query), _CONCURRENCY_LIMIT) - ] - # run batches and collect results - results_list = [] - error_dict = {} - shard_idx = 0 - for batch in batched_queries: - batch_operation_timeout = next(timeout_generator) - routine_list = [ - self.read_rows( + + # limit the number of concurrent requests using a semaphore + concurrency_sem = asyncio.Semaphore(_CONCURRENCY_LIMIT) + + async def read_rows_with_semaphore(query): + async with concurrency_sem: + # calculate new timeout based on time left in overall operation + shard_timeout = next(rpc_timeout_generator) + if shard_timeout <= 0: + raise DeadlineExceeded( + "Operation timeout exceeded before starting query" + ) + return await self.read_rows( query, - operation_timeout=batch_operation_timeout, - attempt_timeout=min(attempt_timeout, batch_operation_timeout), + operation_timeout=shard_timeout, + attempt_timeout=min(attempt_timeout, shard_timeout), retryable_errors=retryable_errors, ) - for query in batch - ] - batch_result = await asyncio.gather(*routine_list, return_exceptions=True) - for result in batch_result: - if isinstance(result, Exception): - error_dict[shard_idx] = result - elif isinstance(result, BaseException): - # BaseException not expected; raise immediately - raise result - else: - results_list.extend(result) - shard_idx += 1 + + routine_list = [read_rows_with_semaphore(query) for query in sharded_query] + batch_result = await asyncio.gather(*routine_list, return_exceptions=True) + + # collect results and errors + error_dict = {} + shard_idx = 0 + results_list = [] + for result in batch_result: + if isinstance(result, Exception): + error_dict[shard_idx] = result + elif isinstance(result, BaseException): + # BaseException not expected; raise immediately + raise result + else: + results_list.extend(result) + shard_idx += 1 if error_dict: # if any sub-request failed, raise an exception instead of returning results raise ShardedReadRowsExceptionGroup( diff --git a/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py b/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py index 7593572d86c4..9ebc403ce10d 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py @@ -1927,62 +1927,121 @@ async def mock_call(*args, **kwargs): assert call_time < 
0.2 @pytest.mark.asyncio - async def test_read_rows_sharded_batching(self): + async def test_read_rows_sharded_concurrency_limit(self): """ - Large queries should be processed in batches to limit concurrency - operation timeout should change between batches + Only 10 queries should be processed concurrently. Others should be queued + + Should start a new query as soon as previous finishes """ - from google.cloud.bigtable.data._async.client import TableAsync from google.cloud.bigtable.data._async.client import _CONCURRENCY_LIMIT assert _CONCURRENCY_LIMIT == 10 # change this test if this changes + num_queries = 15 - n_queries = 90 - expected_num_batches = n_queries // _CONCURRENCY_LIMIT - query_list = [ReadRowsQuery() for _ in range(n_queries)] - - table_mock = AsyncMock() - start_operation_timeout = 10 - start_attempt_timeout = 3 - table_mock.default_read_rows_operation_timeout = start_operation_timeout - table_mock.default_read_rows_attempt_timeout = start_attempt_timeout - # clock ticks one second on each check - with mock.patch("time.monotonic", side_effect=range(0, 100000)): - with mock.patch("asyncio.gather", AsyncMock()) as gather_mock: - await TableAsync.read_rows_sharded(table_mock, query_list) - # should have individual calls for each query - assert table_mock.read_rows.call_count == n_queries - # should have single gather call for each batch - assert gather_mock.call_count == expected_num_batches - # ensure that timeouts decrease over time - kwargs = [ - table_mock.read_rows.call_args_list[idx][1] - for idx in range(n_queries) - ] - for batch_idx in range(expected_num_batches): - batch_kwargs = kwargs[ - batch_idx - * _CONCURRENCY_LIMIT : (batch_idx + 1) - * _CONCURRENCY_LIMIT + # each of the first 10 queries take longer than the last + # later rpcs will have to wait on first 10 + increment_time = 0.05 + max_time = increment_time * (_CONCURRENCY_LIMIT - 1) + rpc_times = [min(i * increment_time, max_time) for i in range(num_queries)] + + async def mock_call(*args, **kwargs): + next_sleep = rpc_times.pop(0) + await asyncio.sleep(next_sleep) + return [mock.Mock()] + + starting_timeout = 10 + + async with _make_client() as client: + async with client.get_table("instance", "table") as table: + with mock.patch.object(table, "read_rows") as read_rows: + read_rows.side_effect = mock_call + queries = [ReadRowsQuery() for _ in range(num_queries)] + await table.read_rows_sharded( + queries, operation_timeout=starting_timeout + ) + assert read_rows.call_count == num_queries + # check operation timeouts to see how far into the operation each rpc started + rpc_start_list = [ + starting_timeout - kwargs["operation_timeout"] + for _, kwargs in read_rows.call_args_list ] - for req_kwargs in batch_kwargs: - # each batch should have the same operation_timeout, and it should decrease in each batch - expected_operation_timeout = start_operation_timeout - ( - batch_idx + 1 - ) - assert ( - req_kwargs["operation_timeout"] - == expected_operation_timeout - ) - # each attempt_timeout should start with default value, but decrease when operation_timeout reaches it - expected_attempt_timeout = min( - start_attempt_timeout, expected_operation_timeout + eps = 0.01 + # first 10 should start immediately + assert all( + rpc_start_list[i] < eps for i in range(_CONCURRENCY_LIMIT) + ) + # next rpcs should start as first ones finish + for i in range(num_queries - _CONCURRENCY_LIMIT): + idx = i + _CONCURRENCY_LIMIT + assert rpc_start_list[idx] - (i * increment_time) < eps + + @pytest.mark.asyncio + async def 
test_read_rows_sharded_expirary(self): + """ + If the operation times out before all shards complete, should raise + a ShardedReadRowsExceptionGroup + """ + from google.cloud.bigtable.data._async.client import _CONCURRENCY_LIMIT + from google.cloud.bigtable.data.exceptions import ShardedReadRowsExceptionGroup + from google.api_core.exceptions import DeadlineExceeded + + operation_timeout = 0.1 + + # let the first batch complete, but the next batch times out + num_queries = 15 + sleeps = [0] * _CONCURRENCY_LIMIT + [DeadlineExceeded("times up")] * ( + num_queries - _CONCURRENCY_LIMIT + ) + + async def mock_call(*args, **kwargs): + next_item = sleeps.pop(0) + if isinstance(next_item, Exception): + raise next_item + else: + await asyncio.sleep(next_item) + return [mock.Mock()] + + async with _make_client() as client: + async with client.get_table("instance", "table") as table: + with mock.patch.object(table, "read_rows") as read_rows: + read_rows.side_effect = mock_call + queries = [ReadRowsQuery() for _ in range(num_queries)] + with pytest.raises(ShardedReadRowsExceptionGroup) as exc: + await table.read_rows_sharded( + queries, operation_timeout=operation_timeout ) - assert req_kwargs["attempt_timeout"] == expected_attempt_timeout - # await all created coroutines to avoid warnings - for i in range(len(gather_mock.call_args_list)): - for j in range(len(gather_mock.call_args_list[i][0])): - await gather_mock.call_args_list[i][0][j] + assert isinstance(exc.value, ShardedReadRowsExceptionGroup) + assert len(exc.value.exceptions) == num_queries - _CONCURRENCY_LIMIT + # should keep successful queries + assert len(exc.value.successful_rows) == _CONCURRENCY_LIMIT + + @pytest.mark.asyncio + async def test_read_rows_sharded_negative_batch_timeout(self): + """ + try to run with batch that starts after operation timeout + + They should raise DeadlineExceeded errors + """ + from google.cloud.bigtable.data.exceptions import ShardedReadRowsExceptionGroup + from google.api_core.exceptions import DeadlineExceeded + + async def mock_call(*args, **kwargs): + await asyncio.sleep(0.05) + return [mock.Mock()] + + async with _make_client() as client: + async with client.get_table("instance", "table") as table: + with mock.patch.object(table, "read_rows") as read_rows: + read_rows.side_effect = mock_call + queries = [ReadRowsQuery() for _ in range(15)] + with pytest.raises(ShardedReadRowsExceptionGroup) as exc: + await table.read_rows_sharded(queries, operation_timeout=0.01) + assert isinstance(exc.value, ShardedReadRowsExceptionGroup) + assert len(exc.value.exceptions) == 5 + assert all( + isinstance(e.__cause__, DeadlineExceeded) + for e in exc.value.exceptions + ) class TestSampleRowKeys: From 2a079c2833b016bdf4948fa80cab6e0789315a7a Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Tue, 11 Jun 2024 15:44:24 -0700 Subject: [PATCH 803/892] chore(main): release 2.24.0 (#971) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- .../.release-please-manifest.json | 2 +- packages/google-cloud-bigtable/CHANGELOG.md | 14 ++++++++++++++ .../google/cloud/bigtable/gapic_version.py | 2 +- .../google/cloud/bigtable_admin/gapic_version.py | 2 +- .../cloud/bigtable_admin_v2/gapic_version.py | 2 +- .../google/cloud/bigtable_v2/gapic_version.py | 2 +- 6 files changed, 19 insertions(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/.release-please-manifest.json 
b/packages/google-cloud-bigtable/.release-please-manifest.json index ab46db83efc2..355b3955b70d 100644 --- a/packages/google-cloud-bigtable/.release-please-manifest.json +++ b/packages/google-cloud-bigtable/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "2.23.1" + ".": "2.24.0" } \ No newline at end of file diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 0731c14a3eb8..d82467b2792b 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,20 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.24.0](https://github.com/googleapis/python-bigtable/compare/v2.23.1...v2.24.0) (2024-06-11) + + +### Features + +* Add String type with Utf8Raw encoding to Bigtable API ([#968](https://github.com/googleapis/python-bigtable/issues/968)) ([2a2bbfd](https://github.com/googleapis/python-bigtable/commit/2a2bbfdba6737c508ab1073d37fef680ca2a8c2f)) +* Improve async sharding ([#977](https://github.com/googleapis/python-bigtable/issues/977)) ([fd1f7da](https://github.com/googleapis/python-bigtable/commit/fd1f7dafd38f7f0e714a3384a27176f485523682)) + + +### Bug Fixes + +* **backup:** Backup name regex ([#970](https://github.com/googleapis/python-bigtable/issues/970)) ([6ef122a](https://github.com/googleapis/python-bigtable/commit/6ef122ad49f43e3a22cde5cb6fdaefd947670136)) +* Improve rowset revision ([#979](https://github.com/googleapis/python-bigtable/issues/979)) ([da27527](https://github.com/googleapis/python-bigtable/commit/da275279a7e619e4cd3e72b10ac629d6e0e1fe47)) + ## [2.23.1](https://github.com/googleapis/python-bigtable/compare/v2.23.0...v2.23.1) (2024-04-15) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py index 008f4dd36be8..07de09d568ba 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.23.1" # {x-release-please-version} +__version__ = "2.24.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py index 008f4dd36be8..07de09d568ba 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.23.1" # {x-release-please-version} +__version__ = "2.24.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py index 008f4dd36be8..07de09d568ba 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "2.23.1" # {x-release-please-version} +__version__ = "2.24.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py index 008f4dd36be8..07de09d568ba 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.23.1" # {x-release-please-version} +__version__ = "2.24.0" # {x-release-please-version} From a5900cfd2966d522d83f53fdb7af5ad1871ebe9f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 11 Jul 2024 12:55:34 -0700 Subject: [PATCH 804/892] chore(deps): bump zipp from 3.17.0 to 3.19.1 in /.kokoro (#989) Bumps [zipp](https://github.com/jaraco/zipp) from 3.17.0 to 3.19.1. - [Release notes](https://github.com/jaraco/zipp/releases) - [Changelog](https://github.com/jaraco/zipp/blob/main/NEWS.rst) - [Commits](https://github.com/jaraco/zipp/compare/v3.17.0...v3.19.1) --- updated-dependencies: - dependency-name: zipp dependency-type: indirect ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- packages/google-cloud-bigtable/.kokoro/requirements.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/google-cloud-bigtable/.kokoro/requirements.txt b/packages/google-cloud-bigtable/.kokoro/requirements.txt index 51f92b8e12f1..4c4c77cd0d53 100644 --- a/packages/google-cloud-bigtable/.kokoro/requirements.txt +++ b/packages/google-cloud-bigtable/.kokoro/requirements.txt @@ -502,9 +502,9 @@ wheel==0.41.3 \ --hash=sha256:488609bc63a29322326e05560731bf7bfea8e48ad646e1f5e40d366607de0942 \ --hash=sha256:4d4987ce51a49370ea65c0bfd2234e8ce80a12780820d9dc462597a6e60d0841 # via -r requirements.in -zipp==3.17.0 \ - --hash=sha256:0e923e726174922dce09c53c59ad483ff7bbb8e572e00c7f7c46b88556409f31 \ - --hash=sha256:84e64a1c28cf7e91ed2078bb8cc8c259cb19b76942096c8d7b84947690cabaf0 +zipp==3.19.1 \ + --hash=sha256:2828e64edb5386ea6a52e7ba7cdb17bb30a73a858f5eb6eb93d8d36f5ea26091 \ + --hash=sha256:35427f6d5594f4acf82d25541438348c26736fa9b3afa2754bcd63cdb99d8e8f # via importlib-metadata # The following packages are considered to be unsafe in a requirements file: From e08c20263bf2457180aa0ceb4407576ead83ce87 Mon Sep 17 00:00:00 2001 From: Anthonios Partheniou Date: Wed, 17 Jul 2024 17:00:41 -0400 Subject: [PATCH 805/892] chore: update templated files (#986) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore: update templated files * update replacement in owlbot.py * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * upgrade place_before owlbot functionality * Revert "upgrade place_before owlbot functionality" This reverts commit e29fdec4e014c6e1b72f7246a0f096e45e6491cd. 
* fixed replacement for docfx patch * fix missing close quote * fixed quote style * added line breaks * remove escape * Add 'OwlBot Post Processor' as a required check * remove noxfile from owlbot control * removed experimental_v3 branch customized protection settings * added test tag requirement --------- Co-authored-by: Owl Bot Co-authored-by: Daniel Sanche --- packages/google-cloud-bigtable/.flake8 | 2 +- .../.github/.OwlBot.lock.yaml | 3 +- .../.github/auto-label.yaml | 2 +- .../.github/sync-repo-settings.yaml | 20 +- .../google-cloud-bigtable/.kokoro/build.sh | 2 +- .../.kokoro/docker/docs/Dockerfile | 2 +- .../.kokoro/populate-secrets.sh | 2 +- .../.kokoro/publish-docs.sh | 2 +- .../google-cloud-bigtable/.kokoro/release.sh | 2 +- .../.kokoro/requirements.txt | 509 +++++++++--------- .../.kokoro/test-samples-against-head.sh | 2 +- .../.kokoro/test-samples-impl.sh | 2 +- .../.kokoro/test-samples.sh | 2 +- .../.kokoro/trampoline.sh | 2 +- .../.kokoro/trampoline_v2.sh | 2 +- .../.pre-commit-config.yaml | 2 +- packages/google-cloud-bigtable/.trampolinerc | 2 +- packages/google-cloud-bigtable/MANIFEST.in | 2 +- packages/google-cloud-bigtable/docs/conf.py | 2 +- packages/google-cloud-bigtable/noxfile.py | 56 +- packages/google-cloud-bigtable/owlbot.py | 140 +---- .../samples/snippets/filters/noxfile.py | 16 +- .../scripts/decrypt-secrets.sh | 2 +- .../scripts/readme-gen/readme_gen.py | 2 +- 24 files changed, 339 insertions(+), 441 deletions(-) diff --git a/packages/google-cloud-bigtable/.flake8 b/packages/google-cloud-bigtable/.flake8 index 87f6e408c47d..32986c79287a 100644 --- a/packages/google-cloud-bigtable/.flake8 +++ b/packages/google-cloud-bigtable/.flake8 @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 81f87c56917d..620159621881 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,5 +13,4 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:5a4c19d17e597b92d786e569be101e636c9c2817731f80a5adec56b2aa8fe070 -# created: 2024-04-12T11:35:58.922854369Z + digest: sha256:5651442a6336971a2fb2df40fb56b3337df67cafa14c0809cc89cb34ccee1b8e diff --git a/packages/google-cloud-bigtable/.github/auto-label.yaml b/packages/google-cloud-bigtable/.github/auto-label.yaml index 8b37ee89711f..21786a4eb085 100644 --- a/packages/google-cloud-bigtable/.github/auto-label.yaml +++ b/packages/google-cloud-bigtable/.github/auto-label.yaml @@ -1,4 +1,4 @@ -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/.github/sync-repo-settings.yaml b/packages/google-cloud-bigtable/.github/sync-repo-settings.yaml index a8cc5b33b8c3..1319e555dbe5 100644 --- a/packages/google-cloud-bigtable/.github/sync-repo-settings.yaml +++ b/packages/google-cloud-bigtable/.github/sync-repo-settings.yaml @@ -31,24 +31,8 @@ branchProtectionRules: - 'Kokoro' - 'Kokoro system-3.8' - 'cla/google' -- pattern: experimental_v3 - # Can admins overwrite branch protection. 
- # Defaults to `true` - isAdminEnforced: false - # Number of approving reviews required to update matching branches. - # Defaults to `1` - requiredApprovingReviewCount: 1 - # Are reviews from code owners required to update matching branches. - # Defaults to `false` - requiresCodeOwnerReviews: false - # Require up to date branches - requiresStrictStatusChecks: false - # List of required status check contexts that must pass for commits to be accepted to matching branches. - requiredStatusCheckContexts: - - 'Kokoro' - - 'Kokoro system-3.8' - - 'cla/google' - - 'Conformance / Async v3 Client / Python 3.8' + - 'Conformance / Async v3 Client / Python 3.8 / Test Tag v0.0.2' + - 'OwlBot Post Processor' # List of explicit permissions to add (additive only) permissionRules: # Team slug to add to repository permissions diff --git a/packages/google-cloud-bigtable/.kokoro/build.sh b/packages/google-cloud-bigtable/.kokoro/build.sh index b2212fce8f47..b00036db318a 100755 --- a/packages/google-cloud-bigtable/.kokoro/build.sh +++ b/packages/google-cloud-bigtable/.kokoro/build.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/.kokoro/docker/docs/Dockerfile b/packages/google-cloud-bigtable/.kokoro/docker/docs/Dockerfile index bdaf39fe22d0..a26ce61930f5 100644 --- a/packages/google-cloud-bigtable/.kokoro/docker/docs/Dockerfile +++ b/packages/google-cloud-bigtable/.kokoro/docker/docs/Dockerfile @@ -1,4 +1,4 @@ -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/.kokoro/populate-secrets.sh b/packages/google-cloud-bigtable/.kokoro/populate-secrets.sh index 6f3972140e80..c435402f473e 100755 --- a/packages/google-cloud-bigtable/.kokoro/populate-secrets.sh +++ b/packages/google-cloud-bigtable/.kokoro/populate-secrets.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2023 Google LLC. +# Copyright 2024 Google LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/.kokoro/publish-docs.sh b/packages/google-cloud-bigtable/.kokoro/publish-docs.sh index 9eafe0be3bba..38f083f05aa0 100755 --- a/packages/google-cloud-bigtable/.kokoro/publish-docs.sh +++ b/packages/google-cloud-bigtable/.kokoro/publish-docs.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/.kokoro/release.sh b/packages/google-cloud-bigtable/.kokoro/release.sh index 2e1cbfa810ef..d21aacc5e220 100755 --- a/packages/google-cloud-bigtable/.kokoro/release.sh +++ b/packages/google-cloud-bigtable/.kokoro/release.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/packages/google-cloud-bigtable/.kokoro/requirements.txt b/packages/google-cloud-bigtable/.kokoro/requirements.txt index 4c4c77cd0d53..35ece0e4d2e9 100644 --- a/packages/google-cloud-bigtable/.kokoro/requirements.txt +++ b/packages/google-cloud-bigtable/.kokoro/requirements.txt @@ -4,21 +4,25 @@ # # pip-compile --allow-unsafe --generate-hashes requirements.in # -argcomplete==3.1.4 \ - --hash=sha256:72558ba729e4c468572609817226fb0a6e7e9a0a7d477b882be168c0b4a62b94 \ - --hash=sha256:fbe56f8cda08aa9a04b307d8482ea703e96a6a801611acb4be9bf3942017989f +argcomplete==3.4.0 \ + --hash=sha256:69a79e083a716173e5532e0fa3bef45f793f4e61096cf52b5a42c0211c8b8aa5 \ + --hash=sha256:c2abcdfe1be8ace47ba777d4fce319eb13bf8ad9dace8d085dcad6eded88057f # via nox -attrs==23.1.0 \ - --hash=sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04 \ - --hash=sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015 +attrs==23.2.0 \ + --hash=sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30 \ + --hash=sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1 # via gcp-releasetool -cachetools==5.3.2 \ - --hash=sha256:086ee420196f7b2ab9ca2db2520aca326318b68fe5ba8bc4d49cca91add450f2 \ - --hash=sha256:861f35a13a451f94e301ce2bec7cac63e881232ccce7ed67fab9b5df4d3beaa1 +backports-tarfile==1.2.0 \ + --hash=sha256:77e284d754527b01fb1e6fa8a1afe577858ebe4e9dad8919e34c862cb399bc34 \ + --hash=sha256:d75e02c268746e1b8144c278978b6e98e85de6ad16f8e4b0844a154557eca991 + # via jaraco-context +cachetools==5.3.3 \ + --hash=sha256:0abad1021d3f8325b2fc1d2e9c8b9c9d57b04c3932657a72465447332c24d945 \ + --hash=sha256:ba29e2dfa0b8b556606f097407ed1aa62080ee108ab0dc5ec9d6a723a007d105 # via google-auth -certifi==2023.7.22 \ - --hash=sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082 \ - --hash=sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9 +certifi==2024.6.2 \ + --hash=sha256:3cd43f1c6fa7dedc5899d69d3ad0398fd018ad1a17fba83ddaf78aa46c747516 \ + --hash=sha256:ddc6c8ce995e6987e7faf5e3f1b02b302836a0e5d98ece18392cb1a36c72ad56 # via requests cffi==1.16.0 \ --hash=sha256:0c9ef6ff37e974b73c25eecc13952c55bceed9112be2d9d938ded8e856138bcc \ @@ -87,90 +91,90 @@ click==8.0.4 \ # -r requirements.in # gcp-docuploader # gcp-releasetool -colorlog==6.7.0 \ - --hash=sha256:0d33ca236784a1ba3ff9c532d4964126d8a2c44f1f0cb1d2b0728196f512f662 \ - --hash=sha256:bd94bd21c1e13fac7bd3153f4bc3a7dc0eb0974b8bc2fdf1a989e474f6e582e5 +colorlog==6.8.2 \ + --hash=sha256:3e3e079a41feb5a1b64f978b5ea4f46040a94f11f0e8bbb8261e3dbbeca64d44 \ + --hash=sha256:4dcbb62368e2800cb3c5abd348da7e53f6c362dda502ec27c560b2e58a66bd33 # via # gcp-docuploader # nox -cryptography==42.0.5 \ - --hash=sha256:0270572b8bd2c833c3981724b8ee9747b3ec96f699a9665470018594301439ee \ - --hash=sha256:111a0d8553afcf8eb02a4fea6ca4f59d48ddb34497aa8706a6cf536f1a5ec576 \ - --hash=sha256:16a48c23a62a2f4a285699dba2e4ff2d1cff3115b9df052cdd976a18856d8e3d \ - --hash=sha256:1b95b98b0d2af784078fa69f637135e3c317091b615cd0905f8b8a087e86fa30 \ - --hash=sha256:1f71c10d1e88467126f0efd484bd44bca5e14c664ec2ede64c32f20875c0d413 \ - --hash=sha256:2424ff4c4ac7f6b8177b53c17ed5d8fa74ae5955656867f5a8affaca36a27abb \ - --hash=sha256:2bce03af1ce5a5567ab89bd90d11e7bbdff56b8af3acbbec1faded8f44cb06da \ - --hash=sha256:329906dcc7b20ff3cad13c069a78124ed8247adcac44b10bea1130e36caae0b4 \ - --hash=sha256:37dd623507659e08be98eec89323469e8c7b4c1407c85112634ae3dbdb926fdd \ - 
--hash=sha256:3eaafe47ec0d0ffcc9349e1708be2aaea4c6dd4978d76bf6eb0cb2c13636c6fc \ - --hash=sha256:5e6275c09d2badf57aea3afa80d975444f4be8d3bc58f7f80d2a484c6f9485c8 \ - --hash=sha256:6fe07eec95dfd477eb9530aef5bead34fec819b3aaf6c5bd6d20565da607bfe1 \ - --hash=sha256:7367d7b2eca6513681127ebad53b2582911d1736dc2ffc19f2c3ae49997496bc \ - --hash=sha256:7cde5f38e614f55e28d831754e8a3bacf9ace5d1566235e39d91b35502d6936e \ - --hash=sha256:9481ffe3cf013b71b2428b905c4f7a9a4f76ec03065b05ff499bb5682a8d9ad8 \ - --hash=sha256:98d8dc6d012b82287f2c3d26ce1d2dd130ec200c8679b6213b3c73c08b2b7940 \ - --hash=sha256:a011a644f6d7d03736214d38832e030d8268bcff4a41f728e6030325fea3e400 \ - --hash=sha256:a2913c5375154b6ef2e91c10b5720ea6e21007412f6437504ffea2109b5a33d7 \ - --hash=sha256:a30596bae9403a342c978fb47d9b0ee277699fa53bbafad14706af51fe543d16 \ - --hash=sha256:b03c2ae5d2f0fc05f9a2c0c997e1bc18c8229f392234e8a0194f202169ccd278 \ - --hash=sha256:b6cd2203306b63e41acdf39aa93b86fb566049aeb6dc489b70e34bcd07adca74 \ - --hash=sha256:b7ffe927ee6531c78f81aa17e684e2ff617daeba7f189f911065b2ea2d526dec \ - --hash=sha256:b8cac287fafc4ad485b8a9b67d0ee80c66bf3574f655d3b97ef2e1082360faf1 \ - --hash=sha256:ba334e6e4b1d92442b75ddacc615c5476d4ad55cc29b15d590cc6b86efa487e2 \ - --hash=sha256:ba3e4a42397c25b7ff88cdec6e2a16c2be18720f317506ee25210f6d31925f9c \ - --hash=sha256:c41fb5e6a5fe9ebcd58ca3abfeb51dffb5d83d6775405305bfa8715b76521922 \ - --hash=sha256:cd2030f6650c089aeb304cf093f3244d34745ce0cfcc39f20c6fbfe030102e2a \ - --hash=sha256:cd65d75953847815962c84a4654a84850b2bb4aed3f26fadcc1c13892e1e29f6 \ - --hash=sha256:e4985a790f921508f36f81831817cbc03b102d643b5fcb81cd33df3fa291a1a1 \ - --hash=sha256:e807b3188f9eb0eaa7bbb579b462c5ace579f1cedb28107ce8b48a9f7ad3679e \ - --hash=sha256:f12764b8fffc7a123f641d7d049d382b73f96a34117e0b637b80643169cec8ac \ - --hash=sha256:f8837fe1d6ac4a8052a9a8ddab256bc006242696f03368a4009be7ee3075cdb7 +cryptography==42.0.8 \ + --hash=sha256:013629ae70b40af70c9a7a5db40abe5d9054e6f4380e50ce769947b73bf3caad \ + --hash=sha256:2346b911eb349ab547076f47f2e035fc8ff2c02380a7cbbf8d87114fa0f1c583 \ + --hash=sha256:2f66d9cd9147ee495a8374a45ca445819f8929a3efcd2e3df6428e46c3cbb10b \ + --hash=sha256:2f88d197e66c65be5e42cd72e5c18afbfae3f741742070e3019ac8f4ac57262c \ + --hash=sha256:31f721658a29331f895a5a54e7e82075554ccfb8b163a18719d342f5ffe5ecb1 \ + --hash=sha256:343728aac38decfdeecf55ecab3264b015be68fc2816ca800db649607aeee648 \ + --hash=sha256:5226d5d21ab681f432a9c1cf8b658c0cb02533eece706b155e5fbd8a0cdd3949 \ + --hash=sha256:57080dee41209e556a9a4ce60d229244f7a66ef52750f813bfbe18959770cfba \ + --hash=sha256:5a94eccb2a81a309806027e1670a358b99b8fe8bfe9f8d329f27d72c094dde8c \ + --hash=sha256:6b7c4f03ce01afd3b76cf69a5455caa9cfa3de8c8f493e0d3ab7d20611c8dae9 \ + --hash=sha256:7016f837e15b0a1c119d27ecd89b3515f01f90a8615ed5e9427e30d9cdbfed3d \ + --hash=sha256:81884c4d096c272f00aeb1f11cf62ccd39763581645b0812e99a91505fa48e0c \ + --hash=sha256:81d8a521705787afe7a18d5bfb47ea9d9cc068206270aad0b96a725022e18d2e \ + --hash=sha256:8d09d05439ce7baa8e9e95b07ec5b6c886f548deb7e0f69ef25f64b3bce842f2 \ + --hash=sha256:961e61cefdcb06e0c6d7e3a1b22ebe8b996eb2bf50614e89384be54c48c6b63d \ + --hash=sha256:9c0c1716c8447ee7dbf08d6db2e5c41c688544c61074b54fc4564196f55c25a7 \ + --hash=sha256:a0608251135d0e03111152e41f0cc2392d1e74e35703960d4190b2e0f4ca9c70 \ + --hash=sha256:a0c5b2b0585b6af82d7e385f55a8bc568abff8923af147ee3c07bd8b42cda8b2 \ + --hash=sha256:ad803773e9df0b92e0a817d22fd8a3675493f690b96130a5e24f1b8fabbea9c7 \ + 
--hash=sha256:b297f90c5723d04bcc8265fc2a0f86d4ea2e0f7ab4b6994459548d3a6b992a14 \ + --hash=sha256:ba4f0a211697362e89ad822e667d8d340b4d8d55fae72cdd619389fb5912eefe \ + --hash=sha256:c4783183f7cb757b73b2ae9aed6599b96338eb957233c58ca8f49a49cc32fd5e \ + --hash=sha256:c9bb2ae11bfbab395bdd072985abde58ea9860ed84e59dbc0463a5d0159f5b71 \ + --hash=sha256:cafb92b2bc622cd1aa6a1dce4b93307792633f4c5fe1f46c6b97cf67073ec961 \ + --hash=sha256:d45b940883a03e19e944456a558b67a41160e367a719833c53de6911cabba2b7 \ + --hash=sha256:dc0fdf6787f37b1c6b08e6dfc892d9d068b5bdb671198c72072828b80bd5fe4c \ + --hash=sha256:dea567d1b0e8bc5764b9443858b673b734100c2871dc93163f58c46a97a83d28 \ + --hash=sha256:dec9b018df185f08483f294cae6ccac29e7a6e0678996587363dc352dc65c842 \ + --hash=sha256:e3ec3672626e1b9e55afd0df6d774ff0e953452886e06e0f1eb7eb0c832e8902 \ + --hash=sha256:e599b53fd95357d92304510fb7bda8523ed1f79ca98dce2f43c115950aa78801 \ + --hash=sha256:fa76fbb7596cc5839320000cdd5d0955313696d9511debab7ee7278fc8b5c84a \ + --hash=sha256:fff12c88a672ab9c9c1cf7b0c80e3ad9e2ebd9d828d955c126be4fd3e5578c9e # via # -r requirements.in # gcp-releasetool # secretstorage -distlib==0.3.7 \ - --hash=sha256:2e24928bc811348f0feb63014e97aaae3037f2cf48712d51ae61df7fd6075057 \ - --hash=sha256:9dafe54b34a028eafd95039d5e5d4851a13734540f1331060d31c9916e7147a8 +distlib==0.3.8 \ + --hash=sha256:034db59a0b96f8ca18035f36290806a9a6e6bd9d1ff91e45a7f172eb17e51784 \ + --hash=sha256:1530ea13e350031b6312d8580ddb6b27a104275a31106523b8f123787f494f64 # via virtualenv -docutils==0.20.1 \ - --hash=sha256:96f387a2c5562db4476f09f13bbab2192e764cac08ebbf3a34a95d9b1e4a59d6 \ - --hash=sha256:f08a4e276c3a1583a86dce3e34aba3fe04d02bba2dd51ed16106244e8a923e3b +docutils==0.21.2 \ + --hash=sha256:3a6b18732edf182daa3cd12775bbb338cf5691468f91eeeb109deff6ebfa986f \ + --hash=sha256:dafca5b9e384f0e419294eb4d2ff9fa826435bf15f15b7bd45723e8ad76811b2 # via readme-renderer -filelock==3.13.1 \ - --hash=sha256:521f5f56c50f8426f5e03ad3b281b490a87ef15bc6c526f168290f0c7148d44e \ - --hash=sha256:57dbda9b35157b05fb3e58ee91448612eb674172fab98ee235ccb0b5bee19a1c +filelock==3.15.4 \ + --hash=sha256:2207938cbc1844345cb01a5a95524dae30f0ce089eba5b00378295a17e3e90cb \ + --hash=sha256:6ca1fffae96225dab4c6eaf1c4f4f28cd2568d3ec2a44e15a08520504de468e7 # via virtualenv gcp-docuploader==0.6.5 \ --hash=sha256:30221d4ac3e5a2b9c69aa52fdbef68cc3f27d0e6d0d90e220fc024584b8d2318 \ --hash=sha256:b7458ef93f605b9d46a4bf3a8dc1755dad1f31d030c8679edf304e343b347eea # via -r requirements.in -gcp-releasetool==2.0.0 \ - --hash=sha256:3d73480b50ba243f22d7c7ec08b115a30e1c7817c4899781840c26f9c55b8277 \ - --hash=sha256:7aa9fd935ec61e581eb8458ad00823786d91756c25e492f372b2b30962f3c28f +gcp-releasetool==2.0.1 \ + --hash=sha256:34314a910c08e8911d9c965bd44f8f2185c4f556e737d719c33a41f6a610de96 \ + --hash=sha256:b0d5863c6a070702b10883d37c4bdfd74bf930fe417f36c0c965d3b7c779ae62 # via -r requirements.in -google-api-core==2.12.0 \ - --hash=sha256:c22e01b1e3c4dcd90998494879612c38d0a3411d1f7b679eb89e2abe3ce1f553 \ - --hash=sha256:ec6054f7d64ad13b41e43d96f735acbd763b0f3b695dabaa2d579673f6a6e160 +google-api-core==2.19.1 \ + --hash=sha256:f12a9b8309b5e21d92483bbd47ce2c445861ec7d269ef6784ecc0ea8c1fa6125 \ + --hash=sha256:f4695f1e3650b316a795108a76a1c416e6afb036199d1c1f1f110916df479ffd # via # google-cloud-core # google-cloud-storage -google-auth==2.23.4 \ - --hash=sha256:79905d6b1652187def79d491d6e23d0cbb3a21d3c7ba0dbaa9c8a01906b13ff3 \ - --hash=sha256:d4bbc92fe4b8bfd2f3e8d88e5ba7085935da208ee38a134fc280e7ce682a05f2 +google-auth==2.31.0 \ + 
--hash=sha256:042c4702efa9f7d3c48d3a69341c209381b125faa6dbf3ebe56bc7e40ae05c23 \ + --hash=sha256:87805c36970047247c8afe614d4e3af8eceafc1ebba0c679fe75ddd1d575e871 # via # gcp-releasetool # google-api-core # google-cloud-core # google-cloud-storage -google-cloud-core==2.3.3 \ - --hash=sha256:37b80273c8d7eee1ae816b3a20ae43585ea50506cb0e60f3cf5be5f87f1373cb \ - --hash=sha256:fbd11cad3e98a7e5b0343dc07cb1039a5ffd7a5bb96e1f1e27cee4bda4a90863 +google-cloud-core==2.4.1 \ + --hash=sha256:9b7749272a812bde58fff28868d0c5e2f585b82f37e09a1f6ed2d4d10f134073 \ + --hash=sha256:a9e6a4422b9ac5c29f79a0ede9485473338e2ce78d91f2370c01e730eab22e61 # via google-cloud-storage -google-cloud-storage==2.13.0 \ - --hash=sha256:ab0bf2e1780a1b74cf17fccb13788070b729f50c252f0c94ada2aae0ca95437d \ - --hash=sha256:f62dc4c7b6cd4360d072e3deb28035fbdad491ac3d9b0b1815a12daea10f37c7 +google-cloud-storage==2.17.0 \ + --hash=sha256:49378abff54ef656b52dca5ef0f2eba9aa83dc2b2c72c78714b03a1a95fe9388 \ + --hash=sha256:5b393bc766b7a3bc6f5407b9e665b2450d36282614b7945e570b3480a456d1e1 # via gcp-docuploader google-crc32c==1.5.0 \ --hash=sha256:024894d9d3cfbc5943f8f230e23950cd4906b2fe004c72e29b209420a1e6b05a \ @@ -244,28 +248,36 @@ google-crc32c==1.5.0 \ # via # google-cloud-storage # google-resumable-media -google-resumable-media==2.6.0 \ - --hash=sha256:972852f6c65f933e15a4a210c2b96930763b47197cdf4aa5f5bea435efb626e7 \ - --hash=sha256:fc03d344381970f79eebb632a3c18bb1828593a2dc5572b5f90115ef7d11e81b +google-resumable-media==2.7.1 \ + --hash=sha256:103ebc4ba331ab1bfdac0250f8033627a2cd7cde09e7ccff9181e31ba4315b2c \ + --hash=sha256:eae451a7b2e2cdbaaa0fd2eb00cc8a1ee5e95e16b55597359cbc3d27d7d90e33 # via google-cloud-storage -googleapis-common-protos==1.61.0 \ - --hash=sha256:22f1915393bb3245343f6efe87f6fe868532efc12aa26b391b15132e1279f1c0 \ - --hash=sha256:8a64866a97f6304a7179873a465d6eee97b7a24ec6cfd78e0f575e96b821240b +googleapis-common-protos==1.63.2 \ + --hash=sha256:27a2499c7e8aff199665b22741997e485eccc8645aa9176c7c988e6fae507945 \ + --hash=sha256:27c5abdffc4911f28101e635de1533fb4cfd2c37fbaa9174587c799fac90aa87 # via google-api-core idna==3.7 \ --hash=sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc \ --hash=sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0 # via requests -importlib-metadata==6.8.0 \ - --hash=sha256:3ebb78df84a805d7698245025b975d9d67053cd94c79245ba4b3eb694abe68bb \ - --hash=sha256:dbace7892d8c0c4ac1ad096662232f831d4e64f4c4545bd53016a3e9d4654743 +importlib-metadata==8.0.0 \ + --hash=sha256:15584cf2b1bf449d98ff8a6ff1abef57bf20f3ac6454f431736cd3e660921b2f \ + --hash=sha256:188bd24e4c346d3f0a933f275c2fec67050326a856b9a359881d7c2a697e8812 # via # -r requirements.in # keyring # twine -jaraco-classes==3.3.0 \ - --hash=sha256:10afa92b6743f25c0cf5f37c6bb6e18e2c5bb84a16527ccfc0040ea377e7aaeb \ - --hash=sha256:c063dd08e89217cee02c8d5e5ec560f2c8ce6cdc2fcdc2e68f7b2e5547ed3621 +jaraco-classes==3.4.0 \ + --hash=sha256:47a024b51d0239c0dd8c8540c6c7f484be3b8fcf0b2d85c13825780d3b3f3acd \ + --hash=sha256:f662826b6bed8cace05e7ff873ce0f9283b5c924470fe664fff1c2f00f581790 + # via keyring +jaraco-context==5.3.0 \ + --hash=sha256:3e16388f7da43d384a1a7cd3452e72e14732ac9fe459678773a3608a812bf266 \ + --hash=sha256:c2f67165ce1f9be20f32f650f25d8edfc1646a8aeee48ae06fb35f90763576d2 + # via keyring +jaraco-functools==4.0.1 \ + --hash=sha256:3b24ccb921d6b593bdceb56ce14799204f473976e2a9d4b15b04d0f2c2326664 \ + --hash=sha256:d33fa765374c0611b52f8b3a795f8900869aa88c84769d4d1746cd68fb28c3e8 # via keyring jeepney==0.8.0 
\ --hash=sha256:5efe48d255973902f6badc3ce55e2aa6c5c3b3bc642059ef3a91247bcfcc5806 \ @@ -273,13 +285,13 @@ jeepney==0.8.0 \ # via # keyring # secretstorage -jinja2==3.1.3 \ - --hash=sha256:7d6d50dd97d52cbc355597bd845fabfbac3f551e1f99619e39a35ce8c370b5fa \ - --hash=sha256:ac8bd6544d4bb2c9792bf3a159e80bba8fda7f07e81bc3aed565432d5925ba90 +jinja2==3.1.4 \ + --hash=sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369 \ + --hash=sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d # via gcp-releasetool -keyring==24.2.0 \ - --hash=sha256:4901caaf597bfd3bbd78c9a0c7c4c29fcd8310dab2cffefe749e916b6527acd6 \ - --hash=sha256:ca0746a19ec421219f4d713f848fa297a661a8a8c1504867e55bfb5e09091509 +keyring==25.2.1 \ + --hash=sha256:2458681cdefc0dbc0b7eb6cf75d0b98e59f9ad9b2d4edd319d18f68bdca95e50 \ + --hash=sha256:daaffd42dbda25ddafb1ad5fec4024e5bbcfe424597ca1ca452b299861e49f1b # via # gcp-releasetool # twine @@ -287,146 +299,153 @@ markdown-it-py==3.0.0 \ --hash=sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1 \ --hash=sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb # via rich -markupsafe==2.1.3 \ - --hash=sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e \ - --hash=sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e \ - --hash=sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431 \ - --hash=sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686 \ - --hash=sha256:14ff806850827afd6b07a5f32bd917fb7f45b046ba40c57abdb636674a8b559c \ - --hash=sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559 \ - --hash=sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc \ - --hash=sha256:1b8dd8c3fd14349433c79fa8abeb573a55fc0fdd769133baac1f5e07abf54aeb \ - --hash=sha256:1f67c7038d560d92149c060157d623c542173016c4babc0c1913cca0564b9939 \ - --hash=sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c \ - --hash=sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0 \ - --hash=sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4 \ - --hash=sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9 \ - --hash=sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575 \ - --hash=sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba \ - --hash=sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d \ - --hash=sha256:47d4f1c5f80fc62fdd7777d0d40a2e9dda0a05883ab11374334f6c4de38adffd \ - --hash=sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3 \ - --hash=sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00 \ - --hash=sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155 \ - --hash=sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac \ - --hash=sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52 \ - --hash=sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f \ - --hash=sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8 \ - --hash=sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b \ - --hash=sha256:715d3562f79d540f251b99ebd6d8baa547118974341db04f5ad06d5ea3eb8007 \ - --hash=sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24 \ - --hash=sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea \ - 
--hash=sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198 \ - --hash=sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0 \ - --hash=sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee \ - --hash=sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be \ - --hash=sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2 \ - --hash=sha256:8f9293864fe09b8149f0cc42ce56e3f0e54de883a9de90cd427f191c346eb2e1 \ - --hash=sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707 \ - --hash=sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6 \ - --hash=sha256:9aad3c1755095ce347e26488214ef77e0485a3c34a50c5a5e2471dff60b9dd9c \ - --hash=sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58 \ - --hash=sha256:aa57bd9cf8ae831a362185ee444e15a93ecb2e344c8e52e4d721ea3ab6ef1823 \ - --hash=sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779 \ - --hash=sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636 \ - --hash=sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c \ - --hash=sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad \ - --hash=sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee \ - --hash=sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc \ - --hash=sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2 \ - --hash=sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48 \ - --hash=sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7 \ - --hash=sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e \ - --hash=sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b \ - --hash=sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa \ - --hash=sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5 \ - --hash=sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e \ - --hash=sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb \ - --hash=sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9 \ - --hash=sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57 \ - --hash=sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc \ - --hash=sha256:f698de3fd0c4e6972b92290a45bd9b1536bffe8c6759c62471efaa8acb4c37bc \ - --hash=sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2 \ - --hash=sha256:ffcc3f7c66b5f5b7931a5aa68fc9cecc51e685ef90282f4a82f0f5e9b704ad11 +markupsafe==2.1.5 \ + --hash=sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf \ + --hash=sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff \ + --hash=sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f \ + --hash=sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3 \ + --hash=sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532 \ + --hash=sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f \ + --hash=sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617 \ + --hash=sha256:2d2d793e36e230fd32babe143b04cec8a8b3eb8a3122d2aceb4a371e6b09b8df \ + --hash=sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4 \ + --hash=sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906 \ + 
--hash=sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f \ + --hash=sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4 \ + --hash=sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8 \ + --hash=sha256:4096e9de5c6fdf43fb4f04c26fb114f61ef0bf2e5604b6ee3019d51b69e8c371 \ + --hash=sha256:4275d846e41ecefa46e2015117a9f491e57a71ddd59bbead77e904dc02b1bed2 \ + --hash=sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465 \ + --hash=sha256:4f11aa001c540f62c6166c7726f71f7573b52c68c31f014c25cc7901deea0b52 \ + --hash=sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6 \ + --hash=sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169 \ + --hash=sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad \ + --hash=sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2 \ + --hash=sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0 \ + --hash=sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029 \ + --hash=sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f \ + --hash=sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a \ + --hash=sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced \ + --hash=sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5 \ + --hash=sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c \ + --hash=sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf \ + --hash=sha256:7b2e5a267c855eea6b4283940daa6e88a285f5f2a67f2220203786dfa59b37e9 \ + --hash=sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb \ + --hash=sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad \ + --hash=sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3 \ + --hash=sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1 \ + --hash=sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46 \ + --hash=sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc \ + --hash=sha256:a549b9c31bec33820e885335b451286e2969a2d9e24879f83fe904a5ce59d70a \ + --hash=sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee \ + --hash=sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900 \ + --hash=sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5 \ + --hash=sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea \ + --hash=sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f \ + --hash=sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5 \ + --hash=sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e \ + --hash=sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a \ + --hash=sha256:c8b29db45f8fe46ad280a7294f5c3ec36dbac9491f2d1c17345be8e69cc5928f \ + --hash=sha256:ce409136744f6521e39fd8e2a24c53fa18ad67aa5bc7c2cf83645cce5b5c4e50 \ + --hash=sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a \ + --hash=sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b \ + --hash=sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4 \ + --hash=sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff \ + --hash=sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2 \ + 
--hash=sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46 \ + --hash=sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b \ + --hash=sha256:ec6a563cff360b50eed26f13adc43e61bc0c04d94b8be985e6fb24b81f6dcfdf \ + --hash=sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5 \ + --hash=sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5 \ + --hash=sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab \ + --hash=sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd \ + --hash=sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68 # via jinja2 mdurl==0.1.2 \ --hash=sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8 \ --hash=sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba # via markdown-it-py -more-itertools==10.1.0 \ - --hash=sha256:626c369fa0eb37bac0291bce8259b332fd59ac792fa5497b59837309cd5b114a \ - --hash=sha256:64e0735fcfdc6f3464ea133afe8ea4483b1c5fe3a3d69852e6503b43a0b222e6 - # via jaraco-classes -nh3==0.2.14 \ - --hash=sha256:116c9515937f94f0057ef50ebcbcc10600860065953ba56f14473ff706371873 \ - --hash=sha256:18415df36db9b001f71a42a3a5395db79cf23d556996090d293764436e98e8ad \ - --hash=sha256:203cac86e313cf6486704d0ec620a992c8bc164c86d3a4fd3d761dd552d839b5 \ - --hash=sha256:2b0be5c792bd43d0abef8ca39dd8acb3c0611052ce466d0401d51ea0d9aa7525 \ - --hash=sha256:377aaf6a9e7c63962f367158d808c6a1344e2b4f83d071c43fbd631b75c4f0b2 \ - --hash=sha256:525846c56c2bcd376f5eaee76063ebf33cf1e620c1498b2a40107f60cfc6054e \ - --hash=sha256:5529a3bf99402c34056576d80ae5547123f1078da76aa99e8ed79e44fa67282d \ - --hash=sha256:7771d43222b639a4cd9e341f870cee336b9d886de1ad9bec8dddab22fe1de450 \ - --hash=sha256:88c753efbcdfc2644a5012938c6b9753f1c64a5723a67f0301ca43e7b85dcf0e \ - --hash=sha256:93a943cfd3e33bd03f77b97baa11990148687877b74193bf777956b67054dcc6 \ - --hash=sha256:9be2f68fb9a40d8440cbf34cbf40758aa7f6093160bfc7fb018cce8e424f0c3a \ - --hash=sha256:a0c509894fd4dccdff557068e5074999ae3b75f4c5a2d6fb5415e782e25679c4 \ - --hash=sha256:ac8056e937f264995a82bf0053ca898a1cb1c9efc7cd68fa07fe0060734df7e4 \ - --hash=sha256:aed56a86daa43966dd790ba86d4b810b219f75b4bb737461b6886ce2bde38fd6 \ - --hash=sha256:e8986f1dd3221d1e741fda0a12eaa4a273f1d80a35e31a1ffe579e7c621d069e \ - --hash=sha256:f99212a81c62b5f22f9e7c3e347aa00491114a5647e1f13bbebd79c3e5f08d75 +more-itertools==10.3.0 \ + --hash=sha256:e5d93ef411224fbcef366a6e8ddc4c5781bc6359d43412a65dd5964e46111463 \ + --hash=sha256:ea6a02e24a9161e51faad17a8782b92a0df82c12c1c8886fec7f0c3fa1a1b320 + # via + # jaraco-classes + # jaraco-functools +nh3==0.2.17 \ + --hash=sha256:0316c25b76289cf23be6b66c77d3608a4fdf537b35426280032f432f14291b9a \ + --hash=sha256:1a814dd7bba1cb0aba5bcb9bebcc88fd801b63e21e2450ae6c52d3b3336bc911 \ + --hash=sha256:1aa52a7def528297f256de0844e8dd680ee279e79583c76d6fa73a978186ddfb \ + --hash=sha256:22c26e20acbb253a5bdd33d432a326d18508a910e4dcf9a3316179860d53345a \ + --hash=sha256:40015514022af31975c0b3bca4014634fa13cb5dc4dbcbc00570acc781316dcc \ + --hash=sha256:40d0741a19c3d645e54efba71cb0d8c475b59135c1e3c580f879ad5514cbf028 \ + --hash=sha256:551672fd71d06cd828e282abdb810d1be24e1abb7ae2543a8fa36a71c1006fe9 \ + --hash=sha256:66f17d78826096291bd264f260213d2b3905e3c7fae6dfc5337d49429f1dc9f3 \ + --hash=sha256:85cdbcca8ef10733bd31f931956f7fbb85145a4d11ab9e6742bbf44d88b7e351 \ + --hash=sha256:a3f55fabe29164ba6026b5ad5c3151c314d136fd67415a17660b4aaddacf1b10 \ + 
--hash=sha256:b4427ef0d2dfdec10b641ed0bdaf17957eb625b2ec0ea9329b3d28806c153d71 \ + --hash=sha256:ba73a2f8d3a1b966e9cdba7b211779ad8a2561d2dba9674b8a19ed817923f65f \ + --hash=sha256:c21bac1a7245cbd88c0b0e4a420221b7bfa838a2814ee5bb924e9c2f10a1120b \ + --hash=sha256:c551eb2a3876e8ff2ac63dff1585236ed5dfec5ffd82216a7a174f7c5082a78a \ + --hash=sha256:c790769152308421283679a142dbdb3d1c46c79c823008ecea8e8141db1a2062 \ + --hash=sha256:d7a25fd8c86657f5d9d576268e3b3767c5cd4f42867c9383618be8517f0f022a # via readme-renderer -nox==2023.4.22 \ - --hash=sha256:0b1adc619c58ab4fa57d6ab2e7823fe47a32e70202f287d78474adcc7bda1891 \ - --hash=sha256:46c0560b0dc609d7d967dc99e22cb463d3c4caf54a5fda735d6c11b5177e3a9f +nox==2024.4.15 \ + --hash=sha256:6492236efa15a460ecb98e7b67562a28b70da006ab0be164e8821177577c0565 \ + --hash=sha256:ecf6700199cdfa9e5ea0a41ff5e6ef4641d09508eda6edb89d9987864115817f # via -r requirements.in -packaging==23.2 \ - --hash=sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5 \ - --hash=sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7 +packaging==24.1 \ + --hash=sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002 \ + --hash=sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124 # via # gcp-releasetool # nox -pkginfo==1.9.6 \ - --hash=sha256:4b7a555a6d5a22169fcc9cf7bfd78d296b0361adad412a346c1226849af5e546 \ - --hash=sha256:8fd5896e8718a4372f0ea9cc9d96f6417c9b986e23a4d116dda26b62cc29d046 +pkginfo==1.10.0 \ + --hash=sha256:5df73835398d10db79f8eecd5cd86b1f6d29317589ea70796994d49399af6297 \ + --hash=sha256:889a6da2ed7ffc58ab5b900d888ddce90bce912f2d2de1dc1c26f4cb9fe65097 # via twine -platformdirs==3.11.0 \ - --hash=sha256:cf8ee52a3afdb965072dcc652433e0c7e3e40cf5ea1477cd4b3b1d2eb75495b3 \ - --hash=sha256:e9d171d00af68be50e9202731309c4e658fd8bc76f55c11c7dd760d023bda68e +platformdirs==4.2.2 \ + --hash=sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee \ + --hash=sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3 # via virtualenv -protobuf==4.25.3 \ - --hash=sha256:19b270aeaa0099f16d3ca02628546b8baefe2955bbe23224aaf856134eccf1e4 \ - --hash=sha256:209ba4cc916bab46f64e56b85b090607a676f66b473e6b762e6f1d9d591eb2e8 \ - --hash=sha256:25b5d0b42fd000320bd7830b349e3b696435f3b329810427a6bcce6a5492cc5c \ - --hash=sha256:7c8daa26095f82482307bc717364e7c13f4f1c99659be82890dcfc215194554d \ - --hash=sha256:c053062984e61144385022e53678fbded7aea14ebb3e0305ae3592fb219ccfa4 \ - --hash=sha256:d4198877797a83cbfe9bffa3803602bbe1625dc30d8a097365dbc762e5790faa \ - --hash=sha256:e3c97a1555fd6388f857770ff8b9703083de6bf1f9274a002a332d65fbb56c8c \ - --hash=sha256:e7cb0ae90dd83727f0c0718634ed56837bfeeee29a5f82a7514c03ee1364c019 \ - --hash=sha256:f0700d54bcf45424477e46a9f0944155b46fb0639d69728739c0e47bab83f2b9 \ - --hash=sha256:f1279ab38ecbfae7e456a108c5c0681e4956d5b1090027c1de0f934dfdb4b35c \ - --hash=sha256:f4f118245c4a087776e0a8408be33cf09f6c547442c00395fbfb116fac2f8ac2 +proto-plus==1.24.0 \ + --hash=sha256:30b72a5ecafe4406b0d339db35b56c4059064e69227b8c3bda7462397f966445 \ + --hash=sha256:402576830425e5f6ce4c2a6702400ac79897dab0b4343821aa5188b0fab81a12 + # via google-api-core +protobuf==5.27.2 \ + --hash=sha256:0e341109c609749d501986b835f667c6e1e24531096cff9d34ae411595e26505 \ + --hash=sha256:176c12b1f1c880bf7a76d9f7c75822b6a2bc3db2d28baa4d300e8ce4cde7409b \ + --hash=sha256:354d84fac2b0d76062e9b3221f4abbbacdfd2a4d8af36bab0474f3a0bb30ab38 \ + 
--hash=sha256:4fadd8d83e1992eed0248bc50a4a6361dc31bcccc84388c54c86e530b7f58863 \ + --hash=sha256:54330f07e4949d09614707c48b06d1a22f8ffb5763c159efd5c0928326a91470 \ + --hash=sha256:610e700f02469c4a997e58e328cac6f305f649826853813177e6290416e846c6 \ + --hash=sha256:7fc3add9e6003e026da5fc9e59b131b8f22b428b991ccd53e2af8071687b4fce \ + --hash=sha256:9e8f199bf7f97bd7ecebffcae45ebf9527603549b2b562df0fbc6d4d688f14ca \ + --hash=sha256:a109916aaac42bff84702fb5187f3edadbc7c97fc2c99c5ff81dd15dcce0d1e5 \ + --hash=sha256:b848dbe1d57ed7c191dfc4ea64b8b004a3f9ece4bf4d0d80a367b76df20bf36e \ + --hash=sha256:f3ecdef226b9af856075f28227ff2c90ce3a594d092c39bee5513573f25e2714 # via # gcp-docuploader # gcp-releasetool # google-api-core # googleapis-common-protos -pyasn1==0.5.0 \ - --hash=sha256:87a2121042a1ac9358cabcaf1d07680ff97ee6404333bacca15f76aa8ad01a57 \ - --hash=sha256:97b7290ca68e62a832558ec3976f15cbf911bf5d7c7039d8b861c2a0ece69fde + # proto-plus +pyasn1==0.6.0 \ + --hash=sha256:3a35ab2c4b5ef98e17dfdec8ab074046fbda76e281c5a706ccd82328cfc8f64c \ + --hash=sha256:cca4bb0f2df5504f02f6f8a775b6e416ff9b0b3b16f7ee80b5a3153d9b804473 # via # pyasn1-modules # rsa -pyasn1-modules==0.3.0 \ - --hash=sha256:5bd01446b736eb9d31512a30d46c1ac3395d676c6f3cafa4c03eb54b9925631c \ - --hash=sha256:d3ccd6ed470d9ffbc716be08bd90efbd44d0734bc9303818f7336070984a162d +pyasn1-modules==0.4.0 \ + --hash=sha256:831dbcea1b177b28c9baddf4c6d1013c24c3accd14a1873fffaa6a2e905f17b6 \ + --hash=sha256:be04f15b66c206eed667e0bb5ab27e2b1855ea54a842e5037738099e8ca4ae0b # via google-auth -pycparser==2.21 \ - --hash=sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9 \ - --hash=sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206 +pycparser==2.22 \ + --hash=sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6 \ + --hash=sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc # via cffi -pygments==2.16.1 \ - --hash=sha256:13fc09fa63bc8d8671a6d247e1eb303c4b343eaee81d861f3404db2935653692 \ - --hash=sha256:1daff0494820c69bc8941e407aa20f577374ee88364ee10a98fdbe0aece96e29 +pygments==2.18.0 \ + --hash=sha256:786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199 \ + --hash=sha256:b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a # via # readme-renderer # rich @@ -434,20 +453,20 @@ pyjwt==2.8.0 \ --hash=sha256:57e28d156e3d5c10088e0c68abb90bfac3df82b40a71bd0daa20c65ccd5c23de \ --hash=sha256:59127c392cc44c2da5bb3192169a91f429924e17aff6534d70fdc02ab3e04320 # via gcp-releasetool -pyperclip==1.8.2 \ - --hash=sha256:105254a8b04934f0bc84e9c24eb360a591aaf6535c9def5f29d92af107a9bf57 +pyperclip==1.9.0 \ + --hash=sha256:b7de0142ddc81bfc5c7507eea19da920b92252b548b96186caf94a5e2527d310 # via gcp-releasetool -python-dateutil==2.8.2 \ - --hash=sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86 \ - --hash=sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9 +python-dateutil==2.9.0.post0 \ + --hash=sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3 \ + --hash=sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427 # via gcp-releasetool -readme-renderer==42.0 \ - --hash=sha256:13d039515c1f24de668e2c93f2e877b9dbe6c6c32328b90a40a49d8b2b85f36d \ - --hash=sha256:2d55489f83be4992fe4454939d1a051c33edbab778e82761d060c9fc6b308cd1 +readme-renderer==43.0 \ + --hash=sha256:1818dd28140813509eeed8d62687f7cd4f7bad90d4db586001c5dc09d4fde311 \ + --hash=sha256:19db308d86ecd60e5affa3b2a98f017af384678c63c88e5d4556a380e674f3f9 
# via twine -requests==2.31.0 \ - --hash=sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f \ - --hash=sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1 +requests==2.32.3 \ + --hash=sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760 \ + --hash=sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6 # via # gcp-releasetool # google-api-core @@ -462,9 +481,9 @@ rfc3986==2.0.0 \ --hash=sha256:50b1502b60e289cb37883f3dfd34532b8873c7de9f49bb546641ce9cbd256ebd \ --hash=sha256:97aacf9dbd4bfd829baad6e6309fa6573aaf1be3f6fa735c8ab05e46cecb261c # via twine -rich==13.6.0 \ - --hash=sha256:2b38e2fe9ca72c9a00170a1a2d20c63c790d0e10ef1fe35eba76e1e7b1d7d245 \ - --hash=sha256:5c14d22737e6d5084ef4771b62d5d4363165b403455a30a1c8ca39dc7b644bef +rich==13.7.1 \ + --hash=sha256:4edbae314f59eb482f54e9e30bf00d33350aaa94f4bfcd4e9e3110e64d0d7222 \ + --hash=sha256:9be308cb1fe2f1f57d67ce99e95af38a1e2bc71ad9813b0e247cf7ffbcc3a432 # via twine rsa==4.9 \ --hash=sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7 \ @@ -480,35 +499,39 @@ six==1.16.0 \ # via # gcp-docuploader # python-dateutil -twine==4.0.2 \ - --hash=sha256:929bc3c280033347a00f847236564d1c52a3e61b1ac2516c97c48f3ceab756d8 \ - --hash=sha256:9e102ef5fdd5a20661eb88fad46338806c3bd32cf1db729603fe3697b1bc83c8 +tomli==2.0.1 \ + --hash=sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc \ + --hash=sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f + # via nox +twine==5.1.1 \ + --hash=sha256:215dbe7b4b94c2c50a7315c0275d2258399280fbb7d04182c7e55e24b5f93997 \ + --hash=sha256:9aa0825139c02b3434d913545c7b847a21c835e11597f5255842d457da2322db # via -r requirements.in -typing-extensions==4.8.0 \ - --hash=sha256:8f92fc8806f9a6b641eaa5318da32b44d401efaac0f6678c9bc448ba3605faa0 \ - --hash=sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef +typing-extensions==4.12.2 \ + --hash=sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d \ + --hash=sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8 # via -r requirements.in -urllib3==2.0.7 \ - --hash=sha256:c97dfde1f7bd43a71c8d2a58e369e9b2bf692d1334ea9f9cae55add7d0dd0f84 \ - --hash=sha256:fdb6d215c776278489906c2f8916e6e7d4f5a9b602ccbcfdf7f016fc8da0596e +urllib3==2.2.2 \ + --hash=sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472 \ + --hash=sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168 # via # requests # twine -virtualenv==20.24.6 \ - --hash=sha256:02ece4f56fbf939dbbc33c0715159951d6bf14aaf5457b092e4548e1382455af \ - --hash=sha256:520d056652454c5098a00c0f073611ccbea4c79089331f60bf9d7ba247bb7381 +virtualenv==20.26.3 \ + --hash=sha256:4c43a2a236279d9ea36a0d76f98d84bd6ca94ac4e0f4a3b9d46d05e10fea542a \ + --hash=sha256:8cc4a31139e796e9a7de2cd5cf2489de1217193116a8fd42328f1bd65f434589 # via nox -wheel==0.41.3 \ - --hash=sha256:488609bc63a29322326e05560731bf7bfea8e48ad646e1f5e40d366607de0942 \ - --hash=sha256:4d4987ce51a49370ea65c0bfd2234e8ce80a12780820d9dc462597a6e60d0841 +wheel==0.43.0 \ + --hash=sha256:465ef92c69fa5c5da2d1cf8ac40559a8c940886afcef87dcf14b9470862f1d85 \ + --hash=sha256:55c570405f142630c6b9f72fe09d9b67cf1477fcf543ae5b8dcb1f5b7377da81 # via -r requirements.in -zipp==3.19.1 \ - --hash=sha256:2828e64edb5386ea6a52e7ba7cdb17bb30a73a858f5eb6eb93d8d36f5ea26091 \ - --hash=sha256:35427f6d5594f4acf82d25541438348c26736fa9b3afa2754bcd63cdb99d8e8f +zipp==3.19.2 \ + 
--hash=sha256:bf1dcf6450f873a13e952a29504887c89e6de7506209e5b1bcc3460135d4de19 \ + --hash=sha256:f091755f667055f2d02b32c53771a7a6c8b47e1fdbc4b72a8b9072b3eef8015c # via importlib-metadata # The following packages are considered to be unsafe in a requirements file: -setuptools==69.2.0 \ - --hash=sha256:0ff4183f8f42cd8fa3acea16c45205521a4ef28f73c6391d8a25e92893134f2e \ - --hash=sha256:c21c49fb1042386df081cb5d86759792ab89efca84cf114889191cd09aacc80c +setuptools==70.2.0 \ + --hash=sha256:b8b8060bb426838fbe942479c90296ce976249451118ef566a5a0b7d8b78fb05 \ + --hash=sha256:bd63e505105011b25c3c11f753f7e3b8465ea739efddaccef8f0efac2137bac1 # via -r requirements.in diff --git a/packages/google-cloud-bigtable/.kokoro/test-samples-against-head.sh b/packages/google-cloud-bigtable/.kokoro/test-samples-against-head.sh index 63ac41dfae1d..e9d8bd79a644 100755 --- a/packages/google-cloud-bigtable/.kokoro/test-samples-against-head.sh +++ b/packages/google-cloud-bigtable/.kokoro/test-samples-against-head.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/.kokoro/test-samples-impl.sh b/packages/google-cloud-bigtable/.kokoro/test-samples-impl.sh index 5a0f5fab6a89..55910c8ba178 100755 --- a/packages/google-cloud-bigtable/.kokoro/test-samples-impl.sh +++ b/packages/google-cloud-bigtable/.kokoro/test-samples-impl.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/.kokoro/test-samples.sh b/packages/google-cloud-bigtable/.kokoro/test-samples.sh index 50b35a48c190..7933d820149a 100755 --- a/packages/google-cloud-bigtable/.kokoro/test-samples.sh +++ b/packages/google-cloud-bigtable/.kokoro/test-samples.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/.kokoro/trampoline.sh b/packages/google-cloud-bigtable/.kokoro/trampoline.sh index d85b1f267693..48f79699706e 100755 --- a/packages/google-cloud-bigtable/.kokoro/trampoline.sh +++ b/packages/google-cloud-bigtable/.kokoro/trampoline.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/.kokoro/trampoline_v2.sh b/packages/google-cloud-bigtable/.kokoro/trampoline_v2.sh index 59a7cf3a9373..35fa529231dc 100755 --- a/packages/google-cloud-bigtable/.kokoro/trampoline_v2.sh +++ b/packages/google-cloud-bigtable/.kokoro/trampoline_v2.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/packages/google-cloud-bigtable/.pre-commit-config.yaml b/packages/google-cloud-bigtable/.pre-commit-config.yaml index 6a8e16950664..1d74695f70b6 100644 --- a/packages/google-cloud-bigtable/.pre-commit-config.yaml +++ b/packages/google-cloud-bigtable/.pre-commit-config.yaml @@ -1,4 +1,4 @@ -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/.trampolinerc b/packages/google-cloud-bigtable/.trampolinerc index a7dfeb42c6d0..0080152373d5 100644 --- a/packages/google-cloud-bigtable/.trampolinerc +++ b/packages/google-cloud-bigtable/.trampolinerc @@ -1,4 +1,4 @@ -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/MANIFEST.in b/packages/google-cloud-bigtable/MANIFEST.in index e0a66705318e..d6814cd60037 100644 --- a/packages/google-cloud-bigtable/MANIFEST.in +++ b/packages/google-cloud-bigtable/MANIFEST.in @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/docs/conf.py b/packages/google-cloud-bigtable/docs/conf.py index b5a870f58002..d8f0352cdd1c 100644 --- a/packages/google-cloud-bigtable/docs/conf.py +++ b/packages/google-cloud-bigtable/docs/conf.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index 3ea12c187a77..5fb94526dbea 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -185,14 +185,28 @@ def install_unittest_dependencies(session, *constraints): session.install("-e", ".", *constraints) -def default(session): +@nox.session(python=UNIT_TEST_PYTHON_VERSIONS) +@nox.parametrize( + "protobuf_implementation", + ["python", "upb", "cpp"], +) +def unit(session, protobuf_implementation): # Install all test dependencies, then install this package in-place. + if protobuf_implementation == "cpp" and session.python in ("3.11", "3.12"): + session.skip("cpp implementation is not supported in python 3.11+") + constraints_path = str( CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt" ) install_unittest_dependencies(session, "-c", constraints_path) + # TODO(https://github.com/googleapis/synthtool/issues/1976): + # Remove the 'cpp' implementation once support for Protobuf 3.x is dropped. + # The 'cpp' implementation requires Protobuf<4. + if protobuf_implementation == "cpp": + session.install("protobuf<4") + # Run py.test against the unit tests. 
session.run( "py.test", @@ -206,15 +220,12 @@ def default(session): "--cov-fail-under=0", os.path.join("tests", "unit"), *session.posargs, + env={ + "PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION": protobuf_implementation, + }, ) -@nox.session(python=UNIT_TEST_PYTHON_VERSIONS) -def unit(session): - """Run the unit test suite.""" - default(session) - - def install_systemtest_dependencies(session, *constraints): # Use pre-release gRPC for system tests. # Exclude version 1.52.0rc1 which has a known issue. @@ -430,10 +441,17 @@ def docfx(session): session.run("python", "docs/scripts/patch_devsite_toc.py") -@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS) -def prerelease_deps(session): +@nox.session(python="3.12") +@nox.parametrize( + "protobuf_implementation", + ["python", "upb", "cpp"], +) +def prerelease_deps(session, protobuf_implementation): """Run all tests with prerelease versions of dependencies installed.""" + if protobuf_implementation == "cpp" and session.python in ("3.11", "3.12"): + session.skip("cpp implementation is not supported in python 3.11+") + # Install all dependencies session.install("-e", ".[all, tests, tracing]") unit_deps_all = UNIT_TEST_STANDARD_DEPENDENCIES + UNIT_TEST_EXTERNAL_DEPENDENCIES @@ -468,9 +486,9 @@ def prerelease_deps(session): "protobuf", # dependency of grpc "six", + "grpc-google-iam-v1", "googleapis-common-protos", - # Exclude version 1.52.0rc1 which has a known issue. See https://github.com/grpc/grpc/issues/32163 - "grpcio!=1.52.0rc1", + "grpcio", "grpcio-status", "google-api-core", "google-auth", @@ -496,7 +514,13 @@ def prerelease_deps(session): session.run("python", "-c", "import grpc; print(grpc.__version__)") session.run("python", "-c", "import google.auth; print(google.auth.__version__)") - session.run("py.test", "tests/unit") + session.run( + "py.test", + "tests/unit", + env={ + "PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION": protobuf_implementation, + }, + ) system_test_path = os.path.join("tests", "system.py") system_test_folder_path = os.path.join("tests", "system") @@ -509,6 +533,9 @@ def prerelease_deps(session): f"--junitxml=system_{session.python}_sponge_log.xml", system_test_path, *session.posargs, + env={ + "PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION": protobuf_implementation, + }, ) if os.path.exists(system_test_folder_path): session.run( @@ -517,4 +544,7 @@ def prerelease_deps(session): f"--junitxml=system_{session.python}_sponge_log.xml", system_test_folder_path, *session.posargs, + env={ + "PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION": protobuf_implementation, + }, ) diff --git a/packages/google-cloud-bigtable/owlbot.py b/packages/google-cloud-bigtable/owlbot.py index 170bc08d4dca..84aa3d61b3bd 100644 --- a/packages/google-cloud-bigtable/owlbot.py +++ b/packages/google-cloud-bigtable/owlbot.py @@ -95,145 +95,7 @@ def get_staging_dirs( ], ) -s.move(templated_files, excludes=[".coveragerc", "README.rst", ".github/release-please.yml"]) - -# ---------------------------------------------------------------------------- -# Customize noxfile.py -# ---------------------------------------------------------------------------- - -def place_before(path, text, *before_text, escape=None): - replacement = "\n".join(before_text) + "\n" + text - if escape: - for c in escape: - text = text.replace(c, '\\' + c) - s.replace([path], text, replacement) - -system_emulated_session = """ -@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS) -def system_emulated(session): - import subprocess - import signal - - try: - subprocess.call(["gcloud", "--version"]) - except OSError: - 
session.skip("gcloud not found but required for emulator support") - - # Currently, CI/CD doesn't have beta component of gcloud. - subprocess.call(["gcloud", "components", "install", "beta", "bigtable"]) - - hostport = "localhost:8789" - session.env["BIGTABLE_EMULATOR_HOST"] = hostport - - p = subprocess.Popen( - ["gcloud", "beta", "emulators", "bigtable", "start", "--host-port", hostport] - ) - - try: - system(session) - finally: - # Stop Emulator - os.killpg(os.getpgid(p.pid), signal.SIGKILL) - -""" - -place_before( - "noxfile.py", - "@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)\n" - "def system(session):", - system_emulated_session, - escape="()" -) - -conformance_session = """ -@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS) -def conformance(session): - TEST_REPO_URL = "https://github.com/googleapis/cloud-bigtable-clients-test.git" - CLONE_REPO_DIR = "cloud-bigtable-clients-test" - # install dependencies - constraints_path = str( - CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt" - ) - install_unittest_dependencies(session, "-c", constraints_path) - with session.chdir("test_proxy"): - # download the conformance test suite - clone_dir = os.path.join(CURRENT_DIRECTORY, CLONE_REPO_DIR) - if not os.path.exists(clone_dir): - print("downloading copy of test repo") - session.run("git", "clone", TEST_REPO_URL, CLONE_REPO_DIR, external=True) - session.run("bash", "-e", "run_tests.sh", external=True) - -""" - -place_before( - "noxfile.py", - "@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)\n" - "def system(session):", - conformance_session, - escape="()" -) - -# add system_emulated and mypy and conformance to nox session -s.replace("noxfile.py", - """nox.options.sessions = \[ - "unit", - "system",""", - """nox.options.sessions = [ - "unit", - "system_emulated", - "system", - "mypy",""", -) - - -s.replace( - "noxfile.py", - """\ -@nox.session\(python=DEFAULT_PYTHON_VERSION\) -def lint_setup_py\(session\): -""", - '''\ -@nox.session(python=DEFAULT_PYTHON_VERSION) -def mypy(session): - """Verify type hints are mypy compatible.""" - session.install("-e", ".") - session.install("mypy", "types-setuptools", "types-protobuf", "types-mock", "types-requests") - session.install("google-cloud-testutils") - session.run( - "mypy", - "-p", - "google.cloud.bigtable.data", - "--check-untyped-defs", - "--warn-unreachable", - "--disallow-any-generics", - "--exclude", - "tests/system/v2_client", - "--exclude", - "tests/unit/v2_client", - ) - - -# add customization to docfx -docfx_postprocess = """ - # Customization: Add extra sections to the table of contents for the Classic vs Async clients - session.install("pyyaml") - session.run("python", "docs/scripts/patch_devsite_toc.py") -""" - -place_before( - "noxfile.py", - "@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)\n" - "def prerelease_deps(session):", - docfx_postprocess, - escape="()" -) - - -@nox.session(python=DEFAULT_PYTHON_VERSION) -def lint_setup_py(session): -''', -) - +s.move(templated_files, excludes=[".coveragerc", "README.rst", ".github/release-please.yml", "noxfile.py"]) # ---------------------------------------------------------------------------- # Customize gapics to include PooledBigtableGrpcAsyncIOTransport diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py index c36d5f2d81f3..483b55901791 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py +++ 
b/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py @@ -22,6 +22,7 @@ import nox + # WARNING - WARNING - WARNING - WARNING - WARNING # WARNING - WARNING - WARNING - WARNING - WARNING # DO NOT EDIT THIS FILE EVER! @@ -159,7 +160,6 @@ def blacken(session: nox.sessions.Session) -> None: # format = isort + black # - @nox.session def format(session: nox.sessions.Session) -> None: """ @@ -187,9 +187,7 @@ def _session_tests( session: nox.sessions.Session, post_install: Callable = None ) -> None: # check for presence of tests - test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob( - "**/test_*.py", recursive=True - ) + test_list = glob.glob("**/*_test.py", recursive=True) + glob.glob("**/test_*.py", recursive=True) test_list.extend(glob.glob("**/tests", recursive=True)) if len(test_list) == 0: @@ -211,7 +209,9 @@ def _session_tests( if os.path.exists("requirements-test.txt"): if os.path.exists("constraints-test.txt"): - session.install("-r", "requirements-test.txt", "-c", "constraints-test.txt") + session.install( + "-r", "requirements-test.txt", "-c", "constraints-test.txt" + ) else: session.install("-r", "requirements-test.txt") with open("requirements-test.txt") as rtfile: @@ -224,9 +224,9 @@ def _session_tests( post_install(session) if "pytest-parallel" in packages: - concurrent_args.extend(["--workers", "auto", "--tests-per-worker", "auto"]) + concurrent_args.extend(['--workers', 'auto', '--tests-per-worker', 'auto']) elif "pytest-xdist" in packages: - concurrent_args.extend(["-n", "auto"]) + concurrent_args.extend(['-n', 'auto']) session.run( "pytest", @@ -256,7 +256,7 @@ def py(session: nox.sessions.Session) -> None: def _get_repo_root() -> Optional[str]: - """Returns the root folder of the project.""" + """ Returns the root folder of the project. """ # Get root of this repository. Assume we don't have directories nested deeper than 10 items. p = Path(os.getcwd()) for i in range(10): diff --git a/packages/google-cloud-bigtable/scripts/decrypt-secrets.sh b/packages/google-cloud-bigtable/scripts/decrypt-secrets.sh index 0018b421ddf8..120b0ddc4364 100755 --- a/packages/google-cloud-bigtable/scripts/decrypt-secrets.sh +++ b/packages/google-cloud-bigtable/scripts/decrypt-secrets.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright 2023 Google LLC All rights reserved. +# Copyright 2024 Google LLC All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/scripts/readme-gen/readme_gen.py b/packages/google-cloud-bigtable/scripts/readme-gen/readme_gen.py index 1acc119835b5..8f5e248a0da1 100644 --- a/packages/google-cloud-bigtable/scripts/readme-gen/readme_gen.py +++ b/packages/google-cloud-bigtable/scripts/readme-gen/readme_gen.py @@ -1,6 +1,6 @@ #!/usr/bin/env python -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
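The noxfile.py changes in this commit replace the old single `unit` session with sessions parametrized over the protobuf runtime, and thread the chosen backend through to pytest via the PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION environment variable. A condensed sketch of that pattern follows; it is not the verbatim noxfile, and the Python versions and dependency list are assumed for illustration:

    import nox

    UNIT_TEST_PYTHON_VERSIONS = ["3.8", "3.9", "3.10", "3.11", "3.12"]  # assumed

    @nox.session(python=UNIT_TEST_PYTHON_VERSIONS)
    @nox.parametrize("protobuf_implementation", ["python", "upb", "cpp"])
    def unit(session, protobuf_implementation):
        # The C++ protobuf runtime was never released for Python 3.11+.
        if protobuf_implementation == "cpp" and session.python in ("3.11", "3.12"):
            session.skip("cpp implementation is not supported in python 3.11+")
        session.install("-e", ".", "pytest", "pytest-cov")
        if protobuf_implementation == "cpp":
            # The 'cpp' implementation only exists for protobuf 3.x.
            session.install("protobuf<4")
        # Select the protobuf backend for the test process via the environment.
        session.run(
            "pytest",
            "tests/unit",
            env={"PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION": protobuf_implementation},
        )

The same env mapping is applied to the prerelease_deps unit and system test invocations, so all three backends are exercised there as well.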
From 2bab5aed6ff6f560e890483439fb9f863ae1e059 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 18 Jul 2024 06:29:24 -0700 Subject: [PATCH 806/892] fix: Allow protobuf 5.x (#972) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore: Update gapic-generator-python to v1.18.0 PiperOrigin-RevId: 638650618 Source-Link: https://github.com/googleapis/googleapis/commit/6330f0389afdd04235c59898cc44f715b077aa25 Source-Link: https://github.com/googleapis/googleapis-gen/commit/44fa4f1979dc45c1778fd7caf13f8e61c6d1cae8 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiNDRmYTRmMTk3OWRjNDVjMTc3OGZkN2NhZjEzZjhlNjFjNmQxY2FlOCJ9 * feat(spanner): Add support for Cloud Spanner Scheduled Backups PiperOrigin-RevId: 649277844 Source-Link: https://github.com/googleapis/googleapis/commit/fd7efa2da3860e813485e63661d3bdd21fc9ba82 Source-Link: https://github.com/googleapis/googleapis-gen/commit/50be251329d8db5b555626ebd4886721f547d3cc Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiNTBiZTI1MTMyOWQ4ZGI1YjU1NTYyNmViZDQ4ODY3MjFmNTQ3ZDNjYyJ9 * feat: publish the Cloud Bigtable ExecuteQuery API The ExecuteQuery API will allow users to query Bigtable using SQL PiperOrigin-RevId: 650660213 Source-Link: https://github.com/googleapis/googleapis/commit/f681f79a93814d8b974da9dd8cdc62228d0f4758 Source-Link: https://github.com/googleapis/googleapis-gen/commit/3180845487136794952b8f365fe6c6868999d9c0 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiMzE4MDg0NTQ4NzEzNjc5NDk1MmI4ZjM2NWZlNmM2ODY4OTk5ZDljMCJ9 * feat: publish ProtoRows Message This is needed to parse ExecuteQuery responses PiperOrigin-RevId: 651386373 Source-Link: https://github.com/googleapis/googleapis/commit/a5be6fa5ff1603b2cab067408e2640d270f0e300 Source-Link: https://github.com/googleapis/googleapis-gen/commit/d467ce893a04c41e504983346c215d41fd263650 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiZDQ2N2NlODkzYTA0YzQxZTUwNDk4MzM0NmMyMTVkNDFmZDI2MzY1MCJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * update setup.py to match googleapis/gapic-generator-python/blob/main/gapic/templates/setup.py.j2 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * update constraints --------- Co-authored-by: Owl Bot Co-authored-by: Anthonios Partheniou --- .../bigtable_instance_admin/async_client.py | 1 + .../transports/base.py | 4 +- .../transports/grpc.py | 3 +- .../transports/grpc_asyncio.py | 3 +- .../bigtable_table_admin/async_client.py | 1 + .../bigtable_table_admin/transports/base.py | 4 +- .../bigtable_table_admin/transports/grpc.py | 3 +- .../transports/grpc_asyncio.py | 3 +- .../google/cloud/bigtable_v2/__init__.py | 22 + .../cloud/bigtable_v2/gapic_metadata.json | 15 + .../services/bigtable/async_client.py | 104 + .../bigtable_v2/services/bigtable/client.py | 107 + .../services/bigtable/transports/base.py | 18 +- .../services/bigtable/transports/grpc.py | 30 +- .../bigtable/transports/grpc_asyncio.py | 37 +- .../services/bigtable/transports/rest.py | 131 + .../cloud/bigtable_v2/types/__init__.py | 24 + .../cloud/bigtable_v2/types/bigtable.py | 123 + .../google/cloud/bigtable_v2/types/data.py | 292 ++- .../google/cloud/bigtable_v2/types/types.py | 561 
+++++ .../scripts/fixup_bigtable_v2_keywords.py | 1 + packages/google-cloud-bigtable/setup.py | 6 +- .../testing/constraints-3.7.txt | 5 +- .../testing/constraints-3.8.txt | 5 +- .../test_bigtable_instance_admin.py | 159 +- .../test_bigtable_table_admin.py | 234 +- .../unit/gapic/bigtable_v2/test_bigtable.py | 2111 +++++++++++------ 27 files changed, 2935 insertions(+), 1072 deletions(-) create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/types.py diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py index 52c537260c79..171dd8298bb8 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py index fc346c9bbe97..bc2f819b82ff 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py @@ -96,6 +96,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. @@ -108,7 +110,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py index 49a1b9e113de..cc3e7098629e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py @@ -132,7 +132,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py index b85a696d9b96..1fa85551cf93 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py @@ -179,7 +179,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index 2747e403726f..5e429f7e530e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -38,6 +38,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py index 1ec3be85ef5b..bb7875d87c60 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py @@ -96,6 +96,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. @@ -108,7 +110,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py index 01cec4e0b1e9..71f06947f6d8 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py @@ -134,7 +134,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py index f20ed0a494b1..bdd6e20c810c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py @@ -181,7 +181,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py index 56748d882ba5..f2b3ddf284e4 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py @@ -23,6 +23,8 @@ from .types.bigtable import CheckAndMutateRowRequest from .types.bigtable import CheckAndMutateRowResponse +from .types.bigtable import ExecuteQueryRequest +from .types.bigtable import ExecuteQueryResponse from .types.bigtable import GenerateInitialChangeStreamPartitionsRequest from .types.bigtable import GenerateInitialChangeStreamPartitionsResponse from .types.bigtable import MutateRowRequest @@ -40,12 +42,20 @@ from .types.bigtable import ReadRowsResponse from .types.bigtable import SampleRowKeysRequest from .types.bigtable import SampleRowKeysResponse +from .types.data import ArrayValue from .types.data import Cell from .types.data import Column +from .types.data import ColumnMetadata from .types.data import ColumnRange from .types.data import Family from .types.data import Mutation +from .types.data import PartialResultSet +from .types.data import ProtoFormat +from .types.data import ProtoRows +from .types.data import ProtoRowsBatch +from .types.data import ProtoSchema from .types.data import ReadModifyWriteRule +from .types.data import ResultSetMetadata from .types.data import Row from .types.data import RowFilter from .types.data import RowRange @@ -62,15 +72,20 @@ from .types.request_stats import RequestLatencyStats from .types.request_stats import RequestStats from .types.response_params import ResponseParams +from .types.types import Type __all__ = ( "BigtableAsyncClient", + "ArrayValue", "BigtableClient", "Cell", "CheckAndMutateRowRequest", "CheckAndMutateRowResponse", "Column", + "ColumnMetadata", "ColumnRange", + "ExecuteQueryRequest", + "ExecuteQueryResponse", "Family", "FeatureFlags", "FullReadStatsView", @@ -81,8 +96,13 @@ "MutateRowsRequest", "MutateRowsResponse", "Mutation", + "PartialResultSet", "PingAndWarmRequest", "PingAndWarmResponse", + "ProtoFormat", + "ProtoRows", + "ProtoRowsBatch", + "ProtoSchema", "RateLimitInfo", "ReadChangeStreamRequest", "ReadChangeStreamResponse", @@ -95,6 +115,7 @@ "RequestLatencyStats", "RequestStats", "ResponseParams", + "ResultSetMetadata", "Row", "RowFilter", "RowRange", @@ -105,6 +126,7 @@ "StreamContinuationTokens", "StreamPartition", "TimestampRange", + "Type", "Value", "ValueRange", ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_metadata.json 
b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_metadata.json index 181dc8ff57a2..fd47c04350f9 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_metadata.json +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_metadata.json @@ -15,6 +15,11 @@ "check_and_mutate_row" ] }, + "ExecuteQuery": { + "methods": [ + "execute_query" + ] + }, "GenerateInitialChangeStreamPartitions": { "methods": [ "generate_initial_change_stream_partitions" @@ -65,6 +70,11 @@ "check_and_mutate_row" ] }, + "ExecuteQuery": { + "methods": [ + "execute_query" + ] + }, "GenerateInitialChangeStreamPartitions": { "methods": [ "generate_initial_change_stream_partitions" @@ -115,6 +125,11 @@ "check_and_mutate_row" ] }, + "ExecuteQuery": { + "methods": [ + "execute_query" + ] + }, "GenerateInitialChangeStreamPartitions": { "methods": [ "generate_initial_change_stream_partitions" diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py index 70daa63e3e75..12432dda7609 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -40,6 +40,7 @@ from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore + try: OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER @@ -1293,6 +1294,109 @@ def read_change_stream( # Done; return the response. return response + def execute_query( + self, + request: Optional[Union[bigtable.ExecuteQueryRequest, dict]] = None, + *, + instance_name: Optional[str] = None, + query: Optional[str] = None, + app_profile_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Awaitable[AsyncIterable[bigtable.ExecuteQueryResponse]]: + r"""Executes a BTQL query against a particular Cloud + Bigtable instance. + + Args: + request (Optional[Union[google.cloud.bigtable_v2.types.ExecuteQueryRequest, dict]]): + The request object. Request message for + Bigtable.ExecuteQuery + instance_name (:class:`str`): + Required. The unique name of the instance against which + the query should be executed. Values are of the form + ``projects//instances/`` + + This corresponds to the ``instance_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + query (:class:`str`): + Required. The query string. + This corresponds to the ``query`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile_id (:class:`str`): + Optional. This value specifies routing for replication. + If not specified, the ``default`` application profile + will be used. + + This corresponds to the ``app_profile_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + AsyncIterable[google.cloud.bigtable_v2.types.ExecuteQueryResponse]: + Response message for + Bigtable.ExecuteQuery + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([instance_name, query, app_profile_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable.ExecuteQueryRequest): + request = bigtable.ExecuteQueryRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if instance_name is not None: + request.instance_name = instance_name + if query is not None: + request.query = query + if app_profile_id is not None: + request.app_profile_id = app_profile_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.execute_query + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("instance_name", request.instance_name),) + ), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + async def __aenter__(self) -> "BigtableAsyncClient": return self diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py index 7eda705b9382..0937c90fe761 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py @@ -1844,6 +1844,113 @@ def read_change_stream( # Done; return the response. return response + def execute_query( + self, + request: Optional[Union[bigtable.ExecuteQueryRequest, dict]] = None, + *, + instance_name: Optional[str] = None, + query: Optional[str] = None, + app_profile_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Iterable[bigtable.ExecuteQueryResponse]: + r"""Executes a BTQL query against a particular Cloud + Bigtable instance. + + Args: + request (Union[google.cloud.bigtable_v2.types.ExecuteQueryRequest, dict]): + The request object. Request message for + Bigtable.ExecuteQuery + instance_name (str): + Required. The unique name of the instance against which + the query should be executed. Values are of the form + ``projects//instances/`` + + This corresponds to the ``instance_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + query (str): + Required. The query string. + This corresponds to the ``query`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile_id (str): + Optional. This value specifies routing for replication. 
+ If not specified, the ``default`` application profile + will be used. + + This corresponds to the ``app_profile_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + Iterable[google.cloud.bigtable_v2.types.ExecuteQueryResponse]: + Response message for + Bigtable.ExecuteQuery + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([instance_name, query, app_profile_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable.ExecuteQueryRequest): + request = bigtable.ExecuteQueryRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if instance_name is not None: + request.instance_name = instance_name + if query is not None: + request.query = query + if app_profile_id is not None: + request.app_profile_id = app_profile_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.execute_query] + + header_params = {} + + routing_param_regex = re.compile("^(?Pprojects/[^/]+/instances/[^/]+)$") + regex_match = routing_param_regex.match(request.instance_name) + if regex_match and regex_match.group("name"): + header_params["name"] = regex_match.group("name") + + if request.app_profile_id: + header_params["app_profile_id"] = request.app_profile_id + + if header_params: + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + def __enter__(self) -> "BigtableClient": return self diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py index d93379723af0..17ff3fb3d1d8 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py @@ -89,6 +89,8 @@ def __init__( # Save the scopes. self._scopes = scopes + if not hasattr(self, "_ignore_credentials"): + self._ignore_credentials: bool = False # If no credentials are provided, then determine the appropriate # defaults. 
@@ -101,7 +103,7 @@ def __init__( credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) - elif credentials is None: + elif credentials is None and not self._ignore_credentials: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) @@ -189,6 +191,11 @@ def _prep_wrapped_messages(self, client_info): default_timeout=43200.0, client_info=client_info, ), + self.execute_query: gapic_v1.method.wrap_method( + self.execute_query, + default_timeout=None, + client_info=client_info, + ), } def close(self): @@ -295,6 +302,15 @@ def read_change_stream( ]: raise NotImplementedError() + @property + def execute_query( + self, + ) -> Callable[ + [bigtable.ExecuteQueryRequest], + Union[bigtable.ExecuteQueryResponse, Awaitable[bigtable.ExecuteQueryResponse]], + ]: + raise NotImplementedError() + @property def kind(self) -> str: raise NotImplementedError() diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py index 2a1a9a284f0c..febdd441dd29 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py @@ -123,7 +123,8 @@ def __init__( if isinstance(channel, grpc.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None @@ -508,6 +509,33 @@ def read_change_stream( ) return self._stubs["read_change_stream"] + @property + def execute_query( + self, + ) -> Callable[[bigtable.ExecuteQueryRequest], bigtable.ExecuteQueryResponse]: + r"""Return a callable for the execute query method over gRPC. + + Executes a BTQL query against a particular Cloud + Bigtable instance. + + Returns: + Callable[[~.ExecuteQueryRequest], + ~.ExecuteQueryResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "execute_query" not in self._stubs: + self._stubs["execute_query"] = self.grpc_channel.unary_stream( + "/google.bigtable.v2.Bigtable/ExecuteQuery", + request_serializer=bigtable.ExecuteQueryRequest.serialize, + response_deserializer=bigtable.ExecuteQueryResponse.deserialize, + ) + return self._stubs["execute_query"] + def close(self): self.grpc_channel.close() diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py index 2d04f79af2c8..40d6a3fa466f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py @@ -170,7 +170,8 @@ def __init__( if isinstance(channel, aio.Channel): # Ignore credentials if a channel was passed. - credentials = False + credentials = None + self._ignore_credentials = True # If a channel was explicitly provided, set it. 
self._grpc_channel = channel self._ssl_channel_credentials = None @@ -518,6 +519,35 @@ def read_change_stream( ) return self._stubs["read_change_stream"] + @property + def execute_query( + self, + ) -> Callable[ + [bigtable.ExecuteQueryRequest], Awaitable[bigtable.ExecuteQueryResponse] + ]: + r"""Return a callable for the execute query method over gRPC. + + Executes a BTQL query against a particular Cloud + Bigtable instance. + + Returns: + Callable[[~.ExecuteQueryRequest], + Awaitable[~.ExecuteQueryResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "execute_query" not in self._stubs: + self._stubs["execute_query"] = self.grpc_channel.unary_stream( + "/google.bigtable.v2.Bigtable/ExecuteQuery", + request_serializer=bigtable.ExecuteQueryRequest.serialize, + response_deserializer=bigtable.ExecuteQueryResponse.deserialize, + ) + return self._stubs["execute_query"] + def _prep_wrapped_messages(self, client_info): """Precompute the wrapped methods, overriding the base class method to use async wrappers.""" self._wrapped_methods = { @@ -576,6 +606,11 @@ def _prep_wrapped_messages(self, client_info): default_timeout=43200.0, client_info=client_info, ), + self.execute_query: gapic_v1.method_async.wrap_method( + self.execute_query, + default_timeout=None, + client_info=client_info, + ), } def close(self): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py index a4d8e0ce9681..a3391005f34c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py @@ -74,6 +74,14 @@ def post_check_and_mutate_row(self, response): logging.log(f"Received response: {response}") return response + def pre_execute_query(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_execute_query(self, response): + logging.log(f"Received response: {response}") + return response + def pre_generate_initial_change_stream_partitions(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -167,6 +175,27 @@ def post_check_and_mutate_row( """ return response + def pre_execute_query( + self, request: bigtable.ExecuteQueryRequest, metadata: Sequence[Tuple[str, str]] + ) -> Tuple[bigtable.ExecuteQueryRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for execute_query + + Override in a subclass to manipulate the request or metadata + before they are sent to the Bigtable server. + """ + return request, metadata + + def post_execute_query( + self, response: rest_streaming.ResponseIterator + ) -> rest_streaming.ResponseIterator: + """Post-rpc interceptor for execute_query + + Override in a subclass to manipulate the response + after it is returned by the Bigtable server but before + it is returned to user code. 
+ """ + return response + def pre_generate_initial_change_stream_partitions( self, request: bigtable.GenerateInitialChangeStreamPartitionsRequest, @@ -545,6 +574,100 @@ def __call__( resp = self._interceptor.post_check_and_mutate_row(resp) return resp + class _ExecuteQuery(BigtableRestStub): + def __hash__(self): + return hash("ExecuteQuery") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable.ExecuteQueryRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> rest_streaming.ResponseIterator: + r"""Call the execute query method over HTTP. + + Args: + request (~.bigtable.ExecuteQueryRequest): + The request object. Request message for + Bigtable.ExecuteQuery + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.bigtable.ExecuteQueryResponse: + Response message for + Bigtable.ExecuteQuery + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{instance_name=projects/*/instances/*}:executeQuery", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_execute_query(request, metadata) + pb_request = bigtable.ExecuteQueryRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = rest_streaming.ResponseIterator( + response, bigtable.ExecuteQueryResponse + ) + resp = self._interceptor.post_execute_query(resp) + return resp + class _GenerateInitialChangeStreamPartitions(BigtableRestStub): def __hash__(self): return hash("GenerateInitialChangeStreamPartitions") @@ -1324,6 +1447,14 @@ def check_and_mutate_row( # In C++ this would require a dynamic_cast return self._CheckAndMutateRow(self._session, self._host, self._interceptor) # type: ignore + @property + def execute_query( + self, + ) -> Callable[[bigtable.ExecuteQueryRequest], bigtable.ExecuteQueryResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._ExecuteQuery(self._session, self._host, self._interceptor) # type: ignore + @property def generate_initial_change_stream_partitions( self, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py index a7961a9107af..e524627cd736 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py @@ -16,6 +16,8 @@ from .bigtable import ( CheckAndMutateRowRequest, CheckAndMutateRowResponse, + ExecuteQueryRequest, + ExecuteQueryResponse, GenerateInitialChangeStreamPartitionsRequest, GenerateInitialChangeStreamPartitionsResponse, MutateRowRequest, @@ -35,12 +37,20 @@ SampleRowKeysResponse, ) from .data import ( + ArrayValue, Cell, Column, + ColumnMetadata, ColumnRange, Family, Mutation, + PartialResultSet, + ProtoFormat, + ProtoRows, + ProtoRowsBatch, + ProtoSchema, ReadModifyWriteRule, + ResultSetMetadata, Row, RowFilter, RowRange, @@ -64,10 +74,15 @@ from .response_params import ( ResponseParams, ) +from .types import ( + Type, +) __all__ = ( "CheckAndMutateRowRequest", "CheckAndMutateRowResponse", + "ExecuteQueryRequest", + "ExecuteQueryResponse", "GenerateInitialChangeStreamPartitionsRequest", "GenerateInitialChangeStreamPartitionsResponse", "MutateRowRequest", @@ -85,12 +100,20 @@ "ReadRowsResponse", "SampleRowKeysRequest", "SampleRowKeysResponse", + "ArrayValue", "Cell", "Column", + "ColumnMetadata", "ColumnRange", "Family", "Mutation", + "PartialResultSet", + "ProtoFormat", + "ProtoRows", + "ProtoRowsBatch", + "ProtoSchema", "ReadModifyWriteRule", + "ResultSetMetadata", "Row", "RowFilter", "RowRange", @@ -107,4 +130,5 @@ "RequestLatencyStats", "RequestStats", "ResponseParams", + "Type", ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py index fa6c566a2348..3818decb6179 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py @@ -49,6 +49,8 @@ "GenerateInitialChangeStreamPartitionsResponse", "ReadChangeStreamRequest", "ReadChangeStreamResponse", + "ExecuteQueryRequest", + "ExecuteQueryResponse", }, ) @@ -1258,4 +1260,125 @@ class CloseStream(proto.Message): ) +class ExecuteQueryRequest(proto.Message): + r"""Request message for Bigtable.ExecuteQuery + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + instance_name (str): + Required. The unique name of the instance against which the + query should be executed. Values are of the form + ``projects//instances/`` + app_profile_id (str): + Optional. This value specifies routing for replication. If + not specified, the ``default`` application profile will be + used. + query (str): + Required. The query string. + proto_format (google.cloud.bigtable_v2.types.ProtoFormat): + Protocol buffer format as described by + ProtoSchema and ProtoRows messages. + + This field is a member of `oneof`_ ``data_format``. + resume_token (bytes): + Optional. If this request is resuming a previously + interrupted query execution, ``resume_token`` should be + copied from the last PartialResultSet yielded before the + interruption. Doing this enables the query execution to + resume where the last one left off. 
The rest of the request + parameters must exactly match the request that yielded this + token. Otherwise the request will fail. + params (MutableMapping[str, google.cloud.bigtable_v2.types.Value]): + Required. params contains string type keys and Bigtable type + values that bind to placeholders in the query string. In + query string, a parameter placeholder consists of the ``@`` + character followed by the parameter name (for example, + ``@firstName``) in the query string. + + For example, if + ``params["firstName"] = bytes_value: "foo" type {bytes_type {}}`` + then ``@firstName`` will be replaced with googlesql bytes + value "foo" in the query string during query evaluation. + + In case of Value.kind is not set, it will be set to + corresponding null value in googlesql. + ``params["firstName"] = type {string_type {}}`` then + ``@firstName`` will be replaced with googlesql null string. + + Value.type should always be set and no inference of type + will be made from Value.kind. If Value.type is not set, we + will return INVALID_ARGUMENT error. + """ + + instance_name: str = proto.Field( + proto.STRING, + number=1, + ) + app_profile_id: str = proto.Field( + proto.STRING, + number=2, + ) + query: str = proto.Field( + proto.STRING, + number=3, + ) + proto_format: data.ProtoFormat = proto.Field( + proto.MESSAGE, + number=4, + oneof="data_format", + message=data.ProtoFormat, + ) + resume_token: bytes = proto.Field( + proto.BYTES, + number=8, + ) + params: MutableMapping[str, data.Value] = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=7, + message=data.Value, + ) + + +class ExecuteQueryResponse(proto.Message): + r"""Response message for Bigtable.ExecuteQuery + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + metadata (google.cloud.bigtable_v2.types.ResultSetMetadata): + Structure of rows in this response stream. + The first (and only the first) response streamed + from the server will be of this type. + + This field is a member of `oneof`_ ``response``. + results (google.cloud.bigtable_v2.types.PartialResultSet): + A partial result set with row data + potentially including additional instructions on + how recent past and future partial responses + should be interpreted. + + This field is a member of `oneof`_ ``response``. 
+ """ + + metadata: data.ResultSetMetadata = proto.Field( + proto.MESSAGE, + number=1, + oneof="response", + message=data.ResultSetMetadata, + ) + results: data.PartialResultSet = proto.Field( + proto.MESSAGE, + number=2, + oneof="response", + message=data.PartialResultSet, + ) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py index b2b853c64d80..ec32cac8216e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py @@ -19,6 +19,10 @@ import proto # type: ignore +from google.cloud.bigtable_v2.types import types +from google.protobuf import timestamp_pb2 # type: ignore +from google.type import date_pb2 # type: ignore + __protobuf__ = proto.module( package="google.bigtable.v2", @@ -28,6 +32,7 @@ "Column", "Cell", "Value", + "ArrayValue", "RowRange", "RowSet", "ColumnRange", @@ -39,6 +44,13 @@ "StreamPartition", "StreamContinuationTokens", "StreamContinuationToken", + "ProtoFormat", + "ColumnMetadata", + "ProtoSchema", + "ResultSetMetadata", + "ProtoRows", + "ProtoRowsBatch", + "PartialResultSet", }, ) @@ -179,6 +191,23 @@ class Value(proto.Message): .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields Attributes: + type_ (google.cloud.bigtable_v2.types.Type): + The verified ``Type`` of this ``Value``, if it cannot be + inferred. + + Read results will never specify the encoding for ``type`` + since the value will already have been decoded by the + server. Furthermore, the ``type`` will be omitted entirely + if it can be inferred from a previous response. The exact + semantics for inferring ``type`` will vary, and are + therefore documented separately for each read method. + + When using composite types (Struct, Array, Map) only the + outermost ``Value`` will specify the ``type``. This + top-level ``type`` will define the types for any nested + ``Struct' fields,``\ Array\ ``elements, or``\ Map\ ``key/value pairs. If a nested``\ Value\ ``provides a``\ type\` + on write, the request will be rejected with + INVALID_ARGUMENT. raw_value (bytes): Represents a raw byte sequence with no type information. The ``type`` field must be omitted. @@ -188,14 +217,58 @@ class Value(proto.Message): Represents a raw cell timestamp with no type information. The ``type`` field must be omitted. + This field is a member of `oneof`_ ``kind``. + bytes_value (bytes): + Represents a typed value transported as a + byte sequence. + + This field is a member of `oneof`_ ``kind``. + string_value (str): + Represents a typed value transported as a + string. + This field is a member of `oneof`_ ``kind``. int_value (int): - Represents a typed value transported as an integer. Default - type for writes: ``Int64`` + Represents a typed value transported as an + integer. + + This field is a member of `oneof`_ ``kind``. + bool_value (bool): + Represents a typed value transported as a + boolean. + + This field is a member of `oneof`_ ``kind``. + float_value (float): + Represents a typed value transported as a + floating point number. + + This field is a member of `oneof`_ ``kind``. + timestamp_value (google.protobuf.timestamp_pb2.Timestamp): + Represents a typed value transported as a + timestamp. + + This field is a member of `oneof`_ ``kind``. 
+ date_value (google.type.date_pb2.Date): + Represents a typed value transported as a + date. + + This field is a member of `oneof`_ ``kind``. + array_value (google.cloud.bigtable_v2.types.ArrayValue): + Represents a typed value transported as a sequence of + values. To differentiate between ``Struct``, ``Array``, and + ``Map``, the outermost ``Value`` must provide an explicit + ``type`` on write. This ``type`` will apply recursively to + the nested ``Struct`` fields, ``Array`` elements, or ``Map`` + key/value pairs, which *must not* supply their own ``type``. This field is a member of `oneof`_ ``kind``. """ + type_: types.Type = proto.Field( + proto.MESSAGE, + number=7, + message=types.Type, + ) raw_value: bytes = proto.Field( proto.BYTES, number=8, @@ -206,11 +279,64 @@ class Value(proto.Message): number=9, oneof="kind", ) + bytes_value: bytes = proto.Field( + proto.BYTES, + number=2, + oneof="kind", + ) + string_value: str = proto.Field( + proto.STRING, + number=3, + oneof="kind", + ) int_value: int = proto.Field( proto.INT64, number=6, oneof="kind", ) + bool_value: bool = proto.Field( + proto.BOOL, + number=10, + oneof="kind", + ) + float_value: float = proto.Field( + proto.DOUBLE, + number=11, + oneof="kind", + ) + timestamp_value: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=12, + oneof="kind", + message=timestamp_pb2.Timestamp, + ) + date_value: date_pb2.Date = proto.Field( + proto.MESSAGE, + number=13, + oneof="kind", + message=date_pb2.Date, + ) + array_value: "ArrayValue" = proto.Field( + proto.MESSAGE, + number=4, + oneof="kind", + message="ArrayValue", + ) + + +class ArrayValue(proto.Message): + r"""``ArrayValue`` is an ordered list of ``Value``. + + Attributes: + values (MutableSequence[google.cloud.bigtable_v2.types.Value]): + The ordered elements in the array. + """ + + values: MutableSequence["Value"] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="Value", + ) class RowRange(proto.Message): @@ -1199,4 +1325,166 @@ class StreamContinuationToken(proto.Message): ) +class ProtoFormat(proto.Message): + r"""Protocol buffers format descriptor, as described by Messages + ProtoSchema and ProtoRows + + """ + + +class ColumnMetadata(proto.Message): + r"""Describes a column in a Bigtable Query Language result set. + + Attributes: + name (str): + The name of the column. + type_ (google.cloud.bigtable_v2.types.Type): + The type of the column. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + type_: types.Type = proto.Field( + proto.MESSAGE, + number=2, + message=types.Type, + ) + + +class ProtoSchema(proto.Message): + r"""ResultSet schema in proto format + + Attributes: + columns (MutableSequence[google.cloud.bigtable_v2.types.ColumnMetadata]): + The columns in the result set. + """ + + columns: MutableSequence["ColumnMetadata"] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="ColumnMetadata", + ) + + +class ResultSetMetadata(proto.Message): + r"""Describes the structure of a Bigtable result set. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + proto_schema (google.cloud.bigtable_v2.types.ProtoSchema): + Schema in proto format + + This field is a member of `oneof`_ ``schema``. + """ + + proto_schema: "ProtoSchema" = proto.Field( + proto.MESSAGE, + number=1, + oneof="schema", + message="ProtoSchema", + ) + + +class ProtoRows(proto.Message): + r"""Rows represented in proto format. 
+ + This should be constructed by concatenating the ``batch_data`` from + each of the relevant ``ProtoRowsBatch`` messages and parsing the + result as a ``ProtoRows`` message. + + Attributes: + values (MutableSequence[google.cloud.bigtable_v2.types.Value]): + A proto rows message consists of a list of values. Every N + complete values defines a row, where N is equal to the + number of entries in the ``metadata.proto_schema.columns`` + value received in the first response. + """ + + values: MutableSequence["Value"] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message="Value", + ) + + +class ProtoRowsBatch(proto.Message): + r"""Batch of serialized ProtoRows. + + Attributes: + batch_data (bytes): + Merge partial results by concatenating these bytes, then + parsing the overall value as a ``ProtoRows`` message. + """ + + batch_data: bytes = proto.Field( + proto.BYTES, + number=1, + ) + + +class PartialResultSet(proto.Message): + r"""A partial result set from the streaming query API. CBT client will + buffer partial_rows from result_sets until it gets a + resumption_token. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + proto_rows_batch (google.cloud.bigtable_v2.types.ProtoRowsBatch): + Partial rows in serialized ProtoRows format. + + This field is a member of `oneof`_ ``partial_rows``. + resume_token (bytes): + An opaque token sent by the server to allow query resumption + and signal the client to accumulate ``partial_rows`` since + the last non-empty ``resume_token``. On resumption, the + resumed query will return the remaining rows for this query. + + If there is a batch in progress, a non-empty + ``resume_token`` means that that the batch of + ``partial_rows`` will be complete after merging the + ``partial_rows`` from this response. The client must only + yield completed batches to the application, and must ensure + that any future retries send the latest token to avoid + returning duplicate data. + + The server may set 'resume_token' without a 'partial_rows'. + If there is a batch in progress the client should yield it. + + The server will also send a sentinel ``resume_token`` when + last batch of ``partial_rows`` is sent. If the client + retries the ExecuteQueryRequest with the sentinel + ``resume_token``, the server will emit it again without any + ``partial_rows``, then return OK. + estimated_batch_size (int): + Estimated size of a new batch. The server will always set + this when returning the first ``partial_rows`` of a batch, + and will not set it at any other time. + + The client can use this estimate to allocate an initial + buffer for the batched results. This helps minimize the + number of allocations required, though the buffer size may + still need to be increased if the estimate is too low. 
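A hypothetical consumer-side sketch of the batching rules described above (not part of the generated code); `responses` is assumed to be an iterable of PartialResultSet messages and `num_columns` the column count taken from `metadata.proto_schema.columns` in the first response:

from google.cloud.bigtable_v2.types import data

def iter_rows(responses, num_columns):
    buffer = bytearray()
    for result_set in responses:  # each item is a data.PartialResultSet
        if result_set.proto_rows_batch.batch_data:
            buffer.extend(result_set.proto_rows_batch.batch_data)
        if result_set.resume_token and buffer:
            # A non-empty resume_token marks the buffered batch complete:
            # concatenate the accumulated batch_data, parse it as ProtoRows,
            # and emit every num_columns values as one row.
            rows = data.ProtoRows.deserialize(bytes(buffer))
            buffer.clear()
            values = list(rows.values)
            for start in range(0, len(values), num_columns):
                yield values[start : start + num_columns]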
+ """ + + proto_rows_batch: "ProtoRowsBatch" = proto.Field( + proto.MESSAGE, + number=3, + oneof="partial_rows", + message="ProtoRowsBatch", + ) + resume_token: bytes = proto.Field( + proto.BYTES, + number=5, + ) + estimated_batch_size: int = proto.Field( + proto.INT32, + number=4, + ) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/types.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/types.py new file mode 100644 index 000000000000..8eb307b3e958 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/types.py @@ -0,0 +1,561 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.bigtable.v2", + manifest={ + "Type", + }, +) + + +class Type(proto.Message): + r"""``Type`` represents the type of data that is written to, read from, + or stored in Bigtable. It is heavily based on the GoogleSQL standard + to help maintain familiarity and consistency across products and + features. + + For compatibility with Bigtable's existing untyped APIs, each + ``Type`` includes an ``Encoding`` which describes how to convert + to/from the underlying data. + + Each encoding also defines the following properties: + + - Order-preserving: Does the encoded value sort consistently with + the original typed value? Note that Bigtable will always sort + data based on the raw encoded value, *not* the decoded type. + + - Example: BYTES values sort in the same order as their raw + encodings. + - Counterexample: Encoding INT64 as a fixed-width decimal string + does *not* preserve sort order when dealing with negative + numbers. ``INT64(1) > INT64(-1)``, but + ``STRING("-00001") > STRING("00001)``. + + - Self-delimiting: If we concatenate two encoded values, can we + always tell where the first one ends and the second one begins? + + - Example: If we encode INT64s to fixed-width STRINGs, the first + value will always contain exactly N digits, possibly preceded + by a sign. + - Counterexample: If we concatenate two UTF-8 encoded STRINGs, + we have no way to tell where the first one ends. + + - Compatibility: Which other systems have matching encoding + schemes? For example, does this encoding have a GoogleSQL + equivalent? HBase? Java? + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + bytes_type (google.cloud.bigtable_v2.types.Type.Bytes): + Bytes + + This field is a member of `oneof`_ ``kind``. 
+ string_type (google.cloud.bigtable_v2.types.Type.String): + String + + This field is a member of `oneof`_ ``kind``. + int64_type (google.cloud.bigtable_v2.types.Type.Int64): + Int64 + + This field is a member of `oneof`_ ``kind``. + float32_type (google.cloud.bigtable_v2.types.Type.Float32): + Float32 + + This field is a member of `oneof`_ ``kind``. + float64_type (google.cloud.bigtable_v2.types.Type.Float64): + Float64 + + This field is a member of `oneof`_ ``kind``. + bool_type (google.cloud.bigtable_v2.types.Type.Bool): + Bool + + This field is a member of `oneof`_ ``kind``. + timestamp_type (google.cloud.bigtable_v2.types.Type.Timestamp): + Timestamp + + This field is a member of `oneof`_ ``kind``. + date_type (google.cloud.bigtable_v2.types.Type.Date): + Date + + This field is a member of `oneof`_ ``kind``. + aggregate_type (google.cloud.bigtable_v2.types.Type.Aggregate): + Aggregate + + This field is a member of `oneof`_ ``kind``. + struct_type (google.cloud.bigtable_v2.types.Type.Struct): + Struct + + This field is a member of `oneof`_ ``kind``. + array_type (google.cloud.bigtable_v2.types.Type.Array): + Array + + This field is a member of `oneof`_ ``kind``. + map_type (google.cloud.bigtable_v2.types.Type.Map): + Map + + This field is a member of `oneof`_ ``kind``. + """ + + class Bytes(proto.Message): + r"""Bytes Values of type ``Bytes`` are stored in ``Value.bytes_value``. + + Attributes: + encoding (google.cloud.bigtable_v2.types.Type.Bytes.Encoding): + The encoding to use when converting to/from + lower level types. + """ + + class Encoding(proto.Message): + r"""Rules used to convert to/from lower level types. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + raw (google.cloud.bigtable_v2.types.Type.Bytes.Encoding.Raw): + Use ``Raw`` encoding. + + This field is a member of `oneof`_ ``encoding``. + """ + + class Raw(proto.Message): + r"""Leaves the value "as-is" + + - Order-preserving? Yes + - Self-delimiting? No + - Compatibility? N/A + + """ + + raw: "Type.Bytes.Encoding.Raw" = proto.Field( + proto.MESSAGE, + number=1, + oneof="encoding", + message="Type.Bytes.Encoding.Raw", + ) + + encoding: "Type.Bytes.Encoding" = proto.Field( + proto.MESSAGE, + number=1, + message="Type.Bytes.Encoding", + ) + + class String(proto.Message): + r"""String Values of type ``String`` are stored in + ``Value.string_value``. + + Attributes: + encoding (google.cloud.bigtable_v2.types.Type.String.Encoding): + The encoding to use when converting to/from + lower level types. + """ + + class Encoding(proto.Message): + r"""Rules used to convert to/from lower level types. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + utf8_bytes (google.cloud.bigtable_v2.types.Type.String.Encoding.Utf8Bytes): + Use ``Utf8Bytes`` encoding. + + This field is a member of `oneof`_ ``encoding``. + """ + + class Utf8Bytes(proto.Message): + r"""UTF-8 encoding + + - Order-preserving? Yes (code point order) + - Self-delimiting? No + - Compatibility? 
+ + - BigQuery Federation ``TEXT`` encoding + - HBase ``Bytes.toBytes`` + - Java ``String#getBytes(StandardCharsets.UTF_8)`` + + """ + + utf8_bytes: "Type.String.Encoding.Utf8Bytes" = proto.Field( + proto.MESSAGE, + number=2, + oneof="encoding", + message="Type.String.Encoding.Utf8Bytes", + ) + + encoding: "Type.String.Encoding" = proto.Field( + proto.MESSAGE, + number=1, + message="Type.String.Encoding", + ) + + class Int64(proto.Message): + r"""Int64 Values of type ``Int64`` are stored in ``Value.int_value``. + + Attributes: + encoding (google.cloud.bigtable_v2.types.Type.Int64.Encoding): + The encoding to use when converting to/from + lower level types. + """ + + class Encoding(proto.Message): + r"""Rules used to convert to/from lower level types. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + big_endian_bytes (google.cloud.bigtable_v2.types.Type.Int64.Encoding.BigEndianBytes): + Use ``BigEndianBytes`` encoding. + + This field is a member of `oneof`_ ``encoding``. + """ + + class BigEndianBytes(proto.Message): + r"""Encodes the value as an 8-byte big endian twos complement ``Bytes`` + value. + + - Order-preserving? No (positive values only) + - Self-delimiting? Yes + - Compatibility? + + - BigQuery Federation ``BINARY`` encoding + - HBase ``Bytes.toBytes`` + - Java ``ByteBuffer.putLong()`` with ``ByteOrder.BIG_ENDIAN`` + + Attributes: + bytes_type (google.cloud.bigtable_v2.types.Type.Bytes): + Deprecated: ignored if set. + """ + + bytes_type: "Type.Bytes" = proto.Field( + proto.MESSAGE, + number=1, + message="Type.Bytes", + ) + + big_endian_bytes: "Type.Int64.Encoding.BigEndianBytes" = proto.Field( + proto.MESSAGE, + number=1, + oneof="encoding", + message="Type.Int64.Encoding.BigEndianBytes", + ) + + encoding: "Type.Int64.Encoding" = proto.Field( + proto.MESSAGE, + number=1, + message="Type.Int64.Encoding", + ) + + class Bool(proto.Message): + r"""bool Values of type ``Bool`` are stored in ``Value.bool_value``.""" + + class Float32(proto.Message): + r"""Float32 Values of type ``Float32`` are stored in + ``Value.float_value``. + + """ + + class Float64(proto.Message): + r"""Float64 Values of type ``Float64`` are stored in + ``Value.float_value``. + + """ + + class Timestamp(proto.Message): + r"""Timestamp Values of type ``Timestamp`` are stored in + ``Value.timestamp_value``. + + """ + + class Date(proto.Message): + r"""Date Values of type ``Date`` are stored in ``Value.date_value``.""" + + class Struct(proto.Message): + r"""A structured data value, consisting of fields which map to + dynamically typed values. Values of type ``Struct`` are stored in + ``Value.array_value`` where entries are in the same order and number + as ``field_types``. + + Attributes: + fields (MutableSequence[google.cloud.bigtable_v2.types.Type.Struct.Field]): + The names and types of the fields in this + struct. + """ + + class Field(proto.Message): + r"""A struct field and its type. + + Attributes: + field_name (str): + The field name (optional). Fields without a ``field_name`` + are considered anonymous and cannot be referenced by name. + type_ (google.cloud.bigtable_v2.types.Type): + The type of values in this field. 
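A minimal, illustrative construction of a composite Type using the nested messages defined above (not from the source; the field names are arbitrary examples):

from google.cloud.bigtable_v2.types import types

# A Struct with a UTF-8 encoded String field and an Int64 field.
person_type = types.Type(
    struct_type=types.Type.Struct(
        fields=[
            types.Type.Struct.Field(
                field_name="name",
                type_=types.Type(
                    string_type=types.Type.String(
                        encoding=types.Type.String.Encoding(
                            utf8_bytes=types.Type.String.Encoding.Utf8Bytes()
                        )
                    )
                ),
            ),
            types.Type.Struct.Field(
                field_name="age",
                type_=types.Type(int64_type=types.Type.Int64()),
            ),
        ]
    )
)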
+ """ + + field_name: str = proto.Field( + proto.STRING, + number=1, + ) + type_: "Type" = proto.Field( + proto.MESSAGE, + number=2, + message="Type", + ) + + fields: MutableSequence["Type.Struct.Field"] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="Type.Struct.Field", + ) + + class Array(proto.Message): + r"""An ordered list of elements of a given type. Values of type + ``Array`` are stored in ``Value.array_value``. + + Attributes: + element_type (google.cloud.bigtable_v2.types.Type): + The type of the elements in the array. This must not be + ``Array``. + """ + + element_type: "Type" = proto.Field( + proto.MESSAGE, + number=1, + message="Type", + ) + + class Map(proto.Message): + r"""A mapping of keys to values of a given type. Values of type ``Map`` + are stored in a ``Value.array_value`` where each entry is another + ``Value.array_value`` with two elements (the key and the value, in + that order). Normally encoded Map values won't have repeated keys, + however, clients are expected to handle the case in which they do. + If the same key appears multiple times, the *last* value takes + precedence. + + Attributes: + key_type (google.cloud.bigtable_v2.types.Type): + The type of a map key. Only ``Bytes``, ``String``, and + ``Int64`` are allowed as key types. + value_type (google.cloud.bigtable_v2.types.Type): + The type of the values in a map. + """ + + key_type: "Type" = proto.Field( + proto.MESSAGE, + number=1, + message="Type", + ) + value_type: "Type" = proto.Field( + proto.MESSAGE, + number=2, + message="Type", + ) + + class Aggregate(proto.Message): + r"""A value that combines incremental updates into a summarized value. + + Data is never directly written or read using type ``Aggregate``. + Writes will provide either the ``input_type`` or ``state_type``, and + reads will always return the ``state_type`` . + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + input_type (google.cloud.bigtable_v2.types.Type): + Type of the inputs that are accumulated by this + ``Aggregate``, which must specify a full encoding. Use + ``AddInput`` mutations to accumulate new inputs. + state_type (google.cloud.bigtable_v2.types.Type): + Output only. Type that holds the internal accumulator state + for the ``Aggregate``. This is a function of the + ``input_type`` and ``aggregator`` chosen, and will always + specify a full encoding. + sum (google.cloud.bigtable_v2.types.Type.Aggregate.Sum): + Sum aggregator. + + This field is a member of `oneof`_ ``aggregator``. + hllpp_unique_count (google.cloud.bigtable_v2.types.Type.Aggregate.HyperLogLogPlusPlusUniqueCount): + HyperLogLogPlusPlusUniqueCount aggregator. + + This field is a member of `oneof`_ ``aggregator``. + max_ (google.cloud.bigtable_v2.types.Type.Aggregate.Max): + Max aggregator. + + This field is a member of `oneof`_ ``aggregator``. + min_ (google.cloud.bigtable_v2.types.Type.Aggregate.Min): + Min aggregator. + + This field is a member of `oneof`_ ``aggregator``. + """ + + class Sum(proto.Message): + r"""Computes the sum of the input values. Allowed input: ``Int64`` + State: same as input + + """ + + class Max(proto.Message): + r"""Computes the max of the input values. 
Allowed input: ``Int64`` + State: same as input + + """ + + class Min(proto.Message): + r"""Computes the min of the input values. Allowed input: ``Int64`` + State: same as input + + """ + + class HyperLogLogPlusPlusUniqueCount(proto.Message): + r"""Computes an approximate unique count over the input values. When + using raw data as input, be careful to use a consistent encoding. + Otherwise the same value encoded differently could count more than + once, or two distinct values could count as identical. Input: Any, + or omit for Raw State: TBD Special state conversions: ``Int64`` (the + unique count estimate) + + """ + + input_type: "Type" = proto.Field( + proto.MESSAGE, + number=1, + message="Type", + ) + state_type: "Type" = proto.Field( + proto.MESSAGE, + number=2, + message="Type", + ) + sum: "Type.Aggregate.Sum" = proto.Field( + proto.MESSAGE, + number=4, + oneof="aggregator", + message="Type.Aggregate.Sum", + ) + hllpp_unique_count: "Type.Aggregate.HyperLogLogPlusPlusUniqueCount" = ( + proto.Field( + proto.MESSAGE, + number=5, + oneof="aggregator", + message="Type.Aggregate.HyperLogLogPlusPlusUniqueCount", + ) + ) + max_: "Type.Aggregate.Max" = proto.Field( + proto.MESSAGE, + number=6, + oneof="aggregator", + message="Type.Aggregate.Max", + ) + min_: "Type.Aggregate.Min" = proto.Field( + proto.MESSAGE, + number=7, + oneof="aggregator", + message="Type.Aggregate.Min", + ) + + bytes_type: Bytes = proto.Field( + proto.MESSAGE, + number=1, + oneof="kind", + message=Bytes, + ) + string_type: String = proto.Field( + proto.MESSAGE, + number=2, + oneof="kind", + message=String, + ) + int64_type: Int64 = proto.Field( + proto.MESSAGE, + number=5, + oneof="kind", + message=Int64, + ) + float32_type: Float32 = proto.Field( + proto.MESSAGE, + number=12, + oneof="kind", + message=Float32, + ) + float64_type: Float64 = proto.Field( + proto.MESSAGE, + number=9, + oneof="kind", + message=Float64, + ) + bool_type: Bool = proto.Field( + proto.MESSAGE, + number=8, + oneof="kind", + message=Bool, + ) + timestamp_type: Timestamp = proto.Field( + proto.MESSAGE, + number=10, + oneof="kind", + message=Timestamp, + ) + date_type: Date = proto.Field( + proto.MESSAGE, + number=11, + oneof="kind", + message=Date, + ) + aggregate_type: Aggregate = proto.Field( + proto.MESSAGE, + number=6, + oneof="kind", + message=Aggregate, + ) + struct_type: Struct = proto.Field( + proto.MESSAGE, + number=7, + oneof="kind", + message=Struct, + ) + array_type: Array = proto.Field( + proto.MESSAGE, + number=3, + oneof="kind", + message=Array, + ) + map_type: Map = proto.Field( + proto.MESSAGE, + number=4, + oneof="kind", + message=Map, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py b/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py index 3d1381c49463..218a54902e02 100644 --- a/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py +++ b/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py @@ -40,6 +40,7 @@ class bigtableCallTransformer(cst.CSTTransformer): CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { 'check_and_mutate_row': ('row_key', 'table_name', 'authorized_view_name', 'app_profile_id', 'predicate_filter', 'true_mutations', 'false_mutations', ), + 'execute_query': ('instance_name', 'query', 'params', 'app_profile_id', 'proto_format', 'resume_token', ), 'generate_initial_change_stream_partitions': ('table_name', 'app_profile_id', ), 
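A hypothetical before/after of the rewrite this script performs for the newly registered 'execute_query' mapping (the client object, resource paths, and SQL below are stand-ins): positional arguments are folded into a request dict in the parameter order listed above, while the retry/timeout/metadata control parameters stay as keyword arguments.

from unittest import mock

client = mock.Mock()  # stands in for a BigtableClient; illustration only

# Old-style call that the transformer would rewrite:
client.execute_query("projects/p/instances/i", "SELECT * FROM t", timeout=30)

# Rewritten form produced by the fixup script:
client.execute_query(
    request={
        "instance_name": "projects/p/instances/i",
        "query": "SELECT * FROM t",
    },
    timeout=30,
)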
'mutate_row': ('row_key', 'mutations', 'table_name', 'authorized_view_name', 'app_profile_id', ), 'mutate_rows': ('entries', 'table_name', 'authorized_view_name', 'app_profile_id', ), diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 8b698a35b26a..c47167487c0b 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -39,10 +39,10 @@ dependencies = [ "google-api-core[grpc] >= 2.16.0, <3.0.0dev", "google-cloud-core >= 1.4.4, <3.0.0dev", + "google-auth >= 2.14.1, <3.0.0dev,!=2.24.0,!=2.25.0", "grpc-google-iam-v1 >= 0.12.4, <1.0.0dev", - "proto-plus >= 1.22.0, <2.0.0dev", - "proto-plus >= 1.22.2, <2.0.0dev; python_version>='3.11'", - "protobuf>=3.19.5,<5.0.0dev,!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5", + "proto-plus >= 1.22.3, <2.0.0dev", + "protobuf>=3.20.2,<6.0.0dev,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5", ] extras = {"libcst": "libcst >= 0.2.5"} diff --git a/packages/google-cloud-bigtable/testing/constraints-3.7.txt b/packages/google-cloud-bigtable/testing/constraints-3.7.txt index c684ca534397..5a3f3e3fc3f6 100644 --- a/packages/google-cloud-bigtable/testing/constraints-3.7.txt +++ b/packages/google-cloud-bigtable/testing/constraints-3.7.txt @@ -6,9 +6,10 @@ # e.g., if setup.py has "foo >= 1.14.0, < 2.0.0dev", # Then this file should have foo==1.14.0 google-api-core==2.16.0 +google-auth==2.14.1 google-cloud-core==2.0.0 grpc-google-iam-v1==0.12.4 -proto-plus==1.22.0 +proto-plus==1.22.3 libcst==0.2.5 -protobuf==3.19.5 +protobuf==3.20.2 diff --git a/packages/google-cloud-bigtable/testing/constraints-3.8.txt b/packages/google-cloud-bigtable/testing/constraints-3.8.txt index d96846bb5a6b..fa7c56db10ee 100644 --- a/packages/google-cloud-bigtable/testing/constraints-3.8.txt +++ b/packages/google-cloud-bigtable/testing/constraints-3.8.txt @@ -6,8 +6,9 @@ # e.g., if setup.py has "foo >= 1.14.0, < 2.0.0dev", # Then this file should have foo==1.14.0 google-api-core==2.16.0 +google-auth==2.14.1 google-cloud-core==2.0.0 grpc-google-iam-v1==0.12.4 -proto-plus==1.22.0 +proto-plus==1.22.3 libcst==0.2.5 -protobuf==3.19.5 +protobuf==3.20.2 diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index e0de275cc153..64fa98937b58 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -1380,12 +1380,7 @@ async def test_create_instance_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_instance ] = mock_object @@ -1793,12 +1788,7 @@ async def test_get_instance_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_instance ] = mock_object @@ -2173,12 +2163,7 @@ async def test_list_instances_async_use_cached_wrapped_rpc( ) # Replace 
cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_instances ] = mock_object @@ -2559,12 +2544,7 @@ async def test_update_instance_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_instance ] = mock_object @@ -2860,12 +2840,7 @@ async def test_partial_update_instance_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.partial_update_instance ] = mock_object @@ -3244,12 +3219,7 @@ async def test_delete_instance_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_instance ] = mock_object @@ -3606,12 +3576,7 @@ async def test_create_cluster_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_cluster ] = mock_object @@ -4009,12 +3974,7 @@ async def test_get_cluster_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_cluster ] = mock_object @@ -4389,12 +4349,7 @@ async def test_list_clusters_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_clusters ] = mock_object @@ -4762,12 +4717,7 @@ async def test_update_cluster_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_cluster ] = mock_object @@ -5058,12 +5008,7 @@ async def test_partial_update_cluster_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.partial_update_cluster ] = mock_object @@ 
-5442,12 +5387,7 @@ async def test_delete_cluster_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_cluster ] = mock_object @@ -5824,12 +5764,7 @@ async def test_create_app_profile_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_app_profile ] = mock_object @@ -6231,12 +6166,7 @@ async def test_get_app_profile_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_app_profile ] = mock_object @@ -6616,12 +6546,7 @@ async def test_list_app_profiles_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_app_profiles ] = mock_object @@ -6873,13 +6798,13 @@ def test_list_app_profiles_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_app_profiles(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -7203,12 +7128,7 @@ async def test_update_app_profile_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_app_profile ] = mock_object @@ -7599,12 +7519,7 @@ async def test_delete_app_profile_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_app_profile ] = mock_object @@ -7973,12 +7888,7 @@ async def test_get_iam_policy_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_iam_policy ] = mock_object @@ -8360,12 +8270,7 @@ async def test_set_iam_policy_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = 
AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.set_iam_policy ] = mock_object @@ -8757,12 +8662,7 @@ async def test_test_iam_permissions_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.test_iam_permissions ] = mock_object @@ -9170,12 +9070,7 @@ async def test_list_hot_tablets_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_hot_tablets ] = mock_object @@ -9413,13 +9308,13 @@ def test_list_hot_tablets_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_hot_tablets(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index 9676ce4fa6a8..4c888da7ca96 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -1361,12 +1361,7 @@ async def test_create_table_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_table ] = mock_object @@ -1767,12 +1762,7 @@ async def test_create_table_from_snapshot_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_table_from_snapshot ] = mock_object @@ -2170,12 +2160,7 @@ async def test_list_tables_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_tables ] = mock_object @@ -2412,13 +2397,13 @@ def test_list_tables_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_tables(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ 
-2733,12 +2718,7 @@ async def test_get_table_async_use_cached_wrapped_rpc(transport: str = "grpc_asy ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_table ] = mock_object @@ -3097,12 +3077,7 @@ async def test_update_table_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_table ] = mock_object @@ -3471,12 +3446,7 @@ async def test_delete_table_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_table ] = mock_object @@ -3831,12 +3801,7 @@ async def test_undelete_table_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.undelete_table ] = mock_object @@ -4216,12 +4181,7 @@ async def test_create_authorized_view_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_authorized_view ] = mock_object @@ -4632,12 +4592,7 @@ async def test_list_authorized_views_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_authorized_views ] = mock_object @@ -4887,13 +4842,13 @@ def test_list_authorized_views_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_authorized_views(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -5228,12 +5183,7 @@ async def test_get_authorized_view_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_authorized_view ] = mock_object @@ -5620,12 +5570,7 @@ async def test_update_authorized_view_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - 
return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_authorized_view ] = mock_object @@ -6019,12 +5964,7 @@ async def test_delete_authorized_view_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_authorized_view ] = mock_object @@ -6409,12 +6349,7 @@ async def test_modify_column_families_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.modify_column_families ] = mock_object @@ -6812,12 +6747,7 @@ async def test_drop_row_range_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.drop_row_range ] = mock_object @@ -7106,12 +7036,7 @@ async def test_generate_consistency_token_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.generate_consistency_token ] = mock_object @@ -7498,12 +7423,7 @@ async def test_check_consistency_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.check_consistency ] = mock_object @@ -7893,12 +7813,7 @@ async def test_snapshot_table_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.snapshot_table ] = mock_object @@ -8303,12 +8218,7 @@ async def test_get_snapshot_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_snapshot ] = mock_object @@ -8677,12 +8587,7 @@ async def test_list_snapshots_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_snapshots ] = mock_object @@ -8920,13 +8825,13 @@ def test_list_snapshots_pager(transport_name: str = "grpc"): 
RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_snapshots(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -9230,12 +9135,7 @@ async def test_delete_snapshot_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_snapshot ] = mock_object @@ -9592,12 +9492,7 @@ async def test_create_backup_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.create_backup ] = mock_object @@ -9993,12 +9888,7 @@ async def test_get_backup_async_use_cached_wrapped_rpc(transport: str = "grpc_as ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_backup ] = mock_object @@ -10374,12 +10264,7 @@ async def test_update_backup_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.update_backup ] = mock_object @@ -10751,12 +10636,7 @@ async def test_delete_backup_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.delete_backup ] = mock_object @@ -11118,12 +10998,7 @@ async def test_list_backups_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.list_backups ] = mock_object @@ -11361,13 +11236,13 @@ def test_list_backups_pager(transport_name: str = "grpc"): RuntimeError, ) - metadata = () - metadata = tuple(metadata) + ( + expected_metadata = () + expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) pager = client.list_backups(request={}) - assert pager._metadata == metadata + assert pager._metadata == expected_metadata results = list(pager) assert len(results) == 6 @@ -11681,12 +11556,7 @@ async def test_restore_table_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = 
mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.restore_table ] = mock_object @@ -11973,12 +11843,7 @@ async def test_copy_backup_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.copy_backup ] = mock_object @@ -12376,12 +12241,7 @@ async def test_get_iam_policy_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.get_iam_policy ] = mock_object @@ -12763,12 +12623,7 @@ async def test_set_iam_policy_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.set_iam_policy ] = mock_object @@ -13160,12 +13015,7 @@ async def test_test_iam_permissions_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.test_iam_permissions ] = mock_object diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py index 4d8a6ec6b8a8..348338d18cc2 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py @@ -51,9 +51,11 @@ from google.cloud.bigtable_v2.types import bigtable from google.cloud.bigtable_v2.types import data from google.cloud.bigtable_v2.types import request_stats +from google.cloud.bigtable_v2.types import types from google.oauth2 import service_account from google.protobuf import duration_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore +from google.type import date_pb2 # type: ignore import google.auth @@ -1236,12 +1238,7 @@ async def test_read_rows_async_use_cached_wrapped_rpc(transport: str = "grpc_asy ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.read_rows ] = mock_object @@ -1616,12 +1613,7 @@ async def test_sample_row_keys_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.sample_row_keys ] = mock_object @@ -1992,12 +1984,7 @@ async def test_mutate_row_async_use_cached_wrapped_rpc(transport: str = "grpc_as ) # Replace cached wrapped function with mock - class 
AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.mutate_row ] = mock_object @@ -2416,12 +2403,7 @@ async def test_mutate_rows_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.mutate_rows ] = mock_object @@ -2821,12 +2803,7 @@ async def test_check_and_mutate_row_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.check_and_mutate_row ] = mock_object @@ -3356,12 +3333,7 @@ async def test_ping_and_warm_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.ping_and_warm ] = mock_object @@ -3726,12 +3698,7 @@ async def test_read_modify_write_row_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.read_modify_write_row ] = mock_object @@ -4155,12 +4122,7 @@ async def test_generate_initial_change_stream_partitions_async_use_cached_wrappe ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.generate_initial_change_stream_partitions ] = mock_object @@ -4560,12 +4522,7 @@ async def test_read_change_stream_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - class AwaitableMock(mock.AsyncMock): - def __await__(self): - self.await_count += 1 - return iter([]) - - mock_object = AwaitableMock() + mock_object = mock.AsyncMock() client._client._transport._wrapped_methods[ client._client._transport.read_change_stream ] = mock_object @@ -4786,57 +4743,95 @@ async def test_read_change_stream_flattened_error_async(): @pytest.mark.parametrize( "request_type", [ - bigtable.ReadRowsRequest, + bigtable.ExecuteQueryRequest, dict, ], ) -def test_read_rows_rest(request_type): +def test_execute_query(request_type, transport: str = "grpc"): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport=transport, ) - # send a request that will satisfy transcoding - request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
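The AwaitableMock removals above presumably rely on the fact that a plain mock.AsyncMock is already awaitable and records its awaits; a small self-contained sketch (not from the patch):

import asyncio
from unittest import mock

async def main():
    rpc = mock.AsyncMock(return_value="response")
    result = await rpc()  # no custom __await__ override needed
    assert result == "response"
    rpc.assert_awaited_once()

asyncio.run(main())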
+ request = request_type() - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable.ReadRowsResponse( - last_scanned_row_key=b"last_scanned_row_key_blob", - ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.execute_query), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = iter([bigtable.ExecuteQueryResponse()]) + response = client.execute_query(request) - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable.ReadRowsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = bigtable.ExecuteQueryRequest() + assert args[0] == request - json_return_value = "[{}]".format(json_return_value) + # Establish that the response is the type that we expect. + for message in response: + assert isinstance(message, bigtable.ExecuteQueryResponse) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - with mock.patch.object(response_value, "iter_content") as iter_content: - iter_content.return_value = iter(json_return_value) - response = client.read_rows(request) - assert isinstance(response, Iterable) - response = next(response) +def test_execute_query_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) - # Establish that the response is the type that we expect. - assert isinstance(response, bigtable.ReadRowsResponse) - assert response.last_scanned_row_key == b"last_scanned_row_key_blob" + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.execute_query), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.execute_query() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable.ExecuteQueryRequest() -def test_read_rows_rest_use_cached_wrapped_rpc(): +def test_execute_query_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable.ExecuteQueryRequest( + instance_name="instance_name_value", + app_profile_id="app_profile_id_value", + query="query_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.execute_query), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.execute_query(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable.ExecuteQueryRequest( + instance_name="instance_name_value", + app_profile_id="app_profile_id_value", + query="query_value", + ) + + +def test_execute_query_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport="grpc", ) # Should wrap all calls on client creation @@ -4844,184 +4839,279 @@ def test_read_rows_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.read_rows in client._transport._wrapped_methods + assert client._transport.execute_query in client._transport._wrapped_methods # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[client._transport.read_rows] = mock_rpc - + client._transport._wrapped_methods[client._transport.execute_query] = mock_rpc request = {} - client.read_rows(request) + client.execute_query(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - client.read_rows(request) + client.execute_query(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_read_rows_rest_interceptors(null_interceptor): - transport = transports.BigtableRestTransport( +@pytest.mark.asyncio +async def test_execute_query_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableAsyncClient( credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), + transport="grpc_asyncio", ) - client = BigtableClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableRestInterceptor, "post_read_rows" - ) as post, mock.patch.object( - transports.BigtableRestInterceptor, "pre_read_rows" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable.ReadRowsRequest.pb(bigtable.ReadRowsRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = bigtable.ReadRowsResponse.to_json( - bigtable.ReadRowsResponse() + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.execute_query), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.ExecuteQueryResponse()] ) - req.return_value._content = "[{}]".format(req.return_value._content) + response = await client.execute_query() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable.ExecuteQueryRequest() - request = bigtable.ReadRowsRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable.ReadRowsResponse() - client.read_rows( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], +@pytest.mark.asyncio +async def test_execute_query_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) - pre.assert_called_once() - post.assert_called_once() + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + # Ensure method has been cached + assert ( + client._client._transport.execute_query + in client._client._transport._wrapped_methods + ) -def test_read_rows_rest_bad_request( - transport: str = "rest", request_type=bigtable.ReadRowsRequest -): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) + # Replace cached wrapped function with mock + mock_object = mock.AsyncMock() + client._client._transport._wrapped_methods[ + client._client._transport.execute_query + ] = mock_object - # send a request that will satisfy transcoding - request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) + request = {} + await client.execute_query(request) - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.read_rows(request) + # Establish that the underlying gRPC stub method was called. + assert mock_object.call_count == 1 + await client.execute_query(request) -def test_read_rows_rest_flattened(): - client = BigtableClient( + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_object.call_count == 2 + + +@pytest.mark.asyncio +async def test_execute_query_async( + transport: str = "grpc_asyncio", request_type=bigtable.ExecuteQueryRequest +): + client = BigtableAsyncClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport=transport, ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. 
- return_value = bigtable.ReadRowsResponse() - - # get arguments that satisfy an http rule for this method - sample_request = { - "table_name": "projects/sample1/instances/sample2/tables/sample3" - } + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() - # get truthy value for each flattened field - mock_args = dict( - table_name="table_name_value", - app_profile_id="app_profile_id_value", + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.execute_query), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.ExecuteQueryResponse()] ) - mock_args.update(sample_request) + response = await client.execute_query(request) - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable.ReadRowsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - json_return_value = "[{}]".format(json_return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = bigtable.ExecuteQueryRequest() + assert args[0] == request - with mock.patch.object(response_value, "iter_content") as iter_content: - iter_content.return_value = iter(json_return_value) - client.read_rows(**mock_args) + # Establish that the response is the type that we expect. + message = await response.read() + assert isinstance(message, bigtable.ExecuteQueryResponse) - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{table_name=projects/*/instances/*/tables/*}:readRows" - % client.transport._host, - args[1], - ) +@pytest.mark.asyncio +async def test_execute_query_async_from_dict(): + await test_execute_query_async(request_type=dict) -def test_read_rows_rest_flattened_error(transport: str = "rest"): + +def test_execute_query_routing_parameters(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, ) - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.read_rows( - bigtable.ReadRowsRequest(), - table_name="table_name_value", + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable.ExecuteQueryRequest( + **{"instance_name": "projects/sample1/instances/sample2"} + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.execute_query), "__call__") as call: + call.return_value = iter([bigtable.ExecuteQueryResponse()]) + client.execute_query(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + _, _, kw = call.mock_calls[0] + # This test doesn't assert anything useful. 
+ assert kw["metadata"] + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable.ExecuteQueryRequest(**{"app_profile_id": "sample1"}) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.execute_query), "__call__") as call: + call.return_value = iter([bigtable.ExecuteQueryResponse()]) + client.execute_query(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + _, _, kw = call.mock_calls[0] + # This test doesn't assert anything useful. + assert kw["metadata"] + + +def test_execute_query_flattened(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.execute_query), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = iter([bigtable.ExecuteQueryResponse()]) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.execute_query( + instance_name="instance_name_value", + query="query_value", app_profile_id="app_profile_id_value", ) + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].instance_name + mock_val = "instance_name_value" + assert arg == mock_val + arg = args[0].query + mock_val = "query_value" + assert arg == mock_val + arg = args[0].app_profile_id + mock_val = "app_profile_id_value" + assert arg == mock_val + -def test_read_rows_rest_error(): +def test_execute_query_flattened_error(): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.execute_query( + bigtable.ExecuteQueryRequest(), + instance_name="instance_name_value", + query="query_value", + app_profile_id="app_profile_id_value", + ) + + +@pytest.mark.asyncio +async def test_execute_query_flattened_async(): + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.execute_query), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = iter([bigtable.ExecuteQueryResponse()]) + + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.execute_query( + instance_name="instance_name_value", + query="query_value", + app_profile_id="app_profile_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].instance_name + mock_val = "instance_name_value" + assert arg == mock_val + arg = args[0].query + mock_val = "query_value" + assert arg == mock_val + arg = args[0].app_profile_id + mock_val = "app_profile_id_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_execute_query_flattened_error_async(): + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), ) + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.execute_query( + bigtable.ExecuteQueryRequest(), + instance_name="instance_name_value", + query="query_value", + app_profile_id="app_profile_id_value", + ) + @pytest.mark.parametrize( "request_type", [ - bigtable.SampleRowKeysRequest, + bigtable.ReadRowsRequest, dict, ], ) -def test_sample_row_keys_rest(request_type): +def test_read_rows_rest(request_type): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -5034,16 +5124,15 @@ def test_sample_row_keys_rest(request_type): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = bigtable.SampleRowKeysResponse( - row_key=b"row_key_blob", - offset_bytes=1293, + return_value = bigtable.ReadRowsResponse( + last_scanned_row_key=b"last_scanned_row_key_blob", ) # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 # Convert return value to protobuf type - return_value = bigtable.SampleRowKeysResponse.pb(return_value) + return_value = bigtable.ReadRowsResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) json_return_value = "[{}]".format(json_return_value) @@ -5052,55 +5141,598 @@ def test_sample_row_keys_rest(request_type): req.return_value = response_value with mock.patch.object(response_value, "iter_content") as iter_content: iter_content.return_value = iter(json_return_value) - response = client.sample_row_keys(request) + response = client.read_rows(request) + + assert isinstance(response, Iterable) + response = next(response) + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable.ReadRowsResponse) + assert response.last_scanned_row_key == b"last_scanned_row_key_blob" + + +def test_read_rows_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.read_rows in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.read_rows] = mock_rpc + + request = {} + client.read_rows(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.read_rows(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_read_rows_rest_interceptors(null_interceptor): + transport = transports.BigtableRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), + ) + client = BigtableClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableRestInterceptor, "post_read_rows" + ) as post, mock.patch.object( + transports.BigtableRestInterceptor, "pre_read_rows" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable.ReadRowsRequest.pb(bigtable.ReadRowsRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = bigtable.ReadRowsResponse.to_json( + bigtable.ReadRowsResponse() + ) + req.return_value._content = "[{}]".format(req.return_value._content) + + request = bigtable.ReadRowsRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable.ReadRowsResponse() + + client.read_rows( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_read_rows_rest_bad_request( + transport: str = "rest", request_type=bigtable.ReadRowsRequest +): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.read_rows(request) + + +def test_read_rows_rest_flattened(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = bigtable.ReadRowsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = { + "table_name": "projects/sample1/instances/sample2/tables/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + table_name="table_name_value", + app_profile_id="app_profile_id_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable.ReadRowsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + json_return_value = "[{}]".format(json_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + with mock.patch.object(response_value, "iter_content") as iter_content: + iter_content.return_value = iter(json_return_value) + client.read_rows(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{table_name=projects/*/instances/*/tables/*}:readRows" + % client.transport._host, + args[1], + ) + + +def test_read_rows_rest_flattened_error(transport: str = "rest"): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.read_rows( + bigtable.ReadRowsRequest(), + table_name="table_name_value", + app_profile_id="app_profile_id_value", + ) + + +def test_read_rows_rest_error(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable.SampleRowKeysRequest, + dict, + ], +) +def test_sample_row_keys_rest(request_type): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.SampleRowKeysResponse( + row_key=b"row_key_blob", + offset_bytes=1293, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable.SampleRowKeysResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + json_return_value = "[{}]".format(json_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + with mock.patch.object(response_value, "iter_content") as iter_content: + iter_content.return_value = iter(json_return_value) + response = client.sample_row_keys(request) + + assert isinstance(response, Iterable) + response = next(response) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, bigtable.SampleRowKeysResponse) + assert response.row_key == b"row_key_blob" + assert response.offset_bytes == 1293 + + +def test_sample_row_keys_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.sample_row_keys in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.sample_row_keys] = mock_rpc + + request = {} + client.sample_row_keys(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.sample_row_keys(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_sample_row_keys_rest_interceptors(null_interceptor): + transport = transports.BigtableRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), + ) + client = BigtableClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableRestInterceptor, "post_sample_row_keys" + ) as post, mock.patch.object( + transports.BigtableRestInterceptor, "pre_sample_row_keys" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable.SampleRowKeysRequest.pb(bigtable.SampleRowKeysRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = bigtable.SampleRowKeysResponse.to_json( + bigtable.SampleRowKeysResponse() + ) + req.return_value._content = "[{}]".format(req.return_value._content) + + request = bigtable.SampleRowKeysRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable.SampleRowKeysResponse() + + client.sample_row_keys( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_sample_row_keys_rest_bad_request( + transport: str = "rest", request_type=bigtable.SampleRowKeysRequest +): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.sample_row_keys(request) + + +def test_sample_row_keys_rest_flattened(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.SampleRowKeysResponse() + + # get arguments that satisfy an http rule for this method + sample_request = { + "table_name": "projects/sample1/instances/sample2/tables/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + table_name="table_name_value", + app_profile_id="app_profile_id_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable.SampleRowKeysResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + json_return_value = "[{}]".format(json_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + with mock.patch.object(response_value, "iter_content") as iter_content: + iter_content.return_value = iter(json_return_value) + client.sample_row_keys(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys" + % client.transport._host, + args[1], + ) + + +def test_sample_row_keys_rest_flattened_error(transport: str = "rest"): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.sample_row_keys( + bigtable.SampleRowKeysRequest(), + table_name="table_name_value", + app_profile_id="app_profile_id_value", + ) + + +def test_sample_row_keys_rest_error(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable.MutateRowRequest, + dict, + ], +) +def test_mutate_row_rest(request_type): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = bigtable.MutateRowResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable.MutateRowResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.mutate_row(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable.MutateRowResponse) + + +def test_mutate_row_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.mutate_row in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.mutate_row] = mock_rpc + + request = {} + client.mutate_row(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.mutate_row(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_mutate_row_rest_required_fields(request_type=bigtable.MutateRowRequest): + transport_class = transports.BigtableRestTransport + + request_init = {} + request_init["row_key"] = b"" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).mutate_row._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["rowKey"] = b"row_key_blob" - assert isinstance(response, Iterable) - response = next(response) + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).mutate_row._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) - # Establish that the response is the type that we expect. 
- assert isinstance(response, bigtable.SampleRowKeysResponse) - assert response.row_key == b"row_key_blob" - assert response.offset_bytes == 1293 + # verify required fields with non-default values are left alone + assert "rowKey" in jsonified_request + assert jsonified_request["rowKey"] == b"row_key_blob" + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) -def test_sample_row_keys_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) + # Designate an appropriate value for the returned response. + return_value = bigtable.MutateRowResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() + response_value = Response() + response_value.status_code = 200 - # Ensure method has been cached - assert client._transport.sample_row_keys in client._transport._wrapped_methods + # Convert return value to protobuf type + return_value = bigtable.MutateRowResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.sample_row_keys] = mock_rpc + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value - request = {} - client.sample_row_keys(request) + response = client.mutate_row(request) - # Establish that the underlying gRPC stub method was called. 
- assert mock_rpc.call_count == 1 + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params - client.sample_row_keys(request) - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 +def test_mutate_row_rest_unset_required_fields(): + transport = transports.BigtableRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.mutate_row._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "rowKey", + "mutations", + ) + ) + ) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_sample_row_keys_rest_interceptors(null_interceptor): +def test_mutate_row_rest_interceptors(null_interceptor): transport = transports.BigtableRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), @@ -5111,13 +5743,13 @@ def test_sample_row_keys_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableRestInterceptor, "post_sample_row_keys" + transports.BigtableRestInterceptor, "post_mutate_row" ) as post, mock.patch.object( - transports.BigtableRestInterceptor, "pre_sample_row_keys" + transports.BigtableRestInterceptor, "pre_mutate_row" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = bigtable.SampleRowKeysRequest.pb(bigtable.SampleRowKeysRequest()) + pb_message = bigtable.MutateRowRequest.pb(bigtable.MutateRowRequest()) transcode.return_value = { "method": "post", "uri": "my_uri", @@ -5128,20 +5760,19 @@ def test_sample_row_keys_rest_interceptors(null_interceptor): req.return_value = Response() req.return_value.status_code = 200 req.return_value.request = PreparedRequest() - req.return_value._content = bigtable.SampleRowKeysResponse.to_json( - bigtable.SampleRowKeysResponse() + req.return_value._content = bigtable.MutateRowResponse.to_json( + bigtable.MutateRowResponse() ) - req.return_value._content = "[{}]".format(req.return_value._content) - request = bigtable.SampleRowKeysRequest() + request = bigtable.MutateRowRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = bigtable.SampleRowKeysResponse() + post.return_value = bigtable.MutateRowResponse() - client.sample_row_keys( + client.mutate_row( request, metadata=[ ("key", "val"), @@ -5153,8 +5784,8 @@ def test_sample_row_keys_rest_interceptors(null_interceptor): post.assert_called_once() -def test_sample_row_keys_rest_bad_request( - transport: str = "rest", request_type=bigtable.SampleRowKeysRequest +def test_mutate_row_rest_bad_request( + transport: str = "rest", request_type=bigtable.MutateRowRequest ): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), @@ -5174,10 +5805,10 @@ def test_sample_row_keys_rest_bad_request( response_value.status_code = 400 response_value.request = Request() req.return_value = response_value - client.sample_row_keys(request) + client.mutate_row(request) -def test_sample_row_keys_rest_flattened(): +def test_mutate_row_rest_flattened(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -5186,7 +5817,7 @@ def test_sample_row_keys_rest_flattened(): # Mock the http request call within the method and fake a response. 
with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = bigtable.SampleRowKeysResponse() + return_value = bigtable.MutateRowResponse() # get arguments that satisfy an http rule for this method sample_request = { @@ -5196,6 +5827,12 @@ def test_sample_row_keys_rest_flattened(): # get truthy value for each flattened field mock_args = dict( table_name="table_name_value", + row_key=b"row_key_blob", + mutations=[ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ], app_profile_id="app_profile_id_value", ) mock_args.update(sample_request) @@ -5204,28 +5841,25 @@ def test_sample_row_keys_rest_flattened(): response_value = Response() response_value.status_code = 200 # Convert return value to protobuf type - return_value = bigtable.SampleRowKeysResponse.pb(return_value) + return_value = bigtable.MutateRowResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) - json_return_value = "[{}]".format(json_return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - with mock.patch.object(response_value, "iter_content") as iter_content: - iter_content.return_value = iter(json_return_value) - client.sample_row_keys(**mock_args) + client.mutate_row(**mock_args) # Establish that the underlying call was made with the expected # request object values. assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys" + "%s/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow" % client.transport._host, args[1], ) -def test_sample_row_keys_rest_flattened_error(transport: str = "rest"): +def test_mutate_row_rest_flattened_error(transport: str = "rest"): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -5234,14 +5868,20 @@ def test_sample_row_keys_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.sample_row_keys( - bigtable.SampleRowKeysRequest(), + client.mutate_row( + bigtable.MutateRowRequest(), table_name="table_name_value", + row_key=b"row_key_blob", + mutations=[ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ], app_profile_id="app_profile_id_value", ) -def test_sample_row_keys_rest_error(): +def test_mutate_row_rest_error(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -5250,11 +5890,11 @@ def test_sample_row_keys_rest_error(): @pytest.mark.parametrize( "request_type", [ - bigtable.MutateRowRequest, + bigtable.MutateRowsRequest, dict, ], ) -def test_mutate_row_rest(request_type): +def test_mutate_rows_rest(request_type): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -5267,24 +5907,31 @@ def test_mutate_row_rest(request_type): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. 
- return_value = bigtable.MutateRowResponse() + return_value = bigtable.MutateRowsResponse() # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 # Convert return value to protobuf type - return_value = bigtable.MutateRowResponse.pb(return_value) + return_value = bigtable.MutateRowsResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) + json_return_value = "[{}]".format(json_return_value) + response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.mutate_row(request) + with mock.patch.object(response_value, "iter_content") as iter_content: + iter_content.return_value = iter(json_return_value) + response = client.mutate_rows(request) + + assert isinstance(response, Iterable) + response = next(response) # Establish that the response is the type that we expect. - assert isinstance(response, bigtable.MutateRowResponse) + assert isinstance(response, bigtable.MutateRowsResponse) -def test_mutate_row_rest_use_cached_wrapped_rpc(): +def test_mutate_rows_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -5298,33 +5945,32 @@ def test_mutate_row_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.mutate_row in client._transport._wrapped_methods + assert client._transport.mutate_rows in client._transport._wrapped_methods # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[client._transport.mutate_row] = mock_rpc + client._transport._wrapped_methods[client._transport.mutate_rows] = mock_rpc request = {} - client.mutate_row(request) + client.mutate_rows(request) # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 - client.mutate_row(request) + client.mutate_rows(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_mutate_row_rest_required_fields(request_type=bigtable.MutateRowRequest): +def test_mutate_rows_rest_required_fields(request_type=bigtable.MutateRowsRequest): transport_class = transports.BigtableRestTransport request_init = {} - request_init["row_key"] = b"" request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( @@ -5335,21 +5981,17 @@ def test_mutate_row_rest_required_fields(request_type=bigtable.MutateRowRequest) unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).mutate_row._get_unset_required_fields(jsonified_request) + ).mutate_rows._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present - jsonified_request["rowKey"] = b"row_key_blob" - unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).mutate_row._get_unset_required_fields(jsonified_request) + ).mutate_rows._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone - assert "rowKey" in jsonified_request - assert jsonified_request["rowKey"] == b"row_key_blob" client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), @@ -5358,7 +6000,7 @@ def test_mutate_row_rest_required_fields(request_type=bigtable.MutateRowRequest) request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = bigtable.MutateRowResponse() + return_value = bigtable.MutateRowsResponse() # Mock the http request call within the method and fake a response. 
with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -5380,38 +6022,33 @@ def test_mutate_row_rest_required_fields(request_type=bigtable.MutateRowRequest) response_value.status_code = 200 # Convert return value to protobuf type - return_value = bigtable.MutateRowResponse.pb(return_value) + return_value = bigtable.MutateRowsResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) + json_return_value = "[{}]".format(json_return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.mutate_row(request) + with mock.patch.object(response_value, "iter_content") as iter_content: + iter_content.return_value = iter(json_return_value) + response = client.mutate_rows(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_mutate_row_rest_unset_required_fields(): +def test_mutate_rows_rest_unset_required_fields(): transport = transports.BigtableRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.mutate_row._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(()) - & set( - ( - "rowKey", - "mutations", - ) - ) - ) + unset_fields = transport.mutate_rows._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("entries",))) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_mutate_row_rest_interceptors(null_interceptor): +def test_mutate_rows_rest_interceptors(null_interceptor): transport = transports.BigtableRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), @@ -5422,13 +6059,13 @@ def test_mutate_row_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableRestInterceptor, "post_mutate_row" + transports.BigtableRestInterceptor, "post_mutate_rows" ) as post, mock.patch.object( - transports.BigtableRestInterceptor, "pre_mutate_row" + transports.BigtableRestInterceptor, "pre_mutate_rows" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = bigtable.MutateRowRequest.pb(bigtable.MutateRowRequest()) + pb_message = bigtable.MutateRowsRequest.pb(bigtable.MutateRowsRequest()) transcode.return_value = { "method": "post", "uri": "my_uri", @@ -5439,19 +6076,20 @@ def test_mutate_row_rest_interceptors(null_interceptor): req.return_value = Response() req.return_value.status_code = 200 req.return_value.request = PreparedRequest() - req.return_value._content = bigtable.MutateRowResponse.to_json( - bigtable.MutateRowResponse() + req.return_value._content = bigtable.MutateRowsResponse.to_json( + bigtable.MutateRowsResponse() ) + req.return_value._content = "[{}]".format(req.return_value._content) - request = bigtable.MutateRowRequest() + request = bigtable.MutateRowsRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = bigtable.MutateRowResponse() + post.return_value = bigtable.MutateRowsResponse() - client.mutate_row( + client.mutate_rows( request, metadata=[ ("key", "val"), @@ -5463,8 +6101,8 @@ def test_mutate_row_rest_interceptors(null_interceptor): post.assert_called_once() -def test_mutate_row_rest_bad_request( - transport: str = "rest", request_type=bigtable.MutateRowRequest +def 
test_mutate_rows_rest_bad_request( + transport: str = "rest", request_type=bigtable.MutateRowsRequest ): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), @@ -5484,10 +6122,10 @@ def test_mutate_row_rest_bad_request( response_value.status_code = 400 response_value.request = Request() req.return_value = response_value - client.mutate_row(request) + client.mutate_rows(request) -def test_mutate_row_rest_flattened(): +def test_mutate_rows_rest_flattened(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -5496,7 +6134,7 @@ def test_mutate_row_rest_flattened(): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = bigtable.MutateRowResponse() + return_value = bigtable.MutateRowsResponse() # get arguments that satisfy an http rule for this method sample_request = { @@ -5506,12 +6144,7 @@ def test_mutate_row_rest_flattened(): # get truthy value for each flattened field mock_args = dict( table_name="table_name_value", - row_key=b"row_key_blob", - mutations=[ - data.Mutation( - set_cell=data.Mutation.SetCell(family_name="family_name_value") - ) - ], + entries=[bigtable.MutateRowsRequest.Entry(row_key=b"row_key_blob")], app_profile_id="app_profile_id_value", ) mock_args.update(sample_request) @@ -5520,25 +6153,28 @@ def test_mutate_row_rest_flattened(): response_value = Response() response_value.status_code = 200 # Convert return value to protobuf type - return_value = bigtable.MutateRowResponse.pb(return_value) + return_value = bigtable.MutateRowsResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) + json_return_value = "[{}]".format(json_return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - client.mutate_row(**mock_args) + with mock.patch.object(response_value, "iter_content") as iter_content: + iter_content.return_value = iter(json_return_value) + client.mutate_rows(**mock_args) # Establish that the underlying call was made with the expected # request object values. assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow" + "%s/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows" % client.transport._host, args[1], ) -def test_mutate_row_rest_flattened_error(transport: str = "rest"): +def test_mutate_rows_rest_flattened_error(transport: str = "rest"): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -5547,20 +6183,15 @@ def test_mutate_row_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): - client.mutate_row( - bigtable.MutateRowRequest(), + client.mutate_rows( + bigtable.MutateRowsRequest(), table_name="table_name_value", - row_key=b"row_key_blob", - mutations=[ - data.Mutation( - set_cell=data.Mutation.SetCell(family_name="family_name_value") - ) - ], + entries=[bigtable.MutateRowsRequest.Entry(row_key=b"row_key_blob")], app_profile_id="app_profile_id_value", ) -def test_mutate_row_rest_error(): +def test_mutate_rows_rest_error(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -5569,11 +6200,11 @@ def test_mutate_row_rest_error(): @pytest.mark.parametrize( "request_type", [ - bigtable.MutateRowsRequest, + bigtable.CheckAndMutateRowRequest, dict, ], ) -def test_mutate_rows_rest(request_type): +def test_check_and_mutate_row_rest(request_type): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -5586,31 +6217,27 @@ def test_mutate_rows_rest(request_type): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = bigtable.MutateRowsResponse() + return_value = bigtable.CheckAndMutateRowResponse( + predicate_matched=True, + ) # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 # Convert return value to protobuf type - return_value = bigtable.MutateRowsResponse.pb(return_value) + return_value = bigtable.CheckAndMutateRowResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) - json_return_value = "[{}]".format(json_return_value) - response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - with mock.patch.object(response_value, "iter_content") as iter_content: - iter_content.return_value = iter(json_return_value) - response = client.mutate_rows(request) - - assert isinstance(response, Iterable) - response = next(response) + response = client.check_and_mutate_row(request) # Establish that the response is the type that we expect. - assert isinstance(response, bigtable.MutateRowsResponse) + assert isinstance(response, bigtable.CheckAndMutateRowResponse) + assert response.predicate_matched is True -def test_mutate_rows_rest_use_cached_wrapped_rpc(): +def test_check_and_mutate_row_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -5624,32 +6251,39 @@ def test_mutate_rows_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.mutate_rows in client._transport._wrapped_methods + assert ( + client._transport.check_and_mutate_row in client._transport._wrapped_methods + ) # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[client._transport.mutate_rows] = mock_rpc + client._transport._wrapped_methods[ + client._transport.check_and_mutate_row + ] = mock_rpc request = {} - client.mutate_rows(request) + client.check_and_mutate_row(request) # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 - client.mutate_rows(request) + client.check_and_mutate_row(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_mutate_rows_rest_required_fields(request_type=bigtable.MutateRowsRequest): +def test_check_and_mutate_row_rest_required_fields( + request_type=bigtable.CheckAndMutateRowRequest, +): transport_class = transports.BigtableRestTransport request_init = {} + request_init["row_key"] = b"" request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( @@ -5660,17 +6294,21 @@ def test_mutate_rows_rest_required_fields(request_type=bigtable.MutateRowsReques unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).mutate_rows._get_unset_required_fields(jsonified_request) + ).check_and_mutate_row._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present + jsonified_request["rowKey"] = b"row_key_blob" + unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).mutate_rows._get_unset_required_fields(jsonified_request) + ).check_and_mutate_row._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone + assert "rowKey" in jsonified_request + assert jsonified_request["rowKey"] == b"row_key_blob" client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), @@ -5679,7 +6317,7 @@ def test_mutate_rows_rest_required_fields(request_type=bigtable.MutateRowsReques request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = bigtable.MutateRowsResponse() + return_value = bigtable.CheckAndMutateRowResponse() # Mock the http request call within the method and fake a response. 
with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -5701,33 +6339,30 @@ def test_mutate_rows_rest_required_fields(request_type=bigtable.MutateRowsReques response_value.status_code = 200 # Convert return value to protobuf type - return_value = bigtable.MutateRowsResponse.pb(return_value) + return_value = bigtable.CheckAndMutateRowResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) - json_return_value = "[{}]".format(json_return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - with mock.patch.object(response_value, "iter_content") as iter_content: - iter_content.return_value = iter(json_return_value) - response = client.mutate_rows(request) + response = client.check_and_mutate_row(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_mutate_rows_rest_unset_required_fields(): +def test_check_and_mutate_row_rest_unset_required_fields(): transport = transports.BigtableRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.mutate_rows._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("entries",))) + unset_fields = transport.check_and_mutate_row._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("rowKey",))) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_mutate_rows_rest_interceptors(null_interceptor): +def test_check_and_mutate_row_rest_interceptors(null_interceptor): transport = transports.BigtableRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), @@ -5738,13 +6373,15 @@ def test_mutate_rows_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableRestInterceptor, "post_mutate_rows" + transports.BigtableRestInterceptor, "post_check_and_mutate_row" ) as post, mock.patch.object( - transports.BigtableRestInterceptor, "pre_mutate_rows" + transports.BigtableRestInterceptor, "pre_check_and_mutate_row" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = bigtable.MutateRowsRequest.pb(bigtable.MutateRowsRequest()) + pb_message = bigtable.CheckAndMutateRowRequest.pb( + bigtable.CheckAndMutateRowRequest() + ) transcode.return_value = { "method": "post", "uri": "my_uri", @@ -5755,20 +6392,19 @@ def test_mutate_rows_rest_interceptors(null_interceptor): req.return_value = Response() req.return_value.status_code = 200 req.return_value.request = PreparedRequest() - req.return_value._content = bigtable.MutateRowsResponse.to_json( - bigtable.MutateRowsResponse() + req.return_value._content = bigtable.CheckAndMutateRowResponse.to_json( + bigtable.CheckAndMutateRowResponse() ) - req.return_value._content = "[{}]".format(req.return_value._content) - request = bigtable.MutateRowsRequest() + request = bigtable.CheckAndMutateRowRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = bigtable.MutateRowsResponse() + post.return_value = bigtable.CheckAndMutateRowResponse() - client.mutate_rows( + client.check_and_mutate_row( request, metadata=[ ("key", "val"), @@ -5780,8 +6416,8 @@ def test_mutate_rows_rest_interceptors(null_interceptor): post.assert_called_once() -def 
test_mutate_rows_rest_bad_request( - transport: str = "rest", request_type=bigtable.MutateRowsRequest +def test_check_and_mutate_row_rest_bad_request( + transport: str = "rest", request_type=bigtable.CheckAndMutateRowRequest ): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), @@ -5801,10 +6437,10 @@ def test_mutate_rows_rest_bad_request( response_value.status_code = 400 response_value.request = Request() req.return_value = response_value - client.mutate_rows(request) + client.check_and_mutate_row(request) -def test_mutate_rows_rest_flattened(): +def test_check_and_mutate_row_rest_flattened(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -5813,7 +6449,7 @@ def test_mutate_rows_rest_flattened(): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = bigtable.MutateRowsResponse() + return_value = bigtable.CheckAndMutateRowResponse() # get arguments that satisfy an http rule for this method sample_request = { @@ -5823,7 +6459,28 @@ def test_mutate_rows_rest_flattened(): # get truthy value for each flattened field mock_args = dict( table_name="table_name_value", - entries=[bigtable.MutateRowsRequest.Entry(row_key=b"row_key_blob")], + row_key=b"row_key_blob", + predicate_filter=data.RowFilter( + chain=data.RowFilter.Chain( + filters=[ + data.RowFilter( + chain=data.RowFilter.Chain( + filters=[data.RowFilter(chain=None)] + ) + ) + ] + ) + ), + true_mutations=[ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ], + false_mutations=[ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ], app_profile_id="app_profile_id_value", ) mock_args.update(sample_request) @@ -5832,28 +6489,25 @@ def test_mutate_rows_rest_flattened(): response_value = Response() response_value.status_code = 200 # Convert return value to protobuf type - return_value = bigtable.MutateRowsResponse.pb(return_value) + return_value = bigtable.CheckAndMutateRowResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) - json_return_value = "[{}]".format(json_return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - with mock.patch.object(response_value, "iter_content") as iter_content: - iter_content.return_value = iter(json_return_value) - client.mutate_rows(**mock_args) + client.check_and_mutate_row(**mock_args) # Establish that the underlying call was made with the expected # request object values. assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows" + "%s/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow" % client.transport._host, args[1], ) -def test_mutate_rows_rest_flattened_error(transport: str = "rest"): +def test_check_and_mutate_row_rest_flattened_error(transport: str = "rest"): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -5862,15 +6516,36 @@ def test_mutate_rows_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): - client.mutate_rows( - bigtable.MutateRowsRequest(), + client.check_and_mutate_row( + bigtable.CheckAndMutateRowRequest(), table_name="table_name_value", - entries=[bigtable.MutateRowsRequest.Entry(row_key=b"row_key_blob")], + row_key=b"row_key_blob", + predicate_filter=data.RowFilter( + chain=data.RowFilter.Chain( + filters=[ + data.RowFilter( + chain=data.RowFilter.Chain( + filters=[data.RowFilter(chain=None)] + ) + ) + ] + ) + ), + true_mutations=[ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ], + false_mutations=[ + data.Mutation( + set_cell=data.Mutation.SetCell(family_name="family_name_value") + ) + ], app_profile_id="app_profile_id_value", ) -def test_mutate_rows_rest_error(): +def test_check_and_mutate_row_rest_error(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -5879,44 +6554,41 @@ def test_mutate_rows_rest_error(): @pytest.mark.parametrize( "request_type", [ - bigtable.CheckAndMutateRowRequest, + bigtable.PingAndWarmRequest, dict, ], ) -def test_check_and_mutate_row_rest(request_type): +def test_ping_and_warm_rest(request_type): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) # send a request that will satisfy transcoding - request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + request_init = {"name": "projects/sample1/instances/sample2"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = bigtable.CheckAndMutateRowResponse( - predicate_matched=True, - ) + return_value = bigtable.PingAndWarmResponse() # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 # Convert return value to protobuf type - return_value = bigtable.CheckAndMutateRowResponse.pb(return_value) + return_value = bigtable.PingAndWarmResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.check_and_mutate_row(request) + response = client.ping_and_warm(request) # Establish that the response is the type that we expect. - assert isinstance(response, bigtable.CheckAndMutateRowResponse) - assert response.predicate_matched is True + assert isinstance(response, bigtable.PingAndWarmResponse) -def test_check_and_mutate_row_rest_use_cached_wrapped_rpc(): +def test_ping_and_warm_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -5930,39 +6602,33 @@ def test_check_and_mutate_row_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert ( - client._transport.check_and_mutate_row in client._transport._wrapped_methods - ) + assert client._transport.ping_and_warm in client._transport._wrapped_methods # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. 
) - client._transport._wrapped_methods[ - client._transport.check_and_mutate_row - ] = mock_rpc + client._transport._wrapped_methods[client._transport.ping_and_warm] = mock_rpc request = {} - client.check_and_mutate_row(request) + client.ping_and_warm(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - client.check_and_mutate_row(request) + client.ping_and_warm(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_check_and_mutate_row_rest_required_fields( - request_type=bigtable.CheckAndMutateRowRequest, -): +def test_ping_and_warm_rest_required_fields(request_type=bigtable.PingAndWarmRequest): transport_class = transports.BigtableRestTransport request_init = {} - request_init["row_key"] = b"" + request_init["name"] = "" request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( @@ -5973,21 +6639,21 @@ def test_check_and_mutate_row_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).check_and_mutate_row._get_unset_required_fields(jsonified_request) + ).ping_and_warm._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present - jsonified_request["rowKey"] = b"row_key_blob" + jsonified_request["name"] = "name_value" unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).check_and_mutate_row._get_unset_required_fields(jsonified_request) + ).ping_and_warm._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone - assert "rowKey" in jsonified_request - assert jsonified_request["rowKey"] == b"row_key_blob" + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), @@ -5996,7 +6662,7 @@ def test_check_and_mutate_row_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = bigtable.CheckAndMutateRowResponse() + return_value = bigtable.PingAndWarmResponse() # Mock the http request call within the method and fake a response. 
with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -6018,30 +6684,30 @@ def test_check_and_mutate_row_rest_required_fields( response_value.status_code = 200 # Convert return value to protobuf type - return_value = bigtable.CheckAndMutateRowResponse.pb(return_value) + return_value = bigtable.PingAndWarmResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.check_and_mutate_row(request) + response = client.ping_and_warm(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_check_and_mutate_row_rest_unset_required_fields(): +def test_ping_and_warm_rest_unset_required_fields(): transport = transports.BigtableRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.check_and_mutate_row._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("rowKey",))) + unset_fields = transport.ping_and_warm._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_check_and_mutate_row_rest_interceptors(null_interceptor): +def test_ping_and_warm_rest_interceptors(null_interceptor): transport = transports.BigtableRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), @@ -6052,15 +6718,13 @@ def test_check_and_mutate_row_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableRestInterceptor, "post_check_and_mutate_row" + transports.BigtableRestInterceptor, "post_ping_and_warm" ) as post, mock.patch.object( - transports.BigtableRestInterceptor, "pre_check_and_mutate_row" + transports.BigtableRestInterceptor, "pre_ping_and_warm" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = bigtable.CheckAndMutateRowRequest.pb( - bigtable.CheckAndMutateRowRequest() - ) + pb_message = bigtable.PingAndWarmRequest.pb(bigtable.PingAndWarmRequest()) transcode.return_value = { "method": "post", "uri": "my_uri", @@ -6071,19 +6735,19 @@ def test_check_and_mutate_row_rest_interceptors(null_interceptor): req.return_value = Response() req.return_value.status_code = 200 req.return_value.request = PreparedRequest() - req.return_value._content = bigtable.CheckAndMutateRowResponse.to_json( - bigtable.CheckAndMutateRowResponse() + req.return_value._content = bigtable.PingAndWarmResponse.to_json( + bigtable.PingAndWarmResponse() ) - request = bigtable.CheckAndMutateRowRequest() + request = bigtable.PingAndWarmRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = bigtable.CheckAndMutateRowResponse() + post.return_value = bigtable.PingAndWarmResponse() - client.check_and_mutate_row( + client.ping_and_warm( request, metadata=[ ("key", "val"), @@ -6095,8 +6759,8 @@ def test_check_and_mutate_row_rest_interceptors(null_interceptor): post.assert_called_once() -def test_check_and_mutate_row_rest_bad_request( - transport: str = "rest", request_type=bigtable.CheckAndMutateRowRequest +def test_ping_and_warm_rest_bad_request( + transport: str = "rest", request_type=bigtable.PingAndWarmRequest ): client = 
BigtableClient( credentials=ga_credentials.AnonymousCredentials(), @@ -6104,7 +6768,7 @@ def test_check_and_mutate_row_rest_bad_request( ) # send a request that will satisfy transcoding - request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + request_init = {"name": "projects/sample1/instances/sample2"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -6116,10 +6780,10 @@ def test_check_and_mutate_row_rest_bad_request( response_value.status_code = 400 response_value.request = Request() req.return_value = response_value - client.check_and_mutate_row(request) + client.ping_and_warm(request) -def test_check_and_mutate_row_rest_flattened(): +def test_ping_and_warm_rest_flattened(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -6128,38 +6792,14 @@ def test_check_and_mutate_row_rest_flattened(): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = bigtable.CheckAndMutateRowResponse() + return_value = bigtable.PingAndWarmResponse() # get arguments that satisfy an http rule for this method - sample_request = { - "table_name": "projects/sample1/instances/sample2/tables/sample3" - } + sample_request = {"name": "projects/sample1/instances/sample2"} # get truthy value for each flattened field mock_args = dict( - table_name="table_name_value", - row_key=b"row_key_blob", - predicate_filter=data.RowFilter( - chain=data.RowFilter.Chain( - filters=[ - data.RowFilter( - chain=data.RowFilter.Chain( - filters=[data.RowFilter(chain=None)] - ) - ) - ] - ) - ), - true_mutations=[ - data.Mutation( - set_cell=data.Mutation.SetCell(family_name="family_name_value") - ) - ], - false_mutations=[ - data.Mutation( - set_cell=data.Mutation.SetCell(family_name="family_name_value") - ) - ], + name="name_value", app_profile_id="app_profile_id_value", ) mock_args.update(sample_request) @@ -6168,25 +6808,23 @@ def test_check_and_mutate_row_rest_flattened(): response_value = Response() response_value.status_code = 200 # Convert return value to protobuf type - return_value = bigtable.CheckAndMutateRowResponse.pb(return_value) + return_value = bigtable.PingAndWarmResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - client.check_and_mutate_row(**mock_args) + client.ping_and_warm(**mock_args) # Establish that the underlying call was made with the expected # request object values. assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow" - % client.transport._host, - args[1], + "%s/v2/{name=projects/*/instances/*}:ping" % client.transport._host, args[1] ) -def test_check_and_mutate_row_rest_flattened_error(transport: str = "rest"): +def test_ping_and_warm_rest_flattened_error(transport: str = "rest"): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -6195,36 +6833,14 @@ def test_check_and_mutate_row_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): - client.check_and_mutate_row( - bigtable.CheckAndMutateRowRequest(), - table_name="table_name_value", - row_key=b"row_key_blob", - predicate_filter=data.RowFilter( - chain=data.RowFilter.Chain( - filters=[ - data.RowFilter( - chain=data.RowFilter.Chain( - filters=[data.RowFilter(chain=None)] - ) - ) - ] - ) - ), - true_mutations=[ - data.Mutation( - set_cell=data.Mutation.SetCell(family_name="family_name_value") - ) - ], - false_mutations=[ - data.Mutation( - set_cell=data.Mutation.SetCell(family_name="family_name_value") - ) - ], + client.ping_and_warm( + bigtable.PingAndWarmRequest(), + name="name_value", app_profile_id="app_profile_id_value", ) -def test_check_and_mutate_row_rest_error(): +def test_ping_and_warm_rest_error(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -6233,41 +6849,41 @@ def test_check_and_mutate_row_rest_error(): @pytest.mark.parametrize( "request_type", [ - bigtable.PingAndWarmRequest, + bigtable.ReadModifyWriteRowRequest, dict, ], ) -def test_ping_and_warm_rest(request_type): +def test_read_modify_write_row_rest(request_type): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2"} + request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = bigtable.PingAndWarmResponse() + return_value = bigtable.ReadModifyWriteRowResponse() # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 # Convert return value to protobuf type - return_value = bigtable.PingAndWarmResponse.pb(return_value) + return_value = bigtable.ReadModifyWriteRowResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.ping_and_warm(request) + response = client.read_modify_write_row(request) # Establish that the response is the type that we expect. - assert isinstance(response, bigtable.PingAndWarmResponse) + assert isinstance(response, bigtable.ReadModifyWriteRowResponse) -def test_ping_and_warm_rest_use_cached_wrapped_rpc(): +def test_read_modify_write_row_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -6281,33 +6897,40 @@ def test_ping_and_warm_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.ping_and_warm in client._transport._wrapped_methods + assert ( + client._transport.read_modify_write_row + in client._transport._wrapped_methods + ) # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. 
) - client._transport._wrapped_methods[client._transport.ping_and_warm] = mock_rpc + client._transport._wrapped_methods[ + client._transport.read_modify_write_row + ] = mock_rpc request = {} - client.ping_and_warm(request) + client.read_modify_write_row(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - client.ping_and_warm(request) + client.read_modify_write_row(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_ping_and_warm_rest_required_fields(request_type=bigtable.PingAndWarmRequest): +def test_read_modify_write_row_rest_required_fields( + request_type=bigtable.ReadModifyWriteRowRequest, +): transport_class = transports.BigtableRestTransport request_init = {} - request_init["name"] = "" + request_init["row_key"] = b"" request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( @@ -6318,21 +6941,21 @@ def test_ping_and_warm_rest_required_fields(request_type=bigtable.PingAndWarmReq unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).ping_and_warm._get_unset_required_fields(jsonified_request) + ).read_modify_write_row._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present - jsonified_request["name"] = "name_value" + jsonified_request["rowKey"] = b"row_key_blob" unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).ping_and_warm._get_unset_required_fields(jsonified_request) + ).read_modify_write_row._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" + assert "rowKey" in jsonified_request + assert jsonified_request["rowKey"] == b"row_key_blob" client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), @@ -6341,7 +6964,7 @@ def test_ping_and_warm_rest_required_fields(request_type=bigtable.PingAndWarmReq request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = bigtable.PingAndWarmResponse() + return_value = bigtable.ReadModifyWriteRowResponse() # Mock the http request call within the method and fake a response. 
with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -6363,30 +6986,38 @@ def test_ping_and_warm_rest_required_fields(request_type=bigtable.PingAndWarmReq response_value.status_code = 200 # Convert return value to protobuf type - return_value = bigtable.PingAndWarmResponse.pb(return_value) + return_value = bigtable.ReadModifyWriteRowResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.ping_and_warm(request) + response = client.read_modify_write_row(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_ping_and_warm_rest_unset_required_fields(): +def test_read_modify_write_row_rest_unset_required_fields(): transport = transports.BigtableRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.ping_and_warm._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) + unset_fields = transport.read_modify_write_row._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "rowKey", + "rules", + ) + ) + ) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_ping_and_warm_rest_interceptors(null_interceptor): +def test_read_modify_write_row_rest_interceptors(null_interceptor): transport = transports.BigtableRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), @@ -6397,13 +7028,15 @@ def test_ping_and_warm_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableRestInterceptor, "post_ping_and_warm" + transports.BigtableRestInterceptor, "post_read_modify_write_row" ) as post, mock.patch.object( - transports.BigtableRestInterceptor, "pre_ping_and_warm" + transports.BigtableRestInterceptor, "pre_read_modify_write_row" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = bigtable.PingAndWarmRequest.pb(bigtable.PingAndWarmRequest()) + pb_message = bigtable.ReadModifyWriteRowRequest.pb( + bigtable.ReadModifyWriteRowRequest() + ) transcode.return_value = { "method": "post", "uri": "my_uri", @@ -6414,19 +7047,19 @@ def test_ping_and_warm_rest_interceptors(null_interceptor): req.return_value = Response() req.return_value.status_code = 200 req.return_value.request = PreparedRequest() - req.return_value._content = bigtable.PingAndWarmResponse.to_json( - bigtable.PingAndWarmResponse() + req.return_value._content = bigtable.ReadModifyWriteRowResponse.to_json( + bigtable.ReadModifyWriteRowResponse() ) - request = bigtable.PingAndWarmRequest() + request = bigtable.ReadModifyWriteRowRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = bigtable.PingAndWarmResponse() + post.return_value = bigtable.ReadModifyWriteRowResponse() - client.ping_and_warm( + client.read_modify_write_row( request, metadata=[ ("key", "val"), @@ -6438,8 +7071,8 @@ def test_ping_and_warm_rest_interceptors(null_interceptor): post.assert_called_once() -def test_ping_and_warm_rest_bad_request( - transport: str = "rest", request_type=bigtable.PingAndWarmRequest +def test_read_modify_write_row_rest_bad_request( + transport: str = "rest", 
request_type=bigtable.ReadModifyWriteRowRequest ): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), @@ -6447,7 +7080,7 @@ def test_ping_and_warm_rest_bad_request( ) # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2"} + request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -6459,10 +7092,10 @@ def test_ping_and_warm_rest_bad_request( response_value.status_code = 400 response_value.request = Request() req.return_value = response_value - client.ping_and_warm(request) + client.read_modify_write_row(request) -def test_ping_and_warm_rest_flattened(): +def test_read_modify_write_row_rest_flattened(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -6471,14 +7104,18 @@ def test_ping_and_warm_rest_flattened(): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = bigtable.PingAndWarmResponse() + return_value = bigtable.ReadModifyWriteRowResponse() # get arguments that satisfy an http rule for this method - sample_request = {"name": "projects/sample1/instances/sample2"} + sample_request = { + "table_name": "projects/sample1/instances/sample2/tables/sample3" + } # get truthy value for each flattened field mock_args = dict( - name="name_value", + table_name="table_name_value", + row_key=b"row_key_blob", + rules=[data.ReadModifyWriteRule(family_name="family_name_value")], app_profile_id="app_profile_id_value", ) mock_args.update(sample_request) @@ -6487,23 +7124,25 @@ def test_ping_and_warm_rest_flattened(): response_value = Response() response_value.status_code = 200 # Convert return value to protobuf type - return_value = bigtable.PingAndWarmResponse.pb(return_value) + return_value = bigtable.ReadModifyWriteRowResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - client.ping_and_warm(**mock_args) + client.read_modify_write_row(**mock_args) # Establish that the underlying call was made with the expected # request object values. assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{name=projects/*/instances/*}:ping" % client.transport._host, args[1] + "%s/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow" + % client.transport._host, + args[1], ) -def test_ping_and_warm_rest_flattened_error(transport: str = "rest"): +def test_read_modify_write_row_rest_flattened_error(transport: str = "rest"): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -6512,14 +7151,16 @@ def test_ping_and_warm_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): - client.ping_and_warm( - bigtable.PingAndWarmRequest(), - name="name_value", + client.read_modify_write_row( + bigtable.ReadModifyWriteRowRequest(), + table_name="table_name_value", + row_key=b"row_key_blob", + rules=[data.ReadModifyWriteRule(family_name="family_name_value")], app_profile_id="app_profile_id_value", ) -def test_ping_and_warm_rest_error(): +def test_read_modify_write_row_rest_error(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -6528,11 +7169,11 @@ def test_ping_and_warm_rest_error(): @pytest.mark.parametrize( "request_type", [ - bigtable.ReadModifyWriteRowRequest, + bigtable.GenerateInitialChangeStreamPartitionsRequest, dict, ], ) -def test_read_modify_write_row_rest(request_type): +def test_generate_initial_change_stream_partitions_rest(request_type): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -6545,24 +7186,33 @@ def test_read_modify_write_row_rest(request_type): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = bigtable.ReadModifyWriteRowResponse() + return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse() # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 # Convert return value to protobuf type - return_value = bigtable.ReadModifyWriteRowResponse.pb(return_value) + return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse.pb( + return_value + ) json_return_value = json_format.MessageToJson(return_value) + json_return_value = "[{}]".format(json_return_value) + response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.read_modify_write_row(request) + with mock.patch.object(response_value, "iter_content") as iter_content: + iter_content.return_value = iter(json_return_value) + response = client.generate_initial_change_stream_partitions(request) + + assert isinstance(response, Iterable) + response = next(response) # Establish that the response is the type that we expect. - assert isinstance(response, bigtable.ReadModifyWriteRowResponse) + assert isinstance(response, bigtable.GenerateInitialChangeStreamPartitionsResponse) -def test_read_modify_write_row_rest_use_cached_wrapped_rpc(): +def test_generate_initial_change_stream_partitions_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -6577,7 +7227,7 @@ def test_read_modify_write_row_rest_use_cached_wrapped_rpc(): # Ensure method has been cached assert ( - client._transport.read_modify_write_row + client._transport.generate_initial_change_stream_partitions in client._transport._wrapped_methods ) @@ -6587,29 +7237,29 @@ def test_read_modify_write_row_rest_use_cached_wrapped_rpc(): "foo" # operation_request.operation in compute client(s) expect a string. ) client._transport._wrapped_methods[ - client._transport.read_modify_write_row + client._transport.generate_initial_change_stream_partitions ] = mock_rpc request = {} - client.read_modify_write_row(request) + client.generate_initial_change_stream_partitions(request) # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 - client.read_modify_write_row(request) + client.generate_initial_change_stream_partitions(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_read_modify_write_row_rest_required_fields( - request_type=bigtable.ReadModifyWriteRowRequest, +def test_generate_initial_change_stream_partitions_rest_required_fields( + request_type=bigtable.GenerateInitialChangeStreamPartitionsRequest, ): transport_class = transports.BigtableRestTransport request_init = {} - request_init["row_key"] = b"" + request_init["table_name"] = "" request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( @@ -6620,21 +7270,25 @@ def test_read_modify_write_row_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).read_modify_write_row._get_unset_required_fields(jsonified_request) + ).generate_initial_change_stream_partitions._get_unset_required_fields( + jsonified_request + ) jsonified_request.update(unset_fields) # verify required fields with default values are now present - jsonified_request["rowKey"] = b"row_key_blob" + jsonified_request["tableName"] = "table_name_value" unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).read_modify_write_row._get_unset_required_fields(jsonified_request) + ).generate_initial_change_stream_partitions._get_unset_required_fields( + jsonified_request + ) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone - assert "rowKey" in jsonified_request - assert jsonified_request["rowKey"] == b"row_key_blob" + assert "tableName" in jsonified_request + assert jsonified_request["tableName"] == "table_name_value" client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), @@ -6643,7 +7297,7 @@ def test_read_modify_write_row_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = bigtable.ReadModifyWriteRowResponse() + return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse() # Mock the http request call within the method and fake a response. 
with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -6665,38 +7319,39 @@ def test_read_modify_write_row_rest_required_fields( response_value.status_code = 200 # Convert return value to protobuf type - return_value = bigtable.ReadModifyWriteRowResponse.pb(return_value) + return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse.pb( + return_value + ) json_return_value = json_format.MessageToJson(return_value) + json_return_value = "[{}]".format(json_return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.read_modify_write_row(request) + with mock.patch.object(response_value, "iter_content") as iter_content: + iter_content.return_value = iter(json_return_value) + response = client.generate_initial_change_stream_partitions(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_read_modify_write_row_rest_unset_required_fields(): +def test_generate_initial_change_stream_partitions_rest_unset_required_fields(): transport = transports.BigtableRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.read_modify_write_row._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(()) - & set( - ( - "rowKey", - "rules", - ) + unset_fields = ( + transport.generate_initial_change_stream_partitions._get_unset_required_fields( + {} ) ) + assert set(unset_fields) == (set(()) & set(("tableName",))) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_read_modify_write_row_rest_interceptors(null_interceptor): +def test_generate_initial_change_stream_partitions_rest_interceptors(null_interceptor): transport = transports.BigtableRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), @@ -6707,14 +7362,16 @@ def test_read_modify_write_row_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableRestInterceptor, "post_read_modify_write_row" + transports.BigtableRestInterceptor, + "post_generate_initial_change_stream_partitions", ) as post, mock.patch.object( - transports.BigtableRestInterceptor, "pre_read_modify_write_row" + transports.BigtableRestInterceptor, + "pre_generate_initial_change_stream_partitions", ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = bigtable.ReadModifyWriteRowRequest.pb( - bigtable.ReadModifyWriteRowRequest() + pb_message = bigtable.GenerateInitialChangeStreamPartitionsRequest.pb( + bigtable.GenerateInitialChangeStreamPartitionsRequest() ) transcode.return_value = { "method": "post", @@ -6726,19 +7383,22 @@ def test_read_modify_write_row_rest_interceptors(null_interceptor): req.return_value = Response() req.return_value.status_code = 200 req.return_value.request = PreparedRequest() - req.return_value._content = bigtable.ReadModifyWriteRowResponse.to_json( - bigtable.ReadModifyWriteRowResponse() + req.return_value._content = ( + bigtable.GenerateInitialChangeStreamPartitionsResponse.to_json( + bigtable.GenerateInitialChangeStreamPartitionsResponse() + ) ) + req.return_value._content = "[{}]".format(req.return_value._content) - request = bigtable.ReadModifyWriteRowRequest() + request = bigtable.GenerateInitialChangeStreamPartitionsRequest() metadata = [ ("key", "val"), 
("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = bigtable.ReadModifyWriteRowResponse() + post.return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse() - client.read_modify_write_row( + client.generate_initial_change_stream_partitions( request, metadata=[ ("key", "val"), @@ -6750,8 +7410,9 @@ def test_read_modify_write_row_rest_interceptors(null_interceptor): post.assert_called_once() -def test_read_modify_write_row_rest_bad_request( - transport: str = "rest", request_type=bigtable.ReadModifyWriteRowRequest +def test_generate_initial_change_stream_partitions_rest_bad_request( + transport: str = "rest", + request_type=bigtable.GenerateInitialChangeStreamPartitionsRequest, ): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), @@ -6771,10 +7432,10 @@ def test_read_modify_write_row_rest_bad_request( response_value.status_code = 400 response_value.request = Request() req.return_value = response_value - client.read_modify_write_row(request) + client.generate_initial_change_stream_partitions(request) -def test_read_modify_write_row_rest_flattened(): +def test_generate_initial_change_stream_partitions_rest_flattened(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -6783,7 +7444,7 @@ def test_read_modify_write_row_rest_flattened(): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = bigtable.ReadModifyWriteRowResponse() + return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse() # get arguments that satisfy an http rule for this method sample_request = { @@ -6793,8 +7454,6 @@ def test_read_modify_write_row_rest_flattened(): # get truthy value for each flattened field mock_args = dict( table_name="table_name_value", - row_key=b"row_key_blob", - rules=[data.ReadModifyWriteRule(family_name="family_name_value")], app_profile_id="app_profile_id_value", ) mock_args.update(sample_request) @@ -6803,25 +7462,32 @@ def test_read_modify_write_row_rest_flattened(): response_value = Response() response_value.status_code = 200 # Convert return value to protobuf type - return_value = bigtable.ReadModifyWriteRowResponse.pb(return_value) + return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse.pb( + return_value + ) json_return_value = json_format.MessageToJson(return_value) + json_return_value = "[{}]".format(json_return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - client.read_modify_write_row(**mock_args) + with mock.patch.object(response_value, "iter_content") as iter_content: + iter_content.return_value = iter(json_return_value) + client.generate_initial_change_stream_partitions(**mock_args) # Establish that the underlying call was made with the expected # request object values. 
assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow" + "%s/v2/{table_name=projects/*/instances/*/tables/*}:generateInitialChangeStreamPartitions" % client.transport._host, args[1], ) -def test_read_modify_write_row_rest_flattened_error(transport: str = "rest"): +def test_generate_initial_change_stream_partitions_rest_flattened_error( + transport: str = "rest", +): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -6830,16 +7496,14 @@ def test_read_modify_write_row_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.read_modify_write_row( - bigtable.ReadModifyWriteRowRequest(), + client.generate_initial_change_stream_partitions( + bigtable.GenerateInitialChangeStreamPartitionsRequest(), table_name="table_name_value", - row_key=b"row_key_blob", - rules=[data.ReadModifyWriteRule(family_name="family_name_value")], app_profile_id="app_profile_id_value", ) -def test_read_modify_write_row_rest_error(): +def test_generate_initial_change_stream_partitions_rest_error(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -6848,11 +7512,11 @@ def test_read_modify_write_row_rest_error(): @pytest.mark.parametrize( "request_type", [ - bigtable.GenerateInitialChangeStreamPartitionsRequest, + bigtable.ReadChangeStreamRequest, dict, ], ) -def test_generate_initial_change_stream_partitions_rest(request_type): +def test_read_change_stream_rest(request_type): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -6865,15 +7529,13 @@ def test_generate_initial_change_stream_partitions_rest(request_type): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse() + return_value = bigtable.ReadChangeStreamResponse() # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 # Convert return value to protobuf type - return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse.pb( - return_value - ) + return_value = bigtable.ReadChangeStreamResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) json_return_value = "[{}]".format(json_return_value) @@ -6882,16 +7544,16 @@ def test_generate_initial_change_stream_partitions_rest(request_type): req.return_value = response_value with mock.patch.object(response_value, "iter_content") as iter_content: iter_content.return_value = iter(json_return_value) - response = client.generate_initial_change_stream_partitions(request) + response = client.read_change_stream(request) assert isinstance(response, Iterable) response = next(response) # Establish that the response is the type that we expect. 
- assert isinstance(response, bigtable.GenerateInitialChangeStreamPartitionsResponse) + assert isinstance(response, bigtable.ReadChangeStreamResponse) -def test_generate_initial_change_stream_partitions_rest_use_cached_wrapped_rpc(): +def test_read_change_stream_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -6906,8 +7568,7 @@ def test_generate_initial_change_stream_partitions_rest_use_cached_wrapped_rpc() # Ensure method has been cached assert ( - client._transport.generate_initial_change_stream_partitions - in client._transport._wrapped_methods + client._transport.read_change_stream in client._transport._wrapped_methods ) # Replace cached wrapped function with mock @@ -6916,24 +7577,24 @@ def test_generate_initial_change_stream_partitions_rest_use_cached_wrapped_rpc() "foo" # operation_request.operation in compute client(s) expect a string. ) client._transport._wrapped_methods[ - client._transport.generate_initial_change_stream_partitions + client._transport.read_change_stream ] = mock_rpc request = {} - client.generate_initial_change_stream_partitions(request) + client.read_change_stream(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - client.generate_initial_change_stream_partitions(request) + client.read_change_stream(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_generate_initial_change_stream_partitions_rest_required_fields( - request_type=bigtable.GenerateInitialChangeStreamPartitionsRequest, +def test_read_change_stream_rest_required_fields( + request_type=bigtable.ReadChangeStreamRequest, ): transport_class = transports.BigtableRestTransport @@ -6949,9 +7610,7 @@ def test_generate_initial_change_stream_partitions_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).generate_initial_change_stream_partitions._get_unset_required_fields( - jsonified_request - ) + ).read_change_stream._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present @@ -6960,9 +7619,7 @@ def test_generate_initial_change_stream_partitions_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).generate_initial_change_stream_partitions._get_unset_required_fields( - jsonified_request - ) + ).read_change_stream._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone @@ -6976,7 +7633,7 @@ def test_generate_initial_change_stream_partitions_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse() + return_value = bigtable.ReadChangeStreamResponse() # Mock the http request call within the method and fake a response. 
with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -6998,9 +7655,7 @@ def test_generate_initial_change_stream_partitions_rest_required_fields( response_value.status_code = 200 # Convert return value to protobuf type - return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse.pb( - return_value - ) + return_value = bigtable.ReadChangeStreamResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) json_return_value = "[{}]".format(json_return_value) @@ -7009,28 +7664,24 @@ def test_generate_initial_change_stream_partitions_rest_required_fields( with mock.patch.object(response_value, "iter_content") as iter_content: iter_content.return_value = iter(json_return_value) - response = client.generate_initial_change_stream_partitions(request) + response = client.read_change_stream(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_generate_initial_change_stream_partitions_rest_unset_required_fields(): +def test_read_change_stream_rest_unset_required_fields(): transport = transports.BigtableRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = ( - transport.generate_initial_change_stream_partitions._get_unset_required_fields( - {} - ) - ) + unset_fields = transport.read_change_stream._get_unset_required_fields({}) assert set(unset_fields) == (set(()) & set(("tableName",))) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_generate_initial_change_stream_partitions_rest_interceptors(null_interceptor): +def test_read_change_stream_rest_interceptors(null_interceptor): transport = transports.BigtableRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), @@ -7041,16 +7692,14 @@ def test_generate_initial_change_stream_partitions_rest_interceptors(null_interc ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableRestInterceptor, - "post_generate_initial_change_stream_partitions", + transports.BigtableRestInterceptor, "post_read_change_stream" ) as post, mock.patch.object( - transports.BigtableRestInterceptor, - "pre_generate_initial_change_stream_partitions", + transports.BigtableRestInterceptor, "pre_read_change_stream" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = bigtable.GenerateInitialChangeStreamPartitionsRequest.pb( - bigtable.GenerateInitialChangeStreamPartitionsRequest() + pb_message = bigtable.ReadChangeStreamRequest.pb( + bigtable.ReadChangeStreamRequest() ) transcode.return_value = { "method": "post", @@ -7062,22 +7711,20 @@ def test_generate_initial_change_stream_partitions_rest_interceptors(null_interc req.return_value = Response() req.return_value.status_code = 200 req.return_value.request = PreparedRequest() - req.return_value._content = ( - bigtable.GenerateInitialChangeStreamPartitionsResponse.to_json( - bigtable.GenerateInitialChangeStreamPartitionsResponse() - ) + req.return_value._content = bigtable.ReadChangeStreamResponse.to_json( + bigtable.ReadChangeStreamResponse() ) req.return_value._content = "[{}]".format(req.return_value._content) - request = bigtable.GenerateInitialChangeStreamPartitionsRequest() + request = bigtable.ReadChangeStreamRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - 
post.return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse() + post.return_value = bigtable.ReadChangeStreamResponse() - client.generate_initial_change_stream_partitions( + client.read_change_stream( request, metadata=[ ("key", "val"), @@ -7089,9 +7736,8 @@ def test_generate_initial_change_stream_partitions_rest_interceptors(null_interc post.assert_called_once() -def test_generate_initial_change_stream_partitions_rest_bad_request( - transport: str = "rest", - request_type=bigtable.GenerateInitialChangeStreamPartitionsRequest, +def test_read_change_stream_rest_bad_request( + transport: str = "rest", request_type=bigtable.ReadChangeStreamRequest ): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), @@ -7111,10 +7757,10 @@ def test_generate_initial_change_stream_partitions_rest_bad_request( response_value.status_code = 400 response_value.request = Request() req.return_value = response_value - client.generate_initial_change_stream_partitions(request) + client.read_change_stream(request) -def test_generate_initial_change_stream_partitions_rest_flattened(): +def test_read_change_stream_rest_flattened(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -7123,7 +7769,7 @@ def test_generate_initial_change_stream_partitions_rest_flattened(): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse() + return_value = bigtable.ReadChangeStreamResponse() # get arguments that satisfy an http rule for this method sample_request = { @@ -7141,9 +7787,7 @@ def test_generate_initial_change_stream_partitions_rest_flattened(): response_value = Response() response_value.status_code = 200 # Convert return value to protobuf type - return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse.pb( - return_value - ) + return_value = bigtable.ReadChangeStreamResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) json_return_value = "[{}]".format(json_return_value) response_value._content = json_return_value.encode("UTF-8") @@ -7151,22 +7795,20 @@ def test_generate_initial_change_stream_partitions_rest_flattened(): with mock.patch.object(response_value, "iter_content") as iter_content: iter_content.return_value = iter(json_return_value) - client.generate_initial_change_stream_partitions(**mock_args) + client.read_change_stream(**mock_args) # Establish that the underlying call was made with the expected # request object values. assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{table_name=projects/*/instances/*/tables/*}:generateInitialChangeStreamPartitions" + "%s/v2/{table_name=projects/*/instances/*/tables/*}:readChangeStream" % client.transport._host, args[1], ) -def test_generate_initial_change_stream_partitions_rest_flattened_error( - transport: str = "rest", -): +def test_read_change_stream_rest_flattened_error(transport: str = "rest"): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -7175,14 +7817,14 @@ def test_generate_initial_change_stream_partitions_rest_flattened_error( # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): - client.generate_initial_change_stream_partitions( - bigtable.GenerateInitialChangeStreamPartitionsRequest(), + client.read_change_stream( + bigtable.ReadChangeStreamRequest(), table_name="table_name_value", app_profile_id="app_profile_id_value", ) -def test_generate_initial_change_stream_partitions_rest_error(): +def test_read_change_stream_rest_error(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -7191,30 +7833,30 @@ def test_generate_initial_change_stream_partitions_rest_error(): @pytest.mark.parametrize( "request_type", [ - bigtable.ReadChangeStreamRequest, + bigtable.ExecuteQueryRequest, dict, ], ) -def test_read_change_stream_rest(request_type): +def test_execute_query_rest(request_type): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) # send a request that will satisfy transcoding - request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + request_init = {"instance_name": "projects/sample1/instances/sample2"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = bigtable.ReadChangeStreamResponse() + return_value = bigtable.ExecuteQueryResponse() # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 # Convert return value to protobuf type - return_value = bigtable.ReadChangeStreamResponse.pb(return_value) + return_value = bigtable.ExecuteQueryResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) json_return_value = "[{}]".format(json_return_value) @@ -7223,16 +7865,16 @@ def test_read_change_stream_rest(request_type): req.return_value = response_value with mock.patch.object(response_value, "iter_content") as iter_content: iter_content.return_value = iter(json_return_value) - response = client.read_change_stream(request) + response = client.execute_query(request) assert isinstance(response, Iterable) response = next(response) # Establish that the response is the type that we expect. - assert isinstance(response, bigtable.ReadChangeStreamResponse) + assert isinstance(response, bigtable.ExecuteQueryResponse) -def test_read_change_stream_rest_use_cached_wrapped_rpc(): +def test_execute_query_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -7246,39 +7888,34 @@ def test_read_change_stream_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert ( - client._transport.read_change_stream in client._transport._wrapped_methods - ) + assert client._transport.execute_query in client._transport._wrapped_methods # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[ - client._transport.read_change_stream - ] = mock_rpc + client._transport._wrapped_methods[client._transport.execute_query] = mock_rpc request = {} - client.read_change_stream(request) + client.execute_query(request) # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 - client.read_change_stream(request) + client.execute_query(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_read_change_stream_rest_required_fields( - request_type=bigtable.ReadChangeStreamRequest, -): +def test_execute_query_rest_required_fields(request_type=bigtable.ExecuteQueryRequest): transport_class = transports.BigtableRestTransport request_init = {} - request_init["table_name"] = "" + request_init["instance_name"] = "" + request_init["query"] = "" request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( @@ -7289,21 +7926,24 @@ def test_read_change_stream_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).read_change_stream._get_unset_required_fields(jsonified_request) + ).execute_query._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present - jsonified_request["tableName"] = "table_name_value" + jsonified_request["instanceName"] = "instance_name_value" + jsonified_request["query"] = "query_value" unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).read_change_stream._get_unset_required_fields(jsonified_request) + ).execute_query._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone - assert "tableName" in jsonified_request - assert jsonified_request["tableName"] == "table_name_value" + assert "instanceName" in jsonified_request + assert jsonified_request["instanceName"] == "instance_name_value" + assert "query" in jsonified_request + assert jsonified_request["query"] == "query_value" client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), @@ -7312,7 +7952,7 @@ def test_read_change_stream_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = bigtable.ReadChangeStreamResponse() + return_value = bigtable.ExecuteQueryResponse() # Mock the http request call within the method and fake a response. 
with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -7334,7 +7974,7 @@ def test_read_change_stream_rest_required_fields( response_value.status_code = 200 # Convert return value to protobuf type - return_value = bigtable.ReadChangeStreamResponse.pb(return_value) + return_value = bigtable.ExecuteQueryResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) json_return_value = "[{}]".format(json_return_value) @@ -7343,24 +7983,33 @@ def test_read_change_stream_rest_required_fields( with mock.patch.object(response_value, "iter_content") as iter_content: iter_content.return_value = iter(json_return_value) - response = client.read_change_stream(request) + response = client.execute_query(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_read_change_stream_rest_unset_required_fields(): +def test_execute_query_rest_unset_required_fields(): transport = transports.BigtableRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.read_change_stream._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("tableName",))) + unset_fields = transport.execute_query._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "instanceName", + "query", + "params", + ) + ) + ) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_read_change_stream_rest_interceptors(null_interceptor): +def test_execute_query_rest_interceptors(null_interceptor): transport = transports.BigtableRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), @@ -7371,15 +8020,13 @@ def test_read_change_stream_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableRestInterceptor, "post_read_change_stream" + transports.BigtableRestInterceptor, "post_execute_query" ) as post, mock.patch.object( - transports.BigtableRestInterceptor, "pre_read_change_stream" + transports.BigtableRestInterceptor, "pre_execute_query" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = bigtable.ReadChangeStreamRequest.pb( - bigtable.ReadChangeStreamRequest() - ) + pb_message = bigtable.ExecuteQueryRequest.pb(bigtable.ExecuteQueryRequest()) transcode.return_value = { "method": "post", "uri": "my_uri", @@ -7390,20 +8037,20 @@ def test_read_change_stream_rest_interceptors(null_interceptor): req.return_value = Response() req.return_value.status_code = 200 req.return_value.request = PreparedRequest() - req.return_value._content = bigtable.ReadChangeStreamResponse.to_json( - bigtable.ReadChangeStreamResponse() + req.return_value._content = bigtable.ExecuteQueryResponse.to_json( + bigtable.ExecuteQueryResponse() ) req.return_value._content = "[{}]".format(req.return_value._content) - request = bigtable.ReadChangeStreamRequest() + request = bigtable.ExecuteQueryRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = bigtable.ReadChangeStreamResponse() + post.return_value = bigtable.ExecuteQueryResponse() - client.read_change_stream( + client.execute_query( request, metadata=[ ("key", "val"), @@ -7415,8 +8062,8 @@ def test_read_change_stream_rest_interceptors(null_interceptor): post.assert_called_once() 
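The server-streaming REST tests above (for read_change_stream and, after this change, execute_query) all fake the HTTP layer the same way: serialize one response message to JSON, wrap it in a one-element JSON array, store it as the mocked Response body, and patch iter_content so the client's stream parser can consume it. A minimal, self-contained sketch of that pattern, assuming google.protobuf.Struct as a stand-in for the generated Bigtable response type and a hypothetical fake_streaming_rest_response helper (not part of the library):

    from unittest import mock

    from google.protobuf import json_format
    from google.protobuf import struct_pb2
    from requests import Response


    def fake_streaming_rest_response(message):
        """Build a mocked requests.Response that streams one JSON-encoded message."""
        # Server-streaming REST methods return a JSON array of messages,
        # hence the "[{}]" wrapping seen throughout the generated tests.
        body = "[{}]".format(json_format.MessageToJson(message))
        response = Response()
        response.status_code = 200
        response._content = body.encode("UTF-8")
        # The client reads the body through iter_content; yield it as one chunk.
        response.iter_content = mock.Mock(return_value=iter([body]))
        return response


    # Usage: any protobuf message works; Struct keeps the sketch independent
    # of the generated bigtable_v2 types.
    message = struct_pb2.Struct()
    message.update({"greeting": "hello"})
    response = fake_streaming_rest_response(message)
    assert response.status_code == 200
    assert b"greeting" in response._content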
-def test_read_change_stream_rest_bad_request( - transport: str = "rest", request_type=bigtable.ReadChangeStreamRequest +def test_execute_query_rest_bad_request( + transport: str = "rest", request_type=bigtable.ExecuteQueryRequest ): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), @@ -7424,7 +8071,7 @@ def test_read_change_stream_rest_bad_request( ) # send a request that will satisfy transcoding - request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + request_init = {"instance_name": "projects/sample1/instances/sample2"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -7436,10 +8083,10 @@ def test_read_change_stream_rest_bad_request( response_value.status_code = 400 response_value.request = Request() req.return_value = response_value - client.read_change_stream(request) + client.execute_query(request) -def test_read_change_stream_rest_flattened(): +def test_execute_query_rest_flattened(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -7448,16 +8095,15 @@ def test_read_change_stream_rest_flattened(): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = bigtable.ReadChangeStreamResponse() + return_value = bigtable.ExecuteQueryResponse() # get arguments that satisfy an http rule for this method - sample_request = { - "table_name": "projects/sample1/instances/sample2/tables/sample3" - } + sample_request = {"instance_name": "projects/sample1/instances/sample2"} # get truthy value for each flattened field mock_args = dict( - table_name="table_name_value", + instance_name="instance_name_value", + query="query_value", app_profile_id="app_profile_id_value", ) mock_args.update(sample_request) @@ -7466,7 +8112,7 @@ def test_read_change_stream_rest_flattened(): response_value = Response() response_value.status_code = 200 # Convert return value to protobuf type - return_value = bigtable.ReadChangeStreamResponse.pb(return_value) + return_value = bigtable.ExecuteQueryResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) json_return_value = "[{}]".format(json_return_value) response_value._content = json_return_value.encode("UTF-8") @@ -7474,20 +8120,20 @@ def test_read_change_stream_rest_flattened(): with mock.patch.object(response_value, "iter_content") as iter_content: iter_content.return_value = iter(json_return_value) - client.read_change_stream(**mock_args) + client.execute_query(**mock_args) # Establish that the underlying call was made with the expected # request object values. assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{table_name=projects/*/instances/*/tables/*}:readChangeStream" + "%s/v2/{instance_name=projects/*/instances/*}:executeQuery" % client.transport._host, args[1], ) -def test_read_change_stream_rest_flattened_error(transport: str = "rest"): +def test_execute_query_rest_flattened_error(transport: str = "rest"): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -7496,14 +8142,15 @@ def test_read_change_stream_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): - client.read_change_stream( - bigtable.ReadChangeStreamRequest(), - table_name="table_name_value", + client.execute_query( + bigtable.ExecuteQueryRequest(), + instance_name="instance_name_value", + query="query_value", app_profile_id="app_profile_id_value", ) -def test_read_change_stream_rest_error(): +def test_execute_query_rest_error(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -7657,6 +8304,7 @@ def test_bigtable_base_transport(): "read_modify_write_row", "generate_initial_change_stream_partitions", "read_change_stream", + "execute_query", ) for method in methods: with pytest.raises(NotImplementedError): @@ -7967,6 +8615,9 @@ def test_bigtable_client_transport_session_collision(transport_name): session1 = client1.transport.read_change_stream._session session2 = client2.transport.read_change_stream._session assert session1 != session2 + session1 = client1.transport.execute_query._session + session2 = client2.transport.execute_query._session + assert session1 != session2 def test_bigtable_grpc_transport_channel(): From f65da4ebbb833e2eb88c27cd5badd152cd926006 Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Mon, 22 Jul 2024 11:56:46 -0700 Subject: [PATCH 807/892] chore(main): release 2.25.0 (#1001) --- .../.github/sync-repo-settings.yaml | 1 - .../.release-please-manifest.json | 2 +- packages/google-cloud-bigtable/CHANGELOG.md | 13 +++++++++++++ .../google/cloud/bigtable/gapic_version.py | 2 +- .../google/cloud/bigtable_admin/gapic_version.py | 2 +- .../google/cloud/bigtable_admin_v2/gapic_version.py | 2 +- .../google/cloud/bigtable_v2/gapic_version.py | 2 +- 7 files changed, 18 insertions(+), 6 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/sync-repo-settings.yaml b/packages/google-cloud-bigtable/.github/sync-repo-settings.yaml index 1319e555dbe5..df49eafcc962 100644 --- a/packages/google-cloud-bigtable/.github/sync-repo-settings.yaml +++ b/packages/google-cloud-bigtable/.github/sync-repo-settings.yaml @@ -31,7 +31,6 @@ branchProtectionRules: - 'Kokoro' - 'Kokoro system-3.8' - 'cla/google' - - 'Conformance / Async v3 Client / Python 3.8 / Test Tag v0.0.2' - 'OwlBot Post Processor' # List of explicit permissions to add (additive only) permissionRules: diff --git a/packages/google-cloud-bigtable/.release-please-manifest.json b/packages/google-cloud-bigtable/.release-please-manifest.json index 355b3955b70d..d6c7e9d687cb 100644 --- a/packages/google-cloud-bigtable/.release-please-manifest.json +++ b/packages/google-cloud-bigtable/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "2.24.0" + ".": "2.25.0" } \ No newline at end of file diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index d82467b2792b..92b498748f2b 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,19 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.25.0](https://github.com/googleapis/python-bigtable/compare/v2.24.0...v2.25.0) (2024-07-18) + + +### Features + +* Publish ProtoRows Message ([7ac8e14](https://github.com/googleapis/python-bigtable/commit/7ac8e142f99a6891b6bc286858f764def503e89a)) +* Publish the Cloud Bigtable ExecuteQuery API ([7ac8e14](https://github.com/googleapis/python-bigtable/commit/7ac8e142f99a6891b6bc286858f764def503e89a)) + + +### Bug Fixes + +* Allow protobuf 5.x 
([7ac8e14](https://github.com/googleapis/python-bigtable/commit/7ac8e142f99a6891b6bc286858f764def503e89a)) + ## [2.24.0](https://github.com/googleapis/python-bigtable/compare/v2.23.1...v2.24.0) (2024-06-11) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py index 07de09d568ba..e5fa8f60b9fc 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.24.0" # {x-release-please-version} +__version__ = "2.25.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py index 07de09d568ba..e5fa8f60b9fc 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.24.0" # {x-release-please-version} +__version__ = "2.25.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py index 07de09d568ba..e5fa8f60b9fc 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.24.0" # {x-release-please-version} +__version__ = "2.25.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py index 07de09d568ba..e5fa8f60b9fc 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "2.24.0" # {x-release-please-version} +__version__ = "2.25.0" # {x-release-please-version} From 06ec7a9daee604caef2e3ee05817f1f64d514bda Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Tue, 30 Jul 2024 17:40:21 -0600 Subject: [PATCH 808/892] fix: use single routing metadata header (#1005) --- .../services/bigtable/async_client.py | 116 ++++++++++-------- .../bigtable_v2/services/bigtable/client.py | 72 ++++++----- packages/google-cloud-bigtable/owlbot.py | 11 ++ .../tests/unit/data/_async/test_client.py | 57 +++++---- 4 files changed, 147 insertions(+), 109 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py index 12432dda7609..1ed7a47408d9 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -340,11 +340,13 @@ def read_rows( # Certain fields should be provided within the metadata header; # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("table_name", request.table_name),) - ), - ) + metadata = tuple(metadata) + if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): + metadata += ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) # Validate the universe domain. self._client._validate_universe_domain() @@ -441,11 +443,13 @@ def sample_row_keys( # Certain fields should be provided within the metadata header; # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("table_name", request.table_name),) - ), - ) + metadata = tuple(metadata) + if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): + metadata += ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) # Validate the universe domain. self._client._validate_universe_domain() @@ -563,11 +567,13 @@ async def mutate_row( # Certain fields should be provided within the metadata header; # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("table_name", request.table_name),) - ), - ) + metadata = tuple(metadata) + if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): + metadata += ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) # Validate the universe domain. self._client._validate_universe_domain() @@ -679,11 +685,13 @@ def mutate_rows( # Certain fields should be provided within the metadata header; # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("table_name", request.table_name),) - ), - ) + metadata = tuple(metadata) + if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): + metadata += ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) # Validate the universe domain. self._client._validate_universe_domain() @@ -838,11 +846,13 @@ async def check_and_mutate_row( # Certain fields should be provided within the metadata header; # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("table_name", request.table_name),) - ), - ) + metadata = tuple(metadata) + if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): + metadata += ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) # Validate the universe domain. self._client._validate_universe_domain() @@ -936,9 +946,11 @@ async def ping_and_warm( # Certain fields should be provided within the metadata header; # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) + metadata = tuple(metadata) + if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): + metadata += ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) # Validate the universe domain. self._client._validate_universe_domain() @@ -1062,11 +1074,13 @@ async def read_modify_write_row( # Certain fields should be provided within the metadata header; # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("table_name", request.table_name),) - ), - ) + metadata = tuple(metadata) + if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): + metadata += ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) # Validate the universe domain. self._client._validate_universe_domain() @@ -1172,11 +1186,13 @@ def generate_initial_change_stream_partitions( # Certain fields should be provided within the metadata header; # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("table_name", request.table_name),) - ), - ) + metadata = tuple(metadata) + if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): + metadata += ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) # Validate the universe domain. self._client._validate_universe_domain() @@ -1274,11 +1290,13 @@ def read_change_stream( # Certain fields should be provided within the metadata header; # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("table_name", request.table_name),) - ), - ) + metadata = tuple(metadata) + if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): + metadata += ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) # Validate the universe domain. self._client._validate_universe_domain() @@ -1377,11 +1395,13 @@ def execute_query( # Certain fields should be provided within the metadata header; # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("instance_name", request.instance_name),) - ), - ) + metadata = tuple(metadata) + if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): + metadata += ( + gapic_v1.routing_header.to_grpc_metadata( + (("instance_name", request.instance_name),) + ), + ) # Validate the universe domain. 
self._client._validate_universe_domain() diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py index 0937c90fe761..4a3f19ce6fd4 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py @@ -817,9 +817,9 @@ def read_rows( ) if header_params: - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(header_params), - ) + metadata = tuple(metadata) + if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): + metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) # Validate the universe domain. self._validate_universe_domain() @@ -933,9 +933,9 @@ def sample_row_keys( ) if header_params: - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(header_params), - ) + metadata = tuple(metadata) + if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): + metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) # Validate the universe domain. self._validate_universe_domain() @@ -1070,9 +1070,9 @@ def mutate_row( ) if header_params: - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(header_params), - ) + metadata = tuple(metadata) + if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): + metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) # Validate the universe domain. self._validate_universe_domain() @@ -1201,9 +1201,9 @@ def mutate_rows( ) if header_params: - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(header_params), - ) + metadata = tuple(metadata) + if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): + metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) # Validate the universe domain. self._validate_universe_domain() @@ -1375,9 +1375,9 @@ def check_and_mutate_row( ) if header_params: - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(header_params), - ) + metadata = tuple(metadata) + if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): + metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) # Validate the universe domain. self._validate_universe_domain() @@ -1477,9 +1477,9 @@ def ping_and_warm( header_params["app_profile_id"] = request.app_profile_id if header_params: - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(header_params), - ) + metadata = tuple(metadata) + if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): + metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) # Validate the universe domain. self._validate_universe_domain() @@ -1620,9 +1620,9 @@ def read_modify_write_row( ) if header_params: - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(header_params), - ) + metadata = tuple(metadata) + if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): + metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) # Validate the universe domain. self._validate_universe_domain() @@ -1725,11 +1725,13 @@ def generate_initial_change_stream_partitions( # Certain fields should be provided within the metadata header; # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("table_name", request.table_name),) - ), - ) + metadata = tuple(metadata) + if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): + metadata += ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) # Validate the universe domain. self._validate_universe_domain() @@ -1824,11 +1826,13 @@ def read_change_stream( # Certain fields should be provided within the metadata header; # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata( - (("table_name", request.table_name),) - ), - ) + metadata = tuple(metadata) + if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): + metadata += ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) # Validate the universe domain. self._validate_universe_domain() @@ -1933,9 +1937,9 @@ def execute_query( header_params["app_profile_id"] = request.app_profile_id if header_params: - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(header_params), - ) + metadata = tuple(metadata) + if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): + metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) # Validate the universe domain. self._validate_universe_domain() diff --git a/packages/google-cloud-bigtable/owlbot.py b/packages/google-cloud-bigtable/owlbot.py index 84aa3d61b3bd..090f7ee93c8d 100644 --- a/packages/google-cloud-bigtable/owlbot.py +++ b/packages/google-cloud-bigtable/owlbot.py @@ -143,6 +143,17 @@ def insert(file, before_line, insert_line, after_line, escape=None): escape='"' ) +# ---------------------------------------------------------------------------- +# Patch duplicate routing header: https://github.com/googleapis/gapic-generator-python/issues/2078 +# ---------------------------------------------------------------------------- +for file in ["client.py", "async_client.py"]: + s.replace( + f"google/cloud/bigtable_v2/services/bigtable/{file}", + "metadata \= tuple\(metadata\) \+ \(", + """metadata = tuple(metadata) + if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): + metadata += (""" + ) # ---------------------------------------------------------------------------- # Samples templates diff --git a/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py b/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py index 9ebc403ce10d..6c49ca0da692 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py @@ -1277,7 +1277,7 @@ async def test_customizable_retryable_errors( ("read_rows_sharded", ([ReadRowsQuery()],), "read_rows"), ("row_exists", (b"row_key",), "read_rows"), ("sample_row_keys", (), "sample_row_keys"), - ("mutate_row", (b"row_key", [mock.Mock()]), "mutate_row"), + ("mutate_row", (b"row_key", [mutations.DeleteAllFromRow()]), "mutate_row"), ( "bulk_mutate_rows", ([mutations.RowMutationEntry(b"key", [mutations.DeleteAllFromRow()])],), @@ -1286,7 +1286,7 @@ async def test_customizable_retryable_errors( ("check_and_mutate_row", (b"row_key", None), "check_and_mutate_row"), ( "read_modify_write_row", - (b"row_key", mock.Mock()), + (b"row_key", IncrementRule("f", "q")), "read_modify_write_row", ), ], @@ -1298,31 +1298,34 @@ async def test_call_metadata(self, include_app_profile, fn_name, fn_args, 
gapic_ from google.cloud.bigtable.data import TableAsync profile = "profile" if include_app_profile else None - with mock.patch( - f"google.cloud.bigtable_v2.BigtableAsyncClient.{gapic_fn}", mock.AsyncMock() - ) as gapic_mock: - gapic_mock.side_effect = RuntimeError("stop early") - async with _make_client() as client: - table = TableAsync(client, "instance-id", "table-id", profile) - try: - test_fn = table.__getattribute__(fn_name) - maybe_stream = await test_fn(*fn_args) - [i async for i in maybe_stream] - except Exception: - # we expect an exception from attempting to call the mock - pass - kwargs = gapic_mock.call_args_list[0].kwargs - metadata = kwargs["metadata"] - goog_metadata = None - for key, value in metadata: - if key == "x-goog-request-params": - goog_metadata = value - assert goog_metadata is not None, "x-goog-request-params not found" - assert "table_name=" + table.table_name in goog_metadata - if include_app_profile: - assert "app_profile_id=profile" in goog_metadata - else: - assert "app_profile_id=" not in goog_metadata + client = _make_client() + # create mock for rpc stub + transport_mock = mock.MagicMock() + rpc_mock = mock.AsyncMock() + transport_mock._wrapped_methods.__getitem__.return_value = rpc_mock + client._gapic_client._client._transport = transport_mock + client._gapic_client._client._is_universe_domain_valid = True + table = TableAsync(client, "instance-id", "table-id", profile) + try: + test_fn = table.__getattribute__(fn_name) + maybe_stream = await test_fn(*fn_args) + [i async for i in maybe_stream] + except Exception: + # we expect an exception from attempting to call the mock + pass + assert rpc_mock.call_count == 1 + kwargs = rpc_mock.call_args_list[0].kwargs + metadata = kwargs["metadata"] + # expect single metadata entry + assert len(metadata) == 1 + # expect x-goog-request-params tag + assert metadata[0][0] == "x-goog-request-params" + routing_str = metadata[0][1] + assert "table_name=" + table.table_name in routing_str + if include_app_profile: + assert "app_profile_id=profile" in routing_str + else: + assert "app_profile_id=" not in routing_str class TestReadRows: From bc707b612a6635a9815eb2bb0fe7427ce293032f Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Tue, 30 Jul 2024 16:43:34 -0700 Subject: [PATCH 809/892] chore: Update gapic-generator-python to v1.18.4 (#1002) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: Add min, max, hll aggregators and more types docs: Corrected various type documentation PiperOrigin-RevId: 654022916 Source-Link: https://github.com/googleapis/googleapis/commit/157e3bf69c47a280139758ffe59f19834679ec5e Source-Link: https://github.com/googleapis/googleapis-gen/commit/f781685ad52d58b198baf95fa120d87877b3e46e Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiZjc4MTY4NWFkNTJkNThiMTk4YmFmOTVmYTEyMGQ4Nzg3N2IzZTQ2ZSJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * feat: add MergeToCell to Mutation APIs PiperOrigin-RevId: 654025780 Source-Link: https://github.com/googleapis/googleapis/commit/9effffdf94e20cafb0beeada3727abfff2a32346 Source-Link: https://github.com/googleapis/googleapis-gen/commit/28db5a5df7c4c24adb3b01086c3db2af976241b3 Copy-Tag: 
eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiMjhkYjVhNWRmN2M0YzI0YWRiM2IwMTA4NmMzZGIyYWY5NzYyNDFiMyJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * chore: Update gapic-generator-python to v1.18.3 PiperOrigin-RevId: 655567917 Source-Link: https://github.com/googleapis/googleapis/commit/43aa65e3897557c11d947f3133ddb76e5c4b2a6c Source-Link: https://github.com/googleapis/googleapis-gen/commit/0e38378753074c0f66ff63348d6864929e104d5c Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiMGUzODM3ODc1MzA3NGMwZjY2ZmY2MzM0OGQ2ODY0OTI5ZTEwNGQ1YyJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * chore: Update gapic-generator-python to v1.18.3 PiperOrigin-RevId: 656040068 Source-Link: https://github.com/googleapis/googleapis/commit/3f4e29a88f2e1f412439e61c48c88f81dec0bbbf Source-Link: https://github.com/googleapis/googleapis-gen/commit/b8feb2109dde7b0938c22c993d002251ac6714dc Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiYjhmZWIyMTA5ZGRlN2IwOTM4YzIyYzk5M2QwMDIyNTFhYzY3MTRkYyJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * chore: Update gapic-generator-python to v1.18.4 PiperOrigin-RevId: 657207628 Source-Link: https://github.com/googleapis/googleapis/commit/33fe71e5a2061402283e0455636a98e5b78eaf7f Source-Link: https://github.com/googleapis/googleapis-gen/commit/e02739d122ed15bd5ef5771c57f12a83d47a1dda Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiZTAyNzM5ZDEyMmVkMTViZDVlZjU3NzFjNTdmMTJhODNkNDdhMWRkYSJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- .../bigtable_instance_admin/async_client.py | 4 + .../bigtable_instance_admin/client.py | 4 + .../bigtable_instance_admin/pagers.py | 69 +++- .../bigtable_table_admin/async_client.py | 8 + .../services/bigtable_table_admin/client.py | 8 + .../services/bigtable_table_admin/pagers.py | 125 +++++++- .../cloud/bigtable_admin_v2/types/types.py | 300 ++++++++++++++++-- .../google/cloud/bigtable_v2/types/data.py | 53 ++++ .../google/cloud/bigtable_v2/types/types.py | 18 ++ .../test_bigtable_instance_admin.py | 13 +- .../test_bigtable_table_admin.py | 25 +- .../unit/gapic/bigtable_v2/test_bigtable.py | 1 + 12 files changed, 587 insertions(+), 41 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py index 171dd8298bb8..abed851d59ef 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ 
-1698,6 +1698,8 @@ async def list_app_profiles( method=rpc, request=request, response=response, + retry=retry, + timeout=timeout, metadata=metadata, ) @@ -2277,6 +2279,8 @@ async def list_hot_tablets( method=rpc, request=request, response=response, + retry=retry, + timeout=timeout, metadata=metadata, ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py index 550bcb1e7b7f..5877342c4f16 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -2168,6 +2168,8 @@ def list_app_profiles( method=rpc, request=request, response=response, + retry=retry, + timeout=timeout, metadata=metadata, ) @@ -2741,6 +2743,8 @@ def list_hot_tablets( method=rpc, request=request, response=response, + retry=retry, + timeout=timeout, metadata=metadata, ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py index f76da7622bd7..bb7ee001f599 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py @@ -13,6 +13,9 @@ # See the License for the specific language governing permissions and # limitations under the License. # +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.api_core import retry_async as retries_async from typing import ( Any, AsyncIterator, @@ -22,8 +25,18 @@ Tuple, Optional, Iterator, + Union, ) +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] + OptionalAsyncRetry = Union[ + retries_async.AsyncRetry, gapic_v1.method._MethodDefault, None + ] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object, None] # type: ignore + OptionalAsyncRetry = Union[retries_async.AsyncRetry, object, None] # type: ignore + from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin from google.cloud.bigtable_admin_v2.types import instance @@ -52,6 +65,8 @@ def __init__( request: bigtable_instance_admin.ListAppProfilesRequest, response: bigtable_instance_admin.ListAppProfilesResponse, *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = () ): """Instantiate the pager. @@ -63,12 +78,17 @@ def __init__( The initial request object. response (google.cloud.bigtable_admin_v2.types.ListAppProfilesResponse): The initial response object. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. 
""" self._method = method self._request = bigtable_instance_admin.ListAppProfilesRequest(request) self._response = response + self._retry = retry + self._timeout = timeout self._metadata = metadata def __getattr__(self, name: str) -> Any: @@ -79,7 +99,12 @@ def pages(self) -> Iterator[bigtable_instance_admin.ListAppProfilesResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) + self._response = self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) yield self._response def __iter__(self) -> Iterator[instance.AppProfile]: @@ -116,6 +141,8 @@ def __init__( request: bigtable_instance_admin.ListAppProfilesRequest, response: bigtable_instance_admin.ListAppProfilesResponse, *, + retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = () ): """Instantiates the pager. @@ -127,12 +154,17 @@ def __init__( The initial request object. response (google.cloud.bigtable_admin_v2.types.ListAppProfilesResponse): The initial response object. + retry (google.api_core.retry.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ self._method = method self._request = bigtable_instance_admin.ListAppProfilesRequest(request) self._response = response + self._retry = retry + self._timeout = timeout self._metadata = metadata def __getattr__(self, name: str) -> Any: @@ -145,7 +177,12 @@ async def pages( yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) + self._response = await self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) yield self._response def __aiter__(self) -> AsyncIterator[instance.AppProfile]: @@ -184,6 +221,8 @@ def __init__( request: bigtable_instance_admin.ListHotTabletsRequest, response: bigtable_instance_admin.ListHotTabletsResponse, *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = () ): """Instantiate the pager. @@ -195,12 +234,17 @@ def __init__( The initial request object. response (google.cloud.bigtable_admin_v2.types.ListHotTabletsResponse): The initial response object. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. 
""" self._method = method self._request = bigtable_instance_admin.ListHotTabletsRequest(request) self._response = response + self._retry = retry + self._timeout = timeout self._metadata = metadata def __getattr__(self, name: str) -> Any: @@ -211,7 +255,12 @@ def pages(self) -> Iterator[bigtable_instance_admin.ListHotTabletsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) + self._response = self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) yield self._response def __iter__(self) -> Iterator[instance.HotTablet]: @@ -248,6 +297,8 @@ def __init__( request: bigtable_instance_admin.ListHotTabletsRequest, response: bigtable_instance_admin.ListHotTabletsResponse, *, + retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = () ): """Instantiates the pager. @@ -259,12 +310,17 @@ def __init__( The initial request object. response (google.cloud.bigtable_admin_v2.types.ListHotTabletsResponse): The initial response object. + retry (google.api_core.retry.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ self._method = method self._request = bigtable_instance_admin.ListHotTabletsRequest(request) self._response = response + self._retry = retry + self._timeout = timeout self._metadata = metadata def __getattr__(self, name: str) -> Any: @@ -277,7 +333,12 @@ async def pages( yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) + self._response = await self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) yield self._response def __aiter__(self) -> AsyncIterator[instance.HotTablet]: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index 5e429f7e530e..7454e08ac0d5 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -618,6 +618,8 @@ async def list_tables( method=rpc, request=request, response=response, + retry=retry, + timeout=timeout, metadata=metadata, ) @@ -1197,6 +1199,8 @@ async def list_authorized_views( method=rpc, request=request, response=response, + retry=retry, + timeout=timeout, metadata=metadata, ) @@ -2191,6 +2195,8 @@ async def list_snapshots( method=rpc, request=request, response=response, + retry=retry, + timeout=timeout, metadata=metadata, ) @@ -2750,6 +2756,8 @@ async def list_backups( method=rpc, request=request, response=response, + retry=retry, + timeout=timeout, metadata=metadata, ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index e9b06965c02e..4645d4f3b077 100644 --- 
a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -1149,6 +1149,8 @@ def list_tables( method=rpc, request=request, response=response, + retry=retry, + timeout=timeout, metadata=metadata, ) @@ -1710,6 +1712,8 @@ def list_authorized_views( method=rpc, request=request, response=response, + retry=retry, + timeout=timeout, metadata=metadata, ) @@ -2677,6 +2681,8 @@ def list_snapshots( method=rpc, request=request, response=response, + retry=retry, + timeout=timeout, metadata=metadata, ) @@ -3218,6 +3224,8 @@ def list_backups( method=rpc, request=request, response=response, + retry=retry, + timeout=timeout, metadata=metadata, ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py index d6277bce2ce9..5e20fbc5f5f3 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py @@ -13,6 +13,9 @@ # See the License for the specific language governing permissions and # limitations under the License. # +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.api_core import retry_async as retries_async from typing import ( Any, AsyncIterator, @@ -22,8 +25,18 @@ Tuple, Optional, Iterator, + Union, ) +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] + OptionalAsyncRetry = Union[ + retries_async.AsyncRetry, gapic_v1.method._MethodDefault, None + ] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object, None] # type: ignore + OptionalAsyncRetry = Union[retries_async.AsyncRetry, object, None] # type: ignore + from google.cloud.bigtable_admin_v2.types import bigtable_table_admin from google.cloud.bigtable_admin_v2.types import table @@ -52,6 +65,8 @@ def __init__( request: bigtable_table_admin.ListTablesRequest, response: bigtable_table_admin.ListTablesResponse, *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = () ): """Instantiate the pager. @@ -63,12 +78,17 @@ def __init__( The initial request object. response (google.cloud.bigtable_admin_v2.types.ListTablesResponse): The initial response object. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. 
""" self._method = method self._request = bigtable_table_admin.ListTablesRequest(request) self._response = response + self._retry = retry + self._timeout = timeout self._metadata = metadata def __getattr__(self, name: str) -> Any: @@ -79,7 +99,12 @@ def pages(self) -> Iterator[bigtable_table_admin.ListTablesResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) + self._response = self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) yield self._response def __iter__(self) -> Iterator[table.Table]: @@ -114,6 +139,8 @@ def __init__( request: bigtable_table_admin.ListTablesRequest, response: bigtable_table_admin.ListTablesResponse, *, + retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = () ): """Instantiates the pager. @@ -125,12 +152,17 @@ def __init__( The initial request object. response (google.cloud.bigtable_admin_v2.types.ListTablesResponse): The initial response object. + retry (google.api_core.retry.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ self._method = method self._request = bigtable_table_admin.ListTablesRequest(request) self._response = response + self._retry = retry + self._timeout = timeout self._metadata = metadata def __getattr__(self, name: str) -> Any: @@ -141,7 +173,12 @@ async def pages(self) -> AsyncIterator[bigtable_table_admin.ListTablesResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) + self._response = await self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) yield self._response def __aiter__(self) -> AsyncIterator[table.Table]: @@ -180,6 +217,8 @@ def __init__( request: bigtable_table_admin.ListAuthorizedViewsRequest, response: bigtable_table_admin.ListAuthorizedViewsResponse, *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = () ): """Instantiate the pager. @@ -191,12 +230,17 @@ def __init__( The initial request object. response (google.cloud.bigtable_admin_v2.types.ListAuthorizedViewsResponse): The initial response object. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. 
""" self._method = method self._request = bigtable_table_admin.ListAuthorizedViewsRequest(request) self._response = response + self._retry = retry + self._timeout = timeout self._metadata = metadata def __getattr__(self, name: str) -> Any: @@ -207,7 +251,12 @@ def pages(self) -> Iterator[bigtable_table_admin.ListAuthorizedViewsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) + self._response = self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) yield self._response def __iter__(self) -> Iterator[table.AuthorizedView]: @@ -244,6 +293,8 @@ def __init__( request: bigtable_table_admin.ListAuthorizedViewsRequest, response: bigtable_table_admin.ListAuthorizedViewsResponse, *, + retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = () ): """Instantiates the pager. @@ -255,12 +306,17 @@ def __init__( The initial request object. response (google.cloud.bigtable_admin_v2.types.ListAuthorizedViewsResponse): The initial response object. + retry (google.api_core.retry.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ self._method = method self._request = bigtable_table_admin.ListAuthorizedViewsRequest(request) self._response = response + self._retry = retry + self._timeout = timeout self._metadata = metadata def __getattr__(self, name: str) -> Any: @@ -273,7 +329,12 @@ async def pages( yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) + self._response = await self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) yield self._response def __aiter__(self) -> AsyncIterator[table.AuthorizedView]: @@ -312,6 +373,8 @@ def __init__( request: bigtable_table_admin.ListSnapshotsRequest, response: bigtable_table_admin.ListSnapshotsResponse, *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = () ): """Instantiate the pager. @@ -323,12 +386,17 @@ def __init__( The initial request object. response (google.cloud.bigtable_admin_v2.types.ListSnapshotsResponse): The initial response object. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. 
""" self._method = method self._request = bigtable_table_admin.ListSnapshotsRequest(request) self._response = response + self._retry = retry + self._timeout = timeout self._metadata = metadata def __getattr__(self, name: str) -> Any: @@ -339,7 +407,12 @@ def pages(self) -> Iterator[bigtable_table_admin.ListSnapshotsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) + self._response = self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) yield self._response def __iter__(self) -> Iterator[table.Snapshot]: @@ -374,6 +447,8 @@ def __init__( request: bigtable_table_admin.ListSnapshotsRequest, response: bigtable_table_admin.ListSnapshotsResponse, *, + retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = () ): """Instantiates the pager. @@ -385,12 +460,17 @@ def __init__( The initial request object. response (google.cloud.bigtable_admin_v2.types.ListSnapshotsResponse): The initial response object. + retry (google.api_core.retry.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ self._method = method self._request = bigtable_table_admin.ListSnapshotsRequest(request) self._response = response + self._retry = retry + self._timeout = timeout self._metadata = metadata def __getattr__(self, name: str) -> Any: @@ -401,7 +481,12 @@ async def pages(self) -> AsyncIterator[bigtable_table_admin.ListSnapshotsRespons yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) + self._response = await self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) yield self._response def __aiter__(self) -> AsyncIterator[table.Snapshot]: @@ -440,6 +525,8 @@ def __init__( request: bigtable_table_admin.ListBackupsRequest, response: bigtable_table_admin.ListBackupsResponse, *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = () ): """Instantiate the pager. @@ -451,12 +538,17 @@ def __init__( The initial request object. response (google.cloud.bigtable_admin_v2.types.ListBackupsResponse): The initial response object. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. 
""" self._method = method self._request = bigtable_table_admin.ListBackupsRequest(request) self._response = response + self._retry = retry + self._timeout = timeout self._metadata = metadata def __getattr__(self, name: str) -> Any: @@ -467,7 +559,12 @@ def pages(self) -> Iterator[bigtable_table_admin.ListBackupsResponse]: yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) + self._response = self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) yield self._response def __iter__(self) -> Iterator[table.Backup]: @@ -502,6 +599,8 @@ def __init__( request: bigtable_table_admin.ListBackupsRequest, response: bigtable_table_admin.ListBackupsResponse, *, + retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = () ): """Instantiates the pager. @@ -513,12 +612,17 @@ def __init__( The initial request object. response (google.cloud.bigtable_admin_v2.types.ListBackupsResponse): The initial response object. + retry (google.api_core.retry.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ self._method = method self._request = bigtable_table_admin.ListBackupsRequest(request) self._response = response + self._retry = retry + self._timeout = timeout self._metadata = metadata def __getattr__(self, name: str) -> Any: @@ -529,7 +633,12 @@ async def pages(self) -> AsyncIterator[bigtable_table_admin.ListBackupsResponse] yield self._response while self._response.next_page_token: self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) + self._response = await self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) yield self._response def __aiter__(self) -> AsyncIterator[table.Backup]: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/types.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/types.py index 362effbabb87..7d1d9903470c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/types.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/types.py @@ -36,26 +36,20 @@ class Type(proto.Message): For compatibility with Bigtable's existing untyped APIs, each ``Type`` includes an ``Encoding`` which describes how to convert - to/from the underlying data. This might involve composing a series - of steps into an "encoding chain," for example to convert from INT64 - -> STRING -> raw bytes. In most cases, a "link" in the encoding - chain will be based an on existing GoogleSQL conversion function - like ``CAST``. + to/from the underlying data. - Each link in the encoding chain also defines the following - properties: + Each encoding also defines the following properties: - - Natural sort: Does the encoded value sort consistently with the - original typed value? Note that Bigtable will always sort data - based on the raw encoded value, *not* the decoded type. + - Order-preserving: Does the encoded value sort consistently with + the original typed value? 
Note that Bigtable will always sort + data based on the raw encoded value, *not* the decoded type. - Example: BYTES values sort in the same order as their raw encodings. - - Counterexample: Encoding INT64 to a fixed-width STRING does - *not* preserve sort order when dealing with negative numbers. - INT64(1) > INT64(-1), but STRING("-00001") > STRING("00001). - - The overall encoding chain has this property if *every* link - does. + - Counterexample: Encoding INT64 as a fixed-width decimal string + does *not* preserve sort order when dealing with negative + numbers. ``INT64(1) > INT64(-1)``, but + ``STRING("-00001") > STRING("00001)``. - Self-delimiting: If we concatenate two encoded values, can we always tell where the first one ends and the second one begins? @@ -65,8 +59,6 @@ class Type(proto.Message): by a sign. - Counterexample: If we concatenate two UTF-8 encoded STRINGs, we have no way to tell where the first one ends. - - The overall encoding chain has this property if *any* link - does. - Compatibility: Which other systems have matching encoding schemes? For example, does this encoding have a GoogleSQL @@ -91,10 +83,42 @@ class Type(proto.Message): int64_type (google.cloud.bigtable_admin_v2.types.Type.Int64): Int64 + This field is a member of `oneof`_ ``kind``. + float32_type (google.cloud.bigtable_admin_v2.types.Type.Float32): + Float32 + + This field is a member of `oneof`_ ``kind``. + float64_type (google.cloud.bigtable_admin_v2.types.Type.Float64): + Float64 + + This field is a member of `oneof`_ ``kind``. + bool_type (google.cloud.bigtable_admin_v2.types.Type.Bool): + Bool + + This field is a member of `oneof`_ ``kind``. + timestamp_type (google.cloud.bigtable_admin_v2.types.Type.Timestamp): + Timestamp + + This field is a member of `oneof`_ ``kind``. + date_type (google.cloud.bigtable_admin_v2.types.Type.Date): + Date + This field is a member of `oneof`_ ``kind``. aggregate_type (google.cloud.bigtable_admin_v2.types.Type.Aggregate): Aggregate + This field is a member of `oneof`_ ``kind``. + struct_type (google.cloud.bigtable_admin_v2.types.Type.Struct): + Struct + + This field is a member of `oneof`_ ``kind``. + array_type (google.cloud.bigtable_admin_v2.types.Type.Array): + Array + + This field is a member of `oneof`_ ``kind``. + map_type (google.cloud.bigtable_admin_v2.types.Type.Map): + Map + This field is a member of `oneof`_ ``kind``. """ @@ -122,7 +146,7 @@ class Encoding(proto.Message): class Raw(proto.Message): r"""Leaves the value "as-is" - - Natural sort? Yes + - Order-preserving? Yes - Self-delimiting? No - Compatibility? N/A @@ -154,19 +178,31 @@ class String(proto.Message): class Encoding(proto.Message): r"""Rules used to convert to/from lower level types. + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields Attributes: utf8_raw (google.cloud.bigtable_admin_v2.types.Type.String.Encoding.Utf8Raw): - Use ``Utf8Raw`` encoding. + Deprecated: if set, converts to an empty ``utf8_bytes``. + + This field is a member of `oneof`_ ``encoding``. + utf8_bytes (google.cloud.bigtable_admin_v2.types.Type.String.Encoding.Utf8Bytes): + Use ``Utf8Bytes`` encoding. This field is a member of `oneof`_ ``encoding``. 
""" class Utf8Raw(proto.Message): + r"""Deprecated: prefer the equivalent ``Utf8Bytes``.""" + + class Utf8Bytes(proto.Message): r"""UTF-8 encoding - - Natural sort? No (ASCII characters only) + - Order-preserving? Yes (code point order) - Self-delimiting? No - Compatibility? @@ -182,6 +218,12 @@ class Utf8Raw(proto.Message): oneof="encoding", message="Type.String.Encoding.Utf8Raw", ) + utf8_bytes: "Type.String.Encoding.Utf8Bytes" = proto.Field( + proto.MESSAGE, + number=2, + oneof="encoding", + message="Type.String.Encoding.Utf8Bytes", + ) encoding: "Type.String.Encoding" = proto.Field( proto.MESSAGE, @@ -214,7 +256,7 @@ class BigEndianBytes(proto.Message): r"""Encodes the value as an 8-byte big endian twos complement ``Bytes`` value. - - Natural sort? No (positive values only) + - Order-preserving? No (positive values only) - Self-delimiting? Yes - Compatibility? @@ -224,8 +266,7 @@ class BigEndianBytes(proto.Message): Attributes: bytes_type (google.cloud.bigtable_admin_v2.types.Type.Bytes): - The underlying ``Bytes`` type, which may be able to encode - further. + Deprecated: ignored if set. """ bytes_type: "Type.Bytes" = proto.Field( @@ -247,6 +288,113 @@ class BigEndianBytes(proto.Message): message="Type.Int64.Encoding", ) + class Bool(proto.Message): + r"""bool Values of type ``Bool`` are stored in ``Value.bool_value``.""" + + class Float32(proto.Message): + r"""Float32 Values of type ``Float32`` are stored in + ``Value.float_value``. + + """ + + class Float64(proto.Message): + r"""Float64 Values of type ``Float64`` are stored in + ``Value.float_value``. + + """ + + class Timestamp(proto.Message): + r"""Timestamp Values of type ``Timestamp`` are stored in + ``Value.timestamp_value``. + + """ + + class Date(proto.Message): + r"""Date Values of type ``Date`` are stored in ``Value.date_value``.""" + + class Struct(proto.Message): + r"""A structured data value, consisting of fields which map to + dynamically typed values. Values of type ``Struct`` are stored in + ``Value.array_value`` where entries are in the same order and number + as ``field_types``. + + Attributes: + fields (MutableSequence[google.cloud.bigtable_admin_v2.types.Type.Struct.Field]): + The names and types of the fields in this + struct. + """ + + class Field(proto.Message): + r"""A struct field and its type. + + Attributes: + field_name (str): + The field name (optional). Fields without a ``field_name`` + are considered anonymous and cannot be referenced by name. + type_ (google.cloud.bigtable_admin_v2.types.Type): + The type of values in this field. + """ + + field_name: str = proto.Field( + proto.STRING, + number=1, + ) + type_: "Type" = proto.Field( + proto.MESSAGE, + number=2, + message="Type", + ) + + fields: MutableSequence["Type.Struct.Field"] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="Type.Struct.Field", + ) + + class Array(proto.Message): + r"""An ordered list of elements of a given type. Values of type + ``Array`` are stored in ``Value.array_value``. + + Attributes: + element_type (google.cloud.bigtable_admin_v2.types.Type): + The type of the elements in the array. This must not be + ``Array``. + """ + + element_type: "Type" = proto.Field( + proto.MESSAGE, + number=1, + message="Type", + ) + + class Map(proto.Message): + r"""A mapping of keys to values of a given type. Values of type ``Map`` + are stored in a ``Value.array_value`` where each entry is another + ``Value.array_value`` with two elements (the key and the value, in + that order). 
Normally encoded Map values won't have repeated keys, + however, clients are expected to handle the case in which they do. + If the same key appears multiple times, the *last* value takes + precedence. + + Attributes: + key_type (google.cloud.bigtable_admin_v2.types.Type): + The type of a map key. Only ``Bytes``, ``String``, and + ``Int64`` are allowed as key types. + value_type (google.cloud.bigtable_admin_v2.types.Type): + The type of the values in a map. + """ + + key_type: "Type" = proto.Field( + proto.MESSAGE, + number=1, + message="Type", + ) + value_type: "Type" = proto.Field( + proto.MESSAGE, + number=2, + message="Type", + ) + class Aggregate(proto.Message): r"""A value that combines incremental updates into a summarized value. @@ -254,6 +402,10 @@ class Aggregate(proto.Message): Writes will provide either the ``input_type`` or ``state_type``, and reads will always return the ``state_type`` . + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields @@ -270,6 +422,18 @@ class Aggregate(proto.Message): sum (google.cloud.bigtable_admin_v2.types.Type.Aggregate.Sum): Sum aggregator. + This field is a member of `oneof`_ ``aggregator``. + hllpp_unique_count (google.cloud.bigtable_admin_v2.types.Type.Aggregate.HyperLogLogPlusPlusUniqueCount): + HyperLogLogPlusPlusUniqueCount aggregator. + + This field is a member of `oneof`_ ``aggregator``. + max_ (google.cloud.bigtable_admin_v2.types.Type.Aggregate.Max): + Max aggregator. + + This field is a member of `oneof`_ ``aggregator``. + min_ (google.cloud.bigtable_admin_v2.types.Type.Aggregate.Min): + Min aggregator. + This field is a member of `oneof`_ ``aggregator``. """ @@ -279,6 +443,28 @@ class Sum(proto.Message): """ + class Max(proto.Message): + r"""Computes the max of the input values. Allowed input: ``Int64`` + State: same as input + + """ + + class Min(proto.Message): + r"""Computes the min of the input values. Allowed input: ``Int64`` + State: same as input + + """ + + class HyperLogLogPlusPlusUniqueCount(proto.Message): + r"""Computes an approximate unique count over the input values. When + using raw data as input, be careful to use a consistent encoding. + Otherwise the same value encoded differently could count more than + once, or two distinct values could count as identical. 
Input: Any, + or omit for Raw State: TBD Special state conversions: ``Int64`` (the + unique count estimate) + + """ + input_type: "Type" = proto.Field( proto.MESSAGE, number=1, @@ -295,6 +481,26 @@ class Sum(proto.Message): oneof="aggregator", message="Type.Aggregate.Sum", ) + hllpp_unique_count: "Type.Aggregate.HyperLogLogPlusPlusUniqueCount" = ( + proto.Field( + proto.MESSAGE, + number=5, + oneof="aggregator", + message="Type.Aggregate.HyperLogLogPlusPlusUniqueCount", + ) + ) + max_: "Type.Aggregate.Max" = proto.Field( + proto.MESSAGE, + number=6, + oneof="aggregator", + message="Type.Aggregate.Max", + ) + min_: "Type.Aggregate.Min" = proto.Field( + proto.MESSAGE, + number=7, + oneof="aggregator", + message="Type.Aggregate.Min", + ) bytes_type: Bytes = proto.Field( proto.MESSAGE, @@ -314,12 +520,60 @@ class Sum(proto.Message): oneof="kind", message=Int64, ) + float32_type: Float32 = proto.Field( + proto.MESSAGE, + number=12, + oneof="kind", + message=Float32, + ) + float64_type: Float64 = proto.Field( + proto.MESSAGE, + number=9, + oneof="kind", + message=Float64, + ) + bool_type: Bool = proto.Field( + proto.MESSAGE, + number=8, + oneof="kind", + message=Bool, + ) + timestamp_type: Timestamp = proto.Field( + proto.MESSAGE, + number=10, + oneof="kind", + message=Timestamp, + ) + date_type: Date = proto.Field( + proto.MESSAGE, + number=11, + oneof="kind", + message=Date, + ) aggregate_type: Aggregate = proto.Field( proto.MESSAGE, number=6, oneof="kind", message=Aggregate, ) + struct_type: Struct = proto.Field( + proto.MESSAGE, + number=7, + oneof="kind", + message=Struct, + ) + array_type: Array = proto.Field( + proto.MESSAGE, + number=3, + oneof="kind", + message=Array, + ) + map_type: Map = proto.Field( + proto.MESSAGE, + number=4, + oneof="kind", + message=Map, + ) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py index ec32cac8216e..9d964a4f6131 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py @@ -1032,6 +1032,10 @@ class Mutation(proto.Message): add_to_cell (google.cloud.bigtable_v2.types.Mutation.AddToCell): Incrementally updates an ``Aggregate`` cell. + This field is a member of `oneof`_ ``mutation``. + merge_to_cell (google.cloud.bigtable_v2.types.Mutation.MergeToCell): + Merges accumulated state to an ``Aggregate`` cell. + This field is a member of `oneof`_ ``mutation``. delete_from_column (google.cloud.bigtable_v2.types.Mutation.DeleteFromColumn): Deletes cells from a column. @@ -1130,6 +1134,49 @@ class AddToCell(proto.Message): message="Value", ) + class MergeToCell(proto.Message): + r"""A Mutation which merges accumulated state into a cell in an + ``Aggregate`` family. + + Attributes: + family_name (str): + The name of the ``Aggregate`` family into which new data + should be added. This must be a family with a ``value_type`` + of ``Aggregate``. Format: ``[-_.a-zA-Z0-9]+`` + column_qualifier (google.cloud.bigtable_v2.types.Value): + The qualifier of the column into which new data should be + added. This must be a ``raw_value``. + timestamp (google.cloud.bigtable_v2.types.Value): + The timestamp of the cell to which new data should be added. + This must be a ``raw_timestamp_micros`` that matches the + table's ``granularity``. 
+ input (google.cloud.bigtable_v2.types.Value): + The input value to be merged into the specified cell. This + must be compatible with the family's + ``value_type.state_type``. Merging ``NULL`` is allowed, but + has no effect. + """ + + family_name: str = proto.Field( + proto.STRING, + number=1, + ) + column_qualifier: "Value" = proto.Field( + proto.MESSAGE, + number=2, + message="Value", + ) + timestamp: "Value" = proto.Field( + proto.MESSAGE, + number=3, + message="Value", + ) + input: "Value" = proto.Field( + proto.MESSAGE, + number=4, + message="Value", + ) + class DeleteFromColumn(proto.Message): r"""A Mutation which deletes cells from the specified column, optionally restricting the deletions to a given timestamp range. @@ -1191,6 +1238,12 @@ class DeleteFromRow(proto.Message): oneof="mutation", message=AddToCell, ) + merge_to_cell: MergeToCell = proto.Field( + proto.MESSAGE, + number=6, + oneof="mutation", + message=MergeToCell, + ) delete_from_column: DeleteFromColumn = proto.Field( proto.MESSAGE, number=2, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/types.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/types.py index 8eb307b3e958..153420e45655 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/types.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/types.py @@ -178,15 +178,27 @@ class String(proto.Message): class Encoding(proto.Message): r"""Rules used to convert to/from lower level types. + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields Attributes: + utf8_raw (google.cloud.bigtable_v2.types.Type.String.Encoding.Utf8Raw): + Deprecated: if set, converts to an empty ``utf8_bytes``. + + This field is a member of `oneof`_ ``encoding``. utf8_bytes (google.cloud.bigtable_v2.types.Type.String.Encoding.Utf8Bytes): Use ``Utf8Bytes`` encoding. This field is a member of `oneof`_ ``encoding``. 
""" + class Utf8Raw(proto.Message): + r"""Deprecated: prefer the equivalent ``Utf8Bytes``.""" + class Utf8Bytes(proto.Message): r"""UTF-8 encoding @@ -200,6 +212,12 @@ class Utf8Bytes(proto.Message): """ + utf8_raw: "Type.String.Encoding.Utf8Raw" = proto.Field( + proto.MESSAGE, + number=1, + oneof="encoding", + message="Type.String.Encoding.Utf8Raw", + ) utf8_bytes: "Type.String.Encoding.Utf8Bytes" = proto.Field( proto.MESSAGE, number=2, diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index 64fa98937b58..ea6737973a22 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -47,6 +47,7 @@ from google.api_core import operation_async # type: ignore from google.api_core import operations_v1 from google.api_core import path_template +from google.api_core import retry as retries from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import ( @@ -6799,12 +6800,16 @@ def test_list_app_profiles_pager(transport_name: str = "grpc"): ) expected_metadata = () + retry = retries.Retry() + timeout = 5 expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) - pager = client.list_app_profiles(request={}) + pager = client.list_app_profiles(request={}, retry=retry, timeout=timeout) assert pager._metadata == expected_metadata + assert pager._retry == retry + assert pager._timeout == timeout results = list(pager) assert len(results) == 6 @@ -9309,12 +9314,16 @@ def test_list_hot_tablets_pager(transport_name: str = "grpc"): ) expected_metadata = () + retry = retries.Retry() + timeout = 5 expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) - pager = client.list_hot_tablets(request={}) + pager = client.list_hot_tablets(request={}, retry=retry, timeout=timeout) assert pager._metadata == expected_metadata + assert pager._retry == retry + assert pager._timeout == timeout results = list(pager) assert len(results) == 6 diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index 4c888da7ca96..2b84213bc68a 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -47,6 +47,7 @@ from google.api_core import operation_async # type: ignore from google.api_core import operations_v1 from google.api_core import path_template +from google.api_core import retry as retries from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( @@ -2398,12 +2399,16 @@ def test_list_tables_pager(transport_name: str = "grpc"): ) expected_metadata = () + retry = retries.Retry() + timeout = 5 expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) - pager = client.list_tables(request={}) + pager = 
client.list_tables(request={}, retry=retry, timeout=timeout) assert pager._metadata == expected_metadata + assert pager._retry == retry + assert pager._timeout == timeout results = list(pager) assert len(results) == 6 @@ -4843,12 +4848,16 @@ def test_list_authorized_views_pager(transport_name: str = "grpc"): ) expected_metadata = () + retry = retries.Retry() + timeout = 5 expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) - pager = client.list_authorized_views(request={}) + pager = client.list_authorized_views(request={}, retry=retry, timeout=timeout) assert pager._metadata == expected_metadata + assert pager._retry == retry + assert pager._timeout == timeout results = list(pager) assert len(results) == 6 @@ -8826,12 +8835,16 @@ def test_list_snapshots_pager(transport_name: str = "grpc"): ) expected_metadata = () + retry = retries.Retry() + timeout = 5 expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) - pager = client.list_snapshots(request={}) + pager = client.list_snapshots(request={}, retry=retry, timeout=timeout) assert pager._metadata == expected_metadata + assert pager._retry == retry + assert pager._timeout == timeout results = list(pager) assert len(results) == 6 @@ -11237,12 +11250,16 @@ def test_list_backups_pager(transport_name: str = "grpc"): ) expected_metadata = () + retry = retries.Retry() + timeout = 5 expected_metadata = tuple(expected_metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), ) - pager = client.list_backups(request={}) + pager = client.list_backups(request={}, retry=retry, timeout=timeout) assert pager._metadata == expected_metadata + assert pager._retry == retry + assert pager._timeout == timeout results = list(pager) assert len(results) == 6 diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py index 348338d18cc2..60cc7fd6e952 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py @@ -43,6 +43,7 @@ from google.api_core import grpc_helpers from google.api_core import grpc_helpers_async from google.api_core import path_template +from google.api_core import retry as retries from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError from google.cloud.bigtable_v2.services.bigtable import BigtableAsyncClient From d5b39fc6869b6ea44679100e60c18e607ea2a214 Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Mon, 5 Aug 2024 17:11:13 -0600 Subject: [PATCH 810/892] chore: revert sync client customizations (#1009) --- .../bigtable_v2/services/bigtable/client.py | 72 +++++++++---------- packages/google-cloud-bigtable/owlbot.py | 2 +- 2 files changed, 35 insertions(+), 39 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py index 4a3f19ce6fd4..0937c90fe761 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py @@ -817,9 +817,9 @@ def read_rows( ) if header_params: - metadata = tuple(metadata) - if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): - metadata 
+= (gapic_v1.routing_header.to_grpc_metadata(header_params),) + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) # Validate the universe domain. self._validate_universe_domain() @@ -933,9 +933,9 @@ def sample_row_keys( ) if header_params: - metadata = tuple(metadata) - if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): - metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) # Validate the universe domain. self._validate_universe_domain() @@ -1070,9 +1070,9 @@ def mutate_row( ) if header_params: - metadata = tuple(metadata) - if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): - metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) # Validate the universe domain. self._validate_universe_domain() @@ -1201,9 +1201,9 @@ def mutate_rows( ) if header_params: - metadata = tuple(metadata) - if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): - metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) # Validate the universe domain. self._validate_universe_domain() @@ -1375,9 +1375,9 @@ def check_and_mutate_row( ) if header_params: - metadata = tuple(metadata) - if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): - metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) # Validate the universe domain. self._validate_universe_domain() @@ -1477,9 +1477,9 @@ def ping_and_warm( header_params["app_profile_id"] = request.app_profile_id if header_params: - metadata = tuple(metadata) - if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): - metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) # Validate the universe domain. self._validate_universe_domain() @@ -1620,9 +1620,9 @@ def read_modify_write_row( ) if header_params: - metadata = tuple(metadata) - if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): - metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) # Validate the universe domain. self._validate_universe_domain() @@ -1725,13 +1725,11 @@ def generate_initial_change_stream_partitions( # Certain fields should be provided within the metadata header; # add these here. - metadata = tuple(metadata) - if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): - metadata += ( - gapic_v1.routing_header.to_grpc_metadata( - (("table_name", request.table_name),) - ), - ) + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) # Validate the universe domain. self._validate_universe_domain() @@ -1826,13 +1824,11 @@ def read_change_stream( # Certain fields should be provided within the metadata header; # add these here. 
- metadata = tuple(metadata) - if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): - metadata += ( - gapic_v1.routing_header.to_grpc_metadata( - (("table_name", request.table_name),) - ), - ) + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) # Validate the universe domain. self._validate_universe_domain() @@ -1937,9 +1933,9 @@ def execute_query( header_params["app_profile_id"] = request.app_profile_id if header_params: - metadata = tuple(metadata) - if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): - metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) # Validate the universe domain. self._validate_universe_domain() diff --git a/packages/google-cloud-bigtable/owlbot.py b/packages/google-cloud-bigtable/owlbot.py index 090f7ee93c8d..0ec4cd61c7b2 100644 --- a/packages/google-cloud-bigtable/owlbot.py +++ b/packages/google-cloud-bigtable/owlbot.py @@ -146,7 +146,7 @@ def insert(file, before_line, insert_line, after_line, escape=None): # ---------------------------------------------------------------------------- # Patch duplicate routing header: https://github.com/googleapis/gapic-generator-python/issues/2078 # ---------------------------------------------------------------------------- -for file in ["client.py", "async_client.py"]: +for file in ["async_client.py"]: s.replace( f"google/cloud/bigtable_v2/services/bigtable/{file}", "metadata \= tuple\(metadata\) \+ \(", From 88f070ea78acb5b4d72c02ff9e0ca507740f3bc1 Mon Sep 17 00:00:00 2001 From: Kajetan Boroszko Date: Thu, 8 Aug 2024 22:12:25 +0200 Subject: [PATCH 811/892] feat: async execute query client (#1011) Co-authored-by: Mateusz Walkiewicz Co-authored-by: Owl Bot --- .../google/cloud/bigtable/data/__init__.py | 2 + .../bigtable/data/_async/_mutate_rows.py | 4 +- .../cloud/bigtable/data/_async/_read_rows.py | 3 +- .../cloud/bigtable/data/_async/client.py | 234 ++++-- .../google/cloud/bigtable/data/_helpers.py | 38 +- .../google/cloud/bigtable/data/exceptions.py | 8 + .../bigtable/data/execute_query/__init__.py | 38 + .../data/execute_query/_async/__init__.py | 13 + .../_async/execute_query_iterator.py | 211 ++++++ .../data/execute_query/_byte_cursor.py | 144 ++++ .../execute_query/_parameters_formatting.py | 118 +++ .../_query_result_parsing_utils.py | 133 ++++ .../bigtable/data/execute_query/_reader.py | 149 ++++ .../bigtable/data/execute_query/metadata.py | 354 +++++++++ .../bigtable/data/execute_query/values.py | 116 +++ .../google/cloud/bigtable/helpers.py | 31 + .../google/cloud/bigtable/instance.py | 1 + .../data_client/data_client_snippets_async.py | 45 +- .../data_client_snippets_async_test.py | 5 + .../testing/constraints-3.8.txt | 1 + .../google-cloud-bigtable/tests/_testing.py | 36 + .../system/data/test_execute_query_async.py | 288 +++++++ .../system/data/test_execute_query_utils.py | 272 +++++++ .../tests/unit/_testing.py | 16 + .../tests/unit/data/_async/__init__.py | 13 + .../tests/unit/data/_testing.py | 18 + .../tests/unit/data/execute_query/__init__.py | 13 + .../data/execute_query/_async/__init__.py | 13 + .../data/execute_query/_async/_testing.py | 36 + .../_async/test_query_iterator.py | 156 ++++ .../tests/unit/data/execute_query/_testing.py | 17 + .../data/execute_query/test_byte_cursor.py | 149 ++++ .../test_execute_query_parameters_parsing.py | 134 ++++ 
.../test_query_result_parsing_utils.py | 715 ++++++++++++++++++ .../test_query_result_row_reader.py | 310 ++++++++ .../tests/unit/data/test__helpers.py | 25 +- .../tests/unit/data/test_helpers.py | 45 ++ .../tests/unit/v2_client/_testing.py | 3 + .../tests/unit/v2_client/test_instance.py | 26 + 39 files changed, 3855 insertions(+), 78 deletions(-) create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/__init__.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_async/__init__.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_byte_cursor.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_parameters_formatting.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_query_result_parsing_utils.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_reader.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/metadata.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/values.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/helpers.py create mode 100644 packages/google-cloud-bigtable/tests/_testing.py create mode 100644 packages/google-cloud-bigtable/tests/system/data/test_execute_query_async.py create mode 100644 packages/google-cloud-bigtable/tests/system/data/test_execute_query_utils.py create mode 100644 packages/google-cloud-bigtable/tests/unit/_testing.py create mode 100644 packages/google-cloud-bigtable/tests/unit/data/_async/__init__.py create mode 100644 packages/google-cloud-bigtable/tests/unit/data/_testing.py create mode 100644 packages/google-cloud-bigtable/tests/unit/data/execute_query/__init__.py create mode 100644 packages/google-cloud-bigtable/tests/unit/data/execute_query/_async/__init__.py create mode 100644 packages/google-cloud-bigtable/tests/unit/data/execute_query/_async/_testing.py create mode 100644 packages/google-cloud-bigtable/tests/unit/data/execute_query/_async/test_query_iterator.py create mode 100644 packages/google-cloud-bigtable/tests/unit/data/execute_query/_testing.py create mode 100644 packages/google-cloud-bigtable/tests/unit/data/execute_query/test_byte_cursor.py create mode 100644 packages/google-cloud-bigtable/tests/unit/data/execute_query/test_execute_query_parameters_parsing.py create mode 100644 packages/google-cloud-bigtable/tests/unit/data/execute_query/test_query_result_parsing_utils.py create mode 100644 packages/google-cloud-bigtable/tests/unit/data/execute_query/test_query_result_row_reader.py create mode 100644 packages/google-cloud-bigtable/tests/unit/data/test_helpers.py diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/__init__.py index 5229f8021139..68dc22891660 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/__init__.py @@ -39,6 +39,7 @@ from google.cloud.bigtable.data.exceptions import RetryExceptionGroup from google.cloud.bigtable.data.exceptions import MutationsExceptionGroup from google.cloud.bigtable.data.exceptions import ShardedReadRowsExceptionGroup +from 
google.cloud.bigtable.data.exceptions import ParameterTypeInferenceFailed from google.cloud.bigtable.data._helpers import TABLE_DEFAULT from google.cloud.bigtable.data._helpers import RowKeySamples @@ -68,6 +69,7 @@ "RetryExceptionGroup", "MutationsExceptionGroup", "ShardedReadRowsExceptionGroup", + "ParameterTypeInferenceFailed", "ShardedQuery", "TABLE_DEFAULT", ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_mutate_rows.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_mutate_rows.py index 99b9944cd943..465378aa43ce 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_mutate_rows.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_mutate_rows.py @@ -84,7 +84,9 @@ def __init__( f"all entries. Found {total_mutations}." ) # create partial function to pass to trigger rpc call - metadata = _make_metadata(table.table_name, table.app_profile_id) + metadata = _make_metadata( + table.table_name, table.app_profile_id, instance_name=None + ) self._gapic_fn = functools.partial( gapic_client.mutate_rows, table_name=table.table_name, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_read_rows.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_read_rows.py index 78cb7a991f66..6034ae6cfffb 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_read_rows.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_read_rows.py @@ -102,8 +102,7 @@ def __init__( self.table = table self._predicate = retries.if_exception_type(*retryable_exceptions) self._metadata = _make_metadata( - table.table_name, - table.app_profile_id, + table.table_name, table.app_profile_id, instance_name=None ) self._last_yielded_row_key: bytes | None = None self._remaining_count: int | None = self.request.rows_limit or None diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py index 34fdf847a2d1..600937df856d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py @@ -15,74 +15,88 @@ from __future__ import annotations +import asyncio +from functools import partial +import os +import random +import sys +import time from typing import ( - cast, + TYPE_CHECKING, Any, AsyncIterable, + Dict, Optional, - Set, Sequence, - TYPE_CHECKING, + Set, + Union, + cast, ) - -import asyncio -import grpc -import time import warnings -import sys -import random -import os -from functools import partial +from google.api_core import client_options as client_options_lib +from google.api_core import retry as retries +from google.api_core.exceptions import Aborted, DeadlineExceeded, ServiceUnavailable +import google.auth._default +import google.auth.credentials +from google.cloud.client import ClientWithProject +from google.cloud.environment_vars import BIGTABLE_EMULATOR # type: ignore +import grpc +from google.cloud.bigtable.client import _DEFAULT_BIGTABLE_EMULATOR_CLIENT +from google.cloud.bigtable.data.execute_query._async.execute_query_iterator import ( + ExecuteQueryIteratorAsync, +) +from google.cloud.bigtable.data._async._mutate_rows import _MutateRowsOperationAsync +from google.cloud.bigtable.data._async._read_rows import _ReadRowsOperationAsync +from google.cloud.bigtable.data._async.mutations_batcher import ( + _MB_SIZE, + 
MutationsBatcherAsync, +) +from google.cloud.bigtable.data._helpers import ( + _CONCURRENCY_LIMIT, + TABLE_DEFAULT, + _attempt_timeout_generator, + _get_error_type, + _get_retryable_errors, + _get_timeouts, + _make_metadata, + _retry_exception_factory, + _validate_timeouts, + _WarmedInstanceKey, +) +from google.cloud.bigtable.data.exceptions import ( + FailedQueryShardError, + ShardedReadRowsExceptionGroup, +) +from google.cloud.bigtable.data.mutations import Mutation, RowMutationEntry +from google.cloud.bigtable.data.read_modify_write_rules import ReadModifyWriteRule +from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery +from google.cloud.bigtable.data.row import Row +from google.cloud.bigtable.data.row_filters import ( + CellsRowLimitFilter, + RowFilter, + RowFilterChain, + StripValueTransformerFilter, +) +from google.cloud.bigtable.data.execute_query.values import ExecuteQueryValueType +from google.cloud.bigtable.data.execute_query.metadata import SqlType +from google.cloud.bigtable.data.execute_query._parameters_formatting import ( + _format_execute_query_params, +) +from google.cloud.bigtable_v2.services.bigtable.async_client import ( + DEFAULT_CLIENT_INFO, + BigtableAsyncClient, +) from google.cloud.bigtable_v2.services.bigtable.client import BigtableClientMeta -from google.cloud.bigtable_v2.services.bigtable.async_client import BigtableAsyncClient -from google.cloud.bigtable_v2.services.bigtable.async_client import DEFAULT_CLIENT_INFO from google.cloud.bigtable_v2.services.bigtable.transports.pooled_grpc_asyncio import ( PooledBigtableGrpcAsyncIOTransport, PooledChannel, ) from google.cloud.bigtable_v2.types.bigtable import PingAndWarmRequest -from google.cloud.client import ClientWithProject -from google.cloud.environment_vars import BIGTABLE_EMULATOR # type: ignore -from google.api_core import retry as retries -from google.api_core.exceptions import DeadlineExceeded -from google.api_core.exceptions import ServiceUnavailable -from google.api_core.exceptions import Aborted -from google.cloud.bigtable.data._async._read_rows import _ReadRowsOperationAsync - -import google.auth.credentials -import google.auth._default -from google.api_core import client_options as client_options_lib -from google.cloud.bigtable.client import _DEFAULT_BIGTABLE_EMULATOR_CLIENT -from google.cloud.bigtable.data.row import Row -from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery -from google.cloud.bigtable.data.exceptions import FailedQueryShardError -from google.cloud.bigtable.data.exceptions import ShardedReadRowsExceptionGroup - -from google.cloud.bigtable.data.mutations import Mutation, RowMutationEntry -from google.cloud.bigtable.data._async._mutate_rows import _MutateRowsOperationAsync -from google.cloud.bigtable.data._helpers import TABLE_DEFAULT -from google.cloud.bigtable.data._helpers import _WarmedInstanceKey -from google.cloud.bigtable.data._helpers import _CONCURRENCY_LIMIT -from google.cloud.bigtable.data._helpers import _make_metadata -from google.cloud.bigtable.data._helpers import _retry_exception_factory -from google.cloud.bigtable.data._helpers import _validate_timeouts -from google.cloud.bigtable.data._helpers import _get_retryable_errors -from google.cloud.bigtable.data._helpers import _get_timeouts -from google.cloud.bigtable.data._helpers import _attempt_timeout_generator -from google.cloud.bigtable.data._async.mutations_batcher import MutationsBatcherAsync -from google.cloud.bigtable.data._async.mutations_batcher import _MB_SIZE -from 
google.cloud.bigtable.data.read_modify_write_rules import ReadModifyWriteRule -from google.cloud.bigtable.data.row_filters import RowFilter -from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter -from google.cloud.bigtable.data.row_filters import CellsRowLimitFilter -from google.cloud.bigtable.data.row_filters import RowFilterChain - if TYPE_CHECKING: - from google.cloud.bigtable.data._helpers import RowKeySamples - from google.cloud.bigtable.data._helpers import ShardedQuery + from google.cloud.bigtable.data._helpers import RowKeySamples, ShardedQuery class BigtableDataClientAsync(ClientWithProject): @@ -315,7 +329,9 @@ async def _manage_channel( next_refresh = random.uniform(refresh_interval_min, refresh_interval_max) next_sleep = next_refresh - (time.time() - start_timestamp) - async def _register_instance(self, instance_id: str, owner: TableAsync) -> None: + async def _register_instance( + self, instance_id: str, owner: Union[TableAsync, ExecuteQueryIteratorAsync] + ) -> None: """ Registers an instance with the client, and warms the channel pool for the instance @@ -346,7 +362,7 @@ async def _register_instance(self, instance_id: str, owner: TableAsync) -> None: self._start_background_channel_refresh() async def _remove_instance_registration( - self, instance_id: str, owner: TableAsync + self, instance_id: str, owner: Union[TableAsync, ExecuteQueryIteratorAsync] ) -> bool: """ Removes an instance from the client's registered instances, to prevent @@ -416,6 +432,102 @@ def get_table(self, instance_id: str, table_id: str, *args, **kwargs) -> TableAs """ return TableAsync(self, instance_id, table_id, *args, **kwargs) + async def execute_query( + self, + query: str, + instance_id: str, + *, + parameters: Dict[str, ExecuteQueryValueType] | None = None, + parameter_types: Dict[str, SqlType.Type] | None = None, + app_profile_id: str | None = None, + operation_timeout: float = 600, + attempt_timeout: float | None = 20, + retryable_errors: Sequence[type[Exception]] = ( + DeadlineExceeded, + ServiceUnavailable, + Aborted, + ), + ) -> "ExecuteQueryIteratorAsync": + """ + Executes an SQL query on an instance. + Returns an iterator to asynchronously stream back columns from selected rows. + + Failed requests within operation_timeout will be retried based on the + retryable_errors list until operation_timeout is reached. + + Args: + - query: Query to be run on Bigtable instance. The query can use ``@param`` + placeholders to use parameter interpolation on the server. Values for all + parameters should be provided in ``parameters``. Types of parameters are + inferred but should be provided in ``parameter_types`` if the inference is + not possible (i.e. when value can be None, an empty list or an empty dict). + - instance_id: The Bigtable instance ID to perform the query on. + instance_id is combined with the client's project to fully + specify the instance. + - parameters: Dictionary with values for all parameters used in the ``query``. + - parameter_types: Dictionary with types of parameters used in the ``query``. + Required to contain entries only for parameters whose type cannot be + detected automatically (i.e. the value can be None, an empty list or + an empty dict). + - app_profile_id: The app profile to associate with requests. + https://cloud.google.com/bigtable/docs/app-profiles + - operation_timeout: the time budget for the entire operation, in seconds. + Failed requests will be retried within the budget. + Defaults to 600 seconds. 
+ - attempt_timeout: the time budget for an individual network request, in seconds. + If it takes longer than this time to complete, the request will be cancelled with + a DeadlineExceeded exception, and a retry will be attempted. + Defaults to the 20 seconds. + If None, defaults to operation_timeout. + - retryable_errors: a list of errors that will be retried if encountered. + Defaults to 4 (DeadlineExceeded), 14 (ServiceUnavailable), and 10 (Aborted) + Returns: + - an asynchronous iterator that yields rows returned by the query + Raises: + - DeadlineExceeded: raised after operation timeout + will be chained with a RetryExceptionGroup containing GoogleAPIError exceptions + from any retries that failed + - GoogleAPIError: raised if the request encounters an unrecoverable error + """ + warnings.warn( + "ExecuteQuery is in preview and may change in the future.", + category=RuntimeWarning, + ) + + retryable_excs = [_get_error_type(e) for e in retryable_errors] + + pb_params = _format_execute_query_params(parameters, parameter_types) + + instance_name = self._gapic_client.instance_path(self.project, instance_id) + + request_body = { + "instance_name": instance_name, + "app_profile_id": app_profile_id, + "query": query, + "params": pb_params, + "proto_format": {}, + } + + # app_profile_id should be set to an empty string for ExecuteQueryRequest only + app_profile_id_for_metadata = app_profile_id or "" + + req_metadata = _make_metadata( + table_name=None, + app_profile_id=app_profile_id_for_metadata, + instance_name=instance_name, + ) + + return ExecuteQueryIteratorAsync( + self, + instance_id, + app_profile_id, + request_body, + attempt_timeout, + operation_timeout, + req_metadata, + retryable_excs, + ) + async def __aenter__(self): self._start_background_channel_refresh() return self @@ -893,7 +1005,9 @@ async def sample_row_keys( sleep_generator = retries.exponential_sleep_generator(0.01, 2, 60) # prepare request - metadata = _make_metadata(self.table_name, self.app_profile_id) + metadata = _make_metadata( + self.table_name, self.app_profile_id, instance_name=None + ) async def execute_rpc(): results = await self.client._gapic_client.sample_row_keys( @@ -1029,7 +1143,9 @@ async def mutate_row( table_name=self.table_name, app_profile_id=self.app_profile_id, timeout=attempt_timeout, - metadata=_make_metadata(self.table_name, self.app_profile_id), + metadata=_make_metadata( + self.table_name, self.app_profile_id, instance_name=None + ), retry=None, ) return await retries.retry_target_async( @@ -1147,7 +1263,9 @@ async def check_and_mutate_row( ): false_case_mutations = [false_case_mutations] false_case_list = [m._to_pb() for m in false_case_mutations or []] - metadata = _make_metadata(self.table_name, self.app_profile_id) + metadata = _make_metadata( + self.table_name, self.app_profile_id, instance_name=None + ) result = await self.client._gapic_client.check_and_mutate_row( true_mutations=true_case_list, false_mutations=false_case_list, @@ -1198,7 +1316,9 @@ async def read_modify_write_row( rules = [rules] if not rules: raise ValueError("rules must contain at least one item") - metadata = _make_metadata(self.table_name, self.app_profile_id) + metadata = _make_metadata( + self.table_name, self.app_profile_id, instance_name=None + ) result = await self.client._gapic_client.read_modify_write_row( rules=[rule._to_pb() for rule in rules], row_key=row_key.encode("utf-8") if isinstance(row_key, str) else row_key, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_helpers.py 
b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_helpers.py index a8fba9ef109f..2d36c521faee 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_helpers.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_helpers.py @@ -16,7 +16,7 @@ """ from __future__ import annotations -from typing import Sequence, List, Tuple, TYPE_CHECKING +from typing import Sequence, List, Tuple, TYPE_CHECKING, Union import time import enum from collections import namedtuple @@ -60,15 +60,26 @@ class TABLE_DEFAULT(enum.Enum): def _make_metadata( - table_name: str, app_profile_id: str | None + table_name: str | None, app_profile_id: str | None, instance_name: str | None ) -> list[tuple[str, str]]: """ Create properly formatted gRPC metadata for requests. """ params = [] - params.append(f"table_name={table_name}") + + if table_name is not None and instance_name is not None: + raise ValueError("metadata can't contain both instance_name and table_name") + + if table_name is not None: + params.append(f"table_name={table_name}") + if instance_name is not None: + params.append(f"name={instance_name}") if app_profile_id is not None: params.append(f"app_profile_id={app_profile_id}") + if len(params) == 0: + raise ValueError( + "At least one of table_name and app_profile_id should be not None." + ) params_str = "&".join(params) return [("x-goog-request-params", params_str)] @@ -203,6 +214,22 @@ def _validate_timeouts( raise ValueError("attempt_timeout must be greater than 0") +def _get_error_type( + call_code: Union["grpc.StatusCode", int, type[Exception]] +) -> type[Exception]: + """Helper function for ensuring the object is an exception type. + If it is not, the proper GoogleAPICallError type is infered from the status + code. + + Args: + - call_code: Exception type or gRPC status code. 
+ """ + if isinstance(call_code, type): + return call_code + else: + return type(core_exceptions.from_grpc_status(call_code, "")) + + def _get_retryable_errors( call_codes: Sequence["grpc.StatusCode" | int | type[Exception]] | TABLE_DEFAULT, table: "TableAsync", @@ -225,7 +252,4 @@ def _get_retryable_errors( elif call_codes == TABLE_DEFAULT.MUTATE_ROWS: call_codes = table.default_mutate_rows_retryable_errors - return [ - e if isinstance(e, type) else type(core_exceptions.from_grpc_status(e, "")) - for e in call_codes - ] + return [_get_error_type(e) for e in call_codes] diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/exceptions.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/exceptions.py index 8d97640aa6d8..95cd44f2c77c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/exceptions.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/exceptions.py @@ -311,3 +311,11 @@ def __init__( self.__cause__ = cause self.index = failed_index self.query = failed_query + + +class InvalidExecuteQueryResponse(core_exceptions.GoogleAPICallError): + """Exception raised to invalid query response data from back-end.""" + + +class ParameterTypeInferenceFailed(ValueError): + """Exception raised when query parameter types were not provided and cannot be inferred.""" diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/__init__.py new file mode 100644 index 000000000000..94af7d1cd0e8 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/__init__.py @@ -0,0 +1,38 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from google.cloud.bigtable.data.execute_query._async.execute_query_iterator import ( + ExecuteQueryIteratorAsync, +) +from google.cloud.bigtable.data.execute_query.metadata import ( + Metadata, + ProtoMetadata, + SqlType, +) +from google.cloud.bigtable.data.execute_query.values import ( + ExecuteQueryValueType, + QueryResultRow, + Struct, +) + + +__all__ = [ + "ExecuteQueryValueType", + "SqlType", + "QueryResultRow", + "Struct", + "Metadata", + "ProtoMetadata", + "ExecuteQueryIteratorAsync", +] diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_async/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_async/__init__.py new file mode 100644 index 000000000000..6d5e14bcf4a0 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_async/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py new file mode 100644 index 000000000000..3660c0b0ff9f --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py @@ -0,0 +1,211 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import asyncio +from typing import ( + Any, + AsyncIterator, + Dict, + List, + Optional, + Sequence, + Tuple, +) + +from google.api_core import retry as retries + +from google.cloud.bigtable.data.execute_query._byte_cursor import _ByteCursor +from google.cloud.bigtable.data._helpers import ( + _attempt_timeout_generator, + _retry_exception_factory, +) +from google.cloud.bigtable.data.exceptions import InvalidExecuteQueryResponse +from google.cloud.bigtable.data.execute_query.values import QueryResultRow +from google.cloud.bigtable.data.execute_query.metadata import Metadata, ProtoMetadata +from google.cloud.bigtable.data.execute_query._reader import ( + _QueryResultRowReader, + _Reader, +) +from google.cloud.bigtable_v2.types.bigtable import ( + ExecuteQueryRequest as ExecuteQueryRequestPB, +) + + +class ExecuteQueryIteratorAsync: + """ + ExecuteQueryIteratorAsync handles collecting streaming responses from the + ExecuteQuery RPC and parsing them to `QueryResultRow`s. + + ExecuteQueryIteratorAsync implements Asynchronous Iterator interface and can + be used with "async for" syntax. It is also a context manager. + + It is **not thread-safe**. It should not be used by multiple asyncio Tasks. + + Args: + client (google.cloud.bigtable.data._async.BigtableDataClientAsync): bigtable client + instance_id (str): id of the instance on which the query is executed + request_body (Dict[str, Any]): dict representing the body of the ExecuteQueryRequest + attempt_timeout (float | None): the time budget for the entire operation, in seconds. + Failed requests will be retried within the budget. + Defaults to 600 seconds. + operation_timeout (float): the time budget for an individual network request, in seconds. + If it takes longer than this time to complete, the request will be cancelled with + a DeadlineExceeded exception, and a retry will be attempted. + Defaults to the 20 seconds. If None, defaults to operation_timeout. 
+ req_metadata (Sequence[Tuple[str, str]]): metadata used while sending the gRPC request + retryable_excs (List[type[Exception]]): a list of errors that will be retried if encountered. + """ + + def __init__( + self, + client: Any, + instance_id: str, + app_profile_id: Optional[str], + request_body: Dict[str, Any], + attempt_timeout: float | None, + operation_timeout: float, + req_metadata: Sequence[Tuple[str, str]], + retryable_excs: List[type[Exception]], + ) -> None: + self._table_name = None + self._app_profile_id = app_profile_id + self._client = client + self._instance_id = instance_id + self._byte_cursor = _ByteCursor[ProtoMetadata]() + self._reader: _Reader[QueryResultRow] = _QueryResultRowReader(self._byte_cursor) + self._result_generator = self._next_impl() + self._register_instance_task = None + self._is_closed = False + self._request_body = request_body + self._attempt_timeout_gen = _attempt_timeout_generator( + attempt_timeout, operation_timeout + ) + self._async_stream = retries.retry_target_stream_async( + self._make_request_with_resume_token, + retries.if_exception_type(*retryable_excs), + retries.exponential_sleep_generator(0.01, 60, multiplier=2), + operation_timeout, + exception_factory=_retry_exception_factory, + ) + self._req_metadata = req_metadata + + try: + self._register_instance_task = asyncio.create_task( + self._client._register_instance(instance_id, self) + ) + except RuntimeError as e: + raise RuntimeError( + f"{self.__class__.__name__} must be created within an async event loop context." + ) from e + + @property + def is_closed(self): + return self._is_closed + + @property + def app_profile_id(self): + return self._app_profile_id + + @property + def table_name(self): + return self._table_name + + async def _make_request_with_resume_token(self): + """ + perfoms the rpc call using the correct resume token. + """ + resume_token = self._byte_cursor.prepare_for_new_request() + request = ExecuteQueryRequestPB( + { + **self._request_body, + "resume_token": resume_token, + } + ) + return await self._client._gapic_client.execute_query( + request, + timeout=next(self._attempt_timeout_gen), + metadata=self._req_metadata, + retry=None, + ) + + async def _await_metadata(self) -> None: + """ + If called before the first response was recieved, the first response + is awaited as part of this call. + """ + if self._byte_cursor.metadata is None: + metadata_msg = await self._async_stream.__anext__() + self._byte_cursor.consume_metadata(metadata_msg) + + async def _next_impl(self) -> AsyncIterator[QueryResultRow]: + """ + Generator wrapping the response stream which parses the stream results + and returns full `QueryResultRow`s. + """ + await self._await_metadata() + + async for response in self._async_stream: + try: + bytes_to_parse = self._byte_cursor.consume(response) + if bytes_to_parse is None: + continue + + results = self._reader.consume(bytes_to_parse) + if results is None: + continue + + except ValueError as e: + raise InvalidExecuteQueryResponse( + "Invalid ExecuteQuery response received" + ) from e + + for result in results: + yield result + await self.close() + + async def __anext__(self): + if self._is_closed: + raise StopAsyncIteration + return await self._result_generator.__anext__() + + def __aiter__(self): + return self + + async def metadata(self) -> Optional[Metadata]: + """ + Returns query metadata from the server or None if the iterator was + explicitly closed. 
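+
+        The metadata is sent by the server in the first response of the stream;
+        if that response has not been received yet, this call awaits it before
+        returning.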
+ """ + if self._is_closed: + return None + # Metadata should be present in the first response in a stream. + if self._byte_cursor.metadata is None: + try: + await self._await_metadata() + except StopIteration: + return None + return self._byte_cursor.metadata + + async def close(self) -> None: + """ + Cancel all background tasks. Should be called all rows were processed. + """ + if self._is_closed: + return + self._is_closed = True + if self._register_instance_task is not None: + self._register_instance_task.cancel() + await self._client._remove_instance_registration(self._instance_id, self) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_byte_cursor.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_byte_cursor.py new file mode 100644 index 000000000000..60f23f54127e --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_byte_cursor.py @@ -0,0 +1,144 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Any, Generic, Optional, TypeVar + +from google.cloud.bigtable_v2 import ExecuteQueryResponse +from google.cloud.bigtable.data.execute_query.metadata import ( + Metadata, + _pb_metadata_to_metadata_types, +) + +MT = TypeVar("MT", bound=Metadata) # metadata type + + +class _ByteCursor(Generic[MT]): + """ + Buffers bytes from `ExecuteQuery` responses until resume_token is received or end-of-stream + is reached. :class:`google.cloud.bigtable_v2.types.bigtable.ExecuteQueryResponse` obtained from + the server should be passed to ``consume`` or ``consume_metadata`` methods and its non-None + results should be passed to appropriate + :class:`google.cloud.bigtable.execute_query_reader._Reader` for parsing gathered bytes. + + This class consumes data obtained externally to be usable in both sync and async clients. + + See :class:`google.cloud.bigtable.execute_query_reader._Reader` for more context. + """ + + def __init__(self): + self._metadata: Optional[MT] = None + self._buffer = bytearray() + self._resume_token = None + self._last_response_results_field = None + + @property + def metadata(self) -> Optional[MT]: + """ + Returns: + Metadata or None: Metadata read from the first response of the stream + or None if no response was consumed yet. + """ + return self._metadata + + def prepare_for_new_request(self): + """ + Prepares this ``_ByteCursor`` for retrying an ``ExecuteQuery`` request. + + Clears internal buffers of this ``_ByteCursor`` and returns last received + ``resume_token`` to be used in retried request. + + This is the only method that returns ``resume_token`` to the user. + Returning the token to the user is tightly coupled with clearing internal + buffers to prevent accidental retry without clearing the state, what would + cause invalid results. ``resume_token`` are not needed in other cases, + thus they is no separate getter for it. + + Returns: + bytes: Last received resume_token. 
+ """ + self._buffer = bytearray() + # metadata is sent in the first response in a stream, + # if we've already received one, but it was not already commited + # by a subsequent resume_token, then we should clear it as well. + if not self._resume_token: + self._metadata = None + + return self._resume_token + + def consume_metadata(self, response: ExecuteQueryResponse) -> None: + """ + Reads metadata from first response of ``ExecuteQuery`` responses stream. + Should be called only once. + + Args: + response (google.cloud.bigtable_v2.types.bigtable.ExecuteQueryResponse): First response + from the stream. + + Raises: + ValueError: If this method was already called or if metadata received from the server + cannot be parsed. + """ + if self._metadata is not None: + raise ValueError("Invalid state - metadata already consumed") + + if "metadata" in response: + metadata: Any = _pb_metadata_to_metadata_types(response.metadata) + self._metadata = metadata + else: + raise ValueError("Invalid parameter - response without metadata") + + return None + + def consume(self, response: ExecuteQueryResponse) -> Optional[bytes]: + """ + Reads results bytes from an ``ExecuteQuery`` response and adds them to a buffer. + + If the response contains a ``resume_token``: + - the ``resume_token`` is saved in this ``_ByteCursor``, and + - internal buffers are flushed and returned to the caller. + + ``resume_token`` is not available directly, but can be retrieved by calling + :meth:`._ByteCursor.prepare_for_new_request` when preparing to retry a request. + + Args: + response (google.cloud.bigtable_v2.types.bigtable.ExecuteQueryResponse): + Response obtained from the stream. + + Returns: + bytes or None: bytes if buffers were flushed or None otherwise. + + Raises: + ValueError: If provided ``ExecuteQueryResponse`` is not valid + or contains bytes representing response of a different kind than previously + processed responses. + """ + response_pb = response._pb # proto-plus attribute retrieval is slow. + + if response_pb.HasField("results"): + results = response_pb.results + if results.HasField("proto_rows_batch"): + self._buffer.extend(results.proto_rows_batch.batch_data) + + if results.resume_token: + self._resume_token = results.resume_token + + if self._buffer: + return_value = memoryview(self._buffer) + self._buffer = bytearray() + return return_value + elif response_pb.HasField("metadata"): + self.consume_metadata(response) + else: + raise ValueError(f"Invalid ExecuteQueryResponse: {response}") + return None diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_parameters_formatting.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_parameters_formatting.py new file mode 100644 index 000000000000..edb7a6380cc6 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_parameters_formatting.py @@ -0,0 +1,118 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from typing import Any, Dict, Optional +import datetime +from google.api_core.datetime_helpers import DatetimeWithNanoseconds +from google.cloud.bigtable.data.exceptions import ParameterTypeInferenceFailed +from google.cloud.bigtable.data.execute_query.values import ExecuteQueryValueType +from google.cloud.bigtable.data.execute_query.metadata import SqlType + + +def _format_execute_query_params( + params: Optional[Dict[str, ExecuteQueryValueType]], + parameter_types: Optional[Dict[str, SqlType.Type]], +) -> Any: + """ + Takes a dictionary of param_name -> param_value and optionally parameter types. + If the parameters types are not provided, this function tries to infer them. + + Args: + params (Optional[Dict[str, ExecuteQueryValueType]]): mapping from parameter names + like they appear in query (without @ at the beginning) to their values. + Only values of type ExecuteQueryValueType are permitted. + parameter_types (Optional[Dict[str, SqlType.Type]]): mapping of parameter names + to their types. + + Raises: + ValueError: raised when parameter types cannot be inferred and were not + provided explicitly. + + Returns: + dictionary prasable to a protobuf represenging parameters as defined + in ExecuteQueryRequest.params + """ + if not params: + return {} + parameter_types = parameter_types or {} + + result_values = {} + + for key, value in params.items(): + user_provided_type = parameter_types.get(key) + try: + if user_provided_type: + if not isinstance(user_provided_type, SqlType.Type): + raise ValueError( + f"Parameter type for {key} should be provided as an instance of SqlType.Type subclass." + ) + param_type = user_provided_type + else: + param_type = _detect_type(value) + + value_pb_dict = _convert_value_to_pb_value_dict(value, param_type) + except ValueError as err: + raise ValueError(f"Error when parsing parameter {key}") from err + result_values[key] = value_pb_dict + + return result_values + + +def _convert_value_to_pb_value_dict( + value: ExecuteQueryValueType, param_type: SqlType.Type +) -> Any: + """ + Takes a value and converts it to a dictionary parsable to a protobuf. + + Args: + value (ExecuteQueryValueType): value + param_type (SqlType.Type): object describing which ExecuteQuery type the value represents. + + Returns: + dictionary parsable to a protobuf. + """ + # type field will be set only in top-level Value. + value_dict = param_type._to_value_pb_dict(value) + value_dict["type_"] = param_type._to_type_pb_dict() + return value_dict + + +_TYPES_TO_TYPE_DICTS = [ + (bytes, SqlType.Bytes()), + (str, SqlType.String()), + (bool, SqlType.Bool()), + (int, SqlType.Int64()), + (DatetimeWithNanoseconds, SqlType.Timestamp()), + (datetime.datetime, SqlType.Timestamp()), + (datetime.date, SqlType.Date()), +] + + +def _detect_type(value: ExecuteQueryValueType) -> SqlType.Type: + """ + Infers the ExecuteQuery type based on value. Raises error if type is amiguous. + raises ParameterTypeInferenceFailed if not possible. + """ + if value is None: + raise ParameterTypeInferenceFailed( + "Cannot infer type of None, please provide the type manually." + ) + + for field_type, type_dict in _TYPES_TO_TYPE_DICTS: + if isinstance(value, field_type): + return type_dict + + raise ParameterTypeInferenceFailed( + f"Cannot infer type of {type(value).__name__}, please provide the type manually." 
+ ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_query_result_parsing_utils.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_query_result_parsing_utils.py new file mode 100644 index 000000000000..b65dce27b85f --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_query_result_parsing_utils.py @@ -0,0 +1,133 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Any, Callable, Dict, Type +from google.cloud.bigtable.data.execute_query.values import Struct +from google.cloud.bigtable.data.execute_query.metadata import SqlType +from google.cloud.bigtable_v2 import Value as PBValue +from google.api_core.datetime_helpers import DatetimeWithNanoseconds + +_REQUIRED_PROTO_FIELDS = { + SqlType.Bytes: "bytes_value", + SqlType.String: "string_value", + SqlType.Int64: "int_value", + SqlType.Float64: "float_value", + SqlType.Bool: "bool_value", + SqlType.Timestamp: "timestamp_value", + SqlType.Date: "date_value", + SqlType.Struct: "array_value", + SqlType.Array: "array_value", + SqlType.Map: "array_value", +} + + +def _parse_array_type(value: PBValue, metadata_type: SqlType.Array) -> Any: + """ + used for parsing an array represented as a protobuf to a python list. + """ + return list( + map( + lambda val: _parse_pb_value_to_python_value( + val, metadata_type.element_type + ), + value.array_value.values, + ) + ) + + +def _parse_map_type(value: PBValue, metadata_type: SqlType.Map) -> Any: + """ + used for parsing a map represented as a protobuf to a python dict. + + Values of type `Map` are stored in a `Value.array_value` where each entry + is another `Value.array_value` with two elements (the key and the value, + in that order). + Normally encoded Map values won't have repeated keys, however, the client + must handle the case in which they do. If the same key appears + multiple times, the _last_ value takes precedence. 
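+
+    For example, a ``MAP<STRING, INT64>`` value ``{"a": 1}`` arrives as an
+    ``array_value`` with a single entry whose own ``array_value.values`` hold the
+    key ``"a"`` followed by the value ``1``.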
+ """ + + try: + return dict( + map( + lambda map_entry: ( + _parse_pb_value_to_python_value( + map_entry.array_value.values[0], metadata_type.key_type + ), + _parse_pb_value_to_python_value( + map_entry.array_value.values[1], metadata_type.value_type + ), + ), + value.array_value.values, + ) + ) + except IndexError: + raise ValueError("Invalid map entry - less or more than two values.") + + +def _parse_struct_type(value: PBValue, metadata_type: SqlType.Struct) -> Struct: + """ + used for parsing a struct represented as a protobuf to a + google.cloud.bigtable.data.execute_query.Struct + """ + if len(value.array_value.values) != len(metadata_type.fields): + raise ValueError("Mismatched lengths of values and types.") + + struct = Struct() + for value, field in zip(value.array_value.values, metadata_type.fields): + field_name, field_type = field + struct.add_field(field_name, _parse_pb_value_to_python_value(value, field_type)) + + return struct + + +def _parse_timestamp_type( + value: PBValue, metadata_type: SqlType.Timestamp +) -> DatetimeWithNanoseconds: + """ + used for parsing a timestamp represented as a protobuf to DatetimeWithNanoseconds + """ + return DatetimeWithNanoseconds.from_timestamp_pb(value.timestamp_value) + + +_TYPE_PARSERS: Dict[Type[SqlType.Type], Callable[[PBValue, Any], Any]] = { + SqlType.Timestamp: _parse_timestamp_type, + SqlType.Struct: _parse_struct_type, + SqlType.Array: _parse_array_type, + SqlType.Map: _parse_map_type, +} + + +def _parse_pb_value_to_python_value(value: PBValue, metadata_type: SqlType.Type) -> Any: + """ + used for converting the value represented as a protobufs to a python object. + """ + value_kind = value.WhichOneof("kind") + if not value_kind: + return None + + kind = type(metadata_type) + if not value.HasField(_REQUIRED_PROTO_FIELDS[kind]): + raise ValueError( + f"{_REQUIRED_PROTO_FIELDS[kind]} field for {kind.__name__} type not found in a Value." + ) + + if kind in _TYPE_PARSERS: + parser = _TYPE_PARSERS[kind] + return parser(value, metadata_type) + elif kind in _REQUIRED_PROTO_FIELDS: + field_name = _REQUIRED_PROTO_FIELDS[kind] + return getattr(value, field_name) + else: + raise ValueError(f"Unknown kind {kind}") diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_reader.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_reader.py new file mode 100644 index 000000000000..9c0259cde638 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_reader.py @@ -0,0 +1,149 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from typing import ( + TypeVar, + Generic, + Iterable, + Optional, + List, + Sequence, + cast, +) +from abc import ABC, abstractmethod + +from google.cloud.bigtable_v2 import ProtoRows, Value as PBValue +from google.cloud.bigtable.data.execute_query._byte_cursor import _ByteCursor + +from google.cloud.bigtable.data.execute_query._query_result_parsing_utils import ( + _parse_pb_value_to_python_value, +) + +from google.cloud.bigtable.helpers import batched + +from google.cloud.bigtable.data.execute_query.values import QueryResultRow +from google.cloud.bigtable.data.execute_query.metadata import ProtoMetadata + + +T = TypeVar("T") + + +class _Reader(ABC, Generic[T]): + """ + An interface for classes that consume and parse bytes returned by ``_ByteCursor``. + Parsed bytes should be gathered into bundles (rows or columns) of expected size + and converted to an appropriate type ``T`` that will be returned as a semantically + meaningful result to the library user by + :meth:`google.cloud.bigtable.instance.Instance.execute_query` or + :meth:`google.cloud.bigtable.data._async.client.BigtableDataClientAsync.execute_query` + methods. + + This class consumes data obtained externally to be usable in both sync and async clients. + + See :class:`google.cloud.bigtable.byte_cursor._ByteCursor` for more context. + """ + + @abstractmethod + def consume(self, bytes_to_consume: bytes) -> Optional[Iterable[T]]: + """This method receives a parsable chunk of bytes and returns either a None if there is + not enough chunks to return to the user yet (e.g. we haven't received all columns in a + row yet), or a list of appropriate values gathered from one or more parsable chunks. + + Args: + bytes_to_consume (bytes): chunk of parsable bytes received from + :meth:`google.cloud.bigtable.byte_cursor._ByteCursor.consume` + method. + + Returns: + Iterable[T] or None: Iterable if gathered values can form one or more instances of T, + or None if there is not enough data to construct at least one instance of T with + appropriate number of entries. + """ + raise NotImplementedError + + +class _QueryResultRowReader(_Reader[QueryResultRow]): + """ + A :class:`._Reader` consuming bytes representing + :class:`google.cloud.bigtable_v2.types.Type` + and producing :class:`google.cloud.bigtable.execute_query.QueryResultRow`. + + Number of entries in each row is determined by number of columns in + :class:`google.cloud.bigtable.execute_query.Metadata` obtained from + :class:`google.cloud.bigtable.byte_cursor._ByteCursor` passed in the constructor. + """ + + def __init__(self, byte_cursor: _ByteCursor[ProtoMetadata]): + """ + Constructs new instance of ``_QueryResultRowReader``. + + Args: + byte_cursor (google.cloud.bigtable.byte_cursor._ByteCursor): + byte_cursor that will be used to gather bytes for this instance of ``_Reader``, + needed to obtain :class:`google.cloud.bigtable.execute_query.Metadata` about + processed stream. 
+ """ + self._values: List[PBValue] = [] + self._byte_cursor = byte_cursor + + @property + def _metadata(self) -> Optional[ProtoMetadata]: + return self._byte_cursor.metadata + + def _construct_query_result_row(self, values: Sequence[PBValue]) -> QueryResultRow: + result = QueryResultRow() + # The logic, not defined by mypy types, ensures that the value of + # "metadata" is never null at the time it is retrieved here + metadata = cast(ProtoMetadata, self._metadata) + columns = metadata.columns + + assert len(values) == len( + columns + ), "This function should be called only when count of values matches count of columns." + + for column, value in zip(columns, values): + parsed_value = _parse_pb_value_to_python_value(value, column.column_type) + result.add_field(column.column_name, parsed_value) + return result + + def _parse_proto_rows(self, bytes_to_parse: bytes) -> Iterable[PBValue]: + proto_rows = ProtoRows.pb().FromString(bytes_to_parse) + return proto_rows.values + + def consume(self, bytes_to_consume: bytes) -> Optional[Iterable[QueryResultRow]]: + if bytes_to_consume is None: + raise ValueError("bytes_to_consume shouldn't be None") + + self._values.extend(self._parse_proto_rows(bytes_to_consume)) + + # The logic, not defined by mypy types, ensures that the value of + # "metadata" is never null at the time it is retrieved here + num_columns = len(cast(ProtoMetadata, self._metadata).columns) + + if len(self._values) < num_columns: + return None + + rows = [] + for batch in batched(self._values, n=num_columns): + if len(batch) == num_columns: + rows.append(self._construct_query_result_row(batch)) + else: + raise ValueError( + "Server error, recieved bad number of values. " + f"Expected {num_columns} got {len(batch)}." + ) + + self._values = [] + + return rows diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/metadata.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/metadata.py new file mode 100644 index 000000000000..98b94a644bde --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/metadata.py @@ -0,0 +1,354 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This module provides the SqlType class used for specifying types in +ExecuteQuery and some utilities. + +The SqlTypes are used in Metadata returned by the ExecuteQuery operation as well +as for specifying query parameter types explicitly. 
+""" + +from collections import defaultdict +from typing import ( + Optional, + List, + Dict, + Set, + Type, + Union, + Tuple, + Any, +) +from google.cloud.bigtable.data.execute_query.values import _NamedList +from google.cloud.bigtable_v2 import ResultSetMetadata +from google.cloud.bigtable_v2 import Type as PBType +from google.type import date_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.api_core.datetime_helpers import DatetimeWithNanoseconds +import datetime + + +class SqlType: + """ + Classes denoting types of values returned by Bigtable's ExecuteQuery operation. + + Used in :class:`.Metadata`. + """ + + class Type: + expected_type: Optional[type] = None + value_pb_dict_field_name: Optional[str] = None + type_field_name: Optional[str] = None + + @classmethod + def from_pb_type(cls, pb_type: Optional[PBType] = None): + return cls() + + def _to_type_pb_dict(self) -> Dict[str, Any]: + if not self.type_field_name: + raise NotImplementedError( + "Fill in expected_type and value_pb_dict_field_name" + ) + + return {self.type_field_name: {}} + + def _to_value_pb_dict(self, value: Any) -> Dict[str, Any]: + if self.expected_type is None or self.value_pb_dict_field_name is None: + raise NotImplementedError( + "Fill in expected_type and value_pb_dict_field_name" + ) + + if value is None: + return {} + + if not isinstance(value, self.expected_type): + raise ValueError( + f"Expected query parameter of type {self.expected_type.__name__}, got {type(value).__name__}" + ) + + return {self.value_pb_dict_field_name: value} + + def __eq__(self, other): + return isinstance(other, type(self)) + + def __str__(self) -> str: + return self.__class__.__name__ + + def __repr__(self) -> str: + return self.__str__() + + class Struct(_NamedList[Type], Type): + @classmethod + def from_pb_type(cls, type_pb: Optional[PBType] = None) -> "SqlType.Struct": + if type_pb is None: + raise ValueError("missing required argument type_pb") + fields: List[Tuple[Optional[str], SqlType.Type]] = [] + for field in type_pb.struct_type.fields: + fields.append((field.field_name, _pb_type_to_metadata_type(field.type))) + return cls(fields) + + def _to_value_pb_dict(self, value: Any): + raise NotImplementedError("Struct is not supported as a query parameter") + + def _to_type_pb_dict(self) -> Dict[str, Any]: + raise NotImplementedError("Struct is not supported as a query parameter") + + def __eq__(self, other: object): + # Cannot use super() here - we'd either have to: + # - call super() in these base classes, which would in turn call Object.__eq__ + # to compare objects by identity and return a False, or + # - do not call super() in these base classes, which would result in calling only + # one of the __eq__ methods (a super() in the base class would be required to call the other one), or + # - call super() in only one of the base classes, but that would be error prone and changing + # the order of base classes would introduce unexpected behaviour. 
+ # we also have to disable mypy because it doesn't see that SqlType.Struct == _NamedList[Type] + return SqlType.Type.__eq__(self, other) and _NamedList.__eq__(self, other) # type: ignore + + def __str__(self): + return super(_NamedList, self).__str__() + + class Array(Type): + def __init__(self, element_type: "SqlType.Type"): + if isinstance(element_type, SqlType.Array): + raise ValueError("Arrays of arrays are not supported.") + self._element_type = element_type + + @property + def element_type(self): + return self._element_type + + @classmethod + def from_pb_type(cls, type_pb: Optional[PBType] = None) -> "SqlType.Array": + if type_pb is None: + raise ValueError("missing required argument type_pb") + return cls(_pb_type_to_metadata_type(type_pb.array_type.element_type)) + + def _to_value_pb_dict(self, value: Any): + raise NotImplementedError("Array is not supported as a query parameter") + + def _to_type_pb_dict(self) -> Dict[str, Any]: + raise NotImplementedError("Array is not supported as a query parameter") + + def __eq__(self, other): + return super().__eq__(other) and self.element_type == other.element_type + + def __str__(self) -> str: + return f"{self.__class__.__name__}<{str(self.element_type)}>" + + class Map(Type): + def __init__(self, key_type: "SqlType.Type", value_type: "SqlType.Type"): + self._key_type = key_type + self._value_type = value_type + + @property + def key_type(self): + return self._key_type + + @property + def value_type(self): + return self._value_type + + @classmethod + def from_pb_type(cls, type_pb: Optional[PBType] = None) -> "SqlType.Map": + if type_pb is None: + raise ValueError("missing required argument type_pb") + return cls( + _pb_type_to_metadata_type(type_pb.map_type.key_type), + _pb_type_to_metadata_type(type_pb.map_type.value_type), + ) + + def _to_type_pb_dict(self) -> Dict[str, Any]: + raise NotImplementedError("Map is not supported as a query parameter") + + def _to_value_pb_dict(self, value: Any): + raise NotImplementedError("Map is not supported as a query parameter") + + def __eq__(self, other): + return ( + super().__eq__(other) + and self.key_type == other.key_type + and self.value_type == other.value_type + ) + + def __str__(self) -> str: + return ( + f"{self.__class__.__name__}<" + f"{str(self._key_type)},{str(self._value_type)}>" + ) + + class Bytes(Type): + expected_type = bytes + value_pb_dict_field_name = "bytes_value" + type_field_name = "bytes_type" + + class String(Type): + expected_type = str + value_pb_dict_field_name = "string_value" + type_field_name = "string_type" + + class Int64(Type): + expected_type = int + value_pb_dict_field_name = "int_value" + type_field_name = "int64_type" + + class Float64(Type): + expected_type = float + value_pb_dict_field_name = "float_value" + type_field_name = "float64_type" + + class Bool(Type): + expected_type = bool + value_pb_dict_field_name = "bool_value" + type_field_name = "bool_type" + + class Timestamp(Type): + type_field_name = "timestamp_type" + expected_types = ( + datetime.datetime, + DatetimeWithNanoseconds, + ) + + def _to_value_pb_dict(self, value: Any) -> Dict[str, Any]: + if value is None: + return {} + + if not isinstance(value, self.expected_types): + raise ValueError( + f"Expected one of {', '.join((_type.__name__ for _type in self.expected_types))}" + ) + + if isinstance(value, DatetimeWithNanoseconds): + return {"timestamp_value": value.timestamp_pb()} + else: # value must be an instance of datetime.datetime + ts = timestamp_pb2.Timestamp() + ts.FromDatetime(value) + 
return {"timestamp_value": ts} + + class Date(Type): + type_field_name = "date_type" + expected_type = datetime.date + + def _to_value_pb_dict(self, value: Any) -> Dict[str, Any]: + if value is None: + return {} + + if not isinstance(value, self.expected_type): + raise ValueError( + f"Expected query parameter of type {self.expected_type.__name__}, got {type(value).__name__}" + ) + + return { + "date_value": date_pb2.Date( + year=value.year, + month=value.month, + day=value.day, + ) + } + + +class Metadata: + pass + + +class ProtoMetadata(Metadata): + class Column: + def __init__(self, column_name: Optional[str], column_type: SqlType.Type): + self._column_name = column_name + self._column_type = column_type + + @property + def column_name(self) -> Optional[str]: + return self._column_name + + @property + def column_type(self) -> SqlType.Type: + return self._column_type + + @property + def columns(self) -> List[Column]: + return self._columns + + def __init__( + self, columns: Optional[List[Tuple[Optional[str], SqlType.Type]]] = None + ): + self._columns: List[ProtoMetadata.Column] = [] + self._column_indexes: Dict[str, List[int]] = defaultdict(list) + self._duplicate_names: Set[str] = set() + + if columns: + for column_name, column_type in columns: + if column_name is not None: + if column_name in self._column_indexes: + self._duplicate_names.add(column_name) + self._column_indexes[column_name].append(len(self._columns)) + self._columns.append(ProtoMetadata.Column(column_name, column_type)) + + def __getitem__(self, index_or_name: Union[str, int]) -> Column: + if isinstance(index_or_name, str): + if index_or_name in self._duplicate_names: + raise KeyError( + f"Ambigious column name: '{index_or_name}', use index instead." + f" Field present on indexes {', '.join(map(str, self._column_indexes[index_or_name]))}." 
+ ) + if index_or_name not in self._column_indexes: + raise KeyError(f"No such column: {index_or_name}") + index = self._column_indexes[index_or_name][0] + else: + index = index_or_name + return self._columns[index] + + def __len__(self): + return len(self._columns) + + def __str__(self) -> str: + columns_str = ", ".join([str(column) for column in self._columns]) + return f"{self.__class__.__name__}([{columns_str}])" + + def __repr__(self) -> str: + return self.__str__() + + +def _pb_metadata_to_metadata_types( + metadata_pb: ResultSetMetadata, +) -> Metadata: + if "proto_schema" in metadata_pb: + fields: List[Tuple[Optional[str], SqlType.Type]] = [] + for column_metadata in metadata_pb.proto_schema.columns: + fields.append( + (column_metadata.name, _pb_type_to_metadata_type(column_metadata.type)) + ) + return ProtoMetadata(fields) + raise ValueError("Invalid ResultSetMetadata object received.") + + +_PROTO_TYPE_TO_METADATA_TYPE_FACTORY: Dict[str, Type[SqlType.Type]] = { + "bytes_type": SqlType.Bytes, + "string_type": SqlType.String, + "int64_type": SqlType.Int64, + "float64_type": SqlType.Float64, + "bool_type": SqlType.Bool, + "timestamp_type": SqlType.Timestamp, + "date_type": SqlType.Date, + "struct_type": SqlType.Struct, + "array_type": SqlType.Array, + "map_type": SqlType.Map, +} + + +def _pb_type_to_metadata_type(type_pb: PBType) -> SqlType.Type: + kind = PBType.pb(type_pb).WhichOneof("kind") + if kind in _PROTO_TYPE_TO_METADATA_TYPE_FACTORY: + return _PROTO_TYPE_TO_METADATA_TYPE_FACTORY[kind].from_pb_type(type_pb) + raise ValueError(f"Unrecognized response data type: {type_pb}") diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/values.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/values.py new file mode 100644 index 000000000000..450f6f855148 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/values.py @@ -0,0 +1,116 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from collections import defaultdict +from typing import ( + Optional, + List, + Dict, + Set, + Union, + TypeVar, + Generic, + Tuple, + Mapping, +) +from google.type import date_pb2 # type: ignore +from google.api_core.datetime_helpers import DatetimeWithNanoseconds + +T = TypeVar("T") + + +class _NamedList(Generic[T]): + """ + A class designed to store a list of elements, which can be accessed by + name or index. + This class is different from namedtuple, because namedtuple has some + restrictions on names of fields and we do not want to have them. 
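+
+    Fields can always be read by index; a field can also be read by name as long
+    as the name is unique, e.g. ``row["a"]`` or ``row[0]``. Accessing a duplicated
+    name raises ``KeyError`` listing the indexes at which it appears.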
+ """ + + _str_cls_name = "_NamedList" + + def __init__(self, fields: Optional[List[Tuple[Optional[str], T]]] = None): + self._fields: List[Tuple[Optional[str], T]] = [] + self._field_indexes: Dict[str, List[int]] = defaultdict(list) + self._duplicate_names: Set[str] = set() + + if fields: + for field_name, field_type in fields: + self.add_field(field_name, field_type) + + def add_field(self, name: Optional[str], value: T): + if name: + if name in self._field_indexes: + self._duplicate_names.add(name) + self._field_indexes[name].append(len(self._fields)) + self._fields.append((name, value)) + + @property + def fields(self): + return self._fields + + def __getitem__(self, index_or_name: Union[str, int]): + if isinstance(index_or_name, str): + if index_or_name in self._duplicate_names: + raise KeyError( + f"Ambigious field name: '{index_or_name}', use index instead." + f" Field present on indexes {', '.join(map(str, self._field_indexes[index_or_name]))}." + ) + if index_or_name not in self._field_indexes: + raise KeyError(f"No such field: {index_or_name}") + index = self._field_indexes[index_or_name][0] + else: + index = index_or_name + return self._fields[index][1] + + def __len__(self): + return len(self._fields) + + def __eq__(self, other): + if not isinstance(other, _NamedList): + return False + + return ( + self._fields == other._fields + and self._field_indexes == other._field_indexes + ) + + def __str__(self) -> str: + fields_str = ", ".join([str(field) for field in self._fields]) + return f"{self.__class__.__name__}([{fields_str}])" + + def __repr__(self) -> str: + return self.__str__() + + +ExecuteQueryValueType = Union[ + int, + float, + bool, + bytes, + str, + DatetimeWithNanoseconds, + date_pb2.Date, + "Struct", + List["ExecuteQueryValueType"], + Mapping[Union[str, int, bytes], "ExecuteQueryValueType"], +] + + +class QueryResultRow(_NamedList[ExecuteQueryValueType]): + pass + + +class Struct(_NamedList[ExecuteQueryValueType]): + pass diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/helpers.py b/packages/google-cloud-bigtable/google/cloud/bigtable/helpers.py new file mode 100644 index 000000000000..78af430892fc --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/helpers.py @@ -0,0 +1,31 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import TypeVar, Iterable, Generator, Tuple + +from itertools import islice + +T = TypeVar("T") + + +# batched landed in standard library in Python 3.11. 
+def batched(iterable: Iterable[T], n) -> Generator[Tuple[T, ...], None, None]: + # batched('ABCDEFG', 3) → ABC DEF G + if n < 1: + raise ValueError("n must be at least one") + it = iter(iterable) + batch = tuple(islice(it, n)) + while batch: + yield batch + batch = tuple(islice(it, n)) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py index 6d092cefd14f..23fb1c95dece 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/instance.py @@ -32,6 +32,7 @@ import warnings + _INSTANCE_NAME_RE = re.compile( r"^projects/(?P[^/]+)/" r"instances/(?P[a-z][-a-z0-9]*)$" ) diff --git a/packages/google-cloud-bigtable/samples/snippets/data_client/data_client_snippets_async.py b/packages/google-cloud-bigtable/samples/snippets/data_client/data_client_snippets_async.py index 742e7cb8e7e2..dabbcb839b1e 100644 --- a/packages/google-cloud-bigtable/samples/snippets/data_client/data_client_snippets_async.py +++ b/packages/google-cloud-bigtable/samples/snippets/data_client/data_client_snippets_async.py @@ -69,7 +69,10 @@ async def write_batch(project_id, instance_id, table_id): for sub_exception in e.exceptions: failed_entry: RowMutationEntry = sub_exception.entry cause: Exception = sub_exception.__cause__ - print(f"Failed mutation: {failed_entry.row_key} with error: {cause!r}") + print( + f"Failed mutation: {failed_entry.row_key} with error: {cause!r}" + ) + # [END bigtable_async_writes_batch] await write_batch(table.client.project, table.instance_id, table.table_id) @@ -94,6 +97,7 @@ async def write_increment(project_id, instance_id, table_id): # check result cell = result_row[0] print(f"{cell.row_key} value: {int(cell)}") + # [END bigtable_async_write_increment] await write_increment(table.client.project, table.instance_id, table.table_id) @@ -127,6 +131,7 @@ async def write_conditional(project_id, instance_id, table_id): ) if result is True: print("The row os_name was set to android") + # [END bigtable_async_writes_conditional] await write_conditional(table.client.project, table.instance_id, table.table_id) @@ -141,6 +146,7 @@ async def read_row(project_id, instance_id, table_id): row_key = "phone#4c410523#20190501" row = await table.read_row(row_key) print(row) + # [END bigtable_async_reads_row] await read_row(table.client.project, table.instance_id, table.table_id) @@ -158,6 +164,7 @@ async def read_row_partial(project_id, instance_id, table_id): row = await table.read_row(row_key, row_filter=col_filter) print(row) + # [END bigtable_async_reads_row_partial] await read_row_partial(table.client.project, table.instance_id, table.table_id) @@ -171,10 +178,9 @@ async def read_rows(project_id, instance_id, table_id): async with BigtableDataClientAsync(project=project_id) as client: async with client.get_table(instance_id, table_id) as table: - query = ReadRowsQuery(row_keys=[ - b"phone#4c410523#20190501", - b"phone#4c410523#20190502" - ]) + query = ReadRowsQuery( + row_keys=[b"phone#4c410523#20190501", b"phone#4c410523#20190502"] + ) async for row in await table.read_rows_stream(query): print(row) @@ -194,12 +200,13 @@ async def read_row_range(project_id, instance_id, table_id): row_range = RowRange( start_key=b"phone#4c410523#20190501", - end_key=b"phone#4c410523#201906201" + end_key=b"phone#4c410523#201906201", ) query = ReadRowsQuery(row_ranges=[row_range]) async for row in await table.read_rows_stream(query): print(row) + # [END 
bigtable_async_reads_row_range] await read_row_range(table.client.project, table.instance_id, table.table_id) @@ -221,6 +228,7 @@ async def read_prefix(project_id, instance_id, table_id): async for row in await table.read_rows_stream(query): print(row) + # [END bigtable_async_reads_prefix] await read_prefix(table.client.project, table.instance_id, table.table_id) @@ -240,5 +248,30 @@ async def read_with_filter(project_id, instance_id, table_id): async for row in await table.read_rows_stream(query): print(row) + # [END bigtable_async_reads_filter] await read_with_filter(table.client.project, table.instance_id, table.table_id) + + +async def execute_query(table): + # [START bigtable_async_execute_query] + from google.cloud.bigtable.data import BigtableDataClientAsync + + async def execute_query(project_id, instance_id, table_id): + async with BigtableDataClientAsync(project=project_id) as client: + query = ( + "SELECT _key, stats_summary['os_build'], " + "stats_summary['connected_cell'], " + "stats_summary['connected_wifi'] " + f"from `{table_id}` WHERE _key=@row_key" + ) + result = await client.execute_query( + query, + instance_id, + parameters={"row_key": b"phone#4c410523#20190501"}, + ) + results = [r async for r in result] + print(results) + + # [END bigtable_async_execute_query] + await execute_query(table.client.project, table.instance_id, table.table_id) diff --git a/packages/google-cloud-bigtable/samples/snippets/data_client/data_client_snippets_async_test.py b/packages/google-cloud-bigtable/samples/snippets/data_client/data_client_snippets_async_test.py index d9968e6dc6b7..2e0fb9b8153c 100644 --- a/packages/google-cloud-bigtable/samples/snippets/data_client/data_client_snippets_async_test.py +++ b/packages/google-cloud-bigtable/samples/snippets/data_client/data_client_snippets_async_test.py @@ -101,3 +101,8 @@ async def test_read_with_prefix(table): @pytest.mark.asyncio async def test_read_with_filter(table): await data_snippets.read_with_filter(table) + + +@pytest.mark.asyncio +async def test_execute_query(table): + await data_snippets.execute_query(table) diff --git a/packages/google-cloud-bigtable/testing/constraints-3.8.txt b/packages/google-cloud-bigtable/testing/constraints-3.8.txt index fa7c56db10ee..5ed0c2fb9a10 100644 --- a/packages/google-cloud-bigtable/testing/constraints-3.8.txt +++ b/packages/google-cloud-bigtable/testing/constraints-3.8.txt @@ -12,3 +12,4 @@ grpc-google-iam-v1==0.12.4 proto-plus==1.22.3 libcst==0.2.5 protobuf==3.20.2 +pytest-asyncio==0.21.2 diff --git a/packages/google-cloud-bigtable/tests/_testing.py b/packages/google-cloud-bigtable/tests/_testing.py new file mode 100644 index 000000000000..81cce7b78454 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/_testing.py @@ -0,0 +1,36 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from google.cloud.bigtable_v2.types.data import ProtoRows, Value as PBValue + + +TYPE_INT = { + "int64_type": { + "encoding": {"big_endian_bytes": {"bytes_type": {"encoding": {"raw": {}}}}} + } +} + + +def proto_rows_bytes(*args): + return ProtoRows.serialize(ProtoRows(values=[PBValue(**arg) for arg in args])) + + +def split_bytes_into_chunks(bytes_to_split, num_chunks): + from google.cloud.bigtable.helpers import batched + + assert num_chunks <= len(bytes_to_split) + bytes_per_part = (len(bytes_to_split) - 1) // num_chunks + 1 + result = list(map(bytes, batched(bytes_to_split, bytes_per_part))) + assert len(result) == num_chunks + return result diff --git a/packages/google-cloud-bigtable/tests/system/data/test_execute_query_async.py b/packages/google-cloud-bigtable/tests/system/data/test_execute_query_async.py new file mode 100644 index 000000000000..a680d2de098d --- /dev/null +++ b/packages/google-cloud-bigtable/tests/system/data/test_execute_query_async.py @@ -0,0 +1,288 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest + +import os +from unittest import mock +from .test_execute_query_utils import ( + ChannelMockAsync, + response_with_metadata, + response_with_result, +) +from google.api_core import exceptions as core_exceptions +from google.cloud.bigtable.data import BigtableDataClientAsync +import google.cloud.bigtable.data._async.client + +TABLE_NAME = "TABLE_NAME" +INSTANCE_NAME = "INSTANCE_NAME" + + +class TestAsyncExecuteQuery: + @pytest.fixture() + def async_channel_mock(self): + with mock.patch.dict(os.environ, {"BIGTABLE_EMULATOR_HOST": "localhost"}): + yield ChannelMockAsync() + + @pytest.fixture() + def async_client(self, async_channel_mock): + with mock.patch.dict( + os.environ, {"BIGTABLE_EMULATOR_HOST": "localhost"} + ), mock.patch.object( + google.cloud.bigtable.data._async.client, + "PooledChannel", + return_value=async_channel_mock, + ): + yield BigtableDataClientAsync() + + @pytest.mark.asyncio + async def test_execute_query(self, async_client, async_channel_mock): + values = [ + response_with_metadata(), + response_with_result("test"), + response_with_result(8, resume_token=b"r1"), + response_with_result("test2"), + response_with_result(9, resume_token=b"r2"), + response_with_result("test3"), + response_with_result(None, resume_token=b"r3"), + ] + async_channel_mock.set_values(values) + result = await async_client.execute_query( + f"SELECT a, b FROM {TABLE_NAME}", INSTANCE_NAME + ) + results = [r async for r in result] + assert results[0]["a"] == "test" + assert results[0]["b"] == 8 + assert results[1]["a"] == "test2" + assert results[1]["b"] == 9 + assert results[2]["a"] == "test3" + assert results[2]["b"] is None + assert len(async_channel_mock.execute_query_calls) == 1 + + @pytest.mark.asyncio + async def test_execute_query_with_params(self, async_client, async_channel_mock): + values = [ + response_with_metadata(), + response_with_result("test2"), + response_with_result(9, resume_token=b"r2"), + ] + 
async_channel_mock.set_values(values) + + result = await async_client.execute_query( + f"SELECT a, b FROM {TABLE_NAME} WHERE b=@b", + INSTANCE_NAME, + parameters={"b": 9}, + ) + results = [r async for r in result] + assert len(results) == 1 + assert results[0]["a"] == "test2" + assert results[0]["b"] == 9 + assert len(async_channel_mock.execute_query_calls) == 1 + + @pytest.mark.asyncio + async def test_execute_query_error_before_metadata( + self, async_client, async_channel_mock + ): + from google.api_core.exceptions import DeadlineExceeded + + values = [ + DeadlineExceeded(""), + response_with_metadata(), + response_with_result("test"), + response_with_result(8, resume_token=b"r1"), + response_with_result("test2"), + response_with_result(9, resume_token=b"r2"), + response_with_result("test3"), + response_with_result(None, resume_token=b"r3"), + ] + async_channel_mock.set_values(values) + + result = await async_client.execute_query( + f"SELECT a, b FROM {TABLE_NAME}", INSTANCE_NAME + ) + results = [r async for r in result] + assert len(results) == 3 + assert len(async_channel_mock.execute_query_calls) == 2 + + @pytest.mark.asyncio + async def test_execute_query_error_after_metadata( + self, async_client, async_channel_mock + ): + from google.api_core.exceptions import DeadlineExceeded + + values = [ + response_with_metadata(), + DeadlineExceeded(""), + response_with_metadata(), + response_with_result("test"), + response_with_result(8, resume_token=b"r1"), + response_with_result("test2"), + response_with_result(9, resume_token=b"r2"), + response_with_result("test3"), + response_with_result(None, resume_token=b"r3"), + ] + async_channel_mock.set_values(values) + + result = await async_client.execute_query( + f"SELECT a, b FROM {TABLE_NAME}", INSTANCE_NAME + ) + results = [r async for r in result] + assert len(results) == 3 + assert len(async_channel_mock.execute_query_calls) == 2 + assert async_channel_mock.resume_tokens == [] + + @pytest.mark.asyncio + async def test_execute_query_with_retries(self, async_client, async_channel_mock): + from google.api_core.exceptions import DeadlineExceeded + + values = [ + response_with_metadata(), + response_with_result("test"), + response_with_result(8, resume_token=b"r1"), + DeadlineExceeded(""), + response_with_result("test2"), + response_with_result(9, resume_token=b"r2"), + response_with_result("test3"), + DeadlineExceeded(""), + response_with_result("test3"), + response_with_result(None, resume_token=b"r3"), + ] + async_channel_mock.set_values(values) + + result = await async_client.execute_query( + f"SELECT a, b FROM {TABLE_NAME}", INSTANCE_NAME + ) + results = [r async for r in result] + assert results[0]["a"] == "test" + assert results[0]["b"] == 8 + assert results[1]["a"] == "test2" + assert results[1]["b"] == 9 + assert results[2]["a"] == "test3" + assert results[2]["b"] is None + assert len(async_channel_mock.execute_query_calls) == 3 + assert async_channel_mock.resume_tokens == [b"r1", b"r2"] + + @pytest.mark.parametrize( + "exception", + [ + (core_exceptions.DeadlineExceeded("")), + (core_exceptions.Aborted("")), + (core_exceptions.ServiceUnavailable("")), + ], + ) + @pytest.mark.asyncio + async def test_execute_query_retryable_error( + self, async_client, async_channel_mock, exception + ): + values = [ + response_with_metadata(), + response_with_result("test", resume_token=b"t1"), + exception, + response_with_result(8, resume_token=b"t2"), + ] + async_channel_mock.set_values(values) + + result = await async_client.execute_query( + f"SELECT 
a, b FROM {TABLE_NAME}", INSTANCE_NAME + ) + results = [r async for r in result] + assert len(results) == 1 + assert len(async_channel_mock.execute_query_calls) == 2 + assert async_channel_mock.resume_tokens == [b"t1"] + + @pytest.mark.asyncio + async def test_execute_query_retry_partial_row( + self, async_client, async_channel_mock + ): + values = [ + response_with_metadata(), + response_with_result("test", resume_token=b"t1"), + core_exceptions.DeadlineExceeded(""), + response_with_result(8, resume_token=b"t2"), + ] + async_channel_mock.set_values(values) + + result = await async_client.execute_query( + f"SELECT a, b FROM {TABLE_NAME}", INSTANCE_NAME + ) + results = [r async for r in result] + assert results[0]["a"] == "test" + assert results[0]["b"] == 8 + assert len(async_channel_mock.execute_query_calls) == 2 + assert async_channel_mock.resume_tokens == [b"t1"] + + @pytest.mark.parametrize( + "ExceptionType", + [ + (core_exceptions.InvalidArgument), + (core_exceptions.FailedPrecondition), + (core_exceptions.PermissionDenied), + (core_exceptions.MethodNotImplemented), + (core_exceptions.Cancelled), + (core_exceptions.AlreadyExists), + (core_exceptions.OutOfRange), + (core_exceptions.DataLoss), + (core_exceptions.Unauthenticated), + (core_exceptions.NotFound), + (core_exceptions.ResourceExhausted), + (core_exceptions.Unknown), + (core_exceptions.InternalServerError), + ], + ) + @pytest.mark.asyncio + async def test_execute_query_non_retryable( + self, async_client, async_channel_mock, ExceptionType + ): + values = [ + response_with_metadata(), + response_with_result("test"), + response_with_result(8, resume_token=b"r1"), + ExceptionType(""), + response_with_result("test2"), + response_with_result(9, resume_token=b"r2"), + response_with_result("test3"), + response_with_result(None, resume_token=b"r3"), + ] + async_channel_mock.set_values(values) + + result = await async_client.execute_query( + f"SELECT a, b FROM {TABLE_NAME}", INSTANCE_NAME + ) + r = await result.__anext__() + assert r["a"] == "test" + assert r["b"] == 8 + + with pytest.raises(ExceptionType): + r = await result.__anext__() + + assert len(async_channel_mock.execute_query_calls) == 1 + assert async_channel_mock.resume_tokens == [] + + @pytest.mark.asyncio + async def test_execute_query_metadata_received_multiple_times_detected( + self, async_client, async_channel_mock + ): + values = [ + response_with_metadata(), + response_with_metadata(), + ] + async_channel_mock.set_values(values) + + with pytest.raises(Exception, match="Invalid ExecuteQuery response received"): + [ + r + async for r in await async_client.execute_query( + f"SELECT a, b FROM {TABLE_NAME}", INSTANCE_NAME + ) + ] diff --git a/packages/google-cloud-bigtable/tests/system/data/test_execute_query_utils.py b/packages/google-cloud-bigtable/tests/system/data/test_execute_query_utils.py new file mode 100644 index 000000000000..9e27b95f294f --- /dev/null +++ b/packages/google-cloud-bigtable/tests/system/data/test_execute_query_utils.py @@ -0,0 +1,272 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from unittest import mock + +import google.cloud.bigtable_v2.services.bigtable.transports.pooled_grpc_asyncio as pga +from google.cloud.bigtable_v2.types.bigtable import ExecuteQueryResponse +from google.cloud.bigtable_v2.types.data import ProtoRows, Value as PBValue +import grpc.aio + + +try: + # async mock for python3.7-10 + from asyncio import coroutine + + def async_mock(return_value=None): + coro = mock.Mock(name="CoroutineResult") + corofunc = mock.Mock(name="CoroutineFunction", side_effect=coroutine(coro)) + corofunc.coro = coro + corofunc.coro.return_value = return_value + return corofunc + +except ImportError: + # async mock for python3.11 or later + from unittest.mock import AsyncMock + + def async_mock(return_value=None): + return AsyncMock(return_value=return_value) + + +# ExecuteQueryResponse( +# metadata={ +# "proto_schema": { +# "columns": [ +# {"name": "test1", "type_": TYPE_INT}, +# {"name": "test2", "type_": TYPE_INT}, +# ] +# } +# } +# ), +# ExecuteQueryResponse( +# results={"proto_rows_batch": {"batch_data": messages[0]}} +# ), + + +def response_with_metadata(): + schema = {"a": "string_type", "b": "int64_type"} + return ExecuteQueryResponse( + { + "metadata": { + "proto_schema": { + "columns": [ + {"name": name, "type_": {_type: {}}} + for name, _type in schema.items() + ] + } + } + } + ) + + +def response_with_result(*args, resume_token=None): + if resume_token is None: + resume_token_dict = {} + else: + resume_token_dict = {"resume_token": resume_token} + + values = [] + for column_value in args: + if column_value is None: + pb_value = PBValue({}) + else: + pb_value = PBValue( + { + "int_value" + if isinstance(column_value, int) + else "string_value": column_value + } + ) + values.append(pb_value) + rows = ProtoRows(values=values) + + return ExecuteQueryResponse( + { + "results": { + "proto_rows_batch": { + "batch_data": ProtoRows.serialize(rows), + }, + **resume_token_dict, + } + } + ) + + +class ExecuteQueryStreamMock: + def __init__(self, parent): + self.parent = parent + self.iter = iter(self.parent.values) + + def __call__(self, *args, **kwargs): + request = args[0] + + self.parent.execute_query_calls.append(request) + if request.resume_token: + self.parent.resume_tokens.append(request.resume_token) + + def stream(): + for value in self.iter: + if isinstance(value, Exception): + raise value + else: + yield value + + return stream() + + +class ChannelMock: + def __init__(self): + self.execute_query_calls = [] + self.values = [] + self.resume_tokens = [] + + def set_values(self, values): + self.values = values + + def unary_unary(self, *args, **kwargs): + return mock.MagicMock() + + def unary_stream(self, *args, **kwargs): + if args[0] == "/google.bigtable.v2.Bigtable/ExecuteQuery": + return ExecuteQueryStreamMock(self) + return mock.MagicMock() + + +class ChannelMockAsync(pga.PooledChannel, mock.MagicMock): + def __init__(self, *args, **kwargs): + mock.MagicMock.__init__(self, *args, **kwargs) + self.execute_query_calls = [] + self.values = [] + self.resume_tokens = [] + self._iter = [] + + def get_async_get(self, *args, **kwargs): + return self.async_gen + + def set_values(self, values): + self.values = values + self._iter = iter(self.values) + + def unary_unary(self, *args, **kwargs): + return async_mock() + + def unary_stream(self, *args, **kwargs): + if args[0] == "/google.bigtable.v2.Bigtable/ExecuteQuery": + + async def async_gen(*args, **kwargs): + 
for value in self._iter: + yield value + + iter = async_gen() + + class UnaryStreamCallMock(grpc.aio.UnaryStreamCall): + def __aiter__(self): + async def _impl(*args, **kwargs): + try: + while True: + yield await self.read() + except StopAsyncIteration: + pass + + return _impl() + + async def read(self): + value = await iter.__anext__() + if isinstance(value, Exception): + raise value + return value + + def add_done_callback(*args, **kwargs): + pass + + def cancel(*args, **kwargs): + pass + + def cancelled(*args, **kwargs): + pass + + def code(*args, **kwargs): + pass + + def details(*args, **kwargs): + pass + + def done(*args, **kwargs): + pass + + def initial_metadata(*args, **kwargs): + pass + + def time_remaining(*args, **kwargs): + pass + + def trailing_metadata(*args, **kwargs): + pass + + async def wait_for_connection(*args, **kwargs): + return async_mock() + + class UnaryStreamMultiCallableMock(grpc.aio.UnaryStreamMultiCallable): + def __init__(self, parent): + self.parent = parent + + def __call__( + self, + request, + *, + timeout=None, + metadata=None, + credentials=None, + wait_for_ready=None, + compression=None + ): + self.parent.execute_query_calls.append(request) + if request.resume_token: + self.parent.resume_tokens.append(request.resume_token) + return UnaryStreamCallMock() + + def add_done_callback(*args, **kwargs): + pass + + def cancel(*args, **kwargs): + pass + + def cancelled(*args, **kwargs): + pass + + def code(*args, **kwargs): + pass + + def details(*args, **kwargs): + pass + + def done(*args, **kwargs): + pass + + def initial_metadata(*args, **kwargs): + pass + + def time_remaining(*args, **kwargs): + pass + + def trailing_metadata(*args, **kwargs): + pass + + def wait_for_connection(*args, **kwargs): + pass + + # unary_stream should return https://grpc.github.io/grpc/python/grpc_asyncio.html#grpc.aio.UnaryStreamMultiCallable + # PTAL https://grpc.github.io/grpc/python/grpc_asyncio.html#grpc.aio.Channel.unary_stream + return UnaryStreamMultiCallableMock(self) + return async_mock() diff --git a/packages/google-cloud-bigtable/tests/unit/_testing.py b/packages/google-cloud-bigtable/tests/unit/_testing.py new file mode 100644 index 000000000000..e0d8d2a22166 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/_testing.py @@ -0,0 +1,16 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# flake8: noqa +from .._testing import TYPE_INT, split_bytes_into_chunks, proto_rows_bytes diff --git a/packages/google-cloud-bigtable/tests/unit/data/_async/__init__.py b/packages/google-cloud-bigtable/tests/unit/data/_async/__init__.py new file mode 100644 index 000000000000..6d5e14bcf4a0 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/data/_async/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/packages/google-cloud-bigtable/tests/unit/data/_testing.py b/packages/google-cloud-bigtable/tests/unit/data/_testing.py new file mode 100644 index 000000000000..b5dd3f444f66 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/data/_testing.py @@ -0,0 +1,18 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# flake8: noqa +from unittest.mock import Mock +from .._testing import TYPE_INT, split_bytes_into_chunks, proto_rows_bytes diff --git a/packages/google-cloud-bigtable/tests/unit/data/execute_query/__init__.py b/packages/google-cloud-bigtable/tests/unit/data/execute_query/__init__.py new file mode 100644 index 000000000000..6d5e14bcf4a0 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/data/execute_query/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/packages/google-cloud-bigtable/tests/unit/data/execute_query/_async/__init__.py b/packages/google-cloud-bigtable/tests/unit/data/execute_query/_async/__init__.py new file mode 100644 index 000000000000..6d5e14bcf4a0 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/data/execute_query/_async/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/packages/google-cloud-bigtable/tests/unit/data/execute_query/_async/_testing.py b/packages/google-cloud-bigtable/tests/unit/data/execute_query/_async/_testing.py new file mode 100644 index 000000000000..5a7acbdd94a8 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/data/execute_query/_async/_testing.py @@ -0,0 +1,36 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# flake8: noqa +from .._testing import TYPE_INT, split_bytes_into_chunks, proto_rows_bytes + + +try: + # async mock for python3.7-10 + from unittest.mock import Mock + from asyncio import coroutine + + def async_mock(return_value=None): + coro = Mock(name="CoroutineResult") + corofunc = Mock(name="CoroutineFunction", side_effect=coroutine(coro)) + corofunc.coro = coro + corofunc.coro.return_value = return_value + return corofunc + +except ImportError: + # async mock for python3.11 or later + from unittest.mock import AsyncMock + + def async_mock(return_value=None): + return AsyncMock(return_value=return_value) diff --git a/packages/google-cloud-bigtable/tests/unit/data/execute_query/_async/test_query_iterator.py b/packages/google-cloud-bigtable/tests/unit/data/execute_query/_async/test_query_iterator.py new file mode 100644 index 000000000000..5c577ed74702 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/data/execute_query/_async/test_query_iterator.py @@ -0,0 +1,156 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import asyncio +from unittest.mock import Mock +from mock import patch +import pytest +from google.cloud.bigtable.data.execute_query._async.execute_query_iterator import ( + ExecuteQueryIteratorAsync, +) +from google.cloud.bigtable_v2.types.bigtable import ExecuteQueryResponse +from ._testing import TYPE_INT, proto_rows_bytes, split_bytes_into_chunks, async_mock + + +class MockIteratorAsync: + def __init__(self, values, delay=None): + self._values = values + self.idx = 0 + self._delay = delay + + def __aiter__(self): + return self + + async def __anext__(self): + if self.idx >= len(self._values): + raise StopAsyncIteration + if self._delay is not None: + await asyncio.sleep(self._delay) + value = self._values[self.idx] + self.idx += 1 + return value + + +@pytest.fixture +def proto_byte_stream(): + proto_rows = [ + proto_rows_bytes({"int_value": 1}, {"int_value": 2}), + proto_rows_bytes({"int_value": 3}, {"int_value": 4}), + proto_rows_bytes({"int_value": 5}, {"int_value": 6}), + ] + + messages = [ + *split_bytes_into_chunks(proto_rows[0], num_chunks=2), + *split_bytes_into_chunks(proto_rows[1], num_chunks=3), + proto_rows[2], + ] + + stream = [ + ExecuteQueryResponse( + metadata={ + "proto_schema": { + "columns": [ + {"name": "test1", "type_": TYPE_INT}, + {"name": "test2", "type_": TYPE_INT}, + ] + } + } + ), + ExecuteQueryResponse(results={"proto_rows_batch": {"batch_data": messages[0]}}), + ExecuteQueryResponse( + results={ + "proto_rows_batch": {"batch_data": messages[1]}, + "resume_token": b"token1", + } + ), + ExecuteQueryResponse(results={"proto_rows_batch": {"batch_data": messages[2]}}), + ExecuteQueryResponse(results={"proto_rows_batch": {"batch_data": messages[3]}}), + ExecuteQueryResponse( + results={ + "proto_rows_batch": {"batch_data": messages[4]}, + "resume_token": b"token2", + } + ), + ExecuteQueryResponse( + results={ + "proto_rows_batch": {"batch_data": messages[5]}, + "resume_token": b"token3", + } + ), + ] + return stream + + +@pytest.mark.asyncio +async def test_iterator(proto_byte_stream): + client_mock = Mock() + + client_mock._register_instance = async_mock() + client_mock._remove_instance_registration = async_mock() + mock_async_iterator = MockIteratorAsync(proto_byte_stream) + iterator = None + + with patch( + "google.api_core.retry.retry_target_stream_async", + return_value=mock_async_iterator, + ): + iterator = ExecuteQueryIteratorAsync( + client=client_mock, + instance_id="test-instance", + app_profile_id="test_profile", + request_body={}, + attempt_timeout=10, + operation_timeout=10, + req_metadata=(), + retryable_excs=[], + ) + result = [] + async for value in iterator: + result.append(tuple(value)) + assert result == [(1, 2), (3, 4), (5, 6)] + + assert iterator.is_closed + client_mock._register_instance.assert_called_once() + client_mock._remove_instance_registration.assert_called_once() + + assert mock_async_iterator.idx == len(proto_byte_stream) + + +@pytest.mark.asyncio +async def test_iterator_awaits_metadata(proto_byte_stream): + client_mock = Mock() + + client_mock._register_instance = async_mock() + client_mock._remove_instance_registration = async_mock() + mock_async_iterator = MockIteratorAsync(proto_byte_stream) + iterator = None + with patch( + "google.api_core.retry.retry_target_stream_async", + return_value=mock_async_iterator, + ): + iterator = ExecuteQueryIteratorAsync( + client=client_mock, + instance_id="test-instance", + app_profile_id="test_profile", + request_body={}, + attempt_timeout=10, + operation_timeout=10, + req_metadata=(), + 
retryable_excs=[], + ) + + await iterator.metadata() + + assert mock_async_iterator.idx == 1 diff --git a/packages/google-cloud-bigtable/tests/unit/data/execute_query/_testing.py b/packages/google-cloud-bigtable/tests/unit/data/execute_query/_testing.py new file mode 100644 index 000000000000..9d24eee342cb --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/data/execute_query/_testing.py @@ -0,0 +1,17 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# flake8: noqa +from .._testing import TYPE_INT, split_bytes_into_chunks, proto_rows_bytes diff --git a/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_byte_cursor.py b/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_byte_cursor.py new file mode 100644 index 000000000000..e283e1ca215c --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_byte_cursor.py @@ -0,0 +1,149 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from google.cloud.bigtable_v2.types.bigtable import ExecuteQueryResponse +from google.cloud.bigtable.data.execute_query._byte_cursor import _ByteCursor + +from ._testing import TYPE_INT + + +def pass_values_to_byte_cursor(byte_cursor, iterable): + for value in iterable: + result = byte_cursor.consume(value) + if result is not None: + yield result + + +class TestByteCursor: + def test__proto_rows_batch__complete_data(self): + byte_cursor = _ByteCursor() + stream = [ + ExecuteQueryResponse( + metadata={ + "proto_schema": {"columns": [{"name": "test1", "type_": TYPE_INT}]} + } + ), + ExecuteQueryResponse(results={"proto_rows_batch": {"batch_data": b"123"}}), + ExecuteQueryResponse(results={"proto_rows_batch": {"batch_data": b"456"}}), + ExecuteQueryResponse(results={"proto_rows_batch": {"batch_data": b"789"}}), + ExecuteQueryResponse( + results={ + "proto_rows_batch": {"batch_data": b"0"}, + "resume_token": b"token1", + } + ), + ExecuteQueryResponse(results={"proto_rows_batch": {"batch_data": b"abc"}}), + ExecuteQueryResponse(results={"proto_rows_batch": {"batch_data": b"def"}}), + ExecuteQueryResponse(results={"proto_rows_batch": {"batch_data": b"ghi"}}), + ExecuteQueryResponse( + results={ + "proto_rows_batch": {"batch_data": b"j"}, + "resume_token": b"token2", + } + ), + ] + assert byte_cursor.metadata is None + byte_cursor_iter = pass_values_to_byte_cursor(byte_cursor, stream) + value = next(byte_cursor_iter) + assert value == b"1234567890" + assert byte_cursor._resume_token == b"token1" + assert byte_cursor.metadata.columns[0].column_name == "test1" + + value = next(byte_cursor_iter) + assert value == b"abcdefghij" + assert byte_cursor._resume_token == b"token2" + + def test__proto_rows_batch__empty_proto_rows_batch(self): + byte_cursor = _ByteCursor() + stream = [ + ExecuteQueryResponse( + metadata={ + "proto_schema": {"columns": [{"name": "test1", "type_": TYPE_INT}]} + } + ), + ExecuteQueryResponse( + results={"proto_rows_batch": {}, "resume_token": b"token1"} + ), + ExecuteQueryResponse(results={"proto_rows_batch": {"batch_data": b"123"}}), + ExecuteQueryResponse( + results={ + "proto_rows_batch": {"batch_data": b"0"}, + "resume_token": b"token2", + } + ), + ] + + byte_cursor_iter = pass_values_to_byte_cursor(byte_cursor, stream) + value = next(byte_cursor_iter) + assert value == b"1230" + assert byte_cursor._resume_token == b"token2" + + def test__proto_rows_batch__no_proto_rows_batch(self): + byte_cursor = _ByteCursor() + stream = [ + ExecuteQueryResponse( + metadata={ + "proto_schema": {"columns": [{"name": "test1", "type_": TYPE_INT}]} + } + ), + ExecuteQueryResponse(results={"resume_token": b"token1"}), + ExecuteQueryResponse(results={"proto_rows_batch": {"batch_data": b"123"}}), + ExecuteQueryResponse( + results={ + "proto_rows_batch": {"batch_data": b"0"}, + "resume_token": b"token2", + } + ), + ] + + byte_cursor_iter = pass_values_to_byte_cursor(byte_cursor, stream) + value = next(byte_cursor_iter) + assert value == b"1230" + assert byte_cursor._resume_token == b"token2" + + def test__proto_rows_batch__no_resume_token_at_the_end_of_stream(self): + byte_cursor = _ByteCursor() + stream = [ + ExecuteQueryResponse( + metadata={ + "proto_schema": {"columns": [{"name": "test1", "type_": TYPE_INT}]} + } + ), + ExecuteQueryResponse( + results={ + "proto_rows_batch": {"batch_data": b"0"}, + "resume_token": b"token1", + } + ), + ExecuteQueryResponse(results={"proto_rows_batch": {"batch_data": b"abc"}}), + ExecuteQueryResponse(results={"proto_rows_batch": {"batch_data": b"def"}}), + 
ExecuteQueryResponse(results={"proto_rows_batch": {"batch_data": b"ghi"}}), + ExecuteQueryResponse( + results={ + "proto_rows_batch": {"batch_data": b"j"}, + } + ), + ] + assert byte_cursor.metadata is None + assert byte_cursor.consume(stream[0]) is None + value = byte_cursor.consume(stream[1]) + assert value == b"0" + assert byte_cursor._resume_token == b"token1" + assert byte_cursor.metadata.columns[0].column_name == "test1" + + assert byte_cursor.consume(stream[2]) is None + assert byte_cursor.consume(stream[3]) is None + assert byte_cursor.consume(stream[3]) is None + assert byte_cursor.consume(stream[4]) is None + assert byte_cursor.consume(stream[5]) is None diff --git a/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_execute_query_parameters_parsing.py b/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_execute_query_parameters_parsing.py new file mode 100644 index 000000000000..914a0920ab41 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_execute_query_parameters_parsing.py @@ -0,0 +1,134 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest +from google.cloud.bigtable.data.execute_query._parameters_formatting import ( + _format_execute_query_params, +) +from google.cloud.bigtable.data.execute_query.metadata import SqlType +from google.cloud.bigtable.data.execute_query.values import Struct +import datetime + +from google.type import date_pb2 +from google.api_core.datetime_helpers import DatetimeWithNanoseconds + + +timestamp = int( + datetime.datetime(2024, 5, 12, 17, 44, 12, tzinfo=datetime.timezone.utc).timestamp() +) +dt_micros_non_zero = DatetimeWithNanoseconds( + 2024, 5, 12, 17, 44, 12, 123, nanosecond=0, tzinfo=datetime.timezone.utc +).timestamp_pb() +dt_nanos_zero = DatetimeWithNanoseconds( + 2024, 5, 12, 17, 44, 12, nanosecond=0, tzinfo=datetime.timezone.utc +).timestamp_pb() +dt_nanos_non_zero = DatetimeWithNanoseconds( + 2024, 5, 12, 17, 44, 12, nanosecond=12, tzinfo=datetime.timezone.utc +).timestamp_pb() +pb_date = date_pb2.Date(year=2024, month=5, day=15) + + +@pytest.mark.parametrize( + "input_value,value_field,type_field,expected_value", + [ + (1, "int_value", "int64_type", 1), + ("2", "string_value", "string_type", "2"), + (b"3", "bytes_value", "bytes_type", b"3"), + (True, "bool_value", "bool_type", True), + ( + datetime.datetime.fromtimestamp(timestamp), + "timestamp_value", + "timestamp_type", + dt_nanos_zero, + ), + ( + datetime.datetime( + 2024, 5, 12, 17, 44, 12, 123, tzinfo=datetime.timezone.utc + ), + "timestamp_value", + "timestamp_type", + dt_micros_non_zero, + ), + (datetime.date(2024, 5, 15), "date_value", "date_type", pb_date), + ( + DatetimeWithNanoseconds( + 2024, 5, 12, 17, 44, 12, nanosecond=12, tzinfo=datetime.timezone.utc + ), + "timestamp_value", + "timestamp_type", + dt_nanos_non_zero, + ), + ], +) +def test_instance_execute_query_parameters_simple_types_parsing( + input_value, value_field, type_field, expected_value +): + result = 
_format_execute_query_params( + { + "test": input_value, + }, + None, + ) + assert result["test"][value_field] == expected_value + assert type_field in result["test"]["type_"] + + +def test_instance_execute_query_parameters_not_supported_types(): + with pytest.raises(ValueError): + _format_execute_query_params({"test1": 1.1}, None) + + with pytest.raises(ValueError): + _format_execute_query_params({"test1": {"a": 1}}, None) + + with pytest.raises(ValueError): + _format_execute_query_params({"test1": [1]}, None) + + with pytest.raises(ValueError): + _format_execute_query_params({"test1": Struct([("field1", 1)])}, None) + + with pytest.raises(NotImplementedError, match="not supported"): + _format_execute_query_params( + {"test1": {"a": 1}}, + { + "test1": SqlType.Map(SqlType.String(), SqlType.Int64()), + }, + ) + + with pytest.raises(NotImplementedError, match="not supported"): + _format_execute_query_params( + {"test1": [1]}, + { + "test1": SqlType.Array(SqlType.Int64()), + }, + ) + + with pytest.raises(NotImplementedError, match="not supported"): + _format_execute_query_params( + {"test1": Struct([("field1", 1)])}, + {"test1": SqlType.Struct([("field1", SqlType.Int64())])}, + ) + + +def test_instance_execute_query_parameters_not_match(): + with pytest.raises(ValueError, match="test2"): + _format_execute_query_params( + { + "test1": 1, + "test2": 1, + }, + { + "test1": SqlType.Int64(), + "test2": SqlType.String(), + }, + ) diff --git a/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_query_result_parsing_utils.py b/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_query_result_parsing_utils.py new file mode 100644 index 000000000000..ff7211654545 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_query_result_parsing_utils.py @@ -0,0 +1,715 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import pytest +from google.cloud.bigtable.data.execute_query.values import Struct +from google.cloud.bigtable_v2 import Type as PBType, Value as PBValue +from google.cloud.bigtable.data.execute_query._query_result_parsing_utils import ( + _parse_pb_value_to_python_value, +) +from google.cloud.bigtable.data.execute_query.metadata import ( + _pb_type_to_metadata_type, + SqlType, +) + +from google.type import date_pb2 +from google.api_core.datetime_helpers import DatetimeWithNanoseconds + +import datetime + +from ._testing import TYPE_INT + +TYPE_BYTES = {"bytes_type": {}} +TYPE_TIMESTAMP = {"timestamp_type": {}} + + +class TestQueryResultParsingUtils: + @pytest.mark.parametrize( + "type_dict,value_dict,expected_metadata_type,expected_value", + [ + (TYPE_INT, {"int_value": 1}, SqlType.Int64, 1), + ( + {"string_type": {}}, + {"string_value": "test"}, + SqlType.String, + "test", + ), + ({"bool_type": {}}, {"bool_value": False}, SqlType.Bool, False), + ( + {"bytes_type": {}}, + {"bytes_value": b"test"}, + SqlType.Bytes, + b"test", + ), + ( + {"float64_type": {}}, + {"float_value": 17.21}, + SqlType.Float64, + 17.21, + ), + ( + {"timestamp_type": {}}, + {"timestamp_value": {"seconds": 1715864647, "nanos": 12}}, + SqlType.Timestamp, + DatetimeWithNanoseconds( + 2024, 5, 16, 13, 4, 7, nanosecond=12, tzinfo=datetime.timezone.utc + ), + ), + ( + {"date_type": {}}, + {"date_value": {"year": 1800, "month": 12, "day": 0}}, + SqlType.Date, + date_pb2.Date(year=1800, month=12, day=0), + ), + ], + ) + def test_basic_types( + self, type_dict, value_dict, expected_metadata_type, expected_value + ): + _type = PBType(type_dict) + metadata_type = _pb_type_to_metadata_type(_type) + assert type(metadata_type) is expected_metadata_type + value = PBValue(value_dict) + assert ( + _parse_pb_value_to_python_value(value._pb, metadata_type) == expected_value + ) + + # Larger test cases were extracted for readability + def test__array(self): + _type = PBType({"array_type": {"element_type": TYPE_INT}}) + metadata_type = _pb_type_to_metadata_type(_type) + assert type(metadata_type) is SqlType.Array + assert type(metadata_type.element_type) is SqlType.Int64 + value = PBValue( + { + "array_value": { + "values": [ + {"int_value": 1}, + {"int_value": 2}, + {"int_value": 3}, + {"int_value": 4}, + ] + } + } + ) + assert _parse_pb_value_to_python_value(value._pb, metadata_type) == [1, 2, 3, 4] + + def test__struct(self): + _type = PBType( + { + "struct_type": { + "fields": [ + { + "field_name": "field1", + "type_": TYPE_INT, + }, + { + "field_name": None, + "type_": {"string_type": {}}, + }, + { + "field_name": "field3", + "type_": {"array_type": {"element_type": TYPE_INT}}, + }, + { + "field_name": "field3", + "type_": {"string_type": {}}, + }, + ] + } + } + ) + value = PBValue( + { + "array_value": { + "values": [ + {"int_value": 1}, + {"string_value": "test2"}, + { + "array_value": { + "values": [ + {"int_value": 2}, + {"int_value": 3}, + {"int_value": 4}, + {"int_value": 5}, + ] + } + }, + {"string_value": "test4"}, + ] + } + } + ) + + metadata_type = _pb_type_to_metadata_type(_type) + assert type(metadata_type) is SqlType.Struct + assert type(metadata_type["field1"]) is SqlType.Int64 + assert type(metadata_type[1]) is SqlType.String + assert type(metadata_type[2]) is SqlType.Array + assert type(metadata_type[2].element_type) is SqlType.Int64 + assert type(metadata_type[3]) is SqlType.String + + # duplicate fields not accesible by name + with pytest.raises(KeyError, match="Ambigious field name"): + metadata_type["field3"] 
+ + result = _parse_pb_value_to_python_value(value._pb, metadata_type) + assert isinstance(result, Struct) + assert result["field1"] == result[0] == 1 + assert result[1] == "test2" + + # duplicate fields not accesible by name + with pytest.raises(KeyError, match="Ambigious field name"): + result["field3"] + + # duplicate fields accessible by index + assert result[2] == [2, 3, 4, 5] + assert result[3] == "test4" + + def test__array_of_structs(self): + _type = PBType( + { + "array_type": { + "element_type": { + "struct_type": { + "fields": [ + { + "field_name": "field1", + "type_": TYPE_INT, + }, + { + "field_name": None, + "type_": {"string_type": {}}, + }, + { + "field_name": "field3", + "type_": {"bool_type": {}}, + }, + ] + } + } + } + } + ) + value = PBValue( + { + "array_value": { + "values": [ + { + "array_value": { + "values": [ + {"int_value": 1}, + {"string_value": "test1"}, + {"bool_value": True}, + ] + } + }, + { + "array_value": { + "values": [ + {"int_value": 2}, + {"string_value": "test2"}, + {"bool_value": False}, + ] + } + }, + { + "array_value": { + "values": [ + {"int_value": 3}, + {"string_value": "test3"}, + {"bool_value": True}, + ] + } + }, + { + "array_value": { + "values": [ + {"int_value": 4}, + {"string_value": "test4"}, + {"bool_value": False}, + ] + } + }, + ] + } + } + ) + + metadata_type = _pb_type_to_metadata_type(_type) + assert type(metadata_type) is SqlType.Array + assert type(metadata_type.element_type) is SqlType.Struct + assert type(metadata_type.element_type["field1"]) is SqlType.Int64 + assert type(metadata_type.element_type[1]) is SqlType.String + assert type(metadata_type.element_type["field3"]) is SqlType.Bool + + result = _parse_pb_value_to_python_value(value._pb, metadata_type) + assert isinstance(result, list) + assert len(result) == 4 + + assert isinstance(result[0], Struct) + assert result[0]["field1"] == 1 + assert result[0][1] == "test1" + assert result[0]["field3"] + + assert isinstance(result[1], Struct) + assert result[1]["field1"] == 2 + assert result[1][1] == "test2" + assert not result[1]["field3"] + + assert isinstance(result[2], Struct) + assert result[2]["field1"] == 3 + assert result[2][1] == "test3" + assert result[2]["field3"] + + assert isinstance(result[3], Struct) + assert result[3]["field1"] == 4 + assert result[3][1] == "test4" + assert not result[3]["field3"] + + def test__map(self): + _type = PBType( + { + "map_type": { + "key_type": TYPE_INT, + "value_type": {"string_type": {}}, + } + } + ) + value = PBValue( + { + "array_value": { + "values": [ + { + "array_value": { + "values": [ + {"int_value": 1}, + {"string_value": "test1"}, + ] + } + }, + { + "array_value": { + "values": [ + {"int_value": 2}, + {"string_value": "test2"}, + ] + } + }, + { + "array_value": { + "values": [ + {"int_value": 3}, + {"string_value": "test3"}, + ] + } + }, + { + "array_value": { + "values": [ + {"int_value": 4}, + {"string_value": "test4"}, + ] + } + }, + ] + } + } + ) + + metadata_type = _pb_type_to_metadata_type(_type) + assert type(metadata_type) is SqlType.Map + assert type(metadata_type.key_type) is SqlType.Int64 + assert type(metadata_type.value_type) is SqlType.String + + result = _parse_pb_value_to_python_value(value._pb, metadata_type) + assert isinstance(result, dict) + assert len(result) == 4 + + assert result == { + 1: "test1", + 2: "test2", + 3: "test3", + 4: "test4", + } + + def test__map_repeated_values(self): + _type = PBType( + { + "map_type": { + "key_type": TYPE_INT, + "value_type": {"string_type": {}}, + } + }, + ) + value 
= PBValue( + { + "array_value": { + "values": [ + { + "array_value": { + "values": [ + {"int_value": 1}, + {"string_value": "test1"}, + ] + } + }, + { + "array_value": { + "values": [ + {"int_value": 1}, + {"string_value": "test2"}, + ] + } + }, + { + "array_value": { + "values": [ + {"int_value": 1}, + {"string_value": "test3"}, + ] + } + }, + ] + } + } + ) + + metadata_type = _pb_type_to_metadata_type(_type) + result = _parse_pb_value_to_python_value(value._pb, metadata_type) + assert len(result) == 1 + + assert result == { + 1: "test3", + } + + def test__map_of_maps_of_structs(self): + _type = PBType( + { + "map_type": { + "key_type": TYPE_INT, + "value_type": { + "map_type": { + "key_type": {"string_type": {}}, + "value_type": { + "struct_type": { + "fields": [ + { + "field_name": "field1", + "type_": TYPE_INT, + }, + { + "field_name": "field2", + "type_": {"string_type": {}}, + }, + ] + } + }, + } + }, + } + } + ) + value = PBValue( + { + "array_value": { + "values": [ # list of (int, map) tuples + { + "array_value": { + "values": [ # (int, map) tuple + {"int_value": 1}, + { + "array_value": { + "values": [ # list of (str, struct) tuples + { + "array_value": { + "values": [ # (str, struct) tuple + {"string_value": "1_1"}, + { + "array_value": { + "values": [ + { + "int_value": 1 + }, + { + "string_value": "test1" + }, + ] + } + }, + ] + } + }, + { + "array_value": { + "values": [ # (str, struct) tuple + {"string_value": "1_2"}, + { + "array_value": { + "values": [ + { + "int_value": 2 + }, + { + "string_value": "test2" + }, + ] + } + }, + ] + } + }, + ] + } + }, + ] + } + }, + { + "array_value": { + "values": [ # (int, map) tuple + {"int_value": 2}, + { + "array_value": { + "values": [ # list of (str, struct) tuples + { + "array_value": { + "values": [ # (str, struct) tuple + {"string_value": "2_1"}, + { + "array_value": { + "values": [ + { + "int_value": 3 + }, + { + "string_value": "test3" + }, + ] + } + }, + ] + } + }, + { + "array_value": { + "values": [ # (str, struct) tuple + {"string_value": "2_2"}, + { + "array_value": { + "values": [ + { + "int_value": 4 + }, + { + "string_value": "test4" + }, + ] + } + }, + ] + } + }, + ] + } + }, + ] + } + }, + ] + } + } + ) + metadata_type = _pb_type_to_metadata_type(_type) + assert type(metadata_type) is SqlType.Map + assert type(metadata_type.key_type) is SqlType.Int64 + assert type(metadata_type.value_type) is SqlType.Map + assert type(metadata_type.value_type.key_type) is SqlType.String + assert type(metadata_type.value_type.value_type) is SqlType.Struct + assert type(metadata_type.value_type.value_type["field1"]) is SqlType.Int64 + assert type(metadata_type.value_type.value_type["field2"]) is SqlType.String + result = _parse_pb_value_to_python_value(value._pb, metadata_type) + + assert result[1]["1_1"]["field1"] == 1 + assert result[1]["1_1"]["field2"] == "test1" + + assert result[1]["1_2"]["field1"] == 2 + assert result[1]["1_2"]["field2"] == "test2" + + assert result[2]["2_1"]["field1"] == 3 + assert result[2]["2_1"]["field2"] == "test3" + + assert result[2]["2_2"]["field1"] == 4 + assert result[2]["2_2"]["field2"] == "test4" + + def test__map_of_lists_of_structs(self): + _type = PBType( + { + "map_type": { + "key_type": TYPE_BYTES, + "value_type": { + "array_type": { + "element_type": { + "struct_type": { + "fields": [ + { + "field_name": "timestamp", + "type_": TYPE_TIMESTAMP, + }, + { + "field_name": "value", + "type_": TYPE_BYTES, + }, + ] + } + }, + } + }, + } + } + ) + value = PBValue( + { + "array_value": { + "values": [ # 
list of (byte, list) tuples + { + "array_value": { + "values": [ # (byte, list) tuple + {"bytes_value": b"key1"}, + { + "array_value": { + "values": [ # list of structs + { + "array_value": { + "values": [ # (timestamp, bytes) tuple + { + "timestamp_value": { + "seconds": 1111111111 + } + }, + { + "bytes_value": b"key1-value1" + }, + ] + } + }, + { + "array_value": { + "values": [ # (timestamp, bytes) tuple + { + "timestamp_value": { + "seconds": 2222222222 + } + }, + { + "bytes_value": b"key1-value2" + }, + ] + } + }, + ] + } + }, + ] + } + }, + { + "array_value": { + "values": [ # (byte, list) tuple + {"bytes_value": b"key2"}, + { + "array_value": { + "values": [ # list of structs + { + "array_value": { + "values": [ # (timestamp, bytes) tuple + { + "timestamp_value": { + "seconds": 3333333333 + } + }, + { + "bytes_value": b"key2-value1" + }, + ] + } + }, + { + "array_value": { + "values": [ # (timestamp, bytes) tuple + { + "timestamp_value": { + "seconds": 4444444444 + } + }, + { + "bytes_value": b"key2-value2" + }, + ] + } + }, + ] + } + }, + ] + } + }, + ] + } + } + ) + metadata_type = _pb_type_to_metadata_type(_type) + assert type(metadata_type) is SqlType.Map + assert type(metadata_type.key_type) is SqlType.Bytes + assert type(metadata_type.value_type) is SqlType.Array + assert type(metadata_type.value_type.element_type) is SqlType.Struct + assert ( + type(metadata_type.value_type.element_type["timestamp"]) + is SqlType.Timestamp + ) + assert type(metadata_type.value_type.element_type["value"]) is SqlType.Bytes + result = _parse_pb_value_to_python_value(value._pb, metadata_type) + + timestamp1 = DatetimeWithNanoseconds( + 2005, 3, 18, 1, 58, 31, tzinfo=datetime.timezone.utc + ) + timestamp2 = DatetimeWithNanoseconds( + 2040, 6, 2, 3, 57, 2, tzinfo=datetime.timezone.utc + ) + timestamp3 = DatetimeWithNanoseconds( + 2075, 8, 18, 5, 55, 33, tzinfo=datetime.timezone.utc + ) + timestamp4 = DatetimeWithNanoseconds( + 2110, 11, 3, 7, 54, 4, tzinfo=datetime.timezone.utc + ) + + assert result[b"key1"][0]["timestamp"] == timestamp1 + assert result[b"key1"][0]["value"] == b"key1-value1" + assert result[b"key1"][1]["timestamp"] == timestamp2 + assert result[b"key1"][1]["value"] == b"key1-value2" + assert result[b"key2"][0]["timestamp"] == timestamp3 + assert result[b"key2"][0]["value"] == b"key2-value1" + assert result[b"key2"][1]["timestamp"] == timestamp4 + assert result[b"key2"][1]["value"] == b"key2-value2" + + def test__invalid_type_throws_exception(self): + _type = PBType({"string_type": {}}) + value = PBValue({"int_value": 1}) + metadata_type = _pb_type_to_metadata_type(_type) + + with pytest.raises( + ValueError, + match="string_value field for String type not found in a Value.", + ): + _parse_pb_value_to_python_value(value._pb, metadata_type) diff --git a/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_query_result_row_reader.py b/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_query_result_row_reader.py new file mode 100644 index 000000000000..2bb1e4da01e2 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_query_result_row_reader.py @@ -0,0 +1,310 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest +from unittest import mock +from google.cloud.bigtable_v2.types.bigtable import ExecuteQueryResponse +from google.cloud.bigtable_v2.types.data import Value as PBValue +from google.cloud.bigtable.data.execute_query._reader import _QueryResultRowReader + +from google.cloud.bigtable.data.execute_query.metadata import ProtoMetadata, SqlType + +import google.cloud.bigtable.data.execute_query._reader +from ._testing import TYPE_INT, proto_rows_bytes + + +class TestQueryResultRowReader: + def test__single_values_received(self): + byte_cursor = mock.Mock( + metadata=ProtoMetadata( + [("test1", SqlType.Int64()), ("test2", SqlType.Int64())] + ) + ) + values = [ + proto_rows_bytes({"int_value": 1}), + proto_rows_bytes({"int_value": 2}), + proto_rows_bytes({"int_value": 3}), + ] + + reader = _QueryResultRowReader(byte_cursor) + + assert reader.consume(values[0]) is None + result = reader.consume(values[1]) + assert len(result) == 1 + assert len(result[0]) == 2 + assert reader.consume(values[2]) is None + + def test__multiple_rows_received(self): + values = [ + proto_rows_bytes( + {"int_value": 1}, + {"int_value": 2}, + {"int_value": 3}, + {"int_value": 4}, + ), + proto_rows_bytes({"int_value": 5}, {"int_value": 6}), + proto_rows_bytes({"int_value": 7}, {"int_value": 8}), + ] + + byte_cursor = mock.Mock( + metadata=ProtoMetadata( + [("test1", SqlType.Int64()), ("test2", SqlType.Int64())] + ) + ) + + reader = _QueryResultRowReader(byte_cursor) + + result = reader.consume(values[0]) + assert len(result) == 2 + assert len(result[0]) == 2 + assert result[0][0] == result[0]["test1"] == 1 + assert result[0][1] == result[0]["test2"] == 2 + + assert len(result[1]) == 2 + assert result[1][0] == result[1]["test1"] == 3 + assert result[1][1] == result[1]["test2"] == 4 + + result = reader.consume(values[1]) + assert len(result) == 1 + assert len(result[0]) == 2 + assert result[0][0] == result[0]["test1"] == 5 + assert result[0][1] == result[0]["test2"] == 6 + + result = reader.consume(values[2]) + assert len(result) == 1 + assert len(result[0]) == 2 + assert result[0][0] == result[0]["test1"] == 7 + assert result[0][1] == result[0]["test2"] == 8 + + def test__received_values_are_passed_to_parser_in_batches(self): + byte_cursor = mock.Mock( + metadata=ProtoMetadata( + [("test1", SqlType.Int64()), ("test2", SqlType.Int64())] + ) + ) + + assert SqlType.Struct([("a", SqlType.Int64())]) == SqlType.Struct( + [("a", SqlType.Int64())] + ) + assert SqlType.Struct([("a", SqlType.String())]) != SqlType.Struct( + [("a", SqlType.Int64())] + ) + assert SqlType.Struct([("a", SqlType.Int64())]) != SqlType.Struct( + [("b", SqlType.Int64())] + ) + + assert SqlType.Array(SqlType.Int64()) == SqlType.Array(SqlType.Int64()) + assert SqlType.Array(SqlType.Int64()) != SqlType.Array(SqlType.String()) + + assert SqlType.Map(SqlType.Int64(), SqlType.String()) == SqlType.Map( + SqlType.Int64(), SqlType.String() + ) + assert SqlType.Map(SqlType.Int64(), SqlType.String()) != SqlType.Map( + SqlType.String(), SqlType.String() + ) + + values = [ + {"int_value": 1}, + {"int_value": 2}, + ] + + reader = 
_QueryResultRowReader(byte_cursor) + with mock.patch.object( + google.cloud.bigtable.data.execute_query._reader, + "_parse_pb_value_to_python_value", + ) as parse_mock: + reader.consume(proto_rows_bytes(values[0])) + parse_mock.assert_not_called() + reader.consume(proto_rows_bytes(values[1])) + parse_mock.assert_has_calls( + [ + mock.call(PBValue(values[0]), SqlType.Int64()), + mock.call(PBValue(values[1]), SqlType.Int64()), + ] + ) + + def test__parser_errors_are_forwarded(self): + byte_cursor = mock.Mock(metadata=ProtoMetadata([("test1", SqlType.Int64())])) + + values = [ + {"string_value": "test"}, + ] + + reader = _QueryResultRowReader(byte_cursor) + with mock.patch.object( + google.cloud.bigtable.data.execute_query._reader, + "_parse_pb_value_to_python_value", + side_effect=ValueError("test"), + ) as parse_mock: + with pytest.raises(ValueError, match="test"): + reader.consume(proto_rows_bytes(values[0])) + + parse_mock.assert_has_calls( + [ + mock.call(PBValue(values[0]), SqlType.Int64()), + ] + ) + + def test__multiple_proto_rows_received_with_one_resume_token(self): + from google.cloud.bigtable.data.execute_query._byte_cursor import _ByteCursor + + def split_bytes_into_chunks(bytes_to_split, num_chunks): + from google.cloud.bigtable.helpers import batched + + assert num_chunks <= len(bytes_to_split) + bytes_per_part = (len(bytes_to_split) - 1) // num_chunks + 1 + result = list(map(bytes, batched(bytes_to_split, bytes_per_part))) + assert len(result) == num_chunks + return result + + def pass_values_to_byte_cursor(byte_cursor, iterable): + for value in iterable: + result = byte_cursor.consume(value) + if result is not None: + yield result + + proto_rows = [ + proto_rows_bytes({"int_value": 1}, {"int_value": 2}), + proto_rows_bytes({"int_value": 3}, {"int_value": 4}), + proto_rows_bytes({"int_value": 5}, {"int_value": 6}), + ] + + messages = [ + *split_bytes_into_chunks(proto_rows[0], num_chunks=2), + *split_bytes_into_chunks(proto_rows[1], num_chunks=3), + proto_rows[2], + ] + + stream = [ + ExecuteQueryResponse( + metadata={ + "proto_schema": { + "columns": [ + {"name": "test1", "type_": TYPE_INT}, + {"name": "test2", "type_": TYPE_INT}, + ] + } + } + ), + ExecuteQueryResponse( + results={"proto_rows_batch": {"batch_data": messages[0]}} + ), + ExecuteQueryResponse( + results={"proto_rows_batch": {"batch_data": messages[1]}} + ), + ExecuteQueryResponse( + results={"proto_rows_batch": {"batch_data": messages[2]}} + ), + ExecuteQueryResponse( + results={"proto_rows_batch": {"batch_data": messages[3]}} + ), + ExecuteQueryResponse( + results={ + "proto_rows_batch": {"batch_data": messages[4]}, + "resume_token": b"token1", + } + ), + ExecuteQueryResponse( + results={ + "proto_rows_batch": {"batch_data": messages[5]}, + "resume_token": b"token2", + } + ), + ] + + byte_cursor = _ByteCursor() + + reader = _QueryResultRowReader(byte_cursor) + + byte_cursor_iter = pass_values_to_byte_cursor(byte_cursor, stream) + + returned_values = [] + + def intercept_return_values(func): + nonlocal intercept_return_values + + def wrapped(*args, **kwargs): + value = func(*args, **kwargs) + returned_values.append(value) + return value + + return wrapped + + with mock.patch.object( + reader, + "_parse_proto_rows", + wraps=intercept_return_values(reader._parse_proto_rows), + ): + result = reader.consume(next(byte_cursor_iter)) + + # Despite the fact that two ProtoRows were received, a single resume_token after the second ProtoRows object forces us to parse them together. 
+ # We will interpret them as one larger ProtoRows object. + assert len(returned_values) == 1 + assert len(returned_values[0]) == 4 + assert returned_values[0][0].int_value == 1 + assert returned_values[0][1].int_value == 2 + assert returned_values[0][2].int_value == 3 + assert returned_values[0][3].int_value == 4 + + assert len(result) == 2 + assert len(result[0]) == 2 + assert result[0][0] == 1 + assert result[0]["test1"] == 1 + assert result[0][1] == 2 + assert result[0]["test2"] == 2 + assert len(result[1]) == 2 + assert result[1][0] == 3 + assert result[1]["test1"] == 3 + assert result[1][1] == 4 + assert result[1]["test2"] == 4 + assert byte_cursor._resume_token == b"token1" + + returned_values = [] + with mock.patch.object( + reader, + "_parse_proto_rows", + wraps=intercept_return_values(reader._parse_proto_rows), + ): + result = reader.consume(next(byte_cursor_iter)) + + assert len(result) == 1 + assert len(result[0]) == 2 + assert result[0][0] == 5 + assert result[0]["test1"] == 5 + assert result[0][1] == 6 + assert result[0]["test2"] == 6 + assert byte_cursor._resume_token == b"token2" + + +class TestProtoMetadata: + def test__duplicate_column_names(self): + metadata = ProtoMetadata( + [ + ("test1", SqlType.Int64()), + ("test2", SqlType.Bytes()), + ("test2", SqlType.String()), + ] + ) + assert metadata[0].column_name == "test1" + assert metadata["test1"].column_type == SqlType.Int64() + + # duplicate columns not accesible by name + with pytest.raises(KeyError, match="Ambigious column name"): + metadata["test2"] + + # duplicate columns accessible by index + assert metadata[1].column_type == SqlType.Bytes() + assert metadata[1].column_name == "test2" + assert metadata[2].column_type == SqlType.String() + assert metadata[2].column_name == "test2" diff --git a/packages/google-cloud-bigtable/tests/unit/data/test__helpers.py b/packages/google-cloud-bigtable/tests/unit/data/test__helpers.py index 5a9c500ed28b..12ab3181eda4 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/test__helpers.py +++ b/packages/google-cloud-bigtable/tests/unit/data/test__helpers.py @@ -23,16 +23,31 @@ class TestMakeMetadata: @pytest.mark.parametrize( - "table,profile,expected", + "table,profile,instance,expected", [ - ("table", "profile", "table_name=table&app_profile_id=profile"), - ("table", None, "table_name=table"), + ("table", "profile", None, "table_name=table&app_profile_id=profile"), + ("table", None, None, "table_name=table"), + (None, None, "instance", "name=instance"), + (None, "profile", None, "app_profile_id=profile"), + (None, "profile", "instance", "name=instance&app_profile_id=profile"), ], ) - def test__make_metadata(self, table, profile, expected): - metadata = _helpers._make_metadata(table, profile) + def test__make_metadata(self, table, profile, instance, expected): + metadata = _helpers._make_metadata(table, profile, instance) assert metadata == [("x-goog-request-params", expected)] + @pytest.mark.parametrize( + "table,profile,instance", + [ + ("table", None, "instance"), + ("table", "profile", "instance"), + (None, None, None), + ], + ) + def test__make_metadata_invalid_params(self, table, profile, instance): + with pytest.raises(ValueError): + _helpers._make_metadata(table, profile, instance) + class TestAttemptTimeoutGenerator: @pytest.mark.parametrize( diff --git a/packages/google-cloud-bigtable/tests/unit/data/test_helpers.py b/packages/google-cloud-bigtable/tests/unit/data/test_helpers.py new file mode 100644 index 000000000000..5d1ad70f8e97 --- /dev/null +++ 
b/packages/google-cloud-bigtable/tests/unit/data/test_helpers.py @@ -0,0 +1,45 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import pytest +from google.cloud.bigtable.helpers import batched + + +class TestBatched: + @pytest.mark.parametrize( + "input_list,batch_size,expected", + [ + ([1, 2, 3, 4, 5], 3, [[1, 2, 3], [4, 5]]), + ([1, 2, 3, 4, 5, 6], 3, [[1, 2, 3], [4, 5, 6]]), + ([1, 2, 3, 4, 5], 2, [[1, 2], [3, 4], [5]]), + ([1, 2, 3, 4, 5], 1, [[1], [2], [3], [4], [5]]), + ([1, 2, 3, 4, 5], 5, [[1, 2, 3, 4, 5]]), + ([], 1, []), + ], + ) + def test_batched(self, input_list, batch_size, expected): + result = list(batched(input_list, batch_size)) + assert list(map(list, result)) == expected + + @pytest.mark.parametrize( + "input_list,batch_size", + [ + ([1], 0), + ([1], -1), + ], + ) + def test_batched_errs(self, input_list, batch_size): + with pytest.raises(ValueError): + list(batched(input_list, batch_size)) diff --git a/packages/google-cloud-bigtable/tests/unit/v2_client/_testing.py b/packages/google-cloud-bigtable/tests/unit/v2_client/_testing.py index 302d33ac1540..855c0c10e95d 100644 --- a/packages/google-cloud-bigtable/tests/unit/v2_client/_testing.py +++ b/packages/google-cloud-bigtable/tests/unit/v2_client/_testing.py @@ -17,6 +17,9 @@ import mock +# flake8: noqa +from .._testing import TYPE_INT, split_bytes_into_chunks, proto_rows_bytes + class _FakeStub(object): """Acts as a gPRC stub.""" diff --git a/packages/google-cloud-bigtable/tests/unit/v2_client/test_instance.py b/packages/google-cloud-bigtable/tests/unit/v2_client/test_instance.py index 797e4bd9c9d2..de6844a165b6 100644 --- a/packages/google-cloud-bigtable/tests/unit/v2_client/test_instance.py +++ b/packages/google-cloud-bigtable/tests/unit/v2_client/test_instance.py @@ -19,6 +19,7 @@ from ._testing import _make_credentials from google.cloud.bigtable.cluster import Cluster + PROJECT = "project" INSTANCE_ID = "instance-id" INSTANCE_NAME = "projects/" + PROJECT + "/instances/" + INSTANCE_ID @@ -943,3 +944,28 @@ def _next_page(self): assert isinstance(app_profile_2, AppProfile) assert app_profile_2.name == app_profile_name2 + + +@pytest.fixture() +def data_api(): + from google.cloud.bigtable_v2.services.bigtable import BigtableClient + + data_api_mock = mock.create_autospec(BigtableClient) + data_api_mock.instance_path.return_value = ( + f"projects/{PROJECT}/instances/{INSTANCE_ID}" + ) + return data_api_mock + + +@pytest.fixture() +def client(data_api): + result = _make_client( + project="project-id", credentials=_make_credentials(), admin=True + ) + result._table_data_client = data_api + return result + + +@pytest.fixture() +def instance(client): + return client.instance(instance_id=INSTANCE_ID) From 026e9695d1d61a4033c268b0c8bdf348927afd0c Mon Sep 17 00:00:00 2001 From: Jack Dingilian Date: Thu, 8 Aug 2024 18:28:11 -0400 Subject: [PATCH 812/892] docs: add clarification around SQL timestamps (#1012) --- .../google/cloud/bigtable/data/execute_query/metadata.py | 6 ++++++ 
.../google/cloud/bigtable/data/execute_query/values.py | 3 +++ 2 files changed, 9 insertions(+) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/metadata.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/metadata.py index 98b94a644bde..4c08cbad310b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/metadata.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/metadata.py @@ -214,6 +214,12 @@ class Bool(Type): type_field_name = "bool_type" class Timestamp(Type): + """ + Timestamp supports :class:`DatetimeWithNanoseconds` but Bigtable SQL does + not currently support nanoseconds precision. We support this for potential + compatibility in the future. Nanoseconds are currently ignored. + """ + type_field_name = "timestamp_type" expected_types = ( datetime.datetime, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/values.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/values.py index 450f6f855148..394bef71ec01 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/values.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/values.py @@ -100,6 +100,9 @@ def __repr__(self) -> str: bool, bytes, str, + # Note that Bigtable SQL does not currently support nanosecond precision, + # only microseconds. We use this for compatibility with potential future + # support DatetimeWithNanoseconds, date_pb2.Date, "Struct", From 5cf2241383706e3be14b630fdf639e2034b666d8 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 8 Aug 2024 15:29:29 -0700 Subject: [PATCH 813/892] chore(python): fix docs build (#1008) Source-Link: https://github.com/googleapis/synthtool/commit/bef813d194de29ddf3576eda60148b6b3dcc93d9 Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:94bb690db96e6242b2567a4860a94d48fa48696d092e51b0884a1a2c0a79a407 Co-authored-by: Owl Bot --- .../.github/.OwlBot.lock.yaml | 3 +- .../.kokoro/docker/docs/Dockerfile | 26 ++++++----- .../.kokoro/docker/docs/requirements.txt | 40 ++++++++-------- .../.kokoro/publish-docs.sh | 20 ++++---- .../.kokoro/requirements.txt | 46 +++++++++---------- 5 files changed, 71 insertions(+), 64 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 620159621881..6d064ddb9b06 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,4 +13,5 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:5651442a6336971a2fb2df40fb56b3337df67cafa14c0809cc89cb34ccee1b8e + digest: sha256:94bb690db96e6242b2567a4860a94d48fa48696d092e51b0884a1a2c0a79a407 +# created: 2024-07-31T14:52:44.926548819Z diff --git a/packages/google-cloud-bigtable/.kokoro/docker/docs/Dockerfile b/packages/google-cloud-bigtable/.kokoro/docker/docs/Dockerfile index a26ce61930f5..e5410e296bd8 100644 --- a/packages/google-cloud-bigtable/.kokoro/docker/docs/Dockerfile +++ b/packages/google-cloud-bigtable/.kokoro/docker/docs/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from ubuntu:22.04 +from ubuntu:24.04 ENV DEBIAN_FRONTEND noninteractive @@ -40,7 +40,6 @@ RUN apt-get update \ libssl-dev \ libsqlite3-dev \ portaudio19-dev \ - python3-distutils \ redis-server \ software-properties-common \ ssh \ @@ -60,28 +59,31 @@ RUN apt-get update \ && rm -rf /var/lib/apt/lists/* \ && rm -f /var/cache/apt/archives/*.deb -###################### Install python 3.9.13 -# Download python 3.9.13 -RUN wget https://www.python.org/ftp/python/3.9.13/Python-3.9.13.tgz +###################### Install python 3.10.14 for docs/docfx session + +# Download python 3.10.14 +RUN wget https://www.python.org/ftp/python/3.10.14/Python-3.10.14.tgz # Extract files -RUN tar -xvf Python-3.9.13.tgz +RUN tar -xvf Python-3.10.14.tgz -# Install python 3.9.13 -RUN ./Python-3.9.13/configure --enable-optimizations +# Install python 3.10.14 +RUN ./Python-3.10.14/configure --enable-optimizations RUN make altinstall +ENV PATH /usr/local/bin/python3.10:$PATH + ###################### Install pip RUN wget -O /tmp/get-pip.py 'https://bootstrap.pypa.io/get-pip.py' \ - && python3 /tmp/get-pip.py \ + && python3.10 /tmp/get-pip.py \ && rm /tmp/get-pip.py # Test pip -RUN python3 -m pip +RUN python3.10 -m pip # Install build requirements COPY requirements.txt /requirements.txt -RUN python3 -m pip install --require-hashes -r requirements.txt +RUN python3.10 -m pip install --require-hashes -r requirements.txt -CMD ["python3.8"] +CMD ["python3.10"] diff --git a/packages/google-cloud-bigtable/.kokoro/docker/docs/requirements.txt b/packages/google-cloud-bigtable/.kokoro/docker/docs/requirements.txt index 0e5d70f20f83..7129c7715594 100644 --- a/packages/google-cloud-bigtable/.kokoro/docker/docs/requirements.txt +++ b/packages/google-cloud-bigtable/.kokoro/docker/docs/requirements.txt @@ -4,9 +4,9 @@ # # pip-compile --allow-unsafe --generate-hashes requirements.in # -argcomplete==3.2.3 \ - --hash=sha256:bf7900329262e481be5a15f56f19736b376df6f82ed27576fa893652c5de6c23 \ - --hash=sha256:c12355e0494c76a2a7b73e3a59b09024ca0ba1e279fb9ed6c1b82d5b74b6a70c +argcomplete==3.4.0 \ + --hash=sha256:69a79e083a716173e5532e0fa3bef45f793f4e61096cf52b5a42c0211c8b8aa5 \ + --hash=sha256:c2abcdfe1be8ace47ba777d4fce319eb13bf8ad9dace8d085dcad6eded88057f # via nox colorlog==6.8.2 \ --hash=sha256:3e3e079a41feb5a1b64f978b5ea4f46040a94f11f0e8bbb8261e3dbbeca64d44 \ @@ -16,23 +16,27 @@ distlib==0.3.8 \ --hash=sha256:034db59a0b96f8ca18035f36290806a9a6e6bd9d1ff91e45a7f172eb17e51784 \ --hash=sha256:1530ea13e350031b6312d8580ddb6b27a104275a31106523b8f123787f494f64 # via virtualenv -filelock==3.13.1 \ - --hash=sha256:521f5f56c50f8426f5e03ad3b281b490a87ef15bc6c526f168290f0c7148d44e \ - --hash=sha256:57dbda9b35157b05fb3e58ee91448612eb674172fab98ee235ccb0b5bee19a1c +filelock==3.15.4 \ + --hash=sha256:2207938cbc1844345cb01a5a95524dae30f0ce089eba5b00378295a17e3e90cb \ + --hash=sha256:6ca1fffae96225dab4c6eaf1c4f4f28cd2568d3ec2a44e15a08520504de468e7 # via virtualenv -nox==2024.3.2 \ - --hash=sha256:e53514173ac0b98dd47585096a55572fe504fecede58ced708979184d05440be \ - --hash=sha256:f521ae08a15adbf5e11f16cb34e8d0e6ea521e0b92868f684e91677deb974553 +nox==2024.4.15 \ + --hash=sha256:6492236efa15a460ecb98e7b67562a28b70da006ab0be164e8821177577c0565 \ + --hash=sha256:ecf6700199cdfa9e5ea0a41ff5e6ef4641d09508eda6edb89d9987864115817f # via -r requirements.in -packaging==24.0 \ - --hash=sha256:2ddfb553fdf02fb784c234c7ba6ccc288296ceabec964ad2eae3777778130bc5 \ - --hash=sha256:eb82c5e3e56209074766e6885bb04b8c38a0c015d0a30036ebe7ece34c9989e9 +packaging==24.1 \ + 
--hash=sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002 \ + --hash=sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124 # via nox -platformdirs==4.2.0 \ - --hash=sha256:0614df2a2f37e1a662acbd8e2b25b92ccf8632929bc6d43467e17fe89c75e068 \ - --hash=sha256:ef0cc731df711022c174543cb70a9b5bd22e5a9337c8624ef2c2ceb8ddad8768 +platformdirs==4.2.2 \ + --hash=sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee \ + --hash=sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3 # via virtualenv -virtualenv==20.25.1 \ - --hash=sha256:961c026ac520bac5f69acb8ea063e8a4f071bcc9457b9c1f28f6b085c511583a \ - --hash=sha256:e08e13ecdca7a0bd53798f356d5831434afa5b07b93f0abdf0797b7a06ffe197 +tomli==2.0.1 \ + --hash=sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc \ + --hash=sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f + # via nox +virtualenv==20.26.3 \ + --hash=sha256:4c43a2a236279d9ea36a0d76f98d84bd6ca94ac4e0f4a3b9d46d05e10fea542a \ + --hash=sha256:8cc4a31139e796e9a7de2cd5cf2489de1217193116a8fd42328f1bd65f434589 # via nox diff --git a/packages/google-cloud-bigtable/.kokoro/publish-docs.sh b/packages/google-cloud-bigtable/.kokoro/publish-docs.sh index 38f083f05aa0..233205d580e9 100755 --- a/packages/google-cloud-bigtable/.kokoro/publish-docs.sh +++ b/packages/google-cloud-bigtable/.kokoro/publish-docs.sh @@ -21,18 +21,18 @@ export PYTHONUNBUFFERED=1 export PATH="${HOME}/.local/bin:${PATH}" # Install nox -python3 -m pip install --require-hashes -r .kokoro/requirements.txt -python3 -m nox --version +python3.10 -m pip install --require-hashes -r .kokoro/requirements.txt +python3.10 -m nox --version # build docs nox -s docs # create metadata -python3 -m docuploader create-metadata \ +python3.10 -m docuploader create-metadata \ --name=$(jq --raw-output '.name // empty' .repo-metadata.json) \ - --version=$(python3 setup.py --version) \ + --version=$(python3.10 setup.py --version) \ --language=$(jq --raw-output '.language // empty' .repo-metadata.json) \ - --distribution-name=$(python3 setup.py --name) \ + --distribution-name=$(python3.10 setup.py --name) \ --product-page=$(jq --raw-output '.product_documentation // empty' .repo-metadata.json) \ --github-repository=$(jq --raw-output '.repo // empty' .repo-metadata.json) \ --issue-tracker=$(jq --raw-output '.issue_tracker // empty' .repo-metadata.json) @@ -40,18 +40,18 @@ python3 -m docuploader create-metadata \ cat docs.metadata # upload docs -python3 -m docuploader upload docs/_build/html --metadata-file docs.metadata --staging-bucket "${STAGING_BUCKET}" +python3.10 -m docuploader upload docs/_build/html --metadata-file docs.metadata --staging-bucket "${STAGING_BUCKET}" # docfx yaml files nox -s docfx # create metadata. 
-python3 -m docuploader create-metadata \ +python3.10 -m docuploader create-metadata \ --name=$(jq --raw-output '.name // empty' .repo-metadata.json) \ - --version=$(python3 setup.py --version) \ + --version=$(python3.10 setup.py --version) \ --language=$(jq --raw-output '.language // empty' .repo-metadata.json) \ - --distribution-name=$(python3 setup.py --name) \ + --distribution-name=$(python3.10 setup.py --name) \ --product-page=$(jq --raw-output '.product_documentation // empty' .repo-metadata.json) \ --github-repository=$(jq --raw-output '.repo // empty' .repo-metadata.json) \ --issue-tracker=$(jq --raw-output '.issue_tracker // empty' .repo-metadata.json) @@ -59,4 +59,4 @@ python3 -m docuploader create-metadata \ cat docs.metadata # upload docs -python3 -m docuploader upload docs/_build/html/docfx_yaml --metadata-file docs.metadata --destination-prefix docfx --staging-bucket "${V2_STAGING_BUCKET}" +python3.10 -m docuploader upload docs/_build/html/docfx_yaml --metadata-file docs.metadata --destination-prefix docfx --staging-bucket "${V2_STAGING_BUCKET}" diff --git a/packages/google-cloud-bigtable/.kokoro/requirements.txt b/packages/google-cloud-bigtable/.kokoro/requirements.txt index 35ece0e4d2e9..9622baf0ba38 100644 --- a/packages/google-cloud-bigtable/.kokoro/requirements.txt +++ b/packages/google-cloud-bigtable/.kokoro/requirements.txt @@ -20,9 +20,9 @@ cachetools==5.3.3 \ --hash=sha256:0abad1021d3f8325b2fc1d2e9c8b9c9d57b04c3932657a72465447332c24d945 \ --hash=sha256:ba29e2dfa0b8b556606f097407ed1aa62080ee108ab0dc5ec9d6a723a007d105 # via google-auth -certifi==2024.6.2 \ - --hash=sha256:3cd43f1c6fa7dedc5899d69d3ad0398fd018ad1a17fba83ddaf78aa46c747516 \ - --hash=sha256:ddc6c8ce995e6987e7faf5e3f1b02b302836a0e5d98ece18392cb1a36c72ad56 +certifi==2024.7.4 \ + --hash=sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b \ + --hash=sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90 # via requests cffi==1.16.0 \ --hash=sha256:0c9ef6ff37e974b73c25eecc13952c55bceed9112be2d9d938ded8e856138bcc \ @@ -371,23 +371,23 @@ more-itertools==10.3.0 \ # via # jaraco-classes # jaraco-functools -nh3==0.2.17 \ - --hash=sha256:0316c25b76289cf23be6b66c77d3608a4fdf537b35426280032f432f14291b9a \ - --hash=sha256:1a814dd7bba1cb0aba5bcb9bebcc88fd801b63e21e2450ae6c52d3b3336bc911 \ - --hash=sha256:1aa52a7def528297f256de0844e8dd680ee279e79583c76d6fa73a978186ddfb \ - --hash=sha256:22c26e20acbb253a5bdd33d432a326d18508a910e4dcf9a3316179860d53345a \ - --hash=sha256:40015514022af31975c0b3bca4014634fa13cb5dc4dbcbc00570acc781316dcc \ - --hash=sha256:40d0741a19c3d645e54efba71cb0d8c475b59135c1e3c580f879ad5514cbf028 \ - --hash=sha256:551672fd71d06cd828e282abdb810d1be24e1abb7ae2543a8fa36a71c1006fe9 \ - --hash=sha256:66f17d78826096291bd264f260213d2b3905e3c7fae6dfc5337d49429f1dc9f3 \ - --hash=sha256:85cdbcca8ef10733bd31f931956f7fbb85145a4d11ab9e6742bbf44d88b7e351 \ - --hash=sha256:a3f55fabe29164ba6026b5ad5c3151c314d136fd67415a17660b4aaddacf1b10 \ - --hash=sha256:b4427ef0d2dfdec10b641ed0bdaf17957eb625b2ec0ea9329b3d28806c153d71 \ - --hash=sha256:ba73a2f8d3a1b966e9cdba7b211779ad8a2561d2dba9674b8a19ed817923f65f \ - --hash=sha256:c21bac1a7245cbd88c0b0e4a420221b7bfa838a2814ee5bb924e9c2f10a1120b \ - --hash=sha256:c551eb2a3876e8ff2ac63dff1585236ed5dfec5ffd82216a7a174f7c5082a78a \ - --hash=sha256:c790769152308421283679a142dbdb3d1c46c79c823008ecea8e8141db1a2062 \ - --hash=sha256:d7a25fd8c86657f5d9d576268e3b3767c5cd4f42867c9383618be8517f0f022a +nh3==0.2.18 \ + 
--hash=sha256:0411beb0589eacb6734f28d5497ca2ed379eafab8ad8c84b31bb5c34072b7164 \ + --hash=sha256:14c5a72e9fe82aea5fe3072116ad4661af5cf8e8ff8fc5ad3450f123e4925e86 \ + --hash=sha256:19aaba96e0f795bd0a6c56291495ff59364f4300d4a39b29a0abc9cb3774a84b \ + --hash=sha256:34c03fa78e328c691f982b7c03d4423bdfd7da69cd707fe572f544cf74ac23ad \ + --hash=sha256:36c95d4b70530b320b365659bb5034341316e6a9b30f0b25fa9c9eff4c27a204 \ + --hash=sha256:3a157ab149e591bb638a55c8c6bcb8cdb559c8b12c13a8affaba6cedfe51713a \ + --hash=sha256:42c64511469005058cd17cc1537578eac40ae9f7200bedcfd1fc1a05f4f8c200 \ + --hash=sha256:5f36b271dae35c465ef5e9090e1fdaba4a60a56f0bb0ba03e0932a66f28b9189 \ + --hash=sha256:6955369e4d9f48f41e3f238a9e60f9410645db7e07435e62c6a9ea6135a4907f \ + --hash=sha256:7b7c2a3c9eb1a827d42539aa64091640bd275b81e097cd1d8d82ef91ffa2e811 \ + --hash=sha256:8ce0f819d2f1933953fca255db2471ad58184a60508f03e6285e5114b6254844 \ + --hash=sha256:94a166927e53972a9698af9542ace4e38b9de50c34352b962f4d9a7d4c927af4 \ + --hash=sha256:a7f1b5b2c15866f2db413a3649a8fe4fd7b428ae58be2c0f6bca5eefd53ca2be \ + --hash=sha256:c8b3a1cebcba9b3669ed1a84cc65bf005728d2f0bc1ed2a6594a992e817f3a50 \ + --hash=sha256:de3ceed6e661954871d6cd78b410213bdcb136f79aafe22aa7182e028b8c7307 \ + --hash=sha256:f0eca9ca8628dbb4e916ae2491d72957fdd35f7a5d326b7032a345f111ac07fe # via readme-renderer nox==2024.4.15 \ --hash=sha256:6492236efa15a460ecb98e7b67562a28b70da006ab0be164e8821177577c0565 \ @@ -460,9 +460,9 @@ python-dateutil==2.9.0.post0 \ --hash=sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3 \ --hash=sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427 # via gcp-releasetool -readme-renderer==43.0 \ - --hash=sha256:1818dd28140813509eeed8d62687f7cd4f7bad90d4db586001c5dc09d4fde311 \ - --hash=sha256:19db308d86ecd60e5affa3b2a98f017af384678c63c88e5d4556a380e674f3f9 +readme-renderer==44.0 \ + --hash=sha256:2fbca89b81a08526aadf1357a8c2ae889ec05fb03f5da67f9769c9a592166151 \ + --hash=sha256:8712034eabbfa6805cacf1402b4eeb2a73028f72d1166d6f5cb7f9c047c5d1e1 # via twine requests==2.32.3 \ --hash=sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760 \ From cbe581fb75a28c0e4684fa37fdea9383db378b7c Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 8 Aug 2024 15:30:33 -0700 Subject: [PATCH 814/892] feat: add fields and the BackupType proto for Hot Backups (#1010) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: add fields and the BackupType proto for Hot Backups docs: clarify comments and fix typos PiperOrigin-RevId: 658791576 Source-Link: https://github.com/googleapis/googleapis/commit/c93b54fa3060c7185f6dc724f0f9ec0c12bc44fc Source-Link: https://github.com/googleapis/googleapis-gen/commit/e52ba38a95a82f7588d0dd3a2284c98850dab9e1 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiZTUyYmEzOGE5NWE4MmY3NTg4ZDBkZDNhMjI4NGM5ODg1MGRhYjllMSJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- .../bigtable_table_admin/async_client.py | 6 +- .../services/bigtable_table_admin/client.py | 6 +- .../bigtable_table_admin/transports/grpc.py | 2 +- .../transports/grpc_asyncio.py | 2 +- .../types/bigtable_table_admin.py | 2 +- .../cloud/bigtable_admin_v2/types/table.py | 
66 +++++++++++++++++-- .../test_bigtable_table_admin.py | 18 +++++ 7 files changed, 86 insertions(+), 16 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index 7454e08ac0d5..a59302efb4e8 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -2777,7 +2777,7 @@ async def restore_table( operation][google.longrunning.Operation] can be used to track the progress of the operation, and to cancel it. The [metadata][google.longrunning.Operation.metadata] field type is - [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. + [RestoreTableMetadata][google.bigtable.admin.v2.RestoreTableMetadata]. The [response][google.longrunning.Operation.response] type is [Table][google.bigtable.admin.v2.Table], if successful. @@ -2862,8 +2862,8 @@ async def copy_backup( [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup]. parent (:class:`str`): Required. The name of the destination cluster that will - contain the backup copy. The cluster must already - exists. Values are of the form: + contain the backup copy. The cluster must already exist. + Values are of the form: ``projects/{project}/instances/{instance}/clusters/{cluster}``. This corresponds to the ``parent`` field diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index 4645d4f3b077..b7be597effb2 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -3245,7 +3245,7 @@ def restore_table( operation][google.longrunning.Operation] can be used to track the progress of the operation, and to cancel it. The [metadata][google.longrunning.Operation.metadata] field type is - [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. + [RestoreTableMetadata][google.bigtable.admin.v2.RestoreTableMetadata]. The [response][google.longrunning.Operation.response] type is [Table][google.bigtable.admin.v2.Table], if successful. @@ -3328,8 +3328,8 @@ def copy_backup( [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup]. parent (str): Required. The name of the destination cluster that will - contain the backup copy. The cluster must already - exists. Values are of the form: + contain the backup copy. The cluster must already exist. + Values are of the form: ``projects/{project}/instances/{instance}/clusters/{cluster}``. 
This corresponds to the ``parent`` field diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py index 71f06947f6d8..8b0eadbbcf8a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py @@ -1019,7 +1019,7 @@ def restore_table( operation][google.longrunning.Operation] can be used to track the progress of the operation, and to cancel it. The [metadata][google.longrunning.Operation.metadata] field type is - [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. + [RestoreTableMetadata][google.bigtable.admin.v2.RestoreTableMetadata]. The [response][google.longrunning.Operation.response] type is [Table][google.bigtable.admin.v2.Table], if successful. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py index bdd6e20c810c..e8b31ed36d7d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py @@ -1048,7 +1048,7 @@ def restore_table( operation][google.longrunning.Operation] can be used to track the progress of the operation, and to cancel it. The [metadata][google.longrunning.Operation.metadata] field type is - [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. + [RestoreTableMetadata][google.bigtable.admin.v2.RestoreTableMetadata]. The [response][google.longrunning.Operation.response] type is [Table][google.bigtable.admin.v2.Table], if successful. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py index 0bc3b6b81d11..9d1bf3ef5774 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py @@ -1356,7 +1356,7 @@ class CopyBackupRequest(proto.Message): Attributes: parent (str): Required. The name of the destination cluster that will - contain the backup copy. The cluster must already exists. + contain the backup copy. The cluster must already exist. Values are of the form: ``projects/{project}/instances/{instance}/clusters/{cluster}``. backup_id (str): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py index ef162bee1fde..241d7853c13d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py @@ -781,13 +781,18 @@ class Backup(proto.Message): this backup was copied. If a backup is not created by copying a backup, this field will be empty. Values are of the form: - projects//instances//backups/. + + projects//instances//clusters//backups/ expire_time (google.protobuf.timestamp_pb2.Timestamp): - Required. 
The expiration time of the backup, with - microseconds granularity that must be at least 6 hours and - at most 90 days from the time the request is received. Once - the ``expire_time`` has passed, Cloud Bigtable will delete - the backup and free the resources used by the backup. + Required. The expiration time of the backup. When creating a + backup or updating its ``expire_time``, the value must be + greater than the backup creation time by: + + - At least 6 hours + - At most 90 days + + Once the ``expire_time`` has passed, Cloud Bigtable will + delete the backup. start_time (google.protobuf.timestamp_pb2.Timestamp): Output only. ``start_time`` is the time that the backup was started (i.e. approximately the time the @@ -805,6 +810,20 @@ class Backup(proto.Message): encryption_info (google.cloud.bigtable_admin_v2.types.EncryptionInfo): Output only. The encryption information for the backup. + backup_type (google.cloud.bigtable_admin_v2.types.Backup.BackupType): + Indicates the backup type of the backup. + hot_to_standard_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the hot backup will be converted to a + standard backup. Once the ``hot_to_standard_time`` has + passed, Cloud Bigtable will convert the hot backup to a + standard backup. This value must be greater than the backup + creation time by: + + - At least 24 hours + + This field only applies for hot backups. When creating or + updating a standard backup, attempting to set this field + will fail the request. """ class State(proto.Enum): @@ -823,6 +842,28 @@ class State(proto.Enum): CREATING = 1 READY = 2 + class BackupType(proto.Enum): + r"""The type of the backup. + + Values: + BACKUP_TYPE_UNSPECIFIED (0): + Not specified. + STANDARD (1): + The default type for Cloud Bigtable managed + backups. Supported for backups created in both + HDD and SSD instances. Requires optimization + when restored to a table in an SSD instance. + HOT (2): + A backup type with faster restore to SSD + performance. Only supported for backups created + in SSD instances. A new SSD table restored from + a hot backup reaches production performance more + quickly than a standard backup. + """ + BACKUP_TYPE_UNSPECIFIED = 0 + STANDARD = 1 + HOT = 2 + name: str = proto.Field( proto.STRING, number=1, @@ -864,6 +905,16 @@ class State(proto.Enum): number=9, message="EncryptionInfo", ) + backup_type: BackupType = proto.Field( + proto.ENUM, + number=11, + enum=BackupType, + ) + hot_to_standard_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=12, + message=timestamp_pb2.Timestamp, + ) class BackupInfo(proto.Message): @@ -888,7 +939,8 @@ class BackupInfo(proto.Message): this backup was copied. If a backup is not created by copying a backup, this field will be empty. Values are of the form: - projects//instances//backups/. 
+ + projects//instances//clusters//backups/ """ backup: str = proto.Field( diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index 2b84213bc68a..5801890449d5 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -9753,6 +9753,7 @@ def test_get_backup(request_type, transport: str = "grpc"): source_backup="source_backup_value", size_bytes=1089, state=table.Backup.State.CREATING, + backup_type=table.Backup.BackupType.STANDARD, ) response = client.get_backup(request) @@ -9769,6 +9770,7 @@ def test_get_backup(request_type, transport: str = "grpc"): assert response.source_backup == "source_backup_value" assert response.size_bytes == 1089 assert response.state == table.Backup.State.CREATING + assert response.backup_type == table.Backup.BackupType.STANDARD def test_get_backup_empty_call(): @@ -9872,6 +9874,7 @@ async def test_get_backup_empty_call_async(): source_backup="source_backup_value", size_bytes=1089, state=table.Backup.State.CREATING, + backup_type=table.Backup.BackupType.STANDARD, ) ) response = await client.get_backup() @@ -9942,6 +9945,7 @@ async def test_get_backup_async( source_backup="source_backup_value", size_bytes=1089, state=table.Backup.State.CREATING, + backup_type=table.Backup.BackupType.STANDARD, ) ) response = await client.get_backup(request) @@ -9959,6 +9963,7 @@ async def test_get_backup_async( assert response.source_backup == "source_backup_value" assert response.size_bytes == 1089 assert response.state == table.Backup.State.CREATING + assert response.backup_type == table.Backup.BackupType.STANDARD @pytest.mark.asyncio @@ -10131,6 +10136,7 @@ def test_update_backup(request_type, transport: str = "grpc"): source_backup="source_backup_value", size_bytes=1089, state=table.Backup.State.CREATING, + backup_type=table.Backup.BackupType.STANDARD, ) response = client.update_backup(request) @@ -10147,6 +10153,7 @@ def test_update_backup(request_type, transport: str = "grpc"): assert response.source_backup == "source_backup_value" assert response.size_bytes == 1089 assert response.state == table.Backup.State.CREATING + assert response.backup_type == table.Backup.BackupType.STANDARD def test_update_backup_empty_call(): @@ -10246,6 +10253,7 @@ async def test_update_backup_empty_call_async(): source_backup="source_backup_value", size_bytes=1089, state=table.Backup.State.CREATING, + backup_type=table.Backup.BackupType.STANDARD, ) ) response = await client.update_backup() @@ -10319,6 +10327,7 @@ async def test_update_backup_async( source_backup="source_backup_value", size_bytes=1089, state=table.Backup.State.CREATING, + backup_type=table.Backup.BackupType.STANDARD, ) ) response = await client.update_backup(request) @@ -10336,6 +10345,7 @@ async def test_update_backup_async( assert response.source_backup == "source_backup_value" assert response.size_bytes == 1089 assert response.state == table.Backup.State.CREATING + assert response.backup_type == table.Backup.BackupType.STANDARD @pytest.mark.asyncio @@ -19957,6 +19967,8 @@ def test_create_backup_rest(request_type): }, "kms_key_version": "kms_key_version_value", }, + "backup_type": 1, + "hot_to_standard_time": {}, } # The version of a generated dependency at test runtime may differ from the version used during generation. 
# Delete any fields which are not present in the current runtime dependency @@ -20368,6 +20380,7 @@ def test_get_backup_rest(request_type): source_backup="source_backup_value", size_bytes=1089, state=table.Backup.State.CREATING, + backup_type=table.Backup.BackupType.STANDARD, ) # Wrap the value into a proper Response obj @@ -20388,6 +20401,7 @@ def test_get_backup_rest(request_type): assert response.source_backup == "source_backup_value" assert response.size_bytes == 1089 assert response.state == table.Backup.State.CREATING + assert response.backup_type == table.Backup.BackupType.STANDARD def test_get_backup_rest_use_cached_wrapped_rpc(): @@ -20697,6 +20711,8 @@ def test_update_backup_rest(request_type): }, "kms_key_version": "kms_key_version_value", }, + "backup_type": 1, + "hot_to_standard_time": {}, } # The version of a generated dependency at test runtime may differ from the version used during generation. # Delete any fields which are not present in the current runtime dependency @@ -20776,6 +20792,7 @@ def get_message_fields(field): source_backup="source_backup_value", size_bytes=1089, state=table.Backup.State.CREATING, + backup_type=table.Backup.BackupType.STANDARD, ) # Wrap the value into a proper Response obj @@ -20796,6 +20813,7 @@ def get_message_fields(field): assert response.source_backup == "source_backup_value" assert response.size_bytes == 1089 assert response.state == table.Backup.State.CREATING + assert response.backup_type == table.Backup.BackupType.STANDARD def test_update_backup_rest_use_cached_wrapped_rpc(): From 75509ee7406f9c99a4294206bea280d7326caebe Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Fri, 9 Aug 2024 15:03:56 -0600 Subject: [PATCH 815/892] Revert "chore(python): fix docs build (#1008)" (#1013) This reverts commit 6686abfab4cd641b37ea5241afbfeee7ff9a81f1. --- .../.github/.OwlBot.lock.yaml | 3 +- .../.kokoro/docker/docs/Dockerfile | 26 +++++------ .../.kokoro/docker/docs/requirements.txt | 40 ++++++++-------- .../.kokoro/publish-docs.sh | 20 ++++---- .../.kokoro/requirements.txt | 46 +++++++++---------- 5 files changed, 64 insertions(+), 71 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 6d064ddb9b06..620159621881 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,5 +13,4 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:94bb690db96e6242b2567a4860a94d48fa48696d092e51b0884a1a2c0a79a407 -# created: 2024-07-31T14:52:44.926548819Z + digest: sha256:5651442a6336971a2fb2df40fb56b3337df67cafa14c0809cc89cb34ccee1b8e diff --git a/packages/google-cloud-bigtable/.kokoro/docker/docs/Dockerfile b/packages/google-cloud-bigtable/.kokoro/docker/docs/Dockerfile index e5410e296bd8..a26ce61930f5 100644 --- a/packages/google-cloud-bigtable/.kokoro/docker/docs/Dockerfile +++ b/packages/google-cloud-bigtable/.kokoro/docker/docs/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from ubuntu:24.04 +from ubuntu:22.04 ENV DEBIAN_FRONTEND noninteractive @@ -40,6 +40,7 @@ RUN apt-get update \ libssl-dev \ libsqlite3-dev \ portaudio19-dev \ + python3-distutils \ redis-server \ software-properties-common \ ssh \ @@ -59,31 +60,28 @@ RUN apt-get update \ && rm -rf /var/lib/apt/lists/* \ && rm -f /var/cache/apt/archives/*.deb +###################### Install python 3.9.13 -###################### Install python 3.10.14 for docs/docfx session - -# Download python 3.10.14 -RUN wget https://www.python.org/ftp/python/3.10.14/Python-3.10.14.tgz +# Download python 3.9.13 +RUN wget https://www.python.org/ftp/python/3.9.13/Python-3.9.13.tgz # Extract files -RUN tar -xvf Python-3.10.14.tgz +RUN tar -xvf Python-3.9.13.tgz -# Install python 3.10.14 -RUN ./Python-3.10.14/configure --enable-optimizations +# Install python 3.9.13 +RUN ./Python-3.9.13/configure --enable-optimizations RUN make altinstall -ENV PATH /usr/local/bin/python3.10:$PATH - ###################### Install pip RUN wget -O /tmp/get-pip.py 'https://bootstrap.pypa.io/get-pip.py' \ - && python3.10 /tmp/get-pip.py \ + && python3 /tmp/get-pip.py \ && rm /tmp/get-pip.py # Test pip -RUN python3.10 -m pip +RUN python3 -m pip # Install build requirements COPY requirements.txt /requirements.txt -RUN python3.10 -m pip install --require-hashes -r requirements.txt +RUN python3 -m pip install --require-hashes -r requirements.txt -CMD ["python3.10"] +CMD ["python3.8"] diff --git a/packages/google-cloud-bigtable/.kokoro/docker/docs/requirements.txt b/packages/google-cloud-bigtable/.kokoro/docker/docs/requirements.txt index 7129c7715594..0e5d70f20f83 100644 --- a/packages/google-cloud-bigtable/.kokoro/docker/docs/requirements.txt +++ b/packages/google-cloud-bigtable/.kokoro/docker/docs/requirements.txt @@ -4,9 +4,9 @@ # # pip-compile --allow-unsafe --generate-hashes requirements.in # -argcomplete==3.4.0 \ - --hash=sha256:69a79e083a716173e5532e0fa3bef45f793f4e61096cf52b5a42c0211c8b8aa5 \ - --hash=sha256:c2abcdfe1be8ace47ba777d4fce319eb13bf8ad9dace8d085dcad6eded88057f +argcomplete==3.2.3 \ + --hash=sha256:bf7900329262e481be5a15f56f19736b376df6f82ed27576fa893652c5de6c23 \ + --hash=sha256:c12355e0494c76a2a7b73e3a59b09024ca0ba1e279fb9ed6c1b82d5b74b6a70c # via nox colorlog==6.8.2 \ --hash=sha256:3e3e079a41feb5a1b64f978b5ea4f46040a94f11f0e8bbb8261e3dbbeca64d44 \ @@ -16,27 +16,23 @@ distlib==0.3.8 \ --hash=sha256:034db59a0b96f8ca18035f36290806a9a6e6bd9d1ff91e45a7f172eb17e51784 \ --hash=sha256:1530ea13e350031b6312d8580ddb6b27a104275a31106523b8f123787f494f64 # via virtualenv -filelock==3.15.4 \ - --hash=sha256:2207938cbc1844345cb01a5a95524dae30f0ce089eba5b00378295a17e3e90cb \ - --hash=sha256:6ca1fffae96225dab4c6eaf1c4f4f28cd2568d3ec2a44e15a08520504de468e7 +filelock==3.13.1 \ + --hash=sha256:521f5f56c50f8426f5e03ad3b281b490a87ef15bc6c526f168290f0c7148d44e \ + --hash=sha256:57dbda9b35157b05fb3e58ee91448612eb674172fab98ee235ccb0b5bee19a1c # via virtualenv -nox==2024.4.15 \ - --hash=sha256:6492236efa15a460ecb98e7b67562a28b70da006ab0be164e8821177577c0565 \ - --hash=sha256:ecf6700199cdfa9e5ea0a41ff5e6ef4641d09508eda6edb89d9987864115817f +nox==2024.3.2 \ + --hash=sha256:e53514173ac0b98dd47585096a55572fe504fecede58ced708979184d05440be \ + --hash=sha256:f521ae08a15adbf5e11f16cb34e8d0e6ea521e0b92868f684e91677deb974553 # via -r requirements.in -packaging==24.1 \ - --hash=sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002 \ - --hash=sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124 +packaging==24.0 \ + 
--hash=sha256:2ddfb553fdf02fb784c234c7ba6ccc288296ceabec964ad2eae3777778130bc5 \ + --hash=sha256:eb82c5e3e56209074766e6885bb04b8c38a0c015d0a30036ebe7ece34c9989e9 # via nox -platformdirs==4.2.2 \ - --hash=sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee \ - --hash=sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3 +platformdirs==4.2.0 \ + --hash=sha256:0614df2a2f37e1a662acbd8e2b25b92ccf8632929bc6d43467e17fe89c75e068 \ + --hash=sha256:ef0cc731df711022c174543cb70a9b5bd22e5a9337c8624ef2c2ceb8ddad8768 # via virtualenv -tomli==2.0.1 \ - --hash=sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc \ - --hash=sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f - # via nox -virtualenv==20.26.3 \ - --hash=sha256:4c43a2a236279d9ea36a0d76f98d84bd6ca94ac4e0f4a3b9d46d05e10fea542a \ - --hash=sha256:8cc4a31139e796e9a7de2cd5cf2489de1217193116a8fd42328f1bd65f434589 +virtualenv==20.25.1 \ + --hash=sha256:961c026ac520bac5f69acb8ea063e8a4f071bcc9457b9c1f28f6b085c511583a \ + --hash=sha256:e08e13ecdca7a0bd53798f356d5831434afa5b07b93f0abdf0797b7a06ffe197 # via nox diff --git a/packages/google-cloud-bigtable/.kokoro/publish-docs.sh b/packages/google-cloud-bigtable/.kokoro/publish-docs.sh index 233205d580e9..38f083f05aa0 100755 --- a/packages/google-cloud-bigtable/.kokoro/publish-docs.sh +++ b/packages/google-cloud-bigtable/.kokoro/publish-docs.sh @@ -21,18 +21,18 @@ export PYTHONUNBUFFERED=1 export PATH="${HOME}/.local/bin:${PATH}" # Install nox -python3.10 -m pip install --require-hashes -r .kokoro/requirements.txt -python3.10 -m nox --version +python3 -m pip install --require-hashes -r .kokoro/requirements.txt +python3 -m nox --version # build docs nox -s docs # create metadata -python3.10 -m docuploader create-metadata \ +python3 -m docuploader create-metadata \ --name=$(jq --raw-output '.name // empty' .repo-metadata.json) \ - --version=$(python3.10 setup.py --version) \ + --version=$(python3 setup.py --version) \ --language=$(jq --raw-output '.language // empty' .repo-metadata.json) \ - --distribution-name=$(python3.10 setup.py --name) \ + --distribution-name=$(python3 setup.py --name) \ --product-page=$(jq --raw-output '.product_documentation // empty' .repo-metadata.json) \ --github-repository=$(jq --raw-output '.repo // empty' .repo-metadata.json) \ --issue-tracker=$(jq --raw-output '.issue_tracker // empty' .repo-metadata.json) @@ -40,18 +40,18 @@ python3.10 -m docuploader create-metadata \ cat docs.metadata # upload docs -python3.10 -m docuploader upload docs/_build/html --metadata-file docs.metadata --staging-bucket "${STAGING_BUCKET}" +python3 -m docuploader upload docs/_build/html --metadata-file docs.metadata --staging-bucket "${STAGING_BUCKET}" # docfx yaml files nox -s docfx # create metadata. 
-python3.10 -m docuploader create-metadata \ +python3 -m docuploader create-metadata \ --name=$(jq --raw-output '.name // empty' .repo-metadata.json) \ - --version=$(python3.10 setup.py --version) \ + --version=$(python3 setup.py --version) \ --language=$(jq --raw-output '.language // empty' .repo-metadata.json) \ - --distribution-name=$(python3.10 setup.py --name) \ + --distribution-name=$(python3 setup.py --name) \ --product-page=$(jq --raw-output '.product_documentation // empty' .repo-metadata.json) \ --github-repository=$(jq --raw-output '.repo // empty' .repo-metadata.json) \ --issue-tracker=$(jq --raw-output '.issue_tracker // empty' .repo-metadata.json) @@ -59,4 +59,4 @@ python3.10 -m docuploader create-metadata \ cat docs.metadata # upload docs -python3.10 -m docuploader upload docs/_build/html/docfx_yaml --metadata-file docs.metadata --destination-prefix docfx --staging-bucket "${V2_STAGING_BUCKET}" +python3 -m docuploader upload docs/_build/html/docfx_yaml --metadata-file docs.metadata --destination-prefix docfx --staging-bucket "${V2_STAGING_BUCKET}" diff --git a/packages/google-cloud-bigtable/.kokoro/requirements.txt b/packages/google-cloud-bigtable/.kokoro/requirements.txt index 9622baf0ba38..35ece0e4d2e9 100644 --- a/packages/google-cloud-bigtable/.kokoro/requirements.txt +++ b/packages/google-cloud-bigtable/.kokoro/requirements.txt @@ -20,9 +20,9 @@ cachetools==5.3.3 \ --hash=sha256:0abad1021d3f8325b2fc1d2e9c8b9c9d57b04c3932657a72465447332c24d945 \ --hash=sha256:ba29e2dfa0b8b556606f097407ed1aa62080ee108ab0dc5ec9d6a723a007d105 # via google-auth -certifi==2024.7.4 \ - --hash=sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b \ - --hash=sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90 +certifi==2024.6.2 \ + --hash=sha256:3cd43f1c6fa7dedc5899d69d3ad0398fd018ad1a17fba83ddaf78aa46c747516 \ + --hash=sha256:ddc6c8ce995e6987e7faf5e3f1b02b302836a0e5d98ece18392cb1a36c72ad56 # via requests cffi==1.16.0 \ --hash=sha256:0c9ef6ff37e974b73c25eecc13952c55bceed9112be2d9d938ded8e856138bcc \ @@ -371,23 +371,23 @@ more-itertools==10.3.0 \ # via # jaraco-classes # jaraco-functools -nh3==0.2.18 \ - --hash=sha256:0411beb0589eacb6734f28d5497ca2ed379eafab8ad8c84b31bb5c34072b7164 \ - --hash=sha256:14c5a72e9fe82aea5fe3072116ad4661af5cf8e8ff8fc5ad3450f123e4925e86 \ - --hash=sha256:19aaba96e0f795bd0a6c56291495ff59364f4300d4a39b29a0abc9cb3774a84b \ - --hash=sha256:34c03fa78e328c691f982b7c03d4423bdfd7da69cd707fe572f544cf74ac23ad \ - --hash=sha256:36c95d4b70530b320b365659bb5034341316e6a9b30f0b25fa9c9eff4c27a204 \ - --hash=sha256:3a157ab149e591bb638a55c8c6bcb8cdb559c8b12c13a8affaba6cedfe51713a \ - --hash=sha256:42c64511469005058cd17cc1537578eac40ae9f7200bedcfd1fc1a05f4f8c200 \ - --hash=sha256:5f36b271dae35c465ef5e9090e1fdaba4a60a56f0bb0ba03e0932a66f28b9189 \ - --hash=sha256:6955369e4d9f48f41e3f238a9e60f9410645db7e07435e62c6a9ea6135a4907f \ - --hash=sha256:7b7c2a3c9eb1a827d42539aa64091640bd275b81e097cd1d8d82ef91ffa2e811 \ - --hash=sha256:8ce0f819d2f1933953fca255db2471ad58184a60508f03e6285e5114b6254844 \ - --hash=sha256:94a166927e53972a9698af9542ace4e38b9de50c34352b962f4d9a7d4c927af4 \ - --hash=sha256:a7f1b5b2c15866f2db413a3649a8fe4fd7b428ae58be2c0f6bca5eefd53ca2be \ - --hash=sha256:c8b3a1cebcba9b3669ed1a84cc65bf005728d2f0bc1ed2a6594a992e817f3a50 \ - --hash=sha256:de3ceed6e661954871d6cd78b410213bdcb136f79aafe22aa7182e028b8c7307 \ - --hash=sha256:f0eca9ca8628dbb4e916ae2491d72957fdd35f7a5d326b7032a345f111ac07fe +nh3==0.2.17 \ + 
--hash=sha256:0316c25b76289cf23be6b66c77d3608a4fdf537b35426280032f432f14291b9a \ + --hash=sha256:1a814dd7bba1cb0aba5bcb9bebcc88fd801b63e21e2450ae6c52d3b3336bc911 \ + --hash=sha256:1aa52a7def528297f256de0844e8dd680ee279e79583c76d6fa73a978186ddfb \ + --hash=sha256:22c26e20acbb253a5bdd33d432a326d18508a910e4dcf9a3316179860d53345a \ + --hash=sha256:40015514022af31975c0b3bca4014634fa13cb5dc4dbcbc00570acc781316dcc \ + --hash=sha256:40d0741a19c3d645e54efba71cb0d8c475b59135c1e3c580f879ad5514cbf028 \ + --hash=sha256:551672fd71d06cd828e282abdb810d1be24e1abb7ae2543a8fa36a71c1006fe9 \ + --hash=sha256:66f17d78826096291bd264f260213d2b3905e3c7fae6dfc5337d49429f1dc9f3 \ + --hash=sha256:85cdbcca8ef10733bd31f931956f7fbb85145a4d11ab9e6742bbf44d88b7e351 \ + --hash=sha256:a3f55fabe29164ba6026b5ad5c3151c314d136fd67415a17660b4aaddacf1b10 \ + --hash=sha256:b4427ef0d2dfdec10b641ed0bdaf17957eb625b2ec0ea9329b3d28806c153d71 \ + --hash=sha256:ba73a2f8d3a1b966e9cdba7b211779ad8a2561d2dba9674b8a19ed817923f65f \ + --hash=sha256:c21bac1a7245cbd88c0b0e4a420221b7bfa838a2814ee5bb924e9c2f10a1120b \ + --hash=sha256:c551eb2a3876e8ff2ac63dff1585236ed5dfec5ffd82216a7a174f7c5082a78a \ + --hash=sha256:c790769152308421283679a142dbdb3d1c46c79c823008ecea8e8141db1a2062 \ + --hash=sha256:d7a25fd8c86657f5d9d576268e3b3767c5cd4f42867c9383618be8517f0f022a # via readme-renderer nox==2024.4.15 \ --hash=sha256:6492236efa15a460ecb98e7b67562a28b70da006ab0be164e8821177577c0565 \ @@ -460,9 +460,9 @@ python-dateutil==2.9.0.post0 \ --hash=sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3 \ --hash=sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427 # via gcp-releasetool -readme-renderer==44.0 \ - --hash=sha256:2fbca89b81a08526aadf1357a8c2ae889ec05fb03f5da67f9769c9a592166151 \ - --hash=sha256:8712034eabbfa6805cacf1402b4eeb2a73028f72d1166d6f5cb7f9c047c5d1e1 +readme-renderer==43.0 \ + --hash=sha256:1818dd28140813509eeed8d62687f7cd4f7bad90d4db586001c5dc09d4fde311 \ + --hash=sha256:19db308d86ecd60e5affa3b2a98f017af384678c63c88e5d4556a380e674f3f9 # via twine requests==2.32.3 \ --hash=sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760 \ From 3c22951908654e86f27c0e9a0c053261dfe9d965 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Fri, 9 Aug 2024 14:04:45 -0700 Subject: [PATCH 816/892] chore: Update gapic-generator-python to v1.18.5 (#1015) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore: Update gapic-generator-python to v1.18.5 PiperOrigin-RevId: 661268868 Source-Link: https://github.com/googleapis/googleapis/commit/f7d214cb08cd7d9b018d44564a8b184263f64177 Source-Link: https://github.com/googleapis/googleapis-gen/commit/79a8411bbdb25a983fa3aae8c0e14327df129f94 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiNzlhODQxMWJiZGIyNWE5ODNmYTNhYWU4YzBlMTQzMjdkZjEyOWY5NCJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- .../bigtable_instance_admin/async_client.py | 6 +- .../bigtable_instance_admin/client.py | 2 +- .../bigtable_table_admin/async_client.py | 6 +- .../services/bigtable_table_admin/client.py | 2 +- .../services/bigtable/async_client.py | 5 +- .../bigtable_v2/services/bigtable/client.py | 2 +- .../test_bigtable_instance_admin.py | 249 
++++++------ .../test_bigtable_table_admin.py | 360 ++++++++++-------- .../unit/gapic/bigtable_v2/test_bigtable.py | 90 +++-- 9 files changed, 401 insertions(+), 321 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py index abed851d59ef..b6e77aaea334 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ -14,7 +14,6 @@ # limitations under the License. # from collections import OrderedDict -import functools import re from typing import ( Dict, @@ -218,10 +217,7 @@ def universe_domain(self) -> str: """ return self._client._universe_domain - get_transport_class = functools.partial( - type(BigtableInstanceAdminClient).get_transport_class, - type(BigtableInstanceAdminClient), - ) + get_transport_class = BigtableInstanceAdminClient.get_transport_class def __init__( self, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py index 5877342c4f16..b8173bf4b628 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -790,7 +790,7 @@ def __init__( Type[BigtableInstanceAdminTransport], Callable[..., BigtableInstanceAdminTransport], ] = ( - type(self).get_transport_class(transport) + BigtableInstanceAdminClient.get_transport_class(transport) if isinstance(transport, str) or transport is None else cast(Callable[..., BigtableInstanceAdminTransport], transport) ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index a59302efb4e8..2e9eb13ebf5a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -14,7 +14,6 @@ # limitations under the License. 
# from collections import OrderedDict -import functools import re from typing import ( Dict, @@ -218,10 +217,7 @@ def universe_domain(self) -> str: """ return self._client._universe_domain - get_transport_class = functools.partial( - type(BigtableTableAdminClient).get_transport_class, - type(BigtableTableAdminClient), - ) + get_transport_class = BigtableTableAdminClient.get_transport_class def __init__( self, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index b7be597effb2..55d50ee819ac 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -814,7 +814,7 @@ def __init__( Type[BigtableTableAdminTransport], Callable[..., BigtableTableAdminTransport], ] = ( - type(self).get_transport_class(transport) + BigtableTableAdminClient.get_transport_class(transport) if isinstance(transport, str) or transport is None else cast(Callable[..., BigtableTableAdminTransport], transport) ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py index 1ed7a47408d9..54b7f2c63cf3 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -14,7 +14,6 @@ # limitations under the License. # from collections import OrderedDict -import functools import re from typing import ( Dict, @@ -188,9 +187,7 @@ def universe_domain(self) -> str: """ return self._client._universe_domain - get_transport_class = functools.partial( - type(BigtableClient).get_transport_class, type(BigtableClient) - ) + get_transport_class = BigtableClient.get_transport_class def __init__( self, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py index 0937c90fe761..86fa6b3a5eb2 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py @@ -701,7 +701,7 @@ def __init__( transport_init: Union[ Type[BigtableTransport], Callable[..., BigtableTransport] ] = ( - type(self).get_transport_class(transport) + BigtableClient.get_transport_class(transport) if isinstance(transport, str) or transport is None else cast(Callable[..., BigtableTransport], transport) ) diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index ea6737973a22..26a7989a195d 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -1326,8 +1326,9 @@ def test_create_instance_use_cached_wrapped_rpc(): # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() client.create_instance(request) @@ -1381,26 +1382,28 @@ async def test_create_instance_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.create_instance - ] = mock_object + ] = mock_rpc request = {} await client.create_instance(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() await client.create_instance(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -1789,22 +1792,23 @@ async def test_get_instance_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.get_instance - ] = mock_object + ] = mock_rpc request = {} await client.get_instance(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.get_instance(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -2164,22 +2168,23 @@ async def test_list_instances_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.list_instances - ] = mock_object + ] = mock_rpc request = {} await client.list_instances(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.list_instances(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -2545,22 +2550,23 @@ async def test_update_instance_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.update_instance - ] = mock_object + ] = mock_rpc request = {} await client.update_instance(request) # Establish that the underlying gRPC stub method was called. 
- assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.update_instance(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -2784,8 +2790,9 @@ def test_partial_update_instance_use_cached_wrapped_rpc(): # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() client.partial_update_instance(request) @@ -2841,26 +2848,28 @@ async def test_partial_update_instance_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.partial_update_instance - ] = mock_object + ] = mock_rpc request = {} await client.partial_update_instance(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() await client.partial_update_instance(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -3220,22 +3229,23 @@ async def test_delete_instance_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.delete_instance - ] = mock_object + ] = mock_rpc request = {} await client.delete_instance(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.delete_instance(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -3522,8 +3532,9 @@ def test_create_cluster_use_cached_wrapped_rpc(): # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. 
+ # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() client.create_cluster(request) @@ -3577,26 +3588,28 @@ async def test_create_cluster_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.create_cluster - ] = mock_object + ] = mock_rpc request = {} await client.create_cluster(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() await client.create_cluster(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -3975,22 +3988,23 @@ async def test_get_cluster_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.get_cluster - ] = mock_object + ] = mock_rpc request = {} await client.get_cluster(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.get_cluster(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -4350,22 +4364,23 @@ async def test_list_clusters_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.list_clusters - ] = mock_object + ] = mock_rpc request = {} await client.list_clusters(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.list_clusters(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -4663,8 +4678,9 @@ def test_update_cluster_use_cached_wrapped_rpc(): # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. 
+ # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() client.update_cluster(request) @@ -4718,26 +4734,28 @@ async def test_update_cluster_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.update_cluster - ] = mock_object + ] = mock_rpc request = {} await client.update_cluster(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() await client.update_cluster(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -4952,8 +4970,9 @@ def test_partial_update_cluster_use_cached_wrapped_rpc(): # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() client.partial_update_cluster(request) @@ -5009,26 +5028,28 @@ async def test_partial_update_cluster_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.partial_update_cluster - ] = mock_object + ] = mock_rpc request = {} await client.partial_update_cluster(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() await client.partial_update_cluster(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -5388,22 +5409,23 @@ async def test_delete_cluster_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.delete_cluster - ] = mock_object + ] = mock_rpc request = {} await client.delete_cluster(request) # Establish that the underlying gRPC stub method was called. 
- assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.delete_cluster(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -5765,22 +5787,23 @@ async def test_create_app_profile_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.create_app_profile - ] = mock_object + ] = mock_rpc request = {} await client.create_app_profile(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.create_app_profile(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -6167,22 +6190,23 @@ async def test_get_app_profile_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.get_app_profile - ] = mock_object + ] = mock_rpc request = {} await client.get_app_profile(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.get_app_profile(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -6547,22 +6571,23 @@ async def test_list_app_profiles_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.list_app_profiles - ] = mock_object + ] = mock_rpc request = {} await client.list_app_profiles(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.list_app_profiles(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -7076,8 +7101,9 @@ def test_update_app_profile_use_cached_wrapped_rpc(): # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. 
+ # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() client.update_app_profile(request) @@ -7133,26 +7159,28 @@ async def test_update_app_profile_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.update_app_profile - ] = mock_object + ] = mock_rpc request = {} await client.update_app_profile(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() await client.update_app_profile(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -7524,22 +7552,23 @@ async def test_delete_app_profile_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.delete_app_profile - ] = mock_object + ] = mock_rpc request = {} await client.delete_app_profile(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.delete_app_profile(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -7893,22 +7922,23 @@ async def test_get_iam_policy_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.get_iam_policy - ] = mock_object + ] = mock_rpc request = {} await client.get_iam_policy(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.get_iam_policy(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -8275,22 +8305,23 @@ async def test_set_iam_policy_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.set_iam_policy - ] = mock_object + ] = mock_rpc request = {} await client.set_iam_policy(request) # Establish that the underlying gRPC stub method was called. 
- assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.set_iam_policy(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -8667,22 +8698,23 @@ async def test_test_iam_permissions_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.test_iam_permissions - ] = mock_object + ] = mock_rpc request = {} await client.test_iam_permissions(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.test_iam_permissions(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -9075,22 +9107,23 @@ async def test_list_hot_tablets_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.list_hot_tablets - ] = mock_object + ] = mock_rpc request = {} await client.list_hot_tablets(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.list_hot_tablets(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index 5801890449d5..c9455cd5fe9e 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -1362,22 +1362,23 @@ async def test_create_table_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.create_table - ] = mock_object + ] = mock_rpc request = {} await client.create_table(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.create_table(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -1706,8 +1707,9 @@ def test_create_table_from_snapshot_use_cached_wrapped_rpc(): # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. 
+ # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() client.create_table_from_snapshot(request) @@ -1763,26 +1765,28 @@ async def test_create_table_from_snapshot_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.create_table_from_snapshot - ] = mock_object + ] = mock_rpc request = {} await client.create_table_from_snapshot(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() await client.create_table_from_snapshot(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -2161,22 +2165,23 @@ async def test_list_tables_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.list_tables - ] = mock_object + ] = mock_rpc request = {} await client.list_tables(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.list_tables(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -2723,22 +2728,23 @@ async def test_get_table_async_use_cached_wrapped_rpc(transport: str = "grpc_asy ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.get_table - ] = mock_object + ] = mock_rpc request = {} await client.get_table(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.get_table(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -3027,8 +3033,9 @@ def test_update_table_use_cached_wrapped_rpc(): # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. 
+ # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() client.update_table(request) @@ -3082,26 +3089,28 @@ async def test_update_table_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.update_table - ] = mock_object + ] = mock_rpc request = {} await client.update_table(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() await client.update_table(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -3451,22 +3460,23 @@ async def test_delete_table_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.delete_table - ] = mock_object + ] = mock_rpc request = {} await client.delete_table(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.delete_table(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -3751,8 +3761,9 @@ def test_undelete_table_use_cached_wrapped_rpc(): # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() client.undelete_table(request) @@ -3806,26 +3817,28 @@ async def test_undelete_table_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.undelete_table - ] = mock_object + ] = mock_rpc request = {} await client.undelete_table(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. 
+ # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() await client.undelete_table(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -4129,8 +4142,9 @@ def test_create_authorized_view_use_cached_wrapped_rpc(): # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() client.create_authorized_view(request) @@ -4186,26 +4200,28 @@ async def test_create_authorized_view_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.create_authorized_view - ] = mock_object + ] = mock_rpc request = {} await client.create_authorized_view(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() await client.create_authorized_view(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -4597,22 +4613,23 @@ async def test_list_authorized_views_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.list_authorized_views - ] = mock_object + ] = mock_rpc request = {} await client.list_authorized_views(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.list_authorized_views(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -5192,22 +5209,23 @@ async def test_get_authorized_view_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.get_authorized_view - ] = mock_object + ] = mock_rpc request = {} await client.get_authorized_view(request) # Establish that the underlying gRPC stub method was called. 
- assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.get_authorized_view(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -5522,8 +5540,9 @@ def test_update_authorized_view_use_cached_wrapped_rpc(): # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() client.update_authorized_view(request) @@ -5579,26 +5598,28 @@ async def test_update_authorized_view_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.update_authorized_view - ] = mock_object + ] = mock_rpc request = {} await client.update_authorized_view(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() await client.update_authorized_view(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -5973,22 +5994,23 @@ async def test_delete_authorized_view_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.delete_authorized_view - ] = mock_object + ] = mock_rpc request = {} await client.delete_authorized_view(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.delete_authorized_view(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -6358,22 +6380,23 @@ async def test_modify_column_families_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.modify_column_families - ] = mock_object + ] = mock_rpc request = {} await client.modify_column_families(request) # Establish that the underlying gRPC stub method was called. 
- assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.modify_column_families(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -6756,22 +6779,23 @@ async def test_drop_row_range_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.drop_row_range - ] = mock_object + ] = mock_rpc request = {} await client.drop_row_range(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.drop_row_range(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -7045,22 +7069,23 @@ async def test_generate_consistency_token_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.generate_consistency_token - ] = mock_object + ] = mock_rpc request = {} await client.generate_consistency_token(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.generate_consistency_token(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -7432,22 +7457,23 @@ async def test_check_consistency_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.check_consistency - ] = mock_object + ] = mock_rpc request = {} await client.check_consistency(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.check_consistency(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -7767,8 +7793,9 @@ def test_snapshot_table_use_cached_wrapped_rpc(): # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. 
+ # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() client.snapshot_table(request) @@ -7822,26 +7849,28 @@ async def test_snapshot_table_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.snapshot_table - ] = mock_object + ] = mock_rpc request = {} await client.snapshot_table(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() await client.snapshot_table(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -8227,22 +8256,23 @@ async def test_get_snapshot_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.get_snapshot - ] = mock_object + ] = mock_rpc request = {} await client.get_snapshot(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.get_snapshot(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -8596,22 +8626,23 @@ async def test_list_snapshots_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.list_snapshots - ] = mock_object + ] = mock_rpc request = {} await client.list_snapshots(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.list_snapshots(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -9148,22 +9179,23 @@ async def test_delete_snapshot_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.delete_snapshot - ] = mock_object + ] = mock_rpc request = {} await client.delete_snapshot(request) # Establish that the underlying gRPC stub method was called. 
- assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.delete_snapshot(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -9450,8 +9482,9 @@ def test_create_backup_use_cached_wrapped_rpc(): # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() client.create_backup(request) @@ -9505,26 +9538,28 @@ async def test_create_backup_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.create_backup - ] = mock_object + ] = mock_rpc request = {} await client.create_backup(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() await client.create_backup(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -9904,22 +9939,23 @@ async def test_get_backup_async_use_cached_wrapped_rpc(transport: str = "grpc_as ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.get_backup - ] = mock_object + ] = mock_rpc request = {} await client.get_backup(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.get_backup(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -10285,22 +10321,23 @@ async def test_update_backup_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.update_backup - ] = mock_object + ] = mock_rpc request = {} await client.update_backup(request) # Establish that the underlying gRPC stub method was called. 
- assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.update_backup(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -10659,22 +10696,23 @@ async def test_delete_backup_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.delete_backup - ] = mock_object + ] = mock_rpc request = {} await client.delete_backup(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.delete_backup(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -11021,22 +11059,23 @@ async def test_list_backups_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.list_backups - ] = mock_object + ] = mock_rpc request = {} await client.list_backups(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.list_backups(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -11528,8 +11567,9 @@ def test_restore_table_use_cached_wrapped_rpc(): # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() client.restore_table(request) @@ -11583,26 +11623,28 @@ async def test_restore_table_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.restore_table - ] = mock_object + ] = mock_rpc request = {} await client.restore_table(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() await client.restore_table(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -11815,8 +11857,9 @@ def test_copy_backup_use_cached_wrapped_rpc(): # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() client.copy_backup(request) @@ -11870,26 +11913,28 @@ async def test_copy_backup_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.copy_backup - ] = mock_object + ] = mock_rpc request = {} await client.copy_backup(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() await client.copy_backup(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -12268,22 +12313,23 @@ async def test_get_iam_policy_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.get_iam_policy - ] = mock_object + ] = mock_rpc request = {} await client.get_iam_policy(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.get_iam_policy(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -12650,22 +12696,23 @@ async def test_set_iam_policy_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.set_iam_policy - ] = mock_object + ] = mock_rpc request = {} await client.set_iam_policy(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.set_iam_policy(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -13042,22 +13089,23 @@ async def test_test_iam_permissions_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.test_iam_permissions - ] = mock_object + ] = mock_rpc request = {} await client.test_iam_permissions(request) # Establish that the underlying gRPC stub method was called. 
- assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.test_iam_permissions(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py index 60cc7fd6e952..2be864732054 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py @@ -1239,22 +1239,23 @@ async def test_read_rows_async_use_cached_wrapped_rpc(transport: str = "grpc_asy ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.read_rows - ] = mock_object + ] = mock_rpc request = {} await client.read_rows(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.read_rows(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -1614,22 +1615,23 @@ async def test_sample_row_keys_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.sample_row_keys - ] = mock_object + ] = mock_rpc request = {} await client.sample_row_keys(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.sample_row_keys(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -1985,22 +1987,23 @@ async def test_mutate_row_async_use_cached_wrapped_rpc(transport: str = "grpc_as ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.mutate_row - ] = mock_object + ] = mock_rpc request = {} await client.mutate_row(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.mutate_row(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -2404,22 +2407,23 @@ async def test_mutate_rows_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.mutate_rows - ] = mock_object + ] = mock_rpc request = {} await client.mutate_rows(request) # Establish that the underlying gRPC stub method was called. 
- assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.mutate_rows(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -2804,22 +2808,23 @@ async def test_check_and_mutate_row_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.check_and_mutate_row - ] = mock_object + ] = mock_rpc request = {} await client.check_and_mutate_row(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.check_and_mutate_row(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -3334,22 +3339,23 @@ async def test_ping_and_warm_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.ping_and_warm - ] = mock_object + ] = mock_rpc request = {} await client.ping_and_warm(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.ping_and_warm(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -3699,22 +3705,23 @@ async def test_read_modify_write_row_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.read_modify_write_row - ] = mock_object + ] = mock_rpc request = {} await client.read_modify_write_row(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.read_modify_write_row(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -4123,22 +4130,23 @@ async def test_generate_initial_change_stream_partitions_async_use_cached_wrappe ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.generate_initial_change_stream_partitions - ] = mock_object + ] = mock_rpc request = {} await client.generate_initial_change_stream_partitions(request) # Establish that the underlying gRPC stub method was called. 
- assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.generate_initial_change_stream_partitions(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -4523,22 +4531,23 @@ async def test_read_change_stream_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.read_change_stream - ] = mock_object + ] = mock_rpc request = {} await client.read_change_stream(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.read_change_stream(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio @@ -4906,22 +4915,23 @@ async def test_execute_query_async_use_cached_wrapped_rpc( ) # Replace cached wrapped function with mock - mock_object = mock.AsyncMock() + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ client._client._transport.execute_query - ] = mock_object + ] = mock_rpc request = {} await client.execute_query(request) # Establish that the underlying gRPC stub method was called. - assert mock_object.call_count == 1 + assert mock_rpc.call_count == 1 await client.execute_query(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 - assert mock_object.call_count == 2 + assert mock_rpc.call_count == 2 @pytest.mark.asyncio From 57416c3aa988fc89ca04f2de5cb0b35af0486938 Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Fri, 9 Aug 2024 16:36:41 -0600 Subject: [PATCH 817/892] chore(docs): add execute query docs pages (#1014) --- .../async_data_execute_query_iterator.rst | 6 +++ .../async_data_execute_query_metadata.rst | 6 +++ .../async_data_execute_query_values.rst | 6 +++ .../async_data_client/async_data_usage.rst | 3 ++ .../cloud/bigtable/data/_async/client.py | 22 ++++----- .../_async/execute_query_iterator.py | 45 +++++++++++-------- .../bigtable/data/execute_query/metadata.py | 33 ++++++++++++++ .../bigtable/data/execute_query/values.py | 8 +++- 8 files changed, 98 insertions(+), 31 deletions(-) create mode 100644 packages/google-cloud-bigtable/docs/async_data_client/async_data_execute_query_iterator.rst create mode 100644 packages/google-cloud-bigtable/docs/async_data_client/async_data_execute_query_metadata.rst create mode 100644 packages/google-cloud-bigtable/docs/async_data_client/async_data_execute_query_values.rst diff --git a/packages/google-cloud-bigtable/docs/async_data_client/async_data_execute_query_iterator.rst b/packages/google-cloud-bigtable/docs/async_data_client/async_data_execute_query_iterator.rst new file mode 100644 index 000000000000..b911fab7fc6f --- /dev/null +++ b/packages/google-cloud-bigtable/docs/async_data_client/async_data_execute_query_iterator.rst @@ -0,0 +1,6 @@ +Execute Query Iterator Async +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. 
autoclass:: google.cloud.bigtable.data.execute_query.ExecuteQueryIteratorAsync + :members: + :show-inheritance: diff --git a/packages/google-cloud-bigtable/docs/async_data_client/async_data_execute_query_metadata.rst b/packages/google-cloud-bigtable/docs/async_data_client/async_data_execute_query_metadata.rst new file mode 100644 index 000000000000..69add630de3f --- /dev/null +++ b/packages/google-cloud-bigtable/docs/async_data_client/async_data_execute_query_metadata.rst @@ -0,0 +1,6 @@ +Execute Query Metadata +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. automodule:: google.cloud.bigtable.data.execute_query.metadata + :members: + :show-inheritance: diff --git a/packages/google-cloud-bigtable/docs/async_data_client/async_data_execute_query_values.rst b/packages/google-cloud-bigtable/docs/async_data_client/async_data_execute_query_values.rst new file mode 100644 index 000000000000..6c4fb71c1337 --- /dev/null +++ b/packages/google-cloud-bigtable/docs/async_data_client/async_data_execute_query_values.rst @@ -0,0 +1,6 @@ +Execute Query Values +~~~~~~~~~~~~~~~~~~~~ + +.. automodule:: google.cloud.bigtable.data.execute_query.values + :members: + :show-inheritance: diff --git a/packages/google-cloud-bigtable/docs/async_data_client/async_data_usage.rst b/packages/google-cloud-bigtable/docs/async_data_client/async_data_usage.rst index 8843b506bc82..61d5837fdaea 100644 --- a/packages/google-cloud-bigtable/docs/async_data_client/async_data_usage.rst +++ b/packages/google-cloud-bigtable/docs/async_data_client/async_data_usage.rst @@ -13,3 +13,6 @@ Async Data Client async_data_mutations async_data_read_modify_write_rules async_data_exceptions + async_data_execute_query_iterator + async_data_execute_query_values + async_data_execute_query_metadata diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py index 600937df856d..82a874918529 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py @@ -456,38 +456,38 @@ async def execute_query( retryable_errors list until operation_timeout is reached. Args: - - query: Query to be run on Bigtable instance. The query can use ``@param`` + query: Query to be run on Bigtable instance. The query can use ``@param`` placeholders to use parameter interpolation on the server. Values for all parameters should be provided in ``parameters``. Types of parameters are inferred but should be provided in ``parameter_types`` if the inference is not possible (i.e. when value can be None, an empty list or an empty dict). - - instance_id: The Bigtable instance ID to perform the query on. + instance_id: The Bigtable instance ID to perform the query on. instance_id is combined with the client's project to fully specify the instance. - - parameters: Dictionary with values for all parameters used in the ``query``. - - parameter_types: Dictionary with types of parameters used in the ``query``. + parameters: Dictionary with values for all parameters used in the ``query``. + parameter_types: Dictionary with types of parameters used in the ``query``. Required to contain entries only for parameters whose type cannot be detected automatically (i.e. the value can be None, an empty list or an empty dict). - - app_profile_id: The app profile to associate with requests. + app_profile_id: The app profile to associate with requests. 
https://cloud.google.com/bigtable/docs/app-profiles - - operation_timeout: the time budget for the entire operation, in seconds. + operation_timeout: the time budget for the entire operation, in seconds. Failed requests will be retried within the budget. Defaults to 600 seconds. - - attempt_timeout: the time budget for an individual network request, in seconds. + attempt_timeout: the time budget for an individual network request, in seconds. If it takes longer than this time to complete, the request will be cancelled with a DeadlineExceeded exception, and a retry will be attempted. Defaults to the 20 seconds. If None, defaults to operation_timeout. - - retryable_errors: a list of errors that will be retried if encountered. + retryable_errors: a list of errors that will be retried if encountered. Defaults to 4 (DeadlineExceeded), 14 (ServiceUnavailable), and 10 (Aborted) Returns: - - an asynchronous iterator that yields rows returned by the query + ExecuteQueryIteratorAsync: an asynchronous iterator that yields rows returned by the query Raises: - - DeadlineExceeded: raised after operation timeout + google.api_core.exceptions.DeadlineExceeded: raised after operation timeout will be chained with a RetryExceptionGroup containing GoogleAPIError exceptions from any retries that failed - - GoogleAPIError: raised if the request encounters an unrecoverable error + google.api_core.exceptions.GoogleAPIError: raised if the request encounters an unrecoverable error """ warnings.warn( "ExecuteQuery is in preview and may change in the future.", diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py index 3660c0b0ff9f..32081939b4ed 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py @@ -23,6 +23,7 @@ Optional, Sequence, Tuple, + TYPE_CHECKING, ) from google.api_core import retry as retries @@ -43,11 +44,14 @@ ExecuteQueryRequest as ExecuteQueryRequestPB, ) +if TYPE_CHECKING: + from google.cloud.bigtable.data import BigtableDataClientAsync + class ExecuteQueryIteratorAsync: """ ExecuteQueryIteratorAsync handles collecting streaming responses from the - ExecuteQuery RPC and parsing them to `QueryResultRow`s. + ExecuteQuery RPC and parsing them to QueryResultRows. ExecuteQueryIteratorAsync implements Asynchronous Iterator interface and can be used with "async for" syntax. It is also a context manager. @@ -55,23 +59,25 @@ class ExecuteQueryIteratorAsync: It is **not thread-safe**. It should not be used by multiple asyncio Tasks. Args: - client (google.cloud.bigtable.data._async.BigtableDataClientAsync): bigtable client - instance_id (str): id of the instance on which the query is executed - request_body (Dict[str, Any]): dict representing the body of the ExecuteQueryRequest - attempt_timeout (float | None): the time budget for the entire operation, in seconds. - Failed requests will be retried within the budget. - Defaults to 600 seconds. - operation_timeout (float): the time budget for an individual network request, in seconds. - If it takes longer than this time to complete, the request will be cancelled with - a DeadlineExceeded exception, and a retry will be attempted. - Defaults to the 20 seconds. If None, defaults to operation_timeout. 
- req_metadata (Sequence[Tuple[str, str]]): metadata used while sending the gRPC request - retryable_excs (List[type[Exception]]): a list of errors that will be retried if encountered. + client: bigtable client + instance_id: id of the instance on which the query is executed + request_body: dict representing the body of the ExecuteQueryRequest + attempt_timeout: the time budget for an individual network request, in seconds. + If it takes longer than this time to complete, the request will be cancelled with + a DeadlineExceeded exception, and a retry will be attempted. + Defaults to 20 seconds. If None, defaults to operation_timeout. + operation_timeout: the time budget for the entire operation, in seconds. + Failed requests will be retried within the budget. + Defaults to 600 seconds. + req_metadata: metadata used while sending the gRPC request + retryable_excs: a list of errors that will be retried if encountered. + Raises: + RuntimeError: if the instance is not created within an async event loop context. """ def __init__( self, - client: Any, + client: BigtableDataClientAsync, instance_id: str, app_profile_id: Optional[str], request_body: Dict[str, Any], @@ -112,15 +118,18 @@ def __init__( ) from e @property - def is_closed(self): + def is_closed(self) -> bool: + """Returns True if the iterator is closed, False otherwise.""" return self._is_closed @property - def app_profile_id(self): + def app_profile_id(self) -> Optional[str]: + """Returns the app_profile_id of the iterator.""" return self._app_profile_id @property - def table_name(self): + def table_name(self) -> Optional[str]: + """Returns the table_name of the iterator.""" return self._table_name async def _make_request_with_resume_token(self): @@ -176,7 +185,7 @@ async def _next_impl(self) -> AsyncIterator[QueryResultRow]: yield result await self.close() - async def __anext__(self): + async def __anext__(self) -> QueryResultRow: if self._is_closed: raise StopAsyncIteration return await self._result_generator.__anext__() diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/metadata.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/metadata.py index 4c08cbad310b..0c9cf969791a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/metadata.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/metadata.py @@ -90,6 +90,8 @@ def __repr__(self) -> str: return self.__str__() class Struct(_NamedList[Type], Type): + """Struct SQL type.""" + @classmethod def from_pb_type(cls, type_pb: Optional[PBType] = None) -> "SqlType.Struct": if type_pb is None: @@ -120,6 +122,8 @@ def __str__(self): return super(_NamedList, self).__str__() class Array(Type): + """Array SQL type.""" + def __init__(self, element_type: "SqlType.Type"): if isinstance(element_type, SqlType.Array): raise ValueError("Arrays of arrays are not supported.") @@ -148,6 +152,8 @@ def __str__(self) -> str: return f"{self.__class__.__name__}<{str(self.element_type)}>" class Map(Type): + """Map SQL type.""" + def __init__(self, key_type: "SqlType.Type", value_type: "SqlType.Type"): self._key_type = key_type self._value_type = value_type @@ -189,32 +195,44 @@ def __str__(self) -> str: ) class Bytes(Type): + """Bytes SQL type.""" + expected_type = bytes value_pb_dict_field_name = "bytes_value" type_field_name = "bytes_type" class String(Type): + """String SQL type.""" + expected_type = str value_pb_dict_field_name = "string_value" type_field_name = "string_type" class 
Int64(Type): + """Int64 SQL type.""" + expected_type = int value_pb_dict_field_name = "int_value" type_field_name = "int64_type" class Float64(Type): + """Float64 SQL type.""" + expected_type = float value_pb_dict_field_name = "float_value" type_field_name = "float64_type" class Bool(Type): + """Bool SQL type.""" + expected_type = bool value_pb_dict_field_name = "bool_value" type_field_name = "bool_type" class Timestamp(Type): """ + Timestamp SQL type. + Timestamp supports :class:`DatetimeWithNanoseconds` but Bigtable SQL does not currently support nanoseconds precision. We support this for potential compatibility in the future. Nanoseconds are currently ignored. @@ -243,6 +261,8 @@ def _to_value_pb_dict(self, value: Any) -> Dict[str, Any]: return {"timestamp_value": ts} class Date(Type): + """Date SQL type.""" + type_field_name = "date_type" expected_type = datetime.date @@ -265,10 +285,23 @@ def _to_value_pb_dict(self, value: Any) -> Dict[str, Any]: class Metadata: + """ + Base class for metadata returned by the ExecuteQuery operation. + """ + pass class ProtoMetadata(Metadata): + """ + Metadata class for the ExecuteQuery operation. + + Args: + columns (List[Tuple[Optional[str], SqlType.Type]]): List of column + metadata tuples. Each tuple contains the column name and the column + type. + """ + class Column: def __init__(self, column_name: Optional[str], column_type: SqlType.Type): self._column_name = column_name diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/values.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/values.py index 394bef71ec01..80a0bff6f7b9 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/values.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/values.py @@ -112,8 +112,12 @@ def __repr__(self) -> str: class QueryResultRow(_NamedList[ExecuteQueryValueType]): - pass + """ + Represents a single row of the result + """ class Struct(_NamedList[ExecuteQueryValueType]): - pass + """ + Represents a struct value in the result + """ From 9362b100a6621a5d70dda6db70849adc69fca579 Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Mon, 12 Aug 2024 14:57:19 -0600 Subject: [PATCH 818/892] chore(docs): add async note to docs (#984) --- packages/google-cloud-bigtable/README.rst | 8 +++++++ .../async_data_client/async_data_client.rst | 6 +++++ .../async_data_client/async_data_table.rst | 5 ++++ .../docs/scripts/patch_devsite_toc.py | 24 +++++++++++++++---- 4 files changed, 39 insertions(+), 4 deletions(-) diff --git a/packages/google-cloud-bigtable/README.rst b/packages/google-cloud-bigtable/README.rst index 63c50591c366..2ecbd0185ae7 100644 --- a/packages/google-cloud-bigtable/README.rst +++ b/packages/google-cloud-bigtable/README.rst @@ -34,8 +34,16 @@ remaining exclusively in the existing synchronous client. Feedback and bug reports are welcome at cbt-python-client-v3-feedback@google.com, or through the Github `issue tracker`_. + + .. note:: + + It is generally not recommended to use the async client in an otherwise synchronous codebase. To make use of asyncio's + performance benefits, the codebase should be designed to be async from the ground up. + + .. 
_issue tracker: https://github.com/googleapis/python-bigtable/issues + Quick Start ----------- diff --git a/packages/google-cloud-bigtable/docs/async_data_client/async_data_client.rst b/packages/google-cloud-bigtable/docs/async_data_client/async_data_client.rst index c5cc7074098b..0e1d9e23e809 100644 --- a/packages/google-cloud-bigtable/docs/async_data_client/async_data_client.rst +++ b/packages/google-cloud-bigtable/docs/async_data_client/async_data_client.rst @@ -1,6 +1,12 @@ Bigtable Data Client Async ~~~~~~~~~~~~~~~~~~~~~~~~~~ + .. note:: + + It is generally not recommended to use the async client in an otherwise synchronous codebase. To make use of asyncio's + performance benefits, the codebase should be designed to be async from the ground up. + + .. autoclass:: google.cloud.bigtable.data._async.client.BigtableDataClientAsync :members: :show-inheritance: diff --git a/packages/google-cloud-bigtable/docs/async_data_client/async_data_table.rst b/packages/google-cloud-bigtable/docs/async_data_client/async_data_table.rst index a977beb6a4e3..3b7973e8eeff 100644 --- a/packages/google-cloud-bigtable/docs/async_data_client/async_data_table.rst +++ b/packages/google-cloud-bigtable/docs/async_data_client/async_data_table.rst @@ -1,6 +1,11 @@ Table Async ~~~~~~~~~~~ + .. note:: + + It is generally not recommended to use the async client in an otherwise synchronous codebase. To make use of asyncio's + performance benefits, the codebase should be designed to be async from the ground up. + .. autoclass:: google.cloud.bigtable.data._async.client.TableAsync :members: :show-inheritance: diff --git a/packages/google-cloud-bigtable/docs/scripts/patch_devsite_toc.py b/packages/google-cloud-bigtable/docs/scripts/patch_devsite_toc.py index 6338128ddb8e..456d0af7b8cf 100644 --- a/packages/google-cloud-bigtable/docs/scripts/patch_devsite_toc.py +++ b/packages/google-cloud-bigtable/docs/scripts/patch_devsite_toc.py @@ -88,19 +88,31 @@ def __init__(self, dir_name, index_file_name): index_file_path = os.path.join(dir_name, index_file_name) # find set of files referenced by the index file with open(index_file_path, "r") as f: - self.title = f.readline().strip() + self.title = None in_toc = False self.items = [] for line in f: # ignore empty lines if not line.strip(): continue + # add files explictly included in the toc + if line.startswith(".. include::"): + file_base = os.path.splitext(line.split("::")[1].strip())[0] + self.items.append( + self.extract_toc_entry( + file_base, file_title=file_base.capitalize() + ) + ) + continue if line.startswith(".. 
toctree::"): in_toc = True continue # ignore directives if ":" in line: continue + # set tile as first line with no directive + if self.title is None: + self.title = line.strip() if not in_toc: continue # bail when toc indented block is done @@ -109,14 +121,16 @@ def __init__(self, dir_name, index_file_name): # extract entries self.items.append(self.extract_toc_entry(line.strip())) - def extract_toc_entry(self, file_name): + def extract_toc_entry(self, file_name, file_title=None): """ Given the name of a file, extract the title and href for the toc entry, and return as a dictionary """ # load the file to get the title with open(f"{self.dir_name}/{file_name}.rst", "r") as f2: - file_title = f2.readline().strip() + if file_title is None: + # use first line as title if not provided + file_title = f2.readline().strip() return {"name": file_title, "href": f"{file_name}.md"} def to_dict(self): @@ -143,7 +157,9 @@ def validate_toc(toc_file_path, expected_section_list, added_sections): current_toc = yaml.safe_load(open(toc_file_path, "r")) # make sure the set of sections matches what we expect found_sections = [d["name"] for d in current_toc[0]["items"]] - assert found_sections == expected_section_list + assert ( + found_sections == expected_section_list + ), f"Expected {expected_section_list}, found {found_sections}" # make sure each customs ection is in the toc for section in added_sections: assert section.title in found_sections From 4d4747e7e7e99ac506676958484064a2f745c1b8 Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Mon, 12 Aug 2024 14:32:50 -0700 Subject: [PATCH 819/892] chore(main): release 2.26.0 (#1006) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- .../.release-please-manifest.json | 2 +- packages/google-cloud-bigtable/CHANGELOG.md | 21 +++++++++++++++++++ .../google/cloud/bigtable/gapic_version.py | 2 +- .../cloud/bigtable_admin/gapic_version.py | 2 +- .../cloud/bigtable_admin_v2/gapic_version.py | 2 +- .../google/cloud/bigtable_v2/gapic_version.py | 2 +- 6 files changed, 26 insertions(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/.release-please-manifest.json b/packages/google-cloud-bigtable/.release-please-manifest.json index d6c7e9d687cb..d6de1e7f8832 100644 --- a/packages/google-cloud-bigtable/.release-please-manifest.json +++ b/packages/google-cloud-bigtable/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "2.25.0" + ".": "2.26.0" } \ No newline at end of file diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 92b498748f2b..09bffa32da38 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,27 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.26.0](https://github.com/googleapis/python-bigtable/compare/v2.25.0...v2.26.0) (2024-08-12) + + +### Features + +* Add fields and the BackupType proto for Hot Backups ([#1010](https://github.com/googleapis/python-bigtable/issues/1010)) ([b95801f](https://github.com/googleapis/python-bigtable/commit/b95801ffa8081e0072232247fbc5879105c109a6)) +* Add MergeToCell to Mutation APIs ([f029a24](https://github.com/googleapis/python-bigtable/commit/f029a242e2b0e6020d0b87ef256a414194321fad)) +* Add min, max, hll aggregators and more types ([f029a24](https://github.com/googleapis/python-bigtable/commit/f029a242e2b0e6020d0b87ef256a414194321fad)) +* Async execute query client 
([#1011](https://github.com/googleapis/python-bigtable/issues/1011)) ([45bc8c4](https://github.com/googleapis/python-bigtable/commit/45bc8c4a0fe567ce5e0126a1a70e7eb3dca93e92)) + + +### Bug Fixes + +* Use single routing metadata header ([#1005](https://github.com/googleapis/python-bigtable/issues/1005)) ([20eeb0a](https://github.com/googleapis/python-bigtable/commit/20eeb0a68d7b44d07a6d84bc7a7e040ad63bb96d)) + + +### Documentation + +* Add clarification around SQL timestamps ([#1012](https://github.com/googleapis/python-bigtable/issues/1012)) ([6e80190](https://github.com/googleapis/python-bigtable/commit/6e801900bbe9385d3b579b8c3327c87c3617d92f)) +* Corrected various type documentation ([f029a24](https://github.com/googleapis/python-bigtable/commit/f029a242e2b0e6020d0b87ef256a414194321fad)) + ## [2.25.0](https://github.com/googleapis/python-bigtable/compare/v2.24.0...v2.25.0) (2024-07-18) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py index e5fa8f60b9fc..d56eed5c5db7 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.25.0" # {x-release-please-version} +__version__ = "2.26.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py index e5fa8f60b9fc..d56eed5c5db7 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.25.0" # {x-release-please-version} +__version__ = "2.26.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py index e5fa8f60b9fc..d56eed5c5db7 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.25.0" # {x-release-please-version} +__version__ = "2.26.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py index e5fa8f60b9fc..d56eed5c5db7 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "2.25.0" # {x-release-please-version} +__version__ = "2.26.0" # {x-release-please-version} From 193e6013f20fe8950761778c9c117a6b5a384846 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Tue, 17 Sep 2024 11:58:44 -0400 Subject: [PATCH 820/892] chore: removes docs-presubmit.cfg template (#1016) Source-Link: https://github.com/googleapis/synthtool/commit/373d00fed32729afc9f53e24dce3f1cdd339678e Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:2dc6f67639bee669c33c6277a624ab9857d363e2fd33ac5b02d417b7d25f1ffc Co-authored-by: Owl Bot --- .../.github/.OwlBot.lock.yaml | 3 +- .../.kokoro/docker/docs/Dockerfile | 26 ++++++----- .../.kokoro/docker/docs/requirements.txt | 40 ++++++++-------- .../.kokoro/publish-docs.sh | 20 ++++---- .../.kokoro/requirements.txt | 46 +++++++++---------- 5 files changed, 71 insertions(+), 64 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 620159621881..8b90899d2137 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,4 +13,5 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:5651442a6336971a2fb2df40fb56b3337df67cafa14c0809cc89cb34ccee1b8e + digest: sha256:2dc6f67639bee669c33c6277a624ab9857d363e2fd33ac5b02d417b7d25f1ffc +# created: 2024-08-15T17:41:26.438340772Z diff --git a/packages/google-cloud-bigtable/.kokoro/docker/docs/Dockerfile b/packages/google-cloud-bigtable/.kokoro/docker/docs/Dockerfile index a26ce61930f5..e5410e296bd8 100644 --- a/packages/google-cloud-bigtable/.kokoro/docker/docs/Dockerfile +++ b/packages/google-cloud-bigtable/.kokoro/docker/docs/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from ubuntu:22.04 +from ubuntu:24.04 ENV DEBIAN_FRONTEND noninteractive @@ -40,7 +40,6 @@ RUN apt-get update \ libssl-dev \ libsqlite3-dev \ portaudio19-dev \ - python3-distutils \ redis-server \ software-properties-common \ ssh \ @@ -60,28 +59,31 @@ RUN apt-get update \ && rm -rf /var/lib/apt/lists/* \ && rm -f /var/cache/apt/archives/*.deb -###################### Install python 3.9.13 -# Download python 3.9.13 -RUN wget https://www.python.org/ftp/python/3.9.13/Python-3.9.13.tgz +###################### Install python 3.10.14 for docs/docfx session + +# Download python 3.10.14 +RUN wget https://www.python.org/ftp/python/3.10.14/Python-3.10.14.tgz # Extract files -RUN tar -xvf Python-3.9.13.tgz +RUN tar -xvf Python-3.10.14.tgz -# Install python 3.9.13 -RUN ./Python-3.9.13/configure --enable-optimizations +# Install python 3.10.14 +RUN ./Python-3.10.14/configure --enable-optimizations RUN make altinstall +ENV PATH /usr/local/bin/python3.10:$PATH + ###################### Install pip RUN wget -O /tmp/get-pip.py 'https://bootstrap.pypa.io/get-pip.py' \ - && python3 /tmp/get-pip.py \ + && python3.10 /tmp/get-pip.py \ && rm /tmp/get-pip.py # Test pip -RUN python3 -m pip +RUN python3.10 -m pip # Install build requirements COPY requirements.txt /requirements.txt -RUN python3 -m pip install --require-hashes -r requirements.txt +RUN python3.10 -m pip install --require-hashes -r requirements.txt -CMD ["python3.8"] +CMD ["python3.10"] diff --git a/packages/google-cloud-bigtable/.kokoro/docker/docs/requirements.txt b/packages/google-cloud-bigtable/.kokoro/docker/docs/requirements.txt index 0e5d70f20f83..7129c7715594 100644 --- a/packages/google-cloud-bigtable/.kokoro/docker/docs/requirements.txt +++ b/packages/google-cloud-bigtable/.kokoro/docker/docs/requirements.txt @@ -4,9 +4,9 @@ # # pip-compile --allow-unsafe --generate-hashes requirements.in # -argcomplete==3.2.3 \ - --hash=sha256:bf7900329262e481be5a15f56f19736b376df6f82ed27576fa893652c5de6c23 \ - --hash=sha256:c12355e0494c76a2a7b73e3a59b09024ca0ba1e279fb9ed6c1b82d5b74b6a70c +argcomplete==3.4.0 \ + --hash=sha256:69a79e083a716173e5532e0fa3bef45f793f4e61096cf52b5a42c0211c8b8aa5 \ + --hash=sha256:c2abcdfe1be8ace47ba777d4fce319eb13bf8ad9dace8d085dcad6eded88057f # via nox colorlog==6.8.2 \ --hash=sha256:3e3e079a41feb5a1b64f978b5ea4f46040a94f11f0e8bbb8261e3dbbeca64d44 \ @@ -16,23 +16,27 @@ distlib==0.3.8 \ --hash=sha256:034db59a0b96f8ca18035f36290806a9a6e6bd9d1ff91e45a7f172eb17e51784 \ --hash=sha256:1530ea13e350031b6312d8580ddb6b27a104275a31106523b8f123787f494f64 # via virtualenv -filelock==3.13.1 \ - --hash=sha256:521f5f56c50f8426f5e03ad3b281b490a87ef15bc6c526f168290f0c7148d44e \ - --hash=sha256:57dbda9b35157b05fb3e58ee91448612eb674172fab98ee235ccb0b5bee19a1c +filelock==3.15.4 \ + --hash=sha256:2207938cbc1844345cb01a5a95524dae30f0ce089eba5b00378295a17e3e90cb \ + --hash=sha256:6ca1fffae96225dab4c6eaf1c4f4f28cd2568d3ec2a44e15a08520504de468e7 # via virtualenv -nox==2024.3.2 \ - --hash=sha256:e53514173ac0b98dd47585096a55572fe504fecede58ced708979184d05440be \ - --hash=sha256:f521ae08a15adbf5e11f16cb34e8d0e6ea521e0b92868f684e91677deb974553 +nox==2024.4.15 \ + --hash=sha256:6492236efa15a460ecb98e7b67562a28b70da006ab0be164e8821177577c0565 \ + --hash=sha256:ecf6700199cdfa9e5ea0a41ff5e6ef4641d09508eda6edb89d9987864115817f # via -r requirements.in -packaging==24.0 \ - --hash=sha256:2ddfb553fdf02fb784c234c7ba6ccc288296ceabec964ad2eae3777778130bc5 \ - --hash=sha256:eb82c5e3e56209074766e6885bb04b8c38a0c015d0a30036ebe7ece34c9989e9 +packaging==24.1 \ + 
--hash=sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002 \ + --hash=sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124 # via nox -platformdirs==4.2.0 \ - --hash=sha256:0614df2a2f37e1a662acbd8e2b25b92ccf8632929bc6d43467e17fe89c75e068 \ - --hash=sha256:ef0cc731df711022c174543cb70a9b5bd22e5a9337c8624ef2c2ceb8ddad8768 +platformdirs==4.2.2 \ + --hash=sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee \ + --hash=sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3 # via virtualenv -virtualenv==20.25.1 \ - --hash=sha256:961c026ac520bac5f69acb8ea063e8a4f071bcc9457b9c1f28f6b085c511583a \ - --hash=sha256:e08e13ecdca7a0bd53798f356d5831434afa5b07b93f0abdf0797b7a06ffe197 +tomli==2.0.1 \ + --hash=sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc \ + --hash=sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f + # via nox +virtualenv==20.26.3 \ + --hash=sha256:4c43a2a236279d9ea36a0d76f98d84bd6ca94ac4e0f4a3b9d46d05e10fea542a \ + --hash=sha256:8cc4a31139e796e9a7de2cd5cf2489de1217193116a8fd42328f1bd65f434589 # via nox diff --git a/packages/google-cloud-bigtable/.kokoro/publish-docs.sh b/packages/google-cloud-bigtable/.kokoro/publish-docs.sh index 38f083f05aa0..233205d580e9 100755 --- a/packages/google-cloud-bigtable/.kokoro/publish-docs.sh +++ b/packages/google-cloud-bigtable/.kokoro/publish-docs.sh @@ -21,18 +21,18 @@ export PYTHONUNBUFFERED=1 export PATH="${HOME}/.local/bin:${PATH}" # Install nox -python3 -m pip install --require-hashes -r .kokoro/requirements.txt -python3 -m nox --version +python3.10 -m pip install --require-hashes -r .kokoro/requirements.txt +python3.10 -m nox --version # build docs nox -s docs # create metadata -python3 -m docuploader create-metadata \ +python3.10 -m docuploader create-metadata \ --name=$(jq --raw-output '.name // empty' .repo-metadata.json) \ - --version=$(python3 setup.py --version) \ + --version=$(python3.10 setup.py --version) \ --language=$(jq --raw-output '.language // empty' .repo-metadata.json) \ - --distribution-name=$(python3 setup.py --name) \ + --distribution-name=$(python3.10 setup.py --name) \ --product-page=$(jq --raw-output '.product_documentation // empty' .repo-metadata.json) \ --github-repository=$(jq --raw-output '.repo // empty' .repo-metadata.json) \ --issue-tracker=$(jq --raw-output '.issue_tracker // empty' .repo-metadata.json) @@ -40,18 +40,18 @@ python3 -m docuploader create-metadata \ cat docs.metadata # upload docs -python3 -m docuploader upload docs/_build/html --metadata-file docs.metadata --staging-bucket "${STAGING_BUCKET}" +python3.10 -m docuploader upload docs/_build/html --metadata-file docs.metadata --staging-bucket "${STAGING_BUCKET}" # docfx yaml files nox -s docfx # create metadata. 
-python3 -m docuploader create-metadata \ +python3.10 -m docuploader create-metadata \ --name=$(jq --raw-output '.name // empty' .repo-metadata.json) \ - --version=$(python3 setup.py --version) \ + --version=$(python3.10 setup.py --version) \ --language=$(jq --raw-output '.language // empty' .repo-metadata.json) \ - --distribution-name=$(python3 setup.py --name) \ + --distribution-name=$(python3.10 setup.py --name) \ --product-page=$(jq --raw-output '.product_documentation // empty' .repo-metadata.json) \ --github-repository=$(jq --raw-output '.repo // empty' .repo-metadata.json) \ --issue-tracker=$(jq --raw-output '.issue_tracker // empty' .repo-metadata.json) @@ -59,4 +59,4 @@ python3 -m docuploader create-metadata \ cat docs.metadata # upload docs -python3 -m docuploader upload docs/_build/html/docfx_yaml --metadata-file docs.metadata --destination-prefix docfx --staging-bucket "${V2_STAGING_BUCKET}" +python3.10 -m docuploader upload docs/_build/html/docfx_yaml --metadata-file docs.metadata --destination-prefix docfx --staging-bucket "${V2_STAGING_BUCKET}" diff --git a/packages/google-cloud-bigtable/.kokoro/requirements.txt b/packages/google-cloud-bigtable/.kokoro/requirements.txt index 35ece0e4d2e9..9622baf0ba38 100644 --- a/packages/google-cloud-bigtable/.kokoro/requirements.txt +++ b/packages/google-cloud-bigtable/.kokoro/requirements.txt @@ -20,9 +20,9 @@ cachetools==5.3.3 \ --hash=sha256:0abad1021d3f8325b2fc1d2e9c8b9c9d57b04c3932657a72465447332c24d945 \ --hash=sha256:ba29e2dfa0b8b556606f097407ed1aa62080ee108ab0dc5ec9d6a723a007d105 # via google-auth -certifi==2024.6.2 \ - --hash=sha256:3cd43f1c6fa7dedc5899d69d3ad0398fd018ad1a17fba83ddaf78aa46c747516 \ - --hash=sha256:ddc6c8ce995e6987e7faf5e3f1b02b302836a0e5d98ece18392cb1a36c72ad56 +certifi==2024.7.4 \ + --hash=sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b \ + --hash=sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90 # via requests cffi==1.16.0 \ --hash=sha256:0c9ef6ff37e974b73c25eecc13952c55bceed9112be2d9d938ded8e856138bcc \ @@ -371,23 +371,23 @@ more-itertools==10.3.0 \ # via # jaraco-classes # jaraco-functools -nh3==0.2.17 \ - --hash=sha256:0316c25b76289cf23be6b66c77d3608a4fdf537b35426280032f432f14291b9a \ - --hash=sha256:1a814dd7bba1cb0aba5bcb9bebcc88fd801b63e21e2450ae6c52d3b3336bc911 \ - --hash=sha256:1aa52a7def528297f256de0844e8dd680ee279e79583c76d6fa73a978186ddfb \ - --hash=sha256:22c26e20acbb253a5bdd33d432a326d18508a910e4dcf9a3316179860d53345a \ - --hash=sha256:40015514022af31975c0b3bca4014634fa13cb5dc4dbcbc00570acc781316dcc \ - --hash=sha256:40d0741a19c3d645e54efba71cb0d8c475b59135c1e3c580f879ad5514cbf028 \ - --hash=sha256:551672fd71d06cd828e282abdb810d1be24e1abb7ae2543a8fa36a71c1006fe9 \ - --hash=sha256:66f17d78826096291bd264f260213d2b3905e3c7fae6dfc5337d49429f1dc9f3 \ - --hash=sha256:85cdbcca8ef10733bd31f931956f7fbb85145a4d11ab9e6742bbf44d88b7e351 \ - --hash=sha256:a3f55fabe29164ba6026b5ad5c3151c314d136fd67415a17660b4aaddacf1b10 \ - --hash=sha256:b4427ef0d2dfdec10b641ed0bdaf17957eb625b2ec0ea9329b3d28806c153d71 \ - --hash=sha256:ba73a2f8d3a1b966e9cdba7b211779ad8a2561d2dba9674b8a19ed817923f65f \ - --hash=sha256:c21bac1a7245cbd88c0b0e4a420221b7bfa838a2814ee5bb924e9c2f10a1120b \ - --hash=sha256:c551eb2a3876e8ff2ac63dff1585236ed5dfec5ffd82216a7a174f7c5082a78a \ - --hash=sha256:c790769152308421283679a142dbdb3d1c46c79c823008ecea8e8141db1a2062 \ - --hash=sha256:d7a25fd8c86657f5d9d576268e3b3767c5cd4f42867c9383618be8517f0f022a +nh3==0.2.18 \ + 
--hash=sha256:0411beb0589eacb6734f28d5497ca2ed379eafab8ad8c84b31bb5c34072b7164 \ + --hash=sha256:14c5a72e9fe82aea5fe3072116ad4661af5cf8e8ff8fc5ad3450f123e4925e86 \ + --hash=sha256:19aaba96e0f795bd0a6c56291495ff59364f4300d4a39b29a0abc9cb3774a84b \ + --hash=sha256:34c03fa78e328c691f982b7c03d4423bdfd7da69cd707fe572f544cf74ac23ad \ + --hash=sha256:36c95d4b70530b320b365659bb5034341316e6a9b30f0b25fa9c9eff4c27a204 \ + --hash=sha256:3a157ab149e591bb638a55c8c6bcb8cdb559c8b12c13a8affaba6cedfe51713a \ + --hash=sha256:42c64511469005058cd17cc1537578eac40ae9f7200bedcfd1fc1a05f4f8c200 \ + --hash=sha256:5f36b271dae35c465ef5e9090e1fdaba4a60a56f0bb0ba03e0932a66f28b9189 \ + --hash=sha256:6955369e4d9f48f41e3f238a9e60f9410645db7e07435e62c6a9ea6135a4907f \ + --hash=sha256:7b7c2a3c9eb1a827d42539aa64091640bd275b81e097cd1d8d82ef91ffa2e811 \ + --hash=sha256:8ce0f819d2f1933953fca255db2471ad58184a60508f03e6285e5114b6254844 \ + --hash=sha256:94a166927e53972a9698af9542ace4e38b9de50c34352b962f4d9a7d4c927af4 \ + --hash=sha256:a7f1b5b2c15866f2db413a3649a8fe4fd7b428ae58be2c0f6bca5eefd53ca2be \ + --hash=sha256:c8b3a1cebcba9b3669ed1a84cc65bf005728d2f0bc1ed2a6594a992e817f3a50 \ + --hash=sha256:de3ceed6e661954871d6cd78b410213bdcb136f79aafe22aa7182e028b8c7307 \ + --hash=sha256:f0eca9ca8628dbb4e916ae2491d72957fdd35f7a5d326b7032a345f111ac07fe # via readme-renderer nox==2024.4.15 \ --hash=sha256:6492236efa15a460ecb98e7b67562a28b70da006ab0be164e8821177577c0565 \ @@ -460,9 +460,9 @@ python-dateutil==2.9.0.post0 \ --hash=sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3 \ --hash=sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427 # via gcp-releasetool -readme-renderer==43.0 \ - --hash=sha256:1818dd28140813509eeed8d62687f7cd4f7bad90d4db586001c5dc09d4fde311 \ - --hash=sha256:19db308d86ecd60e5affa3b2a98f017af384678c63c88e5d4556a380e674f3f9 +readme-renderer==44.0 \ + --hash=sha256:2fbca89b81a08526aadf1357a8c2ae889ec05fb03f5da67f9769c9a592166151 \ + --hash=sha256:8712034eabbfa6805cacf1402b4eeb2a73028f72d1166d6f5cb7f9c047c5d1e1 # via twine requests==2.32.3 \ --hash=sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760 \ From 5b71ff1eed56acf2181a5b4c201261669fe2eefe Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Wed, 25 Sep 2024 16:10:46 -0700 Subject: [PATCH 821/892] feat: Add support for Cloud Bigtable Node Scaling Factor for CBT Clusters (#1023) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: Add support for Cloud Bigtable Row Affinity in App Profiles PiperOrigin-RevId: 673093969 Source-Link: https://github.com/googleapis/googleapis/commit/cbf696d38a963c5ab333f85fc9a910b5698ad415 Source-Link: https://github.com/googleapis/googleapis-gen/commit/a2f7ec1191813304b3bd0097caa33956bdb3b637 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiYTJmN2VjMTE5MTgxMzMwNGIzYmQwMDk3Y2FhMzM5NTZiZGIzYjYzNyJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * feat: Add support for Cloud Bigtable Node Scaling Factor for CBT Clusters PiperOrigin-RevId: 676993928 Source-Link: https://github.com/googleapis/googleapis/commit/407deca15c5c09ccf5050c8c8388f44ed0ff937d Source-Link: https://github.com/googleapis/googleapis-gen/commit/4fae77920da0f4503bbf5f3ce34fc07bcd6d3d9a Copy-Tag: 
eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiNGZhZTc3OTIwZGEwZjQ1MDNiYmY1ZjNjZTM0ZmMwN2JjZDZkM2Q5YSJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- .../cloud/bigtable_admin_v2/types/instance.py | 59 +++++++++++++++++++ .../fixup_bigtable_admin_v2_keywords.py | 2 +- .../test_bigtable_instance_admin.py | 24 +++++++- 3 files changed, 82 insertions(+), 3 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py index f7916d44b830..34b52acd2350 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py @@ -237,6 +237,9 @@ class Cluster(proto.Message): The number of nodes allocated to this cluster. More nodes enable higher throughput and more consistent performance. + node_scaling_factor (google.cloud.bigtable_admin_v2.types.Cluster.NodeScalingFactor): + Immutable. The node scaling factor of this + cluster. cluster_config (google.cloud.bigtable_admin_v2.types.Cluster.ClusterConfig): Configuration for this cluster. @@ -284,6 +287,28 @@ class State(proto.Enum): RESIZING = 3 DISABLED = 4 + class NodeScalingFactor(proto.Enum): + r"""Possible node scaling factors of the clusters. Node scaling + delivers better latency and more throughput by removing node + boundaries. + + Values: + NODE_SCALING_FACTOR_UNSPECIFIED (0): + No node scaling specified. Defaults to + NODE_SCALING_FACTOR_1X. + NODE_SCALING_FACTOR_1X (1): + The cluster is running with a scaling factor + of 1. + NODE_SCALING_FACTOR_2X (2): + The cluster is running with a scaling factor of 2. All node + count values must be in increments of 2 with this scaling + factor enabled, otherwise an INVALID_ARGUMENT error will be + returned. + """ + NODE_SCALING_FACTOR_UNSPECIFIED = 0 + NODE_SCALING_FACTOR_1X = 1 + NODE_SCALING_FACTOR_2X = 2 + class ClusterAutoscalingConfig(proto.Message): r"""Autoscaling config for a cluster. @@ -364,6 +389,11 @@ class EncryptionConfig(proto.Message): proto.INT32, number=4, ) + node_scaling_factor: NodeScalingFactor = proto.Field( + proto.ENUM, + number=9, + enum=NodeScalingFactor, + ) cluster_config: ClusterConfig = proto.Field( proto.MESSAGE, number=7, @@ -468,18 +498,47 @@ class MultiClusterRoutingUseAny(proto.Message): in a region are considered equidistant. Choosing this option sacrifices read-your-writes consistency to improve availability. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + Attributes: cluster_ids (MutableSequence[str]): The set of clusters to route to. The order is ignored; clusters will be tried in order of distance. If left empty, all clusters are eligible. + row_affinity (google.cloud.bigtable_admin_v2.types.AppProfile.MultiClusterRoutingUseAny.RowAffinity): + Row affinity sticky routing based on the row + key of the request. Requests that span multiple + rows are routed non-deterministically. + + This field is a member of `oneof`_ ``affinity``. """ + class RowAffinity(proto.Message): + r"""If enabled, Bigtable will route the request based on the row + key of the request, rather than randomly. 
Instead, each row key + will be assigned to a cluster, and will stick to that cluster. + If clusters are added or removed, then this may affect which row + keys stick to which clusters. To avoid this, users can use a + cluster group to specify which clusters are to be used. In this + case, new clusters that are not a part of the cluster group will + not be routed to, and routing will be unaffected by the new + cluster. Moreover, clusters specified in the cluster group + cannot be deleted unless removed from the cluster group. + + """ + cluster_ids: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=1, ) + row_affinity: "AppProfile.MultiClusterRoutingUseAny.RowAffinity" = proto.Field( + proto.MESSAGE, + number=3, + oneof="affinity", + message="AppProfile.MultiClusterRoutingUseAny.RowAffinity", + ) class SingleClusterRouting(proto.Message): r"""Unconditionally routes all read/write requests to a specific diff --git a/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py b/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py index 073b1ad00237..0c242cb09ce2 100644 --- a/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py +++ b/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py @@ -84,7 +84,7 @@ class bigtable_adminCallTransformer(cst.CSTTransformer): 'update_app_profile': ('app_profile', 'update_mask', 'ignore_warnings', ), 'update_authorized_view': ('authorized_view', 'update_mask', 'ignore_warnings', ), 'update_backup': ('backup', 'update_mask', ), - 'update_cluster': ('name', 'location', 'state', 'serve_nodes', 'cluster_config', 'default_storage_type', 'encryption_config', ), + 'update_cluster': ('name', 'location', 'state', 'serve_nodes', 'node_scaling_factor', 'cluster_config', 'default_storage_type', 'encryption_config', ), 'update_instance': ('display_name', 'name', 'state', 'type_', 'labels', 'create_time', 'satisfies_pzs', ), 'update_table': ('table', 'update_mask', ), } diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index 26a7989a195d..961183b717f9 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -3837,6 +3837,7 @@ def test_get_cluster(request_type, transport: str = "grpc"): location="location_value", state=instance.Cluster.State.READY, serve_nodes=1181, + node_scaling_factor=instance.Cluster.NodeScalingFactor.NODE_SCALING_FACTOR_1X, default_storage_type=common.StorageType.SSD, ) response = client.get_cluster(request) @@ -3853,6 +3854,10 @@ def test_get_cluster(request_type, transport: str = "grpc"): assert response.location == "location_value" assert response.state == instance.Cluster.State.READY assert response.serve_nodes == 1181 + assert ( + response.node_scaling_factor + == instance.Cluster.NodeScalingFactor.NODE_SCALING_FACTOR_1X + ) assert response.default_storage_type == common.StorageType.SSD @@ -3956,6 +3961,7 @@ async def test_get_cluster_empty_call_async(): location="location_value", state=instance.Cluster.State.READY, serve_nodes=1181, + node_scaling_factor=instance.Cluster.NodeScalingFactor.NODE_SCALING_FACTOR_1X, default_storage_type=common.StorageType.SSD, ) ) @@ -4030,6 +4036,7 @@ async def test_get_cluster_async( 
location="location_value", state=instance.Cluster.State.READY, serve_nodes=1181, + node_scaling_factor=instance.Cluster.NodeScalingFactor.NODE_SCALING_FACTOR_1X, default_storage_type=common.StorageType.SSD, ) ) @@ -4047,6 +4054,10 @@ async def test_get_cluster_async( assert response.location == "location_value" assert response.state == instance.Cluster.State.READY assert response.serve_nodes == 1181 + assert ( + response.node_scaling_factor + == instance.Cluster.NodeScalingFactor.NODE_SCALING_FACTOR_1X + ) assert response.default_storage_type == common.StorageType.SSD @@ -11381,6 +11392,7 @@ def test_create_cluster_rest(request_type): "location": "location_value", "state": 1, "serve_nodes": 1181, + "node_scaling_factor": 1, "cluster_config": { "cluster_autoscaling_config": { "autoscaling_limits": { @@ -11800,6 +11812,7 @@ def test_get_cluster_rest(request_type): location="location_value", state=instance.Cluster.State.READY, serve_nodes=1181, + node_scaling_factor=instance.Cluster.NodeScalingFactor.NODE_SCALING_FACTOR_1X, default_storage_type=common.StorageType.SSD, ) @@ -11820,6 +11833,10 @@ def test_get_cluster_rest(request_type): assert response.location == "location_value" assert response.state == instance.Cluster.State.READY assert response.serve_nodes == 1181 + assert ( + response.node_scaling_factor + == instance.Cluster.NodeScalingFactor.NODE_SCALING_FACTOR_1X + ) assert response.default_storage_type == common.StorageType.SSD @@ -12577,6 +12594,7 @@ def test_partial_update_cluster_rest(request_type): "location": "location_value", "state": 1, "serve_nodes": 1181, + "node_scaling_factor": 1, "cluster_config": { "cluster_autoscaling_config": { "autoscaling_limits": { @@ -13267,7 +13285,8 @@ def test_create_app_profile_rest(request_type): "etag": "etag_value", "description": "description_value", "multi_cluster_routing_use_any": { - "cluster_ids": ["cluster_ids_value1", "cluster_ids_value2"] + "cluster_ids": ["cluster_ids_value1", "cluster_ids_value2"], + "row_affinity": {}, }, "single_cluster_routing": { "cluster_id": "cluster_id_value", @@ -14396,7 +14415,8 @@ def test_update_app_profile_rest(request_type): "etag": "etag_value", "description": "description_value", "multi_cluster_routing_use_any": { - "cluster_ids": ["cluster_ids_value1", "cluster_ids_value2"] + "cluster_ids": ["cluster_ids_value1", "cluster_ids_value2"], + "row_affinity": {}, }, "single_cluster_routing": { "cluster_id": "cluster_id_value", From d4d990d89e05318538d3ed1ce85e337e7ab7a479 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Wed, 25 Sep 2024 16:11:36 -0700 Subject: [PATCH 822/892] build(python): release script update (#1024) Source-Link: https://github.com/googleapis/synthtool/commit/71a72973dddbc66ea64073b53eda49f0d22e0942 Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:e8dcfd7cbfd8beac3a3ff8d3f3185287ea0625d859168cc80faccfc9a7a00455 Co-authored-by: Owl Bot Co-authored-by: Anthonios Partheniou --- packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml | 4 ++-- packages/google-cloud-bigtable/.github/workflows/unittest.yml | 1 + packages/google-cloud-bigtable/.kokoro/release.sh | 2 +- packages/google-cloud-bigtable/.kokoro/release/common.cfg | 2 +- 4 files changed, 5 insertions(+), 4 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 8b90899d2137..597e0c3261ca 100644 --- 
a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:2dc6f67639bee669c33c6277a624ab9857d363e2fd33ac5b02d417b7d25f1ffc -# created: 2024-08-15T17:41:26.438340772Z + digest: sha256:e8dcfd7cbfd8beac3a3ff8d3f3185287ea0625d859168cc80faccfc9a7a00455 +# created: 2024-09-16T21:04:09.091105552Z diff --git a/packages/google-cloud-bigtable/.github/workflows/unittest.yml b/packages/google-cloud-bigtable/.github/workflows/unittest.yml index 87d08602f194..04ade4f43f9f 100644 --- a/packages/google-cloud-bigtable/.github/workflows/unittest.yml +++ b/packages/google-cloud-bigtable/.github/workflows/unittest.yml @@ -30,6 +30,7 @@ jobs: with: name: coverage-artifact-${{ matrix.python }} path: .coverage-${{ matrix.python }} + include-hidden-files: true cover: runs-on: ubuntu-latest diff --git a/packages/google-cloud-bigtable/.kokoro/release.sh b/packages/google-cloud-bigtable/.kokoro/release.sh index d21aacc5e220..cfc431647232 100755 --- a/packages/google-cloud-bigtable/.kokoro/release.sh +++ b/packages/google-cloud-bigtable/.kokoro/release.sh @@ -23,7 +23,7 @@ python3 -m releasetool publish-reporter-script > /tmp/publisher-script; source / export PYTHONUNBUFFERED=1 # Move into the package, build the distribution and upload. -TWINE_PASSWORD=$(cat "${KOKORO_KEYSTORE_DIR}/73713_google-cloud-pypi-token-keystore-1") +TWINE_PASSWORD=$(cat "${KOKORO_KEYSTORE_DIR}/73713_google-cloud-pypi-token-keystore-2") cd github/python-bigtable python3 setup.py sdist bdist_wheel twine upload --username __token__ --password "${TWINE_PASSWORD}" dist/* diff --git a/packages/google-cloud-bigtable/.kokoro/release/common.cfg b/packages/google-cloud-bigtable/.kokoro/release/common.cfg index 2a8fd970c2af..b79e3a67dc9f 100644 --- a/packages/google-cloud-bigtable/.kokoro/release/common.cfg +++ b/packages/google-cloud-bigtable/.kokoro/release/common.cfg @@ -28,7 +28,7 @@ before_action { fetch_keystore { keystore_resource { keystore_config_id: 73713 - keyname: "google-cloud-pypi-token-keystore-1" + keyname: "google-cloud-pypi-token-keystore-2" } } } From c70eec7b8da93b094eb31def23d1e323205fbb2a Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Fri, 25 Oct 2024 09:09:49 -0600 Subject: [PATCH 823/892] chore(docs): include imports in snippets (#1027) * chore(docs): added imports into snippets * simplify print regions --- .../snippets/deletes/deletes_snippets.py | 37 ++++++---- .../deletes/deletes_snippets_async.py | 34 +++++----- .../snippets/filters/filter_snippets.py | 67 ++++++++++++++++--- .../snippets/filters/filter_snippets_async.py | 8 +-- .../samples/snippets/reads/read_snippets.py | 32 ++++++--- 5 files changed, 123 insertions(+), 55 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/deletes_snippets.py b/packages/google-cloud-bigtable/samples/snippets/deletes/deletes_snippets.py index 72f812ca2bd5..6cdbf33a69db 100644 --- a/packages/google-cloud-bigtable/samples/snippets/deletes/deletes_snippets.py +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/deletes_snippets.py @@ -14,14 +14,11 @@ # limitations under the License. -from google.cloud import bigtable - -# Write your code here. 
- - # [START bigtable_delete_from_column] def delete_from_column(project_id, instance_id, table_id): - client = bigtable.Client(project=project_id, admin=True) + from google.cloud.bigtable import Client + + client = Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) row = table.row("phone#4c410523#20190501") @@ -33,7 +30,9 @@ def delete_from_column(project_id, instance_id, table_id): # [START bigtable_delete_from_column_family] def delete_from_column_family(project_id, instance_id, table_id): - client = bigtable.Client(project=project_id, admin=True) + from google.cloud.bigtable import Client + + client = Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) row = table.row("phone#4c410523#20190501") @@ -46,7 +45,9 @@ def delete_from_column_family(project_id, instance_id, table_id): # [START bigtable_delete_from_row] def delete_from_row(project_id, instance_id, table_id): - client = bigtable.Client(project=project_id, admin=True) + from google.cloud.bigtable import Client + + client = Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) row = table.row("phone#4c410523#20190501") @@ -58,7 +59,9 @@ def delete_from_row(project_id, instance_id, table_id): # [START bigtable_streaming_and_batching] def streaming_and_batching(project_id, instance_id, table_id): - client = bigtable.Client(project=project_id, admin=True) + from google.cloud.bigtable import Client + + client = Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) batcher = table.mutations_batcher(flush_count=2) @@ -74,7 +77,9 @@ def streaming_and_batching(project_id, instance_id, table_id): # [START bigtable_check_and_mutate] def check_and_mutate(project_id, instance_id, table_id): - client = bigtable.Client(project=project_id, admin=True) + from google.cloud.bigtable import Client + + client = Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) row = table.row("phone#4c410523#20190501") @@ -88,7 +93,9 @@ def check_and_mutate(project_id, instance_id, table_id): # [START bigtable_drop_row_range] def drop_row_range(project_id, instance_id, table_id): - client = bigtable.Client(project=project_id, admin=True) + from google.cloud.bigtable import Client + + client = Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) row_key_prefix = "phone#4c410523" @@ -99,7 +106,9 @@ def drop_row_range(project_id, instance_id, table_id): # [START bigtable_delete_column_family] def delete_column_family(project_id, instance_id, table_id): - client = bigtable.Client(project=project_id, admin=True) + from google.cloud.bigtable import Client + + client = Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) column_family_id = "stats_summary" @@ -111,7 +120,9 @@ def delete_column_family(project_id, instance_id, table_id): # [START bigtable_delete_table] def delete_table(project_id, instance_id, table_id): - client = bigtable.Client(project=project_id, admin=True) + from google.cloud.bigtable import Client + + client = Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) table.delete() diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/deletes_snippets_async.py 
b/packages/google-cloud-bigtable/samples/snippets/deletes/deletes_snippets_async.py index 8f3711e0649c..2241fab4a71b 100644 --- a/packages/google-cloud-bigtable/samples/snippets/deletes/deletes_snippets_async.py +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/deletes_snippets_async.py @@ -14,22 +14,11 @@ # limitations under the License. -from google.cloud.bigtable.data import ( - BigtableDataClientAsync, - DeleteRangeFromColumn, - DeleteAllFromFamily, - DeleteAllFromRow, - RowMutationEntry, - row_filters, - ReadRowsQuery, -) - - -# Write your code here. - - # [START bigtable_delete_from_column_asyncio] async def delete_from_column(project_id, instance_id, table_id): + from google.cloud.bigtable.data import BigtableDataClientAsync + from google.cloud.bigtable.data import DeleteRangeFromColumn + client = BigtableDataClientAsync(project=project_id) table = client.get_table(instance_id, table_id) @@ -46,6 +35,9 @@ async def delete_from_column(project_id, instance_id, table_id): # [START bigtable_delete_from_column_family_asyncio] async def delete_from_column_family(project_id, instance_id, table_id): + from google.cloud.bigtable.data import BigtableDataClientAsync + from google.cloud.bigtable.data import DeleteAllFromFamily + client = BigtableDataClientAsync(project=project_id) table = client.get_table(instance_id, table_id) @@ -60,6 +52,9 @@ async def delete_from_column_family(project_id, instance_id, table_id): # [START bigtable_delete_from_row_asyncio] async def delete_from_row(project_id, instance_id, table_id): + from google.cloud.bigtable.data import BigtableDataClientAsync + from google.cloud.bigtable.data import DeleteAllFromRow + client = BigtableDataClientAsync(project=project_id) table = client.get_table(instance_id, table_id) @@ -73,6 +68,11 @@ async def delete_from_row(project_id, instance_id, table_id): # [START bigtable_streaming_and_batching_asyncio] async def streaming_and_batching(project_id, instance_id, table_id): + from google.cloud.bigtable.data import BigtableDataClientAsync + from google.cloud.bigtable.data import DeleteRangeFromColumn + from google.cloud.bigtable.data import RowMutationEntry + from google.cloud.bigtable.data import ReadRowsQuery + client = BigtableDataClientAsync(project=project_id) table = client.get_table(instance_id, table_id) @@ -95,12 +95,16 @@ async def streaming_and_batching(project_id, instance_id, table_id): # [START bigtable_check_and_mutate_asyncio] async def check_and_mutate(project_id, instance_id, table_id): + from google.cloud.bigtable.data import BigtableDataClientAsync + from google.cloud.bigtable.data import DeleteRangeFromColumn + from google.cloud.bigtable.data.row_filters import LiteralValueFilter + client = BigtableDataClientAsync(project=project_id) table = client.get_table(instance_id, table_id) await table.check_and_mutate_row( "phone#4c410523#20190501", - predicate=row_filters.LiteralValueFilter("PQ2A.190405.003"), + predicate=LiteralValueFilter("PQ2A.190405.003"), true_case_mutations=DeleteRangeFromColumn( family="cell_plan", qualifier=b"data_plan_01gb" ), diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/filter_snippets.py b/packages/google-cloud-bigtable/samples/snippets/filters/filter_snippets.py index 4211378f3deb..d17c773a4730 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/filter_snippets.py +++ b/packages/google-cloud-bigtable/samples/snippets/filters/filter_snippets.py @@ -13,18 +13,12 @@ # See the License for the specific language governing permissions and # 
limitations under the License. -# [START bigtable_filters_print] -import datetime - -from google.cloud import bigtable -import google.cloud.bigtable.row_filters as row_filters - -# Write your code here. -# [START_EXCLUDE] - # [START bigtable_filters_limit_row_sample] def filter_limit_row_sample(project_id, instance_id, table_id): + from google.cloud import bigtable + from google.cloud.bigtable import row_filters + client = bigtable.Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) @@ -37,6 +31,9 @@ def filter_limit_row_sample(project_id, instance_id, table_id): # [END bigtable_filters_limit_row_sample] # [START bigtable_filters_limit_row_regex] def filter_limit_row_regex(project_id, instance_id, table_id): + from google.cloud import bigtable + from google.cloud.bigtable import row_filters + client = bigtable.Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) @@ -51,6 +48,9 @@ def filter_limit_row_regex(project_id, instance_id, table_id): # [END bigtable_filters_limit_row_regex] # [START bigtable_filters_limit_cells_per_col] def filter_limit_cells_per_col(project_id, instance_id, table_id): + from google.cloud import bigtable + from google.cloud.bigtable import row_filters + client = bigtable.Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) @@ -63,6 +63,9 @@ def filter_limit_cells_per_col(project_id, instance_id, table_id): # [END bigtable_filters_limit_cells_per_col] # [START bigtable_filters_limit_cells_per_row] def filter_limit_cells_per_row(project_id, instance_id, table_id): + from google.cloud import bigtable + from google.cloud.bigtable import row_filters + client = bigtable.Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) @@ -75,6 +78,9 @@ def filter_limit_cells_per_row(project_id, instance_id, table_id): # [END bigtable_filters_limit_cells_per_row] # [START bigtable_filters_limit_cells_per_row_offset] def filter_limit_cells_per_row_offset(project_id, instance_id, table_id): + from google.cloud import bigtable + from google.cloud.bigtable import row_filters + client = bigtable.Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) @@ -87,6 +93,9 @@ def filter_limit_cells_per_row_offset(project_id, instance_id, table_id): # [END bigtable_filters_limit_cells_per_row_offset] # [START bigtable_filters_limit_col_family_regex] def filter_limit_col_family_regex(project_id, instance_id, table_id): + from google.cloud import bigtable + from google.cloud.bigtable import row_filters + client = bigtable.Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) @@ -101,6 +110,9 @@ def filter_limit_col_family_regex(project_id, instance_id, table_id): # [END bigtable_filters_limit_col_family_regex] # [START bigtable_filters_limit_col_qualifier_regex] def filter_limit_col_qualifier_regex(project_id, instance_id, table_id): + from google.cloud import bigtable + from google.cloud.bigtable import row_filters + client = bigtable.Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) @@ -115,6 +127,9 @@ def filter_limit_col_qualifier_regex(project_id, instance_id, table_id): # [END bigtable_filters_limit_col_qualifier_regex] # [START bigtable_filters_limit_col_range] def 
filter_limit_col_range(project_id, instance_id, table_id): + from google.cloud import bigtable + from google.cloud.bigtable import row_filters + client = bigtable.Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) @@ -131,6 +146,9 @@ def filter_limit_col_range(project_id, instance_id, table_id): # [END bigtable_filters_limit_col_range] # [START bigtable_filters_limit_value_range] def filter_limit_value_range(project_id, instance_id, table_id): + from google.cloud import bigtable + from google.cloud.bigtable import row_filters + client = bigtable.Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) @@ -148,6 +166,9 @@ def filter_limit_value_range(project_id, instance_id, table_id): def filter_limit_value_regex(project_id, instance_id, table_id): + from google.cloud import bigtable + from google.cloud.bigtable import row_filters + client = bigtable.Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) @@ -162,6 +183,10 @@ def filter_limit_value_regex(project_id, instance_id, table_id): # [END bigtable_filters_limit_value_regex] # [START bigtable_filters_limit_timestamp_range] def filter_limit_timestamp_range(project_id, instance_id, table_id): + from google.cloud import bigtable + from google.cloud.bigtable import row_filters + import datetime + client = bigtable.Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) @@ -178,6 +203,9 @@ def filter_limit_timestamp_range(project_id, instance_id, table_id): # [END bigtable_filters_limit_timestamp_range] # [START bigtable_filters_limit_block_all] def filter_limit_block_all(project_id, instance_id, table_id): + from google.cloud import bigtable + from google.cloud.bigtable import row_filters + client = bigtable.Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) @@ -190,6 +218,9 @@ def filter_limit_block_all(project_id, instance_id, table_id): # [END bigtable_filters_limit_block_all] # [START bigtable_filters_limit_pass_all] def filter_limit_pass_all(project_id, instance_id, table_id): + from google.cloud import bigtable + from google.cloud.bigtable import row_filters + client = bigtable.Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) @@ -202,6 +233,9 @@ def filter_limit_pass_all(project_id, instance_id, table_id): # [END bigtable_filters_limit_pass_all] # [START bigtable_filters_modify_strip_value] def filter_modify_strip_value(project_id, instance_id, table_id): + from google.cloud import bigtable + from google.cloud.bigtable import row_filters + client = bigtable.Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) @@ -214,6 +248,9 @@ def filter_modify_strip_value(project_id, instance_id, table_id): # [END bigtable_filters_modify_strip_value] # [START bigtable_filters_modify_apply_label] def filter_modify_apply_label(project_id, instance_id, table_id): + from google.cloud import bigtable + from google.cloud.bigtable import row_filters + client = bigtable.Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) @@ -226,6 +263,9 @@ def filter_modify_apply_label(project_id, instance_id, table_id): # [END bigtable_filters_modify_apply_label] # [START bigtable_filters_composing_chain] 
def filter_composing_chain(project_id, instance_id, table_id): + from google.cloud import bigtable + from google.cloud.bigtable import row_filters + client = bigtable.Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) @@ -245,6 +285,9 @@ def filter_composing_chain(project_id, instance_id, table_id): # [END bigtable_filters_composing_chain] # [START bigtable_filters_composing_interleave] def filter_composing_interleave(project_id, instance_id, table_id): + from google.cloud import bigtable + from google.cloud.bigtable import row_filters + client = bigtable.Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) @@ -264,6 +307,9 @@ def filter_composing_interleave(project_id, instance_id, table_id): # [END bigtable_filters_composing_interleave] # [START bigtable_filters_composing_condition] def filter_composing_condition(project_id, instance_id, table_id): + from google.cloud import bigtable + from google.cloud.bigtable import row_filters + client = bigtable.Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) @@ -285,9 +331,8 @@ def filter_composing_condition(project_id, instance_id, table_id): # [END bigtable_filters_composing_condition] -# [END_EXCLUDE] - +# [START bigtable_filters_print] def print_row(row): print("Reading data for {}:".format(row.row_key.decode("utf-8"))) for cf, cols in sorted(row.cells.items()): diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/filter_snippets_async.py b/packages/google-cloud-bigtable/samples/snippets/filters/filter_snippets_async.py index e47bbb3fb2fd..899d4c5c78e9 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/filter_snippets_async.py +++ b/packages/google-cloud-bigtable/samples/snippets/filters/filter_snippets_async.py @@ -11,9 +11,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from google.cloud._helpers import _datetime_from_microseconds -from google.cloud.bigtable.data import Row - # [START bigtable_filters_limit_row_sample_asyncio] async def filter_limit_row_sample(project_id, instance_id, table_id): @@ -368,10 +365,11 @@ async def filter_composing_condition(project_id, instance_id, table_id): # [END bigtable_filters_composing_condition_asyncio] -# [END_EXCLUDE] -def print_row(row: Row): +def print_row(row): + from google.cloud._helpers import _datetime_from_microseconds + print("Reading data for {}:".format(row.row_key.decode("utf-8"))) last_family = None for cell in row.cells: diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/read_snippets.py b/packages/google-cloud-bigtable/samples/snippets/reads/read_snippets.py index afd0955b8bbf..210ca73a71bd 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/read_snippets.py +++ b/packages/google-cloud-bigtable/samples/snippets/reads/read_snippets.py @@ -13,17 +13,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -# [START bigtable_reads_print] -from google.cloud import bigtable -import google.cloud.bigtable.row_filters as row_filters -from google.cloud.bigtable.row_set import RowSet - -# Write your code here. 
-# [START_EXCLUDE] - - # [START bigtable_reads_row] def read_row(project_id, instance_id, table_id): + from google.cloud import bigtable + client = bigtable.Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) @@ -38,6 +31,9 @@ def read_row(project_id, instance_id, table_id): # [START bigtable_reads_row_partial] def read_row_partial(project_id, instance_id, table_id): + from google.cloud import bigtable + from google.cloud.bigtable import row_filters + client = bigtable.Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) @@ -52,6 +48,9 @@ def read_row_partial(project_id, instance_id, table_id): # [END bigtable_reads_row_partial] # [START bigtable_reads_rows] def read_rows(project_id, instance_id, table_id): + from google.cloud import bigtable + from google.cloud.bigtable.row_set import RowSet + client = bigtable.Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) @@ -68,6 +67,9 @@ def read_rows(project_id, instance_id, table_id): # [END bigtable_reads_rows] # [START bigtable_reads_row_range] def read_row_range(project_id, instance_id, table_id): + from google.cloud import bigtable + from google.cloud.bigtable.row_set import RowSet + client = bigtable.Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) @@ -85,6 +87,9 @@ def read_row_range(project_id, instance_id, table_id): # [END bigtable_reads_row_range] # [START bigtable_reads_row_ranges] def read_row_ranges(project_id, instance_id, table_id): + from google.cloud import bigtable + from google.cloud.bigtable.row_set import RowSet + client = bigtable.Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) @@ -105,6 +110,9 @@ def read_row_ranges(project_id, instance_id, table_id): # [END bigtable_reads_row_ranges] # [START bigtable_reads_prefix] def read_prefix(project_id, instance_id, table_id): + from google.cloud import bigtable + from google.cloud.bigtable.row_set import RowSet + client = bigtable.Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) @@ -122,6 +130,9 @@ def read_prefix(project_id, instance_id, table_id): # [END bigtable_reads_prefix] # [START bigtable_reads_filter] def read_filter(project_id, instance_id, table_id): + from google.cloud import bigtable + from google.cloud.bigtable import row_filters + client = bigtable.Client(project=project_id, admin=True) instance = client.instance(instance_id) table = instance.table(table_id) @@ -132,9 +143,8 @@ def read_filter(project_id, instance_id, table_id): # [END bigtable_reads_filter] -# [END_EXCLUDE] - +# [START bigtable_reads_print] def print_row(row): print("Reading data for {}:".format(row.row_key.decode("utf-8"))) for cf, cols in sorted(row.cells.items()): From 1ce05ac327812dcc69c0b42c58d63f7ccba2b0d7 Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Mon, 28 Oct 2024 19:30:14 +0100 Subject: [PATCH 824/892] chore(deps): update all dependencies (#934) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore(deps): update all dependencies * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- .../.github/workflows/system_emulated.yml | 2 +- 
.../google-cloud-bigtable/samples/beam/requirements-test.txt | 2 +- packages/google-cloud-bigtable/samples/beam/requirements.txt | 4 ++-- .../google-cloud-bigtable/samples/hello/requirements-test.txt | 2 +- packages/google-cloud-bigtable/samples/hello/requirements.txt | 2 +- .../samples/hello_happybase/requirements-test.txt | 2 +- .../samples/instanceadmin/requirements-test.txt | 2 +- .../samples/instanceadmin/requirements.txt | 2 +- .../samples/metricscaler/requirements-test.txt | 2 +- .../samples/metricscaler/requirements.txt | 4 ++-- .../samples/quickstart/requirements-test.txt | 2 +- .../google-cloud-bigtable/samples/quickstart/requirements.txt | 2 +- .../samples/quickstart_happybase/requirements-test.txt | 2 +- .../samples/snippets/data_client/requirements-test.txt | 2 +- .../samples/snippets/data_client/requirements.txt | 2 +- .../samples/snippets/deletes/requirements-test.txt | 2 +- .../samples/snippets/deletes/requirements.txt | 2 +- .../samples/snippets/filters/requirements-test.txt | 2 +- .../samples/snippets/filters/requirements.txt | 2 +- .../samples/snippets/reads/requirements-test.txt | 2 +- .../samples/snippets/reads/requirements.txt | 2 +- .../samples/snippets/writes/requirements-test.txt | 2 +- .../samples/snippets/writes/requirements.txt | 2 +- .../samples/tableadmin/requirements-test.txt | 2 +- .../google-cloud-bigtable/samples/tableadmin/requirements.txt | 2 +- 25 files changed, 27 insertions(+), 27 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml b/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml index fa5ef15af276..c9dab998c992 100644 --- a/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml +++ b/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml @@ -20,7 +20,7 @@ jobs: python-version: '3.8' - name: Setup GCloud SDK - uses: google-github-actions/setup-gcloud@v2.1.0 + uses: google-github-actions/setup-gcloud@v2.1.1 - name: Install / run Nox run: | diff --git a/packages/google-cloud-bigtable/samples/beam/requirements-test.txt b/packages/google-cloud-bigtable/samples/beam/requirements-test.txt index cb87efc0ff71..fe93bd52ff68 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements-test.txt @@ -1 +1 @@ -pytest==7.4.4 +pytest==8.3.2 diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index 86e305c224cd..9010a422b9f2 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ -apache-beam==2.54.0 -google-cloud-bigtable==2.22.0 +apache-beam==2.57.0 +google-cloud-bigtable==2.25.0 google-cloud-core==2.4.1 diff --git a/packages/google-cloud-bigtable/samples/hello/requirements-test.txt b/packages/google-cloud-bigtable/samples/hello/requirements-test.txt index cb87efc0ff71..fe93bd52ff68 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements-test.txt @@ -1 +1 @@ -pytest==7.4.4 +pytest==8.3.2 diff --git a/packages/google-cloud-bigtable/samples/hello/requirements.txt b/packages/google-cloud-bigtable/samples/hello/requirements.txt index dd4fc1fb3241..9a665c3be4d8 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements.txt @@ -1,2 +1,2 @@ 
-google-cloud-bigtable==2.23.0 +google-cloud-bigtable==2.25.0 google-cloud-core==2.4.1 diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt b/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt index cb87efc0ff71..fe93bd52ff68 100644 --- a/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt @@ -1 +1 @@ -pytest==7.4.4 +pytest==8.3.2 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt index cb87efc0ff71..fe93bd52ff68 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt @@ -1 +1 @@ -pytest==7.4.4 +pytest==8.3.2 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt index a01a0943c28e..bb8b24a679fe 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.22.0 +google-cloud-bigtable==2.25.0 backoff==2.2.1 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt index c0d4f70035bc..caf5f029cb6e 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt @@ -1,3 +1,3 @@ -pytest==7.4.4 +pytest==8.3.2 mock==5.1.0 google-cloud-testutils diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index be3b2b2223e5..9136f4763d34 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.22.0 -google-cloud-monitoring==2.19.0 +google-cloud-bigtable==2.25.0 +google-cloud-monitoring==2.22.2 diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt index 5cb431d92b98..a636261208aa 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt @@ -1,2 +1,2 @@ -pytest==7.4.4 +pytest==8.3.2 pytest-asyncio diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt index 835e1bc780c5..3760ce41505d 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.23.0 +google-cloud-bigtable==2.25.0 diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt b/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt index cb87efc0ff71..fe93bd52ff68 100644 --- a/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt @@ -1 +1 @@ -pytest==7.4.4 +pytest==8.3.2 diff 
--git a/packages/google-cloud-bigtable/samples/snippets/data_client/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/data_client/requirements-test.txt index 5cb431d92b98..a636261208aa 100644 --- a/packages/google-cloud-bigtable/samples/snippets/data_client/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/data_client/requirements-test.txt @@ -1,2 +1,2 @@ -pytest==7.4.4 +pytest==8.3.2 pytest-asyncio diff --git a/packages/google-cloud-bigtable/samples/snippets/data_client/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/data_client/requirements.txt index 835e1bc780c5..3760ce41505d 100644 --- a/packages/google-cloud-bigtable/samples/snippets/data_client/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/data_client/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.23.0 +google-cloud-bigtable==2.25.0 diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt index 5cb431d92b98..a636261208aa 100644 --- a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt @@ -1,2 +1,2 @@ -pytest==7.4.4 +pytest==8.3.2 pytest-asyncio diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt index 835e1bc780c5..3760ce41505d 100644 --- a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.23.0 +google-cloud-bigtable==2.25.0 diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt index 5cb431d92b98..a636261208aa 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt @@ -1,2 +1,2 @@ -pytest==7.4.4 +pytest==8.3.2 pytest-asyncio diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt index 835e1bc780c5..3760ce41505d 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.23.0 +google-cloud-bigtable==2.25.0 diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt index cb87efc0ff71..fe93bd52ff68 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt @@ -1 +1 @@ -pytest==7.4.4 +pytest==8.3.2 diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt index 6dc98589311e..3760ce41505d 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.22.0 +google-cloud-bigtable==2.25.0 diff --git 
a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt index 43b02e724796..0f4b18778fcb 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt @@ -1,2 +1,2 @@ backoff==2.2.1 -pytest==7.4.4 +pytest==8.3.2 diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt index 07b0a191d101..82d7fad3307c 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.22.0 \ No newline at end of file +google-cloud-bigtable==2.25.0 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt index aa143f59dfbe..7f86b7bc42c5 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt @@ -1,2 +1,2 @@ -pytest==7.4.4 +pytest==8.3.2 google-cloud-testutils==1.4.0 diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt index 6dc98589311e..3760ce41505d 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.22.0 +google-cloud-bigtable==2.25.0 From 91a27dfe9233c28e576cbb2dfe7a624ad07a62ec Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Mon, 4 Nov 2024 14:02:25 -0800 Subject: [PATCH 825/892] fix: registering duplicate instance (#1033) --- .../cloud/bigtable/data/_async/client.py | 2 +- .../tests/unit/data/_async/test_client.py | 42 +++++++++++++++++++ 2 files changed, 43 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py index 82a874918529..b48921623d90 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py @@ -350,7 +350,7 @@ async def _register_instance( instance_name, owner.table_name, owner.app_profile_id ) self._instance_owners.setdefault(instance_key, set()).add(id(owner)) - if instance_name not in self._active_instances: + if instance_key not in self._active_instances: self._active_instances.add(instance_key) if self._channel_refresh_tasks: # refresh tasks already running diff --git a/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py b/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py index 6c49ca0da692..1c1c14cd3c8d 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py @@ -653,6 +653,48 @@ async def test__register_instance(self): ] ) + @pytest.mark.asyncio + async def test__register_instance_duplicate(self): + """ + test double instance registration. 
Should be no-op + """ + # set up mock client + client_mock = mock.Mock() + client_mock._gapic_client.instance_path.side_effect = lambda a, b: f"prefix/{b}" + active_instances = set() + instance_owners = {} + client_mock._active_instances = active_instances + client_mock._instance_owners = instance_owners + client_mock._channel_refresh_tasks = [object()] + mock_channels = [mock.Mock()] + client_mock.transport.channels = mock_channels + client_mock._ping_and_warm_instances = AsyncMock() + table_mock = mock.Mock() + expected_key = ( + "prefix/instance-1", + table_mock.table_name, + table_mock.app_profile_id, + ) + # fake first registration + await self._get_target_class()._register_instance( + client_mock, "instance-1", table_mock + ) + assert len(active_instances) == 1 + assert expected_key == tuple(list(active_instances)[0]) + assert len(instance_owners) == 1 + assert expected_key == tuple(list(instance_owners)[0]) + # should have called ping and warm + assert client_mock._ping_and_warm_instances.call_count == 1 + # next call should do nothing + await self._get_target_class()._register_instance( + client_mock, "instance-1", table_mock + ) + assert len(active_instances) == 1 + assert expected_key == tuple(list(active_instances)[0]) + assert len(instance_owners) == 1 + assert expected_key == tuple(list(instance_owners)[0]) + assert client_mock._ping_and_warm_instances.call_count == 1 + @pytest.mark.asyncio @pytest.mark.parametrize( "insert_instances,expected_active,expected_owner_keys", From 448d8504872e8277f1f8cb9e43cacd8a5c220cf1 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 7 Nov 2024 13:22:19 -0800 Subject: [PATCH 826/892] chore: Update gapic-generator-python to v1.20.2 (#1026) --- .../bigtable_instance_admin/client.py | 40 +- .../transports/README.rst | 9 + .../transports/grpc_asyncio.py | 55 +- .../transports/rest.py | 1834 +- .../transports/rest_base.py | 1194 ++ .../services/bigtable_table_admin/client.py | 40 +- .../transports/README.rst | 9 + .../transports/grpc_asyncio.py | 73 +- .../bigtable_table_admin/transports/rest.py | 2637 +-- .../transports/rest_base.py | 1714 ++ .../services/bigtable/async_client.py | 232 +- .../bigtable_v2/services/bigtable/client.py | 40 +- .../services/bigtable/transports/README.rst | 9 + .../bigtable/transports/grpc_asyncio.py | 33 +- .../transports/pooled_grpc_asyncio.py | 4 + .../services/bigtable/transports/rest.py | 920 +- .../services/bigtable/transports/rest_base.py | 654 + .../cloud/bigtable_v2/types/feature_flags.py | 14 + .../test_bigtable_instance_admin.py | 9446 +++++----- .../test_bigtable_table_admin.py | 14887 ++++++++-------- .../unit/gapic/bigtable_v2/test_bigtable.py | 6628 ++++--- 21 files changed, 23449 insertions(+), 17023 deletions(-) create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/README.rst create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest_base.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/README.rst create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest_base.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/README.rst create mode 100644 
packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest_base.py diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py index b8173bf4b628..b717eac8b47e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -586,36 +586,6 @@ def _get_universe_domain( raise ValueError("Universe Domain cannot be an empty string.") return universe_domain - @staticmethod - def _compare_universes( - client_universe: str, credentials: ga_credentials.Credentials - ) -> bool: - """Returns True iff the universe domains used by the client and credentials match. - - Args: - client_universe (str): The universe domain configured via the client options. - credentials (ga_credentials.Credentials): The credentials being used in the client. - - Returns: - bool: True iff client_universe matches the universe in credentials. - - Raises: - ValueError: when client_universe does not match the universe in credentials. - """ - - default_universe = BigtableInstanceAdminClient._DEFAULT_UNIVERSE - credentials_universe = getattr(credentials, "universe_domain", default_universe) - - if client_universe != credentials_universe: - raise ValueError( - "The configured universe domain " - f"({client_universe}) does not match the universe domain " - f"found in the credentials ({credentials_universe}). " - "If you haven't configured the universe domain explicitly, " - f"`{default_universe}` is the default." - ) - return True - def _validate_universe_domain(self): """Validates client's and credentials' universe domains are consistent. @@ -625,13 +595,9 @@ def _validate_universe_domain(self): Raises: ValueError: If the configured universe domain is not valid. """ - self._is_universe_domain_valid = ( - self._is_universe_domain_valid - or BigtableInstanceAdminClient._compare_universes( - self.universe_domain, self.transport._credentials - ) - ) - return self._is_universe_domain_valid + + # NOTE (b/349488459): universe validation is disabled until further notice. + return True @property def api_endpoint(self): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/README.rst b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/README.rst new file mode 100644 index 000000000000..9a01ee7c3032 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/README.rst @@ -0,0 +1,9 @@ + +transport inheritance structure +_______________________________ + +`BigtableInstanceAdminTransport` is the ABC for all transports. +- public child `BigtableInstanceAdminGrpcTransport` for sync gRPC transport (defined in `grpc.py`). +- public child `BigtableInstanceAdminGrpcAsyncIOTransport` for async gRPC transport (defined in `grpc_asyncio.py`). +- private child `_BaseBigtableInstanceAdminRestTransport` for base REST transport with inner classes `_BaseMETHOD` (defined in `rest_base.py`). +- public child `BigtableInstanceAdminRestTransport` for sync REST transport with inner classes `METHOD` derived from the parent's corresponding `_BaseMETHOD` classes (defined in `rest.py`). 
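The inheritance layering the new README describes can be sketched minimally as follows. This is an illustrative sketch only, not part of the patch: it uses a single hypothetical `GetThing` RPC and a made-up route, whereas the generated transports define one `_BaseMETHOD`/`METHOD` class pair per RPC plus credential and session plumbing.

import abc


class BigtableInstanceAdminTransport(abc.ABC):
    """ABC shared by the gRPC, gRPC-asyncio and REST transports."""

    @abc.abstractmethod
    def get_thing(self, request):
        ...


class _BaseBigtableInstanceAdminRestTransport(BigtableInstanceAdminTransport):
    """Base REST transport: URL/query plumbing lives in inner _BaseMETHOD classes."""

    class _BaseGetThing:
        @staticmethod
        def _get_http_options():
            # Hypothetical route; the real options come from the generated service config.
            return [{"method": "get", "uri": "/v2/{name=projects/*}"}]


class BigtableInstanceAdminRestTransport(_BaseBigtableInstanceAdminRestTransport):
    """Sync REST transport: inner METHOD classes derive from the parent's _BaseMETHOD classes."""

    class _GetThing(_BaseBigtableInstanceAdminRestTransport._BaseGetThing):
        def __call__(self, request):
            # The generated code would send the HTTP request via an AuthorizedSession here.
            return {"echo": request, "routes": self._get_http_options()}

    def get_thing(self, request):
        return self._GetThing()(request)


# Usage sketch: only the concrete REST transport is instantiable; the _Base class stays abstract.
print(BigtableInstanceAdminRestTransport().get_thing({"name": "projects/demo"}))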
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py index 1fa85551cf93..716e14a863c5 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # +import inspect import warnings from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union @@ -237,6 +238,9 @@ def __init__( ) # Wrap messages. This must be done after self._grpc_channel exists + self._wrap_with_kind = ( + "kind" in inspect.signature(gapic_v1.method_async.wrap_method).parameters + ) self._prep_wrapped_messages(client_info) @property @@ -898,12 +902,12 @@ def list_hot_tablets( def _prep_wrapped_messages(self, client_info): """Precompute the wrapped methods, overriding the base class method to use async wrappers.""" self._wrapped_methods = { - self.create_instance: gapic_v1.method_async.wrap_method( + self.create_instance: self._wrap_method( self.create_instance, default_timeout=300.0, client_info=client_info, ), - self.get_instance: gapic_v1.method_async.wrap_method( + self.get_instance: self._wrap_method( self.get_instance, default_retry=retries.AsyncRetry( initial=1.0, @@ -918,7 +922,7 @@ def _prep_wrapped_messages(self, client_info): default_timeout=60.0, client_info=client_info, ), - self.list_instances: gapic_v1.method_async.wrap_method( + self.list_instances: self._wrap_method( self.list_instances, default_retry=retries.AsyncRetry( initial=1.0, @@ -933,7 +937,7 @@ def _prep_wrapped_messages(self, client_info): default_timeout=60.0, client_info=client_info, ), - self.update_instance: gapic_v1.method_async.wrap_method( + self.update_instance: self._wrap_method( self.update_instance, default_retry=retries.AsyncRetry( initial=1.0, @@ -948,7 +952,7 @@ def _prep_wrapped_messages(self, client_info): default_timeout=60.0, client_info=client_info, ), - self.partial_update_instance: gapic_v1.method_async.wrap_method( + self.partial_update_instance: self._wrap_method( self.partial_update_instance, default_retry=retries.AsyncRetry( initial=1.0, @@ -963,17 +967,17 @@ def _prep_wrapped_messages(self, client_info): default_timeout=60.0, client_info=client_info, ), - self.delete_instance: gapic_v1.method_async.wrap_method( + self.delete_instance: self._wrap_method( self.delete_instance, default_timeout=60.0, client_info=client_info, ), - self.create_cluster: gapic_v1.method_async.wrap_method( + self.create_cluster: self._wrap_method( self.create_cluster, default_timeout=60.0, client_info=client_info, ), - self.get_cluster: gapic_v1.method_async.wrap_method( + self.get_cluster: self._wrap_method( self.get_cluster, default_retry=retries.AsyncRetry( initial=1.0, @@ -988,7 +992,7 @@ def _prep_wrapped_messages(self, client_info): default_timeout=60.0, client_info=client_info, ), - self.list_clusters: gapic_v1.method_async.wrap_method( + self.list_clusters: self._wrap_method( self.list_clusters, default_retry=retries.AsyncRetry( initial=1.0, @@ -1003,7 +1007,7 @@ def _prep_wrapped_messages(self, client_info): default_timeout=60.0, client_info=client_info, ), - self.update_cluster: 
gapic_v1.method_async.wrap_method( + self.update_cluster: self._wrap_method( self.update_cluster, default_retry=retries.AsyncRetry( initial=1.0, @@ -1018,22 +1022,22 @@ def _prep_wrapped_messages(self, client_info): default_timeout=60.0, client_info=client_info, ), - self.partial_update_cluster: gapic_v1.method_async.wrap_method( + self.partial_update_cluster: self._wrap_method( self.partial_update_cluster, default_timeout=None, client_info=client_info, ), - self.delete_cluster: gapic_v1.method_async.wrap_method( + self.delete_cluster: self._wrap_method( self.delete_cluster, default_timeout=60.0, client_info=client_info, ), - self.create_app_profile: gapic_v1.method_async.wrap_method( + self.create_app_profile: self._wrap_method( self.create_app_profile, default_timeout=60.0, client_info=client_info, ), - self.get_app_profile: gapic_v1.method_async.wrap_method( + self.get_app_profile: self._wrap_method( self.get_app_profile, default_retry=retries.AsyncRetry( initial=1.0, @@ -1048,7 +1052,7 @@ def _prep_wrapped_messages(self, client_info): default_timeout=60.0, client_info=client_info, ), - self.list_app_profiles: gapic_v1.method_async.wrap_method( + self.list_app_profiles: self._wrap_method( self.list_app_profiles, default_retry=retries.AsyncRetry( initial=1.0, @@ -1063,7 +1067,7 @@ def _prep_wrapped_messages(self, client_info): default_timeout=60.0, client_info=client_info, ), - self.update_app_profile: gapic_v1.method_async.wrap_method( + self.update_app_profile: self._wrap_method( self.update_app_profile, default_retry=retries.AsyncRetry( initial=1.0, @@ -1078,12 +1082,12 @@ def _prep_wrapped_messages(self, client_info): default_timeout=60.0, client_info=client_info, ), - self.delete_app_profile: gapic_v1.method_async.wrap_method( + self.delete_app_profile: self._wrap_method( self.delete_app_profile, default_timeout=60.0, client_info=client_info, ), - self.get_iam_policy: gapic_v1.method_async.wrap_method( + self.get_iam_policy: self._wrap_method( self.get_iam_policy, default_retry=retries.AsyncRetry( initial=1.0, @@ -1098,12 +1102,12 @@ def _prep_wrapped_messages(self, client_info): default_timeout=60.0, client_info=client_info, ), - self.set_iam_policy: gapic_v1.method_async.wrap_method( + self.set_iam_policy: self._wrap_method( self.set_iam_policy, default_timeout=60.0, client_info=client_info, ), - self.test_iam_permissions: gapic_v1.method_async.wrap_method( + self.test_iam_permissions: self._wrap_method( self.test_iam_permissions, default_retry=retries.AsyncRetry( initial=1.0, @@ -1118,7 +1122,7 @@ def _prep_wrapped_messages(self, client_info): default_timeout=60.0, client_info=client_info, ), - self.list_hot_tablets: gapic_v1.method_async.wrap_method( + self.list_hot_tablets: self._wrap_method( self.list_hot_tablets, default_retry=retries.AsyncRetry( initial=1.0, @@ -1135,8 +1139,17 @@ def _prep_wrapped_messages(self, client_info): ), } + def _wrap_method(self, func, *args, **kwargs): + if self._wrap_with_kind: # pragma: NO COVER + kwargs["kind"] = self.kind + return gapic_v1.method_async.wrap_method(func, *args, **kwargs) + def close(self): return self.grpc_channel.close() + @property + def kind(self) -> str: + return "grpc_asyncio" + __all__ = ("BigtableInstanceAdminGrpcAsyncIOTransport",) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py index e1737add138f..45f08fa6454e 100644 --- 
a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py @@ -16,29 +16,21 @@ from google.auth.transport.requests import AuthorizedSession # type: ignore import json # type: ignore -import grpc # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.api_core import exceptions as core_exceptions from google.api_core import retry as retries from google.api_core import rest_helpers from google.api_core import rest_streaming -from google.api_core import path_template from google.api_core import gapic_v1 from google.protobuf import json_format from google.api_core import operations_v1 + from requests import __version__ as requests_version import dataclasses -import re from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union import warnings -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object, None] # type: ignore - from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin from google.cloud.bigtable_admin_v2.types import instance @@ -47,16 +39,20 @@ from google.protobuf import empty_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from .base import ( - BigtableInstanceAdminTransport, - DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO, -) + +from .rest_base import _BaseBigtableInstanceAdminRestTransport +from .base import DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object, None] # type: ignore DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, grpc_version=None, - rest_version=requests_version, + rest_version=f"requests@{requests_version}", ) @@ -699,8 +695,8 @@ class BigtableInstanceAdminRestStub: _interceptor: BigtableInstanceAdminRestInterceptor -class BigtableInstanceAdminRestTransport(BigtableInstanceAdminTransport): - """REST backend transport for BigtableInstanceAdmin. +class BigtableInstanceAdminRestTransport(_BaseBigtableInstanceAdminRestTransport): + """REST backend synchronous transport for BigtableInstanceAdmin. Service for creating, configuring, and deleting Cloud Bigtable Instances and Clusters. Provides access to the Instance @@ -712,7 +708,6 @@ class BigtableInstanceAdminRestTransport(BigtableInstanceAdminTransport): and call it. It sends JSON representations of protocol buffers over HTTP/1.1 - """ def __init__( @@ -766,21 +761,12 @@ def __init__( # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. 
# TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the # credentials object - maybe_url_match = re.match("^(?Phttp(?:s)?://)?(?P.*)$", host) - if maybe_url_match is None: - raise ValueError( - f"Unexpected hostname structure: {host}" - ) # pragma: NO COVER - - url_match_items = maybe_url_match.groupdict() - - host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host - super().__init__( host=host, credentials=credentials, client_info=client_info, always_use_jwt_access=always_use_jwt_access, + url_scheme=url_scheme, api_audience=api_audience, ) self._session = AuthorizedSession( @@ -844,21 +830,35 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: # Return the client from cache. return self._operations_client - class _CreateAppProfile(BigtableInstanceAdminRestStub): + class _CreateAppProfile( + _BaseBigtableInstanceAdminRestTransport._BaseCreateAppProfile, + BigtableInstanceAdminRestStub, + ): def __hash__(self): - return hash("CreateAppProfile") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - "appProfileId": "", - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableInstanceAdminRestTransport.CreateAppProfile") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -888,47 +888,36 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{parent=projects/*/instances/*}/appProfiles", - "body": "app_profile", - }, - ] + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseCreateAppProfile._get_http_options() + ) request, metadata = self._interceptor.pre_create_app_profile( request, metadata ) - pb_request = bigtable_instance_admin.CreateAppProfileRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseCreateAppProfile._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True + body = _BaseBigtableInstanceAdminRestTransport._BaseCreateAppProfile._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableInstanceAdminRestTransport._BaseCreateAppProfile._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - 
params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = ( + BigtableInstanceAdminRestTransport._CreateAppProfile._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -944,21 +933,35 @@ def __call__( resp = self._interceptor.post_create_app_profile(resp) return resp - class _CreateCluster(BigtableInstanceAdminRestStub): + class _CreateCluster( + _BaseBigtableInstanceAdminRestTransport._BaseCreateCluster, + BigtableInstanceAdminRestStub, + ): def __hash__(self): - return hash("CreateCluster") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - "clusterId": "", - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableInstanceAdminRestTransport.CreateCluster") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -988,45 +991,32 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{parent=projects/*/instances/*}/clusters", - "body": "cluster", - }, - ] + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseCreateCluster._get_http_options() + ) request, metadata = self._interceptor.pre_create_cluster(request, metadata) - pb_request = bigtable_instance_admin.CreateClusterRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseCreateCluster._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True + body = _BaseBigtableInstanceAdminRestTransport._BaseCreateCluster._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableInstanceAdminRestTransport._BaseCreateCluster._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = BigtableInstanceAdminRestTransport._CreateCluster._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -1040,19 +1030,35 @@ def __call__( resp = 
self._interceptor.post_create_cluster(resp) return resp - class _CreateInstance(BigtableInstanceAdminRestStub): + class _CreateInstance( + _BaseBigtableInstanceAdminRestTransport._BaseCreateInstance, + BigtableInstanceAdminRestStub, + ): def __hash__(self): - return hash("CreateInstance") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableInstanceAdminRestTransport.CreateInstance") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -1082,45 +1088,32 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{parent=projects/*}/instances", - "body": "*", - }, - ] + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseCreateInstance._get_http_options() + ) request, metadata = self._interceptor.pre_create_instance(request, metadata) - pb_request = bigtable_instance_admin.CreateInstanceRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseCreateInstance._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True + body = _BaseBigtableInstanceAdminRestTransport._BaseCreateInstance._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableInstanceAdminRestTransport._BaseCreateInstance._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = BigtableInstanceAdminRestTransport._CreateInstance._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -1134,21 +1127,34 @@ def __call__( resp = self._interceptor.post_create_instance(resp) return resp - class _DeleteAppProfile(BigtableInstanceAdminRestStub): + class _DeleteAppProfile( + _BaseBigtableInstanceAdminRestTransport._BaseDeleteAppProfile, + BigtableInstanceAdminRestStub, + ): def __hash__(self): - return hash("DeleteAppProfile") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - "ignoreWarnings": False, - } - - @classmethod - def _get_unset_required_fields(cls, 
message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableInstanceAdminRestTransport.DeleteAppProfile") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response def __call__( self, @@ -1171,40 +1177,31 @@ def __call__( sent along with the request as metadata. """ - http_options: List[Dict[str, str]] = [ - { - "method": "delete", - "uri": "/v2/{name=projects/*/instances/*/appProfiles/*}", - }, - ] + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseDeleteAppProfile._get_http_options() + ) request, metadata = self._interceptor.pre_delete_app_profile( request, metadata ) - pb_request = bigtable_instance_admin.DeleteAppProfileRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request["uri"] - method = transcoded_request["method"] + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseDeleteAppProfile._get_transcoded_request( + http_options, request + ) # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableInstanceAdminRestTransport._BaseDeleteAppProfile._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), + response = ( + BigtableInstanceAdminRestTransport._DeleteAppProfile._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -1212,19 +1209,34 @@ def __call__( if response.status_code >= 400: raise core_exceptions.from_http_response(response) - class _DeleteCluster(BigtableInstanceAdminRestStub): + class _DeleteCluster( + _BaseBigtableInstanceAdminRestTransport._BaseDeleteCluster, + BigtableInstanceAdminRestStub, + ): def __hash__(self): - return hash("DeleteCluster") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableInstanceAdminRestTransport.DeleteCluster") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + 
params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response def __call__( self, @@ -1247,38 +1259,27 @@ def __call__( sent along with the request as metadata. """ - http_options: List[Dict[str, str]] = [ - { - "method": "delete", - "uri": "/v2/{name=projects/*/instances/*/clusters/*}", - }, - ] + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseDeleteCluster._get_http_options() + ) request, metadata = self._interceptor.pre_delete_cluster(request, metadata) - pb_request = bigtable_instance_admin.DeleteClusterRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request["uri"] - method = transcoded_request["method"] + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseDeleteCluster._get_transcoded_request( + http_options, request + ) # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableInstanceAdminRestTransport._BaseDeleteCluster._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), + response = BigtableInstanceAdminRestTransport._DeleteCluster._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -1286,19 +1287,34 @@ def __call__( if response.status_code >= 400: raise core_exceptions.from_http_response(response) - class _DeleteInstance(BigtableInstanceAdminRestStub): + class _DeleteInstance( + _BaseBigtableInstanceAdminRestTransport._BaseDeleteInstance, + BigtableInstanceAdminRestStub, + ): def __hash__(self): - return hash("DeleteInstance") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableInstanceAdminRestTransport.DeleteInstance") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response def __call__( self, @@ -1321,38 +1337,27 @@ def __call__( sent along with the request as metadata. 
""" - http_options: List[Dict[str, str]] = [ - { - "method": "delete", - "uri": "/v2/{name=projects/*/instances/*}", - }, - ] + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseDeleteInstance._get_http_options() + ) request, metadata = self._interceptor.pre_delete_instance(request, metadata) - pb_request = bigtable_instance_admin.DeleteInstanceRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request["uri"] - method = transcoded_request["method"] + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseDeleteInstance._get_transcoded_request( + http_options, request + ) # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableInstanceAdminRestTransport._BaseDeleteInstance._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), + response = BigtableInstanceAdminRestTransport._DeleteInstance._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -1360,19 +1365,34 @@ def __call__( if response.status_code >= 400: raise core_exceptions.from_http_response(response) - class _GetAppProfile(BigtableInstanceAdminRestStub): + class _GetAppProfile( + _BaseBigtableInstanceAdminRestTransport._BaseGetAppProfile, + BigtableInstanceAdminRestStub, + ): def __hash__(self): - return hash("GetAppProfile") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableInstanceAdminRestTransport.GetAppProfile") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response def __call__( self, @@ -1402,38 +1422,27 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "get", - "uri": "/v2/{name=projects/*/instances/*/appProfiles/*}", - }, - ] + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseGetAppProfile._get_http_options() + ) request, metadata = self._interceptor.pre_get_app_profile(request, metadata) - pb_request = bigtable_instance_admin.GetAppProfileRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request["uri"] - method = transcoded_request["method"] + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseGetAppProfile._get_transcoded_request( + http_options, request + ) # Jsonify the query params - query_params = 
json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableInstanceAdminRestTransport._BaseGetAppProfile._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), + response = BigtableInstanceAdminRestTransport._GetAppProfile._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -1449,19 +1458,34 @@ def __call__( resp = self._interceptor.post_get_app_profile(resp) return resp - class _GetCluster(BigtableInstanceAdminRestStub): + class _GetCluster( + _BaseBigtableInstanceAdminRestTransport._BaseGetCluster, + BigtableInstanceAdminRestStub, + ): def __hash__(self): - return hash("GetCluster") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableInstanceAdminRestTransport.GetCluster") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response def __call__( self, @@ -1492,38 +1516,27 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "get", - "uri": "/v2/{name=projects/*/instances/*/clusters/*}", - }, - ] + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseGetCluster._get_http_options() + ) request, metadata = self._interceptor.pre_get_cluster(request, metadata) - pb_request = bigtable_instance_admin.GetClusterRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request["uri"] - method = transcoded_request["method"] + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseGetCluster._get_transcoded_request( + http_options, request + ) # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableInstanceAdminRestTransport._BaseGetCluster._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), + response = BigtableInstanceAdminRestTransport._GetCluster._get_response( + self._host, + metadata, + 
query_params, + self._session, + timeout, + transcoded_request, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -1539,19 +1552,35 @@ def __call__( resp = self._interceptor.post_get_cluster(resp) return resp - class _GetIamPolicy(BigtableInstanceAdminRestStub): + class _GetIamPolicy( + _BaseBigtableInstanceAdminRestTransport._BaseGetIamPolicy, + BigtableInstanceAdminRestStub, + ): def __hash__(self): - return hash("GetIamPolicy") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableInstanceAdminRestTransport.GetIamPolicy") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -1652,45 +1681,32 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{resource=projects/*/instances/*}:getIamPolicy", - "body": "*", - }, - ] + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseGetIamPolicy._get_http_options() + ) request, metadata = self._interceptor.pre_get_iam_policy(request, metadata) - pb_request = request - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseGetIamPolicy._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True + body = _BaseBigtableInstanceAdminRestTransport._BaseGetIamPolicy._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableInstanceAdminRestTransport._BaseGetIamPolicy._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = BigtableInstanceAdminRestTransport._GetIamPolicy._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -1706,19 +1722,34 @@ def __call__( resp = self._interceptor.post_get_iam_policy(resp) return resp - class _GetInstance(BigtableInstanceAdminRestStub): + class _GetInstance( + _BaseBigtableInstanceAdminRestTransport._BaseGetInstance, + BigtableInstanceAdminRestStub, + ): def __hash__(self): - return hash("GetInstance") - - 
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableInstanceAdminRestTransport.GetInstance") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response def __call__( self, @@ -1751,38 +1782,27 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "get", - "uri": "/v2/{name=projects/*/instances/*}", - }, - ] + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseGetInstance._get_http_options() + ) request, metadata = self._interceptor.pre_get_instance(request, metadata) - pb_request = bigtable_instance_admin.GetInstanceRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request["uri"] - method = transcoded_request["method"] + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseGetInstance._get_transcoded_request( + http_options, request + ) # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableInstanceAdminRestTransport._BaseGetInstance._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), + response = BigtableInstanceAdminRestTransport._GetInstance._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -1798,19 +1818,34 @@ def __call__( resp = self._interceptor.post_get_instance(resp) return resp - class _ListAppProfiles(BigtableInstanceAdminRestStub): + class _ListAppProfiles( + _BaseBigtableInstanceAdminRestTransport._BaseListAppProfiles, + BigtableInstanceAdminRestStub, + ): def __hash__(self): - return hash("ListAppProfiles") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableInstanceAdminRestTransport.ListAppProfiles") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + 
params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response def __call__( self, @@ -1839,40 +1874,31 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "get", - "uri": "/v2/{parent=projects/*/instances/*}/appProfiles", - }, - ] + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseListAppProfiles._get_http_options() + ) request, metadata = self._interceptor.pre_list_app_profiles( request, metadata ) - pb_request = bigtable_instance_admin.ListAppProfilesRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request["uri"] - method = transcoded_request["method"] + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseListAppProfiles._get_transcoded_request( + http_options, request + ) # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableInstanceAdminRestTransport._BaseListAppProfiles._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), + response = ( + BigtableInstanceAdminRestTransport._ListAppProfiles._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -1888,19 +1914,34 @@ def __call__( resp = self._interceptor.post_list_app_profiles(resp) return resp - class _ListClusters(BigtableInstanceAdminRestStub): + class _ListClusters( + _BaseBigtableInstanceAdminRestTransport._BaseListClusters, + BigtableInstanceAdminRestStub, + ): def __hash__(self): - return hash("ListClusters") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableInstanceAdminRestTransport.ListClusters") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response def __call__( self, @@ -1929,38 +1970,27 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "get", - "uri": "/v2/{parent=projects/*/instances/*}/clusters", - }, - ] + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseListClusters._get_http_options() + ) request, metadata = self._interceptor.pre_list_clusters(request, metadata) - pb_request = bigtable_instance_admin.ListClustersRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request["uri"] - method = transcoded_request["method"] + transcoded_request = 
_BaseBigtableInstanceAdminRestTransport._BaseListClusters._get_transcoded_request( + http_options, request + ) # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableInstanceAdminRestTransport._BaseListClusters._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), + response = BigtableInstanceAdminRestTransport._ListClusters._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -1976,19 +2006,34 @@ def __call__( resp = self._interceptor.post_list_clusters(resp) return resp - class _ListHotTablets(BigtableInstanceAdminRestStub): + class _ListHotTablets( + _BaseBigtableInstanceAdminRestTransport._BaseListHotTablets, + BigtableInstanceAdminRestStub, + ): def __hash__(self): - return hash("ListHotTablets") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableInstanceAdminRestTransport.ListHotTablets") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response def __call__( self, @@ -2017,40 +2062,29 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "get", - "uri": "/v2/{parent=projects/*/instances/*/clusters/*}/hotTablets", - }, - ] + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseListHotTablets._get_http_options() + ) request, metadata = self._interceptor.pre_list_hot_tablets( request, metadata ) - pb_request = bigtable_instance_admin.ListHotTabletsRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request["uri"] - method = transcoded_request["method"] + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseListHotTablets._get_transcoded_request( + http_options, request + ) # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableInstanceAdminRestTransport._BaseListHotTablets._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - 
timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), + response = BigtableInstanceAdminRestTransport._ListHotTablets._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -2066,19 +2100,34 @@ def __call__( resp = self._interceptor.post_list_hot_tablets(resp) return resp - class _ListInstances(BigtableInstanceAdminRestStub): + class _ListInstances( + _BaseBigtableInstanceAdminRestTransport._BaseListInstances, + BigtableInstanceAdminRestStub, + ): def __hash__(self): - return hash("ListInstances") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableInstanceAdminRestTransport.ListInstances") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response def __call__( self, @@ -2107,38 +2156,27 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "get", - "uri": "/v2/{parent=projects/*}/instances", - }, - ] + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseListInstances._get_http_options() + ) request, metadata = self._interceptor.pre_list_instances(request, metadata) - pb_request = bigtable_instance_admin.ListInstancesRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request["uri"] - method = transcoded_request["method"] + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseListInstances._get_transcoded_request( + http_options, request + ) # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableInstanceAdminRestTransport._BaseListInstances._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), + response = BigtableInstanceAdminRestTransport._ListInstances._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -2154,21 +2192,35 @@ def __call__( resp = self._interceptor.post_list_instances(resp) return resp - class _PartialUpdateCluster(BigtableInstanceAdminRestStub): + class _PartialUpdateCluster( + _BaseBigtableInstanceAdminRestTransport._BasePartialUpdateCluster, + BigtableInstanceAdminRestStub, + ): def __hash__(self): - return hash("PartialUpdateCluster") - - 
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - "updateMask": {}, - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableInstanceAdminRestTransport.PartialUpdateCluster") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -2198,47 +2250,36 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "patch", - "uri": "/v2/{cluster.name=projects/*/instances/*/clusters/*}", - "body": "cluster", - }, - ] + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BasePartialUpdateCluster._get_http_options() + ) request, metadata = self._interceptor.pre_partial_update_cluster( request, metadata ) - pb_request = bigtable_instance_admin.PartialUpdateClusterRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BasePartialUpdateCluster._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True + body = _BaseBigtableInstanceAdminRestTransport._BasePartialUpdateCluster._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableInstanceAdminRestTransport._BasePartialUpdateCluster._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = ( + BigtableInstanceAdminRestTransport._PartialUpdateCluster._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -2252,21 +2293,35 @@ def __call__( resp = self._interceptor.post_partial_update_cluster(resp) return resp - class _PartialUpdateInstance(BigtableInstanceAdminRestStub): + class _PartialUpdateInstance( + _BaseBigtableInstanceAdminRestTransport._BasePartialUpdateInstance, + BigtableInstanceAdminRestStub, + ): def __hash__(self): - return hash("PartialUpdateInstance") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - "updateMask": {}, - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return 
hash("BigtableInstanceAdminRestTransport.PartialUpdateInstance") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -2296,49 +2351,36 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "patch", - "uri": "/v2/{instance.name=projects/*/instances/*}", - "body": "instance", - }, - ] + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BasePartialUpdateInstance._get_http_options() + ) request, metadata = self._interceptor.pre_partial_update_instance( request, metadata ) - pb_request = bigtable_instance_admin.PartialUpdateInstanceRequest.pb( - request + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BasePartialUpdateInstance._get_transcoded_request( + http_options, request ) - transcoded_request = path_template.transcode(http_options, pb_request) - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True + body = _BaseBigtableInstanceAdminRestTransport._BasePartialUpdateInstance._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableInstanceAdminRestTransport._BasePartialUpdateInstance._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = ( + BigtableInstanceAdminRestTransport._PartialUpdateInstance._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -2352,19 +2394,35 @@ def __call__( resp = self._interceptor.post_partial_update_instance(resp) return resp - class _SetIamPolicy(BigtableInstanceAdminRestStub): + class _SetIamPolicy( + _BaseBigtableInstanceAdminRestTransport._BaseSetIamPolicy, + BigtableInstanceAdminRestStub, + ): def __hash__(self): - return hash("SetIamPolicy") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableInstanceAdminRestTransport.SetIamPolicy") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = 
getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -2465,45 +2523,32 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{resource=projects/*/instances/*}:setIamPolicy", - "body": "*", - }, - ] + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseSetIamPolicy._get_http_options() + ) request, metadata = self._interceptor.pre_set_iam_policy(request, metadata) - pb_request = request - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseSetIamPolicy._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True + body = _BaseBigtableInstanceAdminRestTransport._BaseSetIamPolicy._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableInstanceAdminRestTransport._BaseSetIamPolicy._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = BigtableInstanceAdminRestTransport._SetIamPolicy._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -2519,19 +2564,35 @@ def __call__( resp = self._interceptor.post_set_iam_policy(resp) return resp - class _TestIamPermissions(BigtableInstanceAdminRestStub): + class _TestIamPermissions( + _BaseBigtableInstanceAdminRestTransport._BaseTestIamPermissions, + BigtableInstanceAdminRestStub, + ): def __hash__(self): - return hash("TestIamPermissions") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableInstanceAdminRestTransport.TestIamPermissions") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -2557,47 +2618,36 @@ def __call__( Response message for ``TestIamPermissions`` method. 
""" - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{resource=projects/*/instances/*}:testIamPermissions", - "body": "*", - }, - ] + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseTestIamPermissions._get_http_options() + ) request, metadata = self._interceptor.pre_test_iam_permissions( request, metadata ) - pb_request = request - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseTestIamPermissions._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True + body = _BaseBigtableInstanceAdminRestTransport._BaseTestIamPermissions._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableInstanceAdminRestTransport._BaseTestIamPermissions._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = ( + BigtableInstanceAdminRestTransport._TestIamPermissions._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -2613,21 +2663,35 @@ def __call__( resp = self._interceptor.post_test_iam_permissions(resp) return resp - class _UpdateAppProfile(BigtableInstanceAdminRestStub): + class _UpdateAppProfile( + _BaseBigtableInstanceAdminRestTransport._BaseUpdateAppProfile, + BigtableInstanceAdminRestStub, + ): def __hash__(self): - return hash("UpdateAppProfile") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - "updateMask": {}, - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableInstanceAdminRestTransport.UpdateAppProfile") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -2657,47 +2721,36 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "patch", - "uri": "/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}", - "body": "app_profile", - }, - ] + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseUpdateAppProfile._get_http_options() + ) request, metadata = self._interceptor.pre_update_app_profile( request, metadata ) - pb_request = 
bigtable_instance_admin.UpdateAppProfileRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseUpdateAppProfile._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True + body = _BaseBigtableInstanceAdminRestTransport._BaseUpdateAppProfile._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableInstanceAdminRestTransport._BaseUpdateAppProfile._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = ( + BigtableInstanceAdminRestTransport._UpdateAppProfile._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -2711,9 +2764,35 @@ def __call__( resp = self._interceptor.post_update_app_profile(resp) return resp - class _UpdateCluster(BigtableInstanceAdminRestStub): + class _UpdateCluster( + _BaseBigtableInstanceAdminRestTransport._BaseUpdateCluster, + BigtableInstanceAdminRestStub, + ): def __hash__(self): - return hash("UpdateCluster") + return hash("BigtableInstanceAdminRestTransport.UpdateCluster") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -2745,44 +2824,32 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "put", - "uri": "/v2/{name=projects/*/instances/*/clusters/*}", - "body": "*", - }, - ] + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseUpdateCluster._get_http_options() + ) request, metadata = self._interceptor.pre_update_cluster(request, metadata) - pb_request = instance.Cluster.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseUpdateCluster._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True + body = _BaseBigtableInstanceAdminRestTransport._BaseUpdateCluster._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - 
transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableInstanceAdminRestTransport._BaseUpdateCluster._get_query_params_json( + transcoded_request ) - query_params["$alt"] = "json;enum-encoding=int" - # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = BigtableInstanceAdminRestTransport._UpdateCluster._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -2796,19 +2863,35 @@ def __call__( resp = self._interceptor.post_update_cluster(resp) return resp - class _UpdateInstance(BigtableInstanceAdminRestStub): + class _UpdateInstance( + _BaseBigtableInstanceAdminRestTransport._BaseUpdateInstance, + BigtableInstanceAdminRestStub, + ): def __hash__(self): - return hash("UpdateInstance") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableInstanceAdminRestTransport.UpdateInstance") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -2845,45 +2928,32 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "put", - "uri": "/v2/{name=projects/*/instances/*}", - "body": "*", - }, - ] + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseUpdateInstance._get_http_options() + ) request, metadata = self._interceptor.pre_update_instance(request, metadata) - pb_request = instance.Instance.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseUpdateInstance._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True + body = _BaseBigtableInstanceAdminRestTransport._BaseUpdateInstance._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableInstanceAdminRestTransport._BaseUpdateInstance._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - 
params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = BigtableInstanceAdminRestTransport._UpdateInstance._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest_base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest_base.py new file mode 100644 index 000000000000..7b0c1a4ba343 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest_base.py @@ -0,0 +1,1194 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import json # type: ignore +from google.api_core import path_template +from google.api_core import gapic_v1 + +from google.protobuf import json_format +from .base import BigtableInstanceAdminTransport, DEFAULT_CLIENT_INFO + +import re +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union + + +from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin +from google.cloud.bigtable_admin_v2.types import instance +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore + + +class _BaseBigtableInstanceAdminRestTransport(BigtableInstanceAdminTransport): + """Base REST backend transport for BigtableInstanceAdmin. + + Note: This class is not meant to be used directly. Use its sync and + async sub-classes instead. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + + def __init__( + self, + *, + host: str = "bigtableadmin.googleapis.com", + credentials: Optional[Any] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + url_scheme: str = "https", + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + Args: + host (Optional[str]): + The hostname to connect to (default: 'bigtableadmin.googleapis.com'). + credentials (Optional[Any]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. 
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host) + if maybe_url_match is None: + raise ValueError( + f"Unexpected hostname structure: {host}" + ) # pragma: NO COVER + + url_match_items = maybe_url_match.groupdict() + + host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host + + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + class _BaseCreateAppProfile: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "appProfileId": "", + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{parent=projects/*/instances/*}/appProfiles", + "body": "app_profile", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_instance_admin.CreateAppProfileRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseCreateAppProfile._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseCreateCluster: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "clusterId": "", + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{parent=projects/*/instances/*}/clusters", + "body": "cluster", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_instance_admin.CreateClusterRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) +
query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseCreateCluster._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseCreateInstance: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{parent=projects/*}/instances", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_instance_admin.CreateInstanceRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseCreateInstance._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseDeleteAppProfile: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "ignoreWarnings": False, + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/v2/{name=projects/*/instances/*/appProfiles/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_instance_admin.DeleteAppProfileRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseDeleteAppProfile._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseDeleteCluster: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/v2/{name=projects/*/instances/*/clusters/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, 
request): + pb_request = bigtable_instance_admin.DeleteClusterRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseDeleteCluster._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseDeleteInstance: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/v2/{name=projects/*/instances/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_instance_admin.DeleteInstanceRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseDeleteInstance._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseGetAppProfile: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{name=projects/*/instances/*/appProfiles/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_instance_admin.GetAppProfileRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseGetAppProfile._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseGetCluster: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": 
"/v2/{name=projects/*/instances/*/clusters/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_instance_admin.GetClusterRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseGetCluster._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseGetIamPolicy: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{resource=projects/*/instances/*}:getIamPolicy", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = request + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseGetIamPolicy._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseGetInstance: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{name=projects/*/instances/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_instance_admin.GetInstanceRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseGetInstance._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseListAppProfiles: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + 
def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{parent=projects/*/instances/*}/appProfiles", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_instance_admin.ListAppProfilesRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseListAppProfiles._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseListClusters: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{parent=projects/*/instances/*}/clusters", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_instance_admin.ListClustersRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseListClusters._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseListHotTablets: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{parent=projects/*/instances/*/clusters/*}/hotTablets", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_instance_admin.ListHotTabletsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseListHotTablets._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseListInstances: + def __hash__(self): # pragma: NO COVER + 
return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{parent=projects/*}/instances", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_instance_admin.ListInstancesRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseListInstances._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BasePartialUpdateCluster: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "updateMask": {}, + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "patch", + "uri": "/v2/{cluster.name=projects/*/instances/*/clusters/*}", + "body": "cluster", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_instance_admin.PartialUpdateClusterRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BasePartialUpdateCluster._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BasePartialUpdateInstance: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "updateMask": {}, + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "patch", + "uri": "/v2/{instance.name=projects/*/instances/*}", + "body": "instance", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_instance_admin.PartialUpdateInstanceRequest.pb( + request + ) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def 
_get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BasePartialUpdateInstance._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseSetIamPolicy: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{resource=projects/*/instances/*}:setIamPolicy", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = request + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseSetIamPolicy._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseTestIamPermissions: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{resource=projects/*/instances/*}:testIamPermissions", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = request + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseTestIamPermissions._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseUpdateAppProfile: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must 
be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "updateMask": {}, + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "patch", + "uri": "/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}", + "body": "app_profile", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_instance_admin.UpdateAppProfileRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseUpdateAppProfile._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseUpdateCluster: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "put", + "uri": "/v2/{name=projects/*/instances/*/clusters/*}", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = instance.Cluster.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseUpdateInstance: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "put", + "uri": "/v2/{name=projects/*/instances/*}", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = instance.Instance.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + 
json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseUpdateInstance._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + +__all__ = ("_BaseBigtableInstanceAdminRestTransport",) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index 55d50ee819ac..502f0085c295 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -610,36 +610,6 @@ def _get_universe_domain( raise ValueError("Universe Domain cannot be an empty string.") return universe_domain - @staticmethod - def _compare_universes( - client_universe: str, credentials: ga_credentials.Credentials - ) -> bool: - """Returns True iff the universe domains used by the client and credentials match. - - Args: - client_universe (str): The universe domain configured via the client options. - credentials (ga_credentials.Credentials): The credentials being used in the client. - - Returns: - bool: True iff client_universe matches the universe in credentials. - - Raises: - ValueError: when client_universe does not match the universe in credentials. - """ - - default_universe = BigtableTableAdminClient._DEFAULT_UNIVERSE - credentials_universe = getattr(credentials, "universe_domain", default_universe) - - if client_universe != credentials_universe: - raise ValueError( - "The configured universe domain " - f"({client_universe}) does not match the universe domain " - f"found in the credentials ({credentials_universe}). " - "If you haven't configured the universe domain explicitly, " - f"`{default_universe}` is the default." - ) - return True - def _validate_universe_domain(self): """Validates client's and credentials' universe domains are consistent. @@ -649,13 +619,9 @@ def _validate_universe_domain(self): Raises: ValueError: If the configured universe domain is not valid. """ - self._is_universe_domain_valid = ( - self._is_universe_domain_valid - or BigtableTableAdminClient._compare_universes( - self.universe_domain, self.transport._credentials - ) - ) - return self._is_universe_domain_valid + + # NOTE (b/349488459): universe validation is disabled until further notice. + return True @property def api_endpoint(self): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/README.rst b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/README.rst new file mode 100644 index 000000000000..0e8f40ec3a60 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/README.rst @@ -0,0 +1,9 @@ + +transport inheritance structure +_______________________________ + +`BigtableTableAdminTransport` is the ABC for all transports. +- public child `BigtableTableAdminGrpcTransport` for sync gRPC transport (defined in `grpc.py`). +- public child `BigtableTableAdminGrpcAsyncIOTransport` for async gRPC transport (defined in `grpc_asyncio.py`). 
+- private child `_BaseBigtableTableAdminRestTransport` for base REST transport with inner classes `_BaseMETHOD` (defined in `rest_base.py`). +- public child `BigtableTableAdminRestTransport` for sync REST transport with inner classes `METHOD` derived from the parent's corresponding `_BaseMETHOD` classes (defined in `rest.py`). diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py index e8b31ed36d7d..520c7c83c9ca 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # +import inspect import warnings from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union @@ -239,6 +240,9 @@ def __init__( ) # Wrap messages. This must be done after self._grpc_channel exists + self._wrap_with_kind = ( + "kind" in inspect.signature(gapic_v1.method_async.wrap_method).parameters + ) self._prep_wrapped_messages(client_info) @property @@ -1188,17 +1192,17 @@ def test_iam_permissions( def _prep_wrapped_messages(self, client_info): """Precompute the wrapped methods, overriding the base class method to use async wrappers.""" self._wrapped_methods = { - self.create_table: gapic_v1.method_async.wrap_method( + self.create_table: self._wrap_method( self.create_table, default_timeout=300.0, client_info=client_info, ), - self.create_table_from_snapshot: gapic_v1.method_async.wrap_method( + self.create_table_from_snapshot: self._wrap_method( self.create_table_from_snapshot, default_timeout=None, client_info=client_info, ), - self.list_tables: gapic_v1.method_async.wrap_method( + self.list_tables: self._wrap_method( self.list_tables, default_retry=retries.AsyncRetry( initial=1.0, @@ -1213,7 +1217,7 @@ def _prep_wrapped_messages(self, client_info): default_timeout=60.0, client_info=client_info, ), - self.get_table: gapic_v1.method_async.wrap_method( + self.get_table: self._wrap_method( self.get_table, default_retry=retries.AsyncRetry( initial=1.0, @@ -1228,57 +1232,57 @@ def _prep_wrapped_messages(self, client_info): default_timeout=60.0, client_info=client_info, ), - self.update_table: gapic_v1.method_async.wrap_method( + self.update_table: self._wrap_method( self.update_table, default_timeout=None, client_info=client_info, ), - self.delete_table: gapic_v1.method_async.wrap_method( + self.delete_table: self._wrap_method( self.delete_table, default_timeout=300.0, client_info=client_info, ), - self.undelete_table: gapic_v1.method_async.wrap_method( + self.undelete_table: self._wrap_method( self.undelete_table, default_timeout=None, client_info=client_info, ), - self.create_authorized_view: gapic_v1.method_async.wrap_method( + self.create_authorized_view: self._wrap_method( self.create_authorized_view, default_timeout=None, client_info=client_info, ), - self.list_authorized_views: gapic_v1.method_async.wrap_method( + self.list_authorized_views: self._wrap_method( self.list_authorized_views, default_timeout=None, client_info=client_info, ), - self.get_authorized_view: gapic_v1.method_async.wrap_method( + self.get_authorized_view: self._wrap_method( self.get_authorized_view, 
default_timeout=None, client_info=client_info, ), - self.update_authorized_view: gapic_v1.method_async.wrap_method( + self.update_authorized_view: self._wrap_method( self.update_authorized_view, default_timeout=None, client_info=client_info, ), - self.delete_authorized_view: gapic_v1.method_async.wrap_method( + self.delete_authorized_view: self._wrap_method( self.delete_authorized_view, default_timeout=None, client_info=client_info, ), - self.modify_column_families: gapic_v1.method_async.wrap_method( + self.modify_column_families: self._wrap_method( self.modify_column_families, default_timeout=300.0, client_info=client_info, ), - self.drop_row_range: gapic_v1.method_async.wrap_method( + self.drop_row_range: self._wrap_method( self.drop_row_range, default_timeout=3600.0, client_info=client_info, ), - self.generate_consistency_token: gapic_v1.method_async.wrap_method( + self.generate_consistency_token: self._wrap_method( self.generate_consistency_token, default_retry=retries.AsyncRetry( initial=1.0, @@ -1293,7 +1297,7 @@ def _prep_wrapped_messages(self, client_info): default_timeout=60.0, client_info=client_info, ), - self.check_consistency: gapic_v1.method_async.wrap_method( + self.check_consistency: self._wrap_method( self.check_consistency, default_retry=retries.AsyncRetry( initial=1.0, @@ -1308,12 +1312,12 @@ def _prep_wrapped_messages(self, client_info): default_timeout=60.0, client_info=client_info, ), - self.snapshot_table: gapic_v1.method_async.wrap_method( + self.snapshot_table: self._wrap_method( self.snapshot_table, default_timeout=None, client_info=client_info, ), - self.get_snapshot: gapic_v1.method_async.wrap_method( + self.get_snapshot: self._wrap_method( self.get_snapshot, default_retry=retries.AsyncRetry( initial=1.0, @@ -1328,7 +1332,7 @@ def _prep_wrapped_messages(self, client_info): default_timeout=60.0, client_info=client_info, ), - self.list_snapshots: gapic_v1.method_async.wrap_method( + self.list_snapshots: self._wrap_method( self.list_snapshots, default_retry=retries.AsyncRetry( initial=1.0, @@ -1343,17 +1347,17 @@ def _prep_wrapped_messages(self, client_info): default_timeout=60.0, client_info=client_info, ), - self.delete_snapshot: gapic_v1.method_async.wrap_method( + self.delete_snapshot: self._wrap_method( self.delete_snapshot, default_timeout=300.0, client_info=client_info, ), - self.create_backup: gapic_v1.method_async.wrap_method( + self.create_backup: self._wrap_method( self.create_backup, default_timeout=60.0, client_info=client_info, ), - self.get_backup: gapic_v1.method_async.wrap_method( + self.get_backup: self._wrap_method( self.get_backup, default_retry=retries.AsyncRetry( initial=1.0, @@ -1368,17 +1372,17 @@ def _prep_wrapped_messages(self, client_info): default_timeout=60.0, client_info=client_info, ), - self.update_backup: gapic_v1.method_async.wrap_method( + self.update_backup: self._wrap_method( self.update_backup, default_timeout=60.0, client_info=client_info, ), - self.delete_backup: gapic_v1.method_async.wrap_method( + self.delete_backup: self._wrap_method( self.delete_backup, default_timeout=300.0, client_info=client_info, ), - self.list_backups: gapic_v1.method_async.wrap_method( + self.list_backups: self._wrap_method( self.list_backups, default_retry=retries.AsyncRetry( initial=1.0, @@ -1393,17 +1397,17 @@ def _prep_wrapped_messages(self, client_info): default_timeout=60.0, client_info=client_info, ), - self.restore_table: gapic_v1.method_async.wrap_method( + self.restore_table: self._wrap_method( self.restore_table, default_timeout=60.0, 
client_info=client_info, ), - self.copy_backup: gapic_v1.method_async.wrap_method( + self.copy_backup: self._wrap_method( self.copy_backup, default_timeout=None, client_info=client_info, ), - self.get_iam_policy: gapic_v1.method_async.wrap_method( + self.get_iam_policy: self._wrap_method( self.get_iam_policy, default_retry=retries.AsyncRetry( initial=1.0, @@ -1418,12 +1422,12 @@ def _prep_wrapped_messages(self, client_info): default_timeout=60.0, client_info=client_info, ), - self.set_iam_policy: gapic_v1.method_async.wrap_method( + self.set_iam_policy: self._wrap_method( self.set_iam_policy, default_timeout=60.0, client_info=client_info, ), - self.test_iam_permissions: gapic_v1.method_async.wrap_method( + self.test_iam_permissions: self._wrap_method( self.test_iam_permissions, default_retry=retries.AsyncRetry( initial=1.0, @@ -1440,8 +1444,17 @@ def _prep_wrapped_messages(self, client_info): ), } + def _wrap_method(self, func, *args, **kwargs): + if self._wrap_with_kind: # pragma: NO COVER + kwargs["kind"] = self.kind + return gapic_v1.method_async.wrap_method(func, *args, **kwargs) + def close(self): return self.grpc_channel.close() + @property + def kind(self) -> str: + return "grpc_asyncio" + __all__ = ("BigtableTableAdminGrpcAsyncIOTransport",) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py index 230b13a43e7c..b25ddec60503 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py @@ -16,29 +16,21 @@ from google.auth.transport.requests import AuthorizedSession # type: ignore import json # type: ignore -import grpc # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.api_core import exceptions as core_exceptions from google.api_core import retry as retries from google.api_core import rest_helpers from google.api_core import rest_streaming -from google.api_core import path_template from google.api_core import gapic_v1 from google.protobuf import json_format from google.api_core import operations_v1 + from requests import __version__ as requests_version import dataclasses -import re from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union import warnings -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object, None] # type: ignore - from google.cloud.bigtable_admin_v2.types import bigtable_table_admin from google.cloud.bigtable_admin_v2.types import table @@ -48,16 +40,20 @@ from google.protobuf import empty_pb2 # type: ignore from google.longrunning import operations_pb2 # type: ignore -from .base import ( - BigtableTableAdminTransport, - DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO, -) + +from .rest_base import _BaseBigtableTableAdminRestTransport +from .base import DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object, None] # type: ignore DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( 
gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, grpc_version=None, - rest_version=requests_version, + rest_version=f"requests@{requests_version}", ) @@ -945,8 +941,8 @@ class BigtableTableAdminRestStub: _interceptor: BigtableTableAdminRestInterceptor -class BigtableTableAdminRestTransport(BigtableTableAdminTransport): - """REST backend transport for BigtableTableAdmin. +class BigtableTableAdminRestTransport(_BaseBigtableTableAdminRestTransport): + """REST backend synchronous transport for BigtableTableAdmin. Service for creating, configuring, and deleting Cloud Bigtable tables. @@ -959,7 +955,6 @@ class BigtableTableAdminRestTransport(BigtableTableAdminTransport): and call it. It sends JSON representations of protocol buffers over HTTP/1.1 - """ def __init__( @@ -1013,21 +1008,12 @@ def __init__( # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the # credentials object - maybe_url_match = re.match("^(?Phttp(?:s)?://)?(?P.*)$", host) - if maybe_url_match is None: - raise ValueError( - f"Unexpected hostname structure: {host}" - ) # pragma: NO COVER - - url_match_items = maybe_url_match.groupdict() - - host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host - super().__init__( host=host, credentials=credentials, client_info=client_info, always_use_jwt_access=always_use_jwt_access, + url_scheme=url_scheme, api_audience=api_audience, ) self._session = AuthorizedSession( @@ -1091,19 +1077,35 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: # Return the client from cache. return self._operations_client - class _CheckConsistency(BigtableTableAdminRestStub): + class _CheckConsistency( + _BaseBigtableTableAdminRestTransport._BaseCheckConsistency, + BigtableTableAdminRestStub, + ): def __hash__(self): - return hash("CheckConsistency") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableTableAdminRestTransport.CheckConsistency") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -1132,47 +1134,34 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{name=projects/*/instances/*/tables/*}:checkConsistency", - "body": "*", - }, - ] + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseCheckConsistency._get_http_options() + ) request, metadata = self._interceptor.pre_check_consistency( request, metadata ) - pb_request = bigtable_table_admin.CheckConsistencyRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseCheckConsistency._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True + body = 
_BaseBigtableTableAdminRestTransport._BaseCheckConsistency._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseCheckConsistency._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = BigtableTableAdminRestTransport._CheckConsistency._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -1188,19 +1177,34 @@ def __call__( resp = self._interceptor.post_check_consistency(resp) return resp - class _CopyBackup(BigtableTableAdminRestStub): + class _CopyBackup( + _BaseBigtableTableAdminRestTransport._BaseCopyBackup, BigtableTableAdminRestStub + ): def __hash__(self): - return hash("CopyBackup") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableTableAdminRestTransport.CopyBackup") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -1230,45 +1234,32 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{parent=projects/*/instances/*/clusters/*}/backups:copy", - "body": "*", - }, - ] + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseCopyBackup._get_http_options() + ) request, metadata = self._interceptor.pre_copy_backup(request, metadata) - pb_request = bigtable_table_admin.CopyBackupRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseCopyBackup._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True + body = _BaseBigtableTableAdminRestTransport._BaseCopyBackup._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseCopyBackup._get_query_params_json( + transcoded_request ) - 
query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = BigtableTableAdminRestTransport._CopyBackup._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -1282,21 +1273,35 @@ def __call__( resp = self._interceptor.post_copy_backup(resp) return resp - class _CreateAuthorizedView(BigtableTableAdminRestStub): + class _CreateAuthorizedView( + _BaseBigtableTableAdminRestTransport._BaseCreateAuthorizedView, + BigtableTableAdminRestStub, + ): def __hash__(self): - return hash("CreateAuthorizedView") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - "authorizedViewId": "", - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableTableAdminRestTransport.CreateAuthorizedView") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -1326,47 +1331,36 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{parent=projects/*/instances/*/tables/*}/authorizedViews", - "body": "authorized_view", - }, - ] + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseCreateAuthorizedView._get_http_options() + ) request, metadata = self._interceptor.pre_create_authorized_view( request, metadata ) - pb_request = bigtable_table_admin.CreateAuthorizedViewRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseCreateAuthorizedView._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True + body = _BaseBigtableTableAdminRestTransport._BaseCreateAuthorizedView._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseCreateAuthorizedView._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, 
- params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = ( + BigtableTableAdminRestTransport._CreateAuthorizedView._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -1380,21 +1374,35 @@ def __call__( resp = self._interceptor.post_create_authorized_view(resp) return resp - class _CreateBackup(BigtableTableAdminRestStub): + class _CreateBackup( + _BaseBigtableTableAdminRestTransport._BaseCreateBackup, + BigtableTableAdminRestStub, + ): def __hash__(self): - return hash("CreateBackup") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - "backupId": "", - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableTableAdminRestTransport.CreateBackup") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -1424,45 +1432,32 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{parent=projects/*/instances/*/clusters/*}/backups", - "body": "backup", - }, - ] + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseCreateBackup._get_http_options() + ) request, metadata = self._interceptor.pre_create_backup(request, metadata) - pb_request = bigtable_table_admin.CreateBackupRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseCreateBackup._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True + body = _BaseBigtableTableAdminRestTransport._BaseCreateBackup._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseCreateBackup._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = BigtableTableAdminRestTransport._CreateBackup._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -1476,19 +1471,35 @@ def __call__( resp = 
self._interceptor.post_create_backup(resp) return resp - class _CreateTable(BigtableTableAdminRestStub): + class _CreateTable( + _BaseBigtableTableAdminRestTransport._BaseCreateTable, + BigtableTableAdminRestStub, + ): def __hash__(self): - return hash("CreateTable") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableTableAdminRestTransport.CreateTable") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -1519,45 +1530,32 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{parent=projects/*/instances/*}/tables", - "body": "*", - }, - ] + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseCreateTable._get_http_options() + ) request, metadata = self._interceptor.pre_create_table(request, metadata) - pb_request = bigtable_table_admin.CreateTableRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseCreateTable._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True + body = _BaseBigtableTableAdminRestTransport._BaseCreateTable._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseCreateTable._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = BigtableTableAdminRestTransport._CreateTable._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -1573,19 +1571,35 @@ def __call__( resp = self._interceptor.post_create_table(resp) return resp - class _CreateTableFromSnapshot(BigtableTableAdminRestStub): + class _CreateTableFromSnapshot( + _BaseBigtableTableAdminRestTransport._BaseCreateTableFromSnapshot, + BigtableTableAdminRestStub, + ): def __hash__(self): - return hash("CreateTableFromSnapshot") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in 
cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableTableAdminRestTransport.CreateTableFromSnapshot") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -1623,47 +1637,36 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot", - "body": "*", - }, - ] + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseCreateTableFromSnapshot._get_http_options() + ) request, metadata = self._interceptor.pre_create_table_from_snapshot( request, metadata ) - pb_request = bigtable_table_admin.CreateTableFromSnapshotRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseCreateTableFromSnapshot._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True + body = _BaseBigtableTableAdminRestTransport._BaseCreateTableFromSnapshot._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseCreateTableFromSnapshot._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = ( + BigtableTableAdminRestTransport._CreateTableFromSnapshot._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -1677,19 +1680,34 @@ def __call__( resp = self._interceptor.post_create_table_from_snapshot(resp) return resp - class _DeleteAuthorizedView(BigtableTableAdminRestStub): + class _DeleteAuthorizedView( + _BaseBigtableTableAdminRestTransport._BaseDeleteAuthorizedView, + BigtableTableAdminRestStub, + ): def __hash__(self): - return hash("DeleteAuthorizedView") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableTableAdminRestTransport.DeleteAuthorizedView") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = 
transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response def __call__( self, @@ -1712,40 +1730,31 @@ def __call__( sent along with the request as metadata. """ - http_options: List[Dict[str, str]] = [ - { - "method": "delete", - "uri": "/v2/{name=projects/*/instances/*/tables/*/authorizedViews/*}", - }, - ] + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseDeleteAuthorizedView._get_http_options() + ) request, metadata = self._interceptor.pre_delete_authorized_view( request, metadata ) - pb_request = bigtable_table_admin.DeleteAuthorizedViewRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request["uri"] - method = transcoded_request["method"] + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseDeleteAuthorizedView._get_transcoded_request( + http_options, request + ) # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseDeleteAuthorizedView._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), + response = ( + BigtableTableAdminRestTransport._DeleteAuthorizedView._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -1753,19 +1762,34 @@ def __call__( if response.status_code >= 400: raise core_exceptions.from_http_response(response) - class _DeleteBackup(BigtableTableAdminRestStub): + class _DeleteBackup( + _BaseBigtableTableAdminRestTransport._BaseDeleteBackup, + BigtableTableAdminRestStub, + ): def __hash__(self): - return hash("DeleteBackup") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableTableAdminRestTransport.DeleteBackup") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response def __call__( self, @@ -1788,38 +1812,27 @@ def __call__( sent along with the request as metadata. 
""" - http_options: List[Dict[str, str]] = [ - { - "method": "delete", - "uri": "/v2/{name=projects/*/instances/*/clusters/*/backups/*}", - }, - ] + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseDeleteBackup._get_http_options() + ) request, metadata = self._interceptor.pre_delete_backup(request, metadata) - pb_request = bigtable_table_admin.DeleteBackupRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request["uri"] - method = transcoded_request["method"] + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseDeleteBackup._get_transcoded_request( + http_options, request + ) # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseDeleteBackup._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), + response = BigtableTableAdminRestTransport._DeleteBackup._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -1827,19 +1840,34 @@ def __call__( if response.status_code >= 400: raise core_exceptions.from_http_response(response) - class _DeleteSnapshot(BigtableTableAdminRestStub): + class _DeleteSnapshot( + _BaseBigtableTableAdminRestTransport._BaseDeleteSnapshot, + BigtableTableAdminRestStub, + ): def __hash__(self): - return hash("DeleteSnapshot") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableTableAdminRestTransport.DeleteSnapshot") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response def __call__( self, @@ -1869,38 +1897,27 @@ def __call__( sent along with the request as metadata. 
""" - http_options: List[Dict[str, str]] = [ - { - "method": "delete", - "uri": "/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}", - }, - ] + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseDeleteSnapshot._get_http_options() + ) request, metadata = self._interceptor.pre_delete_snapshot(request, metadata) - pb_request = bigtable_table_admin.DeleteSnapshotRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request["uri"] - method = transcoded_request["method"] + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseDeleteSnapshot._get_transcoded_request( + http_options, request + ) # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseDeleteSnapshot._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), + response = BigtableTableAdminRestTransport._DeleteSnapshot._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -1908,19 +1925,34 @@ def __call__( if response.status_code >= 400: raise core_exceptions.from_http_response(response) - class _DeleteTable(BigtableTableAdminRestStub): + class _DeleteTable( + _BaseBigtableTableAdminRestTransport._BaseDeleteTable, + BigtableTableAdminRestStub, + ): def __hash__(self): - return hash("DeleteTable") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableTableAdminRestTransport.DeleteTable") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response def __call__( self, @@ -1943,38 +1975,27 @@ def __call__( sent along with the request as metadata. 
""" - http_options: List[Dict[str, str]] = [ - { - "method": "delete", - "uri": "/v2/{name=projects/*/instances/*/tables/*}", - }, - ] + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseDeleteTable._get_http_options() + ) request, metadata = self._interceptor.pre_delete_table(request, metadata) - pb_request = bigtable_table_admin.DeleteTableRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request["uri"] - method = transcoded_request["method"] + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseDeleteTable._get_transcoded_request( + http_options, request + ) # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseDeleteTable._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), + response = BigtableTableAdminRestTransport._DeleteTable._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -1982,19 +2003,35 @@ def __call__( if response.status_code >= 400: raise core_exceptions.from_http_response(response) - class _DropRowRange(BigtableTableAdminRestStub): + class _DropRowRange( + _BaseBigtableTableAdminRestTransport._BaseDropRowRange, + BigtableTableAdminRestStub, + ): def __hash__(self): - return hash("DropRowRange") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableTableAdminRestTransport.DropRowRange") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -2017,45 +2054,32 @@ def __call__( sent along with the request as metadata. 
""" - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{name=projects/*/instances/*/tables/*}:dropRowRange", - "body": "*", - }, - ] + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseDropRowRange._get_http_options() + ) request, metadata = self._interceptor.pre_drop_row_range(request, metadata) - pb_request = bigtable_table_admin.DropRowRangeRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseDropRowRange._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True + body = _BaseBigtableTableAdminRestTransport._BaseDropRowRange._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseDropRowRange._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = BigtableTableAdminRestTransport._DropRowRange._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -2063,19 +2087,35 @@ def __call__( if response.status_code >= 400: raise core_exceptions.from_http_response(response) - class _GenerateConsistencyToken(BigtableTableAdminRestStub): + class _GenerateConsistencyToken( + _BaseBigtableTableAdminRestTransport._BaseGenerateConsistencyToken, + BigtableTableAdminRestStub, + ): def __hash__(self): - return hash("GenerateConsistencyToken") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableTableAdminRestTransport.GenerateConsistencyToken") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -2105,49 +2145,36 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken", - "body": "*", - }, - ] + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseGenerateConsistencyToken._get_http_options() + ) request, metadata = self._interceptor.pre_generate_consistency_token( request, metadata ) - pb_request = 
bigtable_table_admin.GenerateConsistencyTokenRequest.pb( - request + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseGenerateConsistencyToken._get_transcoded_request( + http_options, request ) - transcoded_request = path_template.transcode(http_options, pb_request) - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True + body = _BaseBigtableTableAdminRestTransport._BaseGenerateConsistencyToken._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseGenerateConsistencyToken._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = ( + BigtableTableAdminRestTransport._GenerateConsistencyToken._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -2163,19 +2190,34 @@ def __call__( resp = self._interceptor.post_generate_consistency_token(resp) return resp - class _GetAuthorizedView(BigtableTableAdminRestStub): + class _GetAuthorizedView( + _BaseBigtableTableAdminRestTransport._BaseGetAuthorizedView, + BigtableTableAdminRestStub, + ): def __hash__(self): - return hash("GetAuthorizedView") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableTableAdminRestTransport.GetAuthorizedView") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response def __call__( self, @@ -2208,40 +2250,29 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "get", - "uri": "/v2/{name=projects/*/instances/*/tables/*/authorizedViews/*}", - }, - ] + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseGetAuthorizedView._get_http_options() + ) request, metadata = self._interceptor.pre_get_authorized_view( request, metadata ) - pb_request = bigtable_table_admin.GetAuthorizedViewRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request["uri"] - method = transcoded_request["method"] + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseGetAuthorizedView._get_transcoded_request( + http_options, request + ) # Jsonify the query params - query_params 
= json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseGetAuthorizedView._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), + response = BigtableTableAdminRestTransport._GetAuthorizedView._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -2257,19 +2288,33 @@ def __call__( resp = self._interceptor.post_get_authorized_view(resp) return resp - class _GetBackup(BigtableTableAdminRestStub): + class _GetBackup( + _BaseBigtableTableAdminRestTransport._BaseGetBackup, BigtableTableAdminRestStub + ): def __hash__(self): - return hash("GetBackup") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableTableAdminRestTransport.GetBackup") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response def __call__( self, @@ -2296,38 +2341,27 @@ def __call__( A backup of a Cloud Bigtable table. 
""" - http_options: List[Dict[str, str]] = [ - { - "method": "get", - "uri": "/v2/{name=projects/*/instances/*/clusters/*/backups/*}", - }, - ] + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseGetBackup._get_http_options() + ) request, metadata = self._interceptor.pre_get_backup(request, metadata) - pb_request = bigtable_table_admin.GetBackupRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request["uri"] - method = transcoded_request["method"] + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseGetBackup._get_transcoded_request( + http_options, request + ) # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseGetBackup._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), + response = BigtableTableAdminRestTransport._GetBackup._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -2343,19 +2377,35 @@ def __call__( resp = self._interceptor.post_get_backup(resp) return resp - class _GetIamPolicy(BigtableTableAdminRestStub): + class _GetIamPolicy( + _BaseBigtableTableAdminRestTransport._BaseGetIamPolicy, + BigtableTableAdminRestStub, + ): def __hash__(self): - return hash("GetIamPolicy") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableTableAdminRestTransport.GetIamPolicy") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -2456,50 +2506,32 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{resource=projects/*/instances/*/tables/*}:getIamPolicy", - "body": "*", - }, - { - "method": "post", - "uri": "/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:getIamPolicy", - "body": "*", - }, - ] + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseGetIamPolicy._get_http_options() + ) request, metadata = self._interceptor.pre_get_iam_policy(request, metadata) - pb_request = request - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseGetIamPolicy._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - 
transcoded_request["body"], use_integers_for_enums=True + body = _BaseBigtableTableAdminRestTransport._BaseGetIamPolicy._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseGetIamPolicy._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = BigtableTableAdminRestTransport._GetIamPolicy._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -2515,19 +2547,34 @@ def __call__( resp = self._interceptor.post_get_iam_policy(resp) return resp - class _GetSnapshot(BigtableTableAdminRestStub): + class _GetSnapshot( + _BaseBigtableTableAdminRestTransport._BaseGetSnapshot, + BigtableTableAdminRestStub, + ): def __hash__(self): - return hash("GetSnapshot") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableTableAdminRestTransport.GetSnapshot") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response def __call__( self, @@ -2574,38 +2621,27 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "get", - "uri": "/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}", - }, - ] + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseGetSnapshot._get_http_options() + ) request, metadata = self._interceptor.pre_get_snapshot(request, metadata) - pb_request = bigtable_table_admin.GetSnapshotRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request["uri"] - method = transcoded_request["method"] + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseGetSnapshot._get_transcoded_request( + http_options, request + ) # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseGetSnapshot._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = 
getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), + response = BigtableTableAdminRestTransport._GetSnapshot._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -2621,19 +2657,33 @@ def __call__( resp = self._interceptor.post_get_snapshot(resp) return resp - class _GetTable(BigtableTableAdminRestStub): + class _GetTable( + _BaseBigtableTableAdminRestTransport._BaseGetTable, BigtableTableAdminRestStub + ): def __hash__(self): - return hash("GetTable") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableTableAdminRestTransport.GetTable") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response def __call__( self, @@ -2664,38 +2714,27 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "get", - "uri": "/v2/{name=projects/*/instances/*/tables/*}", - }, - ] + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseGetTable._get_http_options() + ) request, metadata = self._interceptor.pre_get_table(request, metadata) - pb_request = bigtable_table_admin.GetTableRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request["uri"] - method = transcoded_request["method"] + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseGetTable._get_transcoded_request( + http_options, request + ) # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseGetTable._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), + response = BigtableTableAdminRestTransport._GetTable._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -2711,19 +2750,34 @@ def __call__( resp = self._interceptor.post_get_table(resp) return resp - class _ListAuthorizedViews(BigtableTableAdminRestStub): + class _ListAuthorizedViews( + _BaseBigtableTableAdminRestTransport._BaseListAuthorizedViews, + BigtableTableAdminRestStub, + ): def __hash__(self): - return hash("ListAuthorizedViews") - - __REQUIRED_FIELDS_DEFAULT_VALUES: 
Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableTableAdminRestTransport.ListAuthorizedViews") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response def __call__( self, @@ -2752,40 +2806,31 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "get", - "uri": "/v2/{parent=projects/*/instances/*/tables/*}/authorizedViews", - }, - ] + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseListAuthorizedViews._get_http_options() + ) request, metadata = self._interceptor.pre_list_authorized_views( request, metadata ) - pb_request = bigtable_table_admin.ListAuthorizedViewsRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request["uri"] - method = transcoded_request["method"] + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseListAuthorizedViews._get_transcoded_request( + http_options, request + ) # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseListAuthorizedViews._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), + response = ( + BigtableTableAdminRestTransport._ListAuthorizedViews._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -2801,19 +2846,34 @@ def __call__( resp = self._interceptor.post_list_authorized_views(resp) return resp - class _ListBackups(BigtableTableAdminRestStub): + class _ListBackups( + _BaseBigtableTableAdminRestTransport._BaseListBackups, + BigtableTableAdminRestStub, + ): def __hash__(self): - return hash("ListBackups") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableTableAdminRestTransport.ListBackups") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + 
params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response def __call__( self, @@ -2842,38 +2902,27 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "get", - "uri": "/v2/{parent=projects/*/instances/*/clusters/*}/backups", - }, - ] + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseListBackups._get_http_options() + ) request, metadata = self._interceptor.pre_list_backups(request, metadata) - pb_request = bigtable_table_admin.ListBackupsRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request["uri"] - method = transcoded_request["method"] + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseListBackups._get_transcoded_request( + http_options, request + ) # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseListBackups._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), + response = BigtableTableAdminRestTransport._ListBackups._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -2889,19 +2938,34 @@ def __call__( resp = self._interceptor.post_list_backups(resp) return resp - class _ListSnapshots(BigtableTableAdminRestStub): + class _ListSnapshots( + _BaseBigtableTableAdminRestTransport._BaseListSnapshots, + BigtableTableAdminRestStub, + ): def __hash__(self): - return hash("ListSnapshots") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableTableAdminRestTransport.ListSnapshots") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response def __call__( self, @@ -2944,38 +3008,27 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "get", - "uri": "/v2/{parent=projects/*/instances/*/clusters/*}/snapshots", - }, - ] + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseListSnapshots._get_http_options() + ) request, metadata = self._interceptor.pre_list_snapshots(request, metadata) - pb_request = bigtable_table_admin.ListSnapshotsRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request["uri"] - method = transcoded_request["method"] + transcoded_request = 
_BaseBigtableTableAdminRestTransport._BaseListSnapshots._get_transcoded_request( + http_options, request + ) # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseListSnapshots._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), + response = BigtableTableAdminRestTransport._ListSnapshots._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -2991,19 +3044,33 @@ def __call__( resp = self._interceptor.post_list_snapshots(resp) return resp - class _ListTables(BigtableTableAdminRestStub): + class _ListTables( + _BaseBigtableTableAdminRestTransport._BaseListTables, BigtableTableAdminRestStub + ): def __hash__(self): - return hash("ListTables") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableTableAdminRestTransport.ListTables") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response def __call__( self, @@ -3032,38 +3099,27 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "get", - "uri": "/v2/{parent=projects/*/instances/*}/tables", - }, - ] + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseListTables._get_http_options() + ) request, metadata = self._interceptor.pre_list_tables(request, metadata) - pb_request = bigtable_table_admin.ListTablesRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request["uri"] - method = transcoded_request["method"] + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseListTables._get_transcoded_request( + http_options, request + ) # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseListTables._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, 
strict=True), + response = BigtableTableAdminRestTransport._ListTables._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -3079,19 +3135,35 @@ def __call__( resp = self._interceptor.post_list_tables(resp) return resp - class _ModifyColumnFamilies(BigtableTableAdminRestStub): + class _ModifyColumnFamilies( + _BaseBigtableTableAdminRestTransport._BaseModifyColumnFamilies, + BigtableTableAdminRestStub, + ): def __hash__(self): - return hash("ModifyColumnFamilies") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableTableAdminRestTransport.ModifyColumnFamilies") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -3122,47 +3194,36 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies", - "body": "*", - }, - ] + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseModifyColumnFamilies._get_http_options() + ) request, metadata = self._interceptor.pre_modify_column_families( request, metadata ) - pb_request = bigtable_table_admin.ModifyColumnFamiliesRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseModifyColumnFamilies._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True + body = _BaseBigtableTableAdminRestTransport._BaseModifyColumnFamilies._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseModifyColumnFamilies._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = ( + BigtableTableAdminRestTransport._ModifyColumnFamilies._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -3178,19 +3239,35 @@ def __call__( resp = self._interceptor.post_modify_column_families(resp) 
return resp - class _RestoreTable(BigtableTableAdminRestStub): + class _RestoreTable( + _BaseBigtableTableAdminRestTransport._BaseRestoreTable, + BigtableTableAdminRestStub, + ): def __hash__(self): - return hash("RestoreTable") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableTableAdminRestTransport.RestoreTable") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -3220,45 +3297,32 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{parent=projects/*/instances/*}/tables:restore", - "body": "*", - }, - ] + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseRestoreTable._get_http_options() + ) request, metadata = self._interceptor.pre_restore_table(request, metadata) - pb_request = bigtable_table_admin.RestoreTableRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseRestoreTable._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True + body = _BaseBigtableTableAdminRestTransport._BaseRestoreTable._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseRestoreTable._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = BigtableTableAdminRestTransport._RestoreTable._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -3272,19 +3336,35 @@ def __call__( resp = self._interceptor.post_restore_table(resp) return resp - class _SetIamPolicy(BigtableTableAdminRestStub): + class _SetIamPolicy( + _BaseBigtableTableAdminRestTransport._BaseSetIamPolicy, + BigtableTableAdminRestStub, + ): def __hash__(self): - return hash("SetIamPolicy") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return 
hash("BigtableTableAdminRestTransport.SetIamPolicy") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -3385,50 +3465,32 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{resource=projects/*/instances/*/tables/*}:setIamPolicy", - "body": "*", - }, - { - "method": "post", - "uri": "/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:setIamPolicy", - "body": "*", - }, - ] + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseSetIamPolicy._get_http_options() + ) request, metadata = self._interceptor.pre_set_iam_policy(request, metadata) - pb_request = request - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseSetIamPolicy._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True + body = _BaseBigtableTableAdminRestTransport._BaseSetIamPolicy._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseSetIamPolicy._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = BigtableTableAdminRestTransport._SetIamPolicy._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -3444,19 +3506,35 @@ def __call__( resp = self._interceptor.post_set_iam_policy(resp) return resp - class _SnapshotTable(BigtableTableAdminRestStub): + class _SnapshotTable( + _BaseBigtableTableAdminRestTransport._BaseSnapshotTable, + BigtableTableAdminRestStub, + ): def __hash__(self): - return hash("SnapshotTable") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableTableAdminRestTransport.SnapshotTable") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + 
"{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -3493,45 +3571,32 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{name=projects/*/instances/*/tables/*}:snapshot", - "body": "*", - }, - ] + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseSnapshotTable._get_http_options() + ) request, metadata = self._interceptor.pre_snapshot_table(request, metadata) - pb_request = bigtable_table_admin.SnapshotTableRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseSnapshotTable._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True + body = _BaseBigtableTableAdminRestTransport._BaseSnapshotTable._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseSnapshotTable._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = BigtableTableAdminRestTransport._SnapshotTable._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -3545,19 +3610,35 @@ def __call__( resp = self._interceptor.post_snapshot_table(resp) return resp - class _TestIamPermissions(BigtableTableAdminRestStub): + class _TestIamPermissions( + _BaseBigtableTableAdminRestTransport._BaseTestIamPermissions, + BigtableTableAdminRestStub, + ): def __hash__(self): - return hash("TestIamPermissions") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableTableAdminRestTransport.TestIamPermissions") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -3583,52 +3664,36 @@ def __call__( Response message for ``TestIamPermissions`` method. 
""" - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{resource=projects/*/instances/*/tables/*}:testIamPermissions", - "body": "*", - }, - { - "method": "post", - "uri": "/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:testIamPermissions", - "body": "*", - }, - ] + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseTestIamPermissions._get_http_options() + ) request, metadata = self._interceptor.pre_test_iam_permissions( request, metadata ) - pb_request = request - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseTestIamPermissions._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True + body = _BaseBigtableTableAdminRestTransport._BaseTestIamPermissions._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseTestIamPermissions._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = ( + BigtableTableAdminRestTransport._TestIamPermissions._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -3644,19 +3709,35 @@ def __call__( resp = self._interceptor.post_test_iam_permissions(resp) return resp - class _UndeleteTable(BigtableTableAdminRestStub): + class _UndeleteTable( + _BaseBigtableTableAdminRestTransport._BaseUndeleteTable, + BigtableTableAdminRestStub, + ): def __hash__(self): - return hash("UndeleteTable") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableTableAdminRestTransport.UndeleteTable") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -3686,45 +3767,32 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{name=projects/*/instances/*/tables/*}:undelete", - "body": "*", - }, - ] + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseUndeleteTable._get_http_options() + ) request, metadata = 
self._interceptor.pre_undelete_table(request, metadata) - pb_request = bigtable_table_admin.UndeleteTableRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseUndeleteTable._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True + body = _BaseBigtableTableAdminRestTransport._BaseUndeleteTable._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseUndeleteTable._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = BigtableTableAdminRestTransport._UndeleteTable._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -3738,19 +3806,35 @@ def __call__( resp = self._interceptor.post_undelete_table(resp) return resp - class _UpdateAuthorizedView(BigtableTableAdminRestStub): + class _UpdateAuthorizedView( + _BaseBigtableTableAdminRestTransport._BaseUpdateAuthorizedView, + BigtableTableAdminRestStub, + ): def __hash__(self): - return hash("UpdateAuthorizedView") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableTableAdminRestTransport.UpdateAuthorizedView") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -3780,47 +3864,36 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "patch", - "uri": "/v2/{authorized_view.name=projects/*/instances/*/tables/*/authorizedViews/*}", - "body": "authorized_view", - }, - ] + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseUpdateAuthorizedView._get_http_options() + ) request, metadata = self._interceptor.pre_update_authorized_view( request, metadata ) - pb_request = bigtable_table_admin.UpdateAuthorizedViewRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseUpdateAuthorizedView._get_transcoded_request( + http_options, request + ) - 
body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True + body = _BaseBigtableTableAdminRestTransport._BaseUpdateAuthorizedView._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseUpdateAuthorizedView._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = ( + BigtableTableAdminRestTransport._UpdateAuthorizedView._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -3834,21 +3907,35 @@ def __call__( resp = self._interceptor.post_update_authorized_view(resp) return resp - class _UpdateBackup(BigtableTableAdminRestStub): + class _UpdateBackup( + _BaseBigtableTableAdminRestTransport._BaseUpdateBackup, + BigtableTableAdminRestStub, + ): def __hash__(self): - return hash("UpdateBackup") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - "updateMask": {}, - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableTableAdminRestTransport.UpdateBackup") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -3875,45 +3962,32 @@ def __call__( A backup of a Cloud Bigtable table. 
""" - http_options: List[Dict[str, str]] = [ - { - "method": "patch", - "uri": "/v2/{backup.name=projects/*/instances/*/clusters/*/backups/*}", - "body": "backup", - }, - ] + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseUpdateBackup._get_http_options() + ) request, metadata = self._interceptor.pre_update_backup(request, metadata) - pb_request = bigtable_table_admin.UpdateBackupRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseUpdateBackup._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True + body = _BaseBigtableTableAdminRestTransport._BaseUpdateBackup._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseUpdateBackup._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = BigtableTableAdminRestTransport._UpdateBackup._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -3929,21 +4003,35 @@ def __call__( resp = self._interceptor.post_update_backup(resp) return resp - class _UpdateTable(BigtableTableAdminRestStub): + class _UpdateTable( + _BaseBigtableTableAdminRestTransport._BaseUpdateTable, + BigtableTableAdminRestStub, + ): def __hash__(self): - return hash("UpdateTable") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - "updateMask": {}, - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableTableAdminRestTransport.UpdateTable") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -3973,45 +4061,32 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "patch", - "uri": "/v2/{table.name=projects/*/instances/*/tables/*}", - "body": "table", - }, - ] + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseUpdateTable._get_http_options() + ) request, metadata = self._interceptor.pre_update_table(request, metadata) - pb_request = bigtable_table_admin.UpdateTableRequest.pb(request) - transcoded_request = 
path_template.transcode(http_options, pb_request) - - # Jsonify the request body + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseUpdateTable._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True + body = _BaseBigtableTableAdminRestTransport._BaseUpdateTable._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableTableAdminRestTransport._BaseUpdateTable._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = BigtableTableAdminRestTransport._UpdateTable._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest_base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest_base.py new file mode 100644 index 000000000000..fbaf89e52d33 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest_base.py @@ -0,0 +1,1714 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import json # type: ignore +from google.api_core import path_template +from google.api_core import gapic_v1 + +from google.protobuf import json_format +from .base import BigtableTableAdminTransport, DEFAULT_CLIENT_INFO + +import re +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union + + +from google.cloud.bigtable_admin_v2.types import bigtable_table_admin +from google.cloud.bigtable_admin_v2.types import table +from google.cloud.bigtable_admin_v2.types import table as gba_table +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore + + +class _BaseBigtableTableAdminRestTransport(BigtableTableAdminTransport): + """Base REST backend transport for BigtableTableAdmin. + + Note: This class is not meant to be used directly. Use its sync and + async sub-classes instead. 
+ + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + + def __init__( + self, + *, + host: str = "bigtableadmin.googleapis.com", + credentials: Optional[Any] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + url_scheme: str = "https", + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + Args: + host (Optional[str]): + The hostname to connect to (default: 'bigtableadmin.googleapis.com'). + credentials (Optional[Any]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host) + if maybe_url_match is None: + raise ValueError( + f"Unexpected hostname structure: {host}" + ) # pragma: NO COVER + + url_match_items = maybe_url_match.groupdict() + + host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host + + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + class _BaseCheckConsistency: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{name=projects/*/instances/*/tables/*}:checkConsistency", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.CheckConsistencyRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseCheckConsistency._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseCopyBackup: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be 
implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{parent=projects/*/instances/*/clusters/*}/backups:copy", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.CopyBackupRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseCopyBackup._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseCreateAuthorizedView: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "authorizedViewId": "", + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{parent=projects/*/instances/*/tables/*}/authorizedViews", + "body": "authorized_view", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.CreateAuthorizedViewRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseCreateAuthorizedView._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseCreateBackup: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "backupId": "", + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{parent=projects/*/instances/*/clusters/*}/backups", + "body": "backup", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + 
pb_request = bigtable_table_admin.CreateBackupRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseCreateBackup._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseCreateTable: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{parent=projects/*/instances/*}/tables", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.CreateTableRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseCreateTable._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseCreateTableFromSnapshot: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.CreateTableFromSnapshotRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + 
_BaseBigtableTableAdminRestTransport._BaseCreateTableFromSnapshot._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseDeleteAuthorizedView: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/v2/{name=projects/*/instances/*/tables/*/authorizedViews/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.DeleteAuthorizedViewRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseDeleteAuthorizedView._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseDeleteBackup: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/v2/{name=projects/*/instances/*/clusters/*/backups/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.DeleteBackupRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseDeleteBackup._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseDeleteSnapshot: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.DeleteSnapshotRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def 
_get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseDeleteSnapshot._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseDeleteTable: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/v2/{name=projects/*/instances/*/tables/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.DeleteTableRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseDeleteTable._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseDropRowRange: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{name=projects/*/instances/*/tables/*}:dropRowRange", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.DropRowRangeRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseDropRowRange._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseGenerateConsistencyToken: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": 
"/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.GenerateConsistencyTokenRequest.pb( + request + ) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseGenerateConsistencyToken._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseGetAuthorizedView: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{name=projects/*/instances/*/tables/*/authorizedViews/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.GetAuthorizedViewRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseGetAuthorizedView._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseGetBackup: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{name=projects/*/instances/*/clusters/*/backups/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.GetBackupRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseGetBackup._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseGetIamPolicy: + def __hash__(self): # pragma: NO COVER + return 
NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{resource=projects/*/instances/*/tables/*}:getIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:getIamPolicy", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = request + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseGetIamPolicy._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseGetSnapshot: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.GetSnapshotRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseGetSnapshot._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseGetTable: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{name=projects/*/instances/*/tables/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.GetTableRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + 
query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseGetTable._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseListAuthorizedViews: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{parent=projects/*/instances/*/tables/*}/authorizedViews", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.ListAuthorizedViewsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseListAuthorizedViews._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseListBackups: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{parent=projects/*/instances/*/clusters/*}/backups", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.ListBackupsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseListBackups._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseListSnapshots: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{parent=projects/*/instances/*/clusters/*}/snapshots", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.ListSnapshotsRequest.pb(request) + transcoded_request = 
path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseListSnapshots._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseListTables: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{parent=projects/*/instances/*}/tables", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.ListTablesRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseListTables._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseModifyColumnFamilies: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.ModifyColumnFamiliesRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseModifyColumnFamilies._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseRestoreTable: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + 
@staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{parent=projects/*/instances/*}/tables:restore", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.RestoreTableRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseRestoreTable._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseSetIamPolicy: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{resource=projects/*/instances/*/tables/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:setIamPolicy", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = request + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseSetIamPolicy._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseSnapshotTable: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{name=projects/*/instances/*/tables/*}:snapshot", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.SnapshotTableRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = 
json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseSnapshotTable._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseTestIamPermissions: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{resource=projects/*/instances/*/tables/*}:testIamPermissions", + "body": "*", + }, + { + "method": "post", + "uri": "/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:testIamPermissions", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = request + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseTestIamPermissions._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseUndeleteTable: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{name=projects/*/instances/*/tables/*}:undelete", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.UndeleteTableRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseUndeleteTable._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class 
_BaseUpdateAuthorizedView: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "patch", + "uri": "/v2/{authorized_view.name=projects/*/instances/*/tables/*/authorizedViews/*}", + "body": "authorized_view", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.UpdateAuthorizedViewRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseUpdateAuthorizedView._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseUpdateBackup: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "updateMask": {}, + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "patch", + "uri": "/v2/{backup.name=projects/*/instances/*/clusters/*/backups/*}", + "body": "backup", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.UpdateBackupRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseUpdateBackup._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseUpdateTable: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "updateMask": {}, + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "patch", + "uri": "/v2/{table.name=projects/*/instances/*/tables/*}", + 
"body": "table", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.UpdateTableRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseUpdateTable._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + +__all__ = ("_BaseBigtableTableAdminRestTransport",) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py index 54b7f2c63cf3..b05e171c1e6c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -335,16 +335,32 @@ def read_rows( self._client._transport.read_rows ] - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) - if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): - metadata += ( - gapic_v1.routing_header.to_grpc_metadata( - (("table_name", request.table_name),) - ), + header_params = {} + + routing_param_regex = re.compile( + "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+)$" + ) + regex_match = routing_param_regex.match(request.table_name) + if regex_match and regex_match.group("table_name"): + header_params["table_name"] = regex_match.group("table_name") + + if request.app_profile_id: + header_params["app_profile_id"] = request.app_profile_id + + routing_param_regex = re.compile( + "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+/authorizedViews/[^/]+)$" + ) + regex_match = routing_param_regex.match(request.authorized_view_name) + if regex_match and regex_match.group("authorized_view_name"): + header_params["authorized_view_name"] = regex_match.group( + "authorized_view_name" ) + if header_params: + metadata = tuple(metadata) + if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): + metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) + # Validate the universe domain. self._client._validate_universe_domain() @@ -438,16 +454,32 @@ def sample_row_keys( self._client._transport.sample_row_keys ] - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) - if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): - metadata += ( - gapic_v1.routing_header.to_grpc_metadata( - (("table_name", request.table_name),) - ), + header_params = {} + + routing_param_regex = re.compile( + "^(?P<table_name>projects/[^/]+/instances/[^/]+/tables/[^/]+)$" + ) + regex_match = routing_param_regex.match(request.table_name) + if regex_match and regex_match.group("table_name"): + header_params["table_name"] = regex_match.group("table_name") + + if request.app_profile_id: + header_params["app_profile_id"] = request.app_profile_id + + routing_param_regex = re.compile( + "^(?P<authorized_view_name>projects/[^/]+/instances/[^/]+/tables/[^/]+/authorizedViews/[^/]+)$" + ) + regex_match = routing_param_regex.match(request.authorized_view_name) + if regex_match and regex_match.group("authorized_view_name"): + header_params["authorized_view_name"] = regex_match.group( + "authorized_view_name" ) + if header_params: + metadata = tuple(metadata) + if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): + metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) + # Validate the universe domain. self._client._validate_universe_domain() @@ -562,16 +594,32 @@ async def mutate_row( self._client._transport.mutate_row ] - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) - if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): - metadata += ( - gapic_v1.routing_header.to_grpc_metadata( - (("table_name", request.table_name),) - ), + header_params = {} + + routing_param_regex = re.compile( + "^(?P<table_name>projects/[^/]+/instances/[^/]+/tables/[^/]+)$" + ) + regex_match = routing_param_regex.match(request.table_name) + if regex_match and regex_match.group("table_name"): + header_params["table_name"] = regex_match.group("table_name") + + if request.app_profile_id: + header_params["app_profile_id"] = request.app_profile_id + + routing_param_regex = re.compile( + "^(?P<authorized_view_name>projects/[^/]+/instances/[^/]+/tables/[^/]+/authorizedViews/[^/]+)$" + ) + regex_match = routing_param_regex.match(request.authorized_view_name) + if regex_match and regex_match.group("authorized_view_name"): + header_params["authorized_view_name"] = regex_match.group( + "authorized_view_name" ) + if header_params: + metadata = tuple(metadata) + if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): + metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) + # Validate the universe domain. self._client._validate_universe_domain() @@ -680,16 +728,32 @@ def mutate_rows( self._client._transport.mutate_rows ] - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) - if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): - metadata += ( - gapic_v1.routing_header.to_grpc_metadata( - (("table_name", request.table_name),) - ), + header_params = {} + + routing_param_regex = re.compile( + "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+)$" + ) + regex_match = routing_param_regex.match(request.table_name) + if regex_match and regex_match.group("table_name"): + header_params["table_name"] = regex_match.group("table_name") + + if request.app_profile_id: + header_params["app_profile_id"] = request.app_profile_id + + routing_param_regex = re.compile( + "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+/authorizedViews/[^/]+)$" + ) + regex_match = routing_param_regex.match(request.authorized_view_name) + if regex_match and regex_match.group("authorized_view_name"): + header_params["authorized_view_name"] = regex_match.group( + "authorized_view_name" ) + if header_params: + metadata = tuple(metadata) + if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): + metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) + # Validate the universe domain. self._client._validate_universe_domain() @@ -841,16 +905,32 @@ async def check_and_mutate_row( self._client._transport.check_and_mutate_row ] - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) - if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): - metadata += ( - gapic_v1.routing_header.to_grpc_metadata( - (("table_name", request.table_name),) - ), + header_params = {} + + routing_param_regex = re.compile( + "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+)$" + ) + regex_match = routing_param_regex.match(request.table_name) + if regex_match and regex_match.group("table_name"): + header_params["table_name"] = regex_match.group("table_name") + + if request.app_profile_id: + header_params["app_profile_id"] = request.app_profile_id + + routing_param_regex = re.compile( + "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+/authorizedViews/[^/]+)$" + ) + regex_match = routing_param_regex.match(request.authorized_view_name) + if regex_match and regex_match.group("authorized_view_name"): + header_params["authorized_view_name"] = regex_match.group( + "authorized_view_name" ) + if header_params: + metadata = tuple(metadata) + if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): + metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) + # Validate the universe domain. self._client._validate_universe_domain() @@ -941,13 +1021,20 @@ async def ping_and_warm( self._client._transport.ping_and_warm ] - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + header_params = {} + + routing_param_regex = re.compile("^(?Pprojects/[^/]+/instances/[^/]+)$") + regex_match = routing_param_regex.match(request.name) + if regex_match and regex_match.group("name"): + header_params["name"] = regex_match.group("name") + + if request.app_profile_id: + header_params["app_profile_id"] = request.app_profile_id + + if header_params: + metadata = tuple(metadata) if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): - metadata += ( - gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), - ) + metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) # Validate the universe domain. 
self._client._validate_universe_domain() @@ -1069,16 +1156,32 @@ async def read_modify_write_row( self._client._transport.read_modify_write_row ] - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) - if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): - metadata += ( - gapic_v1.routing_header.to_grpc_metadata( - (("table_name", request.table_name),) - ), + header_params = {} + + routing_param_regex = re.compile( + "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+)$" + ) + regex_match = routing_param_regex.match(request.table_name) + if regex_match and regex_match.group("table_name"): + header_params["table_name"] = regex_match.group("table_name") + + if request.app_profile_id: + header_params["app_profile_id"] = request.app_profile_id + + routing_param_regex = re.compile( + "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+/authorizedViews/[^/]+)$" + ) + regex_match = routing_param_regex.match(request.authorized_view_name) + if regex_match and regex_match.group("authorized_view_name"): + header_params["authorized_view_name"] = regex_match.group( + "authorized_view_name" ) + if header_params: + metadata = tuple(metadata) + if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): + metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) + # Validate the universe domain. self._client._validate_universe_domain() @@ -1390,15 +1493,20 @@ def execute_query( self._client._transport.execute_query ] - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + header_params = {} + + routing_param_regex = re.compile("^(?Pprojects/[^/]+/instances/[^/]+)$") + regex_match = routing_param_regex.match(request.instance_name) + if regex_match and regex_match.group("name"): + header_params["name"] = regex_match.group("name") + + if request.app_profile_id: + header_params["app_profile_id"] = request.app_profile_id + + if header_params: + metadata = tuple(metadata) if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): - metadata += ( - gapic_v1.routing_header.to_grpc_metadata( - (("instance_name", request.instance_name),) - ), - ) + metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) # Validate the universe domain. self._client._validate_universe_domain() diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py index 86fa6b3a5eb2..a90a4a1a78c0 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py @@ -505,36 +505,6 @@ def _get_universe_domain( raise ValueError("Universe Domain cannot be an empty string.") return universe_domain - @staticmethod - def _compare_universes( - client_universe: str, credentials: ga_credentials.Credentials - ) -> bool: - """Returns True iff the universe domains used by the client and credentials match. - - Args: - client_universe (str): The universe domain configured via the client options. - credentials (ga_credentials.Credentials): The credentials being used in the client. - - Returns: - bool: True iff client_universe matches the universe in credentials. - - Raises: - ValueError: when client_universe does not match the universe in credentials. 
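ExecuteQuery routes on the instance rather than the table: the value is read from request.instance_name but is keyed as "name" in the routing header, matching the (?P<name>...) group the pattern is assumed to use. A small hypothetical helper showing just that mapping:

# Sketch with a hypothetical helper: ExecuteQuery's routing value comes from
# instance_name but is sent under the "name" routing key.
import re

from google.api_core import gapic_v1

_INSTANCE_PATTERN = re.compile("^(?P<name>projects/[^/]+/instances/[^/]+)$")


def execute_query_routing(instance_name, app_profile_id=""):
    header_params = {}
    match = _INSTANCE_PATTERN.match(instance_name)
    if match and match.group("name"):
        header_params["name"] = match.group("name")
    if app_profile_id:
        header_params["app_profile_id"] = app_profile_id
    if not header_params:
        return ()
    return (gapic_v1.routing_header.to_grpc_metadata(header_params),)


print(execute_query_routing("projects/p/instances/i", "profile-1"))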
- """ - - default_universe = BigtableClient._DEFAULT_UNIVERSE - credentials_universe = getattr(credentials, "universe_domain", default_universe) - - if client_universe != credentials_universe: - raise ValueError( - "The configured universe domain " - f"({client_universe}) does not match the universe domain " - f"found in the credentials ({credentials_universe}). " - "If you haven't configured the universe domain explicitly, " - f"`{default_universe}` is the default." - ) - return True - def _validate_universe_domain(self): """Validates client's and credentials' universe domains are consistent. @@ -544,13 +514,9 @@ def _validate_universe_domain(self): Raises: ValueError: If the configured universe domain is not valid. """ - self._is_universe_domain_valid = ( - self._is_universe_domain_valid - or BigtableClient._compare_universes( - self.universe_domain, self.transport._credentials - ) - ) - return self._is_universe_domain_valid + + # NOTE (b/349488459): universe validation is disabled until further notice. + return True @property def api_endpoint(self): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/README.rst b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/README.rst new file mode 100644 index 000000000000..254812cd366f --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/README.rst @@ -0,0 +1,9 @@ + +transport inheritance structure +_______________________________ + +`BigtableTransport` is the ABC for all transports. +- public child `BigtableGrpcTransport` for sync gRPC transport (defined in `grpc.py`). +- public child `BigtableGrpcAsyncIOTransport` for async gRPC transport (defined in `grpc_asyncio.py`). +- private child `_BaseBigtableRestTransport` for base REST transport with inner classes `_BaseMETHOD` (defined in `rest_base.py`). +- public child `BigtableRestTransport` for sync REST transport with inner classes `METHOD` derived from the parent's corresponding `_BaseMETHOD` classes (defined in `rest.py`). diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py index 40d6a3fa466f..6f6e1fe850c6 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # +import inspect import warnings from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union @@ -228,6 +229,9 @@ def __init__( ) # Wrap messages. 
This must be done after self._grpc_channel exists + self._wrap_with_kind = ( + "kind" in inspect.signature(gapic_v1.method_async.wrap_method).parameters + ) self._prep_wrapped_messages(client_info) @property @@ -551,17 +555,17 @@ def execute_query( def _prep_wrapped_messages(self, client_info): """Precompute the wrapped methods, overriding the base class method to use async wrappers.""" self._wrapped_methods = { - self.read_rows: gapic_v1.method_async.wrap_method( + self.read_rows: self._wrap_method( self.read_rows, default_timeout=43200.0, client_info=client_info, ), - self.sample_row_keys: gapic_v1.method_async.wrap_method( + self.sample_row_keys: self._wrap_method( self.sample_row_keys, default_timeout=60.0, client_info=client_info, ), - self.mutate_row: gapic_v1.method_async.wrap_method( + self.mutate_row: self._wrap_method( self.mutate_row, default_retry=retries.AsyncRetry( initial=0.01, @@ -576,45 +580,54 @@ def _prep_wrapped_messages(self, client_info): default_timeout=60.0, client_info=client_info, ), - self.mutate_rows: gapic_v1.method_async.wrap_method( + self.mutate_rows: self._wrap_method( self.mutate_rows, default_timeout=600.0, client_info=client_info, ), - self.check_and_mutate_row: gapic_v1.method_async.wrap_method( + self.check_and_mutate_row: self._wrap_method( self.check_and_mutate_row, default_timeout=20.0, client_info=client_info, ), - self.ping_and_warm: gapic_v1.method_async.wrap_method( + self.ping_and_warm: self._wrap_method( self.ping_and_warm, default_timeout=None, client_info=client_info, ), - self.read_modify_write_row: gapic_v1.method_async.wrap_method( + self.read_modify_write_row: self._wrap_method( self.read_modify_write_row, default_timeout=20.0, client_info=client_info, ), - self.generate_initial_change_stream_partitions: gapic_v1.method_async.wrap_method( + self.generate_initial_change_stream_partitions: self._wrap_method( self.generate_initial_change_stream_partitions, default_timeout=60.0, client_info=client_info, ), - self.read_change_stream: gapic_v1.method_async.wrap_method( + self.read_change_stream: self._wrap_method( self.read_change_stream, default_timeout=43200.0, client_info=client_info, ), - self.execute_query: gapic_v1.method_async.wrap_method( + self.execute_query: self._wrap_method( self.execute_query, default_timeout=None, client_info=client_info, ), } + def _wrap_method(self, func, *args, **kwargs): + if self._wrap_with_kind: # pragma: NO COVER + kwargs["kind"] = self.kind + return gapic_v1.method_async.wrap_method(func, *args, **kwargs) + def close(self): return self.grpc_channel.close() + @property + def kind(self) -> str: + return "grpc_asyncio" + __all__ = ("BigtableGrpcAsyncIOTransport",) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/pooled_grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/pooled_grpc_asyncio.py index 372e5796d6bc..ce8fec4e9cb2 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/pooled_grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/pooled_grpc_asyncio.py @@ -14,6 +14,7 @@ # limitations under the License. # import asyncio +import inspect import warnings from functools import partialmethod from functools import partial @@ -387,6 +388,9 @@ def __init__( ) # Wrap messages. 
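_wrap_with_kind is a compatibility probe: older google-api-core releases do not accept a kind argument on wrap_method, so the transport forwards it only when the installed signature declares it. The same idiom in isolation, with hypothetical stand-in functions rather than google-api-core itself:

# Feature-detection sketch: forward a keyword argument only when the callee's
# signature declares it (stand-in functions, not google-api-core).
import inspect


def wrap(func, method_name, *, kind=None):
    kwargs = {}
    if "kind" in inspect.signature(func).parameters:
        kwargs["kind"] = kind
    return func(method_name, **kwargs)


def old_wrap_method(method_name):
    return ("old", method_name)


def new_wrap_method(method_name, kind=None):
    return ("new", method_name, kind)


print(wrap(old_wrap_method, "read_rows", kind="grpc_asyncio"))  # kind dropped
print(wrap(new_wrap_method, "read_rows", kind="grpc_asyncio"))  # kind forwarded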
This must be done after self._grpc_channel exists + self._wrap_with_kind = ( + "kind" in inspect.signature(gapic_v1.method_async.wrap_method).parameters + ) self._prep_wrapped_messages(client_info) @property diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py index a3391005f34c..221b04b8a42b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py @@ -16,38 +16,37 @@ from google.auth.transport.requests import AuthorizedSession # type: ignore import json # type: ignore -import grpc # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.api_core import exceptions as core_exceptions from google.api_core import retry as retries from google.api_core import rest_helpers from google.api_core import rest_streaming -from google.api_core import path_template from google.api_core import gapic_v1 from google.protobuf import json_format + from requests import __version__ as requests_version import dataclasses -import re from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union import warnings + +from google.cloud.bigtable_v2.types import bigtable + + +from .rest_base import _BaseBigtableRestTransport +from .base import DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + try: OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.Retry, object, None] # type: ignore -from google.cloud.bigtable_v2.types import bigtable - -from .base import BigtableTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO - - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, grpc_version=None, - rest_version=requests_version, + rest_version=f"requests@{requests_version}", ) @@ -382,8 +381,8 @@ class BigtableRestStub: _interceptor: BigtableRestInterceptor -class BigtableRestTransport(BigtableTransport): - """REST backend transport for Bigtable. +class BigtableRestTransport(_BaseBigtableRestTransport): + """REST backend synchronous transport for Bigtable. Service for reading from and writing to existing Bigtable tables. @@ -393,7 +392,6 @@ class BigtableRestTransport(BigtableTransport): and call it. It sends JSON representations of protocol buffers over HTTP/1.1 - """ def __init__( @@ -447,21 +445,12 @@ def __init__( # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. 
# TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the # credentials object - maybe_url_match = re.match("^(?Phttp(?:s)?://)?(?P.*)$", host) - if maybe_url_match is None: - raise ValueError( - f"Unexpected hostname structure: {host}" - ) # pragma: NO COVER - - url_match_items = maybe_url_match.groupdict() - - host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host - super().__init__( host=host, credentials=credentials, client_info=client_info, always_use_jwt_access=always_use_jwt_access, + url_scheme=url_scheme, api_audience=api_audience, ) self._session = AuthorizedSession( @@ -472,19 +461,34 @@ def __init__( self._interceptor = interceptor or BigtableRestInterceptor() self._prep_wrapped_messages(client_info) - class _CheckAndMutateRow(BigtableRestStub): + class _CheckAndMutateRow( + _BaseBigtableRestTransport._BaseCheckAndMutateRow, BigtableRestStub + ): def __hash__(self): - return hash("CheckAndMutateRow") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableRestTransport.CheckAndMutateRow") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -513,52 +517,34 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow", - "body": "*", - }, - { - "method": "post", - "uri": "/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:checkAndMutateRow", - "body": "*", - }, - ] + http_options = ( + _BaseBigtableRestTransport._BaseCheckAndMutateRow._get_http_options() + ) request, metadata = self._interceptor.pre_check_and_mutate_row( request, metadata ) - pb_request = bigtable.CheckAndMutateRowRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body + transcoded_request = _BaseBigtableRestTransport._BaseCheckAndMutateRow._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True + body = _BaseBigtableRestTransport._BaseCheckAndMutateRow._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableRestTransport._BaseCheckAndMutateRow._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - 
params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = BigtableRestTransport._CheckAndMutateRow._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -574,19 +560,33 @@ def __call__( resp = self._interceptor.post_check_and_mutate_row(resp) return resp - class _ExecuteQuery(BigtableRestStub): + class _ExecuteQuery(_BaseBigtableRestTransport._BaseExecuteQuery, BigtableRestStub): def __hash__(self): - return hash("ExecuteQuery") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableRestTransport.ExecuteQuery") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + stream=True, + ) + return response def __call__( self, @@ -615,45 +615,36 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{instance_name=projects/*/instances/*}:executeQuery", - "body": "*", - }, - ] + http_options = ( + _BaseBigtableRestTransport._BaseExecuteQuery._get_http_options() + ) request, metadata = self._interceptor.pre_execute_query(request, metadata) - pb_request = bigtable.ExecuteQueryRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body + transcoded_request = ( + _BaseBigtableRestTransport._BaseExecuteQuery._get_transcoded_request( + http_options, request + ) + ) - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True + body = _BaseBigtableRestTransport._BaseExecuteQuery._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, + query_params = ( + _BaseBigtableRestTransport._BaseExecuteQuery._get_query_params_json( + transcoded_request ) ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = BigtableRestTransport._ExecuteQuery._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -668,19 +659,36 @@ def __call__( resp = self._interceptor.post_execute_query(resp) return resp - class _GenerateInitialChangeStreamPartitions(BigtableRestStub): + class _GenerateInitialChangeStreamPartitions( + 
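Each REST method class now composes helpers inherited from its _Base* counterpart instead of inlining the HTTP plumbing in __call__. The call order is the important part; a self-contained sketch with fake stand-ins (no real HTTP, no generated types) makes it explicit:

# Pipeline sketch with fake stand-ins: shows only the order in which the
# inherited _Base* helpers are composed by a REST method's __call__.
import json


class _BaseFakeMethod:
    @staticmethod
    def _get_http_options():
        return [{"method": "post", "uri": "/v2/{name=projects/*}:fake", "body": "*"}]

    @staticmethod
    def _get_transcoded_request(http_options, request):
        # The generated code delegates to google.api_core.path_template.transcode().
        return {"method": http_options[0]["method"],
                "uri": "/v2/projects/demo:fake",
                "body": request,
                "query_params": {}}

    @staticmethod
    def _get_request_body_json(transcoded_request):
        return json.dumps(transcoded_request["body"])

    @staticmethod
    def _get_query_params_json(transcoded_request):
        query_params = dict(transcoded_request["query_params"])
        query_params["$alt"] = "json;enum-encoding=int"
        return query_params


class _FakeMethod(_BaseFakeMethod):
    @staticmethod
    def _get_response(host, query_params, transcoded_request, body):
        # The generated code performs the request on an AuthorizedSession here.
        return (f"{transcoded_request['method'].upper()} {host}"
                f"{transcoded_request['uri']} params={query_params} body={body}")

    def __call__(self, request, host="https://bigtable.googleapis.com"):
        http_options = self._get_http_options()
        transcoded = self._get_transcoded_request(http_options, request)
        body = self._get_request_body_json(transcoded)
        query_params = self._get_query_params_json(transcoded)
        return self._get_response(host, query_params, transcoded, body)


print(_FakeMethod()({"true_mutations": []}))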
_BaseBigtableRestTransport._BaseGenerateInitialChangeStreamPartitions, + BigtableRestStub, + ): def __hash__(self): - return hash("GenerateInitialChangeStreamPartitions") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableRestTransport.GenerateInitialChangeStreamPartitions") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + stream=True, + ) + return response def __call__( self, @@ -714,52 +722,37 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{table_name=projects/*/instances/*/tables/*}:generateInitialChangeStreamPartitions", - "body": "*", - }, - ] + http_options = ( + _BaseBigtableRestTransport._BaseGenerateInitialChangeStreamPartitions._get_http_options() + ) ( request, metadata, ) = self._interceptor.pre_generate_initial_change_stream_partitions( request, metadata ) - pb_request = bigtable.GenerateInitialChangeStreamPartitionsRequest.pb( - request + transcoded_request = _BaseBigtableRestTransport._BaseGenerateInitialChangeStreamPartitions._get_transcoded_request( + http_options, request ) - transcoded_request = path_template.transcode(http_options, pb_request) - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True + body = _BaseBigtableRestTransport._BaseGenerateInitialChangeStreamPartitions._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = _BaseBigtableRestTransport._BaseGenerateInitialChangeStreamPartitions._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = BigtableRestTransport._GenerateInitialChangeStreamPartitions._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -776,19 +769,32 @@ def __call__( ) return resp - class _MutateRow(BigtableRestStub): + class _MutateRow(_BaseBigtableRestTransport._BaseMutateRow, BigtableRestStub): def __hash__(self): - return hash("MutateRow") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in 
message_dict - } + return hash("BigtableRestTransport.MutateRow") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -817,50 +823,34 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow", - "body": "*", - }, - { - "method": "post", - "uri": "/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:mutateRow", - "body": "*", - }, - ] + http_options = _BaseBigtableRestTransport._BaseMutateRow._get_http_options() request, metadata = self._interceptor.pre_mutate_row(request, metadata) - pb_request = bigtable.MutateRowRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body + transcoded_request = ( + _BaseBigtableRestTransport._BaseMutateRow._get_transcoded_request( + http_options, request + ) + ) - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True + body = _BaseBigtableRestTransport._BaseMutateRow._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, + query_params = ( + _BaseBigtableRestTransport._BaseMutateRow._get_query_params_json( + transcoded_request ) ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = BigtableRestTransport._MutateRow._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -876,19 +866,33 @@ def __call__( resp = self._interceptor.post_mutate_row(resp) return resp - class _MutateRows(BigtableRestStub): + class _MutateRows(_BaseBigtableRestTransport._BaseMutateRows, BigtableRestStub): def __hash__(self): - return hash("MutateRows") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableRestTransport.MutateRows") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + 
params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + stream=True, + ) + return response def __call__( self, @@ -917,50 +921,36 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows", - "body": "*", - }, - { - "method": "post", - "uri": "/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:mutateRows", - "body": "*", - }, - ] + http_options = ( + _BaseBigtableRestTransport._BaseMutateRows._get_http_options() + ) request, metadata = self._interceptor.pre_mutate_rows(request, metadata) - pb_request = bigtable.MutateRowsRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body + transcoded_request = ( + _BaseBigtableRestTransport._BaseMutateRows._get_transcoded_request( + http_options, request + ) + ) - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True + body = _BaseBigtableRestTransport._BaseMutateRows._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, + query_params = ( + _BaseBigtableRestTransport._BaseMutateRows._get_query_params_json( + transcoded_request ) ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = BigtableRestTransport._MutateRows._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -975,19 +965,32 @@ def __call__( resp = self._interceptor.post_mutate_rows(resp) return resp - class _PingAndWarm(BigtableRestStub): + class _PingAndWarm(_BaseBigtableRestTransport._BasePingAndWarm, BigtableRestStub): def __hash__(self): - return hash("PingAndWarm") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableRestTransport.PingAndWarm") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -1017,45 +1020,36 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{name=projects/*/instances/*}:ping", - "body": "*", - }, - ] + http_options = ( + _BaseBigtableRestTransport._BasePingAndWarm._get_http_options() + ) request, metadata = 
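The _get_response helpers differ in one detail between unary and server-streaming methods: streaming RPCs (ReadRows, MutateRows, SampleRowKeys, ReadChangeStream, GenerateInitialChangeStreamPartitions, ExecuteQuery) pass stream=True so the response body can be consumed incrementally, while unary RPCs buffer the whole body. A plain-requests sketch of the distinction (illustrative URL and payload only; the generated transport uses an AuthorizedSession and protobuf payloads):

# Streaming-vs-unary sketch using plain requests.
import requests


def post_unary(session: requests.Session, url: str, body: str) -> dict:
    # Unary: the entire response body is downloaded before returning.
    return session.post(url, data=body, timeout=60).json()


def post_streaming(session: requests.Session, url: str, body: str):
    # Server streaming: stream=True defers the download so the caller can
    # iterate over chunks as the server produces them.
    response = session.post(url, data=body, timeout=60, stream=True)
    for chunk in response.iter_content(chunk_size=None):
        yield chunk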
self._interceptor.pre_ping_and_warm(request, metadata) - pb_request = bigtable.PingAndWarmRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body + transcoded_request = ( + _BaseBigtableRestTransport._BasePingAndWarm._get_transcoded_request( + http_options, request + ) + ) - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True + body = _BaseBigtableRestTransport._BasePingAndWarm._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, + query_params = ( + _BaseBigtableRestTransport._BasePingAndWarm._get_query_params_json( + transcoded_request ) ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = BigtableRestTransport._PingAndWarm._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -1071,19 +1065,35 @@ def __call__( resp = self._interceptor.post_ping_and_warm(resp) return resp - class _ReadChangeStream(BigtableRestStub): + class _ReadChangeStream( + _BaseBigtableRestTransport._BaseReadChangeStream, BigtableRestStub + ): def __hash__(self): - return hash("ReadChangeStream") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableRestTransport.ReadChangeStream") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + stream=True, + ) + return response def __call__( self, @@ -1114,47 +1124,38 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{table_name=projects/*/instances/*/tables/*}:readChangeStream", - "body": "*", - }, - ] + http_options = ( + _BaseBigtableRestTransport._BaseReadChangeStream._get_http_options() + ) request, metadata = self._interceptor.pre_read_change_stream( request, metadata ) - pb_request = bigtable.ReadChangeStreamRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body + transcoded_request = _BaseBigtableRestTransport._BaseReadChangeStream._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True + body = ( + _BaseBigtableRestTransport._BaseReadChangeStream._get_request_body_json( 
+ transcoded_request + ) ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, + query_params = ( + _BaseBigtableRestTransport._BaseReadChangeStream._get_query_params_json( + transcoded_request ) ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = BigtableRestTransport._ReadChangeStream._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -1169,19 +1170,34 @@ def __call__( resp = self._interceptor.post_read_change_stream(resp) return resp - class _ReadModifyWriteRow(BigtableRestStub): + class _ReadModifyWriteRow( + _BaseBigtableRestTransport._BaseReadModifyWriteRow, BigtableRestStub + ): def __hash__(self): - return hash("ReadModifyWriteRow") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return { - k: v - for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() - if k not in message_dict - } + return hash("BigtableRestTransport.ReadModifyWriteRow") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response def __call__( self, @@ -1210,52 +1226,34 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow", - "body": "*", - }, - { - "method": "post", - "uri": "/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:readModifyWriteRow", - "body": "*", - }, - ] + http_options = ( + _BaseBigtableRestTransport._BaseReadModifyWriteRow._get_http_options() + ) request, metadata = self._interceptor.pre_read_modify_write_row( request, metadata ) - pb_request = bigtable.ReadModifyWriteRowRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body + transcoded_request = _BaseBigtableRestTransport._BaseReadModifyWriteRow._get_transcoded_request( + http_options, request + ) - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True + body = _BaseBigtableRestTransport._BaseReadModifyWriteRow._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, - ) + query_params = 
_BaseBigtableRestTransport._BaseReadModifyWriteRow._get_query_params_json( + transcoded_request ) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = BigtableRestTransport._ReadModifyWriteRow._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -1271,9 +1269,33 @@ def __call__( resp = self._interceptor.post_read_modify_write_row(resp) return resp - class _ReadRows(BigtableRestStub): + class _ReadRows(_BaseBigtableRestTransport._BaseReadRows, BigtableRestStub): def __hash__(self): - return hash("ReadRows") + return hash("BigtableRestTransport.ReadRows") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + stream=True, + ) + return response def __call__( self, @@ -1302,49 +1324,34 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "post", - "uri": "/v2/{table_name=projects/*/instances/*/tables/*}:readRows", - "body": "*", - }, - { - "method": "post", - "uri": "/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:readRows", - "body": "*", - }, - ] + http_options = _BaseBigtableRestTransport._BaseReadRows._get_http_options() request, metadata = self._interceptor.pre_read_rows(request, metadata) - pb_request = bigtable.ReadRowsRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body + transcoded_request = ( + _BaseBigtableRestTransport._BaseReadRows._get_transcoded_request( + http_options, request + ) + ) - body = json_format.MessageToJson( - transcoded_request["body"], use_integers_for_enums=True + body = _BaseBigtableRestTransport._BaseReadRows._get_request_body_json( + transcoded_request ) - uri = transcoded_request["uri"] - method = transcoded_request["method"] # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, + query_params = ( + _BaseBigtableRestTransport._BaseReadRows._get_query_params_json( + transcoded_request ) ) - query_params["$alt"] = "json;enum-encoding=int" - # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, + response = BigtableRestTransport._ReadRows._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) # In case of error, raise the appropriate 
core_exceptions.GoogleAPICallError exception @@ -1357,9 +1364,34 @@ def __call__( resp = self._interceptor.post_read_rows(resp) return resp - class _SampleRowKeys(BigtableRestStub): + class _SampleRowKeys( + _BaseBigtableRestTransport._BaseSampleRowKeys, BigtableRestStub + ): def __hash__(self): - return hash("SampleRowKeys") + return hash("BigtableRestTransport.SampleRowKeys") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + stream=True, + ) + return response def __call__( self, @@ -1388,41 +1420,31 @@ def __call__( """ - http_options: List[Dict[str, str]] = [ - { - "method": "get", - "uri": "/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys", - }, - { - "method": "get", - "uri": "/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:sampleRowKeys", - }, - ] + http_options = ( + _BaseBigtableRestTransport._BaseSampleRowKeys._get_http_options() + ) request, metadata = self._interceptor.pre_sample_row_keys(request, metadata) - pb_request = bigtable.SampleRowKeysRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request["uri"] - method = transcoded_request["method"] + transcoded_request = ( + _BaseBigtableRestTransport._BaseSampleRowKeys._get_transcoded_request( + http_options, request + ) + ) # Jsonify the query params - query_params = json.loads( - json_format.MessageToJson( - transcoded_request["query_params"], - use_integers_for_enums=True, + query_params = ( + _BaseBigtableRestTransport._BaseSampleRowKeys._get_query_params_json( + transcoded_request ) ) - query_params["$alt"] = "json;enum-encoding=int" - # Send the request - headers = dict(metadata) - headers["Content-Type"] = "application/json" - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), + response = BigtableRestTransport._SampleRowKeys._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest_base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest_base.py new file mode 100644 index 000000000000..9d2292a3c45b --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest_base.py @@ -0,0 +1,654 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import json # type: ignore +from google.api_core import path_template +from google.api_core import gapic_v1 + +from google.protobuf import json_format +from .base import BigtableTransport, DEFAULT_CLIENT_INFO + +import re +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union + + +from google.cloud.bigtable_v2.types import bigtable + + +class _BaseBigtableRestTransport(BigtableTransport): + """Base REST backend transport for Bigtable. + + Note: This class is not meant to be used directly. Use its sync and + async sub-classes instead. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + + def __init__( + self, + *, + host: str = "bigtable.googleapis.com", + credentials: Optional[Any] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + url_scheme: str = "https", + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + Args: + host (Optional[str]): + The hostname to connect to (default: 'bigtable.googleapis.com'). + credentials (Optional[Any]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. 
+ """ + # Run the base constructor + maybe_url_match = re.match("^(?Phttp(?:s)?://)?(?P.*)$", host) + if maybe_url_match is None: + raise ValueError( + f"Unexpected hostname structure: {host}" + ) # pragma: NO COVER + + url_match_items = maybe_url_match.groupdict() + + host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host + + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + class _BaseCheckAndMutateRow: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow", + "body": "*", + }, + { + "method": "post", + "uri": "/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:checkAndMutateRow", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable.CheckAndMutateRowRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableRestTransport._BaseCheckAndMutateRow._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseExecuteQuery: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{instance_name=projects/*/instances/*}:executeQuery", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable.ExecuteQueryRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableRestTransport._BaseExecuteQuery._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + 
class _BaseGenerateInitialChangeStreamPartitions: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{table_name=projects/*/instances/*/tables/*}:generateInitialChangeStreamPartitions", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable.GenerateInitialChangeStreamPartitionsRequest.pb( + request + ) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableRestTransport._BaseGenerateInitialChangeStreamPartitions._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseMutateRow: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow", + "body": "*", + }, + { + "method": "post", + "uri": "/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:mutateRow", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable.MutateRowRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableRestTransport._BaseMutateRow._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseMutateRows: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = 
[ + { + "method": "post", + "uri": "/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows", + "body": "*", + }, + { + "method": "post", + "uri": "/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:mutateRows", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable.MutateRowsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableRestTransport._BaseMutateRows._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BasePingAndWarm: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{name=projects/*/instances/*}:ping", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable.PingAndWarmRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableRestTransport._BasePingAndWarm._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseReadChangeStream: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{table_name=projects/*/instances/*/tables/*}:readChangeStream", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable.ReadChangeStreamRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], 
use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableRestTransport._BaseReadChangeStream._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseReadModifyWriteRow: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow", + "body": "*", + }, + { + "method": "post", + "uri": "/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:readModifyWriteRow", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable.ReadModifyWriteRowRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableRestTransport._BaseReadModifyWriteRow._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseReadRows: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{table_name=projects/*/instances/*/tables/*}:readRows", + "body": "*", + }, + { + "method": "post", + "uri": "/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:readRows", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable.ReadRowsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseSampleRowKeys: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": 
"/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys", + }, + { + "method": "get", + "uri": "/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:sampleRowKeys", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable.SampleRowKeysRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + +__all__ = ("_BaseBigtableRestTransport",) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/feature_flags.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/feature_flags.py index bad6c163b1c9..1e408bb3a7bd 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/feature_flags.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/feature_flags.py @@ -70,6 +70,12 @@ class FeatureFlags(proto.Message): client_side_metrics_enabled (bool): Notify the server that the client has client side metrics enabled. + traffic_director_enabled (bool): + Notify the server that the client using + Traffic Director endpoint. + direct_access_requested (bool): + Notify the server that the client explicitly + opted in for Direct Access. """ reverse_scans: bool = proto.Field( @@ -100,6 +106,14 @@ class FeatureFlags(proto.Message): proto.BOOL, number=8, ) + traffic_director_enabled: bool = proto.Field( + proto.BOOL, + number=9, + ) + direct_access_requested: bool = proto.Field( + proto.BOOL, + number=10, + ) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index 961183b717f9..3f79e11a4e4d 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -24,7 +24,7 @@ import grpc from grpc.experimental import aio -from collections.abc import Iterable +from collections.abc import Iterable, AsyncIterable from google.protobuf import json_format import json import math @@ -37,6 +37,13 @@ from requests.sessions import Session from google.protobuf import json_format +try: + from google.auth.aio import credentials as ga_credentials_async + + HAS_GOOGLE_AUTH_AIO = True +except ImportError: # pragma: NO COVER + HAS_GOOGLE_AUTH_AIO = False + from google.api_core import client_options from google.api_core import exceptions as core_exceptions from google.api_core import future @@ -73,10 +80,24 @@ import google.auth +async def mock_async_gen(data, chunk_size=1): + for i in range(0, len(data)): # pragma: NO COVER + chunk = data[i : i + chunk_size] + yield chunk.encode("utf-8") + + def client_cert_source_callback(): return b"cert bytes", b"key bytes" +# TODO: use async auth anon credentials by default once the minimum version of google-auth is upgraded. +# See related issue: https://github.com/googleapis/gapic-generator-python/issues/2107. 
+def async_anonymous_credentials(): + if HAS_GOOGLE_AUTH_AIO: + return ga_credentials_async.AnonymousCredentials() + return ga_credentials.AnonymousCredentials() + + # If default endpoint is localhost, then default mtls endpoint will be the same. # This method modifies the default endpoint so the client can produce a different # mtls endpoint for endpoint testing purposes. @@ -333,94 +354,6 @@ def test__get_universe_domain(): assert str(excinfo.value) == "Universe Domain cannot be an empty string." -@pytest.mark.parametrize( - "client_class,transport_class,transport_name", - [ - ( - BigtableInstanceAdminClient, - transports.BigtableInstanceAdminGrpcTransport, - "grpc", - ), - ( - BigtableInstanceAdminClient, - transports.BigtableInstanceAdminRestTransport, - "rest", - ), - ], -) -def test__validate_universe_domain(client_class, transport_class, transport_name): - client = client_class( - transport=transport_class(credentials=ga_credentials.AnonymousCredentials()) - ) - assert client._validate_universe_domain() == True - - # Test the case when universe is already validated. - assert client._validate_universe_domain() == True - - if transport_name == "grpc": - # Test the case where credentials are provided by the - # `local_channel_credentials`. The default universes in both match. - channel = grpc.secure_channel( - "http://localhost/", grpc.local_channel_credentials() - ) - client = client_class(transport=transport_class(channel=channel)) - assert client._validate_universe_domain() == True - - # Test the case where credentials do not exist: e.g. a transport is provided - # with no credentials. Validation should still succeed because there is no - # mismatch with non-existent credentials. - channel = grpc.secure_channel( - "http://localhost/", grpc.local_channel_credentials() - ) - transport = transport_class(channel=channel) - transport._credentials = None - client = client_class(transport=transport) - assert client._validate_universe_domain() == True - - # TODO: This is needed to cater for older versions of google-auth - # Make this test unconditional once the minimum supported version of - # google-auth becomes 2.23.0 or higher. - google_auth_major, google_auth_minor = [ - int(part) for part in google.auth.__version__.split(".")[0:2] - ] - if google_auth_major > 2 or (google_auth_major == 2 and google_auth_minor >= 23): - credentials = ga_credentials.AnonymousCredentials() - credentials._universe_domain = "foo.com" - # Test the case when there is a universe mismatch from the credentials. - client = client_class(transport=transport_class(credentials=credentials)) - with pytest.raises(ValueError) as excinfo: - client._validate_universe_domain() - assert ( - str(excinfo.value) - == "The configured universe domain (googleapis.com) does not match the universe domain found in the credentials (foo.com). If you haven't configured the universe domain explicitly, `googleapis.com` is the default." - ) - - # Test the case when there is a universe mismatch from the client. - # - # TODO: Make this test unconditional once the minimum supported version of - # google-api-core becomes 2.15.0 or higher. 
- api_core_major, api_core_minor = [ - int(part) for part in api_core_version.__version__.split(".")[0:2] - ] - if api_core_major > 2 or (api_core_major == 2 and api_core_minor >= 15): - client = client_class( - client_options={"universe_domain": "bar.com"}, - transport=transport_class( - credentials=ga_credentials.AnonymousCredentials(), - ), - ) - with pytest.raises(ValueError) as excinfo: - client._validate_universe_domain() - assert ( - str(excinfo.value) - == "The configured universe domain (bar.com) does not match the universe domain found in the credentials (googleapis.com). If you haven't configured the universe domain explicitly, `googleapis.com` is the default." - ) - - # Test that ValueError is raised if universe_domain is provided via client options and credentials is None - with pytest.raises(ValueError): - client._compare_universes("foo.bar", None) - - @pytest.mark.parametrize( "client_class,transport_name", [ @@ -1249,25 +1182,6 @@ def test_create_instance(request_type, transport: str = "grpc"): assert isinstance(response, future.Future) -def test_create_instance_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.create_instance), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.create_instance() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.CreateInstanceRequest() - - def test_create_instance_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. @@ -1338,27 +1252,6 @@ def test_create_instance_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_create_instance_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.create_instance), "__call__") as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - response = await client.create_instance() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.CreateInstanceRequest() - - @pytest.mark.asyncio async def test_create_instance_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -1367,7 +1260,7 @@ async def test_create_instance_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -1412,7 +1305,7 @@ async def test_create_instance_async( request_type=bigtable_instance_admin.CreateInstanceRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -1475,7 +1368,7 @@ def test_create_instance_field_headers(): @pytest.mark.asyncio async def test_create_instance_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -1560,7 +1453,7 @@ def test_create_instance_flattened_error(): @pytest.mark.asyncio async def test_create_instance_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1601,7 +1494,7 @@ async def test_create_instance_flattened_async(): @pytest.mark.asyncio async def test_create_instance_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -1660,25 +1553,6 @@ def test_get_instance(request_type, transport: str = "grpc"): assert response.satisfies_pzs is True -def test_get_instance_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_instance), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.get_instance() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.GetInstanceRequest() - - def test_get_instance_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. @@ -1742,33 +1616,6 @@ def test_get_instance_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_get_instance_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
- client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_instance), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - instance.Instance( - name="name_value", - display_name="display_name_value", - state=instance.Instance.State.READY, - type_=instance.Instance.Type.PRODUCTION, - satisfies_pzs=True, - ) - ) - response = await client.get_instance() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.GetInstanceRequest() - - @pytest.mark.asyncio async def test_get_instance_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -1777,7 +1624,7 @@ async def test_get_instance_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -1817,7 +1664,7 @@ async def test_get_instance_async( request_type=bigtable_instance_admin.GetInstanceRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -1891,7 +1738,7 @@ def test_get_instance_field_headers(): @pytest.mark.asyncio async def test_get_instance_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -1959,7 +1806,7 @@ def test_get_instance_flattened_error(): @pytest.mark.asyncio async def test_get_instance_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1986,7 +1833,7 @@ async def test_get_instance_flattened_async(): @pytest.mark.asyncio async def test_get_instance_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -2037,25 +1884,6 @@ def test_list_instances(request_type, transport: str = "grpc"): assert response.next_page_token == "next_page_token_value" -def test_list_instances_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_instances), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. 
- ) - client.list_instances() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListInstancesRequest() - - def test_list_instances_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. @@ -2121,30 +1949,6 @@ def test_list_instances_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_list_instances_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_instances), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_instance_admin.ListInstancesResponse( - failed_locations=["failed_locations_value"], - next_page_token="next_page_token_value", - ) - ) - response = await client.list_instances() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListInstancesRequest() - - @pytest.mark.asyncio async def test_list_instances_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -2153,7 +1957,7 @@ async def test_list_instances_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -2193,7 +1997,7 @@ async def test_list_instances_async( request_type=bigtable_instance_admin.ListInstancesRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -2261,7 +2065,7 @@ def test_list_instances_field_headers(): @pytest.mark.asyncio async def test_list_instances_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -2331,7 +2135,7 @@ def test_list_instances_flattened_error(): @pytest.mark.asyncio async def test_list_instances_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -2360,7 +2164,7 @@ async def test_list_instances_flattened_async(): @pytest.mark.asyncio async def test_list_instances_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -2416,25 +2220,6 @@ def test_update_instance(request_type, transport: str = "grpc"): assert response.satisfies_pzs is True -def test_update_instance_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
- client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.update_instance), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.update_instance() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == instance.Instance() - - def test_update_instance_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. @@ -2500,33 +2285,6 @@ def test_update_instance_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_update_instance_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.update_instance), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - instance.Instance( - name="name_value", - display_name="display_name_value", - state=instance.Instance.State.READY, - type_=instance.Instance.Type.PRODUCTION, - satisfies_pzs=True, - ) - ) - response = await client.update_instance() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == instance.Instance() - - @pytest.mark.asyncio async def test_update_instance_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -2535,7 +2293,7 @@ async def test_update_instance_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -2574,7 +2332,7 @@ async def test_update_instance_async( transport: str = "grpc_asyncio", request_type=instance.Instance ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -2648,7 +2406,7 @@ def test_update_instance_field_headers(): @pytest.mark.asyncio async def test_update_instance_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -2710,27 +2468,6 @@ def test_partial_update_instance(request_type, transport: str = "grpc"): assert isinstance(response, future.Future) -def test_partial_update_instance_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.partial_update_instance), "__call__" - ) as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.partial_update_instance() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.PartialUpdateInstanceRequest() - - def test_partial_update_instance_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. @@ -2802,29 +2539,6 @@ def test_partial_update_instance_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_partial_update_instance_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.partial_update_instance), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - response = await client.partial_update_instance() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.PartialUpdateInstanceRequest() - - @pytest.mark.asyncio async def test_partial_update_instance_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -2833,7 +2547,7 @@ async def test_partial_update_instance_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -2878,7 +2592,7 @@ async def test_partial_update_instance_async( request_type=bigtable_instance_admin.PartialUpdateInstanceRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -2945,7 +2659,7 @@ def test_partial_update_instance_field_headers(): @pytest.mark.asyncio async def test_partial_update_instance_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -3024,7 +2738,7 @@ def test_partial_update_instance_flattened_error(): @pytest.mark.asyncio async def test_partial_update_instance_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -3059,7 +2773,7 @@ async def test_partial_update_instance_flattened_async(): @pytest.mark.asyncio async def test_partial_update_instance_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -3105,25 +2819,6 @@ def test_delete_instance(request_type, transport: str = "grpc"): assert response is None -def test_delete_instance_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_instance), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.delete_instance() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.DeleteInstanceRequest() - - def test_delete_instance_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. @@ -3187,25 +2882,6 @@ def test_delete_instance_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_delete_instance_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_instance), "__call__") as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.delete_instance() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.DeleteInstanceRequest() - - @pytest.mark.asyncio async def test_delete_instance_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -3214,7 +2890,7 @@ async def test_delete_instance_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -3254,7 +2930,7 @@ async def test_delete_instance_async( request_type=bigtable_instance_admin.DeleteInstanceRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -3315,7 +2991,7 @@ def test_delete_instance_field_headers(): @pytest.mark.asyncio async def test_delete_instance_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -3383,7 +3059,7 @@ def test_delete_instance_flattened_error(): @pytest.mark.asyncio async def test_delete_instance_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -3410,7 +3086,7 @@ async def test_delete_instance_flattened_async(): @pytest.mark.asyncio async def test_delete_instance_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -3455,25 +3131,6 @@ def test_create_cluster(request_type, transport: str = "grpc"): assert isinstance(response, future.Future) -def test_create_cluster_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.create_cluster), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.create_cluster() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.CreateClusterRequest() - - def test_create_cluster_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. @@ -3544,27 +3201,6 @@ def test_create_cluster_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_create_cluster_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
- client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.create_cluster), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - response = await client.create_cluster() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.CreateClusterRequest() - - @pytest.mark.asyncio async def test_create_cluster_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -3573,7 +3209,7 @@ async def test_create_cluster_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -3618,7 +3254,7 @@ async def test_create_cluster_async( request_type=bigtable_instance_admin.CreateClusterRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -3681,7 +3317,7 @@ def test_create_cluster_field_headers(): @pytest.mark.asyncio async def test_create_cluster_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -3761,7 +3397,7 @@ def test_create_cluster_flattened_error(): @pytest.mark.asyncio async def test_create_cluster_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -3798,7 +3434,7 @@ async def test_create_cluster_flattened_async(): @pytest.mark.asyncio async def test_create_cluster_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -3861,25 +3497,6 @@ def test_get_cluster(request_type, transport: str = "grpc"): assert response.default_storage_type == common.StorageType.SSD -def test_get_cluster_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_cluster), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.get_cluster() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.GetClusterRequest() - - def test_get_cluster_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. 
@@ -3943,34 +3560,6 @@ def test_get_cluster_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_get_cluster_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_cluster), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - instance.Cluster( - name="name_value", - location="location_value", - state=instance.Cluster.State.READY, - serve_nodes=1181, - node_scaling_factor=instance.Cluster.NodeScalingFactor.NODE_SCALING_FACTOR_1X, - default_storage_type=common.StorageType.SSD, - ) - ) - response = await client.get_cluster() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.GetClusterRequest() - - @pytest.mark.asyncio async def test_get_cluster_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -3979,7 +3568,7 @@ async def test_get_cluster_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -4019,7 +3608,7 @@ async def test_get_cluster_async( request_type=bigtable_instance_admin.GetClusterRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -4098,7 +3687,7 @@ def test_get_cluster_field_headers(): @pytest.mark.asyncio async def test_get_cluster_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -4166,7 +3755,7 @@ def test_get_cluster_flattened_error(): @pytest.mark.asyncio async def test_get_cluster_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -4193,7 +3782,7 @@ async def test_get_cluster_flattened_async(): @pytest.mark.asyncio async def test_get_cluster_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -4244,25 +3833,6 @@ def test_list_clusters(request_type, transport: str = "grpc"): assert response.next_page_token == "next_page_token_value" -def test_list_clusters_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.list_clusters() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListClustersRequest() - - def test_list_clusters_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. @@ -4328,30 +3898,6 @@ def test_list_clusters_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_list_clusters_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_instance_admin.ListClustersResponse( - failed_locations=["failed_locations_value"], - next_page_token="next_page_token_value", - ) - ) - response = await client.list_clusters() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListClustersRequest() - - @pytest.mark.asyncio async def test_list_clusters_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -4360,7 +3906,7 @@ async def test_list_clusters_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -4400,7 +3946,7 @@ async def test_list_clusters_async( request_type=bigtable_instance_admin.ListClustersRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -4468,7 +4014,7 @@ def test_list_clusters_field_headers(): @pytest.mark.asyncio async def test_list_clusters_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -4538,7 +4084,7 @@ def test_list_clusters_flattened_error(): @pytest.mark.asyncio async def test_list_clusters_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -4567,7 +4113,7 @@ async def test_list_clusters_flattened_async(): @pytest.mark.asyncio async def test_list_clusters_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -4612,25 +4158,6 @@ def test_update_cluster(request_type, transport: str = "grpc"): assert isinstance(response, future.Future) -def test_update_cluster_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.update_cluster), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.update_cluster() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == instance.Cluster() - - def test_update_cluster_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. @@ -4701,27 +4228,6 @@ def test_update_cluster_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_update_cluster_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.update_cluster), "__call__") as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - response = await client.update_cluster() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == instance.Cluster() - - @pytest.mark.asyncio async def test_update_cluster_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -4730,7 +4236,7 @@ async def test_update_cluster_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -4774,7 +4280,7 @@ async def test_update_cluster_async( transport: str = "grpc_asyncio", request_type=instance.Cluster ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -4837,7 +4343,7 @@ def test_update_cluster_field_headers(): @pytest.mark.asyncio async def test_update_cluster_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -4901,27 +4407,6 @@ def test_partial_update_cluster(request_type, transport: str = "grpc"): assert isinstance(response, future.Future) -def test_partial_update_cluster_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.partial_update_cluster), "__call__" - ) as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.partial_update_cluster() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.PartialUpdateClusterRequest() - - def test_partial_update_cluster_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. @@ -4993,29 +4478,6 @@ def test_partial_update_cluster_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_partial_update_cluster_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.partial_update_cluster), "__call__" - ) as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - response = await client.partial_update_cluster() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.PartialUpdateClusterRequest() - - @pytest.mark.asyncio async def test_partial_update_cluster_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -5024,7 +4486,7 @@ async def test_partial_update_cluster_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -5069,7 +4531,7 @@ async def test_partial_update_cluster_async( request_type=bigtable_instance_admin.PartialUpdateClusterRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -5136,7 +4598,7 @@ def test_partial_update_cluster_field_headers(): @pytest.mark.asyncio async def test_partial_update_cluster_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -5215,7 +4677,7 @@ def test_partial_update_cluster_flattened_error(): @pytest.mark.asyncio async def test_partial_update_cluster_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -5250,7 +4712,7 @@ async def test_partial_update_cluster_flattened_async(): @pytest.mark.asyncio async def test_partial_update_cluster_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -5296,25 +4758,6 @@ def test_delete_cluster(request_type, transport: str = "grpc"): assert response is None -def test_delete_cluster_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.delete_cluster() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.DeleteClusterRequest() - - def test_delete_cluster_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. @@ -5378,25 +4821,6 @@ def test_delete_cluster_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_delete_cluster_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. 
request == None and no flattened fields passed, work. - client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.delete_cluster() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.DeleteClusterRequest() - - @pytest.mark.asyncio async def test_delete_cluster_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -5405,7 +4829,7 @@ async def test_delete_cluster_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -5445,7 +4869,7 @@ async def test_delete_cluster_async( request_type=bigtable_instance_admin.DeleteClusterRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -5506,7 +4930,7 @@ def test_delete_cluster_field_headers(): @pytest.mark.asyncio async def test_delete_cluster_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -5574,7 +4998,7 @@ def test_delete_cluster_flattened_error(): @pytest.mark.asyncio async def test_delete_cluster_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -5601,7 +5025,7 @@ async def test_delete_cluster_flattened_async(): @pytest.mark.asyncio async def test_delete_cluster_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -5656,27 +5080,6 @@ def test_create_app_profile(request_type, transport: str = "grpc"): assert response.description == "description_value" -def test_create_app_profile_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_app_profile), "__call__" - ) as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.create_app_profile() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.CreateAppProfileRequest() - - def test_create_app_profile_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. 
@@ -5748,33 +5151,6 @@ def test_create_app_profile_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_create_app_profile_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_app_profile), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - instance.AppProfile( - name="name_value", - etag="etag_value", - description="description_value", - ) - ) - response = await client.create_app_profile() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.CreateAppProfileRequest() - - @pytest.mark.asyncio async def test_create_app_profile_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -5783,7 +5159,7 @@ async def test_create_app_profile_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -5823,7 +5199,7 @@ async def test_create_app_profile_async( request_type=bigtable_instance_admin.CreateAppProfileRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -5897,7 +5273,7 @@ def test_create_app_profile_field_headers(): @pytest.mark.asyncio async def test_create_app_profile_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -5979,7 +5355,7 @@ def test_create_app_profile_flattened_error(): @pytest.mark.asyncio async def test_create_app_profile_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -6016,7 +5392,7 @@ async def test_create_app_profile_flattened_async(): @pytest.mark.asyncio async def test_create_app_profile_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -6071,25 +5447,6 @@ def test_get_app_profile(request_type, transport: str = "grpc"): assert response.description == "description_value" -def test_get_app_profile_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(type(client.transport.get_app_profile), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.get_app_profile() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.GetAppProfileRequest() - - def test_get_app_profile_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. @@ -6153,31 +5510,6 @@ def test_get_app_profile_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_get_app_profile_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_app_profile), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - instance.AppProfile( - name="name_value", - etag="etag_value", - description="description_value", - ) - ) - response = await client.get_app_profile() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.GetAppProfileRequest() - - @pytest.mark.asyncio async def test_get_app_profile_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -6186,7 +5518,7 @@ async def test_get_app_profile_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -6226,7 +5558,7 @@ async def test_get_app_profile_async( request_type=bigtable_instance_admin.GetAppProfileRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -6296,7 +5628,7 @@ def test_get_app_profile_field_headers(): @pytest.mark.asyncio async def test_get_app_profile_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -6364,7 +5696,7 @@ def test_get_app_profile_flattened_error(): @pytest.mark.asyncio async def test_get_app_profile_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -6391,7 +5723,7 @@ async def test_get_app_profile_flattened_async(): @pytest.mark.asyncio async def test_get_app_profile_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -6443,27 +5775,6 @@ def test_list_app_profiles(request_type, transport: str = "grpc"): assert response.failed_locations == ["failed_locations_value"] -def test_list_app_profiles_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_app_profiles), "__call__" - ) as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.list_app_profiles() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListAppProfilesRequest() - - def test_list_app_profiles_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. @@ -6533,32 +5844,6 @@ def test_list_app_profiles_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_list_app_profiles_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_app_profiles), "__call__" - ) as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_instance_admin.ListAppProfilesResponse( - next_page_token="next_page_token_value", - failed_locations=["failed_locations_value"], - ) - ) - response = await client.list_app_profiles() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListAppProfilesRequest() - - @pytest.mark.asyncio async def test_list_app_profiles_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -6567,7 +5852,7 @@ async def test_list_app_profiles_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -6607,7 +5892,7 @@ async def test_list_app_profiles_async( request_type=bigtable_instance_admin.ListAppProfilesRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -6679,7 +5964,7 @@ def test_list_app_profiles_field_headers(): @pytest.mark.asyncio async def test_list_app_profiles_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -6753,7 +6038,7 @@ def test_list_app_profiles_flattened_error(): @pytest.mark.asyncio async def test_list_app_profiles_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -6784,7 +6069,7 @@ async def test_list_app_profiles_flattened_async(): @pytest.mark.asyncio async def test_list_app_profiles_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -6898,7 +6183,7 @@ def test_list_app_profiles_pages(transport_name: str = "grpc"): @pytest.mark.asyncio async def test_list_app_profiles_async_pager(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -6950,7 +6235,7 @@ async def test_list_app_profiles_async_pager(): @pytest.mark.asyncio async def test_list_app_profiles_async_pages(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -7033,27 +6318,6 @@ def test_update_app_profile(request_type, transport: str = "grpc"): assert isinstance(response, future.Future) -def test_update_app_profile_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.update_app_profile), "__call__" - ) as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.update_app_profile() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.UpdateAppProfileRequest() - - def test_update_app_profile_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. @@ -7124,29 +6388,6 @@ def test_update_app_profile_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_update_app_profile_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_app_profile), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - response = await client.update_app_profile() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.UpdateAppProfileRequest() - - @pytest.mark.asyncio async def test_update_app_profile_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -7155,7 +6396,7 @@ async def test_update_app_profile_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -7200,7 +6441,7 @@ async def test_update_app_profile_async( request_type=bigtable_instance_admin.UpdateAppProfileRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -7267,7 +6508,7 @@ def test_update_app_profile_field_headers(): @pytest.mark.asyncio async def test_update_app_profile_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -7346,7 +6587,7 @@ def test_update_app_profile_flattened_error(): @pytest.mark.asyncio async def test_update_app_profile_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -7381,7 +6622,7 @@ async def test_update_app_profile_flattened_async(): @pytest.mark.asyncio async def test_update_app_profile_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -7429,27 +6670,6 @@ def test_delete_app_profile(request_type, transport: str = "grpc"): assert response is None -def test_delete_app_profile_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_app_profile), "__call__" - ) as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.delete_app_profile() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.DeleteAppProfileRequest() - - def test_delete_app_profile_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. @@ -7519,27 +6739,6 @@ def test_delete_app_profile_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_delete_app_profile_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_app_profile), "__call__" - ) as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.delete_app_profile() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.DeleteAppProfileRequest() - - @pytest.mark.asyncio async def test_delete_app_profile_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -7548,7 +6747,7 @@ async def test_delete_app_profile_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -7588,7 +6787,7 @@ async def test_delete_app_profile_async( request_type=bigtable_instance_admin.DeleteAppProfileRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -7653,7 +6852,7 @@ def test_delete_app_profile_field_headers(): @pytest.mark.asyncio async def test_delete_app_profile_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -7725,7 +6924,7 @@ def test_delete_app_profile_flattened_error(): @pytest.mark.asyncio async def test_delete_app_profile_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -7754,7 +6953,7 @@ async def test_delete_app_profile_flattened_async(): @pytest.mark.asyncio async def test_delete_app_profile_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -7804,25 +7003,6 @@ def test_get_iam_policy(request_type, transport: str = "grpc"): assert response.etag == b"etag_blob" -def test_get_iam_policy_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.get_iam_policy() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.GetIamPolicyRequest() - - def test_get_iam_policy_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. @@ -7886,30 +7066,6 @@ def test_get_iam_policy_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_get_iam_policy_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
- client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - policy_pb2.Policy( - version=774, - etag=b"etag_blob", - ) - ) - response = await client.get_iam_policy() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.GetIamPolicyRequest() - - @pytest.mark.asyncio async def test_get_iam_policy_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -7918,7 +7074,7 @@ async def test_get_iam_policy_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -7957,7 +7113,7 @@ async def test_get_iam_policy_async( transport: str = "grpc_asyncio", request_type=iam_policy_pb2.GetIamPolicyRequest ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -8025,7 +7181,7 @@ def test_get_iam_policy_field_headers(): @pytest.mark.asyncio async def test_get_iam_policy_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -8110,7 +7266,7 @@ def test_get_iam_policy_flattened_error(): @pytest.mark.asyncio async def test_get_iam_policy_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -8137,7 +7293,7 @@ async def test_get_iam_policy_flattened_async(): @pytest.mark.asyncio async def test_get_iam_policy_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -8187,25 +7343,6 @@ def test_set_iam_policy(request_type, transport: str = "grpc"): assert response.etag == b"etag_blob" -def test_set_iam_policy_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.set_iam_policy() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.SetIamPolicyRequest() - - def test_set_iam_policy_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. 
@@ -8270,40 +7407,16 @@ def test_set_iam_policy_use_cached_wrapped_rpc(): @pytest.mark.asyncio -async def test_set_iam_policy_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - policy_pb2.Policy( - version=774, - etag=b"etag_blob", - ) - ) - response = await client.set_iam_policy() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.SetIamPolicyRequest() - - -@pytest.mark.asyncio -async def test_set_iam_policy_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) +async def test_set_iam_policy_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) # Should wrap all calls on client creation assert wrapper_fn.call_count > 0 @@ -8340,7 +7453,7 @@ async def test_set_iam_policy_async( transport: str = "grpc_asyncio", request_type=iam_policy_pb2.SetIamPolicyRequest ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -8408,7 +7521,7 @@ def test_set_iam_policy_field_headers(): @pytest.mark.asyncio async def test_set_iam_policy_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -8494,7 +7607,7 @@ def test_set_iam_policy_flattened_error(): @pytest.mark.asyncio async def test_set_iam_policy_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -8521,7 +7634,7 @@ async def test_set_iam_policy_flattened_async(): @pytest.mark.asyncio async def test_set_iam_policy_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -8571,27 +7684,6 @@ def test_test_iam_permissions(request_type, transport: str = "grpc"): assert response.permissions == ["permissions_value"] -def test_test_iam_permissions_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. 
request == None and no flattened fields passed, work. - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.test_iam_permissions), "__call__" - ) as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.test_iam_permissions() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.TestIamPermissionsRequest() - - def test_test_iam_permissions_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. @@ -8661,31 +7753,6 @@ def test_test_iam_permissions_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_test_iam_permissions_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.test_iam_permissions), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - iam_policy_pb2.TestIamPermissionsResponse( - permissions=["permissions_value"], - ) - ) - response = await client.test_iam_permissions() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.TestIamPermissionsRequest() - - @pytest.mark.asyncio async def test_test_iam_permissions_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -8694,7 +7761,7 @@ async def test_test_iam_permissions_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -8734,7 +7801,7 @@ async def test_test_iam_permissions_async( request_type=iam_policy_pb2.TestIamPermissionsRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -8804,7 +7871,7 @@ def test_test_iam_permissions_field_headers(): @pytest.mark.asyncio async def test_test_iam_permissions_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -8902,7 +7969,7 @@ def test_test_iam_permissions_flattened_error(): @pytest.mark.asyncio async def test_test_iam_permissions_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -8937,7 +8004,7 @@ async def test_test_iam_permissions_flattened_async(): @pytest.mark.asyncio async def test_test_iam_permissions_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -8986,25 +8053,6 @@ def test_list_hot_tablets(request_type, transport: str = "grpc"): assert response.next_page_token == "next_page_token_value" -def test_list_hot_tablets_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_hot_tablets), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.list_hot_tablets() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListHotTabletsRequest() - - def test_list_hot_tablets_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. @@ -9072,29 +8120,6 @@ def test_list_hot_tablets_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_list_hot_tablets_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_hot_tablets), "__call__") as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_instance_admin.ListHotTabletsResponse( - next_page_token="next_page_token_value", - ) - ) - response = await client.list_hot_tablets() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListHotTabletsRequest() - - @pytest.mark.asyncio async def test_list_hot_tablets_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -9103,7 +8128,7 @@ async def test_list_hot_tablets_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -9143,7 +8168,7 @@ async def test_list_hot_tablets_async( request_type=bigtable_instance_admin.ListHotTabletsRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -9209,7 +8234,7 @@ def test_list_hot_tablets_field_headers(): @pytest.mark.asyncio async def test_list_hot_tablets_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -9279,7 +8304,7 @@ def test_list_hot_tablets_flattened_error(): @pytest.mark.asyncio async def test_list_hot_tablets_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -9308,7 +8333,7 @@ async def test_list_hot_tablets_flattened_async(): @pytest.mark.asyncio async def test_list_hot_tablets_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -9418,7 +8443,7 @@ def test_list_hot_tablets_pages(transport_name: str = "grpc"): @pytest.mark.asyncio async def test_list_hot_tablets_async_pager(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -9468,7 +8493,7 @@ async def test_list_hot_tablets_async_pager(): @pytest.mark.asyncio async def test_list_hot_tablets_async_pages(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -9514,41 +8539,6 @@ async def test_list_hot_tablets_async_pages(): assert page_.raw_page.next_page_token == token -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.CreateInstanceRequest, - dict, - ], -) -def test_create_instance_rest(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.create_instance(request) - - # Establish that the response is the type that we expect. - assert response.operation.name == "operations/spam" - - def test_create_instance_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call @@ -9684,89 +8674,6 @@ def test_create_instance_rest_unset_required_fields(): ) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_create_instance_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_create_instance" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_create_instance" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_instance_admin.CreateInstanceRequest.pb( - bigtable_instance_admin.CreateInstanceRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson( - operations_pb2.Operation() - ) - - request = bigtable_instance_admin.CreateInstanceRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - - client.create_instance( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - - -def test_create_instance_rest_bad_request( - transport: str = "rest", request_type=bigtable_instance_admin.CreateInstanceRequest -): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.create_instance(request) - - def test_create_instance_rest_flattened(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -9826,82 +8733,28 @@ def test_create_instance_rest_flattened_error(transport: str = "rest"): ) -def test_create_instance_rest_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) +def test_get_instance_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.GetInstanceRequest, - dict, - ], -) -def test_get_instance_rest(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) + # Ensure method has been cached + assert client._transport.get_instance in client._transport._wrapped_methods - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = instance.Instance( - name="name_value", - display_name="display_name_value", - state=instance.Instance.State.READY, - type_=instance.Instance.Type.PRODUCTION, - satisfies_pzs=True, - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = instance.Instance.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.get_instance(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, instance.Instance) - assert response.name == "name_value" - assert response.display_name == "display_name_value" - assert response.state == instance.Instance.State.READY - assert response.type_ == instance.Instance.Type.PRODUCTION - assert response.satisfies_pzs is True - - -def test_get_instance_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.get_instance in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.get_instance] = mock_rpc + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.get_instance] = mock_rpc request = {} client.get_instance(request) @@ -9999,85 +8852,6 @@ def test_get_instance_rest_unset_required_fields(): assert set(unset_fields) == (set(()) & set(("name",))) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_instance_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_get_instance" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_get_instance" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_instance_admin.GetInstanceRequest.pb( - bigtable_instance_admin.GetInstanceRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = instance.Instance.to_json(instance.Instance()) - - request = bigtable_instance_admin.GetInstanceRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = instance.Instance() - - client.get_instance( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - - -def test_get_instance_rest_bad_request( - transport: str = "rest", request_type=bigtable_instance_admin.GetInstanceRequest -): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {"name": 
"projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.get_instance(request) - - def test_get_instance_rest_flattened(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -10133,56 +8907,6 @@ def test_get_instance_rest_flattened_error(transport: str = "rest"): ) -def test_get_instance_rest_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.ListInstancesRequest, - dict, - ], -) -def test_list_instances_rest(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListInstancesResponse( - failed_locations=["failed_locations_value"], - next_page_token="next_page_token_value", - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_instance_admin.ListInstancesResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.list_instances(request) - - assert response.raw_page is response - - # Establish that the response is the type that we expect. 
- assert isinstance(response, bigtable_instance_admin.ListInstancesResponse) - assert response.failed_locations == ["failed_locations_value"] - assert response.next_page_token == "next_page_token_value" - - def test_list_instances_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call @@ -10306,89 +9030,6 @@ def test_list_instances_rest_unset_required_fields(): assert set(unset_fields) == (set(("pageToken",)) & set(("parent",))) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_instances_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_list_instances" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_list_instances" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_instance_admin.ListInstancesRequest.pb( - bigtable_instance_admin.ListInstancesRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = ( - bigtable_instance_admin.ListInstancesResponse.to_json( - bigtable_instance_admin.ListInstancesResponse() - ) - ) - - request = bigtable_instance_admin.ListInstancesRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable_instance_admin.ListInstancesResponse() - - client.list_instances( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - - -def test_list_instances_rest_bad_request( - transport: str = "rest", request_type=bigtable_instance_admin.ListInstancesRequest -): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.list_instances(request) - - def test_list_instances_rest_flattened(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -10444,60 +9085,6 @@ def test_list_instances_rest_flattened_error(transport: str = "rest"): ) -def test_list_instances_rest_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - -@pytest.mark.parametrize( - "request_type", - [ - instance.Instance, - dict, - ], -) -def test_update_instance_rest(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = instance.Instance( - name="name_value", - display_name="display_name_value", - state=instance.Instance.State.READY, - type_=instance.Instance.Type.PRODUCTION, - satisfies_pzs=True, - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = instance.Instance.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.update_instance(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, instance.Instance) - assert response.name == "name_value" - assert response.display_name == "display_name_value" - assert response.state == instance.Instance.State.READY - assert response.type_ == instance.Instance.Type.PRODUCTION - assert response.satisfies_pzs is True - - def test_update_instance_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call @@ -10616,202 +9203,6 @@ def test_update_instance_rest_unset_required_fields(): assert set(unset_fields) == (set(()) & set(("displayName",))) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_update_instance_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_update_instance" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_update_instance" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = instance.Instance.pb(instance.Instance()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = instance.Instance.to_json(instance.Instance()) - - request = instance.Instance() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = instance.Instance() - - client.update_instance( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - - -def test_update_instance_rest_bad_request( - transport: str = "rest", request_type=instance.Instance -): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.update_instance(request) - - -def test_update_instance_rest_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.PartialUpdateInstanceRequest, - dict, - ], -) -def test_partial_update_instance_rest(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {"instance": {"name": "projects/sample1/instances/sample2"}} - request_init["instance"] = { - "name": "projects/sample1/instances/sample2", - "display_name": "display_name_value", - "state": 1, - "type_": 1, - "labels": {}, - "create_time": {"seconds": 751, "nanos": 543}, - "satisfies_pzs": True, - } - # The version of a generated dependency at test runtime may differ from the version used during generation. - # Delete any fields which are not present in the current runtime dependency - # See https://github.com/googleapis/gapic-generator-python/issues/1748 - - # Determine if the message type is proto-plus or protobuf - test_field = bigtable_instance_admin.PartialUpdateInstanceRequest.meta.fields[ - "instance" - ] - - def get_message_fields(field): - # Given a field which is a message (composite type), return a list with - # all the fields of the message. - # If the field is not a composite type, return an empty list. - message_fields = [] - - if hasattr(field, "message") and field.message: - is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") - - if is_field_type_proto_plus_type: - message_fields = field.message.meta.fields.values() - # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types - else: # pragma: NO COVER - message_fields = field.message.DESCRIPTOR.fields - return message_fields - - runtime_nested_fields = [ - (field.name, nested_field.name) - for field in get_message_fields(test_field) - for nested_field in get_message_fields(field) - ] - - subfields_not_in_runtime = [] - - # For each item in the sample request, create a list of sub fields which are not present at runtime - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["instance"].items(): # pragma: NO COVER - result = None - is_repeated = False - # For repeated fields - if isinstance(value, list) and len(value): - is_repeated = True - result = value[0] - # For fields where the type is another message - if isinstance(value, dict): - result = value - - if result and hasattr(result, "keys"): - for subfield in result.keys(): - if (field, subfield) not in runtime_nested_fields: - subfields_not_in_runtime.append( - { - "field": field, - "subfield": subfield, - "is_repeated": is_repeated, - } - ) - - # Remove fields from the sample request which are not present in the runtime version of the dependency - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER - field = subfield_to_delete.get("field") - field_repeated = subfield_to_delete.get("is_repeated") - 
subfield = subfield_to_delete.get("subfield") - if subfield: - if field_repeated: - for i in range(0, len(request_init["instance"][field])): - del request_init["instance"][field][i][subfield] - else: - del request_init["instance"][field][subfield] - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.partial_update_instance(request) - - # Establish that the response is the type that we expect. - assert response.operation.name == "operations/spam" - - def test_partial_update_instance_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call @@ -10943,94 +9334,10 @@ def test_partial_update_instance_rest_unset_required_fields(): ) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_partial_update_instance_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( +def test_partial_update_instance_rest_flattened(): + client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_partial_update_instance" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_partial_update_instance" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_instance_admin.PartialUpdateInstanceRequest.pb( - bigtable_instance_admin.PartialUpdateInstanceRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson( - operations_pb2.Operation() - ) - - request = bigtable_instance_admin.PartialUpdateInstanceRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - - client.partial_update_instance( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - - -def test_partial_update_instance_rest_bad_request( - transport: str = "rest", - request_type=bigtable_instance_admin.PartialUpdateInstanceRequest, -): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {"instance": {"name": "projects/sample1/instances/sample2"}} - 
request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.partial_update_instance(request) - - -def test_partial_update_instance_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport="rest", ) # Mock the http request call within the method and fake a response. @@ -11083,47 +9390,6 @@ def test_partial_update_instance_rest_flattened_error(transport: str = "rest"): ) -def test_partial_update_instance_rest_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.DeleteInstanceRequest, - dict, - ], -) -def test_delete_instance_rest(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = None - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = "" - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.delete_instance(request) - - # Establish that the response is the type that we expect. 
- assert response is None - - def test_delete_instance_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call @@ -11240,79 +9506,6 @@ def test_delete_instance_rest_unset_required_fields(): assert set(unset_fields) == (set(()) & set(("name",))) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_delete_instance_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_delete_instance" - ) as pre: - pre.assert_not_called() - pb_message = bigtable_instance_admin.DeleteInstanceRequest.pb( - bigtable_instance_admin.DeleteInstanceRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - - request = bigtable_instance_admin.DeleteInstanceRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - - client.delete_instance( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - - -def test_delete_instance_rest_bad_request( - transport: str = "rest", request_type=bigtable_instance_admin.DeleteInstanceRequest -): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.delete_instance(request) - - def test_delete_instance_rest_flattened(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -11366,169 +9559,40 @@ def test_delete_instance_rest_flattened_error(transport: str = "rest"): ) -def test_delete_instance_rest_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - +def test_create_cluster_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.CreateClusterRequest, - dict, - ], -) -def test_create_cluster_rest(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} - request_init["cluster"] = { - "name": "name_value", - "location": "location_value", - "state": 1, - "serve_nodes": 1181, - "node_scaling_factor": 1, - "cluster_config": { - "cluster_autoscaling_config": { - "autoscaling_limits": { - "min_serve_nodes": 1600, - "max_serve_nodes": 1602, - }, - "autoscaling_targets": { - "cpu_utilization_percent": 2483, - "storage_utilization_gib_per_node": 3404, - }, - } - }, - "default_storage_type": 1, - "encryption_config": {"kms_key_name": "kms_key_name_value"}, - } - # The version of a generated dependency at test runtime may differ from the version used during generation. - # Delete any fields which are not present in the current runtime dependency - # See https://github.com/googleapis/gapic-generator-python/issues/1748 + # Ensure method has been cached + assert client._transport.create_cluster in client._transport._wrapped_methods - # Determine if the message type is proto-plus or protobuf - test_field = bigtable_instance_admin.CreateClusterRequest.meta.fields["cluster"] + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.create_cluster] = mock_rpc - def get_message_fields(field): - # Given a field which is a message (composite type), return a list with - # all the fields of the message. - # If the field is not a composite type, return an empty list. - message_fields = [] + request = {} + client.create_cluster(request) - if hasattr(field, "message") and field.message: - is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 - if is_field_type_proto_plus_type: - message_fields = field.message.meta.fields.values() - # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types - else: # pragma: NO COVER - message_fields = field.message.DESCRIPTOR.fields - return message_fields + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() - runtime_nested_fields = [ - (field.name, nested_field.name) - for field in get_message_fields(test_field) - for nested_field in get_message_fields(field) - ] - - subfields_not_in_runtime = [] - - # For each item in the sample request, create a list of sub fields which are not present at runtime - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["cluster"].items(): # pragma: NO COVER - result = None - is_repeated = False - # For repeated fields - if isinstance(value, list) and len(value): - is_repeated = True - result = value[0] - # For fields where the type is another message - if isinstance(value, dict): - result = value - - if result and hasattr(result, "keys"): - for subfield in result.keys(): - if (field, subfield) not in runtime_nested_fields: - subfields_not_in_runtime.append( - { - "field": field, - "subfield": subfield, - "is_repeated": is_repeated, - } - ) - - # Remove fields from the sample request which are not present in the runtime version of the dependency - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER - field = subfield_to_delete.get("field") - field_repeated = subfield_to_delete.get("is_repeated") - subfield = subfield_to_delete.get("subfield") - if subfield: - if field_repeated: - for i in range(0, len(request_init["cluster"][field])): - del request_init["cluster"][field][i][subfield] - else: - del request_init["cluster"][field][subfield] - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.create_cluster(request) - - # Establish that the response is the type that we expect. 
- assert response.operation.name == "operations/spam" - - -def test_create_cluster_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.create_cluster in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.create_cluster] = mock_rpc - - request = {} - client.create_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.create_cluster(request) + client.create_cluster(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 @@ -11640,89 +9704,6 @@ def test_create_cluster_rest_unset_required_fields(): ) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_create_cluster_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_create_cluster" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_create_cluster" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_instance_admin.CreateClusterRequest.pb( - bigtable_instance_admin.CreateClusterRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson( - operations_pb2.Operation() - ) - - request = bigtable_instance_admin.CreateClusterRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - - client.create_cluster( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - - -def test_create_cluster_rest_bad_request( - transport: str = "rest", request_type=bigtable_instance_admin.CreateClusterRequest -): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} - request = 
request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.create_cluster(request) - - def test_create_cluster_rest_flattened(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -11781,65 +9762,6 @@ def test_create_cluster_rest_flattened_error(transport: str = "rest"): ) -def test_create_cluster_rest_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.GetClusterRequest, - dict, - ], -) -def test_get_cluster_rest(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/clusters/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = instance.Cluster( - name="name_value", - location="location_value", - state=instance.Cluster.State.READY, - serve_nodes=1181, - node_scaling_factor=instance.Cluster.NodeScalingFactor.NODE_SCALING_FACTOR_1X, - default_storage_type=common.StorageType.SSD, - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = instance.Cluster.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.get_cluster(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, instance.Cluster) - assert response.name == "name_value" - assert response.location == "location_value" - assert response.state == instance.Cluster.State.READY - assert response.serve_nodes == 1181 - assert ( - response.node_scaling_factor - == instance.Cluster.NodeScalingFactor.NODE_SCALING_FACTOR_1X - ) - assert response.default_storage_type == common.StorageType.SSD - - def test_get_cluster_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call @@ -11959,89 +9881,10 @@ def test_get_cluster_rest_unset_required_fields(): assert set(unset_fields) == (set(()) & set(("name",))) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_cluster_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( +def test_get_cluster_rest_flattened(): + client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_get_cluster" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_get_cluster" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_instance_admin.GetClusterRequest.pb( - bigtable_instance_admin.GetClusterRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = instance.Cluster.to_json(instance.Cluster()) - - request = bigtable_instance_admin.GetClusterRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = instance.Cluster() - - client.get_cluster( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - - -def test_get_cluster_rest_bad_request( - transport: str = "rest", request_type=bigtable_instance_admin.GetClusterRequest -): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/clusters/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.get_cluster(request) - - -def test_get_cluster_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport="rest", ) # Mock the http request call within the method and fake a response. 
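# The *_use_cached_wrapped_rpc tests in the hunks above and below assert that the
# client wraps each transport method once (in _prep_wrapped_messages) and then
# reuses the cached wrapper, rather than re-wrapping on every call. The snippet
# below is a minimal, self-contained sketch of that caching idea only: the class
# and function names are hypothetical simplifications, not the generated GAPIC
# code or the google.api_core API.

def wrap_method(func):
    """Stand-in for a call wrapper that would attach default retry/timeout."""
    def wrapped(request, **kwargs):
        # A real wrapper would apply default retry, timeout and metadata here.
        return func(request, **kwargs)
    return wrapped


class SketchTransport:
    def __init__(self):
        # Built once at construction time, mirroring _prep_wrapped_messages().
        self._wrapped_methods = {
            self.get_cluster: wrap_method(self.get_cluster),
        }

    def get_cluster(self, request, **kwargs):
        return {"name": request.get("name", "")}


class SketchClient:
    def __init__(self, transport):
        self._transport = transport

    def get_cluster(self, request):
        # Look up the cached wrapper instead of wrapping per call; this is the
        # behavior the tests verify by patching wrap_method and counting calls.
        rpc = self._transport._wrapped_methods[self._transport.get_cluster]
        return rpc(request)


client = SketchClient(SketchTransport())
assert client.get_cluster({"name": "projects/p/instances/i/clusters/c"})["name"]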
@@ -12094,56 +9937,6 @@ def test_get_cluster_rest_flattened_error(transport: str = "rest"): ) -def test_get_cluster_rest_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.ListClustersRequest, - dict, - ], -) -def test_list_clusters_rest(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListClustersResponse( - failed_locations=["failed_locations_value"], - next_page_token="next_page_token_value", - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_instance_admin.ListClustersResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.list_clusters(request) - - assert response.raw_page is response - - # Establish that the response is the type that we expect. - assert isinstance(response, bigtable_instance_admin.ListClustersResponse) - assert response.failed_locations == ["failed_locations_value"] - assert response.next_page_token == "next_page_token_value" - - def test_list_clusters_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call @@ -12265,89 +10058,6 @@ def test_list_clusters_rest_unset_required_fields(): assert set(unset_fields) == (set(("pageToken",)) & set(("parent",))) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_clusters_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_list_clusters" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_list_clusters" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_instance_admin.ListClustersRequest.pb( - bigtable_instance_admin.ListClustersRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = ( - bigtable_instance_admin.ListClustersResponse.to_json( - bigtable_instance_admin.ListClustersResponse() - ) - ) - - request = bigtable_instance_admin.ListClustersRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - 
pre.return_value = request, metadata - post.return_value = bigtable_instance_admin.ListClustersResponse() - - client.list_clusters( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - - -def test_list_clusters_rest_bad_request( - transport: str = "rest", request_type=bigtable_instance_admin.ListClustersRequest -): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.list_clusters(request) - - def test_list_clusters_rest_flattened(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -12404,48 +10114,47 @@ def test_list_clusters_rest_flattened_error(transport: str = "rest"): ) -def test_list_clusters_rest_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) +def test_update_cluster_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() -@pytest.mark.parametrize( - "request_type", - [ - instance.Cluster, - dict, - ], -) -def test_update_cluster_rest(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) + # Ensure method has been cached + assert client._transport.update_cluster in client._transport._wrapped_methods - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/clusters/sample3"} - request = request_type(**request_init) + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.update_cluster] = mock_rpc - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") + request = {} + client.update_cluster(request) - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.update_cluster(request) + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() - # Establish that the response is the type that we expect. - assert response.operation.name == "operations/spam" + client.update_cluster(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 -def test_update_cluster_rest_use_cached_wrapped_rpc(): +def test_partial_update_cluster_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -12459,17 +10168,22 @@ def test_update_cluster_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.update_cluster in client._transport._wrapped_methods + assert ( + client._transport.partial_update_cluster + in client._transport._wrapped_methods + ) # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[client._transport.update_cluster] = mock_rpc + client._transport._wrapped_methods[ + client._transport.partial_update_cluster + ] = mock_rpc request = {} - client.update_cluster(request) + client.partial_update_cluster(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 @@ -12478,266 +10192,7 @@ def test_update_cluster_rest_use_cached_wrapped_rpc(): # subsequent calls should use the cached wrapper wrapper_fn.reset_mock() - client.update_cluster(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_update_cluster_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_update_cluster" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_update_cluster" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = instance.Cluster.pb(instance.Cluster()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson( - operations_pb2.Operation() - ) - - request = instance.Cluster() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - - 
client.update_cluster( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - - -def test_update_cluster_rest_bad_request( - transport: str = "rest", request_type=instance.Cluster -): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/clusters/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.update_cluster(request) - - -def test_update_cluster_rest_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.PartialUpdateClusterRequest, - dict, - ], -) -def test_partial_update_cluster_rest(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = { - "cluster": {"name": "projects/sample1/instances/sample2/clusters/sample3"} - } - request_init["cluster"] = { - "name": "projects/sample1/instances/sample2/clusters/sample3", - "location": "location_value", - "state": 1, - "serve_nodes": 1181, - "node_scaling_factor": 1, - "cluster_config": { - "cluster_autoscaling_config": { - "autoscaling_limits": { - "min_serve_nodes": 1600, - "max_serve_nodes": 1602, - }, - "autoscaling_targets": { - "cpu_utilization_percent": 2483, - "storage_utilization_gib_per_node": 3404, - }, - } - }, - "default_storage_type": 1, - "encryption_config": {"kms_key_name": "kms_key_name_value"}, - } - # The version of a generated dependency at test runtime may differ from the version used during generation. - # Delete any fields which are not present in the current runtime dependency - # See https://github.com/googleapis/gapic-generator-python/issues/1748 - - # Determine if the message type is proto-plus or protobuf - test_field = bigtable_instance_admin.PartialUpdateClusterRequest.meta.fields[ - "cluster" - ] - - def get_message_fields(field): - # Given a field which is a message (composite type), return a list with - # all the fields of the message. - # If the field is not a composite type, return an empty list. 
- message_fields = [] - - if hasattr(field, "message") and field.message: - is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") - - if is_field_type_proto_plus_type: - message_fields = field.message.meta.fields.values() - # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types - else: # pragma: NO COVER - message_fields = field.message.DESCRIPTOR.fields - return message_fields - - runtime_nested_fields = [ - (field.name, nested_field.name) - for field in get_message_fields(test_field) - for nested_field in get_message_fields(field) - ] - - subfields_not_in_runtime = [] - - # For each item in the sample request, create a list of sub fields which are not present at runtime - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["cluster"].items(): # pragma: NO COVER - result = None - is_repeated = False - # For repeated fields - if isinstance(value, list) and len(value): - is_repeated = True - result = value[0] - # For fields where the type is another message - if isinstance(value, dict): - result = value - - if result and hasattr(result, "keys"): - for subfield in result.keys(): - if (field, subfield) not in runtime_nested_fields: - subfields_not_in_runtime.append( - { - "field": field, - "subfield": subfield, - "is_repeated": is_repeated, - } - ) - - # Remove fields from the sample request which are not present in the runtime version of the dependency - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER - field = subfield_to_delete.get("field") - field_repeated = subfield_to_delete.get("is_repeated") - subfield = subfield_to_delete.get("subfield") - if subfield: - if field_repeated: - for i in range(0, len(request_init["cluster"][field])): - del request_init["cluster"][field][i][subfield] - else: - del request_init["cluster"][field][subfield] - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.partial_update_cluster(request) - - # Establish that the response is the type that we expect. 
- assert response.operation.name == "operations/spam" - - -def test_partial_update_cluster_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.partial_update_cluster - in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.partial_update_cluster - ] = mock_rpc - - request = {} - client.partial_update_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.partial_update_cluster(request) + client.partial_update_cluster(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 @@ -12830,96 +10285,10 @@ def test_partial_update_cluster_rest_unset_required_fields(): ) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_partial_update_cluster_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( +def test_partial_update_cluster_rest_flattened(): + client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_partial_update_cluster" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_partial_update_cluster" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_instance_admin.PartialUpdateClusterRequest.pb( - bigtable_instance_admin.PartialUpdateClusterRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson( - operations_pb2.Operation() - ) - - request = bigtable_instance_admin.PartialUpdateClusterRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - - client.partial_update_cluster( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - - -def test_partial_update_cluster_rest_bad_request( - transport: str = "rest", - request_type=bigtable_instance_admin.PartialUpdateClusterRequest, -): - client = 
BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = { - "cluster": {"name": "projects/sample1/instances/sample2/clusters/sample3"} - } - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.partial_update_cluster(request) - - -def test_partial_update_cluster_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport="rest", ) # Mock the http request call within the method and fake a response. @@ -12975,47 +10344,6 @@ def test_partial_update_cluster_rest_flattened_error(transport: str = "rest"): ) -def test_partial_update_cluster_rest_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.DeleteClusterRequest, - dict, - ], -) -def test_delete_cluster_rest(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/clusters/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = None - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = "" - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.delete_cluster(request) - - # Establish that the response is the type that we expect. 
- assert response is None - - def test_delete_cluster_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call @@ -13132,79 +10460,6 @@ def test_delete_cluster_rest_unset_required_fields(): assert set(unset_fields) == (set(()) & set(("name",))) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_delete_cluster_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_delete_cluster" - ) as pre: - pre.assert_not_called() - pb_message = bigtable_instance_admin.DeleteClusterRequest.pb( - bigtable_instance_admin.DeleteClusterRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - - request = bigtable_instance_admin.DeleteClusterRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - - client.delete_cluster( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - - -def test_delete_cluster_rest_bad_request( - transport: str = "rest", request_type=bigtable_instance_admin.DeleteClusterRequest -): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/clusters/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.delete_cluster(request) - - def test_delete_cluster_rest_flattened(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -13259,176 +10514,40 @@ def test_delete_cluster_rest_flattened_error(transport: str = "rest"): ) -def test_delete_cluster_rest_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) +def test_create_app_profile_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.CreateAppProfileRequest, - dict, - ], -) -def test_create_app_profile_rest(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) + # Ensure method has been cached + assert ( + client._transport.create_app_profile in client._transport._wrapped_methods + ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} - request_init["app_profile"] = { - "name": "name_value", - "etag": "etag_value", - "description": "description_value", - "multi_cluster_routing_use_any": { - "cluster_ids": ["cluster_ids_value1", "cluster_ids_value2"], - "row_affinity": {}, - }, - "single_cluster_routing": { - "cluster_id": "cluster_id_value", - "allow_transactional_writes": True, - }, - "priority": 1, - "standard_isolation": {"priority": 1}, - "data_boost_isolation_read_only": {"compute_billing_owner": 1}, - } - # The version of a generated dependency at test runtime may differ from the version used during generation. - # Delete any fields which are not present in the current runtime dependency - # See https://github.com/googleapis/gapic-generator-python/issues/1748 + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.create_app_profile + ] = mock_rpc - # Determine if the message type is proto-plus or protobuf - test_field = bigtable_instance_admin.CreateAppProfileRequest.meta.fields[ - "app_profile" - ] + request = {} + client.create_app_profile(request) - def get_message_fields(field): - # Given a field which is a message (composite type), return a list with - # all the fields of the message. - # If the field is not a composite type, return an empty list. - message_fields = [] + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 - if hasattr(field, "message") and field.message: - is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") - - if is_field_type_proto_plus_type: - message_fields = field.message.meta.fields.values() - # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types - else: # pragma: NO COVER - message_fields = field.message.DESCRIPTOR.fields - return message_fields - - runtime_nested_fields = [ - (field.name, nested_field.name) - for field in get_message_fields(test_field) - for nested_field in get_message_fields(field) - ] - - subfields_not_in_runtime = [] - - # For each item in the sample request, create a list of sub fields which are not present at runtime - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["app_profile"].items(): # pragma: NO COVER - result = None - is_repeated = False - # For repeated fields - if isinstance(value, list) and len(value): - is_repeated = True - result = value[0] - # For fields where the type is another message - if isinstance(value, dict): - result = value - - if result and hasattr(result, "keys"): - for subfield in result.keys(): - if (field, subfield) not in runtime_nested_fields: - subfields_not_in_runtime.append( - { - "field": field, - "subfield": subfield, - "is_repeated": is_repeated, - } - ) - - # Remove fields from the sample request which are not present in the runtime version of the dependency - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER - field = subfield_to_delete.get("field") - field_repeated = subfield_to_delete.get("is_repeated") - subfield = subfield_to_delete.get("subfield") - if subfield: - if field_repeated: - for i in range(0, len(request_init["app_profile"][field])): - del request_init["app_profile"][field][i][subfield] - else: - del request_init["app_profile"][field][subfield] - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = instance.AppProfile( - name="name_value", - etag="etag_value", - description="description_value", - priority=instance.AppProfile.Priority.PRIORITY_LOW, - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = instance.AppProfile.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.create_app_profile(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, instance.AppProfile) - assert response.name == "name_value" - assert response.etag == "etag_value" - assert response.description == "description_value" - - -def test_create_app_profile_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.create_app_profile in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.create_app_profile - ] = mock_rpc - - request = {} - client.create_app_profile(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.create_app_profile(request) + client.create_app_profile(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 @@ -13553,86 +10672,6 @@ def test_create_app_profile_rest_unset_required_fields(): ) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_create_app_profile_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_create_app_profile" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_create_app_profile" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_instance_admin.CreateAppProfileRequest.pb( - bigtable_instance_admin.CreateAppProfileRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = instance.AppProfile.to_json(instance.AppProfile()) - - request = bigtable_instance_admin.CreateAppProfileRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = instance.AppProfile() - - client.create_app_profile( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - - -def test_create_app_profile_rest_bad_request( - transport: str = "rest", - request_type=bigtable_instance_admin.CreateAppProfileRequest, -): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} - request = 
request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.create_app_profile(request) - - def test_create_app_profile_rest_flattened(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -13694,57 +10733,6 @@ def test_create_app_profile_rest_flattened_error(transport: str = "rest"): ) -def test_create_app_profile_rest_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.GetAppProfileRequest, - dict, - ], -) -def test_get_app_profile_rest(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/appProfiles/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = instance.AppProfile( - name="name_value", - etag="etag_value", - description="description_value", - priority=instance.AppProfile.Priority.PRIORITY_LOW, - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = instance.AppProfile.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.get_app_profile(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, instance.AppProfile) - assert response.name == "name_value" - assert response.etag == "etag_value" - assert response.description == "description_value" - - def test_get_app_profile_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call @@ -13864,89 +10852,10 @@ def test_get_app_profile_rest_unset_required_fields(): assert set(unset_fields) == (set(()) & set(("name",))) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_app_profile_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( +def test_get_app_profile_rest_flattened(): + client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_get_app_profile" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_get_app_profile" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_instance_admin.GetAppProfileRequest.pb( - bigtable_instance_admin.GetAppProfileRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = instance.AppProfile.to_json(instance.AppProfile()) - - request = bigtable_instance_admin.GetAppProfileRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = instance.AppProfile() - - client.get_app_profile( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - - -def test_get_app_profile_rest_bad_request( - transport: str = "rest", request_type=bigtable_instance_admin.GetAppProfileRequest -): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/appProfiles/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.get_app_profile(request) - - -def test_get_app_profile_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport="rest", ) # Mock the http request call within the method and fake a response. 
@@ -14002,54 +10911,6 @@ def test_get_app_profile_rest_flattened_error(transport: str = "rest"): ) -def test_get_app_profile_rest_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.ListAppProfilesRequest, - dict, - ], -) -def test_list_app_profiles_rest(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListAppProfilesResponse( - next_page_token="next_page_token_value", - failed_locations=["failed_locations_value"], - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_instance_admin.ListAppProfilesResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.list_app_profiles(request) - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListAppProfilesPager) - assert response.next_page_token == "next_page_token_value" - assert response.failed_locations == ["failed_locations_value"] - - def test_list_app_profiles_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call @@ -14188,89 +11049,6 @@ def test_list_app_profiles_rest_unset_required_fields(): ) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_app_profiles_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_list_app_profiles" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_list_app_profiles" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_instance_admin.ListAppProfilesRequest.pb( - bigtable_instance_admin.ListAppProfilesRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = ( - bigtable_instance_admin.ListAppProfilesResponse.to_json( - bigtable_instance_admin.ListAppProfilesResponse() - ) - ) - - request = bigtable_instance_admin.ListAppProfilesRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = 
bigtable_instance_admin.ListAppProfilesResponse() - - client.list_app_profiles( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - - -def test_list_app_profiles_rest_bad_request( - transport: str = "rest", request_type=bigtable_instance_admin.ListAppProfilesRequest -): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.list_app_profiles(request) - - def test_list_app_profiles_rest_flattened(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -14391,159 +11169,35 @@ def test_list_app_profiles_rest_pager(transport: str = "rest"): assert page_.raw_page.next_page_token == token -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.UpdateAppProfileRequest, - dict, - ], -) -def test_update_app_profile_rest(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) +def test_update_app_profile_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) - # send a request that will satisfy transcoding - request_init = { - "app_profile": { - "name": "projects/sample1/instances/sample2/appProfiles/sample3" - } - } - request_init["app_profile"] = { - "name": "projects/sample1/instances/sample2/appProfiles/sample3", - "etag": "etag_value", - "description": "description_value", - "multi_cluster_routing_use_any": { - "cluster_ids": ["cluster_ids_value1", "cluster_ids_value2"], - "row_affinity": {}, - }, - "single_cluster_routing": { - "cluster_id": "cluster_id_value", - "allow_transactional_writes": True, - }, - "priority": 1, - "standard_isolation": {"priority": 1}, - "data_boost_isolation_read_only": {"compute_billing_owner": 1}, - } - # The version of a generated dependency at test runtime may differ from the version used during generation. - # Delete any fields which are not present in the current runtime dependency - # See https://github.com/googleapis/gapic-generator-python/issues/1748 + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - # Determine if the message type is proto-plus or protobuf - test_field = bigtable_instance_admin.UpdateAppProfileRequest.meta.fields[ - "app_profile" - ] + # Ensure method has been cached + assert ( + client._transport.update_app_profile in client._transport._wrapped_methods + ) - def get_message_fields(field): - # Given a field which is a message (composite type), return a list with - # all the fields of the message. - # If the field is not a composite type, return an empty list. 
- message_fields = [] + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.update_app_profile + ] = mock_rpc - if hasattr(field, "message") and field.message: - is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") - - if is_field_type_proto_plus_type: - message_fields = field.message.meta.fields.values() - # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types - else: # pragma: NO COVER - message_fields = field.message.DESCRIPTOR.fields - return message_fields - - runtime_nested_fields = [ - (field.name, nested_field.name) - for field in get_message_fields(test_field) - for nested_field in get_message_fields(field) - ] - - subfields_not_in_runtime = [] - - # For each item in the sample request, create a list of sub fields which are not present at runtime - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["app_profile"].items(): # pragma: NO COVER - result = None - is_repeated = False - # For repeated fields - if isinstance(value, list) and len(value): - is_repeated = True - result = value[0] - # For fields where the type is another message - if isinstance(value, dict): - result = value - - if result and hasattr(result, "keys"): - for subfield in result.keys(): - if (field, subfield) not in runtime_nested_fields: - subfields_not_in_runtime.append( - { - "field": field, - "subfield": subfield, - "is_repeated": is_repeated, - } - ) - - # Remove fields from the sample request which are not present in the runtime version of the dependency - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER - field = subfield_to_delete.get("field") - field_repeated = subfield_to_delete.get("is_repeated") - subfield = subfield_to_delete.get("subfield") - if subfield: - if field_repeated: - for i in range(0, len(request_init["app_profile"][field])): - del request_init["app_profile"][field][i][subfield] - else: - del request_init["app_profile"][field][subfield] - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.update_app_profile(request) - - # Establish that the response is the type that we expect. 
- assert response.operation.name == "operations/spam" - - -def test_update_app_profile_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.update_app_profile in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.update_app_profile - ] = mock_rpc - - request = {} - client.update_app_profile(request) + request = {} + client.update_app_profile(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 @@ -14655,94 +11309,6 @@ def test_update_app_profile_rest_unset_required_fields(): ) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_update_app_profile_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_update_app_profile" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_update_app_profile" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_instance_admin.UpdateAppProfileRequest.pb( - bigtable_instance_admin.UpdateAppProfileRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson( - operations_pb2.Operation() - ) - - request = bigtable_instance_admin.UpdateAppProfileRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - - client.update_app_profile( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - - -def test_update_app_profile_rest_bad_request( - transport: str = "rest", - request_type=bigtable_instance_admin.UpdateAppProfileRequest, -): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = { - "app_profile": { - "name": "projects/sample1/instances/sample2/appProfiles/sample3" - } - } - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.update_app_profile(request) - - def test_update_app_profile_rest_flattened(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -14804,47 +11370,6 @@ def test_update_app_profile_rest_flattened_error(transport: str = "rest"): ) -def test_update_app_profile_rest_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.DeleteAppProfileRequest, - dict, - ], -) -def test_delete_app_profile_rest(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/appProfiles/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = None - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = "" - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.delete_app_profile(request) - - # Establish that the response is the type that we expect. - assert response is None - - def test_delete_app_profile_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call @@ -14988,90 +11513,16 @@ def test_delete_app_profile_rest_unset_required_fields(): ) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_delete_app_profile_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( +def test_delete_app_profile_rest_flattened(): + client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), + transport="rest", ) - client = BigtableInstanceAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_delete_app_profile" - ) as pre: - pre.assert_not_called() - pb_message = bigtable_instance_admin.DeleteAppProfileRequest.pb( - bigtable_instance_admin.DeleteAppProfileRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - - request = bigtable_instance_admin.DeleteAppProfileRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - - client.delete_app_profile( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - - -def 
test_delete_app_profile_rest_bad_request( - transport: str = "rest", - request_type=bigtable_instance_admin.DeleteAppProfileRequest, -): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/appProfiles/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.delete_app_profile(request) - - -def test_delete_app_profile_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = None + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None # get arguments that satisfy an http rule for this method sample_request = { @@ -15119,52 +11570,6 @@ def test_delete_app_profile_rest_flattened_error(transport: str = "rest"): ) -def test_delete_app_profile_rest_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - -@pytest.mark.parametrize( - "request_type", - [ - iam_policy_pb2.GetIamPolicyRequest, - dict, - ], -) -def test_get_iam_policy_rest(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {"resource": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = policy_pb2.Policy( - version=774, - etag=b"etag_blob", - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.get_iam_policy(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, policy_pb2.Policy) - assert response.version == 774 - assert response.etag == b"etag_blob" - - def test_get_iam_policy_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call @@ -15283,83 +11688,6 @@ def test_get_iam_policy_rest_unset_required_fields(): assert set(unset_fields) == (set(()) & set(("resource",))) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_iam_policy_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_get_iam_policy" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_get_iam_policy" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = iam_policy_pb2.GetIamPolicyRequest() - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson(policy_pb2.Policy()) - - request = iam_policy_pb2.GetIamPolicyRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = policy_pb2.Policy() - - client.get_iam_policy( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - - -def test_get_iam_policy_rest_bad_request( - transport: str = "rest", request_type=iam_policy_pb2.GetIamPolicyRequest -): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {"resource": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.get_iam_policy(request) - - def test_get_iam_policy_rest_flattened(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -15415,52 +11743,6 @@ def test_get_iam_policy_rest_flattened_error(transport: str = "rest"): ) -def test_get_iam_policy_rest_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - -@pytest.mark.parametrize( - "request_type", - [ - iam_policy_pb2.SetIamPolicyRequest, - dict, - ], -) -def test_set_iam_policy_rest(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {"resource": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = policy_pb2.Policy( - version=774, - etag=b"etag_blob", - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.set_iam_policy(request) - - # Establish that the response is the type that we expect. - assert isinstance(response, policy_pb2.Policy) - assert response.version == 774 - assert response.etag == b"etag_blob" - - def test_set_iam_policy_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call @@ -15587,96 +11869,19 @@ def test_set_iam_policy_rest_unset_required_fields(): ) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_set_iam_policy_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( +def test_set_iam_policy_rest_flattened(): + client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), + transport="rest", ) - client = BigtableInstanceAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_set_iam_policy" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_set_iam_policy" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = iam_policy_pb2.SetIamPolicyRequest() - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson(policy_pb2.Policy()) + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy() - request = iam_policy_pb2.SetIamPolicyRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = policy_pb2.Policy() - - client.set_iam_policy( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - - -def test_set_iam_policy_rest_bad_request( - transport: str = "rest", request_type=iam_policy_pb2.SetIamPolicyRequest -): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {"resource": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.set_iam_policy(request) - - -def test_set_iam_policy_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = policy_pb2.Policy() - - # get arguments that satisfy an http rule for this method - sample_request = {"resource": "projects/sample1/instances/sample2"} + # get arguments that satisfy an http rule for this method + sample_request = {"resource": "projects/sample1/instances/sample2"} # get truthy value for each flattened field mock_args = dict( @@ -15719,50 +11924,6 @@ def test_set_iam_policy_rest_flattened_error(transport: str = "rest"): ) -def test_set_iam_policy_rest_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - -@pytest.mark.parametrize( - "request_type", - [ - iam_policy_pb2.TestIamPermissionsRequest, - dict, - ], -) -def test_test_iam_permissions_rest(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {"resource": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = iam_policy_pb2.TestIamPermissionsResponse( - permissions=["permissions_value"], - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.test_iam_permissions(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) - assert response.permissions == ["permissions_value"] - - def test_test_iam_permissions_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call @@ -15897,85 +12058,6 @@ def test_test_iam_permissions_rest_unset_required_fields(): ) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_test_iam_permissions_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_test_iam_permissions" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_test_iam_permissions" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = iam_policy_pb2.TestIamPermissionsRequest() - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson( - iam_policy_pb2.TestIamPermissionsResponse() - ) - - request = iam_policy_pb2.TestIamPermissionsRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = iam_policy_pb2.TestIamPermissionsResponse() - - client.test_iam_permissions( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - - -def test_test_iam_permissions_rest_bad_request( - transport: str = "rest", request_type=iam_policy_pb2.TestIamPermissionsRequest -): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {"resource": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.test_iam_permissions(request) - - def test_test_iam_permissions_rest_flattened(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -16033,52 +12115,6 @@ def test_test_iam_permissions_rest_flattened_error(transport: str = "rest"): ) -def test_test_iam_permissions_rest_error(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.ListHotTabletsRequest, - dict, - ], -) -def test_list_hot_tablets_rest(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListHotTabletsResponse( - next_page_token="next_page_token_value", - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_instance_admin.ListHotTabletsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.list_hot_tablets(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListHotTabletsPager) - assert response.next_page_token == "next_page_token_value" - - def test_list_hot_tablets_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call @@ -16221,55 +12257,4195 @@ def test_list_hot_tablets_rest_unset_required_fields(): ) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_hot_tablets_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( +def test_list_hot_tablets_rest_flattened(): + client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableInstanceAdminRestInterceptor(), + transport="rest", ) - client = BigtableInstanceAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_list_hot_tablets" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_list_hot_tablets" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_instance_admin.ListHotTabletsRequest.pb( - bigtable_instance_admin.ListHotTabletsRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_instance_admin.ListHotTabletsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = { + "parent": "projects/sample1/instances/sample2/clusters/sample3" } - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = ( - bigtable_instance_admin.ListHotTabletsResponse.to_json( - bigtable_instance_admin.ListHotTabletsResponse() - ) + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", ) + mock_args.update(sample_request) - request = bigtable_instance_admin.ListHotTabletsRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable_instance_admin.ListHotTabletsResponse() - - client.list_hot_tablets( - request, + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListHotTabletsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.list_hot_tablets(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/instances/*/clusters/*}/hotTablets" + % client.transport._host, + args[1], + ) + + +def test_list_hot_tablets_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_hot_tablets( + bigtable_instance_admin.ListHotTabletsRequest(), + parent="parent_value", + ) + + +def test_list_hot_tablets_rest_pager(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + bigtable_instance_admin.ListHotTabletsResponse( + hot_tablets=[ + instance.HotTablet(), + instance.HotTablet(), + instance.HotTablet(), + ], + next_page_token="abc", + ), + bigtable_instance_admin.ListHotTabletsResponse( + hot_tablets=[], + next_page_token="def", + ), + bigtable_instance_admin.ListHotTabletsResponse( + hot_tablets=[ + instance.HotTablet(), + ], + next_page_token="ghi", + ), + bigtable_instance_admin.ListHotTabletsResponse( + hot_tablets=[ + instance.HotTablet(), + instance.HotTablet(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + bigtable_instance_admin.ListHotTabletsResponse.to_json(x) for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = { + "parent": "projects/sample1/instances/sample2/clusters/sample3" + } + + pager = client.list_hot_tablets(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, instance.HotTablet) for i in results) + + pages = list(client.list_hot_tablets(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.BigtableInstanceAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.BigtableInstanceAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BigtableInstanceAdminClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. 
+ transport = transports.BigtableInstanceAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = BigtableInstanceAdminClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = BigtableInstanceAdminClient( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.BigtableInstanceAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BigtableInstanceAdminClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.BigtableInstanceAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = BigtableInstanceAdminClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.BigtableInstanceAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.BigtableInstanceAdminGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.BigtableInstanceAdminGrpcTransport, + transports.BigtableInstanceAdminGrpcAsyncIOTransport, + transports.BigtableInstanceAdminRestTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_transport_kind_grpc(): + transport = BigtableInstanceAdminClient.get_transport_class("grpc")( + credentials=ga_credentials.AnonymousCredentials() + ) + assert transport.kind == "grpc" + + +def test_initialize_client_w_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc" + ) + assert client is not None + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_instance_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.create_instance), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_instance(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.CreateInstanceRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+def test_get_instance_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_instance), "__call__") as call: + call.return_value = instance.Instance() + client.get_instance(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.GetInstanceRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_instances_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_instances), "__call__") as call: + call.return_value = bigtable_instance_admin.ListInstancesResponse() + client.list_instances(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.ListInstancesRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_update_instance_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.update_instance), "__call__") as call: + call.return_value = instance.Instance() + client.update_instance(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = instance.Instance() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_partial_update_instance_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.partial_update_instance), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.partial_update_instance(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.PartialUpdateInstanceRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_instance_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.delete_instance), "__call__") as call: + call.return_value = None + client.delete_instance(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.DeleteInstanceRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_cluster_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.create_cluster), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.CreateClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_cluster_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_cluster), "__call__") as call: + call.return_value = instance.Cluster() + client.get_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.GetClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_clusters_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: + call.return_value = bigtable_instance_admin.ListClustersResponse() + client.list_clusters(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.ListClustersRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_update_cluster_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.update_cluster), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.update_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = instance.Cluster() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_partial_update_cluster_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. 
+ with mock.patch.object( + type(client.transport.partial_update_cluster), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.partial_update_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.PartialUpdateClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_cluster_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call: + call.return_value = None + client.delete_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.DeleteClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_app_profile_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_app_profile), "__call__" + ) as call: + call.return_value = instance.AppProfile() + client.create_app_profile(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.CreateAppProfileRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_app_profile_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_app_profile), "__call__") as call: + call.return_value = instance.AppProfile() + client.get_app_profile(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.GetAppProfileRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_app_profiles_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_app_profiles), "__call__" + ) as call: + call.return_value = bigtable_instance_admin.ListAppProfilesResponse() + client.list_app_profiles(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.ListAppProfilesRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. 
request == None and no flattened fields passed, work. +def test_update_app_profile_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.update_app_profile), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.update_app_profile(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.UpdateAppProfileRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_app_profile_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.delete_app_profile), "__call__" + ) as call: + call.return_value = None + client.delete_app_profile(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.DeleteAppProfileRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_iam_policy_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + client.get_iam_policy(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.GetIamPolicyRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_set_iam_policy_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + client.set_iam_policy(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.SetIamPolicyRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_test_iam_permissions_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + client.test_iam_permissions(request=None) + + # Establish that the underlying stub method was called. 
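+        # mock_calls[0] unpacks to (name, args, kwargs); args[0] is the request message the client built from request=None.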
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.TestIamPermissionsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_hot_tablets_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_hot_tablets), "__call__") as call: + call.return_value = bigtable_instance_admin.ListHotTabletsResponse() + client.list_hot_tablets(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.ListHotTabletsRequest() + + assert args[0] == request_msg + + +def test_transport_kind_grpc_asyncio(): + transport = BigtableInstanceAdminAsyncClient.get_transport_class("grpc_asyncio")( + credentials=async_anonymous_credentials() + ) + assert transport.kind == "grpc_asyncio" + + +def test_initialize_client_w_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), transport="grpc_asyncio" + ) + assert client is not None + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_create_instance_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.create_instance), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.create_instance(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.CreateInstanceRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_get_instance_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_instance), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + instance.Instance( + name="name_value", + display_name="display_name_value", + state=instance.Instance.State.READY, + type_=instance.Instance.Type.PRODUCTION, + satisfies_pzs=True, + ) + ) + await client.get_instance(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.GetInstanceRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+@pytest.mark.asyncio +async def test_list_instances_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_instances), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_instance_admin.ListInstancesResponse( + failed_locations=["failed_locations_value"], + next_page_token="next_page_token_value", + ) + ) + await client.list_instances(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.ListInstancesRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_update_instance_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.update_instance), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + instance.Instance( + name="name_value", + display_name="display_name_value", + state=instance.Instance.State.READY, + type_=instance.Instance.Type.PRODUCTION, + satisfies_pzs=True, + ) + ) + await client.update_instance(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = instance.Instance() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_partial_update_instance_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.partial_update_instance), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.partial_update_instance(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.PartialUpdateInstanceRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_delete_instance_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.delete_instance), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_instance(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.DeleteInstanceRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_create_cluster_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.create_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.create_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.CreateClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_get_cluster_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + instance.Cluster( + name="name_value", + location="location_value", + state=instance.Cluster.State.READY, + serve_nodes=1181, + node_scaling_factor=instance.Cluster.NodeScalingFactor.NODE_SCALING_FACTOR_1X, + default_storage_type=common.StorageType.SSD, + ) + ) + await client.get_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.GetClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_list_clusters_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_instance_admin.ListClustersResponse( + failed_locations=["failed_locations_value"], + next_page_token="next_page_token_value", + ) + ) + await client.list_clusters(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.ListClustersRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+@pytest.mark.asyncio +async def test_update_cluster_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.update_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.update_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = instance.Cluster() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_partial_update_cluster_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.partial_update_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.partial_update_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.PartialUpdateClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_delete_cluster_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.DeleteClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_create_app_profile_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_app_profile), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + instance.AppProfile( + name="name_value", + etag="etag_value", + description="description_value", + ) + ) + await client.create_app_profile(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.CreateAppProfileRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_get_app_profile_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_app_profile), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + instance.AppProfile( + name="name_value", + etag="etag_value", + description="description_value", + ) + ) + await client.get_app_profile(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.GetAppProfileRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_list_app_profiles_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_app_profiles), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_instance_admin.ListAppProfilesResponse( + next_page_token="next_page_token_value", + failed_locations=["failed_locations_value"], + ) + ) + await client.list_app_profiles(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.ListAppProfilesRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_update_app_profile_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.update_app_profile), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.update_app_profile(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.UpdateAppProfileRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_delete_app_profile_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. 
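+    # The async client awaits the stub, so the faked response below is wrapped in an awaitable FakeUnaryUnaryCall.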
+ with mock.patch.object( + type(client.transport.delete_app_profile), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_app_profile(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.DeleteAppProfileRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_get_iam_policy_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + ) + await client.get_iam_policy(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.GetIamPolicyRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_set_iam_policy_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + ) + await client.set_iam_policy(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.SetIamPolicyRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_test_iam_permissions_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + ) + await client.test_iam_permissions(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.TestIamPermissionsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+@pytest.mark.asyncio +async def test_list_hot_tablets_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_hot_tablets), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_instance_admin.ListHotTabletsResponse( + next_page_token="next_page_token_value", + ) + ) + await client.list_hot_tablets(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.ListHotTabletsRequest() + + assert args[0] == request_msg + + +def test_transport_kind_rest(): + transport = BigtableInstanceAdminClient.get_transport_class("rest")( + credentials=ga_credentials.AnonymousCredentials() + ) + assert transport.kind == "rest" + + +def test_create_instance_rest_bad_request( + request_type=bigtable_instance_admin.CreateInstanceRequest, +): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.create_instance(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.CreateInstanceRequest, + dict, + ], +) +def test_create_instance_rest_call_success(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.create_instance(request) + + # Establish that the response is the type that we expect. 
+ json_return_value = json_format.MessageToJson(return_value) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_create_instance_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_create_instance" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_create_instance" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_instance_admin.CreateInstanceRequest.pb( + bigtable_instance_admin.CreateInstanceRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value + + request = bigtable_instance_admin.CreateInstanceRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.create_instance( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_instance_rest_bad_request( + request_type=bigtable_instance_admin.GetInstanceRequest, +): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.get_instance(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.GetInstanceRequest, + dict, + ], +) +def test_get_instance_rest_call_success(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
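+        # The proto-plus message is converted to protobuf and JSON-serialized below, matching the body the REST transport decodes.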
+ return_value = instance.Instance( + name="name_value", + display_name="display_name_value", + state=instance.Instance.State.READY, + type_=instance.Instance.Type.PRODUCTION, + satisfies_pzs=True, + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = instance.Instance.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.get_instance(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, instance.Instance) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.state == instance.Instance.State.READY + assert response.type_ == instance.Instance.Type.PRODUCTION + assert response.satisfies_pzs is True + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_instance_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_get_instance" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_get_instance" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_instance_admin.GetInstanceRequest.pb( + bigtable_instance_admin.GetInstanceRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + return_value = instance.Instance.to_json(instance.Instance()) + req.return_value.content = return_value + + request = bigtable_instance_admin.GetInstanceRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = instance.Instance() + + client.get_instance( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_list_instances_rest_bad_request( + request_type=bigtable_instance_admin.ListInstancesRequest, +): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.list_instances(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.ListInstancesRequest, + dict, + ], +) +def test_list_instances_rest_call_success(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_instance_admin.ListInstancesResponse( + failed_locations=["failed_locations_value"], + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListInstancesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.list_instances(request) + + assert response.raw_page is response + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable_instance_admin.ListInstancesResponse) + assert response.failed_locations == ["failed_locations_value"] + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_instances_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_list_instances" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_list_instances" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_instance_admin.ListInstancesRequest.pb( + bigtable_instance_admin.ListInstancesRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + return_value = bigtable_instance_admin.ListInstancesResponse.to_json( + bigtable_instance_admin.ListInstancesResponse() + ) + req.return_value.content = return_value + + request = bigtable_instance_admin.ListInstancesRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable_instance_admin.ListInstancesResponse() + + client.list_instances( + request, + metadata=[ + ("key", "val"), + 
("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_update_instance_rest_bad_request(request_type=instance.Instance): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.update_instance(request) + + +@pytest.mark.parametrize( + "request_type", + [ + instance.Instance, + dict, + ], +) +def test_update_instance_rest_call_success(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = instance.Instance( + name="name_value", + display_name="display_name_value", + state=instance.Instance.State.READY, + type_=instance.Instance.Type.PRODUCTION, + satisfies_pzs=True, + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = instance.Instance.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.update_instance(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, instance.Instance) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.state == instance.Instance.State.READY + assert response.type_ == instance.Instance.Type.PRODUCTION + assert response.satisfies_pzs is True + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_update_instance_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_update_instance" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_update_instance" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = instance.Instance.pb(instance.Instance()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + return_value = instance.Instance.to_json(instance.Instance()) + req.return_value.content = return_value + + request = instance.Instance() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = instance.Instance() + + client.update_instance( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_partial_update_instance_rest_bad_request( + request_type=bigtable_instance_admin.PartialUpdateInstanceRequest, +): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"instance": {"name": "projects/sample1/instances/sample2"}} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.partial_update_instance(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.PartialUpdateInstanceRequest, + dict, + ], +) +def test_partial_update_instance_rest_call_success(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"instance": {"name": "projects/sample1/instances/sample2"}} + request_init["instance"] = { + "name": "projects/sample1/instances/sample2", + "display_name": "display_name_value", + "state": 1, + "type_": 1, + "labels": {}, + "create_time": {"seconds": 751, "nanos": 543}, + "satisfies_pzs": True, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. 
+ # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_instance_admin.PartialUpdateInstanceRequest.meta.fields[ + "instance" + ] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. + message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["instance"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["instance"][field])): + del request_init["instance"][field][i][subfield] + else: + del request_init["instance"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.partial_update_instance(request) + + # Establish that the response is the type that we expect. 
+ json_return_value = json_format.MessageToJson(return_value) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_partial_update_instance_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_partial_update_instance" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_partial_update_instance" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_instance_admin.PartialUpdateInstanceRequest.pb( + bigtable_instance_admin.PartialUpdateInstanceRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value + + request = bigtable_instance_admin.PartialUpdateInstanceRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.partial_update_instance( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_delete_instance_rest_bad_request( + request_type=bigtable_instance_admin.DeleteInstanceRequest, +): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.delete_instance(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.DeleteInstanceRequest, + dict, + ], +) +def test_delete_instance_rest_call_success(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
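+        # delete_instance has no response payload, so an empty JSON body with a 200 status is faked here.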
+ return_value = None + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = "" + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.delete_instance(request) + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_delete_instance_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_delete_instance" + ) as pre: + pre.assert_not_called() + pb_message = bigtable_instance_admin.DeleteInstanceRequest.pb( + bigtable_instance_admin.DeleteInstanceRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + + request = bigtable_instance_admin.DeleteInstanceRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + + client.delete_instance( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + + +def test_create_cluster_rest_bad_request( + request_type=bigtable_instance_admin.CreateClusterRequest, +): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
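+    # A mocked 400 status on the session is enough for the REST transport to raise core_exceptions.BadRequest.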
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.create_cluster(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.CreateClusterRequest, + dict, + ], +) +def test_create_cluster_rest_call_success(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} + request_init["cluster"] = { + "name": "name_value", + "location": "location_value", + "state": 1, + "serve_nodes": 1181, + "node_scaling_factor": 1, + "cluster_config": { + "cluster_autoscaling_config": { + "autoscaling_limits": { + "min_serve_nodes": 1600, + "max_serve_nodes": 1602, + }, + "autoscaling_targets": { + "cpu_utilization_percent": 2483, + "storage_utilization_gib_per_node": 3404, + }, + } + }, + "default_storage_type": 1, + "encryption_config": {"kms_key_name": "kms_key_name_value"}, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_instance_admin.CreateClusterRequest.meta.fields["cluster"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["cluster"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["cluster"][field])): + del request_init["cluster"][field][i][subfield] + else: + del request_init["cluster"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.create_cluster(request) + + # Establish that the response is the type that we expect. 
+ json_return_value = json_format.MessageToJson(return_value) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_create_cluster_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_create_cluster" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_create_cluster" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_instance_admin.CreateClusterRequest.pb( + bigtable_instance_admin.CreateClusterRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value + + request = bigtable_instance_admin.CreateClusterRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.create_cluster( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_cluster_rest_bad_request( + request_type=bigtable_instance_admin.GetClusterRequest, +): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/clusters/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.get_cluster(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.GetClusterRequest, + dict, + ], +) +def test_get_cluster_rest_call_success(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/clusters/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = instance.Cluster( + name="name_value", + location="location_value", + state=instance.Cluster.State.READY, + serve_nodes=1181, + node_scaling_factor=instance.Cluster.NodeScalingFactor.NODE_SCALING_FACTOR_1X, + default_storage_type=common.StorageType.SSD, + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = instance.Cluster.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.get_cluster(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, instance.Cluster) + assert response.name == "name_value" + assert response.location == "location_value" + assert response.state == instance.Cluster.State.READY + assert response.serve_nodes == 1181 + assert ( + response.node_scaling_factor + == instance.Cluster.NodeScalingFactor.NODE_SCALING_FACTOR_1X + ) + assert response.default_storage_type == common.StorageType.SSD + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_cluster_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_get_cluster" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_get_cluster" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_instance_admin.GetClusterRequest.pb( + bigtable_instance_admin.GetClusterRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + return_value = instance.Cluster.to_json(instance.Cluster()) + req.return_value.content = return_value + + request = bigtable_instance_admin.GetClusterRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = instance.Cluster() + + client.get_cluster( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_list_clusters_rest_bad_request( + request_type=bigtable_instance_admin.ListClustersRequest, +): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
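+ # (Each *_rest_bad_request test patches `Session.request` and only verifies that
+ # an HTTP 400 response surfaces as `core_exceptions.BadRequest`.)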
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.list_clusters(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.ListClustersRequest, + dict, + ], +) +def test_list_clusters_rest_call_success(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_instance_admin.ListClustersResponse( + failed_locations=["failed_locations_value"], + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListClustersResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.list_clusters(request) + + assert response.raw_page is response + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable_instance_admin.ListClustersResponse) + assert response.failed_locations == ["failed_locations_value"] + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_clusters_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_list_clusters" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_list_clusters" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_instance_admin.ListClustersRequest.pb( + bigtable_instance_admin.ListClustersRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + return_value = bigtable_instance_admin.ListClustersResponse.to_json( + bigtable_instance_admin.ListClustersResponse() + ) + req.return_value.content = return_value + + request = bigtable_instance_admin.ListClustersRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable_instance_admin.ListClustersResponse() + + client.list_clusters( + request, + metadata=[ + ("key", "val"), + 
("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_update_cluster_rest_bad_request(request_type=instance.Cluster): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/clusters/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.update_cluster(request) + + +@pytest.mark.parametrize( + "request_type", + [ + instance.Cluster, + dict, + ], +) +def test_update_cluster_rest_call_success(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/clusters/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.update_cluster(request) + + # Establish that the response is the type that we expect. 
+ json_return_value = json_format.MessageToJson(return_value) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_update_cluster_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_update_cluster" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_update_cluster" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = instance.Cluster.pb(instance.Cluster()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value + + request = instance.Cluster() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.update_cluster( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_partial_update_cluster_rest_bad_request( + request_type=bigtable_instance_admin.PartialUpdateClusterRequest, +): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = { + "cluster": {"name": "projects/sample1/instances/sample2/clusters/sample3"} + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.partial_update_cluster(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.PartialUpdateClusterRequest, + dict, + ], +) +def test_partial_update_cluster_rest_call_success(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = { + "cluster": {"name": "projects/sample1/instances/sample2/clusters/sample3"} + } + request_init["cluster"] = { + "name": "projects/sample1/instances/sample2/clusters/sample3", + "location": "location_value", + "state": 1, + "serve_nodes": 1181, + "node_scaling_factor": 1, + "cluster_config": { + "cluster_autoscaling_config": { + "autoscaling_limits": { + "min_serve_nodes": 1600, + "max_serve_nodes": 1602, + }, + "autoscaling_targets": { + "cpu_utilization_percent": 2483, + "storage_utilization_gib_per_node": 3404, + }, + } + }, + "default_storage_type": 1, + "encryption_config": {"kms_key_name": "kms_key_name_value"}, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_instance_admin.PartialUpdateClusterRequest.meta.fields[ + "cluster" + ] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
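+ # Proto-plus message classes expose their fields via `meta.fields`, whereas
+ # vanilla protobuf classes expose them via `DESCRIPTOR.fields`; the branch
+ # below handles both layouts.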
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["cluster"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["cluster"][field])): + del request_init["cluster"][field][i][subfield] + else: + del request_init["cluster"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.partial_update_cluster(request) + + # Establish that the response is the type that we expect. 
+ json_return_value = json_format.MessageToJson(return_value) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_partial_update_cluster_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_partial_update_cluster" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_partial_update_cluster" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_instance_admin.PartialUpdateClusterRequest.pb( + bigtable_instance_admin.PartialUpdateClusterRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value + + request = bigtable_instance_admin.PartialUpdateClusterRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.partial_update_cluster( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_delete_cluster_rest_bad_request( + request_type=bigtable_instance_admin.DeleteClusterRequest, +): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/clusters/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.delete_cluster(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.DeleteClusterRequest, + dict, + ], +) +def test_delete_cluster_rest_call_success(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/clusters/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
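+ # delete_cluster has no response body (the RPC returns Empty), so the expected
+ # client return value is None and the mocked HTTP body is an empty string.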
+ return_value = None + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = "" + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.delete_cluster(request) + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_delete_cluster_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_delete_cluster" + ) as pre: + pre.assert_not_called() + pb_message = bigtable_instance_admin.DeleteClusterRequest.pb( + bigtable_instance_admin.DeleteClusterRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + + request = bigtable_instance_admin.DeleteClusterRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + + client.delete_cluster( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + + +def test_create_app_profile_rest_bad_request( + request_type=bigtable_instance_admin.CreateAppProfileRequest, +): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.create_app_profile(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.CreateAppProfileRequest, + dict, + ], +) +def test_create_app_profile_rest_call_success(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} + request_init["app_profile"] = { + "name": "name_value", + "etag": "etag_value", + "description": "description_value", + "multi_cluster_routing_use_any": { + "cluster_ids": ["cluster_ids_value1", "cluster_ids_value2"], + "row_affinity": {}, + }, + "single_cluster_routing": { + "cluster_id": "cluster_id_value", + "allow_transactional_writes": True, + }, + "priority": 1, + "standard_isolation": {"priority": 1}, + "data_boost_isolation_read_only": {"compute_billing_owner": 1}, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_instance_admin.CreateAppProfileRequest.meta.fields[ + "app_profile" + ] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["app_profile"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["app_profile"][field])): + del request_init["app_profile"][field][i][subfield] + else: + del request_init["app_profile"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = instance.AppProfile( + name="name_value", + etag="etag_value", + description="description_value", + priority=instance.AppProfile.Priority.PRIORITY_LOW, + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = instance.AppProfile.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.create_app_profile(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, instance.AppProfile) + assert response.name == "name_value" + assert response.etag == "etag_value" + assert response.description == "description_value" + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_create_app_profile_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_create_app_profile" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_create_app_profile" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_instance_admin.CreateAppProfileRequest.pb( + bigtable_instance_admin.CreateAppProfileRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + return_value = instance.AppProfile.to_json(instance.AppProfile()) + req.return_value.content = return_value + + request = bigtable_instance_admin.CreateAppProfileRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = instance.AppProfile() + + client.create_app_profile( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_app_profile_rest_bad_request( + request_type=bigtable_instance_admin.GetAppProfileRequest, +): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/appProfiles/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.get_app_profile(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.GetAppProfileRequest, + dict, + ], +) +def test_get_app_profile_rest_call_success(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/appProfiles/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = instance.AppProfile( + name="name_value", + etag="etag_value", + description="description_value", + priority=instance.AppProfile.Priority.PRIORITY_LOW, + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = instance.AppProfile.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.get_app_profile(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, instance.AppProfile) + assert response.name == "name_value" + assert response.etag == "etag_value" + assert response.description == "description_value" + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_app_profile_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_get_app_profile" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_get_app_profile" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_instance_admin.GetAppProfileRequest.pb( + bigtable_instance_admin.GetAppProfileRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + return_value = instance.AppProfile.to_json(instance.AppProfile()) + req.return_value.content = return_value + + request = bigtable_instance_admin.GetAppProfileRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = instance.AppProfile() + + client.get_app_profile( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_list_app_profiles_rest_bad_request( + request_type=bigtable_instance_admin.ListAppProfilesRequest, +): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.list_app_profiles(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.ListAppProfilesRequest, + dict, + ], +) +def test_list_app_profiles_rest_call_success(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_instance_admin.ListAppProfilesResponse( + next_page_token="next_page_token_value", + failed_locations=["failed_locations_value"], + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListAppProfilesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.list_app_profiles(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListAppProfilesPager) + assert response.next_page_token == "next_page_token_value" + assert response.failed_locations == ["failed_locations_value"] + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_app_profiles_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_list_app_profiles" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_list_app_profiles" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_instance_admin.ListAppProfilesRequest.pb( + bigtable_instance_admin.ListAppProfilesRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + return_value = bigtable_instance_admin.ListAppProfilesResponse.to_json( + bigtable_instance_admin.ListAppProfilesResponse() + ) + req.return_value.content = return_value + + request = bigtable_instance_admin.ListAppProfilesRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable_instance_admin.ListAppProfilesResponse() + + client.list_app_profiles( + request, + metadata=[ + ("key", "val"), + 
("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_update_app_profile_rest_bad_request( + request_type=bigtable_instance_admin.UpdateAppProfileRequest, +): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = { + "app_profile": { + "name": "projects/sample1/instances/sample2/appProfiles/sample3" + } + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.update_app_profile(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.UpdateAppProfileRequest, + dict, + ], +) +def test_update_app_profile_rest_call_success(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = { + "app_profile": { + "name": "projects/sample1/instances/sample2/appProfiles/sample3" + } + } + request_init["app_profile"] = { + "name": "projects/sample1/instances/sample2/appProfiles/sample3", + "etag": "etag_value", + "description": "description_value", + "multi_cluster_routing_use_any": { + "cluster_ids": ["cluster_ids_value1", "cluster_ids_value2"], + "row_affinity": {}, + }, + "single_cluster_routing": { + "cluster_id": "cluster_id_value", + "allow_transactional_writes": True, + }, + "priority": 1, + "standard_isolation": {"priority": 1}, + "data_boost_isolation_read_only": {"compute_billing_owner": 1}, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_instance_admin.UpdateAppProfileRequest.meta.fields[ + "app_profile" + ] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["app_profile"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["app_profile"][field])): + del request_init["app_profile"][field][i][subfield] + else: + del request_init["app_profile"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.update_app_profile(request) + + # Establish that the response is the type that we expect. 
+ json_return_value = json_format.MessageToJson(return_value) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_update_app_profile_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_update_app_profile" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_update_app_profile" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_instance_admin.UpdateAppProfileRequest.pb( + bigtable_instance_admin.UpdateAppProfileRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value + + request = bigtable_instance_admin.UpdateAppProfileRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.update_app_profile( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_delete_app_profile_rest_bad_request( + request_type=bigtable_instance_admin.DeleteAppProfileRequest, +): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/appProfiles/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.delete_app_profile(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.DeleteAppProfileRequest, + dict, + ], +) +def test_delete_app_profile_rest_call_success(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/appProfiles/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = None + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = "" + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.delete_app_profile(request) + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_delete_app_profile_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_delete_app_profile" + ) as pre: + pre.assert_not_called() + pb_message = bigtable_instance_admin.DeleteAppProfileRequest.pb( + bigtable_instance_admin.DeleteAppProfileRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + + request = bigtable_instance_admin.DeleteAppProfileRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + + client.delete_app_profile( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + + +def test_get_iam_policy_rest_bad_request( + request_type=iam_policy_pb2.GetIamPolicyRequest, +): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"resource": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.get_iam_policy(request) + + +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.GetIamPolicyRequest, + dict, + ], +) +def test_get_iam_policy_rest_call_success(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"resource": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
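+ # policy_pb2.Policy is a plain protobuf message rather than a proto-plus type,
+ # so it is serialized with json_format.MessageToJson directly, without the
+ # `.pb()` conversion used for proto-plus responses above.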
+ return_value = policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.get_iam_policy(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + assert response.version == 774 + assert response.etag == b"etag_blob" + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_iam_policy_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_get_iam_policy" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_get_iam_policy" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = iam_policy_pb2.GetIamPolicyRequest() + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + return_value = json_format.MessageToJson(policy_pb2.Policy()) + req.return_value.content = return_value + + request = iam_policy_pb2.GetIamPolicyRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = policy_pb2.Policy() + + client.get_iam_policy( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_set_iam_policy_rest_bad_request( + request_type=iam_policy_pb2.SetIamPolicyRequest, +): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"resource": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.set_iam_policy(request) + + +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.SetIamPolicyRequest, + dict, + ], +) +def test_set_iam_policy_rest_call_success(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"resource": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. 
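+ # Note: the success-path tests patch `request` on the class of the transport's
+ # own session, whereas the bad-request tests patch `Session.request` globally.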
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.set_iam_policy(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + assert response.version == 774 + assert response.etag == b"etag_blob" + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_set_iam_policy_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_set_iam_policy" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_set_iam_policy" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = iam_policy_pb2.SetIamPolicyRequest() + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + return_value = json_format.MessageToJson(policy_pb2.Policy()) + req.return_value.content = return_value + + request = iam_policy_pb2.SetIamPolicyRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = policy_pb2.Policy() + + client.set_iam_policy( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_test_iam_permissions_rest_bad_request( + request_type=iam_policy_pb2.TestIamPermissionsRequest, +): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"resource": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.test_iam_permissions(request) + + +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.TestIamPermissionsRequest, + dict, + ], +) +def test_test_iam_permissions_rest_call_success(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"resource": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.test_iam_permissions(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + assert response.permissions == ["permissions_value"] + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_test_iam_permissions_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_test_iam_permissions" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_test_iam_permissions" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = iam_policy_pb2.TestIamPermissionsRequest() + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + return_value = json_format.MessageToJson( + iam_policy_pb2.TestIamPermissionsResponse() + ) + req.return_value.content = return_value + + request = iam_policy_pb2.TestIamPermissionsRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + client.test_iam_permissions( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_list_hot_tablets_rest_bad_request( + request_type=bigtable_instance_admin.ListHotTabletsRequest, +): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that 
will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.list_hot_tablets(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.ListHotTabletsRequest, + dict, + ], +) +def test_list_hot_tablets_rest_call_success(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_instance_admin.ListHotTabletsResponse( + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListHotTabletsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.list_hot_tablets(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListHotTabletsPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_hot_tablets_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_list_hot_tablets" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_list_hot_tablets" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_instance_admin.ListHotTabletsRequest.pb( + bigtable_instance_admin.ListHotTabletsRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + return_value = bigtable_instance_admin.ListHotTabletsResponse.to_json( + bigtable_instance_admin.ListHotTabletsResponse() + ) + req.return_value.content = return_value + + request = bigtable_instance_admin.ListHotTabletsRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable_instance_admin.ListHotTabletsResponse() + + client.list_hot_tablets( + request, metadata=[ ("key", "val"), ("cephalopod", "squid"), @@ -16280,258 +16456,462 @@ def test_list_hot_tablets_rest_interceptors(null_interceptor): post.assert_called_once() -def test_list_hot_tablets_rest_bad_request( - transport: str = "rest", request_type=bigtable_instance_admin.ListHotTabletsRequest -): +def test_initialize_client_w_rest(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + assert client is not None + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_instance_empty_call_rest(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + transport="rest", ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} - request = request_type(**request_init) + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.create_instance), "__call__") as call: + client.create_instance(request=None) - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.list_hot_tablets(request) + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.CreateInstanceRequest() + assert args[0] == request_msg -def test_list_hot_tablets_rest_flattened(): + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_instance_empty_call_rest(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListHotTabletsResponse() + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_instance), "__call__") as call: + client.get_instance(request=None) - # get arguments that satisfy an http rule for this method - sample_request = { - "parent": "projects/sample1/instances/sample2/clusters/sample3" - } + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.GetInstanceRequest() - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - ) - mock_args.update(sample_request) + assert args[0] == request_msg - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_instance_admin.ListHotTabletsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - client.list_hot_tablets(**mock_args) +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_instances_empty_call_rest(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*/clusters/*}/hotTablets" - % client.transport._host, - args[1], - ) + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_instances), "__call__") as call: + client.list_instances(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.ListInstancesRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_update_instance_empty_call_rest(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.update_instance), "__call__") as call: + client.update_instance(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = instance.Instance() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_partial_update_instance_empty_call_rest(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.partial_update_instance), "__call__" + ) as call: + client.partial_update_instance(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.PartialUpdateInstanceRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_instance_empty_call_rest(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.delete_instance), "__call__") as call: + client.delete_instance(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.DeleteInstanceRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_cluster_empty_call_rest(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.create_cluster), "__call__") as call: + client.create_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.CreateClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_cluster_empty_call_rest(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_cluster), "__call__") as call: + client.get_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.GetClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_clusters_empty_call_rest(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: + client.list_clusters(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.ListClustersRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_update_cluster_empty_call_rest(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.update_cluster), "__call__") as call: + client.update_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = instance.Cluster() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_partial_update_cluster_empty_call_rest(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.partial_update_cluster), "__call__" + ) as call: + client.partial_update_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.PartialUpdateClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_cluster_empty_call_rest(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call: + client.delete_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.DeleteClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_app_profile_empty_call_rest(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_app_profile), "__call__" + ) as call: + client.create_app_profile(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.CreateAppProfileRequest() + + assert args[0] == request_msg -def test_list_hot_tablets_rest_flattened_error(transport: str = "rest"): +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_app_profile_empty_call_rest(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + transport="rest", ) - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.list_hot_tablets( - bigtable_instance_admin.ListHotTabletsRequest(), - parent="parent_value", - ) + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_app_profile), "__call__") as call: + client.get_app_profile(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.GetAppProfileRequest() + assert args[0] == request_msg -def test_list_hot_tablets_rest_pager(transport: str = "rest"): + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_app_profiles_empty_call_rest(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + transport="rest", ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # TODO(kbandes): remove this mock unless there's a good reason for it. - # with mock.patch.object(path_template, 'transcode') as transcode: - # Set the response as a series of pages - response = ( - bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[ - instance.HotTablet(), - instance.HotTablet(), - instance.HotTablet(), - ], - next_page_token="abc", - ), - bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[], - next_page_token="def", - ), - bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[ - instance.HotTablet(), - ], - next_page_token="ghi", - ), - bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[ - instance.HotTablet(), - instance.HotTablet(), - ], - ), - ) - # Two responses for two calls - response = response + response + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_app_profiles), "__call__" + ) as call: + client.list_app_profiles(request=None) - # Wrap the values into proper Response objs - response = tuple( - bigtable_instance_admin.ListHotTabletsResponse.to_json(x) for x in response - ) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = response_val.encode("UTF-8") - return_val.status_code = 200 - req.side_effect = return_values + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.ListAppProfilesRequest() - sample_request = { - "parent": "projects/sample1/instances/sample2/clusters/sample3" - } + assert args[0] == request_msg - pager = client.list_hot_tablets(request=sample_request) - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, instance.HotTablet) for i in results) +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_update_app_profile_empty_call_rest(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) - pages = list(client.list_hot_tablets(request=sample_request).pages) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token + # Mock the actual call, and fake the request. 
+ with mock.patch.object( + type(client.transport.update_app_profile), "__call__" + ) as call: + client.update_app_profile(request=None) + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.UpdateAppProfileRequest() -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.BigtableInstanceAdminGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) + assert args[0] == request_msg - # It is an error to provide a credentials file and a transport instance. - transport = transports.BigtableInstanceAdminGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = BigtableInstanceAdminClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - # It is an error to provide an api_key and a transport instance. - transport = transports.BigtableInstanceAdminGrpcTransport( +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_app_profile_empty_call_rest(): + client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - options = client_options.ClientOptions() - options.api_key = "api_key" - with pytest.raises(ValueError): - client = BigtableInstanceAdminClient( - client_options=options, - transport=transport, - ) - # It is an error to provide an api_key and a credential. - options = client_options.ClientOptions() - options.api_key = "api_key" - with pytest.raises(ValueError): - client = BigtableInstanceAdminClient( - client_options=options, credentials=ga_credentials.AnonymousCredentials() - ) + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.delete_app_profile), "__call__" + ) as call: + client.delete_app_profile(request=None) - # It is an error to provide scopes and a transport instance. - transport = transports.BigtableInstanceAdminGrpcTransport( + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.DeleteAppProfileRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_iam_policy_empty_call_rest(): + client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - with pytest.raises(ValueError): - client = BigtableInstanceAdminClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + client.get_iam_policy(request=None) -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.BigtableInstanceAdminGrpcTransport( + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.GetIamPolicyRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_set_iam_policy_empty_call_rest(): + client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - client = BigtableInstanceAdminClient(transport=transport) - assert client.transport is transport + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + client.set_iam_policy(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.SetIamPolicyRequest() -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.BigtableInstanceAdminGrpcTransport( + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_test_iam_permissions_empty_call_rest(): + client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - channel = transport.grpc_channel - assert channel - transport = transports.BigtableInstanceAdminGrpcAsyncIOTransport( + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + client.test_iam_permissions(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.TestIamPermissionsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_hot_tablets_empty_call_rest(): + client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - channel = transport.grpc_channel - assert channel + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_hot_tablets), "__call__") as call: + client.list_hot_tablets(request=None) -@pytest.mark.parametrize( - "transport_class", - [ - transports.BigtableInstanceAdminGrpcTransport, - transports.BigtableInstanceAdminGrpcAsyncIOTransport, - transports.BigtableInstanceAdminRestTransport, - ], -) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, "default") as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.ListHotTabletsRequest() + assert args[0] == request_msg -@pytest.mark.parametrize( - "transport_name", - [ - "grpc", - "rest", - ], -) -def test_transport_kind(transport_name): - transport = BigtableInstanceAdminClient.get_transport_class(transport_name)( + +def test_bigtable_instance_admin_rest_lro_client(): + client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + transport = client.transport + + # Ensure that we have an api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.AbstractOperationsClient, ) - assert transport.kind == transport_name + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client def test_transport_grpc_default(): @@ -16824,23 +17204,6 @@ def test_bigtable_instance_admin_http_transport_client_cert_source_for_mtls(): mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) -def test_bigtable_instance_admin_rest_lro_client(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.AbstractOperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - @pytest.mark.parametrize( "transport_name", [ @@ -17422,36 +17785,41 @@ def test_client_with_default_client_info(): prep.assert_called_once_with(client_info) +def test_transport_close_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc" + ) + with mock.patch.object( + type(getattr(client.transport, "_grpc_channel")), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + @pytest.mark.asyncio -async def test_transport_close_async(): +async def test_transport_close_grpc_asyncio(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", + credentials=async_anonymous_credentials(), transport="grpc_asyncio" ) with mock.patch.object( - type(getattr(client.transport, "grpc_channel")), "close" + type(getattr(client.transport, "_grpc_channel")), "close" ) as close: async with client: close.assert_not_called() close.assert_called_once() -def test_transport_close(): - transports = { - "rest": "_session", - "grpc": "_grpc_channel", - } - - for transport, close_name in transports.items(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport - ) - with mock.patch.object( - type(getattr(client.transport, close_name)), "close" - ) as close: - with client: - close.assert_not_called() - close.assert_called_once() +def test_transport_close_rest(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + with mock.patch.object( + type(getattr(client.transport, "_session")), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() def test_client_ctx(): diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py 
b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index c9455cd5fe9e..53788921f411 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -24,7 +24,7 @@ import grpc from grpc.experimental import aio -from collections.abc import Iterable +from collections.abc import Iterable, AsyncIterable from google.protobuf import json_format import json import math @@ -37,6 +37,13 @@ from requests.sessions import Session from google.protobuf import json_format +try: + from google.auth.aio import credentials as ga_credentials_async + + HAS_GOOGLE_AUTH_AIO = True +except ImportError: # pragma: NO COVER + HAS_GOOGLE_AUTH_AIO = False + from google.api_core import client_options from google.api_core import exceptions as core_exceptions from google.api_core import future @@ -76,10 +83,24 @@ import google.auth +async def mock_async_gen(data, chunk_size=1): + for i in range(0, len(data)): # pragma: NO COVER + chunk = data[i : i + chunk_size] + yield chunk.encode("utf-8") + + def client_cert_source_callback(): return b"cert bytes", b"key bytes" +# TODO: use async auth anon credentials by default once the minimum version of google-auth is upgraded. +# See related issue: https://github.com/googleapis/gapic-generator-python/issues/2107. +def async_anonymous_credentials(): + if HAS_GOOGLE_AUTH_AIO: + return ga_credentials_async.AnonymousCredentials() + return ga_credentials.AnonymousCredentials() + + # If default endpoint is localhost, then default mtls endpoint will be the same. # This method modifies the default endpoint so the client can produce a different # mtls endpoint for endpoint testing purposes. @@ -332,86 +353,6 @@ def test__get_universe_domain(): assert str(excinfo.value) == "Universe Domain cannot be an empty string." -@pytest.mark.parametrize( - "client_class,transport_class,transport_name", - [ - (BigtableTableAdminClient, transports.BigtableTableAdminGrpcTransport, "grpc"), - (BigtableTableAdminClient, transports.BigtableTableAdminRestTransport, "rest"), - ], -) -def test__validate_universe_domain(client_class, transport_class, transport_name): - client = client_class( - transport=transport_class(credentials=ga_credentials.AnonymousCredentials()) - ) - assert client._validate_universe_domain() == True - - # Test the case when universe is already validated. - assert client._validate_universe_domain() == True - - if transport_name == "grpc": - # Test the case where credentials are provided by the - # `local_channel_credentials`. The default universes in both match. - channel = grpc.secure_channel( - "http://localhost/", grpc.local_channel_credentials() - ) - client = client_class(transport=transport_class(channel=channel)) - assert client._validate_universe_domain() == True - - # Test the case where credentials do not exist: e.g. a transport is provided - # with no credentials. Validation should still succeed because there is no - # mismatch with non-existent credentials. 
- channel = grpc.secure_channel( - "http://localhost/", grpc.local_channel_credentials() - ) - transport = transport_class(channel=channel) - transport._credentials = None - client = client_class(transport=transport) - assert client._validate_universe_domain() == True - - # TODO: This is needed to cater for older versions of google-auth - # Make this test unconditional once the minimum supported version of - # google-auth becomes 2.23.0 or higher. - google_auth_major, google_auth_minor = [ - int(part) for part in google.auth.__version__.split(".")[0:2] - ] - if google_auth_major > 2 or (google_auth_major == 2 and google_auth_minor >= 23): - credentials = ga_credentials.AnonymousCredentials() - credentials._universe_domain = "foo.com" - # Test the case when there is a universe mismatch from the credentials. - client = client_class(transport=transport_class(credentials=credentials)) - with pytest.raises(ValueError) as excinfo: - client._validate_universe_domain() - assert ( - str(excinfo.value) - == "The configured universe domain (googleapis.com) does not match the universe domain found in the credentials (foo.com). If you haven't configured the universe domain explicitly, `googleapis.com` is the default." - ) - - # Test the case when there is a universe mismatch from the client. - # - # TODO: Make this test unconditional once the minimum supported version of - # google-api-core becomes 2.15.0 or higher. - api_core_major, api_core_minor = [ - int(part) for part in api_core_version.__version__.split(".")[0:2] - ] - if api_core_major > 2 or (api_core_major == 2 and api_core_minor >= 15): - client = client_class( - client_options={"universe_domain": "bar.com"}, - transport=transport_class( - credentials=ga_credentials.AnonymousCredentials(), - ), - ) - with pytest.raises(ValueError) as excinfo: - client._validate_universe_domain() - assert ( - str(excinfo.value) - == "The configured universe domain (bar.com) does not match the universe domain found in the credentials (googleapis.com). If you haven't configured the universe domain explicitly, `googleapis.com` is the default." - ) - - # Test that ValueError is raised if universe_domain is provided via client options and credentials is None - with pytest.raises(ValueError): - client._compare_universes("foo.bar", None) - - @pytest.mark.parametrize( "client_class,transport_name", [ @@ -1230,25 +1171,6 @@ def test_create_table(request_type, transport: str = "grpc"): assert response.deletion_protection is True -def test_create_table_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.create_table), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.create_table() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CreateTableRequest() - - def test_create_table_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. 
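# --- Illustrative sketch (hedged): the consolidated "empty call" pattern. ---
# The regenerated tests replace the per-method *_empty_call tests being removed
# here with a single pattern: patch the transport method, invoke the client with
# request=None, and assert that a default-constructed request message reached the
# transport. The sketch below assumes the public imports this test module already
# uses; the _sketch_* helper name is hypothetical and not part of the patch.
from unittest import mock

from google.auth import credentials as ga_credentials
from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient
from google.cloud.bigtable_admin_v2.types import bigtable_table_admin


def _sketch_empty_call_rest():
    client = BigtableTableAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="rest"
    )
    # Patch the REST transport callable so no HTTP request is made.
    with mock.patch.object(type(client.transport.create_table), "__call__") as call:
        client.create_table(request=None)

    # The transport received a default CreateTableRequest.
    call.assert_called()
    _, args, _ = call.mock_calls[0]
    assert args[0] == bigtable_table_admin.CreateTableRequest()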
@@ -1314,31 +1236,6 @@ def test_create_table_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_create_table_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.create_table), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - gba_table.Table( - name="name_value", - granularity=gba_table.Table.TimestampGranularity.MILLIS, - deletion_protection=True, - ) - ) - response = await client.create_table() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CreateTableRequest() - - @pytest.mark.asyncio async def test_create_table_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -1347,7 +1244,7 @@ async def test_create_table_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -1387,7 +1284,7 @@ async def test_create_table_async( request_type=bigtable_table_admin.CreateTableRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -1457,7 +1354,7 @@ def test_create_table_field_headers(): @pytest.mark.asyncio async def test_create_table_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -1535,7 +1432,7 @@ def test_create_table_flattened_error(): @pytest.mark.asyncio async def test_create_table_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1570,7 +1467,7 @@ async def test_create_table_flattened_async(): @pytest.mark.asyncio async def test_create_table_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -1619,27 +1516,6 @@ def test_create_table_from_snapshot(request_type, transport: str = "grpc"): assert isinstance(response, future.Future) -def test_create_table_from_snapshot_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_table_from_snapshot), "__call__" - ) as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. 
- ) - client.create_table_from_snapshot() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CreateTableFromSnapshotRequest() - - def test_create_table_from_snapshot_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. @@ -1719,29 +1595,6 @@ def test_create_table_from_snapshot_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_create_table_from_snapshot_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_table_from_snapshot), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - response = await client.create_table_from_snapshot() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CreateTableFromSnapshotRequest() - - @pytest.mark.asyncio async def test_create_table_from_snapshot_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -1750,7 +1603,7 @@ async def test_create_table_from_snapshot_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -1795,7 +1648,7 @@ async def test_create_table_from_snapshot_async( request_type=bigtable_table_admin.CreateTableFromSnapshotRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -1862,7 +1715,7 @@ def test_create_table_from_snapshot_field_headers(): @pytest.mark.asyncio async def test_create_table_from_snapshot_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -1946,7 +1799,7 @@ def test_create_table_from_snapshot_flattened_error(): @pytest.mark.asyncio async def test_create_table_from_snapshot_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. 
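# --- Illustrative sketch (hedged): faking an async unary gRPC response. ---
# The async tests kept above stub the transport with
# grpc_helpers_async.FakeUnaryUnaryCall so that awaiting the mocked call yields a
# concrete message. A self-contained approximation follows; it uses plain
# AnonymousCredentials, whereas the regenerated tests switch to the module's
# async_anonymous_credentials() helper. The function name is hypothetical.
from unittest import mock

import pytest
from google.api_core import grpc_helpers_async
from google.auth import credentials as ga_credentials
from google.cloud.bigtable_admin_v2 import BigtableTableAdminAsyncClient
from google.cloud.bigtable_admin_v2.types import bigtable_table_admin, table


@pytest.mark.asyncio
async def _sketch_fake_unary_unary_call():
    client = BigtableTableAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio"
    )
    with mock.patch.object(type(client.transport.get_table), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            table.Table(name="name_value")
        )
        response = await client.get_table(
            request=bigtable_table_admin.GetTableRequest(name="name_value")
        )

    # Awaiting the fake call yields the wrapped Table message.
    assert response.name == "name_value"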
@@ -1985,7 +1838,7 @@ async def test_create_table_from_snapshot_flattened_async(): @pytest.mark.asyncio async def test_create_table_from_snapshot_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -2035,25 +1888,6 @@ def test_list_tables(request_type, transport: str = "grpc"): assert response.next_page_token == "next_page_token_value" -def test_list_tables_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_tables), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.list_tables() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ListTablesRequest() - - def test_list_tables_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. @@ -2119,29 +1953,6 @@ def test_list_tables_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_list_tables_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_tables), "__call__") as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.ListTablesResponse( - next_page_token="next_page_token_value", - ) - ) - response = await client.list_tables() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ListTablesRequest() - - @pytest.mark.asyncio async def test_list_tables_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -2150,7 +1961,7 @@ async def test_list_tables_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -2189,7 +2000,7 @@ async def test_list_tables_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.ListTablesRequest ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -2255,7 +2066,7 @@ def test_list_tables_field_headers(): @pytest.mark.asyncio async def test_list_tables_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -2325,7 +2136,7 @@ def test_list_tables_flattened_error(): @pytest.mark.asyncio async def test_list_tables_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -2354,7 +2165,7 @@ async def test_list_tables_flattened_async(): @pytest.mark.asyncio async def test_list_tables_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -2464,7 +2275,7 @@ def test_list_tables_pages(transport_name: str = "grpc"): @pytest.mark.asyncio async def test_list_tables_async_pager(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -2514,7 +2325,7 @@ async def test_list_tables_async_pager(): @pytest.mark.asyncio async def test_list_tables_async_pages(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -2600,25 +2411,6 @@ def test_get_table(request_type, transport: str = "grpc"): assert response.deletion_protection is True -def test_get_table_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_table), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. 
- ) - client.get_table() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GetTableRequest() - - def test_get_table_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. @@ -2682,38 +2474,13 @@ def test_get_table_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_get_table_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_table), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - table.Table( - name="name_value", - granularity=table.Table.TimestampGranularity.MILLIS, - deletion_protection=True, - ) - ) - response = await client.get_table() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GetTableRequest() - - @pytest.mark.asyncio async def test_get_table_async_use_cached_wrapped_rpc(transport: str = "grpc_asyncio"): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -2752,7 +2519,7 @@ async def test_get_table_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.GetTableRequest ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -2822,7 +2589,7 @@ def test_get_table_field_headers(): @pytest.mark.asyncio async def test_get_table_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -2890,7 +2657,7 @@ def test_get_table_flattened_error(): @pytest.mark.asyncio async def test_get_table_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -2917,7 +2684,7 @@ async def test_get_table_flattened_async(): @pytest.mark.asyncio async def test_get_table_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -2962,25 +2729,6 @@ def test_update_table(request_type, transport: str = "grpc"): assert isinstance(response, future.Future) -def test_update_table_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
- client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.update_table), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.update_table() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.UpdateTableRequest() - - def test_update_table_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. @@ -3045,27 +2793,6 @@ def test_update_table_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_update_table_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.update_table), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - response = await client.update_table() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.UpdateTableRequest() - - @pytest.mark.asyncio async def test_update_table_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -3074,7 +2801,7 @@ async def test_update_table_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -3119,7 +2846,7 @@ async def test_update_table_async( request_type=bigtable_table_admin.UpdateTableRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -3182,7 +2909,7 @@ def test_update_table_field_headers(): @pytest.mark.asyncio async def test_update_table_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -3257,7 +2984,7 @@ def test_update_table_flattened_error(): @pytest.mark.asyncio async def test_update_table_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. 
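# --- Illustrative sketch (hedged): the "flattened error" assertion. ---
# Many tests in this file verify that passing a request object together with
# flattened keyword arguments raises ValueError before any RPC is attempted.
# A compact, self-contained version of that check; the function name is
# hypothetical and the imports mirror the ones used throughout this module.
import pytest
from google.auth import credentials as ga_credentials
from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient
from google.cloud.bigtable_admin_v2.types import bigtable_table_admin


def _sketch_flattened_error():
    client = BigtableTableAdminClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc"
    )
    # Supplying both a request message and the flattened `name` field is
    # rejected client-side.
    with pytest.raises(ValueError):
        client.delete_table(
            bigtable_table_admin.DeleteTableRequest(),
            name="name_value",
        )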
@@ -3290,7 +3017,7 @@ async def test_update_table_flattened_async(): @pytest.mark.asyncio async def test_update_table_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -3336,25 +3063,6 @@ def test_delete_table(request_type, transport: str = "grpc"): assert response is None -def test_delete_table_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_table), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.delete_table() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DeleteTableRequest() - - def test_delete_table_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. @@ -3418,25 +3126,6 @@ def test_delete_table_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_delete_table_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_table), "__call__") as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.delete_table() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DeleteTableRequest() - - @pytest.mark.asyncio async def test_delete_table_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -3445,7 +3134,7 @@ async def test_delete_table_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -3485,7 +3174,7 @@ async def test_delete_table_async( request_type=bigtable_table_admin.DeleteTableRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -3546,7 +3235,7 @@ def test_delete_table_field_headers(): @pytest.mark.asyncio async def test_delete_table_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -3614,7 +3303,7 @@ def test_delete_table_flattened_error(): @pytest.mark.asyncio async def test_delete_table_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -3641,7 +3330,7 @@ async def test_delete_table_flattened_async(): @pytest.mark.asyncio async def test_delete_table_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -3686,25 +3375,6 @@ def test_undelete_table(request_type, transport: str = "grpc"): assert isinstance(response, future.Future) -def test_undelete_table_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.undelete_table), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.undelete_table() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.UndeleteTableRequest() - - def test_undelete_table_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. @@ -3773,27 +3443,6 @@ def test_undelete_table_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_undelete_table_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
- client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.undelete_table), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - response = await client.undelete_table() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.UndeleteTableRequest() - - @pytest.mark.asyncio async def test_undelete_table_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -3802,7 +3451,7 @@ async def test_undelete_table_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -3847,7 +3496,7 @@ async def test_undelete_table_async( request_type=bigtable_table_admin.UndeleteTableRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -3910,7 +3559,7 @@ def test_undelete_table_field_headers(): @pytest.mark.asyncio async def test_undelete_table_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -3980,7 +3629,7 @@ def test_undelete_table_flattened_error(): @pytest.mark.asyncio async def test_undelete_table_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -4009,7 +3658,7 @@ async def test_undelete_table_flattened_async(): @pytest.mark.asyncio async def test_undelete_table_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -4056,27 +3705,6 @@ def test_create_authorized_view(request_type, transport: str = "grpc"): assert isinstance(response, future.Future) -def test_create_authorized_view_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_authorized_view), "__call__" - ) as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.create_authorized_view() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CreateAuthorizedViewRequest() - - def test_create_authorized_view_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. 
@@ -4154,29 +3782,6 @@ def test_create_authorized_view_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_create_authorized_view_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_authorized_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - response = await client.create_authorized_view() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CreateAuthorizedViewRequest() - - @pytest.mark.asyncio async def test_create_authorized_view_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -4185,7 +3790,7 @@ async def test_create_authorized_view_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -4230,7 +3835,7 @@ async def test_create_authorized_view_async( request_type=bigtable_table_admin.CreateAuthorizedViewRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -4297,7 +3902,7 @@ def test_create_authorized_view_field_headers(): @pytest.mark.asyncio async def test_create_authorized_view_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -4381,7 +3986,7 @@ def test_create_authorized_view_flattened_error(): @pytest.mark.asyncio async def test_create_authorized_view_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -4420,7 +4025,7 @@ async def test_create_authorized_view_flattened_async(): @pytest.mark.asyncio async def test_create_authorized_view_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -4472,27 +4077,6 @@ def test_list_authorized_views(request_type, transport: str = "grpc"): assert response.next_page_token == "next_page_token_value" -def test_list_authorized_views_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_authorized_views), "__call__" - ) as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.list_authorized_views() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ListAuthorizedViewsRequest() - - def test_list_authorized_views_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. @@ -4565,31 +4149,6 @@ def test_list_authorized_views_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_list_authorized_views_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_authorized_views), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.ListAuthorizedViewsResponse( - next_page_token="next_page_token_value", - ) - ) - response = await client.list_authorized_views() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ListAuthorizedViewsRequest() - - @pytest.mark.asyncio async def test_list_authorized_views_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -4598,7 +4157,7 @@ async def test_list_authorized_views_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -4638,7 +4197,7 @@ async def test_list_authorized_views_async( request_type=bigtable_table_admin.ListAuthorizedViewsRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -4708,7 +4267,7 @@ def test_list_authorized_views_field_headers(): @pytest.mark.asyncio async def test_list_authorized_views_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -4782,7 +4341,7 @@ def test_list_authorized_views_flattened_error(): @pytest.mark.asyncio async def test_list_authorized_views_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -4813,7 +4372,7 @@ async def test_list_authorized_views_flattened_async(): @pytest.mark.asyncio async def test_list_authorized_views_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -4927,7 +4486,7 @@ def test_list_authorized_views_pages(transport_name: str = "grpc"): @pytest.mark.asyncio async def test_list_authorized_views_async_pager(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -4979,7 +4538,7 @@ async def test_list_authorized_views_async_pager(): @pytest.mark.asyncio async def test_list_authorized_views_async_pages(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -5069,27 +4628,6 @@ def test_get_authorized_view(request_type, transport: str = "grpc"): assert response.deletion_protection is True -def test_get_authorized_view_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_authorized_view), "__call__" - ) as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.get_authorized_view() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GetAuthorizedViewRequest() - - def test_get_authorized_view_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. @@ -5159,33 +4697,6 @@ def test_get_authorized_view_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_get_authorized_view_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_authorized_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - table.AuthorizedView( - name="name_value", - etag="etag_value", - deletion_protection=True, - ) - ) - response = await client.get_authorized_view() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GetAuthorizedViewRequest() - - @pytest.mark.asyncio async def test_get_authorized_view_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -5194,7 +4705,7 @@ async def test_get_authorized_view_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -5234,7 +4745,7 @@ async def test_get_authorized_view_async( request_type=bigtable_table_admin.GetAuthorizedViewRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -5308,7 +4819,7 @@ def test_get_authorized_view_field_headers(): @pytest.mark.asyncio async def test_get_authorized_view_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -5382,7 +4893,7 @@ def test_get_authorized_view_flattened_error(): @pytest.mark.asyncio async def test_get_authorized_view_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -5413,7 +4924,7 @@ async def test_get_authorized_view_flattened_async(): @pytest.mark.asyncio async def test_get_authorized_view_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -5460,27 +4971,6 @@ def test_update_authorized_view(request_type, transport: str = "grpc"): assert isinstance(response, future.Future) -def test_update_authorized_view_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_authorized_view), "__call__" - ) as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.update_authorized_view() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.UpdateAuthorizedViewRequest() - - def test_update_authorized_view_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. 
@@ -5552,29 +5042,6 @@ def test_update_authorized_view_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_update_authorized_view_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_authorized_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - response = await client.update_authorized_view() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.UpdateAuthorizedViewRequest() - - @pytest.mark.asyncio async def test_update_authorized_view_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -5583,7 +5050,7 @@ async def test_update_authorized_view_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -5628,7 +5095,7 @@ async def test_update_authorized_view_async( request_type=bigtable_table_admin.UpdateAuthorizedViewRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -5695,7 +5162,7 @@ def test_update_authorized_view_field_headers(): @pytest.mark.asyncio async def test_update_authorized_view_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -5774,7 +5241,7 @@ def test_update_authorized_view_flattened_error(): @pytest.mark.asyncio async def test_update_authorized_view_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -5809,7 +5276,7 @@ async def test_update_authorized_view_flattened_async(): @pytest.mark.asyncio async def test_update_authorized_view_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -5857,27 +5324,6 @@ def test_delete_authorized_view(request_type, transport: str = "grpc"): assert response is None -def test_delete_authorized_view_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_authorized_view), "__call__" - ) as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.delete_authorized_view() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DeleteAuthorizedViewRequest() - - def test_delete_authorized_view_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. @@ -5950,27 +5396,6 @@ def test_delete_authorized_view_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_delete_authorized_view_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_authorized_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.delete_authorized_view() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DeleteAuthorizedViewRequest() - - @pytest.mark.asyncio async def test_delete_authorized_view_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -5979,7 +5404,7 @@ async def test_delete_authorized_view_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -6019,7 +5444,7 @@ async def test_delete_authorized_view_async( request_type=bigtable_table_admin.DeleteAuthorizedViewRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -6084,7 +5509,7 @@ def test_delete_authorized_view_field_headers(): @pytest.mark.asyncio async def test_delete_authorized_view_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -6156,7 +5581,7 @@ def test_delete_authorized_view_flattened_error(): @pytest.mark.asyncio async def test_delete_authorized_view_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -6185,7 +5610,7 @@ async def test_delete_authorized_view_flattened_async(): @pytest.mark.asyncio async def test_delete_authorized_view_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -6239,30 +5664,9 @@ def test_modify_column_families(request_type, transport: str = "grpc"): assert response.deletion_protection is True -def test_modify_column_families_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.modify_column_families), "__call__" - ) as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.modify_column_families() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ModifyColumnFamiliesRequest() - - -def test_modify_column_families_non_empty_request_with_auto_populated_field(): - # This test is a coverage failsafe to make sure that UUID4 fields are - # automatically populated, according to AIP-4235, with non-empty requests. +def test_modify_column_families_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", @@ -6330,33 +5734,6 @@ def test_modify_column_families_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_modify_column_families_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.modify_column_families), "__call__" - ) as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - table.Table( - name="name_value", - granularity=table.Table.TimestampGranularity.MILLIS, - deletion_protection=True, - ) - ) - response = await client.modify_column_families() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ModifyColumnFamiliesRequest() - - @pytest.mark.asyncio async def test_modify_column_families_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -6365,7 +5742,7 @@ async def test_modify_column_families_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -6405,7 +5782,7 @@ async def test_modify_column_families_async( request_type=bigtable_table_admin.ModifyColumnFamiliesRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -6479,7 +5856,7 @@ def test_modify_column_families_field_headers(): @pytest.mark.asyncio async def test_modify_column_families_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -6566,7 +5943,7 @@ def test_modify_column_families_flattened_error(): @pytest.mark.asyncio async def test_modify_column_families_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -6605,7 +5982,7 @@ async def test_modify_column_families_flattened_async(): @pytest.mark.asyncio async def test_modify_column_families_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -6655,25 +6032,6 @@ def test_drop_row_range(request_type, transport: str = "grpc"): assert response is None -def test_drop_row_range_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.drop_row_range() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DropRowRangeRequest() - - def test_drop_row_range_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. @@ -6737,25 +6095,6 @@ def test_drop_row_range_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_drop_row_range_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. 
request == None and no flattened fields passed, work. - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.drop_row_range() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DropRowRangeRequest() - - @pytest.mark.asyncio async def test_drop_row_range_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -6764,7 +6103,7 @@ async def test_drop_row_range_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -6804,7 +6143,7 @@ async def test_drop_row_range_async( request_type=bigtable_table_admin.DropRowRangeRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -6865,7 +6204,7 @@ def test_drop_row_range_field_headers(): @pytest.mark.asyncio async def test_drop_row_range_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -6930,27 +6269,6 @@ def test_generate_consistency_token(request_type, transport: str = "grpc"): assert response.consistency_token == "consistency_token_value" -def test_generate_consistency_token_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.generate_consistency_token), "__call__" - ) as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.generate_consistency_token() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GenerateConsistencyTokenRequest() - - def test_generate_consistency_token_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. @@ -7021,31 +6339,6 @@ def test_generate_consistency_token_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_generate_consistency_token_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.generate_consistency_token), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.GenerateConsistencyTokenResponse( - consistency_token="consistency_token_value", - ) - ) - response = await client.generate_consistency_token() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GenerateConsistencyTokenRequest() - - @pytest.mark.asyncio async def test_generate_consistency_token_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -7054,7 +6347,7 @@ async def test_generate_consistency_token_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -7094,7 +6387,7 @@ async def test_generate_consistency_token_async( request_type=bigtable_table_admin.GenerateConsistencyTokenRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -7164,7 +6457,7 @@ def test_generate_consistency_token_field_headers(): @pytest.mark.asyncio async def test_generate_consistency_token_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -7238,7 +6531,7 @@ def test_generate_consistency_token_flattened_error(): @pytest.mark.asyncio async def test_generate_consistency_token_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -7269,7 +6562,7 @@ async def test_generate_consistency_token_flattened_async(): @pytest.mark.asyncio async def test_generate_consistency_token_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -7319,27 +6612,6 @@ def test_check_consistency(request_type, transport: str = "grpc"): assert response.consistent is True -def test_check_consistency_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.check_consistency), "__call__" - ) as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.check_consistency() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CheckConsistencyRequest() - - def test_check_consistency_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. 
@@ -7409,31 +6681,6 @@ def test_check_consistency_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_check_consistency_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.check_consistency), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.CheckConsistencyResponse( - consistent=True, - ) - ) - response = await client.check_consistency() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CheckConsistencyRequest() - - @pytest.mark.asyncio async def test_check_consistency_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -7442,7 +6689,7 @@ async def test_check_consistency_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -7482,7 +6729,7 @@ async def test_check_consistency_async( request_type=bigtable_table_admin.CheckConsistencyRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -7552,7 +6799,7 @@ def test_check_consistency_field_headers(): @pytest.mark.asyncio async def test_check_consistency_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -7631,7 +6878,7 @@ def test_check_consistency_flattened_error(): @pytest.mark.asyncio async def test_check_consistency_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -7666,7 +6913,7 @@ async def test_check_consistency_flattened_async(): @pytest.mark.asyncio async def test_check_consistency_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -7712,25 +6959,6 @@ def test_snapshot_table(request_type, transport: str = "grpc"): assert isinstance(response, future.Future) -def test_snapshot_table_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. 
- ) - client.snapshot_table() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.SnapshotTableRequest() - - def test_snapshot_table_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. @@ -7805,27 +7033,6 @@ def test_snapshot_table_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_snapshot_table_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - response = await client.snapshot_table() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.SnapshotTableRequest() - - @pytest.mark.asyncio async def test_snapshot_table_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -7834,7 +7041,7 @@ async def test_snapshot_table_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -7879,7 +7086,7 @@ async def test_snapshot_table_async( request_type=bigtable_table_admin.SnapshotTableRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -7942,7 +7149,7 @@ def test_snapshot_table_field_headers(): @pytest.mark.asyncio async def test_snapshot_table_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -8027,7 +7234,7 @@ def test_snapshot_table_flattened_error(): @pytest.mark.asyncio async def test_snapshot_table_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -8068,7 +7275,7 @@ async def test_snapshot_table_flattened_async(): @pytest.mark.asyncio async def test_snapshot_table_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -8125,25 +7332,6 @@ def test_get_snapshot(request_type, transport: str = "grpc"): assert response.description == "description_value" -def test_get_snapshot_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
- client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.get_snapshot() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GetSnapshotRequest() - - def test_get_snapshot_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. @@ -8207,32 +7395,6 @@ def test_get_snapshot_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_get_snapshot_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - table.Snapshot( - name="name_value", - data_size_bytes=1594, - state=table.Snapshot.State.READY, - description="description_value", - ) - ) - response = await client.get_snapshot() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GetSnapshotRequest() - - @pytest.mark.asyncio async def test_get_snapshot_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -8241,7 +7403,7 @@ async def test_get_snapshot_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -8281,7 +7443,7 @@ async def test_get_snapshot_async( request_type=bigtable_table_admin.GetSnapshotRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -8353,7 +7515,7 @@ def test_get_snapshot_field_headers(): @pytest.mark.asyncio async def test_get_snapshot_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -8421,7 +7583,7 @@ def test_get_snapshot_flattened_error(): @pytest.mark.asyncio async def test_get_snapshot_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -8448,7 +7610,7 @@ async def test_get_snapshot_flattened_async(): @pytest.mark.asyncio async def test_get_snapshot_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -8496,25 +7658,6 @@ def test_list_snapshots(request_type, transport: str = "grpc"): assert response.next_page_token == "next_page_token_value" -def test_list_snapshots_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.list_snapshots() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ListSnapshotsRequest() - - def test_list_snapshots_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. @@ -8581,39 +7724,16 @@ def test_list_snapshots_use_cached_wrapped_rpc(): @pytest.mark.asyncio -async def test_list_snapshots_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.ListSnapshotsResponse( - next_page_token="next_page_token_value", - ) - ) - response = await client.list_snapshots() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ListSnapshotsRequest() - - -@pytest.mark.asyncio -async def test_list_snapshots_async_use_cached_wrapped_rpc( - transport: str = "grpc_asyncio", -): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) +async def test_list_snapshots_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) # Should wrap all calls on client creation assert wrapper_fn.call_count > 0 @@ -8651,7 +7771,7 @@ async def test_list_snapshots_async( request_type=bigtable_table_admin.ListSnapshotsRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -8717,7 +7837,7 @@ def test_list_snapshots_field_headers(): @pytest.mark.asyncio async def test_list_snapshots_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -8787,7 +7907,7 @@ def test_list_snapshots_flattened_error(): @pytest.mark.asyncio async def test_list_snapshots_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -8816,7 +7936,7 @@ async def test_list_snapshots_flattened_async(): @pytest.mark.asyncio async def test_list_snapshots_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -8926,7 +8046,7 @@ def test_list_snapshots_pages(transport_name: str = "grpc"): @pytest.mark.asyncio async def test_list_snapshots_async_pager(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -8976,7 +8096,7 @@ async def test_list_snapshots_async_pager(): @pytest.mark.asyncio async def test_list_snapshots_async_pages(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -9055,25 +8175,6 @@ def test_delete_snapshot(request_type, transport: str = "grpc"): assert response is None -def test_delete_snapshot_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. 
request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.delete_snapshot() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DeleteSnapshotRequest() - - def test_delete_snapshot_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. @@ -9137,25 +8238,6 @@ def test_delete_snapshot_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_delete_snapshot_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.delete_snapshot() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DeleteSnapshotRequest() - - @pytest.mark.asyncio async def test_delete_snapshot_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -9164,7 +8246,7 @@ async def test_delete_snapshot_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -9204,7 +8286,7 @@ async def test_delete_snapshot_async( request_type=bigtable_table_admin.DeleteSnapshotRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -9265,7 +8347,7 @@ def test_delete_snapshot_field_headers(): @pytest.mark.asyncio async def test_delete_snapshot_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -9333,7 +8415,7 @@ def test_delete_snapshot_flattened_error(): @pytest.mark.asyncio async def test_delete_snapshot_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -9360,7 +8442,7 @@ async def test_delete_snapshot_flattened_async(): @pytest.mark.asyncio async def test_delete_snapshot_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -9405,25 +8487,6 @@ def test_create_backup(request_type, transport: str = "grpc"): assert isinstance(response, future.Future) -def test_create_backup_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.create_backup), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.create_backup() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CreateBackupRequest() - - def test_create_backup_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. @@ -9494,27 +8557,6 @@ def test_create_backup_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_create_backup_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.create_backup), "__call__") as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - response = await client.create_backup() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CreateBackupRequest() - - @pytest.mark.asyncio async def test_create_backup_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -9523,7 +8565,7 @@ async def test_create_backup_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -9568,7 +8610,7 @@ async def test_create_backup_async( request_type=bigtable_table_admin.CreateBackupRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -9631,7 +8673,7 @@ def test_create_backup_field_headers(): @pytest.mark.asyncio async def test_create_backup_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -9711,7 +8753,7 @@ def test_create_backup_flattened_error(): @pytest.mark.asyncio async def test_create_backup_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -9748,7 +8790,7 @@ async def test_create_backup_flattened_async(): @pytest.mark.asyncio async def test_create_backup_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -9808,25 +8850,6 @@ def test_get_backup(request_type, transport: str = "grpc"): assert response.backup_type == table.Backup.BackupType.STANDARD -def test_get_backup_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_backup), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.get_backup() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GetBackupRequest() - - def test_get_backup_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. @@ -9890,41 +8913,13 @@ def test_get_backup_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_get_backup_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
- client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_backup), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - table.Backup( - name="name_value", - source_table="source_table_value", - source_backup="source_backup_value", - size_bytes=1089, - state=table.Backup.State.CREATING, - backup_type=table.Backup.BackupType.STANDARD, - ) - ) - response = await client.get_backup() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GetBackupRequest() - - @pytest.mark.asyncio async def test_get_backup_async_use_cached_wrapped_rpc(transport: str = "grpc_asyncio"): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -9963,7 +8958,7 @@ async def test_get_backup_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.GetBackupRequest ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -10039,7 +9034,7 @@ def test_get_backup_field_headers(): @pytest.mark.asyncio async def test_get_backup_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -10107,7 +9102,7 @@ def test_get_backup_flattened_error(): @pytest.mark.asyncio async def test_get_backup_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -10134,7 +9129,7 @@ async def test_get_backup_flattened_async(): @pytest.mark.asyncio async def test_get_backup_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -10192,25 +9187,6 @@ def test_update_backup(request_type, transport: str = "grpc"): assert response.backup_type == table.Backup.BackupType.STANDARD -def test_update_backup_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.update_backup), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. 
- ) - client.update_backup() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.UpdateBackupRequest() - - def test_update_backup_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. @@ -10270,34 +9246,6 @@ def test_update_backup_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_update_backup_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.update_backup), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - table.Backup( - name="name_value", - source_table="source_table_value", - source_backup="source_backup_value", - size_bytes=1089, - state=table.Backup.State.CREATING, - backup_type=table.Backup.BackupType.STANDARD, - ) - ) - response = await client.update_backup() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.UpdateBackupRequest() - - @pytest.mark.asyncio async def test_update_backup_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -10306,7 +9254,7 @@ async def test_update_backup_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -10346,7 +9294,7 @@ async def test_update_backup_async( request_type=bigtable_table_admin.UpdateBackupRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -10422,7 +9370,7 @@ def test_update_backup_field_headers(): @pytest.mark.asyncio async def test_update_backup_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -10495,7 +9443,7 @@ def test_update_backup_flattened_error(): @pytest.mark.asyncio async def test_update_backup_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -10526,7 +9474,7 @@ async def test_update_backup_flattened_async(): @pytest.mark.asyncio async def test_update_backup_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -10572,25 +9520,6 @@ def test_delete_backup(request_type, transport: str = "grpc"): assert response is None -def test_delete_backup_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. 
request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.delete_backup() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DeleteBackupRequest() - - def test_delete_backup_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. @@ -10654,25 +9583,6 @@ def test_delete_backup_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_delete_backup_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.delete_backup() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DeleteBackupRequest() - - @pytest.mark.asyncio async def test_delete_backup_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -10681,7 +9591,7 @@ async def test_delete_backup_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -10721,7 +9631,7 @@ async def test_delete_backup_async( request_type=bigtable_table_admin.DeleteBackupRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -10782,7 +9692,7 @@ def test_delete_backup_field_headers(): @pytest.mark.asyncio async def test_delete_backup_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -10850,7 +9760,7 @@ def test_delete_backup_flattened_error(): @pytest.mark.asyncio async def test_delete_backup_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -10877,7 +9787,7 @@ async def test_delete_backup_flattened_async(): @pytest.mark.asyncio async def test_delete_backup_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -10925,25 +9835,6 @@ def test_list_backups(request_type, transport: str = "grpc"): assert response.next_page_token == "next_page_token_value" -def test_list_backups_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_backups), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.list_backups() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ListBackupsRequest() - - def test_list_backups_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. @@ -11013,29 +9904,6 @@ def test_list_backups_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_list_backups_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.list_backups), "__call__") as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.ListBackupsResponse( - next_page_token="next_page_token_value", - ) - ) - response = await client.list_backups() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ListBackupsRequest() - - @pytest.mark.asyncio async def test_list_backups_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -11044,7 +9912,7 @@ async def test_list_backups_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -11084,7 +9952,7 @@ async def test_list_backups_async( request_type=bigtable_table_admin.ListBackupsRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -11150,7 +10018,7 @@ def test_list_backups_field_headers(): @pytest.mark.asyncio async def test_list_backups_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -11220,7 +10088,7 @@ def test_list_backups_flattened_error(): @pytest.mark.asyncio async def test_list_backups_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -11249,7 +10117,7 @@ async def test_list_backups_flattened_async(): @pytest.mark.asyncio async def test_list_backups_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -11359,7 +10227,7 @@ def test_list_backups_pages(transport_name: str = "grpc"): @pytest.mark.asyncio async def test_list_backups_async_pager(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -11409,7 +10277,7 @@ async def test_list_backups_async_pager(): @pytest.mark.asyncio async def test_list_backups_async_pages(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -11488,25 +10356,6 @@ def test_restore_table(request_type, transport: str = "grpc"): assert isinstance(response, future.Future) -def test_restore_table_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.restore_table), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. 
- ) - client.restore_table() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.RestoreTableRequest() - - def test_restore_table_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. @@ -11579,27 +10428,6 @@ def test_restore_table_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_restore_table_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.restore_table), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - response = await client.restore_table() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.RestoreTableRequest() - - @pytest.mark.asyncio async def test_restore_table_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -11608,7 +10436,7 @@ async def test_restore_table_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -11653,7 +10481,7 @@ async def test_restore_table_async( request_type=bigtable_table_admin.RestoreTableRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -11716,7 +10544,7 @@ def test_restore_table_field_headers(): @pytest.mark.asyncio async def test_restore_table_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -11778,25 +10606,6 @@ def test_copy_backup(request_type, transport: str = "grpc"): assert isinstance(response, future.Future) -def test_copy_backup_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.copy_backup() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CopyBackupRequest() - - def test_copy_backup_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. 
@@ -11869,27 +10678,6 @@ def test_copy_backup_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_copy_backup_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - response = await client.copy_backup() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CopyBackupRequest() - - @pytest.mark.asyncio async def test_copy_backup_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -11898,7 +10686,7 @@ async def test_copy_backup_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -11942,7 +10730,7 @@ async def test_copy_backup_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.CopyBackupRequest ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -12005,7 +10793,7 @@ def test_copy_backup_field_headers(): @pytest.mark.asyncio async def test_copy_backup_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -12090,7 +10878,7 @@ def test_copy_backup_flattened_error(): @pytest.mark.asyncio async def test_copy_backup_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -12131,7 +10919,7 @@ async def test_copy_backup_flattened_async(): @pytest.mark.asyncio async def test_copy_backup_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -12184,25 +10972,6 @@ def test_get_iam_policy(request_type, transport: str = "grpc"): assert response.etag == b"etag_blob" -def test_get_iam_policy_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. 
- ) - client.get_iam_policy() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.GetIamPolicyRequest() - - def test_get_iam_policy_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. @@ -12266,30 +11035,6 @@ def test_get_iam_policy_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_get_iam_policy_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - policy_pb2.Policy( - version=774, - etag=b"etag_blob", - ) - ) - response = await client.get_iam_policy() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.GetIamPolicyRequest() - - @pytest.mark.asyncio async def test_get_iam_policy_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -12298,7 +11043,7 @@ async def test_get_iam_policy_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -12337,7 +11082,7 @@ async def test_get_iam_policy_async( transport: str = "grpc_asyncio", request_type=iam_policy_pb2.GetIamPolicyRequest ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -12405,7 +11150,7 @@ def test_get_iam_policy_field_headers(): @pytest.mark.asyncio async def test_get_iam_policy_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -12490,7 +11235,7 @@ def test_get_iam_policy_flattened_error(): @pytest.mark.asyncio async def test_get_iam_policy_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -12517,7 +11262,7 @@ async def test_get_iam_policy_flattened_async(): @pytest.mark.asyncio async def test_get_iam_policy_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -12567,25 +11312,6 @@ def test_set_iam_policy(request_type, transport: str = "grpc"): assert response.etag == b"etag_blob" -def test_set_iam_policy_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
- client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.set_iam_policy() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.SetIamPolicyRequest() - - def test_set_iam_policy_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. @@ -12649,30 +11375,6 @@ def test_set_iam_policy_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_set_iam_policy_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - policy_pb2.Policy( - version=774, - etag=b"etag_blob", - ) - ) - response = await client.set_iam_policy() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.SetIamPolicyRequest() - - @pytest.mark.asyncio async def test_set_iam_policy_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -12681,7 +11383,7 @@ async def test_set_iam_policy_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -12720,7 +11422,7 @@ async def test_set_iam_policy_async( transport: str = "grpc_asyncio", request_type=iam_policy_pb2.SetIamPolicyRequest ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -12788,7 +11490,7 @@ def test_set_iam_policy_field_headers(): @pytest.mark.asyncio async def test_set_iam_policy_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -12874,7 +11576,7 @@ def test_set_iam_policy_flattened_error(): @pytest.mark.asyncio async def test_set_iam_policy_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -12901,7 +11603,7 @@ async def test_set_iam_policy_flattened_async(): @pytest.mark.asyncio async def test_set_iam_policy_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -12951,27 +11653,6 @@ def test_test_iam_permissions(request_type, transport: str = "grpc"): assert response.permissions == ["permissions_value"] -def test_test_iam_permissions_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.test_iam_permissions), "__call__" - ) as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.test_iam_permissions() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.TestIamPermissionsRequest() - - def test_test_iam_permissions_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. @@ -13041,31 +11722,6 @@ def test_test_iam_permissions_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_test_iam_permissions_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.test_iam_permissions), "__call__" - ) as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - iam_policy_pb2.TestIamPermissionsResponse( - permissions=["permissions_value"], - ) - ) - response = await client.test_iam_permissions() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.TestIamPermissionsRequest() - - @pytest.mark.asyncio async def test_test_iam_permissions_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -13074,7 +11730,7 @@ async def test_test_iam_permissions_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -13114,7 +11770,7 @@ async def test_test_iam_permissions_async( request_type=iam_policy_pb2.TestIamPermissionsRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -13184,7 +11840,7 @@ def test_test_iam_permissions_field_headers(): @pytest.mark.asyncio async def test_test_iam_permissions_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -13282,7 +11938,7 @@ def test_test_iam_permissions_flattened_error(): @pytest.mark.asyncio async def test_test_iam_permissions_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -13317,7 +11973,7 @@ async def test_test_iam_permissions_flattened_async(): @pytest.mark.asyncio async def test_test_iam_permissions_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -13330,50 +11986,6 @@ async def test_test_iam_permissions_flattened_error_async(): ) -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.CreateTableRequest, - dict, - ], -) -def test_create_table_rest(request_type): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = gba_table.Table( - name="name_value", - granularity=gba_table.Table.TimestampGranularity.MILLIS, - deletion_protection=True, - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = gba_table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.create_table(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gba_table.Table) - assert response.name == "name_value" - assert response.granularity == gba_table.Table.TimestampGranularity.MILLIS - assert response.deletion_protection is True - - def test_create_table_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call @@ -13507,89 +12119,10 @@ def test_create_table_rest_unset_required_fields(): ) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_create_table_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( +def test_create_table_rest_flattened(): + client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_create_table" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_create_table" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_table_admin.CreateTableRequest.pb( - bigtable_table_admin.CreateTableRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = gba_table.Table.to_json(gba_table.Table()) - - request = bigtable_table_admin.CreateTableRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = gba_table.Table() - - client.create_table( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - - -def test_create_table_rest_bad_request( - transport: str = "rest", request_type=bigtable_table_admin.CreateTableRequest -): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.create_table(request) - - -def test_create_table_rest_flattened(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport="rest", ) # Mock the http request call within the method and fake a response. 
@@ -13646,47 +12179,6 @@ def test_create_table_rest_flattened_error(transport: str = "rest"): ) -def test_create_table_rest_error(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.CreateTableFromSnapshotRequest, - dict, - ], -) -def test_create_table_from_snapshot_rest(request_type): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.create_table_from_snapshot(request) - - # Establish that the response is the type that we expect. - assert response.operation.name == "operations/spam" - - def test_create_table_from_snapshot_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call @@ -13830,90 +12322,6 @@ def test_create_table_from_snapshot_rest_unset_required_fields(): ) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_create_table_from_snapshot_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_create_table_from_snapshot" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_create_table_from_snapshot" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_table_admin.CreateTableFromSnapshotRequest.pb( - bigtable_table_admin.CreateTableFromSnapshotRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson( - operations_pb2.Operation() - ) - - request = bigtable_table_admin.CreateTableFromSnapshotRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - - client.create_table_from_snapshot( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - - -def test_create_table_from_snapshot_rest_bad_request( - transport: str = "rest", 
- request_type=bigtable_table_admin.CreateTableFromSnapshotRequest, -): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.create_table_from_snapshot(request) - - def test_create_table_from_snapshot_rest_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -13973,52 +12381,6 @@ def test_create_table_from_snapshot_rest_flattened_error(transport: str = "rest" ) -def test_create_table_from_snapshot_rest_error(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.ListTablesRequest, - dict, - ], -) -def test_list_tables_rest(request_type): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.ListTablesResponse( - next_page_token="next_page_token_value", - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_table_admin.ListTablesResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.list_tables(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListTablesPager) - assert response.next_page_token == "next_page_token_value" - - def test_list_tables_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call @@ -14155,106 +12517,25 @@ def test_list_tables_rest_unset_required_fields(): ) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_tables_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( +def test_list_tables_rest_flattened(): + client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), + transport="rest", ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_list_tables" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_list_tables" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_table_admin.ListTablesRequest.pb( - bigtable_table_admin.ListTablesRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = bigtable_table_admin.ListTablesResponse.to_json( - bigtable_table_admin.ListTablesResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.ListTablesResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/instances/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", ) - - request = bigtable_table_admin.ListTablesRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable_table_admin.ListTablesResponse() - - client.list_tables( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - - -def test_list_tables_rest_bad_request( - transport: str = "rest", request_type=bigtable_table_admin.ListTablesRequest -): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.list_tables(request) - - -def test_list_tables_rest_flattened(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.ListTablesResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/instances/sample2"} - - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - ) - mock_args.update(sample_request) + mock_args.update(sample_request) # Wrap the value into a proper Response obj response_value = Response() @@ -14355,50 +12636,6 @@ def test_list_tables_rest_pager(transport: str = "rest"): assert page_.raw_page.next_page_token == token -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.GetTableRequest, - dict, - ], -) -def test_get_table_rest(request_type): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = table.Table( - name="name_value", - granularity=table.Table.TimestampGranularity.MILLIS, - deletion_protection=True, - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.get_table(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, table.Table) - assert response.name == "name_value" - assert response.granularity == table.Table.TimestampGranularity.MILLIS - assert response.deletion_protection is True - - def test_get_table_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call @@ -14520,85 +12757,6 @@ def test_get_table_rest_unset_required_fields(): assert set(unset_fields) == (set(("view",)) & set(("name",))) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_table_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_get_table" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_get_table" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_table_admin.GetTableRequest.pb( - bigtable_table_admin.GetTableRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = table.Table.to_json(table.Table()) - - request = bigtable_table_admin.GetTableRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = table.Table() - - client.get_table( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - - -def test_get_table_rest_bad_request( - transport: str = "rest", request_type=bigtable_table_admin.GetTableRequest -): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.get_table(request) - - def test_get_table_rest_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -14655,136 +12813,192 @@ def test_get_table_rest_flattened_error(transport: str = "rest"): ) -def test_get_table_rest_error(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" +def test_update_table_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.update_table in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.update_table] = mock_rpc + + request = {} + client.update_table(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.update_table(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_update_table_rest_required_fields( + request_type=bigtable_table_admin.UpdateTableRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_table._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_table._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. 
+ assert not set(unset_fields) - set(("update_mask",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.UpdateTableRequest, - dict, - ], -) -def test_update_table_rest(request_type): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) + request = request_type(**request_init) - # send a request that will satisfy transcoding - request_init = { - "table": {"name": "projects/sample1/instances/sample2/tables/sample3"} - } - request_init["table"] = { - "name": "projects/sample1/instances/sample2/tables/sample3", - "cluster_states": {}, - "column_families": {}, - "granularity": 1, - "restore_info": { - "source_type": 1, - "backup_info": { - "backup": "backup_value", - "start_time": {"seconds": 751, "nanos": 543}, - "end_time": {}, - "source_table": "source_table_value", - "source_backup": "source_backup_value", - }, - }, - "change_stream_config": {"retention_period": {"seconds": 751, "nanos": 543}}, - "deletion_protection": True, - "automated_backup_policy": {"retention_period": {}, "frequency": {}}, - } - # The version of a generated dependency at test runtime may differ from the version used during generation. - # Delete any fields which are not present in the current runtime dependency - # See https://github.com/googleapis/gapic-generator-python/issues/1748 + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "patch", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result - # Determine if the message type is proto-plus or protobuf - test_field = bigtable_table_admin.UpdateTableRequest.meta.fields["table"] + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) - def get_message_fields(field): - # Given a field which is a message (composite type), return a list with - # all the fields of the message. - # If the field is not a composite type, return an empty list. 
- message_fields = [] + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value - if hasattr(field, "message") and field.message: - is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + response = client.update_table(request) - if is_field_type_proto_plus_type: - message_fields = field.message.meta.fields.values() - # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types - else: # pragma: NO COVER - message_fields = field.message.DESCRIPTOR.fields - return message_fields + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params - runtime_nested_fields = [ - (field.name, nested_field.name) - for field in get_message_fields(test_field) - for nested_field in get_message_fields(field) - ] - subfields_not_in_runtime = [] +def test_update_table_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) - # For each item in the sample request, create a list of sub fields which are not present at runtime - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["table"].items(): # pragma: NO COVER - result = None - is_repeated = False - # For repeated fields - if isinstance(value, list) and len(value): - is_repeated = True - result = value[0] - # For fields where the type is another message - if isinstance(value, dict): - result = value + unset_fields = transport.update_table._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(("updateMask",)) + & set( + ( + "table", + "updateMask", + ) + ) + ) - if result and hasattr(result, "keys"): - for subfield in result.keys(): - if (field, subfield) not in runtime_nested_fields: - subfields_not_in_runtime.append( - { - "field": field, - "subfield": subfield, - "is_repeated": is_repeated, - } - ) - # Remove fields from the sample request which are not present in the runtime version of the dependency - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER - field = subfield_to_delete.get("field") - field_repeated = subfield_to_delete.get("is_repeated") - subfield = subfield_to_delete.get("subfield") - if subfield: - if field_repeated: - for i in range(0, len(request_init["table"][field])): - del request_init["table"][field][i][subfield] - else: - del request_init["table"][field][subfield] - request = request_type(**request_init) +def test_update_table_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. 
return_value = operations_pb2.Operation(name="operations/spam") + # get arguments that satisfy an http rule for this method + sample_request = { + "table": {"name": "projects/sample1/instances/sample2/tables/sample3"} + } + + # get truthy value for each flattened field + mock_args = dict( + table=gba_table.Table(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + mock_args.update(sample_request) + # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.update_table(request) - # Establish that the response is the type that we expect. - assert response.operation.name == "operations/spam" + client.update_table(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{table.name=projects/*/instances/*/tables/*}" + % client.transport._host, + args[1], + ) -def test_update_table_rest_use_cached_wrapped_rpc(): +def test_update_table_rest_flattened_error(transport: str = "rest"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_table( + bigtable_table_admin.UpdateTableRequest(), + table=gba_table.Table(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +def test_delete_table_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -14798,38 +13012,35 @@ def test_update_table_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.update_table in client._transport._wrapped_methods + assert client._transport.delete_table in client._transport._wrapped_methods # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[client._transport.update_table] = mock_rpc + client._transport._wrapped_methods[client._transport.delete_table] = mock_rpc request = {} - client.update_table(request) + client.delete_table(request) # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.update_table(request) + client.delete_table(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_update_table_rest_required_fields( - request_type=bigtable_table_admin.UpdateTableRequest, +def test_delete_table_rest_required_fields( + request_type=bigtable_table_admin.DeleteTableRequest, ): transport_class = transports.BigtableTableAdminRestTransport request_init = {} + request_init["name"] = "" request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( @@ -14840,19 +13051,21 @@ def test_update_table_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).update_table._get_unset_required_fields(jsonified_request) + ).delete_table._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present + jsonified_request["name"] = "name_value" + unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).update_table._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("update_mask",)) + ).delete_table._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -14861,7 +13074,7 @@ def test_update_table_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") + return_value = None # Mock the http request call within the method and fake a response. 
with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -14873,172 +13086,74 @@ def test_update_table_rest_required_fields( pb_request = request_type.pb(request) transcode_result = { "uri": "v1/sample_method", - "method": "patch", + "method": "delete", "query_params": pb_request, } - transcode_result["body"] = pb_request transcode.return_value = transcode_result response_value = Response() response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) + json_return_value = "" response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.update_table(request) + response = client.delete_table(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_update_table_rest_unset_required_fields(): +def test_delete_table_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.update_table._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(("updateMask",)) - & set( - ( - "table", - "updateMask", - ) - ) - ) + unset_fields = transport.delete_table._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_update_table_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( +def test_delete_table_rest_flattened(): + client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_update_table" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_update_table" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_table_admin.UpdateTableRequest.pb( - bigtable_table_admin.UpdateTableRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson( - operations_pb2.Operation() - ) - - request = bigtable_table_admin.UpdateTableRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - - client.update_table( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - - -def test_update_table_rest_bad_request( - transport: str = "rest", request_type=bigtable_table_admin.UpdateTableRequest -): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = { - "table": {"name": "projects/sample1/instances/sample2/tables/sample3"} - } - 
request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.update_table(request) - - -def test_update_table_rest_flattened(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport="rest", ) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") + return_value = None # get arguments that satisfy an http rule for this method - sample_request = { - "table": {"name": "projects/sample1/instances/sample2/tables/sample3"} - } + sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} # get truthy value for each flattened field mock_args = dict( - table=gba_table.Table(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + name="name_value", ) mock_args.update(sample_request) # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) + json_return_value = "" response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - client.update_table(**mock_args) + client.delete_table(**mock_args) # Establish that the underlying call was made with the expected # request object values. assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{table.name=projects/*/instances/*/tables/*}" - % client.transport._host, + "%s/v2/{name=projects/*/instances/*/tables/*}" % client.transport._host, args[1], ) -def test_update_table_rest_flattened_error(transport: str = "rest"): +def test_delete_table_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -15047,55 +13162,13 @@ def test_update_table_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.update_table( - bigtable_table_admin.UpdateTableRequest(), - table=gba_table.Table(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + client.delete_table( + bigtable_table_admin.DeleteTableRequest(), + name="name_value", ) -def test_update_table_rest_error(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.DeleteTableRequest, - dict, - ], -) -def test_delete_table_rest(request_type): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = None - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = "" - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.delete_table(request) - - # Establish that the response is the type that we expect. - assert response is None - - -def test_delete_table_rest_use_cached_wrapped_rpc(): +def test_undelete_table_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -15109,30 +13182,34 @@ def test_delete_table_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.delete_table in client._transport._wrapped_methods + assert client._transport.undelete_table in client._transport._wrapped_methods # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[client._transport.delete_table] = mock_rpc + client._transport._wrapped_methods[client._transport.undelete_table] = mock_rpc request = {} - client.delete_table(request) + client.undelete_table(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - client.delete_table(request) + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.undelete_table(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_delete_table_rest_required_fields( - request_type=bigtable_table_admin.DeleteTableRequest, +def test_undelete_table_rest_required_fields( + request_type=bigtable_table_admin.UndeleteTableRequest, ): transport_class = transports.BigtableTableAdminRestTransport @@ -15148,7 +13225,7 @@ def test_delete_table_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).delete_table._get_unset_required_fields(jsonified_request) + ).undelete_table._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present @@ -15157,7 +13234,7 @@ def test_delete_table_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).delete_table._get_unset_required_fields(jsonified_request) + ).undelete_table._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone @@ -15171,7 +13248,7 @@ def test_delete_table_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = None + return_value = operations_pb2.Operation(name="operations/spam") # Mock the http request call within the method and fake a response. 
with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -15183,108 +13260,36 @@ def test_delete_table_rest_required_fields( pb_request = request_type.pb(request) transcode_result = { "uri": "v1/sample_method", - "method": "delete", + "method": "post", "query_params": pb_request, } + transcode_result["body"] = pb_request transcode.return_value = transcode_result response_value = Response() response_value.status_code = 200 - json_return_value = "" + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.delete_table(request) + response = client.undelete_table(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_delete_table_rest_unset_required_fields(): +def test_undelete_table_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.delete_table._get_unset_required_fields({}) + unset_fields = transport.undelete_table._get_unset_required_fields({}) assert set(unset_fields) == (set(()) & set(("name",))) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_delete_table_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_delete_table" - ) as pre: - pre.assert_not_called() - pb_message = bigtable_table_admin.DeleteTableRequest.pb( - bigtable_table_admin.DeleteTableRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - - request = bigtable_table_admin.DeleteTableRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - - client.delete_table( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - - -def test_delete_table_rest_bad_request( - transport: str = "rest", request_type=bigtable_table_admin.DeleteTableRequest -): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.delete_table(request) - - -def test_delete_table_rest_flattened(): +def test_undelete_table_rest_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -15293,7 +13298,7 @@ def test_delete_table_rest_flattened(): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = None + return_value = operations_pb2.Operation(name="operations/spam") # get arguments that satisfy an http rule for this method sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} @@ -15307,23 +13312,24 @@ def test_delete_table_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - json_return_value = "" + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - client.delete_table(**mock_args) + client.undelete_table(**mock_args) # Establish that the underlying call was made with the expected # request object values. assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/tables/*}" % client.transport._host, + "%s/v2/{name=projects/*/instances/*/tables/*}:undelete" + % client.transport._host, args[1], ) -def test_delete_table_rest_flattened_error(transport: str = "rest"): +def test_undelete_table_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -15332,78 +13338,42 @@ def test_delete_table_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.delete_table( - bigtable_table_admin.DeleteTableRequest(), + client.undelete_table( + bigtable_table_admin.UndeleteTableRequest(), name="name_value", ) -def test_delete_table_rest_error(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) +def test_create_authorized_view_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.UndeleteTableRequest, - dict, - ], -) -def test_undelete_table_rest(request_type): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.undelete_table(request) - - # Establish that the response is the type that we expect. - assert response.operation.name == "operations/spam" - - -def test_undelete_table_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.undelete_table in client._transport._wrapped_methods + assert ( + client._transport.create_authorized_view + in client._transport._wrapped_methods + ) # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[client._transport.undelete_table] = mock_rpc + client._transport._wrapped_methods[ + client._transport.create_authorized_view + ] = mock_rpc request = {} - client.undelete_table(request) + client.create_authorized_view(request) # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 @@ -15412,20 +13382,21 @@ def test_undelete_table_rest_use_cached_wrapped_rpc(): # subsequent calls should use the cached wrapper wrapper_fn.reset_mock() - client.undelete_table(request) + client.create_authorized_view(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_undelete_table_rest_required_fields( - request_type=bigtable_table_admin.UndeleteTableRequest, +def test_create_authorized_view_rest_required_fields( + request_type=bigtable_table_admin.CreateAuthorizedViewRequest, ): transport_class = transports.BigtableTableAdminRestTransport request_init = {} - request_init["name"] = "" + request_init["parent"] = "" + request_init["authorized_view_id"] = "" request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( @@ -15433,24 +13404,32 @@ def test_undelete_table_rest_required_fields( ) # verify fields with default values are dropped + assert "authorizedViewId" not in jsonified_request unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).undelete_table._get_unset_required_fields(jsonified_request) + ).create_authorized_view._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present + assert "authorizedViewId" in jsonified_request + assert jsonified_request["authorizedViewId"] == request_init["authorized_view_id"] - jsonified_request["name"] = "name_value" + jsonified_request["parent"] = "parent_value" + jsonified_request["authorizedViewId"] = "authorized_view_id_value" unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).undelete_table._get_unset_required_fields(jsonified_request) + ).create_authorized_view._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. 
+ assert not set(unset_fields) - set(("authorized_view_id",)) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + assert "authorizedViewId" in jsonified_request + assert jsonified_request["authorizedViewId"] == "authorized_view_id_value" client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -15484,106 +13463,38 @@ def test_undelete_table_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.undelete_table(request) + response = client.create_authorized_view(request) - expected_params = [("$alt", "json;enum-encoding=int")] + expected_params = [ + ( + "authorizedViewId", + "", + ), + ("$alt", "json;enum-encoding=int"), + ] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_undelete_table_rest_unset_required_fields(): +def test_create_authorized_view_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.undelete_table._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_undelete_table_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_undelete_table" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_undelete_table" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_table_admin.UndeleteTableRequest.pb( - bigtable_table_admin.UndeleteTableRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson( - operations_pb2.Operation() - ) - - request = bigtable_table_admin.UndeleteTableRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - - client.undelete_table( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], + unset_fields = transport.create_authorized_view._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(("authorizedViewId",)) + & set( + ( + "parent", + "authorizedViewId", + "authorizedView", + ) ) - - pre.assert_called_once() - post.assert_called_once() - - -def test_undelete_table_rest_bad_request( - transport: str = "rest", request_type=bigtable_table_admin.UndeleteTableRequest -): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - 
transport=transport, ) - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.undelete_table(request) - -def test_undelete_table_rest_flattened(): +def test_create_authorized_view_rest_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -15595,11 +13506,13 @@ def test_undelete_table_rest_flattened(): return_value = operations_pb2.Operation(name="operations/spam") # get arguments that satisfy an http rule for this method - sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} + sample_request = {"parent": "projects/sample1/instances/sample2/tables/sample3"} # get truthy value for each flattened field mock_args = dict( - name="name_value", + parent="parent_value", + authorized_view=table.AuthorizedView(name="name_value"), + authorized_view_id="authorized_view_id_value", ) mock_args.update(sample_request) @@ -15610,20 +13523,20 @@ def test_undelete_table_rest_flattened(): response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - client.undelete_table(**mock_args) + client.create_authorized_view(**mock_args) # Establish that the underlying call was made with the expected # request object values. assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/tables/*}:undelete" + "%s/v2/{parent=projects/*/instances/*/tables/*}/authorizedViews" % client.transport._host, args[1], ) -def test_undelete_table_rest_flattened_error(transport: str = "rest"): +def test_create_authorized_view_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -15632,184 +13545,62 @@ def test_undelete_table_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): - client.undelete_table( - bigtable_table_admin.UndeleteTableRequest(), - name="name_value", + client.create_authorized_view( + bigtable_table_admin.CreateAuthorizedViewRequest(), + parent="parent_value", + authorized_view=table.AuthorizedView(name="name_value"), + authorized_view_id="authorized_view_id_value", ) -def test_undelete_table_rest_error(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) +def test_list_authorized_views_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.CreateAuthorizedViewRequest, - dict, - ], -) -def test_create_authorized_view_rest(request_type): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) + # Ensure method has been cached + assert ( + client._transport.list_authorized_views + in client._transport._wrapped_methods + ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/tables/sample3"} - request_init["authorized_view"] = { - "name": "name_value", - "subset_view": { - "row_prefixes": [b"row_prefixes_blob1", b"row_prefixes_blob2"], - "family_subsets": {}, - }, - "etag": "etag_value", - "deletion_protection": True, - } - # The version of a generated dependency at test runtime may differ from the version used during generation. - # Delete any fields which are not present in the current runtime dependency - # See https://github.com/googleapis/gapic-generator-python/issues/1748 + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.list_authorized_views + ] = mock_rpc - # Determine if the message type is proto-plus or protobuf - test_field = bigtable_table_admin.CreateAuthorizedViewRequest.meta.fields[ - "authorized_view" - ] - - def get_message_fields(field): - # Given a field which is a message (composite type), return a list with - # all the fields of the message. - # If the field is not a composite type, return an empty list. 
- message_fields = [] - - if hasattr(field, "message") and field.message: - is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") - - if is_field_type_proto_plus_type: - message_fields = field.message.meta.fields.values() - # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types - else: # pragma: NO COVER - message_fields = field.message.DESCRIPTOR.fields - return message_fields - - runtime_nested_fields = [ - (field.name, nested_field.name) - for field in get_message_fields(test_field) - for nested_field in get_message_fields(field) - ] - - subfields_not_in_runtime = [] - - # For each item in the sample request, create a list of sub fields which are not present at runtime - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["authorized_view"].items(): # pragma: NO COVER - result = None - is_repeated = False - # For repeated fields - if isinstance(value, list) and len(value): - is_repeated = True - result = value[0] - # For fields where the type is another message - if isinstance(value, dict): - result = value - - if result and hasattr(result, "keys"): - for subfield in result.keys(): - if (field, subfield) not in runtime_nested_fields: - subfields_not_in_runtime.append( - { - "field": field, - "subfield": subfield, - "is_repeated": is_repeated, - } - ) - - # Remove fields from the sample request which are not present in the runtime version of the dependency - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER - field = subfield_to_delete.get("field") - field_repeated = subfield_to_delete.get("is_repeated") - subfield = subfield_to_delete.get("subfield") - if subfield: - if field_repeated: - for i in range(0, len(request_init["authorized_view"][field])): - del request_init["authorized_view"][field][i][subfield] - else: - del request_init["authorized_view"][field][subfield] - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.create_authorized_view(request) - - # Establish that the response is the type that we expect. 
- assert response.operation.name == "operations/spam" - - -def test_create_authorized_view_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.create_authorized_view - in client._transport._wrapped_methods - ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.create_authorized_view - ] = mock_rpc - - request = {} - client.create_authorized_view(request) + request = {} + client.list_authorized_views(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.create_authorized_view(request) + client.list_authorized_views(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_create_authorized_view_rest_required_fields( - request_type=bigtable_table_admin.CreateAuthorizedViewRequest, +def test_list_authorized_views_rest_required_fields( + request_type=bigtable_table_admin.ListAuthorizedViewsRequest, ): transport_class = transports.BigtableTableAdminRestTransport request_init = {} request_init["parent"] = "" - request_init["authorized_view_id"] = "" request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( @@ -15817,32 +13608,32 @@ def test_create_authorized_view_rest_required_fields( ) # verify fields with default values are dropped - assert "authorizedViewId" not in jsonified_request unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).create_authorized_view._get_unset_required_fields(jsonified_request) + ).list_authorized_views._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present - assert "authorizedViewId" in jsonified_request - assert jsonified_request["authorizedViewId"] == request_init["authorized_view_id"] jsonified_request["parent"] = "parent_value" - jsonified_request["authorizedViewId"] = "authorized_view_id_value" unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).create_authorized_view._get_unset_required_fields(jsonified_request) + ).list_authorized_views._get_unset_required_fields(jsonified_request) # Check that path parameters and body parameters are not mixing in. 
- assert not set(unset_fields) - set(("authorized_view_id",)) + assert not set(unset_fields) - set( + ( + "page_size", + "page_token", + "view", + ) + ) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone assert "parent" in jsonified_request assert jsonified_request["parent"] == "parent_value" - assert "authorizedViewId" in jsonified_request - assert jsonified_request["authorizedViewId"] == "authorized_view_id_value" client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -15851,7 +13642,7 @@ def test_create_authorized_view_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") + return_value = bigtable_table_admin.ListAuthorizedViewsResponse() # Mock the http request call within the method and fake a response. with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -15863,135 +13654,49 @@ def test_create_authorized_view_rest_required_fields( pb_request = request_type.pb(request) transcode_result = { "uri": "v1/sample_method", - "method": "post", + "method": "get", "query_params": pb_request, } - transcode_result["body"] = pb_request transcode.return_value = transcode_result response_value = Response() response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListAuthorizedViewsResponse.pb( + return_value + ) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.create_authorized_view(request) + response = client.list_authorized_views(request) - expected_params = [ - ( - "authorizedViewId", - "", - ), - ("$alt", "json;enum-encoding=int"), - ] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_create_authorized_view_rest_unset_required_fields(): +def test_list_authorized_views_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.create_authorized_view._get_unset_required_fields({}) + unset_fields = transport.list_authorized_views._get_unset_required_fields({}) assert set(unset_fields) == ( - set(("authorizedViewId",)) - & set( + set( ( - "parent", - "authorizedViewId", - "authorizedView", + "pageSize", + "pageToken", + "view", ) ) + & set(("parent",)) ) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_create_authorized_view_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_create_authorized_view" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_create_authorized_view" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - 
pb_message = bigtable_table_admin.CreateAuthorizedViewRequest.pb( - bigtable_table_admin.CreateAuthorizedViewRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson( - operations_pb2.Operation() - ) - - request = bigtable_table_admin.CreateAuthorizedViewRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - - client.create_authorized_view( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - - -def test_create_authorized_view_rest_bad_request( - transport: str = "rest", - request_type=bigtable_table_admin.CreateAuthorizedViewRequest, -): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.create_authorized_view(request) - - -def test_create_authorized_view_rest_flattened(): +def test_list_authorized_views_rest_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -16000,7 +13705,7 @@ def test_create_authorized_view_rest_flattened(): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") + return_value = bigtable_table_admin.ListAuthorizedViewsResponse() # get arguments that satisfy an http rule for this method sample_request = {"parent": "projects/sample1/instances/sample2/tables/sample3"} @@ -16008,19 +13713,19 @@ def test_create_authorized_view_rest_flattened(): # get truthy value for each flattened field mock_args = dict( parent="parent_value", - authorized_view=table.AuthorizedView(name="name_value"), - authorized_view_id="authorized_view_id_value", ) mock_args.update(sample_request) # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListAuthorizedViewsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - client.create_authorized_view(**mock_args) + client.list_authorized_views(**mock_args) # Establish that the underlying call was made with the expected # request object values. 
@@ -16033,7 +13738,7 @@ def test_create_authorized_view_rest_flattened(): ) -def test_create_authorized_view_rest_flattened_error(transport: str = "rest"): +def test_list_authorized_views_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -16042,61 +13747,77 @@ def test_create_authorized_view_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.create_authorized_view( - bigtable_table_admin.CreateAuthorizedViewRequest(), + client.list_authorized_views( + bigtable_table_admin.ListAuthorizedViewsRequest(), parent="parent_value", - authorized_view=table.AuthorizedView(name="name_value"), - authorized_view_id="authorized_view_id_value", ) -def test_create_authorized_view_rest_error(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.ListAuthorizedViewsRequest, - dict, - ], -) -def test_list_authorized_views_rest(request_type): +def test_list_authorized_views_rest_pager(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport=transport, ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.ListAuthorizedViewsResponse( - next_page_token="next_page_token_value", + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + bigtable_table_admin.ListAuthorizedViewsResponse( + authorized_views=[ + table.AuthorizedView(), + table.AuthorizedView(), + table.AuthorizedView(), + ], + next_page_token="abc", + ), + bigtable_table_admin.ListAuthorizedViewsResponse( + authorized_views=[], + next_page_token="def", + ), + bigtable_table_admin.ListAuthorizedViewsResponse( + authorized_views=[ + table.AuthorizedView(), + ], + next_page_token="ghi", + ), + bigtable_table_admin.ListAuthorizedViewsResponse( + authorized_views=[ + table.AuthorizedView(), + table.AuthorizedView(), + ], + ), ) + # Two responses for two calls + response = response + response - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_table_admin.ListAuthorizedViewsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) + # Wrap the values into proper Response objs + response = tuple( + bigtable_table_admin.ListAuthorizedViewsResponse.to_json(x) + for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.list_authorized_views(request) + sample_request = {"parent": "projects/sample1/instances/sample2/tables/sample3"} - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListAuthorizedViewsPager) - assert response.next_page_token == "next_page_token_value" + pager = client.list_authorized_views(request=sample_request) + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, table.AuthorizedView) for i in results) -def test_list_authorized_views_rest_use_cached_wrapped_rpc(): + pages = list(client.list_authorized_views(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_get_authorized_view_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -16111,8 +13832,7 @@ def test_list_authorized_views_rest_use_cached_wrapped_rpc(): # Ensure method has been cached assert ( - client._transport.list_authorized_views - in client._transport._wrapped_methods + client._transport.get_authorized_view in client._transport._wrapped_methods ) # Replace cached wrapped function with mock @@ -16121,29 +13841,29 @@ def test_list_authorized_views_rest_use_cached_wrapped_rpc(): "foo" # operation_request.operation in compute client(s) expect a string. ) client._transport._wrapped_methods[ - client._transport.list_authorized_views + client._transport.get_authorized_view ] = mock_rpc request = {} - client.list_authorized_views(request) + client.get_authorized_view(request) # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 - client.list_authorized_views(request) + client.get_authorized_view(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_list_authorized_views_rest_required_fields( - request_type=bigtable_table_admin.ListAuthorizedViewsRequest, +def test_get_authorized_view_rest_required_fields( + request_type=bigtable_table_admin.GetAuthorizedViewRequest, ): transport_class = transports.BigtableTableAdminRestTransport request_init = {} - request_init["parent"] = "" + request_init["name"] = "" request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( @@ -16154,29 +13874,23 @@ def test_list_authorized_views_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).list_authorized_views._get_unset_required_fields(jsonified_request) + ).get_authorized_view._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present - jsonified_request["parent"] = "parent_value" + jsonified_request["name"] = "name_value" unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).list_authorized_views._get_unset_required_fields(jsonified_request) + ).get_authorized_view._get_unset_required_fields(jsonified_request) # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set( - ( - "page_size", - "page_token", - "view", - ) - ) + assert not set(unset_fields) - set(("view",)) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -16185,7 +13899,7 @@ def test_list_authorized_views_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.ListAuthorizedViewsResponse() + return_value = table.AuthorizedView() # Mock the http request call within the method and fake a response. 
with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -16206,124 +13920,29 @@ def test_list_authorized_views_rest_required_fields( response_value.status_code = 200 # Convert return value to protobuf type - return_value = bigtable_table_admin.ListAuthorizedViewsResponse.pb( - return_value - ) + return_value = table.AuthorizedView.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.list_authorized_views(request) + response = client.get_authorized_view(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_list_authorized_views_rest_unset_required_fields(): +def test_get_authorized_view_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.list_authorized_views._get_unset_required_fields({}) - assert set(unset_fields) == ( - set( - ( - "pageSize", - "pageToken", - "view", - ) - ) - & set(("parent",)) - ) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_authorized_views_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_list_authorized_views" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_list_authorized_views" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_table_admin.ListAuthorizedViewsRequest.pb( - bigtable_table_admin.ListAuthorizedViewsRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = ( - bigtable_table_admin.ListAuthorizedViewsResponse.to_json( - bigtable_table_admin.ListAuthorizedViewsResponse() - ) - ) - - request = bigtable_table_admin.ListAuthorizedViewsRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable_table_admin.ListAuthorizedViewsResponse() - - client.list_authorized_views( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - - -def test_list_authorized_views_rest_bad_request( - transport: str = "rest", - request_type=bigtable_table_admin.ListAuthorizedViewsRequest, -): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.list_authorized_views(request) + unset_fields = transport.get_authorized_view._get_unset_required_fields({}) + assert set(unset_fields) == (set(("view",)) & set(("name",))) -def test_list_authorized_views_rest_flattened(): +def test_get_authorized_view_rest_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -16332,14 +13951,16 @@ def test_list_authorized_views_rest_flattened(): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.ListAuthorizedViewsResponse() + return_value = table.AuthorizedView() # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/instances/sample2/tables/sample3"} + sample_request = { + "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } # get truthy value for each flattened field mock_args = dict( - parent="parent_value", + name="name_value", ) mock_args.update(sample_request) @@ -16347,25 +13968,25 @@ def test_list_authorized_views_rest_flattened(): response_value = Response() response_value.status_code = 200 # Convert return value to protobuf type - return_value = bigtable_table_admin.ListAuthorizedViewsResponse.pb(return_value) + return_value = table.AuthorizedView.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - client.list_authorized_views(**mock_args) + client.get_authorized_view(**mock_args) # Establish that the underlying call was made with the expected # request object values. assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*/tables/*}/authorizedViews" + "%s/v2/{name=projects/*/instances/*/tables/*/authorizedViews/*}" % client.transport._host, args[1], ) -def test_list_authorized_views_rest_flattened_error(transport: str = "rest"): +def test_get_authorized_view_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -16374,123 +13995,13 @@ def test_list_authorized_views_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.list_authorized_views( - bigtable_table_admin.ListAuthorizedViewsRequest(), - parent="parent_value", - ) - - -def test_list_authorized_views_rest_pager(transport: str = "rest"): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # TODO(kbandes): remove this mock unless there's a good reason for it. 
- # with mock.patch.object(path_template, 'transcode') as transcode: - # Set the response as a series of pages - response = ( - bigtable_table_admin.ListAuthorizedViewsResponse( - authorized_views=[ - table.AuthorizedView(), - table.AuthorizedView(), - table.AuthorizedView(), - ], - next_page_token="abc", - ), - bigtable_table_admin.ListAuthorizedViewsResponse( - authorized_views=[], - next_page_token="def", - ), - bigtable_table_admin.ListAuthorizedViewsResponse( - authorized_views=[ - table.AuthorizedView(), - ], - next_page_token="ghi", - ), - bigtable_table_admin.ListAuthorizedViewsResponse( - authorized_views=[ - table.AuthorizedView(), - table.AuthorizedView(), - ], - ), - ) - # Two responses for two calls - response = response + response - - # Wrap the values into proper Response objs - response = tuple( - bigtable_table_admin.ListAuthorizedViewsResponse.to_json(x) - for x in response - ) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = response_val.encode("UTF-8") - return_val.status_code = 200 - req.side_effect = return_values - - sample_request = {"parent": "projects/sample1/instances/sample2/tables/sample3"} - - pager = client.list_authorized_views(request=sample_request) - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, table.AuthorizedView) for i in results) - - pages = list(client.list_authorized_views(request=sample_request).pages) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.GetAuthorizedViewRequest, - dict, - ], -) -def test_get_authorized_view_rest(request_type): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" - } - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = table.AuthorizedView( + client.get_authorized_view( + bigtable_table_admin.GetAuthorizedViewRequest(), name="name_value", - etag="etag_value", - deletion_protection=True, ) - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = table.AuthorizedView.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.get_authorized_view(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, table.AuthorizedView) - assert response.name == "name_value" - assert response.etag == "etag_value" - assert response.deletion_protection is True - -def test_get_authorized_view_rest_use_cached_wrapped_rpc(): +def test_update_authorized_view_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -16505,7 +14016,8 @@ def test_get_authorized_view_rest_use_cached_wrapped_rpc(): # Ensure method has been cached assert ( - client._transport.get_authorized_view in client._transport._wrapped_methods + client._transport.update_authorized_view + in client._transport._wrapped_methods ) # Replace cached wrapped function with mock @@ -16514,29 +14026,32 @@ def test_get_authorized_view_rest_use_cached_wrapped_rpc(): "foo" # operation_request.operation in compute client(s) expect a string. ) client._transport._wrapped_methods[ - client._transport.get_authorized_view + client._transport.update_authorized_view ] = mock_rpc request = {} - client.get_authorized_view(request) + client.update_authorized_view(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - client.get_authorized_view(request) + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.update_authorized_view(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_get_authorized_view_rest_required_fields( - request_type=bigtable_table_admin.GetAuthorizedViewRequest, +def test_update_authorized_view_rest_required_fields( + request_type=bigtable_table_admin.UpdateAuthorizedViewRequest, ): transport_class = transports.BigtableTableAdminRestTransport request_init = {} - request_init["name"] = "" request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( @@ -16547,23 +14062,24 @@ def test_get_authorized_view_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).get_authorized_view._get_unset_required_fields(jsonified_request) + ).update_authorized_view._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present - jsonified_request["name"] = "name_value" - unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).get_authorized_view._get_unset_required_fields(jsonified_request) + ).update_authorized_view._get_unset_required_fields(jsonified_request) # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("view",)) + assert not set(unset_fields) - set( + ( + "ignore_warnings", + "update_mask", + ) + ) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -16572,7 +14088,7 @@ def test_get_authorized_view_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. 
- return_value = table.AuthorizedView() + return_value = operations_pb2.Operation(name="operations/spam") # Mock the http request call within the method and fake a response. with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -16584,119 +14100,44 @@ def test_get_authorized_view_rest_required_fields( pb_request = request_type.pb(request) transcode_result = { "uri": "v1/sample_method", - "method": "get", + "method": "patch", "query_params": pb_request, } + transcode_result["body"] = pb_request transcode.return_value = transcode_result response_value = Response() response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = table.AuthorizedView.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.get_authorized_view(request) + response = client.update_authorized_view(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_get_authorized_view_rest_unset_required_fields(): +def test_update_authorized_view_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.get_authorized_view._get_unset_required_fields({}) - assert set(unset_fields) == (set(("view",)) & set(("name",))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_authorized_view_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_get_authorized_view" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_get_authorized_view" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_table_admin.GetAuthorizedViewRequest.pb( - bigtable_table_admin.GetAuthorizedViewRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = table.AuthorizedView.to_json(table.AuthorizedView()) - - request = bigtable_table_admin.GetAuthorizedViewRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = table.AuthorizedView() - - client.get_authorized_view( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], + unset_fields = transport.update_authorized_view._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "ignoreWarnings", + "updateMask", + ) ) - - pre.assert_called_once() - post.assert_called_once() - - -def test_get_authorized_view_rest_bad_request( - transport: str = "rest", request_type=bigtable_table_admin.GetAuthorizedViewRequest -): - client = BigtableTableAdminClient( - 
credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + & set(("authorizedView",)) ) - # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" - } - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.get_authorized_view(request) - -def test_get_authorized_view_rest_flattened(): +def test_update_authorized_view_rest_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -16705,42 +14146,43 @@ def test_get_authorized_view_rest_flattened(): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = table.AuthorizedView() + return_value = operations_pb2.Operation(name="operations/spam") # get arguments that satisfy an http rule for this method sample_request = { - "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "authorized_view": { + "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } } # get truthy value for each flattened field mock_args = dict( - name="name_value", + authorized_view=table.AuthorizedView(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) mock_args.update(sample_request) # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - # Convert return value to protobuf type - return_value = table.AuthorizedView.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - client.get_authorized_view(**mock_args) + client.update_authorized_view(**mock_args) # Establish that the underlying call was made with the expected # request object values. assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/tables/*/authorizedViews/*}" + "%s/v2/{authorized_view.name=projects/*/instances/*/tables/*/authorizedViews/*}" % client.transport._host, args[1], ) -def test_get_authorized_view_rest_flattened_error(transport: str = "rest"): +def test_update_authorized_view_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -16749,136 +14191,14 @@ def test_get_authorized_view_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): - client.get_authorized_view( - bigtable_table_admin.GetAuthorizedViewRequest(), - name="name_value", + client.update_authorized_view( + bigtable_table_admin.UpdateAuthorizedViewRequest(), + authorized_view=table.AuthorizedView(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) -def test_get_authorized_view_rest_error(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.UpdateAuthorizedViewRequest, - dict, - ], -) -def test_update_authorized_view_rest(request_type): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = { - "authorized_view": { - "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" - } - } - request_init["authorized_view"] = { - "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4", - "subset_view": { - "row_prefixes": [b"row_prefixes_blob1", b"row_prefixes_blob2"], - "family_subsets": {}, - }, - "etag": "etag_value", - "deletion_protection": True, - } - # The version of a generated dependency at test runtime may differ from the version used during generation. - # Delete any fields which are not present in the current runtime dependency - # See https://github.com/googleapis/gapic-generator-python/issues/1748 - - # Determine if the message type is proto-plus or protobuf - test_field = bigtable_table_admin.UpdateAuthorizedViewRequest.meta.fields[ - "authorized_view" - ] - - def get_message_fields(field): - # Given a field which is a message (composite type), return a list with - # all the fields of the message. - # If the field is not a composite type, return an empty list. 
- message_fields = [] - - if hasattr(field, "message") and field.message: - is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") - - if is_field_type_proto_plus_type: - message_fields = field.message.meta.fields.values() - # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types - else: # pragma: NO COVER - message_fields = field.message.DESCRIPTOR.fields - return message_fields - - runtime_nested_fields = [ - (field.name, nested_field.name) - for field in get_message_fields(test_field) - for nested_field in get_message_fields(field) - ] - - subfields_not_in_runtime = [] - - # For each item in the sample request, create a list of sub fields which are not present at runtime - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["authorized_view"].items(): # pragma: NO COVER - result = None - is_repeated = False - # For repeated fields - if isinstance(value, list) and len(value): - is_repeated = True - result = value[0] - # For fields where the type is another message - if isinstance(value, dict): - result = value - - if result and hasattr(result, "keys"): - for subfield in result.keys(): - if (field, subfield) not in runtime_nested_fields: - subfields_not_in_runtime.append( - { - "field": field, - "subfield": subfield, - "is_repeated": is_repeated, - } - ) - - # Remove fields from the sample request which are not present in the runtime version of the dependency - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER - field = subfield_to_delete.get("field") - field_repeated = subfield_to_delete.get("is_repeated") - subfield = subfield_to_delete.get("subfield") - if subfield: - if field_repeated: - for i in range(0, len(request_init["authorized_view"][field])): - del request_init["authorized_view"][field][i][subfield] - else: - del request_init["authorized_view"][field][subfield] - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.update_authorized_view(request) - - # Establish that the response is the type that we expect. 
- assert response.operation.name == "operations/spam" - - -def test_update_authorized_view_rest_use_cached_wrapped_rpc(): +def test_delete_authorized_view_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -16893,7 +14213,7 @@ def test_update_authorized_view_rest_use_cached_wrapped_rpc(): # Ensure method has been cached assert ( - client._transport.update_authorized_view + client._transport.delete_authorized_view in client._transport._wrapped_methods ) @@ -16903,32 +14223,29 @@ def test_update_authorized_view_rest_use_cached_wrapped_rpc(): "foo" # operation_request.operation in compute client(s) expect a string. ) client._transport._wrapped_methods[ - client._transport.update_authorized_view + client._transport.delete_authorized_view ] = mock_rpc request = {} - client.update_authorized_view(request) + client.delete_authorized_view(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.update_authorized_view(request) + client.delete_authorized_view(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_update_authorized_view_rest_required_fields( - request_type=bigtable_table_admin.UpdateAuthorizedViewRequest, +def test_delete_authorized_view_rest_required_fields( + request_type=bigtable_table_admin.DeleteAuthorizedViewRequest, ): transport_class = transports.BigtableTableAdminRestTransport request_init = {} + request_init["name"] = "" request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( @@ -16939,24 +14256,23 @@ def test_update_authorized_view_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).update_authorized_view._get_unset_required_fields(jsonified_request) + ).delete_authorized_view._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present + jsonified_request["name"] = "name_value" + unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).update_authorized_view._get_unset_required_fields(jsonified_request) + ).delete_authorized_view._get_unset_required_fields(jsonified_request) # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set( - ( - "ignore_warnings", - "update_mask", - ) - ) + assert not set(unset_fields) - set(("etag",)) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -16965,7 +14281,7 @@ def test_update_authorized_view_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") + return_value = None # Mock the http request call within the method and fake a response. 
with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -16977,177 +14293,77 @@ def test_update_authorized_view_rest_required_fields( pb_request = request_type.pb(request) transcode_result = { "uri": "v1/sample_method", - "method": "patch", + "method": "delete", "query_params": pb_request, } - transcode_result["body"] = pb_request transcode.return_value = transcode_result response_value = Response() response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) + json_return_value = "" response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.update_authorized_view(request) + response = client.delete_authorized_view(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_update_authorized_view_rest_unset_required_fields(): +def test_delete_authorized_view_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.update_authorized_view._get_unset_required_fields({}) - assert set(unset_fields) == ( - set( - ( - "ignoreWarnings", - "updateMask", - ) - ) - & set(("authorizedView",)) - ) + unset_fields = transport.delete_authorized_view._get_unset_required_fields({}) + assert set(unset_fields) == (set(("etag",)) & set(("name",))) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_update_authorized_view_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( +def test_delete_authorized_view_rest_flattened(): + client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), + transport="rest", ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_update_authorized_view" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_update_authorized_view" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_table_admin.UpdateAuthorizedViewRequest.pb( - bigtable_table_admin.UpdateAuthorizedViewRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson( - operations_pb2.Operation() - ) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = None - request = bigtable_table_admin.UpdateAuthorizedViewRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } - client.update_authorized_view( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], + # get truthy value for each flattened field + mock_args = dict( + name="name_value", ) + mock_args.update(sample_request) - pre.assert_called_once() - post.assert_called_once() + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value - -def test_update_authorized_view_rest_bad_request( - transport: str = "rest", - request_type=bigtable_table_admin.UpdateAuthorizedViewRequest, -): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = { - "authorized_view": { - "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" - } - } - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.update_authorized_view(request) - - -def test_update_authorized_view_rest_flattened(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # get arguments that satisfy an http rule for this method - sample_request = { - "authorized_view": { - "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" - } - } - - # get truthy value for each flattened field - mock_args = dict( - authorized_view=table.AuthorizedView(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - - client.update_authorized_view(**mock_args) + client.delete_authorized_view(**mock_args) # Establish that the underlying call was made with the expected # request object values. 
assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{authorized_view.name=projects/*/instances/*/tables/*/authorizedViews/*}" + "%s/v2/{name=projects/*/instances/*/tables/*/authorizedViews/*}" % client.transport._host, args[1], ) -def test_update_authorized_view_rest_flattened_error(transport: str = "rest"): +def test_delete_authorized_view_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -17156,57 +14372,13 @@ def test_update_authorized_view_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.update_authorized_view( - bigtable_table_admin.UpdateAuthorizedViewRequest(), - authorized_view=table.AuthorizedView(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + client.delete_authorized_view( + bigtable_table_admin.DeleteAuthorizedViewRequest(), + name="name_value", ) -def test_update_authorized_view_rest_error(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.DeleteAuthorizedViewRequest, - dict, - ], -) -def test_delete_authorized_view_rest(request_type): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" - } - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = None - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = "" - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.delete_authorized_view(request) - - # Establish that the response is the type that we expect. - assert response is None - - -def test_delete_authorized_view_rest_use_cached_wrapped_rpc(): +def test_modify_column_families_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -17221,7 +14393,7 @@ def test_delete_authorized_view_rest_use_cached_wrapped_rpc(): # Ensure method has been cached assert ( - client._transport.delete_authorized_view + client._transport.modify_column_families in client._transport._wrapped_methods ) @@ -17231,24 +14403,24 @@ def test_delete_authorized_view_rest_use_cached_wrapped_rpc(): "foo" # operation_request.operation in compute client(s) expect a string. ) client._transport._wrapped_methods[ - client._transport.delete_authorized_view + client._transport.modify_column_families ] = mock_rpc request = {} - client.delete_authorized_view(request) + client.modify_column_families(request) # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 - client.delete_authorized_view(request) + client.modify_column_families(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_delete_authorized_view_rest_required_fields( - request_type=bigtable_table_admin.DeleteAuthorizedViewRequest, +def test_modify_column_families_rest_required_fields( + request_type=bigtable_table_admin.ModifyColumnFamiliesRequest, ): transport_class = transports.BigtableTableAdminRestTransport @@ -17264,7 +14436,7 @@ def test_delete_authorized_view_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).delete_authorized_view._get_unset_required_fields(jsonified_request) + ).modify_column_families._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present @@ -17273,9 +14445,7 @@ def test_delete_authorized_view_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).delete_authorized_view._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("etag",)) + ).modify_column_families._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone @@ -17289,7 +14459,7 @@ def test_delete_authorized_view_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = None + return_value = table.Table() # Mock the http request call within the method and fake a response. with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -17301,111 +14471,47 @@ def test_delete_authorized_view_rest_required_fields( pb_request = request_type.pb(request) transcode_result = { "uri": "v1/sample_method", - "method": "delete", + "method": "post", "query_params": pb_request, } + transcode_result["body"] = pb_request transcode.return_value = transcode_result response_value = Response() response_value.status_code = 200 - json_return_value = "" + + # Convert return value to protobuf type + return_value = table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.delete_authorized_view(request) + response = client.modify_column_families(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_delete_authorized_view_rest_unset_required_fields(): +def test_modify_column_families_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.delete_authorized_view._get_unset_required_fields({}) - assert set(unset_fields) == (set(("etag",)) & set(("name",))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_delete_authorized_view_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = 
BigtableTableAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_delete_authorized_view" - ) as pre: - pre.assert_not_called() - pb_message = bigtable_table_admin.DeleteAuthorizedViewRequest.pb( - bigtable_table_admin.DeleteAuthorizedViewRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - - request = bigtable_table_admin.DeleteAuthorizedViewRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - - client.delete_authorized_view( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], + unset_fields = transport.modify_column_families._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "name", + "modifications", + ) ) - - pre.assert_called_once() - - -def test_delete_authorized_view_rest_bad_request( - transport: str = "rest", - request_type=bigtable_table_admin.DeleteAuthorizedViewRequest, -): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, ) - # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" - } - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.delete_authorized_view(request) - -def test_delete_authorized_view_rest_flattened(): +def test_modify_column_families_rest_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -17414,40 +14520,45 @@ def test_delete_authorized_view_rest_flattened(): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. 
- return_value = None + return_value = table.Table() # get arguments that satisfy an http rule for this method - sample_request = { - "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" - } + sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} # get truthy value for each flattened field mock_args = dict( name="name_value", + modifications=[ + bigtable_table_admin.ModifyColumnFamiliesRequest.Modification( + id="id_value" + ) + ], ) mock_args.update(sample_request) # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - json_return_value = "" + # Convert return value to protobuf type + return_value = table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - client.delete_authorized_view(**mock_args) + client.modify_column_families(**mock_args) # Establish that the underlying call was made with the expected # request object values. assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/tables/*/authorizedViews/*}" + "%s/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies" % client.transport._host, args[1], ) -def test_delete_authorized_view_rest_flattened_error(transport: str = "rest"): +def test_modify_column_families_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -17456,63 +14567,135 @@ def test_delete_authorized_view_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.delete_authorized_view( - bigtable_table_admin.DeleteAuthorizedViewRequest(), + client.modify_column_families( + bigtable_table_admin.ModifyColumnFamiliesRequest(), name="name_value", - ) - - -def test_delete_authorized_view_rest_error(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + modifications=[ + bigtable_table_admin.ModifyColumnFamiliesRequest.Modification( + id="id_value" + ) + ], + ) + + +def test_drop_row_range_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.drop_row_range in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.drop_row_range] = mock_rpc + + request = {} + client.drop_row_range(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.drop_row_range(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_drop_row_range_rest_required_fields( + request_type=bigtable_table_admin.DropRowRangeRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).drop_row_range._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).drop_row_range._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.ModifyColumnFamiliesRequest, - dict, - ], -) -def test_modify_column_families_rest(request_type): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) - - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) + # Designate an appropriate value for the returned response. + return_value = None # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = table.Table( - name="name_value", - granularity=table.Table.TimestampGranularity.MILLIS, - deletion_protection=True, - ) + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) + response_value = Response() + response_value.status_code = 200 + json_return_value = "" - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.modify_column_families(request) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value - # Establish that the response is the type that we expect. 
- assert isinstance(response, table.Table) - assert response.name == "name_value" - assert response.granularity == table.Table.TimestampGranularity.MILLIS - assert response.deletion_protection is True + response = client.drop_row_range(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params -def test_modify_column_families_rest_use_cached_wrapped_rpc(): +def test_drop_row_range_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.drop_row_range._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +def test_generate_consistency_token_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -17527,7 +14710,7 @@ def test_modify_column_families_rest_use_cached_wrapped_rpc(): # Ensure method has been cached assert ( - client._transport.modify_column_families + client._transport.generate_consistency_token in client._transport._wrapped_methods ) @@ -17537,24 +14720,24 @@ def test_modify_column_families_rest_use_cached_wrapped_rpc(): "foo" # operation_request.operation in compute client(s) expect a string. ) client._transport._wrapped_methods[ - client._transport.modify_column_families + client._transport.generate_consistency_token ] = mock_rpc request = {} - client.modify_column_families(request) + client.generate_consistency_token(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - client.modify_column_families(request) + client.generate_consistency_token(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_modify_column_families_rest_required_fields( - request_type=bigtable_table_admin.ModifyColumnFamiliesRequest, +def test_generate_consistency_token_rest_required_fields( + request_type=bigtable_table_admin.GenerateConsistencyTokenRequest, ): transport_class = transports.BigtableTableAdminRestTransport @@ -17570,7 +14753,7 @@ def test_modify_column_families_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).modify_column_families._get_unset_required_fields(jsonified_request) + ).generate_consistency_token._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present @@ -17579,7 +14762,7 @@ def test_modify_column_families_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).modify_column_families._get_unset_required_fields(jsonified_request) + ).generate_consistency_token._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone @@ -17593,7 +14776,7 @@ def test_modify_column_families_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = table.Table() + return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() # Mock the http request call within the method and fake a response. 
with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -17615,164 +14798,75 @@ def test_modify_column_families_rest_required_fields( response_value.status_code = 200 # Convert return value to protobuf type - return_value = table.Table.pb(return_value) + return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb( + return_value + ) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.modify_column_families(request) + response = client.generate_consistency_token(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_modify_column_families_rest_unset_required_fields(): +def test_generate_consistency_token_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.modify_column_families._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(()) - & set( - ( - "name", - "modifications", - ) - ) - ) + unset_fields = transport.generate_consistency_token._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_modify_column_families_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( +def test_generate_consistency_token_rest_flattened(): + client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), + transport="rest", ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_modify_column_families" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_modify_column_families" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_table_admin.ModifyColumnFamiliesRequest.pb( - bigtable_table_admin.ModifyColumnFamiliesRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = table.Table.to_json(table.Table()) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() - request = bigtable_table_admin.ModifyColumnFamiliesRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = table.Table() + # get arguments that satisfy an http rule for this method + sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} - client.modify_column_families( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], + # get truthy value for each flattened field + mock_args = dict( + name="name_value", ) + mock_args.update(sample_request) - pre.assert_called_once() - post.assert_called_once() + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value - -def test_modify_column_families_rest_bad_request( - transport: str = "rest", - request_type=bigtable_table_admin.ModifyColumnFamiliesRequest, -): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.modify_column_families(request) - - -def test_modify_column_families_rest_flattened(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = table.Table() - - # get arguments that satisfy an http rule for this method - sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} - - # get truthy value for each flattened field - mock_args = dict( - name="name_value", - modifications=[ - bigtable_table_admin.ModifyColumnFamiliesRequest.Modification( - id="id_value" - ) - ], - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - - client.modify_column_families(**mock_args) + client.generate_consistency_token(**mock_args) # Establish that the underlying call was made with the expected # request object values. 
assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies" + "%s/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken" % client.transport._host, args[1], ) -def test_modify_column_families_rest_flattened_error(transport: str = "rest"): +def test_generate_consistency_token_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -17781,59 +14875,13 @@ def test_modify_column_families_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.modify_column_families( - bigtable_table_admin.ModifyColumnFamiliesRequest(), + client.generate_consistency_token( + bigtable_table_admin.GenerateConsistencyTokenRequest(), name="name_value", - modifications=[ - bigtable_table_admin.ModifyColumnFamiliesRequest.Modification( - id="id_value" - ) - ], ) -def test_modify_column_families_rest_error(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.DropRowRangeRequest, - dict, - ], -) -def test_drop_row_range_rest(request_type): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = None - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = "" - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.drop_row_range(request) - - # Establish that the response is the type that we expect. - assert response is None - - -def test_drop_row_range_rest_use_cached_wrapped_rpc(): +def test_check_consistency_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -17847,35 +14895,38 @@ def test_drop_row_range_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.drop_row_range in client._transport._wrapped_methods + assert client._transport.check_consistency in client._transport._wrapped_methods # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[client._transport.drop_row_range] = mock_rpc + client._transport._wrapped_methods[ + client._transport.check_consistency + ] = mock_rpc request = {} - client.drop_row_range(request) + client.check_consistency(request) # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 - client.drop_row_range(request) + client.check_consistency(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_drop_row_range_rest_required_fields( - request_type=bigtable_table_admin.DropRowRangeRequest, +def test_check_consistency_rest_required_fields( + request_type=bigtable_table_admin.CheckConsistencyRequest, ): transport_class = transports.BigtableTableAdminRestTransport request_init = {} request_init["name"] = "" + request_init["consistency_token"] = "" request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( @@ -17886,21 +14937,24 @@ def test_drop_row_range_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).drop_row_range._get_unset_required_fields(jsonified_request) + ).check_consistency._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present jsonified_request["name"] = "name_value" + jsonified_request["consistencyToken"] = "consistency_token_value" unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).drop_row_range._get_unset_required_fields(jsonified_request) + ).check_consistency._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone assert "name" in jsonified_request assert jsonified_request["name"] == "name_value" + assert "consistencyToken" in jsonified_request + assert jsonified_request["consistencyToken"] == "consistency_token_value" client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -17909,7 +14963,7 @@ def test_drop_row_range_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = None + return_value = bigtable_table_admin.CheckConsistencyResponse() # Mock the http request call within the method and fake a response. 
with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -17929,149 +14983,100 @@ def test_drop_row_range_rest_required_fields( response_value = Response() response_value.status_code = 200 - json_return_value = "" + + # Convert return value to protobuf type + return_value = bigtable_table_admin.CheckConsistencyResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.drop_row_range(request) + response = client.check_consistency(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_drop_row_range_rest_unset_required_fields(): +def test_check_consistency_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.drop_row_range._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_drop_row_range_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_drop_row_range" - ) as pre: - pre.assert_not_called() - pb_message = bigtable_table_admin.DropRowRangeRequest.pb( - bigtable_table_admin.DropRowRangeRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - - request = bigtable_table_admin.DropRowRangeRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - - client.drop_row_range( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], + unset_fields = transport.check_consistency._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "name", + "consistencyToken", + ) ) - - pre.assert_called_once() - - -def test_drop_row_range_rest_bad_request( - transport: str = "rest", request_type=bigtable_table_admin.DropRowRangeRequest -): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.drop_row_range(request) - - -def test_drop_row_range_rest_error(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.GenerateConsistencyTokenRequest, - dict, - ], -) -def test_generate_consistency_token_rest(request_type): +def test_check_consistency_rest_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.GenerateConsistencyTokenResponse( + return_value = bigtable_table_admin.CheckConsistencyResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", consistency_token="consistency_token_value", ) + mock_args.update(sample_request) # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 # Convert return value to protobuf type - return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb( - return_value - ) + return_value = bigtable_table_admin.CheckConsistencyResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.generate_consistency_token(request) - # Establish that the response is the type that we expect. - assert isinstance(response, bigtable_table_admin.GenerateConsistencyTokenResponse) - assert response.consistency_token == "consistency_token_value" + client.check_consistency(**mock_args) + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/tables/*}:checkConsistency" + % client.transport._host, + args[1], + ) -def test_generate_consistency_token_rest_use_cached_wrapped_rpc(): + +def test_check_consistency_rest_flattened_error(transport: str = "rest"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.check_consistency( + bigtable_table_admin.CheckConsistencyRequest(), + name="name_value", + consistency_token="consistency_token_value", + ) + + +def test_snapshot_table_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -18085,40 +15090,41 @@ def test_generate_consistency_token_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert ( - client._transport.generate_consistency_token - in client._transport._wrapped_methods - ) + assert client._transport.snapshot_table in client._transport._wrapped_methods # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[ - client._transport.generate_consistency_token - ] = mock_rpc + client._transport._wrapped_methods[client._transport.snapshot_table] = mock_rpc request = {} - client.generate_consistency_token(request) + client.snapshot_table(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - client.generate_consistency_token(request) + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.snapshot_table(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_generate_consistency_token_rest_required_fields( - request_type=bigtable_table_admin.GenerateConsistencyTokenRequest, +def test_snapshot_table_rest_required_fields( + request_type=bigtable_table_admin.SnapshotTableRequest, ): transport_class = transports.BigtableTableAdminRestTransport request_init = {} request_init["name"] = "" + request_init["cluster"] = "" + request_init["snapshot_id"] = "" request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( @@ -18129,21 +15135,27 @@ def test_generate_consistency_token_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).generate_consistency_token._get_unset_required_fields(jsonified_request) + ).snapshot_table._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present jsonified_request["name"] = "name_value" + jsonified_request["cluster"] = "cluster_value" + jsonified_request["snapshotId"] = "snapshot_id_value" unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).generate_consistency_token._get_unset_required_fields(jsonified_request) + ).snapshot_table._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone assert "name" in jsonified_request assert jsonified_request["name"] == "name_value" + assert "cluster" in jsonified_request + assert jsonified_request["cluster"] == "cluster_value" + assert "snapshotId" in jsonified_request + assert jsonified_request["snapshotId"] == "snapshot_id_value" client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -18152,7 +15164,7 @@ def test_generate_consistency_token_rest_required_fields( request = request_type(**request_init) # 
Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() + return_value = operations_pb2.Operation(name="operations/spam") # Mock the http request call within the method and fake a response. with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -18172,117 +15184,37 @@ def test_generate_consistency_token_rest_required_fields( response_value = Response() response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb( - return_value - ) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.generate_consistency_token(request) + response = client.snapshot_table(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_generate_consistency_token_rest_unset_required_fields(): +def test_snapshot_table_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.generate_consistency_token._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_generate_consistency_token_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_generate_consistency_token" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_generate_consistency_token" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_table_admin.GenerateConsistencyTokenRequest.pb( - bigtable_table_admin.GenerateConsistencyTokenRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = ( - bigtable_table_admin.GenerateConsistencyTokenResponse.to_json( - bigtable_table_admin.GenerateConsistencyTokenResponse() + unset_fields = transport.snapshot_table._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "name", + "cluster", + "snapshotId", ) ) - - request = bigtable_table_admin.GenerateConsistencyTokenRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() - - client.generate_consistency_token( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - - -def test_generate_consistency_token_rest_bad_request( - transport: str = "rest", - request_type=bigtable_table_admin.GenerateConsistencyTokenRequest, -): - 
client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, ) - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.generate_consistency_token(request) - -def test_generate_consistency_token_rest_flattened(): +def test_snapshot_table_rest_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -18291,7 +15223,7 @@ def test_generate_consistency_token_rest_flattened(): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() + return_value = operations_pb2.Operation(name="operations/spam") # get arguments that satisfy an http rule for this method sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} @@ -18299,34 +15231,33 @@ def test_generate_consistency_token_rest_flattened(): # get truthy value for each flattened field mock_args = dict( name="name_value", + cluster="cluster_value", + snapshot_id="snapshot_id_value", + description="description_value", ) mock_args.update(sample_request) # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb( - return_value - ) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - client.generate_consistency_token(**mock_args) + client.snapshot_table(**mock_args) # Establish that the underlying call was made with the expected # request object values. assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken" + "%s/v2/{name=projects/*/instances/*/tables/*}:snapshot" % client.transport._host, args[1], ) -def test_generate_consistency_token_rest_flattened_error(transport: str = "rest"): +def test_snapshot_table_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -18335,59 +15266,16 @@ def test_generate_consistency_token_rest_flattened_error(transport: str = "rest" # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): - client.generate_consistency_token( - bigtable_table_admin.GenerateConsistencyTokenRequest(), + client.snapshot_table( + bigtable_table_admin.SnapshotTableRequest(), name="name_value", + cluster="cluster_value", + snapshot_id="snapshot_id_value", + description="description_value", ) -def test_generate_consistency_token_rest_error(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.CheckConsistencyRequest, - dict, - ], -) -def test_check_consistency_rest(request_type): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.CheckConsistencyResponse( - consistent=True, - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_table_admin.CheckConsistencyResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.check_consistency(request) - - # Establish that the response is the type that we expect. - assert isinstance(response, bigtable_table_admin.CheckConsistencyResponse) - assert response.consistent is True - - -def test_check_consistency_rest_use_cached_wrapped_rpc(): +def test_get_snapshot_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -18401,38 +15289,35 @@ def test_check_consistency_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.check_consistency in client._transport._wrapped_methods + assert client._transport.get_snapshot in client._transport._wrapped_methods # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[ - client._transport.check_consistency - ] = mock_rpc + client._transport._wrapped_methods[client._transport.get_snapshot] = mock_rpc request = {} - client.check_consistency(request) + client.get_snapshot(request) # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 - client.check_consistency(request) + client.get_snapshot(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_check_consistency_rest_required_fields( - request_type=bigtable_table_admin.CheckConsistencyRequest, +def test_get_snapshot_rest_required_fields( + request_type=bigtable_table_admin.GetSnapshotRequest, ): transport_class = transports.BigtableTableAdminRestTransport request_init = {} request_init["name"] = "" - request_init["consistency_token"] = "" request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( @@ -18443,24 +15328,21 @@ def test_check_consistency_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).check_consistency._get_unset_required_fields(jsonified_request) + ).get_snapshot._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present jsonified_request["name"] = "name_value" - jsonified_request["consistencyToken"] = "consistency_token_value" unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).check_consistency._get_unset_required_fields(jsonified_request) + ).get_snapshot._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone assert "name" in jsonified_request assert jsonified_request["name"] == "name_value" - assert "consistencyToken" in jsonified_request - assert jsonified_request["consistencyToken"] == "consistency_token_value" client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -18469,7 +15351,7 @@ def test_check_consistency_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.CheckConsistencyResponse() + return_value = table.Snapshot() # Mock the http request call within the method and fake a response. 
with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -18481,132 +15363,38 @@ def test_check_consistency_rest_required_fields( pb_request = request_type.pb(request) transcode_result = { "uri": "v1/sample_method", - "method": "post", + "method": "get", "query_params": pb_request, } - transcode_result["body"] = pb_request transcode.return_value = transcode_result response_value = Response() response_value.status_code = 200 # Convert return value to protobuf type - return_value = bigtable_table_admin.CheckConsistencyResponse.pb( - return_value - ) + return_value = table.Snapshot.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.check_consistency(request) + response = client.get_snapshot(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_check_consistency_rest_unset_required_fields(): +def test_get_snapshot_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.check_consistency._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(()) - & set( - ( - "name", - "consistencyToken", - ) - ) - ) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_check_consistency_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_check_consistency" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_check_consistency" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_table_admin.CheckConsistencyRequest.pb( - bigtable_table_admin.CheckConsistencyRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = ( - bigtable_table_admin.CheckConsistencyResponse.to_json( - bigtable_table_admin.CheckConsistencyResponse() - ) - ) - - request = bigtable_table_admin.CheckConsistencyRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable_table_admin.CheckConsistencyResponse() - - client.check_consistency( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - - -def test_check_consistency_rest_bad_request( - transport: str = "rest", request_type=bigtable_table_admin.CheckConsistencyRequest -): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} - request = 
request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.check_consistency(request) + unset_fields = transport.get_snapshot._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) -def test_check_consistency_rest_flattened(): +def test_get_snapshot_rest_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -18615,15 +15403,16 @@ def test_check_consistency_rest_flattened(): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.CheckConsistencyResponse() + return_value = table.Snapshot() # get arguments that satisfy an http rule for this method - sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} + sample_request = { + "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4" + } # get truthy value for each flattened field mock_args = dict( name="name_value", - consistency_token="consistency_token_value", ) mock_args.update(sample_request) @@ -18631,25 +15420,25 @@ def test_check_consistency_rest_flattened(): response_value = Response() response_value.status_code = 200 # Convert return value to protobuf type - return_value = bigtable_table_admin.CheckConsistencyResponse.pb(return_value) + return_value = table.Snapshot.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - client.check_consistency(**mock_args) + client.get_snapshot(**mock_args) # Establish that the underlying call was made with the expected # request object values. assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/tables/*}:checkConsistency" + "%s/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}" % client.transport._host, args[1], ) -def test_check_consistency_rest_flattened_error(transport: str = "rest"): +def test_get_snapshot_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -18658,55 +15447,13 @@ def test_check_consistency_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): - client.check_consistency( - bigtable_table_admin.CheckConsistencyRequest(), + client.get_snapshot( + bigtable_table_admin.GetSnapshotRequest(), name="name_value", - consistency_token="consistency_token_value", ) -def test_check_consistency_rest_error(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.SnapshotTableRequest, - dict, - ], -) -def test_snapshot_table_rest(request_type): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.snapshot_table(request) - - # Establish that the response is the type that we expect. - assert response.operation.name == "operations/spam" - - -def test_snapshot_table_rest_use_cached_wrapped_rpc(): +def test_list_snapshots_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -18720,41 +15467,35 @@ def test_snapshot_table_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.snapshot_table in client._transport._wrapped_methods + assert client._transport.list_snapshots in client._transport._wrapped_methods # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[client._transport.snapshot_table] = mock_rpc + client._transport._wrapped_methods[client._transport.list_snapshots] = mock_rpc request = {} - client.snapshot_table(request) + client.list_snapshots(request) # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.snapshot_table(request) + client.list_snapshots(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_snapshot_table_rest_required_fields( - request_type=bigtable_table_admin.SnapshotTableRequest, +def test_list_snapshots_rest_required_fields( + request_type=bigtable_table_admin.ListSnapshotsRequest, ): transport_class = transports.BigtableTableAdminRestTransport request_init = {} - request_init["name"] = "" - request_init["cluster"] = "" - request_init["snapshot_id"] = "" + request_init["parent"] = "" request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( @@ -18765,27 +15506,28 @@ def test_snapshot_table_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).snapshot_table._get_unset_required_fields(jsonified_request) + ).list_snapshots._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present - jsonified_request["name"] = "name_value" - jsonified_request["cluster"] = "cluster_value" - jsonified_request["snapshotId"] = "snapshot_id_value" + jsonified_request["parent"] = "parent_value" unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).snapshot_table._get_unset_required_fields(jsonified_request) + ).list_snapshots._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "page_size", + "page_token", + ) + ) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" - assert "cluster" in jsonified_request - assert jsonified_request["cluster"] == "cluster_value" - assert "snapshotId" in jsonified_request - assert jsonified_request["snapshotId"] == "snapshot_id_value" + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -18794,7 +15536,7 @@ def test_snapshot_table_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") + return_value = bigtable_table_admin.ListSnapshotsResponse() # Mock the http request call within the method and fake a response. 
with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -18806,171 +15548,90 @@ def test_snapshot_table_rest_required_fields( pb_request = request_type.pb(request) transcode_result = { "uri": "v1/sample_method", - "method": "post", + "method": "get", "query_params": pb_request, } - transcode_result["body"] = pb_request transcode.return_value = transcode_result response_value = Response() response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListSnapshotsResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.snapshot_table(request) + response = client.list_snapshots(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_snapshot_table_rest_unset_required_fields(): +def test_list_snapshots_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.snapshot_table._get_unset_required_fields({}) + unset_fields = transport.list_snapshots._get_unset_required_fields({}) assert set(unset_fields) == ( - set(()) - & set( + set( ( - "name", - "cluster", - "snapshotId", + "pageSize", + "pageToken", ) ) + & set(("parent",)) ) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_snapshot_table_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( +def test_list_snapshots_rest_flattened(): + client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_snapshot_table" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_snapshot_table" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_table_admin.SnapshotTableRequest.pb( - bigtable_table_admin.SnapshotTableRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson( - operations_pb2.Operation() - ) - - request = bigtable_table_admin.SnapshotTableRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - - client.snapshot_table( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - - -def test_snapshot_table_rest_bad_request( - transport: str = "rest", request_type=bigtable_table_admin.SnapshotTableRequest -): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will 
satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.snapshot_table(request) - - -def test_snapshot_table_rest_flattened(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport="rest", ) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") + return_value = bigtable_table_admin.ListSnapshotsResponse() # get arguments that satisfy an http rule for this method - sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} + sample_request = { + "parent": "projects/sample1/instances/sample2/clusters/sample3" + } # get truthy value for each flattened field mock_args = dict( - name="name_value", - cluster="cluster_value", - snapshot_id="snapshot_id_value", - description="description_value", + parent="parent_value", ) mock_args.update(sample_request) # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListSnapshotsResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - client.snapshot_table(**mock_args) + client.list_snapshots(**mock_args) # Establish that the underlying call was made with the expected # request object values. assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/tables/*}:snapshot" + "%s/v2/{parent=projects/*/instances/*/clusters/*}/snapshots" % client.transport._host, args[1], ) -def test_snapshot_table_rest_flattened_error(transport: str = "rest"): +def test_list_snapshots_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -18979,70 +15640,78 @@ def test_snapshot_table_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): - client.snapshot_table( - bigtable_table_admin.SnapshotTableRequest(), - name="name_value", - cluster="cluster_value", - snapshot_id="snapshot_id_value", - description="description_value", + client.list_snapshots( + bigtable_table_admin.ListSnapshotsRequest(), + parent="parent_value", ) -def test_snapshot_table_rest_error(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.GetSnapshotRequest, - dict, - ], -) -def test_get_snapshot_rest(request_type): +def test_list_snapshots_rest_pager(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport=transport, ) - # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4" - } - request = request_type(**request_init) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = table.Snapshot( - name="name_value", - data_size_bytes=1594, - state=table.Snapshot.State.READY, - description="description_value", + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[ + table.Snapshot(), + table.Snapshot(), + table.Snapshot(), + ], + next_page_token="abc", + ), + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[], + next_page_token="def", + ), + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[ + table.Snapshot(), + ], + next_page_token="ghi", + ), + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[ + table.Snapshot(), + table.Snapshot(), + ], + ), ) + # Two responses for two calls + response = response + response - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = table.Snapshot.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) + # Wrap the values into proper Response objs + response = tuple( + bigtable_table_admin.ListSnapshotsResponse.to_json(x) for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.get_snapshot(request) + sample_request = { + "parent": "projects/sample1/instances/sample2/clusters/sample3" + } - # Establish that the response is the type that we expect. 
- assert isinstance(response, table.Snapshot) - assert response.name == "name_value" - assert response.data_size_bytes == 1594 - assert response.state == table.Snapshot.State.READY - assert response.description == "description_value" + pager = client.list_snapshots(request=sample_request) + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, table.Snapshot) for i in results) -def test_get_snapshot_rest_use_cached_wrapped_rpc(): + pages = list(client.list_snapshots(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_delete_snapshot_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -19056,30 +15725,30 @@ def test_get_snapshot_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.get_snapshot in client._transport._wrapped_methods + assert client._transport.delete_snapshot in client._transport._wrapped_methods # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[client._transport.get_snapshot] = mock_rpc + client._transport._wrapped_methods[client._transport.delete_snapshot] = mock_rpc request = {} - client.get_snapshot(request) + client.delete_snapshot(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - client.get_snapshot(request) + client.delete_snapshot(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_get_snapshot_rest_required_fields( - request_type=bigtable_table_admin.GetSnapshotRequest, +def test_delete_snapshot_rest_required_fields( + request_type=bigtable_table_admin.DeleteSnapshotRequest, ): transport_class = transports.BigtableTableAdminRestTransport @@ -19095,7 +15764,7 @@ def test_get_snapshot_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).get_snapshot._get_unset_required_fields(jsonified_request) + ).delete_snapshot._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present @@ -19104,7 +15773,7 @@ def test_get_snapshot_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).get_snapshot._get_unset_required_fields(jsonified_request) + ).delete_snapshot._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone @@ -19118,7 +15787,7 @@ def test_get_snapshot_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = table.Snapshot() + return_value = None # Mock the http request call within the method and fake a response. 
with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -19130,133 +15799,49 @@ def test_get_snapshot_rest_required_fields( pb_request = request_type.pb(request) transcode_result = { "uri": "v1/sample_method", - "method": "get", + "method": "delete", "query_params": pb_request, } transcode.return_value = transcode_result response_value = Response() response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = table.Snapshot.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) + json_return_value = "" response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.get_snapshot(request) + response = client.delete_snapshot(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_get_snapshot_rest_unset_required_fields(): +def test_delete_snapshot_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.get_snapshot._get_unset_required_fields({}) + unset_fields = transport.delete_snapshot._get_unset_required_fields({}) assert set(unset_fields) == (set(()) & set(("name",))) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_snapshot_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( +def test_delete_snapshot_rest_flattened(): + client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), + transport="rest", ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_get_snapshot" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_get_snapshot" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_table_admin.GetSnapshotRequest.pb( - bigtable_table_admin.GetSnapshotRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = table.Snapshot.to_json(table.Snapshot()) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = None - request = bigtable_table_admin.GetSnapshotRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = table.Snapshot() - - client.get_snapshot( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - - -def test_get_snapshot_rest_bad_request( - transport: str = "rest", request_type=bigtable_table_admin.GetSnapshotRequest -): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4" - } - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.get_snapshot(request) - - -def test_get_snapshot_rest_flattened(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = table.Snapshot() - - # get arguments that satisfy an http rule for this method - sample_request = { - "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4" - } + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4" + } # get truthy value for each flattened field mock_args = dict( @@ -19267,13 +15852,11 @@ def test_get_snapshot_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - # Convert return value to protobuf type - return_value = table.Snapshot.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) + json_return_value = "" response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - client.get_snapshot(**mock_args) + client.delete_snapshot(**mock_args) # Establish that the underlying call was made with the expected # request object values. @@ -19286,7 +15869,7 @@ def test_get_snapshot_rest_flattened(): ) -def test_get_snapshot_rest_flattened_error(transport: str = "rest"): +def test_delete_snapshot_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -19295,59 +15878,13 @@ def test_get_snapshot_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): - client.get_snapshot( - bigtable_table_admin.GetSnapshotRequest(), + client.delete_snapshot( + bigtable_table_admin.DeleteSnapshotRequest(), name="name_value", ) -def test_get_snapshot_rest_error(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.ListSnapshotsRequest, - dict, - ], -) -def test_list_snapshots_rest(request_type): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.ListSnapshotsResponse( - next_page_token="next_page_token_value", - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_table_admin.ListSnapshotsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.list_snapshots(request) - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListSnapshotsPager) - assert response.next_page_token == "next_page_token_value" - - -def test_list_snapshots_rest_use_cached_wrapped_rpc(): +def test_create_backup_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -19361,35 +15898,40 @@ def test_list_snapshots_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.list_snapshots in client._transport._wrapped_methods + assert client._transport.create_backup in client._transport._wrapped_methods # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[client._transport.list_snapshots] = mock_rpc + client._transport._wrapped_methods[client._transport.create_backup] = mock_rpc request = {} - client.list_snapshots(request) + client.create_backup(request) # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 - client.list_snapshots(request) + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.create_backup(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_list_snapshots_rest_required_fields( - request_type=bigtable_table_admin.ListSnapshotsRequest, +def test_create_backup_rest_required_fields( + request_type=bigtable_table_admin.CreateBackupRequest, ): transport_class = transports.BigtableTableAdminRestTransport request_init = {} request_init["parent"] = "" + request_init["backup_id"] = "" request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( @@ -19397,31 +15939,32 @@ def test_list_snapshots_rest_required_fields( ) # verify fields with default values are dropped + assert "backupId" not in jsonified_request unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).list_snapshots._get_unset_required_fields(jsonified_request) + ).create_backup._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present + assert "backupId" in jsonified_request + assert jsonified_request["backupId"] == request_init["backup_id"] jsonified_request["parent"] = "parent_value" + jsonified_request["backupId"] = "backup_id_value" unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).list_snapshots._get_unset_required_fields(jsonified_request) + ).create_backup._get_unset_required_fields(jsonified_request) # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set( - ( - "page_size", - "page_token", - ) - ) + assert not set(unset_fields) - set(("backup_id",)) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone assert "parent" in jsonified_request assert jsonified_request["parent"] == "parent_value" + assert "backupId" in jsonified_request + assert jsonified_request["backupId"] == "backup_id_value" client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -19430,7 +15973,7 @@ def test_list_snapshots_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.ListSnapshotsResponse() + return_value = operations_pb2.Operation(name="operations/spam") # Mock the http request call within the method and fake a response. 
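
The *_use_cached_wrapped_rpc tests in these hunks patch gapic_v1.method.wrap_method, swap the entry cached in client._transport._wrapped_methods for a mock.Mock(), and then confirm that two calls reach the mock without a new wrapper being built. A small standard-library-only sketch of that behavior, one wrapper per RPC built at construction time and reused on every call; the Transport and Client classes here are simplified stand-ins, not the generated ones:

    from unittest import mock

    def wrap_method(func):
        """Stand-in for gapic_v1.method.wrap_method: decorate an RPC callable once."""
        def wrapped(request):
            return func(request)
        return wrapped

    class Transport:
        def __init__(self):
            # One wrapper per RPC, built once here (the analogue of
            # _prep_wrapped_messages) and cached for the transport's lifetime.
            self._wrapped_methods = {
                self.delete_snapshot: wrap_method(self.delete_snapshot),
            }

        def delete_snapshot(self, request):
            return None

    class Client:
        def __init__(self):
            self._transport = Transport()

        def delete_snapshot(self, request):
            # Look up the cached wrapper instead of wrapping on every call.
            return self._transport._wrapped_methods[self._transport.delete_snapshot](request)

    client = Client()
    mock_rpc = mock.Mock(return_value=None)
    # Replace the cached wrapper, as the test does, and call twice.
    client._transport._wrapped_methods[client._transport.delete_snapshot] = mock_rpc
    client.delete_snapshot({})
    client.delete_snapshot({})
    assert mock_rpc.call_count == 2
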
with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -19442,127 +15985,51 @@ def test_list_snapshots_rest_required_fields( pb_request = request_type.pb(request) transcode_result = { "uri": "v1/sample_method", - "method": "get", + "method": "post", "query_params": pb_request, } + transcode_result["body"] = pb_request transcode.return_value = transcode_result response_value = Response() response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable_table_admin.ListSnapshotsResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.list_snapshots(request) + response = client.create_backup(request) - expected_params = [("$alt", "json;enum-encoding=int")] + expected_params = [ + ( + "backupId", + "", + ), + ("$alt", "json;enum-encoding=int"), + ] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_list_snapshots_rest_unset_required_fields(): +def test_create_backup_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.list_snapshots._get_unset_required_fields({}) + unset_fields = transport.create_backup._get_unset_required_fields({}) assert set(unset_fields) == ( - set( + set(("backupId",)) + & set( ( - "pageSize", - "pageToken", + "parent", + "backupId", + "backup", ) ) - & set(("parent",)) - ) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_snapshots_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_list_snapshots" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_list_snapshots" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_table_admin.ListSnapshotsRequest.pb( - bigtable_table_admin.ListSnapshotsRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = bigtable_table_admin.ListSnapshotsResponse.to_json( - bigtable_table_admin.ListSnapshotsResponse() - ) - - request = bigtable_table_admin.ListSnapshotsRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable_table_admin.ListSnapshotsResponse() - - client.list_snapshots( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - - -def test_list_snapshots_rest_bad_request( - transport: str = "rest", request_type=bigtable_table_admin.ListSnapshotsRequest -): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, ) - # send a request 
that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.list_snapshots(request) - -def test_list_snapshots_rest_flattened(): +def test_create_backup_rest_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -19571,7 +16038,7 @@ def test_list_snapshots_rest_flattened(): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.ListSnapshotsResponse() + return_value = operations_pb2.Operation(name="operations/spam") # get arguments that satisfy an http rule for this method sample_request = { @@ -19581,32 +16048,32 @@ def test_list_snapshots_rest_flattened(): # get truthy value for each flattened field mock_args = dict( parent="parent_value", + backup_id="backup_id_value", + backup=table.Backup(name="name_value"), ) mock_args.update(sample_request) # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_table_admin.ListSnapshotsResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - client.list_snapshots(**mock_args) + client.create_backup(**mock_args) # Establish that the underlying call was made with the expected # request object values. assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*/clusters/*}/snapshots" + "%s/v2/{parent=projects/*/instances/*/clusters/*}/backups" % client.transport._host, args[1], ) -def test_list_snapshots_rest_flattened_error(transport: str = "rest"): +def test_create_backup_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -19615,115 +16082,15 @@ def test_list_snapshots_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.list_snapshots( - bigtable_table_admin.ListSnapshotsRequest(), + client.create_backup( + bigtable_table_admin.CreateBackupRequest(), parent="parent_value", + backup_id="backup_id_value", + backup=table.Backup(name="name_value"), ) -def test_list_snapshots_rest_pager(transport: str = "rest"): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # TODO(kbandes): remove this mock unless there's a good reason for it. 
- # with mock.patch.object(path_template, 'transcode') as transcode: - # Set the response as a series of pages - response = ( - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - table.Snapshot(), - table.Snapshot(), - ], - next_page_token="abc", - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[], - next_page_token="def", - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - ], - next_page_token="ghi", - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - table.Snapshot(), - ], - ), - ) - # Two responses for two calls - response = response + response - - # Wrap the values into proper Response objs - response = tuple( - bigtable_table_admin.ListSnapshotsResponse.to_json(x) for x in response - ) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = response_val.encode("UTF-8") - return_val.status_code = 200 - req.side_effect = return_values - - sample_request = { - "parent": "projects/sample1/instances/sample2/clusters/sample3" - } - - pager = client.list_snapshots(request=sample_request) - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, table.Snapshot) for i in results) - - pages = list(client.list_snapshots(request=sample_request).pages) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.DeleteSnapshotRequest, - dict, - ], -) -def test_delete_snapshot_rest(request_type): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4" - } - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = None - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = "" - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.delete_snapshot(request) - - # Establish that the response is the type that we expect. - assert response is None - - -def test_delete_snapshot_rest_use_cached_wrapped_rpc(): +def test_get_backup_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -19737,30 +16104,30 @@ def test_delete_snapshot_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.delete_snapshot in client._transport._wrapped_methods + assert client._transport.get_backup in client._transport._wrapped_methods # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. 
) - client._transport._wrapped_methods[client._transport.delete_snapshot] = mock_rpc + client._transport._wrapped_methods[client._transport.get_backup] = mock_rpc request = {} - client.delete_snapshot(request) + client.get_backup(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - client.delete_snapshot(request) + client.get_backup(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_delete_snapshot_rest_required_fields( - request_type=bigtable_table_admin.DeleteSnapshotRequest, +def test_get_backup_rest_required_fields( + request_type=bigtable_table_admin.GetBackupRequest, ): transport_class = transports.BigtableTableAdminRestTransport @@ -19776,7 +16143,7 @@ def test_delete_snapshot_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).delete_snapshot._get_unset_required_fields(jsonified_request) + ).get_backup._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present @@ -19785,7 +16152,7 @@ def test_delete_snapshot_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).delete_snapshot._get_unset_required_fields(jsonified_request) + ).get_backup._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone @@ -19799,7 +16166,7 @@ def test_delete_snapshot_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = None + return_value = table.Backup() # Mock the http request call within the method and fake a response. 
with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -19811,110 +16178,38 @@ def test_delete_snapshot_rest_required_fields( pb_request = request_type.pb(request) transcode_result = { "uri": "v1/sample_method", - "method": "delete", + "method": "get", "query_params": pb_request, } transcode.return_value = transcode_result response_value = Response() response_value.status_code = 200 - json_return_value = "" + + # Convert return value to protobuf type + return_value = table.Backup.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.delete_snapshot(request) + response = client.get_backup(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_delete_snapshot_rest_unset_required_fields(): +def test_get_backup_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.delete_snapshot._get_unset_required_fields({}) + unset_fields = transport.get_backup._get_unset_required_fields({}) assert set(unset_fields) == (set(()) & set(("name",))) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_delete_snapshot_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_delete_snapshot" - ) as pre: - pre.assert_not_called() - pb_message = bigtable_table_admin.DeleteSnapshotRequest.pb( - bigtable_table_admin.DeleteSnapshotRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - - request = bigtable_table_admin.DeleteSnapshotRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - - client.delete_snapshot( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - - -def test_delete_snapshot_rest_bad_request( - transport: str = "rest", request_type=bigtable_table_admin.DeleteSnapshotRequest -): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4" - } - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.delete_snapshot(request) - - -def test_delete_snapshot_rest_flattened(): +def test_get_backup_rest_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -19923,11 +16218,11 @@ def test_delete_snapshot_rest_flattened(): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = None + return_value = table.Backup() # get arguments that satisfy an http rule for this method sample_request = { - "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4" + "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" } # get truthy value for each flattened field @@ -19939,24 +16234,26 @@ def test_delete_snapshot_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - json_return_value = "" + # Convert return value to protobuf type + return_value = table.Backup.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - client.delete_snapshot(**mock_args) + client.get_backup(**mock_args) # Establish that the underlying call was made with the expected # request object values. assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}" + "%s/v2/{name=projects/*/instances/*/clusters/*/backups/*}" % client.transport._host, args[1], ) -def test_delete_snapshot_rest_flattened_error(transport: str = "rest"): +def test_get_backup_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -19965,194 +16262,54 @@ def test_delete_snapshot_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. 
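
That convention, either a fully formed request object or individual flattened fields but never both, is what the *_flattened_error tests pin down. A minimal sketch of the guard, assuming a simplified DeleteSnapshotRequest and client function rather than the generated ones:

    class DeleteSnapshotRequest:
        def __init__(self, name=""):
            self.name = name

    def delete_snapshot(request=None, *, name=None):
        """Accept a request object or flattened fields, never both."""
        has_flattened = name is not None
        if request is not None and has_flattened:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )
        if request is None:
            request = DeleteSnapshotRequest(name=name or "")
        return request

    # Flattened-only and request-only calls are fine; mixing them is an error.
    assert delete_snapshot(name="name_value").name == "name_value"
    assert delete_snapshot(DeleteSnapshotRequest(name="n")).name == "n"
    try:
        delete_snapshot(DeleteSnapshotRequest(), name="name_value")
    except ValueError:
        pass
    else:
        raise AssertionError("expected ValueError when mixing request and flattened fields")
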
with pytest.raises(ValueError): - client.delete_snapshot( - bigtable_table_admin.DeleteSnapshotRequest(), + client.get_backup( + bigtable_table_admin.GetBackupRequest(), name="name_value", ) -def test_delete_snapshot_rest_error(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) +def test_update_backup_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.CreateBackupRequest, - dict, - ], -) -def test_create_backup_rest(request_type): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} - request_init["backup"] = { - "name": "name_value", - "source_table": "source_table_value", - "source_backup": "source_backup_value", - "expire_time": {"seconds": 751, "nanos": 543}, - "start_time": {}, - "end_time": {}, - "size_bytes": 1089, - "state": 1, - "encryption_info": { - "encryption_type": 1, - "encryption_status": { - "code": 411, - "message": "message_value", - "details": [ - { - "type_url": "type.googleapis.com/google.protobuf.Duration", - "value": b"\x08\x0c\x10\xdb\x07", - } - ], - }, - "kms_key_version": "kms_key_version_value", - }, - "backup_type": 1, - "hot_to_standard_time": {}, - } - # The version of a generated dependency at test runtime may differ from the version used during generation. - # Delete any fields which are not present in the current runtime dependency - # See https://github.com/googleapis/gapic-generator-python/issues/1748 - - # Determine if the message type is proto-plus or protobuf - test_field = bigtable_table_admin.CreateBackupRequest.meta.fields["backup"] - - def get_message_fields(field): - # Given a field which is a message (composite type), return a list with - # all the fields of the message. - # If the field is not a composite type, return an empty list. 
- message_fields = [] - - if hasattr(field, "message") and field.message: - is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") - - if is_field_type_proto_plus_type: - message_fields = field.message.meta.fields.values() - # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types - else: # pragma: NO COVER - message_fields = field.message.DESCRIPTOR.fields - return message_fields - - runtime_nested_fields = [ - (field.name, nested_field.name) - for field in get_message_fields(test_field) - for nested_field in get_message_fields(field) - ] - - subfields_not_in_runtime = [] - - # For each item in the sample request, create a list of sub fields which are not present at runtime - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["backup"].items(): # pragma: NO COVER - result = None - is_repeated = False - # For repeated fields - if isinstance(value, list) and len(value): - is_repeated = True - result = value[0] - # For fields where the type is another message - if isinstance(value, dict): - result = value - - if result and hasattr(result, "keys"): - for subfield in result.keys(): - if (field, subfield) not in runtime_nested_fields: - subfields_not_in_runtime.append( - { - "field": field, - "subfield": subfield, - "is_repeated": is_repeated, - } - ) - - # Remove fields from the sample request which are not present in the runtime version of the dependency - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER - field = subfield_to_delete.get("field") - field_repeated = subfield_to_delete.get("is_repeated") - subfield = subfield_to_delete.get("subfield") - if subfield: - if field_repeated: - for i in range(0, len(request_init["backup"][field])): - del request_init["backup"][field][i][subfield] - else: - del request_init["backup"][field][subfield] - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.create_backup(request) - - # Establish that the response is the type that we expect. 
- assert response.operation.name == "operations/spam" - - -def test_create_backup_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.create_backup in client._transport._wrapped_methods + assert client._transport.update_backup in client._transport._wrapped_methods # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[client._transport.create_backup] = mock_rpc + client._transport._wrapped_methods[client._transport.update_backup] = mock_rpc request = {} - client.create_backup(request) + client.update_backup(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.create_backup(request) + client.update_backup(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_create_backup_rest_required_fields( - request_type=bigtable_table_admin.CreateBackupRequest, +def test_update_backup_rest_required_fields( + request_type=bigtable_table_admin.UpdateBackupRequest, ): transport_class = transports.BigtableTableAdminRestTransport request_init = {} - request_init["parent"] = "" - request_init["backup_id"] = "" request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( @@ -20160,32 +16317,22 @@ def test_create_backup_rest_required_fields( ) # verify fields with default values are dropped - assert "backupId" not in jsonified_request unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).create_backup._get_unset_required_fields(jsonified_request) + ).update_backup._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present - assert "backupId" in jsonified_request - assert jsonified_request["backupId"] == request_init["backup_id"] - - jsonified_request["parent"] = "parent_value" - jsonified_request["backupId"] = "backup_id_value" unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).create_backup._get_unset_required_fields(jsonified_request) + ).update_backup._get_unset_required_fields(jsonified_request) # Check that path parameters and body parameters are not mixing in. 
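
The _get_unset_required_fields assertions in these hunks intersect the method's required field names with the ones that travel as query parameters, and expect back only those the caller left at a default value. A rough sketch of that check under assumed field names (REQUIRED_FIELDS and QUERY_PARAM_FIELDS below are illustrative constants, not the transport's internals):

    # Of a method's required fields, report the ones the caller left empty so
    # they can be populated as query parameters with default values.
    REQUIRED_FIELDS = {"parent", "backupId", "backup"}
    QUERY_PARAM_FIELDS = {"backupId"}  # required fields sent as query params, not path/body

    def get_unset_required_fields(jsonified_request):
        return {
            field: ""
            for field in (REQUIRED_FIELDS & QUERY_PARAM_FIELDS)
            if not jsonified_request.get(field)
        }

    # With nothing populated, only backupId is reported, mirroring the
    # set(("backupId",)) & set(("parent", "backupId", "backup")) assertion above.
    assert set(get_unset_required_fields({})) == {"backupId"}
    # Once the caller supplies a non-default value, nothing is left unset.
    assert get_unset_required_fields({"backupId": "backup_id_value"}) == {}
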
- assert not set(unset_fields) - set(("backup_id",)) + assert not set(unset_fields) - set(("update_mask",)) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - assert "backupId" in jsonified_request - assert jsonified_request["backupId"] == "backup_id_value" client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -20194,7 +16341,7 @@ def test_create_backup_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") + return_value = table.Backup() # Mock the http request call within the method and fake a response. with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -20206,7 +16353,7 @@ def test_create_backup_rest_required_fields( pb_request = request_type.pb(request) transcode_result = { "uri": "v1/sample_method", - "method": "post", + "method": "patch", "query_params": pb_request, } transcode_result["body"] = pb_request @@ -20214,170 +16361,86 @@ def test_create_backup_rest_required_fields( response_value = Response() response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = table.Backup.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.create_backup(request) + response = client.update_backup(request) - expected_params = [ - ( - "backupId", - "", - ), - ("$alt", "json;enum-encoding=int"), - ] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_create_backup_rest_unset_required_fields(): +def test_update_backup_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.create_backup._get_unset_required_fields({}) + unset_fields = transport.update_backup._get_unset_required_fields({}) assert set(unset_fields) == ( - set(("backupId",)) + set(("updateMask",)) & set( ( - "parent", - "backupId", "backup", + "updateMask", ) ) ) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_create_backup_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( +def test_update_backup_rest_flattened(): + client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), + transport="rest", ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_create_backup" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_create_backup" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_table_admin.CreateBackupRequest.pb( - bigtable_table_admin.CreateBackupRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": 
pb_message, - "query_params": pb_message, - } - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson( - operations_pb2.Operation() - ) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = table.Backup() - request = bigtable_table_admin.CreateBackupRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() + # get arguments that satisfy an http rule for this method + sample_request = { + "backup": { + "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" + } + } - client.create_backup( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], + # get truthy value for each flattened field + mock_args = dict( + backup=table.Backup(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) - - pre.assert_called_once() - post.assert_called_once() - - -def test_create_backup_rest_bad_request( - transport: str = "rest", request_type=bigtable_table_admin.CreateBackupRequest -): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.create_backup(request) - - -def test_create_backup_rest_flattened(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # get arguments that satisfy an http rule for this method - sample_request = { - "parent": "projects/sample1/instances/sample2/clusters/sample3" - } - - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - backup_id="backup_id_value", - backup=table.Backup(name="name_value"), - ) - mock_args.update(sample_request) + mock_args.update(sample_request) # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 + # Convert return value to protobuf type + return_value = table.Backup.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - client.create_backup(**mock_args) + client.update_backup(**mock_args) # Establish that the underlying call was made with the expected # request object values. 
assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*/clusters/*}/backups" + "%s/v2/{backup.name=projects/*/instances/*/clusters/*/backups/*}" % client.transport._host, args[1], ) -def test_create_backup_rest_flattened_error(transport: str = "rest"): +def test_update_backup_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -20386,73 +16449,14 @@ def test_create_backup_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.create_backup( - bigtable_table_admin.CreateBackupRequest(), - parent="parent_value", - backup_id="backup_id_value", + client.update_backup( + bigtable_table_admin.UpdateBackupRequest(), backup=table.Backup(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) -def test_create_backup_rest_error(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.GetBackupRequest, - dict, - ], -) -def test_get_backup_rest(request_type): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" - } - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = table.Backup( - name="name_value", - source_table="source_table_value", - source_backup="source_backup_value", - size_bytes=1089, - state=table.Backup.State.CREATING, - backup_type=table.Backup.BackupType.STANDARD, - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = table.Backup.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.get_backup(request) - - # Establish that the response is the type that we expect. 
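
The path_template.validate assertions above check that the URI actually sent matches the method's http rule, for example "%s/v2/{backup.name=projects/*/instances/*/clusters/*/backups/*}". A simplified sketch of what such a check does, turning the template's variable pattern into a regex where each "*" matches one path segment (the host value is an assumption, and literal parts are not regex-escaped for brevity):

    import re

    def validate(tmpl, path):
        """Rough stand-in for path_template.validate: does path match tmpl?"""
        def variable_to_regex(match):
            var = match.group(1)
            pattern = var.split("=", 1)[1] if "=" in var else "*"
            return pattern.replace("*", "[^/]+")
        regex = re.sub(r"\{([^}]+)\}", variable_to_regex, tmpl)
        return re.fullmatch(regex, path) is not None

    host = "https://bigtableadmin.googleapis.com"  # illustrative host
    template = "%s/v2/{name=projects/*/instances/*/clusters/*/backups/*}" % host
    assert validate(template, host + "/v2/projects/p/instances/i/clusters/c/backups/b")
    assert not validate(template, host + "/v2/projects/p/instances/i/clusters/c")
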
- assert isinstance(response, table.Backup) - assert response.name == "name_value" - assert response.source_table == "source_table_value" - assert response.source_backup == "source_backup_value" - assert response.size_bytes == 1089 - assert response.state == table.Backup.State.CREATING - assert response.backup_type == table.Backup.BackupType.STANDARD - - -def test_get_backup_rest_use_cached_wrapped_rpc(): +def test_delete_backup_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -20466,30 +16470,30 @@ def test_get_backup_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.get_backup in client._transport._wrapped_methods + assert client._transport.delete_backup in client._transport._wrapped_methods # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[client._transport.get_backup] = mock_rpc + client._transport._wrapped_methods[client._transport.delete_backup] = mock_rpc request = {} - client.get_backup(request) + client.delete_backup(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - client.get_backup(request) + client.delete_backup(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_get_backup_rest_required_fields( - request_type=bigtable_table_admin.GetBackupRequest, +def test_delete_backup_rest_required_fields( + request_type=bigtable_table_admin.DeleteBackupRequest, ): transport_class = transports.BigtableTableAdminRestTransport @@ -20505,7 +16509,7 @@ def test_get_backup_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).get_backup._get_unset_required_fields(jsonified_request) + ).delete_backup._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present @@ -20514,7 +16518,7 @@ def test_get_backup_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).get_backup._get_unset_required_fields(jsonified_request) + ).delete_backup._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone @@ -20528,7 +16532,7 @@ def test_get_backup_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = table.Backup() + return_value = None # Mock the http request call within the method and fake a response. 
with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -20540,119 +16544,35 @@ def test_get_backup_rest_required_fields( pb_request = request_type.pb(request) transcode_result = { "uri": "v1/sample_method", - "method": "get", + "method": "delete", "query_params": pb_request, } transcode.return_value = transcode_result response_value = Response() response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = table.Backup.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) + json_return_value = "" response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.get_backup(request) + response = client.delete_backup(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_get_backup_rest_unset_required_fields(): +def test_delete_backup_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.get_backup._get_unset_required_fields({}) + unset_fields = transport.delete_backup._get_unset_required_fields({}) assert set(unset_fields) == (set(()) & set(("name",))) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_backup_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_get_backup" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_get_backup" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_table_admin.GetBackupRequest.pb( - bigtable_table_admin.GetBackupRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = table.Backup.to_json(table.Backup()) - - request = bigtable_table_admin.GetBackupRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = table.Backup() - - client.get_backup( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - - -def test_get_backup_rest_bad_request( - transport: str = "rest", request_type=bigtable_table_admin.GetBackupRequest -): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" - } - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
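
Throughout these hunks the HTTP layer is faked the same way: requests.Session.request is patched, and a Response is built by hand with a status_code and a serialized body stuffed into _content. A minimal standalone illustration of that mocking technique; fetch_backup is a made-up caller, not part of the generated client:

    import json
    from unittest import mock

    from requests import Response, Session

    def fetch_backup(session, name):
        """Hypothetical caller that issues a GET and decodes the JSON body."""
        resp = session.request("GET", "https://example.com/v2/" + name)
        return json.loads(resp.content)

    with mock.patch.object(Session, "request") as req:
        # Build a fake Response the same way the tests do: set the status code
        # and place the encoded body directly on _content.
        response_value = Response()
        response_value.status_code = 200
        response_value._content = json.dumps({"name": "name_value"}).encode("UTF-8")
        req.return_value = response_value

        backup = fetch_backup(Session(), "projects/p/instances/i/clusters/c/backups/b")
        assert backup == {"name": "name_value"}
        assert req.call_count == 1
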
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.get_backup(request) - - -def test_get_backup_rest_flattened(): +def test_delete_backup_rest_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -20661,7 +16581,7 @@ def test_get_backup_rest_flattened(): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = table.Backup() + return_value = None # get arguments that satisfy an http rule for this method sample_request = { @@ -20677,13 +16597,11 @@ def test_get_backup_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - # Convert return value to protobuf type - return_value = table.Backup.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) + json_return_value = "" response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - client.get_backup(**mock_args) + client.delete_backup(**mock_args) # Establish that the underlying call was made with the expected # request object values. @@ -20696,7 +16614,7 @@ def test_get_backup_rest_flattened(): ) -def test_get_backup_rest_flattened_error(transport: str = "rest"): +def test_delete_backup_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -20705,207 +16623,55 @@ def test_get_backup_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.get_backup( - bigtable_table_admin.GetBackupRequest(), + client.delete_backup( + bigtable_table_admin.DeleteBackupRequest(), name="name_value", ) -def test_get_backup_rest_error(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.UpdateBackupRequest, - dict, - ], -) -def test_update_backup_rest(request_type): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = { - "backup": { - "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" - } - } - request_init["backup"] = { - "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4", - "source_table": "source_table_value", - "source_backup": "source_backup_value", - "expire_time": {"seconds": 751, "nanos": 543}, - "start_time": {}, - "end_time": {}, - "size_bytes": 1089, - "state": 1, - "encryption_info": { - "encryption_type": 1, - "encryption_status": { - "code": 411, - "message": "message_value", - "details": [ - { - "type_url": "type.googleapis.com/google.protobuf.Duration", - "value": b"\x08\x0c\x10\xdb\x07", - } - ], - }, - "kms_key_version": "kms_key_version_value", - }, - "backup_type": 1, - "hot_to_standard_time": {}, - } - # The version of a generated dependency at test runtime may differ from the version used during generation. 
- # Delete any fields which are not present in the current runtime dependency - # See https://github.com/googleapis/gapic-generator-python/issues/1748 - - # Determine if the message type is proto-plus or protobuf - test_field = bigtable_table_admin.UpdateBackupRequest.meta.fields["backup"] - - def get_message_fields(field): - # Given a field which is a message (composite type), return a list with - # all the fields of the message. - # If the field is not a composite type, return an empty list. - message_fields = [] - - if hasattr(field, "message") and field.message: - is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") - - if is_field_type_proto_plus_type: - message_fields = field.message.meta.fields.values() - # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types - else: # pragma: NO COVER - message_fields = field.message.DESCRIPTOR.fields - return message_fields - - runtime_nested_fields = [ - (field.name, nested_field.name) - for field in get_message_fields(test_field) - for nested_field in get_message_fields(field) - ] - - subfields_not_in_runtime = [] - - # For each item in the sample request, create a list of sub fields which are not present at runtime - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["backup"].items(): # pragma: NO COVER - result = None - is_repeated = False - # For repeated fields - if isinstance(value, list) and len(value): - is_repeated = True - result = value[0] - # For fields where the type is another message - if isinstance(value, dict): - result = value - - if result and hasattr(result, "keys"): - for subfield in result.keys(): - if (field, subfield) not in runtime_nested_fields: - subfields_not_in_runtime.append( - { - "field": field, - "subfield": subfield, - "is_repeated": is_repeated, - } - ) - - # Remove fields from the sample request which are not present in the runtime version of the dependency - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER - field = subfield_to_delete.get("field") - field_repeated = subfield_to_delete.get("is_repeated") - subfield = subfield_to_delete.get("subfield") - if subfield: - if field_repeated: - for i in range(0, len(request_init["backup"][field])): - del request_init["backup"][field][i][subfield] - else: - del request_init["backup"][field][subfield] - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = table.Backup( - name="name_value", - source_table="source_table_value", - source_backup="source_backup_value", - size_bytes=1089, - state=table.Backup.State.CREATING, - backup_type=table.Backup.BackupType.STANDARD, - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = table.Backup.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.update_backup(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, table.Backup) - assert response.name == "name_value" - assert response.source_table == "source_table_value" - assert response.source_backup == "source_backup_value" - assert response.size_bytes == 1089 - assert response.state == table.Backup.State.CREATING - assert response.backup_type == table.Backup.BackupType.STANDARD - - -def test_update_backup_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) +def test_list_backups_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) # Should wrap all calls on client creation assert wrapper_fn.call_count > 0 wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.update_backup in client._transport._wrapped_methods + assert client._transport.list_backups in client._transport._wrapped_methods # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[client._transport.update_backup] = mock_rpc + client._transport._wrapped_methods[client._transport.list_backups] = mock_rpc request = {} - client.update_backup(request) + client.list_backups(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - client.update_backup(request) + client.list_backups(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_update_backup_rest_required_fields( - request_type=bigtable_table_admin.UpdateBackupRequest, +def test_list_backups_rest_required_fields( + request_type=bigtable_table_admin.ListBackupsRequest, ): transport_class = transports.BigtableTableAdminRestTransport request_init = {} + request_init["parent"] = "" request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( @@ -20916,19 +16682,30 @@ def test_update_backup_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).update_backup._get_unset_required_fields(jsonified_request) + ).list_backups._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present + jsonified_request["parent"] = "parent_value" + unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).update_backup._get_unset_required_fields(jsonified_request) + ).list_backups._get_unset_required_fields(jsonified_request) # Check that path parameters and body parameters are not mixing in. 
- assert not set(unset_fields) - set(("update_mask",)) + assert not set(unset_fields) - set( + ( + "filter", + "order_by", + "page_size", + "page_token", + ) + ) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -20937,7 +16714,7 @@ def test_update_backup_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = table.Backup() + return_value = bigtable_table_admin.ListBackupsResponse() # Mock the http request call within the method and fake a response. with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -20949,151 +16726,66 @@ def test_update_backup_rest_required_fields( pb_request = request_type.pb(request) transcode_result = { "uri": "v1/sample_method", - "method": "patch", + "method": "get", "query_params": pb_request, } - transcode_result["body"] = pb_request transcode.return_value = transcode_result response_value = Response() response_value.status_code = 200 # Convert return value to protobuf type - return_value = table.Backup.pb(return_value) + return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.update_backup(request) + response = client.list_backups(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_update_backup_rest_unset_required_fields(): +def test_list_backups_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.update_backup._get_unset_required_fields({}) + unset_fields = transport.list_backups._get_unset_required_fields({}) assert set(unset_fields) == ( - set(("updateMask",)) - & set( + set( ( - "backup", - "updateMask", + "filter", + "orderBy", + "pageSize", + "pageToken", ) ) + & set(("parent",)) ) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_update_backup_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( +def test_list_backups_rest_flattened(): + client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), + transport="rest", ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_update_backup" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_update_backup" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_table_admin.UpdateBackupRequest.pb( - bigtable_table_admin.UpdateBackupRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = 
PreparedRequest() - req.return_value._content = table.Backup.to_json(table.Backup()) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.ListBackupsResponse() - request = bigtable_table_admin.UpdateBackupRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = table.Backup() - - client.update_backup( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - - -def test_update_backup_rest_bad_request( - transport: str = "rest", request_type=bigtable_table_admin.UpdateBackupRequest -): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = { - "backup": { - "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" - } - } - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.update_backup(request) - - -def test_update_backup_rest_flattened(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = table.Backup() - - # get arguments that satisfy an http rule for this method - sample_request = { - "backup": { - "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" - } - } + # get arguments that satisfy an http rule for this method + sample_request = { + "parent": "projects/sample1/instances/sample2/clusters/sample3" + } # get truthy value for each flattened field mock_args = dict( - backup=table.Backup(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + parent="parent_value", ) mock_args.update(sample_request) @@ -21101,25 +16793,25 @@ def test_update_backup_rest_flattened(): response_value = Response() response_value.status_code = 200 # Convert return value to protobuf type - return_value = table.Backup.pb(return_value) + return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - client.update_backup(**mock_args) + client.list_backups(**mock_args) # Establish that the underlying call was made with the expected # request object values. 
assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{backup.name=projects/*/instances/*/clusters/*/backups/*}" + "%s/v2/{parent=projects/*/instances/*/clusters/*}/backups" % client.transport._host, args[1], ) -def test_update_backup_rest_flattened_error(transport: str = "rest"): +def test_list_backups_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -21128,57 +16820,78 @@ def test_update_backup_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.update_backup( - bigtable_table_admin.UpdateBackupRequest(), - backup=table.Backup(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + client.list_backups( + bigtable_table_admin.ListBackupsRequest(), + parent="parent_value", ) -def test_update_backup_rest_error(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.DeleteBackupRequest, - dict, - ], -) -def test_delete_backup_rest(request_type): +def test_list_backups_rest_pager(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport=transport, ) - # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" - } - request = request_type(**request_init) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = None + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + bigtable_table_admin.ListBackupsResponse( + backups=[ + table.Backup(), + table.Backup(), + table.Backup(), + ], + next_page_token="abc", + ), + bigtable_table_admin.ListBackupsResponse( + backups=[], + next_page_token="def", + ), + bigtable_table_admin.ListBackupsResponse( + backups=[ + table.Backup(), + ], + next_page_token="ghi", + ), + bigtable_table_admin.ListBackupsResponse( + backups=[ + table.Backup(), + table.Backup(), + ], + ), + ) + # Two responses for two calls + response = response + response - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = "" + # Wrap the values into proper Response objs + response = tuple( + bigtable_table_admin.ListBackupsResponse.to_json(x) for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.delete_backup(request) + sample_request = { + "parent": "projects/sample1/instances/sample2/clusters/sample3" + } - # Establish that the response is the type that we expect. 
- assert response is None + pager = client.list_backups(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, table.Backup) for i in results) + + pages = list(client.list_backups(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token -def test_delete_backup_rest_use_cached_wrapped_rpc(): +def test_restore_table_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -21192,35 +16905,40 @@ def test_delete_backup_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.delete_backup in client._transport._wrapped_methods + assert client._transport.restore_table in client._transport._wrapped_methods # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[client._transport.delete_backup] = mock_rpc + client._transport._wrapped_methods[client._transport.restore_table] = mock_rpc request = {} - client.delete_backup(request) + client.restore_table(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - client.delete_backup(request) + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.restore_table(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_delete_backup_rest_required_fields( - request_type=bigtable_table_admin.DeleteBackupRequest, +def test_restore_table_rest_required_fields( + request_type=bigtable_table_admin.RestoreTableRequest, ): transport_class = transports.BigtableTableAdminRestTransport request_init = {} - request_init["name"] = "" + request_init["parent"] = "" + request_init["table_id"] = "" request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( @@ -21231,21 +16949,24 @@ def test_delete_backup_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).delete_backup._get_unset_required_fields(jsonified_request) + ).restore_table._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present - jsonified_request["name"] = "name_value" + jsonified_request["parent"] = "parent_value" + jsonified_request["tableId"] = "table_id_value" unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).delete_backup._get_unset_required_fields(jsonified_request) + ).restore_table._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + assert "tableId" in jsonified_request + assert jsonified_request["tableId"] == "table_id_value" client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -21254,7 +16975,7 @@ def 
test_delete_backup_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = None + return_value = operations_pb2.Operation(name="operations/spam") # Mock the http request call within the method and fake a response. with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -21266,110 +16987,183 @@ def test_delete_backup_rest_required_fields( pb_request = request_type.pb(request) transcode_result = { "uri": "v1/sample_method", - "method": "delete", + "method": "post", "query_params": pb_request, } + transcode_result["body"] = pb_request transcode.return_value = transcode_result response_value = Response() response_value.status_code = 200 - json_return_value = "" + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.delete_backup(request) + response = client.restore_table(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_delete_backup_rest_unset_required_fields(): +def test_restore_table_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.delete_backup._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) + unset_fields = transport.restore_table._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "parent", + "tableId", + ) + ) + ) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_delete_backup_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_delete_backup" - ) as pre: - pre.assert_not_called() - pb_message = bigtable_table_admin.DeleteBackupRequest.pb( - bigtable_table_admin.DeleteBackupRequest() +def test_copy_backup_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - request = bigtable_table_admin.DeleteBackupRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata + # Ensure method has been cached + assert client._transport.copy_backup in client._transport._wrapped_methods - client.delete_backup( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], + # 
Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. ) + client._transport._wrapped_methods[client._transport.copy_backup] = mock_rpc - pre.assert_called_once() + request = {} + client.copy_backup(request) + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 -def test_delete_backup_rest_bad_request( - transport: str = "rest", request_type=bigtable_table_admin.DeleteBackupRequest + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.copy_backup(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_copy_backup_rest_required_fields( + request_type=bigtable_table_admin.CopyBackupRequest, ): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request_init["backup_id"] = "" + request_init["source_backup"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).copy_backup._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + jsonified_request["backupId"] = "backup_id_value" + jsonified_request["sourceBackup"] = "source_backup_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).copy_backup._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + assert "backupId" in jsonified_request + assert jsonified_request["backupId"] == "backup_id_value" + assert "sourceBackup" in jsonified_request + assert jsonified_request["sourceBackup"] == "source_backup_value" + client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + transport="rest", ) - - # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" - } request = request_type(**request_init) - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.delete_backup(request) + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. 
+ with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) -def test_delete_backup_rest_flattened(): + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.copy_backup(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_copy_backup_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.copy_backup._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "parent", + "backupId", + "sourceBackup", + "expireTime", + ) + ) + ) + + +def test_copy_backup_rest_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -21378,40 +17172,43 @@ def test_delete_backup_rest_flattened(): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = None + return_value = operations_pb2.Operation(name="operations/spam") # get arguments that satisfy an http rule for this method sample_request = { - "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" + "parent": "projects/sample1/instances/sample2/clusters/sample3" } # get truthy value for each flattened field mock_args = dict( - name="name_value", + parent="parent_value", + backup_id="backup_id_value", + source_backup="source_backup_value", + expire_time=timestamp_pb2.Timestamp(seconds=751), ) mock_args.update(sample_request) # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - json_return_value = "" + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - client.delete_backup(**mock_args) + client.copy_backup(**mock_args) # Establish that the underlying call was made with the expected # request object values. assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/clusters/*/backups/*}" + "%s/v2/{parent=projects/*/instances/*/clusters/*}/backups:copy" % client.transport._host, args[1], ) -def test_delete_backup_rest_flattened_error(transport: str = "rest"): +def test_copy_backup_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -21420,59 +17217,16 @@ def test_delete_backup_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): - client.delete_backup( - bigtable_table_admin.DeleteBackupRequest(), - name="name_value", - ) - - -def test_delete_backup_rest_error(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.ListBackupsRequest, - dict, - ], -) -def test_list_backups_rest(request_type): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.ListBackupsResponse( - next_page_token="next_page_token_value", + client.copy_backup( + bigtable_table_admin.CopyBackupRequest(), + parent="parent_value", + backup_id="backup_id_value", + source_backup="source_backup_value", + expire_time=timestamp_pb2.Timestamp(seconds=751), ) - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.list_backups(request) - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListBackupsPager) - assert response.next_page_token == "next_page_token_value" - -def test_list_backups_rest_use_cached_wrapped_rpc(): +def test_get_iam_policy_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -21486,37 +17240,37 @@ def test_list_backups_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.list_backups in client._transport._wrapped_methods + assert client._transport.get_iam_policy in client._transport._wrapped_methods # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[client._transport.list_backups] = mock_rpc + client._transport._wrapped_methods[client._transport.get_iam_policy] = mock_rpc request = {} - client.list_backups(request) + client.get_iam_policy(request) # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 - client.list_backups(request) + client.get_iam_policy(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_list_backups_rest_required_fields( - request_type=bigtable_table_admin.ListBackupsRequest, +def test_get_iam_policy_rest_required_fields( + request_type=iam_policy_pb2.GetIamPolicyRequest, ): transport_class = transports.BigtableTableAdminRestTransport request_init = {} - request_init["parent"] = "" + request_init["resource"] = "" request = request_type(**request_init) - pb_request = request_type.pb(request) + pb_request = request jsonified_request = json.loads( json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) @@ -21525,30 +17279,21 @@ def test_list_backups_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).list_backups._get_unset_required_fields(jsonified_request) + ).get_iam_policy._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present - jsonified_request["parent"] = "parent_value" + jsonified_request["resource"] = "resource_value" unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).list_backups._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set( - ( - "filter", - "order_by", - "page_size", - "page_token", - ) - ) + ).get_iam_policy._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" + assert "resource" in jsonified_request + assert jsonified_request["resource"] == "resource_value" client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -21557,7 +17302,7 @@ def test_list_backups_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.ListBackupsResponse() + return_value = policy_pb2.Policy() # Mock the http request call within the method and fake a response. with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -21566,132 +17311,40 @@ def test_list_backups_rest_required_fields( with mock.patch.object(path_template, "transcode") as transcode: # A uri without fields and an empty body will force all the # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) + pb_request = request transcode_result = { "uri": "v1/sample_method", - "method": "get", + "method": "post", "query_params": pb_request, } + transcode_result["body"] = pb_request transcode.return_value = transcode_result response_value = Response() response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.list_backups(request) + response = client.get_iam_policy(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_list_backups_rest_unset_required_fields(): +def test_get_iam_policy_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.list_backups._get_unset_required_fields({}) - assert set(unset_fields) == ( - set( - ( - "filter", - "orderBy", - "pageSize", - "pageToken", - ) - ) - & set(("parent",)) - ) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_backups_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_list_backups" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_list_backups" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_table_admin.ListBackupsRequest.pb( - bigtable_table_admin.ListBackupsRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = bigtable_table_admin.ListBackupsResponse.to_json( - bigtable_table_admin.ListBackupsResponse() - ) - - request = bigtable_table_admin.ListBackupsRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable_table_admin.ListBackupsResponse() - - client.list_backups( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - - -def test_list_backups_rest_bad_request( - transport: str = "rest", request_type=bigtable_table_admin.ListBackupsRequest -): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.list_backups(request) + unset_fields = transport.get_iam_policy._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("resource",))) -def test_list_backups_rest_flattened(): +def test_get_iam_policy_rest_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -21700,42 +17353,40 @@ def test_list_backups_rest_flattened(): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.ListBackupsResponse() + return_value = policy_pb2.Policy() # get arguments that satisfy an http rule for this method sample_request = { - "parent": "projects/sample1/instances/sample2/clusters/sample3" + "resource": "projects/sample1/instances/sample2/tables/sample3" } # get truthy value for each flattened field mock_args = dict( - parent="parent_value", + resource="resource_value", ) mock_args.update(sample_request) # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - client.list_backups(**mock_args) + client.get_iam_policy(**mock_args) # Establish that the underlying call was made with the expected # request object values. assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*/clusters/*}/backups" + "%s/v2/{resource=projects/*/instances/*/tables/*}:getIamPolicy" % client.transport._host, args[1], ) -def test_list_backups_rest_flattened_error(transport: str = "rest"): +def test_get_iam_policy_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -21744,113 +17395,13 @@ def test_list_backups_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.list_backups( - bigtable_table_admin.ListBackupsRequest(), - parent="parent_value", - ) - - -def test_list_backups_rest_pager(transport: str = "rest"): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # TODO(kbandes): remove this mock unless there's a good reason for it. 
- # with mock.patch.object(path_template, 'transcode') as transcode: - # Set the response as a series of pages - response = ( - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - table.Backup(), - table.Backup(), - ], - next_page_token="abc", - ), - bigtable_table_admin.ListBackupsResponse( - backups=[], - next_page_token="def", - ), - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - ], - next_page_token="ghi", - ), - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - table.Backup(), - ], - ), - ) - # Two responses for two calls - response = response + response - - # Wrap the values into proper Response objs - response = tuple( - bigtable_table_admin.ListBackupsResponse.to_json(x) for x in response + client.get_iam_policy( + iam_policy_pb2.GetIamPolicyRequest(), + resource="resource_value", ) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = response_val.encode("UTF-8") - return_val.status_code = 200 - req.side_effect = return_values - - sample_request = { - "parent": "projects/sample1/instances/sample2/clusters/sample3" - } - - pager = client.list_backups(request=sample_request) - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, table.Backup) for i in results) - - pages = list(client.list_backups(request=sample_request).pages) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.RestoreTableRequest, - dict, - ], -) -def test_restore_table_rest(request_type): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.restore_table(request) - - # Establish that the response is the type that we expect. - assert response.operation.name == "operations/spam" -def test_restore_table_rest_use_cached_wrapped_rpc(): +def test_set_iam_policy_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -21864,42 +17415,37 @@ def test_restore_table_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.restore_table in client._transport._wrapped_methods + assert client._transport.set_iam_policy in client._transport._wrapped_methods # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. 
) - client._transport._wrapped_methods[client._transport.restore_table] = mock_rpc + client._transport._wrapped_methods[client._transport.set_iam_policy] = mock_rpc request = {} - client.restore_table(request) + client.set_iam_policy(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.restore_table(request) + client.set_iam_policy(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_restore_table_rest_required_fields( - request_type=bigtable_table_admin.RestoreTableRequest, +def test_set_iam_policy_rest_required_fields( + request_type=iam_policy_pb2.SetIamPolicyRequest, ): transport_class = transports.BigtableTableAdminRestTransport request_init = {} - request_init["parent"] = "" - request_init["table_id"] = "" + request_init["resource"] = "" request = request_type(**request_init) - pb_request = request_type.pb(request) + pb_request = request jsonified_request = json.loads( json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) @@ -21908,24 +17454,21 @@ def test_restore_table_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).restore_table._get_unset_required_fields(jsonified_request) + ).set_iam_policy._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present - jsonified_request["parent"] = "parent_value" - jsonified_request["tableId"] = "table_id_value" + jsonified_request["resource"] = "resource_value" unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).restore_table._get_unset_required_fields(jsonified_request) + ).set_iam_policy._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - assert "tableId" in jsonified_request - assert jsonified_request["tableId"] == "table_id_value" + assert "resource" in jsonified_request + assert jsonified_request["resource"] == "resource_value" client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -21934,7 +17477,7 @@ def test_restore_table_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") + return_value = policy_pb2.Policy() # Mock the http request call within the method and fake a response. with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -21943,7 +17486,7 @@ def test_restore_table_rest_required_fields( with mock.patch.object(path_template, "transcode") as transcode: # A uri without fields and an empty body will force all the # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) + pb_request = request transcode_result = { "uri": "v1/sample_method", "method": "post", @@ -21954,160 +17497,94 @@ def test_restore_table_rest_required_fields( response_value = Response() response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.restore_table(request) + response = client.set_iam_policy(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_restore_table_rest_unset_required_fields(): +def test_set_iam_policy_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.restore_table._get_unset_required_fields({}) + unset_fields = transport.set_iam_policy._get_unset_required_fields({}) assert set(unset_fields) == ( set(()) & set( ( - "parent", - "tableId", + "resource", + "policy", ) ) ) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_restore_table_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( +def test_set_iam_policy_rest_flattened(): + client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), + transport="rest", ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_restore_table" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_restore_table" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_table_admin.RestoreTableRequest.pb( - bigtable_table_admin.RestoreTableRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson( - operations_pb2.Operation() - ) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = policy_pb2.Policy() - request = bigtable_table_admin.RestoreTableRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() + # get arguments that satisfy an http rule for this method + sample_request = { + "resource": "projects/sample1/instances/sample2/tables/sample3" + } - client.restore_table( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], + # get truthy value for each flattened field + mock_args = dict( + resource="resource_value", ) + mock_args.update(sample_request) - pre.assert_called_once() - post.assert_called_once() - - -def test_restore_table_rest_bad_request( - transport: str = "rest", request_type=bigtable_table_admin.RestoreTableRequest -): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): # Wrap the value into a proper Response obj response_value = Response() - response_value.status_code = 400 - response_value.request = Request() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - client.restore_table(request) + client.set_iam_policy(**mock_args) -def test_restore_table_rest_error(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{resource=projects/*/instances/*/tables/*}:setIamPolicy" + % client.transport._host, + args[1], + ) -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.CopyBackupRequest, - dict, - ], -) -def test_copy_backup_rest(request_type): +def test_set_iam_policy_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport=transport, ) - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.copy_backup(request) - - # Establish that the response is the type that we expect. - assert response.operation.name == "operations/spam" + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.set_iam_policy( + iam_policy_pb2.SetIamPolicyRequest(), + resource="resource_value", + ) -def test_copy_backup_rest_use_cached_wrapped_rpc(): +def test_test_iam_permissions_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -22121,43 +17598,42 @@ def test_copy_backup_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.copy_backup in client._transport._wrapped_methods + assert ( + client._transport.test_iam_permissions in client._transport._wrapped_methods + ) # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[client._transport.copy_backup] = mock_rpc + client._transport._wrapped_methods[ + client._transport.test_iam_permissions + ] = mock_rpc request = {} - client.copy_backup(request) + client.test_iam_permissions(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.copy_backup(request) + client.test_iam_permissions(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_copy_backup_rest_required_fields( - request_type=bigtable_table_admin.CopyBackupRequest, +def test_test_iam_permissions_rest_required_fields( + request_type=iam_policy_pb2.TestIamPermissionsRequest, ): transport_class = transports.BigtableTableAdminRestTransport request_init = {} - request_init["parent"] = "" - request_init["backup_id"] = "" - request_init["source_backup"] = "" + request_init["resource"] = "" + request_init["permissions"] = "" request = request_type(**request_init) - pb_request = request_type.pb(request) + pb_request = request jsonified_request = json.loads( json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) @@ -22166,27 +17642,24 @@ def test_copy_backup_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).copy_backup._get_unset_required_fields(jsonified_request) + ).test_iam_permissions._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present - jsonified_request["parent"] = "parent_value" - jsonified_request["backupId"] = "backup_id_value" - jsonified_request["sourceBackup"] = "source_backup_value" + jsonified_request["resource"] = "resource_value" + jsonified_request["permissions"] = "permissions_value" unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).copy_backup._get_unset_required_fields(jsonified_request) + ).test_iam_permissions._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - assert "backupId" in jsonified_request - assert jsonified_request["backupId"] == "backup_id_value" - assert "sourceBackup" in jsonified_request - assert jsonified_request["sourceBackup"] == "source_backup_value" + 
assert "resource" in jsonified_request + assert jsonified_request["resource"] == "resource_value" + assert "permissions" in jsonified_request + assert jsonified_request["permissions"] == "permissions_value" client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -22195,7 +17668,7 @@ def test_copy_backup_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") + return_value = iam_policy_pb2.TestIamPermissionsResponse() # Mock the http request call within the method and fake a response. with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -22204,7 +17677,7 @@ def test_copy_backup_rest_required_fields( with mock.patch.object(path_template, "transcode") as transcode: # A uri without fields and an empty body will force all the # request fields to show up in the query_params. - pb_request = request_type.pb(request) + pb_request = request transcode_result = { "uri": "v1/sample_method", "method": "post", @@ -22215,144 +17688,58 @@ def test_copy_backup_rest_required_fields( response_value = Response() response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.copy_backup(request) + response = client.test_iam_permissions(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_copy_backup_rest_unset_required_fields(): +def test_test_iam_permissions_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.copy_backup._get_unset_required_fields({}) + unset_fields = transport.test_iam_permissions._get_unset_required_fields({}) assert set(unset_fields) == ( set(()) & set( ( - "parent", - "backupId", - "sourceBackup", - "expireTime", + "resource", + "permissions", ) ) ) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_copy_backup_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( +def test_test_iam_permissions_rest_flattened(): + client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), + transport="rest", ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_copy_backup" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_copy_backup" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_table_admin.CopyBackupRequest.pb( - bigtable_table_admin.CopyBackupRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = iam_policy_pb2.TestIamPermissionsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = { + "resource": "projects/sample1/instances/sample2/tables/sample3" } - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson( - operations_pb2.Operation() + # get truthy value for each flattened field + mock_args = dict( + resource="resource_value", + permissions=["permissions_value"], ) - - request = bigtable_table_admin.CopyBackupRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - - client.copy_backup( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - - -def test_copy_backup_rest_bad_request( - transport: str = "rest", request_type=bigtable_table_admin.CopyBackupRequest -): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.copy_backup(request) - - -def test_copy_backup_rest_flattened(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # get arguments that satisfy an http rule for this method - sample_request = { - "parent": "projects/sample1/instances/sample2/clusters/sample3" - } - - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - backup_id="backup_id_value", - source_backup="source_backup_value", - expire_time=timestamp_pb2.Timestamp(seconds=751), - ) - mock_args.update(sample_request) + mock_args.update(sample_request) # Wrap the value into a proper Response obj response_value = Response() @@ -22361,20 +17748,20 @@ def test_copy_backup_rest_flattened(): response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value - client.copy_backup(**mock_args) + client.test_iam_permissions(**mock_args) # Establish that the underlying call was made with the expected # request object values. 
assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*/clusters/*}/backups:copy" + "%s/v2/{resource=projects/*/instances/*/tables/*}:testIamPermissions" % client.transport._host, args[1], ) -def test_copy_backup_rest_flattened_error(transport: str = "rest"): +def test_test_iam_permissions_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -22383,177 +17770,5303 @@ def test_copy_backup_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.copy_backup( - bigtable_table_admin.CopyBackupRequest(), - parent="parent_value", - backup_id="backup_id_value", - source_backup="source_backup_value", - expire_time=timestamp_pb2.Timestamp(seconds=751), + client.test_iam_permissions( + iam_policy_pb2.TestIamPermissionsRequest(), + resource="resource_value", + permissions=["permissions_value"], ) -def test_copy_backup_rest_error(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.BigtableTableAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), ) + with pytest.raises(ValueError): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) - -@pytest.mark.parametrize( - "request_type", - [ - iam_policy_pb2.GetIamPolicyRequest, - dict, - ], -) -def test_get_iam_policy_rest(request_type): - client = BigtableTableAdminClient( + # It is an error to provide a credentials file and a transport instance. + transport = transports.BigtableTableAdminGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", ) - - # send a request that will satisfy transcoding - request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = policy_pb2.Policy( - version=774, - etag=b"etag_blob", + with pytest.raises(ValueError): + client = BigtableTableAdminClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, ) - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.get_iam_policy(request) - - # Establish that the response is the type that we expect. - assert isinstance(response, policy_pb2.Policy) - assert response.version == 774 - assert response.etag == b"etag_blob" + # It is an error to provide an api_key and a transport instance. 
+ transport = transports.BigtableTableAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = BigtableTableAdminClient( + client_options=options, + transport=transport, + ) + # It is an error to provide an api_key and a credential. + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = BigtableTableAdminClient( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) -def test_get_iam_policy_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + # It is an error to provide scopes and a transport instance. + transport = transports.BigtableTableAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + client_options={"scopes": ["1", "2"]}, + transport=transport, ) - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - # Ensure method has been cached - assert client._transport.get_iam_policy in client._transport._wrapped_methods +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.BigtableTableAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = BigtableTableAdminClient(transport=transport) + assert client.transport is transport - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.get_iam_policy] = mock_rpc - request = {} - client.get_iam_policy(request) +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.BigtableTableAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 + transport = transports.BigtableTableAdminGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel - client.get_iam_policy(request) - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 +@pytest.mark.parametrize( + "transport_class", + [ + transports.BigtableTableAdminGrpcTransport, + transports.BigtableTableAdminGrpcAsyncIOTransport, + transports.BigtableTableAdminRestTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. 
+ with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() -def test_get_iam_policy_rest_required_fields( - request_type=iam_policy_pb2.GetIamPolicyRequest, -): - transport_class = transports.BigtableTableAdminRestTransport +def test_transport_kind_grpc(): + transport = BigtableTableAdminClient.get_transport_class("grpc")( + credentials=ga_credentials.AnonymousCredentials() + ) + assert transport.kind == "grpc" - request_init = {} - request_init["resource"] = "" - request = request_type(**request_init) - pb_request = request - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) + +def test_initialize_client_w_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc" ) + assert client is not None - # verify fields with default values are dropped - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_iam_policy._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_table_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) - # verify required fields with default values are now present + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.create_table), "__call__") as call: + call.return_value = gba_table.Table() + client.create_table(request=None) - jsonified_request["resource"] = "resource_value" + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CreateTableRequest() - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_iam_policy._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + assert args[0] == request_msg - # verify required fields with non-default values are left alone - assert "resource" in jsonified_request - assert jsonified_request["resource"] == "resource_value" +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_table_from_snapshot_empty_call_grpc(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport="grpc", ) - request = request_type(**request_init) - # Designate an appropriate value for the returned response. - return_value = policy_pb2.Policy() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result + # Mock the actual call, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_table_from_snapshot), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_table_from_snapshot(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CreateTableFromSnapshotRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_tables_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_tables), "__call__") as call: + call.return_value = bigtable_table_admin.ListTablesResponse() + client.list_tables(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.ListTablesRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_table_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_table), "__call__") as call: + call.return_value = table.Table() + client.get_table(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.GetTableRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_update_table_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.update_table), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.update_table(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.UpdateTableRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_table_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.delete_table), "__call__") as call: + call.return_value = None + client.delete_table(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.DeleteTableRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+def test_undelete_table_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.undelete_table), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.undelete_table(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.UndeleteTableRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_authorized_view_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_authorized_view), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_authorized_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CreateAuthorizedViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_authorized_views_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_authorized_views), "__call__" + ) as call: + call.return_value = bigtable_table_admin.ListAuthorizedViewsResponse() + client.list_authorized_views(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.ListAuthorizedViewsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_authorized_view_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.get_authorized_view), "__call__" + ) as call: + call.return_value = table.AuthorizedView() + client.get_authorized_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.GetAuthorizedViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_update_authorized_view_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. 
+ with mock.patch.object( + type(client.transport.update_authorized_view), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.update_authorized_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.UpdateAuthorizedViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_authorized_view_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.delete_authorized_view), "__call__" + ) as call: + call.return_value = None + client.delete_authorized_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.DeleteAuthorizedViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_modify_column_families_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.modify_column_families), "__call__" + ) as call: + call.return_value = table.Table() + client.modify_column_families(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.ModifyColumnFamiliesRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_drop_row_range_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: + call.return_value = None + client.drop_row_range(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.DropRowRangeRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_generate_consistency_token_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.generate_consistency_token), "__call__" + ) as call: + call.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() + client.generate_consistency_token(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.GenerateConsistencyTokenRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. 
request == None and no flattened fields passed, work. +def test_check_consistency_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.check_consistency), "__call__" + ) as call: + call.return_value = bigtable_table_admin.CheckConsistencyResponse() + client.check_consistency(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CheckConsistencyRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_snapshot_table_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.snapshot_table(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.SnapshotTableRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_snapshot_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: + call.return_value = table.Snapshot() + client.get_snapshot(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.GetSnapshotRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_snapshots_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: + call.return_value = bigtable_table_admin.ListSnapshotsResponse() + client.list_snapshots(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.ListSnapshotsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_snapshot_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: + call.return_value = None + client.delete_snapshot(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.DeleteSnapshotRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_backup_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.create_backup), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_backup(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CreateBackupRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_backup_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_backup), "__call__") as call: + call.return_value = table.Backup() + client.get_backup(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.GetBackupRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_update_backup_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.update_backup), "__call__") as call: + call.return_value = table.Backup() + client.update_backup(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.UpdateBackupRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_backup_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: + call.return_value = None + client.delete_backup(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.DeleteBackupRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_backups_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. 
+ with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + call.return_value = bigtable_table_admin.ListBackupsResponse() + client.list_backups(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.ListBackupsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_restore_table_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.restore_table), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.restore_table(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.RestoreTableRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_copy_backup_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.copy_backup(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CopyBackupRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_iam_policy_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + client.get_iam_policy(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.GetIamPolicyRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_set_iam_policy_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + client.set_iam_policy(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.SetIamPolicyRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+def test_test_iam_permissions_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + client.test_iam_permissions(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.TestIamPermissionsRequest() + + assert args[0] == request_msg + + +def test_transport_kind_grpc_asyncio(): + transport = BigtableTableAdminAsyncClient.get_transport_class("grpc_asyncio")( + credentials=async_anonymous_credentials() + ) + assert transport.kind == "grpc_asyncio" + + +def test_initialize_client_w_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), transport="grpc_asyncio" + ) + assert client is not None + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_create_table_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.create_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gba_table.Table( + name="name_value", + granularity=gba_table.Table.TimestampGranularity.MILLIS, + deletion_protection=True, + ) + ) + await client.create_table(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CreateTableRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_create_table_from_snapshot_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_table_from_snapshot), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.create_table_from_snapshot(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CreateTableFromSnapshotRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_list_tables_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_tables), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.ListTablesResponse( + next_page_token="next_page_token_value", + ) + ) + await client.list_tables(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.ListTablesRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_get_table_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + table.Table( + name="name_value", + granularity=table.Table.TimestampGranularity.MILLIS, + deletion_protection=True, + ) + ) + await client.get_table(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.GetTableRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_update_table_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.update_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.update_table(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.UpdateTableRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_delete_table_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.delete_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_table(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.DeleteTableRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_undelete_table_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. 
+ with mock.patch.object(type(client.transport.undelete_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.undelete_table(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.UndeleteTableRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_create_authorized_view_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_authorized_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.create_authorized_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CreateAuthorizedViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_list_authorized_views_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_authorized_views), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.ListAuthorizedViewsResponse( + next_page_token="next_page_token_value", + ) + ) + await client.list_authorized_views(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.ListAuthorizedViewsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_get_authorized_view_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.get_authorized_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + table.AuthorizedView( + name="name_value", + etag="etag_value", + deletion_protection=True, + ) + ) + await client.get_authorized_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.GetAuthorizedViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+@pytest.mark.asyncio +async def test_update_authorized_view_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.update_authorized_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.update_authorized_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.UpdateAuthorizedViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_delete_authorized_view_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.delete_authorized_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_authorized_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.DeleteAuthorizedViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_modify_column_families_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.modify_column_families), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + table.Table( + name="name_value", + granularity=table.Table.TimestampGranularity.MILLIS, + deletion_protection=True, + ) + ) + await client.modify_column_families(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.ModifyColumnFamiliesRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_drop_row_range_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.drop_row_range(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.DropRowRangeRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_generate_consistency_token_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.generate_consistency_token), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.GenerateConsistencyTokenResponse( + consistency_token="consistency_token_value", + ) + ) + await client.generate_consistency_token(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.GenerateConsistencyTokenRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_check_consistency_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.check_consistency), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.CheckConsistencyResponse( + consistent=True, + ) + ) + await client.check_consistency(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CheckConsistencyRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_snapshot_table_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.snapshot_table(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.SnapshotTableRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_get_snapshot_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + table.Snapshot( + name="name_value", + data_size_bytes=1594, + state=table.Snapshot.State.READY, + description="description_value", + ) + ) + await client.get_snapshot(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.GetSnapshotRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_list_snapshots_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.ListSnapshotsResponse( + next_page_token="next_page_token_value", + ) + ) + await client.list_snapshots(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.ListSnapshotsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_delete_snapshot_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_snapshot(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.DeleteSnapshotRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_create_backup_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.create_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.create_backup(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CreateBackupRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_get_backup_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. 
+ with mock.patch.object(type(client.transport.get_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + table.Backup( + name="name_value", + source_table="source_table_value", + source_backup="source_backup_value", + size_bytes=1089, + state=table.Backup.State.CREATING, + backup_type=table.Backup.BackupType.STANDARD, + ) + ) + await client.get_backup(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.GetBackupRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_update_backup_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.update_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + table.Backup( + name="name_value", + source_table="source_table_value", + source_backup="source_backup_value", + size_bytes=1089, + state=table.Backup.State.CREATING, + backup_type=table.Backup.BackupType.STANDARD, + ) + ) + await client.update_backup(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.UpdateBackupRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_delete_backup_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_backup(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.DeleteBackupRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_list_backups_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.ListBackupsResponse( + next_page_token="next_page_token_value", + ) + ) + await client.list_backups(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.ListBackupsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_restore_table_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.restore_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.restore_table(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.RestoreTableRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_copy_backup_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.copy_backup(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CopyBackupRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_get_iam_policy_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + ) + await client.get_iam_policy(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.GetIamPolicyRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_set_iam_policy_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + ) + await client.set_iam_policy(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.SetIamPolicyRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_test_iam_permissions_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + ) + await client.test_iam_permissions(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.TestIamPermissionsRequest() + + assert args[0] == request_msg + + +def test_transport_kind_rest(): + transport = BigtableTableAdminClient.get_transport_class("rest")( + credentials=ga_credentials.AnonymousCredentials() + ) + assert transport.kind == "rest" + + +def test_create_table_rest_bad_request( + request_type=bigtable_table_admin.CreateTableRequest, +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.create_table(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.CreateTableRequest, + dict, + ], +) +def test_create_table_rest_call_success(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = gba_table.Table( + name="name_value", + granularity=gba_table.Table.TimestampGranularity.MILLIS, + deletion_protection=True, + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = gba_table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.create_table(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, gba_table.Table) + assert response.name == "name_value" + assert response.granularity == gba_table.Table.TimestampGranularity.MILLIS + assert response.deletion_protection is True + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_create_table_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_create_table" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_create_table" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_table_admin.CreateTableRequest.pb( + bigtable_table_admin.CreateTableRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + return_value = gba_table.Table.to_json(gba_table.Table()) + req.return_value.content = return_value + + request = bigtable_table_admin.CreateTableRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = gba_table.Table() + + client.create_table( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_create_table_from_snapshot_rest_bad_request( + request_type=bigtable_table_admin.CreateTableFromSnapshotRequest, +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.create_table_from_snapshot(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.CreateTableFromSnapshotRequest, + dict, + ], +) +def test_create_table_from_snapshot_rest_call_success(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.create_table_from_snapshot(request) + + # Establish that the response is the type that we expect. + json_return_value = json_format.MessageToJson(return_value) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_create_table_from_snapshot_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_create_table_from_snapshot" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_create_table_from_snapshot" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_table_admin.CreateTableFromSnapshotRequest.pb( + bigtable_table_admin.CreateTableFromSnapshotRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value + + request = bigtable_table_admin.CreateTableFromSnapshotRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.create_table_from_snapshot( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_list_tables_rest_bad_request( + request_type=bigtable_table_admin.ListTablesRequest, +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), 
transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.list_tables(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.ListTablesRequest, + dict, + ], +) +def test_list_tables_rest_call_success(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.ListTablesResponse( + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListTablesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.list_tables(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListTablesPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_tables_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_list_tables" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_list_tables" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_table_admin.ListTablesRequest.pb( + bigtable_table_admin.ListTablesRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + return_value = bigtable_table_admin.ListTablesResponse.to_json( + bigtable_table_admin.ListTablesResponse() + ) + req.return_value.content = return_value + + request = bigtable_table_admin.ListTablesRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable_table_admin.ListTablesResponse() + + client.list_tables( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_table_rest_bad_request(request_type=bigtable_table_admin.GetTableRequest): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.get_table(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.GetTableRequest, + dict, + ], +) +def test_get_table_rest_call_success(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = table.Table( + name="name_value", + granularity=table.Table.TimestampGranularity.MILLIS, + deletion_protection=True, + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.get_table(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, table.Table) + assert response.name == "name_value" + assert response.granularity == table.Table.TimestampGranularity.MILLIS + assert response.deletion_protection is True + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_table_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_get_table" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_get_table" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_table_admin.GetTableRequest.pb( + bigtable_table_admin.GetTableRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + return_value = table.Table.to_json(table.Table()) + req.return_value.content = return_value + + request = bigtable_table_admin.GetTableRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = table.Table() + + client.get_table( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_update_table_rest_bad_request( + request_type=bigtable_table_admin.UpdateTableRequest, +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = { + "table": {"name": "projects/sample1/instances/sample2/tables/sample3"} + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.update_table(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.UpdateTableRequest, + dict, + ], +) +def test_update_table_rest_call_success(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = { + "table": {"name": "projects/sample1/instances/sample2/tables/sample3"} + } + request_init["table"] = { + "name": "projects/sample1/instances/sample2/tables/sample3", + "cluster_states": {}, + "column_families": {}, + "granularity": 1, + "restore_info": { + "source_type": 1, + "backup_info": { + "backup": "backup_value", + "start_time": {"seconds": 751, "nanos": 543}, + "end_time": {}, + "source_table": "source_table_value", + "source_backup": "source_backup_value", + }, + }, + "change_stream_config": {"retention_period": {"seconds": 751, "nanos": 543}}, + "deletion_protection": True, + "automated_backup_policy": {"retention_period": {}, "frequency": {}}, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_table_admin.UpdateTableRequest.meta.fields["table"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["table"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["table"][field])): + del request_init["table"][field][i][subfield] + else: + del request_init["table"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.update_table(request) + + # Establish that the response is the type that we expect. 
+ json_return_value = json_format.MessageToJson(return_value) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_update_table_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_update_table" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_update_table" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_table_admin.UpdateTableRequest.pb( + bigtable_table_admin.UpdateTableRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value + + request = bigtable_table_admin.UpdateTableRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.update_table( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_delete_table_rest_bad_request( + request_type=bigtable_table_admin.DeleteTableRequest, +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.delete_table(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.DeleteTableRequest, + dict, + ], +) +def test_delete_table_rest_call_success(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = None + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = "" + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.delete_table(request) + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_delete_table_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_delete_table" + ) as pre: + pre.assert_not_called() + pb_message = bigtable_table_admin.DeleteTableRequest.pb( + bigtable_table_admin.DeleteTableRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + + request = bigtable_table_admin.DeleteTableRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + + client.delete_table( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + + +def test_undelete_table_rest_bad_request( + request_type=bigtable_table_admin.UndeleteTableRequest, +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.undelete_table(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.UndeleteTableRequest, + dict, + ], +) +def test_undelete_table_rest_call_success(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.undelete_table(request) + + # Establish that the response is the type that we expect. + json_return_value = json_format.MessageToJson(return_value) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_undelete_table_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_undelete_table" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_undelete_table" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_table_admin.UndeleteTableRequest.pb( + bigtable_table_admin.UndeleteTableRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value + + request = bigtable_table_admin.UndeleteTableRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.undelete_table( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_create_authorized_view_rest_bad_request( + request_type=bigtable_table_admin.CreateAuthorizedViewRequest, +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.create_authorized_view(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.CreateAuthorizedViewRequest, + dict, + ], +) +def test_create_authorized_view_rest_call_success(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2/tables/sample3"} + request_init["authorized_view"] = { + "name": "name_value", + "subset_view": { + "row_prefixes": [b"row_prefixes_blob1", b"row_prefixes_blob2"], + "family_subsets": {}, + }, + "etag": "etag_value", + "deletion_protection": True, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_table_admin.CreateAuthorizedViewRequest.meta.fields[ + "authorized_view" + ] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. + message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["authorized_view"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if 
field_repeated: + for i in range(0, len(request_init["authorized_view"][field])): + del request_init["authorized_view"][field][i][subfield] + else: + del request_init["authorized_view"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.create_authorized_view(request) + + # Establish that the response is the type that we expect. + json_return_value = json_format.MessageToJson(return_value) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_create_authorized_view_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_create_authorized_view" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_create_authorized_view" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_table_admin.CreateAuthorizedViewRequest.pb( + bigtable_table_admin.CreateAuthorizedViewRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value + + request = bigtable_table_admin.CreateAuthorizedViewRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.create_authorized_view( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_list_authorized_views_rest_bad_request( + request_type=bigtable_table_admin.ListAuthorizedViewsRequest, +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.list_authorized_views(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.ListAuthorizedViewsRequest, + dict, + ], +) +def test_list_authorized_views_rest_call_success(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.ListAuthorizedViewsResponse( + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListAuthorizedViewsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.list_authorized_views(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListAuthorizedViewsPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_authorized_views_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_list_authorized_views" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_list_authorized_views" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_table_admin.ListAuthorizedViewsRequest.pb( + bigtable_table_admin.ListAuthorizedViewsRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + return_value = bigtable_table_admin.ListAuthorizedViewsResponse.to_json( + bigtable_table_admin.ListAuthorizedViewsResponse() + ) + req.return_value.content = return_value + + request = bigtable_table_admin.ListAuthorizedViewsRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable_table_admin.ListAuthorizedViewsResponse() + + client.list_authorized_views( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + 
post.assert_called_once() + + +def test_get_authorized_view_rest_bad_request( + request_type=bigtable_table_admin.GetAuthorizedViewRequest, +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.get_authorized_view(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.GetAuthorizedViewRequest, + dict, + ], +) +def test_get_authorized_view_rest_call_success(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = table.AuthorizedView( + name="name_value", + etag="etag_value", + deletion_protection=True, + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = table.AuthorizedView.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.get_authorized_view(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, table.AuthorizedView) + assert response.name == "name_value" + assert response.etag == "etag_value" + assert response.deletion_protection is True + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_authorized_view_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_get_authorized_view" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_get_authorized_view" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_table_admin.GetAuthorizedViewRequest.pb( + bigtable_table_admin.GetAuthorizedViewRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + return_value = table.AuthorizedView.to_json(table.AuthorizedView()) + req.return_value.content = return_value + + request = bigtable_table_admin.GetAuthorizedViewRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = table.AuthorizedView() + + client.get_authorized_view( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_update_authorized_view_rest_bad_request( + request_type=bigtable_table_admin.UpdateAuthorizedViewRequest, +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = { + "authorized_view": { + "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.update_authorized_view(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.UpdateAuthorizedViewRequest, + dict, + ], +) +def test_update_authorized_view_rest_call_success(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = { + "authorized_view": { + "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + } + request_init["authorized_view"] = { + "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4", + "subset_view": { + "row_prefixes": [b"row_prefixes_blob1", b"row_prefixes_blob2"], + "family_subsets": {}, + }, + "etag": "etag_value", + "deletion_protection": True, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_table_admin.UpdateAuthorizedViewRequest.meta.fields[ + "authorized_view" + ] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["authorized_view"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["authorized_view"][field])): + del request_init["authorized_view"][field][i][subfield] + else: + del request_init["authorized_view"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.update_authorized_view(request) + + # Establish that the response is the type that we expect. 
+ json_return_value = json_format.MessageToJson(return_value) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_update_authorized_view_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_update_authorized_view" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_update_authorized_view" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_table_admin.UpdateAuthorizedViewRequest.pb( + bigtable_table_admin.UpdateAuthorizedViewRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value + + request = bigtable_table_admin.UpdateAuthorizedViewRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.update_authorized_view( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_delete_authorized_view_rest_bad_request( + request_type=bigtable_table_admin.DeleteAuthorizedViewRequest, +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.delete_authorized_view(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.DeleteAuthorizedViewRequest, + dict, + ], +) +def test_delete_authorized_view_rest_call_success(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = None + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = "" + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.delete_authorized_view(request) + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_delete_authorized_view_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_delete_authorized_view" + ) as pre: + pre.assert_not_called() + pb_message = bigtable_table_admin.DeleteAuthorizedViewRequest.pb( + bigtable_table_admin.DeleteAuthorizedViewRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + + request = bigtable_table_admin.DeleteAuthorizedViewRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + + client.delete_authorized_view( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + + +def test_modify_column_families_rest_bad_request( + request_type=bigtable_table_admin.ModifyColumnFamiliesRequest, +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.modify_column_families(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.ModifyColumnFamiliesRequest, + dict, + ], +) +def test_modify_column_families_rest_call_success(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = table.Table( + name="name_value", + granularity=table.Table.TimestampGranularity.MILLIS, + deletion_protection=True, + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.modify_column_families(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, table.Table) + assert response.name == "name_value" + assert response.granularity == table.Table.TimestampGranularity.MILLIS + assert response.deletion_protection is True + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_modify_column_families_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_modify_column_families" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_modify_column_families" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_table_admin.ModifyColumnFamiliesRequest.pb( + bigtable_table_admin.ModifyColumnFamiliesRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + return_value = table.Table.to_json(table.Table()) + req.return_value.content = return_value + + request = bigtable_table_admin.ModifyColumnFamiliesRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = table.Table() + + client.modify_column_families( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_drop_row_range_rest_bad_request( + request_type=bigtable_table_admin.DropRowRangeRequest, +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.drop_row_range(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.DropRowRangeRequest, + dict, + ], +) +def test_drop_row_range_rest_call_success(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = "" + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.drop_row_range(request) + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_drop_row_range_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_drop_row_range" + ) as pre: + pre.assert_not_called() + pb_message = bigtable_table_admin.DropRowRangeRequest.pb( + bigtable_table_admin.DropRowRangeRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + + request = bigtable_table_admin.DropRowRangeRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + + client.drop_row_range( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + + +def test_generate_consistency_token_rest_bad_request( + request_type=bigtable_table_admin.GenerateConsistencyTokenRequest, +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.generate_consistency_token(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.GenerateConsistencyTokenRequest, + dict, + ], +) +def test_generate_consistency_token_rest_call_success(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.GenerateConsistencyTokenResponse( + consistency_token="consistency_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.generate_consistency_token(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable_table_admin.GenerateConsistencyTokenResponse) + assert response.consistency_token == "consistency_token_value" + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_generate_consistency_token_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_generate_consistency_token" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_generate_consistency_token" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_table_admin.GenerateConsistencyTokenRequest.pb( + bigtable_table_admin.GenerateConsistencyTokenRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.to_json( + bigtable_table_admin.GenerateConsistencyTokenResponse() + ) + req.return_value.content = return_value + + request = bigtable_table_admin.GenerateConsistencyTokenRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() + + client.generate_consistency_token( + request, + 
metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_check_consistency_rest_bad_request( + request_type=bigtable_table_admin.CheckConsistencyRequest, +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.check_consistency(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.CheckConsistencyRequest, + dict, + ], +) +def test_check_consistency_rest_call_success(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.CheckConsistencyResponse( + consistent=True, + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable_table_admin.CheckConsistencyResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.check_consistency(request) + + # Establish that the response is the type that we expect. 
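The *_rest_bad_request tests in this section drive core_exceptions.BadRequest purely from the 400 status code set on the mocked response. A small illustrative sketch of that status-to-exception mapping, assuming google.api_core's public from_http_status helper:

    from google.api_core import exceptions as core_exceptions

    # HTTP 400 maps to BadRequest, which is why status_code = 400 on the
    # mocked response is enough to make the client raise.
    err = core_exceptions.from_http_status(400, "simulated bad request")
    assert isinstance(err, core_exceptions.BadRequest)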
+ assert isinstance(response, bigtable_table_admin.CheckConsistencyResponse) + assert response.consistent is True + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_check_consistency_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_check_consistency" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_check_consistency" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_table_admin.CheckConsistencyRequest.pb( + bigtable_table_admin.CheckConsistencyRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + return_value = bigtable_table_admin.CheckConsistencyResponse.to_json( + bigtable_table_admin.CheckConsistencyResponse() + ) + req.return_value.content = return_value + + request = bigtable_table_admin.CheckConsistencyRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable_table_admin.CheckConsistencyResponse() + + client.check_consistency( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_snapshot_table_rest_bad_request( + request_type=bigtable_table_admin.SnapshotTableRequest, +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.snapshot_table(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.SnapshotTableRequest, + dict, + ], +) +def test_snapshot_table_rest_call_success(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
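The interceptor tests above patch the generated pre_*/post_* hooks and assert that each fires exactly once per call. A hedged sketch of how a real interceptor built on the same hooks might look (LoggingInterceptor is a hypothetical name; the hook signatures mirror what the tests stub out):

    class LoggingInterceptor(transports.BigtableTableAdminRestInterceptor):
        def pre_check_consistency(self, request, metadata):
            # Inspect or rewrite the outgoing request/metadata here.
            return request, metadata

        def post_check_consistency(self, response):
            # Inspect or rewrite the parsed response here.
            return response

    client = BigtableTableAdminClient(
        transport=transports.BigtableTableAdminRestTransport(
            credentials=ga_credentials.AnonymousCredentials(),
            interceptor=LoggingInterceptor(),
        )
    )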
+ return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.snapshot_table(request) + + # Establish that the response is the type that we expect. + json_return_value = json_format.MessageToJson(return_value) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_snapshot_table_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_snapshot_table" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_snapshot_table" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_table_admin.SnapshotTableRequest.pb( + bigtable_table_admin.SnapshotTableRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value + + request = bigtable_table_admin.SnapshotTableRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.snapshot_table( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_snapshot_rest_bad_request( + request_type=bigtable_table_admin.GetSnapshotRequest, +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.get_snapshot(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.GetSnapshotRequest, + dict, + ], +) +def test_get_snapshot_rest_call_success(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = table.Snapshot( + name="name_value", + data_size_bytes=1594, + state=table.Snapshot.State.READY, + description="description_value", + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = table.Snapshot.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.get_snapshot(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, table.Snapshot) + assert response.name == "name_value" + assert response.data_size_bytes == 1594 + assert response.state == table.Snapshot.State.READY + assert response.description == "description_value" + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_snapshot_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_get_snapshot" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_get_snapshot" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_table_admin.GetSnapshotRequest.pb( + bigtable_table_admin.GetSnapshotRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + return_value = table.Snapshot.to_json(table.Snapshot()) + req.return_value.content = return_value + + request = bigtable_table_admin.GetSnapshotRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = table.Snapshot() + + client.get_snapshot( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_list_snapshots_rest_bad_request( + 
request_type=bigtable_table_admin.ListSnapshotsRequest, +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.list_snapshots(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.ListSnapshotsRequest, + dict, + ], +) +def test_list_snapshots_rest_call_success(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.ListSnapshotsResponse( + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListSnapshotsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.list_snapshots(request) + + # Establish that the response is the type that we expect. 
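The assertion that follows checks that list_snapshots surfaces a pagers.ListSnapshotsPager rather than the raw ListSnapshotsResponse. A brief usage sketch of how such a pager is normally consumed; against the mock above, each page fetch would simply hit the patched session again while a next_page_token is present:

    # Iterate items across pages; the pager keeps issuing requests until the
    # response no longer carries a next_page_token.
    for snapshot in client.list_snapshots(request=request):
        print(snapshot.name)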
+ assert isinstance(response, pagers.ListSnapshotsPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_snapshots_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_list_snapshots" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_list_snapshots" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_table_admin.ListSnapshotsRequest.pb( + bigtable_table_admin.ListSnapshotsRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + return_value = bigtable_table_admin.ListSnapshotsResponse.to_json( + bigtable_table_admin.ListSnapshotsResponse() + ) + req.return_value.content = return_value + + request = bigtable_table_admin.ListSnapshotsRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable_table_admin.ListSnapshotsResponse() + + client.list_snapshots( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_delete_snapshot_rest_bad_request( + request_type=bigtable_table_admin.DeleteSnapshotRequest, +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.delete_snapshot(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.DeleteSnapshotRequest, + dict, + ], +) +def test_delete_snapshot_rest_call_success(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = None + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = "" + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.delete_snapshot(request) + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_delete_snapshot_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_delete_snapshot" + ) as pre: + pre.assert_not_called() + pb_message = bigtable_table_admin.DeleteSnapshotRequest.pb( + bigtable_table_admin.DeleteSnapshotRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + + request = bigtable_table_admin.DeleteSnapshotRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + + client.delete_snapshot( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + + +def test_create_backup_rest_bad_request( + request_type=bigtable_table_admin.CreateBackupRequest, +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.create_backup(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.CreateBackupRequest, + dict, + ], +) +def test_create_backup_rest_call_success(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} + request_init["backup"] = { + "name": "name_value", + "source_table": "source_table_value", + "source_backup": "source_backup_value", + "expire_time": {"seconds": 751, "nanos": 543}, + "start_time": {}, + "end_time": {}, + "size_bytes": 1089, + "state": 1, + "encryption_info": { + "encryption_type": 1, + "encryption_status": { + "code": 411, + "message": "message_value", + "details": [ + { + "type_url": "type.googleapis.com/google.protobuf.Duration", + "value": b"\x08\x0c\x10\xdb\x07", + } + ], + }, + "kms_key_version": "kms_key_version_value", + }, + "backup_type": 1, + "hot_to_standard_time": {}, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_table_admin.CreateBackupRequest.meta.fields["backup"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["backup"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["backup"][field])): + del request_init["backup"][field][i][subfield] + else: + del request_init["backup"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.create_backup(request) + + # Establish that the response is the type that we expect. 
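The block above prunes sample-request subfields that the runtime protobuf/proto-plus dependency no longer defines (see the gapic-generator-python issue referenced in the comments). A self-contained, hypothetical restatement of that pruning step on a plain dict:

    def prune_unknown_subfields(sample, known_pairs):
        # Drop nested keys that are not present in the runtime message definition.
        for field, value in sample.items():
            entries = value if isinstance(value, list) else [value]
            for entry in entries:
                if isinstance(entry, dict):
                    for subfield in list(entry):
                        if (field, subfield) not in known_pairs:
                            del entry[subfield]
        return sample

    sample = {"encryption_info": {"kms_key_version": "v1", "not_in_runtime": 1}}
    known = {("encryption_info", "kms_key_version")}
    assert prune_unknown_subfields(sample, known) == {
        "encryption_info": {"kms_key_version": "v1"}
    }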
+ json_return_value = json_format.MessageToJson(return_value) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_create_backup_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_create_backup" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_create_backup" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_table_admin.CreateBackupRequest.pb( + bigtable_table_admin.CreateBackupRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value + + request = bigtable_table_admin.CreateBackupRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.create_backup( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_backup_rest_bad_request( + request_type=bigtable_table_admin.GetBackupRequest, +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.get_backup(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.GetBackupRequest, + dict, + ], +) +def test_get_backup_rest_call_success(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = table.Backup( + name="name_value", + source_table="source_table_value", + source_backup="source_backup_value", + size_bytes=1089, + state=table.Backup.State.CREATING, + backup_type=table.Backup.BackupType.STANDARD, + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = table.Backup.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.get_backup(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, table.Backup) + assert response.name == "name_value" + assert response.source_table == "source_table_value" + assert response.source_backup == "source_backup_value" + assert response.size_bytes == 1089 + assert response.state == table.Backup.State.CREATING + assert response.backup_type == table.Backup.BackupType.STANDARD + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_backup_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_get_backup" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_get_backup" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_table_admin.GetBackupRequest.pb( + bigtable_table_admin.GetBackupRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + return_value = table.Backup.to_json(table.Backup()) + req.return_value.content = return_value + + request = bigtable_table_admin.GetBackupRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = table.Backup() + + client.get_backup( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_update_backup_rest_bad_request( + request_type=bigtable_table_admin.UpdateBackupRequest, +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = { + "backup": { + "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" + } + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.update_backup(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.UpdateBackupRequest, + dict, + ], +) +def test_update_backup_rest_call_success(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = { + "backup": { + "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" + } + } + request_init["backup"] = { + "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4", + "source_table": "source_table_value", + "source_backup": "source_backup_value", + "expire_time": {"seconds": 751, "nanos": 543}, + "start_time": {}, + "end_time": {}, + "size_bytes": 1089, + "state": 1, + "encryption_info": { + "encryption_type": 1, + "encryption_status": { + "code": 411, + "message": "message_value", + "details": [ + { + "type_url": "type.googleapis.com/google.protobuf.Duration", + "value": b"\x08\x0c\x10\xdb\x07", + } + ], + }, + "kms_key_version": "kms_key_version_value", + }, + "backup_type": 1, + "hot_to_standard_time": {}, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_table_admin.UpdateBackupRequest.meta.fields["backup"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["backup"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["backup"][field])): + del request_init["backup"][field][i][subfield] + else: + del request_init["backup"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = table.Backup( + name="name_value", + source_table="source_table_value", + source_backup="source_backup_value", + size_bytes=1089, + state=table.Backup.State.CREATING, + backup_type=table.Backup.BackupType.STANDARD, + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = table.Backup.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.update_backup(request) + + # Establish that the response is the type that we expect. 
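The unary tests here convert the proto-plus return value with Type.pb(...) before JSON-encoding it into the mocked response body. A short sketch, assuming proto-plus's standard pb/to_json/from_json helpers, of what those conversions do:

    backup = table.Backup(name="name_value", source_table="source_table_value")
    raw_pb = table.Backup.pb(backup)          # underlying protobuf message
    json_body = table.Backup.to_json(backup)  # JSON string placed in response.content
    assert table.Backup.from_json(json_body).name == "name_value"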
+ assert isinstance(response, table.Backup) + assert response.name == "name_value" + assert response.source_table == "source_table_value" + assert response.source_backup == "source_backup_value" + assert response.size_bytes == 1089 + assert response.state == table.Backup.State.CREATING + assert response.backup_type == table.Backup.BackupType.STANDARD + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_update_backup_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_update_backup" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_update_backup" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_table_admin.UpdateBackupRequest.pb( + bigtable_table_admin.UpdateBackupRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + return_value = table.Backup.to_json(table.Backup()) + req.return_value.content = return_value + + request = bigtable_table_admin.UpdateBackupRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = table.Backup() + + client.update_backup( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_delete_backup_rest_bad_request( + request_type=bigtable_table_admin.DeleteBackupRequest, +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.delete_backup(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.DeleteBackupRequest, + dict, + ], +) +def test_delete_backup_rest_call_success(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = None + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = "" + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.delete_backup(request) + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_delete_backup_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_delete_backup" + ) as pre: + pre.assert_not_called() + pb_message = bigtable_table_admin.DeleteBackupRequest.pb( + bigtable_table_admin.DeleteBackupRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + + request = bigtable_table_admin.DeleteBackupRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + + client.delete_backup( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + + +def test_list_backups_rest_bad_request( + request_type=bigtable_table_admin.ListBackupsRequest, +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.list_backups(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.ListBackupsRequest, + dict, + ], +) +def test_list_backups_rest_call_success(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = bigtable_table_admin.ListBackupsResponse( + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.list_backups(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListBackupsPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_backups_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_list_backups" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_list_backups" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_table_admin.ListBackupsRequest.pb( + bigtable_table_admin.ListBackupsRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + return_value = bigtable_table_admin.ListBackupsResponse.to_json( + bigtable_table_admin.ListBackupsResponse() + ) + req.return_value.content = return_value + + request = bigtable_table_admin.ListBackupsRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable_table_admin.ListBackupsResponse() - response_value = Response() - response_value.status_code = 200 + client.list_backups( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) - json_return_value = json_format.MessageToJson(return_value) + pre.assert_called_once() + post.assert_called_once() - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.get_iam_policy(request) +def test_restore_table_rest_bad_request( + request_type=bigtable_table_admin.RestoreTableRequest, +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.restore_table(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.RestoreTableRequest, + dict, + ], +) +def test_restore_table_rest_call_success(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.restore_table(request) + + # Establish that the response is the type that we expect. + json_return_value = json_format.MessageToJson(return_value) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_restore_table_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_restore_table" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_restore_table" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_table_admin.RestoreTableRequest.pb( + bigtable_table_admin.RestoreTableRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value + + request = bigtable_table_admin.RestoreTableRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.restore_table( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_copy_backup_rest_bad_request( + request_type=bigtable_table_admin.CopyBackupRequest, +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"parent": 
"projects/sample1/instances/sample2/clusters/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.copy_backup(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.CopyBackupRequest, + dict, + ], +) +def test_copy_backup_rest_call_success(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.copy_backup(request) + + # Establish that the response is the type that we expect. + json_return_value = json_format.MessageToJson(return_value) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_copy_backup_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_copy_backup" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_copy_backup" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_table_admin.CopyBackupRequest.pb( + bigtable_table_admin.CopyBackupRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value + + request = bigtable_table_admin.CopyBackupRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.copy_backup( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_iam_policy_rest_bad_request( + request_type=iam_policy_pb2.GetIamPolicyRequest, +): + client = BigtableTableAdminClient( + 
credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.get_iam_policy(request) + + +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.GetIamPolicyRequest, + dict, + ], +) +def test_get_iam_policy_rest_call_success(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # send a request that will satisfy transcoding + request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) -def test_get_iam_policy_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.get_iam_policy(request) - unset_fields = transport.get_iam_policy._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("resource",))) + # Establish that the response is the type that we expect. 
+ assert isinstance(response, policy_pb2.Policy) + assert response.version == 774 + assert response.etag == b"etag_blob" @pytest.mark.parametrize("null_interceptor", [True, False]) @@ -22565,6 +23078,7 @@ def test_get_iam_policy_rest_interceptors(null_interceptor): else transports.BigtableTableAdminRestInterceptor(), ) client = BigtableTableAdminClient(transport=transport) + with mock.patch.object( type(client.transport._session), "request" ) as req, mock.patch.object( @@ -22584,10 +23098,10 @@ def test_get_iam_policy_rest_interceptors(null_interceptor): "query_params": pb_message, } - req.return_value = Response() + req.return_value = mock.Mock() req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson(policy_pb2.Policy()) + return_value = json_format.MessageToJson(policy_pb2.Policy()) + req.return_value.content = return_value request = iam_policy_pb2.GetIamPolicyRequest() metadata = [ @@ -22609,14 +23123,12 @@ def test_get_iam_policy_rest_interceptors(null_interceptor): post.assert_called_once() -def test_get_iam_policy_rest_bad_request( - transport: str = "rest", request_type=iam_policy_pb2.GetIamPolicyRequest +def test_set_iam_policy_rest_bad_request( + request_type=iam_policy_pb2.SetIamPolicyRequest, ): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) - # send a request that will satisfy transcoding request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) @@ -22626,74 +23138,13 @@ def test_get_iam_policy_rest_bad_request( core_exceptions.BadRequest ): # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.get_iam_policy(request) - - -def test_get_iam_policy_rest_flattened(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = policy_pb2.Policy() - - # get arguments that satisfy an http rule for this method - sample_request = { - "resource": "projects/sample1/instances/sample2/tables/sample3" - } - - # get truthy value for each flattened field - mock_args = dict( - resource="resource_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") + response_value.request = mock.Mock() req.return_value = response_value - - client.get_iam_policy(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{resource=projects/*/instances/*/tables/*}:getIamPolicy" - % client.transport._host, - args[1], - ) - - -def test_get_iam_policy_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_iam_policy( - iam_policy_pb2.GetIamPolicyRequest(), - resource="resource_value", - ) - - -def test_get_iam_policy_rest_error(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) + client.set_iam_policy(request) @pytest.mark.parametrize( @@ -22703,163 +23154,35 @@ def test_get_iam_policy_rest_error(): dict, ], ) -def test_set_iam_policy_rest(request_type): +def test_set_iam_policy_rest_call_success(request_type): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = policy_pb2.Policy( - version=774, - etag=b"etag_blob", - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.set_iam_policy(request) - - # Establish that the response is the type that we expect. - assert isinstance(response, policy_pb2.Policy) - assert response.version == 774 - assert response.etag == b"etag_blob" - - -def test_set_iam_policy_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.set_iam_policy in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.set_iam_policy] = mock_rpc - - request = {} - client.set_iam_policy(request) - - # Establish that the underlying gRPC stub method was called. 
- assert mock_rpc.call_count == 1 - - client.set_iam_policy(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_set_iam_policy_rest_required_fields( - request_type=iam_policy_pb2.SetIamPolicyRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["resource"] = "" - request = request_type(**request_init) - pb_request = request - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).set_iam_policy._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["resource"] = "resource_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).set_iam_policy._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "resource" in jsonified_request - assert jsonified_request["resource"] == "resource_value" - - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = policy_pb2.Policy() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - - response = client.set_iam_policy(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_set_iam_policy_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.set_iam_policy._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(()) - & set( - ( - "resource", - "policy", - ) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = policy_pb2.Policy( + version=774, + etag=b"etag_blob", ) - ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.set_iam_policy(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + assert response.version == 774 + assert response.etag == b"etag_blob" @pytest.mark.parametrize("null_interceptor", [True, False]) @@ -22871,6 +23194,7 @@ def test_set_iam_policy_rest_interceptors(null_interceptor): else transports.BigtableTableAdminRestInterceptor(), ) client = BigtableTableAdminClient(transport=transport) + with mock.patch.object( type(client.transport._session), "request" ) as req, mock.patch.object( @@ -22890,10 +23214,10 @@ def test_set_iam_policy_rest_interceptors(null_interceptor): "query_params": pb_message, } - req.return_value = Response() + req.return_value = mock.Mock() req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson(policy_pb2.Policy()) + return_value = json_format.MessageToJson(policy_pb2.Policy()) + req.return_value.content = return_value request = iam_policy_pb2.SetIamPolicyRequest() metadata = [ @@ -22915,14 +23239,12 @@ def test_set_iam_policy_rest_interceptors(null_interceptor): post.assert_called_once() -def test_set_iam_policy_rest_bad_request( - transport: str = "rest", request_type=iam_policy_pb2.SetIamPolicyRequest +def test_test_iam_permissions_rest_bad_request( + request_type=iam_policy_pb2.TestIamPermissionsRequest, ): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) - # send a request that will satisfy transcoding request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) @@ -22932,496 +23254,749 @@ def test_set_iam_policy_rest_bad_request( core_exceptions.BadRequest ): # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) response_value.status_code = 400 - response_value.request = Request() + response_value.request = mock.Mock() req.return_value = response_value - client.set_iam_policy(request) + client.test_iam_permissions(request) -def test_set_iam_policy_rest_flattened(): +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.TestIamPermissionsRequest, + dict, + ], +) +def test_test_iam_permissions_rest_call_success(request_type): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) + # send a request that will satisfy transcoding + request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. 
- return_value = policy_pb2.Policy() - - # get arguments that satisfy an http rule for this method - sample_request = { - "resource": "projects/sample1/instances/sample2/tables/sample3" - } - - # get truthy value for each flattened field - mock_args = dict( - resource="resource_value", + return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], ) - mock_args.update(sample_request) # Wrap the value into a proper Response obj - response_value = Response() + response_value = mock.Mock() response_value.status_code = 200 json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") + response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + response = client.test_iam_permissions(request) - client.set_iam_policy(**mock_args) + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + assert response.permissions == ["permissions_value"] - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{resource=projects/*/instances/*/tables/*}:setIamPolicy" - % client.transport._host, - args[1], + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_test_iam_permissions_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_test_iam_permissions" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_test_iam_permissions" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = iam_policy_pb2.TestIamPermissionsRequest() + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + return_value = json_format.MessageToJson( + iam_policy_pb2.TestIamPermissionsResponse() + ) + req.return_value.content = return_value + + request = iam_policy_pb2.TestIamPermissionsRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + client.test_iam_permissions( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], ) + pre.assert_called_once() + post.assert_called_once() + + +def test_initialize_client_w_rest(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + assert client is not None + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_table_empty_call_rest(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. 
+ with mock.patch.object(type(client.transport.create_table), "__call__") as call: + client.create_table(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CreateTableRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_table_from_snapshot_empty_call_rest(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_table_from_snapshot), "__call__" + ) as call: + client.create_table_from_snapshot(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CreateTableFromSnapshotRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_tables_empty_call_rest(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_tables), "__call__") as call: + client.list_tables(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.ListTablesRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_table_empty_call_rest(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_table), "__call__") as call: + client.get_table(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.GetTableRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_update_table_empty_call_rest(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.update_table), "__call__") as call: + client.update_table(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.UpdateTableRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_table_empty_call_rest(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. 
+ with mock.patch.object(type(client.transport.delete_table), "__call__") as call: + client.delete_table(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.DeleteTableRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_undelete_table_empty_call_rest(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.undelete_table), "__call__") as call: + client.undelete_table(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.UndeleteTableRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_authorized_view_empty_call_rest(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_authorized_view), "__call__" + ) as call: + client.create_authorized_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CreateAuthorizedViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_authorized_views_empty_call_rest(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_authorized_views), "__call__" + ) as call: + client.list_authorized_views(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.ListAuthorizedViewsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_authorized_view_empty_call_rest(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.get_authorized_view), "__call__" + ) as call: + client.get_authorized_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.GetAuthorizedViewRequest() + + assert args[0] == request_msg -def test_set_iam_policy_rest_flattened_error(transport: str = "rest"): + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+def test_update_authorized_view_empty_call_rest(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + transport="rest", ) - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.set_iam_policy( - iam_policy_pb2.SetIamPolicyRequest(), - resource="resource_value", - ) + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.update_authorized_view), "__call__" + ) as call: + client.update_authorized_view(request=None) + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.UpdateAuthorizedViewRequest() -def test_set_iam_policy_rest_error(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) + assert args[0] == request_msg -@pytest.mark.parametrize( - "request_type", - [ - iam_policy_pb2.TestIamPermissionsRequest, - dict, - ], -) -def test_test_iam_permissions_rest(request_type): +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_authorized_view_empty_call_rest(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) - # send a request that will satisfy transcoding - request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.delete_authorized_view), "__call__" + ) as call: + client.delete_authorized_view(request=None) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = iam_policy_pb2.TestIamPermissionsResponse( - permissions=["permissions_value"], - ) + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.DeleteAuthorizedViewRequest() - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) + assert args[0] == request_msg - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.test_iam_permissions(request) - # Establish that the response is the type that we expect. - assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) - assert response.permissions == ["permissions_value"] +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_modify_column_families_empty_call_rest(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + # Mock the actual call, and fake the request. 
+ with mock.patch.object( + type(client.transport.modify_column_families), "__call__" + ) as call: + client.modify_column_families(request=None) -def test_test_iam_permissions_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.ModifyColumnFamiliesRequest() - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() + assert args[0] == request_msg - # Ensure method has been cached - assert ( - client._transport.test_iam_permissions in client._transport._wrapped_methods - ) - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.test_iam_permissions - ] = mock_rpc +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_drop_row_range_empty_call_rest(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) - request = {} - client.test_iam_permissions(request) + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: + client.drop_row_range(request=None) - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.DropRowRangeRequest() - client.test_iam_permissions(request) + assert args[0] == request_msg - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_generate_consistency_token_empty_call_rest(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) -def test_test_iam_permissions_rest_required_fields( - request_type=iam_policy_pb2.TestIamPermissionsRequest, -): - transport_class = transports.BigtableTableAdminRestTransport + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.generate_consistency_token), "__call__" + ) as call: + client.generate_consistency_token(request=None) - request_init = {} - request_init["resource"] = "" - request_init["permissions"] = "" - request = request_type(**request_init) - pb_request = request - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.GenerateConsistencyTokenRequest() - # verify fields with default values are dropped + assert args[0] == request_msg - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).test_iam_permissions._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - # verify required fields with default values are now present +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_check_consistency_empty_call_rest(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) - jsonified_request["resource"] = "resource_value" - jsonified_request["permissions"] = "permissions_value" + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.check_consistency), "__call__" + ) as call: + client.check_consistency(request=None) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).test_iam_permissions._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CheckConsistencyRequest() - # verify required fields with non-default values are left alone - assert "resource" in jsonified_request - assert jsonified_request["resource"] == "resource_value" - assert "permissions" in jsonified_request - assert jsonified_request["permissions"] == "permissions_value" + assert args[0] == request_msg + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_snapshot_table_empty_call_rest(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) - request = request_type(**request_init) - # Designate an appropriate value for the returned response. - return_value = iam_policy_pb2.TestIamPermissionsResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: + client.snapshot_table(request=None) - response_value = Response() - response_value.status_code = 200 + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.SnapshotTableRequest() - json_return_value = json_format.MessageToJson(return_value) + assert args[0] == request_msg - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.test_iam_permissions(request) +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_snapshot_empty_call_rest(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: + client.get_snapshot(request=None) + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.GetSnapshotRequest() -def test_test_iam_permissions_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) + assert args[0] == request_msg - unset_fields = transport.test_iam_permissions._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(()) - & set( - ( - "resource", - "permissions", - ) - ) + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_snapshots_empty_call_rest(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: + client.list_snapshots(request=None) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_test_iam_permissions_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.ListSnapshotsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+def test_delete_snapshot_empty_call_rest(): + client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), + transport="rest", ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_test_iam_permissions" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_test_iam_permissions" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = iam_policy_pb2.TestIamPermissionsRequest() - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson( - iam_policy_pb2.TestIamPermissionsResponse() - ) - request = iam_policy_pb2.TestIamPermissionsRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = iam_policy_pb2.TestIamPermissionsResponse() + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: + client.delete_snapshot(request=None) - client.test_iam_permissions( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.DeleteSnapshotRequest() - pre.assert_called_once() - post.assert_called_once() + assert args[0] == request_msg -def test_test_iam_permissions_rest_bad_request( - transport: str = "rest", request_type=iam_policy_pb2.TestIamPermissionsRequest -): +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_backup_empty_call_rest(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + transport="rest", ) - # send a request that will satisfy transcoding - request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.create_backup), "__call__") as call: + client.create_backup(request=None) - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.test_iam_permissions(request) + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CreateBackupRequest() + assert args[0] == request_msg -def test_test_iam_permissions_rest_flattened(): + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+def test_get_backup_empty_call_rest(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = iam_policy_pb2.TestIamPermissionsResponse() + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_backup), "__call__") as call: + client.get_backup(request=None) - # get arguments that satisfy an http rule for this method - sample_request = { - "resource": "projects/sample1/instances/sample2/tables/sample3" - } + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.GetBackupRequest() - # get truthy value for each flattened field - mock_args = dict( - resource="resource_value", - permissions=["permissions_value"], - ) - mock_args.update(sample_request) + assert args[0] == request_msg - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - client.test_iam_permissions(**mock_args) +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_update_backup_empty_call_rest(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{resource=projects/*/instances/*/tables/*}:testIamPermissions" - % client.transport._host, - args[1], - ) + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.update_backup), "__call__") as call: + client.update_backup(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.UpdateBackupRequest() + assert args[0] == request_msg -def test_test_iam_permissions_rest_flattened_error(transport: str = "rest"): + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_backup_empty_call_rest(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + transport="rest", ) - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.test_iam_permissions( - iam_policy_pb2.TestIamPermissionsRequest(), - resource="resource_value", - permissions=["permissions_value"], - ) + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: + client.delete_backup(request=None) + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.DeleteBackupRequest() -def test_test_iam_permissions_rest_error(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) + assert args[0] == request_msg -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.BigtableTableAdminGrpcTransport( +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_backups_empty_call_rest(): + client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - with pytest.raises(ValueError): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - # It is an error to provide a credentials file and a transport instance. - transport = transports.BigtableTableAdminGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = BigtableTableAdminClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + client.list_backups(request=None) - # It is an error to provide an api_key and a transport instance. - transport = transports.BigtableTableAdminGrpcTransport( + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.ListBackupsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_restore_table_empty_call_rest(): + client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - options = client_options.ClientOptions() - options.api_key = "api_key" - with pytest.raises(ValueError): - client = BigtableTableAdminClient( - client_options=options, - transport=transport, - ) - # It is an error to provide an api_key and a credential. - options = client_options.ClientOptions() - options.api_key = "api_key" - with pytest.raises(ValueError): - client = BigtableTableAdminClient( - client_options=options, credentials=ga_credentials.AnonymousCredentials() - ) + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.restore_table), "__call__") as call: + client.restore_table(request=None) - # It is an error to provide scopes and a transport instance. - transport = transports.BigtableTableAdminGrpcTransport( + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.RestoreTableRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_copy_backup_empty_call_rest(): + client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - with pytest.raises(ValueError): - client = BigtableTableAdminClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) + # Mock the actual call, and fake the request. 
+ with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + client.copy_backup(request=None) -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.BigtableTableAdminGrpcTransport( + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CopyBackupRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_iam_policy_empty_call_rest(): + client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - client = BigtableTableAdminClient(transport=transport) - assert client.transport is transport + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + client.get_iam_policy(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.GetIamPolicyRequest() -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.BigtableTableAdminGrpcTransport( + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_set_iam_policy_empty_call_rest(): + client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - channel = transport.grpc_channel - assert channel - transport = transports.BigtableTableAdminGrpcAsyncIOTransport( + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + client.set_iam_policy(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.SetIamPolicyRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_test_iam_permissions_empty_call_rest(): + client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - channel = transport.grpc_channel - assert channel + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + client.test_iam_permissions(request=None) -@pytest.mark.parametrize( - "transport_class", - [ - transports.BigtableTableAdminGrpcTransport, - transports.BigtableTableAdminGrpcAsyncIOTransport, - transports.BigtableTableAdminRestTransport, - ], -) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, "default") as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.TestIamPermissionsRequest() + assert args[0] == request_msg -@pytest.mark.parametrize( - "transport_name", - [ - "grpc", - "rest", - ], -) -def test_transport_kind(transport_name): - transport = BigtableTableAdminClient.get_transport_class(transport_name)( + +def test_bigtable_table_admin_rest_lro_client(): + client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + transport = client.transport + + # Ensure that we have an api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.AbstractOperationsClient, ) - assert transport.kind == transport_name + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client def test_transport_grpc_default(): @@ -23717,23 +24292,6 @@ def test_bigtable_table_admin_http_transport_client_cert_source_for_mtls(): mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) -def test_bigtable_table_admin_rest_lro_client(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.AbstractOperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - @pytest.mark.parametrize( "transport_name", [ @@ -24375,36 +24933,41 @@ def test_client_with_default_client_info(): prep.assert_called_once_with(client_info) +def test_transport_close_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc" + ) + with mock.patch.object( + type(getattr(client.transport, "_grpc_channel")), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + @pytest.mark.asyncio -async def test_transport_close_async(): +async def test_transport_close_grpc_asyncio(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", + credentials=async_anonymous_credentials(), transport="grpc_asyncio" ) with mock.patch.object( - type(getattr(client.transport, "grpc_channel")), "close" + type(getattr(client.transport, "_grpc_channel")), "close" ) as close: async with client: close.assert_not_called() close.assert_called_once() -def test_transport_close(): - transports = { - "rest": "_session", - "grpc": "_grpc_channel", - } - - for transport, close_name in transports.items(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport - ) - with mock.patch.object( - type(getattr(client.transport, close_name)), "close" - ) as close: - with client: - close.assert_not_called() - close.assert_called_once() +def test_transport_close_rest(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + with mock.patch.object( + type(getattr(client.transport, "_session")), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() def test_client_ctx(): diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py index 
2be864732054..37b4bbfcaeaa 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py @@ -24,7 +24,7 @@ import grpc from grpc.experimental import aio -from collections.abc import Iterable +from collections.abc import Iterable, AsyncIterable from google.protobuf import json_format import json import math @@ -37,6 +37,13 @@ from requests.sessions import Session from google.protobuf import json_format +try: + from google.auth.aio import credentials as ga_credentials_async + + HAS_GOOGLE_AUTH_AIO = True +except ImportError: # pragma: NO COVER + HAS_GOOGLE_AUTH_AIO = False + from google.api_core import client_options from google.api_core import exceptions as core_exceptions from google.api_core import gapic_v1 @@ -60,10 +67,24 @@ import google.auth +async def mock_async_gen(data, chunk_size=1): + for i in range(0, len(data)): # pragma: NO COVER + chunk = data[i : i + chunk_size] + yield chunk.encode("utf-8") + + def client_cert_source_callback(): return b"cert bytes", b"key bytes" +# TODO: use async auth anon credentials by default once the minimum version of google-auth is upgraded. +# See related issue: https://github.com/googleapis/gapic-generator-python/issues/2107. +def async_anonymous_credentials(): + if HAS_GOOGLE_AUTH_AIO: + return ga_credentials_async.AnonymousCredentials() + return ga_credentials.AnonymousCredentials() + + # If default endpoint is localhost, then default mtls endpoint will be the same. # This method modifies the default endpoint so the client can produce a different # mtls endpoint for endpoint testing purposes. @@ -275,86 +296,6 @@ def test__get_universe_domain(): assert str(excinfo.value) == "Universe Domain cannot be an empty string." -@pytest.mark.parametrize( - "client_class,transport_class,transport_name", - [ - (BigtableClient, transports.BigtableGrpcTransport, "grpc"), - (BigtableClient, transports.BigtableRestTransport, "rest"), - ], -) -def test__validate_universe_domain(client_class, transport_class, transport_name): - client = client_class( - transport=transport_class(credentials=ga_credentials.AnonymousCredentials()) - ) - assert client._validate_universe_domain() == True - - # Test the case when universe is already validated. - assert client._validate_universe_domain() == True - - if transport_name == "grpc": - # Test the case where credentials are provided by the - # `local_channel_credentials`. The default universes in both match. - channel = grpc.secure_channel( - "http://localhost/", grpc.local_channel_credentials() - ) - client = client_class(transport=transport_class(channel=channel)) - assert client._validate_universe_domain() == True - - # Test the case where credentials do not exist: e.g. a transport is provided - # with no credentials. Validation should still succeed because there is no - # mismatch with non-existent credentials. - channel = grpc.secure_channel( - "http://localhost/", grpc.local_channel_credentials() - ) - transport = transport_class(channel=channel) - transport._credentials = None - client = client_class(transport=transport) - assert client._validate_universe_domain() == True - - # TODO: This is needed to cater for older versions of google-auth - # Make this test unconditional once the minimum supported version of - # google-auth becomes 2.23.0 or higher. 
- google_auth_major, google_auth_minor = [ - int(part) for part in google.auth.__version__.split(".")[0:2] - ] - if google_auth_major > 2 or (google_auth_major == 2 and google_auth_minor >= 23): - credentials = ga_credentials.AnonymousCredentials() - credentials._universe_domain = "foo.com" - # Test the case when there is a universe mismatch from the credentials. - client = client_class(transport=transport_class(credentials=credentials)) - with pytest.raises(ValueError) as excinfo: - client._validate_universe_domain() - assert ( - str(excinfo.value) - == "The configured universe domain (googleapis.com) does not match the universe domain found in the credentials (foo.com). If you haven't configured the universe domain explicitly, `googleapis.com` is the default." - ) - - # Test the case when there is a universe mismatch from the client. - # - # TODO: Make this test unconditional once the minimum supported version of - # google-api-core becomes 2.15.0 or higher. - api_core_major, api_core_minor = [ - int(part) for part in api_core_version.__version__.split(".")[0:2] - ] - if api_core_major > 2 or (api_core_major == 2 and api_core_minor >= 15): - client = client_class( - client_options={"universe_domain": "bar.com"}, - transport=transport_class( - credentials=ga_credentials.AnonymousCredentials(), - ), - ) - with pytest.raises(ValueError) as excinfo: - client._validate_universe_domain() - assert ( - str(excinfo.value) - == "The configured universe domain (bar.com) does not match the universe domain found in the credentials (googleapis.com). If you haven't configured the universe domain explicitly, `googleapis.com` is the default." - ) - - # Test that ValueError is raised if universe_domain is provided via client options and credentials is None - with pytest.raises(ValueError): - client._compare_universes("foo.bar", None) - - @pytest.mark.parametrize( "client_class,transport_name", [ @@ -1110,25 +1051,6 @@ def test_read_rows(request_type, transport: str = "grpc"): assert isinstance(message, bigtable.ReadRowsResponse) -def test_read_rows_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.read_rows), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.read_rows() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.ReadRowsRequest() - - def test_read_rows_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. @@ -1196,35 +1118,13 @@ def test_read_rows_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_read_rows_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(type(client.transport.read_rows), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) - call.return_value.read = mock.AsyncMock( - side_effect=[bigtable.ReadRowsResponse()] - ) - response = await client.read_rows() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.ReadRowsRequest() - - @pytest.mark.asyncio async def test_read_rows_async_use_cached_wrapped_rpc(transport: str = "grpc_asyncio"): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -1263,7 +1163,7 @@ async def test_read_rows_async( transport: str = "grpc_asyncio", request_type=bigtable.ReadRowsRequest ): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -1296,70 +1196,6 @@ async def test_read_rows_async_from_dict(): await test_read_rows_async(request_type=dict) -def test_read_rows_routing_parameters(): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable.ReadRowsRequest( - **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.read_rows), "__call__") as call: - call.return_value = iter([bigtable.ReadRowsResponse()]) - client.read_rows(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - _, _, kw = call.mock_calls[0] - # This test doesn't assert anything useful. - assert kw["metadata"] - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable.ReadRowsRequest(**{"app_profile_id": "sample1"}) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.read_rows), "__call__") as call: - call.return_value = iter([bigtable.ReadRowsResponse()]) - client.read_rows(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - _, _, kw = call.mock_calls[0] - # This test doesn't assert anything useful. - assert kw["metadata"] - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable.ReadRowsRequest( - **{ - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" - } - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.read_rows), "__call__") as call: - call.return_value = iter([bigtable.ReadRowsResponse()]) - client.read_rows(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - _, _, kw = call.mock_calls[0] - # This test doesn't assert anything useful. - assert kw["metadata"] - - def test_read_rows_flattened(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), @@ -1406,7 +1242,7 @@ def test_read_rows_flattened_error(): @pytest.mark.asyncio async def test_read_rows_flattened_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1437,7 +1273,7 @@ async def test_read_rows_flattened_async(): @pytest.mark.asyncio async def test_read_rows_flattened_error_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -1484,25 +1320,6 @@ def test_sample_row_keys(request_type, transport: str = "grpc"): assert isinstance(message, bigtable.SampleRowKeysResponse) -def test_sample_row_keys_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.sample_row_keys() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.SampleRowKeysRequest() - - def test_sample_row_keys_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. @@ -1570,28 +1387,6 @@ def test_sample_row_keys_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_sample_row_keys_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: - # Designate an appropriate return value for the call. 
- call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) - call.return_value.read = mock.AsyncMock( - side_effect=[bigtable.SampleRowKeysResponse()] - ) - response = await client.sample_row_keys() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.SampleRowKeysRequest() - - @pytest.mark.asyncio async def test_sample_row_keys_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -1600,7 +1395,7 @@ async def test_sample_row_keys_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -1639,7 +1434,7 @@ async def test_sample_row_keys_async( transport: str = "grpc_asyncio", request_type=bigtable.SampleRowKeysRequest ): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -1672,70 +1467,6 @@ async def test_sample_row_keys_async_from_dict(): await test_sample_row_keys_async(request_type=dict) -def test_sample_row_keys_routing_parameters(): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable.SampleRowKeysRequest( - **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: - call.return_value = iter([bigtable.SampleRowKeysResponse()]) - client.sample_row_keys(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - _, _, kw = call.mock_calls[0] - # This test doesn't assert anything useful. - assert kw["metadata"] - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable.SampleRowKeysRequest(**{"app_profile_id": "sample1"}) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: - call.return_value = iter([bigtable.SampleRowKeysResponse()]) - client.sample_row_keys(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - _, _, kw = call.mock_calls[0] - # This test doesn't assert anything useful. - assert kw["metadata"] - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable.SampleRowKeysRequest( - **{ - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" - } - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: - call.return_value = iter([bigtable.SampleRowKeysResponse()]) - client.sample_row_keys(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - _, _, kw = call.mock_calls[0] - # This test doesn't assert anything useful. - assert kw["metadata"] - - def test_sample_row_keys_flattened(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), @@ -1782,7 +1513,7 @@ def test_sample_row_keys_flattened_error(): @pytest.mark.asyncio async def test_sample_row_keys_flattened_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1813,7 +1544,7 @@ async def test_sample_row_keys_flattened_async(): @pytest.mark.asyncio async def test_sample_row_keys_flattened_error_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -1859,25 +1590,6 @@ def test_mutate_row(request_type, transport: str = "grpc"): assert isinstance(response, bigtable.MutateRowResponse) -def test_mutate_row_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.mutate_row() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.MutateRowRequest() - - def test_mutate_row_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. @@ -1945,34 +1657,13 @@ def test_mutate_row_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_mutate_row_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable.MutateRowResponse() - ) - response = await client.mutate_row() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.MutateRowRequest() - - @pytest.mark.asyncio async def test_mutate_row_async_use_cached_wrapped_rpc(transport: str = "grpc_asyncio"): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -2011,7 +1702,7 @@ async def test_mutate_row_async( transport: str = "grpc_asyncio", request_type=bigtable.MutateRowRequest ): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -2042,78 +1733,14 @@ async def test_mutate_row_async_from_dict(): await test_mutate_row_async(request_type=dict) -def test_mutate_row_routing_parameters(): +def test_mutate_row_flattened(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), ) - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable.MutateRowRequest( - **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} - ) - # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: - call.return_value = bigtable.MutateRowResponse() - client.mutate_row(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - _, _, kw = call.mock_calls[0] - # This test doesn't assert anything useful. - assert kw["metadata"] - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable.MutateRowRequest(**{"app_profile_id": "sample1"}) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: - call.return_value = bigtable.MutateRowResponse() - client.mutate_row(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - _, _, kw = call.mock_calls[0] - # This test doesn't assert anything useful. - assert kw["metadata"] - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable.MutateRowRequest( - **{ - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" - } - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: - call.return_value = bigtable.MutateRowResponse() - client.mutate_row(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - _, _, kw = call.mock_calls[0] - # This test doesn't assert anything useful. 
- assert kw["metadata"] - - -def test_mutate_row_flattened(): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: - # Designate an appropriate return value for the call. + # Designate an appropriate return value for the call. call.return_value = bigtable.MutateRowResponse() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. @@ -2174,7 +1801,7 @@ def test_mutate_row_flattened_error(): @pytest.mark.asyncio async def test_mutate_row_flattened_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -2223,7 +1850,7 @@ async def test_mutate_row_flattened_async(): @pytest.mark.asyncio async def test_mutate_row_flattened_error_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -2276,25 +1903,6 @@ def test_mutate_rows(request_type, transport: str = "grpc"): assert isinstance(message, bigtable.MutateRowsResponse) -def test_mutate_rows_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.mutate_rows() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.MutateRowsRequest() - - def test_mutate_rows_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. @@ -2362,28 +1970,6 @@ def test_mutate_rows_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_mutate_rows_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: - # Designate an appropriate return value for the call. 
- call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) - call.return_value.read = mock.AsyncMock( - side_effect=[bigtable.MutateRowsResponse()] - ) - response = await client.mutate_rows() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.MutateRowsRequest() - - @pytest.mark.asyncio async def test_mutate_rows_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -2392,7 +1978,7 @@ async def test_mutate_rows_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -2431,7 +2017,7 @@ async def test_mutate_rows_async( transport: str = "grpc_asyncio", request_type=bigtable.MutateRowsRequest ): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -2464,70 +2050,6 @@ async def test_mutate_rows_async_from_dict(): await test_mutate_rows_async(request_type=dict) -def test_mutate_rows_routing_parameters(): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable.MutateRowsRequest( - **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: - call.return_value = iter([bigtable.MutateRowsResponse()]) - client.mutate_rows(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - _, _, kw = call.mock_calls[0] - # This test doesn't assert anything useful. - assert kw["metadata"] - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable.MutateRowsRequest(**{"app_profile_id": "sample1"}) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: - call.return_value = iter([bigtable.MutateRowsResponse()]) - client.mutate_rows(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - _, _, kw = call.mock_calls[0] - # This test doesn't assert anything useful. - assert kw["metadata"] - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable.MutateRowsRequest( - **{ - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" - } - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: - call.return_value = iter([bigtable.MutateRowsResponse()]) - client.mutate_rows(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - _, _, kw = call.mock_calls[0] - # This test doesn't assert anything useful. 
- assert kw["metadata"] - - def test_mutate_rows_flattened(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), @@ -2579,7 +2101,7 @@ def test_mutate_rows_flattened_error(): @pytest.mark.asyncio async def test_mutate_rows_flattened_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -2614,7 +2136,7 @@ async def test_mutate_rows_flattened_async(): @pytest.mark.asyncio async def test_mutate_rows_flattened_error_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -2666,27 +2188,6 @@ def test_check_and_mutate_row(request_type, transport: str = "grpc"): assert response.predicate_matched is True -def test_check_and_mutate_row_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.check_and_mutate_row), "__call__" - ) as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.check_and_mutate_row() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.CheckAndMutateRowRequest() - - def test_check_and_mutate_row_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. @@ -2760,31 +2261,6 @@ def test_check_and_mutate_row_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_check_and_mutate_row_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.check_and_mutate_row), "__call__" - ) as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable.CheckAndMutateRowResponse( - predicate_matched=True, - ) - ) - response = await client.check_and_mutate_row() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.CheckAndMutateRowRequest() - - @pytest.mark.asyncio async def test_check_and_mutate_row_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -2793,7 +2269,7 @@ async def test_check_and_mutate_row_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -2832,7 +2308,7 @@ async def test_check_and_mutate_row_async( transport: str = "grpc_asyncio", request_type=bigtable.CheckAndMutateRowRequest ): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -2868,76 +2344,6 @@ async def test_check_and_mutate_row_async_from_dict(): await test_check_and_mutate_row_async(request_type=dict) -def test_check_and_mutate_row_routing_parameters(): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable.CheckAndMutateRowRequest( - **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.check_and_mutate_row), "__call__" - ) as call: - call.return_value = bigtable.CheckAndMutateRowResponse() - client.check_and_mutate_row(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - _, _, kw = call.mock_calls[0] - # This test doesn't assert anything useful. - assert kw["metadata"] - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable.CheckAndMutateRowRequest(**{"app_profile_id": "sample1"}) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.check_and_mutate_row), "__call__" - ) as call: - call.return_value = bigtable.CheckAndMutateRowResponse() - client.check_and_mutate_row(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - _, _, kw = call.mock_calls[0] - # This test doesn't assert anything useful. - assert kw["metadata"] - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable.CheckAndMutateRowRequest( - **{ - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" - } - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.check_and_mutate_row), "__call__" - ) as call: - call.return_value = bigtable.CheckAndMutateRowResponse() - client.check_and_mutate_row(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - _, _, kw = call.mock_calls[0] - # This test doesn't assert anything useful. - assert kw["metadata"] - - def test_check_and_mutate_row_flattened(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), @@ -3058,7 +2464,7 @@ def test_check_and_mutate_row_flattened_error(): @pytest.mark.asyncio async def test_check_and_mutate_row_flattened_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -3143,7 +2549,7 @@ async def test_check_and_mutate_row_flattened_async(): @pytest.mark.asyncio async def test_check_and_mutate_row_flattened_error_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -3211,25 +2617,6 @@ def test_ping_and_warm(request_type, transport: str = "grpc"): assert isinstance(response, bigtable.PingAndWarmResponse) -def test_ping_and_warm_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.ping_and_warm), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.ping_and_warm() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.PingAndWarmRequest() - - def test_ping_and_warm_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. @@ -3295,27 +2682,6 @@ def test_ping_and_warm_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_ping_and_warm_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.ping_and_warm), "__call__") as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable.PingAndWarmResponse() - ) - response = await client.ping_and_warm() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.PingAndWarmRequest() - - @pytest.mark.asyncio async def test_ping_and_warm_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -3324,7 +2690,7 @@ async def test_ping_and_warm_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -3363,7 +2729,7 @@ async def test_ping_and_warm_async( transport: str = "grpc_asyncio", request_type=bigtable.PingAndWarmRequest ): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -3394,49 +2760,6 @@ async def test_ping_and_warm_async_from_dict(): await test_ping_and_warm_async(request_type=dict) -def test_ping_and_warm_routing_parameters(): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable.PingAndWarmRequest( - **{"name": "projects/sample1/instances/sample2"} - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.ping_and_warm), "__call__") as call: - call.return_value = bigtable.PingAndWarmResponse() - client.ping_and_warm(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - _, _, kw = call.mock_calls[0] - # This test doesn't assert anything useful. - assert kw["metadata"] - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable.PingAndWarmRequest(**{"app_profile_id": "sample1"}) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.ping_and_warm), "__call__") as call: - call.return_value = bigtable.PingAndWarmResponse() - client.ping_and_warm(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - _, _, kw = call.mock_calls[0] - # This test doesn't assert anything useful. - assert kw["metadata"] - - def test_ping_and_warm_flattened(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), @@ -3483,7 +2806,7 @@ def test_ping_and_warm_flattened_error(): @pytest.mark.asyncio async def test_ping_and_warm_flattened_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -3516,7 +2839,7 @@ async def test_ping_and_warm_flattened_async(): @pytest.mark.asyncio async def test_ping_and_warm_flattened_error_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -3564,27 +2887,6 @@ def test_read_modify_write_row(request_type, transport: str = "grpc"): assert isinstance(response, bigtable.ReadModifyWriteRowResponse) -def test_read_modify_write_row_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_modify_write_row), "__call__" - ) as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.read_modify_write_row() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.ReadModifyWriteRowRequest() - - def test_read_modify_write_row_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. @@ -3659,29 +2961,6 @@ def test_read_modify_write_row_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_read_modify_write_row_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_modify_write_row), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable.ReadModifyWriteRowResponse() - ) - response = await client.read_modify_write_row() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.ReadModifyWriteRowRequest() - - @pytest.mark.asyncio async def test_read_modify_write_row_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -3690,7 +2969,7 @@ async def test_read_modify_write_row_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -3729,7 +3008,7 @@ async def test_read_modify_write_row_async( transport: str = "grpc_asyncio", request_type=bigtable.ReadModifyWriteRowRequest ): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -3762,76 +3041,6 @@ async def test_read_modify_write_row_async_from_dict(): await test_read_modify_write_row_async(request_type=dict) -def test_read_modify_write_row_routing_parameters(): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. 
Set these to a non-empty value. - request = bigtable.ReadModifyWriteRowRequest( - **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_modify_write_row), "__call__" - ) as call: - call.return_value = bigtable.ReadModifyWriteRowResponse() - client.read_modify_write_row(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - _, _, kw = call.mock_calls[0] - # This test doesn't assert anything useful. - assert kw["metadata"] - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable.ReadModifyWriteRowRequest(**{"app_profile_id": "sample1"}) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_modify_write_row), "__call__" - ) as call: - call.return_value = bigtable.ReadModifyWriteRowResponse() - client.read_modify_write_row(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - _, _, kw = call.mock_calls[0] - # This test doesn't assert anything useful. - assert kw["metadata"] - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable.ReadModifyWriteRowRequest( - **{ - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" - } - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_modify_write_row), "__call__" - ) as call: - call.return_value = bigtable.ReadModifyWriteRowResponse() - client.read_modify_write_row(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - _, _, kw = call.mock_calls[0] - # This test doesn't assert anything useful. - assert kw["metadata"] - - def test_read_modify_write_row_flattened(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), @@ -3890,7 +3099,7 @@ def test_read_modify_write_row_flattened_error(): @pytest.mark.asyncio async def test_read_modify_write_row_flattened_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -3933,7 +3142,7 @@ async def test_read_modify_write_row_flattened_async(): @pytest.mark.asyncio async def test_read_modify_write_row_flattened_error_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -3990,27 +3199,6 @@ def test_generate_initial_change_stream_partitions( ) -def test_generate_initial_change_stream_partitions_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.generate_initial_change_stream_partitions), "__call__" - ) as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.generate_initial_change_stream_partitions() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.GenerateInitialChangeStreamPartitionsRequest() - - def test_generate_initial_change_stream_partitions_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. @@ -4083,30 +3271,6 @@ def test_generate_initial_change_stream_partitions_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_generate_initial_change_stream_partitions_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.generate_initial_change_stream_partitions), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) - call.return_value.read = mock.AsyncMock( - side_effect=[bigtable.GenerateInitialChangeStreamPartitionsResponse()] - ) - response = await client.generate_initial_change_stream_partitions() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.GenerateInitialChangeStreamPartitionsRequest() - - @pytest.mark.asyncio async def test_generate_initial_change_stream_partitions_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -4115,7 +3279,7 @@ async def test_generate_initial_change_stream_partitions_async_use_cached_wrappe # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -4155,7 +3319,7 @@ async def test_generate_initial_change_stream_partitions_async( request_type=bigtable.GenerateInitialChangeStreamPartitionsRequest, ): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -4226,7 +3390,7 @@ def test_generate_initial_change_stream_partitions_field_headers(): @pytest.mark.asyncio async def test_generate_initial_change_stream_partitions_field_headers_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -4308,7 +3472,7 @@ def test_generate_initial_change_stream_partitions_flattened_error(): @pytest.mark.asyncio async def test_generate_initial_change_stream_partitions_flattened_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -4343,7 +3507,7 @@ async def test_generate_initial_change_stream_partitions_flattened_async(): @pytest.mark.asyncio async def test_generate_initial_change_stream_partitions_flattened_error_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -4392,27 +3556,6 @@ def test_read_change_stream(request_type, transport: str = "grpc"): assert isinstance(message, bigtable.ReadChangeStreamResponse) -def test_read_change_stream_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_change_stream), "__call__" - ) as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.read_change_stream() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.ReadChangeStreamRequest() - - def test_read_change_stream_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. @@ -4484,30 +3627,6 @@ def test_read_change_stream_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_read_change_stream_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_change_stream), "__call__" - ) as call: - # Designate an appropriate return value for the call. 
- call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) - call.return_value.read = mock.AsyncMock( - side_effect=[bigtable.ReadChangeStreamResponse()] - ) - response = await client.read_change_stream() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.ReadChangeStreamRequest() - - @pytest.mark.asyncio async def test_read_change_stream_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -4516,7 +3635,7 @@ async def test_read_change_stream_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -4555,7 +3674,7 @@ async def test_read_change_stream_async( transport: str = "grpc_asyncio", request_type=bigtable.ReadChangeStreamRequest ): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -4624,7 +3743,7 @@ def test_read_change_stream_field_headers(): @pytest.mark.asyncio async def test_read_change_stream_field_headers_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -4704,7 +3823,7 @@ def test_read_change_stream_flattened_error(): @pytest.mark.asyncio async def test_read_change_stream_flattened_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -4737,7 +3856,7 @@ async def test_read_change_stream_flattened_async(): @pytest.mark.asyncio async def test_read_change_stream_flattened_error_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -4784,25 +3903,6 @@ def test_execute_query(request_type, transport: str = "grpc"): assert isinstance(message, bigtable.ExecuteQueryResponse) -def test_execute_query_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.execute_query), "__call__") as call: - call.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client.execute_query() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.ExecuteQueryRequest() - - def test_execute_query_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. @@ -4870,28 +3970,6 @@ def test_execute_query_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.asyncio -async def test_execute_query_empty_call_async(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
- client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.execute_query), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) - call.return_value.read = mock.AsyncMock( - side_effect=[bigtable.ExecuteQueryResponse()] - ) - response = await client.execute_query() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.ExecuteQueryRequest() - - @pytest.mark.asyncio async def test_execute_query_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", @@ -4900,7 +3978,7 @@ async def test_execute_query_async_use_cached_wrapped_rpc( # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -4939,7 +4017,7 @@ async def test_execute_query_async( transport: str = "grpc_asyncio", request_type=bigtable.ExecuteQueryRequest ): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), transport=transport, ) @@ -4972,49 +4050,6 @@ async def test_execute_query_async_from_dict(): await test_execute_query_async(request_type=dict) -def test_execute_query_routing_parameters(): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable.ExecuteQueryRequest( - **{"instance_name": "projects/sample1/instances/sample2"} - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.execute_query), "__call__") as call: - call.return_value = iter([bigtable.ExecuteQueryResponse()]) - client.execute_query(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - _, _, kw = call.mock_calls[0] - # This test doesn't assert anything useful. - assert kw["metadata"] - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable.ExecuteQueryRequest(**{"app_profile_id": "sample1"}) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object(type(client.transport.execute_query), "__call__") as call: - call.return_value = iter([bigtable.ExecuteQueryResponse()]) - client.execute_query(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - _, _, kw = call.mock_calls[0] - # This test doesn't assert anything useful. - assert kw["metadata"] - - def test_execute_query_flattened(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), @@ -5066,7 +4101,7 @@ def test_execute_query_flattened_error(): @pytest.mark.asyncio async def test_execute_query_flattened_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -5101,7 +4136,7 @@ async def test_execute_query_flattened_async(): @pytest.mark.asyncio async def test_execute_query_flattened_error_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened @@ -5115,53 +4150,6 @@ async def test_execute_query_flattened_error_async(): ) -@pytest.mark.parametrize( - "request_type", - [ - bigtable.ReadRowsRequest, - dict, - ], -) -def test_read_rows_rest(request_type): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable.ReadRowsResponse( - last_scanned_row_key=b"last_scanned_row_key_blob", - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable.ReadRowsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - json_return_value = "[{}]".format(json_return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - with mock.patch.object(response_value, "iter_content") as iter_content: - iter_content.return_value = iter(json_return_value) - response = client.read_rows(request) - - assert isinstance(response, Iterable) - response = next(response) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, bigtable.ReadRowsResponse) - assert response.last_scanned_row_key == b"last_scanned_row_key_blob" - - def test_read_rows_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call @@ -5198,84 +4186,6 @@ def test_read_rows_rest_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_read_rows_rest_interceptors(null_interceptor): - transport = transports.BigtableRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), - ) - client = BigtableClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableRestInterceptor, "post_read_rows" - ) as post, mock.patch.object( - transports.BigtableRestInterceptor, "pre_read_rows" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable.ReadRowsRequest.pb(bigtable.ReadRowsRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = bigtable.ReadRowsResponse.to_json( - bigtable.ReadRowsResponse() - ) - req.return_value._content = "[{}]".format(req.return_value._content) - - request = bigtable.ReadRowsRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable.ReadRowsResponse() - - client.read_rows( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - - -def test_read_rows_rest_bad_request( - transport: str = "rest", request_type=bigtable.ReadRowsRequest -): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.read_rows(request) - - def test_read_rows_rest_flattened(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), @@ -5340,61 +4250,6 @@ def test_read_rows_rest_flattened_error(transport: str = "rest"): ) -def test_read_rows_rest_error(): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable.SampleRowKeysRequest, - dict, - ], -) -def test_sample_row_keys_rest(request_type): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable.SampleRowKeysResponse( - row_key=b"row_key_blob", - offset_bytes=1293, - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable.SampleRowKeysResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - json_return_value = "[{}]".format(json_return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - with mock.patch.object(response_value, "iter_content") as iter_content: - iter_content.return_value = iter(json_return_value) - response = client.sample_row_keys(request) - - assert isinstance(response, Iterable) - response = next(response) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, bigtable.SampleRowKeysResponse) - assert response.row_key == b"row_key_blob" - assert response.offset_bytes == 1293 - - def test_sample_row_keys_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call @@ -5431,104 +4286,26 @@ def test_sample_row_keys_rest_use_cached_wrapped_rpc(): assert mock_rpc.call_count == 2 -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_sample_row_keys_rest_interceptors(null_interceptor): - transport = transports.BigtableRestTransport( +def test_sample_row_keys_rest_flattened(): + client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), + transport="rest", ) - client = BigtableClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableRestInterceptor, "post_sample_row_keys" - ) as post, mock.patch.object( - transports.BigtableRestInterceptor, "pre_sample_row_keys" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable.SampleRowKeysRequest.pb(bigtable.SampleRowKeysRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.SampleRowKeysResponse() + + # get arguments that satisfy an http rule for this method + sample_request = { + "table_name": "projects/sample1/instances/sample2/tables/sample3" } - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = bigtable.SampleRowKeysResponse.to_json( - bigtable.SampleRowKeysResponse() - ) - req.return_value._content = "[{}]".format(req.return_value._content) - - request = bigtable.SampleRowKeysRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable.SampleRowKeysResponse() - - client.sample_row_keys( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - - -def test_sample_row_keys_rest_bad_request( - transport: str = "rest", request_type=bigtable.SampleRowKeysRequest -): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.sample_row_keys(request) - - -def test_sample_row_keys_rest_flattened(): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable.SampleRowKeysResponse() - - # get arguments that satisfy an http rule for this method - sample_request = { - "table_name": "projects/sample1/instances/sample2/tables/sample3" - } - - # get truthy value for each flattened field - mock_args = dict( - table_name="table_name_value", - app_profile_id="app_profile_id_value", + # get truthy value for each flattened field + mock_args = dict( + table_name="table_name_value", + app_profile_id="app_profile_id_value", ) mock_args.update(sample_request) @@ -5573,49 +4350,6 @@ def test_sample_row_keys_rest_flattened_error(transport: str = "rest"): ) -def test_sample_row_keys_rest_error(): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable.MutateRowRequest, - dict, - ], -) -def test_mutate_row_rest(request_type): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable.MutateRowResponse() - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable.MutateRowResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.mutate_row(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, bigtable.MutateRowResponse) - - def test_mutate_row_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call @@ -5742,83 +4476,6 @@ def test_mutate_row_rest_unset_required_fields(): ) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_mutate_row_rest_interceptors(null_interceptor): - transport = transports.BigtableRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), - ) - client = BigtableClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableRestInterceptor, "post_mutate_row" - ) as post, mock.patch.object( - transports.BigtableRestInterceptor, "pre_mutate_row" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable.MutateRowRequest.pb(bigtable.MutateRowRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = bigtable.MutateRowResponse.to_json( - bigtable.MutateRowResponse() - ) - - request = bigtable.MutateRowRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable.MutateRowResponse() - - client.mutate_row( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - - -def test_mutate_row_rest_bad_request( - transport: str = "rest", request_type=bigtable.MutateRowRequest -): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.mutate_row(request) - - def test_mutate_row_rest_flattened(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), @@ -5892,56 +4549,6 @@ def test_mutate_row_rest_flattened_error(transport: str = "rest"): ) -def test_mutate_row_rest_error(): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable.MutateRowsRequest, - dict, - ], -) -def test_mutate_rows_rest(request_type): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable.MutateRowsResponse() - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable.MutateRowsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - json_return_value = "[{}]".format(json_return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - with mock.patch.object(response_value, "iter_content") as iter_content: - iter_content.return_value = iter(json_return_value) - response = client.mutate_rows(request) - - assert isinstance(response, Iterable) - response = next(response) - - # Establish that the response is the type that we expect. - assert isinstance(response, bigtable.MutateRowsResponse) - - def test_mutate_rows_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call @@ -6058,94 +4665,16 @@ def test_mutate_rows_rest_unset_required_fields(): assert set(unset_fields) == (set(()) & set(("entries",))) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_mutate_rows_rest_interceptors(null_interceptor): - transport = transports.BigtableRestTransport( +def test_mutate_rows_rest_flattened(): + client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), + transport="rest", ) - client = BigtableClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableRestInterceptor, "post_mutate_rows" - ) as post, mock.patch.object( - transports.BigtableRestInterceptor, "pre_mutate_rows" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable.MutateRowsRequest.pb(bigtable.MutateRowsRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = bigtable.MutateRowsResponse.to_json( - bigtable.MutateRowsResponse() - ) - req.return_value._content = "[{}]".format(req.return_value._content) - - request = bigtable.MutateRowsRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable.MutateRowsResponse() - - client.mutate_rows( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - - -def test_mutate_rows_rest_bad_request( - transport: str = "rest", request_type=bigtable.MutateRowsRequest -): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.mutate_rows(request) - - -def test_mutate_rows_rest_flattened(): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable.MutateRowsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.MutateRowsResponse() # get arguments that satisfy an http rule for this method sample_request = { @@ -6202,52 +4731,6 @@ def test_mutate_rows_rest_flattened_error(transport: str = "rest"): ) -def test_mutate_rows_rest_error(): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable.CheckAndMutateRowRequest, - dict, - ], -) -def test_check_and_mutate_row_rest(request_type): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable.CheckAndMutateRowResponse( - predicate_matched=True, - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable.CheckAndMutateRowResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.check_and_mutate_row(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, bigtable.CheckAndMutateRowResponse) - assert response.predicate_matched is True - - def test_check_and_mutate_row_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call @@ -6372,85 +4855,6 @@ def test_check_and_mutate_row_rest_unset_required_fields(): assert set(unset_fields) == (set(()) & set(("rowKey",))) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_check_and_mutate_row_rest_interceptors(null_interceptor): - transport = transports.BigtableRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), - ) - client = BigtableClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableRestInterceptor, "post_check_and_mutate_row" - ) as post, mock.patch.object( - transports.BigtableRestInterceptor, "pre_check_and_mutate_row" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable.CheckAndMutateRowRequest.pb( - bigtable.CheckAndMutateRowRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = bigtable.CheckAndMutateRowResponse.to_json( - bigtable.CheckAndMutateRowResponse() - ) - - request = bigtable.CheckAndMutateRowRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable.CheckAndMutateRowResponse() - - client.check_and_mutate_row( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - - -def test_check_and_mutate_row_rest_bad_request( - transport: str = "rest", request_type=bigtable.CheckAndMutateRowRequest -): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.check_and_mutate_row(request) - - def test_check_and_mutate_row_rest_flattened(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), @@ -6556,49 +4960,6 @@ def test_check_and_mutate_row_rest_flattened_error(transport: str = "rest"): ) -def test_check_and_mutate_row_rest_error(): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable.PingAndWarmRequest, - dict, - ], -) -def test_ping_and_warm_rest(request_type): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable.PingAndWarmResponse() - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable.PingAndWarmResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.ping_and_warm(request) - - # Establish that the response is the type that we expect. - assert isinstance(response, bigtable.PingAndWarmResponse) - - def test_ping_and_warm_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call @@ -6717,96 +5078,19 @@ def test_ping_and_warm_rest_unset_required_fields(): assert set(unset_fields) == (set(()) & set(("name",))) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_ping_and_warm_rest_interceptors(null_interceptor): - transport = transports.BigtableRestTransport( +def test_ping_and_warm_rest_flattened(): + client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), + transport="rest", ) - client = BigtableClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableRestInterceptor, "post_ping_and_warm" - ) as post, mock.patch.object( - transports.BigtableRestInterceptor, "pre_ping_and_warm" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable.PingAndWarmRequest.pb(bigtable.PingAndWarmRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = bigtable.PingAndWarmResponse.to_json( - bigtable.PingAndWarmResponse() - ) + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.PingAndWarmResponse() - request = bigtable.PingAndWarmRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable.PingAndWarmResponse() - - client.ping_and_warm( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - - -def test_ping_and_warm_rest_bad_request( - transport: str = "rest", request_type=bigtable.PingAndWarmRequest -): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.ping_and_warm(request) - - -def test_ping_and_warm_rest_flattened(): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable.PingAndWarmResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {"name": "projects/sample1/instances/sample2"} + # get arguments that satisfy an http rule for this method + sample_request = {"name": "projects/sample1/instances/sample2"} # get truthy value for each flattened field mock_args = dict( @@ -6851,49 +5135,6 @@ def test_ping_and_warm_rest_flattened_error(transport: str = "rest"): ) -def test_ping_and_warm_rest_error(): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable.ReadModifyWriteRowRequest, - dict, - ], -) -def test_read_modify_write_row_rest(request_type): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable.ReadModifyWriteRowResponse() - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable.ReadModifyWriteRowResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - response = client.read_modify_write_row(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, bigtable.ReadModifyWriteRowResponse) - - def test_read_modify_write_row_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call @@ -7027,85 +5268,6 @@ def test_read_modify_write_row_rest_unset_required_fields(): ) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_read_modify_write_row_rest_interceptors(null_interceptor): - transport = transports.BigtableRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), - ) - client = BigtableClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableRestInterceptor, "post_read_modify_write_row" - ) as post, mock.patch.object( - transports.BigtableRestInterceptor, "pre_read_modify_write_row" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable.ReadModifyWriteRowRequest.pb( - bigtable.ReadModifyWriteRowRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = bigtable.ReadModifyWriteRowResponse.to_json( - bigtable.ReadModifyWriteRowResponse() - ) - - request = bigtable.ReadModifyWriteRowRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable.ReadModifyWriteRowResponse() - - client.read_modify_write_row( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - - -def test_read_modify_write_row_rest_bad_request( - transport: str = "rest", request_type=bigtable.ReadModifyWriteRowRequest -): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.read_modify_write_row(request) - - def test_read_modify_write_row_rest_flattened(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), @@ -7171,58 +5333,6 @@ def test_read_modify_write_row_rest_flattened_error(transport: str = "rest"): ) -def test_read_modify_write_row_rest_error(): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable.GenerateInitialChangeStreamPartitionsRequest, - dict, - ], -) -def test_generate_initial_change_stream_partitions_rest(request_type): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse() - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(return_value) - - json_return_value = "[{}]".format(json_return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - with mock.patch.object(response_value, "iter_content") as iter_content: - iter_content.return_value = iter(json_return_value) - response = client.generate_initial_change_stream_partitions(request) - - assert isinstance(response, Iterable) - response = next(response) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, bigtable.GenerateInitialChangeStreamPartitionsResponse) - - def test_generate_initial_change_stream_partitions_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call @@ -7361,113 +5471,28 @@ def test_generate_initial_change_stream_partitions_rest_unset_required_fields(): assert set(unset_fields) == (set(()) & set(("tableName",))) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_generate_initial_change_stream_partitions_rest_interceptors(null_interceptor): - transport = transports.BigtableRestTransport( +def test_generate_initial_change_stream_partitions_rest_flattened(): + client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), + transport="rest", ) - client = BigtableClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableRestInterceptor, - "post_generate_initial_change_stream_partitions", - ) as post, mock.patch.object( - transports.BigtableRestInterceptor, - "pre_generate_initial_change_stream_partitions", - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable.GenerateInitialChangeStreamPartitionsRequest.pb( - bigtable.GenerateInitialChangeStreamPartitionsRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = { + "table_name": "projects/sample1/instances/sample2/tables/sample3" } - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = ( - bigtable.GenerateInitialChangeStreamPartitionsResponse.to_json( - bigtable.GenerateInitialChangeStreamPartitionsResponse() - ) + # get truthy value for each flattened field + mock_args = dict( + table_name="table_name_value", + app_profile_id="app_profile_id_value", ) - req.return_value._content = "[{}]".format(req.return_value._content) - - request = bigtable.GenerateInitialChangeStreamPartitionsRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse() - - client.generate_initial_change_stream_partitions( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - - -def test_generate_initial_change_stream_partitions_rest_bad_request( - transport: str = "rest", - request_type=bigtable.GenerateInitialChangeStreamPartitionsRequest, -): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.generate_initial_change_stream_partitions(request) - - -def test_generate_initial_change_stream_partitions_rest_flattened(): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. 
- return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse() - - # get arguments that satisfy an http rule for this method - sample_request = { - "table_name": "projects/sample1/instances/sample2/tables/sample3" - } - - # get truthy value for each flattened field - mock_args = dict( - table_name="table_name_value", - app_profile_id="app_profile_id_value", - ) - mock_args.update(sample_request) + mock_args.update(sample_request) # Wrap the value into a proper Response obj response_value = Response() @@ -7514,56 +5539,6 @@ def test_generate_initial_change_stream_partitions_rest_flattened_error( ) -def test_generate_initial_change_stream_partitions_rest_error(): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable.ReadChangeStreamRequest, - dict, - ], -) -def test_read_change_stream_rest(request_type): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable.ReadChangeStreamResponse() - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable.ReadChangeStreamResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - json_return_value = "[{}]".format(json_return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - with mock.patch.object(response_value, "iter_content") as iter_content: - iter_content.return_value = iter(json_return_value) - response = client.read_change_stream(request) - - assert isinstance(response, Iterable) - response = next(response) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, bigtable.ReadChangeStreamResponse) - - def test_read_change_stream_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call @@ -7691,86 +5666,6 @@ def test_read_change_stream_rest_unset_required_fields(): assert set(unset_fields) == (set(()) & set(("tableName",))) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_read_change_stream_rest_interceptors(null_interceptor): - transport = transports.BigtableRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), - ) - client = BigtableClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableRestInterceptor, "post_read_change_stream" - ) as post, mock.patch.object( - transports.BigtableRestInterceptor, "pre_read_change_stream" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable.ReadChangeStreamRequest.pb( - bigtable.ReadChangeStreamRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = bigtable.ReadChangeStreamResponse.to_json( - bigtable.ReadChangeStreamResponse() - ) - req.return_value._content = "[{}]".format(req.return_value._content) - - request = bigtable.ReadChangeStreamRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable.ReadChangeStreamResponse() - - client.read_change_stream( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - post.assert_called_once() - - -def test_read_change_stream_rest_bad_request( - transport: str = "rest", request_type=bigtable.ReadChangeStreamRequest -): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.read_change_stream(request) - - def test_read_change_stream_rest_flattened(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), @@ -7835,56 +5730,6 @@ def test_read_change_stream_rest_flattened_error(transport: str = "rest"): ) -def test_read_change_stream_rest_error(): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable.ExecuteQueryRequest, - dict, - ], -) -def test_execute_query_rest(request_type): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {"instance_name": "projects/sample1/instances/sample2"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable.ExecuteQueryResponse() - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable.ExecuteQueryResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - json_return_value = "[{}]".format(json_return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - with mock.patch.object(response_value, "iter_content") as iter_content: - iter_content.return_value = iter(json_return_value) - response = client.execute_query(request) - - assert isinstance(response, Iterable) - response = next(response) - - # Establish that the response is the type that we expect. - assert isinstance(response, bigtable.ExecuteQueryResponse) - - def test_execute_query_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call @@ -8019,258 +5864,4084 @@ def test_execute_query_rest_unset_required_fields(): ) -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_execute_query_rest_interceptors(null_interceptor): - transport = transports.BigtableRestTransport( +def test_execute_query_rest_flattened(): + client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), - interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), + transport="rest", ) - client = BigtableClient(transport=transport) - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableRestInterceptor, "post_execute_query" - ) as post, mock.patch.object( - transports.BigtableRestInterceptor, "pre_execute_query" - ) as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable.ExecuteQueryRequest.pb(bigtable.ExecuteQueryRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.ExecuteQueryResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"instance_name": "projects/sample1/instances/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + instance_name="instance_name_value", + query="query_value", + app_profile_id="app_profile_id_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable.ExecuteQueryResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + json_return_value = "[{}]".format(json_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + with mock.patch.object(response_value, "iter_content") as iter_content: + iter_content.return_value = iter(json_return_value) + client.execute_query(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{instance_name=projects/*/instances/*}:executeQuery" + % client.transport._host, + args[1], + ) + + +def test_execute_query_rest_flattened_error(transport: str = "rest"): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.execute_query( + bigtable.ExecuteQueryRequest(), + instance_name="instance_name_value", + query="query_value", + app_profile_id="app_profile_id_value", + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.BigtableGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.BigtableGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BigtableClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.BigtableGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = BigtableClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = BigtableClient( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. 
+ transport = transports.BigtableGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BigtableClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.BigtableGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = BigtableClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.BigtableGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.BigtableGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.BigtableGrpcTransport, + transports.BigtableGrpcAsyncIOTransport, + transports.BigtableRestTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_transport_kind_grpc(): + transport = BigtableClient.get_transport_class("grpc")( + credentials=ga_credentials.AnonymousCredentials() + ) + assert transport.kind == "grpc" + + +def test_initialize_client_w_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc" + ) + assert client is not None + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_read_rows_empty_call_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.read_rows), "__call__") as call: + call.return_value = iter([bigtable.ReadRowsResponse()]) + client.read_rows(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.ReadRowsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_sample_row_keys_empty_call_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: + call.return_value = iter([bigtable.SampleRowKeysResponse()]) + client.sample_row_keys(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.SampleRowKeysRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_mutate_row_empty_call_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. 
+ with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: + call.return_value = bigtable.MutateRowResponse() + client.mutate_row(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.MutateRowRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_mutate_rows_empty_call_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: + call.return_value = iter([bigtable.MutateRowsResponse()]) + client.mutate_rows(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.MutateRowsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_check_and_mutate_row_empty_call_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.check_and_mutate_row), "__call__" + ) as call: + call.return_value = bigtable.CheckAndMutateRowResponse() + client.check_and_mutate_row(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.CheckAndMutateRowRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_ping_and_warm_empty_call_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.ping_and_warm), "__call__") as call: + call.return_value = bigtable.PingAndWarmResponse() + client.ping_and_warm(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.PingAndWarmRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_read_modify_write_row_empty_call_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.read_modify_write_row), "__call__" + ) as call: + call.return_value = bigtable.ReadModifyWriteRowResponse() + client.read_modify_write_row(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.ReadModifyWriteRowRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
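+# GenerateInitialChangeStreamPartitions is a server-streaming RPC, so the mocked
+# call below returns an iterator of responses rather than a single message.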
+def test_generate_initial_change_stream_partitions_empty_call_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.generate_initial_change_stream_partitions), "__call__" + ) as call: + call.return_value = iter( + [bigtable.GenerateInitialChangeStreamPartitionsResponse()] + ) + client.generate_initial_change_stream_partitions(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.GenerateInitialChangeStreamPartitionsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_read_change_stream_empty_call_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.read_change_stream), "__call__" + ) as call: + call.return_value = iter([bigtable.ReadChangeStreamResponse()]) + client.read_change_stream(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.ReadChangeStreamRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_execute_query_empty_call_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.execute_query), "__call__") as call: + call.return_value = iter([bigtable.ExecuteQueryResponse()]) + client.execute_query(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.ExecuteQueryRequest() + + assert args[0] == request_msg + + +def test_read_rows_routing_parameters_request_1_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.read_rows), "__call__") as call: + call.return_value = iter([bigtable.ReadRowsResponse()]) + client.read_rows( + request={"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ReadRowsRequest( + **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + assert args[0] == request_msg + + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3" + } + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_read_rows_routing_parameters_request_2_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. 
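+    # Only the fields actually populated on the request are forwarded as routing
+    # parameters. A minimal sketch of the encoding the assertion below relies on,
+    # assuming google.api_core's gapic_v1.routing_header helper:
+    #     gapic_v1.routing_header.to_grpc_metadata({"app_profile_id": "sample1"})
+    #     == ("x-goog-request-params", "app_profile_id=sample1")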
+ with mock.patch.object(type(client.transport.read_rows), "__call__") as call: + call.return_value = iter([bigtable.ReadRowsResponse()]) + client.read_rows(request={"app_profile_id": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ReadRowsRequest(**{"app_profile_id": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"app_profile_id": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_read_rows_routing_parameters_request_3_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.read_rows), "__call__") as call: + call.return_value = iter([bigtable.ReadRowsResponse()]) + client.read_rows( + request={ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ReadRowsRequest( + **{ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + ) + + assert args[0] == request_msg + + expected_headers = { + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_sample_row_keys_routing_parameters_request_1_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: + call.return_value = iter([bigtable.SampleRowKeysResponse()]) + client.sample_row_keys( + request={"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.SampleRowKeysRequest( + **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + assert args[0] == request_msg + + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3" + } + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_sample_row_keys_routing_parameters_request_2_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: + call.return_value = iter([bigtable.SampleRowKeysResponse()]) + client.sample_row_keys(request={"app_profile_id": "sample1"}) + + # Establish that the underlying stub method was called. 
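+        # (The dict passed as request is expanded into a SampleRowKeysRequest by
+        # the client, so the check below is full protobuf message equality.)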
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.SampleRowKeysRequest(**{"app_profile_id": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"app_profile_id": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_sample_row_keys_routing_parameters_request_3_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: + call.return_value = iter([bigtable.SampleRowKeysResponse()]) + client.sample_row_keys( + request={ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.SampleRowKeysRequest( + **{ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + ) + + assert args[0] == request_msg + + expected_headers = { + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_mutate_row_routing_parameters_request_1_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: + call.return_value = bigtable.MutateRowResponse() + client.mutate_row( + request={"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.MutateRowRequest( + **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + assert args[0] == request_msg + + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3" + } + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_mutate_row_routing_parameters_request_2_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: + call.return_value = bigtable.MutateRowResponse() + client.mutate_row(request={"app_profile_id": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.MutateRowRequest(**{"app_profile_id": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"app_profile_id": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_mutate_row_routing_parameters_request_3_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. 
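+    # Requests addressed to an authorized view carry the full authorizedViews
+    # resource path; the whole path becomes the routing parameter value, which the
+    # routing-header helper URL-encodes when building x-goog-request-params.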
+ with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: + call.return_value = bigtable.MutateRowResponse() + client.mutate_row( + request={ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.MutateRowRequest( + **{ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + ) + + assert args[0] == request_msg + + expected_headers = { + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_mutate_rows_routing_parameters_request_1_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: + call.return_value = iter([bigtable.MutateRowsResponse()]) + client.mutate_rows( + request={"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.MutateRowsRequest( + **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + assert args[0] == request_msg + + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3" + } + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_mutate_rows_routing_parameters_request_2_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: + call.return_value = iter([bigtable.MutateRowsResponse()]) + client.mutate_rows(request={"app_profile_id": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.MutateRowsRequest(**{"app_profile_id": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"app_profile_id": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_mutate_rows_routing_parameters_request_3_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: + call.return_value = iter([bigtable.MutateRowsResponse()]) + client.mutate_rows( + request={ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + ) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.MutateRowsRequest( + **{ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + ) + + assert args[0] == request_msg + + expected_headers = { + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_check_and_mutate_row_routing_parameters_request_1_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.check_and_mutate_row), "__call__" + ) as call: + call.return_value = bigtable.CheckAndMutateRowResponse() + client.check_and_mutate_row( + request={"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.CheckAndMutateRowRequest( + **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + assert args[0] == request_msg + + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3" + } + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_check_and_mutate_row_routing_parameters_request_2_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.check_and_mutate_row), "__call__" + ) as call: + call.return_value = bigtable.CheckAndMutateRowResponse() + client.check_and_mutate_row(request={"app_profile_id": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.CheckAndMutateRowRequest(**{"app_profile_id": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"app_profile_id": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_check_and_mutate_row_routing_parameters_request_3_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.check_and_mutate_row), "__call__" + ) as call: + call.return_value = bigtable.CheckAndMutateRowResponse() + client.check_and_mutate_row( + request={ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + ) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.CheckAndMutateRowRequest( + **{ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + ) + + assert args[0] == request_msg + + expected_headers = { + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_ping_and_warm_routing_parameters_request_1_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.ping_and_warm), "__call__") as call: + call.return_value = bigtable.PingAndWarmResponse() + client.ping_and_warm(request={"name": "projects/sample1/instances/sample2"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.PingAndWarmRequest( + **{"name": "projects/sample1/instances/sample2"} + ) + + assert args[0] == request_msg + + expected_headers = {"name": "projects/sample1/instances/sample2"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_ping_and_warm_routing_parameters_request_2_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.ping_and_warm), "__call__") as call: + call.return_value = bigtable.PingAndWarmResponse() + client.ping_and_warm(request={"app_profile_id": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.PingAndWarmRequest(**{"app_profile_id": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"app_profile_id": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_read_modify_write_row_routing_parameters_request_1_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.read_modify_write_row), "__call__" + ) as call: + call.return_value = bigtable.ReadModifyWriteRowResponse() + client.read_modify_write_row( + request={"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ReadModifyWriteRowRequest( + **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + assert args[0] == request_msg + + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3" + } + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_read_modify_write_row_routing_parameters_request_2_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. 
+ with mock.patch.object( + type(client.transport.read_modify_write_row), "__call__" + ) as call: + call.return_value = bigtable.ReadModifyWriteRowResponse() + client.read_modify_write_row(request={"app_profile_id": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ReadModifyWriteRowRequest( + **{"app_profile_id": "sample1"} + ) + + assert args[0] == request_msg + + expected_headers = {"app_profile_id": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_read_modify_write_row_routing_parameters_request_3_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.read_modify_write_row), "__call__" + ) as call: + call.return_value = bigtable.ReadModifyWriteRowResponse() + client.read_modify_write_row( + request={ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ReadModifyWriteRowRequest( + **{ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + ) + + assert args[0] == request_msg + + expected_headers = { + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_execute_query_routing_parameters_request_1_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.execute_query), "__call__") as call: + call.return_value = iter([bigtable.ExecuteQueryResponse()]) + client.execute_query( + request={"instance_name": "projects/sample1/instances/sample2"} + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ExecuteQueryRequest( + **{"instance_name": "projects/sample1/instances/sample2"} + ) + + assert args[0] == request_msg + + expected_headers = {"name": "projects/sample1/instances/sample2"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_execute_query_routing_parameters_request_2_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.execute_query), "__call__") as call: + call.return_value = iter([bigtable.ExecuteQueryResponse()]) + client.execute_query(request={"app_profile_id": "sample1"}) + + # Establish that the underlying stub method was called. 
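+        # (Here only app_profile_id is expected in the routing header; in the
+        # request_1 variant above, note that ExecuteQuery publishes the instance
+        # under the "name" routing key even though the request field is
+        # instance_name.)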
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ExecuteQueryRequest(**{"app_profile_id": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"app_profile_id": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_transport_kind_grpc_asyncio(): + transport = BigtableAsyncClient.get_transport_class("grpc_asyncio")( + credentials=async_anonymous_credentials() + ) + assert transport.kind == "grpc_asyncio" + + +def test_initialize_client_w_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), transport="grpc_asyncio" + ) + assert client is not None + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_read_rows_empty_call_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.read_rows), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.ReadRowsResponse()] + ) + await client.read_rows(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.ReadRowsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_sample_row_keys_empty_call_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.SampleRowKeysResponse()] + ) + await client.sample_row_keys(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.SampleRowKeysRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_mutate_row_empty_call_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.MutateRowResponse() + ) + await client.mutate_row(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.MutateRowRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
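+# For the asyncio transport the mocked callables must return awaitables: unary
+# responses are wrapped in grpc_helpers_async.FakeUnaryUnaryCall, while streaming
+# calls are faked with an aio.UnaryStreamCall mock whose read() is an AsyncMock
+# yielding one response.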
+@pytest.mark.asyncio +async def test_mutate_rows_empty_call_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.MutateRowsResponse()] + ) + await client.mutate_rows(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.MutateRowsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_check_and_mutate_row_empty_call_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.check_and_mutate_row), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.CheckAndMutateRowResponse( + predicate_matched=True, + ) + ) + await client.check_and_mutate_row(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.CheckAndMutateRowRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_ping_and_warm_empty_call_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.ping_and_warm), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.PingAndWarmResponse() + ) + await client.ping_and_warm(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.PingAndWarmRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_read_modify_write_row_empty_call_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.read_modify_write_row), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.ReadModifyWriteRowResponse() + ) + await client.read_modify_write_row(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.ReadModifyWriteRowRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. 
request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_generate_initial_change_stream_partitions_empty_call_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.generate_initial_change_stream_partitions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.GenerateInitialChangeStreamPartitionsResponse()] + ) + await client.generate_initial_change_stream_partitions(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.GenerateInitialChangeStreamPartitionsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_read_change_stream_empty_call_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.read_change_stream), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.ReadChangeStreamResponse()] + ) + await client.read_change_stream(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.ReadChangeStreamRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_execute_query_empty_call_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.execute_query), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.ExecuteQueryResponse()] + ) + await client.execute_query(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.ExecuteQueryRequest() + + assert args[0] == request_msg + + +@pytest.mark.asyncio +async def test_read_rows_routing_parameters_request_1_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.read_rows), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.ReadRowsResponse()] + ) + await client.read_rows( + request={"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ReadRowsRequest( + **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + assert args[0] == request_msg + + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3" + } + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_read_rows_routing_parameters_request_2_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.read_rows), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.ReadRowsResponse()] + ) + await client.read_rows(request={"app_profile_id": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ReadRowsRequest(**{"app_profile_id": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"app_profile_id": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_read_rows_routing_parameters_request_3_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.read_rows), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.ReadRowsResponse()] + ) + await client.read_rows( + request={ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ReadRowsRequest( + **{ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + ) + + assert args[0] == request_msg + + expected_headers = { + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_sample_row_keys_routing_parameters_request_1_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.SampleRowKeysResponse()] + ) + await client.sample_row_keys( + request={"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.SampleRowKeysRequest( + **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + assert args[0] == request_msg + + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3" + } + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_sample_row_keys_routing_parameters_request_2_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.SampleRowKeysResponse()] + ) + await client.sample_row_keys(request={"app_profile_id": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.SampleRowKeysRequest(**{"app_profile_id": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"app_profile_id": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_sample_row_keys_routing_parameters_request_3_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.SampleRowKeysResponse()] + ) + await client.sample_row_keys( + request={ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.SampleRowKeysRequest( + **{ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + ) + + assert args[0] == request_msg + + expected_headers = { + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_mutate_row_routing_parameters_request_1_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.MutateRowResponse() + ) + await client.mutate_row( + request={"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.MutateRowRequest( + **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + assert args[0] == request_msg + + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3" + } + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_mutate_row_routing_parameters_request_2_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.MutateRowResponse() + ) + await client.mutate_row(request={"app_profile_id": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.MutateRowRequest(**{"app_profile_id": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"app_profile_id": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_mutate_row_routing_parameters_request_3_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.MutateRowResponse() + ) + await client.mutate_row( + request={ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.MutateRowRequest( + **{ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + ) + + assert args[0] == request_msg + + expected_headers = { + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_mutate_rows_routing_parameters_request_1_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.MutateRowsResponse()] + ) + await client.mutate_rows( + request={"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.MutateRowsRequest( + **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + assert args[0] == request_msg + + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3" + } + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_mutate_rows_routing_parameters_request_2_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.MutateRowsResponse()] + ) + await client.mutate_rows(request={"app_profile_id": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.MutateRowsRequest(**{"app_profile_id": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"app_profile_id": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_mutate_rows_routing_parameters_request_3_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.MutateRowsResponse()] + ) + await client.mutate_rows( + request={ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.MutateRowsRequest( + **{ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + ) + + assert args[0] == request_msg + + expected_headers = { + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_check_and_mutate_row_routing_parameters_request_1_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.check_and_mutate_row), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.CheckAndMutateRowResponse( + predicate_matched=True, + ) + ) + await client.check_and_mutate_row( + request={"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.CheckAndMutateRowRequest( + **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + assert args[0] == request_msg + + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3" + } + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_check_and_mutate_row_routing_parameters_request_2_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.check_and_mutate_row), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.CheckAndMutateRowResponse( + predicate_matched=True, + ) + ) + await client.check_and_mutate_row(request={"app_profile_id": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.CheckAndMutateRowRequest(**{"app_profile_id": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"app_profile_id": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_check_and_mutate_row_routing_parameters_request_3_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.check_and_mutate_row), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.CheckAndMutateRowResponse( + predicate_matched=True, + ) + ) + await client.check_and_mutate_row( + request={ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.CheckAndMutateRowRequest( + **{ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + ) + + assert args[0] == request_msg + + expected_headers = { + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_ping_and_warm_routing_parameters_request_1_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.ping_and_warm), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.PingAndWarmResponse() + ) + await client.ping_and_warm( + request={"name": "projects/sample1/instances/sample2"} + ) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.PingAndWarmRequest( + **{"name": "projects/sample1/instances/sample2"} + ) + + assert args[0] == request_msg + + expected_headers = {"name": "projects/sample1/instances/sample2"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_ping_and_warm_routing_parameters_request_2_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.ping_and_warm), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.PingAndWarmResponse() + ) + await client.ping_and_warm(request={"app_profile_id": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.PingAndWarmRequest(**{"app_profile_id": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"app_profile_id": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_read_modify_write_row_routing_parameters_request_1_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.read_modify_write_row), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.ReadModifyWriteRowResponse() + ) + await client.read_modify_write_row( + request={"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ReadModifyWriteRowRequest( + **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + assert args[0] == request_msg + + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3" + } + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_read_modify_write_row_routing_parameters_request_2_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.read_modify_write_row), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.ReadModifyWriteRowResponse() + ) + await client.read_modify_write_row(request={"app_profile_id": "sample1"}) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ReadModifyWriteRowRequest( + **{"app_profile_id": "sample1"} + ) + + assert args[0] == request_msg + + expected_headers = {"app_profile_id": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_read_modify_write_row_routing_parameters_request_3_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.read_modify_write_row), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.ReadModifyWriteRowResponse() + ) + await client.read_modify_write_row( + request={ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ReadModifyWriteRowRequest( + **{ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + ) + + assert args[0] == request_msg + + expected_headers = { + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_execute_query_routing_parameters_request_1_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.execute_query), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.ExecuteQueryResponse()] + ) + await client.execute_query( + request={"instance_name": "projects/sample1/instances/sample2"} + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ExecuteQueryRequest( + **{"instance_name": "projects/sample1/instances/sample2"} + ) + + assert args[0] == request_msg + + expected_headers = {"name": "projects/sample1/instances/sample2"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_execute_query_routing_parameters_request_2_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.execute_query), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.ExecuteQueryResponse()] + ) + await client.execute_query(request={"app_profile_id": "sample1"}) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ExecuteQueryRequest(**{"app_profile_id": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"app_profile_id": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_transport_kind_rest(): + transport = BigtableClient.get_transport_class("rest")( + credentials=ga_credentials.AnonymousCredentials() + ) + assert transport.kind == "rest" + + +def test_read_rows_rest_bad_request(request_type=bigtable.ReadRowsRequest): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.read_rows(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable.ReadRowsRequest, + dict, + ], +) +def test_read_rows_rest_call_success(request_type): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.ReadRowsResponse( + last_scanned_row_key=b"last_scanned_row_key_blob", + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable.ReadRowsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + json_return_value = "[{}]".format(json_return_value) + response_value.iter_content = mock.Mock(return_value=iter(json_return_value)) + req.return_value = response_value + response = client.read_rows(request) + + assert isinstance(response, Iterable) + response = next(response) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, bigtable.ReadRowsResponse) + assert response.last_scanned_row_key == b"last_scanned_row_key_blob" + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_read_rows_rest_interceptors(null_interceptor): + transport = transports.BigtableRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), + ) + client = BigtableClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableRestInterceptor, "post_read_rows" + ) as post, mock.patch.object( + transports.BigtableRestInterceptor, "pre_read_rows" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable.ReadRowsRequest.pb(bigtable.ReadRowsRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + return_value = bigtable.ReadRowsResponse.to_json(bigtable.ReadRowsResponse()) + req.return_value.iter_content = mock.Mock(return_value=iter(return_value)) + + request = bigtable.ReadRowsRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable.ReadRowsResponse() + + client.read_rows( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_sample_row_keys_rest_bad_request(request_type=bigtable.SampleRowKeysRequest): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.sample_row_keys(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable.SampleRowKeysRequest, + dict, + ], +) +def test_sample_row_keys_rest_call_success(request_type): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = bigtable.SampleRowKeysResponse( + row_key=b"row_key_blob", + offset_bytes=1293, + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable.SampleRowKeysResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + json_return_value = "[{}]".format(json_return_value) + response_value.iter_content = mock.Mock(return_value=iter(json_return_value)) + req.return_value = response_value + response = client.sample_row_keys(request) + + assert isinstance(response, Iterable) + response = next(response) + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable.SampleRowKeysResponse) + assert response.row_key == b"row_key_blob" + assert response.offset_bytes == 1293 + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_sample_row_keys_rest_interceptors(null_interceptor): + transport = transports.BigtableRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), + ) + client = BigtableClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableRestInterceptor, "post_sample_row_keys" + ) as post, mock.patch.object( + transports.BigtableRestInterceptor, "pre_sample_row_keys" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable.SampleRowKeysRequest.pb(bigtable.SampleRowKeysRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + return_value = bigtable.SampleRowKeysResponse.to_json( + bigtable.SampleRowKeysResponse() + ) + req.return_value.iter_content = mock.Mock(return_value=iter(return_value)) + + request = bigtable.SampleRowKeysRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable.SampleRowKeysResponse() + + client.sample_row_keys( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_mutate_row_rest_bad_request(request_type=bigtable.MutateRowRequest): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
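+    # An HTTP 400 from the mocked requests.Session is expected to surface as
+    # google.api_core.exceptions.BadRequest from the REST transport.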
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.mutate_row(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable.MutateRowRequest, + dict, + ], +) +def test_mutate_row_rest_call_success(request_type): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.MutateRowResponse() + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable.MutateRowResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.mutate_row(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable.MutateRowResponse) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_mutate_row_rest_interceptors(null_interceptor): + transport = transports.BigtableRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), + ) + client = BigtableClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableRestInterceptor, "post_mutate_row" + ) as post, mock.patch.object( + transports.BigtableRestInterceptor, "pre_mutate_row" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable.MutateRowRequest.pb(bigtable.MutateRowRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + return_value = bigtable.MutateRowResponse.to_json(bigtable.MutateRowResponse()) + req.return_value.content = return_value + + request = bigtable.MutateRowRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable.MutateRowResponse() + + client.mutate_row( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_mutate_rows_rest_bad_request(request_type=bigtable.MutateRowsRequest): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.mutate_rows(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable.MutateRowsRequest, + dict, + ], +) +def test_mutate_rows_rest_call_success(request_type): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.MutateRowsResponse() + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable.MutateRowsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + json_return_value = "[{}]".format(json_return_value) + response_value.iter_content = mock.Mock(return_value=iter(json_return_value)) + req.return_value = response_value + response = client.mutate_rows(request) + + assert isinstance(response, Iterable) + response = next(response) + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable.MutateRowsResponse) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_mutate_rows_rest_interceptors(null_interceptor): + transport = transports.BigtableRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), + ) + client = BigtableClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableRestInterceptor, "post_mutate_rows" + ) as post, mock.patch.object( + transports.BigtableRestInterceptor, "pre_mutate_rows" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable.MutateRowsRequest.pb(bigtable.MutateRowsRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + return_value = bigtable.MutateRowsResponse.to_json( + bigtable.MutateRowsResponse() + ) + req.return_value.iter_content = mock.Mock(return_value=iter(return_value)) + + request = bigtable.MutateRowsRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable.MutateRowsResponse() + + client.mutate_rows( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_check_and_mutate_row_rest_bad_request( + request_type=bigtable.CheckAndMutateRowRequest, +): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy 
transcoding + request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.check_and_mutate_row(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable.CheckAndMutateRowRequest, + dict, + ], +) +def test_check_and_mutate_row_rest_call_success(request_type): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.CheckAndMutateRowResponse( + predicate_matched=True, + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable.CheckAndMutateRowResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.check_and_mutate_row(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, bigtable.CheckAndMutateRowResponse) + assert response.predicate_matched is True + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_check_and_mutate_row_rest_interceptors(null_interceptor): + transport = transports.BigtableRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), + ) + client = BigtableClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableRestInterceptor, "post_check_and_mutate_row" + ) as post, mock.patch.object( + transports.BigtableRestInterceptor, "pre_check_and_mutate_row" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable.CheckAndMutateRowRequest.pb( + bigtable.CheckAndMutateRowRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + return_value = bigtable.CheckAndMutateRowResponse.to_json( + bigtable.CheckAndMutateRowResponse() + ) + req.return_value.content = return_value + + request = bigtable.CheckAndMutateRowRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable.CheckAndMutateRowResponse() + + client.check_and_mutate_row( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_ping_and_warm_rest_bad_request(request_type=bigtable.PingAndWarmRequest): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.ping_and_warm(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable.PingAndWarmRequest, + dict, + ], +) +def test_ping_and_warm_rest_call_success(request_type): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = bigtable.PingAndWarmResponse() + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable.PingAndWarmResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.ping_and_warm(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable.PingAndWarmResponse) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_ping_and_warm_rest_interceptors(null_interceptor): + transport = transports.BigtableRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), + ) + client = BigtableClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableRestInterceptor, "post_ping_and_warm" + ) as post, mock.patch.object( + transports.BigtableRestInterceptor, "pre_ping_and_warm" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable.PingAndWarmRequest.pb(bigtable.PingAndWarmRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + return_value = bigtable.PingAndWarmResponse.to_json( + bigtable.PingAndWarmResponse() + ) + req.return_value.content = return_value + + request = bigtable.PingAndWarmRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable.PingAndWarmResponse() + + client.ping_and_warm( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_read_modify_write_row_rest_bad_request( + request_type=bigtable.ReadModifyWriteRowRequest, +): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.read_modify_write_row(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable.ReadModifyWriteRowRequest, + dict, + ], +) +def test_read_modify_write_row_rest_call_success(request_type): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. 
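+    # Unary methods such as ReadModifyWriteRow return a single JSON-encoded proto
+    # in response.content; the server-streaming methods above wrap the payload in
+    # a JSON array and expose it through response.iter_content instead.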
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.ReadModifyWriteRowResponse() + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable.ReadModifyWriteRowResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.read_modify_write_row(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable.ReadModifyWriteRowResponse) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_read_modify_write_row_rest_interceptors(null_interceptor): + transport = transports.BigtableRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), + ) + client = BigtableClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableRestInterceptor, "post_read_modify_write_row" + ) as post, mock.patch.object( + transports.BigtableRestInterceptor, "pre_read_modify_write_row" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable.ReadModifyWriteRowRequest.pb( + bigtable.ReadModifyWriteRowRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + return_value = bigtable.ReadModifyWriteRowResponse.to_json( + bigtable.ReadModifyWriteRowResponse() + ) + req.return_value.content = return_value + + request = bigtable.ReadModifyWriteRowRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable.ReadModifyWriteRowResponse() + + client.read_modify_write_row( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_generate_initial_change_stream_partitions_rest_bad_request( + request_type=bigtable.GenerateInitialChangeStreamPartitionsRequest, +): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.generate_initial_change_stream_partitions(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable.GenerateInitialChangeStreamPartitionsRequest, + dict, + ], +) +def test_generate_initial_change_stream_partitions_rest_call_success(request_type): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse() + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + json_return_value = "[{}]".format(json_return_value) + response_value.iter_content = mock.Mock(return_value=iter(json_return_value)) + req.return_value = response_value + response = client.generate_initial_change_stream_partitions(request) + + assert isinstance(response, Iterable) + response = next(response) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, bigtable.GenerateInitialChangeStreamPartitionsResponse) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_generate_initial_change_stream_partitions_rest_interceptors(null_interceptor): + transport = transports.BigtableRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), + ) + client = BigtableClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableRestInterceptor, + "post_generate_initial_change_stream_partitions", + ) as post, mock.patch.object( + transports.BigtableRestInterceptor, + "pre_generate_initial_change_stream_partitions", + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable.GenerateInitialChangeStreamPartitionsRequest.pb( + bigtable.GenerateInitialChangeStreamPartitionsRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse.to_json( + bigtable.GenerateInitialChangeStreamPartitionsResponse() + ) + req.return_value.iter_content = mock.Mock(return_value=iter(return_value)) + + request = bigtable.GenerateInitialChangeStreamPartitionsRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse() + + client.generate_initial_change_stream_partitions( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_read_change_stream_rest_bad_request( + request_type=bigtable.ReadChangeStreamRequest, +): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.read_change_stream(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable.ReadChangeStreamRequest, + dict, + ], +) +def test_read_change_stream_rest_call_success(request_type): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"table_name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = bigtable.ReadChangeStreamResponse() + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable.ReadChangeStreamResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + json_return_value = "[{}]".format(json_return_value) + response_value.iter_content = mock.Mock(return_value=iter(json_return_value)) + req.return_value = response_value + response = client.read_change_stream(request) + + assert isinstance(response, Iterable) + response = next(response) + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable.ReadChangeStreamResponse) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_read_change_stream_rest_interceptors(null_interceptor): + transport = transports.BigtableRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), + ) + client = BigtableClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableRestInterceptor, "post_read_change_stream" + ) as post, mock.patch.object( + transports.BigtableRestInterceptor, "pre_read_change_stream" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable.ReadChangeStreamRequest.pb( + bigtable.ReadChangeStreamRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + return_value = bigtable.ReadChangeStreamResponse.to_json( + bigtable.ReadChangeStreamResponse() + ) + req.return_value.iter_content = mock.Mock(return_value=iter(return_value)) + + request = bigtable.ReadChangeStreamRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable.ReadChangeStreamResponse() + + client.read_change_stream( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_execute_query_rest_bad_request(request_type=bigtable.ExecuteQueryRequest): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"instance_name": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + client.execute_query(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable.ExecuteQueryRequest, + dict, + ], +) +def test_execute_query_rest_call_success(request_type): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"instance_name": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.ExecuteQueryResponse() + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable.ExecuteQueryResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + json_return_value = "[{}]".format(json_return_value) + response_value.iter_content = mock.Mock(return_value=iter(json_return_value)) + req.return_value = response_value + response = client.execute_query(request) + + assert isinstance(response, Iterable) + response = next(response) + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable.ExecuteQueryResponse) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_execute_query_rest_interceptors(null_interceptor): + transport = transports.BigtableRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), + ) + client = BigtableClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableRestInterceptor, "post_execute_query" + ) as post, mock.patch.object( + transports.BigtableRestInterceptor, "pre_execute_query" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable.ExecuteQueryRequest.pb(bigtable.ExecuteQueryRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + return_value = bigtable.ExecuteQueryResponse.to_json( + bigtable.ExecuteQueryResponse() + ) + req.return_value.iter_content = mock.Mock(return_value=iter(return_value)) + + request = bigtable.ExecuteQueryRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable.ExecuteQueryResponse() + + client.execute_query( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_initialize_client_w_rest(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + assert client is not None + + +# This test is a coverage failsafe to make sure that 
totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_read_rows_empty_call_rest(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.read_rows), "__call__") as call: + client.read_rows(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.ReadRowsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_sample_row_keys_empty_call_rest(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: + client.sample_row_keys(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.SampleRowKeysRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_mutate_row_empty_call_rest(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: + client.mutate_row(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.MutateRowRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_mutate_rows_empty_call_rest(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: + client.mutate_rows(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.MutateRowsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_check_and_mutate_row_empty_call_rest(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.check_and_mutate_row), "__call__" + ) as call: + client.check_and_mutate_row(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.CheckAndMutateRowRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+def test_ping_and_warm_empty_call_rest(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.ping_and_warm), "__call__") as call: + client.ping_and_warm(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.PingAndWarmRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_read_modify_write_row_empty_call_rest(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.read_modify_write_row), "__call__" + ) as call: + client.read_modify_write_row(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.ReadModifyWriteRowRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_generate_initial_change_stream_partitions_empty_call_rest(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.generate_initial_change_stream_partitions), "__call__" + ) as call: + client.generate_initial_change_stream_partitions(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.GenerateInitialChangeStreamPartitionsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_read_change_stream_empty_call_rest(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.read_change_stream), "__call__" + ) as call: + client.read_change_stream(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.ReadChangeStreamRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_execute_query_empty_call_rest(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.execute_query), "__call__") as call: + client.execute_query(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.ExecuteQueryRequest() + + assert args[0] == request_msg + + +def test_read_rows_routing_parameters_request_1_rest(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. 
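+    # The routing-parameters tests below verify that fields set on the request
+    # (table_name, app_profile_id, authorized_view_name) are echoed back as an
+    # explicit x-goog-request-params routing header, built via
+    # gapic_v1.routing_header.to_grpc_metadata.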
+ with mock.patch.object(type(client.transport.read_rows), "__call__") as call: + client.read_rows( + request={"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ReadRowsRequest( + **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + assert args[0] == request_msg + + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3" } + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = bigtable.ExecuteQueryResponse.to_json( - bigtable.ExecuteQueryResponse() + +def test_read_rows_routing_parameters_request_2_rest(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.read_rows), "__call__") as call: + client.read_rows(request={"app_profile_id": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ReadRowsRequest(**{"app_profile_id": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"app_profile_id": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] ) - req.return_value._content = "[{}]".format(req.return_value._content) - request = bigtable.ExecuteQueryRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable.ExecuteQueryResponse() - client.execute_query( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) +def test_read_rows_routing_parameters_request_3_rest(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.read_rows), "__call__") as call: + client.read_rows( + request={ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ReadRowsRequest( + **{ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + ) + + assert args[0] == request_msg + + expected_headers = { + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_sample_row_keys_routing_parameters_request_1_rest(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: + client.sample_row_keys( + request={"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.SampleRowKeysRequest( + **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + assert args[0] == request_msg + + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3" + } + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_sample_row_keys_routing_parameters_request_2_rest(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: + client.sample_row_keys(request={"app_profile_id": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.SampleRowKeysRequest(**{"app_profile_id": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"app_profile_id": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_sample_row_keys_routing_parameters_request_3_rest(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: + client.sample_row_keys( + request={ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.SampleRowKeysRequest( + **{ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + ) + + assert args[0] == request_msg + + expected_headers = { + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_mutate_row_routing_parameters_request_1_rest(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: + client.mutate_row( + request={"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.MutateRowRequest( + **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + assert args[0] == request_msg + + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3" + } + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_mutate_row_routing_parameters_request_2_rest(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: + client.mutate_row(request={"app_profile_id": "sample1"}) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.MutateRowRequest(**{"app_profile_id": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"app_profile_id": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_mutate_row_routing_parameters_request_3_rest(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: + client.mutate_row( + request={ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.MutateRowRequest( + **{ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + ) + + assert args[0] == request_msg + + expected_headers = { + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_mutate_rows_routing_parameters_request_1_rest(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: + client.mutate_rows( + request={"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.MutateRowsRequest( + **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + assert args[0] == request_msg + + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3" + } + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_mutate_rows_routing_parameters_request_2_rest(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: + client.mutate_rows(request={"app_profile_id": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.MutateRowsRequest(**{"app_profile_id": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"app_profile_id": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_mutate_rows_routing_parameters_request_3_rest(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: + client.mutate_rows( + request={ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + ) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.MutateRowsRequest( + **{ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + ) + + assert args[0] == request_msg + + expected_headers = { + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_check_and_mutate_row_routing_parameters_request_1_rest(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.check_and_mutate_row), "__call__" + ) as call: + client.check_and_mutate_row( + request={"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.CheckAndMutateRowRequest( + **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) + + assert args[0] == request_msg + + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3" + } + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_check_and_mutate_row_routing_parameters_request_2_rest(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.check_and_mutate_row), "__call__" + ) as call: + client.check_and_mutate_row(request={"app_profile_id": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.CheckAndMutateRowRequest(**{"app_profile_id": "sample1"}) + + assert args[0] == request_msg - pre.assert_called_once() - post.assert_called_once() + expected_headers = {"app_profile_id": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) -def test_execute_query_rest_bad_request( - transport: str = "rest", request_type=bigtable.ExecuteQueryRequest -): +def test_check_and_mutate_row_routing_parameters_request_3_rest(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + transport="rest", ) - # send a request that will satisfy transcoding - request_init = {"instance_name": "projects/sample1/instances/sample2"} - request = request_type(**request_init) + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.check_and_mutate_row), "__call__" + ) as call: + client.check_and_mutate_row( + request={ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + ) - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.execute_query(request) + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.CheckAndMutateRowRequest( + **{ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + ) + assert args[0] == request_msg -def test_execute_query_rest_flattened(): + expected_headers = { + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_ping_and_warm_routing_parameters_request_1_rest(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable.ExecuteQueryResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {"instance_name": "projects/sample1/instances/sample2"} + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.ping_and_warm), "__call__") as call: + client.ping_and_warm(request={"name": "projects/sample1/instances/sample2"}) - # get truthy value for each flattened field - mock_args = dict( - instance_name="instance_name_value", - query="query_value", - app_profile_id="app_profile_id_value", + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.PingAndWarmRequest( + **{"name": "projects/sample1/instances/sample2"} ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable.ExecuteQueryResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - json_return_value = "[{}]".format(json_return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - with mock.patch.object(response_value, "iter_content") as iter_content: - iter_content.return_value = iter(json_return_value) - client.execute_query(**mock_args) + assert args[0] == request_msg - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{instance_name=projects/*/instances/*}:executeQuery" - % client.transport._host, - args[1], + expected_headers = {"name": "projects/sample1/instances/sample2"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] ) -def test_execute_query_rest_flattened_error(transport: str = "rest"): +def test_ping_and_warm_routing_parameters_request_2_rest(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + transport="rest", ) - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.execute_query( - bigtable.ExecuteQueryRequest(), - instance_name="instance_name_value", - query="query_value", - app_profile_id="app_profile_id_value", + # Mock the actual call, and fake the request. 
+ with mock.patch.object(type(client.transport.ping_and_warm), "__call__") as call: + client.ping_and_warm(request={"app_profile_id": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.PingAndWarmRequest(**{"app_profile_id": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"app_profile_id": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] ) -def test_execute_query_rest_error(): +def test_read_modify_write_row_routing_parameters_request_1_rest(): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.read_modify_write_row), "__call__" + ) as call: + client.read_modify_write_row( + request={"table_name": "projects/sample1/instances/sample2/tables/sample3"} + ) -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.BigtableGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ReadModifyWriteRowRequest( + **{"table_name": "projects/sample1/instances/sample2/tables/sample3"} ) - # It is an error to provide a credentials file and a transport instance. - transport = transports.BigtableGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = BigtableClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, + assert args[0] == request_msg + + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3" + } + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] ) - # It is an error to provide an api_key and a transport instance. - transport = transports.BigtableGrpcTransport( + +def test_read_modify_write_row_routing_parameters_request_2_rest(): + client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - options = client_options.ClientOptions() - options.api_key = "api_key" - with pytest.raises(ValueError): - client = BigtableClient( - client_options=options, - transport=transport, + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.read_modify_write_row), "__call__" + ) as call: + client.read_modify_write_row(request={"app_profile_id": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ReadModifyWriteRowRequest( + **{"app_profile_id": "sample1"} ) - # It is an error to provide an api_key and a credential. 
- options = client_options.ClientOptions() - options.api_key = "api_key" - with pytest.raises(ValueError): - client = BigtableClient( - client_options=options, credentials=ga_credentials.AnonymousCredentials() + assert args[0] == request_msg + + expected_headers = {"app_profile_id": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] ) - # It is an error to provide scopes and a transport instance. - transport = transports.BigtableGrpcTransport( + +def test_read_modify_write_row_routing_parameters_request_3_rest(): + client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - with pytest.raises(ValueError): - client = BigtableClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.read_modify_write_row), "__call__" + ) as call: + client.read_modify_write_row( + request={ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } ) + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ReadModifyWriteRowRequest( + **{ + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + ) -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.BigtableGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = BigtableClient(transport=transport) - assert client.transport is transport + assert args[0] == request_msg + expected_headers = { + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.BigtableGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - transport = transports.BigtableGrpcAsyncIOTransport( +def test_execute_query_routing_parameters_request_1_rest(): + client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - channel = transport.grpc_channel - assert channel + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.execute_query), "__call__") as call: + client.execute_query( + request={"instance_name": "projects/sample1/instances/sample2"} + ) -@pytest.mark.parametrize( - "transport_class", - [ - transports.BigtableGrpcTransport, - transports.BigtableGrpcAsyncIOTransport, - transports.BigtableRestTransport, - ], -) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, "default") as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ExecuteQueryRequest( + **{"instance_name": "projects/sample1/instances/sample2"} + ) + assert args[0] == request_msg -@pytest.mark.parametrize( - "transport_name", - [ - "grpc", - "rest", - ], -) -def test_transport_kind(transport_name): - transport = BigtableClient.get_transport_class(transport_name)( + expected_headers = {"name": "projects/sample1/instances/sample2"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_execute_query_routing_parameters_request_2_rest(): + client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - assert transport.kind == transport_name + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.execute_query), "__call__") as call: + client.execute_query(request={"app_profile_id": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ExecuteQueryRequest(**{"app_profile_id": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"app_profile_id": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) def test_transport_grpc_default(): @@ -8955,36 +10626,41 @@ def test_client_with_default_client_info(): prep.assert_called_once_with(client_info) +def test_transport_close_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc" + ) + with mock.patch.object( + type(getattr(client.transport, "_grpc_channel")), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + @pytest.mark.asyncio -async def test_transport_close_async(): +async def test_transport_close_grpc_asyncio(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc_asyncio", + credentials=async_anonymous_credentials(), transport="grpc_asyncio" ) with mock.patch.object( - type(getattr(client.transport, "grpc_channel")), "close" + type(getattr(client.transport, "_grpc_channel")), "close" ) as close: async with client: close.assert_not_called() close.assert_called_once() -def test_transport_close(): - transports = { - "rest": "_session", - "grpc": "_grpc_channel", - } - - for transport, close_name in transports.items(): - client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport - ) - with mock.patch.object( - type(getattr(client.transport, close_name)), "close" - ) as close: - with client: - close.assert_not_called() - close.assert_called_once() +def test_transport_close_rest(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + with mock.patch.object( + type(getattr(client.transport, "_session")), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() def test_client_ctx(): From 76b03e27c233bfdec91aa29c3d78207e6d015a9b Mon Sep 17 00:00:00 2001 From: ayu Date: Thu, 7 Nov 2024 15:02:36 -0800 Subject: [PATCH 827/892] feat: surface `retry` param to `Table.read_row` api (#982) --- .../google/cloud/bigtable/table.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index e3191a7297da..7429bd36f251 100644 --- 
a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -533,7 +533,7 @@ def get_encryption_info(self): for cluster_id, value_pb in table_pb.cluster_states.items() } - def read_row(self, row_key, filter_=None): + def read_row(self, row_key, filter_=None, retry=DEFAULT_RETRY_READ_ROWS): """Read a single row from this table. For example: @@ -550,6 +550,14 @@ def read_row(self, row_key, filter_=None): :param filter_: (Optional) The filter to apply to the contents of the row. If unset, returns the entire row. + :type retry: :class:`~google.api_core.retry.Retry` + :param retry: + (Optional) Retry delay and deadline arguments. To override, the + default value :attr:`DEFAULT_RETRY_READ_ROWS` can be used and + modified with the :meth:`~google.api_core.retry.Retry.with_delay` + method or the :meth:`~google.api_core.retry.Retry.with_deadline` + method. + :rtype: :class:`.PartialRowData`, :data:`NoneType ` :returns: The contents of the row if any chunks were returned in the response, otherwise :data:`None`. @@ -558,7 +566,9 @@ def read_row(self, row_key, filter_=None): """ row_set = RowSet() row_set.add_row_key(row_key) - result_iter = iter(self.read_rows(filter_=filter_, row_set=row_set)) + result_iter = iter( + self.read_rows(filter_=filter_, row_set=row_set, retry=retry) + ) row = next(result_iter, None) if next(result_iter, None) is not None: raise ValueError("More than one row was returned.") From 2ea9b53b1522cb249fe7ee9031dc135d72801f70 Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Fri, 8 Nov 2024 09:07:10 -0800 Subject: [PATCH 828/892] chore: remove custom routing metadata (#1036) * remove custom _make_metadata * remove gapic customizations * fixed lint --- .../bigtable/data/_async/_mutate_rows.py | 5 -- .../cloud/bigtable/data/_async/_read_rows.py | 6 --- .../cloud/bigtable/data/_async/client.py | 52 +++++-------------- .../google/cloud/bigtable/data/_helpers.py | 25 --------- .../_async/execute_query_iterator.py | 6 +-- .../services/bigtable/async_client.py | 24 ++++----- packages/google-cloud-bigtable/owlbot.py | 12 ----- .../unit/data/_async/test__mutate_rows.py | 7 +-- .../tests/unit/data/_async/test__read_rows.py | 6 --- .../tests/unit/data/_async/test_client.py | 27 +--------- .../tests/unit/data/test__helpers.py | 28 ---------- 11 files changed, 27 insertions(+), 171 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_mutate_rows.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_mutate_rows.py index 465378aa43ce..914cfecf475a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_mutate_rows.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_mutate_rows.py @@ -22,7 +22,6 @@ from google.api_core import retry as retries import google.cloud.bigtable_v2.types.bigtable as types_pb import google.cloud.bigtable.data.exceptions as bt_exceptions -from google.cloud.bigtable.data._helpers import _make_metadata from google.cloud.bigtable.data._helpers import _attempt_timeout_generator from google.cloud.bigtable.data._helpers import _retry_exception_factory @@ -84,14 +83,10 @@ def __init__( f"all entries. Found {total_mutations}." 
) # create partial function to pass to trigger rpc call - metadata = _make_metadata( - table.table_name, table.app_profile_id, instance_name=None - ) self._gapic_fn = functools.partial( gapic_client.mutate_rows, table_name=table.table_name, app_profile_id=table.app_profile_id, - metadata=metadata, retry=None, ) # create predicate for determining which errors are retryable diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_read_rows.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_read_rows.py index 6034ae6cfffb..5617e6418476 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_read_rows.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_read_rows.py @@ -33,7 +33,6 @@ from google.cloud.bigtable.data.exceptions import InvalidChunk from google.cloud.bigtable.data.exceptions import _RowSetComplete from google.cloud.bigtable.data._helpers import _attempt_timeout_generator -from google.cloud.bigtable.data._helpers import _make_metadata from google.cloud.bigtable.data._helpers import _retry_exception_factory from google.api_core import retry as retries @@ -74,7 +73,6 @@ class _ReadRowsOperationAsync: "request", "table", "_predicate", - "_metadata", "_last_yielded_row_key", "_remaining_count", ) @@ -101,9 +99,6 @@ def __init__( self.request = query._to_pb(table) self.table = table self._predicate = retries.if_exception_type(*retryable_exceptions) - self._metadata = _make_metadata( - table.table_name, table.app_profile_id, instance_name=None - ) self._last_yielded_row_key: bytes | None = None self._remaining_count: int | None = self.request.rows_limit or None @@ -152,7 +147,6 @@ def _read_rows_attempt(self) -> AsyncGenerator[Row, None]: gapic_stream = self.table.client._gapic_client.read_rows( self.request, timeout=next(self.attempt_timeout_gen), - metadata=self._metadata, retry=None, ) chunked_stream = self.chunk_stream(gapic_stream) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py index b48921623d90..6b920f5c4faf 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py @@ -60,7 +60,6 @@ _get_error_type, _get_retryable_errors, _get_timeouts, - _make_metadata, _retry_exception_factory, _validate_timeouts, _WarmedInstanceKey, @@ -262,19 +261,18 @@ async def _ping_and_warm_instances( request_serializer=PingAndWarmRequest.serialize, ) # prepare list of coroutines to run - tasks = [ - ping_rpc( - request={"name": instance_name, "app_profile_id": app_profile_id}, - metadata=[ - ( - "x-goog-request-params", - f"name={instance_name}&app_profile_id={app_profile_id}", - ) - ], - wait_for_ready=True, + tasks = [] + for instance_name, table_name, app_profile_id in instance_list: + metadata_str = f"name={instance_name}" + if app_profile_id is not None: + metadata_str = f"{metadata_str}&app_profile_id={app_profile_id}" + tasks.append( + ping_rpc( + request={"name": instance_name, "app_profile_id": app_profile_id}, + metadata=[("x-goog-request-params", metadata_str)], + wait_for_ready=True, + ) ) - for (instance_name, table_name, app_profile_id) in instance_list - ] # execute coroutines in parallel result_list = await asyncio.gather(*tasks, return_exceptions=True) # return None in place of empty successful responses @@ -508,15 +506,6 @@ async def execute_query( "proto_format": {}, } - # 
app_profile_id should be set to an empty string for ExecuteQueryRequest only - app_profile_id_for_metadata = app_profile_id or "" - - req_metadata = _make_metadata( - table_name=None, - app_profile_id=app_profile_id_for_metadata, - instance_name=instance_name, - ) - return ExecuteQueryIteratorAsync( self, instance_id, @@ -524,8 +513,7 @@ async def execute_query( request_body, attempt_timeout, operation_timeout, - req_metadata, - retryable_excs, + retryable_excs=retryable_excs, ) async def __aenter__(self): @@ -1005,16 +993,11 @@ async def sample_row_keys( sleep_generator = retries.exponential_sleep_generator(0.01, 2, 60) # prepare request - metadata = _make_metadata( - self.table_name, self.app_profile_id, instance_name=None - ) - async def execute_rpc(): results = await self.client._gapic_client.sample_row_keys( table_name=self.table_name, app_profile_id=self.app_profile_id, timeout=next(attempt_timeout_gen), - metadata=metadata, retry=None, ) return [(s.row_key, s.offset_bytes) async for s in results] @@ -1143,9 +1126,6 @@ async def mutate_row( table_name=self.table_name, app_profile_id=self.app_profile_id, timeout=attempt_timeout, - metadata=_make_metadata( - self.table_name, self.app_profile_id, instance_name=None - ), retry=None, ) return await retries.retry_target_async( @@ -1263,9 +1243,6 @@ async def check_and_mutate_row( ): false_case_mutations = [false_case_mutations] false_case_list = [m._to_pb() for m in false_case_mutations or []] - metadata = _make_metadata( - self.table_name, self.app_profile_id, instance_name=None - ) result = await self.client._gapic_client.check_and_mutate_row( true_mutations=true_case_list, false_mutations=false_case_list, @@ -1273,7 +1250,6 @@ async def check_and_mutate_row( row_key=row_key.encode("utf-8") if isinstance(row_key, str) else row_key, table_name=self.table_name, app_profile_id=self.app_profile_id, - metadata=metadata, timeout=operation_timeout, retry=None, ) @@ -1316,15 +1292,11 @@ async def read_modify_write_row( rules = [rules] if not rules: raise ValueError("rules must contain at least one item") - metadata = _make_metadata( - self.table_name, self.app_profile_id, instance_name=None - ) result = await self.client._gapic_client.read_modify_write_row( rules=[rule._to_pb() for rule in rules], row_key=row_key.encode("utf-8") if isinstance(row_key, str) else row_key, table_name=self.table_name, app_profile_id=self.app_profile_id, - metadata=metadata, timeout=operation_timeout, retry=None, ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_helpers.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_helpers.py index 2d36c521faee..bd1c09d523f0 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_helpers.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_helpers.py @@ -59,31 +59,6 @@ class TABLE_DEFAULT(enum.Enum): MUTATE_ROWS = "MUTATE_ROWS_DEFAULT" -def _make_metadata( - table_name: str | None, app_profile_id: str | None, instance_name: str | None -) -> list[tuple[str, str]]: - """ - Create properly formatted gRPC metadata for requests. 
- """ - params = [] - - if table_name is not None and instance_name is not None: - raise ValueError("metadata can't contain both instance_name and table_name") - - if table_name is not None: - params.append(f"table_name={table_name}") - if instance_name is not None: - params.append(f"name={instance_name}") - if app_profile_id is not None: - params.append(f"app_profile_id={app_profile_id}") - if len(params) == 0: - raise ValueError( - "At least one of table_name and app_profile_id should be not None." - ) - params_str = "&".join(params) - return [("x-goog-request-params", params_str)] - - def _attempt_timeout_generator( per_request_timeout: float | None, operation_timeout: float ): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py index 32081939b4ed..6146ad4516f5 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py @@ -19,7 +19,6 @@ Any, AsyncIterator, Dict, - List, Optional, Sequence, Tuple, @@ -83,8 +82,8 @@ def __init__( request_body: Dict[str, Any], attempt_timeout: float | None, operation_timeout: float, - req_metadata: Sequence[Tuple[str, str]], - retryable_excs: List[type[Exception]], + req_metadata: Sequence[Tuple[str, str]] = (), + retryable_excs: Sequence[type[Exception]] = (), ) -> None: self._table_name = None self._app_profile_id = app_profile_id @@ -99,6 +98,7 @@ def __init__( self._attempt_timeout_gen = _attempt_timeout_generator( attempt_timeout, operation_timeout ) + retryable_excs = retryable_excs or [] self._async_stream = retries.retry_target_stream_async( self._make_request_with_resume_token, retries.if_exception_type(*retryable_excs), diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py index b05e171c1e6c..b36f525fa217 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -1286,13 +1286,11 @@ def generate_initial_change_stream_partitions( # Certain fields should be provided within the metadata header; # add these here. - metadata = tuple(metadata) - if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): - metadata += ( - gapic_v1.routing_header.to_grpc_metadata( - (("table_name", request.table_name),) - ), - ) + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) # Validate the universe domain. self._client._validate_universe_domain() @@ -1390,13 +1388,11 @@ def read_change_stream( # Certain fields should be provided within the metadata header; # add these here. - metadata = tuple(metadata) - if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): - metadata += ( - gapic_v1.routing_header.to_grpc_metadata( - (("table_name", request.table_name),) - ), - ) + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("table_name", request.table_name),) + ), + ) # Validate the universe domain. 
self._client._validate_universe_domain() diff --git a/packages/google-cloud-bigtable/owlbot.py b/packages/google-cloud-bigtable/owlbot.py index 0ec4cd61c7b2..323e65d4681d 100644 --- a/packages/google-cloud-bigtable/owlbot.py +++ b/packages/google-cloud-bigtable/owlbot.py @@ -143,18 +143,6 @@ def insert(file, before_line, insert_line, after_line, escape=None): escape='"' ) -# ---------------------------------------------------------------------------- -# Patch duplicate routing header: https://github.com/googleapis/gapic-generator-python/issues/2078 -# ---------------------------------------------------------------------------- -for file in ["async_client.py"]: - s.replace( - f"google/cloud/bigtable_v2/services/bigtable/{file}", - "metadata \= tuple\(metadata\) \+ \(", - """metadata = tuple(metadata) - if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): - metadata += (""" - ) - # ---------------------------------------------------------------------------- # Samples templates # ---------------------------------------------------------------------------- diff --git a/packages/google-cloud-bigtable/tests/unit/data/_async/test__mutate_rows.py b/packages/google-cloud-bigtable/tests/unit/data/_async/test__mutate_rows.py index e03028c45257..73da1b46d5d8 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/_async/test__mutate_rows.py +++ b/packages/google-cloud-bigtable/tests/unit/data/_async/test__mutate_rows.py @@ -101,15 +101,10 @@ def test_ctor(self): assert client.mutate_rows.call_count == 1 # gapic_fn should call with table details inner_kwargs = client.mutate_rows.call_args[1] - assert len(inner_kwargs) == 4 + assert len(inner_kwargs) == 3 assert inner_kwargs["table_name"] == table.table_name assert inner_kwargs["app_profile_id"] == table.app_profile_id assert inner_kwargs["retry"] is None - metadata = inner_kwargs["metadata"] - assert len(metadata) == 1 - assert metadata[0][0] == "x-goog-request-params" - assert str(table.table_name) in metadata[0][1] - assert str(table.app_profile_id) in metadata[0][1] # entries should be passed down entries_w_pb = [_EntryWithProto(e, e._to_pb()) for e in entries] assert instance.mutations == entries_w_pb diff --git a/packages/google-cloud-bigtable/tests/unit/data/_async/test__read_rows.py b/packages/google-cloud-bigtable/tests/unit/data/_async/test__read_rows.py index 2bf8688fd396..e2b02517fb6e 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/_async/test__read_rows.py +++ b/packages/google-cloud-bigtable/tests/unit/data/_async/test__read_rows.py @@ -78,12 +78,6 @@ def test_ctor(self): assert instance._remaining_count == row_limit assert instance.operation_timeout == expected_operation_timeout assert client.read_rows.call_count == 0 - assert instance._metadata == [ - ( - "x-goog-request-params", - "table_name=test_table&app_profile_id=test_profile", - ) - ] assert instance.request.table_name == table.table_name assert instance.request.app_profile_id == table.app_profile_id assert instance.request.rows_limit == row_limit diff --git a/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py b/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py index 1c1c14cd3c8d..8c8cf6082663 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py @@ -2176,11 +2176,10 @@ async def test_sample_row_keys_gapic_params(self): await table.sample_row_keys(attempt_timeout=expected_timeout) args, kwargs = 
sample_row_keys.call_args assert len(args) == 0 - assert len(kwargs) == 5 + assert len(kwargs) == 4 assert kwargs["timeout"] == expected_timeout assert kwargs["app_profile_id"] == expected_profile assert kwargs["table_name"] == table.table_name - assert kwargs["metadata"] is not None assert kwargs["retry"] is None @pytest.mark.parametrize( @@ -2375,30 +2374,6 @@ async def test_mutate_row_non_retryable_errors(self, non_retryable_exception): "row_key", mutation, operation_timeout=0.2 ) - @pytest.mark.parametrize("include_app_profile", [True, False]) - @pytest.mark.asyncio - async def test_mutate_row_metadata(self, include_app_profile): - """request should attach metadata headers""" - profile = "profile" if include_app_profile else None - async with _make_client() as client: - async with client.get_table("i", "t", app_profile_id=profile) as table: - with mock.patch.object( - client._gapic_client, "mutate_row", AsyncMock() - ) as read_rows: - await table.mutate_row("rk", mock.Mock()) - kwargs = read_rows.call_args_list[0].kwargs - metadata = kwargs["metadata"] - goog_metadata = None - for key, value in metadata: - if key == "x-goog-request-params": - goog_metadata = value - assert goog_metadata is not None, "x-goog-request-params not found" - assert "table_name=" + table.table_name in goog_metadata - if include_app_profile: - assert "app_profile_id=profile" in goog_metadata - else: - assert "app_profile_id=" not in goog_metadata - @pytest.mark.parametrize("mutations", [[], None]) @pytest.mark.asyncio async def test_mutate_row_no_mutations(self, mutations): diff --git a/packages/google-cloud-bigtable/tests/unit/data/test__helpers.py b/packages/google-cloud-bigtable/tests/unit/data/test__helpers.py index 12ab3181eda4..58889026522b 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/test__helpers.py +++ b/packages/google-cloud-bigtable/tests/unit/data/test__helpers.py @@ -21,34 +21,6 @@ import mock -class TestMakeMetadata: - @pytest.mark.parametrize( - "table,profile,instance,expected", - [ - ("table", "profile", None, "table_name=table&app_profile_id=profile"), - ("table", None, None, "table_name=table"), - (None, None, "instance", "name=instance"), - (None, "profile", None, "app_profile_id=profile"), - (None, "profile", "instance", "name=instance&app_profile_id=profile"), - ], - ) - def test__make_metadata(self, table, profile, instance, expected): - metadata = _helpers._make_metadata(table, profile, instance) - assert metadata == [("x-goog-request-params", expected)] - - @pytest.mark.parametrize( - "table,profile,instance", - [ - ("table", None, "instance"), - ("table", "profile", "instance"), - (None, None, None), - ], - ) - def test__make_metadata_invalid_params(self, table, profile, instance): - with pytest.raises(ValueError): - _helpers._make_metadata(table, profile, instance) - - class TestAttemptTimeoutGenerator: @pytest.mark.parametrize( "request_t,operation_t,expected_list", From f6a35be4efd3b142c995734016e4c1dab56513e5 Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Fri, 8 Nov 2024 10:07:45 -0800 Subject: [PATCH 829/892] chore: remove pooled transport (#1035) --- gapic-generator-fork | 1 - .../cloud/bigtable/data/_async/client.py | 113 ++--- .../bigtable_v2/services/bigtable/client.py | 2 - .../services/bigtable/transports/__init__.py | 3 - .../transports/pooled_grpc_asyncio.py | 430 ------------------ packages/google-cloud-bigtable/owlbot.py | 46 -- .../system/data/test_execute_query_async.py | 7 +- .../system/data/test_execute_query_utils.py | 27 +- 
.../tests/system/data/test_system.py | 7 +- .../tests/unit/data/_async/test_client.py | 342 ++++---------- python-api-core | 1 - 11 files changed, 172 insertions(+), 807 deletions(-) delete mode 160000 gapic-generator-fork delete mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/pooled_grpc_asyncio.py delete mode 160000 python-api-core diff --git a/gapic-generator-fork b/gapic-generator-fork deleted file mode 160000 index b26cda7d163d..000000000000 --- a/gapic-generator-fork +++ /dev/null @@ -1 +0,0 @@ -Subproject commit b26cda7d163d6e0d45c9684f328ca32fb49b799a diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py index 6b920f5c4faf..f1f7ad1a3099 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py @@ -87,10 +87,8 @@ DEFAULT_CLIENT_INFO, BigtableAsyncClient, ) -from google.cloud.bigtable_v2.services.bigtable.client import BigtableClientMeta -from google.cloud.bigtable_v2.services.bigtable.transports.pooled_grpc_asyncio import ( - PooledBigtableGrpcAsyncIOTransport, - PooledChannel, +from google.cloud.bigtable_v2.services.bigtable.transports import ( + BigtableGrpcAsyncIOTransport, ) from google.cloud.bigtable_v2.types.bigtable import PingAndWarmRequest @@ -103,11 +101,11 @@ def __init__( self, *, project: str | None = None, - pool_size: int = 3, credentials: google.auth.credentials.Credentials | None = None, client_options: dict[str, Any] | "google.api_core.client_options.ClientOptions" | None = None, + **kwargs, ): """ Create a client instance for the Bigtable Data API @@ -118,8 +116,6 @@ def __init__( project: the project which the client acts on behalf of. If not passed, falls back to the default inferred from the environment. - pool_size: The number of grpc channels to maintain - in the internal channel pool. credentials: Thehe OAuth2 Credentials to use for this client. If not passed (and if no ``_http`` object is @@ -130,12 +126,9 @@ def __init__( on the client. API Endpoint should be set through client_options. 
Raises: RuntimeError: if called outside of an async context (no running event loop) - ValueError: if pool_size is less than 1 """ - # set up transport in registry - transport_str = f"pooled_grpc_asyncio_{pool_size}" - transport = PooledBigtableGrpcAsyncIOTransport.with_fixed_size(pool_size) - BigtableClientMeta._transport_registry[transport_str] = transport + if "pool_size" in kwargs: + warnings.warn("pool_size no longer supported") # set up client info headers for veneer library client_info = DEFAULT_CLIENT_INFO client_info.client_library_version = self._client_version() @@ -145,9 +138,16 @@ def __init__( client_options = cast( Optional[client_options_lib.ClientOptions], client_options ) + custom_channel = None self._emulator_host = os.getenv(BIGTABLE_EMULATOR) if self._emulator_host is not None: + warnings.warn( + "Connecting to Bigtable emulator at {}".format(self._emulator_host), + RuntimeWarning, + stacklevel=2, + ) # use insecure channel if emulator is set + custom_channel = grpc.aio.insecure_channel(self._emulator_host) if credentials is None: credentials = google.auth.credentials.AnonymousCredentials() if project is None: @@ -160,13 +160,15 @@ def __init__( client_options=client_options, ) self._gapic_client = BigtableAsyncClient( - transport=transport_str, credentials=credentials, client_options=client_options, client_info=client_info, + transport=lambda *args, **kwargs: BigtableGrpcAsyncIOTransport( + *args, **kwargs, channel=custom_channel + ), ) self.transport = cast( - PooledBigtableGrpcAsyncIOTransport, self._gapic_client.transport + BigtableGrpcAsyncIOTransport, self._gapic_client.transport ) # keep track of active instances to for warmup on channel refresh self._active_instances: Set[_WarmedInstanceKey] = set() @@ -174,23 +176,8 @@ def __init__( # only remove instance from _active_instances when all associated tables remove it self._instance_owners: dict[_WarmedInstanceKey, Set[int]] = {} self._channel_init_time = time.monotonic() - self._channel_refresh_tasks: list[asyncio.Task[None]] = [] - if self._emulator_host is not None: - # connect to an emulator host - warnings.warn( - "Connecting to Bigtable emulator at {}".format(self._emulator_host), - RuntimeWarning, - stacklevel=2, - ) - self.transport._grpc_channel = PooledChannel( - pool_size=pool_size, - host=self._emulator_host, - insecure=True, - ) - # refresh cached stubs to use emulator pool - self.transport._stubs = {} - self.transport._prep_wrapped_messages(client_info) - else: + self._channel_refresh_task: asyncio.Task[None] | None = None + if self._emulator_host is None: # attempt to start background channel refresh tasks try: self._start_background_channel_refresh() @@ -211,36 +198,38 @@ def _client_version() -> str: def _start_background_channel_refresh(self) -> None: """ - Starts a background task to ping and warm each channel in the pool + Starts a background task to ping and warm grpc channel Raises: RuntimeError: if not called in an asyncio event loop """ - if not self._channel_refresh_tasks and not self._emulator_host: + if not self._channel_refresh_task and not self._emulator_host: # raise RuntimeError if there is no event loop asyncio.get_running_loop() - for channel_idx in range(self.transport.pool_size): - refresh_task = asyncio.create_task(self._manage_channel(channel_idx)) - if sys.version_info >= (3, 8): - # task names supported in Python 3.8+ - refresh_task.set_name( - f"{self.__class__.__name__} channel refresh {channel_idx}" - ) - self._channel_refresh_tasks.append(refresh_task) + 
self._channel_refresh_task = asyncio.create_task(self._manage_channel()) + if sys.version_info >= (3, 8): + # task names supported in Python 3.8+ + self._channel_refresh_task.set_name( + f"{self.__class__.__name__} channel refresh" + ) async def close(self, timeout: float = 2.0): """ Cancel all background tasks """ - for task in self._channel_refresh_tasks: - task.cancel() - group = asyncio.gather(*self._channel_refresh_tasks, return_exceptions=True) - await asyncio.wait_for(group, timeout=timeout) + if self._channel_refresh_task: + self._channel_refresh_task.cancel() + try: + await asyncio.wait_for(self._channel_refresh_task, timeout=timeout) + except asyncio.CancelledError: + pass await self.transport.close() - self._channel_refresh_tasks = [] + self._channel_refresh_task = None async def _ping_and_warm_instances( - self, channel: grpc.aio.Channel, instance_key: _WarmedInstanceKey | None = None + self, + instance_key: _WarmedInstanceKey | None = None, + channel: grpc.aio.Channel | None = None, ) -> list[BaseException | None]: """ Prepares the backend for requests on a channel @@ -248,11 +237,12 @@ async def _ping_and_warm_instances( Pings each Bigtable instance registered in `_active_instances` on the client Args: - channel: grpc channel to warm instance_key: if provided, only warm the instance associated with the key + channel: grpc channel to warm. If none, warms `self.transport.grpc_channel` Returns: list[BaseException | None]: sequence of results or exceptions from the ping requests """ + channel = channel or self.transport.grpc_channel instance_list = ( [instance_key] if instance_key is not None else self._active_instances ) @@ -280,7 +270,6 @@ async def _ping_and_warm_instances( async def _manage_channel( self, - channel_idx: int, refresh_interval_min: float = 60 * 35, refresh_interval_max: float = 60 * 45, grace_period: float = 60 * 10, @@ -294,7 +283,6 @@ async def _manage_channel( Runs continuously until the client is closed Args: - channel_idx: index of the channel in the transport's channel pool refresh_interval_min: minimum interval before initiating refresh process in seconds. 
Actual interval will be a random value between `refresh_interval_min` and `refresh_interval_max` @@ -310,19 +298,18 @@ async def _manage_channel( next_sleep = max(first_refresh - time.monotonic(), 0) if next_sleep > 0: # warm the current channel immediately - channel = self.transport.channels[channel_idx] - await self._ping_and_warm_instances(channel) + await self._ping_and_warm_instances(channel=self.transport.grpc_channel) # continuously refresh the channel every `refresh_interval` seconds while True: await asyncio.sleep(next_sleep) + start_timestamp = time.time() # prepare new channel for use - new_channel = self.transport.grpc_channel._create_channel() - await self._ping_and_warm_instances(new_channel) + old_channel = self.transport.grpc_channel + new_channel = self.transport.create_channel() + await self._ping_and_warm_instances(channel=new_channel) # cycle channel out of use, with long grace window before closure - start_timestamp = time.time() - await self.transport.replace_channel( - channel_idx, grace=grace_period, swap_sleep=10, new_channel=new_channel - ) + self.transport._grpc_channel = new_channel + await old_channel.close(grace_period) # subtract the time spent waiting for the channel to be replaced next_refresh = random.uniform(refresh_interval_min, refresh_interval_max) next_sleep = next_refresh - (time.time() - start_timestamp) @@ -331,9 +318,8 @@ async def _register_instance( self, instance_id: str, owner: Union[TableAsync, ExecuteQueryIteratorAsync] ) -> None: """ - Registers an instance with the client, and warms the channel pool - for the instance - The client will periodically refresh grpc channel pool used to make + Registers an instance with the client, and warms the channel for the instance + The client will periodically refresh grpc channel used to make requests, and new channels will be warmed for each registered instance Channels will not be refreshed unless at least one instance is registered @@ -350,11 +336,10 @@ async def _register_instance( self._instance_owners.setdefault(instance_key, set()).add(id(owner)) if instance_key not in self._active_instances: self._active_instances.add(instance_key) - if self._channel_refresh_tasks: + if self._channel_refresh_task: # refresh tasks already running # call ping and warm on all existing channels - for channel in self.transport.channels: - await self._ping_and_warm_instances(channel, instance_key) + await self._ping_and_warm_instances(instance_key) else: # refresh tasks aren't active. 
start them as background tasks self._start_background_channel_refresh() diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py index a90a4a1a78c0..a2534d5393c6 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py @@ -55,7 +55,6 @@ from .transports.base import BigtableTransport, DEFAULT_CLIENT_INFO from .transports.grpc import BigtableGrpcTransport from .transports.grpc_asyncio import BigtableGrpcAsyncIOTransport -from .transports.pooled_grpc_asyncio import PooledBigtableGrpcAsyncIOTransport from .transports.rest import BigtableRestTransport @@ -70,7 +69,6 @@ class BigtableClientMeta(type): _transport_registry = OrderedDict() # type: Dict[str, Type[BigtableTransport]] _transport_registry["grpc"] = BigtableGrpcTransport _transport_registry["grpc_asyncio"] = BigtableGrpcAsyncIOTransport - _transport_registry["pooled_grpc_asyncio"] = PooledBigtableGrpcAsyncIOTransport _transport_registry["rest"] = BigtableRestTransport def get_transport_class( diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py index ae5c1cf7281b..ae007bc2bd04 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py @@ -19,7 +19,6 @@ from .base import BigtableTransport from .grpc import BigtableGrpcTransport from .grpc_asyncio import BigtableGrpcAsyncIOTransport -from .pooled_grpc_asyncio import PooledBigtableGrpcAsyncIOTransport from .rest import BigtableRestTransport from .rest import BigtableRestInterceptor @@ -28,14 +27,12 @@ _transport_registry = OrderedDict() # type: Dict[str, Type[BigtableTransport]] _transport_registry["grpc"] = BigtableGrpcTransport _transport_registry["grpc_asyncio"] = BigtableGrpcAsyncIOTransport -_transport_registry["pooled_grpc_asyncio"] = PooledBigtableGrpcAsyncIOTransport _transport_registry["rest"] = BigtableRestTransport __all__ = ( "BigtableTransport", "BigtableGrpcTransport", "BigtableGrpcAsyncIOTransport", - "PooledBigtableGrpcAsyncIOTransport", "BigtableRestTransport", "BigtableRestInterceptor", ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/pooled_grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/pooled_grpc_asyncio.py deleted file mode 100644 index ce8fec4e9cb2..000000000000 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/pooled_grpc_asyncio.py +++ /dev/null @@ -1,430 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2022 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import asyncio -import inspect -import warnings -from functools import partialmethod -from functools import partial -from typing import ( - Awaitable, - Callable, - Dict, - Optional, - Sequence, - Tuple, - Union, - List, - Type, -) - -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers_async -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.bigtable_v2.types import bigtable -from .base import BigtableTransport, DEFAULT_CLIENT_INFO -from .grpc_asyncio import BigtableGrpcAsyncIOTransport - - -class PooledMultiCallable: - def __init__(self, channel_pool: "PooledChannel", *args, **kwargs): - self._init_args = args - self._init_kwargs = kwargs - self.next_channel_fn = channel_pool.next_channel - - -class PooledUnaryUnaryMultiCallable(PooledMultiCallable, aio.UnaryUnaryMultiCallable): - def __call__(self, *args, **kwargs) -> aio.UnaryUnaryCall: - return self.next_channel_fn().unary_unary( - *self._init_args, **self._init_kwargs - )(*args, **kwargs) - - -class PooledUnaryStreamMultiCallable(PooledMultiCallable, aio.UnaryStreamMultiCallable): - def __call__(self, *args, **kwargs) -> aio.UnaryStreamCall: - return self.next_channel_fn().unary_stream( - *self._init_args, **self._init_kwargs - )(*args, **kwargs) - - -class PooledStreamUnaryMultiCallable(PooledMultiCallable, aio.StreamUnaryMultiCallable): - def __call__(self, *args, **kwargs) -> aio.StreamUnaryCall: - return self.next_channel_fn().stream_unary( - *self._init_args, **self._init_kwargs - )(*args, **kwargs) - - -class PooledStreamStreamMultiCallable( - PooledMultiCallable, aio.StreamStreamMultiCallable -): - def __call__(self, *args, **kwargs) -> aio.StreamStreamCall: - return self.next_channel_fn().stream_stream( - *self._init_args, **self._init_kwargs - )(*args, **kwargs) - - -class PooledChannel(aio.Channel): - def __init__( - self, - pool_size: int = 3, - host: str = "bigtable.googleapis.com", - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - quota_project_id: Optional[str] = None, - default_scopes: Optional[Sequence[str]] = None, - scopes: Optional[Sequence[str]] = None, - default_host: Optional[str] = None, - insecure: bool = False, - **kwargs, - ): - self._pool: List[aio.Channel] = [] - self._next_idx = 0 - if insecure: - self._create_channel = partial(aio.insecure_channel, host) - else: - self._create_channel = partial( - grpc_helpers_async.create_channel, - target=host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=default_scopes, - scopes=scopes, - default_host=default_host, - **kwargs, - ) - for i in range(pool_size): - self._pool.append(self._create_channel()) - - def next_channel(self) -> aio.Channel: - channel = self._pool[self._next_idx] - self._next_idx = (self._next_idx + 1) % len(self._pool) - return channel - - def unary_unary(self, *args, **kwargs) -> grpc.aio.UnaryUnaryMultiCallable: - return PooledUnaryUnaryMultiCallable(self, *args, **kwargs) - - def unary_stream(self, *args, **kwargs) -> grpc.aio.UnaryStreamMultiCallable: - return PooledUnaryStreamMultiCallable(self, *args, **kwargs) - - def stream_unary(self, *args, **kwargs) -> grpc.aio.StreamUnaryMultiCallable: - return PooledStreamUnaryMultiCallable(self, *args, **kwargs) - - def stream_stream(self, *args, **kwargs) -> 
grpc.aio.StreamStreamMultiCallable: - return PooledStreamStreamMultiCallable(self, *args, **kwargs) - - async def close(self, grace=None): - close_fns = [channel.close(grace=grace) for channel in self._pool] - return await asyncio.gather(*close_fns) - - async def channel_ready(self): - ready_fns = [channel.channel_ready() for channel in self._pool] - return asyncio.gather(*ready_fns) - - async def __aenter__(self): - return self - - async def __aexit__(self, exc_type, exc_val, exc_tb): - await self.close() - - def get_state(self, try_to_connect: bool = False) -> grpc.ChannelConnectivity: - raise NotImplementedError() - - async def wait_for_state_change(self, last_observed_state): - raise NotImplementedError() - - async def replace_channel( - self, channel_idx, grace=None, swap_sleep=1, new_channel=None - ) -> aio.Channel: - """ - Replaces a channel in the pool with a fresh one. - - The `new_channel` will start processing new requests immidiately, - but the old channel will continue serving existing clients for `grace` seconds - - Args: - channel_idx(int): the channel index in the pool to replace - grace(Optional[float]): The time to wait until all active RPCs are - finished. If a grace period is not specified (by passing None for - grace), all existing RPCs are cancelled immediately. - swap_sleep(Optional[float]): The number of seconds to sleep in between - replacing channels and closing the old one - new_channel(grpc.aio.Channel): a new channel to insert into the pool - at `channel_idx`. If `None`, a new channel will be created. - """ - if channel_idx >= len(self._pool) or channel_idx < 0: - raise ValueError( - f"invalid channel_idx {channel_idx} for pool size {len(self._pool)}" - ) - if new_channel is None: - new_channel = self._create_channel() - old_channel = self._pool[channel_idx] - self._pool[channel_idx] = new_channel - await asyncio.sleep(swap_sleep) - await old_channel.close(grace=grace) - return new_channel - - -class PooledBigtableGrpcAsyncIOTransport(BigtableGrpcAsyncIOTransport): - """Pooled gRPC AsyncIO backend transport for Bigtable. - - Service for reading from and writing to existing Bigtable - tables. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - - This class allows channel pooling, so multiple channels can be used concurrently - when making requests. Channels are rotated in a round-robin fashion. - """ - - @classmethod - def with_fixed_size(cls, pool_size) -> Type["PooledBigtableGrpcAsyncIOTransport"]: - """ - Creates a new class with a fixed channel pool size. - - A fixed channel pool makes compatibility with other transports easier, - as the initializer signature is the same. 
- """ - - class PooledTransportFixed(cls): - __init__ = partialmethod(cls.__init__, pool_size=pool_size) - - PooledTransportFixed.__name__ = f"{cls.__name__}_{pool_size}" - PooledTransportFixed.__qualname__ = PooledTransportFixed.__name__ - return PooledTransportFixed - - @classmethod - def create_channel( - cls, - pool_size: int = 3, - host: str = "bigtable.googleapis.com", - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs, - ) -> aio.Channel: - """Create and return a PooledChannel object, representing a pool of gRPC AsyncIO channels - Args: - pool_size (int): The number of channels in the pool. - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - PooledChannel: a channel pool object - """ - - return PooledChannel( - pool_size, - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs, - ) - - def __init__( - self, - *, - pool_size: int = 3, - host: str = "bigtable.googleapis.com", - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - api_mtls_endpoint: Optional[str] = None, - client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, - ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, - client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - api_audience: Optional[str] = None, - ) -> None: - """Instantiate the transport. - - Args: - pool_size (int): the number of grpc channels to maintain in a pool - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. 
- api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - ValueError: if ``pool_size`` <= 0 - """ - if pool_size <= 0: - raise ValueError(f"invalid pool_size: {pool_size}") - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - BigtableTransport.__init__( - self, - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - api_audience=api_audience, - ) - self._quota_project_id = quota_project_id - self._grpc_channel = type(self).create_channel( - pool_size, - self._host, - # use the credentials which are saved - credentials=self._credentials, - # Set ``credentials_file`` to ``None`` here as - # the credentials that we saved earlier should be used. 
- credentials_file=None, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=self._quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._wrap_with_kind = ( - "kind" in inspect.signature(gapic_v1.method_async.wrap_method).parameters - ) - self._prep_wrapped_messages(client_info) - - @property - def pool_size(self) -> int: - """The number of grpc channels in the pool.""" - return len(self._grpc_channel._pool) - - @property - def channels(self) -> List[grpc.Channel]: - """Acccess the internal list of grpc channels.""" - return self._grpc_channel._pool - - async def replace_channel( - self, channel_idx, grace=None, swap_sleep=1, new_channel=None - ) -> aio.Channel: - """ - Replaces a channel in the pool with a fresh one. - - The `new_channel` will start processing new requests immidiately, - but the old channel will continue serving existing clients for `grace` seconds - - Args: - channel_idx(int): the channel index in the pool to replace - grace(Optional[float]): The time to wait until all active RPCs are - finished. If a grace period is not specified (by passing None for - grace), all existing RPCs are cancelled immediately. - swap_sleep(Optional[float]): The number of seconds to sleep in between - replacing channels and closing the old one - new_channel(grpc.aio.Channel): a new channel to insert into the pool - at `channel_idx`. If `None`, a new channel will be created. - """ - return await self._grpc_channel.replace_channel( - channel_idx, grace, swap_sleep, new_channel - ) - - -__all__ = ("PooledBigtableGrpcAsyncIOTransport",) diff --git a/packages/google-cloud-bigtable/owlbot.py b/packages/google-cloud-bigtable/owlbot.py index 323e65d4681d..16ce11b4f342 100644 --- a/packages/google-cloud-bigtable/owlbot.py +++ b/packages/google-cloud-bigtable/owlbot.py @@ -97,52 +97,6 @@ def get_staging_dirs( s.move(templated_files, excludes=[".coveragerc", "README.rst", ".github/release-please.yml", "noxfile.py"]) -# ---------------------------------------------------------------------------- -# Customize gapics to include PooledBigtableGrpcAsyncIOTransport -# ---------------------------------------------------------------------------- -def insert(file, before_line, insert_line, after_line, escape=None): - target = before_line + "\n" + after_line - if escape: - for c in escape: - target = target.replace(c, '\\' + c) - replacement = before_line + "\n" + insert_line + "\n" + after_line - s.replace(file, target, replacement) - - -insert( - "google/cloud/bigtable_v2/services/bigtable/client.py", - "from .transports.grpc_asyncio import BigtableGrpcAsyncIOTransport", - "from .transports.pooled_grpc_asyncio import PooledBigtableGrpcAsyncIOTransport", - "from .transports.rest import BigtableRestTransport" -) -insert( - "google/cloud/bigtable_v2/services/bigtable/client.py", - ' _transport_registry["grpc_asyncio"] = BigtableGrpcAsyncIOTransport', - ' _transport_registry["pooled_grpc_asyncio"] = PooledBigtableGrpcAsyncIOTransport', - ' _transport_registry["rest"] = BigtableRestTransport', - escape='[]"' -) -insert( - "google/cloud/bigtable_v2/services/bigtable/transports/__init__.py", - '_transport_registry["grpc_asyncio"] = BigtableGrpcAsyncIOTransport', - '_transport_registry["pooled_grpc_asyncio"] = PooledBigtableGrpcAsyncIOTransport', - '_transport_registry["rest"] = BigtableRestTransport', - escape='[]"' -) -insert( - 
"google/cloud/bigtable_v2/services/bigtable/transports/__init__.py", - "from .grpc_asyncio import BigtableGrpcAsyncIOTransport", - "from .pooled_grpc_asyncio import PooledBigtableGrpcAsyncIOTransport", - "from .rest import BigtableRestTransport" -) -insert( - "google/cloud/bigtable_v2/services/bigtable/transports/__init__.py", - ' "BigtableGrpcAsyncIOTransport",', - ' "PooledBigtableGrpcAsyncIOTransport",', - ' "BigtableRestTransport",', - escape='"' -) - # ---------------------------------------------------------------------------- # Samples templates # ---------------------------------------------------------------------------- diff --git a/packages/google-cloud-bigtable/tests/system/data/test_execute_query_async.py b/packages/google-cloud-bigtable/tests/system/data/test_execute_query_async.py index a680d2de098d..489dfeab6b6c 100644 --- a/packages/google-cloud-bigtable/tests/system/data/test_execute_query_async.py +++ b/packages/google-cloud-bigtable/tests/system/data/test_execute_query_async.py @@ -23,7 +23,6 @@ ) from google.api_core import exceptions as core_exceptions from google.cloud.bigtable.data import BigtableDataClientAsync -import google.cloud.bigtable.data._async.client TABLE_NAME = "TABLE_NAME" INSTANCE_NAME = "INSTANCE_NAME" @@ -39,11 +38,7 @@ def async_channel_mock(self): def async_client(self, async_channel_mock): with mock.patch.dict( os.environ, {"BIGTABLE_EMULATOR_HOST": "localhost"} - ), mock.patch.object( - google.cloud.bigtable.data._async.client, - "PooledChannel", - return_value=async_channel_mock, - ): + ), mock.patch("grpc.aio.insecure_channel", return_value=async_channel_mock): yield BigtableDataClientAsync() @pytest.mark.asyncio diff --git a/packages/google-cloud-bigtable/tests/system/data/test_execute_query_utils.py b/packages/google-cloud-bigtable/tests/system/data/test_execute_query_utils.py index 9e27b95f294f..3439e04d2ccc 100644 --- a/packages/google-cloud-bigtable/tests/system/data/test_execute_query_utils.py +++ b/packages/google-cloud-bigtable/tests/system/data/test_execute_query_utils.py @@ -14,7 +14,6 @@ from unittest import mock -import google.cloud.bigtable_v2.services.bigtable.transports.pooled_grpc_asyncio as pga from google.cloud.bigtable_v2.types.bigtable import ExecuteQueryResponse from google.cloud.bigtable_v2.types.data import ProtoRows, Value as PBValue import grpc.aio @@ -143,7 +142,7 @@ def unary_stream(self, *args, **kwargs): return mock.MagicMock() -class ChannelMockAsync(pga.PooledChannel, mock.MagicMock): +class ChannelMockAsync(grpc.aio.Channel, mock.MagicMock): def __init__(self, *args, **kwargs): mock.MagicMock.__init__(self, *args, **kwargs) self.execute_query_calls = [] @@ -270,3 +269,27 @@ def wait_for_connection(*args, **kwargs): # PTAL https://grpc.github.io/grpc/python/grpc_asyncio.html#grpc.aio.Channel.unary_stream return UnaryStreamMultiCallableMock(self) return async_mock() + + def stream_unary(self, *args, **kwargs) -> grpc.aio.StreamUnaryMultiCallable: + raise NotImplementedError() + + def stream_stream(self, *args, **kwargs) -> grpc.aio.StreamStreamMultiCallable: + raise NotImplementedError() + + async def close(self, grace=None): + return + + async def channel_ready(self): + return + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + await self.close() + + def get_state(self, try_to_connect: bool = False) -> grpc.ChannelConnectivity: + raise NotImplementedError() + + async def wait_for_state_change(self, last_observed_state): + raise NotImplementedError() diff --git 
a/packages/google-cloud-bigtable/tests/system/data/test_system.py b/packages/google-cloud-bigtable/tests/system/data/test_system.py index 9fe208551d78..8f31827edd78 100644 --- a/packages/google-cloud-bigtable/tests/system/data/test_system.py +++ b/packages/google-cloud-bigtable/tests/system/data/test_system.py @@ -168,12 +168,7 @@ async def test_ping_and_warm(client, table): """ Test ping and warm from handwritten client """ - try: - channel = client.transport._grpc_channel.pool[0] - except Exception: - # for sync client - channel = client.transport._grpc_channel - results = await client._ping_and_warm_instances(channel) + results = await client._ping_and_warm_instances() assert len(results) == 1 assert results[0] is None diff --git a/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py b/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py index 8c8cf6082663..fdc86e924a30 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py @@ -51,7 +51,7 @@ def _make_client(*args, use_emulator=True, **kwargs): env_mask = {} # by default, use emulator mode to avoid auth issues in CI - # emulator mode must be disabled by tests that check channel pooling/refresh background tasks + # emulator mode must be disabled by tests that check refresh background tasks if use_emulator: env_mask["BIGTABLE_EMULATOR_HOST"] = "localhost" else: @@ -74,19 +74,16 @@ def _make_one(self, *args, **kwargs): @pytest.mark.asyncio async def test_ctor(self): expected_project = "project-id" - expected_pool_size = 11 expected_credentials = AnonymousCredentials() client = self._make_one( project="project-id", - pool_size=expected_pool_size, credentials=expected_credentials, use_emulator=False, ) await asyncio.sleep(0) assert client.project == expected_project - assert len(client.transport._grpc_channel._pool) == expected_pool_size assert not client._active_instances - assert len(client._channel_refresh_tasks) == expected_pool_size + assert client._channel_refresh_task is not None assert client.transport._credentials == expected_credentials await client.close() @@ -99,11 +96,9 @@ async def test_ctor_super_inits(self): from google.api_core import client_options as client_options_lib project = "project-id" - pool_size = 11 credentials = AnonymousCredentials() client_options = {"api_endpoint": "foo.bar:1234"} options_parsed = client_options_lib.from_dict(client_options) - transport_str = f"pooled_grpc_asyncio_{pool_size}" with mock.patch.object(BigtableAsyncClient, "__init__") as bigtable_client_init: bigtable_client_init.return_value = None with mock.patch.object( @@ -113,7 +108,6 @@ async def test_ctor_super_inits(self): try: self._make_one( project=project, - pool_size=pool_size, credentials=credentials, client_options=options_parsed, use_emulator=False, @@ -123,7 +117,6 @@ async def test_ctor_super_inits(self): # test gapic superclass init was called assert bigtable_client_init.call_count == 1 kwargs = bigtable_client_init.call_args[1] - assert kwargs["transport"] == transport_str assert kwargs["credentials"] == credentials assert kwargs["client_options"] == options_parsed # test mixin superclass init was called @@ -179,78 +172,6 @@ async def test_veneer_grpc_headers(self): ), f"'{wrapped_user_agent_sorted}' does not match {VENEER_HEADER_REGEX}" await client.close() - @pytest.mark.asyncio - async def test_channel_pool_creation(self): - pool_size = 14 - with mock.patch( - 
"google.api_core.grpc_helpers_async.create_channel" - ) as create_channel: - create_channel.return_value = AsyncMock() - client = self._make_one(project="project-id", pool_size=pool_size) - assert create_channel.call_count == pool_size - await client.close() - # channels should be unique - client = self._make_one(project="project-id", pool_size=pool_size) - pool_list = list(client.transport._grpc_channel._pool) - pool_set = set(client.transport._grpc_channel._pool) - assert len(pool_list) == len(pool_set) - await client.close() - - @pytest.mark.asyncio - async def test_channel_pool_rotation(self): - from google.cloud.bigtable_v2.services.bigtable.transports.pooled_grpc_asyncio import ( - PooledChannel, - ) - - pool_size = 7 - - with mock.patch.object(PooledChannel, "next_channel") as next_channel: - client = self._make_one(project="project-id", pool_size=pool_size) - assert len(client.transport._grpc_channel._pool) == pool_size - next_channel.reset_mock() - with mock.patch.object( - type(client.transport._grpc_channel._pool[0]), "unary_unary" - ) as unary_unary: - # calling an rpc `pool_size` times should use a different channel each time - channel_next = None - for i in range(pool_size): - channel_last = channel_next - channel_next = client.transport.grpc_channel._pool[i] - assert channel_last != channel_next - next_channel.return_value = channel_next - client.transport.ping_and_warm() - assert next_channel.call_count == i + 1 - unary_unary.assert_called_once() - unary_unary.reset_mock() - await client.close() - - @pytest.mark.asyncio - async def test_channel_pool_replace(self): - with mock.patch.object(asyncio, "sleep"): - pool_size = 7 - client = self._make_one(project="project-id", pool_size=pool_size) - for replace_idx in range(pool_size): - start_pool = [ - channel for channel in client.transport._grpc_channel._pool - ] - grace_period = 9 - with mock.patch.object( - type(client.transport._grpc_channel._pool[0]), "close" - ) as close: - new_channel = grpc.aio.insecure_channel("localhost:8080") - await client.transport.replace_channel( - replace_idx, grace=grace_period, new_channel=new_channel - ) - close.assert_called_once_with(grace=grace_period) - close.assert_awaited_once() - assert client.transport._grpc_channel._pool[replace_idx] == new_channel - for i in range(pool_size): - if i != replace_idx: - assert client.transport._grpc_channel._pool[i] == start_pool[i] - else: - assert client.transport._grpc_channel._pool[i] != start_pool[i] - await client.close() - @pytest.mark.filterwarnings("ignore::RuntimeWarning") def test__start_background_channel_refresh_sync(self): # should raise RuntimeError if called in a sync context @@ -259,48 +180,37 @@ def test__start_background_channel_refresh_sync(self): client._start_background_channel_refresh() @pytest.mark.asyncio - async def test__start_background_channel_refresh_tasks_exist(self): + async def test__start_background_channel_refresh_task_exists(self): # if tasks exist, should do nothing client = self._make_one(project="project-id", use_emulator=False) - assert len(client._channel_refresh_tasks) > 0 + assert client._channel_refresh_task is not None with mock.patch.object(asyncio, "create_task") as create_task: client._start_background_channel_refresh() create_task.assert_not_called() await client.close() @pytest.mark.asyncio - @pytest.mark.parametrize("pool_size", [1, 3, 7]) - async def test__start_background_channel_refresh(self, pool_size): + async def test__start_background_channel_refresh(self): # should create background tasks for 
each channel - client = self._make_one( - project="project-id", pool_size=pool_size, use_emulator=False - ) + client = self._make_one(project="project-id", use_emulator=False) ping_and_warm = AsyncMock() client._ping_and_warm_instances = ping_and_warm client._start_background_channel_refresh() - assert len(client._channel_refresh_tasks) == pool_size - for task in client._channel_refresh_tasks: - assert isinstance(task, asyncio.Task) + assert client._channel_refresh_task is not None + assert isinstance(client._channel_refresh_task, asyncio.Task) await asyncio.sleep(0.1) - assert ping_and_warm.call_count == pool_size - for channel in client.transport._grpc_channel._pool: - ping_and_warm.assert_any_call(channel) + assert ping_and_warm.call_count == 1 await client.close() @pytest.mark.asyncio @pytest.mark.skipif( sys.version_info < (3, 8), reason="Task.name requires python3.8 or higher" ) - async def test__start_background_channel_refresh_tasks_names(self): + async def test__start_background_channel_refresh_task_names(self): # if tasks exist, should do nothing - pool_size = 3 - client = self._make_one( - project="project-id", pool_size=pool_size, use_emulator=False - ) - for i in range(pool_size): - name = client._channel_refresh_tasks[i].get_name() - assert str(i) in name - assert "BigtableDataClientAsync channel refresh " in name + client = self._make_one(project="project-id", use_emulator=False) + name = client._channel_refresh_task.get_name() + assert "BigtableDataClientAsync channel refresh" in name await client.close() @pytest.mark.asyncio @@ -316,7 +226,7 @@ async def test__ping_and_warm_instances(self): # test with no instances client_mock._active_instances = [] result = await self._get_target_class()._ping_and_warm_instances( - client_mock, channel + client_mock, channel=channel ) assert len(result) == 0 gather.assert_called_once() @@ -330,7 +240,7 @@ async def test__ping_and_warm_instances(self): gather.reset_mock() channel.reset_mock() result = await self._get_target_class()._ping_and_warm_instances( - client_mock, channel + client_mock, channel=channel ) assert len(result) == 4 gather.assert_called_once() @@ -364,17 +274,18 @@ async def test__ping_and_warm_single_instance(self): with mock.patch.object(asyncio, "gather", AsyncMock()) as gather: # simulate gather by returning the same number of items as passed in gather.side_effect = lambda *args, **kwargs: [None for _ in args] - channel = mock.Mock() # test with large set of instances client_mock._active_instances = [mock.Mock()] * 100 test_key = ("test-instance", "test-table", "test-app-profile") result = await self._get_target_class()._ping_and_warm_instances( - client_mock, channel, test_key + client_mock, test_key ) # should only have been called with test instance assert len(result) == 1 # check grpc call arguments - grpc_call_args = channel.unary_unary().call_args_list + grpc_call_args = ( + client_mock.transport.grpc_channel.unary_unary().call_args_list + ) assert len(grpc_call_args) == 1 kwargs = grpc_call_args[0][1] request = kwargs["request"] @@ -412,7 +323,7 @@ async def test__manage_channel_first_sleep( try: client = self._make_one(project="project-id") client._channel_init_time = -wait_time - await client._manage_channel(0, refresh_interval, refresh_interval) + await client._manage_channel(refresh_interval, refresh_interval) except asyncio.CancelledError: pass sleep.assert_called_once() @@ -431,40 +342,25 @@ async def test__manage_channel_ping_and_warm(self): client_mock = mock.Mock() client_mock._channel_init_time = 
time.monotonic() - channel_list = [mock.Mock(), mock.Mock()] - client_mock.transport.channels = channel_list - new_channel = mock.Mock() - client_mock.transport.grpc_channel._create_channel.return_value = new_channel + orig_channel = client_mock.transport.grpc_channel # should ping an warm all new channels, and old channels if sleeping with mock.patch.object(asyncio, "sleep"): - # stop process after replace_channel is called - client_mock.transport.replace_channel.side_effect = asyncio.CancelledError + # stop process after close is called + orig_channel.close.side_effect = asyncio.CancelledError ping_and_warm = client_mock._ping_and_warm_instances = AsyncMock() # should ping and warm old channel then new if sleep > 0 try: - channel_idx = 1 - await self._get_target_class()._manage_channel( - client_mock, channel_idx, 10 - ) + await self._get_target_class()._manage_channel(client_mock, 10) except asyncio.CancelledError: pass # should have called at loop start, and after replacement assert ping_and_warm.call_count == 2 # should have replaced channel once - assert client_mock.transport.replace_channel.call_count == 1 + assert client_mock.transport._grpc_channel != orig_channel # make sure new and old channels were warmed - old_channel = channel_list[channel_idx] - assert old_channel != new_channel - called_with = [call[0][0] for call in ping_and_warm.call_args_list] - assert old_channel in called_with - assert new_channel in called_with - # should ping and warm instantly new channel only if not sleeping - ping_and_warm.reset_mock() - try: - await self._get_target_class()._manage_channel(client_mock, 0, 0, 0) - except asyncio.CancelledError: - pass - ping_and_warm.assert_called_once_with(new_channel) + called_with = [call[1]["channel"] for call in ping_and_warm.call_args_list] + assert orig_channel in called_with + assert client_mock.transport.grpc_channel in called_with @pytest.mark.asyncio @pytest.mark.parametrize( @@ -482,7 +378,8 @@ async def test__manage_channel_sleeps( import time import random - channel_idx = 1 + channel = mock.Mock() + channel.close = mock.AsyncMock() with mock.patch.object(random, "uniform") as uniform: uniform.side_effect = lambda min_, max_: min_ with mock.patch.object(time, "time") as time: @@ -493,12 +390,16 @@ async def test__manage_channel_sleeps( ] try: client = self._make_one(project="project-id") - if refresh_interval is not None: - await client._manage_channel( - channel_idx, refresh_interval, refresh_interval - ) - else: - await client._manage_channel(channel_idx) + client.transport._grpc_channel = channel + with mock.patch.object( + client.transport, "create_channel", return_value=channel + ): + if refresh_interval is not None: + await client._manage_channel( + refresh_interval, refresh_interval + ) + else: + await client._manage_channel() except asyncio.CancelledError: pass assert sleep.call_count == num_cycles @@ -517,70 +418,57 @@ async def test__manage_channel_random(self): uniform.return_value = 0 try: uniform.side_effect = asyncio.CancelledError - client = self._make_one(project="project-id", pool_size=1) + client = self._make_one(project="project-id") except asyncio.CancelledError: uniform.side_effect = None uniform.reset_mock() sleep.reset_mock() - min_val = 200 - max_val = 205 - uniform.side_effect = lambda min_, max_: min_ - sleep.side_effect = [None, None, asyncio.CancelledError] - try: - await client._manage_channel(0, min_val, max_val) - except asyncio.CancelledError: - pass - assert uniform.call_count == 2 - uniform_args = [call[0] for call in 
uniform.call_args_list] - for found_min, found_max in uniform_args: - assert found_min == min_val - assert found_max == max_val + with mock.patch.object(client.transport, "create_channel"): + min_val = 200 + max_val = 205 + uniform.side_effect = lambda min_, max_: min_ + sleep.side_effect = [None, asyncio.CancelledError] + try: + await client._manage_channel(min_val, max_val) + except asyncio.CancelledError: + pass + assert uniform.call_count == 2 + uniform_args = [call[0] for call in uniform.call_args_list] + for found_min, found_max in uniform_args: + assert found_min == min_val + assert found_max == max_val @pytest.mark.asyncio @pytest.mark.parametrize("num_cycles", [0, 1, 10, 100]) async def test__manage_channel_refresh(self, num_cycles): # make sure that channels are properly refreshed - from google.cloud.bigtable_v2.services.bigtable.transports.pooled_grpc_asyncio import ( - PooledBigtableGrpcAsyncIOTransport, - ) from google.api_core import grpc_helpers_async expected_grace = 9 expected_refresh = 0.5 - channel_idx = 1 new_channel = grpc.aio.insecure_channel("localhost:8080") - with mock.patch.object( - PooledBigtableGrpcAsyncIOTransport, "replace_channel" - ) as replace_channel: - with mock.patch.object(asyncio, "sleep") as sleep: - sleep.side_effect = [None for i in range(num_cycles)] + [ - asyncio.CancelledError - ] - with mock.patch.object( - grpc_helpers_async, "create_channel" - ) as create_channel: - create_channel.return_value = new_channel - client = self._make_one(project="project-id", use_emulator=False) - create_channel.reset_mock() - try: - await client._manage_channel( - channel_idx, - refresh_interval_min=expected_refresh, - refresh_interval_max=expected_refresh, - grace_period=expected_grace, - ) - except asyncio.CancelledError: - pass - assert sleep.call_count == num_cycles + 1 - assert create_channel.call_count == num_cycles - assert replace_channel.call_count == num_cycles - for call in replace_channel.call_args_list: - args, kwargs = call - assert args[0] == channel_idx - assert kwargs["grace"] == expected_grace - assert kwargs["new_channel"] == new_channel - await client.close() + with mock.patch.object(asyncio, "sleep") as sleep: + sleep.side_effect = [None for i in range(num_cycles)] + [ + asyncio.CancelledError + ] + with mock.patch.object( + grpc_helpers_async, "create_channel" + ) as create_channel: + create_channel.return_value = new_channel + client = self._make_one(project="project-id", use_emulator=False) + create_channel.reset_mock() + try: + await client._manage_channel( + refresh_interval_min=expected_refresh, + refresh_interval_max=expected_refresh, + grace_period=expected_grace, + ) + except asyncio.CancelledError: + pass + assert sleep.call_count == num_cycles + 1 + assert create_channel.call_count == num_cycles + await client.close() @pytest.mark.asyncio async def test__register_instance(self): @@ -594,12 +482,7 @@ async def test__register_instance(self): instance_owners = {} client_mock._active_instances = active_instances client_mock._instance_owners = instance_owners - client_mock._channel_refresh_tasks = [] - client_mock._start_background_channel_refresh.side_effect = ( - lambda: client_mock._channel_refresh_tasks.append(mock.Mock) - ) - mock_channels = [mock.Mock() for i in range(5)] - client_mock.transport.channels = mock_channels + client_mock._channel_refresh_task = None client_mock._ping_and_warm_instances = AsyncMock() table_mock = mock.Mock() await self._get_target_class()._register_instance( @@ -617,21 +500,20 @@ async def 
test__register_instance(self): assert expected_key == tuple(list(active_instances)[0]) assert len(instance_owners) == 1 assert expected_key == tuple(list(instance_owners)[0]) - # should be a new task set - assert client_mock._channel_refresh_tasks + # simulate creation of refresh task + client_mock._channel_refresh_task = mock.Mock() # next call should not call _start_background_channel_refresh again table_mock2 = mock.Mock() await self._get_target_class()._register_instance( client_mock, "instance-2", table_mock2 ) assert client_mock._start_background_channel_refresh.call_count == 1 + assert ( + client_mock._ping_and_warm_instances.call_args[0][0][0] + == "prefix/instance-2" + ) # but it should call ping and warm with new instance key - assert client_mock._ping_and_warm_instances.call_count == len(mock_channels) - for channel in mock_channels: - assert channel in [ - call[0][0] - for call in client_mock._ping_and_warm_instances.call_args_list - ] + assert client_mock._ping_and_warm_instances.call_count == 1 # check for updated lists assert len(active_instances) == 2 assert len(instance_owners) == 2 @@ -980,60 +862,29 @@ async def test_get_table_context_manager(self): assert client._instance_owners[instance_key] == {id(table)} assert close_mock.call_count == 1 - @pytest.mark.asyncio - async def test_multiple_pool_sizes(self): - # should be able to create multiple clients with different pool sizes without issue - pool_sizes = [1, 2, 4, 8, 16, 32, 64, 128, 256] - for pool_size in pool_sizes: - client = self._make_one( - project="project-id", pool_size=pool_size, use_emulator=False - ) - assert len(client._channel_refresh_tasks) == pool_size - client_duplicate = self._make_one( - project="project-id", pool_size=pool_size, use_emulator=False - ) - assert len(client_duplicate._channel_refresh_tasks) == pool_size - assert str(pool_size) in str(client.transport) - await client.close() - await client_duplicate.close() - @pytest.mark.asyncio async def test_close(self): - from google.cloud.bigtable_v2.services.bigtable.transports.pooled_grpc_asyncio import ( - PooledBigtableGrpcAsyncIOTransport, - ) - - pool_size = 7 - client = self._make_one( - project="project-id", pool_size=pool_size, use_emulator=False - ) - assert len(client._channel_refresh_tasks) == pool_size - tasks_list = list(client._channel_refresh_tasks) - for task in client._channel_refresh_tasks: - assert not task.done() - with mock.patch.object( - PooledBigtableGrpcAsyncIOTransport, "close", AsyncMock() - ) as close_mock: + client = self._make_one(project="project-id", use_emulator=False) + task = client._channel_refresh_task + assert task is not None + assert not task.done() + with mock.patch.object(client.transport, "close", AsyncMock()) as close_mock: await client.close() close_mock.assert_called_once() close_mock.assert_awaited() - for task in tasks_list: - assert task.done() - assert task.cancelled() - assert client._channel_refresh_tasks == [] + assert task.done() + assert task.cancelled() + assert client._channel_refresh_task is None @pytest.mark.asyncio async def test_close_with_timeout(self): - pool_size = 7 expected_timeout = 19 - client = self._make_one(project="project-id", pool_size=pool_size) - tasks = list(client._channel_refresh_tasks) + client = self._make_one(project="project-id", use_emulator=False) with mock.patch.object(asyncio, "wait_for", AsyncMock()) as wait_for_mock: await client.close(timeout=expected_timeout) wait_for_mock.assert_called_once() wait_for_mock.assert_awaited() assert 
wait_for_mock.call_args[1]["timeout"] == expected_timeout - client._channel_refresh_tasks = tasks await client.close() @pytest.mark.asyncio @@ -1041,11 +892,10 @@ async def test_context_manager(self): # context manager should close the client cleanly close_mock = AsyncMock() true_close = None - async with self._make_one(project="project-id") as client: + async with self._make_one(project="project-id", use_emulator=False) as client: true_close = client.close() client.close = close_mock - for task in client._channel_refresh_tasks: - assert not task.done() + assert not client._channel_refresh_task.done() assert client.project == "project-id" assert client._active_instances == set() close_mock.assert_not_called() @@ -1066,7 +916,7 @@ def test_client_ctor_sync(self): in str(expected_warning[0].message) ) assert client.project == "project-id" - assert client._channel_refresh_tasks == [] + assert client._channel_refresh_task is None class TestTableAsync: diff --git a/python-api-core b/python-api-core deleted file mode 160000 index 17ff5f1d83a9..000000000000 --- a/python-api-core +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 17ff5f1d83a9a6f50a0226fb0e794634bd584f17 From 83abdd5f7e65441e8bb3b899e5bbd619f4675f45 Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Fri, 8 Nov 2024 10:37:12 -0800 Subject: [PATCH 830/892] chore: add cross_sync (#999) --- .../.cross_sync/README.md | 73 +++ .../.cross_sync/generate.py | 107 ++++ .../.cross_sync/transformers.py | 333 ++++++++++ .../bigtable/data/_cross_sync/__init__.py | 20 + .../bigtable/data/_cross_sync/_decorators.py | 441 +++++++++++++ .../data/_cross_sync/_mapping_meta.py | 64 ++ .../bigtable/data/_cross_sync/cross_sync.py | 334 ++++++++++ packages/google-cloud-bigtable/noxfile.py | 6 +- .../cross_sync/test_cases/async_to_sync.yaml | 76 +++ .../test_cases/cross_sync_files.yaml | 469 ++++++++++++++ .../system/cross_sync/test_cases/rm_aio.yaml | 109 ++++ .../strip_async_conditional_branches.yaml | 74 +++ .../test_cases/symbol_replacer.yaml | 82 +++ .../system/cross_sync/test_cross_sync_e2e.py | 65 ++ .../unit/data/_cross_sync/test_cross_sync.py | 579 ++++++++++++++++++ .../_cross_sync/test_cross_sync_decorators.py | 542 ++++++++++++++++ 16 files changed, 3372 insertions(+), 2 deletions(-) create mode 100644 packages/google-cloud-bigtable/.cross_sync/README.md create mode 100644 packages/google-cloud-bigtable/.cross_sync/generate.py create mode 100644 packages/google-cloud-bigtable/.cross_sync/transformers.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/data/_cross_sync/__init__.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/data/_cross_sync/_decorators.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/data/_cross_sync/_mapping_meta.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/data/_cross_sync/cross_sync.py create mode 100644 packages/google-cloud-bigtable/tests/system/cross_sync/test_cases/async_to_sync.yaml create mode 100644 packages/google-cloud-bigtable/tests/system/cross_sync/test_cases/cross_sync_files.yaml create mode 100644 packages/google-cloud-bigtable/tests/system/cross_sync/test_cases/rm_aio.yaml create mode 100644 packages/google-cloud-bigtable/tests/system/cross_sync/test_cases/strip_async_conditional_branches.yaml create mode 100644 packages/google-cloud-bigtable/tests/system/cross_sync/test_cases/symbol_replacer.yaml create mode 100644 packages/google-cloud-bigtable/tests/system/cross_sync/test_cross_sync_e2e.py create mode 
100644 packages/google-cloud-bigtable/tests/unit/data/_cross_sync/test_cross_sync.py create mode 100644 packages/google-cloud-bigtable/tests/unit/data/_cross_sync/test_cross_sync_decorators.py diff --git a/packages/google-cloud-bigtable/.cross_sync/README.md b/packages/google-cloud-bigtable/.cross_sync/README.md new file mode 100644 index 000000000000..4214e0d78475 --- /dev/null +++ b/packages/google-cloud-bigtable/.cross_sync/README.md @@ -0,0 +1,73 @@ +# CrossSync + +CrossSync provides a simple way to share logic between async and sync code. +It is made up of a small library that provides: +1. a set of shims that provide a shared sync/async API surface +2. annotations that are used to guide generation of a sync version from an async class + +Using CrossSync, the async code is treated as the source of truth, and sync code is generated from it. + +## Usage + +### CrossSync Shims + +Many Asyncio components have direct, 1:1 threaded counterparts for use in non-asyncio code. CrossSync +provides a compatibility layer that works with both + +| CrossSync | Asyncio Version | Sync Version | +| --- | --- | --- | +| CrossSync.Queue | asyncio.Queue | queue.Queue | +| CrossSync.Condition | asyncio.Condition | threading.Condition | +| CrossSync.Future | asyncio.Future | Concurrent.futures.Future | +| CrossSync.Task | asyncio.Task | Concurrent.futures.Future | +| CrossSync.Event | asyncio.Event | threading.Event | +| CrossSync.Semaphore | asyncio.Semaphore | threading.Semaphore | +| CrossSync.Awaitable | typing.Awaitable | typing.Union (no-op type) | +| CrossSync.Iterable | typing.AsyncIterable | typing.Iterable | +| CrossSync.Iterator | typing.AsyncIterator | typing.Iterator | +| CrossSync.Generator | typing.AsyncGenerator | typing.Generator | +| CrossSync.Retry | google.api_core.retry.AsyncRetry | google.api_core.retry.Retry | +| CrossSync.StopIteration | StopAsyncIteration | StopIteration | +| CrossSync.Mock | unittest.mock.AsyncMock | unittest.mock.Mock | + +Custom aliases can be added using `CrossSync.add_mapping(class, name)` + +Additionally, CrossSync provides method implementations that work equivalently in async and sync code: +- `CrossSync.sleep()` +- `CrossSync.gather_partials()` +- `CrossSync.wait()` +- `CrossSync.condition_wait()` +- `CrossSync,event_wait()` +- `CrossSync.create_task()` +- `CrossSync.retry_target()` +- `CrossSync.retry_target_stream()` + +### Annotations + +CrossSync provides a set of annotations to mark up async classes, to guide the generation of sync code. + +- `@CrossSync.convert_sync` + - marks classes for conversion. Unmarked classes will be copied as-is + - if add_mapping is included, the async and sync classes can be accessed using a shared CrossSync.X alias +- `@CrossSync.convert` + - marks async functions for conversion. Unmarked methods will be copied as-is +- `@CrossSync.drop` + - marks functions or classes that should not be included in sync output +- `@CrossSync.pytest` + - marks test functions. Test functions automatically have all async keywords stripped (i.e., rm_aio is unneeded) +- `CrossSync.add_mapping` + - manually registers a new CrossSync.X alias, for custom types +- `CrossSync.rm_aio` + - Marks regions of the code that include asyncio keywords that should be stripped during generation + +### Code Generation + +Generation can be initiated using `python .cross_sync/generate.py .` +from the root of the project. 
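+
+As a rough sketch (hypothetical module and names, not taken from this codebase,
+and using the annotation names described above; exact decorator signatures may
+differ), an input file marked for generation might look like:
+
+```python
+from google.cloud.bigtable.data._cross_sync import CrossSync
+
+__CROSS_SYNC_OUTPUT__ = "path.to.generated_sync_module"
+
+
+@CrossSync.convert_sync(sync_name="DataFetcher")
+class AsyncDataFetcher:
+    @CrossSync.convert
+    async def fetch_with_delay(self, delay: float) -> str:
+        # hypothetical example: rm_aio marks the await for removal in the sync output
+        CrossSync.rm_aio(await CrossSync.sleep(delay))
+        return "done"
+```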
This will find all classes with the `__CROSS_SYNC_OUTPUT__ = "path/to/output"` +annotation, and generate a sync version of classes marked with `@CrossSync.convert_sync` at the output path. + +## Architecture + +CrossSync is made up of two parts: +- the runtime shims and annotations live in `/google/cloud/bigtable/_cross_sync` +- the code generation logic lives in `/.cross_sync/` in the repo root diff --git a/packages/google-cloud-bigtable/.cross_sync/generate.py b/packages/google-cloud-bigtable/.cross_sync/generate.py new file mode 100644 index 000000000000..5158d0f37338 --- /dev/null +++ b/packages/google-cloud-bigtable/.cross_sync/generate.py @@ -0,0 +1,107 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations +from typing import Sequence +import ast +""" +Entrypoint for initiating an async -> sync conversion using CrossSync + +Finds all python files rooted in a given directory, and uses +transformers.CrossSyncFileProcessor to handle any files marked with +__CROSS_SYNC_OUTPUT__ +""" + + +def extract_header_comments(file_path) -> str: + """ + Extract the file header. Header is defined as the top-level + comments before any code or imports + """ + header = [] + with open(file_path, "r") as f: + for line in f: + if line.startswith("#") or line.strip() == "": + header.append(line) + else: + break + header.append("\n# This file is automatically generated by CrossSync. 
Do not edit manually.\n\n") + return "".join(header) + + +class CrossSyncOutputFile: + + def __init__(self, output_path: str, ast_tree, header: str | None = None): + self.output_path = output_path + self.tree = ast_tree + self.header = header or "" + + def render(self, with_formatter=True, save_to_disk: bool = True) -> str: + """ + Render the file to a string, and optionally save to disk + + Args: + with_formatter: whether to run the output through black before returning + save_to_disk: whether to write the output to the file path + """ + full_str = self.header + ast.unparse(self.tree) + if with_formatter: + import black # type: ignore + import autoflake # type: ignore + + full_str = black.format_str( + autoflake.fix_code(full_str, remove_all_unused_imports=True), + mode=black.FileMode(), + ) + if save_to_disk: + import os + os.makedirs(os.path.dirname(self.output_path), exist_ok=True) + with open(self.output_path, "w") as f: + f.write(full_str) + return full_str + + +def convert_files_in_dir(directory: str) -> set[CrossSyncOutputFile]: + import glob + from transformers import CrossSyncFileProcessor + + # find all python files in the directory + files = glob.glob(directory + "/**/*.py", recursive=True) + # keep track of the output files pointed to by the annotated classes + artifacts: set[CrossSyncOutputFile] = set() + file_transformer = CrossSyncFileProcessor() + # run each file through ast transformation to find all annotated classes + for file_path in files: + ast_tree = ast.parse(open(file_path).read()) + output_path = file_transformer.get_output_path(ast_tree) + if output_path is not None: + # contains __CROSS_SYNC_OUTPUT__ annotation + converted_tree = file_transformer.visit(ast_tree) + header = extract_header_comments(file_path) + artifacts.add(CrossSyncOutputFile(output_path, converted_tree, header)) + # return set of output artifacts + return artifacts + + +def save_artifacts(artifacts: Sequence[CrossSyncOutputFile]): + for a in artifacts: + a.render(save_to_disk=True) + + +if __name__ == "__main__": + import sys + + search_root = sys.argv[1] + outputs = convert_files_in_dir(search_root) + print(f"Generated {len(outputs)} artifacts: {[a.output_path for a in outputs]}") + save_artifacts(outputs) diff --git a/packages/google-cloud-bigtable/.cross_sync/transformers.py b/packages/google-cloud-bigtable/.cross_sync/transformers.py new file mode 100644 index 000000000000..ab2d5dd63d56 --- /dev/null +++ b/packages/google-cloud-bigtable/.cross_sync/transformers.py @@ -0,0 +1,333 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Provides a set of ast.NodeTransformer subclasses that are composed to generate +async code into sync code. 
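+
+For example (an illustrative snippet, not taken from the test suite): an input
+expression written as `CrossSync.rm_aio(await CrossSync.sleep(1))` is emitted in
+the sync output as `CrossSync._Sync_Impl.sleep(1)`; the rm_aio wrapper and the
+await keyword are stripped, and the remaining CrossSync symbol is replaced.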
+ +At a high level: +- The main entrypoint is CrossSyncFileProcessor, which is used to find files in + the codebase that include __CROSS_SYNC_OUTPUT__, and transform them + according to the `CrossSync` annotations they contains +- SymbolReplacer is used to swap out CrossSync.X with CrossSync._Sync_Impl.X +- RmAioFunctions is used to strip out asyncio keywords marked with CrossSync.rm_aio + (deferring to AsyncToSync to handle the actual transformation) +- StripAsyncConditionalBranches finds `if CrossSync.is_async:` conditionals, and strips out + the unneeded branch for the sync output +""" +from __future__ import annotations + +import ast + +import sys +# add cross_sync to path +sys.path.append("google/cloud/bigtable/data/_cross_sync") +from _decorators import AstDecorator + + +class SymbolReplacer(ast.NodeTransformer): + """ + Replaces all instances of a symbol in an AST with a replacement + + Works for function signatures, method calls, docstrings, and type annotations + """ + def __init__(self, replacements: dict[str, str]): + self.replacements = replacements + + def visit_Name(self, node): + if node.id in self.replacements: + node.id = self.replacements[node.id] + return node + + def visit_Attribute(self, node): + return ast.copy_location( + ast.Attribute( + self.visit(node.value), + self.replacements.get(node.attr, node.attr), + node.ctx, + ), + node, + ) + + def visit_AsyncFunctionDef(self, node): + """ + Replace async function docstrings + """ + # use same logic as FunctionDef + return self.visit_FunctionDef(node) + + def visit_FunctionDef(self, node): + """ + Replace function docstrings + """ + docstring = ast.get_docstring(node) + if docstring and isinstance(node.body[0], ast.Expr) and isinstance( + node.body[0].value, ast.Str + ): + for key_word, replacement in self.replacements.items(): + docstring = docstring.replace(key_word, replacement) + node.body[0].value.s = docstring + return self.generic_visit(node) + + def visit_Constant(self, node): + """Replace string type annotations""" + node.s = self.replacements.get(node.s, node.s) + return node + + +class AsyncToSync(ast.NodeTransformer): + """ + Replaces or strips all async keywords from a given AST + """ + def visit_Await(self, node): + """ + Strips await keyword + """ + return self.visit(node.value) + + def visit_AsyncFor(self, node): + """ + Replaces `async for` with `for` + """ + return ast.copy_location( + ast.For( + self.visit(node.target), + self.visit(node.iter), + [self.visit(stmt) for stmt in node.body], + [self.visit(stmt) for stmt in node.orelse], + ), + node, + ) + + def visit_AsyncWith(self, node): + """ + Replaces `async with` with `with` + """ + return ast.copy_location( + ast.With( + [self.visit(item) for item in node.items], + [self.visit(stmt) for stmt in node.body], + ), + node, + ) + + def visit_AsyncFunctionDef(self, node): + """ + Replaces `async def` with `def` + """ + return ast.copy_location( + ast.FunctionDef( + node.name, + self.visit(node.args), + [self.visit(stmt) for stmt in node.body], + [self.visit(decorator) for decorator in node.decorator_list], + node.returns and self.visit(node.returns), + ), + node, + ) + + def visit_ListComp(self, node): + """ + Replaces `async for` with `for` in list comprehensions + """ + for generator in node.generators: + generator.is_async = False + return self.generic_visit(node) + + +class RmAioFunctions(ast.NodeTransformer): + """ + Visits all calls marked with CrossSync.rm_aio, and removes asyncio keywords + """ + RM_AIO_FN_NAME = "rm_aio" + RM_AIO_CLASS_NAME = 
"CrossSync" + + def __init__(self): + self.to_sync = AsyncToSync() + + def _is_rm_aio_call(self, node) -> bool: + """ + Check if a node is a CrossSync.rm_aio call + """ + if isinstance(node, ast.Call) and isinstance(node.func, ast.Attribute) and isinstance(node.func.value, ast.Name): + if node.func.attr == self.RM_AIO_FN_NAME and node.func.value.id == self.RM_AIO_CLASS_NAME: + return True + return False + + def visit_Call(self, node): + if self._is_rm_aio_call(node): + return self.visit(self.to_sync.visit(node.args[0])) + return self.generic_visit(node) + + def visit_AsyncWith(self, node): + """ + `async with` statements can contain multiple async context managers. + + If any of them contains a CrossSync.rm_aio statement, convert into standard `with` statement + """ + if any(self._is_rm_aio_call(item.context_expr) for item in node.items + ): + new_node = ast.copy_location( + ast.With( + [self.visit(item) for item in node.items], + [self.visit(stmt) for stmt in node.body], + ), + node, + ) + return self.generic_visit(new_node) + return self.generic_visit(node) + + def visit_AsyncFor(self, node): + """ + Async for statements are not fully wrapped by calls + """ + it = node.iter + if self._is_rm_aio_call(it): + return ast.copy_location( + ast.For( + self.visit(node.target), + self.visit(it), + [self.visit(stmt) for stmt in node.body], + [self.visit(stmt) for stmt in node.orelse], + ), + node, + ) + return self.generic_visit(node) + + +class StripAsyncConditionalBranches(ast.NodeTransformer): + """ + Visits all if statements in an AST, and removes branches marked with CrossSync.is_async + """ + + def visit_If(self, node): + """ + remove CrossSync.is_async branches from top-level if statements + """ + kept_branch = None + # check for CrossSync.is_async + if self._is_async_check(node.test): + kept_branch = node.orelse + # check for not CrossSync.is_async + elif isinstance(node.test, ast.UnaryOp) and isinstance(node.test.op, ast.Not) and self._is_async_check(node.test.operand): + kept_branch = node.body + if kept_branch is not None: + # only keep the statements in the kept branch + return [self.visit(n) for n in kept_branch] + else: + # keep the entire if statement + return self.generic_visit(node) + + def _is_async_check(self, node) -> bool: + """ + Check for CrossSync.is_async or CrossSync.is_async == True checks + """ + if isinstance(node, ast.Attribute): + # for CrossSync.is_async + return isinstance(node.value, ast.Name) and node.value.id == "CrossSync" and node.attr == "is_async" + elif isinstance(node, ast.Compare): + # for CrossSync.is_async == True + return self._is_async_check(node.left) and (isinstance(node.ops[0], ast.Eq) or isinstance(node.ops[0], ast.Is)) and len(node.comparators) == 1 and node.comparators[0].value == True + return False + + +class CrossSyncFileProcessor(ast.NodeTransformer): + """ + Visits a file, looking for __CROSS_SYNC_OUTPUT__ annotations + + If found, the file is processed with the following steps: + - Strip out asyncio keywords within CrossSync.rm_aio calls + - transform classes and methods annotated with CrossSync decorators + - statements behind CrossSync.is_async conditional branches are removed + - Replace remaining CrossSync statements with corresponding CrossSync._Sync_Impl calls + - save changes in an output file at path specified by __CROSS_SYNC_OUTPUT__ + """ + FILE_ANNOTATION = "__CROSS_SYNC_OUTPUT__" + + def get_output_path(self, node): + for n in node.body: + if isinstance(n, ast.Assign): + for target in n.targets: + if isinstance(target, 
ast.Name) and target.id == self.FILE_ANNOTATION: + # return the output path + return n.value.s.replace(".", "/") + ".py" + + def visit_Module(self, node): + # look for __CROSS_SYNC_OUTPUT__ Assign statement + output_path = self.get_output_path(node) + if output_path: + # if found, process the file + converted = self.generic_visit(node) + # strip out CrossSync.rm_aio calls + converted = RmAioFunctions().visit(converted) + # strip out CrossSync.is_async branches + converted = StripAsyncConditionalBranches().visit(converted) + # replace CrossSync statements + converted = SymbolReplacer({"CrossSync": "CrossSync._Sync_Impl"}).visit(converted) + return converted + else: + # not cross_sync file. Return None + return None + + def visit_ClassDef(self, node): + """ + Called for each class in file. If class has a CrossSync decorator, it will be transformed + according to the decorator arguments. Otherwise, class is returned unchanged + """ + orig_decorators = node.decorator_list + for decorator in orig_decorators: + try: + handler = AstDecorator.get_for_node(decorator) + # transformation is handled in sync_ast_transform method of the decorator + node = handler.sync_ast_transform(node, globals()) + except ValueError: + # not cross_sync decorator + continue + return self.generic_visit(node) if node else None + + def visit_Assign(self, node): + """ + strip out __CROSS_SYNC_OUTPUT__ assignments + """ + if isinstance(node.targets[0], ast.Name) and node.targets[0].id == self.FILE_ANNOTATION: + return None + return self.generic_visit(node) + + def visit_FunctionDef(self, node): + """ + Visit any sync methods marked with CrossSync decorators + """ + return self.visit_AsyncFunctionDef(node) + + def visit_AsyncFunctionDef(self, node): + """ + Visit and transform any async methods marked with CrossSync decorators + """ + try: + if hasattr(node, "decorator_list"): + found_list, node.decorator_list = node.decorator_list, [] + for decorator in found_list: + try: + handler = AstDecorator.get_for_node(decorator) + node = handler.sync_ast_transform(node, globals()) + if node is None: + return None + # recurse to any nested functions + node = self.generic_visit(node) + except ValueError: + # keep unknown decorators + node.decorator_list.append(decorator) + continue + return self.generic_visit(node) + except ValueError as e: + raise ValueError(f"node {node.name} failed") from e diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_cross_sync/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_cross_sync/__init__.py new file mode 100644 index 000000000000..77a9ddae9d38 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_cross_sync/__init__.py @@ -0,0 +1,20 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from .cross_sync import CrossSync + + +__all__ = [ + "CrossSync", +] diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_cross_sync/_decorators.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_cross_sync/_decorators.py new file mode 100644 index 000000000000..f37b05b64ed7 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_cross_sync/_decorators.py @@ -0,0 +1,441 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Contains a set of AstDecorator classes, which define the behavior of CrossSync decorators. +Each AstDecorator class is used through @CrossSync. +""" +from __future__ import annotations +from typing import TYPE_CHECKING, Iterable + +if TYPE_CHECKING: + import ast + from typing import Callable, Any + + +class AstDecorator: + """ + Helper class for CrossSync decorators used for guiding ast transformations. + + AstDecorators are accessed in two ways: + 1. The decorations are used directly as method decorations in the async client, + wrapping existing classes and methods + 2. The decorations are read back when processing the AST transformations when + generating sync code. + + This class allows the same decorator to be used in both contexts. + + Typically, AstDecorators act as a no-op in async code, and the arguments simply + provide configuration guidance for the sync code generation. + """ + + @classmethod + def decorator(cls, *args, **kwargs) -> Callable[..., Any]: + """ + Provides a callable that can be used as a decorator function in async code + + AstDecorator.decorate is called by CrossSync when attaching decorators to + the CrossSync class. + + This method creates a new instance of the class, using the arguments provided + to the decorator, and defers to the async_decorator method of the instance + to build the wrapper function. + + Arguments: + *args: arguments to the decorator + **kwargs: keyword arguments to the decorator + """ + # decorators with no arguments will provide the function to be wrapped + # as the first argument. Pull it out if it exists + func = None + if len(args) == 1 and callable(args[0]): + func = args[0] + args = args[1:] + # create new AstDecorator instance from given decorator arguments + new_instance = cls(*args, **kwargs) + # build wrapper + wrapper = new_instance.async_decorator() + if wrapper is None: + # if no wrapper, return no-op decorator + return func or (lambda f: f) + elif func: + # if we can, return single wrapped function + return wrapper(func) + else: + # otherwise, return decorator function + return wrapper + + def async_decorator(self) -> Callable[..., Any] | None: + """ + Decorator to apply the async_impl decorator to the wrapped function + + Default implementation is a no-op + """ + return None + + def sync_ast_transform( + self, wrapped_node: ast.AST, transformers_globals: dict[str, Any] + ) -> ast.AST | None: + """ + When this decorator is encountered in the ast during sync generation, this method is called + to transform the wrapped node. 
+ + If None is returned, the node will be dropped from the output file. + + Args: + wrapped_node: ast node representing the wrapped function or class that is being wrapped + transformers_globals: the set of globals() from the transformers module. This is used to access + ast transformer classes that live outside the main codebase + Returns: + transformed ast node, or None if the node should be dropped + """ + return wrapped_node + + @classmethod + def get_for_node(cls, node: ast.Call | ast.Attribute | ast.Name) -> "AstDecorator": + """ + Build an AstDecorator instance from an ast decorator node + + The right subclass is found by comparing the string representation of the + decorator name to the class name. (Both names are converted to lowercase and + underscores are removed for comparison). If a matching subclass is found, + a new instance is created with the provided arguments. + + Args: + node: ast.Call node representing the decorator + Returns: + AstDecorator instance corresponding to the decorator + Raises: + ValueError: if the decorator cannot be parsed + """ + import ast + + # expect decorators in format @CrossSync. + # (i.e. should be an ast.Call or an ast.Attribute) + root_attr = node.func if isinstance(node, ast.Call) else node + if not isinstance(root_attr, ast.Attribute): + raise ValueError("Unexpected decorator format") + # extract the module and decorator names + if "CrossSync" in ast.dump(root_attr): + decorator_name = root_attr.attr + got_kwargs = ( + {kw.arg: cls._convert_ast_to_py(kw.value) for kw in node.keywords} + if hasattr(node, "keywords") + else {} + ) + got_args = ( + [cls._convert_ast_to_py(arg) for arg in node.args] + if hasattr(node, "args") + else [] + ) + # convert to standardized representation + formatted_name = decorator_name.replace("_", "").lower() + for subclass in cls.get_subclasses(): + if subclass.__name__.lower() == formatted_name: + return subclass(*got_args, **got_kwargs) + raise ValueError(f"Unknown decorator encountered: {decorator_name}") + else: + raise ValueError("Not a CrossSync decorator") + + @classmethod + def get_subclasses(cls) -> Iterable[type["AstDecorator"]]: + """ + Get all subclasses of AstDecorator + + Returns: + list of all subclasses of AstDecorator + """ + for subclass in cls.__subclasses__(): + yield from subclass.get_subclasses() + yield subclass + + @classmethod + def _convert_ast_to_py(cls, ast_node: ast.expr | None) -> Any: + """ + Helper to convert ast primitives to python primitives. Used when unwrapping arguments + """ + import ast + + if ast_node is None: + return None + if isinstance(ast_node, ast.Constant): + return ast_node.value + if isinstance(ast_node, ast.List): + return [cls._convert_ast_to_py(node) for node in ast_node.elts] + if isinstance(ast_node, ast.Tuple): + return tuple(cls._convert_ast_to_py(node) for node in ast_node.elts) + if isinstance(ast_node, ast.Dict): + return { + cls._convert_ast_to_py(k): cls._convert_ast_to_py(v) + for k, v in zip(ast_node.keys, ast_node.values) + } + raise ValueError(f"Unsupported type {type(ast_node)}") + + +class ConvertClass(AstDecorator): + """ + Class decorator for guiding generation of sync classes + + Args: + sync_name: use a new name for the sync class + replace_symbols: a dict of symbols and replacements to use when generating sync class + docstring_format_vars: a dict of variables to replace in the docstring + rm_aio: if True, automatically strip all asyncio keywords from method. If false, + only keywords wrapped in CrossSync.rm_aio() calls to be removed. 
+ add_mapping_for_name: when given, will add a new attribute to CrossSync, + so the original class and its sync version can be accessed from CrossSync. + """ + + def __init__( + self, + sync_name: str | None = None, + *, + replace_symbols: dict[str, str] | None = None, + docstring_format_vars: dict[str, tuple[str | None, str | None]] | None = None, + rm_aio: bool = False, + add_mapping_for_name: str | None = None, + ): + self.sync_name = sync_name + self.replace_symbols = replace_symbols + docstring_format_vars = docstring_format_vars or {} + self.async_docstring_format_vars = { + k: v[0] or "" for k, v in docstring_format_vars.items() + } + self.sync_docstring_format_vars = { + k: v[1] or "" for k, v in docstring_format_vars.items() + } + self.rm_aio = rm_aio + self.add_mapping_for_name = add_mapping_for_name + + def async_decorator(self): + """ + Use async decorator as a hook to update CrossSync mappings + """ + from .cross_sync import CrossSync + + if not self.add_mapping_for_name and not self.async_docstring_format_vars: + # return None if no changes needed + return None + + new_mapping = self.add_mapping_for_name + + def decorator(cls): + if new_mapping: + CrossSync.add_mapping(new_mapping, cls) + if self.async_docstring_format_vars: + cls.__doc__ = cls.__doc__.format(**self.async_docstring_format_vars) + return cls + + return decorator + + def sync_ast_transform(self, wrapped_node, transformers_globals): + """ + Transform async class into sync copy + """ + import ast + import copy + + # copy wrapped node + wrapped_node = copy.deepcopy(wrapped_node) + # update name + if self.sync_name: + wrapped_node.name = self.sync_name + # strip CrossSync decorators + if hasattr(wrapped_node, "decorator_list"): + wrapped_node.decorator_list = [ + d for d in wrapped_node.decorator_list if "CrossSync" not in ast.dump(d) + ] + else: + wrapped_node.decorator_list = [] + # strip async keywords if specified + if self.rm_aio: + wrapped_node = transformers_globals["AsyncToSync"]().visit(wrapped_node) + # add mapping decorator if needed + if self.add_mapping_for_name: + wrapped_node.decorator_list.append( + ast.Call( + func=ast.Attribute( + value=ast.Name(id="CrossSync", ctx=ast.Load()), + attr="add_mapping_decorator", + ctx=ast.Load(), + ), + args=[ + ast.Constant(value=self.add_mapping_for_name), + ], + keywords=[], + ) + ) + # replace symbols if specified + if self.replace_symbols: + wrapped_node = transformers_globals["SymbolReplacer"]( + self.replace_symbols + ).visit(wrapped_node) + # update docstring if specified + if self.sync_docstring_format_vars: + docstring = ast.get_docstring(wrapped_node) + if docstring: + wrapped_node.body[0].value = ast.Constant( + value=docstring.format(**self.sync_docstring_format_vars) + ) + return wrapped_node + + +class Convert(ConvertClass): + """ + Method decorator to mark async methods to be converted to sync methods + + Args: + sync_name: use a new name for the sync method + replace_symbols: a dict of symbols and replacements to use when generating sync method + docstring_format_vars: a dict of variables to replace in the docstring + rm_aio: if True, automatically strip all asyncio keywords from method. If False, + only the signature `async def` is stripped. Other keywords must be wrapped in + CrossSync.rm_aio() calls to be removed. 
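As a rough usage sketch for the arguments listed above (class and method names are hypothetical; only the decorators come from this module): `docstring_format_vars` is applied on the async side at import time, while the sync generator would substitute the second element of each tuple.

```python
from google.cloud.bigtable.data._cross_sync import CrossSync


@CrossSync.convert_class(
    sync_name="ExampleClient",
    docstring_format_vars={"mode": ("asynchronous", "synchronous")},
)
class ExampleClientAsync:
    """Example client ({mode} flavor)."""

    @CrossSync.convert(
        sync_name="read", docstring_format_vars={"mode": ("async", "sync")}
    )
    async def read_async(self):
        """Read rows ({mode})."""
        return []


# The async decorators format docstrings with the first element of each
# tuple at import time; the generated sync code would use the second.
assert ExampleClientAsync.__doc__ == "Example client (asynchronous flavor)."
assert ExampleClientAsync.read_async.__doc__ == "Read rows (async)."
```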
+ """ + + def __init__( + self, + sync_name: str | None = None, + *, + replace_symbols: dict[str, str] | None = None, + docstring_format_vars: dict[str, tuple[str | None, str | None]] | None = None, + rm_aio: bool = True, + ): + super().__init__( + sync_name=sync_name, + replace_symbols=replace_symbols, + docstring_format_vars=docstring_format_vars, + rm_aio=rm_aio, + add_mapping_for_name=None, + ) + + def sync_ast_transform(self, wrapped_node, transformers_globals): + """ + Transform async method into sync + """ + import ast + + # replace async function with sync function + converted = ast.copy_location( + ast.FunctionDef( + wrapped_node.name, + wrapped_node.args, + wrapped_node.body, + wrapped_node.decorator_list + if hasattr(wrapped_node, "decorator_list") + else [], + wrapped_node.returns if hasattr(wrapped_node, "returns") else None, + ), + wrapped_node, + ) + # transform based on arguments + return super().sync_ast_transform(converted, transformers_globals) + + +class Drop(AstDecorator): + """ + Method decorator to drop methods or classes from the sync output + """ + + def sync_ast_transform(self, wrapped_node, transformers_globals): + """ + Drop from sync output + """ + return None + + +class Pytest(AstDecorator): + """ + Used in place of pytest.mark.asyncio to mark tests + + When generating sync version, also runs rm_aio to remove async keywords from + entire test function + + Args: + rm_aio: if True, automatically strip all asyncio keywords from test code. + Defaults to True, to simplify test code generation. + """ + + def __init__(self, rm_aio=True): + self.rm_aio = rm_aio + + def async_decorator(self): + import pytest + + return pytest.mark.asyncio + + def sync_ast_transform(self, wrapped_node, transformers_globals): + """ + convert async to sync + """ + import ast + + # always convert method to sync + converted = ast.copy_location( + ast.FunctionDef( + wrapped_node.name, + wrapped_node.args, + wrapped_node.body, + wrapped_node.decorator_list + if hasattr(wrapped_node, "decorator_list") + else [], + wrapped_node.returns if hasattr(wrapped_node, "returns") else None, + ), + wrapped_node, + ) + # convert entire body to sync if rm_aio is set + if self.rm_aio: + converted = transformers_globals["AsyncToSync"]().visit(converted) + return converted + + +class PytestFixture(AstDecorator): + """ + Used in place of pytest.fixture or pytest.mark.asyncio to mark fixtures + + Args: + *args: all arguments to pass to pytest.fixture + **kwargs: all keyword arguments to pass to pytest.fixture + """ + + def __init__(self, *args, **kwargs): + self._args = args + self._kwargs = kwargs + + def async_decorator(self): + import pytest_asyncio # type: ignore + + return lambda f: pytest_asyncio.fixture(*self._args, **self._kwargs)(f) + + def sync_ast_transform(self, wrapped_node, transformers_globals): + import ast + import copy + + new_node = copy.deepcopy(wrapped_node) + if not hasattr(new_node, "decorator_list"): + new_node.decorator_list = [] + new_node.decorator_list.append( + ast.Call( + func=ast.Attribute( + value=ast.Name(id="pytest", ctx=ast.Load()), + attr="fixture", + ctx=ast.Load(), + ), + args=[ast.Constant(value=a) for a in self._args], + keywords=[ + ast.keyword(arg=k, value=ast.Constant(value=v)) + for k, v in self._kwargs.items() + ], + ) + ) + return new_node diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_cross_sync/_mapping_meta.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_cross_sync/_mapping_meta.py new file mode 100644 index 
000000000000..5312708ccc46
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_cross_sync/_mapping_meta.py
@@ -0,0 +1,64 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import annotations
+from typing import Any
+
+
+class MappingMeta(type):
+    """
+    Metaclass to provide add_mapping functionality, allowing users to add
+    custom attributes to derived classes at runtime.
+
+    Using a metaclass allows us to share functionality between CrossSync
+    and CrossSync._Sync_Impl, and it works better with mypy checks than
+    monkeypatching
+    """
+
+    # list of attributes that can be added to the derived class at runtime
+    _runtime_replacements: dict[tuple[MappingMeta, str], Any] = {}
+
+    def add_mapping(cls: MappingMeta, name: str, value: Any):
+        """
+        Add a new attribute to the class, for replacing library-level symbols
+
+        Raises:
+            - AttributeError if the attribute already exists with a different value
+        """
+        key = (cls, name)
+        old_value = cls._runtime_replacements.get(key)
+        if old_value is None:
+            cls._runtime_replacements[key] = value
+        elif old_value != value:
+            raise AttributeError(f"Conflicting assignments for CrossSync.{name}")
+
+    def add_mapping_decorator(cls: MappingMeta, name: str):
+        """
+        Exposes add_mapping as a class decorator
+        """
+
+        def decorator(wrapped_cls):
+            cls.add_mapping(name, wrapped_cls)
+            return wrapped_cls
+
+        return decorator
+
+    def __getattr__(cls: MappingMeta, name: str):
+        """
+        Retrieve custom attributes
+        """
+        key = (cls, name)
+        found = cls._runtime_replacements.get(key)
+        if found is not None:
+            return found
+        raise AttributeError(f"CrossSync has no attribute {name}")
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_cross_sync/cross_sync.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_cross_sync/cross_sync.py
new file mode 100644
index 000000000000..1f1ee111aee9
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_cross_sync/cross_sync.py
@@ -0,0 +1,334 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""
+CrossSync provides a toolset for sharing logic between async and sync codebases, including:
+- A set of decorators for annotating async classes and functions
+  (@CrossSync.convert_class, @CrossSync.convert, @CrossSync.drop, ...)
+- A set of wrappers to wrap common objects and types that have corresponding async and sync implementations
+  (CrossSync.Queue, CrossSync.Condition, CrossSync.Future, ...)
+- A set of function implementations for common async operations that can be used in both async and sync codebases
+  (CrossSync.gather_partials, CrossSync.wait, CrossSync.event_wait, ...)
+- CrossSync.rm_aio(), which is used to annotate regions of the code containing async keywords to strip
+
+A separate module will use CrossSync annotations to generate a corresponding sync
+class based on a decorated async class.
+
+Usage Example:
+```python
+__CROSS_SYNC_OUTPUT__ = "path.to.sync_module"
+
+@CrossSync.convert_class(sync_name="MyClass")
+class MyAsyncClass:
+
+    @CrossSync.convert
+    async def async_func(self, arg: int) -> int:
+        await CrossSync.sleep(1)
+        return arg
+```
+"""
+
+from __future__ import annotations
+
+from typing import (
+    TypeVar,
+    Any,
+    Callable,
+    Coroutine,
+    Sequence,
+    Union,
+    AsyncIterable,
+    AsyncIterator,
+    AsyncGenerator,
+    TYPE_CHECKING,
+)
+import typing
+
+import asyncio
+import sys
+import concurrent.futures
+import google.api_core.retry as retries
+import queue
+import threading
+import time
+from ._decorators import (
+    ConvertClass,
+    Convert,
+    Drop,
+    Pytest,
+    PytestFixture,
+)
+from ._mapping_meta import MappingMeta
+
+if TYPE_CHECKING:
+    from typing_extensions import TypeAlias
+
+T = TypeVar("T")
+
+
+class CrossSync(metaclass=MappingMeta):
+    # support CrossSync.is_async to check if the current environment is async
+    is_async = True
+
+    # provide aliases for common async functions and types
+    sleep = asyncio.sleep
+    retry_target = retries.retry_target_async
+    retry_target_stream = retries.retry_target_stream_async
+    Retry = retries.AsyncRetry
+    Queue: TypeAlias = asyncio.Queue
+    Condition: TypeAlias = asyncio.Condition
+    Future: TypeAlias = asyncio.Future
+    Task: TypeAlias = asyncio.Task
+    Event: TypeAlias = asyncio.Event
+    Semaphore: TypeAlias = asyncio.Semaphore
+    StopIteration: TypeAlias = StopAsyncIteration
+    # provide aliases for common async type annotations
+    Awaitable: TypeAlias = typing.Awaitable
+    Iterable: TypeAlias = AsyncIterable
+    Iterator: TypeAlias = AsyncIterator
+    Generator: TypeAlias = AsyncGenerator
+
+    # decorators
+    convert_class = ConvertClass.decorator  # decorate classes to convert
+    convert = Convert.decorator  # decorate methods to convert from async to sync
+    drop = Drop.decorator  # decorate methods to remove from sync version
+    pytest = Pytest.decorator  # decorate test methods to run with pytest-asyncio
+    pytest_fixture = (
+        PytestFixture.decorator
+    )  # decorate test methods to run with pytest fixture
+
+    @classmethod
+    def next(cls, iterable):
+        return iterable.__anext__()
+
+    @classmethod
+    def Mock(cls, *args, **kwargs):
+        """
+        Alias for AsyncMock, importing at runtime to avoid hard dependency on mock
+        """
+        try:
+            from unittest.mock import AsyncMock  # type: ignore
+        except ImportError:  # pragma: NO COVER
+            from mock import AsyncMock  # type: ignore
+        return AsyncMock(*args, **kwargs)
+
+    @staticmethod
+    async def gather_partials(
+        partial_list: Sequence[Callable[[], Awaitable[T]]],
+        return_exceptions: bool = False,
+        sync_executor: concurrent.futures.ThreadPoolExecutor | None = None,
+    ) -> list[T | BaseException]:
+        """
+        abstraction over asyncio.gather, but with a set of partial functions instead
+        of coroutines, to work with sync functions.
+        To use gather with a set of futures instead of partials, use CrossSync.wait
+
+        In the async version, the partials are expected to return an awaitable object. Partials
+        are unpacked and awaited in the gather call.
+
+        Sync version implemented with threadpool executor
+
+        Returns:
+            - a list of results (or exceptions, if return_exceptions=True) in the same order as partial_list
+        """
+        if not partial_list:
+            return []
+        awaitable_list = [partial() for partial in partial_list]
+        return await asyncio.gather(
+            *awaitable_list, return_exceptions=return_exceptions
+        )
+
+    @staticmethod
+    async def wait(
+        futures: Sequence[CrossSync.Future[T]], timeout: float | None = None
+    ) -> tuple[set[CrossSync.Future[T]], set[CrossSync.Future[T]]]:
+        """
+        abstraction over asyncio.wait
+
+        Returns:
+            - a tuple of (done, pending) sets of futures
+        """
+        if not futures:
+            return set(), set()
+        return await asyncio.wait(futures, timeout=timeout)
+
+    @staticmethod
+    async def event_wait(
+        event: CrossSync.Event,
+        timeout: float | None = None,
+        async_break_early: bool = True,
+    ) -> None:
+        """
+        abstraction over asyncio.Event.wait
+
+        Args:
+            - event: event to wait for
+            - timeout: if set, will break out early after `timeout` seconds
+            - async_break_early: if False, the async version will wait for
+                the full timeout even if the event is set before the timeout.
+                This avoids creating a new background task
+        """
+        if timeout is None:
+            await event.wait()
+        elif not async_break_early:
+            if not event.is_set():
+                await asyncio.sleep(timeout)
+        else:
+            try:
+                await asyncio.wait_for(event.wait(), timeout=timeout)
+            except asyncio.TimeoutError:
+                pass
+
+    @staticmethod
+    def create_task(
+        fn: Callable[..., Coroutine[Any, Any, T]],
+        *fn_args,
+        sync_executor: concurrent.futures.ThreadPoolExecutor | None = None,
+        task_name: str | None = None,
+        **fn_kwargs,
+    ) -> CrossSync.Task[T]:
+        """
+        abstraction over asyncio.create_task. Sync version implemented with threadpool executor
+
+        sync_executor: ThreadPoolExecutor to use for sync operations.
Ignored in async version + """ + task: CrossSync.Task[T] = asyncio.create_task(fn(*fn_args, **fn_kwargs)) + if task_name and sys.version_info >= (3, 8): + task.set_name(task_name) + return task + + @staticmethod + async def yield_to_event_loop() -> None: + """ + Call asyncio.sleep(0) to yield to allow other tasks to run + """ + await asyncio.sleep(0) + + @staticmethod + def verify_async_event_loop() -> None: + """ + Raises RuntimeError if the event loop is not running + """ + asyncio.get_running_loop() + + @staticmethod + def rm_aio(statement: T) -> T: + """ + Used to annotate regions of the code containing async keywords to strip + + All async keywords inside an rm_aio call are removed, along with + `async with` and `async for` statements containing CrossSync.rm_aio() in the body + """ + return statement + + class _Sync_Impl(metaclass=MappingMeta): + """ + Provide sync versions of the async functions and types in CrossSync + """ + + is_async = False + + sleep = time.sleep + next = next + retry_target = retries.retry_target + retry_target_stream = retries.retry_target_stream + Retry = retries.Retry + Queue: TypeAlias = queue.Queue + Condition: TypeAlias = threading.Condition + Future: TypeAlias = concurrent.futures.Future + Task: TypeAlias = concurrent.futures.Future + Event: TypeAlias = threading.Event + Semaphore: TypeAlias = threading.Semaphore + StopIteration: TypeAlias = StopIteration + # type annotations + Awaitable: TypeAlias = Union[T] + Iterable: TypeAlias = typing.Iterable + Iterator: TypeAlias = typing.Iterator + Generator: TypeAlias = typing.Generator + + @classmethod + def Mock(cls, *args, **kwargs): + from unittest.mock import Mock + + return Mock(*args, **kwargs) + + @staticmethod + def event_wait( + event: CrossSync._Sync_Impl.Event, + timeout: float | None = None, + async_break_early: bool = True, + ) -> None: + event.wait(timeout=timeout) + + @staticmethod + def gather_partials( + partial_list: Sequence[Callable[[], T]], + return_exceptions: bool = False, + sync_executor: concurrent.futures.ThreadPoolExecutor | None = None, + ) -> list[T | BaseException]: + if not partial_list: + return [] + if not sync_executor: + raise ValueError("sync_executor is required for sync version") + futures_list = [sync_executor.submit(partial) for partial in partial_list] + results_list: list[T | BaseException] = [] + for future in futures_list: + found_exc = future.exception() + if found_exc is not None: + if return_exceptions: + results_list.append(found_exc) + else: + raise found_exc + else: + results_list.append(future.result()) + return results_list + + @staticmethod + def wait( + futures: Sequence[CrossSync._Sync_Impl.Future[T]], + timeout: float | None = None, + ) -> tuple[ + set[CrossSync._Sync_Impl.Future[T]], set[CrossSync._Sync_Impl.Future[T]] + ]: + if not futures: + return set(), set() + return concurrent.futures.wait(futures, timeout=timeout) + + @staticmethod + def create_task( + fn: Callable[..., T], + *fn_args, + sync_executor: concurrent.futures.ThreadPoolExecutor | None = None, + task_name: str | None = None, + **fn_kwargs, + ) -> CrossSync._Sync_Impl.Task[T]: + """ + abstraction over asyncio.create_task. Sync version implemented with threadpool executor + + sync_executor: ThreadPoolExecutor to use for sync operations. 
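To make the parallel between the two flavors concrete, here is a small sketch, not taken from the patch, that pushes the same work through `CrossSync.gather_partials` and `CrossSync._Sync_Impl.gather_partials`; the helper functions are invented, and only the event-loop versus executor plumbing differs between the two calls.

```python
import asyncio
import concurrent.futures

from google.cloud.bigtable.data._cross_sync import CrossSync


async def double_async(x: int) -> int:
    return x * 2


def double_sync(x: int) -> int:
    return x * 2


# Async flavor: each partial produces a coroutine, gathered on the event loop.
async def run_async() -> list:
    partials = [lambda i=i: double_async(i) for i in range(3)]
    return await CrossSync.gather_partials(partials)


print(asyncio.run(run_async()))  # [0, 2, 4]

# Sync flavor: same shape, but a ThreadPoolExecutor is required.
with concurrent.futures.ThreadPoolExecutor() as executor:
    partials = [lambda i=i: double_sync(i) for i in range(3)]
    print(CrossSync._Sync_Impl.gather_partials(partials, sync_executor=executor))  # [0, 2, 4]
```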
Ignored in async version + """ + if not sync_executor: + raise ValueError("sync_executor is required for sync version") + return sync_executor.submit(fn, *fn_args, **fn_kwargs) + + @staticmethod + def yield_to_event_loop() -> None: + """ + No-op for sync version + """ + pass + + @staticmethod + def verify_async_event_loop() -> None: + """ + No-op for sync version + """ + pass diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index 5fb94526dbea..1e153efe2286 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -48,7 +48,7 @@ UNIT_TEST_EXTRAS: List[str] = [] UNIT_TEST_EXTRAS_BY_PYTHON: Dict[str, List[str]] = {} -SYSTEM_TEST_PYTHON_VERSIONS: List[str] = ["3.8"] +SYSTEM_TEST_PYTHON_VERSIONS: List[str] = ["3.8", "3.12"] SYSTEM_TEST_STANDARD_DEPENDENCIES: List[str] = [ "mock", "pytest", @@ -56,6 +56,8 @@ ] SYSTEM_TEST_EXTERNAL_DEPENDENCIES: List[str] = [ "pytest-asyncio==0.21.2", + "black==23.7.0", + "pyyaml==6.0.2", ] SYSTEM_TEST_LOCAL_DEPENDENCIES: List[str] = [] SYSTEM_TEST_DEPENDENCIES: List[str] = [] @@ -256,7 +258,7 @@ def install_systemtest_dependencies(session, *constraints): session.install("-e", ".", *constraints) -@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS) +@nox.session(python="3.8") def system_emulated(session): import subprocess import signal diff --git a/packages/google-cloud-bigtable/tests/system/cross_sync/test_cases/async_to_sync.yaml b/packages/google-cloud-bigtable/tests/system/cross_sync/test_cases/async_to_sync.yaml new file mode 100644 index 000000000000..99d39cbc51d9 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/system/cross_sync/test_cases/async_to_sync.yaml @@ -0,0 +1,76 @@ +tests: + - description: "async for loop fn" + before: | + async def func_name(): + async for i in range(10): + await routine() + return 42 + transformers: [AsyncToSync] + after: | + def func_name(): + for i in range(10): + routine() + return 42 + + - description: "async with statement" + before: | + async def func_name(): + async with context_manager() as cm: + await do_something(cm) + transformers: [AsyncToSync] + after: | + def func_name(): + with context_manager() as cm: + do_something(cm) + + - description: "async function definition" + before: | + async def async_function(param1, param2): + result = await some_coroutine() + return result + transformers: [AsyncToSync] + after: | + def async_function(param1, param2): + result = some_coroutine() + return result + + - description: "list comprehension with async for" + before: | + async def func_name(): + result = [x async for x in aiter() if await predicate(x)] + transformers: [AsyncToSync] + after: | + def func_name(): + result = [x for x in aiter() if predicate(x)] + + - description: "multiple async features in one function" + before: | + async def complex_function(): + async with resource_manager() as res: + async for item in res.items(): + if await check(item): + yield await process(item) + transformers: [AsyncToSync] + after: | + def complex_function(): + with resource_manager() as res: + for item in res.items(): + if check(item): + yield process(item) + + - description: "nested async constructs" + before: | + async def nested_async(): + async with outer_context(): + async for x in outer_iter(): + async with inner_context(x): + async for y in inner_iter(x): + await process(x, y) + transformers: [AsyncToSync] + after: | + def nested_async(): + with outer_context(): + for x in outer_iter(): + with inner_context(x): + for y in 
inner_iter(x): + process(x, y) diff --git a/packages/google-cloud-bigtable/tests/system/cross_sync/test_cases/cross_sync_files.yaml b/packages/google-cloud-bigtable/tests/system/cross_sync/test_cases/cross_sync_files.yaml new file mode 100644 index 000000000000..5666325cea03 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/system/cross_sync/test_cases/cross_sync_files.yaml @@ -0,0 +1,469 @@ +tests: + - description: "No output annotation" + before: | + class MyAsyncClass: + async def my_method(self): + pass + + transformers: + - name: CrossSyncFileProcessor + after: null + + - description: "CrossSync.convert_class with default sync_name" + before: | + __CROSS_SYNC_OUTPUT__ = "out.path" + @CrossSync.convert_class + class MyClass: + async def my_method(self): + pass + + transformers: + - name: CrossSyncFileProcessor + after: | + class MyClass: + + async def my_method(self): + pass + + - description: "CrossSync.convert_class with custom sync_name" + before: | + __CROSS_SYNC_OUTPUT__ = "out.path" + @CrossSync.convert_class(sync_name="MyClass") + class MyAsyncClass: + async def my_method(self): + pass + + transformers: + - name: CrossSyncFileProcessor + after: | + class MyClass: + + async def my_method(self): + pass + + - description: "CrossSync.convert_class with replace_symbols" + before: | + __CROSS_SYNC_OUTPUT__ = "out.path" + @CrossSync.convert_class( + sync_name="MyClass", + replace_symbols={"AsyncBase": "SyncBase", "ParentA": "ParentB"} + ) + class MyAsyncClass(ParentA): + def __init__(self, base: AsyncBase): + self.base = base + + transformers: + - name: CrossSyncFileProcessor + after: | + class MyClass(ParentB): + + def __init__(self, base: SyncBase): + self.base = base + + - description: "CrossSync.convert_class with docstring formatting" + before: | + __CROSS_SYNC_OUTPUT__ = "out.path" + @CrossSync.convert_class( + sync_name="MyClass", + docstring_format_vars={"type": ("async", "sync")} + ) + class MyAsyncClass: + """This is a {type} class.""" + + transformers: + - name: CrossSyncFileProcessor + after: | + class MyClass: + """This is a sync class.""" + + - description: "CrossSync.convert_class with multiple decorators and methods" + before: | + __CROSS_SYNC_OUTPUT__ = "out.path" + @CrossSync.convert_class(sync_name="MyClass") + @some_other_decorator + class MyAsyncClass: + @CrossSync.convert(rm_aio=False) + async def my_method(self): + async with self.base.connection(): + return await self.base.my_method() + + @CrossSync.drop + async def async_only_method(self): + await self.async_operation() + + def sync_method(self): + return "This method stays the same" + + @CrossSync.pytest_fixture + def fixture(self): + pass + + transformers: + - name: CrossSyncFileProcessor + after: | + @some_other_decorator + class MyClass: + + def my_method(self): + async with self.base.connection(): + return await self.base.my_method() + + def sync_method(self): + return "This method stays the same" + + @pytest.fixture() + def fixture(self): + pass + + - description: "CrossSync.convert_class with nested classes drop" + before: | + __CROSS_SYNC_OUTPUT__ = "out.path" + @CrossSync.convert_class(sync_name="MyClass") + class MyAsyncClass: + @CrossSync.drop + class NestedAsyncClass: + async def nested_method(self, base: AsyncBase): + pass + + @CrossSync.convert + async def use_nested(self): + nested = self.NestedAsyncClass() + CrossSync.rm_aio(await nested.nested_method()) + transformers: + - name: CrossSyncFileProcessor + after: | + class MyClass: + + def use_nested(self): + nested = 
self.NestedAsyncClass() + nested.nested_method() + + - description: "CrossSync.convert_class with nested classes explicit" + before: | + __CROSS_SYNC_OUTPUT__ = "out.path" + @CrossSync.convert_class(sync_name="MyClass", replace_symbols={"AsyncBase": "SyncBase"}) + class MyAsyncClass: + @CrossSync.convert_class + class NestedClass: + async def nested_method(self, base: AsyncBase): + pass + + @CrossSync.convert + async def use_nested(self): + nested = self.NestedAsyncClass() + CrossSync.rm_aio(await nested.nested_method()) + transformers: + - name: CrossSyncFileProcessor + after: | + class MyClass: + + class NestedClass: + + async def nested_method(self, base: SyncBase): + pass + + def use_nested(self): + nested = self.NestedAsyncClass() + nested.nested_method() + + - description: "CrossSync.convert_class with nested classes implicit" + before: | + __CROSS_SYNC_OUTPUT__ = "out.path" + @CrossSync.convert_class(sync_name="MyClass", replace_symbols={"AsyncBase": "SyncBase"}) + class MyAsyncClass: + + class NestedClass: + async def nested_method(self, base: AsyncBase): + pass + + @CrossSync.convert + async def use_nested(self): + nested = self.NestedAsyncClass() + CrossSync.rm_aio(await nested.nested_method()) + transformers: + - name: CrossSyncFileProcessor + after: | + class MyClass: + + class NestedClass: + + async def nested_method(self, base: SyncBase): + pass + + def use_nested(self): + nested = self.NestedAsyncClass() + nested.nested_method() + + - description: "CrossSync.convert_class with add_mapping" + before: | + __CROSS_SYNC_OUTPUT__ = "out.path" + @CrossSync.convert_class( + sync_name="MyClass", + add_mapping_for_name="MyClass" + ) + class MyAsyncClass: + async def my_method(self): + pass + + transformers: + - name: CrossSyncFileProcessor + after: | + @CrossSync._Sync_Impl.add_mapping_decorator("MyClass") + class MyClass: + + async def my_method(self): + pass + + - description: "CrossSync.convert_class with rm_aio" + before: | + __CROSS_SYNC_OUTPUT__ = "out.path" + @CrossSync.convert_class(rm_aio=True) + class MyClass: + async def my_method(self): + async for item in self.items: + await self.process(item) + transformers: [CrossSyncFileProcessor] + after: | + class MyClass: + + def my_method(self): + for item in self.items: + self.process(item) + + - description: "CrossSync.convert_class with CrossSync calls" + before: | + __CROSS_SYNC_OUTPUT__ = "out.path" + @CrossSync.convert_class(sync_name="MyClass") + class MyAsyncClass: + @CrossSync.convert + async def my_method(self): + async with CrossSync.rm_aio(CrossSync.Condition()) as c: + CrossSync.rm_aio(await CrossSync.yield_to_event_loop()) + + transformers: + - name: CrossSyncFileProcessor + after: | + class MyClass: + + def my_method(self): + with CrossSync._Sync_Impl.Condition() as c: + CrossSync._Sync_Impl.yield_to_event_loop() + + - description: "Convert async method with @CrossSync.convert" + before: | + __CROSS_SYNC_OUTPUT__ = "out.path" + @CrossSync.convert + async def my_method(self, arg): + pass + transformers: [CrossSyncFileProcessor] + after: | + def my_method(self, arg): + pass + + - description: "Convert async method with custom sync name" + before: | + __CROSS_SYNC_OUTPUT__ = "out.path" + @CrossSync.convert(sync_name="sync_method") + async def async_method(self, arg): + return await self.helper(arg) + transformers: [CrossSyncFileProcessor] + after: | + def sync_method(self, arg): + return self.helper(arg) + + - description: "Convert async method with rm_aio=True" + before: | + __CROSS_SYNC_OUTPUT__ = "out.path" + 
@CrossSync.convert(rm_aio=True) + async def async_method(self): + async with self.lock: + async for item in self.items: + await self.process(item) + transformers: [CrossSyncFileProcessor] + after: | + def async_method(self): + with self.lock: + for item in self.items: + self.process(item) + + - description: "Drop method from sync version" + before: | + __CROSS_SYNC_OUTPUT__ = "out.path" + def keep_method(self): + pass + + @CrossSync.drop + async def async_only_method(self): + await self.async_operation() + transformers: [CrossSyncFileProcessor] + after: | + def keep_method(self): + pass + + - description: "Drop class from sync version" + before: | + __CROSS_SYNC_OUTPUT__ = "out.path" + @CrossSync.drop + class DropMe: + pass + class Keeper: + pass + transformers: [CrossSyncFileProcessor] + after: | + class Keeper: + pass + + - description: "Convert.pytest" + before: | + __CROSS_SYNC_OUTPUT__ = "out.path" + @CrossSync.pytest + async def test_async_function(): + result = await async_operation() + assert result == expected_value + transformers: [CrossSyncFileProcessor] + after: | + def test_async_function(): + result = async_operation() + assert result == expected_value + + - description: "CrossSync.pytest with rm_aio=False" + before: | + __CROSS_SYNC_OUTPUT__ = "out.path" + @CrossSync.pytest(rm_aio=False) + async def test_partial_async(): + async with context_manager(): + result = await async_function() + assert result == expected_value + transformers: [CrossSyncFileProcessor] + after: | + def test_partial_async(): + async with context_manager(): + result = await async_function() + assert result == expected_value + + - description: "Convert async pytest fixture" + before: | + __CROSS_SYNC_OUTPUT__ = "out.path" + @CrossSync.pytest_fixture + @CrossSync.convert(rm_aio=True) + async def my_fixture(): + resource = await setup_resource() + yield resource + await cleanup_resource(resource) + transformers: [CrossSyncFileProcessor] + after: | + @pytest.fixture() + def my_fixture(): + resource = setup_resource() + yield resource + cleanup_resource(resource) + + - description: "Convert pytest fixture with custom parameters" + before: | + __CROSS_SYNC_OUTPUT__ = "out.path" + @CrossSync.pytest_fixture(scope="module", autouse=True) + def my_fixture(): + resource = setup_resource() + yield resource + cleanup_resource(resource) + transformers: [CrossSyncFileProcessor] + after: | + @pytest.fixture(scope="module", autouse=True) + def my_fixture(): + resource = setup_resource() + yield resource + cleanup_resource(resource) + + - description: "Convert method with multiple stacked decorators" + before: | + __CROSS_SYNC_OUTPUT__ = "out.path" + @CrossSync.convert(sync_name="sync_multi_decorated") + @CrossSync.pytest + @some_other_decorator + async def async_multi_decorated(self, arg): + result = await self.async_operation(arg) + return result + transformers: [CrossSyncFileProcessor] + after: | + @some_other_decorator + def sync_multi_decorated(self, arg): + result = self.async_operation(arg) + return result + + - description: "Convert method with multiple stacked decorators in class" + before: | + __CROSS_SYNC_OUTPUT__ = "out.path" + @CrossSync.convert_class + class MyClass: + @CrossSync.convert(sync_name="sync_multi_decorated") + @CrossSync.pytest + @some_other_decorator + async def async_multi_decorated(self, arg): + result = await self.async_operation(arg) + return result + transformers: [CrossSyncFileProcessor] + after: | + class MyClass: + + @some_other_decorator + def sync_multi_decorated(self, arg): + result 
= self.async_operation(arg) + return result + + - description: "Convert method with stacked decorators including rm_aio" + before: | + __CROSS_SYNC_OUTPUT__ = "out.path" + @CrossSync.convert(rm_aio=True) + @CrossSync.pytest_fixture(scope="function") + @another_decorator + async def async_fixture_with_context(): + async with some_async_context(): + resource = await setup_async_resource() + yield resource + await cleanup_async_resource(resource) + transformers: [CrossSyncFileProcessor] + after: | + @pytest.fixture(scope="function") + @another_decorator + def async_fixture_with_context(): + with some_async_context(): + resource = setup_async_resource() + yield resource + cleanup_async_resource(resource) + + - description: "Handle CrossSync.is_async conditional" + before: | + __CROSS_SYNC_OUTPUT__ = "out.path" + if CrossSync.is_async: + import a + else: + import b + + def my_method(self): + if CrossSync.is_async: + return "async version" + else: + return "sync version" + transformers: [CrossSyncFileProcessor] + after: | + import b + + def my_method(self): + return "sync version" + + - description: "Replace CrossSync symbols" + before: | + __CROSS_SYNC_OUTPUT__ = "out.path" + CrossSync.sleep(1) + @CrossSync.convert_class + class MyClass: + event = CrossSync.Event() + def my_method(self): + return CrossSync.some_function() + transformers: [CrossSyncFileProcessor] + after: | + CrossSync._Sync_Impl.sleep(1) + class MyClass: + event = CrossSync._Sync_Impl.Event() + def my_method(self): + return CrossSync._Sync_Impl.some_function() diff --git a/packages/google-cloud-bigtable/tests/system/cross_sync/test_cases/rm_aio.yaml b/packages/google-cloud-bigtable/tests/system/cross_sync/test_cases/rm_aio.yaml new file mode 100644 index 000000000000..89acda630d80 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/system/cross_sync/test_cases/rm_aio.yaml @@ -0,0 +1,109 @@ +tests: + - description: "remove await" + before: | + CrossSync.rm_aio(await routine()) + transformers: [RmAioFunctions] + after: | + routine() + - description: "async for loop fn" + before: | + async def func_name(): + async for i in CrossSync.rm_aio(range(10)): + await routine() + return 42 + transformers: [RmAioFunctions] + after: | + async def func_name(): + for i in range(10): + await routine() + return 42 + + - description: "async with statement" + before: | + async def func_name(): + async with CrossSync.rm_aio(context_manager()) as cm: + await do_something(cm) + transformers: [RmAioFunctions] + after: | + async def func_name(): + with context_manager() as cm: + await do_something(cm) + + - description: "list comprehension with async for" + before: | + async def func_name(): + result = CrossSync.rm_aio([x async for x in aiter() if await predicate(x)]) + transformers: [RmAioFunctions] + after: | + async def func_name(): + result = [x for x in aiter() if predicate(x)] + + - description: "multiple async features in one call" + before: | + CrossSync.rm_aio([x async for x in aiter() if await predicate(x)] + await routine()) + transformers: [RmAioFunctions] + after: | + [x for x in aiter() if predicate(x)] + routine() + + - description: "do nothing with no CrossSync.rm_aio" + before: | + async def nested_async(): + async with outer_context(): + async for x in outer_iter(): + async with inner_context(x): + async for y in inner_iter(x): + await process(x, y) + transformers: [RmAioFunctions] + after: | + async def nested_async(): + async with outer_context(): + async for x in outer_iter(): + async with inner_context(x): + async for y in 
inner_iter(x): + await process(x, y) + + - description: "nested async for loops with rm_aio" + before: | + async def nested_loops(): + async for x in CrossSync.rm_aio(outer_iter()): + async for y in CrossSync.rm_aio(inner_iter(x)): + await process(x, y) + transformers: [RmAioFunctions] + after: | + async def nested_loops(): + for x in outer_iter(): + for y in inner_iter(x): + await process(x, y) + + - description: "async generator function with rm_aio" + before: | + async def async_gen(): + yield CrossSync.rm_aio(await async_value()) + async for item in CrossSync.rm_aio(async_iterator()): + yield item + transformers: [RmAioFunctions] + after: | + async def async_gen(): + yield async_value() + for item in async_iterator(): + yield item + + - description: "async with statement with multiple context managers" + before: | + async def multi_context(): + async with CrossSync.rm_aio(cm1()), CrossSync.rm_aio(cm2()) as c2, CrossSync.rm_aio(cm3()) as c3: + await do_something(c2, c3) + transformers: [RmAioFunctions] + after: | + async def multi_context(): + with cm1(), cm2() as c2, cm3() as c3: + await do_something(c2, c3) + + - description: "async comprehension with multiple async for and if clauses" + before: | + async def complex_comprehension(): + result = CrossSync.rm_aio([x async for x in aiter1() if await pred1(x) async for y in aiter2(x) if await pred2(y)]) + transformers: [RmAioFunctions] + after: | + async def complex_comprehension(): + result = [x for x in aiter1() if pred1(x) for y in aiter2(x) if pred2(y)] diff --git a/packages/google-cloud-bigtable/tests/system/cross_sync/test_cases/strip_async_conditional_branches.yaml b/packages/google-cloud-bigtable/tests/system/cross_sync/test_cases/strip_async_conditional_branches.yaml new file mode 100644 index 000000000000..0c192fb37ed6 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/system/cross_sync/test_cases/strip_async_conditional_branches.yaml @@ -0,0 +1,74 @@ +tests: + - description: "top level conditional" + before: | + if CrossSync.is_async: + print("async") + else: + print("sync") + transformers: [StripAsyncConditionalBranches] + after: | + print("sync") + - description: "nested conditional" + before: | + if CrossSync.is_async: + print("async") + else: + print("hello") + if CrossSync.is_async: + print("async") + else: + print("world") + transformers: [StripAsyncConditionalBranches] + after: | + print("hello") + print("world") + - description: "conditional within class" + before: | + class MyClass: + def my_method(self): + if CrossSync.is_async: + return "async result" + else: + return "sync result" + transformers: [StripAsyncConditionalBranches] + after: | + class MyClass: + + def my_method(self): + return "sync result" + - description: "multiple branches" + before: | + if CrossSync.is_async: + print("async branch 1") + elif some_condition: + print("other condition") + elif CrossSync.is_async: + print("async branch 2") + else: + print("sync branch") + transformers: [StripAsyncConditionalBranches] + after: | + if some_condition: + print("other condition") + else: + print("sync branch") + - description: "negated conditionals" + before: | + if not CrossSync.is_async: + print("sync code") + else: + print("async code") + + transformers: [StripAsyncConditionalBranches] + after: | + print("sync code") + - description: "is check" + before: | + if CrossSync.is_async is True: + print("async code") + else: + print("sync code") + + transformers: [StripAsyncConditionalBranches] + after: | + print("sync code") diff --git 
a/packages/google-cloud-bigtable/tests/system/cross_sync/test_cases/symbol_replacer.yaml b/packages/google-cloud-bigtable/tests/system/cross_sync/test_cases/symbol_replacer.yaml new file mode 100644 index 000000000000..fa50045f86e8 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/system/cross_sync/test_cases/symbol_replacer.yaml @@ -0,0 +1,82 @@ +tests: + - description: "Does not Replace function name" + before: | + def function(): + pass + transformers: + - name: SymbolReplacer + args: + replacements: {"function": "new_function"} + after: | + def function(): + pass + + - description: "Does not replace async function name" + before: | + async def async_func(): + await old_coroutine() + transformers: + - name: SymbolReplacer + args: + replacements: {"async_func": "new_async_func", "old_coroutine": "new_coroutine"} + after: | + async def async_func(): + await new_coroutine() + + - description: "Replace method call" + before: | + result = obj.old_method() + transformers: + - name: SymbolReplacer + args: + replacements: {"old_method": "new_method"} + after: | + result = obj.new_method() + + - description: "Replace in docstring" + before: | + def func(): + """This is a docstring mentioning old_name.""" + pass + transformers: + - name: SymbolReplacer + args: + replacements: {"old_name": "new_name"} + after: | + def func(): + """This is a docstring mentioning new_name.""" + pass + + - description: "Replace in type annotation" + before: | + def func(param: OldType) -> OldReturnType: + pass + transformers: + - name: SymbolReplacer + args: + replacements: {"OldType": "NewType", "OldReturnType": "NewReturnType"} + after: | + def func(param: NewType) -> NewReturnType: + pass + + - description: "Replace in nested attribute" + before: | + result = obj.attr1.attr2.old_attr + transformers: + - name: SymbolReplacer + args: + replacements: {"old_attr": "new_attr"} + after: | + result = obj.attr1.attr2.new_attr + + - description: "No replacement when symbol not found" + before: | + def unchanged_function(): + pass + transformers: + - name: SymbolReplacer + args: + replacements: {"non_existent": "replacement"} + after: | + def unchanged_function(): + pass diff --git a/packages/google-cloud-bigtable/tests/system/cross_sync/test_cross_sync_e2e.py b/packages/google-cloud-bigtable/tests/system/cross_sync/test_cross_sync_e2e.py new file mode 100644 index 000000000000..86911b1631ea --- /dev/null +++ b/packages/google-cloud-bigtable/tests/system/cross_sync/test_cross_sync_e2e.py @@ -0,0 +1,65 @@ +import ast +import sys +import os +import black +import pytest +import yaml + +# add cross_sync to path +test_dir_name = os.path.dirname(__file__) +cross_sync_path = os.path.join(test_dir_name, "..", "..", "..", ".cross_sync") +sys.path.append(cross_sync_path) + +from transformers import ( # noqa: F401 E402 + SymbolReplacer, + AsyncToSync, + RmAioFunctions, + StripAsyncConditionalBranches, + CrossSyncFileProcessor, +) + + +def loader(): + dir_name = os.path.join(test_dir_name, "test_cases") + for file_name in os.listdir(dir_name): + if not file_name.endswith(".yaml"): + print(f"Skipping {file_name}") + continue + test_case_file = os.path.join(dir_name, file_name) + # load test cases + with open(test_case_file) as f: + print(f"Loading test cases from {test_case_file}") + test_cases = yaml.safe_load(f) + for test in test_cases["tests"]: + test["file_name"] = file_name + yield test + + +@pytest.mark.parametrize( + "test_dict", loader(), ids=lambda x: f"{x['file_name']}: {x.get('description', '')}" +) 
+@pytest.mark.skipif( + sys.version_info < (3, 9), reason="ast.unparse requires python3.9 or higher" +) +def test_e2e_scenario(test_dict): + before_ast = ast.parse(test_dict["before"]) + got_ast = before_ast + for transformer_info in test_dict["transformers"]: + # transformer can be passed as a string, or a dict with name and args + if isinstance(transformer_info, str): + transformer_class = globals()[transformer_info] + transformer_args = {} + else: + transformer_class = globals()[transformer_info["name"]] + transformer_args = transformer_info.get("args", {}) + transformer = transformer_class(**transformer_args) + got_ast = transformer.visit(got_ast) + if got_ast is None: + final_str = "" + else: + final_str = black.format_str(ast.unparse(got_ast), mode=black.FileMode()) + if test_dict.get("after") is None: + expected_str = "" + else: + expected_str = black.format_str(test_dict["after"], mode=black.FileMode()) + assert final_str == expected_str, f"Expected:\n{expected_str}\nGot:\n{final_str}" diff --git a/packages/google-cloud-bigtable/tests/unit/data/_cross_sync/test_cross_sync.py b/packages/google-cloud-bigtable/tests/unit/data/_cross_sync/test_cross_sync.py new file mode 100644 index 000000000000..410f59437711 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/data/_cross_sync/test_cross_sync.py @@ -0,0 +1,579 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
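For orientation, the harness above does roughly the following for each YAML case. This sketch assumes the `.cross_sync/transformers` module is importable (as arranged by the `sys.path` insert in the test file above) and Python 3.9+ for `ast.unparse`; the snippet being transformed is made up.

```python
import ast

# Importable only because the test adds the .cross_sync directory to sys.path.
from transformers import AsyncToSync

before = (
    "async def read_rows():\n"
    "    async for row in stream:\n"
    "        await handle(row)\n"
)
# AsyncToSync strips async def / async for / await, mirroring the
# async_to_sync.yaml cases above.
tree = AsyncToSync().visit(ast.parse(before))
print(ast.unparse(tree))
# def read_rows():
#     for row in stream:
#         handle(row)
```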
+import typing +import asyncio +import pytest +import pytest_asyncio +import threading +import concurrent.futures +import time +import queue +import functools +import sys +from google import api_core +from google.cloud.bigtable.data._cross_sync.cross_sync import CrossSync, T + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock # type: ignore +except ImportError: # pragma: NO COVER + import mock # type: ignore + from mock import AsyncMock # type: ignore + + +class TestCrossSync: + async def async_iter(self, in_list): + for i in in_list: + yield i + + @pytest.fixture + def cs_sync(self): + return CrossSync._Sync_Impl + + @pytest_asyncio.fixture + def cs_async(self): + return CrossSync + + @pytest.mark.parametrize( + "attr, async_version, sync_version", + [ + ("is_async", True, False), + ("sleep", asyncio.sleep, time.sleep), + ( + "retry_target", + api_core.retry.retry_target_async, + api_core.retry.retry_target, + ), + ( + "retry_target_stream", + api_core.retry.retry_target_stream_async, + api_core.retry.retry_target_stream, + ), + ("Retry", api_core.retry.AsyncRetry, api_core.retry.Retry), + ("Queue", asyncio.Queue, queue.Queue), + ("Condition", asyncio.Condition, threading.Condition), + ("Future", asyncio.Future, concurrent.futures.Future), + ("Task", asyncio.Task, concurrent.futures.Future), + ("Event", asyncio.Event, threading.Event), + ("Semaphore", asyncio.Semaphore, threading.Semaphore), + ("StopIteration", StopAsyncIteration, StopIteration), + # types + ("Awaitable", typing.Awaitable, typing.Union[T]), + ("Iterable", typing.AsyncIterable, typing.Iterable), + ("Iterator", typing.AsyncIterator, typing.Iterator), + ("Generator", typing.AsyncGenerator, typing.Generator), + ], + ) + def test_alias_attributes( + self, attr, async_version, sync_version, cs_sync, cs_async + ): + """ + Test basic alias attributes, to ensure they point to the right place + in both sync and async versions. 
+ """ + assert ( + getattr(cs_async, attr) == async_version + ), f"Failed async version for {attr}" + assert getattr(cs_sync, attr) == sync_version, f"Failed sync version for {attr}" + + @pytest.mark.asyncio + async def test_Mock(self, cs_sync, cs_async): + """ + Test Mock class in both sync and async versions + """ + import unittest.mock + + assert isinstance(cs_async.Mock(), AsyncMock) + assert isinstance(cs_sync.Mock(), unittest.mock.Mock) + # test with return value + assert await cs_async.Mock(return_value=1)() == 1 + assert cs_sync.Mock(return_value=1)() == 1 + + def test_next(self, cs_sync): + """ + Test sync version of CrossSync.next() + """ + it = iter([1, 2, 3]) + assert cs_sync.next(it) == 1 + assert cs_sync.next(it) == 2 + assert cs_sync.next(it) == 3 + with pytest.raises(StopIteration): + cs_sync.next(it) + with pytest.raises(cs_sync.StopIteration): + cs_sync.next(it) + + @pytest.mark.asyncio + async def test_next_async(self, cs_async): + """ + test async version of CrossSync.next() + """ + async_it = self.async_iter([1, 2, 3]) + assert await cs_async.next(async_it) == 1 + assert await cs_async.next(async_it) == 2 + assert await cs_async.next(async_it) == 3 + with pytest.raises(StopAsyncIteration): + await cs_async.next(async_it) + with pytest.raises(cs_async.StopIteration): + await cs_async.next(async_it) + + def test_gather_partials(self, cs_sync): + """ + Test sync version of CrossSync.gather_partials() + """ + with concurrent.futures.ThreadPoolExecutor() as e: + partials = [lambda i=i: i + 1 for i in range(5)] + results = cs_sync.gather_partials(partials, sync_executor=e) + assert results == [1, 2, 3, 4, 5] + + def test_gather_partials_with_excepptions(self, cs_sync): + """ + Test sync version of CrossSync.gather_partials() with exceptions + """ + with concurrent.futures.ThreadPoolExecutor() as e: + partials = [lambda i=i: i + 1 if i != 3 else 1 / 0 for i in range(5)] + with pytest.raises(ZeroDivisionError): + cs_sync.gather_partials(partials, sync_executor=e) + + def test_gather_partials_return_exceptions(self, cs_sync): + """ + Test sync version of CrossSync.gather_partials() with return_exceptions=True + """ + with concurrent.futures.ThreadPoolExecutor() as e: + partials = [lambda i=i: i + 1 if i != 3 else 1 / 0 for i in range(5)] + results = cs_sync.gather_partials( + partials, return_exceptions=True, sync_executor=e + ) + assert len(results) == 5 + assert results[0] == 1 + assert results[1] == 2 + assert results[2] == 3 + assert isinstance(results[3], ZeroDivisionError) + assert results[4] == 5 + + def test_gather_partials_no_executor(self, cs_sync): + """ + Test sync version of CrossSync.gather_partials() without an executor + """ + partials = [lambda i=i: i + 1 for i in range(5)] + with pytest.raises(ValueError) as e: + cs_sync.gather_partials(partials) + assert "sync_executor is required" in str(e.value) + + @pytest.mark.asyncio + async def test_gather_partials_async(self, cs_async): + """ + Test async version of CrossSync.gather_partials() + """ + + async def coro(i): + return i + 1 + + partials = [functools.partial(coro, i) for i in range(5)] + results = await cs_async.gather_partials(partials) + assert results == [1, 2, 3, 4, 5] + + @pytest.mark.asyncio + async def test_gather_partials_async_with_exceptions(self, cs_async): + """ + Test async version of CrossSync.gather_partials() with exceptions + """ + + async def coro(i): + return i + 1 if i != 3 else 1 / 0 + + partials = [functools.partial(coro, i) for i in range(5)] + with pytest.raises(ZeroDivisionError): + 
await cs_async.gather_partials(partials) + + @pytest.mark.asyncio + async def test_gather_partials_async_return_exceptions(self, cs_async): + """ + Test async version of CrossSync.gather_partials() with return_exceptions=True + """ + + async def coro(i): + return i + 1 if i != 3 else 1 / 0 + + partials = [functools.partial(coro, i) for i in range(5)] + results = await cs_async.gather_partials(partials, return_exceptions=True) + assert len(results) == 5 + assert results[0] == 1 + assert results[1] == 2 + assert results[2] == 3 + assert isinstance(results[3], ZeroDivisionError) + assert results[4] == 5 + + @pytest.mark.asyncio + async def test_gather_partials_async_uses_asyncio_gather(self, cs_async): + """ + CrossSync.gather_partials() should use asyncio.gather() internally + """ + + async def coro(i): + return i + 1 + + return_exceptions = object() + partials = [functools.partial(coro, i) for i in range(5)] + with mock.patch.object(asyncio, "gather", AsyncMock()) as gather: + await cs_async.gather_partials( + partials, return_exceptions=return_exceptions + ) + gather.assert_called_once() + found_args, found_kwargs = gather.call_args + assert found_kwargs["return_exceptions"] == return_exceptions + for coro in found_args: + await coro + + def test_wait(self, cs_sync): + """ + Test sync version of CrossSync.wait() + + If future is complete, it should be in the first (complete) set + """ + future = concurrent.futures.Future() + future.set_result(1) + s1, s2 = cs_sync.wait([future]) + assert s1 == {future} + assert s2 == set() + + def test_wait_timeout(self, cs_sync): + """ + If timeout occurs, future should be in the second (incomplete) set + """ + future = concurrent.futures.Future() + timeout = 0.1 + start_time = time.monotonic() + s1, s2 = cs_sync.wait([future], timeout) + end_time = time.monotonic() + assert abs((end_time - start_time) - timeout) < 0.01 + assert s1 == set() + assert s2 == {future} + + def test_wait_passthrough(self, cs_sync): + """ + sync version of CrossSync.wait() should pass through to concurrent.futures.wait() + """ + future = object() + timeout = object() + with mock.patch.object(concurrent.futures, "wait", mock.Mock()) as wait: + result = cs_sync.wait([future], timeout) + assert wait.call_count == 1 + assert wait.call_args == (([future],), {"timeout": timeout}) + assert result == wait.return_value + + def test_wait_empty_input(self, cs_sync): + """ + If no futures are provided, return empty sets + """ + s1, s2 = cs_sync.wait([]) + assert s1 == set() + assert s2 == set() + + @pytest.mark.asyncio + async def test_wait_async(self, cs_async): + """ + Test async version of CrossSync.wait() + """ + future = asyncio.Future() + future.set_result(1) + s1, s2 = await cs_async.wait([future]) + assert s1 == {future} + assert s2 == set() + + @pytest.mark.asyncio + async def test_wait_async_timeout(self, cs_async): + """ + If timeout occurs, future should be in the second (incomplete) set + """ + future = asyncio.Future() + timeout = 0.1 + start_time = time.monotonic() + s1, s2 = await cs_async.wait([future], timeout) + end_time = time.monotonic() + assert abs((end_time - start_time) - timeout) < 0.01 + assert s1 == set() + assert s2 == {future} + + @pytest.mark.asyncio + async def test_wait_async_passthrough(self, cs_async): + """ + async version of CrossSync.wait() should pass through to asyncio.wait() + """ + future = object() + timeout = object() + with mock.patch.object(asyncio, "wait", AsyncMock()) as wait: + result = await cs_async.wait([future], timeout) + assert 
wait.call_count == 1 + assert wait.call_args == (([future],), {"timeout": timeout}) + assert result == wait.return_value + + @pytest.mark.asyncio + async def test_wait_async_empty_input(self, cs_async): + """ + If no futures are provided, return empty sets + """ + s1, s2 = await cs_async.wait([]) + assert s1 == set() + assert s2 == set() + + def test_event_wait_passthrough(self, cs_sync): + """ + Test sync version of CrossSync.event_wait() + should pass through timeout directly to the event.wait() call + """ + event = mock.Mock() + timeout = object() + cs_sync.event_wait(event, timeout) + event.wait.assert_called_once_with(timeout=timeout) + + @pytest.mark.parametrize("timeout", [0, 0.01, 0.05]) + def test_event_wait_timeout_exceeded(self, cs_sync, timeout): + """ + Test sync version of CrossSync.event_wait() + """ + event = threading.Event() + start_time = time.monotonic() + cs_sync.event_wait(event, timeout=timeout) + end_time = time.monotonic() + assert abs((end_time - start_time) - timeout) < 0.01 + + def test_event_wait_already_set(self, cs_sync): + """ + if event is already set, do not block + """ + event = threading.Event() + event.set() + start_time = time.monotonic() + cs_sync.event_wait(event, timeout=10) + end_time = time.monotonic() + assert end_time - start_time < 0.01 + + @pytest.mark.parametrize("break_early", [True, False]) + @pytest.mark.asyncio + async def test_event_wait_async(self, cs_async, break_early): + """ + With no timeout, call event.wait() with no arguments + """ + event = AsyncMock() + await cs_async.event_wait(event, async_break_early=break_early) + event.wait.assert_called_once_with() + + @pytest.mark.asyncio + async def test_event_wait_async_with_timeout(self, cs_async): + """ + With a timeout set, should call event.wait(), wrapped in wait_for() + for the timeout + """ + event = mock.Mock() + event.wait.return_value = object() + timeout = object() + with mock.patch.object(asyncio, "wait_for", AsyncMock()) as wait_for: + await cs_async.event_wait(event, timeout=timeout) + assert wait_for.await_count == 1 + assert wait_for.call_count == 1 + wait_for.assert_called_once_with(event.wait(), timeout=timeout) + + @pytest.mark.asyncio + async def test_event_wait_async_timeout_exceeded(self, cs_async): + """ + If timeout is exceeded, break without throwing an exception + """ + event = asyncio.Event() + timeout = 0.5 + start_time = time.monotonic() + await cs_async.event_wait(event, timeout=timeout) + end_time = time.monotonic() + assert abs((end_time - start_time) - timeout) < 0.01 + + @pytest.mark.parametrize("break_early", [True, False]) + @pytest.mark.asyncio + async def test_event_wait_async_already_set(self, cs_async, break_early): + """ + if event is already set, return immediately + """ + event = AsyncMock() + event.is_set = lambda: True + start_time = time.monotonic() + await cs_async.event_wait(event, async_break_early=break_early) + end_time = time.monotonic() + assert abs(end_time - start_time) < 0.01 + + @pytest.mark.asyncio + async def test_event_wait_no_break_early(self, cs_async): + """ + if async_break_early is False, and the event is not set, + simply sleep for the timeout + """ + event = mock.Mock() + event.is_set.return_value = False + timeout = object() + with mock.patch.object(asyncio, "sleep", AsyncMock()) as sleep: + await cs_async.event_wait(event, timeout=timeout, async_break_early=False) + sleep.assert_called_once_with(timeout) + + def test_create_task(self, cs_sync): + """ + Test creating Future using create_task() + """ + executor =
concurrent.futures.ThreadPoolExecutor() + fn = lambda x, y: x + y  # noqa: E731 + result = cs_sync.create_task(fn, 1, y=4, sync_executor=executor) + assert isinstance(result, cs_sync.Task) + assert result.result() == 5 + + def test_create_task_passthrough(self, cs_sync): + """ + sync version passed through to executor.submit() + """ + fn = object() + executor = mock.Mock() + executor.submit.return_value = object() + args = [1, 2, 3] + kwargs = {"a": 1, "b": 2} + result = cs_sync.create_task(fn, *args, **kwargs, sync_executor=executor) + assert result == executor.submit.return_value + assert executor.submit.call_count == 1 + assert executor.submit.call_args == ((fn, *args), kwargs) + + def test_create_task_no_executor(self, cs_sync): + """ + if no executor is provided, raise an exception + """ + with pytest.raises(ValueError) as e: + cs_sync.create_task(lambda: None) + assert "sync_executor is required" in str(e.value) + + @pytest.mark.asyncio + async def test_create_task_async(self, cs_async): + """ + Test creating Future using create_task() + """ + + async def coro_fn(x, y): + return x + y + + result = cs_async.create_task(coro_fn, 1, y=4) + assert isinstance(result, asyncio.Task) + assert await result == 5 + + @pytest.mark.asyncio + async def test_create_task_async_passthrough(self, cs_async): + """ + async version passed through to asyncio.create_task() + """ + coro_fn = mock.Mock() + coro_fn.return_value = object() + args = [1, 2, 3] + kwargs = {"a": 1, "b": 2} + with mock.patch.object(asyncio, "create_task", mock.Mock()) as create_task: + cs_async.create_task(coro_fn, *args, **kwargs) + create_task.assert_called_once() + create_task.assert_called_once_with(coro_fn.return_value) + coro_fn.assert_called_once_with(*args, **kwargs) + + @pytest.mark.skipif( + sys.version_info < (3, 8), reason="Task names require python 3.8" + ) + @pytest.mark.asyncio + async def test_create_task_async_with_name(self, cs_async): + """ + Test creating a task with a name + """ + + async def coro_fn(): + return None + + name = "test-name-456" + result = cs_async.create_task(coro_fn, task_name=name) + assert isinstance(result, asyncio.Task) + assert result.get_name() == name + + def test_yield_to_event_loop(self, cs_sync): + """ + no-op in sync version + """ + assert cs_sync.yield_to_event_loop() is None + + @pytest.mark.asyncio + async def test_yield_to_event_loop_async(self, cs_async): + """ + should call await asyncio.sleep(0) + """ + with mock.patch.object(asyncio, "sleep", AsyncMock()) as sleep: + await cs_async.yield_to_event_loop() + sleep.assert_called_once_with(0) + + def test_verify_async_event_loop(self, cs_sync): + """ + no-op in sync version + """ + assert cs_sync.verify_async_event_loop() is None + + @pytest.mark.asyncio + async def test_verify_async_event_loop_async(self, cs_async): + """ + should call asyncio.get_running_loop() + """ + with mock.patch.object(asyncio, "get_running_loop") as get_running_loop: + cs_async.verify_async_event_loop() + get_running_loop.assert_called_once() + + def test_verify_async_event_loop_no_event_loop(self, cs_async): + """ + Should raise an exception if no event loop is running + """ + with pytest.raises(RuntimeError) as e: + cs_async.verify_async_event_loop() + assert "no running event loop" in str(e.value) + + def test_rmaio(self, cs_async): + """ + rm_aio should return whatever is passed to it + """ + assert cs_async.rm_aio(1) == 1 + assert cs_async.rm_aio("test") == "test" + obj = object() + assert cs_async.rm_aio(obj) == obj + + def test_add_mapping(self,
cs_sync, cs_async): + """ + Add dynamic attributes to each class using add_mapping() + """ + for cls in [cs_sync, cs_async]: + cls.add_mapping("test", 1) + assert cls.test == 1 + assert cls._runtime_replacements[(cls, "test")] == 1 + + def test_add_duplicate_mapping(self, cs_sync, cs_async): + """ + Adding the same attribute twice should raise an exception + """ + for cls in [cs_sync, cs_async]: + cls.add_mapping("duplicate", 1) + with pytest.raises(AttributeError) as e: + cls.add_mapping("duplicate", 2) + assert "Conflicting assignments" in str(e.value) + + def test_add_mapping_decorator(self, cs_sync, cs_async): + """ + add_mapping_decorator should allow wrapping classes with add_mapping() + """ + for cls in [cs_sync, cs_async]: + + @cls.add_mapping_decorator("decorated") + class Decorated: + pass + + assert cls.decorated == Decorated diff --git a/packages/google-cloud-bigtable/tests/unit/data/_cross_sync/test_cross_sync_decorators.py b/packages/google-cloud-bigtable/tests/unit/data/_cross_sync/test_cross_sync_decorators.py new file mode 100644 index 000000000000..3be579379597 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/data/_cross_sync/test_cross_sync_decorators.py @@ -0,0 +1,542 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import pytest +import pytest_asyncio +import ast +from unittest import mock +from google.cloud.bigtable.data._cross_sync.cross_sync import CrossSync +from google.cloud.bigtable.data._cross_sync._decorators import ( + ConvertClass, + Convert, + Drop, + Pytest, + PytestFixture, +) + + +@pytest.fixture +def globals_mock(): + mock_transform = mock.Mock() + mock_transform().visit = lambda x: x + global_dict = { + k: mock_transform + for k in ["RmAioFunctions", "SymbolReplacer", "CrossSyncMethodDecoratorHandler"] + } + return global_dict + + +class TestConvertClassDecorator: + def _get_class(self): + return ConvertClass + + def test_ctor_defaults(self): + """ + Should set default values for path, add_mapping_for_name, and docstring_format_vars + """ + instance = self._get_class()() + assert instance.sync_name is None + assert instance.replace_symbols is None + assert instance.add_mapping_for_name is None + assert instance.async_docstring_format_vars == {} + assert instance.sync_docstring_format_vars == {} + assert instance.rm_aio is False + + def test_ctor(self): + sync_name = "sync_name" + replace_symbols = {"a": "b"} + docstring_format_vars = {"A": (1, 2)} + add_mapping_for_name = "test_name" + rm_aio = True + + instance = self._get_class()( + sync_name, + replace_symbols=replace_symbols, + docstring_format_vars=docstring_format_vars, + add_mapping_for_name=add_mapping_for_name, + rm_aio=rm_aio, + ) + assert instance.sync_name is sync_name + assert instance.replace_symbols is replace_symbols + assert instance.add_mapping_for_name is add_mapping_for_name + assert instance.async_docstring_format_vars == {"A": 1} + assert instance.sync_docstring_format_vars == {"A": 2} + assert instance.rm_aio is rm_aio + + def test_class_decorator(self): + """ + Should return class being decorated + """ + unwrapped_class = mock.Mock + wrapped_class = self._get_class().decorator(unwrapped_class, sync_name="s") + assert unwrapped_class == wrapped_class + + def test_class_decorator_adds_mapping(self): + """ + If add_mapping_for_name is set, should call CrossSync.add_mapping with the class being decorated + """ + with mock.patch.object(CrossSync, "add_mapping") as add_mapping: + mock_cls = mock.Mock + # check decoration with no add_mapping + self._get_class().decorator(sync_name="s")(mock_cls) + assert add_mapping.call_count == 0 + # check decoration with add_mapping + name = "test_name" + self._get_class().decorator(sync_name="s", add_mapping_for_name=name)( + mock_cls + ) + assert add_mapping.call_count == 1 + add_mapping.assert_called_once_with(name, mock_cls) + + @pytest.mark.parametrize( + "docstring,format_vars,expected", + [ + ["test docstring", {}, "test docstring"], + ["{}", {}, "{}"], + ["test_docstring", {"A": (1, 2)}, "test_docstring"], + ["{A}", {"A": (1, 2)}, "1"], + ["{A} {B}", {"A": (1, 2), "B": (3, 4)}, "1 3"], + ["hello {world_var}", {"world_var": ("world", "moon")}, "hello world"], + ["{empty}", {"empty": ("", "")}, ""], + ["{empty}", {"empty": (None, None)}, ""], + ["maybe{empty}", {"empty": (None, "yes")}, "maybe"], + ["maybe{empty}", {"empty": (" no", None)}, "maybe no"], + ], + ) + def test_class_decorator_docstring_update(self, docstring, format_vars, expected): + """ + If docstring_format_vars is set, should update the docstring + of the class being decorated + """ + + @ConvertClass.decorator(sync_name="s", docstring_format_vars=format_vars) + class Class: + __doc__ = docstring + + assert Class.__doc__ == expected + # check internal state + instance = self._get_class()(sync_name="s", 
docstring_format_vars=format_vars) + async_replacements = {k: v[0] or "" for k, v in format_vars.items()} + sync_replacements = {k: v[1] or "" for k, v in format_vars.items()} + assert instance.async_docstring_format_vars == async_replacements + assert instance.sync_docstring_format_vars == sync_replacements + + def test_sync_ast_transform_replaces_name(self, globals_mock): + """ + Should update the name of the new class + """ + decorator = self._get_class()("SyncClass") + mock_node = ast.ClassDef(name="AsyncClass", bases=[], keywords=[], body=[]) + + result = decorator.sync_ast_transform(mock_node, globals_mock) + + assert isinstance(result, ast.ClassDef) + assert result.name == "SyncClass" + + def test_sync_ast_transform_strips_cross_sync_decorators(self, globals_mock): + """ + should remove all CrossSync decorators from the class + """ + decorator = self._get_class()("path") + cross_sync_decorator = ast.Call( + func=ast.Attribute( + value=ast.Name(id="CrossSync", ctx=ast.Load()), + attr="some_decorator", + ctx=ast.Load(), + ), + args=[], + keywords=[], + ) + other_decorator = ast.Name(id="other_decorator", ctx=ast.Load()) + mock_node = ast.ClassDef( + name="AsyncClass", + bases=[], + keywords=[], + body=[], + decorator_list=[cross_sync_decorator, other_decorator], + ) + + result = decorator.sync_ast_transform(mock_node, globals_mock) + + assert isinstance(result, ast.ClassDef) + assert len(result.decorator_list) == 1 + assert isinstance(result.decorator_list[0], ast.Name) + assert result.decorator_list[0].id == "other_decorator" + + def test_sync_ast_transform_add_mapping(self, globals_mock): + """ + If add_mapping_for_name is set, should add CrossSync.add_mapping_decorator to new class + """ + decorator = self._get_class()("path", add_mapping_for_name="sync_class") + mock_node = ast.ClassDef(name="AsyncClass", bases=[], keywords=[], body=[]) + + result = decorator.sync_ast_transform(mock_node, globals_mock) + + assert isinstance(result, ast.ClassDef) + assert len(result.decorator_list) == 1 + assert isinstance(result.decorator_list[0], ast.Call) + assert isinstance(result.decorator_list[0].func, ast.Attribute) + assert result.decorator_list[0].func.attr == "add_mapping_decorator" + assert result.decorator_list[0].args[0].value == "sync_class" + + @pytest.mark.parametrize( + "docstring,format_vars,expected", + [ + ["test docstring", {}, "test docstring"], + ["{}", {}, "{}"], + ["test_docstring", {"A": (1, 2)}, "test_docstring"], + ["{A}", {"A": (1, 2)}, "2"], + ["{A} {B}", {"A": (1, 2), "B": (3, 4)}, "2 4"], + ["hello {world_var}", {"world_var": ("world", "moon")}, "hello moon"], + ], + ) + def test_sync_ast_transform_add_docstring_format( + self, docstring, format_vars, expected, globals_mock + ): + """ + If docstring_format_vars is set, should format the docstring of the new class + """ + decorator = self._get_class()( + "path.to.SyncClass", docstring_format_vars=format_vars + ) + mock_node = ast.ClassDef( + name="AsyncClass", + bases=[], + keywords=[], + body=[ast.Expr(value=ast.Constant(value=docstring))], + ) + result = decorator.sync_ast_transform(mock_node, globals_mock) + + assert isinstance(result, ast.ClassDef) + assert isinstance(result.body[0], ast.Expr) + assert isinstance(result.body[0].value, ast.Constant) + assert result.body[0].value.value == expected + + def test_sync_ast_transform_replace_symbols(self, globals_mock): + """ + SymbolReplacer should be called with replace_symbols + """ + replace_symbols = {"a": "b", "c": "d"} + decorator = self._get_class()( + 
"path.to.SyncClass", replace_symbols=replace_symbols + ) + mock_node = ast.ClassDef(name="AsyncClass", bases=[], keywords=[], body=[]) + symbol_transform_mock = mock.Mock() + globals_mock = {**globals_mock, "SymbolReplacer": symbol_transform_mock} + decorator.sync_ast_transform(mock_node, globals_mock) + # make sure SymbolReplacer was called with replace_symbols + assert symbol_transform_mock.call_count == 1 + found_dict = symbol_transform_mock.call_args[0][0] + assert "a" in found_dict + for k, v in replace_symbols.items(): + assert found_dict[k] == v + + def test_sync_ast_transform_rmaio_calls_async_to_sync(self): + """ + Should call AsyncToSync if rm_aio is set + """ + decorator = self._get_class()(rm_aio=True) + mock_node = ast.ClassDef(name="AsyncClass", bases=[], keywords=[], body=[]) + async_to_sync_mock = mock.Mock() + async_to_sync_mock.visit.side_effect = lambda x: x + globals_mock = {"AsyncToSync": lambda: async_to_sync_mock} + + decorator.sync_ast_transform(mock_node, globals_mock) + assert async_to_sync_mock.visit.call_count == 1 + + +class TestConvertDecorator: + def _get_class(self): + return Convert + + def test_ctor_defaults(self): + instance = self._get_class()() + assert instance.sync_name is None + assert instance.replace_symbols is None + assert instance.async_docstring_format_vars == {} + assert instance.sync_docstring_format_vars == {} + assert instance.rm_aio is True + + def test_ctor(self): + sync_name = "sync_name" + replace_symbols = {"a": "b"} + docstring_format_vars = {"A": (1, 2)} + rm_aio = False + + instance = self._get_class()( + sync_name=sync_name, + replace_symbols=replace_symbols, + docstring_format_vars=docstring_format_vars, + rm_aio=rm_aio, + ) + assert instance.sync_name is sync_name + assert instance.replace_symbols is replace_symbols + assert instance.async_docstring_format_vars == {"A": 1} + assert instance.sync_docstring_format_vars == {"A": 2} + assert instance.rm_aio is rm_aio + + def test_async_decorator_no_docstring(self): + """ + If no docstring_format_vars is set, should be a no-op + """ + unwrapped_class = mock.Mock + wrapped_class = self._get_class().decorator(unwrapped_class) + assert unwrapped_class == wrapped_class + + @pytest.mark.parametrize( + "docstring,format_vars,expected", + [ + ["test docstring", {}, "test docstring"], + ["{}", {}, "{}"], + ["test_docstring", {"A": (1, 2)}, "test_docstring"], + ["{A}", {"A": (1, 2)}, "1"], + ["{A} {B}", {"A": (1, 2), "B": (3, 4)}, "1 3"], + ["hello {world_var}", {"world_var": ("world", "moon")}, "hello world"], + ["{empty}", {"empty": ("", "")}, ""], + ["{empty}", {"empty": (None, None)}, ""], + ["maybe{empty}", {"empty": (None, "yes")}, "maybe"], + ["maybe{empty}", {"empty": (" no", None)}, "maybe no"], + ], + ) + def test_async_decorator_docstring_update(self, docstring, format_vars, expected): + """ + If docstring_format_vars is set, should update the docstring + of the class being decorated + """ + + @Convert.decorator(docstring_format_vars=format_vars) + class Class: + __doc__ = docstring + + assert Class.__doc__ == expected + # check internal state + instance = self._get_class()(docstring_format_vars=format_vars) + async_replacements = {k: v[0] or "" for k, v in format_vars.items()} + sync_replacements = {k: v[1] or "" for k, v in format_vars.items()} + assert instance.async_docstring_format_vars == async_replacements + assert instance.sync_docstring_format_vars == sync_replacements + + def test_sync_ast_transform_remove_adef(self): + """ + Should convert `async def` methods to `def` 
methods + """ + decorator = self._get_class()(rm_aio=False) + mock_node = ast.AsyncFunctionDef( + name="test_method", args=ast.arguments(), body=[] + ) + + result = decorator.sync_ast_transform(mock_node, {}) + + assert isinstance(result, ast.FunctionDef) + assert result.name == "test_method" + + def test_sync_ast_transform_replaces_name(self, globals_mock): + """ + Should update the name of the method if sync_name is set + """ + decorator = self._get_class()(sync_name="new_method_name", rm_aio=False) + mock_node = ast.AsyncFunctionDef( + name="old_method_name", args=ast.arguments(), body=[] + ) + + result = decorator.sync_ast_transform(mock_node, globals_mock) + + assert isinstance(result, ast.FunctionDef) + assert result.name == "new_method_name" + + def test_sync_ast_transform_rmaio_calls_async_to_sync(self): + """ + Should call AsyncToSync if rm_aio is set + """ + decorator = self._get_class()(rm_aio=True) + mock_node = ast.AsyncFunctionDef( + name="test_method", args=ast.arguments(), body=[] + ) + async_to_sync_mock = mock.Mock() + async_to_sync_mock.visit.return_value = mock_node + globals_mock = {"AsyncToSync": lambda: async_to_sync_mock} + + decorator.sync_ast_transform(mock_node, globals_mock) + assert async_to_sync_mock.visit.call_count == 1 + + def test_sync_ast_transform_replace_symbols(self): + """ + Should call SymbolReplacer with replace_symbols if replace_symbols is set + """ + replace_symbols = {"old_symbol": "new_symbol"} + decorator = self._get_class()(replace_symbols=replace_symbols, rm_aio=False) + mock_node = ast.AsyncFunctionDef( + name="test_method", args=ast.arguments(), body=[] + ) + symbol_replacer_mock = mock.Mock() + globals_mock = {"SymbolReplacer": symbol_replacer_mock} + + decorator.sync_ast_transform(mock_node, globals_mock) + + assert symbol_replacer_mock.call_count == 1 + assert symbol_replacer_mock.call_args[0][0] == replace_symbols + assert symbol_replacer_mock(replace_symbols).visit.call_count == 1 + + @pytest.mark.parametrize( + "docstring,format_vars,expected", + [ + ["test docstring", {}, "test docstring"], + ["{}", {}, "{}"], + ["test_docstring", {"A": (1, 2)}, "test_docstring"], + ["{A}", {"A": (1, 2)}, "2"], + ["{A} {B}", {"A": (1, 2), "B": (3, 4)}, "2 4"], + ["hello {world_var}", {"world_var": ("world", "moon")}, "hello moon"], + ], + ) + def test_sync_ast_transform_add_docstring_format( + self, docstring, format_vars, expected + ): + """ + If docstring_format_vars is set, should format the docstring of the new method + """ + decorator = self._get_class()(docstring_format_vars=format_vars, rm_aio=False) + mock_node = ast.AsyncFunctionDef( + name="test_method", + args=ast.arguments(), + body=[ast.Expr(value=ast.Constant(value=docstring))], + ) + + result = decorator.sync_ast_transform(mock_node, {}) + + assert isinstance(result, ast.FunctionDef) + assert isinstance(result.body[0], ast.Expr) + assert isinstance(result.body[0].value, ast.Constant) + assert result.body[0].value.value == expected + + +class TestDropDecorator: + def _get_class(self): + return Drop + + def test_decorator_functionality(self): + """ + applying the decorator should be a no-op + """ + unwrapped = lambda x: x # noqa: E731 + wrapped = self._get_class().decorator(unwrapped) + assert unwrapped == wrapped + assert unwrapped(1) == wrapped(1) + assert wrapped(1) == 1 + + def test_sync_ast_transform(self): + """ + Should return None for any input method + """ + decorator = self._get_class()() + mock_node = ast.AsyncFunctionDef( + name="test_method", args=ast.arguments(), body=[] + 
) + + result = decorator.sync_ast_transform(mock_node, {}) + + assert result is None + + +class TestPytestDecorator: + def _get_class(self): + return Pytest + + def test_ctor(self): + instance = self._get_class()() + assert instance.rm_aio is True + instance = self._get_class()(rm_aio=False) + assert instance.rm_aio is False + + def test_decorator_functionality(self): + """ + Should wrap the class with pytest.mark.asyncio + """ + unwrapped_fn = mock.Mock + wrapped_class = self._get_class().decorator(unwrapped_fn) + assert wrapped_class == pytest.mark.asyncio(unwrapped_fn) + + def test_sync_ast_transform(self): + """ + If rm_aio is True (default), should call AsyncToSync on the class + """ + decorator = self._get_class()() + mock_node = ast.AsyncFunctionDef( + name="AsyncMethod", args=ast.arguments(), body=[] + ) + + async_to_sync_mock = mock.Mock() + async_to_sync_mock.visit.side_effect = lambda x: x + globals_mock = {"AsyncToSync": lambda: async_to_sync_mock} + + transformed = decorator.sync_ast_transform(mock_node, globals_mock) + assert async_to_sync_mock.visit.call_count == 1 + assert isinstance(transformed, ast.FunctionDef) + + def test_sync_ast_transform_no_rm_aio(self): + """ + if rm_aio is False, should remove the async keyword from the method + """ + decorator = self._get_class()(rm_aio=False) + mock_node = ast.AsyncFunctionDef( + name="AsyncMethod", args=ast.arguments(), body=[] + ) + + async_to_sync_mock = mock.Mock() + async_to_sync_mock.visit.return_value = mock_node + globals_mock = {"AsyncToSync": lambda: async_to_sync_mock} + + transformed = decorator.sync_ast_transform(mock_node, globals_mock) + assert async_to_sync_mock.visit.call_count == 0 + assert isinstance(transformed, ast.FunctionDef) + + +class TestPytestFixtureDecorator: + def _get_class(self): + return PytestFixture + + def test_decorator_functionality(self): + """ + Should wrap the class with pytest_asyncio.fixture + """ + with mock.patch.object(pytest_asyncio, "fixture") as fixture: + + @PytestFixture.decorator(1, 2, scope="function", params=[3, 4]) + def fn(): + pass + + assert fixture.call_count == 1 + assert fixture.call_args[0] == (1, 2) + assert fixture.call_args[1] == {"scope": "function", "params": [3, 4]} + + def test_sync_ast_transform(self): + """ + Should attach pytest.fixture to generated method + """ + decorator = self._get_class()(1, 2, scope="function") + + mock_node = ast.AsyncFunctionDef( + name="test_method", args=ast.arguments(), body=[] + ) + + result = decorator.sync_ast_transform(mock_node, {}) + + assert isinstance(result, ast.AsyncFunctionDef) + assert len(result.decorator_list) == 1 + assert isinstance(result.decorator_list[0], ast.Call) + assert result.decorator_list[0].func.value.id == "pytest" + assert result.decorator_list[0].func.attr == "fixture" + assert result.decorator_list[0].args[0].value == 1 + assert result.decorator_list[0].args[1].value == 2 + assert result.decorator_list[0].keywords[0].arg == "scope" + assert result.decorator_list[0].keywords[0].value.value == "function" From 4cb2cf5ff58949260b48ef0112798c877a74983b Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Mon, 11 Nov 2024 11:24:30 -0800 Subject: [PATCH 831/892] chore(tests): clean up sample test tables (#1030) --- .../google-cloud-bigtable/samples/__init__.py | 0 .../samples/beam/__init__.py | 0 .../samples/beam/hello_world_write_test.py | 32 +- .../samples/hello/__init__.py | 0 .../samples/hello/async_main.py | 115 ++++--- .../samples/hello/async_main_test.py | 15 +- .../samples/hello/main.py | 116 ++++--- 
.../samples/hello/main_test.py | 15 +- .../samples/hello_happybase/__init__.py | 0 .../samples/hello_happybase/main.py | 6 +- .../samples/hello_happybase/main_test.py | 35 +- .../samples/quickstart/__init__.py | 0 .../samples/quickstart/main_async_test.py | 44 +-- .../samples/quickstart/main_test.py | 25 +- .../samples/quickstart_happybase/__init__.py | 0 .../samples/quickstart_happybase/main_test.py | 25 +- .../samples/snippets/__init__.py | 0 .../samples/snippets/data_client/__init__.py | 0 .../data_client_snippets_async_test.py | 26 +- .../samples/snippets/deletes/__init__.py | 0 .../snippets/deletes/deletes_async_test.py | 42 +-- .../samples/snippets/deletes/deletes_test.py | 125 ++++--- .../filters/filter_snippets_async_test.py | 38 +-- .../samples/snippets/filters/filters_test.py | 115 +++---- .../samples/snippets/reads/reads_test.py | 77 ++--- .../samples/snippets/writes/writes_test.py | 33 +- .../samples/tableadmin/__init__.py | 0 .../samples/tableadmin/tableadmin.py | 320 ++++++++---------- .../samples/tableadmin/tableadmin_test.py | 27 +- .../google-cloud-bigtable/samples/utils.py | 87 +++++ 30 files changed, 605 insertions(+), 713 deletions(-) create mode 100644 packages/google-cloud-bigtable/samples/__init__.py create mode 100644 packages/google-cloud-bigtable/samples/beam/__init__.py create mode 100644 packages/google-cloud-bigtable/samples/hello/__init__.py create mode 100644 packages/google-cloud-bigtable/samples/hello_happybase/__init__.py create mode 100644 packages/google-cloud-bigtable/samples/quickstart/__init__.py create mode 100644 packages/google-cloud-bigtable/samples/quickstart_happybase/__init__.py create mode 100644 packages/google-cloud-bigtable/samples/snippets/__init__.py create mode 100644 packages/google-cloud-bigtable/samples/snippets/data_client/__init__.py create mode 100644 packages/google-cloud-bigtable/samples/snippets/deletes/__init__.py create mode 100644 packages/google-cloud-bigtable/samples/tableadmin/__init__.py create mode 100644 packages/google-cloud-bigtable/samples/utils.py diff --git a/packages/google-cloud-bigtable/samples/__init__.py b/packages/google-cloud-bigtable/samples/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/packages/google-cloud-bigtable/samples/beam/__init__.py b/packages/google-cloud-bigtable/samples/beam/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/packages/google-cloud-bigtable/samples/beam/hello_world_write_test.py b/packages/google-cloud-bigtable/samples/beam/hello_world_write_test.py index 4e9a47c7dabc..ba0e980964bb 100644 --- a/packages/google-cloud-bigtable/samples/beam/hello_world_write_test.py +++ b/packages/google-cloud-bigtable/samples/beam/hello_world_write_test.py @@ -14,45 +14,33 @@ import os import uuid -from google.cloud import bigtable import pytest -import hello_world_write +from . 
import hello_world_write +from ..utils import create_table_cm PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"] -TABLE_ID_PREFIX = "mobile-time-series-{}" +TABLE_ID = f"mobile-time-series-beam-{str(uuid.uuid4())[:16]}" @pytest.fixture(scope="module", autouse=True) -def table_id(): - client = bigtable.Client(project=PROJECT, admin=True) - instance = client.instance(BIGTABLE_INSTANCE) +def table(): + with create_table_cm( + PROJECT, BIGTABLE_INSTANCE, TABLE_ID, {"stats_summary": None} + ) as table: + yield table - table_id = TABLE_ID_PREFIX.format(str(uuid.uuid4())[:16]) - table = instance.table(table_id) - if table.exists(): - table.delete() - table.create(column_families={"stats_summary": None}) - yield table_id - - table.delete() - - -def test_hello_world_write(table_id): +def test_hello_world_write(table): hello_world_write.run( [ "--bigtable-project=%s" % PROJECT, "--bigtable-instance=%s" % BIGTABLE_INSTANCE, - "--bigtable-table=%s" % table_id, + "--bigtable-table=%s" % TABLE_ID, ] ) - client = bigtable.Client(project=PROJECT, admin=True) - instance = client.instance(BIGTABLE_INSTANCE) - table = instance.table(table_id) - rows = table.read_rows() count = 0 for _ in rows: diff --git a/packages/google-cloud-bigtable/samples/hello/__init__.py b/packages/google-cloud-bigtable/samples/hello/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/packages/google-cloud-bigtable/samples/hello/async_main.py b/packages/google-cloud-bigtable/samples/hello/async_main.py index d608bb073c70..0130161e1179 100644 --- a/packages/google-cloud-bigtable/samples/hello/async_main.py +++ b/packages/google-cloud-bigtable/samples/hello/async_main.py @@ -26,6 +26,7 @@ import argparse import asyncio +from ..utils import wait_for_table # [START bigtable_async_hw_imports] from google.cloud import bigtable @@ -33,7 +34,6 @@ from google.cloud.bigtable.data import RowMutationEntry from google.cloud.bigtable.data import SetCell from google.cloud.bigtable.data import ReadRowsQuery - # [END bigtable_async_hw_imports] @@ -65,63 +65,66 @@ async def main(project_id, instance_id, table_id): print("Table {} already exists.".format(table_id)) # [END bigtable_async_hw_create_table] - # [START bigtable_async_hw_write_rows] - print("Writing some greetings to the table.") - greetings = ["Hello World!", "Hello Cloud Bigtable!", "Hello Python!"] - mutations = [] - column = "greeting" - for i, value in enumerate(greetings): - # Note: This example uses sequential numeric IDs for simplicity, - # but this can result in poor performance in a production - # application. Since rows are stored in sorted order by key, - # sequential keys can result in poor distribution of operations - # across nodes. - # - # For more information about how to design a Bigtable schema for - # the best performance, see the documentation: - # - # https://cloud.google.com/bigtable/docs/schema-design - row_key = "greeting{}".format(i).encode() - row_mutation = RowMutationEntry( - row_key, SetCell(column_family_id, column, value) - ) - mutations.append(row_mutation) - await table.bulk_mutate_rows(mutations) - # [END bigtable_async_hw_write_rows] - - # [START bigtable_async_hw_create_filter] - # Create a filter to only retrieve the most recent version of the cell - # for each column across entire row. 
- row_filter = row_filters.CellsColumnLimitFilter(1) - # [END bigtable_async_hw_create_filter] - - # [START bigtable_async_hw_get_with_filter] - # [START bigtable_async_hw_get_by_key] - print("Getting a single greeting by row key.") - key = "greeting0".encode() - - row = await table.read_row(key, row_filter=row_filter) - cell = row.cells[0] - print(cell.value.decode("utf-8")) - # [END bigtable_async_hw_get_by_key] - # [END bigtable_async_hw_get_with_filter] - - # [START bigtable_async_hw_scan_with_filter] - # [START bigtable_async_hw_scan_all] - print("Scanning for all greetings:") - query = ReadRowsQuery(row_filter=row_filter) - async for row in await table.read_rows_stream(query): + try: + # let table creation complete + wait_for_table(admin_table) + # [START bigtable_async_hw_write_rows] + print("Writing some greetings to the table.") + greetings = ["Hello World!", "Hello Cloud Bigtable!", "Hello Python!"] + mutations = [] + column = "greeting" + for i, value in enumerate(greetings): + # Note: This example uses sequential numeric IDs for simplicity, + # but this can result in poor performance in a production + # application. Since rows are stored in sorted order by key, + # sequential keys can result in poor distribution of operations + # across nodes. + # + # For more information about how to design a Bigtable schema for + # the best performance, see the documentation: + # + # https://cloud.google.com/bigtable/docs/schema-design + row_key = "greeting{}".format(i).encode() + row_mutation = RowMutationEntry( + row_key, SetCell(column_family_id, column, value) + ) + mutations.append(row_mutation) + await table.bulk_mutate_rows(mutations) + # [END bigtable_async_hw_write_rows] + + # [START bigtable_async_hw_create_filter] + # Create a filter to only retrieve the most recent version of the cell + # for each column across entire row. + row_filter = row_filters.CellsColumnLimitFilter(1) + # [END bigtable_async_hw_create_filter] + + # [START bigtable_async_hw_get_with_filter] + # [START bigtable_async_hw_get_by_key] + print("Getting a single greeting by row key.") + key = "greeting0".encode() + + row = await table.read_row(key, row_filter=row_filter) cell = row.cells[0] print(cell.value.decode("utf-8")) - # [END bigtable_async_hw_scan_all] - # [END bigtable_async_hw_scan_with_filter] - - # [START bigtable_async_hw_delete_table] - # the async client only supports the data API. Table deletion as an admin operation - # use admin client to create the table - print("Deleting the {} table.".format(table_id)) - admin_table.delete() - # [END bigtable_async_hw_delete_table] + # [END bigtable_async_hw_get_by_key] + # [END bigtable_async_hw_get_with_filter] + + # [START bigtable_async_hw_scan_with_filter] + # [START bigtable_async_hw_scan_all] + print("Scanning for all greetings:") + query = ReadRowsQuery(row_filter=row_filter) + async for row in await table.read_rows_stream(query): + cell = row.cells[0] + print(cell.value.decode("utf-8")) + # [END bigtable_async_hw_scan_all] + # [END bigtable_async_hw_scan_with_filter] + finally: + # [START bigtable_async_hw_delete_table] + # the async client only supports the data API. 
Table deletion as an admin operation + # use admin client to create the table + print("Deleting the {} table.".format(table_id)) + admin_table.delete() + # [END bigtable_async_hw_delete_table] if __name__ == "__main__": diff --git a/packages/google-cloud-bigtable/samples/hello/async_main_test.py b/packages/google-cloud-bigtable/samples/hello/async_main_test.py index a47ac2d3331e..aa65a86523f4 100644 --- a/packages/google-cloud-bigtable/samples/hello/async_main_test.py +++ b/packages/google-cloud-bigtable/samples/hello/async_main_test.py @@ -13,27 +13,24 @@ # limitations under the License. import os -import random import asyncio +import uuid -from async_main import main +from .async_main import main PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"] -TABLE_NAME_FORMAT = "hello-world-test-{}" -TABLE_NAME_RANGE = 10000 +TABLE_ID = f"hello-world-test-async-{str(uuid.uuid4())[:16]}" def test_async_main(capsys): - table_name = TABLE_NAME_FORMAT.format(random.randrange(TABLE_NAME_RANGE)) - - asyncio.run(main(PROJECT, BIGTABLE_INSTANCE, table_name)) + asyncio.run(main(PROJECT, BIGTABLE_INSTANCE, TABLE_ID)) out, _ = capsys.readouterr() - assert "Creating the {} table.".format(table_name) in out + assert "Creating the {} table.".format(TABLE_ID) in out assert "Writing some greetings to the table." in out assert "Getting a single greeting by row key." in out assert "Hello World!" in out assert "Scanning for all greetings" in out assert "Hello Cloud Bigtable!" in out - assert "Deleting the {} table.".format(table_name) in out + assert "Deleting the {} table.".format(TABLE_ID) in out diff --git a/packages/google-cloud-bigtable/samples/hello/main.py b/packages/google-cloud-bigtable/samples/hello/main.py index 3b7de34b0627..3e5078608eb6 100644 --- a/packages/google-cloud-bigtable/samples/hello/main.py +++ b/packages/google-cloud-bigtable/samples/hello/main.py @@ -25,6 +25,7 @@ """ import argparse +from ..utils import wait_for_table # [START bigtable_hw_imports] import datetime @@ -60,63 +61,68 @@ def main(project_id, instance_id, table_id): print("Table {} already exists.".format(table_id)) # [END bigtable_hw_create_table] - # [START bigtable_hw_write_rows] - print("Writing some greetings to the table.") - greetings = ["Hello World!", "Hello Cloud Bigtable!", "Hello Python!"] - rows = [] - column = "greeting".encode() - for i, value in enumerate(greetings): - # Note: This example uses sequential numeric IDs for simplicity, - # but this can result in poor performance in a production - # application. Since rows are stored in sorted order by key, - # sequential keys can result in poor distribution of operations - # across nodes. - # - # For more information about how to design a Bigtable schema for - # the best performance, see the documentation: - # - # https://cloud.google.com/bigtable/docs/schema-design - row_key = "greeting{}".format(i).encode() - row = table.direct_row(row_key) - row.set_cell( - column_family_id, column, value, timestamp=datetime.datetime.utcnow() - ) - rows.append(row) - table.mutate_rows(rows) - # [END bigtable_hw_write_rows] - - # [START bigtable_hw_create_filter] - # Create a filter to only retrieve the most recent version of the cell - # for each column across entire row. 
- row_filter = row_filters.CellsColumnLimitFilter(1) - # [END bigtable_hw_create_filter] - - # [START bigtable_hw_get_with_filter] - # [START bigtable_hw_get_by_key] - print("Getting a single greeting by row key.") - key = "greeting0".encode() - - row = table.read_row(key, row_filter) - cell = row.cells[column_family_id][column][0] - print(cell.value.decode("utf-8")) - # [END bigtable_hw_get_by_key] - # [END bigtable_hw_get_with_filter] - - # [START bigtable_hw_scan_with_filter] - # [START bigtable_hw_scan_all] - print("Scanning for all greetings:") - partial_rows = table.read_rows(filter_=row_filter) - - for row in partial_rows: + try: + # let table creation complete + wait_for_table(table) + + # [START bigtable_hw_write_rows] + print("Writing some greetings to the table.") + greetings = ["Hello World!", "Hello Cloud Bigtable!", "Hello Python!"] + rows = [] + column = "greeting".encode() + for i, value in enumerate(greetings): + # Note: This example uses sequential numeric IDs for simplicity, + # but this can result in poor performance in a production + # application. Since rows are stored in sorted order by key, + # sequential keys can result in poor distribution of operations + # across nodes. + # + # For more information about how to design a Bigtable schema for + # the best performance, see the documentation: + # + # https://cloud.google.com/bigtable/docs/schema-design + row_key = "greeting{}".format(i).encode() + row = table.direct_row(row_key) + row.set_cell( + column_family_id, column, value, timestamp=datetime.datetime.utcnow() + ) + rows.append(row) + table.mutate_rows(rows) + # [END bigtable_hw_write_rows] + + # [START bigtable_hw_create_filter] + # Create a filter to only retrieve the most recent version of the cell + # for each column across entire row. + row_filter = row_filters.CellsColumnLimitFilter(1) + # [END bigtable_hw_create_filter] + + # [START bigtable_hw_get_with_filter] + # [START bigtable_hw_get_by_key] + print("Getting a single greeting by row key.") + key = "greeting0".encode() + + row = table.read_row(key, row_filter) cell = row.cells[column_family_id][column][0] print(cell.value.decode("utf-8")) - # [END bigtable_hw_scan_all] - # [END bigtable_hw_scan_with_filter] - - # [START bigtable_hw_delete_table] - print("Deleting the {} table.".format(table_id)) - table.delete() - # [END bigtable_hw_delete_table] + # [END bigtable_hw_get_by_key] + # [END bigtable_hw_get_with_filter] + + # [START bigtable_hw_scan_with_filter] + # [START bigtable_hw_scan_all] + print("Scanning for all greetings:") + partial_rows = table.read_rows(filter_=row_filter) + + for row in partial_rows: + cell = row.cells[column_family_id][column][0] + print(cell.value.decode("utf-8")) + # [END bigtable_hw_scan_all] + # [END bigtable_hw_scan_with_filter] + + finally: + # [START bigtable_hw_delete_table] + print("Deleting the {} table.".format(table_id)) + table.delete() + # [END bigtable_hw_delete_table] if __name__ == "__main__": diff --git a/packages/google-cloud-bigtable/samples/hello/main_test.py b/packages/google-cloud-bigtable/samples/hello/main_test.py index 641b34d11e5f..28814d909d2c 100644 --- a/packages/google-cloud-bigtable/samples/hello/main_test.py +++ b/packages/google-cloud-bigtable/samples/hello/main_test.py @@ -13,26 +13,23 @@ # limitations under the License. 
import os -import random +import uuid -from main import main +from .main import main PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"] -TABLE_NAME_FORMAT = "hello-world-test-{}" -TABLE_NAME_RANGE = 10000 +TABLE_ID = f"hello-world-test-{str(uuid.uuid4())[:16]}" def test_main(capsys): - table_name = TABLE_NAME_FORMAT.format(random.randrange(TABLE_NAME_RANGE)) - - main(PROJECT, BIGTABLE_INSTANCE, table_name) + main(PROJECT, BIGTABLE_INSTANCE, TABLE_ID) out, _ = capsys.readouterr() - assert "Creating the {} table.".format(table_name) in out + assert "Creating the {} table.".format(TABLE_ID) in out assert "Writing some greetings to the table." in out assert "Getting a single greeting by row key." in out assert "Hello World!" in out assert "Scanning for all greetings" in out assert "Hello Cloud Bigtable!" in out - assert "Deleting the {} table.".format(table_name) in out + assert "Deleting the {} table.".format(TABLE_ID) in out diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/__init__.py b/packages/google-cloud-bigtable/samples/hello_happybase/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/main.py b/packages/google-cloud-bigtable/samples/hello_happybase/main.py index 7999fd0064e4..50820febde8b 100644 --- a/packages/google-cloud-bigtable/samples/hello_happybase/main.py +++ b/packages/google-cloud-bigtable/samples/hello_happybase/main.py @@ -25,6 +25,7 @@ """ import argparse +from ..utils import wait_for_table # [START bigtable_hw_imports_happybase] from google.cloud import bigtable @@ -51,6 +52,8 @@ def main(project_id, instance_id, table_name): ) # [END bigtable_hw_create_table_happybase] + wait_for_table(instance.table(table_name)) + # [START bigtable_hw_write_rows_happybase] print("Writing some greetings to the table.") table = connection.table(table_name) @@ -90,12 +93,11 @@ def main(project_id, instance_id, table_name): print("\t{}: {}".format(key, row[column_name.encode("utf-8")])) # [END bigtable_hw_scan_all_happybase] + finally: # [START bigtable_hw_delete_table_happybase] print("Deleting the {} table.".format(table_name)) connection.delete_table(table_name) # [END bigtable_hw_delete_table_happybase] - - finally: connection.close() diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/main_test.py b/packages/google-cloud-bigtable/samples/hello_happybase/main_test.py index 6a63750da95b..252f4ccaf9e7 100644 --- a/packages/google-cloud-bigtable/samples/hello_happybase/main_test.py +++ b/packages/google-cloud-bigtable/samples/hello_happybase/main_test.py @@ -13,25 +13,32 @@ # limitations under the License. import os -import random +import uuid -from main import main +from .main import main +from google.cloud import bigtable PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"] -TABLE_NAME_FORMAT = "hello-world-hb-test-{}" -TABLE_NAME_RANGE = 10000 +TABLE_ID = f"hello-world-hb-test-{str(uuid.uuid4())[:16]}" def test_main(capsys): - table_name = TABLE_NAME_FORMAT.format(random.randrange(TABLE_NAME_RANGE)) - main(PROJECT, BIGTABLE_INSTANCE, table_name) + try: + main(PROJECT, BIGTABLE_INSTANCE, TABLE_ID) - out, _ = capsys.readouterr() - assert "Creating the {} table.".format(table_name) in out - assert "Writing some greetings to the table." in out - assert "Getting a single greeting by row key." in out - assert "Hello World!" 
in out - assert "Scanning for all greetings" in out - assert "Hello Cloud Bigtable!" in out - assert "Deleting the {} table.".format(table_name) in out + out, _ = capsys.readouterr() + assert "Creating the {} table.".format(TABLE_ID) in out + assert "Writing some greetings to the table." in out + assert "Getting a single greeting by row key." in out + assert "Hello World!" in out + assert "Scanning for all greetings" in out + assert "Hello Cloud Bigtable!" in out + assert "Deleting the {} table.".format(TABLE_ID) in out + finally: + # delete table + client = bigtable.Client(PROJECT, admin=True) + instance = client.instance(BIGTABLE_INSTANCE) + table = instance.table(TABLE_ID) + if table.exists(): + table.delete() diff --git a/packages/google-cloud-bigtable/samples/quickstart/__init__.py b/packages/google-cloud-bigtable/samples/quickstart/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/packages/google-cloud-bigtable/samples/quickstart/main_async_test.py b/packages/google-cloud-bigtable/samples/quickstart/main_async_test.py index 841cfc18025c..0749cbd316a3 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/main_async_test.py +++ b/packages/google-cloud-bigtable/samples/quickstart/main_async_test.py @@ -13,46 +13,26 @@ # limitations under the License. import os +import uuid from typing import AsyncGenerator from google.cloud.bigtable.data import BigtableDataClientAsync, SetCell import pytest import pytest_asyncio -from main_async import main - +from .main_async import main +from ..utils import create_table_cm PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"] -TABLE_ID_FORMAT = "quickstart-test-{}" +TABLE_ID = f"quickstart-async-test-{str(uuid.uuid4())[:16]}" @pytest_asyncio.fixture async def table_id() -> AsyncGenerator[str, None]: - table_id = _create_table() - await _populate_table(table_id) - - yield table_id - - _delete_table(table_id) - - -def _create_table(): - from google.cloud import bigtable - import uuid - - client = bigtable.Client(project=PROJECT, admin=True) - instance = client.instance(BIGTABLE_INSTANCE) - - table_id = TABLE_ID_FORMAT.format(uuid.uuid4().hex[:8]) - table = instance.table(table_id) - if table.exists(): - table.delete() - - table.create(column_families={"cf1": None}) - - client.close() - return table_id + with create_table_cm(PROJECT, BIGTABLE_INSTANCE, TABLE_ID, {"cf1": None}): + await _populate_table(TABLE_ID) + yield TABLE_ID async def _populate_table(table_id: str): @@ -61,16 +41,6 @@ async def _populate_table(table_id: str): await table.mutate_row("r1", SetCell("cf1", "c1", "test-value")) -def _delete_table(table_id: str): - from google.cloud import bigtable - - client = bigtable.Client(project=PROJECT, admin=True) - instance = client.instance(BIGTABLE_INSTANCE) - table = instance.table(table_id) - table.delete() - client.close() - - @pytest.mark.asyncio async def test_main(capsys, table_id): await main(PROJECT, BIGTABLE_INSTANCE, table_id) diff --git a/packages/google-cloud-bigtable/samples/quickstart/main_test.py b/packages/google-cloud-bigtable/samples/quickstart/main_test.py index 46d578b6b93c..f58161f231b1 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/main_test.py +++ b/packages/google-cloud-bigtable/samples/quickstart/main_test.py @@ -14,35 +14,28 @@ import os import uuid - -from google.cloud import bigtable import pytest -from main import main +from .main import main + +from ..utils import create_table_cm PROJECT = 
os.environ["GOOGLE_CLOUD_PROJECT"] BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"] -TABLE_ID_FORMAT = "quickstart-test-{}" +TABLE_ID = f"quickstart-test-{str(uuid.uuid4())[:16]}" @pytest.fixture() def table(): - table_id = TABLE_ID_FORMAT.format(uuid.uuid4().hex[:8]) - client = bigtable.Client(project=PROJECT, admin=True) - instance = client.instance(BIGTABLE_INSTANCE) - table = instance.table(table_id) column_family_id = "cf1" column_families = {column_family_id: None} - table.create(column_families=column_families) - - row = table.direct_row("r1") - row.set_cell(column_family_id, "c1", "test-value") - row.commit() - - yield table_id + with create_table_cm(PROJECT, BIGTABLE_INSTANCE, TABLE_ID, column_families) as table: + row = table.direct_row("r1") + row.set_cell(column_family_id, "c1", "test-value") + row.commit() - table.delete() + yield TABLE_ID def test_main(capsys, table): diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/__init__.py b/packages/google-cloud-bigtable/samples/quickstart_happybase/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/main_test.py b/packages/google-cloud-bigtable/samples/quickstart_happybase/main_test.py index dc62ebede8dd..343ec800a96d 100644 --- a/packages/google-cloud-bigtable/samples/quickstart_happybase/main_test.py +++ b/packages/google-cloud-bigtable/samples/quickstart_happybase/main_test.py @@ -14,35 +14,26 @@ import os import uuid - -from google.cloud import bigtable import pytest -from main import main - +from .main import main +from ..utils import create_table_cm PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"] -TABLE_ID_FORMAT = "quickstart-hb-test-{}" +TABLE_ID = f"quickstart-hb-test-{str(uuid.uuid4())[:16]}" @pytest.fixture() def table(): - table_id = TABLE_ID_FORMAT.format(uuid.uuid4().hex[:8]) - client = bigtable.Client(project=PROJECT, admin=True) - instance = client.instance(BIGTABLE_INSTANCE) - table = instance.table(table_id) column_family_id = "cf1" column_families = {column_family_id: None} - table.create(column_families=column_families) - - row = table.direct_row("r1") - row.set_cell(column_family_id, "c1", "test-value") - row.commit() - - yield table_id + with create_table_cm(PROJECT, BIGTABLE_INSTANCE, TABLE_ID, column_families) as table: + row = table.direct_row("r1") + row.set_cell(column_family_id, "c1", "test-value") + row.commit() - table.delete() + yield TABLE_ID def test_main(capsys, table): diff --git a/packages/google-cloud-bigtable/samples/snippets/__init__.py b/packages/google-cloud-bigtable/samples/snippets/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/packages/google-cloud-bigtable/samples/snippets/data_client/__init__.py b/packages/google-cloud-bigtable/samples/snippets/data_client/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/packages/google-cloud-bigtable/samples/snippets/data_client/data_client_snippets_async_test.py b/packages/google-cloud-bigtable/samples/snippets/data_client/data_client_snippets_async_test.py index 2e0fb9b8153c..8dfff50d135b 100644 --- a/packages/google-cloud-bigtable/samples/snippets/data_client/data_client_snippets_async_test.py +++ b/packages/google-cloud-bigtable/samples/snippets/data_client/data_client_snippets_async_test.py @@ -12,36 +12,22 @@ # limitations under the License. 
import pytest import pytest_asyncio -import uuid import os +import uuid -import data_client_snippets_async as data_snippets +from . import data_client_snippets_async as data_snippets +from ...utils import create_table_cm PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"] -TABLE_ID_STATIC = os.getenv( - "BIGTABLE_TABLE", None -) # if not set, a temproary table will be generated +TABLE_ID = f"data-client-{str(uuid.uuid4())[:16]}" @pytest.fixture(scope="session") def table_id(): - from google.cloud import bigtable - - client = bigtable.Client(project=PROJECT, admin=True) - instance = client.instance(BIGTABLE_INSTANCE) - table_id = TABLE_ID_STATIC or f"data-client-{str(uuid.uuid4())[:16]}" - - admin_table = instance.table(table_id) - if not admin_table.exists(): - admin_table.create(column_families={"family": None, "stats_summary": None}) - - yield table_id - - if not table_id == TABLE_ID_STATIC: - # clean up table when finished - admin_table.delete() + with create_table_cm(PROJECT, BIGTABLE_INSTANCE, TABLE_ID, {"family": None, "stats_summary": None}): + yield TABLE_ID @pytest_asyncio.fixture diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/__init__.py b/packages/google-cloud-bigtable/samples/snippets/deletes/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/deletes_async_test.py b/packages/google-cloud-bigtable/samples/snippets/deletes/deletes_async_test.py index b708bd52e3fe..9408a832037c 100644 --- a/packages/google-cloud-bigtable/samples/snippets/deletes/deletes_async_test.py +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/deletes_async_test.py @@ -15,52 +15,26 @@ import datetime import os +import uuid from typing import AsyncGenerator from google.cloud._helpers import _microseconds_from_datetime import pytest import pytest_asyncio -import deletes_snippets_async +from . 
import deletes_snippets_async +from ...utils import create_table_cm PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"] -TABLE_ID_PREFIX = "mobile-time-series-{}" +TABLE_ID = f"mobile-time-series-deletes-async-{str(uuid.uuid4())[:16]}" -@pytest_asyncio.fixture +@pytest_asyncio.fixture(scope="module", autouse=True) async def table_id() -> AsyncGenerator[str, None]: - table_id = _create_table() - await _populate_table(table_id) - yield table_id - _delete_table(table_id) - - -def _create_table(): - from google.cloud import bigtable - import uuid - - client = bigtable.Client(project=PROJECT, admin=True) - instance = client.instance(BIGTABLE_INSTANCE) - - table_id = TABLE_ID_PREFIX.format(str(uuid.uuid4())[:16]) - table = instance.table(table_id) - if table.exists(): - table.delete() - - table.create(column_families={"stats_summary": None, "cell_plan": None}) - client.close() - return table_id - - -def _delete_table(table_id: str): - from google.cloud import bigtable - - client = bigtable.Client(project=PROJECT, admin=True) - instance = client.instance(BIGTABLE_INSTANCE) - table = instance.table(table_id) - table.delete() - client.close() + with create_table_cm(PROJECT, BIGTABLE_INSTANCE, TABLE_ID, {"stats_summary": None, "cell_plan": None}, verbose=False): + await _populate_table(TABLE_ID) + yield TABLE_ID async def _populate_table(table_id): diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/deletes_test.py b/packages/google-cloud-bigtable/samples/snippets/deletes/deletes_test.py index bebaabafb767..3284c37da739 100644 --- a/packages/google-cloud-bigtable/samples/snippets/deletes/deletes_test.py +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/deletes_test.py @@ -18,81 +18,72 @@ import time import uuid -from google.cloud import bigtable import pytest -import deletes_snippets +from . 
import deletes_snippets +from ...utils import create_table_cm PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"] -TABLE_ID_PREFIX = "mobile-time-series-{}" +TABLE_ID = f"mobile-time-series-deletes-{str(uuid.uuid4())[:16]}" -@pytest.fixture(scope="module", autouse=True) +@pytest.fixture(scope="module") def table_id(): from google.cloud.bigtable.row_set import RowSet - client = bigtable.Client(project=PROJECT, admin=True) - instance = client.instance(BIGTABLE_INSTANCE) - - table_id = TABLE_ID_PREFIX.format(str(uuid.uuid4())[:16]) - table = instance.table(table_id) - if table.exists(): - table.delete() - - table.create(column_families={"stats_summary": None, "cell_plan": None}) - - timestamp = datetime.datetime(2019, 5, 1) - timestamp_minus_hr = datetime.datetime(2019, 5, 1) - datetime.timedelta(hours=1) - - row_keys = [ - "phone#4c410523#20190501", - "phone#4c410523#20190502", - "phone#4c410523#20190505", - "phone#5c10102#20190501", - "phone#5c10102#20190502", - ] - - rows = [table.direct_row(row_key) for row_key in row_keys] - - rows[0].set_cell("stats_summary", "connected_cell", 1, timestamp) - rows[0].set_cell("stats_summary", "connected_wifi", 1, timestamp) - rows[0].set_cell("stats_summary", "os_build", "PQ2A.190405.003", timestamp) - rows[0].set_cell("cell_plan", "data_plan_01gb", "true", timestamp_minus_hr) - rows[0].set_cell("cell_plan", "data_plan_01gb", "false", timestamp) - rows[0].set_cell("cell_plan", "data_plan_05gb", "true", timestamp) - rows[1].set_cell("stats_summary", "connected_cell", 1, timestamp) - rows[1].set_cell("stats_summary", "connected_wifi", 1, timestamp) - rows[1].set_cell("stats_summary", "os_build", "PQ2A.190405.004", timestamp) - rows[1].set_cell("cell_plan", "data_plan_05gb", "true", timestamp) - rows[2].set_cell("stats_summary", "connected_cell", 0, timestamp) - rows[2].set_cell("stats_summary", "connected_wifi", 1, timestamp) - rows[2].set_cell("stats_summary", "os_build", "PQ2A.190406.000", timestamp) - rows[2].set_cell("cell_plan", "data_plan_05gb", "true", timestamp) - rows[3].set_cell("stats_summary", "connected_cell", 1, timestamp) - rows[3].set_cell("stats_summary", "connected_wifi", 1, timestamp) - rows[3].set_cell("stats_summary", "os_build", "PQ2A.190401.002", timestamp) - rows[3].set_cell("cell_plan", "data_plan_10gb", "true", timestamp) - rows[4].set_cell("stats_summary", "connected_cell", 1, timestamp) - rows[4].set_cell("stats_summary", "connected_wifi", 0, timestamp) - rows[4].set_cell("stats_summary", "os_build", "PQ2A.190406.000", timestamp) - rows[4].set_cell("cell_plan", "data_plan_10gb", "true", timestamp) - - table.mutate_rows(rows) - - # Ensure mutations have propagated. 
- row_set = RowSet() - - for row_key in row_keys: - row_set.add_row_key(row_key) - - fetched = list(table.read_rows(row_set=row_set)) - - while len(fetched) < len(rows): - time.sleep(5) + with create_table_cm(PROJECT, BIGTABLE_INSTANCE, TABLE_ID, {"stats_summary": None, "cell_plan": None}, verbose=False) as table: + timestamp = datetime.datetime(2019, 5, 1) + timestamp_minus_hr = datetime.datetime(2019, 5, 1) - datetime.timedelta(hours=1) + + row_keys = [ + "phone#4c410523#20190501", + "phone#4c410523#20190502", + "phone#4c410523#20190505", + "phone#5c10102#20190501", + "phone#5c10102#20190502", + ] + + rows = [table.direct_row(row_key) for row_key in row_keys] + + rows[0].set_cell("stats_summary", "connected_cell", 1, timestamp) + rows[0].set_cell("stats_summary", "connected_wifi", 1, timestamp) + rows[0].set_cell("stats_summary", "os_build", "PQ2A.190405.003", timestamp) + rows[0].set_cell("cell_plan", "data_plan_01gb", "true", timestamp_minus_hr) + rows[0].set_cell("cell_plan", "data_plan_01gb", "false", timestamp) + rows[0].set_cell("cell_plan", "data_plan_05gb", "true", timestamp) + rows[1].set_cell("stats_summary", "connected_cell", 1, timestamp) + rows[1].set_cell("stats_summary", "connected_wifi", 1, timestamp) + rows[1].set_cell("stats_summary", "os_build", "PQ2A.190405.004", timestamp) + rows[1].set_cell("cell_plan", "data_plan_05gb", "true", timestamp) + rows[2].set_cell("stats_summary", "connected_cell", 0, timestamp) + rows[2].set_cell("stats_summary", "connected_wifi", 1, timestamp) + rows[2].set_cell("stats_summary", "os_build", "PQ2A.190406.000", timestamp) + rows[2].set_cell("cell_plan", "data_plan_05gb", "true", timestamp) + rows[3].set_cell("stats_summary", "connected_cell", 1, timestamp) + rows[3].set_cell("stats_summary", "connected_wifi", 1, timestamp) + rows[3].set_cell("stats_summary", "os_build", "PQ2A.190401.002", timestamp) + rows[3].set_cell("cell_plan", "data_plan_10gb", "true", timestamp) + rows[4].set_cell("stats_summary", "connected_cell", 1, timestamp) + rows[4].set_cell("stats_summary", "connected_wifi", 0, timestamp) + rows[4].set_cell("stats_summary", "os_build", "PQ2A.190406.000", timestamp) + rows[4].set_cell("cell_plan", "data_plan_10gb", "true", timestamp) + + table.mutate_rows(rows) + + # Ensure mutations have propagated. 
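+        # Read the written rows back with a targeted RowSet and poll until
+        # every key is returned, so the table is fully populated before the
+        # dependent tests run.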
+ row_set = RowSet() + + for row_key in row_keys: + row_set.add_row_key(row_key) + fetched = list(table.read_rows(row_set=row_set)) - yield table_id + while len(fetched) < len(rows): + time.sleep(5) + fetched = list(table.read_rows(row_set=row_set)) + + yield TABLE_ID def assert_output_match(capsys, expected): @@ -135,6 +126,8 @@ def test_delete_column_family(capsys, table_id): assert_output_match(capsys, "") -def test_delete_table(capsys, table_id): - deletes_snippets.delete_table(PROJECT, BIGTABLE_INSTANCE, table_id) - assert_output_match(capsys, "") +def test_delete_table(capsys): + delete_table_id = f"to-delete-table-{str(uuid.uuid4())[:16]}" + with create_table_cm(PROJECT, BIGTABLE_INSTANCE, delete_table_id, verbose=False): + deletes_snippets.delete_table(PROJECT, BIGTABLE_INSTANCE, delete_table_id) + assert_output_match(capsys, "") diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/filter_snippets_async_test.py b/packages/google-cloud-bigtable/samples/snippets/filters/filter_snippets_async_test.py index 76751feafc61..124db8157906 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/filter_snippets_async_test.py +++ b/packages/google-cloud-bigtable/samples/snippets/filters/filter_snippets_async_test.py @@ -14,6 +14,7 @@ import datetime import os +import uuid import inspect from typing import AsyncGenerator @@ -23,46 +24,21 @@ from .snapshots.snap_filters_test import snapshots from . import filter_snippets_async +from ...utils import create_table_cm from google.cloud._helpers import ( _microseconds_from_datetime, ) PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"] -TABLE_ID_PREFIX = "mobile-time-series-{}" +TABLE_ID = f"mobile-time-series-filters-async-{str(uuid.uuid4())[:16]}" -@pytest_asyncio.fixture +@pytest_asyncio.fixture(scope="module", autouse=True) async def table_id() -> AsyncGenerator[str, None]: - table_id = _create_table() - await _populate_table(table_id) - yield table_id - _delete_table(table_id) - - -def _create_table(): - from google.cloud import bigtable - import uuid - - client = bigtable.Client(project=PROJECT, admin=True) - instance = client.instance(BIGTABLE_INSTANCE) - - table_id = TABLE_ID_PREFIX.format(str(uuid.uuid4())[:16]) - table = instance.table(table_id) - if table.exists(): - table.delete() - - table.create(column_families={"stats_summary": None, "cell_plan": None}) - return table_id - - -def _delete_table(table_id: str): - from google.cloud import bigtable - - client = bigtable.Client(project=PROJECT, admin=True) - instance = client.instance(BIGTABLE_INSTANCE) - table = instance.table(table_id) - table.delete() + with create_table_cm(PROJECT, BIGTABLE_INSTANCE, TABLE_ID, {"stats_summary": None, "cell_plan": None}): + await _populate_table(TABLE_ID) + yield TABLE_ID async def _populate_table(table_id): diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/filters_test.py b/packages/google-cloud-bigtable/samples/snippets/filters/filters_test.py index a849320395aa..fe99886bdb0c 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/filters_test.py +++ b/packages/google-cloud-bigtable/samples/snippets/filters/filters_test.py @@ -18,84 +18,75 @@ import time import uuid -from google.cloud import bigtable import pytest from . 
import filter_snippets from .snapshots.snap_filters_test import snapshots +from ...utils import create_table_cm PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"] -TABLE_ID_PREFIX = "mobile-time-series-{}" +TABLE_ID = f"mobile-time-series-filters-{str(uuid.uuid4())[:16]}" @pytest.fixture(scope="module", autouse=True) def table_id(): from google.cloud.bigtable.row_set import RowSet - client = bigtable.Client(project=PROJECT, admin=True) - instance = client.instance(BIGTABLE_INSTANCE) - - table_id = TABLE_ID_PREFIX.format(str(uuid.uuid4())[:16]) - table = instance.table(table_id) - if table.exists(): - table.delete() - - table.create(column_families={"stats_summary": None, "cell_plan": None}) - - timestamp = datetime.datetime(2019, 5, 1) - timestamp_minus_hr = datetime.datetime(2019, 5, 1) - datetime.timedelta(hours=1) - - row_keys = [ - "phone#4c410523#20190501", - "phone#4c410523#20190502", - "phone#4c410523#20190505", - "phone#5c10102#20190501", - "phone#5c10102#20190502", - ] - - rows = [table.direct_row(row_key) for row_key in row_keys] - - rows[0].set_cell("stats_summary", "connected_cell", 1, timestamp) - rows[0].set_cell("stats_summary", "connected_wifi", 1, timestamp) - rows[0].set_cell("stats_summary", "os_build", "PQ2A.190405.003", timestamp) - rows[0].set_cell("cell_plan", "data_plan_01gb", "true", timestamp_minus_hr) - rows[0].set_cell("cell_plan", "data_plan_01gb", "false", timestamp) - rows[0].set_cell("cell_plan", "data_plan_05gb", "true", timestamp) - rows[1].set_cell("stats_summary", "connected_cell", 1, timestamp) - rows[1].set_cell("stats_summary", "connected_wifi", 1, timestamp) - rows[1].set_cell("stats_summary", "os_build", "PQ2A.190405.004", timestamp) - rows[1].set_cell("cell_plan", "data_plan_05gb", "true", timestamp) - rows[2].set_cell("stats_summary", "connected_cell", 0, timestamp) - rows[2].set_cell("stats_summary", "connected_wifi", 1, timestamp) - rows[2].set_cell("stats_summary", "os_build", "PQ2A.190406.000", timestamp) - rows[2].set_cell("cell_plan", "data_plan_05gb", "true", timestamp) - rows[3].set_cell("stats_summary", "connected_cell", 1, timestamp) - rows[3].set_cell("stats_summary", "connected_wifi", 1, timestamp) - rows[3].set_cell("stats_summary", "os_build", "PQ2A.190401.002", timestamp) - rows[3].set_cell("cell_plan", "data_plan_10gb", "true", timestamp) - rows[4].set_cell("stats_summary", "connected_cell", 1, timestamp) - rows[4].set_cell("stats_summary", "connected_wifi", 0, timestamp) - rows[4].set_cell("stats_summary", "os_build", "PQ2A.190406.000", timestamp) - rows[4].set_cell("cell_plan", "data_plan_10gb", "true", timestamp) - - table.mutate_rows(rows) - - # Ensure mutations have propagated. 
- row_set = RowSet() - - for row_key in row_keys: - row_set.add_row_key(row_key) - - fetched = list(table.read_rows(row_set=row_set)) - - while len(fetched) < len(rows): - time.sleep(5) + table_id = TABLE_ID + with create_table_cm(PROJECT, BIGTABLE_INSTANCE, table_id, {"stats_summary": None, "cell_plan": None}) as table: + + timestamp = datetime.datetime(2019, 5, 1) + timestamp_minus_hr = datetime.datetime(2019, 5, 1) - datetime.timedelta(hours=1) + + row_keys = [ + "phone#4c410523#20190501", + "phone#4c410523#20190502", + "phone#4c410523#20190505", + "phone#5c10102#20190501", + "phone#5c10102#20190502", + ] + + rows = [table.direct_row(row_key) for row_key in row_keys] + + rows[0].set_cell("stats_summary", "connected_cell", 1, timestamp) + rows[0].set_cell("stats_summary", "connected_wifi", 1, timestamp) + rows[0].set_cell("stats_summary", "os_build", "PQ2A.190405.003", timestamp) + rows[0].set_cell("cell_plan", "data_plan_01gb", "true", timestamp_minus_hr) + rows[0].set_cell("cell_plan", "data_plan_01gb", "false", timestamp) + rows[0].set_cell("cell_plan", "data_plan_05gb", "true", timestamp) + rows[1].set_cell("stats_summary", "connected_cell", 1, timestamp) + rows[1].set_cell("stats_summary", "connected_wifi", 1, timestamp) + rows[1].set_cell("stats_summary", "os_build", "PQ2A.190405.004", timestamp) + rows[1].set_cell("cell_plan", "data_plan_05gb", "true", timestamp) + rows[2].set_cell("stats_summary", "connected_cell", 0, timestamp) + rows[2].set_cell("stats_summary", "connected_wifi", 1, timestamp) + rows[2].set_cell("stats_summary", "os_build", "PQ2A.190406.000", timestamp) + rows[2].set_cell("cell_plan", "data_plan_05gb", "true", timestamp) + rows[3].set_cell("stats_summary", "connected_cell", 1, timestamp) + rows[3].set_cell("stats_summary", "connected_wifi", 1, timestamp) + rows[3].set_cell("stats_summary", "os_build", "PQ2A.190401.002", timestamp) + rows[3].set_cell("cell_plan", "data_plan_10gb", "true", timestamp) + rows[4].set_cell("stats_summary", "connected_cell", 1, timestamp) + rows[4].set_cell("stats_summary", "connected_wifi", 0, timestamp) + rows[4].set_cell("stats_summary", "os_build", "PQ2A.190406.000", timestamp) + rows[4].set_cell("cell_plan", "data_plan_10gb", "true", timestamp) + + table.mutate_rows(rows) + + # Ensure mutations have propagated. + row_set = RowSet() + + for row_key in row_keys: + row_set.add_row_key(row_key) + fetched = list(table.read_rows(row_set=row_set)) - yield table_id + while len(fetched) < len(rows): + time.sleep(5) + fetched = list(table.read_rows(row_set=row_set)) - table.delete() + yield table_id def test_filter_limit_row_sample(capsys, table_id): diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/reads_test.py b/packages/google-cloud-bigtable/samples/snippets/reads/reads_test.py index da826d6fb347..0078ce5981af 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/reads_test.py +++ b/packages/google-cloud-bigtable/samples/snippets/reads/reads_test.py @@ -13,65 +13,52 @@ import datetime import os -import uuid import inspect +import uuid -from google.cloud import bigtable import pytest from .snapshots.snap_reads_test import snapshots from . 
import read_snippets +from ...utils import create_table_cm PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"] -TABLE_ID_PREFIX = "mobile-time-series-{}" +TABLE_ID = f"mobile-time-series-reads-{str(uuid.uuid4())[:16]}" @pytest.fixture(scope="module", autouse=True) def table_id(): - client = bigtable.Client(project=PROJECT, admin=True) - instance = client.instance(BIGTABLE_INSTANCE) - - table_id = TABLE_ID_PREFIX.format(str(uuid.uuid4())[:16]) - table = instance.table(table_id) - if table.exists(): - table.delete() - - table.create(column_families={"stats_summary": None}) - - # table = instance.table(table_id) - - timestamp = datetime.datetime(2019, 5, 1) - rows = [ - table.direct_row("phone#4c410523#20190501"), - table.direct_row("phone#4c410523#20190502"), - table.direct_row("phone#4c410523#20190505"), - table.direct_row("phone#5c10102#20190501"), - table.direct_row("phone#5c10102#20190502"), - ] - - rows[0].set_cell("stats_summary", "connected_cell", 1, timestamp) - rows[0].set_cell("stats_summary", "connected_wifi", 1, timestamp) - rows[0].set_cell("stats_summary", "os_build", "PQ2A.190405.003", timestamp) - rows[1].set_cell("stats_summary", "connected_cell", 1, timestamp) - rows[1].set_cell("stats_summary", "connected_wifi", 1, timestamp) - rows[1].set_cell("stats_summary", "os_build", "PQ2A.190405.004", timestamp) - rows[2].set_cell("stats_summary", "connected_cell", 0, timestamp) - rows[2].set_cell("stats_summary", "connected_wifi", 1, timestamp) - rows[2].set_cell("stats_summary", "os_build", "PQ2A.190406.000", timestamp) - rows[3].set_cell("stats_summary", "connected_cell", 1, timestamp) - rows[3].set_cell("stats_summary", "connected_wifi", 1, timestamp) - rows[3].set_cell("stats_summary", "os_build", "PQ2A.190401.002", timestamp) - rows[4].set_cell("stats_summary", "connected_cell", 1, timestamp) - rows[4].set_cell("stats_summary", "connected_wifi", 0, timestamp) - rows[4].set_cell("stats_summary", "os_build", "PQ2A.190406.000", timestamp) - - table.mutate_rows(rows) - - yield table_id - - table.delete() + with create_table_cm(PROJECT, BIGTABLE_INSTANCE, TABLE_ID, {"stats_summary": None}) as table: + timestamp = datetime.datetime(2019, 5, 1) + rows = [ + table.direct_row("phone#4c410523#20190501"), + table.direct_row("phone#4c410523#20190502"), + table.direct_row("phone#4c410523#20190505"), + table.direct_row("phone#5c10102#20190501"), + table.direct_row("phone#5c10102#20190502"), + ] + + rows[0].set_cell("stats_summary", "connected_cell", 1, timestamp) + rows[0].set_cell("stats_summary", "connected_wifi", 1, timestamp) + rows[0].set_cell("stats_summary", "os_build", "PQ2A.190405.003", timestamp) + rows[1].set_cell("stats_summary", "connected_cell", 1, timestamp) + rows[1].set_cell("stats_summary", "connected_wifi", 1, timestamp) + rows[1].set_cell("stats_summary", "os_build", "PQ2A.190405.004", timestamp) + rows[2].set_cell("stats_summary", "connected_cell", 0, timestamp) + rows[2].set_cell("stats_summary", "connected_wifi", 1, timestamp) + rows[2].set_cell("stats_summary", "os_build", "PQ2A.190406.000", timestamp) + rows[3].set_cell("stats_summary", "connected_cell", 1, timestamp) + rows[3].set_cell("stats_summary", "connected_wifi", 1, timestamp) + rows[3].set_cell("stats_summary", "os_build", "PQ2A.190401.002", timestamp) + rows[4].set_cell("stats_summary", "connected_cell", 1, timestamp) + rows[4].set_cell("stats_summary", "connected_wifi", 0, timestamp) + rows[4].set_cell("stats_summary", "os_build", "PQ2A.190406.000", timestamp) 
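+        # Apply all five writes in a single batch; the fixture then yields the
+        # table id so the read snippets below run against this populated table.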
+ + table.mutate_rows(rows) + + yield TABLE_ID def test_read_row(capsys, table_id): diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/writes_test.py b/packages/google-cloud-bigtable/samples/snippets/writes/writes_test.py index 77ae883d609e..2c7a3d62b162 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/writes_test.py +++ b/packages/google-cloud-bigtable/samples/snippets/writes/writes_test.py @@ -13,48 +13,27 @@ # limitations under the License. import os -import uuid import backoff from google.api_core.exceptions import DeadlineExceeded -from google.cloud import bigtable import pytest +import uuid from .write_batch import write_batch from .write_conditionally import write_conditional from .write_increment import write_increment from .write_simple import write_simple - +from ...utils import create_table_cm PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"] -TABLE_ID_PREFIX = "mobile-time-series-{}" - - -@pytest.fixture -def bigtable_client(): - return bigtable.Client(project=PROJECT, admin=True) +TABLE_ID = f"mobile-time-series-writes-{str(uuid.uuid4())[:16]}" @pytest.fixture -def bigtable_instance(bigtable_client): - return bigtable_client.instance(BIGTABLE_INSTANCE) - - -@pytest.fixture -def table_id(bigtable_instance): - table_id = TABLE_ID_PREFIX.format(str(uuid.uuid4())[:16]) - table = bigtable_instance.table(table_id) - if table.exists(): - table.delete() - - column_family_id = "stats_summary" - column_families = {column_family_id: None} - table.create(column_families=column_families) - - yield table_id - - table.delete() +def table_id(): + with create_table_cm(PROJECT, BIGTABLE_INSTANCE, TABLE_ID, {"stats_summary": None}): + yield TABLE_ID def test_writes(capsys, table_id): diff --git a/packages/google-cloud-bigtable/samples/tableadmin/__init__.py b/packages/google-cloud-bigtable/samples/tableadmin/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/packages/google-cloud-bigtable/samples/tableadmin/tableadmin.py b/packages/google-cloud-bigtable/samples/tableadmin/tableadmin.py index 7c28601fb075..ad00e57887c3 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/tableadmin.py +++ b/packages/google-cloud-bigtable/samples/tableadmin/tableadmin.py @@ -35,36 +35,7 @@ from google.cloud import bigtable from google.cloud.bigtable import column_family - - -def create_table(project_id, instance_id, table_id): - """Create a Bigtable table - - :type project_id: str - :param project_id: Project id of the client. - - :type instance_id: str - :param instance_id: Instance of the client. - - :type table_id: str - :param table_id: Table id to create table. - """ - - client = bigtable.Client(project=project_id, admin=True) - instance = client.instance(instance_id) - table = instance.table(table_id) - - # Check whether table exists in an instance. - # Create table if it does not exists. - print("Checking if table {} exists...".format(table_id)) - if table.exists(): - print("Table {} already exists.".format(table_id)) - else: - print("Creating the {} table.".format(table_id)) - table.create() - print("Created table {}.".format(table_id)) - - return client, instance, table +from ..utils import create_table_cm def run_table_operations(project_id, instance_id, table_id): @@ -80,154 +51,155 @@ def run_table_operations(project_id, instance_id, table_id): :param table_id: Table id to create table. 
""" - client, instance, table = create_table(project_id, instance_id, table_id) + client = bigtable.Client(project=project_id, admin=True) + instance = client.instance(instance_id) + with create_table_cm(project_id, instance_id, table_id, verbose=False) as table: + # [START bigtable_list_tables] + tables = instance.list_tables() + print("Listing tables in current project...") + if tables != []: + for tbl in tables: + print(tbl.table_id) + else: + print("No table exists in current project...") + # [END bigtable_list_tables] + + # [START bigtable_create_family_gc_max_age] + print("Creating column family cf1 with with MaxAge GC Rule...") + # Create a column family with GC policy : maximum age + # where age = current time minus cell timestamp + + # Define the GC rule to retain data with max age of 5 days + max_age_rule = column_family.MaxAgeGCRule(datetime.timedelta(days=5)) + + column_family1 = table.column_family("cf1", max_age_rule) + column_family1.create() + print("Created column family cf1 with MaxAge GC Rule.") + # [END bigtable_create_family_gc_max_age] + + # [START bigtable_create_family_gc_max_versions] + print("Creating column family cf2 with max versions GC rule...") + # Create a column family with GC policy : most recent N versions + # where 1 = most recent version + + # Define the GC policy to retain only the most recent 2 versions + max_versions_rule = column_family.MaxVersionsGCRule(2) + + column_family2 = table.column_family("cf2", max_versions_rule) + column_family2.create() + print("Created column family cf2 with Max Versions GC Rule.") + # [END bigtable_create_family_gc_max_versions] + + # [START bigtable_create_family_gc_union] + print("Creating column family cf3 with union GC rule...") + # Create a column family with GC policy to drop data that matches + # at least one condition. 
+ # Define a GC rule to drop cells older than 5 days or not the + # most recent version + union_rule = column_family.GCRuleUnion( + [ + column_family.MaxAgeGCRule(datetime.timedelta(days=5)), + column_family.MaxVersionsGCRule(2), + ] + ) - # [START bigtable_list_tables] - tables = instance.list_tables() - print("Listing tables in current project...") - if tables != []: - for tbl in tables: - print(tbl.table_id) - else: - print("No table exists in current project...") - # [END bigtable_list_tables] - - # [START bigtable_create_family_gc_max_age] - print("Creating column family cf1 with with MaxAge GC Rule...") - # Create a column family with GC policy : maximum age - # where age = current time minus cell timestamp - - # Define the GC rule to retain data with max age of 5 days - max_age_rule = column_family.MaxAgeGCRule(datetime.timedelta(days=5)) - - column_family1 = table.column_family("cf1", max_age_rule) - column_family1.create() - print("Created column family cf1 with MaxAge GC Rule.") - # [END bigtable_create_family_gc_max_age] - - # [START bigtable_create_family_gc_max_versions] - print("Creating column family cf2 with max versions GC rule...") - # Create a column family with GC policy : most recent N versions - # where 1 = most recent version - - # Define the GC policy to retain only the most recent 2 versions - max_versions_rule = column_family.MaxVersionsGCRule(2) - - column_family2 = table.column_family("cf2", max_versions_rule) - column_family2.create() - print("Created column family cf2 with Max Versions GC Rule.") - # [END bigtable_create_family_gc_max_versions] - - # [START bigtable_create_family_gc_union] - print("Creating column family cf3 with union GC rule...") - # Create a column family with GC policy to drop data that matches - # at least one condition. 
- # Define a GC rule to drop cells older than 5 days or not the - # most recent version - union_rule = column_family.GCRuleUnion( - [ - column_family.MaxAgeGCRule(datetime.timedelta(days=5)), - column_family.MaxVersionsGCRule(2), - ] - ) + column_family3 = table.column_family("cf3", union_rule) + column_family3.create() + print("Created column family cf3 with Union GC rule") + # [END bigtable_create_family_gc_union] + + # [START bigtable_create_family_gc_intersection] + print("Creating column family cf4 with Intersection GC rule...") + # Create a column family with GC policy to drop data that matches + # all conditions + # GC rule: Drop cells older than 5 days AND older than the most + # recent 2 versions + intersection_rule = column_family.GCRuleIntersection( + [ + column_family.MaxAgeGCRule(datetime.timedelta(days=5)), + column_family.MaxVersionsGCRule(2), + ] + ) - column_family3 = table.column_family("cf3", union_rule) - column_family3.create() - print("Created column family cf3 with Union GC rule") - # [END bigtable_create_family_gc_union] - - # [START bigtable_create_family_gc_intersection] - print("Creating column family cf4 with Intersection GC rule...") - # Create a column family with GC policy to drop data that matches - # all conditions - # GC rule: Drop cells older than 5 days AND older than the most - # recent 2 versions - intersection_rule = column_family.GCRuleIntersection( - [ - column_family.MaxAgeGCRule(datetime.timedelta(days=5)), - column_family.MaxVersionsGCRule(2), - ] - ) + column_family4 = table.column_family("cf4", intersection_rule) + column_family4.create() + print("Created column family cf4 with Intersection GC rule.") + # [END bigtable_create_family_gc_intersection] + + # [START bigtable_create_family_gc_nested] + print("Creating column family cf5 with a Nested GC rule...") + # Create a column family with nested GC policies. + # Create a nested GC rule: + # Drop cells that are either older than the 10 recent versions + # OR + # Drop cells that are older than a month AND older than the + # 2 recent versions + rule1 = column_family.MaxVersionsGCRule(10) + rule2 = column_family.GCRuleIntersection( + [ + column_family.MaxAgeGCRule(datetime.timedelta(days=30)), + column_family.MaxVersionsGCRule(2), + ] + ) - column_family4 = table.column_family("cf4", intersection_rule) - column_family4.create() - print("Created column family cf4 with Intersection GC rule.") - # [END bigtable_create_family_gc_intersection] - - # [START bigtable_create_family_gc_nested] - print("Creating column family cf5 with a Nested GC rule...") - # Create a column family with nested GC policies. 
- # Create a nested GC rule: - # Drop cells that are either older than the 10 recent versions - # OR - # Drop cells that are older than a month AND older than the - # 2 recent versions - rule1 = column_family.MaxVersionsGCRule(10) - rule2 = column_family.GCRuleIntersection( - [ - column_family.MaxAgeGCRule(datetime.timedelta(days=30)), - column_family.MaxVersionsGCRule(2), - ] - ) + nested_rule = column_family.GCRuleUnion([rule1, rule2]) + + column_family5 = table.column_family("cf5", nested_rule) + column_family5.create() + print("Created column family cf5 with a Nested GC rule.") + # [END bigtable_create_family_gc_nested] + + # [START bigtable_list_column_families] + print("Printing Column Family and GC Rule for all column families...") + column_families = table.list_column_families() + for column_family_name, gc_rule in sorted(column_families.items()): + print("Column Family:", column_family_name) + print("GC Rule:") + print(gc_rule.to_pb()) + # Sample output: + # Column Family: cf4 + # GC Rule: + # gc_rule { + # intersection { + # rules { + # max_age { + # seconds: 432000 + # } + # } + # rules { + # max_num_versions: 2 + # } + # } + # } + # [END bigtable_list_column_families] + + print("Print column family cf1 GC rule before update...") + print("Column Family: cf1") + print(column_family1.to_pb()) + + # [START bigtable_update_gc_rule] + print("Updating column family cf1 GC rule...") + # Update the column family cf1 to update the GC rule + column_family1 = table.column_family("cf1", column_family.MaxVersionsGCRule(1)) + column_family1.update() + print("Updated column family cf1 GC rule\n") + # [END bigtable_update_gc_rule] + + print("Print column family cf1 GC rule after update...") + print("Column Family: cf1") + print(column_family1.to_pb()) + + # [START bigtable_delete_family] + print("Delete a column family cf2...") + # Delete a column family + column_family2.delete() + print("Column family cf2 deleted successfully.") + # [END bigtable_delete_family] - nested_rule = column_family.GCRuleUnion([rule1, rule2]) - - column_family5 = table.column_family("cf5", nested_rule) - column_family5.create() - print("Created column family cf5 with a Nested GC rule.") - # [END bigtable_create_family_gc_nested] - - # [START bigtable_list_column_families] - print("Printing Column Family and GC Rule for all column families...") - column_families = table.list_column_families() - for column_family_name, gc_rule in sorted(column_families.items()): - print("Column Family:", column_family_name) - print("GC Rule:") - print(gc_rule.to_pb()) - # Sample output: - # Column Family: cf4 - # GC Rule: - # gc_rule { - # intersection { - # rules { - # max_age { - # seconds: 432000 - # } - # } - # rules { - # max_num_versions: 2 - # } - # } - # } - # [END bigtable_list_column_families] - - print("Print column family cf1 GC rule before update...") - print("Column Family: cf1") - print(column_family1.to_pb()) - - # [START bigtable_update_gc_rule] - print("Updating column family cf1 GC rule...") - # Update the column family cf1 to update the GC rule - column_family1 = table.column_family("cf1", column_family.MaxVersionsGCRule(1)) - column_family1.update() - print("Updated column family cf1 GC rule\n") - # [END bigtable_update_gc_rule] - - print("Print column family cf1 GC rule after update...") - print("Column Family: cf1") - print(column_family1.to_pb()) - - # [START bigtable_delete_family] - print("Delete a column family cf2...") - # Delete a column family - column_family2.delete() - print("Column family cf2 deleted 
successfully.") - # [END bigtable_delete_family] - - print( - 'execute command "python tableadmin.py delete [project_id] \ - [instance_id] --table [tableName]" to delete the table.' - ) + print( + 'execute command "python tableadmin.py delete [project_id] \ + [instance_id] --table [tableName]" to delete the table.' + ) def delete_table(project_id, instance_id, table_id): diff --git a/packages/google-cloud-bigtable/samples/tableadmin/tableadmin_test.py b/packages/google-cloud-bigtable/samples/tableadmin/tableadmin_test.py index 3063eee9fb06..0ffdc75c9066 100755 --- a/packages/google-cloud-bigtable/samples/tableadmin/tableadmin_test.py +++ b/packages/google-cloud-bigtable/samples/tableadmin/tableadmin_test.py @@ -14,29 +14,25 @@ # limitations under the License. import os -import uuid - -from google.api_core import exceptions from test_utils.retry import RetryErrors +from google.api_core import exceptions +import uuid -from tableadmin import create_table -from tableadmin import delete_table -from tableadmin import run_table_operations +from .tableadmin import delete_table +from .tableadmin import run_table_operations +from ..utils import create_table_cm PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"] -TABLE_ID_FORMAT = "tableadmin-test-{}" +TABLE_ID = f"tableadmin-test-{str(uuid.uuid4())[:16]}" retry_429_503 = RetryErrors(exceptions.TooManyRequests, exceptions.ServiceUnavailable) def test_run_table_operations(capsys): - table_id = TABLE_ID_FORMAT.format(uuid.uuid4().hex[:8]) - - retry_429_503(run_table_operations)(PROJECT, BIGTABLE_INSTANCE, table_id) + retry_429_503(run_table_operations)(PROJECT, BIGTABLE_INSTANCE, TABLE_ID) out, _ = capsys.readouterr() - assert "Creating the " + table_id + " table." in out assert "Listing tables in current project." in out assert "Creating column family cf1 with with MaxAge GC Rule" in out assert "Created column family cf1 with MaxAge GC Rule." in out @@ -53,14 +49,11 @@ def test_run_table_operations(capsys): assert "Delete a column family cf2..." in out assert "Column family cf2 deleted successfully." in out - retry_429_503(delete_table)(PROJECT, BIGTABLE_INSTANCE, table_id) - def test_delete_table(capsys): - table_id = TABLE_ID_FORMAT.format(uuid.uuid4().hex[:8]) - retry_429_503(create_table)(PROJECT, BIGTABLE_INSTANCE, table_id) - - retry_429_503(delete_table)(PROJECT, BIGTABLE_INSTANCE, table_id) + table_id = f"table-admin-to-delete-{str(uuid.uuid4())[:16]}" + with create_table_cm(PROJECT, BIGTABLE_INSTANCE, table_id, verbose=False): + delete_table(PROJECT, BIGTABLE_INSTANCE, table_id) out, _ = capsys.readouterr() assert "Table " + table_id + " exists." in out diff --git a/packages/google-cloud-bigtable/samples/utils.py b/packages/google-cloud-bigtable/samples/utils.py new file mode 100644 index 000000000000..eb0ca68f9af3 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/utils.py @@ -0,0 +1,87 @@ +# Copyright 2024, Google LLC +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +Provides helper logic used across samples +""" + + +from google.cloud import bigtable +from google.api_core import exceptions +from google.api_core.retry import Retry +from google.api_core.retry import if_exception_type + +delete_retry = Retry(if_exception_type(exceptions.TooManyRequests, exceptions.ServiceUnavailable)) + +class create_table_cm: + """ + Create a new table using a context manager, to ensure that table.delete() is called to clean up + the table, even if an exception is thrown + """ + def __init__(self, *args, verbose=True, **kwargs): + self._args = args + self._kwargs = kwargs + self._verbose = verbose + + def __enter__(self): + self._table = create_table(*self._args, **self._kwargs) + if self._verbose: + print(f"created table: {self._table.table_id}") + return self._table + + def __exit__(self, *args): + if self._table.exists(): + if self._verbose: + print(f"deleting table: {self._table.table_id}") + delete_retry(self._table.delete()) + else: + if self._verbose: + print(f"table {self._table.table_id} not found") + + +def create_table(project, instance_id, table_id, column_families={}): + """ + Creates a new table, and blocks until it reaches a ready state + """ + client = bigtable.Client(project=project, admin=True) + instance = client.instance(instance_id) + + table = instance.table(table_id) + if table.exists(): + table.delete() + + kwargs = {} + if column_families: + kwargs["column_families"] = column_families + table.create(**kwargs) + + wait_for_table(table) + + return table + +@Retry( + on_error=if_exception_type( + exceptions.PreconditionFailed, + exceptions.FailedPrecondition, + exceptions.NotFound, + ), + timeout=120, +) +def wait_for_table(table): + """ + raises an exception if the table does not exist or is not ready to use + + Because this method is wrapped with an api_core.Retry decorator, it will + retry with backoff if the table is not ready + """ + if not table.exists(): + raise exceptions.NotFound \ No newline at end of file From 7e391c6070976816674c67af6c087f5514a86c9e Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Tue, 12 Nov 2024 13:45:26 -0800 Subject: [PATCH 832/892] chore(python): enable checks for python 3.13 (#1039) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore(python): remove obsolete release scripts and config files Source-Link: https://github.com/googleapis/synthtool/commit/635751753776b1a7cabd4dcaa48013a96274372d Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:91d0075c6f2fd6a073a06168feee19fa2a8507692f2519a1dc7de3366d157e99 * added 3.13 to noxfile * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot Co-authored-by: Daniel Sanche --- .../.github/.OwlBot.lock.yaml | 4 +- .../.github/release-trigger.yml | 1 + .../.github/workflows/unittest.yml | 2 +- .../.kokoro/docker/docs/requirements.txt | 42 +++++++++---------- .../.kokoro/docs/common.cfg | 2 +- .../.kokoro/samples/python3.13/common.cfg | 40 ++++++++++++++++++ .../.kokoro/samples/python3.13/continuous.cfg | 6 +++ .../samples/python3.13/periodic-head.cfg | 11 +++++ .../.kokoro/samples/python3.13/periodic.cfg | 6 +++ .../.kokoro/samples/python3.13/presubmit.cfg | 6 +++ .../.kokoro/test-samples-impl.sh | 3 +- .../google-cloud-bigtable/CONTRIBUTING.rst | 6 ++- packages/google-cloud-bigtable/noxfile.py | 14 +++++-- 
.../samples/beam/noxfile.py | 2 +- .../samples/hello/noxfile.py | 2 +- .../samples/hello_happybase/noxfile.py | 2 +- .../samples/instanceadmin/noxfile.py | 2 +- .../samples/metricscaler/noxfile.py | 2 +- .../samples/quickstart/noxfile.py | 2 +- .../samples/quickstart_happybase/noxfile.py | 2 +- .../samples/snippets/data_client/noxfile.py | 2 +- .../samples/snippets/deletes/noxfile.py | 2 +- .../samples/snippets/filters/noxfile.py | 2 +- .../samples/snippets/reads/noxfile.py | 2 +- .../samples/snippets/writes/noxfile.py | 2 +- .../samples/tableadmin/noxfile.py | 2 +- .../testing/constraints-3.13.txt | 0 27 files changed, 125 insertions(+), 44 deletions(-) create mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.13/common.cfg create mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.13/continuous.cfg create mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.13/periodic-head.cfg create mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.13/periodic.cfg create mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.13/presubmit.cfg create mode 100644 packages/google-cloud-bigtable/testing/constraints-3.13.txt diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 597e0c3261ca..b2770d4e0379 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:e8dcfd7cbfd8beac3a3ff8d3f3185287ea0625d859168cc80faccfc9a7a00455 -# created: 2024-09-16T21:04:09.091105552Z + digest: sha256:91d0075c6f2fd6a073a06168feee19fa2a8507692f2519a1dc7de3366d157e99 +# created: 2024-11-11T16:13:09.302418532Z diff --git a/packages/google-cloud-bigtable/.github/release-trigger.yml b/packages/google-cloud-bigtable/.github/release-trigger.yml index d4ca94189e16..0bbdd8e4cabb 100644 --- a/packages/google-cloud-bigtable/.github/release-trigger.yml +++ b/packages/google-cloud-bigtable/.github/release-trigger.yml @@ -1 +1,2 @@ enabled: true +multiScmName: python-bigtable diff --git a/packages/google-cloud-bigtable/.github/workflows/unittest.yml b/packages/google-cloud-bigtable/.github/workflows/unittest.yml index 04ade4f43f9f..6eca3149c126 100644 --- a/packages/google-cloud-bigtable/.github/workflows/unittest.yml +++ b/packages/google-cloud-bigtable/.github/workflows/unittest.yml @@ -8,7 +8,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python: ['3.7', '3.8', '3.9', '3.10', '3.11', '3.12'] + python: ['3.7', '3.8', '3.9', '3.10', '3.11', '3.12', '3.13'] steps: - name: Checkout uses: actions/checkout@v4 diff --git a/packages/google-cloud-bigtable/.kokoro/docker/docs/requirements.txt b/packages/google-cloud-bigtable/.kokoro/docker/docs/requirements.txt index 7129c7715594..66eacc82f041 100644 --- a/packages/google-cloud-bigtable/.kokoro/docker/docs/requirements.txt +++ b/packages/google-cloud-bigtable/.kokoro/docker/docs/requirements.txt @@ -4,39 +4,39 @@ # # pip-compile --allow-unsafe --generate-hashes requirements.in # -argcomplete==3.4.0 \ - --hash=sha256:69a79e083a716173e5532e0fa3bef45f793f4e61096cf52b5a42c0211c8b8aa5 \ - --hash=sha256:c2abcdfe1be8ace47ba777d4fce319eb13bf8ad9dace8d085dcad6eded88057f +argcomplete==3.5.1 \ + --hash=sha256:1a1d148bdaa3e3b93454900163403df41448a248af01b6e849edc5ac08e6c363 \ + 
--hash=sha256:eb1ee355aa2557bd3d0145de7b06b2a45b0ce461e1e7813f5d066039ab4177b4 # via nox colorlog==6.8.2 \ --hash=sha256:3e3e079a41feb5a1b64f978b5ea4f46040a94f11f0e8bbb8261e3dbbeca64d44 \ --hash=sha256:4dcbb62368e2800cb3c5abd348da7e53f6c362dda502ec27c560b2e58a66bd33 # via nox -distlib==0.3.8 \ - --hash=sha256:034db59a0b96f8ca18035f36290806a9a6e6bd9d1ff91e45a7f172eb17e51784 \ - --hash=sha256:1530ea13e350031b6312d8580ddb6b27a104275a31106523b8f123787f494f64 +distlib==0.3.9 \ + --hash=sha256:47f8c22fd27c27e25a65601af709b38e4f0a45ea4fc2e710f65755fa8caaaf87 \ + --hash=sha256:a60f20dea646b8a33f3e7772f74dc0b2d0772d2837ee1342a00645c81edf9403 # via virtualenv -filelock==3.15.4 \ - --hash=sha256:2207938cbc1844345cb01a5a95524dae30f0ce089eba5b00378295a17e3e90cb \ - --hash=sha256:6ca1fffae96225dab4c6eaf1c4f4f28cd2568d3ec2a44e15a08520504de468e7 +filelock==3.16.1 \ + --hash=sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0 \ + --hash=sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435 # via virtualenv -nox==2024.4.15 \ - --hash=sha256:6492236efa15a460ecb98e7b67562a28b70da006ab0be164e8821177577c0565 \ - --hash=sha256:ecf6700199cdfa9e5ea0a41ff5e6ef4641d09508eda6edb89d9987864115817f +nox==2024.10.9 \ + --hash=sha256:1d36f309a0a2a853e9bccb76bbef6bb118ba92fa92674d15604ca99adeb29eab \ + --hash=sha256:7aa9dc8d1c27e9f45ab046ffd1c3b2c4f7c91755304769df231308849ebded95 # via -r requirements.in packaging==24.1 \ --hash=sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002 \ --hash=sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124 # via nox -platformdirs==4.2.2 \ - --hash=sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee \ - --hash=sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3 +platformdirs==4.3.6 \ + --hash=sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907 \ + --hash=sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb # via virtualenv -tomli==2.0.1 \ - --hash=sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc \ - --hash=sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f +tomli==2.0.2 \ + --hash=sha256:2ebe24485c53d303f690b0ec092806a085f07af5a5aa1464f3931eec36caaa38 \ + --hash=sha256:d46d457a85337051c36524bc5349dd91b1877838e2979ac5ced3e710ed8a60ed # via nox -virtualenv==20.26.3 \ - --hash=sha256:4c43a2a236279d9ea36a0d76f98d84bd6ca94ac4e0f4a3b9d46d05e10fea542a \ - --hash=sha256:8cc4a31139e796e9a7de2cd5cf2489de1217193116a8fd42328f1bd65f434589 +virtualenv==20.26.6 \ + --hash=sha256:280aede09a2a5c317e409a00102e7077c6432c5a38f0ef938e643805a7ad2c48 \ + --hash=sha256:7345cc5b25405607a624d8418154577459c3e0277f5466dd79c49d5e492995f2 # via nox diff --git a/packages/google-cloud-bigtable/.kokoro/docs/common.cfg b/packages/google-cloud-bigtable/.kokoro/docs/common.cfg index 9b8937c571bb..5646c98aaa6f 100644 --- a/packages/google-cloud-bigtable/.kokoro/docs/common.cfg +++ b/packages/google-cloud-bigtable/.kokoro/docs/common.cfg @@ -63,4 +63,4 @@ before_action { keyname: "docuploader_service_account" } } -} \ No newline at end of file +} diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.13/common.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.13/common.cfg new file mode 100644 index 000000000000..15ba807cb6cd --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.13/common.cfg @@ -0,0 +1,40 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be 
here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Specify which tests to run +env_vars: { + key: "RUN_TESTS_SESSION" + value: "py-3.13" +} + +# Declare build specific Cloud project. +env_vars: { + key: "BUILD_SPECIFIC_GCLOUD_PROJECT" + value: "python-docs-samples-tests-313" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-bigtable/.kokoro/test-samples.sh" +} + +# Configure the docker image for kokoro-trampoline. +env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker" +} + +# Download secrets for samples +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. +build_file: "python-bigtable/.kokoro/trampoline_v2.sh" diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.13/continuous.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.13/continuous.cfg new file mode 100644 index 000000000000..a1c8d9759c88 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.13/continuous.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.13/periodic-head.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.13/periodic-head.cfg new file mode 100644 index 000000000000..be25a34f9ad3 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.13/periodic-head.cfg @@ -0,0 +1,11 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-bigtable/.kokoro/test-samples-against-head.sh" +} diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.13/periodic.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.13/periodic.cfg new file mode 100644 index 000000000000..71cd1e597e38 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.13/periodic.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "False" +} diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.13/presubmit.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.13/presubmit.cfg new file mode 100644 index 000000000000..a1c8d9759c88 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.13/presubmit.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/test-samples-impl.sh b/packages/google-cloud-bigtable/.kokoro/test-samples-impl.sh index 55910c8ba178..53e365bc4e79 100755 --- a/packages/google-cloud-bigtable/.kokoro/test-samples-impl.sh +++ b/packages/google-cloud-bigtable/.kokoro/test-samples-impl.sh @@ -33,7 +33,8 @@ export PYTHONUNBUFFERED=1 env | grep KOKORO # Install nox -python3.9 -m pip install --upgrade --quiet nox +# `virtualenv==20.26.6` is added for Python 3.7 compatibility +python3.9 -m pip install --upgrade --quiet nox virtualenv==20.26.6 # Use secrets acessor service account to get secrets if [[ -f 
"${KOKORO_GFILE_DIR}/secrets_viewer_service_account.json" ]]; then diff --git a/packages/google-cloud-bigtable/CONTRIBUTING.rst b/packages/google-cloud-bigtable/CONTRIBUTING.rst index 947c129b765e..985538f489d5 100644 --- a/packages/google-cloud-bigtable/CONTRIBUTING.rst +++ b/packages/google-cloud-bigtable/CONTRIBUTING.rst @@ -22,7 +22,7 @@ In order to add a feature: documentation. - The feature must work fully on the following CPython versions: - 3.7, 3.8, 3.9, 3.10, 3.11 and 3.12 on both UNIX and Windows. + 3.7, 3.8, 3.9, 3.10, 3.11, 3.12 and 3.13 on both UNIX and Windows. - The feature must not add unnecessary dependencies (where "unnecessary" is of course subjective, but new dependencies should @@ -72,7 +72,7 @@ We use `nox `__ to instrument our tests. - To run a single unit test:: - $ nox -s unit-3.12 -- -k + $ nox -s unit-3.13 -- -k .. note:: @@ -227,6 +227,7 @@ We support: - `Python 3.10`_ - `Python 3.11`_ - `Python 3.12`_ +- `Python 3.13`_ .. _Python 3.7: https://docs.python.org/3.7/ .. _Python 3.8: https://docs.python.org/3.8/ @@ -234,6 +235,7 @@ We support: .. _Python 3.10: https://docs.python.org/3.10/ .. _Python 3.11: https://docs.python.org/3.11/ .. _Python 3.12: https://docs.python.org/3.12/ +.. _Python 3.13: https://docs.python.org/3.13/ Supported versions can be found in our ``noxfile.py`` `config`_. diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index 1e153efe2286..9fbc22d3aa79 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -34,7 +34,15 @@ DEFAULT_PYTHON_VERSION = "3.8" -UNIT_TEST_PYTHON_VERSIONS: List[str] = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] +UNIT_TEST_PYTHON_VERSIONS: List[str] = [ + "3.7", + "3.8", + "3.9", + "3.10", + "3.11", + "3.12", + "3.13", +] UNIT_TEST_STANDARD_DEPENDENCIES = [ "mock", "asyncmock", @@ -195,7 +203,7 @@ def install_unittest_dependencies(session, *constraints): def unit(session, protobuf_implementation): # Install all test dependencies, then install this package in-place. - if protobuf_implementation == "cpp" and session.python in ("3.11", "3.12"): + if protobuf_implementation == "cpp" and session.python in ("3.11", "3.12", "3.13"): session.skip("cpp implementation is not supported in python 3.11+") constraints_path = str( @@ -451,7 +459,7 @@ def docfx(session): def prerelease_deps(session, protobuf_implementation): """Run all tests with prerelease versions of dependencies installed.""" - if protobuf_implementation == "cpp" and session.python in ("3.11", "3.12"): + if protobuf_implementation == "cpp" and session.python in ("3.11", "3.12", "3.13"): session.skip("cpp implementation is not supported in python 3.11+") # Install all dependencies diff --git a/packages/google-cloud-bigtable/samples/beam/noxfile.py b/packages/google-cloud-bigtable/samples/beam/noxfile.py index 80ffdb178317..d0b343a9167c 100644 --- a/packages/google-cloud-bigtable/samples/beam/noxfile.py +++ b/packages/google-cloud-bigtable/samples/beam/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13"] # Any default versions that should be ignored. 
IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/packages/google-cloud-bigtable/samples/hello/noxfile.py b/packages/google-cloud-bigtable/samples/hello/noxfile.py index 483b55901791..a169b5b5b464 100644 --- a/packages/google-cloud-bigtable/samples/hello/noxfile.py +++ b/packages/google-cloud-bigtable/samples/hello/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py b/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py index 483b55901791..a169b5b5b464 100644 --- a/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py +++ b/packages/google-cloud-bigtable/samples/hello_happybase/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py b/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py index 483b55901791..a169b5b5b464 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py +++ b/packages/google-cloud-bigtable/samples/instanceadmin/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py b/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py index 483b55901791..a169b5b5b464 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py +++ b/packages/google-cloud-bigtable/samples/metricscaler/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/packages/google-cloud-bigtable/samples/quickstart/noxfile.py b/packages/google-cloud-bigtable/samples/quickstart/noxfile.py index 483b55901791..a169b5b5b464 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/noxfile.py +++ b/packages/google-cloud-bigtable/samples/quickstart/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13"] # Any default versions that should be ignored. 
IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py b/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py index 483b55901791..a169b5b5b464 100644 --- a/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py +++ b/packages/google-cloud-bigtable/samples/quickstart_happybase/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/packages/google-cloud-bigtable/samples/snippets/data_client/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/data_client/noxfile.py index 483b55901791..a169b5b5b464 100644 --- a/packages/google-cloud-bigtable/samples/snippets/data_client/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/data_client/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/deletes/noxfile.py index 483b55901791..a169b5b5b464 100644 --- a/packages/google-cloud-bigtable/samples/snippets/deletes/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py index 483b55901791..a169b5b5b464 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/filters/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py index 483b55901791..a169b5b5b464 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/reads/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13"] # Any default versions that should be ignored. 
IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py b/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py index 483b55901791..a169b5b5b464 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py +++ b/packages/google-cloud-bigtable/samples/snippets/writes/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py b/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py index 483b55901791..a169b5b5b464 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py +++ b/packages/google-cloud-bigtable/samples/tableadmin/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/packages/google-cloud-bigtable/testing/constraints-3.13.txt b/packages/google-cloud-bigtable/testing/constraints-3.13.txt new file mode 100644 index 000000000000..e69de29bb2d1 From 730abc4a65f58385b83b1939fd78c808b888ea70 Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Wed, 13 Nov 2024 11:37:12 -0800 Subject: [PATCH 833/892] chore(main): release 2.27.0 (#1025) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- .../.release-please-manifest.json | 2 +- packages/google-cloud-bigtable/CHANGELOG.md | 13 +++++++++++++ .../google/cloud/bigtable/gapic_version.py | 2 +- .../google/cloud/bigtable_admin/gapic_version.py | 2 +- .../google/cloud/bigtable_admin_v2/gapic_version.py | 2 +- .../google/cloud/bigtable_v2/gapic_version.py | 2 +- 6 files changed, 18 insertions(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/.release-please-manifest.json b/packages/google-cloud-bigtable/.release-please-manifest.json index d6de1e7f8832..2da95504a139 100644 --- a/packages/google-cloud-bigtable/.release-please-manifest.json +++ b/packages/google-cloud-bigtable/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "2.26.0" + ".": "2.27.0" } \ No newline at end of file diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 09bffa32da38..8abd58f89747 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,19 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.27.0](https://github.com/googleapis/python-bigtable/compare/v2.26.0...v2.27.0) (2024-11-12) + + +### Features + +* Add support for Cloud Bigtable Node Scaling Factor for CBT Clusters ([#1023](https://github.com/googleapis/python-bigtable/issues/1023)) ([0809c6a](https://github.com/googleapis/python-bigtable/commit/0809c6ac274e909103ad160a8bcab95f8bb46f31)) +* Surface `retry` param to `Table.read_row` api ([#982](https://github.com/googleapis/python-bigtable/issues/982)) 
([a8286d2](https://github.com/googleapis/python-bigtable/commit/a8286d2a510f654f9c270c3c761c02e4ab3817d4)) + + +### Bug Fixes + +* Registering duplicate instance ([#1033](https://github.com/googleapis/python-bigtable/issues/1033)) ([2bca8fb](https://github.com/googleapis/python-bigtable/commit/2bca8fb220eeb1906fc6a3cf1f879f3d41fbbff8)) + ## [2.26.0](https://github.com/googleapis/python-bigtable/compare/v2.25.0...v2.26.0) (2024-08-12) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py index d56eed5c5db7..f0fcebfa4138 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.26.0" # {x-release-please-version} +__version__ = "2.27.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py index d56eed5c5db7..f0fcebfa4138 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.26.0" # {x-release-please-version} +__version__ = "2.27.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py index d56eed5c5db7..f0fcebfa4138 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.26.0" # {x-release-please-version} +__version__ = "2.27.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py index d56eed5c5db7..f0fcebfa4138 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "2.26.0" # {x-release-please-version} +__version__ = "2.27.0" # {x-release-please-version} From e6a16032074d471be7c1e37cf60a91134284e195 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Wed, 13 Nov 2024 12:34:09 -0800 Subject: [PATCH 834/892] chore(python): update dependencies in .kokoro/docker/docs (#1040) Source-Link: https://github.com/googleapis/synthtool/commit/59171c8f83f3522ce186e4d110d27e772da4ba7a Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:2ed982f884312e4883e01b5ab8af8b6935f0216a5a2d82928d273081fc3be562 Co-authored-by: Owl Bot Co-authored-by: Daniel Sanche --- .../.github/.OwlBot.lock.yaml | 4 ++-- .../.kokoro/docker/docs/requirements.txt | 20 +++++++++---------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index b2770d4e0379..6301519a9a05 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:91d0075c6f2fd6a073a06168feee19fa2a8507692f2519a1dc7de3366d157e99 -# created: 2024-11-11T16:13:09.302418532Z + digest: sha256:2ed982f884312e4883e01b5ab8af8b6935f0216a5a2d82928d273081fc3be562 +# created: 2024-11-12T12:09:45.821174897Z diff --git a/packages/google-cloud-bigtable/.kokoro/docker/docs/requirements.txt b/packages/google-cloud-bigtable/.kokoro/docker/docs/requirements.txt index 66eacc82f041..8bb0764594b1 100644 --- a/packages/google-cloud-bigtable/.kokoro/docker/docs/requirements.txt +++ b/packages/google-cloud-bigtable/.kokoro/docker/docs/requirements.txt @@ -1,5 +1,5 @@ # -# This file is autogenerated by pip-compile with Python 3.9 +# This file is autogenerated by pip-compile with Python 3.10 # by the following command: # # pip-compile --allow-unsafe --generate-hashes requirements.in @@ -8,9 +8,9 @@ argcomplete==3.5.1 \ --hash=sha256:1a1d148bdaa3e3b93454900163403df41448a248af01b6e849edc5ac08e6c363 \ --hash=sha256:eb1ee355aa2557bd3d0145de7b06b2a45b0ce461e1e7813f5d066039ab4177b4 # via nox -colorlog==6.8.2 \ - --hash=sha256:3e3e079a41feb5a1b64f978b5ea4f46040a94f11f0e8bbb8261e3dbbeca64d44 \ - --hash=sha256:4dcbb62368e2800cb3c5abd348da7e53f6c362dda502ec27c560b2e58a66bd33 +colorlog==6.9.0 \ + --hash=sha256:5906e71acd67cb07a71e779c47c4bcb45fb8c2993eebe9e5adcd6a6f1b283eff \ + --hash=sha256:bfba54a1b93b94f54e1f4fe48395725a3d92fd2a4af702f6bd70946bdc0c6ac2 # via nox distlib==0.3.9 \ --hash=sha256:47f8c22fd27c27e25a65601af709b38e4f0a45ea4fc2e710f65755fa8caaaf87 \ @@ -24,9 +24,9 @@ nox==2024.10.9 \ --hash=sha256:1d36f309a0a2a853e9bccb76bbef6bb118ba92fa92674d15604ca99adeb29eab \ --hash=sha256:7aa9dc8d1c27e9f45ab046ffd1c3b2c4f7c91755304769df231308849ebded95 # via -r requirements.in -packaging==24.1 \ - --hash=sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002 \ - --hash=sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124 +packaging==24.2 \ + --hash=sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759 \ + --hash=sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f # via nox platformdirs==4.3.6 \ --hash=sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907 \ @@ -36,7 +36,7 @@ tomli==2.0.2 \ 
--hash=sha256:2ebe24485c53d303f690b0ec092806a085f07af5a5aa1464f3931eec36caaa38 \ --hash=sha256:d46d457a85337051c36524bc5349dd91b1877838e2979ac5ced3e710ed8a60ed # via nox -virtualenv==20.26.6 \ - --hash=sha256:280aede09a2a5c317e409a00102e7077c6432c5a38f0ef938e643805a7ad2c48 \ - --hash=sha256:7345cc5b25405607a624d8418154577459c3e0277f5466dd79c49d5e492995f2 +virtualenv==20.27.1 \ + --hash=sha256:142c6be10212543b32c6c45d3d3893dff89112cc588b7d0879ae5a1ec03a47ba \ + --hash=sha256:f11f1b8a29525562925f745563bfd48b189450f61fb34c4f9cc79dd5aa32a1f4 # via nox From 8a5317ff30ff01172c49e41f760e8060649c831d Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Thu, 14 Nov 2024 09:40:14 -0800 Subject: [PATCH 835/892] build: Use python 3.10 for docs session (#1042) docs test container now requires python 3.10. Related: https://github.com/googleapis/python-bigquery/pull/2058 --- packages/google-cloud-bigtable/noxfile.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index 9fbc22d3aa79..4dfebe068c40 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -367,7 +367,7 @@ def cover(session): session.run("coverage", "erase") -@nox.session(python="3.9") +@nox.session(python="3.10") def docs(session): """Build the docs for this library.""" From f88a6835c2ed4347710454d9b06e5689803631d4 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 14 Nov 2024 13:44:02 -0800 Subject: [PATCH 836/892] chore(build): use multiScm for Kokoro release builds (#1032) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * build: use multiScm for Kokoro release builds Source-Link: https://github.com/googleapis/synthtool/commit/0da16589204e7f61911f64fcb30ac2d3b6e59b31 Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:5cddfe2fb5019bbf78335bc55f15bc13e18354a56b3ff46e1834f8e540807f05 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot Co-authored-by: Daniel Sanche --- .../.github/.OwlBot.lock.yaml | 4 +- .../.github/release-trigger.yml | 2 +- .../.kokoro/docker/docs/requirements.txt | 20 +- .../google-cloud-bigtable/.kokoro/release.sh | 2 +- .../.kokoro/release/common.cfg | 8 +- .../.kokoro/requirements.txt | 610 +++++++++--------- 6 files changed, 306 insertions(+), 340 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 6301519a9a05..2fda9335f2a2 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. 
docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:2ed982f884312e4883e01b5ab8af8b6935f0216a5a2d82928d273081fc3be562 -# created: 2024-11-12T12:09:45.821174897Z + digest: sha256:5cddfe2fb5019bbf78335bc55f15bc13e18354a56b3ff46e1834f8e540807f05 +# created: 2024-10-31T01:41:07.349286254Z \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.github/release-trigger.yml b/packages/google-cloud-bigtable/.github/release-trigger.yml index 0bbdd8e4cabb..4bb79e58eadf 100644 --- a/packages/google-cloud-bigtable/.github/release-trigger.yml +++ b/packages/google-cloud-bigtable/.github/release-trigger.yml @@ -1,2 +1,2 @@ enabled: true -multiScmName: python-bigtable +multiScmName: diff --git a/packages/google-cloud-bigtable/.kokoro/docker/docs/requirements.txt b/packages/google-cloud-bigtable/.kokoro/docker/docs/requirements.txt index 8bb0764594b1..66eacc82f041 100644 --- a/packages/google-cloud-bigtable/.kokoro/docker/docs/requirements.txt +++ b/packages/google-cloud-bigtable/.kokoro/docker/docs/requirements.txt @@ -1,5 +1,5 @@ # -# This file is autogenerated by pip-compile with Python 3.10 +# This file is autogenerated by pip-compile with Python 3.9 # by the following command: # # pip-compile --allow-unsafe --generate-hashes requirements.in @@ -8,9 +8,9 @@ argcomplete==3.5.1 \ --hash=sha256:1a1d148bdaa3e3b93454900163403df41448a248af01b6e849edc5ac08e6c363 \ --hash=sha256:eb1ee355aa2557bd3d0145de7b06b2a45b0ce461e1e7813f5d066039ab4177b4 # via nox -colorlog==6.9.0 \ - --hash=sha256:5906e71acd67cb07a71e779c47c4bcb45fb8c2993eebe9e5adcd6a6f1b283eff \ - --hash=sha256:bfba54a1b93b94f54e1f4fe48395725a3d92fd2a4af702f6bd70946bdc0c6ac2 +colorlog==6.8.2 \ + --hash=sha256:3e3e079a41feb5a1b64f978b5ea4f46040a94f11f0e8bbb8261e3dbbeca64d44 \ + --hash=sha256:4dcbb62368e2800cb3c5abd348da7e53f6c362dda502ec27c560b2e58a66bd33 # via nox distlib==0.3.9 \ --hash=sha256:47f8c22fd27c27e25a65601af709b38e4f0a45ea4fc2e710f65755fa8caaaf87 \ @@ -24,9 +24,9 @@ nox==2024.10.9 \ --hash=sha256:1d36f309a0a2a853e9bccb76bbef6bb118ba92fa92674d15604ca99adeb29eab \ --hash=sha256:7aa9dc8d1c27e9f45ab046ffd1c3b2c4f7c91755304769df231308849ebded95 # via -r requirements.in -packaging==24.2 \ - --hash=sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759 \ - --hash=sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f +packaging==24.1 \ + --hash=sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002 \ + --hash=sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124 # via nox platformdirs==4.3.6 \ --hash=sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907 \ @@ -36,7 +36,7 @@ tomli==2.0.2 \ --hash=sha256:2ebe24485c53d303f690b0ec092806a085f07af5a5aa1464f3931eec36caaa38 \ --hash=sha256:d46d457a85337051c36524bc5349dd91b1877838e2979ac5ced3e710ed8a60ed # via nox -virtualenv==20.27.1 \ - --hash=sha256:142c6be10212543b32c6c45d3d3893dff89112cc588b7d0879ae5a1ec03a47ba \ - --hash=sha256:f11f1b8a29525562925f745563bfd48b189450f61fb34c4f9cc79dd5aa32a1f4 +virtualenv==20.26.6 \ + --hash=sha256:280aede09a2a5c317e409a00102e7077c6432c5a38f0ef938e643805a7ad2c48 \ + --hash=sha256:7345cc5b25405607a624d8418154577459c3e0277f5466dd79c49d5e492995f2 # via nox diff --git a/packages/google-cloud-bigtable/.kokoro/release.sh b/packages/google-cloud-bigtable/.kokoro/release.sh index cfc431647232..4f0d14588cba 100755 --- a/packages/google-cloud-bigtable/.kokoro/release.sh +++ b/packages/google-cloud-bigtable/.kokoro/release.sh @@ -23,7 +23,7 @@ 
python3 -m releasetool publish-reporter-script > /tmp/publisher-script; source / export PYTHONUNBUFFERED=1 # Move into the package, build the distribution and upload. -TWINE_PASSWORD=$(cat "${KOKORO_KEYSTORE_DIR}/73713_google-cloud-pypi-token-keystore-2") +TWINE_PASSWORD=$(cat "${KOKORO_KEYSTORE_DIR}/73713_google-cloud-pypi-token-keystore-3") cd github/python-bigtable python3 setup.py sdist bdist_wheel twine upload --username __token__ --password "${TWINE_PASSWORD}" dist/* diff --git a/packages/google-cloud-bigtable/.kokoro/release/common.cfg b/packages/google-cloud-bigtable/.kokoro/release/common.cfg index b79e3a67dc9f..6b4c17d34467 100644 --- a/packages/google-cloud-bigtable/.kokoro/release/common.cfg +++ b/packages/google-cloud-bigtable/.kokoro/release/common.cfg @@ -28,17 +28,11 @@ before_action { fetch_keystore { keystore_resource { keystore_config_id: 73713 - keyname: "google-cloud-pypi-token-keystore-2" + keyname: "google-cloud-pypi-token-keystore-3" } } } -# Tokens needed to report release status back to GitHub -env_vars: { - key: "SECRET_MANAGER_KEYS" - value: "releasetool-publish-reporter-app,releasetool-publish-reporter-googleapis-installation,releasetool-publish-reporter-pem" -} - # Store the packages we uploaded to PyPI. That way, we have a record of exactly # what we published, which we can use to generate SBOMs and attestations. action { diff --git a/packages/google-cloud-bigtable/.kokoro/requirements.txt b/packages/google-cloud-bigtable/.kokoro/requirements.txt index 9622baf0ba38..006d8ef931bf 100644 --- a/packages/google-cloud-bigtable/.kokoro/requirements.txt +++ b/packages/google-cloud-bigtable/.kokoro/requirements.txt @@ -4,79 +4,94 @@ # # pip-compile --allow-unsafe --generate-hashes requirements.in # -argcomplete==3.4.0 \ - --hash=sha256:69a79e083a716173e5532e0fa3bef45f793f4e61096cf52b5a42c0211c8b8aa5 \ - --hash=sha256:c2abcdfe1be8ace47ba777d4fce319eb13bf8ad9dace8d085dcad6eded88057f +argcomplete==3.5.1 \ + --hash=sha256:1a1d148bdaa3e3b93454900163403df41448a248af01b6e849edc5ac08e6c363 \ + --hash=sha256:eb1ee355aa2557bd3d0145de7b06b2a45b0ce461e1e7813f5d066039ab4177b4 # via nox -attrs==23.2.0 \ - --hash=sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30 \ - --hash=sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1 +attrs==24.2.0 \ + --hash=sha256:5cfb1b9148b5b086569baec03f20d7b6bf3bcacc9a42bebf87ffaaca362f6346 \ + --hash=sha256:81921eb96de3191c8258c199618104dd27ac608d9366f5e35d011eae1867ede2 # via gcp-releasetool backports-tarfile==1.2.0 \ --hash=sha256:77e284d754527b01fb1e6fa8a1afe577858ebe4e9dad8919e34c862cb399bc34 \ --hash=sha256:d75e02c268746e1b8144c278978b6e98e85de6ad16f8e4b0844a154557eca991 # via jaraco-context -cachetools==5.3.3 \ - --hash=sha256:0abad1021d3f8325b2fc1d2e9c8b9c9d57b04c3932657a72465447332c24d945 \ - --hash=sha256:ba29e2dfa0b8b556606f097407ed1aa62080ee108ab0dc5ec9d6a723a007d105 +cachetools==5.5.0 \ + --hash=sha256:02134e8439cdc2ffb62023ce1debca2944c3f289d66bb17ead3ab3dede74b292 \ + --hash=sha256:2cc24fb4cbe39633fb7badd9db9ca6295d766d9c2995f245725a46715d050f2a # via google-auth -certifi==2024.7.4 \ - --hash=sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b \ - --hash=sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90 +certifi==2024.8.30 \ + --hash=sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8 \ + --hash=sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9 # via requests -cffi==1.16.0 \ - 
--hash=sha256:0c9ef6ff37e974b73c25eecc13952c55bceed9112be2d9d938ded8e856138bcc \ - --hash=sha256:131fd094d1065b19540c3d72594260f118b231090295d8c34e19a7bbcf2e860a \ - --hash=sha256:1b8ebc27c014c59692bb2664c7d13ce7a6e9a629be20e54e7271fa696ff2b417 \ - --hash=sha256:2c56b361916f390cd758a57f2e16233eb4f64bcbeee88a4881ea90fca14dc6ab \ - --hash=sha256:2d92b25dbf6cae33f65005baf472d2c245c050b1ce709cc4588cdcdd5495b520 \ - --hash=sha256:31d13b0f99e0836b7ff893d37af07366ebc90b678b6664c955b54561fc36ef36 \ - --hash=sha256:32c68ef735dbe5857c810328cb2481e24722a59a2003018885514d4c09af9743 \ - --hash=sha256:3686dffb02459559c74dd3d81748269ffb0eb027c39a6fc99502de37d501faa8 \ - --hash=sha256:582215a0e9adbe0e379761260553ba11c58943e4bbe9c36430c4ca6ac74b15ed \ - --hash=sha256:5b50bf3f55561dac5438f8e70bfcdfd74543fd60df5fa5f62d94e5867deca684 \ - --hash=sha256:5bf44d66cdf9e893637896c7faa22298baebcd18d1ddb6d2626a6e39793a1d56 \ - --hash=sha256:6602bc8dc6f3a9e02b6c22c4fc1e47aa50f8f8e6d3f78a5e16ac33ef5fefa324 \ - --hash=sha256:673739cb539f8cdaa07d92d02efa93c9ccf87e345b9a0b556e3ecc666718468d \ - --hash=sha256:68678abf380b42ce21a5f2abde8efee05c114c2fdb2e9eef2efdb0257fba1235 \ - --hash=sha256:68e7c44931cc171c54ccb702482e9fc723192e88d25a0e133edd7aff8fcd1f6e \ - --hash=sha256:6b3d6606d369fc1da4fd8c357d026317fbb9c9b75d36dc16e90e84c26854b088 \ - --hash=sha256:748dcd1e3d3d7cd5443ef03ce8685043294ad6bd7c02a38d1bd367cfd968e000 \ - --hash=sha256:7651c50c8c5ef7bdb41108b7b8c5a83013bfaa8a935590c5d74627c047a583c7 \ - --hash=sha256:7b78010e7b97fef4bee1e896df8a4bbb6712b7f05b7ef630f9d1da00f6444d2e \ - --hash=sha256:7e61e3e4fa664a8588aa25c883eab612a188c725755afff6289454d6362b9673 \ - --hash=sha256:80876338e19c951fdfed6198e70bc88f1c9758b94578d5a7c4c91a87af3cf31c \ - --hash=sha256:8895613bcc094d4a1b2dbe179d88d7fb4a15cee43c052e8885783fac397d91fe \ - --hash=sha256:88e2b3c14bdb32e440be531ade29d3c50a1a59cd4e51b1dd8b0865c54ea5d2e2 \ - --hash=sha256:8f8e709127c6c77446a8c0a8c8bf3c8ee706a06cd44b1e827c3e6a2ee6b8c098 \ - --hash=sha256:9cb4a35b3642fc5c005a6755a5d17c6c8b6bcb6981baf81cea8bfbc8903e8ba8 \ - --hash=sha256:9f90389693731ff1f659e55c7d1640e2ec43ff725cc61b04b2f9c6d8d017df6a \ - --hash=sha256:a09582f178759ee8128d9270cd1344154fd473bb77d94ce0aeb2a93ebf0feaf0 \ - --hash=sha256:a6a14b17d7e17fa0d207ac08642c8820f84f25ce17a442fd15e27ea18d67c59b \ - --hash=sha256:a72e8961a86d19bdb45851d8f1f08b041ea37d2bd8d4fd19903bc3083d80c896 \ - --hash=sha256:abd808f9c129ba2beda4cfc53bde801e5bcf9d6e0f22f095e45327c038bfe68e \ - --hash=sha256:ac0f5edd2360eea2f1daa9e26a41db02dd4b0451b48f7c318e217ee092a213e9 \ - --hash=sha256:b29ebffcf550f9da55bec9e02ad430c992a87e5f512cd63388abb76f1036d8d2 \ - --hash=sha256:b2ca4e77f9f47c55c194982e10f058db063937845bb2b7a86c84a6cfe0aefa8b \ - --hash=sha256:b7be2d771cdba2942e13215c4e340bfd76398e9227ad10402a8767ab1865d2e6 \ - --hash=sha256:b84834d0cf97e7d27dd5b7f3aca7b6e9263c56308ab9dc8aae9784abb774d404 \ - --hash=sha256:b86851a328eedc692acf81fb05444bdf1891747c25af7529e39ddafaf68a4f3f \ - --hash=sha256:bcb3ef43e58665bbda2fb198698fcae6776483e0c4a631aa5647806c25e02cc0 \ - --hash=sha256:c0f31130ebc2d37cdd8e44605fb5fa7ad59049298b3f745c74fa74c62fbfcfc4 \ - --hash=sha256:c6a164aa47843fb1b01e941d385aab7215563bb8816d80ff3a363a9f8448a8dc \ - --hash=sha256:d8a9d3ebe49f084ad71f9269834ceccbf398253c9fac910c4fd7053ff1386936 \ - --hash=sha256:db8e577c19c0fda0beb7e0d4e09e0ba74b1e4c092e0e40bfa12fe05b6f6d75ba \ - --hash=sha256:dc9b18bf40cc75f66f40a7379f6a9513244fe33c0e8aa72e2d56b0196a7ef872 \ - 
--hash=sha256:e09f3ff613345df5e8c3667da1d918f9149bd623cd9070c983c013792a9a62eb \ - --hash=sha256:e4108df7fe9b707191e55f33efbcb2d81928e10cea45527879a4749cbe472614 \ - --hash=sha256:e6024675e67af929088fda399b2094574609396b1decb609c55fa58b028a32a1 \ - --hash=sha256:e70f54f1796669ef691ca07d046cd81a29cb4deb1e5f942003f401c0c4a2695d \ - --hash=sha256:e715596e683d2ce000574bae5d07bd522c781a822866c20495e52520564f0969 \ - --hash=sha256:e760191dd42581e023a68b758769e2da259b5d52e3103c6060ddc02c9edb8d7b \ - --hash=sha256:ed86a35631f7bfbb28e108dd96773b9d5a6ce4811cf6ea468bb6a359b256b1e4 \ - --hash=sha256:ee07e47c12890ef248766a6e55bd38ebfb2bb8edd4142d56db91b21ea68b7627 \ - --hash=sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956 \ - --hash=sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357 +cffi==1.17.1 \ + --hash=sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8 \ + --hash=sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2 \ + --hash=sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1 \ + --hash=sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15 \ + --hash=sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36 \ + --hash=sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824 \ + --hash=sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8 \ + --hash=sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36 \ + --hash=sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17 \ + --hash=sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf \ + --hash=sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc \ + --hash=sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3 \ + --hash=sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed \ + --hash=sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702 \ + --hash=sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1 \ + --hash=sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8 \ + --hash=sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903 \ + --hash=sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6 \ + --hash=sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d \ + --hash=sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b \ + --hash=sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e \ + --hash=sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be \ + --hash=sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c \ + --hash=sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683 \ + --hash=sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9 \ + --hash=sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c \ + --hash=sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8 \ + --hash=sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1 \ + --hash=sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4 \ + --hash=sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655 \ + --hash=sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67 \ + --hash=sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595 \ + 
--hash=sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0 \ + --hash=sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65 \ + --hash=sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41 \ + --hash=sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6 \ + --hash=sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401 \ + --hash=sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6 \ + --hash=sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3 \ + --hash=sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16 \ + --hash=sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93 \ + --hash=sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e \ + --hash=sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4 \ + --hash=sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964 \ + --hash=sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c \ + --hash=sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576 \ + --hash=sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0 \ + --hash=sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3 \ + --hash=sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662 \ + --hash=sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3 \ + --hash=sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff \ + --hash=sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5 \ + --hash=sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd \ + --hash=sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f \ + --hash=sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5 \ + --hash=sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14 \ + --hash=sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d \ + --hash=sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9 \ + --hash=sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7 \ + --hash=sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382 \ + --hash=sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a \ + --hash=sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e \ + --hash=sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a \ + --hash=sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4 \ + --hash=sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99 \ + --hash=sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87 \ + --hash=sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b # via cryptography charset-normalizer==2.1.1 \ --hash=sha256:5a3d016c7c547f69d6f81fb0db9449ce888b418b5b9952cc5e6e66843e9dd845 \ @@ -97,72 +112,67 @@ colorlog==6.8.2 \ # via # gcp-docuploader # nox -cryptography==42.0.8 \ - --hash=sha256:013629ae70b40af70c9a7a5db40abe5d9054e6f4380e50ce769947b73bf3caad \ - --hash=sha256:2346b911eb349ab547076f47f2e035fc8ff2c02380a7cbbf8d87114fa0f1c583 \ - --hash=sha256:2f66d9cd9147ee495a8374a45ca445819f8929a3efcd2e3df6428e46c3cbb10b \ - --hash=sha256:2f88d197e66c65be5e42cd72e5c18afbfae3f741742070e3019ac8f4ac57262c \ - --hash=sha256:31f721658a29331f895a5a54e7e82075554ccfb8b163a18719d342f5ffe5ecb1 \ - 
--hash=sha256:343728aac38decfdeecf55ecab3264b015be68fc2816ca800db649607aeee648 \ - --hash=sha256:5226d5d21ab681f432a9c1cf8b658c0cb02533eece706b155e5fbd8a0cdd3949 \ - --hash=sha256:57080dee41209e556a9a4ce60d229244f7a66ef52750f813bfbe18959770cfba \ - --hash=sha256:5a94eccb2a81a309806027e1670a358b99b8fe8bfe9f8d329f27d72c094dde8c \ - --hash=sha256:6b7c4f03ce01afd3b76cf69a5455caa9cfa3de8c8f493e0d3ab7d20611c8dae9 \ - --hash=sha256:7016f837e15b0a1c119d27ecd89b3515f01f90a8615ed5e9427e30d9cdbfed3d \ - --hash=sha256:81884c4d096c272f00aeb1f11cf62ccd39763581645b0812e99a91505fa48e0c \ - --hash=sha256:81d8a521705787afe7a18d5bfb47ea9d9cc068206270aad0b96a725022e18d2e \ - --hash=sha256:8d09d05439ce7baa8e9e95b07ec5b6c886f548deb7e0f69ef25f64b3bce842f2 \ - --hash=sha256:961e61cefdcb06e0c6d7e3a1b22ebe8b996eb2bf50614e89384be54c48c6b63d \ - --hash=sha256:9c0c1716c8447ee7dbf08d6db2e5c41c688544c61074b54fc4564196f55c25a7 \ - --hash=sha256:a0608251135d0e03111152e41f0cc2392d1e74e35703960d4190b2e0f4ca9c70 \ - --hash=sha256:a0c5b2b0585b6af82d7e385f55a8bc568abff8923af147ee3c07bd8b42cda8b2 \ - --hash=sha256:ad803773e9df0b92e0a817d22fd8a3675493f690b96130a5e24f1b8fabbea9c7 \ - --hash=sha256:b297f90c5723d04bcc8265fc2a0f86d4ea2e0f7ab4b6994459548d3a6b992a14 \ - --hash=sha256:ba4f0a211697362e89ad822e667d8d340b4d8d55fae72cdd619389fb5912eefe \ - --hash=sha256:c4783183f7cb757b73b2ae9aed6599b96338eb957233c58ca8f49a49cc32fd5e \ - --hash=sha256:c9bb2ae11bfbab395bdd072985abde58ea9860ed84e59dbc0463a5d0159f5b71 \ - --hash=sha256:cafb92b2bc622cd1aa6a1dce4b93307792633f4c5fe1f46c6b97cf67073ec961 \ - --hash=sha256:d45b940883a03e19e944456a558b67a41160e367a719833c53de6911cabba2b7 \ - --hash=sha256:dc0fdf6787f37b1c6b08e6dfc892d9d068b5bdb671198c72072828b80bd5fe4c \ - --hash=sha256:dea567d1b0e8bc5764b9443858b673b734100c2871dc93163f58c46a97a83d28 \ - --hash=sha256:dec9b018df185f08483f294cae6ccac29e7a6e0678996587363dc352dc65c842 \ - --hash=sha256:e3ec3672626e1b9e55afd0df6d774ff0e953452886e06e0f1eb7eb0c832e8902 \ - --hash=sha256:e599b53fd95357d92304510fb7bda8523ed1f79ca98dce2f43c115950aa78801 \ - --hash=sha256:fa76fbb7596cc5839320000cdd5d0955313696d9511debab7ee7278fc8b5c84a \ - --hash=sha256:fff12c88a672ab9c9c1cf7b0c80e3ad9e2ebd9d828d955c126be4fd3e5578c9e +cryptography==43.0.1 \ + --hash=sha256:014f58110f53237ace6a408b5beb6c427b64e084eb451ef25a28308270086494 \ + --hash=sha256:1bbcce1a551e262dfbafb6e6252f1ae36a248e615ca44ba302df077a846a8806 \ + --hash=sha256:203e92a75716d8cfb491dc47c79e17d0d9207ccffcbcb35f598fbe463ae3444d \ + --hash=sha256:27e613d7077ac613e399270253259d9d53872aaf657471473ebfc9a52935c062 \ + --hash=sha256:2bd51274dcd59f09dd952afb696bf9c61a7a49dfc764c04dd33ef7a6b502a1e2 \ + --hash=sha256:38926c50cff6f533f8a2dae3d7f19541432610d114a70808f0926d5aaa7121e4 \ + --hash=sha256:511f4273808ab590912a93ddb4e3914dfd8a388fed883361b02dea3791f292e1 \ + --hash=sha256:58d4e9129985185a06d849aa6df265bdd5a74ca6e1b736a77959b498e0505b85 \ + --hash=sha256:5b43d1ea6b378b54a1dc99dd8a2b5be47658fe9a7ce0a58ff0b55f4b43ef2b84 \ + --hash=sha256:61ec41068b7b74268fa86e3e9e12b9f0c21fcf65434571dbb13d954bceb08042 \ + --hash=sha256:666ae11966643886c2987b3b721899d250855718d6d9ce41b521252a17985f4d \ + --hash=sha256:68aaecc4178e90719e95298515979814bda0cbada1256a4485414860bd7ab962 \ + --hash=sha256:7c05650fe8023c5ed0d46793d4b7d7e6cd9c04e68eabe5b0aeea836e37bdcec2 \ + --hash=sha256:80eda8b3e173f0f247f711eef62be51b599b5d425c429b5d4ca6a05e9e856baa \ + --hash=sha256:8385d98f6a3bf8bb2d65a73e17ed87a3ba84f6991c155691c51112075f9ffc5d \ + 
--hash=sha256:88cce104c36870d70c49c7c8fd22885875d950d9ee6ab54df2745f83ba0dc365 \ + --hash=sha256:9d3cdb25fa98afdd3d0892d132b8d7139e2c087da1712041f6b762e4f807cc96 \ + --hash=sha256:a575913fb06e05e6b4b814d7f7468c2c660e8bb16d8d5a1faf9b33ccc569dd47 \ + --hash=sha256:ac119bb76b9faa00f48128b7f5679e1d8d437365c5d26f1c2c3f0da4ce1b553d \ + --hash=sha256:c1332724be35d23a854994ff0b66530119500b6053d0bd3363265f7e5e77288d \ + --hash=sha256:d03a475165f3134f773d1388aeb19c2d25ba88b6a9733c5c590b9ff7bbfa2e0c \ + --hash=sha256:d75601ad10b059ec832e78823b348bfa1a59f6b8d545db3a24fd44362a1564cb \ + --hash=sha256:de41fd81a41e53267cb020bb3a7212861da53a7d39f863585d13ea11049cf277 \ + --hash=sha256:e710bf40870f4db63c3d7d929aa9e09e4e7ee219e703f949ec4073b4294f6172 \ + --hash=sha256:ea25acb556320250756e53f9e20a4177515f012c9eaea17eb7587a8c4d8ae034 \ + --hash=sha256:f98bf604c82c416bc829e490c700ca1553eafdf2912a91e23a79d97d9801372a \ + --hash=sha256:fba1007b3ef89946dbbb515aeeb41e30203b004f0b4b00e5e16078b518563289 # via # -r requirements.in # gcp-releasetool # secretstorage -distlib==0.3.8 \ - --hash=sha256:034db59a0b96f8ca18035f36290806a9a6e6bd9d1ff91e45a7f172eb17e51784 \ - --hash=sha256:1530ea13e350031b6312d8580ddb6b27a104275a31106523b8f123787f494f64 +distlib==0.3.9 \ + --hash=sha256:47f8c22fd27c27e25a65601af709b38e4f0a45ea4fc2e710f65755fa8caaaf87 \ + --hash=sha256:a60f20dea646b8a33f3e7772f74dc0b2d0772d2837ee1342a00645c81edf9403 # via virtualenv docutils==0.21.2 \ --hash=sha256:3a6b18732edf182daa3cd12775bbb338cf5691468f91eeeb109deff6ebfa986f \ --hash=sha256:dafca5b9e384f0e419294eb4d2ff9fa826435bf15f15b7bd45723e8ad76811b2 # via readme-renderer -filelock==3.15.4 \ - --hash=sha256:2207938cbc1844345cb01a5a95524dae30f0ce089eba5b00378295a17e3e90cb \ - --hash=sha256:6ca1fffae96225dab4c6eaf1c4f4f28cd2568d3ec2a44e15a08520504de468e7 +filelock==3.16.1 \ + --hash=sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0 \ + --hash=sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435 # via virtualenv gcp-docuploader==0.6.5 \ --hash=sha256:30221d4ac3e5a2b9c69aa52fdbef68cc3f27d0e6d0d90e220fc024584b8d2318 \ --hash=sha256:b7458ef93f605b9d46a4bf3a8dc1755dad1f31d030c8679edf304e343b347eea # via -r requirements.in -gcp-releasetool==2.0.1 \ - --hash=sha256:34314a910c08e8911d9c965bd44f8f2185c4f556e737d719c33a41f6a610de96 \ - --hash=sha256:b0d5863c6a070702b10883d37c4bdfd74bf930fe417f36c0c965d3b7c779ae62 +gcp-releasetool==2.1.1 \ + --hash=sha256:25639269f4eae510094f9dbed9894977e1966933211eb155a451deebc3fc0b30 \ + --hash=sha256:845f4ded3d9bfe8cc7fdaad789e83f4ea014affa77785259a7ddac4b243e099e # via -r requirements.in -google-api-core==2.19.1 \ - --hash=sha256:f12a9b8309b5e21d92483bbd47ce2c445861ec7d269ef6784ecc0ea8c1fa6125 \ - --hash=sha256:f4695f1e3650b316a795108a76a1c416e6afb036199d1c1f1f110916df479ffd +google-api-core==2.21.0 \ + --hash=sha256:4a152fd11a9f774ea606388d423b68aa7e6d6a0ffe4c8266f74979613ec09f81 \ + --hash=sha256:6869eacb2a37720380ba5898312af79a4d30b8bca1548fb4093e0697dc4bdf5d # via # google-cloud-core # google-cloud-storage -google-auth==2.31.0 \ - --hash=sha256:042c4702efa9f7d3c48d3a69341c209381b125faa6dbf3ebe56bc7e40ae05c23 \ - --hash=sha256:87805c36970047247c8afe614d4e3af8eceafc1ebba0c679fe75ddd1d575e871 +google-auth==2.35.0 \ + --hash=sha256:25df55f327ef021de8be50bad0dfd4a916ad0de96da86cd05661c9297723ad3f \ + --hash=sha256:f4c64ed4e01e8e8b646ef34c018f8bf3338df0c8e37d8b3bba40e7f574a3278a # via # gcp-releasetool # google-api-core @@ -172,97 +182,56 @@ google-cloud-core==2.4.1 \ 
--hash=sha256:9b7749272a812bde58fff28868d0c5e2f585b82f37e09a1f6ed2d4d10f134073 \ --hash=sha256:a9e6a4422b9ac5c29f79a0ede9485473338e2ce78d91f2370c01e730eab22e61 # via google-cloud-storage -google-cloud-storage==2.17.0 \ - --hash=sha256:49378abff54ef656b52dca5ef0f2eba9aa83dc2b2c72c78714b03a1a95fe9388 \ - --hash=sha256:5b393bc766b7a3bc6f5407b9e665b2450d36282614b7945e570b3480a456d1e1 +google-cloud-storage==2.18.2 \ + --hash=sha256:97a4d45c368b7d401ed48c4fdfe86e1e1cb96401c9e199e419d289e2c0370166 \ + --hash=sha256:aaf7acd70cdad9f274d29332673fcab98708d0e1f4dceb5a5356aaef06af4d99 # via gcp-docuploader -google-crc32c==1.5.0 \ - --hash=sha256:024894d9d3cfbc5943f8f230e23950cd4906b2fe004c72e29b209420a1e6b05a \ - --hash=sha256:02c65b9817512edc6a4ae7c7e987fea799d2e0ee40c53ec573a692bee24de876 \ - --hash=sha256:02ebb8bf46c13e36998aeaad1de9b48f4caf545e91d14041270d9dca767b780c \ - --hash=sha256:07eb3c611ce363c51a933bf6bd7f8e3878a51d124acfc89452a75120bc436289 \ - --hash=sha256:1034d91442ead5a95b5aaef90dbfaca8633b0247d1e41621d1e9f9db88c36298 \ - --hash=sha256:116a7c3c616dd14a3de8c64a965828b197e5f2d121fedd2f8c5585c547e87b02 \ - --hash=sha256:19e0a019d2c4dcc5e598cd4a4bc7b008546b0358bd322537c74ad47a5386884f \ - --hash=sha256:1c7abdac90433b09bad6c43a43af253e688c9cfc1c86d332aed13f9a7c7f65e2 \ - --hash=sha256:1e986b206dae4476f41bcec1faa057851f3889503a70e1bdb2378d406223994a \ - --hash=sha256:272d3892a1e1a2dbc39cc5cde96834c236d5327e2122d3aaa19f6614531bb6eb \ - --hash=sha256:278d2ed7c16cfc075c91378c4f47924c0625f5fc84b2d50d921b18b7975bd210 \ - --hash=sha256:2ad40e31093a4af319dadf503b2467ccdc8f67c72e4bcba97f8c10cb078207b5 \ - --hash=sha256:2e920d506ec85eb4ba50cd4228c2bec05642894d4c73c59b3a2fe20346bd00ee \ - --hash=sha256:3359fc442a743e870f4588fcf5dcbc1bf929df1fad8fb9905cd94e5edb02e84c \ - --hash=sha256:37933ec6e693e51a5b07505bd05de57eee12f3e8c32b07da7e73669398e6630a \ - --hash=sha256:398af5e3ba9cf768787eef45c803ff9614cc3e22a5b2f7d7ae116df8b11e3314 \ - --hash=sha256:3b747a674c20a67343cb61d43fdd9207ce5da6a99f629c6e2541aa0e89215bcd \ - --hash=sha256:461665ff58895f508e2866824a47bdee72497b091c730071f2b7575d5762ab65 \ - --hash=sha256:4c6fdd4fccbec90cc8a01fc00773fcd5fa28db683c116ee3cb35cd5da9ef6c37 \ - --hash=sha256:5829b792bf5822fd0a6f6eb34c5f81dd074f01d570ed7f36aa101d6fc7a0a6e4 \ - --hash=sha256:596d1f98fc70232fcb6590c439f43b350cb762fb5d61ce7b0e9db4539654cc13 \ - --hash=sha256:5ae44e10a8e3407dbe138984f21e536583f2bba1be9491239f942c2464ac0894 \ - --hash=sha256:635f5d4dd18758a1fbd1049a8e8d2fee4ffed124462d837d1a02a0e009c3ab31 \ - --hash=sha256:64e52e2b3970bd891309c113b54cf0e4384762c934d5ae56e283f9a0afcd953e \ - --hash=sha256:66741ef4ee08ea0b2cc3c86916ab66b6aef03768525627fd6a1b34968b4e3709 \ - --hash=sha256:67b741654b851abafb7bc625b6d1cdd520a379074e64b6a128e3b688c3c04740 \ - --hash=sha256:6ac08d24c1f16bd2bf5eca8eaf8304812f44af5cfe5062006ec676e7e1d50afc \ - --hash=sha256:6f998db4e71b645350b9ac28a2167e6632c239963ca9da411523bb439c5c514d \ - --hash=sha256:72218785ce41b9cfd2fc1d6a017dc1ff7acfc4c17d01053265c41a2c0cc39b8c \ - --hash=sha256:74dea7751d98034887dbd821b7aae3e1d36eda111d6ca36c206c44478035709c \ - --hash=sha256:759ce4851a4bb15ecabae28f4d2e18983c244eddd767f560165563bf9aefbc8d \ - --hash=sha256:77e2fd3057c9d78e225fa0a2160f96b64a824de17840351b26825b0848022906 \ - --hash=sha256:7c074fece789b5034b9b1404a1f8208fc2d4c6ce9decdd16e8220c5a793e6f61 \ - --hash=sha256:7c42c70cd1d362284289c6273adda4c6af8039a8ae12dc451dcd61cdabb8ab57 \ - --hash=sha256:7f57f14606cd1dd0f0de396e1e53824c371e9544a822648cd76c034d209b559c \ - 
--hash=sha256:83c681c526a3439b5cf94f7420471705bbf96262f49a6fe546a6db5f687a3d4a \ - --hash=sha256:8485b340a6a9e76c62a7dce3c98e5f102c9219f4cfbf896a00cf48caf078d438 \ - --hash=sha256:84e6e8cd997930fc66d5bb4fde61e2b62ba19d62b7abd7a69920406f9ecca946 \ - --hash=sha256:89284716bc6a5a415d4eaa11b1726d2d60a0cd12aadf5439828353662ede9dd7 \ - --hash=sha256:8b87e1a59c38f275c0e3676fc2ab6d59eccecfd460be267ac360cc31f7bcde96 \ - --hash=sha256:8f24ed114432de109aa9fd317278518a5af2d31ac2ea6b952b2f7782b43da091 \ - --hash=sha256:98cb4d057f285bd80d8778ebc4fde6b4d509ac3f331758fb1528b733215443ae \ - --hash=sha256:998679bf62b7fb599d2878aa3ed06b9ce688b8974893e7223c60db155f26bd8d \ - --hash=sha256:9ba053c5f50430a3fcfd36f75aff9caeba0440b2d076afdb79a318d6ca245f88 \ - --hash=sha256:9c99616c853bb585301df6de07ca2cadad344fd1ada6d62bb30aec05219c45d2 \ - --hash=sha256:a1fd716e7a01f8e717490fbe2e431d2905ab8aa598b9b12f8d10abebb36b04dd \ - --hash=sha256:a2355cba1f4ad8b6988a4ca3feed5bff33f6af2d7f134852cf279c2aebfde541 \ - --hash=sha256:b1f8133c9a275df5613a451e73f36c2aea4fe13c5c8997e22cf355ebd7bd0728 \ - --hash=sha256:b8667b48e7a7ef66afba2c81e1094ef526388d35b873966d8a9a447974ed9178 \ - --hash=sha256:ba1eb1843304b1e5537e1fca632fa894d6f6deca8d6389636ee5b4797affb968 \ - --hash=sha256:be82c3c8cfb15b30f36768797a640e800513793d6ae1724aaaafe5bf86f8f346 \ - --hash=sha256:c02ec1c5856179f171e032a31d6f8bf84e5a75c45c33b2e20a3de353b266ebd8 \ - --hash=sha256:c672d99a345849301784604bfeaeba4db0c7aae50b95be04dd651fd2a7310b93 \ - --hash=sha256:c6c777a480337ac14f38564ac88ae82d4cd238bf293f0a22295b66eb89ffced7 \ - --hash=sha256:cae0274952c079886567f3f4f685bcaf5708f0a23a5f5216fdab71f81a6c0273 \ - --hash=sha256:cd67cf24a553339d5062eff51013780a00d6f97a39ca062781d06b3a73b15462 \ - --hash=sha256:d3515f198eaa2f0ed49f8819d5732d70698c3fa37384146079b3799b97667a94 \ - --hash=sha256:d5280312b9af0976231f9e317c20e4a61cd2f9629b7bfea6a693d1878a264ebd \ - --hash=sha256:de06adc872bcd8c2a4e0dc51250e9e65ef2ca91be023b9d13ebd67c2ba552e1e \ - --hash=sha256:e1674e4307fa3024fc897ca774e9c7562c957af85df55efe2988ed9056dc4e57 \ - --hash=sha256:e2096eddb4e7c7bdae4bd69ad364e55e07b8316653234a56552d9c988bd2d61b \ - --hash=sha256:e560628513ed34759456a416bf86b54b2476c59144a9138165c9a1575801d0d9 \ - --hash=sha256:edfedb64740750e1a3b16152620220f51d58ff1b4abceb339ca92e934775c27a \ - --hash=sha256:f13cae8cc389a440def0c8c52057f37359014ccbc9dc1f0827936bcd367c6100 \ - --hash=sha256:f314013e7dcd5cf45ab1945d92e713eec788166262ae8deb2cfacd53def27325 \ - --hash=sha256:f583edb943cf2e09c60441b910d6a20b4d9d626c75a36c8fcac01a6c96c01183 \ - --hash=sha256:fd8536e902db7e365f49e7d9029283403974ccf29b13fc7028b97e2295b33556 \ - --hash=sha256:fe70e325aa68fa4b5edf7d1a4b6f691eb04bbccac0ace68e34820d283b5f80d4 +google-crc32c==1.6.0 \ + --hash=sha256:05e2d8c9a2f853ff116db9706b4a27350587f341eda835f46db3c0a8c8ce2f24 \ + --hash=sha256:18e311c64008f1f1379158158bb3f0c8d72635b9eb4f9545f8cf990c5668e59d \ + --hash=sha256:236c87a46cdf06384f614e9092b82c05f81bd34b80248021f729396a78e55d7e \ + --hash=sha256:35834855408429cecf495cac67ccbab802de269e948e27478b1e47dfb6465e57 \ + --hash=sha256:386122eeaaa76951a8196310432c5b0ef3b53590ef4c317ec7588ec554fec5d2 \ + --hash=sha256:40b05ab32a5067525670880eb5d169529089a26fe35dce8891127aeddc1950e8 \ + --hash=sha256:48abd62ca76a2cbe034542ed1b6aee851b6f28aaca4e6551b5599b6f3ef175cc \ + --hash=sha256:50cf2a96da226dcbff8671233ecf37bf6e95de98b2a2ebadbfdf455e6d05df42 \ + --hash=sha256:51c4f54dd8c6dfeb58d1df5e4f7f97df8abf17a36626a217f169893d1d7f3e9f \ + 
--hash=sha256:5bcc90b34df28a4b38653c36bb5ada35671ad105c99cfe915fb5bed7ad6924aa \ + --hash=sha256:62f6d4a29fea082ac4a3c9be5e415218255cf11684ac6ef5488eea0c9132689b \ + --hash=sha256:6eceb6ad197656a1ff49ebfbbfa870678c75be4344feb35ac1edf694309413dc \ + --hash=sha256:7aec8e88a3583515f9e0957fe4f5f6d8d4997e36d0f61624e70469771584c760 \ + --hash=sha256:91ca8145b060679ec9176e6de4f89b07363d6805bd4760631ef254905503598d \ + --hash=sha256:a184243544811e4a50d345838a883733461e67578959ac59964e43cca2c791e7 \ + --hash=sha256:a9e4b426c3702f3cd23b933436487eb34e01e00327fac20c9aebb68ccf34117d \ + --hash=sha256:bb0966e1c50d0ef5bc743312cc730b533491d60585a9a08f897274e57c3f70e0 \ + --hash=sha256:bb8b3c75bd157010459b15222c3fd30577042a7060e29d42dabce449c087f2b3 \ + --hash=sha256:bd5e7d2445d1a958c266bfa5d04c39932dc54093fa391736dbfdb0f1929c1fb3 \ + --hash=sha256:c87d98c7c4a69066fd31701c4e10d178a648c2cac3452e62c6b24dc51f9fcc00 \ + --hash=sha256:d2952396dc604544ea7476b33fe87faedc24d666fb0c2d5ac971a2b9576ab871 \ + --hash=sha256:d8797406499f28b5ef791f339594b0b5fdedf54e203b5066675c406ba69d705c \ + --hash=sha256:d9e9913f7bd69e093b81da4535ce27af842e7bf371cde42d1ae9e9bd382dc0e9 \ + --hash=sha256:e2806553238cd076f0a55bddab37a532b53580e699ed8e5606d0de1f856b5205 \ + --hash=sha256:ebab974b1687509e5c973b5c4b8b146683e101e102e17a86bd196ecaa4d099fc \ + --hash=sha256:ed767bf4ba90104c1216b68111613f0d5926fb3780660ea1198fc469af410e9d \ + --hash=sha256:f7a1fc29803712f80879b0806cb83ab24ce62fc8daf0569f2204a0cfd7f68ed4 # via # google-cloud-storage # google-resumable-media -google-resumable-media==2.7.1 \ - --hash=sha256:103ebc4ba331ab1bfdac0250f8033627a2cd7cde09e7ccff9181e31ba4315b2c \ - --hash=sha256:eae451a7b2e2cdbaaa0fd2eb00cc8a1ee5e95e16b55597359cbc3d27d7d90e33 +google-resumable-media==2.7.2 \ + --hash=sha256:3ce7551e9fe6d99e9a126101d2536612bb73486721951e9562fee0f90c6ababa \ + --hash=sha256:5280aed4629f2b60b847b0d42f9857fd4935c11af266744df33d8074cae92fe0 # via google-cloud-storage -googleapis-common-protos==1.63.2 \ - --hash=sha256:27a2499c7e8aff199665b22741997e485eccc8645aa9176c7c988e6fae507945 \ - --hash=sha256:27c5abdffc4911f28101e635de1533fb4cfd2c37fbaa9174587c799fac90aa87 +googleapis-common-protos==1.65.0 \ + --hash=sha256:2972e6c496f435b92590fd54045060867f3fe9be2c82ab148fc8885035479a63 \ + --hash=sha256:334a29d07cddc3aa01dee4988f9afd9b2916ee2ff49d6b757155dc0d197852c0 # via google-api-core -idna==3.7 \ - --hash=sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc \ - --hash=sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0 +idna==3.10 \ + --hash=sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9 \ + --hash=sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3 # via requests -importlib-metadata==8.0.0 \ - --hash=sha256:15584cf2b1bf449d98ff8a6ff1abef57bf20f3ac6454f431736cd3e660921b2f \ - --hash=sha256:188bd24e4c346d3f0a933f275c2fec67050326a856b9a359881d7c2a697e8812 +importlib-metadata==8.5.0 \ + --hash=sha256:45e54197d28b7a7f1559e60b95e7c567032b602131fbd588f1497f47880aa68b \ + --hash=sha256:71522656f0abace1d072b9e5481a48f07c138e00f079c38c8f883823f9c26bd7 # via # -r requirements.in # keyring @@ -271,13 +240,13 @@ jaraco-classes==3.4.0 \ --hash=sha256:47a024b51d0239c0dd8c8540c6c7f484be3b8fcf0b2d85c13825780d3b3f3acd \ --hash=sha256:f662826b6bed8cace05e7ff873ce0f9283b5c924470fe664fff1c2f00f581790 # via keyring -jaraco-context==5.3.0 \ - --hash=sha256:3e16388f7da43d384a1a7cd3452e72e14732ac9fe459678773a3608a812bf266 \ - 
--hash=sha256:c2f67165ce1f9be20f32f650f25d8edfc1646a8aeee48ae06fb35f90763576d2 +jaraco-context==6.0.1 \ + --hash=sha256:9bae4ea555cf0b14938dc0aee7c9f32ed303aa20a3b73e7dc80111628792d1b3 \ + --hash=sha256:f797fc481b490edb305122c9181830a3a5b76d84ef6d1aef2fb9b47ab956f9e4 # via keyring -jaraco-functools==4.0.1 \ - --hash=sha256:3b24ccb921d6b593bdceb56ce14799204f473976e2a9d4b15b04d0f2c2326664 \ - --hash=sha256:d33fa765374c0611b52f8b3a795f8900869aa88c84769d4d1746cd68fb28c3e8 +jaraco-functools==4.1.0 \ + --hash=sha256:70f7e0e2ae076498e212562325e805204fc092d7b4c17e0e86c959e249701a9d \ + --hash=sha256:ad159f13428bc4acbf5541ad6dec511f91573b90fba04df61dafa2a1231cf649 # via keyring jeepney==0.8.0 \ --hash=sha256:5efe48d255973902f6badc3ce55e2aa6c5c3b3bc642059ef3a91247bcfcc5806 \ @@ -289,9 +258,9 @@ jinja2==3.1.4 \ --hash=sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369 \ --hash=sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d # via gcp-releasetool -keyring==25.2.1 \ - --hash=sha256:2458681cdefc0dbc0b7eb6cf75d0b98e59f9ad9b2d4edd319d18f68bdca95e50 \ - --hash=sha256:daaffd42dbda25ddafb1ad5fec4024e5bbcfe424597ca1ca452b299861e49f1b +keyring==25.4.1 \ + --hash=sha256:5426f817cf7f6f007ba5ec722b1bcad95a75b27d780343772ad76b17cb47b0bf \ + --hash=sha256:b07ebc55f3e8ed86ac81dd31ef14e81ace9dd9c3d4b5d77a6e9a2016d0d71a1b # via # gcp-releasetool # twine @@ -299,75 +268,76 @@ markdown-it-py==3.0.0 \ --hash=sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1 \ --hash=sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb # via rich -markupsafe==2.1.5 \ - --hash=sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf \ - --hash=sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff \ - --hash=sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f \ - --hash=sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3 \ - --hash=sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532 \ - --hash=sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f \ - --hash=sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617 \ - --hash=sha256:2d2d793e36e230fd32babe143b04cec8a8b3eb8a3122d2aceb4a371e6b09b8df \ - --hash=sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4 \ - --hash=sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906 \ - --hash=sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f \ - --hash=sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4 \ - --hash=sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8 \ - --hash=sha256:4096e9de5c6fdf43fb4f04c26fb114f61ef0bf2e5604b6ee3019d51b69e8c371 \ - --hash=sha256:4275d846e41ecefa46e2015117a9f491e57a71ddd59bbead77e904dc02b1bed2 \ - --hash=sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465 \ - --hash=sha256:4f11aa001c540f62c6166c7726f71f7573b52c68c31f014c25cc7901deea0b52 \ - --hash=sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6 \ - --hash=sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169 \ - --hash=sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad \ - --hash=sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2 \ - --hash=sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0 \ - --hash=sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029 \ - 
--hash=sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f \ - --hash=sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a \ - --hash=sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced \ - --hash=sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5 \ - --hash=sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c \ - --hash=sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf \ - --hash=sha256:7b2e5a267c855eea6b4283940daa6e88a285f5f2a67f2220203786dfa59b37e9 \ - --hash=sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb \ - --hash=sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad \ - --hash=sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3 \ - --hash=sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1 \ - --hash=sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46 \ - --hash=sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc \ - --hash=sha256:a549b9c31bec33820e885335b451286e2969a2d9e24879f83fe904a5ce59d70a \ - --hash=sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee \ - --hash=sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900 \ - --hash=sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5 \ - --hash=sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea \ - --hash=sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f \ - --hash=sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5 \ - --hash=sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e \ - --hash=sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a \ - --hash=sha256:c8b29db45f8fe46ad280a7294f5c3ec36dbac9491f2d1c17345be8e69cc5928f \ - --hash=sha256:ce409136744f6521e39fd8e2a24c53fa18ad67aa5bc7c2cf83645cce5b5c4e50 \ - --hash=sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a \ - --hash=sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b \ - --hash=sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4 \ - --hash=sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff \ - --hash=sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2 \ - --hash=sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46 \ - --hash=sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b \ - --hash=sha256:ec6a563cff360b50eed26f13adc43e61bc0c04d94b8be985e6fb24b81f6dcfdf \ - --hash=sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5 \ - --hash=sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5 \ - --hash=sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab \ - --hash=sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd \ - --hash=sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68 +markupsafe==3.0.1 \ + --hash=sha256:0778de17cff1acaeccc3ff30cd99a3fd5c50fc58ad3d6c0e0c4c58092b859396 \ + --hash=sha256:0f84af7e813784feb4d5e4ff7db633aba6c8ca64a833f61d8e4eade234ef0c38 \ + --hash=sha256:17b2aea42a7280db02ac644db1d634ad47dcc96faf38ab304fe26ba2680d359a \ + --hash=sha256:242d6860f1fd9191aef5fae22b51c5c19767f93fb9ead4d21924e0bcb17619d8 \ + --hash=sha256:244dbe463d5fb6d7ce161301a03a6fe744dac9072328ba9fc82289238582697b \ + 
--hash=sha256:26627785a54a947f6d7336ce5963569b5d75614619e75193bdb4e06e21d447ad \ + --hash=sha256:2a4b34a8d14649315c4bc26bbfa352663eb51d146e35eef231dd739d54a5430a \ + --hash=sha256:2ae99f31f47d849758a687102afdd05bd3d3ff7dbab0a8f1587981b58a76152a \ + --hash=sha256:312387403cd40699ab91d50735ea7a507b788091c416dd007eac54434aee51da \ + --hash=sha256:3341c043c37d78cc5ae6e3e305e988532b072329639007fd408a476642a89fd6 \ + --hash=sha256:33d1c36b90e570ba7785dacd1faaf091203d9942bc036118fab8110a401eb1a8 \ + --hash=sha256:3e683ee4f5d0fa2dde4db77ed8dd8a876686e3fc417655c2ece9a90576905344 \ + --hash=sha256:3ffb4a8e7d46ed96ae48805746755fadd0909fea2306f93d5d8233ba23dda12a \ + --hash=sha256:40621d60d0e58aa573b68ac5e2d6b20d44392878e0bfc159012a5787c4e35bc8 \ + --hash=sha256:40f1e10d51c92859765522cbd79c5c8989f40f0419614bcdc5015e7b6bf97fc5 \ + --hash=sha256:45d42d132cff577c92bfba536aefcfea7e26efb975bd455db4e6602f5c9f45e7 \ + --hash=sha256:48488d999ed50ba8d38c581d67e496f955821dc183883550a6fbc7f1aefdc170 \ + --hash=sha256:4935dd7883f1d50e2ffecca0aa33dc1946a94c8f3fdafb8df5c330e48f71b132 \ + --hash=sha256:4c2d64fdba74ad16138300815cfdc6ab2f4647e23ced81f59e940d7d4a1469d9 \ + --hash=sha256:4c8817557d0de9349109acb38b9dd570b03cc5014e8aabf1cbddc6e81005becd \ + --hash=sha256:4ffaaac913c3f7345579db4f33b0020db693f302ca5137f106060316761beea9 \ + --hash=sha256:5a4cb365cb49b750bdb60b846b0c0bc49ed62e59a76635095a179d440540c346 \ + --hash=sha256:62fada2c942702ef8952754abfc1a9f7658a4d5460fabe95ac7ec2cbe0d02abc \ + --hash=sha256:67c519635a4f64e495c50e3107d9b4075aec33634272b5db1cde839e07367589 \ + --hash=sha256:6a54c43d3ec4cf2a39f4387ad044221c66a376e58c0d0e971d47c475ba79c6b5 \ + --hash=sha256:7044312a928a66a4c2a22644147bc61a199c1709712069a344a3fb5cfcf16915 \ + --hash=sha256:730d86af59e0e43ce277bb83970530dd223bf7f2a838e086b50affa6ec5f9295 \ + --hash=sha256:800100d45176652ded796134277ecb13640c1a537cad3b8b53da45aa96330453 \ + --hash=sha256:80fcbf3add8790caddfab6764bde258b5d09aefbe9169c183f88a7410f0f6dea \ + --hash=sha256:82b5dba6eb1bcc29cc305a18a3c5365d2af06ee71b123216416f7e20d2a84e5b \ + --hash=sha256:852dc840f6d7c985603e60b5deaae1d89c56cb038b577f6b5b8c808c97580f1d \ + --hash=sha256:8ad4ad1429cd4f315f32ef263c1342166695fad76c100c5d979c45d5570ed58b \ + --hash=sha256:8ae369e84466aa70f3154ee23c1451fda10a8ee1b63923ce76667e3077f2b0c4 \ + --hash=sha256:93e8248d650e7e9d49e8251f883eed60ecbc0e8ffd6349e18550925e31bd029b \ + --hash=sha256:973a371a55ce9ed333a3a0f8e0bcfae9e0d637711534bcb11e130af2ab9334e7 \ + --hash=sha256:9ba25a71ebf05b9bb0e2ae99f8bc08a07ee8e98c612175087112656ca0f5c8bf \ + --hash=sha256:a10860e00ded1dd0a65b83e717af28845bb7bd16d8ace40fe5531491de76b79f \ + --hash=sha256:a4792d3b3a6dfafefdf8e937f14906a51bd27025a36f4b188728a73382231d91 \ + --hash=sha256:a7420ceda262dbb4b8d839a4ec63d61c261e4e77677ed7c66c99f4e7cb5030dd \ + --hash=sha256:ad91738f14eb8da0ff82f2acd0098b6257621410dcbd4df20aaa5b4233d75a50 \ + --hash=sha256:b6a387d61fe41cdf7ea95b38e9af11cfb1a63499af2759444b99185c4ab33f5b \ + --hash=sha256:b954093679d5750495725ea6f88409946d69cfb25ea7b4c846eef5044194f583 \ + --hash=sha256:bbde71a705f8e9e4c3e9e33db69341d040c827c7afa6789b14c6e16776074f5a \ + --hash=sha256:beeebf760a9c1f4c07ef6a53465e8cfa776ea6a2021eda0d0417ec41043fe984 \ + --hash=sha256:c91b394f7601438ff79a4b93d16be92f216adb57d813a78be4446fe0f6bc2d8c \ + --hash=sha256:c97ff7fedf56d86bae92fa0a646ce1a0ec7509a7578e1ed238731ba13aabcd1c \ + --hash=sha256:cb53e2a99df28eee3b5f4fea166020d3ef9116fdc5764bc5117486e6d1211b25 \ + 
--hash=sha256:cbf445eb5628981a80f54087f9acdbf84f9b7d862756110d172993b9a5ae81aa \ + --hash=sha256:d06b24c686a34c86c8c1fba923181eae6b10565e4d80bdd7bc1c8e2f11247aa4 \ + --hash=sha256:d98e66a24497637dd31ccab090b34392dddb1f2f811c4b4cd80c230205c074a3 \ + --hash=sha256:db15ce28e1e127a0013dfb8ac243a8e392db8c61eae113337536edb28bdc1f97 \ + --hash=sha256:db842712984e91707437461930e6011e60b39136c7331e971952bb30465bc1a1 \ + --hash=sha256:e24bfe89c6ac4c31792793ad9f861b8f6dc4546ac6dc8f1c9083c7c4f2b335cd \ + --hash=sha256:e81c52638315ff4ac1b533d427f50bc0afc746deb949210bc85f05d4f15fd772 \ + --hash=sha256:e9393357f19954248b00bed7c56f29a25c930593a77630c719653d51e7669c2a \ + --hash=sha256:ee3941769bd2522fe39222206f6dd97ae83c442a94c90f2b7a25d847d40f4729 \ + --hash=sha256:f31ae06f1328595d762c9a2bf29dafd8621c7d3adc130cbb46278079758779ca \ + --hash=sha256:f94190df587738280d544971500b9cafc9b950d32efcb1fba9ac10d84e6aa4e6 \ + --hash=sha256:fa7d686ed9883f3d664d39d5a8e74d3c5f63e603c2e3ff0abcba23eac6542635 \ + --hash=sha256:fb532dd9900381d2e8f48172ddc5a59db4c445a11b9fab40b3b786da40d3b56b \ + --hash=sha256:fe32482b37b4b00c7a52a07211b479653b7fe4f22b2e481b9a9b099d8a430f2f # via jinja2 mdurl==0.1.2 \ --hash=sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8 \ --hash=sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba # via markdown-it-py -more-itertools==10.3.0 \ - --hash=sha256:e5d93ef411224fbcef366a6e8ddc4c5781bc6359d43412a65dd5964e46111463 \ - --hash=sha256:ea6a02e24a9161e51faad17a8782b92a0df82c12c1c8886fec7f0c3fa1a1b320 +more-itertools==10.5.0 \ + --hash=sha256:037b0d3203ce90cca8ab1defbbdac29d5f993fc20131f3664dc8d6acfa872aef \ + --hash=sha256:5482bfef7849c25dc3c6dd53a6173ae4795da2a41a80faea6700d9f5846c5da6 # via # jaraco-classes # jaraco-functools @@ -389,9 +359,9 @@ nh3==0.2.18 \ --hash=sha256:de3ceed6e661954871d6cd78b410213bdcb136f79aafe22aa7182e028b8c7307 \ --hash=sha256:f0eca9ca8628dbb4e916ae2491d72957fdd35f7a5d326b7032a345f111ac07fe # via readme-renderer -nox==2024.4.15 \ - --hash=sha256:6492236efa15a460ecb98e7b67562a28b70da006ab0be164e8821177577c0565 \ - --hash=sha256:ecf6700199cdfa9e5ea0a41ff5e6ef4641d09508eda6edb89d9987864115817f +nox==2024.10.9 \ + --hash=sha256:1d36f309a0a2a853e9bccb76bbef6bb118ba92fa92674d15604ca99adeb29eab \ + --hash=sha256:7aa9dc8d1c27e9f45ab046ffd1c3b2c4f7c91755304769df231308849ebded95 # via -r requirements.in packaging==24.1 \ --hash=sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002 \ @@ -403,41 +373,41 @@ pkginfo==1.10.0 \ --hash=sha256:5df73835398d10db79f8eecd5cd86b1f6d29317589ea70796994d49399af6297 \ --hash=sha256:889a6da2ed7ffc58ab5b900d888ddce90bce912f2d2de1dc1c26f4cb9fe65097 # via twine -platformdirs==4.2.2 \ - --hash=sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee \ - --hash=sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3 +platformdirs==4.3.6 \ + --hash=sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907 \ + --hash=sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb # via virtualenv proto-plus==1.24.0 \ --hash=sha256:30b72a5ecafe4406b0d339db35b56c4059064e69227b8c3bda7462397f966445 \ --hash=sha256:402576830425e5f6ce4c2a6702400ac79897dab0b4343821aa5188b0fab81a12 # via google-api-core -protobuf==5.27.2 \ - --hash=sha256:0e341109c609749d501986b835f667c6e1e24531096cff9d34ae411595e26505 \ - --hash=sha256:176c12b1f1c880bf7a76d9f7c75822b6a2bc3db2d28baa4d300e8ce4cde7409b \ - 
--hash=sha256:354d84fac2b0d76062e9b3221f4abbbacdfd2a4d8af36bab0474f3a0bb30ab38 \ - --hash=sha256:4fadd8d83e1992eed0248bc50a4a6361dc31bcccc84388c54c86e530b7f58863 \ - --hash=sha256:54330f07e4949d09614707c48b06d1a22f8ffb5763c159efd5c0928326a91470 \ - --hash=sha256:610e700f02469c4a997e58e328cac6f305f649826853813177e6290416e846c6 \ - --hash=sha256:7fc3add9e6003e026da5fc9e59b131b8f22b428b991ccd53e2af8071687b4fce \ - --hash=sha256:9e8f199bf7f97bd7ecebffcae45ebf9527603549b2b562df0fbc6d4d688f14ca \ - --hash=sha256:a109916aaac42bff84702fb5187f3edadbc7c97fc2c99c5ff81dd15dcce0d1e5 \ - --hash=sha256:b848dbe1d57ed7c191dfc4ea64b8b004a3f9ece4bf4d0d80a367b76df20bf36e \ - --hash=sha256:f3ecdef226b9af856075f28227ff2c90ce3a594d092c39bee5513573f25e2714 +protobuf==5.28.2 \ + --hash=sha256:2c69461a7fcc8e24be697624c09a839976d82ae75062b11a0972e41fd2cd9132 \ + --hash=sha256:35cfcb15f213449af7ff6198d6eb5f739c37d7e4f1c09b5d0641babf2cc0c68f \ + --hash=sha256:52235802093bd8a2811abbe8bf0ab9c5f54cca0a751fdd3f6ac2a21438bffece \ + --hash=sha256:59379674ff119717404f7454647913787034f03fe7049cbef1d74a97bb4593f0 \ + --hash=sha256:5e8a95246d581eef20471b5d5ba010d55f66740942b95ba9b872d918c459452f \ + --hash=sha256:87317e9bcda04a32f2ee82089a204d3a2f0d3c8aeed16568c7daf4756e4f1fe0 \ + --hash=sha256:8ddc60bf374785fb7cb12510b267f59067fa10087325b8e1855b898a0d81d276 \ + --hash=sha256:a8b9403fc70764b08d2f593ce44f1d2920c5077bf7d311fefec999f8c40f78b7 \ + --hash=sha256:c0ea0123dac3399a2eeb1a1443d82b7afc9ff40241433296769f7da42d142ec3 \ + --hash=sha256:ca53faf29896c526863366a52a8f4d88e69cd04ec9571ed6082fa117fac3ab36 \ + --hash=sha256:eeea10f3dc0ac7e6b4933d32db20662902b4ab81bf28df12218aa389e9c2102d # via # gcp-docuploader # gcp-releasetool # google-api-core # googleapis-common-protos # proto-plus -pyasn1==0.6.0 \ - --hash=sha256:3a35ab2c4b5ef98e17dfdec8ab074046fbda76e281c5a706ccd82328cfc8f64c \ - --hash=sha256:cca4bb0f2df5504f02f6f8a775b6e416ff9b0b3b16f7ee80b5a3153d9b804473 +pyasn1==0.6.1 \ + --hash=sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629 \ + --hash=sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034 # via # pyasn1-modules # rsa -pyasn1-modules==0.4.0 \ - --hash=sha256:831dbcea1b177b28c9baddf4c6d1013c24c3accd14a1873fffaa6a2e905f17b6 \ - --hash=sha256:be04f15b66c206eed667e0bb5ab27e2b1855ea54a842e5037738099e8ca4ae0b +pyasn1-modules==0.4.1 \ + --hash=sha256:49bfa96b45a292b711e986f222502c1c9a5e1f4e568fc30e2574a6c7d07838fd \ + --hash=sha256:c28e2dbf9c06ad61c71a075c7e0f9fd0f1b0bb2d2ad4377f240d33ac2ab60a7c # via google-auth pycparser==2.22 \ --hash=sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6 \ @@ -449,9 +419,9 @@ pygments==2.18.0 \ # via # readme-renderer # rich -pyjwt==2.8.0 \ - --hash=sha256:57e28d156e3d5c10088e0c68abb90bfac3df82b40a71bd0daa20c65ccd5c23de \ - --hash=sha256:59127c392cc44c2da5bb3192169a91f429924e17aff6534d70fdc02ab3e04320 +pyjwt==2.9.0 \ + --hash=sha256:3b02fb0f44517787776cf48f2ae25d8e14f300e6d7545a4315cee571a415e850 \ + --hash=sha256:7e1e5b56cc735432a7369cbfa0efe50fa113ebecdc04ae6922deba8b84582d0c # via gcp-releasetool pyperclip==1.9.0 \ --hash=sha256:b7de0142ddc81bfc5c7507eea19da920b92252b548b96186caf94a5e2527d310 @@ -481,9 +451,9 @@ rfc3986==2.0.0 \ --hash=sha256:50b1502b60e289cb37883f3dfd34532b8873c7de9f49bb546641ce9cbd256ebd \ --hash=sha256:97aacf9dbd4bfd829baad6e6309fa6573aaf1be3f6fa735c8ab05e46cecb261c # via twine -rich==13.7.1 \ - --hash=sha256:4edbae314f59eb482f54e9e30bf00d33350aaa94f4bfcd4e9e3110e64d0d7222 \ - 
--hash=sha256:9be308cb1fe2f1f57d67ce99e95af38a1e2bc71ad9813b0e247cf7ffbcc3a432 +rich==13.9.2 \ + --hash=sha256:51a2c62057461aaf7152b4d611168f93a9fc73068f8ded2790f29fe2b5366d0c \ + --hash=sha256:8c82a3d3f8dcfe9e734771313e606b39d8247bb6b826e196f4914b333b743cf1 # via twine rsa==4.9 \ --hash=sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7 \ @@ -499,9 +469,9 @@ six==1.16.0 \ # via # gcp-docuploader # python-dateutil -tomli==2.0.1 \ - --hash=sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc \ - --hash=sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f +tomli==2.0.2 \ + --hash=sha256:2ebe24485c53d303f690b0ec092806a085f07af5a5aa1464f3931eec36caaa38 \ + --hash=sha256:d46d457a85337051c36524bc5349dd91b1877838e2979ac5ced3e710ed8a60ed # via nox twine==5.1.1 \ --hash=sha256:215dbe7b4b94c2c50a7315c0275d2258399280fbb7d04182c7e55e24b5f93997 \ @@ -510,28 +480,30 @@ twine==5.1.1 \ typing-extensions==4.12.2 \ --hash=sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d \ --hash=sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8 - # via -r requirements.in -urllib3==2.2.2 \ - --hash=sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472 \ - --hash=sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168 + # via + # -r requirements.in + # rich +urllib3==2.2.3 \ + --hash=sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac \ + --hash=sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9 # via # requests # twine -virtualenv==20.26.3 \ - --hash=sha256:4c43a2a236279d9ea36a0d76f98d84bd6ca94ac4e0f4a3b9d46d05e10fea542a \ - --hash=sha256:8cc4a31139e796e9a7de2cd5cf2489de1217193116a8fd42328f1bd65f434589 +virtualenv==20.26.6 \ + --hash=sha256:280aede09a2a5c317e409a00102e7077c6432c5a38f0ef938e643805a7ad2c48 \ + --hash=sha256:7345cc5b25405607a624d8418154577459c3e0277f5466dd79c49d5e492995f2 # via nox -wheel==0.43.0 \ - --hash=sha256:465ef92c69fa5c5da2d1cf8ac40559a8c940886afcef87dcf14b9470862f1d85 \ - --hash=sha256:55c570405f142630c6b9f72fe09d9b67cf1477fcf543ae5b8dcb1f5b7377da81 +wheel==0.44.0 \ + --hash=sha256:2376a90c98cc337d18623527a97c31797bd02bad0033d41547043a1cbfbe448f \ + --hash=sha256:a29c3f2817e95ab89aa4660681ad547c0e9547f20e75b0562fe7723c9a2a9d49 # via -r requirements.in -zipp==3.19.2 \ - --hash=sha256:bf1dcf6450f873a13e952a29504887c89e6de7506209e5b1bcc3460135d4de19 \ - --hash=sha256:f091755f667055f2d02b32c53771a7a6c8b47e1fdbc4b72a8b9072b3eef8015c +zipp==3.20.2 \ + --hash=sha256:a817ac80d6cf4b23bf7f2828b7cabf326f15a001bea8b1f9b49631780ba28350 \ + --hash=sha256:bc9eb26f4506fda01b81bcde0ca78103b6e62f991b381fec825435c836edbc29 # via importlib-metadata # The following packages are considered to be unsafe in a requirements file: -setuptools==70.2.0 \ - --hash=sha256:b8b8060bb426838fbe942479c90296ce976249451118ef566a5a0b7d8b78fb05 \ - --hash=sha256:bd63e505105011b25c3c11f753f7e3b8465ea739efddaccef8f0efac2137bac1 +setuptools==75.1.0 \ + --hash=sha256:35ab7fd3bcd95e6b7fd704e4a1539513edad446c097797f2985e0e4b960772f2 \ + --hash=sha256:d59a21b17a275fb872a9c3dae73963160ae079f1049ed956880cd7c09b120538 # via -r requirements.in From 6cafd03ef57857db60898c5805f21e11042a4928 Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Tue, 19 Nov 2024 16:41:57 -0800 Subject: [PATCH 837/892] chore: add cross_sync annotations (#1000) --- .../.github/workflows/conformance.yaml | 4 +- .../.kokoro/conformance.sh | 3 +- .../google/cloud/bigtable/data/__init__.py | 16 +- 
.../bigtable/data/_async/_mutate_rows.py | 40 +- .../cloud/bigtable/data/_async/_read_rows.py | 46 +- .../cloud/bigtable/data/_async/client.py | 395 ++++-- .../bigtable/data/_async/mutations_batcher.py | 190 +-- .../google/cloud/bigtable/data/exceptions.py | 15 + .../bigtable/data/execute_query/__init__.py | 2 + .../_async/execute_query_iterator.py | 103 +- .../google/cloud/bigtable/data/mutations.py | 12 + packages/google-cloud-bigtable/noxfile.py | 19 +- .../test_proxy/README.md | 7 +- ...r_data.py => client_handler_data_async.py} | 29 +- .../handlers/client_handler_legacy.py | 4 +- .../test_proxy/noxfile.py | 80 -- .../test_proxy/run_tests.sh | 3 +- .../test_proxy/test_proxy.py | 24 +- .../tests/system/data/__init__.py | 3 + .../tests/system/data/setup_fixtures.py | 25 - .../system/data/test_execute_query_async.py | 283 ---- .../system/data/test_execute_query_utils.py | 295 ---- .../tests/system/data/test_system.py | 937 ------------- .../tests/system/data/test_system_async.py | 1016 ++++++++++++++ .../unit/data/_async/test__mutate_rows.py | 110 +- .../tests/unit/data/_async/test__read_rows.py | 76 +- .../tests/unit/data/_async/test_client.py | 1195 ++++++++++++----- .../data/_async/test_mutations_batcher.py | 806 +++++------ .../data/_async/test_read_rows_acceptance.py | 355 +++++ .../data/execute_query/_async/_testing.py | 36 - .../_async/test_query_iterator.py | 267 ++-- .../unit/data/test_read_rows_acceptance.py | 331 ----- 32 files changed, 3430 insertions(+), 3297 deletions(-) rename packages/google-cloud-bigtable/test_proxy/handlers/{client_handler_data.py => client_handler_data_async.py} (90%) delete mode 100644 packages/google-cloud-bigtable/test_proxy/noxfile.py delete mode 100644 packages/google-cloud-bigtable/tests/system/data/test_execute_query_async.py delete mode 100644 packages/google-cloud-bigtable/tests/system/data/test_execute_query_utils.py delete mode 100644 packages/google-cloud-bigtable/tests/system/data/test_system.py create mode 100644 packages/google-cloud-bigtable/tests/system/data/test_system_async.py create mode 100644 packages/google-cloud-bigtable/tests/unit/data/_async/test_read_rows_acceptance.py delete mode 100644 packages/google-cloud-bigtable/tests/unit/data/execute_query/_async/_testing.py delete mode 100644 packages/google-cloud-bigtable/tests/unit/data/test_read_rows_acceptance.py diff --git a/packages/google-cloud-bigtable/.github/workflows/conformance.yaml b/packages/google-cloud-bigtable/.github/workflows/conformance.yaml index 68545cbec2ec..448e1cc3a2da 100644 --- a/packages/google-cloud-bigtable/.github/workflows/conformance.yaml +++ b/packages/google-cloud-bigtable/.github/workflows/conformance.yaml @@ -26,9 +26,9 @@ jobs: matrix: test-version: [ "v0.0.2" ] py-version: [ 3.8 ] - client-type: [ "Async v3", "Legacy" ] + client-type: [ "async", "legacy" ] fail-fast: false - name: "${{ matrix.client-type }} Client / Python ${{ matrix.py-version }} / Test Tag ${{ matrix.test-version }}" + name: "${{ matrix.client-type }} client / python ${{ matrix.py-version }} / test tag ${{ matrix.test-version }}" steps: - uses: actions/checkout@v4 name: "Checkout python-bigtable" diff --git a/packages/google-cloud-bigtable/.kokoro/conformance.sh b/packages/google-cloud-bigtable/.kokoro/conformance.sh index 1c0b3ee0d876..e85fc1394cd7 100644 --- a/packages/google-cloud-bigtable/.kokoro/conformance.sh +++ b/packages/google-cloud-bigtable/.kokoro/conformance.sh @@ -23,7 +23,6 @@ PROXY_ARGS="" TEST_ARGS="" if [[ "${CLIENT_TYPE^^}" == "LEGACY" ]]; then echo "Using 
legacy client" - PROXY_ARGS="--legacy-client" # legacy client does not expose mutate_row. Disable those tests TEST_ARGS="-skip TestMutateRow_" fi @@ -31,7 +30,7 @@ fi # Build and start the proxy in a separate process PROXY_PORT=9999 pushd test_proxy -nohup python test_proxy.py --port $PROXY_PORT $PROXY_ARGS & +nohup python test_proxy.py --port $PROXY_PORT --client_type=$CLIENT_TYPE & proxyPID=$! popd diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/__init__.py index 68dc22891660..43ea69fdfa48 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/__init__.py @@ -45,16 +45,30 @@ from google.cloud.bigtable.data._helpers import RowKeySamples from google.cloud.bigtable.data._helpers import ShardedQuery +# setup custom CrossSync mappings for library +from google.cloud.bigtable_v2.services.bigtable.async_client import ( + BigtableAsyncClient, +) +from google.cloud.bigtable.data._async._read_rows import _ReadRowsOperationAsync +from google.cloud.bigtable.data._async._mutate_rows import _MutateRowsOperationAsync + +from google.cloud.bigtable.data._cross_sync import CrossSync + +CrossSync.add_mapping("GapicClient", BigtableAsyncClient) +CrossSync.add_mapping("_ReadRowsOperation", _ReadRowsOperationAsync) +CrossSync.add_mapping("_MutateRowsOperation", _MutateRowsOperationAsync) +CrossSync.add_mapping("MutationsBatcher", MutationsBatcherAsync) + __version__: str = package_version.__version__ __all__ = ( "BigtableDataClientAsync", "TableAsync", + "MutationsBatcherAsync", "RowKeySamples", "ReadRowsQuery", "RowRange", - "MutationsBatcherAsync", "Mutation", "RowMutationEntry", "SetCell", diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_mutate_rows.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_mutate_rows.py index 914cfecf475a..c5795c464417 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_mutate_rows.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_mutate_rows.py @@ -15,37 +15,38 @@ from __future__ import annotations from typing import Sequence, TYPE_CHECKING -from dataclasses import dataclass import functools from google.api_core import exceptions as core_exceptions from google.api_core import retry as retries -import google.cloud.bigtable_v2.types.bigtable as types_pb import google.cloud.bigtable.data.exceptions as bt_exceptions from google.cloud.bigtable.data._helpers import _attempt_timeout_generator from google.cloud.bigtable.data._helpers import _retry_exception_factory # mutate_rows requests are limited to this number of mutations from google.cloud.bigtable.data.mutations import _MUTATE_ROWS_REQUEST_MUTATION_LIMIT +from google.cloud.bigtable.data.mutations import _EntryWithProto + +from google.cloud.bigtable.data._cross_sync import CrossSync if TYPE_CHECKING: - from google.cloud.bigtable_v2.services.bigtable.async_client import ( - BigtableAsyncClient, - ) from google.cloud.bigtable.data.mutations import RowMutationEntry - from google.cloud.bigtable.data._async.client import TableAsync - -@dataclass -class _EntryWithProto: - """ - A dataclass to hold a RowMutationEntry and its corresponding proto representation. 
- """ + if CrossSync.is_async: + from google.cloud.bigtable_v2.services.bigtable.async_client import ( + BigtableAsyncClient as GapicClientType, + ) + from google.cloud.bigtable.data._async.client import TableAsync as TableType + else: + from google.cloud.bigtable_v2.services.bigtable.client import ( # type: ignore + BigtableClient as GapicClientType, + ) + from google.cloud.bigtable.data._sync_autogen.client import Table as TableType # type: ignore - entry: RowMutationEntry - proto: types_pb.MutateRowsRequest.Entry +__CROSS_SYNC_OUTPUT__ = "google.cloud.bigtable.data._sync_autogen._mutate_rows" +@CrossSync.convert_class("_MutateRowsOperation") class _MutateRowsOperationAsync: """ MutateRowsOperation manages the logic of sending a set of row mutations, @@ -65,10 +66,11 @@ class _MutateRowsOperationAsync: If not specified, the request will run until operation_timeout is reached. """ + @CrossSync.convert def __init__( self, - gapic_client: "BigtableAsyncClient", - table: "TableAsync", + gapic_client: GapicClientType, + table: TableType, mutation_entries: list["RowMutationEntry"], operation_timeout: float, attempt_timeout: float | None, @@ -97,7 +99,7 @@ def __init__( bt_exceptions._MutateRowsIncomplete, ) sleep_generator = retries.exponential_sleep_generator(0.01, 2, 60) - self._operation = retries.retry_target_async( + self._operation = lambda: CrossSync.retry_target( self._run_attempt, self.is_retryable, sleep_generator, @@ -112,6 +114,7 @@ def __init__( self.remaining_indices = list(range(len(self.mutations))) self.errors: dict[int, list[Exception]] = {} + @CrossSync.convert async def start(self): """ Start the operation, and run until completion @@ -121,7 +124,7 @@ async def start(self): """ try: # trigger mutate_rows - await self._operation + await self._operation() except Exception as exc: # exceptions raised by retryable are added to the list of exceptions for all unfinalized mutations incomplete_indices = self.remaining_indices.copy() @@ -148,6 +151,7 @@ async def start(self): all_errors, len(self.mutations) ) + @CrossSync.convert async def _run_attempt(self): """ Run a single attempt of the mutate_rows rpc. 
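The _mutate_rows diff above shows the general CrossSync annotation pattern this commit introduces: the async class is decorated with the name its generated sync counterpart should take, converted methods are marked with @CrossSync.convert, and __CROSS_SYNC_OUTPUT__ names the module the sync code is written to. For reference only (not part of the patch), a minimal sketch of the same pattern on a hypothetical class, using only the CrossSync helpers that appear in this commit:

# Illustrative sketch, not part of the patch. It mirrors the annotations used in
# _mutate_rows.py above; ExampleWorkerAsync and its output module path are
# invented names for the example.
from google.api_core import retry as retries
from google.cloud.bigtable.data._cross_sync import CrossSync
from google.cloud.bigtable.data._helpers import _retry_exception_factory

__CROSS_SYNC_OUTPUT__ = "google.cloud.bigtable.data._sync_autogen.example_worker"


@CrossSync.convert_class("ExampleWorker")
class ExampleWorkerAsync:
    """Async implementation; a sync ExampleWorker would be generated from it."""

    def __init__(self, operation_timeout: float = 60.0):
        self.operation_timeout = operation_timeout

    @CrossSync.convert
    async def _attempt(self) -> str:
        # a single retryable attempt; a real implementation would issue an rpc here
        return "done"

    @CrossSync.convert
    async def run(self) -> str:
        # CrossSync.retry_target resolves to the async or sync retry helper,
        # mirroring how the operation is wired up in _mutate_rows above
        return await CrossSync.retry_target(
            self._attempt,
            lambda exc: False,  # nothing is treated as retryable in this sketch
            retries.exponential_sleep_generator(0.01, 2, 60),
            self.operation_timeout,
            exception_factory=_retry_exception_factory,
        )
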
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_read_rows.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_read_rows.py index 5617e6418476..c02b3750d6fb 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_read_rows.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_read_rows.py @@ -15,13 +15,7 @@ from __future__ import annotations -from typing import ( - TYPE_CHECKING, - AsyncGenerator, - AsyncIterable, - Awaitable, - Sequence, -) +from typing import Sequence, TYPE_CHECKING from google.cloud.bigtable_v2.types import ReadRowsRequest as ReadRowsRequestPB from google.cloud.bigtable_v2.types import ReadRowsResponse as ReadRowsResponsePB @@ -32,21 +26,25 @@ from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery from google.cloud.bigtable.data.exceptions import InvalidChunk from google.cloud.bigtable.data.exceptions import _RowSetComplete +from google.cloud.bigtable.data.exceptions import _ResetRow from google.cloud.bigtable.data._helpers import _attempt_timeout_generator from google.cloud.bigtable.data._helpers import _retry_exception_factory from google.api_core import retry as retries from google.api_core.retry import exponential_sleep_generator -if TYPE_CHECKING: - from google.cloud.bigtable.data._async.client import TableAsync +from google.cloud.bigtable.data._cross_sync import CrossSync +if TYPE_CHECKING: + if CrossSync.is_async: + from google.cloud.bigtable.data._async.client import TableAsync as TableType + else: + from google.cloud.bigtable.data._sync_autogen.client import Table as TableType # type: ignore -class _ResetRow(Exception): - def __init__(self, chunk): - self.chunk = chunk +__CROSS_SYNC_OUTPUT__ = "google.cloud.bigtable.data._sync_autogen._read_rows" +@CrossSync.convert_class("_ReadRowsOperation") class _ReadRowsOperationAsync: """ ReadRowsOperation handles the logic of merging chunks from a ReadRowsResponse stream @@ -80,7 +78,7 @@ class _ReadRowsOperationAsync: def __init__( self, query: ReadRowsQuery, - table: "TableAsync", + table: TableType, operation_timeout: float, attempt_timeout: float, retryable_exceptions: Sequence[type[Exception]] = (), @@ -102,14 +100,14 @@ def __init__( self._last_yielded_row_key: bytes | None = None self._remaining_count: int | None = self.request.rows_limit or None - def start_operation(self) -> AsyncGenerator[Row, None]: + def start_operation(self) -> CrossSync.Iterable[Row]: """ Start the read_rows operation, retrying on retryable errors. Yields: Row: The next row in the stream """ - return retries.retry_target_stream_async( + return CrossSync.retry_target_stream( self._read_rows_attempt, self._predicate, exponential_sleep_generator(0.01, 60, multiplier=2), @@ -117,7 +115,7 @@ def start_operation(self) -> AsyncGenerator[Row, None]: exception_factory=_retry_exception_factory, ) - def _read_rows_attempt(self) -> AsyncGenerator[Row, None]: + def _read_rows_attempt(self) -> CrossSync.Iterable[Row]: """ Attempt a single read_rows rpc call. 
This function is intended to be wrapped by retry logic, @@ -152,9 +150,10 @@ def _read_rows_attempt(self) -> AsyncGenerator[Row, None]: chunked_stream = self.chunk_stream(gapic_stream) return self.merge_rows(chunked_stream) + @CrossSync.convert() async def chunk_stream( - self, stream: Awaitable[AsyncIterable[ReadRowsResponsePB]] - ) -> AsyncGenerator[ReadRowsResponsePB.CellChunk, None]: + self, stream: CrossSync.Awaitable[CrossSync.Iterable[ReadRowsResponsePB]] + ) -> CrossSync.Iterable[ReadRowsResponsePB.CellChunk]: """ process chunks out of raw read_rows stream @@ -204,9 +203,12 @@ async def chunk_stream( current_key = None @staticmethod + @CrossSync.convert( + replace_symbols={"__aiter__": "__iter__", "__anext__": "__next__"}, + ) async def merge_rows( - chunks: AsyncGenerator[ReadRowsResponsePB.CellChunk, None] | None - ) -> AsyncGenerator[Row, None]: + chunks: CrossSync.Iterable[ReadRowsResponsePB.CellChunk] | None, + ) -> CrossSync.Iterable[Row]: """ Merge chunks into rows @@ -222,7 +224,7 @@ async def merge_rows( while True: try: c = await it.__anext__() - except StopAsyncIteration: + except CrossSync.StopIteration: # stream complete return row_key = c.row_key @@ -315,7 +317,7 @@ async def merge_rows( ): raise InvalidChunk("reset row with data") continue - except StopAsyncIteration: + except CrossSync.StopIteration: raise InvalidChunk("premature end of stream") @staticmethod diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py index f1f7ad1a3099..d560d7e1ee86 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py @@ -15,88 +15,113 @@ from __future__ import annotations -import asyncio -from functools import partial -import os -import random -import sys -import time from typing import ( - TYPE_CHECKING, + cast, Any, AsyncIterable, - Dict, Optional, - Sequence, Set, - Union, - cast, + Sequence, + TYPE_CHECKING, ) + +import time import warnings +import random +import os +import concurrent.futures -from google.api_core import client_options as client_options_lib -from google.api_core import retry as retries -from google.api_core.exceptions import Aborted, DeadlineExceeded, ServiceUnavailable -import google.auth._default -import google.auth.credentials -from google.cloud.client import ClientWithProject -from google.cloud.environment_vars import BIGTABLE_EMULATOR # type: ignore -import grpc +from functools import partial +from grpc import Channel -from google.cloud.bigtable.client import _DEFAULT_BIGTABLE_EMULATOR_CLIENT -from google.cloud.bigtable.data.execute_query._async.execute_query_iterator import ( - ExecuteQueryIteratorAsync, -) -from google.cloud.bigtable.data._async._mutate_rows import _MutateRowsOperationAsync -from google.cloud.bigtable.data._async._read_rows import _ReadRowsOperationAsync -from google.cloud.bigtable.data._async.mutations_batcher import ( - _MB_SIZE, - MutationsBatcherAsync, -) -from google.cloud.bigtable.data._helpers import ( - _CONCURRENCY_LIMIT, - TABLE_DEFAULT, - _attempt_timeout_generator, - _get_error_type, - _get_retryable_errors, - _get_timeouts, - _retry_exception_factory, - _validate_timeouts, - _WarmedInstanceKey, -) -from google.cloud.bigtable.data.exceptions import ( - FailedQueryShardError, - ShardedReadRowsExceptionGroup, -) -from google.cloud.bigtable.data.mutations import Mutation, RowMutationEntry -from 
google.cloud.bigtable.data.read_modify_write_rules import ReadModifyWriteRule -from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery -from google.cloud.bigtable.data.row import Row -from google.cloud.bigtable.data.row_filters import ( - CellsRowLimitFilter, - RowFilter, - RowFilterChain, - StripValueTransformerFilter, -) from google.cloud.bigtable.data.execute_query.values import ExecuteQueryValueType from google.cloud.bigtable.data.execute_query.metadata import SqlType from google.cloud.bigtable.data.execute_query._parameters_formatting import ( _format_execute_query_params, ) -from google.cloud.bigtable_v2.services.bigtable.async_client import ( +from google.cloud.bigtable_v2.services.bigtable.transports.base import ( DEFAULT_CLIENT_INFO, - BigtableAsyncClient, -) -from google.cloud.bigtable_v2.services.bigtable.transports import ( - BigtableGrpcAsyncIOTransport, ) from google.cloud.bigtable_v2.types.bigtable import PingAndWarmRequest +from google.cloud.client import ClientWithProject +from google.cloud.environment_vars import BIGTABLE_EMULATOR # type: ignore +from google.api_core import retry as retries +from google.api_core.exceptions import DeadlineExceeded +from google.api_core.exceptions import ServiceUnavailable +from google.api_core.exceptions import Aborted + +import google.auth.credentials +import google.auth._default +from google.api_core import client_options as client_options_lib +from google.cloud.bigtable.client import _DEFAULT_BIGTABLE_EMULATOR_CLIENT +from google.cloud.bigtable.data.row import Row +from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery +from google.cloud.bigtable.data.exceptions import FailedQueryShardError +from google.cloud.bigtable.data.exceptions import ShardedReadRowsExceptionGroup + +from google.cloud.bigtable.data._helpers import TABLE_DEFAULT +from google.cloud.bigtable.data._helpers import _WarmedInstanceKey +from google.cloud.bigtable.data._helpers import _CONCURRENCY_LIMIT +from google.cloud.bigtable.data._helpers import _retry_exception_factory +from google.cloud.bigtable.data._helpers import _validate_timeouts +from google.cloud.bigtable.data._helpers import _get_error_type +from google.cloud.bigtable.data._helpers import _get_retryable_errors +from google.cloud.bigtable.data._helpers import _get_timeouts +from google.cloud.bigtable.data._helpers import _attempt_timeout_generator +from google.cloud.bigtable.data.mutations import Mutation, RowMutationEntry + +from google.cloud.bigtable.data.read_modify_write_rules import ReadModifyWriteRule +from google.cloud.bigtable.data.row_filters import RowFilter +from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter +from google.cloud.bigtable.data.row_filters import CellsRowLimitFilter +from google.cloud.bigtable.data.row_filters import RowFilterChain + +from google.cloud.bigtable.data._cross_sync import CrossSync + +if CrossSync.is_async: + from grpc.aio import insecure_channel + from google.cloud.bigtable_v2.services.bigtable.transports import ( + BigtableGrpcAsyncIOTransport as TransportType, + ) + from google.cloud.bigtable.data._async.mutations_batcher import _MB_SIZE +else: + from grpc import insecure_channel + from google.cloud.bigtable_v2.services.bigtable.transports import BigtableGrpcTransport as TransportType # type: ignore + if TYPE_CHECKING: - from google.cloud.bigtable.data._helpers import RowKeySamples, ShardedQuery + from google.cloud.bigtable.data._helpers import RowKeySamples + from google.cloud.bigtable.data._helpers import 
ShardedQuery + + if CrossSync.is_async: + from google.cloud.bigtable.data._async.mutations_batcher import ( + MutationsBatcherAsync, + ) + from google.cloud.bigtable.data.execute_query._async.execute_query_iterator import ( + ExecuteQueryIteratorAsync, + ) +__CROSS_SYNC_OUTPUT__ = "google.cloud.bigtable.data._sync_autogen.client" + + +@CrossSync.convert_class( + sync_name="BigtableDataClient", + add_mapping_for_name="DataClient", +) class BigtableDataClientAsync(ClientWithProject): + @CrossSync.convert( + docstring_format_vars={ + "LOOP_MESSAGE": ( + "Client should be created within an async context (running event loop)", + None, + ), + "RAISE_NO_LOOP": ( + "RuntimeError: if called outside of an async context (no running event loop)", + None, + ), + } + ) def __init__( self, *, @@ -110,7 +135,7 @@ def __init__( """ Create a client instance for the Bigtable Data API - Client should be created within an async context (running event loop) + {LOOP_MESSAGE} Args: project: the project which the client acts on behalf of. @@ -125,7 +150,7 @@ def __init__( Client options used to set user options on the client. API Endpoint should be set through client_options. Raises: - RuntimeError: if called outside of an async context (no running event loop) + {RAISE_NO_LOOP} """ if "pool_size" in kwargs: warnings.warn("pool_size no longer supported") @@ -147,7 +172,7 @@ def __init__( stacklevel=2, ) # use insecure channel if emulator is set - custom_channel = grpc.aio.insecure_channel(self._emulator_host) + custom_channel = insecure_channel(self._emulator_host) if credentials is None: credentials = google.auth.credentials.AnonymousCredentials() if project is None: @@ -159,24 +184,26 @@ def __init__( project=project, client_options=client_options, ) - self._gapic_client = BigtableAsyncClient( + self._gapic_client = CrossSync.GapicClient( credentials=credentials, client_options=client_options, client_info=client_info, - transport=lambda *args, **kwargs: BigtableGrpcAsyncIOTransport( + transport=lambda *args, **kwargs: TransportType( *args, **kwargs, channel=custom_channel ), ) - self.transport = cast( - BigtableGrpcAsyncIOTransport, self._gapic_client.transport - ) + self._is_closed = CrossSync.Event() + self.transport = cast(TransportType, self._gapic_client.transport) # keep track of active instances to for warmup on channel refresh self._active_instances: Set[_WarmedInstanceKey] = set() # keep track of table objects associated with each instance # only remove instance from _active_instances when all associated tables remove it self._instance_owners: dict[_WarmedInstanceKey, Set[int]] = {} self._channel_init_time = time.monotonic() - self._channel_refresh_task: asyncio.Task[None] | None = None + self._channel_refresh_task: CrossSync.Task[None] | None = None + self._executor = ( + concurrent.futures.ThreadPoolExecutor() if not CrossSync.is_async else None + ) if self._emulator_host is None: # attempt to start background channel refresh tasks try: @@ -194,42 +221,58 @@ def _client_version() -> str: """ Helper function to return the client version string for this client """ - return f"{google.cloud.bigtable.__version__}-data-async" - + version_str = f"{google.cloud.bigtable.__version__}-data" + if CrossSync.is_async: + version_str += "-async" + return version_str + + @CrossSync.convert( + docstring_format_vars={ + "RAISE_NO_LOOP": ( + "RuntimeError: if not called in an asyncio event loop", + "None", + ) + } + ) def _start_background_channel_refresh(self) -> None: """ Starts a background task to ping and warm grpc 
channel Raises: - RuntimeError: if not called in an asyncio event loop + {RAISE_NO_LOOP} """ - if not self._channel_refresh_task and not self._emulator_host: - # raise RuntimeError if there is no event loop - asyncio.get_running_loop() - self._channel_refresh_task = asyncio.create_task(self._manage_channel()) - if sys.version_info >= (3, 8): - # task names supported in Python 3.8+ - self._channel_refresh_task.set_name( - f"{self.__class__.__name__} channel refresh" - ) + if ( + not self._channel_refresh_task + and not self._emulator_host + and not self._is_closed.is_set() + ): + # raise error if not in an event loop in async client + CrossSync.verify_async_event_loop() + self._channel_refresh_task = CrossSync.create_task( + self._manage_channel, + sync_executor=self._executor, + task_name=f"{self.__class__.__name__} channel refresh", + ) - async def close(self, timeout: float = 2.0): + @CrossSync.convert + async def close(self, timeout: float | None = 2.0): """ Cancel all background tasks """ - if self._channel_refresh_task: + self._is_closed.set() + if self._channel_refresh_task is not None: self._channel_refresh_task.cancel() - try: - await asyncio.wait_for(self._channel_refresh_task, timeout=timeout) - except asyncio.CancelledError: - pass + await CrossSync.wait([self._channel_refresh_task], timeout=timeout) await self.transport.close() + if self._executor: + self._executor.shutdown(wait=False) self._channel_refresh_task = None + @CrossSync.convert async def _ping_and_warm_instances( self, instance_key: _WarmedInstanceKey | None = None, - channel: grpc.aio.Channel | None = None, + channel: Channel | None = None, ) -> list[BaseException | None]: """ Prepares the backend for requests on a channel @@ -251,23 +294,26 @@ async def _ping_and_warm_instances( request_serializer=PingAndWarmRequest.serialize, ) # prepare list of coroutines to run - tasks = [] - for instance_name, table_name, app_profile_id in instance_list: - metadata_str = f"name={instance_name}" - if app_profile_id is not None: - metadata_str = f"{metadata_str}&app_profile_id={app_profile_id}" - tasks.append( - ping_rpc( - request={"name": instance_name, "app_profile_id": app_profile_id}, - metadata=[("x-goog-request-params", metadata_str)], - wait_for_ready=True, - ) + partial_list = [ + partial( + ping_rpc, + request={"name": instance_name, "app_profile_id": app_profile_id}, + metadata=[ + ( + "x-goog-request-params", + f"name={instance_name}&app_profile_id={app_profile_id}", + ) + ], + wait_for_ready=True, ) - # execute coroutines in parallel - result_list = await asyncio.gather(*tasks, return_exceptions=True) - # return None in place of empty successful responses + for (instance_name, table_name, app_profile_id) in instance_list + ] + result_list = await CrossSync.gather_partials( + partial_list, return_exceptions=True, sync_executor=self._executor + ) return [r or None for r in result_list] + @CrossSync.convert async def _manage_channel( self, refresh_interval_min: float = 60 * 35, @@ -275,7 +321,7 @@ async def _manage_channel( grace_period: float = 60 * 10, ) -> None: """ - Background coroutine that periodically refreshes and warms a grpc channel + Background task that periodically refreshes and warms a grpc channel The backend will automatically close channels after 60 minutes, so `refresh_interval` + `grace_period` should be < 60 minutes @@ -300,22 +346,41 @@ async def _manage_channel( # warm the current channel immediately await self._ping_and_warm_instances(channel=self.transport.grpc_channel) # continuously refresh 
the channel every `refresh_interval` seconds - while True: - await asyncio.sleep(next_sleep) - start_timestamp = time.time() + while not self._is_closed.is_set(): + await CrossSync.event_wait( + self._is_closed, + next_sleep, + async_break_early=False, # no need to interrupt sleep. Task will be cancelled on close + ) + if self._is_closed.is_set(): + # don't refresh if client is closed + break + start_timestamp = time.monotonic() # prepare new channel for use old_channel = self.transport.grpc_channel new_channel = self.transport.create_channel() await self._ping_and_warm_instances(channel=new_channel) # cycle channel out of use, with long grace window before closure self.transport._grpc_channel = new_channel - await old_channel.close(grace_period) - # subtract the time spent waiting for the channel to be replaced + # give old_channel a chance to complete existing rpcs + if CrossSync.is_async: + await old_channel.close(grace_period) + else: + if grace_period: + self._is_closed.wait(grace_period) # type: ignore + old_channel.close() # type: ignore + # subtract the time spent waiting for the channel to be replaced next_refresh = random.uniform(refresh_interval_min, refresh_interval_max) - next_sleep = next_refresh - (time.time() - start_timestamp) + next_sleep = max(next_refresh - (time.monotonic() - start_timestamp), 0) + @CrossSync.convert( + replace_symbols={ + "TableAsync": "Table", + "ExecuteQueryIteratorAsync": "ExecuteQueryIterator", + } + ) async def _register_instance( - self, instance_id: str, owner: Union[TableAsync, ExecuteQueryIteratorAsync] + self, instance_id: str, owner: TableAsync | ExecuteQueryIteratorAsync ) -> None: """ Registers an instance with the client, and warms the channel for the instance @@ -344,8 +409,14 @@ async def _register_instance( # refresh tasks aren't active. start them as background tasks self._start_background_channel_refresh() + @CrossSync.convert( + replace_symbols={ + "TableAsync": "Table", + "ExecuteQueryIteratorAsync": "ExecuteQueryIterator", + } + ) async def _remove_instance_registration( - self, instance_id: str, owner: Union[TableAsync, ExecuteQueryIteratorAsync] + self, instance_id: str, owner: TableAsync | "ExecuteQueryIteratorAsync" ) -> bool: """ Removes an instance from the client's registered instances, to prevent @@ -374,11 +445,26 @@ async def _remove_instance_registration( except KeyError: return False + @CrossSync.convert( + replace_symbols={"TableAsync": "Table"}, + docstring_format_vars={ + "LOOP_MESSAGE": ( + "Must be created within an async context (running event loop)", + "", + ), + "RAISE_NO_LOOP": ( + "RuntimeError: if called outside of an async context (no running event loop)", + "None", + ), + }, + ) def get_table(self, instance_id: str, table_id: str, *args, **kwargs) -> TableAsync: """ Returns a table instance for making data API requests. All arguments are passed directly to the TableAsync constructor. + {LOOP_MESSAGE} + Args: instance_id: The Bigtable instance ID to associate with this client. 
instance_id is combined with the client's project to fully @@ -411,17 +497,20 @@ def get_table(self, instance_id: str, table_id: str, *args, **kwargs) -> TableAs Returns: TableAsync: a table instance for making data API requests Raises: - RuntimeError: if called outside of an async context (no running event loop) + {RAISE_NO_LOOP} """ return TableAsync(self, instance_id, table_id, *args, **kwargs) + @CrossSync.convert( + replace_symbols={"ExecuteQueryIteratorAsync": "ExecuteQueryIterator"} + ) async def execute_query( self, query: str, instance_id: str, *, - parameters: Dict[str, ExecuteQueryValueType] | None = None, - parameter_types: Dict[str, SqlType.Type] | None = None, + parameters: dict[str, ExecuteQueryValueType] | None = None, + parameter_types: dict[str, SqlType.Type] | None = None, app_profile_id: str | None = None, operation_timeout: float = 600, attempt_timeout: float | None = 20, @@ -491,7 +580,7 @@ async def execute_query( "proto_format": {}, } - return ExecuteQueryIteratorAsync( + return CrossSync.ExecuteQueryIterator( self, instance_id, app_profile_id, @@ -501,15 +590,18 @@ async def execute_query( retryable_excs=retryable_excs, ) + @CrossSync.convert(sync_name="__enter__") async def __aenter__(self): self._start_background_channel_refresh() return self + @CrossSync.convert(sync_name="__exit__", replace_symbols={"__aexit__": "__exit__"}) async def __aexit__(self, exc_type, exc_val, exc_tb): await self.close() await self._gapic_client.__aexit__(exc_type, exc_val, exc_tb) +@CrossSync.convert_class(sync_name="Table", add_mapping_for_name="Table") class TableAsync: """ Main Data API surface @@ -518,6 +610,19 @@ class TableAsync: each call """ + @CrossSync.convert( + replace_symbols={"BigtableDataClientAsync": "BigtableDataClient"}, + docstring_format_vars={ + "LOOP_MESSAGE": ( + "Must be created within an async context (running event loop)", + "", + ), + "RAISE_NO_LOOP": ( + "RuntimeError: if called outside of an async context (no running event loop)", + "None", + ), + }, + ) def __init__( self, client: BigtableDataClientAsync, @@ -548,7 +653,7 @@ def __init__( """ Initialize a Table instance - Must be created within an async context (running event loop) + {LOOP_MESSAGE} Args: instance_id: The Bigtable instance ID to associate with this client. @@ -580,7 +685,7 @@ def __init__( encountered during all other operations. Defaults to 4 (DeadlineExceeded) and 14 (ServiceUnavailable) Raises: - RuntimeError: if called outside of an async context (no running event loop) + {RAISE_NO_LOOP} """ # NOTE: any changes to the signature of this method should also be reflected # in client.get_table() @@ -626,17 +731,19 @@ def __init__( default_mutate_rows_retryable_errors or () ) self.default_retryable_errors = default_retryable_errors or () - - # raises RuntimeError if called outside of an async context (no running event loop) try: - self._register_instance_task = asyncio.create_task( - self.client._register_instance(instance_id, self) + self._register_instance_future = CrossSync.create_task( + self.client._register_instance, + self.instance_id, + self, + sync_executor=self.client._executor, ) except RuntimeError as e: raise RuntimeError( f"{self.__class__.__name__} must be created within an async event loop context." 
) from e + @CrossSync.convert(replace_symbols={"AsyncIterable": "Iterable"}) async def read_rows_stream( self, query: ReadRowsQuery, @@ -678,7 +785,7 @@ async def read_rows_stream( ) retryable_excs = _get_retryable_errors(retryable_errors, self) - row_merger = _ReadRowsOperationAsync( + row_merger = CrossSync._ReadRowsOperation( query, self, operation_timeout=operation_timeout, @@ -687,6 +794,7 @@ async def read_rows_stream( ) return row_merger.start_operation() + @CrossSync.convert async def read_rows( self, query: ReadRowsQuery, @@ -734,6 +842,7 @@ async def read_rows( ) return [row async for row in row_generator] + @CrossSync.convert async def read_row( self, row_key: str | bytes, @@ -783,6 +892,7 @@ async def read_row( return None return results[0] + @CrossSync.convert async def read_rows_sharded( self, sharded_query: ShardedQuery, @@ -833,8 +943,9 @@ async def read_rows_sharded( ) # limit the number of concurrent requests using a semaphore - concurrency_sem = asyncio.Semaphore(_CONCURRENCY_LIMIT) + concurrency_sem = CrossSync.Semaphore(_CONCURRENCY_LIMIT) + @CrossSync.convert async def read_rows_with_semaphore(query): async with concurrency_sem: # calculate new timeout based on time left in overall operation @@ -850,8 +961,14 @@ async def read_rows_with_semaphore(query): retryable_errors=retryable_errors, ) - routine_list = [read_rows_with_semaphore(query) for query in sharded_query] - batch_result = await asyncio.gather(*routine_list, return_exceptions=True) + routine_list = [ + partial(read_rows_with_semaphore, query) for query in sharded_query + ] + batch_result = await CrossSync.gather_partials( + routine_list, + return_exceptions=True, + sync_executor=self.client._executor, + ) # collect results and errors error_dict = {} @@ -878,6 +995,7 @@ async def read_rows_with_semaphore(query): ) return results_list + @CrossSync.convert async def row_exists( self, row_key: str | bytes, @@ -926,6 +1044,7 @@ async def row_exists( ) return len(results) > 0 + @CrossSync.convert async def sample_row_keys( self, *, @@ -977,7 +1096,7 @@ async def sample_row_keys( sleep_generator = retries.exponential_sleep_generator(0.01, 2, 60) - # prepare request + @CrossSync.convert async def execute_rpc(): results = await self.client._gapic_client.sample_row_keys( table_name=self.table_name, @@ -987,7 +1106,7 @@ async def execute_rpc(): ) return [(s.row_key, s.offset_bytes) async for s in results] - return await retries.retry_target_async( + return await CrossSync.retry_target( execute_rpc, predicate, sleep_generator, @@ -995,6 +1114,7 @@ async def execute_rpc(): exception_factory=_retry_exception_factory, ) + @CrossSync.convert(replace_symbols={"MutationsBatcherAsync": "MutationsBatcher"}) def mutations_batcher( self, *, @@ -1007,7 +1127,7 @@ def mutations_batcher( batch_attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS, batch_retryable_errors: Sequence[type[Exception]] | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS, - ) -> MutationsBatcherAsync: + ) -> "MutationsBatcherAsync": """ Returns a new mutations batcher instance. 
@@ -1032,7 +1152,7 @@ def mutations_batcher( Returns: MutationsBatcherAsync: a MutationsBatcherAsync context manager that can batch requests """ - return MutationsBatcherAsync( + return CrossSync.MutationsBatcher( self, flush_interval=flush_interval, flush_limit_mutation_count=flush_limit_mutation_count, @@ -1044,6 +1164,7 @@ def mutations_batcher( batch_retryable_errors=batch_retryable_errors, ) + @CrossSync.convert async def mutate_row( self, row_key: str | bytes, @@ -1113,7 +1234,7 @@ async def mutate_row( timeout=attempt_timeout, retry=None, ) - return await retries.retry_target_async( + return await CrossSync.retry_target( target, predicate, sleep_generator, @@ -1121,6 +1242,7 @@ async def mutate_row( exception_factory=_retry_exception_factory, ) + @CrossSync.convert async def bulk_mutate_rows( self, mutation_entries: list[RowMutationEntry], @@ -1166,7 +1288,7 @@ async def bulk_mutate_rows( ) retryable_excs = _get_retryable_errors(retryable_errors, self) - operation = _MutateRowsOperationAsync( + operation = CrossSync._MutateRowsOperation( self.client._gapic_client, self, mutation_entries, @@ -1176,6 +1298,7 @@ async def bulk_mutate_rows( ) await operation.start() + @CrossSync.convert async def check_and_mutate_row( self, row_key: str | bytes, @@ -1240,6 +1363,7 @@ async def check_and_mutate_row( ) return result.predicate_matched + @CrossSync.convert async def read_modify_write_row( self, row_key: str | bytes, @@ -1288,13 +1412,16 @@ async def read_modify_write_row( # construct Row from result return Row._from_pb(result.row) + @CrossSync.convert async def close(self): """ Called to close the Table instance and release any resources held by it. """ - self._register_instance_task.cancel() + if self._register_instance_future: + self._register_instance_future.cancel() await self.client._remove_instance_registration(self.instance_id, self) + @CrossSync.convert(sync_name="__enter__") async def __aenter__(self): """ Implement async context manager protocol @@ -1302,9 +1429,11 @@ async def __aenter__(self): Ensure registration task has time to run, so that grpc channels will be warmed for the specified instance """ - await self._register_instance_task + if self._register_instance_future: + await self._register_instance_future return self + @CrossSync.convert(sync_name="__exit__") async def __aexit__(self, exc_type, exc_val, exc_tb): """ Implement async context manager protocol diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/mutations_batcher.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/mutations_batcher.py index 76d13f00bf83..65070c880a8c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/mutations_batcher.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/mutations_batcher.py @@ -14,32 +14,40 @@ # from __future__ import annotations -from typing import Any, Sequence, TYPE_CHECKING -import asyncio +from typing import Sequence, TYPE_CHECKING import atexit import warnings from collections import deque +import concurrent.futures -from google.cloud.bigtable.data.mutations import RowMutationEntry from google.cloud.bigtable.data.exceptions import MutationsExceptionGroup from google.cloud.bigtable.data.exceptions import FailedMutationEntryError from google.cloud.bigtable.data._helpers import _get_retryable_errors from google.cloud.bigtable.data._helpers import _get_timeouts from google.cloud.bigtable.data._helpers import TABLE_DEFAULT -from google.cloud.bigtable.data._async._mutate_rows import 
_MutateRowsOperationAsync -from google.cloud.bigtable.data._async._mutate_rows import ( +from google.cloud.bigtable.data.mutations import ( _MUTATE_ROWS_REQUEST_MUTATION_LIMIT, ) from google.cloud.bigtable.data.mutations import Mutation +from google.cloud.bigtable.data._cross_sync import CrossSync + if TYPE_CHECKING: - from google.cloud.bigtable.data._async.client import TableAsync + from google.cloud.bigtable.data.mutations import RowMutationEntry + + if CrossSync.is_async: + from google.cloud.bigtable.data._async.client import TableAsync as TableType + else: + from google.cloud.bigtable.data._sync_autogen.client import Table as TableType # type: ignore + +__CROSS_SYNC_OUTPUT__ = "google.cloud.bigtable.data._sync_autogen.mutations_batcher" # used to make more readable default values _MB_SIZE = 1024 * 1024 +@CrossSync.convert_class(sync_name="_FlowControl", add_mapping_for_name="_FlowControl") class _FlowControlAsync: """ Manages flow control for batched mutations. Mutations are registered against @@ -70,7 +78,7 @@ def __init__( raise ValueError("max_mutation_count must be greater than 0") if self._max_mutation_bytes < 1: raise ValueError("max_mutation_bytes must be greater than 0") - self._capacity_condition = asyncio.Condition() + self._capacity_condition = CrossSync.Condition() self._in_flight_mutation_count = 0 self._in_flight_mutation_bytes = 0 @@ -96,6 +104,7 @@ def _has_capacity(self, additional_count: int, additional_size: int) -> bool: new_count = self._in_flight_mutation_count + additional_count return new_size <= acceptable_size and new_count <= acceptable_count + @CrossSync.convert async def remove_from_flow( self, mutations: RowMutationEntry | list[RowMutationEntry] ) -> None: @@ -117,6 +126,7 @@ async def remove_from_flow( async with self._capacity_condition: self._capacity_condition.notify_all() + @CrossSync.convert async def add_to_flow(self, mutations: RowMutationEntry | list[RowMutationEntry]): """ Generator function that registers mutations with flow control. 
As mutations @@ -166,6 +176,7 @@ async def add_to_flow(self, mutations: RowMutationEntry | list[RowMutationEntry] yield mutations[start_idx:end_idx] +@CrossSync.convert_class(sync_name="MutationsBatcher") class MutationsBatcherAsync: """ Allows users to send batches using context manager API: @@ -199,7 +210,7 @@ class MutationsBatcherAsync: def __init__( self, - table: "TableAsync", + table: TableType, *, flush_interval: float | None = 5, flush_limit_mutation_count: int | None = 1000, @@ -218,11 +229,11 @@ def __init__( batch_retryable_errors, table ) - self.closed: bool = False + self._closed = CrossSync.Event() self._table = table self._staged_entries: list[RowMutationEntry] = [] self._staged_count, self._staged_bytes = 0, 0 - self._flow_control = _FlowControlAsync( + self._flow_control = CrossSync._FlowControl( flow_control_max_mutation_count, flow_control_max_bytes ) self._flush_limit_bytes = flush_limit_bytes @@ -231,8 +242,22 @@ def __init__( if flush_limit_mutation_count is not None else float("inf") ) - self._flush_timer = self._start_flush_timer(flush_interval) - self._flush_jobs: set[asyncio.Future[None]] = set() + # used by sync class to run mutate_rows operations + self._sync_rpc_executor = ( + concurrent.futures.ThreadPoolExecutor(max_workers=8) + if not CrossSync.is_async + else None + ) + # used by sync class to manage flush_internal tasks + self._sync_flush_executor = ( + concurrent.futures.ThreadPoolExecutor(max_workers=1) + if not CrossSync.is_async + else None + ) + self._flush_timer = CrossSync.create_task( + self._timer_routine, flush_interval, sync_executor=self._sync_flush_executor + ) + self._flush_jobs: set[CrossSync.Future[None]] = set() # MutationExceptionGroup reports number of successful entries along with failures self._entries_processed_since_last_raise: int = 0 self._exceptions_since_last_raise: int = 0 @@ -245,7 +270,8 @@ def __init__( # clean up on program exit atexit.register(self._on_exit) - def _start_flush_timer(self, interval: float | None) -> asyncio.Future[None]: + @CrossSync.convert + async def _timer_routine(self, interval: float | None) -> None: """ Set up a background task to flush the batcher every interval seconds @@ -254,27 +280,18 @@ def _start_flush_timer(self, interval: float | None) -> asyncio.Future[None]: Args: flush_interval: Automatically flush every flush_interval seconds. If None, no time-based flushing is performed. 
- Returns: - asyncio.Future[None]: future representing the background task """ - if interval is None or self.closed: - empty_future: asyncio.Future[None] = asyncio.Future() - empty_future.set_result(None) - return empty_future - - async def timer_routine(self, interval: float): - """ - Triggers new flush tasks every `interval` seconds - """ - while not self.closed: - await asyncio.sleep(interval) - # add new flush task to list - if not self.closed and self._staged_entries: - self._schedule_flush() - - timer_task = asyncio.create_task(timer_routine(self, interval)) - return timer_task + if not interval or interval <= 0: + return None + while not self._closed.is_set(): + # wait until interval has passed, or until closed + await CrossSync.event_wait( + self._closed, timeout=interval, async_break_early=False + ) + if not self._closed.is_set() and self._staged_entries: + self._schedule_flush() + @CrossSync.convert async def append(self, mutation_entry: RowMutationEntry): """ Add a new set of mutations to the internal queue @@ -286,7 +303,7 @@ async def append(self, mutation_entry: RowMutationEntry): ValueError: if an invalid mutation type is added """ # TODO: return a future to track completion of this entry - if self.closed: + if self._closed.is_set(): raise RuntimeError("Cannot append to closed MutationsBatcher") if isinstance(mutation_entry, Mutation): # type: ignore raise ValueError( @@ -302,25 +319,29 @@ async def append(self, mutation_entry: RowMutationEntry): ): self._schedule_flush() # yield to the event loop to allow flush to run - await asyncio.sleep(0) + await CrossSync.yield_to_event_loop() - def _schedule_flush(self) -> asyncio.Future[None] | None: + def _schedule_flush(self) -> CrossSync.Future[None] | None: """ Update the flush task to include the latest staged entries Returns: - asyncio.Future[None] | None: + Future[None] | None: future representing the background task, if started """ if self._staged_entries: entries, self._staged_entries = self._staged_entries, [] self._staged_count, self._staged_bytes = 0, 0 - new_task = self._create_bg_task(self._flush_internal, entries) - new_task.add_done_callback(self._flush_jobs.remove) - self._flush_jobs.add(new_task) + new_task = CrossSync.create_task( + self._flush_internal, entries, sync_executor=self._sync_flush_executor + ) + if not new_task.done(): + self._flush_jobs.add(new_task) + new_task.add_done_callback(self._flush_jobs.remove) return new_task return None + @CrossSync.convert async def _flush_internal(self, new_entries: list[RowMutationEntry]): """ Flushes a set of mutations to the server, and updates internal state @@ -329,9 +350,11 @@ async def _flush_internal(self, new_entries: list[RowMutationEntry]): new_entries list of RowMutationEntry objects to flush """ # flush new entries - in_process_requests: list[asyncio.Future[list[FailedMutationEntryError]]] = [] + in_process_requests: list[CrossSync.Future[list[FailedMutationEntryError]]] = [] async for batch in self._flow_control.add_to_flow(new_entries): - batch_task = self._create_bg_task(self._execute_mutate_rows, batch) + batch_task = CrossSync.create_task( + self._execute_mutate_rows, batch, sync_executor=self._sync_rpc_executor + ) in_process_requests.append(batch_task) # wait for all inflight requests to complete found_exceptions = await self._wait_for_batch_results(*in_process_requests) @@ -339,6 +362,7 @@ async def _flush_internal(self, new_entries: list[RowMutationEntry]): self._entries_processed_since_last_raise += len(new_entries) 
self._add_exceptions(found_exceptions) + @CrossSync.convert async def _execute_mutate_rows( self, batch: list[RowMutationEntry] ) -> list[FailedMutationEntryError]: @@ -355,7 +379,7 @@ async def _execute_mutate_rows( FailedMutationEntryError objects will not contain index information """ try: - operation = _MutateRowsOperationAsync( + operation = CrossSync._MutateRowsOperation( self._table.client._gapic_client, self._table, batch, @@ -419,10 +443,12 @@ def _raise_exceptions(self): entry_count=entry_count, ) + @CrossSync.convert(sync_name="__enter__") async def __aenter__(self): """Allow use of context manager API""" return self + @CrossSync.convert(sync_name="__exit__") async def __aexit__(self, exc_type, exc, tb): """ Allow use of context manager API. @@ -431,19 +457,30 @@ async def __aexit__(self, exc_type, exc, tb): """ await self.close() + @property + def closed(self) -> bool: + """ + Returns: + - True if the batcher is closed, False otherwise + """ + return self._closed.is_set() + + @CrossSync.convert async def close(self): """ Flush queue and clean up resources """ - self.closed = True + self._closed.set() self._flush_timer.cancel() self._schedule_flush() - if self._flush_jobs: - await asyncio.gather(*self._flush_jobs, return_exceptions=True) - try: - await self._flush_timer - except asyncio.CancelledError: - pass + # shut down executors + if self._sync_flush_executor: + with self._sync_flush_executor: + self._sync_flush_executor.shutdown(wait=True) + if self._sync_rpc_executor: + with self._sync_rpc_executor: + self._sync_rpc_executor.shutdown(wait=True) + await CrossSync.wait([*self._flush_jobs, self._flush_timer]) atexit.unregister(self._on_exit) # raise unreported exceptions self._raise_exceptions() @@ -452,32 +489,17 @@ def _on_exit(self): """ Called when program is exited. Raises warning if unflushed mutations remain """ - if not self.closed and self._staged_entries: + if not self._closed.is_set() and self._staged_entries: warnings.warn( f"MutationsBatcher for table {self._table.table_name} was not closed. " f"{len(self._staged_entries)} Unflushed mutations will not be sent to the server." ) @staticmethod - def _create_bg_task(func, *args, **kwargs) -> asyncio.Future[Any]: - """ - Create a new background task, and return a future - - This method wraps asyncio to make it easier to maintain subclasses - with different concurrency models. - - Args: - func: function to execute in background task - *args: positional arguments to pass to func - **kwargs: keyword arguments to pass to func - Returns: - asyncio.Future: Future object representing the background task - """ - return asyncio.create_task(func(*args, **kwargs)) - - @staticmethod + @CrossSync.convert async def _wait_for_batch_results( - *tasks: asyncio.Future[list[FailedMutationEntryError]] | asyncio.Future[None], + *tasks: CrossSync.Future[list[FailedMutationEntryError]] + | CrossSync.Future[None], ) -> list[Exception]: """ Takes in a list of futures representing _execute_mutate_rows tasks, @@ -494,19 +516,19 @@ async def _wait_for_batch_results( """ if not tasks: return [] - all_results = await asyncio.gather(*tasks, return_exceptions=True) - found_errors = [] - for result in all_results: - if isinstance(result, Exception): - # will receive direct Exception objects if request task fails - found_errors.append(result) - elif isinstance(result, BaseException): - # BaseException not expected from grpc calls. 
Raise immediately - raise result - elif result: - # completed requests will return a list of FailedMutationEntryError - for e in result: - # strip index information - e.index = None - found_errors.extend(result) - return found_errors + exceptions: list[Exception] = [] + for task in tasks: + if CrossSync.is_async: + # futures don't need to be awaited in sync mode + await task + try: + exc_list = task.result() + if exc_list: + # expect a list of FailedMutationEntryError objects + for exc in exc_list: + # strip index information + exc.index = None + exceptions.extend(exc_list) + except Exception as e: + exceptions.append(e) + return exceptions diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/exceptions.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/exceptions.py index 95cd44f2c77c..62f0b62fc9b1 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/exceptions.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/exceptions.py @@ -41,6 +41,21 @@ class _RowSetComplete(Exception): pass +class _ResetRow(Exception): # noqa: F811 + """ + Internal exception for _ReadRowsOperation + + Denotes that the server sent a reset_row marker, telling the client to drop + all previous chunks for row_key and re-read from the beginning. + + Args: + chunk: the reset_row chunk + """ + + def __init__(self, chunk): + self.chunk = chunk + + class _MutateRowsIncomplete(RuntimeError): """ Exception raised when a mutate_rows call has unfinished work. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/__init__.py index 94af7d1cd0e8..0ff258365fdb 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/__init__.py @@ -25,7 +25,9 @@ QueryResultRow, Struct, ) +from google.cloud.bigtable.data._cross_sync import CrossSync +CrossSync.add_mapping("ExecuteQueryIterator", ExecuteQueryIteratorAsync) __all__ = [ "ExecuteQueryValueType", diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py index 6146ad4516f5..ba82bbccaf53 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py @@ -14,10 +14,8 @@ from __future__ import annotations -import asyncio from typing import ( Any, - AsyncIterator, Dict, Optional, Sequence, @@ -43,40 +41,31 @@ ExecuteQueryRequest as ExecuteQueryRequestPB, ) +from google.cloud.bigtable.data._cross_sync import CrossSync + if TYPE_CHECKING: - from google.cloud.bigtable.data import BigtableDataClientAsync + if CrossSync.is_async: + from google.cloud.bigtable.data import BigtableDataClientAsync as DataClientType +__CROSS_SYNC_OUTPUT__ = ( + "google.cloud.bigtable.data.execute_query._sync_autogen.execute_query_iterator" +) -class ExecuteQueryIteratorAsync: - """ - ExecuteQueryIteratorAsync handles collecting streaming responses from the - ExecuteQuery RPC and parsing them to QueryResultRows. - - ExecuteQueryIteratorAsync implements Asynchronous Iterator interface and can - be used with "async for" syntax. It is also a context manager. - - It is **not thread-safe**. 
It should not be used by multiple asyncio Tasks. - - Args: - client: bigtable client - instance_id: id of the instance on which the query is executed - request_body: dict representing the body of the ExecuteQueryRequest - attempt_timeout: the time budget for the entire operation, in seconds. - Failed requests will be retried within the budget. - Defaults to 600 seconds. - operation_timeout: the time budget for an individual network request, in seconds. - If it takes longer than this time to complete, the request will be cancelled with - a DeadlineExceeded exception, and a retry will be attempted. - Defaults to the 20 seconds. If None, defaults to operation_timeout. - req_metadata: metadata used while sending the gRPC request - retryable_excs: a list of errors that will be retried if encountered. - Raises: - RuntimeError: if the instance is not created within an async event loop context. - """ +@CrossSync.convert_class(sync_name="ExecuteQueryIterator") +class ExecuteQueryIteratorAsync: + @CrossSync.convert( + docstring_format_vars={ + "NO_LOOP": ( + "RuntimeError: if the instance is not created within an async event loop context.", + "None", + ), + "TASK_OR_THREAD": ("asyncio Tasks", "threads"), + } + ) def __init__( self, - client: BigtableDataClientAsync, + client: DataClientType, instance_id: str, app_profile_id: Optional[str], request_body: Dict[str, Any], @@ -85,6 +74,25 @@ def __init__( req_metadata: Sequence[Tuple[str, str]] = (), retryable_excs: Sequence[type[Exception]] = (), ) -> None: + """ + Collects responses from ExecuteQuery requests and parses them into QueryResultRows. + + It is **not thread-safe**. It should not be used by multiple {TASK_OR_THREAD}. + + Args: + client: bigtable client + instance_id: id of the instance on which the query is executed + request_body: dict representing the body of the ExecuteQueryRequest + attempt_timeout: the time budget for an individual network request, in seconds. + If it takes longer than this time to complete, the request will be cancelled with + a DeadlineExceeded exception, and a retry will be attempted. + operation_timeout: the time budget for the entire operation, in seconds. + Failed requests will be retried within the budget + req_metadata: metadata used while sending the gRPC request + retryable_excs: a list of errors that will be retried if encountered. + Raises: + {NO_LOOP} + """ self._table_name = None self._app_profile_id = app_profile_id self._client = client @@ -98,8 +106,7 @@ def __init__( self._attempt_timeout_gen = _attempt_timeout_generator( attempt_timeout, operation_timeout ) - retryable_excs = retryable_excs or [] - self._async_stream = retries.retry_target_stream_async( + self._stream = CrossSync.retry_target_stream( self._make_request_with_resume_token, retries.if_exception_type(*retryable_excs), retries.exponential_sleep_generator(0.01, 60, multiplier=2), @@ -109,8 +116,11 @@ def __init__( self._req_metadata = req_metadata try: - self._register_instance_task = asyncio.create_task( - self._client._register_instance(instance_id, self) + self._register_instance_task = CrossSync.create_task( + self._client._register_instance, + instance_id, + self, + sync_executor=self._client._executor, ) except RuntimeError as e: raise RuntimeError( @@ -132,6 +142,7 @@ def table_name(self) -> Optional[str]: """Returns the table_name of the iterator.""" return self._table_name + @CrossSync.convert async def _make_request_with_resume_token(self): """ perfoms the rpc call using the correct resume token. 
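The iterator refactored above keeps the same consumption pattern as before the CrossSync conversion. A small usage sketch, based on the async system tests removed later in this patch; the query string, table name, and instance id are placeholders:

```
# Usage sketch for the async iterator; "client" is assumed to be an open
# BigtableDataClientAsync, and the table/instance names are placeholders.
async def run_query(client):
    result = await client.execute_query("SELECT a, b FROM my_table", "my-instance")
    rows = [row async for row in result]  # drives the retrying response stream
    metadata = await result.metadata()    # parsed from the first stream response
    return rows, metadata
```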
@@ -150,23 +161,25 @@ async def _make_request_with_resume_token(self): retry=None, ) - async def _await_metadata(self) -> None: + @CrossSync.convert(replace_symbols={"__anext__": "__next__"}) + async def _fetch_metadata(self) -> None: """ If called before the first response was recieved, the first response - is awaited as part of this call. + is retrieved as part of this call. """ if self._byte_cursor.metadata is None: - metadata_msg = await self._async_stream.__anext__() + metadata_msg = await self._stream.__anext__() self._byte_cursor.consume_metadata(metadata_msg) - async def _next_impl(self) -> AsyncIterator[QueryResultRow]: + @CrossSync.convert + async def _next_impl(self) -> CrossSync.Iterator[QueryResultRow]: """ Generator wrapping the response stream which parses the stream results and returns full `QueryResultRow`s. """ - await self._await_metadata() + await self._fetch_metadata() - async for response in self._async_stream: + async for response in self._stream: try: bytes_to_parse = self._byte_cursor.consume(response) if bytes_to_parse is None: @@ -185,14 +198,17 @@ async def _next_impl(self) -> AsyncIterator[QueryResultRow]: yield result await self.close() + @CrossSync.convert(sync_name="__next__", replace_symbols={"__anext__": "__next__"}) async def __anext__(self) -> QueryResultRow: if self._is_closed: - raise StopAsyncIteration + raise CrossSync.StopIteration return await self._result_generator.__anext__() + @CrossSync.convert(sync_name="__iter__") def __aiter__(self): return self + @CrossSync.convert async def metadata(self) -> Optional[Metadata]: """ Returns query metadata from the server or None if the iterator was @@ -203,11 +219,12 @@ async def metadata(self) -> Optional[Metadata]: # Metadata should be present in the first response in a stream. if self._byte_cursor.metadata is None: try: - await self._await_metadata() - except StopIteration: + await self._fetch_metadata() + except CrossSync.StopIteration: return None return self._byte_cursor.metadata + @CrossSync.convert async def close(self) -> None: """ Cancel all background tasks. Should be called all rows were processed. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/mutations.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/mutations.py index 335a15e12f01..2f4e441ede81 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/mutations.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/mutations.py @@ -366,3 +366,15 @@ def _from_dict(cls, input_dict: dict[str, Any]) -> RowMutationEntry: Mutation._from_dict(mutation) for mutation in input_dict["mutations"] ], ) + + +@dataclass +class _EntryWithProto: + """ + A dataclass to hold a RowMutationEntry and its corresponding proto representation. + + Used in _MutateRowsOperation to avoid repeated conversion of RowMutationEntry to proto. 
+ """ + + entry: RowMutationEntry + proto: types_pb.MutateRowsRequest.Entry diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index 4dfebe068c40..f6a2291fc865 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -157,6 +157,8 @@ def mypy(session): "tests/system/v2_client", "--exclude", "tests/unit/v2_client", + "--disable-error-code", + "func-returns-value", # needed for CrossSync.rm_aio ) @@ -294,9 +296,8 @@ def system_emulated(session): @nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS) -def conformance(session): - TEST_REPO_URL = "https://github.com/googleapis/cloud-bigtable-clients-test.git" - CLONE_REPO_DIR = "cloud-bigtable-clients-test" +@nox.parametrize("client_type", ["async"]) +def conformance(session, client_type): # install dependencies constraints_path = str( CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt" @@ -304,11 +305,13 @@ def conformance(session): install_unittest_dependencies(session, "-c", constraints_path) with session.chdir("test_proxy"): # download the conformance test suite - clone_dir = os.path.join(CURRENT_DIRECTORY, CLONE_REPO_DIR) - if not os.path.exists(clone_dir): - print("downloading copy of test repo") - session.run("git", "clone", TEST_REPO_URL, CLONE_REPO_DIR, external=True) - session.run("bash", "-e", "run_tests.sh", external=True) + session.run( + "bash", + "-e", + "run_tests.sh", + external=True, + env={"CLIENT_TYPE": client_type}, + ) @nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS) diff --git a/packages/google-cloud-bigtable/test_proxy/README.md b/packages/google-cloud-bigtable/test_proxy/README.md index 08741fd5d673..266fba7cd6eb 100644 --- a/packages/google-cloud-bigtable/test_proxy/README.md +++ b/packages/google-cloud-bigtable/test_proxy/README.md @@ -8,7 +8,7 @@ You can run the conformance tests in a single line by calling `nox -s conformanc ``` -cd python-bigtable/test_proxy +cd python-bigtable nox -s conformance ``` @@ -30,10 +30,11 @@ cd python-bigtable/test_proxy python test_proxy.py --port 8080 ``` -You can run the test proxy against the previous `v2` client by running it with the `--legacy-client` flag: +By default, the test_proxy targets the async client. You can change this by passing in the `--client_type` flag. +Valid options are `async` and `legacy`. 
``` -python test_proxy.py --legacy-client +python test_proxy.py --client_type=legacy ``` ### Run the test cases diff --git a/packages/google-cloud-bigtable/test_proxy/handlers/client_handler_data.py b/packages/google-cloud-bigtable/test_proxy/handlers/client_handler_data_async.py similarity index 90% rename from packages/google-cloud-bigtable/test_proxy/handlers/client_handler_data.py rename to packages/google-cloud-bigtable/test_proxy/handlers/client_handler_data_async.py index 43ff5d634901..7f6cc413fb11 100644 --- a/packages/google-cloud-bigtable/test_proxy/handlers/client_handler_data.py +++ b/packages/google-cloud-bigtable/test_proxy/handlers/client_handler_data_async.py @@ -18,8 +18,15 @@ from google.cloud.environment_vars import BIGTABLE_EMULATOR from google.cloud.bigtable.data import BigtableDataClientAsync +from google.cloud.bigtable.data._cross_sync import CrossSync +if not CrossSync.is_async: + from client_handler_data_async import error_safe +__CROSS_SYNC_OUTPUT__ = "test_proxy.handlers.client_handler_data_sync_autogen" + + +@CrossSync.drop def error_safe(func): """ Catch and pass errors back to the grpc_server_process @@ -37,6 +44,7 @@ async def wrapper(self, *args, **kwargs): return wrapper +@CrossSync.drop def encode_exception(exc): """ Encode an exception or chain of exceptions to pass back to grpc_handler @@ -68,7 +76,8 @@ def encode_exception(exc): return result -class TestProxyClientHandler: +@CrossSync.convert_class("TestProxyClientHandler") +class TestProxyClientHandlerAsync: """ Implements the same methods as the grpc server, but handles the client library side of the request. @@ -90,7 +99,7 @@ def __init__( self.closed = False # use emulator os.environ[BIGTABLE_EMULATOR] = data_target - self.client = BigtableDataClientAsync(project=project_id) + self.client = CrossSync.DataClient(project=project_id) self.instance_id = instance_id self.app_profile_id = app_profile_id self.per_operation_timeout = per_operation_timeout @@ -105,7 +114,7 @@ async def ReadRows(self, request, **kwargs): app_profile_id = self.app_profile_id or request.get("app_profile_id", None) table = self.client.get_table(self.instance_id, table_id, app_profile_id) kwargs["operation_timeout"] = kwargs.get("operation_timeout", self.per_operation_timeout) or 20 - result_list = await table.read_rows(request, **kwargs) + result_list = CrossSync.rm_aio(await table.read_rows(request, **kwargs)) # pack results back into protobuf-parsable format serialized_response = [row._to_dict() for row in result_list] return serialized_response @@ -116,7 +125,7 @@ async def ReadRow(self, row_key, **kwargs): app_profile_id = self.app_profile_id or kwargs.get("app_profile_id", None) table = self.client.get_table(self.instance_id, table_id, app_profile_id) kwargs["operation_timeout"] = kwargs.get("operation_timeout", self.per_operation_timeout) or 20 - result_row = await table.read_row(row_key, **kwargs) + result_row = CrossSync.rm_aio(await table.read_row(row_key, **kwargs)) # pack results back into protobuf-parsable format if result_row: return result_row._to_dict() @@ -132,7 +141,7 @@ async def MutateRow(self, request, **kwargs): kwargs["operation_timeout"] = kwargs.get("operation_timeout", self.per_operation_timeout) or 20 row_key = request["row_key"] mutations = [Mutation._from_dict(d) for d in request["mutations"]] - await table.mutate_row(row_key, mutations, **kwargs) + CrossSync.rm_aio(await table.mutate_row(row_key, mutations, **kwargs)) return "OK" @error_safe @@ -143,7 +152,7 @@ async def BulkMutateRows(self, request, 
**kwargs): table = self.client.get_table(self.instance_id, table_id, app_profile_id) kwargs["operation_timeout"] = kwargs.get("operation_timeout", self.per_operation_timeout) or 20 entry_list = [RowMutationEntry._from_dict(entry) for entry in request["entries"]] - await table.bulk_mutate_rows(entry_list, **kwargs) + CrossSync.rm_aio(await table.bulk_mutate_rows(entry_list, **kwargs)) return "OK" @error_safe @@ -171,13 +180,13 @@ async def CheckAndMutateRow(self, request, **kwargs): # invalid mutation type. Conformance test may be sending generic empty request false_mutations.append(SetCell("", "", "", 0)) predicate_filter = request.get("predicate_filter", None) - result = await table.check_and_mutate_row( + result = CrossSync.rm_aio(await table.check_and_mutate_row( row_key, predicate_filter, true_case_mutations=true_mutations, false_case_mutations=false_mutations, **kwargs, - ) + )) return result @error_safe @@ -197,7 +206,7 @@ async def ReadModifyWriteRow(self, request, **kwargs): else: new_rule = IncrementRule(rule_dict["family_name"], qualifier, rule_dict["increment_amount"]) rules.append(new_rule) - result = await table.read_modify_write_row(row_key, rules, **kwargs) + result = CrossSync.rm_aio(await table.read_modify_write_row(row_key, rules, **kwargs)) # pack results back into protobuf-parsable format if result: return result._to_dict() @@ -210,5 +219,5 @@ async def SampleRowKeys(self, request, **kwargs): app_profile_id = self.app_profile_id or request.get("app_profile_id", None) table = self.client.get_table(self.instance_id, table_id, app_profile_id) kwargs["operation_timeout"] = kwargs.get("operation_timeout", self.per_operation_timeout) or 20 - result = await table.sample_row_keys(**kwargs) + result = CrossSync.rm_aio(await table.sample_row_keys(**kwargs)) return result diff --git a/packages/google-cloud-bigtable/test_proxy/handlers/client_handler_legacy.py b/packages/google-cloud-bigtable/test_proxy/handlers/client_handler_legacy.py index 400f618b514a..63fe357b0b33 100644 --- a/packages/google-cloud-bigtable/test_proxy/handlers/client_handler_legacy.py +++ b/packages/google-cloud-bigtable/test_proxy/handlers/client_handler_legacy.py @@ -19,13 +19,13 @@ from google.cloud.environment_vars import BIGTABLE_EMULATOR from google.cloud.bigtable.client import Client -import client_handler_data as client_handler +import client_handler_data_async as client_handler import warnings warnings.filterwarnings("ignore", category=DeprecationWarning) -class LegacyTestProxyClientHandler(client_handler.TestProxyClientHandler): +class LegacyTestProxyClientHandler(client_handler.TestProxyClientHandlerAsync): def __init__( self, diff --git a/packages/google-cloud-bigtable/test_proxy/noxfile.py b/packages/google-cloud-bigtable/test_proxy/noxfile.py deleted file mode 100644 index bebf247b70e6..000000000000 --- a/packages/google-cloud-bigtable/test_proxy/noxfile.py +++ /dev/null @@ -1,80 +0,0 @@ -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import absolute_import -import os -import pathlib -import re -from colorlog.escape_codes import parse_colors - -import nox - - -DEFAULT_PYTHON_VERSION = "3.10" - -PROXY_SERVER_PORT=os.environ.get("PROXY_SERVER_PORT", "50055") -PROXY_CLIENT_VERSION=os.environ.get("PROXY_CLIENT_VERSION", None) - -CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute() -REPO_ROOT_DIRECTORY = CURRENT_DIRECTORY.parent - -nox.options.sessions = ["run_proxy", "conformance_tests"] - -TEST_REPO_URL = "https://github.com/googleapis/cloud-bigtable-clients-test.git" -CLONE_REPO_DIR = "cloud-bigtable-clients-test" - -# Error if a python version is missing -nox.options.error_on_missing_interpreters = True - - -def default(session): - """ - if nox is run directly, run the test_proxy session - """ - test_proxy(session) - - -@nox.session(python=DEFAULT_PYTHON_VERSION) -def conformance_tests(session): - """ - download and run the conformance test suite against the test proxy - """ - import subprocess - import time - # download the conformance test suite - clone_dir = os.path.join(CURRENT_DIRECTORY, CLONE_REPO_DIR) - if not os.path.exists(clone_dir): - print("downloading copy of test repo") - session.run("git", "clone", TEST_REPO_URL, CLONE_REPO_DIR) - # start tests - with session.chdir(f"{clone_dir}/tests"): - session.run("go", "test", "-v", f"-proxy_addr=:{PROXY_SERVER_PORT}") - -@nox.session(python=DEFAULT_PYTHON_VERSION) -def test_proxy(session): - """Start up the test proxy""" - # Install all dependencies, then install this package into the - # virtualenv's dist-packages. - # session.install( - # "grpcio", - # ) - if PROXY_CLIENT_VERSION is not None: - # install released version of the library - session.install(f"python-bigtable=={PROXY_CLIENT_VERSION}") - else: - # install the library from the source - session.install("-e", str(REPO_ROOT_DIRECTORY)) - session.install("-e", str(REPO_ROOT_DIRECTORY / "python-api-core")) - - session.run("python", "test_proxy.py", "--port", PROXY_SERVER_PORT, *session.posargs,) diff --git a/packages/google-cloud-bigtable/test_proxy/run_tests.sh b/packages/google-cloud-bigtable/test_proxy/run_tests.sh index 15b146b0365e..c2e9c6312041 100755 --- a/packages/google-cloud-bigtable/test_proxy/run_tests.sh +++ b/packages/google-cloud-bigtable/test_proxy/run_tests.sh @@ -35,7 +35,8 @@ if [ ! -d "cloud-bigtable-clients-test" ]; then fi # start proxy -python test_proxy.py --port $PROXY_SERVER_PORT & +echo "starting with client type: $CLIENT_TYPE" +python test_proxy.py --port $PROXY_SERVER_PORT --client_type $CLIENT_TYPE & PROXY_PID=$! 
function finish { kill $PROXY_PID diff --git a/packages/google-cloud-bigtable/test_proxy/test_proxy.py b/packages/google-cloud-bigtable/test_proxy/test_proxy.py index a0cf2f1f0c8d..9e03f1e5cf49 100644 --- a/packages/google-cloud-bigtable/test_proxy/test_proxy.py +++ b/packages/google-cloud-bigtable/test_proxy/test_proxy.py @@ -55,7 +55,7 @@ def grpc_server_process(request_q, queue_pool, port=50055): server.wait_for_termination() -async def client_handler_process_async(request_q, queue_pool, use_legacy_client=False): +async def client_handler_process_async(request_q, queue_pool, client_type="async"): """ Defines a process that recives Bigtable requests from a grpc_server_process, and runs the request using a client library instance @@ -64,8 +64,7 @@ async def client_handler_process_async(request_q, queue_pool, use_legacy_client= import re import asyncio import warnings - import client_handler_data - import client_handler_legacy + import client_handler_data_async warnings.filterwarnings("ignore", category=RuntimeWarning, message=".*Bigtable emulator.*") def camel_to_snake(str): @@ -98,9 +97,7 @@ def format_dict(input_obj): return input_obj # Listen to requests from grpc server process - print_msg = "client_handler_process started" - if use_legacy_client: - print_msg += ", using legacy client" + print_msg = f"client_handler_process started with client_type={client_type}" print(print_msg) client_map = {} background_tasks = set() @@ -114,10 +111,11 @@ def format_dict(input_obj): client = client_map.get(client_id, None) # handle special cases for client creation and deletion if fn_name == "CreateClient": - if use_legacy_client: + if client_type == "legacy": + import client_handler_legacy client = client_handler_legacy.LegacyTestProxyClientHandler(**json_data) else: - client = client_handler_data.TestProxyClientHandler(**json_data) + client = client_handler_data_async.TestProxyClientHandlerAsync(**json_data) client_map[client_id] = client out_q.put(True) elif client is None: @@ -142,21 +140,21 @@ async def _run_fn(out_q, fn, **kwargs): await asyncio.sleep(0.01) -def client_handler_process(request_q, queue_pool, legacy_client=False): +def client_handler_process(request_q, queue_pool, client_type="async"): """ Sync entrypoint for client_handler_process_async """ import asyncio - asyncio.run(client_handler_process_async(request_q, queue_pool, legacy_client)) + asyncio.run(client_handler_process_async(request_q, queue_pool, client_type)) p = argparse.ArgumentParser() p.add_argument("--port", dest='port', default="50055") -p.add_argument('--legacy-client', dest='use_legacy', action='store_true', default=False) +p.add_argument("--client_type", dest='client_type', default="async", choices=["async", "legacy"]) if __name__ == "__main__": port = p.parse_args().port - use_legacy_client = p.parse_args().use_legacy + client_type = p.parse_args().client_type # start and run both processes # larger pools support more concurrent requests @@ -176,7 +174,7 @@ def client_handler_process(request_q, queue_pool, legacy_client=False): ), ) proxy.start() - client_handler_process(request_q, response_queue_pool, use_legacy_client) + client_handler_process(request_q, response_queue_pool, client_type) proxy.join() else: # run proxy in forground and client in background diff --git a/packages/google-cloud-bigtable/tests/system/data/__init__.py b/packages/google-cloud-bigtable/tests/system/data/__init__.py index 89a37dc92c5a..f2952b2cdb1b 100644 --- a/packages/google-cloud-bigtable/tests/system/data/__init__.py +++ 
b/packages/google-cloud-bigtable/tests/system/data/__init__.py @@ -13,3 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. # + +TEST_FAMILY = "test-family" +TEST_FAMILY_2 = "test-family-2" diff --git a/packages/google-cloud-bigtable/tests/system/data/setup_fixtures.py b/packages/google-cloud-bigtable/tests/system/data/setup_fixtures.py index 77086b7f3e51..3b5a0af0681c 100644 --- a/packages/google-cloud-bigtable/tests/system/data/setup_fixtures.py +++ b/packages/google-cloud-bigtable/tests/system/data/setup_fixtures.py @@ -17,20 +17,10 @@ """ import pytest -import pytest_asyncio import os -import asyncio import uuid -@pytest.fixture(scope="session") -def event_loop(): - loop = asyncio.get_event_loop() - yield loop - loop.stop() - loop.close() - - @pytest.fixture(scope="session") def admin_client(): """ @@ -150,22 +140,7 @@ def table_id( print(f"Table {init_table_id} not found, skipping deletion") -@pytest_asyncio.fixture(scope="session") -async def client(): - from google.cloud.bigtable.data import BigtableDataClientAsync - - project = os.getenv("GOOGLE_CLOUD_PROJECT") or None - async with BigtableDataClientAsync(project=project, pool_size=4) as client: - yield client - - @pytest.fixture(scope="session") def project_id(client): """Returns the project ID from the client.""" yield client.project - - -@pytest_asyncio.fixture(scope="session") -async def table(client, table_id, instance_id): - async with client.get_table(instance_id, table_id) as table: - yield table diff --git a/packages/google-cloud-bigtable/tests/system/data/test_execute_query_async.py b/packages/google-cloud-bigtable/tests/system/data/test_execute_query_async.py deleted file mode 100644 index 489dfeab6b6c..000000000000 --- a/packages/google-cloud-bigtable/tests/system/data/test_execute_query_async.py +++ /dev/null @@ -1,283 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import pytest - -import os -from unittest import mock -from .test_execute_query_utils import ( - ChannelMockAsync, - response_with_metadata, - response_with_result, -) -from google.api_core import exceptions as core_exceptions -from google.cloud.bigtable.data import BigtableDataClientAsync - -TABLE_NAME = "TABLE_NAME" -INSTANCE_NAME = "INSTANCE_NAME" - - -class TestAsyncExecuteQuery: - @pytest.fixture() - def async_channel_mock(self): - with mock.patch.dict(os.environ, {"BIGTABLE_EMULATOR_HOST": "localhost"}): - yield ChannelMockAsync() - - @pytest.fixture() - def async_client(self, async_channel_mock): - with mock.patch.dict( - os.environ, {"BIGTABLE_EMULATOR_HOST": "localhost"} - ), mock.patch("grpc.aio.insecure_channel", return_value=async_channel_mock): - yield BigtableDataClientAsync() - - @pytest.mark.asyncio - async def test_execute_query(self, async_client, async_channel_mock): - values = [ - response_with_metadata(), - response_with_result("test"), - response_with_result(8, resume_token=b"r1"), - response_with_result("test2"), - response_with_result(9, resume_token=b"r2"), - response_with_result("test3"), - response_with_result(None, resume_token=b"r3"), - ] - async_channel_mock.set_values(values) - result = await async_client.execute_query( - f"SELECT a, b FROM {TABLE_NAME}", INSTANCE_NAME - ) - results = [r async for r in result] - assert results[0]["a"] == "test" - assert results[0]["b"] == 8 - assert results[1]["a"] == "test2" - assert results[1]["b"] == 9 - assert results[2]["a"] == "test3" - assert results[2]["b"] is None - assert len(async_channel_mock.execute_query_calls) == 1 - - @pytest.mark.asyncio - async def test_execute_query_with_params(self, async_client, async_channel_mock): - values = [ - response_with_metadata(), - response_with_result("test2"), - response_with_result(9, resume_token=b"r2"), - ] - async_channel_mock.set_values(values) - - result = await async_client.execute_query( - f"SELECT a, b FROM {TABLE_NAME} WHERE b=@b", - INSTANCE_NAME, - parameters={"b": 9}, - ) - results = [r async for r in result] - assert len(results) == 1 - assert results[0]["a"] == "test2" - assert results[0]["b"] == 9 - assert len(async_channel_mock.execute_query_calls) == 1 - - @pytest.mark.asyncio - async def test_execute_query_error_before_metadata( - self, async_client, async_channel_mock - ): - from google.api_core.exceptions import DeadlineExceeded - - values = [ - DeadlineExceeded(""), - response_with_metadata(), - response_with_result("test"), - response_with_result(8, resume_token=b"r1"), - response_with_result("test2"), - response_with_result(9, resume_token=b"r2"), - response_with_result("test3"), - response_with_result(None, resume_token=b"r3"), - ] - async_channel_mock.set_values(values) - - result = await async_client.execute_query( - f"SELECT a, b FROM {TABLE_NAME}", INSTANCE_NAME - ) - results = [r async for r in result] - assert len(results) == 3 - assert len(async_channel_mock.execute_query_calls) == 2 - - @pytest.mark.asyncio - async def test_execute_query_error_after_metadata( - self, async_client, async_channel_mock - ): - from google.api_core.exceptions import DeadlineExceeded - - values = [ - response_with_metadata(), - DeadlineExceeded(""), - response_with_metadata(), - response_with_result("test"), - response_with_result(8, resume_token=b"r1"), - response_with_result("test2"), - response_with_result(9, resume_token=b"r2"), - response_with_result("test3"), - response_with_result(None, resume_token=b"r3"), - ] - async_channel_mock.set_values(values) - - 
result = await async_client.execute_query( - f"SELECT a, b FROM {TABLE_NAME}", INSTANCE_NAME - ) - results = [r async for r in result] - assert len(results) == 3 - assert len(async_channel_mock.execute_query_calls) == 2 - assert async_channel_mock.resume_tokens == [] - - @pytest.mark.asyncio - async def test_execute_query_with_retries(self, async_client, async_channel_mock): - from google.api_core.exceptions import DeadlineExceeded - - values = [ - response_with_metadata(), - response_with_result("test"), - response_with_result(8, resume_token=b"r1"), - DeadlineExceeded(""), - response_with_result("test2"), - response_with_result(9, resume_token=b"r2"), - response_with_result("test3"), - DeadlineExceeded(""), - response_with_result("test3"), - response_with_result(None, resume_token=b"r3"), - ] - async_channel_mock.set_values(values) - - result = await async_client.execute_query( - f"SELECT a, b FROM {TABLE_NAME}", INSTANCE_NAME - ) - results = [r async for r in result] - assert results[0]["a"] == "test" - assert results[0]["b"] == 8 - assert results[1]["a"] == "test2" - assert results[1]["b"] == 9 - assert results[2]["a"] == "test3" - assert results[2]["b"] is None - assert len(async_channel_mock.execute_query_calls) == 3 - assert async_channel_mock.resume_tokens == [b"r1", b"r2"] - - @pytest.mark.parametrize( - "exception", - [ - (core_exceptions.DeadlineExceeded("")), - (core_exceptions.Aborted("")), - (core_exceptions.ServiceUnavailable("")), - ], - ) - @pytest.mark.asyncio - async def test_execute_query_retryable_error( - self, async_client, async_channel_mock, exception - ): - values = [ - response_with_metadata(), - response_with_result("test", resume_token=b"t1"), - exception, - response_with_result(8, resume_token=b"t2"), - ] - async_channel_mock.set_values(values) - - result = await async_client.execute_query( - f"SELECT a, b FROM {TABLE_NAME}", INSTANCE_NAME - ) - results = [r async for r in result] - assert len(results) == 1 - assert len(async_channel_mock.execute_query_calls) == 2 - assert async_channel_mock.resume_tokens == [b"t1"] - - @pytest.mark.asyncio - async def test_execute_query_retry_partial_row( - self, async_client, async_channel_mock - ): - values = [ - response_with_metadata(), - response_with_result("test", resume_token=b"t1"), - core_exceptions.DeadlineExceeded(""), - response_with_result(8, resume_token=b"t2"), - ] - async_channel_mock.set_values(values) - - result = await async_client.execute_query( - f"SELECT a, b FROM {TABLE_NAME}", INSTANCE_NAME - ) - results = [r async for r in result] - assert results[0]["a"] == "test" - assert results[0]["b"] == 8 - assert len(async_channel_mock.execute_query_calls) == 2 - assert async_channel_mock.resume_tokens == [b"t1"] - - @pytest.mark.parametrize( - "ExceptionType", - [ - (core_exceptions.InvalidArgument), - (core_exceptions.FailedPrecondition), - (core_exceptions.PermissionDenied), - (core_exceptions.MethodNotImplemented), - (core_exceptions.Cancelled), - (core_exceptions.AlreadyExists), - (core_exceptions.OutOfRange), - (core_exceptions.DataLoss), - (core_exceptions.Unauthenticated), - (core_exceptions.NotFound), - (core_exceptions.ResourceExhausted), - (core_exceptions.Unknown), - (core_exceptions.InternalServerError), - ], - ) - @pytest.mark.asyncio - async def test_execute_query_non_retryable( - self, async_client, async_channel_mock, ExceptionType - ): - values = [ - response_with_metadata(), - response_with_result("test"), - response_with_result(8, resume_token=b"r1"), - ExceptionType(""), - 
response_with_result("test2"), - response_with_result(9, resume_token=b"r2"), - response_with_result("test3"), - response_with_result(None, resume_token=b"r3"), - ] - async_channel_mock.set_values(values) - - result = await async_client.execute_query( - f"SELECT a, b FROM {TABLE_NAME}", INSTANCE_NAME - ) - r = await result.__anext__() - assert r["a"] == "test" - assert r["b"] == 8 - - with pytest.raises(ExceptionType): - r = await result.__anext__() - - assert len(async_channel_mock.execute_query_calls) == 1 - assert async_channel_mock.resume_tokens == [] - - @pytest.mark.asyncio - async def test_execute_query_metadata_received_multiple_times_detected( - self, async_client, async_channel_mock - ): - values = [ - response_with_metadata(), - response_with_metadata(), - ] - async_channel_mock.set_values(values) - - with pytest.raises(Exception, match="Invalid ExecuteQuery response received"): - [ - r - async for r in await async_client.execute_query( - f"SELECT a, b FROM {TABLE_NAME}", INSTANCE_NAME - ) - ] diff --git a/packages/google-cloud-bigtable/tests/system/data/test_execute_query_utils.py b/packages/google-cloud-bigtable/tests/system/data/test_execute_query_utils.py deleted file mode 100644 index 3439e04d2ccc..000000000000 --- a/packages/google-cloud-bigtable/tests/system/data/test_execute_query_utils.py +++ /dev/null @@ -1,295 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from unittest import mock - -from google.cloud.bigtable_v2.types.bigtable import ExecuteQueryResponse -from google.cloud.bigtable_v2.types.data import ProtoRows, Value as PBValue -import grpc.aio - - -try: - # async mock for python3.7-10 - from asyncio import coroutine - - def async_mock(return_value=None): - coro = mock.Mock(name="CoroutineResult") - corofunc = mock.Mock(name="CoroutineFunction", side_effect=coroutine(coro)) - corofunc.coro = coro - corofunc.coro.return_value = return_value - return corofunc - -except ImportError: - # async mock for python3.11 or later - from unittest.mock import AsyncMock - - def async_mock(return_value=None): - return AsyncMock(return_value=return_value) - - -# ExecuteQueryResponse( -# metadata={ -# "proto_schema": { -# "columns": [ -# {"name": "test1", "type_": TYPE_INT}, -# {"name": "test2", "type_": TYPE_INT}, -# ] -# } -# } -# ), -# ExecuteQueryResponse( -# results={"proto_rows_batch": {"batch_data": messages[0]}} -# ), - - -def response_with_metadata(): - schema = {"a": "string_type", "b": "int64_type"} - return ExecuteQueryResponse( - { - "metadata": { - "proto_schema": { - "columns": [ - {"name": name, "type_": {_type: {}}} - for name, _type in schema.items() - ] - } - } - } - ) - - -def response_with_result(*args, resume_token=None): - if resume_token is None: - resume_token_dict = {} - else: - resume_token_dict = {"resume_token": resume_token} - - values = [] - for column_value in args: - if column_value is None: - pb_value = PBValue({}) - else: - pb_value = PBValue( - { - "int_value" - if isinstance(column_value, int) - else "string_value": column_value - } - ) - values.append(pb_value) - rows = ProtoRows(values=values) - - return ExecuteQueryResponse( - { - "results": { - "proto_rows_batch": { - "batch_data": ProtoRows.serialize(rows), - }, - **resume_token_dict, - } - } - ) - - -class ExecuteQueryStreamMock: - def __init__(self, parent): - self.parent = parent - self.iter = iter(self.parent.values) - - def __call__(self, *args, **kwargs): - request = args[0] - - self.parent.execute_query_calls.append(request) - if request.resume_token: - self.parent.resume_tokens.append(request.resume_token) - - def stream(): - for value in self.iter: - if isinstance(value, Exception): - raise value - else: - yield value - - return stream() - - -class ChannelMock: - def __init__(self): - self.execute_query_calls = [] - self.values = [] - self.resume_tokens = [] - - def set_values(self, values): - self.values = values - - def unary_unary(self, *args, **kwargs): - return mock.MagicMock() - - def unary_stream(self, *args, **kwargs): - if args[0] == "/google.bigtable.v2.Bigtable/ExecuteQuery": - return ExecuteQueryStreamMock(self) - return mock.MagicMock() - - -class ChannelMockAsync(grpc.aio.Channel, mock.MagicMock): - def __init__(self, *args, **kwargs): - mock.MagicMock.__init__(self, *args, **kwargs) - self.execute_query_calls = [] - self.values = [] - self.resume_tokens = [] - self._iter = [] - - def get_async_get(self, *args, **kwargs): - return self.async_gen - - def set_values(self, values): - self.values = values - self._iter = iter(self.values) - - def unary_unary(self, *args, **kwargs): - return async_mock() - - def unary_stream(self, *args, **kwargs): - if args[0] == "/google.bigtable.v2.Bigtable/ExecuteQuery": - - async def async_gen(*args, **kwargs): - for value in self._iter: - yield value - - iter = async_gen() - - class UnaryStreamCallMock(grpc.aio.UnaryStreamCall): - def __aiter__(self): - async def _impl(*args, **kwargs): - try: - while 
True: - yield await self.read() - except StopAsyncIteration: - pass - - return _impl() - - async def read(self): - value = await iter.__anext__() - if isinstance(value, Exception): - raise value - return value - - def add_done_callback(*args, **kwargs): - pass - - def cancel(*args, **kwargs): - pass - - def cancelled(*args, **kwargs): - pass - - def code(*args, **kwargs): - pass - - def details(*args, **kwargs): - pass - - def done(*args, **kwargs): - pass - - def initial_metadata(*args, **kwargs): - pass - - def time_remaining(*args, **kwargs): - pass - - def trailing_metadata(*args, **kwargs): - pass - - async def wait_for_connection(*args, **kwargs): - return async_mock() - - class UnaryStreamMultiCallableMock(grpc.aio.UnaryStreamMultiCallable): - def __init__(self, parent): - self.parent = parent - - def __call__( - self, - request, - *, - timeout=None, - metadata=None, - credentials=None, - wait_for_ready=None, - compression=None - ): - self.parent.execute_query_calls.append(request) - if request.resume_token: - self.parent.resume_tokens.append(request.resume_token) - return UnaryStreamCallMock() - - def add_done_callback(*args, **kwargs): - pass - - def cancel(*args, **kwargs): - pass - - def cancelled(*args, **kwargs): - pass - - def code(*args, **kwargs): - pass - - def details(*args, **kwargs): - pass - - def done(*args, **kwargs): - pass - - def initial_metadata(*args, **kwargs): - pass - - def time_remaining(*args, **kwargs): - pass - - def trailing_metadata(*args, **kwargs): - pass - - def wait_for_connection(*args, **kwargs): - pass - - # unary_stream should return https://grpc.github.io/grpc/python/grpc_asyncio.html#grpc.aio.UnaryStreamMultiCallable - # PTAL https://grpc.github.io/grpc/python/grpc_asyncio.html#grpc.aio.Channel.unary_stream - return UnaryStreamMultiCallableMock(self) - return async_mock() - - def stream_unary(self, *args, **kwargs) -> grpc.aio.StreamUnaryMultiCallable: - raise NotImplementedError() - - def stream_stream(self, *args, **kwargs) -> grpc.aio.StreamStreamMultiCallable: - raise NotImplementedError() - - async def close(self, grace=None): - return - - async def channel_ready(self): - return - - async def __aenter__(self): - return self - - async def __aexit__(self, exc_type, exc_val, exc_tb): - await self.close() - - def get_state(self, try_to_connect: bool = False) -> grpc.ChannelConnectivity: - raise NotImplementedError() - - async def wait_for_state_change(self, last_observed_state): - raise NotImplementedError() diff --git a/packages/google-cloud-bigtable/tests/system/data/test_system.py b/packages/google-cloud-bigtable/tests/system/data/test_system.py deleted file mode 100644 index 8f31827edd78..000000000000 --- a/packages/google-cloud-bigtable/tests/system/data/test_system.py +++ /dev/null @@ -1,937 +0,0 @@ -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import pytest -import pytest_asyncio -import asyncio -import uuid -import os -from google.api_core import retry -from google.api_core.exceptions import ClientError - -from google.cloud.bigtable.data.read_modify_write_rules import _MAX_INCREMENT_VALUE -from google.cloud.environment_vars import BIGTABLE_EMULATOR - -TEST_FAMILY = "test-family" -TEST_FAMILY_2 = "test-family-2" - - -@pytest.fixture(scope="session") -def column_family_config(): - """ - specify column families to create when creating a new test table - """ - from google.cloud.bigtable_admin_v2 import types - - return {TEST_FAMILY: types.ColumnFamily(), TEST_FAMILY_2: types.ColumnFamily()} - - -@pytest.fixture(scope="session") -def init_table_id(): - """ - The table_id to use when creating a new test table - """ - return f"test-table-{uuid.uuid4().hex}" - - -@pytest.fixture(scope="session") -def cluster_config(project_id): - """ - Configuration for the clusters to use when creating a new instance - """ - from google.cloud.bigtable_admin_v2 import types - - cluster = { - "test-cluster": types.Cluster( - location=f"projects/{project_id}/locations/us-central1-b", - serve_nodes=1, - ) - } - return cluster - - -class TempRowBuilder: - """ - Used to add rows to a table for testing purposes. - """ - - def __init__(self, table): - self.rows = [] - self.table = table - - async def add_row( - self, row_key, *, family=TEST_FAMILY, qualifier=b"q", value=b"test-value" - ): - if isinstance(value, str): - value = value.encode("utf-8") - elif isinstance(value, int): - value = value.to_bytes(8, byteorder="big", signed=True) - request = { - "table_name": self.table.table_name, - "row_key": row_key, - "mutations": [ - { - "set_cell": { - "family_name": family, - "column_qualifier": qualifier, - "value": value, - } - } - ], - } - await self.table.client._gapic_client.mutate_row(request) - self.rows.append(row_key) - - async def delete_rows(self): - if self.rows: - request = { - "table_name": self.table.table_name, - "entries": [ - {"row_key": row, "mutations": [{"delete_from_row": {}}]} - for row in self.rows - ], - } - await self.table.client._gapic_client.mutate_rows(request) - - -@pytest.mark.usefixtures("table") -async def _retrieve_cell_value(table, row_key): - """ - Helper to read an individual row - """ - from google.cloud.bigtable.data import ReadRowsQuery - - row_list = await table.read_rows(ReadRowsQuery(row_keys=row_key)) - assert len(row_list) == 1 - row = row_list[0] - cell = row.cells[0] - return cell.value - - -async def _create_row_and_mutation( - table, temp_rows, *, start_value=b"start", new_value=b"new_value" -): - """ - Helper to create a new row, and a sample set_cell mutation to change its value - """ - from google.cloud.bigtable.data.mutations import SetCell - - row_key = uuid.uuid4().hex.encode() - family = TEST_FAMILY - qualifier = b"test-qualifier" - await temp_rows.add_row( - row_key, family=family, qualifier=qualifier, value=start_value - ) - # ensure cell is initialized - assert (await _retrieve_cell_value(table, row_key)) == start_value - - mutation = SetCell(family=TEST_FAMILY, qualifier=qualifier, new_value=new_value) - return row_key, mutation - - -@pytest_asyncio.fixture(scope="function") -async def temp_rows(table): - builder = TempRowBuilder(table) - yield builder - await builder.delete_rows() - - -@pytest.mark.usefixtures("table") -@pytest.mark.usefixtures("client") -@retry.AsyncRetry(predicate=retry.if_exception_type(ClientError), initial=1, maximum=10) -@pytest.mark.asyncio -async def 
test_ping_and_warm_gapic(client, table): - """ - Simple ping rpc test - This test ensures channels are able to authenticate with backend - """ - request = {"name": table.instance_name} - await client._gapic_client.ping_and_warm(request) - - -@pytest.mark.usefixtures("table") -@pytest.mark.usefixtures("client") -@retry.AsyncRetry(predicate=retry.if_exception_type(ClientError), initial=1, maximum=5) -@pytest.mark.asyncio -async def test_ping_and_warm(client, table): - """ - Test ping and warm from handwritten client - """ - results = await client._ping_and_warm_instances() - assert len(results) == 1 - assert results[0] is None - - -@pytest.mark.asyncio -@pytest.mark.usefixtures("table") -@retry.AsyncRetry(predicate=retry.if_exception_type(ClientError), initial=1, maximum=5) -async def test_mutation_set_cell(table, temp_rows): - """ - Ensure cells can be set properly - """ - row_key = b"bulk_mutate" - new_value = uuid.uuid4().hex.encode() - row_key, mutation = await _create_row_and_mutation( - table, temp_rows, new_value=new_value - ) - await table.mutate_row(row_key, mutation) - - # ensure cell is updated - assert (await _retrieve_cell_value(table, row_key)) == new_value - - -@pytest.mark.skipif( - bool(os.environ.get(BIGTABLE_EMULATOR)), reason="emulator doesn't use splits" -) -@pytest.mark.usefixtures("client") -@pytest.mark.usefixtures("table") -@retry.AsyncRetry(predicate=retry.if_exception_type(ClientError), initial=1, maximum=5) -@pytest.mark.asyncio -async def test_sample_row_keys(client, table, temp_rows, column_split_config): - """ - Sample keys should return a single sample in small test tables - """ - await temp_rows.add_row(b"row_key_1") - await temp_rows.add_row(b"row_key_2") - - results = await table.sample_row_keys() - assert len(results) == len(column_split_config) + 1 - # first keys should match the split config - for idx in range(len(column_split_config)): - assert results[idx][0] == column_split_config[idx] - assert isinstance(results[idx][1], int) - # last sample should be empty key - assert results[-1][0] == b"" - assert isinstance(results[-1][1], int) - - -@pytest.mark.usefixtures("client") -@pytest.mark.usefixtures("table") -@pytest.mark.asyncio -async def test_bulk_mutations_set_cell(client, table, temp_rows): - """ - Ensure cells can be set properly - """ - from google.cloud.bigtable.data.mutations import RowMutationEntry - - new_value = uuid.uuid4().hex.encode() - row_key, mutation = await _create_row_and_mutation( - table, temp_rows, new_value=new_value - ) - bulk_mutation = RowMutationEntry(row_key, [mutation]) - - await table.bulk_mutate_rows([bulk_mutation]) - - # ensure cell is updated - assert (await _retrieve_cell_value(table, row_key)) == new_value - - -@pytest.mark.asyncio -async def test_bulk_mutations_raise_exception(client, table): - """ - If an invalid mutation is passed, an exception should be raised - """ - from google.cloud.bigtable.data.mutations import RowMutationEntry, SetCell - from google.cloud.bigtable.data.exceptions import MutationsExceptionGroup - from google.cloud.bigtable.data.exceptions import FailedMutationEntryError - - row_key = uuid.uuid4().hex.encode() - mutation = SetCell(family="nonexistent", qualifier=b"test-qualifier", new_value=b"") - bulk_mutation = RowMutationEntry(row_key, [mutation]) - - with pytest.raises(MutationsExceptionGroup) as exc: - await table.bulk_mutate_rows([bulk_mutation]) - assert len(exc.value.exceptions) == 1 - entry_error = exc.value.exceptions[0] - assert isinstance(entry_error, FailedMutationEntryError) - 
assert entry_error.index == 0 - assert entry_error.entry == bulk_mutation - - -@pytest.mark.usefixtures("client") -@pytest.mark.usefixtures("table") -@retry.AsyncRetry(predicate=retry.if_exception_type(ClientError), initial=1, maximum=5) -@pytest.mark.asyncio -async def test_mutations_batcher_context_manager(client, table, temp_rows): - """ - test batcher with context manager. Should flush on exit - """ - from google.cloud.bigtable.data.mutations import RowMutationEntry - - new_value, new_value2 = [uuid.uuid4().hex.encode() for _ in range(2)] - row_key, mutation = await _create_row_and_mutation( - table, temp_rows, new_value=new_value - ) - row_key2, mutation2 = await _create_row_and_mutation( - table, temp_rows, new_value=new_value2 - ) - bulk_mutation = RowMutationEntry(row_key, [mutation]) - bulk_mutation2 = RowMutationEntry(row_key2, [mutation2]) - - async with table.mutations_batcher() as batcher: - await batcher.append(bulk_mutation) - await batcher.append(bulk_mutation2) - # ensure cell is updated - assert (await _retrieve_cell_value(table, row_key)) == new_value - assert len(batcher._staged_entries) == 0 - - -@pytest.mark.usefixtures("client") -@pytest.mark.usefixtures("table") -@retry.AsyncRetry(predicate=retry.if_exception_type(ClientError), initial=1, maximum=5) -@pytest.mark.asyncio -async def test_mutations_batcher_timer_flush(client, table, temp_rows): - """ - batch should occur after flush_interval seconds - """ - from google.cloud.bigtable.data.mutations import RowMutationEntry - - new_value = uuid.uuid4().hex.encode() - row_key, mutation = await _create_row_and_mutation( - table, temp_rows, new_value=new_value - ) - bulk_mutation = RowMutationEntry(row_key, [mutation]) - flush_interval = 0.1 - async with table.mutations_batcher(flush_interval=flush_interval) as batcher: - await batcher.append(bulk_mutation) - await asyncio.sleep(0) - assert len(batcher._staged_entries) == 1 - await asyncio.sleep(flush_interval + 0.1) - assert len(batcher._staged_entries) == 0 - # ensure cell is updated - assert (await _retrieve_cell_value(table, row_key)) == new_value - - -@pytest.mark.usefixtures("client") -@pytest.mark.usefixtures("table") -@retry.AsyncRetry(predicate=retry.if_exception_type(ClientError), initial=1, maximum=5) -@pytest.mark.asyncio -async def test_mutations_batcher_count_flush(client, table, temp_rows): - """ - batch should flush after flush_limit_mutation_count mutations - """ - from google.cloud.bigtable.data.mutations import RowMutationEntry - - new_value, new_value2 = [uuid.uuid4().hex.encode() for _ in range(2)] - row_key, mutation = await _create_row_and_mutation( - table, temp_rows, new_value=new_value - ) - bulk_mutation = RowMutationEntry(row_key, [mutation]) - row_key2, mutation2 = await _create_row_and_mutation( - table, temp_rows, new_value=new_value2 - ) - bulk_mutation2 = RowMutationEntry(row_key2, [mutation2]) - - async with table.mutations_batcher(flush_limit_mutation_count=2) as batcher: - await batcher.append(bulk_mutation) - assert len(batcher._flush_jobs) == 0 - # should be noop; flush not scheduled - assert len(batcher._staged_entries) == 1 - await batcher.append(bulk_mutation2) - # task should now be scheduled - assert len(batcher._flush_jobs) == 1 - await asyncio.gather(*batcher._flush_jobs) - assert len(batcher._staged_entries) == 0 - assert len(batcher._flush_jobs) == 0 - # ensure cells were updated - assert (await _retrieve_cell_value(table, row_key)) == new_value - assert (await _retrieve_cell_value(table, row_key2)) == new_value2 - - 
-@pytest.mark.usefixtures("client") -@pytest.mark.usefixtures("table") -@retry.AsyncRetry(predicate=retry.if_exception_type(ClientError), initial=1, maximum=5) -@pytest.mark.asyncio -async def test_mutations_batcher_bytes_flush(client, table, temp_rows): - """ - batch should flush after flush_limit_bytes bytes - """ - from google.cloud.bigtable.data.mutations import RowMutationEntry - - new_value, new_value2 = [uuid.uuid4().hex.encode() for _ in range(2)] - row_key, mutation = await _create_row_and_mutation( - table, temp_rows, new_value=new_value - ) - bulk_mutation = RowMutationEntry(row_key, [mutation]) - row_key2, mutation2 = await _create_row_and_mutation( - table, temp_rows, new_value=new_value2 - ) - bulk_mutation2 = RowMutationEntry(row_key2, [mutation2]) - - flush_limit = bulk_mutation.size() + bulk_mutation2.size() - 1 - - async with table.mutations_batcher(flush_limit_bytes=flush_limit) as batcher: - await batcher.append(bulk_mutation) - assert len(batcher._flush_jobs) == 0 - assert len(batcher._staged_entries) == 1 - await batcher.append(bulk_mutation2) - # task should now be scheduled - assert len(batcher._flush_jobs) == 1 - assert len(batcher._staged_entries) == 0 - # let flush complete - await asyncio.gather(*batcher._flush_jobs) - # ensure cells were updated - assert (await _retrieve_cell_value(table, row_key)) == new_value - assert (await _retrieve_cell_value(table, row_key2)) == new_value2 - - -@pytest.mark.usefixtures("client") -@pytest.mark.usefixtures("table") -@pytest.mark.asyncio -async def test_mutations_batcher_no_flush(client, table, temp_rows): - """ - test with no flush requirements met - """ - from google.cloud.bigtable.data.mutations import RowMutationEntry - - new_value = uuid.uuid4().hex.encode() - start_value = b"unchanged" - row_key, mutation = await _create_row_and_mutation( - table, temp_rows, start_value=start_value, new_value=new_value - ) - bulk_mutation = RowMutationEntry(row_key, [mutation]) - row_key2, mutation2 = await _create_row_and_mutation( - table, temp_rows, start_value=start_value, new_value=new_value - ) - bulk_mutation2 = RowMutationEntry(row_key2, [mutation2]) - - size_limit = bulk_mutation.size() + bulk_mutation2.size() + 1 - async with table.mutations_batcher( - flush_limit_bytes=size_limit, flush_limit_mutation_count=3, flush_interval=1 - ) as batcher: - await batcher.append(bulk_mutation) - assert len(batcher._staged_entries) == 1 - await batcher.append(bulk_mutation2) - # flush not scheduled - assert len(batcher._flush_jobs) == 0 - await asyncio.sleep(0.01) - assert len(batcher._staged_entries) == 2 - assert len(batcher._flush_jobs) == 0 - # ensure cells were not updated - assert (await _retrieve_cell_value(table, row_key)) == start_value - assert (await _retrieve_cell_value(table, row_key2)) == start_value - - -@pytest.mark.usefixtures("client") -@pytest.mark.usefixtures("table") -@pytest.mark.parametrize( - "start,increment,expected", - [ - (0, 0, 0), - (0, 1, 1), - (0, -1, -1), - (1, 0, 1), - (0, -100, -100), - (0, 3000, 3000), - (10, 4, 14), - (_MAX_INCREMENT_VALUE, -_MAX_INCREMENT_VALUE, 0), - (_MAX_INCREMENT_VALUE, 2, -_MAX_INCREMENT_VALUE), - (-_MAX_INCREMENT_VALUE, -2, _MAX_INCREMENT_VALUE), - ], -) -@pytest.mark.asyncio -async def test_read_modify_write_row_increment( - client, table, temp_rows, start, increment, expected -): - """ - test read_modify_write_row - """ - from google.cloud.bigtable.data.read_modify_write_rules import IncrementRule - - row_key = b"test-row-key" - family = TEST_FAMILY - qualifier = 
b"test-qualifier" - await temp_rows.add_row(row_key, value=start, family=family, qualifier=qualifier) - - rule = IncrementRule(family, qualifier, increment) - result = await table.read_modify_write_row(row_key, rule) - assert result.row_key == row_key - assert len(result) == 1 - assert result[0].family == family - assert result[0].qualifier == qualifier - assert int(result[0]) == expected - # ensure that reading from server gives same value - assert (await _retrieve_cell_value(table, row_key)) == result[0].value - - -@pytest.mark.usefixtures("client") -@pytest.mark.usefixtures("table") -@pytest.mark.parametrize( - "start,append,expected", - [ - (b"", b"", b""), - ("", "", b""), - (b"abc", b"123", b"abc123"), - (b"abc", "123", b"abc123"), - ("", b"1", b"1"), - (b"abc", "", b"abc"), - (b"hello", b"world", b"helloworld"), - ], -) -@pytest.mark.asyncio -async def test_read_modify_write_row_append( - client, table, temp_rows, start, append, expected -): - """ - test read_modify_write_row - """ - from google.cloud.bigtable.data.read_modify_write_rules import AppendValueRule - - row_key = b"test-row-key" - family = TEST_FAMILY - qualifier = b"test-qualifier" - await temp_rows.add_row(row_key, value=start, family=family, qualifier=qualifier) - - rule = AppendValueRule(family, qualifier, append) - result = await table.read_modify_write_row(row_key, rule) - assert result.row_key == row_key - assert len(result) == 1 - assert result[0].family == family - assert result[0].qualifier == qualifier - assert result[0].value == expected - # ensure that reading from server gives same value - assert (await _retrieve_cell_value(table, row_key)) == result[0].value - - -@pytest.mark.usefixtures("client") -@pytest.mark.usefixtures("table") -@pytest.mark.asyncio -async def test_read_modify_write_row_chained(client, table, temp_rows): - """ - test read_modify_write_row with multiple rules - """ - from google.cloud.bigtable.data.read_modify_write_rules import AppendValueRule - from google.cloud.bigtable.data.read_modify_write_rules import IncrementRule - - row_key = b"test-row-key" - family = TEST_FAMILY - qualifier = b"test-qualifier" - start_amount = 1 - increment_amount = 10 - await temp_rows.add_row( - row_key, value=start_amount, family=family, qualifier=qualifier - ) - rule = [ - IncrementRule(family, qualifier, increment_amount), - AppendValueRule(family, qualifier, "hello"), - AppendValueRule(family, qualifier, "world"), - AppendValueRule(family, qualifier, "!"), - ] - result = await table.read_modify_write_row(row_key, rule) - assert result.row_key == row_key - assert result[0].family == family - assert result[0].qualifier == qualifier - # result should be a bytes number string for the IncrementRules, followed by the AppendValueRule values - assert ( - result[0].value - == (start_amount + increment_amount).to_bytes(8, "big", signed=True) - + b"helloworld!" 
- ) - # ensure that reading from server gives same value - assert (await _retrieve_cell_value(table, row_key)) == result[0].value - - -@pytest.mark.usefixtures("client") -@pytest.mark.usefixtures("table") -@pytest.mark.parametrize( - "start_val,predicate_range,expected_result", - [ - (1, (0, 2), True), - (-1, (0, 2), False), - ], -) -@pytest.mark.asyncio -async def test_check_and_mutate( - client, table, temp_rows, start_val, predicate_range, expected_result -): - """ - test that check_and_mutate_row works applies the right mutations, and returns the right result - """ - from google.cloud.bigtable.data.mutations import SetCell - from google.cloud.bigtable.data.row_filters import ValueRangeFilter - - row_key = b"test-row-key" - family = TEST_FAMILY - qualifier = b"test-qualifier" - - await temp_rows.add_row( - row_key, value=start_val, family=family, qualifier=qualifier - ) - - false_mutation_value = b"false-mutation-value" - false_mutation = SetCell( - family=TEST_FAMILY, qualifier=qualifier, new_value=false_mutation_value - ) - true_mutation_value = b"true-mutation-value" - true_mutation = SetCell( - family=TEST_FAMILY, qualifier=qualifier, new_value=true_mutation_value - ) - predicate = ValueRangeFilter(predicate_range[0], predicate_range[1]) - result = await table.check_and_mutate_row( - row_key, - predicate, - true_case_mutations=true_mutation, - false_case_mutations=false_mutation, - ) - assert result == expected_result - # ensure cell is updated - expected_value = true_mutation_value if expected_result else false_mutation_value - assert (await _retrieve_cell_value(table, row_key)) == expected_value - - -@pytest.mark.skipif( - bool(os.environ.get(BIGTABLE_EMULATOR)), - reason="emulator doesn't raise InvalidArgument", -) -@pytest.mark.usefixtures("client") -@pytest.mark.usefixtures("table") -@pytest.mark.asyncio -async def test_check_and_mutate_empty_request(client, table): - """ - check_and_mutate with no true or fale mutations should raise an error - """ - from google.api_core import exceptions - - with pytest.raises(exceptions.InvalidArgument) as e: - await table.check_and_mutate_row( - b"row_key", None, true_case_mutations=None, false_case_mutations=None - ) - assert "No mutations provided" in str(e.value) - - -@pytest.mark.usefixtures("table") -@retry.AsyncRetry(predicate=retry.if_exception_type(ClientError), initial=1, maximum=5) -@pytest.mark.asyncio -async def test_read_rows_stream(table, temp_rows): - """ - Ensure that the read_rows_stream method works - """ - await temp_rows.add_row(b"row_key_1") - await temp_rows.add_row(b"row_key_2") - - # full table scan - generator = await table.read_rows_stream({}) - first_row = await generator.__anext__() - second_row = await generator.__anext__() - assert first_row.row_key == b"row_key_1" - assert second_row.row_key == b"row_key_2" - with pytest.raises(StopAsyncIteration): - await generator.__anext__() - - -@pytest.mark.usefixtures("table") -@retry.AsyncRetry(predicate=retry.if_exception_type(ClientError), initial=1, maximum=5) -@pytest.mark.asyncio -async def test_read_rows(table, temp_rows): - """ - Ensure that the read_rows method works - """ - await temp_rows.add_row(b"row_key_1") - await temp_rows.add_row(b"row_key_2") - # full table scan - row_list = await table.read_rows({}) - assert len(row_list) == 2 - assert row_list[0].row_key == b"row_key_1" - assert row_list[1].row_key == b"row_key_2" - - -@pytest.mark.usefixtures("table") -@retry.AsyncRetry(predicate=retry.if_exception_type(ClientError), initial=1, maximum=5) 
-@pytest.mark.asyncio -async def test_read_rows_sharded_simple(table, temp_rows): - """ - Test read rows sharded with two queries - """ - from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery - - await temp_rows.add_row(b"a") - await temp_rows.add_row(b"b") - await temp_rows.add_row(b"c") - await temp_rows.add_row(b"d") - query1 = ReadRowsQuery(row_keys=[b"a", b"c"]) - query2 = ReadRowsQuery(row_keys=[b"b", b"d"]) - row_list = await table.read_rows_sharded([query1, query2]) - assert len(row_list) == 4 - assert row_list[0].row_key == b"a" - assert row_list[1].row_key == b"c" - assert row_list[2].row_key == b"b" - assert row_list[3].row_key == b"d" - - -@pytest.mark.usefixtures("table") -@retry.AsyncRetry(predicate=retry.if_exception_type(ClientError), initial=1, maximum=5) -@pytest.mark.asyncio -async def test_read_rows_sharded_from_sample(table, temp_rows): - """ - Test end-to-end sharding - """ - from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery - from google.cloud.bigtable.data.read_rows_query import RowRange - - await temp_rows.add_row(b"a") - await temp_rows.add_row(b"b") - await temp_rows.add_row(b"c") - await temp_rows.add_row(b"d") - - table_shard_keys = await table.sample_row_keys() - query = ReadRowsQuery(row_ranges=[RowRange(start_key=b"b", end_key=b"z")]) - shard_queries = query.shard(table_shard_keys) - row_list = await table.read_rows_sharded(shard_queries) - assert len(row_list) == 3 - assert row_list[0].row_key == b"b" - assert row_list[1].row_key == b"c" - assert row_list[2].row_key == b"d" - - -@pytest.mark.usefixtures("table") -@retry.AsyncRetry(predicate=retry.if_exception_type(ClientError), initial=1, maximum=5) -@pytest.mark.asyncio -async def test_read_rows_sharded_filters_limits(table, temp_rows): - """ - Test read rows sharded with filters and limits - """ - from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery - from google.cloud.bigtable.data.row_filters import ApplyLabelFilter - - await temp_rows.add_row(b"a") - await temp_rows.add_row(b"b") - await temp_rows.add_row(b"c") - await temp_rows.add_row(b"d") - - label_filter1 = ApplyLabelFilter("first") - label_filter2 = ApplyLabelFilter("second") - query1 = ReadRowsQuery(row_keys=[b"a", b"c"], limit=1, row_filter=label_filter1) - query2 = ReadRowsQuery(row_keys=[b"b", b"d"], row_filter=label_filter2) - row_list = await table.read_rows_sharded([query1, query2]) - assert len(row_list) == 3 - assert row_list[0].row_key == b"a" - assert row_list[1].row_key == b"b" - assert row_list[2].row_key == b"d" - assert row_list[0][0].labels == ["first"] - assert row_list[1][0].labels == ["second"] - assert row_list[2][0].labels == ["second"] - - -@pytest.mark.usefixtures("table") -@retry.AsyncRetry(predicate=retry.if_exception_type(ClientError), initial=1, maximum=5) -@pytest.mark.asyncio -async def test_read_rows_range_query(table, temp_rows): - """ - Ensure that the read_rows method works - """ - from google.cloud.bigtable.data import ReadRowsQuery - from google.cloud.bigtable.data import RowRange - - await temp_rows.add_row(b"a") - await temp_rows.add_row(b"b") - await temp_rows.add_row(b"c") - await temp_rows.add_row(b"d") - # full table scan - query = ReadRowsQuery(row_ranges=RowRange(start_key=b"b", end_key=b"d")) - row_list = await table.read_rows(query) - assert len(row_list) == 2 - assert row_list[0].row_key == b"b" - assert row_list[1].row_key == b"c" - - -@pytest.mark.usefixtures("table") -@retry.AsyncRetry(predicate=retry.if_exception_type(ClientError), initial=1, 
maximum=5) -@pytest.mark.asyncio -async def test_read_rows_single_key_query(table, temp_rows): - """ - Ensure that the read_rows method works with specified query - """ - from google.cloud.bigtable.data import ReadRowsQuery - - await temp_rows.add_row(b"a") - await temp_rows.add_row(b"b") - await temp_rows.add_row(b"c") - await temp_rows.add_row(b"d") - # retrieve specific keys - query = ReadRowsQuery(row_keys=[b"a", b"c"]) - row_list = await table.read_rows(query) - assert len(row_list) == 2 - assert row_list[0].row_key == b"a" - assert row_list[1].row_key == b"c" - - -@pytest.mark.usefixtures("table") -@retry.AsyncRetry(predicate=retry.if_exception_type(ClientError), initial=1, maximum=5) -@pytest.mark.asyncio -async def test_read_rows_with_filter(table, temp_rows): - """ - ensure filters are applied - """ - from google.cloud.bigtable.data import ReadRowsQuery - from google.cloud.bigtable.data.row_filters import ApplyLabelFilter - - await temp_rows.add_row(b"a") - await temp_rows.add_row(b"b") - await temp_rows.add_row(b"c") - await temp_rows.add_row(b"d") - # retrieve keys with filter - expected_label = "test-label" - row_filter = ApplyLabelFilter(expected_label) - query = ReadRowsQuery(row_filter=row_filter) - row_list = await table.read_rows(query) - assert len(row_list) == 4 - for row in row_list: - assert row[0].labels == [expected_label] - - -@pytest.mark.usefixtures("table") -@pytest.mark.asyncio -async def test_read_rows_stream_close(table, temp_rows): - """ - Ensure that the read_rows_stream can be closed - """ - from google.cloud.bigtable.data import ReadRowsQuery - - await temp_rows.add_row(b"row_key_1") - await temp_rows.add_row(b"row_key_2") - # full table scan - query = ReadRowsQuery() - generator = await table.read_rows_stream(query) - # grab first row - first_row = await generator.__anext__() - assert first_row.row_key == b"row_key_1" - # close stream early - await generator.aclose() - with pytest.raises(StopAsyncIteration): - await generator.__anext__() - - -@pytest.mark.usefixtures("table") -@pytest.mark.asyncio -async def test_read_row(table, temp_rows): - """ - Test read_row (single row helper) - """ - from google.cloud.bigtable.data import Row - - await temp_rows.add_row(b"row_key_1", value=b"value") - row = await table.read_row(b"row_key_1") - assert isinstance(row, Row) - assert row.row_key == b"row_key_1" - assert row.cells[0].value == b"value" - - -@pytest.mark.skipif( - bool(os.environ.get(BIGTABLE_EMULATOR)), - reason="emulator doesn't raise InvalidArgument", -) -@pytest.mark.usefixtures("table") -@pytest.mark.asyncio -async def test_read_row_missing(table): - """ - Test read_row when row does not exist - """ - from google.api_core import exceptions - - row_key = "row_key_not_exist" - result = await table.read_row(row_key) - assert result is None - with pytest.raises(exceptions.InvalidArgument) as e: - await table.read_row("") - assert "Row keys must be non-empty" in str(e) - - -@pytest.mark.usefixtures("table") -@pytest.mark.asyncio -async def test_read_row_w_filter(table, temp_rows): - """ - Test read_row (single row helper) - """ - from google.cloud.bigtable.data import Row - from google.cloud.bigtable.data.row_filters import ApplyLabelFilter - - await temp_rows.add_row(b"row_key_1", value=b"value") - expected_label = "test-label" - label_filter = ApplyLabelFilter(expected_label) - row = await table.read_row(b"row_key_1", row_filter=label_filter) - assert isinstance(row, Row) - assert row.row_key == b"row_key_1" - assert row.cells[0].value == b"value" - 
assert row.cells[0].labels == [expected_label] - - -@pytest.mark.skipif( - bool(os.environ.get(BIGTABLE_EMULATOR)), - reason="emulator doesn't raise InvalidArgument", -) -@pytest.mark.usefixtures("table") -@pytest.mark.asyncio -async def test_row_exists(table, temp_rows): - from google.api_core import exceptions - - """Test row_exists with rows that exist and don't exist""" - assert await table.row_exists(b"row_key_1") is False - await temp_rows.add_row(b"row_key_1") - assert await table.row_exists(b"row_key_1") is True - assert await table.row_exists("row_key_1") is True - assert await table.row_exists(b"row_key_2") is False - assert await table.row_exists("row_key_2") is False - assert await table.row_exists("3") is False - await temp_rows.add_row(b"3") - assert await table.row_exists(b"3") is True - with pytest.raises(exceptions.InvalidArgument) as e: - await table.row_exists("") - assert "Row keys must be non-empty" in str(e) - - -@pytest.mark.usefixtures("table") -@retry.AsyncRetry(predicate=retry.if_exception_type(ClientError), initial=1, maximum=5) -@pytest.mark.parametrize( - "cell_value,filter_input,expect_match", - [ - (b"abc", b"abc", True), - (b"abc", "abc", True), - (b".", ".", True), - (".*", ".*", True), - (".*", b".*", True), - ("a", ".*", False), - (b".*", b".*", True), - (r"\a", r"\a", True), - (b"\xe2\x98\x83", "☃", True), - ("☃", "☃", True), - (r"\C☃", r"\C☃", True), - (1, 1, True), - (2, 1, False), - (68, 68, True), - ("D", 68, False), - (68, "D", False), - (-1, -1, True), - (2852126720, 2852126720, True), - (-1431655766, -1431655766, True), - (-1431655766, -1, False), - ], -) -@pytest.mark.asyncio -async def test_literal_value_filter( - table, temp_rows, cell_value, filter_input, expect_match -): - """ - Literal value filter does complex escaping on re2 strings. - Make sure inputs are properly interpreted by the server - """ - from google.cloud.bigtable.data.row_filters import LiteralValueFilter - from google.cloud.bigtable.data import ReadRowsQuery - - f = LiteralValueFilter(filter_input) - await temp_rows.add_row(b"row_key_1", value=cell_value) - query = ReadRowsQuery(row_filter=f) - row_list = await table.read_rows(query) - assert len(row_list) == bool( - expect_match - ), f"row {type(cell_value)}({cell_value}) not found with {type(filter_input)}({filter_input}) filter" diff --git a/packages/google-cloud-bigtable/tests/system/data/test_system_async.py b/packages/google-cloud-bigtable/tests/system/data/test_system_async.py new file mode 100644 index 000000000000..c0e9f39d230e --- /dev/null +++ b/packages/google-cloud-bigtable/tests/system/data/test_system_async.py @@ -0,0 +1,1016 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import pytest +import asyncio +import uuid +import os +from google.api_core import retry +from google.api_core.exceptions import ClientError + +from google.cloud.bigtable.data.read_modify_write_rules import _MAX_INCREMENT_VALUE +from google.cloud.environment_vars import BIGTABLE_EMULATOR + +from google.cloud.bigtable.data._cross_sync import CrossSync + +from . import TEST_FAMILY, TEST_FAMILY_2 + + +__CROSS_SYNC_OUTPUT__ = "tests.system.data.test_system_autogen" + + +@CrossSync.convert_class( + sync_name="TempRowBuilder", + add_mapping_for_name="TempRowBuilder", +) +class TempRowBuilderAsync: + """ + Used to add rows to a table for testing purposes. + """ + + def __init__(self, table): + self.rows = [] + self.table = table + + @CrossSync.convert + async def add_row( + self, row_key, *, family=TEST_FAMILY, qualifier=b"q", value=b"test-value" + ): + if isinstance(value, str): + value = value.encode("utf-8") + elif isinstance(value, int): + value = value.to_bytes(8, byteorder="big", signed=True) + request = { + "table_name": self.table.table_name, + "row_key": row_key, + "mutations": [ + { + "set_cell": { + "family_name": family, + "column_qualifier": qualifier, + "value": value, + } + } + ], + } + await self.table.client._gapic_client.mutate_row(request) + self.rows.append(row_key) + + @CrossSync.convert + async def delete_rows(self): + if self.rows: + request = { + "table_name": self.table.table_name, + "entries": [ + {"row_key": row, "mutations": [{"delete_from_row": {}}]} + for row in self.rows + ], + } + await self.table.client._gapic_client.mutate_rows(request) + + +@CrossSync.convert_class(sync_name="TestSystem") +class TestSystemAsync: + @CrossSync.convert + @CrossSync.pytest_fixture(scope="session") + async def client(self): + project = os.getenv("GOOGLE_CLOUD_PROJECT") or None + async with CrossSync.DataClient(project=project) as client: + yield client + + @CrossSync.convert + @CrossSync.pytest_fixture(scope="session") + async def table(self, client, table_id, instance_id): + async with client.get_table(instance_id, table_id) as table: + yield table + + @CrossSync.drop + @pytest.fixture(scope="session") + def event_loop(self): + loop = asyncio.get_event_loop() + yield loop + loop.stop() + loop.close() + + @pytest.fixture(scope="session") + def column_family_config(self): + """ + specify column families to create when creating a new test table + """ + from google.cloud.bigtable_admin_v2 import types + + return {TEST_FAMILY: types.ColumnFamily(), TEST_FAMILY_2: types.ColumnFamily()} + + @pytest.fixture(scope="session") + def init_table_id(self): + """ + The table_id to use when creating a new test table + """ + return f"test-table-{uuid.uuid4().hex}" + + @pytest.fixture(scope="session") + def cluster_config(self, project_id): + """ + Configuration for the clusters to use when creating a new instance + """ + from google.cloud.bigtable_admin_v2 import types + + cluster = { + "test-cluster": types.Cluster( + location=f"projects/{project_id}/locations/us-central1-b", + serve_nodes=1, + ) + } + return cluster + + @CrossSync.convert + @pytest.mark.usefixtures("table") + async def _retrieve_cell_value(self, table, row_key): + """ + Helper to read an individual row + """ + from google.cloud.bigtable.data import ReadRowsQuery + + row_list = await table.read_rows(ReadRowsQuery(row_keys=row_key)) + assert len(row_list) == 1 + row = row_list[0] + cell = row.cells[0] + return cell.value + + @CrossSync.convert + async def _create_row_and_mutation( + self, table, temp_rows, *, 
start_value=b"start", new_value=b"new_value" + ): + """ + Helper to create a new row, and a sample set_cell mutation to change its value + """ + from google.cloud.bigtable.data.mutations import SetCell + + row_key = uuid.uuid4().hex.encode() + family = TEST_FAMILY + qualifier = b"test-qualifier" + await temp_rows.add_row( + row_key, family=family, qualifier=qualifier, value=start_value + ) + # ensure cell is initialized + assert await self._retrieve_cell_value(table, row_key) == start_value + + mutation = SetCell(family=TEST_FAMILY, qualifier=qualifier, new_value=new_value) + return row_key, mutation + + @CrossSync.convert + @CrossSync.pytest_fixture(scope="function") + async def temp_rows(self, table): + builder = CrossSync.TempRowBuilder(table) + yield builder + await builder.delete_rows() + + @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("client") + @CrossSync.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=10 + ) + @CrossSync.pytest + async def test_ping_and_warm_gapic(self, client, table): + """ + Simple ping rpc test + This test ensures channels are able to authenticate with backend + """ + request = {"name": table.instance_name} + await client._gapic_client.ping_and_warm(request) + + @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("client") + @CrossSync.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + @CrossSync.pytest + async def test_ping_and_warm(self, client, table): + """ + Test ping and warm from handwritten client + """ + results = await client._ping_and_warm_instances() + assert len(results) == 1 + assert results[0] is None + + @CrossSync.pytest + @pytest.mark.usefixtures("table") + @CrossSync.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + async def test_mutation_set_cell(self, table, temp_rows): + """ + Ensure cells can be set properly + """ + row_key = b"bulk_mutate" + new_value = uuid.uuid4().hex.encode() + row_key, mutation = await self._create_row_and_mutation( + table, temp_rows, new_value=new_value + ) + await table.mutate_row(row_key, mutation) + + # ensure cell is updated + assert (await self._retrieve_cell_value(table, row_key)) == new_value + + @pytest.mark.skipif( + bool(os.environ.get(BIGTABLE_EMULATOR)), reason="emulator doesn't use splits" + ) + @pytest.mark.usefixtures("client") + @pytest.mark.usefixtures("table") + @CrossSync.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + @CrossSync.pytest + async def test_sample_row_keys(self, client, table, temp_rows, column_split_config): + """ + Sample keys should return a single sample in small test tables + """ + await temp_rows.add_row(b"row_key_1") + await temp_rows.add_row(b"row_key_2") + + results = await table.sample_row_keys() + assert len(results) == len(column_split_config) + 1 + # first keys should match the split config + for idx in range(len(column_split_config)): + assert results[idx][0] == column_split_config[idx] + assert isinstance(results[idx][1], int) + # last sample should be empty key + assert results[-1][0] == b"" + assert isinstance(results[-1][1], int) + + @pytest.mark.usefixtures("client") + @pytest.mark.usefixtures("table") + @CrossSync.pytest + async def test_bulk_mutations_set_cell(self, client, table, temp_rows): + """ + Ensure cells can be set properly + """ + from google.cloud.bigtable.data.mutations import RowMutationEntry + + new_value = uuid.uuid4().hex.encode() + row_key, mutation = await self._create_row_and_mutation( + 
table, temp_rows, new_value=new_value + ) + bulk_mutation = RowMutationEntry(row_key, [mutation]) + + await table.bulk_mutate_rows([bulk_mutation]) + + # ensure cell is updated + assert (await self._retrieve_cell_value(table, row_key)) == new_value + + @CrossSync.pytest + async def test_bulk_mutations_raise_exception(self, client, table): + """ + If an invalid mutation is passed, an exception should be raised + """ + from google.cloud.bigtable.data.mutations import RowMutationEntry, SetCell + from google.cloud.bigtable.data.exceptions import MutationsExceptionGroup + from google.cloud.bigtable.data.exceptions import FailedMutationEntryError + + row_key = uuid.uuid4().hex.encode() + mutation = SetCell( + family="nonexistent", qualifier=b"test-qualifier", new_value=b"" + ) + bulk_mutation = RowMutationEntry(row_key, [mutation]) + + with pytest.raises(MutationsExceptionGroup) as exc: + await table.bulk_mutate_rows([bulk_mutation]) + assert len(exc.value.exceptions) == 1 + entry_error = exc.value.exceptions[0] + assert isinstance(entry_error, FailedMutationEntryError) + assert entry_error.index == 0 + assert entry_error.entry == bulk_mutation + + @pytest.mark.usefixtures("client") + @pytest.mark.usefixtures("table") + @CrossSync.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + @CrossSync.pytest + async def test_mutations_batcher_context_manager(self, client, table, temp_rows): + """ + test batcher with context manager. Should flush on exit + """ + from google.cloud.bigtable.data.mutations import RowMutationEntry + + new_value, new_value2 = [uuid.uuid4().hex.encode() for _ in range(2)] + row_key, mutation = await self._create_row_and_mutation( + table, temp_rows, new_value=new_value + ) + row_key2, mutation2 = await self._create_row_and_mutation( + table, temp_rows, new_value=new_value2 + ) + bulk_mutation = RowMutationEntry(row_key, [mutation]) + bulk_mutation2 = RowMutationEntry(row_key2, [mutation2]) + + async with table.mutations_batcher() as batcher: + await batcher.append(bulk_mutation) + await batcher.append(bulk_mutation2) + # ensure cell is updated + assert (await self._retrieve_cell_value(table, row_key)) == new_value + assert len(batcher._staged_entries) == 0 + + @pytest.mark.usefixtures("client") + @pytest.mark.usefixtures("table") + @CrossSync.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + @CrossSync.pytest + async def test_mutations_batcher_timer_flush(self, client, table, temp_rows): + """ + batch should occur after flush_interval seconds + """ + from google.cloud.bigtable.data.mutations import RowMutationEntry + + new_value = uuid.uuid4().hex.encode() + row_key, mutation = await self._create_row_and_mutation( + table, temp_rows, new_value=new_value + ) + bulk_mutation = RowMutationEntry(row_key, [mutation]) + flush_interval = 0.1 + async with table.mutations_batcher(flush_interval=flush_interval) as batcher: + await batcher.append(bulk_mutation) + await CrossSync.yield_to_event_loop() + assert len(batcher._staged_entries) == 1 + await CrossSync.sleep(flush_interval + 0.1) + assert len(batcher._staged_entries) == 0 + # ensure cell is updated + assert (await self._retrieve_cell_value(table, row_key)) == new_value + + @pytest.mark.usefixtures("client") + @pytest.mark.usefixtures("table") + @CrossSync.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + @CrossSync.pytest + async def test_mutations_batcher_count_flush(self, client, table, temp_rows): + """ + batch should flush after 
flush_limit_mutation_count mutations + """ + from google.cloud.bigtable.data.mutations import RowMutationEntry + + new_value, new_value2 = [uuid.uuid4().hex.encode() for _ in range(2)] + row_key, mutation = await self._create_row_and_mutation( + table, temp_rows, new_value=new_value + ) + bulk_mutation = RowMutationEntry(row_key, [mutation]) + row_key2, mutation2 = await self._create_row_and_mutation( + table, temp_rows, new_value=new_value2 + ) + bulk_mutation2 = RowMutationEntry(row_key2, [mutation2]) + + async with table.mutations_batcher(flush_limit_mutation_count=2) as batcher: + await batcher.append(bulk_mutation) + assert len(batcher._flush_jobs) == 0 + # should be noop; flush not scheduled + assert len(batcher._staged_entries) == 1 + await batcher.append(bulk_mutation2) + # task should now be scheduled + assert len(batcher._flush_jobs) == 1 + # let flush complete + for future in list(batcher._flush_jobs): + await future + # for sync version: grab result + future.result() + assert len(batcher._staged_entries) == 0 + assert len(batcher._flush_jobs) == 0 + # ensure cells were updated + assert (await self._retrieve_cell_value(table, row_key)) == new_value + assert (await self._retrieve_cell_value(table, row_key2)) == new_value2 + + @pytest.mark.usefixtures("client") + @pytest.mark.usefixtures("table") + @CrossSync.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + @CrossSync.pytest + async def test_mutations_batcher_bytes_flush(self, client, table, temp_rows): + """ + batch should flush after flush_limit_bytes bytes + """ + from google.cloud.bigtable.data.mutations import RowMutationEntry + + new_value, new_value2 = [uuid.uuid4().hex.encode() for _ in range(2)] + row_key, mutation = await self._create_row_and_mutation( + table, temp_rows, new_value=new_value + ) + bulk_mutation = RowMutationEntry(row_key, [mutation]) + row_key2, mutation2 = await self._create_row_and_mutation( + table, temp_rows, new_value=new_value2 + ) + bulk_mutation2 = RowMutationEntry(row_key2, [mutation2]) + + flush_limit = bulk_mutation.size() + bulk_mutation2.size() - 1 + + async with table.mutations_batcher(flush_limit_bytes=flush_limit) as batcher: + await batcher.append(bulk_mutation) + assert len(batcher._flush_jobs) == 0 + assert len(batcher._staged_entries) == 1 + await batcher.append(bulk_mutation2) + # task should now be scheduled + assert len(batcher._flush_jobs) == 1 + assert len(batcher._staged_entries) == 0 + # let flush complete + for future in list(batcher._flush_jobs): + await future + # for sync version: grab result + future.result() + # ensure cells were updated + assert (await self._retrieve_cell_value(table, row_key)) == new_value + assert (await self._retrieve_cell_value(table, row_key2)) == new_value2 + + @pytest.mark.usefixtures("client") + @pytest.mark.usefixtures("table") + @CrossSync.pytest + async def test_mutations_batcher_no_flush(self, client, table, temp_rows): + """ + test with no flush requirements met + """ + from google.cloud.bigtable.data.mutations import RowMutationEntry + + new_value = uuid.uuid4().hex.encode() + start_value = b"unchanged" + row_key, mutation = await self._create_row_and_mutation( + table, temp_rows, start_value=start_value, new_value=new_value + ) + bulk_mutation = RowMutationEntry(row_key, [mutation]) + row_key2, mutation2 = await self._create_row_and_mutation( + table, temp_rows, start_value=start_value, new_value=new_value + ) + bulk_mutation2 = RowMutationEntry(row_key2, [mutation2]) + + size_limit = bulk_mutation.size() 
+ bulk_mutation2.size() + 1 + async with table.mutations_batcher( + flush_limit_bytes=size_limit, flush_limit_mutation_count=3, flush_interval=1 + ) as batcher: + await batcher.append(bulk_mutation) + assert len(batcher._staged_entries) == 1 + await batcher.append(bulk_mutation2) + # flush not scheduled + assert len(batcher._flush_jobs) == 0 + await CrossSync.yield_to_event_loop() + assert len(batcher._staged_entries) == 2 + assert len(batcher._flush_jobs) == 0 + # ensure cells were not updated + assert (await self._retrieve_cell_value(table, row_key)) == start_value + assert (await self._retrieve_cell_value(table, row_key2)) == start_value + + @pytest.mark.usefixtures("client") + @pytest.mark.usefixtures("table") + @CrossSync.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + @CrossSync.pytest + async def test_mutations_batcher_large_batch(self, client, table, temp_rows): + """ + test batcher with large batch of mutations + """ + from google.cloud.bigtable.data.mutations import RowMutationEntry, SetCell + + add_mutation = SetCell( + family=TEST_FAMILY, qualifier=b"test-qualifier", new_value=b"a" + ) + row_mutations = [] + for i in range(50_000): + row_key = uuid.uuid4().hex.encode() + row_mutations.append(RowMutationEntry(row_key, [add_mutation])) + # append row key for eventual deletion + temp_rows.rows.append(row_key) + + async with table.mutations_batcher() as batcher: + for mutation in row_mutations: + await batcher.append(mutation) + # ensure cell is updated + assert len(batcher._staged_entries) == 0 + + @pytest.mark.usefixtures("client") + @pytest.mark.usefixtures("table") + @pytest.mark.parametrize( + "start,increment,expected", + [ + (0, 0, 0), + (0, 1, 1), + (0, -1, -1), + (1, 0, 1), + (0, -100, -100), + (0, 3000, 3000), + (10, 4, 14), + (_MAX_INCREMENT_VALUE, -_MAX_INCREMENT_VALUE, 0), + (_MAX_INCREMENT_VALUE, 2, -_MAX_INCREMENT_VALUE), + (-_MAX_INCREMENT_VALUE, -2, _MAX_INCREMENT_VALUE), + ], + ) + @CrossSync.pytest + async def test_read_modify_write_row_increment( + self, client, table, temp_rows, start, increment, expected + ): + """ + test read_modify_write_row + """ + from google.cloud.bigtable.data.read_modify_write_rules import IncrementRule + + row_key = b"test-row-key" + family = TEST_FAMILY + qualifier = b"test-qualifier" + await temp_rows.add_row( + row_key, value=start, family=family, qualifier=qualifier + ) + + rule = IncrementRule(family, qualifier, increment) + result = await table.read_modify_write_row(row_key, rule) + assert result.row_key == row_key + assert len(result) == 1 + assert result[0].family == family + assert result[0].qualifier == qualifier + assert int(result[0]) == expected + # ensure that reading from server gives same value + assert (await self._retrieve_cell_value(table, row_key)) == result[0].value + + @pytest.mark.usefixtures("client") + @pytest.mark.usefixtures("table") + @pytest.mark.parametrize( + "start,append,expected", + [ + (b"", b"", b""), + ("", "", b""), + (b"abc", b"123", b"abc123"), + (b"abc", "123", b"abc123"), + ("", b"1", b"1"), + (b"abc", "", b"abc"), + (b"hello", b"world", b"helloworld"), + ], + ) + @CrossSync.pytest + async def test_read_modify_write_row_append( + self, client, table, temp_rows, start, append, expected + ): + """ + test read_modify_write_row + """ + from google.cloud.bigtable.data.read_modify_write_rules import AppendValueRule + + row_key = b"test-row-key" + family = TEST_FAMILY + qualifier = b"test-qualifier" + await temp_rows.add_row( + row_key, value=start, family=family, 
qualifier=qualifier
+        )
+
+        rule = AppendValueRule(family, qualifier, append)
+        result = await table.read_modify_write_row(row_key, rule)
+        assert result.row_key == row_key
+        assert len(result) == 1
+        assert result[0].family == family
+        assert result[0].qualifier == qualifier
+        assert result[0].value == expected
+        # ensure that reading from server gives same value
+        assert (await self._retrieve_cell_value(table, row_key)) == result[0].value
+
+    @pytest.mark.usefixtures("client")
+    @pytest.mark.usefixtures("table")
+    @CrossSync.pytest
+    async def test_read_modify_write_row_chained(self, client, table, temp_rows):
+        """
+        test read_modify_write_row with multiple rules
+        """
+        from google.cloud.bigtable.data.read_modify_write_rules import AppendValueRule
+        from google.cloud.bigtable.data.read_modify_write_rules import IncrementRule
+
+        row_key = b"test-row-key"
+        family = TEST_FAMILY
+        qualifier = b"test-qualifier"
+        start_amount = 1
+        increment_amount = 10
+        await temp_rows.add_row(
+            row_key, value=start_amount, family=family, qualifier=qualifier
+        )
+        rule = [
+            IncrementRule(family, qualifier, increment_amount),
+            AppendValueRule(family, qualifier, "hello"),
+            AppendValueRule(family, qualifier, "world"),
+            AppendValueRule(family, qualifier, "!"),
+        ]
+        result = await table.read_modify_write_row(row_key, rule)
+        assert result.row_key == row_key
+        assert result[0].family == family
+        assert result[0].qualifier == qualifier
+        # result should be a bytes number string for the IncrementRules, followed by the AppendValueRule values
+        assert (
+            result[0].value
+            == (start_amount + increment_amount).to_bytes(8, "big", signed=True)
+            + b"helloworld!"
+        )
+        # ensure that reading from server gives same value
+        assert (await self._retrieve_cell_value(table, row_key)) == result[0].value
+
+    @pytest.mark.usefixtures("client")
+    @pytest.mark.usefixtures("table")
+    @pytest.mark.parametrize(
+        "start_val,predicate_range,expected_result",
+        [
+            (1, (0, 2), True),
+            (-1, (0, 2), False),
+        ],
+    )
+    @CrossSync.pytest
+    async def test_check_and_mutate(
+        self, client, table, temp_rows, start_val, predicate_range, expected_result
+    ):
+        """
+        test that check_and_mutate_row applies the right mutations and returns the right result
+        """
+        from google.cloud.bigtable.data.mutations import SetCell
+        from google.cloud.bigtable.data.row_filters import ValueRangeFilter
+
+        row_key = b"test-row-key"
+        family = TEST_FAMILY
+        qualifier = b"test-qualifier"
+
+        await temp_rows.add_row(
+            row_key, value=start_val, family=family, qualifier=qualifier
+        )
+
+        false_mutation_value = b"false-mutation-value"
+        false_mutation = SetCell(
+            family=TEST_FAMILY, qualifier=qualifier, new_value=false_mutation_value
+        )
+        true_mutation_value = b"true-mutation-value"
+        true_mutation = SetCell(
+            family=TEST_FAMILY, qualifier=qualifier, new_value=true_mutation_value
+        )
+        predicate = ValueRangeFilter(predicate_range[0], predicate_range[1])
+        result = await table.check_and_mutate_row(
+            row_key,
+            predicate,
+            true_case_mutations=true_mutation,
+            false_case_mutations=false_mutation,
+        )
+        assert result == expected_result
+        # ensure cell is updated
+        expected_value = (
+            true_mutation_value if expected_result else false_mutation_value
+        )
+        assert (await self._retrieve_cell_value(table, row_key)) == expected_value
+
+    @pytest.mark.skipif(
+        bool(os.environ.get(BIGTABLE_EMULATOR)),
+        reason="emulator doesn't raise InvalidArgument",
+    )
+    @pytest.mark.usefixtures("client")
+    @pytest.mark.usefixtures("table")
+    @CrossSync.pytest
+    async def test_check_and_mutate_empty_request(self, client, table):
+        """
+        check_and_mutate with no true or false mutations should raise an error
+        """
+        from google.api_core import exceptions
+
+        with pytest.raises(exceptions.InvalidArgument) as e:
+            await table.check_and_mutate_row(
+                b"row_key", None, true_case_mutations=None, false_case_mutations=None
+            )
+        assert "No mutations provided" in str(e.value)
+
+    @pytest.mark.usefixtures("table")
+    @CrossSync.convert(replace_symbols={"__anext__": "__next__"})
+    @CrossSync.Retry(
+        predicate=retry.if_exception_type(ClientError), initial=1, maximum=5
+    )
+    @CrossSync.pytest
+    async def test_read_rows_stream(self, table, temp_rows):
+        """
+        Ensure that the read_rows_stream method works
+        """
+        await temp_rows.add_row(b"row_key_1")
+        await temp_rows.add_row(b"row_key_2")
+
+        # full table scan
+        generator = await table.read_rows_stream({})
+        first_row = await generator.__anext__()
+        second_row = await generator.__anext__()
+        assert first_row.row_key == b"row_key_1"
+        assert second_row.row_key == b"row_key_2"
+        with pytest.raises(CrossSync.StopIteration):
+            await generator.__anext__()
+
+    @pytest.mark.usefixtures("table")
+    @CrossSync.Retry(
+        predicate=retry.if_exception_type(ClientError), initial=1, maximum=5
+    )
+    @CrossSync.pytest
+    async def test_read_rows(self, table, temp_rows):
+        """
+        Ensure that the read_rows method works
+        """
+        await temp_rows.add_row(b"row_key_1")
+        await temp_rows.add_row(b"row_key_2")
+        # full table scan
+        row_list = await table.read_rows({})
+        assert len(row_list) == 2
+        assert row_list[0].row_key == b"row_key_1"
+        assert row_list[1].row_key == b"row_key_2"
+
+    @pytest.mark.usefixtures("table")
+    @CrossSync.Retry(
+        predicate=retry.if_exception_type(ClientError), initial=1, maximum=5
+    )
+    @CrossSync.pytest
+    async def test_read_rows_sharded_simple(self, table, temp_rows):
+        """
+        Test read rows sharded with two queries
+        """
+        from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery
+
+        await temp_rows.add_row(b"a")
+        await temp_rows.add_row(b"b")
+        await temp_rows.add_row(b"c")
+        await temp_rows.add_row(b"d")
+        query1 = ReadRowsQuery(row_keys=[b"a", b"c"])
+        query2 = ReadRowsQuery(row_keys=[b"b", b"d"])
+        row_list = await table.read_rows_sharded([query1, query2])
+        assert len(row_list) == 4
+        assert row_list[0].row_key == b"a"
+        assert row_list[1].row_key == b"c"
+        assert row_list[2].row_key == b"b"
+        assert row_list[3].row_key == b"d"
+
+    @pytest.mark.usefixtures("table")
+    @CrossSync.Retry(
+        predicate=retry.if_exception_type(ClientError), initial=1, maximum=5
+    )
+    @CrossSync.pytest
+    async def test_read_rows_sharded_from_sample(self, table, temp_rows):
+        """
+        Test end-to-end sharding
+        """
+        from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery
+        from google.cloud.bigtable.data.read_rows_query import RowRange
+
+        await temp_rows.add_row(b"a")
+        await temp_rows.add_row(b"b")
+        await temp_rows.add_row(b"c")
+        await temp_rows.add_row(b"d")
+
+        table_shard_keys = await table.sample_row_keys()
+        query = ReadRowsQuery(row_ranges=[RowRange(start_key=b"b", end_key=b"z")])
+        shard_queries = query.shard(table_shard_keys)
+        row_list = await table.read_rows_sharded(shard_queries)
+        assert len(row_list) == 3
+        assert row_list[0].row_key == b"b"
+        assert row_list[1].row_key == b"c"
+        assert row_list[2].row_key == b"d"
+
+    @pytest.mark.usefixtures("table")
+    @CrossSync.Retry(
+        predicate=retry.if_exception_type(ClientError), initial=1, maximum=5
+    )
+    @CrossSync.pytest
+    async def 
test_read_rows_sharded_filters_limits(self, table, temp_rows): + """ + Test read rows sharded with filters and limits + """ + from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery + from google.cloud.bigtable.data.row_filters import ApplyLabelFilter + + await temp_rows.add_row(b"a") + await temp_rows.add_row(b"b") + await temp_rows.add_row(b"c") + await temp_rows.add_row(b"d") + + label_filter1 = ApplyLabelFilter("first") + label_filter2 = ApplyLabelFilter("second") + query1 = ReadRowsQuery(row_keys=[b"a", b"c"], limit=1, row_filter=label_filter1) + query2 = ReadRowsQuery(row_keys=[b"b", b"d"], row_filter=label_filter2) + row_list = await table.read_rows_sharded([query1, query2]) + assert len(row_list) == 3 + assert row_list[0].row_key == b"a" + assert row_list[1].row_key == b"b" + assert row_list[2].row_key == b"d" + assert row_list[0][0].labels == ["first"] + assert row_list[1][0].labels == ["second"] + assert row_list[2][0].labels == ["second"] + + @pytest.mark.usefixtures("table") + @CrossSync.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + @CrossSync.pytest + async def test_read_rows_range_query(self, table, temp_rows): + """ + Ensure that the read_rows method works + """ + from google.cloud.bigtable.data import ReadRowsQuery + from google.cloud.bigtable.data import RowRange + + await temp_rows.add_row(b"a") + await temp_rows.add_row(b"b") + await temp_rows.add_row(b"c") + await temp_rows.add_row(b"d") + # full table scan + query = ReadRowsQuery(row_ranges=RowRange(start_key=b"b", end_key=b"d")) + row_list = await table.read_rows(query) + assert len(row_list) == 2 + assert row_list[0].row_key == b"b" + assert row_list[1].row_key == b"c" + + @pytest.mark.usefixtures("table") + @CrossSync.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + @CrossSync.pytest + async def test_read_rows_single_key_query(self, table, temp_rows): + """ + Ensure that the read_rows method works with specified query + """ + from google.cloud.bigtable.data import ReadRowsQuery + + await temp_rows.add_row(b"a") + await temp_rows.add_row(b"b") + await temp_rows.add_row(b"c") + await temp_rows.add_row(b"d") + # retrieve specific keys + query = ReadRowsQuery(row_keys=[b"a", b"c"]) + row_list = await table.read_rows(query) + assert len(row_list) == 2 + assert row_list[0].row_key == b"a" + assert row_list[1].row_key == b"c" + + @pytest.mark.usefixtures("table") + @CrossSync.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + @CrossSync.pytest + async def test_read_rows_with_filter(self, table, temp_rows): + """ + ensure filters are applied + """ + from google.cloud.bigtable.data import ReadRowsQuery + from google.cloud.bigtable.data.row_filters import ApplyLabelFilter + + await temp_rows.add_row(b"a") + await temp_rows.add_row(b"b") + await temp_rows.add_row(b"c") + await temp_rows.add_row(b"d") + # retrieve keys with filter + expected_label = "test-label" + row_filter = ApplyLabelFilter(expected_label) + query = ReadRowsQuery(row_filter=row_filter) + row_list = await table.read_rows(query) + assert len(row_list) == 4 + for row in row_list: + assert row[0].labels == [expected_label] + + @pytest.mark.usefixtures("table") + @CrossSync.convert(replace_symbols={"__anext__": "__next__", "aclose": "close"}) + @CrossSync.pytest + async def test_read_rows_stream_close(self, table, temp_rows): + """ + Ensure that the read_rows_stream can be closed + """ + from google.cloud.bigtable.data import ReadRowsQuery + + 
await temp_rows.add_row(b"row_key_1") + await temp_rows.add_row(b"row_key_2") + # full table scan + query = ReadRowsQuery() + generator = await table.read_rows_stream(query) + # grab first row + first_row = await generator.__anext__() + assert first_row.row_key == b"row_key_1" + # close stream early + await generator.aclose() + with pytest.raises(CrossSync.StopIteration): + await generator.__anext__() + + @pytest.mark.usefixtures("table") + @CrossSync.pytest + async def test_read_row(self, table, temp_rows): + """ + Test read_row (single row helper) + """ + from google.cloud.bigtable.data import Row + + await temp_rows.add_row(b"row_key_1", value=b"value") + row = await table.read_row(b"row_key_1") + assert isinstance(row, Row) + assert row.row_key == b"row_key_1" + assert row.cells[0].value == b"value" + + @pytest.mark.skipif( + bool(os.environ.get(BIGTABLE_EMULATOR)), + reason="emulator doesn't raise InvalidArgument", + ) + @pytest.mark.usefixtures("table") + @CrossSync.pytest + async def test_read_row_missing(self, table): + """ + Test read_row when row does not exist + """ + from google.api_core import exceptions + + row_key = "row_key_not_exist" + result = await table.read_row(row_key) + assert result is None + with pytest.raises(exceptions.InvalidArgument) as e: + await table.read_row("") + assert "Row keys must be non-empty" in str(e) + + @pytest.mark.usefixtures("table") + @CrossSync.pytest + async def test_read_row_w_filter(self, table, temp_rows): + """ + Test read_row (single row helper) + """ + from google.cloud.bigtable.data import Row + from google.cloud.bigtable.data.row_filters import ApplyLabelFilter + + await temp_rows.add_row(b"row_key_1", value=b"value") + expected_label = "test-label" + label_filter = ApplyLabelFilter(expected_label) + row = await table.read_row(b"row_key_1", row_filter=label_filter) + assert isinstance(row, Row) + assert row.row_key == b"row_key_1" + assert row.cells[0].value == b"value" + assert row.cells[0].labels == [expected_label] + + @pytest.mark.skipif( + bool(os.environ.get(BIGTABLE_EMULATOR)), + reason="emulator doesn't raise InvalidArgument", + ) + @pytest.mark.usefixtures("table") + @CrossSync.pytest + async def test_row_exists(self, table, temp_rows): + from google.api_core import exceptions + + """Test row_exists with rows that exist and don't exist""" + assert await table.row_exists(b"row_key_1") is False + await temp_rows.add_row(b"row_key_1") + assert await table.row_exists(b"row_key_1") is True + assert await table.row_exists("row_key_1") is True + assert await table.row_exists(b"row_key_2") is False + assert await table.row_exists("row_key_2") is False + assert await table.row_exists("3") is False + await temp_rows.add_row(b"3") + assert await table.row_exists(b"3") is True + with pytest.raises(exceptions.InvalidArgument) as e: + await table.row_exists("") + assert "Row keys must be non-empty" in str(e) + + @pytest.mark.usefixtures("table") + @CrossSync.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + @pytest.mark.parametrize( + "cell_value,filter_input,expect_match", + [ + (b"abc", b"abc", True), + (b"abc", "abc", True), + (b".", ".", True), + (".*", ".*", True), + (".*", b".*", True), + ("a", ".*", False), + (b".*", b".*", True), + (r"\a", r"\a", True), + (b"\xe2\x98\x83", "☃", True), + ("☃", "☃", True), + (r"\C☃", r"\C☃", True), + (1, 1, True), + (2, 1, False), + (68, 68, True), + ("D", 68, False), + (68, "D", False), + (-1, -1, True), + (2852126720, 2852126720, True), + (-1431655766, 
-1431655766, True), + (-1431655766, -1, False), + ], + ) + @CrossSync.pytest + async def test_literal_value_filter( + self, table, temp_rows, cell_value, filter_input, expect_match + ): + """ + Literal value filter does complex escaping on re2 strings. + Make sure inputs are properly interpreted by the server + """ + from google.cloud.bigtable.data.row_filters import LiteralValueFilter + from google.cloud.bigtable.data import ReadRowsQuery + + f = LiteralValueFilter(filter_input) + await temp_rows.add_row(b"row_key_1", value=cell_value) + query = ReadRowsQuery(row_filter=f) + row_list = await table.read_rows(query) + assert len(row_list) == bool( + expect_match + ), f"row {type(cell_value)}({cell_value}) not found with {type(filter_input)}({filter_input}) filter" diff --git a/packages/google-cloud-bigtable/tests/unit/data/_async/test__mutate_rows.py b/packages/google-cloud-bigtable/tests/unit/data/_async/test__mutate_rows.py index 73da1b46d5d8..621f4d9a21d5 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/_async/test__mutate_rows.py +++ b/packages/google-cloud-bigtable/tests/unit/data/_async/test__mutate_rows.py @@ -16,42 +16,42 @@ from google.cloud.bigtable_v2.types import MutateRowsResponse from google.rpc import status_pb2 -import google.api_core.exceptions as core_exceptions +from google.api_core.exceptions import DeadlineExceeded +from google.api_core.exceptions import Forbidden + +from google.cloud.bigtable.data._cross_sync import CrossSync # try/except added for compatibility with python < 3.8 try: from unittest import mock - from unittest.mock import AsyncMock # type: ignore except ImportError: # pragma: NO COVER import mock # type: ignore - from mock import AsyncMock # type: ignore - -def _make_mutation(count=1, size=1): - mutation = mock.Mock() - mutation.size.return_value = size - mutation.mutations = [mock.Mock()] * count - return mutation +__CROSS_SYNC_OUTPUT__ = "tests.unit.data._sync_autogen.test__mutate_rows" -class TestMutateRowsOperation: +@CrossSync.convert_class("TestMutateRowsOperation") +class TestMutateRowsOperationAsync: def _target_class(self): - from google.cloud.bigtable.data._async._mutate_rows import ( - _MutateRowsOperationAsync, - ) - - return _MutateRowsOperationAsync + return CrossSync._MutateRowsOperation def _make_one(self, *args, **kwargs): if not args: kwargs["gapic_client"] = kwargs.pop("gapic_client", mock.Mock()) - kwargs["table"] = kwargs.pop("table", AsyncMock()) + kwargs["table"] = kwargs.pop("table", CrossSync.Mock()) kwargs["operation_timeout"] = kwargs.pop("operation_timeout", 5) kwargs["attempt_timeout"] = kwargs.pop("attempt_timeout", 0.1) kwargs["retryable_exceptions"] = kwargs.pop("retryable_exceptions", ()) kwargs["mutation_entries"] = kwargs.pop("mutation_entries", []) return self._target_class()(*args, **kwargs) + def _make_mutation(self, count=1, size=1): + mutation = mock.Mock() + mutation.size.return_value = size + mutation.mutations = [mock.Mock()] * count + return mutation + + @CrossSync.convert async def _mock_stream(self, mutation_list, error_dict): for idx, entry in enumerate(mutation_list): code = error_dict.get(idx, 0) @@ -64,7 +64,7 @@ async def _mock_stream(self, mutation_list, error_dict): ) def _make_mock_gapic(self, mutation_list, error_dict=None): - mock_fn = AsyncMock() + mock_fn = CrossSync.Mock() if error_dict is None: error_dict = {} mock_fn.side_effect = lambda *args, **kwargs: self._mock_stream( @@ -83,7 +83,7 @@ def test_ctor(self): client = mock.Mock() table = mock.Mock() - entries = [_make_mutation(), 
_make_mutation()] + entries = [self._make_mutation(), self._make_mutation()] operation_timeout = 0.05 attempt_timeout = 0.01 retryable_exceptions = () @@ -131,17 +131,14 @@ def test_ctor_too_many_entries(self): client = mock.Mock() table = mock.Mock() - entries = [_make_mutation()] * _MUTATE_ROWS_REQUEST_MUTATION_LIMIT + entries = [self._make_mutation()] * (_MUTATE_ROWS_REQUEST_MUTATION_LIMIT + 1) operation_timeout = 0.05 attempt_timeout = 0.01 - # no errors if at limit - self._make_one(client, table, entries, operation_timeout, attempt_timeout) - # raise error after crossing with pytest.raises(ValueError) as e: self._make_one( client, table, - entries + [_make_mutation()], + entries, operation_timeout, attempt_timeout, ) @@ -150,18 +147,18 @@ def test_ctor_too_many_entries(self): ) assert "Found 100001" in str(e.value) - @pytest.mark.asyncio + @CrossSync.pytest async def test_mutate_rows_operation(self): """ Test successful case of mutate_rows_operation """ client = mock.Mock() table = mock.Mock() - entries = [_make_mutation(), _make_mutation()] + entries = [self._make_mutation(), self._make_mutation()] operation_timeout = 0.05 cls = self._target_class() with mock.patch( - f"{cls.__module__}.{cls.__name__}._run_attempt", AsyncMock() + f"{cls.__module__}.{cls.__name__}._run_attempt", CrossSync.Mock() ) as attempt_mock: instance = self._make_one( client, table, entries, operation_timeout, operation_timeout @@ -169,17 +166,15 @@ async def test_mutate_rows_operation(self): await instance.start() assert attempt_mock.call_count == 1 - @pytest.mark.parametrize( - "exc_type", [RuntimeError, ZeroDivisionError, core_exceptions.Forbidden] - ) - @pytest.mark.asyncio + @pytest.mark.parametrize("exc_type", [RuntimeError, ZeroDivisionError, Forbidden]) + @CrossSync.pytest async def test_mutate_rows_attempt_exception(self, exc_type): """ exceptions raised from attempt should be raised in MutationsExceptionGroup """ - client = AsyncMock() + client = CrossSync.Mock() table = mock.Mock() - entries = [_make_mutation(), _make_mutation()] + entries = [self._make_mutation(), self._make_mutation()] operation_timeout = 0.05 expected_exception = exc_type("test") client.mutate_rows.side_effect = expected_exception @@ -197,10 +192,8 @@ async def test_mutate_rows_attempt_exception(self, exc_type): assert len(instance.errors) == 2 assert len(instance.remaining_indices) == 0 - @pytest.mark.parametrize( - "exc_type", [RuntimeError, ZeroDivisionError, core_exceptions.Forbidden] - ) - @pytest.mark.asyncio + @pytest.mark.parametrize("exc_type", [RuntimeError, ZeroDivisionError, Forbidden]) + @CrossSync.pytest async def test_mutate_rows_exception(self, exc_type): """ exceptions raised from retryable should be raised in MutationsExceptionGroup @@ -210,13 +203,13 @@ async def test_mutate_rows_exception(self, exc_type): client = mock.Mock() table = mock.Mock() - entries = [_make_mutation(), _make_mutation()] + entries = [self._make_mutation(), self._make_mutation()] operation_timeout = 0.05 expected_cause = exc_type("abort") with mock.patch.object( self._target_class(), "_run_attempt", - AsyncMock(), + CrossSync.Mock(), ) as attempt_mock: attempt_mock.side_effect = expected_cause found_exc = None @@ -236,27 +229,24 @@ async def test_mutate_rows_exception(self, exc_type): @pytest.mark.parametrize( "exc_type", - [core_exceptions.DeadlineExceeded, RuntimeError], + [DeadlineExceeded, RuntimeError], ) - @pytest.mark.asyncio + @CrossSync.pytest async def test_mutate_rows_exception_retryable_eventually_pass(self, exc_type): """ If an 
exception fails but eventually passes, it should not raise an exception """ - from google.cloud.bigtable.data._async._mutate_rows import ( - _MutateRowsOperationAsync, - ) client = mock.Mock() table = mock.Mock() - entries = [_make_mutation()] + entries = [self._make_mutation()] operation_timeout = 1 expected_cause = exc_type("retry") num_retries = 2 with mock.patch.object( - _MutateRowsOperationAsync, + self._target_class(), "_run_attempt", - AsyncMock(), + CrossSync.Mock(), ) as attempt_mock: attempt_mock.side_effect = [expected_cause] * num_retries + [None] instance = self._make_one( @@ -270,7 +260,7 @@ async def test_mutate_rows_exception_retryable_eventually_pass(self, exc_type): await instance.start() assert attempt_mock.call_count == num_retries + 1 - @pytest.mark.asyncio + @CrossSync.pytest async def test_mutate_rows_incomplete_ignored(self): """ MutateRowsIncomplete exceptions should not be added to error list @@ -281,12 +271,12 @@ async def test_mutate_rows_incomplete_ignored(self): client = mock.Mock() table = mock.Mock() - entries = [_make_mutation()] + entries = [self._make_mutation()] operation_timeout = 0.05 with mock.patch.object( self._target_class(), "_run_attempt", - AsyncMock(), + CrossSync.Mock(), ) as attempt_mock: attempt_mock.side_effect = _MutateRowsIncomplete("ignored") found_exc = None @@ -301,10 +291,10 @@ async def test_mutate_rows_incomplete_ignored(self): assert len(found_exc.exceptions) == 1 assert isinstance(found_exc.exceptions[0].__cause__, DeadlineExceeded) - @pytest.mark.asyncio + @CrossSync.pytest async def test_run_attempt_single_entry_success(self): """Test mutating a single entry""" - mutation = _make_mutation() + mutation = self._make_mutation() expected_timeout = 1.3 mock_gapic_fn = self._make_mock_gapic({0: mutation}) instance = self._make_one( @@ -319,7 +309,7 @@ async def test_run_attempt_single_entry_success(self): assert kwargs["timeout"] == expected_timeout assert kwargs["entries"] == [mutation._to_pb()] - @pytest.mark.asyncio + @CrossSync.pytest async def test_run_attempt_empty_request(self): """Calling with no mutations should result in no API calls""" mock_gapic_fn = self._make_mock_gapic([]) @@ -329,14 +319,14 @@ async def test_run_attempt_empty_request(self): await instance._run_attempt() assert mock_gapic_fn.call_count == 0 - @pytest.mark.asyncio + @CrossSync.pytest async def test_run_attempt_partial_success_retryable(self): """Some entries succeed, but one fails. Should report the proper index, and raise incomplete exception""" from google.cloud.bigtable.data.exceptions import _MutateRowsIncomplete - success_mutation = _make_mutation() - success_mutation_2 = _make_mutation() - failure_mutation = _make_mutation() + success_mutation = self._make_mutation() + success_mutation_2 = self._make_mutation() + failure_mutation = self._make_mutation() mutations = [success_mutation, failure_mutation, success_mutation_2] mock_gapic_fn = self._make_mock_gapic(mutations, error_dict={1: 300}) instance = self._make_one( @@ -352,12 +342,12 @@ async def test_run_attempt_partial_success_retryable(self): assert instance.errors[1][0].grpc_status_code == 300 assert 2 not in instance.errors - @pytest.mark.asyncio + @CrossSync.pytest async def test_run_attempt_partial_success_non_retryable(self): """Some entries succeed, but one fails. Exception marked as non-retryable. 
Do not raise incomplete error""" - success_mutation = _make_mutation() - success_mutation_2 = _make_mutation() - failure_mutation = _make_mutation() + success_mutation = self._make_mutation() + success_mutation_2 = self._make_mutation() + failure_mutation = self._make_mutation() mutations = [success_mutation, failure_mutation, success_mutation_2] mock_gapic_fn = self._make_mock_gapic(mutations, error_dict={1: 300}) instance = self._make_one( diff --git a/packages/google-cloud-bigtable/tests/unit/data/_async/test__read_rows.py b/packages/google-cloud-bigtable/tests/unit/data/_async/test__read_rows.py index e2b02517fb6e..6a4583a7b9cb 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/_async/test__read_rows.py +++ b/packages/google-cloud-bigtable/tests/unit/data/_async/test__read_rows.py @@ -13,23 +13,22 @@ import pytest -from google.cloud.bigtable.data._async._read_rows import _ReadRowsOperationAsync +from google.cloud.bigtable.data._cross_sync import CrossSync # try/except added for compatibility with python < 3.8 try: from unittest import mock - from unittest.mock import AsyncMock # type: ignore except ImportError: # pragma: NO COVER import mock # type: ignore - from mock import AsyncMock # type: ignore # noqa F401 -TEST_FAMILY = "family_name" -TEST_QUALIFIER = b"qualifier" -TEST_TIMESTAMP = 123456789 -TEST_LABELS = ["label1", "label2"] +__CROSS_SYNC_OUTPUT__ = "tests.unit.data._sync_autogen.test__read_rows" -class TestReadRowsOperation: + +@CrossSync.convert_class( + sync_name="TestReadRowsOperation", +) +class TestReadRowsOperationAsync: """ Tests helper functions in the ReadRowsOperation class in-depth merging logic in merge_row_response_stream and _read_rows_retryable_attempt @@ -37,10 +36,9 @@ class TestReadRowsOperation: """ @staticmethod + @CrossSync.convert def _get_target_class(): - from google.cloud.bigtable.data._async._read_rows import _ReadRowsOperationAsync - - return _ReadRowsOperationAsync + return CrossSync._ReadRowsOperation def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) @@ -60,8 +58,9 @@ def test_ctor(self): expected_operation_timeout = 42 expected_request_timeout = 44 time_gen_mock = mock.Mock() + subpath = "_async" if CrossSync.is_async else "_sync_autogen" with mock.patch( - "google.cloud.bigtable.data._async._read_rows._attempt_timeout_generator", + f"google.cloud.bigtable.data.{subpath}._read_rows._attempt_timeout_generator", time_gen_mock, ): instance = self._make_one( @@ -236,7 +235,7 @@ def test_revise_to_empty_rowset(self): (4, 2, 2), ], ) - @pytest.mark.asyncio + @CrossSync.pytest async def test_revise_limit(self, start_limit, emit_num, expected_limit): """ revise_limit should revise the request's limit field @@ -277,7 +276,7 @@ async def mock_stream(): assert instance._remaining_count == expected_limit @pytest.mark.parametrize("start_limit,emit_num", [(5, 10), (3, 9), (1, 10)]) - @pytest.mark.asyncio + @CrossSync.pytest async def test_revise_limit_over_limit(self, start_limit, emit_num): """ Should raise runtime error if we get in state where emit_num > start_num @@ -316,7 +315,11 @@ async def mock_stream(): pass assert "emit count exceeds row limit" in str(e.value) - @pytest.mark.asyncio + @CrossSync.pytest + @CrossSync.convert( + sync_name="test_close", + replace_symbols={"aclose": "close", "__anext__": "__next__"}, + ) async def test_aclose(self): """ should be able to close a stream safely with aclose. 
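[Editor's note, not part of the patch.] The hunks in this file swap the asyncio-only test plumbing (pytest.mark.asyncio, AsyncMock, direct imports of the *Async classes) for the CrossSync helpers, apparently so that a synchronous variant of each test can be generated into the module named by __CROSS_SYNC_OUTPUT__. The sketch below is a minimal illustration of that pattern, using only the decorators and helpers that appear in these hunks and assuming they behave at runtime the way the converted tests use them; the module path, class name, and test body are made up for illustration and are not part of the diff.

import pytest

from google.cloud.bigtable.data._cross_sync import CrossSync

# hypothetical output module for the generated sync tests
__CROSS_SYNC_OUTPUT__ = "tests.unit.data._sync_autogen.test_example"


@CrossSync.convert_class(sync_name="TestExample")
class TestExampleAsync:
    @CrossSync.pytest  # stands in for @pytest.mark.asyncio in the async variant
    @CrossSync.convert(replace_symbols={"__anext__": "__next__"})
    async def test_stream_end(self):
        async def numbers():
            yield 1

        stream = numbers()
        assert await stream.__anext__() == 1
        # In the async build CrossSync.StopIteration stands in for
        # StopAsyncIteration (as in the test_aclose hunk above); the generated
        # sync test presumably checks plain StopIteration instead.
        with pytest.raises(CrossSync.StopIteration):
            await stream.__anext__()

The surrounding hunks also show CrossSync.Mock() replacing AsyncMock(), CrossSync.sleep replacing asyncio.sleep, and CrossSync.is_async guarding branches that differ between the async and generated sync code paths.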
@@ -328,7 +331,7 @@ async def mock_stream(): yield 1 with mock.patch.object( - _ReadRowsOperationAsync, "_read_rows_attempt" + self._get_target_class(), "_read_rows_attempt" ) as mock_attempt: instance = self._make_one(mock.Mock(), mock.Mock(), 1, 1) wrapped_gen = mock_stream() @@ -337,20 +340,20 @@ async def mock_stream(): # read one row await gen.__anext__() await gen.aclose() - with pytest.raises(StopAsyncIteration): + with pytest.raises(CrossSync.StopIteration): await gen.__anext__() # try calling a second time await gen.aclose() # ensure close was propagated to wrapped generator - with pytest.raises(StopAsyncIteration): + with pytest.raises(CrossSync.StopIteration): await wrapped_gen.__anext__() - @pytest.mark.asyncio + @CrossSync.pytest + @CrossSync.convert(replace_symbols={"__anext__": "__next__"}) async def test_retryable_ignore_repeated_rows(self): """ Duplicate rows should cause an invalid chunk error """ - from google.cloud.bigtable.data._async._read_rows import _ReadRowsOperationAsync from google.cloud.bigtable.data.exceptions import InvalidChunk from google.cloud.bigtable_v2.types import ReadRowsResponse @@ -375,37 +378,10 @@ async def mock_stream(): instance = mock.Mock() instance._last_yielded_row_key = None instance._remaining_count = None - stream = _ReadRowsOperationAsync.chunk_stream(instance, mock_awaitable_stream()) + stream = self._get_target_class().chunk_stream( + instance, mock_awaitable_stream() + ) await stream.__anext__() with pytest.raises(InvalidChunk) as exc: await stream.__anext__() assert "row keys should be strictly increasing" in str(exc.value) - - -class MockStream(_ReadRowsOperationAsync): - """ - Mock a _ReadRowsOperationAsync stream for testing - """ - - def __init__(self, items=None, errors=None, operation_timeout=None): - self.transient_errors = errors - self.operation_timeout = operation_timeout - self.next_idx = 0 - if items is None: - items = list(range(10)) - self.items = items - - def __aiter__(self): - return self - - async def __anext__(self): - if self.next_idx >= len(self.items): - raise StopAsyncIteration - item = self.items[self.next_idx] - self.next_idx += 1 - if isinstance(item, Exception): - raise item - return item - - async def aclose(self): - pass diff --git a/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py b/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py index fdc86e924a30..c24fa3d98c35 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py @@ -19,6 +19,7 @@ import sys import pytest +import mock from google.cloud.bigtable.data import mutations from google.auth.credentials import AnonymousCredentials @@ -31,67 +32,71 @@ from google.cloud.bigtable.data.read_modify_write_rules import IncrementRule from google.cloud.bigtable.data.read_modify_write_rules import AppendValueRule +from google.cloud.bigtable_v2.types.bigtable import ExecuteQueryResponse -# try/except added for compatibility with python < 3.8 -try: - from unittest import mock - from unittest.mock import AsyncMock # type: ignore -except ImportError: # pragma: NO COVER - import mock # type: ignore - from mock import AsyncMock # type: ignore +from google.cloud.bigtable.data._cross_sync import CrossSync -VENEER_HEADER_REGEX = re.compile( - r"gapic\/[0-9]+\.[\w.-]+ gax\/[0-9]+\.[\w.-]+ gccl\/[0-9]+\.[\w.-]+-data-async gl-python\/[0-9]+\.[\w.-]+ grpc\/[0-9]+\.[\w.-]+" -) +if CrossSync.is_async: + from google.api_core import 
grpc_helpers_async + from google.cloud.bigtable.data._async.client import TableAsync + CrossSync.add_mapping("grpc_helpers", grpc_helpers_async) +else: + from google.api_core import grpc_helpers + from google.cloud.bigtable.data._sync_autogen.client import Table # noqa: F401 -def _make_client(*args, use_emulator=True, **kwargs): - import os - from google.cloud.bigtable.data._async.client import BigtableDataClientAsync + CrossSync.add_mapping("grpc_helpers", grpc_helpers) - env_mask = {} - # by default, use emulator mode to avoid auth issues in CI - # emulator mode must be disabled by tests that check refresh background tasks - if use_emulator: - env_mask["BIGTABLE_EMULATOR_HOST"] = "localhost" - else: - # set some default values - kwargs["credentials"] = kwargs.get("credentials", AnonymousCredentials()) - kwargs["project"] = kwargs.get("project", "project-id") - with mock.patch.dict(os.environ, env_mask): - return BigtableDataClientAsync(*args, **kwargs) +__CROSS_SYNC_OUTPUT__ = "tests.unit.data._sync_autogen.test_client" +@CrossSync.convert_class( + sync_name="TestBigtableDataClient", + add_mapping_for_name="TestBigtableDataClient", +) class TestBigtableDataClientAsync: - def _get_target_class(self): - from google.cloud.bigtable.data._async.client import BigtableDataClientAsync - - return BigtableDataClientAsync - - def _make_one(self, *args, **kwargs): - return _make_client(*args, **kwargs) + @staticmethod + @CrossSync.convert + def _get_target_class(): + return CrossSync.DataClient + + @classmethod + def _make_client(cls, *args, use_emulator=True, **kwargs): + import os + + env_mask = {} + # by default, use emulator mode to avoid auth issues in CI + # emulator mode must be disabled by tests that check channel pooling/refresh background tasks + if use_emulator: + env_mask["BIGTABLE_EMULATOR_HOST"] = "localhost" + import warnings + + warnings.filterwarnings("ignore", category=RuntimeWarning) + else: + # set some default values + kwargs["credentials"] = kwargs.get("credentials", AnonymousCredentials()) + kwargs["project"] = kwargs.get("project", "project-id") + with mock.patch.dict(os.environ, env_mask): + return cls._get_target_class()(*args, **kwargs) - @pytest.mark.asyncio + @CrossSync.pytest async def test_ctor(self): expected_project = "project-id" expected_credentials = AnonymousCredentials() - client = self._make_one( + client = self._make_client( project="project-id", credentials=expected_credentials, use_emulator=False, ) - await asyncio.sleep(0) + await CrossSync.yield_to_event_loop() assert client.project == expected_project assert not client._active_instances assert client._channel_refresh_task is not None assert client.transport._credentials == expected_credentials await client.close() - @pytest.mark.asyncio + @CrossSync.pytest async def test_ctor_super_inits(self): - from google.cloud.bigtable_v2.services.bigtable.async_client import ( - BigtableAsyncClient, - ) from google.cloud.client import ClientWithProject from google.api_core import client_options as client_options_lib @@ -99,14 +104,16 @@ async def test_ctor_super_inits(self): credentials = AnonymousCredentials() client_options = {"api_endpoint": "foo.bar:1234"} options_parsed = client_options_lib.from_dict(client_options) - with mock.patch.object(BigtableAsyncClient, "__init__") as bigtable_client_init: + with mock.patch.object( + CrossSync.GapicClient, "__init__" + ) as bigtable_client_init: bigtable_client_init.return_value = None with mock.patch.object( ClientWithProject, "__init__" ) as client_project_init: 
client_project_init.return_value = None try: - self._make_one( + self._make_client( project=project, credentials=credentials, client_options=options_parsed, @@ -126,17 +133,16 @@ async def test_ctor_super_inits(self): assert kwargs["credentials"] == credentials assert kwargs["client_options"] == options_parsed - @pytest.mark.asyncio + @CrossSync.pytest async def test_ctor_dict_options(self): - from google.cloud.bigtable_v2.services.bigtable.async_client import ( - BigtableAsyncClient, - ) from google.api_core.client_options import ClientOptions client_options = {"api_endpoint": "foo.bar:1234"} - with mock.patch.object(BigtableAsyncClient, "__init__") as bigtable_client_init: + with mock.patch.object( + CrossSync.GapicClient, "__init__" + ) as bigtable_client_init: try: - self._make_one(client_options=client_options) + self._make_client(client_options=client_options) except TypeError: pass bigtable_client_init.assert_called_once() @@ -147,17 +153,29 @@ async def test_ctor_dict_options(self): with mock.patch.object( self._get_target_class(), "_start_background_channel_refresh" ) as start_background_refresh: - client = self._make_one(client_options=client_options, use_emulator=False) + client = self._make_client( + client_options=client_options, use_emulator=False + ) start_background_refresh.assert_called_once() await client.close() - @pytest.mark.asyncio + @CrossSync.pytest async def test_veneer_grpc_headers(self): + client_component = "data-async" if CrossSync.is_async else "data" + VENEER_HEADER_REGEX = re.compile( + r"gapic\/[0-9]+\.[\w.-]+ gax\/[0-9]+\.[\w.-]+ gccl\/[0-9]+\.[\w.-]+-" + + client_component + + r" gl-python\/[0-9]+\.[\w.-]+ grpc\/[0-9]+\.[\w.-]+" + ) + # client_info should be populated with headers to # detect as a veneer client - patch = mock.patch("google.api_core.gapic_v1.method_async.wrap_method") + if CrossSync.is_async: + patch = mock.patch("google.api_core.gapic_v1.method_async.wrap_method") + else: + patch = mock.patch("google.api_core.gapic_v1.method.wrap_method") with patch as gapic_mock: - client = self._make_one(project="project-id") + client = self._make_client(project="project-id") wrapped_call_list = gapic_mock.call_args_list assert len(wrapped_call_list) > 0 # each wrapped call should have veneer headers @@ -172,56 +190,67 @@ async def test_veneer_grpc_headers(self): ), f"'{wrapped_user_agent_sorted}' does not match {VENEER_HEADER_REGEX}" await client.close() + @CrossSync.drop @pytest.mark.filterwarnings("ignore::RuntimeWarning") def test__start_background_channel_refresh_sync(self): # should raise RuntimeError if called in a sync context - client = self._make_one(project="project-id", use_emulator=False) + client = self._make_client(project="project-id", use_emulator=False) with pytest.raises(RuntimeError): client._start_background_channel_refresh() - @pytest.mark.asyncio + @CrossSync.pytest async def test__start_background_channel_refresh_task_exists(self): # if tasks exist, should do nothing - client = self._make_one(project="project-id", use_emulator=False) + client = self._make_client(project="project-id", use_emulator=False) assert client._channel_refresh_task is not None with mock.patch.object(asyncio, "create_task") as create_task: client._start_background_channel_refresh() create_task.assert_not_called() await client.close() - @pytest.mark.asyncio + @CrossSync.pytest async def test__start_background_channel_refresh(self): # should create background tasks for each channel - client = self._make_one(project="project-id", use_emulator=False) - 
ping_and_warm = AsyncMock() - client._ping_and_warm_instances = ping_and_warm - client._start_background_channel_refresh() - assert client._channel_refresh_task is not None - assert isinstance(client._channel_refresh_task, asyncio.Task) - await asyncio.sleep(0.1) - assert ping_and_warm.call_count == 1 - await client.close() + client = self._make_client(project="project-id") + with mock.patch.object( + client, "_ping_and_warm_instances", CrossSync.Mock() + ) as ping_and_warm: + client._emulator_host = None + client._start_background_channel_refresh() + assert client._channel_refresh_task is not None + assert isinstance(client._channel_refresh_task, CrossSync.Task) + await CrossSync.sleep(0.1) + assert ping_and_warm.call_count == 1 + await client.close() - @pytest.mark.asyncio + @CrossSync.drop + @CrossSync.pytest @pytest.mark.skipif( sys.version_info < (3, 8), reason="Task.name requires python3.8 or higher" ) async def test__start_background_channel_refresh_task_names(self): # if tasks exist, should do nothing - client = self._make_one(project="project-id", use_emulator=False) + client = self._make_client(project="project-id", use_emulator=False) name = client._channel_refresh_task.get_name() - assert "BigtableDataClientAsync channel refresh" in name + assert "channel refresh" in name await client.close() - @pytest.mark.asyncio + @CrossSync.pytest async def test__ping_and_warm_instances(self): """ test ping and warm with mocked asyncio.gather """ client_mock = mock.Mock() - with mock.patch.object(asyncio, "gather", AsyncMock()) as gather: - # simulate gather by returning the same number of items as passed in - gather.side_effect = lambda *args, **kwargs: [None for _ in args] + client_mock._execute_ping_and_warms = ( + lambda *args: self._get_target_class()._execute_ping_and_warms( + client_mock, *args + ) + ) + with mock.patch.object( + CrossSync, "gather_partials", CrossSync.Mock() + ) as gather: + # gather_partials is expected to call the function passed, and return the result + gather.side_effect = lambda partials, **kwargs: [None for _ in partials] channel = mock.Mock() # test with no instances client_mock._active_instances = [] @@ -229,10 +258,8 @@ async def test__ping_and_warm_instances(self): client_mock, channel=channel ) assert len(result) == 0 - gather.assert_called_once() - gather.assert_awaited_once() - assert not gather.call_args.args - assert gather.call_args.kwargs == {"return_exceptions": True} + assert gather.call_args[1]["return_exceptions"] is True + assert gather.call_args[1]["sync_executor"] == client_mock._executor # test with instances client_mock._active_instances = [ (mock.Mock(), mock.Mock(), mock.Mock()) @@ -244,8 +271,11 @@ async def test__ping_and_warm_instances(self): ) assert len(result) == 4 gather.assert_called_once() - gather.assert_awaited_once() - assert len(gather.call_args.args) == 4 + # expect one partial for each instance + partial_list = gather.call_args.args[0] + assert len(partial_list) == 4 + if CrossSync.is_async: + gather.assert_awaited_once() # check grpc call arguments grpc_call_args = channel.unary_unary().call_args_list for idx, (_, kwargs) in enumerate(grpc_call_args): @@ -265,15 +295,21 @@ async def test__ping_and_warm_instances(self): == f"name={expected_instance}&app_profile_id={expected_app_profile}" ) - @pytest.mark.asyncio + @CrossSync.pytest async def test__ping_and_warm_single_instance(self): """ should be able to call ping and warm with single instance """ client_mock = mock.Mock() - with mock.patch.object(asyncio, "gather", 
AsyncMock()) as gather: - # simulate gather by returning the same number of items as passed in - gather.side_effect = lambda *args, **kwargs: [None for _ in args] + client_mock._execute_ping_and_warms = ( + lambda *args: self._get_target_class()._execute_ping_and_warms( + client_mock, *args + ) + ) + with mock.patch.object( + CrossSync, "gather_partials", CrossSync.Mock() + ) as gather: + gather.side_effect = lambda *args, **kwargs: [fn() for fn in args[0]] # test with large set of instances client_mock._active_instances = [mock.Mock()] * 100 test_key = ("test-instance", "test-table", "test-app-profile") @@ -298,7 +334,7 @@ async def test__ping_and_warm_single_instance(self): metadata[0][1] == "name=test-instance&app_profile_id=test-app-profile" ) - @pytest.mark.asyncio + @CrossSync.pytest @pytest.mark.parametrize( "refresh_interval, wait_time, expected_sleep", [ @@ -316,38 +352,43 @@ async def test__manage_channel_first_sleep( # first sleep time should be `refresh_interval` seconds after client init import time - with mock.patch.object(time, "monotonic") as time: - time.return_value = 0 - with mock.patch.object(asyncio, "sleep") as sleep: + with mock.patch.object(time, "monotonic") as monotonic: + monotonic.return_value = 0 + with mock.patch.object(CrossSync, "event_wait") as sleep: sleep.side_effect = asyncio.CancelledError try: - client = self._make_one(project="project-id") + client = self._make_client(project="project-id") client._channel_init_time = -wait_time await client._manage_channel(refresh_interval, refresh_interval) except asyncio.CancelledError: pass sleep.assert_called_once() - call_time = sleep.call_args[0][0] + call_time = sleep.call_args[0][1] assert ( abs(call_time - expected_sleep) < 0.1 ), f"refresh_interval: {refresh_interval}, wait_time: {wait_time}, expected_sleep: {expected_sleep}" await client.close() - @pytest.mark.asyncio + @CrossSync.pytest async def test__manage_channel_ping_and_warm(self): """ _manage channel should call ping and warm internally """ import time + import threading client_mock = mock.Mock() + client_mock._is_closed.is_set.return_value = False client_mock._channel_init_time = time.monotonic() orig_channel = client_mock.transport.grpc_channel # should ping an warm all new channels, and old channels if sleeping - with mock.patch.object(asyncio, "sleep"): + sleep_tuple = ( + (asyncio, "sleep") if CrossSync.is_async else (threading.Event, "wait") + ) + with mock.patch.object(*sleep_tuple): # stop process after close is called orig_channel.close.side_effect = asyncio.CancelledError - ping_and_warm = client_mock._ping_and_warm_instances = AsyncMock() + ping_and_warm = client_mock._ping_and_warm_instances = CrossSync.Mock() # should ping and warm old channel then new if sleep > 0 try: await self._get_target_class()._manage_channel(client_mock, 10) @@ -362,7 +403,7 @@ async def test__manage_channel_ping_and_warm(self): assert orig_channel in called_with assert client_mock.transport.grpc_channel in called_with - @pytest.mark.asyncio + @CrossSync.pytest @pytest.mark.parametrize( "refresh_interval, num_cycles, expected_sleep", [ @@ -379,46 +420,46 @@ async def test__manage_channel_sleeps( import random channel = mock.Mock() - channel.close = mock.AsyncMock() + channel.close = CrossSync.Mock() with mock.patch.object(random, "uniform") as uniform: uniform.side_effect = lambda min_, max_: min_ - with mock.patch.object(time, "time") as time: - time.return_value = 0 - with mock.patch.object(asyncio, "sleep") as sleep: + with mock.patch.object(time, "time") as 
time_mock: + time_mock.return_value = 0 + with mock.patch.object(CrossSync, "event_wait") as sleep: sleep.side_effect = [None for i in range(num_cycles - 1)] + [ asyncio.CancelledError ] - try: - client = self._make_one(project="project-id") - client.transport._grpc_channel = channel - with mock.patch.object( - client.transport, "create_channel", return_value=channel - ): + client = self._make_client(project="project-id") + client.transport._grpc_channel = channel + with mock.patch.object( + client.transport, "create_channel", CrossSync.Mock + ): + try: if refresh_interval is not None: await client._manage_channel( - refresh_interval, refresh_interval + refresh_interval, refresh_interval, grace_period=0 ) else: - await client._manage_channel() - except asyncio.CancelledError: - pass + await client._manage_channel(grace_period=0) + except asyncio.CancelledError: + pass assert sleep.call_count == num_cycles - total_sleep = sum([call[0][0] for call in sleep.call_args_list]) + total_sleep = sum([call[0][1] for call in sleep.call_args_list]) assert ( abs(total_sleep - expected_sleep) < 0.1 ), f"refresh_interval={refresh_interval}, num_cycles={num_cycles}, expected_sleep={expected_sleep}" await client.close() - @pytest.mark.asyncio + @CrossSync.pytest async def test__manage_channel_random(self): import random - with mock.patch.object(asyncio, "sleep") as sleep: + with mock.patch.object(CrossSync, "event_wait") as sleep: with mock.patch.object(random, "uniform") as uniform: uniform.return_value = 0 try: uniform.side_effect = asyncio.CancelledError - client = self._make_one(project="project-id") + client = self._make_client(project="project-id") except asyncio.CancelledError: uniform.side_effect = None uniform.reset_mock() @@ -429,7 +470,7 @@ async def test__manage_channel_random(self): uniform.side_effect = lambda min_, max_: min_ sleep.side_effect = [None, asyncio.CancelledError] try: - await client._manage_channel(min_val, max_val) + await client._manage_channel(min_val, max_val, grace_period=0) except asyncio.CancelledError: pass assert uniform.call_count == 2 @@ -438,39 +479,35 @@ async def test__manage_channel_random(self): assert found_min == min_val assert found_max == max_val - @pytest.mark.asyncio + @CrossSync.pytest @pytest.mark.parametrize("num_cycles", [0, 1, 10, 100]) async def test__manage_channel_refresh(self, num_cycles): # make sure that channels are properly refreshed - from google.api_core import grpc_helpers_async - - expected_grace = 9 expected_refresh = 0.5 - new_channel = grpc.aio.insecure_channel("localhost:8080") + grpc_lib = grpc.aio if CrossSync.is_async else grpc + new_channel = grpc_lib.insecure_channel("localhost:8080") - with mock.patch.object(asyncio, "sleep") as sleep: - sleep.side_effect = [None for i in range(num_cycles)] + [ - asyncio.CancelledError - ] + with mock.patch.object(CrossSync, "event_wait") as sleep: + sleep.side_effect = [None for i in range(num_cycles)] + [RuntimeError] with mock.patch.object( - grpc_helpers_async, "create_channel" + CrossSync.grpc_helpers, "create_channel" ) as create_channel: create_channel.return_value = new_channel - client = self._make_one(project="project-id", use_emulator=False) + client = self._make_client(project="project-id") create_channel.reset_mock() try: await client._manage_channel( refresh_interval_min=expected_refresh, refresh_interval_max=expected_refresh, - grace_period=expected_grace, + grace_period=0, ) - except asyncio.CancelledError: + except RuntimeError: pass assert sleep.call_count == num_cycles + 1 
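# editor's note (not a patch line): the mocked event_wait returns None for
# num_cycles iterations and then raises RuntimeError, so the refresh loop
# performs num_cycles channel refreshes plus one final, interrupted wait --
# hence one more sleep call than channel creation below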
assert create_channel.call_count == num_cycles await client.close() - @pytest.mark.asyncio + @CrossSync.pytest async def test__register_instance(self): """ test instance registration @@ -483,7 +520,7 @@ async def test__register_instance(self): client_mock._active_instances = active_instances client_mock._instance_owners = instance_owners client_mock._channel_refresh_task = None - client_mock._ping_and_warm_instances = AsyncMock() + client_mock._ping_and_warm_instances = CrossSync.Mock() table_mock = mock.Mock() await self._get_target_class()._register_instance( client_mock, "instance-1", table_mock @@ -535,7 +572,7 @@ async def test__register_instance(self): ] ) - @pytest.mark.asyncio + @CrossSync.pytest async def test__register_instance_duplicate(self): """ test double instance registration. Should be no-op @@ -547,10 +584,10 @@ async def test__register_instance_duplicate(self): instance_owners = {} client_mock._active_instances = active_instances client_mock._instance_owners = instance_owners - client_mock._channel_refresh_tasks = [object()] + client_mock._channel_refresh_task = object() mock_channels = [mock.Mock()] client_mock.transport.channels = mock_channels - client_mock._ping_and_warm_instances = AsyncMock() + client_mock._ping_and_warm_instances = CrossSync.Mock() table_mock = mock.Mock() expected_key = ( "prefix/instance-1", @@ -577,7 +614,7 @@ async def test__register_instance_duplicate(self): assert expected_key == tuple(list(instance_owners)[0]) assert client_mock._ping_and_warm_instances.call_count == 1 - @pytest.mark.asyncio + @CrossSync.pytest @pytest.mark.parametrize( "insert_instances,expected_active,expected_owner_keys", [ @@ -604,13 +641,8 @@ async def test__register_instance_state( instance_owners = {} client_mock._active_instances = active_instances client_mock._instance_owners = instance_owners - client_mock._channel_refresh_tasks = [] - client_mock._start_background_channel_refresh.side_effect = ( - lambda: client_mock._channel_refresh_tasks.append(mock.Mock) - ) - mock_channels = [mock.Mock() for i in range(5)] - client_mock.transport.channels = mock_channels - client_mock._ping_and_warm_instances = AsyncMock() + client_mock._channel_refresh_task = None + client_mock._ping_and_warm_instances = CrossSync.Mock() table_mock = mock.Mock() # register instances for instance, table, profile in insert_instances: @@ -636,9 +668,9 @@ async def test__register_instance_state( ] ) - @pytest.mark.asyncio + @CrossSync.pytest async def test__remove_instance_registration(self): - client = self._make_one(project="project-id") + client = self._make_client(project="project-id") table = mock.Mock() await client._register_instance("instance-1", table) await client._register_instance("instance-2", table) @@ -667,16 +699,16 @@ async def test__remove_instance_registration(self): assert len(client._active_instances) == 1 await client.close() - @pytest.mark.asyncio + @CrossSync.pytest async def test__multiple_table_registration(self): """ registering with multiple tables with the same key should add multiple owners to instance_owners, but only keep one copy of shared key in active_instances """ - from google.cloud.bigtable.data._async.client import _WarmedInstanceKey + from google.cloud.bigtable.data._helpers import _WarmedInstanceKey - async with self._make_one(project="project-id") as client: + async with self._make_client(project="project-id") as client: async with client.get_table("instance_1", "table_1") as table_1: instance_1_path = client._gapic_client.instance_path( client.project, 
"instance_1" @@ -689,12 +721,20 @@ async def test__multiple_table_registration(self): assert id(table_1) in client._instance_owners[instance_1_key] # duplicate table should register in instance_owners under same key async with client.get_table("instance_1", "table_1") as table_2: + assert table_2._register_instance_future is not None + if not CrossSync.is_async: + # give the background task time to run + table_2._register_instance_future.result() assert len(client._instance_owners[instance_1_key]) == 2 assert len(client._active_instances) == 1 assert id(table_1) in client._instance_owners[instance_1_key] assert id(table_2) in client._instance_owners[instance_1_key] # unique table should register in instance_owners and active_instances async with client.get_table("instance_1", "table_3") as table_3: + assert table_3._register_instance_future is not None + if not CrossSync.is_async: + # give the background task time to run + table_3._register_instance_future.result() instance_3_path = client._gapic_client.instance_path( client.project, "instance_1" ) @@ -716,17 +756,25 @@ async def test__multiple_table_registration(self): assert instance_1_key not in client._active_instances assert len(client._instance_owners[instance_1_key]) == 0 - @pytest.mark.asyncio + @CrossSync.pytest async def test__multiple_instance_registration(self): """ registering with multiple instance keys should update the key in instance_owners and active_instances """ - from google.cloud.bigtable.data._async.client import _WarmedInstanceKey + from google.cloud.bigtable.data._helpers import _WarmedInstanceKey - async with self._make_one(project="project-id") as client: + async with self._make_client(project="project-id") as client: async with client.get_table("instance_1", "table_1") as table_1: + assert table_1._register_instance_future is not None + if not CrossSync.is_async: + # give the background task time to run + table_1._register_instance_future.result() async with client.get_table("instance_2", "table_2") as table_2: + assert table_2._register_instance_future is not None + if not CrossSync.is_async: + # give the background task time to run + table_2._register_instance_future.result() instance_1_path = client._gapic_client.instance_path( client.project, "instance_1" ) @@ -755,12 +803,11 @@ async def test__multiple_instance_registration(self): assert len(client._instance_owners[instance_1_key]) == 0 assert len(client._instance_owners[instance_2_key]) == 0 - @pytest.mark.asyncio + @CrossSync.pytest async def test_get_table(self): - from google.cloud.bigtable.data._async.client import TableAsync - from google.cloud.bigtable.data._async.client import _WarmedInstanceKey + from google.cloud.bigtable.data._helpers import _WarmedInstanceKey - client = self._make_one(project="project-id") + client = self._make_client(project="project-id") assert not client._active_instances expected_table_id = "table-id" expected_instance_id = "instance-id" @@ -770,8 +817,8 @@ async def test_get_table(self): expected_table_id, expected_app_profile_id, ) - await asyncio.sleep(0) - assert isinstance(table, TableAsync) + await CrossSync.yield_to_event_loop() + assert isinstance(table, CrossSync.TestTable._get_target_class()) assert table.table_id == expected_table_id assert ( table.table_name @@ -791,14 +838,14 @@ async def test_get_table(self): assert client._instance_owners[instance_key] == {id(table)} await client.close() - @pytest.mark.asyncio + @CrossSync.pytest async def test_get_table_arg_passthrough(self): """ All arguments passed in 
get_table should be sent to constructor """ - async with self._make_one(project="project-id") as client: - with mock.patch( - "google.cloud.bigtable.data._async.client.TableAsync.__init__", + async with self._make_client(project="project-id") as client: + with mock.patch.object( + CrossSync.TestTable._get_target_class(), "__init__" ) as mock_constructor: mock_constructor.return_value = None assert not client._active_instances @@ -824,25 +871,26 @@ async def test_get_table_arg_passthrough(self): **expected_kwargs, ) - @pytest.mark.asyncio + @CrossSync.pytest async def test_get_table_context_manager(self): - from google.cloud.bigtable.data._async.client import TableAsync - from google.cloud.bigtable.data._async.client import _WarmedInstanceKey + from google.cloud.bigtable.data._helpers import _WarmedInstanceKey expected_table_id = "table-id" expected_instance_id = "instance-id" expected_app_profile_id = "app-profile-id" expected_project_id = "project-id" - with mock.patch.object(TableAsync, "close") as close_mock: - async with self._make_one(project=expected_project_id) as client: + with mock.patch.object( + CrossSync.TestTable._get_target_class(), "close" + ) as close_mock: + async with self._make_client(project=expected_project_id) as client: async with client.get_table( expected_instance_id, expected_table_id, expected_app_profile_id, ) as table: - await asyncio.sleep(0) - assert isinstance(table, TableAsync) + await CrossSync.yield_to_event_loop() + assert isinstance(table, CrossSync.TestTable._get_target_class()) assert table.table_id == expected_table_id assert ( table.table_name @@ -862,53 +910,63 @@ async def test_get_table_context_manager(self): assert client._instance_owners[instance_key] == {id(table)} assert close_mock.call_count == 1 - @pytest.mark.asyncio + @CrossSync.pytest async def test_close(self): - client = self._make_one(project="project-id", use_emulator=False) + client = self._make_client(project="project-id", use_emulator=False) task = client._channel_refresh_task assert task is not None assert not task.done() - with mock.patch.object(client.transport, "close", AsyncMock()) as close_mock: + with mock.patch.object( + client.transport, "close", CrossSync.Mock() + ) as close_mock: await client.close() close_mock.assert_called_once() - close_mock.assert_awaited() + if CrossSync.is_async: + close_mock.assert_awaited() assert task.done() - assert task.cancelled() assert client._channel_refresh_task is None - @pytest.mark.asyncio + @CrossSync.pytest async def test_close_with_timeout(self): expected_timeout = 19 - client = self._make_one(project="project-id", use_emulator=False) - with mock.patch.object(asyncio, "wait_for", AsyncMock()) as wait_for_mock: + client = self._make_client(project="project-id", use_emulator=False) + with mock.patch.object(CrossSync, "wait", CrossSync.Mock()) as wait_for_mock: await client.close(timeout=expected_timeout) wait_for_mock.assert_called_once() - wait_for_mock.assert_awaited() + if CrossSync.is_async: + wait_for_mock.assert_awaited() assert wait_for_mock.call_args[1]["timeout"] == expected_timeout await client.close() - @pytest.mark.asyncio + @CrossSync.pytest async def test_context_manager(self): + from functools import partial + # context manager should close the client cleanly - close_mock = AsyncMock() + close_mock = CrossSync.Mock() true_close = None - async with self._make_one(project="project-id", use_emulator=False) as client: - true_close = client.close() + async with self._make_client( + project="project-id", use_emulator=False + 
) as client: + # grab reference to close coro for async test + true_close = partial(client.close) client.close = close_mock assert not client._channel_refresh_task.done() assert client.project == "project-id" assert client._active_instances == set() close_mock.assert_not_called() close_mock.assert_called_once() - close_mock.assert_awaited() + if CrossSync.is_async: + close_mock.assert_awaited() # actually close the client - await true_close + await true_close() + @CrossSync.drop def test_client_ctor_sync(self): # initializing client in a sync context should raise RuntimeError with pytest.warns(RuntimeWarning) as warnings: - client = _make_client(project="project-id", use_emulator=False) + client = self._make_client(project="project-id", use_emulator=False) expected_warning = [w for w in warnings if "client.py" in w.filename] assert len(expected_warning) == 1 assert ( @@ -919,11 +977,20 @@ def test_client_ctor_sync(self): assert client._channel_refresh_task is None +@CrossSync.convert_class("TestTable", add_mapping_for_name="TestTable") class TestTableAsync: - @pytest.mark.asyncio + @CrossSync.convert + def _make_client(self, *args, **kwargs): + return CrossSync.TestBigtableDataClient._make_client(*args, **kwargs) + + @staticmethod + @CrossSync.convert + def _get_target_class(): + return CrossSync.Table + + @CrossSync.pytest async def test_table_ctor(self): - from google.cloud.bigtable.data._async.client import TableAsync - from google.cloud.bigtable.data._async.client import _WarmedInstanceKey + from google.cloud.bigtable.data._helpers import _WarmedInstanceKey expected_table_id = "table-id" expected_instance_id = "instance-id" @@ -934,10 +1001,10 @@ async def test_table_ctor(self): expected_read_rows_attempt_timeout = 0.5 expected_mutate_rows_operation_timeout = 2.5 expected_mutate_rows_attempt_timeout = 0.75 - client = _make_client() + client = self._make_client() assert not client._active_instances - table = TableAsync( + table = self._get_target_class()( client, expected_instance_id, expected_table_id, @@ -949,7 +1016,7 @@ async def test_table_ctor(self): default_mutate_rows_operation_timeout=expected_mutate_rows_operation_timeout, default_mutate_rows_attempt_timeout=expected_mutate_rows_attempt_timeout, ) - await asyncio.sleep(0) + await CrossSync.yield_to_event_loop() assert table.table_id == expected_table_id assert table.instance_id == expected_instance_id assert table.app_profile_id == expected_app_profile_id @@ -978,30 +1045,28 @@ async def test_table_ctor(self): == expected_mutate_rows_attempt_timeout ) # ensure task reaches completion - await table._register_instance_task - assert table._register_instance_task.done() - assert not table._register_instance_task.cancelled() - assert table._register_instance_task.exception() is None + await table._register_instance_future + assert table._register_instance_future.done() + assert not table._register_instance_future.cancelled() + assert table._register_instance_future.exception() is None await client.close() - @pytest.mark.asyncio + @CrossSync.pytest async def test_table_ctor_defaults(self): """ should provide default timeout values and app_profile_id """ - from google.cloud.bigtable.data._async.client import TableAsync - expected_table_id = "table-id" expected_instance_id = "instance-id" - client = _make_client() + client = self._make_client() assert not client._active_instances - table = TableAsync( + table = self._get_target_class()( client, expected_instance_id, expected_table_id, ) - await asyncio.sleep(0) + await 
CrossSync.yield_to_event_loop() assert table.table_id == expected_table_id assert table.instance_id == expected_instance_id assert table.app_profile_id is None @@ -1014,14 +1079,12 @@ async def test_table_ctor_defaults(self): assert table.default_mutate_rows_attempt_timeout == 60 await client.close() - @pytest.mark.asyncio + @CrossSync.pytest async def test_table_ctor_invalid_timeout_values(self): """ bad timeout values should raise ValueError """ - from google.cloud.bigtable.data._async.client import TableAsync - - client = _make_client() + client = self._make_client() timeout_pairs = [ ("default_operation_timeout", "default_attempt_timeout"), @@ -1036,68 +1099,67 @@ async def test_table_ctor_invalid_timeout_values(self): ] for operation_timeout, attempt_timeout in timeout_pairs: with pytest.raises(ValueError) as e: - TableAsync(client, "", "", **{attempt_timeout: -1}) + self._get_target_class()(client, "", "", **{attempt_timeout: -1}) assert "attempt_timeout must be greater than 0" in str(e.value) with pytest.raises(ValueError) as e: - TableAsync(client, "", "", **{operation_timeout: -1}) + self._get_target_class()(client, "", "", **{operation_timeout: -1}) assert "operation_timeout must be greater than 0" in str(e.value) await client.close() + @CrossSync.drop def test_table_ctor_sync(self): # initializing client in a sync context should raise RuntimeError - from google.cloud.bigtable.data._async.client import TableAsync - client = mock.Mock() with pytest.raises(RuntimeError) as e: TableAsync(client, "instance-id", "table-id") assert e.match("TableAsync must be created within an async event loop context.") - @pytest.mark.asyncio + @CrossSync.pytest # iterate over all retryable rpcs @pytest.mark.parametrize( - "fn_name,fn_args,retry_fn_path,extra_retryables", + "fn_name,fn_args,is_stream,extra_retryables", [ ( "read_rows_stream", (ReadRowsQuery(),), - "google.api_core.retry.retry_target_stream_async", + True, (), ), ( "read_rows", (ReadRowsQuery(),), - "google.api_core.retry.retry_target_stream_async", + True, (), ), ( "read_row", (b"row_key",), - "google.api_core.retry.retry_target_stream_async", + True, (), ), ( "read_rows_sharded", ([ReadRowsQuery()],), - "google.api_core.retry.retry_target_stream_async", + True, (), ), ( "row_exists", (b"row_key",), - "google.api_core.retry.retry_target_stream_async", + True, (), ), - ("sample_row_keys", (), "google.api_core.retry.retry_target_async", ()), + ("sample_row_keys", (), False, ()), ( "mutate_row", (b"row_key", [mock.Mock()]), - "google.api_core.retry.retry_target_async", + False, (), ), ( "bulk_mutate_rows", - ([mutations.RowMutationEntry(b"key", [mock.Mock()])],), - "google.api_core.retry.retry_target_async", + ([mutations.RowMutationEntry(b"key", [mutations.DeleteAllFromRow()])],), + False, (_MutateRowsIncomplete,), ), ], @@ -1132,17 +1194,26 @@ async def test_customizable_retryable_errors( expected_retryables, fn_name, fn_args, - retry_fn_path, + is_stream, extra_retryables, ): """ Test that retryable functions support user-configurable arguments, and that the configured retryables are passed down to the gapic layer. 
""" - with mock.patch(retry_fn_path) as retry_fn_mock: - async with _make_client() as client: + retry_fn = "retry_target" + if is_stream: + retry_fn += "_stream" + if CrossSync.is_async: + retry_fn = f"CrossSync.{retry_fn}" + else: + retry_fn = f"CrossSync._Sync_Impl.{retry_fn}" + with mock.patch( + f"google.cloud.bigtable.data._cross_sync.{retry_fn}" + ) as retry_fn_mock: + async with self._make_client() as client: table = client.get_table("instance-id", "table-id") - expected_predicate = lambda a: a in expected_retryables # noqa + expected_predicate = expected_retryables.__contains__ retry_fn_mock.side_effect = RuntimeError("stop early") with mock.patch( "google.api_core.retry.if_exception_type" @@ -1184,20 +1255,22 @@ async def test_customizable_retryable_errors( ], ) @pytest.mark.parametrize("include_app_profile", [True, False]) - @pytest.mark.asyncio + @CrossSync.pytest + @CrossSync.convert async def test_call_metadata(self, include_app_profile, fn_name, fn_args, gapic_fn): - """check that all requests attach proper metadata headers""" - from google.cloud.bigtable.data import TableAsync - profile = "profile" if include_app_profile else None - client = _make_client() + client = self._make_client() # create mock for rpc stub transport_mock = mock.MagicMock() - rpc_mock = mock.AsyncMock() + rpc_mock = CrossSync.Mock() transport_mock._wrapped_methods.__getitem__.return_value = rpc_mock - client._gapic_client._client._transport = transport_mock - client._gapic_client._client._is_universe_domain_valid = True - table = TableAsync(client, "instance-id", "table-id", profile) + gapic_client = client._gapic_client + if CrossSync.is_async: + # inner BigtableClient is held as ._client for BigtableAsyncClient + gapic_client = gapic_client._client + gapic_client._transport = transport_mock + gapic_client._is_universe_domain_valid = True + table = self._get_target_class()(client, "instance-id", "table-id", profile) try: test_fn = table.__getattribute__(fn_name) maybe_stream = await test_fn(*fn_args) @@ -1220,20 +1293,32 @@ async def test_call_metadata(self, include_app_profile, fn_name, fn_args, gapic_ assert "app_profile_id=" not in routing_str -class TestReadRows: +@CrossSync.convert_class( + "TestReadRows", + add_mapping_for_name="TestReadRows", +) +class TestReadRowsAsync: """ Tests for table.read_rows and related methods. 
""" - def _make_table(self, *args, **kwargs): - from google.cloud.bigtable.data._async.client import TableAsync + @staticmethod + @CrossSync.convert + def _get_operation_class(): + return CrossSync._ReadRowsOperation + + @CrossSync.convert + def _make_client(self, *args, **kwargs): + return CrossSync.TestBigtableDataClient._make_client(*args, **kwargs) + @CrossSync.convert + def _make_table(self, *args, **kwargs): client_mock = mock.Mock() client_mock._register_instance.side_effect = ( - lambda *args, **kwargs: asyncio.sleep(0) + lambda *args, **kwargs: CrossSync.yield_to_event_loop() ) client_mock._remove_instance_registration.side_effect = ( - lambda *args, **kwargs: asyncio.sleep(0) + lambda *args, **kwargs: CrossSync.yield_to_event_loop() ) kwargs["instance_id"] = kwargs.get( "instance_id", args[0] if args else "instance" @@ -1243,7 +1328,7 @@ def _make_table(self, *args, **kwargs): ) client_mock._gapic_client.table_path.return_value = kwargs["table_id"] client_mock._gapic_client.instance_path.return_value = kwargs["instance_id"] - return TableAsync(client_mock, *args, **kwargs) + return CrossSync.TestTable._get_target_class()(client_mock, *args, **kwargs) def _make_stats(self): from google.cloud.bigtable_v2.types import RequestStats @@ -1274,6 +1359,7 @@ def _make_chunk(*args, **kwargs): return ReadRowsResponse.CellChunk(*args, **kwargs) @staticmethod + @CrossSync.convert async def _make_gapic_stream( chunk_list: list[ReadRowsResponse.CellChunk | Exception], sleep_time=0, @@ -1286,30 +1372,33 @@ def __init__(self, chunk_list, sleep_time): self.idx = -1 self.sleep_time = sleep_time + @CrossSync.convert(sync_name="__iter__") def __aiter__(self): return self + @CrossSync.convert(sync_name="__next__") async def __anext__(self): self.idx += 1 if len(self.chunk_list) > self.idx: if sleep_time: - await asyncio.sleep(self.sleep_time) + await CrossSync.sleep(self.sleep_time) chunk = self.chunk_list[self.idx] if isinstance(chunk, Exception): raise chunk else: return ReadRowsResponse(chunks=[chunk]) - raise StopAsyncIteration + raise CrossSync.StopIteration def cancel(self): pass return mock_stream(chunk_list, sleep_time) + @CrossSync.convert async def execute_fn(self, table, *args, **kwargs): return await table.read_rows(*args, **kwargs) - @pytest.mark.asyncio + @CrossSync.pytest async def test_read_rows(self): query = ReadRowsQuery() chunks = [ @@ -1326,7 +1415,7 @@ async def test_read_rows(self): assert results[0].row_key == b"test_1" assert results[1].row_key == b"test_2" - @pytest.mark.asyncio + @CrossSync.pytest async def test_read_rows_stream(self): query = ReadRowsQuery() chunks = [ @@ -1345,7 +1434,7 @@ async def test_read_rows_stream(self): assert results[1].row_key == b"test_2" @pytest.mark.parametrize("include_app_profile", [True, False]) - @pytest.mark.asyncio + @CrossSync.pytest async def test_read_rows_query_matches_request(self, include_app_profile): from google.cloud.bigtable.data import RowRange from google.cloud.bigtable.data.row_filters import PassAllFilter @@ -1372,14 +1461,14 @@ async def test_read_rows_query_matches_request(self, include_app_profile): assert call_request == query_pb @pytest.mark.parametrize("operation_timeout", [0.001, 0.023, 0.1]) - @pytest.mark.asyncio + @CrossSync.pytest async def test_read_rows_timeout(self, operation_timeout): async with self._make_table() as table: read_rows = table.client._gapic_client.read_rows query = ReadRowsQuery() chunks = [self._make_chunk(row_key=b"test_1")] read_rows.side_effect = lambda *args, **kwargs: 
self._make_gapic_stream( - chunks, sleep_time=1 + chunks, sleep_time=0.15 ) try: await table.read_rows(query, operation_timeout=operation_timeout) @@ -1397,7 +1486,7 @@ async def test_read_rows_timeout(self, operation_timeout): (0.05, 0.24, 5), ], ) - @pytest.mark.asyncio + @CrossSync.pytest async def test_read_rows_attempt_timeout( self, per_request_t, operation_t, expected_num ): @@ -1460,7 +1549,7 @@ async def test_read_rows_attempt_timeout( core_exceptions.ServiceUnavailable, ], ) - @pytest.mark.asyncio + @CrossSync.pytest async def test_read_rows_retryable_error(self, exc_type): async with self._make_table() as table: read_rows = table.client._gapic_client.read_rows @@ -1491,7 +1580,7 @@ async def test_read_rows_retryable_error(self, exc_type): InvalidChunk, ], ) - @pytest.mark.asyncio + @CrossSync.pytest async def test_read_rows_non_retryable_error(self, exc_type): async with self._make_table() as table: read_rows = table.client._gapic_client.read_rows @@ -1505,18 +1594,17 @@ async def test_read_rows_non_retryable_error(self, exc_type): except exc_type as e: assert e == expected_error - @pytest.mark.asyncio + @CrossSync.pytest async def test_read_rows_revise_request(self): """ Ensure that _revise_request is called between retries """ - from google.cloud.bigtable.data._async._read_rows import _ReadRowsOperationAsync from google.cloud.bigtable.data.exceptions import InvalidChunk from google.cloud.bigtable_v2.types import RowSet return_val = RowSet() with mock.patch.object( - _ReadRowsOperationAsync, "_revise_request_rowset" + self._get_operation_class(), "_revise_request_rowset" ) as revise_rowset: revise_rowset.return_value = return_val async with self._make_table() as table: @@ -1540,16 +1628,14 @@ async def test_read_rows_revise_request(self): revised_call = read_rows.call_args_list[1].args[0] assert revised_call.rows == return_val - @pytest.mark.asyncio + @CrossSync.pytest async def test_read_rows_default_timeouts(self): """ Ensure that the default timeouts are set on the read rows operation when not overridden """ - from google.cloud.bigtable.data._async._read_rows import _ReadRowsOperationAsync - operation_timeout = 8 attempt_timeout = 4 - with mock.patch.object(_ReadRowsOperationAsync, "__init__") as mock_op: + with mock.patch.object(self._get_operation_class(), "__init__") as mock_op: mock_op.side_effect = RuntimeError("mock error") async with self._make_table( default_read_rows_operation_timeout=operation_timeout, @@ -1563,16 +1649,14 @@ async def test_read_rows_default_timeouts(self): assert kwargs["operation_timeout"] == operation_timeout assert kwargs["attempt_timeout"] == attempt_timeout - @pytest.mark.asyncio + @CrossSync.pytest async def test_read_rows_default_timeout_override(self): """ When timeouts are passed, they overwrite default values """ - from google.cloud.bigtable.data._async._read_rows import _ReadRowsOperationAsync - operation_timeout = 8 attempt_timeout = 4 - with mock.patch.object(_ReadRowsOperationAsync, "__init__") as mock_op: + with mock.patch.object(self._get_operation_class(), "__init__") as mock_op: mock_op.side_effect = RuntimeError("mock error") async with self._make_table( default_operation_timeout=99, default_attempt_timeout=97 @@ -1589,10 +1673,10 @@ async def test_read_rows_default_timeout_override(self): assert kwargs["operation_timeout"] == operation_timeout assert kwargs["attempt_timeout"] == attempt_timeout - @pytest.mark.asyncio + @CrossSync.pytest async def test_read_row(self): """Test reading a single row""" - async with _make_client() 
as client: + async with self._make_client() as client: table = client.get_table("instance", "table") row_key = b"test_1" with mock.patch.object(table, "read_rows") as read_rows: @@ -1617,10 +1701,10 @@ async def test_read_row(self): assert query.row_ranges == [] assert query.limit == 1 - @pytest.mark.asyncio + @CrossSync.pytest async def test_read_row_w_filter(self): """Test reading a single row with an added filter""" - async with _make_client() as client: + async with self._make_client() as client: table = client.get_table("instance", "table") row_key = b"test_1" with mock.patch.object(table, "read_rows") as read_rows: @@ -1650,10 +1734,10 @@ async def test_read_row_w_filter(self): assert query.limit == 1 assert query.filter == expected_filter - @pytest.mark.asyncio + @CrossSync.pytest async def test_read_row_no_response(self): """should return None if row does not exist""" - async with _make_client() as client: + async with self._make_client() as client: table = client.get_table("instance", "table") row_key = b"test_1" with mock.patch.object(table, "read_rows") as read_rows: @@ -1685,10 +1769,10 @@ async def test_read_row_no_response(self): ([object(), object()], True), ], ) - @pytest.mark.asyncio + @CrossSync.pytest async def test_row_exists(self, return_value, expected_result): """Test checking for row existence""" - async with _make_client() as client: + async with self._make_client() as client: table = client.get_table("instance", "table") row_key = b"test_1" with mock.patch.object(table, "read_rows") as read_rows: @@ -1722,32 +1806,35 @@ async def test_row_exists(self, return_value, expected_result): assert query.filter._to_dict() == expected_filter -class TestReadRowsSharded: - @pytest.mark.asyncio +@CrossSync.convert_class("TestReadRowsSharded") +class TestReadRowsShardedAsync: + @CrossSync.convert + def _make_client(self, *args, **kwargs): + return CrossSync.TestBigtableDataClient._make_client(*args, **kwargs) + + @CrossSync.pytest async def test_read_rows_sharded_empty_query(self): - async with _make_client() as client: + async with self._make_client() as client: async with client.get_table("instance", "table") as table: with pytest.raises(ValueError) as exc: await table.read_rows_sharded([]) assert "empty sharded_query" in str(exc.value) - @pytest.mark.asyncio + @CrossSync.pytest async def test_read_rows_sharded_multiple_queries(self): """ Test with multiple queries. 
Should return results from both """ - async with _make_client() as client: + async with self._make_client() as client: async with client.get_table("instance", "table") as table: with mock.patch.object( table.client._gapic_client, "read_rows" ) as read_rows: - read_rows.side_effect = ( - lambda *args, **kwargs: TestReadRows._make_gapic_stream( - [ - TestReadRows._make_chunk(row_key=k) - for k in args[0].rows.row_keys - ] - ) + read_rows.side_effect = lambda *args, **kwargs: CrossSync.TestReadRows._make_gapic_stream( + [ + CrossSync.TestReadRows._make_chunk(row_key=k) + for k in args[0].rows.row_keys + ] ) query_1 = ReadRowsQuery(b"test_1") query_2 = ReadRowsQuery(b"test_2") @@ -1757,19 +1844,19 @@ async def test_read_rows_sharded_multiple_queries(self): assert result[1].row_key == b"test_2" @pytest.mark.parametrize("n_queries", [1, 2, 5, 11, 24]) - @pytest.mark.asyncio + @CrossSync.pytest async def test_read_rows_sharded_multiple_queries_calls(self, n_queries): """ Each query should trigger a separate read_rows call """ - async with _make_client() as client: + async with self._make_client() as client: async with client.get_table("instance", "table") as table: with mock.patch.object(table, "read_rows") as read_rows: query_list = [ReadRowsQuery() for _ in range(n_queries)] await table.read_rows_sharded(query_list) assert read_rows.call_count == n_queries - @pytest.mark.asyncio + @CrossSync.pytest async def test_read_rows_sharded_errors(self): """ Errors should be exposed as ShardedReadRowsExceptionGroups @@ -1777,7 +1864,7 @@ async def test_read_rows_sharded_errors(self): from google.cloud.bigtable.data.exceptions import ShardedReadRowsExceptionGroup from google.cloud.bigtable.data.exceptions import FailedQueryShardError - async with _make_client() as client: + async with self._make_client() as client: async with client.get_table("instance", "table") as table: with mock.patch.object(table, "read_rows") as read_rows: read_rows.side_effect = RuntimeError("mock error") @@ -1797,7 +1884,7 @@ async def test_read_rows_sharded_errors(self): assert exc.value.exceptions[1].index == 1 assert exc.value.exceptions[1].query == query_2 - @pytest.mark.asyncio + @CrossSync.pytest async def test_read_rows_sharded_concurrent(self): """ Ensure sharded requests are concurrent @@ -1805,10 +1892,10 @@ async def test_read_rows_sharded_concurrent(self): import time async def mock_call(*args, **kwargs): - await asyncio.sleep(0.1) + await CrossSync.sleep(0.1) return [mock.Mock()] - async with _make_client() as client: + async with self._make_client() as client: async with client.get_table("instance", "table") as table: with mock.patch.object(table, "read_rows") as read_rows: read_rows.side_effect = mock_call @@ -1821,14 +1908,14 @@ async def mock_call(*args, **kwargs): # if run in sequence, we would expect this to take 1 second assert call_time < 0.2 - @pytest.mark.asyncio + @CrossSync.pytest async def test_read_rows_sharded_concurrency_limit(self): """ Only 10 queries should be processed concurrently. 
Others should be queued Should start a new query as soon as previous finishes """ - from google.cloud.bigtable.data._async.client import _CONCURRENCY_LIMIT + from google.cloud.bigtable.data._helpers import _CONCURRENCY_LIMIT assert _CONCURRENCY_LIMIT == 10 # change this test if this changes num_queries = 15 @@ -1846,7 +1933,7 @@ async def mock_call(*args, **kwargs): starting_timeout = 10 - async with _make_client() as client: + async with self._make_client() as client: async with client.get_table("instance", "table") as table: with mock.patch.object(table, "read_rows") as read_rows: read_rows.side_effect = mock_call @@ -1870,13 +1957,13 @@ async def mock_call(*args, **kwargs): idx = i + _CONCURRENCY_LIMIT assert rpc_start_list[idx] - (i * increment_time) < eps - @pytest.mark.asyncio + @CrossSync.pytest async def test_read_rows_sharded_expirary(self): """ If the operation times out before all shards complete, should raise a ShardedReadRowsExceptionGroup """ - from google.cloud.bigtable.data._async.client import _CONCURRENCY_LIMIT + from google.cloud.bigtable.data._helpers import _CONCURRENCY_LIMIT from google.cloud.bigtable.data.exceptions import ShardedReadRowsExceptionGroup from google.api_core.exceptions import DeadlineExceeded @@ -1896,7 +1983,7 @@ async def mock_call(*args, **kwargs): await asyncio.sleep(next_item) return [mock.Mock()] - async with _make_client() as client: + async with self._make_client() as client: async with client.get_table("instance", "table") as table: with mock.patch.object(table, "read_rows") as read_rows: read_rows.side_effect = mock_call @@ -1910,7 +1997,7 @@ async def mock_call(*args, **kwargs): # should keep successful queries assert len(exc.value.successful_rows) == _CONCURRENCY_LIMIT - @pytest.mark.asyncio + @CrossSync.pytest async def test_read_rows_sharded_negative_batch_timeout(self): """ try to run with batch that starts after operation timeout @@ -1921,10 +2008,10 @@ async def test_read_rows_sharded_negative_batch_timeout(self): from google.api_core.exceptions import DeadlineExceeded async def mock_call(*args, **kwargs): - await asyncio.sleep(0.05) + await CrossSync.sleep(0.05) return [mock.Mock()] - async with _make_client() as client: + async with self._make_client() as client: async with client.get_table("instance", "table") as table: with mock.patch.object(table, "read_rows") as read_rows: read_rows.side_effect = mock_call @@ -1939,14 +2026,20 @@ async def mock_call(*args, **kwargs): ) -class TestSampleRowKeys: +@CrossSync.convert_class("TestSampleRowKeys") +class TestSampleRowKeysAsync: + @CrossSync.convert + def _make_client(self, *args, **kwargs): + return CrossSync.TestBigtableDataClient._make_client(*args, **kwargs) + + @CrossSync.convert async def _make_gapic_stream(self, sample_list: list[tuple[bytes, int]]): from google.cloud.bigtable_v2.types import SampleRowKeysResponse for value in sample_list: yield SampleRowKeysResponse(row_key=value[0], offset_bytes=value[1]) - @pytest.mark.asyncio + @CrossSync.pytest async def test_sample_row_keys(self): """ Test that method returns the expected key samples @@ -1956,10 +2049,10 @@ async def test_sample_row_keys(self): (b"test_2", 100), (b"test_3", 200), ] - async with _make_client() as client: + async with self._make_client() as client: async with client.get_table("instance", "table") as table: with mock.patch.object( - table.client._gapic_client, "sample_row_keys", AsyncMock() + table.client._gapic_client, "sample_row_keys", CrossSync.Mock() ) as sample_row_keys: sample_row_keys.return_value = 
self._make_gapic_stream(samples) result = await table.sample_row_keys() @@ -1971,12 +2064,12 @@ async def test_sample_row_keys(self): assert result[1] == samples[1] assert result[2] == samples[2] - @pytest.mark.asyncio + @CrossSync.pytest async def test_sample_row_keys_bad_timeout(self): """ should raise error if timeout is negative """ - async with _make_client() as client: + async with self._make_client() as client: async with client.get_table("instance", "table") as table: with pytest.raises(ValueError) as e: await table.sample_row_keys(operation_timeout=-1) @@ -1985,11 +2078,11 @@ async def test_sample_row_keys_bad_timeout(self): await table.sample_row_keys(attempt_timeout=-1) assert "attempt_timeout must be greater than 0" in str(e.value) - @pytest.mark.asyncio + @CrossSync.pytest async def test_sample_row_keys_default_timeout(self): """Should fallback to using table default operation_timeout""" expected_timeout = 99 - async with _make_client() as client: + async with self._make_client() as client: async with client.get_table( "i", "t", @@ -1997,7 +2090,7 @@ async def test_sample_row_keys_default_timeout(self): default_attempt_timeout=expected_timeout, ) as table: with mock.patch.object( - table.client._gapic_client, "sample_row_keys", AsyncMock() + table.client._gapic_client, "sample_row_keys", CrossSync.Mock() ) as sample_row_keys: sample_row_keys.return_value = self._make_gapic_stream([]) result = await table.sample_row_keys() @@ -2006,7 +2099,7 @@ async def test_sample_row_keys_default_timeout(self): assert result == [] assert kwargs["retry"] is None - @pytest.mark.asyncio + @CrossSync.pytest async def test_sample_row_keys_gapic_params(self): """ make sure arguments are propagated to gapic call as expected @@ -2015,12 +2108,12 @@ async def test_sample_row_keys_gapic_params(self): expected_profile = "test1" instance = "instance_name" table_id = "my_table" - async with _make_client() as client: + async with self._make_client() as client: async with client.get_table( instance, table_id, app_profile_id=expected_profile ) as table: with mock.patch.object( - table.client._gapic_client, "sample_row_keys", AsyncMock() + table.client._gapic_client, "sample_row_keys", CrossSync.Mock() ) as sample_row_keys: sample_row_keys.return_value = self._make_gapic_stream([]) await table.sample_row_keys(attempt_timeout=expected_timeout) @@ -2039,7 +2132,7 @@ async def test_sample_row_keys_gapic_params(self): core_exceptions.ServiceUnavailable, ], ) - @pytest.mark.asyncio + @CrossSync.pytest async def test_sample_row_keys_retryable_errors(self, retryable_exception): """ retryable errors should be retried until timeout @@ -2047,10 +2140,10 @@ async def test_sample_row_keys_retryable_errors(self, retryable_exception): from google.api_core.exceptions import DeadlineExceeded from google.cloud.bigtable.data.exceptions import RetryExceptionGroup - async with _make_client() as client: + async with self._make_client() as client: async with client.get_table("instance", "table") as table: with mock.patch.object( - table.client._gapic_client, "sample_row_keys", AsyncMock() + table.client._gapic_client, "sample_row_keys", CrossSync.Mock() ) as sample_row_keys: sample_row_keys.side_effect = retryable_exception("mock") with pytest.raises(DeadlineExceeded) as e: @@ -2071,23 +2164,28 @@ async def test_sample_row_keys_retryable_errors(self, retryable_exception): core_exceptions.Aborted, ], ) - @pytest.mark.asyncio + @CrossSync.pytest async def test_sample_row_keys_non_retryable_errors(self, non_retryable_exception): """ 
non-retryable errors should cause a raise """ - async with _make_client() as client: + async with self._make_client() as client: async with client.get_table("instance", "table") as table: with mock.patch.object( - table.client._gapic_client, "sample_row_keys", AsyncMock() + table.client._gapic_client, "sample_row_keys", CrossSync.Mock() ) as sample_row_keys: sample_row_keys.side_effect = non_retryable_exception("mock") with pytest.raises(non_retryable_exception): await table.sample_row_keys() -class TestMutateRow: - @pytest.mark.asyncio +@CrossSync.convert_class("TestMutateRow") +class TestMutateRowAsync: + @CrossSync.convert + def _make_client(self, *args, **kwargs): + return CrossSync.TestBigtableDataClient._make_client(*args, **kwargs) + + @CrossSync.pytest @pytest.mark.parametrize( "mutation_arg", [ @@ -2108,7 +2206,7 @@ class TestMutateRow: async def test_mutate_row(self, mutation_arg): """Test mutations with no errors""" expected_attempt_timeout = 19 - async with _make_client(project="project") as client: + async with self._make_client(project="project") as client: async with client.get_table("instance", "table") as table: with mock.patch.object( client._gapic_client, "mutate_row" @@ -2143,12 +2241,12 @@ async def test_mutate_row(self, mutation_arg): core_exceptions.ServiceUnavailable, ], ) - @pytest.mark.asyncio + @CrossSync.pytest async def test_mutate_row_retryable_errors(self, retryable_exception): from google.api_core.exceptions import DeadlineExceeded from google.cloud.bigtable.data.exceptions import RetryExceptionGroup - async with _make_client(project="project") as client: + async with self._make_client(project="project") as client: async with client.get_table("instance", "table") as table: with mock.patch.object( client._gapic_client, "mutate_row" @@ -2171,14 +2269,14 @@ async def test_mutate_row_retryable_errors(self, retryable_exception): core_exceptions.ServiceUnavailable, ], ) - @pytest.mark.asyncio + @CrossSync.pytest async def test_mutate_row_non_idempotent_retryable_errors( self, retryable_exception ): """ Non-idempotent mutations should not be retried """ - async with _make_client(project="project") as client: + async with self._make_client(project="project") as client: async with client.get_table("instance", "table") as table: with mock.patch.object( client._gapic_client, "mutate_row" @@ -2204,9 +2302,9 @@ async def test_mutate_row_non_idempotent_retryable_errors( core_exceptions.Aborted, ], ) - @pytest.mark.asyncio + @CrossSync.pytest async def test_mutate_row_non_retryable_errors(self, non_retryable_exception): - async with _make_client(project="project") as client: + async with self._make_client(project="project") as client: async with client.get_table("instance", "table") as table: with mock.patch.object( client._gapic_client, "mutate_row" @@ -2225,16 +2323,22 @@ async def test_mutate_row_non_retryable_errors(self, non_retryable_exception): ) @pytest.mark.parametrize("mutations", [[], None]) - @pytest.mark.asyncio + @CrossSync.pytest async def test_mutate_row_no_mutations(self, mutations): - async with _make_client() as client: + async with self._make_client() as client: async with client.get_table("instance", "table") as table: with pytest.raises(ValueError) as e: await table.mutate_row("key", mutations=mutations) assert e.value.args[0] == "No mutations provided" -class TestBulkMutateRows: +@CrossSync.convert_class("TestBulkMutateRows") +class TestBulkMutateRowsAsync: + @CrossSync.convert + def _make_client(self, *args, **kwargs): + return 
CrossSync.TestBigtableDataClient._make_client(*args, **kwargs) + + @CrossSync.convert async def _mock_response(self, response_list): from google.cloud.bigtable_v2.types import MutateRowsResponse from google.rpc import status_pb2 @@ -2254,13 +2358,14 @@ async def _mock_response(self, response_list): for i in range(len(response_list)) ] + @CrossSync.convert async def generator(): yield MutateRowsResponse(entries=entries) return generator() - @pytest.mark.asyncio - @pytest.mark.asyncio + @CrossSync.pytest + @CrossSync.pytest @pytest.mark.parametrize( "mutation_arg", [ @@ -2283,7 +2388,7 @@ async def generator(): async def test_bulk_mutate_rows(self, mutation_arg): """Test mutations with no errors""" expected_attempt_timeout = 19 - async with _make_client(project="project") as client: + async with self._make_client(project="project") as client: async with client.get_table("instance", "table") as table: with mock.patch.object( client._gapic_client, "mutate_rows" @@ -2304,10 +2409,10 @@ async def test_bulk_mutate_rows(self, mutation_arg): assert kwargs["timeout"] == expected_attempt_timeout assert kwargs["retry"] is None - @pytest.mark.asyncio + @CrossSync.pytest async def test_bulk_mutate_rows_multiple_entries(self): """Test mutations with no errors""" - async with _make_client(project="project") as client: + async with self._make_client(project="project") as client: async with client.get_table("instance", "table") as table: with mock.patch.object( client._gapic_client, "mutate_rows" @@ -2328,7 +2433,7 @@ async def test_bulk_mutate_rows_multiple_entries(self): assert kwargs["entries"][0] == entry_1._to_pb() assert kwargs["entries"][1] == entry_2._to_pb() - @pytest.mark.asyncio + @CrossSync.pytest @pytest.mark.parametrize( "exception", [ @@ -2348,7 +2453,7 @@ async def test_bulk_mutate_rows_idempotent_mutation_error_retryable( MutationsExceptionGroup, ) - async with _make_client(project="project") as client: + async with self._make_client(project="project") as client: async with client.get_table("instance", "table") as table: with mock.patch.object( client._gapic_client, "mutate_rows" @@ -2373,7 +2478,7 @@ async def test_bulk_mutate_rows_idempotent_mutation_error_retryable( cause.exceptions[-1], core_exceptions.DeadlineExceeded ) - @pytest.mark.asyncio + @CrossSync.pytest @pytest.mark.parametrize( "exception", [ @@ -2394,7 +2499,7 @@ async def test_bulk_mutate_rows_idempotent_mutation_error_non_retryable( MutationsExceptionGroup, ) - async with _make_client(project="project") as client: + async with self._make_client(project="project") as client: async with client.get_table("instance", "table") as table: with mock.patch.object( client._gapic_client, "mutate_rows" @@ -2421,7 +2526,7 @@ async def test_bulk_mutate_rows_idempotent_mutation_error_non_retryable( core_exceptions.ServiceUnavailable, ], ) - @pytest.mark.asyncio + @CrossSync.pytest async def test_bulk_mutate_idempotent_retryable_request_errors( self, retryable_exception ): @@ -2434,7 +2539,7 @@ async def test_bulk_mutate_idempotent_retryable_request_errors( MutationsExceptionGroup, ) - async with _make_client(project="project") as client: + async with self._make_client(project="project") as client: async with client.get_table("instance", "table") as table: with mock.patch.object( client._gapic_client, "mutate_rows" @@ -2455,7 +2560,7 @@ async def test_bulk_mutate_idempotent_retryable_request_errors( assert isinstance(cause, RetryExceptionGroup) assert isinstance(cause.exceptions[0], retryable_exception) - @pytest.mark.asyncio + 
@CrossSync.pytest @pytest.mark.parametrize( "retryable_exception", [ @@ -2472,7 +2577,7 @@ async def test_bulk_mutate_rows_non_idempotent_retryable_errors( MutationsExceptionGroup, ) - async with _make_client(project="project") as client: + async with self._make_client(project="project") as client: async with client.get_table("instance", "table") as table: with mock.patch.object( client._gapic_client, "mutate_rows" @@ -2504,7 +2609,7 @@ async def test_bulk_mutate_rows_non_idempotent_retryable_errors( ValueError, ], ) - @pytest.mark.asyncio + @CrossSync.pytest async def test_bulk_mutate_rows_non_retryable_errors(self, non_retryable_exception): """ If the request fails with a non-retryable error, mutations should not be retried @@ -2514,7 +2619,7 @@ async def test_bulk_mutate_rows_non_retryable_errors(self, non_retryable_excepti MutationsExceptionGroup, ) - async with _make_client(project="project") as client: + async with self._make_client(project="project") as client: async with client.get_table("instance", "table") as table: with mock.patch.object( client._gapic_client, "mutate_rows" @@ -2534,7 +2639,7 @@ async def test_bulk_mutate_rows_non_retryable_errors(self, non_retryable_excepti cause = failed_exception.__cause__ assert isinstance(cause, non_retryable_exception) - @pytest.mark.asyncio + @CrossSync.pytest async def test_bulk_mutate_error_index(self): """ Test partial failure, partial success. Errors should be associated with the correct index @@ -2550,7 +2655,7 @@ async def test_bulk_mutate_error_index(self): MutationsExceptionGroup, ) - async with _make_client(project="project") as client: + async with self._make_client(project="project") as client: async with client.get_table("instance", "table") as table: with mock.patch.object( client._gapic_client, "mutate_rows" @@ -2585,14 +2690,14 @@ async def test_bulk_mutate_error_index(self): assert isinstance(cause.exceptions[1], DeadlineExceeded) assert isinstance(cause.exceptions[2], FailedPrecondition) - @pytest.mark.asyncio + @CrossSync.pytest async def test_bulk_mutate_error_recovery(self): """ If an error occurs, then resolves, no exception should be raised """ from google.api_core.exceptions import DeadlineExceeded - async with _make_client(project="project") as client: + async with self._make_client(project="project") as client: table = client.get_table("instance", "table") with mock.patch.object(client._gapic_client, "mutate_rows") as mock_gapic: # fail with a retryable error, then a non-retryable one @@ -2610,14 +2715,19 @@ async def test_bulk_mutate_error_recovery(self): await table.bulk_mutate_rows(entries, operation_timeout=1000) -class TestCheckAndMutateRow: +@CrossSync.convert_class("TestCheckAndMutateRow") +class TestCheckAndMutateRowAsync: + @CrossSync.convert + def _make_client(self, *args, **kwargs): + return CrossSync.TestBigtableDataClient._make_client(*args, **kwargs) + @pytest.mark.parametrize("gapic_result", [True, False]) - @pytest.mark.asyncio + @CrossSync.pytest async def test_check_and_mutate(self, gapic_result): from google.cloud.bigtable_v2.types import CheckAndMutateRowResponse app_profile = "app_profile_id" - async with _make_client() as client: + async with self._make_client() as client: async with client.get_table( "instance", "table", app_profile_id=app_profile ) as table: @@ -2654,10 +2764,10 @@ async def test_check_and_mutate(self, gapic_result): assert kwargs["timeout"] == operation_timeout assert kwargs["retry"] is None - @pytest.mark.asyncio + @CrossSync.pytest async def 
test_check_and_mutate_bad_timeout(self): """Should raise error if operation_timeout < 0""" - async with _make_client() as client: + async with self._make_client() as client: async with client.get_table("instance", "table") as table: with pytest.raises(ValueError) as e: await table.check_and_mutate_row( @@ -2669,13 +2779,13 @@ async def test_check_and_mutate_bad_timeout(self): ) assert str(e.value) == "operation_timeout must be greater than 0" - @pytest.mark.asyncio + @CrossSync.pytest async def test_check_and_mutate_single_mutations(self): """if single mutations are passed, they should be internally wrapped in a list""" from google.cloud.bigtable.data.mutations import SetCell from google.cloud.bigtable_v2.types import CheckAndMutateRowResponse - async with _make_client() as client: + async with self._make_client() as client: async with client.get_table("instance", "table") as table: with mock.patch.object( client._gapic_client, "check_and_mutate_row" @@ -2695,7 +2805,7 @@ async def test_check_and_mutate_single_mutations(self): assert kwargs["true_mutations"] == [true_mutation._to_pb()] assert kwargs["false_mutations"] == [false_mutation._to_pb()] - @pytest.mark.asyncio + @CrossSync.pytest async def test_check_and_mutate_predicate_object(self): """predicate filter should be passed to gapic request""" from google.cloud.bigtable_v2.types import CheckAndMutateRowResponse @@ -2703,7 +2813,7 @@ async def test_check_and_mutate_predicate_object(self): mock_predicate = mock.Mock() predicate_pb = {"predicate": "dict"} mock_predicate._to_pb.return_value = predicate_pb - async with _make_client() as client: + async with self._make_client() as client: async with client.get_table("instance", "table") as table: with mock.patch.object( client._gapic_client, "check_and_mutate_row" @@ -2721,7 +2831,7 @@ async def test_check_and_mutate_predicate_object(self): assert mock_predicate._to_pb.call_count == 1 assert kwargs["retry"] is None - @pytest.mark.asyncio + @CrossSync.pytest async def test_check_and_mutate_mutations_parsing(self): """mutations objects should be converted to protos""" from google.cloud.bigtable_v2.types import CheckAndMutateRowResponse @@ -2731,7 +2841,7 @@ async def test_check_and_mutate_mutations_parsing(self): for idx, mutation in enumerate(mutations): mutation._to_pb.return_value = f"fake {idx}" mutations.append(DeleteAllFromRow()) - async with _make_client() as client: + async with self._make_client() as client: async with client.get_table("instance", "table") as table: with mock.patch.object( client._gapic_client, "check_and_mutate_row" @@ -2758,7 +2868,12 @@ async def test_check_and_mutate_mutations_parsing(self): ) -class TestReadModifyWriteRow: +@CrossSync.convert_class("TestReadModifyWriteRow") +class TestReadModifyWriteRowAsync: + @CrossSync.convert + def _make_client(self, *args, **kwargs): + return CrossSync.TestBigtableDataClient._make_client(*args, **kwargs) + @pytest.mark.parametrize( "call_rules,expected_rules", [ @@ -2780,12 +2895,12 @@ class TestReadModifyWriteRow: ), ], ) - @pytest.mark.asyncio + @CrossSync.pytest async def test_read_modify_write_call_rule_args(self, call_rules, expected_rules): """ Test that the gapic call is called with given rules """ - async with _make_client() as client: + async with self._make_client() as client: async with client.get_table("instance", "table") as table: with mock.patch.object( client._gapic_client, "read_modify_write_row" @@ -2797,21 +2912,21 @@ async def test_read_modify_write_call_rule_args(self, call_rules, expected_rules assert 
found_kwargs["retry"] is None @pytest.mark.parametrize("rules", [[], None]) - @pytest.mark.asyncio + @CrossSync.pytest async def test_read_modify_write_no_rules(self, rules): - async with _make_client() as client: + async with self._make_client() as client: async with client.get_table("instance", "table") as table: with pytest.raises(ValueError) as e: await table.read_modify_write_row("key", rules=rules) assert e.value.args[0] == "rules must contain at least one item" - @pytest.mark.asyncio + @CrossSync.pytest async def test_read_modify_write_call_defaults(self): instance = "instance1" table_id = "table1" project = "project1" row_key = "row_key1" - async with _make_client(project=project) as client: + async with self._make_client(project=project) as client: async with client.get_table(instance, table_id) as table: with mock.patch.object( client._gapic_client, "read_modify_write_row" @@ -2827,12 +2942,12 @@ async def test_read_modify_write_call_defaults(self): assert kwargs["row_key"] == row_key.encode() assert kwargs["timeout"] > 1 - @pytest.mark.asyncio + @CrossSync.pytest async def test_read_modify_write_call_overrides(self): row_key = b"row_key1" expected_timeout = 12345 profile_id = "profile1" - async with _make_client() as client: + async with self._make_client() as client: async with client.get_table( "instance", "table_id", app_profile_id=profile_id ) as table: @@ -2850,10 +2965,10 @@ async def test_read_modify_write_call_overrides(self): assert kwargs["row_key"] == row_key assert kwargs["timeout"] == expected_timeout - @pytest.mark.asyncio + @CrossSync.pytest async def test_read_modify_write_string_key(self): row_key = "string_row_key1" - async with _make_client() as client: + async with self._make_client() as client: async with client.get_table("instance", "table_id") as table: with mock.patch.object( client._gapic_client, "read_modify_write_row" @@ -2863,7 +2978,7 @@ async def test_read_modify_write_string_key(self): kwargs = mock_gapic.call_args_list[0][1] assert kwargs["row_key"] == row_key.encode() - @pytest.mark.asyncio + @CrossSync.pytest async def test_read_modify_write_row_building(self): """ results from gapic call should be used to construct row @@ -2873,7 +2988,7 @@ async def test_read_modify_write_row_building(self): from google.cloud.bigtable_v2.types import Row as RowPB mock_response = ReadModifyWriteRowResponse(row=RowPB()) - async with _make_client() as client: + async with self._make_client() as client: async with client.get_table("instance", "table_id") as table: with mock.patch.object( client._gapic_client, "read_modify_write_row" @@ -2883,3 +2998,363 @@ async def test_read_modify_write_row_building(self): await table.read_modify_write_row("key", mock.Mock()) assert constructor_mock.call_count == 1 constructor_mock.assert_called_once_with(mock_response.row) + + +@CrossSync.convert_class("TestExecuteQuery") +class TestExecuteQueryAsync: + TABLE_NAME = "TABLE_NAME" + INSTANCE_NAME = "INSTANCE_NAME" + + @CrossSync.convert + def _make_client(self, *args, **kwargs): + return CrossSync.TestBigtableDataClient._make_client(*args, **kwargs) + + @CrossSync.convert + def _make_gapic_stream(self, sample_list: list["ExecuteQueryResponse" | Exception]): + class MockStream: + def __init__(self, sample_list): + self.sample_list = sample_list + + def __aiter__(self): + return self + + def __iter__(self): + return self + + def __next__(self): + if not self.sample_list: + raise CrossSync.StopIteration + value = self.sample_list.pop(0) + if isinstance(value, Exception): + raise 
value + return value + + async def __anext__(self): + return self.__next__() + + return MockStream(sample_list) + + def resonse_with_metadata(self): + from google.cloud.bigtable_v2.types.bigtable import ExecuteQueryResponse + + schema = {"a": "string_type", "b": "int64_type"} + return ExecuteQueryResponse( + { + "metadata": { + "proto_schema": { + "columns": [ + {"name": name, "type_": {_type: {}}} + for name, _type in schema.items() + ] + } + } + } + ) + + def resonse_with_result(self, *args, resume_token=None): + from google.cloud.bigtable_v2.types.data import ProtoRows, Value as PBValue + from google.cloud.bigtable_v2.types.bigtable import ExecuteQueryResponse + + if resume_token is None: + resume_token_dict = {} + else: + resume_token_dict = {"resume_token": resume_token} + + values = [] + for column_value in args: + if column_value is None: + pb_value = PBValue({}) + else: + pb_value = PBValue( + { + "int_value" + if isinstance(column_value, int) + else "string_value": column_value + } + ) + values.append(pb_value) + rows = ProtoRows(values=values) + + return ExecuteQueryResponse( + { + "results": { + "proto_rows_batch": { + "batch_data": ProtoRows.serialize(rows), + }, + **resume_token_dict, + } + } + ) + + @CrossSync.pytest + async def test_execute_query(self): + values = [ + self.resonse_with_metadata(), + self.resonse_with_result("test"), + self.resonse_with_result(8, resume_token=b"r1"), + self.resonse_with_result("test2"), + self.resonse_with_result(9, resume_token=b"r2"), + self.resonse_with_result("test3"), + self.resonse_with_result(None, resume_token=b"r3"), + ] + client = self._make_client() + with mock.patch.object( + client._gapic_client, "execute_query", CrossSync.Mock() + ) as execute_query_mock: + execute_query_mock.return_value = self._make_gapic_stream(values) + + result = await client.execute_query( + f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME + ) + results = [r async for r in result] + assert results[0]["a"] == "test" + assert results[0]["b"] == 8 + assert results[1]["a"] == "test2" + assert results[1]["b"] == 9 + assert results[2]["a"] == "test3" + assert results[2]["b"] is None + assert execute_query_mock.call_count == 1 + + @CrossSync.pytest + async def test_execute_query_with_params(self): + values = [ + self.resonse_with_metadata(), + self.resonse_with_result("test2"), + self.resonse_with_result(9, resume_token=b"r2"), + ] + client = self._make_client() + with mock.patch.object( + client._gapic_client, "execute_query", CrossSync.Mock() + ) as execute_query_mock: + execute_query_mock.return_value = self._make_gapic_stream(values) + result = await client.execute_query( + f"SELECT a, b FROM {self.TABLE_NAME} WHERE b=@b", + self.INSTANCE_NAME, + parameters={"b": 9}, + ) + results = [r async for r in result] + assert len(results) == 1 + assert results[0]["a"] == "test2" + assert results[0]["b"] == 9 + assert execute_query_mock.call_count == 1 + + @CrossSync.pytest + async def test_execute_query_error_before_metadata(self): + from google.api_core.exceptions import DeadlineExceeded + + values = [ + DeadlineExceeded(""), + self.resonse_with_metadata(), + self.resonse_with_result("test"), + self.resonse_with_result(8, resume_token=b"r1"), + self.resonse_with_result("test2"), + self.resonse_with_result(9, resume_token=b"r2"), + self.resonse_with_result("test3"), + self.resonse_with_result(None, resume_token=b"r3"), + ] + client = self._make_client() + with mock.patch.object( + client._gapic_client, "execute_query", CrossSync.Mock() + ) as 
execute_query_mock: + execute_query_mock.return_value = self._make_gapic_stream(values) + result = await client.execute_query( + f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME + ) + results = [r async for r in result] + assert len(results) == 3 + assert execute_query_mock.call_count == 2 + + @CrossSync.pytest + async def test_execute_query_error_after_metadata(self): + from google.api_core.exceptions import DeadlineExceeded + + values = [ + self.resonse_with_metadata(), + DeadlineExceeded(""), + self.resonse_with_metadata(), + self.resonse_with_result("test"), + self.resonse_with_result(8, resume_token=b"r1"), + self.resonse_with_result("test2"), + self.resonse_with_result(9, resume_token=b"r2"), + self.resonse_with_result("test3"), + self.resonse_with_result(None, resume_token=b"r3"), + ] + client = self._make_client() + with mock.patch.object( + client._gapic_client, "execute_query", CrossSync.Mock() + ) as execute_query_mock: + execute_query_mock.return_value = self._make_gapic_stream(values) + result = await client.execute_query( + f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME + ) + results = [r async for r in result] + assert len(results) == 3 + assert execute_query_mock.call_count == 2 + requests = [args[0][0] for args in execute_query_mock.call_args_list] + resume_tokens = [r.resume_token for r in requests if r.resume_token] + assert resume_tokens == [] + + @CrossSync.pytest + async def test_execute_query_with_retries(self): + from google.api_core.exceptions import DeadlineExceeded + + values = [ + self.resonse_with_metadata(), + self.resonse_with_result("test"), + self.resonse_with_result(8, resume_token=b"r1"), + DeadlineExceeded(""), + self.resonse_with_result("test2"), + self.resonse_with_result(9, resume_token=b"r2"), + self.resonse_with_result("test3"), + DeadlineExceeded(""), + self.resonse_with_result("test3"), + self.resonse_with_result(None, resume_token=b"r3"), + ] + client = self._make_client() + with mock.patch.object( + client._gapic_client, "execute_query", CrossSync.Mock() + ) as execute_query_mock: + execute_query_mock.return_value = self._make_gapic_stream(values) + result = await client.execute_query( + f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME + ) + results = [r async for r in result] + assert results[0]["a"] == "test" + assert results[0]["b"] == 8 + assert results[1]["a"] == "test2" + assert results[1]["b"] == 9 + assert results[2]["a"] == "test3" + assert results[2]["b"] is None + assert len(results) == 3 + requests = [args[0][0] for args in execute_query_mock.call_args_list] + resume_tokens = [r.resume_token for r in requests if r.resume_token] + assert resume_tokens == [b"r1", b"r2"] + + @pytest.mark.parametrize( + "exception", + [ + (core_exceptions.DeadlineExceeded("")), + (core_exceptions.Aborted("")), + (core_exceptions.ServiceUnavailable("")), + ], + ) + @CrossSync.pytest + async def test_execute_query_retryable_error(self, exception): + values = [ + self.resonse_with_metadata(), + self.resonse_with_result("test", resume_token=b"t1"), + exception, + self.resonse_with_result(8, resume_token=b"t2"), + ] + client = self._make_client() + with mock.patch.object( + client._gapic_client, "execute_query", CrossSync.Mock() + ) as execute_query_mock: + execute_query_mock.return_value = self._make_gapic_stream(values) + + result = await client.execute_query( + f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME + ) + results = [r async for r in result] + assert len(results) == 1 + assert execute_query_mock.call_count == 
2 + requests = [args[0][0] for args in execute_query_mock.call_args_list] + resume_tokens = [r.resume_token for r in requests if r.resume_token] + assert resume_tokens == [b"t1"] + + @CrossSync.pytest + async def test_execute_query_retry_partial_row(self): + values = [ + self.resonse_with_metadata(), + self.resonse_with_result("test", resume_token=b"t1"), + core_exceptions.DeadlineExceeded(""), + self.resonse_with_result(8, resume_token=b"t2"), + ] + client = self._make_client() + with mock.patch.object( + client._gapic_client, "execute_query", CrossSync.Mock() + ) as execute_query_mock: + execute_query_mock.return_value = self._make_gapic_stream(values) + + result = await client.execute_query( + f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME + ) + results = [r async for r in result] + assert results[0]["a"] == "test" + assert results[0]["b"] == 8 + assert execute_query_mock.call_count == 2 + requests = [args[0][0] for args in execute_query_mock.call_args_list] + resume_tokens = [r.resume_token for r in requests if r.resume_token] + assert resume_tokens == [b"t1"] + + @pytest.mark.parametrize( + "ExceptionType", + [ + (core_exceptions.InvalidArgument), + (core_exceptions.FailedPrecondition), + (core_exceptions.PermissionDenied), + (core_exceptions.MethodNotImplemented), + (core_exceptions.Cancelled), + (core_exceptions.AlreadyExists), + (core_exceptions.OutOfRange), + (core_exceptions.DataLoss), + (core_exceptions.Unauthenticated), + (core_exceptions.NotFound), + (core_exceptions.ResourceExhausted), + (core_exceptions.Unknown), + (core_exceptions.InternalServerError), + ], + ) + @CrossSync.pytest + async def test_execute_query_non_retryable(self, ExceptionType): + values = [ + self.resonse_with_metadata(), + self.resonse_with_result("test"), + self.resonse_with_result(8, resume_token=b"r1"), + ExceptionType(""), + self.resonse_with_result("test2"), + self.resonse_with_result(9, resume_token=b"r2"), + self.resonse_with_result("test3"), + self.resonse_with_result(None, resume_token=b"r3"), + ] + client = self._make_client() + with mock.patch.object( + client._gapic_client, "execute_query", CrossSync.Mock() + ) as execute_query_mock: + execute_query_mock.return_value = self._make_gapic_stream(values) + + result = await client.execute_query( + f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME + ) + r = await CrossSync.next(result) + assert r["a"] == "test" + assert r["b"] == 8 + + with pytest.raises(ExceptionType): + r = await CrossSync.next(result) + + assert execute_query_mock.call_count == 1 + requests = [args[0][0] for args in execute_query_mock.call_args_list] + resume_tokens = [r.resume_token for r in requests if r.resume_token] + assert resume_tokens == [] + + @CrossSync.pytest + async def test_execute_query_metadata_received_multiple_times_detected(self): + values = [ + self.resonse_with_metadata(), + self.resonse_with_metadata(), + ] + client = self._make_client() + with mock.patch.object( + client._gapic_client, "execute_query", CrossSync.Mock() + ) as execute_query_mock: + execute_query_mock.return_value = self._make_gapic_stream(values) + with pytest.raises( + Exception, match="Invalid ExecuteQuery response received" + ): + [ + r + async for r in await client.execute_query( + f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME + ) + ] diff --git a/packages/google-cloud-bigtable/tests/unit/data/_async/test_mutations_batcher.py b/packages/google-cloud-bigtable/tests/unit/data/_async/test_mutations_batcher.py index cca7c982443d..cd442d392b08 100644 --- 
a/packages/google-cloud-bigtable/tests/unit/data/_async/test_mutations_batcher.py +++ b/packages/google-cloud-bigtable/tests/unit/data/_async/test_mutations_batcher.py @@ -13,34 +13,35 @@ # limitations under the License. import pytest +import mock import asyncio +import time import google.api_core.exceptions as core_exceptions +import google.api_core.retry from google.cloud.bigtable.data.exceptions import _MutateRowsIncomplete from google.cloud.bigtable.data import TABLE_DEFAULT -# try/except added for compatibility with python < 3.8 -try: - from unittest import mock - from unittest.mock import AsyncMock -except ImportError: # pragma: NO COVER - import mock # type: ignore - from mock import AsyncMock # type: ignore +from google.cloud.bigtable.data._cross_sync import CrossSync +__CROSS_SYNC_OUTPUT__ = "tests.unit.data._sync_autogen.test_mutations_batcher" -def _make_mutation(count=1, size=1): - mutation = mock.Mock() - mutation.size.return_value = size - mutation.mutations = [mock.Mock()] * count - return mutation +@CrossSync.convert_class(sync_name="Test_FlowControl") +class Test_FlowControlAsync: + @staticmethod + @CrossSync.convert + def _target_class(): + return CrossSync._FlowControl -class Test_FlowControl: def _make_one(self, max_mutation_count=10, max_mutation_bytes=100): - from google.cloud.bigtable.data._async.mutations_batcher import ( - _FlowControlAsync, - ) + return self._target_class()(max_mutation_count, max_mutation_bytes) - return _FlowControlAsync(max_mutation_count, max_mutation_bytes) + @staticmethod + def _make_mutation(count=1, size=1): + mutation = mock.Mock() + mutation.size.return_value = size + mutation.mutations = [mock.Mock()] * count + return mutation def test_ctor(self): max_mutation_count = 9 @@ -50,7 +51,7 @@ def test_ctor(self): assert instance._max_mutation_bytes == max_mutation_bytes assert instance._in_flight_mutation_count == 0 assert instance._in_flight_mutation_bytes == 0 - assert isinstance(instance._capacity_condition, asyncio.Condition) + assert isinstance(instance._capacity_condition, CrossSync.Condition) def test_ctor_invalid_values(self): """Test that values are positive, and fit within expected limits""" @@ -110,7 +111,7 @@ def test__has_capacity( instance._in_flight_mutation_bytes = existing_size assert instance._has_capacity(new_count, new_size) == expected - @pytest.mark.asyncio + @CrossSync.pytest @pytest.mark.parametrize( "existing_count,existing_size,added_count,added_size,new_count,new_size", [ @@ -138,12 +139,12 @@ async def test_remove_from_flow_value_update( instance = self._make_one() instance._in_flight_mutation_count = existing_count instance._in_flight_mutation_bytes = existing_size - mutation = _make_mutation(added_count, added_size) + mutation = self._make_mutation(added_count, added_size) await instance.remove_from_flow(mutation) assert instance._in_flight_mutation_count == new_count assert instance._in_flight_mutation_bytes == new_size - @pytest.mark.asyncio + @CrossSync.pytest async def test__remove_from_flow_unlock(self): """capacity condition should notify after mutation is complete""" instance = self._make_one(10, 10) @@ -156,36 +157,50 @@ async def task_routine(): lambda: instance._has_capacity(1, 1) ) - task = asyncio.create_task(task_routine()) - await asyncio.sleep(0.05) + if CrossSync.is_async: + # for async class, build task to test flow unlock + task = asyncio.create_task(task_routine()) + + def task_alive(): + return not task.done() + + else: + # this branch will be tested in sync version of this test + import 
threading + + thread = threading.Thread(target=task_routine) + thread.start() + task_alive = thread.is_alive + await CrossSync.sleep(0.05) # should be blocked due to capacity - assert task.done() is False + assert task_alive() is True # try changing size - mutation = _make_mutation(count=0, size=5) + mutation = self._make_mutation(count=0, size=5) + await instance.remove_from_flow([mutation]) - await asyncio.sleep(0.05) + await CrossSync.sleep(0.05) assert instance._in_flight_mutation_count == 10 assert instance._in_flight_mutation_bytes == 5 - assert task.done() is False + assert task_alive() is True # try changing count instance._in_flight_mutation_bytes = 10 - mutation = _make_mutation(count=5, size=0) + mutation = self._make_mutation(count=5, size=0) await instance.remove_from_flow([mutation]) - await asyncio.sleep(0.05) + await CrossSync.sleep(0.05) assert instance._in_flight_mutation_count == 5 assert instance._in_flight_mutation_bytes == 10 - assert task.done() is False + assert task_alive() is True # try changing both instance._in_flight_mutation_count = 10 - mutation = _make_mutation(count=5, size=5) + mutation = self._make_mutation(count=5, size=5) await instance.remove_from_flow([mutation]) - await asyncio.sleep(0.05) + await CrossSync.sleep(0.05) assert instance._in_flight_mutation_count == 5 assert instance._in_flight_mutation_bytes == 5 # task should be complete - assert task.done() is True + assert task_alive() is False - @pytest.mark.asyncio + @CrossSync.pytest @pytest.mark.parametrize( "mutations,count_cap,size_cap,expected_results", [ @@ -210,7 +225,7 @@ async def test_add_to_flow(self, mutations, count_cap, size_cap, expected_result """ Test batching with various flow control settings """ - mutation_objs = [_make_mutation(count=m[0], size=m[1]) for m in mutations] + mutation_objs = [self._make_mutation(count=m[0], size=m[1]) for m in mutations] instance = self._make_one(count_cap, size_cap) i = 0 async for batch in instance.add_to_flow(mutation_objs): @@ -226,7 +241,7 @@ async def test_add_to_flow(self, mutations, count_cap, size_cap, expected_result i += 1 assert i == len(expected_results) - @pytest.mark.asyncio + @CrossSync.pytest @pytest.mark.parametrize( "mutations,max_limit,expected_results", [ @@ -242,11 +257,12 @@ async def test_add_to_flow_max_mutation_limits( Test flow control running up against the max API limit Should submit request early, even if the flow control has room for more """ - with mock.patch( - "google.cloud.bigtable.data._async.mutations_batcher._MUTATE_ROWS_REQUEST_MUTATION_LIMIT", - max_limit, - ): - mutation_objs = [_make_mutation(count=m[0], size=m[1]) for m in mutations] + subpath = "_async" if CrossSync.is_async else "_sync_autogen" + path = f"google.cloud.bigtable.data.{subpath}.mutations_batcher._MUTATE_ROWS_REQUEST_MUTATION_LIMIT" + with mock.patch(path, max_limit): + mutation_objs = [ + self._make_mutation(count=m[0], size=m[1]) for m in mutations + ] # flow control has no limits except API restrictions instance = self._make_one(float("inf"), float("inf")) i = 0 @@ -263,14 +279,14 @@ async def test_add_to_flow_max_mutation_limits( i += 1 assert i == len(expected_results) - @pytest.mark.asyncio + @CrossSync.pytest async def test_add_to_flow_oversize(self): """ mutations over the flow control limits should still be accepted """ instance = self._make_one(2, 3) - large_size_mutation = _make_mutation(count=1, size=10) - large_count_mutation = _make_mutation(count=10, size=1) + large_size_mutation = self._make_mutation(count=1, size=10) + 
large_count_mutation = self._make_mutation(count=10, size=1) results = [out async for out in instance.add_to_flow([large_size_mutation])] assert len(results) == 1 await instance.remove_from_flow(results[0]) @@ -280,13 +296,11 @@ async def test_add_to_flow_oversize(self): assert len(count_results) == 1 +@CrossSync.convert_class(sync_name="TestMutationsBatcher") class TestMutationsBatcherAsync: + @CrossSync.convert def _get_target_class(self): - from google.cloud.bigtable.data._async.mutations_batcher import ( - MutationsBatcherAsync, - ) - - return MutationsBatcherAsync + return CrossSync.MutationsBatcher def _make_one(self, table=None, **kwargs): from google.api_core.exceptions import DeadlineExceeded @@ -303,132 +317,140 @@ def _make_one(self, table=None, **kwargs): return self._get_target_class()(table, **kwargs) - @mock.patch( - "google.cloud.bigtable.data._async.mutations_batcher.MutationsBatcherAsync._start_flush_timer" - ) - @pytest.mark.asyncio - async def test_ctor_defaults(self, flush_timer_mock): - flush_timer_mock.return_value = asyncio.create_task(asyncio.sleep(0)) - table = mock.Mock() - table.default_mutate_rows_operation_timeout = 10 - table.default_mutate_rows_attempt_timeout = 8 - table.default_mutate_rows_retryable_errors = [Exception] - async with self._make_one(table) as instance: - assert instance._table == table - assert instance.closed is False - assert instance._flush_jobs == set() - assert len(instance._staged_entries) == 0 - assert len(instance._oldest_exceptions) == 0 - assert len(instance._newest_exceptions) == 0 - assert instance._exception_list_limit == 10 - assert instance._exceptions_since_last_raise == 0 - assert instance._flow_control._max_mutation_count == 100000 - assert instance._flow_control._max_mutation_bytes == 104857600 - assert instance._flow_control._in_flight_mutation_count == 0 - assert instance._flow_control._in_flight_mutation_bytes == 0 - assert instance._entries_processed_since_last_raise == 0 - assert ( - instance._operation_timeout - == table.default_mutate_rows_operation_timeout - ) - assert ( - instance._attempt_timeout == table.default_mutate_rows_attempt_timeout - ) - assert ( - instance._retryable_errors == table.default_mutate_rows_retryable_errors - ) - await asyncio.sleep(0) - assert flush_timer_mock.call_count == 1 - assert flush_timer_mock.call_args[0][0] == 5 - assert isinstance(instance._flush_timer, asyncio.Future) + @staticmethod + def _make_mutation(count=1, size=1): + mutation = mock.Mock() + mutation.size.return_value = size + mutation.mutations = [mock.Mock()] * count + return mutation - @mock.patch( - "google.cloud.bigtable.data._async.mutations_batcher.MutationsBatcherAsync._start_flush_timer", - ) - @pytest.mark.asyncio - async def test_ctor_explicit(self, flush_timer_mock): + @CrossSync.pytest + async def test_ctor_defaults(self): + with mock.patch.object( + self._get_target_class(), "_timer_routine", return_value=CrossSync.Future() + ) as flush_timer_mock: + table = mock.Mock() + table.default_mutate_rows_operation_timeout = 10 + table.default_mutate_rows_attempt_timeout = 8 + table.default_mutate_rows_retryable_errors = [Exception] + async with self._make_one(table) as instance: + assert instance._table == table + assert instance.closed is False + assert instance._flush_jobs == set() + assert len(instance._staged_entries) == 0 + assert len(instance._oldest_exceptions) == 0 + assert len(instance._newest_exceptions) == 0 + assert instance._exception_list_limit == 10 + assert instance._exceptions_since_last_raise == 0 
+ assert instance._flow_control._max_mutation_count == 100000 + assert instance._flow_control._max_mutation_bytes == 104857600 + assert instance._flow_control._in_flight_mutation_count == 0 + assert instance._flow_control._in_flight_mutation_bytes == 0 + assert instance._entries_processed_since_last_raise == 0 + assert ( + instance._operation_timeout + == table.default_mutate_rows_operation_timeout + ) + assert ( + instance._attempt_timeout + == table.default_mutate_rows_attempt_timeout + ) + assert ( + instance._retryable_errors + == table.default_mutate_rows_retryable_errors + ) + await CrossSync.yield_to_event_loop() + assert flush_timer_mock.call_count == 1 + assert flush_timer_mock.call_args[0][0] == 5 + assert isinstance(instance._flush_timer, CrossSync.Future) + + @CrossSync.pytest + async def test_ctor_explicit(self): """Test with explicit parameters""" - flush_timer_mock.return_value = asyncio.create_task(asyncio.sleep(0)) - table = mock.Mock() - flush_interval = 20 - flush_limit_count = 17 - flush_limit_bytes = 19 - flow_control_max_mutation_count = 1001 - flow_control_max_bytes = 12 - operation_timeout = 11 - attempt_timeout = 2 - retryable_errors = [Exception] - async with self._make_one( - table, - flush_interval=flush_interval, - flush_limit_mutation_count=flush_limit_count, - flush_limit_bytes=flush_limit_bytes, - flow_control_max_mutation_count=flow_control_max_mutation_count, - flow_control_max_bytes=flow_control_max_bytes, - batch_operation_timeout=operation_timeout, - batch_attempt_timeout=attempt_timeout, - batch_retryable_errors=retryable_errors, - ) as instance: - assert instance._table == table - assert instance.closed is False - assert instance._flush_jobs == set() - assert len(instance._staged_entries) == 0 - assert len(instance._oldest_exceptions) == 0 - assert len(instance._newest_exceptions) == 0 - assert instance._exception_list_limit == 10 - assert instance._exceptions_since_last_raise == 0 - assert ( - instance._flow_control._max_mutation_count - == flow_control_max_mutation_count - ) - assert instance._flow_control._max_mutation_bytes == flow_control_max_bytes - assert instance._flow_control._in_flight_mutation_count == 0 - assert instance._flow_control._in_flight_mutation_bytes == 0 - assert instance._entries_processed_since_last_raise == 0 - assert instance._operation_timeout == operation_timeout - assert instance._attempt_timeout == attempt_timeout - assert instance._retryable_errors == retryable_errors - await asyncio.sleep(0) - assert flush_timer_mock.call_count == 1 - assert flush_timer_mock.call_args[0][0] == flush_interval - assert isinstance(instance._flush_timer, asyncio.Future) - - @mock.patch( - "google.cloud.bigtable.data._async.mutations_batcher.MutationsBatcherAsync._start_flush_timer" - ) - @pytest.mark.asyncio - async def test_ctor_no_flush_limits(self, flush_timer_mock): + with mock.patch.object( + self._get_target_class(), "_timer_routine", return_value=CrossSync.Future() + ) as flush_timer_mock: + table = mock.Mock() + flush_interval = 20 + flush_limit_count = 17 + flush_limit_bytes = 19 + flow_control_max_mutation_count = 1001 + flow_control_max_bytes = 12 + operation_timeout = 11 + attempt_timeout = 2 + retryable_errors = [Exception] + async with self._make_one( + table, + flush_interval=flush_interval, + flush_limit_mutation_count=flush_limit_count, + flush_limit_bytes=flush_limit_bytes, + flow_control_max_mutation_count=flow_control_max_mutation_count, + flow_control_max_bytes=flow_control_max_bytes, + 
batch_operation_timeout=operation_timeout, + batch_attempt_timeout=attempt_timeout, + batch_retryable_errors=retryable_errors, + ) as instance: + assert instance._table == table + assert instance.closed is False + assert instance._flush_jobs == set() + assert len(instance._staged_entries) == 0 + assert len(instance._oldest_exceptions) == 0 + assert len(instance._newest_exceptions) == 0 + assert instance._exception_list_limit == 10 + assert instance._exceptions_since_last_raise == 0 + assert ( + instance._flow_control._max_mutation_count + == flow_control_max_mutation_count + ) + assert ( + instance._flow_control._max_mutation_bytes == flow_control_max_bytes + ) + assert instance._flow_control._in_flight_mutation_count == 0 + assert instance._flow_control._in_flight_mutation_bytes == 0 + assert instance._entries_processed_since_last_raise == 0 + assert instance._operation_timeout == operation_timeout + assert instance._attempt_timeout == attempt_timeout + assert instance._retryable_errors == retryable_errors + await CrossSync.yield_to_event_loop() + assert flush_timer_mock.call_count == 1 + assert flush_timer_mock.call_args[0][0] == flush_interval + assert isinstance(instance._flush_timer, CrossSync.Future) + + @CrossSync.pytest + async def test_ctor_no_flush_limits(self): """Test with None for flush limits""" - flush_timer_mock.return_value = asyncio.create_task(asyncio.sleep(0)) - table = mock.Mock() - table.default_mutate_rows_operation_timeout = 10 - table.default_mutate_rows_attempt_timeout = 8 - table.default_mutate_rows_retryable_errors = () - flush_interval = None - flush_limit_count = None - flush_limit_bytes = None - async with self._make_one( - table, - flush_interval=flush_interval, - flush_limit_mutation_count=flush_limit_count, - flush_limit_bytes=flush_limit_bytes, - ) as instance: - assert instance._table == table - assert instance.closed is False - assert instance._staged_entries == [] - assert len(instance._oldest_exceptions) == 0 - assert len(instance._newest_exceptions) == 0 - assert instance._exception_list_limit == 10 - assert instance._exceptions_since_last_raise == 0 - assert instance._flow_control._in_flight_mutation_count == 0 - assert instance._flow_control._in_flight_mutation_bytes == 0 - assert instance._entries_processed_since_last_raise == 0 - await asyncio.sleep(0) - assert flush_timer_mock.call_count == 1 - assert flush_timer_mock.call_args[0][0] is None - assert isinstance(instance._flush_timer, asyncio.Future) + with mock.patch.object( + self._get_target_class(), "_timer_routine", return_value=CrossSync.Future() + ) as flush_timer_mock: + table = mock.Mock() + table.default_mutate_rows_operation_timeout = 10 + table.default_mutate_rows_attempt_timeout = 8 + table.default_mutate_rows_retryable_errors = () + flush_interval = None + flush_limit_count = None + flush_limit_bytes = None + async with self._make_one( + table, + flush_interval=flush_interval, + flush_limit_mutation_count=flush_limit_count, + flush_limit_bytes=flush_limit_bytes, + ) as instance: + assert instance._table == table + assert instance.closed is False + assert instance._staged_entries == [] + assert len(instance._oldest_exceptions) == 0 + assert len(instance._newest_exceptions) == 0 + assert instance._exception_list_limit == 10 + assert instance._exceptions_since_last_raise == 0 + assert instance._flow_control._in_flight_mutation_count == 0 + assert instance._flow_control._in_flight_mutation_bytes == 0 + assert instance._entries_processed_since_last_raise == 0 + await 
CrossSync.yield_to_event_loop() + assert flush_timer_mock.call_count == 1 + assert flush_timer_mock.call_args[0][0] is None + assert isinstance(instance._flush_timer, CrossSync.Future) - @pytest.mark.asyncio + @CrossSync.pytest async def test_ctor_invalid_values(self): """Test that timeout values are positive, and fit within expected limits""" with pytest.raises(ValueError) as e: @@ -438,24 +460,21 @@ async def test_ctor_invalid_values(self): self._make_one(batch_attempt_timeout=-1) assert "attempt_timeout must be greater than 0" in str(e.value) + @CrossSync.convert def test_default_argument_consistency(self): """ We supply default arguments in MutationsBatcherAsync.__init__, and in table.mutations_batcher. Make sure any changes to defaults are applied to both places """ - from google.cloud.bigtable.data._async.client import TableAsync - from google.cloud.bigtable.data._async.mutations_batcher import ( - MutationsBatcherAsync, - ) import inspect get_batcher_signature = dict( - inspect.signature(TableAsync.mutations_batcher).parameters + inspect.signature(CrossSync.Table.mutations_batcher).parameters ) get_batcher_signature.pop("self") batcher_init_signature = dict( - inspect.signature(MutationsBatcherAsync).parameters + inspect.signature(self._get_target_class()).parameters ) batcher_init_signature.pop("table") # both should have same number of arguments @@ -470,97 +489,96 @@ def test_default_argument_consistency(self): == batcher_init_signature[arg_name].default ) - @mock.patch( - "google.cloud.bigtable.data._async.mutations_batcher.MutationsBatcherAsync._schedule_flush" - ) - @pytest.mark.asyncio - async def test__start_flush_timer_w_None(self, flush_mock): - """Empty timer should return immediately""" - async with self._make_one() as instance: - with mock.patch("asyncio.sleep") as sleep_mock: - await instance._start_flush_timer(None) - assert sleep_mock.call_count == 0 - assert flush_mock.call_count == 0 + @CrossSync.pytest + @pytest.mark.parametrize("input_val", [None, 0, -1]) + async def test__start_flush_timer_w_empty_input(self, input_val): + """Empty/invalid timer should return immediately""" + with mock.patch.object( + self._get_target_class(), "_schedule_flush" + ) as flush_mock: + # mock different method depending on sync vs async + async with self._make_one() as instance: + if CrossSync.is_async: + sleep_obj, sleep_method = asyncio, "wait_for" + else: + sleep_obj, sleep_method = instance._closed, "wait" + with mock.patch.object(sleep_obj, sleep_method) as sleep_mock: + result = await instance._timer_routine(input_val) + assert sleep_mock.call_count == 0 + assert flush_mock.call_count == 0 + assert result is None - @mock.patch( - "google.cloud.bigtable.data._async.mutations_batcher.MutationsBatcherAsync._schedule_flush" - ) - @pytest.mark.asyncio - async def test__start_flush_timer_call_when_closed(self, flush_mock): + @CrossSync.pytest + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + async def test__start_flush_timer_call_when_closed( + self, + ): """closed batcher's timer should return immediately""" - async with self._make_one() as instance: - await instance.close() - flush_mock.reset_mock() - with mock.patch("asyncio.sleep") as sleep_mock: - await instance._start_flush_timer(1) - assert sleep_mock.call_count == 0 - assert flush_mock.call_count == 0 + with mock.patch.object( + self._get_target_class(), "_schedule_flush" + ) as flush_mock: + async with self._make_one() as instance: + await instance.close() + flush_mock.reset_mock() + # mock different method depending 
on sync vs async + if CrossSync.is_async: + sleep_obj, sleep_method = asyncio, "wait_for" + else: + sleep_obj, sleep_method = instance._closed, "wait" + with mock.patch.object(sleep_obj, sleep_method) as sleep_mock: + await instance._timer_routine(10) + assert sleep_mock.call_count == 0 + assert flush_mock.call_count == 0 - @mock.patch( - "google.cloud.bigtable.data._async.mutations_batcher.MutationsBatcherAsync._schedule_flush" - ) - @pytest.mark.asyncio - async def test__flush_timer(self, flush_mock): + @CrossSync.pytest + @pytest.mark.parametrize("num_staged", [0, 1, 10]) + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + async def test__flush_timer(self, num_staged): """Timer should continue to call _schedule_flush in a loop""" - expected_sleep = 12 - async with self._make_one(flush_interval=expected_sleep) as instance: - instance._staged_entries = [mock.Mock()] - loop_num = 3 - with mock.patch("asyncio.sleep") as sleep_mock: - sleep_mock.side_effect = [None] * loop_num + [asyncio.CancelledError()] - try: - await instance._flush_timer - except asyncio.CancelledError: - pass - assert sleep_mock.call_count == loop_num + 1 - sleep_mock.assert_called_with(expected_sleep) - assert flush_mock.call_count == loop_num - - @mock.patch( - "google.cloud.bigtable.data._async.mutations_batcher.MutationsBatcherAsync._schedule_flush" - ) - @pytest.mark.asyncio - async def test__flush_timer_no_mutations(self, flush_mock): - """Timer should not flush if no new mutations have been staged""" - expected_sleep = 12 - async with self._make_one(flush_interval=expected_sleep) as instance: - loop_num = 3 - with mock.patch("asyncio.sleep") as sleep_mock: - sleep_mock.side_effect = [None] * loop_num + [asyncio.CancelledError()] - try: - await instance._flush_timer - except asyncio.CancelledError: - pass - assert sleep_mock.call_count == loop_num + 1 - sleep_mock.assert_called_with(expected_sleep) - assert flush_mock.call_count == 0 + from google.cloud.bigtable.data._cross_sync import CrossSync - @mock.patch( - "google.cloud.bigtable.data._async.mutations_batcher.MutationsBatcherAsync._schedule_flush" - ) - @pytest.mark.asyncio - async def test__flush_timer_close(self, flush_mock): + with mock.patch.object( + self._get_target_class(), "_schedule_flush" + ) as flush_mock: + expected_sleep = 12 + async with self._make_one(flush_interval=expected_sleep) as instance: + loop_num = 3 + instance._staged_entries = [mock.Mock()] * num_staged + with mock.patch.object(CrossSync, "event_wait") as sleep_mock: + sleep_mock.side_effect = [None] * loop_num + [TabError("expected")] + with pytest.raises(TabError): + await self._get_target_class()._timer_routine( + instance, expected_sleep + ) + if CrossSync.is_async: + # replace with np-op so there are no issues on close + instance._flush_timer = CrossSync.Future() + assert sleep_mock.call_count == loop_num + 1 + sleep_kwargs = sleep_mock.call_args[1] + assert sleep_kwargs["timeout"] == expected_sleep + assert flush_mock.call_count == (0 if num_staged == 0 else loop_num) + + @CrossSync.pytest + async def test__flush_timer_close(self): """Timer should continue terminate after close""" - async with self._make_one() as instance: - with mock.patch("asyncio.sleep"): + with mock.patch.object(self._get_target_class(), "_schedule_flush"): + async with self._make_one() as instance: # let task run in background - await asyncio.sleep(0.5) assert instance._flush_timer.done() is False # close the batcher await instance.close() - await asyncio.sleep(0.1) # task should be complete assert 
instance._flush_timer.done() is True - @pytest.mark.asyncio + @CrossSync.pytest async def test_append_closed(self): """Should raise exception""" + instance = self._make_one() + await instance.close() with pytest.raises(RuntimeError): - instance = self._make_one() - await instance.close() await instance.append(mock.Mock()) - @pytest.mark.asyncio + @CrossSync.pytest async def test_append_wrong_mutation(self): """ Mutation objects should raise an exception. @@ -574,13 +592,13 @@ async def test_append_wrong_mutation(self): await instance.append(DeleteAllFromRow()) assert str(e.value) == expected_error - @pytest.mark.asyncio + @CrossSync.pytest async def test_append_outside_flow_limits(self): """entries larger than mutation limits are still processed""" async with self._make_one( flow_control_max_mutation_count=1, flow_control_max_bytes=1 ) as instance: - oversized_entry = _make_mutation(count=0, size=2) + oversized_entry = self._make_mutation(count=0, size=2) await instance.append(oversized_entry) assert instance._staged_entries == [oversized_entry] assert instance._staged_count == 0 @@ -589,25 +607,21 @@ async def test_append_outside_flow_limits(self): async with self._make_one( flow_control_max_mutation_count=1, flow_control_max_bytes=1 ) as instance: - overcount_entry = _make_mutation(count=2, size=0) + overcount_entry = self._make_mutation(count=2, size=0) await instance.append(overcount_entry) assert instance._staged_entries == [overcount_entry] assert instance._staged_count == 2 assert instance._staged_bytes == 0 instance._staged_entries = [] - @pytest.mark.asyncio + @CrossSync.pytest async def test_append_flush_runs_after_limit_hit(self): """ If the user appends a bunch of entries above the flush limits back-to-back, it should still flush in a single task """ - from google.cloud.bigtable.data._async.mutations_batcher import ( - MutationsBatcherAsync, - ) - with mock.patch.object( - MutationsBatcherAsync, "_execute_mutate_rows" + self._get_target_class(), "_execute_mutate_rows" ) as op_mock: async with self._make_one(flush_limit_bytes=100) as instance: # mock network calls @@ -616,13 +630,13 @@ async def mock_call(*args, **kwargs): op_mock.side_effect = mock_call # append a mutation just under the size limit - await instance.append(_make_mutation(size=99)) + await instance.append(self._make_mutation(size=99)) # append a bunch of entries back-to-back in a loop num_entries = 10 for _ in range(num_entries): - await instance.append(_make_mutation(size=1)) + await instance.append(self._make_mutation(size=1)) # let any flush jobs finish - await asyncio.gather(*instance._flush_jobs) + await instance._wait_for_batch_results(*instance._flush_jobs) # should have only flushed once, with large mutation and first mutation in loop assert op_mock.call_count == 1 sent_batch = op_mock.call_args[0][0] @@ -642,7 +656,8 @@ async def mock_call(*args, **kwargs): (1, 1, 0, 0, False), ], ) - @pytest.mark.asyncio + @CrossSync.pytest + @pytest.mark.filterwarnings("ignore::RuntimeWarning") async def test_append( self, flush_count, flush_bytes, mutation_count, mutation_bytes, expect_flush ): @@ -653,7 +668,7 @@ async def test_append( assert instance._staged_count == 0 assert instance._staged_bytes == 0 assert instance._staged_entries == [] - mutation = _make_mutation(count=mutation_count, size=mutation_bytes) + mutation = self._make_mutation(count=mutation_count, size=mutation_bytes) with mock.patch.object(instance, "_schedule_flush") as flush_mock: await instance.append(mutation) assert flush_mock.call_count == 
bool(expect_flush) @@ -662,7 +677,7 @@ async def test_append( assert instance._staged_entries == [mutation] instance._staged_entries = [] - @pytest.mark.asyncio + @CrossSync.pytest async def test_append_multiple_sequentially(self): """Append multiple mutations""" async with self._make_one( @@ -671,7 +686,7 @@ async def test_append_multiple_sequentially(self): assert instance._staged_count == 0 assert instance._staged_bytes == 0 assert instance._staged_entries == [] - mutation = _make_mutation(count=2, size=3) + mutation = self._make_mutation(count=2, size=3) with mock.patch.object(instance, "_schedule_flush") as flush_mock: await instance.append(mutation) assert flush_mock.call_count == 0 @@ -690,7 +705,7 @@ async def test_append_multiple_sequentially(self): assert len(instance._staged_entries) == 3 instance._staged_entries = [] - @pytest.mark.asyncio + @CrossSync.pytest async def test_flush_flow_control_concurrent_requests(self): """ requests should happen in parallel if flow control breaks up single flush into batches @@ -698,14 +713,14 @@ async def test_flush_flow_control_concurrent_requests(self): import time num_calls = 10 - fake_mutations = [_make_mutation(count=1) for _ in range(num_calls)] + fake_mutations = [self._make_mutation(count=1) for _ in range(num_calls)] async with self._make_one(flow_control_max_mutation_count=1) as instance: with mock.patch.object( - instance, "_execute_mutate_rows", AsyncMock() + instance, "_execute_mutate_rows", CrossSync.Mock() ) as op_mock: # mock network calls async def mock_call(*args, **kwargs): - await asyncio.sleep(0.1) + await CrossSync.sleep(0.1) return [] op_mock.side_effect = mock_call @@ -713,15 +728,15 @@ async def mock_call(*args, **kwargs): # flush one large batch, that will be broken up into smaller batches instance._staged_entries = fake_mutations instance._schedule_flush() - await asyncio.sleep(0.01) + await CrossSync.sleep(0.01) # make room for new mutations for i in range(num_calls): await instance._flow_control.remove_from_flow( - [_make_mutation(count=1)] + [self._make_mutation(count=1)] ) - await asyncio.sleep(0.01) + await CrossSync.sleep(0.01) # allow flushes to complete - await asyncio.gather(*instance._flush_jobs) + await instance._wait_for_batch_results(*instance._flush_jobs) duration = time.monotonic() - start_time assert len(instance._oldest_exceptions) == 0 assert len(instance._newest_exceptions) == 0 @@ -729,7 +744,7 @@ async def mock_call(*args, **kwargs): assert duration < 0.5 assert op_mock.call_count == num_calls - @pytest.mark.asyncio + @CrossSync.pytest async def test_schedule_flush_no_mutations(self): """schedule flush should return None if no staged mutations""" async with self._make_one() as instance: @@ -738,11 +753,15 @@ async def test_schedule_flush_no_mutations(self): assert instance._schedule_flush() is None assert flush_mock.call_count == 0 - @pytest.mark.asyncio + @CrossSync.pytest + @pytest.mark.filterwarnings("ignore::RuntimeWarning") async def test_schedule_flush_with_mutations(self): """if new mutations exist, should add a new flush task to _flush_jobs""" async with self._make_one() as instance: with mock.patch.object(instance, "_flush_internal") as flush_mock: + if not CrossSync.is_async: + # simulate operation + flush_mock.side_effect = lambda x: time.sleep(0.1) for i in range(1, 4): mutation = mock.Mock() instance._staged_entries = [mutation] @@ -753,9 +772,10 @@ async def test_schedule_flush_with_mutations(self): assert instance._staged_entries == [] assert instance._staged_count == 0 assert 
instance._staged_bytes == 0 - assert flush_mock.call_count == i + assert flush_mock.call_count == 1 + flush_mock.reset_mock() - @pytest.mark.asyncio + @CrossSync.pytest async def test__flush_internal(self): """ _flush_internal should: @@ -775,7 +795,7 @@ async def gen(x): yield x flow_mock.side_effect = lambda x: gen(x) - mutations = [_make_mutation(count=1, size=1)] * num_entries + mutations = [self._make_mutation(count=1, size=1)] * num_entries await instance._flush_internal(mutations) assert instance._entries_processed_since_last_raise == num_entries assert execute_mock.call_count == 1 @@ -783,20 +803,28 @@ async def gen(x): instance._oldest_exceptions.clear() instance._newest_exceptions.clear() - @pytest.mark.asyncio + @CrossSync.pytest async def test_flush_clears_job_list(self): """ a job should be added to _flush_jobs when _schedule_flush is called, and removed when it completes """ async with self._make_one() as instance: - with mock.patch.object(instance, "_flush_internal", AsyncMock()): - mutations = [_make_mutation(count=1, size=1)] + with mock.patch.object( + instance, "_flush_internal", CrossSync.Mock() + ) as flush_mock: + if not CrossSync.is_async: + # simulate operation + flush_mock.side_effect = lambda x: time.sleep(0.1) + mutations = [self._make_mutation(count=1, size=1)] instance._staged_entries = mutations assert instance._flush_jobs == set() new_job = instance._schedule_flush() assert instance._flush_jobs == {new_job} - await new_job + if CrossSync.is_async: + await new_job + else: + new_job.result() assert instance._flush_jobs == set() @pytest.mark.parametrize( @@ -811,7 +839,7 @@ async def test_flush_clears_job_list(self): (10, 20, 20), # should cap at 20 ], ) - @pytest.mark.asyncio + @CrossSync.pytest async def test__flush_internal_with_errors( self, num_starting, num_new_errors, expected_total_errors ): @@ -836,7 +864,7 @@ async def gen(x): yield x flow_mock.side_effect = lambda x: gen(x) - mutations = [_make_mutation(count=1, size=1)] * num_entries + mutations = [self._make_mutation(count=1, size=1)] * num_entries await instance._flush_internal(mutations) assert instance._entries_processed_since_last_raise == num_entries assert execute_mock.call_count == 1 @@ -853,10 +881,12 @@ async def gen(x): instance._oldest_exceptions.clear() instance._newest_exceptions.clear() + @CrossSync.convert async def _mock_gapic_return(self, num=5): from google.cloud.bigtable_v2.types import MutateRowsResponse from google.rpc import status_pb2 + @CrossSync.convert async def gen(num): for i in range(num): entry = MutateRowsResponse.Entry( @@ -866,11 +896,11 @@ async def gen(num): return gen(num) - @pytest.mark.asyncio + @CrossSync.pytest async def test_timer_flush_end_to_end(self): """Flush should automatically trigger after flush_interval""" - num_nutations = 10 - mutations = [_make_mutation(count=2, size=2)] * num_nutations + num_mutations = 10 + mutations = [self._make_mutation(count=2, size=2)] * num_mutations async with self._make_one(flush_interval=0.05) as instance: instance._table.default_operation_timeout = 10 @@ -879,69 +909,65 @@ async def test_timer_flush_end_to_end(self): instance._table.client._gapic_client, "mutate_rows" ) as gapic_mock: gapic_mock.side_effect = ( - lambda *args, **kwargs: self._mock_gapic_return(num_nutations) + lambda *args, **kwargs: self._mock_gapic_return(num_mutations) ) for m in mutations: await instance.append(m) assert instance._entries_processed_since_last_raise == 0 # let flush trigger due to timer - await asyncio.sleep(0.1) - assert 
instance._entries_processed_since_last_raise == num_nutations - - @pytest.mark.asyncio - @mock.patch( - "google.cloud.bigtable.data._async.mutations_batcher._MutateRowsOperationAsync", - ) - async def test__execute_mutate_rows(self, mutate_rows): - mutate_rows.return_value = AsyncMock() - start_operation = mutate_rows().start - table = mock.Mock() - table.table_name = "test-table" - table.app_profile_id = "test-app-profile" - table.default_mutate_rows_operation_timeout = 17 - table.default_mutate_rows_attempt_timeout = 13 - table.default_mutate_rows_retryable_errors = () - async with self._make_one(table) as instance: - batch = [_make_mutation()] - result = await instance._execute_mutate_rows(batch) - assert start_operation.call_count == 1 - args, kwargs = mutate_rows.call_args - assert args[0] == table.client._gapic_client - assert args[1] == table - assert args[2] == batch - kwargs["operation_timeout"] == 17 - kwargs["attempt_timeout"] == 13 - assert result == [] - - @pytest.mark.asyncio - @mock.patch( - "google.cloud.bigtable.data._async.mutations_batcher._MutateRowsOperationAsync.start" - ) - async def test__execute_mutate_rows_returns_errors(self, mutate_rows): + await CrossSync.sleep(0.1) + assert instance._entries_processed_since_last_raise == num_mutations + + @CrossSync.pytest + async def test__execute_mutate_rows(self): + with mock.patch.object(CrossSync, "_MutateRowsOperation") as mutate_rows: + mutate_rows.return_value = CrossSync.Mock() + start_operation = mutate_rows().start + table = mock.Mock() + table.table_name = "test-table" + table.app_profile_id = "test-app-profile" + table.default_mutate_rows_operation_timeout = 17 + table.default_mutate_rows_attempt_timeout = 13 + table.default_mutate_rows_retryable_errors = () + async with self._make_one(table) as instance: + batch = [self._make_mutation()] + result = await instance._execute_mutate_rows(batch) + assert start_operation.call_count == 1 + args, kwargs = mutate_rows.call_args + assert args[0] == table.client._gapic_client + assert args[1] == table + assert args[2] == batch + kwargs["operation_timeout"] == 17 + kwargs["attempt_timeout"] == 13 + assert result == [] + + @CrossSync.pytest + async def test__execute_mutate_rows_returns_errors(self): """Errors from operation should be retruned as list""" from google.cloud.bigtable.data.exceptions import ( MutationsExceptionGroup, FailedMutationEntryError, ) - err1 = FailedMutationEntryError(0, mock.Mock(), RuntimeError("test error")) - err2 = FailedMutationEntryError(1, mock.Mock(), RuntimeError("test error")) - mutate_rows.side_effect = MutationsExceptionGroup([err1, err2], 10) - table = mock.Mock() - table.default_mutate_rows_operation_timeout = 17 - table.default_mutate_rows_attempt_timeout = 13 - table.default_mutate_rows_retryable_errors = () - async with self._make_one(table) as instance: - batch = [_make_mutation()] - result = await instance._execute_mutate_rows(batch) - assert len(result) == 2 - assert result[0] == err1 - assert result[1] == err2 - # indices should be set to None - assert result[0].index is None - assert result[1].index is None - - @pytest.mark.asyncio + with mock.patch.object(CrossSync._MutateRowsOperation, "start") as mutate_rows: + err1 = FailedMutationEntryError(0, mock.Mock(), RuntimeError("test error")) + err2 = FailedMutationEntryError(1, mock.Mock(), RuntimeError("test error")) + mutate_rows.side_effect = MutationsExceptionGroup([err1, err2], 10) + table = mock.Mock() + table.default_mutate_rows_operation_timeout = 17 + 
table.default_mutate_rows_attempt_timeout = 13 + table.default_mutate_rows_retryable_errors = () + async with self._make_one(table) as instance: + batch = [self._make_mutation()] + result = await instance._execute_mutate_rows(batch) + assert len(result) == 2 + assert result[0] == err1 + assert result[1] == err2 + # indices should be set to None + assert result[0].index is None + assert result[1].index is None + + @CrossSync.pytest async def test__raise_exceptions(self): """Raise exceptions and reset error state""" from google.cloud.bigtable.data import exceptions @@ -961,13 +987,19 @@ async def test__raise_exceptions(self): # try calling again instance._raise_exceptions() - @pytest.mark.asyncio + @CrossSync.pytest + @CrossSync.convert( + sync_name="test___enter__", replace_symbols={"__aenter__": "__enter__"} + ) async def test___aenter__(self): """Should return self""" async with self._make_one() as instance: assert await instance.__aenter__() == instance - @pytest.mark.asyncio + @CrossSync.pytest + @CrossSync.convert( + sync_name="test___exit__", replace_symbols={"__aexit__": "__exit__"} + ) async def test___aexit__(self): """aexit should call close""" async with self._make_one() as instance: @@ -975,7 +1007,7 @@ async def test___aexit__(self): await instance.__aexit__(None, None, None) assert close_mock.call_count == 1 - @pytest.mark.asyncio + @CrossSync.pytest async def test_close(self): """Should clean up all resources""" async with self._make_one() as instance: @@ -988,7 +1020,7 @@ async def test_close(self): assert flush_mock.call_count == 1 assert raise_mock.call_count == 1 - @pytest.mark.asyncio + @CrossSync.pytest async def test_close_w_exceptions(self): """Raise exceptions on close""" from google.cloud.bigtable.data import exceptions @@ -1007,7 +1039,7 @@ async def test_close_w_exceptions(self): # clear out exceptions instance._oldest_exceptions, instance._newest_exceptions = ([], []) - @pytest.mark.asyncio + @CrossSync.pytest async def test__on_exit(self, recwarn): """Should raise warnings if unflushed mutations exist""" async with self._make_one() as instance: @@ -1023,13 +1055,13 @@ async def test__on_exit(self, recwarn): assert "unflushed mutations" in str(w[0].message).lower() assert str(num_left) in str(w[0].message) # calling while closed is noop - instance.closed = True + instance._closed.set() instance._on_exit() assert len(recwarn) == 0 # reset staged mutations for cleanup instance._staged_entries = [] - @pytest.mark.asyncio + @CrossSync.pytest async def test_atexit_registration(self): """Should run _on_exit on program termination""" import atexit @@ -1039,30 +1071,29 @@ async def test_atexit_registration(self): async with self._make_one(): assert register_mock.call_count == 1 - @pytest.mark.asyncio - @mock.patch( - "google.cloud.bigtable.data._async.mutations_batcher._MutateRowsOperationAsync", - ) - async def test_timeout_args_passed(self, mutate_rows): + @CrossSync.pytest + async def test_timeout_args_passed(self): """ batch_operation_timeout and batch_attempt_timeout should be used in api calls """ - mutate_rows.return_value = AsyncMock() - expected_operation_timeout = 17 - expected_attempt_timeout = 13 - async with self._make_one( - batch_operation_timeout=expected_operation_timeout, - batch_attempt_timeout=expected_attempt_timeout, - ) as instance: - assert instance._operation_timeout == expected_operation_timeout - assert instance._attempt_timeout == expected_attempt_timeout - # make simulated gapic call - await instance._execute_mutate_rows([_make_mutation()]) - 
assert mutate_rows.call_count == 1 - kwargs = mutate_rows.call_args[1] - assert kwargs["operation_timeout"] == expected_operation_timeout - assert kwargs["attempt_timeout"] == expected_attempt_timeout + with mock.patch.object( + CrossSync, "_MutateRowsOperation", return_value=CrossSync.Mock() + ) as mutate_rows: + expected_operation_timeout = 17 + expected_attempt_timeout = 13 + async with self._make_one( + batch_operation_timeout=expected_operation_timeout, + batch_attempt_timeout=expected_attempt_timeout, + ) as instance: + assert instance._operation_timeout == expected_operation_timeout + assert instance._attempt_timeout == expected_attempt_timeout + # make simulated gapic call + await instance._execute_mutate_rows([self._make_mutation()]) + assert mutate_rows.call_count == 1 + kwargs = mutate_rows.call_args[1] + assert kwargs["operation_timeout"] == expected_operation_timeout + assert kwargs["attempt_timeout"] == expected_attempt_timeout @pytest.mark.parametrize( "limit,in_e,start_e,end_e", @@ -1123,7 +1154,7 @@ def test__add_exceptions(self, limit, in_e, start_e, end_e): for i in range(1, newest_list_diff + 1): assert mock_batcher._newest_exceptions[-i] == input_list[-i] - @pytest.mark.asyncio + @CrossSync.pytest # test different inputs for retryable exceptions @pytest.mark.parametrize( "input_retryables,expected_retryables", @@ -1148,6 +1179,7 @@ def test__add_exceptions(self, limit, in_e, start_e, end_e): ([4], [core_exceptions.DeadlineExceeded]), ], ) + @CrossSync.convert async def test_customizable_retryable_errors( self, input_retryables, expected_retryables ): @@ -1155,25 +1187,21 @@ async def test_customizable_retryable_errors( Test that retryable functions support user-configurable arguments, and that the configured retryables are passed down to the gapic layer. 
""" - from google.cloud.bigtable.data._async.client import TableAsync - - with mock.patch( - "google.api_core.retry.if_exception_type" + with mock.patch.object( + google.api_core.retry, "if_exception_type" ) as predicate_builder_mock: - with mock.patch( - "google.api_core.retry.retry_target_async" - ) as retry_fn_mock: + with mock.patch.object(CrossSync, "retry_target") as retry_fn_mock: table = None with mock.patch("asyncio.create_task"): - table = TableAsync(mock.Mock(), "instance", "table") + table = CrossSync.Table(mock.Mock(), "instance", "table") async with self._make_one( table, batch_retryable_errors=input_retryables ) as instance: assert instance._retryable_errors == expected_retryables - expected_predicate = lambda a: a in expected_retryables # noqa + expected_predicate = expected_retryables.__contains__ predicate_builder_mock.return_value = expected_predicate retry_fn_mock.side_effect = RuntimeError("stop early") - mutation = _make_mutation(count=1, size=1) + mutation = self._make_mutation(count=1, size=1) await instance._execute_mutate_rows([mutation]) # passed in errors should be used to build the predicate predicate_builder_mock.assert_called_once_with( @@ -1182,3 +1210,25 @@ async def test_customizable_retryable_errors( retry_call_args = retry_fn_mock.call_args_list[0].args # output of if_exception_type should be sent in to retry constructor assert retry_call_args[1] is expected_predicate + + @CrossSync.pytest + async def test_large_batch_write(self): + """ + Test that a large batch of mutations can be written + """ + import math + + num_mutations = 10_000 + flush_limit = 1000 + mutations = [self._make_mutation(count=1, size=1)] * num_mutations + async with self._make_one(flush_limit_mutation_count=flush_limit) as instance: + operation_mock = mock.Mock() + rpc_call_mock = CrossSync.Mock() + operation_mock().start = rpc_call_mock + CrossSync._MutateRowsOperation = operation_mock + for m in mutations: + await instance.append(m) + expected_calls = math.ceil(num_mutations / flush_limit) + assert rpc_call_mock.call_count == expected_calls + assert instance._entries_processed_since_last_raise == num_mutations + assert len(instance._staged_entries) == 0 diff --git a/packages/google-cloud-bigtable/tests/unit/data/_async/test_read_rows_acceptance.py b/packages/google-cloud-bigtable/tests/unit/data/_async/test_read_rows_acceptance.py new file mode 100644 index 000000000000..45d139182383 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/data/_async/test_read_rows_acceptance.py @@ -0,0 +1,355 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from __future__ import annotations + +import os +import warnings +import pytest +import mock + +from itertools import zip_longest + +from google.cloud.bigtable_v2 import ReadRowsResponse + +from google.cloud.bigtable.data.exceptions import InvalidChunk +from google.cloud.bigtable.data.row import Row + +from ...v2_client.test_row_merger import ReadRowsTest, TestFile + +from google.cloud.bigtable.data._cross_sync import CrossSync + + +__CROSS_SYNC_OUTPUT__ = "tests.unit.data._sync_autogen.test_read_rows_acceptance" + + +@CrossSync.convert_class( + sync_name="TestReadRowsAcceptance", +) +class TestReadRowsAcceptanceAsync: + @staticmethod + @CrossSync.convert + def _get_operation_class(): + return CrossSync._ReadRowsOperation + + @staticmethod + @CrossSync.convert + def _get_client_class(): + return CrossSync.DataClient + + def parse_readrows_acceptance_tests(): + dirname = os.path.dirname(__file__) + filename = os.path.join(dirname, "../read-rows-acceptance-test.json") + + with open(filename) as json_file: + test_json = TestFile.from_json(json_file.read()) + return test_json.read_rows_tests + + @staticmethod + def extract_results_from_row(row: Row): + results = [] + for family, col, cells in row.items(): + for cell in cells: + results.append( + ReadRowsTest.Result( + row_key=row.row_key, + family_name=family, + qualifier=col, + timestamp_micros=cell.timestamp_ns // 1000, + value=cell.value, + label=(cell.labels[0] if cell.labels else ""), + ) + ) + return results + + @staticmethod + @CrossSync.convert + async def _coro_wrapper(stream): + return stream + + @CrossSync.convert + async def _process_chunks(self, *chunks): + @CrossSync.convert + async def _row_stream(): + yield ReadRowsResponse(chunks=chunks) + + instance = mock.Mock() + instance._remaining_count = None + instance._last_yielded_row_key = None + chunker = self._get_operation_class().chunk_stream( + instance, self._coro_wrapper(_row_stream()) + ) + merger = self._get_operation_class().merge_rows(chunker) + results = [] + async for row in merger: + results.append(row) + return results + + @pytest.mark.parametrize( + "test_case", parse_readrows_acceptance_tests(), ids=lambda t: t.description + ) + @CrossSync.pytest + async def test_row_merger_scenario(self, test_case: ReadRowsTest): + async def _scenerio_stream(): + for chunk in test_case.chunks: + yield ReadRowsResponse(chunks=[chunk]) + + try: + results = [] + instance = mock.Mock() + instance._last_yielded_row_key = None + instance._remaining_count = None + chunker = self._get_operation_class().chunk_stream( + instance, self._coro_wrapper(_scenerio_stream()) + ) + merger = self._get_operation_class().merge_rows(chunker) + async for row in merger: + for cell in row: + cell_result = ReadRowsTest.Result( + row_key=cell.row_key, + family_name=cell.family, + qualifier=cell.qualifier, + timestamp_micros=cell.timestamp_micros, + value=cell.value, + label=cell.labels[0] if cell.labels else "", + ) + results.append(cell_result) + except InvalidChunk: + results.append(ReadRowsTest.Result(error=True)) + for expected, actual in zip_longest(test_case.results, results): + assert actual == expected + + @pytest.mark.parametrize( + "test_case", parse_readrows_acceptance_tests(), ids=lambda t: t.description + ) + @CrossSync.pytest + async def test_read_rows_scenario(self, test_case: ReadRowsTest): + async def _make_gapic_stream(chunk_list: list[ReadRowsResponse]): + from google.cloud.bigtable_v2 import ReadRowsResponse + + class mock_stream: + def __init__(self, chunk_list): + self.chunk_list = 
chunk_list + self.idx = -1 + + def __aiter__(self): + return self + + def __iter__(self): + return self + + async def __anext__(self): + self.idx += 1 + if len(self.chunk_list) > self.idx: + chunk = self.chunk_list[self.idx] + return ReadRowsResponse(chunks=[chunk]) + raise CrossSync.StopIteration + + def __next__(self): + return self.__anext__() + + def cancel(self): + pass + + return mock_stream(chunk_list) + + with mock.patch.dict(os.environ, {"BIGTABLE_EMULATOR_HOST": "localhost"}): + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + # use emulator mode to avoid auth issues in CI + client = self._get_client_class()() + try: + table = client.get_table("instance", "table") + results = [] + with mock.patch.object( + table.client._gapic_client, "read_rows" + ) as read_rows: + # run once, then return error on retry + read_rows.return_value = _make_gapic_stream(test_case.chunks) + async for row in await table.read_rows_stream(query={}): + for cell in row: + cell_result = ReadRowsTest.Result( + row_key=cell.row_key, + family_name=cell.family, + qualifier=cell.qualifier, + timestamp_micros=cell.timestamp_micros, + value=cell.value, + label=cell.labels[0] if cell.labels else "", + ) + results.append(cell_result) + except InvalidChunk: + results.append(ReadRowsTest.Result(error=True)) + finally: + await client.close() + for expected, actual in zip_longest(test_case.results, results): + assert actual == expected + + @CrossSync.pytest + async def test_out_of_order_rows(self): + async def _row_stream(): + yield ReadRowsResponse(last_scanned_row_key=b"a") + + instance = mock.Mock() + instance._remaining_count = None + instance._last_yielded_row_key = b"b" + chunker = self._get_operation_class().chunk_stream( + instance, self._coro_wrapper(_row_stream()) + ) + merger = self._get_operation_class().merge_rows(chunker) + with pytest.raises(InvalidChunk): + async for _ in merger: + pass + + @CrossSync.pytest + async def test_bare_reset(self): + first_chunk = ReadRowsResponse.CellChunk( + ReadRowsResponse.CellChunk( + row_key=b"a", family_name="f", qualifier=b"q", value=b"v" + ) + ) + with pytest.raises(InvalidChunk): + await self._process_chunks( + first_chunk, + ReadRowsResponse.CellChunk( + ReadRowsResponse.CellChunk(reset_row=True, row_key=b"a") + ), + ) + with pytest.raises(InvalidChunk): + await self._process_chunks( + first_chunk, + ReadRowsResponse.CellChunk( + ReadRowsResponse.CellChunk(reset_row=True, family_name="f") + ), + ) + with pytest.raises(InvalidChunk): + await self._process_chunks( + first_chunk, + ReadRowsResponse.CellChunk( + ReadRowsResponse.CellChunk(reset_row=True, qualifier=b"q") + ), + ) + with pytest.raises(InvalidChunk): + await self._process_chunks( + first_chunk, + ReadRowsResponse.CellChunk( + ReadRowsResponse.CellChunk(reset_row=True, timestamp_micros=1000) + ), + ) + with pytest.raises(InvalidChunk): + await self._process_chunks( + first_chunk, + ReadRowsResponse.CellChunk( + ReadRowsResponse.CellChunk(reset_row=True, labels=["a"]) + ), + ) + with pytest.raises(InvalidChunk): + await self._process_chunks( + first_chunk, + ReadRowsResponse.CellChunk( + ReadRowsResponse.CellChunk(reset_row=True, value=b"v") + ), + ) + + @CrossSync.pytest + async def test_missing_family(self): + with pytest.raises(InvalidChunk): + await self._process_chunks( + ReadRowsResponse.CellChunk( + row_key=b"a", + qualifier=b"q", + timestamp_micros=1000, + value=b"v", + commit_row=True, + ) + ) + + @CrossSync.pytest + async def test_mid_cell_row_key_change(self): + with 
pytest.raises(InvalidChunk): + await self._process_chunks( + ReadRowsResponse.CellChunk( + row_key=b"a", + family_name="f", + qualifier=b"q", + timestamp_micros=1000, + value_size=2, + value=b"v", + ), + ReadRowsResponse.CellChunk(row_key=b"b", value=b"v", commit_row=True), + ) + + @CrossSync.pytest + async def test_mid_cell_family_change(self): + with pytest.raises(InvalidChunk): + await self._process_chunks( + ReadRowsResponse.CellChunk( + row_key=b"a", + family_name="f", + qualifier=b"q", + timestamp_micros=1000, + value_size=2, + value=b"v", + ), + ReadRowsResponse.CellChunk( + family_name="f2", value=b"v", commit_row=True + ), + ) + + @CrossSync.pytest + async def test_mid_cell_qualifier_change(self): + with pytest.raises(InvalidChunk): + await self._process_chunks( + ReadRowsResponse.CellChunk( + row_key=b"a", + family_name="f", + qualifier=b"q", + timestamp_micros=1000, + value_size=2, + value=b"v", + ), + ReadRowsResponse.CellChunk( + qualifier=b"q2", value=b"v", commit_row=True + ), + ) + + @CrossSync.pytest + async def test_mid_cell_timestamp_change(self): + with pytest.raises(InvalidChunk): + await self._process_chunks( + ReadRowsResponse.CellChunk( + row_key=b"a", + family_name="f", + qualifier=b"q", + timestamp_micros=1000, + value_size=2, + value=b"v", + ), + ReadRowsResponse.CellChunk( + timestamp_micros=2000, value=b"v", commit_row=True + ), + ) + + @CrossSync.pytest + async def test_mid_cell_labels_change(self): + with pytest.raises(InvalidChunk): + await self._process_chunks( + ReadRowsResponse.CellChunk( + row_key=b"a", + family_name="f", + qualifier=b"q", + timestamp_micros=1000, + value_size=2, + value=b"v", + ), + ReadRowsResponse.CellChunk(labels=["b"], value=b"v", commit_row=True), + ) diff --git a/packages/google-cloud-bigtable/tests/unit/data/execute_query/_async/_testing.py b/packages/google-cloud-bigtable/tests/unit/data/execute_query/_async/_testing.py deleted file mode 100644 index 5a7acbdd94a8..000000000000 --- a/packages/google-cloud-bigtable/tests/unit/data/execute_query/_async/_testing.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# flake8: noqa -from .._testing import TYPE_INT, split_bytes_into_chunks, proto_rows_bytes - - -try: - # async mock for python3.7-10 - from unittest.mock import Mock - from asyncio import coroutine - - def async_mock(return_value=None): - coro = Mock(name="CoroutineResult") - corofunc = Mock(name="CoroutineFunction", side_effect=coroutine(coro)) - corofunc.coro = coro - corofunc.coro.return_value = return_value - return corofunc - -except ImportError: - # async mock for python3.11 or later - from unittest.mock import AsyncMock - - def async_mock(return_value=None): - return AsyncMock(return_value=return_value) diff --git a/packages/google-cloud-bigtable/tests/unit/data/execute_query/_async/test_query_iterator.py b/packages/google-cloud-bigtable/tests/unit/data/execute_query/_async/test_query_iterator.py index 5c577ed74702..9bdf17c27fff 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/execute_query/_async/test_query_iterator.py +++ b/packages/google-cloud-bigtable/tests/unit/data/execute_query/_async/test_query_iterator.py @@ -13,144 +13,171 @@ # See the License for the specific language governing permissions and # limitations under the License. -import asyncio -from unittest.mock import Mock -from mock import patch import pytest -from google.cloud.bigtable.data.execute_query._async.execute_query_iterator import ( - ExecuteQueryIteratorAsync, -) +import concurrent.futures from google.cloud.bigtable_v2.types.bigtable import ExecuteQueryResponse -from ._testing import TYPE_INT, proto_rows_bytes, split_bytes_into_chunks, async_mock +from .._testing import TYPE_INT, split_bytes_into_chunks, proto_rows_bytes + +from google.cloud.bigtable.data._cross_sync import CrossSync + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock +except ImportError: # pragma: NO COVER + import mock # type: ignore -class MockIteratorAsync: +__CROSS_SYNC_OUTPUT__ = ( + "tests.unit.data.execute_query._sync_autogen.test_query_iterator" +) + + +@CrossSync.convert_class(sync_name="MockIterator") +class MockIterator: def __init__(self, values, delay=None): self._values = values self.idx = 0 self._delay = delay + @CrossSync.convert(sync_name="__iter__") def __aiter__(self): return self + @CrossSync.convert(sync_name="__next__") async def __anext__(self): if self.idx >= len(self._values): - raise StopAsyncIteration + raise CrossSync.StopIteration if self._delay is not None: - await asyncio.sleep(self._delay) + await CrossSync.sleep(self._delay) value = self._values[self.idx] self.idx += 1 return value -@pytest.fixture -def proto_byte_stream(): - proto_rows = [ - proto_rows_bytes({"int_value": 1}, {"int_value": 2}), - proto_rows_bytes({"int_value": 3}, {"int_value": 4}), - proto_rows_bytes({"int_value": 5}, {"int_value": 6}), - ] - - messages = [ - *split_bytes_into_chunks(proto_rows[0], num_chunks=2), - *split_bytes_into_chunks(proto_rows[1], num_chunks=3), - proto_rows[2], - ] - - stream = [ - ExecuteQueryResponse( - metadata={ - "proto_schema": { - "columns": [ - {"name": "test1", "type_": TYPE_INT}, - {"name": "test2", "type_": TYPE_INT}, - ] +@CrossSync.convert_class(sync_name="TestQueryIterator") +class TestQueryIteratorAsync: + @staticmethod + def _target_class(): + return CrossSync.ExecuteQueryIterator + + def _make_one(self, *args, **kwargs): + return self._target_class()(*args, **kwargs) + + @pytest.fixture + def proto_byte_stream(self): + proto_rows = [ + proto_rows_bytes({"int_value": 1}, {"int_value": 2}), + proto_rows_bytes({"int_value": 3}, {"int_value": 4}), + 
proto_rows_bytes({"int_value": 5}, {"int_value": 6}), + ] + + messages = [ + *split_bytes_into_chunks(proto_rows[0], num_chunks=2), + *split_bytes_into_chunks(proto_rows[1], num_chunks=3), + proto_rows[2], + ] + + stream = [ + ExecuteQueryResponse( + metadata={ + "proto_schema": { + "columns": [ + {"name": "test1", "type_": TYPE_INT}, + {"name": "test2", "type_": TYPE_INT}, + ] + } + } + ), + ExecuteQueryResponse( + results={"proto_rows_batch": {"batch_data": messages[0]}} + ), + ExecuteQueryResponse( + results={ + "proto_rows_batch": {"batch_data": messages[1]}, + "resume_token": b"token1", + } + ), + ExecuteQueryResponse( + results={"proto_rows_batch": {"batch_data": messages[2]}} + ), + ExecuteQueryResponse( + results={"proto_rows_batch": {"batch_data": messages[3]}} + ), + ExecuteQueryResponse( + results={ + "proto_rows_batch": {"batch_data": messages[4]}, + "resume_token": b"token2", + } + ), + ExecuteQueryResponse( + results={ + "proto_rows_batch": {"batch_data": messages[5]}, + "resume_token": b"token3", } - } - ), - ExecuteQueryResponse(results={"proto_rows_batch": {"batch_data": messages[0]}}), - ExecuteQueryResponse( - results={ - "proto_rows_batch": {"batch_data": messages[1]}, - "resume_token": b"token1", - } - ), - ExecuteQueryResponse(results={"proto_rows_batch": {"batch_data": messages[2]}}), - ExecuteQueryResponse(results={"proto_rows_batch": {"batch_data": messages[3]}}), - ExecuteQueryResponse( - results={ - "proto_rows_batch": {"batch_data": messages[4]}, - "resume_token": b"token2", - } - ), - ExecuteQueryResponse( - results={ - "proto_rows_batch": {"batch_data": messages[5]}, - "resume_token": b"token3", - } - ), - ] - return stream - - -@pytest.mark.asyncio -async def test_iterator(proto_byte_stream): - client_mock = Mock() - - client_mock._register_instance = async_mock() - client_mock._remove_instance_registration = async_mock() - mock_async_iterator = MockIteratorAsync(proto_byte_stream) - iterator = None - - with patch( - "google.api_core.retry.retry_target_stream_async", - return_value=mock_async_iterator, - ): - iterator = ExecuteQueryIteratorAsync( - client=client_mock, - instance_id="test-instance", - app_profile_id="test_profile", - request_body={}, - attempt_timeout=10, - operation_timeout=10, - req_metadata=(), - retryable_excs=[], - ) - result = [] - async for value in iterator: - result.append(tuple(value)) - assert result == [(1, 2), (3, 4), (5, 6)] - - assert iterator.is_closed - client_mock._register_instance.assert_called_once() - client_mock._remove_instance_registration.assert_called_once() - - assert mock_async_iterator.idx == len(proto_byte_stream) - - -@pytest.mark.asyncio -async def test_iterator_awaits_metadata(proto_byte_stream): - client_mock = Mock() - - client_mock._register_instance = async_mock() - client_mock._remove_instance_registration = async_mock() - mock_async_iterator = MockIteratorAsync(proto_byte_stream) - iterator = None - with patch( - "google.api_core.retry.retry_target_stream_async", - return_value=mock_async_iterator, - ): - iterator = ExecuteQueryIteratorAsync( - client=client_mock, - instance_id="test-instance", - app_profile_id="test_profile", - request_body={}, - attempt_timeout=10, - operation_timeout=10, - req_metadata=(), - retryable_excs=[], - ) - - await iterator.metadata() - - assert mock_async_iterator.idx == 1 + ), + ] + return stream + + @CrossSync.pytest + async def test_iterator(self, proto_byte_stream): + client_mock = mock.Mock() + + client_mock._register_instance = CrossSync.Mock() + 
client_mock._remove_instance_registration = CrossSync.Mock() + client_mock._executor = concurrent.futures.ThreadPoolExecutor() + mock_async_iterator = MockIterator(proto_byte_stream) + iterator = None + + with mock.patch.object( + CrossSync, + "retry_target_stream", + return_value=mock_async_iterator, + ): + iterator = self._make_one( + client=client_mock, + instance_id="test-instance", + app_profile_id="test_profile", + request_body={}, + attempt_timeout=10, + operation_timeout=10, + req_metadata=(), + retryable_excs=[], + ) + result = [] + async for value in iterator: + result.append(tuple(value)) + assert result == [(1, 2), (3, 4), (5, 6)] + + assert iterator.is_closed + client_mock._register_instance.assert_called_once() + client_mock._remove_instance_registration.assert_called_once() + + assert mock_async_iterator.idx == len(proto_byte_stream) + + @CrossSync.pytest + async def test_iterator_awaits_metadata(self, proto_byte_stream): + client_mock = mock.Mock() + + client_mock._register_instance = CrossSync.Mock() + client_mock._remove_instance_registration = CrossSync.Mock() + mock_async_iterator = MockIterator(proto_byte_stream) + iterator = None + with mock.patch.object( + CrossSync, + "retry_target_stream", + return_value=mock_async_iterator, + ): + iterator = self._make_one( + client=client_mock, + instance_id="test-instance", + app_profile_id="test_profile", + request_body={}, + attempt_timeout=10, + operation_timeout=10, + req_metadata=(), + retryable_excs=[], + ) + + await iterator.metadata() + + assert mock_async_iterator.idx == 1 diff --git a/packages/google-cloud-bigtable/tests/unit/data/test_read_rows_acceptance.py b/packages/google-cloud-bigtable/tests/unit/data/test_read_rows_acceptance.py deleted file mode 100644 index 7cb3c08dc27d..000000000000 --- a/packages/google-cloud-bigtable/tests/unit/data/test_read_rows_acceptance.py +++ /dev/null @@ -1,331 +0,0 @@ -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-from __future__ import annotations - -import os -from itertools import zip_longest - -import pytest -import mock - -from google.cloud.bigtable_v2 import ReadRowsResponse - -from google.cloud.bigtable.data._async.client import BigtableDataClientAsync -from google.cloud.bigtable.data.exceptions import InvalidChunk -from google.cloud.bigtable.data._async._read_rows import _ReadRowsOperationAsync -from google.cloud.bigtable.data.row import Row - -from ..v2_client.test_row_merger import ReadRowsTest, TestFile - - -def parse_readrows_acceptance_tests(): - dirname = os.path.dirname(__file__) - filename = os.path.join(dirname, "./read-rows-acceptance-test.json") - - with open(filename) as json_file: - test_json = TestFile.from_json(json_file.read()) - return test_json.read_rows_tests - - -def extract_results_from_row(row: Row): - results = [] - for family, col, cells in row.items(): - for cell in cells: - results.append( - ReadRowsTest.Result( - row_key=row.row_key, - family_name=family, - qualifier=col, - timestamp_micros=cell.timestamp_ns // 1000, - value=cell.value, - label=(cell.labels[0] if cell.labels else ""), - ) - ) - return results - - -@pytest.mark.parametrize( - "test_case", parse_readrows_acceptance_tests(), ids=lambda t: t.description -) -@pytest.mark.asyncio -async def test_row_merger_scenario(test_case: ReadRowsTest): - async def _scenerio_stream(): - for chunk in test_case.chunks: - yield ReadRowsResponse(chunks=[chunk]) - - try: - results = [] - instance = mock.Mock() - instance._last_yielded_row_key = None - instance._remaining_count = None - chunker = _ReadRowsOperationAsync.chunk_stream( - instance, _coro_wrapper(_scenerio_stream()) - ) - merger = _ReadRowsOperationAsync.merge_rows(chunker) - async for row in merger: - for cell in row: - cell_result = ReadRowsTest.Result( - row_key=cell.row_key, - family_name=cell.family, - qualifier=cell.qualifier, - timestamp_micros=cell.timestamp_micros, - value=cell.value, - label=cell.labels[0] if cell.labels else "", - ) - results.append(cell_result) - except InvalidChunk: - results.append(ReadRowsTest.Result(error=True)) - for expected, actual in zip_longest(test_case.results, results): - assert actual == expected - - -@pytest.mark.parametrize( - "test_case", parse_readrows_acceptance_tests(), ids=lambda t: t.description -) -@pytest.mark.asyncio -async def test_read_rows_scenario(test_case: ReadRowsTest): - async def _make_gapic_stream(chunk_list: list[ReadRowsResponse]): - from google.cloud.bigtable_v2 import ReadRowsResponse - - class mock_stream: - def __init__(self, chunk_list): - self.chunk_list = chunk_list - self.idx = -1 - - def __aiter__(self): - return self - - async def __anext__(self): - self.idx += 1 - if len(self.chunk_list) > self.idx: - chunk = self.chunk_list[self.idx] - return ReadRowsResponse(chunks=[chunk]) - raise StopAsyncIteration - - def cancel(self): - pass - - return mock_stream(chunk_list) - - try: - with mock.patch.dict(os.environ, {"BIGTABLE_EMULATOR_HOST": "localhost"}): - # use emulator mode to avoid auth issues in CI - client = BigtableDataClientAsync() - table = client.get_table("instance", "table") - results = [] - with mock.patch.object(table.client._gapic_client, "read_rows") as read_rows: - # run once, then return error on retry - read_rows.return_value = _make_gapic_stream(test_case.chunks) - async for row in await table.read_rows_stream(query={}): - for cell in row: - cell_result = ReadRowsTest.Result( - row_key=cell.row_key, - family_name=cell.family, - qualifier=cell.qualifier, - 
timestamp_micros=cell.timestamp_micros, - value=cell.value, - label=cell.labels[0] if cell.labels else "", - ) - results.append(cell_result) - except InvalidChunk: - results.append(ReadRowsTest.Result(error=True)) - finally: - await client.close() - for expected, actual in zip_longest(test_case.results, results): - assert actual == expected - - -@pytest.mark.asyncio -async def test_out_of_order_rows(): - async def _row_stream(): - yield ReadRowsResponse(last_scanned_row_key=b"a") - - instance = mock.Mock() - instance._remaining_count = None - instance._last_yielded_row_key = b"b" - chunker = _ReadRowsOperationAsync.chunk_stream( - instance, _coro_wrapper(_row_stream()) - ) - merger = _ReadRowsOperationAsync.merge_rows(chunker) - with pytest.raises(InvalidChunk): - async for _ in merger: - pass - - -@pytest.mark.asyncio -async def test_bare_reset(): - first_chunk = ReadRowsResponse.CellChunk( - ReadRowsResponse.CellChunk( - row_key=b"a", family_name="f", qualifier=b"q", value=b"v" - ) - ) - with pytest.raises(InvalidChunk): - await _process_chunks( - first_chunk, - ReadRowsResponse.CellChunk( - ReadRowsResponse.CellChunk(reset_row=True, row_key=b"a") - ), - ) - with pytest.raises(InvalidChunk): - await _process_chunks( - first_chunk, - ReadRowsResponse.CellChunk( - ReadRowsResponse.CellChunk(reset_row=True, family_name="f") - ), - ) - with pytest.raises(InvalidChunk): - await _process_chunks( - first_chunk, - ReadRowsResponse.CellChunk( - ReadRowsResponse.CellChunk(reset_row=True, qualifier=b"q") - ), - ) - with pytest.raises(InvalidChunk): - await _process_chunks( - first_chunk, - ReadRowsResponse.CellChunk( - ReadRowsResponse.CellChunk(reset_row=True, timestamp_micros=1000) - ), - ) - with pytest.raises(InvalidChunk): - await _process_chunks( - first_chunk, - ReadRowsResponse.CellChunk( - ReadRowsResponse.CellChunk(reset_row=True, labels=["a"]) - ), - ) - with pytest.raises(InvalidChunk): - await _process_chunks( - first_chunk, - ReadRowsResponse.CellChunk( - ReadRowsResponse.CellChunk(reset_row=True, value=b"v") - ), - ) - - -@pytest.mark.asyncio -async def test_missing_family(): - with pytest.raises(InvalidChunk): - await _process_chunks( - ReadRowsResponse.CellChunk( - row_key=b"a", - qualifier=b"q", - timestamp_micros=1000, - value=b"v", - commit_row=True, - ) - ) - - -@pytest.mark.asyncio -async def test_mid_cell_row_key_change(): - with pytest.raises(InvalidChunk): - await _process_chunks( - ReadRowsResponse.CellChunk( - row_key=b"a", - family_name="f", - qualifier=b"q", - timestamp_micros=1000, - value_size=2, - value=b"v", - ), - ReadRowsResponse.CellChunk(row_key=b"b", value=b"v", commit_row=True), - ) - - -@pytest.mark.asyncio -async def test_mid_cell_family_change(): - with pytest.raises(InvalidChunk): - await _process_chunks( - ReadRowsResponse.CellChunk( - row_key=b"a", - family_name="f", - qualifier=b"q", - timestamp_micros=1000, - value_size=2, - value=b"v", - ), - ReadRowsResponse.CellChunk(family_name="f2", value=b"v", commit_row=True), - ) - - -@pytest.mark.asyncio -async def test_mid_cell_qualifier_change(): - with pytest.raises(InvalidChunk): - await _process_chunks( - ReadRowsResponse.CellChunk( - row_key=b"a", - family_name="f", - qualifier=b"q", - timestamp_micros=1000, - value_size=2, - value=b"v", - ), - ReadRowsResponse.CellChunk(qualifier=b"q2", value=b"v", commit_row=True), - ) - - -@pytest.mark.asyncio -async def test_mid_cell_timestamp_change(): - with pytest.raises(InvalidChunk): - await _process_chunks( - ReadRowsResponse.CellChunk( - row_key=b"a", - 
family_name="f", - qualifier=b"q", - timestamp_micros=1000, - value_size=2, - value=b"v", - ), - ReadRowsResponse.CellChunk( - timestamp_micros=2000, value=b"v", commit_row=True - ), - ) - - -@pytest.mark.asyncio -async def test_mid_cell_labels_change(): - with pytest.raises(InvalidChunk): - await _process_chunks( - ReadRowsResponse.CellChunk( - row_key=b"a", - family_name="f", - qualifier=b"q", - timestamp_micros=1000, - value_size=2, - value=b"v", - ), - ReadRowsResponse.CellChunk(labels=["b"], value=b"v", commit_row=True), - ) - - -async def _coro_wrapper(stream): - return stream - - -async def _process_chunks(*chunks): - async def _row_stream(): - yield ReadRowsResponse(chunks=chunks) - - instance = mock.Mock() - instance._remaining_count = None - instance._last_yielded_row_key = None - chunker = _ReadRowsOperationAsync.chunk_stream( - instance, _coro_wrapper(_row_stream()) - ) - merger = _ReadRowsOperationAsync.merge_rows(chunker) - results = [] - async for row in merger: - results.append(row) - return results From fe23e8aa4cb9b346275a55467177371edc9c6eb2 Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Fri, 6 Dec 2024 11:23:34 -0600 Subject: [PATCH 838/892] chore: use more verbose paths in hello snippets (#1045) --- .../samples/beam/requirements-test.txt | 2 +- .../samples/hello/async_main.py | 14 +++++++------- .../google-cloud-bigtable/samples/hello/main.py | 8 ++++++-- .../samples/hello/requirements-test.txt | 2 +- .../samples/hello_happybase/requirements-test.txt | 2 +- .../samples/instanceadmin/requirements-test.txt | 2 +- .../samples/metricscaler/requirements-test.txt | 2 +- .../samples/quickstart/requirements-test.txt | 2 +- .../quickstart_happybase/requirements-test.txt | 2 +- .../snippets/data_client/requirements-test.txt | 2 +- .../samples/snippets/deletes/deletes_async_test.py | 8 ++++++++ .../samples/snippets/deletes/requirements-test.txt | 2 +- .../snippets/filters/filter_snippets_async_test.py | 8 ++++++++ .../samples/snippets/filters/requirements-test.txt | 2 +- .../samples/snippets/reads/requirements-test.txt | 2 +- .../samples/snippets/writes/requirements-test.txt | 2 +- .../samples/tableadmin/requirements-test.txt | 2 +- 17 files changed, 42 insertions(+), 22 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements-test.txt b/packages/google-cloud-bigtable/samples/beam/requirements-test.txt index fe93bd52ff68..e079f8a6038d 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements-test.txt @@ -1 +1 @@ -pytest==8.3.2 +pytest diff --git a/packages/google-cloud-bigtable/samples/hello/async_main.py b/packages/google-cloud-bigtable/samples/hello/async_main.py index 0130161e1179..34159bedb4c9 100644 --- a/packages/google-cloud-bigtable/samples/hello/async_main.py +++ b/packages/google-cloud-bigtable/samples/hello/async_main.py @@ -31,11 +31,11 @@ # [START bigtable_async_hw_imports] from google.cloud import bigtable from google.cloud.bigtable.data import row_filters -from google.cloud.bigtable.data import RowMutationEntry -from google.cloud.bigtable.data import SetCell -from google.cloud.bigtable.data import ReadRowsQuery # [END bigtable_async_hw_imports] +# use to ignore warnings +row_filters + async def main(project_id, instance_id, table_id): # [START bigtable_async_hw_connect] @@ -85,8 +85,8 @@ async def main(project_id, instance_id, table_id): # # https://cloud.google.com/bigtable/docs/schema-design row_key = "greeting{}".format(i).encode() - row_mutation = 
RowMutationEntry( - row_key, SetCell(column_family_id, column, value) + row_mutation = bigtable.data.RowMutationEntry( + row_key, bigtable.data.SetCell(column_family_id, column, value) ) mutations.append(row_mutation) await table.bulk_mutate_rows(mutations) @@ -95,7 +95,7 @@ async def main(project_id, instance_id, table_id): # [START bigtable_async_hw_create_filter] # Create a filter to only retrieve the most recent version of the cell # for each column across entire row. - row_filter = row_filters.CellsColumnLimitFilter(1) + row_filter = bigtable.data.row_filters.CellsColumnLimitFilter(1) # [END bigtable_async_hw_create_filter] # [START bigtable_async_hw_get_with_filter] @@ -112,7 +112,7 @@ async def main(project_id, instance_id, table_id): # [START bigtable_async_hw_scan_with_filter] # [START bigtable_async_hw_scan_all] print("Scanning for all greetings:") - query = ReadRowsQuery(row_filter=row_filter) + query = bigtable.data.ReadRowsQuery(row_filter=row_filter) async for row in await table.read_rows_stream(query): cell = row.cells[0] print(cell.value.decode("utf-8")) diff --git a/packages/google-cloud-bigtable/samples/hello/main.py b/packages/google-cloud-bigtable/samples/hello/main.py index 3e5078608eb6..41124e82675b 100644 --- a/packages/google-cloud-bigtable/samples/hello/main.py +++ b/packages/google-cloud-bigtable/samples/hello/main.py @@ -36,6 +36,10 @@ # [END bigtable_hw_imports] +# use to avoid warnings +row_filters +column_family + def main(project_id, instance_id, table_id): # [START bigtable_hw_connect] @@ -52,7 +56,7 @@ def main(project_id, instance_id, table_id): print("Creating column family cf1 with Max Version GC rule...") # Create a column family with GC policy : most recent N versions # Define the GC policy to retain only the most recent 2 versions - max_versions_rule = column_family.MaxVersionsGCRule(2) + max_versions_rule = bigtable.column_family.MaxVersionsGCRule(2) column_family_id = "cf1" column_families = {column_family_id: max_versions_rule} if not table.exists(): @@ -93,7 +97,7 @@ def main(project_id, instance_id, table_id): # [START bigtable_hw_create_filter] # Create a filter to only retrieve the most recent version of the cell # for each column across entire row. 
- row_filter = row_filters.CellsColumnLimitFilter(1) + row_filter = bigtable.row_filters.CellsColumnLimitFilter(1) # [END bigtable_hw_create_filter] # [START bigtable_hw_get_with_filter] diff --git a/packages/google-cloud-bigtable/samples/hello/requirements-test.txt b/packages/google-cloud-bigtable/samples/hello/requirements-test.txt index fe93bd52ff68..e079f8a6038d 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements-test.txt @@ -1 +1 @@ -pytest==8.3.2 +pytest diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt b/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt index fe93bd52ff68..e079f8a6038d 100644 --- a/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/hello_happybase/requirements-test.txt @@ -1 +1 @@ -pytest==8.3.2 +pytest diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt index fe93bd52ff68..e079f8a6038d 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements-test.txt @@ -1 +1 @@ -pytest==8.3.2 +pytest diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt index caf5f029cb6e..13d73437842a 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt @@ -1,3 +1,3 @@ -pytest==8.3.2 +pytest mock==5.1.0 google-cloud-testutils diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt index a636261208aa..ee4ba018603b 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements-test.txt @@ -1,2 +1,2 @@ -pytest==8.3.2 +pytest pytest-asyncio diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt b/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt index fe93bd52ff68..55b033e901cd 100644 --- a/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements-test.txt @@ -1 +1 @@ -pytest==8.3.2 +pytest \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/snippets/data_client/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/data_client/requirements-test.txt index a636261208aa..ee4ba018603b 100644 --- a/packages/google-cloud-bigtable/samples/snippets/data_client/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/data_client/requirements-test.txt @@ -1,2 +1,2 @@ -pytest==8.3.2 +pytest pytest-asyncio diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/deletes_async_test.py b/packages/google-cloud-bigtable/samples/snippets/deletes/deletes_async_test.py index 9408a832037c..4fb4898e5270 100644 --- a/packages/google-cloud-bigtable/samples/snippets/deletes/deletes_async_test.py +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/deletes_async_test.py @@ -30,6 +30,14 @@ TABLE_ID = 
f"mobile-time-series-deletes-async-{str(uuid.uuid4())[:16]}" +@pytest.fixture(scope="module") +def event_loop(): + import asyncio + loop = asyncio.get_event_loop_policy().new_event_loop() + yield loop + loop.close() + + @pytest_asyncio.fixture(scope="module", autouse=True) async def table_id() -> AsyncGenerator[str, None]: with create_table_cm(PROJECT, BIGTABLE_INSTANCE, TABLE_ID, {"stats_summary": None, "cell_plan": None}, verbose=False): diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt index a636261208aa..ee4ba018603b 100644 --- a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements-test.txt @@ -1,2 +1,2 @@ -pytest==8.3.2 +pytest pytest-asyncio diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/filter_snippets_async_test.py b/packages/google-cloud-bigtable/samples/snippets/filters/filter_snippets_async_test.py index 124db8157906..a3f83a6f2404 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/filter_snippets_async_test.py +++ b/packages/google-cloud-bigtable/samples/snippets/filters/filter_snippets_async_test.py @@ -34,6 +34,14 @@ TABLE_ID = f"mobile-time-series-filters-async-{str(uuid.uuid4())[:16]}" +@pytest.fixture(scope="module") +def event_loop(): + import asyncio + loop = asyncio.get_event_loop_policy().new_event_loop() + yield loop + loop.close() + + @pytest_asyncio.fixture(scope="module", autouse=True) async def table_id() -> AsyncGenerator[str, None]: with create_table_cm(PROJECT, BIGTABLE_INSTANCE, TABLE_ID, {"stats_summary": None, "cell_plan": None}): diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt index a636261208aa..ee4ba018603b 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements-test.txt @@ -1,2 +1,2 @@ -pytest==8.3.2 +pytest pytest-asyncio diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt index fe93bd52ff68..e079f8a6038d 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements-test.txt @@ -1 +1 @@ -pytest==8.3.2 +pytest diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt index 0f4b18778fcb..5e15eb26f589 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements-test.txt @@ -1,2 +1,2 @@ backoff==2.2.1 -pytest==8.3.2 +pytest diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt index 7f86b7bc42c5..a4c9e9c0b59c 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt @@ -1,2 +1,2 @@ -pytest==8.3.2 +pytest google-cloud-testutils==1.4.0 From 5e861cc7b14a851f1a9451e18af716f34fe4a030 Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Thu, 12 Dec 2024 
15:41:19 -0600 Subject: [PATCH 839/892] feat: add generated sync client (#1017) --- .../.cross_sync/README.md | 2 +- .../async_data_client/async_data_usage.rst | 18 - .../async_data_client.rst | 2 +- .../async_data_execute_query_iterator.rst | 0 .../async_data_mutations_batcher.rst | 0 .../async_data_table.rst | 0 .../common_data_exceptions.rst} | 0 .../common_data_execute_query_metadata.rst} | 0 .../common_data_execute_query_values.rst} | 0 .../common_data_mutations.rst} | 0 .../common_data_read_modify_write_rules.rst} | 0 .../common_data_read_rows_query.rst} | 0 .../common_data_row.rst} | 0 .../common_data_row_filters.rst} | 0 .../docs/data_client/data_client_usage.rst | 39 + .../docs/data_client/sync_data_client.rst | 6 + .../sync_data_execute_query_iterator.rst | 6 + .../sync_data_mutations_batcher.rst | 6 + .../docs/data_client/sync_data_table.rst | 6 + packages/google-cloud-bigtable/docs/index.rst | 4 +- .../docs/scripts/patch_devsite_toc.py | 9 +- .../google/cloud/bigtable/data/__init__.py | 18 +- .../bigtable/data/_async/_mutate_rows.py | 2 +- .../cloud/bigtable/data/_async/_read_rows.py | 2 +- .../cloud/bigtable/data/_async/client.py | 11 +- .../bigtable/data/_async/mutations_batcher.py | 8 +- .../google/cloud/bigtable/data/_helpers.py | 5 +- .../data/_sync_autogen/_mutate_rows.py | 182 ++ .../bigtable/data/_sync_autogen/_read_rows.py | 304 ++ .../bigtable/data/_sync_autogen/client.py | 1234 +++++++ .../data/_sync_autogen/mutations_batcher.py | 449 +++ .../bigtable/data/execute_query/__init__.py | 5 + .../_async/execute_query_iterator.py | 2 + .../_sync_autogen/execute_query_iterator.py | 186 ++ packages/google-cloud-bigtable/noxfile.py | 16 +- .../handlers/client_handler_data_async.py | 2 +- .../tests/system/data/test_system_async.py | 2 +- .../tests/system/data/test_system_autogen.py | 828 +++++ .../unit/data/_async/test__mutate_rows.py | 2 +- .../tests/unit/data/_async/test__read_rows.py | 1 + .../tests/unit/data/_async/test_client.py | 18 +- .../data/_async/test_mutations_batcher.py | 2 +- .../data/_async/test_read_rows_acceptance.py | 2 +- .../tests/unit/data/_sync_autogen/__init__.py | 0 .../data/_sync_autogen/test__mutate_rows.py | 307 ++ .../data/_sync_autogen/test__read_rows.py | 354 ++ .../unit/data/_sync_autogen/test_client.py | 2889 +++++++++++++++++ .../_sync_autogen/test_mutations_batcher.py | 1078 ++++++ .../test_read_rows_acceptance.py | 328 ++ .../_async/test_query_iterator.py | 1 - .../execute_query/_sync_autogen/__init__.py | 0 .../_sync_autogen/test_query_iterator.py | 163 + .../test_execute_query_parameters_parsing.py | 2 +- .../tests/unit/data/test__helpers.py | 2 +- 54 files changed, 8448 insertions(+), 55 deletions(-) delete mode 100644 packages/google-cloud-bigtable/docs/async_data_client/async_data_usage.rst rename packages/google-cloud-bigtable/docs/{async_data_client => data_client}/async_data_client.rst (79%) rename packages/google-cloud-bigtable/docs/{async_data_client => data_client}/async_data_execute_query_iterator.rst (100%) rename packages/google-cloud-bigtable/docs/{async_data_client => data_client}/async_data_mutations_batcher.rst (100%) rename packages/google-cloud-bigtable/docs/{async_data_client => data_client}/async_data_table.rst (100%) rename packages/google-cloud-bigtable/docs/{async_data_client/async_data_exceptions.rst => data_client/common_data_exceptions.rst} (100%) rename packages/google-cloud-bigtable/docs/{async_data_client/async_data_execute_query_metadata.rst => data_client/common_data_execute_query_metadata.rst} (100%) rename 
packages/google-cloud-bigtable/docs/{async_data_client/async_data_execute_query_values.rst => data_client/common_data_execute_query_values.rst} (100%) rename packages/google-cloud-bigtable/docs/{async_data_client/async_data_mutations.rst => data_client/common_data_mutations.rst} (100%) rename packages/google-cloud-bigtable/docs/{async_data_client/async_data_read_modify_write_rules.rst => data_client/common_data_read_modify_write_rules.rst} (100%) rename packages/google-cloud-bigtable/docs/{async_data_client/async_data_read_rows_query.rst => data_client/common_data_read_rows_query.rst} (100%) rename packages/google-cloud-bigtable/docs/{async_data_client/async_data_row.rst => data_client/common_data_row.rst} (100%) rename packages/google-cloud-bigtable/docs/{async_data_client/async_data_row_filters.rst => data_client/common_data_row_filters.rst} (100%) create mode 100644 packages/google-cloud-bigtable/docs/data_client/data_client_usage.rst create mode 100644 packages/google-cloud-bigtable/docs/data_client/sync_data_client.rst create mode 100644 packages/google-cloud-bigtable/docs/data_client/sync_data_execute_query_iterator.rst create mode 100644 packages/google-cloud-bigtable/docs/data_client/sync_data_mutations_batcher.rst create mode 100644 packages/google-cloud-bigtable/docs/data_client/sync_data_table.rst create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/_mutate_rows.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/_read_rows.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/client.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/mutations_batcher.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_sync_autogen/execute_query_iterator.py create mode 100644 packages/google-cloud-bigtable/tests/system/data/test_system_autogen.py create mode 100644 packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/__init__.py create mode 100644 packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test__mutate_rows.py create mode 100644 packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test__read_rows.py create mode 100644 packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_client.py create mode 100644 packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_mutations_batcher.py create mode 100644 packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_read_rows_acceptance.py create mode 100644 packages/google-cloud-bigtable/tests/unit/data/execute_query/_sync_autogen/__init__.py create mode 100644 packages/google-cloud-bigtable/tests/unit/data/execute_query/_sync_autogen/test_query_iterator.py diff --git a/packages/google-cloud-bigtable/.cross_sync/README.md b/packages/google-cloud-bigtable/.cross_sync/README.md index 4214e0d78475..18a9aafdf6ce 100644 --- a/packages/google-cloud-bigtable/.cross_sync/README.md +++ b/packages/google-cloud-bigtable/.cross_sync/README.md @@ -62,7 +62,7 @@ CrossSync provides a set of annotations to mark up async classes, to guide the g ### Code Generation -Generation can be initiated using `python .cross_sync/generate.py .` +Generation can be initiated using `nox -s generate_sync` from the root of the project. This will find all classes with the `__CROSS_SYNC_OUTPUT__ = "path/to/output"` annotation, and generate a sync version of classes marked with `@CrossSync.convert_sync` at the output path. 
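For orientation, the sketch below shows roughly what the markup described in that README looks like. Only the `CrossSync` import and the `__CROSS_SYNC_OUTPUT__` annotation are taken from this patch; the module path, class, and method are hypothetical, and the conversion decorator is shown only in a comment because its exact signature is not part of this change.

```python
# Hypothetical sketch of CrossSync markup; not taken from this patch.
from google.cloud.bigtable.data._cross_sync import CrossSync

# Declares where the generated sync counterpart of this module should live
# (the real client uses "google.cloud.bigtable.data._sync_autogen.client").
__CROSS_SYNC_OUTPUT__ = "google.cloud.bigtable.data._sync_autogen.example"


# Classes to convert are marked with the decorator named in the README, e.g.:
#   @CrossSync.convert_sync
class ExampleAsync:
    async def fetch(self) -> str:
        # In the generated sync module, async constructs are rewritten into
        # their sync equivalents (for example, CrossSync.retry_target calls
        # become CrossSync._Sync_Impl.retry_target in the _sync_autogen files
        # added later in this patch).
        return "done"
```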
diff --git a/packages/google-cloud-bigtable/docs/async_data_client/async_data_usage.rst b/packages/google-cloud-bigtable/docs/async_data_client/async_data_usage.rst deleted file mode 100644 index 61d5837fdaea..000000000000 --- a/packages/google-cloud-bigtable/docs/async_data_client/async_data_usage.rst +++ /dev/null @@ -1,18 +0,0 @@ -Async Data Client -================= - -.. toctree:: - :maxdepth: 2 - - async_data_client - async_data_table - async_data_mutations_batcher - async_data_read_rows_query - async_data_row - async_data_row_filters - async_data_mutations - async_data_read_modify_write_rules - async_data_exceptions - async_data_execute_query_iterator - async_data_execute_query_values - async_data_execute_query_metadata diff --git a/packages/google-cloud-bigtable/docs/async_data_client/async_data_client.rst b/packages/google-cloud-bigtable/docs/data_client/async_data_client.rst similarity index 79% rename from packages/google-cloud-bigtable/docs/async_data_client/async_data_client.rst rename to packages/google-cloud-bigtable/docs/data_client/async_data_client.rst index 0e1d9e23e809..2ddcc090cbda 100644 --- a/packages/google-cloud-bigtable/docs/async_data_client/async_data_client.rst +++ b/packages/google-cloud-bigtable/docs/data_client/async_data_client.rst @@ -7,6 +7,6 @@ Bigtable Data Client Async performance benefits, the codebase should be designed to be async from the ground up. -.. autoclass:: google.cloud.bigtable.data._async.client.BigtableDataClientAsync +.. autoclass:: google.cloud.bigtable.data.BigtableDataClientAsync :members: :show-inheritance: diff --git a/packages/google-cloud-bigtable/docs/async_data_client/async_data_execute_query_iterator.rst b/packages/google-cloud-bigtable/docs/data_client/async_data_execute_query_iterator.rst similarity index 100% rename from packages/google-cloud-bigtable/docs/async_data_client/async_data_execute_query_iterator.rst rename to packages/google-cloud-bigtable/docs/data_client/async_data_execute_query_iterator.rst diff --git a/packages/google-cloud-bigtable/docs/async_data_client/async_data_mutations_batcher.rst b/packages/google-cloud-bigtable/docs/data_client/async_data_mutations_batcher.rst similarity index 100% rename from packages/google-cloud-bigtable/docs/async_data_client/async_data_mutations_batcher.rst rename to packages/google-cloud-bigtable/docs/data_client/async_data_mutations_batcher.rst diff --git a/packages/google-cloud-bigtable/docs/async_data_client/async_data_table.rst b/packages/google-cloud-bigtable/docs/data_client/async_data_table.rst similarity index 100% rename from packages/google-cloud-bigtable/docs/async_data_client/async_data_table.rst rename to packages/google-cloud-bigtable/docs/data_client/async_data_table.rst diff --git a/packages/google-cloud-bigtable/docs/async_data_client/async_data_exceptions.rst b/packages/google-cloud-bigtable/docs/data_client/common_data_exceptions.rst similarity index 100% rename from packages/google-cloud-bigtable/docs/async_data_client/async_data_exceptions.rst rename to packages/google-cloud-bigtable/docs/data_client/common_data_exceptions.rst diff --git a/packages/google-cloud-bigtable/docs/async_data_client/async_data_execute_query_metadata.rst b/packages/google-cloud-bigtable/docs/data_client/common_data_execute_query_metadata.rst similarity index 100% rename from packages/google-cloud-bigtable/docs/async_data_client/async_data_execute_query_metadata.rst rename to packages/google-cloud-bigtable/docs/data_client/common_data_execute_query_metadata.rst diff --git 
a/packages/google-cloud-bigtable/docs/async_data_client/async_data_execute_query_values.rst b/packages/google-cloud-bigtable/docs/data_client/common_data_execute_query_values.rst similarity index 100% rename from packages/google-cloud-bigtable/docs/async_data_client/async_data_execute_query_values.rst rename to packages/google-cloud-bigtable/docs/data_client/common_data_execute_query_values.rst diff --git a/packages/google-cloud-bigtable/docs/async_data_client/async_data_mutations.rst b/packages/google-cloud-bigtable/docs/data_client/common_data_mutations.rst similarity index 100% rename from packages/google-cloud-bigtable/docs/async_data_client/async_data_mutations.rst rename to packages/google-cloud-bigtable/docs/data_client/common_data_mutations.rst diff --git a/packages/google-cloud-bigtable/docs/async_data_client/async_data_read_modify_write_rules.rst b/packages/google-cloud-bigtable/docs/data_client/common_data_read_modify_write_rules.rst similarity index 100% rename from packages/google-cloud-bigtable/docs/async_data_client/async_data_read_modify_write_rules.rst rename to packages/google-cloud-bigtable/docs/data_client/common_data_read_modify_write_rules.rst diff --git a/packages/google-cloud-bigtable/docs/async_data_client/async_data_read_rows_query.rst b/packages/google-cloud-bigtable/docs/data_client/common_data_read_rows_query.rst similarity index 100% rename from packages/google-cloud-bigtable/docs/async_data_client/async_data_read_rows_query.rst rename to packages/google-cloud-bigtable/docs/data_client/common_data_read_rows_query.rst diff --git a/packages/google-cloud-bigtable/docs/async_data_client/async_data_row.rst b/packages/google-cloud-bigtable/docs/data_client/common_data_row.rst similarity index 100% rename from packages/google-cloud-bigtable/docs/async_data_client/async_data_row.rst rename to packages/google-cloud-bigtable/docs/data_client/common_data_row.rst diff --git a/packages/google-cloud-bigtable/docs/async_data_client/async_data_row_filters.rst b/packages/google-cloud-bigtable/docs/data_client/common_data_row_filters.rst similarity index 100% rename from packages/google-cloud-bigtable/docs/async_data_client/async_data_row_filters.rst rename to packages/google-cloud-bigtable/docs/data_client/common_data_row_filters.rst diff --git a/packages/google-cloud-bigtable/docs/data_client/data_client_usage.rst b/packages/google-cloud-bigtable/docs/data_client/data_client_usage.rst new file mode 100644 index 000000000000..f5bbac278f7b --- /dev/null +++ b/packages/google-cloud-bigtable/docs/data_client/data_client_usage.rst @@ -0,0 +1,39 @@ +Data Client +=========== + +Sync Surface +------------ + +.. toctree:: + :maxdepth: 3 + + sync_data_client + sync_data_table + sync_data_mutations_batcher + sync_data_execute_query_iterator + +Async Surface +------------- + +.. toctree:: + :maxdepth: 3 + + async_data_client + async_data_table + async_data_mutations_batcher + async_data_execute_query_iterator + +Common Classes +-------------- + +.. 
toctree:: + :maxdepth: 3 + + common_data_read_rows_query + common_data_row + common_data_row_filters + common_data_mutations + common_data_read_modify_write_rules + common_data_exceptions + common_data_execute_query_values + common_data_execute_query_metadata diff --git a/packages/google-cloud-bigtable/docs/data_client/sync_data_client.rst b/packages/google-cloud-bigtable/docs/data_client/sync_data_client.rst new file mode 100644 index 000000000000..cf7c00dad5b2 --- /dev/null +++ b/packages/google-cloud-bigtable/docs/data_client/sync_data_client.rst @@ -0,0 +1,6 @@ +Bigtable Data Client +~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: google.cloud.bigtable.data.BigtableDataClient + :members: + :show-inheritance: diff --git a/packages/google-cloud-bigtable/docs/data_client/sync_data_execute_query_iterator.rst b/packages/google-cloud-bigtable/docs/data_client/sync_data_execute_query_iterator.rst new file mode 100644 index 000000000000..6eb9f84db6b6 --- /dev/null +++ b/packages/google-cloud-bigtable/docs/data_client/sync_data_execute_query_iterator.rst @@ -0,0 +1,6 @@ +Execute Query Iterator +~~~~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: google.cloud.bigtable.data.execute_query.ExecuteQueryIterator + :members: + :show-inheritance: diff --git a/packages/google-cloud-bigtable/docs/data_client/sync_data_mutations_batcher.rst b/packages/google-cloud-bigtable/docs/data_client/sync_data_mutations_batcher.rst new file mode 100644 index 000000000000..2b7d1bfe094d --- /dev/null +++ b/packages/google-cloud-bigtable/docs/data_client/sync_data_mutations_batcher.rst @@ -0,0 +1,6 @@ +Mutations Batcher +~~~~~~~~~~~~~~~~~ + +.. automodule:: google.cloud.bigtable.data._sync_autogen.mutations_batcher + :members: + :show-inheritance: diff --git a/packages/google-cloud-bigtable/docs/data_client/sync_data_table.rst b/packages/google-cloud-bigtable/docs/data_client/sync_data_table.rst new file mode 100644 index 000000000000..95c91eb27981 --- /dev/null +++ b/packages/google-cloud-bigtable/docs/data_client/sync_data_table.rst @@ -0,0 +1,6 @@ +Table +~~~~~ + +.. autoclass:: google.cloud.bigtable.data.Table + :members: + :show-inheritance: diff --git a/packages/google-cloud-bigtable/docs/index.rst b/packages/google-cloud-bigtable/docs/index.rst index 4204e981d1f7..c7f9721f383e 100644 --- a/packages/google-cloud-bigtable/docs/index.rst +++ b/packages/google-cloud-bigtable/docs/index.rst @@ -5,10 +5,10 @@ Client Types ------------- .. 
toctree:: - :maxdepth: 2 + :maxdepth: 3 + data_client/data_client_usage classic_client/usage - async_data_client/async_data_usage Changelog diff --git a/packages/google-cloud-bigtable/docs/scripts/patch_devsite_toc.py b/packages/google-cloud-bigtable/docs/scripts/patch_devsite_toc.py index 456d0af7b8cf..5889300d265a 100644 --- a/packages/google-cloud-bigtable/docs/scripts/patch_devsite_toc.py +++ b/packages/google-cloud-bigtable/docs/scripts/patch_devsite_toc.py @@ -117,7 +117,8 @@ def __init__(self, dir_name, index_file_name): continue # bail when toc indented block is done if not line.startswith(" ") and not line.startswith("\t"): - break + in_toc = False + continue # extract entries self.items.append(self.extract_toc_entry(line.strip())) @@ -194,9 +195,7 @@ def validate_toc(toc_file_path, expected_section_list, added_sections): # Add secrtions for the async_data_client and classic_client directories toc_path = "_build/html/docfx_yaml/toc.yml" custom_sections = [ - TocSection( - dir_name="async_data_client", index_file_name="async_data_usage.rst" - ), + TocSection(dir_name="data_client", index_file_name="data_client_usage.rst"), TocSection(dir_name="classic_client", index_file_name="usage.rst"), ] add_sections(toc_path, custom_sections) @@ -210,7 +209,7 @@ def validate_toc(toc_file_path, expected_section_list, added_sections): "bigtable APIs", "Changelog", "Multiprocessing", - "Async Data Client", + "Data Client", "Classic Client", ], added_sections=custom_sections, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/__init__.py index 43ea69fdfa48..15f9bc1675f0 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/__init__.py @@ -17,8 +17,10 @@ from google.cloud.bigtable.data._async.client import BigtableDataClientAsync from google.cloud.bigtable.data._async.client import TableAsync - from google.cloud.bigtable.data._async.mutations_batcher import MutationsBatcherAsync +from google.cloud.bigtable.data._sync_autogen.client import BigtableDataClient +from google.cloud.bigtable.data._sync_autogen.client import Table +from google.cloud.bigtable.data._sync_autogen.mutations_batcher import MutationsBatcher from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery from google.cloud.bigtable.data.read_rows_query import RowRange @@ -52,13 +54,22 @@ from google.cloud.bigtable.data._async._read_rows import _ReadRowsOperationAsync from google.cloud.bigtable.data._async._mutate_rows import _MutateRowsOperationAsync +from google.cloud.bigtable_v2.services.bigtable.client import ( + BigtableClient, +) +from google.cloud.bigtable.data._sync_autogen._read_rows import _ReadRowsOperation +from google.cloud.bigtable.data._sync_autogen._mutate_rows import _MutateRowsOperation + from google.cloud.bigtable.data._cross_sync import CrossSync CrossSync.add_mapping("GapicClient", BigtableAsyncClient) +CrossSync._Sync_Impl.add_mapping("GapicClient", BigtableClient) CrossSync.add_mapping("_ReadRowsOperation", _ReadRowsOperationAsync) +CrossSync._Sync_Impl.add_mapping("_ReadRowsOperation", _ReadRowsOperation) CrossSync.add_mapping("_MutateRowsOperation", _MutateRowsOperationAsync) +CrossSync._Sync_Impl.add_mapping("_MutateRowsOperation", _MutateRowsOperation) CrossSync.add_mapping("MutationsBatcher", MutationsBatcherAsync) - +CrossSync._Sync_Impl.add_mapping("MutationsBatcher", MutationsBatcher) __version__: str = 
package_version.__version__ @@ -66,6 +77,9 @@ "BigtableDataClientAsync", "TableAsync", "MutationsBatcherAsync", + "BigtableDataClient", + "Table", + "MutationsBatcher", "RowKeySamples", "ReadRowsQuery", "RowRange", diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_mutate_rows.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_mutate_rows.py index c5795c464417..bf618bf04247 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_mutate_rows.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_mutate_rows.py @@ -1,4 +1,4 @@ -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_read_rows.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_read_rows.py index c02b3750d6fb..6d2fa3a7d717 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_read_rows.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_read_rows.py @@ -1,4 +1,4 @@ -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py index d560d7e1ee86..c7cc0de6bf7f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py @@ -1,4 +1,4 @@ -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -85,8 +85,10 @@ ) from google.cloud.bigtable.data._async.mutations_batcher import _MB_SIZE else: + from typing import Iterable # noqa: F401 from grpc import insecure_channel from google.cloud.bigtable_v2.services.bigtable.transports import BigtableGrpcTransport as TransportType # type: ignore + from google.cloud.bigtable.data._sync_autogen.mutations_batcher import _MB_SIZE if TYPE_CHECKING: @@ -100,6 +102,13 @@ from google.cloud.bigtable.data.execute_query._async.execute_query_iterator import ( ExecuteQueryIteratorAsync, ) + else: + from google.cloud.bigtable.data._sync_autogen.mutations_batcher import ( # noqa: F401 + MutationsBatcher, + ) + from google.cloud.bigtable.data.execute_query._sync_autogen.execute_query_iterator import ( # noqa: F401 + ExecuteQueryIterator, + ) __CROSS_SYNC_OUTPUT__ = "google.cloud.bigtable.data._sync_autogen.client" diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/mutations_batcher.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/mutations_batcher.py index 65070c880a8c..6e15bb5f33fb 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/mutations_batcher.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/mutations_batcher.py @@ -1,4 +1,4 @@ -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -14,7 +14,7 @@ # from __future__ import annotations -from typing import Sequence, TYPE_CHECKING +from typing import Sequence, TYPE_CHECKING, cast import atexit import warnings from collections import deque @@ -250,7 +250,7 @@ def __init__( ) # used by sync class to manage flush_internal tasks self._sync_flush_executor = ( - concurrent.futures.ThreadPoolExecutor(max_workers=1) + concurrent.futures.ThreadPoolExecutor(max_workers=4) if not CrossSync.is_async else None ) @@ -305,7 +305,7 @@ async def append(self, mutation_entry: RowMutationEntry): # TODO: return a future to track completion of this entry if self._closed.is_set(): raise RuntimeError("Cannot append to closed MutationsBatcher") - if isinstance(mutation_entry, Mutation): # type: ignore + if isinstance(cast(Mutation, mutation_entry), Mutation): raise ValueError( f"invalid mutation type: {type(mutation_entry).__name__}. Only RowMutationEntry objects are supported by batcher" ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_helpers.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_helpers.py index bd1c09d523f0..4c45e5c1c7dd 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_helpers.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_helpers.py @@ -29,6 +29,7 @@ if TYPE_CHECKING: import grpc from google.cloud.bigtable.data import TableAsync + from google.cloud.bigtable.data import Table """ Helper functions used in various places in the library. @@ -120,7 +121,7 @@ def _retry_exception_factory( def _get_timeouts( operation: float | TABLE_DEFAULT, attempt: float | None | TABLE_DEFAULT, - table: "TableAsync", + table: "TableAsync" | "Table", ) -> tuple[float, float]: """ Convert passed in timeout values to floats, using table defaults if necessary. @@ -207,7 +208,7 @@ def _get_error_type( def _get_retryable_errors( call_codes: Sequence["grpc.StatusCode" | int | type[Exception]] | TABLE_DEFAULT, - table: "TableAsync", + table: "TableAsync" | "Table", ) -> list[type[Exception]]: """ Convert passed in retryable error codes to a list of exception types. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/_mutate_rows.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/_mutate_rows.py new file mode 100644 index 000000000000..8e8c5ca89dde --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/_mutate_rows.py @@ -0,0 +1,182 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This file is automatically generated by CrossSync. Do not edit manually. 
+ +from __future__ import annotations +from typing import Sequence, TYPE_CHECKING +import functools +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +import google.cloud.bigtable.data.exceptions as bt_exceptions +from google.cloud.bigtable.data._helpers import _attempt_timeout_generator +from google.cloud.bigtable.data._helpers import _retry_exception_factory +from google.cloud.bigtable.data.mutations import _MUTATE_ROWS_REQUEST_MUTATION_LIMIT +from google.cloud.bigtable.data.mutations import _EntryWithProto +from google.cloud.bigtable.data._cross_sync import CrossSync + +if TYPE_CHECKING: + from google.cloud.bigtable.data.mutations import RowMutationEntry + from google.cloud.bigtable_v2.services.bigtable.client import ( + BigtableClient as GapicClientType, + ) + from google.cloud.bigtable.data._sync_autogen.client import Table as TableType + + +class _MutateRowsOperation: + """ + MutateRowsOperation manages the logic of sending a set of row mutations, + and retrying on failed entries. It manages this using the _run_attempt + function, which attempts to mutate all outstanding entries, and raises + _MutateRowsIncomplete if any retryable errors are encountered. + + Errors are exposed as a MutationsExceptionGroup, which contains a list of + exceptions organized by the related failed mutation entries. + + Args: + gapic_client: the client to use for the mutate_rows call + table: the table associated with the request + mutation_entries: a list of RowMutationEntry objects to send to the server + operation_timeout: the timeout to use for the entire operation, in seconds. + attempt_timeout: the timeout to use for each mutate_rows attempt, in seconds. + If not specified, the request will run until operation_timeout is reached. + """ + + def __init__( + self, + gapic_client: GapicClientType, + table: TableType, + mutation_entries: list["RowMutationEntry"], + operation_timeout: float, + attempt_timeout: float | None, + retryable_exceptions: Sequence[type[Exception]] = (), + ): + total_mutations = sum((len(entry.mutations) for entry in mutation_entries)) + if total_mutations > _MUTATE_ROWS_REQUEST_MUTATION_LIMIT: + raise ValueError( + f"mutate_rows requests can contain at most {_MUTATE_ROWS_REQUEST_MUTATION_LIMIT} mutations across all entries. Found {total_mutations}." 
+ ) + self._gapic_fn = functools.partial( + gapic_client.mutate_rows, + table_name=table.table_name, + app_profile_id=table.app_profile_id, + retry=None, + ) + self.is_retryable = retries.if_exception_type( + *retryable_exceptions, bt_exceptions._MutateRowsIncomplete + ) + sleep_generator = retries.exponential_sleep_generator(0.01, 2, 60) + self._operation = lambda: CrossSync._Sync_Impl.retry_target( + self._run_attempt, + self.is_retryable, + sleep_generator, + operation_timeout, + exception_factory=_retry_exception_factory, + ) + self.timeout_generator = _attempt_timeout_generator( + attempt_timeout, operation_timeout + ) + self.mutations = [_EntryWithProto(m, m._to_pb()) for m in mutation_entries] + self.remaining_indices = list(range(len(self.mutations))) + self.errors: dict[int, list[Exception]] = {} + + def start(self): + """Start the operation, and run until completion + + Raises: + MutationsExceptionGroup: if any mutations failed""" + try: + self._operation() + except Exception as exc: + incomplete_indices = self.remaining_indices.copy() + for idx in incomplete_indices: + self._handle_entry_error(idx, exc) + finally: + all_errors: list[Exception] = [] + for idx, exc_list in self.errors.items(): + if len(exc_list) == 0: + raise core_exceptions.ClientError( + f"Mutation {idx} failed with no associated errors" + ) + elif len(exc_list) == 1: + cause_exc = exc_list[0] + else: + cause_exc = bt_exceptions.RetryExceptionGroup(exc_list) + entry = self.mutations[idx].entry + all_errors.append( + bt_exceptions.FailedMutationEntryError(idx, entry, cause_exc) + ) + if all_errors: + raise bt_exceptions.MutationsExceptionGroup( + all_errors, len(self.mutations) + ) + + def _run_attempt(self): + """Run a single attempt of the mutate_rows rpc. + + Raises: + _MutateRowsIncomplete: if there are failed mutations eligible for + retry after the attempt is complete + GoogleAPICallError: if the gapic rpc fails""" + request_entries = [self.mutations[idx].proto for idx in self.remaining_indices] + active_request_indices = { + req_idx: orig_idx + for (req_idx, orig_idx) in enumerate(self.remaining_indices) + } + self.remaining_indices = [] + if not request_entries: + return + try: + result_generator = self._gapic_fn( + timeout=next(self.timeout_generator), + entries=request_entries, + retry=None, + ) + for result_list in result_generator: + for result in result_list.entries: + orig_idx = active_request_indices[result.index] + entry_error = core_exceptions.from_grpc_status( + result.status.code, + result.status.message, + details=result.status.details, + ) + if result.status.code != 0: + self._handle_entry_error(orig_idx, entry_error) + elif orig_idx in self.errors: + del self.errors[orig_idx] + del active_request_indices[result.index] + except Exception as exc: + for idx in active_request_indices.values(): + self._handle_entry_error(idx, exc) + raise + if self.remaining_indices: + raise bt_exceptions._MutateRowsIncomplete + + def _handle_entry_error(self, idx: int, exc: Exception): + """Add an exception to the list of exceptions for a given mutation index, + and add the index to the list of remaining indices if the exception is + retryable. 
+ + Args: + idx: the index of the mutation that failed + exc: the exception to add to the list""" + entry = self.mutations[idx].entry + self.errors.setdefault(idx, []).append(exc) + if ( + entry.is_idempotent() + and self.is_retryable(exc) + and (idx not in self.remaining_indices) + ): + self.remaining_indices.append(idx) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/_read_rows.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/_read_rows.py new file mode 100644 index 000000000000..92619c6a4740 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/_read_rows.py @@ -0,0 +1,304 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +# This file is automatically generated by CrossSync. Do not edit manually. + +from __future__ import annotations +from typing import Sequence, TYPE_CHECKING +from google.cloud.bigtable_v2.types import ReadRowsRequest as ReadRowsRequestPB +from google.cloud.bigtable_v2.types import ReadRowsResponse as ReadRowsResponsePB +from google.cloud.bigtable_v2.types import RowSet as RowSetPB +from google.cloud.bigtable_v2.types import RowRange as RowRangePB +from google.cloud.bigtable.data.row import Row, Cell +from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery +from google.cloud.bigtable.data.exceptions import InvalidChunk +from google.cloud.bigtable.data.exceptions import _RowSetComplete +from google.cloud.bigtable.data.exceptions import _ResetRow +from google.cloud.bigtable.data._helpers import _attempt_timeout_generator +from google.cloud.bigtable.data._helpers import _retry_exception_factory +from google.api_core import retry as retries +from google.api_core.retry import exponential_sleep_generator +from google.cloud.bigtable.data._cross_sync import CrossSync + +if TYPE_CHECKING: + from google.cloud.bigtable.data._sync_autogen.client import Table as TableType + + +class _ReadRowsOperation: + """ + ReadRowsOperation handles the logic of merging chunks from a ReadRowsResponse stream + into a stream of Row objects. + + ReadRowsOperation.merge_row_response_stream takes in a stream of ReadRowsResponse + and turns them into a stream of Row objects using an internal + StateMachine. + + ReadRowsOperation(request, client) handles row merging logic end-to-end, including + performing retries on stream errors. 
+ + Args: + query: The query to execute + table: The table to send the request to + operation_timeout: The total time to allow for the operation, in seconds + attempt_timeout: The time to allow for each individual attempt, in seconds + retryable_exceptions: A list of exceptions that should trigger a retry + """ + + __slots__ = ( + "attempt_timeout_gen", + "operation_timeout", + "request", + "table", + "_predicate", + "_last_yielded_row_key", + "_remaining_count", + ) + + def __init__( + self, + query: ReadRowsQuery, + table: TableType, + operation_timeout: float, + attempt_timeout: float, + retryable_exceptions: Sequence[type[Exception]] = (), + ): + self.attempt_timeout_gen = _attempt_timeout_generator( + attempt_timeout, operation_timeout + ) + self.operation_timeout = operation_timeout + if isinstance(query, dict): + self.request = ReadRowsRequestPB( + **query, + table_name=table.table_name, + app_profile_id=table.app_profile_id, + ) + else: + self.request = query._to_pb(table) + self.table = table + self._predicate = retries.if_exception_type(*retryable_exceptions) + self._last_yielded_row_key: bytes | None = None + self._remaining_count: int | None = self.request.rows_limit or None + + def start_operation(self) -> CrossSync._Sync_Impl.Iterable[Row]: + """Start the read_rows operation, retrying on retryable errors. + + Yields: + Row: The next row in the stream""" + return CrossSync._Sync_Impl.retry_target_stream( + self._read_rows_attempt, + self._predicate, + exponential_sleep_generator(0.01, 60, multiplier=2), + self.operation_timeout, + exception_factory=_retry_exception_factory, + ) + + def _read_rows_attempt(self) -> CrossSync._Sync_Impl.Iterable[Row]: + """Attempt a single read_rows rpc call. + This function is intended to be wrapped by retry logic, + which will call this function until it succeeds or + a non-retryable error is raised. 
+ + Yields: + Row: The next row in the stream""" + if self._last_yielded_row_key is not None: + try: + self.request.rows = self._revise_request_rowset( + row_set=self.request.rows, + last_seen_row_key=self._last_yielded_row_key, + ) + except _RowSetComplete: + return self.merge_rows(None) + if self._remaining_count is not None: + self.request.rows_limit = self._remaining_count + if self._remaining_count == 0: + return self.merge_rows(None) + gapic_stream = self.table.client._gapic_client.read_rows( + self.request, timeout=next(self.attempt_timeout_gen), retry=None + ) + chunked_stream = self.chunk_stream(gapic_stream) + return self.merge_rows(chunked_stream) + + def chunk_stream( + self, + stream: CrossSync._Sync_Impl.Awaitable[ + CrossSync._Sync_Impl.Iterable[ReadRowsResponsePB] + ], + ) -> CrossSync._Sync_Impl.Iterable[ReadRowsResponsePB.CellChunk]: + """process chunks out of raw read_rows stream + + Args: + stream: the raw read_rows stream from the gapic client + Yields: + ReadRowsResponsePB.CellChunk: the next chunk in the stream""" + for resp in stream: + resp = resp._pb + if resp.last_scanned_row_key: + if ( + self._last_yielded_row_key is not None + and resp.last_scanned_row_key <= self._last_yielded_row_key + ): + raise InvalidChunk("last scanned out of order") + self._last_yielded_row_key = resp.last_scanned_row_key + current_key = None + for c in resp.chunks: + if current_key is None: + current_key = c.row_key + if current_key is None: + raise InvalidChunk("first chunk is missing a row key") + elif ( + self._last_yielded_row_key + and current_key <= self._last_yielded_row_key + ): + raise InvalidChunk("row keys should be strictly increasing") + yield c + if c.reset_row: + current_key = None + elif c.commit_row: + self._last_yielded_row_key = current_key + if self._remaining_count is not None: + self._remaining_count -= 1 + if self._remaining_count < 0: + raise InvalidChunk("emit count exceeds row limit") + current_key = None + + @staticmethod + def merge_rows( + chunks: CrossSync._Sync_Impl.Iterable[ReadRowsResponsePB.CellChunk] | None, + ) -> CrossSync._Sync_Impl.Iterable[Row]: + """Merge chunks into rows + + Args: + chunks: the chunk stream to merge + Yields: + Row: the next row in the stream""" + if chunks is None: + return + it = chunks.__iter__() + while True: + try: + c = it.__next__() + except CrossSync._Sync_Impl.StopIteration: + return + row_key = c.row_key + if not row_key: + raise InvalidChunk("first row chunk is missing key") + cells = [] + family: str | None = None + qualifier: bytes | None = None + try: + while True: + if c.reset_row: + raise _ResetRow(c) + k = c.row_key + f = c.family_name.value + q = c.qualifier.value if c.HasField("qualifier") else None + if k and k != row_key: + raise InvalidChunk("unexpected new row key") + if f: + family = f + if q is not None: + qualifier = q + else: + raise InvalidChunk("new family without qualifier") + elif family is None: + raise InvalidChunk("missing family") + elif q is not None: + if family is None: + raise InvalidChunk("new qualifier without family") + qualifier = q + elif qualifier is None: + raise InvalidChunk("missing qualifier") + ts = c.timestamp_micros + labels = c.labels if c.labels else [] + value = c.value + if c.value_size > 0: + buffer = [value] + while c.value_size > 0: + c = it.__next__() + t = c.timestamp_micros + cl = c.labels + k = c.row_key + if ( + c.HasField("family_name") + and c.family_name.value != family + ): + raise InvalidChunk("family changed mid cell") + if ( + c.HasField("qualifier") + and 
c.qualifier.value != qualifier + ): + raise InvalidChunk("qualifier changed mid cell") + if t and t != ts: + raise InvalidChunk("timestamp changed mid cell") + if cl and cl != labels: + raise InvalidChunk("labels changed mid cell") + if k and k != row_key: + raise InvalidChunk("row key changed mid cell") + if c.reset_row: + raise _ResetRow(c) + buffer.append(c.value) + value = b"".join(buffer) + cells.append( + Cell(value, row_key, family, qualifier, ts, list(labels)) + ) + if c.commit_row: + yield Row(row_key, cells) + break + c = it.__next__() + except _ResetRow as e: + c = e.chunk + if ( + c.row_key + or c.HasField("family_name") + or c.HasField("qualifier") + or c.timestamp_micros + or c.labels + or c.value + ): + raise InvalidChunk("reset row with data") + continue + except CrossSync._Sync_Impl.StopIteration: + raise InvalidChunk("premature end of stream") + + @staticmethod + def _revise_request_rowset(row_set: RowSetPB, last_seen_row_key: bytes) -> RowSetPB: + """Revise the rows in the request to avoid ones we've already processed. + + Args: + row_set: the row set from the request + last_seen_row_key: the last row key encountered + Returns: + RowSetPB: the new rowset after adusting for the last seen key + Raises: + _RowSetComplete: if there are no rows left to process after the revision""" + if row_set is None or (not row_set.row_ranges and (not row_set.row_keys)): + last_seen = last_seen_row_key + return RowSetPB(row_ranges=[RowRangePB(start_key_open=last_seen)]) + adjusted_keys: list[bytes] = [ + k for k in row_set.row_keys if k > last_seen_row_key + ] + adjusted_ranges: list[RowRangePB] = [] + for row_range in row_set.row_ranges: + end_key = row_range.end_key_closed or row_range.end_key_open or None + if end_key is None or end_key > last_seen_row_key: + new_range = RowRangePB(row_range) + start_key = row_range.start_key_closed or row_range.start_key_open + if start_key is None or start_key <= last_seen_row_key: + new_range.start_key_open = last_seen_row_key + adjusted_ranges.append(new_range) + if len(adjusted_keys) == 0 and len(adjusted_ranges) == 0: + raise _RowSetComplete() + return RowSetPB(row_keys=adjusted_keys, row_ranges=adjusted_ranges) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/client.py new file mode 100644 index 000000000000..37e192147311 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/client.py @@ -0,0 +1,1234 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +# This file is automatically generated by CrossSync. Do not edit manually. 
+ +from __future__ import annotations +from typing import cast, Any, Optional, Set, Sequence, TYPE_CHECKING +import time +import warnings +import random +import os +import concurrent.futures +from functools import partial +from grpc import Channel +from google.cloud.bigtable.data.execute_query.values import ExecuteQueryValueType +from google.cloud.bigtable.data.execute_query.metadata import SqlType +from google.cloud.bigtable.data.execute_query._parameters_formatting import ( + _format_execute_query_params, +) +from google.cloud.bigtable_v2.services.bigtable.transports.base import ( + DEFAULT_CLIENT_INFO, +) +from google.cloud.bigtable_v2.types.bigtable import PingAndWarmRequest +from google.cloud.client import ClientWithProject +from google.cloud.environment_vars import BIGTABLE_EMULATOR +from google.api_core import retry as retries +from google.api_core.exceptions import DeadlineExceeded +from google.api_core.exceptions import ServiceUnavailable +from google.api_core.exceptions import Aborted +import google.auth.credentials +import google.auth._default +from google.api_core import client_options as client_options_lib +from google.cloud.bigtable.client import _DEFAULT_BIGTABLE_EMULATOR_CLIENT +from google.cloud.bigtable.data.row import Row +from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery +from google.cloud.bigtable.data.exceptions import FailedQueryShardError +from google.cloud.bigtable.data.exceptions import ShardedReadRowsExceptionGroup +from google.cloud.bigtable.data._helpers import TABLE_DEFAULT +from google.cloud.bigtable.data._helpers import _WarmedInstanceKey +from google.cloud.bigtable.data._helpers import _CONCURRENCY_LIMIT +from google.cloud.bigtable.data._helpers import _retry_exception_factory +from google.cloud.bigtable.data._helpers import _validate_timeouts +from google.cloud.bigtable.data._helpers import _get_error_type +from google.cloud.bigtable.data._helpers import _get_retryable_errors +from google.cloud.bigtable.data._helpers import _get_timeouts +from google.cloud.bigtable.data._helpers import _attempt_timeout_generator +from google.cloud.bigtable.data.mutations import Mutation, RowMutationEntry +from google.cloud.bigtable.data.read_modify_write_rules import ReadModifyWriteRule +from google.cloud.bigtable.data.row_filters import RowFilter +from google.cloud.bigtable.data.row_filters import StripValueTransformerFilter +from google.cloud.bigtable.data.row_filters import CellsRowLimitFilter +from google.cloud.bigtable.data.row_filters import RowFilterChain +from google.cloud.bigtable.data._cross_sync import CrossSync +from typing import Iterable +from grpc import insecure_channel +from google.cloud.bigtable_v2.services.bigtable.transports import ( + BigtableGrpcTransport as TransportType, +) +from google.cloud.bigtable.data._sync_autogen.mutations_batcher import _MB_SIZE + +if TYPE_CHECKING: + from google.cloud.bigtable.data._helpers import RowKeySamples + from google.cloud.bigtable.data._helpers import ShardedQuery + from google.cloud.bigtable.data._sync_autogen.mutations_batcher import ( + MutationsBatcher, + ) + from google.cloud.bigtable.data.execute_query._sync_autogen.execute_query_iterator import ( + ExecuteQueryIterator, + ) + + +@CrossSync._Sync_Impl.add_mapping_decorator("DataClient") +class BigtableDataClient(ClientWithProject): + def __init__( + self, + *, + project: str | None = None, + credentials: google.auth.credentials.Credentials | None = None, + client_options: dict[str, Any] + | "google.api_core.client_options.ClientOptions" + | 
None = None, + **kwargs, + ): + """Create a client instance for the Bigtable Data API + + + + Args: + project: the project which the client acts on behalf of. + If not passed, falls back to the default inferred + from the environment. + credentials: + Thehe OAuth2 Credentials to use for this + client. If not passed (and if no ``_http`` object is + passed), falls back to the default inferred from the + environment. + client_options: + Client options used to set user options + on the client. API Endpoint should be set through client_options. + Raises: + """ + if "pool_size" in kwargs: + warnings.warn("pool_size no longer supported") + client_info = DEFAULT_CLIENT_INFO + client_info.client_library_version = self._client_version() + if type(client_options) is dict: + client_options = client_options_lib.from_dict(client_options) + client_options = cast( + Optional[client_options_lib.ClientOptions], client_options + ) + custom_channel = None + self._emulator_host = os.getenv(BIGTABLE_EMULATOR) + if self._emulator_host is not None: + warnings.warn( + "Connecting to Bigtable emulator at {}".format(self._emulator_host), + RuntimeWarning, + stacklevel=2, + ) + custom_channel = insecure_channel(self._emulator_host) + if credentials is None: + credentials = google.auth.credentials.AnonymousCredentials() + if project is None: + project = _DEFAULT_BIGTABLE_EMULATOR_CLIENT + ClientWithProject.__init__( + self, + credentials=credentials, + project=project, + client_options=client_options, + ) + self._gapic_client = CrossSync._Sync_Impl.GapicClient( + credentials=credentials, + client_options=client_options, + client_info=client_info, + transport=lambda *args, **kwargs: TransportType( + *args, **kwargs, channel=custom_channel + ), + ) + self._is_closed = CrossSync._Sync_Impl.Event() + self.transport = cast(TransportType, self._gapic_client.transport) + self._active_instances: Set[_WarmedInstanceKey] = set() + self._instance_owners: dict[_WarmedInstanceKey, Set[int]] = {} + self._channel_init_time = time.monotonic() + self._channel_refresh_task: CrossSync._Sync_Impl.Task[None] | None = None + self._executor = ( + concurrent.futures.ThreadPoolExecutor() + if not CrossSync._Sync_Impl.is_async + else None + ) + if self._emulator_host is None: + try: + self._start_background_channel_refresh() + except RuntimeError: + warnings.warn( + f"{self.__class__.__name__} should be started in an asyncio event loop. 
Channel refresh will not be started", + RuntimeWarning, + stacklevel=2, + ) + + @staticmethod + def _client_version() -> str: + """Helper function to return the client version string for this client""" + version_str = f"{google.cloud.bigtable.__version__}-data" + return version_str + + def _start_background_channel_refresh(self) -> None: + """Starts a background task to ping and warm grpc channel + + Raises: + None""" + if ( + not self._channel_refresh_task + and (not self._emulator_host) + and (not self._is_closed.is_set()) + ): + CrossSync._Sync_Impl.verify_async_event_loop() + self._channel_refresh_task = CrossSync._Sync_Impl.create_task( + self._manage_channel, + sync_executor=self._executor, + task_name=f"{self.__class__.__name__} channel refresh", + ) + + def close(self, timeout: float | None = 2.0): + """Cancel all background tasks""" + self._is_closed.set() + if self._channel_refresh_task is not None: + self._channel_refresh_task.cancel() + CrossSync._Sync_Impl.wait([self._channel_refresh_task], timeout=timeout) + self.transport.close() + if self._executor: + self._executor.shutdown(wait=False) + self._channel_refresh_task = None + + def _ping_and_warm_instances( + self, + instance_key: _WarmedInstanceKey | None = None, + channel: Channel | None = None, + ) -> list[BaseException | None]: + """Prepares the backend for requests on a channel + + Pings each Bigtable instance registered in `_active_instances` on the client + + Args: + instance_key: if provided, only warm the instance associated with the key + channel: grpc channel to warm. If none, warms `self.transport.grpc_channel` + Returns: + list[BaseException | None]: sequence of results or exceptions from the ping requests + """ + channel = channel or self.transport.grpc_channel + instance_list = ( + [instance_key] if instance_key is not None else self._active_instances + ) + ping_rpc = channel.unary_unary( + "/google.bigtable.v2.Bigtable/PingAndWarm", + request_serializer=PingAndWarmRequest.serialize, + ) + partial_list = [ + partial( + ping_rpc, + request={"name": instance_name, "app_profile_id": app_profile_id}, + metadata=[ + ( + "x-goog-request-params", + f"name={instance_name}&app_profile_id={app_profile_id}", + ) + ], + wait_for_ready=True, + ) + for (instance_name, table_name, app_profile_id) in instance_list + ] + result_list = CrossSync._Sync_Impl.gather_partials( + partial_list, return_exceptions=True, sync_executor=self._executor + ) + return [r or None for r in result_list] + + def _manage_channel( + self, + refresh_interval_min: float = 60 * 35, + refresh_interval_max: float = 60 * 45, + grace_period: float = 60 * 10, + ) -> None: + """Background task that periodically refreshes and warms a grpc channel + + The backend will automatically close channels after 60 minutes, so + `refresh_interval` + `grace_period` should be < 60 minutes + + Runs continuously until the client is closed + + Args: + refresh_interval_min: minimum interval before initiating refresh + process in seconds. Actual interval will be a random value + between `refresh_interval_min` and `refresh_interval_max` + refresh_interval_max: maximum interval before initiating refresh + process in seconds. 
Actual interval will be a random value + between `refresh_interval_min` and `refresh_interval_max` + grace_period: time to allow previous channel to serve existing + requests before closing, in seconds""" + first_refresh = self._channel_init_time + random.uniform( + refresh_interval_min, refresh_interval_max + ) + next_sleep = max(first_refresh - time.monotonic(), 0) + if next_sleep > 0: + self._ping_and_warm_instances(channel=self.transport.grpc_channel) + while not self._is_closed.is_set(): + CrossSync._Sync_Impl.event_wait( + self._is_closed, next_sleep, async_break_early=False + ) + if self._is_closed.is_set(): + break + start_timestamp = time.monotonic() + old_channel = self.transport.grpc_channel + new_channel = self.transport.create_channel() + self._ping_and_warm_instances(channel=new_channel) + self.transport._grpc_channel = new_channel + if grace_period: + self._is_closed.wait(grace_period) + old_channel.close() + next_refresh = random.uniform(refresh_interval_min, refresh_interval_max) + next_sleep = max(next_refresh - (time.monotonic() - start_timestamp), 0) + + def _register_instance( + self, instance_id: str, owner: Table | ExecuteQueryIterator + ) -> None: + """Registers an instance with the client, and warms the channel for the instance + The client will periodically refresh grpc channel used to make + requests, and new channels will be warmed for each registered instance + Channels will not be refreshed unless at least one instance is registered + + Args: + instance_id: id of the instance to register. + owner: table that owns the instance. Owners will be tracked in + _instance_owners, and instances will only be unregistered when all + owners call _remove_instance_registration""" + instance_name = self._gapic_client.instance_path(self.project, instance_id) + instance_key = _WarmedInstanceKey( + instance_name, owner.table_name, owner.app_profile_id + ) + self._instance_owners.setdefault(instance_key, set()).add(id(owner)) + if instance_key not in self._active_instances: + self._active_instances.add(instance_key) + if self._channel_refresh_task: + self._ping_and_warm_instances(instance_key) + else: + self._start_background_channel_refresh() + + def _remove_instance_registration( + self, instance_id: str, owner: Table | "ExecuteQueryIterator" + ) -> bool: + """Removes an instance from the client's registered instances, to prevent + warming new channels for the instance + + If instance_id is not registered, or is still in use by other tables, returns False + + Args: + instance_id: id of the instance to remove + owner: table that owns the instance. Owners will be tracked in + _instance_owners, and instances will only be unregistered when all + owners call _remove_instance_registration + Returns: + bool: True if instance was removed, else False""" + instance_name = self._gapic_client.instance_path(self.project, instance_id) + instance_key = _WarmedInstanceKey( + instance_name, owner.table_name, owner.app_profile_id + ) + owner_list = self._instance_owners.get(instance_key, set()) + try: + owner_list.remove(id(owner)) + if len(owner_list) == 0: + self._active_instances.remove(instance_key) + return True + except KeyError: + return False + + def get_table(self, instance_id: str, table_id: str, *args, **kwargs) -> Table: + """Returns a table instance for making data API requests. All arguments are passed + directly to the Table constructor. + + + + Args: + instance_id: The Bigtable instance ID to associate with this client. 
+ instance_id is combined with the client's project to fully + specify the instance + table_id: The ID of the table. table_id is combined with the + instance_id and the client's project to fully specify the table + app_profile_id: The app profile to associate with requests. + https://cloud.google.com/bigtable/docs/app-profiles + default_read_rows_operation_timeout: The default timeout for read rows + operations, in seconds. If not set, defaults to 600 seconds (10 minutes) + default_read_rows_attempt_timeout: The default timeout for individual + read rows rpc requests, in seconds. If not set, defaults to 20 seconds + default_mutate_rows_operation_timeout: The default timeout for mutate rows + operations, in seconds. If not set, defaults to 600 seconds (10 minutes) + default_mutate_rows_attempt_timeout: The default timeout for individual + mutate rows rpc requests, in seconds. If not set, defaults to 60 seconds + default_operation_timeout: The default timeout for all other operations, in + seconds. If not set, defaults to 60 seconds + default_attempt_timeout: The default timeout for all other individual rpc + requests, in seconds. If not set, defaults to 20 seconds + default_read_rows_retryable_errors: a list of errors that will be retried + if encountered during read_rows and related operations. + Defaults to 4 (DeadlineExceeded), 14 (ServiceUnavailable), and 10 (Aborted) + default_mutate_rows_retryable_errors: a list of errors that will be retried + if encountered during mutate_rows and related operations. + Defaults to 4 (DeadlineExceeded) and 14 (ServiceUnavailable) + default_retryable_errors: a list of errors that will be retried if + encountered during all other operations. + Defaults to 4 (DeadlineExceeded) and 14 (ServiceUnavailable) + Returns: + Table: a table instance for making data API requests + Raises: + None""" + return Table(self, instance_id, table_id, *args, **kwargs) + + def execute_query( + self, + query: str, + instance_id: str, + *, + parameters: dict[str, ExecuteQueryValueType] | None = None, + parameter_types: dict[str, SqlType.Type] | None = None, + app_profile_id: str | None = None, + operation_timeout: float = 600, + attempt_timeout: float | None = 20, + retryable_errors: Sequence[type[Exception]] = ( + DeadlineExceeded, + ServiceUnavailable, + Aborted, + ), + ) -> "ExecuteQueryIterator": + """Executes an SQL query on an instance. + Returns an iterator to asynchronously stream back columns from selected rows. + + Failed requests within operation_timeout will be retried based on the + retryable_errors list until operation_timeout is reached. + + Args: + query: Query to be run on Bigtable instance. The query can use ``@param`` + placeholders to use parameter interpolation on the server. Values for all + parameters should be provided in ``parameters``. Types of parameters are + inferred but should be provided in ``parameter_types`` if the inference is + not possible (i.e. when value can be None, an empty list or an empty dict). + instance_id: The Bigtable instance ID to perform the query on. + instance_id is combined with the client's project to fully + specify the instance. + parameters: Dictionary with values for all parameters used in the ``query``. + parameter_types: Dictionary with types of parameters used in the ``query``. + Required to contain entries only for parameters whose type cannot be + detected automatically (i.e. the value can be None, an empty list or + an empty dict). + app_profile_id: The app profile to associate with requests. 
+ https://cloud.google.com/bigtable/docs/app-profiles + operation_timeout: the time budget for the entire operation, in seconds. + Failed requests will be retried within the budget. + Defaults to 600 seconds. + attempt_timeout: the time budget for an individual network request, in seconds. + If it takes longer than this time to complete, the request will be cancelled with + a DeadlineExceeded exception, and a retry will be attempted. + Defaults to the 20 seconds. + If None, defaults to operation_timeout. + retryable_errors: a list of errors that will be retried if encountered. + Defaults to 4 (DeadlineExceeded), 14 (ServiceUnavailable), and 10 (Aborted) + Returns: + ExecuteQueryIterator: an asynchronous iterator that yields rows returned by the query + Raises: + google.api_core.exceptions.DeadlineExceeded: raised after operation timeout + will be chained with a RetryExceptionGroup containing GoogleAPIError exceptions + from any retries that failed + google.api_core.exceptions.GoogleAPIError: raised if the request encounters an unrecoverable error + """ + warnings.warn( + "ExecuteQuery is in preview and may change in the future.", + category=RuntimeWarning, + ) + retryable_excs = [_get_error_type(e) for e in retryable_errors] + pb_params = _format_execute_query_params(parameters, parameter_types) + instance_name = self._gapic_client.instance_path(self.project, instance_id) + request_body = { + "instance_name": instance_name, + "app_profile_id": app_profile_id, + "query": query, + "params": pb_params, + "proto_format": {}, + } + return CrossSync._Sync_Impl.ExecuteQueryIterator( + self, + instance_id, + app_profile_id, + request_body, + attempt_timeout, + operation_timeout, + retryable_excs=retryable_excs, + ) + + def __enter__(self): + self._start_background_channel_refresh() + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.close() + self._gapic_client.__exit__(exc_type, exc_val, exc_tb) + + +@CrossSync._Sync_Impl.add_mapping_decorator("Table") +class Table: + """ + Main Data API surface + + Table object maintains table_id, and app_profile_id context, and passes them with + each call + """ + + def __init__( + self, + client: BigtableDataClient, + instance_id: str, + table_id: str, + app_profile_id: str | None = None, + *, + default_read_rows_operation_timeout: float = 600, + default_read_rows_attempt_timeout: float | None = 20, + default_mutate_rows_operation_timeout: float = 600, + default_mutate_rows_attempt_timeout: float | None = 60, + default_operation_timeout: float = 60, + default_attempt_timeout: float | None = 20, + default_read_rows_retryable_errors: Sequence[type[Exception]] = ( + DeadlineExceeded, + ServiceUnavailable, + Aborted, + ), + default_mutate_rows_retryable_errors: Sequence[type[Exception]] = ( + DeadlineExceeded, + ServiceUnavailable, + ), + default_retryable_errors: Sequence[type[Exception]] = ( + DeadlineExceeded, + ServiceUnavailable, + ), + ): + """Initialize a Table instance + + + + Args: + instance_id: The Bigtable instance ID to associate with this client. + instance_id is combined with the client's project to fully + specify the instance + table_id: The ID of the table. table_id is combined with the + instance_id and the client's project to fully specify the table + app_profile_id: The app profile to associate with requests. + https://cloud.google.com/bigtable/docs/app-profiles + default_read_rows_operation_timeout: The default timeout for read rows + operations, in seconds. 
If not set, defaults to 600 seconds (10 minutes) + default_read_rows_attempt_timeout: The default timeout for individual + read rows rpc requests, in seconds. If not set, defaults to 20 seconds + default_mutate_rows_operation_timeout: The default timeout for mutate rows + operations, in seconds. If not set, defaults to 600 seconds (10 minutes) + default_mutate_rows_attempt_timeout: The default timeout for individual + mutate rows rpc requests, in seconds. If not set, defaults to 60 seconds + default_operation_timeout: The default timeout for all other operations, in + seconds. If not set, defaults to 60 seconds + default_attempt_timeout: The default timeout for all other individual rpc + requests, in seconds. If not set, defaults to 20 seconds + default_read_rows_retryable_errors: a list of errors that will be retried + if encountered during read_rows and related operations. + Defaults to 4 (DeadlineExceeded), 14 (ServiceUnavailable), and 10 (Aborted) + default_mutate_rows_retryable_errors: a list of errors that will be retried + if encountered during mutate_rows and related operations. + Defaults to 4 (DeadlineExceeded) and 14 (ServiceUnavailable) + default_retryable_errors: a list of errors that will be retried if + encountered during all other operations. + Defaults to 4 (DeadlineExceeded) and 14 (ServiceUnavailable) + Raises: + None""" + _validate_timeouts( + default_operation_timeout, default_attempt_timeout, allow_none=True + ) + _validate_timeouts( + default_read_rows_operation_timeout, + default_read_rows_attempt_timeout, + allow_none=True, + ) + _validate_timeouts( + default_mutate_rows_operation_timeout, + default_mutate_rows_attempt_timeout, + allow_none=True, + ) + self.client = client + self.instance_id = instance_id + self.instance_name = self.client._gapic_client.instance_path( + self.client.project, instance_id + ) + self.table_id = table_id + self.table_name = self.client._gapic_client.table_path( + self.client.project, instance_id, table_id + ) + self.app_profile_id = app_profile_id + self.default_operation_timeout = default_operation_timeout + self.default_attempt_timeout = default_attempt_timeout + self.default_read_rows_operation_timeout = default_read_rows_operation_timeout + self.default_read_rows_attempt_timeout = default_read_rows_attempt_timeout + self.default_mutate_rows_operation_timeout = ( + default_mutate_rows_operation_timeout + ) + self.default_mutate_rows_attempt_timeout = default_mutate_rows_attempt_timeout + self.default_read_rows_retryable_errors = ( + default_read_rows_retryable_errors or () + ) + self.default_mutate_rows_retryable_errors = ( + default_mutate_rows_retryable_errors or () + ) + self.default_retryable_errors = default_retryable_errors or () + try: + self._register_instance_future = CrossSync._Sync_Impl.create_task( + self.client._register_instance, + self.instance_id, + self, + sync_executor=self.client._executor, + ) + except RuntimeError as e: + raise RuntimeError( + f"{self.__class__.__name__} must be created within an async event loop context." + ) from e + + def read_rows_stream( + self, + query: ReadRowsQuery, + *, + operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS, + attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS, + retryable_errors: Sequence[type[Exception]] + | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS, + ) -> Iterable[Row]: + """Read a set of rows from the table, based on the specified query. + Returns an iterator to asynchronously stream back row data. 
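As a brief illustration of the Table configuration described above, here is a minimal sketch of constructing the sync data client and a table with custom read defaults; the project, instance, and table IDs are placeholders and application default credentials are assumed.

    from google.cloud.bigtable.data import BigtableDataClient

    # Placeholder identifiers; substitute real project/instance/table IDs.
    client = BigtableDataClient(project="my-project")
    with client.get_table(
        "my-instance",
        "my-table",
        default_read_rows_operation_timeout=120,
        default_read_rows_attempt_timeout=10,
    ) as table:
        print(table.table_name)
    client.close()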
+ + Failed requests within operation_timeout will be retried based on the + retryable_errors list until operation_timeout is reached. + + Args: + query: contains details about which rows to return + operation_timeout: the time budget for the entire operation, in seconds. + Failed requests will be retried within the budget. + Defaults to the Table's default_read_rows_operation_timeout + attempt_timeout: the time budget for an individual network request, in seconds. + If it takes longer than this time to complete, the request will be cancelled with + a DeadlineExceeded exception, and a retry will be attempted. + Defaults to the Table's default_read_rows_attempt_timeout. + If None, defaults to operation_timeout. + retryable_errors: a list of errors that will be retried if encountered. + Defaults to the Table's default_read_rows_retryable_errors + Returns: + Iterable[Row]: an asynchronous iterator that yields rows returned by the query + Raises: + google.api_core.exceptions.DeadlineExceeded: raised after operation timeout + will be chained with a RetryExceptionGroup containing GoogleAPIError exceptions + from any retries that failed + google.api_core.exceptions.GoogleAPIError: raised if the request encounters an unrecoverable error + """ + (operation_timeout, attempt_timeout) = _get_timeouts( + operation_timeout, attempt_timeout, self + ) + retryable_excs = _get_retryable_errors(retryable_errors, self) + row_merger = CrossSync._Sync_Impl._ReadRowsOperation( + query, + self, + operation_timeout=operation_timeout, + attempt_timeout=attempt_timeout, + retryable_exceptions=retryable_excs, + ) + return row_merger.start_operation() + + def read_rows( + self, + query: ReadRowsQuery, + *, + operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS, + attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS, + retryable_errors: Sequence[type[Exception]] + | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS, + ) -> list[Row]: + """Read a set of rows from the table, based on the specified query. + Returns results as a list of Row objects when the request is complete. + For streamed results, use read_rows_stream. + + Failed requests within operation_timeout will be retried based on the + retryable_errors list until operation_timeout is reached. + + Args: + query: contains details about which rows to return + operation_timeout: the time budget for the entire operation, in seconds. + Failed requests will be retried within the budget. + Defaults to the Table's default_read_rows_operation_timeout + attempt_timeout: the time budget for an individual network request, in seconds. + If it takes longer than this time to complete, the request will be cancelled with + a DeadlineExceeded exception, and a retry will be attempted. + Defaults to the Table's default_read_rows_attempt_timeout. + If None, defaults to operation_timeout. + retryable_errors: a list of errors that will be retried if encountered. + Defaults to the Table's default_read_rows_retryable_errors.
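A minimal usage sketch of the streaming read described above; it assumes a `table` obtained from `client.get_table()`, and the row range and key layout are illustrative.

    from google.cloud.bigtable.data import ReadRowsQuery, RowRange

    # Stream rows in a key range and print their keys as they arrive.
    query = ReadRowsQuery(row_ranges=RowRange(start_key=b"user#000", end_key=b"user#999"))
    for row in table.read_rows_stream(query):
        print(row.row_key)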
+ Returns: + list[Row]: a list of Rows returned by the query + Raises: + google.api_core.exceptions.DeadlineExceeded: raised after operation timeout + will be chained with a RetryExceptionGroup containing GoogleAPIError exceptions + from any retries that failed + google.api_core.exceptions.GoogleAPIError: raised if the request encounters an unrecoverable error + """ + row_generator = self.read_rows_stream( + query, + operation_timeout=operation_timeout, + attempt_timeout=attempt_timeout, + retryable_errors=retryable_errors, + ) + return [row for row in row_generator] + + def read_row( + self, + row_key: str | bytes, + *, + row_filter: RowFilter | None = None, + operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS, + attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS, + retryable_errors: Sequence[type[Exception]] + | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS, + ) -> Row | None: + """Read a single row from the table, based on the specified key. + + Failed requests within operation_timeout will be retried based on the + retryable_errors list until operation_timeout is reached. + + Args: + row_key: the key of the row to read + row_filter: an optional filter to apply to the contents of the row + operation_timeout: the time budget for the entire operation, in seconds. + Failed requests will be retried within the budget. + Defaults to the Table's default_read_rows_operation_timeout + attempt_timeout: the time budget for an individual network request, in seconds. + If it takes longer than this time to complete, the request will be cancelled with + a DeadlineExceeded exception, and a retry will be attempted. + Defaults to the Table's default_read_rows_attempt_timeout. + If None, defaults to operation_timeout. + retryable_errors: a list of errors that will be retried if encountered. + Defaults to the Table's default_read_rows_retryable_errors. + Returns: + Row | None: a Row object if the row exists, otherwise None + Raises: + google.api_core.exceptions.DeadlineExceeded: raised after operation timeout + will be chained with a RetryExceptionGroup containing GoogleAPIError exceptions + from any retries that failed + google.api_core.exceptions.GoogleAPIError: raised if the request encounters an unrecoverable error + """ + if row_key is None: + raise ValueError("row_key must be string or bytes") + query = ReadRowsQuery(row_keys=row_key, row_filter=row_filter, limit=1) + results = self.read_rows( + query, + operation_timeout=operation_timeout, + attempt_timeout=attempt_timeout, + retryable_errors=retryable_errors, + ) + if len(results) == 0: + return None + return results[0] + + def read_rows_sharded( + self, + sharded_query: ShardedQuery, + *, + operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS, + attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS, + retryable_errors: Sequence[type[Exception]] + | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS, + ) -> list[Row]: + """Runs a sharded query in parallel, then returns the results in a single list. + Results will be returned in the order of the input queries. + + This function is intended to be run on the results of a query.shard() call. + For example:: + + table_shard_keys = table.sample_row_keys() + query = ReadRowsQuery(...) + shard_queries = query.shard(table_shard_keys) + results = table.read_rows_sharded(shard_queries) + + Args: + sharded_query: a sharded query to execute + operation_timeout: the time budget for the entire operation, in seconds. + Failed requests will be retried within the budget.
+ Defaults to the Table's default_read_rows_operation_timeout + attempt_timeout: the time budget for an individual network request, in seconds. + If it takes longer than this time to complete, the request will be cancelled with + a DeadlineExceeded exception, and a retry will be attempted. + Defaults to the Table's default_read_rows_attempt_timeout. + If None, defaults to operation_timeout. + retryable_errors: a list of errors that will be retried if encountered. + Defaults to the Table's default_read_rows_retryable_errors. + Returns: + list[Row]: a list of Rows returned by the query + Raises: + ShardedReadRowsExceptionGroup: if any of the queries failed + ValueError: if the query_list is empty""" + if not sharded_query: + raise ValueError("empty sharded_query") + (operation_timeout, attempt_timeout) = _get_timeouts( + operation_timeout, attempt_timeout, self + ) + rpc_timeout_generator = _attempt_timeout_generator( + operation_timeout, operation_timeout + ) + concurrency_sem = CrossSync._Sync_Impl.Semaphore(_CONCURRENCY_LIMIT) + + def read_rows_with_semaphore(query): + with concurrency_sem: + shard_timeout = next(rpc_timeout_generator) + if shard_timeout <= 0: + raise DeadlineExceeded( + "Operation timeout exceeded before starting query" + ) + return self.read_rows( + query, + operation_timeout=shard_timeout, + attempt_timeout=min(attempt_timeout, shard_timeout), + retryable_errors=retryable_errors, + ) + + routine_list = [ + partial(read_rows_with_semaphore, query) for query in sharded_query + ] + batch_result = CrossSync._Sync_Impl.gather_partials( + routine_list, return_exceptions=True, sync_executor=self.client._executor + ) + error_dict = {} + shard_idx = 0 + results_list = [] + for result in batch_result: + if isinstance(result, Exception): + error_dict[shard_idx] = result + elif isinstance(result, BaseException): + raise result + else: + results_list.extend(result) + shard_idx += 1 + if error_dict: + raise ShardedReadRowsExceptionGroup( + [ + FailedQueryShardError(idx, sharded_query[idx], e) + for (idx, e) in error_dict.items() + ], + results_list, + len(sharded_query), + ) + return results_list + + def row_exists( + self, + row_key: str | bytes, + *, + operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS, + attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS, + retryable_errors: Sequence[type[Exception]] + | TABLE_DEFAULT = TABLE_DEFAULT.READ_ROWS, + ) -> bool: + """Return a boolean indicating whether the specified row exists in the table. + uses the filters: chain(limit cells per row = 1, strip value) + + Args: + row_key: the key of the row to check + operation_timeout: the time budget for the entire operation, in seconds. + Failed requests will be retried within the budget. + Defaults to the Table's default_read_rows_operation_timeout + attempt_timeout: the time budget for an individual network request, in seconds. + If it takes longer than this time to complete, the request will be cancelled with + a DeadlineExceeded exception, and a retry will be attempted. + Defaults to the Table's default_read_rows_attempt_timeout. + If None, defaults to operation_timeout. + retryable_errors: a list of errors that will be retried if encountered. + Defaults to the Table's default_read_rows_retryable_errors. 
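A quick sketch of the point-lookup helpers (`row_exists` and `read_row`) against an existing `table`; the row key, family, and qualifier names are placeholders.

    # Check for a row before reading it, then walk its cells.
    if table.row_exists(b"user#100"):
        row = table.read_row(b"user#100")
        if row is not None:
            for cell in row.cells:
                print(cell.family, cell.qualifier, cell.value)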
+ Returns: + bool: a bool indicating whether the row exists + Raises: + google.api_core.exceptions.DeadlineExceeded: raised after operation timeout + will be chained with a RetryExceptionGroup containing GoogleAPIError exceptions + from any retries that failed + google.api_core.exceptions.GoogleAPIError: raised if the request encounters an unrecoverable error + """ + if row_key is None: + raise ValueError("row_key must be string or bytes") + strip_filter = StripValueTransformerFilter(flag=True) + limit_filter = CellsRowLimitFilter(1) + chain_filter = RowFilterChain(filters=[limit_filter, strip_filter]) + query = ReadRowsQuery(row_keys=row_key, limit=1, row_filter=chain_filter) + results = self.read_rows( + query, + operation_timeout=operation_timeout, + attempt_timeout=attempt_timeout, + retryable_errors=retryable_errors, + ) + return len(results) > 0 + + def sample_row_keys( + self, + *, + operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.DEFAULT, + attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.DEFAULT, + retryable_errors: Sequence[type[Exception]] + | TABLE_DEFAULT = TABLE_DEFAULT.DEFAULT, + ) -> RowKeySamples: + """Return a set of RowKeySamples that delimit contiguous sections of the table of + approximately equal size + + RowKeySamples output can be used with ReadRowsQuery.shard() to create a sharded query that + can be parallelized across multiple backend nodes read_rows and read_rows_stream + requests will call sample_row_keys internally for this purpose when sharding is enabled + + RowKeySamples is simply a type alias for list[tuple[bytes, int]]; a list of + row_keys, along with offset positions in the table + + Args: + operation_timeout: the time budget for the entire operation, in seconds. + Failed requests will be retried within the budget.i + Defaults to the Table's default_operation_timeout + attempt_timeout: the time budget for an individual network request, in seconds. + If it takes longer than this time to complete, the request will be cancelled with + a DeadlineExceeded exception, and a retry will be attempted. + Defaults to the Table's default_attempt_timeout. + If None, defaults to operation_timeout. + retryable_errors: a list of errors that will be retried if encountered. + Defaults to the Table's default_retryable_errors. 
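The synchronous form of the sharding pattern described above, assuming an existing `table`: sample the key boundaries, shard a query along them, and read the shards in parallel.

    from google.cloud.bigtable.data import ReadRowsQuery

    # Split a full-table scan into shards aligned with tablet boundaries.
    shard_keys = table.sample_row_keys()
    query = ReadRowsQuery()
    shard_queries = query.shard(shard_keys)
    rows = table.read_rows_sharded(shard_queries)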
+ Returns: + RowKeySamples: a set of RowKeySamples the delimit contiguous sections of the table + Raises: + google.api_core.exceptions.DeadlineExceeded: raised after operation timeout + will be chained with a RetryExceptionGroup containing GoogleAPIError exceptions + from any retries that failed + google.api_core.exceptions.GoogleAPIError: raised if the request encounters an unrecoverable error + """ + (operation_timeout, attempt_timeout) = _get_timeouts( + operation_timeout, attempt_timeout, self + ) + attempt_timeout_gen = _attempt_timeout_generator( + attempt_timeout, operation_timeout + ) + retryable_excs = _get_retryable_errors(retryable_errors, self) + predicate = retries.if_exception_type(*retryable_excs) + sleep_generator = retries.exponential_sleep_generator(0.01, 2, 60) + + def execute_rpc(): + results = self.client._gapic_client.sample_row_keys( + table_name=self.table_name, + app_profile_id=self.app_profile_id, + timeout=next(attempt_timeout_gen), + retry=None, + ) + return [(s.row_key, s.offset_bytes) for s in results] + + return CrossSync._Sync_Impl.retry_target( + execute_rpc, + predicate, + sleep_generator, + operation_timeout, + exception_factory=_retry_exception_factory, + ) + + def mutations_batcher( + self, + *, + flush_interval: float | None = 5, + flush_limit_mutation_count: int | None = 1000, + flush_limit_bytes: int = 20 * _MB_SIZE, + flow_control_max_mutation_count: int = 100000, + flow_control_max_bytes: int = 100 * _MB_SIZE, + batch_operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS, + batch_attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS, + batch_retryable_errors: Sequence[type[Exception]] + | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS, + ) -> "MutationsBatcher": + """Returns a new mutations batcher instance. + + Can be used to iteratively add mutations that are flushed as a group, + to avoid excess network calls + + Args: + flush_interval: Automatically flush every flush_interval seconds. If None, + a table default will be used + flush_limit_mutation_count: Flush immediately after flush_limit_mutation_count + mutations are added across all entries. If None, this limit is ignored. + flush_limit_bytes: Flush immediately after flush_limit_bytes bytes are added. + flow_control_max_mutation_count: Maximum number of inflight mutations. + flow_control_max_bytes: Maximum number of inflight bytes. + batch_operation_timeout: timeout for each mutate_rows operation, in seconds. + Defaults to the Table's default_mutate_rows_operation_timeout + batch_attempt_timeout: timeout for each individual request, in seconds. + Defaults to the Table's default_mutate_rows_attempt_timeout. + If None, defaults to batch_operation_timeout. + batch_retryable_errors: a list of errors that will be retried if encountered. + Defaults to the Table's default_mutate_rows_retryable_errors. 
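A minimal sketch of the batcher factory just described, assuming an existing `table`; the row keys, family, and qualifier are placeholders.

    from google.cloud.bigtable.data.mutations import RowMutationEntry, SetCell

    # Queue many small writes; the batcher groups them into bulk RPCs.
    with table.mutations_batcher() as batcher:
        for i in range(1000):
            entry = RowMutationEntry(
                f"user#{i}".encode(),
                [SetCell("profile", "views", i)],
            )
            batcher.append(entry)
    # Leaving the context flushes remaining entries and raises any collected failures.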
+ Returns: + MutationsBatcher: a MutationsBatcher context manager that can batch requests + """ + return CrossSync._Sync_Impl.MutationsBatcher( + self, + flush_interval=flush_interval, + flush_limit_mutation_count=flush_limit_mutation_count, + flush_limit_bytes=flush_limit_bytes, + flow_control_max_mutation_count=flow_control_max_mutation_count, + flow_control_max_bytes=flow_control_max_bytes, + batch_operation_timeout=batch_operation_timeout, + batch_attempt_timeout=batch_attempt_timeout, + batch_retryable_errors=batch_retryable_errors, + ) + + def mutate_row( + self, + row_key: str | bytes, + mutations: list[Mutation] | Mutation, + *, + operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.DEFAULT, + attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.DEFAULT, + retryable_errors: Sequence[type[Exception]] + | TABLE_DEFAULT = TABLE_DEFAULT.DEFAULT, + ): + """Mutates a row atomically. + + Cells already present in the row are left unchanged unless explicitly changed + by ``mutation``. + + Idempotent operations (i.e, all mutations have an explicit timestamp) will be + retried on server failure. Non-idempotent operations will not. + + Args: + row_key: the row to apply mutations to + mutations: the set of mutations to apply to the row + operation_timeout: the time budget for the entire operation, in seconds. + Failed requests will be retried within the budget. + Defaults to the Table's default_operation_timeout + attempt_timeout: the time budget for an individual network request, in seconds. + If it takes longer than this time to complete, the request will be cancelled with + a DeadlineExceeded exception, and a retry will be attempted. + Defaults to the Table's default_attempt_timeout. + If None, defaults to operation_timeout. + retryable_errors: a list of errors that will be retried if encountered. + Only idempotent mutations will be retried. Defaults to the Table's + default_retryable_errors. + Raises: + google.api_core.exceptions.DeadlineExceeded: raised after operation timeout + will be chained with a RetryExceptionGroup containing all + GoogleAPIError exceptions from any retries that failed + google.api_core.exceptions.GoogleAPIError: raised on non-idempotent operations that cannot be + safely retried. 
+ ValueError: if invalid arguments are provided""" + (operation_timeout, attempt_timeout) = _get_timeouts( + operation_timeout, attempt_timeout, self + ) + if not mutations: + raise ValueError("No mutations provided") + mutations_list = mutations if isinstance(mutations, list) else [mutations] + if all((mutation.is_idempotent() for mutation in mutations_list)): + predicate = retries.if_exception_type( + *_get_retryable_errors(retryable_errors, self) + ) + else: + predicate = retries.if_exception_type() + sleep_generator = retries.exponential_sleep_generator(0.01, 2, 60) + target = partial( + self.client._gapic_client.mutate_row, + row_key=row_key.encode("utf-8") if isinstance(row_key, str) else row_key, + mutations=[mutation._to_pb() for mutation in mutations_list], + table_name=self.table_name, + app_profile_id=self.app_profile_id, + timeout=attempt_timeout, + retry=None, + ) + return CrossSync._Sync_Impl.retry_target( + target, + predicate, + sleep_generator, + operation_timeout, + exception_factory=_retry_exception_factory, + ) + + def bulk_mutate_rows( + self, + mutation_entries: list[RowMutationEntry], + *, + operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS, + attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS, + retryable_errors: Sequence[type[Exception]] + | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS, + ): + """Applies mutations for multiple rows in a single batched request. + + Each individual RowMutationEntry is applied atomically, but separate entries + may be applied in arbitrary order (even for entries targeting the same row). + In total, the row_mutations can contain at most 100000 individual mutations + across all entries. + + Idempotent entries (i.e., entries whose mutations all have explicit timestamps) + will be retried on failure. Non-idempotent entries will not, and will be reported in a + raised exception group. + + Args: + mutation_entries: the batches of mutations to apply + Each entry will be applied atomically, but entries will be applied + in arbitrary order + operation_timeout: the time budget for the entire operation, in seconds. + Failed requests will be retried within the budget. + Defaults to the Table's default_mutate_rows_operation_timeout + attempt_timeout: the time budget for an individual network request, in seconds. + If it takes longer than this time to complete, the request will be cancelled with + a DeadlineExceeded exception, and a retry will be attempted. + Defaults to the Table's default_mutate_rows_attempt_timeout. + If None, defaults to operation_timeout. + retryable_errors: a list of errors that will be retried if encountered.
+ Defaults to the Table's default_mutate_rows_retryable_errors + Raises: + MutationsExceptionGroup: if one or more mutations fails + Contains details about any failed entries in .exceptions + ValueError: if invalid arguments are provided""" + (operation_timeout, attempt_timeout) = _get_timeouts( + operation_timeout, attempt_timeout, self + ) + retryable_excs = _get_retryable_errors(retryable_errors, self) + operation = CrossSync._Sync_Impl._MutateRowsOperation( + self.client._gapic_client, + self, + mutation_entries, + operation_timeout, + attempt_timeout, + retryable_exceptions=retryable_excs, + ) + operation.start() + + def check_and_mutate_row( + self, + row_key: str | bytes, + predicate: RowFilter | None, + *, + true_case_mutations: Mutation | list[Mutation] | None = None, + false_case_mutations: Mutation | list[Mutation] | None = None, + operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.DEFAULT, + ) -> bool: + """Mutates a row atomically based on the output of a predicate filter + + Non-idempotent operation: will not be retried + + Args: + row_key: the key of the row to mutate + predicate: the filter to be applied to the contents of the specified row. + Depending on whether or not any results are yielded, + either true_case_mutations or false_case_mutations will be executed. + If None, checks that the row contains any values at all. + true_case_mutations: + Changes to be atomically applied to the specified row if + predicate yields at least one cell when + applied to row_key. Entries are applied in order, + meaning that earlier mutations can be masked by later + ones. Must contain at least one entry if + false_case_mutations is empty, and at most 100000. + false_case_mutations: + Changes to be atomically applied to the specified row if + predicate_filter does not yield any cells when + applied to row_key. Entries are applied in order, + meaning that earlier mutations can be masked by later + ones. Must contain at least one entry if + `true_case_mutations` is empty, and at most 100000. + operation_timeout: the time budget for the entire operation, in seconds. + Failed requests will not be retried. 
Defaults to the Table's default_operation_timeout + Returns: + bool indicating whether the predicate was true or false + Raises: + google.api_core.exceptions.GoogleAPIError: exceptions from grpc call""" + (operation_timeout, _) = _get_timeouts(operation_timeout, None, self) + if true_case_mutations is not None and ( + not isinstance(true_case_mutations, list) + ): + true_case_mutations = [true_case_mutations] + true_case_list = [m._to_pb() for m in true_case_mutations or []] + if false_case_mutations is not None and ( + not isinstance(false_case_mutations, list) + ): + false_case_mutations = [false_case_mutations] + false_case_list = [m._to_pb() for m in false_case_mutations or []] + result = self.client._gapic_client.check_and_mutate_row( + true_mutations=true_case_list, + false_mutations=false_case_list, + predicate_filter=predicate._to_pb() if predicate is not None else None, + row_key=row_key.encode("utf-8") if isinstance(row_key, str) else row_key, + table_name=self.table_name, + app_profile_id=self.app_profile_id, + timeout=operation_timeout, + retry=None, + ) + return result.predicate_matched + + def read_modify_write_row( + self, + row_key: str | bytes, + rules: ReadModifyWriteRule | list[ReadModifyWriteRule], + *, + operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.DEFAULT, + ) -> Row: + """Reads and modifies a row atomically according to input ReadModifyWriteRules, + and returns the contents of all modified cells + + The new value for the timestamp is the greater of the existing timestamp or + the current server time. + + Non-idempotent operation: will not be retried + + Args: + row_key: the key of the row to apply read/modify/write rules to + rules: A rule or set of rules to apply to the row. + Rules are applied in order, meaning that earlier rules will affect the + results of later ones. + operation_timeout: the time budget for the entire operation, in seconds. + Failed requests will not be retried. + Defaults to the Table's default_operation_timeout. 
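A short sketch of the conditional write described above, assuming an existing `table`; with a `None` predicate the true-case mutations run only if the row already contains any cell, and the family/qualifier names are placeholders.

    from google.cloud.bigtable.data.mutations import SetCell

    # Mark whether the row had been seen before this write.
    row_seen = table.check_and_mutate_row(
        b"user#100",
        None,
        true_case_mutations=SetCell("profile", "seen", "yes"),
        false_case_mutations=SetCell("profile", "seen", "no"),
    )
    print(row_seen)  # True if the predicate matched (row had existing cells)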
+ Returns: + Row: a Row containing cell data that was modified as part of the operation + Raises: + google.api_core.exceptions.GoogleAPIError: exceptions from grpc call + ValueError: if invalid arguments are provided""" + (operation_timeout, _) = _get_timeouts(operation_timeout, None, self) + if operation_timeout <= 0: + raise ValueError("operation_timeout must be greater than 0") + if rules is not None and (not isinstance(rules, list)): + rules = [rules] + if not rules: + raise ValueError("rules must contain at least one item") + result = self.client._gapic_client.read_modify_write_row( + rules=[rule._to_pb() for rule in rules], + row_key=row_key.encode("utf-8") if isinstance(row_key, str) else row_key, + table_name=self.table_name, + app_profile_id=self.app_profile_id, + timeout=operation_timeout, + retry=None, + ) + return Row._from_pb(result.row) + + def close(self): + """Called to close the Table instance and release any resources held by it.""" + if self._register_instance_future: + self._register_instance_future.cancel() + self.client._remove_instance_registration(self.instance_id, self) + + def __enter__(self): + """Implement async context manager protocol + + Ensure registration task has time to run, so that + grpc channels will be warmed for the specified instance""" + if self._register_instance_future: + self._register_instance_future + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + """Implement async context manager protocol + + Unregister this instance with the client, so that + grpc channels will no longer be warmed""" + self.close() diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/mutations_batcher.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/mutations_batcher.py new file mode 100644 index 000000000000..2e4237b741a4 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/mutations_batcher.py @@ -0,0 +1,449 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This file is automatically generated by CrossSync. Do not edit manually. 
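Before moving on to the batcher implementation, a sketch of the atomic read/modify/write call described above, assuming an existing `table`; the `IncrementRule` name, its argument order, and the family/qualifier are assumptions drawn from the package's read_modify_write_rules helpers.

    from google.cloud.bigtable.data.read_modify_write_rules import IncrementRule

    # Atomically bump a counter cell and read back the modified row.
    row = table.read_modify_write_row(
        b"user#100",
        IncrementRule("profile", "views", increment_amount=1),
    )
    for cell in row.cells:
        print(cell.qualifier, cell.value)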
+ +from __future__ import annotations +from typing import Sequence, TYPE_CHECKING, cast +import atexit +import warnings +from collections import deque +import concurrent.futures +from google.cloud.bigtable.data.exceptions import MutationsExceptionGroup +from google.cloud.bigtable.data.exceptions import FailedMutationEntryError +from google.cloud.bigtable.data._helpers import _get_retryable_errors +from google.cloud.bigtable.data._helpers import _get_timeouts +from google.cloud.bigtable.data._helpers import TABLE_DEFAULT +from google.cloud.bigtable.data.mutations import _MUTATE_ROWS_REQUEST_MUTATION_LIMIT +from google.cloud.bigtable.data.mutations import Mutation +from google.cloud.bigtable.data._cross_sync import CrossSync + +if TYPE_CHECKING: + from google.cloud.bigtable.data.mutations import RowMutationEntry + from google.cloud.bigtable.data._sync_autogen.client import Table as TableType +_MB_SIZE = 1024 * 1024 + + +@CrossSync._Sync_Impl.add_mapping_decorator("_FlowControl") +class _FlowControl: + """ + Manages flow control for batched mutations. Mutations are registered against + the FlowControl object before being sent, which will block if size or count + limits have reached capacity. As mutations completed, they are removed from + the FlowControl object, which will notify any blocked requests that there + is additional capacity. + + Flow limits are not hard limits. If a single mutation exceeds the configured + limits, it will be allowed as a single batch when the capacity is available. + + Args: + max_mutation_count: maximum number of mutations to send in a single rpc. + This corresponds to individual mutations in a single RowMutationEntry. + max_mutation_bytes: maximum number of bytes to send in a single rpc. + Raises: + ValueError: if max_mutation_count or max_mutation_bytes is less than 0 + """ + + def __init__(self, max_mutation_count: int, max_mutation_bytes: int): + self._max_mutation_count = max_mutation_count + self._max_mutation_bytes = max_mutation_bytes + if self._max_mutation_count < 1: + raise ValueError("max_mutation_count must be greater than 0") + if self._max_mutation_bytes < 1: + raise ValueError("max_mutation_bytes must be greater than 0") + self._capacity_condition = CrossSync._Sync_Impl.Condition() + self._in_flight_mutation_count = 0 + self._in_flight_mutation_bytes = 0 + + def _has_capacity(self, additional_count: int, additional_size: int) -> bool: + """Checks if there is capacity to send a new entry with the given size and count + + FlowControl limits are not hard limits. If a single mutation exceeds + the configured flow limits, it will be sent in a single batch when + previous batches have completed. + + Args: + additional_count: number of mutations in the pending entry + additional_size: size of the pending entry + Returns: + bool: True if there is capacity to send the pending entry, False otherwise + """ + acceptable_size = max(self._max_mutation_bytes, additional_size) + acceptable_count = max(self._max_mutation_count, additional_count) + new_size = self._in_flight_mutation_bytes + additional_size + new_count = self._in_flight_mutation_count + additional_count + return new_size <= acceptable_size and new_count <= acceptable_count + + def remove_from_flow( + self, mutations: RowMutationEntry | list[RowMutationEntry] + ) -> None: + """Removes mutations from flow control. This method should be called once + for each mutation that was sent to add_to_flow, after the corresponding + operation is complete. 
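A small illustration of the soft-limit behavior described for _FlowControl: because the effective limit is the larger of the configured limit and the pending entry's size, an oversized entry is still admitted when nothing is in flight, but blocks later entries until it completes. This sketch pokes at internal attributes purely for demonstration.

    from google.cloud.bigtable.data._sync_autogen.mutations_batcher import _FlowControl

    flow = _FlowControl(max_mutation_count=100, max_mutation_bytes=1024)
    # An entry larger than max_mutation_bytes is admitted while the queue is idle.
    print(flow._has_capacity(additional_count=1, additional_size=4096))  # True
    flow._in_flight_mutation_bytes += 4096
    flow._in_flight_mutation_count += 1
    # With the oversized entry in flight, even a small entry must now wait.
    print(flow._has_capacity(additional_count=1, additional_size=10))  # False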
+ + Args: + mutations: mutation or list of mutations to remove from flow control""" + if not isinstance(mutations, list): + mutations = [mutations] + total_count = sum((len(entry.mutations) for entry in mutations)) + total_size = sum((entry.size() for entry in mutations)) + self._in_flight_mutation_count -= total_count + self._in_flight_mutation_bytes -= total_size + with self._capacity_condition: + self._capacity_condition.notify_all() + + def add_to_flow(self, mutations: RowMutationEntry | list[RowMutationEntry]): + """Generator function that registers mutations with flow control. As mutations + are accepted into the flow control, they are yielded back to the caller, + to be sent in a batch. If the flow control is at capacity, the generator + will block until there is capacity available. + + Args: + mutations: list mutations to break up into batches + Yields: + list[RowMutationEntry]: + list of mutations that have reserved space in the flow control. + Each batch contains at least one mutation.""" + if not isinstance(mutations, list): + mutations = [mutations] + start_idx = 0 + end_idx = 0 + while end_idx < len(mutations): + start_idx = end_idx + batch_mutation_count = 0 + with self._capacity_condition: + while end_idx < len(mutations): + next_entry = mutations[end_idx] + next_size = next_entry.size() + next_count = len(next_entry.mutations) + if ( + self._has_capacity(next_count, next_size) + and batch_mutation_count + next_count + <= _MUTATE_ROWS_REQUEST_MUTATION_LIMIT + ): + end_idx += 1 + batch_mutation_count += next_count + self._in_flight_mutation_bytes += next_size + self._in_flight_mutation_count += next_count + elif start_idx != end_idx: + break + else: + self._capacity_condition.wait_for( + lambda: self._has_capacity(next_count, next_size) + ) + yield mutations[start_idx:end_idx] + + +class MutationsBatcher: + """ + Allows users to send batches using context manager API: + + Runs mutate_row, mutate_rows, and check_and_mutate_row internally, combining + to use as few network requests as required + + Will automatically flush the batcher: + - every flush_interval seconds + - after queue size reaches flush_limit_mutation_count + - after queue reaches flush_limit_bytes + - when batcher is closed or destroyed + + Args: + table: Table to preform rpc calls + flush_interval: Automatically flush every flush_interval seconds. + If None, no time-based flushing is performed. + flush_limit_mutation_count: Flush immediately after flush_limit_mutation_count + mutations are added across all entries. If None, this limit is ignored. + flush_limit_bytes: Flush immediately after flush_limit_bytes bytes are added. + flow_control_max_mutation_count: Maximum number of inflight mutations. + flow_control_max_bytes: Maximum number of inflight bytes. + batch_operation_timeout: timeout for each mutate_rows operation, in seconds. + If TABLE_DEFAULT, defaults to the Table's default_mutate_rows_operation_timeout. + batch_attempt_timeout: timeout for each individual request, in seconds. + If TABLE_DEFAULT, defaults to the Table's default_mutate_rows_attempt_timeout. + If None, defaults to batch_operation_timeout. + batch_retryable_errors: a list of errors that will be retried if encountered. + Defaults to the Table's default_mutate_rows_retryable_errors. 
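A sketch of tuning the flush and flow-control knobs just listed, assuming an existing `table` and a `RowMutationEntry` named `entry` built elsewhere; the values are illustrative, not recommendations.

    # Flush more aggressively and cap in-flight mutations.
    batcher = table.mutations_batcher(
        flush_interval=2,
        flush_limit_mutation_count=500,
        flush_limit_bytes=5 * 1024 * 1024,
        flow_control_max_mutation_count=10_000,
    )
    try:
        batcher.append(entry)
    finally:
        batcher.close()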
+ """ + + def __init__( + self, + table: TableType, + *, + flush_interval: float | None = 5, + flush_limit_mutation_count: int | None = 1000, + flush_limit_bytes: int = 20 * _MB_SIZE, + flow_control_max_mutation_count: int = 100000, + flow_control_max_bytes: int = 100 * _MB_SIZE, + batch_operation_timeout: float | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS, + batch_attempt_timeout: float | None | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS, + batch_retryable_errors: Sequence[type[Exception]] + | TABLE_DEFAULT = TABLE_DEFAULT.MUTATE_ROWS, + ): + (self._operation_timeout, self._attempt_timeout) = _get_timeouts( + batch_operation_timeout, batch_attempt_timeout, table + ) + self._retryable_errors: list[type[Exception]] = _get_retryable_errors( + batch_retryable_errors, table + ) + self._closed = CrossSync._Sync_Impl.Event() + self._table = table + self._staged_entries: list[RowMutationEntry] = [] + (self._staged_count, self._staged_bytes) = (0, 0) + self._flow_control = CrossSync._Sync_Impl._FlowControl( + flow_control_max_mutation_count, flow_control_max_bytes + ) + self._flush_limit_bytes = flush_limit_bytes + self._flush_limit_count = ( + flush_limit_mutation_count + if flush_limit_mutation_count is not None + else float("inf") + ) + self._sync_rpc_executor = ( + concurrent.futures.ThreadPoolExecutor(max_workers=8) + if not CrossSync._Sync_Impl.is_async + else None + ) + self._sync_flush_executor = ( + concurrent.futures.ThreadPoolExecutor(max_workers=4) + if not CrossSync._Sync_Impl.is_async + else None + ) + self._flush_timer = CrossSync._Sync_Impl.create_task( + self._timer_routine, flush_interval, sync_executor=self._sync_flush_executor + ) + self._flush_jobs: set[CrossSync._Sync_Impl.Future[None]] = set() + self._entries_processed_since_last_raise: int = 0 + self._exceptions_since_last_raise: int = 0 + self._exception_list_limit: int = 10 + self._oldest_exceptions: list[Exception] = [] + self._newest_exceptions: deque[Exception] = deque( + maxlen=self._exception_list_limit + ) + atexit.register(self._on_exit) + + def _timer_routine(self, interval: float | None) -> None: + """Set up a background task to flush the batcher every interval seconds + + If interval is None, an empty future is returned + + Args: + flush_interval: Automatically flush every flush_interval seconds. + If None, no time-based flushing is performed.""" + if not interval or interval <= 0: + return None + while not self._closed.is_set(): + CrossSync._Sync_Impl.event_wait( + self._closed, timeout=interval, async_break_early=False + ) + if not self._closed.is_set() and self._staged_entries: + self._schedule_flush() + + def append(self, mutation_entry: RowMutationEntry): + """Add a new set of mutations to the internal queue + + Args: + mutation_entry: new entry to add to flush queue + Raises: + RuntimeError: if batcher is closed + ValueError: if an invalid mutation type is added""" + if self._closed.is_set(): + raise RuntimeError("Cannot append to closed MutationsBatcher") + if isinstance(cast(Mutation, mutation_entry), Mutation): + raise ValueError( + f"invalid mutation type: {type(mutation_entry).__name__}. 
Only RowMutationEntry objects are supported by batcher" + ) + self._staged_entries.append(mutation_entry) + self._staged_count += len(mutation_entry.mutations) + self._staged_bytes += mutation_entry.size() + if ( + self._staged_count >= self._flush_limit_count + or self._staged_bytes >= self._flush_limit_bytes + ): + self._schedule_flush() + CrossSync._Sync_Impl.yield_to_event_loop() + + def _schedule_flush(self) -> CrossSync._Sync_Impl.Future[None] | None: + """Update the flush task to include the latest staged entries + + Returns: + Future[None] | None: + future representing the background task, if started""" + if self._staged_entries: + (entries, self._staged_entries) = (self._staged_entries, []) + (self._staged_count, self._staged_bytes) = (0, 0) + new_task = CrossSync._Sync_Impl.create_task( + self._flush_internal, entries, sync_executor=self._sync_flush_executor + ) + if not new_task.done(): + self._flush_jobs.add(new_task) + new_task.add_done_callback(self._flush_jobs.remove) + return new_task + return None + + def _flush_internal(self, new_entries: list[RowMutationEntry]): + """Flushes a set of mutations to the server, and updates internal state + + Args: + new_entries list of RowMutationEntry objects to flush""" + in_process_requests: list[ + CrossSync._Sync_Impl.Future[list[FailedMutationEntryError]] + ] = [] + for batch in self._flow_control.add_to_flow(new_entries): + batch_task = CrossSync._Sync_Impl.create_task( + self._execute_mutate_rows, batch, sync_executor=self._sync_rpc_executor + ) + in_process_requests.append(batch_task) + found_exceptions = self._wait_for_batch_results(*in_process_requests) + self._entries_processed_since_last_raise += len(new_entries) + self._add_exceptions(found_exceptions) + + def _execute_mutate_rows( + self, batch: list[RowMutationEntry] + ) -> list[FailedMutationEntryError]: + """Helper to execute mutation operation on a batch + + Args: + batch: list of RowMutationEntry objects to send to server + timeout: timeout in seconds. Used as operation_timeout and attempt_timeout. + If not given, will use table defaults + Returns: + list[FailedMutationEntryError]: + list of FailedMutationEntryError objects for mutations that failed. + FailedMutationEntryError objects will not contain index information""" + try: + operation = CrossSync._Sync_Impl._MutateRowsOperation( + self._table.client._gapic_client, + self._table, + batch, + operation_timeout=self._operation_timeout, + attempt_timeout=self._attempt_timeout, + retryable_exceptions=self._retryable_errors, + ) + operation.start() + except MutationsExceptionGroup as e: + for subexc in e.exceptions: + subexc.index = None + return list(e.exceptions) + finally: + self._flow_control.remove_from_flow(batch) + return [] + + def _add_exceptions(self, excs: list[Exception]): + """Add new list of exceptions to internal store. To avoid unbounded memory, + the batcher will store the first and last _exception_list_limit exceptions, + and discard any in between. 
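Since failures are collected in the background and only surfaced later, a caller typically sees them when the batcher is closed, as the truncated group described above; a minimal sketch, assuming a `batcher` from `table.mutations_batcher()`:

    from google.cloud.bigtable.data.exceptions import MutationsExceptionGroup

    try:
        batcher.close()  # flushes remaining entries and re-raises collected failures
    except MutationsExceptionGroup as eg:
        for failed in eg.exceptions:  # only the oldest and newest failures are retained
            print(failed)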
+ + Args: + excs: list of exceptions to add to the internal store""" + self._exceptions_since_last_raise += len(excs) + if excs and len(self._oldest_exceptions) < self._exception_list_limit: + addition_count = self._exception_list_limit - len(self._oldest_exceptions) + self._oldest_exceptions.extend(excs[:addition_count]) + excs = excs[addition_count:] + if excs: + self._newest_exceptions.extend(excs[-self._exception_list_limit :]) + + def _raise_exceptions(self): + """Raise any unreported exceptions from background flush operations + + Raises: + MutationsExceptionGroup: exception group with all unreported exceptions""" + if self._oldest_exceptions or self._newest_exceptions: + (oldest, self._oldest_exceptions) = (self._oldest_exceptions, []) + newest = list(self._newest_exceptions) + self._newest_exceptions.clear() + (entry_count, self._entries_processed_since_last_raise) = ( + self._entries_processed_since_last_raise, + 0, + ) + (exc_count, self._exceptions_since_last_raise) = ( + self._exceptions_since_last_raise, + 0, + ) + raise MutationsExceptionGroup.from_truncated_lists( + first_list=oldest, + last_list=newest, + total_excs=exc_count, + entry_count=entry_count, + ) + + def __enter__(self): + """Allow use of context manager API""" + return self + + def __exit__(self, exc_type, exc, tb): + """Allow use of context manager API. + + Flushes the batcher and cleans up resources.""" + self.close() + + @property + def closed(self) -> bool: + """Returns: + - True if the batcher is closed, False otherwise""" + return self._closed.is_set() + + def close(self): + """Flush queue and clean up resources""" + self._closed.set() + self._flush_timer.cancel() + self._schedule_flush() + if self._sync_flush_executor: + with self._sync_flush_executor: + self._sync_flush_executor.shutdown(wait=True) + if self._sync_rpc_executor: + with self._sync_rpc_executor: + self._sync_rpc_executor.shutdown(wait=True) + CrossSync._Sync_Impl.wait([*self._flush_jobs, self._flush_timer]) + atexit.unregister(self._on_exit) + self._raise_exceptions() + + def _on_exit(self): + """Called when program is exited. Raises warning if unflushed mutations remain""" + if not self._closed.is_set() and self._staged_entries: + warnings.warn( + f"MutationsBatcher for table {self._table.table_name} was not closed. {len(self._staged_entries)} Unflushed mutations will not be sent to the server." + ) + + @staticmethod + def _wait_for_batch_results( + *tasks: CrossSync._Sync_Impl.Future[list[FailedMutationEntryError]] + | CrossSync._Sync_Impl.Future[None], + ) -> list[Exception]: + """Takes in a list of futures representing _execute_mutate_rows tasks, + waits for them to complete, and returns a list of errors encountered. + + Args: + *tasks: futures representing _execute_mutate_rows or _flush_internal tasks + Returns: + list[Exception]: + list of Exceptions encountered by any of the tasks. Errors are expected + to be FailedMutationEntryError, representing a failed mutation operation. + If a task fails with a different exception, it will be included in the + output list. Successful tasks will not be represented in the output list. 
+ """ + if not tasks: + return [] + exceptions: list[Exception] = [] + for task in tasks: + try: + exc_list = task.result() + if exc_list: + for exc in exc_list: + exc.index = None + exceptions.extend(exc_list) + except Exception as e: + exceptions.append(e) + return exceptions diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/__init__.py index 0ff258365fdb..31fd5e3cca14 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/__init__.py @@ -15,6 +15,9 @@ from google.cloud.bigtable.data.execute_query._async.execute_query_iterator import ( ExecuteQueryIteratorAsync, ) +from google.cloud.bigtable.data.execute_query._sync_autogen.execute_query_iterator import ( + ExecuteQueryIterator, +) from google.cloud.bigtable.data.execute_query.metadata import ( Metadata, ProtoMetadata, @@ -28,6 +31,7 @@ from google.cloud.bigtable.data._cross_sync import CrossSync CrossSync.add_mapping("ExecuteQueryIterator", ExecuteQueryIteratorAsync) +CrossSync._Sync_Impl.add_mapping("ExecuteQueryIterator", ExecuteQueryIterator) __all__ = [ "ExecuteQueryValueType", @@ -37,4 +41,5 @@ "Metadata", "ProtoMetadata", "ExecuteQueryIteratorAsync", + "ExecuteQueryIterator", ] diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py index ba82bbccaf53..66f264610247 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py @@ -46,6 +46,8 @@ if TYPE_CHECKING: if CrossSync.is_async: from google.cloud.bigtable.data import BigtableDataClientAsync as DataClientType + else: + from google.cloud.bigtable.data import BigtableDataClient as DataClientType __CROSS_SYNC_OUTPUT__ = ( "google.cloud.bigtable.data.execute_query._sync_autogen.execute_query_iterator" diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_sync_autogen/execute_query_iterator.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_sync_autogen/execute_query_iterator.py new file mode 100644 index 000000000000..854148ff3567 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_sync_autogen/execute_query_iterator.py @@ -0,0 +1,186 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# This file is automatically generated by CrossSync. Do not edit manually. 
+ +from __future__ import annotations +from typing import Any, Dict, Optional, Sequence, Tuple, TYPE_CHECKING +from google.api_core import retry as retries +from google.cloud.bigtable.data.execute_query._byte_cursor import _ByteCursor +from google.cloud.bigtable.data._helpers import ( + _attempt_timeout_generator, + _retry_exception_factory, +) +from google.cloud.bigtable.data.exceptions import InvalidExecuteQueryResponse +from google.cloud.bigtable.data.execute_query.values import QueryResultRow +from google.cloud.bigtable.data.execute_query.metadata import Metadata, ProtoMetadata +from google.cloud.bigtable.data.execute_query._reader import ( + _QueryResultRowReader, + _Reader, +) +from google.cloud.bigtable_v2.types.bigtable import ( + ExecuteQueryRequest as ExecuteQueryRequestPB, +) +from google.cloud.bigtable.data._cross_sync import CrossSync + +if TYPE_CHECKING: + from google.cloud.bigtable.data import BigtableDataClient as DataClientType + + +class ExecuteQueryIterator: + def __init__( + self, + client: DataClientType, + instance_id: str, + app_profile_id: Optional[str], + request_body: Dict[str, Any], + attempt_timeout: float | None, + operation_timeout: float, + req_metadata: Sequence[Tuple[str, str]] = (), + retryable_excs: Sequence[type[Exception]] = (), + ) -> None: + """Collects responses from ExecuteQuery requests and parses them into QueryResultRows. + + It is **not thread-safe**. It should not be used by multiple threads. + + Args: + client: bigtable client + instance_id: id of the instance on which the query is executed + request_body: dict representing the body of the ExecuteQueryRequest + attempt_timeout: the time budget for an individual network request, in seconds. + If it takes longer than this time to complete, the request will be cancelled with + a DeadlineExceeded exception, and a retry will be attempted. + operation_timeout: the time budget for the entire operation, in seconds. + Failed requests will be retried within the budget + req_metadata: metadata used while sending the gRPC request + retryable_excs: a list of errors that will be retried if encountered. + Raises: + None""" + self._table_name = None + self._app_profile_id = app_profile_id + self._client = client + self._instance_id = instance_id + self._byte_cursor = _ByteCursor[ProtoMetadata]() + self._reader: _Reader[QueryResultRow] = _QueryResultRowReader(self._byte_cursor) + self._result_generator = self._next_impl() + self._register_instance_task = None + self._is_closed = False + self._request_body = request_body + self._attempt_timeout_gen = _attempt_timeout_generator( + attempt_timeout, operation_timeout + ) + self._stream = CrossSync._Sync_Impl.retry_target_stream( + self._make_request_with_resume_token, + retries.if_exception_type(*retryable_excs), + retries.exponential_sleep_generator(0.01, 60, multiplier=2), + operation_timeout, + exception_factory=_retry_exception_factory, + ) + self._req_metadata = req_metadata + try: + self._register_instance_task = CrossSync._Sync_Impl.create_task( + self._client._register_instance, + instance_id, + self, + sync_executor=self._client._executor, + ) + except RuntimeError as e: + raise RuntimeError( + f"{self.__class__.__name__} must be created within an async event loop context." 
+            ) from e
+
+    @property
+    def is_closed(self) -> bool:
+        """Returns True if the iterator is closed, False otherwise."""
+        return self._is_closed
+
+    @property
+    def app_profile_id(self) -> Optional[str]:
+        """Returns the app_profile_id of the iterator."""
+        return self._app_profile_id
+
+    @property
+    def table_name(self) -> Optional[str]:
+        """Returns the table_name of the iterator."""
+        return self._table_name
+
+    def _make_request_with_resume_token(self):
+        """Performs the rpc call using the correct resume token."""
+        resume_token = self._byte_cursor.prepare_for_new_request()
+        request = ExecuteQueryRequestPB(
+            {**self._request_body, "resume_token": resume_token}
+        )
+        return self._client._gapic_client.execute_query(
+            request,
+            timeout=next(self._attempt_timeout_gen),
+            metadata=self._req_metadata,
+            retry=None,
+        )
+
+    def _fetch_metadata(self) -> None:
+        """If called before the first response was received, the first response
+        is retrieved as part of this call."""
+        if self._byte_cursor.metadata is None:
+            metadata_msg = self._stream.__next__()
+            self._byte_cursor.consume_metadata(metadata_msg)
+
+    def _next_impl(self) -> CrossSync._Sync_Impl.Iterator[QueryResultRow]:
+        """Generator wrapping the response stream which parses the stream results
+        and returns full `QueryResultRow`s."""
+        self._fetch_metadata()
+        for response in self._stream:
+            try:
+                bytes_to_parse = self._byte_cursor.consume(response)
+                if bytes_to_parse is None:
+                    continue
+                results = self._reader.consume(bytes_to_parse)
+                if results is None:
+                    continue
+            except ValueError as e:
+                raise InvalidExecuteQueryResponse(
+                    "Invalid ExecuteQuery response received"
+                ) from e
+            for result in results:
+                yield result
+        self.close()
+
+    def __next__(self) -> QueryResultRow:
+        if self._is_closed:
+            raise CrossSync._Sync_Impl.StopIteration
+        return self._result_generator.__next__()
+
+    def __iter__(self):
+        return self
+
+    def metadata(self) -> Optional[Metadata]:
+        """Returns query metadata from the server or None if the iterator was
+        explicitly closed."""
+        if self._is_closed:
+            return None
+        if self._byte_cursor.metadata is None:
+            try:
+                self._fetch_metadata()
+            except CrossSync._Sync_Impl.StopIteration:
+                return None
+        return self._byte_cursor.metadata
+
+    def close(self) -> None:
+        """Cancel all background tasks.
Should be called all rows were processed.""" + if self._is_closed: + return + self._is_closed = True + if self._register_instance_task is not None: + self._register_instance_task.cancel() + self._client._remove_instance_registration(self._instance_id, self) diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index f6a2291fc865..8576fed85de6 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -28,7 +28,7 @@ import nox FLAKE8_VERSION = "flake8==6.1.0" -BLACK_VERSION = "black[jupyter]==23.7.0" +BLACK_VERSION = "black[jupyter]==23.3.0" ISORT_VERSION = "isort==5.11.0" LINT_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"] @@ -49,6 +49,8 @@ "pytest", "pytest-cov", "pytest-asyncio", + BLACK_VERSION, + "autoflake", ] UNIT_TEST_EXTERNAL_DEPENDENCIES: List[str] = [] UNIT_TEST_LOCAL_DEPENDENCIES: List[str] = [] @@ -64,7 +66,7 @@ ] SYSTEM_TEST_EXTERNAL_DEPENDENCIES: List[str] = [ "pytest-asyncio==0.21.2", - "black==23.7.0", + BLACK_VERSION, "pyyaml==6.0.2", ] SYSTEM_TEST_LOCAL_DEPENDENCIES: List[str] = [] @@ -561,3 +563,13 @@ def prerelease_deps(session, protobuf_implementation): "PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION": protobuf_implementation, }, ) + + +@nox.session(python="3.10") +def generate_sync(session): + """ + Re-generate sync files for the library from CrossSync-annotated async source + """ + session.install(BLACK_VERSION) + session.install("autoflake") + session.run("python", ".cross_sync/generate.py", ".") diff --git a/packages/google-cloud-bigtable/test_proxy/handlers/client_handler_data_async.py b/packages/google-cloud-bigtable/test_proxy/handlers/client_handler_data_async.py index 7f6cc413fb11..49539c1aa2c5 100644 --- a/packages/google-cloud-bigtable/test_proxy/handlers/client_handler_data_async.py +++ b/packages/google-cloud-bigtable/test_proxy/handlers/client_handler_data_async.py @@ -1,4 +1,4 @@ -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/tests/system/data/test_system_async.py b/packages/google-cloud-bigtable/tests/system/data/test_system_async.py index c0e9f39d230e..b97859de11f6 100644 --- a/packages/google-cloud-bigtable/tests/system/data/test_system_async.py +++ b/packages/google-cloud-bigtable/tests/system/data/test_system_async.py @@ -1,4 +1,4 @@ -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/tests/system/data/test_system_autogen.py b/packages/google-cloud-bigtable/tests/system/data/test_system_autogen.py new file mode 100644 index 000000000000..2dde82bf16d1 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/system/data/test_system_autogen.py @@ -0,0 +1,828 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +# This file is automatically generated by CrossSync. Do not edit manually. + +import pytest +import uuid +import os +from google.api_core import retry +from google.api_core.exceptions import ClientError +from google.cloud.bigtable.data.read_modify_write_rules import _MAX_INCREMENT_VALUE +from google.cloud.environment_vars import BIGTABLE_EMULATOR +from google.cloud.bigtable.data._cross_sync import CrossSync +from . import TEST_FAMILY, TEST_FAMILY_2 + + +@CrossSync._Sync_Impl.add_mapping_decorator("TempRowBuilder") +class TempRowBuilder: + """ + Used to add rows to a table for testing purposes. + """ + + def __init__(self, table): + self.rows = [] + self.table = table + + def add_row( + self, row_key, *, family=TEST_FAMILY, qualifier=b"q", value=b"test-value" + ): + if isinstance(value, str): + value = value.encode("utf-8") + elif isinstance(value, int): + value = value.to_bytes(8, byteorder="big", signed=True) + request = { + "table_name": self.table.table_name, + "row_key": row_key, + "mutations": [ + { + "set_cell": { + "family_name": family, + "column_qualifier": qualifier, + "value": value, + } + } + ], + } + self.table.client._gapic_client.mutate_row(request) + self.rows.append(row_key) + + def delete_rows(self): + if self.rows: + request = { + "table_name": self.table.table_name, + "entries": [ + {"row_key": row, "mutations": [{"delete_from_row": {}}]} + for row in self.rows + ], + } + self.table.client._gapic_client.mutate_rows(request) + + +class TestSystem: + @pytest.fixture(scope="session") + def client(self): + project = os.getenv("GOOGLE_CLOUD_PROJECT") or None + with CrossSync._Sync_Impl.DataClient(project=project) as client: + yield client + + @pytest.fixture(scope="session") + def table(self, client, table_id, instance_id): + with client.get_table(instance_id, table_id) as table: + yield table + + @pytest.fixture(scope="session") + def column_family_config(self): + """specify column families to create when creating a new test table""" + from google.cloud.bigtable_admin_v2 import types + + return {TEST_FAMILY: types.ColumnFamily(), TEST_FAMILY_2: types.ColumnFamily()} + + @pytest.fixture(scope="session") + def init_table_id(self): + """The table_id to use when creating a new test table""" + return f"test-table-{uuid.uuid4().hex}" + + @pytest.fixture(scope="session") + def cluster_config(self, project_id): + """Configuration for the clusters to use when creating a new instance""" + from google.cloud.bigtable_admin_v2 import types + + cluster = { + "test-cluster": types.Cluster( + location=f"projects/{project_id}/locations/us-central1-b", serve_nodes=1 + ) + } + return cluster + + @pytest.mark.usefixtures("table") + def _retrieve_cell_value(self, table, row_key): + """Helper to read an individual row""" + from google.cloud.bigtable.data import ReadRowsQuery + + row_list = table.read_rows(ReadRowsQuery(row_keys=row_key)) + assert len(row_list) == 1 + row = row_list[0] + cell = row.cells[0] + return cell.value + + def _create_row_and_mutation( + self, table, temp_rows, *, start_value=b"start", new_value=b"new_value" + ): + """Helper to create a new row, and a sample set_cell mutation to change its value""" + from google.cloud.bigtable.data.mutations import SetCell + + row_key = uuid.uuid4().hex.encode() + family = TEST_FAMILY + qualifier = b"test-qualifier" + temp_rows.add_row( + row_key, family=family, qualifier=qualifier, value=start_value + ) + assert 
self._retrieve_cell_value(table, row_key) == start_value + mutation = SetCell(family=TEST_FAMILY, qualifier=qualifier, new_value=new_value) + return (row_key, mutation) + + @pytest.fixture(scope="function") + def temp_rows(self, table): + builder = CrossSync._Sync_Impl.TempRowBuilder(table) + yield builder + builder.delete_rows() + + @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("client") + @CrossSync._Sync_Impl.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=10 + ) + def test_ping_and_warm_gapic(self, client, table): + """Simple ping rpc test + This test ensures channels are able to authenticate with backend""" + request = {"name": table.instance_name} + client._gapic_client.ping_and_warm(request) + + @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("client") + @CrossSync._Sync_Impl.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + def test_ping_and_warm(self, client, table): + """Test ping and warm from handwritten client""" + results = client._ping_and_warm_instances() + assert len(results) == 1 + assert results[0] is None + + @pytest.mark.usefixtures("table") + @CrossSync._Sync_Impl.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + def test_mutation_set_cell(self, table, temp_rows): + """Ensure cells can be set properly""" + row_key = b"bulk_mutate" + new_value = uuid.uuid4().hex.encode() + (row_key, mutation) = self._create_row_and_mutation( + table, temp_rows, new_value=new_value + ) + table.mutate_row(row_key, mutation) + assert self._retrieve_cell_value(table, row_key) == new_value + + @pytest.mark.skipif( + bool(os.environ.get(BIGTABLE_EMULATOR)), reason="emulator doesn't use splits" + ) + @pytest.mark.usefixtures("client") + @pytest.mark.usefixtures("table") + @CrossSync._Sync_Impl.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + def test_sample_row_keys(self, client, table, temp_rows, column_split_config): + """Sample keys should return a single sample in small test tables""" + temp_rows.add_row(b"row_key_1") + temp_rows.add_row(b"row_key_2") + results = table.sample_row_keys() + assert len(results) == len(column_split_config) + 1 + for idx in range(len(column_split_config)): + assert results[idx][0] == column_split_config[idx] + assert isinstance(results[idx][1], int) + assert results[-1][0] == b"" + assert isinstance(results[-1][1], int) + + @pytest.mark.usefixtures("client") + @pytest.mark.usefixtures("table") + def test_bulk_mutations_set_cell(self, client, table, temp_rows): + """Ensure cells can be set properly""" + from google.cloud.bigtable.data.mutations import RowMutationEntry + + new_value = uuid.uuid4().hex.encode() + (row_key, mutation) = self._create_row_and_mutation( + table, temp_rows, new_value=new_value + ) + bulk_mutation = RowMutationEntry(row_key, [mutation]) + table.bulk_mutate_rows([bulk_mutation]) + assert self._retrieve_cell_value(table, row_key) == new_value + + def test_bulk_mutations_raise_exception(self, client, table): + """If an invalid mutation is passed, an exception should be raised""" + from google.cloud.bigtable.data.mutations import RowMutationEntry, SetCell + from google.cloud.bigtable.data.exceptions import MutationsExceptionGroup + from google.cloud.bigtable.data.exceptions import FailedMutationEntryError + + row_key = uuid.uuid4().hex.encode() + mutation = SetCell( + family="nonexistent", qualifier=b"test-qualifier", new_value=b"" + ) + bulk_mutation = RowMutationEntry(row_key, 
[mutation]) + with pytest.raises(MutationsExceptionGroup) as exc: + table.bulk_mutate_rows([bulk_mutation]) + assert len(exc.value.exceptions) == 1 + entry_error = exc.value.exceptions[0] + assert isinstance(entry_error, FailedMutationEntryError) + assert entry_error.index == 0 + assert entry_error.entry == bulk_mutation + + @pytest.mark.usefixtures("client") + @pytest.mark.usefixtures("table") + @CrossSync._Sync_Impl.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + def test_mutations_batcher_context_manager(self, client, table, temp_rows): + """test batcher with context manager. Should flush on exit""" + from google.cloud.bigtable.data.mutations import RowMutationEntry + + (new_value, new_value2) = [uuid.uuid4().hex.encode() for _ in range(2)] + (row_key, mutation) = self._create_row_and_mutation( + table, temp_rows, new_value=new_value + ) + (row_key2, mutation2) = self._create_row_and_mutation( + table, temp_rows, new_value=new_value2 + ) + bulk_mutation = RowMutationEntry(row_key, [mutation]) + bulk_mutation2 = RowMutationEntry(row_key2, [mutation2]) + with table.mutations_batcher() as batcher: + batcher.append(bulk_mutation) + batcher.append(bulk_mutation2) + assert self._retrieve_cell_value(table, row_key) == new_value + assert len(batcher._staged_entries) == 0 + + @pytest.mark.usefixtures("client") + @pytest.mark.usefixtures("table") + @CrossSync._Sync_Impl.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + def test_mutations_batcher_timer_flush(self, client, table, temp_rows): + """batch should occur after flush_interval seconds""" + from google.cloud.bigtable.data.mutations import RowMutationEntry + + new_value = uuid.uuid4().hex.encode() + (row_key, mutation) = self._create_row_and_mutation( + table, temp_rows, new_value=new_value + ) + bulk_mutation = RowMutationEntry(row_key, [mutation]) + flush_interval = 0.1 + with table.mutations_batcher(flush_interval=flush_interval) as batcher: + batcher.append(bulk_mutation) + CrossSync._Sync_Impl.yield_to_event_loop() + assert len(batcher._staged_entries) == 1 + CrossSync._Sync_Impl.sleep(flush_interval + 0.1) + assert len(batcher._staged_entries) == 0 + assert self._retrieve_cell_value(table, row_key) == new_value + + @pytest.mark.usefixtures("client") + @pytest.mark.usefixtures("table") + @CrossSync._Sync_Impl.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + def test_mutations_batcher_count_flush(self, client, table, temp_rows): + """batch should flush after flush_limit_mutation_count mutations""" + from google.cloud.bigtable.data.mutations import RowMutationEntry + + (new_value, new_value2) = [uuid.uuid4().hex.encode() for _ in range(2)] + (row_key, mutation) = self._create_row_and_mutation( + table, temp_rows, new_value=new_value + ) + bulk_mutation = RowMutationEntry(row_key, [mutation]) + (row_key2, mutation2) = self._create_row_and_mutation( + table, temp_rows, new_value=new_value2 + ) + bulk_mutation2 = RowMutationEntry(row_key2, [mutation2]) + with table.mutations_batcher(flush_limit_mutation_count=2) as batcher: + batcher.append(bulk_mutation) + assert len(batcher._flush_jobs) == 0 + assert len(batcher._staged_entries) == 1 + batcher.append(bulk_mutation2) + assert len(batcher._flush_jobs) == 1 + for future in list(batcher._flush_jobs): + future + future.result() + assert len(batcher._staged_entries) == 0 + assert len(batcher._flush_jobs) == 0 + assert self._retrieve_cell_value(table, row_key) == new_value + assert 
self._retrieve_cell_value(table, row_key2) == new_value2 + + @pytest.mark.usefixtures("client") + @pytest.mark.usefixtures("table") + @CrossSync._Sync_Impl.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + def test_mutations_batcher_bytes_flush(self, client, table, temp_rows): + """batch should flush after flush_limit_bytes bytes""" + from google.cloud.bigtable.data.mutations import RowMutationEntry + + (new_value, new_value2) = [uuid.uuid4().hex.encode() for _ in range(2)] + (row_key, mutation) = self._create_row_and_mutation( + table, temp_rows, new_value=new_value + ) + bulk_mutation = RowMutationEntry(row_key, [mutation]) + (row_key2, mutation2) = self._create_row_and_mutation( + table, temp_rows, new_value=new_value2 + ) + bulk_mutation2 = RowMutationEntry(row_key2, [mutation2]) + flush_limit = bulk_mutation.size() + bulk_mutation2.size() - 1 + with table.mutations_batcher(flush_limit_bytes=flush_limit) as batcher: + batcher.append(bulk_mutation) + assert len(batcher._flush_jobs) == 0 + assert len(batcher._staged_entries) == 1 + batcher.append(bulk_mutation2) + assert len(batcher._flush_jobs) == 1 + assert len(batcher._staged_entries) == 0 + for future in list(batcher._flush_jobs): + future + future.result() + assert self._retrieve_cell_value(table, row_key) == new_value + assert self._retrieve_cell_value(table, row_key2) == new_value2 + + @pytest.mark.usefixtures("client") + @pytest.mark.usefixtures("table") + def test_mutations_batcher_no_flush(self, client, table, temp_rows): + """test with no flush requirements met""" + from google.cloud.bigtable.data.mutations import RowMutationEntry + + new_value = uuid.uuid4().hex.encode() + start_value = b"unchanged" + (row_key, mutation) = self._create_row_and_mutation( + table, temp_rows, start_value=start_value, new_value=new_value + ) + bulk_mutation = RowMutationEntry(row_key, [mutation]) + (row_key2, mutation2) = self._create_row_and_mutation( + table, temp_rows, start_value=start_value, new_value=new_value + ) + bulk_mutation2 = RowMutationEntry(row_key2, [mutation2]) + size_limit = bulk_mutation.size() + bulk_mutation2.size() + 1 + with table.mutations_batcher( + flush_limit_bytes=size_limit, flush_limit_mutation_count=3, flush_interval=1 + ) as batcher: + batcher.append(bulk_mutation) + assert len(batcher._staged_entries) == 1 + batcher.append(bulk_mutation2) + assert len(batcher._flush_jobs) == 0 + CrossSync._Sync_Impl.yield_to_event_loop() + assert len(batcher._staged_entries) == 2 + assert len(batcher._flush_jobs) == 0 + assert self._retrieve_cell_value(table, row_key) == start_value + assert self._retrieve_cell_value(table, row_key2) == start_value + + @pytest.mark.usefixtures("client") + @pytest.mark.usefixtures("table") + @CrossSync._Sync_Impl.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + def test_mutations_batcher_large_batch(self, client, table, temp_rows): + """test batcher with large batch of mutations""" + from google.cloud.bigtable.data.mutations import RowMutationEntry, SetCell + + add_mutation = SetCell( + family=TEST_FAMILY, qualifier=b"test-qualifier", new_value=b"a" + ) + row_mutations = [] + for i in range(50000): + row_key = uuid.uuid4().hex.encode() + row_mutations.append(RowMutationEntry(row_key, [add_mutation])) + temp_rows.rows.append(row_key) + with table.mutations_batcher() as batcher: + for mutation in row_mutations: + batcher.append(mutation) + assert len(batcher._staged_entries) == 0 + + @pytest.mark.usefixtures("client") + 
@pytest.mark.usefixtures("table") + @pytest.mark.parametrize( + "start,increment,expected", + [ + (0, 0, 0), + (0, 1, 1), + (0, -1, -1), + (1, 0, 1), + (0, -100, -100), + (0, 3000, 3000), + (10, 4, 14), + (_MAX_INCREMENT_VALUE, -_MAX_INCREMENT_VALUE, 0), + (_MAX_INCREMENT_VALUE, 2, -_MAX_INCREMENT_VALUE), + (-_MAX_INCREMENT_VALUE, -2, _MAX_INCREMENT_VALUE), + ], + ) + def test_read_modify_write_row_increment( + self, client, table, temp_rows, start, increment, expected + ): + """test read_modify_write_row""" + from google.cloud.bigtable.data.read_modify_write_rules import IncrementRule + + row_key = b"test-row-key" + family = TEST_FAMILY + qualifier = b"test-qualifier" + temp_rows.add_row(row_key, value=start, family=family, qualifier=qualifier) + rule = IncrementRule(family, qualifier, increment) + result = table.read_modify_write_row(row_key, rule) + assert result.row_key == row_key + assert len(result) == 1 + assert result[0].family == family + assert result[0].qualifier == qualifier + assert int(result[0]) == expected + assert self._retrieve_cell_value(table, row_key) == result[0].value + + @pytest.mark.usefixtures("client") + @pytest.mark.usefixtures("table") + @pytest.mark.parametrize( + "start,append,expected", + [ + (b"", b"", b""), + ("", "", b""), + (b"abc", b"123", b"abc123"), + (b"abc", "123", b"abc123"), + ("", b"1", b"1"), + (b"abc", "", b"abc"), + (b"hello", b"world", b"helloworld"), + ], + ) + def test_read_modify_write_row_append( + self, client, table, temp_rows, start, append, expected + ): + """test read_modify_write_row""" + from google.cloud.bigtable.data.read_modify_write_rules import AppendValueRule + + row_key = b"test-row-key" + family = TEST_FAMILY + qualifier = b"test-qualifier" + temp_rows.add_row(row_key, value=start, family=family, qualifier=qualifier) + rule = AppendValueRule(family, qualifier, append) + result = table.read_modify_write_row(row_key, rule) + assert result.row_key == row_key + assert len(result) == 1 + assert result[0].family == family + assert result[0].qualifier == qualifier + assert result[0].value == expected + assert self._retrieve_cell_value(table, row_key) == result[0].value + + @pytest.mark.usefixtures("client") + @pytest.mark.usefixtures("table") + def test_read_modify_write_row_chained(self, client, table, temp_rows): + """test read_modify_write_row with multiple rules""" + from google.cloud.bigtable.data.read_modify_write_rules import AppendValueRule + from google.cloud.bigtable.data.read_modify_write_rules import IncrementRule + + row_key = b"test-row-key" + family = TEST_FAMILY + qualifier = b"test-qualifier" + start_amount = 1 + increment_amount = 10 + temp_rows.add_row( + row_key, value=start_amount, family=family, qualifier=qualifier + ) + rule = [ + IncrementRule(family, qualifier, increment_amount), + AppendValueRule(family, qualifier, "hello"), + AppendValueRule(family, qualifier, "world"), + AppendValueRule(family, qualifier, "!"), + ] + result = table.read_modify_write_row(row_key, rule) + assert result.row_key == row_key + assert result[0].family == family + assert result[0].qualifier == qualifier + assert ( + result[0].value + == (start_amount + increment_amount).to_bytes(8, "big", signed=True) + + b"helloworld!" 
+        )
+        assert self._retrieve_cell_value(table, row_key) == result[0].value
+
+    @pytest.mark.usefixtures("client")
+    @pytest.mark.usefixtures("table")
+    @pytest.mark.parametrize(
+        "start_val,predicate_range,expected_result",
+        [(1, (0, 2), True), (-1, (0, 2), False)],
+    )
+    def test_check_and_mutate(
+        self, client, table, temp_rows, start_val, predicate_range, expected_result
+    ):
+        """test that check_and_mutate_row applies the right mutations, and returns the right result"""
+        from google.cloud.bigtable.data.mutations import SetCell
+        from google.cloud.bigtable.data.row_filters import ValueRangeFilter
+
+        row_key = b"test-row-key"
+        family = TEST_FAMILY
+        qualifier = b"test-qualifier"
+        temp_rows.add_row(row_key, value=start_val, family=family, qualifier=qualifier)
+        false_mutation_value = b"false-mutation-value"
+        false_mutation = SetCell(
+            family=TEST_FAMILY, qualifier=qualifier, new_value=false_mutation_value
+        )
+        true_mutation_value = b"true-mutation-value"
+        true_mutation = SetCell(
+            family=TEST_FAMILY, qualifier=qualifier, new_value=true_mutation_value
+        )
+        predicate = ValueRangeFilter(predicate_range[0], predicate_range[1])
+        result = table.check_and_mutate_row(
+            row_key,
+            predicate,
+            true_case_mutations=true_mutation,
+            false_case_mutations=false_mutation,
+        )
+        assert result == expected_result
+        expected_value = (
+            true_mutation_value if expected_result else false_mutation_value
+        )
+        assert self._retrieve_cell_value(table, row_key) == expected_value
+
+    @pytest.mark.skipif(
+        bool(os.environ.get(BIGTABLE_EMULATOR)),
+        reason="emulator doesn't raise InvalidArgument",
+    )
+    @pytest.mark.usefixtures("client")
+    @pytest.mark.usefixtures("table")
+    def test_check_and_mutate_empty_request(self, client, table):
+        """check_and_mutate with no true or false mutations should raise an error"""
+        from google.api_core import exceptions
+
+        with pytest.raises(exceptions.InvalidArgument) as e:
+            table.check_and_mutate_row(
+                b"row_key", None, true_case_mutations=None, false_case_mutations=None
+            )
+        assert "No mutations provided" in str(e.value)
+
+    @pytest.mark.usefixtures("table")
+    @CrossSync._Sync_Impl.Retry(
+        predicate=retry.if_exception_type(ClientError), initial=1, maximum=5
+    )
+    def test_read_rows_stream(self, table, temp_rows):
+        """Ensure that the read_rows_stream method works"""
+        temp_rows.add_row(b"row_key_1")
+        temp_rows.add_row(b"row_key_2")
+        generator = table.read_rows_stream({})
+        first_row = generator.__next__()
+        second_row = generator.__next__()
+        assert first_row.row_key == b"row_key_1"
+        assert second_row.row_key == b"row_key_2"
+        with pytest.raises(CrossSync._Sync_Impl.StopIteration):
+            generator.__next__()
+
+    @pytest.mark.usefixtures("table")
+    @CrossSync._Sync_Impl.Retry(
+        predicate=retry.if_exception_type(ClientError), initial=1, maximum=5
+    )
+    def test_read_rows(self, table, temp_rows):
+        """Ensure that the read_rows method works"""
+        temp_rows.add_row(b"row_key_1")
+        temp_rows.add_row(b"row_key_2")
+        row_list = table.read_rows({})
+        assert len(row_list) == 2
+        assert row_list[0].row_key == b"row_key_1"
+        assert row_list[1].row_key == b"row_key_2"
+
+    @pytest.mark.usefixtures("table")
+    @CrossSync._Sync_Impl.Retry(
+        predicate=retry.if_exception_type(ClientError), initial=1, maximum=5
+    )
+    def test_read_rows_sharded_simple(self, table, temp_rows):
+        """Test read rows sharded with two queries"""
+        from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery
+
+        temp_rows.add_row(b"a")
+        temp_rows.add_row(b"b")
+        temp_rows.add_row(b"c")
+
temp_rows.add_row(b"d") + query1 = ReadRowsQuery(row_keys=[b"a", b"c"]) + query2 = ReadRowsQuery(row_keys=[b"b", b"d"]) + row_list = table.read_rows_sharded([query1, query2]) + assert len(row_list) == 4 + assert row_list[0].row_key == b"a" + assert row_list[1].row_key == b"c" + assert row_list[2].row_key == b"b" + assert row_list[3].row_key == b"d" + + @pytest.mark.usefixtures("table") + @CrossSync._Sync_Impl.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + def test_read_rows_sharded_from_sample(self, table, temp_rows): + """Test end-to-end sharding""" + from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery + from google.cloud.bigtable.data.read_rows_query import RowRange + + temp_rows.add_row(b"a") + temp_rows.add_row(b"b") + temp_rows.add_row(b"c") + temp_rows.add_row(b"d") + table_shard_keys = table.sample_row_keys() + query = ReadRowsQuery(row_ranges=[RowRange(start_key=b"b", end_key=b"z")]) + shard_queries = query.shard(table_shard_keys) + row_list = table.read_rows_sharded(shard_queries) + assert len(row_list) == 3 + assert row_list[0].row_key == b"b" + assert row_list[1].row_key == b"c" + assert row_list[2].row_key == b"d" + + @pytest.mark.usefixtures("table") + @CrossSync._Sync_Impl.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + def test_read_rows_sharded_filters_limits(self, table, temp_rows): + """Test read rows sharded with filters and limits""" + from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery + from google.cloud.bigtable.data.row_filters import ApplyLabelFilter + + temp_rows.add_row(b"a") + temp_rows.add_row(b"b") + temp_rows.add_row(b"c") + temp_rows.add_row(b"d") + label_filter1 = ApplyLabelFilter("first") + label_filter2 = ApplyLabelFilter("second") + query1 = ReadRowsQuery(row_keys=[b"a", b"c"], limit=1, row_filter=label_filter1) + query2 = ReadRowsQuery(row_keys=[b"b", b"d"], row_filter=label_filter2) + row_list = table.read_rows_sharded([query1, query2]) + assert len(row_list) == 3 + assert row_list[0].row_key == b"a" + assert row_list[1].row_key == b"b" + assert row_list[2].row_key == b"d" + assert row_list[0][0].labels == ["first"] + assert row_list[1][0].labels == ["second"] + assert row_list[2][0].labels == ["second"] + + @pytest.mark.usefixtures("table") + @CrossSync._Sync_Impl.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + def test_read_rows_range_query(self, table, temp_rows): + """Ensure that the read_rows method works""" + from google.cloud.bigtable.data import ReadRowsQuery + from google.cloud.bigtable.data import RowRange + + temp_rows.add_row(b"a") + temp_rows.add_row(b"b") + temp_rows.add_row(b"c") + temp_rows.add_row(b"d") + query = ReadRowsQuery(row_ranges=RowRange(start_key=b"b", end_key=b"d")) + row_list = table.read_rows(query) + assert len(row_list) == 2 + assert row_list[0].row_key == b"b" + assert row_list[1].row_key == b"c" + + @pytest.mark.usefixtures("table") + @CrossSync._Sync_Impl.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + def test_read_rows_single_key_query(self, table, temp_rows): + """Ensure that the read_rows method works with specified query""" + from google.cloud.bigtable.data import ReadRowsQuery + + temp_rows.add_row(b"a") + temp_rows.add_row(b"b") + temp_rows.add_row(b"c") + temp_rows.add_row(b"d") + query = ReadRowsQuery(row_keys=[b"a", b"c"]) + row_list = table.read_rows(query) + assert len(row_list) == 2 + assert row_list[0].row_key == b"a" + assert 
row_list[1].row_key == b"c" + + @pytest.mark.usefixtures("table") + @CrossSync._Sync_Impl.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + def test_read_rows_with_filter(self, table, temp_rows): + """ensure filters are applied""" + from google.cloud.bigtable.data import ReadRowsQuery + from google.cloud.bigtable.data.row_filters import ApplyLabelFilter + + temp_rows.add_row(b"a") + temp_rows.add_row(b"b") + temp_rows.add_row(b"c") + temp_rows.add_row(b"d") + expected_label = "test-label" + row_filter = ApplyLabelFilter(expected_label) + query = ReadRowsQuery(row_filter=row_filter) + row_list = table.read_rows(query) + assert len(row_list) == 4 + for row in row_list: + assert row[0].labels == [expected_label] + + @pytest.mark.usefixtures("table") + def test_read_rows_stream_close(self, table, temp_rows): + """Ensure that the read_rows_stream can be closed""" + from google.cloud.bigtable.data import ReadRowsQuery + + temp_rows.add_row(b"row_key_1") + temp_rows.add_row(b"row_key_2") + query = ReadRowsQuery() + generator = table.read_rows_stream(query) + first_row = generator.__next__() + assert first_row.row_key == b"row_key_1" + generator.close() + with pytest.raises(CrossSync._Sync_Impl.StopIteration): + generator.__next__() + + @pytest.mark.usefixtures("table") + def test_read_row(self, table, temp_rows): + """Test read_row (single row helper)""" + from google.cloud.bigtable.data import Row + + temp_rows.add_row(b"row_key_1", value=b"value") + row = table.read_row(b"row_key_1") + assert isinstance(row, Row) + assert row.row_key == b"row_key_1" + assert row.cells[0].value == b"value" + + @pytest.mark.skipif( + bool(os.environ.get(BIGTABLE_EMULATOR)), + reason="emulator doesn't raise InvalidArgument", + ) + @pytest.mark.usefixtures("table") + def test_read_row_missing(self, table): + """Test read_row when row does not exist""" + from google.api_core import exceptions + + row_key = "row_key_not_exist" + result = table.read_row(row_key) + assert result is None + with pytest.raises(exceptions.InvalidArgument) as e: + table.read_row("") + assert "Row keys must be non-empty" in str(e) + + @pytest.mark.usefixtures("table") + def test_read_row_w_filter(self, table, temp_rows): + """Test read_row (single row helper)""" + from google.cloud.bigtable.data import Row + from google.cloud.bigtable.data.row_filters import ApplyLabelFilter + + temp_rows.add_row(b"row_key_1", value=b"value") + expected_label = "test-label" + label_filter = ApplyLabelFilter(expected_label) + row = table.read_row(b"row_key_1", row_filter=label_filter) + assert isinstance(row, Row) + assert row.row_key == b"row_key_1" + assert row.cells[0].value == b"value" + assert row.cells[0].labels == [expected_label] + + @pytest.mark.skipif( + bool(os.environ.get(BIGTABLE_EMULATOR)), + reason="emulator doesn't raise InvalidArgument", + ) + @pytest.mark.usefixtures("table") + def test_row_exists(self, table, temp_rows): + from google.api_core import exceptions + + "Test row_exists with rows that exist and don't exist" + assert table.row_exists(b"row_key_1") is False + temp_rows.add_row(b"row_key_1") + assert table.row_exists(b"row_key_1") is True + assert table.row_exists("row_key_1") is True + assert table.row_exists(b"row_key_2") is False + assert table.row_exists("row_key_2") is False + assert table.row_exists("3") is False + temp_rows.add_row(b"3") + assert table.row_exists(b"3") is True + with pytest.raises(exceptions.InvalidArgument) as e: + table.row_exists("") + assert "Row keys must be non-empty" 
in str(e) + + @pytest.mark.usefixtures("table") + @CrossSync._Sync_Impl.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + @pytest.mark.parametrize( + "cell_value,filter_input,expect_match", + [ + (b"abc", b"abc", True), + (b"abc", "abc", True), + (b".", ".", True), + (".*", ".*", True), + (".*", b".*", True), + ("a", ".*", False), + (b".*", b".*", True), + ("\\a", "\\a", True), + (b"\xe2\x98\x83", "☃", True), + ("☃", "☃", True), + ("\\C☃", "\\C☃", True), + (1, 1, True), + (2, 1, False), + (68, 68, True), + ("D", 68, False), + (68, "D", False), + (-1, -1, True), + (2852126720, 2852126720, True), + (-1431655766, -1431655766, True), + (-1431655766, -1, False), + ], + ) + def test_literal_value_filter( + self, table, temp_rows, cell_value, filter_input, expect_match + ): + """Literal value filter does complex escaping on re2 strings. + Make sure inputs are properly interpreted by the server""" + from google.cloud.bigtable.data.row_filters import LiteralValueFilter + from google.cloud.bigtable.data import ReadRowsQuery + + f = LiteralValueFilter(filter_input) + temp_rows.add_row(b"row_key_1", value=cell_value) + query = ReadRowsQuery(row_filter=f) + row_list = table.read_rows(query) + assert len(row_list) == bool( + expect_match + ), f"row {type(cell_value)}({cell_value}) not found with {type(filter_input)}({filter_input}) filter" diff --git a/packages/google-cloud-bigtable/tests/unit/data/_async/test__mutate_rows.py b/packages/google-cloud-bigtable/tests/unit/data/_async/test__mutate_rows.py index 621f4d9a21d5..13f668fd34f3 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/_async/test__mutate_rows.py +++ b/packages/google-cloud-bigtable/tests/unit/data/_async/test__mutate_rows.py @@ -1,4 +1,4 @@ -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/tests/unit/data/_async/test__read_rows.py b/packages/google-cloud-bigtable/tests/unit/data/_async/test__read_rows.py index 6a4583a7b9cb..944681a84c96 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/_async/test__read_rows.py +++ b/packages/google-cloud-bigtable/tests/unit/data/_async/test__read_rows.py @@ -1,3 +1,4 @@ +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py b/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py index c24fa3d98c35..8d829a363ee7 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py @@ -1,4 +1,4 @@ -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -1279,7 +1279,7 @@ async def test_call_metadata(self, include_app_profile, fn_name, fn_args, gapic_ # we expect an exception from attempting to call the mock pass assert rpc_mock.call_count == 1 - kwargs = rpc_mock.call_args_list[0].kwargs + kwargs = rpc_mock.call_args_list[0][1] metadata = kwargs["metadata"] # expect single metadata entry assert len(metadata) == 1 @@ -1906,7 +1906,7 @@ async def mock_call(*args, **kwargs): assert read_rows.call_count == 10 assert len(result) == 10 # if run in sequence, we would expect this to take 1 second - assert call_time < 0.2 + assert call_time < 0.5 @CrossSync.pytest async def test_read_rows_sharded_concurrency_limit(self): @@ -2005,21 +2005,25 @@ async def test_read_rows_sharded_negative_batch_timeout(self): They should raise DeadlineExceeded errors """ from google.cloud.bigtable.data.exceptions import ShardedReadRowsExceptionGroup + from google.cloud.bigtable.data._helpers import _CONCURRENCY_LIMIT from google.api_core.exceptions import DeadlineExceeded async def mock_call(*args, **kwargs): - await CrossSync.sleep(0.05) + await CrossSync.sleep(0.06) return [mock.Mock()] async with self._make_client() as client: async with client.get_table("instance", "table") as table: with mock.patch.object(table, "read_rows") as read_rows: read_rows.side_effect = mock_call - queries = [ReadRowsQuery() for _ in range(15)] + num_calls = 15 + queries = [ReadRowsQuery() for _ in range(num_calls)] with pytest.raises(ShardedReadRowsExceptionGroup) as exc: - await table.read_rows_sharded(queries, operation_timeout=0.01) + await table.read_rows_sharded(queries, operation_timeout=0.05) assert isinstance(exc.value, ShardedReadRowsExceptionGroup) - assert len(exc.value.exceptions) == 5 + # _CONCURRENCY_LIMIT calls will run, and won't be interrupted + # calls after the limit will be cancelled due to timeout + assert len(exc.value.exceptions) >= num_calls - _CONCURRENCY_LIMIT assert all( isinstance(e.__cause__, DeadlineExceeded) for e in exc.value.exceptions diff --git a/packages/google-cloud-bigtable/tests/unit/data/_async/test_mutations_batcher.py b/packages/google-cloud-bigtable/tests/unit/data/_async/test_mutations_batcher.py index cd442d392b08..2df8dde6d216 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/_async/test_mutations_batcher.py +++ b/packages/google-cloud-bigtable/tests/unit/data/_async/test_mutations_batcher.py @@ -1,4 +1,4 @@ -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/tests/unit/data/_async/test_read_rows_acceptance.py b/packages/google-cloud-bigtable/tests/unit/data/_async/test_read_rows_acceptance.py index 45d139182383..ab9502223c21 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/_async/test_read_rows_acceptance.py +++ b/packages/google-cloud-bigtable/tests/unit/data/_async/test_read_rows_acceptance.py @@ -1,4 +1,4 @@ -# Copyright 2023 Google LLC +# Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/__init__.py b/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test__mutate_rows.py b/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test__mutate_rows.py new file mode 100644 index 000000000000..2173c88fb1ae --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test__mutate_rows.py @@ -0,0 +1,307 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# This file is automatically generated by CrossSync. Do not edit manually. + +import pytest +from google.cloud.bigtable_v2.types import MutateRowsResponse +from google.rpc import status_pb2 +from google.api_core.exceptions import DeadlineExceeded +from google.api_core.exceptions import Forbidden +from google.cloud.bigtable.data._cross_sync import CrossSync + +try: + from unittest import mock +except ImportError: + import mock + + +class TestMutateRowsOperation: + def _target_class(self): + return CrossSync._Sync_Impl._MutateRowsOperation + + def _make_one(self, *args, **kwargs): + if not args: + kwargs["gapic_client"] = kwargs.pop("gapic_client", mock.Mock()) + kwargs["table"] = kwargs.pop("table", CrossSync._Sync_Impl.Mock()) + kwargs["operation_timeout"] = kwargs.pop("operation_timeout", 5) + kwargs["attempt_timeout"] = kwargs.pop("attempt_timeout", 0.1) + kwargs["retryable_exceptions"] = kwargs.pop("retryable_exceptions", ()) + kwargs["mutation_entries"] = kwargs.pop("mutation_entries", []) + return self._target_class()(*args, **kwargs) + + def _make_mutation(self, count=1, size=1): + mutation = mock.Mock() + mutation.size.return_value = size + mutation.mutations = [mock.Mock()] * count + return mutation + + def _mock_stream(self, mutation_list, error_dict): + for idx, entry in enumerate(mutation_list): + code = error_dict.get(idx, 0) + yield MutateRowsResponse( + entries=[ + MutateRowsResponse.Entry( + index=idx, status=status_pb2.Status(code=code) + ) + ] + ) + + def _make_mock_gapic(self, mutation_list, error_dict=None): + mock_fn = CrossSync._Sync_Impl.Mock() + if error_dict is None: + error_dict = {} + mock_fn.side_effect = lambda *args, **kwargs: self._mock_stream( + mutation_list, error_dict + ) + return mock_fn + + def test_ctor(self): + """test that constructor sets all the attributes correctly""" + from google.cloud.bigtable.data._async._mutate_rows import _EntryWithProto + from google.cloud.bigtable.data.exceptions import _MutateRowsIncomplete + from google.api_core.exceptions import DeadlineExceeded + from google.api_core.exceptions import Aborted + + client = mock.Mock() + table = mock.Mock() + entries = [self._make_mutation(), self._make_mutation()] + operation_timeout = 0.05 + attempt_timeout = 0.01 + retryable_exceptions = () + instance = self._make_one( + client, + table, + entries, + operation_timeout, + attempt_timeout, + 
retryable_exceptions, + ) + assert client.mutate_rows.call_count == 0 + instance._gapic_fn() + assert client.mutate_rows.call_count == 1 + inner_kwargs = client.mutate_rows.call_args[1] + assert len(inner_kwargs) == 3 + assert inner_kwargs["table_name"] == table.table_name + assert inner_kwargs["app_profile_id"] == table.app_profile_id + assert inner_kwargs["retry"] is None + entries_w_pb = [_EntryWithProto(e, e._to_pb()) for e in entries] + assert instance.mutations == entries_w_pb + assert next(instance.timeout_generator) == attempt_timeout + assert instance.is_retryable is not None + assert instance.is_retryable(DeadlineExceeded("")) is False + assert instance.is_retryable(Aborted("")) is False + assert instance.is_retryable(_MutateRowsIncomplete("")) is True + assert instance.is_retryable(RuntimeError("")) is False + assert instance.remaining_indices == list(range(len(entries))) + assert instance.errors == {} + + def test_ctor_too_many_entries(self): + """should raise an error if an operation is created with more than 100,000 entries""" + from google.cloud.bigtable.data._async._mutate_rows import ( + _MUTATE_ROWS_REQUEST_MUTATION_LIMIT, + ) + + assert _MUTATE_ROWS_REQUEST_MUTATION_LIMIT == 100000 + client = mock.Mock() + table = mock.Mock() + entries = [self._make_mutation()] * (_MUTATE_ROWS_REQUEST_MUTATION_LIMIT + 1) + operation_timeout = 0.05 + attempt_timeout = 0.01 + with pytest.raises(ValueError) as e: + self._make_one(client, table, entries, operation_timeout, attempt_timeout) + assert "mutate_rows requests can contain at most 100000 mutations" in str( + e.value + ) + assert "Found 100001" in str(e.value) + + def test_mutate_rows_operation(self): + """Test successful case of mutate_rows_operation""" + client = mock.Mock() + table = mock.Mock() + entries = [self._make_mutation(), self._make_mutation()] + operation_timeout = 0.05 + cls = self._target_class() + with mock.patch( + f"{cls.__module__}.{cls.__name__}._run_attempt", CrossSync._Sync_Impl.Mock() + ) as attempt_mock: + instance = self._make_one( + client, table, entries, operation_timeout, operation_timeout + ) + instance.start() + assert attempt_mock.call_count == 1 + + @pytest.mark.parametrize("exc_type", [RuntimeError, ZeroDivisionError, Forbidden]) + def test_mutate_rows_attempt_exception(self, exc_type): + """exceptions raised from attempt should be raised in MutationsExceptionGroup""" + client = CrossSync._Sync_Impl.Mock() + table = mock.Mock() + entries = [self._make_mutation(), self._make_mutation()] + operation_timeout = 0.05 + expected_exception = exc_type("test") + client.mutate_rows.side_effect = expected_exception + found_exc = None + try: + instance = self._make_one( + client, table, entries, operation_timeout, operation_timeout + ) + instance._run_attempt() + except Exception as e: + found_exc = e + assert client.mutate_rows.call_count == 1 + assert type(found_exc) is exc_type + assert found_exc == expected_exception + assert len(instance.errors) == 2 + assert len(instance.remaining_indices) == 0 + + @pytest.mark.parametrize("exc_type", [RuntimeError, ZeroDivisionError, Forbidden]) + def test_mutate_rows_exception(self, exc_type): + """exceptions raised from retryable should be raised in MutationsExceptionGroup""" + from google.cloud.bigtable.data.exceptions import MutationsExceptionGroup + from google.cloud.bigtable.data.exceptions import FailedMutationEntryError + + client = mock.Mock() + table = mock.Mock() + entries = [self._make_mutation(), self._make_mutation()] + operation_timeout = 0.05 + 
expected_cause = exc_type("abort") + with mock.patch.object( + self._target_class(), "_run_attempt", CrossSync._Sync_Impl.Mock() + ) as attempt_mock: + attempt_mock.side_effect = expected_cause + found_exc = None + try: + instance = self._make_one( + client, table, entries, operation_timeout, operation_timeout + ) + instance.start() + except MutationsExceptionGroup as e: + found_exc = e + assert attempt_mock.call_count == 1 + assert len(found_exc.exceptions) == 2 + assert isinstance(found_exc.exceptions[0], FailedMutationEntryError) + assert isinstance(found_exc.exceptions[1], FailedMutationEntryError) + assert found_exc.exceptions[0].__cause__ == expected_cause + assert found_exc.exceptions[1].__cause__ == expected_cause + + @pytest.mark.parametrize("exc_type", [DeadlineExceeded, RuntimeError]) + def test_mutate_rows_exception_retryable_eventually_pass(self, exc_type): + """If an exception fails but eventually passes, it should not raise an exception""" + client = mock.Mock() + table = mock.Mock() + entries = [self._make_mutation()] + operation_timeout = 1 + expected_cause = exc_type("retry") + num_retries = 2 + with mock.patch.object( + self._target_class(), "_run_attempt", CrossSync._Sync_Impl.Mock() + ) as attempt_mock: + attempt_mock.side_effect = [expected_cause] * num_retries + [None] + instance = self._make_one( + client, + table, + entries, + operation_timeout, + operation_timeout, + retryable_exceptions=(exc_type,), + ) + instance.start() + assert attempt_mock.call_count == num_retries + 1 + + def test_mutate_rows_incomplete_ignored(self): + """MutateRowsIncomplete exceptions should not be added to error list""" + from google.cloud.bigtable.data.exceptions import _MutateRowsIncomplete + from google.cloud.bigtable.data.exceptions import MutationsExceptionGroup + from google.api_core.exceptions import DeadlineExceeded + + client = mock.Mock() + table = mock.Mock() + entries = [self._make_mutation()] + operation_timeout = 0.05 + with mock.patch.object( + self._target_class(), "_run_attempt", CrossSync._Sync_Impl.Mock() + ) as attempt_mock: + attempt_mock.side_effect = _MutateRowsIncomplete("ignored") + found_exc = None + try: + instance = self._make_one( + client, table, entries, operation_timeout, operation_timeout + ) + instance.start() + except MutationsExceptionGroup as e: + found_exc = e + assert attempt_mock.call_count > 0 + assert len(found_exc.exceptions) == 1 + assert isinstance(found_exc.exceptions[0].__cause__, DeadlineExceeded) + + def test_run_attempt_single_entry_success(self): + """Test mutating a single entry""" + mutation = self._make_mutation() + expected_timeout = 1.3 + mock_gapic_fn = self._make_mock_gapic({0: mutation}) + instance = self._make_one( + mutation_entries=[mutation], attempt_timeout=expected_timeout + ) + with mock.patch.object(instance, "_gapic_fn", mock_gapic_fn): + instance._run_attempt() + assert len(instance.remaining_indices) == 0 + assert mock_gapic_fn.call_count == 1 + (_, kwargs) = mock_gapic_fn.call_args + assert kwargs["timeout"] == expected_timeout + assert kwargs["entries"] == [mutation._to_pb()] + + def test_run_attempt_empty_request(self): + """Calling with no mutations should result in no API calls""" + mock_gapic_fn = self._make_mock_gapic([]) + instance = self._make_one(mutation_entries=[]) + instance._run_attempt() + assert mock_gapic_fn.call_count == 0 + + def test_run_attempt_partial_success_retryable(self): + """Some entries succeed, but one fails. 
Should report the proper index, and raise incomplete exception""" + from google.cloud.bigtable.data.exceptions import _MutateRowsIncomplete + + success_mutation = self._make_mutation() + success_mutation_2 = self._make_mutation() + failure_mutation = self._make_mutation() + mutations = [success_mutation, failure_mutation, success_mutation_2] + mock_gapic_fn = self._make_mock_gapic(mutations, error_dict={1: 300}) + instance = self._make_one(mutation_entries=mutations) + instance.is_retryable = lambda x: True + with mock.patch.object(instance, "_gapic_fn", mock_gapic_fn): + with pytest.raises(_MutateRowsIncomplete): + instance._run_attempt() + assert instance.remaining_indices == [1] + assert 0 not in instance.errors + assert len(instance.errors[1]) == 1 + assert instance.errors[1][0].grpc_status_code == 300 + assert 2 not in instance.errors + + def test_run_attempt_partial_success_non_retryable(self): + """Some entries succeed, but one fails. Exception marked as non-retryable. Do not raise incomplete error""" + success_mutation = self._make_mutation() + success_mutation_2 = self._make_mutation() + failure_mutation = self._make_mutation() + mutations = [success_mutation, failure_mutation, success_mutation_2] + mock_gapic_fn = self._make_mock_gapic(mutations, error_dict={1: 300}) + instance = self._make_one(mutation_entries=mutations) + instance.is_retryable = lambda x: False + with mock.patch.object(instance, "_gapic_fn", mock_gapic_fn): + instance._run_attempt() + assert instance.remaining_indices == [] + assert 0 not in instance.errors + assert len(instance.errors[1]) == 1 + assert instance.errors[1][0].grpc_status_code == 300 + assert 2 not in instance.errors diff --git a/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test__read_rows.py b/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test__read_rows.py new file mode 100644 index 000000000000..973b07bcb27c --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test__read_rows.py @@ -0,0 +1,354 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# This file is automatically generated by CrossSync. Do not edit manually. 
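+
+# ---------------------------------------------------------------------------
+# Illustrative sketch (assumption: not part of the CrossSync-generated
+# output). The tests below exercise _ReadRowsOperation._revise_request_rowset,
+# which prunes a request's RowSet after a retryable failure so rows at or
+# before the last yielded key are not requested again. The helper below is a
+# simplified, hypothetical restatement of that behaviour for readability; the
+# name `_sketch_revise_rowset` is invented here, and the authoritative logic
+# (including the _RowSetComplete error and full-table handling) lives in
+# google.cloud.bigtable.data._sync_autogen._read_rows.
+# ---------------------------------------------------------------------------
+from google.cloud.bigtable_v2.types import RowRange as _SketchRowRange
+from google.cloud.bigtable_v2.types import RowSet as _SketchRowSet
+
+
+def _sketch_revise_rowset(row_set: _SketchRowSet, last_seen: bytes) -> _SketchRowSet:
+    """Hypothetical sketch: drop keys and ranges already covered by last_seen."""
+    # keep only row keys strictly after the last yielded key
+    keys = [k for k in row_set.row_keys if k > last_seen]
+    ranges = []
+    for rng in row_set.row_ranges:
+        end = rng.end_key_closed or rng.end_key_open
+        if end and end <= last_seen:
+            continue  # the whole range was already read
+        start = rng.start_key_closed or rng.start_key_open
+        if not start or start <= last_seen:
+            # tighten the start to an open bound at the last yielded key
+            kwargs = {"start_key_open": last_seen}
+            if rng.end_key_closed:
+                kwargs["end_key_closed"] = rng.end_key_closed
+            elif rng.end_key_open:
+                kwargs["end_key_open"] = rng.end_key_open
+            ranges.append(_SketchRowRange(**kwargs))
+        else:
+            ranges.append(rng)
+    return _SketchRowSet(row_keys=keys, row_ranges=ranges)
+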
+ +import pytest +from google.cloud.bigtable.data._cross_sync import CrossSync + +try: + from unittest import mock +except ImportError: + import mock + + +class TestReadRowsOperation: + """ + Tests helper functions in the ReadRowsOperation class + in-depth merging logic in merge_row_response_stream and _read_rows_retryable_attempt + is tested in test_read_rows_acceptance test_client_read_rows, and conformance tests + """ + + @staticmethod + def _get_target_class(): + return CrossSync._Sync_Impl._ReadRowsOperation + + def _make_one(self, *args, **kwargs): + return self._get_target_class()(*args, **kwargs) + + def test_ctor(self): + from google.cloud.bigtable.data import ReadRowsQuery + + row_limit = 91 + query = ReadRowsQuery(limit=row_limit) + client = mock.Mock() + client.read_rows = mock.Mock() + client.read_rows.return_value = None + table = mock.Mock() + table._client = client + table.table_name = "test_table" + table.app_profile_id = "test_profile" + expected_operation_timeout = 42 + expected_request_timeout = 44 + time_gen_mock = mock.Mock() + subpath = "_async" if CrossSync._Sync_Impl.is_async else "_sync_autogen" + with mock.patch( + f"google.cloud.bigtable.data.{subpath}._read_rows._attempt_timeout_generator", + time_gen_mock, + ): + instance = self._make_one( + query, + table, + operation_timeout=expected_operation_timeout, + attempt_timeout=expected_request_timeout, + ) + assert time_gen_mock.call_count == 1 + time_gen_mock.assert_called_once_with( + expected_request_timeout, expected_operation_timeout + ) + assert instance._last_yielded_row_key is None + assert instance._remaining_count == row_limit + assert instance.operation_timeout == expected_operation_timeout + assert client.read_rows.call_count == 0 + assert instance.request.table_name == table.table_name + assert instance.request.app_profile_id == table.app_profile_id + assert instance.request.rows_limit == row_limit + + @pytest.mark.parametrize( + "in_keys,last_key,expected", + [ + (["b", "c", "d"], "a", ["b", "c", "d"]), + (["a", "b", "c"], "b", ["c"]), + (["a", "b", "c"], "c", []), + (["a", "b", "c"], "d", []), + (["d", "c", "b", "a"], "b", ["d", "c"]), + ], + ) + @pytest.mark.parametrize("with_range", [True, False]) + def test_revise_request_rowset_keys_with_range( + self, in_keys, last_key, expected, with_range + ): + from google.cloud.bigtable_v2.types import RowSet as RowSetPB + from google.cloud.bigtable_v2.types import RowRange as RowRangePB + from google.cloud.bigtable.data.exceptions import _RowSetComplete + + in_keys = [key.encode("utf-8") for key in in_keys] + expected = [key.encode("utf-8") for key in expected] + last_key = last_key.encode("utf-8") + if with_range: + sample_range = [RowRangePB(start_key_open=last_key)] + else: + sample_range = [] + row_set = RowSetPB(row_keys=in_keys, row_ranges=sample_range) + if not with_range and expected == []: + with pytest.raises(_RowSetComplete): + self._get_target_class()._revise_request_rowset(row_set, last_key) + else: + revised = self._get_target_class()._revise_request_rowset(row_set, last_key) + assert revised.row_keys == expected + assert revised.row_ranges == sample_range + + @pytest.mark.parametrize( + "in_ranges,last_key,expected", + [ + ( + [{"start_key_open": "b", "end_key_closed": "d"}], + "a", + [{"start_key_open": "b", "end_key_closed": "d"}], + ), + ( + [{"start_key_closed": "b", "end_key_closed": "d"}], + "a", + [{"start_key_closed": "b", "end_key_closed": "d"}], + ), + ( + [{"start_key_open": "a", "end_key_closed": "d"}], + "b", + 
[{"start_key_open": "b", "end_key_closed": "d"}], + ), + ( + [{"start_key_closed": "a", "end_key_open": "d"}], + "b", + [{"start_key_open": "b", "end_key_open": "d"}], + ), + ( + [{"start_key_closed": "b", "end_key_closed": "d"}], + "b", + [{"start_key_open": "b", "end_key_closed": "d"}], + ), + ([{"start_key_closed": "b", "end_key_closed": "d"}], "d", []), + ([{"start_key_closed": "b", "end_key_open": "d"}], "d", []), + ([{"start_key_closed": "b", "end_key_closed": "d"}], "e", []), + ([{"start_key_closed": "b"}], "z", [{"start_key_open": "z"}]), + ([{"start_key_closed": "b"}], "a", [{"start_key_closed": "b"}]), + ( + [{"end_key_closed": "z"}], + "a", + [{"start_key_open": "a", "end_key_closed": "z"}], + ), + ( + [{"end_key_open": "z"}], + "a", + [{"start_key_open": "a", "end_key_open": "z"}], + ), + ], + ) + @pytest.mark.parametrize("with_key", [True, False]) + def test_revise_request_rowset_ranges( + self, in_ranges, last_key, expected, with_key + ): + from google.cloud.bigtable_v2.types import RowSet as RowSetPB + from google.cloud.bigtable_v2.types import RowRange as RowRangePB + from google.cloud.bigtable.data.exceptions import _RowSetComplete + + next_key = (last_key + "a").encode("utf-8") + last_key = last_key.encode("utf-8") + in_ranges = [ + RowRangePB(**{k: v.encode("utf-8") for (k, v) in r.items()}) + for r in in_ranges + ] + expected = [ + RowRangePB(**{k: v.encode("utf-8") for (k, v) in r.items()}) + for r in expected + ] + if with_key: + row_keys = [next_key] + else: + row_keys = [] + row_set = RowSetPB(row_ranges=in_ranges, row_keys=row_keys) + if not with_key and expected == []: + with pytest.raises(_RowSetComplete): + self._get_target_class()._revise_request_rowset(row_set, last_key) + else: + revised = self._get_target_class()._revise_request_rowset(row_set, last_key) + assert revised.row_keys == row_keys + assert revised.row_ranges == expected + + @pytest.mark.parametrize("last_key", ["a", "b", "c"]) + def test_revise_request_full_table(self, last_key): + from google.cloud.bigtable_v2.types import RowSet as RowSetPB + from google.cloud.bigtable_v2.types import RowRange as RowRangePB + + last_key = last_key.encode("utf-8") + row_set = RowSetPB() + for selected_set in [row_set, None]: + revised = self._get_target_class()._revise_request_rowset( + selected_set, last_key + ) + assert revised.row_keys == [] + assert len(revised.row_ranges) == 1 + assert revised.row_ranges[0] == RowRangePB(start_key_open=last_key) + + def test_revise_to_empty_rowset(self): + """revising to an empty rowset should raise error""" + from google.cloud.bigtable.data.exceptions import _RowSetComplete + from google.cloud.bigtable_v2.types import RowSet as RowSetPB + from google.cloud.bigtable_v2.types import RowRange as RowRangePB + + row_keys = [b"a", b"b", b"c"] + row_range = RowRangePB(end_key_open=b"c") + row_set = RowSetPB(row_keys=row_keys, row_ranges=[row_range]) + with pytest.raises(_RowSetComplete): + self._get_target_class()._revise_request_rowset(row_set, b"d") + + @pytest.mark.parametrize( + "start_limit,emit_num,expected_limit", + [ + (10, 0, 10), + (10, 1, 9), + (10, 10, 0), + (None, 10, None), + (None, 0, None), + (4, 2, 2), + ], + ) + def test_revise_limit(self, start_limit, emit_num, expected_limit): + """revise_limit should revise the request's limit field + - if limit is 0 (unlimited), it should never be revised + - if start_limit-emit_num == 0, the request should end early + - if the number emitted exceeds the new limit, an exception should + should be raised (tested in 
test_revise_limit_over_limit)""" + from google.cloud.bigtable.data import ReadRowsQuery + from google.cloud.bigtable_v2.types import ReadRowsResponse + + def awaitable_stream(): + def mock_stream(): + for i in range(emit_num): + yield ReadRowsResponse( + chunks=[ + ReadRowsResponse.CellChunk( + row_key=str(i).encode(), + family_name="b", + qualifier=b"c", + value=b"d", + commit_row=True, + ) + ] + ) + + return mock_stream() + + query = ReadRowsQuery(limit=start_limit) + table = mock.Mock() + table.table_name = "table_name" + table.app_profile_id = "app_profile_id" + instance = self._make_one(query, table, 10, 10) + assert instance._remaining_count == start_limit + for val in instance.chunk_stream(awaitable_stream()): + pass + assert instance._remaining_count == expected_limit + + @pytest.mark.parametrize("start_limit,emit_num", [(5, 10), (3, 9), (1, 10)]) + def test_revise_limit_over_limit(self, start_limit, emit_num): + """Should raise runtime error if we get in state where emit_num > start_num + (unless start_num == 0, which represents unlimited)""" + from google.cloud.bigtable.data import ReadRowsQuery + from google.cloud.bigtable_v2.types import ReadRowsResponse + from google.cloud.bigtable.data.exceptions import InvalidChunk + + def awaitable_stream(): + def mock_stream(): + for i in range(emit_num): + yield ReadRowsResponse( + chunks=[ + ReadRowsResponse.CellChunk( + row_key=str(i).encode(), + family_name="b", + qualifier=b"c", + value=b"d", + commit_row=True, + ) + ] + ) + + return mock_stream() + + query = ReadRowsQuery(limit=start_limit) + table = mock.Mock() + table.table_name = "table_name" + table.app_profile_id = "app_profile_id" + instance = self._make_one(query, table, 10, 10) + assert instance._remaining_count == start_limit + with pytest.raises(InvalidChunk) as e: + for val in instance.chunk_stream(awaitable_stream()): + pass + assert "emit count exceeds row limit" in str(e.value) + + def test_close(self): + """should be able to close a stream safely with close. 
+ Closed generators should raise StopAsyncIteration on next yield""" + + def mock_stream(): + while True: + yield 1 + + with mock.patch.object( + self._get_target_class(), "_read_rows_attempt" + ) as mock_attempt: + instance = self._make_one(mock.Mock(), mock.Mock(), 1, 1) + wrapped_gen = mock_stream() + mock_attempt.return_value = wrapped_gen + gen = instance.start_operation() + gen.__next__() + gen.close() + with pytest.raises(CrossSync._Sync_Impl.StopIteration): + gen.__next__() + gen.close() + with pytest.raises(CrossSync._Sync_Impl.StopIteration): + wrapped_gen.__next__() + + def test_retryable_ignore_repeated_rows(self): + """Duplicate rows should cause an invalid chunk error""" + from google.cloud.bigtable.data.exceptions import InvalidChunk + from google.cloud.bigtable_v2.types import ReadRowsResponse + + row_key = b"duplicate" + + def mock_awaitable_stream(): + def mock_stream(): + while True: + yield ReadRowsResponse( + chunks=[ + ReadRowsResponse.CellChunk(row_key=row_key, commit_row=True) + ] + ) + yield ReadRowsResponse( + chunks=[ + ReadRowsResponse.CellChunk(row_key=row_key, commit_row=True) + ] + ) + + return mock_stream() + + instance = mock.Mock() + instance._last_yielded_row_key = None + instance._remaining_count = None + stream = self._get_target_class().chunk_stream( + instance, mock_awaitable_stream() + ) + stream.__next__() + with pytest.raises(InvalidChunk) as exc: + stream.__next__() + assert "row keys should be strictly increasing" in str(exc.value) diff --git a/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_client.py b/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_client.py new file mode 100644 index 000000000000..51c88c63eb55 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_client.py @@ -0,0 +1,2889 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This file is automatically generated by CrossSync. Do not edit manually. 
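+
+# ---------------------------------------------------------------------------
+# Illustrative sketch (assumption: not part of the CrossSync-generated
+# output, and not the client's actual API). Many tests in this module assert
+# the client's instance-registration bookkeeping: each open Table adds its
+# id() as an owner of a (instance_name, table_name, app_profile_id) key, and
+# the key stays in `_active_instances` until its last owner is removed.
+# Roughly, with plain containers and hypothetical helper names:
+#
+#     def register(owners, active, key, table):
+#         owners.setdefault(key, set()).add(id(table))
+#         active.add(key)          # first owner triggers channel warming
+#
+#     def remove(owners, active, key, table):
+#         owner_set = owners.get(key, set())
+#         if id(table) not in owner_set:
+#             return False
+#         owner_set.discard(id(table))
+#         if not owner_set:
+#             active.discard(key)  # last owner closed: stop warming
+#         return True
+#
+# The real logic lives in _register_instance / _remove_instance_registration
+# on the data client, which the tests below patch and inspect directly.
+# ---------------------------------------------------------------------------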
+ +from __future__ import annotations +import grpc +import asyncio +import re +import pytest +import mock +from google.cloud.bigtable.data import mutations +from google.auth.credentials import AnonymousCredentials +from google.cloud.bigtable_v2.types import ReadRowsResponse +from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery +from google.api_core import exceptions as core_exceptions +from google.cloud.bigtable.data.exceptions import InvalidChunk +from google.cloud.bigtable.data.exceptions import _MutateRowsIncomplete +from google.cloud.bigtable.data import TABLE_DEFAULT +from google.cloud.bigtable.data.read_modify_write_rules import IncrementRule +from google.cloud.bigtable.data.read_modify_write_rules import AppendValueRule +from google.cloud.bigtable_v2.types.bigtable import ExecuteQueryResponse +from google.cloud.bigtable.data._cross_sync import CrossSync +from google.api_core import grpc_helpers + +CrossSync._Sync_Impl.add_mapping("grpc_helpers", grpc_helpers) + + +@CrossSync._Sync_Impl.add_mapping_decorator("TestBigtableDataClient") +class TestBigtableDataClient: + @staticmethod + def _get_target_class(): + return CrossSync._Sync_Impl.DataClient + + @classmethod + def _make_client(cls, *args, use_emulator=True, **kwargs): + import os + + env_mask = {} + if use_emulator: + env_mask["BIGTABLE_EMULATOR_HOST"] = "localhost" + import warnings + + warnings.filterwarnings("ignore", category=RuntimeWarning) + else: + kwargs["credentials"] = kwargs.get("credentials", AnonymousCredentials()) + kwargs["project"] = kwargs.get("project", "project-id") + with mock.patch.dict(os.environ, env_mask): + return cls._get_target_class()(*args, **kwargs) + + def test_ctor(self): + expected_project = "project-id" + expected_credentials = AnonymousCredentials() + client = self._make_client( + project="project-id", credentials=expected_credentials, use_emulator=False + ) + CrossSync._Sync_Impl.yield_to_event_loop() + assert client.project == expected_project + assert not client._active_instances + assert client._channel_refresh_task is not None + assert client.transport._credentials == expected_credentials + client.close() + + def test_ctor_super_inits(self): + from google.cloud.client import ClientWithProject + from google.api_core import client_options as client_options_lib + + project = "project-id" + credentials = AnonymousCredentials() + client_options = {"api_endpoint": "foo.bar:1234"} + options_parsed = client_options_lib.from_dict(client_options) + with mock.patch.object( + CrossSync._Sync_Impl.GapicClient, "__init__" + ) as bigtable_client_init: + bigtable_client_init.return_value = None + with mock.patch.object( + ClientWithProject, "__init__" + ) as client_project_init: + client_project_init.return_value = None + try: + self._make_client( + project=project, + credentials=credentials, + client_options=options_parsed, + use_emulator=False, + ) + except AttributeError: + pass + assert bigtable_client_init.call_count == 1 + kwargs = bigtable_client_init.call_args[1] + assert kwargs["credentials"] == credentials + assert kwargs["client_options"] == options_parsed + assert client_project_init.call_count == 1 + kwargs = client_project_init.call_args[1] + assert kwargs["project"] == project + assert kwargs["credentials"] == credentials + assert kwargs["client_options"] == options_parsed + + def test_ctor_dict_options(self): + from google.api_core.client_options import ClientOptions + + client_options = {"api_endpoint": "foo.bar:1234"} + with mock.patch.object( + 
CrossSync._Sync_Impl.GapicClient, "__init__" + ) as bigtable_client_init: + try: + self._make_client(client_options=client_options) + except TypeError: + pass + bigtable_client_init.assert_called_once() + kwargs = bigtable_client_init.call_args[1] + called_options = kwargs["client_options"] + assert called_options.api_endpoint == "foo.bar:1234" + assert isinstance(called_options, ClientOptions) + with mock.patch.object( + self._get_target_class(), "_start_background_channel_refresh" + ) as start_background_refresh: + client = self._make_client( + client_options=client_options, use_emulator=False + ) + start_background_refresh.assert_called_once() + client.close() + + def test_veneer_grpc_headers(self): + client_component = "data-async" if CrossSync._Sync_Impl.is_async else "data" + VENEER_HEADER_REGEX = re.compile( + "gapic\\/[0-9]+\\.[\\w.-]+ gax\\/[0-9]+\\.[\\w.-]+ gccl\\/[0-9]+\\.[\\w.-]+-" + + client_component + + " gl-python\\/[0-9]+\\.[\\w.-]+ grpc\\/[0-9]+\\.[\\w.-]+" + ) + patch = mock.patch("google.api_core.gapic_v1.method.wrap_method") + with patch as gapic_mock: + client = self._make_client(project="project-id") + wrapped_call_list = gapic_mock.call_args_list + assert len(wrapped_call_list) > 0 + for call in wrapped_call_list: + client_info = call.kwargs["client_info"] + assert client_info is not None, f"{call} has no client_info" + wrapped_user_agent_sorted = " ".join( + sorted(client_info.to_user_agent().split(" ")) + ) + assert VENEER_HEADER_REGEX.match( + wrapped_user_agent_sorted + ), f"'{wrapped_user_agent_sorted}' does not match {VENEER_HEADER_REGEX}" + client.close() + + def test__start_background_channel_refresh_task_exists(self): + client = self._make_client(project="project-id", use_emulator=False) + assert client._channel_refresh_task is not None + with mock.patch.object(asyncio, "create_task") as create_task: + client._start_background_channel_refresh() + create_task.assert_not_called() + client.close() + + def test__start_background_channel_refresh(self): + client = self._make_client(project="project-id") + with mock.patch.object( + client, "_ping_and_warm_instances", CrossSync._Sync_Impl.Mock() + ) as ping_and_warm: + client._emulator_host = None + client._start_background_channel_refresh() + assert client._channel_refresh_task is not None + assert isinstance(client._channel_refresh_task, CrossSync._Sync_Impl.Task) + CrossSync._Sync_Impl.sleep(0.1) + assert ping_and_warm.call_count == 1 + client.close() + + def test__ping_and_warm_instances(self): + """test ping and warm with mocked asyncio.gather""" + client_mock = mock.Mock() + client_mock._execute_ping_and_warms = ( + lambda *args: self._get_target_class()._execute_ping_and_warms( + client_mock, *args + ) + ) + with mock.patch.object( + CrossSync._Sync_Impl, "gather_partials", CrossSync._Sync_Impl.Mock() + ) as gather: + gather.side_effect = lambda partials, **kwargs: [None for _ in partials] + channel = mock.Mock() + client_mock._active_instances = [] + result = self._get_target_class()._ping_and_warm_instances( + client_mock, channel=channel + ) + assert len(result) == 0 + assert gather.call_args[1]["return_exceptions"] is True + assert gather.call_args[1]["sync_executor"] == client_mock._executor + client_mock._active_instances = [ + (mock.Mock(), mock.Mock(), mock.Mock()) + ] * 4 + gather.reset_mock() + channel.reset_mock() + result = self._get_target_class()._ping_and_warm_instances( + client_mock, channel=channel + ) + assert len(result) == 4 + gather.assert_called_once() + partial_list = 
gather.call_args.args[0] + assert len(partial_list) == 4 + grpc_call_args = channel.unary_unary().call_args_list + for idx, (_, kwargs) in enumerate(grpc_call_args): + ( + expected_instance, + expected_table, + expected_app_profile, + ) = client_mock._active_instances[idx] + request = kwargs["request"] + assert request["name"] == expected_instance + assert request["app_profile_id"] == expected_app_profile + metadata = kwargs["metadata"] + assert len(metadata) == 1 + assert metadata[0][0] == "x-goog-request-params" + assert ( + metadata[0][1] + == f"name={expected_instance}&app_profile_id={expected_app_profile}" + ) + + def test__ping_and_warm_single_instance(self): + """should be able to call ping and warm with single instance""" + client_mock = mock.Mock() + client_mock._execute_ping_and_warms = ( + lambda *args: self._get_target_class()._execute_ping_and_warms( + client_mock, *args + ) + ) + with mock.patch.object( + CrossSync._Sync_Impl, "gather_partials", CrossSync._Sync_Impl.Mock() + ) as gather: + gather.side_effect = lambda *args, **kwargs: [fn() for fn in args[0]] + client_mock._active_instances = [mock.Mock()] * 100 + test_key = ("test-instance", "test-table", "test-app-profile") + result = self._get_target_class()._ping_and_warm_instances( + client_mock, test_key + ) + assert len(result) == 1 + grpc_call_args = ( + client_mock.transport.grpc_channel.unary_unary().call_args_list + ) + assert len(grpc_call_args) == 1 + kwargs = grpc_call_args[0][1] + request = kwargs["request"] + assert request["name"] == "test-instance" + assert request["app_profile_id"] == "test-app-profile" + metadata = kwargs["metadata"] + assert len(metadata) == 1 + assert metadata[0][0] == "x-goog-request-params" + assert ( + metadata[0][1] == "name=test-instance&app_profile_id=test-app-profile" + ) + + @pytest.mark.parametrize( + "refresh_interval, wait_time, expected_sleep", + [(0, 0, 0), (0, 1, 0), (10, 0, 10), (10, 5, 5), (10, 10, 0), (10, 15, 0)], + ) + def test__manage_channel_first_sleep( + self, refresh_interval, wait_time, expected_sleep + ): + import time + + with mock.patch.object(time, "monotonic") as monotonic: + monotonic.return_value = 0 + with mock.patch.object(CrossSync._Sync_Impl, "event_wait") as sleep: + sleep.side_effect = asyncio.CancelledError + try: + client = self._make_client(project="project-id") + client._channel_init_time = -wait_time + client._manage_channel(refresh_interval, refresh_interval) + except asyncio.CancelledError: + pass + sleep.assert_called_once() + call_time = sleep.call_args[0][1] + assert ( + abs(call_time - expected_sleep) < 0.1 + ), f"refresh_interval: {refresh_interval}, wait_time: {wait_time}, expected_sleep: {expected_sleep}" + client.close() + + def test__manage_channel_ping_and_warm(self): + """_manage channel should call ping and warm internally""" + import time + import threading + + client_mock = mock.Mock() + client_mock._is_closed.is_set.return_value = False + client_mock._channel_init_time = time.monotonic() + orig_channel = client_mock.transport.grpc_channel + sleep_tuple = ( + (asyncio, "sleep") + if CrossSync._Sync_Impl.is_async + else (threading.Event, "wait") + ) + with mock.patch.object(*sleep_tuple): + orig_channel.close.side_effect = asyncio.CancelledError + ping_and_warm = ( + client_mock._ping_and_warm_instances + ) = CrossSync._Sync_Impl.Mock() + try: + self._get_target_class()._manage_channel(client_mock, 10) + except asyncio.CancelledError: + pass + assert ping_and_warm.call_count == 2 + assert client_mock.transport._grpc_channel != 
orig_channel + called_with = [call[1]["channel"] for call in ping_and_warm.call_args_list] + assert orig_channel in called_with + assert client_mock.transport.grpc_channel in called_with + + @pytest.mark.parametrize( + "refresh_interval, num_cycles, expected_sleep", + [(None, 1, 60 * 35), (10, 10, 100), (10, 1, 10)], + ) + def test__manage_channel_sleeps(self, refresh_interval, num_cycles, expected_sleep): + import time + import random + + channel = mock.Mock() + channel.close = CrossSync._Sync_Impl.Mock() + with mock.patch.object(random, "uniform") as uniform: + uniform.side_effect = lambda min_, max_: min_ + with mock.patch.object(time, "time") as time_mock: + time_mock.return_value = 0 + with mock.patch.object(CrossSync._Sync_Impl, "event_wait") as sleep: + sleep.side_effect = [None for i in range(num_cycles - 1)] + [ + asyncio.CancelledError + ] + client = self._make_client(project="project-id") + client.transport._grpc_channel = channel + with mock.patch.object( + client.transport, "create_channel", CrossSync._Sync_Impl.Mock + ): + try: + if refresh_interval is not None: + client._manage_channel( + refresh_interval, refresh_interval, grace_period=0 + ) + else: + client._manage_channel(grace_period=0) + except asyncio.CancelledError: + pass + assert sleep.call_count == num_cycles + total_sleep = sum([call[0][1] for call in sleep.call_args_list]) + assert ( + abs(total_sleep - expected_sleep) < 0.1 + ), f"refresh_interval={refresh_interval}, num_cycles={num_cycles}, expected_sleep={expected_sleep}" + client.close() + + def test__manage_channel_random(self): + import random + + with mock.patch.object(CrossSync._Sync_Impl, "event_wait") as sleep: + with mock.patch.object(random, "uniform") as uniform: + uniform.return_value = 0 + try: + uniform.side_effect = asyncio.CancelledError + client = self._make_client(project="project-id") + except asyncio.CancelledError: + uniform.side_effect = None + uniform.reset_mock() + sleep.reset_mock() + with mock.patch.object(client.transport, "create_channel"): + min_val = 200 + max_val = 205 + uniform.side_effect = lambda min_, max_: min_ + sleep.side_effect = [None, asyncio.CancelledError] + try: + client._manage_channel(min_val, max_val, grace_period=0) + except asyncio.CancelledError: + pass + assert uniform.call_count == 2 + uniform_args = [call[0] for call in uniform.call_args_list] + for found_min, found_max in uniform_args: + assert found_min == min_val + assert found_max == max_val + + @pytest.mark.parametrize("num_cycles", [0, 1, 10, 100]) + def test__manage_channel_refresh(self, num_cycles): + expected_refresh = 0.5 + grpc_lib = grpc.aio if CrossSync._Sync_Impl.is_async else grpc + new_channel = grpc_lib.insecure_channel("localhost:8080") + with mock.patch.object(CrossSync._Sync_Impl, "event_wait") as sleep: + sleep.side_effect = [None for i in range(num_cycles)] + [RuntimeError] + with mock.patch.object( + CrossSync._Sync_Impl.grpc_helpers, "create_channel" + ) as create_channel: + create_channel.return_value = new_channel + client = self._make_client(project="project-id") + create_channel.reset_mock() + try: + client._manage_channel( + refresh_interval_min=expected_refresh, + refresh_interval_max=expected_refresh, + grace_period=0, + ) + except RuntimeError: + pass + assert sleep.call_count == num_cycles + 1 + assert create_channel.call_count == num_cycles + client.close() + + def test__register_instance(self): + """test instance registration""" + client_mock = mock.Mock() + client_mock._gapic_client.instance_path.side_effect = lambda a, b: 
f"prefix/{b}" + active_instances = set() + instance_owners = {} + client_mock._active_instances = active_instances + client_mock._instance_owners = instance_owners + client_mock._channel_refresh_task = None + client_mock._ping_and_warm_instances = CrossSync._Sync_Impl.Mock() + table_mock = mock.Mock() + self._get_target_class()._register_instance( + client_mock, "instance-1", table_mock + ) + assert client_mock._start_background_channel_refresh.call_count == 1 + expected_key = ( + "prefix/instance-1", + table_mock.table_name, + table_mock.app_profile_id, + ) + assert len(active_instances) == 1 + assert expected_key == tuple(list(active_instances)[0]) + assert len(instance_owners) == 1 + assert expected_key == tuple(list(instance_owners)[0]) + client_mock._channel_refresh_task = mock.Mock() + table_mock2 = mock.Mock() + self._get_target_class()._register_instance( + client_mock, "instance-2", table_mock2 + ) + assert client_mock._start_background_channel_refresh.call_count == 1 + assert ( + client_mock._ping_and_warm_instances.call_args[0][0][0] + == "prefix/instance-2" + ) + assert client_mock._ping_and_warm_instances.call_count == 1 + assert len(active_instances) == 2 + assert len(instance_owners) == 2 + expected_key2 = ( + "prefix/instance-2", + table_mock2.table_name, + table_mock2.app_profile_id, + ) + assert any( + [ + expected_key2 == tuple(list(active_instances)[i]) + for i in range(len(active_instances)) + ] + ) + assert any( + [ + expected_key2 == tuple(list(instance_owners)[i]) + for i in range(len(instance_owners)) + ] + ) + + def test__register_instance_duplicate(self): + """test double instance registration. Should be no-op""" + client_mock = mock.Mock() + client_mock._gapic_client.instance_path.side_effect = lambda a, b: f"prefix/{b}" + active_instances = set() + instance_owners = {} + client_mock._active_instances = active_instances + client_mock._instance_owners = instance_owners + client_mock._channel_refresh_task = object() + mock_channels = [mock.Mock()] + client_mock.transport.channels = mock_channels + client_mock._ping_and_warm_instances = CrossSync._Sync_Impl.Mock() + table_mock = mock.Mock() + expected_key = ( + "prefix/instance-1", + table_mock.table_name, + table_mock.app_profile_id, + ) + self._get_target_class()._register_instance( + client_mock, "instance-1", table_mock + ) + assert len(active_instances) == 1 + assert expected_key == tuple(list(active_instances)[0]) + assert len(instance_owners) == 1 + assert expected_key == tuple(list(instance_owners)[0]) + assert client_mock._ping_and_warm_instances.call_count == 1 + self._get_target_class()._register_instance( + client_mock, "instance-1", table_mock + ) + assert len(active_instances) == 1 + assert expected_key == tuple(list(active_instances)[0]) + assert len(instance_owners) == 1 + assert expected_key == tuple(list(instance_owners)[0]) + assert client_mock._ping_and_warm_instances.call_count == 1 + + @pytest.mark.parametrize( + "insert_instances,expected_active,expected_owner_keys", + [ + ([("i", "t", None)], [("i", "t", None)], [("i", "t", None)]), + ([("i", "t", "p")], [("i", "t", "p")], [("i", "t", "p")]), + ([("1", "t", "p"), ("1", "t", "p")], [("1", "t", "p")], [("1", "t", "p")]), + ( + [("1", "t", "p"), ("2", "t", "p")], + [("1", "t", "p"), ("2", "t", "p")], + [("1", "t", "p"), ("2", "t", "p")], + ), + ], + ) + def test__register_instance_state( + self, insert_instances, expected_active, expected_owner_keys + ): + """test that active_instances and instance_owners are updated as expected""" + 
client_mock = mock.Mock() + client_mock._gapic_client.instance_path.side_effect = lambda a, b: b + active_instances = set() + instance_owners = {} + client_mock._active_instances = active_instances + client_mock._instance_owners = instance_owners + client_mock._channel_refresh_task = None + client_mock._ping_and_warm_instances = CrossSync._Sync_Impl.Mock() + table_mock = mock.Mock() + for instance, table, profile in insert_instances: + table_mock.table_name = table + table_mock.app_profile_id = profile + self._get_target_class()._register_instance( + client_mock, instance, table_mock + ) + assert len(active_instances) == len(expected_active) + assert len(instance_owners) == len(expected_owner_keys) + for expected in expected_active: + assert any( + [ + expected == tuple(list(active_instances)[i]) + for i in range(len(active_instances)) + ] + ) + for expected in expected_owner_keys: + assert any( + [ + expected == tuple(list(instance_owners)[i]) + for i in range(len(instance_owners)) + ] + ) + + def test__remove_instance_registration(self): + client = self._make_client(project="project-id") + table = mock.Mock() + client._register_instance("instance-1", table) + client._register_instance("instance-2", table) + assert len(client._active_instances) == 2 + assert len(client._instance_owners.keys()) == 2 + instance_1_path = client._gapic_client.instance_path( + client.project, "instance-1" + ) + instance_1_key = (instance_1_path, table.table_name, table.app_profile_id) + instance_2_path = client._gapic_client.instance_path( + client.project, "instance-2" + ) + instance_2_key = (instance_2_path, table.table_name, table.app_profile_id) + assert len(client._instance_owners[instance_1_key]) == 1 + assert list(client._instance_owners[instance_1_key])[0] == id(table) + assert len(client._instance_owners[instance_2_key]) == 1 + assert list(client._instance_owners[instance_2_key])[0] == id(table) + success = client._remove_instance_registration("instance-1", table) + assert success + assert len(client._active_instances) == 1 + assert len(client._instance_owners[instance_1_key]) == 0 + assert len(client._instance_owners[instance_2_key]) == 1 + assert client._active_instances == {instance_2_key} + success = client._remove_instance_registration("fake-key", table) + assert not success + assert len(client._active_instances) == 1 + client.close() + + def test__multiple_table_registration(self): + """registering with multiple tables with the same key should + add multiple owners to instance_owners, but only keep one copy + of shared key in active_instances""" + from google.cloud.bigtable.data._helpers import _WarmedInstanceKey + + with self._make_client(project="project-id") as client: + with client.get_table("instance_1", "table_1") as table_1: + instance_1_path = client._gapic_client.instance_path( + client.project, "instance_1" + ) + instance_1_key = _WarmedInstanceKey( + instance_1_path, table_1.table_name, table_1.app_profile_id + ) + assert len(client._instance_owners[instance_1_key]) == 1 + assert len(client._active_instances) == 1 + assert id(table_1) in client._instance_owners[instance_1_key] + with client.get_table("instance_1", "table_1") as table_2: + assert table_2._register_instance_future is not None + table_2._register_instance_future.result() + assert len(client._instance_owners[instance_1_key]) == 2 + assert len(client._active_instances) == 1 + assert id(table_1) in client._instance_owners[instance_1_key] + assert id(table_2) in client._instance_owners[instance_1_key] + with 
client.get_table("instance_1", "table_3") as table_3: + assert table_3._register_instance_future is not None + table_3._register_instance_future.result() + instance_3_path = client._gapic_client.instance_path( + client.project, "instance_1" + ) + instance_3_key = _WarmedInstanceKey( + instance_3_path, table_3.table_name, table_3.app_profile_id + ) + assert len(client._instance_owners[instance_1_key]) == 2 + assert len(client._instance_owners[instance_3_key]) == 1 + assert len(client._active_instances) == 2 + assert id(table_1) in client._instance_owners[instance_1_key] + assert id(table_2) in client._instance_owners[instance_1_key] + assert id(table_3) in client._instance_owners[instance_3_key] + assert len(client._active_instances) == 1 + assert instance_1_key in client._active_instances + assert id(table_2) not in client._instance_owners[instance_1_key] + assert len(client._active_instances) == 0 + assert instance_1_key not in client._active_instances + assert len(client._instance_owners[instance_1_key]) == 0 + + def test__multiple_instance_registration(self): + """registering with multiple instance keys should update the key + in instance_owners and active_instances""" + from google.cloud.bigtable.data._helpers import _WarmedInstanceKey + + with self._make_client(project="project-id") as client: + with client.get_table("instance_1", "table_1") as table_1: + assert table_1._register_instance_future is not None + table_1._register_instance_future.result() + with client.get_table("instance_2", "table_2") as table_2: + assert table_2._register_instance_future is not None + table_2._register_instance_future.result() + instance_1_path = client._gapic_client.instance_path( + client.project, "instance_1" + ) + instance_1_key = _WarmedInstanceKey( + instance_1_path, table_1.table_name, table_1.app_profile_id + ) + instance_2_path = client._gapic_client.instance_path( + client.project, "instance_2" + ) + instance_2_key = _WarmedInstanceKey( + instance_2_path, table_2.table_name, table_2.app_profile_id + ) + assert len(client._instance_owners[instance_1_key]) == 1 + assert len(client._instance_owners[instance_2_key]) == 1 + assert len(client._active_instances) == 2 + assert id(table_1) in client._instance_owners[instance_1_key] + assert id(table_2) in client._instance_owners[instance_2_key] + assert len(client._active_instances) == 1 + assert instance_1_key in client._active_instances + assert len(client._instance_owners[instance_2_key]) == 0 + assert len(client._instance_owners[instance_1_key]) == 1 + assert id(table_1) in client._instance_owners[instance_1_key] + assert len(client._active_instances) == 0 + assert len(client._instance_owners[instance_1_key]) == 0 + assert len(client._instance_owners[instance_2_key]) == 0 + + def test_get_table(self): + from google.cloud.bigtable.data._helpers import _WarmedInstanceKey + + client = self._make_client(project="project-id") + assert not client._active_instances + expected_table_id = "table-id" + expected_instance_id = "instance-id" + expected_app_profile_id = "app-profile-id" + table = client.get_table( + expected_instance_id, expected_table_id, expected_app_profile_id + ) + CrossSync._Sync_Impl.yield_to_event_loop() + assert isinstance(table, CrossSync._Sync_Impl.TestTable._get_target_class()) + assert table.table_id == expected_table_id + assert ( + table.table_name + == f"projects/{client.project}/instances/{expected_instance_id}/tables/{expected_table_id}" + ) + assert table.instance_id == expected_instance_id + assert ( + table.instance_name + 
== f"projects/{client.project}/instances/{expected_instance_id}" + ) + assert table.app_profile_id == expected_app_profile_id + assert table.client is client + instance_key = _WarmedInstanceKey( + table.instance_name, table.table_name, table.app_profile_id + ) + assert instance_key in client._active_instances + assert client._instance_owners[instance_key] == {id(table)} + client.close() + + def test_get_table_arg_passthrough(self): + """All arguments passed in get_table should be sent to constructor""" + with self._make_client(project="project-id") as client: + with mock.patch.object( + CrossSync._Sync_Impl.TestTable._get_target_class(), "__init__" + ) as mock_constructor: + mock_constructor.return_value = None + assert not client._active_instances + expected_table_id = "table-id" + expected_instance_id = "instance-id" + expected_app_profile_id = "app-profile-id" + expected_args = (1, "test", {"test": 2}) + expected_kwargs = {"hello": "world", "test": 2} + client.get_table( + expected_instance_id, + expected_table_id, + expected_app_profile_id, + *expected_args, + **expected_kwargs, + ) + mock_constructor.assert_called_once_with( + client, + expected_instance_id, + expected_table_id, + expected_app_profile_id, + *expected_args, + **expected_kwargs, + ) + + def test_get_table_context_manager(self): + from google.cloud.bigtable.data._helpers import _WarmedInstanceKey + + expected_table_id = "table-id" + expected_instance_id = "instance-id" + expected_app_profile_id = "app-profile-id" + expected_project_id = "project-id" + with mock.patch.object( + CrossSync._Sync_Impl.TestTable._get_target_class(), "close" + ) as close_mock: + with self._make_client(project=expected_project_id) as client: + with client.get_table( + expected_instance_id, expected_table_id, expected_app_profile_id + ) as table: + CrossSync._Sync_Impl.yield_to_event_loop() + assert isinstance( + table, CrossSync._Sync_Impl.TestTable._get_target_class() + ) + assert table.table_id == expected_table_id + assert ( + table.table_name + == f"projects/{expected_project_id}/instances/{expected_instance_id}/tables/{expected_table_id}" + ) + assert table.instance_id == expected_instance_id + assert ( + table.instance_name + == f"projects/{expected_project_id}/instances/{expected_instance_id}" + ) + assert table.app_profile_id == expected_app_profile_id + assert table.client is client + instance_key = _WarmedInstanceKey( + table.instance_name, table.table_name, table.app_profile_id + ) + assert instance_key in client._active_instances + assert client._instance_owners[instance_key] == {id(table)} + assert close_mock.call_count == 1 + + def test_close(self): + client = self._make_client(project="project-id", use_emulator=False) + task = client._channel_refresh_task + assert task is not None + assert not task.done() + with mock.patch.object( + client.transport, "close", CrossSync._Sync_Impl.Mock() + ) as close_mock: + client.close() + close_mock.assert_called_once() + assert task.done() + assert client._channel_refresh_task is None + + def test_close_with_timeout(self): + expected_timeout = 19 + client = self._make_client(project="project-id", use_emulator=False) + with mock.patch.object( + CrossSync._Sync_Impl, "wait", CrossSync._Sync_Impl.Mock() + ) as wait_for_mock: + client.close(timeout=expected_timeout) + wait_for_mock.assert_called_once() + assert wait_for_mock.call_args[1]["timeout"] == expected_timeout + client.close() + + def test_context_manager(self): + from functools import partial + + close_mock = CrossSync._Sync_Impl.Mock() + 
true_close = None + with self._make_client(project="project-id", use_emulator=False) as client: + true_close = partial(client.close) + client.close = close_mock + assert not client._channel_refresh_task.done() + assert client.project == "project-id" + assert client._active_instances == set() + close_mock.assert_not_called() + close_mock.assert_called_once() + true_close() + + +@CrossSync._Sync_Impl.add_mapping_decorator("TestTable") +class TestTable: + def _make_client(self, *args, **kwargs): + return CrossSync._Sync_Impl.TestBigtableDataClient._make_client(*args, **kwargs) + + @staticmethod + def _get_target_class(): + return CrossSync._Sync_Impl.Table + + def test_table_ctor(self): + from google.cloud.bigtable.data._helpers import _WarmedInstanceKey + + expected_table_id = "table-id" + expected_instance_id = "instance-id" + expected_app_profile_id = "app-profile-id" + expected_operation_timeout = 123 + expected_attempt_timeout = 12 + expected_read_rows_operation_timeout = 1.5 + expected_read_rows_attempt_timeout = 0.5 + expected_mutate_rows_operation_timeout = 2.5 + expected_mutate_rows_attempt_timeout = 0.75 + client = self._make_client() + assert not client._active_instances + table = self._get_target_class()( + client, + expected_instance_id, + expected_table_id, + expected_app_profile_id, + default_operation_timeout=expected_operation_timeout, + default_attempt_timeout=expected_attempt_timeout, + default_read_rows_operation_timeout=expected_read_rows_operation_timeout, + default_read_rows_attempt_timeout=expected_read_rows_attempt_timeout, + default_mutate_rows_operation_timeout=expected_mutate_rows_operation_timeout, + default_mutate_rows_attempt_timeout=expected_mutate_rows_attempt_timeout, + ) + CrossSync._Sync_Impl.yield_to_event_loop() + assert table.table_id == expected_table_id + assert table.instance_id == expected_instance_id + assert table.app_profile_id == expected_app_profile_id + assert table.client is client + instance_key = _WarmedInstanceKey( + table.instance_name, table.table_name, table.app_profile_id + ) + assert instance_key in client._active_instances + assert client._instance_owners[instance_key] == {id(table)} + assert table.default_operation_timeout == expected_operation_timeout + assert table.default_attempt_timeout == expected_attempt_timeout + assert ( + table.default_read_rows_operation_timeout + == expected_read_rows_operation_timeout + ) + assert ( + table.default_read_rows_attempt_timeout + == expected_read_rows_attempt_timeout + ) + assert ( + table.default_mutate_rows_operation_timeout + == expected_mutate_rows_operation_timeout + ) + assert ( + table.default_mutate_rows_attempt_timeout + == expected_mutate_rows_attempt_timeout + ) + table._register_instance_future + assert table._register_instance_future.done() + assert not table._register_instance_future.cancelled() + assert table._register_instance_future.exception() is None + client.close() + + def test_table_ctor_defaults(self): + """should provide default timeout values and app_profile_id""" + expected_table_id = "table-id" + expected_instance_id = "instance-id" + client = self._make_client() + assert not client._active_instances + table = self._get_target_class()( + client, expected_instance_id, expected_table_id + ) + CrossSync._Sync_Impl.yield_to_event_loop() + assert table.table_id == expected_table_id + assert table.instance_id == expected_instance_id + assert table.app_profile_id is None + assert table.client is client + assert table.default_operation_timeout == 60 + assert 
table.default_read_rows_operation_timeout == 600 + assert table.default_mutate_rows_operation_timeout == 600 + assert table.default_attempt_timeout == 20 + assert table.default_read_rows_attempt_timeout == 20 + assert table.default_mutate_rows_attempt_timeout == 60 + client.close() + + def test_table_ctor_invalid_timeout_values(self): + """bad timeout values should raise ValueError""" + client = self._make_client() + timeout_pairs = [ + ("default_operation_timeout", "default_attempt_timeout"), + ( + "default_read_rows_operation_timeout", + "default_read_rows_attempt_timeout", + ), + ( + "default_mutate_rows_operation_timeout", + "default_mutate_rows_attempt_timeout", + ), + ] + for operation_timeout, attempt_timeout in timeout_pairs: + with pytest.raises(ValueError) as e: + self._get_target_class()(client, "", "", **{attempt_timeout: -1}) + assert "attempt_timeout must be greater than 0" in str(e.value) + with pytest.raises(ValueError) as e: + self._get_target_class()(client, "", "", **{operation_timeout: -1}) + assert "operation_timeout must be greater than 0" in str(e.value) + client.close() + + @pytest.mark.parametrize( + "fn_name,fn_args,is_stream,extra_retryables", + [ + ("read_rows_stream", (ReadRowsQuery(),), True, ()), + ("read_rows", (ReadRowsQuery(),), True, ()), + ("read_row", (b"row_key",), True, ()), + ("read_rows_sharded", ([ReadRowsQuery()],), True, ()), + ("row_exists", (b"row_key",), True, ()), + ("sample_row_keys", (), False, ()), + ("mutate_row", (b"row_key", [mock.Mock()]), False, ()), + ( + "bulk_mutate_rows", + ([mutations.RowMutationEntry(b"key", [mutations.DeleteAllFromRow()])],), + False, + (_MutateRowsIncomplete,), + ), + ], + ) + @pytest.mark.parametrize( + "input_retryables,expected_retryables", + [ + ( + TABLE_DEFAULT.READ_ROWS, + [ + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + core_exceptions.Aborted, + ], + ), + ( + TABLE_DEFAULT.DEFAULT, + [core_exceptions.DeadlineExceeded, core_exceptions.ServiceUnavailable], + ), + ( + TABLE_DEFAULT.MUTATE_ROWS, + [core_exceptions.DeadlineExceeded, core_exceptions.ServiceUnavailable], + ), + ([], []), + ([4], [core_exceptions.DeadlineExceeded]), + ], + ) + def test_customizable_retryable_errors( + self, + input_retryables, + expected_retryables, + fn_name, + fn_args, + is_stream, + extra_retryables, + ): + """Test that retryable functions support user-configurable arguments, and that the configured retryables are passed + down to the gapic layer.""" + retry_fn = "retry_target" + if is_stream: + retry_fn += "_stream" + retry_fn = f"CrossSync._Sync_Impl.{retry_fn}" + with mock.patch( + f"google.cloud.bigtable.data._cross_sync.{retry_fn}" + ) as retry_fn_mock: + with self._make_client() as client: + table = client.get_table("instance-id", "table-id") + expected_predicate = expected_retryables.__contains__ + retry_fn_mock.side_effect = RuntimeError("stop early") + with mock.patch( + "google.api_core.retry.if_exception_type" + ) as predicate_builder_mock: + predicate_builder_mock.return_value = expected_predicate + with pytest.raises(Exception): + test_fn = table.__getattribute__(fn_name) + test_fn(*fn_args, retryable_errors=input_retryables) + predicate_builder_mock.assert_called_once_with( + *expected_retryables, *extra_retryables + ) + retry_call_args = retry_fn_mock.call_args_list[0].args + assert retry_call_args[1] is expected_predicate + + @pytest.mark.parametrize( + "fn_name,fn_args,gapic_fn", + [ + ("read_rows_stream", (ReadRowsQuery(),), "read_rows"), + ("read_rows", (ReadRowsQuery(),), 
"read_rows"), + ("read_row", (b"row_key",), "read_rows"), + ("read_rows_sharded", ([ReadRowsQuery()],), "read_rows"), + ("row_exists", (b"row_key",), "read_rows"), + ("sample_row_keys", (), "sample_row_keys"), + ("mutate_row", (b"row_key", [mutations.DeleteAllFromRow()]), "mutate_row"), + ( + "bulk_mutate_rows", + ([mutations.RowMutationEntry(b"key", [mutations.DeleteAllFromRow()])],), + "mutate_rows", + ), + ("check_and_mutate_row", (b"row_key", None), "check_and_mutate_row"), + ( + "read_modify_write_row", + (b"row_key", IncrementRule("f", "q")), + "read_modify_write_row", + ), + ], + ) + @pytest.mark.parametrize("include_app_profile", [True, False]) + def test_call_metadata(self, include_app_profile, fn_name, fn_args, gapic_fn): + profile = "profile" if include_app_profile else None + client = self._make_client() + transport_mock = mock.MagicMock() + rpc_mock = CrossSync._Sync_Impl.Mock() + transport_mock._wrapped_methods.__getitem__.return_value = rpc_mock + gapic_client = client._gapic_client + gapic_client._transport = transport_mock + gapic_client._is_universe_domain_valid = True + table = self._get_target_class()(client, "instance-id", "table-id", profile) + try: + test_fn = table.__getattribute__(fn_name) + maybe_stream = test_fn(*fn_args) + [i for i in maybe_stream] + except Exception: + pass + assert rpc_mock.call_count == 1 + kwargs = rpc_mock.call_args_list[0][1] + metadata = kwargs["metadata"] + assert len(metadata) == 1 + assert metadata[0][0] == "x-goog-request-params" + routing_str = metadata[0][1] + assert "table_name=" + table.table_name in routing_str + if include_app_profile: + assert "app_profile_id=profile" in routing_str + else: + assert "app_profile_id=" not in routing_str + + +@CrossSync._Sync_Impl.add_mapping_decorator("TestReadRows") +class TestReadRows: + """ + Tests for table.read_rows and related methods. 
+ """ + + @staticmethod + def _get_operation_class(): + return CrossSync._Sync_Impl._ReadRowsOperation + + def _make_client(self, *args, **kwargs): + return CrossSync._Sync_Impl.TestBigtableDataClient._make_client(*args, **kwargs) + + def _make_table(self, *args, **kwargs): + client_mock = mock.Mock() + client_mock._register_instance.side_effect = ( + lambda *args, **kwargs: CrossSync._Sync_Impl.yield_to_event_loop() + ) + client_mock._remove_instance_registration.side_effect = ( + lambda *args, **kwargs: CrossSync._Sync_Impl.yield_to_event_loop() + ) + kwargs["instance_id"] = kwargs.get( + "instance_id", args[0] if args else "instance" + ) + kwargs["table_id"] = kwargs.get( + "table_id", args[1] if len(args) > 1 else "table" + ) + client_mock._gapic_client.table_path.return_value = kwargs["table_id"] + client_mock._gapic_client.instance_path.return_value = kwargs["instance_id"] + return CrossSync._Sync_Impl.TestTable._get_target_class()( + client_mock, *args, **kwargs + ) + + def _make_stats(self): + from google.cloud.bigtable_v2.types import RequestStats + from google.cloud.bigtable_v2.types import FullReadStatsView + from google.cloud.bigtable_v2.types import ReadIterationStats + + return RequestStats( + full_read_stats_view=FullReadStatsView( + read_iteration_stats=ReadIterationStats( + rows_seen_count=1, + rows_returned_count=2, + cells_seen_count=3, + cells_returned_count=4, + ) + ) + ) + + @staticmethod + def _make_chunk(*args, **kwargs): + from google.cloud.bigtable_v2 import ReadRowsResponse + + kwargs["row_key"] = kwargs.get("row_key", b"row_key") + kwargs["family_name"] = kwargs.get("family_name", "family_name") + kwargs["qualifier"] = kwargs.get("qualifier", b"qualifier") + kwargs["value"] = kwargs.get("value", b"value") + kwargs["commit_row"] = kwargs.get("commit_row", True) + return ReadRowsResponse.CellChunk(*args, **kwargs) + + @staticmethod + def _make_gapic_stream( + chunk_list: list[ReadRowsResponse.CellChunk | Exception], sleep_time=0 + ): + from google.cloud.bigtable_v2 import ReadRowsResponse + + class mock_stream: + def __init__(self, chunk_list, sleep_time): + self.chunk_list = chunk_list + self.idx = -1 + self.sleep_time = sleep_time + + def __iter__(self): + return self + + def __next__(self): + self.idx += 1 + if len(self.chunk_list) > self.idx: + if sleep_time: + CrossSync._Sync_Impl.sleep(self.sleep_time) + chunk = self.chunk_list[self.idx] + if isinstance(chunk, Exception): + raise chunk + else: + return ReadRowsResponse(chunks=[chunk]) + raise CrossSync._Sync_Impl.StopIteration + + def cancel(self): + pass + + return mock_stream(chunk_list, sleep_time) + + def execute_fn(self, table, *args, **kwargs): + return table.read_rows(*args, **kwargs) + + def test_read_rows(self): + query = ReadRowsQuery() + chunks = [ + self._make_chunk(row_key=b"test_1"), + self._make_chunk(row_key=b"test_2"), + ] + with self._make_table() as table: + read_rows = table.client._gapic_client.read_rows + read_rows.side_effect = lambda *args, **kwargs: self._make_gapic_stream( + chunks + ) + results = self.execute_fn(table, query, operation_timeout=3) + assert len(results) == 2 + assert results[0].row_key == b"test_1" + assert results[1].row_key == b"test_2" + + def test_read_rows_stream(self): + query = ReadRowsQuery() + chunks = [ + self._make_chunk(row_key=b"test_1"), + self._make_chunk(row_key=b"test_2"), + ] + with self._make_table() as table: + read_rows = table.client._gapic_client.read_rows + read_rows.side_effect = lambda *args, **kwargs: self._make_gapic_stream( + chunks + ) + 
gen = table.read_rows_stream(query, operation_timeout=3) + results = [row for row in gen] + assert len(results) == 2 + assert results[0].row_key == b"test_1" + assert results[1].row_key == b"test_2" + + @pytest.mark.parametrize("include_app_profile", [True, False]) + def test_read_rows_query_matches_request(self, include_app_profile): + from google.cloud.bigtable.data import RowRange + from google.cloud.bigtable.data.row_filters import PassAllFilter + + app_profile_id = "app_profile_id" if include_app_profile else None + with self._make_table(app_profile_id=app_profile_id) as table: + read_rows = table.client._gapic_client.read_rows + read_rows.side_effect = lambda *args, **kwargs: self._make_gapic_stream([]) + row_keys = [b"test_1", "test_2"] + row_ranges = RowRange("1start", "2end") + filter_ = PassAllFilter(True) + limit = 99 + query = ReadRowsQuery( + row_keys=row_keys, + row_ranges=row_ranges, + row_filter=filter_, + limit=limit, + ) + results = table.read_rows(query, operation_timeout=3) + assert len(results) == 0 + call_request = read_rows.call_args_list[0][0][0] + query_pb = query._to_pb(table) + assert call_request == query_pb + + @pytest.mark.parametrize("operation_timeout", [0.001, 0.023, 0.1]) + def test_read_rows_timeout(self, operation_timeout): + with self._make_table() as table: + read_rows = table.client._gapic_client.read_rows + query = ReadRowsQuery() + chunks = [self._make_chunk(row_key=b"test_1")] + read_rows.side_effect = lambda *args, **kwargs: self._make_gapic_stream( + chunks, sleep_time=0.15 + ) + try: + table.read_rows(query, operation_timeout=operation_timeout) + except core_exceptions.DeadlineExceeded as e: + assert ( + e.message + == f"operation_timeout of {operation_timeout:0.1f}s exceeded" + ) + + @pytest.mark.parametrize( + "per_request_t, operation_t, expected_num", + [(0.05, 0.08, 2), (0.05, 0.14, 3), (0.05, 0.24, 5)], + ) + def test_read_rows_attempt_timeout(self, per_request_t, operation_t, expected_num): + """Ensures that the attempt_timeout is respected and that the number of + requests is as expected. 
+ + operation_timeout does not cancel the request, so we expect the number of + requests to be the ceiling of operation_timeout / attempt_timeout.""" + from google.cloud.bigtable.data.exceptions import RetryExceptionGroup + + expected_last_timeout = operation_t - (expected_num - 1) * per_request_t + with mock.patch("random.uniform", side_effect=lambda a, b: 0): + with self._make_table() as table: + read_rows = table.client._gapic_client.read_rows + read_rows.side_effect = lambda *args, **kwargs: self._make_gapic_stream( + chunks, sleep_time=per_request_t + ) + query = ReadRowsQuery() + chunks = [core_exceptions.DeadlineExceeded("mock deadline")] + try: + table.read_rows( + query, + operation_timeout=operation_t, + attempt_timeout=per_request_t, + ) + except core_exceptions.DeadlineExceeded as e: + retry_exc = e.__cause__ + if expected_num == 0: + assert retry_exc is None + else: + assert type(retry_exc) is RetryExceptionGroup + assert f"{expected_num} failed attempts" in str(retry_exc) + assert len(retry_exc.exceptions) == expected_num + for sub_exc in retry_exc.exceptions: + assert sub_exc.message == "mock deadline" + assert read_rows.call_count == expected_num + for _, call_kwargs in read_rows.call_args_list[:-1]: + assert call_kwargs["timeout"] == per_request_t + assert call_kwargs["retry"] is None + assert ( + abs( + read_rows.call_args_list[-1][1]["timeout"] + - expected_last_timeout + ) + < 0.05 + ) + + @pytest.mark.parametrize( + "exc_type", + [ + core_exceptions.Aborted, + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ], + ) + def test_read_rows_retryable_error(self, exc_type): + with self._make_table() as table: + read_rows = table.client._gapic_client.read_rows + read_rows.side_effect = lambda *args, **kwargs: self._make_gapic_stream( + [expected_error] + ) + query = ReadRowsQuery() + expected_error = exc_type("mock error") + try: + table.read_rows(query, operation_timeout=0.1) + except core_exceptions.DeadlineExceeded as e: + retry_exc = e.__cause__ + root_cause = retry_exc.exceptions[0] + assert type(root_cause) is exc_type + assert root_cause == expected_error + + @pytest.mark.parametrize( + "exc_type", + [ + core_exceptions.Cancelled, + core_exceptions.PreconditionFailed, + core_exceptions.NotFound, + core_exceptions.PermissionDenied, + core_exceptions.Conflict, + core_exceptions.InternalServerError, + core_exceptions.TooManyRequests, + core_exceptions.ResourceExhausted, + InvalidChunk, + ], + ) + def test_read_rows_non_retryable_error(self, exc_type): + with self._make_table() as table: + read_rows = table.client._gapic_client.read_rows + read_rows.side_effect = lambda *args, **kwargs: self._make_gapic_stream( + [expected_error] + ) + query = ReadRowsQuery() + expected_error = exc_type("mock error") + try: + table.read_rows(query, operation_timeout=0.1) + except exc_type as e: + assert e == expected_error + + def test_read_rows_revise_request(self): + """Ensure that _revise_request is called between retries""" + from google.cloud.bigtable.data.exceptions import InvalidChunk + from google.cloud.bigtable_v2.types import RowSet + + return_val = RowSet() + with mock.patch.object( + self._get_operation_class(), "_revise_request_rowset" + ) as revise_rowset: + revise_rowset.return_value = return_val + with self._make_table() as table: + read_rows = table.client._gapic_client.read_rows + read_rows.side_effect = lambda *args, **kwargs: self._make_gapic_stream( + chunks + ) + row_keys = [b"test_1", b"test_2", b"test_3"] + query = 
ReadRowsQuery(row_keys=row_keys) + chunks = [ + self._make_chunk(row_key=b"test_1"), + core_exceptions.Aborted("mock retryable error"), + ] + try: + table.read_rows(query) + except InvalidChunk: + revise_rowset.assert_called() + first_call_kwargs = revise_rowset.call_args_list[0].kwargs + assert first_call_kwargs["row_set"] == query._to_pb(table).rows + assert first_call_kwargs["last_seen_row_key"] == b"test_1" + revised_call = read_rows.call_args_list[1].args[0] + assert revised_call.rows == return_val + + def test_read_rows_default_timeouts(self): + """Ensure that the default timeouts are set on the read rows operation when not overridden""" + operation_timeout = 8 + attempt_timeout = 4 + with mock.patch.object(self._get_operation_class(), "__init__") as mock_op: + mock_op.side_effect = RuntimeError("mock error") + with self._make_table( + default_read_rows_operation_timeout=operation_timeout, + default_read_rows_attempt_timeout=attempt_timeout, + ) as table: + try: + table.read_rows(ReadRowsQuery()) + except RuntimeError: + pass + kwargs = mock_op.call_args_list[0].kwargs + assert kwargs["operation_timeout"] == operation_timeout + assert kwargs["attempt_timeout"] == attempt_timeout + + def test_read_rows_default_timeout_override(self): + """When timeouts are passed, they overwrite default values""" + operation_timeout = 8 + attempt_timeout = 4 + with mock.patch.object(self._get_operation_class(), "__init__") as mock_op: + mock_op.side_effect = RuntimeError("mock error") + with self._make_table( + default_operation_timeout=99, default_attempt_timeout=97 + ) as table: + try: + table.read_rows( + ReadRowsQuery(), + operation_timeout=operation_timeout, + attempt_timeout=attempt_timeout, + ) + except RuntimeError: + pass + kwargs = mock_op.call_args_list[0].kwargs + assert kwargs["operation_timeout"] == operation_timeout + assert kwargs["attempt_timeout"] == attempt_timeout + + def test_read_row(self): + """Test reading a single row""" + with self._make_client() as client: + table = client.get_table("instance", "table") + row_key = b"test_1" + with mock.patch.object(table, "read_rows") as read_rows: + expected_result = object() + read_rows.side_effect = lambda *args, **kwargs: [expected_result] + expected_op_timeout = 8 + expected_req_timeout = 4 + row = table.read_row( + row_key, + operation_timeout=expected_op_timeout, + attempt_timeout=expected_req_timeout, + ) + assert row == expected_result + assert read_rows.call_count == 1 + (args, kwargs) = read_rows.call_args_list[0] + assert kwargs["operation_timeout"] == expected_op_timeout + assert kwargs["attempt_timeout"] == expected_req_timeout + assert len(args) == 1 + assert isinstance(args[0], ReadRowsQuery) + query = args[0] + assert query.row_keys == [row_key] + assert query.row_ranges == [] + assert query.limit == 1 + + def test_read_row_w_filter(self): + """Test reading a single row with an added filter""" + with self._make_client() as client: + table = client.get_table("instance", "table") + row_key = b"test_1" + with mock.patch.object(table, "read_rows") as read_rows: + expected_result = object() + read_rows.side_effect = lambda *args, **kwargs: [expected_result] + expected_op_timeout = 8 + expected_req_timeout = 4 + mock_filter = mock.Mock() + expected_filter = {"filter": "mock filter"} + mock_filter._to_dict.return_value = expected_filter + row = table.read_row( + row_key, + operation_timeout=expected_op_timeout, + attempt_timeout=expected_req_timeout, + row_filter=expected_filter, + ) + assert row == expected_result + assert 
read_rows.call_count == 1 + (args, kwargs) = read_rows.call_args_list[0] + assert kwargs["operation_timeout"] == expected_op_timeout + assert kwargs["attempt_timeout"] == expected_req_timeout + assert len(args) == 1 + assert isinstance(args[0], ReadRowsQuery) + query = args[0] + assert query.row_keys == [row_key] + assert query.row_ranges == [] + assert query.limit == 1 + assert query.filter == expected_filter + + def test_read_row_no_response(self): + """should return None if row does not exist""" + with self._make_client() as client: + table = client.get_table("instance", "table") + row_key = b"test_1" + with mock.patch.object(table, "read_rows") as read_rows: + read_rows.side_effect = lambda *args, **kwargs: [] + expected_op_timeout = 8 + expected_req_timeout = 4 + result = table.read_row( + row_key, + operation_timeout=expected_op_timeout, + attempt_timeout=expected_req_timeout, + ) + assert result is None + assert read_rows.call_count == 1 + (args, kwargs) = read_rows.call_args_list[0] + assert kwargs["operation_timeout"] == expected_op_timeout + assert kwargs["attempt_timeout"] == expected_req_timeout + assert isinstance(args[0], ReadRowsQuery) + query = args[0] + assert query.row_keys == [row_key] + assert query.row_ranges == [] + assert query.limit == 1 + + @pytest.mark.parametrize( + "return_value,expected_result", + [([], False), ([object()], True), ([object(), object()], True)], + ) + def test_row_exists(self, return_value, expected_result): + """Test checking for row existence""" + with self._make_client() as client: + table = client.get_table("instance", "table") + row_key = b"test_1" + with mock.patch.object(table, "read_rows") as read_rows: + read_rows.side_effect = lambda *args, **kwargs: return_value + expected_op_timeout = 1 + expected_req_timeout = 2 + result = table.row_exists( + row_key, + operation_timeout=expected_op_timeout, + attempt_timeout=expected_req_timeout, + ) + assert expected_result == result + assert read_rows.call_count == 1 + (args, kwargs) = read_rows.call_args_list[0] + assert kwargs["operation_timeout"] == expected_op_timeout + assert kwargs["attempt_timeout"] == expected_req_timeout + assert isinstance(args[0], ReadRowsQuery) + expected_filter = { + "chain": { + "filters": [ + {"cells_per_row_limit_filter": 1}, + {"strip_value_transformer": True}, + ] + } + } + query = args[0] + assert query.row_keys == [row_key] + assert query.row_ranges == [] + assert query.limit == 1 + assert query.filter._to_dict() == expected_filter + + +class TestReadRowsSharded: + def _make_client(self, *args, **kwargs): + return CrossSync._Sync_Impl.TestBigtableDataClient._make_client(*args, **kwargs) + + def test_read_rows_sharded_empty_query(self): + with self._make_client() as client: + with client.get_table("instance", "table") as table: + with pytest.raises(ValueError) as exc: + table.read_rows_sharded([]) + assert "empty sharded_query" in str(exc.value) + + def test_read_rows_sharded_multiple_queries(self): + """Test with multiple queries. 
Should return results from both""" + with self._make_client() as client: + with client.get_table("instance", "table") as table: + with mock.patch.object( + table.client._gapic_client, "read_rows" + ) as read_rows: + read_rows.side_effect = lambda *args, **kwargs: CrossSync._Sync_Impl.TestReadRows._make_gapic_stream( + [ + CrossSync._Sync_Impl.TestReadRows._make_chunk(row_key=k) + for k in args[0].rows.row_keys + ] + ) + query_1 = ReadRowsQuery(b"test_1") + query_2 = ReadRowsQuery(b"test_2") + result = table.read_rows_sharded([query_1, query_2]) + assert len(result) == 2 + assert result[0].row_key == b"test_1" + assert result[1].row_key == b"test_2" + + @pytest.mark.parametrize("n_queries", [1, 2, 5, 11, 24]) + def test_read_rows_sharded_multiple_queries_calls(self, n_queries): + """Each query should trigger a separate read_rows call""" + with self._make_client() as client: + with client.get_table("instance", "table") as table: + with mock.patch.object(table, "read_rows") as read_rows: + query_list = [ReadRowsQuery() for _ in range(n_queries)] + table.read_rows_sharded(query_list) + assert read_rows.call_count == n_queries + + def test_read_rows_sharded_errors(self): + """Errors should be exposed as ShardedReadRowsExceptionGroups""" + from google.cloud.bigtable.data.exceptions import ShardedReadRowsExceptionGroup + from google.cloud.bigtable.data.exceptions import FailedQueryShardError + + with self._make_client() as client: + with client.get_table("instance", "table") as table: + with mock.patch.object(table, "read_rows") as read_rows: + read_rows.side_effect = RuntimeError("mock error") + query_1 = ReadRowsQuery(b"test_1") + query_2 = ReadRowsQuery(b"test_2") + with pytest.raises(ShardedReadRowsExceptionGroup) as exc: + table.read_rows_sharded([query_1, query_2]) + exc_group = exc.value + assert isinstance(exc_group, ShardedReadRowsExceptionGroup) + assert len(exc.value.exceptions) == 2 + assert isinstance(exc.value.exceptions[0], FailedQueryShardError) + assert isinstance(exc.value.exceptions[0].__cause__, RuntimeError) + assert exc.value.exceptions[0].index == 0 + assert exc.value.exceptions[0].query == query_1 + assert isinstance(exc.value.exceptions[1], FailedQueryShardError) + assert isinstance(exc.value.exceptions[1].__cause__, RuntimeError) + assert exc.value.exceptions[1].index == 1 + assert exc.value.exceptions[1].query == query_2 + + def test_read_rows_sharded_concurrent(self): + """Ensure sharded requests are concurrent""" + import time + + def mock_call(*args, **kwargs): + CrossSync._Sync_Impl.sleep(0.1) + return [mock.Mock()] + + with self._make_client() as client: + with client.get_table("instance", "table") as table: + with mock.patch.object(table, "read_rows") as read_rows: + read_rows.side_effect = mock_call + queries = [ReadRowsQuery() for _ in range(10)] + start_time = time.monotonic() + result = table.read_rows_sharded(queries) + call_time = time.monotonic() - start_time + assert read_rows.call_count == 10 + assert len(result) == 10 + assert call_time < 0.5 + + def test_read_rows_sharded_concurrency_limit(self): + """Only 10 queries should be processed concurrently. 
Others should be queued + + Should start a new query as soon as previous finishes""" + from google.cloud.bigtable.data._helpers import _CONCURRENCY_LIMIT + + assert _CONCURRENCY_LIMIT == 10 + num_queries = 15 + increment_time = 0.05 + max_time = increment_time * (_CONCURRENCY_LIMIT - 1) + rpc_times = [min(i * increment_time, max_time) for i in range(num_queries)] + + def mock_call(*args, **kwargs): + next_sleep = rpc_times.pop(0) + asyncio.sleep(next_sleep) + return [mock.Mock()] + + starting_timeout = 10 + with self._make_client() as client: + with client.get_table("instance", "table") as table: + with mock.patch.object(table, "read_rows") as read_rows: + read_rows.side_effect = mock_call + queries = [ReadRowsQuery() for _ in range(num_queries)] + table.read_rows_sharded(queries, operation_timeout=starting_timeout) + assert read_rows.call_count == num_queries + rpc_start_list = [ + starting_timeout - kwargs["operation_timeout"] + for (_, kwargs) in read_rows.call_args_list + ] + eps = 0.01 + assert all( + (rpc_start_list[i] < eps for i in range(_CONCURRENCY_LIMIT)) + ) + for i in range(num_queries - _CONCURRENCY_LIMIT): + idx = i + _CONCURRENCY_LIMIT + assert rpc_start_list[idx] - i * increment_time < eps + + def test_read_rows_sharded_expirary(self): + """If the operation times out before all shards complete, should raise + a ShardedReadRowsExceptionGroup""" + from google.cloud.bigtable.data._helpers import _CONCURRENCY_LIMIT + from google.cloud.bigtable.data.exceptions import ShardedReadRowsExceptionGroup + from google.api_core.exceptions import DeadlineExceeded + + operation_timeout = 0.1 + num_queries = 15 + sleeps = [0] * _CONCURRENCY_LIMIT + [DeadlineExceeded("times up")] * ( + num_queries - _CONCURRENCY_LIMIT + ) + + def mock_call(*args, **kwargs): + next_item = sleeps.pop(0) + if isinstance(next_item, Exception): + raise next_item + else: + asyncio.sleep(next_item) + return [mock.Mock()] + + with self._make_client() as client: + with client.get_table("instance", "table") as table: + with mock.patch.object(table, "read_rows") as read_rows: + read_rows.side_effect = mock_call + queries = [ReadRowsQuery() for _ in range(num_queries)] + with pytest.raises(ShardedReadRowsExceptionGroup) as exc: + table.read_rows_sharded( + queries, operation_timeout=operation_timeout + ) + assert isinstance(exc.value, ShardedReadRowsExceptionGroup) + assert len(exc.value.exceptions) == num_queries - _CONCURRENCY_LIMIT + assert len(exc.value.successful_rows) == _CONCURRENCY_LIMIT + + def test_read_rows_sharded_negative_batch_timeout(self): + """try to run with batch that starts after operation timeout + + They should raise DeadlineExceeded errors""" + from google.cloud.bigtable.data.exceptions import ShardedReadRowsExceptionGroup + from google.cloud.bigtable.data._helpers import _CONCURRENCY_LIMIT + from google.api_core.exceptions import DeadlineExceeded + + def mock_call(*args, **kwargs): + CrossSync._Sync_Impl.sleep(0.06) + return [mock.Mock()] + + with self._make_client() as client: + with client.get_table("instance", "table") as table: + with mock.patch.object(table, "read_rows") as read_rows: + read_rows.side_effect = mock_call + num_calls = 15 + queries = [ReadRowsQuery() for _ in range(num_calls)] + with pytest.raises(ShardedReadRowsExceptionGroup) as exc: + table.read_rows_sharded(queries, operation_timeout=0.05) + assert isinstance(exc.value, ShardedReadRowsExceptionGroup) + assert len(exc.value.exceptions) >= num_calls - _CONCURRENCY_LIMIT + assert all( + ( + isinstance(e.__cause__, 
DeadlineExceeded) + for e in exc.value.exceptions + ) + ) + + +class TestSampleRowKeys: + def _make_client(self, *args, **kwargs): + return CrossSync._Sync_Impl.TestBigtableDataClient._make_client(*args, **kwargs) + + def _make_gapic_stream(self, sample_list: list[tuple[bytes, int]]): + from google.cloud.bigtable_v2.types import SampleRowKeysResponse + + for value in sample_list: + yield SampleRowKeysResponse(row_key=value[0], offset_bytes=value[1]) + + def test_sample_row_keys(self): + """Test that method returns the expected key samples""" + samples = [(b"test_1", 0), (b"test_2", 100), (b"test_3", 200)] + with self._make_client() as client: + with client.get_table("instance", "table") as table: + with mock.patch.object( + table.client._gapic_client, + "sample_row_keys", + CrossSync._Sync_Impl.Mock(), + ) as sample_row_keys: + sample_row_keys.return_value = self._make_gapic_stream(samples) + result = table.sample_row_keys() + assert len(result) == 3 + assert all((isinstance(r, tuple) for r in result)) + assert all((isinstance(r[0], bytes) for r in result)) + assert all((isinstance(r[1], int) for r in result)) + assert result[0] == samples[0] + assert result[1] == samples[1] + assert result[2] == samples[2] + + def test_sample_row_keys_bad_timeout(self): + """should raise error if timeout is negative""" + with self._make_client() as client: + with client.get_table("instance", "table") as table: + with pytest.raises(ValueError) as e: + table.sample_row_keys(operation_timeout=-1) + assert "operation_timeout must be greater than 0" in str(e.value) + with pytest.raises(ValueError) as e: + table.sample_row_keys(attempt_timeout=-1) + assert "attempt_timeout must be greater than 0" in str(e.value) + + def test_sample_row_keys_default_timeout(self): + """Should fallback to using table default operation_timeout""" + expected_timeout = 99 + with self._make_client() as client: + with client.get_table( + "i", + "t", + default_operation_timeout=expected_timeout, + default_attempt_timeout=expected_timeout, + ) as table: + with mock.patch.object( + table.client._gapic_client, + "sample_row_keys", + CrossSync._Sync_Impl.Mock(), + ) as sample_row_keys: + sample_row_keys.return_value = self._make_gapic_stream([]) + result = table.sample_row_keys() + (_, kwargs) = sample_row_keys.call_args + assert abs(kwargs["timeout"] - expected_timeout) < 0.1 + assert result == [] + assert kwargs["retry"] is None + + def test_sample_row_keys_gapic_params(self): + """make sure arguments are propagated to gapic call as expected""" + expected_timeout = 10 + expected_profile = "test1" + instance = "instance_name" + table_id = "my_table" + with self._make_client() as client: + with client.get_table( + instance, table_id, app_profile_id=expected_profile + ) as table: + with mock.patch.object( + table.client._gapic_client, + "sample_row_keys", + CrossSync._Sync_Impl.Mock(), + ) as sample_row_keys: + sample_row_keys.return_value = self._make_gapic_stream([]) + table.sample_row_keys(attempt_timeout=expected_timeout) + (args, kwargs) = sample_row_keys.call_args + assert len(args) == 0 + assert len(kwargs) == 4 + assert kwargs["timeout"] == expected_timeout + assert kwargs["app_profile_id"] == expected_profile + assert kwargs["table_name"] == table.table_name + assert kwargs["retry"] is None + + @pytest.mark.parametrize( + "retryable_exception", + [core_exceptions.DeadlineExceeded, core_exceptions.ServiceUnavailable], + ) + def test_sample_row_keys_retryable_errors(self, retryable_exception): + """retryable errors should be retried 
until timeout""" + from google.api_core.exceptions import DeadlineExceeded + from google.cloud.bigtable.data.exceptions import RetryExceptionGroup + + with self._make_client() as client: + with client.get_table("instance", "table") as table: + with mock.patch.object( + table.client._gapic_client, + "sample_row_keys", + CrossSync._Sync_Impl.Mock(), + ) as sample_row_keys: + sample_row_keys.side_effect = retryable_exception("mock") + with pytest.raises(DeadlineExceeded) as e: + table.sample_row_keys(operation_timeout=0.05) + cause = e.value.__cause__ + assert isinstance(cause, RetryExceptionGroup) + assert len(cause.exceptions) > 0 + assert isinstance(cause.exceptions[0], retryable_exception) + + @pytest.mark.parametrize( + "non_retryable_exception", + [ + core_exceptions.OutOfRange, + core_exceptions.NotFound, + core_exceptions.FailedPrecondition, + RuntimeError, + ValueError, + core_exceptions.Aborted, + ], + ) + def test_sample_row_keys_non_retryable_errors(self, non_retryable_exception): + """non-retryable errors should cause a raise""" + with self._make_client() as client: + with client.get_table("instance", "table") as table: + with mock.patch.object( + table.client._gapic_client, + "sample_row_keys", + CrossSync._Sync_Impl.Mock(), + ) as sample_row_keys: + sample_row_keys.side_effect = non_retryable_exception("mock") + with pytest.raises(non_retryable_exception): + table.sample_row_keys() + + +class TestMutateRow: + def _make_client(self, *args, **kwargs): + return CrossSync._Sync_Impl.TestBigtableDataClient._make_client(*args, **kwargs) + + @pytest.mark.parametrize( + "mutation_arg", + [ + mutations.SetCell("family", b"qualifier", b"value"), + mutations.SetCell( + "family", b"qualifier", b"value", timestamp_micros=1234567890 + ), + mutations.DeleteRangeFromColumn("family", b"qualifier"), + mutations.DeleteAllFromFamily("family"), + mutations.DeleteAllFromRow(), + [mutations.SetCell("family", b"qualifier", b"value")], + [ + mutations.DeleteRangeFromColumn("family", b"qualifier"), + mutations.DeleteAllFromRow(), + ], + ], + ) + def test_mutate_row(self, mutation_arg): + """Test mutations with no errors""" + expected_attempt_timeout = 19 + with self._make_client(project="project") as client: + with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "mutate_row" + ) as mock_gapic: + mock_gapic.return_value = None + table.mutate_row( + "row_key", + mutation_arg, + attempt_timeout=expected_attempt_timeout, + ) + assert mock_gapic.call_count == 1 + kwargs = mock_gapic.call_args_list[0].kwargs + assert ( + kwargs["table_name"] + == "projects/project/instances/instance/tables/table" + ) + assert kwargs["row_key"] == b"row_key" + formatted_mutations = ( + [mutation._to_pb() for mutation in mutation_arg] + if isinstance(mutation_arg, list) + else [mutation_arg._to_pb()] + ) + assert kwargs["mutations"] == formatted_mutations + assert kwargs["timeout"] == expected_attempt_timeout + assert kwargs["retry"] is None + + @pytest.mark.parametrize( + "retryable_exception", + [core_exceptions.DeadlineExceeded, core_exceptions.ServiceUnavailable], + ) + def test_mutate_row_retryable_errors(self, retryable_exception): + from google.api_core.exceptions import DeadlineExceeded + from google.cloud.bigtable.data.exceptions import RetryExceptionGroup + + with self._make_client(project="project") as client: + with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "mutate_row" + ) as mock_gapic: + 
mock_gapic.side_effect = retryable_exception("mock") + with pytest.raises(DeadlineExceeded) as e: + mutation = mutations.DeleteAllFromRow() + assert mutation.is_idempotent() is True + table.mutate_row("row_key", mutation, operation_timeout=0.01) + cause = e.value.__cause__ + assert isinstance(cause, RetryExceptionGroup) + assert isinstance(cause.exceptions[0], retryable_exception) + + @pytest.mark.parametrize( + "retryable_exception", + [core_exceptions.DeadlineExceeded, core_exceptions.ServiceUnavailable], + ) + def test_mutate_row_non_idempotent_retryable_errors(self, retryable_exception): + """Non-idempotent mutations should not be retried""" + with self._make_client(project="project") as client: + with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "mutate_row" + ) as mock_gapic: + mock_gapic.side_effect = retryable_exception("mock") + with pytest.raises(retryable_exception): + mutation = mutations.SetCell( + "family", b"qualifier", b"value", -1 + ) + assert mutation.is_idempotent() is False + table.mutate_row("row_key", mutation, operation_timeout=0.2) + + @pytest.mark.parametrize( + "non_retryable_exception", + [ + core_exceptions.OutOfRange, + core_exceptions.NotFound, + core_exceptions.FailedPrecondition, + RuntimeError, + ValueError, + core_exceptions.Aborted, + ], + ) + def test_mutate_row_non_retryable_errors(self, non_retryable_exception): + with self._make_client(project="project") as client: + with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "mutate_row" + ) as mock_gapic: + mock_gapic.side_effect = non_retryable_exception("mock") + with pytest.raises(non_retryable_exception): + mutation = mutations.SetCell( + "family", + b"qualifier", + b"value", + timestamp_micros=1234567890, + ) + assert mutation.is_idempotent() is True + table.mutate_row("row_key", mutation, operation_timeout=0.2) + + @pytest.mark.parametrize("mutations", [[], None]) + def test_mutate_row_no_mutations(self, mutations): + with self._make_client() as client: + with client.get_table("instance", "table") as table: + with pytest.raises(ValueError) as e: + table.mutate_row("key", mutations=mutations) + assert e.value.args[0] == "No mutations provided" + + +class TestBulkMutateRows: + def _make_client(self, *args, **kwargs): + return CrossSync._Sync_Impl.TestBigtableDataClient._make_client(*args, **kwargs) + + def _mock_response(self, response_list): + from google.cloud.bigtable_v2.types import MutateRowsResponse + from google.rpc import status_pb2 + + statuses = [] + for response in response_list: + if isinstance(response, core_exceptions.GoogleAPICallError): + statuses.append( + status_pb2.Status( + message=str(response), code=response.grpc_status_code.value[0] + ) + ) + else: + statuses.append(status_pb2.Status(code=0)) + entries = [ + MutateRowsResponse.Entry(index=i, status=statuses[i]) + for i in range(len(response_list)) + ] + + def generator(): + yield MutateRowsResponse(entries=entries) + + return generator() + + @pytest.mark.parametrize( + "mutation_arg", + [ + [mutations.SetCell("family", b"qualifier", b"value")], + [ + mutations.SetCell( + "family", b"qualifier", b"value", timestamp_micros=1234567890 + ) + ], + [mutations.DeleteRangeFromColumn("family", b"qualifier")], + [mutations.DeleteAllFromFamily("family")], + [mutations.DeleteAllFromRow()], + [mutations.SetCell("family", b"qualifier", b"value")], + [ + mutations.DeleteRangeFromColumn("family", b"qualifier"), + mutations.DeleteAllFromRow(), + 
], + ], + ) + def test_bulk_mutate_rows(self, mutation_arg): + """Test mutations with no errors""" + expected_attempt_timeout = 19 + with self._make_client(project="project") as client: + with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "mutate_rows" + ) as mock_gapic: + mock_gapic.return_value = self._mock_response([None]) + bulk_mutation = mutations.RowMutationEntry(b"row_key", mutation_arg) + table.bulk_mutate_rows( + [bulk_mutation], attempt_timeout=expected_attempt_timeout + ) + assert mock_gapic.call_count == 1 + kwargs = mock_gapic.call_args[1] + assert ( + kwargs["table_name"] + == "projects/project/instances/instance/tables/table" + ) + assert kwargs["entries"] == [bulk_mutation._to_pb()] + assert kwargs["timeout"] == expected_attempt_timeout + assert kwargs["retry"] is None + + def test_bulk_mutate_rows_multiple_entries(self): + """Test mutations with no errors""" + with self._make_client(project="project") as client: + with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "mutate_rows" + ) as mock_gapic: + mock_gapic.return_value = self._mock_response([None, None]) + mutation_list = [mutations.DeleteAllFromRow()] + entry_1 = mutations.RowMutationEntry(b"row_key_1", mutation_list) + entry_2 = mutations.RowMutationEntry(b"row_key_2", mutation_list) + table.bulk_mutate_rows([entry_1, entry_2]) + assert mock_gapic.call_count == 1 + kwargs = mock_gapic.call_args[1] + assert ( + kwargs["table_name"] + == "projects/project/instances/instance/tables/table" + ) + assert kwargs["entries"][0] == entry_1._to_pb() + assert kwargs["entries"][1] == entry_2._to_pb() + + @pytest.mark.parametrize( + "exception", + [core_exceptions.DeadlineExceeded, core_exceptions.ServiceUnavailable], + ) + def test_bulk_mutate_rows_idempotent_mutation_error_retryable(self, exception): + """Individual idempotent mutations should be retried if they fail with a retryable error""" + from google.cloud.bigtable.data.exceptions import ( + RetryExceptionGroup, + FailedMutationEntryError, + MutationsExceptionGroup, + ) + + with self._make_client(project="project") as client: + with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "mutate_rows" + ) as mock_gapic: + mock_gapic.side_effect = lambda *a, **k: self._mock_response( + [exception("mock")] + ) + with pytest.raises(MutationsExceptionGroup) as e: + mutation = mutations.DeleteAllFromRow() + entry = mutations.RowMutationEntry(b"row_key", [mutation]) + assert mutation.is_idempotent() is True + table.bulk_mutate_rows([entry], operation_timeout=0.05) + assert len(e.value.exceptions) == 1 + failed_exception = e.value.exceptions[0] + assert "non-idempotent" not in str(failed_exception) + assert isinstance(failed_exception, FailedMutationEntryError) + cause = failed_exception.__cause__ + assert isinstance(cause, RetryExceptionGroup) + assert isinstance(cause.exceptions[0], exception) + assert isinstance( + cause.exceptions[-1], core_exceptions.DeadlineExceeded + ) + + @pytest.mark.parametrize( + "exception", + [ + core_exceptions.OutOfRange, + core_exceptions.NotFound, + core_exceptions.FailedPrecondition, + core_exceptions.Aborted, + ], + ) + def test_bulk_mutate_rows_idempotent_mutation_error_non_retryable(self, exception): + """Individual idempotent mutations should not be retried if they fail with a non-retryable error""" + from google.cloud.bigtable.data.exceptions import ( + FailedMutationEntryError, + 
MutationsExceptionGroup, + ) + + with self._make_client(project="project") as client: + with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "mutate_rows" + ) as mock_gapic: + mock_gapic.side_effect = lambda *a, **k: self._mock_response( + [exception("mock")] + ) + with pytest.raises(MutationsExceptionGroup) as e: + mutation = mutations.DeleteAllFromRow() + entry = mutations.RowMutationEntry(b"row_key", [mutation]) + assert mutation.is_idempotent() is True + table.bulk_mutate_rows([entry], operation_timeout=0.05) + assert len(e.value.exceptions) == 1 + failed_exception = e.value.exceptions[0] + assert "non-idempotent" not in str(failed_exception) + assert isinstance(failed_exception, FailedMutationEntryError) + cause = failed_exception.__cause__ + assert isinstance(cause, exception) + + @pytest.mark.parametrize( + "retryable_exception", + [core_exceptions.DeadlineExceeded, core_exceptions.ServiceUnavailable], + ) + def test_bulk_mutate_idempotent_retryable_request_errors(self, retryable_exception): + """Individual idempotent mutations should be retried if the request fails with a retryable error""" + from google.cloud.bigtable.data.exceptions import ( + RetryExceptionGroup, + FailedMutationEntryError, + MutationsExceptionGroup, + ) + + with self._make_client(project="project") as client: + with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "mutate_rows" + ) as mock_gapic: + mock_gapic.side_effect = retryable_exception("mock") + with pytest.raises(MutationsExceptionGroup) as e: + mutation = mutations.SetCell( + "family", b"qualifier", b"value", timestamp_micros=123 + ) + entry = mutations.RowMutationEntry(b"row_key", [mutation]) + assert mutation.is_idempotent() is True + table.bulk_mutate_rows([entry], operation_timeout=0.05) + assert len(e.value.exceptions) == 1 + failed_exception = e.value.exceptions[0] + assert isinstance(failed_exception, FailedMutationEntryError) + assert "non-idempotent" not in str(failed_exception) + cause = failed_exception.__cause__ + assert isinstance(cause, RetryExceptionGroup) + assert isinstance(cause.exceptions[0], retryable_exception) + + @pytest.mark.parametrize( + "retryable_exception", + [core_exceptions.DeadlineExceeded, core_exceptions.ServiceUnavailable], + ) + def test_bulk_mutate_rows_non_idempotent_retryable_errors( + self, retryable_exception + ): + """Non-Idempotent mutations should never be retried""" + from google.cloud.bigtable.data.exceptions import ( + FailedMutationEntryError, + MutationsExceptionGroup, + ) + + with self._make_client(project="project") as client: + with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "mutate_rows" + ) as mock_gapic: + mock_gapic.side_effect = lambda *a, **k: self._mock_response( + [retryable_exception("mock")] + ) + with pytest.raises(MutationsExceptionGroup) as e: + mutation = mutations.SetCell( + "family", b"qualifier", b"value", -1 + ) + entry = mutations.RowMutationEntry(b"row_key", [mutation]) + assert mutation.is_idempotent() is False + table.bulk_mutate_rows([entry], operation_timeout=0.2) + assert len(e.value.exceptions) == 1 + failed_exception = e.value.exceptions[0] + assert isinstance(failed_exception, FailedMutationEntryError) + assert "non-idempotent" in str(failed_exception) + cause = failed_exception.__cause__ + assert isinstance(cause, retryable_exception) + + @pytest.mark.parametrize( + "non_retryable_exception", + [ + 
core_exceptions.OutOfRange, + core_exceptions.NotFound, + core_exceptions.FailedPrecondition, + RuntimeError, + ValueError, + ], + ) + def test_bulk_mutate_rows_non_retryable_errors(self, non_retryable_exception): + """If the request fails with a non-retryable error, mutations should not be retried""" + from google.cloud.bigtable.data.exceptions import ( + FailedMutationEntryError, + MutationsExceptionGroup, + ) + + with self._make_client(project="project") as client: + with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "mutate_rows" + ) as mock_gapic: + mock_gapic.side_effect = non_retryable_exception("mock") + with pytest.raises(MutationsExceptionGroup) as e: + mutation = mutations.SetCell( + "family", b"qualifier", b"value", timestamp_micros=123 + ) + entry = mutations.RowMutationEntry(b"row_key", [mutation]) + assert mutation.is_idempotent() is True + table.bulk_mutate_rows([entry], operation_timeout=0.2) + assert len(e.value.exceptions) == 1 + failed_exception = e.value.exceptions[0] + assert isinstance(failed_exception, FailedMutationEntryError) + assert "non-idempotent" not in str(failed_exception) + cause = failed_exception.__cause__ + assert isinstance(cause, non_retryable_exception) + + def test_bulk_mutate_error_index(self): + """Test partial failure, partial success. Errors should be associated with the correct index""" + from google.api_core.exceptions import ( + DeadlineExceeded, + ServiceUnavailable, + FailedPrecondition, + ) + from google.cloud.bigtable.data.exceptions import ( + RetryExceptionGroup, + FailedMutationEntryError, + MutationsExceptionGroup, + ) + + with self._make_client(project="project") as client: + with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "mutate_rows" + ) as mock_gapic: + mock_gapic.side_effect = [ + self._mock_response([None, ServiceUnavailable("mock"), None]), + self._mock_response([DeadlineExceeded("mock")]), + self._mock_response([FailedPrecondition("final")]), + ] + with pytest.raises(MutationsExceptionGroup) as e: + mutation = mutations.SetCell( + "family", b"qualifier", b"value", timestamp_micros=123 + ) + entries = [ + mutations.RowMutationEntry( + f"row_key_{i}".encode(), [mutation] + ) + for i in range(3) + ] + assert mutation.is_idempotent() is True + table.bulk_mutate_rows(entries, operation_timeout=1000) + assert len(e.value.exceptions) == 1 + failed = e.value.exceptions[0] + assert isinstance(failed, FailedMutationEntryError) + assert failed.index == 1 + assert failed.entry == entries[1] + cause = failed.__cause__ + assert isinstance(cause, RetryExceptionGroup) + assert len(cause.exceptions) == 3 + assert isinstance(cause.exceptions[0], ServiceUnavailable) + assert isinstance(cause.exceptions[1], DeadlineExceeded) + assert isinstance(cause.exceptions[2], FailedPrecondition) + + def test_bulk_mutate_error_recovery(self): + """If an error occurs, then resolves, no exception should be raised""" + from google.api_core.exceptions import DeadlineExceeded + + with self._make_client(project="project") as client: + table = client.get_table("instance", "table") + with mock.patch.object(client._gapic_client, "mutate_rows") as mock_gapic: + mock_gapic.side_effect = [ + self._mock_response([DeadlineExceeded("mock")]), + self._mock_response([None]), + ] + mutation = mutations.SetCell( + "family", b"qualifier", b"value", timestamp_micros=123 + ) + entries = [ + mutations.RowMutationEntry(f"row_key_{i}".encode(), [mutation]) + for i in range(3) 
+ ] + table.bulk_mutate_rows(entries, operation_timeout=1000) + + +class TestCheckAndMutateRow: + def _make_client(self, *args, **kwargs): + return CrossSync._Sync_Impl.TestBigtableDataClient._make_client(*args, **kwargs) + + @pytest.mark.parametrize("gapic_result", [True, False]) + def test_check_and_mutate(self, gapic_result): + from google.cloud.bigtable_v2.types import CheckAndMutateRowResponse + + app_profile = "app_profile_id" + with self._make_client() as client: + with client.get_table( + "instance", "table", app_profile_id=app_profile + ) as table: + with mock.patch.object( + client._gapic_client, "check_and_mutate_row" + ) as mock_gapic: + mock_gapic.return_value = CheckAndMutateRowResponse( + predicate_matched=gapic_result + ) + row_key = b"row_key" + predicate = None + true_mutations = [mock.Mock()] + false_mutations = [mock.Mock(), mock.Mock()] + operation_timeout = 0.2 + found = table.check_and_mutate_row( + row_key, + predicate, + true_case_mutations=true_mutations, + false_case_mutations=false_mutations, + operation_timeout=operation_timeout, + ) + assert found == gapic_result + kwargs = mock_gapic.call_args[1] + assert kwargs["table_name"] == table.table_name + assert kwargs["row_key"] == row_key + assert kwargs["predicate_filter"] == predicate + assert kwargs["true_mutations"] == [ + m._to_pb() for m in true_mutations + ] + assert kwargs["false_mutations"] == [ + m._to_pb() for m in false_mutations + ] + assert kwargs["app_profile_id"] == app_profile + assert kwargs["timeout"] == operation_timeout + assert kwargs["retry"] is None + + def test_check_and_mutate_bad_timeout(self): + """Should raise error if operation_timeout < 0""" + with self._make_client() as client: + with client.get_table("instance", "table") as table: + with pytest.raises(ValueError) as e: + table.check_and_mutate_row( + b"row_key", + None, + true_case_mutations=[mock.Mock()], + false_case_mutations=[], + operation_timeout=-1, + ) + assert str(e.value) == "operation_timeout must be greater than 0" + + def test_check_and_mutate_single_mutations(self): + """if single mutations are passed, they should be internally wrapped in a list""" + from google.cloud.bigtable.data.mutations import SetCell + from google.cloud.bigtable_v2.types import CheckAndMutateRowResponse + + with self._make_client() as client: + with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "check_and_mutate_row" + ) as mock_gapic: + mock_gapic.return_value = CheckAndMutateRowResponse( + predicate_matched=True + ) + true_mutation = SetCell("family", b"qualifier", b"value") + false_mutation = SetCell("family", b"qualifier", b"value") + table.check_and_mutate_row( + b"row_key", + None, + true_case_mutations=true_mutation, + false_case_mutations=false_mutation, + ) + kwargs = mock_gapic.call_args[1] + assert kwargs["true_mutations"] == [true_mutation._to_pb()] + assert kwargs["false_mutations"] == [false_mutation._to_pb()] + + def test_check_and_mutate_predicate_object(self): + """predicate filter should be passed to gapic request""" + from google.cloud.bigtable_v2.types import CheckAndMutateRowResponse + + mock_predicate = mock.Mock() + predicate_pb = {"predicate": "dict"} + mock_predicate._to_pb.return_value = predicate_pb + with self._make_client() as client: + with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "check_and_mutate_row" + ) as mock_gapic: + mock_gapic.return_value = CheckAndMutateRowResponse( + predicate_matched=True + ) + 
table.check_and_mutate_row( + b"row_key", mock_predicate, false_case_mutations=[mock.Mock()] + ) + kwargs = mock_gapic.call_args[1] + assert kwargs["predicate_filter"] == predicate_pb + assert mock_predicate._to_pb.call_count == 1 + assert kwargs["retry"] is None + + def test_check_and_mutate_mutations_parsing(self): + """mutations objects should be converted to protos""" + from google.cloud.bigtable_v2.types import CheckAndMutateRowResponse + from google.cloud.bigtable.data.mutations import DeleteAllFromRow + + mutations = [mock.Mock() for _ in range(5)] + for idx, mutation in enumerate(mutations): + mutation._to_pb.return_value = f"fake {idx}" + mutations.append(DeleteAllFromRow()) + with self._make_client() as client: + with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "check_and_mutate_row" + ) as mock_gapic: + mock_gapic.return_value = CheckAndMutateRowResponse( + predicate_matched=True + ) + table.check_and_mutate_row( + b"row_key", + None, + true_case_mutations=mutations[0:2], + false_case_mutations=mutations[2:], + ) + kwargs = mock_gapic.call_args[1] + assert kwargs["true_mutations"] == ["fake 0", "fake 1"] + assert kwargs["false_mutations"] == [ + "fake 2", + "fake 3", + "fake 4", + DeleteAllFromRow()._to_pb(), + ] + assert all( + (mutation._to_pb.call_count == 1 for mutation in mutations[:5]) + ) + + +class TestReadModifyWriteRow: + def _make_client(self, *args, **kwargs): + return CrossSync._Sync_Impl.TestBigtableDataClient._make_client(*args, **kwargs) + + @pytest.mark.parametrize( + "call_rules,expected_rules", + [ + ( + AppendValueRule("f", "c", b"1"), + [AppendValueRule("f", "c", b"1")._to_pb()], + ), + ( + [AppendValueRule("f", "c", b"1")], + [AppendValueRule("f", "c", b"1")._to_pb()], + ), + (IncrementRule("f", "c", 1), [IncrementRule("f", "c", 1)._to_pb()]), + ( + [AppendValueRule("f", "c", b"1"), IncrementRule("f", "c", 1)], + [ + AppendValueRule("f", "c", b"1")._to_pb(), + IncrementRule("f", "c", 1)._to_pb(), + ], + ), + ], + ) + def test_read_modify_write_call_rule_args(self, call_rules, expected_rules): + """Test that the gapic call is called with given rules""" + with self._make_client() as client: + with client.get_table("instance", "table") as table: + with mock.patch.object( + client._gapic_client, "read_modify_write_row" + ) as mock_gapic: + table.read_modify_write_row("key", call_rules) + assert mock_gapic.call_count == 1 + found_kwargs = mock_gapic.call_args_list[0][1] + assert found_kwargs["rules"] == expected_rules + assert found_kwargs["retry"] is None + + @pytest.mark.parametrize("rules", [[], None]) + def test_read_modify_write_no_rules(self, rules): + with self._make_client() as client: + with client.get_table("instance", "table") as table: + with pytest.raises(ValueError) as e: + table.read_modify_write_row("key", rules=rules) + assert e.value.args[0] == "rules must contain at least one item" + + def test_read_modify_write_call_defaults(self): + instance = "instance1" + table_id = "table1" + project = "project1" + row_key = "row_key1" + with self._make_client(project=project) as client: + with client.get_table(instance, table_id) as table: + with mock.patch.object( + client._gapic_client, "read_modify_write_row" + ) as mock_gapic: + table.read_modify_write_row(row_key, mock.Mock()) + assert mock_gapic.call_count == 1 + kwargs = mock_gapic.call_args_list[0][1] + assert ( + kwargs["table_name"] + == f"projects/{project}/instances/{instance}/tables/{table_id}" + ) + assert kwargs["app_profile_id"] is 
None + assert kwargs["row_key"] == row_key.encode() + assert kwargs["timeout"] > 1 + + def test_read_modify_write_call_overrides(self): + row_key = b"row_key1" + expected_timeout = 12345 + profile_id = "profile1" + with self._make_client() as client: + with client.get_table( + "instance", "table_id", app_profile_id=profile_id + ) as table: + with mock.patch.object( + client._gapic_client, "read_modify_write_row" + ) as mock_gapic: + table.read_modify_write_row( + row_key, mock.Mock(), operation_timeout=expected_timeout + ) + assert mock_gapic.call_count == 1 + kwargs = mock_gapic.call_args_list[0][1] + assert kwargs["app_profile_id"] is profile_id + assert kwargs["row_key"] == row_key + assert kwargs["timeout"] == expected_timeout + + def test_read_modify_write_string_key(self): + row_key = "string_row_key1" + with self._make_client() as client: + with client.get_table("instance", "table_id") as table: + with mock.patch.object( + client._gapic_client, "read_modify_write_row" + ) as mock_gapic: + table.read_modify_write_row(row_key, mock.Mock()) + assert mock_gapic.call_count == 1 + kwargs = mock_gapic.call_args_list[0][1] + assert kwargs["row_key"] == row_key.encode() + + def test_read_modify_write_row_building(self): + """results from gapic call should be used to construct row""" + from google.cloud.bigtable.data.row import Row + from google.cloud.bigtable_v2.types import ReadModifyWriteRowResponse + from google.cloud.bigtable_v2.types import Row as RowPB + + mock_response = ReadModifyWriteRowResponse(row=RowPB()) + with self._make_client() as client: + with client.get_table("instance", "table_id") as table: + with mock.patch.object( + client._gapic_client, "read_modify_write_row" + ) as mock_gapic: + with mock.patch.object(Row, "_from_pb") as constructor_mock: + mock_gapic.return_value = mock_response + table.read_modify_write_row("key", mock.Mock()) + assert constructor_mock.call_count == 1 + constructor_mock.assert_called_once_with(mock_response.row) + + +class TestExecuteQuery: + TABLE_NAME = "TABLE_NAME" + INSTANCE_NAME = "INSTANCE_NAME" + + def _make_client(self, *args, **kwargs): + return CrossSync._Sync_Impl.TestBigtableDataClient._make_client(*args, **kwargs) + + def _make_gapic_stream(self, sample_list: list["ExecuteQueryResponse" | Exception]): + class MockStream: + def __init__(self, sample_list): + self.sample_list = sample_list + + def __aiter__(self): + return self + + def __iter__(self): + return self + + def __next__(self): + if not self.sample_list: + raise CrossSync._Sync_Impl.StopIteration + value = self.sample_list.pop(0) + if isinstance(value, Exception): + raise value + return value + + def __anext__(self): + return self.__next__() + + return MockStream(sample_list) + + def resonse_with_metadata(self): + from google.cloud.bigtable_v2.types.bigtable import ExecuteQueryResponse + + schema = {"a": "string_type", "b": "int64_type"} + return ExecuteQueryResponse( + { + "metadata": { + "proto_schema": { + "columns": [ + {"name": name, "type_": {_type: {}}} + for (name, _type) in schema.items() + ] + } + } + } + ) + + def resonse_with_result(self, *args, resume_token=None): + from google.cloud.bigtable_v2.types.data import ProtoRows, Value as PBValue + from google.cloud.bigtable_v2.types.bigtable import ExecuteQueryResponse + + if resume_token is None: + resume_token_dict = {} + else: + resume_token_dict = {"resume_token": resume_token} + values = [] + for column_value in args: + if column_value is None: + pb_value = PBValue({}) + else: + pb_value = PBValue( + { + 
"int_value" + if isinstance(column_value, int) + else "string_value": column_value + } + ) + values.append(pb_value) + rows = ProtoRows(values=values) + return ExecuteQueryResponse( + { + "results": { + "proto_rows_batch": {"batch_data": ProtoRows.serialize(rows)}, + **resume_token_dict, + } + } + ) + + def test_execute_query(self): + values = [ + self.resonse_with_metadata(), + self.resonse_with_result("test"), + self.resonse_with_result(8, resume_token=b"r1"), + self.resonse_with_result("test2"), + self.resonse_with_result(9, resume_token=b"r2"), + self.resonse_with_result("test3"), + self.resonse_with_result(None, resume_token=b"r3"), + ] + client = self._make_client() + with mock.patch.object( + client._gapic_client, "execute_query", CrossSync._Sync_Impl.Mock() + ) as execute_query_mock: + execute_query_mock.return_value = self._make_gapic_stream(values) + result = client.execute_query( + f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME + ) + results = [r for r in result] + assert results[0]["a"] == "test" + assert results[0]["b"] == 8 + assert results[1]["a"] == "test2" + assert results[1]["b"] == 9 + assert results[2]["a"] == "test3" + assert results[2]["b"] is None + assert execute_query_mock.call_count == 1 + + def test_execute_query_with_params(self): + values = [ + self.resonse_with_metadata(), + self.resonse_with_result("test2"), + self.resonse_with_result(9, resume_token=b"r2"), + ] + client = self._make_client() + with mock.patch.object( + client._gapic_client, "execute_query", CrossSync._Sync_Impl.Mock() + ) as execute_query_mock: + execute_query_mock.return_value = self._make_gapic_stream(values) + result = client.execute_query( + f"SELECT a, b FROM {self.TABLE_NAME} WHERE b=@b", + self.INSTANCE_NAME, + parameters={"b": 9}, + ) + results = [r for r in result] + assert len(results) == 1 + assert results[0]["a"] == "test2" + assert results[0]["b"] == 9 + assert execute_query_mock.call_count == 1 + + def test_execute_query_error_before_metadata(self): + from google.api_core.exceptions import DeadlineExceeded + + values = [ + DeadlineExceeded(""), + self.resonse_with_metadata(), + self.resonse_with_result("test"), + self.resonse_with_result(8, resume_token=b"r1"), + self.resonse_with_result("test2"), + self.resonse_with_result(9, resume_token=b"r2"), + self.resonse_with_result("test3"), + self.resonse_with_result(None, resume_token=b"r3"), + ] + client = self._make_client() + with mock.patch.object( + client._gapic_client, "execute_query", CrossSync._Sync_Impl.Mock() + ) as execute_query_mock: + execute_query_mock.return_value = self._make_gapic_stream(values) + result = client.execute_query( + f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME + ) + results = [r for r in result] + assert len(results) == 3 + assert execute_query_mock.call_count == 2 + + def test_execute_query_error_after_metadata(self): + from google.api_core.exceptions import DeadlineExceeded + + values = [ + self.resonse_with_metadata(), + DeadlineExceeded(""), + self.resonse_with_metadata(), + self.resonse_with_result("test"), + self.resonse_with_result(8, resume_token=b"r1"), + self.resonse_with_result("test2"), + self.resonse_with_result(9, resume_token=b"r2"), + self.resonse_with_result("test3"), + self.resonse_with_result(None, resume_token=b"r3"), + ] + client = self._make_client() + with mock.patch.object( + client._gapic_client, "execute_query", CrossSync._Sync_Impl.Mock() + ) as execute_query_mock: + execute_query_mock.return_value = self._make_gapic_stream(values) + result = 
client.execute_query( + f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME + ) + results = [r for r in result] + assert len(results) == 3 + assert execute_query_mock.call_count == 2 + requests = [args[0][0] for args in execute_query_mock.call_args_list] + resume_tokens = [r.resume_token for r in requests if r.resume_token] + assert resume_tokens == [] + + def test_execute_query_with_retries(self): + from google.api_core.exceptions import DeadlineExceeded + + values = [ + self.resonse_with_metadata(), + self.resonse_with_result("test"), + self.resonse_with_result(8, resume_token=b"r1"), + DeadlineExceeded(""), + self.resonse_with_result("test2"), + self.resonse_with_result(9, resume_token=b"r2"), + self.resonse_with_result("test3"), + DeadlineExceeded(""), + self.resonse_with_result("test3"), + self.resonse_with_result(None, resume_token=b"r3"), + ] + client = self._make_client() + with mock.patch.object( + client._gapic_client, "execute_query", CrossSync._Sync_Impl.Mock() + ) as execute_query_mock: + execute_query_mock.return_value = self._make_gapic_stream(values) + result = client.execute_query( + f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME + ) + results = [r for r in result] + assert results[0]["a"] == "test" + assert results[0]["b"] == 8 + assert results[1]["a"] == "test2" + assert results[1]["b"] == 9 + assert results[2]["a"] == "test3" + assert results[2]["b"] is None + assert len(results) == 3 + requests = [args[0][0] for args in execute_query_mock.call_args_list] + resume_tokens = [r.resume_token for r in requests if r.resume_token] + assert resume_tokens == [b"r1", b"r2"] + + @pytest.mark.parametrize( + "exception", + [ + core_exceptions.DeadlineExceeded(""), + core_exceptions.Aborted(""), + core_exceptions.ServiceUnavailable(""), + ], + ) + def test_execute_query_retryable_error(self, exception): + values = [ + self.resonse_with_metadata(), + self.resonse_with_result("test", resume_token=b"t1"), + exception, + self.resonse_with_result(8, resume_token=b"t2"), + ] + client = self._make_client() + with mock.patch.object( + client._gapic_client, "execute_query", CrossSync._Sync_Impl.Mock() + ) as execute_query_mock: + execute_query_mock.return_value = self._make_gapic_stream(values) + result = client.execute_query( + f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME + ) + results = [r for r in result] + assert len(results) == 1 + assert execute_query_mock.call_count == 2 + requests = [args[0][0] for args in execute_query_mock.call_args_list] + resume_tokens = [r.resume_token for r in requests if r.resume_token] + assert resume_tokens == [b"t1"] + + def test_execute_query_retry_partial_row(self): + values = [ + self.resonse_with_metadata(), + self.resonse_with_result("test", resume_token=b"t1"), + core_exceptions.DeadlineExceeded(""), + self.resonse_with_result(8, resume_token=b"t2"), + ] + client = self._make_client() + with mock.patch.object( + client._gapic_client, "execute_query", CrossSync._Sync_Impl.Mock() + ) as execute_query_mock: + execute_query_mock.return_value = self._make_gapic_stream(values) + result = client.execute_query( + f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME + ) + results = [r for r in result] + assert results[0]["a"] == "test" + assert results[0]["b"] == 8 + assert execute_query_mock.call_count == 2 + requests = [args[0][0] for args in execute_query_mock.call_args_list] + resume_tokens = [r.resume_token for r in requests if r.resume_token] + assert resume_tokens == [b"t1"] + + @pytest.mark.parametrize( + 
"ExceptionType", + [ + core_exceptions.InvalidArgument, + core_exceptions.FailedPrecondition, + core_exceptions.PermissionDenied, + core_exceptions.MethodNotImplemented, + core_exceptions.Cancelled, + core_exceptions.AlreadyExists, + core_exceptions.OutOfRange, + core_exceptions.DataLoss, + core_exceptions.Unauthenticated, + core_exceptions.NotFound, + core_exceptions.ResourceExhausted, + core_exceptions.Unknown, + core_exceptions.InternalServerError, + ], + ) + def test_execute_query_non_retryable(self, ExceptionType): + values = [ + self.resonse_with_metadata(), + self.resonse_with_result("test"), + self.resonse_with_result(8, resume_token=b"r1"), + ExceptionType(""), + self.resonse_with_result("test2"), + self.resonse_with_result(9, resume_token=b"r2"), + self.resonse_with_result("test3"), + self.resonse_with_result(None, resume_token=b"r3"), + ] + client = self._make_client() + with mock.patch.object( + client._gapic_client, "execute_query", CrossSync._Sync_Impl.Mock() + ) as execute_query_mock: + execute_query_mock.return_value = self._make_gapic_stream(values) + result = client.execute_query( + f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME + ) + r = CrossSync._Sync_Impl.next(result) + assert r["a"] == "test" + assert r["b"] == 8 + with pytest.raises(ExceptionType): + r = CrossSync._Sync_Impl.next(result) + assert execute_query_mock.call_count == 1 + requests = [args[0][0] for args in execute_query_mock.call_args_list] + resume_tokens = [r.resume_token for r in requests if r.resume_token] + assert resume_tokens == [] + + def test_execute_query_metadata_received_multiple_times_detected(self): + values = [self.resonse_with_metadata(), self.resonse_with_metadata()] + client = self._make_client() + with mock.patch.object( + client._gapic_client, "execute_query", CrossSync._Sync_Impl.Mock() + ) as execute_query_mock: + execute_query_mock.return_value = self._make_gapic_stream(values) + with pytest.raises( + Exception, match="Invalid ExecuteQuery response received" + ): + [ + r + for r in client.execute_query( + f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME + ) + ] diff --git a/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_mutations_batcher.py b/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_mutations_batcher.py new file mode 100644 index 000000000000..59ea621ac3d3 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_mutations_batcher.py @@ -0,0 +1,1078 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# This file is automatically generated by CrossSync. Do not edit manually. 
+ +import pytest +import mock +import asyncio +import time +import google.api_core.exceptions as core_exceptions +import google.api_core.retry +from google.cloud.bigtable.data.exceptions import _MutateRowsIncomplete +from google.cloud.bigtable.data import TABLE_DEFAULT +from google.cloud.bigtable.data._cross_sync import CrossSync + + +class Test_FlowControl: + @staticmethod + def _target_class(): + return CrossSync._Sync_Impl._FlowControl + + def _make_one(self, max_mutation_count=10, max_mutation_bytes=100): + return self._target_class()(max_mutation_count, max_mutation_bytes) + + @staticmethod + def _make_mutation(count=1, size=1): + mutation = mock.Mock() + mutation.size.return_value = size + mutation.mutations = [mock.Mock()] * count + return mutation + + def test_ctor(self): + max_mutation_count = 9 + max_mutation_bytes = 19 + instance = self._make_one(max_mutation_count, max_mutation_bytes) + assert instance._max_mutation_count == max_mutation_count + assert instance._max_mutation_bytes == max_mutation_bytes + assert instance._in_flight_mutation_count == 0 + assert instance._in_flight_mutation_bytes == 0 + assert isinstance(instance._capacity_condition, CrossSync._Sync_Impl.Condition) + + def test_ctor_invalid_values(self): + """Test that values are positive, and fit within expected limits""" + with pytest.raises(ValueError) as e: + self._make_one(0, 1) + assert "max_mutation_count must be greater than 0" in str(e.value) + with pytest.raises(ValueError) as e: + self._make_one(1, 0) + assert "max_mutation_bytes must be greater than 0" in str(e.value) + + @pytest.mark.parametrize( + "max_count,max_size,existing_count,existing_size,new_count,new_size,expected", + [ + (1, 1, 0, 0, 0, 0, True), + (1, 1, 1, 1, 1, 1, False), + (10, 10, 0, 0, 0, 0, True), + (10, 10, 0, 0, 9, 9, True), + (10, 10, 0, 0, 11, 9, True), + (10, 10, 0, 1, 11, 9, True), + (10, 10, 1, 0, 11, 9, False), + (10, 10, 0, 0, 9, 11, True), + (10, 10, 1, 0, 9, 11, True), + (10, 10, 0, 1, 9, 11, False), + (10, 1, 0, 0, 1, 0, True), + (1, 10, 0, 0, 0, 8, True), + (float("inf"), float("inf"), 0, 0, 10000000000.0, 10000000000.0, True), + (8, 8, 0, 0, 10000000000.0, 10000000000.0, True), + (12, 12, 6, 6, 5, 5, True), + (12, 12, 5, 5, 6, 6, True), + (12, 12, 6, 6, 6, 6, True), + (12, 12, 6, 6, 7, 7, False), + (12, 12, 0, 0, 13, 13, True), + (12, 12, 12, 0, 0, 13, True), + (12, 12, 0, 12, 13, 0, True), + (12, 12, 1, 1, 13, 13, False), + (12, 12, 1, 1, 0, 13, False), + (12, 12, 1, 1, 13, 0, False), + ], + ) + def test__has_capacity( + self, + max_count, + max_size, + existing_count, + existing_size, + new_count, + new_size, + expected, + ): + """_has_capacity should return True if the new mutation will will not exceed the max count or size""" + instance = self._make_one(max_count, max_size) + instance._in_flight_mutation_count = existing_count + instance._in_flight_mutation_bytes = existing_size + assert instance._has_capacity(new_count, new_size) == expected + + @pytest.mark.parametrize( + "existing_count,existing_size,added_count,added_size,new_count,new_size", + [ + (0, 0, 0, 0, 0, 0), + (2, 2, 1, 1, 1, 1), + (2, 0, 1, 0, 1, 0), + (0, 2, 0, 1, 0, 1), + (10, 10, 0, 0, 10, 10), + (10, 10, 5, 5, 5, 5), + (0, 0, 1, 1, -1, -1), + ], + ) + def test_remove_from_flow_value_update( + self, + existing_count, + existing_size, + added_count, + added_size, + new_count, + new_size, + ): + """completed mutations should lower the inflight values""" + instance = self._make_one() + instance._in_flight_mutation_count = existing_count + 
instance._in_flight_mutation_bytes = existing_size + mutation = self._make_mutation(added_count, added_size) + instance.remove_from_flow(mutation) + assert instance._in_flight_mutation_count == new_count + assert instance._in_flight_mutation_bytes == new_size + + def test__remove_from_flow_unlock(self): + """capacity condition should notify after mutation is complete""" + instance = self._make_one(10, 10) + instance._in_flight_mutation_count = 10 + instance._in_flight_mutation_bytes = 10 + + def task_routine(): + with instance._capacity_condition: + instance._capacity_condition.wait_for( + lambda: instance._has_capacity(1, 1) + ) + + import threading + + thread = threading.Thread(target=task_routine) + thread.start() + task_alive = thread.is_alive + CrossSync._Sync_Impl.sleep(0.05) + assert task_alive() is True + mutation = self._make_mutation(count=0, size=5) + instance.remove_from_flow([mutation]) + CrossSync._Sync_Impl.sleep(0.05) + assert instance._in_flight_mutation_count == 10 + assert instance._in_flight_mutation_bytes == 5 + assert task_alive() is True + instance._in_flight_mutation_bytes = 10 + mutation = self._make_mutation(count=5, size=0) + instance.remove_from_flow([mutation]) + CrossSync._Sync_Impl.sleep(0.05) + assert instance._in_flight_mutation_count == 5 + assert instance._in_flight_mutation_bytes == 10 + assert task_alive() is True + instance._in_flight_mutation_count = 10 + mutation = self._make_mutation(count=5, size=5) + instance.remove_from_flow([mutation]) + CrossSync._Sync_Impl.sleep(0.05) + assert instance._in_flight_mutation_count == 5 + assert instance._in_flight_mutation_bytes == 5 + assert task_alive() is False + + @pytest.mark.parametrize( + "mutations,count_cap,size_cap,expected_results", + [ + ([(5, 5), (1, 1), (1, 1)], 10, 10, [[(5, 5), (1, 1), (1, 1)]]), + ([(1, 1), (1, 1), (1, 1)], 1, 1, [[(1, 1)], [(1, 1)], [(1, 1)]]), + ([(1, 1), (1, 1), (1, 1)], 2, 10, [[(1, 1), (1, 1)], [(1, 1)]]), + ([(1, 1), (1, 1), (1, 1)], 10, 2, [[(1, 1), (1, 1)], [(1, 1)]]), + ( + [(1, 1), (5, 5), (4, 1), (1, 4), (1, 1)], + 5, + 5, + [[(1, 1)], [(5, 5)], [(4, 1), (1, 4)], [(1, 1)]], + ), + ], + ) + def test_add_to_flow(self, mutations, count_cap, size_cap, expected_results): + """Test batching with various flow control settings""" + mutation_objs = [self._make_mutation(count=m[0], size=m[1]) for m in mutations] + instance = self._make_one(count_cap, size_cap) + i = 0 + for batch in instance.add_to_flow(mutation_objs): + expected_batch = expected_results[i] + assert len(batch) == len(expected_batch) + for j in range(len(expected_batch)): + assert len(batch[j].mutations) == expected_batch[j][0] + assert batch[j].size() == expected_batch[j][1] + instance.remove_from_flow(batch) + i += 1 + assert i == len(expected_results) + + @pytest.mark.parametrize( + "mutations,max_limit,expected_results", + [ + ([(1, 1)] * 11, 10, [[(1, 1)] * 10, [(1, 1)]]), + ([(1, 1)] * 10, 1, [[(1, 1)] for _ in range(10)]), + ([(1, 1)] * 10, 2, [[(1, 1), (1, 1)] for _ in range(5)]), + ], + ) + def test_add_to_flow_max_mutation_limits( + self, mutations, max_limit, expected_results + ): + """Test flow control running up against the max API limit + Should submit request early, even if the flow control has room for more""" + subpath = "_async" if CrossSync._Sync_Impl.is_async else "_sync_autogen" + path = f"google.cloud.bigtable.data.{subpath}.mutations_batcher._MUTATE_ROWS_REQUEST_MUTATION_LIMIT" + with mock.patch(path, max_limit): + mutation_objs = [ + self._make_mutation(count=m[0], size=m[1]) for m in 
mutations + ] + instance = self._make_one(float("inf"), float("inf")) + i = 0 + for batch in instance.add_to_flow(mutation_objs): + expected_batch = expected_results[i] + assert len(batch) == len(expected_batch) + for j in range(len(expected_batch)): + assert len(batch[j].mutations) == expected_batch[j][0] + assert batch[j].size() == expected_batch[j][1] + instance.remove_from_flow(batch) + i += 1 + assert i == len(expected_results) + + def test_add_to_flow_oversize(self): + """mutations over the flow control limits should still be accepted""" + instance = self._make_one(2, 3) + large_size_mutation = self._make_mutation(count=1, size=10) + large_count_mutation = self._make_mutation(count=10, size=1) + results = [out for out in instance.add_to_flow([large_size_mutation])] + assert len(results) == 1 + instance.remove_from_flow(results[0]) + count_results = [out for out in instance.add_to_flow(large_count_mutation)] + assert len(count_results) == 1 + + +class TestMutationsBatcher: + def _get_target_class(self): + return CrossSync._Sync_Impl.MutationsBatcher + + def _make_one(self, table=None, **kwargs): + from google.api_core.exceptions import DeadlineExceeded + from google.api_core.exceptions import ServiceUnavailable + + if table is None: + table = mock.Mock() + table.default_mutate_rows_operation_timeout = 10 + table.default_mutate_rows_attempt_timeout = 10 + table.default_mutate_rows_retryable_errors = ( + DeadlineExceeded, + ServiceUnavailable, + ) + return self._get_target_class()(table, **kwargs) + + @staticmethod + def _make_mutation(count=1, size=1): + mutation = mock.Mock() + mutation.size.return_value = size + mutation.mutations = [mock.Mock()] * count + return mutation + + def test_ctor_defaults(self): + with mock.patch.object( + self._get_target_class(), + "_timer_routine", + return_value=CrossSync._Sync_Impl.Future(), + ) as flush_timer_mock: + table = mock.Mock() + table.default_mutate_rows_operation_timeout = 10 + table.default_mutate_rows_attempt_timeout = 8 + table.default_mutate_rows_retryable_errors = [Exception] + with self._make_one(table) as instance: + assert instance._table == table + assert instance.closed is False + assert instance._flush_jobs == set() + assert len(instance._staged_entries) == 0 + assert len(instance._oldest_exceptions) == 0 + assert len(instance._newest_exceptions) == 0 + assert instance._exception_list_limit == 10 + assert instance._exceptions_since_last_raise == 0 + assert instance._flow_control._max_mutation_count == 100000 + assert instance._flow_control._max_mutation_bytes == 104857600 + assert instance._flow_control._in_flight_mutation_count == 0 + assert instance._flow_control._in_flight_mutation_bytes == 0 + assert instance._entries_processed_since_last_raise == 0 + assert ( + instance._operation_timeout + == table.default_mutate_rows_operation_timeout + ) + assert ( + instance._attempt_timeout + == table.default_mutate_rows_attempt_timeout + ) + assert ( + instance._retryable_errors + == table.default_mutate_rows_retryable_errors + ) + CrossSync._Sync_Impl.yield_to_event_loop() + assert flush_timer_mock.call_count == 1 + assert flush_timer_mock.call_args[0][0] == 5 + assert isinstance(instance._flush_timer, CrossSync._Sync_Impl.Future) + + def test_ctor_explicit(self): + """Test with explicit parameters""" + with mock.patch.object( + self._get_target_class(), + "_timer_routine", + return_value=CrossSync._Sync_Impl.Future(), + ) as flush_timer_mock: + table = mock.Mock() + flush_interval = 20 + flush_limit_count = 17 + flush_limit_bytes = 
19 + flow_control_max_mutation_count = 1001 + flow_control_max_bytes = 12 + operation_timeout = 11 + attempt_timeout = 2 + retryable_errors = [Exception] + with self._make_one( + table, + flush_interval=flush_interval, + flush_limit_mutation_count=flush_limit_count, + flush_limit_bytes=flush_limit_bytes, + flow_control_max_mutation_count=flow_control_max_mutation_count, + flow_control_max_bytes=flow_control_max_bytes, + batch_operation_timeout=operation_timeout, + batch_attempt_timeout=attempt_timeout, + batch_retryable_errors=retryable_errors, + ) as instance: + assert instance._table == table + assert instance.closed is False + assert instance._flush_jobs == set() + assert len(instance._staged_entries) == 0 + assert len(instance._oldest_exceptions) == 0 + assert len(instance._newest_exceptions) == 0 + assert instance._exception_list_limit == 10 + assert instance._exceptions_since_last_raise == 0 + assert ( + instance._flow_control._max_mutation_count + == flow_control_max_mutation_count + ) + assert ( + instance._flow_control._max_mutation_bytes == flow_control_max_bytes + ) + assert instance._flow_control._in_flight_mutation_count == 0 + assert instance._flow_control._in_flight_mutation_bytes == 0 + assert instance._entries_processed_since_last_raise == 0 + assert instance._operation_timeout == operation_timeout + assert instance._attempt_timeout == attempt_timeout + assert instance._retryable_errors == retryable_errors + CrossSync._Sync_Impl.yield_to_event_loop() + assert flush_timer_mock.call_count == 1 + assert flush_timer_mock.call_args[0][0] == flush_interval + assert isinstance(instance._flush_timer, CrossSync._Sync_Impl.Future) + + def test_ctor_no_flush_limits(self): + """Test with None for flush limits""" + with mock.patch.object( + self._get_target_class(), + "_timer_routine", + return_value=CrossSync._Sync_Impl.Future(), + ) as flush_timer_mock: + table = mock.Mock() + table.default_mutate_rows_operation_timeout = 10 + table.default_mutate_rows_attempt_timeout = 8 + table.default_mutate_rows_retryable_errors = () + flush_interval = None + flush_limit_count = None + flush_limit_bytes = None + with self._make_one( + table, + flush_interval=flush_interval, + flush_limit_mutation_count=flush_limit_count, + flush_limit_bytes=flush_limit_bytes, + ) as instance: + assert instance._table == table + assert instance.closed is False + assert instance._staged_entries == [] + assert len(instance._oldest_exceptions) == 0 + assert len(instance._newest_exceptions) == 0 + assert instance._exception_list_limit == 10 + assert instance._exceptions_since_last_raise == 0 + assert instance._flow_control._in_flight_mutation_count == 0 + assert instance._flow_control._in_flight_mutation_bytes == 0 + assert instance._entries_processed_since_last_raise == 0 + CrossSync._Sync_Impl.yield_to_event_loop() + assert flush_timer_mock.call_count == 1 + assert flush_timer_mock.call_args[0][0] is None + assert isinstance(instance._flush_timer, CrossSync._Sync_Impl.Future) + + def test_ctor_invalid_values(self): + """Test that timeout values are positive, and fit within expected limits""" + with pytest.raises(ValueError) as e: + self._make_one(batch_operation_timeout=-1) + assert "operation_timeout must be greater than 0" in str(e.value) + with pytest.raises(ValueError) as e: + self._make_one(batch_attempt_timeout=-1) + assert "attempt_timeout must be greater than 0" in str(e.value) + + def test_default_argument_consistency(self): + """We supply default arguments in MutationsBatcherAsync.__init__, and in + 
table.mutations_batcher. Make sure any changes to defaults are applied to + both places""" + import inspect + + get_batcher_signature = dict( + inspect.signature(CrossSync._Sync_Impl.Table.mutations_batcher).parameters + ) + get_batcher_signature.pop("self") + batcher_init_signature = dict( + inspect.signature(self._get_target_class()).parameters + ) + batcher_init_signature.pop("table") + assert len(get_batcher_signature.keys()) == len(batcher_init_signature.keys()) + assert len(get_batcher_signature) == 8 + assert set(get_batcher_signature.keys()) == set(batcher_init_signature.keys()) + for arg_name in get_batcher_signature.keys(): + assert ( + get_batcher_signature[arg_name].default + == batcher_init_signature[arg_name].default + ) + + @pytest.mark.parametrize("input_val", [None, 0, -1]) + def test__start_flush_timer_w_empty_input(self, input_val): + """Empty/invalid timer should return immediately""" + with mock.patch.object( + self._get_target_class(), "_schedule_flush" + ) as flush_mock: + with self._make_one() as instance: + (sleep_obj, sleep_method) = (instance._closed, "wait") + with mock.patch.object(sleep_obj, sleep_method) as sleep_mock: + result = instance._timer_routine(input_val) + assert sleep_mock.call_count == 0 + assert flush_mock.call_count == 0 + assert result is None + + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + def test__start_flush_timer_call_when_closed(self): + """closed batcher's timer should return immediately""" + with mock.patch.object( + self._get_target_class(), "_schedule_flush" + ) as flush_mock: + with self._make_one() as instance: + instance.close() + flush_mock.reset_mock() + (sleep_obj, sleep_method) = (instance._closed, "wait") + with mock.patch.object(sleep_obj, sleep_method) as sleep_mock: + instance._timer_routine(10) + assert sleep_mock.call_count == 0 + assert flush_mock.call_count == 0 + + @pytest.mark.parametrize("num_staged", [0, 1, 10]) + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + def test__flush_timer(self, num_staged): + """Timer should continue to call _schedule_flush in a loop""" + from google.cloud.bigtable.data._cross_sync import CrossSync + + with mock.patch.object( + self._get_target_class(), "_schedule_flush" + ) as flush_mock: + expected_sleep = 12 + with self._make_one(flush_interval=expected_sleep) as instance: + loop_num = 3 + instance._staged_entries = [mock.Mock()] * num_staged + with mock.patch.object( + CrossSync._Sync_Impl, "event_wait" + ) as sleep_mock: + sleep_mock.side_effect = [None] * loop_num + [TabError("expected")] + with pytest.raises(TabError): + self._get_target_class()._timer_routine( + instance, expected_sleep + ) + assert sleep_mock.call_count == loop_num + 1 + sleep_kwargs = sleep_mock.call_args[1] + assert sleep_kwargs["timeout"] == expected_sleep + assert flush_mock.call_count == (0 if num_staged == 0 else loop_num) + + def test__flush_timer_close(self): + """Timer should continue terminate after close""" + with mock.patch.object(self._get_target_class(), "_schedule_flush"): + with self._make_one() as instance: + assert instance._flush_timer.done() is False + instance.close() + assert instance._flush_timer.done() is True + + def test_append_closed(self): + """Should raise exception""" + instance = self._make_one() + instance.close() + with pytest.raises(RuntimeError): + instance.append(mock.Mock()) + + def test_append_wrong_mutation(self): + """Mutation objects should raise an exception. 
+ Only support RowMutationEntry""" + from google.cloud.bigtable.data.mutations import DeleteAllFromRow + + with self._make_one() as instance: + expected_error = "invalid mutation type: DeleteAllFromRow. Only RowMutationEntry objects are supported by batcher" + with pytest.raises(ValueError) as e: + instance.append(DeleteAllFromRow()) + assert str(e.value) == expected_error + + def test_append_outside_flow_limits(self): + """entries larger than mutation limits are still processed""" + with self._make_one( + flow_control_max_mutation_count=1, flow_control_max_bytes=1 + ) as instance: + oversized_entry = self._make_mutation(count=0, size=2) + instance.append(oversized_entry) + assert instance._staged_entries == [oversized_entry] + assert instance._staged_count == 0 + assert instance._staged_bytes == 2 + instance._staged_entries = [] + with self._make_one( + flow_control_max_mutation_count=1, flow_control_max_bytes=1 + ) as instance: + overcount_entry = self._make_mutation(count=2, size=0) + instance.append(overcount_entry) + assert instance._staged_entries == [overcount_entry] + assert instance._staged_count == 2 + assert instance._staged_bytes == 0 + instance._staged_entries = [] + + def test_append_flush_runs_after_limit_hit(self): + """If the user appends a bunch of entries above the flush limits back-to-back, + it should still flush in a single task""" + with mock.patch.object( + self._get_target_class(), "_execute_mutate_rows" + ) as op_mock: + with self._make_one(flush_limit_bytes=100) as instance: + + def mock_call(*args, **kwargs): + return [] + + op_mock.side_effect = mock_call + instance.append(self._make_mutation(size=99)) + num_entries = 10 + for _ in range(num_entries): + instance.append(self._make_mutation(size=1)) + instance._wait_for_batch_results(*instance._flush_jobs) + assert op_mock.call_count == 1 + sent_batch = op_mock.call_args[0][0] + assert len(sent_batch) == 2 + assert len(instance._staged_entries) == num_entries - 1 + + @pytest.mark.parametrize( + "flush_count,flush_bytes,mutation_count,mutation_bytes,expect_flush", + [ + (10, 10, 1, 1, False), + (10, 10, 9, 9, False), + (10, 10, 10, 1, True), + (10, 10, 1, 10, True), + (10, 10, 10, 10, True), + (1, 1, 10, 10, True), + (1, 1, 0, 0, False), + ], + ) + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + def test_append( + self, flush_count, flush_bytes, mutation_count, mutation_bytes, expect_flush + ): + """test appending different mutations, and checking if it causes a flush""" + with self._make_one( + flush_limit_mutation_count=flush_count, flush_limit_bytes=flush_bytes + ) as instance: + assert instance._staged_count == 0 + assert instance._staged_bytes == 0 + assert instance._staged_entries == [] + mutation = self._make_mutation(count=mutation_count, size=mutation_bytes) + with mock.patch.object(instance, "_schedule_flush") as flush_mock: + instance.append(mutation) + assert flush_mock.call_count == bool(expect_flush) + assert instance._staged_count == mutation_count + assert instance._staged_bytes == mutation_bytes + assert instance._staged_entries == [mutation] + instance._staged_entries = [] + + def test_append_multiple_sequentially(self): + """Append multiple mutations""" + with self._make_one( + flush_limit_mutation_count=8, flush_limit_bytes=8 + ) as instance: + assert instance._staged_count == 0 + assert instance._staged_bytes == 0 + assert instance._staged_entries == [] + mutation = self._make_mutation(count=2, size=3) + with mock.patch.object(instance, "_schedule_flush") as flush_mock: + 
instance.append(mutation) + assert flush_mock.call_count == 0 + assert instance._staged_count == 2 + assert instance._staged_bytes == 3 + assert len(instance._staged_entries) == 1 + instance.append(mutation) + assert flush_mock.call_count == 0 + assert instance._staged_count == 4 + assert instance._staged_bytes == 6 + assert len(instance._staged_entries) == 2 + instance.append(mutation) + assert flush_mock.call_count == 1 + assert instance._staged_count == 6 + assert instance._staged_bytes == 9 + assert len(instance._staged_entries) == 3 + instance._staged_entries = [] + + def test_flush_flow_control_concurrent_requests(self): + """requests should happen in parallel if flow control breaks up single flush into batches""" + import time + + num_calls = 10 + fake_mutations = [self._make_mutation(count=1) for _ in range(num_calls)] + with self._make_one(flow_control_max_mutation_count=1) as instance: + with mock.patch.object( + instance, "_execute_mutate_rows", CrossSync._Sync_Impl.Mock() + ) as op_mock: + + def mock_call(*args, **kwargs): + CrossSync._Sync_Impl.sleep(0.1) + return [] + + op_mock.side_effect = mock_call + start_time = time.monotonic() + instance._staged_entries = fake_mutations + instance._schedule_flush() + CrossSync._Sync_Impl.sleep(0.01) + for i in range(num_calls): + instance._flow_control.remove_from_flow( + [self._make_mutation(count=1)] + ) + CrossSync._Sync_Impl.sleep(0.01) + instance._wait_for_batch_results(*instance._flush_jobs) + duration = time.monotonic() - start_time + assert len(instance._oldest_exceptions) == 0 + assert len(instance._newest_exceptions) == 0 + assert duration < 0.5 + assert op_mock.call_count == num_calls + + def test_schedule_flush_no_mutations(self): + """schedule flush should return None if no staged mutations""" + with self._make_one() as instance: + with mock.patch.object(instance, "_flush_internal") as flush_mock: + for i in range(3): + assert instance._schedule_flush() is None + assert flush_mock.call_count == 0 + + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + def test_schedule_flush_with_mutations(self): + """if new mutations exist, should add a new flush task to _flush_jobs""" + with self._make_one() as instance: + with mock.patch.object(instance, "_flush_internal") as flush_mock: + flush_mock.side_effect = lambda x: time.sleep(0.1) + for i in range(1, 4): + mutation = mock.Mock() + instance._staged_entries = [mutation] + instance._schedule_flush() + assert instance._staged_entries == [] + asyncio.sleep(0) + assert instance._staged_entries == [] + assert instance._staged_count == 0 + assert instance._staged_bytes == 0 + assert flush_mock.call_count == 1 + flush_mock.reset_mock() + + def test__flush_internal(self): + """_flush_internal should: + - await previous flush call + - delegate batching to _flow_control + - call _execute_mutate_rows on each batch + - update self.exceptions and self._entries_processed_since_last_raise""" + num_entries = 10 + with self._make_one() as instance: + with mock.patch.object(instance, "_execute_mutate_rows") as execute_mock: + with mock.patch.object( + instance._flow_control, "add_to_flow" + ) as flow_mock: + + def gen(x): + yield x + + flow_mock.side_effect = lambda x: gen(x) + mutations = [self._make_mutation(count=1, size=1)] * num_entries + instance._flush_internal(mutations) + assert instance._entries_processed_since_last_raise == num_entries + assert execute_mock.call_count == 1 + assert flow_mock.call_count == 1 + instance._oldest_exceptions.clear() + instance._newest_exceptions.clear() + 
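These batcher tests exercise internals (_schedule_flush, _flush_internal, flow control). For orientation, here is a hedged sketch of the public surface they back, assuming `table` is a data-client Table and using illustrative row, family, and qualifier names:

```python
from google.cloud.bigtable.data.mutations import RowMutationEntry, SetCell

# entries are staged and flushed automatically once the flush limits are hit
with table.mutations_batcher(flush_limit_mutation_count=100) as batcher:
    for i in range(1000):
        batcher.append(
            RowMutationEntry(f"row-{i}", [SetCell("family", "qualifier", "value")])
        )
# leaving the context flushes any remaining entries; accumulated failures are
# raised as a MutationsExceptionGroup, mirroring _raise_exceptions in the tests below
```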
+ def test_flush_clears_job_list(self): + """a job should be added to _flush_jobs when _schedule_flush is called, + and removed when it completes""" + with self._make_one() as instance: + with mock.patch.object( + instance, "_flush_internal", CrossSync._Sync_Impl.Mock() + ) as flush_mock: + flush_mock.side_effect = lambda x: time.sleep(0.1) + mutations = [self._make_mutation(count=1, size=1)] + instance._staged_entries = mutations + assert instance._flush_jobs == set() + new_job = instance._schedule_flush() + assert instance._flush_jobs == {new_job} + new_job.result() + assert instance._flush_jobs == set() + + @pytest.mark.parametrize( + "num_starting,num_new_errors,expected_total_errors", + [ + (0, 0, 0), + (0, 1, 1), + (0, 2, 2), + (1, 0, 1), + (1, 1, 2), + (10, 2, 12), + (10, 20, 20), + ], + ) + def test__flush_internal_with_errors( + self, num_starting, num_new_errors, expected_total_errors + ): + """errors returned from _execute_mutate_rows should be added to internal exceptions""" + from google.cloud.bigtable.data import exceptions + + num_entries = 10 + expected_errors = [ + exceptions.FailedMutationEntryError(mock.Mock(), mock.Mock(), ValueError()) + ] * num_new_errors + with self._make_one() as instance: + instance._oldest_exceptions = [mock.Mock()] * num_starting + with mock.patch.object(instance, "_execute_mutate_rows") as execute_mock: + execute_mock.return_value = expected_errors + with mock.patch.object( + instance._flow_control, "add_to_flow" + ) as flow_mock: + + def gen(x): + yield x + + flow_mock.side_effect = lambda x: gen(x) + mutations = [self._make_mutation(count=1, size=1)] * num_entries + instance._flush_internal(mutations) + assert instance._entries_processed_since_last_raise == num_entries + assert execute_mock.call_count == 1 + assert flow_mock.call_count == 1 + found_exceptions = instance._oldest_exceptions + list( + instance._newest_exceptions + ) + assert len(found_exceptions) == expected_total_errors + for i in range(num_starting, expected_total_errors): + assert found_exceptions[i] == expected_errors[i - num_starting] + assert found_exceptions[i].index is None + instance._oldest_exceptions.clear() + instance._newest_exceptions.clear() + + def _mock_gapic_return(self, num=5): + from google.cloud.bigtable_v2.types import MutateRowsResponse + from google.rpc import status_pb2 + + def gen(num): + for i in range(num): + entry = MutateRowsResponse.Entry( + index=i, status=status_pb2.Status(code=0) + ) + yield MutateRowsResponse(entries=[entry]) + + return gen(num) + + def test_timer_flush_end_to_end(self): + """Flush should automatically trigger after flush_interval""" + num_mutations = 10 + mutations = [self._make_mutation(count=2, size=2)] * num_mutations + with self._make_one(flush_interval=0.05) as instance: + instance._table.default_operation_timeout = 10 + instance._table.default_attempt_timeout = 9 + with mock.patch.object( + instance._table.client._gapic_client, "mutate_rows" + ) as gapic_mock: + gapic_mock.side_effect = ( + lambda *args, **kwargs: self._mock_gapic_return(num_mutations) + ) + for m in mutations: + instance.append(m) + assert instance._entries_processed_since_last_raise == 0 + CrossSync._Sync_Impl.sleep(0.1) + assert instance._entries_processed_since_last_raise == num_mutations + + def test__execute_mutate_rows(self): + with mock.patch.object( + CrossSync._Sync_Impl, "_MutateRowsOperation" + ) as mutate_rows: + mutate_rows.return_value = CrossSync._Sync_Impl.Mock() + start_operation = mutate_rows().start + table = mock.Mock() + 
table.table_name = "test-table" + table.app_profile_id = "test-app-profile" + table.default_mutate_rows_operation_timeout = 17 + table.default_mutate_rows_attempt_timeout = 13 + table.default_mutate_rows_retryable_errors = () + with self._make_one(table) as instance: + batch = [self._make_mutation()] + result = instance._execute_mutate_rows(batch) + assert start_operation.call_count == 1 + (args, kwargs) = mutate_rows.call_args + assert args[0] == table.client._gapic_client + assert args[1] == table + assert args[2] == batch + kwargs["operation_timeout"] == 17 + kwargs["attempt_timeout"] == 13 + assert result == [] + + def test__execute_mutate_rows_returns_errors(self): + """Errors from operation should be retruned as list""" + from google.cloud.bigtable.data.exceptions import ( + MutationsExceptionGroup, + FailedMutationEntryError, + ) + + with mock.patch.object( + CrossSync._Sync_Impl._MutateRowsOperation, "start" + ) as mutate_rows: + err1 = FailedMutationEntryError(0, mock.Mock(), RuntimeError("test error")) + err2 = FailedMutationEntryError(1, mock.Mock(), RuntimeError("test error")) + mutate_rows.side_effect = MutationsExceptionGroup([err1, err2], 10) + table = mock.Mock() + table.default_mutate_rows_operation_timeout = 17 + table.default_mutate_rows_attempt_timeout = 13 + table.default_mutate_rows_retryable_errors = () + with self._make_one(table) as instance: + batch = [self._make_mutation()] + result = instance._execute_mutate_rows(batch) + assert len(result) == 2 + assert result[0] == err1 + assert result[1] == err2 + assert result[0].index is None + assert result[1].index is None + + def test__raise_exceptions(self): + """Raise exceptions and reset error state""" + from google.cloud.bigtable.data import exceptions + + expected_total = 1201 + expected_exceptions = [RuntimeError("mock")] * 3 + with self._make_one() as instance: + instance._oldest_exceptions = expected_exceptions + instance._entries_processed_since_last_raise = expected_total + try: + instance._raise_exceptions() + except exceptions.MutationsExceptionGroup as exc: + assert list(exc.exceptions) == expected_exceptions + assert str(expected_total) in str(exc) + assert instance._entries_processed_since_last_raise == 0 + (instance._oldest_exceptions, instance._newest_exceptions) = ([], []) + instance._raise_exceptions() + + def test___enter__(self): + """Should return self""" + with self._make_one() as instance: + assert instance.__enter__() == instance + + def test___exit__(self): + """aexit should call close""" + with self._make_one() as instance: + with mock.patch.object(instance, "close") as close_mock: + instance.__exit__(None, None, None) + assert close_mock.call_count == 1 + + def test_close(self): + """Should clean up all resources""" + with self._make_one() as instance: + with mock.patch.object(instance, "_schedule_flush") as flush_mock: + with mock.patch.object(instance, "_raise_exceptions") as raise_mock: + instance.close() + assert instance.closed is True + assert instance._flush_timer.done() is True + assert instance._flush_jobs == set() + assert flush_mock.call_count == 1 + assert raise_mock.call_count == 1 + + def test_close_w_exceptions(self): + """Raise exceptions on close""" + from google.cloud.bigtable.data import exceptions + + expected_total = 10 + expected_exceptions = [RuntimeError("mock")] + with self._make_one() as instance: + instance._oldest_exceptions = expected_exceptions + instance._entries_processed_since_last_raise = expected_total + try: + instance.close() + except 
exceptions.MutationsExceptionGroup as exc: + assert list(exc.exceptions) == expected_exceptions + assert str(expected_total) in str(exc) + assert instance._entries_processed_since_last_raise == 0 + (instance._oldest_exceptions, instance._newest_exceptions) = ([], []) + + def test__on_exit(self, recwarn): + """Should raise warnings if unflushed mutations exist""" + with self._make_one() as instance: + instance._on_exit() + assert len(recwarn) == 0 + num_left = 4 + instance._staged_entries = [mock.Mock()] * num_left + with pytest.warns(UserWarning) as w: + instance._on_exit() + assert len(w) == 1 + assert "unflushed mutations" in str(w[0].message).lower() + assert str(num_left) in str(w[0].message) + instance._closed.set() + instance._on_exit() + assert len(recwarn) == 0 + instance._staged_entries = [] + + def test_atexit_registration(self): + """Should run _on_exit on program termination""" + import atexit + + with mock.patch.object(atexit, "register") as register_mock: + assert register_mock.call_count == 0 + with self._make_one(): + assert register_mock.call_count == 1 + + def test_timeout_args_passed(self): + """batch_operation_timeout and batch_attempt_timeout should be used + in api calls""" + with mock.patch.object( + CrossSync._Sync_Impl, + "_MutateRowsOperation", + return_value=CrossSync._Sync_Impl.Mock(), + ) as mutate_rows: + expected_operation_timeout = 17 + expected_attempt_timeout = 13 + with self._make_one( + batch_operation_timeout=expected_operation_timeout, + batch_attempt_timeout=expected_attempt_timeout, + ) as instance: + assert instance._operation_timeout == expected_operation_timeout + assert instance._attempt_timeout == expected_attempt_timeout + instance._execute_mutate_rows([self._make_mutation()]) + assert mutate_rows.call_count == 1 + kwargs = mutate_rows.call_args[1] + assert kwargs["operation_timeout"] == expected_operation_timeout + assert kwargs["attempt_timeout"] == expected_attempt_timeout + + @pytest.mark.parametrize( + "limit,in_e,start_e,end_e", + [ + (10, 0, (10, 0), (10, 0)), + (1, 10, (0, 0), (1, 1)), + (10, 1, (0, 0), (1, 0)), + (10, 10, (0, 0), (10, 0)), + (10, 11, (0, 0), (10, 1)), + (3, 20, (0, 0), (3, 3)), + (10, 20, (0, 0), (10, 10)), + (10, 21, (0, 0), (10, 10)), + (2, 1, (2, 0), (2, 1)), + (2, 1, (1, 0), (2, 0)), + (2, 2, (1, 0), (2, 1)), + (3, 1, (3, 1), (3, 2)), + (3, 3, (3, 1), (3, 3)), + (1000, 5, (999, 0), (1000, 4)), + (1000, 5, (0, 0), (5, 0)), + (1000, 5, (1000, 0), (1000, 5)), + ], + ) + def test__add_exceptions(self, limit, in_e, start_e, end_e): + """Test that the _add_exceptions function properly updates the + _oldest_exceptions and _newest_exceptions lists + Args: + - limit: the _exception_list_limit representing the max size of either list + - in_e: size of list of exceptions to send to _add_exceptions + - start_e: a tuple of ints representing the initial sizes of _oldest_exceptions and _newest_exceptions + - end_e: a tuple of ints representing the expected sizes of _oldest_exceptions and _newest_exceptions + """ + from collections import deque + + input_list = [RuntimeError(f"mock {i}") for i in range(in_e)] + mock_batcher = mock.Mock() + mock_batcher._oldest_exceptions = [ + RuntimeError(f"starting mock {i}") for i in range(start_e[0]) + ] + mock_batcher._newest_exceptions = deque( + [RuntimeError(f"starting mock {i}") for i in range(start_e[1])], + maxlen=limit, + ) + mock_batcher._exception_list_limit = limit + mock_batcher._exceptions_since_last_raise = 0 + self._get_target_class()._add_exceptions(mock_batcher, input_list) + 
assert len(mock_batcher._oldest_exceptions) == end_e[0] + assert len(mock_batcher._newest_exceptions) == end_e[1] + assert mock_batcher._exceptions_since_last_raise == in_e + oldest_list_diff = end_e[0] - start_e[0] + newest_list_diff = min(max(in_e - oldest_list_diff, 0), limit) + for i in range(oldest_list_diff): + assert mock_batcher._oldest_exceptions[i + start_e[0]] == input_list[i] + for i in range(1, newest_list_diff + 1): + assert mock_batcher._newest_exceptions[-i] == input_list[-i] + + @pytest.mark.parametrize( + "input_retryables,expected_retryables", + [ + ( + TABLE_DEFAULT.READ_ROWS, + [ + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + core_exceptions.Aborted, + ], + ), + ( + TABLE_DEFAULT.DEFAULT, + [core_exceptions.DeadlineExceeded, core_exceptions.ServiceUnavailable], + ), + ( + TABLE_DEFAULT.MUTATE_ROWS, + [core_exceptions.DeadlineExceeded, core_exceptions.ServiceUnavailable], + ), + ([], []), + ([4], [core_exceptions.DeadlineExceeded]), + ], + ) + def test_customizable_retryable_errors(self, input_retryables, expected_retryables): + """Test that retryable functions support user-configurable arguments, and that the configured retryables are passed + down to the gapic layer.""" + with mock.patch.object( + google.api_core.retry, "if_exception_type" + ) as predicate_builder_mock: + with mock.patch.object( + CrossSync._Sync_Impl, "retry_target" + ) as retry_fn_mock: + table = None + with mock.patch("asyncio.create_task"): + table = CrossSync._Sync_Impl.Table(mock.Mock(), "instance", "table") + with self._make_one( + table, batch_retryable_errors=input_retryables + ) as instance: + assert instance._retryable_errors == expected_retryables + expected_predicate = expected_retryables.__contains__ + predicate_builder_mock.return_value = expected_predicate + retry_fn_mock.side_effect = RuntimeError("stop early") + mutation = self._make_mutation(count=1, size=1) + instance._execute_mutate_rows([mutation]) + predicate_builder_mock.assert_called_once_with( + *expected_retryables, _MutateRowsIncomplete + ) + retry_call_args = retry_fn_mock.call_args_list[0].args + assert retry_call_args[1] is expected_predicate + + def test_large_batch_write(self): + """Test that a large batch of mutations can be written""" + import math + + num_mutations = 10000 + flush_limit = 1000 + mutations = [self._make_mutation(count=1, size=1)] * num_mutations + with self._make_one(flush_limit_mutation_count=flush_limit) as instance: + operation_mock = mock.Mock() + rpc_call_mock = CrossSync._Sync_Impl.Mock() + operation_mock().start = rpc_call_mock + CrossSync._Sync_Impl._MutateRowsOperation = operation_mock + for m in mutations: + instance.append(m) + expected_calls = math.ceil(num_mutations / flush_limit) + assert rpc_call_mock.call_count == expected_calls + assert instance._entries_processed_since_last_raise == num_mutations + assert len(instance._staged_entries) == 0 diff --git a/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_read_rows_acceptance.py b/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_read_rows_acceptance.py new file mode 100644 index 000000000000..8ceb0daf764d --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_read_rows_acceptance.py @@ -0,0 +1,328 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This file is automatically generated by CrossSync. Do not edit manually. + +from __future__ import annotations +import os +import warnings +import pytest +import mock +from itertools import zip_longest +from google.cloud.bigtable_v2 import ReadRowsResponse +from google.cloud.bigtable.data.exceptions import InvalidChunk +from google.cloud.bigtable.data.row import Row +from ...v2_client.test_row_merger import ReadRowsTest, TestFile +from google.cloud.bigtable.data._cross_sync import CrossSync + + +class TestReadRowsAcceptance: + @staticmethod + def _get_operation_class(): + return CrossSync._Sync_Impl._ReadRowsOperation + + @staticmethod + def _get_client_class(): + return CrossSync._Sync_Impl.DataClient + + def parse_readrows_acceptance_tests(): + dirname = os.path.dirname(__file__) + filename = os.path.join(dirname, "../read-rows-acceptance-test.json") + with open(filename) as json_file: + test_json = TestFile.from_json(json_file.read()) + return test_json.read_rows_tests + + @staticmethod + def extract_results_from_row(row: Row): + results = [] + for family, col, cells in row.items(): + for cell in cells: + results.append( + ReadRowsTest.Result( + row_key=row.row_key, + family_name=family, + qualifier=col, + timestamp_micros=cell.timestamp_ns // 1000, + value=cell.value, + label=cell.labels[0] if cell.labels else "", + ) + ) + return results + + @staticmethod + def _coro_wrapper(stream): + return stream + + def _process_chunks(self, *chunks): + def _row_stream(): + yield ReadRowsResponse(chunks=chunks) + + instance = mock.Mock() + instance._remaining_count = None + instance._last_yielded_row_key = None + chunker = self._get_operation_class().chunk_stream( + instance, self._coro_wrapper(_row_stream()) + ) + merger = self._get_operation_class().merge_rows(chunker) + results = [] + for row in merger: + results.append(row) + return results + + @pytest.mark.parametrize( + "test_case", parse_readrows_acceptance_tests(), ids=lambda t: t.description + ) + def test_row_merger_scenario(self, test_case: ReadRowsTest): + def _scenerio_stream(): + for chunk in test_case.chunks: + yield ReadRowsResponse(chunks=[chunk]) + + try: + results = [] + instance = mock.Mock() + instance._last_yielded_row_key = None + instance._remaining_count = None + chunker = self._get_operation_class().chunk_stream( + instance, self._coro_wrapper(_scenerio_stream()) + ) + merger = self._get_operation_class().merge_rows(chunker) + for row in merger: + for cell in row: + cell_result = ReadRowsTest.Result( + row_key=cell.row_key, + family_name=cell.family, + qualifier=cell.qualifier, + timestamp_micros=cell.timestamp_micros, + value=cell.value, + label=cell.labels[0] if cell.labels else "", + ) + results.append(cell_result) + except InvalidChunk: + results.append(ReadRowsTest.Result(error=True)) + for expected, actual in zip_longest(test_case.results, results): + assert actual == expected + + @pytest.mark.parametrize( + "test_case", parse_readrows_acceptance_tests(), ids=lambda t: t.description + ) + def test_read_rows_scenario(self, test_case: ReadRowsTest): + def _make_gapic_stream(chunk_list: 
list[ReadRowsResponse]): + from google.cloud.bigtable_v2 import ReadRowsResponse + + class mock_stream: + def __init__(self, chunk_list): + self.chunk_list = chunk_list + self.idx = -1 + + def __aiter__(self): + return self + + def __iter__(self): + return self + + def __anext__(self): + self.idx += 1 + if len(self.chunk_list) > self.idx: + chunk = self.chunk_list[self.idx] + return ReadRowsResponse(chunks=[chunk]) + raise CrossSync._Sync_Impl.StopIteration + + def __next__(self): + return self.__anext__() + + def cancel(self): + pass + + return mock_stream(chunk_list) + + with mock.patch.dict(os.environ, {"BIGTABLE_EMULATOR_HOST": "localhost"}): + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + client = self._get_client_class()() + try: + table = client.get_table("instance", "table") + results = [] + with mock.patch.object( + table.client._gapic_client, "read_rows" + ) as read_rows: + read_rows.return_value = _make_gapic_stream(test_case.chunks) + for row in table.read_rows_stream(query={}): + for cell in row: + cell_result = ReadRowsTest.Result( + row_key=cell.row_key, + family_name=cell.family, + qualifier=cell.qualifier, + timestamp_micros=cell.timestamp_micros, + value=cell.value, + label=cell.labels[0] if cell.labels else "", + ) + results.append(cell_result) + except InvalidChunk: + results.append(ReadRowsTest.Result(error=True)) + finally: + client.close() + for expected, actual in zip_longest(test_case.results, results): + assert actual == expected + + def test_out_of_order_rows(self): + def _row_stream(): + yield ReadRowsResponse(last_scanned_row_key=b"a") + + instance = mock.Mock() + instance._remaining_count = None + instance._last_yielded_row_key = b"b" + chunker = self._get_operation_class().chunk_stream( + instance, self._coro_wrapper(_row_stream()) + ) + merger = self._get_operation_class().merge_rows(chunker) + with pytest.raises(InvalidChunk): + for _ in merger: + pass + + def test_bare_reset(self): + first_chunk = ReadRowsResponse.CellChunk( + ReadRowsResponse.CellChunk( + row_key=b"a", family_name="f", qualifier=b"q", value=b"v" + ) + ) + with pytest.raises(InvalidChunk): + self._process_chunks( + first_chunk, + ReadRowsResponse.CellChunk( + ReadRowsResponse.CellChunk(reset_row=True, row_key=b"a") + ), + ) + with pytest.raises(InvalidChunk): + self._process_chunks( + first_chunk, + ReadRowsResponse.CellChunk( + ReadRowsResponse.CellChunk(reset_row=True, family_name="f") + ), + ) + with pytest.raises(InvalidChunk): + self._process_chunks( + first_chunk, + ReadRowsResponse.CellChunk( + ReadRowsResponse.CellChunk(reset_row=True, qualifier=b"q") + ), + ) + with pytest.raises(InvalidChunk): + self._process_chunks( + first_chunk, + ReadRowsResponse.CellChunk( + ReadRowsResponse.CellChunk(reset_row=True, timestamp_micros=1000) + ), + ) + with pytest.raises(InvalidChunk): + self._process_chunks( + first_chunk, + ReadRowsResponse.CellChunk( + ReadRowsResponse.CellChunk(reset_row=True, labels=["a"]) + ), + ) + with pytest.raises(InvalidChunk): + self._process_chunks( + first_chunk, + ReadRowsResponse.CellChunk( + ReadRowsResponse.CellChunk(reset_row=True, value=b"v") + ), + ) + + def test_missing_family(self): + with pytest.raises(InvalidChunk): + self._process_chunks( + ReadRowsResponse.CellChunk( + row_key=b"a", + qualifier=b"q", + timestamp_micros=1000, + value=b"v", + commit_row=True, + ) + ) + + def test_mid_cell_row_key_change(self): + with pytest.raises(InvalidChunk): + self._process_chunks( + ReadRowsResponse.CellChunk( + row_key=b"a", + 
family_name="f", + qualifier=b"q", + timestamp_micros=1000, + value_size=2, + value=b"v", + ), + ReadRowsResponse.CellChunk(row_key=b"b", value=b"v", commit_row=True), + ) + + def test_mid_cell_family_change(self): + with pytest.raises(InvalidChunk): + self._process_chunks( + ReadRowsResponse.CellChunk( + row_key=b"a", + family_name="f", + qualifier=b"q", + timestamp_micros=1000, + value_size=2, + value=b"v", + ), + ReadRowsResponse.CellChunk( + family_name="f2", value=b"v", commit_row=True + ), + ) + + def test_mid_cell_qualifier_change(self): + with pytest.raises(InvalidChunk): + self._process_chunks( + ReadRowsResponse.CellChunk( + row_key=b"a", + family_name="f", + qualifier=b"q", + timestamp_micros=1000, + value_size=2, + value=b"v", + ), + ReadRowsResponse.CellChunk( + qualifier=b"q2", value=b"v", commit_row=True + ), + ) + + def test_mid_cell_timestamp_change(self): + with pytest.raises(InvalidChunk): + self._process_chunks( + ReadRowsResponse.CellChunk( + row_key=b"a", + family_name="f", + qualifier=b"q", + timestamp_micros=1000, + value_size=2, + value=b"v", + ), + ReadRowsResponse.CellChunk( + timestamp_micros=2000, value=b"v", commit_row=True + ), + ) + + def test_mid_cell_labels_change(self): + with pytest.raises(InvalidChunk): + self._process_chunks( + ReadRowsResponse.CellChunk( + row_key=b"a", + family_name="f", + qualifier=b"q", + timestamp_micros=1000, + value_size=2, + value=b"v", + ), + ReadRowsResponse.CellChunk(labels=["b"], value=b"v", commit_row=True), + ) diff --git a/packages/google-cloud-bigtable/tests/unit/data/execute_query/_async/test_query_iterator.py b/packages/google-cloud-bigtable/tests/unit/data/execute_query/_async/test_query_iterator.py index 9bdf17c27fff..ea93fed552c5 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/execute_query/_async/test_query_iterator.py +++ b/packages/google-cloud-bigtable/tests/unit/data/execute_query/_async/test_query_iterator.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2024 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/packages/google-cloud-bigtable/tests/unit/data/execute_query/_sync_autogen/__init__.py b/packages/google-cloud-bigtable/tests/unit/data/execute_query/_sync_autogen/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/packages/google-cloud-bigtable/tests/unit/data/execute_query/_sync_autogen/test_query_iterator.py b/packages/google-cloud-bigtable/tests/unit/data/execute_query/_sync_autogen/test_query_iterator.py new file mode 100644 index 000000000000..77a28ea92d1e --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/data/execute_query/_sync_autogen/test_query_iterator.py @@ -0,0 +1,163 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# This file is automatically generated by CrossSync. Do not edit manually. 
+ +import pytest +import concurrent.futures +from google.cloud.bigtable_v2.types.bigtable import ExecuteQueryResponse +from .._testing import TYPE_INT, split_bytes_into_chunks, proto_rows_bytes +from google.cloud.bigtable.data._cross_sync import CrossSync + +try: + from unittest import mock +except ImportError: + import mock + + +class MockIterator: + def __init__(self, values, delay=None): + self._values = values + self.idx = 0 + self._delay = delay + + def __iter__(self): + return self + + def __next__(self): + if self.idx >= len(self._values): + raise CrossSync._Sync_Impl.StopIteration + if self._delay is not None: + CrossSync._Sync_Impl.sleep(self._delay) + value = self._values[self.idx] + self.idx += 1 + return value + + +class TestQueryIterator: + @staticmethod + def _target_class(): + return CrossSync._Sync_Impl.ExecuteQueryIterator + + def _make_one(self, *args, **kwargs): + return self._target_class()(*args, **kwargs) + + @pytest.fixture + def proto_byte_stream(self): + proto_rows = [ + proto_rows_bytes({"int_value": 1}, {"int_value": 2}), + proto_rows_bytes({"int_value": 3}, {"int_value": 4}), + proto_rows_bytes({"int_value": 5}, {"int_value": 6}), + ] + messages = [ + *split_bytes_into_chunks(proto_rows[0], num_chunks=2), + *split_bytes_into_chunks(proto_rows[1], num_chunks=3), + proto_rows[2], + ] + stream = [ + ExecuteQueryResponse( + metadata={ + "proto_schema": { + "columns": [ + {"name": "test1", "type_": TYPE_INT}, + {"name": "test2", "type_": TYPE_INT}, + ] + } + } + ), + ExecuteQueryResponse( + results={"proto_rows_batch": {"batch_data": messages[0]}} + ), + ExecuteQueryResponse( + results={ + "proto_rows_batch": {"batch_data": messages[1]}, + "resume_token": b"token1", + } + ), + ExecuteQueryResponse( + results={"proto_rows_batch": {"batch_data": messages[2]}} + ), + ExecuteQueryResponse( + results={"proto_rows_batch": {"batch_data": messages[3]}} + ), + ExecuteQueryResponse( + results={ + "proto_rows_batch": {"batch_data": messages[4]}, + "resume_token": b"token2", + } + ), + ExecuteQueryResponse( + results={ + "proto_rows_batch": {"batch_data": messages[5]}, + "resume_token": b"token3", + } + ), + ] + return stream + + def test_iterator(self, proto_byte_stream): + client_mock = mock.Mock() + client_mock._register_instance = CrossSync._Sync_Impl.Mock() + client_mock._remove_instance_registration = CrossSync._Sync_Impl.Mock() + client_mock._executor = concurrent.futures.ThreadPoolExecutor() + mock_async_iterator = MockIterator(proto_byte_stream) + iterator = None + with mock.patch.object( + CrossSync._Sync_Impl, + "retry_target_stream", + return_value=mock_async_iterator, + ): + iterator = self._make_one( + client=client_mock, + instance_id="test-instance", + app_profile_id="test_profile", + request_body={}, + attempt_timeout=10, + operation_timeout=10, + req_metadata=(), + retryable_excs=[], + ) + result = [] + for value in iterator: + result.append(tuple(value)) + assert result == [(1, 2), (3, 4), (5, 6)] + assert iterator.is_closed + client_mock._register_instance.assert_called_once() + client_mock._remove_instance_registration.assert_called_once() + assert mock_async_iterator.idx == len(proto_byte_stream) + + def test_iterator_awaits_metadata(self, proto_byte_stream): + client_mock = mock.Mock() + client_mock._register_instance = CrossSync._Sync_Impl.Mock() + client_mock._remove_instance_registration = CrossSync._Sync_Impl.Mock() + mock_async_iterator = MockIterator(proto_byte_stream) + iterator = None + with mock.patch.object( + CrossSync._Sync_Impl, + 
"retry_target_stream", + return_value=mock_async_iterator, + ): + iterator = self._make_one( + client=client_mock, + instance_id="test-instance", + app_profile_id="test_profile", + request_body={}, + attempt_timeout=10, + operation_timeout=10, + req_metadata=(), + retryable_excs=[], + ) + iterator.metadata() + assert mock_async_iterator.idx == 1 diff --git a/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_execute_query_parameters_parsing.py b/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_execute_query_parameters_parsing.py index 914a0920ab41..f7159fb71beb 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_execute_query_parameters_parsing.py +++ b/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_execute_query_parameters_parsing.py @@ -47,7 +47,7 @@ (b"3", "bytes_value", "bytes_type", b"3"), (True, "bool_value", "bool_type", True), ( - datetime.datetime.fromtimestamp(timestamp), + datetime.datetime.fromtimestamp(timestamp, tz=datetime.timezone.utc), "timestamp_value", "timestamp_type", dt_nanos_zero, diff --git a/packages/google-cloud-bigtable/tests/unit/data/test__helpers.py b/packages/google-cloud-bigtable/tests/unit/data/test__helpers.py index 58889026522b..39db0668991d 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/test__helpers.py +++ b/packages/google-cloud-bigtable/tests/unit/data/test__helpers.py @@ -81,7 +81,7 @@ def test_attempt_timeout_w_sleeps(self): sleep_time = 0.1 for i in range(3): found_value = next(generator) - assert abs(found_value - expected_value) < 0.001 + assert abs(found_value - expected_value) < 0.1 sleep(sleep_time) expected_value -= sleep_time From 3635195d569601bd96e36770b6c076ffab9b3667 Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Wed, 18 Dec 2024 15:31:03 -0600 Subject: [PATCH 840/892] chore(tests): sync client verification tests (#1046) --- .../.cross_sync/README.md | 2 + .../.github/workflows/conformance.yaml | 12 +- .../.kokoro/conformance.sh | 10 +- packages/google-cloud-bigtable/noxfile.py | 2 +- .../test_proxy/README.md | 2 +- .../client_handler_data_sync_autogen.py | 185 ++++++++++++++++++ .../test_proxy/run_tests.sh | 17 +- .../test_proxy/test_proxy.py | 5 +- .../tests/unit/data/test_sync_up_to_date.py | 99 ++++++++++ 9 files changed, 319 insertions(+), 15 deletions(-) create mode 100644 packages/google-cloud-bigtable/test_proxy/handlers/client_handler_data_sync_autogen.py create mode 100644 packages/google-cloud-bigtable/tests/unit/data/test_sync_up_to_date.py diff --git a/packages/google-cloud-bigtable/.cross_sync/README.md b/packages/google-cloud-bigtable/.cross_sync/README.md index 18a9aafdf6ce..0d8a1cf8c2c1 100644 --- a/packages/google-cloud-bigtable/.cross_sync/README.md +++ b/packages/google-cloud-bigtable/.cross_sync/README.md @@ -66,6 +66,8 @@ Generation can be initiated using `nox -s generate_sync` from the root of the project. This will find all classes with the `__CROSS_SYNC_OUTPUT__ = "path/to/output"` annotation, and generate a sync version of classes marked with `@CrossSync.convert_sync` at the output path. 
+There is a unit test at `tests/unit/data/test_sync_up_to_date.py` that verifies that the generated code is up to date + ## Architecture CrossSync is made up of two parts: diff --git a/packages/google-cloud-bigtable/.github/workflows/conformance.yaml b/packages/google-cloud-bigtable/.github/workflows/conformance.yaml index 448e1cc3a2da..8445240c3ea9 100644 --- a/packages/google-cloud-bigtable/.github/workflows/conformance.yaml +++ b/packages/google-cloud-bigtable/.github/workflows/conformance.yaml @@ -26,7 +26,15 @@ jobs: matrix: test-version: [ "v0.0.2" ] py-version: [ 3.8 ] - client-type: [ "async", "legacy" ] + client-type: [ "async", "sync", "legacy" ] + include: + - client-type: "sync" + # sync client does not support concurrent streams + test_args: "-skip _Generic_MultiStream" + - client-type: "legacy" + # legacy client is synchronous and does not support concurrent streams + # legacy client does not expose mutate_row. Disable those tests + test_args: "-skip _Generic_MultiStream -skip TestMutateRow_" fail-fast: false name: "${{ matrix.client-type }} client / python ${{ matrix.py-version }} / test tag ${{ matrix.test-version }}" steps: @@ -53,4 +61,6 @@ jobs: env: CLIENT_TYPE: ${{ matrix.client-type }} PYTHONUNBUFFERED: 1 + TEST_ARGS: ${{ matrix.test_args }} + PROXY_PORT: 9999 diff --git a/packages/google-cloud-bigtable/.kokoro/conformance.sh b/packages/google-cloud-bigtable/.kokoro/conformance.sh index e85fc1394cd7..fd585142ec27 100644 --- a/packages/google-cloud-bigtable/.kokoro/conformance.sh +++ b/packages/google-cloud-bigtable/.kokoro/conformance.sh @@ -19,16 +19,7 @@ set -eo pipefail ## cd to the parent directory, i.e. the root of the git repo cd $(dirname $0)/.. -PROXY_ARGS="" -TEST_ARGS="" -if [[ "${CLIENT_TYPE^^}" == "LEGACY" ]]; then - echo "Using legacy client" - # legacy client does not expose mutate_row. Disable those tests - TEST_ARGS="-skip TestMutateRow_" -fi - # Build and start the proxy in a separate process -PROXY_PORT=9999 pushd test_proxy nohup python test_proxy.py --port $PROXY_PORT --client_type=$CLIENT_TYPE & proxyPID=$! @@ -42,6 +33,7 @@ function cleanup() { trap cleanup EXIT # Run the conformance test +echo "running tests with args: $TEST_ARGS" pushd cloud-bigtable-clients-test/tests eval "go test -v -proxy_addr=:$PROXY_PORT $TEST_ARGS" RETURN_CODE=$? diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index 8576fed85de6..548bfd0ec97c 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -298,7 +298,7 @@ def system_emulated(session): @nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS) -@nox.parametrize("client_type", ["async"]) +@nox.parametrize("client_type", ["async", "sync", "legacy"]) def conformance(session, client_type): # install dependencies constraints_path = str( diff --git a/packages/google-cloud-bigtable/test_proxy/README.md b/packages/google-cloud-bigtable/test_proxy/README.md index 266fba7cd6eb..5c87c729a93d 100644 --- a/packages/google-cloud-bigtable/test_proxy/README.md +++ b/packages/google-cloud-bigtable/test_proxy/README.md @@ -31,7 +31,7 @@ python test_proxy.py --port 8080 ``` By default, the test_proxy targets the async client. You can change this by passing in the `--client_type` flag. -Valid options are `async` and `legacy`. +Valid options are `async`, `sync`, and `legacy`. 
``` python test_proxy.py --client_type=legacy diff --git a/packages/google-cloud-bigtable/test_proxy/handlers/client_handler_data_sync_autogen.py b/packages/google-cloud-bigtable/test_proxy/handlers/client_handler_data_sync_autogen.py new file mode 100644 index 000000000000..eabae0ffa2a7 --- /dev/null +++ b/packages/google-cloud-bigtable/test_proxy/handlers/client_handler_data_sync_autogen.py @@ -0,0 +1,185 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This file is automatically generated by CrossSync. Do not edit manually. + +""" +This module contains the client handler process for proxy_server.py. +""" +import os +from google.cloud.environment_vars import BIGTABLE_EMULATOR +from google.cloud.bigtable.data._cross_sync import CrossSync +from client_handler_data_async import error_safe + + +class TestProxyClientHandler: + """ + Implements the same methods as the grpc server, but handles the client + library side of the request. + + Requests received in TestProxyGrpcServer are converted to a dictionary, + and supplied to the TestProxyClientHandler methods as kwargs. + The client response is then returned back to the TestProxyGrpcServer + """ + + def __init__( + self, + data_target=None, + project_id=None, + instance_id=None, + app_profile_id=None, + per_operation_timeout=None, + **kwargs + ): + self.closed = False + os.environ[BIGTABLE_EMULATOR] = data_target + self.client = CrossSync._Sync_Impl.DataClient(project=project_id) + self.instance_id = instance_id + self.app_profile_id = app_profile_id + self.per_operation_timeout = per_operation_timeout + + def close(self): + self.closed = True + + @error_safe + async def ReadRows(self, request, **kwargs): + table_id = request.pop("table_name").split("/")[-1] + app_profile_id = self.app_profile_id or request.get("app_profile_id", None) + table = self.client.get_table(self.instance_id, table_id, app_profile_id) + kwargs["operation_timeout"] = ( + kwargs.get("operation_timeout", self.per_operation_timeout) or 20 + ) + result_list = table.read_rows(request, **kwargs) + serialized_response = [row._to_dict() for row in result_list] + return serialized_response + + @error_safe + async def ReadRow(self, row_key, **kwargs): + table_id = kwargs.pop("table_name").split("/")[-1] + app_profile_id = self.app_profile_id or kwargs.get("app_profile_id", None) + table = self.client.get_table(self.instance_id, table_id, app_profile_id) + kwargs["operation_timeout"] = ( + kwargs.get("operation_timeout", self.per_operation_timeout) or 20 + ) + result_row = table.read_row(row_key, **kwargs) + if result_row: + return result_row._to_dict() + else: + return "None" + + @error_safe + async def MutateRow(self, request, **kwargs): + from google.cloud.bigtable.data.mutations import Mutation + + table_id = request["table_name"].split("/")[-1] + app_profile_id = self.app_profile_id or request.get("app_profile_id", None) + table = self.client.get_table(self.instance_id, table_id, app_profile_id) + kwargs["operation_timeout"] = ( + 
kwargs.get("operation_timeout", self.per_operation_timeout) or 20 + ) + row_key = request["row_key"] + mutations = [Mutation._from_dict(d) for d in request["mutations"]] + table.mutate_row(row_key, mutations, **kwargs) + return "OK" + + @error_safe + async def BulkMutateRows(self, request, **kwargs): + from google.cloud.bigtable.data.mutations import RowMutationEntry + + table_id = request["table_name"].split("/")[-1] + app_profile_id = self.app_profile_id or request.get("app_profile_id", None) + table = self.client.get_table(self.instance_id, table_id, app_profile_id) + kwargs["operation_timeout"] = ( + kwargs.get("operation_timeout", self.per_operation_timeout) or 20 + ) + entry_list = [ + RowMutationEntry._from_dict(entry) for entry in request["entries"] + ] + table.bulk_mutate_rows(entry_list, **kwargs) + return "OK" + + @error_safe + async def CheckAndMutateRow(self, request, **kwargs): + from google.cloud.bigtable.data.mutations import Mutation, SetCell + + table_id = request["table_name"].split("/")[-1] + app_profile_id = self.app_profile_id or request.get("app_profile_id", None) + table = self.client.get_table(self.instance_id, table_id, app_profile_id) + kwargs["operation_timeout"] = ( + kwargs.get("operation_timeout", self.per_operation_timeout) or 20 + ) + row_key = request["row_key"] + true_mutations = [] + for mut_dict in request.get("true_mutations", []): + try: + true_mutations.append(Mutation._from_dict(mut_dict)) + except ValueError: + mutation = SetCell("", "", "", 0) + true_mutations.append(mutation) + false_mutations = [] + for mut_dict in request.get("false_mutations", []): + try: + false_mutations.append(Mutation._from_dict(mut_dict)) + except ValueError: + false_mutations.append(SetCell("", "", "", 0)) + predicate_filter = request.get("predicate_filter", None) + result = table.check_and_mutate_row( + row_key, + predicate_filter, + true_case_mutations=true_mutations, + false_case_mutations=false_mutations, + **kwargs + ) + return result + + @error_safe + async def ReadModifyWriteRow(self, request, **kwargs): + from google.cloud.bigtable.data.read_modify_write_rules import IncrementRule + from google.cloud.bigtable.data.read_modify_write_rules import AppendValueRule + + table_id = request["table_name"].split("/")[-1] + app_profile_id = self.app_profile_id or request.get("app_profile_id", None) + table = self.client.get_table(self.instance_id, table_id, app_profile_id) + kwargs["operation_timeout"] = ( + kwargs.get("operation_timeout", self.per_operation_timeout) or 20 + ) + row_key = request["row_key"] + rules = [] + for rule_dict in request.get("rules", []): + qualifier = rule_dict["column_qualifier"] + if "append_value" in rule_dict: + new_rule = AppendValueRule( + rule_dict["family_name"], qualifier, rule_dict["append_value"] + ) + else: + new_rule = IncrementRule( + rule_dict["family_name"], qualifier, rule_dict["increment_amount"] + ) + rules.append(new_rule) + result = table.read_modify_write_row(row_key, rules, **kwargs) + if result: + return result._to_dict() + else: + return "None" + + @error_safe + async def SampleRowKeys(self, request, **kwargs): + table_id = request["table_name"].split("/")[-1] + app_profile_id = self.app_profile_id or request.get("app_profile_id", None) + table = self.client.get_table(self.instance_id, table_id, app_profile_id) + kwargs["operation_timeout"] = ( + kwargs.get("operation_timeout", self.per_operation_timeout) or 20 + ) + result = table.sample_row_keys(**kwargs) + return result diff --git 
a/packages/google-cloud-bigtable/test_proxy/run_tests.sh b/packages/google-cloud-bigtable/test_proxy/run_tests.sh index c2e9c6312041..b6f1291a6b79 100755 --- a/packages/google-cloud-bigtable/test_proxy/run_tests.sh +++ b/packages/google-cloud-bigtable/test_proxy/run_tests.sh @@ -27,7 +27,7 @@ fi SCRIPT_DIR=$(realpath $(dirname "$0")) cd $SCRIPT_DIR -export PROXY_SERVER_PORT=50055 +export PROXY_SERVER_PORT=$(shuf -i 50000-60000 -n 1) # download test suite if [ ! -d "cloud-bigtable-clients-test" ]; then @@ -43,6 +43,19 @@ function finish { } trap finish EXIT +if [[ $CLIENT_TYPE == "legacy" ]]; then + echo "Using legacy client" + # legacy client does not expose mutate_row. Disable those tests + TEST_ARGS="-skip TestMutateRow_" +fi + +if [[ $CLIENT_TYPE != "async" ]]; then + echo "Using legacy client" + # sync and legacy client do not support concurrent streams + TEST_ARGS="$TEST_ARGS -skip _Generic_MultiStream " +fi + # run tests pushd cloud-bigtable-clients-test/tests -go test -v -proxy_addr=:$PROXY_SERVER_PORT +echo "Running with $TEST_ARGS" +go test -v -proxy_addr=:$PROXY_SERVER_PORT $TEST_ARGS diff --git a/packages/google-cloud-bigtable/test_proxy/test_proxy.py b/packages/google-cloud-bigtable/test_proxy/test_proxy.py index 9e03f1e5cf49..7935007688d4 100644 --- a/packages/google-cloud-bigtable/test_proxy/test_proxy.py +++ b/packages/google-cloud-bigtable/test_proxy/test_proxy.py @@ -114,6 +114,9 @@ def format_dict(input_obj): if client_type == "legacy": import client_handler_legacy client = client_handler_legacy.LegacyTestProxyClientHandler(**json_data) + elif client_type == "sync": + import client_handler_data_sync_autogen + client = client_handler_data_sync_autogen.TestProxyClientHandler(**json_data) else: client = client_handler_data_async.TestProxyClientHandlerAsync(**json_data) client_map[client_id] = client @@ -150,7 +153,7 @@ def client_handler_process(request_q, queue_pool, client_type="async"): p = argparse.ArgumentParser() p.add_argument("--port", dest='port', default="50055") -p.add_argument("--client_type", dest='client_type', default="async", choices=["async", "legacy"]) +p.add_argument("--client_type", dest='client_type', default="async", choices=["async", "sync", "legacy"]) if __name__ == "__main__": port = p.parse_args().port diff --git a/packages/google-cloud-bigtable/tests/unit/data/test_sync_up_to_date.py b/packages/google-cloud-bigtable/tests/unit/data/test_sync_up_to_date.py new file mode 100644 index 000000000000..492d35ddf0fc --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/data/test_sync_up_to_date.py @@ -0,0 +1,99 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import os +import sys +import hashlib +import pytest +import ast +import re +from difflib import unified_diff + +# add cross_sync to path +test_dir_name = os.path.dirname(__file__) +repo_root = os.path.join(test_dir_name, "..", "..", "..") +cross_sync_path = os.path.join(repo_root, ".cross_sync") +sys.path.append(cross_sync_path) + +from generate import convert_files_in_dir, CrossSyncOutputFile # noqa: E402 + +sync_files = list(convert_files_in_dir(repo_root)) + + +def test_found_files(): + """ + Make sure sync_test is populated with some of the files we expect to see, + to ensure that later tests are actually running. + """ + assert len(sync_files) > 0, "No sync files found" + assert len(sync_files) > 10, "Unexpectedly few sync files found" + # test for key files + outputs = [os.path.basename(f.output_path) for f in sync_files] + assert "client.py" in outputs + assert "execute_query_iterator.py" in outputs + assert "test_client.py" in outputs + assert "test_system_autogen.py" in outputs, "system tests not found" + assert ( + "client_handler_data_sync_autogen.py" in outputs + ), "test proxy handler not found" + + +@pytest.mark.skipif( + sys.version_info < (3, 9), reason="ast.unparse is only available in 3.9+" +) +@pytest.mark.parametrize("sync_file", sync_files, ids=lambda f: f.output_path) +def test_sync_up_to_date(sync_file): + """ + Generate a fresh copy of each cross_sync file, and compare hashes with the existing file. + + If this test fails, run `nox -s generate_sync` to update the sync files. + """ + path = sync_file.output_path + new_render = sync_file.render(with_formatter=True, save_to_disk=False) + found_render = CrossSyncOutputFile( + output_path="", ast_tree=ast.parse(open(path).read()), header=sync_file.header + ).render(with_formatter=True, save_to_disk=False) + # compare by content + diff = unified_diff(found_render.splitlines(), new_render.splitlines(), lineterm="") + diff_str = "\n".join(diff) + assert ( + not diff_str + ), f"Found differences. Run `nox -s generate_sync` to update:\n{diff_str}" + # compare by hash + new_hash = hashlib.md5(new_render.encode()).hexdigest() + found_hash = hashlib.md5(found_render.encode()).hexdigest() + assert new_hash == found_hash, f"md5 mismatch for {path}" + + +@pytest.mark.parametrize("sync_file", sync_files, ids=lambda f: f.output_path) +def test_verify_headers(sync_file): + license_regex = r""" + \#\ Copyright\ \d{4}\ Google\ LLC\n + \#\n + \#\ Licensed\ under\ the\ Apache\ License,\ Version\ 2\.0\ \(the\ \"License\"\);\n + \#\ you\ may\ not\ use\ this\ file\ except\ in\ compliance\ with\ the\ License\.\n + \#\ You\ may\ obtain\ a\ copy\ of\ the\ License\ at\ + \#\n + \#\s+http:\/\/www\.apache\.org\/licenses\/LICENSE-2\.0\n + \#\n + \#\ Unless\ required\ by\ applicable\ law\ or\ agreed\ to\ in\ writing,\ software\n + \#\ distributed\ under\ the\ License\ is\ distributed\ on\ an\ \"AS\ IS\"\ BASIS,\n + \#\ WITHOUT\ WARRANTIES\ OR\ CONDITIONS\ OF\ ANY\ KIND,\ either\ express\ or\ implied\.\n + \#\ See\ the\ License\ for\ the\ specific\ language\ governing\ permissions\ and\n + \#\ limitations\ under\ the\ License\. 
+ """ + pattern = re.compile(license_regex, re.VERBOSE) + + with open(sync_file.output_path, "r") as f: + content = f.read() + assert pattern.search(content), "Missing license header" From 22652f0a40c01804b12f3bb4b21aa7a715e5c39b Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 19 Dec 2024 10:42:31 -0800 Subject: [PATCH 841/892] chore(python): update dependencies in .kokoro/docker/docs (#1052) Source-Link: https://github.com/googleapis/synthtool/commit/e808c98e1ab7eec3df2a95a05331619f7001daef Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:8e3e7e18255c22d1489258d0374c901c01f9c4fd77a12088670cd73d580aa737 Co-authored-by: Owl Bot --- .../.github/.OwlBot.lock.yaml | 4 +- .../.github/release-trigger.yml | 2 +- .../.kokoro/docker/docs/requirements.txt | 66 ++++++++++++++----- 3 files changed, 51 insertions(+), 21 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 2fda9335f2a2..26306af66f81 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:5cddfe2fb5019bbf78335bc55f15bc13e18354a56b3ff46e1834f8e540807f05 -# created: 2024-10-31T01:41:07.349286254Z \ No newline at end of file + digest: sha256:8e3e7e18255c22d1489258d0374c901c01f9c4fd77a12088670cd73d580aa737 +# created: 2024-12-17T00:59:58.625514486Z diff --git a/packages/google-cloud-bigtable/.github/release-trigger.yml b/packages/google-cloud-bigtable/.github/release-trigger.yml index 4bb79e58eadf..0bbdd8e4cabb 100644 --- a/packages/google-cloud-bigtable/.github/release-trigger.yml +++ b/packages/google-cloud-bigtable/.github/release-trigger.yml @@ -1,2 +1,2 @@ enabled: true -multiScmName: +multiScmName: python-bigtable diff --git a/packages/google-cloud-bigtable/.kokoro/docker/docs/requirements.txt b/packages/google-cloud-bigtable/.kokoro/docker/docs/requirements.txt index 66eacc82f041..f99a5c4aac7f 100644 --- a/packages/google-cloud-bigtable/.kokoro/docker/docs/requirements.txt +++ b/packages/google-cloud-bigtable/.kokoro/docker/docs/requirements.txt @@ -1,16 +1,16 @@ # -# This file is autogenerated by pip-compile with Python 3.9 +# This file is autogenerated by pip-compile with Python 3.10 # by the following command: # -# pip-compile --allow-unsafe --generate-hashes requirements.in +# pip-compile --allow-unsafe --generate-hashes synthtool/gcp/templates/python_library/.kokoro/docker/docs/requirements.in # -argcomplete==3.5.1 \ - --hash=sha256:1a1d148bdaa3e3b93454900163403df41448a248af01b6e849edc5ac08e6c363 \ - --hash=sha256:eb1ee355aa2557bd3d0145de7b06b2a45b0ce461e1e7813f5d066039ab4177b4 +argcomplete==3.5.2 \ + --hash=sha256:036d020d79048a5d525bc63880d7a4b8d1668566b8a76daf1144c0bbe0f63472 \ + --hash=sha256:23146ed7ac4403b70bd6026402468942ceba34a6732255b9edf5b7354f68a6bb # via nox -colorlog==6.8.2 \ - --hash=sha256:3e3e079a41feb5a1b64f978b5ea4f46040a94f11f0e8bbb8261e3dbbeca64d44 \ - --hash=sha256:4dcbb62368e2800cb3c5abd348da7e53f6c362dda502ec27c560b2e58a66bd33 +colorlog==6.9.0 \ + --hash=sha256:5906e71acd67cb07a71e779c47c4bcb45fb8c2993eebe9e5adcd6a6f1b283eff \ + --hash=sha256:bfba54a1b93b94f54e1f4fe48395725a3d92fd2a4af702f6bd70946bdc0c6ac2 # via nox distlib==0.3.9 \ 
--hash=sha256:47f8c22fd27c27e25a65601af709b38e4f0a45ea4fc2e710f65755fa8caaaf87 \ @@ -23,20 +23,50 @@ filelock==3.16.1 \ nox==2024.10.9 \ --hash=sha256:1d36f309a0a2a853e9bccb76bbef6bb118ba92fa92674d15604ca99adeb29eab \ --hash=sha256:7aa9dc8d1c27e9f45ab046ffd1c3b2c4f7c91755304769df231308849ebded95 - # via -r requirements.in -packaging==24.1 \ - --hash=sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002 \ - --hash=sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124 + # via -r synthtool/gcp/templates/python_library/.kokoro/docker/docs/requirements.in +packaging==24.2 \ + --hash=sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759 \ + --hash=sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f # via nox platformdirs==4.3.6 \ --hash=sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907 \ --hash=sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb # via virtualenv -tomli==2.0.2 \ - --hash=sha256:2ebe24485c53d303f690b0ec092806a085f07af5a5aa1464f3931eec36caaa38 \ - --hash=sha256:d46d457a85337051c36524bc5349dd91b1877838e2979ac5ced3e710ed8a60ed +tomli==2.2.1 \ + --hash=sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6 \ + --hash=sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd \ + --hash=sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c \ + --hash=sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b \ + --hash=sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8 \ + --hash=sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6 \ + --hash=sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77 \ + --hash=sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff \ + --hash=sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea \ + --hash=sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192 \ + --hash=sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249 \ + --hash=sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee \ + --hash=sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4 \ + --hash=sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98 \ + --hash=sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8 \ + --hash=sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4 \ + --hash=sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281 \ + --hash=sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744 \ + --hash=sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69 \ + --hash=sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13 \ + --hash=sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140 \ + --hash=sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e \ + --hash=sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e \ + --hash=sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc \ + --hash=sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff \ + --hash=sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec \ + --hash=sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2 \ + --hash=sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222 \ + 
--hash=sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106 \ + --hash=sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272 \ + --hash=sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a \ + --hash=sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7 # via nox -virtualenv==20.26.6 \ - --hash=sha256:280aede09a2a5c317e409a00102e7077c6432c5a38f0ef938e643805a7ad2c48 \ - --hash=sha256:7345cc5b25405607a624d8418154577459c3e0277f5466dd79c49d5e492995f2 +virtualenv==20.28.0 \ + --hash=sha256:23eae1b4516ecd610481eda647f3a7c09aea295055337331bb4e6892ecce47b0 \ + --hash=sha256:2c9c3262bb8e7b87ea801d715fae4495e6032450c71d2309be9550e7364049aa # via nox From f99afe232561d248d019ef9d0dc8bf48d69bf150 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Wed, 8 Jan 2025 12:10:16 -0800 Subject: [PATCH 842/892] chore(python): Update the python version in docs presubmit to use 3.10 (#1061) * chore(python): Update the python version in docs presubmit to use 3.10 Source-Link: https://github.com/googleapis/synthtool/commit/de3def663b75d8b9ae1e5d548364c960ff13af8f Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:a1c5112b81d645f5bbc4d4bbc99d7dcb5089a52216c0e3fb1203a0eeabadd7d5 * loosen assertion * update sync * fixed mypy issue --------- Co-authored-by: Owl Bot Co-authored-by: Daniel Sanche --- packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml | 6 +++--- packages/google-cloud-bigtable/.github/workflows/docs.yml | 2 +- .../google-cloud-bigtable/.github/workflows/unittest.yml | 5 ++++- .../google/cloud/bigtable/data/_cross_sync/_decorators.py | 4 ++-- .../tests/unit/data/_async/test_client.py | 2 +- .../tests/unit/data/_sync_autogen/test_client.py | 2 +- 6 files changed, 12 insertions(+), 9 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 26306af66f81..1d0fd7e7878b 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -1,4 +1,4 @@ -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,5 +13,5 @@ # limitations under the License. 
docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:8e3e7e18255c22d1489258d0374c901c01f9c4fd77a12088670cd73d580aa737 -# created: 2024-12-17T00:59:58.625514486Z + digest: sha256:a1c5112b81d645f5bbc4d4bbc99d7dcb5089a52216c0e3fb1203a0eeabadd7d5 +# created: 2025-01-02T23:09:36.975468657Z diff --git a/packages/google-cloud-bigtable/.github/workflows/docs.yml b/packages/google-cloud-bigtable/.github/workflows/docs.yml index 698fbc5c94da..2833fe98fff0 100644 --- a/packages/google-cloud-bigtable/.github/workflows/docs.yml +++ b/packages/google-cloud-bigtable/.github/workflows/docs.yml @@ -12,7 +12,7 @@ jobs: - name: Setup Python uses: actions/setup-python@v5 with: - python-version: "3.9" + python-version: "3.10" - name: Install nox run: | python -m pip install --upgrade setuptools pip wheel diff --git a/packages/google-cloud-bigtable/.github/workflows/unittest.yml b/packages/google-cloud-bigtable/.github/workflows/unittest.yml index 6eca3149c126..6a0429d96101 100644 --- a/packages/google-cloud-bigtable/.github/workflows/unittest.yml +++ b/packages/google-cloud-bigtable/.github/workflows/unittest.yml @@ -5,7 +5,10 @@ on: name: unittest jobs: unit: - runs-on: ubuntu-latest + # TODO(https://github.com/googleapis/gapic-generator-python/issues/2303): use `ubuntu-latest` once this bug is fixed. + # Use ubuntu-22.04 until Python 3.7 is removed from the test matrix + # https://docs.github.com/en/actions/using-github-hosted-runners/using-github-hosted-runners/about-github-hosted-runners#standard-github-hosted-runners-for-public-repositories + runs-on: ubuntu-22.04 strategy: matrix: python: ['3.7', '3.8', '3.9', '3.10', '3.11', '3.12', '3.13'] diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_cross_sync/_decorators.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_cross_sync/_decorators.py index f37b05b64ed7..ea86e83af8d7 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_cross_sync/_decorators.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_cross_sync/_decorators.py @@ -128,8 +128,8 @@ def get_for_node(cls, node: ast.Call | ast.Attribute | ast.Name) -> "AstDecorato # extract the module and decorator names if "CrossSync" in ast.dump(root_attr): decorator_name = root_attr.attr - got_kwargs = ( - {kw.arg: cls._convert_ast_to_py(kw.value) for kw in node.keywords} + got_kwargs: dict[str, Any] = ( + {str(kw.arg): cls._convert_ast_to_py(kw.value) for kw in node.keywords} if hasattr(node, "keywords") else {} ) diff --git a/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py b/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py index 8d829a363ee7..18ff69ffd2f8 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py @@ -446,7 +446,7 @@ async def test__manage_channel_sleeps( assert sleep.call_count == num_cycles total_sleep = sum([call[0][1] for call in sleep.call_args_list]) assert ( - abs(total_sleep - expected_sleep) < 0.1 + abs(total_sleep - expected_sleep) < 0.5 ), f"refresh_interval={refresh_interval}, num_cycles={num_cycles}, expected_sleep={expected_sleep}" await client.close() diff --git a/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_client.py b/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_client.py index 51c88c63eb55..c5c6bac30bcf 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_client.py 
+++ b/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_client.py @@ -349,7 +349,7 @@ def test__manage_channel_sleeps(self, refresh_interval, num_cycles, expected_sle assert sleep.call_count == num_cycles total_sleep = sum([call[0][1] for call in sleep.call_args_list]) assert ( - abs(total_sleep - expected_sleep) < 0.1 + abs(total_sleep - expected_sleep) < 0.5 ), f"refresh_interval={refresh_interval}, num_cycles={num_cycles}, expected_sleep={expected_sleep}" client.close() From 3062b12c282317f44b3c8b5b4310bc390a5cb560 Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Wed, 8 Jan 2025 13:16:04 -0800 Subject: [PATCH 843/892] chore(main): release 2.28.0 (#1049) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- .../google-cloud-bigtable/.release-please-manifest.json | 2 +- packages/google-cloud-bigtable/CHANGELOG.md | 7 +++++++ .../google/cloud/bigtable/gapic_version.py | 2 +- .../google/cloud/bigtable_admin/gapic_version.py | 2 +- .../google/cloud/bigtable_admin_v2/gapic_version.py | 2 +- .../google/cloud/bigtable_v2/gapic_version.py | 2 +- 6 files changed, 12 insertions(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/.release-please-manifest.json b/packages/google-cloud-bigtable/.release-please-manifest.json index 2da95504a139..c5dfacd6ecc0 100644 --- a/packages/google-cloud-bigtable/.release-please-manifest.json +++ b/packages/google-cloud-bigtable/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "2.27.0" + ".": "2.28.0" } \ No newline at end of file diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 8abd58f89747..6df20bec0139 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,13 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.28.0](https://github.com/googleapis/python-bigtable/compare/v2.27.0...v2.28.0) (2025-01-08) + + +### Features + +* Add generated sync client ([#1017](https://github.com/googleapis/python-bigtable/issues/1017)) ([f974823](https://github.com/googleapis/python-bigtable/commit/f974823bf8a74c2f8b1bc69997b13bc1acaf8bef)) + ## [2.27.0](https://github.com/googleapis/python-bigtable/compare/v2.26.0...v2.27.0) (2024-11-12) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py index f0fcebfa4138..8f0f03c065a2 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.27.0" # {x-release-please-version} +__version__ = "2.28.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py index f0fcebfa4138..8f0f03c065a2 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "2.27.0" # {x-release-please-version} +__version__ = "2.28.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py index f0fcebfa4138..8f0f03c065a2 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.27.0" # {x-release-please-version} +__version__ = "2.28.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py index f0fcebfa4138..8f0f03c065a2 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.27.0" # {x-release-please-version} +__version__ = "2.28.0" # {x-release-please-version} From de326c0f92d6d33cf4841ad2221e36b8bdbaf7fc Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Tue, 14 Jan 2025 11:26:21 -0500 Subject: [PATCH 844/892] chore(python): exclude .github/workflows/unittest.yml in renovate config (#1067) Source-Link: https://github.com/googleapis/synthtool/commit/106d292bd234e5d9977231dcfbc4831e34eba13a Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:8ff1efe878e18bd82a0fb7b70bb86f77e7ab6901fed394440b6135db0ba8d84a Co-authored-by: Owl Bot --- packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml | 4 ++-- packages/google-cloud-bigtable/renovate.json | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 1d0fd7e7878b..10cf433a8b00 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. 
docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:a1c5112b81d645f5bbc4d4bbc99d7dcb5089a52216c0e3fb1203a0eeabadd7d5 -# created: 2025-01-02T23:09:36.975468657Z + digest: sha256:8ff1efe878e18bd82a0fb7b70bb86f77e7ab6901fed394440b6135db0ba8d84a +# created: 2025-01-09T12:01:16.422459506Z diff --git a/packages/google-cloud-bigtable/renovate.json b/packages/google-cloud-bigtable/renovate.json index 39b2a0ec9296..c7875c469bd5 100644 --- a/packages/google-cloud-bigtable/renovate.json +++ b/packages/google-cloud-bigtable/renovate.json @@ -5,7 +5,7 @@ ":preserveSemverRanges", ":disableDependencyDashboard" ], - "ignorePaths": [".pre-commit-config.yaml", ".kokoro/requirements.txt", "setup.py"], + "ignorePaths": [".pre-commit-config.yaml", ".kokoro/requirements.txt", "setup.py", ".github/workflows/unittest.yml"], "pip_requirements": { "fileMatch": ["requirements-test.txt", "samples/[\\S/]*constraints.txt", "samples/[\\S/]*constraints-test.txt"] } From baca72f1d6480527be83867f90b6cc209c26f8aa Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Thu, 16 Jan 2025 16:25:10 -0800 Subject: [PATCH 845/892] fix: allow empty headers for btql routing (#1072) --- .../services/bigtable/async_client.py | 3 ++- .../bigtable_v2/services/bigtable/client.py | 3 ++- .../tests/system/data/test_system_async.py | 13 +++++++++++++ .../tests/system/data/test_system_autogen.py | 12 ++++++++++++ .../unit/gapic/bigtable_v2/test_bigtable.py | 18 +++++++++++++++--- 5 files changed, 44 insertions(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py index b36f525fa217..08317e1ebb08 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -1496,7 +1496,8 @@ def execute_query( if regex_match and regex_match.group("name"): header_params["name"] = regex_match.group("name") - if request.app_profile_id: + if request.app_profile_id is not None: + # execute_query currently requires empty header support. TODO: remove after support is added header_params["app_profile_id"] = request.app_profile_id if header_params: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py index a2534d5393c6..42723c6612a5 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py @@ -1893,7 +1893,8 @@ def execute_query( if regex_match and regex_match.group("name"): header_params["name"] = regex_match.group("name") - if request.app_profile_id: + if request.app_profile_id is not None: + # execute_query currently requires empty header support. 
TODO: remove after support is adde header_params["app_profile_id"] = request.app_profile_id if header_params: diff --git a/packages/google-cloud-bigtable/tests/system/data/test_system_async.py b/packages/google-cloud-bigtable/tests/system/data/test_system_async.py index b97859de11f6..74f318d39925 100644 --- a/packages/google-cloud-bigtable/tests/system/data/test_system_async.py +++ b/packages/google-cloud-bigtable/tests/system/data/test_system_async.py @@ -1014,3 +1014,16 @@ async def test_literal_value_filter( assert len(row_list) == bool( expect_match ), f"row {type(cell_value)}({cell_value}) not found with {type(filter_input)}({filter_input}) filter" + + @CrossSync.pytest + @pytest.mark.usefixtures("client") + @CrossSync.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + async def test_execute_query_simple(self, client, table_id, instance_id): + result = await client.execute_query("SELECT 1 AS a, 'foo' AS b", instance_id) + rows = [r async for r in result] + assert len(rows) == 1 + row = rows[0] + assert row["a"] == 1 + assert row["b"] == "foo" diff --git a/packages/google-cloud-bigtable/tests/system/data/test_system_autogen.py b/packages/google-cloud-bigtable/tests/system/data/test_system_autogen.py index 2dde82bf16d1..c96cfdb50581 100644 --- a/packages/google-cloud-bigtable/tests/system/data/test_system_autogen.py +++ b/packages/google-cloud-bigtable/tests/system/data/test_system_autogen.py @@ -826,3 +826,15 @@ def test_literal_value_filter( assert len(row_list) == bool( expect_match ), f"row {type(cell_value)}({cell_value}) not found with {type(filter_input)}({filter_input}) filter" + + @pytest.mark.usefixtures("client") + @CrossSync._Sync_Impl.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + def test_execute_query_simple(self, client, table_id, instance_id): + result = client.execute_query("SELECT 1 AS a, 'foo' AS b", instance_id) + rows = [r for r in result] + assert len(rows) == 1 + row = rows[0] + assert row["a"] == 1 + assert row["b"] == "foo" diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py index 37b4bbfcaeaa..10543bd3acbe 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py @@ -6868,7 +6868,11 @@ def test_execute_query_routing_parameters_request_1_grpc(): assert args[0] == request_msg - expected_headers = {"name": "projects/sample1/instances/sample2"} + # expect app_profile_id while temporary patch is in place: https://github.com/googleapis/python-bigtable/pull/1072 + expected_headers = { + "name": "projects/sample1/instances/sample2", + "app_profile_id": "", + } assert ( gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] ) @@ -7894,7 +7898,11 @@ async def test_execute_query_routing_parameters_request_1_grpc_asyncio(): assert args[0] == request_msg - expected_headers = {"name": "projects/sample1/instances/sample2"} + # expect app_profile_id while temporary patch is in place: https://github.com/googleapis/python-bigtable/pull/1072 + expected_headers = { + "name": "projects/sample1/instances/sample2", + "app_profile_id": "", + } assert ( gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] ) @@ -9915,7 +9923,11 @@ def test_execute_query_routing_parameters_request_1_rest(): assert args[0] == request_msg - expected_headers = 
{"name": "projects/sample1/instances/sample2"} + # expect app_profile_id while temporary patch is in place: https://github.com/googleapis/python-bigtable/pull/1072 + expected_headers = { + "name": "projects/sample1/instances/sample2", + "app_profile_id": "", + } assert ( gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] ) From 4003dbe364efa9ebb23232171bf362e3bddf8a8c Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Fri, 17 Jan 2025 09:17:37 -0800 Subject: [PATCH 846/892] chore(main): release 2.28.1 (#1074) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- .../google-cloud-bigtable/.release-please-manifest.json | 2 +- packages/google-cloud-bigtable/CHANGELOG.md | 7 +++++++ .../google/cloud/bigtable/gapic_version.py | 2 +- .../google/cloud/bigtable_admin/gapic_version.py | 2 +- .../google/cloud/bigtable_admin_v2/gapic_version.py | 2 +- .../google/cloud/bigtable_v2/gapic_version.py | 2 +- 6 files changed, 12 insertions(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/.release-please-manifest.json b/packages/google-cloud-bigtable/.release-please-manifest.json index c5dfacd6ecc0..a0a9763a46c6 100644 --- a/packages/google-cloud-bigtable/.release-please-manifest.json +++ b/packages/google-cloud-bigtable/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "2.28.0" + ".": "2.28.1" } \ No newline at end of file diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 6df20bec0139..ba398feff37a 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,13 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.28.1](https://github.com/googleapis/python-bigtable/compare/v2.28.0...v2.28.1) (2025-01-17) + + +### Bug Fixes + +* Allow empty headers for btql routing ([#1072](https://github.com/googleapis/python-bigtable/issues/1072)) ([e7ecfeb](https://github.com/googleapis/python-bigtable/commit/e7ecfeb8984a45c880d9483305964fff347eb4b8)) + ## [2.28.0](https://github.com/googleapis/python-bigtable/compare/v2.27.0...v2.28.0) (2025-01-08) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py index 8f0f03c065a2..c0c0d8b11009 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.28.0" # {x-release-please-version} +__version__ = "2.28.1" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py index 8f0f03c065a2..c0c0d8b11009 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "2.28.0" # {x-release-please-version} +__version__ = "2.28.1" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py index 8f0f03c065a2..c0c0d8b11009 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.28.0" # {x-release-please-version} +__version__ = "2.28.1" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py index 8f0f03c065a2..c0c0d8b11009 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.28.0" # {x-release-please-version} +__version__ = "2.28.1" # {x-release-please-version} From 81873073ea96c16edac20d4aa207c3959c12f75e Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Fri, 17 Jan 2025 10:34:50 -0800 Subject: [PATCH 847/892] chore: update protoplus for python 3.13 (#1051) Fixes https://github.com/googleapis/python-bigtable/issues/1029 --- packages/google-cloud-bigtable/setup.py | 1 + 1 file changed, 1 insertion(+) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index c47167487c0b..23eb8d3607c9 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -42,6 +42,7 @@ "google-auth >= 2.14.1, <3.0.0dev,!=2.24.0,!=2.25.0", "grpc-google-iam-v1 >= 0.12.4, <1.0.0dev", "proto-plus >= 1.22.3, <2.0.0dev", + "proto-plus >= 1.25.0, <2.0.0dev; python_version>='3.13'", "protobuf>=3.20.2,<6.0.0dev,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5", ] extras = {"libcst": "libcst >= 0.2.5"} From 98ad8d7970feb9daebfd34aa4aae881149a00034 Mon Sep 17 00:00:00 2001 From: Jack Dingilian Date: Mon, 3 Feb 2025 19:56:00 -0500 Subject: [PATCH 848/892] feat: Add support for array and float32 SQL query params (#1078) --- .../cloud/bigtable/data/_async/client.py | 2 + .../bigtable/data/_sync_autogen/client.py | 2 + .../_async/execute_query_iterator.py | 2 - .../execute_query/_parameters_formatting.py | 17 +- .../_query_result_parsing_utils.py | 1 + .../bigtable/data/execute_query/metadata.py | 46 +++-- .../tests/system/data/test_system_async.py | 83 ++++++++ .../tests/system/data/test_system_autogen.py | 74 +++++++ .../test_execute_query_parameters_parsing.py | 190 ++++++++++++++++-- 9 files changed, 381 insertions(+), 36 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py index c7cc0de6bf7f..ecf481889ac4 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py @@ -569,6 +569,8 @@ async def execute_query( will be chained with a RetryExceptionGroup containing GoogleAPIError exceptions from any retries that failed google.api_core.exceptions.GoogleAPIError: raised if the request 
encounters an unrecoverable error + google.cloud.bigtable.data.exceptions.ParameterTypeInferenceFailed: Raised if + a parameter is passed without an explicit type, and the type cannot be infered """ warnings.warn( "ExecuteQuery is in preview and may change in the future.", diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/client.py index 37e192147311..492e86224036 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/client.py @@ -439,6 +439,8 @@ def execute_query( will be chained with a RetryExceptionGroup containing GoogleAPIError exceptions from any retries that failed google.api_core.exceptions.GoogleAPIError: raised if the request encounters an unrecoverable error + google.cloud.bigtable.data.exceptions.ParameterTypeInferenceFailed: Raised if + a parameter is passed without an explicit type, and the type cannot be infered """ warnings.warn( "ExecuteQuery is in preview and may change in the future.", diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py index 66f264610247..a8f60be36820 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py @@ -22,7 +22,6 @@ Tuple, TYPE_CHECKING, ) - from google.api_core import retry as retries from google.cloud.bigtable.data.execute_query._byte_cursor import _ByteCursor @@ -116,7 +115,6 @@ def __init__( exception_factory=_retry_exception_factory, ) self._req_metadata = req_metadata - try: self._register_instance_task = CrossSync.create_task( self._client._register_instance, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_parameters_formatting.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_parameters_formatting.py index edb7a6380cc6..eadda21f4a44 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_parameters_formatting.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_parameters_formatting.py @@ -12,12 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Any, Dict, Optional import datetime +from typing import Any, Dict, Optional + from google.api_core.datetime_helpers import DatetimeWithNanoseconds + from google.cloud.bigtable.data.exceptions import ParameterTypeInferenceFailed -from google.cloud.bigtable.data.execute_query.values import ExecuteQueryValueType from google.cloud.bigtable.data.execute_query.metadata import SqlType +from google.cloud.bigtable.data.execute_query.values import ExecuteQueryValueType def _format_execute_query_params( @@ -48,7 +50,6 @@ def _format_execute_query_params( parameter_types = parameter_types or {} result_values = {} - for key, value in params.items(): user_provided_type = parameter_types.get(key) try: @@ -109,6 +110,16 @@ def _detect_type(value: ExecuteQueryValueType) -> SqlType.Type: "Cannot infer type of None, please provide the type manually." 
) + if isinstance(value, list): + raise ParameterTypeInferenceFailed( + "Cannot infer type of ARRAY parameters, please provide the type manually." + ) + + if isinstance(value, float): + raise ParameterTypeInferenceFailed( + "Cannot infer type of float, must specify either FLOAT32 or FLOAT64 type manually." + ) + for field_type, type_dict in _TYPES_TO_TYPE_DICTS: if isinstance(value, field_type): return type_dict diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_query_result_parsing_utils.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_query_result_parsing_utils.py index b65dce27b85f..4cb5db2911de 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_query_result_parsing_utils.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_query_result_parsing_utils.py @@ -22,6 +22,7 @@ SqlType.Bytes: "bytes_value", SqlType.String: "string_value", SqlType.Int64: "int_value", + SqlType.Float32: "float_value", SqlType.Float64: "float_value", SqlType.Bool: "bool_value", SqlType.Timestamp: "timestamp_value", diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/metadata.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/metadata.py index 0c9cf969791a..bb29588d0e4b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/metadata.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/metadata.py @@ -21,23 +21,16 @@ """ from collections import defaultdict -from typing import ( - Optional, - List, - Dict, - Set, - Type, - Union, - Tuple, - Any, -) +import datetime +from typing import Any, Dict, List, Optional, Set, Tuple, Type, Union + +from google.api_core.datetime_helpers import DatetimeWithNanoseconds +from google.protobuf import timestamp_pb2 # type: ignore +from google.type import date_pb2 # type: ignore + from google.cloud.bigtable.data.execute_query.values import _NamedList from google.cloud.bigtable_v2 import ResultSetMetadata from google.cloud.bigtable_v2 import Type as PBType -from google.type import date_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.api_core.datetime_helpers import DatetimeWithNanoseconds -import datetime class SqlType: @@ -127,6 +120,8 @@ class Array(Type): def __init__(self, element_type: "SqlType.Type"): if isinstance(element_type, SqlType.Array): raise ValueError("Arrays of arrays are not supported.") + if isinstance(element_type, SqlType.Map): + raise ValueError("Arrays of Maps are not supported.") self._element_type = element_type @property @@ -140,10 +135,21 @@ def from_pb_type(cls, type_pb: Optional[PBType] = None) -> "SqlType.Array": return cls(_pb_type_to_metadata_type(type_pb.array_type.element_type)) def _to_value_pb_dict(self, value: Any): - raise NotImplementedError("Array is not supported as a query parameter") + if value is None: + return {} + + return { + "array_value": { + "values": [ + self.element_type._to_value_pb_dict(entry) for entry in value + ] + } + } def _to_type_pb_dict(self) -> Dict[str, Any]: - raise NotImplementedError("Array is not supported as a query parameter") + return { + "array_type": {"element_type": self.element_type._to_type_pb_dict()} + } def __eq__(self, other): return super().__eq__(other) and self.element_type == other.element_type @@ -222,6 +228,13 @@ class Float64(Type): value_pb_dict_field_name = "float_value" type_field_name = "float64_type" + class 
Float32(Type): + """Float32 SQL type.""" + + expected_type = float + value_pb_dict_field_name = "float_value" + type_field_name = "float32_type" + class Bool(Type): """Bool SQL type.""" @@ -376,6 +389,7 @@ def _pb_metadata_to_metadata_types( "bytes_type": SqlType.Bytes, "string_type": SqlType.String, "int64_type": SqlType.Int64, + "float32_type": SqlType.Float32, "float64_type": SqlType.Float64, "bool_type": SqlType.Bool, "timestamp_type": SqlType.Timestamp, diff --git a/packages/google-cloud-bigtable/tests/system/data/test_system_async.py b/packages/google-cloud-bigtable/tests/system/data/test_system_async.py index 74f318d39925..5c11c1990bb5 100644 --- a/packages/google-cloud-bigtable/tests/system/data/test_system_async.py +++ b/packages/google-cloud-bigtable/tests/system/data/test_system_async.py @@ -14,13 +14,16 @@ import pytest import asyncio +import datetime import uuid import os from google.api_core import retry from google.api_core.exceptions import ClientError +from google.cloud.bigtable.data.execute_query.metadata import SqlType from google.cloud.bigtable.data.read_modify_write_rules import _MAX_INCREMENT_VALUE from google.cloud.environment_vars import BIGTABLE_EMULATOR +from google.type import date_pb2 from google.cloud.bigtable.data._cross_sync import CrossSync @@ -1027,3 +1030,83 @@ async def test_execute_query_simple(self, client, table_id, instance_id): row = rows[0] assert row["a"] == 1 assert row["b"] == "foo" + + @CrossSync.pytest + @pytest.mark.usefixtures("client") + @CrossSync.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + async def test_execute_query_params(self, client, table_id, instance_id): + query = ( + "SELECT @stringParam AS strCol, @bytesParam as bytesCol, @int64Param AS intCol, " + "@float32Param AS float32Col, @float64Param AS float64Col, @boolParam AS boolCol, " + "@tsParam AS tsCol, @dateParam AS dateCol, @byteArrayParam AS byteArrayCol, " + "@stringArrayParam AS stringArrayCol, @intArrayParam AS intArrayCol, " + "@float32ArrayParam AS float32ArrayCol, @float64ArrayParam AS float64ArrayCol, " + "@boolArrayParam AS boolArrayCol, @tsArrayParam AS tsArrayCol, " + "@dateArrayParam AS dateArrayCol" + ) + parameters = { + "stringParam": "foo", + "bytesParam": b"bar", + "int64Param": 12, + "float32Param": 1.1, + "float64Param": 1.2, + "boolParam": True, + "tsParam": datetime.datetime.fromtimestamp(1000, tz=datetime.timezone.utc), + "dateParam": datetime.date(2025, 1, 16), + "byteArrayParam": [b"foo", b"bar", None], + "stringArrayParam": ["foo", "bar", None], + "intArrayParam": [1, None, 2], + "float32ArrayParam": [1.2, None, 1.3], + "float64ArrayParam": [1.4, None, 1.5], + "boolArrayParam": [None, False, True], + "tsArrayParam": [ + datetime.datetime.fromtimestamp(1000, tz=datetime.timezone.utc), + datetime.datetime.fromtimestamp(2000, tz=datetime.timezone.utc), + None, + ], + "dateArrayParam": [ + datetime.date(2025, 1, 16), + datetime.date(2025, 1, 17), + None, + ], + } + param_types = { + "float32Param": SqlType.Float32(), + "float64Param": SqlType.Float64(), + "byteArrayParam": SqlType.Array(SqlType.Bytes()), + "stringArrayParam": SqlType.Array(SqlType.String()), + "intArrayParam": SqlType.Array(SqlType.Int64()), + "float32ArrayParam": SqlType.Array(SqlType.Float32()), + "float64ArrayParam": SqlType.Array(SqlType.Float64()), + "boolArrayParam": SqlType.Array(SqlType.Bool()), + "tsArrayParam": SqlType.Array(SqlType.Timestamp()), + "dateArrayParam": SqlType.Array(SqlType.Date()), + } + result = await client.execute_query( + 
query, instance_id, parameters=parameters, parameter_types=param_types + ) + rows = [r async for r in result] + assert len(rows) == 1 + row = rows[0] + assert row["strCol"] == parameters["stringParam"] + assert row["bytesCol"] == parameters["bytesParam"] + assert row["intCol"] == parameters["int64Param"] + assert row["float32Col"] == pytest.approx(parameters["float32Param"]) + assert row["float64Col"] == pytest.approx(parameters["float64Param"]) + assert row["boolCol"] == parameters["boolParam"] + assert row["tsCol"] == parameters["tsParam"] + assert row["dateCol"] == date_pb2.Date(year=2025, month=1, day=16) + assert row["stringArrayCol"] == parameters["stringArrayParam"] + assert row["byteArrayCol"] == parameters["byteArrayParam"] + assert row["intArrayCol"] == parameters["intArrayParam"] + assert row["float32ArrayCol"] == pytest.approx(parameters["float32ArrayParam"]) + assert row["float64ArrayCol"] == pytest.approx(parameters["float64ArrayParam"]) + assert row["boolArrayCol"] == parameters["boolArrayParam"] + assert row["tsArrayCol"] == parameters["tsArrayParam"] + assert row["dateArrayCol"] == [ + date_pb2.Date(year=2025, month=1, day=16), + date_pb2.Date(year=2025, month=1, day=17), + None, + ] diff --git a/packages/google-cloud-bigtable/tests/system/data/test_system_autogen.py b/packages/google-cloud-bigtable/tests/system/data/test_system_autogen.py index c96cfdb50581..cbaaf5a8fb6f 100644 --- a/packages/google-cloud-bigtable/tests/system/data/test_system_autogen.py +++ b/packages/google-cloud-bigtable/tests/system/data/test_system_autogen.py @@ -16,12 +16,15 @@ # This file is automatically generated by CrossSync. Do not edit manually. import pytest +import datetime import uuid import os from google.api_core import retry from google.api_core.exceptions import ClientError +from google.cloud.bigtable.data.execute_query.metadata import SqlType from google.cloud.bigtable.data.read_modify_write_rules import _MAX_INCREMENT_VALUE from google.cloud.environment_vars import BIGTABLE_EMULATOR +from google.type import date_pb2 from google.cloud.bigtable.data._cross_sync import CrossSync from . 
import TEST_FAMILY, TEST_FAMILY_2 @@ -838,3 +841,74 @@ def test_execute_query_simple(self, client, table_id, instance_id): row = rows[0] assert row["a"] == 1 assert row["b"] == "foo" + + @pytest.mark.usefixtures("client") + @CrossSync._Sync_Impl.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + def test_execute_query_params(self, client, table_id, instance_id): + query = "SELECT @stringParam AS strCol, @bytesParam as bytesCol, @int64Param AS intCol, @float32Param AS float32Col, @float64Param AS float64Col, @boolParam AS boolCol, @tsParam AS tsCol, @dateParam AS dateCol, @byteArrayParam AS byteArrayCol, @stringArrayParam AS stringArrayCol, @intArrayParam AS intArrayCol, @float32ArrayParam AS float32ArrayCol, @float64ArrayParam AS float64ArrayCol, @boolArrayParam AS boolArrayCol, @tsArrayParam AS tsArrayCol, @dateArrayParam AS dateArrayCol" + parameters = { + "stringParam": "foo", + "bytesParam": b"bar", + "int64Param": 12, + "float32Param": 1.1, + "float64Param": 1.2, + "boolParam": True, + "tsParam": datetime.datetime.fromtimestamp(1000, tz=datetime.timezone.utc), + "dateParam": datetime.date(2025, 1, 16), + "byteArrayParam": [b"foo", b"bar", None], + "stringArrayParam": ["foo", "bar", None], + "intArrayParam": [1, None, 2], + "float32ArrayParam": [1.2, None, 1.3], + "float64ArrayParam": [1.4, None, 1.5], + "boolArrayParam": [None, False, True], + "tsArrayParam": [ + datetime.datetime.fromtimestamp(1000, tz=datetime.timezone.utc), + datetime.datetime.fromtimestamp(2000, tz=datetime.timezone.utc), + None, + ], + "dateArrayParam": [ + datetime.date(2025, 1, 16), + datetime.date(2025, 1, 17), + None, + ], + } + param_types = { + "float32Param": SqlType.Float32(), + "float64Param": SqlType.Float64(), + "byteArrayParam": SqlType.Array(SqlType.Bytes()), + "stringArrayParam": SqlType.Array(SqlType.String()), + "intArrayParam": SqlType.Array(SqlType.Int64()), + "float32ArrayParam": SqlType.Array(SqlType.Float32()), + "float64ArrayParam": SqlType.Array(SqlType.Float64()), + "boolArrayParam": SqlType.Array(SqlType.Bool()), + "tsArrayParam": SqlType.Array(SqlType.Timestamp()), + "dateArrayParam": SqlType.Array(SqlType.Date()), + } + result = client.execute_query( + query, instance_id, parameters=parameters, parameter_types=param_types + ) + rows = [r for r in result] + assert len(rows) == 1 + row = rows[0] + assert row["strCol"] == parameters["stringParam"] + assert row["bytesCol"] == parameters["bytesParam"] + assert row["intCol"] == parameters["int64Param"] + assert row["float32Col"] == pytest.approx(parameters["float32Param"]) + assert row["float64Col"] == pytest.approx(parameters["float64Param"]) + assert row["boolCol"] == parameters["boolParam"] + assert row["tsCol"] == parameters["tsParam"] + assert row["dateCol"] == date_pb2.Date(year=2025, month=1, day=16) + assert row["stringArrayCol"] == parameters["stringArrayParam"] + assert row["byteArrayCol"] == parameters["byteArrayParam"] + assert row["intArrayCol"] == parameters["intArrayParam"] + assert row["float32ArrayCol"] == pytest.approx(parameters["float32ArrayParam"]) + assert row["float64ArrayCol"] == pytest.approx(parameters["float64ArrayParam"]) + assert row["boolArrayCol"] == parameters["boolArrayParam"] + assert row["tsArrayCol"] == parameters["tsArrayParam"] + assert row["dateArrayCol"] == [ + date_pb2.Date(year=2025, month=1, day=16), + date_pb2.Date(year=2025, month=1, day=17), + None, + ] diff --git 
a/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_execute_query_parameters_parsing.py b/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_execute_query_parameters_parsing.py index f7159fb71beb..bebbd8d45c0a 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_execute_query_parameters_parsing.py +++ b/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_execute_query_parameters_parsing.py @@ -12,17 +12,18 @@ # See the License for the specific language governing permissions and # limitations under the License. +import datetime + +from google.api_core.datetime_helpers import DatetimeWithNanoseconds +from google.type import date_pb2 import pytest + from google.cloud.bigtable.data.execute_query._parameters_formatting import ( _format_execute_query_params, ) from google.cloud.bigtable.data.execute_query.metadata import SqlType from google.cloud.bigtable.data.execute_query.values import Struct -import datetime - -from google.type import date_pb2 -from google.api_core.datetime_helpers import DatetimeWithNanoseconds - +from google.protobuf import timestamp_pb2 timestamp = int( datetime.datetime(2024, 5, 12, 17, 44, 12, tzinfo=datetime.timezone.utc).timestamp() @@ -71,7 +72,7 @@ ), ], ) -def test_instance_execute_query_parameters_simple_types_parsing( +def test_execute_query_parameters_inferred_types_parsing( input_value, value_field, type_field, expected_value ): result = _format_execute_query_params( @@ -84,7 +85,161 @@ def test_instance_execute_query_parameters_simple_types_parsing( assert type_field in result["test"]["type_"] -def test_instance_execute_query_parameters_not_supported_types(): +@pytest.mark.parametrize( + "value, sql_type, proto_result", + [ + (1.3, SqlType.Float32(), {"type_": {"float32_type": {}}, "float_value": 1.3}), + (1.3, SqlType.Float64(), {"type_": {"float64_type": {}}, "float_value": 1.3}), + ( + [1, 2, 3, 4], + SqlType.Array(SqlType.Int64()), + { + "type_": {"array_type": {"element_type": {"int64_type": {}}}}, + "array_value": { + "values": [ + {"int_value": 1}, + {"int_value": 2}, + {"int_value": 3}, + {"int_value": 4}, + ] + }, + }, + ), + ( + [1, None, 2, None], + SqlType.Array(SqlType.Int64()), + { + "type_": {"array_type": {"element_type": {"int64_type": {}}}}, + "array_value": { + "values": [ + {"int_value": 1}, + {}, + {"int_value": 2}, + {}, + ] + }, + }, + ), + ( + None, + SqlType.Array(SqlType.Int64()), + { + "type_": {"array_type": {"element_type": {"int64_type": {}}}}, + }, + ), + ( + ["foo", "bar", None], + SqlType.Array(SqlType.String()), + { + "type_": {"array_type": {"element_type": {"string_type": {}}}}, + "array_value": { + "values": [ + {"string_value": "foo"}, + {"string_value": "bar"}, + {}, + ] + }, + }, + ), + ( + [b"foo", b"bar", None], + SqlType.Array(SqlType.Bytes()), + { + "type_": {"array_type": {"element_type": {"bytes_type": {}}}}, + "array_value": { + "values": [ + {"bytes_value": b"foo"}, + {"bytes_value": b"bar"}, + {}, + ] + }, + }, + ), + ( + [ + datetime.datetime.fromtimestamp(1000, tz=datetime.timezone.utc), + datetime.datetime.fromtimestamp(2000, tz=datetime.timezone.utc), + None, + ], + SqlType.Array(SqlType.Timestamp()), + { + "type_": {"array_type": {"element_type": {"timestamp_type": {}}}}, + "array_value": { + "values": [ + {"timestamp_value": timestamp_pb2.Timestamp(seconds=1000)}, + {"timestamp_value": timestamp_pb2.Timestamp(seconds=2000)}, + {}, + ], + }, + }, + ), + ( + [True, False, None], + SqlType.Array(SqlType.Bool()), + { + "type_": 
{"array_type": {"element_type": {"bool_type": {}}}}, + "array_value": { + "values": [ + {"bool_value": True}, + {"bool_value": False}, + {}, + ], + }, + }, + ), + ( + [datetime.date(2025, 1, 16), datetime.date(2025, 1, 17), None], + SqlType.Array(SqlType.Date()), + { + "type_": {"array_type": {"element_type": {"date_type": {}}}}, + "array_value": { + "values": [ + {"date_value": date_pb2.Date(year=2025, month=1, day=16)}, + {"date_value": date_pb2.Date(year=2025, month=1, day=17)}, + {}, + ], + }, + }, + ), + ( + [1.1, 1.2, None], + SqlType.Array(SqlType.Float32()), + { + "type_": {"array_type": {"element_type": {"float32_type": {}}}}, + "array_value": { + "values": [ + {"float_value": 1.1}, + {"float_value": 1.2}, + {}, + ] + }, + }, + ), + ( + [1.1, 1.2, None], + SqlType.Array(SqlType.Float64()), + { + "type_": {"array_type": {"element_type": {"float64_type": {}}}}, + "array_value": { + "values": [ + {"float_value": 1.1}, + {"float_value": 1.2}, + {}, + ] + }, + }, + ), + ], +) +def test_execute_query_explicit_parameter_parsing(value, sql_type, proto_result): + result = _format_execute_query_params( + {"param_name": value}, {"param_name": sql_type} + ) + print(result) + assert result["param_name"] == proto_result + + +def test_execute_query_parameters_not_supported_types(): with pytest.raises(ValueError): _format_execute_query_params({"test1": 1.1}, None) @@ -105,14 +260,6 @@ def test_instance_execute_query_parameters_not_supported_types(): }, ) - with pytest.raises(NotImplementedError, match="not supported"): - _format_execute_query_params( - {"test1": [1]}, - { - "test1": SqlType.Array(SqlType.Int64()), - }, - ) - with pytest.raises(NotImplementedError, match="not supported"): _format_execute_query_params( {"test1": Struct([("field1", 1)])}, @@ -132,3 +279,16 @@ def test_instance_execute_query_parameters_not_match(): "test2": SqlType.String(), }, ) + + +def test_array_params_enforce_element_type(): + with pytest.raises(ValueError, match="Error when parsing parameter p") as e1: + _format_execute_query_params( + {"p": ["a", 1, None]}, {"p": SqlType.Array(SqlType.String())} + ) + with pytest.raises(ValueError, match="Error when parsing parameter p") as e2: + _format_execute_query_params( + {"p": ["a", 1, None]}, {"p": SqlType.Array(SqlType.Int64())} + ) + assert "Expected query parameter of type str, got int" in str(e1.value.__cause__) + assert "Expected query parameter of type int, got str" in str(e2.value.__cause__) From c28965da49f135c9c5dc04e644c25f7c11980a4b Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Wed, 26 Feb 2025 11:03:16 -0800 Subject: [PATCH 849/892] fix: grpc channel refresh (#1087) * added failing test * prevent _start_bg_task from running * let bg task run * invalidate transport stubs after channel refresh * added sync implementation --- .../cloud/bigtable/data/_async/client.py | 9 ++++-- .../bigtable/data/_sync_autogen/client.py | 8 +++-- .../tests/system/data/test_system_async.py | 32 +++++++++++++++++++ .../tests/system/data/test_system_autogen.py | 27 ++++++++++++++++ 4 files changed, 70 insertions(+), 6 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py index ecf481889ac4..5c9649c41fd5 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py @@ -164,8 +164,8 @@ def __init__( if "pool_size" in kwargs: warnings.warn("pool_size no 
longer supported") # set up client info headers for veneer library - client_info = DEFAULT_CLIENT_INFO - client_info.client_library_version = self._client_version() + self.client_info = DEFAULT_CLIENT_INFO + self.client_info.client_library_version = self._client_version() # parse client options if type(client_options) is dict: client_options = client_options_lib.from_dict(client_options) @@ -196,7 +196,7 @@ def __init__( self._gapic_client = CrossSync.GapicClient( credentials=credentials, client_options=client_options, - client_info=client_info, + client_info=self.client_info, transport=lambda *args, **kwargs: TransportType( *args, **kwargs, channel=custom_channel ), @@ -371,6 +371,9 @@ async def _manage_channel( await self._ping_and_warm_instances(channel=new_channel) # cycle channel out of use, with long grace window before closure self.transport._grpc_channel = new_channel + # invalidate caches + self.transport._stubs = {} + self.transport._prep_wrapped_messages(self.client_info) # give old_channel a chance to complete existing rpcs if CrossSync.is_async: await old_channel.close(grace_period) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/client.py index 492e86224036..b89e232070b4 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/client.py @@ -114,8 +114,8 @@ def __init__( """ if "pool_size" in kwargs: warnings.warn("pool_size no longer supported") - client_info = DEFAULT_CLIENT_INFO - client_info.client_library_version = self._client_version() + self.client_info = DEFAULT_CLIENT_INFO + self.client_info.client_library_version = self._client_version() if type(client_options) is dict: client_options = client_options_lib.from_dict(client_options) client_options = cast( @@ -143,7 +143,7 @@ def __init__( self._gapic_client = CrossSync._Sync_Impl.GapicClient( credentials=credentials, client_options=client_options, - client_info=client_info, + client_info=self.client_info, transport=lambda *args, **kwargs: TransportType( *args, **kwargs, channel=custom_channel ), @@ -284,6 +284,8 @@ def _manage_channel( new_channel = self.transport.create_channel() self._ping_and_warm_instances(channel=new_channel) self.transport._grpc_channel = new_channel + self.transport._stubs = {} + self.transport._prep_wrapped_messages(self.client_info) if grace_period: self._is_closed.wait(grace_period) old_channel.close() diff --git a/packages/google-cloud-bigtable/tests/system/data/test_system_async.py b/packages/google-cloud-bigtable/tests/system/data/test_system_async.py index 5c11c1990bb5..d10c71d78a71 100644 --- a/packages/google-cloud-bigtable/tests/system/data/test_system_async.py +++ b/packages/google-cloud-bigtable/tests/system/data/test_system_async.py @@ -207,6 +207,38 @@ async def test_ping_and_warm(self, client, table): assert len(results) == 1 assert results[0] is None + @CrossSync.pytest + async def test_channel_refresh(self, table_id, instance_id, temp_rows): + """ + change grpc channel to refresh after 1 second. 
Schedule a read_rows call after refresh, + to ensure new channel works + """ + await temp_rows.add_row(b"row_key_1") + await temp_rows.add_row(b"row_key_2") + project = os.getenv("GOOGLE_CLOUD_PROJECT") or None + client = CrossSync.DataClient(project=project) + # start custom refresh task + try: + client._channel_refresh_task = CrossSync.create_task( + client._manage_channel, + refresh_interval_min=1, + refresh_interval_max=1, + sync_executor=client._executor, + ) + # let task run + await CrossSync.yield_to_event_loop() + async with client.get_table(instance_id, table_id) as table: + rows = await table.read_rows({}) + first_channel = client.transport.grpc_channel + assert len(rows) == 2 + await CrossSync.sleep(2) + rows_after_refresh = await table.read_rows({}) + assert len(rows_after_refresh) == 2 + assert client.transport.grpc_channel is not first_channel + print(table) + finally: + await client.close() + @CrossSync.pytest @pytest.mark.usefixtures("table") @CrossSync.Retry( diff --git a/packages/google-cloud-bigtable/tests/system/data/test_system_autogen.py b/packages/google-cloud-bigtable/tests/system/data/test_system_autogen.py index cbaaf5a8fb6f..18d65b21c5a0 100644 --- a/packages/google-cloud-bigtable/tests/system/data/test_system_autogen.py +++ b/packages/google-cloud-bigtable/tests/system/data/test_system_autogen.py @@ -165,6 +165,33 @@ def test_ping_and_warm(self, client, table): assert len(results) == 1 assert results[0] is None + def test_channel_refresh(self, table_id, instance_id, temp_rows): + """change grpc channel to refresh after 1 second. Schedule a read_rows call after refresh, + to ensure new channel works""" + temp_rows.add_row(b"row_key_1") + temp_rows.add_row(b"row_key_2") + project = os.getenv("GOOGLE_CLOUD_PROJECT") or None + client = CrossSync._Sync_Impl.DataClient(project=project) + try: + client._channel_refresh_task = CrossSync._Sync_Impl.create_task( + client._manage_channel, + refresh_interval_min=1, + refresh_interval_max=1, + sync_executor=client._executor, + ) + CrossSync._Sync_Impl.yield_to_event_loop() + with client.get_table(instance_id, table_id) as table: + rows = table.read_rows({}) + first_channel = client.transport.grpc_channel + assert len(rows) == 2 + CrossSync._Sync_Impl.sleep(2) + rows_after_refresh = table.read_rows({}) + assert len(rows_after_refresh) == 2 + assert client.transport.grpc_channel is not first_channel + print(table) + finally: + client.close() + @pytest.mark.usefixtures("table") @CrossSync._Sync_Impl.Retry( predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 From 8463f5f3cdb3094c053a86f146ceae10919ecedb Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Wed, 26 Feb 2025 12:49:27 -0800 Subject: [PATCH 850/892] chore(main): release 2.29.0 (#1079) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- .../.release-please-manifest.json | 2 +- packages/google-cloud-bigtable/CHANGELOG.md | 12 ++++++++++++ .../google/cloud/bigtable/gapic_version.py | 2 +- .../google/cloud/bigtable_admin/gapic_version.py | 2 +- .../google/cloud/bigtable_admin_v2/gapic_version.py | 2 +- .../google/cloud/bigtable_v2/gapic_version.py | 2 +- 6 files changed, 17 insertions(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/.release-please-manifest.json b/packages/google-cloud-bigtable/.release-please-manifest.json index a0a9763a46c6..26729a93f7ec 100644 --- a/packages/google-cloud-bigtable/.release-please-manifest.json +++ 
b/packages/google-cloud-bigtable/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "2.28.1" + ".": "2.29.0" } \ No newline at end of file diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index ba398feff37a..75ec4c5acc26 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,18 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.29.0](https://github.com/googleapis/python-bigtable/compare/v2.28.1...v2.29.0) (2025-02-26) + + +### Features + +* Add support for array and float32 SQL query params ([#1078](https://github.com/googleapis/python-bigtable/issues/1078)) ([89b8da8](https://github.com/googleapis/python-bigtable/commit/89b8da8a445aeb08854d9fa77cbc0e4fc042c87f)) + + +### Bug Fixes + +* Grpc channel refresh ([#1087](https://github.com/googleapis/python-bigtable/issues/1087)) ([f44b36b](https://github.com/googleapis/python-bigtable/commit/f44b36bf51e3e4e3b8a774f96e682d3f1f8d4b16)) + ## [2.28.1](https://github.com/googleapis/python-bigtable/compare/v2.28.0...v2.28.1) (2025-01-17) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py index c0c0d8b11009..07483fa04d24 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.28.1" # {x-release-please-version} +__version__ = "2.29.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py index c0c0d8b11009..07483fa04d24 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.28.1" # {x-release-please-version} +__version__ = "2.29.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py index c0c0d8b11009..07483fa04d24 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.28.1" # {x-release-please-version} +__version__ = "2.29.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py index c0c0d8b11009..07483fa04d24 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "2.28.1" # {x-release-please-version} +__version__ = "2.29.0" # {x-release-please-version} From d56360c9267b0be61caff3f91ded299551252e39 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Mon, 3 Mar 2025 11:44:25 -0500 Subject: [PATCH 851/892] chore(python): conditionally load credentials in .kokoro/build.sh (#1086) Source-Link: https://github.com/googleapis/synthtool/commit/aa69fb74717c8f4c58c60f8cc101d3f4b2c07b09 Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:f016446d6e520e5fb552c45b110cba3f217bffdd3d06bdddd076e9e6d13266cf Co-authored-by: Owl Bot --- .../.github/.OwlBot.lock.yaml | 4 +- .../google-cloud-bigtable/.kokoro/build.sh | 20 +- .../.kokoro/docker/docs/requirements.in | 1 + .../.kokoro/docker/docs/requirements.txt | 243 +++++++++++++++++- .../.kokoro/publish-docs.sh | 4 - 5 files changed, 251 insertions(+), 21 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 10cf433a8b00..3f7634f25f8e 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:8ff1efe878e18bd82a0fb7b70bb86f77e7ab6901fed394440b6135db0ba8d84a -# created: 2025-01-09T12:01:16.422459506Z + digest: sha256:f016446d6e520e5fb552c45b110cba3f217bffdd3d06bdddd076e9e6d13266cf +# created: 2025-02-21T19:32:52.01306189Z diff --git a/packages/google-cloud-bigtable/.kokoro/build.sh b/packages/google-cloud-bigtable/.kokoro/build.sh index b00036db318a..d41b45aa1dd0 100755 --- a/packages/google-cloud-bigtable/.kokoro/build.sh +++ b/packages/google-cloud-bigtable/.kokoro/build.sh @@ -15,11 +15,13 @@ set -eo pipefail +CURRENT_DIR=$(dirname "${BASH_SOURCE[0]}") + if [[ -z "${PROJECT_ROOT:-}" ]]; then - PROJECT_ROOT="github/python-bigtable" + PROJECT_ROOT=$(realpath "${CURRENT_DIR}/..") fi -cd "${PROJECT_ROOT}" +pushd "${PROJECT_ROOT}" # Disable buffering, so that the logs stream through. export PYTHONUNBUFFERED=1 @@ -28,10 +30,16 @@ export PYTHONUNBUFFERED=1 env | grep KOKORO # Setup service account credentials. -export GOOGLE_APPLICATION_CREDENTIALS=${KOKORO_GFILE_DIR}/service-account.json +if [[ -f "${KOKORO_GFILE_DIR}/service-account.json" ]] +then + export GOOGLE_APPLICATION_CREDENTIALS=${KOKORO_GFILE_DIR}/service-account.json +fi # Setup project id. -export PROJECT_ID=$(cat "${KOKORO_GFILE_DIR}/project-id.json") +if [[ -f "${KOKORO_GFILE_DIR}/project-id.json" ]] +then + export PROJECT_ID=$(cat "${KOKORO_GFILE_DIR}/project-id.json") +fi # If this is a continuous build, send the test log to the FlakyBot. # See https://github.com/googleapis/repo-automation-bots/tree/main/packages/flakybot. @@ -46,7 +54,7 @@ fi # If NOX_SESSION is set, it only runs the specified session, # otherwise run all the sessions. 
if [[ -n "${NOX_SESSION:-}" ]]; then - python3 -m nox -s ${NOX_SESSION:-} + python3 -m nox -s ${NOX_SESSION:-} else - python3 -m nox + python3 -m nox fi diff --git a/packages/google-cloud-bigtable/.kokoro/docker/docs/requirements.in b/packages/google-cloud-bigtable/.kokoro/docker/docs/requirements.in index 816817c672a1..586bd07037ae 100644 --- a/packages/google-cloud-bigtable/.kokoro/docker/docs/requirements.in +++ b/packages/google-cloud-bigtable/.kokoro/docker/docs/requirements.in @@ -1 +1,2 @@ nox +gcp-docuploader diff --git a/packages/google-cloud-bigtable/.kokoro/docker/docs/requirements.txt b/packages/google-cloud-bigtable/.kokoro/docker/docs/requirements.txt index f99a5c4aac7f..a9360a25b707 100644 --- a/packages/google-cloud-bigtable/.kokoro/docker/docs/requirements.txt +++ b/packages/google-cloud-bigtable/.kokoro/docker/docs/requirements.txt @@ -2,16 +2,124 @@ # This file is autogenerated by pip-compile with Python 3.10 # by the following command: # -# pip-compile --allow-unsafe --generate-hashes synthtool/gcp/templates/python_library/.kokoro/docker/docs/requirements.in +# pip-compile --allow-unsafe --generate-hashes requirements.in # -argcomplete==3.5.2 \ - --hash=sha256:036d020d79048a5d525bc63880d7a4b8d1668566b8a76daf1144c0bbe0f63472 \ - --hash=sha256:23146ed7ac4403b70bd6026402468942ceba34a6732255b9edf5b7354f68a6bb +argcomplete==3.5.3 \ + --hash=sha256:2ab2c4a215c59fd6caaff41a869480a23e8f6a5f910b266c1808037f4e375b61 \ + --hash=sha256:c12bf50eded8aebb298c7b7da7a5ff3ee24dffd9f5281867dfe1424b58c55392 # via nox +cachetools==5.5.0 \ + --hash=sha256:02134e8439cdc2ffb62023ce1debca2944c3f289d66bb17ead3ab3dede74b292 \ + --hash=sha256:2cc24fb4cbe39633fb7badd9db9ca6295d766d9c2995f245725a46715d050f2a + # via google-auth +certifi==2024.12.14 \ + --hash=sha256:1275f7a45be9464efc1173084eaa30f866fe2e47d389406136d332ed4967ec56 \ + --hash=sha256:b650d30f370c2b724812bee08008be0c4163b163ddaec3f2546c1caf65f191db + # via requests +charset-normalizer==3.4.1 \ + --hash=sha256:0167ddc8ab6508fe81860a57dd472b2ef4060e8d378f0cc555707126830f2537 \ + --hash=sha256:01732659ba9b5b873fc117534143e4feefecf3b2078b0a6a2e925271bb6f4cfa \ + --hash=sha256:01ad647cdd609225c5350561d084b42ddf732f4eeefe6e678765636791e78b9a \ + --hash=sha256:04432ad9479fa40ec0f387795ddad4437a2b50417c69fa275e212933519ff294 \ + --hash=sha256:0907f11d019260cdc3f94fbdb23ff9125f6b5d1039b76003b5b0ac9d6a6c9d5b \ + --hash=sha256:0924e81d3d5e70f8126529951dac65c1010cdf117bb75eb02dd12339b57749dd \ + --hash=sha256:09b26ae6b1abf0d27570633b2b078a2a20419c99d66fb2823173d73f188ce601 \ + --hash=sha256:09b5e6733cbd160dcc09589227187e242a30a49ca5cefa5a7edd3f9d19ed53fd \ + --hash=sha256:0af291f4fe114be0280cdd29d533696a77b5b49cfde5467176ecab32353395c4 \ + --hash=sha256:0f55e69f030f7163dffe9fd0752b32f070566451afe180f99dbeeb81f511ad8d \ + --hash=sha256:1a2bc9f351a75ef49d664206d51f8e5ede9da246602dc2d2726837620ea034b2 \ + --hash=sha256:22e14b5d70560b8dd51ec22863f370d1e595ac3d024cb8ad7d308b4cd95f8313 \ + --hash=sha256:234ac59ea147c59ee4da87a0c0f098e9c8d169f4dc2a159ef720f1a61bbe27cd \ + --hash=sha256:2369eea1ee4a7610a860d88f268eb39b95cb588acd7235e02fd5a5601773d4fa \ + --hash=sha256:237bdbe6159cff53b4f24f397d43c6336c6b0b42affbe857970cefbb620911c8 \ + --hash=sha256:28bf57629c75e810b6ae989f03c0828d64d6b26a5e205535585f96093e405ed1 \ + --hash=sha256:2967f74ad52c3b98de4c3b32e1a44e32975e008a9cd2a8cc8966d6a5218c5cb2 \ + --hash=sha256:2a75d49014d118e4198bcee5ee0a6f25856b29b12dbf7cd012791f8a6cc5c496 \ + 
--hash=sha256:2bdfe3ac2e1bbe5b59a1a63721eb3b95fc9b6817ae4a46debbb4e11f6232428d \ + --hash=sha256:2d074908e1aecee37a7635990b2c6d504cd4766c7bc9fc86d63f9c09af3fa11b \ + --hash=sha256:2fb9bd477fdea8684f78791a6de97a953c51831ee2981f8e4f583ff3b9d9687e \ + --hash=sha256:311f30128d7d333eebd7896965bfcfbd0065f1716ec92bd5638d7748eb6f936a \ + --hash=sha256:329ce159e82018d646c7ac45b01a430369d526569ec08516081727a20e9e4af4 \ + --hash=sha256:345b0426edd4e18138d6528aed636de7a9ed169b4aaf9d61a8c19e39d26838ca \ + --hash=sha256:363e2f92b0f0174b2f8238240a1a30142e3db7b957a5dd5689b0e75fb717cc78 \ + --hash=sha256:3a3bd0dcd373514dcec91c411ddb9632c0d7d92aed7093b8c3bbb6d69ca74408 \ + --hash=sha256:3bed14e9c89dcb10e8f3a29f9ccac4955aebe93c71ae803af79265c9ca5644c5 \ + --hash=sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3 \ + --hash=sha256:44ecbf16649486d4aebafeaa7ec4c9fed8b88101f4dd612dcaf65d5e815f837f \ + --hash=sha256:4532bff1b8421fd0a320463030c7520f56a79c9024a4e88f01c537316019005a \ + --hash=sha256:49402233c892a461407c512a19435d1ce275543138294f7ef013f0b63d5d3765 \ + --hash=sha256:4c0907b1928a36d5a998d72d64d8eaa7244989f7aaaf947500d3a800c83a3fd6 \ + --hash=sha256:4d86f7aff21ee58f26dcf5ae81a9addbd914115cdebcbb2217e4f0ed8982e146 \ + --hash=sha256:5777ee0881f9499ed0f71cc82cf873d9a0ca8af166dfa0af8ec4e675b7df48e6 \ + --hash=sha256:5df196eb874dae23dcfb968c83d4f8fdccb333330fe1fc278ac5ceeb101003a9 \ + --hash=sha256:619a609aa74ae43d90ed2e89bdd784765de0a25ca761b93e196d938b8fd1dbbd \ + --hash=sha256:6e27f48bcd0957c6d4cb9d6fa6b61d192d0b13d5ef563e5f2ae35feafc0d179c \ + --hash=sha256:6ff8a4a60c227ad87030d76e99cd1698345d4491638dfa6673027c48b3cd395f \ + --hash=sha256:73d94b58ec7fecbc7366247d3b0b10a21681004153238750bb67bd9012414545 \ + --hash=sha256:7461baadb4dc00fd9e0acbe254e3d7d2112e7f92ced2adc96e54ef6501c5f176 \ + --hash=sha256:75832c08354f595c760a804588b9357d34ec00ba1c940c15e31e96d902093770 \ + --hash=sha256:7709f51f5f7c853f0fb938bcd3bc59cdfdc5203635ffd18bf354f6967ea0f824 \ + --hash=sha256:78baa6d91634dfb69ec52a463534bc0df05dbd546209b79a3880a34487f4b84f \ + --hash=sha256:7974a0b5ecd505609e3b19742b60cee7aa2aa2fb3151bc917e6e2646d7667dcf \ + --hash=sha256:7a4f97a081603d2050bfaffdefa5b02a9ec823f8348a572e39032caa8404a487 \ + --hash=sha256:7b1bef6280950ee6c177b326508f86cad7ad4dff12454483b51d8b7d673a2c5d \ + --hash=sha256:7d053096f67cd1241601111b698f5cad775f97ab25d81567d3f59219b5f1adbd \ + --hash=sha256:804a4d582ba6e5b747c625bf1255e6b1507465494a40a2130978bda7b932c90b \ + --hash=sha256:807f52c1f798eef6cf26beb819eeb8819b1622ddfeef9d0977a8502d4db6d534 \ + --hash=sha256:80ed5e856eb7f30115aaf94e4a08114ccc8813e6ed1b5efa74f9f82e8509858f \ + --hash=sha256:8417cb1f36cc0bc7eaba8ccb0e04d55f0ee52df06df3ad55259b9a323555fc8b \ + --hash=sha256:8436c508b408b82d87dc5f62496973a1805cd46727c34440b0d29d8a2f50a6c9 \ + --hash=sha256:89149166622f4db9b4b6a449256291dc87a99ee53151c74cbd82a53c8c2f6ccd \ + --hash=sha256:8bfa33f4f2672964266e940dd22a195989ba31669bd84629f05fab3ef4e2d125 \ + --hash=sha256:8c60ca7339acd497a55b0ea5d506b2a2612afb2826560416f6894e8b5770d4a9 \ + --hash=sha256:91b36a978b5ae0ee86c394f5a54d6ef44db1de0815eb43de826d41d21e4af3de \ + --hash=sha256:955f8851919303c92343d2f66165294848d57e9bba6cf6e3625485a70a038d11 \ + --hash=sha256:97f68b8d6831127e4787ad15e6757232e14e12060bec17091b85eb1486b91d8d \ + --hash=sha256:9b23ca7ef998bc739bf6ffc077c2116917eabcc901f88da1b9856b210ef63f35 \ + --hash=sha256:9f0b8b1c6d84c8034a44893aba5e767bf9c7a211e313a9605d9c617d7083829f \ + 
--hash=sha256:aabfa34badd18f1da5ec1bc2715cadc8dca465868a4e73a0173466b688f29dda \ + --hash=sha256:ab36c8eb7e454e34e60eb55ca5d241a5d18b2c6244f6827a30e451c42410b5f7 \ + --hash=sha256:b010a7a4fd316c3c484d482922d13044979e78d1861f0e0650423144c616a46a \ + --hash=sha256:b1ac5992a838106edb89654e0aebfc24f5848ae2547d22c2c3f66454daa11971 \ + --hash=sha256:b7b2d86dd06bfc2ade3312a83a5c364c7ec2e3498f8734282c6c3d4b07b346b8 \ + --hash=sha256:b97e690a2118911e39b4042088092771b4ae3fc3aa86518f84b8cf6888dbdb41 \ + --hash=sha256:bc2722592d8998c870fa4e290c2eec2c1569b87fe58618e67d38b4665dfa680d \ + --hash=sha256:c0429126cf75e16c4f0ad00ee0eae4242dc652290f940152ca8c75c3a4b6ee8f \ + --hash=sha256:c30197aa96e8eed02200a83fba2657b4c3acd0f0aa4bdc9f6c1af8e8962e0757 \ + --hash=sha256:c4c3e6da02df6fa1410a7680bd3f63d4f710232d3139089536310d027950696a \ + --hash=sha256:c75cb2a3e389853835e84a2d8fb2b81a10645b503eca9bcb98df6b5a43eb8886 \ + --hash=sha256:c96836c97b1238e9c9e3fe90844c947d5afbf4f4c92762679acfe19927d81d77 \ + --hash=sha256:d7f50a1f8c450f3925cb367d011448c39239bb3eb4117c36a6d354794de4ce76 \ + --hash=sha256:d973f03c0cb71c5ed99037b870f2be986c3c05e63622c017ea9816881d2dd247 \ + --hash=sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85 \ + --hash=sha256:d9c3cdf5390dcd29aa8056d13e8e99526cda0305acc038b96b30352aff5ff2bb \ + --hash=sha256:dad3e487649f498dd991eeb901125411559b22e8d7ab25d3aeb1af367df5efd7 \ + --hash=sha256:dccbe65bd2f7f7ec22c4ff99ed56faa1e9f785482b9bbd7c717e26fd723a1d1e \ + --hash=sha256:dd78cfcda14a1ef52584dbb008f7ac81c1328c0f58184bf9a84c49c605002da6 \ + --hash=sha256:e218488cd232553829be0664c2292d3af2eeeb94b32bea483cf79ac6a694e037 \ + --hash=sha256:e358e64305fe12299a08e08978f51fc21fac060dcfcddd95453eabe5b93ed0e1 \ + --hash=sha256:ea0d8d539afa5eb2728aa1932a988a9a7af94f18582ffae4bc10b3fbdad0626e \ + --hash=sha256:eab677309cdb30d047996b36d34caeda1dc91149e4fdca0b1a039b3f79d9a807 \ + --hash=sha256:eb8178fe3dba6450a3e024e95ac49ed3400e506fd4e9e5c32d30adda88cbd407 \ + --hash=sha256:ecddf25bee22fe4fe3737a399d0d177d72bc22be6913acfab364b40bce1ba83c \ + --hash=sha256:eea6ee1db730b3483adf394ea72f808b6e18cf3cb6454b4d86e04fa8c4327a12 \ + --hash=sha256:f08ff5e948271dc7e18a35641d2f11a4cd8dfd5634f55228b691e62b37125eb3 \ + --hash=sha256:f30bf9fd9be89ecb2360c7d94a711f00c09b976258846efe40db3d05828e8089 \ + --hash=sha256:fa88b843d6e211393a37219e6a1c1df99d35e8fd90446f1118f4216e307e48cd \ + --hash=sha256:fc54db6c8593ef7d4b2a331b58653356cf04f67c960f584edb7c3d8c97e8f39e \ + --hash=sha256:fd4ec41f914fa74ad1b8304bbc634b3de73d2a0889bd32076342a573e0779e00 \ + --hash=sha256:ffc9202a29ab3920fa812879e95a9e78b2465fd10be7fcbd042899695d75e616 + # via requests +click==8.1.8 \ + --hash=sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2 \ + --hash=sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a + # via gcp-docuploader colorlog==6.9.0 \ --hash=sha256:5906e71acd67cb07a71e779c47c4bcb45fb8c2993eebe9e5adcd6a6f1b283eff \ --hash=sha256:bfba54a1b93b94f54e1f4fe48395725a3d92fd2a4af702f6bd70946bdc0c6ac2 - # via nox + # via + # gcp-docuploader + # nox distlib==0.3.9 \ --hash=sha256:47f8c22fd27c27e25a65601af709b38e4f0a45ea4fc2e710f65755fa8caaaf87 \ --hash=sha256:a60f20dea646b8a33f3e7772f74dc0b2d0772d2837ee1342a00645c81edf9403 @@ -20,10 +128,78 @@ filelock==3.16.1 \ --hash=sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0 \ --hash=sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435 # via virtualenv +gcp-docuploader==0.6.5 \ + 
--hash=sha256:30221d4ac3e5a2b9c69aa52fdbef68cc3f27d0e6d0d90e220fc024584b8d2318 \ + --hash=sha256:b7458ef93f605b9d46a4bf3a8dc1755dad1f31d030c8679edf304e343b347eea + # via -r requirements.in +google-api-core==2.24.0 \ + --hash=sha256:10d82ac0fca69c82a25b3efdeefccf6f28e02ebb97925a8cce8edbfe379929d9 \ + --hash=sha256:e255640547a597a4da010876d333208ddac417d60add22b6851a0c66a831fcaf + # via + # google-cloud-core + # google-cloud-storage +google-auth==2.37.0 \ + --hash=sha256:0054623abf1f9c83492c63d3f47e77f0a544caa3d40b2d98e099a611c2dd5d00 \ + --hash=sha256:42664f18290a6be591be5329a96fe30184be1a1badb7292a7f686a9659de9ca0 + # via + # google-api-core + # google-cloud-core + # google-cloud-storage +google-cloud-core==2.4.1 \ + --hash=sha256:9b7749272a812bde58fff28868d0c5e2f585b82f37e09a1f6ed2d4d10f134073 \ + --hash=sha256:a9e6a4422b9ac5c29f79a0ede9485473338e2ce78d91f2370c01e730eab22e61 + # via google-cloud-storage +google-cloud-storage==2.19.0 \ + --hash=sha256:aeb971b5c29cf8ab98445082cbfe7b161a1f48ed275822f59ed3f1524ea54fba \ + --hash=sha256:cd05e9e7191ba6cb68934d8eb76054d9be4562aa89dbc4236feee4d7d51342b2 + # via gcp-docuploader +google-crc32c==1.6.0 \ + --hash=sha256:05e2d8c9a2f853ff116db9706b4a27350587f341eda835f46db3c0a8c8ce2f24 \ + --hash=sha256:18e311c64008f1f1379158158bb3f0c8d72635b9eb4f9545f8cf990c5668e59d \ + --hash=sha256:236c87a46cdf06384f614e9092b82c05f81bd34b80248021f729396a78e55d7e \ + --hash=sha256:35834855408429cecf495cac67ccbab802de269e948e27478b1e47dfb6465e57 \ + --hash=sha256:386122eeaaa76951a8196310432c5b0ef3b53590ef4c317ec7588ec554fec5d2 \ + --hash=sha256:40b05ab32a5067525670880eb5d169529089a26fe35dce8891127aeddc1950e8 \ + --hash=sha256:48abd62ca76a2cbe034542ed1b6aee851b6f28aaca4e6551b5599b6f3ef175cc \ + --hash=sha256:50cf2a96da226dcbff8671233ecf37bf6e95de98b2a2ebadbfdf455e6d05df42 \ + --hash=sha256:51c4f54dd8c6dfeb58d1df5e4f7f97df8abf17a36626a217f169893d1d7f3e9f \ + --hash=sha256:5bcc90b34df28a4b38653c36bb5ada35671ad105c99cfe915fb5bed7ad6924aa \ + --hash=sha256:62f6d4a29fea082ac4a3c9be5e415218255cf11684ac6ef5488eea0c9132689b \ + --hash=sha256:6eceb6ad197656a1ff49ebfbbfa870678c75be4344feb35ac1edf694309413dc \ + --hash=sha256:7aec8e88a3583515f9e0957fe4f5f6d8d4997e36d0f61624e70469771584c760 \ + --hash=sha256:91ca8145b060679ec9176e6de4f89b07363d6805bd4760631ef254905503598d \ + --hash=sha256:a184243544811e4a50d345838a883733461e67578959ac59964e43cca2c791e7 \ + --hash=sha256:a9e4b426c3702f3cd23b933436487eb34e01e00327fac20c9aebb68ccf34117d \ + --hash=sha256:bb0966e1c50d0ef5bc743312cc730b533491d60585a9a08f897274e57c3f70e0 \ + --hash=sha256:bb8b3c75bd157010459b15222c3fd30577042a7060e29d42dabce449c087f2b3 \ + --hash=sha256:bd5e7d2445d1a958c266bfa5d04c39932dc54093fa391736dbfdb0f1929c1fb3 \ + --hash=sha256:c87d98c7c4a69066fd31701c4e10d178a648c2cac3452e62c6b24dc51f9fcc00 \ + --hash=sha256:d2952396dc604544ea7476b33fe87faedc24d666fb0c2d5ac971a2b9576ab871 \ + --hash=sha256:d8797406499f28b5ef791f339594b0b5fdedf54e203b5066675c406ba69d705c \ + --hash=sha256:d9e9913f7bd69e093b81da4535ce27af842e7bf371cde42d1ae9e9bd382dc0e9 \ + --hash=sha256:e2806553238cd076f0a55bddab37a532b53580e699ed8e5606d0de1f856b5205 \ + --hash=sha256:ebab974b1687509e5c973b5c4b8b146683e101e102e17a86bd196ecaa4d099fc \ + --hash=sha256:ed767bf4ba90104c1216b68111613f0d5926fb3780660ea1198fc469af410e9d \ + --hash=sha256:f7a1fc29803712f80879b0806cb83ab24ce62fc8daf0569f2204a0cfd7f68ed4 + # via + # google-cloud-storage + # google-resumable-media +google-resumable-media==2.7.2 \ + 
--hash=sha256:3ce7551e9fe6d99e9a126101d2536612bb73486721951e9562fee0f90c6ababa \ + --hash=sha256:5280aed4629f2b60b847b0d42f9857fd4935c11af266744df33d8074cae92fe0 + # via google-cloud-storage +googleapis-common-protos==1.66.0 \ + --hash=sha256:c3e7b33d15fdca5374cc0a7346dd92ffa847425cc4ea941d970f13680052ec8c \ + --hash=sha256:d7abcd75fabb2e0ec9f74466401f6c119a0b498e27370e9be4c94cb7e382b8ed + # via google-api-core +idna==3.10 \ + --hash=sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9 \ + --hash=sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3 + # via requests nox==2024.10.9 \ --hash=sha256:1d36f309a0a2a853e9bccb76bbef6bb118ba92fa92674d15604ca99adeb29eab \ --hash=sha256:7aa9dc8d1c27e9f45ab046ffd1c3b2c4f7c91755304769df231308849ebded95 - # via -r synthtool/gcp/templates/python_library/.kokoro/docker/docs/requirements.in + # via -r requirements.in packaging==24.2 \ --hash=sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759 \ --hash=sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f @@ -32,6 +208,51 @@ platformdirs==4.3.6 \ --hash=sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907 \ --hash=sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb # via virtualenv +proto-plus==1.25.0 \ + --hash=sha256:c91fc4a65074ade8e458e95ef8bac34d4008daa7cce4a12d6707066fca648961 \ + --hash=sha256:fbb17f57f7bd05a68b7707e745e26528b0b3c34e378db91eef93912c54982d91 + # via google-api-core +protobuf==5.29.3 \ + --hash=sha256:0a18ed4a24198528f2333802eb075e59dea9d679ab7a6c5efb017a59004d849f \ + --hash=sha256:0eb32bfa5219fc8d4111803e9a690658aa2e6366384fd0851064b963b6d1f2a7 \ + --hash=sha256:3ea51771449e1035f26069c4c7fd51fba990d07bc55ba80701c78f886bf9c888 \ + --hash=sha256:5da0f41edaf117bde316404bad1a486cb4ededf8e4a54891296f648e8e076620 \ + --hash=sha256:6ce8cc3389a20693bfde6c6562e03474c40851b44975c9b2bf6df7d8c4f864da \ + --hash=sha256:84a57163a0ccef3f96e4b6a20516cedcf5bb3a95a657131c5c3ac62200d23252 \ + --hash=sha256:a4fa6f80816a9a0678429e84973f2f98cbc218cca434abe8db2ad0bffc98503a \ + --hash=sha256:a8434404bbf139aa9e1300dbf989667a83d42ddda9153d8ab76e0d5dcaca484e \ + --hash=sha256:b89c115d877892a512f79a8114564fb435943b59067615894c3b13cd3e1fa107 \ + --hash=sha256:c027e08a08be10b67c06bf2370b99c811c466398c357e615ca88c91c07f0910f \ + --hash=sha256:daaf63f70f25e8689c072cfad4334ca0ac1d1e05a92fc15c54eb9cf23c3efd84 + # via + # gcp-docuploader + # google-api-core + # googleapis-common-protos + # proto-plus +pyasn1==0.6.1 \ + --hash=sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629 \ + --hash=sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034 + # via + # pyasn1-modules + # rsa +pyasn1-modules==0.4.1 \ + --hash=sha256:49bfa96b45a292b711e986f222502c1c9a5e1f4e568fc30e2574a6c7d07838fd \ + --hash=sha256:c28e2dbf9c06ad61c71a075c7e0f9fd0f1b0bb2d2ad4377f240d33ac2ab60a7c + # via google-auth +requests==2.32.3 \ + --hash=sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760 \ + --hash=sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6 + # via + # google-api-core + # google-cloud-storage +rsa==4.9 \ + --hash=sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7 \ + --hash=sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21 + # via google-auth +six==1.17.0 \ + --hash=sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274 \ + 
--hash=sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81 + # via gcp-docuploader tomli==2.2.1 \ --hash=sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6 \ --hash=sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd \ @@ -66,7 +287,11 @@ tomli==2.2.1 \ --hash=sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a \ --hash=sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7 # via nox -virtualenv==20.28.0 \ - --hash=sha256:23eae1b4516ecd610481eda647f3a7c09aea295055337331bb4e6892ecce47b0 \ - --hash=sha256:2c9c3262bb8e7b87ea801d715fae4495e6032450c71d2309be9550e7364049aa +urllib3==2.3.0 \ + --hash=sha256:1cee9ad369867bfdbbb48b7dd50374c0967a0bb7710050facf0dd6911440e3df \ + --hash=sha256:f8c5449b3cf0861679ce7e0503c7b44b5ec981bec0d1d3795a07f1ba96f0204d + # via requests +virtualenv==20.28.1 \ + --hash=sha256:412773c85d4dab0409b83ec36f7a6499e72eaf08c80e81e9576bca61831c71cb \ + --hash=sha256:5d34ab240fdb5d21549b76f9e8ff3af28252f5499fb6d6f031adac4e5a8c5329 # via nox diff --git a/packages/google-cloud-bigtable/.kokoro/publish-docs.sh b/packages/google-cloud-bigtable/.kokoro/publish-docs.sh index 233205d580e9..4ed4aaf1346f 100755 --- a/packages/google-cloud-bigtable/.kokoro/publish-docs.sh +++ b/packages/google-cloud-bigtable/.kokoro/publish-docs.sh @@ -20,10 +20,6 @@ export PYTHONUNBUFFERED=1 export PATH="${HOME}/.local/bin:${PATH}" -# Install nox -python3.10 -m pip install --require-hashes -r .kokoro/requirements.txt -python3.10 -m nox --version - # build docs nox -s docs From b45592ee0fef4500f989086ec95d49d5bf95b28e Mon Sep 17 00:00:00 2001 From: Anthonios Partheniou Date: Mon, 10 Mar 2025 11:01:57 -0400 Subject: [PATCH 852/892] chore: remove unused files (#1091) --- .../.github/.OwlBot.lock.yaml | 4 +- .../.kokoro/docker/docs/Dockerfile | 89 --- .../.kokoro/docker/docs/fetch_gpg_keys.sh | 45 -- .../.kokoro/docker/docs/requirements.in | 2 - .../.kokoro/docker/docs/requirements.txt | 297 ---------- .../.kokoro/docs/common.cfg | 66 --- .../.kokoro/docs/docs-presubmit.cfg | 28 - .../.kokoro/docs/docs.cfg | 1 - .../.kokoro/publish-docs.sh | 58 -- .../google-cloud-bigtable/.kokoro/release.sh | 29 - .../.kokoro/release/common.cfg | 43 -- .../.kokoro/release/release.cfg | 1 - .../.kokoro/requirements.in | 11 - .../.kokoro/requirements.txt | 509 ------------------ 14 files changed, 2 insertions(+), 1181 deletions(-) delete mode 100644 packages/google-cloud-bigtable/.kokoro/docker/docs/Dockerfile delete mode 100755 packages/google-cloud-bigtable/.kokoro/docker/docs/fetch_gpg_keys.sh delete mode 100644 packages/google-cloud-bigtable/.kokoro/docker/docs/requirements.in delete mode 100644 packages/google-cloud-bigtable/.kokoro/docker/docs/requirements.txt delete mode 100644 packages/google-cloud-bigtable/.kokoro/docs/common.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/docs/docs-presubmit.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/docs/docs.cfg delete mode 100755 packages/google-cloud-bigtable/.kokoro/publish-docs.sh delete mode 100755 packages/google-cloud-bigtable/.kokoro/release.sh delete mode 100644 packages/google-cloud-bigtable/.kokoro/release/common.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/release/release.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/requirements.in delete mode 100644 packages/google-cloud-bigtable/.kokoro/requirements.txt diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml 
b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index 3f7634f25f8e..c631e1f7d7e9 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:f016446d6e520e5fb552c45b110cba3f217bffdd3d06bdddd076e9e6d13266cf -# created: 2025-02-21T19:32:52.01306189Z + digest: sha256:5581906b957284864632cde4e9c51d1cc66b0094990b27e689132fe5cd036046 +# created: 2025-03-05 diff --git a/packages/google-cloud-bigtable/.kokoro/docker/docs/Dockerfile b/packages/google-cloud-bigtable/.kokoro/docker/docs/Dockerfile deleted file mode 100644 index e5410e296bd8..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/docker/docs/Dockerfile +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from ubuntu:24.04 - -ENV DEBIAN_FRONTEND noninteractive - -# Ensure local Python is preferred over distribution Python. -ENV PATH /usr/local/bin:$PATH - -# Install dependencies. -RUN apt-get update \ - && apt-get install -y --no-install-recommends \ - apt-transport-https \ - build-essential \ - ca-certificates \ - curl \ - dirmngr \ - git \ - gpg-agent \ - graphviz \ - libbz2-dev \ - libdb5.3-dev \ - libexpat1-dev \ - libffi-dev \ - liblzma-dev \ - libreadline-dev \ - libsnappy-dev \ - libssl-dev \ - libsqlite3-dev \ - portaudio19-dev \ - redis-server \ - software-properties-common \ - ssh \ - sudo \ - tcl \ - tcl-dev \ - tk \ - tk-dev \ - uuid-dev \ - wget \ - zlib1g-dev \ - && add-apt-repository universe \ - && apt-get update \ - && apt-get -y install jq \ - && apt-get clean autoclean \ - && apt-get autoremove -y \ - && rm -rf /var/lib/apt/lists/* \ - && rm -f /var/cache/apt/archives/*.deb - - -###################### Install python 3.10.14 for docs/docfx session - -# Download python 3.10.14 -RUN wget https://www.python.org/ftp/python/3.10.14/Python-3.10.14.tgz - -# Extract files -RUN tar -xvf Python-3.10.14.tgz - -# Install python 3.10.14 -RUN ./Python-3.10.14/configure --enable-optimizations -RUN make altinstall - -ENV PATH /usr/local/bin/python3.10:$PATH - -###################### Install pip -RUN wget -O /tmp/get-pip.py 'https://bootstrap.pypa.io/get-pip.py' \ - && python3.10 /tmp/get-pip.py \ - && rm /tmp/get-pip.py - -# Test pip -RUN python3.10 -m pip - -# Install build requirements -COPY requirements.txt /requirements.txt -RUN python3.10 -m pip install --require-hashes -r requirements.txt - -CMD ["python3.10"] diff --git a/packages/google-cloud-bigtable/.kokoro/docker/docs/fetch_gpg_keys.sh b/packages/google-cloud-bigtable/.kokoro/docker/docs/fetch_gpg_keys.sh deleted file mode 100755 index d653dd868e4b..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/docker/docs/fetch_gpg_keys.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/bin/bash -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use 
this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# A script to fetch gpg keys with retry. -# Avoid jinja parsing the file. -# - -function retry { - if [[ "${#}" -le 1 ]]; then - echo "Usage: ${0} retry_count commands.." - exit 1 - fi - local retries=${1} - local command="${@:2}" - until [[ "${retries}" -le 0 ]]; do - $command && return 0 - if [[ $? -ne 0 ]]; then - echo "command failed, retrying" - ((retries--)) - fi - done - return 1 -} - -# 3.6.9, 3.7.5 (Ned Deily) -retry 3 gpg --keyserver ha.pool.sks-keyservers.net --recv-keys \ - 0D96DF4D4110E5C43FBFB17F2D347EA6AA65421D - -# 3.8.0 (Łukasz Langa) -retry 3 gpg --keyserver ha.pool.sks-keyservers.net --recv-keys \ - E3FF2839C048B25C084DEBE9B26995E310250568 - -# diff --git a/packages/google-cloud-bigtable/.kokoro/docker/docs/requirements.in b/packages/google-cloud-bigtable/.kokoro/docker/docs/requirements.in deleted file mode 100644 index 586bd07037ae..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/docker/docs/requirements.in +++ /dev/null @@ -1,2 +0,0 @@ -nox -gcp-docuploader diff --git a/packages/google-cloud-bigtable/.kokoro/docker/docs/requirements.txt b/packages/google-cloud-bigtable/.kokoro/docker/docs/requirements.txt deleted file mode 100644 index a9360a25b707..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/docker/docs/requirements.txt +++ /dev/null @@ -1,297 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.10 -# by the following command: -# -# pip-compile --allow-unsafe --generate-hashes requirements.in -# -argcomplete==3.5.3 \ - --hash=sha256:2ab2c4a215c59fd6caaff41a869480a23e8f6a5f910b266c1808037f4e375b61 \ - --hash=sha256:c12bf50eded8aebb298c7b7da7a5ff3ee24dffd9f5281867dfe1424b58c55392 - # via nox -cachetools==5.5.0 \ - --hash=sha256:02134e8439cdc2ffb62023ce1debca2944c3f289d66bb17ead3ab3dede74b292 \ - --hash=sha256:2cc24fb4cbe39633fb7badd9db9ca6295d766d9c2995f245725a46715d050f2a - # via google-auth -certifi==2024.12.14 \ - --hash=sha256:1275f7a45be9464efc1173084eaa30f866fe2e47d389406136d332ed4967ec56 \ - --hash=sha256:b650d30f370c2b724812bee08008be0c4163b163ddaec3f2546c1caf65f191db - # via requests -charset-normalizer==3.4.1 \ - --hash=sha256:0167ddc8ab6508fe81860a57dd472b2ef4060e8d378f0cc555707126830f2537 \ - --hash=sha256:01732659ba9b5b873fc117534143e4feefecf3b2078b0a6a2e925271bb6f4cfa \ - --hash=sha256:01ad647cdd609225c5350561d084b42ddf732f4eeefe6e678765636791e78b9a \ - --hash=sha256:04432ad9479fa40ec0f387795ddad4437a2b50417c69fa275e212933519ff294 \ - --hash=sha256:0907f11d019260cdc3f94fbdb23ff9125f6b5d1039b76003b5b0ac9d6a6c9d5b \ - --hash=sha256:0924e81d3d5e70f8126529951dac65c1010cdf117bb75eb02dd12339b57749dd \ - --hash=sha256:09b26ae6b1abf0d27570633b2b078a2a20419c99d66fb2823173d73f188ce601 \ - --hash=sha256:09b5e6733cbd160dcc09589227187e242a30a49ca5cefa5a7edd3f9d19ed53fd \ - --hash=sha256:0af291f4fe114be0280cdd29d533696a77b5b49cfde5467176ecab32353395c4 \ - --hash=sha256:0f55e69f030f7163dffe9fd0752b32f070566451afe180f99dbeeb81f511ad8d \ - --hash=sha256:1a2bc9f351a75ef49d664206d51f8e5ede9da246602dc2d2726837620ea034b2 \ - 
--hash=sha256:22e14b5d70560b8dd51ec22863f370d1e595ac3d024cb8ad7d308b4cd95f8313 \ - --hash=sha256:234ac59ea147c59ee4da87a0c0f098e9c8d169f4dc2a159ef720f1a61bbe27cd \ - --hash=sha256:2369eea1ee4a7610a860d88f268eb39b95cb588acd7235e02fd5a5601773d4fa \ - --hash=sha256:237bdbe6159cff53b4f24f397d43c6336c6b0b42affbe857970cefbb620911c8 \ - --hash=sha256:28bf57629c75e810b6ae989f03c0828d64d6b26a5e205535585f96093e405ed1 \ - --hash=sha256:2967f74ad52c3b98de4c3b32e1a44e32975e008a9cd2a8cc8966d6a5218c5cb2 \ - --hash=sha256:2a75d49014d118e4198bcee5ee0a6f25856b29b12dbf7cd012791f8a6cc5c496 \ - --hash=sha256:2bdfe3ac2e1bbe5b59a1a63721eb3b95fc9b6817ae4a46debbb4e11f6232428d \ - --hash=sha256:2d074908e1aecee37a7635990b2c6d504cd4766c7bc9fc86d63f9c09af3fa11b \ - --hash=sha256:2fb9bd477fdea8684f78791a6de97a953c51831ee2981f8e4f583ff3b9d9687e \ - --hash=sha256:311f30128d7d333eebd7896965bfcfbd0065f1716ec92bd5638d7748eb6f936a \ - --hash=sha256:329ce159e82018d646c7ac45b01a430369d526569ec08516081727a20e9e4af4 \ - --hash=sha256:345b0426edd4e18138d6528aed636de7a9ed169b4aaf9d61a8c19e39d26838ca \ - --hash=sha256:363e2f92b0f0174b2f8238240a1a30142e3db7b957a5dd5689b0e75fb717cc78 \ - --hash=sha256:3a3bd0dcd373514dcec91c411ddb9632c0d7d92aed7093b8c3bbb6d69ca74408 \ - --hash=sha256:3bed14e9c89dcb10e8f3a29f9ccac4955aebe93c71ae803af79265c9ca5644c5 \ - --hash=sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3 \ - --hash=sha256:44ecbf16649486d4aebafeaa7ec4c9fed8b88101f4dd612dcaf65d5e815f837f \ - --hash=sha256:4532bff1b8421fd0a320463030c7520f56a79c9024a4e88f01c537316019005a \ - --hash=sha256:49402233c892a461407c512a19435d1ce275543138294f7ef013f0b63d5d3765 \ - --hash=sha256:4c0907b1928a36d5a998d72d64d8eaa7244989f7aaaf947500d3a800c83a3fd6 \ - --hash=sha256:4d86f7aff21ee58f26dcf5ae81a9addbd914115cdebcbb2217e4f0ed8982e146 \ - --hash=sha256:5777ee0881f9499ed0f71cc82cf873d9a0ca8af166dfa0af8ec4e675b7df48e6 \ - --hash=sha256:5df196eb874dae23dcfb968c83d4f8fdccb333330fe1fc278ac5ceeb101003a9 \ - --hash=sha256:619a609aa74ae43d90ed2e89bdd784765de0a25ca761b93e196d938b8fd1dbbd \ - --hash=sha256:6e27f48bcd0957c6d4cb9d6fa6b61d192d0b13d5ef563e5f2ae35feafc0d179c \ - --hash=sha256:6ff8a4a60c227ad87030d76e99cd1698345d4491638dfa6673027c48b3cd395f \ - --hash=sha256:73d94b58ec7fecbc7366247d3b0b10a21681004153238750bb67bd9012414545 \ - --hash=sha256:7461baadb4dc00fd9e0acbe254e3d7d2112e7f92ced2adc96e54ef6501c5f176 \ - --hash=sha256:75832c08354f595c760a804588b9357d34ec00ba1c940c15e31e96d902093770 \ - --hash=sha256:7709f51f5f7c853f0fb938bcd3bc59cdfdc5203635ffd18bf354f6967ea0f824 \ - --hash=sha256:78baa6d91634dfb69ec52a463534bc0df05dbd546209b79a3880a34487f4b84f \ - --hash=sha256:7974a0b5ecd505609e3b19742b60cee7aa2aa2fb3151bc917e6e2646d7667dcf \ - --hash=sha256:7a4f97a081603d2050bfaffdefa5b02a9ec823f8348a572e39032caa8404a487 \ - --hash=sha256:7b1bef6280950ee6c177b326508f86cad7ad4dff12454483b51d8b7d673a2c5d \ - --hash=sha256:7d053096f67cd1241601111b698f5cad775f97ab25d81567d3f59219b5f1adbd \ - --hash=sha256:804a4d582ba6e5b747c625bf1255e6b1507465494a40a2130978bda7b932c90b \ - --hash=sha256:807f52c1f798eef6cf26beb819eeb8819b1622ddfeef9d0977a8502d4db6d534 \ - --hash=sha256:80ed5e856eb7f30115aaf94e4a08114ccc8813e6ed1b5efa74f9f82e8509858f \ - --hash=sha256:8417cb1f36cc0bc7eaba8ccb0e04d55f0ee52df06df3ad55259b9a323555fc8b \ - --hash=sha256:8436c508b408b82d87dc5f62496973a1805cd46727c34440b0d29d8a2f50a6c9 \ - --hash=sha256:89149166622f4db9b4b6a449256291dc87a99ee53151c74cbd82a53c8c2f6ccd \ - 
--hash=sha256:8bfa33f4f2672964266e940dd22a195989ba31669bd84629f05fab3ef4e2d125 \ - --hash=sha256:8c60ca7339acd497a55b0ea5d506b2a2612afb2826560416f6894e8b5770d4a9 \ - --hash=sha256:91b36a978b5ae0ee86c394f5a54d6ef44db1de0815eb43de826d41d21e4af3de \ - --hash=sha256:955f8851919303c92343d2f66165294848d57e9bba6cf6e3625485a70a038d11 \ - --hash=sha256:97f68b8d6831127e4787ad15e6757232e14e12060bec17091b85eb1486b91d8d \ - --hash=sha256:9b23ca7ef998bc739bf6ffc077c2116917eabcc901f88da1b9856b210ef63f35 \ - --hash=sha256:9f0b8b1c6d84c8034a44893aba5e767bf9c7a211e313a9605d9c617d7083829f \ - --hash=sha256:aabfa34badd18f1da5ec1bc2715cadc8dca465868a4e73a0173466b688f29dda \ - --hash=sha256:ab36c8eb7e454e34e60eb55ca5d241a5d18b2c6244f6827a30e451c42410b5f7 \ - --hash=sha256:b010a7a4fd316c3c484d482922d13044979e78d1861f0e0650423144c616a46a \ - --hash=sha256:b1ac5992a838106edb89654e0aebfc24f5848ae2547d22c2c3f66454daa11971 \ - --hash=sha256:b7b2d86dd06bfc2ade3312a83a5c364c7ec2e3498f8734282c6c3d4b07b346b8 \ - --hash=sha256:b97e690a2118911e39b4042088092771b4ae3fc3aa86518f84b8cf6888dbdb41 \ - --hash=sha256:bc2722592d8998c870fa4e290c2eec2c1569b87fe58618e67d38b4665dfa680d \ - --hash=sha256:c0429126cf75e16c4f0ad00ee0eae4242dc652290f940152ca8c75c3a4b6ee8f \ - --hash=sha256:c30197aa96e8eed02200a83fba2657b4c3acd0f0aa4bdc9f6c1af8e8962e0757 \ - --hash=sha256:c4c3e6da02df6fa1410a7680bd3f63d4f710232d3139089536310d027950696a \ - --hash=sha256:c75cb2a3e389853835e84a2d8fb2b81a10645b503eca9bcb98df6b5a43eb8886 \ - --hash=sha256:c96836c97b1238e9c9e3fe90844c947d5afbf4f4c92762679acfe19927d81d77 \ - --hash=sha256:d7f50a1f8c450f3925cb367d011448c39239bb3eb4117c36a6d354794de4ce76 \ - --hash=sha256:d973f03c0cb71c5ed99037b870f2be986c3c05e63622c017ea9816881d2dd247 \ - --hash=sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85 \ - --hash=sha256:d9c3cdf5390dcd29aa8056d13e8e99526cda0305acc038b96b30352aff5ff2bb \ - --hash=sha256:dad3e487649f498dd991eeb901125411559b22e8d7ab25d3aeb1af367df5efd7 \ - --hash=sha256:dccbe65bd2f7f7ec22c4ff99ed56faa1e9f785482b9bbd7c717e26fd723a1d1e \ - --hash=sha256:dd78cfcda14a1ef52584dbb008f7ac81c1328c0f58184bf9a84c49c605002da6 \ - --hash=sha256:e218488cd232553829be0664c2292d3af2eeeb94b32bea483cf79ac6a694e037 \ - --hash=sha256:e358e64305fe12299a08e08978f51fc21fac060dcfcddd95453eabe5b93ed0e1 \ - --hash=sha256:ea0d8d539afa5eb2728aa1932a988a9a7af94f18582ffae4bc10b3fbdad0626e \ - --hash=sha256:eab677309cdb30d047996b36d34caeda1dc91149e4fdca0b1a039b3f79d9a807 \ - --hash=sha256:eb8178fe3dba6450a3e024e95ac49ed3400e506fd4e9e5c32d30adda88cbd407 \ - --hash=sha256:ecddf25bee22fe4fe3737a399d0d177d72bc22be6913acfab364b40bce1ba83c \ - --hash=sha256:eea6ee1db730b3483adf394ea72f808b6e18cf3cb6454b4d86e04fa8c4327a12 \ - --hash=sha256:f08ff5e948271dc7e18a35641d2f11a4cd8dfd5634f55228b691e62b37125eb3 \ - --hash=sha256:f30bf9fd9be89ecb2360c7d94a711f00c09b976258846efe40db3d05828e8089 \ - --hash=sha256:fa88b843d6e211393a37219e6a1c1df99d35e8fd90446f1118f4216e307e48cd \ - --hash=sha256:fc54db6c8593ef7d4b2a331b58653356cf04f67c960f584edb7c3d8c97e8f39e \ - --hash=sha256:fd4ec41f914fa74ad1b8304bbc634b3de73d2a0889bd32076342a573e0779e00 \ - --hash=sha256:ffc9202a29ab3920fa812879e95a9e78b2465fd10be7fcbd042899695d75e616 - # via requests -click==8.1.8 \ - --hash=sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2 \ - --hash=sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a - # via gcp-docuploader -colorlog==6.9.0 \ - --hash=sha256:5906e71acd67cb07a71e779c47c4bcb45fb8c2993eebe9e5adcd6a6f1b283eff 
\ - --hash=sha256:bfba54a1b93b94f54e1f4fe48395725a3d92fd2a4af702f6bd70946bdc0c6ac2 - # via - # gcp-docuploader - # nox -distlib==0.3.9 \ - --hash=sha256:47f8c22fd27c27e25a65601af709b38e4f0a45ea4fc2e710f65755fa8caaaf87 \ - --hash=sha256:a60f20dea646b8a33f3e7772f74dc0b2d0772d2837ee1342a00645c81edf9403 - # via virtualenv -filelock==3.16.1 \ - --hash=sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0 \ - --hash=sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435 - # via virtualenv -gcp-docuploader==0.6.5 \ - --hash=sha256:30221d4ac3e5a2b9c69aa52fdbef68cc3f27d0e6d0d90e220fc024584b8d2318 \ - --hash=sha256:b7458ef93f605b9d46a4bf3a8dc1755dad1f31d030c8679edf304e343b347eea - # via -r requirements.in -google-api-core==2.24.0 \ - --hash=sha256:10d82ac0fca69c82a25b3efdeefccf6f28e02ebb97925a8cce8edbfe379929d9 \ - --hash=sha256:e255640547a597a4da010876d333208ddac417d60add22b6851a0c66a831fcaf - # via - # google-cloud-core - # google-cloud-storage -google-auth==2.37.0 \ - --hash=sha256:0054623abf1f9c83492c63d3f47e77f0a544caa3d40b2d98e099a611c2dd5d00 \ - --hash=sha256:42664f18290a6be591be5329a96fe30184be1a1badb7292a7f686a9659de9ca0 - # via - # google-api-core - # google-cloud-core - # google-cloud-storage -google-cloud-core==2.4.1 \ - --hash=sha256:9b7749272a812bde58fff28868d0c5e2f585b82f37e09a1f6ed2d4d10f134073 \ - --hash=sha256:a9e6a4422b9ac5c29f79a0ede9485473338e2ce78d91f2370c01e730eab22e61 - # via google-cloud-storage -google-cloud-storage==2.19.0 \ - --hash=sha256:aeb971b5c29cf8ab98445082cbfe7b161a1f48ed275822f59ed3f1524ea54fba \ - --hash=sha256:cd05e9e7191ba6cb68934d8eb76054d9be4562aa89dbc4236feee4d7d51342b2 - # via gcp-docuploader -google-crc32c==1.6.0 \ - --hash=sha256:05e2d8c9a2f853ff116db9706b4a27350587f341eda835f46db3c0a8c8ce2f24 \ - --hash=sha256:18e311c64008f1f1379158158bb3f0c8d72635b9eb4f9545f8cf990c5668e59d \ - --hash=sha256:236c87a46cdf06384f614e9092b82c05f81bd34b80248021f729396a78e55d7e \ - --hash=sha256:35834855408429cecf495cac67ccbab802de269e948e27478b1e47dfb6465e57 \ - --hash=sha256:386122eeaaa76951a8196310432c5b0ef3b53590ef4c317ec7588ec554fec5d2 \ - --hash=sha256:40b05ab32a5067525670880eb5d169529089a26fe35dce8891127aeddc1950e8 \ - --hash=sha256:48abd62ca76a2cbe034542ed1b6aee851b6f28aaca4e6551b5599b6f3ef175cc \ - --hash=sha256:50cf2a96da226dcbff8671233ecf37bf6e95de98b2a2ebadbfdf455e6d05df42 \ - --hash=sha256:51c4f54dd8c6dfeb58d1df5e4f7f97df8abf17a36626a217f169893d1d7f3e9f \ - --hash=sha256:5bcc90b34df28a4b38653c36bb5ada35671ad105c99cfe915fb5bed7ad6924aa \ - --hash=sha256:62f6d4a29fea082ac4a3c9be5e415218255cf11684ac6ef5488eea0c9132689b \ - --hash=sha256:6eceb6ad197656a1ff49ebfbbfa870678c75be4344feb35ac1edf694309413dc \ - --hash=sha256:7aec8e88a3583515f9e0957fe4f5f6d8d4997e36d0f61624e70469771584c760 \ - --hash=sha256:91ca8145b060679ec9176e6de4f89b07363d6805bd4760631ef254905503598d \ - --hash=sha256:a184243544811e4a50d345838a883733461e67578959ac59964e43cca2c791e7 \ - --hash=sha256:a9e4b426c3702f3cd23b933436487eb34e01e00327fac20c9aebb68ccf34117d \ - --hash=sha256:bb0966e1c50d0ef5bc743312cc730b533491d60585a9a08f897274e57c3f70e0 \ - --hash=sha256:bb8b3c75bd157010459b15222c3fd30577042a7060e29d42dabce449c087f2b3 \ - --hash=sha256:bd5e7d2445d1a958c266bfa5d04c39932dc54093fa391736dbfdb0f1929c1fb3 \ - --hash=sha256:c87d98c7c4a69066fd31701c4e10d178a648c2cac3452e62c6b24dc51f9fcc00 \ - --hash=sha256:d2952396dc604544ea7476b33fe87faedc24d666fb0c2d5ac971a2b9576ab871 \ - --hash=sha256:d8797406499f28b5ef791f339594b0b5fdedf54e203b5066675c406ba69d705c \ - 
--hash=sha256:d9e9913f7bd69e093b81da4535ce27af842e7bf371cde42d1ae9e9bd382dc0e9 \ - --hash=sha256:e2806553238cd076f0a55bddab37a532b53580e699ed8e5606d0de1f856b5205 \ - --hash=sha256:ebab974b1687509e5c973b5c4b8b146683e101e102e17a86bd196ecaa4d099fc \ - --hash=sha256:ed767bf4ba90104c1216b68111613f0d5926fb3780660ea1198fc469af410e9d \ - --hash=sha256:f7a1fc29803712f80879b0806cb83ab24ce62fc8daf0569f2204a0cfd7f68ed4 - # via - # google-cloud-storage - # google-resumable-media -google-resumable-media==2.7.2 \ - --hash=sha256:3ce7551e9fe6d99e9a126101d2536612bb73486721951e9562fee0f90c6ababa \ - --hash=sha256:5280aed4629f2b60b847b0d42f9857fd4935c11af266744df33d8074cae92fe0 - # via google-cloud-storage -googleapis-common-protos==1.66.0 \ - --hash=sha256:c3e7b33d15fdca5374cc0a7346dd92ffa847425cc4ea941d970f13680052ec8c \ - --hash=sha256:d7abcd75fabb2e0ec9f74466401f6c119a0b498e27370e9be4c94cb7e382b8ed - # via google-api-core -idna==3.10 \ - --hash=sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9 \ - --hash=sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3 - # via requests -nox==2024.10.9 \ - --hash=sha256:1d36f309a0a2a853e9bccb76bbef6bb118ba92fa92674d15604ca99adeb29eab \ - --hash=sha256:7aa9dc8d1c27e9f45ab046ffd1c3b2c4f7c91755304769df231308849ebded95 - # via -r requirements.in -packaging==24.2 \ - --hash=sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759 \ - --hash=sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f - # via nox -platformdirs==4.3.6 \ - --hash=sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907 \ - --hash=sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb - # via virtualenv -proto-plus==1.25.0 \ - --hash=sha256:c91fc4a65074ade8e458e95ef8bac34d4008daa7cce4a12d6707066fca648961 \ - --hash=sha256:fbb17f57f7bd05a68b7707e745e26528b0b3c34e378db91eef93912c54982d91 - # via google-api-core -protobuf==5.29.3 \ - --hash=sha256:0a18ed4a24198528f2333802eb075e59dea9d679ab7a6c5efb017a59004d849f \ - --hash=sha256:0eb32bfa5219fc8d4111803e9a690658aa2e6366384fd0851064b963b6d1f2a7 \ - --hash=sha256:3ea51771449e1035f26069c4c7fd51fba990d07bc55ba80701c78f886bf9c888 \ - --hash=sha256:5da0f41edaf117bde316404bad1a486cb4ededf8e4a54891296f648e8e076620 \ - --hash=sha256:6ce8cc3389a20693bfde6c6562e03474c40851b44975c9b2bf6df7d8c4f864da \ - --hash=sha256:84a57163a0ccef3f96e4b6a20516cedcf5bb3a95a657131c5c3ac62200d23252 \ - --hash=sha256:a4fa6f80816a9a0678429e84973f2f98cbc218cca434abe8db2ad0bffc98503a \ - --hash=sha256:a8434404bbf139aa9e1300dbf989667a83d42ddda9153d8ab76e0d5dcaca484e \ - --hash=sha256:b89c115d877892a512f79a8114564fb435943b59067615894c3b13cd3e1fa107 \ - --hash=sha256:c027e08a08be10b67c06bf2370b99c811c466398c357e615ca88c91c07f0910f \ - --hash=sha256:daaf63f70f25e8689c072cfad4334ca0ac1d1e05a92fc15c54eb9cf23c3efd84 - # via - # gcp-docuploader - # google-api-core - # googleapis-common-protos - # proto-plus -pyasn1==0.6.1 \ - --hash=sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629 \ - --hash=sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034 - # via - # pyasn1-modules - # rsa -pyasn1-modules==0.4.1 \ - --hash=sha256:49bfa96b45a292b711e986f222502c1c9a5e1f4e568fc30e2574a6c7d07838fd \ - --hash=sha256:c28e2dbf9c06ad61c71a075c7e0f9fd0f1b0bb2d2ad4377f240d33ac2ab60a7c - # via google-auth -requests==2.32.3 \ - --hash=sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760 \ - 
--hash=sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6 - # via - # google-api-core - # google-cloud-storage -rsa==4.9 \ - --hash=sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7 \ - --hash=sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21 - # via google-auth -six==1.17.0 \ - --hash=sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274 \ - --hash=sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81 - # via gcp-docuploader -tomli==2.2.1 \ - --hash=sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6 \ - --hash=sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd \ - --hash=sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c \ - --hash=sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b \ - --hash=sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8 \ - --hash=sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6 \ - --hash=sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77 \ - --hash=sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff \ - --hash=sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea \ - --hash=sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192 \ - --hash=sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249 \ - --hash=sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee \ - --hash=sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4 \ - --hash=sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98 \ - --hash=sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8 \ - --hash=sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4 \ - --hash=sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281 \ - --hash=sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744 \ - --hash=sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69 \ - --hash=sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13 \ - --hash=sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140 \ - --hash=sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e \ - --hash=sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e \ - --hash=sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc \ - --hash=sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff \ - --hash=sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec \ - --hash=sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2 \ - --hash=sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222 \ - --hash=sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106 \ - --hash=sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272 \ - --hash=sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a \ - --hash=sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7 - # via nox -urllib3==2.3.0 \ - --hash=sha256:1cee9ad369867bfdbbb48b7dd50374c0967a0bb7710050facf0dd6911440e3df \ - --hash=sha256:f8c5449b3cf0861679ce7e0503c7b44b5ec981bec0d1d3795a07f1ba96f0204d - # via requests -virtualenv==20.28.1 \ - --hash=sha256:412773c85d4dab0409b83ec36f7a6499e72eaf08c80e81e9576bca61831c71cb \ - 
--hash=sha256:5d34ab240fdb5d21549b76f9e8ff3af28252f5499fb6d6f031adac4e5a8c5329 - # via nox diff --git a/packages/google-cloud-bigtable/.kokoro/docs/common.cfg b/packages/google-cloud-bigtable/.kokoro/docs/common.cfg deleted file mode 100644 index 5646c98aaa6f..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/docs/common.cfg +++ /dev/null @@ -1,66 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -# Build logs will be here -action { - define_artifacts { - regex: "**/*sponge_log.xml" - } -} - -# Download trampoline resources. -gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" - -# Use the trampoline script to run in docker. -build_file: "python-bigtable/.kokoro/trampoline_v2.sh" - -# Configure the docker image for kokoro-trampoline. -env_vars: { - key: "TRAMPOLINE_IMAGE" - value: "gcr.io/cloud-devrel-kokoro-resources/python-lib-docs" -} -env_vars: { - key: "TRAMPOLINE_BUILD_FILE" - value: "github/python-bigtable/.kokoro/publish-docs.sh" -} - -env_vars: { - key: "STAGING_BUCKET" - value: "docs-staging" -} - -env_vars: { - key: "V2_STAGING_BUCKET" - # Push google cloud library docs to the Cloud RAD bucket `docs-staging-v2` - value: "docs-staging-v2" -} - -# It will upload the docker image after successful builds. -env_vars: { - key: "TRAMPOLINE_IMAGE_UPLOAD" - value: "true" -} - -# It will always build the docker image. -env_vars: { - key: "TRAMPOLINE_DOCKERFILE" - value: ".kokoro/docker/docs/Dockerfile" -} - -# Fetch the token needed for reporting release status to GitHub -before_action { - fetch_keystore { - keystore_resource { - keystore_config_id: 73713 - keyname: "yoshi-automation-github-key" - } - } -} - -before_action { - fetch_keystore { - keystore_resource { - keystore_config_id: 73713 - keyname: "docuploader_service_account" - } - } -} diff --git a/packages/google-cloud-bigtable/.kokoro/docs/docs-presubmit.cfg b/packages/google-cloud-bigtable/.kokoro/docs/docs-presubmit.cfg deleted file mode 100644 index 001770ea6f12..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/docs/docs-presubmit.cfg +++ /dev/null @@ -1,28 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -env_vars: { - key: "STAGING_BUCKET" - value: "gcloud-python-test" -} - -env_vars: { - key: "V2_STAGING_BUCKET" - value: "gcloud-python-test" -} - -# We only upload the image in the main `docs` build. -env_vars: { - key: "TRAMPOLINE_IMAGE_UPLOAD" - value: "false" -} - -env_vars: { - key: "TRAMPOLINE_BUILD_FILE" - value: "github/python-bigtable/.kokoro/build.sh" -} - -# Only run this nox session. -env_vars: { - key: "NOX_SESSION" - value: "docs docfx" -} diff --git a/packages/google-cloud-bigtable/.kokoro/docs/docs.cfg b/packages/google-cloud-bigtable/.kokoro/docs/docs.cfg deleted file mode 100644 index 8f43917d92fe..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/docs/docs.cfg +++ /dev/null @@ -1 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/publish-docs.sh b/packages/google-cloud-bigtable/.kokoro/publish-docs.sh deleted file mode 100755 index 4ed4aaf1346f..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/publish-docs.sh +++ /dev/null @@ -1,58 +0,0 @@ -#!/bin/bash -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -eo pipefail - -# Disable buffering, so that the logs stream through. -export PYTHONUNBUFFERED=1 - -export PATH="${HOME}/.local/bin:${PATH}" - -# build docs -nox -s docs - -# create metadata -python3.10 -m docuploader create-metadata \ - --name=$(jq --raw-output '.name // empty' .repo-metadata.json) \ - --version=$(python3.10 setup.py --version) \ - --language=$(jq --raw-output '.language // empty' .repo-metadata.json) \ - --distribution-name=$(python3.10 setup.py --name) \ - --product-page=$(jq --raw-output '.product_documentation // empty' .repo-metadata.json) \ - --github-repository=$(jq --raw-output '.repo // empty' .repo-metadata.json) \ - --issue-tracker=$(jq --raw-output '.issue_tracker // empty' .repo-metadata.json) - -cat docs.metadata - -# upload docs -python3.10 -m docuploader upload docs/_build/html --metadata-file docs.metadata --staging-bucket "${STAGING_BUCKET}" - - -# docfx yaml files -nox -s docfx - -# create metadata. -python3.10 -m docuploader create-metadata \ - --name=$(jq --raw-output '.name // empty' .repo-metadata.json) \ - --version=$(python3.10 setup.py --version) \ - --language=$(jq --raw-output '.language // empty' .repo-metadata.json) \ - --distribution-name=$(python3.10 setup.py --name) \ - --product-page=$(jq --raw-output '.product_documentation // empty' .repo-metadata.json) \ - --github-repository=$(jq --raw-output '.repo // empty' .repo-metadata.json) \ - --issue-tracker=$(jq --raw-output '.issue_tracker // empty' .repo-metadata.json) - -cat docs.metadata - -# upload docs -python3.10 -m docuploader upload docs/_build/html/docfx_yaml --metadata-file docs.metadata --destination-prefix docfx --staging-bucket "${V2_STAGING_BUCKET}" diff --git a/packages/google-cloud-bigtable/.kokoro/release.sh b/packages/google-cloud-bigtable/.kokoro/release.sh deleted file mode 100755 index 4f0d14588cba..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/release.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -eo pipefail - -# Start the releasetool reporter -python3 -m pip install --require-hashes -r github/python-bigtable/.kokoro/requirements.txt -python3 -m releasetool publish-reporter-script > /tmp/publisher-script; source /tmp/publisher-script - -# Disable buffering, so that the logs stream through. -export PYTHONUNBUFFERED=1 - -# Move into the package, build the distribution and upload. 
-TWINE_PASSWORD=$(cat "${KOKORO_KEYSTORE_DIR}/73713_google-cloud-pypi-token-keystore-3") -cd github/python-bigtable -python3 setup.py sdist bdist_wheel -twine upload --username __token__ --password "${TWINE_PASSWORD}" dist/* diff --git a/packages/google-cloud-bigtable/.kokoro/release/common.cfg b/packages/google-cloud-bigtable/.kokoro/release/common.cfg deleted file mode 100644 index 6b4c17d34467..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/release/common.cfg +++ /dev/null @@ -1,43 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -# Build logs will be here -action { - define_artifacts { - regex: "**/*sponge_log.xml" - } -} - -# Download trampoline resources. -gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" - -# Use the trampoline script to run in docker. -build_file: "python-bigtable/.kokoro/trampoline.sh" - -# Configure the docker image for kokoro-trampoline. -env_vars: { - key: "TRAMPOLINE_IMAGE" - value: "gcr.io/cloud-devrel-kokoro-resources/python-multi" -} -env_vars: { - key: "TRAMPOLINE_BUILD_FILE" - value: "github/python-bigtable/.kokoro/release.sh" -} - -# Fetch PyPI password -before_action { - fetch_keystore { - keystore_resource { - keystore_config_id: 73713 - keyname: "google-cloud-pypi-token-keystore-3" - } - } -} - -# Store the packages we uploaded to PyPI. That way, we have a record of exactly -# what we published, which we can use to generate SBOMs and attestations. -action { - define_artifacts { - regex: "github/python-bigtable/**/*.tar.gz" - strip_prefix: "github/python-bigtable" - } -} diff --git a/packages/google-cloud-bigtable/.kokoro/release/release.cfg b/packages/google-cloud-bigtable/.kokoro/release/release.cfg deleted file mode 100644 index 8f43917d92fe..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/release/release.cfg +++ /dev/null @@ -1 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/requirements.in b/packages/google-cloud-bigtable/.kokoro/requirements.in deleted file mode 100644 index fff4d9ce0d0a..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/requirements.in +++ /dev/null @@ -1,11 +0,0 @@ -gcp-docuploader -gcp-releasetool>=2 # required for compatibility with cryptography>=42.x -importlib-metadata -typing-extensions -twine -wheel -setuptools -nox>=2022.11.21 # required to remove dependency on py -charset-normalizer<3 -click<8.1.0 -cryptography>=42.0.5 diff --git a/packages/google-cloud-bigtable/.kokoro/requirements.txt b/packages/google-cloud-bigtable/.kokoro/requirements.txt deleted file mode 100644 index 006d8ef931bf..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/requirements.txt +++ /dev/null @@ -1,509 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --allow-unsafe --generate-hashes requirements.in -# -argcomplete==3.5.1 \ - --hash=sha256:1a1d148bdaa3e3b93454900163403df41448a248af01b6e849edc5ac08e6c363 \ - --hash=sha256:eb1ee355aa2557bd3d0145de7b06b2a45b0ce461e1e7813f5d066039ab4177b4 - # via nox -attrs==24.2.0 \ - --hash=sha256:5cfb1b9148b5b086569baec03f20d7b6bf3bcacc9a42bebf87ffaaca362f6346 \ - --hash=sha256:81921eb96de3191c8258c199618104dd27ac608d9366f5e35d011eae1867ede2 - # via gcp-releasetool -backports-tarfile==1.2.0 \ - --hash=sha256:77e284d754527b01fb1e6fa8a1afe577858ebe4e9dad8919e34c862cb399bc34 \ - --hash=sha256:d75e02c268746e1b8144c278978b6e98e85de6ad16f8e4b0844a154557eca991 - # via jaraco-context 
-cachetools==5.5.0 \ - --hash=sha256:02134e8439cdc2ffb62023ce1debca2944c3f289d66bb17ead3ab3dede74b292 \ - --hash=sha256:2cc24fb4cbe39633fb7badd9db9ca6295d766d9c2995f245725a46715d050f2a - # via google-auth -certifi==2024.8.30 \ - --hash=sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8 \ - --hash=sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9 - # via requests -cffi==1.17.1 \ - --hash=sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8 \ - --hash=sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2 \ - --hash=sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1 \ - --hash=sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15 \ - --hash=sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36 \ - --hash=sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824 \ - --hash=sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8 \ - --hash=sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36 \ - --hash=sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17 \ - --hash=sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf \ - --hash=sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc \ - --hash=sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3 \ - --hash=sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed \ - --hash=sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702 \ - --hash=sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1 \ - --hash=sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8 \ - --hash=sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903 \ - --hash=sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6 \ - --hash=sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d \ - --hash=sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b \ - --hash=sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e \ - --hash=sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be \ - --hash=sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c \ - --hash=sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683 \ - --hash=sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9 \ - --hash=sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c \ - --hash=sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8 \ - --hash=sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1 \ - --hash=sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4 \ - --hash=sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655 \ - --hash=sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67 \ - --hash=sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595 \ - --hash=sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0 \ - --hash=sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65 \ - --hash=sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41 \ - --hash=sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6 \ - --hash=sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401 \ - 
--hash=sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6 \ - --hash=sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3 \ - --hash=sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16 \ - --hash=sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93 \ - --hash=sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e \ - --hash=sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4 \ - --hash=sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964 \ - --hash=sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c \ - --hash=sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576 \ - --hash=sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0 \ - --hash=sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3 \ - --hash=sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662 \ - --hash=sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3 \ - --hash=sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff \ - --hash=sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5 \ - --hash=sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd \ - --hash=sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f \ - --hash=sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5 \ - --hash=sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14 \ - --hash=sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d \ - --hash=sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9 \ - --hash=sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7 \ - --hash=sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382 \ - --hash=sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a \ - --hash=sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e \ - --hash=sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a \ - --hash=sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4 \ - --hash=sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99 \ - --hash=sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87 \ - --hash=sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b - # via cryptography -charset-normalizer==2.1.1 \ - --hash=sha256:5a3d016c7c547f69d6f81fb0db9449ce888b418b5b9952cc5e6e66843e9dd845 \ - --hash=sha256:83e9a75d1911279afd89352c68b45348559d1fc0506b054b346651b5e7fee29f - # via - # -r requirements.in - # requests -click==8.0.4 \ - --hash=sha256:6a7a62563bbfabfda3a38f3023a1db4a35978c0abd76f6c9605ecd6554d6d9b1 \ - --hash=sha256:8458d7b1287c5fb128c90e23381cf99dcde74beaf6c7ff6384ce84d6fe090adb - # via - # -r requirements.in - # gcp-docuploader - # gcp-releasetool -colorlog==6.8.2 \ - --hash=sha256:3e3e079a41feb5a1b64f978b5ea4f46040a94f11f0e8bbb8261e3dbbeca64d44 \ - --hash=sha256:4dcbb62368e2800cb3c5abd348da7e53f6c362dda502ec27c560b2e58a66bd33 - # via - # gcp-docuploader - # nox -cryptography==43.0.1 \ - --hash=sha256:014f58110f53237ace6a408b5beb6c427b64e084eb451ef25a28308270086494 \ - --hash=sha256:1bbcce1a551e262dfbafb6e6252f1ae36a248e615ca44ba302df077a846a8806 \ - --hash=sha256:203e92a75716d8cfb491dc47c79e17d0d9207ccffcbcb35f598fbe463ae3444d \ - 
--hash=sha256:27e613d7077ac613e399270253259d9d53872aaf657471473ebfc9a52935c062 \ - --hash=sha256:2bd51274dcd59f09dd952afb696bf9c61a7a49dfc764c04dd33ef7a6b502a1e2 \ - --hash=sha256:38926c50cff6f533f8a2dae3d7f19541432610d114a70808f0926d5aaa7121e4 \ - --hash=sha256:511f4273808ab590912a93ddb4e3914dfd8a388fed883361b02dea3791f292e1 \ - --hash=sha256:58d4e9129985185a06d849aa6df265bdd5a74ca6e1b736a77959b498e0505b85 \ - --hash=sha256:5b43d1ea6b378b54a1dc99dd8a2b5be47658fe9a7ce0a58ff0b55f4b43ef2b84 \ - --hash=sha256:61ec41068b7b74268fa86e3e9e12b9f0c21fcf65434571dbb13d954bceb08042 \ - --hash=sha256:666ae11966643886c2987b3b721899d250855718d6d9ce41b521252a17985f4d \ - --hash=sha256:68aaecc4178e90719e95298515979814bda0cbada1256a4485414860bd7ab962 \ - --hash=sha256:7c05650fe8023c5ed0d46793d4b7d7e6cd9c04e68eabe5b0aeea836e37bdcec2 \ - --hash=sha256:80eda8b3e173f0f247f711eef62be51b599b5d425c429b5d4ca6a05e9e856baa \ - --hash=sha256:8385d98f6a3bf8bb2d65a73e17ed87a3ba84f6991c155691c51112075f9ffc5d \ - --hash=sha256:88cce104c36870d70c49c7c8fd22885875d950d9ee6ab54df2745f83ba0dc365 \ - --hash=sha256:9d3cdb25fa98afdd3d0892d132b8d7139e2c087da1712041f6b762e4f807cc96 \ - --hash=sha256:a575913fb06e05e6b4b814d7f7468c2c660e8bb16d8d5a1faf9b33ccc569dd47 \ - --hash=sha256:ac119bb76b9faa00f48128b7f5679e1d8d437365c5d26f1c2c3f0da4ce1b553d \ - --hash=sha256:c1332724be35d23a854994ff0b66530119500b6053d0bd3363265f7e5e77288d \ - --hash=sha256:d03a475165f3134f773d1388aeb19c2d25ba88b6a9733c5c590b9ff7bbfa2e0c \ - --hash=sha256:d75601ad10b059ec832e78823b348bfa1a59f6b8d545db3a24fd44362a1564cb \ - --hash=sha256:de41fd81a41e53267cb020bb3a7212861da53a7d39f863585d13ea11049cf277 \ - --hash=sha256:e710bf40870f4db63c3d7d929aa9e09e4e7ee219e703f949ec4073b4294f6172 \ - --hash=sha256:ea25acb556320250756e53f9e20a4177515f012c9eaea17eb7587a8c4d8ae034 \ - --hash=sha256:f98bf604c82c416bc829e490c700ca1553eafdf2912a91e23a79d97d9801372a \ - --hash=sha256:fba1007b3ef89946dbbb515aeeb41e30203b004f0b4b00e5e16078b518563289 - # via - # -r requirements.in - # gcp-releasetool - # secretstorage -distlib==0.3.9 \ - --hash=sha256:47f8c22fd27c27e25a65601af709b38e4f0a45ea4fc2e710f65755fa8caaaf87 \ - --hash=sha256:a60f20dea646b8a33f3e7772f74dc0b2d0772d2837ee1342a00645c81edf9403 - # via virtualenv -docutils==0.21.2 \ - --hash=sha256:3a6b18732edf182daa3cd12775bbb338cf5691468f91eeeb109deff6ebfa986f \ - --hash=sha256:dafca5b9e384f0e419294eb4d2ff9fa826435bf15f15b7bd45723e8ad76811b2 - # via readme-renderer -filelock==3.16.1 \ - --hash=sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0 \ - --hash=sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435 - # via virtualenv -gcp-docuploader==0.6.5 \ - --hash=sha256:30221d4ac3e5a2b9c69aa52fdbef68cc3f27d0e6d0d90e220fc024584b8d2318 \ - --hash=sha256:b7458ef93f605b9d46a4bf3a8dc1755dad1f31d030c8679edf304e343b347eea - # via -r requirements.in -gcp-releasetool==2.1.1 \ - --hash=sha256:25639269f4eae510094f9dbed9894977e1966933211eb155a451deebc3fc0b30 \ - --hash=sha256:845f4ded3d9bfe8cc7fdaad789e83f4ea014affa77785259a7ddac4b243e099e - # via -r requirements.in -google-api-core==2.21.0 \ - --hash=sha256:4a152fd11a9f774ea606388d423b68aa7e6d6a0ffe4c8266f74979613ec09f81 \ - --hash=sha256:6869eacb2a37720380ba5898312af79a4d30b8bca1548fb4093e0697dc4bdf5d - # via - # google-cloud-core - # google-cloud-storage -google-auth==2.35.0 \ - --hash=sha256:25df55f327ef021de8be50bad0dfd4a916ad0de96da86cd05661c9297723ad3f \ - --hash=sha256:f4c64ed4e01e8e8b646ef34c018f8bf3338df0c8e37d8b3bba40e7f574a3278a - # via - # 
gcp-releasetool - # google-api-core - # google-cloud-core - # google-cloud-storage -google-cloud-core==2.4.1 \ - --hash=sha256:9b7749272a812bde58fff28868d0c5e2f585b82f37e09a1f6ed2d4d10f134073 \ - --hash=sha256:a9e6a4422b9ac5c29f79a0ede9485473338e2ce78d91f2370c01e730eab22e61 - # via google-cloud-storage -google-cloud-storage==2.18.2 \ - --hash=sha256:97a4d45c368b7d401ed48c4fdfe86e1e1cb96401c9e199e419d289e2c0370166 \ - --hash=sha256:aaf7acd70cdad9f274d29332673fcab98708d0e1f4dceb5a5356aaef06af4d99 - # via gcp-docuploader -google-crc32c==1.6.0 \ - --hash=sha256:05e2d8c9a2f853ff116db9706b4a27350587f341eda835f46db3c0a8c8ce2f24 \ - --hash=sha256:18e311c64008f1f1379158158bb3f0c8d72635b9eb4f9545f8cf990c5668e59d \ - --hash=sha256:236c87a46cdf06384f614e9092b82c05f81bd34b80248021f729396a78e55d7e \ - --hash=sha256:35834855408429cecf495cac67ccbab802de269e948e27478b1e47dfb6465e57 \ - --hash=sha256:386122eeaaa76951a8196310432c5b0ef3b53590ef4c317ec7588ec554fec5d2 \ - --hash=sha256:40b05ab32a5067525670880eb5d169529089a26fe35dce8891127aeddc1950e8 \ - --hash=sha256:48abd62ca76a2cbe034542ed1b6aee851b6f28aaca4e6551b5599b6f3ef175cc \ - --hash=sha256:50cf2a96da226dcbff8671233ecf37bf6e95de98b2a2ebadbfdf455e6d05df42 \ - --hash=sha256:51c4f54dd8c6dfeb58d1df5e4f7f97df8abf17a36626a217f169893d1d7f3e9f \ - --hash=sha256:5bcc90b34df28a4b38653c36bb5ada35671ad105c99cfe915fb5bed7ad6924aa \ - --hash=sha256:62f6d4a29fea082ac4a3c9be5e415218255cf11684ac6ef5488eea0c9132689b \ - --hash=sha256:6eceb6ad197656a1ff49ebfbbfa870678c75be4344feb35ac1edf694309413dc \ - --hash=sha256:7aec8e88a3583515f9e0957fe4f5f6d8d4997e36d0f61624e70469771584c760 \ - --hash=sha256:91ca8145b060679ec9176e6de4f89b07363d6805bd4760631ef254905503598d \ - --hash=sha256:a184243544811e4a50d345838a883733461e67578959ac59964e43cca2c791e7 \ - --hash=sha256:a9e4b426c3702f3cd23b933436487eb34e01e00327fac20c9aebb68ccf34117d \ - --hash=sha256:bb0966e1c50d0ef5bc743312cc730b533491d60585a9a08f897274e57c3f70e0 \ - --hash=sha256:bb8b3c75bd157010459b15222c3fd30577042a7060e29d42dabce449c087f2b3 \ - --hash=sha256:bd5e7d2445d1a958c266bfa5d04c39932dc54093fa391736dbfdb0f1929c1fb3 \ - --hash=sha256:c87d98c7c4a69066fd31701c4e10d178a648c2cac3452e62c6b24dc51f9fcc00 \ - --hash=sha256:d2952396dc604544ea7476b33fe87faedc24d666fb0c2d5ac971a2b9576ab871 \ - --hash=sha256:d8797406499f28b5ef791f339594b0b5fdedf54e203b5066675c406ba69d705c \ - --hash=sha256:d9e9913f7bd69e093b81da4535ce27af842e7bf371cde42d1ae9e9bd382dc0e9 \ - --hash=sha256:e2806553238cd076f0a55bddab37a532b53580e699ed8e5606d0de1f856b5205 \ - --hash=sha256:ebab974b1687509e5c973b5c4b8b146683e101e102e17a86bd196ecaa4d099fc \ - --hash=sha256:ed767bf4ba90104c1216b68111613f0d5926fb3780660ea1198fc469af410e9d \ - --hash=sha256:f7a1fc29803712f80879b0806cb83ab24ce62fc8daf0569f2204a0cfd7f68ed4 - # via - # google-cloud-storage - # google-resumable-media -google-resumable-media==2.7.2 \ - --hash=sha256:3ce7551e9fe6d99e9a126101d2536612bb73486721951e9562fee0f90c6ababa \ - --hash=sha256:5280aed4629f2b60b847b0d42f9857fd4935c11af266744df33d8074cae92fe0 - # via google-cloud-storage -googleapis-common-protos==1.65.0 \ - --hash=sha256:2972e6c496f435b92590fd54045060867f3fe9be2c82ab148fc8885035479a63 \ - --hash=sha256:334a29d07cddc3aa01dee4988f9afd9b2916ee2ff49d6b757155dc0d197852c0 - # via google-api-core -idna==3.10 \ - --hash=sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9 \ - --hash=sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3 - # via requests -importlib-metadata==8.5.0 \ - 
--hash=sha256:45e54197d28b7a7f1559e60b95e7c567032b602131fbd588f1497f47880aa68b \ - --hash=sha256:71522656f0abace1d072b9e5481a48f07c138e00f079c38c8f883823f9c26bd7 - # via - # -r requirements.in - # keyring - # twine -jaraco-classes==3.4.0 \ - --hash=sha256:47a024b51d0239c0dd8c8540c6c7f484be3b8fcf0b2d85c13825780d3b3f3acd \ - --hash=sha256:f662826b6bed8cace05e7ff873ce0f9283b5c924470fe664fff1c2f00f581790 - # via keyring -jaraco-context==6.0.1 \ - --hash=sha256:9bae4ea555cf0b14938dc0aee7c9f32ed303aa20a3b73e7dc80111628792d1b3 \ - --hash=sha256:f797fc481b490edb305122c9181830a3a5b76d84ef6d1aef2fb9b47ab956f9e4 - # via keyring -jaraco-functools==4.1.0 \ - --hash=sha256:70f7e0e2ae076498e212562325e805204fc092d7b4c17e0e86c959e249701a9d \ - --hash=sha256:ad159f13428bc4acbf5541ad6dec511f91573b90fba04df61dafa2a1231cf649 - # via keyring -jeepney==0.8.0 \ - --hash=sha256:5efe48d255973902f6badc3ce55e2aa6c5c3b3bc642059ef3a91247bcfcc5806 \ - --hash=sha256:c0a454ad016ca575060802ee4d590dd912e35c122fa04e70306de3d076cce755 - # via - # keyring - # secretstorage -jinja2==3.1.4 \ - --hash=sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369 \ - --hash=sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d - # via gcp-releasetool -keyring==25.4.1 \ - --hash=sha256:5426f817cf7f6f007ba5ec722b1bcad95a75b27d780343772ad76b17cb47b0bf \ - --hash=sha256:b07ebc55f3e8ed86ac81dd31ef14e81ace9dd9c3d4b5d77a6e9a2016d0d71a1b - # via - # gcp-releasetool - # twine -markdown-it-py==3.0.0 \ - --hash=sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1 \ - --hash=sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb - # via rich -markupsafe==3.0.1 \ - --hash=sha256:0778de17cff1acaeccc3ff30cd99a3fd5c50fc58ad3d6c0e0c4c58092b859396 \ - --hash=sha256:0f84af7e813784feb4d5e4ff7db633aba6c8ca64a833f61d8e4eade234ef0c38 \ - --hash=sha256:17b2aea42a7280db02ac644db1d634ad47dcc96faf38ab304fe26ba2680d359a \ - --hash=sha256:242d6860f1fd9191aef5fae22b51c5c19767f93fb9ead4d21924e0bcb17619d8 \ - --hash=sha256:244dbe463d5fb6d7ce161301a03a6fe744dac9072328ba9fc82289238582697b \ - --hash=sha256:26627785a54a947f6d7336ce5963569b5d75614619e75193bdb4e06e21d447ad \ - --hash=sha256:2a4b34a8d14649315c4bc26bbfa352663eb51d146e35eef231dd739d54a5430a \ - --hash=sha256:2ae99f31f47d849758a687102afdd05bd3d3ff7dbab0a8f1587981b58a76152a \ - --hash=sha256:312387403cd40699ab91d50735ea7a507b788091c416dd007eac54434aee51da \ - --hash=sha256:3341c043c37d78cc5ae6e3e305e988532b072329639007fd408a476642a89fd6 \ - --hash=sha256:33d1c36b90e570ba7785dacd1faaf091203d9942bc036118fab8110a401eb1a8 \ - --hash=sha256:3e683ee4f5d0fa2dde4db77ed8dd8a876686e3fc417655c2ece9a90576905344 \ - --hash=sha256:3ffb4a8e7d46ed96ae48805746755fadd0909fea2306f93d5d8233ba23dda12a \ - --hash=sha256:40621d60d0e58aa573b68ac5e2d6b20d44392878e0bfc159012a5787c4e35bc8 \ - --hash=sha256:40f1e10d51c92859765522cbd79c5c8989f40f0419614bcdc5015e7b6bf97fc5 \ - --hash=sha256:45d42d132cff577c92bfba536aefcfea7e26efb975bd455db4e6602f5c9f45e7 \ - --hash=sha256:48488d999ed50ba8d38c581d67e496f955821dc183883550a6fbc7f1aefdc170 \ - --hash=sha256:4935dd7883f1d50e2ffecca0aa33dc1946a94c8f3fdafb8df5c330e48f71b132 \ - --hash=sha256:4c2d64fdba74ad16138300815cfdc6ab2f4647e23ced81f59e940d7d4a1469d9 \ - --hash=sha256:4c8817557d0de9349109acb38b9dd570b03cc5014e8aabf1cbddc6e81005becd \ - --hash=sha256:4ffaaac913c3f7345579db4f33b0020db693f302ca5137f106060316761beea9 \ - --hash=sha256:5a4cb365cb49b750bdb60b846b0c0bc49ed62e59a76635095a179d440540c346 \ - 
--hash=sha256:62fada2c942702ef8952754abfc1a9f7658a4d5460fabe95ac7ec2cbe0d02abc \ - --hash=sha256:67c519635a4f64e495c50e3107d9b4075aec33634272b5db1cde839e07367589 \ - --hash=sha256:6a54c43d3ec4cf2a39f4387ad044221c66a376e58c0d0e971d47c475ba79c6b5 \ - --hash=sha256:7044312a928a66a4c2a22644147bc61a199c1709712069a344a3fb5cfcf16915 \ - --hash=sha256:730d86af59e0e43ce277bb83970530dd223bf7f2a838e086b50affa6ec5f9295 \ - --hash=sha256:800100d45176652ded796134277ecb13640c1a537cad3b8b53da45aa96330453 \ - --hash=sha256:80fcbf3add8790caddfab6764bde258b5d09aefbe9169c183f88a7410f0f6dea \ - --hash=sha256:82b5dba6eb1bcc29cc305a18a3c5365d2af06ee71b123216416f7e20d2a84e5b \ - --hash=sha256:852dc840f6d7c985603e60b5deaae1d89c56cb038b577f6b5b8c808c97580f1d \ - --hash=sha256:8ad4ad1429cd4f315f32ef263c1342166695fad76c100c5d979c45d5570ed58b \ - --hash=sha256:8ae369e84466aa70f3154ee23c1451fda10a8ee1b63923ce76667e3077f2b0c4 \ - --hash=sha256:93e8248d650e7e9d49e8251f883eed60ecbc0e8ffd6349e18550925e31bd029b \ - --hash=sha256:973a371a55ce9ed333a3a0f8e0bcfae9e0d637711534bcb11e130af2ab9334e7 \ - --hash=sha256:9ba25a71ebf05b9bb0e2ae99f8bc08a07ee8e98c612175087112656ca0f5c8bf \ - --hash=sha256:a10860e00ded1dd0a65b83e717af28845bb7bd16d8ace40fe5531491de76b79f \ - --hash=sha256:a4792d3b3a6dfafefdf8e937f14906a51bd27025a36f4b188728a73382231d91 \ - --hash=sha256:a7420ceda262dbb4b8d839a4ec63d61c261e4e77677ed7c66c99f4e7cb5030dd \ - --hash=sha256:ad91738f14eb8da0ff82f2acd0098b6257621410dcbd4df20aaa5b4233d75a50 \ - --hash=sha256:b6a387d61fe41cdf7ea95b38e9af11cfb1a63499af2759444b99185c4ab33f5b \ - --hash=sha256:b954093679d5750495725ea6f88409946d69cfb25ea7b4c846eef5044194f583 \ - --hash=sha256:bbde71a705f8e9e4c3e9e33db69341d040c827c7afa6789b14c6e16776074f5a \ - --hash=sha256:beeebf760a9c1f4c07ef6a53465e8cfa776ea6a2021eda0d0417ec41043fe984 \ - --hash=sha256:c91b394f7601438ff79a4b93d16be92f216adb57d813a78be4446fe0f6bc2d8c \ - --hash=sha256:c97ff7fedf56d86bae92fa0a646ce1a0ec7509a7578e1ed238731ba13aabcd1c \ - --hash=sha256:cb53e2a99df28eee3b5f4fea166020d3ef9116fdc5764bc5117486e6d1211b25 \ - --hash=sha256:cbf445eb5628981a80f54087f9acdbf84f9b7d862756110d172993b9a5ae81aa \ - --hash=sha256:d06b24c686a34c86c8c1fba923181eae6b10565e4d80bdd7bc1c8e2f11247aa4 \ - --hash=sha256:d98e66a24497637dd31ccab090b34392dddb1f2f811c4b4cd80c230205c074a3 \ - --hash=sha256:db15ce28e1e127a0013dfb8ac243a8e392db8c61eae113337536edb28bdc1f97 \ - --hash=sha256:db842712984e91707437461930e6011e60b39136c7331e971952bb30465bc1a1 \ - --hash=sha256:e24bfe89c6ac4c31792793ad9f861b8f6dc4546ac6dc8f1c9083c7c4f2b335cd \ - --hash=sha256:e81c52638315ff4ac1b533d427f50bc0afc746deb949210bc85f05d4f15fd772 \ - --hash=sha256:e9393357f19954248b00bed7c56f29a25c930593a77630c719653d51e7669c2a \ - --hash=sha256:ee3941769bd2522fe39222206f6dd97ae83c442a94c90f2b7a25d847d40f4729 \ - --hash=sha256:f31ae06f1328595d762c9a2bf29dafd8621c7d3adc130cbb46278079758779ca \ - --hash=sha256:f94190df587738280d544971500b9cafc9b950d32efcb1fba9ac10d84e6aa4e6 \ - --hash=sha256:fa7d686ed9883f3d664d39d5a8e74d3c5f63e603c2e3ff0abcba23eac6542635 \ - --hash=sha256:fb532dd9900381d2e8f48172ddc5a59db4c445a11b9fab40b3b786da40d3b56b \ - --hash=sha256:fe32482b37b4b00c7a52a07211b479653b7fe4f22b2e481b9a9b099d8a430f2f - # via jinja2 -mdurl==0.1.2 \ - --hash=sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8 \ - --hash=sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba - # via markdown-it-py -more-itertools==10.5.0 \ - 
--hash=sha256:037b0d3203ce90cca8ab1defbbdac29d5f993fc20131f3664dc8d6acfa872aef \ - --hash=sha256:5482bfef7849c25dc3c6dd53a6173ae4795da2a41a80faea6700d9f5846c5da6 - # via - # jaraco-classes - # jaraco-functools -nh3==0.2.18 \ - --hash=sha256:0411beb0589eacb6734f28d5497ca2ed379eafab8ad8c84b31bb5c34072b7164 \ - --hash=sha256:14c5a72e9fe82aea5fe3072116ad4661af5cf8e8ff8fc5ad3450f123e4925e86 \ - --hash=sha256:19aaba96e0f795bd0a6c56291495ff59364f4300d4a39b29a0abc9cb3774a84b \ - --hash=sha256:34c03fa78e328c691f982b7c03d4423bdfd7da69cd707fe572f544cf74ac23ad \ - --hash=sha256:36c95d4b70530b320b365659bb5034341316e6a9b30f0b25fa9c9eff4c27a204 \ - --hash=sha256:3a157ab149e591bb638a55c8c6bcb8cdb559c8b12c13a8affaba6cedfe51713a \ - --hash=sha256:42c64511469005058cd17cc1537578eac40ae9f7200bedcfd1fc1a05f4f8c200 \ - --hash=sha256:5f36b271dae35c465ef5e9090e1fdaba4a60a56f0bb0ba03e0932a66f28b9189 \ - --hash=sha256:6955369e4d9f48f41e3f238a9e60f9410645db7e07435e62c6a9ea6135a4907f \ - --hash=sha256:7b7c2a3c9eb1a827d42539aa64091640bd275b81e097cd1d8d82ef91ffa2e811 \ - --hash=sha256:8ce0f819d2f1933953fca255db2471ad58184a60508f03e6285e5114b6254844 \ - --hash=sha256:94a166927e53972a9698af9542ace4e38b9de50c34352b962f4d9a7d4c927af4 \ - --hash=sha256:a7f1b5b2c15866f2db413a3649a8fe4fd7b428ae58be2c0f6bca5eefd53ca2be \ - --hash=sha256:c8b3a1cebcba9b3669ed1a84cc65bf005728d2f0bc1ed2a6594a992e817f3a50 \ - --hash=sha256:de3ceed6e661954871d6cd78b410213bdcb136f79aafe22aa7182e028b8c7307 \ - --hash=sha256:f0eca9ca8628dbb4e916ae2491d72957fdd35f7a5d326b7032a345f111ac07fe - # via readme-renderer -nox==2024.10.9 \ - --hash=sha256:1d36f309a0a2a853e9bccb76bbef6bb118ba92fa92674d15604ca99adeb29eab \ - --hash=sha256:7aa9dc8d1c27e9f45ab046ffd1c3b2c4f7c91755304769df231308849ebded95 - # via -r requirements.in -packaging==24.1 \ - --hash=sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002 \ - --hash=sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124 - # via - # gcp-releasetool - # nox -pkginfo==1.10.0 \ - --hash=sha256:5df73835398d10db79f8eecd5cd86b1f6d29317589ea70796994d49399af6297 \ - --hash=sha256:889a6da2ed7ffc58ab5b900d888ddce90bce912f2d2de1dc1c26f4cb9fe65097 - # via twine -platformdirs==4.3.6 \ - --hash=sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907 \ - --hash=sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb - # via virtualenv -proto-plus==1.24.0 \ - --hash=sha256:30b72a5ecafe4406b0d339db35b56c4059064e69227b8c3bda7462397f966445 \ - --hash=sha256:402576830425e5f6ce4c2a6702400ac79897dab0b4343821aa5188b0fab81a12 - # via google-api-core -protobuf==5.28.2 \ - --hash=sha256:2c69461a7fcc8e24be697624c09a839976d82ae75062b11a0972e41fd2cd9132 \ - --hash=sha256:35cfcb15f213449af7ff6198d6eb5f739c37d7e4f1c09b5d0641babf2cc0c68f \ - --hash=sha256:52235802093bd8a2811abbe8bf0ab9c5f54cca0a751fdd3f6ac2a21438bffece \ - --hash=sha256:59379674ff119717404f7454647913787034f03fe7049cbef1d74a97bb4593f0 \ - --hash=sha256:5e8a95246d581eef20471b5d5ba010d55f66740942b95ba9b872d918c459452f \ - --hash=sha256:87317e9bcda04a32f2ee82089a204d3a2f0d3c8aeed16568c7daf4756e4f1fe0 \ - --hash=sha256:8ddc60bf374785fb7cb12510b267f59067fa10087325b8e1855b898a0d81d276 \ - --hash=sha256:a8b9403fc70764b08d2f593ce44f1d2920c5077bf7d311fefec999f8c40f78b7 \ - --hash=sha256:c0ea0123dac3399a2eeb1a1443d82b7afc9ff40241433296769f7da42d142ec3 \ - --hash=sha256:ca53faf29896c526863366a52a8f4d88e69cd04ec9571ed6082fa117fac3ab36 \ - --hash=sha256:eeea10f3dc0ac7e6b4933d32db20662902b4ab81bf28df12218aa389e9c2102d - # via 
- # gcp-docuploader - # gcp-releasetool - # google-api-core - # googleapis-common-protos - # proto-plus -pyasn1==0.6.1 \ - --hash=sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629 \ - --hash=sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034 - # via - # pyasn1-modules - # rsa -pyasn1-modules==0.4.1 \ - --hash=sha256:49bfa96b45a292b711e986f222502c1c9a5e1f4e568fc30e2574a6c7d07838fd \ - --hash=sha256:c28e2dbf9c06ad61c71a075c7e0f9fd0f1b0bb2d2ad4377f240d33ac2ab60a7c - # via google-auth -pycparser==2.22 \ - --hash=sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6 \ - --hash=sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc - # via cffi -pygments==2.18.0 \ - --hash=sha256:786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199 \ - --hash=sha256:b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a - # via - # readme-renderer - # rich -pyjwt==2.9.0 \ - --hash=sha256:3b02fb0f44517787776cf48f2ae25d8e14f300e6d7545a4315cee571a415e850 \ - --hash=sha256:7e1e5b56cc735432a7369cbfa0efe50fa113ebecdc04ae6922deba8b84582d0c - # via gcp-releasetool -pyperclip==1.9.0 \ - --hash=sha256:b7de0142ddc81bfc5c7507eea19da920b92252b548b96186caf94a5e2527d310 - # via gcp-releasetool -python-dateutil==2.9.0.post0 \ - --hash=sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3 \ - --hash=sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427 - # via gcp-releasetool -readme-renderer==44.0 \ - --hash=sha256:2fbca89b81a08526aadf1357a8c2ae889ec05fb03f5da67f9769c9a592166151 \ - --hash=sha256:8712034eabbfa6805cacf1402b4eeb2a73028f72d1166d6f5cb7f9c047c5d1e1 - # via twine -requests==2.32.3 \ - --hash=sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760 \ - --hash=sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6 - # via - # gcp-releasetool - # google-api-core - # google-cloud-storage - # requests-toolbelt - # twine -requests-toolbelt==1.0.0 \ - --hash=sha256:7681a0a3d047012b5bdc0ee37d7f8f07ebe76ab08caeccfc3921ce23c88d5bc6 \ - --hash=sha256:cccfdd665f0a24fcf4726e690f65639d272bb0637b9b92dfd91a5568ccf6bd06 - # via twine -rfc3986==2.0.0 \ - --hash=sha256:50b1502b60e289cb37883f3dfd34532b8873c7de9f49bb546641ce9cbd256ebd \ - --hash=sha256:97aacf9dbd4bfd829baad6e6309fa6573aaf1be3f6fa735c8ab05e46cecb261c - # via twine -rich==13.9.2 \ - --hash=sha256:51a2c62057461aaf7152b4d611168f93a9fc73068f8ded2790f29fe2b5366d0c \ - --hash=sha256:8c82a3d3f8dcfe9e734771313e606b39d8247bb6b826e196f4914b333b743cf1 - # via twine -rsa==4.9 \ - --hash=sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7 \ - --hash=sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21 - # via google-auth -secretstorage==3.3.3 \ - --hash=sha256:2403533ef369eca6d2ba81718576c5e0f564d5cca1b58f73a8b23e7d4eeebd77 \ - --hash=sha256:f356e6628222568e3af06f2eba8df495efa13b3b63081dafd4f7d9a7b7bc9f99 - # via keyring -six==1.16.0 \ - --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ - --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 - # via - # gcp-docuploader - # python-dateutil -tomli==2.0.2 \ - --hash=sha256:2ebe24485c53d303f690b0ec092806a085f07af5a5aa1464f3931eec36caaa38 \ - --hash=sha256:d46d457a85337051c36524bc5349dd91b1877838e2979ac5ced3e710ed8a60ed - # via nox -twine==5.1.1 \ - --hash=sha256:215dbe7b4b94c2c50a7315c0275d2258399280fbb7d04182c7e55e24b5f93997 \ - 
--hash=sha256:9aa0825139c02b3434d913545c7b847a21c835e11597f5255842d457da2322db - # via -r requirements.in -typing-extensions==4.12.2 \ - --hash=sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d \ - --hash=sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8 - # via - # -r requirements.in - # rich -urllib3==2.2.3 \ - --hash=sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac \ - --hash=sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9 - # via - # requests - # twine -virtualenv==20.26.6 \ - --hash=sha256:280aede09a2a5c317e409a00102e7077c6432c5a38f0ef938e643805a7ad2c48 \ - --hash=sha256:7345cc5b25405607a624d8418154577459c3e0277f5466dd79c49d5e492995f2 - # via nox -wheel==0.44.0 \ - --hash=sha256:2376a90c98cc337d18623527a97c31797bd02bad0033d41547043a1cbfbe448f \ - --hash=sha256:a29c3f2817e95ab89aa4660681ad547c0e9547f20e75b0562fe7723c9a2a9d49 - # via -r requirements.in -zipp==3.20.2 \ - --hash=sha256:a817ac80d6cf4b23bf7f2828b7cabf326f15a001bea8b1f9b49631780ba28350 \ - --hash=sha256:bc9eb26f4506fda01b81bcde0ca78103b6e62f991b381fec825435c836edbc29 - # via importlib-metadata - -# The following packages are considered to be unsafe in a requirements file: -setuptools==75.1.0 \ - --hash=sha256:35ab7fd3bcd95e6b7fd704e4a1539513edad446c097797f2985e0e4b960772f2 \ - --hash=sha256:d59a21b17a275fb872a9c3dae73963160ae079f1049ed956880cd7c09b120538 - # via -r requirements.in From 35e4afbbfe4200bc2b4b04b37e5d62a7d1b83fa2 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Mon, 10 Mar 2025 14:35:40 -0700 Subject: [PATCH 853/892] chore: update gapic (#1048) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: Add support for opt-in debug logging fix: Fix typing issue with gRPC metadata when key ends in -bin chore: Update gapic-generator-python to v1.21.0 PiperOrigin-RevId: 705285820 Source-Link: https://github.com/googleapis/googleapis/commit/f9b8b9150f7fcd600b0acaeef91236b1843f5e49 Source-Link: https://github.com/googleapis/googleapis-gen/commit/ca1e0a1e472d6e6f5de883a5cb54724f112ce348 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiY2ExZTBhMWU0NzJkNmU2ZjVkZTg4M2E1Y2I1NDcyNGYxMTJjZTM0OCJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * fix: extend timeouts for check consistency PiperOrigin-RevId: 717421943 Source-Link: https://github.com/googleapis/googleapis/commit/07737e56be021ca2d11a24fb759ff3de79d83fa0 Source-Link: https://github.com/googleapis/googleapis-gen/commit/c41ade9ef7a90a1e38bda78132447a4b7e50c11d Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiYzQxYWRlOWVmN2E5MGExZTM4YmRhNzgxMzI0NDdhNGI3ZTUwYzExZCJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * feat: Add REST Interceptors which support reading metadata feat: Add support for reading selective GAPIC generation methods from service YAML chore: Update gapic-generator-python to v1.22.0 PiperOrigin-RevId: 724026024 Source-Link: https://github.com/googleapis/googleapis/commit/ad9963857109513e77eed153a66264481789109f Source-Link: https://github.com/googleapis/googleapis-gen/commit/e291c4dd1d670eda19998de76f967e1603a48993 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiZTI5MWM0ZGQxZDY3MGVkYTE5OTk4ZGU3NmY5NjdlMTYwM2E0ODk5MyJ9 * 🦉 Updates from OwlBot post-processor See 
https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * chore: Update gapic-generator-python to v1.22.1 fix(deps): Require grpc-google-iam-v1>=0.14.0 PiperOrigin-RevId: 726142856 Source-Link: https://github.com/googleapis/googleapis/commit/25989cb753bf7d69ee446bda9d9794b61912707d Source-Link: https://github.com/googleapis/googleapis-gen/commit/677041b91cef1598cc55727d59a2804b198a5bbf Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiNjc3MDQxYjkxY2VmMTU5OGNjNTU3MjdkNTlhMjgwNGIxOThhNWJiZiJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * feat: publish row_key_schema fields in table proto and relevant admin APIs to setup a table with a row_key_schema PiperOrigin-RevId: 732197624 Source-Link: https://github.com/googleapis/googleapis/commit/33b23a795cf6fa480df56074540fc2f9a7936012 Source-Link: https://github.com/googleapis/googleapis-gen/commit/cfb78ae9b01c9f6bb091d84678bcd0dc9907e734 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiY2ZiNzhhZTliMDFjOWY2YmIwOTFkODQ2NzhiY2QwZGM5OTA3ZTczNCJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * chore: Update gapic-generator-python to v1.23.2 PiperOrigin-RevId: 732281673 Source-Link: https://github.com/googleapis/googleapis/commit/2f37e0ad56637325b24f8603284ccb6f05796f9a Source-Link: https://github.com/googleapis/googleapis-gen/commit/016b7538ba5a798f2ae423d4ccd7f82b06cdf6d2 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiMDE2Yjc1MzhiYTVhNzk4ZjJhZTQyM2Q0Y2NkN2Y4MmIwNmNkZjZkMiJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * chore: Update gapic-generator-python to v1.23.3 PiperOrigin-RevId: 732994462 Source-Link: https://github.com/googleapis/googleapis/commit/50cbb15ee738d6a049af68756a9709ea50421e87 Source-Link: https://github.com/googleapis/googleapis-gen/commit/6ca4b8730c4e5cc7d3e54049cbd6f99d8d7cb33c Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiNmNhNGI4NzMwYzRlNWNjN2QzZTU0MDQ5Y2JkNmY5OWQ4ZDdjYjMzYyJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * feat: add MaterializedViews and LogicalViews APIs PiperOrigin-RevId: 733101782 Source-Link: https://github.com/googleapis/googleapis/commit/05f571eb755baad00ed592fb946004fc9c12d2cc Source-Link: https://github.com/googleapis/googleapis-gen/commit/6e6954c2d468aa89e56e3463ef1ae4c7d01fcce6 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiNmU2OTU0YzJkNDY4YWE4OWU1NmUzNDYzZWYxYWU0YzdkMDFmY2NlNiJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * docs: Fixed formatting of resource path strings PiperOrigin-RevId: 733415839 Source-Link: https://github.com/googleapis/googleapis/commit/da20dfe4f5bb94a0aeb178d90847c1410f5416dc Source-Link: https://github.com/googleapis/googleapis-gen/commit/86b7c0cbbaeec8134a76e82d9b24dcb977c9fb4a Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiODZiN2MwY2JiYWVlYzgxMzRhNzZlODJkOWIyNGRjYjk3N2M5ZmI0YSJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * chore: Add grpc service config for ExecuteQuery API PiperOrigin-RevId: 733462032 Source-Link: 
https://github.com/googleapis/googleapis/commit/03183b76c8c37b7442e4f20dc50c3d1ab65c4e4d Source-Link: https://github.com/googleapis/googleapis-gen/commit/532cf74c6dce2aeee0a505e63955f3e498f0e1aa Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiNTMyY2Y3NGM2ZGNlMmFlZWUwYTUwNWU2Mzk1NWYzZTQ5OGYwZTFhYSJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * feat: Add PrepareQuery api and update ExecuteQuery to support it docs: Update ExecuteQuery API docs to reflect changes PiperOrigin-RevId: 734273312 Source-Link: https://github.com/googleapis/googleapis/commit/9513189365a4cd150cbd62024ea23b0a4d3265c4 Source-Link: https://github.com/googleapis/googleapis-gen/commit/a950280d506b2fdd9c66a1098c00f91d8f780b66 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiYTk1MDI4MGQ1MDZiMmZkZDljNjZhMTA5OGMwMGY5MWQ4Zjc4MGI2NiJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * update logged_channel reference during channel refresh * fixed logged channel update * use different wrap method for sync * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * feat: add MaterializedViewName to ReadRows and SampleRowKeys PiperOrigin-RevId: 735384675 Source-Link: https://github.com/googleapis/googleapis/commit/47d236a058fee1cf4cab357c852dc935d095bb69 Source-Link: https://github.com/googleapis/googleapis-gen/commit/7d15ec91a4d0adb90c851632e3f74541e78dc520 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiN2QxNWVjOTFhNGQwYWRiOTBjODUxNjMyZTNmNzQ1NDFlNzhkYzUyMCJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * fix: Allow Protobuf 6.x chore: Update gapic-generator-python to v1.23.5 PiperOrigin-RevId: 735388698 Source-Link: https://github.com/googleapis/googleapis/commit/a3dda51e8733481e68c86316d6531ed73aa1e44f Source-Link: https://github.com/googleapis/googleapis-gen/commit/c329c693d2da063a89ecc29e15dc196769aa854b Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiYzMyOWM2OTNkMmRhMDYzYTg5ZWNjMjllMTVkYzE5Njc2OWFhODU0YiJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * feat: add MaterializedViews and LogicalViews APIs PiperOrigin-RevId: 735407006 Source-Link: https://github.com/googleapis/googleapis/commit/b80f49d1bcb3b0f1de695d2d093ad3a43ac59f3b Source-Link: https://github.com/googleapis/googleapis-gen/commit/9d5789e45af87d371fbbab4df14689807fc2c323 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiOWQ1Nzg5ZTQ1YWY4N2QzNzFmYmJhYjRkZjE0Njg5ODA3ZmMyYzMyMyJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * fixed test * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot Co-authored-by: Daniel Sanche --- .../cloud/bigtable/data/_async/client.py | 11 + .../bigtable/data/_sync_autogen/client.py | 3 + .../google/cloud/bigtable_admin/__init__.py | 68 + .../cloud/bigtable_admin_v2/__init__.py | 36 + .../bigtable_admin_v2/gapic_metadata.json | 150 + .../bigtable_instance_admin/async_client.py | 1333 +- .../bigtable_instance_admin/client.py | 1371 +- .../bigtable_instance_admin/pagers.py | 352 +- .../transports/base.py | 146 + 
.../transports/grpc.py | 419 +- .../transports/grpc_asyncio.py | 472 +- .../transports/rest.py | 4275 +++- .../transports/rest_base.py | 552 + .../bigtable_table_admin/async_client.py | 414 +- .../services/bigtable_table_admin/client.py | 448 +- .../services/bigtable_table_admin/pagers.py | 64 +- .../bigtable_table_admin/transports/base.py | 4 +- .../bigtable_table_admin/transports/grpc.py | 158 +- .../transports/grpc_asyncio.py | 159 +- .../bigtable_table_admin/transports/rest.py | 2627 ++- .../cloud/bigtable_admin_v2/types/__init__.py | 36 + .../types/bigtable_instance_admin.py | 477 +- .../types/bigtable_table_admin.py | 8 + .../cloud/bigtable_admin_v2/types/instance.py | 118 +- .../cloud/bigtable_admin_v2/types/table.py | 58 + .../cloud/bigtable_admin_v2/types/types.py | 309 +- .../google/cloud/bigtable_v2/__init__.py | 4 + .../cloud/bigtable_v2/gapic_metadata.json | 15 + .../services/bigtable/async_client.py | 359 +- .../bigtable_v2/services/bigtable/client.py | 360 +- .../services/bigtable/transports/base.py | 26 +- .../services/bigtable/transports/grpc.py | 141 +- .../bigtable/transports/grpc_asyncio.py | 159 +- .../services/bigtable/transports/rest.py | 1072 +- .../services/bigtable/transports/rest_base.py | 57 + .../cloud/bigtable_v2/types/__init__.py | 4 + .../cloud/bigtable_v2/types/bigtable.py | 157 +- .../google/cloud/bigtable_v2/types/data.py | 127 +- .../fixup_bigtable_admin_v2_keywords.py | 14 +- .../scripts/fixup_bigtable_v2_keywords.py | 7 +- .../tests/unit/data/_async/test_client.py | 10 + .../unit/data/_sync_autogen/test_client.py | 4 + .../test_bigtable_instance_admin.py | 16086 ++++++++++++---- .../test_bigtable_table_admin.py | 411 +- .../unit/gapic/bigtable_v2/test_bigtable.py | 1093 +- 45 files changed, 28992 insertions(+), 5182 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py index 5c9649c41fd5..4d52c64c2e5d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py @@ -87,6 +87,7 @@ else: from typing import Iterable # noqa: F401 from grpc import insecure_channel + from grpc import intercept_channel from google.cloud.bigtable_v2.services.bigtable.transports import BigtableGrpcTransport as TransportType # type: ignore from google.cloud.bigtable.data._sync_autogen.mutations_batcher import _MB_SIZE @@ -366,11 +367,21 @@ async def _manage_channel( break start_timestamp = time.monotonic() # prepare new channel for use + # TODO: refactor to avoid using internal references: https://github.com/googleapis/python-bigtable/issues/1094 old_channel = self.transport.grpc_channel new_channel = self.transport.create_channel() + if CrossSync.is_async: + new_channel._unary_unary_interceptors.append( + self.transport._interceptor + ) + else: + new_channel = intercept_channel( + new_channel, self.transport._interceptor + ) await self._ping_and_warm_instances(channel=new_channel) # cycle channel out of use, with long grace window before closure self.transport._grpc_channel = new_channel + self.transport._logged_channel = new_channel # invalidate caches self.transport._stubs = {} self.transport._prep_wrapped_messages(self.client_info) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/client.py index b89e232070b4..7b1e72ad6f67 100644 
--- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/client.py @@ -66,6 +66,7 @@ from google.cloud.bigtable.data._cross_sync import CrossSync from typing import Iterable from grpc import insecure_channel +from grpc import intercept_channel from google.cloud.bigtable_v2.services.bigtable.transports import ( BigtableGrpcTransport as TransportType, ) @@ -282,8 +283,10 @@ def _manage_channel( start_timestamp = time.monotonic() old_channel = self.transport.grpc_channel new_channel = self.transport.create_channel() + new_channel = intercept_channel(new_channel, self.transport._interceptor) self._ping_and_warm_instances(channel=new_channel) self.transport._grpc_channel = new_channel + self.transport._logged_channel = new_channel self.transport._stubs = {} self.transport._prep_wrapped_messages(self.client_info) if grace_period: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/__init__.py index 2884a96ab748..319c1f3320e0 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/__init__.py @@ -46,6 +46,18 @@ from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( CreateInstanceRequest, ) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + CreateLogicalViewMetadata, +) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + CreateLogicalViewRequest, +) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + CreateMaterializedViewMetadata, +) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + CreateMaterializedViewRequest, +) from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( DeleteAppProfileRequest, ) @@ -55,6 +67,12 @@ from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( DeleteInstanceRequest, ) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + DeleteLogicalViewRequest, +) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + DeleteMaterializedViewRequest, +) from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( GetAppProfileRequest, ) @@ -64,6 +82,12 @@ from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( GetInstanceRequest, ) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + GetLogicalViewRequest, +) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + GetMaterializedViewRequest, +) from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( ListAppProfilesRequest, ) @@ -88,6 +112,18 @@ from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( ListInstancesResponse, ) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + ListLogicalViewsRequest, +) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + ListLogicalViewsResponse, +) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + ListMaterializedViewsRequest, +) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + ListMaterializedViewsResponse, +) from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( PartialUpdateClusterMetadata, ) @@ -109,6 +145,18 @@ from 
google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( UpdateInstanceMetadata, ) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + UpdateLogicalViewMetadata, +) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + UpdateLogicalViewRequest, +) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + UpdateMaterializedViewMetadata, +) +from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( + UpdateMaterializedViewRequest, +) from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( CheckConsistencyRequest, ) @@ -230,6 +278,8 @@ from google.cloud.bigtable_admin_v2.types.instance import Cluster from google.cloud.bigtable_admin_v2.types.instance import HotTablet from google.cloud.bigtable_admin_v2.types.instance import Instance +from google.cloud.bigtable_admin_v2.types.instance import LogicalView +from google.cloud.bigtable_admin_v2.types.instance import MaterializedView from google.cloud.bigtable_admin_v2.types.table import AuthorizedView from google.cloud.bigtable_admin_v2.types.table import Backup from google.cloud.bigtable_admin_v2.types.table import BackupInfo @@ -253,12 +303,20 @@ "CreateClusterRequest", "CreateInstanceMetadata", "CreateInstanceRequest", + "CreateLogicalViewMetadata", + "CreateLogicalViewRequest", + "CreateMaterializedViewMetadata", + "CreateMaterializedViewRequest", "DeleteAppProfileRequest", "DeleteClusterRequest", "DeleteInstanceRequest", + "DeleteLogicalViewRequest", + "DeleteMaterializedViewRequest", "GetAppProfileRequest", "GetClusterRequest", "GetInstanceRequest", + "GetLogicalViewRequest", + "GetMaterializedViewRequest", "ListAppProfilesRequest", "ListAppProfilesResponse", "ListClustersRequest", @@ -267,6 +325,10 @@ "ListHotTabletsResponse", "ListInstancesRequest", "ListInstancesResponse", + "ListLogicalViewsRequest", + "ListLogicalViewsResponse", + "ListMaterializedViewsRequest", + "ListMaterializedViewsResponse", "PartialUpdateClusterMetadata", "PartialUpdateClusterRequest", "PartialUpdateInstanceRequest", @@ -274,6 +336,10 @@ "UpdateAppProfileRequest", "UpdateClusterMetadata", "UpdateInstanceMetadata", + "UpdateLogicalViewMetadata", + "UpdateLogicalViewRequest", + "UpdateMaterializedViewMetadata", + "UpdateMaterializedViewRequest", "CheckConsistencyRequest", "CheckConsistencyResponse", "CopyBackupMetadata", @@ -327,6 +393,8 @@ "Cluster", "HotTablet", "Instance", + "LogicalView", + "MaterializedView", "AuthorizedView", "Backup", "BackupInfo", diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py index f2aea1667712..1d2d13cf0712 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py @@ -28,12 +28,20 @@ from .types.bigtable_instance_admin import CreateClusterRequest from .types.bigtable_instance_admin import CreateInstanceMetadata from .types.bigtable_instance_admin import CreateInstanceRequest +from .types.bigtable_instance_admin import CreateLogicalViewMetadata +from .types.bigtable_instance_admin import CreateLogicalViewRequest +from .types.bigtable_instance_admin import CreateMaterializedViewMetadata +from .types.bigtable_instance_admin import CreateMaterializedViewRequest from .types.bigtable_instance_admin import DeleteAppProfileRequest from .types.bigtable_instance_admin import DeleteClusterRequest from 
.types.bigtable_instance_admin import DeleteInstanceRequest +from .types.bigtable_instance_admin import DeleteLogicalViewRequest +from .types.bigtable_instance_admin import DeleteMaterializedViewRequest from .types.bigtable_instance_admin import GetAppProfileRequest from .types.bigtable_instance_admin import GetClusterRequest from .types.bigtable_instance_admin import GetInstanceRequest +from .types.bigtable_instance_admin import GetLogicalViewRequest +from .types.bigtable_instance_admin import GetMaterializedViewRequest from .types.bigtable_instance_admin import ListAppProfilesRequest from .types.bigtable_instance_admin import ListAppProfilesResponse from .types.bigtable_instance_admin import ListClustersRequest @@ -42,6 +50,10 @@ from .types.bigtable_instance_admin import ListHotTabletsResponse from .types.bigtable_instance_admin import ListInstancesRequest from .types.bigtable_instance_admin import ListInstancesResponse +from .types.bigtable_instance_admin import ListLogicalViewsRequest +from .types.bigtable_instance_admin import ListLogicalViewsResponse +from .types.bigtable_instance_admin import ListMaterializedViewsRequest +from .types.bigtable_instance_admin import ListMaterializedViewsResponse from .types.bigtable_instance_admin import PartialUpdateClusterMetadata from .types.bigtable_instance_admin import PartialUpdateClusterRequest from .types.bigtable_instance_admin import PartialUpdateInstanceRequest @@ -49,6 +61,10 @@ from .types.bigtable_instance_admin import UpdateAppProfileRequest from .types.bigtable_instance_admin import UpdateClusterMetadata from .types.bigtable_instance_admin import UpdateInstanceMetadata +from .types.bigtable_instance_admin import UpdateLogicalViewMetadata +from .types.bigtable_instance_admin import UpdateLogicalViewRequest +from .types.bigtable_instance_admin import UpdateMaterializedViewMetadata +from .types.bigtable_instance_admin import UpdateMaterializedViewRequest from .types.bigtable_table_admin import CheckConsistencyRequest from .types.bigtable_table_admin import CheckConsistencyResponse from .types.bigtable_table_admin import CopyBackupMetadata @@ -102,6 +118,8 @@ from .types.instance import Cluster from .types.instance import HotTablet from .types.instance import Instance +from .types.instance import LogicalView +from .types.instance import MaterializedView from .types.table import AuthorizedView from .types.table import Backup from .types.table import BackupInfo @@ -142,6 +160,10 @@ "CreateClusterRequest", "CreateInstanceMetadata", "CreateInstanceRequest", + "CreateLogicalViewMetadata", + "CreateLogicalViewRequest", + "CreateMaterializedViewMetadata", + "CreateMaterializedViewRequest", "CreateTableFromSnapshotMetadata", "CreateTableFromSnapshotRequest", "CreateTableRequest", @@ -151,6 +173,8 @@ "DeleteBackupRequest", "DeleteClusterRequest", "DeleteInstanceRequest", + "DeleteLogicalViewRequest", + "DeleteMaterializedViewRequest", "DeleteSnapshotRequest", "DeleteTableRequest", "DropRowRangeRequest", @@ -163,6 +187,8 @@ "GetBackupRequest", "GetClusterRequest", "GetInstanceRequest", + "GetLogicalViewRequest", + "GetMaterializedViewRequest", "GetSnapshotRequest", "GetTableRequest", "HotTablet", @@ -179,10 +205,16 @@ "ListHotTabletsResponse", "ListInstancesRequest", "ListInstancesResponse", + "ListLogicalViewsRequest", + "ListLogicalViewsResponse", + "ListMaterializedViewsRequest", + "ListMaterializedViewsResponse", "ListSnapshotsRequest", "ListSnapshotsResponse", "ListTablesRequest", "ListTablesResponse", + "LogicalView", + "MaterializedView", 
"ModifyColumnFamiliesRequest", "OperationProgress", "OptimizeRestoredTableMetadata", @@ -209,6 +241,10 @@ "UpdateBackupRequest", "UpdateClusterMetadata", "UpdateInstanceMetadata", + "UpdateLogicalViewMetadata", + "UpdateLogicalViewRequest", + "UpdateMaterializedViewMetadata", + "UpdateMaterializedViewRequest", "UpdateTableMetadata", "UpdateTableRequest", ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_metadata.json b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_metadata.json index 7cd09c43b60f..c56fde6e7dba 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_metadata.json +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_metadata.json @@ -25,6 +25,16 @@ "create_instance" ] }, + "CreateLogicalView": { + "methods": [ + "create_logical_view" + ] + }, + "CreateMaterializedView": { + "methods": [ + "create_materialized_view" + ] + }, "DeleteAppProfile": { "methods": [ "delete_app_profile" @@ -40,6 +50,16 @@ "delete_instance" ] }, + "DeleteLogicalView": { + "methods": [ + "delete_logical_view" + ] + }, + "DeleteMaterializedView": { + "methods": [ + "delete_materialized_view" + ] + }, "GetAppProfile": { "methods": [ "get_app_profile" @@ -60,6 +80,16 @@ "get_instance" ] }, + "GetLogicalView": { + "methods": [ + "get_logical_view" + ] + }, + "GetMaterializedView": { + "methods": [ + "get_materialized_view" + ] + }, "ListAppProfiles": { "methods": [ "list_app_profiles" @@ -80,6 +110,16 @@ "list_instances" ] }, + "ListLogicalViews": { + "methods": [ + "list_logical_views" + ] + }, + "ListMaterializedViews": { + "methods": [ + "list_materialized_views" + ] + }, "PartialUpdateCluster": { "methods": [ "partial_update_cluster" @@ -114,6 +154,16 @@ "methods": [ "update_instance" ] + }, + "UpdateLogicalView": { + "methods": [ + "update_logical_view" + ] + }, + "UpdateMaterializedView": { + "methods": [ + "update_materialized_view" + ] } } }, @@ -135,6 +185,16 @@ "create_instance" ] }, + "CreateLogicalView": { + "methods": [ + "create_logical_view" + ] + }, + "CreateMaterializedView": { + "methods": [ + "create_materialized_view" + ] + }, "DeleteAppProfile": { "methods": [ "delete_app_profile" @@ -150,6 +210,16 @@ "delete_instance" ] }, + "DeleteLogicalView": { + "methods": [ + "delete_logical_view" + ] + }, + "DeleteMaterializedView": { + "methods": [ + "delete_materialized_view" + ] + }, "GetAppProfile": { "methods": [ "get_app_profile" @@ -170,6 +240,16 @@ "get_instance" ] }, + "GetLogicalView": { + "methods": [ + "get_logical_view" + ] + }, + "GetMaterializedView": { + "methods": [ + "get_materialized_view" + ] + }, "ListAppProfiles": { "methods": [ "list_app_profiles" @@ -190,6 +270,16 @@ "list_instances" ] }, + "ListLogicalViews": { + "methods": [ + "list_logical_views" + ] + }, + "ListMaterializedViews": { + "methods": [ + "list_materialized_views" + ] + }, "PartialUpdateCluster": { "methods": [ "partial_update_cluster" @@ -224,6 +314,16 @@ "methods": [ "update_instance" ] + }, + "UpdateLogicalView": { + "methods": [ + "update_logical_view" + ] + }, + "UpdateMaterializedView": { + "methods": [ + "update_materialized_view" + ] } } }, @@ -245,6 +345,16 @@ "create_instance" ] }, + "CreateLogicalView": { + "methods": [ + "create_logical_view" + ] + }, + "CreateMaterializedView": { + "methods": [ + "create_materialized_view" + ] + }, "DeleteAppProfile": { "methods": [ "delete_app_profile" @@ -260,6 +370,16 @@ "delete_instance" ] }, + "DeleteLogicalView": { + "methods": [ + "delete_logical_view" + 
] + }, + "DeleteMaterializedView": { + "methods": [ + "delete_materialized_view" + ] + }, "GetAppProfile": { "methods": [ "get_app_profile" @@ -280,6 +400,16 @@ "get_instance" ] }, + "GetLogicalView": { + "methods": [ + "get_logical_view" + ] + }, + "GetMaterializedView": { + "methods": [ + "get_materialized_view" + ] + }, "ListAppProfiles": { "methods": [ "list_app_profiles" @@ -300,6 +430,16 @@ "list_instances" ] }, + "ListLogicalViews": { + "methods": [ + "list_logical_views" + ] + }, + "ListMaterializedViews": { + "methods": [ + "list_materialized_views" + ] + }, "PartialUpdateCluster": { "methods": [ "partial_update_cluster" @@ -334,6 +474,16 @@ "methods": [ "update_instance" ] + }, + "UpdateLogicalView": { + "methods": [ + "update_logical_view" + ] + }, + "UpdateMaterializedView": { + "methods": [ + "update_materialized_view" + ] } } } diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py index b6e77aaea334..ad3745c0699c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # +import logging as std_logging from collections import OrderedDict import re from typing import ( @@ -58,6 +59,15 @@ from .transports.grpc_asyncio import BigtableInstanceAdminGrpcAsyncIOTransport from .client import BigtableInstanceAdminClient +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + class BigtableInstanceAdminAsyncClient: """Service for creating, configuring, and deleting Cloud @@ -91,6 +101,16 @@ class BigtableInstanceAdminAsyncClient: ) instance_path = staticmethod(BigtableInstanceAdminClient.instance_path) parse_instance_path = staticmethod(BigtableInstanceAdminClient.parse_instance_path) + logical_view_path = staticmethod(BigtableInstanceAdminClient.logical_view_path) + parse_logical_view_path = staticmethod( + BigtableInstanceAdminClient.parse_logical_view_path + ) + materialized_view_path = staticmethod( + BigtableInstanceAdminClient.materialized_view_path + ) + parse_materialized_view_path = staticmethod( + BigtableInstanceAdminClient.parse_materialized_view_path + ) table_path = staticmethod(BigtableInstanceAdminClient.table_path) parse_table_path = staticmethod(BigtableInstanceAdminClient.parse_table_path) common_billing_account_path = staticmethod( @@ -289,6 +309,28 @@ def __init__( client_info=client_info, ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ): # pragma: NO COVER + _LOGGER.debug( + "Created client `google.bigtable.admin_v2.BigtableInstanceAdminAsyncClient`.", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "universeDomain": getattr( + self._client._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if 
hasattr(self._client._transport, "_credentials") + else { + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "credentialsType": None, + }, + ) + async def create_instance( self, request: Optional[ @@ -301,7 +343,7 @@ async def create_instance( clusters: Optional[MutableMapping[str, gba_instance.Cluster]] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Create an instance within a project. @@ -344,7 +386,6 @@ async def create_instance( ``mycluster`` rather than ``projects/myproject/instances/myinstance/clusters/mycluster``. Fields marked ``OutputOnly`` must be left blank. - Currently, at most four clusters can be specified. This corresponds to the ``clusters`` field on the ``request`` instance; if ``request`` is provided, this @@ -352,8 +393,10 @@ async def create_instance( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -369,7 +412,10 @@ async def create_instance( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, instance_id, instance, clusters]) + flattened_params = [parent, instance_id, instance, clusters] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -436,7 +482,7 @@ async def get_instance( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> instance.Instance: r"""Gets information about an instance. @@ -455,8 +501,10 @@ async def get_instance( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.Instance: @@ -470,7 +518,10 @@ async def get_instance( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -522,7 +573,7 @@ async def list_instances( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable_instance_admin.ListInstancesResponse: r"""Lists information about instances in a project. @@ -541,8 +592,10 @@ async def list_instances( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.ListInstancesResponse: @@ -553,7 +606,10 @@ async def list_instances( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -602,7 +658,7 @@ async def update_instance( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> instance.Instance: r"""Updates an instance within a project. This method updates only the display name and type for an Instance. @@ -620,8 +676,10 @@ async def update_instance( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.Instance: @@ -674,7 +732,7 @@ async def partial_update_instance( update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Partially updates an instance within a project. This method can modify all fields of an Instance and is the @@ -702,8 +760,10 @@ async def partial_update_instance( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -719,7 +779,10 @@ async def partial_update_instance( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([instance, update_mask]) + flattened_params = [instance, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -785,7 +848,7 @@ async def delete_instance( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Delete an instance from a project. @@ -804,13 +867,18 @@ async def delete_instance( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -861,7 +929,7 @@ async def create_cluster( cluster: Optional[instance.Cluster] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Creates a cluster within an instance. @@ -902,8 +970,10 @@ async def create_cluster( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -918,7 +988,10 @@ async def create_cluster( # Create or coerce a protobuf request object. 
# - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, cluster_id, cluster]) + flattened_params = [parent, cluster_id, cluster] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -982,7 +1055,7 @@ async def get_cluster( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> instance.Cluster: r"""Gets information about a cluster. @@ -1001,8 +1074,10 @@ async def get_cluster( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.Cluster: @@ -1015,7 +1090,10 @@ async def get_cluster( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1067,7 +1145,7 @@ async def list_clusters( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable_instance_admin.ListClustersResponse: r"""Lists information about clusters in an instance. @@ -1088,8 +1166,10 @@ async def list_clusters( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.ListClustersResponse: @@ -1100,7 +1180,10 @@ async def list_clusters( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1149,7 +1232,7 @@ async def update_cluster( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Updates a cluster within an instance. @@ -1166,8 +1249,10 @@ async def update_cluster( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -1229,7 +1314,7 @@ async def partial_update_cluster( update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Partially updates a cluster within a project. This method is the preferred way to update a Cluster. @@ -1267,8 +1352,10 @@ async def partial_update_cluster( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -1283,7 +1370,10 @@ async def partial_update_cluster( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([cluster, update_mask]) + flattened_params = [cluster, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1347,7 +1437,7 @@ async def delete_cluster( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Deletes a cluster from an instance. @@ -1366,13 +1456,18 @@ async def delete_cluster( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1423,7 +1518,7 @@ async def create_app_profile( app_profile: Optional[instance.AppProfile] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> instance.AppProfile: r"""Creates an app profile within an instance. @@ -1458,8 +1553,10 @@ async def create_app_profile( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.AppProfile: @@ -1471,7 +1568,10 @@ async def create_app_profile( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, app_profile_id, app_profile]) + flattened_params = [parent, app_profile_id, app_profile] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1527,7 +1627,7 @@ async def get_app_profile( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> instance.AppProfile: r"""Gets information about an app profile. @@ -1546,8 +1646,10 @@ async def get_app_profile( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.AppProfile: @@ -1559,7 +1661,10 @@ async def get_app_profile( # Create or coerce a protobuf request object. 
# - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1611,7 +1716,7 @@ async def list_app_profiles( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListAppProfilesAsyncPager: r"""Lists information about app profiles in an instance. @@ -1633,8 +1738,10 @@ async def list_app_profiles( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListAppProfilesAsyncPager: @@ -1648,7 +1755,10 @@ async def list_app_profiles( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1712,7 +1822,7 @@ async def update_app_profile( update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Updates an app profile within an instance. @@ -1738,8 +1848,10 @@ async def update_app_profile( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -1752,7 +1864,10 @@ async def update_app_profile( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([app_profile, update_mask]) + flattened_params = [app_profile, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1814,9 +1929,10 @@ async def delete_app_profile( ] = None, *, name: Optional[str] = None, + ignore_warnings: Optional[bool] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Deletes an app profile from an instance. @@ -1832,16 +1948,28 @@ async def delete_app_profile( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. + ignore_warnings (:class:`bool`): + Required. If true, ignore safety + checks when deleting the app profile. + + This corresponds to the ``ignore_warnings`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name, ignore_warnings] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1857,6 +1985,8 @@ async def delete_app_profile( # request, apply these. if name is not None: request.name = name + if ignore_warnings is not None: + request.ignore_warnings = ignore_warnings # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. @@ -1888,7 +2018,7 @@ async def get_iam_policy( resource: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: r"""Gets the access control policy for an instance resource. Returns an empty policy if an instance exists @@ -1909,8 +2039,10 @@ async def get_iam_policy( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.iam.v1.policy_pb2.Policy: @@ -1949,7 +2081,10 @@ async def get_iam_policy( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([resource]) + flattened_params = [resource] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1996,7 +2131,7 @@ async def set_iam_policy( resource: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: r"""Sets the access control policy on an instance resource. Replaces any existing policy. @@ -2016,8 +2151,10 @@ async def set_iam_policy( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.iam.v1.policy_pb2.Policy: @@ -2056,7 +2193,10 @@ async def set_iam_policy( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([resource]) + flattened_params = [resource] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2104,7 +2244,7 @@ async def test_iam_permissions( permissions: Optional[MutableSequence[str]] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: r"""Returns permissions that the caller has on the specified instance resource. @@ -2133,8 +2273,10 @@ async def test_iam_permissions( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse: @@ -2143,7 +2285,10 @@ async def test_iam_permissions( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([resource, permissions]) + flattened_params = [resource, permissions] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2194,7 +2339,7 @@ async def list_hot_tablets( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListHotTabletsAsyncPager: r"""Lists hot tablets in a cluster, within the time range provided. Hot tablets are ordered based on CPU usage. @@ -2214,8 +2359,10 @@ async def list_hot_tablets( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListHotTabletsAsyncPager: @@ -2229,7 +2376,10 @@ async def list_hot_tablets( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2283,6 +2433,1023 @@ async def list_hot_tablets( # Done; return the response. return response + async def create_logical_view( + self, + request: Optional[ + Union[bigtable_instance_admin.CreateLogicalViewRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + logical_view: Optional[instance.LogicalView] = None, + logical_view_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a logical view within an instance. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateLogicalViewRequest, dict]]): + The request object. Request message for + BigtableInstanceAdmin.CreateLogicalView. + parent (:class:`str`): + Required. The parent instance where this logical view + will be created. Format: + ``projects/{project}/instances/{instance}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + logical_view (:class:`google.cloud.bigtable_admin_v2.types.LogicalView`): + Required. The logical view to create. + This corresponds to the ``logical_view`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + logical_view_id (:class:`str`): + Required. The ID to use for the + logical view, which will become the + final component of the logical view's + resource name. 
+ + This corresponds to the ``logical_view_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.bigtable_admin_v2.types.LogicalView` + A SQL logical view object that can be referenced in SQL + queries. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent, logical_view, logical_view_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_instance_admin.CreateLogicalViewRequest): + request = bigtable_instance_admin.CreateLogicalViewRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if logical_view is not None: + request.logical_view = logical_view + if logical_view_id is not None: + request.logical_view_id = logical_view_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.create_logical_view + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + instance.LogicalView, + metadata_type=bigtable_instance_admin.CreateLogicalViewMetadata, + ) + + # Done; return the response. + return response + + async def get_logical_view( + self, + request: Optional[ + Union[bigtable_instance_admin.GetLogicalViewRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> instance.LogicalView: + r"""Gets information about a logical view. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetLogicalViewRequest, dict]]): + The request object. Request message for + BigtableInstanceAdmin.GetLogicalView. + name (:class:`str`): + Required. The unique name of the requested logical view. 
+ Values are of the form + ``projects/{project}/instances/{instance}/logicalViews/{logical_view}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.bigtable_admin_v2.types.LogicalView: + A SQL logical view object that can be + referenced in SQL queries. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_instance_admin.GetLogicalViewRequest): + request = bigtable_instance_admin.GetLogicalViewRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.get_logical_view + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_logical_views( + self, + request: Optional[ + Union[bigtable_instance_admin.ListLogicalViewsRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pagers.ListLogicalViewsAsyncPager: + r"""Lists information about logical views in an instance. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListLogicalViewsRequest, dict]]): + The request object. Request message for + BigtableInstanceAdmin.ListLogicalViews. + parent (:class:`str`): + Required. The unique name of the instance for which the + list of logical views is requested. Values are of the + form ``projects/{project}/instances/{instance}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListLogicalViewsAsyncPager: + Response message for + BigtableInstanceAdmin.ListLogicalViews. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_instance_admin.ListLogicalViewsRequest): + request = bigtable_instance_admin.ListLogicalViewsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.list_logical_views + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListLogicalViewsAsyncPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_logical_view( + self, + request: Optional[ + Union[bigtable_instance_admin.UpdateLogicalViewRequest, dict] + ] = None, + *, + logical_view: Optional[instance.LogicalView] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation_async.AsyncOperation: + r"""Updates a logical view within an instance. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.UpdateLogicalViewRequest, dict]]): + The request object. Request message for + BigtableInstanceAdmin.UpdateLogicalView. + logical_view (:class:`google.cloud.bigtable_admin_v2.types.LogicalView`): + Required. The logical view to update. + + The logical view's ``name`` field is used to identify + the view to update. Format: + ``projects/{project}/instances/{instance}/logicalViews/{logical_view}``. + + This corresponds to the ``logical_view`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Optional. The list of fields to + update. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.bigtable_admin_v2.types.LogicalView` + A SQL logical view object that can be referenced in SQL + queries. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [logical_view, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_instance_admin.UpdateLogicalViewRequest): + request = bigtable_instance_admin.UpdateLogicalViewRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if logical_view is not None: + request.logical_view = logical_view + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.update_logical_view + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("logical_view.name", request.logical_view.name),) + ), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + instance.LogicalView, + metadata_type=bigtable_instance_admin.UpdateLogicalViewMetadata, + ) + + # Done; return the response. + return response + + async def delete_logical_view( + self, + request: Optional[ + Union[bigtable_instance_admin.DeleteLogicalViewRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> None: + r"""Deletes a logical view from an instance. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteLogicalViewRequest, dict]]): + The request object. Request message for + BigtableInstanceAdmin.DeleteLogicalView. + name (:class:`str`): + Required. 
The unique name of the logical view to be + deleted. Format: + ``projects/{project}/instances/{instance}/logicalViews/{logical_view}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_instance_admin.DeleteLogicalViewRequest): + request = bigtable_instance_admin.DeleteLogicalViewRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.delete_logical_view + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def create_materialized_view( + self, + request: Optional[ + Union[bigtable_instance_admin.CreateMaterializedViewRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + materialized_view: Optional[instance.MaterializedView] = None, + materialized_view_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a materialized view within an instance. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateMaterializedViewRequest, dict]]): + The request object. Request message for + BigtableInstanceAdmin.CreateMaterializedView. + parent (:class:`str`): + Required. The parent instance where this materialized + view will be created. Format: + ``projects/{project}/instances/{instance}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + materialized_view (:class:`google.cloud.bigtable_admin_v2.types.MaterializedView`): + Required. The materialized view to + create. + + This corresponds to the ``materialized_view`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ materialized_view_id (:class:`str`): + Required. The ID to use for the + materialized view, which will become the + final component of the materialized + view's resource name. + + This corresponds to the ``materialized_view_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.bigtable_admin_v2.types.MaterializedView` + A materialized view object that can be referenced in SQL + queries. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent, materialized_view, materialized_view_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance( + request, bigtable_instance_admin.CreateMaterializedViewRequest + ): + request = bigtable_instance_admin.CreateMaterializedViewRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if materialized_view is not None: + request.materialized_view = materialized_view + if materialized_view_id is not None: + request.materialized_view_id = materialized_view_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.create_materialized_view + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + instance.MaterializedView, + metadata_type=bigtable_instance_admin.CreateMaterializedViewMetadata, + ) + + # Done; return the response. + return response + + async def get_materialized_view( + self, + request: Optional[ + Union[bigtable_instance_admin.GetMaterializedViewRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> instance.MaterializedView: + r"""Gets information about a materialized view. 
+ + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetMaterializedViewRequest, dict]]): + The request object. Request message for + BigtableInstanceAdmin.GetMaterializedView. + name (:class:`str`): + Required. The unique name of the requested materialized + view. Values are of the form + ``projects/{project}/instances/{instance}/materializedViews/{materialized_view}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.bigtable_admin_v2.types.MaterializedView: + A materialized view object that can + be referenced in SQL queries. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_instance_admin.GetMaterializedViewRequest): + request = bigtable_instance_admin.GetMaterializedViewRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.get_materialized_view + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_materialized_views( + self, + request: Optional[ + Union[bigtable_instance_admin.ListMaterializedViewsRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pagers.ListMaterializedViewsAsyncPager: + r"""Lists information about materialized views in an + instance. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListMaterializedViewsRequest, dict]]): + The request object. Request message for + BigtableInstanceAdmin.ListMaterializedViews. + parent (:class:`str`): + Required. The unique name of the instance for which the + list of materialized views is requested. Values are of + the form ``projects/{project}/instances/{instance}``. 
+ + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListMaterializedViewsAsyncPager: + Response message for + BigtableInstanceAdmin.ListMaterializedViews. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance( + request, bigtable_instance_admin.ListMaterializedViewsRequest + ): + request = bigtable_instance_admin.ListMaterializedViewsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.list_materialized_views + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListMaterializedViewsAsyncPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_materialized_view( + self, + request: Optional[ + Union[bigtable_instance_admin.UpdateMaterializedViewRequest, dict] + ] = None, + *, + materialized_view: Optional[instance.MaterializedView] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation_async.AsyncOperation: + r"""Updates a materialized view within an instance. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.UpdateMaterializedViewRequest, dict]]): + The request object. Request message for + BigtableInstanceAdmin.UpdateMaterializedView. + materialized_view (:class:`google.cloud.bigtable_admin_v2.types.MaterializedView`): + Required. 
The materialized view to update. + + The materialized view's ``name`` field is used to + identify the view to update. Format: + ``projects/{project}/instances/{instance}/materializedViews/{materialized_view}``. + + This corresponds to the ``materialized_view`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Optional. The list of fields to + update. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.bigtable_admin_v2.types.MaterializedView` + A materialized view object that can be referenced in SQL + queries. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [materialized_view, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance( + request, bigtable_instance_admin.UpdateMaterializedViewRequest + ): + request = bigtable_instance_admin.UpdateMaterializedViewRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if materialized_view is not None: + request.materialized_view = materialized_view + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.update_materialized_view + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("materialized_view.name", request.materialized_view.name),) + ), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + instance.MaterializedView, + metadata_type=bigtable_instance_admin.UpdateMaterializedViewMetadata, + ) + + # Done; return the response. 
+ return response + + async def delete_materialized_view( + self, + request: Optional[ + Union[bigtable_instance_admin.DeleteMaterializedViewRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> None: + r"""Deletes a materialized view from an instance. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteMaterializedViewRequest, dict]]): + The request object. Request message for + BigtableInstanceAdmin.DeleteMaterializedView. + name (:class:`str`): + Required. The unique name of the materialized view to be + deleted. Format: + ``projects/{project}/instances/{instance}/materializedViews/{materialized_view}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance( + request, bigtable_instance_admin.DeleteMaterializedViewRequest + ): + request = bigtable_instance_admin.DeleteMaterializedViewRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.delete_materialized_view + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + async def __aenter__(self) -> "BigtableInstanceAdminAsyncClient": return self diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py index b717eac8b47e..f9635515610f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -14,6 +14,9 @@ # limitations under the License. 
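[Editor's note, not part of the patch] The hunks above add logical-view and materialized-view management RPCs to BigtableInstanceAdminAsyncClient. The sketch below illustrates how the new async methods might be called; the project, instance, view IDs and the SQL query are hypothetical placeholders, and the `query` field on LogicalView is assumed from the docstrings above.

import asyncio

from google.cloud import bigtable_admin_v2


async def demo_logical_views() -> None:
    # Hypothetical usage sketch of the async methods added by this patch.
    client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient()
    parent = "projects/my-project/instances/my-instance"  # placeholder names

    # create_logical_view returns a long-running operation future; awaiting
    # result() yields the created LogicalView resource.
    operation = await client.create_logical_view(
        parent=parent,
        logical_view_id="my-view",  # placeholder ID
        logical_view=bigtable_admin_v2.types.LogicalView(
            query="SELECT _key FROM `my-table`"  # placeholder SQL
        ),
    )
    view = await operation.result()
    print(view.name)

    # list_logical_views returns an async pager that resolves pages lazily.
    pager = await client.list_logical_views(parent=parent)
    async for logical_view in pager:
        print(logical_view.name)


if __name__ == "__main__":
    asyncio.run(demo_logical_views())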
# from collections import OrderedDict +from http import HTTPStatus +import json +import logging as std_logging import os import re from typing import ( @@ -48,6 +51,15 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.Retry, object, None] # type: ignore +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + from google.api_core import operation # type: ignore from google.api_core import operation_async # type: ignore from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import pagers @@ -306,6 +318,50 @@ def parse_instance_path(path: str) -> Dict[str, str]: m = re.match(r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)$", path) return m.groupdict() if m else {} + @staticmethod + def logical_view_path( + project: str, + instance: str, + logical_view: str, + ) -> str: + """Returns a fully-qualified logical_view string.""" + return "projects/{project}/instances/{instance}/logicalViews/{logical_view}".format( + project=project, + instance=instance, + logical_view=logical_view, + ) + + @staticmethod + def parse_logical_view_path(path: str) -> Dict[str, str]: + """Parses a logical_view path into its component segments.""" + m = re.match( + r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)/logicalViews/(?P<logical_view>.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def materialized_view_path( + project: str, + instance: str, + materialized_view: str, + ) -> str: + """Returns a fully-qualified materialized_view string.""" + return "projects/{project}/instances/{instance}/materializedViews/{materialized_view}".format( + project=project, + instance=instance, + materialized_view=materialized_view, + ) + + @staticmethod + def parse_materialized_view_path(path: str) -> Dict[str, str]: + """Parses a materialized_view path into its component segments.""" + m = re.match( + r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)/materializedViews/(?P<materialized_view>.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def table_path( project: str, @@ -599,6 +655,33 @@ def _validate_universe_domain(self): # NOTE (b/349488459): universe validation is disabled until further notice. return True + def _add_cred_info_for_auth_errors( + self, error: core_exceptions.GoogleAPICallError + ) -> None: + """Adds credential info string to error details for 401/403/404 errors. + + Args: + error (google.api_core.exceptions.GoogleAPICallError): The error to add the cred info. + """ + if error.code not in [ + HTTPStatus.UNAUTHORIZED, + HTTPStatus.FORBIDDEN, + HTTPStatus.NOT_FOUND, + ]: + return + + cred = self._transport._credentials + + # get_cred_info is only available in google-auth>=2.35.0 + if not hasattr(cred, "get_cred_info"): + return + + # ignore the type check since pypy test fails when get_cred_info + # is not available + cred_info = cred.get_cred_info() # type: ignore + if cred_info and hasattr(error._details, "append"): + error._details.append(json.dumps(cred_info)) + @property def api_endpoint(self): """Return the API endpoint used by the client instance. @@ -707,6 +790,10 @@ def __init__( # Initialize the universe domain validation. self._is_universe_domain_valid = False + if CLIENT_LOGGING_SUPPORTED: # pragma: NO COVER + # Setup logging.
+ client_logging.initialize_logging() + api_key_value = getattr(self._client_options, "api_key", None) if api_key_value and credentials: raise ValueError( @@ -773,6 +860,29 @@ def __init__( api_audience=self._client_options.api_audience, ) + if "async" not in str(self._transport): + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ): # pragma: NO COVER + _LOGGER.debug( + "Created client `google.bigtable.admin_v2.BigtableInstanceAdminClient`.", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "universeDomain": getattr( + self._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._transport, "_credentials") + else { + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "credentialsType": None, + }, + ) + def create_instance( self, request: Optional[ @@ -785,7 +895,7 @@ def create_instance( clusters: Optional[MutableMapping[str, gba_instance.Cluster]] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Create an instance within a project. @@ -828,7 +938,6 @@ def create_instance( ``mycluster`` rather than ``projects/myproject/instances/myinstance/clusters/mycluster``. Fields marked ``OutputOnly`` must be left blank. - Currently, at most four clusters can be specified. This corresponds to the ``clusters`` field on the ``request`` instance; if ``request`` is provided, this @@ -836,8 +945,10 @@ def create_instance( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -853,7 +964,10 @@ def create_instance( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, instance_id, instance, clusters]) + flattened_params = [parent, instance_id, instance, clusters] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -916,7 +1030,7 @@ def get_instance( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> instance.Instance: r"""Gets information about an instance. @@ -935,8 +1049,10 @@ def get_instance( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.Instance: @@ -950,7 +1066,10 @@ def get_instance( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -999,7 +1118,7 @@ def list_instances( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable_instance_admin.ListInstancesResponse: r"""Lists information about instances in a project. @@ -1018,8 +1137,10 @@ def list_instances( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.ListInstancesResponse: @@ -1030,7 +1151,10 @@ def list_instances( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1076,7 +1200,7 @@ def update_instance( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> instance.Instance: r"""Updates an instance within a project. This method updates only the display name and type for an Instance. @@ -1094,8 +1218,10 @@ def update_instance( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.cloud.bigtable_admin_v2.types.Instance: @@ -1146,7 +1272,7 @@ def partial_update_instance( update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Partially updates an instance within a project. This method can modify all fields of an Instance and is the @@ -1174,8 +1300,10 @@ def partial_update_instance( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -1191,7 +1319,10 @@ def partial_update_instance( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([instance, update_mask]) + flattened_params = [instance, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1254,7 +1385,7 @@ def delete_instance( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Delete an instance from a project. @@ -1273,13 +1404,18 @@ def delete_instance( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1327,7 +1463,7 @@ def create_cluster( cluster: Optional[instance.Cluster] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Creates a cluster within an instance. @@ -1368,8 +1504,10 @@ def create_cluster( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. 
timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -1384,7 +1522,10 @@ def create_cluster( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, cluster_id, cluster]) + flattened_params = [parent, cluster_id, cluster] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1445,7 +1586,7 @@ def get_cluster( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> instance.Cluster: r"""Gets information about a cluster. @@ -1464,8 +1605,10 @@ def get_cluster( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.Cluster: @@ -1478,7 +1621,10 @@ def get_cluster( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1527,7 +1673,7 @@ def list_clusters( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable_instance_admin.ListClustersResponse: r"""Lists information about clusters in an instance. @@ -1548,8 +1694,10 @@ def list_clusters( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.cloud.bigtable_admin_v2.types.ListClustersResponse: @@ -1560,7 +1708,10 @@ def list_clusters( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1606,7 +1757,7 @@ def update_cluster( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Updates a cluster within an instance. @@ -1623,8 +1774,10 @@ def update_cluster( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -1684,7 +1837,7 @@ def partial_update_cluster( update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Partially updates a cluster within a project. This method is the preferred way to update a Cluster. @@ -1722,8 +1875,10 @@ def partial_update_cluster( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -1738,7 +1893,10 @@ def partial_update_cluster( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([cluster, update_mask]) + flattened_params = [cluster, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1799,7 +1957,7 @@ def delete_cluster( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Deletes a cluster from an instance. 
@@ -1818,13 +1976,18 @@ def delete_cluster( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1872,7 +2035,7 @@ def create_app_profile( app_profile: Optional[instance.AppProfile] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> instance.AppProfile: r"""Creates an app profile within an instance. @@ -1907,8 +2070,10 @@ def create_app_profile( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.AppProfile: @@ -1920,7 +2085,10 @@ def create_app_profile( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, app_profile_id, app_profile]) + flattened_params = [parent, app_profile_id, app_profile] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1973,7 +2141,7 @@ def get_app_profile( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> instance.AppProfile: r"""Gets information about an app profile. @@ -1992,8 +2160,10 @@ def get_app_profile( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.cloud.bigtable_admin_v2.types.AppProfile: @@ -2005,7 +2175,10 @@ def get_app_profile( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2054,7 +2227,7 @@ def list_app_profiles( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListAppProfilesPager: r"""Lists information about app profiles in an instance. @@ -2076,8 +2249,10 @@ def list_app_profiles( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListAppProfilesPager: @@ -2091,7 +2266,10 @@ def list_app_profiles( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2152,7 +2330,7 @@ def update_app_profile( update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Updates an app profile within an instance. @@ -2178,8 +2356,10 @@ def update_app_profile( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -2192,7 +2372,10 @@ def update_app_profile( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([app_profile, update_mask]) + flattened_params = [app_profile, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2251,9 +2434,10 @@ def delete_app_profile( ] = None, *, name: Optional[str] = None, + ignore_warnings: Optional[bool] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Deletes an app profile from an instance. @@ -2269,16 +2453,28 @@ def delete_app_profile( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. + ignore_warnings (bool): + Required. If true, ignore safety + checks when deleting the app profile. + + This corresponds to the ``ignore_warnings`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name, ignore_warnings] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2293,6 +2489,8 @@ def delete_app_profile( # request, apply these. if name is not None: request.name = name + if ignore_warnings is not None: + request.ignore_warnings = ignore_warnings # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. @@ -2322,7 +2520,7 @@ def get_iam_policy( resource: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: r"""Gets the access control policy for an instance resource. Returns an empty policy if an instance exists @@ -2343,8 +2541,10 @@ def get_iam_policy( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.iam.v1.policy_pb2.Policy: @@ -2383,7 +2583,10 @@ def get_iam_policy( # Create or coerce a protobuf request object. 
# - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([resource]) + flattened_params = [resource] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2431,7 +2634,7 @@ def set_iam_policy( resource: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: r"""Sets the access control policy on an instance resource. Replaces any existing policy. @@ -2451,8 +2654,10 @@ def set_iam_policy( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.iam.v1.policy_pb2.Policy: @@ -2491,7 +2696,10 @@ def set_iam_policy( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([resource]) + flattened_params = [resource] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2540,7 +2748,7 @@ def test_iam_permissions( permissions: Optional[MutableSequence[str]] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: r"""Returns permissions that the caller has on the specified instance resource. @@ -2569,8 +2777,10 @@ def test_iam_permissions( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse: @@ -2579,7 +2789,10 @@ def test_iam_permissions( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([resource, permissions]) + flattened_params = [resource, permissions] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2631,7 +2844,7 @@ def list_hot_tablets( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListHotTabletsPager: r"""Lists hot tablets in a cluster, within the time range provided. Hot tablets are ordered based on CPU usage. @@ -2651,8 +2864,10 @@ def list_hot_tablets( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListHotTabletsPager: @@ -2666,7 +2881,10 @@ def list_hot_tablets( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2717,6 +2935,993 @@ def list_hot_tablets( # Done; return the response. return response + def create_logical_view( + self, + request: Optional[ + Union[bigtable_instance_admin.CreateLogicalViewRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + logical_view: Optional[instance.LogicalView] = None, + logical_view_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation.Operation: + r"""Creates a logical view within an instance. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.CreateLogicalViewRequest, dict]): + The request object. Request message for + BigtableInstanceAdmin.CreateLogicalView. + parent (str): + Required. The parent instance where this logical view + will be created. Format: + ``projects/{project}/instances/{instance}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + logical_view (google.cloud.bigtable_admin_v2.types.LogicalView): + Required. The logical view to create. + This corresponds to the ``logical_view`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + logical_view_id (str): + Required. The ID to use for the + logical view, which will become the + final component of the logical view's + resource name. + + This corresponds to the ``logical_view_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.bigtable_admin_v2.types.LogicalView` + A SQL logical view object that can be referenced in SQL + queries. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent, logical_view, logical_view_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_instance_admin.CreateLogicalViewRequest): + request = bigtable_instance_admin.CreateLogicalViewRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if logical_view is not None: + request.logical_view = logical_view + if logical_view_id is not None: + request.logical_view_id = logical_view_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_logical_view] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + instance.LogicalView, + metadata_type=bigtable_instance_admin.CreateLogicalViewMetadata, + ) + + # Done; return the response. + return response + + def get_logical_view( + self, + request: Optional[ + Union[bigtable_instance_admin.GetLogicalViewRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> instance.LogicalView: + r"""Gets information about a logical view. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.GetLogicalViewRequest, dict]): + The request object. Request message for + BigtableInstanceAdmin.GetLogicalView. + name (str): + Required. The unique name of the requested logical view. + Values are of the form + ``projects/{project}/instances/{instance}/logicalViews/{logical_view}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.bigtable_admin_v2.types.LogicalView: + A SQL logical view object that can be + referenced in SQL queries. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_instance_admin.GetLogicalViewRequest): + request = bigtable_instance_admin.GetLogicalViewRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_logical_view] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_logical_views( + self, + request: Optional[ + Union[bigtable_instance_admin.ListLogicalViewsRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pagers.ListLogicalViewsPager: + r"""Lists information about logical views in an instance. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.ListLogicalViewsRequest, dict]): + The request object. Request message for + BigtableInstanceAdmin.ListLogicalViews. + parent (str): + Required. The unique name of the instance for which the + list of logical views is requested. Values are of the + form ``projects/{project}/instances/{instance}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ + Returns: + google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListLogicalViewsPager: + Response message for + BigtableInstanceAdmin.ListLogicalViews. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_instance_admin.ListLogicalViewsRequest): + request = bigtable_instance_admin.ListLogicalViewsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_logical_views] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListLogicalViewsPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_logical_view( + self, + request: Optional[ + Union[bigtable_instance_admin.UpdateLogicalViewRequest, dict] + ] = None, + *, + logical_view: Optional[instance.LogicalView] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation.Operation: + r"""Updates a logical view within an instance. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.UpdateLogicalViewRequest, dict]): + The request object. Request message for + BigtableInstanceAdmin.UpdateLogicalView. + logical_view (google.cloud.bigtable_admin_v2.types.LogicalView): + Required. The logical view to update. + + The logical view's ``name`` field is used to identify + the view to update. Format: + ``projects/{project}/instances/{instance}/logicalViews/{logical_view}``. + + This corresponds to the ``logical_view`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Optional. The list of fields to + update. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.bigtable_admin_v2.types.LogicalView` + A SQL logical view object that can be referenced in SQL + queries. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [logical_view, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_instance_admin.UpdateLogicalViewRequest): + request = bigtable_instance_admin.UpdateLogicalViewRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if logical_view is not None: + request.logical_view = logical_view + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_logical_view] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("logical_view.name", request.logical_view.name),) + ), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + instance.LogicalView, + metadata_type=bigtable_instance_admin.UpdateLogicalViewMetadata, + ) + + # Done; return the response. + return response + + def delete_logical_view( + self, + request: Optional[ + Union[bigtable_instance_admin.DeleteLogicalViewRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> None: + r"""Deletes a logical view from an instance. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.DeleteLogicalViewRequest, dict]): + The request object. Request message for + BigtableInstanceAdmin.DeleteLogicalView. + name (str): + Required. The unique name of the logical view to be + deleted. Format: + ``projects/{project}/instances/{instance}/logicalViews/{logical_view}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_instance_admin.DeleteLogicalViewRequest): + request = bigtable_instance_admin.DeleteLogicalViewRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_logical_view] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def create_materialized_view( + self, + request: Optional[ + Union[bigtable_instance_admin.CreateMaterializedViewRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + materialized_view: Optional[instance.MaterializedView] = None, + materialized_view_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation.Operation: + r"""Creates a materialized view within an instance. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.CreateMaterializedViewRequest, dict]): + The request object. Request message for + BigtableInstanceAdmin.CreateMaterializedView. + parent (str): + Required. The parent instance where this materialized + view will be created. Format: + ``projects/{project}/instances/{instance}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + materialized_view (google.cloud.bigtable_admin_v2.types.MaterializedView): + Required. The materialized view to + create. + + This corresponds to the ``materialized_view`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + materialized_view_id (str): + Required. The ID to use for the + materialized view, which will become the + final component of the materialized + view's resource name. + + This corresponds to the ``materialized_view_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.bigtable_admin_v2.types.MaterializedView` + A materialized view object that can be referenced in SQL + queries. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent, materialized_view, materialized_view_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance( + request, bigtable_instance_admin.CreateMaterializedViewRequest + ): + request = bigtable_instance_admin.CreateMaterializedViewRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if materialized_view is not None: + request.materialized_view = materialized_view + if materialized_view_id is not None: + request.materialized_view_id = materialized_view_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_materialized_view] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + instance.MaterializedView, + metadata_type=bigtable_instance_admin.CreateMaterializedViewMetadata, + ) + + # Done; return the response. + return response + + def get_materialized_view( + self, + request: Optional[ + Union[bigtable_instance_admin.GetMaterializedViewRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> instance.MaterializedView: + r"""Gets information about a materialized view. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.GetMaterializedViewRequest, dict]): + The request object. Request message for + BigtableInstanceAdmin.GetMaterializedView. + name (str): + Required. The unique name of the requested materialized + view. Values are of the form + ``projects/{project}/instances/{instance}/materializedViews/{materialized_view}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.bigtable_admin_v2.types.MaterializedView: + A materialized view object that can + be referenced in SQL queries. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_instance_admin.GetMaterializedViewRequest): + request = bigtable_instance_admin.GetMaterializedViewRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_materialized_view] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_materialized_views( + self, + request: Optional[ + Union[bigtable_instance_admin.ListMaterializedViewsRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pagers.ListMaterializedViewsPager: + r"""Lists information about materialized views in an + instance. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.ListMaterializedViewsRequest, dict]): + The request object. Request message for + BigtableInstanceAdmin.ListMaterializedViews. + parent (str): + Required. The unique name of the instance for which the + list of materialized views is requested. Values are of + the form ``projects/{project}/instances/{instance}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ + Returns: + google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListMaterializedViewsPager: + Response message for + BigtableInstanceAdmin.ListMaterializedViews. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance( + request, bigtable_instance_admin.ListMaterializedViewsRequest + ): + request = bigtable_instance_admin.ListMaterializedViewsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_materialized_views] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListMaterializedViewsPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_materialized_view( + self, + request: Optional[ + Union[bigtable_instance_admin.UpdateMaterializedViewRequest, dict] + ] = None, + *, + materialized_view: Optional[instance.MaterializedView] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation.Operation: + r"""Updates a materialized view within an instance. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.UpdateMaterializedViewRequest, dict]): + The request object. Request message for + BigtableInstanceAdmin.UpdateMaterializedView. + materialized_view (google.cloud.bigtable_admin_v2.types.MaterializedView): + Required. The materialized view to update. + + The materialized view's ``name`` field is used to + identify the view to update. Format: + ``projects/{project}/instances/{instance}/materializedViews/{materialized_view}``. + + This corresponds to the ``materialized_view`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Optional. The list of fields to + update. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.bigtable_admin_v2.types.MaterializedView` + A materialized view object that can be referenced in SQL + queries. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [materialized_view, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance( + request, bigtable_instance_admin.UpdateMaterializedViewRequest + ): + request = bigtable_instance_admin.UpdateMaterializedViewRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if materialized_view is not None: + request.materialized_view = materialized_view + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_materialized_view] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("materialized_view.name", request.materialized_view.name),) + ), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + instance.MaterializedView, + metadata_type=bigtable_instance_admin.UpdateMaterializedViewMetadata, + ) + + # Done; return the response. + return response + + def delete_materialized_view( + self, + request: Optional[ + Union[bigtable_instance_admin.DeleteMaterializedViewRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> None: + r"""Deletes a materialized view from an instance. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.DeleteMaterializedViewRequest, dict]): + The request object. Request message for + BigtableInstanceAdmin.DeleteMaterializedView. + name (str): + Required. The unique name of the materialized view to be + deleted. Format: + ``projects/{project}/instances/{instance}/materializedViews/{materialized_view}``. 
+ + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance( + request, bigtable_instance_admin.DeleteMaterializedViewRequest + ): + request = bigtable_instance_admin.DeleteMaterializedViewRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_materialized_view] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + def __enter__(self) -> "BigtableInstanceAdminClient": return self diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py index bb7ee001f599..355d641e40d4 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py @@ -67,7 +67,7 @@ def __init__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiate the pager. @@ -81,8 +81,10 @@ def __init__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
""" self._method = method self._request = bigtable_instance_admin.ListAppProfilesRequest(request) @@ -143,7 +145,7 @@ def __init__( *, retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiates the pager. @@ -157,8 +159,10 @@ def __init__( retry (google.api_core.retry.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = bigtable_instance_admin.ListAppProfilesRequest(request) @@ -223,7 +227,7 @@ def __init__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiate the pager. @@ -237,8 +241,10 @@ def __init__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = bigtable_instance_admin.ListHotTabletsRequest(request) @@ -299,7 +305,7 @@ def __init__( *, retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiates the pager. @@ -313,8 +319,10 @@ def __init__( retry (google.api_core.retry.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = bigtable_instance_admin.ListHotTabletsRequest(request) @@ -351,3 +359,323 @@ async def async_generator(): def __repr__(self) -> str: return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListLogicalViewsPager: + """A pager for iterating through ``list_logical_views`` requests. + + This class thinly wraps an initial + :class:`google.cloud.bigtable_admin_v2.types.ListLogicalViewsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``logical_views`` field. 
+ + If there are more pages, the ``__iter__`` method will make additional + ``ListLogicalViews`` requests and continue to iterate + through the ``logical_views`` field on the + corresponding responses. + + All the usual :class:`google.cloud.bigtable_admin_v2.types.ListLogicalViewsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., bigtable_instance_admin.ListLogicalViewsResponse], + request: bigtable_instance_admin.ListLogicalViewsRequest, + response: bigtable_instance_admin.ListLogicalViewsResponse, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.bigtable_admin_v2.types.ListLogicalViewsRequest): + The initial request object. + response (google.cloud.bigtable_admin_v2.types.ListLogicalViewsResponse): + The initial response object. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + self._method = method + self._request = bigtable_instance_admin.ListLogicalViewsRequest(request) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[bigtable_instance_admin.ListLogicalViewsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) + yield self._response + + def __iter__(self) -> Iterator[instance.LogicalView]: + for page in self.pages: + yield from page.logical_views + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListLogicalViewsAsyncPager: + """A pager for iterating through ``list_logical_views`` requests. + + This class thinly wraps an initial + :class:`google.cloud.bigtable_admin_v2.types.ListLogicalViewsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``logical_views`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListLogicalViews`` requests and continue to iterate + through the ``logical_views`` field on the + corresponding responses. + + All the usual :class:`google.cloud.bigtable_admin_v2.types.ListLogicalViewsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + + def __init__( + self, + method: Callable[ + ..., Awaitable[bigtable_instance_admin.ListLogicalViewsResponse] + ], + request: bigtable_instance_admin.ListLogicalViewsRequest, + response: bigtable_instance_admin.ListLogicalViewsResponse, + *, + retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.bigtable_admin_v2.types.ListLogicalViewsRequest): + The initial request object. + response (google.cloud.bigtable_admin_v2.types.ListLogicalViewsResponse): + The initial response object. + retry (google.api_core.retry.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + self._method = method + self._request = bigtable_instance_admin.ListLogicalViewsRequest(request) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterator[bigtable_instance_admin.ListLogicalViewsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) + yield self._response + + def __aiter__(self) -> AsyncIterator[instance.LogicalView]: + async def async_generator(): + async for page in self.pages: + for response in page.logical_views: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListMaterializedViewsPager: + """A pager for iterating through ``list_materialized_views`` requests. + + This class thinly wraps an initial + :class:`google.cloud.bigtable_admin_v2.types.ListMaterializedViewsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``materialized_views`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListMaterializedViews`` requests and continue to iterate + through the ``materialized_views`` field on the + corresponding responses. + + All the usual :class:`google.cloud.bigtable_admin_v2.types.ListMaterializedViewsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., bigtable_instance_admin.ListMaterializedViewsResponse], + request: bigtable_instance_admin.ListMaterializedViewsRequest, + response: bigtable_instance_admin.ListMaterializedViewsResponse, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. 
+ request (google.cloud.bigtable_admin_v2.types.ListMaterializedViewsRequest): + The initial request object. + response (google.cloud.bigtable_admin_v2.types.ListMaterializedViewsResponse): + The initial response object. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + self._method = method + self._request = bigtable_instance_admin.ListMaterializedViewsRequest(request) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[bigtable_instance_admin.ListMaterializedViewsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) + yield self._response + + def __iter__(self) -> Iterator[instance.MaterializedView]: + for page in self.pages: + yield from page.materialized_views + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListMaterializedViewsAsyncPager: + """A pager for iterating through ``list_materialized_views`` requests. + + This class thinly wraps an initial + :class:`google.cloud.bigtable_admin_v2.types.ListMaterializedViewsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``materialized_views`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListMaterializedViews`` requests and continue to iterate + through the ``materialized_views`` field on the + corresponding responses. + + All the usual :class:`google.cloud.bigtable_admin_v2.types.ListMaterializedViewsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[ + ..., Awaitable[bigtable_instance_admin.ListMaterializedViewsResponse] + ], + request: bigtable_instance_admin.ListMaterializedViewsRequest, + response: bigtable_instance_admin.ListMaterializedViewsResponse, + *, + retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.bigtable_admin_v2.types.ListMaterializedViewsRequest): + The initial request object. + response (google.cloud.bigtable_admin_v2.types.ListMaterializedViewsResponse): + The initial response object. + retry (google.api_core.retry.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + self._method = method + self._request = bigtable_instance_admin.ListMaterializedViewsRequest(request) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterator[bigtable_instance_admin.ListMaterializedViewsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) + yield self._response + + def __aiter__(self) -> AsyncIterator[instance.MaterializedView]: + async def async_generator(): + async for page in self.pages: + for response in page.materialized_views: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py index bc2f819b82ff..f2576c676f86 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py @@ -378,6 +378,56 @@ def _prep_wrapped_messages(self, client_info): default_timeout=60.0, client_info=client_info, ), + self.create_logical_view: gapic_v1.method.wrap_method( + self.create_logical_view, + default_timeout=None, + client_info=client_info, + ), + self.get_logical_view: gapic_v1.method.wrap_method( + self.get_logical_view, + default_timeout=None, + client_info=client_info, + ), + self.list_logical_views: gapic_v1.method.wrap_method( + self.list_logical_views, + default_timeout=None, + client_info=client_info, + ), + self.update_logical_view: gapic_v1.method.wrap_method( + self.update_logical_view, + default_timeout=None, + client_info=client_info, + ), + self.delete_logical_view: gapic_v1.method.wrap_method( + self.delete_logical_view, + default_timeout=None, + client_info=client_info, + ), + self.create_materialized_view: gapic_v1.method.wrap_method( + self.create_materialized_view, + default_timeout=None, + client_info=client_info, + ), + self.get_materialized_view: gapic_v1.method.wrap_method( + self.get_materialized_view, + default_timeout=None, + client_info=client_info, + ), + self.list_materialized_views: gapic_v1.method.wrap_method( + self.list_materialized_views, + default_timeout=None, + client_info=client_info, + ), + self.update_materialized_view: gapic_v1.method.wrap_method( + self.update_materialized_view, + default_timeout=None, + client_info=client_info, + ), + self.delete_materialized_view: gapic_v1.method.wrap_method( + self.delete_materialized_view, + default_timeout=None, + client_info=client_info, + ), } def close(self): @@ -597,6 +647,102 @@ def list_hot_tablets( ]: raise NotImplementedError() + @property + def create_logical_view( + self, + ) -> Callable[ + [bigtable_instance_admin.CreateLogicalViewRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise 
NotImplementedError() + + @property + def get_logical_view( + self, + ) -> Callable[ + [bigtable_instance_admin.GetLogicalViewRequest], + Union[instance.LogicalView, Awaitable[instance.LogicalView]], + ]: + raise NotImplementedError() + + @property + def list_logical_views( + self, + ) -> Callable[ + [bigtable_instance_admin.ListLogicalViewsRequest], + Union[ + bigtable_instance_admin.ListLogicalViewsResponse, + Awaitable[bigtable_instance_admin.ListLogicalViewsResponse], + ], + ]: + raise NotImplementedError() + + @property + def update_logical_view( + self, + ) -> Callable[ + [bigtable_instance_admin.UpdateLogicalViewRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def delete_logical_view( + self, + ) -> Callable[ + [bigtable_instance_admin.DeleteLogicalViewRequest], + Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]], + ]: + raise NotImplementedError() + + @property + def create_materialized_view( + self, + ) -> Callable[ + [bigtable_instance_admin.CreateMaterializedViewRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def get_materialized_view( + self, + ) -> Callable[ + [bigtable_instance_admin.GetMaterializedViewRequest], + Union[instance.MaterializedView, Awaitable[instance.MaterializedView]], + ]: + raise NotImplementedError() + + @property + def list_materialized_views( + self, + ) -> Callable[ + [bigtable_instance_admin.ListMaterializedViewsRequest], + Union[ + bigtable_instance_admin.ListMaterializedViewsResponse, + Awaitable[bigtable_instance_admin.ListMaterializedViewsResponse], + ], + ]: + raise NotImplementedError() + + @property + def update_materialized_view( + self, + ) -> Callable[ + [bigtable_instance_admin.UpdateMaterializedViewRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def delete_materialized_view( + self, + ) -> Callable[ + [bigtable_instance_admin.DeleteMaterializedViewRequest], + Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]], + ]: + raise NotImplementedError() + @property def kind(self) -> str: raise NotImplementedError() diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py index cc3e7098629e..eb13e683ba71 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py @@ -13,6 +13,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
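# --- Editor's illustration (not part of this patch) ---------------------------
# A minimal sketch of how the List*Views pagers defined earlier in this patch
# are typically consumed. The client construction and the parent value are
# illustrative assumptions; only the paging behaviour (following
# next_page_token page by page) is taken from the pager code above.
from google.cloud import bigtable_admin_v2


def print_logical_views(parent: str) -> None:
    client = bigtable_admin_v2.BigtableInstanceAdminClient()
    # list_logical_views returns a ListLogicalViewsPager; iterating the pager
    # itself walks every page, issuing follow-up requests transparently.
    for logical_view in client.list_logical_views(request={"parent": parent}):
        print(logical_view.name)


# The async variants (ListLogicalViewsAsyncPager / ListMaterializedViewsAsyncPager
# above) behave the same way but are consumed with `async for`.
# ------------------------------------------------------------------------------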
# +import json +import logging as std_logging +import pickle import warnings from typing import Callable, Dict, Optional, Sequence, Tuple, Union @@ -22,8 +25,11 @@ import google.auth # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore +from google.protobuf.json_format import MessageToJson +import google.protobuf.message import grpc # type: ignore +import proto # type: ignore from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin from google.cloud.bigtable_admin_v2.types import instance @@ -33,6 +39,81 @@ from google.protobuf import empty_pb2 # type: ignore from .base import BigtableInstanceAdminTransport, DEFAULT_CLIENT_INFO +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + + +class _LoggingClientInterceptor(grpc.UnaryUnaryClientInterceptor): # pragma: NO COVER + def intercept_unary_unary(self, continuation, client_call_details, request): + logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ) + if logging_enabled: # pragma: NO COVER + request_metadata = client_call_details.metadata + if isinstance(request, proto.Message): + request_payload = type(request).to_json(request) + elif isinstance(request, google.protobuf.message.Message): + request_payload = MessageToJson(request) + else: + request_payload = f"{type(request).__name__}: {pickle.dumps(request)}" + + request_metadata = { + key: value.decode("utf-8") if isinstance(value, bytes) else value + for key, value in request_metadata + } + grpc_request = { + "payload": request_payload, + "requestMethod": "grpc", + "metadata": dict(request_metadata), + } + _LOGGER.debug( + f"Sending request for {client_call_details.method}", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": client_call_details.method, + "request": grpc_request, + "metadata": grpc_request["metadata"], + }, + ) + + response = continuation(client_call_details, request) + if logging_enabled: # pragma: NO COVER + response_metadata = response.trailing_metadata() + # Convert gRPC metadata `` to list of tuples + metadata = ( + dict([(k, str(v)) for k, v in response_metadata]) + if response_metadata + else None + ) + result = response.result() + if isinstance(result, proto.Message): + response_payload = type(result).to_json(result) + elif isinstance(result, google.protobuf.message.Message): + response_payload = MessageToJson(result) + else: + response_payload = f"{type(result).__name__}: {pickle.dumps(result)}" + grpc_response = { + "payload": response_payload, + "metadata": metadata, + "status": "OK", + } + _LOGGER.debug( + f"Received response for {client_call_details.method}.", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": client_call_details.method, + "response": grpc_response, + "metadata": grpc_response["metadata"], + }, + ) + return response + class BigtableInstanceAdminGrpcTransport(BigtableInstanceAdminTransport): """gRPC backend transport for BigtableInstanceAdmin. @@ -190,7 +271,12 @@ def __init__( ], ) - # Wrap messages. This must be done after self._grpc_channel exists + self._interceptor = _LoggingClientInterceptor() + self._logged_channel = grpc.intercept_channel( + self._grpc_channel, self._interceptor + ) + + # Wrap messages. 
This must be done after self._logged_channel exists self._prep_wrapped_messages(client_info) @classmethod @@ -254,7 +340,9 @@ def operations_client(self) -> operations_v1.OperationsClient: """ # Quick check: Only create a new client if we do not already have one. if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient(self.grpc_channel) + self._operations_client = operations_v1.OperationsClient( + self._logged_channel + ) # Return the client from cache. return self._operations_client @@ -286,7 +374,7 @@ def create_instance( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_instance" not in self._stubs: - self._stubs["create_instance"] = self.grpc_channel.unary_unary( + self._stubs["create_instance"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateInstance", request_serializer=bigtable_instance_admin.CreateInstanceRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -312,7 +400,7 @@ def get_instance( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_instance" not in self._stubs: - self._stubs["get_instance"] = self.grpc_channel.unary_unary( + self._stubs["get_instance"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetInstance", request_serializer=bigtable_instance_admin.GetInstanceRequest.serialize, response_deserializer=instance.Instance.deserialize, @@ -341,7 +429,7 @@ def list_instances( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_instances" not in self._stubs: - self._stubs["list_instances"] = self.grpc_channel.unary_unary( + self._stubs["list_instances"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListInstances", request_serializer=bigtable_instance_admin.ListInstancesRequest.serialize, response_deserializer=bigtable_instance_admin.ListInstancesResponse.deserialize, @@ -368,7 +456,7 @@ def update_instance(self) -> Callable[[instance.Instance], instance.Instance]: # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_instance" not in self._stubs: - self._stubs["update_instance"] = self.grpc_channel.unary_unary( + self._stubs["update_instance"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateInstance", request_serializer=instance.Instance.serialize, response_deserializer=instance.Instance.deserialize, @@ -398,7 +486,7 @@ def partial_update_instance( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "partial_update_instance" not in self._stubs: - self._stubs["partial_update_instance"] = self.grpc_channel.unary_unary( + self._stubs["partial_update_instance"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/PartialUpdateInstance", request_serializer=bigtable_instance_admin.PartialUpdateInstanceRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -424,7 +512,7 @@ def delete_instance( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "delete_instance" not in self._stubs: - self._stubs["delete_instance"] = self.grpc_channel.unary_unary( + self._stubs["delete_instance"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteInstance", request_serializer=bigtable_instance_admin.DeleteInstanceRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -458,7 +546,7 @@ def create_cluster( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_cluster" not in self._stubs: - self._stubs["create_cluster"] = self.grpc_channel.unary_unary( + self._stubs["create_cluster"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateCluster", request_serializer=bigtable_instance_admin.CreateClusterRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -484,7 +572,7 @@ def get_cluster( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_cluster" not in self._stubs: - self._stubs["get_cluster"] = self.grpc_channel.unary_unary( + self._stubs["get_cluster"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetCluster", request_serializer=bigtable_instance_admin.GetClusterRequest.serialize, response_deserializer=instance.Cluster.deserialize, @@ -513,7 +601,7 @@ def list_clusters( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_clusters" not in self._stubs: - self._stubs["list_clusters"] = self.grpc_channel.unary_unary( + self._stubs["list_clusters"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListClusters", request_serializer=bigtable_instance_admin.ListClustersRequest.serialize, response_deserializer=bigtable_instance_admin.ListClustersResponse.deserialize, @@ -541,7 +629,7 @@ def update_cluster(self) -> Callable[[instance.Cluster], operations_pb2.Operatio # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_cluster" not in self._stubs: - self._stubs["update_cluster"] = self.grpc_channel.unary_unary( + self._stubs["update_cluster"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateCluster", request_serializer=instance.Cluster.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -582,7 +670,7 @@ def partial_update_cluster( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "partial_update_cluster" not in self._stubs: - self._stubs["partial_update_cluster"] = self.grpc_channel.unary_unary( + self._stubs["partial_update_cluster"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/PartialUpdateCluster", request_serializer=bigtable_instance_admin.PartialUpdateClusterRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -608,7 +696,7 @@ def delete_cluster( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "delete_cluster" not in self._stubs: - self._stubs["delete_cluster"] = self.grpc_channel.unary_unary( + self._stubs["delete_cluster"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteCluster", request_serializer=bigtable_instance_admin.DeleteClusterRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -636,7 +724,7 @@ def create_app_profile( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_app_profile" not in self._stubs: - self._stubs["create_app_profile"] = self.grpc_channel.unary_unary( + self._stubs["create_app_profile"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateAppProfile", request_serializer=bigtable_instance_admin.CreateAppProfileRequest.serialize, response_deserializer=instance.AppProfile.deserialize, @@ -662,7 +750,7 @@ def get_app_profile( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_app_profile" not in self._stubs: - self._stubs["get_app_profile"] = self.grpc_channel.unary_unary( + self._stubs["get_app_profile"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetAppProfile", request_serializer=bigtable_instance_admin.GetAppProfileRequest.serialize, response_deserializer=instance.AppProfile.deserialize, @@ -691,7 +779,7 @@ def list_app_profiles( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_app_profiles" not in self._stubs: - self._stubs["list_app_profiles"] = self.grpc_channel.unary_unary( + self._stubs["list_app_profiles"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListAppProfiles", request_serializer=bigtable_instance_admin.ListAppProfilesRequest.serialize, response_deserializer=bigtable_instance_admin.ListAppProfilesResponse.deserialize, @@ -719,7 +807,7 @@ def update_app_profile( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_app_profile" not in self._stubs: - self._stubs["update_app_profile"] = self.grpc_channel.unary_unary( + self._stubs["update_app_profile"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateAppProfile", request_serializer=bigtable_instance_admin.UpdateAppProfileRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -745,7 +833,7 @@ def delete_app_profile( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_app_profile" not in self._stubs: - self._stubs["delete_app_profile"] = self.grpc_channel.unary_unary( + self._stubs["delete_app_profile"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteAppProfile", request_serializer=bigtable_instance_admin.DeleteAppProfileRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -773,7 +861,7 @@ def get_iam_policy( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "get_iam_policy" not in self._stubs: - self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + self._stubs["get_iam_policy"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetIamPolicy", request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, response_deserializer=policy_pb2.Policy.FromString, @@ -800,7 +888,7 @@ def set_iam_policy( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "set_iam_policy" not in self._stubs: - self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + self._stubs["set_iam_policy"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/SetIamPolicy", request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, response_deserializer=policy_pb2.Policy.FromString, @@ -830,7 +918,7 @@ def test_iam_permissions( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "test_iam_permissions" not in self._stubs: - self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + self._stubs["test_iam_permissions"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/TestIamPermissions", request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, @@ -860,15 +948,298 @@ def list_hot_tablets( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_hot_tablets" not in self._stubs: - self._stubs["list_hot_tablets"] = self.grpc_channel.unary_unary( + self._stubs["list_hot_tablets"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListHotTablets", request_serializer=bigtable_instance_admin.ListHotTabletsRequest.serialize, response_deserializer=bigtable_instance_admin.ListHotTabletsResponse.deserialize, ) return self._stubs["list_hot_tablets"] + @property + def create_logical_view( + self, + ) -> Callable[ + [bigtable_instance_admin.CreateLogicalViewRequest], operations_pb2.Operation + ]: + r"""Return a callable for the create logical view method over gRPC. + + Creates a logical view within an instance. + + Returns: + Callable[[~.CreateLogicalViewRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_logical_view" not in self._stubs: + self._stubs["create_logical_view"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateLogicalView", + request_serializer=bigtable_instance_admin.CreateLogicalViewRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_logical_view"] + + @property + def get_logical_view( + self, + ) -> Callable[ + [bigtable_instance_admin.GetLogicalViewRequest], instance.LogicalView + ]: + r"""Return a callable for the get logical view method over gRPC. + + Gets information about a logical view. + + Returns: + Callable[[~.GetLogicalViewRequest], + ~.LogicalView]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_logical_view" not in self._stubs: + self._stubs["get_logical_view"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetLogicalView", + request_serializer=bigtable_instance_admin.GetLogicalViewRequest.serialize, + response_deserializer=instance.LogicalView.deserialize, + ) + return self._stubs["get_logical_view"] + + @property + def list_logical_views( + self, + ) -> Callable[ + [bigtable_instance_admin.ListLogicalViewsRequest], + bigtable_instance_admin.ListLogicalViewsResponse, + ]: + r"""Return a callable for the list logical views method over gRPC. + + Lists information about logical views in an instance. + + Returns: + Callable[[~.ListLogicalViewsRequest], + ~.ListLogicalViewsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_logical_views" not in self._stubs: + self._stubs["list_logical_views"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListLogicalViews", + request_serializer=bigtable_instance_admin.ListLogicalViewsRequest.serialize, + response_deserializer=bigtable_instance_admin.ListLogicalViewsResponse.deserialize, + ) + return self._stubs["list_logical_views"] + + @property + def update_logical_view( + self, + ) -> Callable[ + [bigtable_instance_admin.UpdateLogicalViewRequest], operations_pb2.Operation + ]: + r"""Return a callable for the update logical view method over gRPC. + + Updates a logical view within an instance. + + Returns: + Callable[[~.UpdateLogicalViewRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_logical_view" not in self._stubs: + self._stubs["update_logical_view"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateLogicalView", + request_serializer=bigtable_instance_admin.UpdateLogicalViewRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["update_logical_view"] + + @property + def delete_logical_view( + self, + ) -> Callable[[bigtable_instance_admin.DeleteLogicalViewRequest], empty_pb2.Empty]: + r"""Return a callable for the delete logical view method over gRPC. + + Deletes a logical view from an instance. + + Returns: + Callable[[~.DeleteLogicalViewRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_logical_view" not in self._stubs: + self._stubs["delete_logical_view"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteLogicalView", + request_serializer=bigtable_instance_admin.DeleteLogicalViewRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs["delete_logical_view"] + + @property + def create_materialized_view( + self, + ) -> Callable[ + [bigtable_instance_admin.CreateMaterializedViewRequest], + operations_pb2.Operation, + ]: + r"""Return a callable for the create materialized view method over gRPC. + + Creates a materialized view within an instance. + + Returns: + Callable[[~.CreateMaterializedViewRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_materialized_view" not in self._stubs: + self._stubs["create_materialized_view"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateMaterializedView", + request_serializer=bigtable_instance_admin.CreateMaterializedViewRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_materialized_view"] + + @property + def get_materialized_view( + self, + ) -> Callable[ + [bigtable_instance_admin.GetMaterializedViewRequest], instance.MaterializedView + ]: + r"""Return a callable for the get materialized view method over gRPC. + + Gets information about a materialized view. + + Returns: + Callable[[~.GetMaterializedViewRequest], + ~.MaterializedView]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_materialized_view" not in self._stubs: + self._stubs["get_materialized_view"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetMaterializedView", + request_serializer=bigtable_instance_admin.GetMaterializedViewRequest.serialize, + response_deserializer=instance.MaterializedView.deserialize, + ) + return self._stubs["get_materialized_view"] + + @property + def list_materialized_views( + self, + ) -> Callable[ + [bigtable_instance_admin.ListMaterializedViewsRequest], + bigtable_instance_admin.ListMaterializedViewsResponse, + ]: + r"""Return a callable for the list materialized views method over gRPC. + + Lists information about materialized views in an + instance. + + Returns: + Callable[[~.ListMaterializedViewsRequest], + ~.ListMaterializedViewsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_materialized_views" not in self._stubs: + self._stubs["list_materialized_views"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListMaterializedViews", + request_serializer=bigtable_instance_admin.ListMaterializedViewsRequest.serialize, + response_deserializer=bigtable_instance_admin.ListMaterializedViewsResponse.deserialize, + ) + return self._stubs["list_materialized_views"] + + @property + def update_materialized_view( + self, + ) -> Callable[ + [bigtable_instance_admin.UpdateMaterializedViewRequest], + operations_pb2.Operation, + ]: + r"""Return a callable for the update materialized view method over gRPC. + + Updates a materialized view within an instance. + + Returns: + Callable[[~.UpdateMaterializedViewRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_materialized_view" not in self._stubs: + self._stubs["update_materialized_view"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateMaterializedView", + request_serializer=bigtable_instance_admin.UpdateMaterializedViewRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["update_materialized_view"] + + @property + def delete_materialized_view( + self, + ) -> Callable[ + [bigtable_instance_admin.DeleteMaterializedViewRequest], empty_pb2.Empty + ]: + r"""Return a callable for the delete materialized view method over gRPC. + + Deletes a materialized view from an instance. + + Returns: + Callable[[~.DeleteMaterializedViewRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_materialized_view" not in self._stubs: + self._stubs["delete_materialized_view"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteMaterializedView", + request_serializer=bigtable_instance_admin.DeleteMaterializedViewRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs["delete_materialized_view"] + def close(self): - self.grpc_channel.close() + self._logged_channel.close() @property def kind(self) -> str: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py index 716e14a863c5..12e63f7fe1bc 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py @@ -14,6 +14,9 @@ # limitations under the License. 
# import inspect +import json +import pickle +import logging as std_logging import warnings from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union @@ -24,8 +27,11 @@ from google.api_core import operations_v1 from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore +from google.protobuf.json_format import MessageToJson +import google.protobuf.message import grpc # type: ignore +import proto # type: ignore from grpc.experimental import aio # type: ignore from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin @@ -37,6 +43,82 @@ from .base import BigtableInstanceAdminTransport, DEFAULT_CLIENT_INFO from .grpc import BigtableInstanceAdminGrpcTransport +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + + +class _LoggingClientAIOInterceptor( + grpc.aio.UnaryUnaryClientInterceptor +): # pragma: NO COVER + async def intercept_unary_unary(self, continuation, client_call_details, request): + logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ) + if logging_enabled: # pragma: NO COVER + request_metadata = client_call_details.metadata + if isinstance(request, proto.Message): + request_payload = type(request).to_json(request) + elif isinstance(request, google.protobuf.message.Message): + request_payload = MessageToJson(request) + else: + request_payload = f"{type(request).__name__}: {pickle.dumps(request)}" + + request_metadata = { + key: value.decode("utf-8") if isinstance(value, bytes) else value + for key, value in request_metadata + } + grpc_request = { + "payload": request_payload, + "requestMethod": "grpc", + "metadata": dict(request_metadata), + } + _LOGGER.debug( + f"Sending request for {client_call_details.method}", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": str(client_call_details.method), + "request": grpc_request, + "metadata": grpc_request["metadata"], + }, + ) + response = await continuation(client_call_details, request) + if logging_enabled: # pragma: NO COVER + response_metadata = await response.trailing_metadata() + # Convert gRPC metadata `` to list of tuples + metadata = ( + dict([(k, str(v)) for k, v in response_metadata]) + if response_metadata + else None + ) + result = await response + if isinstance(result, proto.Message): + response_payload = type(result).to_json(result) + elif isinstance(result, google.protobuf.message.Message): + response_payload = MessageToJson(result) + else: + response_payload = f"{type(result).__name__}: {pickle.dumps(result)}" + grpc_response = { + "payload": response_payload, + "metadata": metadata, + "status": "OK", + } + _LOGGER.debug( + f"Received response to rpc {client_call_details.method}.", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": str(client_call_details.method), + "response": grpc_response, + "metadata": grpc_response["metadata"], + }, + ) + return response + class BigtableInstanceAdminGrpcAsyncIOTransport(BigtableInstanceAdminTransport): """gRPC AsyncIO backend transport for BigtableInstanceAdmin. @@ -237,10 +319,13 @@ def __init__( ], ) - # Wrap messages. 
This must be done after self._grpc_channel exists + self._interceptor = _LoggingClientAIOInterceptor() + self._grpc_channel._unary_unary_interceptors.append(self._interceptor) + self._logged_channel = self._grpc_channel self._wrap_with_kind = ( "kind" in inspect.signature(gapic_v1.method_async.wrap_method).parameters ) + # Wrap messages. This must be done after self._logged_channel exists self._prep_wrapped_messages(client_info) @property @@ -263,7 +348,7 @@ def operations_client(self) -> operations_v1.OperationsAsyncClient: # Quick check: Only create a new client if we do not already have one. if self._operations_client is None: self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel + self._logged_channel ) # Return the client from cache. @@ -297,7 +382,7 @@ def create_instance( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_instance" not in self._stubs: - self._stubs["create_instance"] = self.grpc_channel.unary_unary( + self._stubs["create_instance"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateInstance", request_serializer=bigtable_instance_admin.CreateInstanceRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -325,7 +410,7 @@ def get_instance( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_instance" not in self._stubs: - self._stubs["get_instance"] = self.grpc_channel.unary_unary( + self._stubs["get_instance"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetInstance", request_serializer=bigtable_instance_admin.GetInstanceRequest.serialize, response_deserializer=instance.Instance.deserialize, @@ -354,7 +439,7 @@ def list_instances( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_instances" not in self._stubs: - self._stubs["list_instances"] = self.grpc_channel.unary_unary( + self._stubs["list_instances"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListInstances", request_serializer=bigtable_instance_admin.ListInstancesRequest.serialize, response_deserializer=bigtable_instance_admin.ListInstancesResponse.deserialize, @@ -383,7 +468,7 @@ def update_instance( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_instance" not in self._stubs: - self._stubs["update_instance"] = self.grpc_channel.unary_unary( + self._stubs["update_instance"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateInstance", request_serializer=instance.Instance.serialize, response_deserializer=instance.Instance.deserialize, @@ -414,7 +499,7 @@ def partial_update_instance( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "partial_update_instance" not in self._stubs: - self._stubs["partial_update_instance"] = self.grpc_channel.unary_unary( + self._stubs["partial_update_instance"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/PartialUpdateInstance", request_serializer=bigtable_instance_admin.PartialUpdateInstanceRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -442,7 +527,7 @@ def delete_instance( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "delete_instance" not in self._stubs: - self._stubs["delete_instance"] = self.grpc_channel.unary_unary( + self._stubs["delete_instance"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteInstance", request_serializer=bigtable_instance_admin.DeleteInstanceRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -477,7 +562,7 @@ def create_cluster( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_cluster" not in self._stubs: - self._stubs["create_cluster"] = self.grpc_channel.unary_unary( + self._stubs["create_cluster"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateCluster", request_serializer=bigtable_instance_admin.CreateClusterRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -505,7 +590,7 @@ def get_cluster( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_cluster" not in self._stubs: - self._stubs["get_cluster"] = self.grpc_channel.unary_unary( + self._stubs["get_cluster"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetCluster", request_serializer=bigtable_instance_admin.GetClusterRequest.serialize, response_deserializer=instance.Cluster.deserialize, @@ -534,7 +619,7 @@ def list_clusters( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_clusters" not in self._stubs: - self._stubs["list_clusters"] = self.grpc_channel.unary_unary( + self._stubs["list_clusters"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListClusters", request_serializer=bigtable_instance_admin.ListClustersRequest.serialize, response_deserializer=bigtable_instance_admin.ListClustersResponse.deserialize, @@ -564,7 +649,7 @@ def update_cluster( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_cluster" not in self._stubs: - self._stubs["update_cluster"] = self.grpc_channel.unary_unary( + self._stubs["update_cluster"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateCluster", request_serializer=instance.Cluster.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -606,7 +691,7 @@ def partial_update_cluster( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "partial_update_cluster" not in self._stubs: - self._stubs["partial_update_cluster"] = self.grpc_channel.unary_unary( + self._stubs["partial_update_cluster"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/PartialUpdateCluster", request_serializer=bigtable_instance_admin.PartialUpdateClusterRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -634,7 +719,7 @@ def delete_cluster( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "delete_cluster" not in self._stubs: - self._stubs["delete_cluster"] = self.grpc_channel.unary_unary( + self._stubs["delete_cluster"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteCluster", request_serializer=bigtable_instance_admin.DeleteClusterRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -663,7 +748,7 @@ def create_app_profile( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_app_profile" not in self._stubs: - self._stubs["create_app_profile"] = self.grpc_channel.unary_unary( + self._stubs["create_app_profile"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateAppProfile", request_serializer=bigtable_instance_admin.CreateAppProfileRequest.serialize, response_deserializer=instance.AppProfile.deserialize, @@ -691,7 +776,7 @@ def get_app_profile( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_app_profile" not in self._stubs: - self._stubs["get_app_profile"] = self.grpc_channel.unary_unary( + self._stubs["get_app_profile"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetAppProfile", request_serializer=bigtable_instance_admin.GetAppProfileRequest.serialize, response_deserializer=instance.AppProfile.deserialize, @@ -720,7 +805,7 @@ def list_app_profiles( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_app_profiles" not in self._stubs: - self._stubs["list_app_profiles"] = self.grpc_channel.unary_unary( + self._stubs["list_app_profiles"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListAppProfiles", request_serializer=bigtable_instance_admin.ListAppProfilesRequest.serialize, response_deserializer=bigtable_instance_admin.ListAppProfilesResponse.deserialize, @@ -749,7 +834,7 @@ def update_app_profile( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_app_profile" not in self._stubs: - self._stubs["update_app_profile"] = self.grpc_channel.unary_unary( + self._stubs["update_app_profile"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateAppProfile", request_serializer=bigtable_instance_admin.UpdateAppProfileRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -777,7 +862,7 @@ def delete_app_profile( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_app_profile" not in self._stubs: - self._stubs["delete_app_profile"] = self.grpc_channel.unary_unary( + self._stubs["delete_app_profile"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteAppProfile", request_serializer=bigtable_instance_admin.DeleteAppProfileRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -805,7 +890,7 @@ def get_iam_policy( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "get_iam_policy" not in self._stubs: - self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + self._stubs["get_iam_policy"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetIamPolicy", request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, response_deserializer=policy_pb2.Policy.FromString, @@ -832,7 +917,7 @@ def set_iam_policy( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "set_iam_policy" not in self._stubs: - self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + self._stubs["set_iam_policy"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/SetIamPolicy", request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, response_deserializer=policy_pb2.Policy.FromString, @@ -862,7 +947,7 @@ def test_iam_permissions( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "test_iam_permissions" not in self._stubs: - self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + self._stubs["test_iam_permissions"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/TestIamPermissions", request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, @@ -892,13 +977,302 @@ def list_hot_tablets( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_hot_tablets" not in self._stubs: - self._stubs["list_hot_tablets"] = self.grpc_channel.unary_unary( + self._stubs["list_hot_tablets"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListHotTablets", request_serializer=bigtable_instance_admin.ListHotTabletsRequest.serialize, response_deserializer=bigtable_instance_admin.ListHotTabletsResponse.deserialize, ) return self._stubs["list_hot_tablets"] + @property + def create_logical_view( + self, + ) -> Callable[ + [bigtable_instance_admin.CreateLogicalViewRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the create logical view method over gRPC. + + Creates a logical view within an instance. + + Returns: + Callable[[~.CreateLogicalViewRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_logical_view" not in self._stubs: + self._stubs["create_logical_view"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateLogicalView", + request_serializer=bigtable_instance_admin.CreateLogicalViewRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_logical_view"] + + @property + def get_logical_view( + self, + ) -> Callable[ + [bigtable_instance_admin.GetLogicalViewRequest], Awaitable[instance.LogicalView] + ]: + r"""Return a callable for the get logical view method over gRPC. + + Gets information about a logical view. + + Returns: + Callable[[~.GetLogicalViewRequest], + Awaitable[~.LogicalView]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_logical_view" not in self._stubs: + self._stubs["get_logical_view"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetLogicalView", + request_serializer=bigtable_instance_admin.GetLogicalViewRequest.serialize, + response_deserializer=instance.LogicalView.deserialize, + ) + return self._stubs["get_logical_view"] + + @property + def list_logical_views( + self, + ) -> Callable[ + [bigtable_instance_admin.ListLogicalViewsRequest], + Awaitable[bigtable_instance_admin.ListLogicalViewsResponse], + ]: + r"""Return a callable for the list logical views method over gRPC. + + Lists information about logical views in an instance. + + Returns: + Callable[[~.ListLogicalViewsRequest], + Awaitable[~.ListLogicalViewsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_logical_views" not in self._stubs: + self._stubs["list_logical_views"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListLogicalViews", + request_serializer=bigtable_instance_admin.ListLogicalViewsRequest.serialize, + response_deserializer=bigtable_instance_admin.ListLogicalViewsResponse.deserialize, + ) + return self._stubs["list_logical_views"] + + @property + def update_logical_view( + self, + ) -> Callable[ + [bigtable_instance_admin.UpdateLogicalViewRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the update logical view method over gRPC. + + Updates a logical view within an instance. + + Returns: + Callable[[~.UpdateLogicalViewRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_logical_view" not in self._stubs: + self._stubs["update_logical_view"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateLogicalView", + request_serializer=bigtable_instance_admin.UpdateLogicalViewRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["update_logical_view"] + + @property + def delete_logical_view( + self, + ) -> Callable[ + [bigtable_instance_admin.DeleteLogicalViewRequest], Awaitable[empty_pb2.Empty] + ]: + r"""Return a callable for the delete logical view method over gRPC. + + Deletes a logical view from an instance. + + Returns: + Callable[[~.DeleteLogicalViewRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_logical_view" not in self._stubs: + self._stubs["delete_logical_view"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteLogicalView", + request_serializer=bigtable_instance_admin.DeleteLogicalViewRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs["delete_logical_view"] + + @property + def create_materialized_view( + self, + ) -> Callable[ + [bigtable_instance_admin.CreateMaterializedViewRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the create materialized view method over gRPC. + + Creates a materialized view within an instance. + + Returns: + Callable[[~.CreateMaterializedViewRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_materialized_view" not in self._stubs: + self._stubs["create_materialized_view"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateMaterializedView", + request_serializer=bigtable_instance_admin.CreateMaterializedViewRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_materialized_view"] + + @property + def get_materialized_view( + self, + ) -> Callable[ + [bigtable_instance_admin.GetMaterializedViewRequest], + Awaitable[instance.MaterializedView], + ]: + r"""Return a callable for the get materialized view method over gRPC. + + Gets information about a materialized view. + + Returns: + Callable[[~.GetMaterializedViewRequest], + Awaitable[~.MaterializedView]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_materialized_view" not in self._stubs: + self._stubs["get_materialized_view"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetMaterializedView", + request_serializer=bigtable_instance_admin.GetMaterializedViewRequest.serialize, + response_deserializer=instance.MaterializedView.deserialize, + ) + return self._stubs["get_materialized_view"] + + @property + def list_materialized_views( + self, + ) -> Callable[ + [bigtable_instance_admin.ListMaterializedViewsRequest], + Awaitable[bigtable_instance_admin.ListMaterializedViewsResponse], + ]: + r"""Return a callable for the list materialized views method over gRPC. + + Lists information about materialized views in an + instance. + + Returns: + Callable[[~.ListMaterializedViewsRequest], + Awaitable[~.ListMaterializedViewsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_materialized_views" not in self._stubs: + self._stubs["list_materialized_views"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListMaterializedViews", + request_serializer=bigtable_instance_admin.ListMaterializedViewsRequest.serialize, + response_deserializer=bigtable_instance_admin.ListMaterializedViewsResponse.deserialize, + ) + return self._stubs["list_materialized_views"] + + @property + def update_materialized_view( + self, + ) -> Callable[ + [bigtable_instance_admin.UpdateMaterializedViewRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the update materialized view method over gRPC. + + Updates a materialized view within an instance. + + Returns: + Callable[[~.UpdateMaterializedViewRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_materialized_view" not in self._stubs: + self._stubs["update_materialized_view"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateMaterializedView", + request_serializer=bigtable_instance_admin.UpdateMaterializedViewRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["update_materialized_view"] + + @property + def delete_materialized_view( + self, + ) -> Callable[ + [bigtable_instance_admin.DeleteMaterializedViewRequest], + Awaitable[empty_pb2.Empty], + ]: + r"""Return a callable for the delete materialized view method over gRPC. + + Deletes a materialized view from an instance. + + Returns: + Callable[[~.DeleteMaterializedViewRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_materialized_view" not in self._stubs: + self._stubs["delete_materialized_view"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteMaterializedView", + request_serializer=bigtable_instance_admin.DeleteMaterializedViewRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs["delete_materialized_view"] + def _prep_wrapped_messages(self, client_info): """Precompute the wrapped methods, overriding the base class method to use async wrappers.""" self._wrapped_methods = { @@ -1137,6 +1511,56 @@ def _prep_wrapped_messages(self, client_info): default_timeout=60.0, client_info=client_info, ), + self.create_logical_view: self._wrap_method( + self.create_logical_view, + default_timeout=None, + client_info=client_info, + ), + self.get_logical_view: self._wrap_method( + self.get_logical_view, + default_timeout=None, + client_info=client_info, + ), + self.list_logical_views: self._wrap_method( + self.list_logical_views, + default_timeout=None, + client_info=client_info, + ), + self.update_logical_view: self._wrap_method( + self.update_logical_view, + default_timeout=None, + client_info=client_info, + ), + self.delete_logical_view: self._wrap_method( + self.delete_logical_view, + default_timeout=None, + client_info=client_info, + ), + self.create_materialized_view: self._wrap_method( + self.create_materialized_view, + default_timeout=None, + client_info=client_info, + ), + self.get_materialized_view: self._wrap_method( + self.get_materialized_view, + default_timeout=None, + client_info=client_info, + ), + self.list_materialized_views: self._wrap_method( + self.list_materialized_views, + default_timeout=None, + client_info=client_info, + ), + self.update_materialized_view: self._wrap_method( + self.update_materialized_view, + default_timeout=None, + client_info=client_info, + ), + self.delete_materialized_view: self._wrap_method( + self.delete_materialized_view, + default_timeout=None, + client_info=client_info, + ), } def _wrap_method(self, func, *args, **kwargs): @@ -1145,7 +1569,7 @@ def _wrap_method(self, func, *args, **kwargs): return gapic_v1.method_async.wrap_method(func, *args, **kwargs) def close(self): - return self.grpc_channel.close() + return self._logged_channel.close() @property def kind(self) -> str: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py index 45f08fa6454e..858055974c93 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py @@ -13,9 +13,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# +import logging +import json # type: ignore from google.auth.transport.requests import AuthorizedSession # type: ignore -import json # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.api_core import exceptions as core_exceptions from google.api_core import retry as retries @@ -48,6 +49,14 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.Retry, object, None] # type: ignore +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = logging.getLogger(__name__) DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, @@ -95,6 +104,22 @@ def post_create_instance(self, response): logging.log(f"Received response: {response}") return response + def pre_create_logical_view(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_create_logical_view(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_create_materialized_view(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_create_materialized_view(self, response): + logging.log(f"Received response: {response}") + return response + def pre_delete_app_profile(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -107,6 +132,14 @@ def pre_delete_instance(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata + def pre_delete_logical_view(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def pre_delete_materialized_view(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + def pre_get_app_profile(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -139,6 +172,22 @@ def post_get_instance(self, response): logging.log(f"Received response: {response}") return response + def pre_get_logical_view(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_logical_view(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_get_materialized_view(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_materialized_view(self, response): + logging.log(f"Received response: {response}") + return response + def pre_list_app_profiles(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -171,6 +220,22 @@ def post_list_instances(self, response): logging.log(f"Received response: {response}") return response + def pre_list_logical_views(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_logical_views(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_materialized_views(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_materialized_views(self, response): + logging.log(f"Received response: {response}") + return response + def pre_partial_update_cluster(self, request, metadata): logging.log(f"Received request: {request}") return request, 
metadata @@ -227,6 +292,22 @@ def post_update_instance(self, response): logging.log(f"Received response: {response}") return response + def pre_update_logical_view(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_update_logical_view(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_update_materialized_view(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_update_materialized_view(self, response): + logging.log(f"Received response: {response}") + return response + transport = BigtableInstanceAdminRestTransport(interceptor=MyCustomBigtableInstanceAdminInterceptor()) client = BigtableInstanceAdminClient(transport=transport) @@ -236,9 +317,10 @@ def post_update_instance(self, response): def pre_create_app_profile( self, request: bigtable_instance_admin.CreateAppProfileRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - bigtable_instance_admin.CreateAppProfileRequest, Sequence[Tuple[str, str]] + bigtable_instance_admin.CreateAppProfileRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for create_app_profile @@ -252,17 +334,43 @@ def post_create_app_profile( ) -> instance.AppProfile: """Post-rpc interceptor for create_app_profile - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_app_profile_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_create_app_profile` interceptor runs + before the `post_create_app_profile_with_metadata` interceptor. """ return response + def post_create_app_profile_with_metadata( + self, + response: instance.AppProfile, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[instance.AppProfile, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_app_profile + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableInstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_create_app_profile_with_metadata` + interceptor in new development instead of the `post_create_app_profile` interceptor. + When both interceptors are used, this `post_create_app_profile_with_metadata` interceptor runs after the + `post_create_app_profile` interceptor. The (possibly modified) response returned by + `post_create_app_profile` will be passed to + `post_create_app_profile_with_metadata`. + """ + return response, metadata + def pre_create_cluster( self, request: bigtable_instance_admin.CreateClusterRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_instance_admin.CreateClusterRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.CreateClusterRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for create_cluster Override in a subclass to manipulate the request or metadata @@ -275,18 +383,42 @@ def post_create_cluster( ) -> operations_pb2.Operation: """Post-rpc interceptor for create_cluster - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_cluster_with_metadata` + interceptor instead. 
+ + Override in a subclass to read or manipulate the response after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_create_cluster` interceptor runs + before the `post_create_cluster_with_metadata` interceptor. """ return response + def post_create_cluster_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_cluster + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableInstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_create_cluster_with_metadata` + interceptor in new development instead of the `post_create_cluster` interceptor. + When both interceptors are used, this `post_create_cluster_with_metadata` interceptor runs after the + `post_create_cluster` interceptor. The (possibly modified) response returned by + `post_create_cluster` will be passed to + `post_create_cluster_with_metadata`. + """ + return response, metadata + def pre_create_instance( self, request: bigtable_instance_admin.CreateInstanceRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - bigtable_instance_admin.CreateInstanceRequest, Sequence[Tuple[str, str]] + bigtable_instance_admin.CreateInstanceRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for create_instance @@ -300,18 +432,140 @@ def post_create_instance( ) -> operations_pb2.Operation: """Post-rpc interceptor for create_instance - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_instance_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the BigtableInstanceAdmin server but before + it is returned to user code. This `post_create_instance` interceptor runs + before the `post_create_instance_with_metadata` interceptor. + """ + return response + + def post_create_instance_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_instance + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableInstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_create_instance_with_metadata` + interceptor in new development instead of the `post_create_instance` interceptor. + When both interceptors are used, this `post_create_instance_with_metadata` interceptor runs after the + `post_create_instance` interceptor. The (possibly modified) response returned by + `post_create_instance` will be passed to + `post_create_instance_with_metadata`. + """ + return response, metadata + + def pre_create_logical_view( + self, + request: bigtable_instance_admin.CreateLogicalViewRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.CreateLogicalViewRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for create_logical_view + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. 
+ """ + return request, metadata + + def post_create_logical_view( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for create_logical_view + + DEPRECATED. Please use the `post_create_logical_view_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the BigtableInstanceAdmin server but before + it is returned to user code. This `post_create_logical_view` interceptor runs + before the `post_create_logical_view_with_metadata` interceptor. + """ + return response + + def post_create_logical_view_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_logical_view + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableInstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_create_logical_view_with_metadata` + interceptor in new development instead of the `post_create_logical_view` interceptor. + When both interceptors are used, this `post_create_logical_view_with_metadata` interceptor runs after the + `post_create_logical_view` interceptor. The (possibly modified) response returned by + `post_create_logical_view` will be passed to + `post_create_logical_view_with_metadata`. + """ + return response, metadata + + def pre_create_materialized_view( + self, + request: bigtable_instance_admin.CreateMaterializedViewRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.CreateMaterializedViewRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for create_materialized_view + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def post_create_materialized_view( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for create_materialized_view + + DEPRECATED. Please use the `post_create_materialized_view_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_create_materialized_view` interceptor runs + before the `post_create_materialized_view_with_metadata` interceptor. """ return response + def post_create_materialized_view_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_materialized_view + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableInstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_create_materialized_view_with_metadata` + interceptor in new development instead of the `post_create_materialized_view` interceptor. + When both interceptors are used, this `post_create_materialized_view_with_metadata` interceptor runs after the + `post_create_materialized_view` interceptor. 
The (possibly modified) response returned by + `post_create_materialized_view` will be passed to + `post_create_materialized_view_with_metadata`. + """ + return response, metadata + def pre_delete_app_profile( self, request: bigtable_instance_admin.DeleteAppProfileRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - bigtable_instance_admin.DeleteAppProfileRequest, Sequence[Tuple[str, str]] + bigtable_instance_admin.DeleteAppProfileRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for delete_app_profile @@ -323,8 +577,11 @@ def pre_delete_app_profile( def pre_delete_cluster( self, request: bigtable_instance_admin.DeleteClusterRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_instance_admin.DeleteClusterRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.DeleteClusterRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for delete_cluster Override in a subclass to manipulate the request or metadata @@ -335,9 +592,10 @@ def pre_delete_cluster( def pre_delete_instance( self, request: bigtable_instance_admin.DeleteInstanceRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - bigtable_instance_admin.DeleteInstanceRequest, Sequence[Tuple[str, str]] + bigtable_instance_admin.DeleteInstanceRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for delete_instance @@ -346,11 +604,44 @@ def pre_delete_instance( """ return request, metadata + def pre_delete_logical_view( + self, + request: bigtable_instance_admin.DeleteLogicalViewRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.DeleteLogicalViewRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for delete_logical_view + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def pre_delete_materialized_view( + self, + request: bigtable_instance_admin.DeleteMaterializedViewRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.DeleteMaterializedViewRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for delete_materialized_view + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + def pre_get_app_profile( self, request: bigtable_instance_admin.GetAppProfileRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_instance_admin.GetAppProfileRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.GetAppProfileRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for get_app_profile Override in a subclass to manipulate the request or metadata @@ -363,17 +654,43 @@ def post_get_app_profile( ) -> instance.AppProfile: """Post-rpc interceptor for get_app_profile - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_app_profile_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. + it is returned to user code. 
This `post_get_app_profile` interceptor runs + before the `post_get_app_profile_with_metadata` interceptor. """ return response + def post_get_app_profile_with_metadata( + self, + response: instance.AppProfile, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[instance.AppProfile, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_app_profile + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableInstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_get_app_profile_with_metadata` + interceptor in new development instead of the `post_get_app_profile` interceptor. + When both interceptors are used, this `post_get_app_profile_with_metadata` interceptor runs after the + `post_get_app_profile` interceptor. The (possibly modified) response returned by + `post_get_app_profile` will be passed to + `post_get_app_profile_with_metadata`. + """ + return response, metadata + def pre_get_cluster( self, request: bigtable_instance_admin.GetClusterRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_instance_admin.GetClusterRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.GetClusterRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for get_cluster Override in a subclass to manipulate the request or metadata @@ -384,17 +701,42 @@ def pre_get_cluster( def post_get_cluster(self, response: instance.Cluster) -> instance.Cluster: """Post-rpc interceptor for get_cluster - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_cluster_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_get_cluster` interceptor runs + before the `post_get_cluster_with_metadata` interceptor. """ return response + def post_get_cluster_with_metadata( + self, + response: instance.Cluster, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[instance.Cluster, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_cluster + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableInstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_get_cluster_with_metadata` + interceptor in new development instead of the `post_get_cluster` interceptor. + When both interceptors are used, this `post_get_cluster_with_metadata` interceptor runs after the + `post_get_cluster` interceptor. The (possibly modified) response returned by + `post_get_cluster` will be passed to + `post_get_cluster_with_metadata`. 
+ """ + return response, metadata + def pre_get_iam_policy( self, request: iam_policy_pb2.GetIamPolicyRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[iam_policy_pb2.GetIamPolicyRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + iam_policy_pb2.GetIamPolicyRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for get_iam_policy Override in a subclass to manipulate the request or metadata @@ -405,17 +747,43 @@ def pre_get_iam_policy( def post_get_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: """Post-rpc interceptor for get_iam_policy - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_iam_policy_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_get_iam_policy` interceptor runs + before the `post_get_iam_policy_with_metadata` interceptor. """ return response + def post_get_iam_policy_with_metadata( + self, + response: policy_pb2.Policy, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[policy_pb2.Policy, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_iam_policy + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableInstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_get_iam_policy_with_metadata` + interceptor in new development instead of the `post_get_iam_policy` interceptor. + When both interceptors are used, this `post_get_iam_policy_with_metadata` interceptor runs after the + `post_get_iam_policy` interceptor. The (possibly modified) response returned by + `post_get_iam_policy` will be passed to + `post_get_iam_policy_with_metadata`. + """ + return response, metadata + def pre_get_instance( self, request: bigtable_instance_admin.GetInstanceRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_instance_admin.GetInstanceRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.GetInstanceRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for get_instance Override in a subclass to manipulate the request or metadata @@ -426,18 +794,140 @@ def pre_get_instance( def post_get_instance(self, response: instance.Instance) -> instance.Instance: """Post-rpc interceptor for get_instance - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_instance_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the BigtableInstanceAdmin server but before + it is returned to user code. This `post_get_instance` interceptor runs + before the `post_get_instance_with_metadata` interceptor. + """ + return response + + def post_get_instance_with_metadata( + self, + response: instance.Instance, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[instance.Instance, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_instance + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableInstanceAdmin server but before it is returned to user code. 
+ + We recommend only using this `post_get_instance_with_metadata` + interceptor in new development instead of the `post_get_instance` interceptor. + When both interceptors are used, this `post_get_instance_with_metadata` interceptor runs after the + `post_get_instance` interceptor. The (possibly modified) response returned by + `post_get_instance` will be passed to + `post_get_instance_with_metadata`. + """ + return response, metadata + + def pre_get_logical_view( + self, + request: bigtable_instance_admin.GetLogicalViewRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.GetLogicalViewRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for get_logical_view + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def post_get_logical_view( + self, response: instance.LogicalView + ) -> instance.LogicalView: + """Post-rpc interceptor for get_logical_view + + DEPRECATED. Please use the `post_get_logical_view_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the BigtableInstanceAdmin server but before + it is returned to user code. This `post_get_logical_view` interceptor runs + before the `post_get_logical_view_with_metadata` interceptor. + """ + return response + + def post_get_logical_view_with_metadata( + self, + response: instance.LogicalView, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[instance.LogicalView, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_logical_view + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableInstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_get_logical_view_with_metadata` + interceptor in new development instead of the `post_get_logical_view` interceptor. + When both interceptors are used, this `post_get_logical_view_with_metadata` interceptor runs after the + `post_get_logical_view` interceptor. The (possibly modified) response returned by + `post_get_logical_view` will be passed to + `post_get_logical_view_with_metadata`. + """ + return response, metadata + + def pre_get_materialized_view( + self, + request: bigtable_instance_admin.GetMaterializedViewRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.GetMaterializedViewRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for get_materialized_view + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def post_get_materialized_view( + self, response: instance.MaterializedView + ) -> instance.MaterializedView: + """Post-rpc interceptor for get_materialized_view + + DEPRECATED. Please use the `post_get_materialized_view_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_get_materialized_view` interceptor runs + before the `post_get_materialized_view_with_metadata` interceptor. 
""" return response + def post_get_materialized_view_with_metadata( + self, + response: instance.MaterializedView, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[instance.MaterializedView, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_materialized_view + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableInstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_get_materialized_view_with_metadata` + interceptor in new development instead of the `post_get_materialized_view` interceptor. + When both interceptors are used, this `post_get_materialized_view_with_metadata` interceptor runs after the + `post_get_materialized_view` interceptor. The (possibly modified) response returned by + `post_get_materialized_view` will be passed to + `post_get_materialized_view_with_metadata`. + """ + return response, metadata + def pre_list_app_profiles( self, request: bigtable_instance_admin.ListAppProfilesRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - bigtable_instance_admin.ListAppProfilesRequest, Sequence[Tuple[str, str]] + bigtable_instance_admin.ListAppProfilesRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for list_app_profiles @@ -451,17 +941,46 @@ def post_list_app_profiles( ) -> bigtable_instance_admin.ListAppProfilesResponse: """Post-rpc interceptor for list_app_profiles - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_app_profiles_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_list_app_profiles` interceptor runs + before the `post_list_app_profiles_with_metadata` interceptor. """ return response + def post_list_app_profiles_with_metadata( + self, + response: bigtable_instance_admin.ListAppProfilesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.ListAppProfilesResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_app_profiles + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableInstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_list_app_profiles_with_metadata` + interceptor in new development instead of the `post_list_app_profiles` interceptor. + When both interceptors are used, this `post_list_app_profiles_with_metadata` interceptor runs after the + `post_list_app_profiles` interceptor. The (possibly modified) response returned by + `post_list_app_profiles` will be passed to + `post_list_app_profiles_with_metadata`. 
+ """ + return response, metadata + def pre_list_clusters( self, request: bigtable_instance_admin.ListClustersRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_instance_admin.ListClustersRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.ListClustersRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for list_clusters Override in a subclass to manipulate the request or metadata @@ -474,18 +993,45 @@ def post_list_clusters( ) -> bigtable_instance_admin.ListClustersResponse: """Post-rpc interceptor for list_clusters - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_clusters_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_list_clusters` interceptor runs + before the `post_list_clusters_with_metadata` interceptor. """ return response + def post_list_clusters_with_metadata( + self, + response: bigtable_instance_admin.ListClustersResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.ListClustersResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_clusters + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableInstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_list_clusters_with_metadata` + interceptor in new development instead of the `post_list_clusters` interceptor. + When both interceptors are used, this `post_list_clusters_with_metadata` interceptor runs after the + `post_list_clusters` interceptor. The (possibly modified) response returned by + `post_list_clusters` will be passed to + `post_list_clusters_with_metadata`. + """ + return response, metadata + def pre_list_hot_tablets( self, request: bigtable_instance_admin.ListHotTabletsRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - bigtable_instance_admin.ListHotTabletsRequest, Sequence[Tuple[str, str]] + bigtable_instance_admin.ListHotTabletsRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for list_hot_tablets @@ -499,17 +1045,46 @@ def post_list_hot_tablets( ) -> bigtable_instance_admin.ListHotTabletsResponse: """Post-rpc interceptor for list_hot_tablets - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_hot_tablets_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_list_hot_tablets` interceptor runs + before the `post_list_hot_tablets_with_metadata` interceptor. """ return response + def post_list_hot_tablets_with_metadata( + self, + response: bigtable_instance_admin.ListHotTabletsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.ListHotTabletsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_hot_tablets + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableInstanceAdmin server but before it is returned to user code. 
+ + We recommend only using this `post_list_hot_tablets_with_metadata` + interceptor in new development instead of the `post_list_hot_tablets` interceptor. + When both interceptors are used, this `post_list_hot_tablets_with_metadata` interceptor runs after the + `post_list_hot_tablets` interceptor. The (possibly modified) response returned by + `post_list_hot_tablets` will be passed to + `post_list_hot_tablets_with_metadata`. + """ + return response, metadata + def pre_list_instances( self, request: bigtable_instance_admin.ListInstancesRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_instance_admin.ListInstancesRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.ListInstancesRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for list_instances Override in a subclass to manipulate the request or metadata @@ -522,23 +1097,154 @@ def post_list_instances( ) -> bigtable_instance_admin.ListInstancesResponse: """Post-rpc interceptor for list_instances - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_instances_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_list_instances` interceptor runs + before the `post_list_instances_with_metadata` interceptor. """ return response - def pre_partial_update_cluster( + def post_list_instances_with_metadata( self, - request: bigtable_instance_admin.PartialUpdateClusterRequest, - metadata: Sequence[Tuple[str, str]], + response: bigtable_instance_admin.ListInstancesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - bigtable_instance_admin.PartialUpdateClusterRequest, Sequence[Tuple[str, str]] + bigtable_instance_admin.ListInstancesResponse, + Sequence[Tuple[str, Union[str, bytes]]], ]: - """Pre-rpc interceptor for partial_update_cluster + """Post-rpc interceptor for list_instances - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableInstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_list_instances_with_metadata` + interceptor in new development instead of the `post_list_instances` interceptor. + When both interceptors are used, this `post_list_instances_with_metadata` interceptor runs after the + `post_list_instances` interceptor. The (possibly modified) response returned by + `post_list_instances` will be passed to + `post_list_instances_with_metadata`. + """ + return response, metadata + + def pre_list_logical_views( + self, + request: bigtable_instance_admin.ListLogicalViewsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.ListLogicalViewsRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for list_logical_views + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. 
+ """ + return request, metadata + + def post_list_logical_views( + self, response: bigtable_instance_admin.ListLogicalViewsResponse + ) -> bigtable_instance_admin.ListLogicalViewsResponse: + """Post-rpc interceptor for list_logical_views + + DEPRECATED. Please use the `post_list_logical_views_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the BigtableInstanceAdmin server but before + it is returned to user code. This `post_list_logical_views` interceptor runs + before the `post_list_logical_views_with_metadata` interceptor. + """ + return response + + def post_list_logical_views_with_metadata( + self, + response: bigtable_instance_admin.ListLogicalViewsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.ListLogicalViewsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_logical_views + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableInstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_list_logical_views_with_metadata` + interceptor in new development instead of the `post_list_logical_views` interceptor. + When both interceptors are used, this `post_list_logical_views_with_metadata` interceptor runs after the + `post_list_logical_views` interceptor. The (possibly modified) response returned by + `post_list_logical_views` will be passed to + `post_list_logical_views_with_metadata`. + """ + return response, metadata + + def pre_list_materialized_views( + self, + request: bigtable_instance_admin.ListMaterializedViewsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.ListMaterializedViewsRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for list_materialized_views + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def post_list_materialized_views( + self, response: bigtable_instance_admin.ListMaterializedViewsResponse + ) -> bigtable_instance_admin.ListMaterializedViewsResponse: + """Post-rpc interceptor for list_materialized_views + + DEPRECATED. Please use the `post_list_materialized_views_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the BigtableInstanceAdmin server but before + it is returned to user code. This `post_list_materialized_views` interceptor runs + before the `post_list_materialized_views_with_metadata` interceptor. + """ + return response + + def post_list_materialized_views_with_metadata( + self, + response: bigtable_instance_admin.ListMaterializedViewsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.ListMaterializedViewsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_materialized_views + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableInstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_list_materialized_views_with_metadata` + interceptor in new development instead of the `post_list_materialized_views` interceptor. 
+ When both interceptors are used, this `post_list_materialized_views_with_metadata` interceptor runs after the + `post_list_materialized_views` interceptor. The (possibly modified) response returned by + `post_list_materialized_views` will be passed to + `post_list_materialized_views_with_metadata`. + """ + return response, metadata + + def pre_partial_update_cluster( + self, + request: bigtable_instance_admin.PartialUpdateClusterRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.PartialUpdateClusterRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for partial_update_cluster + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. """ return request, metadata @@ -547,18 +1253,42 @@ def post_partial_update_cluster( ) -> operations_pb2.Operation: """Post-rpc interceptor for partial_update_cluster - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_partial_update_cluster_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_partial_update_cluster` interceptor runs + before the `post_partial_update_cluster_with_metadata` interceptor. """ return response + def post_partial_update_cluster_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for partial_update_cluster + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableInstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_partial_update_cluster_with_metadata` + interceptor in new development instead of the `post_partial_update_cluster` interceptor. + When both interceptors are used, this `post_partial_update_cluster_with_metadata` interceptor runs after the + `post_partial_update_cluster` interceptor. The (possibly modified) response returned by + `post_partial_update_cluster` will be passed to + `post_partial_update_cluster_with_metadata`. + """ + return response, metadata + def pre_partial_update_instance( self, request: bigtable_instance_admin.PartialUpdateInstanceRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - bigtable_instance_admin.PartialUpdateInstanceRequest, Sequence[Tuple[str, str]] + bigtable_instance_admin.PartialUpdateInstanceRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for partial_update_instance @@ -572,17 +1302,42 @@ def post_partial_update_instance( ) -> operations_pb2.Operation: """Post-rpc interceptor for partial_update_instance - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_partial_update_instance_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_partial_update_instance` interceptor runs + before the `post_partial_update_instance_with_metadata` interceptor. 
""" return response + def post_partial_update_instance_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for partial_update_instance + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableInstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_partial_update_instance_with_metadata` + interceptor in new development instead of the `post_partial_update_instance` interceptor. + When both interceptors are used, this `post_partial_update_instance_with_metadata` interceptor runs after the + `post_partial_update_instance` interceptor. The (possibly modified) response returned by + `post_partial_update_instance` will be passed to + `post_partial_update_instance_with_metadata`. + """ + return response, metadata + def pre_set_iam_policy( self, request: iam_policy_pb2.SetIamPolicyRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[iam_policy_pb2.SetIamPolicyRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + iam_policy_pb2.SetIamPolicyRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for set_iam_policy Override in a subclass to manipulate the request or metadata @@ -593,17 +1348,43 @@ def pre_set_iam_policy( def post_set_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: """Post-rpc interceptor for set_iam_policy - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_set_iam_policy_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_set_iam_policy` interceptor runs + before the `post_set_iam_policy_with_metadata` interceptor. """ return response + def post_set_iam_policy_with_metadata( + self, + response: policy_pb2.Policy, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[policy_pb2.Policy, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for set_iam_policy + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableInstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_set_iam_policy_with_metadata` + interceptor in new development instead of the `post_set_iam_policy` interceptor. + When both interceptors are used, this `post_set_iam_policy_with_metadata` interceptor runs after the + `post_set_iam_policy` interceptor. The (possibly modified) response returned by + `post_set_iam_policy` will be passed to + `post_set_iam_policy_with_metadata`. 
+ """ + return response, metadata + def pre_test_iam_permissions( self, request: iam_policy_pb2.TestIamPermissionsRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[iam_policy_pb2.TestIamPermissionsRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + iam_policy_pb2.TestIamPermissionsRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for test_iam_permissions Override in a subclass to manipulate the request or metadata @@ -616,18 +1397,45 @@ def post_test_iam_permissions( ) -> iam_policy_pb2.TestIamPermissionsResponse: """Post-rpc interceptor for test_iam_permissions - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_test_iam_permissions_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_test_iam_permissions` interceptor runs + before the `post_test_iam_permissions_with_metadata` interceptor. """ return response + def post_test_iam_permissions_with_metadata( + self, + response: iam_policy_pb2.TestIamPermissionsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + iam_policy_pb2.TestIamPermissionsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for test_iam_permissions + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableInstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_test_iam_permissions_with_metadata` + interceptor in new development instead of the `post_test_iam_permissions` interceptor. + When both interceptors are used, this `post_test_iam_permissions_with_metadata` interceptor runs after the + `post_test_iam_permissions` interceptor. The (possibly modified) response returned by + `post_test_iam_permissions` will be passed to + `post_test_iam_permissions_with_metadata`. + """ + return response, metadata + def pre_update_app_profile( self, request: bigtable_instance_admin.UpdateAppProfileRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - bigtable_instance_admin.UpdateAppProfileRequest, Sequence[Tuple[str, str]] + bigtable_instance_admin.UpdateAppProfileRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for update_app_profile @@ -641,15 +1449,40 @@ def post_update_app_profile( ) -> operations_pb2.Operation: """Post-rpc interceptor for update_app_profile - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_app_profile_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_update_app_profile` interceptor runs + before the `post_update_app_profile_with_metadata` interceptor. 
""" return response + def post_update_app_profile_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_app_profile + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableInstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_update_app_profile_with_metadata` + interceptor in new development instead of the `post_update_app_profile` interceptor. + When both interceptors are used, this `post_update_app_profile_with_metadata` interceptor runs after the + `post_update_app_profile` interceptor. The (possibly modified) response returned by + `post_update_app_profile` will be passed to + `post_update_app_profile_with_metadata`. + """ + return response, metadata + def pre_update_cluster( - self, request: instance.Cluster, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[instance.Cluster, Sequence[Tuple[str, str]]]: + self, + request: instance.Cluster, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[instance.Cluster, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for update_cluster Override in a subclass to manipulate the request or metadata @@ -662,15 +1495,40 @@ def post_update_cluster( ) -> operations_pb2.Operation: """Post-rpc interceptor for update_cluster - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_cluster_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_update_cluster` interceptor runs + before the `post_update_cluster_with_metadata` interceptor. """ return response + def post_update_cluster_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_cluster + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableInstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_update_cluster_with_metadata` + interceptor in new development instead of the `post_update_cluster` interceptor. + When both interceptors are used, this `post_update_cluster_with_metadata` interceptor runs after the + `post_update_cluster` interceptor. The (possibly modified) response returned by + `post_update_cluster` will be passed to + `post_update_cluster_with_metadata`. + """ + return response, metadata + def pre_update_instance( - self, request: instance.Instance, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[instance.Instance, Sequence[Tuple[str, str]]]: + self, + request: instance.Instance, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[instance.Instance, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for update_instance Override in a subclass to manipulate the request or metadata @@ -681,12 +1539,133 @@ def pre_update_instance( def post_update_instance(self, response: instance.Instance) -> instance.Instance: """Post-rpc interceptor for update_instance - Override in a subclass to manipulate the response + DEPRECATED. 
Please use the `post_update_instance_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the BigtableInstanceAdmin server but before + it is returned to user code. This `post_update_instance` interceptor runs + before the `post_update_instance_with_metadata` interceptor. + """ + return response + + def post_update_instance_with_metadata( + self, + response: instance.Instance, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[instance.Instance, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_instance + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableInstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_update_instance_with_metadata` + interceptor in new development instead of the `post_update_instance` interceptor. + When both interceptors are used, this `post_update_instance_with_metadata` interceptor runs after the + `post_update_instance` interceptor. The (possibly modified) response returned by + `post_update_instance` will be passed to + `post_update_instance_with_metadata`. + """ + return response, metadata + + def pre_update_logical_view( + self, + request: bigtable_instance_admin.UpdateLogicalViewRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.UpdateLogicalViewRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for update_logical_view + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def post_update_logical_view( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for update_logical_view + + DEPRECATED. Please use the `post_update_logical_view_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the BigtableInstanceAdmin server but before + it is returned to user code. This `post_update_logical_view` interceptor runs + before the `post_update_logical_view_with_metadata` interceptor. + """ + return response + + def post_update_logical_view_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_logical_view + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableInstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_update_logical_view_with_metadata` + interceptor in new development instead of the `post_update_logical_view` interceptor. + When both interceptors are used, this `post_update_logical_view_with_metadata` interceptor runs after the + `post_update_logical_view` interceptor. The (possibly modified) response returned by + `post_update_logical_view` will be passed to + `post_update_logical_view_with_metadata`. 
+ """ + return response, metadata + + def pre_update_materialized_view( + self, + request: bigtable_instance_admin.UpdateMaterializedViewRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.UpdateMaterializedViewRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for update_materialized_view + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def post_update_materialized_view( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for update_materialized_view + + DEPRECATED. Please use the `post_update_materialized_view_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_update_materialized_view` interceptor runs + before the `post_update_materialized_view_with_metadata` interceptor. """ return response + def post_update_materialized_view_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_materialized_view + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableInstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_update_materialized_view_with_metadata` + interceptor in new development instead of the `post_update_materialized_view` interceptor. + When both interceptors are used, this `post_update_materialized_view_with_metadata` interceptor runs after the + `post_update_materialized_view` interceptor. The (possibly modified) response returned by + `post_update_materialized_view` will be passed to + `post_update_materialized_view_with_metadata`. + """ + return response, metadata + @dataclasses.dataclass class BigtableInstanceAdminRestStub: @@ -866,7 +1845,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> instance.AppProfile: r"""Call the create app profile method over HTTP. @@ -877,8 +1856,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.instance.AppProfile: @@ -891,6 +1872,7 @@ def __call__( http_options = ( _BaseBigtableInstanceAdminRestTransport._BaseCreateAppProfile._get_http_options() ) + request, metadata = self._interceptor.pre_create_app_profile( request, metadata ) @@ -907,6 +1889,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.CreateAppProfile", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "CreateAppProfile", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = ( BigtableInstanceAdminRestTransport._CreateAppProfile._get_response( @@ -930,7 +1939,33 @@ def __call__( pb_resp = instance.AppProfile.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_app_profile(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_app_profile_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = instance.AppProfile.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.create_app_profile", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "CreateAppProfile", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _CreateCluster( @@ -969,7 +2004,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the create cluster method over HTTP. @@ -980,8 +2015,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
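The CLIENT_LOGGING_SUPPORTED blocks added to each handler emit structured "Sending request for ..." and "Received response for ..." records, but only when the transport module's logger is enabled for DEBUG (and when the installed google-api-core supports client logging). A sketch of switching that on with the standard logging module follows; raising the shared "google" parent logger is an assumption that works because child loggers inherit its level.

    import logging
    import sys

    # Sketch: make the DEBUG-level request/response records added above visible.
    # The transport logs on its own module logger, so configuring the "google"
    # parent logger with a handler and DEBUG level is enough to surface them.
    handler = logging.StreamHandler(sys.stderr)
    handler.setFormatter(logging.Formatter("%(name)s %(levelname)s %(message)s"))

    google_logger = logging.getLogger("google")
    google_logger.setLevel(logging.DEBUG)
    google_logger.addHandler(handler)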
Returns: ~.operations_pb2.Operation: @@ -994,6 +2031,7 @@ def __call__( http_options = ( _BaseBigtableInstanceAdminRestTransport._BaseCreateCluster._get_http_options() ) + request, metadata = self._interceptor.pre_create_cluster(request, metadata) transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseCreateCluster._get_transcoded_request( http_options, request @@ -1008,6 +2046,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.CreateCluster", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "CreateCluster", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableInstanceAdminRestTransport._CreateCluster._get_response( self._host, @@ -1027,7 +2092,33 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_cluster(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_cluster_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.create_cluster", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "CreateCluster", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _CreateInstance( @@ -1066,7 +2157,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the create instance method over HTTP. @@ -1077,8 +2168,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
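Every handler in this file converts a non-2xx response into an exception via core_exceptions.from_http_response, so callers see a google.api_core GoogleAPICallError subclass rather than a raw HTTP error. A hedged sketch of handling that at the call site is below; `client` and `request` are placeholders for a configured admin client and its CreateInstanceRequest, and only the exception types are the point.

    from google.api_core import exceptions as core_exceptions


    def create_instance_safely(client, request):
        """Sketch: translate the mapped HTTP errors raised by the REST transport.

        `client` and `request` are placeholders (an admin client and a
        CreateInstanceRequest); both are assumptions of this sketch.
        """
        try:
            return client.create_instance(request=request)
        except core_exceptions.Conflict:
            # An HTTP 409 (for example, an instance ID already in use) is
            # mapped to Conflict by core_exceptions.from_http_response.
            print("instance already exists; skipping create")
            return None
        except core_exceptions.GoogleAPICallError as exc:
            # Any other non-2xx status surfaces as a GoogleAPICallError subclass.
            print(f"create_instance failed: {exc}")
            raise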
Returns: ~.operations_pb2.Operation: @@ -1091,6 +2184,7 @@ def __call__( http_options = ( _BaseBigtableInstanceAdminRestTransport._BaseCreateInstance._get_http_options() ) + request, metadata = self._interceptor.pre_create_instance(request, metadata) transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseCreateInstance._get_transcoded_request( http_options, request @@ -1105,6 +2199,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.CreateInstance", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "CreateInstance", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableInstanceAdminRestTransport._CreateInstance._get_response( self._host, @@ -1124,15 +2245,41 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_instance(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_instance_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.create_instance", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "CreateInstance", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp - class _DeleteAppProfile( - _BaseBigtableInstanceAdminRestTransport._BaseDeleteAppProfile, + class _CreateLogicalView( + _BaseBigtableInstanceAdminRestTransport._BaseCreateLogicalView, BigtableInstanceAdminRestStub, ): def __hash__(self): - return hash("BigtableInstanceAdminRestTransport.DeleteAppProfile") + return hash("BigtableInstanceAdminRestTransport.CreateLogicalView") @staticmethod def _get_response( @@ -1153,54 +2300,97 @@ def _get_response( timeout=timeout, headers=headers, params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, ) return response def __call__( self, - request: bigtable_instance_admin.DeleteAppProfileRequest, + request: bigtable_instance_admin.CreateLogicalViewRequest, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), - ): - r"""Call the delete app profile method over HTTP. + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the create logical view method over HTTP. Args: - request (~.bigtable_instance_admin.DeleteAppProfileRequest): + request (~.bigtable_instance_admin.CreateLogicalViewRequest): The request object. 
Request message for - BigtableInstanceAdmin.DeleteAppProfile. + BigtableInstanceAdmin.CreateLogicalView. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. + """ http_options = ( - _BaseBigtableInstanceAdminRestTransport._BaseDeleteAppProfile._get_http_options() + _BaseBigtableInstanceAdminRestTransport._BaseCreateLogicalView._get_http_options() ) - request, metadata = self._interceptor.pre_delete_app_profile( + + request, metadata = self._interceptor.pre_create_logical_view( request, metadata ) - transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseDeleteAppProfile._get_transcoded_request( + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseCreateLogicalView._get_transcoded_request( http_options, request ) + body = _BaseBigtableInstanceAdminRestTransport._BaseCreateLogicalView._get_request_body_json( + transcoded_request + ) + # Jsonify the query params - query_params = _BaseBigtableInstanceAdminRestTransport._BaseDeleteAppProfile._get_query_params_json( + query_params = _BaseBigtableInstanceAdminRestTransport._BaseCreateLogicalView._get_query_params_json( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.CreateLogicalView", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "CreateLogicalView", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = ( - BigtableInstanceAdminRestTransport._DeleteAppProfile._get_response( + BigtableInstanceAdminRestTransport._CreateLogicalView._get_response( self._host, metadata, query_params, self._session, timeout, transcoded_request, + body, ) ) @@ -1209,12 +2399,44 @@ def __call__( if response.status_code >= 400: raise core_exceptions.from_http_response(response) - class _DeleteCluster( - _BaseBigtableInstanceAdminRestTransport._BaseDeleteCluster, + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_create_logical_view(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_logical_view_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = 
{ + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.create_logical_view", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "CreateLogicalView", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _CreateMaterializedView( + _BaseBigtableInstanceAdminRestTransport._BaseCreateMaterializedView, BigtableInstanceAdminRestStub, ): def __hash__(self): - return hash("BigtableInstanceAdminRestTransport.DeleteCluster") + return hash("BigtableInstanceAdminRestTransport.CreateMaterializedView") @staticmethod def _get_response( @@ -1235,51 +2457,96 @@ def _get_response( timeout=timeout, headers=headers, params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, ) return response def __call__( self, - request: bigtable_instance_admin.DeleteClusterRequest, + request: bigtable_instance_admin.CreateMaterializedViewRequest, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), - ): - r"""Call the delete cluster method over HTTP. + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the create materialized view method over HTTP. Args: - request (~.bigtable_instance_admin.DeleteClusterRequest): + request (~.bigtable_instance_admin.CreateMaterializedViewRequest): The request object. Request message for - BigtableInstanceAdmin.DeleteCluster. + BigtableInstanceAdmin.CreateMaterializedView. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
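At this transport layer the create-view calls hand back the raw google.longrunning Operation proto parsed out of the HTTP body; the higher-level client is what normally wraps it in a polling future. The sketch below reads that raw proto directly, and it assumes (not shown in this patch) that a finished CreateLogicalView operation packs an instance.LogicalView into Operation.response; the types import path is likewise an assumption.

    from google.longrunning import operations_pb2

    # Assumed import path for the proto-plus types, mirroring the `instance.`
    # references elsewhere in this file.
    from google.cloud.bigtable_admin_v2.types import instance


    def unpack_logical_view(op: operations_pb2.Operation):
        """Sketch: return the LogicalView once the operation finished, else None."""
        if not op.done:
            # Still running; callers would poll GetOperation (or rely on the
            # higher-level google.api_core operation wrapper) until done is set.
            return None
        if op.HasField("error"):
            raise RuntimeError(f"operation {op.name} failed: {op.error.message}")
        view = instance.LogicalView()
        # Operation.response is a protobuf Any; unpack it into the pb2 message
        # underlying the proto-plus wrapper (assumption: it holds a LogicalView).
        op.response.Unpack(instance.LogicalView.pb(view))
        return view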
+ """ http_options = ( - _BaseBigtableInstanceAdminRestTransport._BaseDeleteCluster._get_http_options() + _BaseBigtableInstanceAdminRestTransport._BaseCreateMaterializedView._get_http_options() ) - request, metadata = self._interceptor.pre_delete_cluster(request, metadata) - transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseDeleteCluster._get_transcoded_request( + + request, metadata = self._interceptor.pre_create_materialized_view( + request, metadata + ) + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseCreateMaterializedView._get_transcoded_request( http_options, request ) + body = _BaseBigtableInstanceAdminRestTransport._BaseCreateMaterializedView._get_request_body_json( + transcoded_request + ) + # Jsonify the query params - query_params = _BaseBigtableInstanceAdminRestTransport._BaseDeleteCluster._get_query_params_json( + query_params = _BaseBigtableInstanceAdminRestTransport._BaseCreateMaterializedView._get_query_params_json( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.CreateMaterializedView", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "CreateMaterializedView", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request - response = BigtableInstanceAdminRestTransport._DeleteCluster._get_response( + response = BigtableInstanceAdminRestTransport._CreateMaterializedView._get_response( self._host, metadata, query_params, self._session, timeout, transcoded_request, + body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -1287,12 +2554,484 @@ def __call__( if response.status_code >= 400: raise core_exceptions.from_http_response(response) - class _DeleteInstance( - _BaseBigtableInstanceAdminRestTransport._BaseDeleteInstance, + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_create_materialized_view(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_materialized_view_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.create_materialized_view", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "CreateMaterializedView", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _DeleteAppProfile( + _BaseBigtableInstanceAdminRestTransport._BaseDeleteAppProfile, + BigtableInstanceAdminRestStub, + ): + def __hash__(self): + return 
hash("BigtableInstanceAdminRestTransport.DeleteAppProfile") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: bigtable_instance_admin.DeleteAppProfileRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ): + r"""Call the delete app profile method over HTTP. + + Args: + request (~.bigtable_instance_admin.DeleteAppProfileRequest): + The request object. Request message for + BigtableInstanceAdmin.DeleteAppProfile. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseDeleteAppProfile._get_http_options() + ) + + request, metadata = self._interceptor.pre_delete_app_profile( + request, metadata + ) + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseDeleteAppProfile._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseBigtableInstanceAdminRestTransport._BaseDeleteAppProfile._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.DeleteAppProfile", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "DeleteAppProfile", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + BigtableInstanceAdminRestTransport._DeleteAppProfile._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + class _DeleteCluster( + _BaseBigtableInstanceAdminRestTransport._BaseDeleteCluster, + BigtableInstanceAdminRestStub, + ): + def __hash__(self): + return hash("BigtableInstanceAdminRestTransport.DeleteCluster") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: bigtable_instance_admin.DeleteClusterRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ): + r"""Call the delete cluster method over HTTP. + + Args: + request (~.bigtable_instance_admin.DeleteClusterRequest): + The request object. Request message for + BigtableInstanceAdmin.DeleteCluster. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseDeleteCluster._get_http_options() + ) + + request, metadata = self._interceptor.pre_delete_cluster(request, metadata) + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseDeleteCluster._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseBigtableInstanceAdminRestTransport._BaseDeleteCluster._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.DeleteCluster", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "DeleteCluster", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = BigtableInstanceAdminRestTransport._DeleteCluster._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + class _DeleteInstance( + _BaseBigtableInstanceAdminRestTransport._BaseDeleteInstance, + BigtableInstanceAdminRestStub, + ): + def __hash__(self): + return hash("BigtableInstanceAdminRestTransport.DeleteInstance") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: bigtable_instance_admin.DeleteInstanceRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ): + r"""Call the delete instance method over HTTP. + + Args: + request (~.bigtable_instance_admin.DeleteInstanceRequest): + The request object. Request message for + BigtableInstanceAdmin.DeleteInstance. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseDeleteInstance._get_http_options() + ) + + request, metadata = self._interceptor.pre_delete_instance(request, metadata) + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseDeleteInstance._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseBigtableInstanceAdminRestTransport._BaseDeleteInstance._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.DeleteInstance", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "DeleteInstance", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = BigtableInstanceAdminRestTransport._DeleteInstance._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + class _DeleteLogicalView( + _BaseBigtableInstanceAdminRestTransport._BaseDeleteLogicalView, + BigtableInstanceAdminRestStub, + ): + def __hash__(self): + return hash("BigtableInstanceAdminRestTransport.DeleteLogicalView") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: bigtable_instance_admin.DeleteLogicalViewRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ): + r"""Call the delete logical view method over HTTP. + + Args: + request (~.bigtable_instance_admin.DeleteLogicalViewRequest): + The request object. Request message for + BigtableInstanceAdmin.DeleteLogicalView. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseDeleteLogicalView._get_http_options() + ) + + request, metadata = self._interceptor.pre_delete_logical_view( + request, metadata + ) + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseDeleteLogicalView._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseBigtableInstanceAdminRestTransport._BaseDeleteLogicalView._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.DeleteLogicalView", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "DeleteLogicalView", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + BigtableInstanceAdminRestTransport._DeleteLogicalView._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + class _DeleteMaterializedView( + _BaseBigtableInstanceAdminRestTransport._BaseDeleteMaterializedView, BigtableInstanceAdminRestStub, ): def __hash__(self): - return hash("BigtableInstanceAdminRestTransport.DeleteInstance") + return hash("BigtableInstanceAdminRestTransport.DeleteMaterializedView") @staticmethod def _get_response( @@ -1318,40 +3057,72 @@ def _get_response( def __call__( self, - request: bigtable_instance_admin.DeleteInstanceRequest, + request: bigtable_instance_admin.DeleteMaterializedViewRequest, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ): - r"""Call the delete instance method over HTTP. + r"""Call the delete materialized view method over HTTP. Args: - request (~.bigtable_instance_admin.DeleteInstanceRequest): + request (~.bigtable_instance_admin.DeleteMaterializedViewRequest): The request object. Request message for - BigtableInstanceAdmin.DeleteInstance. + BigtableInstanceAdmin.DeleteMaterializedView. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ http_options = ( - _BaseBigtableInstanceAdminRestTransport._BaseDeleteInstance._get_http_options() + _BaseBigtableInstanceAdminRestTransport._BaseDeleteMaterializedView._get_http_options() ) - request, metadata = self._interceptor.pre_delete_instance(request, metadata) - transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseDeleteInstance._get_transcoded_request( + + request, metadata = self._interceptor.pre_delete_materialized_view( + request, metadata + ) + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseDeleteMaterializedView._get_transcoded_request( http_options, request ) # Jsonify the query params - query_params = _BaseBigtableInstanceAdminRestTransport._BaseDeleteInstance._get_query_params_json( + query_params = _BaseBigtableInstanceAdminRestTransport._BaseDeleteMaterializedView._get_query_params_json( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.DeleteMaterializedView", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "DeleteMaterializedView", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request - response = BigtableInstanceAdminRestTransport._DeleteInstance._get_response( + response = BigtableInstanceAdminRestTransport._DeleteMaterializedView._get_response( self._host, 
metadata, query_params, @@ -1400,7 +3171,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> instance.AppProfile: r"""Call the get app profile method over HTTP. @@ -1411,8 +3182,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: ~.instance.AppProfile: @@ -1425,6 +3198,7 @@ def __call__( http_options = ( _BaseBigtableInstanceAdminRestTransport._BaseGetAppProfile._get_http_options() ) + request, metadata = self._interceptor.pre_get_app_profile(request, metadata) transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseGetAppProfile._get_transcoded_request( http_options, request @@ -1435,6 +3209,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.GetAppProfile", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "GetAppProfile", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableInstanceAdminRestTransport._GetAppProfile._get_response( self._host, @@ -1455,7 +3256,33 @@ def __call__( pb_resp = instance.AppProfile.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_app_profile(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_app_profile_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = instance.AppProfile.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.get_app_profile", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "GetAppProfile", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _GetCluster( @@ -1493,7 +3320,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> instance.Cluster: r"""Call the get cluster method over HTTP. 
@@ -1504,8 +3331,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: ~.instance.Cluster: @@ -1519,6 +3348,7 @@ def __call__( http_options = ( _BaseBigtableInstanceAdminRestTransport._BaseGetCluster._get_http_options() ) + request, metadata = self._interceptor.pre_get_cluster(request, metadata) transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseGetCluster._get_transcoded_request( http_options, request @@ -1529,6 +3359,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.GetCluster", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "GetCluster", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableInstanceAdminRestTransport._GetCluster._get_response( self._host, @@ -1549,7 +3406,33 @@ def __call__( pb_resp = instance.Cluster.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_cluster(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_cluster_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = instance.Cluster.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.get_cluster", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "GetCluster", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _GetIamPolicy( @@ -1588,7 +3471,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: r"""Call the get iam policy method over HTTP. @@ -1598,8 +3481,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: ~.policy_pb2.Policy: @@ -1684,6 +3569,7 @@ def __call__( http_options = ( _BaseBigtableInstanceAdminRestTransport._BaseGetIamPolicy._get_http_options() ) + request, metadata = self._interceptor.pre_get_iam_policy(request, metadata) transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseGetIamPolicy._get_transcoded_request( http_options, request @@ -1698,15 +3584,344 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.GetIamPolicy", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "GetIamPolicy", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = BigtableInstanceAdminRestTransport._GetIamPolicy._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = policy_pb2.Policy() + pb_resp = resp + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_get_iam_policy(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_iam_policy_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.get_iam_policy", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "GetIamPolicy", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _GetInstance( + _BaseBigtableInstanceAdminRestTransport._BaseGetInstance, + BigtableInstanceAdminRestStub, + ): + def __hash__(self): + return hash("BigtableInstanceAdminRestTransport.GetInstance") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: bigtable_instance_admin.GetInstanceRequest, + *, + retry: OptionalRetry = 
gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> instance.Instance: + r"""Call the get instance method over HTTP. + + Args: + request (~.bigtable_instance_admin.GetInstanceRequest): + The request object. Request message for + BigtableInstanceAdmin.GetInstance. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.instance.Instance: + A collection of Bigtable + [Tables][google.bigtable.admin.v2.Table] and the + resources that serve them. All tables in an instance are + served from all + [Clusters][google.bigtable.admin.v2.Cluster] in the + instance. + + """ + + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseGetInstance._get_http_options() + ) + + request, metadata = self._interceptor.pre_get_instance(request, metadata) + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseGetInstance._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseBigtableInstanceAdminRestTransport._BaseGetInstance._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.GetInstance", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "GetInstance", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = BigtableInstanceAdminRestTransport._GetInstance._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = instance.Instance() + pb_resp = instance.Instance.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_get_instance(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_instance_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = instance.Instance.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.get_instance", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "GetInstance", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _GetLogicalView( + _BaseBigtableInstanceAdminRestTransport._BaseGetLogicalView, + BigtableInstanceAdminRestStub, + ): + def __hash__(self): + return hash("BigtableInstanceAdminRestTransport.GetLogicalView") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: bigtable_instance_admin.GetLogicalViewRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> instance.LogicalView: + r"""Call the get logical view method over HTTP. + + Args: + request (~.bigtable_instance_admin.GetLogicalViewRequest): + The request object. Request message for + BigtableInstanceAdmin.GetLogicalView. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.instance.LogicalView: + A SQL logical view object that can be + referenced in SQL queries. 
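The response handling in these handlers relies on a proto-plus detail worth spelling out: Message.pb(resp) returns the wrapper's underlying protobuf message, so json_format.Parse can fill that message in place and the already-constructed wrapper picks up the parsed fields. A self-contained illustration follows; the JSON payload is invented and the types import path is an assumption.

    from google.protobuf import json_format

    # Assumed import path, mirroring the `instance.` references in this file.
    from google.cloud.bigtable_admin_v2.types import instance

    resp = instance.Instance()
    pb_resp = instance.Instance.pb(resp)  # the wrapper's underlying pb2 message

    # Parsing into pb_resp mutates the same message the wrapper `resp` reads from.
    json_format.Parse(
        '{"name": "projects/p/instances/i", "displayName": "demo"}',
        pb_resp,
        ignore_unknown_fields=True,
    )

    assert resp.name == "projects/p/instances/i"
    assert resp.display_name == "demo"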
+ + """ + + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseGetLogicalView._get_http_options() + ) + + request, metadata = self._interceptor.pre_get_logical_view( + request, metadata + ) + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseGetLogicalView._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseBigtableInstanceAdminRestTransport._BaseGetLogicalView._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.GetLogicalView", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "GetLogicalView", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request - response = BigtableInstanceAdminRestTransport._GetIamPolicy._get_response( + response = BigtableInstanceAdminRestTransport._GetLogicalView._get_response( self._host, metadata, query_params, self._session, timeout, transcoded_request, - body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -1715,19 +3930,45 @@ def __call__( raise core_exceptions.from_http_response(response) # Return the response - resp = policy_pb2.Policy() - pb_resp = resp + resp = instance.LogicalView() + pb_resp = instance.LogicalView.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_get_iam_policy(resp) + + resp = self._interceptor.post_get_logical_view(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_logical_view_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = instance.LogicalView.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.get_logical_view", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "GetLogicalView", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp - class _GetInstance( - _BaseBigtableInstanceAdminRestTransport._BaseGetInstance, + class _GetMaterializedView( + _BaseBigtableInstanceAdminRestTransport._BaseGetMaterializedView, BigtableInstanceAdminRestStub, ): def __hash__(self): - return hash("BigtableInstanceAdminRestTransport.GetInstance") + return hash("BigtableInstanceAdminRestTransport.GetMaterializedView") @staticmethod def _get_response( @@ -1753,56 +3994,86 @@ def _get_response( def __call__( self, - request: bigtable_instance_admin.GetInstanceRequest, + request: bigtable_instance_admin.GetMaterializedViewRequest, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> 
instance.Instance: - r"""Call the get instance method over HTTP. + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> instance.MaterializedView: + r"""Call the get materialized view method over HTTP. Args: - request (~.bigtable_instance_admin.GetInstanceRequest): + request (~.bigtable_instance_admin.GetMaterializedViewRequest): The request object. Request message for - BigtableInstanceAdmin.GetInstance. + BigtableInstanceAdmin.GetMaterializedView. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: - ~.instance.Instance: - A collection of Bigtable - [Tables][google.bigtable.admin.v2.Table] and the - resources that serve them. All tables in an instance are - served from all - [Clusters][google.bigtable.admin.v2.Cluster] in the - instance. + ~.instance.MaterializedView: + A materialized view object that can + be referenced in SQL queries. """ http_options = ( - _BaseBigtableInstanceAdminRestTransport._BaseGetInstance._get_http_options() + _BaseBigtableInstanceAdminRestTransport._BaseGetMaterializedView._get_http_options() ) - request, metadata = self._interceptor.pre_get_instance(request, metadata) - transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseGetInstance._get_transcoded_request( + + request, metadata = self._interceptor.pre_get_materialized_view( + request, metadata + ) + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseGetMaterializedView._get_transcoded_request( http_options, request ) # Jsonify the query params - query_params = _BaseBigtableInstanceAdminRestTransport._BaseGetInstance._get_query_params_json( + query_params = _BaseBigtableInstanceAdminRestTransport._BaseGetMaterializedView._get_query_params_json( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.GetMaterializedView", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "GetMaterializedView", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request - response = BigtableInstanceAdminRestTransport._GetInstance._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, + response = ( + BigtableInstanceAdminRestTransport._GetMaterializedView._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -1811,11 +4082,37 @@ def __call__( raise core_exceptions.from_http_response(response) # Return the response - resp = 
instance.Instance() - pb_resp = instance.Instance.pb(resp) + resp = instance.MaterializedView() + pb_resp = instance.MaterializedView.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_get_instance(resp) + + resp = self._interceptor.post_get_materialized_view(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_materialized_view_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = instance.MaterializedView.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.get_materialized_view", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "GetMaterializedView", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _ListAppProfiles( @@ -1853,7 +4150,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable_instance_admin.ListAppProfilesResponse: r"""Call the list app profiles method over HTTP. @@ -1864,8 +4161,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.bigtable_instance_admin.ListAppProfilesResponse: @@ -1877,6 +4176,7 @@ def __call__( http_options = ( _BaseBigtableInstanceAdminRestTransport._BaseListAppProfiles._get_http_options() ) + request, metadata = self._interceptor.pre_list_app_profiles( request, metadata ) @@ -1889,6 +4189,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.ListAppProfiles", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "ListAppProfiles", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = ( BigtableInstanceAdminRestTransport._ListAppProfiles._get_response( @@ -1911,7 +4238,37 @@ def __call__( pb_resp = bigtable_instance_admin.ListAppProfilesResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_app_profiles(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_app_profiles_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = ( + bigtable_instance_admin.ListAppProfilesResponse.to_json( + response + ) + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.list_app_profiles", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "ListAppProfiles", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _ListClusters( @@ -1949,7 +4306,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable_instance_admin.ListClustersResponse: r"""Call the list clusters method over HTTP. @@ -1960,8 +4317,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.bigtable_instance_admin.ListClustersResponse: @@ -1973,6 +4332,7 @@ def __call__( http_options = ( _BaseBigtableInstanceAdminRestTransport._BaseListClusters._get_http_options() ) + request, metadata = self._interceptor.pre_list_clusters(request, metadata) transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseListClusters._get_transcoded_request( http_options, request @@ -1983,6 +4343,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.ListClusters", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "ListClusters", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableInstanceAdminRestTransport._ListClusters._get_response( self._host, @@ -2003,7 +4390,35 @@ def __call__( pb_resp = bigtable_instance_admin.ListClustersResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_clusters(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_clusters_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = ( + bigtable_instance_admin.ListClustersResponse.to_json(response) + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.list_clusters", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "ListClusters", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _ListHotTablets( @@ -2041,7 +4456,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable_instance_admin.ListHotTabletsResponse: r"""Call the list hot tablets method over HTTP. @@ -2052,8 +4467,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.bigtable_instance_admin.ListHotTabletsResponse: @@ -2065,6 +4482,7 @@ def __call__( http_options = ( _BaseBigtableInstanceAdminRestTransport._BaseListHotTablets._get_http_options() ) + request, metadata = self._interceptor.pre_list_hot_tablets( request, metadata ) @@ -2077,6 +4495,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.ListHotTablets", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "ListHotTablets", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableInstanceAdminRestTransport._ListHotTablets._get_response( self._host, @@ -2097,7 +4542,35 @@ def __call__( pb_resp = bigtable_instance_admin.ListHotTabletsResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_hot_tablets(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_hot_tablets_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = ( + bigtable_instance_admin.ListHotTabletsResponse.to_json(response) + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.list_hot_tablets", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "ListHotTablets", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _ListInstances( @@ -2135,7 +4608,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable_instance_admin.ListInstancesResponse: r"""Call the list instances method over HTTP. @@ -2146,8 +4619,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.bigtable_instance_admin.ListInstancesResponse: @@ -2159,6 +4634,7 @@ def __call__( http_options = ( _BaseBigtableInstanceAdminRestTransport._BaseListInstances._get_http_options() ) + request, metadata = self._interceptor.pre_list_instances(request, metadata) transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseListInstances._get_transcoded_request( http_options, request @@ -2169,6 +4645,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.ListInstances", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "ListInstances", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableInstanceAdminRestTransport._ListInstances._get_response( self._host, @@ -2185,11 +4688,351 @@ def __call__( raise core_exceptions.from_http_response(response) # Return the response - resp = bigtable_instance_admin.ListInstancesResponse() - pb_resp = bigtable_instance_admin.ListInstancesResponse.pb(resp) + resp = bigtable_instance_admin.ListInstancesResponse() + pb_resp = bigtable_instance_admin.ListInstancesResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_list_instances(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_instances_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = ( + bigtable_instance_admin.ListInstancesResponse.to_json(response) + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.list_instances", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "ListInstances", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _ListLogicalViews( + _BaseBigtableInstanceAdminRestTransport._BaseListLogicalViews, + BigtableInstanceAdminRestStub, + ): + def __hash__(self): + return hash("BigtableInstanceAdminRestTransport.ListLogicalViews") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: bigtable_instance_admin.ListLogicalViewsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + 
metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> bigtable_instance_admin.ListLogicalViewsResponse: + r"""Call the list logical views method over HTTP. + + Args: + request (~.bigtable_instance_admin.ListLogicalViewsRequest): + The request object. Request message for + BigtableInstanceAdmin.ListLogicalViews. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.bigtable_instance_admin.ListLogicalViewsResponse: + Response message for + BigtableInstanceAdmin.ListLogicalViews. + + """ + + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseListLogicalViews._get_http_options() + ) + + request, metadata = self._interceptor.pre_list_logical_views( + request, metadata + ) + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseListLogicalViews._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseBigtableInstanceAdminRestTransport._BaseListLogicalViews._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.ListLogicalViews", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "ListLogicalViews", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + BigtableInstanceAdminRestTransport._ListLogicalViews._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = bigtable_instance_admin.ListLogicalViewsResponse() + pb_resp = bigtable_instance_admin.ListLogicalViewsResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_list_logical_views(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_logical_views_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = ( + bigtable_instance_admin.ListLogicalViewsResponse.to_json( + response + ) + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.list_logical_views", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "ListLogicalViews", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _ListMaterializedViews( + _BaseBigtableInstanceAdminRestTransport._BaseListMaterializedViews, + BigtableInstanceAdminRestStub, + ): + def __hash__(self): + return hash("BigtableInstanceAdminRestTransport.ListMaterializedViews") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: bigtable_instance_admin.ListMaterializedViewsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> bigtable_instance_admin.ListMaterializedViewsResponse: + r"""Call the list materialized views method over HTTP. + + Args: + request (~.bigtable_instance_admin.ListMaterializedViewsRequest): + The request object. Request message for + BigtableInstanceAdmin.ListMaterializedViews. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.bigtable_instance_admin.ListMaterializedViewsResponse: + Response message for + BigtableInstanceAdmin.ListMaterializedViews. 
+ + """ + + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseListMaterializedViews._get_http_options() + ) + + request, metadata = self._interceptor.pre_list_materialized_views( + request, metadata + ) + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseListMaterializedViews._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseBigtableInstanceAdminRestTransport._BaseListMaterializedViews._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.ListMaterializedViews", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "ListMaterializedViews", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + BigtableInstanceAdminRestTransport._ListMaterializedViews._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = bigtable_instance_admin.ListMaterializedViewsResponse() + pb_resp = bigtable_instance_admin.ListMaterializedViewsResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_list_instances(resp) + + resp = self._interceptor.post_list_materialized_views(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_materialized_views_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = ( + bigtable_instance_admin.ListMaterializedViewsResponse.to_json( + response + ) + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.list_materialized_views", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "ListMaterializedViews", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _PartialUpdateCluster( @@ -2228,7 +5071,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the partial update cluster method over HTTP. @@ -2239,8 +5082,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: ~.operations_pb2.Operation: @@ -2253,6 +5098,7 @@ def __call__( http_options = ( _BaseBigtableInstanceAdminRestTransport._BasePartialUpdateCluster._get_http_options() ) + request, metadata = self._interceptor.pre_partial_update_cluster( request, metadata ) @@ -2269,6 +5115,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.PartialUpdateCluster", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "PartialUpdateCluster", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = ( BigtableInstanceAdminRestTransport._PartialUpdateCluster._get_response( @@ -2290,7 +5163,33 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_partial_update_cluster(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_partial_update_cluster_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.partial_update_cluster", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "PartialUpdateCluster", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _PartialUpdateInstance( @@ -2329,7 +5228,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the partial update instance method over HTTP. @@ -2340,8 +5239,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.operations_pb2.Operation: @@ -2354,6 +5255,7 @@ def __call__( http_options = ( _BaseBigtableInstanceAdminRestTransport._BasePartialUpdateInstance._get_http_options() ) + request, metadata = self._interceptor.pre_partial_update_instance( request, metadata ) @@ -2370,6 +5272,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.PartialUpdateInstance", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "PartialUpdateInstance", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = ( BigtableInstanceAdminRestTransport._PartialUpdateInstance._get_response( @@ -2391,7 +5320,33 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_partial_update_instance(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_partial_update_instance_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.partial_update_instance", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "PartialUpdateInstance", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _SetIamPolicy( @@ -2430,7 +5385,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: r"""Call the set iam policy method over HTTP. @@ -2440,8 +5395,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.policy_pb2.Policy: @@ -2526,6 +5483,7 @@ def __call__( http_options = ( _BaseBigtableInstanceAdminRestTransport._BaseSetIamPolicy._get_http_options() ) + request, metadata = self._interceptor.pre_set_iam_policy(request, metadata) transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseSetIamPolicy._get_transcoded_request( http_options, request @@ -2540,6 +5498,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.SetIamPolicy", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "SetIamPolicy", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableInstanceAdminRestTransport._SetIamPolicy._get_response( self._host, @@ -2561,7 +5546,33 @@ def __call__( pb_resp = resp json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_set_iam_policy(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_set_iam_policy_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.set_iam_policy", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "SetIamPolicy", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _TestIamPermissions( @@ -2600,7 +5611,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: r"""Call the test iam permissions method over HTTP. @@ -2610,8 +5621,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.iam_policy_pb2.TestIamPermissionsResponse: @@ -2621,6 +5634,7 @@ def __call__( http_options = ( _BaseBigtableInstanceAdminRestTransport._BaseTestIamPermissions._get_http_options() ) + request, metadata = self._interceptor.pre_test_iam_permissions( request, metadata ) @@ -2637,6 +5651,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.TestIamPermissions", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "TestIamPermissions", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = ( BigtableInstanceAdminRestTransport._TestIamPermissions._get_response( @@ -2656,19 +5697,357 @@ def __call__( raise core_exceptions.from_http_response(response) # Return the response - resp = iam_policy_pb2.TestIamPermissionsResponse() - pb_resp = resp + resp = iam_policy_pb2.TestIamPermissionsResponse() + pb_resp = resp + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_test_iam_permissions(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_test_iam_permissions_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.test_iam_permissions", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "TestIamPermissions", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _UpdateAppProfile( + _BaseBigtableInstanceAdminRestTransport._BaseUpdateAppProfile, + BigtableInstanceAdminRestStub, + ): + def __hash__(self): + return hash("BigtableInstanceAdminRestTransport.UpdateAppProfile") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response + + def __call__( + self, + request: bigtable_instance_admin.UpdateAppProfileRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the update app profile method over HTTP. 
+ + Args: + request (~.bigtable_instance_admin.UpdateAppProfileRequest): + The request object. Request message for + BigtableInstanceAdmin.UpdateAppProfile. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. + + """ + + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseUpdateAppProfile._get_http_options() + ) + + request, metadata = self._interceptor.pre_update_app_profile( + request, metadata + ) + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseUpdateAppProfile._get_transcoded_request( + http_options, request + ) + + body = _BaseBigtableInstanceAdminRestTransport._BaseUpdateAppProfile._get_request_body_json( + transcoded_request + ) + + # Jsonify the query params + query_params = _BaseBigtableInstanceAdminRestTransport._BaseUpdateAppProfile._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.UpdateAppProfile", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "UpdateAppProfile", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + BigtableInstanceAdminRestTransport._UpdateAppProfile._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_update_app_profile(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_update_app_profile_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.update_app_profile", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "UpdateAppProfile", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _UpdateCluster( + _BaseBigtableInstanceAdminRestTransport._BaseUpdateCluster, + BigtableInstanceAdminRestStub, + ): + def __hash__(self): + return hash("BigtableInstanceAdminRestTransport.UpdateCluster") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response + + def __call__( + self, + request: instance.Cluster, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the update cluster method over HTTP. + + Args: + request (~.instance.Cluster): + The request object. A resizable group of nodes in a particular cloud + location, capable of serving all + [Tables][google.bigtable.admin.v2.Table] in the parent + [Instance][google.bigtable.admin.v2.Instance]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options = ( + _BaseBigtableInstanceAdminRestTransport._BaseUpdateCluster._get_http_options() + ) + + request, metadata = self._interceptor.pre_update_cluster(request, metadata) + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseUpdateCluster._get_transcoded_request( + http_options, request + ) + + body = _BaseBigtableInstanceAdminRestTransport._BaseUpdateCluster._get_request_body_json( + transcoded_request + ) + + # Jsonify the query params + query_params = _BaseBigtableInstanceAdminRestTransport._BaseUpdateCluster._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.UpdateCluster", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "UpdateCluster", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = BigtableInstanceAdminRestTransport._UpdateCluster._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_test_iam_permissions(resp) + resp = self._interceptor.post_update_cluster(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_update_cluster_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.update_cluster", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "UpdateCluster", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp - class _UpdateAppProfile( - _BaseBigtableInstanceAdminRestTransport._BaseUpdateAppProfile, + class _UpdateInstance( + _BaseBigtableInstanceAdminRestTransport._BaseUpdateInstance, BigtableInstanceAdminRestStub, ): def __hash__(self): - return hash("BigtableInstanceAdminRestTransport.UpdateAppProfile") + return hash("BigtableInstanceAdminRestTransport.UpdateInstance") @staticmethod def _get_response( @@ -2695,62 +6074,95 @@ def _get_response( def __call__( self, - request: bigtable_instance_admin.UpdateAppProfileRequest, + request: instance.Instance, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, 
str]] = (), - ) -> operations_pb2.Operation: - r"""Call the update app profile method over HTTP. + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> instance.Instance: + r"""Call the update instance method over HTTP. Args: - request (~.bigtable_instance_admin.UpdateAppProfileRequest): - The request object. Request message for - BigtableInstanceAdmin.UpdateAppProfile. + request (~.instance.Instance): + The request object. A collection of Bigtable + [Tables][google.bigtable.admin.v2.Table] and the + resources that serve them. All tables in an instance are + served from all + [Clusters][google.bigtable.admin.v2.Cluster] in the + instance. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. + ~.instance.Instance: + A collection of Bigtable + [Tables][google.bigtable.admin.v2.Table] and the + resources that serve them. All tables in an instance are + served from all + [Clusters][google.bigtable.admin.v2.Cluster] in the + instance. """ http_options = ( - _BaseBigtableInstanceAdminRestTransport._BaseUpdateAppProfile._get_http_options() - ) - request, metadata = self._interceptor.pre_update_app_profile( - request, metadata + _BaseBigtableInstanceAdminRestTransport._BaseUpdateInstance._get_http_options() ) - transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseUpdateAppProfile._get_transcoded_request( + + request, metadata = self._interceptor.pre_update_instance(request, metadata) + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseUpdateInstance._get_transcoded_request( http_options, request ) - body = _BaseBigtableInstanceAdminRestTransport._BaseUpdateAppProfile._get_request_body_json( + body = _BaseBigtableInstanceAdminRestTransport._BaseUpdateInstance._get_request_body_json( transcoded_request ) # Jsonify the query params - query_params = _BaseBigtableInstanceAdminRestTransport._BaseUpdateAppProfile._get_query_params_json( + query_params = _BaseBigtableInstanceAdminRestTransport._BaseUpdateInstance._get_query_params_json( transcoded_request ) - # Send the request - response = ( - BigtableInstanceAdminRestTransport._UpdateAppProfile._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - body, + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.UpdateInstance", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "UpdateInstance", + "httpRequest": http_request, + "metadata": 
http_request["headers"], + }, ) + + # Send the request + response = BigtableInstanceAdminRestTransport._UpdateInstance._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -2759,17 +6171,45 @@ def __call__( raise core_exceptions.from_http_response(response) # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - resp = self._interceptor.post_update_app_profile(resp) + resp = instance.Instance() + pb_resp = instance.Instance.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_update_instance(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_update_instance_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = instance.Instance.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.update_instance", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "UpdateInstance", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp - class _UpdateCluster( - _BaseBigtableInstanceAdminRestTransport._BaseUpdateCluster, + class _UpdateLogicalView( + _BaseBigtableInstanceAdminRestTransport._BaseUpdateLogicalView, BigtableInstanceAdminRestStub, ): def __hash__(self): - return hash("BigtableInstanceAdminRestTransport.UpdateCluster") + return hash("BigtableInstanceAdminRestTransport.UpdateLogicalView") @staticmethod def _get_response( @@ -2796,25 +6236,25 @@ def _get_response( def __call__( self, - request: instance.Cluster, + request: bigtable_instance_admin.UpdateLogicalViewRequest, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: - r"""Call the update cluster method over HTTP. + r"""Call the update logical view method over HTTP. Args: - request (~.instance.Cluster): - The request object. A resizable group of nodes in a particular cloud - location, capable of serving all - [Tables][google.bigtable.admin.v2.Table] in the parent - [Instance][google.bigtable.admin.v2.Instance]. + request (~.bigtable_instance_admin.UpdateLogicalViewRequest): + The request object. Request message for + BigtableInstanceAdmin.UpdateLogicalView. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.operations_pb2.Operation: @@ -2825,31 +6265,63 @@ def __call__( """ http_options = ( - _BaseBigtableInstanceAdminRestTransport._BaseUpdateCluster._get_http_options() + _BaseBigtableInstanceAdminRestTransport._BaseUpdateLogicalView._get_http_options() ) - request, metadata = self._interceptor.pre_update_cluster(request, metadata) - transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseUpdateCluster._get_transcoded_request( + + request, metadata = self._interceptor.pre_update_logical_view( + request, metadata + ) + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseUpdateLogicalView._get_transcoded_request( http_options, request ) - body = _BaseBigtableInstanceAdminRestTransport._BaseUpdateCluster._get_request_body_json( + body = _BaseBigtableInstanceAdminRestTransport._BaseUpdateLogicalView._get_request_body_json( transcoded_request ) # Jsonify the query params - query_params = _BaseBigtableInstanceAdminRestTransport._BaseUpdateCluster._get_query_params_json( + query_params = _BaseBigtableInstanceAdminRestTransport._BaseUpdateLogicalView._get_query_params_json( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.UpdateLogicalView", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "UpdateLogicalView", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request - response = BigtableInstanceAdminRestTransport._UpdateCluster._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - body, + response = ( + BigtableInstanceAdminRestTransport._UpdateLogicalView._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -2860,15 +6332,41 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) - resp = self._interceptor.post_update_cluster(resp) + + resp = self._interceptor.post_update_logical_view(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_update_logical_view_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.update_logical_view", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "UpdateLogicalView", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp - class _UpdateInstance( - 
_BaseBigtableInstanceAdminRestTransport._BaseUpdateInstance, + class _UpdateMaterializedView( + _BaseBigtableInstanceAdminRestTransport._BaseUpdateMaterializedView, BigtableInstanceAdminRestStub, ): def __hash__(self): - return hash("BigtableInstanceAdminRestTransport.UpdateInstance") + return hash("BigtableInstanceAdminRestTransport.UpdateMaterializedView") @staticmethod def _get_response( @@ -2895,58 +6393,83 @@ def _get_response( def __call__( self, - request: instance.Instance, + request: bigtable_instance_admin.UpdateMaterializedViewRequest, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> instance.Instance: - r"""Call the update instance method over HTTP. + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the update materialized view method over HTTP. Args: - request (~.instance.Instance): - The request object. A collection of Bigtable - [Tables][google.bigtable.admin.v2.Table] and the - resources that serve them. All tables in an instance are - served from all - [Clusters][google.bigtable.admin.v2.Cluster] in the - instance. + request (~.bigtable_instance_admin.UpdateMaterializedViewRequest): + The request object. Request message for + BigtableInstanceAdmin.UpdateMaterializedView. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: - ~.instance.Instance: - A collection of Bigtable - [Tables][google.bigtable.admin.v2.Table] and the - resources that serve them. All tables in an instance are - served from all - [Clusters][google.bigtable.admin.v2.Cluster] in the - instance. + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
""" http_options = ( - _BaseBigtableInstanceAdminRestTransport._BaseUpdateInstance._get_http_options() + _BaseBigtableInstanceAdminRestTransport._BaseUpdateMaterializedView._get_http_options() ) - request, metadata = self._interceptor.pre_update_instance(request, metadata) - transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseUpdateInstance._get_transcoded_request( + + request, metadata = self._interceptor.pre_update_materialized_view( + request, metadata + ) + transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseUpdateMaterializedView._get_transcoded_request( http_options, request ) - body = _BaseBigtableInstanceAdminRestTransport._BaseUpdateInstance._get_request_body_json( + body = _BaseBigtableInstanceAdminRestTransport._BaseUpdateMaterializedView._get_request_body_json( transcoded_request ) # Jsonify the query params - query_params = _BaseBigtableInstanceAdminRestTransport._BaseUpdateInstance._get_query_params_json( + query_params = _BaseBigtableInstanceAdminRestTransport._BaseUpdateMaterializedView._get_query_params_json( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.UpdateMaterializedView", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "UpdateMaterializedView", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request - response = BigtableInstanceAdminRestTransport._UpdateInstance._get_response( + response = BigtableInstanceAdminRestTransport._UpdateMaterializedView._get_response( self._host, metadata, query_params, @@ -2962,11 +6485,35 @@ def __call__( raise core_exceptions.from_http_response(response) # Return the response - resp = instance.Instance() - pb_resp = instance.Instance.pb(resp) + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_update_instance(resp) + resp = self._interceptor.post_update_materialized_view(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_update_materialized_view_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.update_materialized_view", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "UpdateMaterializedView", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp @property @@ -2999,6 +6546,27 @@ def create_instance( # In C++ this would require a dynamic_cast return self._CreateInstance(self._session, self._host, 
self._interceptor) # type: ignore + @property + def create_logical_view( + self, + ) -> Callable[ + [bigtable_instance_admin.CreateLogicalViewRequest], operations_pb2.Operation + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._CreateLogicalView(self._session, self._host, self._interceptor) # type: ignore + + @property + def create_materialized_view( + self, + ) -> Callable[ + [bigtable_instance_admin.CreateMaterializedViewRequest], + operations_pb2.Operation, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._CreateMaterializedView(self._session, self._host, self._interceptor) # type: ignore + @property def delete_app_profile( self, @@ -3023,6 +6591,24 @@ def delete_instance( # In C++ this would require a dynamic_cast return self._DeleteInstance(self._session, self._host, self._interceptor) # type: ignore + @property + def delete_logical_view( + self, + ) -> Callable[[bigtable_instance_admin.DeleteLogicalViewRequest], empty_pb2.Empty]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._DeleteLogicalView(self._session, self._host, self._interceptor) # type: ignore + + @property + def delete_materialized_view( + self, + ) -> Callable[ + [bigtable_instance_admin.DeleteMaterializedViewRequest], empty_pb2.Empty + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._DeleteMaterializedView(self._session, self._host, self._interceptor) # type: ignore + @property def get_app_profile( self, @@ -3055,6 +6641,26 @@ def get_instance( # In C++ this would require a dynamic_cast return self._GetInstance(self._session, self._host, self._interceptor) # type: ignore + @property + def get_logical_view( + self, + ) -> Callable[ + [bigtable_instance_admin.GetLogicalViewRequest], instance.LogicalView + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetLogicalView(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_materialized_view( + self, + ) -> Callable[ + [bigtable_instance_admin.GetMaterializedViewRequest], instance.MaterializedView + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetMaterializedView(self._session, self._host, self._interceptor) # type: ignore + @property def list_app_profiles( self, @@ -3099,6 +6705,28 @@ def list_instances( # In C++ this would require a dynamic_cast return self._ListInstances(self._session, self._host, self._interceptor) # type: ignore + @property + def list_logical_views( + self, + ) -> Callable[ + [bigtable_instance_admin.ListLogicalViewsRequest], + bigtable_instance_admin.ListLogicalViewsResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._ListLogicalViews(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_materialized_views( + self, + ) -> Callable[ + [bigtable_instance_admin.ListMaterializedViewsRequest], + bigtable_instance_admin.ListMaterializedViewsResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListMaterializedViews(self._session, self._host, self._interceptor) # type: ignore + @property def partial_update_cluster( self, @@ -3160,6 +6788,27 @@ def update_instance(self) -> Callable[[instance.Instance], instance.Instance]: # In C++ this would require a dynamic_cast return self._UpdateInstance(self._session, self._host, self._interceptor) # type: ignore + @property + def update_logical_view( + self, + ) -> Callable[ + [bigtable_instance_admin.UpdateLogicalViewRequest], operations_pb2.Operation + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._UpdateLogicalView(self._session, self._host, self._interceptor) # type: ignore + + @property + def update_materialized_view( + self, + ) -> Callable[ + [bigtable_instance_admin.UpdateMaterializedViewRequest], + operations_pb2.Operation, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._UpdateMaterializedView(self._session, self._host, self._interceptor) # type: ignore + @property def kind(self) -> str: return "rest" diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest_base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest_base.py index 7b0c1a4ba343..5851243ed2a9 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest_base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest_base.py @@ -269,6 +269,126 @@ def _get_query_params_json(transcoded_request): query_params["$alt"] = "json;enum-encoding=int" return query_params + class _BaseCreateLogicalView: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "logicalViewId": "", + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{parent=projects/*/instances/*}/logicalViews", + "body": "logical_view", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_instance_admin.CreateLogicalViewRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = 
json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseCreateLogicalView._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseCreateMaterializedView: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "materializedViewId": "", + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{parent=projects/*/instances/*}/materializedViews", + "body": "materialized_view", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_instance_admin.CreateMaterializedViewRequest.pb( + request + ) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseCreateMaterializedView._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + class _BaseDeleteAppProfile: def __hash__(self): # pragma: NO COVER return NotImplementedError("__hash__ must be implemented.") @@ -412,6 +532,102 @@ def _get_query_params_json(transcoded_request): query_params["$alt"] = "json;enum-encoding=int" return query_params + class _BaseDeleteLogicalView: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/v2/{name=projects/*/instances/*/logicalViews/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_instance_admin.DeleteLogicalViewRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseDeleteLogicalView._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseDeleteMaterializedView: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + 
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/v2/{name=projects/*/instances/*/materializedViews/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_instance_admin.DeleteMaterializedViewRequest.pb( + request + ) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseDeleteMaterializedView._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + class _BaseGetAppProfile: def __hash__(self): # pragma: NO COVER return NotImplementedError("__hash__ must be implemented.") @@ -528,6 +744,16 @@ def _get_http_options(): "uri": "/v2/{resource=projects/*/instances/*}:getIamPolicy", "body": "*", }, + { + "method": "post", + "uri": "/v2/{resource=projects/*/instances/*/materializedViews/*}:getIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v2/{resource=projects/*/instances/*/logicalViews/*}:getIamPolicy", + "body": "*", + }, ] return http_options @@ -610,6 +836,100 @@ def _get_query_params_json(transcoded_request): query_params["$alt"] = "json;enum-encoding=int" return query_params + class _BaseGetLogicalView: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{name=projects/*/instances/*/logicalViews/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_instance_admin.GetLogicalViewRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseGetLogicalView._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseGetMaterializedView: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": 
"/v2/{name=projects/*/instances/*/materializedViews/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_instance_admin.GetMaterializedViewRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseGetMaterializedView._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + class _BaseListAppProfiles: def __hash__(self): # pragma: NO COVER return NotImplementedError("__hash__ must be implemented.") @@ -798,6 +1118,102 @@ def _get_query_params_json(transcoded_request): query_params["$alt"] = "json;enum-encoding=int" return query_params + class _BaseListLogicalViews: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{parent=projects/*/instances/*}/logicalViews", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_instance_admin.ListLogicalViewsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseListLogicalViews._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseListMaterializedViews: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{parent=projects/*/instances/*}/materializedViews", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_instance_admin.ListMaterializedViewsRequest.pb( + request + ) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseListMaterializedViews._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + class 
_BasePartialUpdateCluster: def __hash__(self): # pragma: NO COVER return NotImplementedError("__hash__ must be implemented.") @@ -940,6 +1356,16 @@ def _get_http_options(): "uri": "/v2/{resource=projects/*/instances/*}:setIamPolicy", "body": "*", }, + { + "method": "post", + "uri": "/v2/{resource=projects/*/instances/*/materializedViews/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v2/{resource=projects/*/instances/*/logicalViews/*}:setIamPolicy", + "body": "*", + }, ] return http_options @@ -997,6 +1423,16 @@ def _get_http_options(): "uri": "/v2/{resource=projects/*/instances/*}:testIamPermissions", "body": "*", }, + { + "method": "post", + "uri": "/v2/{resource=projects/*/instances/*/materializedViews/*}:testIamPermissions", + "body": "*", + }, + { + "method": "post", + "uri": "/v2/{resource=projects/*/instances/*/logicalViews/*}:testIamPermissions", + "body": "*", + }, ] return http_options @@ -1190,5 +1626,121 @@ def _get_query_params_json(transcoded_request): query_params["$alt"] = "json;enum-encoding=int" return query_params + class _BaseUpdateLogicalView: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "patch", + "uri": "/v2/{logical_view.name=projects/*/instances/*/logicalViews/*}", + "body": "logical_view", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_instance_admin.UpdateLogicalViewRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseUpdateLogicalView._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + + class _BaseUpdateMaterializedView: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "patch", + "uri": "/v2/{materialized_view.name=projects/*/instances/*/materializedViews/*}", + "body": "materialized_view", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_instance_admin.UpdateMaterializedViewRequest.pb( + request + ) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body 
+ + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableInstanceAdminRestTransport._BaseUpdateMaterializedView._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + __all__ = ("_BaseBigtableInstanceAdminRestTransport",) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index 2e9eb13ebf5a..a10691b71ea7 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # +import logging as std_logging from collections import OrderedDict import re from typing import ( @@ -49,6 +50,7 @@ from google.cloud.bigtable_admin_v2.types import bigtable_table_admin from google.cloud.bigtable_admin_v2.types import table from google.cloud.bigtable_admin_v2.types import table as gba_table +from google.cloud.bigtable_admin_v2.types import types from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.protobuf import field_mask_pb2 # type: ignore @@ -57,6 +59,15 @@ from .transports.grpc_asyncio import BigtableTableAdminGrpcAsyncIOTransport from .client import BigtableTableAdminClient +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + class BigtableTableAdminAsyncClient: """Service for creating, configuring, and deleting Cloud @@ -289,6 +300,28 @@ def __init__( client_info=client_info, ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ): # pragma: NO COVER + _LOGGER.debug( + "Created client `google.bigtable.admin_v2.BigtableTableAdminAsyncClient`.", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "universeDomain": getattr( + self._client._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._client._transport, "_credentials") + else { + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "credentialsType": None, + }, + ) + async def create_table( self, request: Optional[Union[bigtable_table_admin.CreateTableRequest, dict]] = None, @@ -298,7 +331,7 @@ async def create_table( table: Optional[gba_table.Table] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> gba_table.Table: r"""Creates a new table in the specified instance. 
The table can be created with a full set of initial @@ -333,8 +366,10 @@ async def create_table( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.Table: @@ -347,7 +382,10 @@ async def create_table( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, table_id, table]) + flattened_params = [parent, table_id, table] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -405,7 +443,7 @@ async def create_table_from_snapshot( source_snapshot: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Creates a new table from the specified snapshot. The target table must not exist. The snapshot and the table @@ -457,8 +495,10 @@ async def create_table_from_snapshot( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -472,7 +512,10 @@ async def create_table_from_snapshot( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, table_id, source_snapshot]) + flattened_params = [parent, table_id, source_snapshot] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -534,7 +577,7 @@ async def list_tables( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListTablesAsyncPager: r"""Lists all tables served from a specified instance. @@ -553,8 +596,10 @@ async def list_tables( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListTablesAsyncPager: @@ -568,7 +613,10 @@ async def list_tables( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -629,7 +677,7 @@ async def get_table( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> table.Table: r"""Gets metadata information about the specified table. @@ -648,8 +696,10 @@ async def get_table( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.Table: @@ -662,7 +712,10 @@ async def get_table( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -713,7 +766,7 @@ async def update_table( update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Updates a specified table. @@ -740,6 +793,7 @@ async def update_table( - ``change_stream_config`` - ``change_stream_config.retention_period`` - ``deletion_protection`` + - ``row_key_schema`` If ``column_families`` is set in ``update_mask``, it will return an UNIMPLEMENTED error. @@ -750,8 +804,10 @@ async def update_table( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -765,7 +821,10 @@ async def update_table( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([table, update_mask]) + flattened_params = [table, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -827,7 +886,7 @@ async def delete_table( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Permanently deletes a specified table and all of its data. @@ -847,13 +906,18 @@ async def delete_table( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -902,7 +966,7 @@ async def undelete_table( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Restores a specified table which was accidentally deleted. @@ -922,8 +986,10 @@ async def undelete_table( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -937,7 +1003,10 @@ async def undelete_table( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -999,7 +1068,7 @@ async def create_authorized_view( authorized_view_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Creates a new AuthorizedView in a table. @@ -1035,8 +1104,10 @@ async def create_authorized_view( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -1051,7 +1122,10 @@ async def create_authorized_view( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, authorized_view, authorized_view_id]) + flattened_params = [parent, authorized_view, authorized_view_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1115,7 +1189,7 @@ async def list_authorized_views( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListAuthorizedViewsAsyncPager: r"""Lists all AuthorizedViews from a specific table. @@ -1134,8 +1208,10 @@ async def list_authorized_views( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListAuthorizedViewsAsyncPager: @@ -1149,7 +1225,10 @@ async def list_authorized_views( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1212,7 +1291,7 @@ async def get_authorized_view( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> table.AuthorizedView: r"""Gets information from a specified AuthorizedView. @@ -1231,8 +1310,10 @@ async def get_authorized_view( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.AuthorizedView: @@ -1247,7 +1328,10 @@ async def get_authorized_view( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1300,7 +1384,7 @@ async def update_authorized_view( update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Updates an AuthorizedView in a table. @@ -1333,8 +1417,10 @@ async def update_authorized_view( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -1349,7 +1435,10 @@ async def update_authorized_view( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([authorized_view, update_mask]) + flattened_params = [authorized_view, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1413,7 +1502,7 @@ async def delete_authorized_view( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Permanently deletes a specified AuthorizedView. @@ -1432,13 +1521,18 @@ async def delete_authorized_view( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1492,7 +1586,7 @@ async def modify_column_families( ] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> table.Table: r"""Performs a series of column family modifications on the specified table. Either all or none of the @@ -1527,8 +1621,10 @@ async def modify_column_families( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.Table: @@ -1541,7 +1637,10 @@ async def modify_column_families( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name, modifications]) + flattened_params = [name, modifications] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1592,7 +1691,7 @@ async def drop_row_range( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Permanently drop/delete a row range from a specified table. The request can specify whether to delete all @@ -1606,8 +1705,10 @@ async def drop_row_range( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. # - Use the request object if provided (there's no risk of modifying the input as @@ -1647,7 +1748,7 @@ async def generate_consistency_token( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable_table_admin.GenerateConsistencyTokenResponse: r"""Generates a consistency token for a Table, which can be used in CheckConsistency to check whether mutations @@ -1670,8 +1771,10 @@ async def generate_consistency_token( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenResponse: @@ -1682,7 +1785,10 @@ async def generate_consistency_token( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1737,7 +1843,7 @@ async def check_consistency( consistency_token: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable_table_admin.CheckConsistencyResponse: r"""Checks replication consistency based on a consistency token, that is, if replication has caught up based on @@ -1766,8 +1872,10 @@ async def check_consistency( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.CheckConsistencyResponse: @@ -1778,7 +1886,10 @@ async def check_consistency( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, consistency_token]) + flattened_params = [name, consistency_token] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1835,7 +1946,7 @@ async def snapshot_table( description: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Creates a new snapshot in the specified cluster from the specified source table. The cluster and the table @@ -1893,8 +2004,10 @@ async def snapshot_table( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -1915,7 +2028,10 @@ async def snapshot_table( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name, cluster, snapshot_id, description]) + flattened_params = [name, cluster, snapshot_id, description] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1979,7 +2095,7 @@ async def get_snapshot( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> table.Snapshot: r"""Gets metadata information about the specified snapshot. @@ -2012,8 +2128,10 @@ async def get_snapshot( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.Snapshot: @@ -2035,7 +2153,10 @@ async def get_snapshot( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2087,7 +2208,7 @@ async def list_snapshots( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListSnapshotsAsyncPager: r"""Lists all snapshots associated with the specified cluster. @@ -2123,8 +2244,10 @@ async def list_snapshots( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListSnapshotsAsyncPager: @@ -2145,7 +2268,10 @@ async def list_snapshots( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2208,7 +2334,7 @@ async def delete_snapshot( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Permanently deletes the specified snapshot. @@ -2241,13 +2367,18 @@ async def delete_snapshot( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2296,7 +2427,7 @@ async def create_backup( backup: Optional[table.Backup] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Starts creating a new Cloud Bigtable Backup. The returned backup [long-running operation][google.longrunning.Operation] can be @@ -2341,8 +2472,10 @@ async def create_backup( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -2356,7 +2489,10 @@ async def create_backup( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, backup_id, backup]) + flattened_params = [parent, backup_id, backup] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2418,7 +2554,7 @@ async def get_backup( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> table.Backup: r"""Gets metadata on a pending or completed Cloud Bigtable Backup. @@ -2437,8 +2573,10 @@ async def get_backup( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.Backup: @@ -2447,7 +2585,10 @@ async def get_backup( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2498,7 +2639,7 @@ async def update_backup( update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> table.Backup: r"""Updates a pending or completed Cloud Bigtable Backup. @@ -2532,8 +2673,10 @@ async def update_backup( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.Backup: @@ -2542,7 +2685,10 @@ async def update_backup( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
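# A hypothetical sketch (not from this patch) of the calling convention the
# ValueError above enforces: each method accepts either a single `request`
# object/dict or the flattened keyword arguments, never both. Resource names
# are placeholders, and a real backup would also need `expire_time` set.
from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableTableAdminClient()

# Style 1: flattened arguments only.
operation = client.create_backup(
    parent="projects/p/instances/i/clusters/c",
    backup_id="my-backup",
    backup={"source_table": "projects/p/instances/i/tables/t"},
)

# Style 2: a request object (or equivalent dict) only.
operation = client.create_backup(
    request={
        "parent": "projects/p/instances/i/clusters/c",
        "backup_id": "my-backup",
        "backup": {"source_table": "projects/p/instances/i/tables/t"},
    }
)

# Mixing the two styles raises the ValueError shown above.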
- has_flattened_params = any([backup, update_mask]) + flattened_params = [backup, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2596,7 +2742,7 @@ async def delete_backup( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Deletes a pending or completed Cloud Bigtable backup. @@ -2615,13 +2761,18 @@ async def delete_backup( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2668,7 +2819,7 @@ async def list_backups( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListBackupsAsyncPager: r"""Lists Cloud Bigtable backups. Returns both completed and pending backups. @@ -2691,8 +2842,10 @@ async def list_backups( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListBackupsAsyncPager: @@ -2706,7 +2859,10 @@ async def list_backups( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2766,7 +2922,7 @@ async def restore_table( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Create a new table by restoring from a completed backup. The returned table [long-running @@ -2784,8 +2940,10 @@ async def restore_table( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -2846,7 +3004,7 @@ async def copy_backup( expire_time: Optional[timestamp_pb2.Timestamp] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Copy a Cloud Bigtable backup to a new backup in the destination cluster located in the destination instance @@ -2903,8 +3061,10 @@ async def copy_backup( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -2918,7 +3078,10 @@ async def copy_backup( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, backup_id, source_backup, expire_time]) + flattened_params = [parent, backup_id, source_backup, expire_time] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2982,7 +3145,7 @@ async def get_iam_policy( resource: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: r"""Gets the access control policy for a Table or Backup resource. 
Returns an empty policy if the resource exists @@ -3003,8 +3166,10 @@ async def get_iam_policy( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.iam.v1.policy_pb2.Policy: @@ -3043,7 +3208,10 @@ async def get_iam_policy( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([resource]) + flattened_params = [resource] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3090,7 +3258,7 @@ async def set_iam_policy( resource: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: r"""Sets the access control policy on a Table or Backup resource. Replaces any existing policy. @@ -3110,8 +3278,10 @@ async def set_iam_policy( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.iam.v1.policy_pb2.Policy: @@ -3150,7 +3320,10 @@ async def set_iam_policy( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([resource]) + flattened_params = [resource] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3198,7 +3371,7 @@ async def test_iam_permissions( permissions: Optional[MutableSequence[str]] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: r"""Returns permissions that the caller has on the specified Table or Backup resource. @@ -3227,8 +3400,10 @@ async def test_iam_permissions( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse: @@ -3237,7 +3412,10 @@ async def test_iam_permissions( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([resource, permissions]) + flattened_params = [resource, permissions] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index 502f0085c295..3204f43a1b64 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -14,6 +14,9 @@ # limitations under the License. # from collections import OrderedDict +from http import HTTPStatus +import json +import logging as std_logging import os import re from typing import ( @@ -48,12 +51,22 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.Retry, object, None] # type: ignore +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + from google.api_core import operation # type: ignore from google.api_core import operation_async # type: ignore from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import pagers from google.cloud.bigtable_admin_v2.types import bigtable_table_admin from google.cloud.bigtable_admin_v2.types import table from google.cloud.bigtable_admin_v2.types import table as gba_table +from google.cloud.bigtable_admin_v2.types import types from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore from google.protobuf import field_mask_pb2 # type: ignore @@ -623,6 +636,33 @@ def _validate_universe_domain(self): # NOTE (b/349488459): universe validation is disabled until further notice. return True + def _add_cred_info_for_auth_errors( + self, error: core_exceptions.GoogleAPICallError + ) -> None: + """Adds credential info string to error details for 401/403/404 errors. + + Args: + error (google.api_core.exceptions.GoogleAPICallError): The error to add the cred info. 
+ """ + if error.code not in [ + HTTPStatus.UNAUTHORIZED, + HTTPStatus.FORBIDDEN, + HTTPStatus.NOT_FOUND, + ]: + return + + cred = self._transport._credentials + + # get_cred_info is only available in google-auth>=2.35.0 + if not hasattr(cred, "get_cred_info"): + return + + # ignore the type check since pypy test fails when get_cred_info + # is not available + cred_info = cred.get_cred_info() # type: ignore + if cred_info and hasattr(error._details, "append"): + error._details.append(json.dumps(cred_info)) + @property def api_endpoint(self): """Return the API endpoint used by the client instance. @@ -731,6 +771,10 @@ def __init__( # Initialize the universe domain validation. self._is_universe_domain_valid = False + if CLIENT_LOGGING_SUPPORTED: # pragma: NO COVER + # Setup logging. + client_logging.initialize_logging() + api_key_value = getattr(self._client_options, "api_key", None) if api_key_value and credentials: raise ValueError( @@ -797,6 +841,29 @@ def __init__( api_audience=self._client_options.api_audience, ) + if "async" not in str(self._transport): + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ): # pragma: NO COVER + _LOGGER.debug( + "Created client `google.bigtable.admin_v2.BigtableTableAdminClient`.", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "universeDomain": getattr( + self._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._transport, "_credentials") + else { + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "credentialsType": None, + }, + ) + def create_table( self, request: Optional[Union[bigtable_table_admin.CreateTableRequest, dict]] = None, @@ -806,7 +873,7 @@ def create_table( table: Optional[gba_table.Table] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> gba_table.Table: r"""Creates a new table in the specified instance. The table can be created with a full set of initial @@ -841,8 +908,10 @@ def create_table( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.Table: @@ -855,7 +924,10 @@ def create_table( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, table_id, table]) + flattened_params = [parent, table_id, table] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -910,7 +982,7 @@ def create_table_from_snapshot( source_snapshot: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Creates a new table from the specified snapshot. The target table must not exist. The snapshot and the table @@ -962,8 +1034,10 @@ def create_table_from_snapshot( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -977,7 +1051,10 @@ def create_table_from_snapshot( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, table_id, source_snapshot]) + flattened_params = [parent, table_id, source_snapshot] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1038,7 +1115,7 @@ def list_tables( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListTablesPager: r"""Lists all tables served from a specified instance. @@ -1057,8 +1134,10 @@ def list_tables( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListTablesPager: @@ -1072,7 +1151,10 @@ def list_tables( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
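# A hypothetical sketch (not from this patch) of how the DEBUG record emitted
# by the __init__ logging hook added earlier in this file can be surfaced. The
# record only fires when google-api-core ships the optional client_logging
# module and the module logger is enabled for DEBUG; the logger name below is
# simply this module's __name__.
import logging

logging.basicConfig(level=logging.DEBUG)
logging.getLogger(
    "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.client"
).setLevel(logging.DEBUG)

# Recent google-api-core releases can also switch this on via the
# GOOGLE_SDK_PYTHON_LOGGING_SCOPE environment variable consumed by
# client_logging.initialize_logging(); that behavior lives in api-core,
# not in this diff.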
- has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1130,7 +1212,7 @@ def get_table( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> table.Table: r"""Gets metadata information about the specified table. @@ -1149,8 +1231,10 @@ def get_table( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.Table: @@ -1163,7 +1247,10 @@ def get_table( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1211,7 +1298,7 @@ def update_table( update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Updates a specified table. @@ -1238,6 +1325,7 @@ def update_table( - ``change_stream_config`` - ``change_stream_config.retention_period`` - ``deletion_protection`` + - ``row_key_schema`` If ``column_families`` is set in ``update_mask``, it will return an UNIMPLEMENTED error. @@ -1248,8 +1336,10 @@ def update_table( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -1263,7 +1353,10 @@ def update_table( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
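# A minimal, hypothetical sketch (not from this patch) of an update_table call:
# only the paths named in update_mask are applied, and the hunk above adds
# ``row_key_schema`` to the documented set of updatable paths. Resource names
# are placeholders.
from google.cloud import bigtable_admin_v2
from google.protobuf import field_mask_pb2

client = bigtable_admin_v2.BigtableTableAdminClient()

operation = client.update_table(
    table={
        "name": "projects/p/instances/i/tables/t",
        "deletion_protection": True,
    },
    update_mask=field_mask_pb2.FieldMask(paths=["deletion_protection"]),
)
operation.result()  # block until the long-running update completes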
- has_flattened_params = any([table, update_mask]) + flattened_params = [table, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1322,7 +1415,7 @@ def delete_table( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Permanently deletes a specified table and all of its data. @@ -1342,13 +1435,18 @@ def delete_table( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1394,7 +1492,7 @@ def undelete_table( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Restores a specified table which was accidentally deleted. @@ -1414,8 +1512,10 @@ def undelete_table( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -1429,7 +1529,10 @@ def undelete_table( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1488,7 +1591,7 @@ def create_authorized_view( authorized_view_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Creates a new AuthorizedView in a table. 
@@ -1524,8 +1627,10 @@ def create_authorized_view( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -1540,7 +1645,10 @@ def create_authorized_view( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, authorized_view, authorized_view_id]) + flattened_params = [parent, authorized_view, authorized_view_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1601,7 +1709,7 @@ def list_authorized_views( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListAuthorizedViewsPager: r"""Lists all AuthorizedViews from a specific table. @@ -1620,8 +1728,10 @@ def list_authorized_views( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListAuthorizedViewsPager: @@ -1635,7 +1745,10 @@ def list_authorized_views( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1695,7 +1808,7 @@ def get_authorized_view( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> table.AuthorizedView: r"""Gets information from a specified AuthorizedView. @@ -1714,8 +1827,10 @@ def get_authorized_view( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.AuthorizedView: @@ -1730,7 +1845,10 @@ def get_authorized_view( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1780,7 +1898,7 @@ def update_authorized_view( update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Updates an AuthorizedView in a table. @@ -1813,8 +1931,10 @@ def update_authorized_view( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -1829,7 +1949,10 @@ def update_authorized_view( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([authorized_view, update_mask]) + flattened_params = [authorized_view, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1890,7 +2013,7 @@ def delete_authorized_view( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Permanently deletes a specified AuthorizedView. @@ -1909,13 +2032,18 @@ def delete_authorized_view( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1966,7 +2094,7 @@ def modify_column_families( ] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> table.Table: r"""Performs a series of column family modifications on the specified table. Either all or none of the @@ -2001,8 +2129,10 @@ def modify_column_families( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.Table: @@ -2015,7 +2145,10 @@ def modify_column_families( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, modifications]) + flattened_params = [name, modifications] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2063,7 +2196,7 @@ def drop_row_range( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Permanently drop/delete a row range from a specified table. The request can specify whether to delete all @@ -2077,8 +2210,10 @@ def drop_row_range( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. # - Use the request object if provided (there's no risk of modifying the input as @@ -2116,7 +2251,7 @@ def generate_consistency_token( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable_table_admin.GenerateConsistencyTokenResponse: r"""Generates a consistency token for a Table, which can be used in CheckConsistency to check whether mutations @@ -2139,8 +2274,10 @@ def generate_consistency_token( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. 
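# A minimal, hypothetical sketch (not from this patch) of how
# GenerateConsistencyToken and CheckConsistency are used together: take a token
# after issuing mutations, then poll until replication reports it is consistent.
# The table name and poll interval are placeholders.
import time

from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableTableAdminClient()
table_name = "projects/p/instances/i/tables/t"

token = client.generate_consistency_token(name=table_name).consistency_token
while not client.check_consistency(name=table_name, consistency_token=token).consistent:
    time.sleep(5)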
timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenResponse: @@ -2151,7 +2288,10 @@ def generate_consistency_token( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2205,7 +2345,7 @@ def check_consistency( consistency_token: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable_table_admin.CheckConsistencyResponse: r"""Checks replication consistency based on a consistency token, that is, if replication has caught up based on @@ -2234,8 +2374,10 @@ def check_consistency( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.CheckConsistencyResponse: @@ -2246,7 +2388,10 @@ def check_consistency( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, consistency_token]) + flattened_params = [name, consistency_token] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2300,7 +2445,7 @@ def snapshot_table( description: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Creates a new snapshot in the specified cluster from the specified source table. The cluster and the table @@ -2358,8 +2503,10 @@ def snapshot_table( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -2380,7 +2527,10 @@ def snapshot_table( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, cluster, snapshot_id, description]) + flattened_params = [name, cluster, snapshot_id, description] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2441,7 +2591,7 @@ def get_snapshot( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> table.Snapshot: r"""Gets metadata information about the specified snapshot. @@ -2474,8 +2624,10 @@ def get_snapshot( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.Snapshot: @@ -2497,7 +2649,10 @@ def get_snapshot( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2546,7 +2701,7 @@ def list_snapshots( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListSnapshotsPager: r"""Lists all snapshots associated with the specified cluster. @@ -2582,8 +2737,10 @@ def list_snapshots( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListSnapshotsPager: @@ -2604,7 +2761,10 @@ def list_snapshots( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
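# A minimal, hypothetical sketch (not from this patch) of consuming the
# long-running operation returned by snapshot_table: the Snapshot message only
# becomes available once operation.result() completes. Resource names and the
# timeout are placeholders.
from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableTableAdminClient()

operation = client.snapshot_table(
    name="projects/p/instances/i/tables/t",
    cluster="projects/p/instances/i/clusters/c",
    snapshot_id="my-snapshot",
    description="nightly snapshot",
)
snapshot = operation.result(timeout=600)  # google.cloud.bigtable_admin_v2.types.Snapshot
print(snapshot.name)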
- has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2664,7 +2824,7 @@ def delete_snapshot( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Permanently deletes the specified snapshot. @@ -2697,13 +2857,18 @@ def delete_snapshot( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2749,7 +2914,7 @@ def create_backup( backup: Optional[table.Backup] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Starts creating a new Cloud Bigtable Backup. The returned backup [long-running operation][google.longrunning.Operation] can be @@ -2794,8 +2959,10 @@ def create_backup( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -2809,7 +2976,10 @@ def create_backup( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, backup_id, backup]) + flattened_params = [parent, backup_id, backup] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2868,7 +3038,7 @@ def get_backup( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> table.Backup: r"""Gets metadata on a pending or completed Cloud Bigtable Backup. @@ -2887,8 +3057,10 @@ def get_backup( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.Backup: @@ -2897,7 +3069,10 @@ def get_backup( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -2945,7 +3120,7 @@ def update_backup( update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> table.Backup: r"""Updates a pending or completed Cloud Bigtable Backup. @@ -2979,8 +3154,10 @@ def update_backup( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.Backup: @@ -2989,7 +3166,10 @@ def update_backup( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([backup, update_mask]) + flattened_params = [backup, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3040,7 +3220,7 @@ def delete_backup( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Deletes a pending or completed Cloud Bigtable backup. @@ -3059,13 +3239,18 @@ def delete_backup( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3109,7 +3294,7 @@ def list_backups( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListBackupsPager: r"""Lists Cloud Bigtable backups. Returns both completed and pending backups. @@ -3132,8 +3317,10 @@ def list_backups( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListBackupsPager: @@ -3147,7 +3334,10 @@ def list_backups( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3204,7 +3394,7 @@ def restore_table( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Create a new table by restoring from a completed backup. The returned table [long-running @@ -3222,8 +3412,10 @@ def restore_table( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -3282,7 +3474,7 @@ def copy_backup( expire_time: Optional[timestamp_pb2.Timestamp] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Copy a Cloud Bigtable backup to a new backup in the destination cluster located in the destination instance @@ -3339,8 +3531,10 @@ def copy_backup( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -3354,7 +3548,10 @@ def copy_backup( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, backup_id, source_backup, expire_time]) + flattened_params = [parent, backup_id, source_backup, expire_time] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3415,7 +3612,7 @@ def get_iam_policy( resource: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: r"""Gets the access control policy for a Table or Backup resource. Returns an empty policy if the resource exists @@ -3436,8 +3633,10 @@ def get_iam_policy( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.iam.v1.policy_pb2.Policy: @@ -3476,7 +3675,10 @@ def get_iam_policy( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([resource]) + flattened_params = [resource] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3524,7 +3726,7 @@ def set_iam_policy( resource: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: r"""Sets the access control policy on a Table or Backup resource. Replaces any existing policy. @@ -3544,8 +3746,10 @@ def set_iam_policy( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.iam.v1.policy_pb2.Policy: @@ -3584,7 +3788,10 @@ def set_iam_policy( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([resource]) + flattened_params = [resource] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -3633,7 +3840,7 @@ def test_iam_permissions( permissions: Optional[MutableSequence[str]] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: r"""Returns permissions that the caller has on the specified Table or Backup resource. @@ -3662,8 +3869,10 @@ def test_iam_permissions( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
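The widened metadata annotation above follows the standard gRPC convention: values are strings, except for keys ending in the `-bin` suffix, whose values are raw bytes that gRPC base64-encodes on the wire. A short sketch of what a caller could now pass; the key names are purely illustrative:

    from typing import Sequence, Tuple, Union

    Metadata = Sequence[Tuple[str, Union[str, bytes]]]

    metadata: Metadata = (
        # Plain keys carry str values.
        ("x-goog-request-params", "resource=projects/my-project/instances/my-instance"),
        # Keys ending in "-bin" carry bytes.
        ("my-trace-context-bin", b"\x0a\x04\x08\x01\x10\x02"),
    )

    # e.g. client.get_iam_policy(resource=..., metadata=metadata)
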
Returns: google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse: @@ -3672,7 +3881,10 @@ def test_iam_permissions( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([resource, permissions]) + flattened_params = [resource, permissions] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py index 5e20fbc5f5f3..4351a58148b7 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py @@ -67,7 +67,7 @@ def __init__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiate the pager. @@ -81,8 +81,10 @@ def __init__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = bigtable_table_admin.ListTablesRequest(request) @@ -141,7 +143,7 @@ def __init__( *, retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiates the pager. @@ -155,8 +157,10 @@ def __init__( retry (google.api_core.retry.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = bigtable_table_admin.ListTablesRequest(request) @@ -219,7 +223,7 @@ def __init__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiate the pager. @@ -233,8 +237,10 @@ def __init__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = bigtable_table_admin.ListAuthorizedViewsRequest(request) @@ -295,7 +301,7 @@ def __init__( *, retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiates the pager. @@ -309,8 +315,10 @@ def __init__( retry (google.api_core.retry.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = bigtable_table_admin.ListAuthorizedViewsRequest(request) @@ -375,7 +383,7 @@ def __init__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiate the pager. @@ -389,8 +397,10 @@ def __init__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = bigtable_table_admin.ListSnapshotsRequest(request) @@ -449,7 +459,7 @@ def __init__( *, retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiates the pager. @@ -463,8 +473,10 @@ def __init__( retry (google.api_core.retry.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = bigtable_table_admin.ListSnapshotsRequest(request) @@ -527,7 +539,7 @@ def __init__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiate the pager. 
@@ -541,8 +553,10 @@ def __init__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = bigtable_table_admin.ListBackupsRequest(request) @@ -601,7 +615,7 @@ def __init__( *, retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiates the pager. @@ -615,8 +629,10 @@ def __init__( retry (google.api_core.retry.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = bigtable_table_admin.ListBackupsRequest(request) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py index bb7875d87c60..5f74859a5762 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py @@ -258,9 +258,9 @@ def _prep_wrapped_messages(self, client_info): core_exceptions.DeadlineExceeded, core_exceptions.ServiceUnavailable, ), - deadline=60.0, + deadline=3600.0, ), - default_timeout=60.0, + default_timeout=3600.0, client_info=client_info, ), self.snapshot_table: gapic_v1.method.wrap_method( diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py index 8b0eadbbcf8a..59c701b8fca3 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py @@ -13,6 +13,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
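The base transport hunk above raises the default retry deadline and timeout for one of the wrapped admin methods from 60 to 3600 seconds. Callers who need a different budget can still override both per call; a rough sketch, assuming application default credentials are configured and using list_tables only as an example method:

    from google.api_core import exceptions as core_exceptions
    from google.api_core import retry as retries
    from google.cloud import bigtable_admin_v2

    client = bigtable_admin_v2.BigtableTableAdminClient()

    # Per-call retry/timeout take precedence over the defaults wired into
    # _prep_wrapped_messages().
    custom_retry = retries.Retry(
        initial=1.0,
        maximum=60.0,
        multiplier=2,
        predicate=retries.if_exception_type(
            core_exceptions.DeadlineExceeded,
            core_exceptions.ServiceUnavailable,
        ),
        deadline=300.0,
    )
    tables = client.list_tables(
        parent="projects/my-project/instances/my-instance",
        retry=custom_retry,
        timeout=300.0,
    )
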
# +import json +import logging as std_logging +import pickle import warnings from typing import Callable, Dict, Optional, Sequence, Tuple, Union @@ -22,8 +25,11 @@ import google.auth # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore +from google.protobuf.json_format import MessageToJson +import google.protobuf.message import grpc # type: ignore +import proto # type: ignore from google.cloud.bigtable_admin_v2.types import bigtable_table_admin from google.cloud.bigtable_admin_v2.types import table @@ -34,6 +40,81 @@ from google.protobuf import empty_pb2 # type: ignore from .base import BigtableTableAdminTransport, DEFAULT_CLIENT_INFO +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + + +class _LoggingClientInterceptor(grpc.UnaryUnaryClientInterceptor): # pragma: NO COVER + def intercept_unary_unary(self, continuation, client_call_details, request): + logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ) + if logging_enabled: # pragma: NO COVER + request_metadata = client_call_details.metadata + if isinstance(request, proto.Message): + request_payload = type(request).to_json(request) + elif isinstance(request, google.protobuf.message.Message): + request_payload = MessageToJson(request) + else: + request_payload = f"{type(request).__name__}: {pickle.dumps(request)}" + + request_metadata = { + key: value.decode("utf-8") if isinstance(value, bytes) else value + for key, value in request_metadata + } + grpc_request = { + "payload": request_payload, + "requestMethod": "grpc", + "metadata": dict(request_metadata), + } + _LOGGER.debug( + f"Sending request for {client_call_details.method}", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": client_call_details.method, + "request": grpc_request, + "metadata": grpc_request["metadata"], + }, + ) + + response = continuation(client_call_details, request) + if logging_enabled: # pragma: NO COVER + response_metadata = response.trailing_metadata() + # Convert gRPC metadata `` to list of tuples + metadata = ( + dict([(k, str(v)) for k, v in response_metadata]) + if response_metadata + else None + ) + result = response.result() + if isinstance(result, proto.Message): + response_payload = type(result).to_json(result) + elif isinstance(result, google.protobuf.message.Message): + response_payload = MessageToJson(result) + else: + response_payload = f"{type(result).__name__}: {pickle.dumps(result)}" + grpc_response = { + "payload": response_payload, + "metadata": metadata, + "status": "OK", + } + _LOGGER.debug( + f"Received response for {client_call_details.method}.", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": client_call_details.method, + "response": grpc_response, + "metadata": grpc_response["metadata"], + }, + ) + return response + class BigtableTableAdminGrpcTransport(BigtableTableAdminTransport): """gRPC backend transport for BigtableTableAdmin. @@ -192,7 +273,12 @@ def __init__( ], ) - # Wrap messages. This must be done after self._grpc_channel exists + self._interceptor = _LoggingClientInterceptor() + self._logged_channel = grpc.intercept_channel( + self._grpc_channel, self._interceptor + ) + + # Wrap messages. 
This must be done after self._logged_channel exists self._prep_wrapped_messages(client_info) @classmethod @@ -256,7 +342,9 @@ def operations_client(self) -> operations_v1.OperationsClient: """ # Quick check: Only create a new client if we do not already have one. if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient(self.grpc_channel) + self._operations_client = operations_v1.OperationsClient( + self._logged_channel + ) # Return the client from cache. return self._operations_client @@ -282,7 +370,7 @@ def create_table( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_table" not in self._stubs: - self._stubs["create_table"] = self.grpc_channel.unary_unary( + self._stubs["create_table"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTable", request_serializer=bigtable_table_admin.CreateTableRequest.serialize, response_deserializer=gba_table.Table.deserialize, @@ -319,7 +407,9 @@ def create_table_from_snapshot( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_table_from_snapshot" not in self._stubs: - self._stubs["create_table_from_snapshot"] = self.grpc_channel.unary_unary( + self._stubs[ + "create_table_from_snapshot" + ] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTableFromSnapshot", request_serializer=bigtable_table_admin.CreateTableFromSnapshotRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -348,7 +438,7 @@ def list_tables( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_tables" not in self._stubs: - self._stubs["list_tables"] = self.grpc_channel.unary_unary( + self._stubs["list_tables"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/ListTables", request_serializer=bigtable_table_admin.ListTablesRequest.serialize, response_deserializer=bigtable_table_admin.ListTablesResponse.deserialize, @@ -374,7 +464,7 @@ def get_table( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_table" not in self._stubs: - self._stubs["get_table"] = self.grpc_channel.unary_unary( + self._stubs["get_table"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/GetTable", request_serializer=bigtable_table_admin.GetTableRequest.serialize, response_deserializer=table.Table.deserialize, @@ -400,7 +490,7 @@ def update_table( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_table" not in self._stubs: - self._stubs["update_table"] = self.grpc_channel.unary_unary( + self._stubs["update_table"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateTable", request_serializer=bigtable_table_admin.UpdateTableRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -427,7 +517,7 @@ def delete_table( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
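The hunks above route every RPC through _LoggingClientInterceptor by wrapping the channel with the stock grpc.intercept_channel() call and storing the result as self._logged_channel. The interceptor only emits anything when google.api_core exposes the optional client_logging module and the transport's logger is enabled for DEBUG. A sketch of how a user of the library could surface those request/response logs; the logger name is inferred from the module path and should be treated as an assumption:

    import logging
    import sys

    # Standard-library logging is enough to see the payloads emitted by the
    # interceptor, provided the installed google-api-core version ships the
    # optional client_logging module.
    handler = logging.StreamHandler(sys.stderr)
    handler.setFormatter(logging.Formatter("%(levelname)s %(name)s %(message)s"))

    transport_logger = logging.getLogger(
        "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.grpc"
    )
    transport_logger.setLevel(logging.DEBUG)
    transport_logger.addHandler(handler)
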
if "delete_table" not in self._stubs: - self._stubs["delete_table"] = self.grpc_channel.unary_unary( + self._stubs["delete_table"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable", request_serializer=bigtable_table_admin.DeleteTableRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -456,7 +546,7 @@ def undelete_table( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "undelete_table" not in self._stubs: - self._stubs["undelete_table"] = self.grpc_channel.unary_unary( + self._stubs["undelete_table"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/UndeleteTable", request_serializer=bigtable_table_admin.UndeleteTableRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -484,7 +574,7 @@ def create_authorized_view( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_authorized_view" not in self._stubs: - self._stubs["create_authorized_view"] = self.grpc_channel.unary_unary( + self._stubs["create_authorized_view"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/CreateAuthorizedView", request_serializer=bigtable_table_admin.CreateAuthorizedViewRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -513,7 +603,7 @@ def list_authorized_views( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_authorized_views" not in self._stubs: - self._stubs["list_authorized_views"] = self.grpc_channel.unary_unary( + self._stubs["list_authorized_views"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/ListAuthorizedViews", request_serializer=bigtable_table_admin.ListAuthorizedViewsRequest.serialize, response_deserializer=bigtable_table_admin.ListAuthorizedViewsResponse.deserialize, @@ -541,7 +631,7 @@ def get_authorized_view( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_authorized_view" not in self._stubs: - self._stubs["get_authorized_view"] = self.grpc_channel.unary_unary( + self._stubs["get_authorized_view"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/GetAuthorizedView", request_serializer=bigtable_table_admin.GetAuthorizedViewRequest.serialize, response_deserializer=table.AuthorizedView.deserialize, @@ -569,7 +659,7 @@ def update_authorized_view( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_authorized_view" not in self._stubs: - self._stubs["update_authorized_view"] = self.grpc_channel.unary_unary( + self._stubs["update_authorized_view"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateAuthorizedView", request_serializer=bigtable_table_admin.UpdateAuthorizedViewRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -595,7 +685,7 @@ def delete_authorized_view( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "delete_authorized_view" not in self._stubs: - self._stubs["delete_authorized_view"] = self.grpc_channel.unary_unary( + self._stubs["delete_authorized_view"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteAuthorizedView", request_serializer=bigtable_table_admin.DeleteAuthorizedViewRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -625,7 +715,7 @@ def modify_column_families( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "modify_column_families" not in self._stubs: - self._stubs["modify_column_families"] = self.grpc_channel.unary_unary( + self._stubs["modify_column_families"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/ModifyColumnFamilies", request_serializer=bigtable_table_admin.ModifyColumnFamiliesRequest.serialize, response_deserializer=table.Table.deserialize, @@ -654,7 +744,7 @@ def drop_row_range( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "drop_row_range" not in self._stubs: - self._stubs["drop_row_range"] = self.grpc_channel.unary_unary( + self._stubs["drop_row_range"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/DropRowRange", request_serializer=bigtable_table_admin.DropRowRangeRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -687,7 +777,9 @@ def generate_consistency_token( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "generate_consistency_token" not in self._stubs: - self._stubs["generate_consistency_token"] = self.grpc_channel.unary_unary( + self._stubs[ + "generate_consistency_token" + ] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/GenerateConsistencyToken", request_serializer=bigtable_table_admin.GenerateConsistencyTokenRequest.serialize, response_deserializer=bigtable_table_admin.GenerateConsistencyTokenResponse.deserialize, @@ -719,7 +811,7 @@ def check_consistency( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "check_consistency" not in self._stubs: - self._stubs["check_consistency"] = self.grpc_channel.unary_unary( + self._stubs["check_consistency"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/CheckConsistency", request_serializer=bigtable_table_admin.CheckConsistencyRequest.serialize, response_deserializer=bigtable_table_admin.CheckConsistencyResponse.deserialize, @@ -756,7 +848,7 @@ def snapshot_table( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "snapshot_table" not in self._stubs: - self._stubs["snapshot_table"] = self.grpc_channel.unary_unary( + self._stubs["snapshot_table"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/SnapshotTable", request_serializer=bigtable_table_admin.SnapshotTableRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -789,7 +881,7 @@ def get_snapshot( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "get_snapshot" not in self._stubs: - self._stubs["get_snapshot"] = self.grpc_channel.unary_unary( + self._stubs["get_snapshot"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/GetSnapshot", request_serializer=bigtable_table_admin.GetSnapshotRequest.serialize, response_deserializer=table.Snapshot.deserialize, @@ -825,7 +917,7 @@ def list_snapshots( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_snapshots" not in self._stubs: - self._stubs["list_snapshots"] = self.grpc_channel.unary_unary( + self._stubs["list_snapshots"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/ListSnapshots", request_serializer=bigtable_table_admin.ListSnapshotsRequest.serialize, response_deserializer=bigtable_table_admin.ListSnapshotsResponse.deserialize, @@ -858,7 +950,7 @@ def delete_snapshot( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_snapshot" not in self._stubs: - self._stubs["delete_snapshot"] = self.grpc_channel.unary_unary( + self._stubs["delete_snapshot"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteSnapshot", request_serializer=bigtable_table_admin.DeleteSnapshotRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -892,7 +984,7 @@ def create_backup( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_backup" not in self._stubs: - self._stubs["create_backup"] = self.grpc_channel.unary_unary( + self._stubs["create_backup"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/CreateBackup", request_serializer=bigtable_table_admin.CreateBackupRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -919,7 +1011,7 @@ def get_backup( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_backup" not in self._stubs: - self._stubs["get_backup"] = self.grpc_channel.unary_unary( + self._stubs["get_backup"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/GetBackup", request_serializer=bigtable_table_admin.GetBackupRequest.serialize, response_deserializer=table.Backup.deserialize, @@ -945,7 +1037,7 @@ def update_backup( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_backup" not in self._stubs: - self._stubs["update_backup"] = self.grpc_channel.unary_unary( + self._stubs["update_backup"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateBackup", request_serializer=bigtable_table_admin.UpdateBackupRequest.serialize, response_deserializer=table.Backup.deserialize, @@ -971,7 +1063,7 @@ def delete_backup( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_backup" not in self._stubs: - self._stubs["delete_backup"] = self.grpc_channel.unary_unary( + self._stubs["delete_backup"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteBackup", request_serializer=bigtable_table_admin.DeleteBackupRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -1001,7 +1093,7 @@ def list_backups( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "list_backups" not in self._stubs: - self._stubs["list_backups"] = self.grpc_channel.unary_unary( + self._stubs["list_backups"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/ListBackups", request_serializer=bigtable_table_admin.ListBackupsRequest.serialize, response_deserializer=bigtable_table_admin.ListBackupsResponse.deserialize, @@ -1034,7 +1126,7 @@ def restore_table( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "restore_table" not in self._stubs: - self._stubs["restore_table"] = self.grpc_channel.unary_unary( + self._stubs["restore_table"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/RestoreTable", request_serializer=bigtable_table_admin.RestoreTableRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -1062,7 +1154,7 @@ def copy_backup( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "copy_backup" not in self._stubs: - self._stubs["copy_backup"] = self.grpc_channel.unary_unary( + self._stubs["copy_backup"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/CopyBackup", request_serializer=bigtable_table_admin.CopyBackupRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -1090,7 +1182,7 @@ def get_iam_policy( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_iam_policy" not in self._stubs: - self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + self._stubs["get_iam_policy"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/GetIamPolicy", request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, response_deserializer=policy_pb2.Policy.FromString, @@ -1117,7 +1209,7 @@ def set_iam_policy( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "set_iam_policy" not in self._stubs: - self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + self._stubs["set_iam_policy"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/SetIamPolicy", request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, response_deserializer=policy_pb2.Policy.FromString, @@ -1147,7 +1239,7 @@ def test_iam_permissions( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "test_iam_permissions" not in self._stubs: - self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + self._stubs["test_iam_permissions"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/TestIamPermissions", request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, @@ -1155,7 +1247,7 @@ def test_iam_permissions( return self._stubs["test_iam_permissions"] def close(self): - self.grpc_channel.close() + self._logged_channel.close() @property def kind(self) -> str: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py index 520c7c83c9ca..751828e68dc0 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py @@ -14,6 +14,9 @@ # limitations under the License. # import inspect +import json +import pickle +import logging as std_logging import warnings from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union @@ -24,8 +27,11 @@ from google.api_core import operations_v1 from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore +from google.protobuf.json_format import MessageToJson +import google.protobuf.message import grpc # type: ignore +import proto # type: ignore from grpc.experimental import aio # type: ignore from google.cloud.bigtable_admin_v2.types import bigtable_table_admin @@ -38,6 +44,82 @@ from .base import BigtableTableAdminTransport, DEFAULT_CLIENT_INFO from .grpc import BigtableTableAdminGrpcTransport +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + + +class _LoggingClientAIOInterceptor( + grpc.aio.UnaryUnaryClientInterceptor +): # pragma: NO COVER + async def intercept_unary_unary(self, continuation, client_call_details, request): + logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ) + if logging_enabled: # pragma: NO COVER + request_metadata = client_call_details.metadata + if isinstance(request, proto.Message): + request_payload = type(request).to_json(request) + elif isinstance(request, google.protobuf.message.Message): + request_payload = MessageToJson(request) + else: + request_payload = f"{type(request).__name__}: {pickle.dumps(request)}" + + request_metadata = { + key: value.decode("utf-8") if isinstance(value, bytes) else value + for key, value in request_metadata + } + grpc_request = { + "payload": request_payload, + "requestMethod": "grpc", + "metadata": dict(request_metadata), + } + _LOGGER.debug( + f"Sending request for {client_call_details.method}", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": str(client_call_details.method), + "request": grpc_request, + "metadata": grpc_request["metadata"], + }, + ) + response = await continuation(client_call_details, request) + if logging_enabled: # pragma: NO COVER + response_metadata = await response.trailing_metadata() + # 
Convert gRPC metadata `` to list of tuples + metadata = ( + dict([(k, str(v)) for k, v in response_metadata]) + if response_metadata + else None + ) + result = await response + if isinstance(result, proto.Message): + response_payload = type(result).to_json(result) + elif isinstance(result, google.protobuf.message.Message): + response_payload = MessageToJson(result) + else: + response_payload = f"{type(result).__name__}: {pickle.dumps(result)}" + grpc_response = { + "payload": response_payload, + "metadata": metadata, + "status": "OK", + } + _LOGGER.debug( + f"Received response to rpc {client_call_details.method}.", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": str(client_call_details.method), + "response": grpc_response, + "metadata": grpc_response["metadata"], + }, + ) + return response + class BigtableTableAdminGrpcAsyncIOTransport(BigtableTableAdminTransport): """gRPC AsyncIO backend transport for BigtableTableAdmin. @@ -239,10 +321,13 @@ def __init__( ], ) - # Wrap messages. This must be done after self._grpc_channel exists + self._interceptor = _LoggingClientAIOInterceptor() + self._grpc_channel._unary_unary_interceptors.append(self._interceptor) + self._logged_channel = self._grpc_channel self._wrap_with_kind = ( "kind" in inspect.signature(gapic_v1.method_async.wrap_method).parameters ) + # Wrap messages. This must be done after self._logged_channel exists self._prep_wrapped_messages(client_info) @property @@ -265,7 +350,7 @@ def operations_client(self) -> operations_v1.OperationsAsyncClient: # Quick check: Only create a new client if we do not already have one. if self._operations_client is None: self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel + self._logged_channel ) # Return the client from cache. @@ -294,7 +379,7 @@ def create_table( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_table" not in self._stubs: - self._stubs["create_table"] = self.grpc_channel.unary_unary( + self._stubs["create_table"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTable", request_serializer=bigtable_table_admin.CreateTableRequest.serialize, response_deserializer=gba_table.Table.deserialize, @@ -332,7 +417,9 @@ def create_table_from_snapshot( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_table_from_snapshot" not in self._stubs: - self._stubs["create_table_from_snapshot"] = self.grpc_channel.unary_unary( + self._stubs[ + "create_table_from_snapshot" + ] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTableFromSnapshot", request_serializer=bigtable_table_admin.CreateTableFromSnapshotRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -361,7 +448,7 @@ def list_tables( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_tables" not in self._stubs: - self._stubs["list_tables"] = self.grpc_channel.unary_unary( + self._stubs["list_tables"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/ListTables", request_serializer=bigtable_table_admin.ListTablesRequest.serialize, response_deserializer=bigtable_table_admin.ListTablesResponse.deserialize, @@ -387,7 +474,7 @@ def get_table( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
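Unlike the sync transport, the asyncio hunks above do not call grpc.intercept_channel: grpc.aio channels take their interceptors up front, so the new _LoggingClientAIOInterceptor is appended to the existing channel's interceptor list and that channel is reused as self._logged_channel. Outside generated code, the usual way to attach an async interceptor looks roughly like this; the target address is a placeholder and no server is required just to construct and close the channel:

    import asyncio

    from grpc import aio


    class NoopInterceptor(aio.UnaryUnaryClientInterceptor):
        async def intercept_unary_unary(self, continuation, client_call_details, request):
            # Pass the call through unchanged; a real interceptor could log here.
            return await continuation(client_call_details, request)


    async def main() -> None:
        # grpc.aio channels accept interceptors at construction time;
        # nothing is dialed until the first RPC is attempted.
        channel = aio.insecure_channel(
            "localhost:50051", interceptors=[NoopInterceptor()]
        )
        await channel.close()

    asyncio.run(main())
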
if "get_table" not in self._stubs: - self._stubs["get_table"] = self.grpc_channel.unary_unary( + self._stubs["get_table"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/GetTable", request_serializer=bigtable_table_admin.GetTableRequest.serialize, response_deserializer=table.Table.deserialize, @@ -415,7 +502,7 @@ def update_table( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_table" not in self._stubs: - self._stubs["update_table"] = self.grpc_channel.unary_unary( + self._stubs["update_table"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateTable", request_serializer=bigtable_table_admin.UpdateTableRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -444,7 +531,7 @@ def delete_table( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_table" not in self._stubs: - self._stubs["delete_table"] = self.grpc_channel.unary_unary( + self._stubs["delete_table"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable", request_serializer=bigtable_table_admin.DeleteTableRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -473,7 +560,7 @@ def undelete_table( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "undelete_table" not in self._stubs: - self._stubs["undelete_table"] = self.grpc_channel.unary_unary( + self._stubs["undelete_table"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/UndeleteTable", request_serializer=bigtable_table_admin.UndeleteTableRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -502,7 +589,7 @@ def create_authorized_view( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_authorized_view" not in self._stubs: - self._stubs["create_authorized_view"] = self.grpc_channel.unary_unary( + self._stubs["create_authorized_view"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/CreateAuthorizedView", request_serializer=bigtable_table_admin.CreateAuthorizedViewRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -531,7 +618,7 @@ def list_authorized_views( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_authorized_views" not in self._stubs: - self._stubs["list_authorized_views"] = self.grpc_channel.unary_unary( + self._stubs["list_authorized_views"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/ListAuthorizedViews", request_serializer=bigtable_table_admin.ListAuthorizedViewsRequest.serialize, response_deserializer=bigtable_table_admin.ListAuthorizedViewsResponse.deserialize, @@ -559,7 +646,7 @@ def get_authorized_view( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "get_authorized_view" not in self._stubs: - self._stubs["get_authorized_view"] = self.grpc_channel.unary_unary( + self._stubs["get_authorized_view"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/GetAuthorizedView", request_serializer=bigtable_table_admin.GetAuthorizedViewRequest.serialize, response_deserializer=table.AuthorizedView.deserialize, @@ -588,7 +675,7 @@ def update_authorized_view( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_authorized_view" not in self._stubs: - self._stubs["update_authorized_view"] = self.grpc_channel.unary_unary( + self._stubs["update_authorized_view"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateAuthorizedView", request_serializer=bigtable_table_admin.UpdateAuthorizedViewRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -616,7 +703,7 @@ def delete_authorized_view( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_authorized_view" not in self._stubs: - self._stubs["delete_authorized_view"] = self.grpc_channel.unary_unary( + self._stubs["delete_authorized_view"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteAuthorizedView", request_serializer=bigtable_table_admin.DeleteAuthorizedViewRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -648,7 +735,7 @@ def modify_column_families( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "modify_column_families" not in self._stubs: - self._stubs["modify_column_families"] = self.grpc_channel.unary_unary( + self._stubs["modify_column_families"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/ModifyColumnFamilies", request_serializer=bigtable_table_admin.ModifyColumnFamiliesRequest.serialize, response_deserializer=table.Table.deserialize, @@ -679,7 +766,7 @@ def drop_row_range( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "drop_row_range" not in self._stubs: - self._stubs["drop_row_range"] = self.grpc_channel.unary_unary( + self._stubs["drop_row_range"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/DropRowRange", request_serializer=bigtable_table_admin.DropRowRangeRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -712,7 +799,9 @@ def generate_consistency_token( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "generate_consistency_token" not in self._stubs: - self._stubs["generate_consistency_token"] = self.grpc_channel.unary_unary( + self._stubs[ + "generate_consistency_token" + ] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/GenerateConsistencyToken", request_serializer=bigtable_table_admin.GenerateConsistencyTokenRequest.serialize, response_deserializer=bigtable_table_admin.GenerateConsistencyTokenResponse.deserialize, @@ -744,7 +833,7 @@ def check_consistency( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "check_consistency" not in self._stubs: - self._stubs["check_consistency"] = self.grpc_channel.unary_unary( + self._stubs["check_consistency"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/CheckConsistency", request_serializer=bigtable_table_admin.CheckConsistencyRequest.serialize, response_deserializer=bigtable_table_admin.CheckConsistencyResponse.deserialize, @@ -781,7 +870,7 @@ def snapshot_table( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "snapshot_table" not in self._stubs: - self._stubs["snapshot_table"] = self.grpc_channel.unary_unary( + self._stubs["snapshot_table"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/SnapshotTable", request_serializer=bigtable_table_admin.SnapshotTableRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -814,7 +903,7 @@ def get_snapshot( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_snapshot" not in self._stubs: - self._stubs["get_snapshot"] = self.grpc_channel.unary_unary( + self._stubs["get_snapshot"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/GetSnapshot", request_serializer=bigtable_table_admin.GetSnapshotRequest.serialize, response_deserializer=table.Snapshot.deserialize, @@ -850,7 +939,7 @@ def list_snapshots( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_snapshots" not in self._stubs: - self._stubs["list_snapshots"] = self.grpc_channel.unary_unary( + self._stubs["list_snapshots"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/ListSnapshots", request_serializer=bigtable_table_admin.ListSnapshotsRequest.serialize, response_deserializer=bigtable_table_admin.ListSnapshotsResponse.deserialize, @@ -885,7 +974,7 @@ def delete_snapshot( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_snapshot" not in self._stubs: - self._stubs["delete_snapshot"] = self.grpc_channel.unary_unary( + self._stubs["delete_snapshot"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteSnapshot", request_serializer=bigtable_table_admin.DeleteSnapshotRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -921,7 +1010,7 @@ def create_backup( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_backup" not in self._stubs: - self._stubs["create_backup"] = self.grpc_channel.unary_unary( + self._stubs["create_backup"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/CreateBackup", request_serializer=bigtable_table_admin.CreateBackupRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -948,7 +1037,7 @@ def get_backup( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "get_backup" not in self._stubs: - self._stubs["get_backup"] = self.grpc_channel.unary_unary( + self._stubs["get_backup"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/GetBackup", request_serializer=bigtable_table_admin.GetBackupRequest.serialize, response_deserializer=table.Backup.deserialize, @@ -974,7 +1063,7 @@ def update_backup( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_backup" not in self._stubs: - self._stubs["update_backup"] = self.grpc_channel.unary_unary( + self._stubs["update_backup"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateBackup", request_serializer=bigtable_table_admin.UpdateBackupRequest.serialize, response_deserializer=table.Backup.deserialize, @@ -1002,7 +1091,7 @@ def delete_backup( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_backup" not in self._stubs: - self._stubs["delete_backup"] = self.grpc_channel.unary_unary( + self._stubs["delete_backup"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteBackup", request_serializer=bigtable_table_admin.DeleteBackupRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -1032,7 +1121,7 @@ def list_backups( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_backups" not in self._stubs: - self._stubs["list_backups"] = self.grpc_channel.unary_unary( + self._stubs["list_backups"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/ListBackups", request_serializer=bigtable_table_admin.ListBackupsRequest.serialize, response_deserializer=bigtable_table_admin.ListBackupsResponse.deserialize, @@ -1067,7 +1156,7 @@ def restore_table( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "restore_table" not in self._stubs: - self._stubs["restore_table"] = self.grpc_channel.unary_unary( + self._stubs["restore_table"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/RestoreTable", request_serializer=bigtable_table_admin.RestoreTableRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -1097,7 +1186,7 @@ def copy_backup( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "copy_backup" not in self._stubs: - self._stubs["copy_backup"] = self.grpc_channel.unary_unary( + self._stubs["copy_backup"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/CopyBackup", request_serializer=bigtable_table_admin.CopyBackupRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -1125,7 +1214,7 @@ def get_iam_policy( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_iam_policy" not in self._stubs: - self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + self._stubs["get_iam_policy"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/GetIamPolicy", request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, response_deserializer=policy_pb2.Policy.FromString, @@ -1152,7 +1241,7 @@ def set_iam_policy( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "set_iam_policy" not in self._stubs: - self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + self._stubs["set_iam_policy"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/SetIamPolicy", request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, response_deserializer=policy_pb2.Policy.FromString, @@ -1182,7 +1271,7 @@ def test_iam_permissions( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "test_iam_permissions" not in self._stubs: - self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + self._stubs["test_iam_permissions"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/TestIamPermissions", request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, @@ -1307,9 +1396,9 @@ def _prep_wrapped_messages(self, client_info): core_exceptions.DeadlineExceeded, core_exceptions.ServiceUnavailable, ), - deadline=60.0, + deadline=3600.0, ), - default_timeout=60.0, + default_timeout=3600.0, client_info=client_info, ), self.snapshot_table: self._wrap_method( @@ -1450,7 +1539,7 @@ def _wrap_method(self, func, *args, **kwargs): return gapic_v1.method_async.wrap_method(func, *args, **kwargs) def close(self): - return self.grpc_channel.close() + return self._logged_channel.close() @property def kind(self) -> str: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py index b25ddec60503..80d485bd02ed 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py @@ -13,9 +13,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# +import logging +import json # type: ignore from google.auth.transport.requests import AuthorizedSession # type: ignore -import json # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.api_core import exceptions as core_exceptions from google.api_core import retry as retries @@ -49,6 +50,14 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.Retry, object, None] # type: ignore +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = logging.getLogger(__name__) DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, @@ -301,8 +310,11 @@ def post_update_table(self, response): def pre_check_consistency( self, request: bigtable_table_admin.CheckConsistencyRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.CheckConsistencyRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.CheckConsistencyRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for check_consistency Override in a subclass to manipulate the request or metadata @@ -315,17 +327,45 @@ def post_check_consistency( ) -> bigtable_table_admin.CheckConsistencyResponse: """Post-rpc interceptor for check_consistency - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_check_consistency_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableTableAdmin server but before - it is returned to user code. + it is returned to user code. This `post_check_consistency` interceptor runs + before the `post_check_consistency_with_metadata` interceptor. """ return response + def post_check_consistency_with_metadata( + self, + response: bigtable_table_admin.CheckConsistencyResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.CheckConsistencyResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for check_consistency + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. + + We recommend only using this `post_check_consistency_with_metadata` + interceptor in new development instead of the `post_check_consistency` interceptor. + When both interceptors are used, this `post_check_consistency_with_metadata` interceptor runs after the + `post_check_consistency` interceptor. The (possibly modified) response returned by + `post_check_consistency` will be passed to + `post_check_consistency_with_metadata`. + """ + return response, metadata + def pre_copy_backup( self, request: bigtable_table_admin.CopyBackupRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.CopyBackupRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.CopyBackupRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for copy_backup Override in a subclass to manipulate the request or metadata @@ -338,18 +378,42 @@ def post_copy_backup( ) -> operations_pb2.Operation: """Post-rpc interceptor for copy_backup - Override in a subclass to manipulate the response + DEPRECATED. 
Please use the `post_copy_backup_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableTableAdmin server but before - it is returned to user code. + it is returned to user code. This `post_copy_backup` interceptor runs + before the `post_copy_backup_with_metadata` interceptor. """ return response + def post_copy_backup_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for copy_backup + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. + + We recommend only using this `post_copy_backup_with_metadata` + interceptor in new development instead of the `post_copy_backup` interceptor. + When both interceptors are used, this `post_copy_backup_with_metadata` interceptor runs after the + `post_copy_backup` interceptor. The (possibly modified) response returned by + `post_copy_backup` will be passed to + `post_copy_backup_with_metadata`. + """ + return response, metadata + def pre_create_authorized_view( self, request: bigtable_table_admin.CreateAuthorizedViewRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - bigtable_table_admin.CreateAuthorizedViewRequest, Sequence[Tuple[str, str]] + bigtable_table_admin.CreateAuthorizedViewRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for create_authorized_view @@ -363,17 +427,43 @@ def post_create_authorized_view( ) -> operations_pb2.Operation: """Post-rpc interceptor for create_authorized_view - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_authorized_view_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableTableAdmin server but before - it is returned to user code. + it is returned to user code. This `post_create_authorized_view` interceptor runs + before the `post_create_authorized_view_with_metadata` interceptor. """ return response + def post_create_authorized_view_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_authorized_view + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. + + We recommend only using this `post_create_authorized_view_with_metadata` + interceptor in new development instead of the `post_create_authorized_view` interceptor. + When both interceptors are used, this `post_create_authorized_view_with_metadata` interceptor runs after the + `post_create_authorized_view` interceptor. The (possibly modified) response returned by + `post_create_authorized_view` will be passed to + `post_create_authorized_view_with_metadata`. 
+ """ + return response, metadata + def pre_create_backup( self, request: bigtable_table_admin.CreateBackupRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.CreateBackupRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.CreateBackupRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for create_backup Override in a subclass to manipulate the request or metadata @@ -386,17 +476,42 @@ def post_create_backup( ) -> operations_pb2.Operation: """Post-rpc interceptor for create_backup - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_backup_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableTableAdmin server but before - it is returned to user code. + it is returned to user code. This `post_create_backup` interceptor runs + before the `post_create_backup_with_metadata` interceptor. """ return response + def post_create_backup_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_backup + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. + + We recommend only using this `post_create_backup_with_metadata` + interceptor in new development instead of the `post_create_backup` interceptor. + When both interceptors are used, this `post_create_backup_with_metadata` interceptor runs after the + `post_create_backup` interceptor. The (possibly modified) response returned by + `post_create_backup` will be passed to + `post_create_backup_with_metadata`. + """ + return response, metadata + def pre_create_table( self, request: bigtable_table_admin.CreateTableRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.CreateTableRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.CreateTableRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for create_table Override in a subclass to manipulate the request or metadata @@ -407,18 +522,42 @@ def pre_create_table( def post_create_table(self, response: gba_table.Table) -> gba_table.Table: """Post-rpc interceptor for create_table - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_table_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableTableAdmin server but before - it is returned to user code. + it is returned to user code. This `post_create_table` interceptor runs + before the `post_create_table_with_metadata` interceptor. """ return response + def post_create_table_with_metadata( + self, + response: gba_table.Table, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[gba_table.Table, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_table + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. 
+ + We recommend only using this `post_create_table_with_metadata` + interceptor in new development instead of the `post_create_table` interceptor. + When both interceptors are used, this `post_create_table_with_metadata` interceptor runs after the + `post_create_table` interceptor. The (possibly modified) response returned by + `post_create_table` will be passed to + `post_create_table_with_metadata`. + """ + return response, metadata + def pre_create_table_from_snapshot( self, request: bigtable_table_admin.CreateTableFromSnapshotRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - bigtable_table_admin.CreateTableFromSnapshotRequest, Sequence[Tuple[str, str]] + bigtable_table_admin.CreateTableFromSnapshotRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for create_table_from_snapshot @@ -432,18 +571,42 @@ def post_create_table_from_snapshot( ) -> operations_pb2.Operation: """Post-rpc interceptor for create_table_from_snapshot - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_table_from_snapshot_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableTableAdmin server but before - it is returned to user code. + it is returned to user code. This `post_create_table_from_snapshot` interceptor runs + before the `post_create_table_from_snapshot_with_metadata` interceptor. """ return response + def post_create_table_from_snapshot_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_table_from_snapshot + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. + + We recommend only using this `post_create_table_from_snapshot_with_metadata` + interceptor in new development instead of the `post_create_table_from_snapshot` interceptor. + When both interceptors are used, this `post_create_table_from_snapshot_with_metadata` interceptor runs after the + `post_create_table_from_snapshot` interceptor. The (possibly modified) response returned by + `post_create_table_from_snapshot` will be passed to + `post_create_table_from_snapshot_with_metadata`. 
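The new post_*_with_metadata hooks introduced throughout this file follow one pattern: they receive the decoded response plus the response metadata and must return both. A minimal sketch of overriding one of them in a subclass, assuming the interceptor base class exported by this module is named BigtableTableAdminRestInterceptor (the usual GAPIC naming; the class name itself is not visible in this hunk):

from typing import Sequence, Tuple, Union

from google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.rest import (
    BigtableTableAdminRestInterceptor,
)
from google.cloud.bigtable_admin_v2.types import table as gba_table


class MetadataLoggingInterceptor(BigtableTableAdminRestInterceptor):
    """Reads create_table response metadata without altering the result."""

    def post_create_table_with_metadata(
        self,
        response: gba_table.Table,
        metadata: Sequence[Tuple[str, Union[str, bytes]]],
    ) -> Tuple[gba_table.Table, Sequence[Tuple[str, Union[str, bytes]]]]:
        # Inspect the response headers surfaced as metadata, then return both
        # values unchanged, exactly like the default implementation above.
        for key, value in metadata:
            print(f"create_table response metadata: {key}={value!r}")
        return response, metadata

Such a subclass would typically be handed to the REST transport when the client is constructed (for example via the transport's interceptor argument); that wiring is an assumption here and is not part of this hunk.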
+ """ + return response, metadata + def pre_delete_authorized_view( self, request: bigtable_table_admin.DeleteAuthorizedViewRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - bigtable_table_admin.DeleteAuthorizedViewRequest, Sequence[Tuple[str, str]] + bigtable_table_admin.DeleteAuthorizedViewRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for delete_authorized_view @@ -455,8 +618,11 @@ def pre_delete_authorized_view( def pre_delete_backup( self, request: bigtable_table_admin.DeleteBackupRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.DeleteBackupRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.DeleteBackupRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for delete_backup Override in a subclass to manipulate the request or metadata @@ -467,8 +633,11 @@ def pre_delete_backup( def pre_delete_snapshot( self, request: bigtable_table_admin.DeleteSnapshotRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.DeleteSnapshotRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.DeleteSnapshotRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for delete_snapshot Override in a subclass to manipulate the request or metadata @@ -479,8 +648,10 @@ def pre_delete_snapshot( def pre_delete_table( self, request: bigtable_table_admin.DeleteTableRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.DeleteTableRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.DeleteTableRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for delete_table Override in a subclass to manipulate the request or metadata @@ -491,8 +662,11 @@ def pre_delete_table( def pre_drop_row_range( self, request: bigtable_table_admin.DropRowRangeRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.DropRowRangeRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.DropRowRangeRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for drop_row_range Override in a subclass to manipulate the request or metadata @@ -503,9 +677,10 @@ def pre_drop_row_range( def pre_generate_consistency_token( self, request: bigtable_table_admin.GenerateConsistencyTokenRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - bigtable_table_admin.GenerateConsistencyTokenRequest, Sequence[Tuple[str, str]] + bigtable_table_admin.GenerateConsistencyTokenRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for generate_consistency_token @@ -519,18 +694,45 @@ def post_generate_consistency_token( ) -> bigtable_table_admin.GenerateConsistencyTokenResponse: """Post-rpc interceptor for generate_consistency_token - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_generate_consistency_token_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableTableAdmin server but before - it is returned to user code. + it is returned to user code. 
This `post_generate_consistency_token` interceptor runs + before the `post_generate_consistency_token_with_metadata` interceptor. """ return response + def post_generate_consistency_token_with_metadata( + self, + response: bigtable_table_admin.GenerateConsistencyTokenResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.GenerateConsistencyTokenResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for generate_consistency_token + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. + + We recommend only using this `post_generate_consistency_token_with_metadata` + interceptor in new development instead of the `post_generate_consistency_token` interceptor. + When both interceptors are used, this `post_generate_consistency_token_with_metadata` interceptor runs after the + `post_generate_consistency_token` interceptor. The (possibly modified) response returned by + `post_generate_consistency_token` will be passed to + `post_generate_consistency_token_with_metadata`. + """ + return response, metadata + def pre_get_authorized_view( self, request: bigtable_table_admin.GetAuthorizedViewRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - bigtable_table_admin.GetAuthorizedViewRequest, Sequence[Tuple[str, str]] + bigtable_table_admin.GetAuthorizedViewRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for get_authorized_view @@ -544,17 +746,42 @@ def post_get_authorized_view( ) -> table.AuthorizedView: """Post-rpc interceptor for get_authorized_view - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_authorized_view_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableTableAdmin server but before - it is returned to user code. + it is returned to user code. This `post_get_authorized_view` interceptor runs + before the `post_get_authorized_view_with_metadata` interceptor. """ return response + def post_get_authorized_view_with_metadata( + self, + response: table.AuthorizedView, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[table.AuthorizedView, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_authorized_view + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. + + We recommend only using this `post_get_authorized_view_with_metadata` + interceptor in new development instead of the `post_get_authorized_view` interceptor. + When both interceptors are used, this `post_get_authorized_view_with_metadata` interceptor runs after the + `post_get_authorized_view` interceptor. The (possibly modified) response returned by + `post_get_authorized_view` will be passed to + `post_get_authorized_view_with_metadata`. 
+ """ + return response, metadata + def pre_get_backup( self, request: bigtable_table_admin.GetBackupRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.GetBackupRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.GetBackupRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for get_backup Override in a subclass to manipulate the request or metadata @@ -565,17 +792,40 @@ def pre_get_backup( def post_get_backup(self, response: table.Backup) -> table.Backup: """Post-rpc interceptor for get_backup - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_backup_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableTableAdmin server but before - it is returned to user code. + it is returned to user code. This `post_get_backup` interceptor runs + before the `post_get_backup_with_metadata` interceptor. """ return response + def post_get_backup_with_metadata( + self, response: table.Backup, metadata: Sequence[Tuple[str, Union[str, bytes]]] + ) -> Tuple[table.Backup, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_backup + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. + + We recommend only using this `post_get_backup_with_metadata` + interceptor in new development instead of the `post_get_backup` interceptor. + When both interceptors are used, this `post_get_backup_with_metadata` interceptor runs after the + `post_get_backup` interceptor. The (possibly modified) response returned by + `post_get_backup` will be passed to + `post_get_backup_with_metadata`. + """ + return response, metadata + def pre_get_iam_policy( self, request: iam_policy_pb2.GetIamPolicyRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[iam_policy_pb2.GetIamPolicyRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + iam_policy_pb2.GetIamPolicyRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for get_iam_policy Override in a subclass to manipulate the request or metadata @@ -586,17 +836,42 @@ def pre_get_iam_policy( def post_get_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: """Post-rpc interceptor for get_iam_policy - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_iam_policy_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableTableAdmin server but before - it is returned to user code. + it is returned to user code. This `post_get_iam_policy` interceptor runs + before the `post_get_iam_policy_with_metadata` interceptor. """ return response + def post_get_iam_policy_with_metadata( + self, + response: policy_pb2.Policy, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[policy_pb2.Policy, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_iam_policy + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. + + We recommend only using this `post_get_iam_policy_with_metadata` + interceptor in new development instead of the `post_get_iam_policy` interceptor. 
+ When both interceptors are used, this `post_get_iam_policy_with_metadata` interceptor runs after the + `post_get_iam_policy` interceptor. The (possibly modified) response returned by + `post_get_iam_policy` will be passed to + `post_get_iam_policy_with_metadata`. + """ + return response, metadata + def pre_get_snapshot( self, request: bigtable_table_admin.GetSnapshotRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.GetSnapshotRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.GetSnapshotRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for get_snapshot Override in a subclass to manipulate the request or metadata @@ -607,17 +882,42 @@ def pre_get_snapshot( def post_get_snapshot(self, response: table.Snapshot) -> table.Snapshot: """Post-rpc interceptor for get_snapshot - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_snapshot_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableTableAdmin server but before - it is returned to user code. + it is returned to user code. This `post_get_snapshot` interceptor runs + before the `post_get_snapshot_with_metadata` interceptor. """ return response + def post_get_snapshot_with_metadata( + self, + response: table.Snapshot, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[table.Snapshot, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_snapshot + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. + + We recommend only using this `post_get_snapshot_with_metadata` + interceptor in new development instead of the `post_get_snapshot` interceptor. + When both interceptors are used, this `post_get_snapshot_with_metadata` interceptor runs after the + `post_get_snapshot` interceptor. The (possibly modified) response returned by + `post_get_snapshot` will be passed to + `post_get_snapshot_with_metadata`. + """ + return response, metadata + def pre_get_table( self, request: bigtable_table_admin.GetTableRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.GetTableRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.GetTableRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for get_table Override in a subclass to manipulate the request or metadata @@ -628,18 +928,40 @@ def pre_get_table( def post_get_table(self, response: table.Table) -> table.Table: """Post-rpc interceptor for get_table - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_table_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableTableAdmin server but before - it is returned to user code. + it is returned to user code. This `post_get_table` interceptor runs + before the `post_get_table_with_metadata` interceptor. 
""" return response + def post_get_table_with_metadata( + self, response: table.Table, metadata: Sequence[Tuple[str, Union[str, bytes]]] + ) -> Tuple[table.Table, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_table + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. + + We recommend only using this `post_get_table_with_metadata` + interceptor in new development instead of the `post_get_table` interceptor. + When both interceptors are used, this `post_get_table_with_metadata` interceptor runs after the + `post_get_table` interceptor. The (possibly modified) response returned by + `post_get_table` will be passed to + `post_get_table_with_metadata`. + """ + return response, metadata + def pre_list_authorized_views( self, request: bigtable_table_admin.ListAuthorizedViewsRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - bigtable_table_admin.ListAuthorizedViewsRequest, Sequence[Tuple[str, str]] + bigtable_table_admin.ListAuthorizedViewsRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for list_authorized_views @@ -653,17 +975,45 @@ def post_list_authorized_views( ) -> bigtable_table_admin.ListAuthorizedViewsResponse: """Post-rpc interceptor for list_authorized_views - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_authorized_views_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableTableAdmin server but before - it is returned to user code. + it is returned to user code. This `post_list_authorized_views` interceptor runs + before the `post_list_authorized_views_with_metadata` interceptor. """ return response + def post_list_authorized_views_with_metadata( + self, + response: bigtable_table_admin.ListAuthorizedViewsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.ListAuthorizedViewsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_authorized_views + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. + + We recommend only using this `post_list_authorized_views_with_metadata` + interceptor in new development instead of the `post_list_authorized_views` interceptor. + When both interceptors are used, this `post_list_authorized_views_with_metadata` interceptor runs after the + `post_list_authorized_views` interceptor. The (possibly modified) response returned by + `post_list_authorized_views` will be passed to + `post_list_authorized_views_with_metadata`. + """ + return response, metadata + def pre_list_backups( self, request: bigtable_table_admin.ListBackupsRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.ListBackupsRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.ListBackupsRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for list_backups Override in a subclass to manipulate the request or metadata @@ -676,17 +1026,46 @@ def post_list_backups( ) -> bigtable_table_admin.ListBackupsResponse: """Post-rpc interceptor for list_backups - Override in a subclass to manipulate the response + DEPRECATED. 
Please use the `post_list_backups_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableTableAdmin server but before - it is returned to user code. + it is returned to user code. This `post_list_backups` interceptor runs + before the `post_list_backups_with_metadata` interceptor. """ return response + def post_list_backups_with_metadata( + self, + response: bigtable_table_admin.ListBackupsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.ListBackupsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_backups + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. + + We recommend only using this `post_list_backups_with_metadata` + interceptor in new development instead of the `post_list_backups` interceptor. + When both interceptors are used, this `post_list_backups_with_metadata` interceptor runs after the + `post_list_backups` interceptor. The (possibly modified) response returned by + `post_list_backups` will be passed to + `post_list_backups_with_metadata`. + """ + return response, metadata + def pre_list_snapshots( self, request: bigtable_table_admin.ListSnapshotsRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.ListSnapshotsRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.ListSnapshotsRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for list_snapshots Override in a subclass to manipulate the request or metadata @@ -699,17 +1078,45 @@ def post_list_snapshots( ) -> bigtable_table_admin.ListSnapshotsResponse: """Post-rpc interceptor for list_snapshots - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_snapshots_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableTableAdmin server but before - it is returned to user code. + it is returned to user code. This `post_list_snapshots` interceptor runs + before the `post_list_snapshots_with_metadata` interceptor. """ return response + def post_list_snapshots_with_metadata( + self, + response: bigtable_table_admin.ListSnapshotsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.ListSnapshotsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_snapshots + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. + + We recommend only using this `post_list_snapshots_with_metadata` + interceptor in new development instead of the `post_list_snapshots` interceptor. + When both interceptors are used, this `post_list_snapshots_with_metadata` interceptor runs after the + `post_list_snapshots` interceptor. The (possibly modified) response returned by + `post_list_snapshots` will be passed to + `post_list_snapshots_with_metadata`. 
+ """ + return response, metadata + def pre_list_tables( self, request: bigtable_table_admin.ListTablesRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.ListTablesRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.ListTablesRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for list_tables Override in a subclass to manipulate the request or metadata @@ -722,18 +1129,44 @@ def post_list_tables( ) -> bigtable_table_admin.ListTablesResponse: """Post-rpc interceptor for list_tables - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_tables_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableTableAdmin server but before - it is returned to user code. + it is returned to user code. This `post_list_tables` interceptor runs + before the `post_list_tables_with_metadata` interceptor. """ return response + def post_list_tables_with_metadata( + self, + response: bigtable_table_admin.ListTablesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.ListTablesResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for list_tables + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. + + We recommend only using this `post_list_tables_with_metadata` + interceptor in new development instead of the `post_list_tables` interceptor. + When both interceptors are used, this `post_list_tables_with_metadata` interceptor runs after the + `post_list_tables` interceptor. The (possibly modified) response returned by + `post_list_tables` will be passed to + `post_list_tables_with_metadata`. + """ + return response, metadata + def pre_modify_column_families( self, request: bigtable_table_admin.ModifyColumnFamiliesRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - bigtable_table_admin.ModifyColumnFamiliesRequest, Sequence[Tuple[str, str]] + bigtable_table_admin.ModifyColumnFamiliesRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for modify_column_families @@ -745,17 +1178,41 @@ def pre_modify_column_families( def post_modify_column_families(self, response: table.Table) -> table.Table: """Post-rpc interceptor for modify_column_families - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_modify_column_families_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableTableAdmin server but before - it is returned to user code. + it is returned to user code. This `post_modify_column_families` interceptor runs + before the `post_modify_column_families_with_metadata` interceptor. """ return response + def post_modify_column_families_with_metadata( + self, response: table.Table, metadata: Sequence[Tuple[str, Union[str, bytes]]] + ) -> Tuple[table.Table, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for modify_column_families + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. 
+ + We recommend only using this `post_modify_column_families_with_metadata` + interceptor in new development instead of the `post_modify_column_families` interceptor. + When both interceptors are used, this `post_modify_column_families_with_metadata` interceptor runs after the + `post_modify_column_families` interceptor. The (possibly modified) response returned by + `post_modify_column_families` will be passed to + `post_modify_column_families_with_metadata`. + """ + return response, metadata + def pre_restore_table( self, request: bigtable_table_admin.RestoreTableRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.RestoreTableRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.RestoreTableRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for restore_table Override in a subclass to manipulate the request or metadata @@ -768,17 +1225,42 @@ def post_restore_table( ) -> operations_pb2.Operation: """Post-rpc interceptor for restore_table - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_restore_table_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableTableAdmin server but before - it is returned to user code. + it is returned to user code. This `post_restore_table` interceptor runs + before the `post_restore_table_with_metadata` interceptor. """ return response + def post_restore_table_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for restore_table + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. + + We recommend only using this `post_restore_table_with_metadata` + interceptor in new development instead of the `post_restore_table` interceptor. + When both interceptors are used, this `post_restore_table_with_metadata` interceptor runs after the + `post_restore_table` interceptor. The (possibly modified) response returned by + `post_restore_table` will be passed to + `post_restore_table_with_metadata`. + """ + return response, metadata + def pre_set_iam_policy( self, request: iam_policy_pb2.SetIamPolicyRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[iam_policy_pb2.SetIamPolicyRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + iam_policy_pb2.SetIamPolicyRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for set_iam_policy Override in a subclass to manipulate the request or metadata @@ -789,17 +1271,43 @@ def pre_set_iam_policy( def post_set_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: """Post-rpc interceptor for set_iam_policy - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_set_iam_policy_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableTableAdmin server but before - it is returned to user code. + it is returned to user code. This `post_set_iam_policy` interceptor runs + before the `post_set_iam_policy_with_metadata` interceptor. 
""" return response + def post_set_iam_policy_with_metadata( + self, + response: policy_pb2.Policy, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[policy_pb2.Policy, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for set_iam_policy + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. + + We recommend only using this `post_set_iam_policy_with_metadata` + interceptor in new development instead of the `post_set_iam_policy` interceptor. + When both interceptors are used, this `post_set_iam_policy_with_metadata` interceptor runs after the + `post_set_iam_policy` interceptor. The (possibly modified) response returned by + `post_set_iam_policy` will be passed to + `post_set_iam_policy_with_metadata`. + """ + return response, metadata + def pre_snapshot_table( self, request: bigtable_table_admin.SnapshotTableRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.SnapshotTableRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.SnapshotTableRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for snapshot_table Override in a subclass to manipulate the request or metadata @@ -812,17 +1320,43 @@ def post_snapshot_table( ) -> operations_pb2.Operation: """Post-rpc interceptor for snapshot_table - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_snapshot_table_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableTableAdmin server but before - it is returned to user code. + it is returned to user code. This `post_snapshot_table` interceptor runs + before the `post_snapshot_table_with_metadata` interceptor. """ return response + def post_snapshot_table_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for snapshot_table + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. + + We recommend only using this `post_snapshot_table_with_metadata` + interceptor in new development instead of the `post_snapshot_table` interceptor. + When both interceptors are used, this `post_snapshot_table_with_metadata` interceptor runs after the + `post_snapshot_table` interceptor. The (possibly modified) response returned by + `post_snapshot_table` will be passed to + `post_snapshot_table_with_metadata`. + """ + return response, metadata + def pre_test_iam_permissions( self, request: iam_policy_pb2.TestIamPermissionsRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[iam_policy_pb2.TestIamPermissionsRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + iam_policy_pb2.TestIamPermissionsRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for test_iam_permissions Override in a subclass to manipulate the request or metadata @@ -835,17 +1369,46 @@ def post_test_iam_permissions( ) -> iam_policy_pb2.TestIamPermissionsResponse: """Post-rpc interceptor for test_iam_permissions - Override in a subclass to manipulate the response + DEPRECATED. 
Please use the `post_test_iam_permissions_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableTableAdmin server but before - it is returned to user code. + it is returned to user code. This `post_test_iam_permissions` interceptor runs + before the `post_test_iam_permissions_with_metadata` interceptor. """ return response + def post_test_iam_permissions_with_metadata( + self, + response: iam_policy_pb2.TestIamPermissionsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + iam_policy_pb2.TestIamPermissionsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for test_iam_permissions + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. + + We recommend only using this `post_test_iam_permissions_with_metadata` + interceptor in new development instead of the `post_test_iam_permissions` interceptor. + When both interceptors are used, this `post_test_iam_permissions_with_metadata` interceptor runs after the + `post_test_iam_permissions` interceptor. The (possibly modified) response returned by + `post_test_iam_permissions` will be passed to + `post_test_iam_permissions_with_metadata`. + """ + return response, metadata + def pre_undelete_table( self, request: bigtable_table_admin.UndeleteTableRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.UndeleteTableRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.UndeleteTableRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for undelete_table Override in a subclass to manipulate the request or metadata @@ -858,18 +1421,42 @@ def post_undelete_table( ) -> operations_pb2.Operation: """Post-rpc interceptor for undelete_table - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_undelete_table_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableTableAdmin server but before - it is returned to user code. + it is returned to user code. This `post_undelete_table` interceptor runs + before the `post_undelete_table_with_metadata` interceptor. """ return response + def post_undelete_table_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for undelete_table + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. + + We recommend only using this `post_undelete_table_with_metadata` + interceptor in new development instead of the `post_undelete_table` interceptor. + When both interceptors are used, this `post_undelete_table_with_metadata` interceptor runs after the + `post_undelete_table` interceptor. The (possibly modified) response returned by + `post_undelete_table` will be passed to + `post_undelete_table_with_metadata`. 
+ """ + return response, metadata + def pre_update_authorized_view( self, request: bigtable_table_admin.UpdateAuthorizedViewRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - bigtable_table_admin.UpdateAuthorizedViewRequest, Sequence[Tuple[str, str]] + bigtable_table_admin.UpdateAuthorizedViewRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for update_authorized_view @@ -883,17 +1470,43 @@ def post_update_authorized_view( ) -> operations_pb2.Operation: """Post-rpc interceptor for update_authorized_view - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_authorized_view_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableTableAdmin server but before - it is returned to user code. + it is returned to user code. This `post_update_authorized_view` interceptor runs + before the `post_update_authorized_view_with_metadata` interceptor. """ return response + def post_update_authorized_view_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_authorized_view + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. + + We recommend only using this `post_update_authorized_view_with_metadata` + interceptor in new development instead of the `post_update_authorized_view` interceptor. + When both interceptors are used, this `post_update_authorized_view_with_metadata` interceptor runs after the + `post_update_authorized_view` interceptor. The (possibly modified) response returned by + `post_update_authorized_view` will be passed to + `post_update_authorized_view_with_metadata`. + """ + return response, metadata + def pre_update_backup( self, request: bigtable_table_admin.UpdateBackupRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.UpdateBackupRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.UpdateBackupRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for update_backup Override in a subclass to manipulate the request or metadata @@ -904,17 +1517,40 @@ def pre_update_backup( def post_update_backup(self, response: table.Backup) -> table.Backup: """Post-rpc interceptor for update_backup - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_backup_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableTableAdmin server but before - it is returned to user code. + it is returned to user code. This `post_update_backup` interceptor runs + before the `post_update_backup_with_metadata` interceptor. """ return response + def post_update_backup_with_metadata( + self, response: table.Backup, metadata: Sequence[Tuple[str, Union[str, bytes]]] + ) -> Tuple[table.Backup, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_backup + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. 
+ + We recommend only using this `post_update_backup_with_metadata` + interceptor in new development instead of the `post_update_backup` interceptor. + When both interceptors are used, this `post_update_backup_with_metadata` interceptor runs after the + `post_update_backup` interceptor. The (possibly modified) response returned by + `post_update_backup` will be passed to + `post_update_backup_with_metadata`. + """ + return response, metadata + def pre_update_table( self, request: bigtable_table_admin.UpdateTableRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.UpdateTableRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.UpdateTableRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for update_table Override in a subclass to manipulate the request or metadata @@ -927,12 +1563,35 @@ def post_update_table( ) -> operations_pb2.Operation: """Post-rpc interceptor for update_table - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_table_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the BigtableTableAdmin server but before - it is returned to user code. + it is returned to user code. This `post_update_table` interceptor runs + before the `post_update_table_with_metadata` interceptor. """ return response + def post_update_table_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_table + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. + + We recommend only using this `post_update_table_with_metadata` + interceptor in new development instead of the `post_update_table` interceptor. + When both interceptors are used, this `post_update_table_with_metadata` interceptor runs after the + `post_update_table` interceptor. The (possibly modified) response returned by + `post_update_table` will be passed to + `post_update_table_with_metadata`. + """ + return response, metadata + @dataclasses.dataclass class BigtableTableAdminRestStub: @@ -1113,7 +1772,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable_table_admin.CheckConsistencyResponse: r"""Call the check consistency method over HTTP. @@ -1124,8 +1783,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
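The widened annotation used throughout these signatures, Sequence[Tuple[str, Union[str, bytes]]], encodes the rule stated in the metadata docstring just above: metadata values are normally str, but keys ending in the -bin suffix carry bytes. A minimal sketch of per-call metadata a caller could pass; both key names are illustrative only, not required by the API:

# Normal keys take str values; keys ending in "-bin" take bytes values.
metadata = [
    ("x-goog-request-params", "name=projects/p/instances/i/tables/t"),
    ("my-trace-context-bin", b"\x01\x02\x03"),
]
# e.g. client.get_table(name=..., metadata=metadata)  # hypothetical call site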
Returns: ~.bigtable_table_admin.CheckConsistencyResponse: @@ -1137,6 +1798,7 @@ def __call__( http_options = ( _BaseBigtableTableAdminRestTransport._BaseCheckConsistency._get_http_options() ) + request, metadata = self._interceptor.pre_check_consistency( request, metadata ) @@ -1153,6 +1815,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.CheckConsistency", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "CheckConsistency", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableTableAdminRestTransport._CheckConsistency._get_response( self._host, @@ -1174,7 +1863,35 @@ def __call__( pb_resp = bigtable_table_admin.CheckConsistencyResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_check_consistency(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_check_consistency_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = ( + bigtable_table_admin.CheckConsistencyResponse.to_json(response) + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.check_consistency", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "CheckConsistency", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _CopyBackup( @@ -1212,7 +1929,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the copy backup method over HTTP. @@ -1223,8 +1940,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
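The request/response _LOGGER.debug calls added to the CheckConsistency handler above are repeated for every method in this file, and the gRPC transports emit equivalent records behind the _logged_channel they now use. They fire only when CLIENT_LOGGING_SUPPORTED is true, that is, when the installed google-api-core exposes client_logging, and when DEBUG is enabled for this module's logger. A minimal sketch of turning them on with the standard logging module; the logger name follows from this file's module path:

import logging

# Send DEBUG records, including the "Sending request for ..." and
# "Received response for ..." entries added here, to the root handler.
logging.basicConfig(level=logging.DEBUG)
logging.getLogger(
    "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.rest"
).setLevel(logging.DEBUG)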
Returns: ~.operations_pb2.Operation: @@ -1237,6 +1956,7 @@ def __call__( http_options = ( _BaseBigtableTableAdminRestTransport._BaseCopyBackup._get_http_options() ) + request, metadata = self._interceptor.pre_copy_backup(request, metadata) transcoded_request = _BaseBigtableTableAdminRestTransport._BaseCopyBackup._get_transcoded_request( http_options, request @@ -1251,6 +1971,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.CopyBackup", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "CopyBackup", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableTableAdminRestTransport._CopyBackup._get_response( self._host, @@ -1270,7 +2017,33 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_copy_backup(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_copy_backup_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.copy_backup", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "CopyBackup", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _CreateAuthorizedView( @@ -1309,7 +2082,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the create authorized view method over HTTP. @@ -1320,8 +2093,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.operations_pb2.Operation: @@ -1334,6 +2109,7 @@ def __call__( http_options = ( _BaseBigtableTableAdminRestTransport._BaseCreateAuthorizedView._get_http_options() ) + request, metadata = self._interceptor.pre_create_authorized_view( request, metadata ) @@ -1350,6 +2126,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.CreateAuthorizedView", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "CreateAuthorizedView", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = ( BigtableTableAdminRestTransport._CreateAuthorizedView._get_response( @@ -1371,7 +2174,33 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_authorized_view(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_authorized_view_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.create_authorized_view", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "CreateAuthorizedView", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _CreateBackup( @@ -1410,7 +2239,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the create backup method over HTTP. @@ -1421,8 +2250,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.operations_pb2.Operation: @@ -1435,6 +2266,7 @@ def __call__( http_options = ( _BaseBigtableTableAdminRestTransport._BaseCreateBackup._get_http_options() ) + request, metadata = self._interceptor.pre_create_backup(request, metadata) transcoded_request = _BaseBigtableTableAdminRestTransport._BaseCreateBackup._get_transcoded_request( http_options, request @@ -1449,6 +2281,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.CreateBackup", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "CreateBackup", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableTableAdminRestTransport._CreateBackup._get_response( self._host, @@ -1468,7 +2327,33 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_backup(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_backup_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.create_backup", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "CreateBackup", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _CreateTable( @@ -1507,7 +2392,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> gba_table.Table: r"""Call the create table method over HTTP. @@ -1518,8 +2403,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
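Each transport method above now emits "Sending request …" / "Received response …" records, but only when api-core's client-logging support is importable and the module logger is enabled for DEBUG. A minimal sketch of turning that on with the standard library; the logger name is inferred from the package hierarchy rather than spelled out in this hunk (newer api-core releases also expose an environment-variable switch, but configuring the logger directly is the portable route).

    import logging

    # Attach a handler to the root logger and lower the effective level for the
    # google.* loggers; the generated transport only checks isEnabledFor(DEBUG).
    logging.basicConfig(format="%(name)s %(levelname)s %(message)s")
    logging.getLogger("google.cloud.bigtable_admin_v2").setLevel(logging.DEBUG)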
Returns: ~.gba_table.Table: @@ -1533,6 +2420,7 @@ def __call__( http_options = ( _BaseBigtableTableAdminRestTransport._BaseCreateTable._get_http_options() ) + request, metadata = self._interceptor.pre_create_table(request, metadata) transcoded_request = _BaseBigtableTableAdminRestTransport._BaseCreateTable._get_transcoded_request( http_options, request @@ -1547,6 +2435,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.CreateTable", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "CreateTable", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableTableAdminRestTransport._CreateTable._get_response( self._host, @@ -1568,7 +2483,33 @@ def __call__( pb_resp = gba_table.Table.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_table(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_table_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = gba_table.Table.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.create_table", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "CreateTable", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _CreateTableFromSnapshot( @@ -1607,7 +2548,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the create table from snapshot method over HTTP. @@ -1626,8 +2567,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.operations_pb2.Operation: @@ -1640,6 +2583,7 @@ def __call__( http_options = ( _BaseBigtableTableAdminRestTransport._BaseCreateTableFromSnapshot._get_http_options() ) + request, metadata = self._interceptor.pre_create_table_from_snapshot( request, metadata ) @@ -1656,6 +2600,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.CreateTableFromSnapshot", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "CreateTableFromSnapshot", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = ( BigtableTableAdminRestTransport._CreateTableFromSnapshot._get_response( @@ -1677,7 +2648,33 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_table_from_snapshot(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_table_from_snapshot_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.create_table_from_snapshot", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "CreateTableFromSnapshot", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _DeleteAuthorizedView( @@ -1715,7 +2712,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ): r"""Call the delete authorized view method over HTTP. @@ -1726,13 +2723,16 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
""" http_options = ( _BaseBigtableTableAdminRestTransport._BaseDeleteAuthorizedView._get_http_options() ) + request, metadata = self._interceptor.pre_delete_authorized_view( request, metadata ) @@ -1745,6 +2745,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.DeleteAuthorizedView", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "DeleteAuthorizedView", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = ( BigtableTableAdminRestTransport._DeleteAuthorizedView._get_response( @@ -1797,7 +2824,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ): r"""Call the delete backup method over HTTP. @@ -1808,13 +2835,16 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ http_options = ( _BaseBigtableTableAdminRestTransport._BaseDeleteBackup._get_http_options() ) + request, metadata = self._interceptor.pre_delete_backup(request, metadata) transcoded_request = _BaseBigtableTableAdminRestTransport._BaseDeleteBackup._get_transcoded_request( http_options, request @@ -1825,6 +2855,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.DeleteBackup", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "DeleteBackup", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableTableAdminRestTransport._DeleteBackup._get_response( self._host, @@ -1875,7 +2932,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ): r"""Call the delete snapshot method over HTTP. 
@@ -1893,13 +2950,16 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ http_options = ( _BaseBigtableTableAdminRestTransport._BaseDeleteSnapshot._get_http_options() ) + request, metadata = self._interceptor.pre_delete_snapshot(request, metadata) transcoded_request = _BaseBigtableTableAdminRestTransport._BaseDeleteSnapshot._get_transcoded_request( http_options, request @@ -1910,6 +2970,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.DeleteSnapshot", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "DeleteSnapshot", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableTableAdminRestTransport._DeleteSnapshot._get_response( self._host, @@ -1960,7 +3047,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ): r"""Call the delete table method over HTTP. @@ -1971,13 +3058,16 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
""" http_options = ( _BaseBigtableTableAdminRestTransport._BaseDeleteTable._get_http_options() ) + request, metadata = self._interceptor.pre_delete_table(request, metadata) transcoded_request = _BaseBigtableTableAdminRestTransport._BaseDeleteTable._get_transcoded_request( http_options, request @@ -1988,6 +3078,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.DeleteTable", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "DeleteTable", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableTableAdminRestTransport._DeleteTable._get_response( self._host, @@ -2039,7 +3156,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ): r"""Call the drop row range method over HTTP. @@ -2050,13 +3167,16 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
""" http_options = ( _BaseBigtableTableAdminRestTransport._BaseDropRowRange._get_http_options() ) + request, metadata = self._interceptor.pre_drop_row_range(request, metadata) transcoded_request = _BaseBigtableTableAdminRestTransport._BaseDropRowRange._get_transcoded_request( http_options, request @@ -2071,6 +3191,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.DropRowRange", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "DropRowRange", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableTableAdminRestTransport._DropRowRange._get_response( self._host, @@ -2123,7 +3270,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable_table_admin.GenerateConsistencyTokenResponse: r"""Call the generate consistency token method over HTTP. @@ -2135,8 +3282,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.bigtable_table_admin.GenerateConsistencyTokenResponse: @@ -2148,6 +3297,7 @@ def __call__( http_options = ( _BaseBigtableTableAdminRestTransport._BaseGenerateConsistencyToken._get_http_options() ) + request, metadata = self._interceptor.pre_generate_consistency_token( request, metadata ) @@ -2164,6 +3314,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.GenerateConsistencyToken", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "GenerateConsistencyToken", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = ( BigtableTableAdminRestTransport._GenerateConsistencyToken._get_response( @@ -2187,7 +3364,37 @@ def __call__( pb_resp = bigtable_table_admin.GenerateConsistencyTokenResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_generate_consistency_token(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_generate_consistency_token_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = ( + bigtable_table_admin.GenerateConsistencyTokenResponse.to_json( + response + ) + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.generate_consistency_token", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "GenerateConsistencyToken", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _GetAuthorizedView( @@ -2225,7 +3432,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> table.AuthorizedView: r"""Call the get authorized view method over HTTP. @@ -2236,8 +3443,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.table.AuthorizedView: @@ -2253,6 +3462,7 @@ def __call__( http_options = ( _BaseBigtableTableAdminRestTransport._BaseGetAuthorizedView._get_http_options() ) + request, metadata = self._interceptor.pre_get_authorized_view( request, metadata ) @@ -2265,6 +3475,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.GetAuthorizedView", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "GetAuthorizedView", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableTableAdminRestTransport._GetAuthorizedView._get_response( self._host, @@ -2285,7 +3522,33 @@ def __call__( pb_resp = table.AuthorizedView.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_authorized_view(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_authorized_view_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = table.AuthorizedView.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.get_authorized_view", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "GetAuthorizedView", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _GetBackup( @@ -2322,7 +3585,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> table.Backup: r"""Call the get backup method over HTTP. @@ -2333,8 +3596,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.table.Backup: @@ -2344,6 +3609,7 @@ def __call__( http_options = ( _BaseBigtableTableAdminRestTransport._BaseGetBackup._get_http_options() ) + request, metadata = self._interceptor.pre_get_backup(request, metadata) transcoded_request = _BaseBigtableTableAdminRestTransport._BaseGetBackup._get_transcoded_request( http_options, request @@ -2354,6 +3620,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.GetBackup", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "GetBackup", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableTableAdminRestTransport._GetBackup._get_response( self._host, @@ -2374,7 +3667,33 @@ def __call__( pb_resp = table.Backup.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_backup(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_backup_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = table.Backup.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.get_backup", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "GetBackup", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _GetIamPolicy( @@ -2413,7 +3732,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: r"""Call the get iam policy method over HTTP. @@ -2423,8 +3742,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.policy_pb2.Policy: @@ -2509,6 +3830,7 @@ def __call__( http_options = ( _BaseBigtableTableAdminRestTransport._BaseGetIamPolicy._get_http_options() ) + request, metadata = self._interceptor.pre_get_iam_policy(request, metadata) transcoded_request = _BaseBigtableTableAdminRestTransport._BaseGetIamPolicy._get_transcoded_request( http_options, request @@ -2523,6 +3845,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.GetIamPolicy", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "GetIamPolicy", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableTableAdminRestTransport._GetIamPolicy._get_response( self._host, @@ -2544,7 +3893,33 @@ def __call__( pb_resp = resp json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_iam_policy(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_iam_policy_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.get_iam_policy", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "GetIamPolicy", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _GetSnapshot( @@ -2582,7 +3957,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> table.Snapshot: r"""Call the get snapshot method over HTTP. @@ -2600,8 +3975,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.table.Snapshot: @@ -2624,6 +4001,7 @@ def __call__( http_options = ( _BaseBigtableTableAdminRestTransport._BaseGetSnapshot._get_http_options() ) + request, metadata = self._interceptor.pre_get_snapshot(request, metadata) transcoded_request = _BaseBigtableTableAdminRestTransport._BaseGetSnapshot._get_transcoded_request( http_options, request @@ -2634,6 +4012,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.GetSnapshot", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "GetSnapshot", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableTableAdminRestTransport._GetSnapshot._get_response( self._host, @@ -2654,7 +4059,33 @@ def __call__( pb_resp = table.Snapshot.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_snapshot(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_snapshot_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = table.Snapshot.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.get_snapshot", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "GetSnapshot", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _GetTable( @@ -2691,7 +4122,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> table.Table: r"""Call the get table method over HTTP. @@ -2702,8 +4133,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.table.Table: @@ -2717,6 +4150,7 @@ def __call__( http_options = ( _BaseBigtableTableAdminRestTransport._BaseGetTable._get_http_options() ) + request, metadata = self._interceptor.pre_get_table(request, metadata) transcoded_request = _BaseBigtableTableAdminRestTransport._BaseGetTable._get_transcoded_request( http_options, request @@ -2727,6 +4161,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.GetTable", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "GetTable", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableTableAdminRestTransport._GetTable._get_response( self._host, @@ -2747,7 +4208,33 @@ def __call__( pb_resp = table.Table.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_table(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_table_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = table.Table.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.get_table", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "GetTable", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _ListAuthorizedViews( @@ -2785,7 +4272,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable_table_admin.ListAuthorizedViewsResponse: r"""Call the list authorized views method over HTTP. @@ -2796,8 +4283,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.bigtable_table_admin.ListAuthorizedViewsResponse: @@ -2809,6 +4298,7 @@ def __call__( http_options = ( _BaseBigtableTableAdminRestTransport._BaseListAuthorizedViews._get_http_options() ) + request, metadata = self._interceptor.pre_list_authorized_views( request, metadata ) @@ -2821,6 +4311,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.ListAuthorizedViews", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "ListAuthorizedViews", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = ( BigtableTableAdminRestTransport._ListAuthorizedViews._get_response( @@ -2843,7 +4360,37 @@ def __call__( pb_resp = bigtable_table_admin.ListAuthorizedViewsResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_authorized_views(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_authorized_views_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = ( + bigtable_table_admin.ListAuthorizedViewsResponse.to_json( + response + ) + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.list_authorized_views", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "ListAuthorizedViews", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _ListBackups( @@ -2881,7 +4428,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable_table_admin.ListBackupsResponse: r"""Call the list backups method over HTTP. @@ -2892,8 +4439,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.bigtable_table_admin.ListBackupsResponse: @@ -2905,6 +4454,7 @@ def __call__( http_options = ( _BaseBigtableTableAdminRestTransport._BaseListBackups._get_http_options() ) + request, metadata = self._interceptor.pre_list_backups(request, metadata) transcoded_request = _BaseBigtableTableAdminRestTransport._BaseListBackups._get_transcoded_request( http_options, request @@ -2915,6 +4465,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.ListBackups", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "ListBackups", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableTableAdminRestTransport._ListBackups._get_response( self._host, @@ -2935,7 +4512,35 @@ def __call__( pb_resp = bigtable_table_admin.ListBackupsResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_backups(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_backups_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = bigtable_table_admin.ListBackupsResponse.to_json( + response + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.list_backups", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "ListBackups", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _ListSnapshots( @@ -2973,7 +4578,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable_table_admin.ListSnapshotsResponse: r"""Call the list snapshots method over HTTP. @@ -2991,8 +4596,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.bigtable_table_admin.ListSnapshotsResponse: @@ -3011,6 +4618,7 @@ def __call__( http_options = ( _BaseBigtableTableAdminRestTransport._BaseListSnapshots._get_http_options() ) + request, metadata = self._interceptor.pre_list_snapshots(request, metadata) transcoded_request = _BaseBigtableTableAdminRestTransport._BaseListSnapshots._get_transcoded_request( http_options, request @@ -3021,6 +4629,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.ListSnapshots", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "ListSnapshots", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableTableAdminRestTransport._ListSnapshots._get_response( self._host, @@ -3041,7 +4676,35 @@ def __call__( pb_resp = bigtable_table_admin.ListSnapshotsResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_snapshots(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_snapshots_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = ( + bigtable_table_admin.ListSnapshotsResponse.to_json(response) + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.list_snapshots", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "ListSnapshots", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _ListTables( @@ -3078,7 +4741,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable_table_admin.ListTablesResponse: r"""Call the list tables method over HTTP. @@ -3089,8 +4752,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.bigtable_table_admin.ListTablesResponse: @@ -3102,6 +4767,7 @@ def __call__( http_options = ( _BaseBigtableTableAdminRestTransport._BaseListTables._get_http_options() ) + request, metadata = self._interceptor.pre_list_tables(request, metadata) transcoded_request = _BaseBigtableTableAdminRestTransport._BaseListTables._get_transcoded_request( http_options, request @@ -3112,6 +4778,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.ListTables", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "ListTables", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableTableAdminRestTransport._ListTables._get_response( self._host, @@ -3132,7 +4825,35 @@ def __call__( pb_resp = bigtable_table_admin.ListTablesResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_tables(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_tables_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = bigtable_table_admin.ListTablesResponse.to_json( + response + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.list_tables", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "ListTables", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _ModifyColumnFamilies( @@ -3171,7 +4892,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> table.Table: r"""Call the modify column families method over HTTP. @@ -3182,8 +4903,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.table.Table: @@ -3197,6 +4920,7 @@ def __call__( http_options = ( _BaseBigtableTableAdminRestTransport._BaseModifyColumnFamilies._get_http_options() ) + request, metadata = self._interceptor.pre_modify_column_families( request, metadata ) @@ -3213,6 +4937,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.ModifyColumnFamilies", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "ModifyColumnFamilies", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = ( BigtableTableAdminRestTransport._ModifyColumnFamilies._get_response( @@ -3236,7 +4987,33 @@ def __call__( pb_resp = table.Table.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_modify_column_families(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_modify_column_families_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = table.Table.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.modify_column_families", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "ModifyColumnFamilies", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _RestoreTable( @@ -3275,7 +5052,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the restore table method over HTTP. @@ -3286,8 +5063,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.operations_pb2.Operation: @@ -3300,6 +5079,7 @@ def __call__( http_options = ( _BaseBigtableTableAdminRestTransport._BaseRestoreTable._get_http_options() ) + request, metadata = self._interceptor.pre_restore_table(request, metadata) transcoded_request = _BaseBigtableTableAdminRestTransport._BaseRestoreTable._get_transcoded_request( http_options, request @@ -3314,6 +5094,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.RestoreTable", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "RestoreTable", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableTableAdminRestTransport._RestoreTable._get_response( self._host, @@ -3333,7 +5140,33 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_restore_table(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_restore_table_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.restore_table", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "RestoreTable", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _SetIamPolicy( @@ -3372,7 +5205,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: r"""Call the set iam policy method over HTTP. @@ -3382,8 +5215,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.policy_pb2.Policy: @@ -3468,6 +5303,7 @@ def __call__( http_options = ( _BaseBigtableTableAdminRestTransport._BaseSetIamPolicy._get_http_options() ) + request, metadata = self._interceptor.pre_set_iam_policy(request, metadata) transcoded_request = _BaseBigtableTableAdminRestTransport._BaseSetIamPolicy._get_transcoded_request( http_options, request @@ -3482,6 +5318,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.SetIamPolicy", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "SetIamPolicy", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableTableAdminRestTransport._SetIamPolicy._get_response( self._host, @@ -3503,7 +5366,33 @@ def __call__( pb_resp = resp json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_set_iam_policy(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_set_iam_policy_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.set_iam_policy", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "SetIamPolicy", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _SnapshotTable( @@ -3542,7 +5431,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the snapshot table method over HTTP. @@ -3560,8 +5449,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.operations_pb2.Operation: @@ -3574,6 +5465,7 @@ def __call__( http_options = ( _BaseBigtableTableAdminRestTransport._BaseSnapshotTable._get_http_options() ) + request, metadata = self._interceptor.pre_snapshot_table(request, metadata) transcoded_request = _BaseBigtableTableAdminRestTransport._BaseSnapshotTable._get_transcoded_request( http_options, request @@ -3588,6 +5480,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.SnapshotTable", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "SnapshotTable", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableTableAdminRestTransport._SnapshotTable._get_response( self._host, @@ -3607,7 +5526,33 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_snapshot_table(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_snapshot_table_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.snapshot_table", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "SnapshotTable", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _TestIamPermissions( @@ -3646,7 +5591,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: r"""Call the test iam permissions method over HTTP. @@ -3656,8 +5601,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.iam_policy_pb2.TestIamPermissionsResponse: @@ -3667,6 +5614,7 @@ def __call__( http_options = ( _BaseBigtableTableAdminRestTransport._BaseTestIamPermissions._get_http_options() ) + request, metadata = self._interceptor.pre_test_iam_permissions( request, metadata ) @@ -3683,6 +5631,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.TestIamPermissions", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "TestIamPermissions", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = ( BigtableTableAdminRestTransport._TestIamPermissions._get_response( @@ -3706,7 +5681,33 @@ def __call__( pb_resp = resp json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_test_iam_permissions(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_test_iam_permissions_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.test_iam_permissions", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "TestIamPermissions", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _UndeleteTable( @@ -3745,7 +5746,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the undelete table method over HTTP. @@ -3756,8 +5757,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.operations_pb2.Operation: @@ -3770,6 +5773,7 @@ def __call__( http_options = ( _BaseBigtableTableAdminRestTransport._BaseUndeleteTable._get_http_options() ) + request, metadata = self._interceptor.pre_undelete_table(request, metadata) transcoded_request = _BaseBigtableTableAdminRestTransport._BaseUndeleteTable._get_transcoded_request( http_options, request @@ -3784,6 +5788,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.UndeleteTable", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "UndeleteTable", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableTableAdminRestTransport._UndeleteTable._get_response( self._host, @@ -3803,7 +5834,33 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_undelete_table(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_undelete_table_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.undelete_table", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "UndeleteTable", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _UpdateAuthorizedView( @@ -3842,7 +5899,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the update authorized view method over HTTP. @@ -3853,8 +5910,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.operations_pb2.Operation: @@ -3867,6 +5926,7 @@ def __call__( http_options = ( _BaseBigtableTableAdminRestTransport._BaseUpdateAuthorizedView._get_http_options() ) + request, metadata = self._interceptor.pre_update_authorized_view( request, metadata ) @@ -3883,6 +5943,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.UpdateAuthorizedView", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "UpdateAuthorizedView", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = ( BigtableTableAdminRestTransport._UpdateAuthorizedView._get_response( @@ -3904,7 +5991,33 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_update_authorized_view(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_update_authorized_view_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.update_authorized_view", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "UpdateAuthorizedView", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _UpdateBackup( @@ -3943,7 +6056,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> table.Backup: r"""Call the update backup method over HTTP. @@ -3954,8 +6067,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.table.Backup: @@ -3965,6 +6080,7 @@ def __call__( http_options = ( _BaseBigtableTableAdminRestTransport._BaseUpdateBackup._get_http_options() ) + request, metadata = self._interceptor.pre_update_backup(request, metadata) transcoded_request = _BaseBigtableTableAdminRestTransport._BaseUpdateBackup._get_transcoded_request( http_options, request @@ -3979,6 +6095,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.UpdateBackup", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "UpdateBackup", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableTableAdminRestTransport._UpdateBackup._get_response( self._host, @@ -4000,7 +6143,33 @@ def __call__( pb_resp = table.Backup.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_update_backup(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_update_backup_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = table.Backup.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.update_backup", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "UpdateBackup", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _UpdateTable( @@ -4039,7 +6208,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the update table method over HTTP. @@ -4050,8 +6219,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.operations_pb2.Operation: @@ -4064,6 +6235,7 @@ def __call__( http_options = ( _BaseBigtableTableAdminRestTransport._BaseUpdateTable._get_http_options() ) + request, metadata = self._interceptor.pre_update_table(request, metadata) transcoded_request = _BaseBigtableTableAdminRestTransport._BaseUpdateTable._get_transcoded_request( http_options, request @@ -4078,6 +6250,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.UpdateTable", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "UpdateTable", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableTableAdminRestTransport._UpdateTable._get_response( self._host, @@ -4097,7 +6296,33 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_update_table(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_update_table_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.update_table", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "UpdateTable", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp @property diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py index 3ff9075d21a7..817bbc89adbf 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py @@ -19,12 +19,20 @@ CreateClusterRequest, CreateInstanceMetadata, CreateInstanceRequest, + CreateLogicalViewMetadata, + CreateLogicalViewRequest, + CreateMaterializedViewMetadata, + CreateMaterializedViewRequest, DeleteAppProfileRequest, DeleteClusterRequest, DeleteInstanceRequest, + DeleteLogicalViewRequest, + DeleteMaterializedViewRequest, GetAppProfileRequest, GetClusterRequest, GetInstanceRequest, + GetLogicalViewRequest, + GetMaterializedViewRequest, ListAppProfilesRequest, ListAppProfilesResponse, ListClustersRequest, @@ -33,6 +41,10 @@ ListHotTabletsResponse, ListInstancesRequest, ListInstancesResponse, + ListLogicalViewsRequest, + ListLogicalViewsResponse, + ListMaterializedViewsRequest, + ListMaterializedViewsResponse, PartialUpdateClusterMetadata, PartialUpdateClusterRequest, PartialUpdateInstanceRequest, @@ -40,6 +52,10 @@ UpdateAppProfileRequest, 
UpdateClusterMetadata, UpdateInstanceMetadata, + UpdateLogicalViewMetadata, + UpdateLogicalViewRequest, + UpdateMaterializedViewMetadata, + UpdateMaterializedViewRequest, ) from .bigtable_table_admin import ( CheckConsistencyRequest, @@ -99,6 +115,8 @@ Cluster, HotTablet, Instance, + LogicalView, + MaterializedView, ) from .table import ( AuthorizedView, @@ -123,12 +141,20 @@ "CreateClusterRequest", "CreateInstanceMetadata", "CreateInstanceRequest", + "CreateLogicalViewMetadata", + "CreateLogicalViewRequest", + "CreateMaterializedViewMetadata", + "CreateMaterializedViewRequest", "DeleteAppProfileRequest", "DeleteClusterRequest", "DeleteInstanceRequest", + "DeleteLogicalViewRequest", + "DeleteMaterializedViewRequest", "GetAppProfileRequest", "GetClusterRequest", "GetInstanceRequest", + "GetLogicalViewRequest", + "GetMaterializedViewRequest", "ListAppProfilesRequest", "ListAppProfilesResponse", "ListClustersRequest", @@ -137,6 +163,10 @@ "ListHotTabletsResponse", "ListInstancesRequest", "ListInstancesResponse", + "ListLogicalViewsRequest", + "ListLogicalViewsResponse", + "ListMaterializedViewsRequest", + "ListMaterializedViewsResponse", "PartialUpdateClusterMetadata", "PartialUpdateClusterRequest", "PartialUpdateInstanceRequest", @@ -144,6 +174,10 @@ "UpdateAppProfileRequest", "UpdateClusterMetadata", "UpdateInstanceMetadata", + "UpdateLogicalViewMetadata", + "UpdateLogicalViewRequest", + "UpdateMaterializedViewMetadata", + "UpdateMaterializedViewRequest", "CheckConsistencyRequest", "CheckConsistencyResponse", "CopyBackupMetadata", @@ -197,6 +231,8 @@ "Cluster", "HotTablet", "Instance", + "LogicalView", + "MaterializedView", "AuthorizedView", "Backup", "BackupInfo", diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py index 4e5ddfd6efa0..5bed1c4f782d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py @@ -53,6 +53,22 @@ "UpdateAppProfileMetadata", "ListHotTabletsRequest", "ListHotTabletsResponse", + "CreateLogicalViewRequest", + "CreateLogicalViewMetadata", + "GetLogicalViewRequest", + "ListLogicalViewsRequest", + "ListLogicalViewsResponse", + "UpdateLogicalViewRequest", + "UpdateLogicalViewMetadata", + "DeleteLogicalViewRequest", + "CreateMaterializedViewRequest", + "CreateMaterializedViewMetadata", + "GetMaterializedViewRequest", + "ListMaterializedViewsRequest", + "ListMaterializedViewsResponse", + "UpdateMaterializedViewRequest", + "UpdateMaterializedViewMetadata", + "DeleteMaterializedViewRequest", }, ) @@ -77,8 +93,7 @@ class CreateInstanceRequest(proto.Message): mapped by desired cluster ID, e.g., just ``mycluster`` rather than ``projects/myproject/instances/myinstance/clusters/mycluster``. - Fields marked ``OutputOnly`` must be left blank. Currently, - at most four clusters can be specified. + Fields marked ``OutputOnly`` must be left blank. """ parent: str = proto.Field( @@ -888,4 +903,462 @@ def raw_page(self): ) +class CreateLogicalViewRequest(proto.Message): + r"""Request message for BigtableInstanceAdmin.CreateLogicalView. + + Attributes: + parent (str): + Required. The parent instance where this logical view will + be created. Format: + ``projects/{project}/instances/{instance}``. + logical_view_id (str): + Required. 
The ID to use for the logical view, + which will become the final component of the + logical view's resource name. + logical_view (google.cloud.bigtable_admin_v2.types.LogicalView): + Required. The logical view to create. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + logical_view_id: str = proto.Field( + proto.STRING, + number=2, + ) + logical_view: gba_instance.LogicalView = proto.Field( + proto.MESSAGE, + number=3, + message=gba_instance.LogicalView, + ) + + +class CreateLogicalViewMetadata(proto.Message): + r"""The metadata for the Operation returned by CreateLogicalView. + + Attributes: + original_request (google.cloud.bigtable_admin_v2.types.CreateLogicalViewRequest): + The request that prompted the initiation of + this CreateLogicalView operation. + start_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which this operation started. + end_time (google.protobuf.timestamp_pb2.Timestamp): + If set, the time at which this operation + finished or was canceled. + """ + + original_request: "CreateLogicalViewRequest" = proto.Field( + proto.MESSAGE, + number=1, + message="CreateLogicalViewRequest", + ) + start_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + end_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + + +class GetLogicalViewRequest(proto.Message): + r"""Request message for BigtableInstanceAdmin.GetLogicalView. + + Attributes: + name (str): + Required. The unique name of the requested logical view. + Values are of the form + ``projects/{project}/instances/{instance}/logicalViews/{logical_view}``. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class ListLogicalViewsRequest(proto.Message): + r"""Request message for BigtableInstanceAdmin.ListLogicalViews. + + Attributes: + parent (str): + Required. The unique name of the instance for which the list + of logical views is requested. Values are of the form + ``projects/{project}/instances/{instance}``. + page_size (int): + Optional. The maximum number of logical views + to return. The service may return fewer than + this value + page_token (str): + Optional. A page token, received from a previous + ``ListLogicalViews`` call. Provide this to retrieve the + subsequent page. + + When paginating, all other parameters provided to + ``ListLogicalViews`` must match the call that provided the + page token. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + page_size: int = proto.Field( + proto.INT32, + number=2, + ) + page_token: str = proto.Field( + proto.STRING, + number=3, + ) + + +class ListLogicalViewsResponse(proto.Message): + r"""Response message for BigtableInstanceAdmin.ListLogicalViews. + + Attributes: + logical_views (MutableSequence[google.cloud.bigtable_admin_v2.types.LogicalView]): + The list of requested logical views. + next_page_token (str): + A token, which can be sent as ``page_token`` to retrieve the + next page. If this field is omitted, there are no subsequent + pages. + """ + + @property + def raw_page(self): + return self + + logical_views: MutableSequence[gba_instance.LogicalView] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gba_instance.LogicalView, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdateLogicalViewRequest(proto.Message): + r"""Request message for BigtableInstanceAdmin.UpdateLogicalView. 
+ + Attributes: + logical_view (google.cloud.bigtable_admin_v2.types.LogicalView): + Required. The logical view to update. + + The logical view's ``name`` field is used to identify the + view to update. Format: + ``projects/{project}/instances/{instance}/logicalViews/{logical_view}``. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Optional. The list of fields to update. + """ + + logical_view: gba_instance.LogicalView = proto.Field( + proto.MESSAGE, + number=1, + message=gba_instance.LogicalView, + ) + update_mask: field_mask_pb2.FieldMask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class UpdateLogicalViewMetadata(proto.Message): + r"""The metadata for the Operation returned by UpdateLogicalView. + + Attributes: + original_request (google.cloud.bigtable_admin_v2.types.UpdateLogicalViewRequest): + The request that prompted the initiation of + this UpdateLogicalView operation. + start_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which this operation was started. + end_time (google.protobuf.timestamp_pb2.Timestamp): + If set, the time at which this operation + finished or was canceled. + """ + + original_request: "UpdateLogicalViewRequest" = proto.Field( + proto.MESSAGE, + number=1, + message="UpdateLogicalViewRequest", + ) + start_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + end_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + + +class DeleteLogicalViewRequest(proto.Message): + r"""Request message for BigtableInstanceAdmin.DeleteLogicalView. + + Attributes: + name (str): + Required. The unique name of the logical view to be deleted. + Format: + ``projects/{project}/instances/{instance}/logicalViews/{logical_view}``. + etag (str): + Optional. The current etag of the logical + view. If an etag is provided and does not match + the current etag of the logical view, deletion + will be blocked and an ABORTED error will be + returned. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + etag: str = proto.Field( + proto.STRING, + number=2, + ) + + +class CreateMaterializedViewRequest(proto.Message): + r"""Request message for + BigtableInstanceAdmin.CreateMaterializedView. + + Attributes: + parent (str): + Required. The parent instance where this materialized view + will be created. Format: + ``projects/{project}/instances/{instance}``. + materialized_view_id (str): + Required. The ID to use for the materialized + view, which will become the final component of + the materialized view's resource name. + materialized_view (google.cloud.bigtable_admin_v2.types.MaterializedView): + Required. The materialized view to create. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + materialized_view_id: str = proto.Field( + proto.STRING, + number=2, + ) + materialized_view: gba_instance.MaterializedView = proto.Field( + proto.MESSAGE, + number=3, + message=gba_instance.MaterializedView, + ) + + +class CreateMaterializedViewMetadata(proto.Message): + r"""The metadata for the Operation returned by + CreateMaterializedView. + + Attributes: + original_request (google.cloud.bigtable_admin_v2.types.CreateMaterializedViewRequest): + The request that prompted the initiation of + this CreateMaterializedView operation. + start_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which this operation started. 
+ end_time (google.protobuf.timestamp_pb2.Timestamp): + If set, the time at which this operation + finished or was canceled. + """ + + original_request: "CreateMaterializedViewRequest" = proto.Field( + proto.MESSAGE, + number=1, + message="CreateMaterializedViewRequest", + ) + start_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + end_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + + +class GetMaterializedViewRequest(proto.Message): + r"""Request message for + BigtableInstanceAdmin.GetMaterializedView. + + Attributes: + name (str): + Required. The unique name of the requested materialized + view. Values are of the form + ``projects/{project}/instances/{instance}/materializedViews/{materialized_view}``. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class ListMaterializedViewsRequest(proto.Message): + r"""Request message for + BigtableInstanceAdmin.ListMaterializedViews. + + Attributes: + parent (str): + Required. The unique name of the instance for which the list + of materialized views is requested. Values are of the form + ``projects/{project}/instances/{instance}``. + page_size (int): + Optional. The maximum number of materialized + views to return. The service may return fewer + than this value + page_token (str): + Optional. A page token, received from a previous + ``ListMaterializedViews`` call. Provide this to retrieve the + subsequent page. + + When paginating, all other parameters provided to + ``ListMaterializedViews`` must match the call that provided + the page token. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + page_size: int = proto.Field( + proto.INT32, + number=2, + ) + page_token: str = proto.Field( + proto.STRING, + number=3, + ) + + +class ListMaterializedViewsResponse(proto.Message): + r"""Response message for + BigtableInstanceAdmin.ListMaterializedViews. + + Attributes: + materialized_views (MutableSequence[google.cloud.bigtable_admin_v2.types.MaterializedView]): + The list of requested materialized views. + next_page_token (str): + A token, which can be sent as ``page_token`` to retrieve the + next page. If this field is omitted, there are no subsequent + pages. + """ + + @property + def raw_page(self): + return self + + materialized_views: MutableSequence[ + gba_instance.MaterializedView + ] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gba_instance.MaterializedView, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdateMaterializedViewRequest(proto.Message): + r"""Request message for + BigtableInstanceAdmin.UpdateMaterializedView. + + Attributes: + materialized_view (google.cloud.bigtable_admin_v2.types.MaterializedView): + Required. The materialized view to update. + + The materialized view's ``name`` field is used to identify + the view to update. Format: + ``projects/{project}/instances/{instance}/materializedViews/{materialized_view}``. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Optional. The list of fields to update. 
+ """ + + materialized_view: gba_instance.MaterializedView = proto.Field( + proto.MESSAGE, + number=1, + message=gba_instance.MaterializedView, + ) + update_mask: field_mask_pb2.FieldMask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class UpdateMaterializedViewMetadata(proto.Message): + r"""The metadata for the Operation returned by + UpdateMaterializedView. + + Attributes: + original_request (google.cloud.bigtable_admin_v2.types.UpdateMaterializedViewRequest): + The request that prompted the initiation of + this UpdateMaterializedView operation. + start_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which this operation was started. + end_time (google.protobuf.timestamp_pb2.Timestamp): + If set, the time at which this operation + finished or was canceled. + """ + + original_request: "UpdateMaterializedViewRequest" = proto.Field( + proto.MESSAGE, + number=1, + message="UpdateMaterializedViewRequest", + ) + start_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + end_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + + +class DeleteMaterializedViewRequest(proto.Message): + r"""Request message for + BigtableInstanceAdmin.DeleteMaterializedView. + + Attributes: + name (str): + Required. The unique name of the materialized view to be + deleted. Format: + ``projects/{project}/instances/{instance}/materializedViews/{materialized_view}``. + etag (str): + Optional. The current etag of the + materialized view. If an etag is provided and + does not match the current etag of the + materialized view, deletion will be blocked and + an ABORTED error will be returned. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + etag: str = proto.Field( + proto.STRING, + number=2, + ) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py index 9d1bf3ef5774..ab8273a0a84c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py @@ -480,9 +480,13 @@ class UpdateTableRequest(proto.Message): - ``change_stream_config`` - ``change_stream_config.retention_period`` - ``deletion_protection`` + - ``row_key_schema`` If ``column_families`` is set in ``update_mask``, it will return an UNIMPLEMENTED error. + ignore_warnings (bool): + Optional. If true, ignore safety checks when + updating the table. 
""" table: gba_table.Table = proto.Field( @@ -495,6 +499,10 @@ class UpdateTableRequest(proto.Message): number=2, message=field_mask_pb2.FieldMask, ) + ignore_warnings: bool = proto.Field( + proto.BOOL, + number=3, + ) class UpdateTableMetadata(proto.Message): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py index 34b52acd2350..19a17c698dc7 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py @@ -32,6 +32,8 @@ "Cluster", "AppProfile", "HotTablet", + "LogicalView", + "MaterializedView", }, ) @@ -55,7 +57,8 @@ class Instance(proto.Message): any time, but should be kept globally unique to avoid confusion. state (google.cloud.bigtable_admin_v2.types.Instance.State): - (``OutputOnly``) The current state of the instance. + Output only. The current state of the + instance. type_ (google.cloud.bigtable_admin_v2.types.Instance.Type): The type of the instance. Defaults to ``PRODUCTION``. labels (MutableMapping[str, str]): @@ -74,14 +77,18 @@ class Instance(proto.Message): resource. - Keys and values must both be under 128 bytes. create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. A server-assigned timestamp representing when - this Instance was created. For instances created before this + Output only. A commit timestamp representing when this + Instance was created. For instances created before this field was added (August 2021), this value is ``seconds: 0, nanos: 1``. satisfies_pzs (bool): Output only. Reserved for future use. This field is a member of `oneof`_ ``_satisfies_pzs``. + satisfies_pzi (bool): + Output only. Reserved for future use. + + This field is a member of `oneof`_ ``_satisfies_pzi``. """ class State(proto.Enum): @@ -157,6 +164,11 @@ class Type(proto.Enum): number=8, optional=True, ) + satisfies_pzi: bool = proto.Field( + proto.BOOL, + number=11, + optional=True, + ) class AutoscalingTargets(proto.Message): @@ -234,9 +246,10 @@ class Cluster(proto.Message): Output only. The current state of the cluster. serve_nodes (int): - The number of nodes allocated to this - cluster. More nodes enable higher throughput and - more consistent performance. + The number of nodes in the cluster. If no + value is set, Cloud Bigtable automatically + allocates nodes based on your data footprint and + optimized for 50% storage utilization. node_scaling_factor (google.cloud.bigtable_admin_v2.types.Cluster.NodeScalingFactor): Immutable. The node scaling factor of this cluster. @@ -361,9 +374,8 @@ class EncryptionConfig(proto.Message): ``cloudkms.cryptoKeyEncrypterDecrypter`` role on the CMEK key. 2) Only regional keys can be used and the region of the CMEK - key must match the region of the cluster. - 3) All clusters within an instance must use the same CMEK - key. Values are of the form + key must match the region of the cluster. Values are of + the form ``projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{key}`` """ @@ -583,17 +595,10 @@ class StandardIsolation(proto.Message): class DataBoostIsolationReadOnly(proto.Message): r"""Data Boost is a serverless compute capability that lets you - run high-throughput read jobs on your Bigtable data, without - impacting the performance of the clusters that handle your - application traffic. 
Currently, Data Boost exclusively supports - read-only use-cases with single-cluster routing. - - Data Boost reads are only guaranteed to see the results of - writes that were written at least 30 minutes ago. This means - newly written values may not become visible for up to 30m, and - also means that old values may remain visible for up to 30m - after being deleted or overwritten. To mitigate the staleness of - the data, users may either wait 30m, or use CheckConsistency. + run high-throughput read jobs and queries on your Bigtable data, + without impacting the performance of the clusters that handle + your application traffic. Data Boost supports read-only use + cases with single-cluster routing. .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields @@ -738,4 +743,77 @@ class HotTablet(proto.Message): ) +class LogicalView(proto.Message): + r"""A SQL logical view object that can be referenced in SQL + queries. + + Attributes: + name (str): + Identifier. The unique name of the logical view. Format: + ``projects/{project}/instances/{instance}/logicalViews/{logical_view}`` + query (str): + Required. The logical view's select query. + etag (str): + Optional. The etag for this logical view. + This may be sent on update requests to ensure + that the client has an up-to-date value before + proceeding. The server returns an ABORTED error + on a mismatched etag. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + query: str = proto.Field( + proto.STRING, + number=2, + ) + etag: str = proto.Field( + proto.STRING, + number=3, + ) + + +class MaterializedView(proto.Message): + r"""A materialized view object that can be referenced in SQL + queries. + + Attributes: + name (str): + Identifier. The unique name of the materialized view. + Format: + ``projects/{project}/instances/{instance}/materializedViews/{materialized_view}`` + query (str): + Required. Immutable. The materialized view's + select query. + etag (str): + Optional. The etag for this materialized + view. This may be sent on update requests to + ensure that the client has an up-to-date value + before proceeding. The server returns an ABORTED + error on a mismatched etag. + deletion_protection (bool): + Set to true to make the MaterializedView + protected against deletion. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + query: str = proto.Field( + proto.STRING, + number=2, + ) + etag: str = proto.Field( + proto.STRING, + number=3, + ) + deletion_protection: bool = proto.Field( + proto.BOOL, + number=6, + ) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py index 241d7853c13d..6dcf7b4a885a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py @@ -163,6 +163,59 @@ class Table(proto.Message): disabled. This field is a member of `oneof`_ ``automated_backup_config``. + row_key_schema (google.cloud.bigtable_admin_v2.types.Type.Struct): + The row key schema for this table. The schema is used to + decode the raw row key bytes into a structured format. The + order of field declarations in this schema is important, as + it reflects how the raw row key bytes are structured. + Currently, this only affects how the key is read via a + GoogleSQL query from the ExecuteQuery API. 
+ + For a SQL query, the \_key column is still read as raw + bytes. But queries can reference the key fields by name, + which will be decoded from \_key using provided type and + encoding. Queries that reference key fields will fail if + they encounter an invalid row key. + + For example, if \_key = + "some_id#2024-04-30#\x00\x13\x00\xf3" with the following + schema: { fields { field_name: "id" type { string { + encoding: utf8_bytes {} } } } fields { field_name: "date" + type { string { encoding: utf8_bytes {} } } } fields { + field_name: "product_code" type { int64 { encoding: + big_endian_bytes {} } } } encoding { delimited_bytes { + delimiter: "#" } } } + + | The decoded key parts would be: id = "some_id", date = + "2024-04-30", product_code = 1245427 The query "SELECT + \_key, product_code FROM table" will return two columns: + /------------------------------------------------------ + | \| \_key \| product_code \| \| + --------------------------------------|--------------\| \| + "some_id#2024-04-30#\x00\x13\x00\xf3" \| 1245427 \| + ------------------------------------------------------/ + + The schema has the following invariants: (1) The decoded + field values are order-preserved. For read, the field values + will be decoded in sorted mode from the raw bytes. (2) Every + field in the schema must specify a non-empty name. (3) Every + field must specify a type with an associated encoding. The + type is limited to scalar types only: Array, Map, Aggregate, + and Struct are not allowed. (4) The field names must not + collide with existing column family names and reserved + keywords "_key" and "_timestamp". + + The following update operations are allowed for + row_key_schema: + + - Update from an empty schema to a new schema. + - Remove the existing schema. This operation requires + setting the ``ignore_warnings`` flag to ``true``, since + it might be a backward incompatible change. Without the + flag, the update request will fail with an + INVALID_ARGUMENT error. Any other row key schema update + operation (e.g. update existing schema columns names or + types) is currently unsupported. """ class TimestampGranularity(proto.Enum): @@ -343,6 +396,11 @@ class AutomatedBackupPolicy(proto.Message): oneof="automated_backup_config", message=AutomatedBackupPolicy, ) + row_key_schema: types.Type.Struct = proto.Field( + proto.MESSAGE, + number=15, + message=types.Type.Struct, + ) class AuthorizedView(proto.Message): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/types.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/types.py index 7d1d9903470c..ec5744156746 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/types.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/types.py @@ -35,34 +35,27 @@ class Type(proto.Message): features. For compatibility with Bigtable's existing untyped APIs, each - ``Type`` includes an ``Encoding`` which describes how to convert - to/from the underlying data. - - Each encoding also defines the following properties: - - - Order-preserving: Does the encoded value sort consistently with - the original typed value? Note that Bigtable will always sort - data based on the raw encoded value, *not* the decoded type. - - - Example: BYTES values sort in the same order as their raw - encodings. - - Counterexample: Encoding INT64 as a fixed-width decimal string - does *not* preserve sort order when dealing with negative - numbers. 
``INT64(1) > INT64(-1)``, but - ``STRING("-00001") > STRING("00001)``. - - - Self-delimiting: If we concatenate two encoded values, can we - always tell where the first one ends and the second one begins? - - - Example: If we encode INT64s to fixed-width STRINGs, the first - value will always contain exactly N digits, possibly preceded - by a sign. - - Counterexample: If we concatenate two UTF-8 encoded STRINGs, - we have no way to tell where the first one ends. - - - Compatibility: Which other systems have matching encoding - schemes? For example, does this encoding have a GoogleSQL - equivalent? HBase? Java? + ``Type`` includes an ``Encoding`` which describes how to convert to + or from the underlying data. + + Each encoding can operate in one of two modes: + + - Sorted: In this mode, Bigtable guarantees that + ``Encode(X) <= Encode(Y)`` if and only if ``X <= Y``. This is + useful anywhere sort order is important, for example when + encoding keys. + - Distinct: In this mode, Bigtable guarantees that if ``X != Y`` + then ``Encode(X) != Encode(Y)``. However, the converse is not + guaranteed. For example, both "{'foo': '1', 'bar': '2'}" and + "{'bar': '2', 'foo': '1'}" are valid encodings of the same JSON + value. + + The API clearly documents which mode is used wherever an encoding + can be configured. Each encoding also documents which values are + supported in which modes. For example, when encoding INT64 as a + numeric STRING, negative numbers cannot be encoded in sorted mode. + This is because ``INT64(1) > INT64(-1)``, but + ``STRING("-00001") > STRING("00001")``. This message has `oneof`_ fields (mutually exclusive fields). For each oneof, at most one member field can be set at the same time. @@ -127,12 +120,12 @@ class Bytes(proto.Message): Attributes: encoding (google.cloud.bigtable_admin_v2.types.Type.Bytes.Encoding): - The encoding to use when converting to/from - lower level types. + The encoding to use when converting to or + from lower level types. """ class Encoding(proto.Message): - r"""Rules used to convert to/from lower level types. + r"""Rules used to convert to or from lower level types. .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields @@ -144,11 +137,11 @@ class Encoding(proto.Message): """ class Raw(proto.Message): - r"""Leaves the value "as-is" + r"""Leaves the value as-is. + + Sorted mode: all values are supported. - - Order-preserving? Yes - - Self-delimiting? No - - Compatibility? N/A + Distinct mode: all values are supported. """ @@ -171,12 +164,12 @@ class String(proto.Message): Attributes: encoding (google.cloud.bigtable_admin_v2.types.Type.String.Encoding): - The encoding to use when converting to/from - lower level types. + The encoding to use when converting to or + from lower level types. """ class Encoding(proto.Message): - r"""Rules used to convert to/from lower level types. + r"""Rules used to convert to or from lower level types. This message has `oneof`_ fields (mutually exclusive fields). For each oneof, at most one member field can be set at the same time. @@ -200,15 +193,20 @@ class Utf8Raw(proto.Message): r"""Deprecated: prefer the equivalent ``Utf8Bytes``.""" class Utf8Bytes(proto.Message): - r"""UTF-8 encoding + r"""UTF-8 encoding. - - Order-preserving? Yes (code point order) - - Self-delimiting? No - - Compatibility? + Sorted mode: - - BigQuery Federation ``TEXT`` encoding - - HBase ``Bytes.toBytes`` - - Java ``String#getBytes(StandardCharsets.UTF_8)`` + - All values are supported. 
+ - Code point order is preserved. + + Distinct mode: all values are supported. + + Compatible with: + + - BigQuery ``TEXT`` encoding + - HBase ``Bytes.toBytes`` + - Java ``String#getBytes(StandardCharsets.UTF_8)`` """ @@ -236,12 +234,17 @@ class Int64(proto.Message): Attributes: encoding (google.cloud.bigtable_admin_v2.types.Type.Int64.Encoding): - The encoding to use when converting to/from - lower level types. + The encoding to use when converting to or + from lower level types. """ class Encoding(proto.Message): - r"""Rules used to convert to/from lower level types. + r"""Rules used to convert to or from lower level types. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields @@ -249,20 +252,25 @@ class Encoding(proto.Message): big_endian_bytes (google.cloud.bigtable_admin_v2.types.Type.Int64.Encoding.BigEndianBytes): Use ``BigEndianBytes`` encoding. + This field is a member of `oneof`_ ``encoding``. + ordered_code_bytes (google.cloud.bigtable_admin_v2.types.Type.Int64.Encoding.OrderedCodeBytes): + Use ``OrderedCodeBytes`` encoding. + This field is a member of `oneof`_ ``encoding``. """ class BigEndianBytes(proto.Message): - r"""Encodes the value as an 8-byte big endian twos complement ``Bytes`` - value. + r"""Encodes the value as an 8-byte big-endian two's complement value. + + Sorted mode: non-negative values are supported. - - Order-preserving? No (positive values only) - - Self-delimiting? Yes - - Compatibility? + Distinct mode: all values are supported. - - BigQuery Federation ``BINARY`` encoding - - HBase ``Bytes.toBytes`` - - Java ``ByteBuffer.putLong()`` with ``ByteOrder.BIG_ENDIAN`` + Compatible with: + + - BigQuery ``BINARY`` encoding + - HBase ``Bytes.toBytes`` + - Java ``ByteBuffer.putLong()`` with ``ByteOrder.BIG_ENDIAN`` Attributes: bytes_type (google.cloud.bigtable_admin_v2.types.Type.Bytes): @@ -275,12 +283,28 @@ class BigEndianBytes(proto.Message): message="Type.Bytes", ) + class OrderedCodeBytes(proto.Message): + r"""Encodes the value in a variable length binary format of up to + 10 bytes. Values that are closer to zero use fewer bytes. + + Sorted mode: all values are supported. + + Distinct mode: all values are supported. + + """ + big_endian_bytes: "Type.Int64.Encoding.BigEndianBytes" = proto.Field( proto.MESSAGE, number=1, oneof="encoding", message="Type.Int64.Encoding.BigEndianBytes", ) + ordered_code_bytes: "Type.Int64.Encoding.OrderedCodeBytes" = proto.Field( + proto.MESSAGE, + number=2, + oneof="encoding", + message="Type.Int64.Encoding.OrderedCodeBytes", + ) encoding: "Type.Int64.Encoding" = proto.Field( proto.MESSAGE, @@ -307,8 +331,43 @@ class Timestamp(proto.Message): r"""Timestamp Values of type ``Timestamp`` are stored in ``Value.timestamp_value``. + Attributes: + encoding (google.cloud.bigtable_admin_v2.types.Type.Timestamp.Encoding): + The encoding to use when converting to or + from lower level types. """ + class Encoding(proto.Message): + r"""Rules used to convert to or from lower level types. + + .. 
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + unix_micros_int64 (google.cloud.bigtable_admin_v2.types.Type.Int64.Encoding): + Encodes the number of microseconds since the Unix epoch + using the given ``Int64`` encoding. Values must be + microsecond-aligned. + + Compatible with: + + - Java ``Instant.truncatedTo()`` with ``ChronoUnit.MICROS`` + + This field is a member of `oneof`_ ``encoding``. + """ + + unix_micros_int64: "Type.Int64.Encoding" = proto.Field( + proto.MESSAGE, + number=1, + oneof="encoding", + message="Type.Int64.Encoding", + ) + + encoding: "Type.Timestamp.Encoding" = proto.Field( + proto.MESSAGE, + number=1, + message="Type.Timestamp.Encoding", + ) + class Date(proto.Message): r"""Date Values of type ``Date`` are stored in ``Value.date_value``.""" @@ -322,6 +381,9 @@ class Struct(proto.Message): fields (MutableSequence[google.cloud.bigtable_admin_v2.types.Type.Struct.Field]): The names and types of the fields in this struct. + encoding (google.cloud.bigtable_admin_v2.types.Type.Struct.Encoding): + The encoding to use when converting to or + from lower level types. """ class Field(proto.Message): @@ -345,11 +407,146 @@ class Field(proto.Message): message="Type", ) + class Encoding(proto.Message): + r"""Rules used to convert to or from lower level types. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + singleton (google.cloud.bigtable_admin_v2.types.Type.Struct.Encoding.Singleton): + Use ``Singleton`` encoding. + + This field is a member of `oneof`_ ``encoding``. + delimited_bytes (google.cloud.bigtable_admin_v2.types.Type.Struct.Encoding.DelimitedBytes): + Use ``DelimitedBytes`` encoding. + + This field is a member of `oneof`_ ``encoding``. + ordered_code_bytes (google.cloud.bigtable_admin_v2.types.Type.Struct.Encoding.OrderedCodeBytes): + User ``OrderedCodeBytes`` encoding. + + This field is a member of `oneof`_ ``encoding``. + """ + + class Singleton(proto.Message): + r"""Uses the encoding of ``fields[0].type`` as-is. Only valid if + ``fields.size == 1``. + + """ + + class DelimitedBytes(proto.Message): + r"""Fields are encoded independently and concatenated with a + configurable ``delimiter`` in between. + + A struct with no fields defined is encoded as a single + ``delimiter``. + + Sorted mode: + + - Fields are encoded in sorted mode. + - Encoded field values must not contain any bytes <= + ``delimiter[0]`` + - Element-wise order is preserved: ``A < B`` if ``A[0] < B[0]``, or + if ``A[0] == B[0] && A[1] < B[1]``, etc. Strict prefixes sort + first. + + Distinct mode: + + - Fields are encoded in distinct mode. + - Encoded field values must not contain ``delimiter[0]``. + + Attributes: + delimiter (bytes): + Byte sequence used to delimit concatenated + fields. The delimiter must contain at least 1 + character and at most 50 characters. + """ + + delimiter: bytes = proto.Field( + proto.BYTES, + number=1, + ) + + class OrderedCodeBytes(proto.Message): + r"""Fields are encoded independently and concatenated with the fixed + byte pair {0x00, 0x01} in between. + + Any null (0x00) byte in an encoded field is replaced by the fixed + byte pair {0x00, 0xFF}. 
+ + Fields that encode to the empty string "" have special handling: + + - If *every* field encodes to "", or if the STRUCT has no fields + defined, then the STRUCT is encoded as the fixed byte pair {0x00, + 0x00}. + - Otherwise, the STRUCT only encodes until the last non-empty + field, omitting any trailing empty fields. Any empty fields that + aren't omitted are replaced with the fixed byte pair {0x00, + 0x00}. + + Examples: + + - STRUCT() -> "\00\00" + - STRUCT("") -> "\00\00" + - STRUCT("", "") -> "\00\00" + - STRUCT("", "B") -> "\00\00" + "\00\01" + "B" + - STRUCT("A", "") -> "A" + - STRUCT("", "B", "") -> "\00\00" + "\00\01" + "B" + - STRUCT("A", "", "C") -> "A" + "\00\01" + "\00\00" + "\00\01" + + "C" + + Since null bytes are always escaped, this encoding can cause size + blowup for encodings like ``Int64.BigEndianBytes`` that are likely + to produce many such bytes. + + Sorted mode: + + - Fields are encoded in sorted mode. + - All values supported by the field encodings are allowed + - Element-wise order is preserved: ``A < B`` if ``A[0] < B[0]``, or + if ``A[0] == B[0] && A[1] < B[1]``, etc. Strict prefixes sort + first. + + Distinct mode: + + - Fields are encoded in distinct mode. + - All values supported by the field encodings are allowed. + + """ + + singleton: "Type.Struct.Encoding.Singleton" = proto.Field( + proto.MESSAGE, + number=1, + oneof="encoding", + message="Type.Struct.Encoding.Singleton", + ) + delimited_bytes: "Type.Struct.Encoding.DelimitedBytes" = proto.Field( + proto.MESSAGE, + number=2, + oneof="encoding", + message="Type.Struct.Encoding.DelimitedBytes", + ) + ordered_code_bytes: "Type.Struct.Encoding.OrderedCodeBytes" = proto.Field( + proto.MESSAGE, + number=3, + oneof="encoding", + message="Type.Struct.Encoding.OrderedCodeBytes", + ) + fields: MutableSequence["Type.Struct.Field"] = proto.RepeatedField( proto.MESSAGE, number=1, message="Type.Struct.Field", ) + encoding: "Type.Struct.Encoding" = proto.Field( + proto.MESSAGE, + number=2, + message="Type.Struct.Encoding", + ) class Array(proto.Message): r"""An ordered list of elements of a given type. 
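As an illustrative aside (not part of the generated module above), the new ``Struct`` encoding surface can describe the "id#date#product_code" key layout from the earlier ``row_key_schema`` example. This is a minimal sketch: it assumes ``Type`` is importable from ``google.cloud.bigtable_admin_v2.types`` and uses dict construction so it does not depend on exact proto-plus attribute names.

    # Sketch only: a row key schema equivalent to the delimited "#" example above.
    from google.cloud.bigtable_admin_v2.types import Type

    row_key_schema = Type.Struct(
        {
            "fields": [
                {"field_name": "id", "type": {"string_type": {"encoding": {"utf8_bytes": {}}}}},
                {"field_name": "date", "type": {"string_type": {"encoding": {"utf8_bytes": {}}}}},
                {"field_name": "product_code", "type": {"int64_type": {"encoding": {"big_endian_bytes": {}}}}},
            ],
            # Fields are concatenated with "#" in between, matching the example key.
            "encoding": {"delimited_bytes": {"delimiter": b"#"}},
        }
    )
    # The resulting Struct could then be used as the table's ``row_key_schema``
    # value added earlier in this patch (field number 15).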
Values of type diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py index f2b3ddf284e4..a7ff5ac1dfa2 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py @@ -33,6 +33,8 @@ from .types.bigtable import MutateRowsResponse from .types.bigtable import PingAndWarmRequest from .types.bigtable import PingAndWarmResponse +from .types.bigtable import PrepareQueryRequest +from .types.bigtable import PrepareQueryResponse from .types.bigtable import RateLimitInfo from .types.bigtable import ReadChangeStreamRequest from .types.bigtable import ReadChangeStreamResponse @@ -99,6 +101,8 @@ "PartialResultSet", "PingAndWarmRequest", "PingAndWarmResponse", + "PrepareQueryRequest", + "PrepareQueryResponse", "ProtoFormat", "ProtoRows", "ProtoRowsBatch", diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_metadata.json b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_metadata.json index fd47c04350f9..83504fbc12e2 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_metadata.json +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_metadata.json @@ -40,6 +40,11 @@ "ping_and_warm" ] }, + "PrepareQuery": { + "methods": [ + "prepare_query" + ] + }, "ReadChangeStream": { "methods": [ "read_change_stream" @@ -95,6 +100,11 @@ "ping_and_warm" ] }, + "PrepareQuery": { + "methods": [ + "prepare_query" + ] + }, "ReadChangeStream": { "methods": [ "read_change_stream" @@ -150,6 +160,11 @@ "ping_and_warm" ] }, + "PrepareQuery": { + "methods": [ + "prepare_query" + ] + }, "ReadChangeStream": { "methods": [ "read_change_stream" diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py index 08317e1ebb08..3d4e2373d17e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
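For orientation (illustrative only, not part of the patch): with the exports and ``gapic_metadata.json`` entries above, the new request and response types become importable from the versioned package, and the ``prepare_query`` method documented below accepts either a request object or the flattened fields. A hypothetical construction, assuming the field names shown later in this diff:

    from google.cloud.bigtable_v2 import PrepareQueryRequest

    request = PrepareQueryRequest(
        instance_name="projects/my-project/instances/my-instance",  # hypothetical resource name
        query="SELECT _key FROM my_table",                          # hypothetical table
        app_profile_id="my-profile",                                # optional routing profile
    )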
# +import logging as std_logging from collections import OrderedDict import re from typing import ( @@ -48,10 +49,20 @@ from google.cloud.bigtable_v2.types import bigtable from google.cloud.bigtable_v2.types import data from google.cloud.bigtable_v2.types import request_stats +from google.protobuf import timestamp_pb2 # type: ignore from .transports.base import BigtableTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import BigtableGrpcAsyncIOTransport from .client import BigtableClient +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + class BigtableAsyncClient: """Service for reading from and writing to existing Bigtable @@ -71,6 +82,10 @@ class BigtableAsyncClient: parse_authorized_view_path = staticmethod(BigtableClient.parse_authorized_view_path) instance_path = staticmethod(BigtableClient.instance_path) parse_instance_path = staticmethod(BigtableClient.parse_instance_path) + materialized_view_path = staticmethod(BigtableClient.materialized_view_path) + parse_materialized_view_path = staticmethod( + BigtableClient.parse_materialized_view_path + ) table_path = staticmethod(BigtableClient.table_path) parse_table_path = staticmethod(BigtableClient.parse_table_path) common_billing_account_path = staticmethod( @@ -255,6 +270,28 @@ def __init__( client_info=client_info, ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ): # pragma: NO COVER + _LOGGER.debug( + "Created client `google.bigtable_v2.BigtableAsyncClient`.", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "universeDomain": getattr( + self._client._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._client._transport, "_credentials") + else { + "serviceName": "google.bigtable.v2.Bigtable", + "credentialsType": None, + }, + ) + def read_rows( self, request: Optional[Union[bigtable.ReadRowsRequest, dict]] = None, @@ -263,7 +300,7 @@ def read_rows( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> Awaitable[AsyncIterable[bigtable.ReadRowsResponse]]: r"""Streams back the contents of all requested rows in key order, optionally applying the same Reader filter to @@ -298,8 +335,10 @@ def read_rows( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: AsyncIterable[google.cloud.bigtable_v2.types.ReadRowsResponse]: @@ -310,7 +349,10 @@ def read_rows( # Create or coerce a protobuf request object. 
# - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([table_name, app_profile_id]) + flattened_params = [table_name, app_profile_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -357,9 +399,9 @@ def read_rows( ) if header_params: - metadata = tuple(metadata) - if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): - metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) # Validate the universe domain. self._client._validate_universe_domain() @@ -383,7 +425,7 @@ def sample_row_keys( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> Awaitable[AsyncIterable[bigtable.SampleRowKeysResponse]]: r"""Returns a sample of row keys in the table. The returned row keys will delimit contiguous sections of @@ -417,8 +459,10 @@ def sample_row_keys( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: AsyncIterable[google.cloud.bigtable_v2.types.SampleRowKeysResponse]: @@ -429,7 +473,10 @@ def sample_row_keys( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([table_name, app_profile_id]) + flattened_params = [table_name, app_profile_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -476,9 +523,9 @@ def sample_row_keys( ) if header_params: - metadata = tuple(metadata) - if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): - metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) # Validate the universe domain. self._client._validate_universe_domain() @@ -504,7 +551,7 @@ async def mutate_row( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable.MutateRowResponse: r"""Mutates a row atomically. Cells already present in the row are left unchanged unless explicitly changed by ``mutation``. @@ -553,8 +600,10 @@ async def mutate_row( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. 
timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_v2.types.MutateRowResponse: @@ -565,7 +614,10 @@ async def mutate_row( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([table_name, row_key, mutations, app_profile_id]) + flattened_params = [table_name, row_key, mutations, app_profile_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -616,9 +668,9 @@ async def mutate_row( ) if header_params: - metadata = tuple(metadata) - if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): - metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) # Validate the universe domain. self._client._validate_universe_domain() @@ -643,7 +695,7 @@ def mutate_rows( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> Awaitable[AsyncIterable[bigtable.MutateRowsResponse]]: r"""Mutates multiple rows in a batch. Each individual row is mutated atomically as in MutateRow, but the entire @@ -689,8 +741,10 @@ def mutate_rows( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: AsyncIterable[google.cloud.bigtable_v2.types.MutateRowsResponse]: @@ -701,7 +755,10 @@ def mutate_rows( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([table_name, entries, app_profile_id]) + flattened_params = [table_name, entries, app_profile_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -750,9 +807,9 @@ def mutate_rows( ) if header_params: - metadata = tuple(metadata) - if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): - metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) # Validate the universe domain. 
self._client._validate_universe_domain() @@ -780,7 +837,7 @@ async def check_and_mutate_row( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable.CheckAndMutateRowResponse: r"""Mutates a row atomically based on the output of a predicate Reader filter. @@ -851,8 +908,10 @@ async def check_and_mutate_row( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_v2.types.CheckAndMutateRowResponse: @@ -863,15 +922,16 @@ async def check_and_mutate_row( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any( - [ - table_name, - row_key, - predicate_filter, - true_mutations, - false_mutations, - app_profile_id, - ] + flattened_params = [ + table_name, + row_key, + predicate_filter, + true_mutations, + false_mutations, + app_profile_id, + ] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 ) if request is not None and has_flattened_params: raise ValueError( @@ -927,9 +987,9 @@ async def check_and_mutate_row( ) if header_params: - metadata = tuple(metadata) - if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): - metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) # Validate the universe domain. self._client._validate_universe_domain() @@ -953,7 +1013,7 @@ async def ping_and_warm( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable.PingAndWarmResponse: r"""Warm up associated instance metadata for this connection. This call is not required but may be useful @@ -983,8 +1043,10 @@ async def ping_and_warm( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_v2.types.PingAndWarmResponse: @@ -996,7 +1058,10 @@ async def ping_and_warm( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name, app_profile_id]) + flattened_params = [name, app_profile_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1032,9 +1097,9 @@ async def ping_and_warm( header_params["app_profile_id"] = request.app_profile_id if header_params: - metadata = tuple(metadata) - if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): - metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) # Validate the universe domain. self._client._validate_universe_domain() @@ -1060,7 +1125,7 @@ async def read_modify_write_row( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable.ReadModifyWriteRowResponse: r"""Modifies a row atomically on the server. The method reads the latest existing timestamp and value from the @@ -1115,8 +1180,10 @@ async def read_modify_write_row( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_v2.types.ReadModifyWriteRowResponse: @@ -1127,7 +1194,10 @@ async def read_modify_write_row( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([table_name, row_key, rules, app_profile_id]) + flattened_params = [table_name, row_key, rules, app_profile_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1178,9 +1248,9 @@ async def read_modify_write_row( ) if header_params: - metadata = tuple(metadata) - if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): - metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) # Validate the universe domain. self._client._validate_universe_domain() @@ -1206,7 +1276,7 @@ def generate_initial_change_stream_partitions( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> Awaitable[ AsyncIterable[bigtable.GenerateInitialChangeStreamPartitionsResponse] ]: @@ -1243,8 +1313,10 @@ def generate_initial_change_stream_partitions( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: AsyncIterable[google.cloud.bigtable_v2.types.GenerateInitialChangeStreamPartitionsResponse]: @@ -1257,7 +1329,10 @@ def generate_initial_change_stream_partitions( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([table_name, app_profile_id]) + flattened_params = [table_name, app_profile_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1314,7 +1389,7 @@ def read_change_stream( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> Awaitable[AsyncIterable[bigtable.ReadChangeStreamResponse]]: r"""NOTE: This API is intended to be used by Apache Beam BigtableIO. Reads changes from a table's change stream. @@ -1348,8 +1423,10 @@ def read_change_stream( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: AsyncIterable[google.cloud.bigtable_v2.types.ReadChangeStreamResponse]: @@ -1361,7 +1438,10 @@ def read_change_stream( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([table_name, app_profile_id]) + flattened_params = [table_name, app_profile_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1408,6 +1488,124 @@ def read_change_stream( # Done; return the response. return response + async def prepare_query( + self, + request: Optional[Union[bigtable.PrepareQueryRequest, dict]] = None, + *, + instance_name: Optional[str] = None, + query: Optional[str] = None, + app_profile_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> bigtable.PrepareQueryResponse: + r"""Prepares a GoogleSQL query for execution on a + particular Bigtable instance. + + Args: + request (Optional[Union[google.cloud.bigtable_v2.types.PrepareQueryRequest, dict]]): + The request object. Request message for + Bigtable.PrepareQuery + instance_name (:class:`str`): + Required. 
The unique name of the instance against which + the query should be executed. Values are of the form + ``projects//instances/`` + + This corresponds to the ``instance_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + query (:class:`str`): + Required. The query string. + This corresponds to the ``query`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile_id (:class:`str`): + Optional. This value specifies routing for preparing the + query. Note that this ``app_profile_id`` is only used + for preparing the query. The actual query execution will + use the app profile specified in the + ``ExecuteQueryRequest``. If not specified, the + ``default`` application profile will be used. + + This corresponds to the ``app_profile_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.bigtable_v2.types.PrepareQueryResponse: + Response message for + Bigtable.PrepareQueryResponse + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [instance_name, query, app_profile_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable.PrepareQueryRequest): + request = bigtable.PrepareQueryRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if instance_name is not None: + request.instance_name = instance_name + if query is not None: + request.query = query + if app_profile_id is not None: + request.app_profile_id = app_profile_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.prepare_query + ] + + header_params = {} + + routing_param_regex = re.compile("^(?Pprojects/[^/]+/instances/[^/]+)$") + regex_match = routing_param_regex.match(request.instance_name) + if regex_match and regex_match.group("name"): + header_params["name"] = regex_match.group("name") + + if request.app_profile_id: + header_params["app_profile_id"] = request.app_profile_id + + if header_params: + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + def execute_query( self, request: Optional[Union[bigtable.ExecuteQueryRequest, dict]] = None, @@ -1417,10 +1615,10 @@ def execute_query( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> Awaitable[AsyncIterable[bigtable.ExecuteQueryResponse]]: - r"""Executes a BTQL query against a particular Cloud - Bigtable instance. + r"""Executes a SQL query against a particular Bigtable + instance. Args: request (Optional[Union[google.cloud.bigtable_v2.types.ExecuteQueryRequest, dict]]): @@ -1436,6 +1634,11 @@ def execute_query( should not be set. query (:class:`str`): Required. The query string. + + Exactly one of ``query`` and ``prepared_query`` is + required. Setting both or neither is an + ``INVALID_ARGUMENT``. + This corresponds to the ``query`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -1450,8 +1653,10 @@ def execute_query( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: AsyncIterable[google.cloud.bigtable_v2.types.ExecuteQueryResponse]: @@ -1462,7 +1667,10 @@ def execute_query( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([instance_name, query, app_profile_id]) + flattened_params = [instance_name, query, app_profile_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1496,14 +1704,13 @@ def execute_query( if regex_match and regex_match.group("name"): header_params["name"] = regex_match.group("name") - if request.app_profile_id is not None: - # execute_query currently requires empty header support. TODO: remove after support is added + if request.app_profile_id: header_params["app_profile_id"] = request.app_profile_id if header_params: - metadata = tuple(metadata) - if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): - metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) # Validate the universe domain. self._client._validate_universe_domain() diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py index 42723c6612a5..330a2252099c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py @@ -14,6 +14,9 @@ # limitations under the License. 
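Taken together, the two RPCs above support a prepare-then-execute flow. The following is a usage sketch only: the ``prepared_query`` attribute on ``PrepareQueryResponse`` and the ``prepared_query`` field on ``ExecuteQueryRequest`` are assumptions inferred from the docstrings in this diff ("Exactly one of ``query`` and ``prepared_query`` is required"), not confirmed by the generated code shown here.

    from google.cloud import bigtable_v2

    async def run_prepared_query():
        client = bigtable_v2.BigtableAsyncClient()
        instance = "projects/my-project/instances/my-instance"  # hypothetical
        # Prepare the SQL statement once against the instance.
        prepared = await client.prepare_query(
            instance_name=instance,
            query="SELECT _key, product_code FROM my_table",  # hypothetical table
        )
        # Execute it, passing the prepared query instead of raw SQL.
        request = bigtable_v2.ExecuteQueryRequest(
            instance_name=instance,
            prepared_query=prepared.prepared_query,  # assumed field names
        )
        async for response in await client.execute_query(request=request):
            ...  # consume each streamed ExecuteQueryResponse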
# from collections import OrderedDict +from http import HTTPStatus +import json +import logging as std_logging import os import re from typing import ( @@ -49,9 +52,19 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.Retry, object, None] # type: ignore +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + from google.cloud.bigtable_v2.types import bigtable from google.cloud.bigtable_v2.types import data from google.cloud.bigtable_v2.types import request_stats +from google.protobuf import timestamp_pb2 # type: ignore from .transports.base import BigtableTransport, DEFAULT_CLIENT_INFO from .transports.grpc import BigtableGrpcTransport from .transports.grpc_asyncio import BigtableGrpcAsyncIOTransport @@ -225,6 +238,28 @@ def parse_instance_path(path: str) -> Dict[str, str]: m = re.match(r"^projects/(?P.+?)/instances/(?P.+?)$", path) return m.groupdict() if m else {} + @staticmethod + def materialized_view_path( + project: str, + instance: str, + materialized_view: str, + ) -> str: + """Returns a fully-qualified materialized_view string.""" + return "projects/{project}/instances/{instance}/materializedViews/{materialized_view}".format( + project=project, + instance=instance, + materialized_view=materialized_view, + ) + + @staticmethod + def parse_materialized_view_path(path: str) -> Dict[str, str]: + """Parses a materialized_view path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/instances/(?P.+?)/materializedViews/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def table_path( project: str, @@ -516,6 +551,33 @@ def _validate_universe_domain(self): # NOTE (b/349488459): universe validation is disabled until further notice. return True + def _add_cred_info_for_auth_errors( + self, error: core_exceptions.GoogleAPICallError + ) -> None: + """Adds credential info string to error details for 401/403/404 errors. + + Args: + error (google.api_core.exceptions.GoogleAPICallError): The error to add the cred info. + """ + if error.code not in [ + HTTPStatus.UNAUTHORIZED, + HTTPStatus.FORBIDDEN, + HTTPStatus.NOT_FOUND, + ]: + return + + cred = self._transport._credentials + + # get_cred_info is only available in google-auth>=2.35.0 + if not hasattr(cred, "get_cred_info"): + return + + # ignore the type check since pypy test fails when get_cred_info + # is not available + cred_info = cred.get_cred_info() # type: ignore + if cred_info and hasattr(error._details, "append"): + error._details.append(json.dumps(cred_info)) + @property def api_endpoint(self): """Return the API endpoint used by the client instance. @@ -620,6 +682,10 @@ def __init__( # Initialize the universe domain validation. self._is_universe_domain_valid = False + if CLIENT_LOGGING_SUPPORTED: # pragma: NO COVER + # Setup logging. 
+ client_logging.initialize_logging() + api_key_value = getattr(self._client_options, "api_key", None) if api_key_value and credentials: raise ValueError( @@ -682,6 +748,29 @@ def __init__( api_audience=self._client_options.api_audience, ) + if "async" not in str(self._transport): + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ): # pragma: NO COVER + _LOGGER.debug( + "Created client `google.bigtable_v2.BigtableClient`.", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "universeDomain": getattr( + self._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._transport, "_credentials") + else { + "serviceName": "google.bigtable.v2.Bigtable", + "credentialsType": None, + }, + ) + def read_rows( self, request: Optional[Union[bigtable.ReadRowsRequest, dict]] = None, @@ -690,7 +779,7 @@ def read_rows( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> Iterable[bigtable.ReadRowsResponse]: r"""Streams back the contents of all requested rows in key order, optionally applying the same Reader filter to @@ -725,8 +814,10 @@ def read_rows( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: Iterable[google.cloud.bigtable_v2.types.ReadRowsResponse]: @@ -737,7 +828,10 @@ def read_rows( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([table_name, app_profile_id]) + flattened_params = [table_name, app_profile_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -807,7 +901,7 @@ def sample_row_keys( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> Iterable[bigtable.SampleRowKeysResponse]: r"""Returns a sample of row keys in the table. The returned row keys will delimit contiguous sections of @@ -841,8 +935,10 @@ def sample_row_keys( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: Iterable[google.cloud.bigtable_v2.types.SampleRowKeysResponse]: @@ -853,7 +949,10 @@ def sample_row_keys( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([table_name, app_profile_id]) + flattened_params = [table_name, app_profile_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -925,7 +1024,7 @@ def mutate_row( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable.MutateRowResponse: r"""Mutates a row atomically. Cells already present in the row are left unchanged unless explicitly changed by ``mutation``. @@ -974,8 +1073,10 @@ def mutate_row( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_v2.types.MutateRowResponse: @@ -986,7 +1087,10 @@ def mutate_row( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([table_name, row_key, mutations, app_profile_id]) + flattened_params = [table_name, row_key, mutations, app_profile_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1061,7 +1165,7 @@ def mutate_rows( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> Iterable[bigtable.MutateRowsResponse]: r"""Mutates multiple rows in a batch. Each individual row is mutated atomically as in MutateRow, but the entire @@ -1107,8 +1211,10 @@ def mutate_rows( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: Iterable[google.cloud.bigtable_v2.types.MutateRowsResponse]: @@ -1119,7 +1225,10 @@ def mutate_rows( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([table_name, entries, app_profile_id]) + flattened_params = [table_name, entries, app_profile_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1195,7 +1304,7 @@ def check_and_mutate_row( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable.CheckAndMutateRowResponse: r"""Mutates a row atomically based on the output of a predicate Reader filter. @@ -1266,8 +1375,10 @@ def check_and_mutate_row( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_v2.types.CheckAndMutateRowResponse: @@ -1278,15 +1389,16 @@ def check_and_mutate_row( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any( - [ - table_name, - row_key, - predicate_filter, - true_mutations, - false_mutations, - app_profile_id, - ] + flattened_params = [ + table_name, + row_key, + predicate_filter, + true_mutations, + false_mutations, + app_profile_id, + ] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 ) if request is not None and has_flattened_params: raise ValueError( @@ -1365,7 +1477,7 @@ def ping_and_warm( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable.PingAndWarmResponse: r"""Warm up associated instance metadata for this connection. This call is not required but may be useful @@ -1395,8 +1507,10 @@ def ping_and_warm( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_v2.types.PingAndWarmResponse: @@ -1408,7 +1522,10 @@ def ping_and_warm( # Create or coerce a protobuf request object. 
# - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, app_profile_id]) + flattened_params = [name, app_profile_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1469,7 +1586,7 @@ def read_modify_write_row( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable.ReadModifyWriteRowResponse: r"""Modifies a row atomically on the server. The method reads the latest existing timestamp and value from the @@ -1524,8 +1641,10 @@ def read_modify_write_row( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_v2.types.ReadModifyWriteRowResponse: @@ -1536,7 +1655,10 @@ def read_modify_write_row( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([table_name, row_key, rules, app_profile_id]) + flattened_params = [table_name, row_key, rules, app_profile_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1612,7 +1734,7 @@ def generate_initial_change_stream_partitions( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> Iterable[bigtable.GenerateInitialChangeStreamPartitionsResponse]: r"""NOTE: This API is intended to be used by Apache Beam BigtableIO. Returns the current list of partitions that make up the table's @@ -1647,8 +1769,10 @@ def generate_initial_change_stream_partitions( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: Iterable[google.cloud.bigtable_v2.types.GenerateInitialChangeStreamPartitionsResponse]: @@ -1661,7 +1785,10 @@ def generate_initial_change_stream_partitions( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([table_name, app_profile_id]) + flattened_params = [table_name, app_profile_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1717,7 +1844,7 @@ def read_change_stream( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> Iterable[bigtable.ReadChangeStreamResponse]: r"""NOTE: This API is intended to be used by Apache Beam BigtableIO. Reads changes from a table's change stream. @@ -1751,8 +1878,10 @@ def read_change_stream( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: Iterable[google.cloud.bigtable_v2.types.ReadChangeStreamResponse]: @@ -1764,7 +1893,10 @@ def read_change_stream( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([table_name, app_profile_id]) + flattened_params = [table_name, app_profile_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1808,6 +1940,121 @@ def read_change_stream( # Done; return the response. return response + def prepare_query( + self, + request: Optional[Union[bigtable.PrepareQueryRequest, dict]] = None, + *, + instance_name: Optional[str] = None, + query: Optional[str] = None, + app_profile_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> bigtable.PrepareQueryResponse: + r"""Prepares a GoogleSQL query for execution on a + particular Bigtable instance. + + Args: + request (Union[google.cloud.bigtable_v2.types.PrepareQueryRequest, dict]): + The request object. Request message for + Bigtable.PrepareQuery + instance_name (str): + Required. The unique name of the instance against which + the query should be executed. Values are of the form + ``projects//instances/`` + + This corresponds to the ``instance_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + query (str): + Required. The query string. + This corresponds to the ``query`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile_id (str): + Optional. This value specifies routing for preparing the + query. Note that this ``app_profile_id`` is only used + for preparing the query. The actual query execution will + use the app profile specified in the + ``ExecuteQueryRequest``. If not specified, the + ``default`` application profile will be used. 
+ + This corresponds to the ``app_profile_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.bigtable_v2.types.PrepareQueryResponse: + Response message for + Bigtable.PrepareQueryResponse + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [instance_name, query, app_profile_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable.PrepareQueryRequest): + request = bigtable.PrepareQueryRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if instance_name is not None: + request.instance_name = instance_name + if query is not None: + request.query = query + if app_profile_id is not None: + request.app_profile_id = app_profile_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.prepare_query] + + header_params = {} + + routing_param_regex = re.compile("^(?Pprojects/[^/]+/instances/[^/]+)$") + regex_match = routing_param_regex.match(request.instance_name) + if regex_match and regex_match.group("name"): + header_params["name"] = regex_match.group("name") + + if request.app_profile_id: + header_params["app_profile_id"] = request.app_profile_id + + if header_params: + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + def execute_query( self, request: Optional[Union[bigtable.ExecuteQueryRequest, dict]] = None, @@ -1817,10 +2064,10 @@ def execute_query( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> Iterable[bigtable.ExecuteQueryResponse]: - r"""Executes a BTQL query against a particular Cloud - Bigtable instance. + r"""Executes a SQL query against a particular Bigtable + instance. Args: request (Union[google.cloud.bigtable_v2.types.ExecuteQueryRequest, dict]): @@ -1836,6 +2083,11 @@ def execute_query( should not be set. query (str): Required. The query string. + + Exactly one of ``query`` and ``prepared_query`` is + required. Setting both or neither is an + ``INVALID_ARGUMENT``. 
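A possible end-to-end flow for the new `prepare_query` RPC followed by `execute_query`, sketched under stated assumptions: the diff only shows that `ExecuteQueryRequest` accepts exactly one of `query` and `prepared_query`, so the response field name used below is an assumption for illustration, and the query text is arbitrary.

# Hypothetical usage sketch: prepare a GoogleSQL query, then execute it.
from google.cloud import bigtable_v2

client = bigtable_v2.BigtableClient()

prepare_resp = client.prepare_query(
    instance_name="projects/my-project/instances/my-instance",
    query="SELECT _key FROM `my-table` LIMIT 10",  # illustrative query text
)

# Assumption: the PrepareQueryResponse carries a server-issued handle that can
# be set on ExecuteQueryRequest.prepared_query (field name assumed here).
request = bigtable_v2.ExecuteQueryRequest(
    instance_name="projects/my-project/instances/my-instance",
    prepared_query=prepare_resp.prepared_query,
)
for response in client.execute_query(request=request):
    ...  # consume streamed partial result batches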
+ This corresponds to the ``query`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -1850,8 +2102,10 @@ def execute_query( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: Iterable[google.cloud.bigtable_v2.types.ExecuteQueryResponse]: @@ -1862,7 +2116,10 @@ def execute_query( # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - has_flattened_params = any([instance_name, query, app_profile_id]) + flattened_params = [instance_name, query, app_profile_id] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " @@ -1893,8 +2150,7 @@ def execute_query( if regex_match and regex_match.group("name"): header_params["name"] = regex_match.group("name") - if request.app_profile_id is not None: - # execute_query currently requires empty header support. TODO: remove after support is adde + if request.app_profile_id: header_params["app_profile_id"] = request.app_profile_id if header_params: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py index 17ff3fb3d1d8..72d0638281d2 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py @@ -191,9 +191,24 @@ def _prep_wrapped_messages(self, client_info): default_timeout=43200.0, client_info=client_info, ), + self.prepare_query: gapic_v1.method.wrap_method( + self.prepare_query, + default_timeout=None, + client_info=client_info, + ), self.execute_query: gapic_v1.method.wrap_method( self.execute_query, - default_timeout=None, + default_retry=retries.Retry( + initial=0.01, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=43200.0, + ), + default_timeout=43200.0, client_info=client_info, ), } @@ -302,6 +317,15 @@ def read_change_stream( ]: raise NotImplementedError() + @property + def prepare_query( + self, + ) -> Callable[ + [bigtable.PrepareQueryRequest], + Union[bigtable.PrepareQueryResponse, Awaitable[bigtable.PrepareQueryResponse]], + ]: + raise NotImplementedError() + @property def execute_query( self, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py index febdd441dd29..84bc1dd43c11 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py @@ -13,6 +13,9 @@ # See the License 
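The client methods above derive routing headers from the request: a named regex group over the instance name, plus `app_profile_id` whenever it is non-empty, joined into request metadata via `gapic_v1.routing_header.to_grpc_metadata`. A small sketch of that flow; the group is called "name" because the generated code reads `regex_match.group("name")`:

import re
from google.api_core import gapic_v1

# Named-group pattern matching the instance resource name.
routing_param_regex = re.compile(r"^(?P<name>projects/[^/]+/instances/[^/]+)$")

request_instance_name = "projects/my-project/instances/my-instance"
request_app_profile_id = "my-profile"

header_params = {}
match = routing_param_regex.match(request_instance_name)
if match and match.group("name"):
    header_params["name"] = match.group("name")
if request_app_profile_id:
    header_params["app_profile_id"] = request_app_profile_id

# to_grpc_metadata url-encodes the params into the x-goog-request-params header.
metadata = (gapic_v1.routing_header.to_grpc_metadata(header_params),)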
for the specific language governing permissions and # limitations under the License. # +import json +import logging as std_logging +import pickle import warnings from typing import Callable, Dict, Optional, Sequence, Tuple, Union @@ -21,12 +24,90 @@ import google.auth # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore +from google.protobuf.json_format import MessageToJson +import google.protobuf.message import grpc # type: ignore +import proto # type: ignore from google.cloud.bigtable_v2.types import bigtable from .base import BigtableTransport, DEFAULT_CLIENT_INFO +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + + +class _LoggingClientInterceptor(grpc.UnaryUnaryClientInterceptor): # pragma: NO COVER + def intercept_unary_unary(self, continuation, client_call_details, request): + logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ) + if logging_enabled: # pragma: NO COVER + request_metadata = client_call_details.metadata + if isinstance(request, proto.Message): + request_payload = type(request).to_json(request) + elif isinstance(request, google.protobuf.message.Message): + request_payload = MessageToJson(request) + else: + request_payload = f"{type(request).__name__}: {pickle.dumps(request)}" + + request_metadata = { + key: value.decode("utf-8") if isinstance(value, bytes) else value + for key, value in request_metadata + } + grpc_request = { + "payload": request_payload, + "requestMethod": "grpc", + "metadata": dict(request_metadata), + } + _LOGGER.debug( + f"Sending request for {client_call_details.method}", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": client_call_details.method, + "request": grpc_request, + "metadata": grpc_request["metadata"], + }, + ) + + response = continuation(client_call_details, request) + if logging_enabled: # pragma: NO COVER + response_metadata = response.trailing_metadata() + # Convert gRPC metadata `` to list of tuples + metadata = ( + dict([(k, str(v)) for k, v in response_metadata]) + if response_metadata + else None + ) + result = response.result() + if isinstance(result, proto.Message): + response_payload = type(result).to_json(result) + elif isinstance(result, google.protobuf.message.Message): + response_payload = MessageToJson(result) + else: + response_payload = f"{type(result).__name__}: {pickle.dumps(result)}" + grpc_response = { + "payload": response_payload, + "metadata": metadata, + "status": "OK", + } + _LOGGER.debug( + f"Received response for {client_call_details.method}.", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": client_call_details.method, + "response": grpc_response, + "metadata": grpc_response["metadata"], + }, + ) + return response + class BigtableGrpcTransport(BigtableTransport): """gRPC backend transport for Bigtable. @@ -181,7 +262,12 @@ def __init__( ], ) - # Wrap messages. This must be done after self._grpc_channel exists + self._interceptor = _LoggingClientInterceptor() + self._logged_channel = grpc.intercept_channel( + self._grpc_channel, self._interceptor + ) + + # Wrap messages. 
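The interceptor above only emits its request/response records when `google.api_core.client_logging` is importable and the transport module's logger is enabled for DEBUG. A sketch of turning that on from application code; the handler configuration is just one possible choice:

import logging

# The interceptor checks _LOGGER.isEnabledFor(logging.DEBUG) on the transport
# module's own logger, so enabling DEBUG on that namespace (or the root
# logger) surfaces the per-RPC debug records.
logging.basicConfig(level=logging.DEBUG)
logging.getLogger(
    "google.cloud.bigtable_v2.services.bigtable.transports.grpc"
).setLevel(logging.DEBUG)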
This must be done after self._logged_channel exists self._prep_wrapped_messages(client_info) @classmethod @@ -260,7 +346,7 @@ def read_rows( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "read_rows" not in self._stubs: - self._stubs["read_rows"] = self.grpc_channel.unary_stream( + self._stubs["read_rows"] = self._logged_channel.unary_stream( "/google.bigtable.v2.Bigtable/ReadRows", request_serializer=bigtable.ReadRowsRequest.serialize, response_deserializer=bigtable.ReadRowsResponse.deserialize, @@ -290,7 +376,7 @@ def sample_row_keys( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "sample_row_keys" not in self._stubs: - self._stubs["sample_row_keys"] = self.grpc_channel.unary_stream( + self._stubs["sample_row_keys"] = self._logged_channel.unary_stream( "/google.bigtable.v2.Bigtable/SampleRowKeys", request_serializer=bigtable.SampleRowKeysRequest.serialize, response_deserializer=bigtable.SampleRowKeysResponse.deserialize, @@ -317,7 +403,7 @@ def mutate_row( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "mutate_row" not in self._stubs: - self._stubs["mutate_row"] = self.grpc_channel.unary_unary( + self._stubs["mutate_row"] = self._logged_channel.unary_unary( "/google.bigtable.v2.Bigtable/MutateRow", request_serializer=bigtable.MutateRowRequest.serialize, response_deserializer=bigtable.MutateRowResponse.deserialize, @@ -345,7 +431,7 @@ def mutate_rows( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "mutate_rows" not in self._stubs: - self._stubs["mutate_rows"] = self.grpc_channel.unary_stream( + self._stubs["mutate_rows"] = self._logged_channel.unary_stream( "/google.bigtable.v2.Bigtable/MutateRows", request_serializer=bigtable.MutateRowsRequest.serialize, response_deserializer=bigtable.MutateRowsResponse.deserialize, @@ -374,7 +460,7 @@ def check_and_mutate_row( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "check_and_mutate_row" not in self._stubs: - self._stubs["check_and_mutate_row"] = self.grpc_channel.unary_unary( + self._stubs["check_and_mutate_row"] = self._logged_channel.unary_unary( "/google.bigtable.v2.Bigtable/CheckAndMutateRow", request_serializer=bigtable.CheckAndMutateRowRequest.serialize, response_deserializer=bigtable.CheckAndMutateRowResponse.deserialize, @@ -402,7 +488,7 @@ def ping_and_warm( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "ping_and_warm" not in self._stubs: - self._stubs["ping_and_warm"] = self.grpc_channel.unary_unary( + self._stubs["ping_and_warm"] = self._logged_channel.unary_unary( "/google.bigtable.v2.Bigtable/PingAndWarm", request_serializer=bigtable.PingAndWarmRequest.serialize, response_deserializer=bigtable.PingAndWarmResponse.deserialize, @@ -436,7 +522,7 @@ def read_modify_write_row( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "read_modify_write_row" not in self._stubs: - self._stubs["read_modify_write_row"] = self.grpc_channel.unary_unary( + self._stubs["read_modify_write_row"] = self._logged_channel.unary_unary( "/google.bigtable.v2.Bigtable/ReadModifyWriteRow", request_serializer=bigtable.ReadModifyWriteRowRequest.serialize, response_deserializer=bigtable.ReadModifyWriteRowResponse.deserialize, @@ -471,7 +557,7 @@ def generate_initial_change_stream_partitions( if "generate_initial_change_stream_partitions" not in self._stubs: self._stubs[ "generate_initial_change_stream_partitions" - ] = self.grpc_channel.unary_stream( + ] = self._logged_channel.unary_stream( "/google.bigtable.v2.Bigtable/GenerateInitialChangeStreamPartitions", request_serializer=bigtable.GenerateInitialChangeStreamPartitionsRequest.serialize, response_deserializer=bigtable.GenerateInitialChangeStreamPartitionsResponse.deserialize, @@ -502,21 +588,48 @@ def read_change_stream( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "read_change_stream" not in self._stubs: - self._stubs["read_change_stream"] = self.grpc_channel.unary_stream( + self._stubs["read_change_stream"] = self._logged_channel.unary_stream( "/google.bigtable.v2.Bigtable/ReadChangeStream", request_serializer=bigtable.ReadChangeStreamRequest.serialize, response_deserializer=bigtable.ReadChangeStreamResponse.deserialize, ) return self._stubs["read_change_stream"] + @property + def prepare_query( + self, + ) -> Callable[[bigtable.PrepareQueryRequest], bigtable.PrepareQueryResponse]: + r"""Return a callable for the prepare query method over gRPC. + + Prepares a GoogleSQL query for execution on a + particular Bigtable instance. + + Returns: + Callable[[~.PrepareQueryRequest], + ~.PrepareQueryResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "prepare_query" not in self._stubs: + self._stubs["prepare_query"] = self._logged_channel.unary_unary( + "/google.bigtable.v2.Bigtable/PrepareQuery", + request_serializer=bigtable.PrepareQueryRequest.serialize, + response_deserializer=bigtable.PrepareQueryResponse.deserialize, + ) + return self._stubs["prepare_query"] + @property def execute_query( self, ) -> Callable[[bigtable.ExecuteQueryRequest], bigtable.ExecuteQueryResponse]: r"""Return a callable for the execute query method over gRPC. - Executes a BTQL query against a particular Cloud - Bigtable instance. + Executes a SQL query against a particular Bigtable + instance. Returns: Callable[[~.ExecuteQueryRequest], @@ -529,7 +642,7 @@ def execute_query( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "execute_query" not in self._stubs: - self._stubs["execute_query"] = self.grpc_channel.unary_stream( + self._stubs["execute_query"] = self._logged_channel.unary_stream( "/google.bigtable.v2.Bigtable/ExecuteQuery", request_serializer=bigtable.ExecuteQueryRequest.serialize, response_deserializer=bigtable.ExecuteQueryResponse.deserialize, @@ -537,7 +650,7 @@ def execute_query( return self._stubs["execute_query"] def close(self): - self.grpc_channel.close() + self._logged_channel.close() @property def kind(self) -> str: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py index 6f6e1fe850c6..192ce82810ff 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py @@ -14,6 +14,9 @@ # limitations under the License. # import inspect +import json +import pickle +import logging as std_logging import warnings from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union @@ -23,14 +26,93 @@ from google.api_core import retry_async as retries from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore +from google.protobuf.json_format import MessageToJson +import google.protobuf.message import grpc # type: ignore +import proto # type: ignore from grpc.experimental import aio # type: ignore from google.cloud.bigtable_v2.types import bigtable from .base import BigtableTransport, DEFAULT_CLIENT_INFO from .grpc import BigtableGrpcTransport +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + + +class _LoggingClientAIOInterceptor( + grpc.aio.UnaryUnaryClientInterceptor +): # pragma: NO COVER + async def intercept_unary_unary(self, continuation, client_call_details, request): + logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ) + if logging_enabled: # pragma: NO COVER + request_metadata = client_call_details.metadata + if isinstance(request, proto.Message): + request_payload = type(request).to_json(request) + elif isinstance(request, google.protobuf.message.Message): + request_payload = MessageToJson(request) + else: + request_payload = f"{type(request).__name__}: {pickle.dumps(request)}" + + request_metadata = { + key: value.decode("utf-8") if isinstance(value, bytes) else value + for key, value in request_metadata + } + grpc_request = { + "payload": request_payload, + "requestMethod": "grpc", + "metadata": dict(request_metadata), + } + _LOGGER.debug( + f"Sending request for {client_call_details.method}", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": str(client_call_details.method), + "request": grpc_request, + "metadata": grpc_request["metadata"], + }, + ) + response = await continuation(client_call_details, request) + if logging_enabled: # pragma: NO COVER + response_metadata = await response.trailing_metadata() + # Convert gRPC metadata `` to list of tuples + metadata = ( + dict([(k, str(v)) for k, v in response_metadata]) + if response_metadata + else None + ) + result = await response + if isinstance(result, proto.Message): + 
response_payload = type(result).to_json(result) + elif isinstance(result, google.protobuf.message.Message): + response_payload = MessageToJson(result) + else: + response_payload = f"{type(result).__name__}: {pickle.dumps(result)}" + grpc_response = { + "payload": response_payload, + "metadata": metadata, + "status": "OK", + } + _LOGGER.debug( + f"Received response to rpc {client_call_details.method}.", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": str(client_call_details.method), + "response": grpc_response, + "metadata": grpc_response["metadata"], + }, + ) + return response + class BigtableGrpcAsyncIOTransport(BigtableTransport): """gRPC AsyncIO backend transport for Bigtable. @@ -228,10 +310,13 @@ def __init__( ], ) - # Wrap messages. This must be done after self._grpc_channel exists + self._interceptor = _LoggingClientAIOInterceptor() + self._grpc_channel._unary_unary_interceptors.append(self._interceptor) + self._logged_channel = self._grpc_channel self._wrap_with_kind = ( "kind" in inspect.signature(gapic_v1.method_async.wrap_method).parameters ) + # Wrap messages. This must be done after self._logged_channel exists self._prep_wrapped_messages(client_info) @property @@ -268,7 +353,7 @@ def read_rows( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "read_rows" not in self._stubs: - self._stubs["read_rows"] = self.grpc_channel.unary_stream( + self._stubs["read_rows"] = self._logged_channel.unary_stream( "/google.bigtable.v2.Bigtable/ReadRows", request_serializer=bigtable.ReadRowsRequest.serialize, response_deserializer=bigtable.ReadRowsResponse.deserialize, @@ -300,7 +385,7 @@ def sample_row_keys( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "sample_row_keys" not in self._stubs: - self._stubs["sample_row_keys"] = self.grpc_channel.unary_stream( + self._stubs["sample_row_keys"] = self._logged_channel.unary_stream( "/google.bigtable.v2.Bigtable/SampleRowKeys", request_serializer=bigtable.SampleRowKeysRequest.serialize, response_deserializer=bigtable.SampleRowKeysResponse.deserialize, @@ -327,7 +412,7 @@ def mutate_row( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "mutate_row" not in self._stubs: - self._stubs["mutate_row"] = self.grpc_channel.unary_unary( + self._stubs["mutate_row"] = self._logged_channel.unary_unary( "/google.bigtable.v2.Bigtable/MutateRow", request_serializer=bigtable.MutateRowRequest.serialize, response_deserializer=bigtable.MutateRowResponse.deserialize, @@ -355,7 +440,7 @@ def mutate_rows( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "mutate_rows" not in self._stubs: - self._stubs["mutate_rows"] = self.grpc_channel.unary_stream( + self._stubs["mutate_rows"] = self._logged_channel.unary_stream( "/google.bigtable.v2.Bigtable/MutateRows", request_serializer=bigtable.MutateRowsRequest.serialize, response_deserializer=bigtable.MutateRowsResponse.deserialize, @@ -385,7 +470,7 @@ def check_and_mutate_row( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "check_and_mutate_row" not in self._stubs: - self._stubs["check_and_mutate_row"] = self.grpc_channel.unary_unary( + self._stubs["check_and_mutate_row"] = self._logged_channel.unary_unary( "/google.bigtable.v2.Bigtable/CheckAndMutateRow", request_serializer=bigtable.CheckAndMutateRowRequest.serialize, response_deserializer=bigtable.CheckAndMutateRowResponse.deserialize, @@ -415,7 +500,7 @@ def ping_and_warm( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "ping_and_warm" not in self._stubs: - self._stubs["ping_and_warm"] = self.grpc_channel.unary_unary( + self._stubs["ping_and_warm"] = self._logged_channel.unary_unary( "/google.bigtable.v2.Bigtable/PingAndWarm", request_serializer=bigtable.PingAndWarmRequest.serialize, response_deserializer=bigtable.PingAndWarmResponse.deserialize, @@ -450,7 +535,7 @@ def read_modify_write_row( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "read_modify_write_row" not in self._stubs: - self._stubs["read_modify_write_row"] = self.grpc_channel.unary_unary( + self._stubs["read_modify_write_row"] = self._logged_channel.unary_unary( "/google.bigtable.v2.Bigtable/ReadModifyWriteRow", request_serializer=bigtable.ReadModifyWriteRowRequest.serialize, response_deserializer=bigtable.ReadModifyWriteRowResponse.deserialize, @@ -485,7 +570,7 @@ def generate_initial_change_stream_partitions( if "generate_initial_change_stream_partitions" not in self._stubs: self._stubs[ "generate_initial_change_stream_partitions" - ] = self.grpc_channel.unary_stream( + ] = self._logged_channel.unary_stream( "/google.bigtable.v2.Bigtable/GenerateInitialChangeStreamPartitions", request_serializer=bigtable.GenerateInitialChangeStreamPartitionsRequest.serialize, response_deserializer=bigtable.GenerateInitialChangeStreamPartitionsResponse.deserialize, @@ -516,13 +601,42 @@ def read_change_stream( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "read_change_stream" not in self._stubs: - self._stubs["read_change_stream"] = self.grpc_channel.unary_stream( + self._stubs["read_change_stream"] = self._logged_channel.unary_stream( "/google.bigtable.v2.Bigtable/ReadChangeStream", request_serializer=bigtable.ReadChangeStreamRequest.serialize, response_deserializer=bigtable.ReadChangeStreamResponse.deserialize, ) return self._stubs["read_change_stream"] + @property + def prepare_query( + self, + ) -> Callable[ + [bigtable.PrepareQueryRequest], Awaitable[bigtable.PrepareQueryResponse] + ]: + r"""Return a callable for the prepare query method over gRPC. + + Prepares a GoogleSQL query for execution on a + particular Bigtable instance. + + Returns: + Callable[[~.PrepareQueryRequest], + Awaitable[~.PrepareQueryResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "prepare_query" not in self._stubs: + self._stubs["prepare_query"] = self._logged_channel.unary_unary( + "/google.bigtable.v2.Bigtable/PrepareQuery", + request_serializer=bigtable.PrepareQueryRequest.serialize, + response_deserializer=bigtable.PrepareQueryResponse.deserialize, + ) + return self._stubs["prepare_query"] + @property def execute_query( self, @@ -531,8 +645,8 @@ def execute_query( ]: r"""Return a callable for the execute query method over gRPC. - Executes a BTQL query against a particular Cloud - Bigtable instance. + Executes a SQL query against a particular Bigtable + instance. Returns: Callable[[~.ExecuteQueryRequest], @@ -545,7 +659,7 @@ def execute_query( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "execute_query" not in self._stubs: - self._stubs["execute_query"] = self.grpc_channel.unary_stream( + self._stubs["execute_query"] = self._logged_channel.unary_stream( "/google.bigtable.v2.Bigtable/ExecuteQuery", request_serializer=bigtable.ExecuteQueryRequest.serialize, response_deserializer=bigtable.ExecuteQueryResponse.deserialize, @@ -610,9 +724,24 @@ def _prep_wrapped_messages(self, client_info): default_timeout=43200.0, client_info=client_info, ), + self.prepare_query: self._wrap_method( + self.prepare_query, + default_timeout=None, + client_info=client_info, + ), self.execute_query: self._wrap_method( self.execute_query, - default_timeout=None, + default_retry=retries.AsyncRetry( + initial=0.01, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=43200.0, + ), + default_timeout=43200.0, client_info=client_info, ), } @@ -623,7 +752,7 @@ def _wrap_method(self, func, *args, **kwargs): return gapic_v1.method_async.wrap_method(func, *args, **kwargs) def close(self): - return self.grpc_channel.close() + return self._logged_channel.close() @property def kind(self) -> str: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py index 221b04b8a42b..fb0af2af9351 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py @@ -13,9 +13,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# +import logging +import json # type: ignore from google.auth.transport.requests import AuthorizedSession # type: ignore -import json # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.api_core import exceptions as core_exceptions from google.api_core import retry as retries @@ -42,6 +43,14 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.Retry, object, None] # type: ignore +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = logging.getLogger(__name__) DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, @@ -113,6 +122,14 @@ def post_ping_and_warm(self, response): logging.log(f"Received response: {response}") return response + def pre_prepare_query(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_prepare_query(self, response): + logging.log(f"Received response: {response}") + return response + def pre_read_change_stream(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -154,8 +171,10 @@ def post_sample_row_keys(self, response): def pre_check_and_mutate_row( self, request: bigtable.CheckAndMutateRowRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable.CheckAndMutateRowRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable.CheckAndMutateRowRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for check_and_mutate_row Override in a subclass to manipulate the request or metadata @@ -168,15 +187,42 @@ def post_check_and_mutate_row( ) -> bigtable.CheckAndMutateRowResponse: """Post-rpc interceptor for check_and_mutate_row - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_check_and_mutate_row_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the Bigtable server but before - it is returned to user code. + it is returned to user code. This `post_check_and_mutate_row` interceptor runs + before the `post_check_and_mutate_row_with_metadata` interceptor. """ return response + def post_check_and_mutate_row_with_metadata( + self, + response: bigtable.CheckAndMutateRowResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable.CheckAndMutateRowResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for check_and_mutate_row + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the Bigtable server but before it is returned to user code. + + We recommend only using this `post_check_and_mutate_row_with_metadata` + interceptor in new development instead of the `post_check_and_mutate_row` interceptor. + When both interceptors are used, this `post_check_and_mutate_row_with_metadata` interceptor runs after the + `post_check_and_mutate_row` interceptor. The (possibly modified) response returned by + `post_check_and_mutate_row` will be passed to + `post_check_and_mutate_row_with_metadata`. 
+ """ + return response, metadata + def pre_execute_query( - self, request: bigtable.ExecuteQueryRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[bigtable.ExecuteQueryRequest, Sequence[Tuple[str, str]]]: + self, + request: bigtable.ExecuteQueryRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[bigtable.ExecuteQueryRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for execute_query Override in a subclass to manipulate the request or metadata @@ -189,18 +235,44 @@ def post_execute_query( ) -> rest_streaming.ResponseIterator: """Post-rpc interceptor for execute_query - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_execute_query_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the Bigtable server but before - it is returned to user code. + it is returned to user code. This `post_execute_query` interceptor runs + before the `post_execute_query_with_metadata` interceptor. """ return response + def post_execute_query_with_metadata( + self, + response: rest_streaming.ResponseIterator, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + rest_streaming.ResponseIterator, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for execute_query + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the Bigtable server but before it is returned to user code. + + We recommend only using this `post_execute_query_with_metadata` + interceptor in new development instead of the `post_execute_query` interceptor. + When both interceptors are used, this `post_execute_query_with_metadata` interceptor runs after the + `post_execute_query` interceptor. The (possibly modified) response returned by + `post_execute_query` will be passed to + `post_execute_query_with_metadata`. + """ + return response, metadata + def pre_generate_initial_change_stream_partitions( self, request: bigtable.GenerateInitialChangeStreamPartitionsRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - bigtable.GenerateInitialChangeStreamPartitionsRequest, Sequence[Tuple[str, str]] + bigtable.GenerateInitialChangeStreamPartitionsRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for generate_initial_change_stream_partitions @@ -214,15 +286,42 @@ def post_generate_initial_change_stream_partitions( ) -> rest_streaming.ResponseIterator: """Post-rpc interceptor for generate_initial_change_stream_partitions - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_generate_initial_change_stream_partitions_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the Bigtable server but before - it is returned to user code. + it is returned to user code. This `post_generate_initial_change_stream_partitions` interceptor runs + before the `post_generate_initial_change_stream_partitions_with_metadata` interceptor. 
""" return response + def post_generate_initial_change_stream_partitions_with_metadata( + self, + response: rest_streaming.ResponseIterator, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + rest_streaming.ResponseIterator, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for generate_initial_change_stream_partitions + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the Bigtable server but before it is returned to user code. + + We recommend only using this `post_generate_initial_change_stream_partitions_with_metadata` + interceptor in new development instead of the `post_generate_initial_change_stream_partitions` interceptor. + When both interceptors are used, this `post_generate_initial_change_stream_partitions_with_metadata` interceptor runs after the + `post_generate_initial_change_stream_partitions` interceptor. The (possibly modified) response returned by + `post_generate_initial_change_stream_partitions` will be passed to + `post_generate_initial_change_stream_partitions_with_metadata`. + """ + return response, metadata + def pre_mutate_row( - self, request: bigtable.MutateRowRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[bigtable.MutateRowRequest, Sequence[Tuple[str, str]]]: + self, + request: bigtable.MutateRowRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[bigtable.MutateRowRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for mutate_row Override in a subclass to manipulate the request or metadata @@ -235,15 +334,40 @@ def post_mutate_row( ) -> bigtable.MutateRowResponse: """Post-rpc interceptor for mutate_row - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_mutate_row_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the Bigtable server but before - it is returned to user code. + it is returned to user code. This `post_mutate_row` interceptor runs + before the `post_mutate_row_with_metadata` interceptor. """ return response + def post_mutate_row_with_metadata( + self, + response: bigtable.MutateRowResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[bigtable.MutateRowResponse, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for mutate_row + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the Bigtable server but before it is returned to user code. + + We recommend only using this `post_mutate_row_with_metadata` + interceptor in new development instead of the `post_mutate_row` interceptor. + When both interceptors are used, this `post_mutate_row_with_metadata` interceptor runs after the + `post_mutate_row` interceptor. The (possibly modified) response returned by + `post_mutate_row` will be passed to + `post_mutate_row_with_metadata`. 
+ """ + return response, metadata + def pre_mutate_rows( - self, request: bigtable.MutateRowsRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[bigtable.MutateRowsRequest, Sequence[Tuple[str, str]]]: + self, + request: bigtable.MutateRowsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[bigtable.MutateRowsRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for mutate_rows Override in a subclass to manipulate the request or metadata @@ -256,15 +380,42 @@ def post_mutate_rows( ) -> rest_streaming.ResponseIterator: """Post-rpc interceptor for mutate_rows - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_mutate_rows_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the Bigtable server but before - it is returned to user code. + it is returned to user code. This `post_mutate_rows` interceptor runs + before the `post_mutate_rows_with_metadata` interceptor. """ return response + def post_mutate_rows_with_metadata( + self, + response: rest_streaming.ResponseIterator, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + rest_streaming.ResponseIterator, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for mutate_rows + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the Bigtable server but before it is returned to user code. + + We recommend only using this `post_mutate_rows_with_metadata` + interceptor in new development instead of the `post_mutate_rows` interceptor. + When both interceptors are used, this `post_mutate_rows_with_metadata` interceptor runs after the + `post_mutate_rows` interceptor. The (possibly modified) response returned by + `post_mutate_rows` will be passed to + `post_mutate_rows_with_metadata`. + """ + return response, metadata + def pre_ping_and_warm( - self, request: bigtable.PingAndWarmRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[bigtable.PingAndWarmRequest, Sequence[Tuple[str, str]]]: + self, + request: bigtable.PingAndWarmRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[bigtable.PingAndWarmRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for ping_and_warm Override in a subclass to manipulate the request or metadata @@ -277,17 +428,88 @@ def post_ping_and_warm( ) -> bigtable.PingAndWarmResponse: """Post-rpc interceptor for ping_and_warm - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_ping_and_warm_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the Bigtable server but before - it is returned to user code. + it is returned to user code. This `post_ping_and_warm` interceptor runs + before the `post_ping_and_warm_with_metadata` interceptor. """ return response + def post_ping_and_warm_with_metadata( + self, + response: bigtable.PingAndWarmResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[bigtable.PingAndWarmResponse, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for ping_and_warm + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the Bigtable server but before it is returned to user code. + + We recommend only using this `post_ping_and_warm_with_metadata` + interceptor in new development instead of the `post_ping_and_warm` interceptor. 
+ When both interceptors are used, this `post_ping_and_warm_with_metadata` interceptor runs after the + `post_ping_and_warm` interceptor. The (possibly modified) response returned by + `post_ping_and_warm` will be passed to + `post_ping_and_warm_with_metadata`. + """ + return response, metadata + + def pre_prepare_query( + self, + request: bigtable.PrepareQueryRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[bigtable.PrepareQueryRequest, Sequence[Tuple[str, Union[str, bytes]]]]: + """Pre-rpc interceptor for prepare_query + + Override in a subclass to manipulate the request or metadata + before they are sent to the Bigtable server. + """ + return request, metadata + + def post_prepare_query( + self, response: bigtable.PrepareQueryResponse + ) -> bigtable.PrepareQueryResponse: + """Post-rpc interceptor for prepare_query + + DEPRECATED. Please use the `post_prepare_query_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the Bigtable server but before + it is returned to user code. This `post_prepare_query` interceptor runs + before the `post_prepare_query_with_metadata` interceptor. + """ + return response + + def post_prepare_query_with_metadata( + self, + response: bigtable.PrepareQueryResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[bigtable.PrepareQueryResponse, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for prepare_query + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the Bigtable server but before it is returned to user code. + + We recommend only using this `post_prepare_query_with_metadata` + interceptor in new development instead of the `post_prepare_query` interceptor. + When both interceptors are used, this `post_prepare_query_with_metadata` interceptor runs after the + `post_prepare_query` interceptor. The (possibly modified) response returned by + `post_prepare_query` will be passed to + `post_prepare_query_with_metadata`. + """ + return response, metadata + def pre_read_change_stream( self, request: bigtable.ReadChangeStreamRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable.ReadChangeStreamRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable.ReadChangeStreamRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for read_change_stream Override in a subclass to manipulate the request or metadata @@ -300,17 +522,44 @@ def post_read_change_stream( ) -> rest_streaming.ResponseIterator: """Post-rpc interceptor for read_change_stream - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_read_change_stream_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the Bigtable server but before - it is returned to user code. + it is returned to user code. This `post_read_change_stream` interceptor runs + before the `post_read_change_stream_with_metadata` interceptor. 
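The pre/post hooks above are meant to be overridden in a subclass, with the `_with_metadata` variants preferred for new code because they also see the response headers. A hedged subclass sketch; the base class name and the transport's `interceptor` keyword follow the usual GAPIC REST conventions and are assumptions, since this diff does not show them:

from google.cloud.bigtable_v2.services.bigtable.transports import rest


class LoggingBigtableInterceptor(rest.BigtableRestInterceptor):  # assumed base name
    def pre_prepare_query(self, request, metadata):
        # Inspect or adjust the outgoing request/metadata here.
        print(f"prepare_query instance: {request.instance_name}")
        return request, metadata

    def post_prepare_query_with_metadata(self, response, metadata):
        # Preferred hook: receives the response plus its trailing metadata.
        return response, metadata


# Assumed wiring: pass the interceptor to the REST transport when building it,
# e.g. rest.BigtableRestTransport(interceptor=LoggingBigtableInterceptor()).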
""" return response + def post_read_change_stream_with_metadata( + self, + response: rest_streaming.ResponseIterator, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + rest_streaming.ResponseIterator, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for read_change_stream + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the Bigtable server but before it is returned to user code. + + We recommend only using this `post_read_change_stream_with_metadata` + interceptor in new development instead of the `post_read_change_stream` interceptor. + When both interceptors are used, this `post_read_change_stream_with_metadata` interceptor runs after the + `post_read_change_stream` interceptor. The (possibly modified) response returned by + `post_read_change_stream` will be passed to + `post_read_change_stream_with_metadata`. + """ + return response, metadata + def pre_read_modify_write_row( self, request: bigtable.ReadModifyWriteRowRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable.ReadModifyWriteRowRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable.ReadModifyWriteRowRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for read_modify_write_row Override in a subclass to manipulate the request or metadata @@ -323,15 +572,42 @@ def post_read_modify_write_row( ) -> bigtable.ReadModifyWriteRowResponse: """Post-rpc interceptor for read_modify_write_row - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_read_modify_write_row_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the Bigtable server but before - it is returned to user code. + it is returned to user code. This `post_read_modify_write_row` interceptor runs + before the `post_read_modify_write_row_with_metadata` interceptor. """ return response + def post_read_modify_write_row_with_metadata( + self, + response: bigtable.ReadModifyWriteRowResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable.ReadModifyWriteRowResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for read_modify_write_row + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the Bigtable server but before it is returned to user code. + + We recommend only using this `post_read_modify_write_row_with_metadata` + interceptor in new development instead of the `post_read_modify_write_row` interceptor. + When both interceptors are used, this `post_read_modify_write_row_with_metadata` interceptor runs after the + `post_read_modify_write_row` interceptor. The (possibly modified) response returned by + `post_read_modify_write_row` will be passed to + `post_read_modify_write_row_with_metadata`. 
+ """ + return response, metadata + def pre_read_rows( - self, request: bigtable.ReadRowsRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[bigtable.ReadRowsRequest, Sequence[Tuple[str, str]]]: + self, + request: bigtable.ReadRowsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[bigtable.ReadRowsRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for read_rows Override in a subclass to manipulate the request or metadata @@ -344,17 +620,42 @@ def post_read_rows( ) -> rest_streaming.ResponseIterator: """Post-rpc interceptor for read_rows - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_read_rows_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the Bigtable server but before - it is returned to user code. + it is returned to user code. This `post_read_rows` interceptor runs + before the `post_read_rows_with_metadata` interceptor. """ return response + def post_read_rows_with_metadata( + self, + response: rest_streaming.ResponseIterator, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + rest_streaming.ResponseIterator, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for read_rows + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the Bigtable server but before it is returned to user code. + + We recommend only using this `post_read_rows_with_metadata` + interceptor in new development instead of the `post_read_rows` interceptor. + When both interceptors are used, this `post_read_rows_with_metadata` interceptor runs after the + `post_read_rows` interceptor. The (possibly modified) response returned by + `post_read_rows` will be passed to + `post_read_rows_with_metadata`. + """ + return response, metadata + def pre_sample_row_keys( self, request: bigtable.SampleRowKeysRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable.SampleRowKeysRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[bigtable.SampleRowKeysRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for sample_row_keys Override in a subclass to manipulate the request or metadata @@ -367,12 +668,37 @@ def post_sample_row_keys( ) -> rest_streaming.ResponseIterator: """Post-rpc interceptor for sample_row_keys - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_sample_row_keys_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the Bigtable server but before - it is returned to user code. + it is returned to user code. This `post_sample_row_keys` interceptor runs + before the `post_sample_row_keys_with_metadata` interceptor. """ return response + def post_sample_row_keys_with_metadata( + self, + response: rest_streaming.ResponseIterator, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + rest_streaming.ResponseIterator, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for sample_row_keys + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the Bigtable server but before it is returned to user code. + + We recommend only using this `post_sample_row_keys_with_metadata` + interceptor in new development instead of the `post_sample_row_keys` interceptor. 
+ When both interceptors are used, this `post_sample_row_keys_with_metadata` interceptor runs after the + `post_sample_row_keys` interceptor. The (possibly modified) response returned by + `post_sample_row_keys` will be passed to + `post_sample_row_keys_with_metadata`. + """ + return response, metadata + @dataclasses.dataclass class BigtableRestStub: @@ -496,7 +822,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable.CheckAndMutateRowResponse: r"""Call the check and mutate row method over HTTP. @@ -507,8 +833,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: ~.bigtable.CheckAndMutateRowResponse: @@ -520,6 +848,7 @@ def __call__( http_options = ( _BaseBigtableRestTransport._BaseCheckAndMutateRow._get_http_options() ) + request, metadata = self._interceptor.pre_check_and_mutate_row( request, metadata ) @@ -536,6 +865,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable_v2.BigtableClient.CheckAndMutateRow", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "CheckAndMutateRow", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableRestTransport._CheckAndMutateRow._get_response( self._host, @@ -557,7 +913,35 @@ def __call__( pb_resp = bigtable.CheckAndMutateRowResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_check_and_mutate_row(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_check_and_mutate_row_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = bigtable.CheckAndMutateRowResponse.to_json( + response + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable_v2.BigtableClient.check_and_mutate_row", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "CheckAndMutateRow", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _ExecuteQuery(_BaseBigtableRestTransport._BaseExecuteQuery, BigtableRestStub): @@ -594,7 +978,7 @@ def __call__( *, retry: OptionalRetry = 
gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> rest_streaming.ResponseIterator: r"""Call the execute query method over HTTP. @@ -605,8 +989,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: ~.bigtable.ExecuteQueryResponse: @@ -618,6 +1004,7 @@ def __call__( http_options = ( _BaseBigtableRestTransport._BaseExecuteQuery._get_http_options() ) + request, metadata = self._interceptor.pre_execute_query(request, metadata) transcoded_request = ( _BaseBigtableRestTransport._BaseExecuteQuery._get_transcoded_request( @@ -636,6 +1023,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable_v2.BigtableClient.ExecuteQuery", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "ExecuteQuery", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableRestTransport._ExecuteQuery._get_response( self._host, @@ -656,7 +1070,12 @@ def __call__( resp = rest_streaming.ResponseIterator( response, bigtable.ExecuteQueryResponse ) + resp = self._interceptor.post_execute_query(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_execute_query_with_metadata( + resp, response_metadata + ) return resp class _GenerateInitialChangeStreamPartitions( @@ -696,7 +1115,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> rest_streaming.ResponseIterator: r"""Call the generate initial change stream partitions method over HTTP. @@ -710,8 +1129,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.bigtable.GenerateInitialChangeStreamPartitionsResponse: @@ -725,6 +1146,7 @@ def __call__( http_options = ( _BaseBigtableRestTransport._BaseGenerateInitialChangeStreamPartitions._get_http_options() ) + ( request, metadata, @@ -744,6 +1166,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable_v2.BigtableClient.GenerateInitialChangeStreamPartitions", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "GenerateInitialChangeStreamPartitions", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableRestTransport._GenerateInitialChangeStreamPartitions._get_response( self._host, @@ -764,9 +1213,17 @@ def __call__( resp = rest_streaming.ResponseIterator( response, bigtable.GenerateInitialChangeStreamPartitionsResponse ) + resp = self._interceptor.post_generate_initial_change_stream_partitions( resp ) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + ( + resp, + _, + ) = self._interceptor.post_generate_initial_change_stream_partitions_with_metadata( + resp, response_metadata + ) return resp class _MutateRow(_BaseBigtableRestTransport._BaseMutateRow, BigtableRestStub): @@ -802,7 +1259,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable.MutateRowResponse: r"""Call the mutate row method over HTTP. @@ -813,8 +1270,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.bigtable.MutateRowResponse: @@ -824,6 +1283,7 @@ def __call__( """ http_options = _BaseBigtableRestTransport._BaseMutateRow._get_http_options() + request, metadata = self._interceptor.pre_mutate_row(request, metadata) transcoded_request = ( _BaseBigtableRestTransport._BaseMutateRow._get_transcoded_request( @@ -842,6 +1302,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable_v2.BigtableClient.MutateRow", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "MutateRow", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableRestTransport._MutateRow._get_response( self._host, @@ -863,7 +1350,33 @@ def __call__( pb_resp = bigtable.MutateRowResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_mutate_row(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_mutate_row_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = bigtable.MutateRowResponse.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable_v2.BigtableClient.mutate_row", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "MutateRow", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _MutateRows(_BaseBigtableRestTransport._BaseMutateRows, BigtableRestStub): @@ -900,7 +1413,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> rest_streaming.ResponseIterator: r"""Call the mutate rows method over HTTP. @@ -911,8 +1424,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
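
The ``post_*_with_metadata`` calls added in these hunks run after the corresponding ``post_*`` hook and receive the HTTP response headers alongside the (possibly modified) response, as described for ``post_sample_row_keys_with_metadata`` at the top of this file. A sketch of overriding one of the new hooks; the ``BigtableRestInterceptor``/``BigtableRestTransport`` wiring follows the usual generated REST-transport layout and is an assumption here, not something shown verbatim in this patch:

    import logging

    from google.auth.credentials import AnonymousCredentials
    from google.cloud import bigtable_v2
    from google.cloud.bigtable_v2.services.bigtable.transports.rest import (
        BigtableRestInterceptor,
        BigtableRestTransport,
    )

    class HeaderLoggingInterceptor(BigtableRestInterceptor):
        def post_mutate_row_with_metadata(self, response, metadata):
            # Runs after post_mutate_row; ``metadata`` carries the HTTP
            # response headers as (key, value) pairs.
            logging.getLogger(__name__).debug("MutateRow headers: %s", dict(metadata))
            return response, metadata

    transport = BigtableRestTransport(
        credentials=AnonymousCredentials(),
        interceptor=HeaderLoggingInterceptor(),
    )
    client = bigtable_v2.BigtableClient(transport=transport)
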
Returns: ~.bigtable.MutateRowsResponse: @@ -924,6 +1439,7 @@ def __call__( http_options = ( _BaseBigtableRestTransport._BaseMutateRows._get_http_options() ) + request, metadata = self._interceptor.pre_mutate_rows(request, metadata) transcoded_request = ( _BaseBigtableRestTransport._BaseMutateRows._get_transcoded_request( @@ -942,6 +1458,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable_v2.BigtableClient.MutateRows", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "MutateRows", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableRestTransport._MutateRows._get_response( self._host, @@ -962,7 +1505,12 @@ def __call__( resp = rest_streaming.ResponseIterator( response, bigtable.MutateRowsResponse ) + resp = self._interceptor.post_mutate_rows(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_mutate_rows_with_metadata( + resp, response_metadata + ) return resp class _PingAndWarm(_BaseBigtableRestTransport._BasePingAndWarm, BigtableRestStub): @@ -998,7 +1546,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable.PingAndWarmResponse: r"""Call the ping and warm method over HTTP. @@ -1009,8 +1557,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.bigtable.PingAndWarmResponse: @@ -1023,6 +1573,7 @@ def __call__( http_options = ( _BaseBigtableRestTransport._BasePingAndWarm._get_http_options() ) + request, metadata = self._interceptor.pre_ping_and_warm(request, metadata) transcoded_request = ( _BaseBigtableRestTransport._BasePingAndWarm._get_transcoded_request( @@ -1041,6 +1592,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable_v2.BigtableClient.PingAndWarm", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "PingAndWarm", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableRestTransport._PingAndWarm._get_response( self._host, @@ -1062,7 +1640,188 @@ def __call__( pb_resp = bigtable.PingAndWarmResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_ping_and_warm(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_ping_and_warm_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = bigtable.PingAndWarmResponse.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable_v2.BigtableClient.ping_and_warm", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "PingAndWarm", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _PrepareQuery(_BaseBigtableRestTransport._BasePrepareQuery, BigtableRestStub): + def __hash__(self): + return hash("BigtableRestTransport.PrepareQuery") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response + + def __call__( + self, + request: bigtable.PrepareQueryRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> bigtable.PrepareQueryResponse: + r"""Call the prepare query method over HTTP. + + Args: + request (~.bigtable.PrepareQueryRequest): + The request object. Request message for + Bigtable.PrepareQuery + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.bigtable.PrepareQueryResponse: + Response message for + Bigtable.PrepareQueryResponse + + """ + + http_options = ( + _BaseBigtableRestTransport._BasePrepareQuery._get_http_options() + ) + + request, metadata = self._interceptor.pre_prepare_query(request, metadata) + transcoded_request = ( + _BaseBigtableRestTransport._BasePrepareQuery._get_transcoded_request( + http_options, request + ) + ) + + body = _BaseBigtableRestTransport._BasePrepareQuery._get_request_body_json( + transcoded_request + ) + + # Jsonify the query params + query_params = ( + _BaseBigtableRestTransport._BasePrepareQuery._get_query_params_json( + transcoded_request + ) + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable_v2.BigtableClient.PrepareQuery", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "PrepareQuery", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = BigtableRestTransport._PrepareQuery._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = bigtable.PrepareQueryResponse() + pb_resp = bigtable.PrepareQueryResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_prepare_query(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_prepare_query_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = bigtable.PrepareQueryResponse.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable_v2.BigtableClient.prepare_query", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "PrepareQuery", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _ReadChangeStream( @@ -1101,7 +1860,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> rest_streaming.ResponseIterator: r"""Call the read change stream method over HTTP. @@ -1113,8 +1872,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: ~.bigtable.ReadChangeStreamResponse: @@ -1127,6 +1888,7 @@ def __call__( http_options = ( _BaseBigtableRestTransport._BaseReadChangeStream._get_http_options() ) + request, metadata = self._interceptor.pre_read_change_stream( request, metadata ) @@ -1147,6 +1909,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable_v2.BigtableClient.ReadChangeStream", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "ReadChangeStream", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableRestTransport._ReadChangeStream._get_response( self._host, @@ -1167,7 +1956,12 @@ def __call__( resp = rest_streaming.ResponseIterator( response, bigtable.ReadChangeStreamResponse ) + resp = self._interceptor.post_read_change_stream(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_read_change_stream_with_metadata( + resp, response_metadata + ) return resp class _ReadModifyWriteRow( @@ -1205,7 +1999,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable.ReadModifyWriteRowResponse: r"""Call the read modify write row method over HTTP. @@ -1216,8 +2010,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
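
Each method in this hunk also gains the ``CLIENT_LOGGING_SUPPORTED``-gated blocks that emit structured request/response records at DEBUG level. They only fire when the installed ``google-api-core`` exposes the client-logging helpers and the transport's logger is enabled for DEBUG; a sketch of switching them on with the standard ``logging`` module (the dotted logger name is an assumption based on the module path, since the generated transport typically uses ``logging.getLogger(__name__)``):

    import logging

    logging.basicConfig(level=logging.DEBUG)
    # Assumed logger name; widen to the root logger if the exact name differs.
    logging.getLogger(
        "google.cloud.bigtable_v2.services.bigtable.transports.rest"
    ).setLevel(logging.DEBUG)
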
Returns: ~.bigtable.ReadModifyWriteRowResponse: @@ -1229,6 +2025,7 @@ def __call__( http_options = ( _BaseBigtableRestTransport._BaseReadModifyWriteRow._get_http_options() ) + request, metadata = self._interceptor.pre_read_modify_write_row( request, metadata ) @@ -1245,6 +2042,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable_v2.BigtableClient.ReadModifyWriteRow", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "ReadModifyWriteRow", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableRestTransport._ReadModifyWriteRow._get_response( self._host, @@ -1266,7 +2090,35 @@ def __call__( pb_resp = bigtable.ReadModifyWriteRowResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_read_modify_write_row(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_read_modify_write_row_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = bigtable.ReadModifyWriteRowResponse.to_json( + response + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable_v2.BigtableClient.read_modify_write_row", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "ReadModifyWriteRow", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _ReadRows(_BaseBigtableRestTransport._BaseReadRows, BigtableRestStub): @@ -1303,7 +2155,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> rest_streaming.ResponseIterator: r"""Call the read rows method over HTTP. @@ -1314,8 +2166,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.bigtable.ReadRowsResponse: @@ -1325,6 +2179,7 @@ def __call__( """ http_options = _BaseBigtableRestTransport._BaseReadRows._get_http_options() + request, metadata = self._interceptor.pre_read_rows(request, metadata) transcoded_request = ( _BaseBigtableRestTransport._BaseReadRows._get_transcoded_request( @@ -1343,6 +2198,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable_v2.BigtableClient.ReadRows", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "ReadRows", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableRestTransport._ReadRows._get_response( self._host, @@ -1361,7 +2243,12 @@ def __call__( # Return the response resp = rest_streaming.ResponseIterator(response, bigtable.ReadRowsResponse) + resp = self._interceptor.post_read_rows(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_read_rows_with_metadata( + resp, response_metadata + ) return resp class _SampleRowKeys( @@ -1399,7 +2286,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> rest_streaming.ResponseIterator: r"""Call the sample row keys method over HTTP. @@ -1410,8 +2297,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
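
The server-streaming methods in this hunk (``ReadRows``, ``SampleRowKeys``, and the other streaming RPCs) return a ``rest_streaming.ResponseIterator`` rather than a single message, so callers iterate the result. A small sketch against a hypothetical table, forcing the REST transport so these code paths are exercised:

    from google.cloud import bigtable_v2

    client = bigtable_v2.BigtableClient(transport="rest")  # assumes default credentials
    responses = client.sample_row_keys(
        bigtable_v2.SampleRowKeysRequest(
            table_name="projects/my-project/instances/my-instance/tables/my-table",
        )
    )
    for sample in responses:  # each item is a bigtable_v2.SampleRowKeysResponse
        print(sample.row_key, sample.offset_bytes)
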
Returns: ~.bigtable.SampleRowKeysResponse: @@ -1423,6 +2312,7 @@ def __call__( http_options = ( _BaseBigtableRestTransport._BaseSampleRowKeys._get_http_options() ) + request, metadata = self._interceptor.pre_sample_row_keys(request, metadata) transcoded_request = ( _BaseBigtableRestTransport._BaseSampleRowKeys._get_transcoded_request( @@ -1437,6 +2327,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable_v2.BigtableClient.SampleRowKeys", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "SampleRowKeys", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableRestTransport._SampleRowKeys._get_response( self._host, @@ -1456,7 +2373,12 @@ def __call__( resp = rest_streaming.ResponseIterator( response, bigtable.SampleRowKeysResponse ) + resp = self._interceptor.post_sample_row_keys(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_sample_row_keys_with_metadata( + resp, response_metadata + ) return resp @property @@ -1512,6 +2434,14 @@ def ping_and_warm( # In C++ this would require a dynamic_cast return self._PingAndWarm(self._session, self._host, self._interceptor) # type: ignore + @property + def prepare_query( + self, + ) -> Callable[[bigtable.PrepareQueryRequest], bigtable.PrepareQueryResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._PrepareQuery(self._session, self._host, self._interceptor) # type: ignore + @property def read_change_stream( self, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest_base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest_base.py index 9d2292a3c45b..c33fc1e83864 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest_base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest_base.py @@ -448,6 +448,63 @@ def _get_query_params_json(transcoded_request): query_params["$alt"] = "json;enum-encoding=int" return query_params + class _BasePrepareQuery: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{instance_name=projects/*/instances/*}:prepareQuery", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable.PrepareQueryRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableRestTransport._BasePrepareQuery._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + class _BaseReadChangeStream: def __hash__(self): # pragma: NO COVER return NotImplementedError("__hash__ must be implemented.") diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py index e524627cd736..c15a1d3078d7 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py @@ -26,6 +26,8 @@ MutateRowsResponse, PingAndWarmRequest, PingAndWarmResponse, + PrepareQueryRequest, + PrepareQueryResponse, RateLimitInfo, ReadChangeStreamRequest, ReadChangeStreamResponse, @@ -91,6 +93,8 @@ "MutateRowsResponse", "PingAndWarmRequest", "PingAndWarmResponse", + "PrepareQueryRequest", + "PrepareQueryResponse", "RateLimitInfo", "ReadChangeStreamRequest", "ReadChangeStreamResponse", diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py index 3818decb6179..6d9be1438887 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py @@ -21,6 +21,7 @@ from google.cloud.bigtable_v2.types import data from 
google.cloud.bigtable_v2.types import request_stats as gb_request_stats +from google.cloud.bigtable_v2.types import types from google.protobuf import duration_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore from google.protobuf import wrappers_pb2 # type: ignore @@ -51,6 +52,8 @@ "ReadChangeStreamResponse", "ExecuteQueryRequest", "ExecuteQueryResponse", + "PrepareQueryRequest", + "PrepareQueryResponse", }, ) @@ -70,6 +73,12 @@ class ReadRowsRequest(proto.Message): Values are of the form ``projects//instances//tables/
/authorizedViews/``. + materialized_view_name (str): + Optional. The unique name of the MaterializedView from which + to read. + + Values are of the form + ``projects//instances//materializedViews/``. app_profile_id (str): This value specifies routing for replication. If not specified, the "default" application @@ -136,6 +145,10 @@ class RequestStatsView(proto.Enum): proto.STRING, number=9, ) + materialized_view_name: str = proto.Field( + proto.STRING, + number=11, + ) app_profile_id: str = proto.Field( proto.STRING, number=5, @@ -351,6 +364,12 @@ class SampleRowKeysRequest(proto.Message): Values are of the form ``projects//instances//tables/
/authorizedViews/``. + materialized_view_name (str): + Optional. The unique name of the MaterializedView from which + to read. + + Values are of the form + ``projects//instances//materializedViews/``. app_profile_id (str): This value specifies routing for replication. If not specified, the "default" application @@ -365,6 +384,10 @@ class SampleRowKeysRequest(proto.Message): proto.STRING, number=4, ) + materialized_view_name: str = proto.Field( + proto.STRING, + number=5, + ) app_profile_id: str = proto.Field( proto.STRING, number=2, @@ -1276,6 +1299,23 @@ class ExecuteQueryRequest(proto.Message): used. query (str): Required. The query string. + + Exactly one of ``query`` and ``prepared_query`` is required. + Setting both or neither is an ``INVALID_ARGUMENT``. + prepared_query (bytes): + A prepared query that was returned from + ``PrepareQueryResponse``. + + Exactly one of ``query`` and ``prepared_query`` is required. + Setting both or neither is an ``INVALID_ARGUMENT``. + + Setting this field also places restrictions on several other + fields: + + - ``data_format`` must be empty. + - ``validate_only`` must be false. + - ``params`` must match the ``param_types`` set in the + ``PrepareQueryRequest``. proto_format (google.cloud.bigtable_v2.types.ProtoFormat): Protocol buffer format as described by ProtoSchema and ProtoRows messages. @@ -1301,14 +1341,19 @@ class ExecuteQueryRequest(proto.Message): then ``@firstName`` will be replaced with googlesql bytes value "foo" in the query string during query evaluation. - In case of Value.kind is not set, it will be set to - corresponding null value in googlesql. + If ``Value.kind`` is not set, the value is treated as a NULL + value of the given type. For example, if ``params["firstName"] = type {string_type {}}`` then ``@firstName`` will be replaced with googlesql null string. - Value.type should always be set and no inference of type - will be made from Value.kind. If Value.type is not set, we - will return INVALID_ARGUMENT error. + If ``query`` is set, any empty ``Value.type`` in the map + will be rejected with ``INVALID_ARGUMENT``. + + If ``prepared_query`` is set, any empty ``Value.type`` in + the map will be inferred from the ``param_types`` in the + ``PrepareQueryRequest``. Any non-empty ``Value.type`` must + match the corresponding ``param_types`` entry, or be + rejected with ``INVALID_ARGUMENT``. """ instance_name: str = proto.Field( @@ -1323,6 +1368,10 @@ class ExecuteQueryRequest(proto.Message): proto.STRING, number=3, ) + prepared_query: bytes = proto.Field( + proto.BYTES, + number=9, + ) proto_format: data.ProtoFormat = proto.Field( proto.MESSAGE, number=4, @@ -1381,4 +1430,102 @@ class ExecuteQueryResponse(proto.Message): ) +class PrepareQueryRequest(proto.Message): + r"""Request message for Bigtable.PrepareQuery + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + instance_name (str): + Required. The unique name of the instance against which the + query should be executed. Values are of the form + ``projects//instances/`` + app_profile_id (str): + Optional. This value specifies routing for preparing the + query. Note that this ``app_profile_id`` is only used for + preparing the query. The actual query execution will use the + app profile specified in the ``ExecuteQueryRequest``. If not + specified, the ``default`` application profile will be used. + query (str): + Required. The query string. 
+ proto_format (google.cloud.bigtable_v2.types.ProtoFormat): + Protocol buffer format as described by + ProtoSchema and ProtoRows messages. + + This field is a member of `oneof`_ ``data_format``. + param_types (MutableMapping[str, google.cloud.bigtable_v2.types.Type]): + Required. ``param_types`` is a map of parameter identifier + strings to their ``Type``\ s. + + In query string, a parameter placeholder consists of the + ``@`` character followed by the parameter name (for example, + ``@firstName``) in the query string. + + For example, if param_types["firstName"] = Bytes then + @firstName will be a query parameter of type Bytes. The + specific ``Value`` to be used for the query execution must + be sent in ``ExecuteQueryRequest`` in the ``params`` map. + """ + + instance_name: str = proto.Field( + proto.STRING, + number=1, + ) + app_profile_id: str = proto.Field( + proto.STRING, + number=2, + ) + query: str = proto.Field( + proto.STRING, + number=3, + ) + proto_format: data.ProtoFormat = proto.Field( + proto.MESSAGE, + number=4, + oneof="data_format", + message=data.ProtoFormat, + ) + param_types: MutableMapping[str, types.Type] = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=6, + message=types.Type, + ) + + +class PrepareQueryResponse(proto.Message): + r"""Response message for Bigtable.PrepareQueryResponse + + Attributes: + metadata (google.cloud.bigtable_v2.types.ResultSetMetadata): + Structure of rows in the response stream of + ``ExecuteQueryResponse`` for the returned + ``prepared_query``. + prepared_query (bytes): + A serialized prepared query. Clients should treat this as an + opaque blob of bytes to send in ``ExecuteQueryRequest``. + valid_until (google.protobuf.timestamp_pb2.Timestamp): + The time at which the prepared query token + becomes invalid. A token may become invalid + early due to changes in the data being read, but + it provides a guideline to refresh query plans + asynchronously. + """ + + metadata: data.ResultSetMetadata = proto.Field( + proto.MESSAGE, + number=1, + message=data.ResultSetMetadata, + ) + prepared_query: bytes = proto.Field( + proto.BYTES, + number=2, + ) + valid_until: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py index 9d964a4f6131..97e32197e9fd 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py @@ -1464,12 +1464,15 @@ class ProtoRows(proto.Message): class ProtoRowsBatch(proto.Message): - r"""Batch of serialized ProtoRows. + r"""A part of a serialized ``ProtoRows`` message. Attributes: batch_data (bytes): - Merge partial results by concatenating these bytes, then - parsing the overall value as a ``ProtoRows`` message. + Part of a serialized ``ProtoRows`` message. A complete, + parseable ProtoRows message is constructed by concatenating + ``batch_data`` from multiple ``ProtoRowsBatch`` messages. + The ``PartialResultSet`` that contains the last part has + ``complete_batch`` set to ``true``. """ batch_data: bytes = proto.Field( @@ -1479,9 +1482,30 @@ class ProtoRowsBatch(proto.Message): class PartialResultSet(proto.Message): - r"""A partial result set from the streaming query API. CBT client will - buffer partial_rows from result_sets until it gets a - resumption_token. 
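
The new ``PrepareQueryRequest``/``PrepareQueryResponse`` messages above pair with the new ``prepared_query`` field on ``ExecuteQueryRequest``: a query is prepared once (optionally with ``param_types``), and the opaque ``prepared_query`` bytes are then sent on execution, where they are mutually exclusive with ``query``. A sketch of that round trip for a parameterless query, assuming the corresponding ``prepare_query`` client method generated elsewhere in this series; the instance, table, and SQL text are illustrative only:

    from google.cloud import bigtable_v2

    client = bigtable_v2.BigtableClient()  # assumes application-default credentials
    instance = "projects/my-project/instances/my-instance"  # hypothetical

    # Prepare once; the response carries the opaque plan plus result metadata
    # and a valid_until hint for refreshing the plan asynchronously.
    prepared = client.prepare_query(
        bigtable_v2.PrepareQueryRequest(
            instance_name=instance,
            query="SELECT _key FROM `my-table`",  # hypothetical table
        )
    )

    # Execute with the prepared plan; query and prepared_query must not both be set.
    for response in client.execute_query(
        bigtable_v2.ExecuteQueryRequest(
            instance_name=instance,
            prepared_query=prepared.prepared_query,
        )
    ):
        ...  # buffer response.results per the PartialResultSet contract below
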
+ r"""A partial result set from the streaming query API. Cloud Bigtable + clients buffer partial results received in this message until a + ``resume_token`` is received. + + The pseudocode below describes how to buffer and parse a stream of + ``PartialResultSet`` messages. + + Having: + + - queue of row results waiting to be returned ``queue`` + - extensible buffer of bytes ``buffer`` + - a place to keep track of the most recent ``resume_token`` for + each PartialResultSet ``p`` received { if p.reset { ensure + ``queue`` is empty ensure ``buffer`` is empty } if + p.estimated_batch_size != 0 { (optional) ensure ``buffer`` is + sized to at least ``p.estimated_batch_size`` } if + ``p.proto_rows_batch`` is set { append + ``p.proto_rows_batch.bytes`` to ``buffer`` } if p.batch_checksum + is set and ``buffer`` is not empty { validate the checksum + matches the contents of ``buffer`` (see comments on + ``batch_checksum``) parse ``buffer`` as ``ProtoRows`` message, + clearing ``buffer`` add parsed rows to end of ``queue`` } if + p.resume_token is set { release results in ``queue`` save + ``p.resume_token`` in ``resume_token`` } } .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields @@ -1491,37 +1515,73 @@ class PartialResultSet(proto.Message): Partial rows in serialized ProtoRows format. This field is a member of `oneof`_ ``partial_rows``. - resume_token (bytes): - An opaque token sent by the server to allow query resumption - and signal the client to accumulate ``partial_rows`` since - the last non-empty ``resume_token``. On resumption, the - resumed query will return the remaining rows for this query. + batch_checksum (int): + CRC32C checksum of concatenated ``partial_rows`` data for + the current batch. + + When present, the buffered data from ``partial_rows`` forms + a complete parseable message of the appropriate type. - If there is a batch in progress, a non-empty - ``resume_token`` means that that the batch of - ``partial_rows`` will be complete after merging the - ``partial_rows`` from this response. The client must only - yield completed batches to the application, and must ensure - that any future retries send the latest token to avoid - returning duplicate data. + The client should mark the end of a parseable message and + prepare to receive a new one starting from the next + ``PartialResultSet`` message. Clients must verify the + checksum of the serialized batch before yielding it to the + caller. - The server may set 'resume_token' without a 'partial_rows'. - If there is a batch in progress the client should yield it. + This does NOT mean the values can be yielded to the callers + since a ``resume_token`` is required to safely do so. + + If ``resume_token`` is non-empty and any data has been + received since the last one, this field is guaranteed to be + non-empty. In other words, clients may assume that a batch + will never cross a ``resume_token`` boundary. + + This field is a member of `oneof`_ ``_batch_checksum``. + resume_token (bytes): + An opaque token sent by the server to allow query resumption + and signal that the buffered values constructed from + received ``partial_rows`` can be yielded to the caller. + Clients can provide this token in a subsequent request to + resume the result stream from the current point. 
+ + When ``resume_token`` is non-empty, the buffered values + received from ``partial_rows`` since the last non-empty + ``resume_token`` can be yielded to the callers, provided + that the client keeps the value of ``resume_token`` and uses + it on subsequent retries. + + A ``resume_token`` may be sent without information in + ``partial_rows`` to checkpoint the progress of a sparse + query. Any previous ``partial_rows`` data should still be + yielded in this case, and the new ``resume_token`` should be + saved for future retries as normal. + + A ``resume_token`` will only be sent on a boundary where + there is either no ongoing result batch, or + ``batch_checksum`` is also populated. The server will also send a sentinel ``resume_token`` when last batch of ``partial_rows`` is sent. If the client retries the ExecuteQueryRequest with the sentinel ``resume_token``, the server will emit it again without any - ``partial_rows``, then return OK. + data in ``partial_rows``, then return OK. + reset (bool): + If ``true``, any data buffered since the last non-empty + ``resume_token`` must be discarded before the other parts of + this message, if any, are handled. estimated_batch_size (int): - Estimated size of a new batch. The server will always set - this when returning the first ``partial_rows`` of a batch, - and will not set it at any other time. - - The client can use this estimate to allocate an initial - buffer for the batched results. This helps minimize the - number of allocations required, though the buffer size may - still need to be increased if the estimate is too low. + Estimated size of the buffer required to hold the next batch + of results. + + This value will be sent with the first ``partial_rows`` of a + batch. That is, on the first ``partial_rows`` received in a + stream, on the first message after a ``batch_checksum`` + message, and any time ``reset`` is true. + + The client can use this estimate to allocate a buffer for + the next batch of results. This helps minimize the number of + allocations required, though the buffer size may still need + to be increased if the estimate is too low. 
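
The pseudocode embedded in the ``PartialResultSet`` docstring above is flattened by the docstring generator, so the same buffering contract is restated here as a plain-Python sketch. It assumes CRC32C via the ``google-crc32c`` package for ``batch_checksum`` and uses proto-plus ``deserialize`` to parse the concatenated batch; presence checks and error handling are simplified:

    import google_crc32c

    from google.cloud.bigtable_v2 import ProtoRows

    def drain(stream, yield_rows, save_resume_token):
        """Buffer PartialResultSet messages as described in the docstring above."""
        queue, buffer = [], bytearray()
        for p in stream:
            if p.reset:
                # Discard everything buffered since the last resume_token.
                queue.clear()
                buffer.clear()
            if p.proto_rows_batch.batch_data:
                buffer.extend(p.proto_rows_batch.batch_data)
            if "batch_checksum" in p and buffer:
                # The buffer now holds a complete, parseable ProtoRows message.
                if p.batch_checksum != int(google_crc32c.value(bytes(buffer))):
                    raise ValueError("batch_checksum mismatch; retry from last resume_token")
                queue.extend(ProtoRows.deserialize(bytes(buffer)).values)
                buffer.clear()
            if p.resume_token:
                yield_rows(queue)              # safe to hand buffered rows to the caller
                queue = []
                save_resume_token(p.resume_token)
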
""" proto_rows_batch: "ProtoRowsBatch" = proto.Field( @@ -1530,10 +1590,19 @@ class PartialResultSet(proto.Message): oneof="partial_rows", message="ProtoRowsBatch", ) + batch_checksum: int = proto.Field( + proto.UINT32, + number=6, + optional=True, + ) resume_token: bytes = proto.Field( proto.BYTES, number=5, ) + reset: bool = proto.Field( + proto.BOOL, + number=7, + ) estimated_batch_size: int = proto.Field( proto.INT32, number=4, diff --git a/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py b/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py index 0c242cb09ce2..9e2dd2794163 100644 --- a/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py +++ b/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py @@ -46,6 +46,8 @@ class bigtable_adminCallTransformer(cst.CSTTransformer): 'create_backup': ('parent', 'backup_id', 'backup', ), 'create_cluster': ('parent', 'cluster_id', 'cluster', ), 'create_instance': ('parent', 'instance_id', 'instance', 'clusters', ), + 'create_logical_view': ('parent', 'logical_view_id', 'logical_view', ), + 'create_materialized_view': ('parent', 'materialized_view_id', 'materialized_view', ), 'create_table': ('parent', 'table_id', 'table', 'initial_splits', ), 'create_table_from_snapshot': ('parent', 'table_id', 'source_snapshot', ), 'delete_app_profile': ('name', 'ignore_warnings', ), @@ -53,6 +55,8 @@ class bigtable_adminCallTransformer(cst.CSTTransformer): 'delete_backup': ('name', ), 'delete_cluster': ('name', ), 'delete_instance': ('name', ), + 'delete_logical_view': ('name', 'etag', ), + 'delete_materialized_view': ('name', 'etag', ), 'delete_snapshot': ('name', ), 'delete_table': ('name', ), 'drop_row_range': ('name', 'row_key_prefix', 'delete_all_data_from_table', ), @@ -63,6 +67,8 @@ class bigtable_adminCallTransformer(cst.CSTTransformer): 'get_cluster': ('name', ), 'get_iam_policy': ('resource', 'options', ), 'get_instance': ('name', ), + 'get_logical_view': ('name', ), + 'get_materialized_view': ('name', ), 'get_snapshot': ('name', ), 'get_table': ('name', 'view', ), 'list_app_profiles': ('parent', 'page_size', 'page_token', ), @@ -71,6 +77,8 @@ class bigtable_adminCallTransformer(cst.CSTTransformer): 'list_clusters': ('parent', 'page_token', ), 'list_hot_tablets': ('parent', 'start_time', 'end_time', 'page_size', 'page_token', ), 'list_instances': ('parent', 'page_token', ), + 'list_logical_views': ('parent', 'page_size', 'page_token', ), + 'list_materialized_views': ('parent', 'page_size', 'page_token', ), 'list_snapshots': ('parent', 'page_size', 'page_token', ), 'list_tables': ('parent', 'view', 'page_size', 'page_token', ), 'modify_column_families': ('name', 'modifications', 'ignore_warnings', ), @@ -85,8 +93,10 @@ class bigtable_adminCallTransformer(cst.CSTTransformer): 'update_authorized_view': ('authorized_view', 'update_mask', 'ignore_warnings', ), 'update_backup': ('backup', 'update_mask', ), 'update_cluster': ('name', 'location', 'state', 'serve_nodes', 'node_scaling_factor', 'cluster_config', 'default_storage_type', 'encryption_config', ), - 'update_instance': ('display_name', 'name', 'state', 'type_', 'labels', 'create_time', 'satisfies_pzs', ), - 'update_table': ('table', 'update_mask', ), + 'update_instance': ('display_name', 'name', 'state', 'type_', 'labels', 'create_time', 'satisfies_pzs', 'satisfies_pzi', ), + 'update_logical_view': ('logical_view', 'update_mask', ), + 'update_materialized_view': ('materialized_view', 'update_mask', ), + 
'update_table': ('table', 'update_mask', 'ignore_warnings', ), } def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: diff --git a/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py b/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py index 218a54902e02..466b1d1c7e2e 100644 --- a/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py +++ b/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py @@ -40,15 +40,16 @@ class bigtableCallTransformer(cst.CSTTransformer): CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { 'check_and_mutate_row': ('row_key', 'table_name', 'authorized_view_name', 'app_profile_id', 'predicate_filter', 'true_mutations', 'false_mutations', ), - 'execute_query': ('instance_name', 'query', 'params', 'app_profile_id', 'proto_format', 'resume_token', ), + 'execute_query': ('instance_name', 'query', 'params', 'app_profile_id', 'prepared_query', 'proto_format', 'resume_token', ), 'generate_initial_change_stream_partitions': ('table_name', 'app_profile_id', ), 'mutate_row': ('row_key', 'mutations', 'table_name', 'authorized_view_name', 'app_profile_id', ), 'mutate_rows': ('entries', 'table_name', 'authorized_view_name', 'app_profile_id', ), 'ping_and_warm': ('name', 'app_profile_id', ), + 'prepare_query': ('instance_name', 'query', 'param_types', 'app_profile_id', 'proto_format', ), 'read_change_stream': ('table_name', 'app_profile_id', 'partition', 'start_time', 'continuation_tokens', 'end_time', 'heartbeat_duration', ), 'read_modify_write_row': ('row_key', 'rules', 'table_name', 'authorized_view_name', 'app_profile_id', ), - 'read_rows': ('table_name', 'authorized_view_name', 'app_profile_id', 'rows', 'filter', 'rows_limit', 'request_stats_view', 'reversed', ), - 'sample_row_keys': ('table_name', 'authorized_view_name', 'app_profile_id', ), + 'read_rows': ('table_name', 'authorized_view_name', 'materialized_view_name', 'app_profile_id', 'rows', 'filter', 'rows_limit', 'request_stats_view', 'reversed', ), + 'sample_row_keys': ('table_name', 'authorized_view_name', 'materialized_view_name', 'app_profile_id', ), } def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: diff --git a/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py b/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py index 18ff69ffd2f8..d59a8618750c 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py @@ -377,7 +377,17 @@ async def test__manage_channel_ping_and_warm(self): import time import threading + if CrossSync.is_async: + from google.cloud.bigtable_v2.services.bigtable.transports.grpc_asyncio import ( + _LoggingClientAIOInterceptor as Interceptor, + ) + else: + from google.cloud.bigtable_v2.services.bigtable.transports.grpc import ( + _LoggingClientInterceptor as Interceptor, + ) + client_mock = mock.Mock() + client_mock.transport._interceptor = Interceptor() client_mock._is_closed.is_set.return_value = False client_mock._channel_init_time = time.monotonic() orig_channel = client_mock.transport.grpc_channel diff --git a/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_client.py b/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_client.py index c5c6bac30bcf..c7738128002a 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_client.py +++ 
b/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_client.py @@ -289,8 +289,12 @@ def test__manage_channel_ping_and_warm(self): """_manage channel should call ping and warm internally""" import time import threading + from google.cloud.bigtable_v2.services.bigtable.transports.grpc import ( + _LoggingClientInterceptor as Interceptor, + ) client_mock = mock.Mock() + client_mock.transport._interceptor = Interceptor() client_mock._is_closed.is_set.return_value = False client_mock._channel_init_time = time.monotonic() orig_channel = client_mock.transport.grpc_channel diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index 3f79e11a4e4d..eeb014f54706 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -80,6 +80,14 @@ import google.auth +CRED_INFO_JSON = { + "credential_source": "/path/to/file", + "credential_type": "service account credentials", + "principal": "service-account@example.com", +} +CRED_INFO_STRING = json.dumps(CRED_INFO_JSON) + + async def mock_async_gen(data, chunk_size=1): for i in range(0, len(data)): # pragma: NO COVER chunk = data[i : i + chunk_size] @@ -354,6 +362,49 @@ def test__get_universe_domain(): assert str(excinfo.value) == "Universe Domain cannot be an empty string." +@pytest.mark.parametrize( + "error_code,cred_info_json,show_cred_info", + [ + (401, CRED_INFO_JSON, True), + (403, CRED_INFO_JSON, True), + (404, CRED_INFO_JSON, True), + (500, CRED_INFO_JSON, False), + (401, None, False), + (403, None, False), + (404, None, False), + (500, None, False), + ], +) +def test__add_cred_info_for_auth_errors(error_code, cred_info_json, show_cred_info): + cred = mock.Mock(["get_cred_info"]) + cred.get_cred_info = mock.Mock(return_value=cred_info_json) + client = BigtableInstanceAdminClient(credentials=cred) + client._transport._credentials = cred + + error = core_exceptions.GoogleAPICallError("message", details=["foo"]) + error.code = error_code + + client._add_cred_info_for_auth_errors(error) + if show_cred_info: + assert error.details == ["foo", CRED_INFO_STRING] + else: + assert error.details == ["foo"] + + +@pytest.mark.parametrize("error_code", [401, 403, 404, 500]) +def test__add_cred_info_for_auth_errors_no_get_cred_info(error_code): + cred = mock.Mock([]) + assert not hasattr(cred, "get_cred_info") + client = BigtableInstanceAdminClient(credentials=cred) + client._transport._credentials = cred + + error = core_exceptions.GoogleAPICallError("message", details=[]) + error.code = error_code + + client._add_cred_info_for_auth_errors(error) + assert error.details == [] + + @pytest.mark.parametrize( "client_class,transport_name", [ @@ -1535,6 +1586,7 @@ def test_get_instance(request_type, transport: str = "grpc"): state=instance.Instance.State.READY, type_=instance.Instance.Type.PRODUCTION, satisfies_pzs=True, + satisfies_pzi=True, ) response = client.get_instance(request) @@ -1551,6 +1603,7 @@ def test_get_instance(request_type, transport: str = "grpc"): assert response.state == instance.Instance.State.READY assert response.type_ == instance.Instance.Type.PRODUCTION assert response.satisfies_pzs is True + assert response.satisfies_pzi is True def test_get_instance_non_empty_request_with_auto_populated_field(): @@ -1682,6 +1735,7 @@ async 
def test_get_instance_async( state=instance.Instance.State.READY, type_=instance.Instance.Type.PRODUCTION, satisfies_pzs=True, + satisfies_pzi=True, ) ) response = await client.get_instance(request) @@ -1699,6 +1753,7 @@ async def test_get_instance_async( assert response.state == instance.Instance.State.READY assert response.type_ == instance.Instance.Type.PRODUCTION assert response.satisfies_pzs is True + assert response.satisfies_pzi is True @pytest.mark.asyncio @@ -2202,6 +2257,7 @@ def test_update_instance(request_type, transport: str = "grpc"): state=instance.Instance.State.READY, type_=instance.Instance.Type.PRODUCTION, satisfies_pzs=True, + satisfies_pzi=True, ) response = client.update_instance(request) @@ -2218,6 +2274,7 @@ def test_update_instance(request_type, transport: str = "grpc"): assert response.state == instance.Instance.State.READY assert response.type_ == instance.Instance.Type.PRODUCTION assert response.satisfies_pzs is True + assert response.satisfies_pzi is True def test_update_instance_non_empty_request_with_auto_populated_field(): @@ -2350,6 +2407,7 @@ async def test_update_instance_async( state=instance.Instance.State.READY, type_=instance.Instance.Type.PRODUCTION, satisfies_pzs=True, + satisfies_pzi=True, ) ) response = await client.update_instance(request) @@ -2367,6 +2425,7 @@ async def test_update_instance_async( assert response.state == instance.Instance.State.READY assert response.type_ == instance.Instance.Type.PRODUCTION assert response.satisfies_pzs is True + assert response.satisfies_pzi is True @pytest.mark.asyncio @@ -6896,6 +6955,7 @@ def test_delete_app_profile_flattened(): # using the keyword arguments to the method. client.delete_app_profile( name="name_value", + ignore_warnings=True, ) # Establish that the underlying call was made with the expected @@ -6905,6 +6965,9 @@ def test_delete_app_profile_flattened(): arg = args[0].name mock_val = "name_value" assert arg == mock_val + arg = args[0].ignore_warnings + mock_val = True + assert arg == mock_val def test_delete_app_profile_flattened_error(): @@ -6918,6 +6981,7 @@ def test_delete_app_profile_flattened_error(): client.delete_app_profile( bigtable_instance_admin.DeleteAppProfileRequest(), name="name_value", + ignore_warnings=True, ) @@ -6939,6 +7003,7 @@ async def test_delete_app_profile_flattened_async(): # using the keyword arguments to the method. 
response = await client.delete_app_profile( name="name_value", + ignore_warnings=True, ) # Establish that the underlying call was made with the expected @@ -6948,6 +7013,9 @@ async def test_delete_app_profile_flattened_async(): arg = args[0].name mock_val = "name_value" assert arg == mock_val + arg = args[0].ignore_warnings + mock_val = True + assert arg == mock_val @pytest.mark.asyncio @@ -6962,6 +7030,7 @@ async def test_delete_app_profile_flattened_error_async(): await client.delete_app_profile( bigtable_instance_admin.DeleteAppProfileRequest(), name="name_value", + ignore_warnings=True, ) @@ -8539,13 +8608,80 @@ async def test_list_hot_tablets_async_pages(): assert page_.raw_page.next_page_token == token -def test_create_instance_rest_use_cached_wrapped_rpc(): +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.CreateLogicalViewRequest, + dict, + ], +) +def test_create_logical_view(request_type, transport: str = "grpc"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_logical_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.create_logical_view(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = bigtable_instance_admin.CreateLogicalViewRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_logical_view_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_instance_admin.CreateLogicalViewRequest( + parent="parent_value", + logical_view_id="logical_view_id_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_logical_view), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.create_logical_view(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.CreateLogicalViewRequest( + parent="parent_value", + logical_view_id="logical_view_id_value", + ) + + +def test_create_logical_view_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport="grpc", ) # Should wrap all calls on client creation @@ -8553,193 +8689,368 @@ def test_create_instance_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.create_instance in client._transport._wrapped_methods + assert ( + client._transport.create_logical_view in client._transport._wrapped_methods + ) # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[client._transport.create_instance] = mock_rpc - + client._transport._wrapped_methods[ + client._transport.create_logical_view + ] = mock_rpc request = {} - client.create_instance(request) + client.create_logical_view(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() - client.create_instance(request) + client.create_logical_view(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_create_instance_rest_required_fields( - request_type=bigtable_instance_admin.CreateInstanceRequest, +@pytest.mark.asyncio +async def test_create_logical_view_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", ): - transport_class = transports.BigtableInstanceAdminRestTransport + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) - request_init = {} - request_init["parent"] = "" - request_init["instance_id"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - # verify fields with default values are dropped + # Ensure method has been cached + assert ( + client._client._transport.create_logical_view + in client._client._transport._wrapped_methods + ) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).create_instance._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + 
client._client._transport._wrapped_methods[ + client._client._transport.create_logical_view + ] = mock_rpc - # verify required fields with default values are now present + request = {} + await client.create_logical_view(request) - jsonified_request["parent"] = "parent_value" - jsonified_request["instanceId"] = "instance_id_value" + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).create_instance._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - assert "instanceId" in jsonified_request - assert jsonified_request["instanceId"] == "instance_id_value" + await client.create_logical_view(request) - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) +@pytest.mark.asyncio +async def test_create_logical_view_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.CreateLogicalViewRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() - response = client.create_instance(request) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_logical_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.create_logical_view(request) - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = bigtable_instance_admin.CreateLogicalViewRequest() + assert args[0] == request + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) -def test_create_instance_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - unset_fields = transport.create_instance._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(()) - & set( - ( - "parent", - "instanceId", - "instance", - "clusters", - ) - ) - ) +@pytest.mark.asyncio +async def test_create_logical_view_async_from_dict(): + await test_create_logical_view_async(request_type=dict) -def test_create_instance_rest_flattened(): +def test_create_logical_view_field_headers(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1"} + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.CreateLogicalViewRequest() - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - instance_id="instance_id_value", - instance=gba_instance.Instance(name="name_value"), - clusters={"key_value": gba_instance.Cluster(name="name_value")}, - ) - mock_args.update(sample_request) + request.parent = "parent_value" - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_logical_view), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_logical_view(request) - client.create_instance(**mock_args) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{parent=projects/*}/instances" % client.transport._host, args[1] + # Establish that the field header was sent. 
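Editor's note on the metadata assertion that follows: the generated clients build this header entry with the routing-header helper from google-api-core, the same gapic_v1.routing_header call these tests use later when computing expected pager metadata. A minimal standalone sketch, assuming only that google-api-core is installed and that the helper behaves as the assertions in these tests expect:

    from google.api_core.gapic_v1 import routing_header

    # The helper URL-encodes the (field, value) pairs and returns a single
    # ("x-goog-request-params", "...") pair, which the client appends to the
    # gRPC call metadata; that pair is what the next assertion looks for.
    metadata = routing_header.to_grpc_metadata((("parent", "parent_value"),))
    assert metadata == ("x-goog-request-params", "parent=parent_value")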
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_logical_view_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.CreateLogicalViewRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_logical_view), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") ) + await client.create_logical_view(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request -def test_create_instance_rest_flattened_error(transport: str = "rest"): + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_create_logical_view_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_logical_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_logical_view( + parent="parent_value", + logical_view=instance.LogicalView(name="name_value"), + logical_view_id="logical_view_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].logical_view + mock_val = instance.LogicalView(name="name_value") + assert arg == mock_val + arg = args[0].logical_view_id + mock_val = "logical_view_id_value" + assert arg == mock_val + + +def test_create_logical_view_flattened_error(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.create_instance( - bigtable_instance_admin.CreateInstanceRequest(), + client.create_logical_view( + bigtable_instance_admin.CreateLogicalViewRequest(), parent="parent_value", - instance_id="instance_id_value", - instance=gba_instance.Instance(name="name_value"), - clusters={"key_value": gba_instance.Cluster(name="name_value")}, + logical_view=instance.LogicalView(name="name_value"), + logical_view_id="logical_view_id_value", ) -def test_get_instance_rest_use_cached_wrapped_rpc(): +@pytest.mark.asyncio +async def test_create_logical_view_flattened_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_logical_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_logical_view( + parent="parent_value", + logical_view=instance.LogicalView(name="name_value"), + logical_view_id="logical_view_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].logical_view + mock_val = instance.LogicalView(name="name_value") + assert arg == mock_val + arg = args[0].logical_view_id + mock_val = "logical_view_id_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_create_logical_view_flattened_error_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_logical_view( + bigtable_instance_admin.CreateLogicalViewRequest(), + parent="parent_value", + logical_view=instance.LogicalView(name="name_value"), + logical_view_id="logical_view_id_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.GetLogicalViewRequest, + dict, + ], +) +def test_get_logical_view(request_type, transport: str = "grpc"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_logical_view), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = instance.LogicalView( + name="name_value", + query="query_value", + etag="etag_value", + ) + response = client.get_logical_view(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = bigtable_instance_admin.GetLogicalViewRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, instance.LogicalView) + assert response.name == "name_value" + assert response.query == "query_value" + assert response.etag == "etag_value" + + +def test_get_logical_view_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. 
+ request = bigtable_instance_admin.GetLogicalViewRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_logical_view), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.get_logical_view(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.GetLogicalViewRequest( + name="name_value", + ) + + +def test_get_logical_view_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport="grpc", ) # Should wrap all calls on client creation @@ -8747,173 +9058,335 @@ def test_get_instance_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.get_instance in client._transport._wrapped_methods + assert client._transport.get_logical_view in client._transport._wrapped_methods # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[client._transport.get_instance] = mock_rpc - + client._transport._wrapped_methods[ + client._transport.get_logical_view + ] = mock_rpc request = {} - client.get_instance(request) + client.get_logical_view(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - client.get_instance(request) + client.get_logical_view(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_get_instance_rest_required_fields( - request_type=bigtable_instance_admin.GetInstanceRequest, +@pytest.mark.asyncio +async def test_get_logical_view_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", ): - transport_class = transports.BigtableInstanceAdminRestTransport + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - # verify fields with default values are dropped + # Ensure method has been cached + assert ( + client._client._transport.get_logical_view + in client._client._transport._wrapped_methods + ) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_instance._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.get_logical_view + ] = mock_rpc - # verify 
required fields with default values are now present + request = {} + await client.get_logical_view(request) - jsonified_request["name"] = "name_value" + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_instance._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + await client.get_logical_view(request) - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - # Designate an appropriate value for the returned response. - return_value = instance.Instance() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result +@pytest.mark.asyncio +async def test_get_logical_view_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.GetLogicalViewRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) - response_value = Response() - response_value.status_code = 200 + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() - # Convert return value to protobuf type - return_value = instance.Instance.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_logical_view), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + instance.LogicalView( + name="name_value", + query="query_value", + etag="etag_value", + ) + ) + response = await client.get_logical_view(request) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = bigtable_instance_admin.GetLogicalViewRequest() + assert args[0] == request - response = client.get_instance(request) + # Establish that the response is the type that we expect. 
+ assert isinstance(response, instance.LogicalView) + assert response.name == "name_value" + assert response.query == "query_value" + assert response.etag == "etag_value" - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params +@pytest.mark.asyncio +async def test_get_logical_view_async_from_dict(): + await test_get_logical_view_async(request_type=dict) -def test_get_instance_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials + +def test_get_logical_view_field_headers(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), ) - unset_fields = transport.get_instance._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.GetLogicalViewRequest() + + request.name = "name_value" + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_logical_view), "__call__") as call: + call.return_value = instance.LogicalView() + client.get_logical_view(request) -def test_get_instance_rest_flattened(): + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_logical_view_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.GetLogicalViewRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_logical_view), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + instance.LogicalView() + ) + await client.get_logical_view(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_get_logical_view_flattened(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = instance.Instance() + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_logical_view), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = instance.LogicalView() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.get_logical_view( + name="name_value", + ) - # get arguments that satisfy an http rule for this method - sample_request = {"name": "projects/sample1/instances/sample2"} + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val - # get truthy value for each flattened field - mock_args = dict( + +def test_get_logical_view_flattened_error(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_logical_view( + bigtable_instance_admin.GetLogicalViewRequest(), name="name_value", ) - mock_args.update(sample_request) - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = instance.Instance.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - client.get_instance(**mock_args) +@pytest.mark.asyncio +async def test_get_logical_view_flattened_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_logical_view), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = instance.LogicalView() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + instance.LogicalView() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_logical_view( + name="name_value", + ) # Establish that the underlying call was made with the expected # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{name=projects/*/instances/*}" % client.transport._host, args[1] - ) + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val -def test_get_instance_rest_flattened_error(transport: str = "rest"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, +@pytest.mark.asyncio +async def test_get_logical_view_flattened_error_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): - client.get_instance( - bigtable_instance_admin.GetInstanceRequest(), + await client.get_logical_view( + bigtable_instance_admin.GetLogicalViewRequest(), name="name_value", ) -def test_list_instances_rest_use_cached_wrapped_rpc(): +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.ListLogicalViewsRequest, + dict, + ], +) +def test_list_logical_views(request_type, transport: str = "grpc"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_logical_views), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_instance_admin.ListLogicalViewsResponse( + next_page_token="next_page_token_value", + ) + response = client.list_logical_views(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = bigtable_instance_admin.ListLogicalViewsRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListLogicalViewsPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_logical_views_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_instance_admin.ListLogicalViewsRequest( + parent="parent_value", + page_token="page_token_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_logical_views), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.list_logical_views(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.ListLogicalViewsRequest( + parent="parent_value", + page_token="page_token_value", + ) + + +def test_list_logical_views_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport="grpc", ) # Should wrap all calls on client creation @@ -8921,295 +9394,536 @@ def test_list_instances_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.list_instances in client._transport._wrapped_methods + assert ( + client._transport.list_logical_views in client._transport._wrapped_methods + ) # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[client._transport.list_instances] = mock_rpc - + client._transport._wrapped_methods[ + client._transport.list_logical_views + ] = mock_rpc request = {} - client.list_instances(request) + client.list_logical_views(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - client.list_instances(request) + client.list_logical_views(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_list_instances_rest_required_fields( - request_type=bigtable_instance_admin.ListInstancesRequest, +@pytest.mark.asyncio +async def test_list_logical_views_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", ): - transport_class = transports.BigtableInstanceAdminRestTransport + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - # verify fields with default values are dropped + # Ensure method has been cached + assert ( + client._client._transport.list_logical_views + in client._client._transport._wrapped_methods + ) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_instances._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.list_logical_views + ] = mock_rpc - # verify required fields with default values are now present + request = {} + await client.list_logical_views(request) - jsonified_request["parent"] = "parent_value" + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_instances._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("page_token",)) - jsonified_request.update(unset_fields) + await client.list_logical_views(request) - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListInstancesResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result +@pytest.mark.asyncio +async def test_list_logical_views_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.ListLogicalViewsRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) - response_value = Response() - response_value.status_code = 200 + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() - # Convert return value to protobuf type - return_value = bigtable_instance_admin.ListInstancesResponse.pb( - return_value + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_logical_views), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_instance_admin.ListLogicalViewsResponse( + next_page_token="next_page_token_value", ) - json_return_value = json_format.MessageToJson(return_value) + ) + response = await client.list_logical_views(request) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = bigtable_instance_admin.ListLogicalViewsRequest() + assert args[0] == request - response = client.list_instances(request) + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListLogicalViewsAsyncPager) + assert response.next_page_token == "next_page_token_value" - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params +@pytest.mark.asyncio +async def test_list_logical_views_async_from_dict(): + await test_list_logical_views_async(request_type=dict) -def test_list_instances_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials + +def test_list_logical_views_field_headers(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), ) - unset_fields = transport.list_instances._get_unset_required_fields({}) - assert set(unset_fields) == (set(("pageToken",)) & set(("parent",))) + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.ListLogicalViewsRequest() + request.parent = "parent_value" -def test_list_instances_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_logical_views), "__call__" + ) as call: + call.return_value = bigtable_instance_admin.ListLogicalViewsResponse() + client.list_logical_views(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_logical_views_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListInstancesResponse() + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.ListLogicalViewsRequest() - # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1"} + request.parent = "parent_value" - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_logical_views), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_instance_admin.ListLogicalViewsResponse() ) - mock_args.update(sample_request) + await client.list_logical_views(request) - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_instance_admin.ListInstancesResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request - client.list_instances(**mock_args) + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_list_logical_views_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_logical_views), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_instance_admin.ListLogicalViewsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_logical_views( + parent="parent_value", + ) # Establish that the underlying call was made with the expected # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{parent=projects/*}/instances" % client.transport._host, args[1] - ) + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val -def test_list_instances_rest_flattened_error(transport: str = "rest"): +def test_list_logical_views_flattened_error(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.list_instances( - bigtable_instance_admin.ListInstancesRequest(), + client.list_logical_views( + bigtable_instance_admin.ListLogicalViewsRequest(), parent="parent_value", ) -def test_update_instance_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() +@pytest.mark.asyncio +async def test_list_logical_views_flattened_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) - # Ensure method has been cached - assert client._transport.update_instance in client._transport._wrapped_methods + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_logical_views), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_instance_admin.ListLogicalViewsResponse() - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_instance_admin.ListLogicalViewsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.list_logical_views( + parent="parent_value", ) - client._transport._wrapped_methods[client._transport.update_instance] = mock_rpc - - request = {} - client.update_instance(request) - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val - client.update_instance(request) - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 +@pytest.mark.asyncio +async def test_list_logical_views_flattened_error_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_logical_views( + bigtable_instance_admin.ListLogicalViewsRequest(), + parent="parent_value", + ) -def test_update_instance_rest_required_fields(request_type=instance.Instance): - transport_class = transports.BigtableInstanceAdminRestTransport - request_init = {} - request_init["display_name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) +def test_list_logical_views_pager(transport_name: str = "grpc"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, ) - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).update_instance._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_logical_views), "__call__" + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + bigtable_instance_admin.ListLogicalViewsResponse( + logical_views=[ + instance.LogicalView(), + instance.LogicalView(), + instance.LogicalView(), + ], + next_page_token="abc", + ), + bigtable_instance_admin.ListLogicalViewsResponse( + logical_views=[], + next_page_token="def", + ), + bigtable_instance_admin.ListLogicalViewsResponse( + logical_views=[ + instance.LogicalView(), + ], + next_page_token="ghi", + ), + bigtable_instance_admin.ListLogicalViewsResponse( + logical_views=[ + instance.LogicalView(), + instance.LogicalView(), + ], + ), + RuntimeError, + ) - jsonified_request["displayName"] = "display_name_value" + expected_metadata = () + retry = retries.Retry() + timeout = 5 + expected_metadata = tuple(expected_metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_logical_views(request={}, retry=retry, timeout=timeout) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).update_instance._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + assert pager._metadata == expected_metadata + assert pager._retry == retry + assert pager._timeout == timeout - # verify required fields with non-default values are left alone - assert "displayName" in jsonified_request - assert jsonified_request["displayName"] == "display_name_value" + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, instance.LogicalView) for i in results) + +def test_list_logical_views_pages(transport_name: str = "grpc"): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport=transport_name, ) - request = request_type(**request_init) - # Designate an appropriate value for the returned response. - return_value = instance.Instance() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "put", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_logical_views), "__call__" + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + bigtable_instance_admin.ListLogicalViewsResponse( + logical_views=[ + instance.LogicalView(), + instance.LogicalView(), + instance.LogicalView(), + ], + next_page_token="abc", + ), + bigtable_instance_admin.ListLogicalViewsResponse( + logical_views=[], + next_page_token="def", + ), + bigtable_instance_admin.ListLogicalViewsResponse( + logical_views=[ + instance.LogicalView(), + ], + next_page_token="ghi", + ), + bigtable_instance_admin.ListLogicalViewsResponse( + logical_views=[ + instance.LogicalView(), + instance.LogicalView(), + ], + ), + RuntimeError, + ) + pages = list(client.list_logical_views(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = instance.Instance.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) +@pytest.mark.asyncio +async def test_list_logical_views_async_pager(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_logical_views), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + bigtable_instance_admin.ListLogicalViewsResponse( + logical_views=[ + instance.LogicalView(), + instance.LogicalView(), + instance.LogicalView(), + ], + next_page_token="abc", + ), + bigtable_instance_admin.ListLogicalViewsResponse( + logical_views=[], + next_page_token="def", + ), + bigtable_instance_admin.ListLogicalViewsResponse( + logical_views=[ + instance.LogicalView(), + ], + next_page_token="ghi", + ), + bigtable_instance_admin.ListLogicalViewsResponse( + logical_views=[ + instance.LogicalView(), + instance.LogicalView(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_logical_views( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) - response = client.update_instance(request) + assert len(responses) == 6 + assert all(isinstance(i, instance.LogicalView) for i in responses) - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params +@pytest.mark.asyncio +async def test_list_logical_views_async_pages(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) -def test_update_instance_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_logical_views), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + bigtable_instance_admin.ListLogicalViewsResponse( + logical_views=[ + instance.LogicalView(), + instance.LogicalView(), + instance.LogicalView(), + ], + next_page_token="abc", + ), + bigtable_instance_admin.ListLogicalViewsResponse( + logical_views=[], + next_page_token="def", + ), + bigtable_instance_admin.ListLogicalViewsResponse( + logical_views=[ + instance.LogicalView(), + ], + next_page_token="ghi", + ), + bigtable_instance_admin.ListLogicalViewsResponse( + logical_views=[ + instance.LogicalView(), + instance.LogicalView(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_logical_views(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.UpdateLogicalViewRequest, + dict, + ], +) +def test_update_logical_view(request_type, transport: str = "grpc"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) - unset_fields = transport.update_instance._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("displayName",))) + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_logical_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.update_logical_view(request) -def test_partial_update_instance_rest_use_cached_wrapped_rpc(): + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = bigtable_instance_admin.UpdateLogicalViewRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_update_logical_view_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_instance_admin.UpdateLogicalViewRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_logical_view), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.update_logical_view(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.UpdateLogicalViewRequest() + + +def test_update_logical_view_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport="grpc", ) # Should wrap all calls on client creation @@ -9218,8 +9932,7 @@ def test_partial_update_instance_rest_use_cached_wrapped_rpc(): # Ensure method has been cached assert ( - client._transport.partial_update_instance - in client._transport._wrapped_methods + client._transport.update_logical_view in client._transport._wrapped_methods ) # Replace cached wrapped function with mock @@ -9228,344 +9941,347 @@ def test_partial_update_instance_rest_use_cached_wrapped_rpc(): "foo" # operation_request.operation in compute client(s) expect a string. ) client._transport._wrapped_methods[ - client._transport.partial_update_instance + client._transport.update_logical_view ] = mock_rpc - request = {} - client.partial_update_instance(request) + client.update_logical_view(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() - client.partial_update_instance(request) + client.update_logical_view(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_partial_update_instance_rest_required_fields( - request_type=bigtable_instance_admin.PartialUpdateInstanceRequest, +@pytest.mark.asyncio +async def test_update_logical_view_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", ): - transport_class = transports.BigtableInstanceAdminRestTransport + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) - request_init = {} - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - # verify fields with default values are dropped + # Ensure method has been cached + assert ( + client._client._transport.update_logical_view + in client._client._transport._wrapped_methods + ) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).partial_update_instance._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.update_logical_view + ] = mock_rpc - # verify required fields 
with default values are now present + request = {} + await client.update_logical_view(request) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).partial_update_instance._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("update_mask",)) - jsonified_request.update(unset_fields) + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 - # verify required fields with non-default values are left alone + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.update_logical_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_update_logical_view_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.UpdateLogicalViewRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_logical_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.update_logical_view(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = bigtable_instance_admin.UpdateLogicalViewRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_update_logical_view_async_from_dict(): + await test_update_logical_view_async(request_type=dict) + +def test_update_logical_view_field_headers(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", ) - request = request_type(**request_init) - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "patch", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = bigtable_instance_admin.UpdateLogicalViewRequest() - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) + request.logical_view.name = "name_value" - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_logical_view), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.update_logical_view(request) - response = client.partial_update_instance(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "logical_view.name=name_value", + ) in kw["metadata"] -def test_partial_update_instance_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials +@pytest.mark.asyncio +async def test_update_logical_view_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), ) - unset_fields = transport.partial_update_instance._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(("updateMask",)) - & set( - ( - "instance", - "updateMask", - ) + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.UpdateLogicalViewRequest() + + request.logical_view.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_logical_view), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") ) - ) + await client.update_logical_view(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "logical_view.name=name_value", + ) in kw["metadata"] -def test_partial_update_instance_rest_flattened(): + +def test_update_logical_view_flattened(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # get arguments that satisfy an http rule for this method - sample_request = {"instance": {"name": "projects/sample1/instances/sample2"}} - - # get truthy value for each flattened field - mock_args = dict( - instance=gba_instance.Instance(name="name_value"), + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_logical_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_logical_view( + logical_view=instance.LogicalView(name="name_value"), update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - - client.partial_update_instance(**mock_args) # Establish that the underlying call was made with the expected # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{instance.name=projects/*/instances/*}" % client.transport._host, - args[1], - ) + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].logical_view + mock_val = instance.LogicalView(name="name_value") + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + assert arg == mock_val -def test_partial_update_instance_rest_flattened_error(transport: str = "rest"): +def test_update_logical_view_flattened_error(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.partial_update_instance( - bigtable_instance_admin.PartialUpdateInstanceRequest(), - instance=gba_instance.Instance(name="name_value"), + client.update_logical_view( + bigtable_instance_admin.UpdateLogicalViewRequest(), + logical_view=instance.LogicalView(name="name_value"), update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) -def test_delete_instance_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() +@pytest.mark.asyncio +async def test_update_logical_view_flattened_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) - # Ensure method has been cached - assert client._transport.delete_instance in client._transport._wrapped_methods + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_logical_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.update_logical_view( + logical_view=instance.LogicalView(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) - client._transport._wrapped_methods[client._transport.delete_instance] = mock_rpc - - request = {} - client.delete_instance(request) - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].logical_view + mock_val = instance.LogicalView(name="name_value") + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + assert arg == mock_val - client.delete_instance(request) - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 +@pytest.mark.asyncio +async def test_update_logical_view_flattened_error_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_logical_view( + bigtable_instance_admin.UpdateLogicalViewRequest(), + logical_view=instance.LogicalView(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) -def test_delete_instance_rest_required_fields( - request_type=bigtable_instance_admin.DeleteInstanceRequest, -): - transport_class = transports.BigtableInstanceAdminRestTransport - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.DeleteLogicalViewRequest, + dict, + ], +) +def test_delete_logical_view(request_type, transport: str = "grpc"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).delete_instance._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() - # verify required fields with default values are now present + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_logical_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_logical_view(request) - jsonified_request["name"] = "name_value" + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = bigtable_instance_admin.DeleteLogicalViewRequest() + assert args[0] == request - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).delete_instance._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Establish that the response is the type that we expect. 
+ assert response is None - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" +def test_delete_logical_view_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = None - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "delete", - "query_params": pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = "" - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - - response = client.delete_instance(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_delete_instance_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials + transport="grpc", ) - unset_fields = transport.delete_instance._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) - - -def test_delete_instance_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_instance_admin.DeleteLogicalViewRequest( + name="name_value", + etag="etag_value", ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = None - - # get arguments that satisfy an http rule for this method - sample_request = {"name": "projects/sample1/instances/sample2"} - - # get truthy value for each flattened field - mock_args = dict( - name="name_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = "" - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - - client.delete_instance(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{name=projects/*/instances/*}" % client.transport._host, args[1] + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_logical_view), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. ) - - -def test_delete_instance_rest_flattened_error(transport: str = "rest"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_instance( - bigtable_instance_admin.DeleteInstanceRequest(), + client.delete_logical_view(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.DeleteLogicalViewRequest( name="name_value", + etag="etag_value", ) -def test_create_cluster_rest_use_cached_wrapped_rpc(): +def test_delete_logical_view_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport="grpc", ) # Should wrap all calls on client creation @@ -9573,202 +10289,331 @@ def test_create_cluster_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.create_cluster in client._transport._wrapped_methods + assert ( + client._transport.delete_logical_view in client._transport._wrapped_methods + ) # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[client._transport.create_cluster] = mock_rpc - + client._transport._wrapped_methods[ + client._transport.delete_logical_view + ] = mock_rpc request = {} - client.create_cluster(request) + client.delete_logical_view(request) # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + client.delete_logical_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_delete_logical_view_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 wrapper_fn.reset_mock() - client.create_cluster(request) + # Ensure method has been cached + assert ( + client._client._transport.delete_logical_view + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.delete_logical_view + ] = mock_rpc + + request = {} + await client.delete_logical_view(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.delete_logical_view(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_create_cluster_rest_required_fields( - request_type=bigtable_instance_admin.CreateClusterRequest, +@pytest.mark.asyncio +async def test_delete_logical_view_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.DeleteLogicalViewRequest, ): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request_init["cluster_id"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, ) - # verify fields with default values are dropped - assert "clusterId" not in jsonified_request + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).create_cluster._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_logical_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_logical_view(request) - # verify required fields with default values are now present - assert "clusterId" in jsonified_request - assert jsonified_request["clusterId"] == request_init["cluster_id"] + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = bigtable_instance_admin.DeleteLogicalViewRequest() + assert args[0] == request - jsonified_request["parent"] = "parent_value" - jsonified_request["clusterId"] = "cluster_id_value" + # Establish that the response is the type that we expect. + assert response is None - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).create_cluster._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("cluster_id",)) - jsonified_request.update(unset_fields) - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - assert "clusterId" in jsonified_request - assert jsonified_request["clusterId"] == "cluster_id_value" +@pytest.mark.asyncio +async def test_delete_logical_view_async_from_dict(): + await test_delete_logical_view_async(request_type=dict) + +def test_delete_logical_view_field_headers(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", ) - request = request_type(**request_init) - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.DeleteLogicalViewRequest() - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) + request.name = "name_value" - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_logical_view), "__call__" + ) as call: + call.return_value = None + client.delete_logical_view(request) - response = client.create_cluster(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request - expected_params = [ - ( - "clusterId", - "", - ), - ("$alt", "json;enum-encoding=int"), - ] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] -def test_create_cluster_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials +@pytest.mark.asyncio +async def test_delete_logical_view_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), ) - unset_fields = transport.create_cluster._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(("clusterId",)) - & set( - ( - "parent", - "clusterId", - "cluster", - ) - ) - ) + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.DeleteLogicalViewRequest() + request.name = "name_value" -def test_create_cluster_rest_flattened(): + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_logical_view), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_logical_view(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_delete_logical_view_flattened(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_logical_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_logical_view( + name="name_value", + ) - # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/instances/sample2"} + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - cluster_id="cluster_id_value", - cluster=instance.Cluster(name="name_value"), + +def test_delete_logical_view_flattened_error(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete_logical_view( + bigtable_instance_admin.DeleteLogicalViewRequest(), + name="name_value", ) - mock_args.update(sample_request) - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - client.create_cluster(**mock_args) +@pytest.mark.asyncio +async def test_delete_logical_view_flattened_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_logical_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_logical_view( + name="name_value", + ) # Establish that the underlying call was made with the expected # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*}/clusters" % client.transport._host, - args[1], + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_delete_logical_view_flattened_error_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_logical_view( + bigtable_instance_admin.DeleteLogicalViewRequest(), + name="name_value", ) -def test_create_cluster_rest_flattened_error(transport: str = "rest"): +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.CreateMaterializedViewRequest, + dict, + ], +) +def test_create_materialized_view(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_cluster( - bigtable_instance_admin.CreateClusterRequest(), + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_materialized_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.create_materialized_view(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = bigtable_instance_admin.CreateMaterializedViewRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +def test_create_materialized_view_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_instance_admin.CreateMaterializedViewRequest( + parent="parent_value", + materialized_view_id="materialized_view_id_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_materialized_view), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.create_materialized_view(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.CreateMaterializedViewRequest( parent="parent_value", - cluster_id="cluster_id_value", - cluster=instance.Cluster(name="name_value"), + materialized_view_id="materialized_view_id_value", ) -def test_get_cluster_rest_use_cached_wrapped_rpc(): +def test_create_materialized_view_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport="grpc", ) # Should wrap all calls on client creation @@ -9776,351 +10621,375 @@ def test_get_cluster_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.get_cluster in client._transport._wrapped_methods + assert ( + client._transport.create_materialized_view + in client._transport._wrapped_methods + ) # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[client._transport.get_cluster] = mock_rpc - + client._transport._wrapped_methods[ + client._transport.create_materialized_view + ] = mock_rpc request = {} - client.get_cluster(request) + client.create_materialized_view(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - client.get_cluster(request) + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. 
+ # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.create_materialized_view(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_get_cluster_rest_required_fields( - request_type=bigtable_instance_admin.GetClusterRequest, +@pytest.mark.asyncio +async def test_create_materialized_view_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", ): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) - # verify fields with default values are dropped + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_cluster._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Ensure method has been cached + assert ( + client._client._transport.create_materialized_view + in client._client._transport._wrapped_methods + ) - # verify required fields with default values are now present + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.create_materialized_view + ] = mock_rpc - jsonified_request["name"] = "name_value" + request = {} + await client.create_materialized_view(request) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_cluster._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) + await client.create_materialized_view(request) - # Designate an appropriate value for the returned response. - return_value = instance.Cluster() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = instance.Cluster.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) +@pytest.mark.asyncio +async def test_create_materialized_view_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.CreateMaterializedViewRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() - response = client.get_cluster(request) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_materialized_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.create_materialized_view(request) - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = bigtable_instance_admin.CreateMaterializedViewRequest() + assert args[0] == request + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) -def test_get_cluster_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - unset_fields = transport.get_cluster._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) +@pytest.mark.asyncio +async def test_create_materialized_view_async_from_dict(): + await test_create_materialized_view_async(request_type=dict) -def test_get_cluster_rest_flattened(): +def test_create_materialized_view_field_headers(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = instance.Cluster() + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.CreateMaterializedViewRequest() - # get arguments that satisfy an http rule for this method - sample_request = {"name": "projects/sample1/instances/sample2/clusters/sample3"} + request.parent = "parent_value" - # get truthy value for each flattened field - mock_args = dict( - name="name_value", + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_materialized_view), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_materialized_view(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_materialized_view_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.CreateMaterializedViewRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_materialized_view), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") ) - mock_args.update(sample_request) + await client.create_materialized_view(request) - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = instance.Cluster.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request - client.get_cluster(**mock_args) + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_create_materialized_view_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_materialized_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_materialized_view( + parent="parent_value", + materialized_view=instance.MaterializedView(name="name_value"), + materialized_view_id="materialized_view_id_value", + ) # Establish that the underlying call was made with the expected # request object values. 
- assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/clusters/*}" % client.transport._host, - args[1], - ) + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].materialized_view + mock_val = instance.MaterializedView(name="name_value") + assert arg == mock_val + arg = args[0].materialized_view_id + mock_val = "materialized_view_id_value" + assert arg == mock_val -def test_get_cluster_rest_flattened_error(transport: str = "rest"): +def test_create_materialized_view_flattened_error(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.get_cluster( - bigtable_instance_admin.GetClusterRequest(), - name="name_value", + client.create_materialized_view( + bigtable_instance_admin.CreateMaterializedViewRequest(), + parent="parent_value", + materialized_view=instance.MaterializedView(name="name_value"), + materialized_view_id="materialized_view_id_value", ) -def test_list_clusters_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() +@pytest.mark.asyncio +async def test_create_materialized_view_flattened_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) - # Ensure method has been cached - assert client._transport.list_clusters in client._transport._wrapped_methods + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_materialized_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_materialized_view( + parent="parent_value", + materialized_view=instance.MaterializedView(name="name_value"), + materialized_view_id="materialized_view_id_value", ) - client._transport._wrapped_methods[client._transport.list_clusters] = mock_rpc - - request = {} - client.list_clusters(request) - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].materialized_view + mock_val = instance.MaterializedView(name="name_value") + assert arg == mock_val + arg = args[0].materialized_view_id + mock_val = "materialized_view_id_value" + assert arg == mock_val - client.list_clusters(request) - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 +@pytest.mark.asyncio +async def test_create_materialized_view_flattened_error_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_materialized_view( + bigtable_instance_admin.CreateMaterializedViewRequest(), + parent="parent_value", + materialized_view=instance.MaterializedView(name="name_value"), + materialized_view_id="materialized_view_id_value", + ) -def test_list_clusters_rest_required_fields( - request_type=bigtable_instance_admin.ListClustersRequest, -): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_clusters._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["parent"] = "parent_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_clusters._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("page_token",)) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.GetMaterializedViewRequest, + dict, + ], +) +def test_get_materialized_view(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport=transport, ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListClustersResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable_instance_admin.ListClustersResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - - response = client.list_clusters(request) - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_materialized_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = instance.MaterializedView( + name="name_value", + query="query_value", + etag="etag_value", + deletion_protection=True, + ) + response = client.get_materialized_view(request) -def test_list_clusters_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = bigtable_instance_admin.GetMaterializedViewRequest() + assert args[0] == request - unset_fields = transport.list_clusters._get_unset_required_fields({}) - assert set(unset_fields) == (set(("pageToken",)) & set(("parent",))) + # Establish that the response is the type that we expect. + assert isinstance(response, instance.MaterializedView) + assert response.name == "name_value" + assert response.query == "query_value" + assert response.etag == "etag_value" + assert response.deletion_protection is True -def test_list_clusters_rest_flattened(): +def test_get_materialized_view_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport="grpc", ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. 
- return_value = bigtable_instance_admin.ListClustersResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/instances/sample2"} - - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_instance_admin.ListClustersResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - - client.list_clusters(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*}/clusters" % client.transport._host, - args[1], - ) - - -def test_list_clusters_rest_flattened_error(transport: str = "rest"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_instance_admin.GetMaterializedViewRequest( + name="name_value", ) - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_clusters( - bigtable_instance_admin.ListClustersRequest(), - parent="parent_value", + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_materialized_view), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.get_materialized_view(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.GetMaterializedViewRequest( + name="name_value", ) -def test_update_cluster_rest_use_cached_wrapped_rpc(): +def test_get_materialized_view_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport="grpc", ) # Should wrap all calls on client creation @@ -10128,39 +10997,42 @@ def test_update_cluster_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.update_cluster in client._transport._wrapped_methods + assert ( + client._transport.get_materialized_view + in client._transport._wrapped_methods + ) # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[client._transport.update_cluster] = mock_rpc - + client._transport._wrapped_methods[ + client._transport.get_materialized_view + ] = mock_rpc request = {} - client.update_cluster(request) + client.get_materialized_view(request) # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.update_cluster(request) + client.get_materialized_view(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_partial_update_cluster_rest_use_cached_wrapped_rpc(): +@pytest.mark.asyncio +async def test_get_materialized_view_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, ) # Should wrap all calls on client creation @@ -10169,188 +11041,306 @@ def test_partial_update_cluster_rest_use_cached_wrapped_rpc(): # Ensure method has been cached assert ( - client._transport.partial_update_cluster - in client._transport._wrapped_methods + client._client._transport.get_materialized_view + in client._client._transport._wrapped_methods ) # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[ - client._transport.partial_update_cluster + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.get_materialized_view ] = mock_rpc request = {} - client.partial_update_cluster(request) + await client.get_materialized_view(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.partial_update_cluster(request) + await client.get_materialized_view(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_partial_update_cluster_rest_required_fields( - request_type=bigtable_instance_admin.PartialUpdateClusterRequest, +@pytest.mark.asyncio +async def test_get_materialized_view_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.GetMaterializedViewRequest, ): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, ) - # verify fields with default values are dropped + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).partial_update_cluster._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_materialized_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + instance.MaterializedView( + name="name_value", + query="query_value", + etag="etag_value", + deletion_protection=True, + ) + ) + response = await client.get_materialized_view(request) - # verify required fields with default values are now present + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = bigtable_instance_admin.GetMaterializedViewRequest() + assert args[0] == request - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).partial_update_cluster._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("update_mask",)) - jsonified_request.update(unset_fields) + # Establish that the response is the type that we expect. + assert isinstance(response, instance.MaterializedView) + assert response.name == "name_value" + assert response.query == "query_value" + assert response.etag == "etag_value" + assert response.deletion_protection is True + + +@pytest.mark.asyncio +async def test_get_materialized_view_async_from_dict(): + await test_get_materialized_view_async(request_type=dict) - # verify required fields with non-default values are left alone +def test_get_materialized_view_field_headers(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", ) - request = request_type(**request_init) - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "patch", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.GetMaterializedViewRequest() - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) + request.name = "name_value" - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_materialized_view), "__call__" + ) as call: + call.return_value = instance.MaterializedView() + client.get_materialized_view(request) - response = client.partial_update_cluster(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] -def test_partial_update_cluster_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials +@pytest.mark.asyncio +async def test_get_materialized_view_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), ) - unset_fields = transport.partial_update_cluster._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(("updateMask",)) - & set( - ( - "cluster", - "updateMask", - ) + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.GetMaterializedViewRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_materialized_view), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + instance.MaterializedView() ) - ) + await client.get_materialized_view(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] -def test_partial_update_cluster_rest_flattened(): +def test_get_materialized_view_flattened(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_materialized_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = instance.MaterializedView() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_materialized_view( + name="name_value", + ) - # get arguments that satisfy an http rule for this method - sample_request = { - "cluster": {"name": "projects/sample1/instances/sample2/clusters/sample3"} - } + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val - # get truthy value for each flattened field - mock_args = dict( - cluster=instance.Cluster(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + +def test_get_materialized_view_flattened_error(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_materialized_view( + bigtable_instance_admin.GetMaterializedViewRequest(), + name="name_value", ) - mock_args.update(sample_request) - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - client.partial_update_cluster(**mock_args) +@pytest.mark.asyncio +async def test_get_materialized_view_flattened_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_materialized_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = instance.MaterializedView() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + instance.MaterializedView() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_materialized_view( + name="name_value", + ) # Establish that the underlying call was made with the expected # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{cluster.name=projects/*/instances/*/clusters/*}" - % client.transport._host, - args[1], + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_get_materialized_view_flattened_error_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_materialized_view( + bigtable_instance_admin.GetMaterializedViewRequest(), + name="name_value", ) -def test_partial_update_cluster_rest_flattened_error(transport: str = "rest"): +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.ListMaterializedViewsRequest, + dict, + ], +) +def test_list_materialized_views(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.partial_update_cluster( - bigtable_instance_admin.PartialUpdateClusterRequest(), - cluster=instance.Cluster(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_materialized_views), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_instance_admin.ListMaterializedViewsResponse( + next_page_token="next_page_token_value", ) + response = client.list_materialized_views(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = bigtable_instance_admin.ListMaterializedViewsRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListMaterializedViewsPager) + assert response.next_page_token == "next_page_token_value" -def test_delete_cluster_rest_use_cached_wrapped_rpc(): +def test_list_materialized_views_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_instance_admin.ListMaterializedViewsRequest( + parent="parent_value", + page_token="page_token_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_materialized_views), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.list_materialized_views(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.ListMaterializedViewsRequest( + parent="parent_value", + page_token="page_token_value", + ) + + +def test_list_materialized_views_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport="grpc", ) # Should wrap all calls on client creation @@ -10358,388 +11348,537 @@ def test_delete_cluster_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.delete_cluster in client._transport._wrapped_methods + assert ( + client._transport.list_materialized_views + in client._transport._wrapped_methods + ) # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[client._transport.delete_cluster] = mock_rpc - + client._transport._wrapped_methods[ + client._transport.list_materialized_views + ] = mock_rpc request = {} - client.delete_cluster(request) + client.list_materialized_views(request) # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 - client.delete_cluster(request) + client.list_materialized_views(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_delete_cluster_rest_required_fields( - request_type=bigtable_instance_admin.DeleteClusterRequest, +@pytest.mark.asyncio +async def test_list_materialized_views_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", ): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) - # verify fields with default values are dropped + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).delete_cluster._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Ensure method has been cached + assert ( + client._client._transport.list_materialized_views + in client._client._transport._wrapped_methods + ) - # verify required fields with default values are now present + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.list_materialized_views + ] = mock_rpc - jsonified_request["name"] = "name_value" + request = {} + await client.list_materialized_views(request) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).delete_cluster._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" + await client.list_materialized_views(request) - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 - # Designate an appropriate value for the returned response. - return_value = None - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "delete", - "query_params": pb_request, - } - transcode.return_value = transcode_result - response_value = Response() - response_value.status_code = 200 - json_return_value = "" +@pytest.mark.asyncio +async def test_list_materialized_views_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.ListMaterializedViewsRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() - response = client.delete_cluster(request) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_materialized_views), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_instance_admin.ListMaterializedViewsResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_materialized_views(request) - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = bigtable_instance_admin.ListMaterializedViewsRequest() + assert args[0] == request + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListMaterializedViewsAsyncPager) + assert response.next_page_token == "next_page_token_value" -def test_delete_cluster_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - unset_fields = transport.delete_cluster._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) +@pytest.mark.asyncio +async def test_list_materialized_views_async_from_dict(): + await test_list_materialized_views_async(request_type=dict) -def test_delete_cluster_rest_flattened(): +def test_list_materialized_views_field_headers(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = None + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.ListMaterializedViewsRequest() - # get arguments that satisfy an http rule for this method - sample_request = {"name": "projects/sample1/instances/sample2/clusters/sample3"} + request.parent = "parent_value" - # get truthy value for each flattened field - mock_args = dict( - name="name_value", + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_materialized_views), "__call__" + ) as call: + call.return_value = bigtable_instance_admin.ListMaterializedViewsResponse() + client.list_materialized_views(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_materialized_views_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.ListMaterializedViewsRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_materialized_views), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_instance_admin.ListMaterializedViewsResponse() ) - mock_args.update(sample_request) + await client.list_materialized_views(request) - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = "" - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request - client.delete_cluster(**mock_args) + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_list_materialized_views_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_materialized_views), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_instance_admin.ListMaterializedViewsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_materialized_views( + parent="parent_value", + ) # Establish that the underlying call was made with the expected # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/clusters/*}" % client.transport._host, - args[1], - ) + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val -def test_delete_cluster_rest_flattened_error(transport: str = "rest"): +def test_list_materialized_views_flattened_error(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, ) # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): - client.delete_cluster( - bigtable_instance_admin.DeleteClusterRequest(), - name="name_value", + client.list_materialized_views( + bigtable_instance_admin.ListMaterializedViewsRequest(), + parent="parent_value", ) -def test_create_app_profile_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) +@pytest.mark.asyncio +async def test_list_materialized_views_flattened_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_materialized_views), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_instance_admin.ListMaterializedViewsResponse() - # Ensure method has been cached - assert ( - client._transport.create_app_profile in client._transport._wrapped_methods + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_instance_admin.ListMaterializedViewsResponse() ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_materialized_views( + parent="parent_value", ) - client._transport._wrapped_methods[ - client._transport.create_app_profile - ] = mock_rpc - request = {} - client.create_app_profile(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val - client.create_app_profile(request) - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 +@pytest.mark.asyncio +async def test_list_materialized_views_flattened_error_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.list_materialized_views( + bigtable_instance_admin.ListMaterializedViewsRequest(), + parent="parent_value", + ) -def test_create_app_profile_rest_required_fields( - request_type=bigtable_instance_admin.CreateAppProfileRequest, -): - transport_class = transports.BigtableInstanceAdminRestTransport - request_init = {} - request_init["parent"] = "" - request_init["app_profile_id"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) +def test_list_materialized_views_pager(transport_name: str = "grpc"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, ) - # verify fields with default values are dropped - assert "appProfileId" not in jsonified_request - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).create_app_profile._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_materialized_views), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + bigtable_instance_admin.ListMaterializedViewsResponse( + materialized_views=[ + instance.MaterializedView(), + instance.MaterializedView(), + instance.MaterializedView(), + ], + next_page_token="abc", + ), + bigtable_instance_admin.ListMaterializedViewsResponse( + materialized_views=[], + next_page_token="def", + ), + bigtable_instance_admin.ListMaterializedViewsResponse( + materialized_views=[ + instance.MaterializedView(), + ], + next_page_token="ghi", + ), + bigtable_instance_admin.ListMaterializedViewsResponse( + materialized_views=[ + instance.MaterializedView(), + instance.MaterializedView(), + ], + ), + RuntimeError, + ) - # verify required fields with default values are now present - assert "appProfileId" in jsonified_request - assert jsonified_request["appProfileId"] == request_init["app_profile_id"] + expected_metadata = () + retry = retries.Retry() + timeout = 5 + expected_metadata = tuple(expected_metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_materialized_views(request={}, retry=retry, timeout=timeout) - jsonified_request["parent"] = "parent_value" - jsonified_request["appProfileId"] = "app_profile_id_value" + assert pager._metadata == expected_metadata + assert pager._retry == retry + assert pager._timeout == timeout - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).create_app_profile._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. 
- assert not set(unset_fields) - set( - ( - "app_profile_id", - "ignore_warnings", - ) - ) - jsonified_request.update(unset_fields) + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, instance.MaterializedView) for i in results) - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - assert "appProfileId" in jsonified_request - assert jsonified_request["appProfileId"] == "app_profile_id_value" +def test_list_materialized_views_pages(transport_name: str = "grpc"): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport=transport_name, ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = instance.AppProfile() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - response_value = Response() - response_value.status_code = 200 + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_materialized_views), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + bigtable_instance_admin.ListMaterializedViewsResponse( + materialized_views=[ + instance.MaterializedView(), + instance.MaterializedView(), + instance.MaterializedView(), + ], + next_page_token="abc", + ), + bigtable_instance_admin.ListMaterializedViewsResponse( + materialized_views=[], + next_page_token="def", + ), + bigtable_instance_admin.ListMaterializedViewsResponse( + materialized_views=[ + instance.MaterializedView(), + ], + next_page_token="ghi", + ), + bigtable_instance_admin.ListMaterializedViewsResponse( + materialized_views=[ + instance.MaterializedView(), + instance.MaterializedView(), + ], + ), + RuntimeError, + ) + pages = list(client.list_materialized_views(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token - # Convert return value to protobuf type - return_value = instance.AppProfile.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value +@pytest.mark.asyncio +async def test_list_materialized_views_async_pager(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) - response = client.create_app_profile(request) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_materialized_views), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + bigtable_instance_admin.ListMaterializedViewsResponse( + materialized_views=[ + instance.MaterializedView(), + instance.MaterializedView(), + instance.MaterializedView(), + ], + next_page_token="abc", + ), + bigtable_instance_admin.ListMaterializedViewsResponse( + materialized_views=[], + next_page_token="def", + ), + bigtable_instance_admin.ListMaterializedViewsResponse( + materialized_views=[ + instance.MaterializedView(), + ], + next_page_token="ghi", + ), + bigtable_instance_admin.ListMaterializedViewsResponse( + materialized_views=[ + instance.MaterializedView(), + instance.MaterializedView(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_materialized_views( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) - expected_params = [ - ( - "appProfileId", - "", - ), - ("$alt", "json;enum-encoding=int"), - ] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + assert len(responses) == 6 + assert all(isinstance(i, instance.MaterializedView) for i in responses) -def test_create_app_profile_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials +@pytest.mark.asyncio +async def test_list_materialized_views_async_pages(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), ) - unset_fields = transport.create_app_profile._get_unset_required_fields({}) - assert set(unset_fields) == ( - set( - ( - "appProfileId", - "ignoreWarnings", - ) - ) - & set( - ( - "parent", - "appProfileId", - "appProfile", - ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_materialized_views), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + bigtable_instance_admin.ListMaterializedViewsResponse( + materialized_views=[ + instance.MaterializedView(), + instance.MaterializedView(), + instance.MaterializedView(), + ], + next_page_token="abc", + ), + bigtable_instance_admin.ListMaterializedViewsResponse( + materialized_views=[], + next_page_token="def", + ), + bigtable_instance_admin.ListMaterializedViewsResponse( + materialized_views=[ + instance.MaterializedView(), + ], + next_page_token="ghi", + ), + bigtable_instance_admin.ListMaterializedViewsResponse( + materialized_views=[ + instance.MaterializedView(), + instance.MaterializedView(), + ], + ), + RuntimeError, ) - ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_materialized_views(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token -def test_create_app_profile_rest_flattened(): +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.UpdateMaterializedViewRequest, + dict, + ], +) +def test_update_materialized_view(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport=transport, ) - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = instance.AppProfile() - - # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/instances/sample2"} - - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - app_profile_id="app_profile_id_value", - app_profile=instance.AppProfile(name="name_value"), - ) - mock_args.update(sample_request) + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = instance.AppProfile.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_materialized_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.update_materialized_view(request) - client.create_app_profile(**mock_args) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = bigtable_instance_admin.UpdateMaterializedViewRequest() + assert args[0] == request - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*}/appProfiles" - % client.transport._host, - args[1], - ) + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) -def test_create_app_profile_rest_flattened_error(transport: str = "rest"): +def test_update_materialized_view_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + transport="grpc", ) - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_app_profile( - bigtable_instance_admin.CreateAppProfileRequest(), - parent="parent_value", - app_profile_id="app_profile_id_value", - app_profile=instance.AppProfile(name="name_value"), + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_instance_admin.UpdateMaterializedViewRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_materialized_view), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
) + client.update_materialized_view(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.UpdateMaterializedViewRequest() -def test_get_app_profile_rest_use_cached_wrapped_rpc(): +def test_update_materialized_view_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport="grpc", ) # Should wrap all calls on client creation @@ -10747,185 +11886,369 @@ def test_get_app_profile_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.get_app_profile in client._transport._wrapped_methods + assert ( + client._transport.update_materialized_view + in client._transport._wrapped_methods + ) # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[client._transport.get_app_profile] = mock_rpc - + client._transport._wrapped_methods[ + client._transport.update_materialized_view + ] = mock_rpc request = {} - client.get_app_profile(request) + client.update_materialized_view(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - client.get_app_profile(request) + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.update_materialized_view(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_get_app_profile_rest_required_fields( - request_type=bigtable_instance_admin.GetAppProfileRequest, +@pytest.mark.asyncio +async def test_update_materialized_view_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", ): - transport_class = transports.BigtableInstanceAdminRestTransport + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - # verify fields with default values are dropped + # Ensure method has been cached + assert ( + client._client._transport.update_materialized_view + in client._client._transport._wrapped_methods + ) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_app_profile._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.update_materialized_view + ] = mock_rpc - # verify 
required fields with default values are now present + request = {} + await client.update_materialized_view(request) - jsonified_request["name"] = "name_value" + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_app_profile._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" + await client.update_materialized_view(request) - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_update_materialized_view_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.UpdateMaterializedViewRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, ) - request = request_type(**request_init) - # Designate an appropriate value for the returned response. - return_value = instance.AppProfile() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() - response_value = Response() - response_value.status_code = 200 + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_materialized_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.update_materialized_view(request) - # Convert return value to protobuf type - return_value = instance.AppProfile.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = bigtable_instance_admin.UpdateMaterializedViewRequest() + assert args[0] == request - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) - response = client.get_app_profile(request) - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params +@pytest.mark.asyncio +async def test_update_materialized_view_async_from_dict(): + await test_update_materialized_view_async(request_type=dict) -def test_get_app_profile_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials +def test_update_materialized_view_field_headers(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), ) - unset_fields = transport.get_app_profile._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.UpdateMaterializedViewRequest() + request.materialized_view.name = "name_value" -def test_get_app_profile_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_materialized_view), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.update_materialized_view(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "materialized_view.name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_update_materialized_view_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = instance.AppProfile() + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.UpdateMaterializedViewRequest() - # get arguments that satisfy an http rule for this method - sample_request = { - "name": "projects/sample1/instances/sample2/appProfiles/sample3" - } + request.materialized_view.name = "name_value" - # get truthy value for each flattened field - mock_args = dict( - name="name_value", + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_materialized_view), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") ) - mock_args.update(sample_request) + await client.update_materialized_view(request) - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = instance.AppProfile.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request - client.get_app_profile(**mock_args) + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "materialized_view.name=name_value", + ) in kw["metadata"] + + +def test_update_materialized_view_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_materialized_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_materialized_view( + materialized_view=instance.MaterializedView(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) # Establish that the underlying call was made with the expected # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/appProfiles/*}" - % client.transport._host, - args[1], - ) + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].materialized_view + mock_val = instance.MaterializedView(name="name_value") + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + assert arg == mock_val -def test_get_app_profile_rest_flattened_error(transport: str = "rest"): +def test_update_materialized_view_flattened_error(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.get_app_profile( - bigtable_instance_admin.GetAppProfileRequest(), - name="name_value", + client.update_materialized_view( + bigtable_instance_admin.UpdateMaterializedViewRequest(), + materialized_view=instance.MaterializedView(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) -def test_list_app_profiles_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) +@pytest.mark.asyncio +async def test_update_materialized_view_flattened_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_materialized_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.update_materialized_view( + materialized_view=instance.MaterializedView(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].materialized_view + mock_val = instance.MaterializedView(name="name_value") + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_update_materialized_view_flattened_error_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_materialized_view( + bigtable_instance_admin.UpdateMaterializedViewRequest(), + materialized_view=instance.MaterializedView(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.DeleteMaterializedViewRequest, + dict, + ], +) +def test_delete_materialized_view(request_type, transport: str = "grpc"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_materialized_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_materialized_view(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = bigtable_instance_admin.DeleteMaterializedViewRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_materialized_view_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_instance_admin.DeleteMaterializedViewRequest( + name="name_value", + etag="etag_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_materialized_view), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.delete_materialized_view(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.DeleteMaterializedViewRequest( + name="name_value", + etag="etag_value", + ) + + +def test_delete_materialized_view_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) # Should wrap all calls on client creation assert wrapper_fn.call_count > 0 wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.list_app_profiles in client._transport._wrapped_methods + assert ( + client._transport.delete_materialized_view + in client._transport._wrapped_methods + ) # Replace cached wrapped function with mock mock_rpc = mock.Mock() @@ -10933,243 +12256,248 @@ def test_list_app_profiles_rest_use_cached_wrapped_rpc(): "foo" # operation_request.operation in compute client(s) expect a string. ) client._transport._wrapped_methods[ - client._transport.list_app_profiles + client._transport.delete_materialized_view ] = mock_rpc - request = {} - client.list_app_profiles(request) + client.delete_materialized_view(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - client.list_app_profiles(request) + client.delete_materialized_view(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_list_app_profiles_rest_required_fields( - request_type=bigtable_instance_admin.ListAppProfilesRequest, +@pytest.mark.asyncio +async def test_delete_materialized_view_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", ): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_app_profiles._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - # verify required fields with default values are now present + # Ensure method has been cached + assert ( + client._client._transport.delete_materialized_view + in client._client._transport._wrapped_methods + ) - jsonified_request["parent"] = "parent_value" + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.delete_materialized_view + ] = mock_rpc - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - 
).list_app_profiles._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set( - ( - "page_size", - "page_token", - ) - ) - jsonified_request.update(unset_fields) + request = {} + await client.delete_materialized_view(request) - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) + await client.delete_materialized_view(request) - # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListAppProfilesResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_instance_admin.ListAppProfilesResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(return_value) +@pytest.mark.asyncio +async def test_delete_materialized_view_async( + transport: str = "grpc_asyncio", + request_type=bigtable_instance_admin.DeleteMaterializedViewRequest, +): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() - response = client.list_app_profiles(request) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_materialized_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_materialized_view(request) - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = bigtable_instance_admin.DeleteMaterializedViewRequest() + assert args[0] == request + # Establish that the response is the type that we expect. 
+ assert response is None -def test_list_app_profiles_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - unset_fields = transport.list_app_profiles._get_unset_required_fields({}) - assert set(unset_fields) == ( - set( - ( - "pageSize", - "pageToken", - ) - ) - & set(("parent",)) - ) +@pytest.mark.asyncio +async def test_delete_materialized_view_async_from_dict(): + await test_delete_materialized_view_async(request_type=dict) -def test_list_app_profiles_rest_flattened(): +def test_delete_materialized_view_field_headers(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListAppProfilesResponse() + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.DeleteMaterializedViewRequest() - # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/instances/sample2"} + request.name = "name_value" - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - ) - mock_args.update(sample_request) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_materialized_view), "__call__" + ) as call: + call.return_value = None + client.delete_materialized_view(request) - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_instance_admin.ListAppProfilesResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request - client.list_app_profiles(**mock_args) + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_materialized_view_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.DeleteMaterializedViewRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_materialized_view), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_materialized_view(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_delete_materialized_view_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_materialized_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_materialized_view( + name="name_value", + ) # Establish that the underlying call was made with the expected # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*}/appProfiles" - % client.transport._host, - args[1], - ) + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val -def test_list_app_profiles_rest_flattened_error(transport: str = "rest"): +def test_delete_materialized_view_flattened_error(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.list_app_profiles( - bigtable_instance_admin.ListAppProfilesRequest(), - parent="parent_value", + client.delete_materialized_view( + bigtable_instance_admin.DeleteMaterializedViewRequest(), + name="name_value", ) -def test_list_app_profiles_rest_pager(transport: str = "rest"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, +@pytest.mark.asyncio +async def test_delete_materialized_view_flattened_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # TODO(kbandes): remove this mock unless there's a good reason for it. - # with mock.patch.object(path_template, 'transcode') as transcode: - # Set the response as a series of pages - response = ( - bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[ - instance.AppProfile(), - instance.AppProfile(), - instance.AppProfile(), - ], - next_page_token="abc", - ), - bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[], - next_page_token="def", - ), - bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[ - instance.AppProfile(), - ], - next_page_token="ghi", - ), - bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[ - instance.AppProfile(), - instance.AppProfile(), - ], - ), - ) - # Two responses for two calls - response = response + response + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_materialized_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = None - # Wrap the values into proper Response objs - response = tuple( - bigtable_instance_admin.ListAppProfilesResponse.to_json(x) for x in response + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_materialized_view( + name="name_value", ) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = response_val.encode("UTF-8") - return_val.status_code = 200 - req.side_effect = return_values - sample_request = {"parent": "projects/sample1/instances/sample2"} + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val - pager = client.list_app_profiles(request=sample_request) - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, instance.AppProfile) for i in results) +@pytest.mark.asyncio +async def test_delete_materialized_view_flattened_error_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) - pages = list(client.list_app_profiles(request=sample_request).pages) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_materialized_view( + bigtable_instance_admin.DeleteMaterializedViewRequest(), + name="name_value", + ) -def test_update_app_profile_rest_use_cached_wrapped_rpc(): +def test_create_instance_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -11183,21 +12511,17 @@ def test_update_app_profile_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert ( - client._transport.update_app_profile in client._transport._wrapped_methods - ) + assert client._transport.create_instance in client._transport._wrapped_methods # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[ - client._transport.update_app_profile - ] = mock_rpc + client._transport._wrapped_methods[client._transport.create_instance] = mock_rpc request = {} - client.update_app_profile(request) + client.create_instance(request) # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 @@ -11206,19 +12530,21 @@ def test_update_app_profile_rest_use_cached_wrapped_rpc(): # subsequent calls should use the cached wrapper wrapper_fn.reset_mock() - client.update_app_profile(request) + client.create_instance(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_update_app_profile_rest_required_fields( - request_type=bigtable_instance_admin.UpdateAppProfileRequest, +def test_create_instance_rest_required_fields( + request_type=bigtable_instance_admin.CreateInstanceRequest, ): transport_class = transports.BigtableInstanceAdminRestTransport request_init = {} + request_init["parent"] = "" + request_init["instance_id"] = "" request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( @@ -11229,24 +12555,24 @@ def test_update_app_profile_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).update_app_profile._get_unset_required_fields(jsonified_request) + ).create_instance._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present + jsonified_request["parent"] = "parent_value" + jsonified_request["instanceId"] = "instance_id_value" + unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).update_app_profile._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set( - ( - "ignore_warnings", - "update_mask", - ) - ) + ).create_instance._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + assert "instanceId" in jsonified_request + assert jsonified_request["instanceId"] == "instance_id_value" client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -11267,7 +12593,7 @@ def test_update_app_profile_rest_required_fields( pb_request = request_type.pb(request) transcode_result = { "uri": "v1/sample_method", - "method": "patch", + "method": "post", "query_params": pb_request, } transcode_result["body"] = pb_request @@ -11279,37 +12605,35 @@ def test_update_app_profile_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.update_app_profile(request) + response = client.create_instance(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_update_app_profile_rest_unset_required_fields(): +def test_create_instance_rest_unset_required_fields(): transport = transports.BigtableInstanceAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.update_app_profile._get_unset_required_fields({}) + unset_fields = transport.create_instance._get_unset_required_fields({}) assert set(unset_fields) == ( - set( - ( - "ignoreWarnings", - "updateMask", - ) - ) + set(()) & set( ( - "appProfile", - "updateMask", + "parent", + "instanceId", + "instance", + "clusters", ) ) ) -def test_update_app_profile_rest_flattened(): +def test_create_instance_rest_flattened(): 
client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -11321,16 +12645,14 @@ def test_update_app_profile_rest_flattened(): return_value = operations_pb2.Operation(name="operations/spam") # get arguments that satisfy an http rule for this method - sample_request = { - "app_profile": { - "name": "projects/sample1/instances/sample2/appProfiles/sample3" - } - } + sample_request = {"parent": "projects/sample1"} # get truthy value for each flattened field mock_args = dict( - app_profile=instance.AppProfile(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + parent="parent_value", + instance_id="instance_id_value", + instance=gba_instance.Instance(name="name_value"), + clusters={"key_value": gba_instance.Cluster(name="name_value")}, ) mock_args.update(sample_request) @@ -11340,21 +12662,20 @@ def test_update_app_profile_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.update_app_profile(**mock_args) + client.create_instance(**mock_args) # Establish that the underlying call was made with the expected # request object values. assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}" - % client.transport._host, - args[1], + "%s/v2/{parent=projects/*}/instances" % client.transport._host, args[1] ) -def test_update_app_profile_rest_flattened_error(transport: str = "rest"): +def test_create_instance_rest_flattened_error(transport: str = "rest"): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -11363,14 +12684,16 @@ def test_update_app_profile_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.update_app_profile( - bigtable_instance_admin.UpdateAppProfileRequest(), - app_profile=instance.AppProfile(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - - -def test_delete_app_profile_rest_use_cached_wrapped_rpc(): + client.create_instance( + bigtable_instance_admin.CreateInstanceRequest(), + parent="parent_value", + instance_id="instance_id_value", + instance=gba_instance.Instance(name="name_value"), + clusters={"key_value": gba_instance.Cluster(name="name_value")}, + ) + + +def test_get_instance_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -11384,40 +12707,35 @@ def test_delete_app_profile_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert ( - client._transport.delete_app_profile in client._transport._wrapped_methods - ) + assert client._transport.get_instance in client._transport._wrapped_methods # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. 
) - client._transport._wrapped_methods[ - client._transport.delete_app_profile - ] = mock_rpc + client._transport._wrapped_methods[client._transport.get_instance] = mock_rpc request = {} - client.delete_app_profile(request) + client.get_instance(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - client.delete_app_profile(request) + client.get_instance(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_delete_app_profile_rest_required_fields( - request_type=bigtable_instance_admin.DeleteAppProfileRequest, +def test_get_instance_rest_required_fields( + request_type=bigtable_instance_admin.GetInstanceRequest, ): transport_class = transports.BigtableInstanceAdminRestTransport request_init = {} request_init["name"] = "" - request_init["ignore_warnings"] = False request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( @@ -11425,32 +12743,24 @@ def test_delete_app_profile_rest_required_fields( ) # verify fields with default values are dropped - assert "ignoreWarnings" not in jsonified_request unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).delete_app_profile._get_unset_required_fields(jsonified_request) + ).get_instance._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present - assert "ignoreWarnings" in jsonified_request - assert jsonified_request["ignoreWarnings"] == request_init["ignore_warnings"] jsonified_request["name"] = "name_value" - jsonified_request["ignoreWarnings"] = True unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).delete_app_profile._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("ignore_warnings",)) + ).get_instance._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone assert "name" in jsonified_request assert jsonified_request["name"] == "name_value" - assert "ignoreWarnings" in jsonified_request - assert jsonified_request["ignoreWarnings"] == True client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -11459,7 +12769,7 @@ def test_delete_app_profile_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = None + return_value = instance.Instance() # Mock the http request call within the method and fake a response. 
with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -11471,49 +12781,39 @@ def test_delete_app_profile_rest_required_fields( pb_request = request_type.pb(request) transcode_result = { "uri": "v1/sample_method", - "method": "delete", + "method": "get", "query_params": pb_request, } transcode.return_value = transcode_result response_value = Response() response_value.status_code = 200 - json_return_value = "" + + # Convert return value to protobuf type + return_value = instance.Instance.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.delete_app_profile(request) + response = client.get_instance(request) - expected_params = [ - ( - "ignoreWarnings", - str(False).lower(), - ), - ("$alt", "json;enum-encoding=int"), - ] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_delete_app_profile_rest_unset_required_fields(): +def test_get_instance_rest_unset_required_fields(): transport = transports.BigtableInstanceAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.delete_app_profile._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(("ignoreWarnings",)) - & set( - ( - "name", - "ignoreWarnings", - ) - ) - ) + unset_fields = transport.get_instance._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) -def test_delete_app_profile_rest_flattened(): +def test_get_instance_rest_flattened(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -11522,12 +12822,10 @@ def test_delete_app_profile_rest_flattened(): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = None + return_value = instance.Instance() # get arguments that satisfy an http rule for this method - sample_request = { - "name": "projects/sample1/instances/sample2/appProfiles/sample3" - } + sample_request = {"name": "projects/sample1/instances/sample2"} # get truthy value for each flattened field mock_args = dict( @@ -11538,24 +12836,25 @@ def test_delete_app_profile_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - json_return_value = "" + # Convert return value to protobuf type + return_value = instance.Instance.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.delete_app_profile(**mock_args) + client.get_instance(**mock_args) # Establish that the underlying call was made with the expected # request object values. 
assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/appProfiles/*}" - % client.transport._host, - args[1], + "%s/v2/{name=projects/*/instances/*}" % client.transport._host, args[1] ) -def test_delete_app_profile_rest_flattened_error(transport: str = "rest"): +def test_get_instance_rest_flattened_error(transport: str = "rest"): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -11564,13 +12863,13 @@ def test_delete_app_profile_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.delete_app_profile( - bigtable_instance_admin.DeleteAppProfileRequest(), + client.get_instance( + bigtable_instance_admin.GetInstanceRequest(), name="name_value", ) -def test_get_iam_policy_rest_use_cached_wrapped_rpc(): +def test_list_instances_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -11584,37 +12883,37 @@ def test_get_iam_policy_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.get_iam_policy in client._transport._wrapped_methods + assert client._transport.list_instances in client._transport._wrapped_methods # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[client._transport.get_iam_policy] = mock_rpc + client._transport._wrapped_methods[client._transport.list_instances] = mock_rpc request = {} - client.get_iam_policy(request) + client.list_instances(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - client.get_iam_policy(request) + client.list_instances(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_get_iam_policy_rest_required_fields( - request_type=iam_policy_pb2.GetIamPolicyRequest, +def test_list_instances_rest_required_fields( + request_type=bigtable_instance_admin.ListInstancesRequest, ): transport_class = transports.BigtableInstanceAdminRestTransport request_init = {} - request_init["resource"] = "" + request_init["parent"] = "" request = request_type(**request_init) - pb_request = request + pb_request = request_type.pb(request) jsonified_request = json.loads( json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) @@ -11623,21 +12922,23 @@ def test_get_iam_policy_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).get_iam_policy._get_unset_required_fields(jsonified_request) + ).list_instances._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present - jsonified_request["resource"] = "resource_value" + jsonified_request["parent"] = "parent_value" unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).get_iam_policy._get_unset_required_fields(jsonified_request) + ).list_instances._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. 
+ assert not set(unset_fields) - set(("page_token",)) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone - assert "resource" in jsonified_request - assert jsonified_request["resource"] == "resource_value" + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -11646,7 +12947,7 @@ def test_get_iam_policy_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = policy_pb2.Policy() + return_value = bigtable_instance_admin.ListInstancesResponse() # Mock the http request call within the method and fake a response. with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -11655,40 +12956,44 @@ def test_get_iam_policy_rest_required_fields( with mock.patch.object(path_template, "transcode") as transcode: # A uri without fields and an empty body will force all the # request fields to show up in the query_params. - pb_request = request + pb_request = request_type.pb(request) transcode_result = { "uri": "v1/sample_method", - "method": "post", + "method": "get", "query_params": pb_request, } - transcode_result["body"] = pb_request transcode.return_value = transcode_result response_value = Response() response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListInstancesResponse.pb( + return_value + ) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.get_iam_policy(request) + response = client.list_instances(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_get_iam_policy_rest_unset_required_fields(): +def test_list_instances_rest_unset_required_fields(): transport = transports.BigtableInstanceAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.get_iam_policy._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("resource",))) + unset_fields = transport.list_instances._get_unset_required_fields({}) + assert set(unset_fields) == (set(("pageToken",)) & set(("parent",))) -def test_get_iam_policy_rest_flattened(): +def test_list_instances_rest_flattened(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -11697,38 +13002,39 @@ def test_get_iam_policy_rest_flattened(): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. 
- return_value = policy_pb2.Policy() + return_value = bigtable_instance_admin.ListInstancesResponse() # get arguments that satisfy an http rule for this method - sample_request = {"resource": "projects/sample1/instances/sample2"} + sample_request = {"parent": "projects/sample1"} # get truthy value for each flattened field mock_args = dict( - resource="resource_value", + parent="parent_value", ) mock_args.update(sample_request) # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListInstancesResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.get_iam_policy(**mock_args) + client.list_instances(**mock_args) # Establish that the underlying call was made with the expected # request object values. assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{resource=projects/*/instances/*}:getIamPolicy" - % client.transport._host, - args[1], + "%s/v2/{parent=projects/*}/instances" % client.transport._host, args[1] ) -def test_get_iam_policy_rest_flattened_error(transport: str = "rest"): +def test_list_instances_rest_flattened_error(transport: str = "rest"): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -11737,13 +13043,13 @@ def test_get_iam_policy_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.get_iam_policy( - iam_policy_pb2.GetIamPolicyRequest(), - resource="resource_value", + client.list_instances( + bigtable_instance_admin.ListInstancesRequest(), + parent="parent_value", ) -def test_set_iam_policy_rest_use_cached_wrapped_rpc(): +def test_update_instance_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -11757,37 +13063,35 @@ def test_set_iam_policy_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.set_iam_policy in client._transport._wrapped_methods + assert client._transport.update_instance in client._transport._wrapped_methods # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[client._transport.set_iam_policy] = mock_rpc + client._transport._wrapped_methods[client._transport.update_instance] = mock_rpc request = {} - client.set_iam_policy(request) + client.update_instance(request) # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 - client.set_iam_policy(request) + client.update_instance(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_set_iam_policy_rest_required_fields( - request_type=iam_policy_pb2.SetIamPolicyRequest, -): +def test_update_instance_rest_required_fields(request_type=instance.Instance): transport_class = transports.BigtableInstanceAdminRestTransport request_init = {} - request_init["resource"] = "" + request_init["display_name"] = "" request = request_type(**request_init) - pb_request = request + pb_request = request_type.pb(request) jsonified_request = json.loads( json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) @@ -11796,21 +13100,21 @@ def test_set_iam_policy_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).set_iam_policy._get_unset_required_fields(jsonified_request) + ).update_instance._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present - jsonified_request["resource"] = "resource_value" + jsonified_request["displayName"] = "display_name_value" unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).set_iam_policy._get_unset_required_fields(jsonified_request) + ).update_instance._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone - assert "resource" in jsonified_request - assert jsonified_request["resource"] == "resource_value" + assert "displayName" in jsonified_request + assert jsonified_request["displayName"] == "display_name_value" client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -11819,7 +13123,7 @@ def test_set_iam_policy_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = policy_pb2.Policy() + return_value = instance.Instance() # Mock the http request call within the method and fake a response. with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -11828,10 +13132,10 @@ def test_set_iam_policy_rest_required_fields( with mock.patch.object(path_template, "transcode") as transcode: # A uri without fields and an empty body will force all the # request fields to show up in the query_params. 
- pb_request = request + pb_request = request_type.pb(request) transcode_result = { "uri": "v1/sample_method", - "method": "post", + "method": "put", "query_params": pb_request, } transcode_result["body"] = pb_request @@ -11840,106 +13144,47 @@ def test_set_iam_policy_rest_required_fields( response_value = Response() response_value.status_code = 200 + # Convert return value to protobuf type + return_value = instance.Instance.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.set_iam_policy(request) + response = client.update_instance(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_set_iam_policy_rest_unset_required_fields(): +def test_update_instance_rest_unset_required_fields(): transport = transports.BigtableInstanceAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.set_iam_policy._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(()) - & set( - ( - "resource", - "policy", - ) - ) - ) - + unset_fields = transport.update_instance._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("displayName",))) -def test_set_iam_policy_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = policy_pb2.Policy() +def test_partial_update_instance_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) - # get arguments that satisfy an http rule for this method - sample_request = {"resource": "projects/sample1/instances/sample2"} + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - # get truthy value for each flattened field - mock_args = dict( - resource="resource_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - - client.set_iam_policy(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{resource=projects/*/instances/*}:setIamPolicy" - % client.transport._host, - args[1], - ) - - -def test_set_iam_policy_rest_flattened_error(transport: str = "rest"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.set_iam_policy( - iam_policy_pb2.SetIamPolicyRequest(), - resource="resource_value", - ) - - -def test_test_iam_permissions_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert ( - client._transport.test_iam_permissions in client._transport._wrapped_methods + # Ensure method has been cached + assert ( + client._transport.partial_update_instance + in client._transport._wrapped_methods ) # Replace cached wrapped function with mock @@ -11948,32 +13193,34 @@ def test_test_iam_permissions_rest_use_cached_wrapped_rpc(): "foo" # operation_request.operation in compute client(s) expect a string. ) client._transport._wrapped_methods[ - client._transport.test_iam_permissions + client._transport.partial_update_instance ] = mock_rpc request = {} - client.test_iam_permissions(request) + client.partial_update_instance(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - client.test_iam_permissions(request) + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.partial_update_instance(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_test_iam_permissions_rest_required_fields( - request_type=iam_policy_pb2.TestIamPermissionsRequest, +def test_partial_update_instance_rest_required_fields( + request_type=bigtable_instance_admin.PartialUpdateInstanceRequest, ): transport_class = transports.BigtableInstanceAdminRestTransport request_init = {} - request_init["resource"] = "" - request_init["permissions"] = "" request = request_type(**request_init) - pb_request = request + pb_request = request_type.pb(request) jsonified_request = json.loads( json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) @@ -11982,24 +13229,19 @@ def test_test_iam_permissions_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).test_iam_permissions._get_unset_required_fields(jsonified_request) + ).partial_update_instance._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present - jsonified_request["resource"] = "resource_value" - jsonified_request["permissions"] = "permissions_value" - unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).test_iam_permissions._get_unset_required_fields(jsonified_request) + ).partial_update_instance._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. 
+ assert not set(unset_fields) - set(("update_mask",)) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone - assert "resource" in jsonified_request - assert jsonified_request["resource"] == "resource_value" - assert "permissions" in jsonified_request - assert jsonified_request["permissions"] == "permissions_value" client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -12008,7 +13250,7 @@ def test_test_iam_permissions_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = iam_policy_pb2.TestIamPermissionsResponse() + return_value = operations_pb2.Operation(name="operations/spam") # Mock the http request call within the method and fake a response. with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -12017,10 +13259,10 @@ def test_test_iam_permissions_rest_required_fields( with mock.patch.object(path_template, "transcode") as transcode: # A uri without fields and an empty body will force all the # request fields to show up in the query_params. - pb_request = request + pb_request = request_type.pb(request) transcode_result = { "uri": "v1/sample_method", - "method": "post", + "method": "patch", "query_params": pb_request, } transcode_result["body"] = pb_request @@ -12028,37 +13270,37 @@ def test_test_iam_permissions_rest_required_fields( response_value = Response() response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.test_iam_permissions(request) + response = client.partial_update_instance(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_test_iam_permissions_rest_unset_required_fields(): +def test_partial_update_instance_rest_unset_required_fields(): transport = transports.BigtableInstanceAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.test_iam_permissions._get_unset_required_fields({}) + unset_fields = transport.partial_update_instance._get_unset_required_fields({}) assert set(unset_fields) == ( - set(()) + set(("updateMask",)) & set( ( - "resource", - "permissions", + "instance", + "updateMask", ) ) ) -def test_test_iam_permissions_rest_flattened(): +def test_partial_update_instance_rest_flattened(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -12067,15 +13309,15 @@ def test_test_iam_permissions_rest_flattened(): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. 
- return_value = iam_policy_pb2.TestIamPermissionsResponse() + return_value = operations_pb2.Operation(name="operations/spam") # get arguments that satisfy an http rule for this method - sample_request = {"resource": "projects/sample1/instances/sample2"} + sample_request = {"instance": {"name": "projects/sample1/instances/sample2"}} # get truthy value for each flattened field mock_args = dict( - resource="resource_value", - permissions=["permissions_value"], + instance=gba_instance.Instance(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) mock_args.update(sample_request) @@ -12085,21 +13327,21 @@ def test_test_iam_permissions_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.test_iam_permissions(**mock_args) + client.partial_update_instance(**mock_args) # Establish that the underlying call was made with the expected # request object values. assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{resource=projects/*/instances/*}:testIamPermissions" - % client.transport._host, + "%s/v2/{instance.name=projects/*/instances/*}" % client.transport._host, args[1], ) -def test_test_iam_permissions_rest_flattened_error(transport: str = "rest"): +def test_partial_update_instance_rest_flattened_error(transport: str = "rest"): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -12108,14 +13350,14 @@ def test_test_iam_permissions_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.test_iam_permissions( - iam_policy_pb2.TestIamPermissionsRequest(), - resource="resource_value", - permissions=["permissions_value"], + client.partial_update_instance( + bigtable_instance_admin.PartialUpdateInstanceRequest(), + instance=gba_instance.Instance(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) -def test_list_hot_tablets_rest_use_cached_wrapped_rpc(): +def test_delete_instance_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -12129,37 +13371,35 @@ def test_list_hot_tablets_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.list_hot_tablets in client._transport._wrapped_methods + assert client._transport.delete_instance in client._transport._wrapped_methods # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[ - client._transport.list_hot_tablets - ] = mock_rpc + client._transport._wrapped_methods[client._transport.delete_instance] = mock_rpc request = {} - client.list_hot_tablets(request) + client.delete_instance(request) # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 - client.list_hot_tablets(request) + client.delete_instance(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_list_hot_tablets_rest_required_fields( - request_type=bigtable_instance_admin.ListHotTabletsRequest, +def test_delete_instance_rest_required_fields( + request_type=bigtable_instance_admin.DeleteInstanceRequest, ): transport_class = transports.BigtableInstanceAdminRestTransport request_init = {} - request_init["parent"] = "" + request_init["name"] = "" request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( @@ -12170,30 +13410,21 @@ def test_list_hot_tablets_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).list_hot_tablets._get_unset_required_fields(jsonified_request) + ).delete_instance._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present - jsonified_request["parent"] = "parent_value" + jsonified_request["name"] = "name_value" unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).list_hot_tablets._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set( - ( - "end_time", - "page_size", - "page_token", - "start_time", - ) - ) + ).delete_instance._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -12202,7 +13433,7 @@ def test_list_hot_tablets_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListHotTabletsResponse() + return_value = None # Mock the http request call within the method and fake a response. 
with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -12214,50 +13445,36 @@ def test_list_hot_tablets_rest_required_fields( pb_request = request_type.pb(request) transcode_result = { "uri": "v1/sample_method", - "method": "get", + "method": "delete", "query_params": pb_request, } transcode.return_value = transcode_result response_value = Response() response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable_instance_admin.ListHotTabletsResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(return_value) + json_return_value = "" response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.list_hot_tablets(request) + response = client.delete_instance(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_list_hot_tablets_rest_unset_required_fields(): +def test_delete_instance_rest_unset_required_fields(): transport = transports.BigtableInstanceAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.list_hot_tablets._get_unset_required_fields({}) - assert set(unset_fields) == ( - set( - ( - "endTime", - "pageSize", - "pageToken", - "startTime", - ) - ) - & set(("parent",)) - ) + unset_fields = transport.delete_instance._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) -def test_list_hot_tablets_rest_flattened(): +def test_delete_instance_rest_flattened(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -12266,42 +13483,37 @@ def test_list_hot_tablets_rest_flattened(): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListHotTabletsResponse() + return_value = None # get arguments that satisfy an http rule for this method - sample_request = { - "parent": "projects/sample1/instances/sample2/clusters/sample3" - } + sample_request = {"name": "projects/sample1/instances/sample2"} # get truthy value for each flattened field mock_args = dict( - parent="parent_value", + name="name_value", ) mock_args.update(sample_request) # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_instance_admin.ListHotTabletsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) + json_return_value = "" response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.list_hot_tablets(**mock_args) + client.delete_instance(**mock_args) # Establish that the underlying call was made with the expected # request object values. 
assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*/clusters/*}/hotTablets" - % client.transport._host, - args[1], + "%s/v2/{name=projects/*/instances/*}" % client.transport._host, args[1] ) -def test_list_hot_tablets_rest_flattened_error(transport: str = "rest"): +def test_delete_instance_rest_flattened_error(transport: str = "rest"): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -12310,1246 +13522,7791 @@ def test_list_hot_tablets_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.list_hot_tablets( - bigtable_instance_admin.ListHotTabletsRequest(), - parent="parent_value", + client.delete_instance( + bigtable_instance_admin.DeleteInstanceRequest(), + name="name_value", ) -def test_list_hot_tablets_rest_pager(transport: str = "rest"): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) +def test_create_cluster_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # TODO(kbandes): remove this mock unless there's a good reason for it. - # with mock.patch.object(path_template, 'transcode') as transcode: - # Set the response as a series of pages - response = ( - bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[ - instance.HotTablet(), - instance.HotTablet(), - instance.HotTablet(), - ], - next_page_token="abc", - ), - bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[], - next_page_token="def", - ), - bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[ - instance.HotTablet(), - ], - next_page_token="ghi", - ), - bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[ - instance.HotTablet(), - instance.HotTablet(), - ], - ), - ) - # Two responses for two calls - response = response + response + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - # Wrap the values into proper Response objs - response = tuple( - bigtable_instance_admin.ListHotTabletsResponse.to_json(x) for x in response + # Ensure method has been cached + assert client._transport.create_cluster in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = response_val.encode("UTF-8") - return_val.status_code = 200 - req.side_effect = return_values + client._transport._wrapped_methods[client._transport.create_cluster] = mock_rpc - sample_request = { - "parent": "projects/sample1/instances/sample2/clusters/sample3" - } + request = {} + client.create_cluster(request) - pager = client.list_hot_tablets(request=sample_request) + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, instance.HotTablet) for i in results) + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() - pages = list(client.list_hot_tablets(request=sample_request).pages) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token + client.create_cluster(request) + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.BigtableInstanceAdminGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - # It is an error to provide a credentials file and a transport instance. - transport = transports.BigtableInstanceAdminGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = BigtableInstanceAdminClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) +def test_create_cluster_rest_required_fields( + request_type=bigtable_instance_admin.CreateClusterRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport - # It is an error to provide an api_key and a transport instance. - transport = transports.BigtableInstanceAdminGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), + request_init = {} + request_init["parent"] = "" + request_init["cluster_id"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) - options = client_options.ClientOptions() - options.api_key = "api_key" - with pytest.raises(ValueError): - client = BigtableInstanceAdminClient( - client_options=options, - transport=transport, - ) - # It is an error to provide an api_key and a credential. - options = client_options.ClientOptions() - options.api_key = "api_key" - with pytest.raises(ValueError): - client = BigtableInstanceAdminClient( - client_options=options, credentials=ga_credentials.AnonymousCredentials() - ) + # verify fields with default values are dropped + assert "clusterId" not in jsonified_request - # It is an error to provide scopes and a transport instance. 
- transport = transports.BigtableInstanceAdminGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = BigtableInstanceAdminClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_cluster._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + assert "clusterId" in jsonified_request + assert jsonified_request["clusterId"] == request_init["cluster_id"] + jsonified_request["parent"] = "parent_value" + jsonified_request["clusterId"] = "cluster_id_value" -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.BigtableInstanceAdminGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = BigtableInstanceAdminClient(transport=transport) - assert client.transport is transport + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_cluster._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("cluster_id",)) + jsonified_request.update(unset_fields) + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + assert "clusterId" in jsonified_request + assert jsonified_request["clusterId"] == "cluster_id_value" -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.BigtableInstanceAdminGrpcTransport( + client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - channel = transport.grpc_channel - assert channel + request = request_type(**request_init) - transport = transports.BigtableInstanceAdminGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) -@pytest.mark.parametrize( - "transport_class", - [ - transports.BigtableInstanceAdminGrpcTransport, - transports.BigtableInstanceAdminGrpcAsyncIOTransport, - transports.BigtableInstanceAdminRestTransport, - ], -) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. 
- with mock.patch.object(google.auth, "default") as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.create_cluster(request) -def test_transport_kind_grpc(): - transport = BigtableInstanceAdminClient.get_transport_class("grpc")( - credentials=ga_credentials.AnonymousCredentials() - ) - assert transport.kind == "grpc" + expected_params = [ + ( + "clusterId", + "", + ), + ("$alt", "json;enum-encoding=int"), + ] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params -def test_initialize_client_w_grpc(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc" +def test_create_cluster_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials ) - assert client is not None - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_create_instance_empty_call_grpc(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", + unset_fields = transport.create_cluster._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(("clusterId",)) + & set( + ( + "parent", + "clusterId", + "cluster", + ) + ) ) - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.create_instance), "__call__") as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.create_instance(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.CreateInstanceRequest() - - assert args[0] == request_msg - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_get_instance_empty_call_grpc(): +def test_create_cluster_rest_flattened(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", + transport="rest", ) - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.get_instance), "__call__") as call: - call.return_value = instance.Instance() - client.get_instance(request=None) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.GetInstanceRequest() + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/instances/sample2"} - assert args[0] == request_msg - - -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. 
-def test_list_instances_empty_call_grpc(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + cluster_id="cluster_id_value", + cluster=instance.Cluster(name="name_value"), + ) + mock_args.update(sample_request) - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.list_instances), "__call__") as call: - call.return_value = bigtable_instance_admin.ListInstancesResponse() - client.list_instances(request=None) + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.ListInstancesRequest() + client.create_cluster(**mock_args) - assert args[0] == request_msg + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/instances/*}/clusters" % client.transport._host, + args[1], + ) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_update_instance_empty_call_grpc(): +def test_create_cluster_rest_flattened_error(transport: str = "rest"): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", + transport=transport, ) - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.update_instance), "__call__") as call: - call.return_value = instance.Instance() - client.update_instance(request=None) + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_cluster( + bigtable_instance_admin.CreateClusterRequest(), + parent="parent_value", + cluster_id="cluster_id_value", + cluster=instance.Cluster(name="name_value"), + ) - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = instance.Instance() - assert args[0] == request_msg +def test_get_cluster_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_partial_update_instance_empty_call_grpc(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) + # Ensure method has been cached + assert client._transport.get_cluster in client._transport._wrapped_methods - # Mock the actual call, and fake the request. 
- with mock.patch.object( - type(client.transport.partial_update_instance), "__call__" - ) as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.partial_update_instance(request=None) + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.get_cluster] = mock_rpc - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.PartialUpdateInstanceRequest() + request = {} + client.get_cluster(request) - assert args[0] == request_msg + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + client.get_cluster(request) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_delete_instance_empty_call_grpc(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.delete_instance), "__call__") as call: - call.return_value = None - client.delete_instance(request=None) - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.DeleteInstanceRequest() +def test_get_cluster_rest_required_fields( + request_type=bigtable_instance_admin.GetClusterRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport - assert args[0] == request_msg + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + # verify fields with default values are dropped -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_create_cluster_empty_call_grpc(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_cluster._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.create_cluster), "__call__") as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.create_cluster(request=None) + # verify required fields with default values are now present - # Establish that the underlying stub method was called. 
- call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.CreateClusterRequest() + jsonified_request["name"] = "name_value" - assert args[0] == request_msg + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_cluster._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_get_cluster_empty_call_grpc(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", + transport="rest", ) + request = request_type(**request_init) - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.get_cluster), "__call__") as call: - call.return_value = instance.Cluster() - client.get_cluster(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.GetClusterRequest() - - assert args[0] == request_msg + # Designate an appropriate value for the returned response. + return_value = instance.Cluster() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + response_value = Response() + response_value.status_code = 200 -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_list_clusters_empty_call_grpc(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) + # Convert return value to protobuf type + return_value = instance.Cluster.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: - call.return_value = bigtable_instance_admin.ListClustersResponse() - client.list_clusters(request=None) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.ListClustersRequest() + response = client.get_cluster(request) - assert args[0] == request_msg + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. 
-def test_update_cluster_empty_call_grpc(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", +def test_get_cluster_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials ) - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.update_cluster), "__call__") as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.update_cluster(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = instance.Cluster() - - assert args[0] == request_msg + unset_fields = transport.get_cluster._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_partial_update_cluster_empty_call_grpc(): +def test_get_cluster_rest_flattened(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", + transport="rest", ) - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.partial_update_cluster), "__call__" - ) as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.partial_update_cluster(request=None) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = instance.Cluster() - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.PartialUpdateClusterRequest() + # get arguments that satisfy an http rule for this method + sample_request = {"name": "projects/sample1/instances/sample2/clusters/sample3"} - assert args[0] == request_msg + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = instance.Cluster.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_delete_cluster_empty_call_grpc(): + client.get_cluster(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/clusters/*}" % client.transport._host, + args[1], + ) + + +def test_get_cluster_rest_flattened_error(transport: str = "rest"): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", + transport=transport, ) - # Mock the actual call, and fake the request. 
- with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call: - call.return_value = None - client.delete_cluster(request=None) + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_cluster( + bigtable_instance_admin.GetClusterRequest(), + name="name_value", + ) - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.DeleteClusterRequest() - assert args[0] == request_msg +def test_list_clusters_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_create_app_profile_empty_call_grpc(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) + # Ensure method has been cached + assert client._transport.list_clusters in client._transport._wrapped_methods - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.create_app_profile), "__call__" - ) as call: - call.return_value = instance.AppProfile() - client.create_app_profile(request=None) + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.list_clusters] = mock_rpc - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.CreateAppProfileRequest() + request = {} + client.list_clusters(request) - assert args[0] == request_msg + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + client.list_clusters(request) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_get_app_profile_empty_call_grpc(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.get_app_profile), "__call__") as call: - call.return_value = instance.AppProfile() - client.get_app_profile(request=None) - # Establish that the underlying stub method was called. 
- call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.GetAppProfileRequest() +def test_list_clusters_rest_required_fields( + request_type=bigtable_instance_admin.ListClustersRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport - assert args[0] == request_msg + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + # verify fields with default values are dropped -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_list_app_profiles_empty_call_grpc(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_clusters._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.list_app_profiles), "__call__" - ) as call: - call.return_value = bigtable_instance_admin.ListAppProfilesResponse() - client.list_app_profiles(request=None) + # verify required fields with default values are now present - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.ListAppProfilesRequest() + jsonified_request["parent"] = "parent_value" - assert args[0] == request_msg + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_clusters._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("page_token",)) + jsonified_request.update(unset_fields) + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_update_app_profile_empty_call_grpc(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", + transport="rest", ) + request = request_type(**request_init) - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.update_app_profile), "__call__" - ) as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.update_app_profile(request=None) + # Designate an appropriate value for the returned response. + return_value = bigtable_instance_admin.ListClustersResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.UpdateAppProfileRequest() + response_value = Response() + response_value.status_code = 200 - assert args[0] == request_msg + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListClustersResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_delete_app_profile_empty_call_grpc(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) + response = client.list_clusters(request) - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.delete_app_profile), "__call__" - ) as call: - call.return_value = None - client.delete_app_profile(request=None) + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.DeleteAppProfileRequest() - assert args[0] == request_msg +def test_list_clusters_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + unset_fields = transport.list_clusters._get_unset_required_fields({}) + assert set(unset_fields) == (set(("pageToken",)) & set(("parent",))) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_get_iam_policy_empty_call_grpc(): + +def test_list_clusters_rest_flattened(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", + transport="rest", ) - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: - call.return_value = policy_pb2.Policy() - client.get_iam_policy(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = iam_policy_pb2.GetIamPolicyRequest() - - assert args[0] == request_msg + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_instance_admin.ListClustersResponse() + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/instances/sample2"} -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. 
-def test_set_iam_policy_empty_call_grpc(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: - call.return_value = policy_pb2.Policy() - client.set_iam_policy(request=None) + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListClustersResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = iam_policy_pb2.SetIamPolicyRequest() + client.list_clusters(**mock_args) - assert args[0] == request_msg + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/instances/*}/clusters" % client.transport._host, + args[1], + ) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_test_iam_permissions_empty_call_grpc(): +def test_list_clusters_rest_flattened_error(transport: str = "rest"): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", + transport=transport, ) - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.test_iam_permissions), "__call__" - ) as call: - call.return_value = iam_policy_pb2.TestIamPermissionsResponse() - client.test_iam_permissions(request=None) + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_clusters( + bigtable_instance_admin.ListClustersRequest(), + parent="parent_value", + ) - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = iam_policy_pb2.TestIamPermissionsRequest() - assert args[0] == request_msg +def test_update_cluster_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_list_hot_tablets_empty_call_grpc(): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) + # Ensure method has been cached + assert client._transport.update_cluster in client._transport._wrapped_methods - # Mock the actual call, and fake the request. 
- with mock.patch.object(type(client.transport.list_hot_tablets), "__call__") as call: - call.return_value = bigtable_instance_admin.ListHotTabletsResponse() - client.list_hot_tablets(request=None) + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.update_cluster] = mock_rpc - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.ListHotTabletsRequest() + request = {} + client.update_cluster(request) - assert args[0] == request_msg + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() -def test_transport_kind_grpc_asyncio(): - transport = BigtableInstanceAdminAsyncClient.get_transport_class("grpc_asyncio")( - credentials=async_anonymous_credentials() - ) - assert transport.kind == "grpc_asyncio" + client.update_cluster(request) + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 -def test_initialize_client_w_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), transport="grpc_asyncio" - ) - assert client is not None +def test_partial_update_cluster_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_create_instance_empty_call_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.create_instance), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") + # Ensure method has been cached + assert ( + client._transport.partial_update_cluster + in client._transport._wrapped_methods ) - await client.create_instance(request=None) - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.CreateInstanceRequest() + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.partial_update_cluster + ] = mock_rpc - assert args[0] == request_msg + request = {} + client.partial_update_cluster(request) + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_get_instance_empty_call_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.get_instance), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - instance.Instance( - name="name_value", - display_name="display_name_value", - state=instance.Instance.State.READY, - type_=instance.Instance.Type.PRODUCTION, - satisfies_pzs=True, - ) - ) - await client.get_instance(request=None) + client.partial_update_cluster(request) - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.GetInstanceRequest() + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 - assert args[0] == request_msg +def test_partial_update_cluster_rest_required_fields( + request_type=bigtable_instance_admin.PartialUpdateClusterRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_list_instances_empty_call_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.list_instances), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_instance_admin.ListInstancesResponse( - failed_locations=["failed_locations_value"], - next_page_token="next_page_token_value", - ) - ) - await client.list_instances(request=None) + # verify fields with default values are dropped - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.ListInstancesRequest() + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).partial_update_cluster._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) - assert args[0] == request_msg + # verify required fields with default values are now present + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).partial_update_cluster._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("update_mask",)) + jsonified_request.update(unset_fields) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. 
-@pytest.mark.asyncio -async def test_update_instance_empty_call_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) + # verify required fields with non-default values are left alone - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.update_instance), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - instance.Instance( - name="name_value", - display_name="display_name_value", - state=instance.Instance.State.READY, - type_=instance.Instance.Type.PRODUCTION, - satisfies_pzs=True, - ) - ) - await client.update_instance(request=None) + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = instance.Instance() + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "patch", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result - assert args[0] == request_msg + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_partial_update_instance_empty_call_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", + response = client.partial_update_cluster(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_partial_update_cluster_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials ) - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.partial_update_instance), "__call__" - ) as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") + unset_fields = transport.partial_update_cluster._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(("updateMask",)) + & set( + ( + "cluster", + "updateMask", + ) ) - await client.partial_update_instance(request=None) + ) - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.PartialUpdateInstanceRequest() - assert args[0] == request_msg +def test_partial_update_cluster_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_delete_instance_empty_call_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) + # get arguments that satisfy an http rule for this method + sample_request = { + "cluster": {"name": "projects/sample1/instances/sample2/clusters/sample3"} + } - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.delete_instance), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.delete_instance(request=None) + # get truthy value for each flattened field + mock_args = dict( + cluster=instance.Cluster(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + mock_args.update(sample_request) - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.DeleteInstanceRequest() + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - assert args[0] == request_msg + client.partial_update_cluster(**mock_args) + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{cluster.name=projects/*/instances/*/clusters/*}" + % client.transport._host, + args[1], + ) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_create_cluster_empty_call_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", + +def test_partial_update_cluster_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) - # Mock the actual call, and fake the request. 
- with mock.patch.object(type(client.transport.create_cluster), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.partial_update_cluster( + bigtable_instance_admin.PartialUpdateClusterRequest(), + cluster=instance.Cluster(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) - await client.create_cluster(request=None) - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.CreateClusterRequest() - assert args[0] == request_msg +def test_delete_cluster_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_get_cluster_empty_call_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) + # Ensure method has been cached + assert client._transport.delete_cluster in client._transport._wrapped_methods - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.get_cluster), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - instance.Cluster( - name="name_value", - location="location_value", - state=instance.Cluster.State.READY, - serve_nodes=1181, - node_scaling_factor=instance.Cluster.NodeScalingFactor.NODE_SCALING_FACTOR_1X, - default_storage_type=common.StorageType.SSD, - ) + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. ) - await client.get_cluster(request=None) + client._transport._wrapped_methods[client._transport.delete_cluster] = mock_rpc - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.GetClusterRequest() + request = {} + client.delete_cluster(request) - assert args[0] == request_msg + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + client.delete_cluster(request) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_list_clusters_empty_call_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 - # Mock the actual call, and fake the request. 
- with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_instance_admin.ListClustersResponse( - failed_locations=["failed_locations_value"], - next_page_token="next_page_token_value", - ) - ) - await client.list_clusters(request=None) - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.ListClustersRequest() +def test_delete_cluster_rest_required_fields( + request_type=bigtable_instance_admin.DeleteClusterRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport - assert args[0] == request_msg + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + # verify fields with default values are dropped -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_update_cluster_empty_call_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_cluster._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.update_cluster), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - await client.update_cluster(request=None) + # verify required fields with default values are now present - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = instance.Cluster() + jsonified_request["name"] = "name_value" - assert args[0] == request_msg + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_cluster._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_partial_update_cluster_empty_call_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) + request = request_type(**request_init) - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.partial_update_cluster), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - await client.partial_update_cluster(request=None) + # Designate an appropriate value for the returned response. 
+ return_value = None + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "delete", + "query_params": pb_request, + } + transcode.return_value = transcode_result - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.PartialUpdateClusterRequest() + response_value = Response() + response_value.status_code = 200 + json_return_value = "" - assert args[0] == request_msg + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.delete_cluster(request) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_delete_cluster_empty_call_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.delete_cluster(request=None) - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.DeleteClusterRequest() +def test_delete_cluster_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) - assert args[0] == request_msg + unset_fields = transport.delete_cluster._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_create_app_profile_empty_call_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", +def test_delete_cluster_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.create_app_profile), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - instance.AppProfile( - name="name_value", - etag="etag_value", - description="description_value", - ) + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # get arguments that satisfy an http rule for this method + sample_request = {"name": "projects/sample1/instances/sample2/clusters/sample3"} + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", ) - await client.create_app_profile(request=None) + mock_args.update(sample_request) - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.CreateAppProfileRequest() + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - assert args[0] == request_msg + client.delete_cluster(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/clusters/*}" % client.transport._host, + args[1], + ) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_get_app_profile_empty_call_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", +def test_delete_cluster_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.get_app_profile), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - instance.AppProfile( - name="name_value", - etag="etag_value", - description="description_value", - ) + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_cluster( + bigtable_instance_admin.DeleteClusterRequest(), + name="name_value", ) - await client.get_app_profile(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.GetAppProfileRequest() - assert args[0] == request_msg +def test_create_app_profile_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. 
-@pytest.mark.asyncio -async def test_list_app_profiles_empty_call_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.list_app_profiles), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_instance_admin.ListAppProfilesResponse( - next_page_token="next_page_token_value", - failed_locations=["failed_locations_value"], - ) + # Ensure method has been cached + assert ( + client._transport.create_app_profile in client._transport._wrapped_methods ) - await client.list_app_profiles(request=None) - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.ListAppProfilesRequest() + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.create_app_profile + ] = mock_rpc - assert args[0] == request_msg + request = {} + client.create_app_profile(request) + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_update_app_profile_empty_call_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) + client.create_app_profile(request) - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.update_app_profile), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - await client.update_app_profile(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.UpdateAppProfileRequest() + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 - assert args[0] == request_msg +def test_create_app_profile_rest_required_fields( + request_type=bigtable_instance_admin.CreateAppProfileRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_delete_app_profile_empty_call_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", + request_init = {} + request_init["parent"] = "" + request_init["app_profile_id"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) - # Mock the actual call, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_app_profile), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.delete_app_profile(request=None) + # verify fields with default values are dropped + assert "appProfileId" not in jsonified_request - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.DeleteAppProfileRequest() + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_app_profile._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) - assert args[0] == request_msg + # verify required fields with default values are now present + assert "appProfileId" in jsonified_request + assert jsonified_request["appProfileId"] == request_init["app_profile_id"] + jsonified_request["parent"] = "parent_value" + jsonified_request["appProfileId"] = "app_profile_id_value" -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_get_iam_policy_empty_call_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_app_profile._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "app_profile_id", + "ignore_warnings", + ) ) + jsonified_request.update(unset_fields) - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - policy_pb2.Policy( - version=774, - etag=b"etag_blob", - ) - ) - await client.get_iam_policy(request=None) + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + assert "appProfileId" in jsonified_request + assert jsonified_request["appProfileId"] == "app_profile_id_value" - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = iam_policy_pb2.GetIamPolicyRequest() + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) - assert args[0] == request_msg + # Designate an appropriate value for the returned response. + return_value = instance.AppProfile() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + response_value = Response() + response_value.status_code = 200 -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_set_iam_policy_empty_call_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) + # Convert return value to protobuf type + return_value = instance.AppProfile.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - policy_pb2.Policy( - version=774, - etag=b"etag_blob", - ) - ) - await client.set_iam_policy(request=None) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = iam_policy_pb2.SetIamPolicyRequest() + response = client.create_app_profile(request) - assert args[0] == request_msg + expected_params = [ + ( + "appProfileId", + "", + ), + ("$alt", "json;enum-encoding=int"), + ] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_test_iam_permissions_empty_call_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", +def test_create_app_profile_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials ) - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.test_iam_permissions), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - iam_policy_pb2.TestIamPermissionsResponse( - permissions=["permissions_value"], + unset_fields = transport.create_app_profile._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "appProfileId", + "ignoreWarnings", ) ) - await client.test_iam_permissions(request=None) + & set( + ( + "parent", + "appProfileId", + "appProfile", + ) + ) + ) - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = iam_policy_pb2.TestIamPermissionsRequest() - assert args[0] == request_msg +def test_create_app_profile_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = instance.AppProfile() -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_list_hot_tablets_empty_call_grpc_asyncio(): - client = BigtableInstanceAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/instances/sample2"} - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.list_hot_tablets), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_instance_admin.ListHotTabletsResponse( - next_page_token="next_page_token_value", - ) + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + app_profile_id="app_profile_id_value", + app_profile=instance.AppProfile(name="name_value"), ) - await client.list_hot_tablets(request=None) + mock_args.update(sample_request) - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_instance_admin.ListHotTabletsRequest() + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = instance.AppProfile.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - assert args[0] == request_msg + client.create_app_profile(**mock_args) + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/instances/*}/appProfiles" + % client.transport._host, + args[1], + ) -def test_transport_kind_rest(): - transport = BigtableInstanceAdminClient.get_transport_class("rest")( - credentials=ga_credentials.AnonymousCredentials() - ) + +def test_create_app_profile_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_app_profile( + bigtable_instance_admin.CreateAppProfileRequest(), + parent="parent_value", + app_profile_id="app_profile_id_value", + app_profile=instance.AppProfile(name="name_value"), + ) + + +def test_get_app_profile_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.get_app_profile in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.get_app_profile] = mock_rpc + + request = {} + client.get_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_app_profile(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_get_app_profile_rest_required_fields( + request_type=bigtable_instance_admin.GetAppProfileRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_app_profile._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_app_profile._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = instance.AppProfile() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = instance.AppProfile.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.get_app_profile(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_app_profile_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_app_profile._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +def test_get_app_profile_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = instance.AppProfile() + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/instances/sample2/appProfiles/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = instance.AppProfile.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.get_app_profile(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/appProfiles/*}" + % client.transport._host, + args[1], + ) + + +def test_get_app_profile_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_app_profile( + bigtable_instance_admin.GetAppProfileRequest(), + name="name_value", + ) + + +def test_list_app_profiles_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.list_app_profiles in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.list_app_profiles + ] = mock_rpc + + request = {} + client.list_app_profiles(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.list_app_profiles(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_list_app_profiles_rest_required_fields( + request_type=bigtable_instance_admin.ListAppProfilesRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_app_profiles._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_app_profiles._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "page_size", + "page_token", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable_instance_admin.ListAppProfilesResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListAppProfilesResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.list_app_profiles(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_app_profiles_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_app_profiles._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "pageSize", + "pageToken", + ) + ) + & set(("parent",)) + ) + + +def test_list_app_profiles_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_instance_admin.ListAppProfilesResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/instances/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListAppProfilesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.list_app_profiles(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/instances/*}/appProfiles" + % client.transport._host, + args[1], + ) + + +def test_list_app_profiles_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_app_profiles( + bigtable_instance_admin.ListAppProfilesRequest(), + parent="parent_value", + ) + + +def test_list_app_profiles_rest_pager(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + bigtable_instance_admin.ListAppProfilesResponse( + app_profiles=[ + instance.AppProfile(), + instance.AppProfile(), + instance.AppProfile(), + ], + next_page_token="abc", + ), + bigtable_instance_admin.ListAppProfilesResponse( + app_profiles=[], + next_page_token="def", + ), + bigtable_instance_admin.ListAppProfilesResponse( + app_profiles=[ + instance.AppProfile(), + ], + next_page_token="ghi", + ), + bigtable_instance_admin.ListAppProfilesResponse( + app_profiles=[ + instance.AppProfile(), + instance.AppProfile(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + bigtable_instance_admin.ListAppProfilesResponse.to_json(x) for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"parent": "projects/sample1/instances/sample2"} + + pager = client.list_app_profiles(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, instance.AppProfile) for i in results) + + pages = list(client.list_app_profiles(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_update_app_profile_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.update_app_profile in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.update_app_profile + ] = mock_rpc + + request = {} + client.update_app_profile(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.update_app_profile(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_update_app_profile_rest_required_fields( + request_type=bigtable_instance_admin.UpdateAppProfileRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_app_profile._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_app_profile._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "ignore_warnings", + "update_mask", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "patch", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.update_app_profile(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_update_app_profile_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.update_app_profile._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "ignoreWarnings", + "updateMask", + ) + ) + & set( + ( + "appProfile", + "updateMask", + ) + ) + ) + + +def test_update_app_profile_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "app_profile": { + "name": "projects/sample1/instances/sample2/appProfiles/sample3" + } + } + + # get truthy value for each flattened field + mock_args = dict( + app_profile=instance.AppProfile(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.update_app_profile(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}" + % client.transport._host, + args[1], + ) + + +def test_update_app_profile_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.update_app_profile( + bigtable_instance_admin.UpdateAppProfileRequest(), + app_profile=instance.AppProfile(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +def test_delete_app_profile_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.delete_app_profile in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.delete_app_profile + ] = mock_rpc + + request = {} + client.delete_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.delete_app_profile(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_delete_app_profile_rest_required_fields( + request_type=bigtable_instance_admin.DeleteAppProfileRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["name"] = "" + request_init["ignore_warnings"] = False + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + assert "ignoreWarnings" not in jsonified_request + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_app_profile._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + assert "ignoreWarnings" in jsonified_request + assert jsonified_request["ignoreWarnings"] == request_init["ignore_warnings"] + + jsonified_request["name"] = "name_value" + jsonified_request["ignoreWarnings"] = True + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_app_profile._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("ignore_warnings",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + assert "ignoreWarnings" in jsonified_request + assert jsonified_request["ignoreWarnings"] == True + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = None + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "delete", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.delete_app_profile(request) + + expected_params = [ + ( + "ignoreWarnings", + str(False).lower(), + ), + ("$alt", "json;enum-encoding=int"), + ] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_delete_app_profile_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.delete_app_profile._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(("ignoreWarnings",)) + & set( + ( + "name", + "ignoreWarnings", + ) + ) + ) + + +def test_delete_app_profile_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/instances/sample2/appProfiles/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ignore_warnings=True, + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.delete_app_profile(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/appProfiles/*}" + % client.transport._host, + args[1], + ) + + +def test_delete_app_profile_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete_app_profile( + bigtable_instance_admin.DeleteAppProfileRequest(), + name="name_value", + ignore_warnings=True, + ) + + +def test_get_iam_policy_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.get_iam_policy in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.get_iam_policy] = mock_rpc + + request = {} + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_iam_policy(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_get_iam_policy_rest_required_fields( + request_type=iam_policy_pb2.GetIamPolicyRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["resource"] = "" + request = request_type(**request_init) + pb_request = request + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_iam_policy._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["resource"] = "resource_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_iam_policy._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "resource" in jsonified_request + assert jsonified_request["resource"] == "resource_value" + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.get_iam_policy(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_iam_policy_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_iam_policy._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("resource",))) + + +def test_get_iam_policy_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy() + + # get arguments that satisfy an http rule for this method + sample_request = {"resource": "projects/sample1/instances/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + resource="resource_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.get_iam_policy(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{resource=projects/*/instances/*}:getIamPolicy" + % client.transport._host, + args[1], + ) + + +def test_get_iam_policy_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_iam_policy( + iam_policy_pb2.GetIamPolicyRequest(), + resource="resource_value", + ) + + +def test_set_iam_policy_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.set_iam_policy in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.set_iam_policy] = mock_rpc + + request = {} + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.set_iam_policy(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_set_iam_policy_rest_required_fields( + request_type=iam_policy_pb2.SetIamPolicyRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["resource"] = "" + request = request_type(**request_init) + pb_request = request + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).set_iam_policy._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["resource"] = "resource_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).set_iam_policy._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "resource" in jsonified_request + assert jsonified_request["resource"] == "resource_value" + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.set_iam_policy(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_set_iam_policy_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.set_iam_policy._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "resource", + "policy", + ) + ) + ) + + +def test_set_iam_policy_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy() + + # get arguments that satisfy an http rule for this method + sample_request = {"resource": "projects/sample1/instances/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + resource="resource_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.set_iam_policy(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{resource=projects/*/instances/*}:setIamPolicy" + % client.transport._host, + args[1], + ) + + +def test_set_iam_policy_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.set_iam_policy( + iam_policy_pb2.SetIamPolicyRequest(), + resource="resource_value", + ) + + +def test_test_iam_permissions_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.test_iam_permissions in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.test_iam_permissions + ] = mock_rpc + + request = {} + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.test_iam_permissions(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_test_iam_permissions_rest_required_fields( + request_type=iam_policy_pb2.TestIamPermissionsRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["resource"] = "" + request_init["permissions"] = "" + request = request_type(**request_init) + pb_request = request + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).test_iam_permissions._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["resource"] = "resource_value" + jsonified_request["permissions"] = "permissions_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).test_iam_permissions._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "resource" in jsonified_request + assert jsonified_request["resource"] == "resource_value" + assert "permissions" in jsonified_request + assert jsonified_request["permissions"] == "permissions_value" + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = iam_policy_pb2.TestIamPermissionsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
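+ # test_iam_permissions has no default-valued required query parameters
+ # (see the unset_required_fields test below), so the expected params
+ # reduce to the $alt system parameter.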
+ pb_request = request + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.test_iam_permissions(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_test_iam_permissions_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.test_iam_permissions._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "resource", + "permissions", + ) + ) + ) + + +def test_test_iam_permissions_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = iam_policy_pb2.TestIamPermissionsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"resource": "projects/sample1/instances/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + resource="resource_value", + permissions=["permissions_value"], + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.test_iam_permissions(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{resource=projects/*/instances/*}:testIamPermissions" + % client.transport._host, + args[1], + ) + + +def test_test_iam_permissions_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.test_iam_permissions( + iam_policy_pb2.TestIamPermissionsRequest(), + resource="resource_value", + permissions=["permissions_value"], + ) + + +def test_list_hot_tablets_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.list_hot_tablets in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.list_hot_tablets + ] = mock_rpc + + request = {} + client.list_hot_tablets(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.list_hot_tablets(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_list_hot_tablets_rest_required_fields( + request_type=bigtable_instance_admin.ListHotTabletsRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_hot_tablets._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_hot_tablets._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "end_time", + "page_size", + "page_token", + "start_time", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable_instance_admin.ListHotTabletsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListHotTabletsResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.list_hot_tablets(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_hot_tablets_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_hot_tablets._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "endTime", + "pageSize", + "pageToken", + "startTime", + ) + ) + & set(("parent",)) + ) + + +def test_list_hot_tablets_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_instance_admin.ListHotTabletsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = { + "parent": "projects/sample1/instances/sample2/clusters/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListHotTabletsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.list_hot_tablets(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/instances/*/clusters/*}/hotTablets" + % client.transport._host, + args[1], + ) + + +def test_list_hot_tablets_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_hot_tablets( + bigtable_instance_admin.ListHotTabletsRequest(), + parent="parent_value", + ) + + +def test_list_hot_tablets_rest_pager(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. 
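+ # The mocked session replays the four fake pages defined below twice over
+ # (via req.side_effect), so iterating the pager yields all six HotTablets
+ # and the .pages iterator exposes each page's next_page_token.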
+ with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + bigtable_instance_admin.ListHotTabletsResponse( + hot_tablets=[ + instance.HotTablet(), + instance.HotTablet(), + instance.HotTablet(), + ], + next_page_token="abc", + ), + bigtable_instance_admin.ListHotTabletsResponse( + hot_tablets=[], + next_page_token="def", + ), + bigtable_instance_admin.ListHotTabletsResponse( + hot_tablets=[ + instance.HotTablet(), + ], + next_page_token="ghi", + ), + bigtable_instance_admin.ListHotTabletsResponse( + hot_tablets=[ + instance.HotTablet(), + instance.HotTablet(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + bigtable_instance_admin.ListHotTabletsResponse.to_json(x) for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = { + "parent": "projects/sample1/instances/sample2/clusters/sample3" + } + + pager = client.list_hot_tablets(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, instance.HotTablet) for i in results) + + pages = list(client.list_hot_tablets(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_create_logical_view_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.create_logical_view in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.create_logical_view + ] = mock_rpc + + request = {} + client.create_logical_view(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.create_logical_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_create_logical_view_rest_required_fields( + request_type=bigtable_instance_admin.CreateLogicalViewRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request_init["logical_view_id"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + assert "logicalViewId" not in jsonified_request + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_logical_view._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + assert "logicalViewId" in jsonified_request + assert jsonified_request["logicalViewId"] == request_init["logical_view_id"] + + jsonified_request["parent"] = "parent_value" + jsonified_request["logicalViewId"] = "logical_view_id_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_logical_view._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("logical_view_id",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + assert "logicalViewId" in jsonified_request + assert jsonified_request["logicalViewId"] == "logical_view_id_value" + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
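+ # logical_view_id is a required field that is sent as a query parameter
+ # with a default, so the expected params checked below include it (as an
+ # empty string) alongside the $alt system parameter.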
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.create_logical_view(request) + + expected_params = [ + ( + "logicalViewId", + "", + ), + ("$alt", "json;enum-encoding=int"), + ] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_create_logical_view_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.create_logical_view._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(("logicalViewId",)) + & set( + ( + "parent", + "logicalViewId", + "logicalView", + ) + ) + ) + + +def test_create_logical_view_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/instances/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + logical_view=instance.LogicalView(name="name_value"), + logical_view_id="logical_view_id_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.create_logical_view(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/instances/*}/logicalViews" + % client.transport._host, + args[1], + ) + + +def test_create_logical_view_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_logical_view( + bigtable_instance_admin.CreateLogicalViewRequest(), + parent="parent_value", + logical_view=instance.LogicalView(name="name_value"), + logical_view_id="logical_view_id_value", + ) + + +def test_get_logical_view_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.get_logical_view in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.get_logical_view + ] = mock_rpc + + request = {} + client.get_logical_view(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_logical_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_get_logical_view_rest_required_fields( + request_type=bigtable_instance_admin.GetLogicalViewRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_logical_view._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_logical_view._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = instance.LogicalView() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
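+ # get_logical_view transcodes to an HTTP GET, so the stubbed
+ # transcode_result carries no body; the whole request rides in the
+ # query_params.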
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = instance.LogicalView.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.get_logical_view(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_logical_view_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_logical_view._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +def test_get_logical_view_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = instance.LogicalView() + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/instances/sample2/logicalViews/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = instance.LogicalView.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.get_logical_view(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/logicalViews/*}" + % client.transport._host, + args[1], + ) + + +def test_get_logical_view_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_logical_view( + bigtable_instance_admin.GetLogicalViewRequest(), + name="name_value", + ) + + +def test_list_logical_views_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.list_logical_views in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.list_logical_views + ] = mock_rpc + + request = {} + client.list_logical_views(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.list_logical_views(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_list_logical_views_rest_required_fields( + request_type=bigtable_instance_admin.ListLogicalViewsRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_logical_views._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_logical_views._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "page_size", + "page_token", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable_instance_admin.ListLogicalViewsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListLogicalViewsResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.list_logical_views(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_logical_views_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_logical_views._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "pageSize", + "pageToken", + ) + ) + & set(("parent",)) + ) + + +def test_list_logical_views_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_instance_admin.ListLogicalViewsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/instances/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListLogicalViewsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.list_logical_views(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/instances/*}/logicalViews" + % client.transport._host, + args[1], + ) + + +def test_list_logical_views_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_logical_views( + bigtable_instance_admin.ListLogicalViewsRequest(), + parent="parent_value", + ) + + +def test_list_logical_views_rest_pager(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + bigtable_instance_admin.ListLogicalViewsResponse( + logical_views=[ + instance.LogicalView(), + instance.LogicalView(), + instance.LogicalView(), + ], + next_page_token="abc", + ), + bigtable_instance_admin.ListLogicalViewsResponse( + logical_views=[], + next_page_token="def", + ), + bigtable_instance_admin.ListLogicalViewsResponse( + logical_views=[ + instance.LogicalView(), + ], + next_page_token="ghi", + ), + bigtable_instance_admin.ListLogicalViewsResponse( + logical_views=[ + instance.LogicalView(), + instance.LogicalView(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + bigtable_instance_admin.ListLogicalViewsResponse.to_json(x) + for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"parent": "projects/sample1/instances/sample2"} + + pager = client.list_logical_views(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, instance.LogicalView) for i in results) + + pages = list(client.list_logical_views(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_update_logical_view_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.update_logical_view in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.update_logical_view + ] = mock_rpc + + request = {} + client.update_logical_view(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.update_logical_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_update_logical_view_rest_required_fields( + request_type=bigtable_instance_admin.UpdateLogicalViewRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_logical_view._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_logical_view._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("update_mask",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "patch", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.update_logical_view(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_update_logical_view_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.update_logical_view._get_unset_required_fields({}) + assert set(unset_fields) == (set(("updateMask",)) & set(("logicalView",))) + + +def test_update_logical_view_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
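+ # The flattened kwargs are folded into a request internally; the test only
+ # checks that exactly one HTTP request was issued and that its URI matches
+ # the logical_view.name-based path template.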
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "logical_view": { + "name": "projects/sample1/instances/sample2/logicalViews/sample3" + } + } + + # get truthy value for each flattened field + mock_args = dict( + logical_view=instance.LogicalView(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.update_logical_view(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{logical_view.name=projects/*/instances/*/logicalViews/*}" + % client.transport._host, + args[1], + ) + + +def test_update_logical_view_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_logical_view( + bigtable_instance_admin.UpdateLogicalViewRequest(), + logical_view=instance.LogicalView(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +def test_delete_logical_view_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.delete_logical_view in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.delete_logical_view + ] = mock_rpc + + request = {} + client.delete_logical_view(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.delete_logical_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_delete_logical_view_rest_required_fields( + request_type=bigtable_instance_admin.DeleteLogicalViewRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_logical_view._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_logical_view._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("etag",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = None + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "delete", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.delete_logical_view(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_delete_logical_view_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.delete_logical_view._get_unset_required_fields({}) + assert set(unset_fields) == (set(("etag",)) & set(("name",))) + + +def test_delete_logical_view_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
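+ # delete_logical_view returns no payload, so the faked HTTP response body
+ # is an empty string and the test only validates the request URI and that
+ # a single call was made.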
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/instances/sample2/logicalViews/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.delete_logical_view(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/logicalViews/*}" + % client.transport._host, + args[1], + ) + + +def test_delete_logical_view_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_logical_view( + bigtable_instance_admin.DeleteLogicalViewRequest(), + name="name_value", + ) + + +def test_create_materialized_view_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.create_materialized_view + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.create_materialized_view + ] = mock_rpc + + request = {} + client.create_materialized_view(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.create_materialized_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_create_materialized_view_rest_required_fields( + request_type=bigtable_instance_admin.CreateMaterializedViewRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request_init["materialized_view_id"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + assert "materializedViewId" not in jsonified_request + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_materialized_view._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + assert "materializedViewId" in jsonified_request + assert ( + jsonified_request["materializedViewId"] == request_init["materialized_view_id"] + ) + + jsonified_request["parent"] = "parent_value" + jsonified_request["materializedViewId"] = "materialized_view_id_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_materialized_view._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("materialized_view_id",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + assert "materializedViewId" in jsonified_request + assert jsonified_request["materializedViewId"] == "materialized_view_id_value" + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.create_materialized_view(request) + + expected_params = [ + ( + "materializedViewId", + "", + ), + ("$alt", "json;enum-encoding=int"), + ] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_create_materialized_view_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.create_materialized_view._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(("materializedViewId",)) + & set( + ( + "parent", + "materializedViewId", + "materializedView", + ) + ) + ) + + +def test_create_materialized_view_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/instances/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + materialized_view=instance.MaterializedView(name="name_value"), + materialized_view_id="materialized_view_id_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.create_materialized_view(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/instances/*}/materializedViews" + % client.transport._host, + args[1], + ) + + +def test_create_materialized_view_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_materialized_view( + bigtable_instance_admin.CreateMaterializedViewRequest(), + parent="parent_value", + materialized_view=instance.MaterializedView(name="name_value"), + materialized_view_id="materialized_view_id_value", + ) + + +def test_get_materialized_view_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.get_materialized_view + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.get_materialized_view + ] = mock_rpc + + request = {} + client.get_materialized_view(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_materialized_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_get_materialized_view_rest_required_fields( + request_type=bigtable_instance_admin.GetMaterializedViewRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_materialized_view._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_materialized_view._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = instance.MaterializedView() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = instance.MaterializedView.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.get_materialized_view(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_materialized_view_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_materialized_view._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +def test_get_materialized_view_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = instance.MaterializedView() + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/instances/sample2/materializedViews/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = instance.MaterializedView.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.get_materialized_view(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/materializedViews/*}" + % client.transport._host, + args[1], + ) + + +def test_get_materialized_view_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_materialized_view( + bigtable_instance_admin.GetMaterializedViewRequest(), + name="name_value", + ) + + +def test_list_materialized_views_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.list_materialized_views + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.list_materialized_views + ] = mock_rpc + + request = {} + client.list_materialized_views(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.list_materialized_views(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_list_materialized_views_rest_required_fields( + request_type=bigtable_instance_admin.ListMaterializedViewsRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_materialized_views._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_materialized_views._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "page_size", + "page_token", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable_instance_admin.ListMaterializedViewsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListMaterializedViewsResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.list_materialized_views(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_materialized_views_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_materialized_views._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "pageSize", + "pageToken", + ) + ) + & set(("parent",)) + ) + + +def test_list_materialized_views_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_instance_admin.ListMaterializedViewsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/instances/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListMaterializedViewsResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.list_materialized_views(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/instances/*}/materializedViews" + % client.transport._host, + args[1], + ) + + +def test_list_materialized_views_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_materialized_views( + bigtable_instance_admin.ListMaterializedViewsRequest(), + parent="parent_value", + ) + + +def test_list_materialized_views_rest_pager(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + bigtable_instance_admin.ListMaterializedViewsResponse( + materialized_views=[ + instance.MaterializedView(), + instance.MaterializedView(), + instance.MaterializedView(), + ], + next_page_token="abc", + ), + bigtable_instance_admin.ListMaterializedViewsResponse( + materialized_views=[], + next_page_token="def", + ), + bigtable_instance_admin.ListMaterializedViewsResponse( + materialized_views=[ + instance.MaterializedView(), + ], + next_page_token="ghi", + ), + bigtable_instance_admin.ListMaterializedViewsResponse( + materialized_views=[ + instance.MaterializedView(), + instance.MaterializedView(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + bigtable_instance_admin.ListMaterializedViewsResponse.to_json(x) + for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"parent": "projects/sample1/instances/sample2"} + + pager = client.list_materialized_views(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, instance.MaterializedView) for i in results) + + pages = list(client.list_materialized_views(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_update_materialized_view_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.update_materialized_view + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.update_materialized_view + ] = mock_rpc + + request = {} + client.update_materialized_view(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.update_materialized_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_update_materialized_view_rest_required_fields( + request_type=bigtable_instance_admin.UpdateMaterializedViewRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_materialized_view._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_materialized_view._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("update_mask",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "patch", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.update_materialized_view(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_update_materialized_view_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.update_materialized_view._get_unset_required_fields({}) + assert set(unset_fields) == (set(("updateMask",)) & set(("materializedView",))) + + +def test_update_materialized_view_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "materialized_view": { + "name": "projects/sample1/instances/sample2/materializedViews/sample3" + } + } + + # get truthy value for each flattened field + mock_args = dict( + materialized_view=instance.MaterializedView(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.update_materialized_view(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{materialized_view.name=projects/*/instances/*/materializedViews/*}" + % client.transport._host, + args[1], + ) + + +def test_update_materialized_view_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.update_materialized_view( + bigtable_instance_admin.UpdateMaterializedViewRequest(), + materialized_view=instance.MaterializedView(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +def test_delete_materialized_view_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.delete_materialized_view + in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.delete_materialized_view + ] = mock_rpc + + request = {} + client.delete_materialized_view(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.delete_materialized_view(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_delete_materialized_view_rest_required_fields( + request_type=bigtable_instance_admin.DeleteMaterializedViewRequest, +): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_materialized_view._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_materialized_view._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("etag",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = None + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "delete", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.delete_materialized_view(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_delete_materialized_view_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.delete_materialized_view._get_unset_required_fields({}) + assert set(unset_fields) == (set(("etag",)) & set(("name",))) + + +def test_delete_materialized_view_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/instances/sample2/materializedViews/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.delete_materialized_view(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/materializedViews/*}" + % client.transport._host, + args[1], + ) + + +def test_delete_materialized_view_rest_flattened_error(transport: str = "rest"): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_materialized_view( + bigtable_instance_admin.DeleteMaterializedViewRequest(), + name="name_value", + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.BigtableInstanceAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. 
+ transport = transports.BigtableInstanceAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BigtableInstanceAdminClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.BigtableInstanceAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = BigtableInstanceAdminClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = BigtableInstanceAdminClient( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.BigtableInstanceAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BigtableInstanceAdminClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.BigtableInstanceAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = BigtableInstanceAdminClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.BigtableInstanceAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.BigtableInstanceAdminGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.BigtableInstanceAdminGrpcTransport, + transports.BigtableInstanceAdminGrpcAsyncIOTransport, + transports.BigtableInstanceAdminRestTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_transport_kind_grpc(): + transport = BigtableInstanceAdminClient.get_transport_class("grpc")( + credentials=ga_credentials.AnonymousCredentials() + ) + assert transport.kind == "grpc" + + +def test_initialize_client_w_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc" + ) + assert client is not None + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_instance_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.create_instance), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_instance(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.CreateInstanceRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_instance_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_instance), "__call__") as call: + call.return_value = instance.Instance() + client.get_instance(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.GetInstanceRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_instances_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_instances), "__call__") as call: + call.return_value = bigtable_instance_admin.ListInstancesResponse() + client.list_instances(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.ListInstancesRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_update_instance_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.update_instance), "__call__") as call: + call.return_value = instance.Instance() + client.update_instance(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = instance.Instance() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_partial_update_instance_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.partial_update_instance), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.partial_update_instance(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.PartialUpdateInstanceRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_instance_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. 
+ with mock.patch.object(type(client.transport.delete_instance), "__call__") as call: + call.return_value = None + client.delete_instance(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.DeleteInstanceRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_cluster_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.create_cluster), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.CreateClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_cluster_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_cluster), "__call__") as call: + call.return_value = instance.Cluster() + client.get_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.GetClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_clusters_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: + call.return_value = bigtable_instance_admin.ListClustersResponse() + client.list_clusters(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.ListClustersRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_update_cluster_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.update_cluster), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.update_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = instance.Cluster() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+def test_partial_update_cluster_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.partial_update_cluster), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.partial_update_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.PartialUpdateClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_cluster_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call: + call.return_value = None + client.delete_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.DeleteClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_app_profile_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_app_profile), "__call__" + ) as call: + call.return_value = instance.AppProfile() + client.create_app_profile(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.CreateAppProfileRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_app_profile_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_app_profile), "__call__") as call: + call.return_value = instance.AppProfile() + client.get_app_profile(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.GetAppProfileRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_app_profiles_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_app_profiles), "__call__" + ) as call: + call.return_value = bigtable_instance_admin.ListAppProfilesResponse() + client.list_app_profiles(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.ListAppProfilesRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_update_app_profile_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.update_app_profile), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.update_app_profile(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.UpdateAppProfileRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_app_profile_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.delete_app_profile), "__call__" + ) as call: + call.return_value = None + client.delete_app_profile(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.DeleteAppProfileRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_iam_policy_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + client.get_iam_policy(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.GetIamPolicyRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_set_iam_policy_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + client.set_iam_policy(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.SetIamPolicyRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_test_iam_permissions_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. 
+ with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + client.test_iam_permissions(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.TestIamPermissionsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_hot_tablets_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_hot_tablets), "__call__") as call: + call.return_value = bigtable_instance_admin.ListHotTabletsResponse() + client.list_hot_tablets(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.ListHotTabletsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_logical_view_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_logical_view), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_logical_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.CreateLogicalViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_logical_view_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_logical_view), "__call__") as call: + call.return_value = instance.LogicalView() + client.get_logical_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.GetLogicalViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_logical_views_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_logical_views), "__call__" + ) as call: + call.return_value = bigtable_instance_admin.ListLogicalViewsResponse() + client.list_logical_views(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.ListLogicalViewsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_update_logical_view_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.update_logical_view), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.update_logical_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.UpdateLogicalViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_logical_view_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.delete_logical_view), "__call__" + ) as call: + call.return_value = None + client.delete_logical_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.DeleteLogicalViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_materialized_view_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_materialized_view), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_materialized_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.CreateMaterializedViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_materialized_view_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.get_materialized_view), "__call__" + ) as call: + call.return_value = instance.MaterializedView() + client.get_materialized_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.GetMaterializedViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+def test_list_materialized_views_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_materialized_views), "__call__" + ) as call: + call.return_value = bigtable_instance_admin.ListMaterializedViewsResponse() + client.list_materialized_views(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.ListMaterializedViewsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_update_materialized_view_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.update_materialized_view), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.update_materialized_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.UpdateMaterializedViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_materialized_view_empty_call_grpc(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.delete_materialized_view), "__call__" + ) as call: + call.return_value = None + client.delete_materialized_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.DeleteMaterializedViewRequest() + + assert args[0] == request_msg + + +def test_transport_kind_grpc_asyncio(): + transport = BigtableInstanceAdminAsyncClient.get_transport_class("grpc_asyncio")( + credentials=async_anonymous_credentials() + ) + assert transport.kind == "grpc_asyncio" + + +def test_initialize_client_w_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), transport="grpc_asyncio" + ) + assert client is not None + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_create_instance_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.create_instance), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.create_instance(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.CreateInstanceRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_get_instance_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_instance), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + instance.Instance( + name="name_value", + display_name="display_name_value", + state=instance.Instance.State.READY, + type_=instance.Instance.Type.PRODUCTION, + satisfies_pzs=True, + satisfies_pzi=True, + ) + ) + await client.get_instance(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.GetInstanceRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_list_instances_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_instances), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_instance_admin.ListInstancesResponse( + failed_locations=["failed_locations_value"], + next_page_token="next_page_token_value", + ) + ) + await client.list_instances(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.ListInstancesRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_update_instance_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.update_instance), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + instance.Instance( + name="name_value", + display_name="display_name_value", + state=instance.Instance.State.READY, + type_=instance.Instance.Type.PRODUCTION, + satisfies_pzs=True, + satisfies_pzi=True, + ) + ) + await client.update_instance(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = instance.Instance() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+@pytest.mark.asyncio +async def test_partial_update_instance_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.partial_update_instance), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.partial_update_instance(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.PartialUpdateInstanceRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_delete_instance_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.delete_instance), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_instance(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.DeleteInstanceRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_create_cluster_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.create_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.create_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.CreateClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_get_cluster_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + instance.Cluster( + name="name_value", + location="location_value", + state=instance.Cluster.State.READY, + serve_nodes=1181, + node_scaling_factor=instance.Cluster.NodeScalingFactor.NODE_SCALING_FACTOR_1X, + default_storage_type=common.StorageType.SSD, + ) + ) + await client.get_cluster(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.GetClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_list_clusters_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_clusters), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_instance_admin.ListClustersResponse( + failed_locations=["failed_locations_value"], + next_page_token="next_page_token_value", + ) + ) + await client.list_clusters(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.ListClustersRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_update_cluster_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.update_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.update_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = instance.Cluster() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_partial_update_cluster_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.partial_update_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.partial_update_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.PartialUpdateClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_delete_cluster_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_cluster(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.DeleteClusterRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_create_app_profile_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_app_profile), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + instance.AppProfile( + name="name_value", + etag="etag_value", + description="description_value", + ) + ) + await client.create_app_profile(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.CreateAppProfileRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_get_app_profile_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_app_profile), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + instance.AppProfile( + name="name_value", + etag="etag_value", + description="description_value", + ) + ) + await client.get_app_profile(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.GetAppProfileRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_list_app_profiles_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_app_profiles), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_instance_admin.ListAppProfilesResponse( + next_page_token="next_page_token_value", + failed_locations=["failed_locations_value"], + ) + ) + await client.list_app_profiles(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.ListAppProfilesRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+@pytest.mark.asyncio +async def test_update_app_profile_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.update_app_profile), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.update_app_profile(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.UpdateAppProfileRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_delete_app_profile_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.delete_app_profile), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_app_profile(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.DeleteAppProfileRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_get_iam_policy_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + ) + await client.get_iam_policy(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.GetIamPolicyRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_set_iam_policy_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + ) + await client.set_iam_policy(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.SetIamPolicyRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_test_iam_permissions_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + ) + await client.test_iam_permissions(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.TestIamPermissionsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_list_hot_tablets_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_hot_tablets), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_instance_admin.ListHotTabletsResponse( + next_page_token="next_page_token_value", + ) + ) + await client.list_hot_tablets(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.ListHotTabletsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_create_logical_view_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_logical_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.create_logical_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.CreateLogicalViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_get_logical_view_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_logical_view), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + instance.LogicalView( + name="name_value", + query="query_value", + etag="etag_value", + ) + ) + await client.get_logical_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.GetLogicalViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_list_logical_views_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_logical_views), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_instance_admin.ListLogicalViewsResponse( + next_page_token="next_page_token_value", + ) + ) + await client.list_logical_views(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.ListLogicalViewsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_update_logical_view_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.update_logical_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.update_logical_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.UpdateLogicalViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_delete_logical_view_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.delete_logical_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_logical_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.DeleteLogicalViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+@pytest.mark.asyncio +async def test_create_materialized_view_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_materialized_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.create_materialized_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.CreateMaterializedViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_get_materialized_view_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.get_materialized_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + instance.MaterializedView( + name="name_value", + query="query_value", + etag="etag_value", + deletion_protection=True, + ) + ) + await client.get_materialized_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.GetMaterializedViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_list_materialized_views_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_materialized_views), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_instance_admin.ListMaterializedViewsResponse( + next_page_token="next_page_token_value", + ) + ) + await client.list_materialized_views(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.ListMaterializedViewsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_update_materialized_view_empty_call_grpc_asyncio(): + client = BigtableInstanceAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.update_materialized_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name="operations/spam")
+        )
+        await client.update_materialized_view(request=None)
+
+        # Establish that the underlying stub method was called.
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        request_msg = bigtable_instance_admin.UpdateMaterializedViewRequest()
+
+        assert args[0] == request_msg
+
+
+# This test is a coverage failsafe to make sure that totally empty calls,
+# i.e. request == None and no flattened fields passed, work.
+@pytest.mark.asyncio
+async def test_delete_materialized_view_empty_call_grpc_asyncio():
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=async_anonymous_credentials(),
+        transport="grpc_asyncio",
+    )
+
+    # Mock the actual call, and fake the request.
+    with mock.patch.object(
+        type(client.transport.delete_materialized_view), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        await client.delete_materialized_view(request=None)
+
+        # Establish that the underlying stub method was called.
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        request_msg = bigtable_instance_admin.DeleteMaterializedViewRequest()
+
+        assert args[0] == request_msg
+
+
+def test_transport_kind_rest():
+    transport = BigtableInstanceAdminClient.get_transport_class("rest")(
+        credentials=ga_credentials.AnonymousCredentials()
+    )
+    assert transport.kind == "rest"
+
+
+def test_create_instance_rest_bad_request(
+    request_type=bigtable_instance_admin.CreateInstanceRequest,
+):
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+    )
+    # send a request that will satisfy transcoding
+    request_init = {"parent": "projects/sample1"}
+    request = request_type(**request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+    with mock.patch.object(Session, "request") as req, pytest.raises(
+        core_exceptions.BadRequest
+    ):
+        # Wrap the value into a proper Response obj
+        response_value = mock.Mock()
+        json_return_value = ""
+        response_value.json = mock.Mock(return_value={})
+        response_value.status_code = 400
+        response_value.request = mock.Mock()
+        req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
+        client.create_instance(request)
+
+
+@pytest.mark.parametrize(
+    "request_type",
+    [
+        bigtable_instance_admin.CreateInstanceRequest,
+        dict,
+    ],
+)
+def test_create_instance_rest_call_success(request_type):
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(), transport="rest"
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {"parent": "projects/sample1"}
+    request = request_type(**request_init)
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(type(client.transport._session), "request") as req:
+        # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.create_instance(request) + + # Establish that the response is the type that we expect. + json_return_value = json_format.MessageToJson(return_value) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_create_instance_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_create_instance" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, + "post_create_instance_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_create_instance" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = bigtable_instance_admin.CreateInstanceRequest.pb( + bigtable_instance_admin.CreateInstanceRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value + + request = bigtable_instance_admin.CreateInstanceRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata + + client.create_instance( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_get_instance_rest_bad_request( + request_type=bigtable_instance_admin.GetInstanceRequest, +): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.get_instance(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.GetInstanceRequest, + dict, + ], +) +def test_get_instance_rest_call_success(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = instance.Instance( + name="name_value", + display_name="display_name_value", + state=instance.Instance.State.READY, + type_=instance.Instance.Type.PRODUCTION, + satisfies_pzs=True, + satisfies_pzi=True, + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = instance.Instance.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.get_instance(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, instance.Instance) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.state == instance.Instance.State.READY + assert response.type_ == instance.Instance.Type.PRODUCTION + assert response.satisfies_pzs is True + assert response.satisfies_pzi is True + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_instance_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_get_instance" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, + "post_get_instance_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_get_instance" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = bigtable_instance_admin.GetInstanceRequest.pb( + bigtable_instance_admin.GetInstanceRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = instance.Instance.to_json(instance.Instance()) + req.return_value.content = return_value + + request = bigtable_instance_admin.GetInstanceRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = instance.Instance() + post_with_metadata.return_value = instance.Instance(), metadata + + client.get_instance( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_list_instances_rest_bad_request( + request_type=bigtable_instance_admin.ListInstancesRequest, +): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.list_instances(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.ListInstancesRequest, + dict, + ], +) +def test_list_instances_rest_call_success(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_instance_admin.ListInstancesResponse( + failed_locations=["failed_locations_value"], + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListInstancesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.list_instances(request) + + assert response.raw_page is response + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, bigtable_instance_admin.ListInstancesResponse) + assert response.failed_locations == ["failed_locations_value"] + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_instances_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_list_instances" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, + "post_list_instances_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_list_instances" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = bigtable_instance_admin.ListInstancesRequest.pb( + bigtable_instance_admin.ListInstancesRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = bigtable_instance_admin.ListInstancesResponse.to_json( + bigtable_instance_admin.ListInstancesResponse() + ) + req.return_value.content = return_value + + request = bigtable_instance_admin.ListInstancesRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable_instance_admin.ListInstancesResponse() + post_with_metadata.return_value = ( + bigtable_instance_admin.ListInstancesResponse(), + metadata, + ) + + client.list_instances( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_update_instance_rest_bad_request(request_type=instance.Instance): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.update_instance(request) + + +@pytest.mark.parametrize( + "request_type", + [ + instance.Instance, + dict, + ], +) +def test_update_instance_rest_call_success(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = instance.Instance( + name="name_value", + display_name="display_name_value", + state=instance.Instance.State.READY, + type_=instance.Instance.Type.PRODUCTION, + satisfies_pzs=True, + satisfies_pzi=True, + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = instance.Instance.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.update_instance(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, instance.Instance) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.state == instance.Instance.State.READY + assert response.type_ == instance.Instance.Type.PRODUCTION + assert response.satisfies_pzs is True + assert response.satisfies_pzi is True + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_update_instance_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_update_instance" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, + "post_update_instance_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_update_instance" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = instance.Instance.pb(instance.Instance()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = instance.Instance.to_json(instance.Instance()) + req.return_value.content = return_value + + request = instance.Instance() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = instance.Instance() + post_with_metadata.return_value = instance.Instance(), metadata + + client.update_instance( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_partial_update_instance_rest_bad_request( + request_type=bigtable_instance_admin.PartialUpdateInstanceRequest, +): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"instance": {"name": "projects/sample1/instances/sample2"}} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.partial_update_instance(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.PartialUpdateInstanceRequest, + dict, + ], +) +def test_partial_update_instance_rest_call_success(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"instance": {"name": "projects/sample1/instances/sample2"}} + request_init["instance"] = { + "name": "projects/sample1/instances/sample2", + "display_name": "display_name_value", + "state": 1, + "type_": 1, + "labels": {}, + "create_time": {"seconds": 751, "nanos": 543}, + "satisfies_pzs": True, + "satisfies_pzi": True, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_instance_admin.PartialUpdateInstanceRequest.meta.fields[ + "instance" + ] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["instance"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["instance"][field])): + del request_init["instance"][field][i][subfield] + else: + del request_init["instance"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.partial_update_instance(request) + + # Establish that the response is the type that we expect. 
+ json_return_value = json_format.MessageToJson(return_value) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_partial_update_instance_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_partial_update_instance" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, + "post_partial_update_instance_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_partial_update_instance" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = bigtable_instance_admin.PartialUpdateInstanceRequest.pb( + bigtable_instance_admin.PartialUpdateInstanceRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value + + request = bigtable_instance_admin.PartialUpdateInstanceRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata + + client.partial_update_instance( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_delete_instance_rest_bad_request( + request_type=bigtable_instance_admin.DeleteInstanceRequest, +): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.delete_instance(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.DeleteInstanceRequest, + dict, + ], +) +def test_delete_instance_rest_call_success(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = "" + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.delete_instance(request) + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_delete_instance_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_delete_instance" + ) as pre: + pre.assert_not_called() + pb_message = bigtable_instance_admin.DeleteInstanceRequest.pb( + bigtable_instance_admin.DeleteInstanceRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + request = bigtable_instance_admin.DeleteInstanceRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + + client.delete_instance( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + + +def test_create_cluster_rest_bad_request( + request_type=bigtable_instance_admin.CreateClusterRequest, +): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.create_cluster(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.CreateClusterRequest, + dict, + ], +) +def test_create_cluster_rest_call_success(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} + request_init["cluster"] = { + "name": "name_value", + "location": "location_value", + "state": 1, + "serve_nodes": 1181, + "node_scaling_factor": 1, + "cluster_config": { + "cluster_autoscaling_config": { + "autoscaling_limits": { + "min_serve_nodes": 1600, + "max_serve_nodes": 1602, + }, + "autoscaling_targets": { + "cpu_utilization_percent": 2483, + "storage_utilization_gib_per_node": 3404, + }, + } + }, + "default_storage_type": 1, + "encryption_config": {"kms_key_name": "kms_key_name_value"}, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_instance_admin.CreateClusterRequest.meta.fields["cluster"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["cluster"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["cluster"][field])): + del request_init["cluster"][field][i][subfield] + else: + del request_init["cluster"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.create_cluster(request) + + # Establish that the response is the type that we expect. 
+ json_return_value = json_format.MessageToJson(return_value) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_create_cluster_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_create_cluster" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, + "post_create_cluster_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_create_cluster" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = bigtable_instance_admin.CreateClusterRequest.pb( + bigtable_instance_admin.CreateClusterRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value + + request = bigtable_instance_admin.CreateClusterRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata + + client.create_cluster( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_get_cluster_rest_bad_request( + request_type=bigtable_instance_admin.GetClusterRequest, +): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/clusters/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.get_cluster(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.GetClusterRequest, + dict, + ], +) +def test_get_cluster_rest_call_success(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/clusters/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = instance.Cluster( + name="name_value", + location="location_value", + state=instance.Cluster.State.READY, + serve_nodes=1181, + node_scaling_factor=instance.Cluster.NodeScalingFactor.NODE_SCALING_FACTOR_1X, + default_storage_type=common.StorageType.SSD, + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = instance.Cluster.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.get_cluster(request) + + # Establish that the response is the type that we expect. 
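+    # The mocked payload above is the proto-plus Cluster converted to its pb
+    # type and serialized with MessageToJson, so these asserts exercise the
+    # transport's JSON response parsing.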
+ assert isinstance(response, instance.Cluster) + assert response.name == "name_value" + assert response.location == "location_value" + assert response.state == instance.Cluster.State.READY + assert response.serve_nodes == 1181 + assert ( + response.node_scaling_factor + == instance.Cluster.NodeScalingFactor.NODE_SCALING_FACTOR_1X + ) + assert response.default_storage_type == common.StorageType.SSD + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_cluster_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_get_cluster" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, + "post_get_cluster_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_get_cluster" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = bigtable_instance_admin.GetClusterRequest.pb( + bigtable_instance_admin.GetClusterRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = instance.Cluster.to_json(instance.Cluster()) + req.return_value.content = return_value + + request = bigtable_instance_admin.GetClusterRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = instance.Cluster() + post_with_metadata.return_value = instance.Cluster(), metadata + + client.get_cluster( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_list_clusters_rest_bad_request( + request_type=bigtable_instance_admin.ListClustersRequest, ): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1"} + request_init = {"parent": "projects/sample1/instances/sample2"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
@@ -13563,44 +21320,56 @@ def test_create_instance_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value - client.create_instance(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.list_clusters(request) @pytest.mark.parametrize( "request_type", [ - bigtable_instance_admin.CreateInstanceRequest, + bigtable_instance_admin.ListClustersRequest, dict, ], ) -def test_create_instance_rest_call_success(request_type): +def test_list_clusters_rest_call_success(request_type): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1"} + request_init = {"parent": "projects/sample1/instances/sample2"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") + return_value = bigtable_instance_admin.ListClustersResponse( + failed_locations=["failed_locations_value"], + next_page_token="next_page_token_value", + ) # Wrap the value into a proper Response obj response_value = mock.Mock() response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListClustersResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.create_instance(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.list_clusters(request) + + assert response.raw_page is response # Establish that the response is the type that we expect. 
- json_return_value = json_format.MessageToJson(return_value) + assert isinstance(response, bigtable_instance_admin.ListClustersResponse) + assert response.failed_locations == ["failed_locations_value"] + assert response.next_page_token == "next_page_token_value" @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_create_instance_rest_interceptors(null_interceptor): +def test_list_clusters_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -13614,16 +21383,18 @@ def test_create_instance_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_create_instance" + transports.BigtableInstanceAdminRestInterceptor, "post_list_clusters" ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_create_instance" + transports.BigtableInstanceAdminRestInterceptor, + "post_list_clusters_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_list_clusters" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = bigtable_instance_admin.CreateInstanceRequest.pb( - bigtable_instance_admin.CreateInstanceRequest() + post_with_metadata.assert_not_called() + pb_message = bigtable_instance_admin.ListClustersRequest.pb( + bigtable_instance_admin.ListClustersRequest() ) transcode.return_value = { "method": "post", @@ -13634,18 +21405,25 @@ def test_create_instance_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 - return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = bigtable_instance_admin.ListClustersResponse.to_json( + bigtable_instance_admin.ListClustersResponse() + ) req.return_value.content = return_value - request = bigtable_instance_admin.CreateInstanceRequest() + request = bigtable_instance_admin.ListClustersRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() + post.return_value = bigtable_instance_admin.ListClustersResponse() + post_with_metadata.return_value = ( + bigtable_instance_admin.ListClustersResponse(), + metadata, + ) - client.create_instance( + client.list_clusters( request, metadata=[ ("key", "val"), @@ -13655,16 +21433,15 @@ def test_create_instance_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() -def test_get_instance_rest_bad_request( - request_type=bigtable_instance_admin.GetInstanceRequest, -): +def test_update_cluster_rest_bad_request(request_type=instance.Cluster): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2"} + request_init = {"name": "projects/sample1/instances/sample2/clusters/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
@@ -13678,58 +21455,46 @@ def test_get_instance_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value - client.get_instance(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.update_cluster(request) @pytest.mark.parametrize( "request_type", [ - bigtable_instance_admin.GetInstanceRequest, + instance.Cluster, dict, ], ) -def test_get_instance_rest_call_success(request_type): +def test_update_cluster_rest_call_success(request_type): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2"} + request_init = {"name": "projects/sample1/instances/sample2/clusters/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = instance.Instance( - name="name_value", - display_name="display_name_value", - state=instance.Instance.State.READY, - type_=instance.Instance.Type.PRODUCTION, - satisfies_pzs=True, - ) + return_value = operations_pb2.Operation(name="operations/spam") # Wrap the value into a proper Response obj response_value = mock.Mock() response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = instance.Instance.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.get_instance(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.update_cluster(request) # Establish that the response is the type that we expect. 
- assert isinstance(response, instance.Instance) - assert response.name == "name_value" - assert response.display_name == "display_name_value" - assert response.state == instance.Instance.State.READY - assert response.type_ == instance.Instance.Type.PRODUCTION - assert response.satisfies_pzs is True + json_return_value = json_format.MessageToJson(return_value) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_instance_rest_interceptors(null_interceptor): +def test_update_cluster_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -13743,15 +21508,19 @@ def test_get_instance_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_get_instance" + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_update_cluster" ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_get_instance" + transports.BigtableInstanceAdminRestInterceptor, + "post_update_cluster_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_update_cluster" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = bigtable_instance_admin.GetInstanceRequest.pb( - bigtable_instance_admin.GetInstanceRequest() - ) + post_with_metadata.assert_not_called() + pb_message = instance.Cluster.pb(instance.Cluster()) transcode.return_value = { "method": "post", "uri": "my_uri", @@ -13761,18 +21530,20 @@ def test_get_instance_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 - return_value = instance.Instance.to_json(instance.Instance()) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value - request = bigtable_instance_admin.GetInstanceRequest() + request = instance.Cluster() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = instance.Instance() + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata - client.get_instance( + client.update_cluster( request, metadata=[ ("key", "val"), @@ -13782,16 +21553,19 @@ def test_get_instance_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() -def test_list_instances_rest_bad_request( - request_type=bigtable_instance_admin.ListInstancesRequest, +def test_partial_update_cluster_rest_bad_request( + request_type=bigtable_instance_admin.PartialUpdateClusterRequest, ): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1"} + request_init = { + "cluster": {"name": "projects/sample1/instances/sample2/clusters/sample3"} + } request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
@@ -13805,54 +21579,138 @@ def test_list_instances_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value - client.list_instances(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.partial_update_cluster(request) @pytest.mark.parametrize( "request_type", [ - bigtable_instance_admin.ListInstancesRequest, + bigtable_instance_admin.PartialUpdateClusterRequest, dict, ], ) -def test_list_instances_rest_call_success(request_type): +def test_partial_update_cluster_rest_call_success(request_type): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1"} + request_init = { + "cluster": {"name": "projects/sample1/instances/sample2/clusters/sample3"} + } + request_init["cluster"] = { + "name": "projects/sample1/instances/sample2/clusters/sample3", + "location": "location_value", + "state": 1, + "serve_nodes": 1181, + "node_scaling_factor": 1, + "cluster_config": { + "cluster_autoscaling_config": { + "autoscaling_limits": { + "min_serve_nodes": 1600, + "max_serve_nodes": 1602, + }, + "autoscaling_targets": { + "cpu_utilization_percent": 2483, + "storage_utilization_gib_per_node": 3404, + }, + } + }, + "default_storage_type": 1, + "encryption_config": {"kms_key_name": "kms_key_name_value"}, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_instance_admin.PartialUpdateClusterRequest.meta.fields[ + "cluster" + ] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["cluster"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["cluster"][field])): + del request_init["cluster"][field][i][subfield] + else: + del request_init["cluster"][field][subfield] request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListInstancesResponse( - failed_locations=["failed_locations_value"], - next_page_token="next_page_token_value", - ) + return_value = operations_pb2.Operation(name="operations/spam") # Wrap the value into a proper Response obj response_value = mock.Mock() response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable_instance_admin.ListInstancesResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.list_instances(request) - - assert response.raw_page is response + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.partial_update_cluster(request) # Establish that the response is the type that we expect. 
- assert isinstance(response, bigtable_instance_admin.ListInstancesResponse) - assert response.failed_locations == ["failed_locations_value"] - assert response.next_page_token == "next_page_token_value" + json_return_value = json_format.MessageToJson(return_value) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_instances_rest_interceptors(null_interceptor): +def test_partial_update_cluster_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -13866,14 +21724,20 @@ def test_list_instances_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_list_instances" + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_partial_update_cluster" ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_list_instances" + transports.BigtableInstanceAdminRestInterceptor, + "post_partial_update_cluster_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_partial_update_cluster" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = bigtable_instance_admin.ListInstancesRequest.pb( - bigtable_instance_admin.ListInstancesRequest() + post_with_metadata.assert_not_called() + pb_message = bigtable_instance_admin.PartialUpdateClusterRequest.pb( + bigtable_instance_admin.PartialUpdateClusterRequest() ) transcode.return_value = { "method": "post", @@ -13884,20 +21748,20 @@ def test_list_instances_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 - return_value = bigtable_instance_admin.ListInstancesResponse.to_json( - bigtable_instance_admin.ListInstancesResponse() - ) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value - request = bigtable_instance_admin.ListInstancesRequest() + request = bigtable_instance_admin.PartialUpdateClusterRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = bigtable_instance_admin.ListInstancesResponse() + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata - client.list_instances( + client.partial_update_cluster( request, metadata=[ ("key", "val"), @@ -13907,14 +21771,17 @@ def test_list_instances_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() -def test_update_instance_rest_bad_request(request_type=instance.Instance): +def test_delete_cluster_rest_bad_request( + request_type=bigtable_instance_admin.DeleteClusterRequest, +): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2"} + request_init = {"name": "projects/sample1/instances/sample2/clusters/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
@@ -13928,58 +21795,46 @@ def test_update_instance_rest_bad_request(request_type=instance.Instance): response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value - client.update_instance(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.delete_cluster(request) @pytest.mark.parametrize( "request_type", [ - instance.Instance, + bigtable_instance_admin.DeleteClusterRequest, dict, ], ) -def test_update_instance_rest_call_success(request_type): +def test_delete_cluster_rest_call_success(request_type): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2"} + request_init = {"name": "projects/sample1/instances/sample2/clusters/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = instance.Instance( - name="name_value", - display_name="display_name_value", - state=instance.Instance.State.READY, - type_=instance.Instance.Type.PRODUCTION, - satisfies_pzs=True, - ) + return_value = None # Wrap the value into a proper Response obj response_value = mock.Mock() response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = instance.Instance.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) + json_return_value = "" response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.update_instance(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.delete_cluster(request) # Establish that the response is the type that we expect. 
- assert isinstance(response, instance.Instance) - assert response.name == "name_value" - assert response.display_name == "display_name_value" - assert response.state == instance.Instance.State.READY - assert response.type_ == instance.Instance.Type.PRODUCTION - assert response.satisfies_pzs is True + assert response is None @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_update_instance_rest_interceptors(null_interceptor): +def test_delete_cluster_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -13993,13 +21848,12 @@ def test_update_instance_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_update_instance" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_update_instance" + transports.BigtableInstanceAdminRestInterceptor, "pre_delete_cluster" ) as pre: pre.assert_not_called() - post.assert_not_called() - pb_message = instance.Instance.pb(instance.Instance()) + pb_message = bigtable_instance_admin.DeleteClusterRequest.pb( + bigtable_instance_admin.DeleteClusterRequest() + ) transcode.return_value = { "method": "post", "uri": "my_uri", @@ -14009,18 +21863,16 @@ def test_update_instance_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 - return_value = instance.Instance.to_json(instance.Instance()) - req.return_value.content = return_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - request = instance.Instance() + request = bigtable_instance_admin.DeleteClusterRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = instance.Instance() - client.update_instance( + client.delete_cluster( request, metadata=[ ("key", "val"), @@ -14029,17 +21881,16 @@ def test_update_instance_rest_interceptors(null_interceptor): ) pre.assert_called_once() - post.assert_called_once() -def test_partial_update_instance_rest_bad_request( - request_type=bigtable_instance_admin.PartialUpdateInstanceRequest, +def test_create_app_profile_rest_bad_request( + request_type=bigtable_instance_admin.CreateAppProfileRequest, ): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"instance": {"name": "projects/sample1/instances/sample2"}} + request_init = {"parent": "projects/sample1/instances/sample2"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
@@ -14053,39 +21904,47 @@ def test_partial_update_instance_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value - client.partial_update_instance(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.create_app_profile(request) @pytest.mark.parametrize( "request_type", [ - bigtable_instance_admin.PartialUpdateInstanceRequest, + bigtable_instance_admin.CreateAppProfileRequest, dict, ], ) -def test_partial_update_instance_rest_call_success(request_type): +def test_create_app_profile_rest_call_success(request_type): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"instance": {"name": "projects/sample1/instances/sample2"}} - request_init["instance"] = { - "name": "projects/sample1/instances/sample2", - "display_name": "display_name_value", - "state": 1, - "type_": 1, - "labels": {}, - "create_time": {"seconds": 751, "nanos": 543}, - "satisfies_pzs": True, + request_init = {"parent": "projects/sample1/instances/sample2"} + request_init["app_profile"] = { + "name": "name_value", + "etag": "etag_value", + "description": "description_value", + "multi_cluster_routing_use_any": { + "cluster_ids": ["cluster_ids_value1", "cluster_ids_value2"], + "row_affinity": {}, + }, + "single_cluster_routing": { + "cluster_id": "cluster_id_value", + "allow_transactional_writes": True, + }, + "priority": 1, + "standard_isolation": {"priority": 1}, + "data_boost_isolation_read_only": {"compute_billing_owner": 1}, } # The version of a generated dependency at test runtime may differ from the version used during generation. # Delete any fields which are not present in the current runtime dependency # See https://github.com/googleapis/gapic-generator-python/issues/1748 # Determine if the message type is proto-plus or protobuf - test_field = bigtable_instance_admin.PartialUpdateInstanceRequest.meta.fields[ - "instance" + test_field = bigtable_instance_admin.CreateAppProfileRequest.meta.fields[ + "app_profile" ] def get_message_fields(field): @@ -14114,7 +21973,7 @@ def get_message_fields(field): # For each item in the sample request, create a list of sub fields which are not present at runtime # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["instance"].items(): # pragma: NO COVER + for field, value in request_init["app_profile"].items(): # pragma: NO COVER result = None is_repeated = False # For repeated fields @@ -14144,31 +22003,43 @@ def get_message_fields(field): subfield = subfield_to_delete.get("subfield") if subfield: if field_repeated: - for i in range(0, len(request_init["instance"][field])): - del request_init["instance"][field][i][subfield] + for i in range(0, len(request_init["app_profile"][field])): + del request_init["app_profile"][field][i][subfield] else: - del request_init["instance"][field][subfield] + del request_init["app_profile"][field][subfield] request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. 
- return_value = operations_pb2.Operation(name="operations/spam") + return_value = instance.AppProfile( + name="name_value", + etag="etag_value", + description="description_value", + priority=instance.AppProfile.Priority.PRIORITY_LOW, + ) # Wrap the value into a proper Response obj response_value = mock.Mock() response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = instance.AppProfile.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.partial_update_instance(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.create_app_profile(request) # Establish that the response is the type that we expect. - json_return_value = json_format.MessageToJson(return_value) + assert isinstance(response, instance.AppProfile) + assert response.name == "name_value" + assert response.etag == "etag_value" + assert response.description == "description_value" @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_partial_update_instance_rest_interceptors(null_interceptor): +def test_create_app_profile_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -14182,16 +22053,18 @@ def test_partial_update_instance_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_partial_update_instance" + transports.BigtableInstanceAdminRestInterceptor, "post_create_app_profile" ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_partial_update_instance" + transports.BigtableInstanceAdminRestInterceptor, + "post_create_app_profile_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_create_app_profile" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = bigtable_instance_admin.PartialUpdateInstanceRequest.pb( - bigtable_instance_admin.PartialUpdateInstanceRequest() + post_with_metadata.assert_not_called() + pb_message = bigtable_instance_admin.CreateAppProfileRequest.pb( + bigtable_instance_admin.CreateAppProfileRequest() ) transcode.return_value = { "method": "post", @@ -14202,18 +22075,153 @@ def test_partial_update_instance_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 - return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = instance.AppProfile.to_json(instance.AppProfile()) + req.return_value.content = return_value + + request = bigtable_instance_admin.CreateAppProfileRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = instance.AppProfile() + post_with_metadata.return_value = instance.AppProfile(), metadata + + client.create_app_profile( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_get_app_profile_rest_bad_request( + 
request_type=bigtable_instance_admin.GetAppProfileRequest, +): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/appProfiles/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.get_app_profile(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.GetAppProfileRequest, + dict, + ], +) +def test_get_app_profile_rest_call_success(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/appProfiles/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = instance.AppProfile( + name="name_value", + etag="etag_value", + description="description_value", + priority=instance.AppProfile.Priority.PRIORITY_LOW, + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = instance.AppProfile.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.get_app_profile(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, instance.AppProfile) + assert response.name == "name_value" + assert response.etag == "etag_value" + assert response.description == "description_value" + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_app_profile_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_get_app_profile" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, + "post_get_app_profile_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_get_app_profile" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = bigtable_instance_admin.GetAppProfileRequest.pb( + bigtable_instance_admin.GetAppProfileRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = instance.AppProfile.to_json(instance.AppProfile()) req.return_value.content = return_value - request = bigtable_instance_admin.PartialUpdateInstanceRequest() + request = bigtable_instance_admin.GetAppProfileRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() + post.return_value = instance.AppProfile() + post_with_metadata.return_value = instance.AppProfile(), metadata - client.partial_update_instance( + client.get_app_profile( request, metadata=[ ("key", "val"), @@ -14223,16 +22231,17 @@ def test_partial_update_instance_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() -def test_delete_instance_rest_bad_request( - request_type=bigtable_instance_admin.DeleteInstanceRequest, +def test_list_app_profiles_rest_bad_request( + request_type=bigtable_instance_admin.ListAppProfilesRequest, ): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2"} + request_init = {"parent": "projects/sample1/instances/sample2"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
@@ -14246,44 +22255,54 @@ def test_delete_instance_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value - client.delete_instance(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.list_app_profiles(request) @pytest.mark.parametrize( "request_type", [ - bigtable_instance_admin.DeleteInstanceRequest, + bigtable_instance_admin.ListAppProfilesRequest, dict, ], ) -def test_delete_instance_rest_call_success(request_type): +def test_list_app_profiles_rest_call_success(request_type): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2"} + request_init = {"parent": "projects/sample1/instances/sample2"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = None + return_value = bigtable_instance_admin.ListAppProfilesResponse( + next_page_token="next_page_token_value", + failed_locations=["failed_locations_value"], + ) # Wrap the value into a proper Response obj response_value = mock.Mock() response_value.status_code = 200 - json_return_value = "" + + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListAppProfilesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.delete_instance(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.list_app_profiles(request) # Establish that the response is the type that we expect. 
- assert response is None + assert isinstance(response, pagers.ListAppProfilesPager) + assert response.next_page_token == "next_page_token_value" + assert response.failed_locations == ["failed_locations_value"] @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_delete_instance_rest_interceptors(null_interceptor): +def test_list_app_profiles_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -14297,11 +22316,18 @@ def test_delete_instance_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_delete_instance" + transports.BigtableInstanceAdminRestInterceptor, "post_list_app_profiles" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, + "post_list_app_profiles_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_list_app_profiles" ) as pre: pre.assert_not_called() - pb_message = bigtable_instance_admin.DeleteInstanceRequest.pb( - bigtable_instance_admin.DeleteInstanceRequest() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = bigtable_instance_admin.ListAppProfilesRequest.pb( + bigtable_instance_admin.ListAppProfilesRequest() ) transcode.return_value = { "method": "post", @@ -14312,15 +22338,25 @@ def test_delete_instance_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = bigtable_instance_admin.ListAppProfilesResponse.to_json( + bigtable_instance_admin.ListAppProfilesResponse() + ) + req.return_value.content = return_value - request = bigtable_instance_admin.DeleteInstanceRequest() + request = bigtable_instance_admin.ListAppProfilesRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata + post.return_value = bigtable_instance_admin.ListAppProfilesResponse() + post_with_metadata.return_value = ( + bigtable_instance_admin.ListAppProfilesResponse(), + metadata, + ) - client.delete_instance( + client.list_app_profiles( request, metadata=[ ("key", "val"), @@ -14329,16 +22365,22 @@ def test_delete_instance_rest_interceptors(null_interceptor): ) pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() -def test_create_cluster_rest_bad_request( - request_type=bigtable_instance_admin.CreateClusterRequest, +def test_update_app_profile_rest_bad_request( + request_type=bigtable_instance_admin.UpdateAppProfileRequest, ): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} + request_init = { + "app_profile": { + "name": "projects/sample1/instances/sample2/appProfiles/sample3" + } + } request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
@@ -14352,50 +22394,52 @@ def test_create_cluster_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value - client.create_cluster(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.update_app_profile(request) @pytest.mark.parametrize( "request_type", [ - bigtable_instance_admin.CreateClusterRequest, + bigtable_instance_admin.UpdateAppProfileRequest, dict, ], ) -def test_create_cluster_rest_call_success(request_type): +def test_update_app_profile_rest_call_success(request_type): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} - request_init["cluster"] = { - "name": "name_value", - "location": "location_value", - "state": 1, - "serve_nodes": 1181, - "node_scaling_factor": 1, - "cluster_config": { - "cluster_autoscaling_config": { - "autoscaling_limits": { - "min_serve_nodes": 1600, - "max_serve_nodes": 1602, - }, - "autoscaling_targets": { - "cpu_utilization_percent": 2483, - "storage_utilization_gib_per_node": 3404, - }, - } + request_init = { + "app_profile": { + "name": "projects/sample1/instances/sample2/appProfiles/sample3" + } + } + request_init["app_profile"] = { + "name": "projects/sample1/instances/sample2/appProfiles/sample3", + "etag": "etag_value", + "description": "description_value", + "multi_cluster_routing_use_any": { + "cluster_ids": ["cluster_ids_value1", "cluster_ids_value2"], + "row_affinity": {}, }, - "default_storage_type": 1, - "encryption_config": {"kms_key_name": "kms_key_name_value"}, + "single_cluster_routing": { + "cluster_id": "cluster_id_value", + "allow_transactional_writes": True, + }, + "priority": 1, + "standard_isolation": {"priority": 1}, + "data_boost_isolation_read_only": {"compute_billing_owner": 1}, } # The version of a generated dependency at test runtime may differ from the version used during generation. 
# Delete any fields which are not present in the current runtime dependency # See https://github.com/googleapis/gapic-generator-python/issues/1748 # Determine if the message type is proto-plus or protobuf - test_field = bigtable_instance_admin.CreateClusterRequest.meta.fields["cluster"] + test_field = bigtable_instance_admin.UpdateAppProfileRequest.meta.fields[ + "app_profile" + ] def get_message_fields(field): # Given a field which is a message (composite type), return a list with @@ -14423,7 +22467,7 @@ def get_message_fields(field): # For each item in the sample request, create a list of sub fields which are not present at runtime # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["cluster"].items(): # pragma: NO COVER + for field, value in request_init["app_profile"].items(): # pragma: NO COVER result = None is_repeated = False # For repeated fields @@ -14453,10 +22497,10 @@ def get_message_fields(field): subfield = subfield_to_delete.get("subfield") if subfield: if field_repeated: - for i in range(0, len(request_init["cluster"][field])): - del request_init["cluster"][field][i][subfield] + for i in range(0, len(request_init["app_profile"][field])): + del request_init["app_profile"][field][i][subfield] else: - del request_init["cluster"][field][subfield] + del request_init["app_profile"][field][subfield] request = request_type(**request_init) # Mock the http request call within the method and fake a response. @@ -14470,14 +22514,15 @@ def get_message_fields(field): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.create_cluster(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.update_app_profile(request) # Establish that the response is the type that we expect. 
json_return_value = json_format.MessageToJson(return_value) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_create_cluster_rest_interceptors(null_interceptor): +def test_update_app_profile_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -14493,14 +22538,18 @@ def test_create_cluster_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( operation.Operation, "_set_result_from_operation" ), mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_create_cluster" + transports.BigtableInstanceAdminRestInterceptor, "post_update_app_profile" ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_create_cluster" + transports.BigtableInstanceAdminRestInterceptor, + "post_update_app_profile_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_update_app_profile" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = bigtable_instance_admin.CreateClusterRequest.pb( - bigtable_instance_admin.CreateClusterRequest() + post_with_metadata.assert_not_called() + pb_message = bigtable_instance_admin.UpdateAppProfileRequest.pb( + bigtable_instance_admin.UpdateAppProfileRequest() ) transcode.return_value = { "method": "post", @@ -14511,18 +22560,20 @@ def test_create_cluster_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value - request = bigtable_instance_admin.CreateClusterRequest() + request = bigtable_instance_admin.UpdateAppProfileRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata - client.create_cluster( + client.update_app_profile( request, metadata=[ ("key", "val"), @@ -14532,16 +22583,17 @@ def test_create_cluster_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() -def test_get_cluster_rest_bad_request( - request_type=bigtable_instance_admin.GetClusterRequest, +def test_delete_app_profile_rest_bad_request( + request_type=bigtable_instance_admin.DeleteAppProfileRequest, ): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/clusters/sample3"} + request_init = {"name": "projects/sample1/instances/sample2/appProfiles/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
@@ -14555,63 +22607,46 @@ def test_get_cluster_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value - client.get_cluster(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.delete_app_profile(request) @pytest.mark.parametrize( "request_type", [ - bigtable_instance_admin.GetClusterRequest, + bigtable_instance_admin.DeleteAppProfileRequest, dict, ], ) -def test_get_cluster_rest_call_success(request_type): +def test_delete_app_profile_rest_call_success(request_type): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/clusters/sample3"} + request_init = {"name": "projects/sample1/instances/sample2/appProfiles/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = instance.Cluster( - name="name_value", - location="location_value", - state=instance.Cluster.State.READY, - serve_nodes=1181, - node_scaling_factor=instance.Cluster.NodeScalingFactor.NODE_SCALING_FACTOR_1X, - default_storage_type=common.StorageType.SSD, - ) + return_value = None # Wrap the value into a proper Response obj response_value = mock.Mock() response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = instance.Cluster.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) + json_return_value = "" response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.get_cluster(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.delete_app_profile(request) # Establish that the response is the type that we expect. 
- assert isinstance(response, instance.Cluster) - assert response.name == "name_value" - assert response.location == "location_value" - assert response.state == instance.Cluster.State.READY - assert response.serve_nodes == 1181 - assert ( - response.node_scaling_factor - == instance.Cluster.NodeScalingFactor.NODE_SCALING_FACTOR_1X - ) - assert response.default_storage_type == common.StorageType.SSD + assert response is None @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_cluster_rest_interceptors(null_interceptor): +def test_delete_app_profile_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -14625,14 +22660,11 @@ def test_get_cluster_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_get_cluster" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_get_cluster" + transports.BigtableInstanceAdminRestInterceptor, "pre_delete_app_profile" ) as pre: pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_instance_admin.GetClusterRequest.pb( - bigtable_instance_admin.GetClusterRequest() + pb_message = bigtable_instance_admin.DeleteAppProfileRequest.pb( + bigtable_instance_admin.DeleteAppProfileRequest() ) transcode.return_value = { "method": "post", @@ -14643,18 +22675,16 @@ def test_get_cluster_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 - return_value = instance.Cluster.to_json(instance.Cluster()) - req.return_value.content = return_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - request = bigtable_instance_admin.GetClusterRequest() + request = bigtable_instance_admin.DeleteAppProfileRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = instance.Cluster() - client.get_cluster( + client.delete_app_profile( request, metadata=[ ("key", "val"), @@ -14663,17 +22693,16 @@ def test_get_cluster_rest_interceptors(null_interceptor): ) pre.assert_called_once() - post.assert_called_once() -def test_list_clusters_rest_bad_request( - request_type=bigtable_instance_admin.ListClustersRequest, +def test_get_iam_policy_rest_bad_request( + request_type=iam_policy_pb2.GetIamPolicyRequest, ): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} + request_init = {"resource": "projects/sample1/instances/sample2"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
@@ -14687,54 +22716,51 @@ def test_list_clusters_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value - client.list_clusters(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.get_iam_policy(request) @pytest.mark.parametrize( "request_type", [ - bigtable_instance_admin.ListClustersRequest, + iam_policy_pb2.GetIamPolicyRequest, dict, ], ) -def test_list_clusters_rest_call_success(request_type): +def test_get_iam_policy_rest_call_success(request_type): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} + request_init = {"resource": "projects/sample1/instances/sample2"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListClustersResponse( - failed_locations=["failed_locations_value"], - next_page_token="next_page_token_value", + return_value = policy_pb2.Policy( + version=774, + etag=b"etag_blob", ) # Wrap the value into a proper Response obj response_value = mock.Mock() response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable_instance_admin.ListClustersResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.list_clusters(request) - - assert response.raw_page is response + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.get_iam_policy(request) # Establish that the response is the type that we expect. 
- assert isinstance(response, bigtable_instance_admin.ListClustersResponse) - assert response.failed_locations == ["failed_locations_value"] - assert response.next_page_token == "next_page_token_value" + assert isinstance(response, policy_pb2.Policy) + assert response.version == 774 + assert response.etag == b"etag_blob" @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_clusters_rest_interceptors(null_interceptor): +def test_get_iam_policy_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -14748,15 +22774,17 @@ def test_list_clusters_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_list_clusters" + transports.BigtableInstanceAdminRestInterceptor, "post_get_iam_policy" ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_list_clusters" + transports.BigtableInstanceAdminRestInterceptor, + "post_get_iam_policy_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_get_iam_policy" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = bigtable_instance_admin.ListClustersRequest.pb( - bigtable_instance_admin.ListClustersRequest() - ) + post_with_metadata.assert_not_called() + pb_message = iam_policy_pb2.GetIamPolicyRequest() transcode.return_value = { "method": "post", "uri": "my_uri", @@ -14766,20 +22794,20 @@ def test_list_clusters_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 - return_value = bigtable_instance_admin.ListClustersResponse.to_json( - bigtable_instance_admin.ListClustersResponse() - ) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(policy_pb2.Policy()) req.return_value.content = return_value - request = bigtable_instance_admin.ListClustersRequest() + request = iam_policy_pb2.GetIamPolicyRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = bigtable_instance_admin.ListClustersResponse() + post.return_value = policy_pb2.Policy() + post_with_metadata.return_value = policy_pb2.Policy(), metadata - client.list_clusters( + client.get_iam_policy( request, metadata=[ ("key", "val"), @@ -14789,14 +22817,17 @@ def test_list_clusters_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() -def test_update_cluster_rest_bad_request(request_type=instance.Cluster): +def test_set_iam_policy_rest_bad_request( + request_type=iam_policy_pb2.SetIamPolicyRequest, +): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/clusters/sample3"} + request_init = {"resource": "projects/sample1/instances/sample2"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
@@ -14810,29 +22841,33 @@ def test_update_cluster_rest_bad_request(request_type=instance.Cluster): response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value - client.update_cluster(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.set_iam_policy(request) @pytest.mark.parametrize( "request_type", [ - instance.Cluster, + iam_policy_pb2.SetIamPolicyRequest, dict, ], ) -def test_update_cluster_rest_call_success(request_type): +def test_set_iam_policy_rest_call_success(request_type): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/clusters/sample3"} + request_init = {"resource": "projects/sample1/instances/sample2"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") + return_value = policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) # Wrap the value into a proper Response obj response_value = mock.Mock() @@ -14840,14 +22875,17 @@ def test_update_cluster_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.update_cluster(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.set_iam_policy(request) # Establish that the response is the type that we expect. 
- json_return_value = json_format.MessageToJson(return_value) + assert isinstance(response, policy_pb2.Policy) + assert response.version == 774 + assert response.etag == b"etag_blob" @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_update_cluster_rest_interceptors(null_interceptor): +def test_set_iam_policy_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -14861,15 +22899,17 @@ def test_update_cluster_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_update_cluster" + transports.BigtableInstanceAdminRestInterceptor, "post_set_iam_policy" ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_update_cluster" + transports.BigtableInstanceAdminRestInterceptor, + "post_set_iam_policy_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_set_iam_policy" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = instance.Cluster.pb(instance.Cluster()) + post_with_metadata.assert_not_called() + pb_message = iam_policy_pb2.SetIamPolicyRequest() transcode.return_value = { "method": "post", "uri": "my_uri", @@ -14879,18 +22919,20 @@ def test_update_cluster_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 - return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(policy_pb2.Policy()) req.return_value.content = return_value - request = instance.Cluster() + request = iam_policy_pb2.SetIamPolicyRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() + post.return_value = policy_pb2.Policy() + post_with_metadata.return_value = policy_pb2.Policy(), metadata - client.update_cluster( + client.set_iam_policy( request, metadata=[ ("key", "val"), @@ -14900,18 +22942,17 @@ def test_update_cluster_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() -def test_partial_update_cluster_rest_bad_request( - request_type=bigtable_instance_admin.PartialUpdateClusterRequest, +def test_test_iam_permissions_rest_bad_request( + request_type=iam_policy_pb2.TestIamPermissionsRequest, ): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = { - "cluster": {"name": "projects/sample1/instances/sample2/clusters/sample3"} - } + request_init = {"resource": "projects/sample1/instances/sample2"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
@@ -14925,121 +22966,32 @@ def test_partial_update_cluster_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value - client.partial_update_cluster(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.test_iam_permissions(request) @pytest.mark.parametrize( "request_type", [ - bigtable_instance_admin.PartialUpdateClusterRequest, + iam_policy_pb2.TestIamPermissionsRequest, dict, ], ) -def test_partial_update_cluster_rest_call_success(request_type): +def test_test_iam_permissions_rest_call_success(request_type): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = { - "cluster": {"name": "projects/sample1/instances/sample2/clusters/sample3"} - } - request_init["cluster"] = { - "name": "projects/sample1/instances/sample2/clusters/sample3", - "location": "location_value", - "state": 1, - "serve_nodes": 1181, - "node_scaling_factor": 1, - "cluster_config": { - "cluster_autoscaling_config": { - "autoscaling_limits": { - "min_serve_nodes": 1600, - "max_serve_nodes": 1602, - }, - "autoscaling_targets": { - "cpu_utilization_percent": 2483, - "storage_utilization_gib_per_node": 3404, - }, - } - }, - "default_storage_type": 1, - "encryption_config": {"kms_key_name": "kms_key_name_value"}, - } - # The version of a generated dependency at test runtime may differ from the version used during generation. - # Delete any fields which are not present in the current runtime dependency - # See https://github.com/googleapis/gapic-generator-python/issues/1748 - - # Determine if the message type is proto-plus or protobuf - test_field = bigtable_instance_admin.PartialUpdateClusterRequest.meta.fields[ - "cluster" - ] - - def get_message_fields(field): - # Given a field which is a message (composite type), return a list with - # all the fields of the message. - # If the field is not a composite type, return an empty list. 
- message_fields = [] - - if hasattr(field, "message") and field.message: - is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") - - if is_field_type_proto_plus_type: - message_fields = field.message.meta.fields.values() - # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types - else: # pragma: NO COVER - message_fields = field.message.DESCRIPTOR.fields - return message_fields - - runtime_nested_fields = [ - (field.name, nested_field.name) - for field in get_message_fields(test_field) - for nested_field in get_message_fields(field) - ] - - subfields_not_in_runtime = [] - - # For each item in the sample request, create a list of sub fields which are not present at runtime - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["cluster"].items(): # pragma: NO COVER - result = None - is_repeated = False - # For repeated fields - if isinstance(value, list) and len(value): - is_repeated = True - result = value[0] - # For fields where the type is another message - if isinstance(value, dict): - result = value - - if result and hasattr(result, "keys"): - for subfield in result.keys(): - if (field, subfield) not in runtime_nested_fields: - subfields_not_in_runtime.append( - { - "field": field, - "subfield": subfield, - "is_repeated": is_repeated, - } - ) - - # Remove fields from the sample request which are not present in the runtime version of the dependency - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER - field = subfield_to_delete.get("field") - field_repeated = subfield_to_delete.get("is_repeated") - subfield = subfield_to_delete.get("subfield") - if subfield: - if field_repeated: - for i in range(0, len(request_init["cluster"][field])): - del request_init["cluster"][field][i][subfield] - else: - del request_init["cluster"][field][subfield] + request_init = {"resource": "projects/sample1/instances/sample2"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") + return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) # Wrap the value into a proper Response obj response_value = mock.Mock() @@ -15047,14 +22999,16 @@ def get_message_fields(field): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.partial_update_cluster(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.test_iam_permissions(request) # Establish that the response is the type that we expect. 
- json_return_value = json_format.MessageToJson(return_value) + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + assert response.permissions == ["permissions_value"] @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_partial_update_cluster_rest_interceptors(null_interceptor): +def test_test_iam_permissions_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -15068,17 +23022,17 @@ def test_partial_update_cluster_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_partial_update_cluster" + transports.BigtableInstanceAdminRestInterceptor, "post_test_iam_permissions" ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_partial_update_cluster" + transports.BigtableInstanceAdminRestInterceptor, + "post_test_iam_permissions_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_test_iam_permissions" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = bigtable_instance_admin.PartialUpdateClusterRequest.pb( - bigtable_instance_admin.PartialUpdateClusterRequest() - ) + post_with_metadata.assert_not_called() + pb_message = iam_policy_pb2.TestIamPermissionsRequest() transcode.return_value = { "method": "post", "uri": "my_uri", @@ -15088,18 +23042,25 @@ def test_partial_update_cluster_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 - return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson( + iam_policy_pb2.TestIamPermissionsResponse() + ) req.return_value.content = return_value - request = bigtable_instance_admin.PartialUpdateClusterRequest() + request = iam_policy_pb2.TestIamPermissionsRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() + post.return_value = iam_policy_pb2.TestIamPermissionsResponse() + post_with_metadata.return_value = ( + iam_policy_pb2.TestIamPermissionsResponse(), + metadata, + ) - client.partial_update_cluster( + client.test_iam_permissions( request, metadata=[ ("key", "val"), @@ -15109,16 +23070,17 @@ def test_partial_update_cluster_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() -def test_delete_cluster_rest_bad_request( - request_type=bigtable_instance_admin.DeleteClusterRequest, +def test_list_hot_tablets_rest_bad_request( + request_type=bigtable_instance_admin.ListHotTabletsRequest, ): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/clusters/sample3"} + request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
@@ -15132,44 +23094,52 @@ def test_delete_cluster_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value - client.delete_cluster(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.list_hot_tablets(request) @pytest.mark.parametrize( "request_type", [ - bigtable_instance_admin.DeleteClusterRequest, + bigtable_instance_admin.ListHotTabletsRequest, dict, ], ) -def test_delete_cluster_rest_call_success(request_type): +def test_list_hot_tablets_rest_call_success(request_type): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/clusters/sample3"} + request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = None + return_value = bigtable_instance_admin.ListHotTabletsResponse( + next_page_token="next_page_token_value", + ) # Wrap the value into a proper Response obj response_value = mock.Mock() response_value.status_code = 200 - json_return_value = "" + + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListHotTabletsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.delete_cluster(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.list_hot_tablets(request) # Establish that the response is the type that we expect. 
- assert response is None + assert isinstance(response, pagers.ListHotTabletsPager) + assert response.next_page_token == "next_page_token_value" @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_delete_cluster_rest_interceptors(null_interceptor): +def test_list_hot_tablets_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -15183,11 +23153,18 @@ def test_delete_cluster_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_delete_cluster" + transports.BigtableInstanceAdminRestInterceptor, "post_list_hot_tablets" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, + "post_list_hot_tablets_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_list_hot_tablets" ) as pre: pre.assert_not_called() - pb_message = bigtable_instance_admin.DeleteClusterRequest.pb( - bigtable_instance_admin.DeleteClusterRequest() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = bigtable_instance_admin.ListHotTabletsRequest.pb( + bigtable_instance_admin.ListHotTabletsRequest() ) transcode.return_value = { "method": "post", @@ -15198,15 +23175,25 @@ def test_delete_cluster_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = bigtable_instance_admin.ListHotTabletsResponse.to_json( + bigtable_instance_admin.ListHotTabletsResponse() + ) + req.return_value.content = return_value - request = bigtable_instance_admin.DeleteClusterRequest() + request = bigtable_instance_admin.ListHotTabletsRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata + post.return_value = bigtable_instance_admin.ListHotTabletsResponse() + post_with_metadata.return_value = ( + bigtable_instance_admin.ListHotTabletsResponse(), + metadata, + ) - client.delete_cluster( + client.list_hot_tablets( request, metadata=[ ("key", "val"), @@ -15215,10 +23202,12 @@ def test_delete_cluster_rest_interceptors(null_interceptor): ) pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() -def test_create_app_profile_rest_bad_request( - request_type=bigtable_instance_admin.CreateAppProfileRequest, +def test_create_logical_view_rest_bad_request( + request_type=bigtable_instance_admin.CreateLogicalViewRequest, ): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" @@ -15238,46 +23227,36 @@ def test_create_app_profile_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value - client.create_app_profile(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.create_logical_view(request) @pytest.mark.parametrize( "request_type", [ - bigtable_instance_admin.CreateAppProfileRequest, + bigtable_instance_admin.CreateLogicalViewRequest, dict, ], ) -def test_create_app_profile_rest_call_success(request_type): +def test_create_logical_view_rest_call_success(request_type): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy 
transcoding request_init = {"parent": "projects/sample1/instances/sample2"} - request_init["app_profile"] = { + request_init["logical_view"] = { "name": "name_value", + "query": "query_value", "etag": "etag_value", - "description": "description_value", - "multi_cluster_routing_use_any": { - "cluster_ids": ["cluster_ids_value1", "cluster_ids_value2"], - "row_affinity": {}, - }, - "single_cluster_routing": { - "cluster_id": "cluster_id_value", - "allow_transactional_writes": True, - }, - "priority": 1, - "standard_isolation": {"priority": 1}, - "data_boost_isolation_read_only": {"compute_billing_owner": 1}, } # The version of a generated dependency at test runtime may differ from the version used during generation. # Delete any fields which are not present in the current runtime dependency # See https://github.com/googleapis/gapic-generator-python/issues/1748 # Determine if the message type is proto-plus or protobuf - test_field = bigtable_instance_admin.CreateAppProfileRequest.meta.fields[ - "app_profile" + test_field = bigtable_instance_admin.CreateLogicalViewRequest.meta.fields[ + "logical_view" ] def get_message_fields(field): @@ -15306,7 +23285,7 @@ def get_message_fields(field): # For each item in the sample request, create a list of sub fields which are not present at runtime # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["app_profile"].items(): # pragma: NO COVER + for field, value in request_init["logical_view"].items(): # pragma: NO COVER result = None is_repeated = False # For repeated fields @@ -15336,42 +23315,32 @@ def get_message_fields(field): subfield = subfield_to_delete.get("subfield") if subfield: if field_repeated: - for i in range(0, len(request_init["app_profile"][field])): - del request_init["app_profile"][field][i][subfield] + for i in range(0, len(request_init["logical_view"][field])): + del request_init["logical_view"][field][i][subfield] else: - del request_init["app_profile"][field][subfield] + del request_init["logical_view"][field][subfield] request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = instance.AppProfile( - name="name_value", - etag="etag_value", - description="description_value", - priority=instance.AppProfile.Priority.PRIORITY_LOW, - ) + return_value = operations_pb2.Operation(name="operations/spam") # Wrap the value into a proper Response obj response_value = mock.Mock() response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = instance.AppProfile.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.create_app_profile(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.create_logical_view(request) # Establish that the response is the type that we expect. 
- assert isinstance(response, instance.AppProfile) - assert response.name == "name_value" - assert response.etag == "etag_value" - assert response.description == "description_value" + json_return_value = json_format.MessageToJson(return_value) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_create_app_profile_rest_interceptors(null_interceptor): +def test_create_logical_view_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -15385,14 +23354,20 @@ def test_create_app_profile_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_create_app_profile" + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_create_logical_view" ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_create_app_profile" + transports.BigtableInstanceAdminRestInterceptor, + "post_create_logical_view_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_create_logical_view" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = bigtable_instance_admin.CreateAppProfileRequest.pb( - bigtable_instance_admin.CreateAppProfileRequest() + post_with_metadata.assert_not_called() + pb_message = bigtable_instance_admin.CreateLogicalViewRequest.pb( + bigtable_instance_admin.CreateLogicalViewRequest() ) transcode.return_value = { "method": "post", @@ -15403,18 +23378,20 @@ def test_create_app_profile_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 - return_value = instance.AppProfile.to_json(instance.AppProfile()) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value - request = bigtable_instance_admin.CreateAppProfileRequest() + request = bigtable_instance_admin.CreateLogicalViewRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = instance.AppProfile() + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata - client.create_app_profile( + client.create_logical_view( request, metadata=[ ("key", "val"), @@ -15424,16 +23401,17 @@ def test_create_app_profile_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() -def test_get_app_profile_rest_bad_request( - request_type=bigtable_instance_admin.GetAppProfileRequest, +def test_get_logical_view_rest_bad_request( + request_type=bigtable_instance_admin.GetLogicalViewRequest, ): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/appProfiles/sample3"} + request_init = {"name": "projects/sample1/instances/sample2/logicalViews/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
@@ -15447,33 +23425,33 @@ def test_get_app_profile_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value - client.get_app_profile(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.get_logical_view(request) @pytest.mark.parametrize( "request_type", [ - bigtable_instance_admin.GetAppProfileRequest, + bigtable_instance_admin.GetLogicalViewRequest, dict, ], ) -def test_get_app_profile_rest_call_success(request_type): +def test_get_logical_view_rest_call_success(request_type): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/appProfiles/sample3"} + request_init = {"name": "projects/sample1/instances/sample2/logicalViews/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = instance.AppProfile( + return_value = instance.LogicalView( name="name_value", + query="query_value", etag="etag_value", - description="description_value", - priority=instance.AppProfile.Priority.PRIORITY_LOW, ) # Wrap the value into a proper Response obj @@ -15481,21 +23459,22 @@ def test_get_app_profile_rest_call_success(request_type): response_value.status_code = 200 # Convert return value to protobuf type - return_value = instance.AppProfile.pb(return_value) + return_value = instance.LogicalView.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.get_app_profile(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.get_logical_view(request) # Establish that the response is the type that we expect. 
- assert isinstance(response, instance.AppProfile) + assert isinstance(response, instance.LogicalView) assert response.name == "name_value" + assert response.query == "query_value" assert response.etag == "etag_value" - assert response.description == "description_value" @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_app_profile_rest_interceptors(null_interceptor): +def test_get_logical_view_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -15509,14 +23488,18 @@ def test_get_app_profile_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_get_app_profile" + transports.BigtableInstanceAdminRestInterceptor, "post_get_logical_view" ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_get_app_profile" + transports.BigtableInstanceAdminRestInterceptor, + "post_get_logical_view_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_get_logical_view" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = bigtable_instance_admin.GetAppProfileRequest.pb( - bigtable_instance_admin.GetAppProfileRequest() + post_with_metadata.assert_not_called() + pb_message = bigtable_instance_admin.GetLogicalViewRequest.pb( + bigtable_instance_admin.GetLogicalViewRequest() ) transcode.return_value = { "method": "post", @@ -15527,18 +23510,20 @@ def test_get_app_profile_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 - return_value = instance.AppProfile.to_json(instance.AppProfile()) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = instance.LogicalView.to_json(instance.LogicalView()) req.return_value.content = return_value - request = bigtable_instance_admin.GetAppProfileRequest() + request = bigtable_instance_admin.GetLogicalViewRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = instance.AppProfile() + post.return_value = instance.LogicalView() + post_with_metadata.return_value = instance.LogicalView(), metadata - client.get_app_profile( + client.get_logical_view( request, metadata=[ ("key", "val"), @@ -15548,10 +23533,11 @@ def test_get_app_profile_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() -def test_list_app_profiles_rest_bad_request( - request_type=bigtable_instance_admin.ListAppProfilesRequest, +def test_list_logical_views_rest_bad_request( + request_type=bigtable_instance_admin.ListLogicalViewsRequest, ): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" @@ -15571,17 +23557,18 @@ def test_list_app_profiles_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value - client.list_app_profiles(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.list_logical_views(request) @pytest.mark.parametrize( "request_type", [ - bigtable_instance_admin.ListAppProfilesRequest, + bigtable_instance_admin.ListLogicalViewsRequest, dict, ], ) -def test_list_app_profiles_rest_call_success(request_type): +def 
test_list_logical_views_rest_call_success(request_type): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -15593,9 +23580,8 @@ def test_list_app_profiles_rest_call_success(request_type): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListAppProfilesResponse( + return_value = bigtable_instance_admin.ListLogicalViewsResponse( next_page_token="next_page_token_value", - failed_locations=["failed_locations_value"], ) # Wrap the value into a proper Response obj @@ -15603,20 +23589,20 @@ def test_list_app_profiles_rest_call_success(request_type): response_value.status_code = 200 # Convert return value to protobuf type - return_value = bigtable_instance_admin.ListAppProfilesResponse.pb(return_value) + return_value = bigtable_instance_admin.ListLogicalViewsResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.list_app_profiles(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.list_logical_views(request) # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListAppProfilesPager) + assert isinstance(response, pagers.ListLogicalViewsPager) assert response.next_page_token == "next_page_token_value" - assert response.failed_locations == ["failed_locations_value"] @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_app_profiles_rest_interceptors(null_interceptor): +def test_list_logical_views_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -15630,14 +23616,18 @@ def test_list_app_profiles_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_list_app_profiles" + transports.BigtableInstanceAdminRestInterceptor, "post_list_logical_views" ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_list_app_profiles" + transports.BigtableInstanceAdminRestInterceptor, + "post_list_logical_views_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_list_logical_views" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = bigtable_instance_admin.ListAppProfilesRequest.pb( - bigtable_instance_admin.ListAppProfilesRequest() + post_with_metadata.assert_not_called() + pb_message = bigtable_instance_admin.ListLogicalViewsRequest.pb( + bigtable_instance_admin.ListLogicalViewsRequest() ) transcode.return_value = { "method": "post", @@ -15648,20 +23638,25 @@ def test_list_app_profiles_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 - return_value = bigtable_instance_admin.ListAppProfilesResponse.to_json( - bigtable_instance_admin.ListAppProfilesResponse() + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = bigtable_instance_admin.ListLogicalViewsResponse.to_json( + bigtable_instance_admin.ListLogicalViewsResponse() ) req.return_value.content = return_value - 
request = bigtable_instance_admin.ListAppProfilesRequest() + request = bigtable_instance_admin.ListLogicalViewsRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = bigtable_instance_admin.ListAppProfilesResponse() + post.return_value = bigtable_instance_admin.ListLogicalViewsResponse() + post_with_metadata.return_value = ( + bigtable_instance_admin.ListLogicalViewsResponse(), + metadata, + ) - client.list_app_profiles( + client.list_logical_views( request, metadata=[ ("key", "val"), @@ -15671,18 +23666,19 @@ def test_list_app_profiles_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() -def test_update_app_profile_rest_bad_request( - request_type=bigtable_instance_admin.UpdateAppProfileRequest, +def test_update_logical_view_rest_bad_request( + request_type=bigtable_instance_admin.UpdateLogicalViewRequest, ): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding request_init = { - "app_profile": { - "name": "projects/sample1/instances/sample2/appProfiles/sample3" + "logical_view": { + "name": "projects/sample1/instances/sample2/logicalViews/sample3" } } request = request_type(**request_init) @@ -15698,50 +23694,40 @@ def test_update_app_profile_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value - client.update_app_profile(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.update_logical_view(request) @pytest.mark.parametrize( "request_type", [ - bigtable_instance_admin.UpdateAppProfileRequest, + bigtable_instance_admin.UpdateLogicalViewRequest, dict, ], ) -def test_update_app_profile_rest_call_success(request_type): +def test_update_logical_view_rest_call_success(request_type): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding request_init = { - "app_profile": { - "name": "projects/sample1/instances/sample2/appProfiles/sample3" + "logical_view": { + "name": "projects/sample1/instances/sample2/logicalViews/sample3" } } - request_init["app_profile"] = { - "name": "projects/sample1/instances/sample2/appProfiles/sample3", + request_init["logical_view"] = { + "name": "projects/sample1/instances/sample2/logicalViews/sample3", + "query": "query_value", "etag": "etag_value", - "description": "description_value", - "multi_cluster_routing_use_any": { - "cluster_ids": ["cluster_ids_value1", "cluster_ids_value2"], - "row_affinity": {}, - }, - "single_cluster_routing": { - "cluster_id": "cluster_id_value", - "allow_transactional_writes": True, - }, - "priority": 1, - "standard_isolation": {"priority": 1}, - "data_boost_isolation_read_only": {"compute_billing_owner": 1}, } # The version of a generated dependency at test runtime may differ from the version used during generation. 
# Delete any fields which are not present in the current runtime dependency # See https://github.com/googleapis/gapic-generator-python/issues/1748 # Determine if the message type is proto-plus or protobuf - test_field = bigtable_instance_admin.UpdateAppProfileRequest.meta.fields[ - "app_profile" + test_field = bigtable_instance_admin.UpdateLogicalViewRequest.meta.fields[ + "logical_view" ] def get_message_fields(field): @@ -15770,7 +23756,7 @@ def get_message_fields(field): # For each item in the sample request, create a list of sub fields which are not present at runtime # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["app_profile"].items(): # pragma: NO COVER + for field, value in request_init["logical_view"].items(): # pragma: NO COVER result = None is_repeated = False # For repeated fields @@ -15800,10 +23786,10 @@ def get_message_fields(field): subfield = subfield_to_delete.get("subfield") if subfield: if field_repeated: - for i in range(0, len(request_init["app_profile"][field])): - del request_init["app_profile"][field][i][subfield] + for i in range(0, len(request_init["logical_view"][field])): + del request_init["logical_view"][field][i][subfield] else: - del request_init["app_profile"][field][subfield] + del request_init["logical_view"][field][subfield] request = request_type(**request_init) # Mock the http request call within the method and fake a response. @@ -15817,14 +23803,15 @@ def get_message_fields(field): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.update_app_profile(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.update_logical_view(request) # Establish that the response is the type that we expect. 
json_return_value = json_format.MessageToJson(return_value) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_update_app_profile_rest_interceptors(null_interceptor): +def test_update_logical_view_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -15840,14 +23827,18 @@ def test_update_app_profile_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( operation.Operation, "_set_result_from_operation" ), mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_update_app_profile" + transports.BigtableInstanceAdminRestInterceptor, "post_update_logical_view" ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_update_app_profile" + transports.BigtableInstanceAdminRestInterceptor, + "post_update_logical_view_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_update_logical_view" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = bigtable_instance_admin.UpdateAppProfileRequest.pb( - bigtable_instance_admin.UpdateAppProfileRequest() + post_with_metadata.assert_not_called() + pb_message = bigtable_instance_admin.UpdateLogicalViewRequest.pb( + bigtable_instance_admin.UpdateLogicalViewRequest() ) transcode.return_value = { "method": "post", @@ -15858,18 +23849,20 @@ def test_update_app_profile_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value - request = bigtable_instance_admin.UpdateAppProfileRequest() + request = bigtable_instance_admin.UpdateLogicalViewRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata - client.update_app_profile( + client.update_logical_view( request, metadata=[ ("key", "val"), @@ -15879,67 +23872,254 @@ def test_update_app_profile_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() -def test_delete_app_profile_rest_bad_request( - request_type=bigtable_instance_admin.DeleteAppProfileRequest, +def test_delete_logical_view_rest_bad_request( + request_type=bigtable_instance_admin.DeleteLogicalViewRequest, ): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/appProfiles/sample3"} + request_init = {"name": "projects/sample1/instances/sample2/logicalViews/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.delete_logical_view(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.DeleteLogicalViewRequest, + dict, + ], +) +def test_delete_logical_view_rest_call_success(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/logicalViews/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = "" + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.delete_logical_view(request) + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_delete_logical_view_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_delete_logical_view" + ) as pre: + pre.assert_not_called() + pb_message = bigtable_instance_admin.DeleteLogicalViewRequest.pb( + bigtable_instance_admin.DeleteLogicalViewRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + request = bigtable_instance_admin.DeleteLogicalViewRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + + client.delete_logical_view( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + + +def test_create_materialized_view_rest_bad_request( + request_type=bigtable_instance_admin.CreateMaterializedViewRequest, +): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} request = request_type(**request_init) - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - client.delete_app_profile(request) + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.create_materialized_view(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_instance_admin.CreateMaterializedViewRequest, + dict, + ], +) +def test_create_materialized_view_rest_call_success(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} + request_init["materialized_view"] = { + "name": "name_value", + "query": "query_value", + "etag": "etag_value", + "deletion_protection": True, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_instance_admin.CreateMaterializedViewRequest.meta.fields[ + "materialized_view" + ] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + subfields_not_in_runtime = [] -@pytest.mark.parametrize( - "request_type", - [ - bigtable_instance_admin.DeleteAppProfileRequest, - dict, - ], -) -def test_delete_app_profile_rest_call_success(request_type): - client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["materialized_view"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/appProfiles/sample3"} + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["materialized_view"][field])): + del request_init["materialized_view"][field][i][subfield] + else: + del request_init["materialized_view"][field][subfield] request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = None + return_value = operations_pb2.Operation(name="operations/spam") # Wrap the value into a proper Response obj response_value = mock.Mock() response_value.status_code = 200 - json_return_value = "" + json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.delete_app_profile(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.create_materialized_view(request) # Establish that the response is the type that we expect. 
- assert response is None + json_return_value = json_format.MessageToJson(return_value) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_delete_app_profile_rest_interceptors(null_interceptor): +def test_create_materialized_view_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -15953,11 +24133,20 @@ def test_delete_app_profile_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_delete_app_profile" + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_create_materialized_view" + ) as post, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, + "post_create_materialized_view_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_create_materialized_view" ) as pre: pre.assert_not_called() - pb_message = bigtable_instance_admin.DeleteAppProfileRequest.pb( - bigtable_instance_admin.DeleteAppProfileRequest() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = bigtable_instance_admin.CreateMaterializedViewRequest.pb( + bigtable_instance_admin.CreateMaterializedViewRequest() ) transcode.return_value = { "method": "post", @@ -15968,15 +24157,20 @@ def test_delete_app_profile_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value - request = bigtable_instance_admin.DeleteAppProfileRequest() + request = bigtable_instance_admin.CreateMaterializedViewRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata - client.delete_app_profile( + client.create_materialized_view( request, metadata=[ ("key", "val"), @@ -15985,16 +24179,20 @@ def test_delete_app_profile_rest_interceptors(null_interceptor): ) pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() -def test_get_iam_policy_rest_bad_request( - request_type=iam_policy_pb2.GetIamPolicyRequest, +def test_get_materialized_view_rest_bad_request( + request_type=bigtable_instance_admin.GetMaterializedViewRequest, ): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"resource": "projects/sample1/instances/sample2"} + request_init = { + "name": "projects/sample1/instances/sample2/materializedViews/sample3" + } request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
@@ -16008,49 +24206,60 @@ def test_get_iam_policy_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value - client.get_iam_policy(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.get_materialized_view(request) @pytest.mark.parametrize( "request_type", [ - iam_policy_pb2.GetIamPolicyRequest, + bigtable_instance_admin.GetMaterializedViewRequest, dict, ], ) -def test_get_iam_policy_rest_call_success(request_type): +def test_get_materialized_view_rest_call_success(request_type): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"resource": "projects/sample1/instances/sample2"} + request_init = { + "name": "projects/sample1/instances/sample2/materializedViews/sample3" + } request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = policy_pb2.Policy( - version=774, - etag=b"etag_blob", + return_value = instance.MaterializedView( + name="name_value", + query="query_value", + etag="etag_value", + deletion_protection=True, ) # Wrap the value into a proper Response obj response_value = mock.Mock() response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = instance.MaterializedView.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.get_iam_policy(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.get_materialized_view(request) # Establish that the response is the type that we expect. 
- assert isinstance(response, policy_pb2.Policy) - assert response.version == 774 - assert response.etag == b"etag_blob" + assert isinstance(response, instance.MaterializedView) + assert response.name == "name_value" + assert response.query == "query_value" + assert response.etag == "etag_value" + assert response.deletion_protection is True @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_iam_policy_rest_interceptors(null_interceptor): +def test_get_materialized_view_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -16064,13 +24273,19 @@ def test_get_iam_policy_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_get_iam_policy" + transports.BigtableInstanceAdminRestInterceptor, "post_get_materialized_view" ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_get_iam_policy" + transports.BigtableInstanceAdminRestInterceptor, + "post_get_materialized_view_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_get_materialized_view" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = iam_policy_pb2.GetIamPolicyRequest() + post_with_metadata.assert_not_called() + pb_message = bigtable_instance_admin.GetMaterializedViewRequest.pb( + bigtable_instance_admin.GetMaterializedViewRequest() + ) transcode.return_value = { "method": "post", "uri": "my_uri", @@ -16080,18 +24295,20 @@ def test_get_iam_policy_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 - return_value = json_format.MessageToJson(policy_pb2.Policy()) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = instance.MaterializedView.to_json(instance.MaterializedView()) req.return_value.content = return_value - request = iam_policy_pb2.GetIamPolicyRequest() + request = bigtable_instance_admin.GetMaterializedViewRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = policy_pb2.Policy() + post.return_value = instance.MaterializedView() + post_with_metadata.return_value = instance.MaterializedView(), metadata - client.get_iam_policy( + client.get_materialized_view( request, metadata=[ ("key", "val"), @@ -16101,16 +24318,17 @@ def test_get_iam_policy_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() -def test_set_iam_policy_rest_bad_request( - request_type=iam_policy_pb2.SetIamPolicyRequest, +def test_list_materialized_views_rest_bad_request( + request_type=bigtable_instance_admin.ListMaterializedViewsRequest, ): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"resource": "projects/sample1/instances/sample2"} + request_init = {"parent": "projects/sample1/instances/sample2"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
@@ -16124,49 +24342,54 @@ def test_set_iam_policy_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value - client.set_iam_policy(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.list_materialized_views(request) @pytest.mark.parametrize( "request_type", [ - iam_policy_pb2.SetIamPolicyRequest, + bigtable_instance_admin.ListMaterializedViewsRequest, dict, ], ) -def test_set_iam_policy_rest_call_success(request_type): +def test_list_materialized_views_rest_call_success(request_type): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"resource": "projects/sample1/instances/sample2"} + request_init = {"parent": "projects/sample1/instances/sample2"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = policy_pb2.Policy( - version=774, - etag=b"etag_blob", + return_value = bigtable_instance_admin.ListMaterializedViewsResponse( + next_page_token="next_page_token_value", ) # Wrap the value into a proper Response obj response_value = mock.Mock() response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListMaterializedViewsResponse.pb( + return_value + ) json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.set_iam_policy(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.list_materialized_views(request) # Establish that the response is the type that we expect. 
- assert isinstance(response, policy_pb2.Policy) - assert response.version == 774 - assert response.etag == b"etag_blob" + assert isinstance(response, pagers.ListMaterializedViewsPager) + assert response.next_page_token == "next_page_token_value" @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_set_iam_policy_rest_interceptors(null_interceptor): +def test_list_materialized_views_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -16180,13 +24403,19 @@ def test_set_iam_policy_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_set_iam_policy" + transports.BigtableInstanceAdminRestInterceptor, "post_list_materialized_views" ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_set_iam_policy" + transports.BigtableInstanceAdminRestInterceptor, + "post_list_materialized_views_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_list_materialized_views" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = iam_policy_pb2.SetIamPolicyRequest() + post_with_metadata.assert_not_called() + pb_message = bigtable_instance_admin.ListMaterializedViewsRequest.pb( + bigtable_instance_admin.ListMaterializedViewsRequest() + ) transcode.return_value = { "method": "post", "uri": "my_uri", @@ -16196,18 +24425,25 @@ def test_set_iam_policy_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 - return_value = json_format.MessageToJson(policy_pb2.Policy()) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = bigtable_instance_admin.ListMaterializedViewsResponse.to_json( + bigtable_instance_admin.ListMaterializedViewsResponse() + ) req.return_value.content = return_value - request = iam_policy_pb2.SetIamPolicyRequest() + request = bigtable_instance_admin.ListMaterializedViewsRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = policy_pb2.Policy() + post.return_value = bigtable_instance_admin.ListMaterializedViewsResponse() + post_with_metadata.return_value = ( + bigtable_instance_admin.ListMaterializedViewsResponse(), + metadata, + ) - client.set_iam_policy( + client.list_materialized_views( request, metadata=[ ("key", "val"), @@ -16217,16 +24453,21 @@ def test_set_iam_policy_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() -def test_test_iam_permissions_rest_bad_request( - request_type=iam_policy_pb2.TestIamPermissionsRequest, +def test_update_materialized_view_rest_bad_request( + request_type=bigtable_instance_admin.UpdateMaterializedViewRequest, ): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"resource": "projects/sample1/instances/sample2"} + request_init = { + "materialized_view": { + "name": "projects/sample1/instances/sample2/materializedViews/sample3" + } + } request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
@@ -16240,31 +24481,109 @@ def test_test_iam_permissions_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value - client.test_iam_permissions(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.update_materialized_view(request) @pytest.mark.parametrize( "request_type", [ - iam_policy_pb2.TestIamPermissionsRequest, + bigtable_instance_admin.UpdateMaterializedViewRequest, dict, ], ) -def test_test_iam_permissions_rest_call_success(request_type): +def test_update_materialized_view_rest_call_success(request_type): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"resource": "projects/sample1/instances/sample2"} + request_init = { + "materialized_view": { + "name": "projects/sample1/instances/sample2/materializedViews/sample3" + } + } + request_init["materialized_view"] = { + "name": "projects/sample1/instances/sample2/materializedViews/sample3", + "query": "query_value", + "etag": "etag_value", + "deletion_protection": True, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_instance_admin.UpdateMaterializedViewRequest.meta.fields[ + "materialized_view" + ] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["materialized_view"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["materialized_view"][field])): + del request_init["materialized_view"][field][i][subfield] + else: + del request_init["materialized_view"][field][subfield] request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = iam_policy_pb2.TestIamPermissionsResponse( - permissions=["permissions_value"], - ) + return_value = operations_pb2.Operation(name="operations/spam") # Wrap the value into a proper Response obj response_value = mock.Mock() @@ -16272,15 +24591,15 @@ def test_test_iam_permissions_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.test_iam_permissions(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.update_materialized_view(request) # Establish that the response is the type that we expect. 
- assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) - assert response.permissions == ["permissions_value"] + json_return_value = json_format.MessageToJson(return_value) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_test_iam_permissions_rest_interceptors(null_interceptor): +def test_update_materialized_view_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -16294,13 +24613,21 @@ def test_test_iam_permissions_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_test_iam_permissions" + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "post_update_materialized_view" ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_test_iam_permissions" + transports.BigtableInstanceAdminRestInterceptor, + "post_update_materialized_view_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableInstanceAdminRestInterceptor, "pre_update_materialized_view" ) as pre: pre.assert_not_called() post.assert_not_called() - pb_message = iam_policy_pb2.TestIamPermissionsRequest() + post_with_metadata.assert_not_called() + pb_message = bigtable_instance_admin.UpdateMaterializedViewRequest.pb( + bigtable_instance_admin.UpdateMaterializedViewRequest() + ) transcode.return_value = { "method": "post", "uri": "my_uri", @@ -16310,20 +24637,20 @@ def test_test_iam_permissions_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 - return_value = json_format.MessageToJson( - iam_policy_pb2.TestIamPermissionsResponse() - ) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value - request = iam_policy_pb2.TestIamPermissionsRequest() + request = bigtable_instance_admin.UpdateMaterializedViewRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = iam_policy_pb2.TestIamPermissionsResponse() + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata - client.test_iam_permissions( + client.update_materialized_view( request, metadata=[ ("key", "val"), @@ -16333,16 +24660,19 @@ def test_test_iam_permissions_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() -def test_list_hot_tablets_rest_bad_request( - request_type=bigtable_instance_admin.ListHotTabletsRequest, +def test_delete_materialized_view_rest_bad_request( + request_type=bigtable_instance_admin.DeleteMaterializedViewRequest, ): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} + request_init = { + "name": "projects/sample1/instances/sample2/materializedViews/sample3" + } request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
@@ -16356,50 +24686,48 @@ def test_list_hot_tablets_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value - client.list_hot_tablets(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.delete_materialized_view(request) @pytest.mark.parametrize( "request_type", [ - bigtable_instance_admin.ListHotTabletsRequest, + bigtable_instance_admin.DeleteMaterializedViewRequest, dict, ], ) -def test_list_hot_tablets_rest_call_success(request_type): +def test_delete_materialized_view_rest_call_success(request_type): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} + request_init = { + "name": "projects/sample1/instances/sample2/materializedViews/sample3" + } request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListHotTabletsResponse( - next_page_token="next_page_token_value", - ) + return_value = None # Wrap the value into a proper Response obj response_value = mock.Mock() response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable_instance_admin.ListHotTabletsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) + json_return_value = "" response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value - response = client.list_hot_tablets(request) + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.delete_materialized_view(request) # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListHotTabletsPager) - assert response.next_page_token == "next_page_token_value" + assert response is None @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_hot_tablets_rest_interceptors(null_interceptor): +def test_delete_materialized_view_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -16413,14 +24741,11 @@ def test_list_hot_tablets_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "post_list_hot_tablets" - ) as post, mock.patch.object( - transports.BigtableInstanceAdminRestInterceptor, "pre_list_hot_tablets" + transports.BigtableInstanceAdminRestInterceptor, "pre_delete_materialized_view" ) as pre: pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_instance_admin.ListHotTabletsRequest.pb( - bigtable_instance_admin.ListHotTabletsRequest() + pb_message = bigtable_instance_admin.DeleteMaterializedViewRequest.pb( + bigtable_instance_admin.DeleteMaterializedViewRequest() ) transcode.return_value = { "method": "post", @@ -16431,20 +24756,16 @@ def test_list_hot_tablets_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 - return_value = bigtable_instance_admin.ListHotTabletsResponse.to_json( - bigtable_instance_admin.ListHotTabletsResponse() - ) - req.return_value.content = return_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - request = bigtable_instance_admin.ListHotTabletsRequest() + request = bigtable_instance_admin.DeleteMaterializedViewRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = bigtable_instance_admin.ListHotTabletsResponse() - client.list_hot_tablets( + client.delete_materialized_view( request, metadata=[ ("key", "val"), @@ -16453,7 +24774,6 @@ def test_list_hot_tablets_rest_interceptors(null_interceptor): ) pre.assert_called_once() - post.assert_called_once() def test_initialize_client_w_rest(): @@ -16897,6 +25217,224 @@ def test_list_hot_tablets_empty_call_rest(): assert args[0] == request_msg +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_logical_view_empty_call_rest(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_logical_view), "__call__" + ) as call: + client.create_logical_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.CreateLogicalViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_logical_view_empty_call_rest(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. 
+ with mock.patch.object(type(client.transport.get_logical_view), "__call__") as call: + client.get_logical_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.GetLogicalViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_logical_views_empty_call_rest(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_logical_views), "__call__" + ) as call: + client.list_logical_views(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.ListLogicalViewsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_update_logical_view_empty_call_rest(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.update_logical_view), "__call__" + ) as call: + client.update_logical_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.UpdateLogicalViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_logical_view_empty_call_rest(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.delete_logical_view), "__call__" + ) as call: + client.delete_logical_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.DeleteLogicalViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_materialized_view_empty_call_rest(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_materialized_view), "__call__" + ) as call: + client.create_materialized_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.CreateMaterializedViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+def test_get_materialized_view_empty_call_rest(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.get_materialized_view), "__call__" + ) as call: + client.get_materialized_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.GetMaterializedViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_materialized_views_empty_call_rest(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_materialized_views), "__call__" + ) as call: + client.list_materialized_views(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.ListMaterializedViewsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_update_materialized_view_empty_call_rest(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.update_materialized_view), "__call__" + ) as call: + client.update_materialized_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.UpdateMaterializedViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_materialized_view_empty_call_rest(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.delete_materialized_view), "__call__" + ) as call: + client.delete_materialized_view(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_instance_admin.DeleteMaterializedViewRequest() + + assert args[0] == request_msg + + def test_bigtable_instance_admin_rest_lro_client(): client = BigtableInstanceAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -16968,6 +25506,16 @@ def test_bigtable_instance_admin_base_transport(): "set_iam_policy", "test_iam_permissions", "list_hot_tablets", + "create_logical_view", + "get_logical_view", + "list_logical_views", + "update_logical_view", + "delete_logical_view", + "create_materialized_view", + "get_materialized_view", + "list_materialized_views", + "update_materialized_view", + "delete_materialized_view", ) for method in methods: with pytest.raises(NotImplementedError): @@ -17330,6 +25878,36 @@ def test_bigtable_instance_admin_client_transport_session_collision(transport_na session1 = client1.transport.list_hot_tablets._session session2 = client2.transport.list_hot_tablets._session assert session1 != session2 + session1 = client1.transport.create_logical_view._session + session2 = client2.transport.create_logical_view._session + assert session1 != session2 + session1 = client1.transport.get_logical_view._session + session2 = client2.transport.get_logical_view._session + assert session1 != session2 + session1 = client1.transport.list_logical_views._session + session2 = client2.transport.list_logical_views._session + assert session1 != session2 + session1 = client1.transport.update_logical_view._session + session2 = client2.transport.update_logical_view._session + assert session1 != session2 + session1 = client1.transport.delete_logical_view._session + session2 = client2.transport.delete_logical_view._session + assert session1 != session2 + session1 = client1.transport.create_materialized_view._session + session2 = client2.transport.create_materialized_view._session + assert session1 != session2 + session1 = client1.transport.get_materialized_view._session + session2 = client2.transport.get_materialized_view._session + assert session1 != session2 + session1 = client1.transport.list_materialized_views._session + session2 = client2.transport.list_materialized_views._session + assert session1 != session2 + session1 = client1.transport.update_materialized_view._session + session2 = client2.transport.update_materialized_view._session + assert session1 != session2 + session1 = client1.transport.delete_materialized_view._session + session2 = client2.transport.delete_materialized_view._session + assert session1 != session2 def test_bigtable_instance_admin_grpc_transport_channel(): @@ -17633,6 +26211,64 @@ def test_parse_instance_path(): assert expected == actual +def test_logical_view_path(): + project = "winkle" + instance = "nautilus" + logical_view = "scallop" + expected = ( + "projects/{project}/instances/{instance}/logicalViews/{logical_view}".format( + project=project, + instance=instance, + logical_view=logical_view, + ) + ) + actual = BigtableInstanceAdminClient.logical_view_path( + project, instance, logical_view + ) + assert expected == actual + + +def test_parse_logical_view_path(): + expected = { + "project": "abalone", + "instance": "squid", + "logical_view": "clam", + } + path = BigtableInstanceAdminClient.logical_view_path(**expected) + + # Check that the path construction is reversible. 
+ actual = BigtableInstanceAdminClient.parse_logical_view_path(path) + assert expected == actual + + +def test_materialized_view_path(): + project = "whelk" + instance = "octopus" + materialized_view = "oyster" + expected = "projects/{project}/instances/{instance}/materializedViews/{materialized_view}".format( + project=project, + instance=instance, + materialized_view=materialized_view, + ) + actual = BigtableInstanceAdminClient.materialized_view_path( + project, instance, materialized_view + ) + assert expected == actual + + +def test_parse_materialized_view_path(): + expected = { + "project": "nudibranch", + "instance": "cuttlefish", + "materialized_view": "mussel", + } + path = BigtableInstanceAdminClient.materialized_view_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableInstanceAdminClient.parse_materialized_view_path(path) + assert expected == actual + + def test_table_path(): project = "winkle" instance = "nautilus" diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index 53788921f411..21d2720d7451 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -83,6 +83,14 @@ import google.auth +CRED_INFO_JSON = { + "credential_source": "/path/to/file", + "credential_type": "service account credentials", + "principal": "service-account@example.com", +} +CRED_INFO_STRING = json.dumps(CRED_INFO_JSON) + + async def mock_async_gen(data, chunk_size=1): for i in range(0, len(data)): # pragma: NO COVER chunk = data[i : i + chunk_size] @@ -353,6 +361,49 @@ def test__get_universe_domain(): assert str(excinfo.value) == "Universe Domain cannot be an empty string." 
+@pytest.mark.parametrize( + "error_code,cred_info_json,show_cred_info", + [ + (401, CRED_INFO_JSON, True), + (403, CRED_INFO_JSON, True), + (404, CRED_INFO_JSON, True), + (500, CRED_INFO_JSON, False), + (401, None, False), + (403, None, False), + (404, None, False), + (500, None, False), + ], +) +def test__add_cred_info_for_auth_errors(error_code, cred_info_json, show_cred_info): + cred = mock.Mock(["get_cred_info"]) + cred.get_cred_info = mock.Mock(return_value=cred_info_json) + client = BigtableTableAdminClient(credentials=cred) + client._transport._credentials = cred + + error = core_exceptions.GoogleAPICallError("message", details=["foo"]) + error.code = error_code + + client._add_cred_info_for_auth_errors(error) + if show_cred_info: + assert error.details == ["foo", CRED_INFO_STRING] + else: + assert error.details == ["foo"] + + +@pytest.mark.parametrize("error_code", [401, 403, 404, 500]) +def test__add_cred_info_for_auth_errors_no_get_cred_info(error_code): + cred = mock.Mock([]) + assert not hasattr(cred, "get_cred_info") + client = BigtableTableAdminClient(credentials=cred) + client._transport._credentials = cred + + error = core_exceptions.GoogleAPICallError("message", details=[]) + error.code = error_code + + client._add_cred_info_for_auth_errors(error) + assert error.details == [] + + @pytest.mark.parametrize( "client_class,transport_name", [ @@ -12093,6 +12144,7 @@ def test_create_table_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.create_table(request) @@ -12149,6 +12201,7 @@ def test_create_table_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.create_table(**mock_args) @@ -12296,6 +12349,7 @@ def test_create_table_from_snapshot_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.create_table_from_snapshot(request) @@ -12350,6 +12404,7 @@ def test_create_table_from_snapshot_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.create_table_from_snapshot(**mock_args) @@ -12491,6 +12546,7 @@ def test_list_tables_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_tables(request) @@ -12545,6 +12601,7 @@ def test_list_tables_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_tables(**mock_args) @@ -12740,6 +12797,7 @@ def test_get_table_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_table(request) @@ -12785,6 +12843,7 @@ def 
test_get_table_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_table(**mock_args) @@ -12878,7 +12937,12 @@ def test_update_table_rest_required_fields( credentials=ga_credentials.AnonymousCredentials() ).update_table._get_unset_required_fields(jsonified_request) # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("update_mask",)) + assert not set(unset_fields) - set( + ( + "ignore_warnings", + "update_mask", + ) + ) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone @@ -12914,6 +12978,7 @@ def test_update_table_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.update_table(request) @@ -12929,7 +12994,12 @@ def test_update_table_rest_unset_required_fields(): unset_fields = transport.update_table._get_unset_required_fields({}) assert set(unset_fields) == ( - set(("updateMask",)) + set( + ( + "ignoreWarnings", + "updateMask", + ) + ) & set( ( "table", @@ -12968,6 +13038,7 @@ def test_update_table_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.update_table(**mock_args) @@ -13097,6 +13168,7 @@ def test_delete_table_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.delete_table(request) @@ -13140,6 +13212,7 @@ def test_delete_table_rest_flattened(): json_return_value = "" response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.delete_table(**mock_args) @@ -13272,6 +13345,7 @@ def test_undelete_table_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.undelete_table(request) @@ -13315,6 +13389,7 @@ def test_undelete_table_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.undelete_table(**mock_args) @@ -13462,6 +13537,7 @@ def test_create_authorized_view_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.create_authorized_view(request) @@ -13522,6 +13598,7 @@ def test_create_authorized_view_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.create_authorized_view(**mock_args) @@ -13670,6 +13747,7 @@ def 
test_list_authorized_views_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_authorized_views(request) @@ -13724,6 +13802,7 @@ def test_list_authorized_views_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_authorized_views(**mock_args) @@ -13925,6 +14004,7 @@ def test_get_authorized_view_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_authorized_view(request) @@ -13972,6 +14052,7 @@ def test_get_authorized_view_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_authorized_view(**mock_args) @@ -14112,6 +14193,7 @@ def test_update_authorized_view_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.update_authorized_view(request) @@ -14168,6 +14250,7 @@ def test_update_authorized_view_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.update_authorized_view(**mock_args) @@ -14304,6 +14387,7 @@ def test_delete_authorized_view_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.delete_authorized_view(request) @@ -14349,6 +14433,7 @@ def test_delete_authorized_view_rest_flattened(): json_return_value = "" response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.delete_authorized_view(**mock_args) @@ -14486,6 +14571,7 @@ def test_modify_column_families_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.modify_column_families(request) @@ -14544,6 +14630,7 @@ def test_modify_column_families_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.modify_column_families(**mock_args) @@ -14678,6 +14765,7 @@ def test_drop_row_range_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.drop_row_range(request) @@ -14805,6 +14893,7 @@ def test_generate_consistency_token_rest_required_fields( response_value._content = 
json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.generate_consistency_token(request) @@ -14852,6 +14941,7 @@ def test_generate_consistency_token_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.generate_consistency_token(**mock_args) @@ -14992,6 +15082,7 @@ def test_check_consistency_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.check_consistency(request) @@ -15046,6 +15137,7 @@ def test_check_consistency_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.check_consistency(**mock_args) @@ -15188,6 +15280,7 @@ def test_snapshot_table_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.snapshot_table(request) @@ -15243,6 +15336,7 @@ def test_snapshot_table_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.snapshot_table(**mock_args) @@ -15377,6 +15471,7 @@ def test_get_snapshot_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_snapshot(request) @@ -15424,6 +15519,7 @@ def test_get_snapshot_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_snapshot(**mock_args) @@ -15562,6 +15658,7 @@ def test_list_snapshots_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_snapshots(request) @@ -15617,6 +15714,7 @@ def test_list_snapshots_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_snapshots(**mock_args) @@ -15810,6 +15908,7 @@ def test_delete_snapshot_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.delete_snapshot(request) @@ -15855,6 +15954,7 @@ def test_delete_snapshot_rest_flattened(): json_return_value = "" response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} 
client.delete_snapshot(**mock_args) @@ -15997,6 +16097,7 @@ def test_create_backup_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.create_backup(request) @@ -16059,6 +16160,7 @@ def test_create_backup_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.create_backup(**mock_args) @@ -16192,6 +16294,7 @@ def test_get_backup_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_backup(request) @@ -16239,6 +16342,7 @@ def test_get_backup_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_backup(**mock_args) @@ -16368,6 +16472,7 @@ def test_update_backup_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.update_backup(request) @@ -16426,6 +16531,7 @@ def test_update_backup_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.update_backup(**mock_args) @@ -16555,6 +16661,7 @@ def test_delete_backup_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.delete_backup(request) @@ -16600,6 +16707,7 @@ def test_delete_backup_rest_flattened(): json_return_value = "" response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.delete_backup(**mock_args) @@ -16740,6 +16848,7 @@ def test_list_backups_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_backups(request) @@ -16797,6 +16906,7 @@ def test_list_backups_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_backups(**mock_args) @@ -16999,6 +17109,7 @@ def test_restore_table_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.restore_table(request) @@ -17136,6 +17247,7 @@ def test_copy_backup_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response 
= client.copy_backup(request) @@ -17194,6 +17306,7 @@ def test_copy_backup_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.copy_backup(**mock_args) @@ -17327,6 +17440,7 @@ def test_get_iam_policy_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_iam_policy(request) @@ -17372,6 +17486,7 @@ def test_get_iam_policy_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_iam_policy(**mock_args) @@ -17502,6 +17617,7 @@ def test_set_iam_policy_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.set_iam_policy(request) @@ -17555,6 +17671,7 @@ def test_set_iam_policy_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.set_iam_policy(**mock_args) @@ -17693,6 +17810,7 @@ def test_test_iam_permissions_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.test_iam_permissions(request) @@ -17747,6 +17865,7 @@ def test_test_iam_permissions_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.test_iam_permissions(**mock_args) @@ -19390,6 +19509,7 @@ def test_create_table_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.create_table(request) @@ -19427,6 +19547,7 @@ def test_create_table_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.create_table(request) # Establish that the response is the type that we expect. 
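The recurring one-line additions in the hunks above share a single purpose: every mocked REST response now carries a populated headers mapping, so client code that inspects response headers (for example, for request/response logging) sees a real dict instead of an auto-created Mock attribute. A minimal, self-contained sketch of that mocking pattern, using only unittest.mock and requests; the helper name make_mock_rest_response is illustrative rather than part of the library:

    from unittest import mock

    import requests


    def make_mock_rest_response(body: bytes = b"{}", status_code: int = 200) -> requests.Response:
        # Build a fake requests.Response the way the tests above do: explicit
        # status code, raw payload bytes, and, newly, concrete headers.
        response = requests.Response()
        response.status_code = status_code
        response._content = body
        response.headers = {"header-1": "value-1", "header-2": "value-2"}
        return response


    # Typical use inside a test: patch the transport's session and return the fake.
    session = mock.Mock(spec=requests.Session)
    session.request.return_value = make_mock_rest_response(
        b'{"name": "projects/p/instances/i/tables/t"}'
    )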
@@ -19453,10 +19574,13 @@ def test_create_table_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.BigtableTableAdminRestInterceptor, "post_create_table" ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_create_table_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.BigtableTableAdminRestInterceptor, "pre_create_table" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = bigtable_table_admin.CreateTableRequest.pb( bigtable_table_admin.CreateTableRequest() ) @@ -19469,6 +19593,7 @@ def test_create_table_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = gba_table.Table.to_json(gba_table.Table()) req.return_value.content = return_value @@ -19479,6 +19604,7 @@ def test_create_table_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = gba_table.Table() + post_with_metadata.return_value = gba_table.Table(), metadata client.create_table( request, @@ -19490,6 +19616,7 @@ def test_create_table_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_create_table_from_snapshot_rest_bad_request( @@ -19513,6 +19640,7 @@ def test_create_table_from_snapshot_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.create_table_from_snapshot(request) @@ -19543,6 +19671,7 @@ def test_create_table_from_snapshot_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.create_table_from_snapshot(request) # Establish that the response is the type that we expect. 
@@ -19568,10 +19697,14 @@ def test_create_table_from_snapshot_rest_interceptors(null_interceptor): ), mock.patch.object( transports.BigtableTableAdminRestInterceptor, "post_create_table_from_snapshot" ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, + "post_create_table_from_snapshot_with_metadata", + ) as post_with_metadata, mock.patch.object( transports.BigtableTableAdminRestInterceptor, "pre_create_table_from_snapshot" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = bigtable_table_admin.CreateTableFromSnapshotRequest.pb( bigtable_table_admin.CreateTableFromSnapshotRequest() ) @@ -19584,6 +19717,7 @@ def test_create_table_from_snapshot_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value @@ -19594,6 +19728,7 @@ def test_create_table_from_snapshot_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata client.create_table_from_snapshot( request, @@ -19605,6 +19740,7 @@ def test_create_table_from_snapshot_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_list_tables_rest_bad_request( @@ -19628,6 +19764,7 @@ def test_list_tables_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_tables(request) @@ -19663,6 +19800,7 @@ def test_list_tables_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_tables(request) # Establish that the response is the type that we expect. 
@@ -19687,10 +19825,13 @@ def test_list_tables_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.BigtableTableAdminRestInterceptor, "post_list_tables" ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_list_tables_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.BigtableTableAdminRestInterceptor, "pre_list_tables" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = bigtable_table_admin.ListTablesRequest.pb( bigtable_table_admin.ListTablesRequest() ) @@ -19703,6 +19844,7 @@ def test_list_tables_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = bigtable_table_admin.ListTablesResponse.to_json( bigtable_table_admin.ListTablesResponse() ) @@ -19715,6 +19857,10 @@ def test_list_tables_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = bigtable_table_admin.ListTablesResponse() + post_with_metadata.return_value = ( + bigtable_table_admin.ListTablesResponse(), + metadata, + ) client.list_tables( request, @@ -19726,6 +19872,7 @@ def test_list_tables_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_get_table_rest_bad_request(request_type=bigtable_table_admin.GetTableRequest): @@ -19747,6 +19894,7 @@ def test_get_table_rest_bad_request(request_type=bigtable_table_admin.GetTableRe response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_table(request) @@ -19784,6 +19932,7 @@ def test_get_table_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_table(request) # Establish that the response is the type that we expect. 
@@ -19810,10 +19959,13 @@ def test_get_table_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.BigtableTableAdminRestInterceptor, "post_get_table" ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_get_table_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.BigtableTableAdminRestInterceptor, "pre_get_table" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = bigtable_table_admin.GetTableRequest.pb( bigtable_table_admin.GetTableRequest() ) @@ -19826,6 +19978,7 @@ def test_get_table_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = table.Table.to_json(table.Table()) req.return_value.content = return_value @@ -19836,6 +19989,7 @@ def test_get_table_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = table.Table() + post_with_metadata.return_value = table.Table(), metadata client.get_table( request, @@ -19847,6 +20001,7 @@ def test_get_table_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_update_table_rest_bad_request( @@ -19872,6 +20027,7 @@ def test_update_table_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.update_table(request) @@ -19909,6 +20065,44 @@ def test_update_table_rest_call_success(request_type): "change_stream_config": {"retention_period": {"seconds": 751, "nanos": 543}}, "deletion_protection": True, "automated_backup_policy": {"retention_period": {}, "frequency": {}}, + "row_key_schema": { + "fields": [ + { + "field_name": "field_name_value", + "type_": { + "bytes_type": {"encoding": {"raw": {}}}, + "string_type": {"encoding": {"utf8_raw": {}, "utf8_bytes": {}}}, + "int64_type": { + "encoding": { + "big_endian_bytes": {"bytes_type": {}}, + "ordered_code_bytes": {}, + } + }, + "float32_type": {}, + "float64_type": {}, + "bool_type": {}, + "timestamp_type": {"encoding": {"unix_micros_int64": {}}}, + "date_type": {}, + "aggregate_type": { + "input_type": {}, + "state_type": {}, + "sum": {}, + "hllpp_unique_count": {}, + "max_": {}, + "min_": {}, + }, + "struct_type": {}, + "array_type": {"element_type": {}}, + "map_type": {"key_type": {}, "value_type": {}}, + }, + } + ], + "encoding": { + "singleton": {}, + "delimited_bytes": {"delimiter": b"delimiter_blob"}, + "ordered_code_bytes": {}, + }, + }, } # The version of a generated dependency at test runtime may differ from the version used during generation. # Delete any fields which are not present in the current runtime dependency @@ -19990,6 +20184,7 @@ def get_message_fields(field): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.update_table(request) # Establish that the response is the type that we expect. 
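The large row_key_schema literal added above is covered by the version-skew guard already present in these generated tests: as the in-hunk comment notes, the protobuf runtime present when the test executes may be older than the one the generator saw, so any field the runtime message does not declare is deleted from the request dict before the call. A simplified, top-level-only sketch of that idea follows; the real helper, get_message_fields, also walks nested message fields, and prune_unknown_fields is a hypothetical name:

    from google.protobuf import struct_pb2


    def prune_unknown_fields(request_dict: dict, message_cls) -> dict:
        # Keep only the keys that the protobuf message compiled into the current
        # runtime actually declares; DESCRIPTOR.fields lists its FieldDescriptors.
        known = {field.name for field in message_cls.DESCRIPTOR.fields}
        return {key: value for key, value in request_dict.items() if key in known}


    # Demonstrated with a well-known type; the Bigtable admin messages work the same way.
    pruned = prune_unknown_fields({"fields": {}, "not_a_field": 1}, struct_pb2.Struct)
    assert pruned == {"fields": {}}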
@@ -20015,10 +20210,13 @@ def test_update_table_rest_interceptors(null_interceptor): ), mock.patch.object( transports.BigtableTableAdminRestInterceptor, "post_update_table" ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_update_table_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.BigtableTableAdminRestInterceptor, "pre_update_table" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = bigtable_table_admin.UpdateTableRequest.pb( bigtable_table_admin.UpdateTableRequest() ) @@ -20031,6 +20229,7 @@ def test_update_table_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value @@ -20041,6 +20240,7 @@ def test_update_table_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata client.update_table( request, @@ -20052,6 +20252,7 @@ def test_update_table_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_delete_table_rest_bad_request( @@ -20075,6 +20276,7 @@ def test_delete_table_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.delete_table(request) @@ -20105,6 +20307,7 @@ def test_delete_table_rest_call_success(request_type): json_return_value = "" response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.delete_table(request) # Establish that the response is the type that we expect. @@ -20141,6 +20344,7 @@ def test_delete_table_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} request = bigtable_table_admin.DeleteTableRequest() metadata = [ @@ -20181,6 +20385,7 @@ def test_undelete_table_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.undelete_table(request) @@ -20211,6 +20416,7 @@ def test_undelete_table_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.undelete_table(request) # Establish that the response is the type that we expect. 
@@ -20236,10 +20442,14 @@ def test_undelete_table_rest_interceptors(null_interceptor): ), mock.patch.object( transports.BigtableTableAdminRestInterceptor, "post_undelete_table" ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, + "post_undelete_table_with_metadata", + ) as post_with_metadata, mock.patch.object( transports.BigtableTableAdminRestInterceptor, "pre_undelete_table" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = bigtable_table_admin.UndeleteTableRequest.pb( bigtable_table_admin.UndeleteTableRequest() ) @@ -20252,6 +20462,7 @@ def test_undelete_table_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value @@ -20262,6 +20473,7 @@ def test_undelete_table_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata client.undelete_table( request, @@ -20273,6 +20485,7 @@ def test_undelete_table_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_create_authorized_view_rest_bad_request( @@ -20296,6 +20509,7 @@ def test_create_authorized_view_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.create_authorized_view(request) @@ -20404,6 +20618,7 @@ def get_message_fields(field): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.create_authorized_view(request) # Establish that the response is the type that we expect. 
@@ -20429,10 +20644,14 @@ def test_create_authorized_view_rest_interceptors(null_interceptor): ), mock.patch.object( transports.BigtableTableAdminRestInterceptor, "post_create_authorized_view" ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, + "post_create_authorized_view_with_metadata", + ) as post_with_metadata, mock.patch.object( transports.BigtableTableAdminRestInterceptor, "pre_create_authorized_view" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = bigtable_table_admin.CreateAuthorizedViewRequest.pb( bigtable_table_admin.CreateAuthorizedViewRequest() ) @@ -20445,6 +20664,7 @@ def test_create_authorized_view_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value @@ -20455,6 +20675,7 @@ def test_create_authorized_view_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata client.create_authorized_view( request, @@ -20466,6 +20687,7 @@ def test_create_authorized_view_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_list_authorized_views_rest_bad_request( @@ -20489,6 +20711,7 @@ def test_list_authorized_views_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_authorized_views(request) @@ -20524,6 +20747,7 @@ def test_list_authorized_views_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_authorized_views(request) # Establish that the response is the type that we expect. 
@@ -20548,10 +20772,14 @@ def test_list_authorized_views_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.BigtableTableAdminRestInterceptor, "post_list_authorized_views" ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, + "post_list_authorized_views_with_metadata", + ) as post_with_metadata, mock.patch.object( transports.BigtableTableAdminRestInterceptor, "pre_list_authorized_views" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = bigtable_table_admin.ListAuthorizedViewsRequest.pb( bigtable_table_admin.ListAuthorizedViewsRequest() ) @@ -20564,6 +20792,7 @@ def test_list_authorized_views_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = bigtable_table_admin.ListAuthorizedViewsResponse.to_json( bigtable_table_admin.ListAuthorizedViewsResponse() ) @@ -20576,6 +20805,10 @@ def test_list_authorized_views_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = bigtable_table_admin.ListAuthorizedViewsResponse() + post_with_metadata.return_value = ( + bigtable_table_admin.ListAuthorizedViewsResponse(), + metadata, + ) client.list_authorized_views( request, @@ -20587,6 +20820,7 @@ def test_list_authorized_views_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_get_authorized_view_rest_bad_request( @@ -20612,6 +20846,7 @@ def test_get_authorized_view_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_authorized_view(request) @@ -20651,6 +20886,7 @@ def test_get_authorized_view_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_authorized_view(request) # Establish that the response is the type that we expect. 
@@ -20677,10 +20913,14 @@ def test_get_authorized_view_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.BigtableTableAdminRestInterceptor, "post_get_authorized_view" ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, + "post_get_authorized_view_with_metadata", + ) as post_with_metadata, mock.patch.object( transports.BigtableTableAdminRestInterceptor, "pre_get_authorized_view" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = bigtable_table_admin.GetAuthorizedViewRequest.pb( bigtable_table_admin.GetAuthorizedViewRequest() ) @@ -20693,6 +20933,7 @@ def test_get_authorized_view_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = table.AuthorizedView.to_json(table.AuthorizedView()) req.return_value.content = return_value @@ -20703,6 +20944,7 @@ def test_get_authorized_view_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = table.AuthorizedView() + post_with_metadata.return_value = table.AuthorizedView(), metadata client.get_authorized_view( request, @@ -20714,6 +20956,7 @@ def test_get_authorized_view_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_update_authorized_view_rest_bad_request( @@ -20741,6 +20984,7 @@ def test_update_authorized_view_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.update_authorized_view(request) @@ -20853,6 +21097,7 @@ def get_message_fields(field): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.update_authorized_view(request) # Establish that the response is the type that we expect. 
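Each *_rest_interceptors test above now patches a post_<method>_with_metadata hook on BigtableTableAdminRestInterceptor next to the existing pre_<method> and post_<method> hooks, primes it with post_with_metadata.return_value = (response, metadata), and asserts at the end that it fired exactly once. A hedged sketch of what a user-supplied interceptor overriding one of these hooks might look like, assuming the hook receives the deserialized response together with the response metadata sequence; the class below is illustrative only and does not subclass the real interceptor:

    class MetadataLoggingInterceptor:
        """Illustrative stand-in for a custom REST interceptor."""

        def post_get_table(self, response):
            # Pre-existing hook: only the response flows through.
            return response

        def post_get_table_with_metadata(self, response, metadata):
            # Hook exercised by the new assertions above: the response metadata
            # (for example, echoed headers) travels with the response, and the
            # pair must be handed back to the transport.
            print(f"get_table returned with metadata: {list(metadata)}")
            return response, metadata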
@@ -20878,10 +21123,14 @@ def test_update_authorized_view_rest_interceptors(null_interceptor): ), mock.patch.object( transports.BigtableTableAdminRestInterceptor, "post_update_authorized_view" ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, + "post_update_authorized_view_with_metadata", + ) as post_with_metadata, mock.patch.object( transports.BigtableTableAdminRestInterceptor, "pre_update_authorized_view" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = bigtable_table_admin.UpdateAuthorizedViewRequest.pb( bigtable_table_admin.UpdateAuthorizedViewRequest() ) @@ -20894,6 +21143,7 @@ def test_update_authorized_view_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value @@ -20904,6 +21154,7 @@ def test_update_authorized_view_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata client.update_authorized_view( request, @@ -20915,6 +21166,7 @@ def test_update_authorized_view_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_delete_authorized_view_rest_bad_request( @@ -20940,6 +21192,7 @@ def test_delete_authorized_view_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.delete_authorized_view(request) @@ -20972,6 +21225,7 @@ def test_delete_authorized_view_rest_call_success(request_type): json_return_value = "" response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.delete_authorized_view(request) # Establish that the response is the type that we expect. @@ -21008,6 +21262,7 @@ def test_delete_authorized_view_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} request = bigtable_table_admin.DeleteAuthorizedViewRequest() metadata = [ @@ -21048,6 +21303,7 @@ def test_modify_column_families_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.modify_column_families(request) @@ -21085,6 +21341,7 @@ def test_modify_column_families_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.modify_column_families(request) # Establish that the response is the type that we expect. 
@@ -21111,10 +21368,14 @@ def test_modify_column_families_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.BigtableTableAdminRestInterceptor, "post_modify_column_families" ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, + "post_modify_column_families_with_metadata", + ) as post_with_metadata, mock.patch.object( transports.BigtableTableAdminRestInterceptor, "pre_modify_column_families" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = bigtable_table_admin.ModifyColumnFamiliesRequest.pb( bigtable_table_admin.ModifyColumnFamiliesRequest() ) @@ -21127,6 +21388,7 @@ def test_modify_column_families_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = table.Table.to_json(table.Table()) req.return_value.content = return_value @@ -21137,6 +21399,7 @@ def test_modify_column_families_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = table.Table() + post_with_metadata.return_value = table.Table(), metadata client.modify_column_families( request, @@ -21148,6 +21411,7 @@ def test_modify_column_families_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_drop_row_range_rest_bad_request( @@ -21171,6 +21435,7 @@ def test_drop_row_range_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.drop_row_range(request) @@ -21201,6 +21466,7 @@ def test_drop_row_range_rest_call_success(request_type): json_return_value = "" response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.drop_row_range(request) # Establish that the response is the type that we expect. @@ -21237,6 +21503,7 @@ def test_drop_row_range_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} request = bigtable_table_admin.DropRowRangeRequest() metadata = [ @@ -21277,6 +21544,7 @@ def test_generate_consistency_token_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.generate_consistency_token(request) @@ -21314,6 +21582,7 @@ def test_generate_consistency_token_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.generate_consistency_token(request) # Establish that the response is the type that we expect. 
@@ -21338,10 +21607,14 @@ def test_generate_consistency_token_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.BigtableTableAdminRestInterceptor, "post_generate_consistency_token" ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, + "post_generate_consistency_token_with_metadata", + ) as post_with_metadata, mock.patch.object( transports.BigtableTableAdminRestInterceptor, "pre_generate_consistency_token" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = bigtable_table_admin.GenerateConsistencyTokenRequest.pb( bigtable_table_admin.GenerateConsistencyTokenRequest() ) @@ -21354,6 +21627,7 @@ def test_generate_consistency_token_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.to_json( bigtable_table_admin.GenerateConsistencyTokenResponse() ) @@ -21366,6 +21640,10 @@ def test_generate_consistency_token_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() + post_with_metadata.return_value = ( + bigtable_table_admin.GenerateConsistencyTokenResponse(), + metadata, + ) client.generate_consistency_token( request, @@ -21377,6 +21655,7 @@ def test_generate_consistency_token_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_check_consistency_rest_bad_request( @@ -21400,6 +21679,7 @@ def test_check_consistency_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.check_consistency(request) @@ -21435,6 +21715,7 @@ def test_check_consistency_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.check_consistency(request) # Establish that the response is the type that we expect. 
@@ -21459,10 +21740,14 @@ def test_check_consistency_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.BigtableTableAdminRestInterceptor, "post_check_consistency" ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, + "post_check_consistency_with_metadata", + ) as post_with_metadata, mock.patch.object( transports.BigtableTableAdminRestInterceptor, "pre_check_consistency" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = bigtable_table_admin.CheckConsistencyRequest.pb( bigtable_table_admin.CheckConsistencyRequest() ) @@ -21475,6 +21760,7 @@ def test_check_consistency_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = bigtable_table_admin.CheckConsistencyResponse.to_json( bigtable_table_admin.CheckConsistencyResponse() ) @@ -21487,6 +21773,10 @@ def test_check_consistency_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = bigtable_table_admin.CheckConsistencyResponse() + post_with_metadata.return_value = ( + bigtable_table_admin.CheckConsistencyResponse(), + metadata, + ) client.check_consistency( request, @@ -21498,6 +21788,7 @@ def test_check_consistency_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_snapshot_table_rest_bad_request( @@ -21521,6 +21812,7 @@ def test_snapshot_table_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.snapshot_table(request) @@ -21551,6 +21843,7 @@ def test_snapshot_table_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.snapshot_table(request) # Establish that the response is the type that we expect. 
@@ -21576,10 +21869,14 @@ def test_snapshot_table_rest_interceptors(null_interceptor): ), mock.patch.object( transports.BigtableTableAdminRestInterceptor, "post_snapshot_table" ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, + "post_snapshot_table_with_metadata", + ) as post_with_metadata, mock.patch.object( transports.BigtableTableAdminRestInterceptor, "pre_snapshot_table" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = bigtable_table_admin.SnapshotTableRequest.pb( bigtable_table_admin.SnapshotTableRequest() ) @@ -21592,6 +21889,7 @@ def test_snapshot_table_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value @@ -21602,6 +21900,7 @@ def test_snapshot_table_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata client.snapshot_table( request, @@ -21613,6 +21912,7 @@ def test_snapshot_table_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_get_snapshot_rest_bad_request( @@ -21638,6 +21938,7 @@ def test_get_snapshot_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_snapshot(request) @@ -21678,6 +21979,7 @@ def test_get_snapshot_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_snapshot(request) # Establish that the response is the type that we expect. 
@@ -21705,10 +22007,13 @@ def test_get_snapshot_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.BigtableTableAdminRestInterceptor, "post_get_snapshot" ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_get_snapshot_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.BigtableTableAdminRestInterceptor, "pre_get_snapshot" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = bigtable_table_admin.GetSnapshotRequest.pb( bigtable_table_admin.GetSnapshotRequest() ) @@ -21721,6 +22026,7 @@ def test_get_snapshot_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = table.Snapshot.to_json(table.Snapshot()) req.return_value.content = return_value @@ -21731,6 +22037,7 @@ def test_get_snapshot_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = table.Snapshot() + post_with_metadata.return_value = table.Snapshot(), metadata client.get_snapshot( request, @@ -21742,6 +22049,7 @@ def test_get_snapshot_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_list_snapshots_rest_bad_request( @@ -21765,6 +22073,7 @@ def test_list_snapshots_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_snapshots(request) @@ -21800,6 +22109,7 @@ def test_list_snapshots_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_snapshots(request) # Establish that the response is the type that we expect. 
@@ -21824,10 +22134,14 @@ def test_list_snapshots_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.BigtableTableAdminRestInterceptor, "post_list_snapshots" ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, + "post_list_snapshots_with_metadata", + ) as post_with_metadata, mock.patch.object( transports.BigtableTableAdminRestInterceptor, "pre_list_snapshots" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = bigtable_table_admin.ListSnapshotsRequest.pb( bigtable_table_admin.ListSnapshotsRequest() ) @@ -21840,6 +22154,7 @@ def test_list_snapshots_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = bigtable_table_admin.ListSnapshotsResponse.to_json( bigtable_table_admin.ListSnapshotsResponse() ) @@ -21852,6 +22167,10 @@ def test_list_snapshots_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = bigtable_table_admin.ListSnapshotsResponse() + post_with_metadata.return_value = ( + bigtable_table_admin.ListSnapshotsResponse(), + metadata, + ) client.list_snapshots( request, @@ -21863,6 +22182,7 @@ def test_list_snapshots_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_delete_snapshot_rest_bad_request( @@ -21888,6 +22208,7 @@ def test_delete_snapshot_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.delete_snapshot(request) @@ -21920,6 +22241,7 @@ def test_delete_snapshot_rest_call_success(request_type): json_return_value = "" response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.delete_snapshot(request) # Establish that the response is the type that we expect. @@ -21956,6 +22278,7 @@ def test_delete_snapshot_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} request = bigtable_table_admin.DeleteSnapshotRequest() metadata = [ @@ -21996,6 +22319,7 @@ def test_create_backup_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.create_backup(request) @@ -22119,6 +22443,7 @@ def get_message_fields(field): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.create_backup(request) # Establish that the response is the type that we expect. 
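The *_rest_bad_request tests above all follow one shape: the mocked transport returns an HTTP 400 (now with headers populated as well), and the client call is expected to raise instead of producing a response object. A self-contained sketch of that shape with plain requests and unittest.mock; call_table_admin and the URL are hypothetical stand-ins rather than the generated client API:

    from unittest import mock

    import requests


    def call_table_admin(session: requests.Session) -> dict:
        # Hypothetical stand-in for a generated client method: issue the request
        # and surface HTTP errors as exceptions.
        response = session.request("GET", "https://bigtableadmin.example/v2/tables/my-table")
        response.raise_for_status()
        return response.json()


    def test_call_table_admin_rest_bad_request():
        response_value = requests.Response()
        response_value.status_code = 400
        response_value.headers = {"header-1": "value-1", "header-2": "value-2"}
        session = mock.Mock(spec=requests.Session)
        session.request.return_value = response_value
        try:
            call_table_admin(session)
            raise AssertionError("expected an HTTPError for the 400 response")
        except requests.HTTPError:
            pass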
@@ -22144,10 +22469,13 @@ def test_create_backup_rest_interceptors(null_interceptor): ), mock.patch.object( transports.BigtableTableAdminRestInterceptor, "post_create_backup" ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_create_backup_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.BigtableTableAdminRestInterceptor, "pre_create_backup" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = bigtable_table_admin.CreateBackupRequest.pb( bigtable_table_admin.CreateBackupRequest() ) @@ -22160,6 +22488,7 @@ def test_create_backup_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value @@ -22170,6 +22499,7 @@ def test_create_backup_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata client.create_backup( request, @@ -22181,6 +22511,7 @@ def test_create_backup_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_get_backup_rest_bad_request( @@ -22206,6 +22537,7 @@ def test_get_backup_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_backup(request) @@ -22248,6 +22580,7 @@ def test_get_backup_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_backup(request) # Establish that the response is the type that we expect. 
@@ -22277,10 +22610,13 @@ def test_get_backup_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.BigtableTableAdminRestInterceptor, "post_get_backup" ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_get_backup_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.BigtableTableAdminRestInterceptor, "pre_get_backup" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = bigtable_table_admin.GetBackupRequest.pb( bigtable_table_admin.GetBackupRequest() ) @@ -22293,6 +22629,7 @@ def test_get_backup_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = table.Backup.to_json(table.Backup()) req.return_value.content = return_value @@ -22303,6 +22640,7 @@ def test_get_backup_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = table.Backup() + post_with_metadata.return_value = table.Backup(), metadata client.get_backup( request, @@ -22314,6 +22652,7 @@ def test_get_backup_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_update_backup_rest_bad_request( @@ -22341,6 +22680,7 @@ def test_update_backup_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.update_backup(request) @@ -22478,6 +22818,7 @@ def get_message_fields(field): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.update_backup(request) # Establish that the response is the type that we expect. 
@@ -22507,10 +22848,13 @@ def test_update_backup_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.BigtableTableAdminRestInterceptor, "post_update_backup" ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_update_backup_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.BigtableTableAdminRestInterceptor, "pre_update_backup" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = bigtable_table_admin.UpdateBackupRequest.pb( bigtable_table_admin.UpdateBackupRequest() ) @@ -22523,6 +22867,7 @@ def test_update_backup_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = table.Backup.to_json(table.Backup()) req.return_value.content = return_value @@ -22533,6 +22878,7 @@ def test_update_backup_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = table.Backup() + post_with_metadata.return_value = table.Backup(), metadata client.update_backup( request, @@ -22544,6 +22890,7 @@ def test_update_backup_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_delete_backup_rest_bad_request( @@ -22569,6 +22916,7 @@ def test_delete_backup_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.delete_backup(request) @@ -22601,6 +22949,7 @@ def test_delete_backup_rest_call_success(request_type): json_return_value = "" response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.delete_backup(request) # Establish that the response is the type that we expect. @@ -22637,6 +22986,7 @@ def test_delete_backup_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} request = bigtable_table_admin.DeleteBackupRequest() metadata = [ @@ -22677,6 +23027,7 @@ def test_list_backups_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_backups(request) @@ -22712,6 +23063,7 @@ def test_list_backups_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_backups(request) # Establish that the response is the type that we expect. 
@@ -22736,10 +23088,13 @@ def test_list_backups_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.BigtableTableAdminRestInterceptor, "post_list_backups" ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_list_backups_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.BigtableTableAdminRestInterceptor, "pre_list_backups" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = bigtable_table_admin.ListBackupsRequest.pb( bigtable_table_admin.ListBackupsRequest() ) @@ -22752,6 +23107,7 @@ def test_list_backups_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = bigtable_table_admin.ListBackupsResponse.to_json( bigtable_table_admin.ListBackupsResponse() ) @@ -22764,6 +23120,10 @@ def test_list_backups_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = bigtable_table_admin.ListBackupsResponse() + post_with_metadata.return_value = ( + bigtable_table_admin.ListBackupsResponse(), + metadata, + ) client.list_backups( request, @@ -22775,6 +23135,7 @@ def test_list_backups_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_restore_table_rest_bad_request( @@ -22798,6 +23159,7 @@ def test_restore_table_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.restore_table(request) @@ -22828,6 +23190,7 @@ def test_restore_table_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.restore_table(request) # Establish that the response is the type that we expect. 
@@ -22853,10 +23216,13 @@ def test_restore_table_rest_interceptors(null_interceptor): ), mock.patch.object( transports.BigtableTableAdminRestInterceptor, "post_restore_table" ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_restore_table_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.BigtableTableAdminRestInterceptor, "pre_restore_table" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = bigtable_table_admin.RestoreTableRequest.pb( bigtable_table_admin.RestoreTableRequest() ) @@ -22869,6 +23235,7 @@ def test_restore_table_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value @@ -22879,6 +23246,7 @@ def test_restore_table_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata client.restore_table( request, @@ -22890,6 +23258,7 @@ def test_restore_table_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_copy_backup_rest_bad_request( @@ -22913,6 +23282,7 @@ def test_copy_backup_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.copy_backup(request) @@ -22943,6 +23313,7 @@ def test_copy_backup_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.copy_backup(request) # Establish that the response is the type that we expect. 
@@ -22968,10 +23339,13 @@ def test_copy_backup_rest_interceptors(null_interceptor): ), mock.patch.object( transports.BigtableTableAdminRestInterceptor, "post_copy_backup" ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_copy_backup_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.BigtableTableAdminRestInterceptor, "pre_copy_backup" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = bigtable_table_admin.CopyBackupRequest.pb( bigtable_table_admin.CopyBackupRequest() ) @@ -22984,6 +23358,7 @@ def test_copy_backup_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value @@ -22994,6 +23369,7 @@ def test_copy_backup_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata client.copy_backup( request, @@ -23005,6 +23381,7 @@ def test_copy_backup_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_get_iam_policy_rest_bad_request( @@ -23028,6 +23405,7 @@ def test_get_iam_policy_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_iam_policy(request) @@ -23061,6 +23439,7 @@ def test_get_iam_policy_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_iam_policy(request) # Establish that the response is the type that we expect. 
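A change repeated throughout these hunks is that every mocked HTTP response now sets a headers dict in addition to status_code and content (or iter_content). Condensed into a standalone snippet, that mock construction looks like the following; the header values are the same placeholders the tests use, and the motivation (giving the transport something from which to derive the response metadata handed to the new *_with_metadata hooks) is inferred rather than stated in the patch.

from unittest import mock

# Fake HTTP response in the shape the updated tests expect: a success
# status, a JSON body, and a non-empty headers mapping.
fake_response = mock.Mock()
fake_response.status_code = 200
fake_response.content = b"{}"
fake_response.headers = {"header-1": "value-1", "header-2": "value-2"}

# A mocked session then returns it from request(), as the tests do with
# mock.patch.object(Session, "request").
session = mock.Mock()
session.request.return_value = fake_response
assert session.request().headers["header-1"] == "value-1"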
@@ -23086,10 +23465,14 @@ def test_get_iam_policy_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.BigtableTableAdminRestInterceptor, "post_get_iam_policy" ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, + "post_get_iam_policy_with_metadata", + ) as post_with_metadata, mock.patch.object( transports.BigtableTableAdminRestInterceptor, "pre_get_iam_policy" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = iam_policy_pb2.GetIamPolicyRequest() transcode.return_value = { "method": "post", @@ -23100,6 +23483,7 @@ def test_get_iam_policy_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(policy_pb2.Policy()) req.return_value.content = return_value @@ -23110,6 +23494,7 @@ def test_get_iam_policy_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = policy_pb2.Policy() + post_with_metadata.return_value = policy_pb2.Policy(), metadata client.get_iam_policy( request, @@ -23121,6 +23506,7 @@ def test_get_iam_policy_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_set_iam_policy_rest_bad_request( @@ -23144,6 +23530,7 @@ def test_set_iam_policy_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.set_iam_policy(request) @@ -23177,6 +23564,7 @@ def test_set_iam_policy_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.set_iam_policy(request) # Establish that the response is the type that we expect. 
@@ -23202,10 +23590,14 @@ def test_set_iam_policy_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.BigtableTableAdminRestInterceptor, "post_set_iam_policy" ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, + "post_set_iam_policy_with_metadata", + ) as post_with_metadata, mock.patch.object( transports.BigtableTableAdminRestInterceptor, "pre_set_iam_policy" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = iam_policy_pb2.SetIamPolicyRequest() transcode.return_value = { "method": "post", @@ -23216,6 +23608,7 @@ def test_set_iam_policy_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(policy_pb2.Policy()) req.return_value.content = return_value @@ -23226,6 +23619,7 @@ def test_set_iam_policy_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = policy_pb2.Policy() + post_with_metadata.return_value = policy_pb2.Policy(), metadata client.set_iam_policy( request, @@ -23237,6 +23631,7 @@ def test_set_iam_policy_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_test_iam_permissions_rest_bad_request( @@ -23260,6 +23655,7 @@ def test_test_iam_permissions_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.test_iam_permissions(request) @@ -23292,6 +23688,7 @@ def test_test_iam_permissions_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.test_iam_permissions(request) # Establish that the response is the type that we expect. 
@@ -23316,10 +23713,14 @@ def test_test_iam_permissions_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.BigtableTableAdminRestInterceptor, "post_test_iam_permissions" ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, + "post_test_iam_permissions_with_metadata", + ) as post_with_metadata, mock.patch.object( transports.BigtableTableAdminRestInterceptor, "pre_test_iam_permissions" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = iam_policy_pb2.TestIamPermissionsRequest() transcode.return_value = { "method": "post", @@ -23330,6 +23731,7 @@ def test_test_iam_permissions_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson( iam_policy_pb2.TestIamPermissionsResponse() ) @@ -23342,6 +23744,10 @@ def test_test_iam_permissions_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = iam_policy_pb2.TestIamPermissionsResponse() + post_with_metadata.return_value = ( + iam_policy_pb2.TestIamPermissionsResponse(), + metadata, + ) client.test_iam_permissions( request, @@ -23353,6 +23759,7 @@ def test_test_iam_permissions_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_initialize_client_w_rest(): diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py index 10543bd3acbe..85700b67d806 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py @@ -67,6 +67,14 @@ import google.auth +CRED_INFO_JSON = { + "credential_source": "/path/to/file", + "credential_type": "service account credentials", + "principal": "service-account@example.com", +} +CRED_INFO_STRING = json.dumps(CRED_INFO_JSON) + + async def mock_async_gen(data, chunk_size=1): for i in range(0, len(data)): # pragma: NO COVER chunk = data[i : i + chunk_size] @@ -296,6 +304,49 @@ def test__get_universe_domain(): assert str(excinfo.value) == "Universe Domain cannot be an empty string." 
+@pytest.mark.parametrize( + "error_code,cred_info_json,show_cred_info", + [ + (401, CRED_INFO_JSON, True), + (403, CRED_INFO_JSON, True), + (404, CRED_INFO_JSON, True), + (500, CRED_INFO_JSON, False), + (401, None, False), + (403, None, False), + (404, None, False), + (500, None, False), + ], +) +def test__add_cred_info_for_auth_errors(error_code, cred_info_json, show_cred_info): + cred = mock.Mock(["get_cred_info"]) + cred.get_cred_info = mock.Mock(return_value=cred_info_json) + client = BigtableClient(credentials=cred) + client._transport._credentials = cred + + error = core_exceptions.GoogleAPICallError("message", details=["foo"]) + error.code = error_code + + client._add_cred_info_for_auth_errors(error) + if show_cred_info: + assert error.details == ["foo", CRED_INFO_STRING] + else: + assert error.details == ["foo"] + + +@pytest.mark.parametrize("error_code", [401, 403, 404, 500]) +def test__add_cred_info_for_auth_errors_no_get_cred_info(error_code): + cred = mock.Mock([]) + assert not hasattr(cred, "get_cred_info") + client = BigtableClient(credentials=cred) + client._transport._credentials = cred + + error = core_exceptions.GoogleAPICallError("message", details=[]) + error.code = error_code + + client._add_cred_info_for_auth_errors(error) + assert error.details == [] + + @pytest.mark.parametrize( "client_class,transport_name", [ @@ -1065,6 +1116,7 @@ def test_read_rows_non_empty_request_with_auto_populated_field(): request = bigtable.ReadRowsRequest( table_name="table_name_value", authorized_view_name="authorized_view_name_value", + materialized_view_name="materialized_view_name_value", app_profile_id="app_profile_id_value", ) @@ -1079,6 +1131,7 @@ def test_read_rows_non_empty_request_with_auto_populated_field(): assert args[0] == bigtable.ReadRowsRequest( table_name="table_name_value", authorized_view_name="authorized_view_name_value", + materialized_view_name="materialized_view_name_value", app_profile_id="app_profile_id_value", ) @@ -1334,6 +1387,7 @@ def test_sample_row_keys_non_empty_request_with_auto_populated_field(): request = bigtable.SampleRowKeysRequest( table_name="table_name_value", authorized_view_name="authorized_view_name_value", + materialized_view_name="materialized_view_name_value", app_profile_id="app_profile_id_value", ) @@ -1348,6 +1402,7 @@ def test_sample_row_keys_non_empty_request_with_auto_populated_field(): assert args[0] == bigtable.SampleRowKeysRequest( table_name="table_name_value", authorized_view_name="authorized_view_name_value", + materialized_view_name="materialized_view_name_value", app_profile_id="app_profile_id_value", ) @@ -3869,6 +3924,292 @@ async def test_read_change_stream_flattened_error_async(): ) +@pytest.mark.parametrize( + "request_type", + [ + bigtable.PrepareQueryRequest, + dict, + ], +) +def test_prepare_query(request_type, transport: str = "grpc"): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.prepare_query), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = bigtable.PrepareQueryResponse( + prepared_query=b"prepared_query_blob", + ) + response = client.prepare_query(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = bigtable.PrepareQueryRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable.PrepareQueryResponse) + assert response.prepared_query == b"prepared_query_blob" + + +def test_prepare_query_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable.PrepareQueryRequest( + instance_name="instance_name_value", + app_profile_id="app_profile_id_value", + query="query_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.prepare_query), "__call__") as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.prepare_query(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable.PrepareQueryRequest( + instance_name="instance_name_value", + app_profile_id="app_profile_id_value", + query="query_value", + ) + + +def test_prepare_query_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.prepare_query in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.prepare_query] = mock_rpc + request = {} + client.prepare_query(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.prepare_query(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_prepare_query_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.prepare_query + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.prepare_query + ] = mock_rpc + + request = {} + await client.prepare_query(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.prepare_query(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_prepare_query_async( + transport: str = "grpc_asyncio", request_type=bigtable.PrepareQueryRequest +): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.prepare_query), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.PrepareQueryResponse( + prepared_query=b"prepared_query_blob", + ) + ) + response = await client.prepare_query(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = bigtable.PrepareQueryRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable.PrepareQueryResponse) + assert response.prepared_query == b"prepared_query_blob" + + +@pytest.mark.asyncio +async def test_prepare_query_async_from_dict(): + await test_prepare_query_async(request_type=dict) + + +def test_prepare_query_flattened(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.prepare_query), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable.PrepareQueryResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.prepare_query( + instance_name="instance_name_value", + query="query_value", + app_profile_id="app_profile_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].instance_name + mock_val = "instance_name_value" + assert arg == mock_val + arg = args[0].query + mock_val = "query_value" + assert arg == mock_val + arg = args[0].app_profile_id + mock_val = "app_profile_id_value" + assert arg == mock_val + + +def test_prepare_query_flattened_error(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.prepare_query( + bigtable.PrepareQueryRequest(), + instance_name="instance_name_value", + query="query_value", + app_profile_id="app_profile_id_value", + ) + + +@pytest.mark.asyncio +async def test_prepare_query_flattened_async(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.prepare_query), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable.PrepareQueryResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.PrepareQueryResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.prepare_query( + instance_name="instance_name_value", + query="query_value", + app_profile_id="app_profile_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].instance_name + mock_val = "instance_name_value" + assert arg == mock_val + arg = args[0].query + mock_val = "query_value" + assert arg == mock_val + arg = args[0].app_profile_id + mock_val = "app_profile_id_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_prepare_query_flattened_error_async(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.prepare_query( + bigtable.PrepareQueryRequest(), + instance_name="instance_name_value", + query="query_value", + app_profile_id="app_profile_id_value", + ) + + @pytest.mark.parametrize( "request_type", [ @@ -4218,6 +4559,7 @@ def test_read_rows_rest_flattened(): json_return_value = "[{}]".format(json_return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} with mock.patch.object(response_value, "iter_content") as iter_content: iter_content.return_value = iter(json_return_value) @@ -4318,6 +4660,7 @@ def test_sample_row_keys_rest_flattened(): json_return_value = "[{}]".format(json_return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} with mock.patch.object(response_value, "iter_content") as iter_content: iter_content.return_value = iter(json_return_value) @@ -4451,6 +4794,7 @@ def test_mutate_row_rest_required_fields(request_type=bigtable.MutateRowRequest) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.mutate_row(request) @@ -4513,6 +4857,7 @@ def test_mutate_row_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.mutate_row(**mock_args) @@ -4646,6 +4991,7 @@ def test_mutate_rows_rest_required_fields(request_type=bigtable.MutateRowsReques response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} with mock.patch.object(response_value, "iter_content") as iter_content: iter_content.return_value = iter(json_return_value) @@ -4698,6 +5044,7 @@ def test_mutate_rows_rest_flattened(): json_return_value = "[{}]".format(json_return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} with mock.patch.object(response_value, "iter_content") as iter_content: iter_content.return_value = iter(json_return_value) @@ -4838,6 +5185,7 @@ def test_check_and_mutate_row_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.check_and_mutate_row(request) @@ -4908,6 +5256,7 @@ def test_check_and_mutate_row_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.check_and_mutate_row(**mock_args) @@ -5061,6 +5410,7 @@ def test_ping_and_warm_rest_required_fields(request_type=bigtable.PingAndWarmReq response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.ping_and_warm(request) @@ -5107,6 +5457,7 @@ def test_ping_and_warm_rest_flattened(): json_return_value = 
json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.ping_and_warm(**mock_args) @@ -5243,6 +5594,7 @@ def test_read_modify_write_row_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.read_modify_write_row(request) @@ -5301,6 +5653,7 @@ def test_read_modify_write_row_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.read_modify_write_row(**mock_args) @@ -5448,6 +5801,7 @@ def test_generate_initial_change_stream_partitions_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} with mock.patch.object(response_value, "iter_content") as iter_content: iter_content.return_value = iter(json_return_value) @@ -5505,6 +5859,7 @@ def test_generate_initial_change_stream_partitions_rest_flattened(): json_return_value = "[{}]".format(json_return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} with mock.patch.object(response_value, "iter_content") as iter_content: iter_content.return_value = iter(json_return_value) @@ -5647,6 +6002,7 @@ def test_read_change_stream_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} with mock.patch.object(response_value, "iter_content") as iter_content: iter_content.return_value = iter(json_return_value) @@ -5698,6 +6054,7 @@ def test_read_change_stream_rest_flattened(): json_return_value = "[{}]".format(json_return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} with mock.patch.object(response_value, "iter_content") as iter_content: iter_content.return_value = iter(json_return_value) @@ -5730,7 +6087,7 @@ def test_read_change_stream_rest_flattened_error(transport: str = "rest"): ) -def test_execute_query_rest_use_cached_wrapped_rpc(): +def test_prepare_query_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -5744,29 +6101,29 @@ def test_execute_query_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.execute_query in client._transport._wrapped_methods + assert client._transport.prepare_query in client._transport._wrapped_methods # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. 
) - client._transport._wrapped_methods[client._transport.execute_query] = mock_rpc + client._transport._wrapped_methods[client._transport.prepare_query] = mock_rpc request = {} - client.execute_query(request) + client.prepare_query(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - client.execute_query(request) + client.prepare_query(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_execute_query_rest_required_fields(request_type=bigtable.ExecuteQueryRequest): +def test_prepare_query_rest_required_fields(request_type=bigtable.PrepareQueryRequest): transport_class = transports.BigtableRestTransport request_init = {} @@ -5782,7 +6139,201 @@ def test_execute_query_rest_required_fields(request_type=bigtable.ExecuteQueryRe unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).execute_query._get_unset_required_fields(jsonified_request) + ).prepare_query._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["instanceName"] = "instance_name_value" + jsonified_request["query"] = "query_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).prepare_query._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "instanceName" in jsonified_request + assert jsonified_request["instanceName"] == "instance_name_value" + assert "query" in jsonified_request + assert jsonified_request["query"] == "query_value" + + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable.PrepareQueryResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable.PrepareQueryResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.prepare_query(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_prepare_query_rest_unset_required_fields(): + transport = transports.BigtableRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.prepare_query._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "instanceName", + "query", + "paramTypes", + ) + ) + ) + + +def test_prepare_query_rest_flattened(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.PrepareQueryResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"instance_name": "projects/sample1/instances/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + instance_name="instance_name_value", + query="query_value", + app_profile_id="app_profile_id_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable.PrepareQueryResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.prepare_query(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{instance_name=projects/*/instances/*}:prepareQuery" + % client.transport._host, + args[1], + ) + + +def test_prepare_query_rest_flattened_error(transport: str = "rest"): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.prepare_query( + bigtable.PrepareQueryRequest(), + instance_name="instance_name_value", + query="query_value", + app_profile_id="app_profile_id_value", + ) + + +def test_execute_query_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.execute_query in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.execute_query] = mock_rpc + + request = {} + client.execute_query(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.execute_query(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_execute_query_rest_required_fields(request_type=bigtable.ExecuteQueryRequest): + transport_class = transports.BigtableRestTransport + + request_init = {} + request_init["instance_name"] = "" + request_init["query"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).execute_query._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present @@ -5836,6 +6387,7 @@ def test_execute_query_rest_required_fields(request_type=bigtable.ExecuteQueryRe response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} with mock.patch.object(response_value, "iter_content") as iter_content: iter_content.return_value = iter(json_return_value) @@ -5895,6 +6447,7 @@ def test_execute_query_rest_flattened(): json_return_value = "[{}]".format(json_return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} with mock.patch.object(response_value, "iter_content") as iter_content: iter_content.return_value = iter(json_return_value) @@ -6233,6 +6786,27 @@ def test_read_change_stream_empty_call_grpc(): assert args[0] == request_msg +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_prepare_query_empty_call_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. 
+ with mock.patch.object(type(client.transport.prepare_query), "__call__") as call: + call.return_value = bigtable.PrepareQueryResponse() + client.prepare_query(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.PrepareQueryRequest() + + assert args[0] == request_msg + + # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_execute_query_empty_call_grpc(): @@ -6846,6 +7420,58 @@ def test_read_modify_write_row_routing_parameters_request_3_grpc(): ) +def test_prepare_query_routing_parameters_request_1_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.prepare_query), "__call__") as call: + call.return_value = bigtable.PrepareQueryResponse() + client.prepare_query( + request={"instance_name": "projects/sample1/instances/sample2"} + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.PrepareQueryRequest( + **{"instance_name": "projects/sample1/instances/sample2"} + ) + + assert args[0] == request_msg + + expected_headers = {"name": "projects/sample1/instances/sample2"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_prepare_query_routing_parameters_request_2_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.prepare_query), "__call__") as call: + call.return_value = bigtable.PrepareQueryResponse() + client.prepare_query(request={"app_profile_id": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.PrepareQueryRequest(**{"app_profile_id": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"app_profile_id": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + def test_execute_query_routing_parameters_request_1_grpc(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), @@ -6868,11 +7494,7 @@ def test_execute_query_routing_parameters_request_1_grpc(): assert args[0] == request_msg - # expect app_profile_id while temporary patch is in place: https://github.com/googleapis/python-bigtable/pull/1072 - expected_headers = { - "name": "projects/sample1/instances/sample2", - "app_profile_id": "", - } + expected_headers = {"name": "projects/sample1/instances/sample2"} assert ( gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] ) @@ -7156,6 +7778,33 @@ async def test_read_change_stream_empty_call_grpc_asyncio(): assert args[0] == request_msg +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_prepare_query_empty_call_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. 
+ with mock.patch.object(type(client.transport.prepare_query), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.PrepareQueryResponse( + prepared_query=b"prepared_query_blob", + ) + ) + await client.prepare_query(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.PrepareQueryRequest() + + assert args[0] == request_msg + + # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. @pytest.mark.asyncio @@ -7871,6 +8520,70 @@ async def test_read_modify_write_row_routing_parameters_request_3_grpc_asyncio() ) +@pytest.mark.asyncio +async def test_prepare_query_routing_parameters_request_1_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.prepare_query), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.PrepareQueryResponse( + prepared_query=b"prepared_query_blob", + ) + ) + await client.prepare_query( + request={"instance_name": "projects/sample1/instances/sample2"} + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.PrepareQueryRequest( + **{"instance_name": "projects/sample1/instances/sample2"} + ) + + assert args[0] == request_msg + + expected_headers = {"name": "projects/sample1/instances/sample2"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_prepare_query_routing_parameters_request_2_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.prepare_query), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.PrepareQueryResponse( + prepared_query=b"prepared_query_blob", + ) + ) + await client.prepare_query(request={"app_profile_id": "sample1"}) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.PrepareQueryRequest(**{"app_profile_id": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"app_profile_id": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + @pytest.mark.asyncio async def test_execute_query_routing_parameters_request_1_grpc_asyncio(): client = BigtableAsyncClient( @@ -7898,11 +8611,7 @@ async def test_execute_query_routing_parameters_request_1_grpc_asyncio(): assert args[0] == request_msg - # expect app_profile_id while temporary patch is in place: https://github.com/googleapis/python-bigtable/pull/1072 - expected_headers = { - "name": "projects/sample1/instances/sample2", - "app_profile_id": "", - } + expected_headers = {"name": "projects/sample1/instances/sample2"} assert ( gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] ) @@ -7963,6 +8672,7 @@ def test_read_rows_rest_bad_request(request_type=bigtable.ReadRowsRequest): response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.read_rows(request) @@ -7999,6 +8709,7 @@ def test_read_rows_rest_call_success(request_type): json_return_value = "[{}]".format(json_return_value) response_value.iter_content = mock.Mock(return_value=iter(json_return_value)) req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.read_rows(request) assert isinstance(response, Iterable) @@ -8024,10 +8735,13 @@ def test_read_rows_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.BigtableRestInterceptor, "post_read_rows" ) as post, mock.patch.object( + transports.BigtableRestInterceptor, "post_read_rows_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.BigtableRestInterceptor, "pre_read_rows" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = bigtable.ReadRowsRequest.pb(bigtable.ReadRowsRequest()) transcode.return_value = { "method": "post", @@ -8038,6 +8752,7 @@ def test_read_rows_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = bigtable.ReadRowsResponse.to_json(bigtable.ReadRowsResponse()) req.return_value.iter_content = mock.Mock(return_value=iter(return_value)) @@ -8048,6 +8763,7 @@ def test_read_rows_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = bigtable.ReadRowsResponse() + post_with_metadata.return_value = bigtable.ReadRowsResponse(), metadata client.read_rows( request, @@ -8059,6 +8775,7 @@ def test_read_rows_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_sample_row_keys_rest_bad_request(request_type=bigtable.SampleRowKeysRequest): @@ -8080,6 +8797,7 @@ def test_sample_row_keys_rest_bad_request(request_type=bigtable.SampleRowKeysReq response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.sample_row_keys(request) @@ -8117,6 +8835,7 @@ def test_sample_row_keys_rest_call_success(request_type): json_return_value = 
"[{}]".format(json_return_value) response_value.iter_content = mock.Mock(return_value=iter(json_return_value)) req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.sample_row_keys(request) assert isinstance(response, Iterable) @@ -8143,10 +8862,13 @@ def test_sample_row_keys_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.BigtableRestInterceptor, "post_sample_row_keys" ) as post, mock.patch.object( + transports.BigtableRestInterceptor, "post_sample_row_keys_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.BigtableRestInterceptor, "pre_sample_row_keys" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = bigtable.SampleRowKeysRequest.pb(bigtable.SampleRowKeysRequest()) transcode.return_value = { "method": "post", @@ -8157,6 +8879,7 @@ def test_sample_row_keys_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = bigtable.SampleRowKeysResponse.to_json( bigtable.SampleRowKeysResponse() ) @@ -8169,6 +8892,7 @@ def test_sample_row_keys_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = bigtable.SampleRowKeysResponse() + post_with_metadata.return_value = bigtable.SampleRowKeysResponse(), metadata client.sample_row_keys( request, @@ -8180,6 +8904,7 @@ def test_sample_row_keys_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_mutate_row_rest_bad_request(request_type=bigtable.MutateRowRequest): @@ -8201,6 +8926,7 @@ def test_mutate_row_rest_bad_request(request_type=bigtable.MutateRowRequest): response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.mutate_row(request) @@ -8234,6 +8960,7 @@ def test_mutate_row_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.mutate_row(request) # Establish that the response is the type that we expect. 
@@ -8255,10 +8982,13 @@ def test_mutate_row_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.BigtableRestInterceptor, "post_mutate_row" ) as post, mock.patch.object( + transports.BigtableRestInterceptor, "post_mutate_row_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.BigtableRestInterceptor, "pre_mutate_row" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = bigtable.MutateRowRequest.pb(bigtable.MutateRowRequest()) transcode.return_value = { "method": "post", @@ -8269,6 +8999,7 @@ def test_mutate_row_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = bigtable.MutateRowResponse.to_json(bigtable.MutateRowResponse()) req.return_value.content = return_value @@ -8279,6 +9010,7 @@ def test_mutate_row_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = bigtable.MutateRowResponse() + post_with_metadata.return_value = bigtable.MutateRowResponse(), metadata client.mutate_row( request, @@ -8290,6 +9022,7 @@ def test_mutate_row_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_mutate_rows_rest_bad_request(request_type=bigtable.MutateRowsRequest): @@ -8311,6 +9044,7 @@ def test_mutate_rows_rest_bad_request(request_type=bigtable.MutateRowsRequest): response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.mutate_rows(request) @@ -8345,6 +9079,7 @@ def test_mutate_rows_rest_call_success(request_type): json_return_value = "[{}]".format(json_return_value) response_value.iter_content = mock.Mock(return_value=iter(json_return_value)) req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.mutate_rows(request) assert isinstance(response, Iterable) @@ -8369,10 +9104,13 @@ def test_mutate_rows_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.BigtableRestInterceptor, "post_mutate_rows" ) as post, mock.patch.object( + transports.BigtableRestInterceptor, "post_mutate_rows_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.BigtableRestInterceptor, "pre_mutate_rows" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = bigtable.MutateRowsRequest.pb(bigtable.MutateRowsRequest()) transcode.return_value = { "method": "post", @@ -8383,6 +9121,7 @@ def test_mutate_rows_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = bigtable.MutateRowsResponse.to_json( bigtable.MutateRowsResponse() ) @@ -8395,6 +9134,7 @@ def test_mutate_rows_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = bigtable.MutateRowsResponse() + post_with_metadata.return_value = bigtable.MutateRowsResponse(), metadata client.mutate_rows( request, @@ -8406,6 +9146,7 @@ def test_mutate_rows_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_check_and_mutate_row_rest_bad_request( @@ -8429,6 
+9170,7 @@ def test_check_and_mutate_row_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.check_and_mutate_row(request) @@ -8464,6 +9206,7 @@ def test_check_and_mutate_row_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.check_and_mutate_row(request) # Establish that the response is the type that we expect. @@ -8486,10 +9229,13 @@ def test_check_and_mutate_row_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.BigtableRestInterceptor, "post_check_and_mutate_row" ) as post, mock.patch.object( + transports.BigtableRestInterceptor, "post_check_and_mutate_row_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.BigtableRestInterceptor, "pre_check_and_mutate_row" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = bigtable.CheckAndMutateRowRequest.pb( bigtable.CheckAndMutateRowRequest() ) @@ -8502,6 +9248,7 @@ def test_check_and_mutate_row_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = bigtable.CheckAndMutateRowResponse.to_json( bigtable.CheckAndMutateRowResponse() ) @@ -8514,6 +9261,7 @@ def test_check_and_mutate_row_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = bigtable.CheckAndMutateRowResponse() + post_with_metadata.return_value = bigtable.CheckAndMutateRowResponse(), metadata client.check_and_mutate_row( request, @@ -8525,6 +9273,7 @@ def test_check_and_mutate_row_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_ping_and_warm_rest_bad_request(request_type=bigtable.PingAndWarmRequest): @@ -8546,6 +9295,7 @@ def test_ping_and_warm_rest_bad_request(request_type=bigtable.PingAndWarmRequest response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.ping_and_warm(request) @@ -8579,6 +9329,7 @@ def test_ping_and_warm_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.ping_and_warm(request) # Establish that the response is the type that we expect. 
@@ -8600,10 +9351,13 @@ def test_ping_and_warm_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.BigtableRestInterceptor, "post_ping_and_warm" ) as post, mock.patch.object( + transports.BigtableRestInterceptor, "post_ping_and_warm_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.BigtableRestInterceptor, "pre_ping_and_warm" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = bigtable.PingAndWarmRequest.pb(bigtable.PingAndWarmRequest()) transcode.return_value = { "method": "post", @@ -8614,6 +9368,7 @@ def test_ping_and_warm_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = bigtable.PingAndWarmResponse.to_json( bigtable.PingAndWarmResponse() ) @@ -8626,6 +9381,7 @@ def test_ping_and_warm_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = bigtable.PingAndWarmResponse() + post_with_metadata.return_value = bigtable.PingAndWarmResponse(), metadata client.ping_and_warm( request, @@ -8637,6 +9393,7 @@ def test_ping_and_warm_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_read_modify_write_row_rest_bad_request( @@ -8660,6 +9417,7 @@ def test_read_modify_write_row_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.read_modify_write_row(request) @@ -8693,6 +9451,7 @@ def test_read_modify_write_row_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.read_modify_write_row(request) # Establish that the response is the type that we expect. 
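Each of these REST tests now also stubs a headers mapping on the mocked HTTP response, since the transport reads response headers back after each call. A condensed sketch of that mocking pattern in isolation (the header value is made up; Session is the requests session class patched throughout these tests):

from unittest import mock
from requests.sessions import Session

# The mocked response needs status_code, content/json, and now a headers dict.
response_value = mock.Mock()
response_value.status_code = 200
response_value.headers = {"x-goog-request-id": "abc123"}
with mock.patch.object(Session, "request", return_value=response_value):
    pass  # invoke the client method under test here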
@@ -8714,10 +9473,13 @@ def test_read_modify_write_row_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.BigtableRestInterceptor, "post_read_modify_write_row" ) as post, mock.patch.object( + transports.BigtableRestInterceptor, "post_read_modify_write_row_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.BigtableRestInterceptor, "pre_read_modify_write_row" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = bigtable.ReadModifyWriteRowRequest.pb( bigtable.ReadModifyWriteRowRequest() ) @@ -8730,6 +9492,7 @@ def test_read_modify_write_row_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = bigtable.ReadModifyWriteRowResponse.to_json( bigtable.ReadModifyWriteRowResponse() ) @@ -8742,6 +9505,10 @@ def test_read_modify_write_row_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = bigtable.ReadModifyWriteRowResponse() + post_with_metadata.return_value = ( + bigtable.ReadModifyWriteRowResponse(), + metadata, + ) client.read_modify_write_row( request, @@ -8753,6 +9520,7 @@ def test_read_modify_write_row_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_generate_initial_change_stream_partitions_rest_bad_request( @@ -8776,6 +9544,7 @@ def test_generate_initial_change_stream_partitions_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.generate_initial_change_stream_partitions(request) @@ -8812,6 +9581,7 @@ def test_generate_initial_change_stream_partitions_rest_call_success(request_typ json_return_value = "[{}]".format(json_return_value) response_value.iter_content = mock.Mock(return_value=iter(json_return_value)) req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.generate_initial_change_stream_partitions(request) assert isinstance(response, Iterable) @@ -8837,11 +9607,15 @@ def test_generate_initial_change_stream_partitions_rest_interceptors(null_interc transports.BigtableRestInterceptor, "post_generate_initial_change_stream_partitions", ) as post, mock.patch.object( + transports.BigtableRestInterceptor, + "post_generate_initial_change_stream_partitions_with_metadata", + ) as post_with_metadata, mock.patch.object( transports.BigtableRestInterceptor, "pre_generate_initial_change_stream_partitions", ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = bigtable.GenerateInitialChangeStreamPartitionsRequest.pb( bigtable.GenerateInitialChangeStreamPartitionsRequest() ) @@ -8854,6 +9628,7 @@ def test_generate_initial_change_stream_partitions_rest_interceptors(null_interc req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse.to_json( bigtable.GenerateInitialChangeStreamPartitionsResponse() ) @@ -8866,6 +9641,10 @@ def test_generate_initial_change_stream_partitions_rest_interceptors(null_interc ] pre.return_value = request, metadata post.return_value = 
bigtable.GenerateInitialChangeStreamPartitionsResponse() + post_with_metadata.return_value = ( + bigtable.GenerateInitialChangeStreamPartitionsResponse(), + metadata, + ) client.generate_initial_change_stream_partitions( request, @@ -8877,6 +9656,7 @@ def test_generate_initial_change_stream_partitions_rest_interceptors(null_interc pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_read_change_stream_rest_bad_request( @@ -8900,6 +9680,7 @@ def test_read_change_stream_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.read_change_stream(request) @@ -8934,6 +9715,7 @@ def test_read_change_stream_rest_call_success(request_type): json_return_value = "[{}]".format(json_return_value) response_value.iter_content = mock.Mock(return_value=iter(json_return_value)) req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.read_change_stream(request) assert isinstance(response, Iterable) @@ -8958,10 +9740,13 @@ def test_read_change_stream_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.BigtableRestInterceptor, "post_read_change_stream" ) as post, mock.patch.object( + transports.BigtableRestInterceptor, "post_read_change_stream_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.BigtableRestInterceptor, "pre_read_change_stream" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = bigtable.ReadChangeStreamRequest.pb( bigtable.ReadChangeStreamRequest() ) @@ -8974,6 +9759,7 @@ def test_read_change_stream_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = bigtable.ReadChangeStreamResponse.to_json( bigtable.ReadChangeStreamResponse() ) @@ -8986,6 +9772,7 @@ def test_read_change_stream_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = bigtable.ReadChangeStreamResponse() + post_with_metadata.return_value = bigtable.ReadChangeStreamResponse(), metadata client.read_change_stream( request, @@ -8997,6 +9784,130 @@ def test_read_change_stream_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_prepare_query_rest_bad_request(request_type=bigtable.PrepareQueryRequest): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"instance_name": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.prepare_query(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable.PrepareQueryRequest, + dict, + ], +) +def test_prepare_query_rest_call_success(request_type): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"instance_name": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.PrepareQueryResponse( + prepared_query=b"prepared_query_blob", + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable.PrepareQueryResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.prepare_query(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable.PrepareQueryResponse) + assert response.prepared_query == b"prepared_query_blob" + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_prepare_query_rest_interceptors(null_interceptor): + transport = transports.BigtableRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), + ) + client = BigtableClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableRestInterceptor, "post_prepare_query" + ) as post, mock.patch.object( + transports.BigtableRestInterceptor, "post_prepare_query_with_metadata" + ) as post_with_metadata, mock.patch.object( + transports.BigtableRestInterceptor, "pre_prepare_query" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = bigtable.PrepareQueryRequest.pb(bigtable.PrepareQueryRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = bigtable.PrepareQueryResponse.to_json( + bigtable.PrepareQueryResponse() + ) + req.return_value.content = return_value + + request = bigtable.PrepareQueryRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable.PrepareQueryResponse() + post_with_metadata.return_value = bigtable.PrepareQueryResponse(), metadata 
+ + client.prepare_query( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() def test_execute_query_rest_bad_request(request_type=bigtable.ExecuteQueryRequest): @@ -9018,6 +9929,7 @@ def test_execute_query_rest_bad_request(request_type=bigtable.ExecuteQueryReques response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.execute_query(request) @@ -9052,6 +9964,7 @@ def test_execute_query_rest_call_success(request_type): json_return_value = "[{}]".format(json_return_value) response_value.iter_content = mock.Mock(return_value=iter(json_return_value)) req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.execute_query(request) assert isinstance(response, Iterable) @@ -9076,10 +9989,13 @@ def test_execute_query_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.BigtableRestInterceptor, "post_execute_query" ) as post, mock.patch.object( + transports.BigtableRestInterceptor, "post_execute_query_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.BigtableRestInterceptor, "pre_execute_query" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = bigtable.ExecuteQueryRequest.pb(bigtable.ExecuteQueryRequest()) transcode.return_value = { "method": "post", @@ -9090,6 +10006,7 @@ def test_execute_query_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = bigtable.ExecuteQueryResponse.to_json( bigtable.ExecuteQueryResponse() ) @@ -9102,6 +10019,7 @@ def test_execute_query_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = bigtable.ExecuteQueryResponse() + post_with_metadata.return_value = bigtable.ExecuteQueryResponse(), metadata client.execute_query( request, @@ -9113,6 +10031,7 @@ def test_execute_query_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_initialize_client_w_rest(): @@ -9310,6 +10229,26 @@ def test_read_change_stream_empty_call_rest(): assert args[0] == request_msg +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_prepare_query_empty_call_rest(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.prepare_query), "__call__") as call: + client.prepare_query(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable.PrepareQueryRequest() + + assert args[0] == request_msg + + # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
def test_execute_query_empty_call_rest(): @@ -9902,6 +10841,56 @@ def test_read_modify_write_row_routing_parameters_request_3_rest(): ) +def test_prepare_query_routing_parameters_request_1_rest(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.prepare_query), "__call__") as call: + client.prepare_query( + request={"instance_name": "projects/sample1/instances/sample2"} + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.PrepareQueryRequest( + **{"instance_name": "projects/sample1/instances/sample2"} + ) + + assert args[0] == request_msg + + expected_headers = {"name": "projects/sample1/instances/sample2"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_prepare_query_routing_parameters_request_2_rest(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.prepare_query), "__call__") as call: + client.prepare_query(request={"app_profile_id": "sample1"}) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.PrepareQueryRequest(**{"app_profile_id": "sample1"}) + + assert args[0] == request_msg + + expected_headers = {"app_profile_id": "sample1"} + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + def test_execute_query_routing_parameters_request_1_rest(): client = BigtableClient( credentials=ga_credentials.AnonymousCredentials(), @@ -9923,11 +10912,7 @@ def test_execute_query_routing_parameters_request_1_rest(): assert args[0] == request_msg - # expect app_profile_id while temporary patch is in place: https://github.com/googleapis/python-bigtable/pull/1072 - expected_headers = { - "name": "projects/sample1/instances/sample2", - "app_profile_id": "", - } + expected_headers = {"name": "projects/sample1/instances/sample2"} assert ( gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] ) @@ -9998,6 +10983,7 @@ def test_bigtable_base_transport(): "read_modify_write_row", "generate_initial_change_stream_partitions", "read_change_stream", + "prepare_query", "execute_query", ) for method in methods: @@ -10309,6 +11295,9 @@ def test_bigtable_client_transport_session_collision(transport_name): session1 = client1.transport.read_change_stream._session session2 = client2.transport.read_change_stream._session assert session1 != session2 + session1 = client1.transport.prepare_query._session + session2 = client2.transport.prepare_query._session + assert session1 != session2 session1 = client1.transport.execute_query._session session2 = client2.transport.execute_query._session assert session1 != session2 @@ -10486,10 +11475,36 @@ def test_parse_instance_path(): assert expected == actual -def test_table_path(): +def test_materialized_view_path(): project = "squid" instance = "clam" - table = "whelk" + materialized_view = "whelk" + expected = "projects/{project}/instances/{instance}/materializedViews/{materialized_view}".format( + project=project, + instance=instance, + materialized_view=materialized_view, + ) + actual = BigtableClient.materialized_view_path(project, instance, materialized_view) + assert 
expected == actual + + +def test_parse_materialized_view_path(): + expected = { + "project": "octopus", + "instance": "oyster", + "materialized_view": "nudibranch", + } + path = BigtableClient.materialized_view_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableClient.parse_materialized_view_path(path) + assert expected == actual + + +def test_table_path(): + project = "cuttlefish" + instance = "mussel" + table = "winkle" expected = "projects/{project}/instances/{instance}/tables/{table}".format( project=project, instance=instance, @@ -10501,9 +11516,9 @@ def test_table_path(): def test_parse_table_path(): expected = { - "project": "octopus", - "instance": "oyster", - "table": "nudibranch", + "project": "nautilus", + "instance": "scallop", + "table": "abalone", } path = BigtableClient.table_path(**expected) @@ -10513,7 +11528,7 @@ def test_parse_table_path(): def test_common_billing_account_path(): - billing_account = "cuttlefish" + billing_account = "squid" expected = "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -10523,7 +11538,7 @@ def test_common_billing_account_path(): def test_parse_common_billing_account_path(): expected = { - "billing_account": "mussel", + "billing_account": "clam", } path = BigtableClient.common_billing_account_path(**expected) @@ -10533,7 +11548,7 @@ def test_parse_common_billing_account_path(): def test_common_folder_path(): - folder = "winkle" + folder = "whelk" expected = "folders/{folder}".format( folder=folder, ) @@ -10543,7 +11558,7 @@ def test_common_folder_path(): def test_parse_common_folder_path(): expected = { - "folder": "nautilus", + "folder": "octopus", } path = BigtableClient.common_folder_path(**expected) @@ -10553,7 +11568,7 @@ def test_parse_common_folder_path(): def test_common_organization_path(): - organization = "scallop" + organization = "oyster" expected = "organizations/{organization}".format( organization=organization, ) @@ -10563,7 +11578,7 @@ def test_common_organization_path(): def test_parse_common_organization_path(): expected = { - "organization": "abalone", + "organization": "nudibranch", } path = BigtableClient.common_organization_path(**expected) @@ -10573,7 +11588,7 @@ def test_parse_common_organization_path(): def test_common_project_path(): - project = "squid" + project = "cuttlefish" expected = "projects/{project}".format( project=project, ) @@ -10583,7 +11598,7 @@ def test_common_project_path(): def test_parse_common_project_path(): expected = { - "project": "clam", + "project": "mussel", } path = BigtableClient.common_project_path(**expected) @@ -10593,8 +11608,8 @@ def test_parse_common_project_path(): def test_common_location_path(): - project = "whelk" - location = "octopus" + project = "winkle" + location = "nautilus" expected = "projects/{project}/locations/{location}".format( project=project, location=location, @@ -10605,8 +11620,8 @@ def test_common_location_path(): def test_parse_common_location_path(): expected = { - "project": "oyster", - "location": "nudibranch", + "project": "scallop", + "location": "abalone", } path = BigtableClient.common_location_path(**expected) From 97e5f248e04260ef983baf52d68bf6d22ae49f5f Mon Sep 17 00:00:00 2001 From: Anthonios Partheniou Date: Mon, 17 Mar 2025 11:07:22 -0400 Subject: [PATCH 854/892] fix: Allow protobuf 6.x (#1092) --- packages/google-cloud-bigtable/setup.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/packages/google-cloud-bigtable/setup.py 
b/packages/google-cloud-bigtable/setup.py index 23eb8d3607c9..2e51249e5e5b 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -37,13 +37,13 @@ # 'Development Status :: 5 - Production/Stable' release_status = "Development Status :: 5 - Production/Stable" dependencies = [ - "google-api-core[grpc] >= 2.16.0, <3.0.0dev", - "google-cloud-core >= 1.4.4, <3.0.0dev", - "google-auth >= 2.14.1, <3.0.0dev,!=2.24.0,!=2.25.0", - "grpc-google-iam-v1 >= 0.12.4, <1.0.0dev", - "proto-plus >= 1.22.3, <2.0.0dev", - "proto-plus >= 1.25.0, <2.0.0dev; python_version>='3.13'", - "protobuf>=3.20.2,<6.0.0dev,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5", + "google-api-core[grpc] >= 2.16.0, <3.0.0", + "google-cloud-core >= 1.4.4, <3.0.0", + "google-auth >= 2.14.1, <3.0.0,!=2.24.0,!=2.25.0", + "grpc-google-iam-v1 >= 0.12.4, <1.0.0", + "proto-plus >= 1.22.3, <2.0.0", + "proto-plus >= 1.25.0, <2.0.0; python_version>='3.13'", + "protobuf>=3.20.2,<7.0.0,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5", ] extras = {"libcst": "libcst >= 0.2.5"} From 4cc0fc71099854d10ae29cd56ffed42fb01fe04c Mon Sep 17 00:00:00 2001 From: Jack Dingilian Date: Tue, 18 Mar 2025 16:46:59 -0400 Subject: [PATCH 855/892] feat: Update ExecuteQuery to use Prepare (#1100) * feat: update execute_query to use PrepareQuery API (#1095) * feat: Implement updated execute query protocol (#1096) * feat: Refactor Metadata, add system tests, remove preview warning (#1099) * Fix setup.py merge * fix: skip sql tests for emulator --- .../cloud/bigtable/data/_async/client.py | 77 ++- .../google/cloud/bigtable/data/_helpers.py | 28 +- .../bigtable/data/_sync_autogen/client.py | 74 ++- .../google/cloud/bigtable/data/exceptions.py | 4 + .../bigtable/data/execute_query/__init__.py | 2 - .../_async/execute_query_iterator.py | 120 +++-- .../data/execute_query/_byte_cursor.py | 101 ++-- .../bigtable/data/execute_query/_checksum.py | 43 ++ .../execute_query/_parameters_formatting.py | 28 +- .../bigtable/data/execute_query/_reader.py | 87 ++-- .../_sync_autogen/execute_query_iterator.py | 111 ++-- .../bigtable/data/execute_query/metadata.py | 14 +- packages/google-cloud-bigtable/setup.py | 1 + .../google-cloud-bigtable/tests/_testing.py | 36 -- .../tests/system/data/test_system_async.py | 78 +++ .../tests/system/data/test_system_autogen.py | 65 +++ .../tests/unit/_testing.py | 16 - .../tests/unit/data/_async/test_client.py | 482 +++++++++--------- .../unit/data/_sync_autogen/test_client.py | 466 ++++++++--------- .../tests/unit/data/_testing.py | 18 - .../_async/test_query_iterator.py | 212 ++++++-- .../_sync_autogen/test_query_iterator.py | 191 +++++-- .../tests/unit/data/execute_query/_testing.py | 17 - .../unit/data/execute_query/sql_helpers.py | 212 ++++++++ .../data/execute_query/test_byte_cursor.py | 194 +++---- .../unit/data/execute_query/test_checksum.py | 59 +++ .../test_execute_query_parameters_parsing.py | 19 + .../test_query_result_parsing_utils.py | 20 +- .../test_query_result_row_reader.py | 191 +++---- .../tests/unit/data/test__helpers.py | 33 ++ .../tests/unit/v2_client/_testing.py | 3 - 31 files changed, 1871 insertions(+), 1131 deletions(-) create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_checksum.py delete mode 100644 packages/google-cloud-bigtable/tests/_testing.py delete mode 100644 packages/google-cloud-bigtable/tests/unit/_testing.py delete mode 100644 packages/google-cloud-bigtable/tests/unit/data/_testing.py delete mode 100644 
packages/google-cloud-bigtable/tests/unit/data/execute_query/_testing.py create mode 100644 packages/google-cloud-bigtable/tests/unit/data/execute_query/sql_helpers.py create mode 100644 packages/google-cloud-bigtable/tests/unit/data/execute_query/test_checksum.py diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py index 4d52c64c2e5d..3c5093d10ad5 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py @@ -35,9 +35,13 @@ from grpc import Channel from google.cloud.bigtable.data.execute_query.values import ExecuteQueryValueType -from google.cloud.bigtable.data.execute_query.metadata import SqlType +from google.cloud.bigtable.data.execute_query.metadata import ( + SqlType, + _pb_metadata_to_metadata_types, +) from google.cloud.bigtable.data.execute_query._parameters_formatting import ( _format_execute_query_params, + _to_param_types, ) from google.cloud.bigtable_v2.services.bigtable.transports.base import ( DEFAULT_CLIENT_INFO, @@ -59,7 +63,7 @@ from google.cloud.bigtable.data.exceptions import FailedQueryShardError from google.cloud.bigtable.data.exceptions import ShardedReadRowsExceptionGroup -from google.cloud.bigtable.data._helpers import TABLE_DEFAULT +from google.cloud.bigtable.data._helpers import TABLE_DEFAULT, _align_timeouts from google.cloud.bigtable.data._helpers import _WarmedInstanceKey from google.cloud.bigtable.data._helpers import _CONCURRENCY_LIMIT from google.cloud.bigtable.data._helpers import _retry_exception_factory @@ -542,6 +546,12 @@ async def execute_query( ServiceUnavailable, Aborted, ), + prepare_operation_timeout: float = 60, + prepare_attempt_timeout: float | None = 20, + prepare_retryable_errors: Sequence[type[Exception]] = ( + DeadlineExceeded, + ServiceUnavailable, + ), ) -> "ExecuteQueryIteratorAsync": """ Executes an SQL query on an instance. @@ -550,6 +560,10 @@ async def execute_query( Failed requests within operation_timeout will be retried based on the retryable_errors list until operation_timeout is reached. + Note that this makes two requests, one to ``PrepareQuery`` and one to ``ExecuteQuery``. + These have separate retry configurations. ``ExecuteQuery`` is where the bulk of the + work happens. + Args: query: Query to be run on Bigtable instance. The query can use ``@param`` placeholders to use parameter interpolation on the server. Values for all @@ -566,16 +580,26 @@ async def execute_query( an empty dict). app_profile_id: The app profile to associate with requests. https://cloud.google.com/bigtable/docs/app-profiles - operation_timeout: the time budget for the entire operation, in seconds. + operation_timeout: the time budget for the entire executeQuery operation, in seconds. Failed requests will be retried within the budget. Defaults to 600 seconds. - attempt_timeout: the time budget for an individual network request, in seconds. + attempt_timeout: the time budget for an individual executeQuery network request, in seconds. If it takes longer than this time to complete, the request will be cancelled with a DeadlineExceeded exception, and a retry will be attempted. Defaults to the 20 seconds. If None, defaults to operation_timeout. - retryable_errors: a list of errors that will be retried if encountered. + retryable_errors: a list of errors that will be retried if encountered during executeQuery. 
Defaults to 4 (DeadlineExceeded), 14 (ServiceUnavailable), and 10 (Aborted) + prepare_operation_timeout: the time budget for the entire prepareQuery operation, in seconds. + Failed requests will be retried within the budget. + Defaults to 60 seconds. + prepare_attempt_timeout: the time budget for an individual prepareQuery network request, in seconds. + If it takes longer than this time to complete, the request will be cancelled with + a DeadlineExceeded exception, and a retry will be attempted. + Defaults to the 20 seconds. + If None, defaults to prepare_operation_timeout. + prepare_retryable_errors: a list of errors that will be retried if encountered during prepareQuery. + Defaults to 4 (DeadlineExceeded) and 14 (ServiceUnavailable) Returns: ExecuteQueryIteratorAsync: an asynchronous iterator that yields rows returned by the query Raises: @@ -586,30 +610,59 @@ async def execute_query( google.cloud.bigtable.data.exceptions.ParameterTypeInferenceFailed: Raised if a parameter is passed without an explicit type, and the type cannot be infered """ - warnings.warn( - "ExecuteQuery is in preview and may change in the future.", - category=RuntimeWarning, + instance_name = self._gapic_client.instance_path(self.project, instance_id) + converted_param_types = _to_param_types(parameters, parameter_types) + prepare_request = { + "instance_name": instance_name, + "query": query, + "app_profile_id": app_profile_id, + "param_types": converted_param_types, + "proto_format": {}, + } + prepare_predicate = retries.if_exception_type( + *[_get_error_type(e) for e in prepare_retryable_errors] + ) + prepare_operation_timeout, prepare_attempt_timeout = _align_timeouts( + prepare_operation_timeout, prepare_attempt_timeout + ) + prepare_sleep_generator = retries.exponential_sleep_generator(0.01, 2, 60) + + target = partial( + self._gapic_client.prepare_query, + request=prepare_request, + timeout=prepare_attempt_timeout, + retry=None, + ) + prepare_result = await CrossSync.retry_target( + target, + prepare_predicate, + prepare_sleep_generator, + prepare_operation_timeout, + exception_factory=_retry_exception_factory, ) + prepare_metadata = _pb_metadata_to_metadata_types(prepare_result.metadata) + retryable_excs = [_get_error_type(e) for e in retryable_errors] pb_params = _format_execute_query_params(parameters, parameter_types) - instance_name = self._gapic_client.instance_path(self.project, instance_id) - request_body = { "instance_name": instance_name, "app_profile_id": app_profile_id, - "query": query, + "prepared_query": prepare_result.prepared_query, "params": pb_params, - "proto_format": {}, } + operation_timeout, attempt_timeout = _align_timeouts( + operation_timeout, attempt_timeout + ) return CrossSync.ExecuteQueryIterator( self, instance_id, app_profile_id, request_body, + prepare_metadata, attempt_timeout, operation_timeout, retryable_excs=retryable_excs, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_helpers.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_helpers.py index 4c45e5c1c7dd..a70ebfb6d98e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_helpers.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_helpers.py @@ -136,7 +136,7 @@ def _get_timeouts( attempt: The timeout value to use for each attempt, in seconds. table: The table to use for default values. 
Returns: - typle[float, float]: A tuple of (operation_timeout, attempt_timeout) + tuple[float, float]: A tuple of (operation_timeout, attempt_timeout) """ # load table defaults if necessary if operation == TABLE_DEFAULT.DEFAULT: @@ -154,15 +154,33 @@ def _get_timeouts( elif attempt == TABLE_DEFAULT.MUTATE_ROWS: attempt = table.default_mutate_rows_attempt_timeout + return _align_timeouts(final_operation, attempt) + + +def _align_timeouts(operation: float, attempt: float | None) -> tuple[float, float]: + """ + Convert passed in timeout values to floats. + + attempt will use operation value if None, or if larger than operation. + + Will call _validate_timeouts on the outputs, and raise ValueError if the + resulting timeouts are invalid. + + Args: + operation: The timeout value to use for the entire operation, in seconds. + attempt: The timeout value to use for each attempt, in seconds. + Returns: + tuple[float, float]: A tuple of (operation_timeout, attempt_timeout) + """ if attempt is None: # no timeout specified, use operation timeout for both - final_attempt = final_operation + final_attempt = operation else: # cap attempt timeout at operation timeout - final_attempt = min(attempt, final_operation) if final_operation else attempt + final_attempt = min(attempt, operation) if operation else attempt - _validate_timeouts(final_operation, final_attempt, allow_none=False) - return final_operation, final_attempt + _validate_timeouts(operation, final_attempt, allow_none=False) + return operation, final_attempt def _validate_timeouts( diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/client.py index 7b1e72ad6f67..5e21c1f518a2 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/client.py @@ -26,9 +26,13 @@ from functools import partial from grpc import Channel from google.cloud.bigtable.data.execute_query.values import ExecuteQueryValueType -from google.cloud.bigtable.data.execute_query.metadata import SqlType +from google.cloud.bigtable.data.execute_query.metadata import ( + SqlType, + _pb_metadata_to_metadata_types, +) from google.cloud.bigtable.data.execute_query._parameters_formatting import ( _format_execute_query_params, + _to_param_types, ) from google.cloud.bigtable_v2.services.bigtable.transports.base import ( DEFAULT_CLIENT_INFO, @@ -48,7 +52,7 @@ from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery from google.cloud.bigtable.data.exceptions import FailedQueryShardError from google.cloud.bigtable.data.exceptions import ShardedReadRowsExceptionGroup -from google.cloud.bigtable.data._helpers import TABLE_DEFAULT +from google.cloud.bigtable.data._helpers import TABLE_DEFAULT, _align_timeouts from google.cloud.bigtable.data._helpers import _WarmedInstanceKey from google.cloud.bigtable.data._helpers import _CONCURRENCY_LIMIT from google.cloud.bigtable.data._helpers import _retry_exception_factory @@ -404,6 +408,12 @@ def execute_query( ServiceUnavailable, Aborted, ), + prepare_operation_timeout: float = 60, + prepare_attempt_timeout: float | None = 20, + prepare_retryable_errors: Sequence[type[Exception]] = ( + DeadlineExceeded, + ServiceUnavailable, + ), ) -> "ExecuteQueryIterator": """Executes an SQL query on an instance. Returns an iterator to asynchronously stream back columns from selected rows. 
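The execute_query changes above split the call into a PrepareQuery step followed by ExecuteQuery, each with its own timeout and retry settings, and _align_timeouts caps each attempt timeout at its operation timeout. A minimal usage sketch against the async data client (project, instance, table, and query are placeholders; the row and metadata accessors are assumptions about the data client's public surface, not shown in this hunk):

import asyncio
from google.cloud.bigtable.data import BigtableDataClientAsync


async def main():
    async with BigtableDataClientAsync(project="my-project") as client:
        iterator = await client.execute_query(
            "SELECT _key FROM my_table WHERE _key = @row_key",
            instance_id="my-instance",
            parameters={"row_key": b"row-1"},
            operation_timeout=600,          # ExecuteQuery budget (existing)
            attempt_timeout=20,
            prepare_operation_timeout=60,   # PrepareQuery budget (new in this patch)
            prepare_attempt_timeout=20,
        )
        async for row in iterator:
            print(row["_key"])
        # metadata is finalized once the first row (or an empty result stream)
        # has been received; before that it raises EarlyMetadataCallError.
        print(iterator.metadata.columns)


asyncio.run(main())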
@@ -411,6 +421,10 @@ def execute_query( Failed requests within operation_timeout will be retried based on the retryable_errors list until operation_timeout is reached. + Note that this makes two requests, one to ``PrepareQuery`` and one to ``ExecuteQuery``. + These have separate retry configurations. ``ExecuteQuery`` is where the bulk of the + work happens. + Args: query: Query to be run on Bigtable instance. The query can use ``@param`` placeholders to use parameter interpolation on the server. Values for all @@ -427,16 +441,26 @@ def execute_query( an empty dict). app_profile_id: The app profile to associate with requests. https://cloud.google.com/bigtable/docs/app-profiles - operation_timeout: the time budget for the entire operation, in seconds. + operation_timeout: the time budget for the entire executeQuery operation, in seconds. Failed requests will be retried within the budget. Defaults to 600 seconds. - attempt_timeout: the time budget for an individual network request, in seconds. + attempt_timeout: the time budget for an individual executeQuery network request, in seconds. If it takes longer than this time to complete, the request will be cancelled with a DeadlineExceeded exception, and a retry will be attempted. Defaults to the 20 seconds. If None, defaults to operation_timeout. - retryable_errors: a list of errors that will be retried if encountered. + retryable_errors: a list of errors that will be retried if encountered during executeQuery. Defaults to 4 (DeadlineExceeded), 14 (ServiceUnavailable), and 10 (Aborted) + prepare_operation_timeout: the time budget for the entire prepareQuery operation, in seconds. + Failed requests will be retried within the budget. + Defaults to 60 seconds. + prepare_attempt_timeout: the time budget for an individual prepareQuery network request, in seconds. + If it takes longer than this time to complete, the request will be cancelled with + a DeadlineExceeded exception, and a retry will be attempted. + Defaults to the 20 seconds. + If None, defaults to prepare_operation_timeout. + prepare_retryable_errors: a list of errors that will be retried if encountered during prepareQuery. 
+ Defaults to 4 (DeadlineExceeded) and 14 (ServiceUnavailable) Returns: ExecuteQueryIterator: an asynchronous iterator that yields rows returned by the query Raises: @@ -447,25 +471,53 @@ def execute_query( google.cloud.bigtable.data.exceptions.ParameterTypeInferenceFailed: Raised if a parameter is passed without an explicit type, and the type cannot be infered """ - warnings.warn( - "ExecuteQuery is in preview and may change in the future.", - category=RuntimeWarning, + instance_name = self._gapic_client.instance_path(self.project, instance_id) + converted_param_types = _to_param_types(parameters, parameter_types) + prepare_request = { + "instance_name": instance_name, + "query": query, + "app_profile_id": app_profile_id, + "param_types": converted_param_types, + "proto_format": {}, + } + prepare_predicate = retries.if_exception_type( + *[_get_error_type(e) for e in prepare_retryable_errors] + ) + (prepare_operation_timeout, prepare_attempt_timeout) = _align_timeouts( + prepare_operation_timeout, prepare_attempt_timeout + ) + prepare_sleep_generator = retries.exponential_sleep_generator(0.01, 2, 60) + target = partial( + self._gapic_client.prepare_query, + request=prepare_request, + timeout=prepare_attempt_timeout, + retry=None, ) + prepare_result = CrossSync._Sync_Impl.retry_target( + target, + prepare_predicate, + prepare_sleep_generator, + prepare_operation_timeout, + exception_factory=_retry_exception_factory, + ) + prepare_metadata = _pb_metadata_to_metadata_types(prepare_result.metadata) retryable_excs = [_get_error_type(e) for e in retryable_errors] pb_params = _format_execute_query_params(parameters, parameter_types) - instance_name = self._gapic_client.instance_path(self.project, instance_id) request_body = { "instance_name": instance_name, "app_profile_id": app_profile_id, - "query": query, + "prepared_query": prepare_result.prepared_query, "params": pb_params, - "proto_format": {}, } + (operation_timeout, attempt_timeout) = _align_timeouts( + operation_timeout, attempt_timeout + ) return CrossSync._Sync_Impl.ExecuteQueryIterator( self, instance_id, app_profile_id, request_body, + prepare_metadata, attempt_timeout, operation_timeout, retryable_excs=retryable_excs, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/exceptions.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/exceptions.py index 62f0b62fc9b1..54ca308535b8 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/exceptions.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/exceptions.py @@ -334,3 +334,7 @@ class InvalidExecuteQueryResponse(core_exceptions.GoogleAPICallError): class ParameterTypeInferenceFailed(ValueError): """Exception raised when query parameter types were not provided and cannot be inferred.""" + + +class EarlyMetadataCallError(RuntimeError): + """Execption raised when metadata is request from an ExecuteQueryIterator before the first row has been read, or the query has completed""" diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/__init__.py index 31fd5e3cca14..029e79b9390a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/__init__.py @@ -20,7 +20,6 @@ ) from google.cloud.bigtable.data.execute_query.metadata import ( Metadata, - ProtoMetadata, SqlType, ) from 
google.cloud.bigtable.data.execute_query.values import ( @@ -39,7 +38,6 @@ "QueryResultRow", "Struct", "Metadata", - "ProtoMetadata", "ExecuteQueryIteratorAsync", "ExecuteQueryIterator", ] diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py index a8f60be36820..d3ca890b4c61 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py @@ -29,15 +29,19 @@ _attempt_timeout_generator, _retry_exception_factory, ) -from google.cloud.bigtable.data.exceptions import InvalidExecuteQueryResponse +from google.cloud.bigtable.data.exceptions import ( + EarlyMetadataCallError, + InvalidExecuteQueryResponse, +) from google.cloud.bigtable.data.execute_query.values import QueryResultRow -from google.cloud.bigtable.data.execute_query.metadata import Metadata, ProtoMetadata +from google.cloud.bigtable.data.execute_query.metadata import Metadata from google.cloud.bigtable.data.execute_query._reader import ( _QueryResultRowReader, _Reader, ) from google.cloud.bigtable_v2.types.bigtable import ( ExecuteQueryRequest as ExecuteQueryRequestPB, + ExecuteQueryResponse, ) from google.cloud.bigtable.data._cross_sync import CrossSync @@ -53,6 +57,14 @@ ) +def _has_resume_token(response: ExecuteQueryResponse) -> bool: + response_pb = response._pb # proto-plus attribute retrieval is slow. + if response_pb.HasField("results"): + results = response_pb.results + return len(results.resume_token) > 0 + return False + + @CrossSync.convert_class(sync_name="ExecuteQueryIterator") class ExecuteQueryIteratorAsync: @CrossSync.convert( @@ -70,6 +82,7 @@ def __init__( instance_id: str, app_profile_id: Optional[str], request_body: Dict[str, Any], + prepare_metadata: Metadata, attempt_timeout: float | None, operation_timeout: float, req_metadata: Sequence[Tuple[str, str]] = (), @@ -78,6 +91,9 @@ def __init__( """ Collects responses from ExecuteQuery requests and parses them into QueryResultRows. + **Please Note** this is not meant to be constructed directly by applications. It should always + be created via the client. The constructor is subject to change. + It is **not thread-safe**. It should not be used by multiple {TASK_OR_THREAD}. Args: @@ -93,13 +109,17 @@ def __init__( retryable_excs: a list of errors that will be retried if encountered. 
Raises: {NO_LOOP} + :class:`ValueError ` as a safeguard if data is processed in an unexpected state """ self._table_name = None self._app_profile_id = app_profile_id self._client = client self._instance_id = instance_id - self._byte_cursor = _ByteCursor[ProtoMetadata]() - self._reader: _Reader[QueryResultRow] = _QueryResultRowReader(self._byte_cursor) + self._prepare_metadata = prepare_metadata + self._final_metadata = None + self._byte_cursor = _ByteCursor() + self._reader: _Reader[QueryResultRow] = _QueryResultRowReader() + self.has_received_token = False self._result_generator = self._next_impl() self._register_instance_task = None self._is_closed = False @@ -118,7 +138,7 @@ def __init__( try: self._register_instance_task = CrossSync.create_task( self._client._register_instance, - instance_id, + self._instance_id, self, sync_executor=self._client._executor, ) @@ -161,31 +181,28 @@ async def _make_request_with_resume_token(self): retry=None, ) - @CrossSync.convert(replace_symbols={"__anext__": "__next__"}) - async def _fetch_metadata(self) -> None: - """ - If called before the first response was recieved, the first response - is retrieved as part of this call. - """ - if self._byte_cursor.metadata is None: - metadata_msg = await self._stream.__anext__() - self._byte_cursor.consume_metadata(metadata_msg) - @CrossSync.convert async def _next_impl(self) -> CrossSync.Iterator[QueryResultRow]: """ Generator wrapping the response stream which parses the stream results and returns full `QueryResultRow`s. """ - await self._fetch_metadata() - async for response in self._stream: try: - bytes_to_parse = self._byte_cursor.consume(response) - if bytes_to_parse is None: - continue + # we've received a resume token, so we can finalize the metadata + if self._final_metadata is None and _has_resume_token(response): + self._finalize_metadata() - results = self._reader.consume(bytes_to_parse) + batches_to_parse = self._byte_cursor.consume(response) + if not batches_to_parse: + continue + # metadata must be set at this point since there must be a resume_token + # for byte_cursor to yield data + if not self.metadata: + raise ValueError( + "Error parsing response before finalizing metadata" + ) + results = self._reader.consume(batches_to_parse, self.metadata) if results is None: continue @@ -196,10 +213,19 @@ async def _next_impl(self) -> CrossSync.Iterator[QueryResultRow]: for result in results: yield result + # this means the stream has finished with no responses. In that case we know the + # latest_prepare_reponses was used successfully so we can finalize the metadata + if self._final_metadata is None: + self._finalize_metadata() await self.close() @CrossSync.convert(sync_name="__next__", replace_symbols={"__anext__": "__next__"}) async def __anext__(self) -> QueryResultRow: + """ + Yields QueryResultRows representing the results of the query. + + :raises: :class:`ValueError ` as a safeguard if data is processed in an unexpected state + """ if self._is_closed: raise CrossSync.StopIteration return await self._result_generator.__anext__() @@ -209,28 +235,56 @@ def __aiter__(self): return self @CrossSync.convert - async def metadata(self) -> Optional[Metadata]: + def _finalize_metadata(self) -> None: """ - Returns query metadata from the server or None if the iterator was - explicitly closed. + Sets _final_metadata to the metadata of the latest prepare_response. + The iterator should call this after either the first resume token is received or the + stream completes succesfully with no responses. 
+ + This can't be set on init because the metadata will be able to change due to plan refresh. + Plan refresh isn't implemented yet, but we want functionality to stay the same when it is. + + For example the following scenario for query "SELECT * FROM table": + - Make a request, table has one column family 'cf' + - Return an incomplete batch + - request fails with transient error + - Meanwhile the table has had a second column family added 'cf2' + - Retry the request, get an error indicating the `prepared_query` has expired + - Refresh the prepared_query and retry the request, the new prepared_query + contains both 'cf' & 'cf2' + - It sends a new incomplete batch and resets the old outdated batch + - It send the next chunk with a checksum and resume_token, closing the batch. + In this we need to use the updated schema from the refreshed prepare request. """ - if self._is_closed: - return None - # Metadata should be present in the first response in a stream. - if self._byte_cursor.metadata is None: - try: - await self._fetch_metadata() - except CrossSync.StopIteration: - return None - return self._byte_cursor.metadata + self._final_metadata = self._prepare_metadata + + @property + def metadata(self) -> Metadata: + """ + Returns query metadata from the server or None if the iterator has been closed + or if metadata has not been set yet. + + Metadata will not be set until the first row has been yielded or response with no rows + completes. + + raises: :class:`EarlyMetadataCallError` when called before the first row has been returned + or the iterator has completed with no rows in the response. + """ + if not self._final_metadata: + raise EarlyMetadataCallError() + return self._final_metadata @CrossSync.convert async def close(self) -> None: """ Cancel all background tasks. Should be called all rows were processed. + + :raises: :class:`ValueError ` if called in an invalid state """ if self._is_closed: return + if not self._byte_cursor.empty(): + raise ValueError("Unexpected buffered data at end of executeQuery reqest") self._is_closed = True if self._register_instance_task is not None: self._register_instance_task.cancel() diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_byte_cursor.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_byte_cursor.py index 60f23f54127e..16eacbe9b81d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_byte_cursor.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_byte_cursor.py @@ -12,24 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Any, Generic, Optional, TypeVar +from typing import List, Optional +from google.cloud.bigtable.data.execute_query._checksum import _CRC32C from google.cloud.bigtable_v2 import ExecuteQueryResponse -from google.cloud.bigtable.data.execute_query.metadata import ( - Metadata, - _pb_metadata_to_metadata_types, -) -MT = TypeVar("MT", bound=Metadata) # metadata type - -class _ByteCursor(Generic[MT]): +class _ByteCursor: """ Buffers bytes from `ExecuteQuery` responses until resume_token is received or end-of-stream is reached. :class:`google.cloud.bigtable_v2.types.bigtable.ExecuteQueryResponse` obtained from - the server should be passed to ``consume`` or ``consume_metadata`` methods and its non-None - results should be passed to appropriate - :class:`google.cloud.bigtable.execute_query_reader._Reader` for parsing gathered bytes. 
+ the server should be passed to the ``consume`` method and its non-None results should be passed + to appropriate :class:`google.cloud.bigtable.execute_query_reader._Reader` for parsing gathered + bytes. This class consumes data obtained externally to be usable in both sync and async clients. @@ -37,19 +32,13 @@ class _ByteCursor(Generic[MT]): """ def __init__(self): - self._metadata: Optional[MT] = None - self._buffer = bytearray() + self._batch_buffer = bytearray() + self._batches: List[bytes] = [] self._resume_token = None - self._last_response_results_field = None - @property - def metadata(self) -> Optional[MT]: - """ - Returns: - Metadata or None: Metadata read from the first response of the stream - or None if no response was consumed yet. - """ - return self._metadata + def reset(self): + self._batch_buffer = bytearray() + self._batches = [] def prepare_for_new_request(self): """ @@ -67,40 +56,15 @@ def prepare_for_new_request(self): Returns: bytes: Last received resume_token. """ - self._buffer = bytearray() - # metadata is sent in the first response in a stream, - # if we've already received one, but it was not already commited - # by a subsequent resume_token, then we should clear it as well. - if not self._resume_token: - self._metadata = None - + # The first response of any retried stream will always contain reset, so + # this isn't actually necessary, but we do it for safety + self.reset() return self._resume_token - def consume_metadata(self, response: ExecuteQueryResponse) -> None: - """ - Reads metadata from first response of ``ExecuteQuery`` responses stream. - Should be called only once. - - Args: - response (google.cloud.bigtable_v2.types.bigtable.ExecuteQueryResponse): First response - from the stream. - - Raises: - ValueError: If this method was already called or if metadata received from the server - cannot be parsed. - """ - if self._metadata is not None: - raise ValueError("Invalid state - metadata already consumed") - - if "metadata" in response: - metadata: Any = _pb_metadata_to_metadata_types(response.metadata) - self._metadata = metadata - else: - raise ValueError("Invalid parameter - response without metadata") - - return None + def empty(self) -> bool: + return not self._batch_buffer and not self._batches - def consume(self, response: ExecuteQueryResponse) -> Optional[bytes]: + def consume(self, response: ExecuteQueryResponse) -> Optional[List[bytes]]: """ Reads results bytes from an ``ExecuteQuery`` response and adds them to a buffer. @@ -116,7 +80,8 @@ def consume(self, response: ExecuteQueryResponse) -> Optional[bytes]: Response obtained from the stream. Returns: - bytes or None: bytes if buffers were flushed or None otherwise. + bytes or None: List of bytes if buffers were flushed or None otherwise. + Each element in the list represents the bytes of a `ProtoRows` message. 
Raises: ValueError: If provided ``ExecuteQueryResponse`` is not valid @@ -127,18 +92,32 @@ def consume(self, response: ExecuteQueryResponse) -> Optional[bytes]: if response_pb.HasField("results"): results = response_pb.results + if results.reset: + self.reset() if results.HasField("proto_rows_batch"): - self._buffer.extend(results.proto_rows_batch.batch_data) + self._batch_buffer.extend(results.proto_rows_batch.batch_data) + # Note that 0 is a valid checksum so we must check for field presence + if results.HasField("batch_checksum"): + expected_checksum = results.batch_checksum + checksum = _CRC32C.checksum(self._batch_buffer) + if expected_checksum != checksum: + raise ValueError( + f"Unexpected checksum mismatch. Expected: {expected_checksum}, got: {checksum}" + ) + # We have a complete batch so we move it to batches and reset the + # batch_buffer + self._batches.append(memoryview(self._batch_buffer)) + self._batch_buffer = bytearray() if results.resume_token: self._resume_token = results.resume_token - if self._buffer: - return_value = memoryview(self._buffer) - self._buffer = bytearray() + if self._batches: + if self._batch_buffer: + raise ValueError("Unexpected resume_token without checksum") + return_value = self._batches + self._batches = [] return return_value - elif response_pb.HasField("metadata"): - self.consume_metadata(response) else: - raise ValueError(f"Invalid ExecuteQueryResponse: {response}") + raise ValueError(f"Unexpected ExecuteQueryResponse: {response}") return None diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_checksum.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_checksum.py new file mode 100644 index 000000000000..b45a164d5835 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_checksum.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings + +with warnings.catch_warnings(record=True) as import_warning: + import google_crc32c # type: ignore + + +class _CRC32C(object): + """ + Wrapper around ``google_crc32c`` library + """ + + warn_emitted = False + + @classmethod + def checksum(cls, val: bytearray) -> int: + """ + Returns the crc32c checksum of the data. + """ + if import_warning and not cls.warn_emitted: + cls.warn_emitted = True + warnings.warn( + "Using pure python implementation of `google-crc32` for ExecuteQuery response " + "validation. This is significantly slower than the c extension. 
If possible, " + "run in an environment that supports the c extension.", + RuntimeWarning, + ) + memory_view = memoryview(val) + return google_crc32c.value(bytes(memory_view)) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_parameters_formatting.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_parameters_formatting.py index eadda21f4a44..ed7e946e8455 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_parameters_formatting.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_parameters_formatting.py @@ -20,12 +20,13 @@ from google.cloud.bigtable.data.exceptions import ParameterTypeInferenceFailed from google.cloud.bigtable.data.execute_query.metadata import SqlType from google.cloud.bigtable.data.execute_query.values import ExecuteQueryValueType +from google.cloud.bigtable_v2.types.data import Value def _format_execute_query_params( params: Optional[Dict[str, ExecuteQueryValueType]], parameter_types: Optional[Dict[str, SqlType.Type]], -) -> Any: +) -> Dict[str, Value]: """ Takes a dictionary of param_name -> param_value and optionally parameter types. If the parameters types are not provided, this function tries to infer them. @@ -70,6 +71,31 @@ def _format_execute_query_params( return result_values +def _to_param_types( + params: Optional[Dict[str, ExecuteQueryValueType]], + param_types: Optional[Dict[str, SqlType.Type]], +) -> Dict[str, Dict[str, Any]]: + """ + Takes the params and user supplied types and creates a param_type dict for the PrepareQuery api + + Args: + params: Dict of param name to param value + param_types: Dict of param name to param type for params with types that cannot be inferred + + Returns: + Dict containing the param name and type for each parameter + """ + if params is None: + return {} + formatted_types = {} + for param_key, param_value in params.items(): + if param_types and param_key in param_types: + formatted_types[param_key] = param_types[param_key]._to_type_pb_dict() + else: + formatted_types[param_key] = _detect_type(param_value)._to_type_pb_dict() + return formatted_types + + def _convert_value_to_pb_value_dict( value: ExecuteQueryValueType, param_type: SqlType.Type ) -> Any: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_reader.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_reader.py index 9c0259cde638..d9507fe350ca 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_reader.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_reader.py @@ -13,18 +13,16 @@ # limitations under the License. 
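
As an aside on the checksum handling introduced above: each `proto_rows_batch` chunk is appended to a batch buffer, a present `batch_checksum` closes the batch after a CRC32C comparison, and a `resume_token` flushes all completed batches. Below is a minimal, self-contained sketch of that flow using plain dicts in place of `ExecuteQueryResponse` protos; it is illustrative only and not the library's actual code.

    # Illustrative only: mirrors the buffering/validation order used by _ByteCursor.consume,
    # with plain dicts standing in for ExecuteQueryResponse (an assumption for brevity).
    import google_crc32c

    def consume_chunks(chunks):
        """Accumulate batch_data, close batches on batch_checksum, flush on resume_token."""
        batch_buffer = bytearray()
        batches = []
        for chunk in chunks:
            if chunk.get("reset"):
                batch_buffer = bytearray()
                batches = []
            batch_buffer.extend(chunk.get("batch_data", b""))
            if "batch_checksum" in chunk:  # 0 is a valid checksum, so check presence
                if google_crc32c.value(bytes(batch_buffer)) != chunk["batch_checksum"]:
                    raise ValueError("checksum mismatch")
                batches.append(bytes(batch_buffer))
                batch_buffer = bytearray()
            if chunk.get("resume_token") and batches:
                yield list(batches)
                batches = []

    # One payload split across two chunks, closed by a checksum and a resume token.
    payload = b"serialized-proto-rows"
    chunks = [
        {"reset": True, "batch_data": payload[:10]},
        {
            "batch_data": payload[10:],
            "batch_checksum": google_crc32c.value(payload),
            "resume_token": b"t1",
        },
    ]
    assert list(consume_chunks(chunks)) == [[payload]]
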
from typing import ( + List, TypeVar, Generic, Iterable, Optional, - List, Sequence, - cast, ) from abc import ABC, abstractmethod from google.cloud.bigtable_v2 import ProtoRows, Value as PBValue -from google.cloud.bigtable.data.execute_query._byte_cursor import _ByteCursor from google.cloud.bigtable.data.execute_query._query_result_parsing_utils import ( _parse_pb_value_to_python_value, @@ -33,7 +31,7 @@ from google.cloud.bigtable.helpers import batched from google.cloud.bigtable.data.execute_query.values import QueryResultRow -from google.cloud.bigtable.data.execute_query.metadata import ProtoMetadata +from google.cloud.bigtable.data.execute_query.metadata import Metadata T = TypeVar("T") @@ -55,15 +53,17 @@ class _Reader(ABC, Generic[T]): """ @abstractmethod - def consume(self, bytes_to_consume: bytes) -> Optional[Iterable[T]]: - """This method receives a parsable chunk of bytes and returns either a None if there is - not enough chunks to return to the user yet (e.g. we haven't received all columns in a - row yet), or a list of appropriate values gathered from one or more parsable chunks. - + def consume( + self, batches_to_consume: List[bytes], metadata: Metadata + ) -> Optional[Iterable[T]]: + """This method receives a list of batches of bytes to be parsed as ProtoRows messages. + It then uses the metadata to group the values in the parsed messages into rows. Returns + None if batches_to_consume is empty Args: - bytes_to_consume (bytes): chunk of parsable bytes received from + bytes_to_consume (bytes): chunk of parsable byte batches received from :meth:`google.cloud.bigtable.byte_cursor._ByteCursor.consume` method. + metadata: metadata used to transform values to rows Returns: Iterable[T] or None: Iterable if gathered values can form one or more instances of T, @@ -84,28 +84,14 @@ class _QueryResultRowReader(_Reader[QueryResultRow]): :class:`google.cloud.bigtable.byte_cursor._ByteCursor` passed in the constructor. """ - def __init__(self, byte_cursor: _ByteCursor[ProtoMetadata]): - """ - Constructs new instance of ``_QueryResultRowReader``. - - Args: - byte_cursor (google.cloud.bigtable.byte_cursor._ByteCursor): - byte_cursor that will be used to gather bytes for this instance of ``_Reader``, - needed to obtain :class:`google.cloud.bigtable.execute_query.Metadata` about - processed stream. 
- """ - self._values: List[PBValue] = [] - self._byte_cursor = byte_cursor - - @property - def _metadata(self) -> Optional[ProtoMetadata]: - return self._byte_cursor.metadata + def _parse_proto_rows(self, bytes_to_parse: bytes) -> Iterable[PBValue]: + proto_rows = ProtoRows.pb().FromString(bytes_to_parse) + return proto_rows.values - def _construct_query_result_row(self, values: Sequence[PBValue]) -> QueryResultRow: + def _construct_query_result_row( + self, values: Sequence[PBValue], metadata: Metadata + ) -> QueryResultRow: result = QueryResultRow() - # The logic, not defined by mypy types, ensures that the value of - # "metadata" is never null at the time it is retrieved here - metadata = cast(ProtoMetadata, self._metadata) columns = metadata.columns assert len(values) == len( @@ -117,33 +103,20 @@ def _construct_query_result_row(self, values: Sequence[PBValue]) -> QueryResultR result.add_field(column.column_name, parsed_value) return result - def _parse_proto_rows(self, bytes_to_parse: bytes) -> Iterable[PBValue]: - proto_rows = ProtoRows.pb().FromString(bytes_to_parse) - return proto_rows.values - - def consume(self, bytes_to_consume: bytes) -> Optional[Iterable[QueryResultRow]]: - if bytes_to_consume is None: - raise ValueError("bytes_to_consume shouldn't be None") - - self._values.extend(self._parse_proto_rows(bytes_to_consume)) - - # The logic, not defined by mypy types, ensures that the value of - # "metadata" is never null at the time it is retrieved here - num_columns = len(cast(ProtoMetadata, self._metadata).columns) - - if len(self._values) < num_columns: - return None - + def consume( + self, batches_to_consume: List[bytes], metadata: Metadata + ) -> Optional[Iterable[QueryResultRow]]: + num_columns = len(metadata.columns) rows = [] - for batch in batched(self._values, n=num_columns): - if len(batch) == num_columns: - rows.append(self._construct_query_result_row(batch)) - else: - raise ValueError( - "Server error, recieved bad number of values. " - f"Expected {num_columns} got {len(batch)}." - ) - - self._values = [] + for batch_bytes in batches_to_consume: + values = self._parse_proto_rows(batch_bytes) + for row_data in batched(values, n=num_columns): + if len(row_data) == num_columns: + rows.append(self._construct_query_result_row(row_data, metadata)) + else: + raise ValueError( + "Unexpected error, recieved bad number of values. " + f"Expected {num_columns} got {len(row_data)}." 
+ ) return rows diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_sync_autogen/execute_query_iterator.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_sync_autogen/execute_query_iterator.py index 854148ff3567..9c2d1c6d8ee8 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_sync_autogen/execute_query_iterator.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_sync_autogen/execute_query_iterator.py @@ -23,15 +23,19 @@ _attempt_timeout_generator, _retry_exception_factory, ) -from google.cloud.bigtable.data.exceptions import InvalidExecuteQueryResponse +from google.cloud.bigtable.data.exceptions import ( + EarlyMetadataCallError, + InvalidExecuteQueryResponse, +) from google.cloud.bigtable.data.execute_query.values import QueryResultRow -from google.cloud.bigtable.data.execute_query.metadata import Metadata, ProtoMetadata +from google.cloud.bigtable.data.execute_query.metadata import Metadata from google.cloud.bigtable.data.execute_query._reader import ( _QueryResultRowReader, _Reader, ) from google.cloud.bigtable_v2.types.bigtable import ( ExecuteQueryRequest as ExecuteQueryRequestPB, + ExecuteQueryResponse, ) from google.cloud.bigtable.data._cross_sync import CrossSync @@ -39,6 +43,14 @@ from google.cloud.bigtable.data import BigtableDataClient as DataClientType +def _has_resume_token(response: ExecuteQueryResponse) -> bool: + response_pb = response._pb + if response_pb.HasField("results"): + results = response_pb.results + return len(results.resume_token) > 0 + return False + + class ExecuteQueryIterator: def __init__( self, @@ -46,6 +58,7 @@ def __init__( instance_id: str, app_profile_id: Optional[str], request_body: Dict[str, Any], + prepare_metadata: Metadata, attempt_timeout: float | None, operation_timeout: float, req_metadata: Sequence[Tuple[str, str]] = (), @@ -53,6 +66,9 @@ def __init__( ) -> None: """Collects responses from ExecuteQuery requests and parses them into QueryResultRows. + **Please Note** this is not meant to be constructed directly by applications. It should always + be created via the client. The constructor is subject to change. + It is **not thread-safe**. It should not be used by multiple threads. Args: @@ -67,13 +83,18 @@ def __init__( req_metadata: metadata used while sending the gRPC request retryable_excs: a list of errors that will be retried if encountered. 
Raises: - None""" + None + :class:`ValueError ` as a safeguard if data is processed in an unexpected state + """ self._table_name = None self._app_profile_id = app_profile_id self._client = client self._instance_id = instance_id - self._byte_cursor = _ByteCursor[ProtoMetadata]() - self._reader: _Reader[QueryResultRow] = _QueryResultRowReader(self._byte_cursor) + self._prepare_metadata = prepare_metadata + self._final_metadata = None + self._byte_cursor = _ByteCursor() + self._reader: _Reader[QueryResultRow] = _QueryResultRowReader() + self.has_received_token = False self._result_generator = self._next_impl() self._register_instance_task = None self._is_closed = False @@ -92,7 +113,7 @@ def __init__( try: self._register_instance_task = CrossSync._Sync_Impl.create_task( self._client._register_instance, - instance_id, + self._instance_id, self, sync_executor=self._client._executor, ) @@ -129,23 +150,21 @@ def _make_request_with_resume_token(self): retry=None, ) - def _fetch_metadata(self) -> None: - """If called before the first response was recieved, the first response - is retrieved as part of this call.""" - if self._byte_cursor.metadata is None: - metadata_msg = self._stream.__next__() - self._byte_cursor.consume_metadata(metadata_msg) - def _next_impl(self) -> CrossSync._Sync_Impl.Iterator[QueryResultRow]: """Generator wrapping the response stream which parses the stream results and returns full `QueryResultRow`s.""" - self._fetch_metadata() for response in self._stream: try: - bytes_to_parse = self._byte_cursor.consume(response) - if bytes_to_parse is None: + if self._final_metadata is None and _has_resume_token(response): + self._finalize_metadata() + batches_to_parse = self._byte_cursor.consume(response) + if not batches_to_parse: continue - results = self._reader.consume(bytes_to_parse) + if not self.metadata: + raise ValueError( + "Error parsing response before finalizing metadata" + ) + results = self._reader.consume(batches_to_parse, self.metadata) if results is None: continue except ValueError as e: @@ -154,9 +173,15 @@ def _next_impl(self) -> CrossSync._Sync_Impl.Iterator[QueryResultRow]: ) from e for result in results: yield result + if self._final_metadata is None: + self._finalize_metadata() self.close() def __next__(self) -> QueryResultRow: + """Yields QueryResultRows representing the results of the query. + + :raises: :class:`ValueError ` as a safeguard if data is processed in an unexpected state + """ if self._is_closed: raise CrossSync._Sync_Impl.StopIteration return self._result_generator.__next__() @@ -164,22 +189,50 @@ def __next__(self) -> QueryResultRow: def __iter__(self): return self - def metadata(self) -> Optional[Metadata]: - """Returns query metadata from the server or None if the iterator was - explicitly closed.""" - if self._is_closed: - return None - if self._byte_cursor.metadata is None: - try: - self._fetch_metadata() - except CrossSync._Sync_Impl.StopIteration: - return None - return self._byte_cursor.metadata + def _finalize_metadata(self) -> None: + """Sets _final_metadata to the metadata of the latest prepare_response. + The iterator should call this after either the first resume token is received or the + stream completes succesfully with no responses. + + This can't be set on init because the metadata will be able to change due to plan refresh. + Plan refresh isn't implemented yet, but we want functionality to stay the same when it is. 
+
+        For example, the following scenario for query "SELECT * FROM table":
+        - Make a request, table has one column family 'cf'
+        - Return an incomplete batch
+        - The request fails with a transient error
+        - Meanwhile the table has had a second column family added 'cf2'
+        - Retry the request, get an error indicating the `prepared_query` has expired
+        - Refresh the prepared_query and retry the request, the new prepared_query
+          contains both 'cf' & 'cf2'
+        - It sends a new incomplete batch and resets the old outdated batch
+        - It sends the next chunk with a checksum and resume_token, closing the batch.
+        In this case we need to use the updated schema from the refreshed prepare request."""
+        self._final_metadata = self._prepare_metadata
+
+    @property
+    def metadata(self) -> Metadata:
+        """Returns query metadata from the server.
+
+        Metadata will not be set until the first row has been yielded or a response with no rows
+        completes.
+
+        :raises: :class:`EarlyMetadataCallError` when called before the first row has been returned
+        or the iterator has completed with no rows in the response."""
+        if not self._final_metadata:
+            raise EarlyMetadataCallError()
+        return self._final_metadata
     def close(self) -> None:
-        """Cancel all background tasks. Should be called all rows were processed."""
+        """Cancel all background tasks. Should be called after all rows were processed.
+
+        :raises: :class:`ValueError ` if called in an invalid state
+        """
         if self._is_closed:
             return
+        if not self._byte_cursor.empty():
+            raise ValueError("Unexpected buffered data at end of ExecuteQuery request")
+        self._is_closed = True
         if self._register_instance_task is not None:
             self._register_instance_task.cancel()
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/metadata.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/metadata.py
index bb29588d0e4b..40ef60bc975e 100644
--- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/metadata.py
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/metadata.py
@@ -298,14 +298,6 @@ def _to_value_pb_dict(self, value: Any) -> Dict[str, Any]:
 class Metadata:
-    """
-    Base class for metadata returned by the ExecuteQuery operation.
-    """
-
-    pass
-
-
-class ProtoMetadata(Metadata):
     """
     Metadata class for the ExecuteQuery operation.
@@ -335,7 +327,7 @@ def columns(self) -> List[Column]: def __init__( self, columns: Optional[List[Tuple[Optional[str], SqlType.Type]]] = None ): - self._columns: List[ProtoMetadata.Column] = [] + self._columns: List[Metadata.Column] = [] self._column_indexes: Dict[str, List[int]] = defaultdict(list) self._duplicate_names: Set[str] = set() @@ -345,7 +337,7 @@ def __init__( if column_name in self._column_indexes: self._duplicate_names.add(column_name) self._column_indexes[column_name].append(len(self._columns)) - self._columns.append(ProtoMetadata.Column(column_name, column_type)) + self._columns.append(Metadata.Column(column_name, column_type)) def __getitem__(self, index_or_name: Union[str, int]) -> Column: if isinstance(index_or_name, str): @@ -381,7 +373,7 @@ def _pb_metadata_to_metadata_types( fields.append( (column_metadata.name, _pb_type_to_metadata_type(column_metadata.type)) ) - return ProtoMetadata(fields) + return Metadata(fields) raise ValueError("Invalid ResultSetMetadata object received.") diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 2e51249e5e5b..7e89af11b3cf 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -44,6 +44,7 @@ "proto-plus >= 1.22.3, <2.0.0", "proto-plus >= 1.25.0, <2.0.0; python_version>='3.13'", "protobuf>=3.20.2,<7.0.0,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5", + "google-crc32c>=1.5.0, <2.0.0dev", ] extras = {"libcst": "libcst >= 0.2.5"} diff --git a/packages/google-cloud-bigtable/tests/_testing.py b/packages/google-cloud-bigtable/tests/_testing.py deleted file mode 100644 index 81cce7b78454..000000000000 --- a/packages/google-cloud-bigtable/tests/_testing.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
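
To make the new metadata semantics above concrete, here is a rough usage-level sketch against the public client. Metadata is finalized only after the first resume token, or after an empty stream completes, so reading `.metadata` before iterating raises `EarlyMetadataCallError`. The instance and table names are placeholders, and error handling and client cleanup are omitted; this is a sketch, not the library's documented example.

    # Usage sketch only; "my-instance" and "my-table" are placeholder names.
    from google.cloud.bigtable.data import BigtableDataClient

    def print_query_results():
        client = BigtableDataClient()
        result = client.execute_query("SELECT * FROM `my-table`", "my-instance")
        # Accessing result.metadata here would raise EarlyMetadataCallError:
        # metadata is only finalized after the first resume_token arrives or
        # an empty response stream completes.
        rows = list(result)
        md = result.metadata  # safe now that iteration has finished
        for column in md.columns:
            print(column.column_name, column.column_type)
        for row in rows:
            print(row["_key"])
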
- -from google.cloud.bigtable_v2.types.data import ProtoRows, Value as PBValue - - -TYPE_INT = { - "int64_type": { - "encoding": {"big_endian_bytes": {"bytes_type": {"encoding": {"raw": {}}}}} - } -} - - -def proto_rows_bytes(*args): - return ProtoRows.serialize(ProtoRows(values=[PBValue(**arg) for arg in args])) - - -def split_bytes_into_chunks(bytes_to_split, num_chunks): - from google.cloud.bigtable.helpers import batched - - assert num_chunks <= len(bytes_to_split) - bytes_per_part = (len(bytes_to_split) - 1) // num_chunks + 1 - result = list(map(bytes, batched(bytes_to_split, bytes_per_part))) - assert len(result) == num_chunks - return result diff --git a/packages/google-cloud-bigtable/tests/system/data/test_system_async.py b/packages/google-cloud-bigtable/tests/system/data/test_system_async.py index d10c71d78a71..53e97acc16f4 100644 --- a/packages/google-cloud-bigtable/tests/system/data/test_system_async.py +++ b/packages/google-cloud-bigtable/tests/system/data/test_system_async.py @@ -1050,6 +1050,10 @@ async def test_literal_value_filter( expect_match ), f"row {type(cell_value)}({cell_value}) not found with {type(filter_input)}({filter_input}) filter" + @pytest.mark.skipif( + bool(os.environ.get(BIGTABLE_EMULATOR)), + reason="emulator doesn't support SQL", + ) @CrossSync.pytest @pytest.mark.usefixtures("client") @CrossSync.Retry( @@ -1063,6 +1067,44 @@ async def test_execute_query_simple(self, client, table_id, instance_id): assert row["a"] == 1 assert row["b"] == "foo" + @pytest.mark.skipif( + bool(os.environ.get(BIGTABLE_EMULATOR)), + reason="emulator doesn't support SQL", + ) + @CrossSync.pytest + @pytest.mark.usefixtures("table") + @CrossSync.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + async def test_execute_against_table( + self, client, instance_id, table_id, temp_rows + ): + await temp_rows.add_row(b"row_key_1") + result = await client.execute_query( + "SELECT * FROM `" + table_id + "`", instance_id + ) + rows = [r async for r in result] + + assert len(rows) == 1 + assert rows[0]["_key"] == b"row_key_1" + family_map = rows[0][TEST_FAMILY] + assert len(family_map) == 1 + assert family_map[b"q"] == b"test-value" + assert len(rows[0][TEST_FAMILY_2]) == 0 + md = result.metadata + assert len(md) == 3 + assert md["_key"].column_type == SqlType.Bytes() + assert md[TEST_FAMILY].column_type == SqlType.Map( + SqlType.Bytes(), SqlType.Bytes() + ) + assert md[TEST_FAMILY_2].column_type == SqlType.Map( + SqlType.Bytes(), SqlType.Bytes() + ) + + @pytest.mark.skipif( + bool(os.environ.get(BIGTABLE_EMULATOR)), + reason="emulator doesn't support SQL", + ) @CrossSync.pytest @pytest.mark.usefixtures("client") @CrossSync.Retry( @@ -1105,8 +1147,14 @@ async def test_execute_query_params(self, client, table_id, instance_id): ], } param_types = { + "stringParam": SqlType.String(), + "bytesParam": SqlType.Bytes(), + "int64Param": SqlType.Int64(), "float32Param": SqlType.Float32(), "float64Param": SqlType.Float64(), + "boolParam": SqlType.Bool(), + "tsParam": SqlType.Timestamp(), + "dateParam": SqlType.Date(), "byteArrayParam": SqlType.Array(SqlType.Bytes()), "stringArrayParam": SqlType.Array(SqlType.String()), "intArrayParam": SqlType.Array(SqlType.Int64()), @@ -1116,6 +1164,7 @@ async def test_execute_query_params(self, client, table_id, instance_id): "tsArrayParam": SqlType.Array(SqlType.Timestamp()), "dateArrayParam": SqlType.Array(SqlType.Date()), } + result = await client.execute_query( query, instance_id, parameters=parameters, 
parameter_types=param_types ) @@ -1142,3 +1191,32 @@ async def test_execute_query_params(self, client, table_id, instance_id): date_pb2.Date(year=2025, month=1, day=17), None, ] + + @pytest.mark.skipif( + bool(os.environ.get(BIGTABLE_EMULATOR)), + reason="emulator doesn't support SQL", + ) + @CrossSync.pytest + @pytest.mark.usefixtures("table") + @CrossSync.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + async def test_execute_metadata_on_empty_response( + self, client, instance_id, table_id, temp_rows + ): + await temp_rows.add_row(b"row_key_1") + result = await client.execute_query( + "SELECT * FROM `" + table_id + "` WHERE _key='non-existent'", instance_id + ) + rows = [r async for r in result] + + assert len(rows) == 0 + md = result.metadata + assert len(md) == 3 + assert md["_key"].column_type == SqlType.Bytes() + assert md[TEST_FAMILY].column_type == SqlType.Map( + SqlType.Bytes(), SqlType.Bytes() + ) + assert md[TEST_FAMILY_2].column_type == SqlType.Map( + SqlType.Bytes(), SqlType.Bytes() + ) diff --git a/packages/google-cloud-bigtable/tests/system/data/test_system_autogen.py b/packages/google-cloud-bigtable/tests/system/data/test_system_autogen.py index 18d65b21c5a0..ede24be76fe6 100644 --- a/packages/google-cloud-bigtable/tests/system/data/test_system_autogen.py +++ b/packages/google-cloud-bigtable/tests/system/data/test_system_autogen.py @@ -857,6 +857,9 @@ def test_literal_value_filter( expect_match ), f"row {type(cell_value)}({cell_value}) not found with {type(filter_input)}({filter_input}) filter" + @pytest.mark.skipif( + bool(os.environ.get(BIGTABLE_EMULATOR)), reason="emulator doesn't support SQL" + ) @pytest.mark.usefixtures("client") @CrossSync._Sync_Impl.Retry( predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 @@ -869,6 +872,36 @@ def test_execute_query_simple(self, client, table_id, instance_id): assert row["a"] == 1 assert row["b"] == "foo" + @pytest.mark.skipif( + bool(os.environ.get(BIGTABLE_EMULATOR)), reason="emulator doesn't support SQL" + ) + @pytest.mark.usefixtures("table") + @CrossSync._Sync_Impl.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + def test_execute_against_table(self, client, instance_id, table_id, temp_rows): + temp_rows.add_row(b"row_key_1") + result = client.execute_query("SELECT * FROM `" + table_id + "`", instance_id) + rows = [r for r in result] + assert len(rows) == 1 + assert rows[0]["_key"] == b"row_key_1" + family_map = rows[0][TEST_FAMILY] + assert len(family_map) == 1 + assert family_map[b"q"] == b"test-value" + assert len(rows[0][TEST_FAMILY_2]) == 0 + md = result.metadata + assert len(md) == 3 + assert md["_key"].column_type == SqlType.Bytes() + assert md[TEST_FAMILY].column_type == SqlType.Map( + SqlType.Bytes(), SqlType.Bytes() + ) + assert md[TEST_FAMILY_2].column_type == SqlType.Map( + SqlType.Bytes(), SqlType.Bytes() + ) + + @pytest.mark.skipif( + bool(os.environ.get(BIGTABLE_EMULATOR)), reason="emulator doesn't support SQL" + ) @pytest.mark.usefixtures("client") @CrossSync._Sync_Impl.Retry( predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 @@ -902,8 +935,14 @@ def test_execute_query_params(self, client, table_id, instance_id): ], } param_types = { + "stringParam": SqlType.String(), + "bytesParam": SqlType.Bytes(), + "int64Param": SqlType.Int64(), "float32Param": SqlType.Float32(), "float64Param": SqlType.Float64(), + "boolParam": SqlType.Bool(), + "tsParam": SqlType.Timestamp(), + "dateParam": SqlType.Date(), 
"byteArrayParam": SqlType.Array(SqlType.Bytes()), "stringArrayParam": SqlType.Array(SqlType.String()), "intArrayParam": SqlType.Array(SqlType.Int64()), @@ -939,3 +978,29 @@ def test_execute_query_params(self, client, table_id, instance_id): date_pb2.Date(year=2025, month=1, day=17), None, ] + + @pytest.mark.skipif( + bool(os.environ.get(BIGTABLE_EMULATOR)), reason="emulator doesn't support SQL" + ) + @pytest.mark.usefixtures("table") + @CrossSync._Sync_Impl.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + def test_execute_metadata_on_empty_response( + self, client, instance_id, table_id, temp_rows + ): + temp_rows.add_row(b"row_key_1") + result = client.execute_query( + "SELECT * FROM `" + table_id + "` WHERE _key='non-existent'", instance_id + ) + rows = [r for r in result] + assert len(rows) == 0 + md = result.metadata + assert len(md) == 3 + assert md["_key"].column_type == SqlType.Bytes() + assert md[TEST_FAMILY].column_type == SqlType.Map( + SqlType.Bytes(), SqlType.Bytes() + ) + assert md[TEST_FAMILY_2].column_type == SqlType.Map( + SqlType.Bytes(), SqlType.Bytes() + ) diff --git a/packages/google-cloud-bigtable/tests/unit/_testing.py b/packages/google-cloud-bigtable/tests/unit/_testing.py deleted file mode 100644 index e0d8d2a22166..000000000000 --- a/packages/google-cloud-bigtable/tests/unit/_testing.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# flake8: noqa -from .._testing import TYPE_INT, split_bytes_into_chunks, proto_rows_bytes diff --git a/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py b/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py index d59a8618750c..96fcf66b3fc5 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py @@ -35,6 +35,17 @@ from google.cloud.bigtable_v2.types.bigtable import ExecuteQueryResponse from google.cloud.bigtable.data._cross_sync import CrossSync +from tests.unit.data.execute_query.sql_helpers import ( + chunked_responses, + column, + int64_type, + int_val, + metadata, + null_val, + prepare_response, + str_type, + str_val, +) if CrossSync.is_async: from google.api_core import grpc_helpers_async @@ -3019,10 +3030,31 @@ class TestExecuteQueryAsync: TABLE_NAME = "TABLE_NAME" INSTANCE_NAME = "INSTANCE_NAME" + @pytest.fixture(scope="function") @CrossSync.convert - def _make_client(self, *args, **kwargs): + def client(self, *args, **kwargs): return CrossSync.TestBigtableDataClient._make_client(*args, **kwargs) + @pytest.fixture(scope="function") + @CrossSync.convert + def execute_query_mock(self, client): + with mock.patch.object( + client._gapic_client, "execute_query", CrossSync.Mock() + ) as execute_query_mock: + yield execute_query_mock + + @pytest.fixture(scope="function") + @CrossSync.convert + def prepare_mock(self, client): + with mock.patch.object( + client._gapic_client, "prepare_query", CrossSync.Mock() + ) as prepare_mock: + prepare_mock.return_value = prepare_response( + prepared_query=b"foo", + metadata=metadata(column("a", str_type()), column("b", int64_type())), + ) + yield prepare_mock + @CrossSync.convert def _make_gapic_stream(self, sample_list: list["ExecuteQueryResponse" | Exception]): class MockStream: @@ -3048,201 +3080,125 @@ async def __anext__(self): return MockStream(sample_list) - def resonse_with_metadata(self): - from google.cloud.bigtable_v2.types.bigtable import ExecuteQueryResponse - - schema = {"a": "string_type", "b": "int64_type"} - return ExecuteQueryResponse( - { - "metadata": { - "proto_schema": { - "columns": [ - {"name": name, "type_": {_type: {}}} - for name, _type in schema.items() - ] - } - } - } - ) - - def resonse_with_result(self, *args, resume_token=None): - from google.cloud.bigtable_v2.types.data import ProtoRows, Value as PBValue - from google.cloud.bigtable_v2.types.bigtable import ExecuteQueryResponse - - if resume_token is None: - resume_token_dict = {} - else: - resume_token_dict = {"resume_token": resume_token} - - values = [] - for column_value in args: - if column_value is None: - pb_value = PBValue({}) - else: - pb_value = PBValue( - { - "int_value" - if isinstance(column_value, int) - else "string_value": column_value - } - ) - values.append(pb_value) - rows = ProtoRows(values=values) - - return ExecuteQueryResponse( - { - "results": { - "proto_rows_batch": { - "batch_data": ProtoRows.serialize(rows), - }, - **resume_token_dict, - } - } - ) - @CrossSync.pytest - async def test_execute_query(self): + async def test_execute_query(self, client, execute_query_mock, prepare_mock): values = [ - self.resonse_with_metadata(), - self.resonse_with_result("test"), - self.resonse_with_result(8, resume_token=b"r1"), - self.resonse_with_result("test2"), - self.resonse_with_result(9, resume_token=b"r2"), - self.resonse_with_result("test3"), - self.resonse_with_result(None, resume_token=b"r3"), + # Each splits 
values into chunks across two responses + *chunked_responses(2, str_val("test"), int_val(8), reset=True, token=b"r1"), + *chunked_responses(2, str_val("test2"), int_val(9), token=b"r2"), + *chunked_responses(2, str_val("test3"), null_val(), token=b"r3"), ] - client = self._make_client() - with mock.patch.object( - client._gapic_client, "execute_query", CrossSync.Mock() - ) as execute_query_mock: - execute_query_mock.return_value = self._make_gapic_stream(values) - - result = await client.execute_query( - f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME - ) - results = [r async for r in result] - assert results[0]["a"] == "test" - assert results[0]["b"] == 8 - assert results[1]["a"] == "test2" - assert results[1]["b"] == 9 - assert results[2]["a"] == "test3" - assert results[2]["b"] is None - assert execute_query_mock.call_count == 1 + execute_query_mock.return_value = self._make_gapic_stream(values) - @CrossSync.pytest - async def test_execute_query_with_params(self): + result = await client.execute_query( + f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME + ) + results = [r async for r in result] + assert results[0]["a"] == "test" + assert results[0]["b"] == 8 + assert results[1]["a"] == "test2" + assert results[1]["b"] == 9 + assert results[2]["a"] == "test3" + assert results[2]["b"] is None + assert execute_query_mock.call_count == 1 + assert prepare_mock.call_count == 1 + + @CrossSync.pytest + async def test_execute_query_with_params( + self, client, execute_query_mock, prepare_mock + ): values = [ - self.resonse_with_metadata(), - self.resonse_with_result("test2"), - self.resonse_with_result(9, resume_token=b"r2"), + *chunked_responses(2, str_val("test2"), int_val(9), token=b"r2"), ] - client = self._make_client() - with mock.patch.object( - client._gapic_client, "execute_query", CrossSync.Mock() - ) as execute_query_mock: - execute_query_mock.return_value = self._make_gapic_stream(values) - result = await client.execute_query( - f"SELECT a, b FROM {self.TABLE_NAME} WHERE b=@b", - self.INSTANCE_NAME, - parameters={"b": 9}, - ) - results = [r async for r in result] - assert len(results) == 1 - assert results[0]["a"] == "test2" - assert results[0]["b"] == 9 - assert execute_query_mock.call_count == 1 + execute_query_mock.return_value = self._make_gapic_stream(values) + result = await client.execute_query( + f"SELECT a, b FROM {self.TABLE_NAME} WHERE b=@b", + self.INSTANCE_NAME, + parameters={"b": 9}, + ) + results = [r async for r in result] + assert len(results) == 1 + assert results[0]["a"] == "test2" + assert results[0]["b"] == 9 + assert execute_query_mock.call_count == 1 + assert prepare_mock.call_count == 1 @CrossSync.pytest - async def test_execute_query_error_before_metadata(self): + async def test_execute_query_error_before_metadata( + self, client, execute_query_mock, prepare_mock + ): from google.api_core.exceptions import DeadlineExceeded values = [ DeadlineExceeded(""), - self.resonse_with_metadata(), - self.resonse_with_result("test"), - self.resonse_with_result(8, resume_token=b"r1"), - self.resonse_with_result("test2"), - self.resonse_with_result(9, resume_token=b"r2"), - self.resonse_with_result("test3"), - self.resonse_with_result(None, resume_token=b"r3"), + # Each splits values into chunks across two responses + *chunked_responses(2, str_val("test"), int_val(8), reset=True, token=b"r1"), + *chunked_responses(2, str_val("test2"), int_val(9), token=b"r2"), + *chunked_responses(2, str_val("test3"), null_val(), token=b"r3"), ] - client = 
self._make_client() - with mock.patch.object( - client._gapic_client, "execute_query", CrossSync.Mock() - ) as execute_query_mock: - execute_query_mock.return_value = self._make_gapic_stream(values) - result = await client.execute_query( - f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME - ) - results = [r async for r in result] - assert len(results) == 3 - assert execute_query_mock.call_count == 2 + execute_query_mock.return_value = self._make_gapic_stream(values) + result = await client.execute_query( + f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME + ) + results = [r async for r in result] + assert len(results) == 3 + assert execute_query_mock.call_count == 2 + assert prepare_mock.call_count == 1 @CrossSync.pytest - async def test_execute_query_error_after_metadata(self): + async def test_execute_query_error_after_metadata( + self, client, execute_query_mock, prepare_mock + ): from google.api_core.exceptions import DeadlineExceeded values = [ - self.resonse_with_metadata(), DeadlineExceeded(""), - self.resonse_with_metadata(), - self.resonse_with_result("test"), - self.resonse_with_result(8, resume_token=b"r1"), - self.resonse_with_result("test2"), - self.resonse_with_result(9, resume_token=b"r2"), - self.resonse_with_result("test3"), - self.resonse_with_result(None, resume_token=b"r3"), + # Each splits values into chunks across two responses + *chunked_responses(2, str_val("test"), int_val(8), reset=True, token=b"r1"), + *chunked_responses(2, str_val("test2"), int_val(9), token=b"r2"), + *chunked_responses(2, str_val("test3"), null_val(), token=b"r3"), ] - client = self._make_client() - with mock.patch.object( - client._gapic_client, "execute_query", CrossSync.Mock() - ) as execute_query_mock: - execute_query_mock.return_value = self._make_gapic_stream(values) - result = await client.execute_query( - f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME - ) - results = [r async for r in result] - assert len(results) == 3 - assert execute_query_mock.call_count == 2 - requests = [args[0][0] for args in execute_query_mock.call_args_list] - resume_tokens = [r.resume_token for r in requests if r.resume_token] - assert resume_tokens == [] + execute_query_mock.return_value = self._make_gapic_stream(values) + result = await client.execute_query( + f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME + ) + results = [r async for r in result] + assert len(results) == 3 + assert execute_query_mock.call_count == 2 + requests = [args[0][0] for args in execute_query_mock.call_args_list] + resume_tokens = [r.resume_token for r in requests if r.resume_token] + assert resume_tokens == [] @CrossSync.pytest - async def test_execute_query_with_retries(self): + async def test_execute_query_with_retries( + self, client, execute_query_mock, prepare_mock + ): from google.api_core.exceptions import DeadlineExceeded values = [ - self.resonse_with_metadata(), - self.resonse_with_result("test"), - self.resonse_with_result(8, resume_token=b"r1"), + # Each splits values into chunks across two responses + *chunked_responses(2, str_val("test"), int_val(8), reset=True, token=b"r1"), DeadlineExceeded(""), - self.resonse_with_result("test2"), - self.resonse_with_result(9, resume_token=b"r2"), - self.resonse_with_result("test3"), + *chunked_responses(2, str_val("test2"), int_val(9), token=b"r2"), DeadlineExceeded(""), - self.resonse_with_result("test3"), - self.resonse_with_result(None, resume_token=b"r3"), + *chunked_responses(2, str_val("test3"), null_val(), token=b"r3"), ] - client = 
self._make_client() - with mock.patch.object( - client._gapic_client, "execute_query", CrossSync.Mock() - ) as execute_query_mock: - execute_query_mock.return_value = self._make_gapic_stream(values) - result = await client.execute_query( - f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME - ) - results = [r async for r in result] - assert results[0]["a"] == "test" - assert results[0]["b"] == 8 - assert results[1]["a"] == "test2" - assert results[1]["b"] == 9 - assert results[2]["a"] == "test3" - assert results[2]["b"] is None - assert len(results) == 3 - requests = [args[0][0] for args in execute_query_mock.call_args_list] - resume_tokens = [r.resume_token for r in requests if r.resume_token] - assert resume_tokens == [b"r1", b"r2"] + execute_query_mock.return_value = self._make_gapic_stream(values) + result = await client.execute_query( + f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME + ) + results = [r async for r in result] + assert results[0]["a"] == "test" + assert results[0]["b"] == 8 + assert results[1]["a"] == "test2" + assert results[1]["b"] == 9 + assert results[2]["a"] == "test3" + assert results[2]["b"] is None + assert len(results) == 3 + requests = [args[0][0] for args in execute_query_mock.call_args_list] + resume_tokens = [r.resume_token for r in requests if r.resume_token] + assert resume_tokens == [b"r1", b"r2"] + assert prepare_mock.call_count == 1 @pytest.mark.parametrize( "exception", @@ -3253,53 +3209,29 @@ async def test_execute_query_with_retries(self): ], ) @CrossSync.pytest - async def test_execute_query_retryable_error(self, exception): + async def test_execute_query_retryable_error( + self, client, execute_query_mock, prepare_mock, exception + ): + [res1, res2] = chunked_responses( + 2, str_val("test"), int_val(8), reset=True, token=b"t1" + ) values = [ - self.resonse_with_metadata(), - self.resonse_with_result("test", resume_token=b"t1"), + *chunked_responses(1, str_val("test"), int_val(8), reset=True, token=b"t1"), exception, - self.resonse_with_result(8, resume_token=b"t2"), - ] - client = self._make_client() - with mock.patch.object( - client._gapic_client, "execute_query", CrossSync.Mock() - ) as execute_query_mock: - execute_query_mock.return_value = self._make_gapic_stream(values) - - result = await client.execute_query( - f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME - ) - results = [r async for r in result] - assert len(results) == 1 - assert execute_query_mock.call_count == 2 - requests = [args[0][0] for args in execute_query_mock.call_args_list] - resume_tokens = [r.resume_token for r in requests if r.resume_token] - assert resume_tokens == [b"t1"] - - @CrossSync.pytest - async def test_execute_query_retry_partial_row(self): - values = [ - self.resonse_with_metadata(), - self.resonse_with_result("test", resume_token=b"t1"), - core_exceptions.DeadlineExceeded(""), - self.resonse_with_result(8, resume_token=b"t2"), + *chunked_responses(1, str_val("tes2"), int_val(9), reset=True, token=b"t1"), ] - client = self._make_client() - with mock.patch.object( - client._gapic_client, "execute_query", CrossSync.Mock() - ) as execute_query_mock: - execute_query_mock.return_value = self._make_gapic_stream(values) + execute_query_mock.return_value = self._make_gapic_stream(values) - result = await client.execute_query( - f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME - ) - results = [r async for r in result] - assert results[0]["a"] == "test" - assert results[0]["b"] == 8 - assert execute_query_mock.call_count == 2 - requests 
= [args[0][0] for args in execute_query_mock.call_args_list] - resume_tokens = [r.resume_token for r in requests if r.resume_token] - assert resume_tokens == [b"t1"] + result = await client.execute_query( + f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME + ) + results = [r async for r in result] + assert len(results) == 2 + assert execute_query_mock.call_count == 2 + assert prepare_mock.call_count == 1 + requests = [args[0][0] for args in execute_query_mock.call_args_list] + resume_tokens = [r.resume_token for r in requests if r.resume_token] + assert resume_tokens == [b"t1"] @pytest.mark.parametrize( "ExceptionType", @@ -3320,55 +3252,101 @@ async def test_execute_query_retry_partial_row(self): ], ) @CrossSync.pytest - async def test_execute_query_non_retryable(self, ExceptionType): + async def test_execute_query_non_retryable( + self, client, execute_query_mock, prepare_mock, ExceptionType + ): values = [ - self.resonse_with_metadata(), - self.resonse_with_result("test"), - self.resonse_with_result(8, resume_token=b"r1"), + # Each splits values into chunks across two responses + *chunked_responses(2, str_val("test"), int_val(8), reset=True, token=b"r1"), ExceptionType(""), - self.resonse_with_result("test2"), - self.resonse_with_result(9, resume_token=b"r2"), - self.resonse_with_result("test3"), - self.resonse_with_result(None, resume_token=b"r3"), + *chunked_responses(2, str_val("test2"), int_val(9), token=b"r2"), + *chunked_responses(2, str_val("test3"), null_val(), token=b"r3"), ] - client = self._make_client() - with mock.patch.object( - client._gapic_client, "execute_query", CrossSync.Mock() - ) as execute_query_mock: - execute_query_mock.return_value = self._make_gapic_stream(values) + execute_query_mock.return_value = self._make_gapic_stream(values) - result = await client.execute_query( - f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME - ) + result = await client.execute_query( + f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME + ) + r = await CrossSync.next(result) + assert r["a"] == "test" + assert r["b"] == 8 + + with pytest.raises(ExceptionType): r = await CrossSync.next(result) - assert r["a"] == "test" - assert r["b"] == 8 - with pytest.raises(ExceptionType): - r = await CrossSync.next(result) + assert execute_query_mock.call_count == 1 + assert prepare_mock.call_count == 1 + requests = [args[0][0] for args in execute_query_mock.call_args_list] + resume_tokens = [r.resume_token for r in requests if r.resume_token] + assert resume_tokens == [] - assert execute_query_mock.call_count == 1 - requests = [args[0][0] for args in execute_query_mock.call_args_list] - resume_tokens = [r.resume_token for r in requests if r.resume_token] - assert resume_tokens == [] + @pytest.mark.parametrize( + "retryable_exception", + [ + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ], + ) + @CrossSync.pytest + async def test_prepare_query_retryable( + self, client, execute_query_mock, prepare_mock, retryable_exception + ): + prepare_mock.reset_mock() + prepare_mock.side_effect = [ + retryable_exception("test"), + prepare_response( + b"foo", + metadata=metadata(column("a", str_type()), column("b", int64_type())), + ), + ] + values = [ + *chunked_responses(1, str_val("test"), int_val(8), reset=True, token=b"t1"), + ] + execute_query_mock.return_value = self._make_gapic_stream(values) + result = await client.execute_query( + f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME + ) + results = [r async for r in result] + assert 
results[0]["a"] == "test" + assert results[0]["b"] == 8 + assert execute_query_mock.call_count == 1 + assert prepare_mock.call_count == 2 + @pytest.mark.parametrize( + "non_retryable_exception", + [ + (core_exceptions.InvalidArgument), + (core_exceptions.FailedPrecondition), + (core_exceptions.PermissionDenied), + (core_exceptions.MethodNotImplemented), + (core_exceptions.Cancelled), + (core_exceptions.AlreadyExists), + (core_exceptions.OutOfRange), + (core_exceptions.DataLoss), + (core_exceptions.Unauthenticated), + (core_exceptions.NotFound), + (core_exceptions.ResourceExhausted), + (core_exceptions.Unknown), + (core_exceptions.InternalServerError), + ], + ) @CrossSync.pytest - async def test_execute_query_metadata_received_multiple_times_detected(self): + async def test_prepare_query_non_retryable( + self, client, execute_query_mock, prepare_mock, non_retryable_exception + ): + prepare_mock.reset_mock() + prepare_mock.side_effect = [ + non_retryable_exception("test"), + prepare_response( + b"foo", + metadata=metadata(column("a", str_type()), column("b", int64_type())), + ), + ] values = [ - self.resonse_with_metadata(), - self.resonse_with_metadata(), + *chunked_responses(1, str_val("test"), int_val(8), reset=True, token=b"t1"), ] - client = self._make_client() - with mock.patch.object( - client._gapic_client, "execute_query", CrossSync.Mock() - ) as execute_query_mock: - execute_query_mock.return_value = self._make_gapic_stream(values) - with pytest.raises( - Exception, match="Invalid ExecuteQuery response received" - ): - [ - r - async for r in await client.execute_query( - f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME - ) - ] + execute_query_mock.return_value = self._make_gapic_stream(values) + with pytest.raises(non_retryable_exception): + await client.execute_query( + f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME + ) diff --git a/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_client.py b/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_client.py index c7738128002a..720f0e0b650f 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_client.py @@ -32,6 +32,17 @@ from google.cloud.bigtable.data.read_modify_write_rules import AppendValueRule from google.cloud.bigtable_v2.types.bigtable import ExecuteQueryResponse from google.cloud.bigtable.data._cross_sync import CrossSync +from tests.unit.data.execute_query.sql_helpers import ( + chunked_responses, + column, + int64_type, + int_val, + metadata, + null_val, + prepare_response, + str_type, + str_val, +) from google.api_core import grpc_helpers CrossSync._Sync_Impl.add_mapping("grpc_helpers", grpc_helpers) @@ -2562,9 +2573,28 @@ class TestExecuteQuery: TABLE_NAME = "TABLE_NAME" INSTANCE_NAME = "INSTANCE_NAME" - def _make_client(self, *args, **kwargs): + @pytest.fixture(scope="function") + def client(self, *args, **kwargs): return CrossSync._Sync_Impl.TestBigtableDataClient._make_client(*args, **kwargs) + @pytest.fixture(scope="function") + def execute_query_mock(self, client): + with mock.patch.object( + client._gapic_client, "execute_query", CrossSync._Sync_Impl.Mock() + ) as execute_query_mock: + yield execute_query_mock + + @pytest.fixture(scope="function") + def prepare_mock(self, client): + with mock.patch.object( + client._gapic_client, "prepare_query", CrossSync._Sync_Impl.Mock() + ) as prepare_mock: + prepare_mock.return_value = prepare_response( + 
prepared_query=b"foo", + metadata=metadata(column("a", str_type()), column("b", int64_type())), + ) + yield prepare_mock + def _make_gapic_stream(self, sample_list: list["ExecuteQueryResponse" | Exception]): class MockStream: def __init__(self, sample_list): @@ -2589,191 +2619,109 @@ def __anext__(self): return MockStream(sample_list) - def resonse_with_metadata(self): - from google.cloud.bigtable_v2.types.bigtable import ExecuteQueryResponse - - schema = {"a": "string_type", "b": "int64_type"} - return ExecuteQueryResponse( - { - "metadata": { - "proto_schema": { - "columns": [ - {"name": name, "type_": {_type: {}}} - for (name, _type) in schema.items() - ] - } - } - } - ) - - def resonse_with_result(self, *args, resume_token=None): - from google.cloud.bigtable_v2.types.data import ProtoRows, Value as PBValue - from google.cloud.bigtable_v2.types.bigtable import ExecuteQueryResponse - - if resume_token is None: - resume_token_dict = {} - else: - resume_token_dict = {"resume_token": resume_token} - values = [] - for column_value in args: - if column_value is None: - pb_value = PBValue({}) - else: - pb_value = PBValue( - { - "int_value" - if isinstance(column_value, int) - else "string_value": column_value - } - ) - values.append(pb_value) - rows = ProtoRows(values=values) - return ExecuteQueryResponse( - { - "results": { - "proto_rows_batch": {"batch_data": ProtoRows.serialize(rows)}, - **resume_token_dict, - } - } - ) - - def test_execute_query(self): - values = [ - self.resonse_with_metadata(), - self.resonse_with_result("test"), - self.resonse_with_result(8, resume_token=b"r1"), - self.resonse_with_result("test2"), - self.resonse_with_result(9, resume_token=b"r2"), - self.resonse_with_result("test3"), - self.resonse_with_result(None, resume_token=b"r3"), - ] - client = self._make_client() - with mock.patch.object( - client._gapic_client, "execute_query", CrossSync._Sync_Impl.Mock() - ) as execute_query_mock: - execute_query_mock.return_value = self._make_gapic_stream(values) - result = client.execute_query( - f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME - ) - results = [r for r in result] - assert results[0]["a"] == "test" - assert results[0]["b"] == 8 - assert results[1]["a"] == "test2" - assert results[1]["b"] == 9 - assert results[2]["a"] == "test3" - assert results[2]["b"] is None - assert execute_query_mock.call_count == 1 - - def test_execute_query_with_params(self): + def test_execute_query(self, client, execute_query_mock, prepare_mock): values = [ - self.resonse_with_metadata(), - self.resonse_with_result("test2"), - self.resonse_with_result(9, resume_token=b"r2"), + *chunked_responses(2, str_val("test"), int_val(8), reset=True, token=b"r1"), + *chunked_responses(2, str_val("test2"), int_val(9), token=b"r2"), + *chunked_responses(2, str_val("test3"), null_val(), token=b"r3"), ] - client = self._make_client() - with mock.patch.object( - client._gapic_client, "execute_query", CrossSync._Sync_Impl.Mock() - ) as execute_query_mock: - execute_query_mock.return_value = self._make_gapic_stream(values) - result = client.execute_query( - f"SELECT a, b FROM {self.TABLE_NAME} WHERE b=@b", - self.INSTANCE_NAME, - parameters={"b": 9}, - ) - results = [r for r in result] - assert len(results) == 1 - assert results[0]["a"] == "test2" - assert results[0]["b"] == 9 - assert execute_query_mock.call_count == 1 - - def test_execute_query_error_before_metadata(self): + execute_query_mock.return_value = self._make_gapic_stream(values) + result = client.execute_query( + f"SELECT a, b 
FROM {self.TABLE_NAME}", self.INSTANCE_NAME + ) + results = [r for r in result] + assert results[0]["a"] == "test" + assert results[0]["b"] == 8 + assert results[1]["a"] == "test2" + assert results[1]["b"] == 9 + assert results[2]["a"] == "test3" + assert results[2]["b"] is None + assert execute_query_mock.call_count == 1 + assert prepare_mock.call_count == 1 + + def test_execute_query_with_params(self, client, execute_query_mock, prepare_mock): + values = [*chunked_responses(2, str_val("test2"), int_val(9), token=b"r2")] + execute_query_mock.return_value = self._make_gapic_stream(values) + result = client.execute_query( + f"SELECT a, b FROM {self.TABLE_NAME} WHERE b=@b", + self.INSTANCE_NAME, + parameters={"b": 9}, + ) + results = [r for r in result] + assert len(results) == 1 + assert results[0]["a"] == "test2" + assert results[0]["b"] == 9 + assert execute_query_mock.call_count == 1 + assert prepare_mock.call_count == 1 + + def test_execute_query_error_before_metadata( + self, client, execute_query_mock, prepare_mock + ): from google.api_core.exceptions import DeadlineExceeded values = [ DeadlineExceeded(""), - self.resonse_with_metadata(), - self.resonse_with_result("test"), - self.resonse_with_result(8, resume_token=b"r1"), - self.resonse_with_result("test2"), - self.resonse_with_result(9, resume_token=b"r2"), - self.resonse_with_result("test3"), - self.resonse_with_result(None, resume_token=b"r3"), + *chunked_responses(2, str_val("test"), int_val(8), reset=True, token=b"r1"), + *chunked_responses(2, str_val("test2"), int_val(9), token=b"r2"), + *chunked_responses(2, str_val("test3"), null_val(), token=b"r3"), ] - client = self._make_client() - with mock.patch.object( - client._gapic_client, "execute_query", CrossSync._Sync_Impl.Mock() - ) as execute_query_mock: - execute_query_mock.return_value = self._make_gapic_stream(values) - result = client.execute_query( - f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME - ) - results = [r for r in result] - assert len(results) == 3 - assert execute_query_mock.call_count == 2 + execute_query_mock.return_value = self._make_gapic_stream(values) + result = client.execute_query( + f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME + ) + results = [r for r in result] + assert len(results) == 3 + assert execute_query_mock.call_count == 2 + assert prepare_mock.call_count == 1 - def test_execute_query_error_after_metadata(self): + def test_execute_query_error_after_metadata( + self, client, execute_query_mock, prepare_mock + ): from google.api_core.exceptions import DeadlineExceeded values = [ - self.resonse_with_metadata(), DeadlineExceeded(""), - self.resonse_with_metadata(), - self.resonse_with_result("test"), - self.resonse_with_result(8, resume_token=b"r1"), - self.resonse_with_result("test2"), - self.resonse_with_result(9, resume_token=b"r2"), - self.resonse_with_result("test3"), - self.resonse_with_result(None, resume_token=b"r3"), + *chunked_responses(2, str_val("test"), int_val(8), reset=True, token=b"r1"), + *chunked_responses(2, str_val("test2"), int_val(9), token=b"r2"), + *chunked_responses(2, str_val("test3"), null_val(), token=b"r3"), ] - client = self._make_client() - with mock.patch.object( - client._gapic_client, "execute_query", CrossSync._Sync_Impl.Mock() - ) as execute_query_mock: - execute_query_mock.return_value = self._make_gapic_stream(values) - result = client.execute_query( - f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME - ) - results = [r for r in result] - assert len(results) == 3 - assert 
execute_query_mock.call_count == 2 - requests = [args[0][0] for args in execute_query_mock.call_args_list] - resume_tokens = [r.resume_token for r in requests if r.resume_token] - assert resume_tokens == [] - - def test_execute_query_with_retries(self): + execute_query_mock.return_value = self._make_gapic_stream(values) + result = client.execute_query( + f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME + ) + results = [r for r in result] + assert len(results) == 3 + assert execute_query_mock.call_count == 2 + requests = [args[0][0] for args in execute_query_mock.call_args_list] + resume_tokens = [r.resume_token for r in requests if r.resume_token] + assert resume_tokens == [] + + def test_execute_query_with_retries(self, client, execute_query_mock, prepare_mock): from google.api_core.exceptions import DeadlineExceeded values = [ - self.resonse_with_metadata(), - self.resonse_with_result("test"), - self.resonse_with_result(8, resume_token=b"r1"), + *chunked_responses(2, str_val("test"), int_val(8), reset=True, token=b"r1"), DeadlineExceeded(""), - self.resonse_with_result("test2"), - self.resonse_with_result(9, resume_token=b"r2"), - self.resonse_with_result("test3"), + *chunked_responses(2, str_val("test2"), int_val(9), token=b"r2"), DeadlineExceeded(""), - self.resonse_with_result("test3"), - self.resonse_with_result(None, resume_token=b"r3"), + *chunked_responses(2, str_val("test3"), null_val(), token=b"r3"), ] - client = self._make_client() - with mock.patch.object( - client._gapic_client, "execute_query", CrossSync._Sync_Impl.Mock() - ) as execute_query_mock: - execute_query_mock.return_value = self._make_gapic_stream(values) - result = client.execute_query( - f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME - ) - results = [r for r in result] - assert results[0]["a"] == "test" - assert results[0]["b"] == 8 - assert results[1]["a"] == "test2" - assert results[1]["b"] == 9 - assert results[2]["a"] == "test3" - assert results[2]["b"] is None - assert len(results) == 3 - requests = [args[0][0] for args in execute_query_mock.call_args_list] - resume_tokens = [r.resume_token for r in requests if r.resume_token] - assert resume_tokens == [b"r1", b"r2"] + execute_query_mock.return_value = self._make_gapic_stream(values) + result = client.execute_query( + f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME + ) + results = [r for r in result] + assert results[0]["a"] == "test" + assert results[0]["b"] == 8 + assert results[1]["a"] == "test2" + assert results[1]["b"] == 9 + assert results[2]["a"] == "test3" + assert results[2]["b"] is None + assert len(results) == 3 + requests = [args[0][0] for args in execute_query_mock.call_args_list] + resume_tokens = [r.resume_token for r in requests if r.resume_token] + assert resume_tokens == [b"r1", b"r2"] + assert prepare_mock.call_count == 1 @pytest.mark.parametrize( "exception", @@ -2783,50 +2731,28 @@ def test_execute_query_with_retries(self): core_exceptions.ServiceUnavailable(""), ], ) - def test_execute_query_retryable_error(self, exception): + def test_execute_query_retryable_error( + self, client, execute_query_mock, prepare_mock, exception + ): + [res1, res2] = chunked_responses( + 2, str_val("test"), int_val(8), reset=True, token=b"t1" + ) values = [ - self.resonse_with_metadata(), - self.resonse_with_result("test", resume_token=b"t1"), + *chunked_responses(1, str_val("test"), int_val(8), reset=True, token=b"t1"), exception, - self.resonse_with_result(8, resume_token=b"t2"), - ] - client = self._make_client() - with 
mock.patch.object( - client._gapic_client, "execute_query", CrossSync._Sync_Impl.Mock() - ) as execute_query_mock: - execute_query_mock.return_value = self._make_gapic_stream(values) - result = client.execute_query( - f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME - ) - results = [r for r in result] - assert len(results) == 1 - assert execute_query_mock.call_count == 2 - requests = [args[0][0] for args in execute_query_mock.call_args_list] - resume_tokens = [r.resume_token for r in requests if r.resume_token] - assert resume_tokens == [b"t1"] - - def test_execute_query_retry_partial_row(self): - values = [ - self.resonse_with_metadata(), - self.resonse_with_result("test", resume_token=b"t1"), - core_exceptions.DeadlineExceeded(""), - self.resonse_with_result(8, resume_token=b"t2"), + *chunked_responses(1, str_val("tes2"), int_val(9), reset=True, token=b"t1"), ] - client = self._make_client() - with mock.patch.object( - client._gapic_client, "execute_query", CrossSync._Sync_Impl.Mock() - ) as execute_query_mock: - execute_query_mock.return_value = self._make_gapic_stream(values) - result = client.execute_query( - f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME - ) - results = [r for r in result] - assert results[0]["a"] == "test" - assert results[0]["b"] == 8 - assert execute_query_mock.call_count == 2 - requests = [args[0][0] for args in execute_query_mock.call_args_list] - resume_tokens = [r.resume_token for r in requests if r.resume_token] - assert resume_tokens == [b"t1"] + execute_query_mock.return_value = self._make_gapic_stream(values) + result = client.execute_query( + f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME + ) + results = [r for r in result] + assert len(results) == 2 + assert execute_query_mock.call_count == 2 + assert prepare_mock.call_count == 1 + requests = [args[0][0] for args in execute_query_mock.call_args_list] + resume_tokens = [r.resume_token for r in requests if r.resume_token] + assert resume_tokens == [b"t1"] @pytest.mark.parametrize( "ExceptionType", @@ -2846,48 +2772,92 @@ def test_execute_query_retry_partial_row(self): core_exceptions.InternalServerError, ], ) - def test_execute_query_non_retryable(self, ExceptionType): + def test_execute_query_non_retryable( + self, client, execute_query_mock, prepare_mock, ExceptionType + ): values = [ - self.resonse_with_metadata(), - self.resonse_with_result("test"), - self.resonse_with_result(8, resume_token=b"r1"), + *chunked_responses(2, str_val("test"), int_val(8), reset=True, token=b"r1"), ExceptionType(""), - self.resonse_with_result("test2"), - self.resonse_with_result(9, resume_token=b"r2"), - self.resonse_with_result("test3"), - self.resonse_with_result(None, resume_token=b"r3"), + *chunked_responses(2, str_val("test2"), int_val(9), token=b"r2"), + *chunked_responses(2, str_val("test3"), null_val(), token=b"r3"), ] - client = self._make_client() - with mock.patch.object( - client._gapic_client, "execute_query", CrossSync._Sync_Impl.Mock() - ) as execute_query_mock: - execute_query_mock.return_value = self._make_gapic_stream(values) - result = client.execute_query( + execute_query_mock.return_value = self._make_gapic_stream(values) + result = client.execute_query( + f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME + ) + r = CrossSync._Sync_Impl.next(result) + assert r["a"] == "test" + assert r["b"] == 8 + with pytest.raises(ExceptionType): + r = CrossSync._Sync_Impl.next(result) + assert execute_query_mock.call_count == 1 + assert prepare_mock.call_count == 1 + requests = 
[args[0][0] for args in execute_query_mock.call_args_list] + resume_tokens = [r.resume_token for r in requests if r.resume_token] + assert resume_tokens == [] + + @pytest.mark.parametrize( + "retryable_exception", + [core_exceptions.DeadlineExceeded, core_exceptions.ServiceUnavailable], + ) + def test_prepare_query_retryable( + self, client, execute_query_mock, prepare_mock, retryable_exception + ): + prepare_mock.reset_mock() + prepare_mock.side_effect = [ + retryable_exception("test"), + prepare_response( + b"foo", + metadata=metadata(column("a", str_type()), column("b", int64_type())), + ), + ] + values = [ + *chunked_responses(1, str_val("test"), int_val(8), reset=True, token=b"t1") + ] + execute_query_mock.return_value = self._make_gapic_stream(values) + result = client.execute_query( + f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME + ) + results = [r for r in result] + assert results[0]["a"] == "test" + assert results[0]["b"] == 8 + assert execute_query_mock.call_count == 1 + assert prepare_mock.call_count == 2 + + @pytest.mark.parametrize( + "non_retryable_exception", + [ + core_exceptions.InvalidArgument, + core_exceptions.FailedPrecondition, + core_exceptions.PermissionDenied, + core_exceptions.MethodNotImplemented, + core_exceptions.Cancelled, + core_exceptions.AlreadyExists, + core_exceptions.OutOfRange, + core_exceptions.DataLoss, + core_exceptions.Unauthenticated, + core_exceptions.NotFound, + core_exceptions.ResourceExhausted, + core_exceptions.Unknown, + core_exceptions.InternalServerError, + ], + ) + def test_prepare_query_non_retryable( + self, client, execute_query_mock, prepare_mock, non_retryable_exception + ): + prepare_mock.reset_mock() + prepare_mock.side_effect = [ + non_retryable_exception("test"), + prepare_response( + b"foo", + metadata=metadata(column("a", str_type()), column("b", int64_type())), + ), + ] + values = [ + *chunked_responses(1, str_val("test"), int_val(8), reset=True, token=b"t1") + ] + execute_query_mock.return_value = self._make_gapic_stream(values) + with pytest.raises(non_retryable_exception): + client.execute_query( f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME ) - r = CrossSync._Sync_Impl.next(result) - assert r["a"] == "test" - assert r["b"] == 8 - with pytest.raises(ExceptionType): - r = CrossSync._Sync_Impl.next(result) - assert execute_query_mock.call_count == 1 - requests = [args[0][0] for args in execute_query_mock.call_args_list] - resume_tokens = [r.resume_token for r in requests if r.resume_token] - assert resume_tokens == [] - - def test_execute_query_metadata_received_multiple_times_detected(self): - values = [self.resonse_with_metadata(), self.resonse_with_metadata()] - client = self._make_client() - with mock.patch.object( - client._gapic_client, "execute_query", CrossSync._Sync_Impl.Mock() - ) as execute_query_mock: - execute_query_mock.return_value = self._make_gapic_stream(values) - with pytest.raises( - Exception, match="Invalid ExecuteQuery response received" - ): - [ - r - for r in client.execute_query( - f"SELECT a, b FROM {self.TABLE_NAME}", self.INSTANCE_NAME - ) - ] diff --git a/packages/google-cloud-bigtable/tests/unit/data/_testing.py b/packages/google-cloud-bigtable/tests/unit/data/_testing.py deleted file mode 100644 index b5dd3f444f66..000000000000 --- a/packages/google-cloud-bigtable/tests/unit/data/_testing.py +++ /dev/null @@ -1,18 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file 
except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# flake8: noqa -from unittest.mock import Mock -from .._testing import TYPE_INT, split_bytes_into_chunks, proto_rows_bytes diff --git a/packages/google-cloud-bigtable/tests/unit/data/execute_query/_async/test_query_iterator.py b/packages/google-cloud-bigtable/tests/unit/data/execute_query/_async/test_query_iterator.py index ea93fed552c5..9823655569c5 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/execute_query/_async/test_query_iterator.py +++ b/packages/google-cloud-bigtable/tests/unit/data/execute_query/_async/test_query_iterator.py @@ -12,10 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. +from google.cloud.bigtable.data import exceptions +from google.cloud.bigtable.data.execute_query.metadata import ( + _pb_metadata_to_metadata_types, +) import pytest import concurrent.futures -from google.cloud.bigtable_v2.types.bigtable import ExecuteQueryResponse -from .._testing import TYPE_INT, split_bytes_into_chunks, proto_rows_bytes +from ..sql_helpers import ( + chunked_responses, + int_val, + column, + metadata, + int64_type, +) from google.cloud.bigtable.data._cross_sync import CrossSync @@ -64,56 +73,10 @@ def _make_one(self, *args, **kwargs): @pytest.fixture def proto_byte_stream(self): - proto_rows = [ - proto_rows_bytes({"int_value": 1}, {"int_value": 2}), - proto_rows_bytes({"int_value": 3}, {"int_value": 4}), - proto_rows_bytes({"int_value": 5}, {"int_value": 6}), - ] - - messages = [ - *split_bytes_into_chunks(proto_rows[0], num_chunks=2), - *split_bytes_into_chunks(proto_rows[1], num_chunks=3), - proto_rows[2], - ] - stream = [ - ExecuteQueryResponse( - metadata={ - "proto_schema": { - "columns": [ - {"name": "test1", "type_": TYPE_INT}, - {"name": "test2", "type_": TYPE_INT}, - ] - } - } - ), - ExecuteQueryResponse( - results={"proto_rows_batch": {"batch_data": messages[0]}} - ), - ExecuteQueryResponse( - results={ - "proto_rows_batch": {"batch_data": messages[1]}, - "resume_token": b"token1", - } - ), - ExecuteQueryResponse( - results={"proto_rows_batch": {"batch_data": messages[2]}} - ), - ExecuteQueryResponse( - results={"proto_rows_batch": {"batch_data": messages[3]}} - ), - ExecuteQueryResponse( - results={ - "proto_rows_batch": {"batch_data": messages[4]}, - "resume_token": b"token2", - } - ), - ExecuteQueryResponse( - results={ - "proto_rows_batch": {"batch_data": messages[5]}, - "resume_token": b"token3", - } - ), + *chunked_responses(2, int_val(1), int_val(2), token=b"token1"), + *chunked_responses(3, int_val(3), int_val(4), token=b"token2"), + *chunked_responses(1, int_val(5), int_val(6), token=b"token3"), ] return stream @@ -137,6 +100,11 @@ async def test_iterator(self, proto_byte_stream): instance_id="test-instance", app_profile_id="test_profile", request_body={}, + prepare_metadata=_pb_metadata_to_metadata_types( + metadata( + column("test1", int64_type()), column("test2", int64_type()) + ) + ), attempt_timeout=10, operation_timeout=10, req_metadata=(), @@ -154,7 +122,7 @@ async def test_iterator(self, proto_byte_stream): assert 
mock_async_iterator.idx == len(proto_byte_stream) @CrossSync.pytest - async def test_iterator_awaits_metadata(self, proto_byte_stream): + async def test_iterator_returns_metadata_after_data(self, proto_byte_stream): client_mock = mock.Mock() client_mock._register_instance = CrossSync.Mock() @@ -171,12 +139,148 @@ async def test_iterator_awaits_metadata(self, proto_byte_stream): instance_id="test-instance", app_profile_id="test_profile", request_body={}, + prepare_metadata=_pb_metadata_to_metadata_types( + metadata( + column("test1", int64_type()), column("test2", int64_type()) + ) + ), + attempt_timeout=10, + operation_timeout=10, + req_metadata=(), + retryable_excs=[], + ) + + await CrossSync.next(iterator) + assert len(iterator.metadata) == 2 + + assert mock_async_iterator.idx == 2 + + @CrossSync.pytest + async def test_iterator_throws_error_on_close_w_bufferred_data(self): + client_mock = mock.Mock() + + client_mock._register_instance = CrossSync.Mock() + client_mock._remove_instance_registration = CrossSync.Mock() + stream = [ + *chunked_responses(2, int_val(1), int_val(2), token=b"token1"), + *chunked_responses(3, int_val(3), int_val(4), token=b"token2"), + # Remove the last response, which has the token. We expect this + # to cause the call to close within _next_impl_ to fail + chunked_responses(2, int_val(5), int_val(6), token=b"token3")[0], + ] + mock_async_iterator = MockIterator(stream) + iterator = None + with mock.patch.object( + CrossSync, + "retry_target_stream", + return_value=mock_async_iterator, + ): + iterator = self._make_one( + client=client_mock, + instance_id="test-instance", + app_profile_id="test_profile", + request_body={}, + prepare_metadata=_pb_metadata_to_metadata_types( + metadata( + column("test1", int64_type()), column("test2", int64_type()) + ) + ), + attempt_timeout=10, + operation_timeout=10, + req_metadata=(), + retryable_excs=[], + ) + i = 0 + async for row in iterator: + i += 1 + if i == 2: + break + with pytest.raises( + ValueError, + match="Unexpected buffered data at end of executeQuery reqest", + ): + await CrossSync.next(iterator) + + @CrossSync.pytest + async def test_iterator_handles_reset(self): + client_mock = mock.Mock() + + client_mock._register_instance = CrossSync.Mock() + client_mock._remove_instance_registration = CrossSync.Mock() + stream = [ + # Expect this to be dropped by reset + *chunked_responses(2, int_val(1), int_val(2)), + *chunked_responses(3, int_val(3), int_val(4), reset=True), + *chunked_responses(2, int_val(5), int_val(6), reset=False, token=b"token1"), + # Only send first of two responses so that there is no checksum + # expect to be reset + chunked_responses(2, int_val(10), int_val(12))[0], + *chunked_responses(2, int_val(7), int_val(8), token=b"token2"), + ] + mock_async_iterator = MockIterator(stream) + iterator = None + with mock.patch.object( + CrossSync, + "retry_target_stream", + return_value=mock_async_iterator, + ): + iterator = self._make_one( + client=client_mock, + instance_id="test-instance", + app_profile_id="test_profile", + request_body={}, + prepare_metadata=_pb_metadata_to_metadata_types( + metadata( + column("test1", int64_type()), column("test2", int64_type()) + ) + ), attempt_timeout=10, operation_timeout=10, req_metadata=(), retryable_excs=[], ) + results = [] + async for value in iterator: + results.append(value) + assert len(results) == 3 + [row1, row2, row3] = results + assert row1["test1"] == 3 + assert row1["test2"] == 4 + assert row2["test1"] == 5 + assert row2["test2"] == 6 + assert 
row3["test1"] == 7 + assert row3["test2"] == 8 + + @CrossSync.pytest + async def test_iterator_returns_error_if_metadata_requested_too_early( + self, proto_byte_stream + ): + client_mock = mock.Mock() - await iterator.metadata() + client_mock._register_instance = CrossSync.Mock() + client_mock._remove_instance_registration = CrossSync.Mock() + mock_async_iterator = MockIterator(proto_byte_stream) + iterator = None + with mock.patch.object( + CrossSync, + "retry_target_stream", + return_value=mock_async_iterator, + ): + iterator = self._make_one( + client=client_mock, + instance_id="test-instance", + app_profile_id="test_profile", + request_body={}, + prepare_metadata=_pb_metadata_to_metadata_types( + metadata( + column("test1", int64_type()), column("test2", int64_type()) + ) + ), + attempt_timeout=10, + operation_timeout=10, + req_metadata=(), + retryable_excs=[], + ) - assert mock_async_iterator.idx == 1 + with pytest.raises(exceptions.EarlyMetadataCallError): + iterator.metadata diff --git a/packages/google-cloud-bigtable/tests/unit/data/execute_query/_sync_autogen/test_query_iterator.py b/packages/google-cloud-bigtable/tests/unit/data/execute_query/_sync_autogen/test_query_iterator.py index 77a28ea92d1e..d4f3ec26f3c7 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/execute_query/_sync_autogen/test_query_iterator.py +++ b/packages/google-cloud-bigtable/tests/unit/data/execute_query/_sync_autogen/test_query_iterator.py @@ -15,10 +15,13 @@ # This file is automatically generated by CrossSync. Do not edit manually. +from google.cloud.bigtable.data import exceptions +from google.cloud.bigtable.data.execute_query.metadata import ( + _pb_metadata_to_metadata_types, +) import pytest import concurrent.futures -from google.cloud.bigtable_v2.types.bigtable import ExecuteQueryResponse -from .._testing import TYPE_INT, split_bytes_into_chunks, proto_rows_bytes +from ..sql_helpers import chunked_responses, int_val, column, metadata, int64_type from google.cloud.bigtable.data._cross_sync import CrossSync try: @@ -56,54 +59,10 @@ def _make_one(self, *args, **kwargs): @pytest.fixture def proto_byte_stream(self): - proto_rows = [ - proto_rows_bytes({"int_value": 1}, {"int_value": 2}), - proto_rows_bytes({"int_value": 3}, {"int_value": 4}), - proto_rows_bytes({"int_value": 5}, {"int_value": 6}), - ] - messages = [ - *split_bytes_into_chunks(proto_rows[0], num_chunks=2), - *split_bytes_into_chunks(proto_rows[1], num_chunks=3), - proto_rows[2], - ] stream = [ - ExecuteQueryResponse( - metadata={ - "proto_schema": { - "columns": [ - {"name": "test1", "type_": TYPE_INT}, - {"name": "test2", "type_": TYPE_INT}, - ] - } - } - ), - ExecuteQueryResponse( - results={"proto_rows_batch": {"batch_data": messages[0]}} - ), - ExecuteQueryResponse( - results={ - "proto_rows_batch": {"batch_data": messages[1]}, - "resume_token": b"token1", - } - ), - ExecuteQueryResponse( - results={"proto_rows_batch": {"batch_data": messages[2]}} - ), - ExecuteQueryResponse( - results={"proto_rows_batch": {"batch_data": messages[3]}} - ), - ExecuteQueryResponse( - results={ - "proto_rows_batch": {"batch_data": messages[4]}, - "resume_token": b"token2", - } - ), - ExecuteQueryResponse( - results={ - "proto_rows_batch": {"batch_data": messages[5]}, - "resume_token": b"token3", - } - ), + *chunked_responses(2, int_val(1), int_val(2), token=b"token1"), + *chunked_responses(3, int_val(3), int_val(4), token=b"token2"), + *chunked_responses(1, int_val(5), int_val(6), token=b"token3"), ] return stream @@ -124,6 +83,11 @@ def 
test_iterator(self, proto_byte_stream): instance_id="test-instance", app_profile_id="test_profile", request_body={}, + prepare_metadata=_pb_metadata_to_metadata_types( + metadata( + column("test1", int64_type()), column("test2", int64_type()) + ) + ), attempt_timeout=10, operation_timeout=10, req_metadata=(), @@ -138,7 +102,125 @@ def test_iterator(self, proto_byte_stream): client_mock._remove_instance_registration.assert_called_once() assert mock_async_iterator.idx == len(proto_byte_stream) - def test_iterator_awaits_metadata(self, proto_byte_stream): + def test_iterator_returns_metadata_after_data(self, proto_byte_stream): + client_mock = mock.Mock() + client_mock._register_instance = CrossSync._Sync_Impl.Mock() + client_mock._remove_instance_registration = CrossSync._Sync_Impl.Mock() + mock_async_iterator = MockIterator(proto_byte_stream) + iterator = None + with mock.patch.object( + CrossSync._Sync_Impl, + "retry_target_stream", + return_value=mock_async_iterator, + ): + iterator = self._make_one( + client=client_mock, + instance_id="test-instance", + app_profile_id="test_profile", + request_body={}, + prepare_metadata=_pb_metadata_to_metadata_types( + metadata( + column("test1", int64_type()), column("test2", int64_type()) + ) + ), + attempt_timeout=10, + operation_timeout=10, + req_metadata=(), + retryable_excs=[], + ) + CrossSync._Sync_Impl.next(iterator) + assert len(iterator.metadata) == 2 + assert mock_async_iterator.idx == 2 + + def test_iterator_throws_error_on_close_w_bufferred_data(self): + client_mock = mock.Mock() + client_mock._register_instance = CrossSync._Sync_Impl.Mock() + client_mock._remove_instance_registration = CrossSync._Sync_Impl.Mock() + stream = [ + *chunked_responses(2, int_val(1), int_val(2), token=b"token1"), + *chunked_responses(3, int_val(3), int_val(4), token=b"token2"), + chunked_responses(2, int_val(5), int_val(6), token=b"token3")[0], + ] + mock_async_iterator = MockIterator(stream) + iterator = None + with mock.patch.object( + CrossSync._Sync_Impl, + "retry_target_stream", + return_value=mock_async_iterator, + ): + iterator = self._make_one( + client=client_mock, + instance_id="test-instance", + app_profile_id="test_profile", + request_body={}, + prepare_metadata=_pb_metadata_to_metadata_types( + metadata( + column("test1", int64_type()), column("test2", int64_type()) + ) + ), + attempt_timeout=10, + operation_timeout=10, + req_metadata=(), + retryable_excs=[], + ) + i = 0 + for row in iterator: + i += 1 + if i == 2: + break + with pytest.raises( + ValueError, match="Unexpected buffered data at end of executeQuery reqest" + ): + CrossSync._Sync_Impl.next(iterator) + + def test_iterator_handles_reset(self): + client_mock = mock.Mock() + client_mock._register_instance = CrossSync._Sync_Impl.Mock() + client_mock._remove_instance_registration = CrossSync._Sync_Impl.Mock() + stream = [ + *chunked_responses(2, int_val(1), int_val(2)), + *chunked_responses(3, int_val(3), int_val(4), reset=True), + *chunked_responses(2, int_val(5), int_val(6), reset=False, token=b"token1"), + chunked_responses(2, int_val(10), int_val(12))[0], + *chunked_responses(2, int_val(7), int_val(8), token=b"token2"), + ] + mock_async_iterator = MockIterator(stream) + iterator = None + with mock.patch.object( + CrossSync._Sync_Impl, + "retry_target_stream", + return_value=mock_async_iterator, + ): + iterator = self._make_one( + client=client_mock, + instance_id="test-instance", + app_profile_id="test_profile", + request_body={}, + prepare_metadata=_pb_metadata_to_metadata_types( + 
metadata( + column("test1", int64_type()), column("test2", int64_type()) + ) + ), + attempt_timeout=10, + operation_timeout=10, + req_metadata=(), + retryable_excs=[], + ) + results = [] + for value in iterator: + results.append(value) + assert len(results) == 3 + [row1, row2, row3] = results + assert row1["test1"] == 3 + assert row1["test2"] == 4 + assert row2["test1"] == 5 + assert row2["test2"] == 6 + assert row3["test1"] == 7 + assert row3["test2"] == 8 + + def test_iterator_returns_error_if_metadata_requested_too_early( + self, proto_byte_stream + ): client_mock = mock.Mock() client_mock._register_instance = CrossSync._Sync_Impl.Mock() client_mock._remove_instance_registration = CrossSync._Sync_Impl.Mock() @@ -154,10 +236,15 @@ def test_iterator_awaits_metadata(self, proto_byte_stream): instance_id="test-instance", app_profile_id="test_profile", request_body={}, + prepare_metadata=_pb_metadata_to_metadata_types( + metadata( + column("test1", int64_type()), column("test2", int64_type()) + ) + ), attempt_timeout=10, operation_timeout=10, req_metadata=(), retryable_excs=[], ) - iterator.metadata() - assert mock_async_iterator.idx == 1 + with pytest.raises(exceptions.EarlyMetadataCallError): + iterator.metadata diff --git a/packages/google-cloud-bigtable/tests/unit/data/execute_query/_testing.py b/packages/google-cloud-bigtable/tests/unit/data/execute_query/_testing.py deleted file mode 100644 index 9d24eee342cb..000000000000 --- a/packages/google-cloud-bigtable/tests/unit/data/execute_query/_testing.py +++ /dev/null @@ -1,17 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# flake8: noqa -from .._testing import TYPE_INT, split_bytes_into_chunks, proto_rows_bytes diff --git a/packages/google-cloud-bigtable/tests/unit/data/execute_query/sql_helpers.py b/packages/google-cloud-bigtable/tests/unit/data/execute_query/sql_helpers.py new file mode 100644 index 000000000000..5d5569dba5ef --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/data/execute_query/sql_helpers.py @@ -0,0 +1,212 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from datetime import datetime, timedelta +from typing import List + +from google.protobuf import timestamp_pb2 + +from google.cloud.bigtable_v2.types.bigtable import ( + ExecuteQueryResponse, + PrepareQueryResponse, +) +from google.cloud.bigtable_v2.types.data import ( + Value, + ProtoRows, + ProtoRowsBatch, + ResultSetMetadata, + ColumnMetadata, +) +from google.cloud.bigtable_v2.types.types import Type +import google_crc32c # type: ignore + + +def checksum(data: bytearray) -> int: + return google_crc32c.value(bytes(memoryview(data))) + + +def split_bytes_into_chunks(bytes_to_split, num_chunks) -> List[bytes]: + from google.cloud.bigtable.helpers import batched + + assert num_chunks <= len(bytes_to_split) + bytes_per_part = (len(bytes_to_split) - 1) // num_chunks + 1 + result = list(map(bytes, batched(bytes_to_split, bytes_per_part))) + assert len(result) == num_chunks + return result + + +def column(name: str, type: Type) -> ColumnMetadata: + c = ColumnMetadata() + c.name = name + c.type_ = type + return c + + +def metadata(*args: ColumnMetadata) -> ResultSetMetadata: + metadata = ResultSetMetadata() + metadata.proto_schema.columns = args + return metadata + + +def prepare_response( + prepared_query: bytes, + metadata: ResultSetMetadata, + valid_until=datetime.now() + timedelta(seconds=10), +) -> PrepareQueryResponse: + res = PrepareQueryResponse() + res.prepared_query = prepared_query + res.metadata = metadata + ts = timestamp_pb2.Timestamp() + ts.FromDatetime(valid_until) + res.valid_until = ts + return res + + +def batch_response( + b: bytes, reset=False, token=None, checksum=None +) -> ExecuteQueryResponse: + res = ExecuteQueryResponse() + res.results.proto_rows_batch.batch_data = b + res.results.reset = reset + res.results.resume_token = token + if checksum: + res.results.batch_checksum = checksum + return res + + +def execute_query_response( + *args: Value, reset=False, token=None, checksum=None +) -> ExecuteQueryResponse: + data = proto_rows_batch(args) + return batch_response(data, reset, token, checksum=checksum) + + +def chunked_responses( + num_chunks: int, + *args: Value, + reset=True, + token=None, +) -> List[ExecuteQueryResponse]: + """ + Creates one ExecuteQuery response per chunk, with the data in args split between chunks. 
+ """ + data_bytes = proto_rows_bytes(*args) + chunks = split_bytes_into_chunks(data_bytes, num_chunks) + responses = [] + for i, chunk in enumerate(chunks): + response = ExecuteQueryResponse() + if i == 0: + response.results.reset = reset + if i == len(chunks) - 1: + response.results.resume_token = token + response.results.batch_checksum = checksum(data_bytes) + response.results.proto_rows_batch.batch_data = chunk + responses.append(response) + return responses + + +def proto_rows_bytes(*args: Value) -> bytes: + rows = ProtoRows() + rows.values = args + return ProtoRows.serialize(rows) + + +def token_only_response(token: bytes) -> ExecuteQueryResponse: + r = ExecuteQueryResponse() + r.results.resume_token = token + return r + + +def proto_rows_batch(*args: Value) -> ProtoRowsBatch: + batch = ProtoRowsBatch() + batch.batch_data = proto_rows_bytes(args) + return batch + + +def str_val(s: str) -> Value: + v = Value() + v.string_value = s + return v + + +def bytes_val(b: bytes) -> Value: + v = Value() + v.bytes_value = b + return v + + +def int_val(i: int) -> Value: + v = Value() + v.int_value = i + return v + + +def null_val() -> Value: + return Value() + + +def str_type() -> Type: + t = Type() + t.string_type = {} + return t + + +def bytes_type() -> Type: + t = Type() + t.bytes_type = {} + return t + + +def int64_type() -> Type: + t = Type() + t.int64_type = {} + return t + + +def float64_type() -> Type: + t = Type() + t.float64_type = {} + return t + + +def float32_type() -> Type: + t = Type() + t.float32_type = {} + return t + + +def bool_type() -> Type: + t = Type() + t.bool_type = {} + return t + + +def ts_type() -> Type: + t = Type() + t.timestamp_type = {} + return t + + +def date_type() -> Type: + t = Type() + t.date_type = {} + return t + + +def array_type(elem_type: Type) -> Type: + t = Type() + arr_type = Type.Array() + arr_type.element_type = elem_type + t.array_type = arr_type + return t diff --git a/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_byte_cursor.py b/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_byte_cursor.py index e283e1ca215c..fc764c86cb6f 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_byte_cursor.py +++ b/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_byte_cursor.py @@ -11,11 +11,15 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+import pytest -from google.cloud.bigtable_v2.types.bigtable import ExecuteQueryResponse from google.cloud.bigtable.data.execute_query._byte_cursor import _ByteCursor -from ._testing import TYPE_INT +from .sql_helpers import ( + batch_response, + checksum, + token_only_response, +) def pass_values_to_byte_cursor(byte_cursor, iterable): @@ -29,121 +33,139 @@ class TestByteCursor: def test__proto_rows_batch__complete_data(self): byte_cursor = _ByteCursor() stream = [ - ExecuteQueryResponse( - metadata={ - "proto_schema": {"columns": [{"name": "test1", "type_": TYPE_INT}]} - } - ), - ExecuteQueryResponse(results={"proto_rows_batch": {"batch_data": b"123"}}), - ExecuteQueryResponse(results={"proto_rows_batch": {"batch_data": b"456"}}), - ExecuteQueryResponse(results={"proto_rows_batch": {"batch_data": b"789"}}), - ExecuteQueryResponse( - results={ - "proto_rows_batch": {"batch_data": b"0"}, - "resume_token": b"token1", - } - ), - ExecuteQueryResponse(results={"proto_rows_batch": {"batch_data": b"abc"}}), - ExecuteQueryResponse(results={"proto_rows_batch": {"batch_data": b"def"}}), - ExecuteQueryResponse(results={"proto_rows_batch": {"batch_data": b"ghi"}}), - ExecuteQueryResponse( - results={ - "proto_rows_batch": {"batch_data": b"j"}, - "resume_token": b"token2", - } - ), + batch_response(b"123"), + batch_response(b"456"), + batch_response(b"789"), + batch_response(b"0", token=b"token1", checksum=checksum(b"1234567890")), + batch_response(b"abc"), + batch_response(b"def"), + batch_response(b"ghi"), + batch_response(b"j", token=b"token2", checksum=checksum(b"abcdefghij")), ] - assert byte_cursor.metadata is None byte_cursor_iter = pass_values_to_byte_cursor(byte_cursor, stream) value = next(byte_cursor_iter) - assert value == b"1234567890" + assert value[0] == b"1234567890" assert byte_cursor._resume_token == b"token1" - assert byte_cursor.metadata.columns[0].column_name == "test1" value = next(byte_cursor_iter) - assert value == b"abcdefghij" + assert value[0] == b"abcdefghij" assert byte_cursor._resume_token == b"token2" def test__proto_rows_batch__empty_proto_rows_batch(self): byte_cursor = _ByteCursor() stream = [ - ExecuteQueryResponse( - metadata={ - "proto_schema": {"columns": [{"name": "test1", "type_": TYPE_INT}]} - } - ), - ExecuteQueryResponse( - results={"proto_rows_batch": {}, "resume_token": b"token1"} - ), - ExecuteQueryResponse(results={"proto_rows_batch": {"batch_data": b"123"}}), - ExecuteQueryResponse( - results={ - "proto_rows_batch": {"batch_data": b"0"}, - "resume_token": b"token2", - } - ), + batch_response(b"", token=b"token1"), + batch_response(b"123"), + batch_response(b"0", token=b"token2", checksum=checksum(b"1230")), ] byte_cursor_iter = pass_values_to_byte_cursor(byte_cursor, stream) value = next(byte_cursor_iter) - assert value == b"1230" + assert value[0] == b"1230" assert byte_cursor._resume_token == b"token2" - def test__proto_rows_batch__no_proto_rows_batch(self): + def test__proto_rows_batch__handles_response_with_just_a_token(self): byte_cursor = _ByteCursor() stream = [ - ExecuteQueryResponse( - metadata={ - "proto_schema": {"columns": [{"name": "test1", "type_": TYPE_INT}]} - } - ), - ExecuteQueryResponse(results={"resume_token": b"token1"}), - ExecuteQueryResponse(results={"proto_rows_batch": {"batch_data": b"123"}}), - ExecuteQueryResponse( - results={ - "proto_rows_batch": {"batch_data": b"0"}, - "resume_token": b"token2", - } - ), + token_only_response(b"token1"), + batch_response(b"123"), + batch_response(b"0", token=b"token2", 
checksum=checksum(b"1230")), ] byte_cursor_iter = pass_values_to_byte_cursor(byte_cursor, stream) value = next(byte_cursor_iter) - assert value == b"1230" + assert value[0] == b"1230" assert byte_cursor._resume_token == b"token2" def test__proto_rows_batch__no_resume_token_at_the_end_of_stream(self): byte_cursor = _ByteCursor() stream = [ - ExecuteQueryResponse( - metadata={ - "proto_schema": {"columns": [{"name": "test1", "type_": TYPE_INT}]} - } - ), - ExecuteQueryResponse( - results={ - "proto_rows_batch": {"batch_data": b"0"}, - "resume_token": b"token1", - } - ), - ExecuteQueryResponse(results={"proto_rows_batch": {"batch_data": b"abc"}}), - ExecuteQueryResponse(results={"proto_rows_batch": {"batch_data": b"def"}}), - ExecuteQueryResponse(results={"proto_rows_batch": {"batch_data": b"ghi"}}), - ExecuteQueryResponse( - results={ - "proto_rows_batch": {"batch_data": b"j"}, - } - ), + batch_response(b"0", token=b"token1", checksum=checksum(b"0")), + batch_response(b"abc"), + batch_response(b"def"), + batch_response(b"ghi"), + batch_response(b"j", checksum=checksum(b"abcdefghij")), ] - assert byte_cursor.metadata is None - assert byte_cursor.consume(stream[0]) is None - value = byte_cursor.consume(stream[1]) - assert value == b"0" + value = byte_cursor.consume(stream[0]) + assert value[0] == b"0" assert byte_cursor._resume_token == b"token1" - assert byte_cursor.metadata.columns[0].column_name == "test1" + assert byte_cursor.consume(stream[1]) is None assert byte_cursor.consume(stream[2]) is None assert byte_cursor.consume(stream[3]) is None - assert byte_cursor.consume(stream[3]) is None assert byte_cursor.consume(stream[4]) is None - assert byte_cursor.consume(stream[5]) is None + # Empty should be checked by the iterator and should throw an error if this happens + assert not byte_cursor.empty() + + def test__proto_rows_batch__prepare_for_new_request_resets_buffer(self): + byte_cursor = _ByteCursor() + assert byte_cursor.consume(batch_response(b"abc")) is None + assert ( + byte_cursor.consume( + batch_response(b"def", token=b"token1", checksum=checksum(b"abcdef")) + )[0] + == b"abcdef" + ) + assert byte_cursor.consume(batch_response(b"foo")) is None + assert byte_cursor.prepare_for_new_request() == b"token1" + # foo is dropped because of new request + assert ( + byte_cursor.consume( + batch_response(b"bar", token=b"token2", checksum=checksum(b"bar")) + )[0] + == b"bar" + ) + + def test__proto_rows_batch__multiple_batches_before_token(self): + byte_cursor = _ByteCursor() + assert byte_cursor.consume(batch_response(b"foo")) is None + assert ( + byte_cursor.consume(batch_response(b"bar", checksum=checksum(b"foobar"))) + is None + ) + assert byte_cursor.consume(batch_response(b"1")) is None + assert byte_cursor.consume(batch_response(b"2")) is None + assert ( + byte_cursor.consume(batch_response(b"3", checksum=checksum(b"123"))) is None + ) + batches = byte_cursor.consume( + batch_response(b"done", token=b"token", checksum=checksum(b"done")) + ) + assert len(batches) == 3 + assert batches[0] == b"foobar" + assert batches[1] == b"123" + assert batches[2] == b"done" + + def test__proto_rows_batch__reset_on_partial_batch(self): + byte_cursor = _ByteCursor() + assert byte_cursor.consume(batch_response(b"foo")) is None + assert byte_cursor.consume(batch_response(b"bar", reset=True)) is None + batches = byte_cursor.consume( + batch_response(b"baz", token=b"token", checksum=checksum(b"barbaz")) + ) + assert len(batches) == 1 + assert batches[0] == b"barbaz" + + def 
test__proto_rows_batch__reset_on_complete_batch(self): + byte_cursor = _ByteCursor() + assert byte_cursor.consume(batch_response(b"foo")) is None + assert ( + byte_cursor.consume(batch_response(b"bar", checksum=checksum(b"foobar"))) + is None + ) + assert byte_cursor.consume(batch_response(b"discard")) is None + assert byte_cursor.consume(batch_response(b"1", reset=True)) is None + assert byte_cursor.consume(batch_response(b"2")) is None + batches = byte_cursor.consume( + batch_response(b"3", token=b"token", checksum=checksum(b"123")) + ) + assert len(batches) == 1 + assert batches[0] == b"123" + + def test__proto_rows_batch__checksum_mismatch(self): + byte_cursor = _ByteCursor() + with pytest.raises( + ValueError, + match="Unexpected checksum mismatch.", + ): + byte_cursor.consume(batch_response(b"foo", checksum=1234)) diff --git a/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_checksum.py b/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_checksum.py new file mode 100644 index 000000000000..2a391882dc0e --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_checksum.py @@ -0,0 +1,59 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import pytest + +import sys +from unittest import mock +import warnings + +with warnings.catch_warnings(record=True) as suppressed_warning: + warnings.warn("Supressed warning", RuntimeWarning) + + +def test_import_warning_is_rewritten(): + with mock.patch( + "google.cloud.bigtable.data.execute_query._checksum.import_warning", + suppressed_warning, + ): + with warnings.catch_warnings(record=True) as import_warning: + from google.cloud.bigtable.data.execute_query._checksum import _CRC32C + + # reset this in case the warning has been emitted in other tests + _CRC32C.warn_emitted = False + + assert import_warning == [] + with warnings.catch_warnings(record=True) as first_call_warning: + assert _CRC32C.checksum(b"test") == 2258662080 + assert ( + "Using pure python implementation of `google-crc32` for ExecuteQuery response validation" + in str(first_call_warning[0]) + ) + with warnings.catch_warnings(record=True) as second_call_warning: + assert _CRC32C.checksum(b"test") == 2258662080 + assert second_call_warning == [] + + +@pytest.mark.skipif( + sys.version_info < (3, 9) or sys.version_info > (3, 12), + reason="google_crc32c currently uses pure python for versions not between 3.9 & 3.12", +) +def test_no_warning(): + with warnings.catch_warnings(record=True) as first_call_warning: + from google.cloud.bigtable.data.execute_query._checksum import _CRC32C + + # reset this in case the warning has been emitted in other tests + _CRC32C.warn_emitted = False + + assert _CRC32C.checksum(b"test") == 2258662080 + assert first_call_warning == [] diff --git a/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_execute_query_parameters_parsing.py b/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_execute_query_parameters_parsing.py index 
bebbd8d45c0a..ee03222725be 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_execute_query_parameters_parsing.py +++ b/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_execute_query_parameters_parsing.py @@ -20,6 +20,7 @@ from google.cloud.bigtable.data.execute_query._parameters_formatting import ( _format_execute_query_params, + _to_param_types, ) from google.cloud.bigtable.data.execute_query.metadata import SqlType from google.cloud.bigtable.data.execute_query.values import Struct @@ -292,3 +293,21 @@ def test_array_params_enforce_element_type(): ) assert "Expected query parameter of type str, got int" in str(e1.value.__cause__) assert "Expected query parameter of type int, got str" in str(e2.value.__cause__) + + +def test_to_params_types(): + results = _to_param_types( + {"a": 1, "s": "str", "b": b"bytes", "array": ["foo", "bar"]}, + {"array": SqlType.Array(SqlType.String())}, + ) + assert results == { + "a": SqlType.Int64()._to_type_pb_dict(), + "s": SqlType.String()._to_type_pb_dict(), + "b": SqlType.Bytes()._to_type_pb_dict(), + "array": SqlType.Array(SqlType.String())._to_type_pb_dict(), + } + + +def test_to_param_types_empty(): + results = _to_param_types({}, {}) + assert results == {} diff --git a/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_query_result_parsing_utils.py b/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_query_result_parsing_utils.py index ff7211654545..627570c3782f 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_query_result_parsing_utils.py +++ b/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_query_result_parsing_utils.py @@ -28,7 +28,7 @@ import datetime -from ._testing import TYPE_INT +from tests.unit.data.execute_query.sql_helpers import int64_type TYPE_BYTES = {"bytes_type": {}} TYPE_TIMESTAMP = {"timestamp_type": {}} @@ -38,7 +38,7 @@ class TestQueryResultParsingUtils: @pytest.mark.parametrize( "type_dict,value_dict,expected_metadata_type,expected_value", [ - (TYPE_INT, {"int_value": 1}, SqlType.Int64, 1), + (int64_type(), {"int_value": 1}, SqlType.Int64, 1), ( {"string_type": {}}, {"string_value": "test"}, @@ -87,7 +87,7 @@ def test_basic_types( # Larger test cases were extracted for readability def test__array(self): - _type = PBType({"array_type": {"element_type": TYPE_INT}}) + _type = PBType({"array_type": {"element_type": int64_type()}}) metadata_type = _pb_type_to_metadata_type(_type) assert type(metadata_type) is SqlType.Array assert type(metadata_type.element_type) is SqlType.Int64 @@ -112,7 +112,7 @@ def test__struct(self): "fields": [ { "field_name": "field1", - "type_": TYPE_INT, + "type_": int64_type(), }, { "field_name": None, @@ -120,7 +120,7 @@ def test__struct(self): }, { "field_name": "field3", - "type_": {"array_type": {"element_type": TYPE_INT}}, + "type_": {"array_type": {"element_type": int64_type()}}, }, { "field_name": "field3", @@ -186,7 +186,7 @@ def test__array_of_structs(self): "fields": [ { "field_name": "field1", - "type_": TYPE_INT, + "type_": int64_type(), }, { "field_name": None, @@ -282,7 +282,7 @@ def test__map(self): _type = PBType( { "map_type": { - "key_type": TYPE_INT, + "key_type": int64_type(), "value_type": {"string_type": {}}, } } @@ -348,7 +348,7 @@ def test__map_repeated_values(self): _type = PBType( { "map_type": { - "key_type": TYPE_INT, + "key_type": int64_type(), "value_type": {"string_type": {}}, } }, @@ -398,7 +398,7 @@ def test__map_of_maps_of_structs(self): _type = PBType( { 
"map_type": { - "key_type": TYPE_INT, + "key_type": int64_type(), "value_type": { "map_type": { "key_type": {"string_type": {}}, @@ -407,7 +407,7 @@ def test__map_of_maps_of_structs(self): "fields": [ { "field_name": "field1", - "type_": TYPE_INT, + "type_": int64_type(), }, { "field_name": "field2", diff --git a/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_query_result_row_reader.py b/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_query_result_row_reader.py index 2bb1e4da01e2..6adb1e3c73ae 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_query_result_row_reader.py +++ b/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_query_result_row_reader.py @@ -14,58 +14,55 @@ import pytest from unittest import mock -from google.cloud.bigtable_v2.types.bigtable import ExecuteQueryResponse from google.cloud.bigtable_v2.types.data import Value as PBValue from google.cloud.bigtable.data.execute_query._reader import _QueryResultRowReader -from google.cloud.bigtable.data.execute_query.metadata import ProtoMetadata, SqlType +from google.cloud.bigtable.data.execute_query.metadata import ( + Metadata, + SqlType, + _pb_metadata_to_metadata_types, +) import google.cloud.bigtable.data.execute_query._reader -from ._testing import TYPE_INT, proto_rows_bytes +from tests.unit.data.execute_query.sql_helpers import ( + chunked_responses, + column, + int64_type, + int_val, + metadata, + proto_rows_bytes, + str_val, +) class TestQueryResultRowReader: def test__single_values_received(self): - byte_cursor = mock.Mock( - metadata=ProtoMetadata( - [("test1", SqlType.Int64()), ("test2", SqlType.Int64())] - ) - ) + metadata = Metadata([("test1", SqlType.Int64()), ("test2", SqlType.Int64())]) values = [ - proto_rows_bytes({"int_value": 1}), - proto_rows_bytes({"int_value": 2}), - proto_rows_bytes({"int_value": 3}), + proto_rows_bytes(int_val(1), int_val(2)), + proto_rows_bytes(int_val(3), int_val(4)), ] - reader = _QueryResultRowReader(byte_cursor) + reader = _QueryResultRowReader() - assert reader.consume(values[0]) is None - result = reader.consume(values[1]) + result = reader.consume(values[0:1], metadata) + assert len(result) == 1 + assert len(result[0]) == 2 + result = reader.consume(values[1:], metadata) assert len(result) == 1 assert len(result[0]) == 2 - assert reader.consume(values[2]) is None def test__multiple_rows_received(self): values = [ - proto_rows_bytes( - {"int_value": 1}, - {"int_value": 2}, - {"int_value": 3}, - {"int_value": 4}, - ), - proto_rows_bytes({"int_value": 5}, {"int_value": 6}), - proto_rows_bytes({"int_value": 7}, {"int_value": 8}), + proto_rows_bytes(int_val(1), int_val(2), int_val(3), int_val(4)), + proto_rows_bytes(int_val(5), int_val(6)), + proto_rows_bytes(int_val(7), int_val(8)), ] - byte_cursor = mock.Mock( - metadata=ProtoMetadata( - [("test1", SqlType.Int64()), ("test2", SqlType.Int64())] - ) - ) + metadata = Metadata([("test1", SqlType.Int64()), ("test2", SqlType.Int64())]) + reader = _QueryResultRowReader() - reader = _QueryResultRowReader(byte_cursor) - - result = reader.consume(values[0]) + result = reader.consume(values[0:1], metadata) assert len(result) == 2 assert len(result[0]) == 2 assert result[0][0] == result[0]["test1"] == 1 @@ -75,25 +72,22 @@ def test__multiple_rows_received(self): assert result[1][0] == result[1]["test1"] == 3 assert result[1][1] == result[1]["test2"] == 4 - result = reader.consume(values[1]) + result = reader.consume(values[1:2], metadata) assert len(result) == 1 
assert len(result[0]) == 2 assert result[0][0] == result[0]["test1"] == 5 assert result[0][1] == result[0]["test2"] == 6 - result = reader.consume(values[2]) + result = reader.consume(values[2:], metadata) assert len(result) == 1 assert len(result[0]) == 2 assert result[0][0] == result[0]["test1"] == 7 assert result[0][1] == result[0]["test2"] == 8 def test__received_values_are_passed_to_parser_in_batches(self): - byte_cursor = mock.Mock( - metadata=ProtoMetadata( - [("test1", SqlType.Int64()), ("test2", SqlType.Int64())] - ) - ) + metadata = Metadata([("test1", SqlType.Int64()), ("test2", SqlType.Int64())]) + # TODO move to a SqlType test assert SqlType.Struct([("a", SqlType.Int64())]) == SqlType.Struct( [("a", SqlType.Int64())] ) @@ -114,41 +108,32 @@ def test__received_values_are_passed_to_parser_in_batches(self): SqlType.String(), SqlType.String() ) - values = [ - {"int_value": 1}, - {"int_value": 2}, - ] - - reader = _QueryResultRowReader(byte_cursor) + reader = _QueryResultRowReader() with mock.patch.object( google.cloud.bigtable.data.execute_query._reader, "_parse_pb_value_to_python_value", ) as parse_mock: - reader.consume(proto_rows_bytes(values[0])) - parse_mock.assert_not_called() - reader.consume(proto_rows_bytes(values[1])) + reader.consume([proto_rows_bytes(int_val(1), int_val(2))], metadata) parse_mock.assert_has_calls( [ - mock.call(PBValue(values[0]), SqlType.Int64()), - mock.call(PBValue(values[1]), SqlType.Int64()), + mock.call(PBValue(int_val(1)), SqlType.Int64()), + mock.call(PBValue(int_val(2)), SqlType.Int64()), ] ) def test__parser_errors_are_forwarded(self): - byte_cursor = mock.Mock(metadata=ProtoMetadata([("test1", SqlType.Int64())])) + metadata = Metadata([("test1", SqlType.Int64())]) - values = [ - {"string_value": "test"}, - ] + values = [str_val("test")] - reader = _QueryResultRowReader(byte_cursor) + reader = _QueryResultRowReader() with mock.patch.object( google.cloud.bigtable.data.execute_query._reader, "_parse_pb_value_to_python_value", side_effect=ValueError("test"), ) as parse_mock: with pytest.raises(ValueError, match="test"): - reader.consume(proto_rows_bytes(values[0])) + reader.consume([proto_rows_bytes(values[0])], metadata) parse_mock.assert_has_calls( [ @@ -159,75 +144,25 @@ def test__parser_errors_are_forwarded(self): def test__multiple_proto_rows_received_with_one_resume_token(self): from google.cloud.bigtable.data.execute_query._byte_cursor import _ByteCursor - def split_bytes_into_chunks(bytes_to_split, num_chunks): - from google.cloud.bigtable.helpers import batched - - assert num_chunks <= len(bytes_to_split) - bytes_per_part = (len(bytes_to_split) - 1) // num_chunks + 1 - result = list(map(bytes, batched(bytes_to_split, bytes_per_part))) - assert len(result) == num_chunks - return result - def pass_values_to_byte_cursor(byte_cursor, iterable): for value in iterable: result = byte_cursor.consume(value) if result is not None: yield result - proto_rows = [ - proto_rows_bytes({"int_value": 1}, {"int_value": 2}), - proto_rows_bytes({"int_value": 3}, {"int_value": 4}), - proto_rows_bytes({"int_value": 5}, {"int_value": 6}), - ] - - messages = [ - *split_bytes_into_chunks(proto_rows[0], num_chunks=2), - *split_bytes_into_chunks(proto_rows[1], num_chunks=3), - proto_rows[2], - ] - stream = [ - ExecuteQueryResponse( - metadata={ - "proto_schema": { - "columns": [ - {"name": "test1", "type_": TYPE_INT}, - {"name": "test2", "type_": TYPE_INT}, - ] - } - } - ), - ExecuteQueryResponse( - results={"proto_rows_batch": {"batch_data": messages[0]}} - ), - 
ExecuteQueryResponse( - results={"proto_rows_batch": {"batch_data": messages[1]}} - ), - ExecuteQueryResponse( - results={"proto_rows_batch": {"batch_data": messages[2]}} - ), - ExecuteQueryResponse( - results={"proto_rows_batch": {"batch_data": messages[3]}} - ), - ExecuteQueryResponse( - results={ - "proto_rows_batch": {"batch_data": messages[4]}, - "resume_token": b"token1", - } - ), - ExecuteQueryResponse( - results={ - "proto_rows_batch": {"batch_data": messages[5]}, - "resume_token": b"token2", - } + *chunked_responses( + 4, int_val(1), int_val(2), int_val(3), int_val(4), token=b"token1" ), + *chunked_responses(1, int_val(5), int_val(6), token=b"token2"), ] byte_cursor = _ByteCursor() - - reader = _QueryResultRowReader(byte_cursor) - + reader = _QueryResultRowReader() byte_cursor_iter = pass_values_to_byte_cursor(byte_cursor, stream) + md = _pb_metadata_to_metadata_types( + metadata(column("test1", int64_type()), column("test2", int64_type())) + ) returned_values = [] @@ -246,7 +181,7 @@ def wrapped(*args, **kwargs): "_parse_proto_rows", wraps=intercept_return_values(reader._parse_proto_rows), ): - result = reader.consume(next(byte_cursor_iter)) + result = reader.consume(next(byte_cursor_iter), md) # Despite the fact that two ProtoRows were received, a single resume_token after the second ProtoRows object forces us to parse them together. # We will interpret them as one larger ProtoRows object. @@ -276,7 +211,7 @@ def wrapped(*args, **kwargs): "_parse_proto_rows", wraps=intercept_return_values(reader._parse_proto_rows), ): - result = reader.consume(next(byte_cursor_iter)) + result = reader.consume(next(byte_cursor_iter), md) assert len(result) == 1 assert len(result[0]) == 2 @@ -286,10 +221,32 @@ def wrapped(*args, **kwargs): assert result[0]["test2"] == 6 assert byte_cursor._resume_token == b"token2" - -class TestProtoMetadata: + def test_multiple_batches(self): + reader = _QueryResultRowReader() + batches = [ + proto_rows_bytes(int_val(1), int_val(2), int_val(3), int_val(4)), + proto_rows_bytes(int_val(5), int_val(6)), + proto_rows_bytes(int_val(7), int_val(8)), + ] + results = reader.consume( + batches, + Metadata([("test1", SqlType.Int64()), ("test2", SqlType.Int64())]), + ) + assert len(results) == 4 + [row1, row2, row3, row4] = results + assert row1["test1"] == 1 + assert row1["test2"] == 2 + assert row2["test1"] == 3 + assert row2["test2"] == 4 + assert row3["test1"] == 5 + assert row3["test2"] == 6 + assert row4["test1"] == 7 + assert row4["test2"] == 8 + + +class TestMetadata: def test__duplicate_column_names(self): - metadata = ProtoMetadata( + metadata = Metadata( [ ("test1", SqlType.Int64()), ("test2", SqlType.Bytes()), diff --git a/packages/google-cloud-bigtable/tests/unit/data/test__helpers.py b/packages/google-cloud-bigtable/tests/unit/data/test__helpers.py index 39db0668991d..96c726a20cf4 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/test__helpers.py +++ b/packages/google-cloud-bigtable/tests/unit/data/test__helpers.py @@ -189,6 +189,39 @@ def test_get_timeouts_invalid(self, input_times, input_table): _helpers._get_timeouts(input_times[0], input_times[1], fake_table) +class TestAlignTimeouts: + @pytest.mark.parametrize( + "input_times,expected", + [ + ((2, 1), (2, 1)), + ((2, 4), (2, 2)), + ((2, None), (2, 2)), + ], + ) + def test_get_timeouts(self, input_times, expected): + """ + test input/output mappings for a variety of valid inputs + """ + t1, t2 = _helpers._align_timeouts(input_times[0], input_times[1]) + assert t1 == expected[0] + assert t2 == 
expected[1] + + @pytest.mark.parametrize( + "input_times", + [ + ([0, 1]), + ([1, 0]), + ([None, 1]), + ], + ) + def test_get_timeouts_invalid(self, input_times): + """ + test with inputs that should raise error during validation step + """ + with pytest.raises(ValueError): + _helpers._align_timeouts(input_times[0], input_times[1]) + + class TestGetRetryableErrors: @pytest.mark.parametrize( "input_codes,input_table,expected", diff --git a/packages/google-cloud-bigtable/tests/unit/v2_client/_testing.py b/packages/google-cloud-bigtable/tests/unit/v2_client/_testing.py index 855c0c10e95d..302d33ac1540 100644 --- a/packages/google-cloud-bigtable/tests/unit/v2_client/_testing.py +++ b/packages/google-cloud-bigtable/tests/unit/v2_client/_testing.py @@ -17,9 +17,6 @@ import mock -# flake8: noqa -from .._testing import TYPE_INT, split_bytes_into_chunks, proto_rows_bytes - class _FakeStub(object): """Acts as a gPRC stub.""" From acb83193ea33c557794a7a254564a1cc30512e82 Mon Sep 17 00:00:00 2001 From: Anthonios Partheniou Date: Tue, 18 Mar 2025 16:48:41 -0400 Subject: [PATCH 856/892] fix: remove setup.cfg configuration for creating universal wheels (#1097) --- packages/google-cloud-bigtable/setup.cfg | 19 ------------------- 1 file changed, 19 deletions(-) delete mode 100644 packages/google-cloud-bigtable/setup.cfg diff --git a/packages/google-cloud-bigtable/setup.cfg b/packages/google-cloud-bigtable/setup.cfg deleted file mode 100644 index 052350089505..000000000000 --- a/packages/google-cloud-bigtable/setup.cfg +++ /dev/null @@ -1,19 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Generated by synthtool. DO NOT EDIT! 
-[bdist_wheel] -universal = 1 From 83f7879822d0331b863ace8a73535a1c5615a9e8 Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Tue, 18 Mar 2025 16:19:55 -0700 Subject: [PATCH 857/892] chore(main): release 2.30.0 (#1098) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- .../.release-please-manifest.json | 2 +- packages/google-cloud-bigtable/CHANGELOG.md | 13 +++++++++++++ .../google/cloud/bigtable/gapic_version.py | 2 +- .../google/cloud/bigtable_admin/gapic_version.py | 2 +- .../google/cloud/bigtable_admin_v2/gapic_version.py | 2 +- .../google/cloud/bigtable_v2/gapic_version.py | 2 +- 6 files changed, 18 insertions(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/.release-please-manifest.json b/packages/google-cloud-bigtable/.release-please-manifest.json index 26729a93f7ec..7745bad24ffa 100644 --- a/packages/google-cloud-bigtable/.release-please-manifest.json +++ b/packages/google-cloud-bigtable/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "2.29.0" + ".": "2.30.0" } \ No newline at end of file diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 75ec4c5acc26..7f8acf49fc53 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,19 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.30.0](https://github.com/googleapis/python-bigtable/compare/v2.29.0...v2.30.0) (2025-03-18) + + +### Features + +* Update ExecuteQuery to use Prepare ([#1100](https://github.com/googleapis/python-bigtable/issues/1100)) ([8a7abc1](https://github.com/googleapis/python-bigtable/commit/8a7abc1e9c34a9122b2d648e8a358a7097ed3a5d)) + + +### Bug Fixes + +* Allow protobuf 6.x ([#1092](https://github.com/googleapis/python-bigtable/issues/1092)) ([1015fa8](https://github.com/googleapis/python-bigtable/commit/1015fa83c505487f09820e3a37f76690bd00ab5d)) +* Remove setup.cfg configuration for creating universal wheels ([#1097](https://github.com/googleapis/python-bigtable/issues/1097)) ([95f4b82](https://github.com/googleapis/python-bigtable/commit/95f4b8233cba2a18633e64c5e0bc177e23767a83)) + ## [2.29.0](https://github.com/googleapis/python-bigtable/compare/v2.28.1...v2.29.0) (2025-02-26) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py index 07483fa04d24..5ebb3bec4b3c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.29.0" # {x-release-please-version} +__version__ = "2.30.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py index 07483fa04d24..5ebb3bec4b3c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "2.29.0" # {x-release-please-version} +__version__ = "2.30.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py index 07483fa04d24..5ebb3bec4b3c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.29.0" # {x-release-please-version} +__version__ = "2.30.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py index 07483fa04d24..5ebb3bec4b3c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.29.0" # {x-release-please-version} +__version__ = "2.30.0" # {x-release-please-version} From 49663c1532e3b496a24681638eb54f8b78866f96 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Wed, 19 Mar 2025 10:51:36 -0400 Subject: [PATCH 858/892] chore: Update gapic-generator-python to 1.23.6 (#1101) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore: Update gapic-generator-python to 1.23.6 PiperOrigin-RevId: 738170370 Source-Link: https://github.com/googleapis/googleapis/commit/3f1e17aa2dec3f146a9a2a8a64c5c6d19d0b6e15 Source-Link: https://github.com/googleapis/googleapis-gen/commit/9afd8c33d4cae610b75fa4999264ea8c8c66b9d2 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiOWFmZDhjMzNkNGNhZTYxMGI3NWZhNDk5OTI2NGVhOGM4YzY2YjlkMiJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- .../google/cloud/bigtable_admin/__init__.py | 2 +- .../google/cloud/bigtable_admin_v2/__init__.py | 2 +- .../google/cloud/bigtable_admin_v2/services/__init__.py | 2 +- .../services/bigtable_instance_admin/__init__.py | 2 +- .../services/bigtable_instance_admin/async_client.py | 2 +- .../services/bigtable_instance_admin/client.py | 2 +- .../services/bigtable_instance_admin/pagers.py | 2 +- .../services/bigtable_instance_admin/transports/__init__.py | 2 +- .../services/bigtable_instance_admin/transports/base.py | 2 +- .../services/bigtable_instance_admin/transports/grpc.py | 2 +- .../services/bigtable_instance_admin/transports/grpc_asyncio.py | 2 +- .../services/bigtable_instance_admin/transports/rest.py | 2 +- .../services/bigtable_instance_admin/transports/rest_base.py | 2 +- .../bigtable_admin_v2/services/bigtable_table_admin/__init__.py | 2 +- .../services/bigtable_table_admin/async_client.py | 2 +- .../bigtable_admin_v2/services/bigtable_table_admin/client.py | 2 +- .../bigtable_admin_v2/services/bigtable_table_admin/pagers.py | 2 +- .../services/bigtable_table_admin/transports/__init__.py | 2 +- .../services/bigtable_table_admin/transports/base.py | 2 +- .../services/bigtable_table_admin/transports/grpc.py | 2 +- .../services/bigtable_table_admin/transports/grpc_asyncio.py | 2 +- 
.../services/bigtable_table_admin/transports/rest.py | 2 +- .../services/bigtable_table_admin/transports/rest_base.py | 2 +- .../google/cloud/bigtable_admin_v2/types/__init__.py | 2 +- .../cloud/bigtable_admin_v2/types/bigtable_instance_admin.py | 2 +- .../cloud/bigtable_admin_v2/types/bigtable_table_admin.py | 2 +- .../google/cloud/bigtable_admin_v2/types/common.py | 2 +- .../google/cloud/bigtable_admin_v2/types/instance.py | 2 +- .../google/cloud/bigtable_admin_v2/types/table.py | 2 +- .../google/cloud/bigtable_admin_v2/types/types.py | 2 +- .../google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py | 2 +- .../google/cloud/bigtable_v2/services/__init__.py | 2 +- .../google/cloud/bigtable_v2/services/bigtable/__init__.py | 2 +- .../google/cloud/bigtable_v2/services/bigtable/async_client.py | 2 +- .../google/cloud/bigtable_v2/services/bigtable/client.py | 2 +- .../cloud/bigtable_v2/services/bigtable/transports/__init__.py | 2 +- .../cloud/bigtable_v2/services/bigtable/transports/base.py | 2 +- .../cloud/bigtable_v2/services/bigtable/transports/grpc.py | 2 +- .../bigtable_v2/services/bigtable/transports/grpc_asyncio.py | 2 +- .../cloud/bigtable_v2/services/bigtable/transports/rest.py | 2 +- .../cloud/bigtable_v2/services/bigtable/transports/rest_base.py | 2 +- .../google/cloud/bigtable_v2/types/__init__.py | 2 +- .../google/cloud/bigtable_v2/types/bigtable.py | 2 +- .../google/cloud/bigtable_v2/types/data.py | 2 +- .../google/cloud/bigtable_v2/types/feature_flags.py | 2 +- .../google/cloud/bigtable_v2/types/request_stats.py | 2 +- .../google/cloud/bigtable_v2/types/response_params.py | 2 +- .../google/cloud/bigtable_v2/types/types.py | 2 +- .../scripts/fixup_bigtable_admin_v2_keywords.py | 2 +- .../google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py | 2 +- packages/google-cloud-bigtable/tests/__init__.py | 2 +- packages/google-cloud-bigtable/tests/unit/__init__.py | 2 +- packages/google-cloud-bigtable/tests/unit/gapic/__init__.py | 2 +- .../tests/unit/gapic/bigtable_admin_v2/__init__.py | 2 +- .../gapic/bigtable_admin_v2/test_bigtable_instance_admin.py | 2 +- .../unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py | 2 +- .../tests/unit/gapic/bigtable_v2/__init__.py | 2 +- .../tests/unit/gapic/bigtable_v2/test_bigtable.py | 2 +- 58 files changed, 58 insertions(+), 58 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/__init__.py index 319c1f3320e0..c8f2a44826fb 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py index 1d2d13cf0712..4ee0cc6b176a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/__init__.py index 8f6cf068242c..cbf94b283c70 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py index 09a827f872e5..20ac9e4fc5f6 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py index ad3745c0699c..a9c7ebc21bb6 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py index f9635515610f..72fd030acf49 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py index 355d641e40d4..ce5b67b27324 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py index 45cf579fbeb5..021458f35945 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py index f2576c676f86..cd3289655943 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py index eb13e683ba71..36eae1ddf80a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py index 12e63f7fe1bc..aae0f44c4bf2 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py index 858055974c93..0d2239ad822b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest_base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest_base.py index 5851243ed2a9..9855756b8ee3 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest_base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest_base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py index 7fdf89eb6356..cd916a2c8020 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index a10691b71ea7..2eaebae35104 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index 3204f43a1b64..cc69fa05115e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py index 4351a58148b7..8b1ffba34039 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py index 11a7f83292bf..e7621f781d0b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py index 5f74859a5762..ea6dca7c236e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py index 59c701b8fca3..9ea3f5465230 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py index 751828e68dc0..8b08cbe8ce90 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py index 80d485bd02ed..f676835b593b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest_base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest_base.py index fbaf89e52d33..add95bccac29 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest_base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest_base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py index 817bbc89adbf..26821e2a4290 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py index 5bed1c4f782d..4197ed0b7424 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py index ab8273a0a84c..4cadfb1bf436 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/common.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/common.py index 1ab52a0e3c69..7b05e5ff5a2e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/common.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/common.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py index 19a17c698dc7..8b2e01607d52 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py index 6dcf7b4a885a..730b54ce3efe 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/types.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/types.py index ec5744156746..42935df3c0db 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/types.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/types.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py index a7ff5ac1dfa2..3cb3d4de09ef 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/__init__.py index 8f6cf068242c..cbf94b283c70 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/__init__.py index 191b24851d83..c74141156324 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py index 3d4e2373d17e..0ae20f3a2c84 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py index 330a2252099c..2835a10cfda0 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py index ae007bc2bd04..b35e85534182 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py index 72d0638281d2..8c014abada3d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py index 84bc1dd43c11..6d0a798cf849 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py index 192ce82810ff..cebee0208c5d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py index fb0af2af9351..7b410297fd5c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest_base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest_base.py index c33fc1e83864..b2080f4a49ba 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest_base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest_base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py index c15a1d3078d7..629dd6c90750 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py index 6d9be1438887..f941c867a8a8 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py index 97e32197e9fd..cecbc138af8a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/feature_flags.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/feature_flags.py index 1e408bb3a7bd..69cfe1cf459e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/feature_flags.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/feature_flags.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/request_stats.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/request_stats.py index 115f76af5835..8548996efd2d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/request_stats.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/request_stats.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/response_params.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/response_params.py index 3bbf3163ffe8..2c04dadaa41c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/response_params.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/response_params.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/types.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/types.py index 153420e45655..7f92a15ae71b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/types.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/types.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py b/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py index 9e2dd2794163..352e63a93949 100644 --- a/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py +++ b/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py @@ -1,6 +1,6 @@ #! 
/usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py b/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py index 466b1d1c7e2e..70e0795e2c02 100644 --- a/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py +++ b/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py @@ -1,6 +1,6 @@ #! /usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/tests/__init__.py b/packages/google-cloud-bigtable/tests/__init__.py index 8f6cf068242c..cbf94b283c70 100644 --- a/packages/google-cloud-bigtable/tests/__init__.py +++ b/packages/google-cloud-bigtable/tests/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/tests/unit/__init__.py b/packages/google-cloud-bigtable/tests/unit/__init__.py index 8f6cf068242c..cbf94b283c70 100644 --- a/packages/google-cloud-bigtable/tests/unit/__init__.py +++ b/packages/google-cloud-bigtable/tests/unit/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/__init__.py b/packages/google-cloud-bigtable/tests/unit/gapic/__init__.py index 8f6cf068242c..cbf94b283c70 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/__init__.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/__init__.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/__init__.py index 8f6cf068242c..cbf94b283c70 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/__init__.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index eeb014f54706..69bad1c7b274 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index 21d2720d7451..67b4302c9f1d 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/__init__.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/__init__.py index 8f6cf068242c..cbf94b283c70 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/__init__.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py index 85700b67d806..059e6a58ce9b 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2024 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
From b9eb8346e23c41bcf6b7be30ad32bb2810600b10 Mon Sep 17 00:00:00 2001 From: Jack Dingilian Date: Thu, 17 Apr 2025 12:54:39 -0400 Subject: [PATCH 859/892] fix: populate SQL app_profile_id header even when it is unset (#1109) --- .../services/bigtable/async_client.py | 8 +- .../bigtable_v2/services/bigtable/client.py | 8 +- .../unit/gapic/bigtable_v2/test_bigtable.py | 36 +++- .../tests/unit/test_sql_routing_parameters.py | 188 ++++++++++++++++++ 4 files changed, 230 insertions(+), 10 deletions(-) create mode 100644 packages/google-cloud-bigtable/tests/unit/test_sql_routing_parameters.py diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py index 0ae20f3a2c84..84832ffd606b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -1584,7 +1584,9 @@ async def prepare_query( if regex_match and regex_match.group("name"): header_params["name"] = regex_match.group("name") - if request.app_profile_id: + if request.app_profile_id is not None: + # prepare_query currently requires app_profile_id header to be set + # even when the request param is unpopulated TODO: remove after support is added header_params["app_profile_id"] = request.app_profile_id if header_params: @@ -1704,7 +1706,9 @@ def execute_query( if regex_match and regex_match.group("name"): header_params["name"] = regex_match.group("name") - if request.app_profile_id: + if request.app_profile_id is not None: + # execute_query currently requires app_profile_id header to be set + # even when the request param is unpopulated TODO: remove after support is added header_params["app_profile_id"] = request.app_profile_id if header_params: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py index 2835a10cfda0..c5b14b54a998 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py @@ -2033,7 +2033,9 @@ def prepare_query( if regex_match and regex_match.group("name"): header_params["name"] = regex_match.group("name") - if request.app_profile_id: + if request.app_profile_id is not None: + # prepare_query currently requires app_profile_id header to be set + # even when the request param is unpopulated TODO: remove after support is added header_params["app_profile_id"] = request.app_profile_id if header_params: @@ -2150,7 +2152,9 @@ def execute_query( if regex_match and regex_match.group("name"): header_params["name"] = regex_match.group("name") - if request.app_profile_id: + if request.app_profile_id is not None: + # execute_query currently requires app_profile_id header to be set + # even when the request param is unpopulated TODO: remove after support is added header_params["app_profile_id"] = request.app_profile_id if header_params: diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py index 059e6a58ce9b..1750be32b5d9 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py @@ -7442,7 
+7442,11 @@ def test_prepare_query_routing_parameters_request_1_grpc(): assert args[0] == request_msg - expected_headers = {"name": "projects/sample1/instances/sample2"} + # expect app_profile_id while temporary patch is in place: https://github.com/googleapis/python-bigtable/pull/1072 + expected_headers = { + "name": "projects/sample1/instances/sample2", + "app_profile_id": "", + } assert ( gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] ) @@ -7494,7 +7498,11 @@ def test_execute_query_routing_parameters_request_1_grpc(): assert args[0] == request_msg - expected_headers = {"name": "projects/sample1/instances/sample2"} + # expect app_profile_id while temporary patch is in place: https://github.com/googleapis/python-bigtable/pull/1072 + expected_headers = { + "name": "projects/sample1/instances/sample2", + "app_profile_id": "", + } assert ( gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] ) @@ -8548,7 +8556,11 @@ async def test_prepare_query_routing_parameters_request_1_grpc_asyncio(): assert args[0] == request_msg - expected_headers = {"name": "projects/sample1/instances/sample2"} + # expect app_profile_id while temporary patch is in place: https://github.com/googleapis/python-bigtable/pull/1072 + expected_headers = { + "name": "projects/sample1/instances/sample2", + "app_profile_id": "", + } assert ( gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] ) @@ -8611,7 +8623,11 @@ async def test_execute_query_routing_parameters_request_1_grpc_asyncio(): assert args[0] == request_msg - expected_headers = {"name": "projects/sample1/instances/sample2"} + # expect app_profile_id while temporary patch is in place: https://github.com/googleapis/python-bigtable/pull/1072 + expected_headers = { + "name": "projects/sample1/instances/sample2", + "app_profile_id": "", + } assert ( gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] ) @@ -10862,7 +10878,11 @@ def test_prepare_query_routing_parameters_request_1_rest(): assert args[0] == request_msg - expected_headers = {"name": "projects/sample1/instances/sample2"} + # expect app_profile_id while temporary patch is in place: https://github.com/googleapis/python-bigtable/pull/1072 + expected_headers = { + "name": "projects/sample1/instances/sample2", + "app_profile_id": "", + } assert ( gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] ) @@ -10912,7 +10932,11 @@ def test_execute_query_routing_parameters_request_1_rest(): assert args[0] == request_msg - expected_headers = {"name": "projects/sample1/instances/sample2"} + # expect app_profile_id while temporary patch is in place: https://github.com/googleapis/python-bigtable/pull/1072 + expected_headers = { + "name": "projects/sample1/instances/sample2", + "app_profile_id": "", + } assert ( gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] ) diff --git a/packages/google-cloud-bigtable/tests/unit/test_sql_routing_parameters.py b/packages/google-cloud-bigtable/tests/unit/test_sql_routing_parameters.py new file mode 100644 index 000000000000..fa9316369508 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/test_sql_routing_parameters.py @@ -0,0 +1,188 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock # type: ignore # noqa: F401 +except ImportError: # pragma: NO COVER + import mock +import pytest + +from grpc.experimental import aio + +try: + from google.auth.aio import credentials as ga_credentials_async + + HAS_GOOGLE_AUTH_AIO = True +except ImportError: # pragma: NO COVER + HAS_GOOGLE_AUTH_AIO = False + +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.auth import credentials as ga_credentials +from google.cloud.bigtable_v2.services.bigtable.async_client import BigtableAsyncClient +from google.cloud.bigtable_v2.services.bigtable.client import BigtableClient +from google.cloud.bigtable_v2.types import bigtable + +# This test file duplicates the gapic request header tests so that the temporary fix +# for SQL app_profile_id header handling can not be override by GAPIC. +# TODO: remove this once the fix is upstreamed + + +def async_anonymous_credentials(): + if HAS_GOOGLE_AUTH_AIO: + return ga_credentials_async.AnonymousCredentials() + return ga_credentials.AnonymousCredentials() + + +def test_prepare_query_routing_parameters_request_1_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.prepare_query), "__call__") as call: + call.return_value = bigtable.PrepareQueryResponse() + client.prepare_query( + request={"instance_name": "projects/sample1/instances/sample2"} + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.PrepareQueryRequest( + **{"instance_name": "projects/sample1/instances/sample2"} + ) + + assert args[0] == request_msg + + # expect app_profile_id while temporary patch is in place: https://github.com/googleapis/python-bigtable/pull/1109 + expected_headers = { + "name": "projects/sample1/instances/sample2", + "app_profile_id": "", + } + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_prepare_query_routing_parameters_request_1_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.prepare_query), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable.PrepareQueryResponse( + prepared_query=b"prepared_query_blob", + ) + ) + await client.prepare_query( + request={"instance_name": "projects/sample1/instances/sample2"} + ) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.PrepareQueryRequest( + **{"instance_name": "projects/sample1/instances/sample2"} + ) + + assert args[0] == request_msg + + # expect app_profile_id while temporary patch is in place: https://github.com/googleapis/python-bigtable/pull/1109 + expected_headers = { + "name": "projects/sample1/instances/sample2", + "app_profile_id": "", + } + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +def test_execute_query_routing_parameters_request_1_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.execute_query), "__call__") as call: + call.return_value = iter([bigtable.ExecuteQueryResponse()]) + client.execute_query( + request={"instance_name": "projects/sample1/instances/sample2"} + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ExecuteQueryRequest( + **{"instance_name": "projects/sample1/instances/sample2"} + ) + + assert args[0] == request_msg + + # expect app_profile_id while temporary patch is in place: https://github.com/googleapis/python-bigtable/pull/1109 + expected_headers = { + "name": "projects/sample1/instances/sample2", + "app_profile_id": "", + } + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) + + +@pytest.mark.asyncio +async def test_execute_query_routing_parameters_request_1_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.execute_query), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.ExecuteQueryResponse()] + ) + await client.execute_query( + request={"instance_name": "projects/sample1/instances/sample2"} + ) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ExecuteQueryRequest( + **{"instance_name": "projects/sample1/instances/sample2"} + ) + + assert args[0] == request_msg + + # expect app_profile_id while temporary patch is in place: https://github.com/googleapis/python-bigtable/pull/1109 + expected_headers = { + "name": "projects/sample1/instances/sample2", + "app_profile_id": "", + } + assert ( + gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + ) From b05488741f685c9f81a347676d34ae1dbd696930 Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Thu, 17 Apr 2025 16:01:48 -0400 Subject: [PATCH 860/892] chore(main): release 2.30.1 (#1110) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- .../google-cloud-bigtable/.release-please-manifest.json | 2 +- packages/google-cloud-bigtable/CHANGELOG.md | 7 +++++++ .../google/cloud/bigtable/gapic_version.py | 2 +- .../google/cloud/bigtable_admin/gapic_version.py | 2 +- .../google/cloud/bigtable_admin_v2/gapic_version.py | 2 +- .../google/cloud/bigtable_v2/gapic_version.py | 2 +- 6 files changed, 12 insertions(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/.release-please-manifest.json b/packages/google-cloud-bigtable/.release-please-manifest.json index 7745bad24ffa..570ecf862db2 100644 --- a/packages/google-cloud-bigtable/.release-please-manifest.json +++ b/packages/google-cloud-bigtable/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "2.30.0" + ".": "2.30.1" } \ No newline at end of file diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 7f8acf49fc53..f55767795ee3 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,13 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.30.1](https://github.com/googleapis/python-bigtable/compare/v2.30.0...v2.30.1) (2025-04-17) + + +### Bug Fixes + +* Populate SQL app_profile_id header even when it is unset ([#1109](https://github.com/googleapis/python-bigtable/issues/1109)) ([17b75bd](https://github.com/googleapis/python-bigtable/commit/17b75bd746cb0a616f64a05eb0ed72b46de28a17)) + ## [2.30.0](https://github.com/googleapis/python-bigtable/compare/v2.29.0...v2.30.0) (2025-03-18) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py index 5ebb3bec4b3c..8202296bffca 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.30.0" # {x-release-please-version} +__version__ = "2.30.1" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py index 5ebb3bec4b3c..8202296bffca 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "2.30.0" # {x-release-please-version} +__version__ = "2.30.1" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py index 5ebb3bec4b3c..8202296bffca 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.30.0" # {x-release-please-version} +__version__ = "2.30.1" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py index 5ebb3bec4b3c..8202296bffca 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.30.0" # {x-release-please-version} +__version__ = "2.30.1" # {x-release-please-version} From 6b479de6cae968218e6b74f32f23a0c1d9c50902 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Fri, 2 May 2025 13:16:14 -0700 Subject: [PATCH 861/892] feat: add deletion_protection support for LVs (#1108) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore: Update gapic-generator-python to 1.24.0 PiperOrigin-RevId: 747419463 Source-Link: https://github.com/googleapis/googleapis/commit/340579bf7f97ba56cda0c70176dc5b03a8357667 Source-Link: https://github.com/googleapis/googleapis-gen/commit/e8997ec5136ecb6ed9a969a4c2f13b3ab6a17c12 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiZTg5OTdlYzUxMzZlY2I2ZWQ5YTk2OWE0YzJmMTNiM2FiNmExN2MxMiJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * chore: Update gapic-generator-python to 1.24.1 PiperOrigin-RevId: 748739072 Source-Link: https://github.com/googleapis/googleapis/commit/b947e523934dbac5d97613d8aa08e04fc38c5fb6 Source-Link: https://github.com/googleapis/googleapis-gen/commit/8c5821aa65a921d59b3f7653d6f37c9c67410c2f Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiOGM1ODIxYWE2NWE5MjFkNTliM2Y3NjUzZDZmMzdjOWM2NzQxMGMyZiJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * feat: add deletion_protection support for LVs PiperOrigin-RevId: 750666273 Source-Link: https://github.com/googleapis/googleapis/commit/98297c5f6e3404e9f07040cd8d711d0e2ab1d3e7 Source-Link: https://github.com/googleapis/googleapis-gen/commit/7af2b6b39ae4a39560cc53d4a768b95c3fa63f3f Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiN2FmMmI2YjM5YWU0YTM5NTYwY2M1M2Q0YTc2OGI5NWMzZmE2M2YzZiJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- .../services/bigtable_instance_admin/transports/grpc.py | 3 +-- .../services/bigtable_table_admin/transports/grpc.py | 3 +-- .../google/cloud/bigtable_admin_v2/types/instance.py | 7 +++++++ .../bigtable_v2/services/bigtable/transports/grpc.py | 3 +-- 
.../bigtable_admin_v2/test_bigtable_instance_admin.py | 9 +++++++++ 5 files changed, 19 insertions(+), 6 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py index 36eae1ddf80a..a294144efd68 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py @@ -76,12 +76,11 @@ def intercept_unary_unary(self, continuation, client_call_details, request): f"Sending request for {client_call_details.method}", extra={ "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", - "rpcName": client_call_details.method, + "rpcName": str(client_call_details.method), "request": grpc_request, "metadata": grpc_request["metadata"], }, ) - response = continuation(client_call_details, request) if logging_enabled: # pragma: NO COVER response_metadata = response.trailing_metadata() diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py index 9ea3f5465230..b18f131335c1 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py @@ -77,12 +77,11 @@ def intercept_unary_unary(self, continuation, client_call_details, request): f"Sending request for {client_call_details.method}", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": client_call_details.method, + "rpcName": str(client_call_details.method), "request": grpc_request, "metadata": grpc_request["metadata"], }, ) - response = continuation(client_call_details, request) if logging_enabled: # pragma: NO COVER response_metadata = response.trailing_metadata() diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py index 8b2e01607d52..2623b770e42f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py @@ -759,6 +759,9 @@ class LogicalView(proto.Message): that the client has an up-to-date value before proceeding. The server returns an ABORTED error on a mismatched etag. + deletion_protection (bool): + Optional. Set to true to make the LogicalView + protected against deletion. 
""" name: str = proto.Field( @@ -773,6 +776,10 @@ class LogicalView(proto.Message): proto.STRING, number=3, ) + deletion_protection: bool = proto.Field( + proto.BOOL, + number=6, + ) class MaterializedView(proto.Message): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py index 6d0a798cf849..a3c0865f1dcd 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py @@ -70,12 +70,11 @@ def intercept_unary_unary(self, continuation, client_call_details, request): f"Sending request for {client_call_details.method}", extra={ "serviceName": "google.bigtable.v2.Bigtable", - "rpcName": client_call_details.method, + "rpcName": str(client_call_details.method), "request": grpc_request, "metadata": grpc_request["metadata"], }, ) - response = continuation(client_call_details, request) if logging_enabled: # pragma: NO COVER response_metadata = response.trailing_metadata() diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index 69bad1c7b274..2ad52bf5281f 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -9000,6 +9000,7 @@ def test_get_logical_view(request_type, transport: str = "grpc"): name="name_value", query="query_value", etag="etag_value", + deletion_protection=True, ) response = client.get_logical_view(request) @@ -9014,6 +9015,7 @@ def test_get_logical_view(request_type, transport: str = "grpc"): assert response.name == "name_value" assert response.query == "query_value" assert response.etag == "etag_value" + assert response.deletion_protection is True def test_get_logical_view_non_empty_request_with_auto_populated_field(): @@ -9145,6 +9147,7 @@ async def test_get_logical_view_async( name="name_value", query="query_value", etag="etag_value", + deletion_protection=True, ) ) response = await client.get_logical_view(request) @@ -9160,6 +9163,7 @@ async def test_get_logical_view_async( assert response.name == "name_value" assert response.query == "query_value" assert response.etag == "etag_value" + assert response.deletion_protection is True @pytest.mark.asyncio @@ -19861,6 +19865,7 @@ async def test_get_logical_view_empty_call_grpc_asyncio(): name="name_value", query="query_value", etag="etag_value", + deletion_protection=True, ) ) await client.get_logical_view(request=None) @@ -23249,6 +23254,7 @@ def test_create_logical_view_rest_call_success(request_type): "name": "name_value", "query": "query_value", "etag": "etag_value", + "deletion_protection": True, } # The version of a generated dependency at test runtime may differ from the version used during generation. 
# Delete any fields which are not present in the current runtime dependency @@ -23452,6 +23458,7 @@ def test_get_logical_view_rest_call_success(request_type): name="name_value", query="query_value", etag="etag_value", + deletion_protection=True, ) # Wrap the value into a proper Response obj @@ -23471,6 +23478,7 @@ def test_get_logical_view_rest_call_success(request_type): assert response.name == "name_value" assert response.query == "query_value" assert response.etag == "etag_value" + assert response.deletion_protection is True @pytest.mark.parametrize("null_interceptor", [True, False]) @@ -23720,6 +23728,7 @@ def test_update_logical_view_rest_call_success(request_type): "name": "projects/sample1/instances/sample2/logicalViews/sample3", "query": "query_value", "etag": "etag_value", + "deletion_protection": True, } # The version of a generated dependency at test runtime may differ from the version used during generation. # Delete any fields which are not present in the current runtime dependency From 480b96eb3d824b9de6b84c678ed1da6e542609d8 Mon Sep 17 00:00:00 2001 From: ayu Date: Wed, 7 May 2025 04:14:04 +0900 Subject: [PATCH 862/892] chore(docs): update `RowSet` to no longer reference deprecated `Table.yield_rows` (#1050) --- packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py b/packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py index 82a540b5a891..2bc436d54c0c 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/row_set.py @@ -22,7 +22,7 @@ class RowSet(object): """Convenience wrapper of google.bigtable.v2.RowSet Useful for creating a set of row keys and row ranges, which can - be passed to yield_rows method of class:`.Table.yield_rows`. + be passed to read_rows method of class:`.Table.read_rows`. """ def __init__(self): From 153ade1067ca436b89b430de3d5b8ceff0b307ae Mon Sep 17 00:00:00 2001 From: Akshay Joshi Date: Thu, 15 May 2025 23:19:37 +0100 Subject: [PATCH 863/892] fix: re-add py-typed file for bigtable package (#1085) --- packages/google-cloud-bigtable/google/cloud/bigtable/py.typed | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/py.typed diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/py.typed b/packages/google-cloud-bigtable/google/cloud/bigtable/py.typed new file mode 100644 index 000000000000..889d34043118 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-bigtable package uses inline types. 
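For context on the RowSet docstring change in PATCH 862 above, which now points callers at Table.read_rows instead of the deprecated Table.yield_rows: the sketch below shows the legacy-client call pattern being referenced. It is illustrative only, not code from this repository; the project, instance, table, and row-key values are placeholders.

    from google.cloud import bigtable
    from google.cloud.bigtable.row_set import RowSet

    # Placeholder identifiers -- substitute real project/instance/table names.
    client = bigtable.Client(project="example-project")
    table = client.instance("example-instance").table("example-table")

    # Collect explicit row keys and key ranges in a RowSet, then hand it to read_rows.
    row_set = RowSet()
    row_set.add_row_key(b"row-key-1")
    row_set.add_row_range_from_keys(start_key=b"prefix-a", end_key=b"prefix-b")

    # read_rows returns an iterable of partial rows; yield_rows is deprecated in its favor.
    for row in table.read_rows(row_set=row_set):
        print(row.row_key)
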
From 4419c2b22b4cd85a1e170372f19d2ad6081ff909 Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Fri, 16 May 2025 15:56:27 -0700 Subject: [PATCH 864/892] chore: add owlbot rule to preserve app_profile_id header (#1115) --- .../services/bigtable/async_client.py | 22 ++- .../bigtable_v2/services/bigtable/client.py | 22 ++- packages/google-cloud-bigtable/owlbot.py | 46 +++++++ .../tests/system/data/test_system_async.py | 4 + .../tests/system/data/test_system_autogen.py | 4 + .../tests/unit/data/_async/test_client.py | 3 +- .../unit/data/_sync_autogen/test_client.py | 2 +- .../unit/gapic/bigtable_v2/test_bigtable.py | 129 ++++++++++++------ 8 files changed, 159 insertions(+), 73 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py index 84832ffd606b..3ca8bb256151 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -386,7 +386,7 @@ def read_rows( if regex_match and regex_match.group("table_name"): header_params["table_name"] = regex_match.group("table_name") - if request.app_profile_id: + if True: # always attach app_profile_id, even if empty string header_params["app_profile_id"] = request.app_profile_id routing_param_regex = re.compile( @@ -510,7 +510,7 @@ def sample_row_keys( if regex_match and regex_match.group("table_name"): header_params["table_name"] = regex_match.group("table_name") - if request.app_profile_id: + if True: # always attach app_profile_id, even if empty string header_params["app_profile_id"] = request.app_profile_id routing_param_regex = re.compile( @@ -655,7 +655,7 @@ async def mutate_row( if regex_match and regex_match.group("table_name"): header_params["table_name"] = regex_match.group("table_name") - if request.app_profile_id: + if True: # always attach app_profile_id, even if empty string header_params["app_profile_id"] = request.app_profile_id routing_param_regex = re.compile( @@ -794,7 +794,7 @@ def mutate_rows( if regex_match and regex_match.group("table_name"): header_params["table_name"] = regex_match.group("table_name") - if request.app_profile_id: + if True: # always attach app_profile_id, even if empty string header_params["app_profile_id"] = request.app_profile_id routing_param_regex = re.compile( @@ -974,7 +974,7 @@ async def check_and_mutate_row( if regex_match and regex_match.group("table_name"): header_params["table_name"] = regex_match.group("table_name") - if request.app_profile_id: + if True: # always attach app_profile_id, even if empty string header_params["app_profile_id"] = request.app_profile_id routing_param_regex = re.compile( @@ -1093,7 +1093,7 @@ async def ping_and_warm( if regex_match and regex_match.group("name"): header_params["name"] = regex_match.group("name") - if request.app_profile_id: + if True: # always attach app_profile_id, even if empty string header_params["app_profile_id"] = request.app_profile_id if header_params: @@ -1235,7 +1235,7 @@ async def read_modify_write_row( if regex_match and regex_match.group("table_name"): header_params["table_name"] = regex_match.group("table_name") - if request.app_profile_id: + if True: # always attach app_profile_id, even if empty string header_params["app_profile_id"] = request.app_profile_id routing_param_regex = re.compile( @@ -1584,9 +1584,7 @@ async def prepare_query( if regex_match and 
regex_match.group("name"): header_params["name"] = regex_match.group("name") - if request.app_profile_id is not None: - # prepare_query currently requires app_profile_id header to be set - # even when the request param is unpopulated TODO: remove after support is added + if True: # always attach app_profile_id, even if empty string header_params["app_profile_id"] = request.app_profile_id if header_params: @@ -1706,9 +1704,7 @@ def execute_query( if regex_match and regex_match.group("name"): header_params["name"] = regex_match.group("name") - if request.app_profile_id is not None: - # execute_query currently requires app_profile_id header to be set - # even when the request param is unpopulated TODO: remove after support is added + if True: # always attach app_profile_id, even if empty string header_params["app_profile_id"] = request.app_profile_id if header_params: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py index c5b14b54a998..ba3eb9de3451 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py @@ -862,7 +862,7 @@ def read_rows( if regex_match and regex_match.group("table_name"): header_params["table_name"] = regex_match.group("table_name") - if request.app_profile_id: + if True: # always attach app_profile_id, even if empty string header_params["app_profile_id"] = request.app_profile_id routing_param_regex = re.compile( @@ -983,7 +983,7 @@ def sample_row_keys( if regex_match and regex_match.group("table_name"): header_params["table_name"] = regex_match.group("table_name") - if request.app_profile_id: + if True: # always attach app_profile_id, even if empty string header_params["app_profile_id"] = request.app_profile_id routing_param_regex = re.compile( @@ -1125,7 +1125,7 @@ def mutate_row( if regex_match and regex_match.group("table_name"): header_params["table_name"] = regex_match.group("table_name") - if request.app_profile_id: + if True: # always attach app_profile_id, even if empty string header_params["app_profile_id"] = request.app_profile_id routing_param_regex = re.compile( @@ -1261,7 +1261,7 @@ def mutate_rows( if regex_match and regex_match.group("table_name"): header_params["table_name"] = regex_match.group("table_name") - if request.app_profile_id: + if True: # always attach app_profile_id, even if empty string header_params["app_profile_id"] = request.app_profile_id routing_param_regex = re.compile( @@ -1438,7 +1438,7 @@ def check_and_mutate_row( if regex_match and regex_match.group("table_name"): header_params["table_name"] = regex_match.group("table_name") - if request.app_profile_id: + if True: # always attach app_profile_id, even if empty string header_params["app_profile_id"] = request.app_profile_id routing_param_regex = re.compile( @@ -1554,7 +1554,7 @@ def ping_and_warm( if regex_match and regex_match.group("name"): header_params["name"] = regex_match.group("name") - if request.app_profile_id: + if True: # always attach app_profile_id, even if empty string header_params["app_profile_id"] = request.app_profile_id if header_params: @@ -1693,7 +1693,7 @@ def read_modify_write_row( if regex_match and regex_match.group("table_name"): header_params["table_name"] = regex_match.group("table_name") - if request.app_profile_id: + if True: # always attach app_profile_id, even if empty string 
header_params["app_profile_id"] = request.app_profile_id routing_param_regex = re.compile( @@ -2033,9 +2033,7 @@ def prepare_query( if regex_match and regex_match.group("name"): header_params["name"] = regex_match.group("name") - if request.app_profile_id is not None: - # prepare_query currently requires app_profile_id header to be set - # even when the request param is unpopulated TODO: remove after support is added + if True: # always attach app_profile_id, even if empty string header_params["app_profile_id"] = request.app_profile_id if header_params: @@ -2152,9 +2150,7 @@ def execute_query( if regex_match and regex_match.group("name"): header_params["name"] = regex_match.group("name") - if request.app_profile_id is not None: - # execute_query currently requires app_profile_id header to be set - # even when the request param is unpopulated TODO: remove after support is added + if True: # always attach app_profile_id, even if empty string header_params["app_profile_id"] = request.app_profile_id if header_params: diff --git a/packages/google-cloud-bigtable/owlbot.py b/packages/google-cloud-bigtable/owlbot.py index 16ce11b4f342..f144a2d21457 100644 --- a/packages/google-cloud-bigtable/owlbot.py +++ b/packages/google-cloud-bigtable/owlbot.py @@ -97,6 +97,52 @@ def get_staging_dirs( s.move(templated_files, excludes=[".coveragerc", "README.rst", ".github/release-please.yml", "noxfile.py"]) + +# ---------------------------------------------------------------------------- +# Always supply app_profile_id in routing headers: https://github.com/googleapis/python-bigtable/pull/1109 +# TODO: remove after backend no longer requires empty strings +# ---------------------------------------------------------------------------- +for file in ["async_client.py", "client.py"]: + s.replace( + f"google/cloud/bigtable_v2/services/bigtable/{file}", + "if request.app_profile_id:", + "if True: # always attach app_profile_id, even if empty string" + ) +# fix tests +s.replace( + "tests/unit/gapic/bigtable_v2/test_bigtable.py", + 'expected_headers = {"name": "projects/sample1/instances/sample2"}', + 'expected_headers = {"name": "projects/sample1/instances/sample2", "app_profile_id": ""}' +) +s.replace( + "tests/unit/gapic/bigtable_v2/test_bigtable.py", + """ + expected_headers = { + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + """, + """ + expected_headers = { + "app_profile_id": "", + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } + """ +) +s.replace( + "tests/unit/gapic/bigtable_v2/test_bigtable.py", + """ + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3" + } + """, + """ + expected_headers = { + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "" + } + """ +) + # ---------------------------------------------------------------------------- # Samples templates # ---------------------------------------------------------------------------- diff --git a/packages/google-cloud-bigtable/tests/system/data/test_system_async.py b/packages/google-cloud-bigtable/tests/system/data/test_system_async.py index 53e97acc16f4..d45c7c16eb10 100644 --- a/packages/google-cloud-bigtable/tests/system/data/test_system_async.py +++ b/packages/google-cloud-bigtable/tests/system/data/test_system_async.py @@ -207,6 +207,10 @@ async def test_ping_and_warm(self, client, table): assert len(results) == 1 assert results[0] is None + @pytest.mark.skipif( + 
bool(os.environ.get(BIGTABLE_EMULATOR)), + reason="emulator mode doesn't refresh channel", + ) @CrossSync.pytest async def test_channel_refresh(self, table_id, instance_id, temp_rows): """ diff --git a/packages/google-cloud-bigtable/tests/system/data/test_system_autogen.py b/packages/google-cloud-bigtable/tests/system/data/test_system_autogen.py index ede24be76fe6..f9af614a262a 100644 --- a/packages/google-cloud-bigtable/tests/system/data/test_system_autogen.py +++ b/packages/google-cloud-bigtable/tests/system/data/test_system_autogen.py @@ -165,6 +165,10 @@ def test_ping_and_warm(self, client, table): assert len(results) == 1 assert results[0] is None + @pytest.mark.skipif( + bool(os.environ.get(BIGTABLE_EMULATOR)), + reason="emulator mode doesn't refresh channel", + ) def test_channel_refresh(self, table_id, instance_id, temp_rows): """change grpc channel to refresh after 1 second. Schedule a read_rows call after refresh, to ensure new channel works""" diff --git a/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py b/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py index 96fcf66b3fc5..f45a17bf6816 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py @@ -1311,7 +1311,8 @@ async def test_call_metadata(self, include_app_profile, fn_name, fn_args, gapic_ if include_app_profile: assert "app_profile_id=profile" in routing_str else: - assert "app_profile_id=" not in routing_str + # empty app_profile_id should send empty string + assert "app_profile_id=" in routing_str @CrossSync.convert_class( diff --git a/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_client.py b/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_client.py index 720f0e0b650f..eea3f36bf5cc 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_client.py @@ -1052,7 +1052,7 @@ def test_call_metadata(self, include_app_profile, fn_name, fn_args, gapic_fn): if include_app_profile: assert "app_profile_id=profile" in routing_str else: - assert "app_profile_id=" not in routing_str + assert "app_profile_id=" in routing_str @CrossSync._Sync_Impl.add_mapping_decorator("TestReadRows") diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py index 1750be32b5d9..dba535dcc25f 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py @@ -6851,7 +6851,8 @@ def test_read_rows_routing_parameters_request_1_grpc(): assert args[0] == request_msg expected_headers = { - "table_name": "projects/sample1/instances/sample2/tables/sample3" + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", } assert ( gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] @@ -6909,7 +6910,8 @@ def test_read_rows_routing_parameters_request_3_grpc(): assert args[0] == request_msg expected_headers = { - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "app_profile_id": "", + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4", } assert ( gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] @@ 
-6939,7 +6941,8 @@ def test_sample_row_keys_routing_parameters_request_1_grpc(): assert args[0] == request_msg expected_headers = { - "table_name": "projects/sample1/instances/sample2/tables/sample3" + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", } assert ( gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] @@ -6997,7 +7000,8 @@ def test_sample_row_keys_routing_parameters_request_3_grpc(): assert args[0] == request_msg expected_headers = { - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "app_profile_id": "", + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4", } assert ( gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] @@ -7027,7 +7031,8 @@ def test_mutate_row_routing_parameters_request_1_grpc(): assert args[0] == request_msg expected_headers = { - "table_name": "projects/sample1/instances/sample2/tables/sample3" + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", } assert ( gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] @@ -7085,7 +7090,8 @@ def test_mutate_row_routing_parameters_request_3_grpc(): assert args[0] == request_msg expected_headers = { - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "app_profile_id": "", + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4", } assert ( gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] @@ -7115,7 +7121,8 @@ def test_mutate_rows_routing_parameters_request_1_grpc(): assert args[0] == request_msg expected_headers = { - "table_name": "projects/sample1/instances/sample2/tables/sample3" + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", } assert ( gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] @@ -7173,7 +7180,8 @@ def test_mutate_rows_routing_parameters_request_3_grpc(): assert args[0] == request_msg expected_headers = { - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "app_profile_id": "", + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4", } assert ( gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] @@ -7205,7 +7213,8 @@ def test_check_and_mutate_row_routing_parameters_request_1_grpc(): assert args[0] == request_msg expected_headers = { - "table_name": "projects/sample1/instances/sample2/tables/sample3" + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", } assert ( gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] @@ -7267,7 +7276,8 @@ def test_check_and_mutate_row_routing_parameters_request_3_grpc(): assert args[0] == request_msg expected_headers = { - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "app_profile_id": "", + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4", } assert ( gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] @@ -7294,7 +7304,10 @@ def test_ping_and_warm_routing_parameters_request_1_grpc(): assert args[0] == request_msg - expected_headers = {"name": "projects/sample1/instances/sample2"} + expected_headers = { + "name": 
"projects/sample1/instances/sample2", + "app_profile_id": "", + } assert ( gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] ) @@ -7349,7 +7362,8 @@ def test_read_modify_write_row_routing_parameters_request_1_grpc(): assert args[0] == request_msg expected_headers = { - "table_name": "projects/sample1/instances/sample2/tables/sample3" + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", } assert ( gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] @@ -7413,7 +7427,8 @@ def test_read_modify_write_row_routing_parameters_request_3_grpc(): assert args[0] == request_msg expected_headers = { - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "app_profile_id": "", + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4", } assert ( gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] @@ -7442,7 +7457,6 @@ def test_prepare_query_routing_parameters_request_1_grpc(): assert args[0] == request_msg - # expect app_profile_id while temporary patch is in place: https://github.com/googleapis/python-bigtable/pull/1072 expected_headers = { "name": "projects/sample1/instances/sample2", "app_profile_id": "", @@ -7498,7 +7512,6 @@ def test_execute_query_routing_parameters_request_1_grpc(): assert args[0] == request_msg - # expect app_profile_id while temporary patch is in place: https://github.com/googleapis/python-bigtable/pull/1072 expected_headers = { "name": "projects/sample1/instances/sample2", "app_profile_id": "", @@ -7867,7 +7880,8 @@ async def test_read_rows_routing_parameters_request_1_grpc_asyncio(): assert args[0] == request_msg expected_headers = { - "table_name": "projects/sample1/instances/sample2/tables/sample3" + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", } assert ( gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] @@ -7935,7 +7949,8 @@ async def test_read_rows_routing_parameters_request_3_grpc_asyncio(): assert args[0] == request_msg expected_headers = { - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "app_profile_id": "", + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4", } assert ( gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] @@ -7970,7 +7985,8 @@ async def test_sample_row_keys_routing_parameters_request_1_grpc_asyncio(): assert args[0] == request_msg expected_headers = { - "table_name": "projects/sample1/instances/sample2/tables/sample3" + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", } assert ( gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] @@ -8038,7 +8054,8 @@ async def test_sample_row_keys_routing_parameters_request_3_grpc_asyncio(): assert args[0] == request_msg expected_headers = { - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "app_profile_id": "", + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4", } assert ( gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] @@ -8072,7 +8089,8 @@ async def test_mutate_row_routing_parameters_request_1_grpc_asyncio(): assert args[0] == request_msg expected_headers = { - "table_name": 
"projects/sample1/instances/sample2/tables/sample3" + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", } assert ( gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] @@ -8138,7 +8156,8 @@ async def test_mutate_row_routing_parameters_request_3_grpc_asyncio(): assert args[0] == request_msg expected_headers = { - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "app_profile_id": "", + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4", } assert ( gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] @@ -8173,7 +8192,8 @@ async def test_mutate_rows_routing_parameters_request_1_grpc_asyncio(): assert args[0] == request_msg expected_headers = { - "table_name": "projects/sample1/instances/sample2/tables/sample3" + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", } assert ( gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] @@ -8241,7 +8261,8 @@ async def test_mutate_rows_routing_parameters_request_3_grpc_asyncio(): assert args[0] == request_msg expected_headers = { - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "app_profile_id": "", + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4", } assert ( gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] @@ -8279,7 +8300,8 @@ async def test_check_and_mutate_row_routing_parameters_request_1_grpc_asyncio(): assert args[0] == request_msg expected_headers = { - "table_name": "projects/sample1/instances/sample2/tables/sample3" + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", } assert ( gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] @@ -8353,7 +8375,8 @@ async def test_check_and_mutate_row_routing_parameters_request_3_grpc_asyncio(): assert args[0] == request_msg expected_headers = { - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "app_profile_id": "", + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4", } assert ( gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] @@ -8386,7 +8409,10 @@ async def test_ping_and_warm_routing_parameters_request_1_grpc_asyncio(): assert args[0] == request_msg - expected_headers = {"name": "projects/sample1/instances/sample2"} + expected_headers = { + "name": "projects/sample1/instances/sample2", + "app_profile_id": "", + } assert ( gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] ) @@ -8449,7 +8475,8 @@ async def test_read_modify_write_row_routing_parameters_request_1_grpc_asyncio() assert args[0] == request_msg expected_headers = { - "table_name": "projects/sample1/instances/sample2/tables/sample3" + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", } assert ( gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] @@ -8521,7 +8548,8 @@ async def test_read_modify_write_row_routing_parameters_request_3_grpc_asyncio() assert args[0] == request_msg expected_headers = { - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "app_profile_id": "", + "authorized_view_name": 
"projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4", } assert ( gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] @@ -8556,7 +8584,6 @@ async def test_prepare_query_routing_parameters_request_1_grpc_asyncio(): assert args[0] == request_msg - # expect app_profile_id while temporary patch is in place: https://github.com/googleapis/python-bigtable/pull/1072 expected_headers = { "name": "projects/sample1/instances/sample2", "app_profile_id": "", @@ -8623,7 +8650,6 @@ async def test_execute_query_routing_parameters_request_1_grpc_asyncio(): assert args[0] == request_msg - # expect app_profile_id while temporary patch is in place: https://github.com/googleapis/python-bigtable/pull/1072 expected_headers = { "name": "projects/sample1/instances/sample2", "app_profile_id": "", @@ -10307,7 +10333,8 @@ def test_read_rows_routing_parameters_request_1_rest(): assert args[0] == request_msg expected_headers = { - "table_name": "projects/sample1/instances/sample2/tables/sample3" + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", } assert ( gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] @@ -10363,7 +10390,8 @@ def test_read_rows_routing_parameters_request_3_rest(): assert args[0] == request_msg expected_headers = { - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "app_profile_id": "", + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4", } assert ( gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] @@ -10392,7 +10420,8 @@ def test_sample_row_keys_routing_parameters_request_1_rest(): assert args[0] == request_msg expected_headers = { - "table_name": "projects/sample1/instances/sample2/tables/sample3" + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", } assert ( gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] @@ -10448,7 +10477,8 @@ def test_sample_row_keys_routing_parameters_request_3_rest(): assert args[0] == request_msg expected_headers = { - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "app_profile_id": "", + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4", } assert ( gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] @@ -10477,7 +10507,8 @@ def test_mutate_row_routing_parameters_request_1_rest(): assert args[0] == request_msg expected_headers = { - "table_name": "projects/sample1/instances/sample2/tables/sample3" + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", } assert ( gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] @@ -10533,7 +10564,8 @@ def test_mutate_row_routing_parameters_request_3_rest(): assert args[0] == request_msg expected_headers = { - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "app_profile_id": "", + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4", } assert ( gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] @@ -10562,7 +10594,8 @@ def test_mutate_rows_routing_parameters_request_1_rest(): assert args[0] == request_msg expected_headers = { - "table_name": "projects/sample1/instances/sample2/tables/sample3" + "table_name": 
"projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", } assert ( gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] @@ -10618,7 +10651,8 @@ def test_mutate_rows_routing_parameters_request_3_rest(): assert args[0] == request_msg expected_headers = { - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "app_profile_id": "", + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4", } assert ( gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] @@ -10649,7 +10683,8 @@ def test_check_and_mutate_row_routing_parameters_request_1_rest(): assert args[0] == request_msg expected_headers = { - "table_name": "projects/sample1/instances/sample2/tables/sample3" + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", } assert ( gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] @@ -10709,7 +10744,8 @@ def test_check_and_mutate_row_routing_parameters_request_3_rest(): assert args[0] == request_msg expected_headers = { - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "app_profile_id": "", + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4", } assert ( gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] @@ -10735,7 +10771,10 @@ def test_ping_and_warm_routing_parameters_request_1_rest(): assert args[0] == request_msg - expected_headers = {"name": "projects/sample1/instances/sample2"} + expected_headers = { + "name": "projects/sample1/instances/sample2", + "app_profile_id": "", + } assert ( gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] ) @@ -10788,7 +10827,8 @@ def test_read_modify_write_row_routing_parameters_request_1_rest(): assert args[0] == request_msg expected_headers = { - "table_name": "projects/sample1/instances/sample2/tables/sample3" + "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", } assert ( gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] @@ -10850,7 +10890,8 @@ def test_read_modify_write_row_routing_parameters_request_3_rest(): assert args[0] == request_msg expected_headers = { - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "app_profile_id": "", + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4", } assert ( gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] @@ -10878,7 +10919,6 @@ def test_prepare_query_routing_parameters_request_1_rest(): assert args[0] == request_msg - # expect app_profile_id while temporary patch is in place: https://github.com/googleapis/python-bigtable/pull/1072 expected_headers = { "name": "projects/sample1/instances/sample2", "app_profile_id": "", @@ -10932,7 +10972,6 @@ def test_execute_query_routing_parameters_request_1_rest(): assert args[0] == request_msg - # expect app_profile_id while temporary patch is in place: https://github.com/googleapis/python-bigtable/pull/1072 expected_headers = { "name": "projects/sample1/instances/sample2", "app_profile_id": "", From 800063f2e4bce95ad6be67566917273fc237c22e Mon Sep 17 00:00:00 2001 From: Jack Dingilian Date: Mon, 19 May 2025 19:06:45 -0400 Subject: [PATCH 865/892] feat: throw better error on invalid metadata response (#1107) --- 
.../bigtable/data/execute_query/metadata.py | 2 ++ .../unit/data/execute_query/test_metadata.py | 25 +++++++++++++++++++ 2 files changed, 27 insertions(+) create mode 100644 packages/google-cloud-bigtable/tests/unit/data/execute_query/test_metadata.py diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/metadata.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/metadata.py index 40ef60bc975e..2fd66947d339 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/metadata.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/metadata.py @@ -369,6 +369,8 @@ def _pb_metadata_to_metadata_types( ) -> Metadata: if "proto_schema" in metadata_pb: fields: List[Tuple[Optional[str], SqlType.Type]] = [] + if not metadata_pb.proto_schema.columns: + raise ValueError("Invalid empty ResultSetMetadata received.") for column_metadata in metadata_pb.proto_schema.columns: fields.append( (column_metadata.name, _pb_type_to_metadata_type(column_metadata.type)) diff --git a/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_metadata.py b/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_metadata.py new file mode 100644 index 000000000000..c90529d6fc8c --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_metadata.py @@ -0,0 +1,25 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import pytest + +from google.cloud.bigtable.data.execute_query.metadata import ( + _pb_metadata_to_metadata_types, +) +from google.cloud.bigtable_v2.types.data import ResultSetMetadata + + +def test_empty_metadata_fails_parsing(): + invalid_md_proto = ResultSetMetadata({"proto_schema": {"columns": []}}) + with pytest.raises(ValueError): + _pb_metadata_to_metadata_types(invalid_md_proto) From 4a58d73d27857c3f0a73a764c2e0ca51d3327c61 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Wed, 21 May 2025 07:45:20 -0400 Subject: [PATCH 866/892] chore: Update gapic-generator-python to 1.25.0 (#1123) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore: Update gapic-generator-python to 1.25.0 PiperOrigin-RevId: 755914147 Source-Link: https://github.com/googleapis/googleapis/commit/97a83d76a09a7f6dcab43675c87bdfeb5bcf1cb5 Source-Link: https://github.com/googleapis/googleapis-gen/commit/a9977efedc836ccece1f01d529b0315e1efe52ad Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiYTk5NzdlZmVkYzgzNmNjZWNlMWYwMWQ1MjliMDMxNWUxZWZlNTJhZCJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot Co-authored-by: Daniel Sanche --- .../services/bigtable_instance_admin/async_client.py | 4 ++++ .../services/bigtable_instance_admin/client.py | 3 +++ .../services/bigtable_instance_admin/transports/base.py | 4 ++++ .../services/bigtable_instance_admin/transports/rest.py | 4 ++++ .../services/bigtable_table_admin/async_client.py | 4 ++++ .../bigtable_admin_v2/services/bigtable_table_admin/client.py | 3 +++ .../services/bigtable_table_admin/transports/base.py | 4 ++++ .../services/bigtable_table_admin/transports/rest.py | 4 ++++ .../cloud/bigtable_v2/services/bigtable/async_client.py | 4 ++++ .../google/cloud/bigtable_v2/services/bigtable/client.py | 3 +++ .../cloud/bigtable_v2/services/bigtable/transports/base.py | 4 ++++ .../cloud/bigtable_v2/services/bigtable/transports/rest.py | 4 ++++ .../tests/unit/gapic/bigtable_v2/test_bigtable.py | 1 - 13 files changed, 45 insertions(+), 1 deletion(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py index a9c7ebc21bb6..b150b7123c5d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ -37,6 +37,7 @@ from google.api_core import retry_async as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: @@ -3461,5 +3462,8 @@ async def __aexit__(self, exc_type, exc, tb): gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + __all__ = ("BigtableInstanceAdminAsyncClient",) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py index 72fd030acf49..accaa1e036ca 100644 --- 
a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -45,6 +45,7 @@ from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] @@ -3940,5 +3941,7 @@ def __exit__(self, type, value, traceback): gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ __all__ = ("BigtableInstanceAdminClient",) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py index cd3289655943..f5ceeeb687aa 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py @@ -26,6 +26,7 @@ from google.api_core import operations_v1 from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin from google.cloud.bigtable_admin_v2.types import instance @@ -38,6 +39,9 @@ gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class BigtableInstanceAdminTransport(abc.ABC): """Abstract transport class for BigtableInstanceAdmin.""" diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py index 0d2239ad822b..12af0792b075 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py @@ -23,6 +23,7 @@ from google.api_core import rest_helpers from google.api_core import rest_streaming from google.api_core import gapic_v1 +import google.protobuf from google.protobuf import json_format from google.api_core import operations_v1 @@ -64,6 +65,9 @@ rest_version=f"requests@{requests_version}", ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class BigtableInstanceAdminRestInterceptor: """Interceptor for BigtableInstanceAdmin. 
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index 2eaebae35104..1bf544db68eb 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -37,6 +37,7 @@ from google.api_core import retry_async as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: @@ -3468,5 +3469,8 @@ async def __aexit__(self, exc_type, exc, tb): gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + __all__ = ("BigtableTableAdminAsyncClient",) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index cc69fa05115e..abb82b1ed766 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -45,6 +45,7 @@ from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] @@ -3945,5 +3946,7 @@ def __exit__(self, type, value, traceback): gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ __all__ = ("BigtableTableAdminClient",) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py index ea6dca7c236e..9a549b7ca52f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py @@ -26,6 +26,7 @@ from google.api_core import operations_v1 from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf from google.cloud.bigtable_admin_v2.types import bigtable_table_admin from google.cloud.bigtable_admin_v2.types import table @@ -39,6 +40,9 @@ gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class BigtableTableAdminTransport(abc.ABC): """Abstract transport class for BigtableTableAdmin.""" diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py index 
f676835b593b..fd9445161863 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py @@ -23,6 +23,7 @@ from google.api_core import rest_helpers from google.api_core import rest_streaming from google.api_core import gapic_v1 +import google.protobuf from google.protobuf import json_format from google.api_core import operations_v1 @@ -65,6 +66,9 @@ rest_version=f"requests@{requests_version}", ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class BigtableTableAdminRestInterceptor: """Interceptor for BigtableTableAdmin. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py index 3ca8bb256151..123c340faaa1 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -39,6 +39,7 @@ from google.api_core import retry_async as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: @@ -1737,5 +1738,8 @@ async def __aexit__(self, exc_type, exc, tb): gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + __all__ = ("BigtableAsyncClient",) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py index ba3eb9de3451..902e435c5c8a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py @@ -46,6 +46,7 @@ from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf try: OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] @@ -2190,5 +2191,7 @@ def __exit__(self, type, value, traceback): gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ __all__ = ("BigtableClient",) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py index 8c014abada3d..4d25d8b3090f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py @@ -25,6 +25,7 @@ from google.api_core import retry as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore +import google.protobuf from google.cloud.bigtable_v2.types import bigtable @@ -32,6 +33,9 @@ 
gapic_version=package_version.__version__ ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class BigtableTransport(abc.ABC): """Abstract transport class for Bigtable.""" diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py index 7b410297fd5c..c84ef147fca0 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py @@ -23,6 +23,7 @@ from google.api_core import rest_helpers from google.api_core import rest_streaming from google.api_core import gapic_v1 +import google.protobuf from google.protobuf import json_format @@ -58,6 +59,9 @@ rest_version=f"requests@{requests_version}", ) +if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER + DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ + class BigtableRestInterceptor: """Interceptor for Bigtable. diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py index dba535dcc25f..84093a926313 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py @@ -7511,7 +7511,6 @@ def test_execute_query_routing_parameters_request_1_grpc(): ) assert args[0] == request_msg - expected_headers = { "name": "projects/sample1/instances/sample2", "app_profile_id": "", From 7bebfd6f126abef09cd873d1670d98ff59c04d74 Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Wed, 21 May 2025 11:10:20 -0700 Subject: [PATCH 867/892] chore: update renovate.json (#1127) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore: update renovate.json Renovate bot should ignore all Github Action workflows. 
We want tests to use fixed python versions, instead of having them update to latest * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * Update owlbot.py * Update renovate.json --------- Co-authored-by: Owl Bot Co-authored-by: Anthonios Partheniou --- packages/google-cloud-bigtable/owlbot.py | 2 +- packages/google-cloud-bigtable/renovate.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/google-cloud-bigtable/owlbot.py b/packages/google-cloud-bigtable/owlbot.py index f144a2d21457..56573f71eb98 100644 --- a/packages/google-cloud-bigtable/owlbot.py +++ b/packages/google-cloud-bigtable/owlbot.py @@ -95,7 +95,7 @@ def get_staging_dirs( ], ) -s.move(templated_files, excludes=[".coveragerc", "README.rst", ".github/release-please.yml", "noxfile.py"]) +s.move(templated_files, excludes=[".coveragerc", "README.rst", ".github/release-please.yml", "noxfile.py", "renovate.json"]) # ---------------------------------------------------------------------------- diff --git a/packages/google-cloud-bigtable/renovate.json b/packages/google-cloud-bigtable/renovate.json index c7875c469bd5..e2175ba2e887 100644 --- a/packages/google-cloud-bigtable/renovate.json +++ b/packages/google-cloud-bigtable/renovate.json @@ -5,7 +5,7 @@ ":preserveSemverRanges", ":disableDependencyDashboard" ], - "ignorePaths": [".pre-commit-config.yaml", ".kokoro/requirements.txt", "setup.py", ".github/workflows/unittest.yml"], + "ignorePaths": [".pre-commit-config.yaml", ".kokoro/requirements.txt", "setup.py", ".github/workflows/*"], "pip_requirements": { "fileMatch": ["requirements-test.txt", "samples/[\\S/]*constraints.txt", "samples/[\\S/]*constraints-test.txt"] } From 36c3e84237ca418c7885b84abcc0718d523632f8 Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Thu, 22 May 2025 10:38:51 -0700 Subject: [PATCH 868/892] feat: support authorized views (#1034) --- .../.cross_sync/transformers.py | 6 +- .../async_data_authorized_view.rst | 11 + .../docs/data_client/async_data_table.rst | 2 +- .../docs/data_client/data_client_usage.rst | 2 + .../data_client/sync_data_authorized_view.rst | 6 + .../google/cloud/bigtable/data/__init__.py | 4 + .../bigtable/data/_async/_mutate_rows.py | 29 +- .../cloud/bigtable/data/_async/_read_rows.py | 22 +- .../cloud/bigtable/data/_async/client.py | 268 +++++++++-- .../bigtable/data/_async/mutations_batcher.py | 20 +- .../bigtable/data/_cross_sync/_decorators.py | 19 +- .../google/cloud/bigtable/data/_helpers.py | 13 +- .../data/_sync_autogen/_mutate_rows.py | 24 +- .../bigtable/data/_sync_autogen/_read_rows.py | 20 +- .../bigtable/data/_sync_autogen/client.py | 212 ++++++-- .../data/_sync_autogen/mutations_batcher.py | 18 +- .../cloud/bigtable/data/read_rows_query.py | 2 +- .../tests/system/data/setup_fixtures.py | 63 +++ .../tests/system/data/test_system_async.py | 332 +++++++------ .../tests/system/data/test_system_autogen.py | 322 +++++++------ .../unit/data/_async/test__mutate_rows.py | 25 +- .../tests/unit/data/_async/test__read_rows.py | 8 +- .../tests/unit/data/_async/test_client.py | 447 ++++++++++++----- .../data/_async/test_mutations_batcher.py | 28 +- .../data/_sync_autogen/test__mutate_rows.py | 22 +- .../data/_sync_autogen/test__read_rows.py | 8 +- .../unit/data/_sync_autogen/test_client.py | 453 ++++++++++++------ .../_sync_autogen/test_mutations_batcher.py | 28 +- .../tests/unit/data/test_sync_up_to_date.py | 6 +- 29 files changed, 1659 insertions(+), 761 deletions(-) create mode 100644 
packages/google-cloud-bigtable/docs/data_client/async_data_authorized_view.rst create mode 100644 packages/google-cloud-bigtable/docs/data_client/sync_data_authorized_view.rst diff --git a/packages/google-cloud-bigtable/.cross_sync/transformers.py b/packages/google-cloud-bigtable/.cross_sync/transformers.py index ab2d5dd63d56..42ba3f83c4e7 100644 --- a/packages/google-cloud-bigtable/.cross_sync/transformers.py +++ b/packages/google-cloud-bigtable/.cross_sync/transformers.py @@ -81,7 +81,11 @@ def visit_FunctionDef(self, node): def visit_Constant(self, node): """Replace string type annotations""" - node.s = self.replacements.get(node.s, node.s) + try: + node.s = self.replacements.get(node.s, node.s) + except TypeError: + # ignore unhashable types (e.g. list) + pass return node diff --git a/packages/google-cloud-bigtable/docs/data_client/async_data_authorized_view.rst b/packages/google-cloud-bigtable/docs/data_client/async_data_authorized_view.rst new file mode 100644 index 000000000000..7d731297049f --- /dev/null +++ b/packages/google-cloud-bigtable/docs/data_client/async_data_authorized_view.rst @@ -0,0 +1,11 @@ +Authorized View Async +~~~~~~~~~~~~~~~~~~~~~ + + .. note:: + + It is generally not recommended to use the async client in an otherwise synchronous codebase. To make use of asyncio's + performance benefits, the codebase should be designed to be async from the ground up. + +.. autoclass:: google.cloud.bigtable.data._async.client.AuthorizedViewAsync + :members: + :inherited-members: diff --git a/packages/google-cloud-bigtable/docs/data_client/async_data_table.rst b/packages/google-cloud-bigtable/docs/data_client/async_data_table.rst index 3b7973e8eeff..37c396570fba 100644 --- a/packages/google-cloud-bigtable/docs/data_client/async_data_table.rst +++ b/packages/google-cloud-bigtable/docs/data_client/async_data_table.rst @@ -8,4 +8,4 @@ Table Async .. autoclass:: google.cloud.bigtable.data._async.client.TableAsync :members: - :show-inheritance: + :inherited-members: diff --git a/packages/google-cloud-bigtable/docs/data_client/data_client_usage.rst b/packages/google-cloud-bigtable/docs/data_client/data_client_usage.rst index f5bbac278f7b..708dafc621cd 100644 --- a/packages/google-cloud-bigtable/docs/data_client/data_client_usage.rst +++ b/packages/google-cloud-bigtable/docs/data_client/data_client_usage.rst @@ -9,6 +9,7 @@ Sync Surface sync_data_client sync_data_table + sync_data_authorized_view sync_data_mutations_batcher sync_data_execute_query_iterator @@ -20,6 +21,7 @@ Async Surface async_data_client async_data_table + async_data_authorized_view async_data_mutations_batcher async_data_execute_query_iterator diff --git a/packages/google-cloud-bigtable/docs/data_client/sync_data_authorized_view.rst b/packages/google-cloud-bigtable/docs/data_client/sync_data_authorized_view.rst new file mode 100644 index 000000000000..c0ac29721d5d --- /dev/null +++ b/packages/google-cloud-bigtable/docs/data_client/sync_data_authorized_view.rst @@ -0,0 +1,6 @@ +Authorized View +~~~~~~~~~~~~~~~ + +.. 
autoclass:: google.cloud.bigtable.data._sync_autogen.client.AuthorizedView + :members: + :inherited-members: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/__init__.py index 15f9bc1675f0..9439f0f8d19b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/__init__.py @@ -17,9 +17,11 @@ from google.cloud.bigtable.data._async.client import BigtableDataClientAsync from google.cloud.bigtable.data._async.client import TableAsync +from google.cloud.bigtable.data._async.client import AuthorizedViewAsync from google.cloud.bigtable.data._async.mutations_batcher import MutationsBatcherAsync from google.cloud.bigtable.data._sync_autogen.client import BigtableDataClient from google.cloud.bigtable.data._sync_autogen.client import Table +from google.cloud.bigtable.data._sync_autogen.client import AuthorizedView from google.cloud.bigtable.data._sync_autogen.mutations_batcher import MutationsBatcher from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery @@ -76,9 +78,11 @@ __all__ = ( "BigtableDataClientAsync", "TableAsync", + "AuthorizedViewAsync", "MutationsBatcherAsync", "BigtableDataClient", "Table", + "AuthorizedView", "MutationsBatcher", "RowKeySamples", "ReadRowsQuery", diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_mutate_rows.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_mutate_rows.py index bf618bf04247..8e6833bcafee 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_mutate_rows.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_mutate_rows.py @@ -15,10 +15,10 @@ from __future__ import annotations from typing import Sequence, TYPE_CHECKING -import functools from google.api_core import exceptions as core_exceptions from google.api_core import retry as retries +import google.cloud.bigtable_v2.types.bigtable as types_pb import google.cloud.bigtable.data.exceptions as bt_exceptions from google.cloud.bigtable.data._helpers import _attempt_timeout_generator from google.cloud.bigtable.data._helpers import _retry_exception_factory @@ -36,12 +36,16 @@ from google.cloud.bigtable_v2.services.bigtable.async_client import ( BigtableAsyncClient as GapicClientType, ) - from google.cloud.bigtable.data._async.client import TableAsync as TableType + from google.cloud.bigtable.data._async.client import ( # type: ignore + _DataApiTargetAsync as TargetType, + ) else: from google.cloud.bigtable_v2.services.bigtable.client import ( # type: ignore BigtableClient as GapicClientType, ) - from google.cloud.bigtable.data._sync_autogen.client import Table as TableType # type: ignore + from google.cloud.bigtable.data._sync_autogen.client import ( # type: ignore + _DataApiTarget as TargetType, + ) __CROSS_SYNC_OUTPUT__ = "google.cloud.bigtable.data._sync_autogen._mutate_rows" @@ -59,7 +63,7 @@ class _MutateRowsOperationAsync: Args: gapic_client: the client to use for the mutate_rows call - table: the table associated with the request + target: the table or view associated with the request mutation_entries: a list of RowMutationEntry objects to send to the server operation_timeout: the timeout to use for the entire operation, in seconds. attempt_timeout: the timeout to use for each mutate_rows attempt, in seconds. 
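[Editor's note] The surrounding hunks move _MutateRowsOperation away from binding table_name/app_profile_id with functools.partial and instead build the MutateRowsRequest proto explicitly, so the same code path can address either a table or an authorized view (see the _request_path expansion in the next hunk). A rough sketch of the two request shapes, with placeholder resource names:

    # Sketch: the request proto accepts either resource field, so the caller
    # can splat in whichever one the target provides.
    import google.cloud.bigtable_v2.types.bigtable as types_pb

    table_request = types_pb.MutateRowsRequest(
        table_name="projects/p/instances/i/tables/t",
        app_profile_id="default",
    )
    view_request = types_pb.MutateRowsRequest(
        authorized_view_name="projects/p/instances/i/tables/t/authorizedViews/v",
        app_profile_id="default",
    )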
@@ -70,7 +74,7 @@ class _MutateRowsOperationAsync: def __init__( self, gapic_client: GapicClientType, - table: TableType, + target: TargetType, mutation_entries: list["RowMutationEntry"], operation_timeout: float, attempt_timeout: float | None, @@ -84,13 +88,8 @@ def __init__( f"{_MUTATE_ROWS_REQUEST_MUTATION_LIMIT} mutations across " f"all entries. Found {total_mutations}." ) - # create partial function to pass to trigger rpc call - self._gapic_fn = functools.partial( - gapic_client.mutate_rows, - table_name=table.table_name, - app_profile_id=table.app_profile_id, - retry=None, - ) + self._target = target + self._gapic_fn = gapic_client.mutate_rows # create predicate for determining which errors are retryable self.is_retryable = retries.if_exception_type( # RPC level errors @@ -173,8 +172,12 @@ async def _run_attempt(self): # make gapic request try: result_generator = await self._gapic_fn( + request=types_pb.MutateRowsRequest( + entries=request_entries, + app_profile_id=self._target.app_profile_id, + **self._target._request_path, + ), timeout=next(self.timeout_generator), - entries=request_entries, retry=None, ) async for result_list in result_generator: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_read_rows.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_read_rows.py index 6d2fa3a7d717..8787bfa71411 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_read_rows.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_read_rows.py @@ -37,9 +37,11 @@ if TYPE_CHECKING: if CrossSync.is_async: - from google.cloud.bigtable.data._async.client import TableAsync as TableType + from google.cloud.bigtable.data._async.client import ( + _DataApiTargetAsync as TargetType, + ) else: - from google.cloud.bigtable.data._sync_autogen.client import Table as TableType # type: ignore + from google.cloud.bigtable.data._sync_autogen.client import _DataApiTarget as TargetType # type: ignore __CROSS_SYNC_OUTPUT__ = "google.cloud.bigtable.data._sync_autogen._read_rows" @@ -59,7 +61,7 @@ class _ReadRowsOperationAsync: Args: query: The query to execute - table: The table to send the request to + target: The table or view to send the request to operation_timeout: The total time to allow for the operation, in seconds attempt_timeout: The time to allow for each individual attempt, in seconds retryable_exceptions: A list of exceptions that should trigger a retry @@ -69,7 +71,7 @@ class _ReadRowsOperationAsync: "attempt_timeout_gen", "operation_timeout", "request", - "table", + "target", "_predicate", "_last_yielded_row_key", "_remaining_count", @@ -78,7 +80,7 @@ class _ReadRowsOperationAsync: def __init__( self, query: ReadRowsQuery, - table: TableType, + target: TargetType, operation_timeout: float, attempt_timeout: float, retryable_exceptions: Sequence[type[Exception]] = (), @@ -90,12 +92,12 @@ def __init__( if isinstance(query, dict): self.request = ReadRowsRequestPB( **query, - table_name=table.table_name, - app_profile_id=table.app_profile_id, + **target._request_path, + app_profile_id=target.app_profile_id, ) else: - self.request = query._to_pb(table) - self.table = table + self.request = query._to_pb(target) + self.target = target self._predicate = retries.if_exception_type(*retryable_exceptions) self._last_yielded_row_key: bytes | None = None self._remaining_count: int | None = self.request.rows_limit or None @@ -142,7 +144,7 @@ def _read_rows_attempt(self) -> CrossSync.Iterable[Row]: if self._remaining_count == 
0: return self.merge_rows(None) # create and return a new row merger - gapic_stream = self.table.client._gapic_client.read_rows( + gapic_stream = self.target.client._gapic_client.read_rows( self.request, timeout=next(self.attempt_timeout_gen), retry=None, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py index 3c5093d10ad5..6ee21b554608 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py @@ -25,6 +25,7 @@ TYPE_CHECKING, ) +import abc import time import warnings import random @@ -47,6 +48,10 @@ DEFAULT_CLIENT_INFO, ) from google.cloud.bigtable_v2.types.bigtable import PingAndWarmRequest +from google.cloud.bigtable_v2.types.bigtable import SampleRowKeysRequest +from google.cloud.bigtable_v2.types.bigtable import MutateRowRequest +from google.cloud.bigtable_v2.types.bigtable import CheckAndMutateRowRequest +from google.cloud.bigtable_v2.types.bigtable import ReadModifyWriteRowRequest from google.cloud.client import ClientWithProject from google.cloud.environment_vars import BIGTABLE_EMULATOR # type: ignore from google.api_core import retry as retries @@ -210,8 +215,8 @@ def __init__( self.transport = cast(TransportType, self._gapic_client.transport) # keep track of active instances to for warmup on channel refresh self._active_instances: Set[_WarmedInstanceKey] = set() - # keep track of table objects associated with each instance - # only remove instance from _active_instances when all associated tables remove it + # keep track of _DataApiTarget objects associated with each instance + # only remove instance from _active_instances when all associated targets are closed self._instance_owners: dict[_WarmedInstanceKey, Set[int]] = {} self._channel_init_time = time.monotonic() self._channel_refresh_task: CrossSync.Task[None] | None = None @@ -320,7 +325,7 @@ async def _ping_and_warm_instances( ], wait_for_ready=True, ) - for (instance_name, table_name, app_profile_id) in instance_list + for (instance_name, app_profile_id) in instance_list ] result_list = await CrossSync.gather_partials( partial_list, return_exceptions=True, sync_executor=self._executor @@ -404,10 +409,13 @@ async def _manage_channel( replace_symbols={ "TableAsync": "Table", "ExecuteQueryIteratorAsync": "ExecuteQueryIterator", + "_DataApiTargetAsync": "_DataApiTarget", } ) async def _register_instance( - self, instance_id: str, owner: TableAsync | ExecuteQueryIteratorAsync + self, + instance_id: str, + owner: _DataApiTargetAsync | ExecuteQueryIteratorAsync, ) -> None: """ Registers an instance with the client, and warms the channel for the instance @@ -422,9 +430,7 @@ async def _register_instance( owners call _remove_instance_registration """ instance_name = self._gapic_client.instance_path(self.project, instance_id) - instance_key = _WarmedInstanceKey( - instance_name, owner.table_name, owner.app_profile_id - ) + instance_key = _WarmedInstanceKey(instance_name, owner.app_profile_id) self._instance_owners.setdefault(instance_key, set()).add(id(owner)) if instance_key not in self._active_instances: self._active_instances.add(instance_key) @@ -440,10 +446,13 @@ async def _register_instance( replace_symbols={ "TableAsync": "Table", "ExecuteQueryIteratorAsync": "ExecuteQueryIterator", + "_DataApiTargetAsync": "_DataApiTarget", } ) async def _remove_instance_registration( - self, instance_id: str, owner: 
TableAsync | "ExecuteQueryIteratorAsync" + self, + instance_id: str, + owner: _DataApiTargetAsync | ExecuteQueryIteratorAsync, ) -> bool: """ Removes an instance from the client's registered instances, to prevent @@ -460,9 +469,7 @@ async def _remove_instance_registration( bool: True if instance was removed, else False """ instance_name = self._gapic_client.instance_path(self.project, instance_id) - instance_key = _WarmedInstanceKey( - instance_name, owner.table_name, owner.app_profile_id - ) + instance_key = _WarmedInstanceKey(instance_name, owner.app_profile_id) owner_list = self._instance_owners.get(instance_key, set()) try: owner_list.remove(id(owner)) @@ -528,6 +535,72 @@ def get_table(self, instance_id: str, table_id: str, *args, **kwargs) -> TableAs """ return TableAsync(self, instance_id, table_id, *args, **kwargs) + @CrossSync.convert( + replace_symbols={"AuthorizedViewAsync": "AuthorizedView"}, + docstring_format_vars={ + "LOOP_MESSAGE": ( + "Must be created within an async context (running event loop)", + "", + ), + "RAISE_NO_LOOP": ( + "RuntimeError: if called outside of an async context (no running event loop)", + "None", + ), + }, + ) + def get_authorized_view( + self, instance_id: str, table_id: str, authorized_view_id: str, *args, **kwargs + ) -> AuthorizedViewAsync: + """ + Returns an authorized view instance for making data API requests. All arguments are passed + directly to the AuthorizedViewAsync constructor. + + {LOOP_MESSAGE} + + Args: + instance_id: The Bigtable instance ID to associate with this client. + instance_id is combined with the client's project to fully + specify the instance + table_id: The ID of the table. table_id is combined with the + instance_id and the client's project to fully specify the table + authorized_view_id: The id for the authorized view to use for requests + app_profile_id: The app profile to associate with requests. + https://cloud.google.com/bigtable/docs/app-profiles + default_read_rows_operation_timeout: The default timeout for read rows + operations, in seconds. If not set, defaults to Table's value + default_read_rows_attempt_timeout: The default timeout for individual + read rows rpc requests, in seconds. If not set, defaults Table's value + default_mutate_rows_operation_timeout: The default timeout for mutate rows + operations, in seconds. If not set, defaults to Table's value + default_mutate_rows_attempt_timeout: The default timeout for individual + mutate rows rpc requests, in seconds. If not set, defaults Table's value + default_operation_timeout: The default timeout for all other operations, in + seconds. If not set, defaults to Table's value + default_attempt_timeout: The default timeout for all other individual rpc + requests, in seconds. If not set, defaults to Table's value + default_read_rows_retryable_errors: a list of errors that will be retried + if encountered during read_rows and related operations. If not set, + defaults to Table's value + default_mutate_rows_retryable_errors: a list of errors that will be retried + if encountered during mutate_rows and related operations. If not set, + defaults to Table's value + default_retryable_errors: a list of errors that will be retried if + encountered during all other operations. 
If not set, defaults to + Table's value + Returns: + AuthorizedViewAsync: a table instance for making data API requests + Raises: + {RAISE_NO_LOOP} + """ + return CrossSync.AuthorizedView( + self, + instance_id, + table_id, + authorized_view_id, + *args, + **kwargs, + ) + @CrossSync.convert( replace_symbols={"ExecuteQueryIteratorAsync": "ExecuteQueryIterator"} ) @@ -679,13 +752,12 @@ async def __aexit__(self, exc_type, exc_val, exc_tb): await self._gapic_client.__aexit__(exc_type, exc_val, exc_tb) -@CrossSync.convert_class(sync_name="Table", add_mapping_for_name="Table") -class TableAsync: +@CrossSync.convert_class(sync_name="_DataApiTarget") +class _DataApiTargetAsync(abc.ABC): """ - Main Data API surface + Abstract class containing API surface for BigtableDataClient. Should not be created directly - Table object maintains table_id, and app_profile_id context, and passes them with - each call + Can be instantiated as a Table or an AuthorizedView """ @CrossSync.convert( @@ -809,6 +881,7 @@ def __init__( default_mutate_rows_retryable_errors or () ) self.default_retryable_errors = default_retryable_errors or () + try: self._register_instance_future = CrossSync.create_task( self.client._register_instance, @@ -821,6 +894,20 @@ def __init__( f"{self.__class__.__name__} must be created within an async event loop context." ) from e + @property + @abc.abstractmethod + def _request_path(self) -> dict[str, str]: + """ + Used to populate table_name or authorized_view_name for rpc requests, depending on the subclass + + Unimplemented in base class + """ + raise NotImplementedError + + def __str__(self): + path_str = list(self._request_path.values())[0] if self._request_path else "" + return f"{self.__class__.__name__}<{path_str!r}>" + @CrossSync.convert(replace_symbols={"AsyncIterable": "Iterable"}) async def read_rows_stream( self, @@ -1177,8 +1264,9 @@ async def sample_row_keys( @CrossSync.convert async def execute_rpc(): results = await self.client._gapic_client.sample_row_keys( - table_name=self.table_name, - app_profile_id=self.app_profile_id, + request=SampleRowKeysRequest( + app_profile_id=self.app_profile_id, **self._request_path + ), timeout=next(attempt_timeout_gen), retry=None, ) @@ -1305,10 +1393,14 @@ async def mutate_row( target = partial( self.client._gapic_client.mutate_row, - row_key=row_key.encode("utf-8") if isinstance(row_key, str) else row_key, - mutations=[mutation._to_pb() for mutation in mutations_list], - table_name=self.table_name, - app_profile_id=self.app_profile_id, + request=MutateRowRequest( + row_key=row_key.encode("utf-8") + if isinstance(row_key, str) + else row_key, + mutations=[mutation._to_pb() for mutation in mutations_list], + app_profile_id=self.app_profile_id, + **self._request_path, + ), timeout=attempt_timeout, retry=None, ) @@ -1430,12 +1522,16 @@ async def check_and_mutate_row( false_case_mutations = [false_case_mutations] false_case_list = [m._to_pb() for m in false_case_mutations or []] result = await self.client._gapic_client.check_and_mutate_row( - true_mutations=true_case_list, - false_mutations=false_case_list, - predicate_filter=predicate._to_pb() if predicate is not None else None, - row_key=row_key.encode("utf-8") if isinstance(row_key, str) else row_key, - table_name=self.table_name, - app_profile_id=self.app_profile_id, + request=CheckAndMutateRowRequest( + true_mutations=true_case_list, + false_mutations=false_case_list, + predicate_filter=predicate._to_pb() if predicate is not None else None, + row_key=row_key.encode("utf-8") + if 
isinstance(row_key, str) + else row_key, + app_profile_id=self.app_profile_id, + **self._request_path, + ), timeout=operation_timeout, retry=None, ) @@ -1480,10 +1576,14 @@ async def read_modify_write_row( if not rules: raise ValueError("rules must contain at least one item") result = await self.client._gapic_client.read_modify_write_row( - rules=[rule._to_pb() for rule in rules], - row_key=row_key.encode("utf-8") if isinstance(row_key, str) else row_key, - table_name=self.table_name, - app_profile_id=self.app_profile_id, + request=ReadModifyWriteRowRequest( + rules=[rule._to_pb() for rule in rules], + row_key=row_key.encode("utf-8") + if isinstance(row_key, str) + else row_key, + app_profile_id=self.app_profile_id, + **self._request_path, + ), timeout=operation_timeout, retry=None, ) @@ -1520,3 +1620,107 @@ async def __aexit__(self, exc_type, exc_val, exc_tb): grpc channels will no longer be warmed """ await self.close() + + +@CrossSync.convert_class( + sync_name="Table", + add_mapping_for_name="Table", + replace_symbols={"_DataApiTargetAsync": "_DataApiTarget"}, +) +class TableAsync(_DataApiTargetAsync): + """ + Main Data API surface for interacting with a Bigtable table. + + Table object maintains table_id, and app_profile_id context, and passes them with + each call + """ + + @property + def _request_path(self) -> dict[str, str]: + return {"table_name": self.table_name} + + +@CrossSync.convert_class( + sync_name="AuthorizedView", + add_mapping_for_name="AuthorizedView", + replace_symbols={"_DataApiTargetAsync": "_DataApiTarget"}, +) +class AuthorizedViewAsync(_DataApiTargetAsync): + """ + Provides access to an authorized view of a table. + + An authorized view is a subset of a table that you configure to include specific table data. + Then you grant access to the authorized view separately from access to the table. + + AuthorizedView object maintains table_id, app_profile_id, and authorized_view_id context, + and passed them with each call + """ + + @CrossSync.convert( + docstring_format_vars={ + "LOOP_MESSAGE": ( + "Must be created within an async context (running event loop)", + "", + ), + "RAISE_NO_LOOP": ( + "RuntimeError: if called outside of an async context (no running event loop)", + "None", + ), + } + ) + def __init__( + self, + client, + instance_id, + table_id, + authorized_view_id, + app_profile_id: str | None = None, + **kwargs, + ): + """ + Initialize an AuthorizedView instance + + {LOOP_MESSAGE} + + Args: + instance_id: The Bigtable instance ID to associate with this client. + instance_id is combined with the client's project to fully + specify the instance + table_id: The ID of the table. table_id is combined with the + instance_id and the client's project to fully specify the table + authorized_view_id: The id for the authorized view to use for requests + app_profile_id: The app profile to associate with requests. + https://cloud.google.com/bigtable/docs/app-profiles + default_read_rows_operation_timeout: The default timeout for read rows + operations, in seconds. If not set, defaults to 600 seconds (10 minutes) + default_read_rows_attempt_timeout: The default timeout for individual + read rows rpc requests, in seconds. If not set, defaults to 20 seconds + default_mutate_rows_operation_timeout: The default timeout for mutate rows + operations, in seconds. If not set, defaults to 600 seconds (10 minutes) + default_mutate_rows_attempt_timeout: The default timeout for individual + mutate rows rpc requests, in seconds. 
If not set, defaults to 60 seconds + default_operation_timeout: The default timeout for all other operations, in + seconds. If not set, defaults to 60 seconds + default_attempt_timeout: The default timeout for all other individual rpc + requests, in seconds. If not set, defaults to 20 seconds + default_read_rows_retryable_errors: a list of errors that will be retried + if encountered during read_rows and related operations. + Defaults to 4 (DeadlineExceeded), 14 (ServiceUnavailable), and 10 (Aborted) + default_mutate_rows_retryable_errors: a list of errors that will be retried + if encountered during mutate_rows and related operations. + Defaults to 4 (DeadlineExceeded) and 14 (ServiceUnavailable) + default_retryable_errors: a list of errors that will be retried if + encountered during all other operations. + Defaults to 4 (DeadlineExceeded) and 14 (ServiceUnavailable) + Raises: + {RAISE_NO_LOOP} + """ + super().__init__(client, instance_id, table_id, app_profile_id, **kwargs) + self.authorized_view_id = authorized_view_id + self.authorized_view_name: str = self.client._gapic_client.authorized_view_path( + self.client.project, instance_id, table_id, authorized_view_id + ) + + @property + def _request_path(self) -> dict[str, str]: + return {"authorized_view_name": self.authorized_view_name} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/mutations_batcher.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/mutations_batcher.py index 6e15bb5f33fb..a8e99ea9e91b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/mutations_batcher.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/mutations_batcher.py @@ -37,9 +37,11 @@ from google.cloud.bigtable.data.mutations import RowMutationEntry if CrossSync.is_async: - from google.cloud.bigtable.data._async.client import TableAsync as TableType + from google.cloud.bigtable.data._async.client import ( + _DataApiTargetAsync as TargetType, + ) else: - from google.cloud.bigtable.data._sync_autogen.client import Table as TableType # type: ignore + from google.cloud.bigtable.data._sync_autogen.client import _DataApiTarget as TargetType # type: ignore __CROSS_SYNC_OUTPUT__ = "google.cloud.bigtable.data._sync_autogen.mutations_batcher" @@ -179,7 +181,7 @@ async def add_to_flow(self, mutations: RowMutationEntry | list[RowMutationEntry] @CrossSync.convert_class(sync_name="MutationsBatcher") class MutationsBatcherAsync: """ - Allows users to send batches using context manager API: + Allows users to send batches using context manager API. Runs mutate_row, mutate_rows, and check_and_mutate_row internally, combining to use as few network requests as required @@ -191,7 +193,7 @@ class MutationsBatcherAsync: - when batcher is closed or destroyed Args: - table: Table to preform rpc calls + table: table or autrhorized_view used to preform rpc calls flush_interval: Automatically flush every flush_interval seconds. If None, no time-based flushing is performed. 
flush_limit_mutation_count: Flush immediately after flush_limit_mutation_count @@ -210,7 +212,7 @@ class MutationsBatcherAsync: def __init__( self, - table: TableType, + table: TargetType, *, flush_interval: float | None = 5, flush_limit_mutation_count: int | None = 1000, @@ -230,7 +232,7 @@ def __init__( ) self._closed = CrossSync.Event() - self._table = table + self._target = table self._staged_entries: list[RowMutationEntry] = [] self._staged_count, self._staged_bytes = 0, 0 self._flow_control = CrossSync._FlowControl( @@ -380,8 +382,8 @@ async def _execute_mutate_rows( """ try: operation = CrossSync._MutateRowsOperation( - self._table.client._gapic_client, - self._table, + self._target.client._gapic_client, + self._target, batch, operation_timeout=self._operation_timeout, attempt_timeout=self._attempt_timeout, @@ -491,7 +493,7 @@ def _on_exit(self): """ if not self._closed.is_set() and self._staged_entries: warnings.warn( - f"MutationsBatcher for table {self._table.table_name} was not closed. " + f"MutationsBatcher for target {self._target!r} was not closed. " f"{len(self._staged_entries)} Unflushed mutations will not be sent to the server." ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_cross_sync/_decorators.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_cross_sync/_decorators.py index ea86e83af8d7..a0dd140dd01d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_cross_sync/_decorators.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_cross_sync/_decorators.py @@ -179,7 +179,8 @@ def _convert_ast_to_py(cls, ast_node: ast.expr | None) -> Any: cls._convert_ast_to_py(k): cls._convert_ast_to_py(v) for k, v in zip(ast_node.keys, ast_node.values) } - raise ValueError(f"Unsupported type {type(ast_node)}") + # unsupported node type + return ast_node class ConvertClass(AstDecorator): @@ -421,6 +422,15 @@ def sync_ast_transform(self, wrapped_node, transformers_globals): import ast import copy + arg_nodes = [ + a if isinstance(a, ast.expr) else ast.Constant(value=a) for a in self._args + ] + kwarg_nodes = [] + for k, v in self._kwargs.items(): + if not isinstance(v, ast.expr): + v = ast.Constant(value=v) + kwarg_nodes.append(ast.keyword(arg=k, value=v)) + new_node = copy.deepcopy(wrapped_node) if not hasattr(new_node, "decorator_list"): new_node.decorator_list = [] @@ -431,11 +441,8 @@ def sync_ast_transform(self, wrapped_node, transformers_globals): attr="fixture", ctx=ast.Load(), ), - args=[ast.Constant(value=a) for a in self._args], - keywords=[ - ast.keyword(arg=k, value=ast.Constant(value=v)) - for k, v in self._kwargs.items() - ], + args=arg_nodes, + keywords=kwarg_nodes, ) ) return new_node diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_helpers.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_helpers.py index a70ebfb6d98e..424a344860e3 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_helpers.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_helpers.py @@ -28,8 +28,8 @@ if TYPE_CHECKING: import grpc - from google.cloud.bigtable.data import TableAsync - from google.cloud.bigtable.data import Table + from google.cloud.bigtable.data._async.client import _DataApiTargetAsync + from google.cloud.bigtable.data._sync_autogen.client import _DataApiTarget """ Helper functions used in various places in the library. 
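[Editor's note] Across client.py, _read_rows.py, _mutate_rows.py, and mutations_batcher.py, the shared request-building code now asks the target for its resource field through _request_path instead of hard-coding table_name. A simplified, self-contained illustration of that polymorphism (these stand-in classes are hypothetical, not the real _DataApiTarget hierarchy):

    # Simplified illustration of the _request_path pattern.
    import abc

    class _Target(abc.ABC):
        @property
        @abc.abstractmethod
        def _request_path(self) -> dict:
            """Resource field to splat into request protos."""

    class _TableTarget(_Target):
        def __init__(self, table_name):
            self.table_name = table_name

        @property
        def _request_path(self) -> dict:
            return {"table_name": self.table_name}

    class _ViewTarget(_Target):
        def __init__(self, authorized_view_name):
            self.authorized_view_name = authorized_view_name

        @property
        def _request_path(self) -> dict:
            return {"authorized_view_name": self.authorized_view_name}

    def request_kwargs(target: _Target, app_profile_id=None) -> dict:
        # the caller never branches on the target type
        return dict(app_profile_id=app_profile_id, **target._request_path)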
@@ -44,9 +44,10 @@ # used by read_rows_sharded to limit how many requests are attempted in parallel _CONCURRENCY_LIMIT = 10 -# used to register instance data with the client for channel warming +# used to identify an active bigtable resource that needs to be warmed through PingAndWarm +# each instance/app_profile_id pair needs to be individually tracked _WarmedInstanceKey = namedtuple( - "_WarmedInstanceKey", ["instance_name", "table_name", "app_profile_id"] + "_WarmedInstanceKey", ["instance_name", "app_profile_id"] ) @@ -121,7 +122,7 @@ def _retry_exception_factory( def _get_timeouts( operation: float | TABLE_DEFAULT, attempt: float | None | TABLE_DEFAULT, - table: "TableAsync" | "Table", + table: "_DataApiTargetAsync" | "_DataApiTarget", ) -> tuple[float, float]: """ Convert passed in timeout values to floats, using table defaults if necessary. @@ -226,7 +227,7 @@ def _get_error_type( def _get_retryable_errors( call_codes: Sequence["grpc.StatusCode" | int | type[Exception]] | TABLE_DEFAULT, - table: "TableAsync" | "Table", + table: "_DataApiTargetAsync" | "_DataApiTarget", ) -> list[type[Exception]]: """ Convert passed in retryable error codes to a list of exception types. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/_mutate_rows.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/_mutate_rows.py index 8e8c5ca89dde..3bf7b562f1db 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/_mutate_rows.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/_mutate_rows.py @@ -17,9 +17,9 @@ from __future__ import annotations from typing import Sequence, TYPE_CHECKING -import functools from google.api_core import exceptions as core_exceptions from google.api_core import retry as retries +import google.cloud.bigtable_v2.types.bigtable as types_pb import google.cloud.bigtable.data.exceptions as bt_exceptions from google.cloud.bigtable.data._helpers import _attempt_timeout_generator from google.cloud.bigtable.data._helpers import _retry_exception_factory @@ -32,7 +32,9 @@ from google.cloud.bigtable_v2.services.bigtable.client import ( BigtableClient as GapicClientType, ) - from google.cloud.bigtable.data._sync_autogen.client import Table as TableType + from google.cloud.bigtable.data._sync_autogen.client import ( + _DataApiTarget as TargetType, + ) class _MutateRowsOperation: @@ -47,7 +49,7 @@ class _MutateRowsOperation: Args: gapic_client: the client to use for the mutate_rows call - table: the table associated with the request + target: the table or view associated with the request mutation_entries: a list of RowMutationEntry objects to send to the server operation_timeout: the timeout to use for the entire operation, in seconds. attempt_timeout: the timeout to use for each mutate_rows attempt, in seconds. @@ -57,7 +59,7 @@ class _MutateRowsOperation: def __init__( self, gapic_client: GapicClientType, - table: TableType, + target: TargetType, mutation_entries: list["RowMutationEntry"], operation_timeout: float, attempt_timeout: float | None, @@ -68,12 +70,8 @@ def __init__( raise ValueError( f"mutate_rows requests can contain at most {_MUTATE_ROWS_REQUEST_MUTATION_LIMIT} mutations across all entries. Found {total_mutations}." 
) - self._gapic_fn = functools.partial( - gapic_client.mutate_rows, - table_name=table.table_name, - app_profile_id=table.app_profile_id, - retry=None, - ) + self._target = target + self._gapic_fn = gapic_client.mutate_rows self.is_retryable = retries.if_exception_type( *retryable_exceptions, bt_exceptions._MutateRowsIncomplete ) @@ -140,8 +138,12 @@ def _run_attempt(self): return try: result_generator = self._gapic_fn( + request=types_pb.MutateRowsRequest( + entries=request_entries, + app_profile_id=self._target.app_profile_id, + **self._target._request_path, + ), timeout=next(self.timeout_generator), - entries=request_entries, retry=None, ) for result_list in result_generator: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/_read_rows.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/_read_rows.py index 92619c6a4740..3593475a98d2 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/_read_rows.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/_read_rows.py @@ -34,7 +34,9 @@ from google.cloud.bigtable.data._cross_sync import CrossSync if TYPE_CHECKING: - from google.cloud.bigtable.data._sync_autogen.client import Table as TableType + from google.cloud.bigtable.data._sync_autogen.client import ( + _DataApiTarget as TargetType, + ) class _ReadRowsOperation: @@ -51,7 +53,7 @@ class _ReadRowsOperation: Args: query: The query to execute - table: The table to send the request to + target: The table or view to send the request to operation_timeout: The total time to allow for the operation, in seconds attempt_timeout: The time to allow for each individual attempt, in seconds retryable_exceptions: A list of exceptions that should trigger a retry @@ -61,7 +63,7 @@ class _ReadRowsOperation: "attempt_timeout_gen", "operation_timeout", "request", - "table", + "target", "_predicate", "_last_yielded_row_key", "_remaining_count", @@ -70,7 +72,7 @@ class _ReadRowsOperation: def __init__( self, query: ReadRowsQuery, - table: TableType, + target: TargetType, operation_timeout: float, attempt_timeout: float, retryable_exceptions: Sequence[type[Exception]] = (), @@ -81,13 +83,11 @@ def __init__( self.operation_timeout = operation_timeout if isinstance(query, dict): self.request = ReadRowsRequestPB( - **query, - table_name=table.table_name, - app_profile_id=table.app_profile_id, + **query, **target._request_path, app_profile_id=target.app_profile_id ) else: - self.request = query._to_pb(table) - self.table = table + self.request = query._to_pb(target) + self.target = target self._predicate = retries.if_exception_type(*retryable_exceptions) self._last_yielded_row_key: bytes | None = None self._remaining_count: int | None = self.request.rows_limit or None @@ -125,7 +125,7 @@ def _read_rows_attempt(self) -> CrossSync._Sync_Impl.Iterable[Row]: self.request.rows_limit = self._remaining_count if self._remaining_count == 0: return self.merge_rows(None) - gapic_stream = self.table.client._gapic_client.read_rows( + gapic_stream = self.target.client._gapic_client.read_rows( self.request, timeout=next(self.attempt_timeout_gen), retry=None ) chunked_stream = self.chunk_stream(gapic_stream) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/client.py index 5e21c1f518a2..b36bf359a952 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/client.py +++ 
b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/client.py @@ -18,6 +18,7 @@ from __future__ import annotations from typing import cast, Any, Optional, Set, Sequence, TYPE_CHECKING +import abc import time import warnings import random @@ -38,6 +39,10 @@ DEFAULT_CLIENT_INFO, ) from google.cloud.bigtable_v2.types.bigtable import PingAndWarmRequest +from google.cloud.bigtable_v2.types.bigtable import SampleRowKeysRequest +from google.cloud.bigtable_v2.types.bigtable import MutateRowRequest +from google.cloud.bigtable_v2.types.bigtable import CheckAndMutateRowRequest +from google.cloud.bigtable_v2.types.bigtable import ReadModifyWriteRowRequest from google.cloud.client import ClientWithProject from google.cloud.environment_vars import BIGTABLE_EMULATOR from google.api_core import retry as retries @@ -243,7 +248,7 @@ def _ping_and_warm_instances( ], wait_for_ready=True, ) - for (instance_name, table_name, app_profile_id) in instance_list + for (instance_name, app_profile_id) in instance_list ] result_list = CrossSync._Sync_Impl.gather_partials( partial_list, return_exceptions=True, sync_executor=self._executor @@ -300,7 +305,7 @@ def _manage_channel( next_sleep = max(next_refresh - (time.monotonic() - start_timestamp), 0) def _register_instance( - self, instance_id: str, owner: Table | ExecuteQueryIterator + self, instance_id: str, owner: _DataApiTarget | ExecuteQueryIterator ) -> None: """Registers an instance with the client, and warms the channel for the instance The client will periodically refresh grpc channel used to make @@ -313,9 +318,7 @@ def _register_instance( _instance_owners, and instances will only be unregistered when all owners call _remove_instance_registration""" instance_name = self._gapic_client.instance_path(self.project, instance_id) - instance_key = _WarmedInstanceKey( - instance_name, owner.table_name, owner.app_profile_id - ) + instance_key = _WarmedInstanceKey(instance_name, owner.app_profile_id) self._instance_owners.setdefault(instance_key, set()).add(id(owner)) if instance_key not in self._active_instances: self._active_instances.add(instance_key) @@ -325,7 +328,7 @@ def _register_instance( self._start_background_channel_refresh() def _remove_instance_registration( - self, instance_id: str, owner: Table | "ExecuteQueryIterator" + self, instance_id: str, owner: _DataApiTarget | ExecuteQueryIterator ) -> bool: """Removes an instance from the client's registered instances, to prevent warming new channels for the instance @@ -340,9 +343,7 @@ def _remove_instance_registration( Returns: bool: True if instance was removed, else False""" instance_name = self._gapic_client.instance_path(self.project, instance_id) - instance_key = _WarmedInstanceKey( - instance_name, owner.table_name, owner.app_profile_id - ) + instance_key = _WarmedInstanceKey(instance_name, owner.app_profile_id) owner_list = self._instance_owners.get(instance_key, set()) try: owner_list.remove(id(owner)) @@ -393,6 +394,52 @@ def get_table(self, instance_id: str, table_id: str, *args, **kwargs) -> Table: None""" return Table(self, instance_id, table_id, *args, **kwargs) + def get_authorized_view( + self, instance_id: str, table_id: str, authorized_view_id: str, *args, **kwargs + ) -> AuthorizedView: + """Returns an authorized view instance for making data API requests. All arguments are passed + directly to the AuthorizedView constructor. + + + + Args: + instance_id: The Bigtable instance ID to associate with this client. 
+ instance_id is combined with the client's project to fully + specify the instance + table_id: The ID of the table. table_id is combined with the + instance_id and the client's project to fully specify the table + authorized_view_id: The id for the authorized view to use for requests + app_profile_id: The app profile to associate with requests. + https://cloud.google.com/bigtable/docs/app-profiles + default_read_rows_operation_timeout: The default timeout for read rows + operations, in seconds. If not set, defaults to Table's value + default_read_rows_attempt_timeout: The default timeout for individual + read rows rpc requests, in seconds. If not set, defaults Table's value + default_mutate_rows_operation_timeout: The default timeout for mutate rows + operations, in seconds. If not set, defaults to Table's value + default_mutate_rows_attempt_timeout: The default timeout for individual + mutate rows rpc requests, in seconds. If not set, defaults Table's value + default_operation_timeout: The default timeout for all other operations, in + seconds. If not set, defaults to Table's value + default_attempt_timeout: The default timeout for all other individual rpc + requests, in seconds. If not set, defaults to Table's value + default_read_rows_retryable_errors: a list of errors that will be retried + if encountered during read_rows and related operations. If not set, + defaults to Table's value + default_mutate_rows_retryable_errors: a list of errors that will be retried + if encountered during mutate_rows and related operations. If not set, + defaults to Table's value + default_retryable_errors: a list of errors that will be retried if + encountered during all other operations. If not set, defaults to + Table's value + Returns: + AuthorizedView: a table instance for making data API requests + Raises: + None""" + return CrossSync._Sync_Impl.AuthorizedView( + self, instance_id, table_id, authorized_view_id, *args, **kwargs + ) + def execute_query( self, query: str, @@ -532,13 +579,11 @@ def __exit__(self, exc_type, exc_val, exc_tb): self._gapic_client.__exit__(exc_type, exc_val, exc_tb) -@CrossSync._Sync_Impl.add_mapping_decorator("Table") -class Table: +class _DataApiTarget(abc.ABC): """ - Main Data API surface + Abstract class containing API surface for BigtableDataClient. Should not be created directly - Table object maintains table_id, and app_profile_id context, and passes them with - each call + Can be instantiated as a Table or an AuthorizedView """ def __init__( @@ -653,6 +698,18 @@ def __init__( f"{self.__class__.__name__} must be created within an async event loop context." 
) from e + @property + @abc.abstractmethod + def _request_path(self) -> dict[str, str]: + """Used to populate table_name or authorized_view_name for rpc requests, depending on the subclass + + Unimplemented in base class""" + raise NotImplementedError + + def __str__(self): + path_str = list(self._request_path.values())[0] if self._request_path else "" + return f"{self.__class__.__name__}<{path_str!r}>" + def read_rows_stream( self, query: ReadRowsQuery, @@ -979,8 +1036,9 @@ def sample_row_keys( def execute_rpc(): results = self.client._gapic_client.sample_row_keys( - table_name=self.table_name, - app_profile_id=self.app_profile_id, + request=SampleRowKeysRequest( + app_profile_id=self.app_profile_id, **self._request_path + ), timeout=next(attempt_timeout_gen), retry=None, ) @@ -1096,10 +1154,14 @@ def mutate_row( sleep_generator = retries.exponential_sleep_generator(0.01, 2, 60) target = partial( self.client._gapic_client.mutate_row, - row_key=row_key.encode("utf-8") if isinstance(row_key, str) else row_key, - mutations=[mutation._to_pb() for mutation in mutations_list], - table_name=self.table_name, - app_profile_id=self.app_profile_id, + request=MutateRowRequest( + row_key=row_key.encode("utf-8") + if isinstance(row_key, str) + else row_key, + mutations=[mutation._to_pb() for mutation in mutations_list], + app_profile_id=self.app_profile_id, + **self._request_path, + ), timeout=attempt_timeout, retry=None, ) @@ -1214,12 +1276,16 @@ def check_and_mutate_row( false_case_mutations = [false_case_mutations] false_case_list = [m._to_pb() for m in false_case_mutations or []] result = self.client._gapic_client.check_and_mutate_row( - true_mutations=true_case_list, - false_mutations=false_case_list, - predicate_filter=predicate._to_pb() if predicate is not None else None, - row_key=row_key.encode("utf-8") if isinstance(row_key, str) else row_key, - table_name=self.table_name, - app_profile_id=self.app_profile_id, + request=CheckAndMutateRowRequest( + true_mutations=true_case_list, + false_mutations=false_case_list, + predicate_filter=predicate._to_pb() if predicate is not None else None, + row_key=row_key.encode("utf-8") + if isinstance(row_key, str) + else row_key, + app_profile_id=self.app_profile_id, + **self._request_path, + ), timeout=operation_timeout, retry=None, ) @@ -1261,10 +1327,14 @@ def read_modify_write_row( if not rules: raise ValueError("rules must contain at least one item") result = self.client._gapic_client.read_modify_write_row( - rules=[rule._to_pb() for rule in rules], - row_key=row_key.encode("utf-8") if isinstance(row_key, str) else row_key, - table_name=self.table_name, - app_profile_id=self.app_profile_id, + request=ReadModifyWriteRowRequest( + rules=[rule._to_pb() for rule in rules], + row_key=row_key.encode("utf-8") + if isinstance(row_key, str) + else row_key, + app_profile_id=self.app_profile_id, + **self._request_path, + ), timeout=operation_timeout, retry=None, ) @@ -1291,3 +1361,85 @@ def __exit__(self, exc_type, exc_val, exc_tb): Unregister this instance with the client, so that grpc channels will no longer be warmed""" self.close() + + +@CrossSync._Sync_Impl.add_mapping_decorator("Table") +class Table(_DataApiTarget): + """ + Main Data API surface for interacting with a Bigtable table. 
+ + Table object maintains table_id, and app_profile_id context, and passes them with + each call + """ + + @property + def _request_path(self) -> dict[str, str]: + return {"table_name": self.table_name} + + +@CrossSync._Sync_Impl.add_mapping_decorator("AuthorizedView") +class AuthorizedView(_DataApiTarget): + """ + Provides access to an authorized view of a table. + + An authorized view is a subset of a table that you configure to include specific table data. + Then you grant access to the authorized view separately from access to the table. + + AuthorizedView object maintains table_id, app_profile_id, and authorized_view_id context, + and passed them with each call + """ + + def __init__( + self, + client, + instance_id, + table_id, + authorized_view_id, + app_profile_id: str | None = None, + **kwargs, + ): + """Initialize an AuthorizedView instance + + + + Args: + instance_id: The Bigtable instance ID to associate with this client. + instance_id is combined with the client's project to fully + specify the instance + table_id: The ID of the table. table_id is combined with the + instance_id and the client's project to fully specify the table + authorized_view_id: The id for the authorized view to use for requests + app_profile_id: The app profile to associate with requests. + https://cloud.google.com/bigtable/docs/app-profiles + default_read_rows_operation_timeout: The default timeout for read rows + operations, in seconds. If not set, defaults to 600 seconds (10 minutes) + default_read_rows_attempt_timeout: The default timeout for individual + read rows rpc requests, in seconds. If not set, defaults to 20 seconds + default_mutate_rows_operation_timeout: The default timeout for mutate rows + operations, in seconds. If not set, defaults to 600 seconds (10 minutes) + default_mutate_rows_attempt_timeout: The default timeout for individual + mutate rows rpc requests, in seconds. If not set, defaults to 60 seconds + default_operation_timeout: The default timeout for all other operations, in + seconds. If not set, defaults to 60 seconds + default_attempt_timeout: The default timeout for all other individual rpc + requests, in seconds. If not set, defaults to 20 seconds + default_read_rows_retryable_errors: a list of errors that will be retried + if encountered during read_rows and related operations. + Defaults to 4 (DeadlineExceeded), 14 (ServiceUnavailable), and 10 (Aborted) + default_mutate_rows_retryable_errors: a list of errors that will be retried + if encountered during mutate_rows and related operations. + Defaults to 4 (DeadlineExceeded) and 14 (ServiceUnavailable) + default_retryable_errors: a list of errors that will be retried if + encountered during all other operations. 
+ Defaults to 4 (DeadlineExceeded) and 14 (ServiceUnavailable) + Raises: + None""" + super().__init__(client, instance_id, table_id, app_profile_id, **kwargs) + self.authorized_view_id = authorized_view_id + self.authorized_view_name: str = self.client._gapic_client.authorized_view_path( + self.client.project, instance_id, table_id, authorized_view_id + ) + + @property + def _request_path(self) -> dict[str, str]: + return {"authorized_view_name": self.authorized_view_name} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/mutations_batcher.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/mutations_batcher.py index 2e4237b741a4..84f0ba8c0618 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/mutations_batcher.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/mutations_batcher.py @@ -32,7 +32,9 @@ if TYPE_CHECKING: from google.cloud.bigtable.data.mutations import RowMutationEntry - from google.cloud.bigtable.data._sync_autogen.client import Table as TableType + from google.cloud.bigtable.data._sync_autogen.client import ( + _DataApiTarget as TargetType, + ) _MB_SIZE = 1024 * 1024 @@ -148,7 +150,7 @@ def add_to_flow(self, mutations: RowMutationEntry | list[RowMutationEntry]): class MutationsBatcher: """ - Allows users to send batches using context manager API: + Allows users to send batches using context manager API. Runs mutate_row, mutate_rows, and check_and_mutate_row internally, combining to use as few network requests as required @@ -160,7 +162,7 @@ class MutationsBatcher: - when batcher is closed or destroyed Args: - table: Table to preform rpc calls + table: table or autrhorized_view used to preform rpc calls flush_interval: Automatically flush every flush_interval seconds. If None, no time-based flushing is performed. flush_limit_mutation_count: Flush immediately after flush_limit_mutation_count @@ -179,7 +181,7 @@ class MutationsBatcher: def __init__( self, - table: TableType, + table: TargetType, *, flush_interval: float | None = 5, flush_limit_mutation_count: int | None = 1000, @@ -198,7 +200,7 @@ def __init__( batch_retryable_errors, table ) self._closed = CrossSync._Sync_Impl.Event() - self._table = table + self._target = table self._staged_entries: list[RowMutationEntry] = [] (self._staged_count, self._staged_bytes) = (0, 0) self._flow_control = CrossSync._Sync_Impl._FlowControl( @@ -324,8 +326,8 @@ def _execute_mutate_rows( FailedMutationEntryError objects will not contain index information""" try: operation = CrossSync._Sync_Impl._MutateRowsOperation( - self._table.client._gapic_client, - self._table, + self._target.client._gapic_client, + self._target, batch, operation_timeout=self._operation_timeout, attempt_timeout=self._attempt_timeout, @@ -414,7 +416,7 @@ def _on_exit(self): """Called when program is exited. Raises warning if unflushed mutations remain""" if not self._closed.is_set() and self._staged_entries: warnings.warn( - f"MutationsBatcher for table {self._table.table_name} was not closed. {len(self._staged_entries)} Unflushed mutations will not be sent to the server." + f"MutationsBatcher for target {self._target!r} was not closed. {len(self._staged_entries)} Unflushed mutations will not be sent to the server." 
) @staticmethod diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/read_rows_query.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/read_rows_query.py index e0839a2af7be..7652bfbb9af7 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/read_rows_query.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/read_rows_query.py @@ -489,11 +489,11 @@ def _to_pb(self, table) -> ReadRowsRequestPB: ReadRowsRequest protobuf """ return ReadRowsRequestPB( - table_name=table.table_name, app_profile_id=table.app_profile_id, filter=self.filter._to_pb() if self.filter else None, rows_limit=self.limit or 0, rows=self._row_set, + **table._request_path, ) def __eq__(self, other): diff --git a/packages/google-cloud-bigtable/tests/system/data/setup_fixtures.py b/packages/google-cloud-bigtable/tests/system/data/setup_fixtures.py index 3b5a0af0681c..a77ffc008b10 100644 --- a/packages/google-cloud-bigtable/tests/system/data/setup_fixtures.py +++ b/packages/google-cloud-bigtable/tests/system/data/setup_fixtures.py @@ -20,6 +20,12 @@ import os import uuid +from . import TEST_FAMILY, TEST_FAMILY_2 + +# authorized view subset to allow all qualifiers +ALLOW_ALL = "" +ALL_QUALIFIERS = {"qualifier_prefixes": [ALLOW_ALL]} + @pytest.fixture(scope="session") def admin_client(): @@ -140,6 +146,63 @@ def table_id( print(f"Table {init_table_id} not found, skipping deletion") +@pytest.fixture(scope="session") +def authorized_view_id( + admin_client, + project_id, + instance_id, + table_id, +): + """ + Creates and returns a new temporary authorized view for the test session + + Args: + - admin_client: Client for interacting with the Table Admin API. Supplied by the admin_client fixture. + - project_id: The project ID of the GCP project to test against. Supplied by the project_id fixture. + - instance_id: The ID of the Bigtable instance to test against. Supplied by the instance_id fixture. + - table_id: The ID of the table to create the authorized view for. Supplied by the table_id fixture. + """ + from google.api_core import exceptions + from google.api_core import retry + + retry = retry.Retry( + predicate=retry.if_exception_type(exceptions.FailedPrecondition) + ) + new_view_id = uuid.uuid4().hex[:8] + parent_path = f"projects/{project_id}/instances/{instance_id}/tables/{table_id}" + new_path = f"{parent_path}/authorizedViews/{new_view_id}" + try: + print(f"Creating view: {new_path}") + admin_client.table_admin_client.create_authorized_view( + request={ + "parent": parent_path, + "authorized_view_id": new_view_id, + "authorized_view": { + "subset_view": { + "row_prefixes": [ALLOW_ALL], + "family_subsets": { + TEST_FAMILY: ALL_QUALIFIERS, + TEST_FAMILY_2: ALL_QUALIFIERS, + }, + }, + }, + }, + retry=retry, + ) + except exceptions.AlreadyExists: + pass + except exceptions.MethodNotImplemented: + # will occur when run in emulator. 
Pass empty id + new_view_id = None + yield new_view_id + if new_view_id: + print(f"Deleting view: {new_path}") + try: + admin_client.table_admin_client.delete_authorized_view(name=new_path) + except exceptions.NotFound: + print(f"View {new_view_id} not found, skipping deletion") + + @pytest.fixture(scope="session") def project_id(client): """Returns the project ID from the client.""" diff --git a/packages/google-cloud-bigtable/tests/system/data/test_system_async.py b/packages/google-cloud-bigtable/tests/system/data/test_system_async.py index d45c7c16eb10..b59131414a52 100644 --- a/packages/google-cloud-bigtable/tests/system/data/test_system_async.py +++ b/packages/google-cloud-bigtable/tests/system/data/test_system_async.py @@ -18,7 +18,7 @@ import uuid import os from google.api_core import retry -from google.api_core.exceptions import ClientError +from google.api_core.exceptions import ClientError, PermissionDenied from google.cloud.bigtable.data.execute_query.metadata import SqlType from google.cloud.bigtable.data.read_modify_write_rules import _MAX_INCREMENT_VALUE @@ -33,6 +33,12 @@ __CROSS_SYNC_OUTPUT__ = "tests.system.data.test_system_autogen" +TARGETS = ["table"] +if not os.environ.get(BIGTABLE_EMULATOR): + # emulator doesn't support authorized views + TARGETS.append("authorized_view") + + @CrossSync.convert_class( sync_name="TempRowBuilder", add_mapping_for_name="TempRowBuilder", @@ -42,9 +48,9 @@ class TempRowBuilderAsync: Used to add rows to a table for testing purposes. """ - def __init__(self, table): + def __init__(self, target): self.rows = [] - self.table = table + self.target = target @CrossSync.convert async def add_row( @@ -55,7 +61,7 @@ async def add_row( elif isinstance(value, int): value = value.to_bytes(8, byteorder="big", signed=True) request = { - "table_name": self.table.table_name, + "table_name": self.target.table_name, "row_key": row_key, "mutations": [ { @@ -67,20 +73,20 @@ async def add_row( } ], } - await self.table.client._gapic_client.mutate_row(request) + await self.target.client._gapic_client.mutate_row(request) self.rows.append(row_key) @CrossSync.convert async def delete_rows(self): if self.rows: request = { - "table_name": self.table.table_name, + "table_name": self.target.table_name, "entries": [ {"row_key": row, "mutations": [{"delete_from_row": {}}]} for row in self.rows ], } - await self.table.client._gapic_client.mutate_rows(request) + await self.target.client._gapic_client.mutate_rows(request) @CrossSync.convert_class(sync_name="TestSystem") @@ -93,10 +99,23 @@ async def client(self): yield client @CrossSync.convert - @CrossSync.pytest_fixture(scope="session") - async def table(self, client, table_id, instance_id): - async with client.get_table(instance_id, table_id) as table: - yield table + @CrossSync.pytest_fixture(scope="session", params=TARGETS) + async def target(self, client, table_id, authorized_view_id, instance_id, request): + """ + This fixture runs twice: once for a standard table, and once with an authorized view + + Note: emulator doesn't support authorized views. 
Only use target + """ + if request.param == "table": + async with client.get_table(instance_id, table_id) as table: + yield table + elif request.param == "authorized_view": + async with client.get_authorized_view( + instance_id, table_id, authorized_view_id + ) as view: + yield view + else: + raise ValueError(f"unknown target type: {request.param}") @CrossSync.drop @pytest.fixture(scope="session") @@ -138,14 +157,14 @@ def cluster_config(self, project_id): return cluster @CrossSync.convert - @pytest.mark.usefixtures("table") - async def _retrieve_cell_value(self, table, row_key): + @pytest.mark.usefixtures("target") + async def _retrieve_cell_value(self, target, row_key): """ Helper to read an individual row """ from google.cloud.bigtable.data import ReadRowsQuery - row_list = await table.read_rows(ReadRowsQuery(row_keys=row_key)) + row_list = await target.read_rows(ReadRowsQuery(row_keys=row_key)) assert len(row_list) == 1 row = row_list[0] cell = row.cells[0] @@ -174,32 +193,32 @@ async def _create_row_and_mutation( @CrossSync.convert @CrossSync.pytest_fixture(scope="function") - async def temp_rows(self, table): - builder = CrossSync.TempRowBuilder(table) + async def temp_rows(self, target): + builder = CrossSync.TempRowBuilder(target) yield builder await builder.delete_rows() - @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @pytest.mark.usefixtures("client") @CrossSync.Retry( predicate=retry.if_exception_type(ClientError), initial=1, maximum=10 ) @CrossSync.pytest - async def test_ping_and_warm_gapic(self, client, table): + async def test_ping_and_warm_gapic(self, client, target): """ Simple ping rpc test This test ensures channels are able to authenticate with backend """ - request = {"name": table.instance_name} + request = {"name": target.instance_name} await client._gapic_client.ping_and_warm(request) - @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @pytest.mark.usefixtures("client") @CrossSync.Retry( predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 ) @CrossSync.pytest - async def test_ping_and_warm(self, client, table): + async def test_ping_and_warm(self, client, target): """ Test ping and warm from handwritten client """ @@ -244,41 +263,43 @@ async def test_channel_refresh(self, table_id, instance_id, temp_rows): await client.close() @CrossSync.pytest - @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @CrossSync.Retry( predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 ) - async def test_mutation_set_cell(self, table, temp_rows): + async def test_mutation_set_cell(self, target, temp_rows): """ Ensure cells can be set properly """ row_key = b"bulk_mutate" new_value = uuid.uuid4().hex.encode() row_key, mutation = await self._create_row_and_mutation( - table, temp_rows, new_value=new_value + target, temp_rows, new_value=new_value ) - await table.mutate_row(row_key, mutation) + await target.mutate_row(row_key, mutation) # ensure cell is updated - assert (await self._retrieve_cell_value(table, row_key)) == new_value + assert (await self._retrieve_cell_value(target, row_key)) == new_value @pytest.mark.skipif( bool(os.environ.get(BIGTABLE_EMULATOR)), reason="emulator doesn't use splits" ) @pytest.mark.usefixtures("client") - @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @CrossSync.Retry( predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 ) @CrossSync.pytest - async def test_sample_row_keys(self, client, table, temp_rows, 
column_split_config): + async def test_sample_row_keys( + self, client, target, temp_rows, column_split_config + ): """ - Sample keys should return a single sample in small test tables + Sample keys should return a single sample in small test targets """ await temp_rows.add_row(b"row_key_1") await temp_rows.add_row(b"row_key_2") - results = await table.sample_row_keys() + results = await target.sample_row_keys() assert len(results) == len(column_split_config) + 1 # first keys should match the split config for idx in range(len(column_split_config)): @@ -289,9 +310,9 @@ async def test_sample_row_keys(self, client, table, temp_rows, column_split_conf assert isinstance(results[-1][1], int) @pytest.mark.usefixtures("client") - @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @CrossSync.pytest - async def test_bulk_mutations_set_cell(self, client, table, temp_rows): + async def test_bulk_mutations_set_cell(self, client, target, temp_rows): """ Ensure cells can be set properly """ @@ -299,17 +320,17 @@ async def test_bulk_mutations_set_cell(self, client, table, temp_rows): new_value = uuid.uuid4().hex.encode() row_key, mutation = await self._create_row_and_mutation( - table, temp_rows, new_value=new_value + target, temp_rows, new_value=new_value ) bulk_mutation = RowMutationEntry(row_key, [mutation]) - await table.bulk_mutate_rows([bulk_mutation]) + await target.bulk_mutate_rows([bulk_mutation]) # ensure cell is updated - assert (await self._retrieve_cell_value(table, row_key)) == new_value + assert (await self._retrieve_cell_value(target, row_key)) == new_value @CrossSync.pytest - async def test_bulk_mutations_raise_exception(self, client, table): + async def test_bulk_mutations_raise_exception(self, client, target): """ If an invalid mutation is passed, an exception should be raised """ @@ -324,7 +345,7 @@ async def test_bulk_mutations_raise_exception(self, client, table): bulk_mutation = RowMutationEntry(row_key, [mutation]) with pytest.raises(MutationsExceptionGroup) as exc: - await table.bulk_mutate_rows([bulk_mutation]) + await target.bulk_mutate_rows([bulk_mutation]) assert len(exc.value.exceptions) == 1 entry_error = exc.value.exceptions[0] assert isinstance(entry_error, FailedMutationEntryError) @@ -332,12 +353,12 @@ async def test_bulk_mutations_raise_exception(self, client, table): assert entry_error.entry == bulk_mutation @pytest.mark.usefixtures("client") - @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @CrossSync.Retry( predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 ) @CrossSync.pytest - async def test_mutations_batcher_context_manager(self, client, table, temp_rows): + async def test_mutations_batcher_context_manager(self, client, target, temp_rows): """ test batcher with context manager. 
Should flush on exit """ @@ -345,28 +366,28 @@ async def test_mutations_batcher_context_manager(self, client, table, temp_rows) new_value, new_value2 = [uuid.uuid4().hex.encode() for _ in range(2)] row_key, mutation = await self._create_row_and_mutation( - table, temp_rows, new_value=new_value + target, temp_rows, new_value=new_value ) row_key2, mutation2 = await self._create_row_and_mutation( - table, temp_rows, new_value=new_value2 + target, temp_rows, new_value=new_value2 ) bulk_mutation = RowMutationEntry(row_key, [mutation]) bulk_mutation2 = RowMutationEntry(row_key2, [mutation2]) - async with table.mutations_batcher() as batcher: + async with target.mutations_batcher() as batcher: await batcher.append(bulk_mutation) await batcher.append(bulk_mutation2) # ensure cell is updated - assert (await self._retrieve_cell_value(table, row_key)) == new_value + assert (await self._retrieve_cell_value(target, row_key)) == new_value assert len(batcher._staged_entries) == 0 @pytest.mark.usefixtures("client") - @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @CrossSync.Retry( predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 ) @CrossSync.pytest - async def test_mutations_batcher_timer_flush(self, client, table, temp_rows): + async def test_mutations_batcher_timer_flush(self, client, target, temp_rows): """ batch should occur after flush_interval seconds """ @@ -374,26 +395,26 @@ async def test_mutations_batcher_timer_flush(self, client, table, temp_rows): new_value = uuid.uuid4().hex.encode() row_key, mutation = await self._create_row_and_mutation( - table, temp_rows, new_value=new_value + target, temp_rows, new_value=new_value ) bulk_mutation = RowMutationEntry(row_key, [mutation]) flush_interval = 0.1 - async with table.mutations_batcher(flush_interval=flush_interval) as batcher: + async with target.mutations_batcher(flush_interval=flush_interval) as batcher: await batcher.append(bulk_mutation) await CrossSync.yield_to_event_loop() assert len(batcher._staged_entries) == 1 await CrossSync.sleep(flush_interval + 0.1) assert len(batcher._staged_entries) == 0 # ensure cell is updated - assert (await self._retrieve_cell_value(table, row_key)) == new_value + assert (await self._retrieve_cell_value(target, row_key)) == new_value @pytest.mark.usefixtures("client") - @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @CrossSync.Retry( predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 ) @CrossSync.pytest - async def test_mutations_batcher_count_flush(self, client, table, temp_rows): + async def test_mutations_batcher_count_flush(self, client, target, temp_rows): """ batch should flush after flush_limit_mutation_count mutations """ @@ -401,15 +422,15 @@ async def test_mutations_batcher_count_flush(self, client, table, temp_rows): new_value, new_value2 = [uuid.uuid4().hex.encode() for _ in range(2)] row_key, mutation = await self._create_row_and_mutation( - table, temp_rows, new_value=new_value + target, temp_rows, new_value=new_value ) bulk_mutation = RowMutationEntry(row_key, [mutation]) row_key2, mutation2 = await self._create_row_and_mutation( - table, temp_rows, new_value=new_value2 + target, temp_rows, new_value=new_value2 ) bulk_mutation2 = RowMutationEntry(row_key2, [mutation2]) - async with table.mutations_batcher(flush_limit_mutation_count=2) as batcher: + async with target.mutations_batcher(flush_limit_mutation_count=2) as batcher: await batcher.append(bulk_mutation) assert len(batcher._flush_jobs) == 0 # should be 
noop; flush not scheduled @@ -425,16 +446,16 @@ async def test_mutations_batcher_count_flush(self, client, table, temp_rows): assert len(batcher._staged_entries) == 0 assert len(batcher._flush_jobs) == 0 # ensure cells were updated - assert (await self._retrieve_cell_value(table, row_key)) == new_value - assert (await self._retrieve_cell_value(table, row_key2)) == new_value2 + assert (await self._retrieve_cell_value(target, row_key)) == new_value + assert (await self._retrieve_cell_value(target, row_key2)) == new_value2 @pytest.mark.usefixtures("client") - @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @CrossSync.Retry( predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 ) @CrossSync.pytest - async def test_mutations_batcher_bytes_flush(self, client, table, temp_rows): + async def test_mutations_batcher_bytes_flush(self, client, target, temp_rows): """ batch should flush after flush_limit_bytes bytes """ @@ -442,17 +463,17 @@ async def test_mutations_batcher_bytes_flush(self, client, table, temp_rows): new_value, new_value2 = [uuid.uuid4().hex.encode() for _ in range(2)] row_key, mutation = await self._create_row_and_mutation( - table, temp_rows, new_value=new_value + target, temp_rows, new_value=new_value ) bulk_mutation = RowMutationEntry(row_key, [mutation]) row_key2, mutation2 = await self._create_row_and_mutation( - table, temp_rows, new_value=new_value2 + target, temp_rows, new_value=new_value2 ) bulk_mutation2 = RowMutationEntry(row_key2, [mutation2]) flush_limit = bulk_mutation.size() + bulk_mutation2.size() - 1 - async with table.mutations_batcher(flush_limit_bytes=flush_limit) as batcher: + async with target.mutations_batcher(flush_limit_bytes=flush_limit) as batcher: await batcher.append(bulk_mutation) assert len(batcher._flush_jobs) == 0 assert len(batcher._staged_entries) == 1 @@ -466,13 +487,13 @@ async def test_mutations_batcher_bytes_flush(self, client, table, temp_rows): # for sync version: grab result future.result() # ensure cells were updated - assert (await self._retrieve_cell_value(table, row_key)) == new_value - assert (await self._retrieve_cell_value(table, row_key2)) == new_value2 + assert (await self._retrieve_cell_value(target, row_key)) == new_value + assert (await self._retrieve_cell_value(target, row_key2)) == new_value2 @pytest.mark.usefixtures("client") - @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @CrossSync.pytest - async def test_mutations_batcher_no_flush(self, client, table, temp_rows): + async def test_mutations_batcher_no_flush(self, client, target, temp_rows): """ test with no flush requirements met """ @@ -481,16 +502,16 @@ async def test_mutations_batcher_no_flush(self, client, table, temp_rows): new_value = uuid.uuid4().hex.encode() start_value = b"unchanged" row_key, mutation = await self._create_row_and_mutation( - table, temp_rows, start_value=start_value, new_value=new_value + target, temp_rows, start_value=start_value, new_value=new_value ) bulk_mutation = RowMutationEntry(row_key, [mutation]) row_key2, mutation2 = await self._create_row_and_mutation( - table, temp_rows, start_value=start_value, new_value=new_value + target, temp_rows, start_value=start_value, new_value=new_value ) bulk_mutation2 = RowMutationEntry(row_key2, [mutation2]) size_limit = bulk_mutation.size() + bulk_mutation2.size() + 1 - async with table.mutations_batcher( + async with target.mutations_batcher( flush_limit_bytes=size_limit, flush_limit_mutation_count=3, flush_interval=1 ) as batcher: await 
batcher.append(bulk_mutation) @@ -502,16 +523,16 @@ async def test_mutations_batcher_no_flush(self, client, table, temp_rows): assert len(batcher._staged_entries) == 2 assert len(batcher._flush_jobs) == 0 # ensure cells were not updated - assert (await self._retrieve_cell_value(table, row_key)) == start_value - assert (await self._retrieve_cell_value(table, row_key2)) == start_value + assert (await self._retrieve_cell_value(target, row_key)) == start_value + assert (await self._retrieve_cell_value(target, row_key2)) == start_value @pytest.mark.usefixtures("client") - @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @CrossSync.Retry( predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 ) @CrossSync.pytest - async def test_mutations_batcher_large_batch(self, client, table, temp_rows): + async def test_mutations_batcher_large_batch(self, client, target, temp_rows): """ test batcher with large batch of mutations """ @@ -527,14 +548,14 @@ async def test_mutations_batcher_large_batch(self, client, table, temp_rows): # append row key for eventual deletion temp_rows.rows.append(row_key) - async with table.mutations_batcher() as batcher: + async with target.mutations_batcher() as batcher: for mutation in row_mutations: await batcher.append(mutation) # ensure cell is updated assert len(batcher._staged_entries) == 0 @pytest.mark.usefixtures("client") - @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @pytest.mark.parametrize( "start,increment,expected", [ @@ -552,7 +573,7 @@ async def test_mutations_batcher_large_batch(self, client, table, temp_rows): ) @CrossSync.pytest async def test_read_modify_write_row_increment( - self, client, table, temp_rows, start, increment, expected + self, client, target, temp_rows, start, increment, expected ): """ test read_modify_write_row @@ -567,17 +588,17 @@ async def test_read_modify_write_row_increment( ) rule = IncrementRule(family, qualifier, increment) - result = await table.read_modify_write_row(row_key, rule) + result = await target.read_modify_write_row(row_key, rule) assert result.row_key == row_key assert len(result) == 1 assert result[0].family == family assert result[0].qualifier == qualifier assert int(result[0]) == expected # ensure that reading from server gives same value - assert (await self._retrieve_cell_value(table, row_key)) == result[0].value + assert (await self._retrieve_cell_value(target, row_key)) == result[0].value @pytest.mark.usefixtures("client") - @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @pytest.mark.parametrize( "start,append,expected", [ @@ -592,7 +613,7 @@ async def test_read_modify_write_row_increment( ) @CrossSync.pytest async def test_read_modify_write_row_append( - self, client, table, temp_rows, start, append, expected + self, client, target, temp_rows, start, append, expected ): """ test read_modify_write_row @@ -607,19 +628,19 @@ async def test_read_modify_write_row_append( ) rule = AppendValueRule(family, qualifier, append) - result = await table.read_modify_write_row(row_key, rule) + result = await target.read_modify_write_row(row_key, rule) assert result.row_key == row_key assert len(result) == 1 assert result[0].family == family assert result[0].qualifier == qualifier assert result[0].value == expected # ensure that reading from server gives same value - assert (await self._retrieve_cell_value(table, row_key)) == result[0].value + assert (await self._retrieve_cell_value(target, row_key)) == result[0].value 
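# --------------------------------------------------------------------------
# Editor's sketch (not part of the patch): the parametrized ``target`` fixture
# works because Table and AuthorizedView expose the same data API surface, so
# the read_modify_write tests above run unchanged against either target type.
# A rough usage sketch under that assumption follows; the instance/table/view
# IDs, family name, and qualifier are placeholders, and the async client name
# BigtableDataClientAsync is assumed from the public package surface.
from google.cloud.bigtable.data import BigtableDataClientAsync
from google.cloud.bigtable.data.read_modify_write_rules import IncrementRule


async def _increment_counter_sketch(instance_id, table_id, view_id):
    async with BigtableDataClientAsync() as client:
        # Plain table target.
        async with client.get_table(instance_id, table_id) as table:
            await table.read_modify_write_row(
                b"row-key", IncrementRule("family", b"counter", 1)
            )
        # Authorized-view target: same call, but requests are routed by
        # authorized_view_name instead of table_name (via _request_path).
        async with client.get_authorized_view(
            instance_id, table_id, view_id
        ) as view:
            await view.read_modify_write_row(
                b"row-key", IncrementRule("family", b"counter", 1)
            )
# --------------------------------------------------------------------------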
@pytest.mark.usefixtures("client") - @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @CrossSync.pytest - async def test_read_modify_write_row_chained(self, client, table, temp_rows): + async def test_read_modify_write_row_chained(self, client, target, temp_rows): """ test read_modify_write_row with multiple rules """ @@ -640,7 +661,7 @@ async def test_read_modify_write_row_chained(self, client, table, temp_rows): AppendValueRule(family, qualifier, "world"), AppendValueRule(family, qualifier, "!"), ] - result = await table.read_modify_write_row(row_key, rule) + result = await target.read_modify_write_row(row_key, rule) assert result.row_key == row_key assert result[0].family == family assert result[0].qualifier == qualifier @@ -651,10 +672,10 @@ async def test_read_modify_write_row_chained(self, client, table, temp_rows): + b"helloworld!" ) # ensure that reading from server gives same value - assert (await self._retrieve_cell_value(table, row_key)) == result[0].value + assert (await self._retrieve_cell_value(target, row_key)) == result[0].value @pytest.mark.usefixtures("client") - @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @pytest.mark.parametrize( "start_val,predicate_range,expected_result", [ @@ -664,7 +685,7 @@ async def test_read_modify_write_row_chained(self, client, table, temp_rows): ) @CrossSync.pytest async def test_check_and_mutate( - self, client, table, temp_rows, start_val, predicate_range, expected_result + self, client, target, temp_rows, start_val, predicate_range, expected_result ): """ test that check_and_mutate_row works applies the right mutations, and returns the right result @@ -689,7 +710,7 @@ async def test_check_and_mutate( family=TEST_FAMILY, qualifier=qualifier, new_value=true_mutation_value ) predicate = ValueRangeFilter(predicate_range[0], predicate_range[1]) - result = await table.check_and_mutate_row( + result = await target.check_and_mutate_row( row_key, predicate, true_case_mutations=true_mutation, @@ -700,34 +721,34 @@ async def test_check_and_mutate( expected_value = ( true_mutation_value if expected_result else false_mutation_value ) - assert (await self._retrieve_cell_value(table, row_key)) == expected_value + assert (await self._retrieve_cell_value(target, row_key)) == expected_value @pytest.mark.skipif( bool(os.environ.get(BIGTABLE_EMULATOR)), reason="emulator doesn't raise InvalidArgument", ) @pytest.mark.usefixtures("client") - @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @CrossSync.pytest - async def test_check_and_mutate_empty_request(self, client, table): + async def test_check_and_mutate_empty_request(self, client, target): """ check_and_mutate with no true or fale mutations should raise an error """ from google.api_core import exceptions with pytest.raises(exceptions.InvalidArgument) as e: - await table.check_and_mutate_row( + await target.check_and_mutate_row( b"row_key", None, true_case_mutations=None, false_case_mutations=None ) assert "No mutations provided" in str(e.value) - @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @CrossSync.convert(replace_symbols={"__anext__": "__next__"}) @CrossSync.Retry( predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 ) @CrossSync.pytest - async def test_read_rows_stream(self, table, temp_rows): + async def test_read_rows_stream(self, target, temp_rows): """ Ensure that the read_rows_stream method works """ @@ -735,7 +756,7 @@ async def test_read_rows_stream(self, table, temp_rows): await 
temp_rows.add_row(b"row_key_2") # full table scan - generator = await table.read_rows_stream({}) + generator = await target.read_rows_stream({}) first_row = await generator.__anext__() second_row = await generator.__anext__() assert first_row.row_key == b"row_key_1" @@ -743,29 +764,29 @@ async def test_read_rows_stream(self, table, temp_rows): with pytest.raises(CrossSync.StopIteration): await generator.__anext__() - @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @CrossSync.Retry( predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 ) @CrossSync.pytest - async def test_read_rows(self, table, temp_rows): + async def test_read_rows(self, target, temp_rows): """ Ensure that the read_rows method works """ await temp_rows.add_row(b"row_key_1") await temp_rows.add_row(b"row_key_2") # full table scan - row_list = await table.read_rows({}) + row_list = await target.read_rows({}) assert len(row_list) == 2 assert row_list[0].row_key == b"row_key_1" assert row_list[1].row_key == b"row_key_2" - @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @CrossSync.Retry( predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 ) @CrossSync.pytest - async def test_read_rows_sharded_simple(self, table, temp_rows): + async def test_read_rows_sharded_simple(self, target, temp_rows): """ Test read rows sharded with two queries """ @@ -777,19 +798,19 @@ async def test_read_rows_sharded_simple(self, table, temp_rows): await temp_rows.add_row(b"d") query1 = ReadRowsQuery(row_keys=[b"a", b"c"]) query2 = ReadRowsQuery(row_keys=[b"b", b"d"]) - row_list = await table.read_rows_sharded([query1, query2]) + row_list = await target.read_rows_sharded([query1, query2]) assert len(row_list) == 4 assert row_list[0].row_key == b"a" assert row_list[1].row_key == b"c" assert row_list[2].row_key == b"b" assert row_list[3].row_key == b"d" - @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @CrossSync.Retry( predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 ) @CrossSync.pytest - async def test_read_rows_sharded_from_sample(self, table, temp_rows): + async def test_read_rows_sharded_from_sample(self, target, temp_rows): """ Test end-to-end sharding """ @@ -801,21 +822,21 @@ async def test_read_rows_sharded_from_sample(self, table, temp_rows): await temp_rows.add_row(b"c") await temp_rows.add_row(b"d") - table_shard_keys = await table.sample_row_keys() + table_shard_keys = await target.sample_row_keys() query = ReadRowsQuery(row_ranges=[RowRange(start_key=b"b", end_key=b"z")]) shard_queries = query.shard(table_shard_keys) - row_list = await table.read_rows_sharded(shard_queries) + row_list = await target.read_rows_sharded(shard_queries) assert len(row_list) == 3 assert row_list[0].row_key == b"b" assert row_list[1].row_key == b"c" assert row_list[2].row_key == b"d" - @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @CrossSync.Retry( predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 ) @CrossSync.pytest - async def test_read_rows_sharded_filters_limits(self, table, temp_rows): + async def test_read_rows_sharded_filters_limits(self, target, temp_rows): """ Test read rows sharded with filters and limits """ @@ -831,7 +852,7 @@ async def test_read_rows_sharded_filters_limits(self, table, temp_rows): label_filter2 = ApplyLabelFilter("second") query1 = ReadRowsQuery(row_keys=[b"a", b"c"], limit=1, row_filter=label_filter1) query2 = ReadRowsQuery(row_keys=[b"b", b"d"], row_filter=label_filter2) 
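        # (Editor's note, not part of the patch: read_rows_sharded accepts a
        # list of queries, runs them concurrently, and concatenates results
        # query-by-query, which is what the ordering assertions in these
        # sharded tests rely on. Outside of tests the shard list is usually
        # derived from the table's own split points, as in
        # test_read_rows_sharded_from_sample above, e.g.:
        #
        #     shard_keys = await target.sample_row_keys()
        #     shard_queries = ReadRowsQuery(row_filter=some_filter).shard(shard_keys)
        #     rows = await target.read_rows_sharded(shard_queries)
        #
        # where ``some_filter`` is a placeholder for any row filter.)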
- row_list = await table.read_rows_sharded([query1, query2]) + row_list = await target.read_rows_sharded([query1, query2]) assert len(row_list) == 3 assert row_list[0].row_key == b"a" assert row_list[1].row_key == b"b" @@ -840,12 +861,12 @@ async def test_read_rows_sharded_filters_limits(self, table, temp_rows): assert row_list[1][0].labels == ["second"] assert row_list[2][0].labels == ["second"] - @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @CrossSync.Retry( predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 ) @CrossSync.pytest - async def test_read_rows_range_query(self, table, temp_rows): + async def test_read_rows_range_query(self, target, temp_rows): """ Ensure that the read_rows method works """ @@ -858,17 +879,17 @@ async def test_read_rows_range_query(self, table, temp_rows): await temp_rows.add_row(b"d") # full table scan query = ReadRowsQuery(row_ranges=RowRange(start_key=b"b", end_key=b"d")) - row_list = await table.read_rows(query) + row_list = await target.read_rows(query) assert len(row_list) == 2 assert row_list[0].row_key == b"b" assert row_list[1].row_key == b"c" - @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @CrossSync.Retry( predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 ) @CrossSync.pytest - async def test_read_rows_single_key_query(self, table, temp_rows): + async def test_read_rows_single_key_query(self, target, temp_rows): """ Ensure that the read_rows method works with specified query """ @@ -880,17 +901,17 @@ async def test_read_rows_single_key_query(self, table, temp_rows): await temp_rows.add_row(b"d") # retrieve specific keys query = ReadRowsQuery(row_keys=[b"a", b"c"]) - row_list = await table.read_rows(query) + row_list = await target.read_rows(query) assert len(row_list) == 2 assert row_list[0].row_key == b"a" assert row_list[1].row_key == b"c" - @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @CrossSync.Retry( predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 ) @CrossSync.pytest - async def test_read_rows_with_filter(self, table, temp_rows): + async def test_read_rows_with_filter(self, target, temp_rows): """ ensure filters are applied """ @@ -905,15 +926,15 @@ async def test_read_rows_with_filter(self, table, temp_rows): expected_label = "test-label" row_filter = ApplyLabelFilter(expected_label) query = ReadRowsQuery(row_filter=row_filter) - row_list = await table.read_rows(query) + row_list = await target.read_rows(query) assert len(row_list) == 4 for row in row_list: assert row[0].labels == [expected_label] - @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @CrossSync.convert(replace_symbols={"__anext__": "__next__", "aclose": "close"}) @CrossSync.pytest - async def test_read_rows_stream_close(self, table, temp_rows): + async def test_read_rows_stream_close(self, target, temp_rows): """ Ensure that the read_rows_stream can be closed """ @@ -923,7 +944,7 @@ async def test_read_rows_stream_close(self, table, temp_rows): await temp_rows.add_row(b"row_key_2") # full table scan query = ReadRowsQuery() - generator = await table.read_rows_stream(query) + generator = await target.read_rows_stream(query) # grab first row first_row = await generator.__anext__() assert first_row.row_key == b"row_key_1" @@ -932,16 +953,16 @@ async def test_read_rows_stream_close(self, table, temp_rows): with pytest.raises(CrossSync.StopIteration): await generator.__anext__() - @pytest.mark.usefixtures("table") + 
@pytest.mark.usefixtures("target") @CrossSync.pytest - async def test_read_row(self, table, temp_rows): + async def test_read_row(self, target, temp_rows): """ Test read_row (single row helper) """ from google.cloud.bigtable.data import Row await temp_rows.add_row(b"row_key_1", value=b"value") - row = await table.read_row(b"row_key_1") + row = await target.read_row(b"row_key_1") assert isinstance(row, Row) assert row.row_key == b"row_key_1" assert row.cells[0].value == b"value" @@ -950,24 +971,24 @@ async def test_read_row(self, table, temp_rows): bool(os.environ.get(BIGTABLE_EMULATOR)), reason="emulator doesn't raise InvalidArgument", ) - @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @CrossSync.pytest - async def test_read_row_missing(self, table): + async def test_read_row_missing(self, target): """ Test read_row when row does not exist """ from google.api_core import exceptions row_key = "row_key_not_exist" - result = await table.read_row(row_key) + result = await target.read_row(row_key) assert result is None with pytest.raises(exceptions.InvalidArgument) as e: - await table.read_row("") + await target.read_row("") assert "Row keys must be non-empty" in str(e) - @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @CrossSync.pytest - async def test_read_row_w_filter(self, table, temp_rows): + async def test_read_row_w_filter(self, target, temp_rows): """ Test read_row (single row helper) """ @@ -977,7 +998,7 @@ async def test_read_row_w_filter(self, table, temp_rows): await temp_rows.add_row(b"row_key_1", value=b"value") expected_label = "test-label" label_filter = ApplyLabelFilter(expected_label) - row = await table.read_row(b"row_key_1", row_filter=label_filter) + row = await target.read_row(b"row_key_1", row_filter=label_filter) assert isinstance(row, Row) assert row.row_key == b"row_key_1" assert row.cells[0].value == b"value" @@ -987,26 +1008,26 @@ async def test_read_row_w_filter(self, table, temp_rows): bool(os.environ.get(BIGTABLE_EMULATOR)), reason="emulator doesn't raise InvalidArgument", ) - @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @CrossSync.pytest - async def test_row_exists(self, table, temp_rows): + async def test_row_exists(self, target, temp_rows): from google.api_core import exceptions """Test row_exists with rows that exist and don't exist""" - assert await table.row_exists(b"row_key_1") is False + assert await target.row_exists(b"row_key_1") is False await temp_rows.add_row(b"row_key_1") - assert await table.row_exists(b"row_key_1") is True - assert await table.row_exists("row_key_1") is True - assert await table.row_exists(b"row_key_2") is False - assert await table.row_exists("row_key_2") is False - assert await table.row_exists("3") is False + assert await target.row_exists(b"row_key_1") is True + assert await target.row_exists("row_key_1") is True + assert await target.row_exists(b"row_key_2") is False + assert await target.row_exists("row_key_2") is False + assert await target.row_exists("3") is False await temp_rows.add_row(b"3") - assert await table.row_exists(b"3") is True + assert await target.row_exists(b"3") is True with pytest.raises(exceptions.InvalidArgument) as e: - await table.row_exists("") + await target.row_exists("") assert "Row keys must be non-empty" in str(e) - @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @CrossSync.Retry( predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 ) @@ -1037,7 +1058,7 @@ async def test_row_exists(self, 
table, temp_rows): ) @CrossSync.pytest async def test_literal_value_filter( - self, table, temp_rows, cell_value, filter_input, expect_match + self, target, temp_rows, cell_value, filter_input, expect_match ): """ Literal value filter does complex escaping on re2 strings. @@ -1049,7 +1070,7 @@ async def test_literal_value_filter( f = LiteralValueFilter(filter_input) await temp_rows.add_row(b"row_key_1", value=cell_value) query = ReadRowsQuery(row_filter=f) - row_list = await table.read_rows(query) + row_list = await target.read_rows(query) assert len(row_list) == bool( expect_match ), f"row {type(cell_value)}({cell_value}) not found with {type(filter_input)}({filter_input}) filter" @@ -1059,10 +1080,31 @@ async def test_literal_value_filter( reason="emulator doesn't support SQL", ) @CrossSync.pytest + async def test_authorized_view_unauthenticated( + self, client, authorized_view_id, instance_id, table_id + ): + """ + Requesting family outside authorized family_subset should raise exception + """ + from google.cloud.bigtable.data.mutations import SetCell + + async with client.get_authorized_view( + instance_id, table_id, authorized_view_id + ) as view: + mutation = SetCell(family="unauthorized", qualifier="q", new_value="v") + with pytest.raises(PermissionDenied) as e: + await view.mutate_row(b"row-key", mutation) + assert "outside the Authorized View" in e.value.message + + @pytest.mark.skipif( + bool(os.environ.get(BIGTABLE_EMULATOR)), + reason="emulator doesn't support SQL", + ) @pytest.mark.usefixtures("client") @CrossSync.Retry( predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 ) + @CrossSync.pytest async def test_execute_query_simple(self, client, table_id, instance_id): result = await client.execute_query("SELECT 1 AS a, 'foo' AS b", instance_id) rows = [r async for r in result] @@ -1076,11 +1118,11 @@ async def test_execute_query_simple(self, client, table_id, instance_id): reason="emulator doesn't support SQL", ) @CrossSync.pytest - @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @CrossSync.Retry( predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 ) - async def test_execute_against_table( + async def test_execute_against_target( self, client, instance_id, table_id, temp_rows ): await temp_rows.add_row(b"row_key_1") @@ -1201,7 +1243,7 @@ async def test_execute_query_params(self, client, table_id, instance_id): reason="emulator doesn't support SQL", ) @CrossSync.pytest - @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @CrossSync.Retry( predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 ) diff --git a/packages/google-cloud-bigtable/tests/system/data/test_system_autogen.py b/packages/google-cloud-bigtable/tests/system/data/test_system_autogen.py index f9af614a262a..6b2006d7b360 100644 --- a/packages/google-cloud-bigtable/tests/system/data/test_system_autogen.py +++ b/packages/google-cloud-bigtable/tests/system/data/test_system_autogen.py @@ -20,7 +20,7 @@ import uuid import os from google.api_core import retry -from google.api_core.exceptions import ClientError +from google.api_core.exceptions import ClientError, PermissionDenied from google.cloud.bigtable.data.execute_query.metadata import SqlType from google.cloud.bigtable.data.read_modify_write_rules import _MAX_INCREMENT_VALUE from google.cloud.environment_vars import BIGTABLE_EMULATOR @@ -28,6 +28,10 @@ from google.cloud.bigtable.data._cross_sync import CrossSync from . 
import TEST_FAMILY, TEST_FAMILY_2 +TARGETS = ["table"] +if not os.environ.get(BIGTABLE_EMULATOR): + TARGETS.append("authorized_view") + @CrossSync._Sync_Impl.add_mapping_decorator("TempRowBuilder") class TempRowBuilder: @@ -35,9 +39,9 @@ class TempRowBuilder: Used to add rows to a table for testing purposes. """ - def __init__(self, table): + def __init__(self, target): self.rows = [] - self.table = table + self.target = target def add_row( self, row_key, *, family=TEST_FAMILY, qualifier=b"q", value=b"test-value" @@ -47,7 +51,7 @@ def add_row( elif isinstance(value, int): value = value.to_bytes(8, byteorder="big", signed=True) request = { - "table_name": self.table.table_name, + "table_name": self.target.table_name, "row_key": row_key, "mutations": [ { @@ -59,19 +63,19 @@ def add_row( } ], } - self.table.client._gapic_client.mutate_row(request) + self.target.client._gapic_client.mutate_row(request) self.rows.append(row_key) def delete_rows(self): if self.rows: request = { - "table_name": self.table.table_name, + "table_name": self.target.table_name, "entries": [ {"row_key": row, "mutations": [{"delete_from_row": {}}]} for row in self.rows ], } - self.table.client._gapic_client.mutate_rows(request) + self.target.client._gapic_client.mutate_rows(request) class TestSystem: @@ -81,10 +85,21 @@ def client(self): with CrossSync._Sync_Impl.DataClient(project=project) as client: yield client - @pytest.fixture(scope="session") - def table(self, client, table_id, instance_id): - with client.get_table(instance_id, table_id) as table: - yield table + @pytest.fixture(scope="session", params=TARGETS) + def target(self, client, table_id, authorized_view_id, instance_id, request): + """This fixture runs twice: once for a standard table, and once with an authorized view + + Note: emulator doesn't support authorized views. 
Only use target""" + if request.param == "table": + with client.get_table(instance_id, table_id) as table: + yield table + elif request.param == "authorized_view": + with client.get_authorized_view( + instance_id, table_id, authorized_view_id + ) as view: + yield view + else: + raise ValueError(f"unknown target type: {request.param}") @pytest.fixture(scope="session") def column_family_config(self): @@ -110,12 +125,12 @@ def cluster_config(self, project_id): } return cluster - @pytest.mark.usefixtures("table") - def _retrieve_cell_value(self, table, row_key): + @pytest.mark.usefixtures("target") + def _retrieve_cell_value(self, target, row_key): """Helper to read an individual row""" from google.cloud.bigtable.data import ReadRowsQuery - row_list = table.read_rows(ReadRowsQuery(row_keys=row_key)) + row_list = target.read_rows(ReadRowsQuery(row_keys=row_key)) assert len(row_list) == 1 row = row_list[0] cell = row.cells[0] @@ -138,28 +153,28 @@ def _create_row_and_mutation( return (row_key, mutation) @pytest.fixture(scope="function") - def temp_rows(self, table): - builder = CrossSync._Sync_Impl.TempRowBuilder(table) + def temp_rows(self, target): + builder = CrossSync._Sync_Impl.TempRowBuilder(target) yield builder builder.delete_rows() - @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @pytest.mark.usefixtures("client") @CrossSync._Sync_Impl.Retry( predicate=retry.if_exception_type(ClientError), initial=1, maximum=10 ) - def test_ping_and_warm_gapic(self, client, table): + def test_ping_and_warm_gapic(self, client, target): """Simple ping rpc test This test ensures channels are able to authenticate with backend""" - request = {"name": table.instance_name} + request = {"name": target.instance_name} client._gapic_client.ping_and_warm(request) - @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @pytest.mark.usefixtures("client") @CrossSync._Sync_Impl.Retry( predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 ) - def test_ping_and_warm(self, client, table): + def test_ping_and_warm(self, client, target): """Test ping and warm from handwritten client""" results = client._ping_and_warm_instances() assert len(results) == 1 @@ -196,33 +211,33 @@ def test_channel_refresh(self, table_id, instance_id, temp_rows): finally: client.close() - @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @CrossSync._Sync_Impl.Retry( predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 ) - def test_mutation_set_cell(self, table, temp_rows): + def test_mutation_set_cell(self, target, temp_rows): """Ensure cells can be set properly""" row_key = b"bulk_mutate" new_value = uuid.uuid4().hex.encode() (row_key, mutation) = self._create_row_and_mutation( - table, temp_rows, new_value=new_value + target, temp_rows, new_value=new_value ) - table.mutate_row(row_key, mutation) - assert self._retrieve_cell_value(table, row_key) == new_value + target.mutate_row(row_key, mutation) + assert self._retrieve_cell_value(target, row_key) == new_value @pytest.mark.skipif( bool(os.environ.get(BIGTABLE_EMULATOR)), reason="emulator doesn't use splits" ) @pytest.mark.usefixtures("client") - @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @CrossSync._Sync_Impl.Retry( predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 ) - def test_sample_row_keys(self, client, table, temp_rows, column_split_config): - """Sample keys should return a single sample in small test tables""" + def test_sample_row_keys(self, client, 
target, temp_rows, column_split_config): + """Sample keys should return a single sample in small test targets""" temp_rows.add_row(b"row_key_1") temp_rows.add_row(b"row_key_2") - results = table.sample_row_keys() + results = target.sample_row_keys() assert len(results) == len(column_split_config) + 1 for idx in range(len(column_split_config)): assert results[idx][0] == column_split_config[idx] @@ -231,20 +246,20 @@ def test_sample_row_keys(self, client, table, temp_rows, column_split_config): assert isinstance(results[-1][1], int) @pytest.mark.usefixtures("client") - @pytest.mark.usefixtures("table") - def test_bulk_mutations_set_cell(self, client, table, temp_rows): + @pytest.mark.usefixtures("target") + def test_bulk_mutations_set_cell(self, client, target, temp_rows): """Ensure cells can be set properly""" from google.cloud.bigtable.data.mutations import RowMutationEntry new_value = uuid.uuid4().hex.encode() (row_key, mutation) = self._create_row_and_mutation( - table, temp_rows, new_value=new_value + target, temp_rows, new_value=new_value ) bulk_mutation = RowMutationEntry(row_key, [mutation]) - table.bulk_mutate_rows([bulk_mutation]) - assert self._retrieve_cell_value(table, row_key) == new_value + target.bulk_mutate_rows([bulk_mutation]) + assert self._retrieve_cell_value(target, row_key) == new_value - def test_bulk_mutations_raise_exception(self, client, table): + def test_bulk_mutations_raise_exception(self, client, target): """If an invalid mutation is passed, an exception should be raised""" from google.cloud.bigtable.data.mutations import RowMutationEntry, SetCell from google.cloud.bigtable.data.exceptions import MutationsExceptionGroup @@ -256,7 +271,7 @@ def test_bulk_mutations_raise_exception(self, client, table): ) bulk_mutation = RowMutationEntry(row_key, [mutation]) with pytest.raises(MutationsExceptionGroup) as exc: - table.bulk_mutate_rows([bulk_mutation]) + target.bulk_mutate_rows([bulk_mutation]) assert len(exc.value.exceptions) == 1 entry_error = exc.value.exceptions[0] assert isinstance(entry_error, FailedMutationEntryError) @@ -264,71 +279,71 @@ def test_bulk_mutations_raise_exception(self, client, table): assert entry_error.entry == bulk_mutation @pytest.mark.usefixtures("client") - @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @CrossSync._Sync_Impl.Retry( predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 ) - def test_mutations_batcher_context_manager(self, client, table, temp_rows): + def test_mutations_batcher_context_manager(self, client, target, temp_rows): """test batcher with context manager. 
Should flush on exit""" from google.cloud.bigtable.data.mutations import RowMutationEntry (new_value, new_value2) = [uuid.uuid4().hex.encode() for _ in range(2)] (row_key, mutation) = self._create_row_and_mutation( - table, temp_rows, new_value=new_value + target, temp_rows, new_value=new_value ) (row_key2, mutation2) = self._create_row_and_mutation( - table, temp_rows, new_value=new_value2 + target, temp_rows, new_value=new_value2 ) bulk_mutation = RowMutationEntry(row_key, [mutation]) bulk_mutation2 = RowMutationEntry(row_key2, [mutation2]) - with table.mutations_batcher() as batcher: + with target.mutations_batcher() as batcher: batcher.append(bulk_mutation) batcher.append(bulk_mutation2) - assert self._retrieve_cell_value(table, row_key) == new_value + assert self._retrieve_cell_value(target, row_key) == new_value assert len(batcher._staged_entries) == 0 @pytest.mark.usefixtures("client") - @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @CrossSync._Sync_Impl.Retry( predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 ) - def test_mutations_batcher_timer_flush(self, client, table, temp_rows): + def test_mutations_batcher_timer_flush(self, client, target, temp_rows): """batch should occur after flush_interval seconds""" from google.cloud.bigtable.data.mutations import RowMutationEntry new_value = uuid.uuid4().hex.encode() (row_key, mutation) = self._create_row_and_mutation( - table, temp_rows, new_value=new_value + target, temp_rows, new_value=new_value ) bulk_mutation = RowMutationEntry(row_key, [mutation]) flush_interval = 0.1 - with table.mutations_batcher(flush_interval=flush_interval) as batcher: + with target.mutations_batcher(flush_interval=flush_interval) as batcher: batcher.append(bulk_mutation) CrossSync._Sync_Impl.yield_to_event_loop() assert len(batcher._staged_entries) == 1 CrossSync._Sync_Impl.sleep(flush_interval + 0.1) assert len(batcher._staged_entries) == 0 - assert self._retrieve_cell_value(table, row_key) == new_value + assert self._retrieve_cell_value(target, row_key) == new_value @pytest.mark.usefixtures("client") - @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @CrossSync._Sync_Impl.Retry( predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 ) - def test_mutations_batcher_count_flush(self, client, table, temp_rows): + def test_mutations_batcher_count_flush(self, client, target, temp_rows): """batch should flush after flush_limit_mutation_count mutations""" from google.cloud.bigtable.data.mutations import RowMutationEntry (new_value, new_value2) = [uuid.uuid4().hex.encode() for _ in range(2)] (row_key, mutation) = self._create_row_and_mutation( - table, temp_rows, new_value=new_value + target, temp_rows, new_value=new_value ) bulk_mutation = RowMutationEntry(row_key, [mutation]) (row_key2, mutation2) = self._create_row_and_mutation( - table, temp_rows, new_value=new_value2 + target, temp_rows, new_value=new_value2 ) bulk_mutation2 = RowMutationEntry(row_key2, [mutation2]) - with table.mutations_batcher(flush_limit_mutation_count=2) as batcher: + with target.mutations_batcher(flush_limit_mutation_count=2) as batcher: batcher.append(bulk_mutation) assert len(batcher._flush_jobs) == 0 assert len(batcher._staged_entries) == 1 @@ -339,29 +354,29 @@ def test_mutations_batcher_count_flush(self, client, table, temp_rows): future.result() assert len(batcher._staged_entries) == 0 assert len(batcher._flush_jobs) == 0 - assert self._retrieve_cell_value(table, row_key) == new_value - assert 
self._retrieve_cell_value(table, row_key2) == new_value2 + assert self._retrieve_cell_value(target, row_key) == new_value + assert self._retrieve_cell_value(target, row_key2) == new_value2 @pytest.mark.usefixtures("client") - @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @CrossSync._Sync_Impl.Retry( predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 ) - def test_mutations_batcher_bytes_flush(self, client, table, temp_rows): + def test_mutations_batcher_bytes_flush(self, client, target, temp_rows): """batch should flush after flush_limit_bytes bytes""" from google.cloud.bigtable.data.mutations import RowMutationEntry (new_value, new_value2) = [uuid.uuid4().hex.encode() for _ in range(2)] (row_key, mutation) = self._create_row_and_mutation( - table, temp_rows, new_value=new_value + target, temp_rows, new_value=new_value ) bulk_mutation = RowMutationEntry(row_key, [mutation]) (row_key2, mutation2) = self._create_row_and_mutation( - table, temp_rows, new_value=new_value2 + target, temp_rows, new_value=new_value2 ) bulk_mutation2 = RowMutationEntry(row_key2, [mutation2]) flush_limit = bulk_mutation.size() + bulk_mutation2.size() - 1 - with table.mutations_batcher(flush_limit_bytes=flush_limit) as batcher: + with target.mutations_batcher(flush_limit_bytes=flush_limit) as batcher: batcher.append(bulk_mutation) assert len(batcher._flush_jobs) == 0 assert len(batcher._staged_entries) == 1 @@ -371,27 +386,27 @@ def test_mutations_batcher_bytes_flush(self, client, table, temp_rows): for future in list(batcher._flush_jobs): future future.result() - assert self._retrieve_cell_value(table, row_key) == new_value - assert self._retrieve_cell_value(table, row_key2) == new_value2 + assert self._retrieve_cell_value(target, row_key) == new_value + assert self._retrieve_cell_value(target, row_key2) == new_value2 @pytest.mark.usefixtures("client") - @pytest.mark.usefixtures("table") - def test_mutations_batcher_no_flush(self, client, table, temp_rows): + @pytest.mark.usefixtures("target") + def test_mutations_batcher_no_flush(self, client, target, temp_rows): """test with no flush requirements met""" from google.cloud.bigtable.data.mutations import RowMutationEntry new_value = uuid.uuid4().hex.encode() start_value = b"unchanged" (row_key, mutation) = self._create_row_and_mutation( - table, temp_rows, start_value=start_value, new_value=new_value + target, temp_rows, start_value=start_value, new_value=new_value ) bulk_mutation = RowMutationEntry(row_key, [mutation]) (row_key2, mutation2) = self._create_row_and_mutation( - table, temp_rows, start_value=start_value, new_value=new_value + target, temp_rows, start_value=start_value, new_value=new_value ) bulk_mutation2 = RowMutationEntry(row_key2, [mutation2]) size_limit = bulk_mutation.size() + bulk_mutation2.size() + 1 - with table.mutations_batcher( + with target.mutations_batcher( flush_limit_bytes=size_limit, flush_limit_mutation_count=3, flush_interval=1 ) as batcher: batcher.append(bulk_mutation) @@ -401,15 +416,15 @@ def test_mutations_batcher_no_flush(self, client, table, temp_rows): CrossSync._Sync_Impl.yield_to_event_loop() assert len(batcher._staged_entries) == 2 assert len(batcher._flush_jobs) == 0 - assert self._retrieve_cell_value(table, row_key) == start_value - assert self._retrieve_cell_value(table, row_key2) == start_value + assert self._retrieve_cell_value(target, row_key) == start_value + assert self._retrieve_cell_value(target, row_key2) == start_value @pytest.mark.usefixtures("client") - 
@pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @CrossSync._Sync_Impl.Retry( predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 ) - def test_mutations_batcher_large_batch(self, client, table, temp_rows): + def test_mutations_batcher_large_batch(self, client, target, temp_rows): """test batcher with large batch of mutations""" from google.cloud.bigtable.data.mutations import RowMutationEntry, SetCell @@ -421,13 +436,13 @@ def test_mutations_batcher_large_batch(self, client, table, temp_rows): row_key = uuid.uuid4().hex.encode() row_mutations.append(RowMutationEntry(row_key, [add_mutation])) temp_rows.rows.append(row_key) - with table.mutations_batcher() as batcher: + with target.mutations_batcher() as batcher: for mutation in row_mutations: batcher.append(mutation) assert len(batcher._staged_entries) == 0 @pytest.mark.usefixtures("client") - @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @pytest.mark.parametrize( "start,increment,expected", [ @@ -444,7 +459,7 @@ def test_mutations_batcher_large_batch(self, client, table, temp_rows): ], ) def test_read_modify_write_row_increment( - self, client, table, temp_rows, start, increment, expected + self, client, target, temp_rows, start, increment, expected ): """test read_modify_write_row""" from google.cloud.bigtable.data.read_modify_write_rules import IncrementRule @@ -454,16 +469,16 @@ def test_read_modify_write_row_increment( qualifier = b"test-qualifier" temp_rows.add_row(row_key, value=start, family=family, qualifier=qualifier) rule = IncrementRule(family, qualifier, increment) - result = table.read_modify_write_row(row_key, rule) + result = target.read_modify_write_row(row_key, rule) assert result.row_key == row_key assert len(result) == 1 assert result[0].family == family assert result[0].qualifier == qualifier assert int(result[0]) == expected - assert self._retrieve_cell_value(table, row_key) == result[0].value + assert self._retrieve_cell_value(target, row_key) == result[0].value @pytest.mark.usefixtures("client") - @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @pytest.mark.parametrize( "start,append,expected", [ @@ -477,7 +492,7 @@ def test_read_modify_write_row_increment( ], ) def test_read_modify_write_row_append( - self, client, table, temp_rows, start, append, expected + self, client, target, temp_rows, start, append, expected ): """test read_modify_write_row""" from google.cloud.bigtable.data.read_modify_write_rules import AppendValueRule @@ -487,17 +502,17 @@ def test_read_modify_write_row_append( qualifier = b"test-qualifier" temp_rows.add_row(row_key, value=start, family=family, qualifier=qualifier) rule = AppendValueRule(family, qualifier, append) - result = table.read_modify_write_row(row_key, rule) + result = target.read_modify_write_row(row_key, rule) assert result.row_key == row_key assert len(result) == 1 assert result[0].family == family assert result[0].qualifier == qualifier assert result[0].value == expected - assert self._retrieve_cell_value(table, row_key) == result[0].value + assert self._retrieve_cell_value(target, row_key) == result[0].value @pytest.mark.usefixtures("client") - @pytest.mark.usefixtures("table") - def test_read_modify_write_row_chained(self, client, table, temp_rows): + @pytest.mark.usefixtures("target") + def test_read_modify_write_row_chained(self, client, target, temp_rows): """test read_modify_write_row with multiple rules""" from google.cloud.bigtable.data.read_modify_write_rules import AppendValueRule 
from google.cloud.bigtable.data.read_modify_write_rules import IncrementRule @@ -516,7 +531,7 @@ def test_read_modify_write_row_chained(self, client, table, temp_rows): AppendValueRule(family, qualifier, "world"), AppendValueRule(family, qualifier, "!"), ] - result = table.read_modify_write_row(row_key, rule) + result = target.read_modify_write_row(row_key, rule) assert result.row_key == row_key assert result[0].family == family assert result[0].qualifier == qualifier @@ -525,16 +540,16 @@ def test_read_modify_write_row_chained(self, client, table, temp_rows): == (start_amount + increment_amount).to_bytes(8, "big", signed=True) + b"helloworld!" ) - assert self._retrieve_cell_value(table, row_key) == result[0].value + assert self._retrieve_cell_value(target, row_key) == result[0].value @pytest.mark.usefixtures("client") - @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @pytest.mark.parametrize( "start_val,predicate_range,expected_result", [(1, (0, 2), True), (-1, (0, 2), False)], ) def test_check_and_mutate( - self, client, table, temp_rows, start_val, predicate_range, expected_result + self, client, target, temp_rows, start_val, predicate_range, expected_result ): """test that check_and_mutate_row works applies the right mutations, and returns the right result""" from google.cloud.bigtable.data.mutations import SetCell @@ -553,7 +568,7 @@ def test_check_and_mutate( family=TEST_FAMILY, qualifier=qualifier, new_value=true_mutation_value ) predicate = ValueRangeFilter(predicate_range[0], predicate_range[1]) - result = table.check_and_mutate_row( + result = target.check_and_mutate_row( row_key, predicate, true_case_mutations=true_mutation, @@ -563,33 +578,33 @@ def test_check_and_mutate( expected_value = ( true_mutation_value if expected_result else false_mutation_value ) - assert self._retrieve_cell_value(table, row_key) == expected_value + assert self._retrieve_cell_value(target, row_key) == expected_value @pytest.mark.skipif( bool(os.environ.get(BIGTABLE_EMULATOR)), reason="emulator doesn't raise InvalidArgument", ) @pytest.mark.usefixtures("client") - @pytest.mark.usefixtures("table") - def test_check_and_mutate_empty_request(self, client, table): + @pytest.mark.usefixtures("target") + def test_check_and_mutate_empty_request(self, client, target): """check_and_mutate with no true or fale mutations should raise an error""" from google.api_core import exceptions with pytest.raises(exceptions.InvalidArgument) as e: - table.check_and_mutate_row( + target.check_and_mutate_row( b"row_key", None, true_case_mutations=None, false_case_mutations=None ) assert "No mutations provided" in str(e.value) - @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @CrossSync._Sync_Impl.Retry( predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 ) - def test_read_rows_stream(self, table, temp_rows): + def test_read_rows_stream(self, target, temp_rows): """Ensure that the read_rows_stream method works""" temp_rows.add_row(b"row_key_1") temp_rows.add_row(b"row_key_2") - generator = table.read_rows_stream({}) + generator = target.read_rows_stream({}) first_row = generator.__next__() second_row = generator.__next__() assert first_row.row_key == b"row_key_1" @@ -597,24 +612,24 @@ def test_read_rows_stream(self, table, temp_rows): with pytest.raises(CrossSync._Sync_Impl.StopIteration): generator.__next__() - @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @CrossSync._Sync_Impl.Retry( predicate=retry.if_exception_type(ClientError), 
initial=1, maximum=5 ) - def test_read_rows(self, table, temp_rows): + def test_read_rows(self, target, temp_rows): """Ensure that the read_rows method works""" temp_rows.add_row(b"row_key_1") temp_rows.add_row(b"row_key_2") - row_list = table.read_rows({}) + row_list = target.read_rows({}) assert len(row_list) == 2 assert row_list[0].row_key == b"row_key_1" assert row_list[1].row_key == b"row_key_2" - @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @CrossSync._Sync_Impl.Retry( predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 ) - def test_read_rows_sharded_simple(self, table, temp_rows): + def test_read_rows_sharded_simple(self, target, temp_rows): """Test read rows sharded with two queries""" from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery @@ -624,18 +639,18 @@ def test_read_rows_sharded_simple(self, table, temp_rows): temp_rows.add_row(b"d") query1 = ReadRowsQuery(row_keys=[b"a", b"c"]) query2 = ReadRowsQuery(row_keys=[b"b", b"d"]) - row_list = table.read_rows_sharded([query1, query2]) + row_list = target.read_rows_sharded([query1, query2]) assert len(row_list) == 4 assert row_list[0].row_key == b"a" assert row_list[1].row_key == b"c" assert row_list[2].row_key == b"b" assert row_list[3].row_key == b"d" - @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @CrossSync._Sync_Impl.Retry( predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 ) - def test_read_rows_sharded_from_sample(self, table, temp_rows): + def test_read_rows_sharded_from_sample(self, target, temp_rows): """Test end-to-end sharding""" from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery from google.cloud.bigtable.data.read_rows_query import RowRange @@ -644,20 +659,20 @@ def test_read_rows_sharded_from_sample(self, table, temp_rows): temp_rows.add_row(b"b") temp_rows.add_row(b"c") temp_rows.add_row(b"d") - table_shard_keys = table.sample_row_keys() + table_shard_keys = target.sample_row_keys() query = ReadRowsQuery(row_ranges=[RowRange(start_key=b"b", end_key=b"z")]) shard_queries = query.shard(table_shard_keys) - row_list = table.read_rows_sharded(shard_queries) + row_list = target.read_rows_sharded(shard_queries) assert len(row_list) == 3 assert row_list[0].row_key == b"b" assert row_list[1].row_key == b"c" assert row_list[2].row_key == b"d" - @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @CrossSync._Sync_Impl.Retry( predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 ) - def test_read_rows_sharded_filters_limits(self, table, temp_rows): + def test_read_rows_sharded_filters_limits(self, target, temp_rows): """Test read rows sharded with filters and limits""" from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery from google.cloud.bigtable.data.row_filters import ApplyLabelFilter @@ -670,7 +685,7 @@ def test_read_rows_sharded_filters_limits(self, table, temp_rows): label_filter2 = ApplyLabelFilter("second") query1 = ReadRowsQuery(row_keys=[b"a", b"c"], limit=1, row_filter=label_filter1) query2 = ReadRowsQuery(row_keys=[b"b", b"d"], row_filter=label_filter2) - row_list = table.read_rows_sharded([query1, query2]) + row_list = target.read_rows_sharded([query1, query2]) assert len(row_list) == 3 assert row_list[0].row_key == b"a" assert row_list[1].row_key == b"b" @@ -679,11 +694,11 @@ def test_read_rows_sharded_filters_limits(self, table, temp_rows): assert row_list[1][0].labels == ["second"] assert row_list[2][0].labels == ["second"] - 
@pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @CrossSync._Sync_Impl.Retry( predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 ) - def test_read_rows_range_query(self, table, temp_rows): + def test_read_rows_range_query(self, target, temp_rows): """Ensure that the read_rows method works""" from google.cloud.bigtable.data import ReadRowsQuery from google.cloud.bigtable.data import RowRange @@ -693,16 +708,16 @@ def test_read_rows_range_query(self, table, temp_rows): temp_rows.add_row(b"c") temp_rows.add_row(b"d") query = ReadRowsQuery(row_ranges=RowRange(start_key=b"b", end_key=b"d")) - row_list = table.read_rows(query) + row_list = target.read_rows(query) assert len(row_list) == 2 assert row_list[0].row_key == b"b" assert row_list[1].row_key == b"c" - @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @CrossSync._Sync_Impl.Retry( predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 ) - def test_read_rows_single_key_query(self, table, temp_rows): + def test_read_rows_single_key_query(self, target, temp_rows): """Ensure that the read_rows method works with specified query""" from google.cloud.bigtable.data import ReadRowsQuery @@ -711,16 +726,16 @@ def test_read_rows_single_key_query(self, table, temp_rows): temp_rows.add_row(b"c") temp_rows.add_row(b"d") query = ReadRowsQuery(row_keys=[b"a", b"c"]) - row_list = table.read_rows(query) + row_list = target.read_rows(query) assert len(row_list) == 2 assert row_list[0].row_key == b"a" assert row_list[1].row_key == b"c" - @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @CrossSync._Sync_Impl.Retry( predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 ) - def test_read_rows_with_filter(self, table, temp_rows): + def test_read_rows_with_filter(self, target, temp_rows): """ensure filters are applied""" from google.cloud.bigtable.data import ReadRowsQuery from google.cloud.bigtable.data.row_filters import ApplyLabelFilter @@ -732,33 +747,33 @@ def test_read_rows_with_filter(self, table, temp_rows): expected_label = "test-label" row_filter = ApplyLabelFilter(expected_label) query = ReadRowsQuery(row_filter=row_filter) - row_list = table.read_rows(query) + row_list = target.read_rows(query) assert len(row_list) == 4 for row in row_list: assert row[0].labels == [expected_label] - @pytest.mark.usefixtures("table") - def test_read_rows_stream_close(self, table, temp_rows): + @pytest.mark.usefixtures("target") + def test_read_rows_stream_close(self, target, temp_rows): """Ensure that the read_rows_stream can be closed""" from google.cloud.bigtable.data import ReadRowsQuery temp_rows.add_row(b"row_key_1") temp_rows.add_row(b"row_key_2") query = ReadRowsQuery() - generator = table.read_rows_stream(query) + generator = target.read_rows_stream(query) first_row = generator.__next__() assert first_row.row_key == b"row_key_1" generator.close() with pytest.raises(CrossSync._Sync_Impl.StopIteration): generator.__next__() - @pytest.mark.usefixtures("table") - def test_read_row(self, table, temp_rows): + @pytest.mark.usefixtures("target") + def test_read_row(self, target, temp_rows): """Test read_row (single row helper)""" from google.cloud.bigtable.data import Row temp_rows.add_row(b"row_key_1", value=b"value") - row = table.read_row(b"row_key_1") + row = target.read_row(b"row_key_1") assert isinstance(row, Row) assert row.row_key == b"row_key_1" assert row.cells[0].value == b"value" @@ -767,20 +782,20 @@ def test_read_row(self, table, temp_rows): 
bool(os.environ.get(BIGTABLE_EMULATOR)), reason="emulator doesn't raise InvalidArgument", ) - @pytest.mark.usefixtures("table") - def test_read_row_missing(self, table): + @pytest.mark.usefixtures("target") + def test_read_row_missing(self, target): """Test read_row when row does not exist""" from google.api_core import exceptions row_key = "row_key_not_exist" - result = table.read_row(row_key) + result = target.read_row(row_key) assert result is None with pytest.raises(exceptions.InvalidArgument) as e: - table.read_row("") + target.read_row("") assert "Row keys must be non-empty" in str(e) - @pytest.mark.usefixtures("table") - def test_read_row_w_filter(self, table, temp_rows): + @pytest.mark.usefixtures("target") + def test_read_row_w_filter(self, target, temp_rows): """Test read_row (single row helper)""" from google.cloud.bigtable.data import Row from google.cloud.bigtable.data.row_filters import ApplyLabelFilter @@ -788,7 +803,7 @@ def test_read_row_w_filter(self, table, temp_rows): temp_rows.add_row(b"row_key_1", value=b"value") expected_label = "test-label" label_filter = ApplyLabelFilter(expected_label) - row = table.read_row(b"row_key_1", row_filter=label_filter) + row = target.read_row(b"row_key_1", row_filter=label_filter) assert isinstance(row, Row) assert row.row_key == b"row_key_1" assert row.cells[0].value == b"value" @@ -798,25 +813,25 @@ def test_read_row_w_filter(self, table, temp_rows): bool(os.environ.get(BIGTABLE_EMULATOR)), reason="emulator doesn't raise InvalidArgument", ) - @pytest.mark.usefixtures("table") - def test_row_exists(self, table, temp_rows): + @pytest.mark.usefixtures("target") + def test_row_exists(self, target, temp_rows): from google.api_core import exceptions "Test row_exists with rows that exist and don't exist" - assert table.row_exists(b"row_key_1") is False + assert target.row_exists(b"row_key_1") is False temp_rows.add_row(b"row_key_1") - assert table.row_exists(b"row_key_1") is True - assert table.row_exists("row_key_1") is True - assert table.row_exists(b"row_key_2") is False - assert table.row_exists("row_key_2") is False - assert table.row_exists("3") is False + assert target.row_exists(b"row_key_1") is True + assert target.row_exists("row_key_1") is True + assert target.row_exists(b"row_key_2") is False + assert target.row_exists("row_key_2") is False + assert target.row_exists("3") is False temp_rows.add_row(b"3") - assert table.row_exists(b"3") is True + assert target.row_exists(b"3") is True with pytest.raises(exceptions.InvalidArgument) as e: - table.row_exists("") + target.row_exists("") assert "Row keys must be non-empty" in str(e) - @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @CrossSync._Sync_Impl.Retry( predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 ) @@ -846,7 +861,7 @@ def test_row_exists(self, table, temp_rows): ], ) def test_literal_value_filter( - self, table, temp_rows, cell_value, filter_input, expect_match + self, target, temp_rows, cell_value, filter_input, expect_match ): """Literal value filter does complex escaping on re2 strings. 
Make sure inputs are properly interpreted by the server""" @@ -856,11 +871,28 @@ def test_literal_value_filter( f = LiteralValueFilter(filter_input) temp_rows.add_row(b"row_key_1", value=cell_value) query = ReadRowsQuery(row_filter=f) - row_list = table.read_rows(query) + row_list = target.read_rows(query) assert len(row_list) == bool( expect_match ), f"row {type(cell_value)}({cell_value}) not found with {type(filter_input)}({filter_input}) filter" + @pytest.mark.skipif( + bool(os.environ.get(BIGTABLE_EMULATOR)), reason="emulator doesn't support SQL" + ) + def test_authorized_view_unauthenticated( + self, client, authorized_view_id, instance_id, table_id + ): + """Requesting family outside authorized family_subset should raise exception""" + from google.cloud.bigtable.data.mutations import SetCell + + with client.get_authorized_view( + instance_id, table_id, authorized_view_id + ) as view: + mutation = SetCell(family="unauthorized", qualifier="q", new_value="v") + with pytest.raises(PermissionDenied) as e: + view.mutate_row(b"row-key", mutation) + assert "outside the Authorized View" in e.value.message + @pytest.mark.skipif( bool(os.environ.get(BIGTABLE_EMULATOR)), reason="emulator doesn't support SQL" ) @@ -879,11 +911,11 @@ def test_execute_query_simple(self, client, table_id, instance_id): @pytest.mark.skipif( bool(os.environ.get(BIGTABLE_EMULATOR)), reason="emulator doesn't support SQL" ) - @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @CrossSync._Sync_Impl.Retry( predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 ) - def test_execute_against_table(self, client, instance_id, table_id, temp_rows): + def test_execute_against_target(self, client, instance_id, table_id, temp_rows): temp_rows.add_row(b"row_key_1") result = client.execute_query("SELECT * FROM `" + table_id + "`", instance_id) rows = [r for r in result] @@ -986,7 +1018,7 @@ def test_execute_query_params(self, client, table_id, instance_id): @pytest.mark.skipif( bool(os.environ.get(BIGTABLE_EMULATOR)), reason="emulator doesn't support SQL" ) - @pytest.mark.usefixtures("table") + @pytest.mark.usefixtures("target") @CrossSync._Sync_Impl.Retry( predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 ) diff --git a/packages/google-cloud-bigtable/tests/unit/data/_async/test__mutate_rows.py b/packages/google-cloud-bigtable/tests/unit/data/_async/test__mutate_rows.py index 13f668fd34f3..f14fa6dee12a 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/_async/test__mutate_rows.py +++ b/packages/google-cloud-bigtable/tests/unit/data/_async/test__mutate_rows.py @@ -15,6 +15,8 @@ import pytest from google.cloud.bigtable_v2.types import MutateRowsResponse +from google.cloud.bigtable.data.mutations import RowMutationEntry +from google.cloud.bigtable.data.mutations import DeleteAllFromRow from google.rpc import status_pb2 from google.api_core.exceptions import DeadlineExceeded from google.api_core.exceptions import Forbidden @@ -37,8 +39,11 @@ def _target_class(self): def _make_one(self, *args, **kwargs): if not args: + fake_target = CrossSync.Mock() + fake_target._request_path = {"table_name": "table"} + fake_target.app_profile_id = None kwargs["gapic_client"] = kwargs.pop("gapic_client", mock.Mock()) - kwargs["table"] = kwargs.pop("table", CrossSync.Mock()) + kwargs["target"] = kwargs.pop("target", fake_target) kwargs["operation_timeout"] = kwargs.pop("operation_timeout", 5) kwargs["attempt_timeout"] = kwargs.pop("attempt_timeout", 0.1) kwargs["retryable_exceptions"] = 
kwargs.pop("retryable_exceptions", ()) @@ -46,9 +51,8 @@ def _make_one(self, *args, **kwargs): return self._target_class()(*args, **kwargs) def _make_mutation(self, count=1, size=1): - mutation = mock.Mock() - mutation.size.return_value = size - mutation.mutations = [mock.Mock()] * count + mutation = RowMutationEntry("k", [DeleteAllFromRow() for _ in range(count)]) + mutation.size = lambda: size return mutation @CrossSync.convert @@ -95,16 +99,10 @@ def test_ctor(self): attempt_timeout, retryable_exceptions, ) - # running gapic_fn should trigger a client call + # running gapic_fn should trigger a client call with baked-in args assert client.mutate_rows.call_count == 0 instance._gapic_fn() assert client.mutate_rows.call_count == 1 - # gapic_fn should call with table details - inner_kwargs = client.mutate_rows.call_args[1] - assert len(inner_kwargs) == 3 - assert inner_kwargs["table_name"] == table.table_name - assert inner_kwargs["app_profile_id"] == table.app_profile_id - assert inner_kwargs["retry"] is None # entries should be passed down entries_w_pb = [_EntryWithProto(e, e._to_pb()) for e in entries] assert instance.mutations == entries_w_pb @@ -174,6 +172,8 @@ async def test_mutate_rows_attempt_exception(self, exc_type): """ client = CrossSync.Mock() table = mock.Mock() + table._request_path = {"table_name": "table"} + table.app_profile_id = None entries = [self._make_mutation(), self._make_mutation()] operation_timeout = 0.05 expected_exception = exc_type("test") @@ -307,7 +307,8 @@ async def test_run_attempt_single_entry_success(self): assert mock_gapic_fn.call_count == 1 _, kwargs = mock_gapic_fn.call_args assert kwargs["timeout"] == expected_timeout - assert kwargs["entries"] == [mutation._to_pb()] + request = kwargs["request"] + assert request.entries == [mutation._to_pb()] @CrossSync.pytest async def test_run_attempt_empty_request(self): diff --git a/packages/google-cloud-bigtable/tests/unit/data/_async/test__read_rows.py b/packages/google-cloud-bigtable/tests/unit/data/_async/test__read_rows.py index 944681a84c96..c43f46d5a66b 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/_async/test__read_rows.py +++ b/packages/google-cloud-bigtable/tests/unit/data/_async/test__read_rows.py @@ -54,7 +54,7 @@ def test_ctor(self): client.read_rows.return_value = None table = mock.Mock() table._client = client - table.table_name = "test_table" + table._request_path = {"table_name": "test_table"} table.app_profile_id = "test_profile" expected_operation_timeout = 42 expected_request_timeout = 44 @@ -78,7 +78,7 @@ def test_ctor(self): assert instance._remaining_count == row_limit assert instance.operation_timeout == expected_operation_timeout assert client.read_rows.call_count == 0 - assert instance.request.table_name == table.table_name + assert instance.request.table_name == "test_table" assert instance.request.app_profile_id == table.app_profile_id assert instance.request.rows_limit == row_limit @@ -267,7 +267,7 @@ async def mock_stream(): query = ReadRowsQuery(limit=start_limit) table = mock.Mock() - table.table_name = "table_name" + table._request_path = {"table_name": "table_name"} table.app_profile_id = "app_profile_id" instance = self._make_one(query, table, 10, 10) assert instance._remaining_count == start_limit @@ -306,7 +306,7 @@ async def mock_stream(): query = ReadRowsQuery(limit=start_limit) table = mock.Mock() - table.table_name = "table_name" + table._request_path = {"table_name": "table_name"} table.app_profile_id = "app_profile_id" instance = self._make_one(query, 
table, 10, 10) assert instance._remaining_count == start_limit diff --git a/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py b/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py index f45a17bf6816..5e7302d75380 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py @@ -28,6 +28,7 @@ from google.api_core import exceptions as core_exceptions from google.cloud.bigtable.data.exceptions import InvalidChunk from google.cloud.bigtable.data.exceptions import _MutateRowsIncomplete +from google.cloud.bigtable.data.mutations import DeleteAllFromRow from google.cloud.bigtable.data import TABLE_DEFAULT from google.cloud.bigtable.data.read_modify_write_rules import IncrementRule @@ -272,9 +273,7 @@ async def test__ping_and_warm_instances(self): assert gather.call_args[1]["return_exceptions"] is True assert gather.call_args[1]["sync_executor"] == client_mock._executor # test with instances - client_mock._active_instances = [ - (mock.Mock(), mock.Mock(), mock.Mock()) - ] * 4 + client_mock._active_instances = [(mock.Mock(), mock.Mock())] * 4 gather.reset_mock() channel.reset_mock() result = await self._get_target_class()._ping_and_warm_instances( @@ -292,7 +291,6 @@ async def test__ping_and_warm_instances(self): for idx, (_, kwargs) in enumerate(grpc_call_args): ( expected_instance, - expected_table, expected_app_profile, ) = client_mock._active_instances[idx] request = kwargs["request"] @@ -323,7 +321,7 @@ async def test__ping_and_warm_single_instance(self): gather.side_effect = lambda *args, **kwargs: [fn() for fn in args[0]] # test with large set of instances client_mock._active_instances = [mock.Mock()] * 100 - test_key = ("test-instance", "test-table", "test-app-profile") + test_key = ("test-instance", "test-app-profile") result = await self._get_target_class()._ping_and_warm_instances( client_mock, test_key ) @@ -551,7 +549,6 @@ async def test__register_instance(self): # ensure active_instances and instance_owners were updated properly expected_key = ( "prefix/instance-1", - table_mock.table_name, table_mock.app_profile_id, ) assert len(active_instances) == 1 @@ -577,7 +574,6 @@ async def test__register_instance(self): assert len(instance_owners) == 2 expected_key2 = ( "prefix/instance-2", - table_mock2.table_name, table_mock2.app_profile_id, ) assert any( @@ -612,7 +608,6 @@ async def test__register_instance_duplicate(self): table_mock = mock.Mock() expected_key = ( "prefix/instance-1", - table_mock.table_name, table_mock.app_profile_id, ) # fake first registration @@ -639,13 +634,13 @@ async def test__register_instance_duplicate(self): @pytest.mark.parametrize( "insert_instances,expected_active,expected_owner_keys", [ - ([("i", "t", None)], [("i", "t", None)], [("i", "t", None)]), - ([("i", "t", "p")], [("i", "t", "p")], [("i", "t", "p")]), - ([("1", "t", "p"), ("1", "t", "p")], [("1", "t", "p")], [("1", "t", "p")]), + ([("i", None)], [("i", None)], [("i", None)]), + ([("i", "p")], [("i", "p")], [("i", "p")]), + ([("1", "p"), ("1", "p")], [("1", "p")], [("1", "p")]), ( - [("1", "t", "p"), ("2", "t", "p")], - [("1", "t", "p"), ("2", "t", "p")], - [("1", "t", "p"), ("2", "t", "p")], + [("1", "p"), ("2", "p")], + [("1", "p"), ("2", "p")], + [("1", "p"), ("2", "p")], ), ], ) @@ -666,8 +661,7 @@ async def test__register_instance_state( client_mock._ping_and_warm_instances = CrossSync.Mock() table_mock = mock.Mock() # register instances - for instance, table, profile 
in insert_instances: - table_mock.table_name = table + for instance, profile in insert_instances: table_mock.app_profile_id = profile await self._get_target_class()._register_instance( client_mock, instance, table_mock @@ -700,11 +694,11 @@ async def test__remove_instance_registration(self): instance_1_path = client._gapic_client.instance_path( client.project, "instance-1" ) - instance_1_key = (instance_1_path, table.table_name, table.app_profile_id) + instance_1_key = (instance_1_path, table.app_profile_id) instance_2_path = client._gapic_client.instance_path( client.project, "instance-2" ) - instance_2_key = (instance_2_path, table.table_name, table.app_profile_id) + instance_2_key = (instance_2_path, table.app_profile_id) assert len(client._instance_owners[instance_1_key]) == 1 assert list(client._instance_owners[instance_1_key])[0] == id(table) assert len(client._instance_owners[instance_2_key]) == 1 @@ -735,13 +729,13 @@ async def test__multiple_table_registration(self): client.project, "instance_1" ) instance_1_key = _WarmedInstanceKey( - instance_1_path, table_1.table_name, table_1.app_profile_id + instance_1_path, table_1.app_profile_id ) assert len(client._instance_owners[instance_1_key]) == 1 assert len(client._active_instances) == 1 assert id(table_1) in client._instance_owners[instance_1_key] # duplicate table should register in instance_owners under same key - async with client.get_table("instance_1", "table_1") as table_2: + async with client.get_table("instance_1", "table_2") as table_2: assert table_2._register_instance_future is not None if not CrossSync.is_async: # give the background task time to run @@ -751,7 +745,9 @@ async def test__multiple_table_registration(self): assert id(table_1) in client._instance_owners[instance_1_key] assert id(table_2) in client._instance_owners[instance_1_key] # unique table should register in instance_owners and active_instances - async with client.get_table("instance_1", "table_3") as table_3: + async with client.get_table( + "instance_1", "table_3", app_profile_id="diff" + ) as table_3: assert table_3._register_instance_future is not None if not CrossSync.is_async: # give the background task time to run @@ -760,7 +756,7 @@ async def test__multiple_table_registration(self): client.project, "instance_1" ) instance_3_key = _WarmedInstanceKey( - instance_3_path, table_3.table_name, table_3.app_profile_id + instance_3_path, table_3.app_profile_id ) assert len(client._instance_owners[instance_1_key]) == 2 assert len(client._instance_owners[instance_3_key]) == 1 @@ -800,13 +796,13 @@ async def test__multiple_instance_registration(self): client.project, "instance_1" ) instance_1_key = _WarmedInstanceKey( - instance_1_path, table_1.table_name, table_1.app_profile_id + instance_1_path, table_1.app_profile_id ) instance_2_path = client._gapic_client.instance_path( client.project, "instance_2" ) instance_2_key = _WarmedInstanceKey( - instance_2_path, table_2.table_name, table_2.app_profile_id + instance_2_path, table_2.app_profile_id ) assert len(client._instance_owners[instance_1_key]) == 1 assert len(client._instance_owners[instance_2_key]) == 1 @@ -824,8 +820,12 @@ async def test__multiple_instance_registration(self): assert len(client._instance_owners[instance_1_key]) == 0 assert len(client._instance_owners[instance_2_key]) == 0 + @pytest.mark.parametrize("method", ["get_table", "get_authorized_view"]) @CrossSync.pytest - async def test_get_table(self): + async def test_get_api_surface(self, method): + """ + test client.get_table and 
client.get_authorized_view + """ from google.cloud.bigtable.data._helpers import _WarmedInstanceKey client = self._make_client(project="project-id") @@ -833,67 +833,90 @@ async def test_get_table(self): expected_table_id = "table-id" expected_instance_id = "instance-id" expected_app_profile_id = "app-profile-id" - table = client.get_table( - expected_instance_id, - expected_table_id, - expected_app_profile_id, - ) + if method == "get_table": + surface = client.get_table( + expected_instance_id, + expected_table_id, + expected_app_profile_id, + ) + assert isinstance(surface, CrossSync.TestTable._get_target_class()) + elif method == "get_authorized_view": + surface = client.get_authorized_view( + expected_instance_id, + expected_table_id, + "view_id", + expected_app_profile_id, + ) + assert isinstance(surface, CrossSync.TestAuthorizedView._get_target_class()) + assert ( + surface.authorized_view_name + == f"projects/{client.project}/instances/{expected_instance_id}/tables/{expected_table_id}/authorizedViews/view_id" + ) + else: + raise TypeError(f"unexpected method: {method}") await CrossSync.yield_to_event_loop() - assert isinstance(table, CrossSync.TestTable._get_target_class()) - assert table.table_id == expected_table_id + assert surface.table_id == expected_table_id assert ( - table.table_name + surface.table_name == f"projects/{client.project}/instances/{expected_instance_id}/tables/{expected_table_id}" ) - assert table.instance_id == expected_instance_id + assert surface.instance_id == expected_instance_id assert ( - table.instance_name + surface.instance_name == f"projects/{client.project}/instances/{expected_instance_id}" ) - assert table.app_profile_id == expected_app_profile_id - assert table.client is client - instance_key = _WarmedInstanceKey( - table.instance_name, table.table_name, table.app_profile_id - ) + assert surface.app_profile_id == expected_app_profile_id + assert surface.client is client + instance_key = _WarmedInstanceKey(surface.instance_name, surface.app_profile_id) assert instance_key in client._active_instances - assert client._instance_owners[instance_key] == {id(table)} + assert client._instance_owners[instance_key] == {id(surface)} await client.close() + @pytest.mark.parametrize("method", ["get_table", "get_authorized_view"]) @CrossSync.pytest - async def test_get_table_arg_passthrough(self): + async def test_api_surface_arg_passthrough(self, method): """ - All arguments passed in get_table should be sent to constructor + All arguments passed in get_table and get_authorized_view should be sent to constructor """ + if method == "get_table": + surface_type = CrossSync.TestTable._get_target_class() + elif method == "get_authorized_view": + surface_type = CrossSync.TestAuthorizedView._get_target_class() + else: + raise TypeError(f"unexpected method: {method}") + async with self._make_client(project="project-id") as client: - with mock.patch.object( - CrossSync.TestTable._get_target_class(), "__init__" - ) as mock_constructor: + with mock.patch.object(surface_type, "__init__") as mock_constructor: mock_constructor.return_value = None assert not client._active_instances - expected_table_id = "table-id" - expected_instance_id = "instance-id" - expected_app_profile_id = "app-profile-id" - expected_args = (1, "test", {"test": 2}) + expected_args = ( + "table", + "instance", + "view", + "app_profile", + 1, + "test", + {"test": 2}, + ) expected_kwargs = {"hello": "world", "test": 2} - client.get_table( - expected_instance_id, - expected_table_id, - 
expected_app_profile_id, + getattr(client, method)( *expected_args, **expected_kwargs, ) mock_constructor.assert_called_once_with( client, - expected_instance_id, - expected_table_id, - expected_app_profile_id, *expected_args, **expected_kwargs, ) + @pytest.mark.parametrize("method", ["get_table", "get_authorized_view"]) @CrossSync.pytest - async def test_get_table_context_manager(self): + async def test_api_surface_context_manager(self, method): + """ + get_table and get_authorized_view should work as context managers + """ + from functools import partial from google.cloud.bigtable.data._helpers import _WarmedInstanceKey expected_table_id = "table-id" @@ -901,17 +924,35 @@ async def test_get_table_context_manager(self): expected_app_profile_id = "app-profile-id" expected_project_id = "project-id" - with mock.patch.object( - CrossSync.TestTable._get_target_class(), "close" - ) as close_mock: + if method == "get_table": + surface_type = CrossSync.TestTable._get_target_class() + elif method == "get_authorized_view": + surface_type = CrossSync.TestAuthorizedView._get_target_class() + else: + raise TypeError(f"unexpected method: {method}") + + with mock.patch.object(surface_type, "close") as close_mock: async with self._make_client(project=expected_project_id) as client: - async with client.get_table( - expected_instance_id, - expected_table_id, - expected_app_profile_id, - ) as table: + if method == "get_table": + fn = partial( + client.get_table, + expected_instance_id, + expected_table_id, + expected_app_profile_id, + ) + elif method == "get_authorized_view": + fn = partial( + client.get_authorized_view, + expected_instance_id, + expected_table_id, + "view_id", + expected_app_profile_id, + ) + else: + raise TypeError(f"unexpected method: {method}") + async with fn() as table: await CrossSync.yield_to_event_loop() - assert isinstance(table, CrossSync.TestTable._get_target_class()) + assert isinstance(table, surface_type) assert table.table_id == expected_table_id assert ( table.table_name @@ -925,7 +966,7 @@ async def test_get_table_context_manager(self): assert table.app_profile_id == expected_app_profile_id assert table.client is client instance_key = _WarmedInstanceKey( - table.instance_name, table.table_name, table.app_profile_id + table.instance_name, table.app_profile_id ) assert instance_key in client._active_instances assert client._instance_owners[instance_key] == {id(table)} @@ -1009,8 +1050,20 @@ def _make_client(self, *args, **kwargs): def _get_target_class(): return CrossSync.Table + def _make_one( + self, + client, + instance_id="instance", + table_id="table", + app_profile_id=None, + **kwargs, + ): + return self._get_target_class()( + client, instance_id, table_id, app_profile_id, **kwargs + ) + @CrossSync.pytest - async def test_table_ctor(self): + async def test_ctor(self): from google.cloud.bigtable.data._helpers import _WarmedInstanceKey expected_table_id = "table-id" @@ -1040,11 +1093,17 @@ async def test_table_ctor(self): await CrossSync.yield_to_event_loop() assert table.table_id == expected_table_id assert table.instance_id == expected_instance_id + assert ( + table.table_name + == f"projects/{client.project}/instances/{expected_instance_id}/tables/{expected_table_id}" + ) + assert ( + table.instance_name + == f"projects/{client.project}/instances/{expected_instance_id}" + ) assert table.app_profile_id == expected_app_profile_id assert table.client is client - instance_key = _WarmedInstanceKey( - table.instance_name, table.table_name, table.app_profile_id - ) + 
instance_key = _WarmedInstanceKey(table.instance_name, table.app_profile_id) assert instance_key in client._active_instances assert client._instance_owners[instance_key] == {id(table)} assert table.default_operation_timeout == expected_operation_timeout @@ -1073,23 +1132,15 @@ async def test_table_ctor(self): await client.close() @CrossSync.pytest - async def test_table_ctor_defaults(self): + async def test_ctor_defaults(self): """ should provide default timeout values and app_profile_id """ - expected_table_id = "table-id" - expected_instance_id = "instance-id" client = self._make_client() assert not client._active_instances - table = self._get_target_class()( - client, - expected_instance_id, - expected_table_id, - ) + table = self._make_one(client) await CrossSync.yield_to_event_loop() - assert table.table_id == expected_table_id - assert table.instance_id == expected_instance_id assert table.app_profile_id is None assert table.client is client assert table.default_operation_timeout == 60 @@ -1101,7 +1152,7 @@ async def test_table_ctor_defaults(self): await client.close() @CrossSync.pytest - async def test_table_ctor_invalid_timeout_values(self): + async def test_ctor_invalid_timeout_values(self): """ bad timeout values should raise ValueError """ @@ -1120,10 +1171,10 @@ async def test_table_ctor_invalid_timeout_values(self): ] for operation_timeout, attempt_timeout in timeout_pairs: with pytest.raises(ValueError) as e: - self._get_target_class()(client, "", "", **{attempt_timeout: -1}) + self._make_one(client, **{attempt_timeout: -1}) assert "attempt_timeout must be greater than 0" in str(e.value) with pytest.raises(ValueError) as e: - self._get_target_class()(client, "", "", **{operation_timeout: -1}) + self._make_one(client, **{operation_timeout: -1}) assert "operation_timeout must be greater than 0" in str(e.value) await client.close() @@ -1173,13 +1224,13 @@ def test_table_ctor_sync(self): ("sample_row_keys", (), False, ()), ( "mutate_row", - (b"row_key", [mock.Mock()]), + (b"row_key", [DeleteAllFromRow()]), False, (), ), ( "bulk_mutate_rows", - ([mutations.RowMutationEntry(b"key", [mutations.DeleteAllFromRow()])],), + ([mutations.RowMutationEntry(b"key", [DeleteAllFromRow()])],), False, (_MutateRowsIncomplete,), ), @@ -1291,7 +1342,7 @@ async def test_call_metadata(self, include_app_profile, fn_name, fn_args, gapic_ gapic_client = gapic_client._client gapic_client._transport = transport_mock gapic_client._is_universe_domain_valid = True - table = self._get_target_class()(client, "instance-id", "table-id", profile) + table = self._make_one(client, app_profile_id=profile) try: test_fn = table.__getattribute__(fn_name) maybe_stream = await test_fn(*fn_args) @@ -1307,13 +1358,129 @@ async def test_call_metadata(self, include_app_profile, fn_name, fn_args, gapic_ # expect x-goog-request-params tag assert metadata[0][0] == "x-goog-request-params" routing_str = metadata[0][1] - assert "table_name=" + table.table_name in routing_str + assert self._expected_routing_header(table) in routing_str if include_app_profile: assert "app_profile_id=profile" in routing_str else: # empty app_profile_id should send empty string assert "app_profile_id=" in routing_str + @staticmethod + def _expected_routing_header(table): + """ + the expected routing header for this _ApiSurface type + """ + return f"table_name={table.table_name}" + + +@CrossSync.convert_class( + "TestAuthorizedView", add_mapping_for_name="TestAuthorizedView" +) +class TestAuthorizedViewsAsync(CrossSync.TestTable): + """ + Inherit tests 
from TestTableAsync, with some modifications + """ + + @staticmethod + @CrossSync.convert + def _get_target_class(): + return CrossSync.AuthorizedView + + def _make_one( + self, + client, + instance_id="instance", + table_id="table", + view_id="view", + app_profile_id=None, + **kwargs, + ): + return self._get_target_class()( + client, instance_id, table_id, view_id, app_profile_id, **kwargs + ) + + @staticmethod + def _expected_routing_header(view): + """ + the expected routing header for this _ApiSurface type + """ + return f"authorized_view_name={view.authorized_view_name}" + + @CrossSync.pytest + async def test_ctor(self): + from google.cloud.bigtable.data._helpers import _WarmedInstanceKey + + expected_table_id = "table-id" + expected_instance_id = "instance-id" + expected_view_id = "view_id" + expected_app_profile_id = "app-profile-id" + expected_operation_timeout = 123 + expected_attempt_timeout = 12 + expected_read_rows_operation_timeout = 1.5 + expected_read_rows_attempt_timeout = 0.5 + expected_mutate_rows_operation_timeout = 2.5 + expected_mutate_rows_attempt_timeout = 0.75 + client = self._make_client() + assert not client._active_instances + + view = self._get_target_class()( + client, + expected_instance_id, + expected_table_id, + expected_view_id, + expected_app_profile_id, + default_operation_timeout=expected_operation_timeout, + default_attempt_timeout=expected_attempt_timeout, + default_read_rows_operation_timeout=expected_read_rows_operation_timeout, + default_read_rows_attempt_timeout=expected_read_rows_attempt_timeout, + default_mutate_rows_operation_timeout=expected_mutate_rows_operation_timeout, + default_mutate_rows_attempt_timeout=expected_mutate_rows_attempt_timeout, + ) + await CrossSync.yield_to_event_loop() + assert view.table_id == expected_table_id + assert ( + view.table_name + == f"projects/{client.project}/instances/{expected_instance_id}/tables/{expected_table_id}" + ) + assert view.instance_id == expected_instance_id + assert ( + view.instance_name + == f"projects/{client.project}/instances/{expected_instance_id}" + ) + assert view.authorized_view_id == expected_view_id + assert ( + view.authorized_view_name + == f"projects/{client.project}/instances/{expected_instance_id}/tables/{expected_table_id}/authorizedViews/{expected_view_id}" + ) + assert view.app_profile_id == expected_app_profile_id + assert view.client is client + instance_key = _WarmedInstanceKey(view.instance_name, view.app_profile_id) + assert instance_key in client._active_instances + assert client._instance_owners[instance_key] == {id(view)} + assert view.default_operation_timeout == expected_operation_timeout + assert view.default_attempt_timeout == expected_attempt_timeout + assert ( + view.default_read_rows_operation_timeout + == expected_read_rows_operation_timeout + ) + assert ( + view.default_read_rows_attempt_timeout == expected_read_rows_attempt_timeout + ) + assert ( + view.default_mutate_rows_operation_timeout + == expected_mutate_rows_operation_timeout + ) + assert ( + view.default_mutate_rows_attempt_timeout + == expected_mutate_rows_attempt_timeout + ) + # ensure task reaches completion + await view._register_instance_future + assert view._register_instance_future.done() + assert not view._register_instance_future.cancelled() + assert view._register_instance_future.exception() is None + await client.close() + @CrossSync.convert_class( "TestReadRows", @@ -2145,11 +2312,12 @@ async def test_sample_row_keys_gapic_params(self): await 
table.sample_row_keys(attempt_timeout=expected_timeout) args, kwargs = sample_row_keys.call_args assert len(args) == 0 - assert len(kwargs) == 4 + assert len(kwargs) == 3 assert kwargs["timeout"] == expected_timeout - assert kwargs["app_profile_id"] == expected_profile - assert kwargs["table_name"] == table.table_name assert kwargs["retry"] is None + request = kwargs["request"] + assert request.app_profile_id == expected_profile + assert request.table_name == table.table_name @pytest.mark.parametrize( "retryable_exception", @@ -2245,17 +2413,18 @@ async def test_mutate_row(self, mutation_arg): ) assert mock_gapic.call_count == 1 kwargs = mock_gapic.call_args_list[0].kwargs + request = kwargs["request"] assert ( - kwargs["table_name"] + request.table_name == "projects/project/instances/instance/tables/table" ) - assert kwargs["row_key"] == b"row_key" + assert request.row_key == b"row_key" formatted_mutations = ( [mutation._to_pb() for mutation in mutation_arg] if isinstance(mutation_arg, list) else [mutation_arg._to_pb()] ) - assert kwargs["mutations"] == formatted_mutations + assert request.mutations == formatted_mutations assert kwargs["timeout"] == expected_attempt_timeout # make sure gapic layer is not retrying assert kwargs["retry"] is None @@ -2427,11 +2596,12 @@ async def test_bulk_mutate_rows(self, mutation_arg): ) assert mock_gapic.call_count == 1 kwargs = mock_gapic.call_args[1] + request = kwargs["request"] assert ( - kwargs["table_name"] + request.table_name == "projects/project/instances/instance/tables/table" ) - assert kwargs["entries"] == [bulk_mutation._to_pb()] + assert request.entries == [bulk_mutation._to_pb()] assert kwargs["timeout"] == expected_attempt_timeout assert kwargs["retry"] is None @@ -2452,12 +2622,13 @@ async def test_bulk_mutate_rows_multiple_entries(self): ) assert mock_gapic.call_count == 1 kwargs = mock_gapic.call_args[1] + request = kwargs["request"] assert ( - kwargs["table_name"] + request.table_name == "projects/project/instances/instance/tables/table" ) - assert kwargs["entries"][0] == entry_1._to_pb() - assert kwargs["entries"][1] == entry_2._to_pb() + assert request.entries[0] == entry_1._to_pb() + assert request.entries[1] == entry_2._to_pb() @CrossSync.pytest @pytest.mark.parametrize( @@ -2765,8 +2936,8 @@ async def test_check_and_mutate(self, gapic_result): ) row_key = b"row_key" predicate = None - true_mutations = [mock.Mock()] - false_mutations = [mock.Mock(), mock.Mock()] + true_mutations = [DeleteAllFromRow()] + false_mutations = [DeleteAllFromRow(), DeleteAllFromRow()] operation_timeout = 0.2 found = await table.check_and_mutate_row( row_key, @@ -2777,16 +2948,17 @@ async def test_check_and_mutate(self, gapic_result): ) assert found == gapic_result kwargs = mock_gapic.call_args[1] - assert kwargs["table_name"] == table.table_name - assert kwargs["row_key"] == row_key - assert kwargs["predicate_filter"] == predicate - assert kwargs["true_mutations"] == [ + request = kwargs["request"] + assert request.table_name == table.table_name + assert request.row_key == row_key + assert bool(request.predicate_filter) is False + assert request.true_mutations == [ m._to_pb() for m in true_mutations ] - assert kwargs["false_mutations"] == [ + assert request.false_mutations == [ m._to_pb() for m in false_mutations ] - assert kwargs["app_profile_id"] == app_profile + assert request.app_profile_id == app_profile assert kwargs["timeout"] == operation_timeout assert kwargs["retry"] is None @@ -2828,16 +3000,18 @@ async def 
test_check_and_mutate_single_mutations(self): false_case_mutations=false_mutation, ) kwargs = mock_gapic.call_args[1] - assert kwargs["true_mutations"] == [true_mutation._to_pb()] - assert kwargs["false_mutations"] == [false_mutation._to_pb()] + request = kwargs["request"] + assert request.true_mutations == [true_mutation._to_pb()] + assert request.false_mutations == [false_mutation._to_pb()] @CrossSync.pytest async def test_check_and_mutate_predicate_object(self): """predicate filter should be passed to gapic request""" from google.cloud.bigtable_v2.types import CheckAndMutateRowResponse + from google.cloud.bigtable_v2.types.data import RowFilter mock_predicate = mock.Mock() - predicate_pb = {"predicate": "dict"} + predicate_pb = RowFilter({"sink": True}) mock_predicate._to_pb.return_value = predicate_pb async with self._make_client() as client: async with client.get_table("instance", "table") as table: @@ -2850,10 +3024,11 @@ async def test_check_and_mutate_predicate_object(self): await table.check_and_mutate_row( b"row_key", mock_predicate, - false_case_mutations=[mock.Mock()], + false_case_mutations=[DeleteAllFromRow()], ) kwargs = mock_gapic.call_args[1] - assert kwargs["predicate_filter"] == predicate_pb + request = kwargs["request"] + assert request.predicate_filter == predicate_pb assert mock_predicate._to_pb.call_count == 1 assert kwargs["retry"] is None @@ -2861,11 +3036,11 @@ async def test_check_and_mutate_predicate_object(self): async def test_check_and_mutate_mutations_parsing(self): """mutations objects should be converted to protos""" from google.cloud.bigtable_v2.types import CheckAndMutateRowResponse - from google.cloud.bigtable.data.mutations import DeleteAllFromRow + from google.cloud.bigtable.data.mutations import DeleteAllFromFamily mutations = [mock.Mock() for _ in range(5)] for idx, mutation in enumerate(mutations): - mutation._to_pb.return_value = f"fake {idx}" + mutation._to_pb.return_value = DeleteAllFromFamily(f"fake {idx}")._to_pb() mutations.append(DeleteAllFromRow()) async with self._make_client() as client: async with client.get_table("instance", "table") as table: @@ -2882,11 +3057,15 @@ async def test_check_and_mutate_mutations_parsing(self): false_case_mutations=mutations[2:], ) kwargs = mock_gapic.call_args[1] - assert kwargs["true_mutations"] == ["fake 0", "fake 1"] - assert kwargs["false_mutations"] == [ - "fake 2", - "fake 3", - "fake 4", + request = kwargs["request"] + assert request.true_mutations == [ + DeleteAllFromFamily("fake 0")._to_pb(), + DeleteAllFromFamily("fake 1")._to_pb(), + ] + assert request.false_mutations == [ + DeleteAllFromFamily("fake 2")._to_pb(), + DeleteAllFromFamily("fake 3")._to_pb(), + DeleteAllFromFamily("fake 4")._to_pb(), DeleteAllFromRow()._to_pb(), ] assert all( @@ -2934,7 +3113,8 @@ async def test_read_modify_write_call_rule_args(self, call_rules, expected_rules await table.read_modify_write_row("key", call_rules) assert mock_gapic.call_count == 1 found_kwargs = mock_gapic.call_args_list[0][1] - assert found_kwargs["rules"] == expected_rules + request = found_kwargs["request"] + assert request.rules == expected_rules assert found_kwargs["retry"] is None @pytest.mark.parametrize("rules", [[], None]) @@ -2957,15 +3137,16 @@ async def test_read_modify_write_call_defaults(self): with mock.patch.object( client._gapic_client, "read_modify_write_row" ) as mock_gapic: - await table.read_modify_write_row(row_key, mock.Mock()) + await table.read_modify_write_row(row_key, IncrementRule("f", "q")) assert mock_gapic.call_count == 1 
kwargs = mock_gapic.call_args_list[0][1] + request = kwargs["request"] assert ( - kwargs["table_name"] + request.table_name == f"projects/{project}/instances/{instance}/tables/{table_id}" ) - assert kwargs["app_profile_id"] is None - assert kwargs["row_key"] == row_key.encode() + assert bool(request.app_profile_id) is False + assert request.row_key == row_key.encode() assert kwargs["timeout"] > 1 @CrossSync.pytest @@ -2982,13 +3163,14 @@ async def test_read_modify_write_call_overrides(self): ) as mock_gapic: await table.read_modify_write_row( row_key, - mock.Mock(), + IncrementRule("f", "q"), operation_timeout=expected_timeout, ) assert mock_gapic.call_count == 1 kwargs = mock_gapic.call_args_list[0][1] - assert kwargs["app_profile_id"] is profile_id - assert kwargs["row_key"] == row_key + request = kwargs["request"] + assert request.app_profile_id == profile_id + assert request.row_key == row_key assert kwargs["timeout"] == expected_timeout @CrossSync.pytest @@ -2999,10 +3181,11 @@ async def test_read_modify_write_string_key(self): with mock.patch.object( client._gapic_client, "read_modify_write_row" ) as mock_gapic: - await table.read_modify_write_row(row_key, mock.Mock()) + await table.read_modify_write_row(row_key, IncrementRule("f", "q")) assert mock_gapic.call_count == 1 kwargs = mock_gapic.call_args_list[0][1] - assert kwargs["row_key"] == row_key.encode() + request = kwargs["request"] + assert request.row_key == row_key.encode() @CrossSync.pytest async def test_read_modify_write_row_building(self): @@ -3021,7 +3204,9 @@ async def test_read_modify_write_row_building(self): ) as mock_gapic: with mock.patch.object(Row, "_from_pb") as constructor_mock: mock_gapic.return_value = mock_response - await table.read_modify_write_row("key", mock.Mock()) + await table.read_modify_write_row( + "key", IncrementRule("f", "q") + ) assert constructor_mock.call_count == 1 constructor_mock.assert_called_once_with(mock_response.row) diff --git a/packages/google-cloud-bigtable/tests/unit/data/_async/test_mutations_batcher.py b/packages/google-cloud-bigtable/tests/unit/data/_async/test_mutations_batcher.py index 2df8dde6d216..29f2f10263a6 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/_async/test_mutations_batcher.py +++ b/packages/google-cloud-bigtable/tests/unit/data/_async/test_mutations_batcher.py @@ -19,6 +19,8 @@ import google.api_core.exceptions as core_exceptions import google.api_core.retry from google.cloud.bigtable.data.exceptions import _MutateRowsIncomplete +from google.cloud.bigtable.data.mutations import RowMutationEntry +from google.cloud.bigtable.data.mutations import DeleteAllFromRow from google.cloud.bigtable.data import TABLE_DEFAULT from google.cloud.bigtable.data._cross_sync import CrossSync @@ -38,9 +40,9 @@ def _make_one(self, max_mutation_count=10, max_mutation_bytes=100): @staticmethod def _make_mutation(count=1, size=1): - mutation = mock.Mock() - mutation.size.return_value = size - mutation.mutations = [mock.Mock()] * count + mutation = RowMutationEntry("k", DeleteAllFromRow()) + mutation.mutations = [DeleteAllFromRow() for _ in range(count)] + mutation.size = lambda: size return mutation def test_ctor(self): @@ -308,6 +310,8 @@ def _make_one(self, table=None, **kwargs): if table is None: table = mock.Mock() + table._request_path = {"table_name": "table"} + table.app_profile_id = None table.default_mutate_rows_operation_timeout = 10 table.default_mutate_rows_attempt_timeout = 10 table.default_mutate_rows_retryable_errors = ( @@ -319,9 +323,9 @@ def 
_make_one(self, table=None, **kwargs): @staticmethod def _make_mutation(count=1, size=1): - mutation = mock.Mock() - mutation.size.return_value = size - mutation.mutations = [mock.Mock()] * count + mutation = RowMutationEntry("k", DeleteAllFromRow()) + mutation.size = lambda: size + mutation.mutations = [DeleteAllFromRow() for _ in range(count)] return mutation @CrossSync.pytest @@ -334,7 +338,7 @@ async def test_ctor_defaults(self): table.default_mutate_rows_attempt_timeout = 8 table.default_mutate_rows_retryable_errors = [Exception] async with self._make_one(table) as instance: - assert instance._table == table + assert instance._target == table assert instance.closed is False assert instance._flush_jobs == set() assert len(instance._staged_entries) == 0 @@ -390,7 +394,7 @@ async def test_ctor_explicit(self): batch_attempt_timeout=attempt_timeout, batch_retryable_errors=retryable_errors, ) as instance: - assert instance._table == table + assert instance._target == table assert instance.closed is False assert instance._flush_jobs == set() assert len(instance._staged_entries) == 0 @@ -435,7 +439,7 @@ async def test_ctor_no_flush_limits(self): flush_limit_mutation_count=flush_limit_count, flush_limit_bytes=flush_limit_bytes, ) as instance: - assert instance._table == table + assert instance._target == table assert instance.closed is False assert instance._staged_entries == [] assert len(instance._oldest_exceptions) == 0 @@ -903,10 +907,10 @@ async def test_timer_flush_end_to_end(self): mutations = [self._make_mutation(count=2, size=2)] * num_mutations async with self._make_one(flush_interval=0.05) as instance: - instance._table.default_operation_timeout = 10 - instance._table.default_attempt_timeout = 9 + instance._target.default_operation_timeout = 10 + instance._target.default_attempt_timeout = 9 with mock.patch.object( - instance._table.client._gapic_client, "mutate_rows" + instance._target.client._gapic_client, "mutate_rows" ) as gapic_mock: gapic_mock.side_effect = ( lambda *args, **kwargs: self._mock_gapic_return(num_mutations) diff --git a/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test__mutate_rows.py b/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test__mutate_rows.py index 2173c88fb1ae..b198df01b9c8 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test__mutate_rows.py +++ b/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test__mutate_rows.py @@ -17,6 +17,8 @@ import pytest from google.cloud.bigtable_v2.types import MutateRowsResponse +from google.cloud.bigtable.data.mutations import RowMutationEntry +from google.cloud.bigtable.data.mutations import DeleteAllFromRow from google.rpc import status_pb2 from google.api_core.exceptions import DeadlineExceeded from google.api_core.exceptions import Forbidden @@ -34,8 +36,11 @@ def _target_class(self): def _make_one(self, *args, **kwargs): if not args: + fake_target = CrossSync._Sync_Impl.Mock() + fake_target._request_path = {"table_name": "table"} + fake_target.app_profile_id = None kwargs["gapic_client"] = kwargs.pop("gapic_client", mock.Mock()) - kwargs["table"] = kwargs.pop("table", CrossSync._Sync_Impl.Mock()) + kwargs["target"] = kwargs.pop("target", fake_target) kwargs["operation_timeout"] = kwargs.pop("operation_timeout", 5) kwargs["attempt_timeout"] = kwargs.pop("attempt_timeout", 0.1) kwargs["retryable_exceptions"] = kwargs.pop("retryable_exceptions", ()) @@ -43,9 +48,8 @@ def _make_one(self, *args, **kwargs): return self._target_class()(*args, **kwargs) def 
_make_mutation(self, count=1, size=1): - mutation = mock.Mock() - mutation.size.return_value = size - mutation.mutations = [mock.Mock()] * count + mutation = RowMutationEntry("k", [DeleteAllFromRow() for _ in range(count)]) + mutation.size = lambda: size return mutation def _mock_stream(self, mutation_list, error_dict): @@ -92,11 +96,6 @@ def test_ctor(self): assert client.mutate_rows.call_count == 0 instance._gapic_fn() assert client.mutate_rows.call_count == 1 - inner_kwargs = client.mutate_rows.call_args[1] - assert len(inner_kwargs) == 3 - assert inner_kwargs["table_name"] == table.table_name - assert inner_kwargs["app_profile_id"] == table.app_profile_id - assert inner_kwargs["retry"] is None entries_w_pb = [_EntryWithProto(e, e._to_pb()) for e in entries] assert instance.mutations == entries_w_pb assert next(instance.timeout_generator) == attempt_timeout @@ -148,6 +147,8 @@ def test_mutate_rows_attempt_exception(self, exc_type): """exceptions raised from attempt should be raised in MutationsExceptionGroup""" client = CrossSync._Sync_Impl.Mock() table = mock.Mock() + table._request_path = {"table_name": "table"} + table.app_profile_id = None entries = [self._make_mutation(), self._make_mutation()] operation_timeout = 0.05 expected_exception = exc_type("test") @@ -260,7 +261,8 @@ def test_run_attempt_single_entry_success(self): assert mock_gapic_fn.call_count == 1 (_, kwargs) = mock_gapic_fn.call_args assert kwargs["timeout"] == expected_timeout - assert kwargs["entries"] == [mutation._to_pb()] + request = kwargs["request"] + assert request.entries == [mutation._to_pb()] def test_run_attempt_empty_request(self): """Calling with no mutations should result in no API calls""" diff --git a/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test__read_rows.py b/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test__read_rows.py index 973b07bcb27c..a545142d3dfb 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test__read_rows.py +++ b/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test__read_rows.py @@ -48,7 +48,7 @@ def test_ctor(self): client.read_rows.return_value = None table = mock.Mock() table._client = client - table.table_name = "test_table" + table._request_path = {"table_name": "test_table"} table.app_profile_id = "test_profile" expected_operation_timeout = 42 expected_request_timeout = 44 @@ -72,7 +72,7 @@ def test_ctor(self): assert instance._remaining_count == row_limit assert instance.operation_timeout == expected_operation_timeout assert client.read_rows.call_count == 0 - assert instance.request.table_name == table.table_name + assert instance.request.table_name == "test_table" assert instance.request.app_profile_id == table.app_profile_id assert instance.request.rows_limit == row_limit @@ -252,7 +252,7 @@ def mock_stream(): query = ReadRowsQuery(limit=start_limit) table = mock.Mock() - table.table_name = "table_name" + table._request_path = {"table_name": "table_name"} table.app_profile_id = "app_profile_id" instance = self._make_one(query, table, 10, 10) assert instance._remaining_count == start_limit @@ -287,7 +287,7 @@ def mock_stream(): query = ReadRowsQuery(limit=start_limit) table = mock.Mock() - table.table_name = "table_name" + table._request_path = {"table_name": "table_name"} table.app_profile_id = "app_profile_id" instance = self._make_one(query, table, 10, 10) assert instance._remaining_count == start_limit diff --git a/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_client.py 
b/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_client.py index eea3f36bf5cc..38866c9dd540 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_client.py @@ -27,6 +27,7 @@ from google.api_core import exceptions as core_exceptions from google.cloud.bigtable.data.exceptions import InvalidChunk from google.cloud.bigtable.data.exceptions import _MutateRowsIncomplete +from google.cloud.bigtable.data.mutations import DeleteAllFromRow from google.cloud.bigtable.data import TABLE_DEFAULT from google.cloud.bigtable.data.read_modify_write_rules import IncrementRule from google.cloud.bigtable.data.read_modify_write_rules import AppendValueRule @@ -207,9 +208,7 @@ def test__ping_and_warm_instances(self): assert len(result) == 0 assert gather.call_args[1]["return_exceptions"] is True assert gather.call_args[1]["sync_executor"] == client_mock._executor - client_mock._active_instances = [ - (mock.Mock(), mock.Mock(), mock.Mock()) - ] * 4 + client_mock._active_instances = [(mock.Mock(), mock.Mock())] * 4 gather.reset_mock() channel.reset_mock() result = self._get_target_class()._ping_and_warm_instances( @@ -223,7 +222,6 @@ def test__ping_and_warm_instances(self): for idx, (_, kwargs) in enumerate(grpc_call_args): ( expected_instance, - expected_table, expected_app_profile, ) = client_mock._active_instances[idx] request = kwargs["request"] @@ -250,7 +248,7 @@ def test__ping_and_warm_single_instance(self): ) as gather: gather.side_effect = lambda *args, **kwargs: [fn() for fn in args[0]] client_mock._active_instances = [mock.Mock()] * 100 - test_key = ("test-instance", "test-table", "test-app-profile") + test_key = ("test-instance", "test-app-profile") result = self._get_target_class()._ping_and_warm_instances( client_mock, test_key ) @@ -436,11 +434,7 @@ def test__register_instance(self): client_mock, "instance-1", table_mock ) assert client_mock._start_background_channel_refresh.call_count == 1 - expected_key = ( - "prefix/instance-1", - table_mock.table_name, - table_mock.app_profile_id, - ) + expected_key = ("prefix/instance-1", table_mock.app_profile_id) assert len(active_instances) == 1 assert expected_key == tuple(list(active_instances)[0]) assert len(instance_owners) == 1 @@ -458,11 +452,7 @@ def test__register_instance(self): assert client_mock._ping_and_warm_instances.call_count == 1 assert len(active_instances) == 2 assert len(instance_owners) == 2 - expected_key2 = ( - "prefix/instance-2", - table_mock2.table_name, - table_mock2.app_profile_id, - ) + expected_key2 = ("prefix/instance-2", table_mock2.app_profile_id) assert any( [ expected_key2 == tuple(list(active_instances)[i]) @@ -489,11 +479,7 @@ def test__register_instance_duplicate(self): client_mock.transport.channels = mock_channels client_mock._ping_and_warm_instances = CrossSync._Sync_Impl.Mock() table_mock = mock.Mock() - expected_key = ( - "prefix/instance-1", - table_mock.table_name, - table_mock.app_profile_id, - ) + expected_key = ("prefix/instance-1", table_mock.app_profile_id) self._get_target_class()._register_instance( client_mock, "instance-1", table_mock ) @@ -514,13 +500,13 @@ def test__register_instance_duplicate(self): @pytest.mark.parametrize( "insert_instances,expected_active,expected_owner_keys", [ - ([("i", "t", None)], [("i", "t", None)], [("i", "t", None)]), - ([("i", "t", "p")], [("i", "t", "p")], [("i", "t", "p")]), - ([("1", "t", "p"), ("1", "t", "p")], [("1", "t", "p")], [("1", "t", 
"p")]), + ([("i", None)], [("i", None)], [("i", None)]), + ([("i", "p")], [("i", "p")], [("i", "p")]), + ([("1", "p"), ("1", "p")], [("1", "p")], [("1", "p")]), ( - [("1", "t", "p"), ("2", "t", "p")], - [("1", "t", "p"), ("2", "t", "p")], - [("1", "t", "p"), ("2", "t", "p")], + [("1", "p"), ("2", "p")], + [("1", "p"), ("2", "p")], + [("1", "p"), ("2", "p")], ), ], ) @@ -537,8 +523,7 @@ def test__register_instance_state( client_mock._channel_refresh_task = None client_mock._ping_and_warm_instances = CrossSync._Sync_Impl.Mock() table_mock = mock.Mock() - for instance, table, profile in insert_instances: - table_mock.table_name = table + for instance, profile in insert_instances: table_mock.app_profile_id = profile self._get_target_class()._register_instance( client_mock, instance, table_mock @@ -570,11 +555,11 @@ def test__remove_instance_registration(self): instance_1_path = client._gapic_client.instance_path( client.project, "instance-1" ) - instance_1_key = (instance_1_path, table.table_name, table.app_profile_id) + instance_1_key = (instance_1_path, table.app_profile_id) instance_2_path = client._gapic_client.instance_path( client.project, "instance-2" ) - instance_2_key = (instance_2_path, table.table_name, table.app_profile_id) + instance_2_key = (instance_2_path, table.app_profile_id) assert len(client._instance_owners[instance_1_key]) == 1 assert list(client._instance_owners[instance_1_key])[0] == id(table) assert len(client._instance_owners[instance_2_key]) == 1 @@ -602,26 +587,28 @@ def test__multiple_table_registration(self): client.project, "instance_1" ) instance_1_key = _WarmedInstanceKey( - instance_1_path, table_1.table_name, table_1.app_profile_id + instance_1_path, table_1.app_profile_id ) assert len(client._instance_owners[instance_1_key]) == 1 assert len(client._active_instances) == 1 assert id(table_1) in client._instance_owners[instance_1_key] - with client.get_table("instance_1", "table_1") as table_2: + with client.get_table("instance_1", "table_2") as table_2: assert table_2._register_instance_future is not None table_2._register_instance_future.result() assert len(client._instance_owners[instance_1_key]) == 2 assert len(client._active_instances) == 1 assert id(table_1) in client._instance_owners[instance_1_key] assert id(table_2) in client._instance_owners[instance_1_key] - with client.get_table("instance_1", "table_3") as table_3: + with client.get_table( + "instance_1", "table_3", app_profile_id="diff" + ) as table_3: assert table_3._register_instance_future is not None table_3._register_instance_future.result() instance_3_path = client._gapic_client.instance_path( client.project, "instance_1" ) instance_3_key = _WarmedInstanceKey( - instance_3_path, table_3.table_name, table_3.app_profile_id + instance_3_path, table_3.app_profile_id ) assert len(client._instance_owners[instance_1_key]) == 2 assert len(client._instance_owners[instance_3_key]) == 1 @@ -652,13 +639,13 @@ def test__multiple_instance_registration(self): client.project, "instance_1" ) instance_1_key = _WarmedInstanceKey( - instance_1_path, table_1.table_name, table_1.app_profile_id + instance_1_path, table_1.app_profile_id ) instance_2_path = client._gapic_client.instance_path( client.project, "instance_2" ) instance_2_key = _WarmedInstanceKey( - instance_2_path, table_2.table_name, table_2.app_profile_id + instance_2_path, table_2.app_profile_id ) assert len(client._instance_owners[instance_1_key]) == 1 assert len(client._instance_owners[instance_2_key]) == 1 @@ -674,7 +661,9 @@ def 
test__multiple_instance_registration(self): assert len(client._instance_owners[instance_1_key]) == 0 assert len(client._instance_owners[instance_2_key]) == 0 - def test_get_table(self): + @pytest.mark.parametrize("method", ["get_table", "get_authorized_view"]) + def test_get_api_surface(self, method): + """test client.get_table and client.get_authorized_view""" from google.cloud.bigtable.data._helpers import _WarmedInstanceKey client = self._make_client(project="project-id") @@ -682,77 +671,113 @@ def test_get_table(self): expected_table_id = "table-id" expected_instance_id = "instance-id" expected_app_profile_id = "app-profile-id" - table = client.get_table( - expected_instance_id, expected_table_id, expected_app_profile_id - ) + if method == "get_table": + surface = client.get_table( + expected_instance_id, expected_table_id, expected_app_profile_id + ) + assert isinstance( + surface, CrossSync._Sync_Impl.TestTable._get_target_class() + ) + elif method == "get_authorized_view": + surface = client.get_authorized_view( + expected_instance_id, + expected_table_id, + "view_id", + expected_app_profile_id, + ) + assert isinstance( + surface, CrossSync._Sync_Impl.TestAuthorizedView._get_target_class() + ) + assert ( + surface.authorized_view_name + == f"projects/{client.project}/instances/{expected_instance_id}/tables/{expected_table_id}/authorizedViews/view_id" + ) + else: + raise TypeError(f"unexpected method: {method}") CrossSync._Sync_Impl.yield_to_event_loop() - assert isinstance(table, CrossSync._Sync_Impl.TestTable._get_target_class()) - assert table.table_id == expected_table_id + assert surface.table_id == expected_table_id assert ( - table.table_name + surface.table_name == f"projects/{client.project}/instances/{expected_instance_id}/tables/{expected_table_id}" ) - assert table.instance_id == expected_instance_id + assert surface.instance_id == expected_instance_id assert ( - table.instance_name + surface.instance_name == f"projects/{client.project}/instances/{expected_instance_id}" ) - assert table.app_profile_id == expected_app_profile_id - assert table.client is client - instance_key = _WarmedInstanceKey( - table.instance_name, table.table_name, table.app_profile_id - ) + assert surface.app_profile_id == expected_app_profile_id + assert surface.client is client + instance_key = _WarmedInstanceKey(surface.instance_name, surface.app_profile_id) assert instance_key in client._active_instances - assert client._instance_owners[instance_key] == {id(table)} + assert client._instance_owners[instance_key] == {id(surface)} client.close() - def test_get_table_arg_passthrough(self): - """All arguments passed in get_table should be sent to constructor""" + @pytest.mark.parametrize("method", ["get_table", "get_authorized_view"]) + def test_api_surface_arg_passthrough(self, method): + """All arguments passed in get_table and get_authorized_view should be sent to constructor""" + if method == "get_table": + surface_type = CrossSync._Sync_Impl.TestTable._get_target_class() + elif method == "get_authorized_view": + surface_type = CrossSync._Sync_Impl.TestAuthorizedView._get_target_class() + else: + raise TypeError(f"unexpected method: {method}") with self._make_client(project="project-id") as client: - with mock.patch.object( - CrossSync._Sync_Impl.TestTable._get_target_class(), "__init__" - ) as mock_constructor: + with mock.patch.object(surface_type, "__init__") as mock_constructor: mock_constructor.return_value = None assert not client._active_instances - expected_table_id = "table-id" - 
expected_instance_id = "instance-id" - expected_app_profile_id = "app-profile-id" - expected_args = (1, "test", {"test": 2}) - expected_kwargs = {"hello": "world", "test": 2} - client.get_table( - expected_instance_id, - expected_table_id, - expected_app_profile_id, - *expected_args, - **expected_kwargs, + expected_args = ( + "table", + "instance", + "view", + "app_profile", + 1, + "test", + {"test": 2}, ) + expected_kwargs = {"hello": "world", "test": 2} + getattr(client, method)(*expected_args, **expected_kwargs) mock_constructor.assert_called_once_with( - client, - expected_instance_id, - expected_table_id, - expected_app_profile_id, - *expected_args, - **expected_kwargs, + client, *expected_args, **expected_kwargs ) - def test_get_table_context_manager(self): + @pytest.mark.parametrize("method", ["get_table", "get_authorized_view"]) + def test_api_surface_context_manager(self, method): + """get_table and get_authorized_view should work as context managers""" + from functools import partial from google.cloud.bigtable.data._helpers import _WarmedInstanceKey expected_table_id = "table-id" expected_instance_id = "instance-id" expected_app_profile_id = "app-profile-id" expected_project_id = "project-id" - with mock.patch.object( - CrossSync._Sync_Impl.TestTable._get_target_class(), "close" - ) as close_mock: + if method == "get_table": + surface_type = CrossSync._Sync_Impl.TestTable._get_target_class() + elif method == "get_authorized_view": + surface_type = CrossSync._Sync_Impl.TestAuthorizedView._get_target_class() + else: + raise TypeError(f"unexpected method: {method}") + with mock.patch.object(surface_type, "close") as close_mock: with self._make_client(project=expected_project_id) as client: - with client.get_table( - expected_instance_id, expected_table_id, expected_app_profile_id - ) as table: - CrossSync._Sync_Impl.yield_to_event_loop() - assert isinstance( - table, CrossSync._Sync_Impl.TestTable._get_target_class() + if method == "get_table": + fn = partial( + client.get_table, + expected_instance_id, + expected_table_id, + expected_app_profile_id, + ) + elif method == "get_authorized_view": + fn = partial( + client.get_authorized_view, + expected_instance_id, + expected_table_id, + "view_id", + expected_app_profile_id, ) + else: + raise TypeError(f"unexpected method: {method}") + with fn() as table: + CrossSync._Sync_Impl.yield_to_event_loop() + assert isinstance(table, surface_type) assert table.table_id == expected_table_id assert ( table.table_name @@ -766,7 +791,7 @@ def test_get_table_context_manager(self): assert table.app_profile_id == expected_app_profile_id assert table.client is client instance_key = _WarmedInstanceKey( - table.instance_name, table.table_name, table.app_profile_id + table.instance_name, table.app_profile_id ) assert instance_key in client._active_instances assert client._instance_owners[instance_key] == {id(table)} @@ -821,7 +846,19 @@ def _make_client(self, *args, **kwargs): def _get_target_class(): return CrossSync._Sync_Impl.Table - def test_table_ctor(self): + def _make_one( + self, + client, + instance_id="instance", + table_id="table", + app_profile_id=None, + **kwargs, + ): + return self._get_target_class()( + client, instance_id, table_id, app_profile_id, **kwargs + ) + + def test_ctor(self): from google.cloud.bigtable.data._helpers import _WarmedInstanceKey expected_table_id = "table-id" @@ -850,11 +887,17 @@ def test_table_ctor(self): CrossSync._Sync_Impl.yield_to_event_loop() assert table.table_id == expected_table_id assert table.instance_id 
== expected_instance_id + assert ( + table.table_name + == f"projects/{client.project}/instances/{expected_instance_id}/tables/{expected_table_id}" + ) + assert ( + table.instance_name + == f"projects/{client.project}/instances/{expected_instance_id}" + ) assert table.app_profile_id == expected_app_profile_id assert table.client is client - instance_key = _WarmedInstanceKey( - table.instance_name, table.table_name, table.app_profile_id - ) + instance_key = _WarmedInstanceKey(table.instance_name, table.app_profile_id) assert instance_key in client._active_instances assert client._instance_owners[instance_key] == {id(table)} assert table.default_operation_timeout == expected_operation_timeout @@ -881,18 +924,12 @@ def test_table_ctor(self): assert table._register_instance_future.exception() is None client.close() - def test_table_ctor_defaults(self): + def test_ctor_defaults(self): """should provide default timeout values and app_profile_id""" - expected_table_id = "table-id" - expected_instance_id = "instance-id" client = self._make_client() assert not client._active_instances - table = self._get_target_class()( - client, expected_instance_id, expected_table_id - ) + table = self._make_one(client) CrossSync._Sync_Impl.yield_to_event_loop() - assert table.table_id == expected_table_id - assert table.instance_id == expected_instance_id assert table.app_profile_id is None assert table.client is client assert table.default_operation_timeout == 60 @@ -903,7 +940,7 @@ def test_table_ctor_defaults(self): assert table.default_mutate_rows_attempt_timeout == 60 client.close() - def test_table_ctor_invalid_timeout_values(self): + def test_ctor_invalid_timeout_values(self): """bad timeout values should raise ValueError""" client = self._make_client() timeout_pairs = [ @@ -919,10 +956,10 @@ def test_table_ctor_invalid_timeout_values(self): ] for operation_timeout, attempt_timeout in timeout_pairs: with pytest.raises(ValueError) as e: - self._get_target_class()(client, "", "", **{attempt_timeout: -1}) + self._make_one(client, **{attempt_timeout: -1}) assert "attempt_timeout must be greater than 0" in str(e.value) with pytest.raises(ValueError) as e: - self._get_target_class()(client, "", "", **{operation_timeout: -1}) + self._make_one(client, **{operation_timeout: -1}) assert "operation_timeout must be greater than 0" in str(e.value) client.close() @@ -935,10 +972,10 @@ def test_table_ctor_invalid_timeout_values(self): ("read_rows_sharded", ([ReadRowsQuery()],), True, ()), ("row_exists", (b"row_key",), True, ()), ("sample_row_keys", (), False, ()), - ("mutate_row", (b"row_key", [mock.Mock()]), False, ()), + ("mutate_row", (b"row_key", [DeleteAllFromRow()]), False, ()), ( "bulk_mutate_rows", - ([mutations.RowMutationEntry(b"key", [mutations.DeleteAllFromRow()])],), + ([mutations.RowMutationEntry(b"key", [DeleteAllFromRow()])],), False, (_MutateRowsIncomplete,), ), @@ -1035,7 +1072,7 @@ def test_call_metadata(self, include_app_profile, fn_name, fn_args, gapic_fn): gapic_client = client._gapic_client gapic_client._transport = transport_mock gapic_client._is_universe_domain_valid = True - table = self._get_target_class()(client, "instance-id", "table-id", profile) + table = self._make_one(client, app_profile_id=profile) try: test_fn = table.__getattribute__(fn_name) maybe_stream = test_fn(*fn_args) @@ -1048,12 +1085,118 @@ def test_call_metadata(self, include_app_profile, fn_name, fn_args, gapic_fn): assert len(metadata) == 1 assert metadata[0][0] == "x-goog-request-params" routing_str = metadata[0][1] - 
assert "table_name=" + table.table_name in routing_str + assert self._expected_routing_header(table) in routing_str if include_app_profile: assert "app_profile_id=profile" in routing_str else: assert "app_profile_id=" in routing_str + @staticmethod + def _expected_routing_header(table): + """the expected routing header for this _ApiSurface type""" + return f"table_name={table.table_name}" + + +@CrossSync._Sync_Impl.add_mapping_decorator("TestAuthorizedView") +class TestAuthorizedView(CrossSync._Sync_Impl.TestTable): + """ + Inherit tests from TestTableAsync, with some modifications + """ + + @staticmethod + def _get_target_class(): + return CrossSync._Sync_Impl.AuthorizedView + + def _make_one( + self, + client, + instance_id="instance", + table_id="table", + view_id="view", + app_profile_id=None, + **kwargs, + ): + return self._get_target_class()( + client, instance_id, table_id, view_id, app_profile_id, **kwargs + ) + + @staticmethod + def _expected_routing_header(view): + """the expected routing header for this _ApiSurface type""" + return f"authorized_view_name={view.authorized_view_name}" + + def test_ctor(self): + from google.cloud.bigtable.data._helpers import _WarmedInstanceKey + + expected_table_id = "table-id" + expected_instance_id = "instance-id" + expected_view_id = "view_id" + expected_app_profile_id = "app-profile-id" + expected_operation_timeout = 123 + expected_attempt_timeout = 12 + expected_read_rows_operation_timeout = 1.5 + expected_read_rows_attempt_timeout = 0.5 + expected_mutate_rows_operation_timeout = 2.5 + expected_mutate_rows_attempt_timeout = 0.75 + client = self._make_client() + assert not client._active_instances + view = self._get_target_class()( + client, + expected_instance_id, + expected_table_id, + expected_view_id, + expected_app_profile_id, + default_operation_timeout=expected_operation_timeout, + default_attempt_timeout=expected_attempt_timeout, + default_read_rows_operation_timeout=expected_read_rows_operation_timeout, + default_read_rows_attempt_timeout=expected_read_rows_attempt_timeout, + default_mutate_rows_operation_timeout=expected_mutate_rows_operation_timeout, + default_mutate_rows_attempt_timeout=expected_mutate_rows_attempt_timeout, + ) + CrossSync._Sync_Impl.yield_to_event_loop() + assert view.table_id == expected_table_id + assert ( + view.table_name + == f"projects/{client.project}/instances/{expected_instance_id}/tables/{expected_table_id}" + ) + assert view.instance_id == expected_instance_id + assert ( + view.instance_name + == f"projects/{client.project}/instances/{expected_instance_id}" + ) + assert view.authorized_view_id == expected_view_id + assert ( + view.authorized_view_name + == f"projects/{client.project}/instances/{expected_instance_id}/tables/{expected_table_id}/authorizedViews/{expected_view_id}" + ) + assert view.app_profile_id == expected_app_profile_id + assert view.client is client + instance_key = _WarmedInstanceKey(view.instance_name, view.app_profile_id) + assert instance_key in client._active_instances + assert client._instance_owners[instance_key] == {id(view)} + assert view.default_operation_timeout == expected_operation_timeout + assert view.default_attempt_timeout == expected_attempt_timeout + assert ( + view.default_read_rows_operation_timeout + == expected_read_rows_operation_timeout + ) + assert ( + view.default_read_rows_attempt_timeout == expected_read_rows_attempt_timeout + ) + assert ( + view.default_mutate_rows_operation_timeout + == expected_mutate_rows_operation_timeout + ) + assert ( + 
view.default_mutate_rows_attempt_timeout + == expected_mutate_rows_attempt_timeout + ) + view._register_instance_future + assert view._register_instance_future.done() + assert not view._register_instance_future.cancelled() + assert view._register_instance_future.exception() is None + client.close() + @CrossSync._Sync_Impl.add_mapping_decorator("TestReadRows") class TestReadRows: @@ -1787,11 +1930,12 @@ def test_sample_row_keys_gapic_params(self): table.sample_row_keys(attempt_timeout=expected_timeout) (args, kwargs) = sample_row_keys.call_args assert len(args) == 0 - assert len(kwargs) == 4 + assert len(kwargs) == 3 assert kwargs["timeout"] == expected_timeout - assert kwargs["app_profile_id"] == expected_profile - assert kwargs["table_name"] == table.table_name assert kwargs["retry"] is None + request = kwargs["request"] + assert request.app_profile_id == expected_profile + assert request.table_name == table.table_name @pytest.mark.parametrize( "retryable_exception", @@ -1879,17 +2023,18 @@ def test_mutate_row(self, mutation_arg): ) assert mock_gapic.call_count == 1 kwargs = mock_gapic.call_args_list[0].kwargs + request = kwargs["request"] assert ( - kwargs["table_name"] + request.table_name == "projects/project/instances/instance/tables/table" ) - assert kwargs["row_key"] == b"row_key" + assert request.row_key == b"row_key" formatted_mutations = ( [mutation._to_pb() for mutation in mutation_arg] if isinstance(mutation_arg, list) else [mutation_arg._to_pb()] ) - assert kwargs["mutations"] == formatted_mutations + assert request.mutations == formatted_mutations assert kwargs["timeout"] == expected_attempt_timeout assert kwargs["retry"] is None @@ -2033,11 +2178,12 @@ def test_bulk_mutate_rows(self, mutation_arg): ) assert mock_gapic.call_count == 1 kwargs = mock_gapic.call_args[1] + request = kwargs["request"] assert ( - kwargs["table_name"] + request.table_name == "projects/project/instances/instance/tables/table" ) - assert kwargs["entries"] == [bulk_mutation._to_pb()] + assert request.entries == [bulk_mutation._to_pb()] assert kwargs["timeout"] == expected_attempt_timeout assert kwargs["retry"] is None @@ -2055,12 +2201,13 @@ def test_bulk_mutate_rows_multiple_entries(self): table.bulk_mutate_rows([entry_1, entry_2]) assert mock_gapic.call_count == 1 kwargs = mock_gapic.call_args[1] + request = kwargs["request"] assert ( - kwargs["table_name"] + request.table_name == "projects/project/instances/instance/tables/table" ) - assert kwargs["entries"][0] == entry_1._to_pb() - assert kwargs["entries"][1] == entry_2._to_pb() + assert request.entries[0] == entry_1._to_pb() + assert request.entries[1] == entry_2._to_pb() @pytest.mark.parametrize( "exception", @@ -2328,8 +2475,8 @@ def test_check_and_mutate(self, gapic_result): ) row_key = b"row_key" predicate = None - true_mutations = [mock.Mock()] - false_mutations = [mock.Mock(), mock.Mock()] + true_mutations = [DeleteAllFromRow()] + false_mutations = [DeleteAllFromRow(), DeleteAllFromRow()] operation_timeout = 0.2 found = table.check_and_mutate_row( row_key, @@ -2340,16 +2487,17 @@ def test_check_and_mutate(self, gapic_result): ) assert found == gapic_result kwargs = mock_gapic.call_args[1] - assert kwargs["table_name"] == table.table_name - assert kwargs["row_key"] == row_key - assert kwargs["predicate_filter"] == predicate - assert kwargs["true_mutations"] == [ + request = kwargs["request"] + assert request.table_name == table.table_name + assert request.row_key == row_key + assert bool(request.predicate_filter) is False + assert 
request.true_mutations == [ m._to_pb() for m in true_mutations ] - assert kwargs["false_mutations"] == [ + assert request.false_mutations == [ m._to_pb() for m in false_mutations ] - assert kwargs["app_profile_id"] == app_profile + assert request.app_profile_id == app_profile assert kwargs["timeout"] == operation_timeout assert kwargs["retry"] is None @@ -2389,15 +2537,17 @@ def test_check_and_mutate_single_mutations(self): false_case_mutations=false_mutation, ) kwargs = mock_gapic.call_args[1] - assert kwargs["true_mutations"] == [true_mutation._to_pb()] - assert kwargs["false_mutations"] == [false_mutation._to_pb()] + request = kwargs["request"] + assert request.true_mutations == [true_mutation._to_pb()] + assert request.false_mutations == [false_mutation._to_pb()] def test_check_and_mutate_predicate_object(self): """predicate filter should be passed to gapic request""" from google.cloud.bigtable_v2.types import CheckAndMutateRowResponse + from google.cloud.bigtable_v2.types.data import RowFilter mock_predicate = mock.Mock() - predicate_pb = {"predicate": "dict"} + predicate_pb = RowFilter({"sink": True}) mock_predicate._to_pb.return_value = predicate_pb with self._make_client() as client: with client.get_table("instance", "table") as table: @@ -2408,21 +2558,24 @@ def test_check_and_mutate_predicate_object(self): predicate_matched=True ) table.check_and_mutate_row( - b"row_key", mock_predicate, false_case_mutations=[mock.Mock()] + b"row_key", + mock_predicate, + false_case_mutations=[DeleteAllFromRow()], ) kwargs = mock_gapic.call_args[1] - assert kwargs["predicate_filter"] == predicate_pb + request = kwargs["request"] + assert request.predicate_filter == predicate_pb assert mock_predicate._to_pb.call_count == 1 assert kwargs["retry"] is None def test_check_and_mutate_mutations_parsing(self): """mutations objects should be converted to protos""" from google.cloud.bigtable_v2.types import CheckAndMutateRowResponse - from google.cloud.bigtable.data.mutations import DeleteAllFromRow + from google.cloud.bigtable.data.mutations import DeleteAllFromFamily mutations = [mock.Mock() for _ in range(5)] for idx, mutation in enumerate(mutations): - mutation._to_pb.return_value = f"fake {idx}" + mutation._to_pb.return_value = DeleteAllFromFamily(f"fake {idx}")._to_pb() mutations.append(DeleteAllFromRow()) with self._make_client() as client: with client.get_table("instance", "table") as table: @@ -2439,11 +2592,15 @@ def test_check_and_mutate_mutations_parsing(self): false_case_mutations=mutations[2:], ) kwargs = mock_gapic.call_args[1] - assert kwargs["true_mutations"] == ["fake 0", "fake 1"] - assert kwargs["false_mutations"] == [ - "fake 2", - "fake 3", - "fake 4", + request = kwargs["request"] + assert request.true_mutations == [ + DeleteAllFromFamily("fake 0")._to_pb(), + DeleteAllFromFamily("fake 1")._to_pb(), + ] + assert request.false_mutations == [ + DeleteAllFromFamily("fake 2")._to_pb(), + DeleteAllFromFamily("fake 3")._to_pb(), + DeleteAllFromFamily("fake 4")._to_pb(), DeleteAllFromRow()._to_pb(), ] assert all( @@ -2486,7 +2643,8 @@ def test_read_modify_write_call_rule_args(self, call_rules, expected_rules): table.read_modify_write_row("key", call_rules) assert mock_gapic.call_count == 1 found_kwargs = mock_gapic.call_args_list[0][1] - assert found_kwargs["rules"] == expected_rules + request = found_kwargs["request"] + assert request.rules == expected_rules assert found_kwargs["retry"] is None @pytest.mark.parametrize("rules", [[], None]) @@ -2507,15 +2665,16 @@ def 
test_read_modify_write_call_defaults(self): with mock.patch.object( client._gapic_client, "read_modify_write_row" ) as mock_gapic: - table.read_modify_write_row(row_key, mock.Mock()) + table.read_modify_write_row(row_key, IncrementRule("f", "q")) assert mock_gapic.call_count == 1 kwargs = mock_gapic.call_args_list[0][1] + request = kwargs["request"] assert ( - kwargs["table_name"] + request.table_name == f"projects/{project}/instances/{instance}/tables/{table_id}" ) - assert kwargs["app_profile_id"] is None - assert kwargs["row_key"] == row_key.encode() + assert bool(request.app_profile_id) is False + assert request.row_key == row_key.encode() assert kwargs["timeout"] > 1 def test_read_modify_write_call_overrides(self): @@ -2530,12 +2689,15 @@ def test_read_modify_write_call_overrides(self): client._gapic_client, "read_modify_write_row" ) as mock_gapic: table.read_modify_write_row( - row_key, mock.Mock(), operation_timeout=expected_timeout + row_key, + IncrementRule("f", "q"), + operation_timeout=expected_timeout, ) assert mock_gapic.call_count == 1 kwargs = mock_gapic.call_args_list[0][1] - assert kwargs["app_profile_id"] is profile_id - assert kwargs["row_key"] == row_key + request = kwargs["request"] + assert request.app_profile_id == profile_id + assert request.row_key == row_key assert kwargs["timeout"] == expected_timeout def test_read_modify_write_string_key(self): @@ -2545,10 +2707,11 @@ def test_read_modify_write_string_key(self): with mock.patch.object( client._gapic_client, "read_modify_write_row" ) as mock_gapic: - table.read_modify_write_row(row_key, mock.Mock()) + table.read_modify_write_row(row_key, IncrementRule("f", "q")) assert mock_gapic.call_count == 1 kwargs = mock_gapic.call_args_list[0][1] - assert kwargs["row_key"] == row_key.encode() + request = kwargs["request"] + assert request.row_key == row_key.encode() def test_read_modify_write_row_building(self): """results from gapic call should be used to construct row""" @@ -2564,7 +2727,7 @@ def test_read_modify_write_row_building(self): ) as mock_gapic: with mock.patch.object(Row, "_from_pb") as constructor_mock: mock_gapic.return_value = mock_response - table.read_modify_write_row("key", mock.Mock()) + table.read_modify_write_row("key", IncrementRule("f", "q")) assert constructor_mock.call_count == 1 constructor_mock.assert_called_once_with(mock_response.row) diff --git a/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_mutations_batcher.py b/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_mutations_batcher.py index 59ea621ac3d3..72db64146900 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_mutations_batcher.py +++ b/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_mutations_batcher.py @@ -22,6 +22,8 @@ import google.api_core.exceptions as core_exceptions import google.api_core.retry from google.cloud.bigtable.data.exceptions import _MutateRowsIncomplete +from google.cloud.bigtable.data.mutations import RowMutationEntry +from google.cloud.bigtable.data.mutations import DeleteAllFromRow from google.cloud.bigtable.data import TABLE_DEFAULT from google.cloud.bigtable.data._cross_sync import CrossSync @@ -36,9 +38,9 @@ def _make_one(self, max_mutation_count=10, max_mutation_bytes=100): @staticmethod def _make_mutation(count=1, size=1): - mutation = mock.Mock() - mutation.size.return_value = size - mutation.mutations = [mock.Mock()] * count + mutation = RowMutationEntry("k", DeleteAllFromRow()) + mutation.mutations = [DeleteAllFromRow() for _ 
in range(count)] + mutation.size = lambda: size return mutation def test_ctor(self): @@ -258,6 +260,8 @@ def _make_one(self, table=None, **kwargs): if table is None: table = mock.Mock() + table._request_path = {"table_name": "table"} + table.app_profile_id = None table.default_mutate_rows_operation_timeout = 10 table.default_mutate_rows_attempt_timeout = 10 table.default_mutate_rows_retryable_errors = ( @@ -268,9 +272,9 @@ def _make_one(self, table=None, **kwargs): @staticmethod def _make_mutation(count=1, size=1): - mutation = mock.Mock() - mutation.size.return_value = size - mutation.mutations = [mock.Mock()] * count + mutation = RowMutationEntry("k", DeleteAllFromRow()) + mutation.size = lambda: size + mutation.mutations = [DeleteAllFromRow() for _ in range(count)] return mutation def test_ctor_defaults(self): @@ -284,7 +288,7 @@ def test_ctor_defaults(self): table.default_mutate_rows_attempt_timeout = 8 table.default_mutate_rows_retryable_errors = [Exception] with self._make_one(table) as instance: - assert instance._table == table + assert instance._target == table assert instance.closed is False assert instance._flush_jobs == set() assert len(instance._staged_entries) == 0 @@ -341,7 +345,7 @@ def test_ctor_explicit(self): batch_attempt_timeout=attempt_timeout, batch_retryable_errors=retryable_errors, ) as instance: - assert instance._table == table + assert instance._target == table assert instance.closed is False assert instance._flush_jobs == set() assert len(instance._staged_entries) == 0 @@ -387,7 +391,7 @@ def test_ctor_no_flush_limits(self): flush_limit_mutation_count=flush_limit_count, flush_limit_bytes=flush_limit_bytes, ) as instance: - assert instance._table == table + assert instance._target == table assert instance.closed is False assert instance._staged_entries == [] assert len(instance._oldest_exceptions) == 0 @@ -783,10 +787,10 @@ def test_timer_flush_end_to_end(self): num_mutations = 10 mutations = [self._make_mutation(count=2, size=2)] * num_mutations with self._make_one(flush_interval=0.05) as instance: - instance._table.default_operation_timeout = 10 - instance._table.default_attempt_timeout = 9 + instance._target.default_operation_timeout = 10 + instance._target.default_attempt_timeout = 9 with mock.patch.object( - instance._table.client._gapic_client, "mutate_rows" + instance._target.client._gapic_client, "mutate_rows" ) as gapic_mock: gapic_mock.side_effect = ( lambda *args, **kwargs: self._mock_gapic_return(num_mutations) diff --git a/packages/google-cloud-bigtable/tests/unit/data/test_sync_up_to_date.py b/packages/google-cloud-bigtable/tests/unit/data/test_sync_up_to_date.py index 492d35ddf0fc..d4623a6c8c84 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/test_sync_up_to_date.py +++ b/packages/google-cloud-bigtable/tests/unit/data/test_sync_up_to_date.py @@ -19,6 +19,9 @@ import re from difflib import unified_diff +if sys.version_info < (3, 9): + pytest.skip("ast.unparse is only available in 3.9+", allow_module_level=True) + # add cross_sync to path test_dir_name = os.path.dirname(__file__) repo_root = os.path.join(test_dir_name, "..", "..", "..") @@ -48,9 +51,6 @@ def test_found_files(): ), "test proxy handler not found" -@pytest.mark.skipif( - sys.version_info < (3, 9), reason="ast.unparse is only available in 3.9+" -) @pytest.mark.parametrize("sync_file", sync_files, ids=lambda f: f.output_path) def test_sync_up_to_date(sync_file): """ From 62c78652ceb19cf01f85a9357353520cf562bb13 Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Thu, 22 May 
2025 20:58:30 +0200 Subject: [PATCH 869/892] chore(deps): update all dependencies (#1031) --- .../google-cloud-bigtable/samples/beam/requirements.txt | 6 +++--- .../google-cloud-bigtable/samples/hello/requirements.txt | 4 ++-- .../samples/hello_happybase/requirements.txt | 2 +- .../samples/instanceadmin/requirements.txt | 2 +- .../samples/metricscaler/requirements-test.txt | 2 +- .../samples/metricscaler/requirements.txt | 4 ++-- .../samples/quickstart/requirements.txt | 2 +- .../samples/quickstart_happybase/requirements.txt | 2 +- .../samples/snippets/data_client/requirements.txt | 2 +- .../samples/snippets/deletes/requirements.txt | 2 +- .../samples/snippets/filters/requirements.txt | 2 +- .../samples/snippets/reads/requirements.txt | 2 +- .../samples/snippets/writes/requirements.txt | 2 +- .../samples/tableadmin/requirements-test.txt | 2 +- .../samples/tableadmin/requirements.txt | 2 +- 15 files changed, 19 insertions(+), 19 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index 9010a422b9f2..55b3ae719e62 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,3 @@ -apache-beam==2.57.0 -google-cloud-bigtable==2.25.0 -google-cloud-core==2.4.1 +apache-beam==2.65.0 +google-cloud-bigtable==2.30.1 +google-cloud-core==2.4.3 diff --git a/packages/google-cloud-bigtable/samples/hello/requirements.txt b/packages/google-cloud-bigtable/samples/hello/requirements.txt index 9a665c3be4d8..55d3a1ddd972 100644 --- a/packages/google-cloud-bigtable/samples/hello/requirements.txt +++ b/packages/google-cloud-bigtable/samples/hello/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.25.0 -google-cloud-core==2.4.1 +google-cloud-bigtable==2.30.1 +google-cloud-core==2.4.3 diff --git a/packages/google-cloud-bigtable/samples/hello_happybase/requirements.txt b/packages/google-cloud-bigtable/samples/hello_happybase/requirements.txt index d3368cd0f872..dc1a04f30378 100644 --- a/packages/google-cloud-bigtable/samples/hello_happybase/requirements.txt +++ b/packages/google-cloud-bigtable/samples/hello_happybase/requirements.txt @@ -1,2 +1,2 @@ google-cloud-happybase==0.33.0 -six==1.16.0 # See https://github.com/googleapis/google-cloud-python-happybase/issues/128 +six==1.17.0 # See https://github.com/googleapis/google-cloud-python-happybase/issues/128 diff --git a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt index bb8b24a679fe..a2922fe6e743 100644 --- a/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/instanceadmin/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.25.0 +google-cloud-bigtable==2.30.1 backoff==2.2.1 diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt index 13d73437842a..d11108b81f7c 100644 --- a/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements-test.txt @@ -1,3 +1,3 @@ pytest -mock==5.1.0 +mock==5.2.0 google-cloud-testutils diff --git a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt index 9136f4763d34..522c28ae22fd 100644 --- 
a/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt +++ b/packages/google-cloud-bigtable/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.25.0 -google-cloud-monitoring==2.22.2 +google-cloud-bigtable==2.30.1 +google-cloud-monitoring==2.27.1 diff --git a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt index 3760ce41505d..807132c7e633 100644 --- a/packages/google-cloud-bigtable/samples/quickstart/requirements.txt +++ b/packages/google-cloud-bigtable/samples/quickstart/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.25.0 +google-cloud-bigtable==2.30.1 diff --git a/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements.txt b/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements.txt index d3368cd0f872..dc1a04f30378 100644 --- a/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements.txt +++ b/packages/google-cloud-bigtable/samples/quickstart_happybase/requirements.txt @@ -1,2 +1,2 @@ google-cloud-happybase==0.33.0 -six==1.16.0 # See https://github.com/googleapis/google-cloud-python-happybase/issues/128 +six==1.17.0 # See https://github.com/googleapis/google-cloud-python-happybase/issues/128 diff --git a/packages/google-cloud-bigtable/samples/snippets/data_client/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/data_client/requirements.txt index 3760ce41505d..807132c7e633 100644 --- a/packages/google-cloud-bigtable/samples/snippets/data_client/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/data_client/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.25.0 +google-cloud-bigtable==2.30.1 diff --git a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt index 3760ce41505d..807132c7e633 100644 --- a/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/deletes/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.25.0 +google-cloud-bigtable==2.30.1 diff --git a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt index 3760ce41505d..807132c7e633 100644 --- a/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/filters/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.25.0 +google-cloud-bigtable==2.30.1 diff --git a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt index 3760ce41505d..807132c7e633 100644 --- a/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/reads/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.25.0 +google-cloud-bigtable==2.30.1 diff --git a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt index 82d7fad3307c..874788bf7d99 100644 --- a/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt +++ b/packages/google-cloud-bigtable/samples/snippets/writes/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.25.0 \ No newline at end of file +google-cloud-bigtable==2.30.1 \ No newline at end of file diff --git 
a/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt index a4c9e9c0b59c..d8889022d905 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements-test.txt @@ -1,2 +1,2 @@ pytest -google-cloud-testutils==1.4.0 +google-cloud-testutils==1.6.4 diff --git a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt index 3760ce41505d..807132c7e633 100644 --- a/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt +++ b/packages/google-cloud-bigtable/samples/tableadmin/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.25.0 +google-cloud-bigtable==2.30.1 From 986c87acdd1325e55a3f2b1bd9a5809697bdb85e Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Thu, 22 May 2025 14:45:26 -0700 Subject: [PATCH 870/892] chore(main): release 2.31.0 (#1121) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- .../.release-please-manifest.json | 2 +- packages/google-cloud-bigtable/CHANGELOG.md | 14 ++++++++++++++ .../google/cloud/bigtable/gapic_version.py | 2 +- .../google/cloud/bigtable_admin/gapic_version.py | 2 +- .../cloud/bigtable_admin_v2/gapic_version.py | 2 +- .../google/cloud/bigtable_v2/gapic_version.py | 2 +- 6 files changed, 19 insertions(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/.release-please-manifest.json b/packages/google-cloud-bigtable/.release-please-manifest.json index 570ecf862db2..90999b77509c 100644 --- a/packages/google-cloud-bigtable/.release-please-manifest.json +++ b/packages/google-cloud-bigtable/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "2.30.1" + ".": "2.31.0" } \ No newline at end of file diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index f55767795ee3..9a0b2e013e2a 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,20 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.31.0](https://github.com/googleapis/python-bigtable/compare/v2.30.1...v2.31.0) (2025-05-22) + + +### Features + +* Add deletion_protection support for LVs ([#1108](https://github.com/googleapis/python-bigtable/issues/1108)) ([c6d384d](https://github.com/googleapis/python-bigtable/commit/c6d384d4a104c182326e22dc3f10b7b905780dee)) +* Support authorized views ([#1034](https://github.com/googleapis/python-bigtable/issues/1034)) ([97a0198](https://github.com/googleapis/python-bigtable/commit/97a019833d82e617769c56761aa5548d3ab896b9)) +* Throw better error on invalid metadata response ([#1107](https://github.com/googleapis/python-bigtable/issues/1107)) ([2642317](https://github.com/googleapis/python-bigtable/commit/2642317077b723ca8fd62aa86322b524868c2c4d)) + + +### Bug Fixes + +* Re-add py-typed file for bigtable package ([#1085](https://github.com/googleapis/python-bigtable/issues/1085)) ([0c322c7](https://github.com/googleapis/python-bigtable/commit/0c322c79ecbe4cde3e79d8e83ac655a978d07877)) + ## [2.30.1](https://github.com/googleapis/python-bigtable/compare/v2.30.0...v2.30.1) (2025-04-17) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py index 
8202296bffca..8ab09c42e9c1 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.30.1" # {x-release-please-version} +__version__ = "2.31.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py index 8202296bffca..8ab09c42e9c1 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.30.1" # {x-release-please-version} +__version__ = "2.31.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py index 8202296bffca..8ab09c42e9c1 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.30.1" # {x-release-please-version} +__version__ = "2.31.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py index 8202296bffca..8ab09c42e9c1 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "2.30.1" # {x-release-please-version} +__version__ = "2.31.0" # {x-release-please-version} From 91a55726bc69038c2208a4013a2963f475fee320 Mon Sep 17 00:00:00 2001 From: Jack Dingilian Date: Thu, 12 Jun 2025 15:50:16 -0400 Subject: [PATCH 871/892] feat: Implement SQL support in test proxy (#1106) --- .../handlers/client_handler_data_async.py | 114 +++++-- .../client_handler_data_sync_autogen.py | 41 +++ .../test_proxy/handlers/grpc_handler.py | 71 +++- .../handlers/helpers/sql_encoding_helpers.py | 183 +++++++++++ .../test_proxy/protos/bigtable_pb2.py | 306 +++++++++++------- .../test_proxy/protos/bigtable_pb2_grpc.py | 271 +++++++++++++--- .../test_proxy/protos/data_pb2.py | 141 +++++--- .../test_proxy/protos/data_pb2_grpc.py | 20 ++ .../test_proxy/protos/test_proxy_pb2.py | 123 ++++--- .../test_proxy/protos/test_proxy_pb2_grpc.py | 245 +++++++++++--- .../test_proxy/protos/types_pb2.py | 92 ++++++ .../test_proxy/protos/types_pb2_grpc.py | 24 ++ 12 files changed, 1300 insertions(+), 331 deletions(-) create mode 100644 packages/google-cloud-bigtable/test_proxy/handlers/helpers/sql_encoding_helpers.py create mode 100644 packages/google-cloud-bigtable/test_proxy/protos/types_pb2.py create mode 100644 packages/google-cloud-bigtable/test_proxy/protos/types_pb2_grpc.py diff --git a/packages/google-cloud-bigtable/test_proxy/handlers/client_handler_data_async.py b/packages/google-cloud-bigtable/test_proxy/handlers/client_handler_data_async.py index 49539c1aa2c5..85ef2c7d4f37 100644 --- a/packages/google-cloud-bigtable/test_proxy/handlers/client_handler_data_async.py +++ b/packages/google-cloud-bigtable/test_proxy/handlers/client_handler_data_async.py @@ -19,6 +19,7 @@ from google.cloud.environment_vars import BIGTABLE_EMULATOR from google.cloud.bigtable.data import BigtableDataClientAsync from google.cloud.bigtable.data._cross_sync import CrossSync +from helpers import sql_encoding_helpers if not CrossSync.is_async: from client_handler_data_async import error_safe @@ -32,6 +33,7 @@ def error_safe(func): Catch and pass errors back to the grpc_server_process Also check if client is closed before processing requests """ + async def wrapper(self, *args, **kwargs): try: if self.closed: @@ -50,6 +52,7 @@ def encode_exception(exc): Encode an exception or chain of exceptions to pass back to grpc_handler """ from google.api_core.exceptions import GoogleAPICallError + error_msg = f"{type(exc).__name__}: {exc}" result = {"error": error_msg} if exc.__cause__: @@ -113,7 +116,9 @@ async def ReadRows(self, request, **kwargs): table_id = request.pop("table_name").split("/")[-1] app_profile_id = self.app_profile_id or request.get("app_profile_id", None) table = self.client.get_table(self.instance_id, table_id, app_profile_id) - kwargs["operation_timeout"] = kwargs.get("operation_timeout", self.per_operation_timeout) or 20 + kwargs["operation_timeout"] = ( + kwargs.get("operation_timeout", self.per_operation_timeout) or 20 + ) result_list = CrossSync.rm_aio(await table.read_rows(request, **kwargs)) # pack results back into protobuf-parsable format serialized_response = [row._to_dict() for row in result_list] @@ -124,7 +129,9 @@ async def ReadRow(self, row_key, **kwargs): table_id = kwargs.pop("table_name").split("/")[-1] app_profile_id = self.app_profile_id or kwargs.get("app_profile_id", None) table = self.client.get_table(self.instance_id, table_id, app_profile_id) - kwargs["operation_timeout"] = kwargs.get("operation_timeout", self.per_operation_timeout) or 20 + kwargs["operation_timeout"] = ( + 
kwargs.get("operation_timeout", self.per_operation_timeout) or 20 + ) result_row = CrossSync.rm_aio(await table.read_row(row_key, **kwargs)) # pack results back into protobuf-parsable format if result_row: @@ -135,10 +142,13 @@ async def ReadRow(self, row_key, **kwargs): @error_safe async def MutateRow(self, request, **kwargs): from google.cloud.bigtable.data.mutations import Mutation + table_id = request["table_name"].split("/")[-1] app_profile_id = self.app_profile_id or request.get("app_profile_id", None) table = self.client.get_table(self.instance_id, table_id, app_profile_id) - kwargs["operation_timeout"] = kwargs.get("operation_timeout", self.per_operation_timeout) or 20 + kwargs["operation_timeout"] = ( + kwargs.get("operation_timeout", self.per_operation_timeout) or 20 + ) row_key = request["row_key"] mutations = [Mutation._from_dict(d) for d in request["mutations"]] CrossSync.rm_aio(await table.mutate_row(row_key, mutations, **kwargs)) @@ -147,21 +157,29 @@ async def MutateRow(self, request, **kwargs): @error_safe async def BulkMutateRows(self, request, **kwargs): from google.cloud.bigtable.data.mutations import RowMutationEntry + table_id = request["table_name"].split("/")[-1] app_profile_id = self.app_profile_id or request.get("app_profile_id", None) table = self.client.get_table(self.instance_id, table_id, app_profile_id) - kwargs["operation_timeout"] = kwargs.get("operation_timeout", self.per_operation_timeout) or 20 - entry_list = [RowMutationEntry._from_dict(entry) for entry in request["entries"]] + kwargs["operation_timeout"] = ( + kwargs.get("operation_timeout", self.per_operation_timeout) or 20 + ) + entry_list = [ + RowMutationEntry._from_dict(entry) for entry in request["entries"] + ] CrossSync.rm_aio(await table.bulk_mutate_rows(entry_list, **kwargs)) return "OK" @error_safe async def CheckAndMutateRow(self, request, **kwargs): from google.cloud.bigtable.data.mutations import Mutation, SetCell + table_id = request["table_name"].split("/")[-1] app_profile_id = self.app_profile_id or request.get("app_profile_id", None) table = self.client.get_table(self.instance_id, table_id, app_profile_id) - kwargs["operation_timeout"] = kwargs.get("operation_timeout", self.per_operation_timeout) or 20 + kwargs["operation_timeout"] = ( + kwargs.get("operation_timeout", self.per_operation_timeout) or 20 + ) row_key = request["row_key"] # add default values for incomplete dicts, so they can still be parsed to objects true_mutations = [] @@ -180,33 +198,44 @@ async def CheckAndMutateRow(self, request, **kwargs): # invalid mutation type. 
Conformance test may be sending generic empty request false_mutations.append(SetCell("", "", "", 0)) predicate_filter = request.get("predicate_filter", None) - result = CrossSync.rm_aio(await table.check_and_mutate_row( - row_key, - predicate_filter, - true_case_mutations=true_mutations, - false_case_mutations=false_mutations, - **kwargs, - )) + result = CrossSync.rm_aio( + await table.check_and_mutate_row( + row_key, + predicate_filter, + true_case_mutations=true_mutations, + false_case_mutations=false_mutations, + **kwargs, + ) + ) return result @error_safe async def ReadModifyWriteRow(self, request, **kwargs): from google.cloud.bigtable.data.read_modify_write_rules import IncrementRule from google.cloud.bigtable.data.read_modify_write_rules import AppendValueRule + table_id = request["table_name"].split("/")[-1] app_profile_id = self.app_profile_id or request.get("app_profile_id", None) table = self.client.get_table(self.instance_id, table_id, app_profile_id) - kwargs["operation_timeout"] = kwargs.get("operation_timeout", self.per_operation_timeout) or 20 + kwargs["operation_timeout"] = ( + kwargs.get("operation_timeout", self.per_operation_timeout) or 20 + ) row_key = request["row_key"] rules = [] for rule_dict in request.get("rules", []): qualifier = rule_dict["column_qualifier"] if "append_value" in rule_dict: - new_rule = AppendValueRule(rule_dict["family_name"], qualifier, rule_dict["append_value"]) + new_rule = AppendValueRule( + rule_dict["family_name"], qualifier, rule_dict["append_value"] + ) else: - new_rule = IncrementRule(rule_dict["family_name"], qualifier, rule_dict["increment_amount"]) + new_rule = IncrementRule( + rule_dict["family_name"], qualifier, rule_dict["increment_amount"] + ) rules.append(new_rule) - result = CrossSync.rm_aio(await table.read_modify_write_row(row_key, rules, **kwargs)) + result = CrossSync.rm_aio( + await table.read_modify_write_row(row_key, rules, **kwargs) + ) # pack results back into protobuf-parsable format if result: return result._to_dict() @@ -218,6 +247,55 @@ async def SampleRowKeys(self, request, **kwargs): table_id = request["table_name"].split("/")[-1] app_profile_id = self.app_profile_id or request.get("app_profile_id", None) table = self.client.get_table(self.instance_id, table_id, app_profile_id) - kwargs["operation_timeout"] = kwargs.get("operation_timeout", self.per_operation_timeout) or 20 + kwargs["operation_timeout"] = ( + kwargs.get("operation_timeout", self.per_operation_timeout) or 20 + ) result = CrossSync.rm_aio(await table.sample_row_keys(**kwargs)) return result + + @error_safe + async def ExecuteQuery(self, request, **kwargs): + app_profile_id = self.app_profile_id or request.get("app_profile_id", None) + query = request.get("query") + params = request.get("params") or {} + # Note that the request has been converted to json, and the code for this converts + query param names to snake case. convert_params reverses this conversion. For this + reason, snake case params will have issues if they're used in the conformance tests. 
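        # Editor's note (illustrative only, not part of the original patch): a conformance
        # parameter sent as "userId" in the proto request arrives here keyed as "user_id"
        # after the proxy's proto-to-JSON step; sql_encoding_helpers.snake_to_camel("user_id")
        # restores "userId" before execute_query is called. A parameter whose real name is
        # already snake_case would be camel-cased by that reversal, which is why such names
        # cannot round-trip through these tests.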
+ formatted_params, parameter_types = sql_encoding_helpers.convert_params(params) + operation_timeout = ( + kwargs.get("operation_timeout", self.per_operation_timeout) or 20 + ) + result = CrossSync.rm_aio( + await self.client.execute_query( + query, + self.instance_id, + parameters=formatted_params, + parameter_types=parameter_types, + app_profile_id=app_profile_id, + operation_timeout=operation_timeout, + prepare_operation_timeout=operation_timeout, + ) + ) + rows = [r async for r in result] + md = result.metadata + proto_rows = [] + for r in rows: + vals = [] + for c in md.columns: + vals.append(sql_encoding_helpers.convert_value(c.column_type, r[c.column_name])) + + proto_rows.append({"values": vals}) + + proto_columns = [] + for c in md.columns: + proto_columns.append( + { + "name": c.column_name, + "type": sql_encoding_helpers.convert_type(c.column_type), + } + ) + + return { + "metadata": {"columns": proto_columns}, + "rows": proto_rows, + } diff --git a/packages/google-cloud-bigtable/test_proxy/handlers/client_handler_data_sync_autogen.py b/packages/google-cloud-bigtable/test_proxy/handlers/client_handler_data_sync_autogen.py index eabae0ffa2a7..4a680cc8cea0 100644 --- a/packages/google-cloud-bigtable/test_proxy/handlers/client_handler_data_sync_autogen.py +++ b/packages/google-cloud-bigtable/test_proxy/handlers/client_handler_data_sync_autogen.py @@ -20,6 +20,7 @@ import os from google.cloud.environment_vars import BIGTABLE_EMULATOR from google.cloud.bigtable.data._cross_sync import CrossSync +from helpers import sql_encoding_helpers from client_handler_data_async import error_safe @@ -183,3 +184,43 @@ async def SampleRowKeys(self, request, **kwargs): ) result = table.sample_row_keys(**kwargs) return result + + @error_safe + async def ExecuteQuery(self, request, **kwargs): + app_profile_id = self.app_profile_id or request.get("app_profile_id", None) + query = request.get("query") + params = request.get("params") or {} + (formatted_params, parameter_types) = sql_encoding_helpers.convert_params( + params + ) + operation_timeout = ( + kwargs.get("operation_timeout", self.per_operation_timeout) or 20 + ) + result = self.client.execute_query( + query, + self.instance_id, + parameters=formatted_params, + parameter_types=parameter_types, + app_profile_id=app_profile_id, + operation_timeout=operation_timeout, + prepare_operation_timeout=operation_timeout, + ) + rows = [r async for r in result] + md = result.metadata + proto_rows = [] + for r in rows: + vals = [] + for c in md.columns: + vals.append( + sql_encoding_helpers.convert_value(c.column_type, r[c.column_name]) + ) + proto_rows.append({"values": vals}) + proto_columns = [] + for c in md.columns: + proto_columns.append( + { + "name": c.column_name, + "type": sql_encoding_helpers.convert_type(c.column_type), + } + ) + return {"metadata": {"columns": proto_columns}, "rows": proto_rows} diff --git a/packages/google-cloud-bigtable/test_proxy/handlers/grpc_handler.py b/packages/google-cloud-bigtable/test_proxy/handlers/grpc_handler.py index 2c70778ddedd..5dc7aa0908ed 100644 --- a/packages/google-cloud-bigtable/test_proxy/handlers/grpc_handler.py +++ b/packages/google-cloud-bigtable/test_proxy/handlers/grpc_handler.py @@ -1,4 +1,3 @@ - import time import test_proxy_pb2 @@ -59,7 +58,6 @@ def wrapper(self, request, context, **kwargs): return wrapper - @delegate_to_client_handler def CreateClient(self, request, context, client_response=None): return test_proxy_pb2.CreateClientResponse() @@ -80,7 +78,7 @@ def ReadRows(self, request, context, 
client_response=None): status = Status(code=5, message=client_response["error"]) else: rows = [data_pb2.Row(**d) for d in client_response] - result = test_proxy_pb2.RowsResult(row=rows, status=status) + result = test_proxy_pb2.RowsResult(rows=rows, status=status) return result @delegate_to_client_handler @@ -88,7 +86,10 @@ def ReadRow(self, request, context, client_response=None): status = Status() row = None if isinstance(client_response, dict) and "error" in client_response: - status=Status(code=client_response.get("code", 5), message=client_response.get("error")) + status = Status( + code=client_response.get("code", 5), + message=client_response.get("error"), + ) elif client_response != "None": row = data_pb2.Row(**client_response) result = test_proxy_pb2.RowResult(row=row, status=status) @@ -98,7 +99,9 @@ def ReadRow(self, request, context, client_response=None): def MutateRow(self, request, context, client_response=None): status = Status() if isinstance(client_response, dict) and "error" in client_response: - status = Status(code=client_response.get("code", 5), message=client_response["error"]) + status = Status( + code=client_response.get("code", 5), message=client_response["error"] + ) return test_proxy_pb2.MutateRowResult(status=status) @delegate_to_client_handler @@ -106,22 +109,36 @@ def BulkMutateRows(self, request, context, client_response=None): status = Status() entries = [] if isinstance(client_response, dict) and "error" in client_response: - entries = [bigtable_pb2.MutateRowsResponse.Entry(index=exc_dict.get("index",1), status=Status(code=exc_dict.get("code", 5))) for exc_dict in client_response.get("subexceptions", [])] + entries = [ + bigtable_pb2.MutateRowsResponse.Entry( + index=exc_dict.get("index", 1), + status=Status(code=exc_dict.get("code", 5)), + ) + for exc_dict in client_response.get("subexceptions", []) + ] if not entries: # only return failure on the overall request if there are failed entries - status = Status(code=client_response.get("code", 5), message=client_response["error"]) - # TODO: protos were updated. 
entry is now entries: https://github.com/googleapis/cndb-client-testing-protos/commit/e6205a2bba04acc10d12421a1402870b4a525fb3 - response = test_proxy_pb2.MutateRowsResult(status=status, entry=entries) + status = Status( + code=client_response.get("code", 5), + message=client_response["error"], + ) + response = test_proxy_pb2.MutateRowsResult(status=status, entries=entries) return response @delegate_to_client_handler def CheckAndMutateRow(self, request, context, client_response=None): if isinstance(client_response, dict) and "error" in client_response: - status = Status(code=client_response.get("code", 5), message=client_response["error"]) + status = Status( + code=client_response.get("code", 5), message=client_response["error"] + ) response = test_proxy_pb2.CheckAndMutateRowResult(status=status) else: - result = bigtable_pb2.CheckAndMutateRowResponse(predicate_matched=client_response) - response = test_proxy_pb2.CheckAndMutateRowResult(result=result, status=Status()) + result = bigtable_pb2.CheckAndMutateRowResponse( + predicate_matched=client_response + ) + response = test_proxy_pb2.CheckAndMutateRowResult( + result=result, status=Status() + ) return response @delegate_to_client_handler @@ -129,7 +146,10 @@ def ReadModifyWriteRow(self, request, context, client_response=None): status = Status() row = None if isinstance(client_response, dict) and "error" in client_response: - status = Status(code=client_response.get("code", 5), message=client_response.get("error")) + status = Status( + code=client_response.get("code", 5), + message=client_response.get("error"), + ) elif client_response != "None": row = data_pb2.Row(**client_response) result = test_proxy_pb2.RowResult(row=row, status=status) @@ -140,9 +160,26 @@ def SampleRowKeys(self, request, context, client_response=None): status = Status() sample_list = [] if isinstance(client_response, dict) and "error" in client_response: - status = Status(code=client_response.get("code", 5), message=client_response.get("error")) + status = Status( + code=client_response.get("code", 5), + message=client_response.get("error"), + ) else: for sample in client_response: - sample_list.append(bigtable_pb2.SampleRowKeysResponse(offset_bytes=sample[1], row_key=sample[0])) - # TODO: protos were updated. 
sample is now samples: https://github.com/googleapis/cndb-client-testing-protos/commit/e6205a2bba04acc10d12421a1402870b4a525fb3 - return test_proxy_pb2.SampleRowKeysResult(status=status, sample=sample_list) + sample_list.append( + bigtable_pb2.SampleRowKeysResponse( + offset_bytes=sample[1], row_key=sample[0] + ) + ) + return test_proxy_pb2.SampleRowKeysResult(status=status, samples=sample_list) + + @delegate_to_client_handler + def ExecuteQuery(self, request, context, client_response=None): + if isinstance(client_response, dict) and "error" in client_response: + return test_proxy_pb2.ExecuteQueryResult( + status=Status(code=13, message=client_response["error"]) + ) + else: + return test_proxy_pb2.ExecuteQueryResult( + metadata=client_response["metadata"], rows=client_response["rows"] + ) diff --git a/packages/google-cloud-bigtable/test_proxy/handlers/helpers/sql_encoding_helpers.py b/packages/google-cloud-bigtable/test_proxy/handlers/helpers/sql_encoding_helpers.py new file mode 100644 index 000000000000..9640ae3fd8a1 --- /dev/null +++ b/packages/google-cloud-bigtable/test_proxy/handlers/helpers/sql_encoding_helpers.py @@ -0,0 +1,183 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +This module contains helpers for handling sql data types for the test proxy. +""" +from datetime import date +from typing import Any + +from google.api_core.datetime_helpers import DatetimeWithNanoseconds +from google.cloud.bigtable.data.execute_query.metadata import SqlType + + +PRIMITIVE_TYPE_MAPPING = { + "bytes_type": SqlType.Bytes(), + "string_type": SqlType.String(), + "int64_type": SqlType.Int64(), + "float32_type": SqlType.Float32(), + "float64_type": SqlType.Float64(), + "bool_type": SqlType.Bool(), + "timestamp_type": SqlType.Timestamp(), + "date_type": SqlType.Date(), +} + +PRIMITIVE_VALUE_FIELDS = [ + "bytes_value", + "string_value", + "int_value", + "float_value", + "bool_value", +] + + +def snake_to_camel(snake_string): + """ + Used to convert query parameter names back to camel case. This needs to be handled + specifically because the python test proxy converts all keys to snake case when it + converts proto messages to dicts. + """ + components = snake_string.split("_") + return components[0] + "".join(x.title() for x in components[1:]) + + +def convert_value(type: SqlType, val: Any): + """ + Converts python value to a dict representation of a protobuf Value message. 
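    Illustrative example (editor's addition, not part of the original commit), assuming
    the library's Int64/String SqlTypes serialize to the Value proto's "int_value" and
    "string_value" fields: convert_value(SqlType.Int64(), 5) yields a dict like
    {"int_value": 5}, and convert_value(SqlType.Array(SqlType.String()), ["a", "b"]) yields
    {"array_value": {"values": [{"string_value": "a"}, {"string_value": "b"}]}}.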
+ """ + if val is None: + return {} + elif isinstance(type, SqlType.Date): + return {"date_value": val} + elif isinstance(type, SqlType.Map): + key_type = type.key_type + val_type = type.value_type + results = [] + for k, v in val.items(): + results.append( + { + "array_value": { + "values": [ + convert_value(key_type, k), + convert_value(val_type, v), + ] + } + } + ) + return {"array_value": {"values": results}} + elif isinstance(type, SqlType.Struct): + results = [] + for i, (_, field_val) in enumerate(val.fields): + results.append(convert_value(type[i], field_val)) + return {"array_value": {"values": results}} + elif isinstance(type, SqlType.Array): + elem_type = type.element_type + results = [] + for e in val: + results.append(convert_value(elem_type, e)) + return {"array_value": {"values": results}} + else: + return type._to_value_pb_dict(val) + + +def convert_type(type: SqlType): + if isinstance(type, SqlType.Map): + return { + "map_type": { + "key_type": convert_type(type.key_type), + "value_type": convert_type(type.value_type), + } + } + elif isinstance(type, SqlType.Struct): + fields = [] + for field_name, field_type in type.fields: + fields.append({"field_name": field_name, "type": convert_type(field_type)}) + return {"struct_type": {"fields": fields}} + elif isinstance(type, SqlType.Array): + return {"array_type": {"element_type": convert_type(type.element_type)}} + else: + return type._to_type_pb_dict() + + +def to_sql_type(proto_type_dict): + if len(proto_type_dict.keys()) != 1: + raise ValueError("Invalid type: ", proto_type_dict) + type_field = list(proto_type_dict.keys())[0] + if type_field in PRIMITIVE_TYPE_MAPPING: + return PRIMITIVE_TYPE_MAPPING[type_field] + elif type_field == "array_type": + elem_type_dict = proto_type_dict["array_type"]["element_type"] + return SqlType.Array(to_sql_type(elem_type_dict)) + else: + raise ValueError("Invalid query parameter type: ", proto_type_dict) + + +def convert_to_python_value(proto_val: Any, sql_type: SqlType): + """ + Converts the given dict representation of a proto Value message to the correct + python value. This is used to convert query params to the representation expected + from users. We can't reuse existing parsers because they expect actual proto messages + rather than dicts. + """ + value_field = sql_type.value_pb_dict_field_name + if isinstance(sql_type, SqlType.Array): + if "array_value" not in proto_val: + return None + elem_type = sql_type.element_type + return [ + convert_to_python_value(v, elem_type) + for v in proto_val["array_value"]["values"] + ] + if value_field and value_field not in proto_val: + return None + if value_field in PRIMITIVE_VALUE_FIELDS: + return proto_val[value_field] + if isinstance(sql_type, SqlType.Timestamp): + if "timestamp_value" not in proto_val: + return None + return DatetimeWithNanoseconds.from_rfc3339(proto_val["timestamp_value"]) + if isinstance(sql_type, SqlType.Date): + if "date_value" not in proto_val: + return None + return date( + year=proto_val["date_value"]["year"], + month=proto_val["date_value"]["month"], + day=proto_val["date_value"]["day"], + ) + raise ValueError("Unexpected parameter: %s, %s", proto_val, sql_type) + + +def convert_params(request_params): + """ + Converts the given dictionary of parameters to a python representation. + This converts parameter names from snake to camel case and protobuf Value dicts + to python values. 
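    Illustrative example (editor's addition, not part of the original commit), assuming a
    string-typed parameter whose Value field is "string_value":

        convert_params({"my_param": {"type": {"string_type": {}}, "string_value": "foo"}})
        # -> ({"myParam": "foo"}, {"myParam": SqlType.String()})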
+ """ + python_params = {} + param_types = {} + for param_key, param_value in request_params.items(): + if "type" not in param_value: + raise ValueError("type must be set for query params") + + sql_type = to_sql_type(param_value["type"]) + readjusted_param_name = snake_to_camel(param_key) + param_types[readjusted_param_name] = sql_type + if len(param_value.keys()) == 1: + # this means type is set and nothing else + python_params[readjusted_param_name] = None + elif len(param_value) > 2: + raise ValueError("Unexpected Value format: ", param_value) + python_params[readjusted_param_name] = convert_to_python_value( + param_value, sql_type + ) + return python_params, param_types diff --git a/packages/google-cloud-bigtable/test_proxy/protos/bigtable_pb2.py b/packages/google-cloud-bigtable/test_proxy/protos/bigtable_pb2.py index 936a4ed55332..edc90c3ecd80 100644 --- a/packages/google-cloud-bigtable/test_proxy/protos/bigtable_pb2.py +++ b/packages/google-cloud-bigtable/test_proxy/protos/bigtable_pb2.py @@ -1,11 +1,22 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! +# NO CHECKED-IN PROTOBUF GENCODE # source: google/bigtable/v2/bigtable.proto +# Protobuf Python Version: 5.29.0 """Generated protocol buffer code.""" -from google.protobuf.internal import builder as _builder from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import runtime_version as _runtime_version from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +_runtime_version.ValidateProtobufRuntimeVersion( + _runtime_version.Domain.PUBLIC, + 5, + 29, + 0, + '', + 'google/bigtable/v2/bigtable.proto' +) # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() @@ -18,128 +29,187 @@ from google.api import routing_pb2 as google_dot_api_dot_routing__pb2 import data_pb2 as google_dot_bigtable_dot_v2_dot_data__pb2 import request_stats_pb2 as google_dot_bigtable_dot_v2_dot_request__stats__pb2 +import types_pb2 as google_dot_bigtable_dot_v2_dot_types__pb2 from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2 from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n!google/bigtable/v2/bigtable.proto\x12\x12google.bigtable.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x18google/api/routing.proto\x1a\x1dgoogle/bigtable/v2/data.proto\x1a&google/bigtable/v2/request_stats.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x17google/rpc/status.proto\"\x90\x03\n\x0fReadRowsRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x05 \x01(\t\x12(\n\x04rows\x18\x02 \x01(\x0b\x32\x1a.google.bigtable.v2.RowSet\x12-\n\x06\x66ilter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x12\n\nrows_limit\x18\x04 \x01(\x03\x12P\n\x12request_stats_view\x18\x06 
\x01(\x0e\x32\x34.google.bigtable.v2.ReadRowsRequest.RequestStatsView\"f\n\x10RequestStatsView\x12\"\n\x1eREQUEST_STATS_VIEW_UNSPECIFIED\x10\x00\x12\x16\n\x12REQUEST_STATS_NONE\x10\x01\x12\x16\n\x12REQUEST_STATS_FULL\x10\x02\"\xb1\x03\n\x10ReadRowsResponse\x12>\n\x06\x63hunks\x18\x01 \x03(\x0b\x32..google.bigtable.v2.ReadRowsResponse.CellChunk\x12\x1c\n\x14last_scanned_row_key\x18\x02 \x01(\x0c\x12\x37\n\rrequest_stats\x18\x03 \x01(\x0b\x32 .google.bigtable.v2.RequestStats\x1a\x85\x02\n\tCellChunk\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x31\n\x0b\x66\x61mily_name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\tqualifier\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.BytesValue\x12\x18\n\x10timestamp_micros\x18\x04 \x01(\x03\x12\x0e\n\x06labels\x18\x05 \x03(\t\x12\r\n\x05value\x18\x06 \x01(\x0c\x12\x12\n\nvalue_size\x18\x07 \x01(\x05\x12\x13\n\treset_row\x18\x08 \x01(\x08H\x00\x12\x14\n\ncommit_row\x18\t \x01(\x08H\x00\x42\x0c\n\nrow_status\"n\n\x14SampleRowKeysRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t\">\n\x15SampleRowKeysResponse\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x14\n\x0coffset_bytes\x18\x02 \x01(\x03\"\xb6\x01\n\x10MutateRowRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x04 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12\x34\n\tmutations\x18\x03 \x03(\x0b\x32\x1c.google.bigtable.v2.MutationB\x03\xe0\x41\x02\"\x13\n\x11MutateRowResponse\"\xfe\x01\n\x11MutateRowsRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x03 \x01(\t\x12\x41\n\x07\x65ntries\x18\x02 \x03(\x0b\x32+.google.bigtable.v2.MutateRowsRequest.EntryB\x03\xe0\x41\x02\x1aN\n\x05\x45ntry\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x34\n\tmutations\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.MutationB\x03\xe0\x41\x02\"\x8f\x01\n\x12MutateRowsResponse\x12=\n\x07\x65ntries\x18\x01 \x03(\x0b\x32,.google.bigtable.v2.MutateRowsResponse.Entry\x1a:\n\x05\x45ntry\x12\r\n\x05index\x18\x01 \x01(\x03\x12\"\n\x06status\x18\x02 \x01(\x0b\x32\x12.google.rpc.Status\"\xae\x02\n\x18\x43heckAndMutateRowRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x07 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12\x37\n\x10predicate_filter\x18\x06 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x34\n\x0etrue_mutations\x18\x04 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\x12\x35\n\x0f\x66\x61lse_mutations\x18\x05 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\"6\n\x19\x43heckAndMutateRowResponse\x12\x19\n\x11predicate_matched\x18\x01 \x01(\x08\"i\n\x12PingAndWarmRequest\x12;\n\x04name\x18\x01 \x01(\tB-\xe0\x41\x02\xfa\x41\'\n%bigtableadmin.googleapis.com/Instance\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t\"\x15\n\x13PingAndWarmResponse\"\xc6\x01\n\x19ReadModifyWriteRowRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x04 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12;\n\x05rules\x18\x03 \x03(\x0b\x32\'.google.bigtable.v2.ReadModifyWriteRuleB\x03\xe0\x41\x02\"B\n\x1aReadModifyWriteRowResponse\x12$\n\x03row\x18\x01 
\x01(\x0b\x32\x17.google.bigtable.v2.Row\"\x86\x01\n,GenerateInitialChangeStreamPartitionsRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t\"g\n-GenerateInitialChangeStreamPartitionsResponse\x12\x36\n\tpartition\x18\x01 \x01(\x0b\x32#.google.bigtable.v2.StreamPartition\"\x9b\x03\n\x17ReadChangeStreamRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t\x12\x36\n\tpartition\x18\x03 \x01(\x0b\x32#.google.bigtable.v2.StreamPartition\x12\x30\n\nstart_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x12K\n\x13\x63ontinuation_tokens\x18\x06 \x01(\x0b\x32,.google.bigtable.v2.StreamContinuationTokensH\x00\x12,\n\x08\x65nd_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x35\n\x12heartbeat_duration\x18\x07 \x01(\x0b\x32\x19.google.protobuf.DurationB\x0c\n\nstart_from\"\xeb\t\n\x18ReadChangeStreamResponse\x12N\n\x0b\x64\x61ta_change\x18\x01 \x01(\x0b\x32\x37.google.bigtable.v2.ReadChangeStreamResponse.DataChangeH\x00\x12K\n\theartbeat\x18\x02 \x01(\x0b\x32\x36.google.bigtable.v2.ReadChangeStreamResponse.HeartbeatH\x00\x12P\n\x0c\x63lose_stream\x18\x03 \x01(\x0b\x32\x38.google.bigtable.v2.ReadChangeStreamResponse.CloseStreamH\x00\x1a\xf4\x01\n\rMutationChunk\x12X\n\nchunk_info\x18\x01 \x01(\x0b\x32\x44.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo\x12.\n\x08mutation\x18\x02 \x01(\x0b\x32\x1c.google.bigtable.v2.Mutation\x1aY\n\tChunkInfo\x12\x1a\n\x12\x63hunked_value_size\x18\x01 \x01(\x05\x12\x1c\n\x14\x63hunked_value_offset\x18\x02 \x01(\x05\x12\x12\n\nlast_chunk\x18\x03 \x01(\x08\x1a\xc6\x03\n\nDataChange\x12J\n\x04type\x18\x01 \x01(\x0e\x32<.google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type\x12\x19\n\x11source_cluster_id\x18\x02 \x01(\t\x12\x0f\n\x07row_key\x18\x03 \x01(\x0c\x12\x34\n\x10\x63ommit_timestamp\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x12\n\ntiebreaker\x18\x05 \x01(\x05\x12J\n\x06\x63hunks\x18\x06 \x03(\x0b\x32:.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk\x12\x0c\n\x04\x64one\x18\x08 \x01(\x08\x12\r\n\x05token\x18\t \x01(\t\x12;\n\x17\x65stimated_low_watermark\x18\n \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"P\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x08\n\x04USER\x10\x01\x12\x16\n\x12GARBAGE_COLLECTION\x10\x02\x12\x10\n\x0c\x43ONTINUATION\x10\x03\x1a\x91\x01\n\tHeartbeat\x12G\n\x12\x63ontinuation_token\x18\x01 \x01(\x0b\x32+.google.bigtable.v2.StreamContinuationToken\x12;\n\x17\x65stimated_low_watermark\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x1a{\n\x0b\x43loseStream\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12H\n\x13\x63ontinuation_tokens\x18\x02 
\x03(\x0b\x32+.google.bigtable.v2.StreamContinuationTokenB\x0f\n\rstream_record2\xd7\x18\n\x08\x42igtable\x12\x9b\x02\n\x08ReadRows\x12#.google.bigtable.v2.ReadRowsRequest\x1a$.google.bigtable.v2.ReadRowsResponse\"\xc1\x01\x82\xd3\xe4\x93\x02>\"9/v2/{table_name=projects/*/instances/*/tables/*}:readRows:\x01*\x8a\xd3\xe4\x93\x02N\x12:\n\ntable_name\x12,{table_name=projects/*/instances/*/tables/*}\x12\x10\n\x0e\x61pp_profile_id\xda\x41\ntable_name\xda\x41\x19table_name,app_profile_id0\x01\x12\xac\x02\n\rSampleRowKeys\x12(.google.bigtable.v2.SampleRowKeysRequest\x1a).google.bigtable.v2.SampleRowKeysResponse\"\xc3\x01\x82\xd3\xe4\x93\x02@\x12>/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys\x8a\xd3\xe4\x93\x02N\x12:\n\ntable_name\x12,{table_name=projects/*/instances/*/tables/*}\x12\x10\n\x0e\x61pp_profile_id\xda\x41\ntable_name\xda\x41\x19table_name,app_profile_id0\x01\x12\xc1\x02\n\tMutateRow\x12$.google.bigtable.v2.MutateRowRequest\x1a%.google.bigtable.v2.MutateRowResponse\"\xe6\x01\x82\xd3\xe4\x93\x02?\":/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow:\x01*\x8a\xd3\xe4\x93\x02N\x12:\n\ntable_name\x12,{table_name=projects/*/instances/*/tables/*}\x12\x10\n\x0e\x61pp_profile_id\xda\x41\x1ctable_name,row_key,mutations\xda\x41+table_name,row_key,mutations,app_profile_id\x12\xb3\x02\n\nMutateRows\x12%.google.bigtable.v2.MutateRowsRequest\x1a&.google.bigtable.v2.MutateRowsResponse\"\xd3\x01\x82\xd3\xe4\x93\x02@\";/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows:\x01*\x8a\xd3\xe4\x93\x02N\x12:\n\ntable_name\x12,{table_name=projects/*/instances/*/tables/*}\x12\x10\n\x0e\x61pp_profile_id\xda\x41\x12table_name,entries\xda\x41!table_name,entries,app_profile_id0\x01\x12\xad\x03\n\x11\x43heckAndMutateRow\x12,.google.bigtable.v2.CheckAndMutateRowRequest\x1a-.google.bigtable.v2.CheckAndMutateRowResponse\"\xba\x02\x82\xd3\xe4\x93\x02G\"B/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow:\x01*\x8a\xd3\xe4\x93\x02N\x12:\n\ntable_name\x12,{table_name=projects/*/instances/*/tables/*}\x12\x10\n\x0e\x61pp_profile_id\xda\x41\x42table_name,row_key,predicate_filter,true_mutations,false_mutations\xda\x41Qtable_name,row_key,predicate_filter,true_mutations,false_mutations,app_profile_id\x12\xee\x01\n\x0bPingAndWarm\x12&.google.bigtable.v2.PingAndWarmRequest\x1a\'.google.bigtable.v2.PingAndWarmResponse\"\x8d\x01\x82\xd3\xe4\x93\x02+\"&/v2/{name=projects/*/instances/*}:ping:\x01*\x8a\xd3\xe4\x93\x02\x39\x12%\n\x04name\x12\x1d{name=projects/*/instances/*}\x12\x10\n\x0e\x61pp_profile_id\xda\x41\x04name\xda\x41\x13name,app_profile_id\x12\xdd\x02\n\x12ReadModifyWriteRow\x12-.google.bigtable.v2.ReadModifyWriteRowRequest\x1a..google.bigtable.v2.ReadModifyWriteRowResponse\"\xe7\x01\x82\xd3\xe4\x93\x02H\"C/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow:\x01*\x8a\xd3\xe4\x93\x02N\x12:\n\ntable_name\x12,{table_name=projects/*/instances/*/tables/*}\x12\x10\n\x0e\x61pp_profile_id\xda\x41\x18table_name,row_key,rules\xda\x41\'table_name,row_key,rules,app_profile_id\x12\xbb\x02\n%GenerateInitialChangeStreamPartitions\x12@.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest\x1a\x41.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse\"\x8a\x01\x82\xd3\xe4\x93\x02[\"V/v2/{table_name=projects/*/instances/*/tables/*}:generateInitialChangeStreamPartitions:\x01*\xda\x41\ntable_name\xda\x41\x19table_name,app_profile_id0\x01\x12\xe6\x01\n\x10ReadChangeStream\x12+.google.bigtable.v2.ReadChangeStreamRequest\x1a,.google.bigtable.v2.ReadChangeStreamRespo
nse\"u\x82\xd3\xe4\x93\x02\x46\"A/v2/{table_name=projects/*/instances/*/tables/*}:readChangeStream:\x01*\xda\x41\ntable_name\xda\x41\x19table_name,app_profile_id0\x01\x1a\xdb\x02\xca\x41\x17\x62igtable.googleapis.com\xd2\x41\xbd\x02https://www.googleapis.com/auth/bigtable.data,https://www.googleapis.com/auth/bigtable.data.readonly,https://www.googleapis.com/auth/cloud-bigtable.data,https://www.googleapis.com/auth/cloud-bigtable.data.readonly,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-onlyB\xeb\x02\n\x16\x63om.google.bigtable.v2B\rBigtableProtoP\x01Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2\xea\x02\x1bGoogle::Cloud::Bigtable::V2\xea\x41P\n%bigtableadmin.googleapis.com/Instance\x12\'projects/{project}/instances/{instance}\xea\x41\\\n\"bigtableadmin.googleapis.com/Table\x12\x36projects/{project}/instances/{instance}/tables/{table}b\x06proto3') - -_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) -_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.bigtable.v2.bigtable_pb2', globals()) -if _descriptor._USE_C_DESCRIPTORS == False: +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n!google/bigtable/v2/bigtable.proto\x12\x12google.bigtable.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x18google/api/routing.proto\x1a\x1dgoogle/bigtable/v2/data.proto\x1a&google/bigtable/v2/request_stats.proto\x1a\x1egoogle/bigtable/v2/types.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x17google/rpc/status.proto\"\xcc\x04\n\x0fReadRowsRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x01\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12Q\n\x14\x61uthorized_view_name\x18\t \x01(\tB3\xe0\x41\x01\xfa\x41-\n+bigtableadmin.googleapis.com/AuthorizedView\x12U\n\x16materialized_view_name\x18\x0b \x01(\tB5\xe0\x41\x01\xfa\x41/\n-bigtableadmin.googleapis.com/MaterializedView\x12\x16\n\x0e\x61pp_profile_id\x18\x05 \x01(\t\x12(\n\x04rows\x18\x02 \x01(\x0b\x32\x1a.google.bigtable.v2.RowSet\x12-\n\x06\x66ilter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x12\n\nrows_limit\x18\x04 \x01(\x03\x12P\n\x12request_stats_view\x18\x06 \x01(\x0e\x32\x34.google.bigtable.v2.ReadRowsRequest.RequestStatsView\x12\x10\n\x08reversed\x18\x07 \x01(\x08\"f\n\x10RequestStatsView\x12\"\n\x1eREQUEST_STATS_VIEW_UNSPECIFIED\x10\x00\x12\x16\n\x12REQUEST_STATS_NONE\x10\x01\x12\x16\n\x12REQUEST_STATS_FULL\x10\x02\"\xb1\x03\n\x10ReadRowsResponse\x12>\n\x06\x63hunks\x18\x01 \x03(\x0b\x32..google.bigtable.v2.ReadRowsResponse.CellChunk\x12\x1c\n\x14last_scanned_row_key\x18\x02 \x01(\x0c\x12\x37\n\rrequest_stats\x18\x03 \x01(\x0b\x32 .google.bigtable.v2.RequestStats\x1a\x85\x02\n\tCellChunk\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x31\n\x0b\x66\x61mily_name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\tqualifier\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.BytesValue\x12\x18\n\x10timestamp_micros\x18\x04 \x01(\x03\x12\x0e\n\x06labels\x18\x05 \x03(\t\x12\r\n\x05value\x18\x06 \x01(\x0c\x12\x12\n\nvalue_size\x18\x07 \x01(\x05\x12\x13\n\treset_row\x18\x08 \x01(\x08H\x00\x12\x14\n\ncommit_row\x18\t \x01(\x08H\x00\x42\x0c\n\nrow_status\"\x98\x02\n\x14SampleRowKeysRequest\x12>\n\ntable_name\x18\x01 
\x01(\tB*\xe0\x41\x01\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12Q\n\x14\x61uthorized_view_name\x18\x04 \x01(\tB3\xe0\x41\x01\xfa\x41-\n+bigtableadmin.googleapis.com/AuthorizedView\x12U\n\x16materialized_view_name\x18\x05 \x01(\tB5\xe0\x41\x01\xfa\x41/\n-bigtableadmin.googleapis.com/MaterializedView\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t\">\n\x15SampleRowKeysResponse\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x14\n\x0coffset_bytes\x18\x02 \x01(\x03\"\x89\x02\n\x10MutateRowRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x01\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12Q\n\x14\x61uthorized_view_name\x18\x06 \x01(\tB3\xe0\x41\x01\xfa\x41-\n+bigtableadmin.googleapis.com/AuthorizedView\x12\x16\n\x0e\x61pp_profile_id\x18\x04 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12\x34\n\tmutations\x18\x03 \x03(\x0b\x32\x1c.google.bigtable.v2.MutationB\x03\xe0\x41\x02\"\x13\n\x11MutateRowResponse\"\xd1\x02\n\x11MutateRowsRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x01\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12Q\n\x14\x61uthorized_view_name\x18\x05 \x01(\tB3\xe0\x41\x01\xfa\x41-\n+bigtableadmin.googleapis.com/AuthorizedView\x12\x16\n\x0e\x61pp_profile_id\x18\x03 \x01(\t\x12\x41\n\x07\x65ntries\x18\x02 \x03(\x0b\x32+.google.bigtable.v2.MutateRowsRequest.EntryB\x03\xe0\x41\x02\x1aN\n\x05\x45ntry\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x34\n\tmutations\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.MutationB\x03\xe0\x41\x02\"\xe4\x01\n\x12MutateRowsResponse\x12=\n\x07\x65ntries\x18\x01 \x03(\x0b\x32,.google.bigtable.v2.MutateRowsResponse.Entry\x12?\n\x0frate_limit_info\x18\x03 \x01(\x0b\x32!.google.bigtable.v2.RateLimitInfoH\x00\x88\x01\x01\x1a:\n\x05\x45ntry\x12\r\n\x05index\x18\x01 \x01(\x03\x12\"\n\x06status\x18\x02 \x01(\x0b\x32\x12.google.rpc.StatusB\x12\n\x10_rate_limit_info\"J\n\rRateLimitInfo\x12)\n\x06period\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x0e\n\x06\x66\x61\x63tor\x18\x02 \x01(\x01\"\x81\x03\n\x18\x43heckAndMutateRowRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x01\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12Q\n\x14\x61uthorized_view_name\x18\t \x01(\tB3\xe0\x41\x01\xfa\x41-\n+bigtableadmin.googleapis.com/AuthorizedView\x12\x16\n\x0e\x61pp_profile_id\x18\x07 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12\x37\n\x10predicate_filter\x18\x06 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x34\n\x0etrue_mutations\x18\x04 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\x12\x35\n\x0f\x66\x61lse_mutations\x18\x05 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\"6\n\x19\x43heckAndMutateRowResponse\x12\x19\n\x11predicate_matched\x18\x01 \x01(\x08\"i\n\x12PingAndWarmRequest\x12;\n\x04name\x18\x01 \x01(\tB-\xe0\x41\x02\xfa\x41\'\n%bigtableadmin.googleapis.com/Instance\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t\"\x15\n\x13PingAndWarmResponse\"\x99\x02\n\x19ReadModifyWriteRowRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x01\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12Q\n\x14\x61uthorized_view_name\x18\x06 \x01(\tB3\xe0\x41\x01\xfa\x41-\n+bigtableadmin.googleapis.com/AuthorizedView\x12\x16\n\x0e\x61pp_profile_id\x18\x04 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12;\n\x05rules\x18\x03 \x03(\x0b\x32\'.google.bigtable.v2.ReadModifyWriteRuleB\x03\xe0\x41\x02\"B\n\x1aReadModifyWriteRowResponse\x12$\n\x03row\x18\x01 \x01(\x0b\x32\x17.google.bigtable.v2.Row\"\x86\x01\n,GenerateInitialChangeStreamPartitionsRequest\x12>\n\ntable_name\x18\x01 
\x01(\tB*\xe0\x41\x02\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t\"g\n-GenerateInitialChangeStreamPartitionsResponse\x12\x36\n\tpartition\x18\x01 \x01(\x0b\x32#.google.bigtable.v2.StreamPartition\"\x9b\x03\n\x17ReadChangeStreamRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t\x12\x36\n\tpartition\x18\x03 \x01(\x0b\x32#.google.bigtable.v2.StreamPartition\x12\x30\n\nstart_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x12K\n\x13\x63ontinuation_tokens\x18\x06 \x01(\x0b\x32,.google.bigtable.v2.StreamContinuationTokensH\x00\x12,\n\x08\x65nd_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x35\n\x12heartbeat_duration\x18\x07 \x01(\x0b\x32\x19.google.protobuf.DurationB\x0c\n\nstart_from\"\xa9\n\n\x18ReadChangeStreamResponse\x12N\n\x0b\x64\x61ta_change\x18\x01 \x01(\x0b\x32\x37.google.bigtable.v2.ReadChangeStreamResponse.DataChangeH\x00\x12K\n\theartbeat\x18\x02 \x01(\x0b\x32\x36.google.bigtable.v2.ReadChangeStreamResponse.HeartbeatH\x00\x12P\n\x0c\x63lose_stream\x18\x03 \x01(\x0b\x32\x38.google.bigtable.v2.ReadChangeStreamResponse.CloseStreamH\x00\x1a\xf4\x01\n\rMutationChunk\x12X\n\nchunk_info\x18\x01 \x01(\x0b\x32\x44.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo\x12.\n\x08mutation\x18\x02 \x01(\x0b\x32\x1c.google.bigtable.v2.Mutation\x1aY\n\tChunkInfo\x12\x1a\n\x12\x63hunked_value_size\x18\x01 \x01(\x05\x12\x1c\n\x14\x63hunked_value_offset\x18\x02 \x01(\x05\x12\x12\n\nlast_chunk\x18\x03 \x01(\x08\x1a\xc6\x03\n\nDataChange\x12J\n\x04type\x18\x01 \x01(\x0e\x32<.google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type\x12\x19\n\x11source_cluster_id\x18\x02 \x01(\t\x12\x0f\n\x07row_key\x18\x03 \x01(\x0c\x12\x34\n\x10\x63ommit_timestamp\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x12\n\ntiebreaker\x18\x05 \x01(\x05\x12J\n\x06\x63hunks\x18\x06 \x03(\x0b\x32:.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk\x12\x0c\n\x04\x64one\x18\x08 \x01(\x08\x12\r\n\x05token\x18\t \x01(\t\x12;\n\x17\x65stimated_low_watermark\x18\n \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"P\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x08\n\x04USER\x10\x01\x12\x16\n\x12GARBAGE_COLLECTION\x10\x02\x12\x10\n\x0c\x43ONTINUATION\x10\x03\x1a\x91\x01\n\tHeartbeat\x12G\n\x12\x63ontinuation_token\x18\x01 \x01(\x0b\x32+.google.bigtable.v2.StreamContinuationToken\x12;\n\x17\x65stimated_low_watermark\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x1a\xb8\x01\n\x0b\x43loseStream\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12H\n\x13\x63ontinuation_tokens\x18\x02 \x03(\x0b\x32+.google.bigtable.v2.StreamContinuationToken\x12;\n\x0enew_partitions\x18\x03 \x03(\x0b\x32#.google.bigtable.v2.StreamPartitionB\x0f\n\rstream_record\"\xa1\x03\n\x13\x45xecuteQueryRequest\x12\x44\n\rinstance_name\x18\x01 \x01(\tB-\xe0\x41\x02\xfa\x41\'\n%bigtableadmin.googleapis.com/Instance\x12\x1b\n\x0e\x61pp_profile_id\x18\x02 \x01(\tB\x03\xe0\x41\x01\x12\x14\n\x05query\x18\x03 \x01(\tB\x05\x18\x01\xe0\x41\x02\x12\x16\n\x0eprepared_query\x18\t \x01(\x0c\x12;\n\x0cproto_format\x18\x04 \x01(\x0b\x32\x1f.google.bigtable.v2.ProtoFormatB\x02\x18\x01H\x00\x12\x19\n\x0cresume_token\x18\x08 \x01(\x0c\x42\x03\xe0\x41\x01\x12H\n\x06params\x18\x07 \x03(\x0b\x32\x33.google.bigtable.v2.ExecuteQueryRequest.ParamsEntryB\x03\xe0\x41\x02\x1aH\n\x0bParamsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12(\n\x05value\x18\x02 
\x01(\x0b\x32\x19.google.bigtable.v2.Value:\x02\x38\x01\x42\r\n\x0b\x64\x61ta_format\"\x96\x01\n\x14\x45xecuteQueryResponse\x12\x39\n\x08metadata\x18\x01 \x01(\x0b\x32%.google.bigtable.v2.ResultSetMetadataH\x00\x12\x37\n\x07results\x18\x02 \x01(\x0b\x32$.google.bigtable.v2.PartialResultSetH\x00\x42\n\n\x08response\"\xf4\x02\n\x13PrepareQueryRequest\x12\x44\n\rinstance_name\x18\x01 \x01(\tB-\xe0\x41\x02\xfa\x41\'\n%bigtableadmin.googleapis.com/Instance\x12\x1b\n\x0e\x61pp_profile_id\x18\x02 \x01(\tB\x03\xe0\x41\x01\x12\x12\n\x05query\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x37\n\x0cproto_format\x18\x04 \x01(\x0b\x32\x1f.google.bigtable.v2.ProtoFormatH\x00\x12Q\n\x0bparam_types\x18\x06 \x03(\x0b\x32\x37.google.bigtable.v2.PrepareQueryRequest.ParamTypesEntryB\x03\xe0\x41\x02\x1aK\n\x0fParamTypesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\'\n\x05value\x18\x02 \x01(\x0b\x32\x18.google.bigtable.v2.Type:\x02\x38\x01\x42\r\n\x0b\x64\x61ta_format\"\x98\x01\n\x14PrepareQueryResponse\x12\x37\n\x08metadata\x18\x01 \x01(\x0b\x32%.google.bigtable.v2.ResultSetMetadata\x12\x16\n\x0eprepared_query\x18\x02 \x01(\x0c\x12/\n\x0bvalid_until\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp2\xc9&\n\x08\x42igtable\x12\xdb\x03\n\x08ReadRows\x12#.google.bigtable.v2.ReadRowsRequest\x1a$.google.bigtable.v2.ReadRowsResponse\"\x81\x03\xda\x41\ntable_name\xda\x41\x19table_name,app_profile_id\x82\xd3\xe4\x93\x02\x9a\x01\"9/v2/{table_name=projects/*/instances/*/tables/*}:readRows:\x01*ZZ\"U/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:readRows:\x01*\x8a\xd3\xe4\x93\x02\xb0\x01\x12:\n\ntable_name\x12,{table_name=projects/*/instances/*/tables/*}\x12\x10\n\x0e\x61pp_profile_id\x12`\n\x14\x61uthorized_view_name\x12H{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}0\x01\x12\xee\x03\n\rSampleRowKeys\x12(.google.bigtable.v2.SampleRowKeysRequest\x1a).google.bigtable.v2.SampleRowKeysResponse\"\x85\x03\xda\x41\ntable_name\xda\x41\x19table_name,app_profile_id\x82\xd3\xe4\x93\x02\x9e\x01\x12>/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeysZ\\\x12Z/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:sampleRowKeys\x8a\xd3\xe4\x93\x02\xb0\x01\x12:\n\ntable_name\x12,{table_name=projects/*/instances/*/tables/*}\x12\x10\n\x0e\x61pp_profile_id\x12`\n\x14\x61uthorized_view_name\x12H{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}0\x01\x12\x82\x04\n\tMutateRow\x12$.google.bigtable.v2.MutateRowRequest\x1a%.google.bigtable.v2.MutateRowResponse\"\xa7\x03\xda\x41\x1ctable_name,row_key,mutations\xda\x41+table_name,row_key,mutations,app_profile_id\x82\xd3\xe4\x93\x02\x9c\x01\":/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow:\x01*Z[\"V/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:mutateRow:\x01*\x8a\xd3\xe4\x93\x02\xb0\x01\x12:\n\ntable_name\x12,{table_name=projects/*/instances/*/tables/*}\x12\x10\n\x0e\x61pp_profile_id\x12`\n\x14\x61uthorized_view_name\x12H{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}\x12\xf5\x03\n\nMutateRows\x12%.google.bigtable.v2.MutateRowsRequest\x1a&.google.bigtable.v2.MutateRowsResponse\"\x95\x03\xda\x41\x12table_name,entries\xda\x41!table_name,entries,app_profile_id\x82\xd3\xe4\x93\x02\x9e\x01\";/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows:\x01*Z\\\"W/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:mutateRows:\x01*\x8a\xd3\xe4\x93\x02\xb0\x01\x12:\n\ntable_name\x12,{table_name=projects/*/instances/*/ta
bles/*}\x12\x10\n\x0e\x61pp_profile_id\x12`\n\x14\x61uthorized_view_name\x12H{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}0\x01\x12\xf6\x04\n\x11\x43heckAndMutateRow\x12,.google.bigtable.v2.CheckAndMutateRowRequest\x1a-.google.bigtable.v2.CheckAndMutateRowResponse\"\x83\x04\xda\x41\x42table_name,row_key,predicate_filter,true_mutations,false_mutations\xda\x41Qtable_name,row_key,predicate_filter,true_mutations,false_mutations,app_profile_id\x82\xd3\xe4\x93\x02\xac\x01\"B/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow:\x01*Zc\"^/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:checkAndMutateRow:\x01*\x8a\xd3\xe4\x93\x02\xb0\x01\x12:\n\ntable_name\x12,{table_name=projects/*/instances/*/tables/*}\x12\x10\n\x0e\x61pp_profile_id\x12`\n\x14\x61uthorized_view_name\x12H{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}\x12\xee\x01\n\x0bPingAndWarm\x12&.google.bigtable.v2.PingAndWarmRequest\x1a\'.google.bigtable.v2.PingAndWarmResponse\"\x8d\x01\xda\x41\x04name\xda\x41\x13name,app_profile_id\x82\xd3\xe4\x93\x02+\"&/v2/{name=projects/*/instances/*}:ping:\x01*\x8a\xd3\xe4\x93\x02\x39\x12%\n\x04name\x12\x1d{name=projects/*/instances/*}\x12\x10\n\x0e\x61pp_profile_id\x12\xa7\x04\n\x12ReadModifyWriteRow\x12-.google.bigtable.v2.ReadModifyWriteRowRequest\x1a..google.bigtable.v2.ReadModifyWriteRowResponse\"\xb1\x03\xda\x41\x18table_name,row_key,rules\xda\x41\'table_name,row_key,rules,app_profile_id\x82\xd3\xe4\x93\x02\xae\x01\"C/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow:\x01*Zd\"_/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:readModifyWriteRow:\x01*\x8a\xd3\xe4\x93\x02\xb0\x01\x12:\n\ntable_name\x12,{table_name=projects/*/instances/*/tables/*}\x12\x10\n\x0e\x61pp_profile_id\x12`\n\x14\x61uthorized_view_name\x12H{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}\x12\xbb\x02\n%GenerateInitialChangeStreamPartitions\x12@.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest\x1a\x41.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse\"\x8a\x01\xda\x41\ntable_name\xda\x41\x19table_name,app_profile_id\x82\xd3\xe4\x93\x02[\"V/v2/{table_name=projects/*/instances/*/tables/*}:generateInitialChangeStreamPartitions:\x01*0\x01\x12\xe6\x01\n\x10ReadChangeStream\x12+.google.bigtable.v2.ReadChangeStreamRequest\x1a,.google.bigtable.v2.ReadChangeStreamResponse\"u\xda\x41\ntable_name\xda\x41\x19table_name,app_profile_id\x82\xd3\xe4\x93\x02\x46\"A/v2/{table_name=projects/*/instances/*/tables/*}:readChangeStream:\x01*0\x01\x12\xa9\x02\n\x0cPrepareQuery\x12\'.google.bigtable.v2.PrepareQueryRequest\x1a(.google.bigtable.v2.PrepareQueryResponse\"\xc5\x01\xda\x41\x13instance_name,query\xda\x41\"instance_name,query,app_profile_id\x82\xd3\xe4\x93\x02<\"7/v2/{instance_name=projects/*/instances/*}:prepareQuery:\x01*\x8a\xd3\xe4\x93\x02\x42\x12.\n\rinstance_name\x12\x1d{name=projects/*/instances/*}\x12\x10\n\x0e\x61pp_profile_id\x12\xab\x02\n\x0c\x45xecuteQuery\x12\'.google.bigtable.v2.ExecuteQueryRequest\x1a(.google.bigtable.v2.ExecuteQueryResponse\"\xc5\x01\xda\x41\x13instance_name,query\xda\x41\"instance_name,query,app_profile_id\x82\xd3\xe4\x93\x02<\"7/v2/{instance_name=projects/*/instances/*}:executeQuery:\x01*\x8a\xd3\xe4\x93\x02\x42\x12.\n\rinstance_name\x12\x1d{name=projects/*/instances/*}\x12\x10\n\x0e\x61pp_profile_id0\x01\x1a\xdb\x02\xca\x41\x17\x62igtable.googleapis.com\xd2\x41\xbd\x02https://www.googleapis.com/auth/bigtable.data,https:/
/www.googleapis.com/auth/bigtable.data.readonly,https://www.googleapis.com/auth/cloud-bigtable.data,https://www.googleapis.com/auth/cloud-bigtable.data.readonly,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-onlyB\xf5\x04\n\x16\x63om.google.bigtable.v2B\rBigtableProtoP\x01Z8cloud.google.com/go/bigtable/apiv2/bigtablepb;bigtablepb\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2\xea\x02\x1bGoogle::Cloud::Bigtable::V2\xea\x41P\n%bigtableadmin.googleapis.com/Instance\x12\'projects/{project}/instances/{instance}\xea\x41\\\n\"bigtableadmin.googleapis.com/Table\x12\x36projects/{project}/instances/{instance}/tables/{table}\xea\x41\x87\x01\n+bigtableadmin.googleapis.com/AuthorizedView\x12Xprojects/{project}/instances/{instance}/tables/{table}/authorizedViews/{authorized_view}\xea\x41~\n-bigtableadmin.googleapis.com/MaterializedView\x12Mprojects/{project}/instances/{instance}/materializedViews/{materialized_view}b\x06proto3') - DESCRIPTOR._options = None - DESCRIPTOR._serialized_options = b'\n\026com.google.bigtable.v2B\rBigtableProtoP\001Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2\352\002\033Google::Cloud::Bigtable::V2\352AP\n%bigtableadmin.googleapis.com/Instance\022\'projects/{project}/instances/{instance}\352A\\\n\"bigtableadmin.googleapis.com/Table\0226projects/{project}/instances/{instance}/tables/{table}' - _READROWSREQUEST.fields_by_name['table_name']._options = None - _READROWSREQUEST.fields_by_name['table_name']._serialized_options = b'\340A\002\372A$\n\"bigtableadmin.googleapis.com/Table' - _SAMPLEROWKEYSREQUEST.fields_by_name['table_name']._options = None - _SAMPLEROWKEYSREQUEST.fields_by_name['table_name']._serialized_options = b'\340A\002\372A$\n\"bigtableadmin.googleapis.com/Table' - _MUTATEROWREQUEST.fields_by_name['table_name']._options = None - _MUTATEROWREQUEST.fields_by_name['table_name']._serialized_options = b'\340A\002\372A$\n\"bigtableadmin.googleapis.com/Table' - _MUTATEROWREQUEST.fields_by_name['row_key']._options = None - _MUTATEROWREQUEST.fields_by_name['row_key']._serialized_options = b'\340A\002' - _MUTATEROWREQUEST.fields_by_name['mutations']._options = None - _MUTATEROWREQUEST.fields_by_name['mutations']._serialized_options = b'\340A\002' - _MUTATEROWSREQUEST_ENTRY.fields_by_name['mutations']._options = None - _MUTATEROWSREQUEST_ENTRY.fields_by_name['mutations']._serialized_options = b'\340A\002' - _MUTATEROWSREQUEST.fields_by_name['table_name']._options = None - _MUTATEROWSREQUEST.fields_by_name['table_name']._serialized_options = b'\340A\002\372A$\n\"bigtableadmin.googleapis.com/Table' - _MUTATEROWSREQUEST.fields_by_name['entries']._options = None - _MUTATEROWSREQUEST.fields_by_name['entries']._serialized_options = b'\340A\002' - _CHECKANDMUTATEROWREQUEST.fields_by_name['table_name']._options = None - _CHECKANDMUTATEROWREQUEST.fields_by_name['table_name']._serialized_options = b'\340A\002\372A$\n\"bigtableadmin.googleapis.com/Table' - _CHECKANDMUTATEROWREQUEST.fields_by_name['row_key']._options = None - _CHECKANDMUTATEROWREQUEST.fields_by_name['row_key']._serialized_options = b'\340A\002' - _PINGANDWARMREQUEST.fields_by_name['name']._options = None - _PINGANDWARMREQUEST.fields_by_name['name']._serialized_options = b'\340A\002\372A\'\n%bigtableadmin.googleapis.com/Instance' - _READMODIFYWRITEROWREQUEST.fields_by_name['table_name']._options = None - 
_READMODIFYWRITEROWREQUEST.fields_by_name['table_name']._serialized_options = b'\340A\002\372A$\n\"bigtableadmin.googleapis.com/Table' - _READMODIFYWRITEROWREQUEST.fields_by_name['row_key']._options = None - _READMODIFYWRITEROWREQUEST.fields_by_name['row_key']._serialized_options = b'\340A\002' - _READMODIFYWRITEROWREQUEST.fields_by_name['rules']._options = None - _READMODIFYWRITEROWREQUEST.fields_by_name['rules']._serialized_options = b'\340A\002' - _GENERATEINITIALCHANGESTREAMPARTITIONSREQUEST.fields_by_name['table_name']._options = None - _GENERATEINITIALCHANGESTREAMPARTITIONSREQUEST.fields_by_name['table_name']._serialized_options = b'\340A\002\372A$\n\"bigtableadmin.googleapis.com/Table' - _READCHANGESTREAMREQUEST.fields_by_name['table_name']._options = None - _READCHANGESTREAMREQUEST.fields_by_name['table_name']._serialized_options = b'\340A\002\372A$\n\"bigtableadmin.googleapis.com/Table' - _BIGTABLE._options = None - _BIGTABLE._serialized_options = b'\312A\027bigtable.googleapis.com\322A\275\002https://www.googleapis.com/auth/bigtable.data,https://www.googleapis.com/auth/bigtable.data.readonly,https://www.googleapis.com/auth/cloud-bigtable.data,https://www.googleapis.com/auth/cloud-bigtable.data.readonly,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-only' - _BIGTABLE.methods_by_name['ReadRows']._options = None - _BIGTABLE.methods_by_name['ReadRows']._serialized_options = b'\202\323\344\223\002>\"9/v2/{table_name=projects/*/instances/*/tables/*}:readRows:\001*\212\323\344\223\002N\022:\n\ntable_name\022,{table_name=projects/*/instances/*/tables/*}\022\020\n\016app_profile_id\332A\ntable_name\332A\031table_name,app_profile_id' - _BIGTABLE.methods_by_name['SampleRowKeys']._options = None - _BIGTABLE.methods_by_name['SampleRowKeys']._serialized_options = b'\202\323\344\223\002@\022>/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys\212\323\344\223\002N\022:\n\ntable_name\022,{table_name=projects/*/instances/*/tables/*}\022\020\n\016app_profile_id\332A\ntable_name\332A\031table_name,app_profile_id' - _BIGTABLE.methods_by_name['MutateRow']._options = None - _BIGTABLE.methods_by_name['MutateRow']._serialized_options = b'\202\323\344\223\002?\":/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow:\001*\212\323\344\223\002N\022:\n\ntable_name\022,{table_name=projects/*/instances/*/tables/*}\022\020\n\016app_profile_id\332A\034table_name,row_key,mutations\332A+table_name,row_key,mutations,app_profile_id' - _BIGTABLE.methods_by_name['MutateRows']._options = None - _BIGTABLE.methods_by_name['MutateRows']._serialized_options = b'\202\323\344\223\002@\";/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows:\001*\212\323\344\223\002N\022:\n\ntable_name\022,{table_name=projects/*/instances/*/tables/*}\022\020\n\016app_profile_id\332A\022table_name,entries\332A!table_name,entries,app_profile_id' - _BIGTABLE.methods_by_name['CheckAndMutateRow']._options = None - _BIGTABLE.methods_by_name['CheckAndMutateRow']._serialized_options = b'\202\323\344\223\002G\"B/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow:\001*\212\323\344\223\002N\022:\n\ntable_name\022,{table_name=projects/*/instances/*/tables/*}\022\020\n\016app_profile_id\332ABtable_name,row_key,predicate_filter,true_mutations,false_mutations\332AQtable_name,row_key,predicate_filter,true_mutations,false_mutations,app_profile_id' - _BIGTABLE.methods_by_name['PingAndWarm']._options = None - _BIGTABLE.methods_by_name['PingAndWarm']._serialized_options = 
b'\202\323\344\223\002+\"&/v2/{name=projects/*/instances/*}:ping:\001*\212\323\344\223\0029\022%\n\004name\022\035{name=projects/*/instances/*}\022\020\n\016app_profile_id\332A\004name\332A\023name,app_profile_id' - _BIGTABLE.methods_by_name['ReadModifyWriteRow']._options = None - _BIGTABLE.methods_by_name['ReadModifyWriteRow']._serialized_options = b'\202\323\344\223\002H\"C/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow:\001*\212\323\344\223\002N\022:\n\ntable_name\022,{table_name=projects/*/instances/*/tables/*}\022\020\n\016app_profile_id\332A\030table_name,row_key,rules\332A\'table_name,row_key,rules,app_profile_id' - _BIGTABLE.methods_by_name['GenerateInitialChangeStreamPartitions']._options = None - _BIGTABLE.methods_by_name['GenerateInitialChangeStreamPartitions']._serialized_options = b'\202\323\344\223\002[\"V/v2/{table_name=projects/*/instances/*/tables/*}:generateInitialChangeStreamPartitions:\001*\332A\ntable_name\332A\031table_name,app_profile_id' - _BIGTABLE.methods_by_name['ReadChangeStream']._options = None - _BIGTABLE.methods_by_name['ReadChangeStream']._serialized_options = b'\202\323\344\223\002F\"A/v2/{table_name=projects/*/instances/*/tables/*}:readChangeStream:\001*\332A\ntable_name\332A\031table_name,app_profile_id' - _READROWSREQUEST._serialized_start=392 - _READROWSREQUEST._serialized_end=792 - _READROWSREQUEST_REQUESTSTATSVIEW._serialized_start=690 - _READROWSREQUEST_REQUESTSTATSVIEW._serialized_end=792 - _READROWSRESPONSE._serialized_start=795 - _READROWSRESPONSE._serialized_end=1228 - _READROWSRESPONSE_CELLCHUNK._serialized_start=967 - _READROWSRESPONSE_CELLCHUNK._serialized_end=1228 - _SAMPLEROWKEYSREQUEST._serialized_start=1230 - _SAMPLEROWKEYSREQUEST._serialized_end=1340 - _SAMPLEROWKEYSRESPONSE._serialized_start=1342 - _SAMPLEROWKEYSRESPONSE._serialized_end=1404 - _MUTATEROWREQUEST._serialized_start=1407 - _MUTATEROWREQUEST._serialized_end=1589 - _MUTATEROWRESPONSE._serialized_start=1591 - _MUTATEROWRESPONSE._serialized_end=1610 - _MUTATEROWSREQUEST._serialized_start=1613 - _MUTATEROWSREQUEST._serialized_end=1867 - _MUTATEROWSREQUEST_ENTRY._serialized_start=1789 - _MUTATEROWSREQUEST_ENTRY._serialized_end=1867 - _MUTATEROWSRESPONSE._serialized_start=1870 - _MUTATEROWSRESPONSE._serialized_end=2013 - _MUTATEROWSRESPONSE_ENTRY._serialized_start=1955 - _MUTATEROWSRESPONSE_ENTRY._serialized_end=2013 - _CHECKANDMUTATEROWREQUEST._serialized_start=2016 - _CHECKANDMUTATEROWREQUEST._serialized_end=2318 - _CHECKANDMUTATEROWRESPONSE._serialized_start=2320 - _CHECKANDMUTATEROWRESPONSE._serialized_end=2374 - _PINGANDWARMREQUEST._serialized_start=2376 - _PINGANDWARMREQUEST._serialized_end=2481 - _PINGANDWARMRESPONSE._serialized_start=2483 - _PINGANDWARMRESPONSE._serialized_end=2504 - _READMODIFYWRITEROWREQUEST._serialized_start=2507 - _READMODIFYWRITEROWREQUEST._serialized_end=2705 - _READMODIFYWRITEROWRESPONSE._serialized_start=2707 - _READMODIFYWRITEROWRESPONSE._serialized_end=2773 - _GENERATEINITIALCHANGESTREAMPARTITIONSREQUEST._serialized_start=2776 - _GENERATEINITIALCHANGESTREAMPARTITIONSREQUEST._serialized_end=2910 - _GENERATEINITIALCHANGESTREAMPARTITIONSRESPONSE._serialized_start=2912 - _GENERATEINITIALCHANGESTREAMPARTITIONSRESPONSE._serialized_end=3015 - _READCHANGESTREAMREQUEST._serialized_start=3018 - _READCHANGESTREAMREQUEST._serialized_end=3429 - _READCHANGESTREAMRESPONSE._serialized_start=3432 - _READCHANGESTREAMRESPONSE._serialized_end=4691 - _READCHANGESTREAMRESPONSE_MUTATIONCHUNK._serialized_start=3700 - 
_READCHANGESTREAMRESPONSE_MUTATIONCHUNK._serialized_end=3944 - _READCHANGESTREAMRESPONSE_MUTATIONCHUNK_CHUNKINFO._serialized_start=3855 - _READCHANGESTREAMRESPONSE_MUTATIONCHUNK_CHUNKINFO._serialized_end=3944 - _READCHANGESTREAMRESPONSE_DATACHANGE._serialized_start=3947 - _READCHANGESTREAMRESPONSE_DATACHANGE._serialized_end=4401 - _READCHANGESTREAMRESPONSE_DATACHANGE_TYPE._serialized_start=4321 - _READCHANGESTREAMRESPONSE_DATACHANGE_TYPE._serialized_end=4401 - _READCHANGESTREAMRESPONSE_HEARTBEAT._serialized_start=4404 - _READCHANGESTREAMRESPONSE_HEARTBEAT._serialized_end=4549 - _READCHANGESTREAMRESPONSE_CLOSESTREAM._serialized_start=4551 - _READCHANGESTREAMRESPONSE_CLOSESTREAM._serialized_end=4674 - _BIGTABLE._serialized_start=4694 - _BIGTABLE._serialized_end=7853 +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.bigtable.v2.bigtable_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None + _globals['DESCRIPTOR']._serialized_options = b'\n\026com.google.bigtable.v2B\rBigtableProtoP\001Z8cloud.google.com/go/bigtable/apiv2/bigtablepb;bigtablepb\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2\352\002\033Google::Cloud::Bigtable::V2\352AP\n%bigtableadmin.googleapis.com/Instance\022\'projects/{project}/instances/{instance}\352A\\\n\"bigtableadmin.googleapis.com/Table\0226projects/{project}/instances/{instance}/tables/{table}\352A\207\001\n+bigtableadmin.googleapis.com/AuthorizedView\022Xprojects/{project}/instances/{instance}/tables/{table}/authorizedViews/{authorized_view}\352A~\n-bigtableadmin.googleapis.com/MaterializedView\022Mprojects/{project}/instances/{instance}/materializedViews/{materialized_view}' + _globals['_READROWSREQUEST'].fields_by_name['table_name']._loaded_options = None + _globals['_READROWSREQUEST'].fields_by_name['table_name']._serialized_options = b'\340A\001\372A$\n\"bigtableadmin.googleapis.com/Table' + _globals['_READROWSREQUEST'].fields_by_name['authorized_view_name']._loaded_options = None + _globals['_READROWSREQUEST'].fields_by_name['authorized_view_name']._serialized_options = b'\340A\001\372A-\n+bigtableadmin.googleapis.com/AuthorizedView' + _globals['_READROWSREQUEST'].fields_by_name['materialized_view_name']._loaded_options = None + _globals['_READROWSREQUEST'].fields_by_name['materialized_view_name']._serialized_options = b'\340A\001\372A/\n-bigtableadmin.googleapis.com/MaterializedView' + _globals['_SAMPLEROWKEYSREQUEST'].fields_by_name['table_name']._loaded_options = None + _globals['_SAMPLEROWKEYSREQUEST'].fields_by_name['table_name']._serialized_options = b'\340A\001\372A$\n\"bigtableadmin.googleapis.com/Table' + _globals['_SAMPLEROWKEYSREQUEST'].fields_by_name['authorized_view_name']._loaded_options = None + _globals['_SAMPLEROWKEYSREQUEST'].fields_by_name['authorized_view_name']._serialized_options = b'\340A\001\372A-\n+bigtableadmin.googleapis.com/AuthorizedView' + _globals['_SAMPLEROWKEYSREQUEST'].fields_by_name['materialized_view_name']._loaded_options = None + _globals['_SAMPLEROWKEYSREQUEST'].fields_by_name['materialized_view_name']._serialized_options = b'\340A\001\372A/\n-bigtableadmin.googleapis.com/MaterializedView' + _globals['_MUTATEROWREQUEST'].fields_by_name['table_name']._loaded_options = None + _globals['_MUTATEROWREQUEST'].fields_by_name['table_name']._serialized_options = b'\340A\001\372A$\n\"bigtableadmin.googleapis.com/Table' + 
_globals['_MUTATEROWREQUEST'].fields_by_name['authorized_view_name']._loaded_options = None + _globals['_MUTATEROWREQUEST'].fields_by_name['authorized_view_name']._serialized_options = b'\340A\001\372A-\n+bigtableadmin.googleapis.com/AuthorizedView' + _globals['_MUTATEROWREQUEST'].fields_by_name['row_key']._loaded_options = None + _globals['_MUTATEROWREQUEST'].fields_by_name['row_key']._serialized_options = b'\340A\002' + _globals['_MUTATEROWREQUEST'].fields_by_name['mutations']._loaded_options = None + _globals['_MUTATEROWREQUEST'].fields_by_name['mutations']._serialized_options = b'\340A\002' + _globals['_MUTATEROWSREQUEST_ENTRY'].fields_by_name['mutations']._loaded_options = None + _globals['_MUTATEROWSREQUEST_ENTRY'].fields_by_name['mutations']._serialized_options = b'\340A\002' + _globals['_MUTATEROWSREQUEST'].fields_by_name['table_name']._loaded_options = None + _globals['_MUTATEROWSREQUEST'].fields_by_name['table_name']._serialized_options = b'\340A\001\372A$\n\"bigtableadmin.googleapis.com/Table' + _globals['_MUTATEROWSREQUEST'].fields_by_name['authorized_view_name']._loaded_options = None + _globals['_MUTATEROWSREQUEST'].fields_by_name['authorized_view_name']._serialized_options = b'\340A\001\372A-\n+bigtableadmin.googleapis.com/AuthorizedView' + _globals['_MUTATEROWSREQUEST'].fields_by_name['entries']._loaded_options = None + _globals['_MUTATEROWSREQUEST'].fields_by_name['entries']._serialized_options = b'\340A\002' + _globals['_CHECKANDMUTATEROWREQUEST'].fields_by_name['table_name']._loaded_options = None + _globals['_CHECKANDMUTATEROWREQUEST'].fields_by_name['table_name']._serialized_options = b'\340A\001\372A$\n\"bigtableadmin.googleapis.com/Table' + _globals['_CHECKANDMUTATEROWREQUEST'].fields_by_name['authorized_view_name']._loaded_options = None + _globals['_CHECKANDMUTATEROWREQUEST'].fields_by_name['authorized_view_name']._serialized_options = b'\340A\001\372A-\n+bigtableadmin.googleapis.com/AuthorizedView' + _globals['_CHECKANDMUTATEROWREQUEST'].fields_by_name['row_key']._loaded_options = None + _globals['_CHECKANDMUTATEROWREQUEST'].fields_by_name['row_key']._serialized_options = b'\340A\002' + _globals['_PINGANDWARMREQUEST'].fields_by_name['name']._loaded_options = None + _globals['_PINGANDWARMREQUEST'].fields_by_name['name']._serialized_options = b'\340A\002\372A\'\n%bigtableadmin.googleapis.com/Instance' + _globals['_READMODIFYWRITEROWREQUEST'].fields_by_name['table_name']._loaded_options = None + _globals['_READMODIFYWRITEROWREQUEST'].fields_by_name['table_name']._serialized_options = b'\340A\001\372A$\n\"bigtableadmin.googleapis.com/Table' + _globals['_READMODIFYWRITEROWREQUEST'].fields_by_name['authorized_view_name']._loaded_options = None + _globals['_READMODIFYWRITEROWREQUEST'].fields_by_name['authorized_view_name']._serialized_options = b'\340A\001\372A-\n+bigtableadmin.googleapis.com/AuthorizedView' + _globals['_READMODIFYWRITEROWREQUEST'].fields_by_name['row_key']._loaded_options = None + _globals['_READMODIFYWRITEROWREQUEST'].fields_by_name['row_key']._serialized_options = b'\340A\002' + _globals['_READMODIFYWRITEROWREQUEST'].fields_by_name['rules']._loaded_options = None + _globals['_READMODIFYWRITEROWREQUEST'].fields_by_name['rules']._serialized_options = b'\340A\002' + _globals['_GENERATEINITIALCHANGESTREAMPARTITIONSREQUEST'].fields_by_name['table_name']._loaded_options = None + _globals['_GENERATEINITIALCHANGESTREAMPARTITIONSREQUEST'].fields_by_name['table_name']._serialized_options = b'\340A\002\372A$\n\"bigtableadmin.googleapis.com/Table' + 
_globals['_READCHANGESTREAMREQUEST'].fields_by_name['table_name']._loaded_options = None + _globals['_READCHANGESTREAMREQUEST'].fields_by_name['table_name']._serialized_options = b'\340A\002\372A$\n\"bigtableadmin.googleapis.com/Table' + _globals['_EXECUTEQUERYREQUEST_PARAMSENTRY']._loaded_options = None + _globals['_EXECUTEQUERYREQUEST_PARAMSENTRY']._serialized_options = b'8\001' + _globals['_EXECUTEQUERYREQUEST'].fields_by_name['instance_name']._loaded_options = None + _globals['_EXECUTEQUERYREQUEST'].fields_by_name['instance_name']._serialized_options = b'\340A\002\372A\'\n%bigtableadmin.googleapis.com/Instance' + _globals['_EXECUTEQUERYREQUEST'].fields_by_name['app_profile_id']._loaded_options = None + _globals['_EXECUTEQUERYREQUEST'].fields_by_name['app_profile_id']._serialized_options = b'\340A\001' + _globals['_EXECUTEQUERYREQUEST'].fields_by_name['query']._loaded_options = None + _globals['_EXECUTEQUERYREQUEST'].fields_by_name['query']._serialized_options = b'\030\001\340A\002' + _globals['_EXECUTEQUERYREQUEST'].fields_by_name['proto_format']._loaded_options = None + _globals['_EXECUTEQUERYREQUEST'].fields_by_name['proto_format']._serialized_options = b'\030\001' + _globals['_EXECUTEQUERYREQUEST'].fields_by_name['resume_token']._loaded_options = None + _globals['_EXECUTEQUERYREQUEST'].fields_by_name['resume_token']._serialized_options = b'\340A\001' + _globals['_EXECUTEQUERYREQUEST'].fields_by_name['params']._loaded_options = None + _globals['_EXECUTEQUERYREQUEST'].fields_by_name['params']._serialized_options = b'\340A\002' + _globals['_PREPAREQUERYREQUEST_PARAMTYPESENTRY']._loaded_options = None + _globals['_PREPAREQUERYREQUEST_PARAMTYPESENTRY']._serialized_options = b'8\001' + _globals['_PREPAREQUERYREQUEST'].fields_by_name['instance_name']._loaded_options = None + _globals['_PREPAREQUERYREQUEST'].fields_by_name['instance_name']._serialized_options = b'\340A\002\372A\'\n%bigtableadmin.googleapis.com/Instance' + _globals['_PREPAREQUERYREQUEST'].fields_by_name['app_profile_id']._loaded_options = None + _globals['_PREPAREQUERYREQUEST'].fields_by_name['app_profile_id']._serialized_options = b'\340A\001' + _globals['_PREPAREQUERYREQUEST'].fields_by_name['query']._loaded_options = None + _globals['_PREPAREQUERYREQUEST'].fields_by_name['query']._serialized_options = b'\340A\002' + _globals['_PREPAREQUERYREQUEST'].fields_by_name['param_types']._loaded_options = None + _globals['_PREPAREQUERYREQUEST'].fields_by_name['param_types']._serialized_options = b'\340A\002' + _globals['_BIGTABLE']._loaded_options = None + _globals['_BIGTABLE']._serialized_options = b'\312A\027bigtable.googleapis.com\322A\275\002https://www.googleapis.com/auth/bigtable.data,https://www.googleapis.com/auth/bigtable.data.readonly,https://www.googleapis.com/auth/cloud-bigtable.data,https://www.googleapis.com/auth/cloud-bigtable.data.readonly,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-only' + _globals['_BIGTABLE'].methods_by_name['ReadRows']._loaded_options = None + _globals['_BIGTABLE'].methods_by_name['ReadRows']._serialized_options = 
b'\332A\ntable_name\332A\031table_name,app_profile_id\202\323\344\223\002\232\001\"9/v2/{table_name=projects/*/instances/*/tables/*}:readRows:\001*ZZ\"U/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:readRows:\001*\212\323\344\223\002\260\001\022:\n\ntable_name\022,{table_name=projects/*/instances/*/tables/*}\022\020\n\016app_profile_id\022`\n\024authorized_view_name\022H{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}' + _globals['_BIGTABLE'].methods_by_name['SampleRowKeys']._loaded_options = None + _globals['_BIGTABLE'].methods_by_name['SampleRowKeys']._serialized_options = b'\332A\ntable_name\332A\031table_name,app_profile_id\202\323\344\223\002\236\001\022>/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeysZ\\\022Z/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:sampleRowKeys\212\323\344\223\002\260\001\022:\n\ntable_name\022,{table_name=projects/*/instances/*/tables/*}\022\020\n\016app_profile_id\022`\n\024authorized_view_name\022H{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}' + _globals['_BIGTABLE'].methods_by_name['MutateRow']._loaded_options = None + _globals['_BIGTABLE'].methods_by_name['MutateRow']._serialized_options = b'\332A\034table_name,row_key,mutations\332A+table_name,row_key,mutations,app_profile_id\202\323\344\223\002\234\001\":/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow:\001*Z[\"V/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:mutateRow:\001*\212\323\344\223\002\260\001\022:\n\ntable_name\022,{table_name=projects/*/instances/*/tables/*}\022\020\n\016app_profile_id\022`\n\024authorized_view_name\022H{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}' + _globals['_BIGTABLE'].methods_by_name['MutateRows']._loaded_options = None + _globals['_BIGTABLE'].methods_by_name['MutateRows']._serialized_options = b'\332A\022table_name,entries\332A!table_name,entries,app_profile_id\202\323\344\223\002\236\001\";/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows:\001*Z\\\"W/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:mutateRows:\001*\212\323\344\223\002\260\001\022:\n\ntable_name\022,{table_name=projects/*/instances/*/tables/*}\022\020\n\016app_profile_id\022`\n\024authorized_view_name\022H{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}' + _globals['_BIGTABLE'].methods_by_name['CheckAndMutateRow']._loaded_options = None + _globals['_BIGTABLE'].methods_by_name['CheckAndMutateRow']._serialized_options = b'\332ABtable_name,row_key,predicate_filter,true_mutations,false_mutations\332AQtable_name,row_key,predicate_filter,true_mutations,false_mutations,app_profile_id\202\323\344\223\002\254\001\"B/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow:\001*Zc\"^/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:checkAndMutateRow:\001*\212\323\344\223\002\260\001\022:\n\ntable_name\022,{table_name=projects/*/instances/*/tables/*}\022\020\n\016app_profile_id\022`\n\024authorized_view_name\022H{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}' + _globals['_BIGTABLE'].methods_by_name['PingAndWarm']._loaded_options = None + _globals['_BIGTABLE'].methods_by_name['PingAndWarm']._serialized_options = 
b'\332A\004name\332A\023name,app_profile_id\202\323\344\223\002+\"&/v2/{name=projects/*/instances/*}:ping:\001*\212\323\344\223\0029\022%\n\004name\022\035{name=projects/*/instances/*}\022\020\n\016app_profile_id' + _globals['_BIGTABLE'].methods_by_name['ReadModifyWriteRow']._loaded_options = None + _globals['_BIGTABLE'].methods_by_name['ReadModifyWriteRow']._serialized_options = b'\332A\030table_name,row_key,rules\332A\'table_name,row_key,rules,app_profile_id\202\323\344\223\002\256\001\"C/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow:\001*Zd\"_/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:readModifyWriteRow:\001*\212\323\344\223\002\260\001\022:\n\ntable_name\022,{table_name=projects/*/instances/*/tables/*}\022\020\n\016app_profile_id\022`\n\024authorized_view_name\022H{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}' + _globals['_BIGTABLE'].methods_by_name['GenerateInitialChangeStreamPartitions']._loaded_options = None + _globals['_BIGTABLE'].methods_by_name['GenerateInitialChangeStreamPartitions']._serialized_options = b'\332A\ntable_name\332A\031table_name,app_profile_id\202\323\344\223\002[\"V/v2/{table_name=projects/*/instances/*/tables/*}:generateInitialChangeStreamPartitions:\001*' + _globals['_BIGTABLE'].methods_by_name['ReadChangeStream']._loaded_options = None + _globals['_BIGTABLE'].methods_by_name['ReadChangeStream']._serialized_options = b'\332A\ntable_name\332A\031table_name,app_profile_id\202\323\344\223\002F\"A/v2/{table_name=projects/*/instances/*/tables/*}:readChangeStream:\001*' + _globals['_BIGTABLE'].methods_by_name['PrepareQuery']._loaded_options = None + _globals['_BIGTABLE'].methods_by_name['PrepareQuery']._serialized_options = b'\332A\023instance_name,query\332A\"instance_name,query,app_profile_id\202\323\344\223\002<\"7/v2/{instance_name=projects/*/instances/*}:prepareQuery:\001*\212\323\344\223\002B\022.\n\rinstance_name\022\035{name=projects/*/instances/*}\022\020\n\016app_profile_id' + _globals['_BIGTABLE'].methods_by_name['ExecuteQuery']._loaded_options = None + _globals['_BIGTABLE'].methods_by_name['ExecuteQuery']._serialized_options = b'\332A\023instance_name,query\332A\"instance_name,query,app_profile_id\202\323\344\223\002<\"7/v2/{instance_name=projects/*/instances/*}:executeQuery:\001*\212\323\344\223\002B\022.\n\rinstance_name\022\035{name=projects/*/instances/*}\022\020\n\016app_profile_id' + _globals['_READROWSREQUEST']._serialized_start=424 + _globals['_READROWSREQUEST']._serialized_end=1012 + _globals['_READROWSREQUEST_REQUESTSTATSVIEW']._serialized_start=910 + _globals['_READROWSREQUEST_REQUESTSTATSVIEW']._serialized_end=1012 + _globals['_READROWSRESPONSE']._serialized_start=1015 + _globals['_READROWSRESPONSE']._serialized_end=1448 + _globals['_READROWSRESPONSE_CELLCHUNK']._serialized_start=1187 + _globals['_READROWSRESPONSE_CELLCHUNK']._serialized_end=1448 + _globals['_SAMPLEROWKEYSREQUEST']._serialized_start=1451 + _globals['_SAMPLEROWKEYSREQUEST']._serialized_end=1731 + _globals['_SAMPLEROWKEYSRESPONSE']._serialized_start=1733 + _globals['_SAMPLEROWKEYSRESPONSE']._serialized_end=1795 + _globals['_MUTATEROWREQUEST']._serialized_start=1798 + _globals['_MUTATEROWREQUEST']._serialized_end=2063 + _globals['_MUTATEROWRESPONSE']._serialized_start=2065 + _globals['_MUTATEROWRESPONSE']._serialized_end=2084 + _globals['_MUTATEROWSREQUEST']._serialized_start=2087 + _globals['_MUTATEROWSREQUEST']._serialized_end=2424 + _globals['_MUTATEROWSREQUEST_ENTRY']._serialized_start=2346 + 
_globals['_MUTATEROWSREQUEST_ENTRY']._serialized_end=2424 + _globals['_MUTATEROWSRESPONSE']._serialized_start=2427 + _globals['_MUTATEROWSRESPONSE']._serialized_end=2655 + _globals['_MUTATEROWSRESPONSE_ENTRY']._serialized_start=2577 + _globals['_MUTATEROWSRESPONSE_ENTRY']._serialized_end=2635 + _globals['_RATELIMITINFO']._serialized_start=2657 + _globals['_RATELIMITINFO']._serialized_end=2731 + _globals['_CHECKANDMUTATEROWREQUEST']._serialized_start=2734 + _globals['_CHECKANDMUTATEROWREQUEST']._serialized_end=3119 + _globals['_CHECKANDMUTATEROWRESPONSE']._serialized_start=3121 + _globals['_CHECKANDMUTATEROWRESPONSE']._serialized_end=3175 + _globals['_PINGANDWARMREQUEST']._serialized_start=3177 + _globals['_PINGANDWARMREQUEST']._serialized_end=3282 + _globals['_PINGANDWARMRESPONSE']._serialized_start=3284 + _globals['_PINGANDWARMRESPONSE']._serialized_end=3305 + _globals['_READMODIFYWRITEROWREQUEST']._serialized_start=3308 + _globals['_READMODIFYWRITEROWREQUEST']._serialized_end=3589 + _globals['_READMODIFYWRITEROWRESPONSE']._serialized_start=3591 + _globals['_READMODIFYWRITEROWRESPONSE']._serialized_end=3657 + _globals['_GENERATEINITIALCHANGESTREAMPARTITIONSREQUEST']._serialized_start=3660 + _globals['_GENERATEINITIALCHANGESTREAMPARTITIONSREQUEST']._serialized_end=3794 + _globals['_GENERATEINITIALCHANGESTREAMPARTITIONSRESPONSE']._serialized_start=3796 + _globals['_GENERATEINITIALCHANGESTREAMPARTITIONSRESPONSE']._serialized_end=3899 + _globals['_READCHANGESTREAMREQUEST']._serialized_start=3902 + _globals['_READCHANGESTREAMREQUEST']._serialized_end=4313 + _globals['_READCHANGESTREAMRESPONSE']._serialized_start=4316 + _globals['_READCHANGESTREAMRESPONSE']._serialized_end=5637 + _globals['_READCHANGESTREAMRESPONSE_MUTATIONCHUNK']._serialized_start=4584 + _globals['_READCHANGESTREAMRESPONSE_MUTATIONCHUNK']._serialized_end=4828 + _globals['_READCHANGESTREAMRESPONSE_MUTATIONCHUNK_CHUNKINFO']._serialized_start=4739 + _globals['_READCHANGESTREAMRESPONSE_MUTATIONCHUNK_CHUNKINFO']._serialized_end=4828 + _globals['_READCHANGESTREAMRESPONSE_DATACHANGE']._serialized_start=4831 + _globals['_READCHANGESTREAMRESPONSE_DATACHANGE']._serialized_end=5285 + _globals['_READCHANGESTREAMRESPONSE_DATACHANGE_TYPE']._serialized_start=5205 + _globals['_READCHANGESTREAMRESPONSE_DATACHANGE_TYPE']._serialized_end=5285 + _globals['_READCHANGESTREAMRESPONSE_HEARTBEAT']._serialized_start=5288 + _globals['_READCHANGESTREAMRESPONSE_HEARTBEAT']._serialized_end=5433 + _globals['_READCHANGESTREAMRESPONSE_CLOSESTREAM']._serialized_start=5436 + _globals['_READCHANGESTREAMRESPONSE_CLOSESTREAM']._serialized_end=5620 + _globals['_EXECUTEQUERYREQUEST']._serialized_start=5640 + _globals['_EXECUTEQUERYREQUEST']._serialized_end=6057 + _globals['_EXECUTEQUERYREQUEST_PARAMSENTRY']._serialized_start=5970 + _globals['_EXECUTEQUERYREQUEST_PARAMSENTRY']._serialized_end=6042 + _globals['_EXECUTEQUERYRESPONSE']._serialized_start=6060 + _globals['_EXECUTEQUERYRESPONSE']._serialized_end=6210 + _globals['_PREPAREQUERYREQUEST']._serialized_start=6213 + _globals['_PREPAREQUERYREQUEST']._serialized_end=6585 + _globals['_PREPAREQUERYREQUEST_PARAMTYPESENTRY']._serialized_start=6495 + _globals['_PREPAREQUERYREQUEST_PARAMTYPESENTRY']._serialized_end=6570 + _globals['_PREPAREQUERYRESPONSE']._serialized_start=6588 + _globals['_PREPAREQUERYRESPONSE']._serialized_end=6740 + _globals['_BIGTABLE']._serialized_start=6743 + _globals['_BIGTABLE']._serialized_end=11680 # @@protoc_insertion_point(module_scope) diff --git 
a/packages/google-cloud-bigtable/test_proxy/protos/bigtable_pb2_grpc.py b/packages/google-cloud-bigtable/test_proxy/protos/bigtable_pb2_grpc.py index 9ce87d8696fc..ef4e5bed679d 100644 --- a/packages/google-cloud-bigtable/test_proxy/protos/bigtable_pb2_grpc.py +++ b/packages/google-cloud-bigtable/test_proxy/protos/bigtable_pb2_grpc.py @@ -1,9 +1,29 @@ # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! """Client and server classes corresponding to protobuf-defined services.""" import grpc +import warnings import bigtable_pb2 as google_dot_bigtable_dot_v2_dot_bigtable__pb2 +GRPC_GENERATED_VERSION = '1.70.0' +GRPC_VERSION = grpc.__version__ +_version_not_supported = False + +try: + from grpc._utilities import first_version_is_lower + _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION) +except ImportError: + _version_not_supported = True + +if _version_not_supported: + raise RuntimeError( + f'The grpc package installed is at version {GRPC_VERSION},' + + f' but the generated code in google/bigtable/v2/bigtable_pb2_grpc.py depends on' + + f' grpcio>={GRPC_GENERATED_VERSION}.' + + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}' + + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.' + ) + class BigtableStub(object): """Service for reading from and writing to existing Bigtable tables. @@ -19,47 +39,57 @@ def __init__(self, channel): '/google.bigtable.v2.Bigtable/ReadRows', request_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadRowsRequest.SerializeToString, response_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadRowsResponse.FromString, - ) + _registered_method=True) self.SampleRowKeys = channel.unary_stream( '/google.bigtable.v2.Bigtable/SampleRowKeys', request_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.SampleRowKeysRequest.SerializeToString, response_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.SampleRowKeysResponse.FromString, - ) + _registered_method=True) self.MutateRow = channel.unary_unary( '/google.bigtable.v2.Bigtable/MutateRow', request_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.MutateRowRequest.SerializeToString, response_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.MutateRowResponse.FromString, - ) + _registered_method=True) self.MutateRows = channel.unary_stream( '/google.bigtable.v2.Bigtable/MutateRows', request_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.MutateRowsRequest.SerializeToString, response_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.MutateRowsResponse.FromString, - ) + _registered_method=True) self.CheckAndMutateRow = channel.unary_unary( '/google.bigtable.v2.Bigtable/CheckAndMutateRow', request_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.CheckAndMutateRowRequest.SerializeToString, response_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.CheckAndMutateRowResponse.FromString, - ) + _registered_method=True) self.PingAndWarm = channel.unary_unary( '/google.bigtable.v2.Bigtable/PingAndWarm', request_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.PingAndWarmRequest.SerializeToString, response_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.PingAndWarmResponse.FromString, - ) + _registered_method=True) self.ReadModifyWriteRow = channel.unary_unary( '/google.bigtable.v2.Bigtable/ReadModifyWriteRow', request_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadModifyWriteRowRequest.SerializeToString, 
response_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadModifyWriteRowResponse.FromString, - ) + _registered_method=True) self.GenerateInitialChangeStreamPartitions = channel.unary_stream( '/google.bigtable.v2.Bigtable/GenerateInitialChangeStreamPartitions', request_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.GenerateInitialChangeStreamPartitionsRequest.SerializeToString, response_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.GenerateInitialChangeStreamPartitionsResponse.FromString, - ) + _registered_method=True) self.ReadChangeStream = channel.unary_stream( '/google.bigtable.v2.Bigtable/ReadChangeStream', request_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadChangeStreamRequest.SerializeToString, response_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadChangeStreamResponse.FromString, - ) + _registered_method=True) + self.PrepareQuery = channel.unary_unary( + '/google.bigtable.v2.Bigtable/PrepareQuery', + request_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.PrepareQueryRequest.SerializeToString, + response_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.PrepareQueryResponse.FromString, + _registered_method=True) + self.ExecuteQuery = channel.unary_stream( + '/google.bigtable.v2.Bigtable/ExecuteQuery', + request_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ExecuteQueryRequest.SerializeToString, + response_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ExecuteQueryResponse.FromString, + _registered_method=True) class BigtableServicer(object): @@ -150,6 +180,20 @@ def ReadChangeStream(self, request, context): context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') + def PrepareQuery(self, request, context): + """Prepares a GoogleSQL query for execution on a particular Bigtable instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ExecuteQuery(self, request, context): + """Executes a SQL query against a particular Bigtable instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + def add_BigtableServicer_to_server(servicer, server): rpc_method_handlers = { @@ -198,10 +242,21 @@ def add_BigtableServicer_to_server(servicer, server): request_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadChangeStreamRequest.FromString, response_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadChangeStreamResponse.SerializeToString, ), + 'PrepareQuery': grpc.unary_unary_rpc_method_handler( + servicer.PrepareQuery, + request_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.PrepareQueryRequest.FromString, + response_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.PrepareQueryResponse.SerializeToString, + ), + 'ExecuteQuery': grpc.unary_stream_rpc_method_handler( + servicer.ExecuteQuery, + request_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ExecuteQueryRequest.FromString, + response_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ExecuteQueryResponse.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( 'google.bigtable.v2.Bigtable', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) + server.add_registered_method_handlers('google.bigtable.v2.Bigtable', rpc_method_handlers) # This class is part of an EXPERIMENTAL API. 
@@ -220,11 +275,21 @@ def ReadRows(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_stream(request, target, '/google.bigtable.v2.Bigtable/ReadRows', + return grpc.experimental.unary_stream( + request, + target, + '/google.bigtable.v2.Bigtable/ReadRows', google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadRowsRequest.SerializeToString, google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadRowsResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) @staticmethod def SampleRowKeys(request, @@ -237,11 +302,21 @@ def SampleRowKeys(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_stream(request, target, '/google.bigtable.v2.Bigtable/SampleRowKeys', + return grpc.experimental.unary_stream( + request, + target, + '/google.bigtable.v2.Bigtable/SampleRowKeys', google_dot_bigtable_dot_v2_dot_bigtable__pb2.SampleRowKeysRequest.SerializeToString, google_dot_bigtable_dot_v2_dot_bigtable__pb2.SampleRowKeysResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) @staticmethod def MutateRow(request, @@ -254,11 +329,21 @@ def MutateRow(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary(request, target, '/google.bigtable.v2.Bigtable/MutateRow', + return grpc.experimental.unary_unary( + request, + target, + '/google.bigtable.v2.Bigtable/MutateRow', google_dot_bigtable_dot_v2_dot_bigtable__pb2.MutateRowRequest.SerializeToString, google_dot_bigtable_dot_v2_dot_bigtable__pb2.MutateRowResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) @staticmethod def MutateRows(request, @@ -271,11 +356,21 @@ def MutateRows(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_stream(request, target, '/google.bigtable.v2.Bigtable/MutateRows', + return grpc.experimental.unary_stream( + request, + target, + '/google.bigtable.v2.Bigtable/MutateRows', google_dot_bigtable_dot_v2_dot_bigtable__pb2.MutateRowsRequest.SerializeToString, google_dot_bigtable_dot_v2_dot_bigtable__pb2.MutateRowsResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) @staticmethod def CheckAndMutateRow(request, @@ -288,11 +383,21 @@ def CheckAndMutateRow(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary(request, target, '/google.bigtable.v2.Bigtable/CheckAndMutateRow', + return grpc.experimental.unary_unary( + request, + target, + '/google.bigtable.v2.Bigtable/CheckAndMutateRow', google_dot_bigtable_dot_v2_dot_bigtable__pb2.CheckAndMutateRowRequest.SerializeToString, google_dot_bigtable_dot_v2_dot_bigtable__pb2.CheckAndMutateRowResponse.FromString, - options, 
channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) @staticmethod def PingAndWarm(request, @@ -305,11 +410,21 @@ def PingAndWarm(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary(request, target, '/google.bigtable.v2.Bigtable/PingAndWarm', + return grpc.experimental.unary_unary( + request, + target, + '/google.bigtable.v2.Bigtable/PingAndWarm', google_dot_bigtable_dot_v2_dot_bigtable__pb2.PingAndWarmRequest.SerializeToString, google_dot_bigtable_dot_v2_dot_bigtable__pb2.PingAndWarmResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) @staticmethod def ReadModifyWriteRow(request, @@ -322,11 +437,21 @@ def ReadModifyWriteRow(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary(request, target, '/google.bigtable.v2.Bigtable/ReadModifyWriteRow', + return grpc.experimental.unary_unary( + request, + target, + '/google.bigtable.v2.Bigtable/ReadModifyWriteRow', google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadModifyWriteRowRequest.SerializeToString, google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadModifyWriteRowResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) @staticmethod def GenerateInitialChangeStreamPartitions(request, @@ -339,11 +464,21 @@ def GenerateInitialChangeStreamPartitions(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_stream(request, target, '/google.bigtable.v2.Bigtable/GenerateInitialChangeStreamPartitions', + return grpc.experimental.unary_stream( + request, + target, + '/google.bigtable.v2.Bigtable/GenerateInitialChangeStreamPartitions', google_dot_bigtable_dot_v2_dot_bigtable__pb2.GenerateInitialChangeStreamPartitionsRequest.SerializeToString, google_dot_bigtable_dot_v2_dot_bigtable__pb2.GenerateInitialChangeStreamPartitionsResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) @staticmethod def ReadChangeStream(request, @@ -356,8 +491,72 @@ def ReadChangeStream(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_stream(request, target, '/google.bigtable.v2.Bigtable/ReadChangeStream', + return grpc.experimental.unary_stream( + request, + target, + '/google.bigtable.v2.Bigtable/ReadChangeStream', google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadChangeStreamRequest.SerializeToString, google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadChangeStreamResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def 
PrepareQuery(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/google.bigtable.v2.Bigtable/PrepareQuery', + google_dot_bigtable_dot_v2_dot_bigtable__pb2.PrepareQueryRequest.SerializeToString, + google_dot_bigtable_dot_v2_dot_bigtable__pb2.PrepareQueryResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def ExecuteQuery(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_stream( + request, + target, + '/google.bigtable.v2.Bigtable/ExecuteQuery', + google_dot_bigtable_dot_v2_dot_bigtable__pb2.ExecuteQueryRequest.SerializeToString, + google_dot_bigtable_dot_v2_dot_bigtable__pb2.ExecuteQueryResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) diff --git a/packages/google-cloud-bigtable/test_proxy/protos/data_pb2.py b/packages/google-cloud-bigtable/test_proxy/protos/data_pb2.py index fff2120347b4..8b6e68df121e 100644 --- a/packages/google-cloud-bigtable/test_proxy/protos/data_pb2.py +++ b/packages/google-cloud-bigtable/test_proxy/protos/data_pb2.py @@ -1,68 +1,105 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! +# NO CHECKED-IN PROTOBUF GENCODE # source: google/bigtable/v2/data.proto +# Protobuf Python Version: 5.29.0 """Generated protocol buffer code.""" -from google.protobuf.internal import builder as _builder from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import runtime_version as _runtime_version from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +_runtime_version.ValidateProtobufRuntimeVersion( + _runtime_version.Domain.PUBLIC, + 5, + 29, + 0, + '', + 'google/bigtable/v2/data.proto' +) # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() +from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 +import types_pb2 as google_dot_bigtable_dot_v2_dot_types__pb2 +from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 +from google.type import date_pb2 as google_dot_type_dot_date__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1dgoogle/bigtable/v2/data.proto\x12\x12google.bigtable.v2\"@\n\x03Row\x12\x0b\n\x03key\x18\x01 \x01(\x0c\x12,\n\x08\x66\x61milies\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v2.Family\"C\n\x06\x46\x61mily\x12\x0c\n\x04name\x18\x01 \x01(\t\x12+\n\x07\x63olumns\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v2.Column\"D\n\x06\x43olumn\x12\x11\n\tqualifier\x18\x01 \x01(\x0c\x12\'\n\x05\x63\x65lls\x18\x02 \x03(\x0b\x32\x18.google.bigtable.v2.Cell\"?\n\x04\x43\x65ll\x12\x18\n\x10timestamp_micros\x18\x01 \x01(\x03\x12\r\n\x05value\x18\x02 \x01(\x0c\x12\x0e\n\x06labels\x18\x03 \x03(\t\"\x8a\x01\n\x08RowRange\x12\x1a\n\x10start_key_closed\x18\x01 \x01(\x0cH\x00\x12\x18\n\x0estart_key_open\x18\x02 \x01(\x0cH\x00\x12\x16\n\x0c\x65nd_key_open\x18\x03 
\x01(\x0cH\x01\x12\x18\n\x0e\x65nd_key_closed\x18\x04 \x01(\x0cH\x01\x42\x0b\n\tstart_keyB\t\n\x07\x65nd_key\"L\n\x06RowSet\x12\x10\n\x08row_keys\x18\x01 \x03(\x0c\x12\x30\n\nrow_ranges\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.RowRange\"\xc6\x01\n\x0b\x43olumnRange\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12 \n\x16start_qualifier_closed\x18\x02 \x01(\x0cH\x00\x12\x1e\n\x14start_qualifier_open\x18\x03 \x01(\x0cH\x00\x12\x1e\n\x14\x65nd_qualifier_closed\x18\x04 \x01(\x0cH\x01\x12\x1c\n\x12\x65nd_qualifier_open\x18\x05 \x01(\x0cH\x01\x42\x11\n\x0fstart_qualifierB\x0f\n\rend_qualifier\"N\n\x0eTimestampRange\x12\x1e\n\x16start_timestamp_micros\x18\x01 \x01(\x03\x12\x1c\n\x14\x65nd_timestamp_micros\x18\x02 \x01(\x03\"\x98\x01\n\nValueRange\x12\x1c\n\x12start_value_closed\x18\x01 \x01(\x0cH\x00\x12\x1a\n\x10start_value_open\x18\x02 \x01(\x0cH\x00\x12\x1a\n\x10\x65nd_value_closed\x18\x03 \x01(\x0cH\x01\x12\x18\n\x0e\x65nd_value_open\x18\x04 \x01(\x0cH\x01\x42\r\n\x0bstart_valueB\x0b\n\tend_value\"\xdf\x08\n\tRowFilter\x12\x34\n\x05\x63hain\x18\x01 \x01(\x0b\x32#.google.bigtable.v2.RowFilter.ChainH\x00\x12>\n\ninterleave\x18\x02 \x01(\x0b\x32(.google.bigtable.v2.RowFilter.InterleaveH\x00\x12<\n\tcondition\x18\x03 \x01(\x0b\x32\'.google.bigtable.v2.RowFilter.ConditionH\x00\x12\x0e\n\x04sink\x18\x10 \x01(\x08H\x00\x12\x19\n\x0fpass_all_filter\x18\x11 \x01(\x08H\x00\x12\x1a\n\x10\x62lock_all_filter\x18\x12 \x01(\x08H\x00\x12\x1e\n\x14row_key_regex_filter\x18\x04 \x01(\x0cH\x00\x12\x1b\n\x11row_sample_filter\x18\x0e \x01(\x01H\x00\x12\"\n\x18\x66\x61mily_name_regex_filter\x18\x05 \x01(\tH\x00\x12\'\n\x1d\x63olumn_qualifier_regex_filter\x18\x06 \x01(\x0cH\x00\x12>\n\x13\x63olumn_range_filter\x18\x07 \x01(\x0b\x32\x1f.google.bigtable.v2.ColumnRangeH\x00\x12\x44\n\x16timestamp_range_filter\x18\x08 \x01(\x0b\x32\".google.bigtable.v2.TimestampRangeH\x00\x12\x1c\n\x12value_regex_filter\x18\t \x01(\x0cH\x00\x12<\n\x12value_range_filter\x18\x0f \x01(\x0b\x32\x1e.google.bigtable.v2.ValueRangeH\x00\x12%\n\x1b\x63\x65lls_per_row_offset_filter\x18\n \x01(\x05H\x00\x12$\n\x1a\x63\x65lls_per_row_limit_filter\x18\x0b \x01(\x05H\x00\x12\'\n\x1d\x63\x65lls_per_column_limit_filter\x18\x0c \x01(\x05H\x00\x12!\n\x17strip_value_transformer\x18\r \x01(\x08H\x00\x12!\n\x17\x61pply_label_transformer\x18\x13 \x01(\tH\x00\x1a\x37\n\x05\x43hain\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x1a<\n\nInterleave\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x1a\xad\x01\n\tCondition\x12\x37\n\x10predicate_filter\x18\x01 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x32\n\x0btrue_filter\x18\x02 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x33\n\x0c\x66\x61lse_filter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilterB\x08\n\x06\x66ilter\"\xc9\x04\n\x08Mutation\x12\x38\n\x08set_cell\x18\x01 \x01(\x0b\x32$.google.bigtable.v2.Mutation.SetCellH\x00\x12K\n\x12\x64\x65lete_from_column\x18\x02 \x01(\x0b\x32-.google.bigtable.v2.Mutation.DeleteFromColumnH\x00\x12K\n\x12\x64\x65lete_from_family\x18\x03 \x01(\x0b\x32-.google.bigtable.v2.Mutation.DeleteFromFamilyH\x00\x12\x45\n\x0f\x64\x65lete_from_row\x18\x04 \x01(\x0b\x32*.google.bigtable.v2.Mutation.DeleteFromRowH\x00\x1a\x61\n\x07SetCell\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x18\n\x10timestamp_micros\x18\x03 \x01(\x03\x12\r\n\x05value\x18\x04 \x01(\x0c\x1ay\n\x10\x44\x65leteFromColumn\x12\x13\n\x0b\x66\x61mily_name\x18\x01 
\x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x36\n\ntime_range\x18\x03 \x01(\x0b\x32\".google.bigtable.v2.TimestampRange\x1a\'\n\x10\x44\x65leteFromFamily\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x1a\x0f\n\rDeleteFromRowB\n\n\x08mutation\"\x80\x01\n\x13ReadModifyWriteRule\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x16\n\x0c\x61ppend_value\x18\x03 \x01(\x0cH\x00\x12\x1a\n\x10increment_amount\x18\x04 \x01(\x03H\x00\x42\x06\n\x04rule\"B\n\x0fStreamPartition\x12/\n\trow_range\x18\x01 \x01(\x0b\x32\x1c.google.bigtable.v2.RowRange\"W\n\x18StreamContinuationTokens\x12;\n\x06tokens\x18\x01 \x03(\x0b\x32+.google.bigtable.v2.StreamContinuationToken\"`\n\x17StreamContinuationToken\x12\x36\n\tpartition\x18\x01 \x01(\x0b\x32#.google.bigtable.v2.StreamPartition\x12\r\n\x05token\x18\x02 \x01(\tB\xb5\x01\n\x16\x63om.google.bigtable.v2B\tDataProtoP\x01Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2\xea\x02\x1bGoogle::Cloud::Bigtable::V2b\x06proto3') - -_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) -_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.bigtable.v2.data_pb2', globals()) -if _descriptor._USE_C_DESCRIPTORS == False: +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1dgoogle/bigtable/v2/data.proto\x12\x12google.bigtable.v2\x1a\x1fgoogle/api/field_behavior.proto\x1a\x1egoogle/bigtable/v2/types.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x16google/type/date.proto\"@\n\x03Row\x12\x0b\n\x03key\x18\x01 \x01(\x0c\x12,\n\x08\x66\x61milies\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v2.Family\"C\n\x06\x46\x61mily\x12\x0c\n\x04name\x18\x01 \x01(\t\x12+\n\x07\x63olumns\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v2.Column\"D\n\x06\x43olumn\x12\x11\n\tqualifier\x18\x01 \x01(\x0c\x12\'\n\x05\x63\x65lls\x18\x02 \x03(\x0b\x32\x18.google.bigtable.v2.Cell\"?\n\x04\x43\x65ll\x12\x18\n\x10timestamp_micros\x18\x01 \x01(\x03\x12\r\n\x05value\x18\x02 \x01(\x0c\x12\x0e\n\x06labels\x18\x03 \x03(\t\"\xf4\x02\n\x05Value\x12&\n\x04type\x18\x07 \x01(\x0b\x32\x18.google.bigtable.v2.Type\x12\x13\n\traw_value\x18\x08 \x01(\x0cH\x00\x12\x1e\n\x14raw_timestamp_micros\x18\t \x01(\x03H\x00\x12\x15\n\x0b\x62ytes_value\x18\x02 \x01(\x0cH\x00\x12\x16\n\x0cstring_value\x18\x03 \x01(\tH\x00\x12\x13\n\tint_value\x18\x06 \x01(\x03H\x00\x12\x14\n\nbool_value\x18\n \x01(\x08H\x00\x12\x15\n\x0b\x66loat_value\x18\x0b \x01(\x01H\x00\x12\x35\n\x0ftimestamp_value\x18\x0c \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x12\'\n\ndate_value\x18\r \x01(\x0b\x32\x11.google.type.DateH\x00\x12\x35\n\x0b\x61rray_value\x18\x04 \x01(\x0b\x32\x1e.google.bigtable.v2.ArrayValueH\x00\x42\x06\n\x04kind\"7\n\nArrayValue\x12)\n\x06values\x18\x01 \x03(\x0b\x32\x19.google.bigtable.v2.Value\"\x8a\x01\n\x08RowRange\x12\x1a\n\x10start_key_closed\x18\x01 \x01(\x0cH\x00\x12\x18\n\x0estart_key_open\x18\x02 \x01(\x0cH\x00\x12\x16\n\x0c\x65nd_key_open\x18\x03 \x01(\x0cH\x01\x12\x18\n\x0e\x65nd_key_closed\x18\x04 \x01(\x0cH\x01\x42\x0b\n\tstart_keyB\t\n\x07\x65nd_key\"L\n\x06RowSet\x12\x10\n\x08row_keys\x18\x01 \x03(\x0c\x12\x30\n\nrow_ranges\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.RowRange\"\xc6\x01\n\x0b\x43olumnRange\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12 \n\x16start_qualifier_closed\x18\x02 \x01(\x0cH\x00\x12\x1e\n\x14start_qualifier_open\x18\x03 \x01(\x0cH\x00\x12\x1e\n\x14\x65nd_qualifier_closed\x18\x04 
\x01(\x0cH\x01\x12\x1c\n\x12\x65nd_qualifier_open\x18\x05 \x01(\x0cH\x01\x42\x11\n\x0fstart_qualifierB\x0f\n\rend_qualifier\"N\n\x0eTimestampRange\x12\x1e\n\x16start_timestamp_micros\x18\x01 \x01(\x03\x12\x1c\n\x14\x65nd_timestamp_micros\x18\x02 \x01(\x03\"\x98\x01\n\nValueRange\x12\x1c\n\x12start_value_closed\x18\x01 \x01(\x0cH\x00\x12\x1a\n\x10start_value_open\x18\x02 \x01(\x0cH\x00\x12\x1a\n\x10\x65nd_value_closed\x18\x03 \x01(\x0cH\x01\x12\x18\n\x0e\x65nd_value_open\x18\x04 \x01(\x0cH\x01\x42\r\n\x0bstart_valueB\x0b\n\tend_value\"\xdf\x08\n\tRowFilter\x12\x34\n\x05\x63hain\x18\x01 \x01(\x0b\x32#.google.bigtable.v2.RowFilter.ChainH\x00\x12>\n\ninterleave\x18\x02 \x01(\x0b\x32(.google.bigtable.v2.RowFilter.InterleaveH\x00\x12<\n\tcondition\x18\x03 \x01(\x0b\x32\'.google.bigtable.v2.RowFilter.ConditionH\x00\x12\x0e\n\x04sink\x18\x10 \x01(\x08H\x00\x12\x19\n\x0fpass_all_filter\x18\x11 \x01(\x08H\x00\x12\x1a\n\x10\x62lock_all_filter\x18\x12 \x01(\x08H\x00\x12\x1e\n\x14row_key_regex_filter\x18\x04 \x01(\x0cH\x00\x12\x1b\n\x11row_sample_filter\x18\x0e \x01(\x01H\x00\x12\"\n\x18\x66\x61mily_name_regex_filter\x18\x05 \x01(\tH\x00\x12\'\n\x1d\x63olumn_qualifier_regex_filter\x18\x06 \x01(\x0cH\x00\x12>\n\x13\x63olumn_range_filter\x18\x07 \x01(\x0b\x32\x1f.google.bigtable.v2.ColumnRangeH\x00\x12\x44\n\x16timestamp_range_filter\x18\x08 \x01(\x0b\x32\".google.bigtable.v2.TimestampRangeH\x00\x12\x1c\n\x12value_regex_filter\x18\t \x01(\x0cH\x00\x12<\n\x12value_range_filter\x18\x0f \x01(\x0b\x32\x1e.google.bigtable.v2.ValueRangeH\x00\x12%\n\x1b\x63\x65lls_per_row_offset_filter\x18\n \x01(\x05H\x00\x12$\n\x1a\x63\x65lls_per_row_limit_filter\x18\x0b \x01(\x05H\x00\x12\'\n\x1d\x63\x65lls_per_column_limit_filter\x18\x0c \x01(\x05H\x00\x12!\n\x17strip_value_transformer\x18\r \x01(\x08H\x00\x12!\n\x17\x61pply_label_transformer\x18\x13 \x01(\tH\x00\x1a\x37\n\x05\x43hain\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x1a<\n\nInterleave\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x1a\xad\x01\n\tCondition\x12\x37\n\x10predicate_filter\x18\x01 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x32\n\x0btrue_filter\x18\x02 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x33\n\x0c\x66\x61lse_filter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilterB\x08\n\x06\x66ilter\"\xad\x08\n\x08Mutation\x12\x38\n\x08set_cell\x18\x01 \x01(\x0b\x32$.google.bigtable.v2.Mutation.SetCellH\x00\x12=\n\x0b\x61\x64\x64_to_cell\x18\x05 \x01(\x0b\x32&.google.bigtable.v2.Mutation.AddToCellH\x00\x12\x41\n\rmerge_to_cell\x18\x06 \x01(\x0b\x32(.google.bigtable.v2.Mutation.MergeToCellH\x00\x12K\n\x12\x64\x65lete_from_column\x18\x02 \x01(\x0b\x32-.google.bigtable.v2.Mutation.DeleteFromColumnH\x00\x12K\n\x12\x64\x65lete_from_family\x18\x03 \x01(\x0b\x32-.google.bigtable.v2.Mutation.DeleteFromFamilyH\x00\x12\x45\n\x0f\x64\x65lete_from_row\x18\x04 \x01(\x0b\x32*.google.bigtable.v2.Mutation.DeleteFromRowH\x00\x1a\x61\n\x07SetCell\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x18\n\x10timestamp_micros\x18\x03 \x01(\x03\x12\r\n\x05value\x18\x04 \x01(\x0c\x1a\xad\x01\n\tAddToCell\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x33\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0b\x32\x19.google.bigtable.v2.Value\x12,\n\ttimestamp\x18\x03 \x01(\x0b\x32\x19.google.bigtable.v2.Value\x12(\n\x05input\x18\x04 \x01(\x0b\x32\x19.google.bigtable.v2.Value\x1a\xaf\x01\n\x0bMergeToCell\x12\x13\n\x0b\x66\x61mily_name\x18\x01 
\x01(\t\x12\x33\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0b\x32\x19.google.bigtable.v2.Value\x12,\n\ttimestamp\x18\x03 \x01(\x0b\x32\x19.google.bigtable.v2.Value\x12(\n\x05input\x18\x04 \x01(\x0b\x32\x19.google.bigtable.v2.Value\x1ay\n\x10\x44\x65leteFromColumn\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x36\n\ntime_range\x18\x03 \x01(\x0b\x32\".google.bigtable.v2.TimestampRange\x1a\'\n\x10\x44\x65leteFromFamily\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x1a\x0f\n\rDeleteFromRowB\n\n\x08mutation\"\x80\x01\n\x13ReadModifyWriteRule\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x16\n\x0c\x61ppend_value\x18\x03 \x01(\x0cH\x00\x12\x1a\n\x10increment_amount\x18\x04 \x01(\x03H\x00\x42\x06\n\x04rule\"B\n\x0fStreamPartition\x12/\n\trow_range\x18\x01 \x01(\x0b\x32\x1c.google.bigtable.v2.RowRange\"W\n\x18StreamContinuationTokens\x12;\n\x06tokens\x18\x01 \x03(\x0b\x32+.google.bigtable.v2.StreamContinuationToken\"`\n\x17StreamContinuationToken\x12\x36\n\tpartition\x18\x01 \x01(\x0b\x32#.google.bigtable.v2.StreamPartition\x12\r\n\x05token\x18\x02 \x01(\t\"\r\n\x0bProtoFormat\"F\n\x0e\x43olumnMetadata\x12\x0c\n\x04name\x18\x01 \x01(\t\x12&\n\x04type\x18\x02 \x01(\x0b\x32\x18.google.bigtable.v2.Type\"B\n\x0bProtoSchema\x12\x33\n\x07\x63olumns\x18\x01 \x03(\x0b\x32\".google.bigtable.v2.ColumnMetadata\"V\n\x11ResultSetMetadata\x12\x37\n\x0cproto_schema\x18\x01 \x01(\x0b\x32\x1f.google.bigtable.v2.ProtoSchemaH\x00\x42\x08\n\x06schema\"6\n\tProtoRows\x12)\n\x06values\x18\x02 \x03(\x0b\x32\x19.google.bigtable.v2.Value\"$\n\x0eProtoRowsBatch\x12\x12\n\nbatch_data\x18\x01 \x01(\x0c\"\xd5\x01\n\x10PartialResultSet\x12>\n\x10proto_rows_batch\x18\x03 \x01(\x0b\x32\".google.bigtable.v2.ProtoRowsBatchH\x00\x12\x1b\n\x0e\x62\x61tch_checksum\x18\x06 \x01(\rH\x01\x88\x01\x01\x12\x14\n\x0cresume_token\x18\x05 \x01(\x0c\x12\r\n\x05reset\x18\x07 \x01(\x08\x12\x1c\n\x14\x65stimated_batch_size\x18\x04 \x01(\x05\x42\x0e\n\x0cpartial_rowsB\x11\n\x0f_batch_checksumB\xb3\x01\n\x16\x63om.google.bigtable.v2B\tDataProtoP\x01Z8cloud.google.com/go/bigtable/apiv2/bigtablepb;bigtablepb\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2\xea\x02\x1bGoogle::Cloud::Bigtable::V2b\x06proto3') - DESCRIPTOR._options = None - DESCRIPTOR._serialized_options = b'\n\026com.google.bigtable.v2B\tDataProtoP\001Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2\352\002\033Google::Cloud::Bigtable::V2' - _ROW._serialized_start=53 - _ROW._serialized_end=117 - _FAMILY._serialized_start=119 - _FAMILY._serialized_end=186 - _COLUMN._serialized_start=188 - _COLUMN._serialized_end=256 - _CELL._serialized_start=258 - _CELL._serialized_end=321 - _ROWRANGE._serialized_start=324 - _ROWRANGE._serialized_end=462 - _ROWSET._serialized_start=464 - _ROWSET._serialized_end=540 - _COLUMNRANGE._serialized_start=543 - _COLUMNRANGE._serialized_end=741 - _TIMESTAMPRANGE._serialized_start=743 - _TIMESTAMPRANGE._serialized_end=821 - _VALUERANGE._serialized_start=824 - _VALUERANGE._serialized_end=976 - _ROWFILTER._serialized_start=979 - _ROWFILTER._serialized_end=2098 - _ROWFILTER_CHAIN._serialized_start=1795 - _ROWFILTER_CHAIN._serialized_end=1850 - _ROWFILTER_INTERLEAVE._serialized_start=1852 - _ROWFILTER_INTERLEAVE._serialized_end=1912 - _ROWFILTER_CONDITION._serialized_start=1915 - _ROWFILTER_CONDITION._serialized_end=2088 - 
_MUTATION._serialized_start=2101 - _MUTATION._serialized_end=2686 - _MUTATION_SETCELL._serialized_start=2396 - _MUTATION_SETCELL._serialized_end=2493 - _MUTATION_DELETEFROMCOLUMN._serialized_start=2495 - _MUTATION_DELETEFROMCOLUMN._serialized_end=2616 - _MUTATION_DELETEFROMFAMILY._serialized_start=2618 - _MUTATION_DELETEFROMFAMILY._serialized_end=2657 - _MUTATION_DELETEFROMROW._serialized_start=2659 - _MUTATION_DELETEFROMROW._serialized_end=2674 - _READMODIFYWRITERULE._serialized_start=2689 - _READMODIFYWRITERULE._serialized_end=2817 - _STREAMPARTITION._serialized_start=2819 - _STREAMPARTITION._serialized_end=2885 - _STREAMCONTINUATIONTOKENS._serialized_start=2887 - _STREAMCONTINUATIONTOKENS._serialized_end=2974 - _STREAMCONTINUATIONTOKEN._serialized_start=2976 - _STREAMCONTINUATIONTOKEN._serialized_end=3072 +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.bigtable.v2.data_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None + _globals['DESCRIPTOR']._serialized_options = b'\n\026com.google.bigtable.v2B\tDataProtoP\001Z8cloud.google.com/go/bigtable/apiv2/bigtablepb;bigtablepb\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2\352\002\033Google::Cloud::Bigtable::V2' + _globals['_ROW']._serialized_start=175 + _globals['_ROW']._serialized_end=239 + _globals['_FAMILY']._serialized_start=241 + _globals['_FAMILY']._serialized_end=308 + _globals['_COLUMN']._serialized_start=310 + _globals['_COLUMN']._serialized_end=378 + _globals['_CELL']._serialized_start=380 + _globals['_CELL']._serialized_end=443 + _globals['_VALUE']._serialized_start=446 + _globals['_VALUE']._serialized_end=818 + _globals['_ARRAYVALUE']._serialized_start=820 + _globals['_ARRAYVALUE']._serialized_end=875 + _globals['_ROWRANGE']._serialized_start=878 + _globals['_ROWRANGE']._serialized_end=1016 + _globals['_ROWSET']._serialized_start=1018 + _globals['_ROWSET']._serialized_end=1094 + _globals['_COLUMNRANGE']._serialized_start=1097 + _globals['_COLUMNRANGE']._serialized_end=1295 + _globals['_TIMESTAMPRANGE']._serialized_start=1297 + _globals['_TIMESTAMPRANGE']._serialized_end=1375 + _globals['_VALUERANGE']._serialized_start=1378 + _globals['_VALUERANGE']._serialized_end=1530 + _globals['_ROWFILTER']._serialized_start=1533 + _globals['_ROWFILTER']._serialized_end=2652 + _globals['_ROWFILTER_CHAIN']._serialized_start=2349 + _globals['_ROWFILTER_CHAIN']._serialized_end=2404 + _globals['_ROWFILTER_INTERLEAVE']._serialized_start=2406 + _globals['_ROWFILTER_INTERLEAVE']._serialized_end=2466 + _globals['_ROWFILTER_CONDITION']._serialized_start=2469 + _globals['_ROWFILTER_CONDITION']._serialized_end=2642 + _globals['_MUTATION']._serialized_start=2655 + _globals['_MUTATION']._serialized_end=3724 + _globals['_MUTATION_SETCELL']._serialized_start=3080 + _globals['_MUTATION_SETCELL']._serialized_end=3177 + _globals['_MUTATION_ADDTOCELL']._serialized_start=3180 + _globals['_MUTATION_ADDTOCELL']._serialized_end=3353 + _globals['_MUTATION_MERGETOCELL']._serialized_start=3356 + _globals['_MUTATION_MERGETOCELL']._serialized_end=3531 + _globals['_MUTATION_DELETEFROMCOLUMN']._serialized_start=3533 + _globals['_MUTATION_DELETEFROMCOLUMN']._serialized_end=3654 + _globals['_MUTATION_DELETEFROMFAMILY']._serialized_start=3656 + _globals['_MUTATION_DELETEFROMFAMILY']._serialized_end=3695 + _globals['_MUTATION_DELETEFROMROW']._serialized_start=3697 + 
_globals['_MUTATION_DELETEFROMROW']._serialized_end=3712 + _globals['_READMODIFYWRITERULE']._serialized_start=3727 + _globals['_READMODIFYWRITERULE']._serialized_end=3855 + _globals['_STREAMPARTITION']._serialized_start=3857 + _globals['_STREAMPARTITION']._serialized_end=3923 + _globals['_STREAMCONTINUATIONTOKENS']._serialized_start=3925 + _globals['_STREAMCONTINUATIONTOKENS']._serialized_end=4012 + _globals['_STREAMCONTINUATIONTOKEN']._serialized_start=4014 + _globals['_STREAMCONTINUATIONTOKEN']._serialized_end=4110 + _globals['_PROTOFORMAT']._serialized_start=4112 + _globals['_PROTOFORMAT']._serialized_end=4125 + _globals['_COLUMNMETADATA']._serialized_start=4127 + _globals['_COLUMNMETADATA']._serialized_end=4197 + _globals['_PROTOSCHEMA']._serialized_start=4199 + _globals['_PROTOSCHEMA']._serialized_end=4265 + _globals['_RESULTSETMETADATA']._serialized_start=4267 + _globals['_RESULTSETMETADATA']._serialized_end=4353 + _globals['_PROTOROWS']._serialized_start=4355 + _globals['_PROTOROWS']._serialized_end=4409 + _globals['_PROTOROWSBATCH']._serialized_start=4411 + _globals['_PROTOROWSBATCH']._serialized_end=4447 + _globals['_PARTIALRESULTSET']._serialized_start=4450 + _globals['_PARTIALRESULTSET']._serialized_end=4663 # @@protoc_insertion_point(module_scope) diff --git a/packages/google-cloud-bigtable/test_proxy/protos/data_pb2_grpc.py b/packages/google-cloud-bigtable/test_proxy/protos/data_pb2_grpc.py index 2daafffebfc8..f7a5195e8058 100644 --- a/packages/google-cloud-bigtable/test_proxy/protos/data_pb2_grpc.py +++ b/packages/google-cloud-bigtable/test_proxy/protos/data_pb2_grpc.py @@ -1,4 +1,24 @@ # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! """Client and server classes corresponding to protobuf-defined services.""" import grpc +import warnings + +GRPC_GENERATED_VERSION = '1.70.0' +GRPC_VERSION = grpc.__version__ +_version_not_supported = False + +try: + from grpc._utilities import first_version_is_lower + _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION) +except ImportError: + _version_not_supported = True + +if _version_not_supported: + raise RuntimeError( + f'The grpc package installed is at version {GRPC_VERSION},' + + f' but the generated code in google/bigtable/v2/data_pb2_grpc.py depends on' + + f' grpcio>={GRPC_GENERATED_VERSION}.' + + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}' + + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.' + ) diff --git a/packages/google-cloud-bigtable/test_proxy/protos/test_proxy_pb2.py b/packages/google-cloud-bigtable/test_proxy/protos/test_proxy_pb2.py index 8c7817b14259..1f85b086bf97 100644 --- a/packages/google-cloud-bigtable/test_proxy/protos/test_proxy_pb2.py +++ b/packages/google-cloud-bigtable/test_proxy/protos/test_proxy_pb2.py @@ -1,11 +1,22 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! 
+# NO CHECKED-IN PROTOBUF GENCODE # source: test_proxy.proto +# Protobuf Python Version: 5.29.0 """Generated protocol buffer code.""" -from google.protobuf.internal import builder as _builder from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import runtime_version as _runtime_version from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +_runtime_version.ValidateProtobufRuntimeVersion( + _runtime_version.Domain.PUBLIC, + 5, + 29, + 0, + '', + 'test_proxy.proto' +) # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() @@ -18,54 +29,66 @@ from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x10test_proxy.proto\x12\x19google.bigtable.testproxy\x1a\x17google/api/client.proto\x1a!google/bigtable/v2/bigtable.proto\x1a\x1dgoogle/bigtable/v2/data.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x17google/rpc/status.proto\"\xb8\x01\n\x13\x43reateClientRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x61ta_target\x18\x02 \x01(\t\x12\x12\n\nproject_id\x18\x03 \x01(\t\x12\x13\n\x0binstance_id\x18\x04 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x05 \x01(\t\x12\x38\n\x15per_operation_timeout\x18\x06 \x01(\x0b\x32\x19.google.protobuf.Duration\"\x16\n\x14\x43reateClientResponse\"\'\n\x12\x43loseClientRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\"\x15\n\x13\x43loseClientResponse\"(\n\x13RemoveClientRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\"\x16\n\x14RemoveClientResponse\"w\n\x0eReadRowRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x12\n\ntable_name\x18\x04 \x01(\t\x12\x0f\n\x07row_key\x18\x02 \x01(\t\x12-\n\x06\x66ilter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\"U\n\tRowResult\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12$\n\x03row\x18\x02 \x01(\x0b\x32\x17.google.bigtable.v2.Row\"u\n\x0fReadRowsRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x34\n\x07request\x18\x02 \x01(\x0b\x32#.google.bigtable.v2.ReadRowsRequest\x12\x19\n\x11\x63\x61ncel_after_rows\x18\x03 \x01(\x05\"V\n\nRowsResult\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12$\n\x03row\x18\x02 \x03(\x0b\x32\x17.google.bigtable.v2.Row\"\\\n\x10MutateRowRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x35\n\x07request\x18\x02 \x01(\x0b\x32$.google.bigtable.v2.MutateRowRequest\"5\n\x0fMutateRowResult\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\"^\n\x11MutateRowsRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x36\n\x07request\x18\x02 \x01(\x0b\x32%.google.bigtable.v2.MutateRowsRequest\"s\n\x10MutateRowsResult\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12;\n\x05\x65ntry\x18\x02 \x03(\x0b\x32,.google.bigtable.v2.MutateRowsResponse.Entry\"l\n\x18\x43heckAndMutateRowRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12=\n\x07request\x18\x02 \x01(\x0b\x32,.google.bigtable.v2.CheckAndMutateRowRequest\"|\n\x17\x43heckAndMutateRowResult\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12=\n\x06result\x18\x02 \x01(\x0b\x32-.google.bigtable.v2.CheckAndMutateRowResponse\"d\n\x14SampleRowKeysRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x39\n\x07request\x18\x02 \x01(\x0b\x32(.google.bigtable.v2.SampleRowKeysRequest\"t\n\x13SampleRowKeysResult\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12\x39\n\x06sample\x18\x02 
\x03(\x0b\x32).google.bigtable.v2.SampleRowKeysResponse\"n\n\x19ReadModifyWriteRowRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12>\n\x07request\x18\x02 \x01(\x0b\x32-.google.bigtable.v2.ReadModifyWriteRowRequest2\xa4\t\n\x18\x43loudBigtableV2TestProxy\x12q\n\x0c\x43reateClient\x12..google.bigtable.testproxy.CreateClientRequest\x1a/.google.bigtable.testproxy.CreateClientResponse\"\x00\x12n\n\x0b\x43loseClient\x12-.google.bigtable.testproxy.CloseClientRequest\x1a..google.bigtable.testproxy.CloseClientResponse\"\x00\x12q\n\x0cRemoveClient\x12..google.bigtable.testproxy.RemoveClientRequest\x1a/.google.bigtable.testproxy.RemoveClientResponse\"\x00\x12\\\n\x07ReadRow\x12).google.bigtable.testproxy.ReadRowRequest\x1a$.google.bigtable.testproxy.RowResult\"\x00\x12_\n\x08ReadRows\x12*.google.bigtable.testproxy.ReadRowsRequest\x1a%.google.bigtable.testproxy.RowsResult\"\x00\x12\x66\n\tMutateRow\x12+.google.bigtable.testproxy.MutateRowRequest\x1a*.google.bigtable.testproxy.MutateRowResult\"\x00\x12m\n\x0e\x42ulkMutateRows\x12,.google.bigtable.testproxy.MutateRowsRequest\x1a+.google.bigtable.testproxy.MutateRowsResult\"\x00\x12~\n\x11\x43heckAndMutateRow\x12\x33.google.bigtable.testproxy.CheckAndMutateRowRequest\x1a\x32.google.bigtable.testproxy.CheckAndMutateRowResult\"\x00\x12r\n\rSampleRowKeys\x12/.google.bigtable.testproxy.SampleRowKeysRequest\x1a..google.bigtable.testproxy.SampleRowKeysResult\"\x00\x12r\n\x12ReadModifyWriteRow\x12\x34.google.bigtable.testproxy.ReadModifyWriteRowRequest\x1a$.google.bigtable.testproxy.RowResult\"\x00\x1a\x34\xca\x41\x31\x62igtable-test-proxy-not-accessible.googleapis.comB6\n#com.google.cloud.bigtable.testproxyP\x01Z\r./testproxypbb\x06proto3') - -_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) -_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'test_proxy_pb2', globals()) -if _descriptor._USE_C_DESCRIPTORS == False: +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x10test_proxy.proto\x12\x19google.bigtable.testproxy\x1a\x17google/api/client.proto\x1a!google/bigtable/v2/bigtable.proto\x1a\x1dgoogle/bigtable/v2/data.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x17google/rpc/status.proto\"\xda\x03\n\x13\x43reateClientRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x61ta_target\x18\x02 \x01(\t\x12\x12\n\nproject_id\x18\x03 \x01(\t\x12\x13\n\x0binstance_id\x18\x04 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x05 \x01(\t\x12\x38\n\x15per_operation_timeout\x18\x06 \x01(\x0b\x32\x19.google.protobuf.Duration\x12Q\n\x17optional_feature_config\x18\x07 \x01(\x0e\x32\x30.google.bigtable.testproxy.OptionalFeatureConfig\x12X\n\x10security_options\x18\x08 \x01(\x0b\x32>.google.bigtable.testproxy.CreateClientRequest.SecurityOptions\x1as\n\x0fSecurityOptions\x12\x14\n\x0c\x61\x63\x63\x65ss_token\x18\x01 \x01(\t\x12\x0f\n\x07use_ssl\x18\x02 \x01(\x08\x12\x1d\n\x15ssl_endpoint_override\x18\x03 \x01(\t\x12\x1a\n\x12ssl_root_certs_pem\x18\x04 \x01(\t\"\x16\n\x14\x43reateClientResponse\"\'\n\x12\x43loseClientRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\"\x15\n\x13\x43loseClientResponse\"(\n\x13RemoveClientRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\"\x16\n\x14RemoveClientResponse\"w\n\x0eReadRowRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x12\n\ntable_name\x18\x04 \x01(\t\x12\x0f\n\x07row_key\x18\x02 \x01(\t\x12-\n\x06\x66ilter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\"U\n\tRowResult\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12$\n\x03row\x18\x02 
\x01(\x0b\x32\x17.google.bigtable.v2.Row\"u\n\x0fReadRowsRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x34\n\x07request\x18\x02 \x01(\x0b\x32#.google.bigtable.v2.ReadRowsRequest\x12\x19\n\x11\x63\x61ncel_after_rows\x18\x03 \x01(\x05\"W\n\nRowsResult\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12%\n\x04rows\x18\x02 \x03(\x0b\x32\x17.google.bigtable.v2.Row\"\\\n\x10MutateRowRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x35\n\x07request\x18\x02 \x01(\x0b\x32$.google.bigtable.v2.MutateRowRequest\"5\n\x0fMutateRowResult\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\"^\n\x11MutateRowsRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x36\n\x07request\x18\x02 \x01(\x0b\x32%.google.bigtable.v2.MutateRowsRequest\"u\n\x10MutateRowsResult\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12=\n\x07\x65ntries\x18\x02 \x03(\x0b\x32,.google.bigtable.v2.MutateRowsResponse.Entry\"l\n\x18\x43heckAndMutateRowRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12=\n\x07request\x18\x02 \x01(\x0b\x32,.google.bigtable.v2.CheckAndMutateRowRequest\"|\n\x17\x43heckAndMutateRowResult\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12=\n\x06result\x18\x02 \x01(\x0b\x32-.google.bigtable.v2.CheckAndMutateRowResponse\"d\n\x14SampleRowKeysRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x39\n\x07request\x18\x02 \x01(\x0b\x32(.google.bigtable.v2.SampleRowKeysRequest\"u\n\x13SampleRowKeysResult\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12:\n\x07samples\x18\x02 \x03(\x0b\x32).google.bigtable.v2.SampleRowKeysResponse\"n\n\x19ReadModifyWriteRowRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12>\n\x07request\x18\x02 \x01(\x0b\x32-.google.bigtable.v2.ReadModifyWriteRowRequest\"b\n\x13\x45xecuteQueryRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x38\n\x07request\x18\x02 \x01(\x0b\x32\'.google.bigtable.v2.ExecuteQueryRequest\"\xa9\x01\n\x12\x45xecuteQueryResult\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12>\n\x08metadata\x18\x04 \x01(\x0b\x32,.google.bigtable.testproxy.ResultSetMetadata\x12/\n\x04rows\x18\x03 \x03(\x0b\x32!.google.bigtable.testproxy.SqlRow\"H\n\x11ResultSetMetadata\x12\x33\n\x07\x63olumns\x18\x01 \x03(\x0b\x32\".google.bigtable.v2.ColumnMetadata\"3\n\x06SqlRow\x12)\n\x06values\x18\x01 
\x03(\x0b\x32\x19.google.bigtable.v2.Value*d\n\x15OptionalFeatureConfig\x12#\n\x1fOPTIONAL_FEATURE_CONFIG_DEFAULT\x10\x00\x12&\n\"OPTIONAL_FEATURE_CONFIG_ENABLE_ALL\x10\x01\x32\x95\n\n\x18\x43loudBigtableV2TestProxy\x12q\n\x0c\x43reateClient\x12..google.bigtable.testproxy.CreateClientRequest\x1a/.google.bigtable.testproxy.CreateClientResponse\"\x00\x12n\n\x0b\x43loseClient\x12-.google.bigtable.testproxy.CloseClientRequest\x1a..google.bigtable.testproxy.CloseClientResponse\"\x00\x12q\n\x0cRemoveClient\x12..google.bigtable.testproxy.RemoveClientRequest\x1a/.google.bigtable.testproxy.RemoveClientResponse\"\x00\x12\\\n\x07ReadRow\x12).google.bigtable.testproxy.ReadRowRequest\x1a$.google.bigtable.testproxy.RowResult\"\x00\x12_\n\x08ReadRows\x12*.google.bigtable.testproxy.ReadRowsRequest\x1a%.google.bigtable.testproxy.RowsResult\"\x00\x12\x66\n\tMutateRow\x12+.google.bigtable.testproxy.MutateRowRequest\x1a*.google.bigtable.testproxy.MutateRowResult\"\x00\x12m\n\x0e\x42ulkMutateRows\x12,.google.bigtable.testproxy.MutateRowsRequest\x1a+.google.bigtable.testproxy.MutateRowsResult\"\x00\x12~\n\x11\x43heckAndMutateRow\x12\x33.google.bigtable.testproxy.CheckAndMutateRowRequest\x1a\x32.google.bigtable.testproxy.CheckAndMutateRowResult\"\x00\x12r\n\rSampleRowKeys\x12/.google.bigtable.testproxy.SampleRowKeysRequest\x1a..google.bigtable.testproxy.SampleRowKeysResult\"\x00\x12r\n\x12ReadModifyWriteRow\x12\x34.google.bigtable.testproxy.ReadModifyWriteRowRequest\x1a$.google.bigtable.testproxy.RowResult\"\x00\x12o\n\x0c\x45xecuteQuery\x12..google.bigtable.testproxy.ExecuteQueryRequest\x1a-.google.bigtable.testproxy.ExecuteQueryResult\"\x00\x1a\x34\xca\x41\x31\x62igtable-test-proxy-not-accessible.googleapis.comBg\n#com.google.cloud.bigtable.testproxyP\x01Z>cloud.google.com/go/bigtable/testproxy/testproxypb;testproxypbb\x06proto3') - DESCRIPTOR._options = None - DESCRIPTOR._serialized_options = b'\n#com.google.cloud.bigtable.testproxyP\001Z\r./testproxypb' - _CLOUDBIGTABLEV2TESTPROXY._options = None - _CLOUDBIGTABLEV2TESTPROXY._serialized_options = b'\312A1bigtable-test-proxy-not-accessible.googleapis.com' - _CREATECLIENTREQUEST._serialized_start=196 - _CREATECLIENTREQUEST._serialized_end=380 - _CREATECLIENTRESPONSE._serialized_start=382 - _CREATECLIENTRESPONSE._serialized_end=404 - _CLOSECLIENTREQUEST._serialized_start=406 - _CLOSECLIENTREQUEST._serialized_end=445 - _CLOSECLIENTRESPONSE._serialized_start=447 - _CLOSECLIENTRESPONSE._serialized_end=468 - _REMOVECLIENTREQUEST._serialized_start=470 - _REMOVECLIENTREQUEST._serialized_end=510 - _REMOVECLIENTRESPONSE._serialized_start=512 - _REMOVECLIENTRESPONSE._serialized_end=534 - _READROWREQUEST._serialized_start=536 - _READROWREQUEST._serialized_end=655 - _ROWRESULT._serialized_start=657 - _ROWRESULT._serialized_end=742 - _READROWSREQUEST._serialized_start=744 - _READROWSREQUEST._serialized_end=861 - _ROWSRESULT._serialized_start=863 - _ROWSRESULT._serialized_end=949 - _MUTATEROWREQUEST._serialized_start=951 - _MUTATEROWREQUEST._serialized_end=1043 - _MUTATEROWRESULT._serialized_start=1045 - _MUTATEROWRESULT._serialized_end=1098 - _MUTATEROWSREQUEST._serialized_start=1100 - _MUTATEROWSREQUEST._serialized_end=1194 - _MUTATEROWSRESULT._serialized_start=1196 - _MUTATEROWSRESULT._serialized_end=1311 - _CHECKANDMUTATEROWREQUEST._serialized_start=1313 - _CHECKANDMUTATEROWREQUEST._serialized_end=1421 - _CHECKANDMUTATEROWRESULT._serialized_start=1423 - _CHECKANDMUTATEROWRESULT._serialized_end=1547 - _SAMPLEROWKEYSREQUEST._serialized_start=1549 - 
_SAMPLEROWKEYSREQUEST._serialized_end=1649 - _SAMPLEROWKEYSRESULT._serialized_start=1651 - _SAMPLEROWKEYSRESULT._serialized_end=1767 - _READMODIFYWRITEROWREQUEST._serialized_start=1769 - _READMODIFYWRITEROWREQUEST._serialized_end=1879 - _CLOUDBIGTABLEV2TESTPROXY._serialized_start=1882 - _CLOUDBIGTABLEV2TESTPROXY._serialized_end=3070 +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'test_proxy_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None + _globals['DESCRIPTOR']._serialized_options = b'\n#com.google.cloud.bigtable.testproxyP\001Z>cloud.google.com/go/bigtable/testproxy/testproxypb;testproxypb' + _globals['_CLOUDBIGTABLEV2TESTPROXY']._loaded_options = None + _globals['_CLOUDBIGTABLEV2TESTPROXY']._serialized_options = b'\312A1bigtable-test-proxy-not-accessible.googleapis.com' + _globals['_OPTIONALFEATURECONFIG']._serialized_start=2574 + _globals['_OPTIONALFEATURECONFIG']._serialized_end=2674 + _globals['_CREATECLIENTREQUEST']._serialized_start=196 + _globals['_CREATECLIENTREQUEST']._serialized_end=670 + _globals['_CREATECLIENTREQUEST_SECURITYOPTIONS']._serialized_start=555 + _globals['_CREATECLIENTREQUEST_SECURITYOPTIONS']._serialized_end=670 + _globals['_CREATECLIENTRESPONSE']._serialized_start=672 + _globals['_CREATECLIENTRESPONSE']._serialized_end=694 + _globals['_CLOSECLIENTREQUEST']._serialized_start=696 + _globals['_CLOSECLIENTREQUEST']._serialized_end=735 + _globals['_CLOSECLIENTRESPONSE']._serialized_start=737 + _globals['_CLOSECLIENTRESPONSE']._serialized_end=758 + _globals['_REMOVECLIENTREQUEST']._serialized_start=760 + _globals['_REMOVECLIENTREQUEST']._serialized_end=800 + _globals['_REMOVECLIENTRESPONSE']._serialized_start=802 + _globals['_REMOVECLIENTRESPONSE']._serialized_end=824 + _globals['_READROWREQUEST']._serialized_start=826 + _globals['_READROWREQUEST']._serialized_end=945 + _globals['_ROWRESULT']._serialized_start=947 + _globals['_ROWRESULT']._serialized_end=1032 + _globals['_READROWSREQUEST']._serialized_start=1034 + _globals['_READROWSREQUEST']._serialized_end=1151 + _globals['_ROWSRESULT']._serialized_start=1153 + _globals['_ROWSRESULT']._serialized_end=1240 + _globals['_MUTATEROWREQUEST']._serialized_start=1242 + _globals['_MUTATEROWREQUEST']._serialized_end=1334 + _globals['_MUTATEROWRESULT']._serialized_start=1336 + _globals['_MUTATEROWRESULT']._serialized_end=1389 + _globals['_MUTATEROWSREQUEST']._serialized_start=1391 + _globals['_MUTATEROWSREQUEST']._serialized_end=1485 + _globals['_MUTATEROWSRESULT']._serialized_start=1487 + _globals['_MUTATEROWSRESULT']._serialized_end=1604 + _globals['_CHECKANDMUTATEROWREQUEST']._serialized_start=1606 + _globals['_CHECKANDMUTATEROWREQUEST']._serialized_end=1714 + _globals['_CHECKANDMUTATEROWRESULT']._serialized_start=1716 + _globals['_CHECKANDMUTATEROWRESULT']._serialized_end=1840 + _globals['_SAMPLEROWKEYSREQUEST']._serialized_start=1842 + _globals['_SAMPLEROWKEYSREQUEST']._serialized_end=1942 + _globals['_SAMPLEROWKEYSRESULT']._serialized_start=1944 + _globals['_SAMPLEROWKEYSRESULT']._serialized_end=2061 + _globals['_READMODIFYWRITEROWREQUEST']._serialized_start=2063 + _globals['_READMODIFYWRITEROWREQUEST']._serialized_end=2173 + _globals['_EXECUTEQUERYREQUEST']._serialized_start=2175 + _globals['_EXECUTEQUERYREQUEST']._serialized_end=2273 + _globals['_EXECUTEQUERYRESULT']._serialized_start=2276 + _globals['_EXECUTEQUERYRESULT']._serialized_end=2445 + 
_globals['_RESULTSETMETADATA']._serialized_start=2447 + _globals['_RESULTSETMETADATA']._serialized_end=2519 + _globals['_SQLROW']._serialized_start=2521 + _globals['_SQLROW']._serialized_end=2572 + _globals['_CLOUDBIGTABLEV2TESTPROXY']._serialized_start=2677 + _globals['_CLOUDBIGTABLEV2TESTPROXY']._serialized_end=3978 # @@protoc_insertion_point(module_scope) diff --git a/packages/google-cloud-bigtable/test_proxy/protos/test_proxy_pb2_grpc.py b/packages/google-cloud-bigtable/test_proxy/protos/test_proxy_pb2_grpc.py index 60214a5848eb..b9d11034eb4b 100644 --- a/packages/google-cloud-bigtable/test_proxy/protos/test_proxy_pb2_grpc.py +++ b/packages/google-cloud-bigtable/test_proxy/protos/test_proxy_pb2_grpc.py @@ -1,9 +1,29 @@ # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! """Client and server classes corresponding to protobuf-defined services.""" import grpc +import warnings import test_proxy_pb2 as test__proxy__pb2 +GRPC_GENERATED_VERSION = '1.70.0' +GRPC_VERSION = grpc.__version__ +_version_not_supported = False + +try: + from grpc._utilities import first_version_is_lower + _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION) +except ImportError: + _version_not_supported = True + +if _version_not_supported: + raise RuntimeError( + f'The grpc package installed is at version {GRPC_VERSION},' + + f' but the generated code in test_proxy_pb2_grpc.py depends on' + + f' grpcio>={GRPC_GENERATED_VERSION}.' + + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}' + + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.' + ) + class CloudBigtableV2TestProxyStub(object): """Note that all RPCs are unary, even when the equivalent client binding call @@ -34,52 +54,57 @@ def __init__(self, channel): '/google.bigtable.testproxy.CloudBigtableV2TestProxy/CreateClient', request_serializer=test__proxy__pb2.CreateClientRequest.SerializeToString, response_deserializer=test__proxy__pb2.CreateClientResponse.FromString, - ) + _registered_method=True) self.CloseClient = channel.unary_unary( '/google.bigtable.testproxy.CloudBigtableV2TestProxy/CloseClient', request_serializer=test__proxy__pb2.CloseClientRequest.SerializeToString, response_deserializer=test__proxy__pb2.CloseClientResponse.FromString, - ) + _registered_method=True) self.RemoveClient = channel.unary_unary( '/google.bigtable.testproxy.CloudBigtableV2TestProxy/RemoveClient', request_serializer=test__proxy__pb2.RemoveClientRequest.SerializeToString, response_deserializer=test__proxy__pb2.RemoveClientResponse.FromString, - ) + _registered_method=True) self.ReadRow = channel.unary_unary( '/google.bigtable.testproxy.CloudBigtableV2TestProxy/ReadRow', request_serializer=test__proxy__pb2.ReadRowRequest.SerializeToString, response_deserializer=test__proxy__pb2.RowResult.FromString, - ) + _registered_method=True) self.ReadRows = channel.unary_unary( '/google.bigtable.testproxy.CloudBigtableV2TestProxy/ReadRows', request_serializer=test__proxy__pb2.ReadRowsRequest.SerializeToString, response_deserializer=test__proxy__pb2.RowsResult.FromString, - ) + _registered_method=True) self.MutateRow = channel.unary_unary( '/google.bigtable.testproxy.CloudBigtableV2TestProxy/MutateRow', request_serializer=test__proxy__pb2.MutateRowRequest.SerializeToString, response_deserializer=test__proxy__pb2.MutateRowResult.FromString, - ) + _registered_method=True) self.BulkMutateRows = channel.unary_unary( '/google.bigtable.testproxy.CloudBigtableV2TestProxy/BulkMutateRows', 
request_serializer=test__proxy__pb2.MutateRowsRequest.SerializeToString, response_deserializer=test__proxy__pb2.MutateRowsResult.FromString, - ) + _registered_method=True) self.CheckAndMutateRow = channel.unary_unary( '/google.bigtable.testproxy.CloudBigtableV2TestProxy/CheckAndMutateRow', request_serializer=test__proxy__pb2.CheckAndMutateRowRequest.SerializeToString, response_deserializer=test__proxy__pb2.CheckAndMutateRowResult.FromString, - ) + _registered_method=True) self.SampleRowKeys = channel.unary_unary( '/google.bigtable.testproxy.CloudBigtableV2TestProxy/SampleRowKeys', request_serializer=test__proxy__pb2.SampleRowKeysRequest.SerializeToString, response_deserializer=test__proxy__pb2.SampleRowKeysResult.FromString, - ) + _registered_method=True) self.ReadModifyWriteRow = channel.unary_unary( '/google.bigtable.testproxy.CloudBigtableV2TestProxy/ReadModifyWriteRow', request_serializer=test__proxy__pb2.ReadModifyWriteRowRequest.SerializeToString, response_deserializer=test__proxy__pb2.RowResult.FromString, - ) + _registered_method=True) + self.ExecuteQuery = channel.unary_unary( + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/ExecuteQuery', + request_serializer=test__proxy__pb2.ExecuteQueryRequest.SerializeToString, + response_deserializer=test__proxy__pb2.ExecuteQueryResult.FromString, + _registered_method=True) class CloudBigtableV2TestProxyServicer(object): @@ -183,6 +208,13 @@ def ReadModifyWriteRow(self, request, context): context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') + def ExecuteQuery(self, request, context): + """Executes a BTQL query with the client. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + def add_CloudBigtableV2TestProxyServicer_to_server(servicer, server): rpc_method_handlers = { @@ -236,10 +268,16 @@ def add_CloudBigtableV2TestProxyServicer_to_server(servicer, server): request_deserializer=test__proxy__pb2.ReadModifyWriteRowRequest.FromString, response_serializer=test__proxy__pb2.RowResult.SerializeToString, ), + 'ExecuteQuery': grpc.unary_unary_rpc_method_handler( + servicer.ExecuteQuery, + request_deserializer=test__proxy__pb2.ExecuteQueryRequest.FromString, + response_serializer=test__proxy__pb2.ExecuteQueryResult.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( 'google.bigtable.testproxy.CloudBigtableV2TestProxy', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) + server.add_registered_method_handlers('google.bigtable.testproxy.CloudBigtableV2TestProxy', rpc_method_handlers) # This class is part of an EXPERIMENTAL API. 
@@ -273,11 +311,21 @@ def CreateClient(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary(request, target, '/google.bigtable.testproxy.CloudBigtableV2TestProxy/CreateClient', + return grpc.experimental.unary_unary( + request, + target, + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/CreateClient', test__proxy__pb2.CreateClientRequest.SerializeToString, test__proxy__pb2.CreateClientResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) @staticmethod def CloseClient(request, @@ -290,11 +338,21 @@ def CloseClient(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary(request, target, '/google.bigtable.testproxy.CloudBigtableV2TestProxy/CloseClient', + return grpc.experimental.unary_unary( + request, + target, + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/CloseClient', test__proxy__pb2.CloseClientRequest.SerializeToString, test__proxy__pb2.CloseClientResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) @staticmethod def RemoveClient(request, @@ -307,11 +365,21 @@ def RemoveClient(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary(request, target, '/google.bigtable.testproxy.CloudBigtableV2TestProxy/RemoveClient', + return grpc.experimental.unary_unary( + request, + target, + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/RemoveClient', test__proxy__pb2.RemoveClientRequest.SerializeToString, test__proxy__pb2.RemoveClientResponse.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) @staticmethod def ReadRow(request, @@ -324,11 +392,21 @@ def ReadRow(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary(request, target, '/google.bigtable.testproxy.CloudBigtableV2TestProxy/ReadRow', + return grpc.experimental.unary_unary( + request, + target, + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/ReadRow', test__proxy__pb2.ReadRowRequest.SerializeToString, test__proxy__pb2.RowResult.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) @staticmethod def ReadRows(request, @@ -341,11 +419,21 @@ def ReadRows(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary(request, target, '/google.bigtable.testproxy.CloudBigtableV2TestProxy/ReadRows', + return grpc.experimental.unary_unary( + request, + target, + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/ReadRows', test__proxy__pb2.ReadRowsRequest.SerializeToString, test__proxy__pb2.RowsResult.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + 
channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) @staticmethod def MutateRow(request, @@ -358,11 +446,21 @@ def MutateRow(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary(request, target, '/google.bigtable.testproxy.CloudBigtableV2TestProxy/MutateRow', + return grpc.experimental.unary_unary( + request, + target, + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/MutateRow', test__proxy__pb2.MutateRowRequest.SerializeToString, test__proxy__pb2.MutateRowResult.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) @staticmethod def BulkMutateRows(request, @@ -375,11 +473,21 @@ def BulkMutateRows(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary(request, target, '/google.bigtable.testproxy.CloudBigtableV2TestProxy/BulkMutateRows', + return grpc.experimental.unary_unary( + request, + target, + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/BulkMutateRows', test__proxy__pb2.MutateRowsRequest.SerializeToString, test__proxy__pb2.MutateRowsResult.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) @staticmethod def CheckAndMutateRow(request, @@ -392,11 +500,21 @@ def CheckAndMutateRow(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary(request, target, '/google.bigtable.testproxy.CloudBigtableV2TestProxy/CheckAndMutateRow', + return grpc.experimental.unary_unary( + request, + target, + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/CheckAndMutateRow', test__proxy__pb2.CheckAndMutateRowRequest.SerializeToString, test__proxy__pb2.CheckAndMutateRowResult.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) @staticmethod def SampleRowKeys(request, @@ -409,11 +527,21 @@ def SampleRowKeys(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary(request, target, '/google.bigtable.testproxy.CloudBigtableV2TestProxy/SampleRowKeys', + return grpc.experimental.unary_unary( + request, + target, + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/SampleRowKeys', test__proxy__pb2.SampleRowKeysRequest.SerializeToString, test__proxy__pb2.SampleRowKeysResult.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) @staticmethod def ReadModifyWriteRow(request, @@ -426,8 +554,45 @@ def ReadModifyWriteRow(request, wait_for_ready=None, timeout=None, metadata=None): - return grpc.experimental.unary_unary(request, target, '/google.bigtable.testproxy.CloudBigtableV2TestProxy/ReadModifyWriteRow', + return grpc.experimental.unary_unary( + request, + target, + 
'/google.bigtable.testproxy.CloudBigtableV2TestProxy/ReadModifyWriteRow', test__proxy__pb2.ReadModifyWriteRowRequest.SerializeToString, test__proxy__pb2.RowResult.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) + + @staticmethod + def ExecuteQuery(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary( + request, + target, + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/ExecuteQuery', + test__proxy__pb2.ExecuteQueryRequest.SerializeToString, + test__proxy__pb2.ExecuteQueryResult.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True) diff --git a/packages/google-cloud-bigtable/test_proxy/protos/types_pb2.py b/packages/google-cloud-bigtable/test_proxy/protos/types_pb2.py new file mode 100644 index 000000000000..7acdbf7f1e9e --- /dev/null +++ b/packages/google-cloud-bigtable/test_proxy/protos/types_pb2.py @@ -0,0 +1,92 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# NO CHECKED-IN PROTOBUF GENCODE +# source: google/bigtable/v2/types.proto +# Protobuf Python Version: 5.29.0 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import runtime_version as _runtime_version +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +_runtime_version.ValidateProtobufRuntimeVersion( + _runtime_version.Domain.PUBLIC, + 5, + 29, + 0, + '', + 'google/bigtable/v2/types.proto' +) +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1egoogle/bigtable/v2/types.proto\x12\x12google.bigtable.v2\x1a\x1fgoogle/api/field_behavior.proto\"\xe0\x10\n\x04Type\x12\x34\n\nbytes_type\x18\x01 \x01(\x0b\x32\x1e.google.bigtable.v2.Type.BytesH\x00\x12\x36\n\x0bstring_type\x18\x02 \x01(\x0b\x32\x1f.google.bigtable.v2.Type.StringH\x00\x12\x34\n\nint64_type\x18\x05 \x01(\x0b\x32\x1e.google.bigtable.v2.Type.Int64H\x00\x12\x38\n\x0c\x66loat32_type\x18\x0c \x01(\x0b\x32 .google.bigtable.v2.Type.Float32H\x00\x12\x38\n\x0c\x66loat64_type\x18\t \x01(\x0b\x32 .google.bigtable.v2.Type.Float64H\x00\x12\x32\n\tbool_type\x18\x08 \x01(\x0b\x32\x1d.google.bigtable.v2.Type.BoolH\x00\x12<\n\x0etimestamp_type\x18\n \x01(\x0b\x32\".google.bigtable.v2.Type.TimestampH\x00\x12\x32\n\tdate_type\x18\x0b \x01(\x0b\x32\x1d.google.bigtable.v2.Type.DateH\x00\x12<\n\x0e\x61ggregate_type\x18\x06 \x01(\x0b\x32\".google.bigtable.v2.Type.AggregateH\x00\x12\x36\n\x0bstruct_type\x18\x07 \x01(\x0b\x32\x1f.google.bigtable.v2.Type.StructH\x00\x12\x34\n\narray_type\x18\x03 \x01(\x0b\x32\x1e.google.bigtable.v2.Type.ArrayH\x00\x12\x30\n\x08map_type\x18\x04 \x01(\x0b\x32\x1c.google.bigtable.v2.Type.MapH\x00\x1a\x9d\x01\n\x05\x42ytes\x12\x39\n\x08\x65ncoding\x18\x01 \x01(\x0b\x32\'.google.bigtable.v2.Type.Bytes.Encoding\x1aY\n\x08\x45ncoding\x12:\n\x03raw\x18\x01 
\x01(\x0b\x32+.google.bigtable.v2.Type.Bytes.Encoding.RawH\x00\x1a\x05\n\x03RawB\n\n\x08\x65ncoding\x1a\x8d\x02\n\x06String\x12:\n\x08\x65ncoding\x18\x01 \x01(\x0b\x32(.google.bigtable.v2.Type.String.Encoding\x1a\xc6\x01\n\x08\x45ncoding\x12H\n\x08utf8_raw\x18\x01 \x01(\x0b\x32\x30.google.bigtable.v2.Type.String.Encoding.Utf8RawB\x02\x18\x01H\x00\x12H\n\nutf8_bytes\x18\x02 \x01(\x0b\x32\x32.google.bigtable.v2.Type.String.Encoding.Utf8BytesH\x00\x1a\r\n\x07Utf8Raw:\x02\x18\x01\x1a\x0b\n\tUtf8BytesB\n\n\x08\x65ncoding\x1a\xf5\x01\n\x05Int64\x12\x39\n\x08\x65ncoding\x18\x01 \x01(\x0b\x32\'.google.bigtable.v2.Type.Int64.Encoding\x1a\xb0\x01\n\x08\x45ncoding\x12R\n\x10\x62ig_endian_bytes\x18\x01 \x01(\x0b\x32\x36.google.bigtable.v2.Type.Int64.Encoding.BigEndianBytesH\x00\x1a\x44\n\x0e\x42igEndianBytes\x12\x32\n\nbytes_type\x18\x01 \x01(\x0b\x32\x1e.google.bigtable.v2.Type.BytesB\n\n\x08\x65ncoding\x1a\x06\n\x04\x42ool\x1a\t\n\x07\x46loat32\x1a\t\n\x07\x46loat64\x1a\x0b\n\tTimestamp\x1a\x06\n\x04\x44\x61te\x1a\x84\x01\n\x06Struct\x12\x35\n\x06\x66ields\x18\x01 \x03(\x0b\x32%.google.bigtable.v2.Type.Struct.Field\x1a\x43\n\x05\x46ield\x12\x12\n\nfield_name\x18\x01 \x01(\t\x12&\n\x04type\x18\x02 \x01(\x0b\x32\x18.google.bigtable.v2.Type\x1a\x37\n\x05\x41rray\x12.\n\x0c\x65lement_type\x18\x01 \x01(\x0b\x32\x18.google.bigtable.v2.Type\x1a_\n\x03Map\x12*\n\x08key_type\x18\x01 \x01(\x0b\x32\x18.google.bigtable.v2.Type\x12,\n\nvalue_type\x18\x02 \x01(\x0b\x32\x18.google.bigtable.v2.Type\x1a\xb7\x03\n\tAggregate\x12,\n\ninput_type\x18\x01 \x01(\x0b\x32\x18.google.bigtable.v2.Type\x12\x31\n\nstate_type\x18\x02 \x01(\x0b\x32\x18.google.bigtable.v2.TypeB\x03\xe0\x41\x03\x12\x35\n\x03sum\x18\x04 \x01(\x0b\x32&.google.bigtable.v2.Type.Aggregate.SumH\x00\x12_\n\x12hllpp_unique_count\x18\x05 \x01(\x0b\x32\x41.google.bigtable.v2.Type.Aggregate.HyperLogLogPlusPlusUniqueCountH\x00\x12\x35\n\x03max\x18\x06 \x01(\x0b\x32&.google.bigtable.v2.Type.Aggregate.MaxH\x00\x12\x35\n\x03min\x18\x07 \x01(\x0b\x32&.google.bigtable.v2.Type.Aggregate.MinH\x00\x1a\x05\n\x03Sum\x1a\x05\n\x03Max\x1a\x05\n\x03Min\x1a \n\x1eHyperLogLogPlusPlusUniqueCountB\x0c\n\naggregatorB\x06\n\x04kindB\xb4\x01\n\x16\x63om.google.bigtable.v2B\nTypesProtoP\x01Z8cloud.google.com/go/bigtable/apiv2/bigtablepb;bigtablepb\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2\xea\x02\x1bGoogle::Cloud::Bigtable::V2b\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.bigtable.v2.types_pb2', _globals) +if not _descriptor._USE_C_DESCRIPTORS: + _globals['DESCRIPTOR']._loaded_options = None + _globals['DESCRIPTOR']._serialized_options = b'\n\026com.google.bigtable.v2B\nTypesProtoP\001Z8cloud.google.com/go/bigtable/apiv2/bigtablepb;bigtablepb\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2\352\002\033Google::Cloud::Bigtable::V2' + _globals['_TYPE_STRING_ENCODING_UTF8RAW']._loaded_options = None + _globals['_TYPE_STRING_ENCODING_UTF8RAW']._serialized_options = b'\030\001' + _globals['_TYPE_STRING_ENCODING'].fields_by_name['utf8_raw']._loaded_options = None + _globals['_TYPE_STRING_ENCODING'].fields_by_name['utf8_raw']._serialized_options = b'\030\001' + _globals['_TYPE_AGGREGATE'].fields_by_name['state_type']._loaded_options = None + _globals['_TYPE_AGGREGATE'].fields_by_name['state_type']._serialized_options = b'\340A\003' + _globals['_TYPE']._serialized_start=88 + _globals['_TYPE']._serialized_end=2232 + 
_globals['_TYPE_BYTES']._serialized_start=765 + _globals['_TYPE_BYTES']._serialized_end=922 + _globals['_TYPE_BYTES_ENCODING']._serialized_start=833 + _globals['_TYPE_BYTES_ENCODING']._serialized_end=922 + _globals['_TYPE_BYTES_ENCODING_RAW']._serialized_start=905 + _globals['_TYPE_BYTES_ENCODING_RAW']._serialized_end=910 + _globals['_TYPE_STRING']._serialized_start=925 + _globals['_TYPE_STRING']._serialized_end=1194 + _globals['_TYPE_STRING_ENCODING']._serialized_start=996 + _globals['_TYPE_STRING_ENCODING']._serialized_end=1194 + _globals['_TYPE_STRING_ENCODING_UTF8RAW']._serialized_start=1156 + _globals['_TYPE_STRING_ENCODING_UTF8RAW']._serialized_end=1169 + _globals['_TYPE_STRING_ENCODING_UTF8BYTES']._serialized_start=1171 + _globals['_TYPE_STRING_ENCODING_UTF8BYTES']._serialized_end=1182 + _globals['_TYPE_INT64']._serialized_start=1197 + _globals['_TYPE_INT64']._serialized_end=1442 + _globals['_TYPE_INT64_ENCODING']._serialized_start=1266 + _globals['_TYPE_INT64_ENCODING']._serialized_end=1442 + _globals['_TYPE_INT64_ENCODING_BIGENDIANBYTES']._serialized_start=1362 + _globals['_TYPE_INT64_ENCODING_BIGENDIANBYTES']._serialized_end=1430 + _globals['_TYPE_BOOL']._serialized_start=1444 + _globals['_TYPE_BOOL']._serialized_end=1450 + _globals['_TYPE_FLOAT32']._serialized_start=1452 + _globals['_TYPE_FLOAT32']._serialized_end=1461 + _globals['_TYPE_FLOAT64']._serialized_start=1463 + _globals['_TYPE_FLOAT64']._serialized_end=1472 + _globals['_TYPE_TIMESTAMP']._serialized_start=1474 + _globals['_TYPE_TIMESTAMP']._serialized_end=1485 + _globals['_TYPE_DATE']._serialized_start=1487 + _globals['_TYPE_DATE']._serialized_end=1493 + _globals['_TYPE_STRUCT']._serialized_start=1496 + _globals['_TYPE_STRUCT']._serialized_end=1628 + _globals['_TYPE_STRUCT_FIELD']._serialized_start=1561 + _globals['_TYPE_STRUCT_FIELD']._serialized_end=1628 + _globals['_TYPE_ARRAY']._serialized_start=1630 + _globals['_TYPE_ARRAY']._serialized_end=1685 + _globals['_TYPE_MAP']._serialized_start=1687 + _globals['_TYPE_MAP']._serialized_end=1782 + _globals['_TYPE_AGGREGATE']._serialized_start=1785 + _globals['_TYPE_AGGREGATE']._serialized_end=2224 + _globals['_TYPE_AGGREGATE_SUM']._serialized_start=2157 + _globals['_TYPE_AGGREGATE_SUM']._serialized_end=2162 + _globals['_TYPE_AGGREGATE_MAX']._serialized_start=2164 + _globals['_TYPE_AGGREGATE_MAX']._serialized_end=2169 + _globals['_TYPE_AGGREGATE_MIN']._serialized_start=2171 + _globals['_TYPE_AGGREGATE_MIN']._serialized_end=2176 + _globals['_TYPE_AGGREGATE_HYPERLOGLOGPLUSPLUSUNIQUECOUNT']._serialized_start=2178 + _globals['_TYPE_AGGREGATE_HYPERLOGLOGPLUSPLUSUNIQUECOUNT']._serialized_end=2210 +# @@protoc_insertion_point(module_scope) diff --git a/packages/google-cloud-bigtable/test_proxy/protos/types_pb2_grpc.py b/packages/google-cloud-bigtable/test_proxy/protos/types_pb2_grpc.py new file mode 100644 index 000000000000..29956dd38063 --- /dev/null +++ b/packages/google-cloud-bigtable/test_proxy/protos/types_pb2_grpc.py @@ -0,0 +1,24 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
+"""Client and server classes corresponding to protobuf-defined services.""" +import grpc +import warnings + + +GRPC_GENERATED_VERSION = '1.70.0' +GRPC_VERSION = grpc.__version__ +_version_not_supported = False + +try: + from grpc._utilities import first_version_is_lower + _version_not_supported = first_version_is_lower(GRPC_VERSION, GRPC_GENERATED_VERSION) +except ImportError: + _version_not_supported = True + +if _version_not_supported: + raise RuntimeError( + f'The grpc package installed is at version {GRPC_VERSION},' + + f' but the generated code in google/bigtable/v2/types_pb2_grpc.py depends on' + + f' grpcio>={GRPC_GENERATED_VERSION}.' + + f' Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}' + + f' or downgrade your generated code using grpcio-tools<={GRPC_VERSION}.' + ) From 067ebbfbfa196fc8b0761271e8f531161afb8d0e Mon Sep 17 00:00:00 2001 From: Jack Dingilian Date: Fri, 18 Jul 2025 10:19:05 -0400 Subject: [PATCH 872/892] test: Use latest conformance test version and exclude unsupported features (#1149) --- .../.github/workflows/conformance.yaml | 14 ++--- .../google/cloud/bigtable/data/exceptions.py | 3 + .../handlers/client_handler_data_async.py | 2 +- .../client_handler_data_sync_autogen.py | 2 +- .../test_proxy/handlers/grpc_handler.py | 63 +++++++++++++------ 5 files changed, 55 insertions(+), 29 deletions(-) diff --git a/packages/google-cloud-bigtable/.github/workflows/conformance.yaml b/packages/google-cloud-bigtable/.github/workflows/conformance.yaml index 8445240c3ea9..6a96a87d3f6a 100644 --- a/packages/google-cloud-bigtable/.github/workflows/conformance.yaml +++ b/packages/google-cloud-bigtable/.github/workflows/conformance.yaml @@ -24,17 +24,15 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - test-version: [ "v0.0.2" ] + test-version: [ "v0.0.4" ] py-version: [ 3.8 ] - client-type: [ "async", "sync", "legacy" ] + client-type: [ "async", "sync"] + # None of the clients currently support reverse scans, execute query plan refresh, retry info, or routing cookie include: + - client-type: "async" + test_args: "-skip \"PlanRefresh|_Reverse|_WithRetryInfo|_WithRoutingCookie\"" - client-type: "sync" - # sync client does not support concurrent streams - test_args: "-skip _Generic_MultiStream" - - client-type: "legacy" - # legacy client is synchronous and does not support concurrent streams - # legacy client does not expose mutate_row. Disable those tests - test_args: "-skip _Generic_MultiStream -skip TestMutateRow_" + test_args: "-skip \"PlanRefresh|_Reverse|_WithRetryInfo|_WithRoutingCookie|_Generic_MultiStream\"" fail-fast: false name: "${{ matrix.client-type }} client / python ${{ matrix.py-version }} / test tag ${{ matrix.test-version }}" steps: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/exceptions.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/exceptions.py index 54ca308535b8..5645ae3aa228 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/exceptions.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/exceptions.py @@ -331,6 +331,9 @@ def __init__( class InvalidExecuteQueryResponse(core_exceptions.GoogleAPICallError): """Exception raised to invalid query response data from back-end.""" + # Set to internal. This is representative of an internal error. 
+ code = 13 + class ParameterTypeInferenceFailed(ValueError): """Exception raised when query parameter types were not provided and cannot be inferred.""" diff --git a/packages/google-cloud-bigtable/test_proxy/handlers/client_handler_data_async.py b/packages/google-cloud-bigtable/test_proxy/handlers/client_handler_data_async.py index 85ef2c7d4f37..246b7fcd70cc 100644 --- a/packages/google-cloud-bigtable/test_proxy/handlers/client_handler_data_async.py +++ b/packages/google-cloud-bigtable/test_proxy/handlers/client_handler_data_async.py @@ -276,7 +276,7 @@ async def ExecuteQuery(self, request, **kwargs): prepare_operation_timeout=operation_timeout, ) ) - rows = [r async for r in result] + rows = CrossSync.rm_aio([r async for r in result]) md = result.metadata proto_rows = [] for r in rows: diff --git a/packages/google-cloud-bigtable/test_proxy/handlers/client_handler_data_sync_autogen.py b/packages/google-cloud-bigtable/test_proxy/handlers/client_handler_data_sync_autogen.py index 4a680cc8cea0..0e557f058f39 100644 --- a/packages/google-cloud-bigtable/test_proxy/handlers/client_handler_data_sync_autogen.py +++ b/packages/google-cloud-bigtable/test_proxy/handlers/client_handler_data_sync_autogen.py @@ -205,7 +205,7 @@ async def ExecuteQuery(self, request, **kwargs): operation_timeout=operation_timeout, prepare_operation_timeout=operation_timeout, ) - rows = [r async for r in result] + rows = [r for r in result] md = result.metadata proto_rows = [] for r in rows: diff --git a/packages/google-cloud-bigtable/test_proxy/handlers/grpc_handler.py b/packages/google-cloud-bigtable/test_proxy/handlers/grpc_handler.py index 5dc7aa0908ed..28ae19cf9f67 100644 --- a/packages/google-cloud-bigtable/test_proxy/handlers/grpc_handler.py +++ b/packages/google-cloud-bigtable/test_proxy/handlers/grpc_handler.py @@ -8,6 +8,17 @@ from google.protobuf import json_format +def correct_cancelled(status): + """ + Deadline exceeded errors are a race between client side cancellation and server + side deadline exceeded. For the purpose of these tests, the client will never cancel, + so we adjust cancelled errors to deadline_exceeded for consistency. 
+ """ + if status.code == 1: + return Status(code=4, message="deadlineexceeded") + return status + + class TestProxyGrpcServer(test_proxy_pb2_grpc.CloudBigtableV2TestProxyServicer): """ Implements a grpc server that proxies conformance test requests to the client library @@ -75,7 +86,7 @@ def ReadRows(self, request, context, client_response=None): status = Status() rows = [] if isinstance(client_response, dict) and "error" in client_response: - status = Status(code=5, message=client_response["error"]) + status = correct_cancelled(Status(code=5, message=client_response["error"])) else: rows = [data_pb2.Row(**d) for d in client_response] result = test_proxy_pb2.RowsResult(rows=rows, status=status) @@ -86,9 +97,11 @@ def ReadRow(self, request, context, client_response=None): status = Status() row = None if isinstance(client_response, dict) and "error" in client_response: - status = Status( - code=client_response.get("code", 5), - message=client_response.get("error"), + status = correct_cancelled( + Status( + code=client_response.get("code", 5), + message=client_response.get("error"), + ) ) elif client_response != "None": row = data_pb2.Row(**client_response) @@ -99,8 +112,11 @@ def ReadRow(self, request, context, client_response=None): def MutateRow(self, request, context, client_response=None): status = Status() if isinstance(client_response, dict) and "error" in client_response: - status = Status( - code=client_response.get("code", 5), message=client_response["error"] + status = correct_cancelled( + Status( + code=client_response.get("code", 5), + message=client_response["error"], + ) ) return test_proxy_pb2.MutateRowResult(status=status) @@ -112,24 +128,27 @@ def BulkMutateRows(self, request, context, client_response=None): entries = [ bigtable_pb2.MutateRowsResponse.Entry( index=exc_dict.get("index", 1), - status=Status(code=exc_dict.get("code", 5)), + status=correct_cancelled(Status(code=exc_dict.get("code", 5))), ) for exc_dict in client_response.get("subexceptions", []) ] - if not entries: - # only return failure on the overall request if there are failed entries - status = Status( + status = correct_cancelled( + Status( code=client_response.get("code", 5), message=client_response["error"], ) + ) response = test_proxy_pb2.MutateRowsResult(status=status, entries=entries) return response @delegate_to_client_handler def CheckAndMutateRow(self, request, context, client_response=None): if isinstance(client_response, dict) and "error" in client_response: - status = Status( - code=client_response.get("code", 5), message=client_response["error"] + status = correct_cancelled( + Status( + code=client_response.get("code", 5), + message=client_response["error"], + ) ) response = test_proxy_pb2.CheckAndMutateRowResult(status=status) else: @@ -146,9 +165,11 @@ def ReadModifyWriteRow(self, request, context, client_response=None): status = Status() row = None if isinstance(client_response, dict) and "error" in client_response: - status = Status( - code=client_response.get("code", 5), - message=client_response.get("error"), + status = correct_cancelled( + Status( + code=client_response.get("code", 5), + message=client_response.get("error"), + ) ) elif client_response != "None": row = data_pb2.Row(**client_response) @@ -160,9 +181,11 @@ def SampleRowKeys(self, request, context, client_response=None): status = Status() sample_list = [] if isinstance(client_response, dict) and "error" in client_response: - status = Status( - code=client_response.get("code", 5), - message=client_response.get("error"), + 
status = correct_cancelled( + Status( + code=client_response.get("code", 5), + message=client_response.get("error"), + ) ) else: for sample in client_response: @@ -177,7 +200,9 @@ def SampleRowKeys(self, request, context, client_response=None): def ExecuteQuery(self, request, context, client_response=None): if isinstance(client_response, dict) and "error" in client_response: return test_proxy_pb2.ExecuteQueryResult( - status=Status(code=13, message=client_response["error"]) + status=correct_cancelled( + Status(code=client_response.get("code", 13), message=client_response["error"]) + ) ) else: return test_proxy_pb2.ExecuteQueryResult( From 5f6ae512c430e3f7849e5d7d845169e16df7dcf3 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Tue, 22 Jul 2025 15:42:21 -0700 Subject: [PATCH 873/892] feat: add Idempotency to Cloud Bigtable MutateRowsRequest API (#1143) --- .../google/cloud/bigtable_admin/__init__.py | 36 + .../cloud/bigtable_admin_v2/__init__.py | 20 + .../bigtable_admin_v2/gapic_metadata.json | 75 + .../bigtable_table_admin/async_client.py | 519 +- .../services/bigtable_table_admin/client.py | 524 +- .../services/bigtable_table_admin/pagers.py | 160 + .../bigtable_table_admin/transports/base.py | 73 + .../bigtable_table_admin/transports/grpc.py | 145 +- .../transports/grpc_asyncio.py | 176 +- .../bigtable_table_admin/transports/rest.py | 1251 +- .../transports/rest_base.py | 287 + .../cloud/bigtable_admin_v2/types/__init__.py | 20 + .../types/bigtable_table_admin.py | 259 +- .../cloud/bigtable_admin_v2/types/table.py | 70 + .../cloud/bigtable_admin_v2/types/types.py | 66 + .../google/cloud/bigtable_v2/__init__.py | 2 + .../services/bigtable/async_client.py | 18 +- .../bigtable_v2/services/bigtable/client.py | 18 +- .../services/bigtable/transports/grpc.py | 12 +- .../bigtable/transports/grpc_asyncio.py | 12 +- .../cloud/bigtable_v2/types/__init__.py | 2 + .../cloud/bigtable_v2/types/bigtable.py | 88 +- .../google/cloud/bigtable_v2/types/data.py | 36 +- .../cloud/bigtable_v2/types/request_stats.py | 9 +- .../bigtable_v2/types/response_params.py | 5 +- .../fixup_bigtable_admin_v2_keywords.py | 5 + .../scripts/fixup_bigtable_v2_keywords.py | 2 +- .../test_bigtable_table_admin.py | 14131 ++++++++++------ .../unit/gapic/bigtable_v2/test_bigtable.py | 1 + 29 files changed, 12842 insertions(+), 5180 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/__init__.py index c8f2a44826fb..309d06c7bd51 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/__init__.py @@ -177,6 +177,12 @@ from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( CreateBackupRequest, ) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + CreateSchemaBundleMetadata, +) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + CreateSchemaBundleRequest, +) from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( CreateTableFromSnapshotMetadata, ) @@ -193,6 +199,9 @@ from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( DeleteBackupRequest, ) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + DeleteSchemaBundleRequest, +) from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( DeleteSnapshotRequest, ) @@ -210,6 +219,9 @@ GetAuthorizedViewRequest, 
) from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import GetBackupRequest +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + GetSchemaBundleRequest, +) from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import GetSnapshotRequest from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import GetTableRequest from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( @@ -222,6 +234,12 @@ from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( ListBackupsResponse, ) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + ListSchemaBundlesRequest, +) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + ListSchemaBundlesResponse, +) from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( ListSnapshotsRequest, ) @@ -266,6 +284,12 @@ from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( UpdateBackupRequest, ) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + UpdateSchemaBundleMetadata, +) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( + UpdateSchemaBundleRequest, +) from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( UpdateTableMetadata, ) @@ -287,7 +311,9 @@ from google.cloud.bigtable_admin_v2.types.table import ColumnFamily from google.cloud.bigtable_admin_v2.types.table import EncryptionInfo from google.cloud.bigtable_admin_v2.types.table import GcRule +from google.cloud.bigtable_admin_v2.types.table import ProtoSchema from google.cloud.bigtable_admin_v2.types.table import RestoreInfo +from google.cloud.bigtable_admin_v2.types.table import SchemaBundle from google.cloud.bigtable_admin_v2.types.table import Snapshot from google.cloud.bigtable_admin_v2.types.table import Table from google.cloud.bigtable_admin_v2.types.table import RestoreSourceType @@ -348,12 +374,15 @@ "CreateAuthorizedViewRequest", "CreateBackupMetadata", "CreateBackupRequest", + "CreateSchemaBundleMetadata", + "CreateSchemaBundleRequest", "CreateTableFromSnapshotMetadata", "CreateTableFromSnapshotRequest", "CreateTableRequest", "DataBoostReadLocalWrites", "DeleteAuthorizedViewRequest", "DeleteBackupRequest", + "DeleteSchemaBundleRequest", "DeleteSnapshotRequest", "DeleteTableRequest", "DropRowRangeRequest", @@ -361,12 +390,15 @@ "GenerateConsistencyTokenResponse", "GetAuthorizedViewRequest", "GetBackupRequest", + "GetSchemaBundleRequest", "GetSnapshotRequest", "GetTableRequest", "ListAuthorizedViewsRequest", "ListAuthorizedViewsResponse", "ListBackupsRequest", "ListBackupsResponse", + "ListSchemaBundlesRequest", + "ListSchemaBundlesResponse", "ListSnapshotsRequest", "ListSnapshotsResponse", "ListTablesRequest", @@ -383,6 +415,8 @@ "UpdateAuthorizedViewMetadata", "UpdateAuthorizedViewRequest", "UpdateBackupRequest", + "UpdateSchemaBundleMetadata", + "UpdateSchemaBundleRequest", "UpdateTableMetadata", "UpdateTableRequest", "OperationProgress", @@ -402,7 +436,9 @@ "ColumnFamily", "EncryptionInfo", "GcRule", + "ProtoSchema", "RestoreInfo", + "SchemaBundle", "Snapshot", "Table", "RestoreSourceType", diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py index 4ee0cc6b176a..13f1c2670a4e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py @@ -73,12 +73,15 @@ from 
.types.bigtable_table_admin import CreateAuthorizedViewRequest from .types.bigtable_table_admin import CreateBackupMetadata from .types.bigtable_table_admin import CreateBackupRequest +from .types.bigtable_table_admin import CreateSchemaBundleMetadata +from .types.bigtable_table_admin import CreateSchemaBundleRequest from .types.bigtable_table_admin import CreateTableFromSnapshotMetadata from .types.bigtable_table_admin import CreateTableFromSnapshotRequest from .types.bigtable_table_admin import CreateTableRequest from .types.bigtable_table_admin import DataBoostReadLocalWrites from .types.bigtable_table_admin import DeleteAuthorizedViewRequest from .types.bigtable_table_admin import DeleteBackupRequest +from .types.bigtable_table_admin import DeleteSchemaBundleRequest from .types.bigtable_table_admin import DeleteSnapshotRequest from .types.bigtable_table_admin import DeleteTableRequest from .types.bigtable_table_admin import DropRowRangeRequest @@ -86,12 +89,15 @@ from .types.bigtable_table_admin import GenerateConsistencyTokenResponse from .types.bigtable_table_admin import GetAuthorizedViewRequest from .types.bigtable_table_admin import GetBackupRequest +from .types.bigtable_table_admin import GetSchemaBundleRequest from .types.bigtable_table_admin import GetSnapshotRequest from .types.bigtable_table_admin import GetTableRequest from .types.bigtable_table_admin import ListAuthorizedViewsRequest from .types.bigtable_table_admin import ListAuthorizedViewsResponse from .types.bigtable_table_admin import ListBackupsRequest from .types.bigtable_table_admin import ListBackupsResponse +from .types.bigtable_table_admin import ListSchemaBundlesRequest +from .types.bigtable_table_admin import ListSchemaBundlesResponse from .types.bigtable_table_admin import ListSnapshotsRequest from .types.bigtable_table_admin import ListSnapshotsResponse from .types.bigtable_table_admin import ListTablesRequest @@ -108,6 +114,8 @@ from .types.bigtable_table_admin import UpdateAuthorizedViewMetadata from .types.bigtable_table_admin import UpdateAuthorizedViewRequest from .types.bigtable_table_admin import UpdateBackupRequest +from .types.bigtable_table_admin import UpdateSchemaBundleMetadata +from .types.bigtable_table_admin import UpdateSchemaBundleRequest from .types.bigtable_table_admin import UpdateTableMetadata from .types.bigtable_table_admin import UpdateTableRequest from .types.common import OperationProgress @@ -127,7 +135,9 @@ from .types.table import ColumnFamily from .types.table import EncryptionInfo from .types.table import GcRule +from .types.table import ProtoSchema from .types.table import RestoreInfo +from .types.table import SchemaBundle from .types.table import Snapshot from .types.table import Table from .types.table import RestoreSourceType @@ -164,6 +174,8 @@ "CreateLogicalViewRequest", "CreateMaterializedViewMetadata", "CreateMaterializedViewRequest", + "CreateSchemaBundleMetadata", + "CreateSchemaBundleRequest", "CreateTableFromSnapshotMetadata", "CreateTableFromSnapshotRequest", "CreateTableRequest", @@ -175,6 +187,7 @@ "DeleteInstanceRequest", "DeleteLogicalViewRequest", "DeleteMaterializedViewRequest", + "DeleteSchemaBundleRequest", "DeleteSnapshotRequest", "DeleteTableRequest", "DropRowRangeRequest", @@ -189,6 +202,7 @@ "GetInstanceRequest", "GetLogicalViewRequest", "GetMaterializedViewRequest", + "GetSchemaBundleRequest", "GetSnapshotRequest", "GetTableRequest", "HotTablet", @@ -209,6 +223,8 @@ "ListLogicalViewsResponse", "ListMaterializedViewsRequest", 
"ListMaterializedViewsResponse", + "ListSchemaBundlesRequest", + "ListSchemaBundlesResponse", "ListSnapshotsRequest", "ListSnapshotsResponse", "ListTablesRequest", @@ -221,10 +237,12 @@ "PartialUpdateClusterMetadata", "PartialUpdateClusterRequest", "PartialUpdateInstanceRequest", + "ProtoSchema", "RestoreInfo", "RestoreSourceType", "RestoreTableMetadata", "RestoreTableRequest", + "SchemaBundle", "Snapshot", "SnapshotTableMetadata", "SnapshotTableRequest", @@ -245,6 +263,8 @@ "UpdateLogicalViewRequest", "UpdateMaterializedViewMetadata", "UpdateMaterializedViewRequest", + "UpdateSchemaBundleMetadata", + "UpdateSchemaBundleRequest", "UpdateTableMetadata", "UpdateTableRequest", ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_metadata.json b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_metadata.json index c56fde6e7dba..19918190fa43 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_metadata.json +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_metadata.json @@ -514,6 +514,11 @@ "create_backup" ] }, + "CreateSchemaBundle": { + "methods": [ + "create_schema_bundle" + ] + }, "CreateTable": { "methods": [ "create_table" @@ -534,6 +539,11 @@ "delete_backup" ] }, + "DeleteSchemaBundle": { + "methods": [ + "delete_schema_bundle" + ] + }, "DeleteSnapshot": { "methods": [ "delete_snapshot" @@ -569,6 +579,11 @@ "get_iam_policy" ] }, + "GetSchemaBundle": { + "methods": [ + "get_schema_bundle" + ] + }, "GetSnapshot": { "methods": [ "get_snapshot" @@ -589,6 +604,11 @@ "list_backups" ] }, + "ListSchemaBundles": { + "methods": [ + "list_schema_bundles" + ] + }, "ListSnapshots": { "methods": [ "list_snapshots" @@ -639,6 +659,11 @@ "update_backup" ] }, + "UpdateSchemaBundle": { + "methods": [ + "update_schema_bundle" + ] + }, "UpdateTable": { "methods": [ "update_table" @@ -669,6 +694,11 @@ "create_backup" ] }, + "CreateSchemaBundle": { + "methods": [ + "create_schema_bundle" + ] + }, "CreateTable": { "methods": [ "create_table" @@ -689,6 +719,11 @@ "delete_backup" ] }, + "DeleteSchemaBundle": { + "methods": [ + "delete_schema_bundle" + ] + }, "DeleteSnapshot": { "methods": [ "delete_snapshot" @@ -724,6 +759,11 @@ "get_iam_policy" ] }, + "GetSchemaBundle": { + "methods": [ + "get_schema_bundle" + ] + }, "GetSnapshot": { "methods": [ "get_snapshot" @@ -744,6 +784,11 @@ "list_backups" ] }, + "ListSchemaBundles": { + "methods": [ + "list_schema_bundles" + ] + }, "ListSnapshots": { "methods": [ "list_snapshots" @@ -794,6 +839,11 @@ "update_backup" ] }, + "UpdateSchemaBundle": { + "methods": [ + "update_schema_bundle" + ] + }, "UpdateTable": { "methods": [ "update_table" @@ -824,6 +874,11 @@ "create_backup" ] }, + "CreateSchemaBundle": { + "methods": [ + "create_schema_bundle" + ] + }, "CreateTable": { "methods": [ "create_table" @@ -844,6 +899,11 @@ "delete_backup" ] }, + "DeleteSchemaBundle": { + "methods": [ + "delete_schema_bundle" + ] + }, "DeleteSnapshot": { "methods": [ "delete_snapshot" @@ -879,6 +939,11 @@ "get_iam_policy" ] }, + "GetSchemaBundle": { + "methods": [ + "get_schema_bundle" + ] + }, "GetSnapshot": { "methods": [ "get_snapshot" @@ -899,6 +964,11 @@ "list_backups" ] }, + "ListSchemaBundles": { + "methods": [ + "list_schema_bundles" + ] + }, "ListSnapshots": { "methods": [ "list_snapshots" @@ -949,6 +1019,11 @@ "update_backup" ] }, + "UpdateSchemaBundle": { + "methods": [ + "update_schema_bundle" + ] + }, "UpdateTable": { "methods": [ "update_table" diff --git 
a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index 1bf544db68eb..ba25264dd031 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -103,6 +103,10 @@ class BigtableTableAdminAsyncClient: ) instance_path = staticmethod(BigtableTableAdminClient.instance_path) parse_instance_path = staticmethod(BigtableTableAdminClient.parse_instance_path) + schema_bundle_path = staticmethod(BigtableTableAdminClient.schema_bundle_path) + parse_schema_bundle_path = staticmethod( + BigtableTableAdminClient.parse_schema_bundle_path + ) snapshot_path = staticmethod(BigtableTableAdminClient.snapshot_path) parse_snapshot_path = staticmethod(BigtableTableAdminClient.parse_snapshot_path) table_path = staticmethod(BigtableTableAdminClient.table_path) @@ -1396,8 +1400,8 @@ async def update_authorized_view( authorized_view (:class:`google.cloud.bigtable_admin_v2.types.AuthorizedView`): Required. The AuthorizedView to update. The ``name`` in ``authorized_view`` is used to identify the - AuthorizedView. AuthorizedView name must in this format - projects//instances//tables//authorizedViews/ + AuthorizedView. AuthorizedView name must in this format: + ``projects/{project}/instances/{instance}/tables/{table}/authorizedViews/{authorized_view}``. This corresponds to the ``authorized_view`` field on the ``request`` instance; if ``request`` is provided, this @@ -3148,7 +3152,7 @@ async def get_iam_policy( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Gets the access control policy for a Table or Backup + r"""Gets the access control policy for a Bigtable resource. Returns an empty policy if the resource exists but does not have a policy set. @@ -3261,7 +3265,7 @@ async def set_iam_policy( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Sets the access control policy on a Table or Backup + r"""Sets the access control policy on a Bigtable resource. Replaces any existing policy. Args: @@ -3375,7 +3379,7 @@ async def test_iam_permissions( metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: r"""Returns permissions that the caller has on the - specified Table or Backup resource. + specified Bigtable resource. Args: request (Optional[Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]]): @@ -3458,6 +3462,511 @@ async def test_iam_permissions( # Done; return the response. return response + async def create_schema_bundle( + self, + request: Optional[ + Union[bigtable_table_admin.CreateSchemaBundleRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + schema_bundle_id: Optional[str] = None, + schema_bundle: Optional[table.SchemaBundle] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a new schema bundle in the specified table. 
+ + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateSchemaBundleRequest, dict]]): + The request object. The request for + [CreateSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.CreateSchemaBundle]. + parent (:class:`str`): + Required. The parent resource where this schema bundle + will be created. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + schema_bundle_id (:class:`str`): + Required. The unique ID to use for + the schema bundle, which will become the + final component of the schema bundle's + resource name. + + This corresponds to the ``schema_bundle_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + schema_bundle (:class:`google.cloud.bigtable_admin_v2.types.SchemaBundle`): + Required. The schema bundle to + create. + + This corresponds to the ``schema_bundle`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.bigtable_admin_v2.types.SchemaBundle` + A named collection of related schemas. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent, schema_bundle_id, schema_bundle] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.CreateSchemaBundleRequest): + request = bigtable_table_admin.CreateSchemaBundleRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if schema_bundle_id is not None: + request.schema_bundle_id = schema_bundle_id + if schema_bundle is not None: + request.schema_bundle = schema_bundle + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.create_schema_bundle + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. 
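For illustration, a minimal sketch of how the new asynchronous create_schema_bundle method might be called once this surface ships; the project/instance/table IDs, the descriptor file name, and the assumption that ProtoSchema carries a serialized FileDescriptorSet in a proto_descriptors field are placeholders, not values taken from this patch.

# Hypothetical usage sketch for the async create_schema_bundle method added here.
# IDs and file names are placeholders; proto_descriptors is assumed to hold a
# serialized FileDescriptorSet describing the bundled protos.
import asyncio

from google.cloud import bigtable_admin_v2


async def create_bundle_example() -> None:
    client = bigtable_admin_v2.BigtableTableAdminAsyncClient()

    with open("schema_descriptors.pb", "rb") as fd:
        descriptors = fd.read()

    operation = await client.create_schema_bundle(
        parent="projects/my-project/instances/my-instance/tables/my-table",
        schema_bundle_id="my-bundle",
        schema_bundle=bigtable_admin_v2.SchemaBundle(
            proto_schema=bigtable_admin_v2.ProtoSchema(proto_descriptors=descriptors)
        ),
    )
    # create_schema_bundle returns a long-running operation; its result is the
    # created SchemaBundle message.
    bundle = await operation.result()
    print(bundle.name)


if __name__ == "__main__":
    asyncio.run(create_bundle_example())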
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + table.SchemaBundle, + metadata_type=bigtable_table_admin.CreateSchemaBundleMetadata, + ) + + # Done; return the response. + return response + + async def update_schema_bundle( + self, + request: Optional[ + Union[bigtable_table_admin.UpdateSchemaBundleRequest, dict] + ] = None, + *, + schema_bundle: Optional[table.SchemaBundle] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation_async.AsyncOperation: + r"""Updates a schema bundle in the specified table. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.UpdateSchemaBundleRequest, dict]]): + The request object. The request for + [UpdateSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.UpdateSchemaBundle]. + schema_bundle (:class:`google.cloud.bigtable_admin_v2.types.SchemaBundle`): + Required. The schema bundle to update. + + The schema bundle's ``name`` field is used to identify + the schema bundle to update. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}`` + + This corresponds to the ``schema_bundle`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Optional. The list of fields to + update. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.bigtable_admin_v2.types.SchemaBundle` + A named collection of related schemas. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [schema_bundle, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.UpdateSchemaBundleRequest): + request = bigtable_table_admin.UpdateSchemaBundleRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
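A similar hedged sketch for the new asynchronous update_schema_bundle method follows; the bundle name, the descriptors argument, and the "proto_schema" field-mask path are assumptions for illustration only.

# Hypothetical usage sketch for the async update_schema_bundle method added here.
from google.cloud import bigtable_admin_v2
from google.protobuf import field_mask_pb2


async def update_bundle_example(descriptors: bytes) -> None:
    client = bigtable_admin_v2.BigtableTableAdminAsyncClient()

    bundle = bigtable_admin_v2.SchemaBundle(
        name=(
            "projects/my-project/instances/my-instance"
            "/tables/my-table/schemaBundles/my-bundle"
        ),
        proto_schema=bigtable_admin_v2.ProtoSchema(proto_descriptors=descriptors),
    )
    operation = await client.update_schema_bundle(
        schema_bundle=bundle,
        # Assumed mask path; only the listed fields are overwritten on the server.
        update_mask=field_mask_pb2.FieldMask(paths=["proto_schema"]),
    )
    updated = await operation.result()  # resolves to the updated SchemaBundle
    print(updated.name)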
+ if schema_bundle is not None: + request.schema_bundle = schema_bundle + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.update_schema_bundle + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("schema_bundle.name", request.schema_bundle.name),) + ), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + table.SchemaBundle, + metadata_type=bigtable_table_admin.UpdateSchemaBundleMetadata, + ) + + # Done; return the response. + return response + + async def get_schema_bundle( + self, + request: Optional[ + Union[bigtable_table_admin.GetSchemaBundleRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> table.SchemaBundle: + r"""Gets metadata information about the specified schema + bundle. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetSchemaBundleRequest, dict]]): + The request object. The request for + [GetSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.GetSchemaBundle]. + name (:class:`str`): + Required. The unique name of the schema bundle to + retrieve. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.bigtable_admin_v2.types.SchemaBundle: + A named collection of related + schemas. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.GetSchemaBundleRequest): + request = bigtable_table_admin.GetSchemaBundleRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
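As a quick illustration of the new get_schema_bundle method together with the schema_bundle_path helper introduced by this change, a hedged sketch with placeholder IDs:

# Hypothetical usage sketch for the async get_schema_bundle method added here.
from google.cloud import bigtable_admin_v2


async def get_bundle_example() -> None:
    client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
    # schema_bundle_path assembles the fully-qualified resource name.
    name = client.schema_bundle_path(
        project="my-project",
        instance="my-instance",
        table="my-table",
        schema_bundle="my-bundle",
    )
    bundle = await client.get_schema_bundle(name=name)
    print(bundle.name)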
+ if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.get_schema_bundle + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_schema_bundles( + self, + request: Optional[ + Union[bigtable_table_admin.ListSchemaBundlesRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pagers.ListSchemaBundlesAsyncPager: + r"""Lists all schema bundles associated with the + specified table. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListSchemaBundlesRequest, dict]]): + The request object. The request for + [ListSchemaBundles][google.bigtable.admin.v2.BigtableTableAdmin.ListSchemaBundles]. + parent (:class:`str`): + Required. The parent, which owns this collection of + schema bundles. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListSchemaBundlesAsyncPager: + The response for + [ListSchemaBundles][google.bigtable.admin.v2.BigtableTableAdmin.ListSchemaBundles]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.ListSchemaBundlesRequest): + request = bigtable_table_admin.ListSchemaBundlesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
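The new list_schema_bundles method returns a pager; a hedged sketch of iterating it asynchronously, with placeholder IDs:

# Hypothetical usage sketch for the async list_schema_bundles method added here.
from google.cloud import bigtable_admin_v2


async def list_bundles_example() -> None:
    client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
    pager = await client.list_schema_bundles(
        parent="projects/my-project/instances/my-instance/tables/my-table"
    )
    # Iterating the pager yields SchemaBundle messages and transparently
    # fetches additional pages as needed.
    async for bundle in pager:
        print(bundle.name)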
+ rpc = self._client._transport._wrapped_methods[ + self._client._transport.list_schema_bundles + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListSchemaBundlesAsyncPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_schema_bundle( + self, + request: Optional[ + Union[bigtable_table_admin.DeleteSchemaBundleRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> None: + r"""Deletes a schema bundle in the specified table. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteSchemaBundleRequest, dict]]): + The request object. The request for + [DeleteSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSchemaBundle]. + name (:class:`str`): + Required. The unique name of the schema bundle to + delete. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.DeleteSchemaBundleRequest): + request = bigtable_table_admin.DeleteSchemaBundleRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.delete_schema_bundle + ] + + # Certain fields should be provided within the metadata header; + # add these here. 
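Finally for the async surface, a hedged sketch of the new delete_schema_bundle method; the RPC returns Empty, so success is simply the absence of an exception (placeholder resource name):

# Hypothetical usage sketch for the async delete_schema_bundle method added here.
from google.cloud import bigtable_admin_v2


async def delete_bundle_example() -> None:
    client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
    await client.delete_schema_bundle(
        name="projects/my-project/instances/my-instance"
        "/tables/my-table/schemaBundles/my-bundle"
    )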
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + async def __aenter__(self) -> "BigtableTableAdminAsyncClient": return self diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index abb82b1ed766..812a9366ab33 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -322,6 +322,30 @@ def parse_instance_path(path: str) -> Dict[str, str]: m = re.match(r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)$", path) return m.groupdict() if m else {} + @staticmethod + def schema_bundle_path( + project: str, + instance: str, + table: str, + schema_bundle: str, + ) -> str: + """Returns a fully-qualified schema_bundle string.""" + return "projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}".format( + project=project, + instance=instance, + table=table, + schema_bundle=schema_bundle, + ) + + @staticmethod + def parse_schema_bundle_path(path: str) -> Dict[str, str]: + """Parses a schema_bundle path into its component segments.""" + m = re.match( + r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)/tables/(?P<table>
.+?)/schemaBundles/(?P<schema_bundle>.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def snapshot_path( project: str, @@ -1910,8 +1934,8 @@ def update_authorized_view( authorized_view (google.cloud.bigtable_admin_v2.types.AuthorizedView): Required. The AuthorizedView to update. The ``name`` in ``authorized_view`` is used to identify the - AuthorizedView. AuthorizedView name must in this format - projects//instances//tables//authorizedViews/ + AuthorizedView. AuthorizedView name must in this format: + ``projects/{project}/instances/{instance}/tables/{table}/authorizedViews/{authorized_view}``. This corresponds to the ``authorized_view`` field on the ``request`` instance; if ``request`` is provided, this @@ -3615,7 +3639,7 @@ def get_iam_policy( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Gets the access control policy for a Table or Backup + r"""Gets the access control policy for a Bigtable resource. Returns an empty policy if the resource exists but does not have a policy set. @@ -3729,7 +3753,7 @@ def set_iam_policy( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: - r"""Sets the access control policy on a Table or Backup + r"""Sets the access control policy on a Bigtable resource. Replaces any existing policy. Args: @@ -3844,7 +3868,7 @@ def test_iam_permissions( metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: r"""Returns permissions that the caller has on the - specified Table or Backup resource. + specified Bigtable resource. Args: request (Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]): @@ -3928,6 +3952,496 @@ def test_iam_permissions( # Done; return the response. return response + def create_schema_bundle( + self, + request: Optional[ + Union[bigtable_table_admin.CreateSchemaBundleRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + schema_bundle_id: Optional[str] = None, + schema_bundle: Optional[table.SchemaBundle] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation.Operation: + r"""Creates a new schema bundle in the specified table. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.CreateSchemaBundleRequest, dict]): + The request object. The request for + [CreateSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.CreateSchemaBundle]. + parent (str): + Required. The parent resource where this schema bundle + will be created. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + schema_bundle_id (str): + Required. The unique ID to use for + the schema bundle, which will become the + final component of the schema bundle's + resource name. + + This corresponds to the ``schema_bundle_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + schema_bundle (google.cloud.bigtable_admin_v2.types.SchemaBundle): + Required. The schema bundle to + create. + + This corresponds to the ``schema_bundle`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.bigtable_admin_v2.types.SchemaBundle` + A named collection of related schemas. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent, schema_bundle_id, schema_bundle] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.CreateSchemaBundleRequest): + request = bigtable_table_admin.CreateSchemaBundleRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if schema_bundle_id is not None: + request.schema_bundle_id = schema_bundle_id + if schema_bundle is not None: + request.schema_bundle = schema_bundle + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_schema_bundle] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + table.SchemaBundle, + metadata_type=bigtable_table_admin.CreateSchemaBundleMetadata, + ) + + # Done; return the response. + return response + + def update_schema_bundle( + self, + request: Optional[ + Union[bigtable_table_admin.UpdateSchemaBundleRequest, dict] + ] = None, + *, + schema_bundle: Optional[table.SchemaBundle] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operation.Operation: + r"""Updates a schema bundle in the specified table. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.UpdateSchemaBundleRequest, dict]): + The request object. The request for + [UpdateSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.UpdateSchemaBundle]. + schema_bundle (google.cloud.bigtable_admin_v2.types.SchemaBundle): + Required. The schema bundle to update. + + The schema bundle's ``name`` field is used to identify + the schema bundle to update. 
Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}`` + + This corresponds to the ``schema_bundle`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Optional. The list of fields to + update. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.bigtable_admin_v2.types.SchemaBundle` + A named collection of related schemas. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [schema_bundle, update_mask] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.UpdateSchemaBundleRequest): + request = bigtable_table_admin.UpdateSchemaBundleRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if schema_bundle is not None: + request.schema_bundle = schema_bundle + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_schema_bundle] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("schema_bundle.name", request.schema_bundle.name),) + ), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + table.SchemaBundle, + metadata_type=bigtable_table_admin.UpdateSchemaBundleMetadata, + ) + + # Done; return the response. + return response + + def get_schema_bundle( + self, + request: Optional[ + Union[bigtable_table_admin.GetSchemaBundleRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> table.SchemaBundle: + r"""Gets metadata information about the specified schema + bundle. 
+ + Args: + request (Union[google.cloud.bigtable_admin_v2.types.GetSchemaBundleRequest, dict]): + The request object. The request for + [GetSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.GetSchemaBundle]. + name (str): + Required. The unique name of the schema bundle to + retrieve. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.bigtable_admin_v2.types.SchemaBundle: + A named collection of related + schemas. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.GetSchemaBundleRequest): + request = bigtable_table_admin.GetSchemaBundleRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_schema_bundle] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_schema_bundles( + self, + request: Optional[ + Union[bigtable_table_admin.ListSchemaBundlesRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> pagers.ListSchemaBundlesPager: + r"""Lists all schema bundles associated with the + specified table. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.ListSchemaBundlesRequest, dict]): + The request object. The request for + [ListSchemaBundles][google.bigtable.admin.v2.BigtableTableAdmin.ListSchemaBundles]. + parent (str): + Required. The parent, which owns this collection of + schema bundles. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListSchemaBundlesPager: + The response for + [ListSchemaBundles][google.bigtable.admin.v2.BigtableTableAdmin.ListSchemaBundles]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [parent] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.ListSchemaBundlesRequest): + request = bigtable_table_admin.ListSchemaBundlesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_schema_bundles] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListSchemaBundlesPager( + method=rpc, + request=request, + response=response, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_schema_bundle( + self, + request: Optional[ + Union[bigtable_table_admin.DeleteSchemaBundleRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> None: + r"""Deletes a schema bundle in the specified table. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.DeleteSchemaBundleRequest, dict]): + The request object. The request for + [DeleteSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSchemaBundle]. + name (str): + Required. The unique name of the schema bundle to + delete. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
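The synchronous client gains the same five methods; a hedged end-to-end sketch of the lifecycle they enable (placeholder IDs, descriptors supplied by the caller, proto_descriptors assumed as above):

# Hypothetical end-to-end sketch using the synchronous BigtableTableAdminClient.
from google.cloud import bigtable_admin_v2


def schema_bundle_lifecycle(descriptors: bytes) -> None:
    client = bigtable_admin_v2.BigtableTableAdminClient()
    parent = "projects/my-project/instances/my-instance/tables/my-table"

    operation = client.create_schema_bundle(
        parent=parent,
        schema_bundle_id="my-bundle",
        schema_bundle=bigtable_admin_v2.SchemaBundle(
            proto_schema=bigtable_admin_v2.ProtoSchema(proto_descriptors=descriptors)
        ),
    )
    bundle = operation.result()  # blocks until the long-running operation completes

    # The sync pager is a plain iterable of SchemaBundle messages.
    for found in client.list_schema_bundles(parent=parent):
        print(found.name)

    client.delete_schema_bundle(name=bundle.name)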
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, bigtable_table_admin.DeleteSchemaBundleRequest): + request = bigtable_table_admin.DeleteSchemaBundleRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_schema_bundle] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + def __enter__(self) -> "BigtableTableAdminClient": return self diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py index 8b1ffba34039..e6d83ba63a0e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py @@ -667,3 +667,163 @@ async def async_generator(): def __repr__(self) -> str: return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListSchemaBundlesPager: + """A pager for iterating through ``list_schema_bundles`` requests. + + This class thinly wraps an initial + :class:`google.cloud.bigtable_admin_v2.types.ListSchemaBundlesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``schema_bundles`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListSchemaBundles`` requests and continue to iterate + through the ``schema_bundles`` field on the + corresponding responses. + + All the usual :class:`google.cloud.bigtable_admin_v2.types.ListSchemaBundlesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + + def __init__( + self, + method: Callable[..., bigtable_table_admin.ListSchemaBundlesResponse], + request: bigtable_table_admin.ListSchemaBundlesRequest, + response: bigtable_table_admin.ListSchemaBundlesResponse, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.bigtable_admin_v2.types.ListSchemaBundlesRequest): + The initial request object. + response (google.cloud.bigtable_admin_v2.types.ListSchemaBundlesResponse): + The initial response object. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + self._method = method + self._request = bigtable_table_admin.ListSchemaBundlesRequest(request) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[bigtable_table_admin.ListSchemaBundlesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) + yield self._response + + def __iter__(self) -> Iterator[table.SchemaBundle]: + for page in self.pages: + yield from page.schema_bundles + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListSchemaBundlesAsyncPager: + """A pager for iterating through ``list_schema_bundles`` requests. + + This class thinly wraps an initial + :class:`google.cloud.bigtable_admin_v2.types.ListSchemaBundlesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``schema_bundles`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListSchemaBundles`` requests and continue to iterate + through the ``schema_bundles`` field on the + corresponding responses. + + All the usual :class:`google.cloud.bigtable_admin_v2.types.ListSchemaBundlesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[ + ..., Awaitable[bigtable_table_admin.ListSchemaBundlesResponse] + ], + request: bigtable_table_admin.ListSchemaBundlesRequest, + response: bigtable_table_admin.ListSchemaBundlesResponse, + *, + retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.bigtable_admin_v2.types.ListSchemaBundlesRequest): + The initial request object. 
+ response (google.cloud.bigtable_admin_v2.types.ListSchemaBundlesResponse): + The initial response object. + retry (google.api_core.retry.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + """ + self._method = method + self._request = bigtable_table_admin.ListSchemaBundlesRequest(request) + self._response = response + self._retry = retry + self._timeout = timeout + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterator[bigtable_table_admin.ListSchemaBundlesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method( + self._request, + retry=self._retry, + timeout=self._timeout, + metadata=self._metadata, + ) + yield self._response + + def __aiter__(self) -> AsyncIterator[table.SchemaBundle]: + async def async_generator(): + async for page in self.pages: + for response in page.schema_bundles: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py index 9a549b7ca52f..8e2cb7304429 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py @@ -397,6 +397,31 @@ def _prep_wrapped_messages(self, client_info): default_timeout=60.0, client_info=client_info, ), + self.create_schema_bundle: gapic_v1.method.wrap_method( + self.create_schema_bundle, + default_timeout=None, + client_info=client_info, + ), + self.update_schema_bundle: gapic_v1.method.wrap_method( + self.update_schema_bundle, + default_timeout=None, + client_info=client_info, + ), + self.get_schema_bundle: gapic_v1.method.wrap_method( + self.get_schema_bundle, + default_timeout=None, + client_info=client_info, + ), + self.list_schema_bundles: gapic_v1.method.wrap_method( + self.list_schema_bundles, + default_timeout=None, + client_info=client_info, + ), + self.delete_schema_bundle: gapic_v1.method.wrap_method( + self.delete_schema_bundle, + default_timeout=None, + client_info=client_info, + ), } def close(self): @@ -704,6 +729,54 @@ def test_iam_permissions( ]: raise NotImplementedError() + @property + def create_schema_bundle( + self, + ) -> Callable[ + [bigtable_table_admin.CreateSchemaBundleRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def update_schema_bundle( + self, + ) -> Callable[ + [bigtable_table_admin.UpdateSchemaBundleRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def get_schema_bundle( + self, + ) -> Callable[ + [bigtable_table_admin.GetSchemaBundleRequest], + Union[table.SchemaBundle, 
Awaitable[table.SchemaBundle]], + ]: + raise NotImplementedError() + + @property + def list_schema_bundles( + self, + ) -> Callable[ + [bigtable_table_admin.ListSchemaBundlesRequest], + Union[ + bigtable_table_admin.ListSchemaBundlesResponse, + Awaitable[bigtable_table_admin.ListSchemaBundlesResponse], + ], + ]: + raise NotImplementedError() + + @property + def delete_schema_bundle( + self, + ) -> Callable[ + [bigtable_table_admin.DeleteSchemaBundleRequest], + Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]], + ]: + raise NotImplementedError() + @property def kind(self) -> str: raise NotImplementedError() diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py index b18f131335c1..5f46c3aa3929 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py @@ -1166,7 +1166,7 @@ def get_iam_policy( ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: r"""Return a callable for the get iam policy method over gRPC. - Gets the access control policy for a Table or Backup + Gets the access control policy for a Bigtable resource. Returns an empty policy if the resource exists but does not have a policy set. @@ -1194,7 +1194,7 @@ def set_iam_policy( ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: r"""Return a callable for the set iam policy method over gRPC. - Sets the access control policy on a Table or Backup + Sets the access control policy on a Bigtable resource. Replaces any existing policy. Returns: @@ -1225,7 +1225,7 @@ def test_iam_permissions( r"""Return a callable for the test iam permissions method over gRPC. Returns permissions that the caller has on the - specified Table or Backup resource. + specified Bigtable resource. Returns: Callable[[~.TestIamPermissionsRequest], @@ -1245,6 +1245,145 @@ def test_iam_permissions( ) return self._stubs["test_iam_permissions"] + @property + def create_schema_bundle( + self, + ) -> Callable[ + [bigtable_table_admin.CreateSchemaBundleRequest], operations_pb2.Operation + ]: + r"""Return a callable for the create schema bundle method over gRPC. + + Creates a new schema bundle in the specified table. + + Returns: + Callable[[~.CreateSchemaBundleRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_schema_bundle" not in self._stubs: + self._stubs["create_schema_bundle"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/CreateSchemaBundle", + request_serializer=bigtable_table_admin.CreateSchemaBundleRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_schema_bundle"] + + @property + def update_schema_bundle( + self, + ) -> Callable[ + [bigtable_table_admin.UpdateSchemaBundleRequest], operations_pb2.Operation + ]: + r"""Return a callable for the update schema bundle method over gRPC. + + Updates a schema bundle in the specified table. 
+ + Returns: + Callable[[~.UpdateSchemaBundleRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_schema_bundle" not in self._stubs: + self._stubs["update_schema_bundle"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateSchemaBundle", + request_serializer=bigtable_table_admin.UpdateSchemaBundleRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["update_schema_bundle"] + + @property + def get_schema_bundle( + self, + ) -> Callable[[bigtable_table_admin.GetSchemaBundleRequest], table.SchemaBundle]: + r"""Return a callable for the get schema bundle method over gRPC. + + Gets metadata information about the specified schema + bundle. + + Returns: + Callable[[~.GetSchemaBundleRequest], + ~.SchemaBundle]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_schema_bundle" not in self._stubs: + self._stubs["get_schema_bundle"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/GetSchemaBundle", + request_serializer=bigtable_table_admin.GetSchemaBundleRequest.serialize, + response_deserializer=table.SchemaBundle.deserialize, + ) + return self._stubs["get_schema_bundle"] + + @property + def list_schema_bundles( + self, + ) -> Callable[ + [bigtable_table_admin.ListSchemaBundlesRequest], + bigtable_table_admin.ListSchemaBundlesResponse, + ]: + r"""Return a callable for the list schema bundles method over gRPC. + + Lists all schema bundles associated with the + specified table. + + Returns: + Callable[[~.ListSchemaBundlesRequest], + ~.ListSchemaBundlesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_schema_bundles" not in self._stubs: + self._stubs["list_schema_bundles"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/ListSchemaBundles", + request_serializer=bigtable_table_admin.ListSchemaBundlesRequest.serialize, + response_deserializer=bigtable_table_admin.ListSchemaBundlesResponse.deserialize, + ) + return self._stubs["list_schema_bundles"] + + @property + def delete_schema_bundle( + self, + ) -> Callable[[bigtable_table_admin.DeleteSchemaBundleRequest], empty_pb2.Empty]: + r"""Return a callable for the delete schema bundle method over gRPC. + + Deletes a schema bundle in the specified table. + + Returns: + Callable[[~.DeleteSchemaBundleRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_schema_bundle" not in self._stubs: + self._stubs["delete_schema_bundle"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteSchemaBundle", + request_serializer=bigtable_table_admin.DeleteSchemaBundleRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs["delete_schema_bundle"] + def close(self): self._logged_channel.close() diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py index 8b08cbe8ce90..159a96edae02 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py @@ -1199,7 +1199,7 @@ def get_iam_policy( ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], Awaitable[policy_pb2.Policy]]: r"""Return a callable for the get iam policy method over gRPC. - Gets the access control policy for a Table or Backup + Gets the access control policy for a Bigtable resource. Returns an empty policy if the resource exists but does not have a policy set. @@ -1227,7 +1227,7 @@ def set_iam_policy( ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], Awaitable[policy_pb2.Policy]]: r"""Return a callable for the set iam policy method over gRPC. - Sets the access control policy on a Table or Backup + Sets the access control policy on a Bigtable resource. Replaces any existing policy. Returns: @@ -1258,7 +1258,7 @@ def test_iam_permissions( r"""Return a callable for the test iam permissions method over gRPC. Returns permissions that the caller has on the - specified Table or Backup resource. + specified Bigtable resource. Returns: Callable[[~.TestIamPermissionsRequest], @@ -1278,6 +1278,151 @@ def test_iam_permissions( ) return self._stubs["test_iam_permissions"] + @property + def create_schema_bundle( + self, + ) -> Callable[ + [bigtable_table_admin.CreateSchemaBundleRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the create schema bundle method over gRPC. + + Creates a new schema bundle in the specified table. + + Returns: + Callable[[~.CreateSchemaBundleRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_schema_bundle" not in self._stubs: + self._stubs["create_schema_bundle"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/CreateSchemaBundle", + request_serializer=bigtable_table_admin.CreateSchemaBundleRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_schema_bundle"] + + @property + def update_schema_bundle( + self, + ) -> Callable[ + [bigtable_table_admin.UpdateSchemaBundleRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the update schema bundle method over gRPC. + + Updates a schema bundle in the specified table. + + Returns: + Callable[[~.UpdateSchemaBundleRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_schema_bundle" not in self._stubs: + self._stubs["update_schema_bundle"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateSchemaBundle", + request_serializer=bigtable_table_admin.UpdateSchemaBundleRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["update_schema_bundle"] + + @property + def get_schema_bundle( + self, + ) -> Callable[ + [bigtable_table_admin.GetSchemaBundleRequest], Awaitable[table.SchemaBundle] + ]: + r"""Return a callable for the get schema bundle method over gRPC. + + Gets metadata information about the specified schema + bundle. + + Returns: + Callable[[~.GetSchemaBundleRequest], + Awaitable[~.SchemaBundle]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_schema_bundle" not in self._stubs: + self._stubs["get_schema_bundle"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/GetSchemaBundle", + request_serializer=bigtable_table_admin.GetSchemaBundleRequest.serialize, + response_deserializer=table.SchemaBundle.deserialize, + ) + return self._stubs["get_schema_bundle"] + + @property + def list_schema_bundles( + self, + ) -> Callable[ + [bigtable_table_admin.ListSchemaBundlesRequest], + Awaitable[bigtable_table_admin.ListSchemaBundlesResponse], + ]: + r"""Return a callable for the list schema bundles method over gRPC. + + Lists all schema bundles associated with the + specified table. + + Returns: + Callable[[~.ListSchemaBundlesRequest], + Awaitable[~.ListSchemaBundlesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_schema_bundles" not in self._stubs: + self._stubs["list_schema_bundles"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/ListSchemaBundles", + request_serializer=bigtable_table_admin.ListSchemaBundlesRequest.serialize, + response_deserializer=bigtable_table_admin.ListSchemaBundlesResponse.deserialize, + ) + return self._stubs["list_schema_bundles"] + + @property + def delete_schema_bundle( + self, + ) -> Callable[ + [bigtable_table_admin.DeleteSchemaBundleRequest], Awaitable[empty_pb2.Empty] + ]: + r"""Return a callable for the delete schema bundle method over gRPC. + + Deletes a schema bundle in the specified table. + + Returns: + Callable[[~.DeleteSchemaBundleRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_schema_bundle" not in self._stubs: + self._stubs["delete_schema_bundle"] = self._logged_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteSchemaBundle", + request_serializer=bigtable_table_admin.DeleteSchemaBundleRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs["delete_schema_bundle"] + def _prep_wrapped_messages(self, client_info): """Precompute the wrapped methods, overriding the base class method to use async wrappers.""" self._wrapped_methods = { @@ -1531,6 +1676,31 @@ def _prep_wrapped_messages(self, client_info): default_timeout=60.0, client_info=client_info, ), + self.create_schema_bundle: self._wrap_method( + self.create_schema_bundle, + default_timeout=None, + client_info=client_info, + ), + self.update_schema_bundle: self._wrap_method( + self.update_schema_bundle, + default_timeout=None, + client_info=client_info, + ), + self.get_schema_bundle: self._wrap_method( + self.get_schema_bundle, + default_timeout=None, + client_info=client_info, + ), + self.list_schema_bundles: self._wrap_method( + self.list_schema_bundles, + default_timeout=None, + client_info=client_info, + ), + self.delete_schema_bundle: self._wrap_method( + self.delete_schema_bundle, + default_timeout=None, + client_info=client_info, + ), } def _wrap_method(self, func, *args, **kwargs): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py index fd9445161863..adf448f823d4 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py @@ -117,6 +117,14 @@ def post_create_backup(self, response): logging.log(f"Received response: {response}") return response + def pre_create_schema_bundle(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_create_schema_bundle(self, response): + logging.log(f"Received response: {response}") + return response + def pre_create_table(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -141,6 +149,10 @@ def pre_delete_backup(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata + def pre_delete_schema_bundle(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + def pre_delete_snapshot(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -185,6 +197,14 @@ def post_get_iam_policy(self, response): logging.log(f"Received response: {response}") return response + def pre_get_schema_bundle(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_schema_bundle(self, response): + logging.log(f"Received response: {response}") + return response + def pre_get_snapshot(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -217,6 +237,14 @@ def post_list_backups(self, response): logging.log(f"Received response: {response}") return response + def pre_list_schema_bundles(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_schema_bundles(self, response): + 
logging.log(f"Received response: {response}") + return response + def pre_list_snapshots(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -297,6 +325,14 @@ def post_update_backup(self, response): logging.log(f"Received response: {response}") return response + def pre_update_schema_bundle(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_update_schema_bundle(self, response): + logging.log(f"Received response: {response}") + return response + def pre_update_table(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -509,6 +545,55 @@ def post_create_backup_with_metadata( """ return response, metadata + def pre_create_schema_bundle( + self, + request: bigtable_table_admin.CreateSchemaBundleRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.CreateSchemaBundleRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for create_schema_bundle + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_create_schema_bundle( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for create_schema_bundle + + DEPRECATED. Please use the `post_create_schema_bundle_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. This `post_create_schema_bundle` interceptor runs + before the `post_create_schema_bundle_with_metadata` interceptor. + """ + return response + + def post_create_schema_bundle_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_schema_bundle + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. + + We recommend only using this `post_create_schema_bundle_with_metadata` + interceptor in new development instead of the `post_create_schema_bundle` interceptor. + When both interceptors are used, this `post_create_schema_bundle_with_metadata` interceptor runs after the + `post_create_schema_bundle` interceptor. The (possibly modified) response returned by + `post_create_schema_bundle` will be passed to + `post_create_schema_bundle_with_metadata`. + """ + return response, metadata + def pre_create_table( self, request: bigtable_table_admin.CreateTableRequest, @@ -634,6 +719,21 @@ def pre_delete_backup( """ return request, metadata + def pre_delete_schema_bundle( + self, + request: bigtable_table_admin.DeleteSchemaBundleRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.DeleteSchemaBundleRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for delete_schema_bundle + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. 
+ """ + return request, metadata + def pre_delete_snapshot( self, request: bigtable_table_admin.DeleteSnapshotRequest, @@ -869,6 +969,55 @@ def post_get_iam_policy_with_metadata( """ return response, metadata + def pre_get_schema_bundle( + self, + request: bigtable_table_admin.GetSchemaBundleRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.GetSchemaBundleRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for get_schema_bundle + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_get_schema_bundle( + self, response: table.SchemaBundle + ) -> table.SchemaBundle: + """Post-rpc interceptor for get_schema_bundle + + DEPRECATED. Please use the `post_get_schema_bundle_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. This `post_get_schema_bundle` interceptor runs + before the `post_get_schema_bundle_with_metadata` interceptor. + """ + return response + + def post_get_schema_bundle_with_metadata( + self, + response: table.SchemaBundle, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[table.SchemaBundle, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_schema_bundle + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. + + We recommend only using this `post_get_schema_bundle_with_metadata` + interceptor in new development instead of the `post_get_schema_bundle` interceptor. + When both interceptors are used, this `post_get_schema_bundle_with_metadata` interceptor runs after the + `post_get_schema_bundle` interceptor. The (possibly modified) response returned by + `post_get_schema_bundle` will be passed to + `post_get_schema_bundle_with_metadata`. + """ + return response, metadata + def pre_get_snapshot( self, request: bigtable_table_admin.GetSnapshotRequest, @@ -1062,6 +1211,58 @@ def post_list_backups_with_metadata( """ return response, metadata + def pre_list_schema_bundles( + self, + request: bigtable_table_admin.ListSchemaBundlesRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.ListSchemaBundlesRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for list_schema_bundles + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_list_schema_bundles( + self, response: bigtable_table_admin.ListSchemaBundlesResponse + ) -> bigtable_table_admin.ListSchemaBundlesResponse: + """Post-rpc interceptor for list_schema_bundles + + DEPRECATED. Please use the `post_list_schema_bundles_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. This `post_list_schema_bundles` interceptor runs + before the `post_list_schema_bundles_with_metadata` interceptor. 
+ """ + return response + + def post_list_schema_bundles_with_metadata( + self, + response: bigtable_table_admin.ListSchemaBundlesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.ListSchemaBundlesResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_schema_bundles + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. + + We recommend only using this `post_list_schema_bundles_with_metadata` + interceptor in new development instead of the `post_list_schema_bundles` interceptor. + When both interceptors are used, this `post_list_schema_bundles_with_metadata` interceptor runs after the + `post_list_schema_bundles` interceptor. The (possibly modified) response returned by + `post_list_schema_bundles` will be passed to + `post_list_schema_bundles_with_metadata`. + """ + return response, metadata + def pre_list_snapshots( self, request: bigtable_table_admin.ListSnapshotsRequest, @@ -1548,6 +1749,55 @@ def post_update_backup_with_metadata( """ return response, metadata + def pre_update_schema_bundle( + self, + request: bigtable_table_admin.UpdateSchemaBundleRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.UpdateSchemaBundleRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for update_schema_bundle + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_update_schema_bundle( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for update_schema_bundle + + DEPRECATED. Please use the `post_update_schema_bundle_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. This `post_update_schema_bundle` interceptor runs + before the `post_update_schema_bundle_with_metadata` interceptor. + """ + return response + + def post_update_schema_bundle_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_schema_bundle + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the BigtableTableAdmin server but before it is returned to user code. + + We recommend only using this `post_update_schema_bundle_with_metadata` + interceptor in new development instead of the `post_update_schema_bundle` interceptor. + When both interceptors are used, this `post_update_schema_bundle_with_metadata` interceptor runs after the + `post_update_schema_bundle` interceptor. The (possibly modified) response returned by + `post_update_schema_bundle` will be passed to + `post_update_schema_bundle_with_metadata`. 
+ """ + return response, metadata + def pre_update_table( self, request: bigtable_table_admin.UpdateTableRequest, @@ -2360,12 +2610,12 @@ def __call__( ) return resp - class _CreateTable( - _BaseBigtableTableAdminRestTransport._BaseCreateTable, + class _CreateSchemaBundle( + _BaseBigtableTableAdminRestTransport._BaseCreateSchemaBundle, BigtableTableAdminRestStub, ): def __hash__(self): - return hash("BigtableTableAdminRestTransport.CreateTable") + return hash("BigtableTableAdminRestTransport.CreateSchemaBundle") @staticmethod def _get_response( @@ -2392,18 +2642,18 @@ def _get_response( def __call__( self, - request: bigtable_table_admin.CreateTableRequest, + request: bigtable_table_admin.CreateSchemaBundleRequest, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> gba_table.Table: - r"""Call the create table method over HTTP. + ) -> operations_pb2.Operation: + r"""Call the create schema bundle method over HTTP. Args: - request (~.bigtable_table_admin.CreateTableRequest): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] + request (~.bigtable_table_admin.CreateSchemaBundleRequest): + The request object. The request for + [CreateSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.CreateSchemaBundle]. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2413,29 +2663,30 @@ def __call__( be of type `bytes`. Returns: - ~.gba_table.Table: - A collection of user data indexed by - row, column, and timestamp. Each table - is served using the resources of its - parent cluster. + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
""" http_options = ( - _BaseBigtableTableAdminRestTransport._BaseCreateTable._get_http_options() + _BaseBigtableTableAdminRestTransport._BaseCreateSchemaBundle._get_http_options() ) - request, metadata = self._interceptor.pre_create_table(request, metadata) - transcoded_request = _BaseBigtableTableAdminRestTransport._BaseCreateTable._get_transcoded_request( + request, metadata = self._interceptor.pre_create_schema_bundle( + request, metadata + ) + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseCreateSchemaBundle._get_transcoded_request( http_options, request ) - body = _BaseBigtableTableAdminRestTransport._BaseCreateTable._get_request_body_json( + body = _BaseBigtableTableAdminRestTransport._BaseCreateSchemaBundle._get_request_body_json( transcoded_request ) # Jsonify the query params - query_params = _BaseBigtableTableAdminRestTransport._BaseCreateTable._get_query_params_json( + query_params = _BaseBigtableTableAdminRestTransport._BaseCreateSchemaBundle._get_query_params_json( transcoded_request ) @@ -2447,7 +2698,7 @@ def __call__( ) method = transcoded_request["method"] try: - request_payload = type(request).to_json(request) + request_payload = json_format.MessageToJson(request) except: request_payload = None http_request = { @@ -2457,24 +2708,26 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.CreateTable", + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.CreateSchemaBundle", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "CreateTable", + "rpcName": "CreateSchemaBundle", "httpRequest": http_request, "metadata": http_request["headers"], }, ) # Send the request - response = BigtableTableAdminRestTransport._CreateTable._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - body, + response = ( + BigtableTableAdminRestTransport._CreateSchemaBundle._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -2483,21 +2736,19 @@ def __call__( raise core_exceptions.from_http_response(response) # Return the response - resp = gba_table.Table() - pb_resp = gba_table.Table.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) - resp = self._interceptor.post_create_table(resp) + resp = self._interceptor.post_create_schema_bundle(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_create_table_with_metadata( + resp, _ = self._interceptor.post_create_schema_bundle_with_metadata( resp, response_metadata ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER try: - response_payload = gba_table.Table.to_json(response) + response_payload = json_format.MessageToJson(resp) except: response_payload = None http_response = { @@ -2506,22 +2757,22 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.create_table", + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.create_schema_bundle", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "CreateTable", + "rpcName": 
"CreateSchemaBundle", "metadata": http_response["headers"], "httpResponse": http_response, }, ) return resp - class _CreateTableFromSnapshot( - _BaseBigtableTableAdminRestTransport._BaseCreateTableFromSnapshot, + class _CreateTable( + _BaseBigtableTableAdminRestTransport._BaseCreateTable, BigtableTableAdminRestStub, ): def __hash__(self): - return hash("BigtableTableAdminRestTransport.CreateTableFromSnapshot") + return hash("BigtableTableAdminRestTransport.CreateTable") @staticmethod def _get_response( @@ -2548,27 +2799,183 @@ def _get_response( def __call__( self, - request: bigtable_table_admin.CreateTableFromSnapshotRequest, + request: bigtable_table_admin.CreateTableRequest, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> operations_pb2.Operation: - r"""Call the create table from - snapshot method over HTTP. + ) -> gba_table.Table: + r"""Call the create table method over HTTP. - Args: - request (~.bigtable_table_admin.CreateTableFromSnapshotRequest): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot] + Args: + request (~.bigtable_table_admin.CreateTableRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + Returns: + ~.gba_table.Table: + A collection of user data indexed by + row, column, and timestamp. Each table + is served using the resources of its + parent cluster. 
+ + """ + + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseCreateTable._get_http_options() + ) + + request, metadata = self._interceptor.pre_create_table(request, metadata) + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseCreateTable._get_transcoded_request( + http_options, request + ) + + body = _BaseBigtableTableAdminRestTransport._BaseCreateTable._get_request_body_json( + transcoded_request + ) + + # Jsonify the query params + query_params = _BaseBigtableTableAdminRestTransport._BaseCreateTable._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.CreateTable", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "CreateTable", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = BigtableTableAdminRestTransport._CreateTable._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = gba_table.Table() + pb_resp = gba_table.Table.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_create_table(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_table_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = gba_table.Table.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.create_table", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "CreateTable", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _CreateTableFromSnapshot( + _BaseBigtableTableAdminRestTransport._BaseCreateTableFromSnapshot, + BigtableTableAdminRestStub, + ): + def __hash__(self): + return hash("BigtableTableAdminRestTransport.CreateTableFromSnapshot") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response + + def __call__( + self, + request: 
bigtable_table_admin.CreateTableFromSnapshotRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the create table from + snapshot method over HTTP. + + Args: + request (~.bigtable_table_admin.CreateTableFromSnapshotRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot] + + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be @@ -2901,6 +3308,118 @@ def __call__( if response.status_code >= 400: raise core_exceptions.from_http_response(response) + class _DeleteSchemaBundle( + _BaseBigtableTableAdminRestTransport._BaseDeleteSchemaBundle, + BigtableTableAdminRestStub, + ): + def __hash__(self): + return hash("BigtableTableAdminRestTransport.DeleteSchemaBundle") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: bigtable_table_admin.DeleteSchemaBundleRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ): + r"""Call the delete schema bundle method over HTTP. + + Args: + request (~.bigtable_table_admin.DeleteSchemaBundleRequest): + The request object. The request for + [DeleteSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSchemaBundle]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ """ + + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseDeleteSchemaBundle._get_http_options() + ) + + request, metadata = self._interceptor.pre_delete_schema_bundle( + request, metadata + ) + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseDeleteSchemaBundle._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseBigtableTableAdminRestTransport._BaseDeleteSchemaBundle._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.DeleteSchemaBundle", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "DeleteSchemaBundle", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + BigtableTableAdminRestTransport._DeleteSchemaBundle._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + class _DeleteSnapshot( _BaseBigtableTableAdminRestTransport._BaseDeleteSnapshot, BigtableTableAdminRestStub, @@ -3926,7 +4445,157 @@ def __call__( ) return resp - class _GetSnapshot( + class _GetSchemaBundle( + _BaseBigtableTableAdminRestTransport._BaseGetSchemaBundle, + BigtableTableAdminRestStub, + ): + def __hash__(self): + return hash("BigtableTableAdminRestTransport.GetSchemaBundle") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: bigtable_table_admin.GetSchemaBundleRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> table.SchemaBundle: + r"""Call the get schema bundle method over HTTP. + + Args: + request (~.bigtable_table_admin.GetSchemaBundleRequest): + The request object. The request for + [GetSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.GetSchemaBundle]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
+ + Returns: + ~.table.SchemaBundle: + A named collection of related + schemas. + + """ + + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseGetSchemaBundle._get_http_options() + ) + + request, metadata = self._interceptor.pre_get_schema_bundle( + request, metadata + ) + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseGetSchemaBundle._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseBigtableTableAdminRestTransport._BaseGetSchemaBundle._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.GetSchemaBundle", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "GetSchemaBundle", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = BigtableTableAdminRestTransport._GetSchemaBundle._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = table.SchemaBundle() + pb_resp = table.SchemaBundle.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_get_schema_bundle(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_schema_bundle_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = table.SchemaBundle.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.get_schema_bundle", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "GetSchemaBundle", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _GetSnapshot( _BaseBigtableTableAdminRestTransport._BaseGetSnapshot, BigtableTableAdminRestStub, ): @@ -4293,25 +4962,179 @@ def __call__( be of type `bytes`. 
Returns: - ~.bigtable_table_admin.ListAuthorizedViewsResponse: - Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews][google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews] + ~.bigtable_table_admin.ListAuthorizedViewsResponse: + Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews][google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews] + + """ + + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseListAuthorizedViews._get_http_options() + ) + + request, metadata = self._interceptor.pre_list_authorized_views( + request, metadata + ) + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseListAuthorizedViews._get_transcoded_request( + http_options, request + ) + + # Jsonify the query params + query_params = _BaseBigtableTableAdminRestTransport._BaseListAuthorizedViews._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.ListAuthorizedViews", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "ListAuthorizedViews", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + BigtableTableAdminRestTransport._ListAuthorizedViews._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = bigtable_table_admin.ListAuthorizedViewsResponse() + pb_resp = bigtable_table_admin.ListAuthorizedViewsResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_list_authorized_views(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_authorized_views_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = ( + bigtable_table_admin.ListAuthorizedViewsResponse.to_json( + response + ) + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.list_authorized_views", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "ListAuthorizedViews", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + + class _ListBackups( + _BaseBigtableTableAdminRestTransport._BaseListBackups, + BigtableTableAdminRestStub, + ): + def __hash__(self): + return hash("BigtableTableAdminRestTransport.ListBackups") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + return response + + def __call__( + self, + request: bigtable_table_admin.ListBackupsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> bigtable_table_admin.ListBackupsResponse: + r"""Call the list backups method over HTTP. + + Args: + request (~.bigtable_table_admin.ListBackupsRequest): + The request object. The request for + [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.bigtable_table_admin.ListBackupsResponse: + The response for + [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. 
""" http_options = ( - _BaseBigtableTableAdminRestTransport._BaseListAuthorizedViews._get_http_options() + _BaseBigtableTableAdminRestTransport._BaseListBackups._get_http_options() ) - request, metadata = self._interceptor.pre_list_authorized_views( - request, metadata - ) - transcoded_request = _BaseBigtableTableAdminRestTransport._BaseListAuthorizedViews._get_transcoded_request( + request, metadata = self._interceptor.pre_list_backups(request, metadata) + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseListBackups._get_transcoded_request( http_options, request ) # Jsonify the query params - query_params = _BaseBigtableTableAdminRestTransport._BaseListAuthorizedViews._get_query_params_json( + query_params = _BaseBigtableTableAdminRestTransport._BaseListBackups._get_query_params_json( transcoded_request ) @@ -4333,25 +5156,23 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.ListAuthorizedViews", + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.ListBackups", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "ListAuthorizedViews", + "rpcName": "ListBackups", "httpRequest": http_request, "metadata": http_request["headers"], }, ) # Send the request - response = ( - BigtableTableAdminRestTransport._ListAuthorizedViews._get_response( - self._host, - metadata, - query_params, - self._session, - timeout, - transcoded_request, - ) + response = BigtableTableAdminRestTransport._ListBackups._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, ) # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception @@ -4360,24 +5181,22 @@ def __call__( raise core_exceptions.from_http_response(response) # Return the response - resp = bigtable_table_admin.ListAuthorizedViewsResponse() - pb_resp = bigtable_table_admin.ListAuthorizedViewsResponse.pb(resp) + resp = bigtable_table_admin.ListBackupsResponse() + pb_resp = bigtable_table_admin.ListBackupsResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_list_authorized_views(resp) + resp = self._interceptor.post_list_backups(resp) response_metadata = [(k, str(v)) for k, v in response.headers.items()] - resp, _ = self._interceptor.post_list_authorized_views_with_metadata( + resp, _ = self._interceptor.post_list_backups_with_metadata( resp, response_metadata ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER try: - response_payload = ( - bigtable_table_admin.ListAuthorizedViewsResponse.to_json( - response - ) + response_payload = bigtable_table_admin.ListBackupsResponse.to_json( + response ) except: response_payload = None @@ -4387,22 +5206,22 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.list_authorized_views", + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.list_backups", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "ListAuthorizedViews", + "rpcName": "ListBackups", "metadata": http_response["headers"], "httpResponse": http_response, }, ) return resp - class _ListBackups( - _BaseBigtableTableAdminRestTransport._BaseListBackups, + class _ListSchemaBundles( + _BaseBigtableTableAdminRestTransport._BaseListSchemaBundles, BigtableTableAdminRestStub, ): def __hash__(self): 
- return hash("BigtableTableAdminRestTransport.ListBackups") + return hash("BigtableTableAdminRestTransport.ListSchemaBundles") @staticmethod def _get_response( @@ -4428,18 +5247,18 @@ def _get_response( def __call__( self, - request: bigtable_table_admin.ListBackupsRequest, + request: bigtable_table_admin.ListSchemaBundlesRequest, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), - ) -> bigtable_table_admin.ListBackupsResponse: - r"""Call the list backups method over HTTP. + ) -> bigtable_table_admin.ListSchemaBundlesResponse: + r"""Call the list schema bundles method over HTTP. Args: - request (~.bigtable_table_admin.ListBackupsRequest): + request (~.bigtable_table_admin.ListSchemaBundlesRequest): The request object. The request for - [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. + [ListSchemaBundles][google.bigtable.admin.v2.BigtableTableAdmin.ListSchemaBundles]. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -4449,23 +5268,25 @@ def __call__( be of type `bytes`. Returns: - ~.bigtable_table_admin.ListBackupsResponse: + ~.bigtable_table_admin.ListSchemaBundlesResponse: The response for - [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. + [ListSchemaBundles][google.bigtable.admin.v2.BigtableTableAdmin.ListSchemaBundles]. """ http_options = ( - _BaseBigtableTableAdminRestTransport._BaseListBackups._get_http_options() + _BaseBigtableTableAdminRestTransport._BaseListSchemaBundles._get_http_options() ) - request, metadata = self._interceptor.pre_list_backups(request, metadata) - transcoded_request = _BaseBigtableTableAdminRestTransport._BaseListBackups._get_transcoded_request( + request, metadata = self._interceptor.pre_list_schema_bundles( + request, metadata + ) + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseListSchemaBundles._get_transcoded_request( http_options, request ) # Jsonify the query params - query_params = _BaseBigtableTableAdminRestTransport._BaseListBackups._get_query_params_json( + query_params = _BaseBigtableTableAdminRestTransport._BaseListSchemaBundles._get_query_params_json( transcoded_request ) @@ -4487,17 +5308,17 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.ListBackups", + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.ListSchemaBundles", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "ListBackups", + "rpcName": "ListSchemaBundles", "httpRequest": http_request, "metadata": http_request["headers"], }, ) # Send the request - response = BigtableTableAdminRestTransport._ListBackups._get_response( + response = BigtableTableAdminRestTransport._ListSchemaBundles._get_response( self._host, metadata, query_params, @@ -4512,22 +5333,22 @@ def __call__( raise core_exceptions.from_http_response(response) # Return the response - resp = bigtable_table_admin.ListBackupsResponse() - pb_resp = bigtable_table_admin.ListBackupsResponse.pb(resp) + resp = bigtable_table_admin.ListSchemaBundlesResponse() + pb_resp = bigtable_table_admin.ListSchemaBundlesResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_list_backups(resp) + resp = self._interceptor.post_list_schema_bundles(resp) response_metadata = [(k, str(v)) for k, v in 
response.headers.items()] - resp, _ = self._interceptor.post_list_backups_with_metadata( + resp, _ = self._interceptor.post_list_schema_bundles_with_metadata( resp, response_metadata ) if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( logging.DEBUG ): # pragma: NO COVER try: - response_payload = bigtable_table_admin.ListBackupsResponse.to_json( - response + response_payload = ( + bigtable_table_admin.ListSchemaBundlesResponse.to_json(response) ) except: response_payload = None @@ -4537,10 +5358,10 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.list_backups", + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.list_schema_bundles", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", - "rpcName": "ListBackups", + "rpcName": "ListSchemaBundles", "metadata": http_response["headers"], "httpResponse": http_response, }, @@ -6176,6 +6997,163 @@ def __call__( ) return resp + class _UpdateSchemaBundle( + _BaseBigtableTableAdminRestTransport._BaseUpdateSchemaBundle, + BigtableTableAdminRestStub, + ): + def __hash__(self): + return hash("BigtableTableAdminRestTransport.UpdateSchemaBundle") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response + + def __call__( + self, + request: bigtable_table_admin.UpdateSchemaBundleRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> operations_pb2.Operation: + r"""Call the update schema bundle method over HTTP. + + Args: + request (~.bigtable_table_admin.UpdateSchemaBundleRequest): + The request object. The request for + [UpdateSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.UpdateSchemaBundle]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options = ( + _BaseBigtableTableAdminRestTransport._BaseUpdateSchemaBundle._get_http_options() + ) + + request, metadata = self._interceptor.pre_update_schema_bundle( + request, metadata + ) + transcoded_request = _BaseBigtableTableAdminRestTransport._BaseUpdateSchemaBundle._get_transcoded_request( + http_options, request + ) + + body = _BaseBigtableTableAdminRestTransport._BaseUpdateSchemaBundle._get_request_body_json( + transcoded_request + ) + + # Jsonify the query params + query_params = _BaseBigtableTableAdminRestTransport._BaseUpdateSchemaBundle._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.UpdateSchemaBundle", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "UpdateSchemaBundle", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + + # Send the request + response = ( + BigtableTableAdminRestTransport._UpdateSchemaBundle._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, + ) + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_update_schema_bundle(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_update_schema_bundle_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.update_schema_bundle", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "UpdateSchemaBundle", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp + class _UpdateTable( _BaseBigtableTableAdminRestTransport._BaseUpdateTable, BigtableTableAdminRestStub, @@ -6366,6 +7344,16 @@ def create_backup( # In C++ this would require a dynamic_cast return self._CreateBackup(self._session, self._host, self._interceptor) # type: ignore + @property + def create_schema_bundle( + self, + ) -> Callable[ + [bigtable_table_admin.CreateSchemaBundleRequest], operations_pb2.Operation + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._CreateSchemaBundle(self._session, self._host, self._interceptor) # type: ignore + @property def create_table( self, @@ -6400,6 +7388,14 @@ def delete_backup( # In C++ this would require a dynamic_cast return self._DeleteBackup(self._session, self._host, self._interceptor) # type: ignore + @property + def delete_schema_bundle( + self, + ) -> Callable[[bigtable_table_admin.DeleteSchemaBundleRequest], empty_pb2.Empty]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._DeleteSchemaBundle(self._session, self._host, self._interceptor) # type: ignore + @property def delete_snapshot( self, @@ -6461,6 +7457,14 @@ def get_iam_policy( # In C++ this would require a dynamic_cast return self._GetIamPolicy(self._session, self._host, self._interceptor) # type: ignore + @property + def get_schema_bundle( + self, + ) -> Callable[[bigtable_table_admin.GetSchemaBundleRequest], table.SchemaBundle]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetSchemaBundle(self._session, self._host, self._interceptor) # type: ignore + @property def get_snapshot( self, @@ -6499,6 +7503,17 @@ def list_backups( # In C++ this would require a dynamic_cast return self._ListBackups(self._session, self._host, self._interceptor) # type: ignore + @property + def list_schema_bundles( + self, + ) -> Callable[ + [bigtable_table_admin.ListSchemaBundlesRequest], + bigtable_table_admin.ListSchemaBundlesResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListSchemaBundles(self._session, self._host, self._interceptor) # type: ignore + @property def list_snapshots( self, @@ -6594,6 +7609,16 @@ def update_backup( # In C++ this would require a dynamic_cast return self._UpdateBackup(self._session, self._host, self._interceptor) # type: ignore + @property + def update_schema_bundle( + self, + ) -> Callable[ + [bigtable_table_admin.UpdateSchemaBundleRequest], operations_pb2.Operation + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._UpdateSchemaBundle(self._session, self._host, self._interceptor) # type: ignore + @property def update_table( self, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest_base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest_base.py index add95bccac29..ef6c2374d2a2 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest_base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest_base.py @@ -327,6 +327,65 @@ def _get_query_params_json(transcoded_request): query_params["$alt"] = "json;enum-encoding=int" return query_params + class _BaseCreateSchemaBundle: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "schemaBundleId": "", + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{parent=projects/*/instances/*/tables/*}/schemaBundles", + "body": "schema_bundle", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.CreateSchemaBundleRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseCreateSchemaBundle._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + class _BaseCreateTable: def __hash__(self): # pragma: NO COVER return NotImplementedError("__hash__ must be implemented.") @@ -535,6 +594,53 @@ def _get_query_params_json(transcoded_request): query_params["$alt"] = "json;enum-encoding=int" return query_params + class _BaseDeleteSchemaBundle: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/v2/{name=projects/*/instances/*/tables/*/schemaBundles/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.DeleteSchemaBundleRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + 
json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseDeleteSchemaBundle._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + class _BaseDeleteSnapshot: def __hash__(self): # pragma: NO COVER return NotImplementedError("__hash__ must be implemented.") @@ -866,6 +972,16 @@ def _get_http_options(): "uri": "/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:getIamPolicy", "body": "*", }, + { + "method": "post", + "uri": "/v2/{resource=projects/*/instances/*/tables/*/authorizedViews/*}:getIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v2/{resource=projects/*/instances/*/tables/*/schemaBundles/*}:getIamPolicy", + "body": "*", + }, ] return http_options @@ -901,6 +1017,53 @@ def _get_query_params_json(transcoded_request): query_params["$alt"] = "json;enum-encoding=int" return query_params + class _BaseGetSchemaBundle: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{name=projects/*/instances/*/tables/*/schemaBundles/*}", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.GetSchemaBundleRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseGetSchemaBundle._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + class _BaseGetSnapshot: def __hash__(self): # pragma: NO COVER return NotImplementedError("__hash__ must be implemented.") @@ -1089,6 +1252,53 @@ def _get_query_params_json(transcoded_request): query_params["$alt"] = "json;enum-encoding=int" return query_params + class _BaseListSchemaBundles: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v2/{parent=projects/*/instances/*/tables/*}/schemaBundles", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.ListSchemaBundlesRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + 
query_params.update( + _BaseBigtableTableAdminRestTransport._BaseListSchemaBundles._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + class _BaseListSnapshots: def __hash__(self): # pragma: NO COVER return NotImplementedError("__hash__ must be implemented.") @@ -1324,6 +1534,16 @@ def _get_http_options(): "uri": "/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:setIamPolicy", "body": "*", }, + { + "method": "post", + "uri": "/v2/{resource=projects/*/instances/*/tables/*/authorizedViews/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v2/{resource=projects/*/instances/*/tables/*/schemaBundles/*}:setIamPolicy", + "body": "*", + }, ] return http_options @@ -1443,6 +1663,16 @@ def _get_http_options(): "uri": "/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:testIamPermissions", "body": "*", }, + { + "method": "post", + "uri": "/v2/{resource=projects/*/instances/*/tables/*/authorizedViews/*}:testIamPermissions", + "body": "*", + }, + { + "method": "post", + "uri": "/v2/{resource=projects/*/instances/*/tables/*/schemaBundles/*}:testIamPermissions", + "body": "*", + }, ] return http_options @@ -1651,6 +1881,63 @@ def _get_query_params_json(transcoded_request): query_params["$alt"] = "json;enum-encoding=int" return query_params + class _BaseUpdateSchemaBundle: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "patch", + "uri": "/v2/{schema_bundle.name=projects/*/instances/*/tables/*/schemaBundles/*}", + "body": "schema_bundle", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = bigtable_table_admin.UpdateSchemaBundleRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseBigtableTableAdminRestTransport._BaseUpdateSchemaBundle._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + class _BaseUpdateTable: def __hash__(self): # pragma: NO COVER return NotImplementedError("__hash__ must be implemented.") diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py index 26821e2a4290..e5deb36a1749 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/__init__.py @@ -66,12 +66,15 @@ CreateAuthorizedViewRequest, CreateBackupMetadata, CreateBackupRequest, + CreateSchemaBundleMetadata, + CreateSchemaBundleRequest, CreateTableFromSnapshotMetadata, 
CreateTableFromSnapshotRequest, CreateTableRequest, DataBoostReadLocalWrites, DeleteAuthorizedViewRequest, DeleteBackupRequest, + DeleteSchemaBundleRequest, DeleteSnapshotRequest, DeleteTableRequest, DropRowRangeRequest, @@ -79,12 +82,15 @@ GenerateConsistencyTokenResponse, GetAuthorizedViewRequest, GetBackupRequest, + GetSchemaBundleRequest, GetSnapshotRequest, GetTableRequest, ListAuthorizedViewsRequest, ListAuthorizedViewsResponse, ListBackupsRequest, ListBackupsResponse, + ListSchemaBundlesRequest, + ListSchemaBundlesResponse, ListSnapshotsRequest, ListSnapshotsResponse, ListTablesRequest, @@ -101,6 +107,8 @@ UpdateAuthorizedViewMetadata, UpdateAuthorizedViewRequest, UpdateBackupRequest, + UpdateSchemaBundleMetadata, + UpdateSchemaBundleRequest, UpdateTableMetadata, UpdateTableRequest, ) @@ -126,7 +134,9 @@ ColumnFamily, EncryptionInfo, GcRule, + ProtoSchema, RestoreInfo, + SchemaBundle, Snapshot, Table, RestoreSourceType, @@ -186,12 +196,15 @@ "CreateAuthorizedViewRequest", "CreateBackupMetadata", "CreateBackupRequest", + "CreateSchemaBundleMetadata", + "CreateSchemaBundleRequest", "CreateTableFromSnapshotMetadata", "CreateTableFromSnapshotRequest", "CreateTableRequest", "DataBoostReadLocalWrites", "DeleteAuthorizedViewRequest", "DeleteBackupRequest", + "DeleteSchemaBundleRequest", "DeleteSnapshotRequest", "DeleteTableRequest", "DropRowRangeRequest", @@ -199,12 +212,15 @@ "GenerateConsistencyTokenResponse", "GetAuthorizedViewRequest", "GetBackupRequest", + "GetSchemaBundleRequest", "GetSnapshotRequest", "GetTableRequest", "ListAuthorizedViewsRequest", "ListAuthorizedViewsResponse", "ListBackupsRequest", "ListBackupsResponse", + "ListSchemaBundlesRequest", + "ListSchemaBundlesResponse", "ListSnapshotsRequest", "ListSnapshotsResponse", "ListTablesRequest", @@ -221,6 +237,8 @@ "UpdateAuthorizedViewMetadata", "UpdateAuthorizedViewRequest", "UpdateBackupRequest", + "UpdateSchemaBundleMetadata", + "UpdateSchemaBundleRequest", "UpdateTableMetadata", "UpdateTableRequest", "OperationProgress", @@ -240,7 +258,9 @@ "ColumnFamily", "EncryptionInfo", "GcRule", + "ProtoSchema", "RestoreInfo", + "SchemaBundle", "Snapshot", "Table", "RestoreSourceType", diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py index 4cadfb1bf436..d6403fc2a72a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py @@ -74,6 +74,14 @@ "UpdateAuthorizedViewRequest", "UpdateAuthorizedViewMetadata", "DeleteAuthorizedViewRequest", + "CreateSchemaBundleRequest", + "CreateSchemaBundleMetadata", + "UpdateSchemaBundleRequest", + "UpdateSchemaBundleMetadata", + "GetSchemaBundleRequest", + "ListSchemaBundlesRequest", + "ListSchemaBundlesResponse", + "DeleteSchemaBundleRequest", }, ) @@ -1484,7 +1492,7 @@ class CreateAuthorizedViewMetadata(proto.Message): Attributes: original_request (google.cloud.bigtable_admin_v2.types.CreateAuthorizedViewRequest): The request that prompted the initiation of - this CreateInstance operation. + this CreateAuthorizedView operation. request_time (google.protobuf.timestamp_pb2.Timestamp): The time at which the original request was received. @@ -1536,7 +1544,7 @@ class ListAuthorizedViewsRequest(proto.Message): previous call. view (google.cloud.bigtable_admin_v2.types.AuthorizedView.ResponseView): Optional. 
The resource_view to be applied to the returned - views' fields. Default to NAME_ONLY. + AuthorizedViews' fields. Default to NAME_ONLY. """ parent: str = proto.Field( @@ -1620,8 +1628,8 @@ class UpdateAuthorizedViewRequest(proto.Message): authorized_view (google.cloud.bigtable_admin_v2.types.AuthorizedView): Required. The AuthorizedView to update. The ``name`` in ``authorized_view`` is used to identify the AuthorizedView. - AuthorizedView name must in this format - projects//instances//tables//authorizedViews/ + AuthorizedView name must in this format: + ``projects/{project}/instances/{instance}/tables/{table}/authorizedViews/{authorized_view}``. update_mask (google.protobuf.field_mask_pb2.FieldMask): Optional. The list of fields to update. A mask specifying which fields in the AuthorizedView resource should be @@ -1712,4 +1720,247 @@ class DeleteAuthorizedViewRequest(proto.Message): ) +class CreateSchemaBundleRequest(proto.Message): + r"""The request for + [CreateSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.CreateSchemaBundle]. + + Attributes: + parent (str): + Required. The parent resource where this schema bundle will + be created. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + schema_bundle_id (str): + Required. The unique ID to use for the schema + bundle, which will become the final component of + the schema bundle's resource name. + schema_bundle (google.cloud.bigtable_admin_v2.types.SchemaBundle): + Required. The schema bundle to create. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + schema_bundle_id: str = proto.Field( + proto.STRING, + number=2, + ) + schema_bundle: gba_table.SchemaBundle = proto.Field( + proto.MESSAGE, + number=3, + message=gba_table.SchemaBundle, + ) + + +class CreateSchemaBundleMetadata(proto.Message): + r"""The metadata for the Operation returned by + [CreateSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.CreateSchemaBundle]. + + Attributes: + name (str): + The unique name identifying this schema bundle. Values are + of the form + ``projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}`` + start_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which this operation started. + end_time (google.protobuf.timestamp_pb2.Timestamp): + If set, the time at which this operation + finished or was canceled. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + start_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + end_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + + +class UpdateSchemaBundleRequest(proto.Message): + r"""The request for + [UpdateSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.UpdateSchemaBundle]. + + Attributes: + schema_bundle (google.cloud.bigtable_admin_v2.types.SchemaBundle): + Required. The schema bundle to update. + + The schema bundle's ``name`` field is used to identify the + schema bundle to update. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}`` + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Optional. The list of fields to update. + ignore_warnings (bool): + Optional. If set, ignore the safety checks + when updating the Schema Bundle. The safety + checks are: + + - The new Schema Bundle is backwards compatible + with the existing Schema Bundle. 
+ """ + + schema_bundle: gba_table.SchemaBundle = proto.Field( + proto.MESSAGE, + number=1, + message=gba_table.SchemaBundle, + ) + update_mask: field_mask_pb2.FieldMask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + ignore_warnings: bool = proto.Field( + proto.BOOL, + number=3, + ) + + +class UpdateSchemaBundleMetadata(proto.Message): + r"""The metadata for the Operation returned by + [UpdateSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.UpdateSchemaBundle]. + + Attributes: + name (str): + The unique name identifying this schema bundle. Values are + of the form + ``projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}`` + start_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which this operation started. + end_time (google.protobuf.timestamp_pb2.Timestamp): + If set, the time at which this operation + finished or was canceled. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + start_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + end_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + + +class GetSchemaBundleRequest(proto.Message): + r"""The request for + [GetSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.GetSchemaBundle]. + + Attributes: + name (str): + Required. The unique name of the schema bundle to retrieve. + Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}`` + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class ListSchemaBundlesRequest(proto.Message): + r"""The request for + [ListSchemaBundles][google.bigtable.admin.v2.BigtableTableAdmin.ListSchemaBundles]. + + Attributes: + parent (str): + Required. The parent, which owns this collection of schema + bundles. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + page_size (int): + The maximum number of schema bundles to + return. If the value is positive, the server may + return at most this value. If unspecified, the + server will return the maximum allowed page + size. + page_token (str): + A page token, received from a previous ``ListSchemaBundles`` + call. Provide this to retrieve the subsequent page. + + When paginating, all other parameters provided to + ``ListSchemaBundles`` must match the call that provided the + page token. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + page_size: int = proto.Field( + proto.INT32, + number=2, + ) + page_token: str = proto.Field( + proto.STRING, + number=3, + ) + + +class ListSchemaBundlesResponse(proto.Message): + r"""The response for + [ListSchemaBundles][google.bigtable.admin.v2.BigtableTableAdmin.ListSchemaBundles]. + + Attributes: + schema_bundles (MutableSequence[google.cloud.bigtable_admin_v2.types.SchemaBundle]): + The schema bundles from the specified table. + next_page_token (str): + A token, which can be sent as ``page_token`` to retrieve the + next page. If this field is omitted, there are no subsequent + pages. 
+ """ + + @property + def raw_page(self): + return self + + schema_bundles: MutableSequence[gba_table.SchemaBundle] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gba_table.SchemaBundle, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +class DeleteSchemaBundleRequest(proto.Message): + r"""The request for + [DeleteSchemaBundle][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSchemaBundle]. + + Attributes: + name (str): + Required. The unique name of the schema bundle to delete. + Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}`` + etag (str): + Optional. The etag of the schema bundle. + If this is provided, it must match the server's + etag. The server returns an ABORTED error on a + mismatched etag. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + etag: str = proto.Field( + proto.STRING, + number=2, + ) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py index 730b54ce3efe..44e9463d4a8b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py @@ -39,6 +39,8 @@ "Snapshot", "Backup", "BackupInfo", + "ProtoSchema", + "SchemaBundle", }, ) @@ -1025,4 +1027,72 @@ class BackupInfo(proto.Message): ) +class ProtoSchema(proto.Message): + r"""Represents a protobuf schema. + + Attributes: + proto_descriptors (bytes): + Required. Contains a protobuf-serialized + `google.protobuf.FileDescriptorSet `__, + which could include multiple proto files. To generate it, + `install `__ and + run ``protoc`` with ``--include_imports`` and + ``--descriptor_set_out``. For example, to generate for + moon/shot/app.proto, run + + :: + + $protoc --proto_path=/app_path --proto_path=/lib_path \ + --include_imports \ + --descriptor_set_out=descriptors.pb \ + moon/shot/app.proto + + For more details, see protobuffer `self + description `__. + """ + + proto_descriptors: bytes = proto.Field( + proto.BYTES, + number=2, + ) + + +class SchemaBundle(proto.Message): + r"""A named collection of related schemas. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + name (str): + Identifier. The unique name identifying this schema bundle. + Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}`` + proto_schema (google.cloud.bigtable_admin_v2.types.ProtoSchema): + Schema for Protobufs. + + This field is a member of `oneof`_ ``type``. + etag (str): + Optional. The etag for this schema bundle. + This may be sent on update and delete requests + to ensure the client has an up-to-date value + before proceeding. The server returns an ABORTED + error on a mismatched etag. 
+ """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + proto_schema: "ProtoSchema" = proto.Field( + proto.MESSAGE, + number=2, + oneof="type", + message="ProtoSchema", + ) + etag: str = proto.Field( + proto.STRING, + number=3, + ) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/types.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/types.py index 42935df3c0db..b6ea5341d362 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/types.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/types.py @@ -112,6 +112,14 @@ class Type(proto.Message): map_type (google.cloud.bigtable_admin_v2.types.Type.Map): Map + This field is a member of `oneof`_ ``kind``. + proto_type (google.cloud.bigtable_admin_v2.types.Type.Proto): + Proto + + This field is a member of `oneof`_ ``kind``. + enum_type (google.cloud.bigtable_admin_v2.types.Type.Enum): + Enum + This field is a member of `oneof`_ ``kind``. """ @@ -548,6 +556,52 @@ class OrderedCodeBytes(proto.Message): message="Type.Struct.Encoding", ) + class Proto(proto.Message): + r"""A protobuf message type. Values of type ``Proto`` are stored in + ``Value.bytes_value``. + + Attributes: + schema_bundle_id (str): + The ID of the schema bundle that this proto + is defined in. + message_name (str): + The fully qualified name of the protobuf + message, including package. In the format of + "foo.bar.Message". + """ + + schema_bundle_id: str = proto.Field( + proto.STRING, + number=1, + ) + message_name: str = proto.Field( + proto.STRING, + number=2, + ) + + class Enum(proto.Message): + r"""A protobuf enum type. Values of type ``Enum`` are stored in + ``Value.int_value``. + + Attributes: + schema_bundle_id (str): + The ID of the schema bundle that this enum is + defined in. + enum_name (str): + The fully qualified name of the protobuf enum + message, including package. In the format of + "foo.bar.EnumMessage". + """ + + schema_bundle_id: str = proto.Field( + proto.STRING, + number=1, + ) + enum_name: str = proto.Field( + proto.STRING, + number=2, + ) + class Array(proto.Message): r"""An ordered list of elements of a given type. Values of type ``Array`` are stored in ``Value.array_value``. 
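# A minimal usage sketch for the new Proto/Enum value types introduced above.
# This is an illustrative assumption, not generated code from this patch: the
# schema bundle ID "my_bundle" is a hypothetical placeholder, the message/enum
# names mirror the docstring examples, and the import path assumes Type is
# re-exported from bigtable_admin_v2.types as in the existing package layout.
from google.cloud.bigtable_admin_v2.types import Type

# A column value type holding a serialized protobuf message defined in a
# schema bundle attached to the table (payload stored in Value.bytes_value).
proto_value_type = Type(
    proto_type=Type.Proto(
        schema_bundle_id="my_bundle",      # hypothetical schema bundle ID
        message_name="foo.bar.Message",    # fully qualified message name
    )
)

# A column value type holding a protobuf enum from the same bundle
# (payload stored in Value.int_value).
enum_value_type = Type(
    enum_type=Type.Enum(
        schema_bundle_id="my_bundle",
        enum_name="foo.bar.EnumMessage",
    )
)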
@@ -771,6 +825,18 @@ class HyperLogLogPlusPlusUniqueCount(proto.Message): oneof="kind", message=Map, ) + proto_type: Proto = proto.Field( + proto.MESSAGE, + number=13, + oneof="kind", + message=Proto, + ) + enum_type: Enum = proto.Field( + proto.MESSAGE, + number=14, + oneof="kind", + message=Enum, + ) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py index 3cb3d4de09ef..3a5a72c9c2a4 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py @@ -50,6 +50,7 @@ from .types.data import ColumnMetadata from .types.data import ColumnRange from .types.data import Family +from .types.data import Idempotency from .types.data import Mutation from .types.data import PartialResultSet from .types.data import ProtoFormat @@ -93,6 +94,7 @@ "FullReadStatsView", "GenerateInitialChangeStreamPartitionsRequest", "GenerateInitialChangeStreamPartitionsResponse", + "Idempotency", "MutateRowRequest", "MutateRowResponse", "MutateRowsRequest", diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py index 123c340faaa1..5cb3fbaa4305 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -1164,7 +1164,9 @@ async def read_modify_write_row( transformed into writes. Entries are applied in order, meaning that earlier rules will affect the results of later - ones. + ones. At least one entry must be + specified, and there can be at most + 100000 rules. This corresponds to the ``rules`` field on the ``request`` instance; if ``request`` is provided, this @@ -1281,10 +1283,11 @@ def generate_initial_change_stream_partitions( ) -> Awaitable[ AsyncIterable[bigtable.GenerateInitialChangeStreamPartitionsResponse] ]: - r"""NOTE: This API is intended to be used by Apache Beam BigtableIO. - Returns the current list of partitions that make up the table's + r"""Returns the current list of partitions that make up the table's change stream. The union of partitions will cover the entire keyspace. Partitions can be read with ``ReadChangeStream``. + NOTE: This API is only intended to be used by Apache Beam + BigtableIO. Args: request (Optional[Union[google.cloud.bigtable_v2.types.GenerateInitialChangeStreamPartitionsRequest, dict]]): @@ -1392,10 +1395,11 @@ def read_change_stream( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> Awaitable[AsyncIterable[bigtable.ReadChangeStreamResponse]]: - r"""NOTE: This API is intended to be used by Apache Beam - BigtableIO. Reads changes from a table's change stream. - Changes will reflect both user-initiated mutations and - mutations that are caused by garbage collection. + r"""Reads changes from a table's change stream. Changes + will reflect both user-initiated mutations and mutations + that are caused by garbage collection. + NOTE: This API is only intended to be used by Apache + Beam BigtableIO. 
Args: request (Optional[Union[google.cloud.bigtable_v2.types.ReadChangeStreamRequest, dict]]): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py index 902e435c5c8a..c35ea1514f85 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py @@ -1625,7 +1625,9 @@ def read_modify_write_row( transformed into writes. Entries are applied in order, meaning that earlier rules will affect the results of later - ones. + ones. At least one entry must be + specified, and there can be at most + 100000 rules. This corresponds to the ``rules`` field on the ``request`` instance; if ``request`` is provided, this @@ -1737,10 +1739,11 @@ def generate_initial_change_stream_partitions( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> Iterable[bigtable.GenerateInitialChangeStreamPartitionsResponse]: - r"""NOTE: This API is intended to be used by Apache Beam BigtableIO. - Returns the current list of partitions that make up the table's + r"""Returns the current list of partitions that make up the table's change stream. The union of partitions will cover the entire keyspace. Partitions can be read with ``ReadChangeStream``. + NOTE: This API is only intended to be used by Apache Beam + BigtableIO. Args: request (Union[google.cloud.bigtable_v2.types.GenerateInitialChangeStreamPartitionsRequest, dict]): @@ -1847,10 +1850,11 @@ def read_change_stream( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> Iterable[bigtable.ReadChangeStreamResponse]: - r"""NOTE: This API is intended to be used by Apache Beam - BigtableIO. Reads changes from a table's change stream. - Changes will reflect both user-initiated mutations and - mutations that are caused by garbage collection. + r"""Reads changes from a table's change stream. Changes + will reflect both user-initiated mutations and mutations + that are caused by garbage collection. + NOTE: This API is only intended to be used by Apache + Beam BigtableIO. Args: request (Union[google.cloud.bigtable_v2.types.ReadChangeStreamRequest, dict]): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py index a3c0865f1dcd..309e72662282 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py @@ -538,10 +538,11 @@ def generate_initial_change_stream_partitions( r"""Return a callable for the generate initial change stream partitions method over gRPC. - NOTE: This API is intended to be used by Apache Beam BigtableIO. Returns the current list of partitions that make up the table's change stream. The union of partitions will cover the entire keyspace. Partitions can be read with ``ReadChangeStream``. + NOTE: This API is only intended to be used by Apache Beam + BigtableIO. Returns: Callable[[~.GenerateInitialChangeStreamPartitionsRequest], @@ -571,10 +572,11 @@ def read_change_stream( ]: r"""Return a callable for the read change stream method over gRPC. - NOTE: This API is intended to be used by Apache Beam - BigtableIO. 
Reads changes from a table's change stream. - Changes will reflect both user-initiated mutations and - mutations that are caused by garbage collection. + Reads changes from a table's change stream. Changes + will reflect both user-initiated mutations and mutations + that are caused by garbage collection. + NOTE: This API is only intended to be used by Apache + Beam BigtableIO. Returns: Callable[[~.ReadChangeStreamRequest], diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py index cebee0208c5d..3f7df3c4ef3d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py @@ -552,10 +552,11 @@ def generate_initial_change_stream_partitions( r"""Return a callable for the generate initial change stream partitions method over gRPC. - NOTE: This API is intended to be used by Apache Beam BigtableIO. Returns the current list of partitions that make up the table's change stream. The union of partitions will cover the entire keyspace. Partitions can be read with ``ReadChangeStream``. + NOTE: This API is only intended to be used by Apache Beam + BigtableIO. Returns: Callable[[~.GenerateInitialChangeStreamPartitionsRequest], @@ -585,10 +586,11 @@ def read_change_stream( ]: r"""Return a callable for the read change stream method over gRPC. - NOTE: This API is intended to be used by Apache Beam - BigtableIO. Reads changes from a table's change stream. - Changes will reflect both user-initiated mutations and - mutations that are caused by garbage collection. + Reads changes from a table's change stream. Changes + will reflect both user-initiated mutations and mutations + that are caused by garbage collection. + NOTE: This API is only intended to be used by Apache + Beam BigtableIO. Returns: Callable[[~.ReadChangeStreamRequest], diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py index 629dd6c90750..bd3c361549ee 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py @@ -45,6 +45,7 @@ ColumnMetadata, ColumnRange, Family, + Idempotency, Mutation, PartialResultSet, ProtoFormat, @@ -110,6 +111,7 @@ "ColumnMetadata", "ColumnRange", "Family", + "Idempotency", "Mutation", "PartialResultSet", "ProtoFormat", diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py index f941c867a8a8..0e7ac1df3a1e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py @@ -197,27 +197,12 @@ class ReadRowsResponse(proto.Message): row key, allowing the client to skip that work on a retry. request_stats (google.cloud.bigtable_v2.types.RequestStats): - If requested, provide enhanced query performance statistics. - The semantics dictate: - - - request_stats is empty on every (streamed) response, - except - - request_stats has non-empty information after all chunks - have been streamed, where the ReadRowsResponse message - only contains request_stats. 
- - - For example, if a read request would have returned an - empty response instead a single ReadRowsResponse is - streamed with empty chunks and request_stats filled. - - Visually, response messages will stream as follows: ... -> - {chunks: [...]} -> {chunks: [], request_stats: {...}} - \_\ **/ \_**\ \__________/ Primary response Trailer of - RequestStats info - - Or if the read did not return any values: {chunks: [], - request_stats: {...}} \________________________________/ - Trailer of RequestStats info + If requested, return enhanced query performance statistics. + The field request_stats is empty in a streamed response + unless the ReadRowsResponse message contains request_stats + in the last message of the stream. Always returned when + requested, even when the read request returns an empty + response. """ class CellChunk(proto.Message): @@ -457,6 +442,10 @@ class MutateRowRequest(proto.Message): meaning that earlier mutations can be masked by later ones. Must contain at least one entry and at most 100000. + idempotency (google.cloud.bigtable_v2.types.Idempotency): + If set consistently across retries, prevents + this mutation from being double applied to + aggregate column families within a 15m window. """ table_name: str = proto.Field( @@ -480,6 +469,11 @@ class MutateRowRequest(proto.Message): number=3, message=data.Mutation, ) + idempotency: data.Idempotency = proto.Field( + proto.MESSAGE, + number=8, + message=data.Idempotency, + ) class MutateRowResponse(proto.Message): @@ -529,6 +523,10 @@ class Entry(proto.Message): order, meaning that earlier mutations can be masked by later ones. You must specify at least one mutation. + idempotency (google.cloud.bigtable_v2.types.Idempotency): + If set consistently across retries, prevents + this mutation from being double applied to + aggregate column families within a 15m window. """ row_key: bytes = proto.Field( @@ -540,6 +538,11 @@ class Entry(proto.Message): number=2, message=data.Mutation, ) + idempotency: data.Idempotency = proto.Field( + proto.MESSAGE, + number=3, + message=data.Idempotency, + ) table_name: str = proto.Field( proto.STRING, @@ -640,8 +643,8 @@ class RateLimitInfo(proto.Message): ``factor`` until another ``period`` has passed. The client can measure its load using any unit that's - comparable over time For example, QPS can be used as long as - each request involves a similar amount of work. + comparable over time. For example, QPS can be used as long + as each request involves a similar amount of work. """ period: duration_pb2.Duration = proto.Field( @@ -807,7 +810,9 @@ class ReadModifyWriteRowRequest(proto.Message): row's contents are to be transformed into writes. Entries are applied in order, meaning that earlier rules will affect the results of - later ones. + later ones. At least one entry must be + specified, and there can be at most 100000 + rules. """ table_name: str = proto.Field( @@ -935,10 +940,10 @@ class ReadChangeStreamRequest(proto.Message): the stream as part of ``Heartbeat`` and ``CloseStream`` messages. - If a single token is provided, the token’s partition must - exactly match the request’s partition. If multiple tokens + If a single token is provided, the token's partition must + exactly match the request's partition. If multiple tokens are provided, as in the case of a partition merge, the union - of the token partitions must exactly cover the request’s + of the token partitions must exactly cover the request's partition. Otherwise, INVALID_ARGUMENT will be returned. 
This field is a member of `oneof`_ ``start_from``. @@ -1119,7 +1124,7 @@ class DataChange(proto.Message): a record that will be delivered in the future on the stream. It is possible that, under particular circumstances that a future record - has a timestamp is is lower than a previously + has a timestamp that is lower than a previously seen timestamp. For an example usage see https://beam.apache.org/documentation/basics/#watermarks """ @@ -1203,7 +1208,7 @@ class Heartbeat(proto.Message): a record that will be delivered in the future on the stream. It is possible that, under particular circumstances that a future record - has a timestamp is is lower than a previously + has a timestamp that is lower than a previously seen timestamp. For an example usage see https://beam.apache.org/documentation/basics/#watermarks """ @@ -1226,12 +1231,25 @@ class CloseStream(proto.Message): if there was an ``end_time`` specified). If ``continuation_tokens`` & ``new_partitions`` are present, then a change in partitioning requires the client to open a new stream for each token to resume - reading. Example: [B, D) ends \| v new_partitions: [A, C) [C, E) - continuation_tokens.partitions: [B,C) [C,D) ^---^ ^---^ ^ ^ \| \| \| - StreamContinuationToken 2 \| StreamContinuationToken 1 To read the - new partition [A,C), supply the continuation tokens whose ranges - cover the new partition, for example ContinuationToken[A,B) & - ContinuationToken[B,C). + reading. Example: + + :: + + [B, D) ends + | + v + new_partitions: [A, C) [C, E) + continuation_tokens.partitions: [B,C) [C,D) + ^---^ ^---^ + ^ ^ + | | + | StreamContinuationToken 2 + | + StreamContinuationToken 1 + + To read the new partition [A,C), supply the continuation tokens + whose ranges cover the new partition, for example + ContinuationToken[A,B) & ContinuationToken[B,C). Attributes: status (google.rpc.status_pb2.Status): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py index cecbc138af8a..ad7e382f7579 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py @@ -51,6 +51,7 @@ "ProtoRows", "ProtoRowsBatch", "PartialResultSet", + "Idempotency", }, ) @@ -240,7 +241,8 @@ class Value(proto.Message): This field is a member of `oneof`_ ``kind``. float_value (float): Represents a typed value transported as a - floating point number. + floating point number. Does not support NaN or + infinities. This field is a member of `oneof`_ ``kind``. timestamp_value (google.protobuf.timestamp_pb2.Timestamp): @@ -1609,4 +1611,36 @@ class PartialResultSet(proto.Message): ) +class Idempotency(proto.Message): + r"""Parameters on mutations where clients want to ensure + idempotency (i.e. at-most-once semantics). This is currently + only needed for certain aggregate types. + + Attributes: + token (bytes): + Unique token used to identify replays of this + mutation. Must be at least 8 bytes long. + start_time (google.protobuf.timestamp_pb2.Timestamp): + Client-assigned timestamp when the mutation's + first attempt was sent. Used to reject mutations + that arrive after idempotency protection may + have expired. May cause spurious rejections if + clock skew is too high. + + Leave unset or zero to always accept the + mutation, at the risk of double counting if the + protection for previous attempts has expired. 
+ """ + + token: bytes = proto.Field( + proto.BYTES, + number=1, + ) + start_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/request_stats.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/request_stats.py index 8548996efd2d..540e6548d052 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/request_stats.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/request_stats.py @@ -142,11 +142,10 @@ class FullReadStatsView(proto.Message): class RequestStats(proto.Message): - r"""RequestStats is the container for additional information pertaining - to a single request, helpful for evaluating the performance of the - sent request. Currently, there are the following supported methods: - - - google.bigtable.v2.ReadRows + r"""RequestStats is the container for additional information + pertaining to a single request, helpful for evaluating the + performance of the sent request. Currently, the following method + is supported: google.bigtable.v2.ReadRows .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/response_params.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/response_params.py index 2c04dadaa41c..fb373d0559df 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/response_params.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/response_params.py @@ -29,10 +29,7 @@ class ResponseParams(proto.Message): - r"""Response metadata proto This is an experimental feature that will be - used to get zone_id and cluster_id from response trailers to tag the - metrics. This should not be used by customers directly - + r"""Response metadata proto .. 
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields diff --git a/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py b/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py index 352e63a93949..6265c176871a 100644 --- a/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py +++ b/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py @@ -48,6 +48,7 @@ class bigtable_adminCallTransformer(cst.CSTTransformer): 'create_instance': ('parent', 'instance_id', 'instance', 'clusters', ), 'create_logical_view': ('parent', 'logical_view_id', 'logical_view', ), 'create_materialized_view': ('parent', 'materialized_view_id', 'materialized_view', ), + 'create_schema_bundle': ('parent', 'schema_bundle_id', 'schema_bundle', ), 'create_table': ('parent', 'table_id', 'table', 'initial_splits', ), 'create_table_from_snapshot': ('parent', 'table_id', 'source_snapshot', ), 'delete_app_profile': ('name', 'ignore_warnings', ), @@ -57,6 +58,7 @@ class bigtable_adminCallTransformer(cst.CSTTransformer): 'delete_instance': ('name', ), 'delete_logical_view': ('name', 'etag', ), 'delete_materialized_view': ('name', 'etag', ), + 'delete_schema_bundle': ('name', 'etag', ), 'delete_snapshot': ('name', ), 'delete_table': ('name', ), 'drop_row_range': ('name', 'row_key_prefix', 'delete_all_data_from_table', ), @@ -69,6 +71,7 @@ class bigtable_adminCallTransformer(cst.CSTTransformer): 'get_instance': ('name', ), 'get_logical_view': ('name', ), 'get_materialized_view': ('name', ), + 'get_schema_bundle': ('name', ), 'get_snapshot': ('name', ), 'get_table': ('name', 'view', ), 'list_app_profiles': ('parent', 'page_size', 'page_token', ), @@ -79,6 +82,7 @@ class bigtable_adminCallTransformer(cst.CSTTransformer): 'list_instances': ('parent', 'page_token', ), 'list_logical_views': ('parent', 'page_size', 'page_token', ), 'list_materialized_views': ('parent', 'page_size', 'page_token', ), + 'list_schema_bundles': ('parent', 'page_size', 'page_token', ), 'list_snapshots': ('parent', 'page_size', 'page_token', ), 'list_tables': ('parent', 'view', 'page_size', 'page_token', ), 'modify_column_families': ('name', 'modifications', 'ignore_warnings', ), @@ -96,6 +100,7 @@ class bigtable_adminCallTransformer(cst.CSTTransformer): 'update_instance': ('display_name', 'name', 'state', 'type_', 'labels', 'create_time', 'satisfies_pzs', 'satisfies_pzi', ), 'update_logical_view': ('logical_view', 'update_mask', ), 'update_materialized_view': ('materialized_view', 'update_mask', ), + 'update_schema_bundle': ('schema_bundle', 'update_mask', 'ignore_warnings', ), 'update_table': ('table', 'update_mask', 'ignore_warnings', ), } diff --git a/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py b/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py index 70e0795e2c02..e65ad39a44d7 100644 --- a/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py +++ b/packages/google-cloud-bigtable/scripts/fixup_bigtable_v2_keywords.py @@ -42,7 +42,7 @@ class bigtableCallTransformer(cst.CSTTransformer): 'check_and_mutate_row': ('row_key', 'table_name', 'authorized_view_name', 'app_profile_id', 'predicate_filter', 'true_mutations', 'false_mutations', ), 'execute_query': ('instance_name', 'query', 'params', 'app_profile_id', 'prepared_query', 'proto_format', 'resume_token', ), 'generate_initial_change_stream_partitions': ('table_name', 'app_profile_id', ), - 'mutate_row': ('row_key', 
'mutations', 'table_name', 'authorized_view_name', 'app_profile_id', ), + 'mutate_row': ('row_key', 'mutations', 'table_name', 'authorized_view_name', 'app_profile_id', 'idempotency', ), 'mutate_rows': ('entries', 'table_name', 'authorized_view_name', 'app_profile_id', ), 'ping_and_warm': ('name', 'app_profile_id', ), 'prepare_query': ('instance_name', 'query', 'param_types', 'app_profile_id', 'proto_format', ), diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index 67b4302c9f1d..eba8e8d41ce4 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -12037,208 +12037,80 @@ async def test_test_iam_permissions_flattened_error_async(): ) -def test_create_table_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.create_table in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. - ) - client._transport._wrapped_methods[client._transport.create_table] = mock_rpc - - request = {} - client.create_table(request) - - # Establish that the underlying gRPC stub method was called. 
- assert mock_rpc.call_count == 1 - - client.create_table(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_create_table_rest_required_fields( - request_type=bigtable_table_admin.CreateTableRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request_init["table_id"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).create_table._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["parent"] = "parent_value" - jsonified_request["tableId"] = "table_id_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).create_table._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - assert "tableId" in jsonified_request - assert jsonified_request["tableId"] == "table_id_value" - +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.CreateSchemaBundleRequest, + dict, + ], +) +def test_create_schema_bundle(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport=transport, ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = gba_table.Table() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = gba_table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.create_table(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_schema_bundle), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.create_schema_bundle(request) -def test_create_table_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = bigtable_table_admin.CreateSchemaBundleRequest() + assert args[0] == request - unset_fields = transport.create_table._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(()) - & set( - ( - "parent", - "tableId", - "table", - ) - ) - ) + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) -def test_create_table_rest_flattened(): +def test_create_schema_bundle_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport="grpc", ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = gba_table.Table() - - # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/instances/sample2"} - - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - table_id="table_id_value", - table=gba_table.Table(name="name_value"), - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = gba_table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.create_table(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*}/tables" % client.transport._host, - args[1], - ) - - -def test_create_table_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.CreateSchemaBundleRequest( + parent="parent_value", + schema_bundle_id="schema_bundle_id_value", ) - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_table( - bigtable_table_admin.CreateTableRequest(), + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_schema_bundle), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.create_schema_bundle(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.CreateSchemaBundleRequest( parent="parent_value", - table_id="table_id_value", - table=gba_table.Table(name="name_value"), + schema_bundle_id="schema_bundle_id_value", ) -def test_create_table_from_snapshot_rest_use_cached_wrapped_rpc(): +def test_create_schema_bundle_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport="grpc", ) # Should wrap all calls on client creation @@ -12247,8 +12119,7 @@ def test_create_table_from_snapshot_rest_use_cached_wrapped_rpc(): # Ensure method has been cached assert ( - client._transport.create_table_from_snapshot - in client._transport._wrapped_methods + client._transport.create_schema_bundle in client._transport._wrapped_methods ) # Replace cached wrapped function with mock @@ -12257,449 +12128,351 @@ def test_create_table_from_snapshot_rest_use_cached_wrapped_rpc(): "foo" # operation_request.operation in compute client(s) expect a string. ) client._transport._wrapped_methods[ - client._transport.create_table_from_snapshot + client._transport.create_schema_bundle ] = mock_rpc - request = {} - client.create_table_from_snapshot(request) + client.create_schema_bundle(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. 
+ # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() - client.create_table_from_snapshot(request) + client.create_schema_bundle(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_create_table_from_snapshot_rest_required_fields( - request_type=bigtable_table_admin.CreateTableFromSnapshotRequest, +@pytest.mark.asyncio +async def test_create_schema_bundle_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", ): - transport_class = transports.BigtableTableAdminRestTransport + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) - request_init = {} - request_init["parent"] = "" - request_init["table_id"] = "" - request_init["source_snapshot"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - # verify fields with default values are dropped + # Ensure method has been cached + assert ( + client._client._transport.create_schema_bundle + in client._client._transport._wrapped_methods + ) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).create_table_from_snapshot._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.create_schema_bundle + ] = mock_rpc - # verify required fields with default values are now present + request = {} + await client.create_schema_bundle(request) - jsonified_request["parent"] = "parent_value" - jsonified_request["tableId"] = "table_id_value" - jsonified_request["sourceSnapshot"] = "source_snapshot_value" + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).create_table_from_snapshot._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + await client.create_schema_bundle(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_create_schema_bundle_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.CreateSchemaBundleRequest, +): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_schema_bundle), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.create_schema_bundle(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = bigtable_table_admin.CreateSchemaBundleRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - assert "tableId" in jsonified_request - assert jsonified_request["tableId"] == "table_id_value" - assert "sourceSnapshot" in jsonified_request - assert jsonified_request["sourceSnapshot"] == "source_snapshot_value" +@pytest.mark.asyncio +async def test_create_schema_bundle_async_from_dict(): + await test_create_schema_bundle_async(request_type=dict) + + +def test_create_schema_bundle_field_headers(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", ) - request = request_type(**request_init) - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.CreateSchemaBundleRequest() - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) + request.parent = "parent_value" - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_schema_bundle), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_schema_bundle(request) - response = client.create_table_from_snapshot(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] -def test_create_table_from_snapshot_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials +@pytest.mark.asyncio +async def test_create_schema_bundle_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) - unset_fields = transport.create_table_from_snapshot._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(()) - & set( - ( - "parent", - "tableId", - "sourceSnapshot", - ) + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.CreateSchemaBundleRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_schema_bundle), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") ) - ) + await client.create_schema_bundle(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] -def test_create_table_from_snapshot_rest_flattened(): + +def test_create_schema_bundle_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - - # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/instances/sample2"} - - # get truthy value for each flattened field - mock_args = dict( + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_schema_bundle), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_schema_bundle( parent="parent_value", - table_id="table_id_value", - source_snapshot="source_snapshot_value", + schema_bundle_id="schema_bundle_id_value", + schema_bundle=table.SchemaBundle(name="name_value"), ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.create_table_from_snapshot(**mock_args) # Establish that the underlying call was made with the expected # request object values. 
- assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot" - % client.transport._host, - args[1], - ) + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].schema_bundle_id + mock_val = "schema_bundle_id_value" + assert arg == mock_val + arg = args[0].schema_bundle + mock_val = table.SchemaBundle(name="name_value") + assert arg == mock_val -def test_create_table_from_snapshot_rest_flattened_error(transport: str = "rest"): +def test_create_schema_bundle_flattened_error(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.create_table_from_snapshot( - bigtable_table_admin.CreateTableFromSnapshotRequest(), + client.create_schema_bundle( + bigtable_table_admin.CreateSchemaBundleRequest(), parent="parent_value", - table_id="table_id_value", - source_snapshot="source_snapshot_value", + schema_bundle_id="schema_bundle_id_value", + schema_bundle=table.SchemaBundle(name="name_value"), ) -def test_list_tables_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() +@pytest.mark.asyncio +async def test_create_schema_bundle_flattened_async(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) - # Ensure method has been cached - assert client._transport.list_tables in client._transport._wrapped_methods + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_schema_bundle), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_schema_bundle( + parent="parent_value", + schema_bundle_id="schema_bundle_id_value", + schema_bundle=table.SchemaBundle(name="name_value"), ) - client._transport._wrapped_methods[client._transport.list_tables] = mock_rpc - request = {} - client.list_tables(request) + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].schema_bundle_id + mock_val = "schema_bundle_id_value" + assert arg == mock_val + arg = args[0].schema_bundle + mock_val = table.SchemaBundle(name="name_value") + assert arg == mock_val - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - client.list_tables(request) +@pytest.mark.asyncio +async def test_create_schema_bundle_flattened_error_async(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_list_tables_rest_required_fields( - request_type=bigtable_table_admin.ListTablesRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_tables._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["parent"] = "parent_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_tables._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set( - ( - "page_size", - "page_token", - "view", - ) - ) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.ListTablesResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable_table_admin.ListTablesResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.list_tables(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_list_tables_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.list_tables._get_unset_required_fields({}) - assert set(unset_fields) == ( - set( - ( - "pageSize", - "pageToken", - "view", - ) + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_schema_bundle( + bigtable_table_admin.CreateSchemaBundleRequest(), + parent="parent_value", + schema_bundle_id="schema_bundle_id_value", + schema_bundle=table.SchemaBundle(name="name_value"), ) - & set(("parent",)) - ) -def test_list_tables_rest_flattened(): +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.UpdateSchemaBundleRequest, + dict, + ], +) +def test_update_schema_bundle(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport=transport, ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.ListTablesResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/instances/sample2"} - - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_table_admin.ListTablesResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.list_tables(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*}/tables" % client.transport._host, - args[1], - ) + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.update_schema_bundle), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.update_schema_bundle(request) -def test_list_tables_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = bigtable_table_admin.UpdateSchemaBundleRequest() + assert args[0] == request - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_tables( - bigtable_table_admin.ListTablesRequest(), - parent="parent_value", - ) + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) -def test_list_tables_rest_pager(transport: str = "rest"): +def test_update_schema_bundle_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + transport="grpc", ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # TODO(kbandes): remove this mock unless there's a good reason for it. - # with mock.patch.object(path_template, 'transcode') as transcode: - # Set the response as a series of pages - response = ( - bigtable_table_admin.ListTablesResponse( - tables=[ - table.Table(), - table.Table(), - table.Table(), - ], - next_page_token="abc", - ), - bigtable_table_admin.ListTablesResponse( - tables=[], - next_page_token="def", - ), - bigtable_table_admin.ListTablesResponse( - tables=[ - table.Table(), - ], - next_page_token="ghi", - ), - bigtable_table_admin.ListTablesResponse( - tables=[ - table.Table(), - table.Table(), - ], - ), - ) - # Two responses for two calls - response = response + response + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.UpdateSchemaBundleRequest() - # Wrap the values into proper Response objs - response = tuple( - bigtable_table_admin.ListTablesResponse.to_json(x) for x in response + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_schema_bundle), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = response_val.encode("UTF-8") - return_val.status_code = 200 - req.side_effect = return_values - - sample_request = {"parent": "projects/sample1/instances/sample2"} - - pager = client.list_tables(request=sample_request) - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, table.Table) for i in results) - - pages = list(client.list_tables(request=sample_request).pages) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token + client.update_schema_bundle(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.UpdateSchemaBundleRequest() -def test_get_table_rest_use_cached_wrapped_rpc(): +def test_update_schema_bundle_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport="grpc", ) # Should wrap all calls on client creation @@ -12707,375 +12480,360 @@ def test_get_table_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.get_table in client._transport._wrapped_methods + assert ( + client._transport.update_schema_bundle in client._transport._wrapped_methods + ) # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[client._transport.get_table] = mock_rpc - + client._transport._wrapped_methods[ + client._transport.update_schema_bundle + ] = mock_rpc request = {} - client.get_table(request) + client.update_schema_bundle(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - client.get_table(request) + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. 
+ # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.update_schema_bundle(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_get_table_rest_required_fields( - request_type=bigtable_table_admin.GetTableRequest, +@pytest.mark.asyncio +async def test_update_schema_bundle_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", ): - transport_class = transports.BigtableTableAdminRestTransport + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - # verify fields with default values are dropped + # Ensure method has been cached + assert ( + client._client._transport.update_schema_bundle + in client._client._transport._wrapped_methods + ) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_table._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.update_schema_bundle + ] = mock_rpc - # verify required fields with default values are now present + request = {} + await client.update_schema_bundle(request) - jsonified_request["name"] = "name_value" + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).get_table._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("view",)) - jsonified_request.update(unset_fields) + # Operation methods call wrapper_fn to build a cached + # client._transport.operations_client instance on first rpc call. + # Subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" + await client.update_schema_bundle(request) - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_update_schema_bundle_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.UpdateSchemaBundleRequest, +): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, ) - request = request_type(**request_init) - # Designate an appropriate value for the returned response. - return_value = table.Table() - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() - response_value = Response() - response_value.status_code = 200 + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_schema_bundle), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.update_schema_bundle(request) - # Convert return value to protobuf type - return_value = table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = bigtable_table_admin.UpdateSchemaBundleRequest() + assert args[0] == request - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) - response = client.get_table(request) - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params +@pytest.mark.asyncio +async def test_update_schema_bundle_async_from_dict(): + await test_update_schema_bundle_async(request_type=dict) -def test_get_table_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials +def test_update_schema_bundle_field_headers(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), ) - unset_fields = transport.get_table._get_unset_required_fields({}) - assert set(unset_fields) == (set(("view",)) & set(("name",))) + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.UpdateSchemaBundleRequest() + request.schema_bundle.name = "name_value" -def test_get_table_rest_flattened(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_schema_bundle), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.update_schema_bundle(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "schema_bundle.name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_update_schema_bundle_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = table.Table() + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.UpdateSchemaBundleRequest() - # get arguments that satisfy an http rule for this method - sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request.schema_bundle.name = "name_value" - # get truthy value for each flattened field - mock_args = dict( - name="name_value", + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_schema_bundle), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") ) - mock_args.update(sample_request) + await client.update_schema_bundle(request) - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request - client.get_table(**mock_args) + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "schema_bundle.name=name_value", + ) in kw["metadata"] + + +def test_update_schema_bundle_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_schema_bundle), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_schema_bundle( + schema_bundle=table.SchemaBundle(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) # Establish that the underlying call was made with the expected # request object values. 
- assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/tables/*}" % client.transport._host, - args[1], - ) + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].schema_bundle + mock_val = table.SchemaBundle(name="name_value") + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + assert arg == mock_val -def test_get_table_rest_flattened_error(transport: str = "rest"): +def test_update_schema_bundle_flattened_error(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.get_table( - bigtable_table_admin.GetTableRequest(), - name="name_value", + client.update_schema_bundle( + bigtable_table_admin.UpdateSchemaBundleRequest(), + schema_bundle=table.SchemaBundle(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) -def test_update_table_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() +@pytest.mark.asyncio +async def test_update_schema_bundle_flattened_async(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) - # Ensure method has been cached - assert client._transport.update_table in client._transport._wrapped_methods + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_schema_bundle), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_schema_bundle( + schema_bundle=table.SchemaBundle(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) - client._transport._wrapped_methods[client._transport.update_table] = mock_rpc - - request = {} - client.update_table(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.update_table(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].schema_bundle + mock_val = table.SchemaBundle(name="name_value") + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + assert arg == mock_val -def test_update_table_rest_required_fields( - request_type=bigtable_table_admin.UpdateTableRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - request_init = {} - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) +@pytest.mark.asyncio +async def test_update_schema_bundle_flattened_error_async(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).update_table._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).update_table._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set( - ( - "ignore_warnings", - "update_mask", + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_schema_bundle( + bigtable_table_admin.UpdateSchemaBundleRequest(), + schema_bundle=table.SchemaBundle(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) - ) - jsonified_request.update(unset_fields) - # verify required fields with non-default values are left alone +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.GetSchemaBundleRequest, + dict, + ], +) +def test_get_schema_bundle(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport=transport, ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "patch", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.update_table(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_schema_bundle), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = table.SchemaBundle( + name="name_value", + etag="etag_value", + ) + response = client.get_schema_bundle(request) -def test_update_table_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = bigtable_table_admin.GetSchemaBundleRequest() + assert args[0] == request - unset_fields = transport.update_table._get_unset_required_fields({}) - assert set(unset_fields) == ( - set( - ( - "ignoreWarnings", - "updateMask", - ) - ) - & set( - ( - "table", - "updateMask", - ) - ) - ) + # Establish that the response is the type that we expect. + assert isinstance(response, table.SchemaBundle) + assert response.name == "name_value" + assert response.etag == "etag_value" -def test_update_table_rest_flattened(): +def test_get_schema_bundle_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport="grpc", ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. 
- return_value = operations_pb2.Operation(name="operations/spam") - - # get arguments that satisfy an http rule for this method - sample_request = { - "table": {"name": "projects/sample1/instances/sample2/tables/sample3"} - } - - # get truthy value for each flattened field - mock_args = dict( - table=gba_table.Table(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.update_table(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{table.name=projects/*/instances/*/tables/*}" - % client.transport._host, - args[1], - ) - - -def test_update_table_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.GetSchemaBundleRequest( + name="name_value", ) - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.update_table( - bigtable_table_admin.UpdateTableRequest(), - table=gba_table.Table(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_schema_bundle), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.get_schema_bundle(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.GetSchemaBundleRequest( + name="name_value", ) -def test_delete_table_rest_use_cached_wrapped_rpc(): +def test_get_schema_bundle_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport="grpc", ) # Should wrap all calls on client creation @@ -13083,171 +12841,339 @@ def test_delete_table_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.delete_table in client._transport._wrapped_methods + assert client._transport.get_schema_bundle in client._transport._wrapped_methods # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. 
) - client._transport._wrapped_methods[client._transport.delete_table] = mock_rpc - + client._transport._wrapped_methods[ + client._transport.get_schema_bundle + ] = mock_rpc request = {} - client.delete_table(request) + client.get_schema_bundle(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - client.delete_table(request) + client.get_schema_bundle(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_delete_table_rest_required_fields( - request_type=bigtable_table_admin.DeleteTableRequest, +@pytest.mark.asyncio +async def test_get_schema_bundle_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", ): - transport_class = transports.BigtableTableAdminRestTransport + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - # verify fields with default values are dropped + # Ensure method has been cached + assert ( + client._client._transport.get_schema_bundle + in client._client._transport._wrapped_methods + ) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).delete_table._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.get_schema_bundle + ] = mock_rpc - # verify required fields with default values are now present + request = {} + await client.get_schema_bundle(request) - jsonified_request["name"] = "name_value" + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).delete_table._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + await client.get_schema_bundle(request) - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_get_schema_bundle_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.GetSchemaBundleRequest, +): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_schema_bundle), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + table.SchemaBundle( + name="name_value", + etag="etag_value", + ) + ) + response = await client.get_schema_bundle(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = bigtable_table_admin.GetSchemaBundleRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, table.SchemaBundle) + assert response.name == "name_value" + assert response.etag == "etag_value" + + +@pytest.mark.asyncio +async def test_get_schema_bundle_async_from_dict(): + await test_get_schema_bundle_async(request_type=dict) + + +def test_get_schema_bundle_field_headers(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", ) - request = request_type(**request_init) - # Designate an appropriate value for the returned response. - return_value = None - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "delete", - "query_params": pb_request, - } - transcode.return_value = transcode_result + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.GetSchemaBundleRequest() - response_value = Response() - response_value.status_code = 200 - json_return_value = "" + request.name = "name_value" - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_schema_bundle), "__call__" + ) as call: + call.return_value = table.SchemaBundle() + client.get_schema_bundle(request) - response = client.delete_table(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] -def test_delete_table_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials +@pytest.mark.asyncio +async def test_get_schema_bundle_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) - unset_fields = transport.delete_table._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.GetSchemaBundleRequest() + request.name = "name_value" -def test_delete_table_rest_flattened(): + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_schema_bundle), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.SchemaBundle()) + await client.get_schema_bundle(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_get_schema_bundle_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = None + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_schema_bundle), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = table.SchemaBundle() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_schema_bundle( + name="name_value", + ) - # get arguments that satisfy an http rule for this method - sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val - # get truthy value for each flattened field - mock_args = dict( + +def test_get_schema_bundle_flattened_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_schema_bundle( + bigtable_table_admin.GetSchemaBundleRequest(), name="name_value", ) - mock_args.update(sample_request) - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = "" - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.delete_table(**mock_args) +@pytest.mark.asyncio +async def test_get_schema_bundle_flattened_async(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_schema_bundle), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = table.SchemaBundle() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.SchemaBundle()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_schema_bundle( + name="name_value", + ) # Establish that the underlying call was made with the expected # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/tables/*}" % client.transport._host, - args[1], - ) + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val -def test_delete_table_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, +@pytest.mark.asyncio +async def test_get_schema_bundle_flattened_error_async(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.delete_table( - bigtable_table_admin.DeleteTableRequest(), + await client.get_schema_bundle( + bigtable_table_admin.GetSchemaBundleRequest(), name="name_value", ) -def test_undelete_table_rest_use_cached_wrapped_rpc(): +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.ListSchemaBundlesRequest, + dict, + ], +) +def test_list_schema_bundles(request_type, transport: str = "grpc"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_schema_bundles), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_table_admin.ListSchemaBundlesResponse( + next_page_token="next_page_token_value", + ) + response = client.list_schema_bundles(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = bigtable_table_admin.ListSchemaBundlesRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListSchemaBundlesPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_schema_bundles_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.ListSchemaBundlesRequest( + parent="parent_value", + page_token="page_token_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_schema_bundles), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client.list_schema_bundles(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.ListSchemaBundlesRequest( + parent="parent_value", + page_token="page_token_value", + ) + + +def test_list_schema_bundles_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport="grpc", ) # Should wrap all calls on client creation @@ -13255,388 +13181,542 @@ def test_undelete_table_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.undelete_table in client._transport._wrapped_methods + assert ( + client._transport.list_schema_bundles in client._transport._wrapped_methods + ) # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[client._transport.undelete_table] = mock_rpc - + client._transport._wrapped_methods[ + client._transport.list_schema_bundles + ] = mock_rpc request = {} - client.undelete_table(request) + client.list_schema_bundles(request) # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper + client.list_schema_bundles(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_list_schema_bundles_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 wrapper_fn.reset_mock() - client.undelete_table(request) + # Ensure method has been cached + assert ( + client._client._transport.list_schema_bundles + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.list_schema_bundles + ] = mock_rpc + + request = {} + await client.list_schema_bundles(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.list_schema_bundles(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_undelete_table_rest_required_fields( - request_type=bigtable_table_admin.UndeleteTableRequest, +@pytest.mark.asyncio +async def test_list_schema_bundles_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.ListSchemaBundlesRequest, ): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, ) - # verify fields with default values are dropped + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).undelete_table._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_schema_bundles), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.ListSchemaBundlesResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_schema_bundles(request) - # verify required fields with default values are now present + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = bigtable_table_admin.ListSchemaBundlesRequest() + assert args[0] == request - jsonified_request["name"] = "name_value" + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListSchemaBundlesAsyncPager) + assert response.next_page_token == "next_page_token_value" - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).undelete_table._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" +@pytest.mark.asyncio +async def test_list_schema_bundles_async_from_dict(): + await test_list_schema_bundles_async(request_type=dict) + +def test_list_schema_bundles_field_headers(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", ) - request = request_type(**request_init) - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.ListSchemaBundlesRequest() - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) + request.parent = "parent_value" - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_schema_bundles), "__call__" + ) as call: + call.return_value = bigtable_table_admin.ListSchemaBundlesResponse() + client.list_schema_bundles(request) - response = client.undelete_table(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] -def test_undelete_table_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials +@pytest.mark.asyncio +async def test_list_schema_bundles_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) - unset_fields = transport.undelete_table._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = bigtable_table_admin.ListSchemaBundlesRequest() + request.parent = "parent_value" -def test_undelete_table_rest_flattened(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_schema_bundles), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.ListSchemaBundlesResponse() + ) + await client.list_schema_bundles(request) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request - # get arguments that satisfy an http rule for this method - sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] - # get truthy value for each flattened field - mock_args = dict( - name="name_value", - ) - mock_args.update(sample_request) - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} +def test_list_schema_bundles_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) - client.undelete_table(**mock_args) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_schema_bundles), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_table_admin.ListSchemaBundlesResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_schema_bundles( + parent="parent_value", + ) # Establish that the underlying call was made with the expected # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/tables/*}:undelete" - % client.transport._host, - args[1], - ) + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val -def test_undelete_table_rest_flattened_error(transport: str = "rest"): +def test_list_schema_bundles_flattened_error(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, ) # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): - client.undelete_table( - bigtable_table_admin.UndeleteTableRequest(), - name="name_value", + client.list_schema_bundles( + bigtable_table_admin.ListSchemaBundlesRequest(), + parent="parent_value", ) -def test_create_authorized_view_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) +@pytest.mark.asyncio +async def test_list_schema_bundles_flattened_async(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_schema_bundles), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_table_admin.ListSchemaBundlesResponse() - # Ensure method has been cached - assert ( - client._transport.create_authorized_view - in client._transport._wrapped_methods + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.ListSchemaBundlesResponse() ) - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_schema_bundles( + parent="parent_value", ) - client._transport._wrapped_methods[ - client._transport.create_authorized_view - ] = mock_rpc - - request = {} - client.create_authorized_view(request) - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val - client.create_authorized_view(request) - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 +@pytest.mark.asyncio +async def test_list_schema_bundles_flattened_error_async(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.list_schema_bundles( + bigtable_table_admin.ListSchemaBundlesRequest(), + parent="parent_value", + ) -def test_create_authorized_view_rest_required_fields( - request_type=bigtable_table_admin.CreateAuthorizedViewRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - request_init = {} - request_init["parent"] = "" - request_init["authorized_view_id"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) +def test_list_schema_bundles_pager(transport_name: str = "grpc"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, ) - # verify fields with default values are dropped - assert "authorizedViewId" not in jsonified_request - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).create_authorized_view._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_schema_bundles), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + bigtable_table_admin.ListSchemaBundlesResponse( + schema_bundles=[ + table.SchemaBundle(), + table.SchemaBundle(), + table.SchemaBundle(), + ], + next_page_token="abc", + ), + bigtable_table_admin.ListSchemaBundlesResponse( + schema_bundles=[], + next_page_token="def", + ), + bigtable_table_admin.ListSchemaBundlesResponse( + schema_bundles=[ + table.SchemaBundle(), + ], + next_page_token="ghi", + ), + bigtable_table_admin.ListSchemaBundlesResponse( + schema_bundles=[ + table.SchemaBundle(), + table.SchemaBundle(), + ], + ), + RuntimeError, + ) - # verify required fields with default values are now present - assert "authorizedViewId" in jsonified_request - assert jsonified_request["authorizedViewId"] == request_init["authorized_view_id"] + expected_metadata = () + retry = retries.Retry() + timeout = 5 + expected_metadata = tuple(expected_metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_schema_bundles(request={}, retry=retry, timeout=timeout) - jsonified_request["parent"] = "parent_value" - jsonified_request["authorizedViewId"] = "authorized_view_id_value" + assert pager._metadata == expected_metadata + assert pager._retry == retry + assert pager._timeout == timeout - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).create_authorized_view._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. 
- assert not set(unset_fields) - set(("authorized_view_id",)) - jsonified_request.update(unset_fields) + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, table.SchemaBundle) for i in results) - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - assert "authorizedViewId" in jsonified_request - assert jsonified_request["authorizedViewId"] == "authorized_view_id_value" +def test_list_schema_bundles_pages(transport_name: str = "grpc"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport=transport_name, ) - request = request_type(**request_init) - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "post", - "query_params": pb_request, - } - transcode_result["body"] = pb_request - transcode.return_value = transcode_result + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_schema_bundles), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + bigtable_table_admin.ListSchemaBundlesResponse( + schema_bundles=[ + table.SchemaBundle(), + table.SchemaBundle(), + table.SchemaBundle(), + ], + next_page_token="abc", + ), + bigtable_table_admin.ListSchemaBundlesResponse( + schema_bundles=[], + next_page_token="def", + ), + bigtable_table_admin.ListSchemaBundlesResponse( + schema_bundles=[ + table.SchemaBundle(), + ], + next_page_token="ghi", + ), + bigtable_table_admin.ListSchemaBundlesResponse( + schema_bundles=[ + table.SchemaBundle(), + table.SchemaBundle(), + ], + ), + RuntimeError, + ) + pages = list(client.list_schema_bundles(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} +@pytest.mark.asyncio +async def test_list_schema_bundles_async_pager(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) - response = client.create_authorized_view(request) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_schema_bundles), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + bigtable_table_admin.ListSchemaBundlesResponse( + schema_bundles=[ + table.SchemaBundle(), + table.SchemaBundle(), + table.SchemaBundle(), + ], + next_page_token="abc", + ), + bigtable_table_admin.ListSchemaBundlesResponse( + schema_bundles=[], + next_page_token="def", + ), + bigtable_table_admin.ListSchemaBundlesResponse( + schema_bundles=[ + table.SchemaBundle(), + ], + next_page_token="ghi", + ), + bigtable_table_admin.ListSchemaBundlesResponse( + schema_bundles=[ + table.SchemaBundle(), + table.SchemaBundle(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_schema_bundles( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) - expected_params = [ - ( - "authorizedViewId", - "", - ), - ("$alt", "json;enum-encoding=int"), - ] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + assert len(responses) == 6 + assert all(isinstance(i, table.SchemaBundle) for i in responses) -def test_create_authorized_view_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials +@pytest.mark.asyncio +async def test_list_schema_bundles_async_pages(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) - unset_fields = transport.create_authorized_view._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(("authorizedViewId",)) - & set( - ( - "parent", - "authorizedViewId", - "authorizedView", - ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_schema_bundles), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + bigtable_table_admin.ListSchemaBundlesResponse( + schema_bundles=[ + table.SchemaBundle(), + table.SchemaBundle(), + table.SchemaBundle(), + ], + next_page_token="abc", + ), + bigtable_table_admin.ListSchemaBundlesResponse( + schema_bundles=[], + next_page_token="def", + ), + bigtable_table_admin.ListSchemaBundlesResponse( + schema_bundles=[ + table.SchemaBundle(), + ], + next_page_token="ghi", + ), + bigtable_table_admin.ListSchemaBundlesResponse( + schema_bundles=[ + table.SchemaBundle(), + table.SchemaBundle(), + ], + ), + RuntimeError, ) - ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_schema_bundles(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token -def test_create_authorized_view_rest_flattened(): +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.DeleteSchemaBundleRequest, + dict, + ], +) +def test_delete_schema_bundle(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport=transport, ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. 
- return_value = operations_pb2.Operation(name="operations/spam") + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() - # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/instances/sample2/tables/sample3"} + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_schema_bundle), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_schema_bundle(request) - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - authorized_view=table.AuthorizedView(name="name_value"), - authorized_view_id="authorized_view_id_value", - ) - mock_args.update(sample_request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = bigtable_table_admin.DeleteSchemaBundleRequest() + assert args[0] == request - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.create_authorized_view(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*/tables/*}/authorizedViews" - % client.transport._host, - args[1], - ) + # Establish that the response is the type that we expect. + assert response is None -def test_create_authorized_view_rest_flattened_error(transport: str = "rest"): +def test_delete_schema_bundle_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + transport="grpc", ) - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_authorized_view( - bigtable_table_admin.CreateAuthorizedViewRequest(), - parent="parent_value", - authorized_view=table.AuthorizedView(name="name_value"), - authorized_view_id="authorized_view_id_value", + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = bigtable_table_admin.DeleteSchemaBundleRequest( + name="name_value", + etag="etag_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_schema_bundle), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.delete_schema_bundle(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.DeleteSchemaBundleRequest( + name="name_value", + etag="etag_value", ) -def test_list_authorized_views_rest_use_cached_wrapped_rpc(): +def test_delete_schema_bundle_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", + transport="grpc", ) # Should wrap all calls on client creation @@ -13645,8 +13725,7 @@ def test_list_authorized_views_rest_use_cached_wrapped_rpc(): # Ensure method has been cached assert ( - client._transport.list_authorized_views - in client._transport._wrapped_methods + client._transport.delete_schema_bundle in client._transport._wrapped_methods ) # Replace cached wrapped function with mock @@ -13655,248 +13734,248 @@ def test_list_authorized_views_rest_use_cached_wrapped_rpc(): "foo" # operation_request.operation in compute client(s) expect a string. ) client._transport._wrapped_methods[ - client._transport.list_authorized_views + client._transport.delete_schema_bundle ] = mock_rpc - request = {} - client.list_authorized_views(request) + client.delete_schema_bundle(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - client.list_authorized_views(request) + client.delete_schema_bundle(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_list_authorized_views_rest_required_fields( - request_type=bigtable_table_admin.ListAuthorizedViewsRequest, +@pytest.mark.asyncio +async def test_delete_schema_bundle_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", ): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_authorized_views._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - # verify required fields with default values are now present + # Ensure method has been cached + assert ( + client._client._transport.delete_schema_bundle + in client._client._transport._wrapped_methods + ) - jsonified_request["parent"] = "parent_value" + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.delete_schema_bundle + ] = mock_rpc - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - 
).list_authorized_views._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set( - ( - "page_size", - "page_token", - "view", - ) - ) - jsonified_request.update(unset_fields) + request = {} + await client.delete_schema_bundle(request) - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) + await client.delete_schema_bundle(request) - # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.ListAuthorizedViewsResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_table_admin.ListAuthorizedViewsResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(return_value) +@pytest.mark.asyncio +async def test_delete_schema_bundle_async( + transport: str = "grpc_asyncio", + request_type=bigtable_table_admin.DeleteSchemaBundleRequest, +): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() - response = client.list_authorized_views(request) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_schema_bundle), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_schema_bundle(request) - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = bigtable_table_admin.DeleteSchemaBundleRequest() + assert args[0] == request + # Establish that the response is the type that we expect. 
+ assert response is None -def test_list_authorized_views_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - unset_fields = transport.list_authorized_views._get_unset_required_fields({}) - assert set(unset_fields) == ( - set( - ( - "pageSize", - "pageToken", - "view", - ) - ) - & set(("parent",)) - ) +@pytest.mark.asyncio +async def test_delete_schema_bundle_async_from_dict(): + await test_delete_schema_bundle_async(request_type=dict) -def test_list_authorized_views_rest_flattened(): +def test_delete_schema_bundle_field_headers(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="rest", ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.ListAuthorizedViewsResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {"parent": "projects/sample1/instances/sample2/tables/sample3"} + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.DeleteSchemaBundleRequest() - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - ) - mock_args.update(sample_request) + request.name = "name_value" - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_table_admin.ListAuthorizedViewsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_schema_bundle), "__call__" + ) as call: + call.return_value = None + client.delete_schema_bundle(request) - client.list_authorized_views(**mock_args) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*/tables/*}/authorizedViews" - % client.transport._host, - args[1], - ) + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] -def test_list_authorized_views_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, +@pytest.mark.asyncio +async def test_delete_schema_bundle_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), ) - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.list_authorized_views( - bigtable_table_admin.ListAuthorizedViewsRequest(), - parent="parent_value", - ) - + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.DeleteSchemaBundleRequest() -def test_list_authorized_views_rest_pager(transport: str = "rest"): + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_schema_bundle), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_schema_bundle(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_delete_schema_bundle_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # TODO(kbandes): remove this mock unless there's a good reason for it. - # with mock.patch.object(path_template, 'transcode') as transcode: - # Set the response as a series of pages - response = ( - bigtable_table_admin.ListAuthorizedViewsResponse( - authorized_views=[ - table.AuthorizedView(), - table.AuthorizedView(), - table.AuthorizedView(), - ], - next_page_token="abc", - ), - bigtable_table_admin.ListAuthorizedViewsResponse( - authorized_views=[], - next_page_token="def", - ), - bigtable_table_admin.ListAuthorizedViewsResponse( - authorized_views=[ - table.AuthorizedView(), - ], - next_page_token="ghi", - ), - bigtable_table_admin.ListAuthorizedViewsResponse( - authorized_views=[ - table.AuthorizedView(), - table.AuthorizedView(), - ], - ), + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_schema_bundle), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_schema_bundle( + name="name_value", ) - # Two responses for two calls - response = response + response - # Wrap the values into proper Response objs - response = tuple( - bigtable_table_admin.ListAuthorizedViewsResponse.to_json(x) - for x in response + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_delete_schema_bundle_flattened_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete_schema_bundle( + bigtable_table_admin.DeleteSchemaBundleRequest(), + name="name_value", ) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = response_val.encode("UTF-8") - return_val.status_code = 200 - req.side_effect = return_values - sample_request = {"parent": "projects/sample1/instances/sample2/tables/sample3"} - pager = client.list_authorized_views(request=sample_request) +@pytest.mark.asyncio +async def test_delete_schema_bundle_flattened_async(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, table.AuthorizedView) for i in results) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_schema_bundle), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None - pages = list(client.list_authorized_views(request=sample_request).pages) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_schema_bundle( + name="name_value", + ) + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val -def test_get_authorized_view_rest_use_cached_wrapped_rpc(): + +@pytest.mark.asyncio +async def test_delete_schema_bundle_flattened_error_async(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_schema_bundle( + bigtable_table_admin.DeleteSchemaBundleRequest(), + name="name_value", + ) + + +def test_create_table_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -13910,39 +13989,36 @@ def test_get_authorized_view_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert ( - client._transport.get_authorized_view in client._transport._wrapped_methods - ) + assert client._transport.create_table in client._transport._wrapped_methods # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[ - client._transport.get_authorized_view - ] = mock_rpc + client._transport._wrapped_methods[client._transport.create_table] = mock_rpc request = {} - client.get_authorized_view(request) + client.create_table(request) # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 - client.get_authorized_view(request) + client.create_table(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_get_authorized_view_rest_required_fields( - request_type=bigtable_table_admin.GetAuthorizedViewRequest, +def test_create_table_rest_required_fields( + request_type=bigtable_table_admin.CreateTableRequest, ): transport_class = transports.BigtableTableAdminRestTransport request_init = {} - request_init["name"] = "" + request_init["parent"] = "" + request_init["table_id"] = "" request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( @@ -13953,23 +14029,24 @@ def test_get_authorized_view_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).get_authorized_view._get_unset_required_fields(jsonified_request) + ).create_table._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present - jsonified_request["name"] = "name_value" + jsonified_request["parent"] = "parent_value" + jsonified_request["tableId"] = "table_id_value" unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).get_authorized_view._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("view",)) + ).create_table._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + assert "tableId" in jsonified_request + assert jsonified_request["tableId"] == "table_id_value" client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -13978,7 +14055,7 @@ def test_get_authorized_view_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = table.AuthorizedView() + return_value = gba_table.Table() # Mock the http request call within the method and fake a response. 
with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -13990,39 +14067,49 @@ def test_get_authorized_view_rest_required_fields( pb_request = request_type.pb(request) transcode_result = { "uri": "v1/sample_method", - "method": "get", + "method": "post", "query_params": pb_request, } + transcode_result["body"] = pb_request transcode.return_value = transcode_result response_value = Response() response_value.status_code = 200 # Convert return value to protobuf type - return_value = table.AuthorizedView.pb(return_value) + return_value = gba_table.Table.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.get_authorized_view(request) + response = client.create_table(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_get_authorized_view_rest_unset_required_fields(): +def test_create_table_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.get_authorized_view._get_unset_required_fields({}) - assert set(unset_fields) == (set(("view",)) & set(("name",))) + unset_fields = transport.create_table._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "parent", + "tableId", + "table", + ) + ) + ) -def test_get_authorized_view_rest_flattened(): +def test_create_table_rest_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -14031,16 +14118,16 @@ def test_get_authorized_view_rest_flattened(): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = table.AuthorizedView() + return_value = gba_table.Table() # get arguments that satisfy an http rule for this method - sample_request = { - "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" - } + sample_request = {"parent": "projects/sample1/instances/sample2"} # get truthy value for each flattened field mock_args = dict( - name="name_value", + parent="parent_value", + table_id="table_id_value", + table=gba_table.Table(name="name_value"), ) mock_args.update(sample_request) @@ -14048,26 +14135,25 @@ def test_get_authorized_view_rest_flattened(): response_value = Response() response_value.status_code = 200 # Convert return value to protobuf type - return_value = table.AuthorizedView.pb(return_value) + return_value = gba_table.Table.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.get_authorized_view(**mock_args) + client.create_table(**mock_args) # Establish that the underlying call was made with the expected # request object values. 
assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/tables/*/authorizedViews/*}" - % client.transport._host, + "%s/v2/{parent=projects/*/instances/*}/tables" % client.transport._host, args[1], ) -def test_get_authorized_view_rest_flattened_error(transport: str = "rest"): +def test_create_table_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -14076,13 +14162,15 @@ def test_get_authorized_view_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.get_authorized_view( - bigtable_table_admin.GetAuthorizedViewRequest(), - name="name_value", + client.create_table( + bigtable_table_admin.CreateTableRequest(), + parent="parent_value", + table_id="table_id_value", + table=gba_table.Table(name="name_value"), ) -def test_update_authorized_view_rest_use_cached_wrapped_rpc(): +def test_create_table_from_snapshot_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -14097,7 +14185,7 @@ def test_update_authorized_view_rest_use_cached_wrapped_rpc(): # Ensure method has been cached assert ( - client._transport.update_authorized_view + client._transport.create_table_from_snapshot in client._transport._wrapped_methods ) @@ -14107,11 +14195,11 @@ def test_update_authorized_view_rest_use_cached_wrapped_rpc(): "foo" # operation_request.operation in compute client(s) expect a string. ) client._transport._wrapped_methods[ - client._transport.update_authorized_view + client._transport.create_table_from_snapshot ] = mock_rpc request = {} - client.update_authorized_view(request) + client.create_table_from_snapshot(request) # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 @@ -14120,19 +14208,22 @@ def test_update_authorized_view_rest_use_cached_wrapped_rpc(): # subsequent calls should use the cached wrapper wrapper_fn.reset_mock() - client.update_authorized_view(request) + client.create_table_from_snapshot(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_update_authorized_view_rest_required_fields( - request_type=bigtable_table_admin.UpdateAuthorizedViewRequest, +def test_create_table_from_snapshot_rest_required_fields( + request_type=bigtable_table_admin.CreateTableFromSnapshotRequest, ): transport_class = transports.BigtableTableAdminRestTransport request_init = {} + request_init["parent"] = "" + request_init["table_id"] = "" + request_init["source_snapshot"] = "" request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( @@ -14143,27 +14234,30 @@ def test_update_authorized_view_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).update_authorized_view._get_unset_required_fields(jsonified_request) + ).create_table_from_snapshot._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present + jsonified_request["parent"] = "parent_value" + jsonified_request["tableId"] = "table_id_value" + jsonified_request["sourceSnapshot"] = "source_snapshot_value" + unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).update_authorized_view._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set( - ( - "ignore_warnings", - "update_mask", - ) - ) + ).create_table_from_snapshot._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone - - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + assert "tableId" in jsonified_request + assert jsonified_request["tableId"] == "table_id_value" + assert "sourceSnapshot" in jsonified_request + assert jsonified_request["sourceSnapshot"] == "source_snapshot_value" + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) request = request_type(**request_init) @@ -14181,7 +14275,7 @@ def test_update_authorized_view_rest_required_fields( pb_request = request_type.pb(request) transcode_result = { "uri": "v1/sample_method", - "method": "patch", + "method": "post", "query_params": pb_request, } transcode_result["body"] = pb_request @@ -14195,31 +14289,32 @@ def test_update_authorized_view_rest_required_fields( req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.update_authorized_view(request) + response = client.create_table_from_snapshot(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_update_authorized_view_rest_unset_required_fields(): +def test_create_table_from_snapshot_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = 
transport.update_authorized_view._get_unset_required_fields({}) + unset_fields = transport.create_table_from_snapshot._get_unset_required_fields({}) assert set(unset_fields) == ( - set( + set(()) + & set( ( - "ignoreWarnings", - "updateMask", + "parent", + "tableId", + "sourceSnapshot", ) ) - & set(("authorizedView",)) ) -def test_update_authorized_view_rest_flattened(): +def test_create_table_from_snapshot_rest_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -14231,16 +14326,13 @@ def test_update_authorized_view_rest_flattened(): return_value = operations_pb2.Operation(name="operations/spam") # get arguments that satisfy an http rule for this method - sample_request = { - "authorized_view": { - "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" - } - } + sample_request = {"parent": "projects/sample1/instances/sample2"} # get truthy value for each flattened field mock_args = dict( - authorized_view=table.AuthorizedView(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + parent="parent_value", + table_id="table_id_value", + source_snapshot="source_snapshot_value", ) mock_args.update(sample_request) @@ -14252,20 +14344,20 @@ def test_update_authorized_view_rest_flattened(): req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.update_authorized_view(**mock_args) + client.create_table_from_snapshot(**mock_args) # Establish that the underlying call was made with the expected # request object values. assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{authorized_view.name=projects/*/instances/*/tables/*/authorizedViews/*}" + "%s/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot" % client.transport._host, args[1], ) -def test_update_authorized_view_rest_flattened_error(transport: str = "rest"): +def test_create_table_from_snapshot_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -14274,14 +14366,15 @@ def test_update_authorized_view_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): - client.update_authorized_view( - bigtable_table_admin.UpdateAuthorizedViewRequest(), - authorized_view=table.AuthorizedView(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + client.create_table_from_snapshot( + bigtable_table_admin.CreateTableFromSnapshotRequest(), + parent="parent_value", + table_id="table_id_value", + source_snapshot="source_snapshot_value", ) -def test_delete_authorized_view_rest_use_cached_wrapped_rpc(): +def test_list_tables_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -14295,40 +14388,35 @@ def test_delete_authorized_view_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert ( - client._transport.delete_authorized_view - in client._transport._wrapped_methods - ) + assert client._transport.list_tables in client._transport._wrapped_methods # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[ - client._transport.delete_authorized_view - ] = mock_rpc + client._transport._wrapped_methods[client._transport.list_tables] = mock_rpc request = {} - client.delete_authorized_view(request) + client.list_tables(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - client.delete_authorized_view(request) + client.list_tables(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_delete_authorized_view_rest_required_fields( - request_type=bigtable_table_admin.DeleteAuthorizedViewRequest, +def test_list_tables_rest_required_fields( + request_type=bigtable_table_admin.ListTablesRequest, ): transport_class = transports.BigtableTableAdminRestTransport request_init = {} - request_init["name"] = "" + request_init["parent"] = "" request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( @@ -14339,23 +14427,29 @@ def test_delete_authorized_view_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).delete_authorized_view._get_unset_required_fields(jsonified_request) + ).list_tables._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present - jsonified_request["name"] = "name_value" + jsonified_request["parent"] = "parent_value" unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).delete_authorized_view._get_unset_required_fields(jsonified_request) + ).list_tables._get_unset_required_fields(jsonified_request) # Check that path parameters and body parameters are not mixing in. 
- assert not set(unset_fields) - set(("etag",)) + assert not set(unset_fields) - set( + ( + "page_size", + "page_token", + "view", + ) + ) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -14364,7 +14458,7 @@ def test_delete_authorized_view_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = None + return_value = bigtable_table_admin.ListTablesResponse() # Mock the http request call within the method and fake a response. with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -14376,36 +14470,48 @@ def test_delete_authorized_view_rest_required_fields( pb_request = request_type.pb(request) transcode_result = { "uri": "v1/sample_method", - "method": "delete", + "method": "get", "query_params": pb_request, } transcode.return_value = transcode_result response_value = Response() response_value.status_code = 200 - json_return_value = "" + + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListTablesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.delete_authorized_view(request) + response = client.list_tables(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_delete_authorized_view_rest_unset_required_fields(): +def test_list_tables_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.delete_authorized_view._get_unset_required_fields({}) - assert set(unset_fields) == (set(("etag",)) & set(("name",))) + unset_fields = transport.list_tables._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "pageSize", + "pageToken", + "view", + ) + ) + & set(("parent",)) + ) -def test_delete_authorized_view_rest_flattened(): +def test_list_tables_rest_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -14414,41 +14520,40 @@ def test_delete_authorized_view_rest_flattened(): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. 
- return_value = None + return_value = bigtable_table_admin.ListTablesResponse() # get arguments that satisfy an http rule for this method - sample_request = { - "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" - } + sample_request = {"parent": "projects/sample1/instances/sample2"} # get truthy value for each flattened field mock_args = dict( - name="name_value", + parent="parent_value", ) mock_args.update(sample_request) # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - json_return_value = "" + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListTablesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.delete_authorized_view(**mock_args) + client.list_tables(**mock_args) # Establish that the underlying call was made with the expected # request object values. assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/tables/*/authorizedViews/*}" - % client.transport._host, + "%s/v2/{parent=projects/*/instances/*}/tables" % client.transport._host, args[1], ) -def test_delete_authorized_view_rest_flattened_error(transport: str = "rest"): +def test_list_tables_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -14457,13 +14562,76 @@ def test_delete_authorized_view_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.delete_authorized_view( - bigtable_table_admin.DeleteAuthorizedViewRequest(), - name="name_value", + client.list_tables( + bigtable_table_admin.ListTablesRequest(), + parent="parent_value", ) -def test_modify_column_families_rest_use_cached_wrapped_rpc(): +def test_list_tables_rest_pager(transport: str = "rest"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + bigtable_table_admin.ListTablesResponse( + tables=[ + table.Table(), + table.Table(), + table.Table(), + ], + next_page_token="abc", + ), + bigtable_table_admin.ListTablesResponse( + tables=[], + next_page_token="def", + ), + bigtable_table_admin.ListTablesResponse( + tables=[ + table.Table(), + ], + next_page_token="ghi", + ), + bigtable_table_admin.ListTablesResponse( + tables=[ + table.Table(), + table.Table(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + bigtable_table_admin.ListTablesResponse.to_json(x) for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"parent": "projects/sample1/instances/sample2"} + + pager = client.list_tables(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, table.Table) for i in results) + + pages = list(client.list_tables(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_get_table_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -14477,35 +14645,30 @@ def test_modify_column_families_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert ( - client._transport.modify_column_families - in client._transport._wrapped_methods - ) + assert client._transport.get_table in client._transport._wrapped_methods # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[ - client._transport.modify_column_families - ] = mock_rpc + client._transport._wrapped_methods[client._transport.get_table] = mock_rpc request = {} - client.modify_column_families(request) + client.get_table(request) # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 - client.modify_column_families(request) + client.get_table(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_modify_column_families_rest_required_fields( - request_type=bigtable_table_admin.ModifyColumnFamiliesRequest, +def test_get_table_rest_required_fields( + request_type=bigtable_table_admin.GetTableRequest, ): transport_class = transports.BigtableTableAdminRestTransport @@ -14521,7 +14684,7 @@ def test_modify_column_families_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).modify_column_families._get_unset_required_fields(jsonified_request) + ).get_table._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present @@ -14530,7 +14693,9 @@ def test_modify_column_families_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).modify_column_families._get_unset_required_fields(jsonified_request) + ).get_table._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("view",)) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone @@ -14556,10 +14721,9 @@ def test_modify_column_families_rest_required_fields( pb_request = request_type.pb(request) transcode_result = { "uri": "v1/sample_method", - "method": "post", + "method": "get", "query_params": pb_request, } - transcode_result["body"] = pb_request transcode.return_value = transcode_result response_value = Response() @@ -14573,31 +14737,23 @@ def test_modify_column_families_rest_required_fields( req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.modify_column_families(request) + response = client.get_table(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_modify_column_families_rest_unset_required_fields(): +def test_get_table_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.modify_column_families._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(()) - & set( - ( - "name", - "modifications", - ) - ) - ) + unset_fields = transport.get_table._get_unset_required_fields({}) + assert set(unset_fields) == (set(("view",)) & set(("name",))) -def test_modify_column_families_rest_flattened(): +def test_get_table_rest_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -14614,11 +14770,6 @@ def test_modify_column_families_rest_flattened(): # get truthy value for each flattened field mock_args = dict( name="name_value", - modifications=[ - bigtable_table_admin.ModifyColumnFamiliesRequest.Modification( - id="id_value" - ) - ], ) mock_args.update(sample_request) @@ -14632,20 +14783,19 @@ def test_modify_column_families_rest_flattened(): req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.modify_column_families(**mock_args) + client.get_table(**mock_args) # Establish that the underlying call was made with the expected # request object values. 
assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies" - % client.transport._host, + "%s/v2/{name=projects/*/instances/*/tables/*}" % client.transport._host, args[1], ) -def test_modify_column_families_rest_flattened_error(transport: str = "rest"): +def test_get_table_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -14654,18 +14804,13 @@ def test_modify_column_families_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.modify_column_families( - bigtable_table_admin.ModifyColumnFamiliesRequest(), + client.get_table( + bigtable_table_admin.GetTableRequest(), name="name_value", - modifications=[ - bigtable_table_admin.ModifyColumnFamiliesRequest.Modification( - id="id_value" - ) - ], ) -def test_drop_row_range_rest_use_cached_wrapped_rpc(): +def test_update_table_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -14679,35 +14824,38 @@ def test_drop_row_range_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.drop_row_range in client._transport._wrapped_methods + assert client._transport.update_table in client._transport._wrapped_methods # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[client._transport.drop_row_range] = mock_rpc + client._transport._wrapped_methods[client._transport.update_table] = mock_rpc request = {} - client.drop_row_range(request) + client.update_table(request) # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 - client.drop_row_range(request) + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.update_table(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_drop_row_range_rest_required_fields( - request_type=bigtable_table_admin.DropRowRangeRequest, +def test_update_table_rest_required_fields( + request_type=bigtable_table_admin.UpdateTableRequest, ): transport_class = transports.BigtableTableAdminRestTransport request_init = {} - request_init["name"] = "" request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( @@ -14718,21 +14866,24 @@ def test_drop_row_range_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).drop_row_range._get_unset_required_fields(jsonified_request) + ).update_table._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present - jsonified_request["name"] = "name_value" - unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).drop_row_range._get_unset_required_fields(jsonified_request) + ).update_table._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "ignore_warnings", + "update_mask", + ) + ) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -14741,7 +14892,7 @@ def test_drop_row_range_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = None + return_value = operations_pb2.Operation(name="operations/spam") # Mock the http request call within the method and fake a response. 
with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -14753,7 +14904,7 @@ def test_drop_row_range_rest_required_fields( pb_request = request_type.pb(request) transcode_result = { "uri": "v1/sample_method", - "method": "post", + "method": "patch", "query_params": pb_request, } transcode_result["body"] = pb_request @@ -14761,29 +14912,102 @@ def test_drop_row_range_rest_required_fields( response_value = Response() response_value.status_code = 200 - json_return_value = "" + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.drop_row_range(request) + response = client.update_table(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_drop_row_range_rest_unset_required_fields(): +def test_update_table_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.drop_row_range._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) + unset_fields = transport.update_table._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "ignoreWarnings", + "updateMask", + ) + ) + & set( + ( + "table", + "updateMask", + ) + ) + ) -def test_generate_consistency_token_rest_use_cached_wrapped_rpc(): +def test_update_table_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "table": {"name": "projects/sample1/instances/sample2/tables/sample3"} + } + + # get truthy value for each flattened field + mock_args = dict( + table=gba_table.Table(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.update_table(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{table.name=projects/*/instances/*/tables/*}" + % client.transport._host, + args[1], + ) + + +def test_update_table_rest_flattened_error(transport: str = "rest"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.update_table( + bigtable_table_admin.UpdateTableRequest(), + table=gba_table.Table(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +def test_delete_table_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -14797,35 +15021,30 @@ def test_generate_consistency_token_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert ( - client._transport.generate_consistency_token - in client._transport._wrapped_methods - ) + assert client._transport.delete_table in client._transport._wrapped_methods # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[ - client._transport.generate_consistency_token - ] = mock_rpc + client._transport._wrapped_methods[client._transport.delete_table] = mock_rpc request = {} - client.generate_consistency_token(request) + client.delete_table(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - client.generate_consistency_token(request) + client.delete_table(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_generate_consistency_token_rest_required_fields( - request_type=bigtable_table_admin.GenerateConsistencyTokenRequest, +def test_delete_table_rest_required_fields( + request_type=bigtable_table_admin.DeleteTableRequest, ): transport_class = transports.BigtableTableAdminRestTransport @@ -14841,7 +15060,7 @@ def test_generate_consistency_token_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).generate_consistency_token._get_unset_required_fields(jsonified_request) + ).delete_table._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present @@ -14850,7 +15069,7 @@ def test_generate_consistency_token_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).generate_consistency_token._get_unset_required_fields(jsonified_request) + ).delete_table._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone @@ -14864,7 +15083,7 @@ def test_generate_consistency_token_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() + return_value = None # Mock the http request call within the method and fake a response. 
with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -14876,42 +15095,36 @@ def test_generate_consistency_token_rest_required_fields( pb_request = request_type.pb(request) transcode_result = { "uri": "v1/sample_method", - "method": "post", + "method": "delete", "query_params": pb_request, } - transcode_result["body"] = pb_request transcode.return_value = transcode_result response_value = Response() response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(return_value) + json_return_value = "" response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.generate_consistency_token(request) + response = client.delete_table(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_generate_consistency_token_rest_unset_required_fields(): +def test_delete_table_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.generate_consistency_token._get_unset_required_fields({}) + unset_fields = transport.delete_table._get_unset_required_fields({}) assert set(unset_fields) == (set(()) & set(("name",))) -def test_generate_consistency_token_rest_flattened(): +def test_delete_table_rest_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -14920,7 +15133,7 @@ def test_generate_consistency_token_rest_flattened(): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() + return_value = None # get arguments that satisfy an http rule for this method sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} @@ -14934,29 +15147,24 @@ def test_generate_consistency_token_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(return_value) + json_return_value = "" response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.generate_consistency_token(**mock_args) + client.delete_table(**mock_args) # Establish that the underlying call was made with the expected # request object values. 
assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken" - % client.transport._host, + "%s/v2/{name=projects/*/instances/*/tables/*}" % client.transport._host, args[1], ) -def test_generate_consistency_token_rest_flattened_error(transport: str = "rest"): +def test_delete_table_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -14965,13 +15173,13 @@ def test_generate_consistency_token_rest_flattened_error(transport: str = "rest" # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.generate_consistency_token( - bigtable_table_admin.GenerateConsistencyTokenRequest(), + client.delete_table( + bigtable_table_admin.DeleteTableRequest(), name="name_value", ) -def test_check_consistency_rest_use_cached_wrapped_rpc(): +def test_undelete_table_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -14985,38 +15193,39 @@ def test_check_consistency_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.check_consistency in client._transport._wrapped_methods + assert client._transport.undelete_table in client._transport._wrapped_methods # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[ - client._transport.check_consistency - ] = mock_rpc + client._transport._wrapped_methods[client._transport.undelete_table] = mock_rpc request = {} - client.check_consistency(request) + client.undelete_table(request) # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 - client.check_consistency(request) + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.undelete_table(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_check_consistency_rest_required_fields( - request_type=bigtable_table_admin.CheckConsistencyRequest, +def test_undelete_table_rest_required_fields( + request_type=bigtable_table_admin.UndeleteTableRequest, ): transport_class = transports.BigtableTableAdminRestTransport request_init = {} request_init["name"] = "" - request_init["consistency_token"] = "" request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( @@ -15027,24 +15236,21 @@ def test_check_consistency_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).check_consistency._get_unset_required_fields(jsonified_request) + ).undelete_table._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present jsonified_request["name"] = "name_value" - jsonified_request["consistencyToken"] = "consistency_token_value" unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).check_consistency._get_unset_required_fields(jsonified_request) + ).undelete_table._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone assert "name" in jsonified_request assert jsonified_request["name"] == "name_value" - assert "consistencyToken" in jsonified_request - assert jsonified_request["consistencyToken"] == "consistency_token_value" client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -15053,7 +15259,7 @@ def test_check_consistency_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.CheckConsistencyResponse() + return_value = operations_pb2.Operation(name="operations/spam") # Mock the http request call within the method and fake a response. 
with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -15073,42 +15279,29 @@ def test_check_consistency_rest_required_fields( response_value = Response() response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable_table_admin.CheckConsistencyResponse.pb( - return_value - ) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.check_consistency(request) + response = client.undelete_table(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_check_consistency_rest_unset_required_fields(): +def test_undelete_table_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.check_consistency._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(()) - & set( - ( - "name", - "consistencyToken", - ) - ) - ) + unset_fields = transport.undelete_table._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) -def test_check_consistency_rest_flattened(): +def test_undelete_table_rest_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -15117,7 +15310,7 @@ def test_check_consistency_rest_flattened(): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.CheckConsistencyResponse() + return_value = operations_pb2.Operation(name="operations/spam") # get arguments that satisfy an http rule for this method sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} @@ -15125,34 +15318,31 @@ def test_check_consistency_rest_flattened(): # get truthy value for each flattened field mock_args = dict( name="name_value", - consistency_token="consistency_token_value", ) mock_args.update(sample_request) # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_table_admin.CheckConsistencyResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.check_consistency(**mock_args) + client.undelete_table(**mock_args) # Establish that the underlying call was made with the expected # request object values. 
assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/tables/*}:checkConsistency" + "%s/v2/{name=projects/*/instances/*/tables/*}:undelete" % client.transport._host, args[1], ) -def test_check_consistency_rest_flattened_error(transport: str = "rest"): +def test_undelete_table_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -15161,14 +15351,13 @@ def test_check_consistency_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.check_consistency( - bigtable_table_admin.CheckConsistencyRequest(), + client.undelete_table( + bigtable_table_admin.UndeleteTableRequest(), name="name_value", - consistency_token="consistency_token_value", ) -def test_snapshot_table_rest_use_cached_wrapped_rpc(): +def test_create_authorized_view_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -15182,17 +15371,22 @@ def test_snapshot_table_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.snapshot_table in client._transport._wrapped_methods + assert ( + client._transport.create_authorized_view + in client._transport._wrapped_methods + ) # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[client._transport.snapshot_table] = mock_rpc + client._transport._wrapped_methods[ + client._transport.create_authorized_view + ] = mock_rpc request = {} - client.snapshot_table(request) + client.create_authorized_view(request) # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 @@ -15201,22 +15395,21 @@ def test_snapshot_table_rest_use_cached_wrapped_rpc(): # subsequent calls should use the cached wrapper wrapper_fn.reset_mock() - client.snapshot_table(request) + client.create_authorized_view(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_snapshot_table_rest_required_fields( - request_type=bigtable_table_admin.SnapshotTableRequest, +def test_create_authorized_view_rest_required_fields( + request_type=bigtable_table_admin.CreateAuthorizedViewRequest, ): transport_class = transports.BigtableTableAdminRestTransport request_init = {} - request_init["name"] = "" - request_init["cluster"] = "" - request_init["snapshot_id"] = "" + request_init["parent"] = "" + request_init["authorized_view_id"] = "" request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( @@ -15224,30 +15417,32 @@ def test_snapshot_table_rest_required_fields( ) # verify fields with default values are dropped + assert "authorizedViewId" not in jsonified_request unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).snapshot_table._get_unset_required_fields(jsonified_request) + ).create_authorized_view._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present + assert "authorizedViewId" in jsonified_request + assert jsonified_request["authorizedViewId"] == request_init["authorized_view_id"] - jsonified_request["name"] = "name_value" - jsonified_request["cluster"] = "cluster_value" - jsonified_request["snapshotId"] = "snapshot_id_value" + jsonified_request["parent"] = "parent_value" + jsonified_request["authorizedViewId"] = "authorized_view_id_value" unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).snapshot_table._get_unset_required_fields(jsonified_request) + ).create_authorized_view._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. 
+ assert not set(unset_fields) - set(("authorized_view_id",)) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" - assert "cluster" in jsonified_request - assert jsonified_request["cluster"] == "cluster_value" - assert "snapshotId" in jsonified_request - assert jsonified_request["snapshotId"] == "snapshot_id_value" + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + assert "authorizedViewId" in jsonified_request + assert jsonified_request["authorizedViewId"] == "authorized_view_id_value" client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -15282,32 +15477,38 @@ def test_snapshot_table_rest_required_fields( req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.snapshot_table(request) + response = client.create_authorized_view(request) - expected_params = [("$alt", "json;enum-encoding=int")] + expected_params = [ + ( + "authorizedViewId", + "", + ), + ("$alt", "json;enum-encoding=int"), + ] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_snapshot_table_rest_unset_required_fields(): +def test_create_authorized_view_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.snapshot_table._get_unset_required_fields({}) + unset_fields = transport.create_authorized_view._get_unset_required_fields({}) assert set(unset_fields) == ( - set(()) + set(("authorizedViewId",)) & set( ( - "name", - "cluster", - "snapshotId", + "parent", + "authorizedViewId", + "authorizedView", ) ) ) -def test_snapshot_table_rest_flattened(): +def test_create_authorized_view_rest_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -15319,14 +15520,13 @@ def test_snapshot_table_rest_flattened(): return_value = operations_pb2.Operation(name="operations/spam") # get arguments that satisfy an http rule for this method - sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} + sample_request = {"parent": "projects/sample1/instances/sample2/tables/sample3"} # get truthy value for each flattened field mock_args = dict( - name="name_value", - cluster="cluster_value", - snapshot_id="snapshot_id_value", - description="description_value", + parent="parent_value", + authorized_view=table.AuthorizedView(name="name_value"), + authorized_view_id="authorized_view_id_value", ) mock_args.update(sample_request) @@ -15338,20 +15538,20 @@ def test_snapshot_table_rest_flattened(): req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.snapshot_table(**mock_args) + client.create_authorized_view(**mock_args) # Establish that the underlying call was made with the expected # request object values. 
assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/tables/*}:snapshot" + "%s/v2/{parent=projects/*/instances/*/tables/*}/authorizedViews" % client.transport._host, args[1], ) -def test_snapshot_table_rest_flattened_error(transport: str = "rest"): +def test_create_authorized_view_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -15360,16 +15560,15 @@ def test_snapshot_table_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.snapshot_table( - bigtable_table_admin.SnapshotTableRequest(), - name="name_value", - cluster="cluster_value", - snapshot_id="snapshot_id_value", - description="description_value", + client.create_authorized_view( + bigtable_table_admin.CreateAuthorizedViewRequest(), + parent="parent_value", + authorized_view=table.AuthorizedView(name="name_value"), + authorized_view_id="authorized_view_id_value", ) -def test_get_snapshot_rest_use_cached_wrapped_rpc(): +def test_list_authorized_views_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -15383,35 +15582,40 @@ def test_get_snapshot_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.get_snapshot in client._transport._wrapped_methods + assert ( + client._transport.list_authorized_views + in client._transport._wrapped_methods + ) # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[client._transport.get_snapshot] = mock_rpc + client._transport._wrapped_methods[ + client._transport.list_authorized_views + ] = mock_rpc request = {} - client.get_snapshot(request) + client.list_authorized_views(request) # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 - client.get_snapshot(request) + client.list_authorized_views(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_get_snapshot_rest_required_fields( - request_type=bigtable_table_admin.GetSnapshotRequest, +def test_list_authorized_views_rest_required_fields( + request_type=bigtable_table_admin.ListAuthorizedViewsRequest, ): transport_class = transports.BigtableTableAdminRestTransport request_init = {} - request_init["name"] = "" + request_init["parent"] = "" request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( @@ -15422,21 +15626,29 @@ def test_get_snapshot_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).get_snapshot._get_unset_required_fields(jsonified_request) + ).list_authorized_views._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present - jsonified_request["name"] = "name_value" + jsonified_request["parent"] = "parent_value" unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).get_snapshot._get_unset_required_fields(jsonified_request) + ).list_authorized_views._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "page_size", + "page_token", + "view", + ) + ) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == "name_value" + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -15445,7 +15657,7 @@ def test_get_snapshot_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = table.Snapshot() + return_value = bigtable_table_admin.ListAuthorizedViewsResponse() # Mock the http request call within the method and fake a response. 
with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -15466,30 +15678,41 @@ def test_get_snapshot_rest_required_fields( response_value.status_code = 200 # Convert return value to protobuf type - return_value = table.Snapshot.pb(return_value) + return_value = bigtable_table_admin.ListAuthorizedViewsResponse.pb( + return_value + ) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.get_snapshot(request) + response = client.list_authorized_views(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_get_snapshot_rest_unset_required_fields(): +def test_list_authorized_views_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.get_snapshot._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) + unset_fields = transport.list_authorized_views._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "pageSize", + "pageToken", + "view", + ) + ) + & set(("parent",)) + ) -def test_get_snapshot_rest_flattened(): +def test_list_authorized_views_rest_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -15498,16 +15721,14 @@ def test_get_snapshot_rest_flattened(): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = table.Snapshot() + return_value = bigtable_table_admin.ListAuthorizedViewsResponse() # get arguments that satisfy an http rule for this method - sample_request = { - "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4" - } + sample_request = {"parent": "projects/sample1/instances/sample2/tables/sample3"} # get truthy value for each flattened field mock_args = dict( - name="name_value", + parent="parent_value", ) mock_args.update(sample_request) @@ -15515,26 +15736,26 @@ def test_get_snapshot_rest_flattened(): response_value = Response() response_value.status_code = 200 # Convert return value to protobuf type - return_value = table.Snapshot.pb(return_value) + return_value = bigtable_table_admin.ListAuthorizedViewsResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.get_snapshot(**mock_args) + client.list_authorized_views(**mock_args) # Establish that the underlying call was made with the expected # request object values. 
assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}" + "%s/v2/{parent=projects/*/instances/*/tables/*}/authorizedViews" % client.transport._host, args[1], ) -def test_get_snapshot_rest_flattened_error(transport: str = "rest"): +def test_list_authorized_views_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -15543,211 +15764,16 @@ def test_get_snapshot_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.get_snapshot( - bigtable_table_admin.GetSnapshotRequest(), - name="name_value", - ) - - -def test_list_snapshots_rest_use_cached_wrapped_rpc(): - # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, - # instead of constructing them on each call - with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Should wrap all calls on client creation - assert wrapper_fn.call_count > 0 - wrapper_fn.reset_mock() - - # Ensure method has been cached - assert client._transport.list_snapshots in client._transport._wrapped_methods - - # Replace cached wrapped function with mock - mock_rpc = mock.Mock() - mock_rpc.return_value.name = ( - "foo" # operation_request.operation in compute client(s) expect a string. + client.list_authorized_views( + bigtable_table_admin.ListAuthorizedViewsRequest(), + parent="parent_value", ) - client._transport._wrapped_methods[client._transport.list_snapshots] = mock_rpc - request = {} - client.list_snapshots(request) - - # Establish that the underlying gRPC stub method was called. - assert mock_rpc.call_count == 1 - - client.list_snapshots(request) - - # Establish that a new wrapper was not created for this call - assert wrapper_fn.call_count == 0 - assert mock_rpc.call_count == 2 - - -def test_list_snapshots_rest_required_fields( - request_type=bigtable_table_admin.ListSnapshotsRequest, -): - transport_class = transports.BigtableTableAdminRestTransport - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads( - json_format.MessageToJson(pb_request, use_integers_for_enums=False) - ) - - # verify fields with default values are dropped - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_snapshots._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["parent"] = "parent_value" - - unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ).list_snapshots._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. 
- assert not set(unset_fields) - set( - ( - "page_size", - "page_token", - ) - ) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.ListSnapshotsResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, "transcode") as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - "uri": "v1/sample_method", - "method": "get", - "query_params": pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable_table_admin.ListSnapshotsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - response = client.list_snapshots(request) - - expected_params = [("$alt", "json;enum-encoding=int")] - actual_params = req.call_args.kwargs["params"] - assert expected_params == actual_params - - -def test_list_snapshots_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials - ) - - unset_fields = transport.list_snapshots._get_unset_required_fields({}) - assert set(unset_fields) == ( - set( - ( - "pageSize", - "pageToken", - ) - ) - & set(("parent",)) - ) - - -def test_list_snapshots_rest_flattened(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.ListSnapshotsResponse() - - # get arguments that satisfy an http rule for this method - sample_request = { - "parent": "projects/sample1/instances/sample2/clusters/sample3" - } - - # get truthy value for each flattened field - mock_args = dict( - parent="parent_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_table_admin.ListSnapshotsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.list_snapshots(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*/clusters/*}/snapshots" - % client.transport._host, - args[1], - ) - - -def test_list_snapshots_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_snapshots( - bigtable_table_admin.ListSnapshotsRequest(), - parent="parent_value", - ) - - -def test_list_snapshots_rest_pager(transport: str = "rest"): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, +def test_list_authorized_views_rest_pager(transport: str = "rest"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) # Mock the http request call within the method and fake a response. @@ -15756,28 +15782,28 @@ def test_list_snapshots_rest_pager(transport: str = "rest"): # with mock.patch.object(path_template, 'transcode') as transcode: # Set the response as a series of pages response = ( - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - table.Snapshot(), - table.Snapshot(), + bigtable_table_admin.ListAuthorizedViewsResponse( + authorized_views=[ + table.AuthorizedView(), + table.AuthorizedView(), + table.AuthorizedView(), ], next_page_token="abc", ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[], + bigtable_table_admin.ListAuthorizedViewsResponse( + authorized_views=[], next_page_token="def", ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), + bigtable_table_admin.ListAuthorizedViewsResponse( + authorized_views=[ + table.AuthorizedView(), ], next_page_token="ghi", ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - table.Snapshot(), + bigtable_table_admin.ListAuthorizedViewsResponse( + authorized_views=[ + table.AuthorizedView(), + table.AuthorizedView(), ], ), ) @@ -15786,7 +15812,8 @@ def test_list_snapshots_rest_pager(transport: str = "rest"): # Wrap the values into proper Response objs response = tuple( - bigtable_table_admin.ListSnapshotsResponse.to_json(x) for x in response + bigtable_table_admin.ListAuthorizedViewsResponse.to_json(x) + for x in response ) return_values = tuple(Response() for i in response) for return_val, response_val in zip(return_values, response): @@ -15794,22 +15821,20 @@ def test_list_snapshots_rest_pager(transport: str = "rest"): return_val.status_code = 200 req.side_effect = return_values - sample_request = { - "parent": "projects/sample1/instances/sample2/clusters/sample3" - } + sample_request = {"parent": "projects/sample1/instances/sample2/tables/sample3"} - pager = client.list_snapshots(request=sample_request) + pager = client.list_authorized_views(request=sample_request) results = list(pager) assert len(results) == 6 - assert all(isinstance(i, table.Snapshot) for i in results) + assert all(isinstance(i, table.AuthorizedView) for i in results) - pages = list(client.list_snapshots(request=sample_request).pages) + pages = list(client.list_authorized_views(request=sample_request).pages) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token -def test_delete_snapshot_rest_use_cached_wrapped_rpc(): +def 
test_get_authorized_view_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -15823,30 +15848,34 @@ def test_delete_snapshot_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.delete_snapshot in client._transport._wrapped_methods + assert ( + client._transport.get_authorized_view in client._transport._wrapped_methods + ) # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[client._transport.delete_snapshot] = mock_rpc + client._transport._wrapped_methods[ + client._transport.get_authorized_view + ] = mock_rpc request = {} - client.delete_snapshot(request) + client.get_authorized_view(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - client.delete_snapshot(request) + client.get_authorized_view(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_delete_snapshot_rest_required_fields( - request_type=bigtable_table_admin.DeleteSnapshotRequest, +def test_get_authorized_view_rest_required_fields( + request_type=bigtable_table_admin.GetAuthorizedViewRequest, ): transport_class = transports.BigtableTableAdminRestTransport @@ -15862,7 +15891,7 @@ def test_delete_snapshot_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).delete_snapshot._get_unset_required_fields(jsonified_request) + ).get_authorized_view._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present @@ -15871,7 +15900,9 @@ def test_delete_snapshot_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).delete_snapshot._get_unset_required_fields(jsonified_request) + ).get_authorized_view._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("view",)) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone @@ -15885,7 +15916,7 @@ def test_delete_snapshot_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = None + return_value = table.AuthorizedView() # Mock the http request call within the method and fake a response. 
with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -15897,36 +15928,39 @@ def test_delete_snapshot_rest_required_fields( pb_request = request_type.pb(request) transcode_result = { "uri": "v1/sample_method", - "method": "delete", + "method": "get", "query_params": pb_request, } transcode.return_value = transcode_result response_value = Response() response_value.status_code = 200 - json_return_value = "" + + # Convert return value to protobuf type + return_value = table.AuthorizedView.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.delete_snapshot(request) + response = client.get_authorized_view(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_delete_snapshot_rest_unset_required_fields(): +def test_get_authorized_view_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.delete_snapshot._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) + unset_fields = transport.get_authorized_view._get_unset_required_fields({}) + assert set(unset_fields) == (set(("view",)) & set(("name",))) -def test_delete_snapshot_rest_flattened(): +def test_get_authorized_view_rest_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -15935,11 +15969,11 @@ def test_delete_snapshot_rest_flattened(): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = None + return_value = table.AuthorizedView() # get arguments that satisfy an http rule for this method sample_request = { - "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4" + "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" } # get truthy value for each flattened field @@ -15951,25 +15985,27 @@ def test_delete_snapshot_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - json_return_value = "" + # Convert return value to protobuf type + return_value = table.AuthorizedView.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.delete_snapshot(**mock_args) + client.get_authorized_view(**mock_args) # Establish that the underlying call was made with the expected # request object values. 
assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}" + "%s/v2/{name=projects/*/instances/*/tables/*/authorizedViews/*}" % client.transport._host, args[1], ) -def test_delete_snapshot_rest_flattened_error(transport: str = "rest"): +def test_get_authorized_view_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -15978,13 +16014,13 @@ def test_delete_snapshot_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.delete_snapshot( - bigtable_table_admin.DeleteSnapshotRequest(), + client.get_authorized_view( + bigtable_table_admin.GetAuthorizedViewRequest(), name="name_value", ) -def test_create_backup_rest_use_cached_wrapped_rpc(): +def test_update_authorized_view_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -15998,17 +16034,22 @@ def test_create_backup_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.create_backup in client._transport._wrapped_methods + assert ( + client._transport.update_authorized_view + in client._transport._wrapped_methods + ) # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[client._transport.create_backup] = mock_rpc + client._transport._wrapped_methods[ + client._transport.update_authorized_view + ] = mock_rpc request = {} - client.create_backup(request) + client.update_authorized_view(request) # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 @@ -16017,21 +16058,19 @@ def test_create_backup_rest_use_cached_wrapped_rpc(): # subsequent calls should use the cached wrapper wrapper_fn.reset_mock() - client.create_backup(request) + client.update_authorized_view(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_create_backup_rest_required_fields( - request_type=bigtable_table_admin.CreateBackupRequest, +def test_update_authorized_view_rest_required_fields( + request_type=bigtable_table_admin.UpdateAuthorizedViewRequest, ): transport_class = transports.BigtableTableAdminRestTransport request_init = {} - request_init["parent"] = "" - request_init["backup_id"] = "" request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( @@ -16039,32 +16078,27 @@ def test_create_backup_rest_required_fields( ) # verify fields with default values are dropped - assert "backupId" not in jsonified_request unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).create_backup._get_unset_required_fields(jsonified_request) + ).update_authorized_view._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present - assert "backupId" in jsonified_request - assert jsonified_request["backupId"] == request_init["backup_id"] - - jsonified_request["parent"] = "parent_value" - jsonified_request["backupId"] = "backup_id_value" unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).create_backup._get_unset_required_fields(jsonified_request) + ).update_authorized_view._get_unset_required_fields(jsonified_request) # Check that path parameters and body parameters are not mixing in. 
- assert not set(unset_fields) - set(("backup_id",)) + assert not set(unset_fields) - set( + ( + "ignore_warnings", + "update_mask", + ) + ) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - assert "backupId" in jsonified_request - assert jsonified_request["backupId"] == "backup_id_value" client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -16085,7 +16119,7 @@ def test_create_backup_rest_required_fields( pb_request = request_type.pb(request) transcode_result = { "uri": "v1/sample_method", - "method": "post", + "method": "patch", "query_params": pb_request, } transcode_result["body"] = pb_request @@ -16099,38 +16133,31 @@ def test_create_backup_rest_required_fields( req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.create_backup(request) + response = client.update_authorized_view(request) - expected_params = [ - ( - "backupId", - "", - ), - ("$alt", "json;enum-encoding=int"), - ] + expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_create_backup_rest_unset_required_fields(): +def test_update_authorized_view_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.create_backup._get_unset_required_fields({}) + unset_fields = transport.update_authorized_view._get_unset_required_fields({}) assert set(unset_fields) == ( - set(("backupId",)) - & set( + set( ( - "parent", - "backupId", - "backup", + "ignoreWarnings", + "updateMask", ) ) + & set(("authorizedView",)) ) -def test_create_backup_rest_flattened(): +def test_update_authorized_view_rest_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -16143,14 +16170,15 @@ def test_create_backup_rest_flattened(): # get arguments that satisfy an http rule for this method sample_request = { - "parent": "projects/sample1/instances/sample2/clusters/sample3" + "authorized_view": { + "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } } # get truthy value for each flattened field mock_args = dict( - parent="parent_value", - backup_id="backup_id_value", - backup=table.Backup(name="name_value"), + authorized_view=table.AuthorizedView(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) mock_args.update(sample_request) @@ -16162,20 +16190,20 @@ def test_create_backup_rest_flattened(): req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.create_backup(**mock_args) + client.update_authorized_view(**mock_args) # Establish that the underlying call was made with the expected # request object values. 
assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*/clusters/*}/backups" + "%s/v2/{authorized_view.name=projects/*/instances/*/tables/*/authorizedViews/*}" % client.transport._host, args[1], ) -def test_create_backup_rest_flattened_error(transport: str = "rest"): +def test_update_authorized_view_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -16184,15 +16212,14 @@ def test_create_backup_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.create_backup( - bigtable_table_admin.CreateBackupRequest(), - parent="parent_value", - backup_id="backup_id_value", - backup=table.Backup(name="name_value"), + client.update_authorized_view( + bigtable_table_admin.UpdateAuthorizedViewRequest(), + authorized_view=table.AuthorizedView(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) -def test_get_backup_rest_use_cached_wrapped_rpc(): +def test_delete_authorized_view_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -16206,30 +16233,35 @@ def test_get_backup_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.get_backup in client._transport._wrapped_methods + assert ( + client._transport.delete_authorized_view + in client._transport._wrapped_methods + ) # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[client._transport.get_backup] = mock_rpc + client._transport._wrapped_methods[ + client._transport.delete_authorized_view + ] = mock_rpc request = {} - client.get_backup(request) + client.delete_authorized_view(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - client.get_backup(request) + client.delete_authorized_view(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_get_backup_rest_required_fields( - request_type=bigtable_table_admin.GetBackupRequest, +def test_delete_authorized_view_rest_required_fields( + request_type=bigtable_table_admin.DeleteAuthorizedViewRequest, ): transport_class = transports.BigtableTableAdminRestTransport @@ -16245,7 +16277,7 @@ def test_get_backup_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).get_backup._get_unset_required_fields(jsonified_request) + ).delete_authorized_view._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present @@ -16254,7 +16286,9 @@ def test_get_backup_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).get_backup._get_unset_required_fields(jsonified_request) + ).delete_authorized_view._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. 
+ assert not set(unset_fields) - set(("etag",)) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone @@ -16268,7 +16302,7 @@ def test_get_backup_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = table.Backup() + return_value = None # Mock the http request call within the method and fake a response. with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -16280,39 +16314,36 @@ def test_get_backup_rest_required_fields( pb_request = request_type.pb(request) transcode_result = { "uri": "v1/sample_method", - "method": "get", + "method": "delete", "query_params": pb_request, } transcode.return_value = transcode_result response_value = Response() response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = table.Backup.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) + json_return_value = "" response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.get_backup(request) + response = client.delete_authorized_view(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_get_backup_rest_unset_required_fields(): +def test_delete_authorized_view_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.get_backup._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name",))) + unset_fields = transport.delete_authorized_view._get_unset_required_fields({}) + assert set(unset_fields) == (set(("etag",)) & set(("name",))) -def test_get_backup_rest_flattened(): +def test_delete_authorized_view_rest_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -16321,11 +16352,11 @@ def test_get_backup_rest_flattened(): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = table.Backup() + return_value = None # get arguments that satisfy an http rule for this method sample_request = { - "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" + "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" } # get truthy value for each flattened field @@ -16337,27 +16368,25 @@ def test_get_backup_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - # Convert return value to protobuf type - return_value = table.Backup.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) + json_return_value = "" response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.get_backup(**mock_args) + client.delete_authorized_view(**mock_args) # Establish that the underlying call was made with the expected # request object values. 
assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/clusters/*/backups/*}" + "%s/v2/{name=projects/*/instances/*/tables/*/authorizedViews/*}" % client.transport._host, args[1], ) -def test_get_backup_rest_flattened_error(transport: str = "rest"): +def test_delete_authorized_view_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -16366,13 +16395,13 @@ def test_get_backup_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.get_backup( - bigtable_table_admin.GetBackupRequest(), + client.delete_authorized_view( + bigtable_table_admin.DeleteAuthorizedViewRequest(), name="name_value", ) -def test_update_backup_rest_use_cached_wrapped_rpc(): +def test_modify_column_families_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -16386,34 +16415,40 @@ def test_update_backup_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.update_backup in client._transport._wrapped_methods + assert ( + client._transport.modify_column_families + in client._transport._wrapped_methods + ) # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[client._transport.update_backup] = mock_rpc + client._transport._wrapped_methods[ + client._transport.modify_column_families + ] = mock_rpc request = {} - client.update_backup(request) + client.modify_column_families(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - client.update_backup(request) + client.modify_column_families(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_update_backup_rest_required_fields( - request_type=bigtable_table_admin.UpdateBackupRequest, +def test_modify_column_families_rest_required_fields( + request_type=bigtable_table_admin.ModifyColumnFamiliesRequest, ): transport_class = transports.BigtableTableAdminRestTransport request_init = {} + request_init["name"] = "" request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( @@ -16424,19 +16459,21 @@ def test_update_backup_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).update_backup._get_unset_required_fields(jsonified_request) + ).modify_column_families._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present + jsonified_request["name"] = "name_value" + unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).update_backup._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. 
- assert not set(unset_fields) - set(("update_mask",)) + ).modify_column_families._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -16445,7 +16482,7 @@ def test_update_backup_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = table.Backup() + return_value = table.Table() # Mock the http request call within the method and fake a response. with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -16457,7 +16494,7 @@ def test_update_backup_rest_required_fields( pb_request = request_type.pb(request) transcode_result = { "uri": "v1/sample_method", - "method": "patch", + "method": "post", "query_params": pb_request, } transcode_result["body"] = pb_request @@ -16467,38 +16504,38 @@ def test_update_backup_rest_required_fields( response_value.status_code = 200 # Convert return value to protobuf type - return_value = table.Backup.pb(return_value) + return_value = table.Table.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.update_backup(request) + response = client.modify_column_families(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_update_backup_rest_unset_required_fields(): +def test_modify_column_families_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.update_backup._get_unset_required_fields({}) + unset_fields = transport.modify_column_families._get_unset_required_fields({}) assert set(unset_fields) == ( - set(("updateMask",)) + set(()) & set( ( - "backup", - "updateMask", + "name", + "modifications", ) ) ) -def test_update_backup_rest_flattened(): +def test_modify_column_families_rest_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -16507,19 +16544,19 @@ def test_update_backup_rest_flattened(): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. 
- return_value = table.Backup() + return_value = table.Table() # get arguments that satisfy an http rule for this method - sample_request = { - "backup": { - "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" - } - } + sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} # get truthy value for each flattened field mock_args = dict( - backup=table.Backup(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + name="name_value", + modifications=[ + bigtable_table_admin.ModifyColumnFamiliesRequest.Modification( + id="id_value" + ) + ], ) mock_args.update(sample_request) @@ -16527,26 +16564,26 @@ def test_update_backup_rest_flattened(): response_value = Response() response_value.status_code = 200 # Convert return value to protobuf type - return_value = table.Backup.pb(return_value) + return_value = table.Table.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.update_backup(**mock_args) + client.modify_column_families(**mock_args) # Establish that the underlying call was made with the expected # request object values. assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{backup.name=projects/*/instances/*/clusters/*/backups/*}" + "%s/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies" % client.transport._host, args[1], ) -def test_update_backup_rest_flattened_error(transport: str = "rest"): +def test_modify_column_families_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -16555,14 +16592,18 @@ def test_update_backup_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.update_backup( - bigtable_table_admin.UpdateBackupRequest(), - backup=table.Backup(name="name_value"), - update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + client.modify_column_families( + bigtable_table_admin.ModifyColumnFamiliesRequest(), + name="name_value", + modifications=[ + bigtable_table_admin.ModifyColumnFamiliesRequest.Modification( + id="id_value" + ) + ], ) -def test_delete_backup_rest_use_cached_wrapped_rpc(): +def test_drop_row_range_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -16576,30 +16617,30 @@ def test_delete_backup_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.delete_backup in client._transport._wrapped_methods + assert client._transport.drop_row_range in client._transport._wrapped_methods # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[client._transport.delete_backup] = mock_rpc + client._transport._wrapped_methods[client._transport.drop_row_range] = mock_rpc request = {} - client.delete_backup(request) + client.drop_row_range(request) # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 - client.delete_backup(request) + client.drop_row_range(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_delete_backup_rest_required_fields( - request_type=bigtable_table_admin.DeleteBackupRequest, +def test_drop_row_range_rest_required_fields( + request_type=bigtable_table_admin.DropRowRangeRequest, ): transport_class = transports.BigtableTableAdminRestTransport @@ -16615,7 +16656,7 @@ def test_delete_backup_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).delete_backup._get_unset_required_fields(jsonified_request) + ).drop_row_range._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present @@ -16624,7 +16665,7 @@ def test_delete_backup_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).delete_backup._get_unset_required_fields(jsonified_request) + ).drop_row_range._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone @@ -16650,9 +16691,10 @@ def test_delete_backup_rest_required_fields( pb_request = request_type.pb(request) transcode_result = { "uri": "v1/sample_method", - "method": "delete", + "method": "post", "query_params": pb_request, } + transcode_result["body"] = pb_request transcode.return_value = transcode_result response_value = Response() @@ -16663,81 +16705,23 @@ def test_delete_backup_rest_required_fields( req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.delete_backup(request) + response = client.drop_row_range(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_delete_backup_rest_unset_required_fields(): +def test_drop_row_range_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.delete_backup._get_unset_required_fields({}) + unset_fields = transport.drop_row_range._get_unset_required_fields({}) assert set(unset_fields) == (set(()) & set(("name",))) -def test_delete_backup_rest_flattened(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = None - - # get arguments that satisfy an http rule for this method - sample_request = { - "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" - } - - # get truthy value for each flattened field - mock_args = dict( - name="name_value", - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = "" - response_value._content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - client.delete_backup(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate( - "%s/v2/{name=projects/*/instances/*/clusters/*/backups/*}" - % client.transport._host, - args[1], - ) - - -def test_delete_backup_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_backup( - bigtable_table_admin.DeleteBackupRequest(), - name="name_value", - ) - - -def test_list_backups_rest_use_cached_wrapped_rpc(): +def test_generate_consistency_token_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -16751,35 +16735,40 @@ def test_list_backups_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.list_backups in client._transport._wrapped_methods + assert ( + client._transport.generate_consistency_token + in client._transport._wrapped_methods + ) # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[client._transport.list_backups] = mock_rpc + client._transport._wrapped_methods[ + client._transport.generate_consistency_token + ] = mock_rpc request = {} - client.list_backups(request) + client.generate_consistency_token(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - client.list_backups(request) + client.generate_consistency_token(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_list_backups_rest_required_fields( - request_type=bigtable_table_admin.ListBackupsRequest, +def test_generate_consistency_token_rest_required_fields( + request_type=bigtable_table_admin.GenerateConsistencyTokenRequest, ): transport_class = transports.BigtableTableAdminRestTransport request_init = {} - request_init["parent"] = "" + request_init["name"] = "" request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( @@ -16790,30 +16779,21 @@ def test_list_backups_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).list_backups._get_unset_required_fields(jsonified_request) + ).generate_consistency_token._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present - jsonified_request["parent"] = "parent_value" + jsonified_request["name"] = "name_value" unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).list_backups._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. 
- assert not set(unset_fields) - set( - ( - "filter", - "order_by", - "page_size", - "page_token", - ) - ) + ).generate_consistency_token._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -16822,7 +16802,7 @@ def test_list_backups_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.ListBackupsResponse() + return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() # Mock the http request call within the method and fake a response. with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -16834,49 +16814,42 @@ def test_list_backups_rest_required_fields( pb_request = request_type.pb(request) transcode_result = { "uri": "v1/sample_method", - "method": "get", + "method": "post", "query_params": pb_request, } + transcode_result["body"] = pb_request transcode.return_value = transcode_result response_value = Response() response_value.status_code = 200 # Convert return value to protobuf type - return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value) + return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb( + return_value + ) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.list_backups(request) + response = client.generate_consistency_token(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_list_backups_rest_unset_required_fields(): +def test_generate_consistency_token_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.list_backups._get_unset_required_fields({}) - assert set(unset_fields) == ( - set( - ( - "filter", - "orderBy", - "pageSize", - "pageToken", - ) - ) - & set(("parent",)) - ) + unset_fields = transport.generate_consistency_token._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) -def test_list_backups_rest_flattened(): +def test_generate_consistency_token_rest_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -16885,16 +16858,14 @@ def test_list_backups_rest_flattened(): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. 
- return_value = bigtable_table_admin.ListBackupsResponse() + return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() # get arguments that satisfy an http rule for this method - sample_request = { - "parent": "projects/sample1/instances/sample2/clusters/sample3" - } + sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} # get truthy value for each flattened field mock_args = dict( - parent="parent_value", + name="name_value", ) mock_args.update(sample_request) @@ -16902,26 +16873,28 @@ def test_list_backups_rest_flattened(): response_value = Response() response_value.status_code = 200 # Convert return value to protobuf type - return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value) + return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb( + return_value + ) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.list_backups(**mock_args) + client.generate_consistency_token(**mock_args) # Establish that the underlying call was made with the expected # request object values. assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*/clusters/*}/backups" + "%s/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken" % client.transport._host, args[1], ) -def test_list_backups_rest_flattened_error(transport: str = "rest"): +def test_generate_consistency_token_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -16930,78 +16903,13 @@ def test_list_backups_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.list_backups( - bigtable_table_admin.ListBackupsRequest(), - parent="parent_value", - ) - - -def test_list_backups_rest_pager(transport: str = "rest"): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # TODO(kbandes): remove this mock unless there's a good reason for it. 
- # with mock.patch.object(path_template, 'transcode') as transcode: - # Set the response as a series of pages - response = ( - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - table.Backup(), - table.Backup(), - ], - next_page_token="abc", - ), - bigtable_table_admin.ListBackupsResponse( - backups=[], - next_page_token="def", - ), - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - ], - next_page_token="ghi", - ), - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - table.Backup(), - ], - ), - ) - # Two responses for two calls - response = response + response - - # Wrap the values into proper Response objs - response = tuple( - bigtable_table_admin.ListBackupsResponse.to_json(x) for x in response + client.generate_consistency_token( + bigtable_table_admin.GenerateConsistencyTokenRequest(), + name="name_value", ) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = response_val.encode("UTF-8") - return_val.status_code = 200 - req.side_effect = return_values - - sample_request = { - "parent": "projects/sample1/instances/sample2/clusters/sample3" - } - - pager = client.list_backups(request=sample_request) - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, table.Backup) for i in results) - - pages = list(client.list_backups(request=sample_request).pages) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token -def test_restore_table_rest_use_cached_wrapped_rpc(): +def test_check_consistency_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -17015,40 +16923,38 @@ def test_restore_table_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.restore_table in client._transport._wrapped_methods + assert client._transport.check_consistency in client._transport._wrapped_methods # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[client._transport.restore_table] = mock_rpc + client._transport._wrapped_methods[ + client._transport.check_consistency + ] = mock_rpc request = {} - client.restore_table(request) + client.check_consistency(request) # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 - # Operation methods build a cached wrapper on first rpc call - # subsequent calls should use the cached wrapper - wrapper_fn.reset_mock() - - client.restore_table(request) + client.check_consistency(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_restore_table_rest_required_fields( - request_type=bigtable_table_admin.RestoreTableRequest, +def test_check_consistency_rest_required_fields( + request_type=bigtable_table_admin.CheckConsistencyRequest, ): transport_class = transports.BigtableTableAdminRestTransport request_init = {} - request_init["parent"] = "" - request_init["table_id"] = "" + request_init["name"] = "" + request_init["consistency_token"] = "" request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( @@ -17059,24 +16965,24 @@ def test_restore_table_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).restore_table._get_unset_required_fields(jsonified_request) + ).check_consistency._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present - jsonified_request["parent"] = "parent_value" - jsonified_request["tableId"] = "table_id_value" + jsonified_request["name"] = "name_value" + jsonified_request["consistencyToken"] = "consistency_token_value" unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).restore_table._get_unset_required_fields(jsonified_request) + ).check_consistency._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - assert "tableId" in jsonified_request - assert jsonified_request["tableId"] == "table_id_value" + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + assert "consistencyToken" in jsonified_request + assert jsonified_request["consistencyToken"] == "consistency_token_value" client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -17085,7 +16991,7 @@ def test_restore_table_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") + return_value = bigtable_table_admin.CheckConsistencyResponse() # Mock the http request call within the method and fake a response. 
with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -17105,37 +17011,102 @@ def test_restore_table_rest_required_fields( response_value = Response() response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable_table_admin.CheckConsistencyResponse.pb( + return_value + ) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.restore_table(request) + response = client.check_consistency(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_restore_table_rest_unset_required_fields(): +def test_check_consistency_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.restore_table._get_unset_required_fields({}) + unset_fields = transport.check_consistency._get_unset_required_fields({}) assert set(unset_fields) == ( set(()) & set( ( - "parent", - "tableId", + "name", + "consistencyToken", ) ) ) -def test_copy_backup_rest_use_cached_wrapped_rpc(): +def test_check_consistency_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.CheckConsistencyResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + consistency_token="consistency_token_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable_table_admin.CheckConsistencyResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.check_consistency(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/tables/*}:checkConsistency" + % client.transport._host, + args[1], + ) + + +def test_check_consistency_rest_flattened_error(transport: str = "rest"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.check_consistency( + bigtable_table_admin.CheckConsistencyRequest(), + name="name_value", + consistency_token="consistency_token_value", + ) + + +def test_snapshot_table_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -17149,17 +17120,17 @@ def test_copy_backup_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.copy_backup in client._transport._wrapped_methods + assert client._transport.snapshot_table in client._transport._wrapped_methods # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[client._transport.copy_backup] = mock_rpc + client._transport._wrapped_methods[client._transport.snapshot_table] = mock_rpc request = {} - client.copy_backup(request) + client.snapshot_table(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 @@ -17168,22 +17139,22 @@ def test_copy_backup_rest_use_cached_wrapped_rpc(): # subsequent calls should use the cached wrapper wrapper_fn.reset_mock() - client.copy_backup(request) + client.snapshot_table(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_copy_backup_rest_required_fields( - request_type=bigtable_table_admin.CopyBackupRequest, +def test_snapshot_table_rest_required_fields( + request_type=bigtable_table_admin.SnapshotTableRequest, ): transport_class = transports.BigtableTableAdminRestTransport request_init = {} - request_init["parent"] = "" - request_init["backup_id"] = "" - request_init["source_backup"] = "" + request_init["name"] = "" + request_init["cluster"] = "" + request_init["snapshot_id"] = "" request = request_type(**request_init) pb_request = request_type.pb(request) jsonified_request = json.loads( @@ -17194,27 +17165,27 @@ def test_copy_backup_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).copy_backup._get_unset_required_fields(jsonified_request) + ).snapshot_table._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present - jsonified_request["parent"] = "parent_value" - jsonified_request["backupId"] = "backup_id_value" - jsonified_request["sourceBackup"] = "source_backup_value" + jsonified_request["name"] = "name_value" + jsonified_request["cluster"] = "cluster_value" + jsonified_request["snapshotId"] = "snapshot_id_value" unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).copy_backup._get_unset_required_fields(jsonified_request) + ).snapshot_table._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == "parent_value" - assert "backupId" in jsonified_request - assert jsonified_request["backupId"] == "backup_id_value" - assert "sourceBackup" in jsonified_request - assert jsonified_request["sourceBackup"] == "source_backup_value" + assert "name" in jsonified_request + assert jsonified_request["name"] == 
"name_value" + assert "cluster" in jsonified_request + assert jsonified_request["cluster"] == "cluster_value" + assert "snapshotId" in jsonified_request + assert jsonified_request["snapshotId"] == "snapshot_id_value" client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -17249,33 +17220,32 @@ def test_copy_backup_rest_required_fields( req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.copy_backup(request) + response = client.snapshot_table(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_copy_backup_rest_unset_required_fields(): +def test_snapshot_table_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.copy_backup._get_unset_required_fields({}) + unset_fields = transport.snapshot_table._get_unset_required_fields({}) assert set(unset_fields) == ( set(()) & set( ( - "parent", - "backupId", - "sourceBackup", - "expireTime", + "name", + "cluster", + "snapshotId", ) ) ) -def test_copy_backup_rest_flattened(): +def test_snapshot_table_rest_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -17287,16 +17257,14 @@ def test_copy_backup_rest_flattened(): return_value = operations_pb2.Operation(name="operations/spam") # get arguments that satisfy an http rule for this method - sample_request = { - "parent": "projects/sample1/instances/sample2/clusters/sample3" - } + sample_request = {"name": "projects/sample1/instances/sample2/tables/sample3"} # get truthy value for each flattened field mock_args = dict( - parent="parent_value", - backup_id="backup_id_value", - source_backup="source_backup_value", - expire_time=timestamp_pb2.Timestamp(seconds=751), + name="name_value", + cluster="cluster_value", + snapshot_id="snapshot_id_value", + description="description_value", ) mock_args.update(sample_request) @@ -17308,20 +17276,20 @@ def test_copy_backup_rest_flattened(): req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.copy_backup(**mock_args) + client.snapshot_table(**mock_args) # Establish that the underlying call was made with the expected # request object values. assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{parent=projects/*/instances/*/clusters/*}/backups:copy" + "%s/v2/{name=projects/*/instances/*/tables/*}:snapshot" % client.transport._host, args[1], ) -def test_copy_backup_rest_flattened_error(transport: str = "rest"): +def test_snapshot_table_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -17330,16 +17298,16 @@ def test_copy_backup_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. 
with pytest.raises(ValueError): - client.copy_backup( - bigtable_table_admin.CopyBackupRequest(), - parent="parent_value", - backup_id="backup_id_value", - source_backup="source_backup_value", - expire_time=timestamp_pb2.Timestamp(seconds=751), + client.snapshot_table( + bigtable_table_admin.SnapshotTableRequest(), + name="name_value", + cluster="cluster_value", + snapshot_id="snapshot_id_value", + description="description_value", ) -def test_get_iam_policy_rest_use_cached_wrapped_rpc(): +def test_get_snapshot_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -17353,37 +17321,37 @@ def test_get_iam_policy_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.get_iam_policy in client._transport._wrapped_methods + assert client._transport.get_snapshot in client._transport._wrapped_methods # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[client._transport.get_iam_policy] = mock_rpc + client._transport._wrapped_methods[client._transport.get_snapshot] = mock_rpc request = {} - client.get_iam_policy(request) + client.get_snapshot(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - client.get_iam_policy(request) + client.get_snapshot(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_get_iam_policy_rest_required_fields( - request_type=iam_policy_pb2.GetIamPolicyRequest, +def test_get_snapshot_rest_required_fields( + request_type=bigtable_table_admin.GetSnapshotRequest, ): transport_class = transports.BigtableTableAdminRestTransport request_init = {} - request_init["resource"] = "" + request_init["name"] = "" request = request_type(**request_init) - pb_request = request + pb_request = request_type.pb(request) jsonified_request = json.loads( json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) @@ -17392,21 +17360,21 @@ def test_get_iam_policy_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).get_iam_policy._get_unset_required_fields(jsonified_request) + ).get_snapshot._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present - jsonified_request["resource"] = "resource_value" + jsonified_request["name"] = "name_value" unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).get_iam_policy._get_unset_required_fields(jsonified_request) + ).get_snapshot._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone - assert "resource" in jsonified_request - assert jsonified_request["resource"] == "resource_value" + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -17415,7 +17383,7 @@ def test_get_iam_policy_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. 
- return_value = policy_pb2.Policy() + return_value = table.Snapshot() # Mock the http request call within the method and fake a response. with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -17424,41 +17392,42 @@ def test_get_iam_policy_rest_required_fields( with mock.patch.object(path_template, "transcode") as transcode: # A uri without fields and an empty body will force all the # request fields to show up in the query_params. - pb_request = request + pb_request = request_type.pb(request) transcode_result = { "uri": "v1/sample_method", - "method": "post", + "method": "get", "query_params": pb_request, } - transcode_result["body"] = pb_request transcode.return_value = transcode_result response_value = Response() response_value.status_code = 200 + # Convert return value to protobuf type + return_value = table.Snapshot.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.get_iam_policy(request) + response = client.get_snapshot(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_get_iam_policy_rest_unset_required_fields(): +def test_get_snapshot_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.get_iam_policy._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("resource",))) + unset_fields = transport.get_snapshot._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) -def test_get_iam_policy_rest_flattened(): +def test_get_snapshot_rest_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -17467,41 +17436,43 @@ def test_get_iam_policy_rest_flattened(): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = policy_pb2.Policy() + return_value = table.Snapshot() # get arguments that satisfy an http rule for this method sample_request = { - "resource": "projects/sample1/instances/sample2/tables/sample3" + "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4" } # get truthy value for each flattened field mock_args = dict( - resource="resource_value", + name="name_value", ) mock_args.update(sample_request) # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 + # Convert return value to protobuf type + return_value = table.Snapshot.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.get_iam_policy(**mock_args) + client.get_snapshot(**mock_args) # Establish that the underlying call was made with the expected # request object values. 
assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{resource=projects/*/instances/*/tables/*}:getIamPolicy" + "%s/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}" % client.transport._host, args[1], ) -def test_get_iam_policy_rest_flattened_error(transport: str = "rest"): +def test_get_snapshot_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -17510,13 +17481,13 @@ def test_get_iam_policy_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.get_iam_policy( - iam_policy_pb2.GetIamPolicyRequest(), - resource="resource_value", + client.get_snapshot( + bigtable_table_admin.GetSnapshotRequest(), + name="name_value", ) -def test_set_iam_policy_rest_use_cached_wrapped_rpc(): +def test_list_snapshots_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -17530,37 +17501,37 @@ def test_set_iam_policy_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert client._transport.set_iam_policy in client._transport._wrapped_methods + assert client._transport.list_snapshots in client._transport._wrapped_methods # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[client._transport.set_iam_policy] = mock_rpc + client._transport._wrapped_methods[client._transport.list_snapshots] = mock_rpc request = {} - client.set_iam_policy(request) + client.list_snapshots(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - client.set_iam_policy(request) + client.list_snapshots(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_set_iam_policy_rest_required_fields( - request_type=iam_policy_pb2.SetIamPolicyRequest, +def test_list_snapshots_rest_required_fields( + request_type=bigtable_table_admin.ListSnapshotsRequest, ): transport_class = transports.BigtableTableAdminRestTransport request_init = {} - request_init["resource"] = "" + request_init["parent"] = "" request = request_type(**request_init) - pb_request = request + pb_request = request_type.pb(request) jsonified_request = json.loads( json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) @@ -17569,21 +17540,28 @@ def test_set_iam_policy_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).set_iam_policy._get_unset_required_fields(jsonified_request) + ).list_snapshots._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present - jsonified_request["resource"] = "resource_value" + jsonified_request["parent"] = "parent_value" unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).set_iam_policy._get_unset_required_fields(jsonified_request) + ).list_snapshots._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. 
+ assert not set(unset_fields) - set( + ( + "page_size", + "page_token", + ) + ) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone - assert "resource" in jsonified_request - assert jsonified_request["resource"] == "resource_value" + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -17592,7 +17570,7 @@ def test_set_iam_policy_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = policy_pb2.Policy() + return_value = bigtable_table_admin.ListSnapshotsResponse() # Mock the http request call within the method and fake a response. with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -17601,49 +17579,50 @@ def test_set_iam_policy_rest_required_fields( with mock.patch.object(path_template, "transcode") as transcode: # A uri without fields and an empty body will force all the # request fields to show up in the query_params. - pb_request = request + pb_request = request_type.pb(request) transcode_result = { "uri": "v1/sample_method", - "method": "post", + "method": "get", "query_params": pb_request, } - transcode_result["body"] = pb_request transcode.return_value = transcode_result response_value = Response() response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListSnapshotsResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.set_iam_policy(request) + response = client.list_snapshots(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_set_iam_policy_rest_unset_required_fields(): +def test_list_snapshots_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.set_iam_policy._get_unset_required_fields({}) + unset_fields = transport.list_snapshots._get_unset_required_fields({}) assert set(unset_fields) == ( - set(()) - & set( + set( ( - "resource", - "policy", + "pageSize", + "pageToken", ) ) + & set(("parent",)) ) -def test_set_iam_policy_rest_flattened(): +def test_list_snapshots_rest_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -17652,41 +17631,43 @@ def test_set_iam_policy_rest_flattened(): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. 
- return_value = policy_pb2.Policy() + return_value = bigtable_table_admin.ListSnapshotsResponse() # get arguments that satisfy an http rule for this method sample_request = { - "resource": "projects/sample1/instances/sample2/tables/sample3" + "parent": "projects/sample1/instances/sample2/clusters/sample3" } # get truthy value for each flattened field mock_args = dict( - resource="resource_value", + parent="parent_value", ) mock_args.update(sample_request) # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListSnapshotsResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.set_iam_policy(**mock_args) + client.list_snapshots(**mock_args) # Establish that the underlying call was made with the expected # request object values. assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{resource=projects/*/instances/*/tables/*}:setIamPolicy" + "%s/v2/{parent=projects/*/instances/*/clusters/*}/snapshots" % client.transport._host, args[1], ) -def test_set_iam_policy_rest_flattened_error(transport: str = "rest"): +def test_list_snapshots_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -17695,13 +17676,78 @@ def test_set_iam_policy_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.set_iam_policy( - iam_policy_pb2.SetIamPolicyRequest(), - resource="resource_value", + client.list_snapshots( + bigtable_table_admin.ListSnapshotsRequest(), + parent="parent_value", ) -def test_test_iam_permissions_rest_use_cached_wrapped_rpc(): +def test_list_snapshots_rest_pager(transport: str = "rest"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[ + table.Snapshot(), + table.Snapshot(), + table.Snapshot(), + ], + next_page_token="abc", + ), + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[], + next_page_token="def", + ), + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[ + table.Snapshot(), + ], + next_page_token="ghi", + ), + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[ + table.Snapshot(), + table.Snapshot(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + bigtable_table_admin.ListSnapshotsResponse.to_json(x) for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = { + "parent": "projects/sample1/instances/sample2/clusters/sample3" + } + + pager = client.list_snapshots(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, table.Snapshot) for i in results) + + pages = list(client.list_snapshots(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_delete_snapshot_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -17715,42 +17761,37 @@ def test_test_iam_permissions_rest_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert ( - client._transport.test_iam_permissions in client._transport._wrapped_methods - ) + assert client._transport.delete_snapshot in client._transport._wrapped_methods # Replace cached wrapped function with mock mock_rpc = mock.Mock() mock_rpc.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client._transport._wrapped_methods[ - client._transport.test_iam_permissions - ] = mock_rpc + client._transport._wrapped_methods[client._transport.delete_snapshot] = mock_rpc request = {} - client.test_iam_permissions(request) + client.delete_snapshot(request) # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 - client.test_iam_permissions(request) + client.delete_snapshot(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_test_iam_permissions_rest_required_fields( - request_type=iam_policy_pb2.TestIamPermissionsRequest, +def test_delete_snapshot_rest_required_fields( + request_type=bigtable_table_admin.DeleteSnapshotRequest, ): transport_class = transports.BigtableTableAdminRestTransport request_init = {} - request_init["resource"] = "" - request_init["permissions"] = "" + request_init["name"] = "" request = request_type(**request_init) - pb_request = request + pb_request = request_type.pb(request) jsonified_request = json.loads( json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) @@ -17759,24 +17800,21 @@ def test_test_iam_permissions_rest_required_fields( unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).test_iam_permissions._get_unset_required_fields(jsonified_request) + ).delete_snapshot._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present - jsonified_request["resource"] = "resource_value" - jsonified_request["permissions"] = "permissions_value" + jsonified_request["name"] = "name_value" unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ).test_iam_permissions._get_unset_required_fields(jsonified_request) + ).delete_snapshot._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with non-default values are left alone - assert "resource" in jsonified_request - assert jsonified_request["resource"] == "resource_value" - assert "permissions" in jsonified_request - assert jsonified_request["permissions"] == "permissions_value" + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -17785,7 +17823,7 @@ def test_test_iam_permissions_rest_required_fields( request = request_type(**request_init) # Designate an appropriate value for the returned response. - return_value = iam_policy_pb2.TestIamPermissionsResponse() + return_value = None # Mock the http request call within the method and fake a response. with mock.patch.object(Session, "request") as req: # We need to mock transcode() because providing default values @@ -17794,49 +17832,39 @@ def test_test_iam_permissions_rest_required_fields( with mock.patch.object(path_template, "transcode") as transcode: # A uri without fields and an empty body will force all the # request fields to show up in the query_params. 
- pb_request = request + pb_request = request_type.pb(request) transcode_result = { "uri": "v1/sample_method", - "method": "post", + "method": "delete", "query_params": pb_request, } - transcode_result["body"] = pb_request transcode.return_value = transcode_result response_value = Response() response_value.status_code = 200 - - json_return_value = json_format.MessageToJson(return_value) + json_return_value = "" response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.test_iam_permissions(request) + response = client.delete_snapshot(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_test_iam_permissions_rest_unset_required_fields(): +def test_delete_snapshot_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) - unset_fields = transport.test_iam_permissions._get_unset_required_fields({}) - assert set(unset_fields) == ( - set(()) - & set( - ( - "resource", - "permissions", - ) - ) - ) + unset_fields = transport.delete_snapshot._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) -def test_test_iam_permissions_rest_flattened(): +def test_delete_snapshot_rest_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", @@ -17845,42 +17873,41 @@ def test_test_iam_permissions_rest_flattened(): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = iam_policy_pb2.TestIamPermissionsResponse() + return_value = None # get arguments that satisfy an http rule for this method sample_request = { - "resource": "projects/sample1/instances/sample2/tables/sample3" + "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4" } # get truthy value for each flattened field mock_args = dict( - resource="resource_value", - permissions=["permissions_value"], + name="name_value", ) mock_args.update(sample_request) # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) + json_return_value = "" response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.test_iam_permissions(**mock_args) + client.delete_snapshot(**mock_args) # Establish that the underlying call was made with the expected # request object values. 
assert len(req.mock_calls) == 1 _, args, _ = req.mock_calls[0] assert path_template.validate( - "%s/v2/{resource=projects/*/instances/*/tables/*}:testIamPermissions" + "%s/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}" % client.transport._host, args[1], ) -def test_test_iam_permissions_rest_flattened_error(transport: str = "rest"): +def test_delete_snapshot_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -17889,1613 +17916,5694 @@ def test_test_iam_permissions_rest_flattened_error(transport: str = "rest"): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.test_iam_permissions( - iam_policy_pb2.TestIamPermissionsRequest(), - resource="resource_value", - permissions=["permissions_value"], + client.delete_snapshot( + bigtable_table_admin.DeleteSnapshotRequest(), + name="name_value", ) -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.BigtableTableAdminGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): +def test_create_backup_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport=transport, + transport="rest", ) - # It is an error to provide a credentials file and a transport instance. - transport = transports.BigtableTableAdminGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = BigtableTableAdminClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - # It is an error to provide an api_key and a transport instance. - transport = transports.BigtableTableAdminGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - options = client_options.ClientOptions() - options.api_key = "api_key" - with pytest.raises(ValueError): - client = BigtableTableAdminClient( - client_options=options, - transport=transport, + # Ensure method has been cached + assert client._transport.create_backup in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. ) + client._transport._wrapped_methods[client._transport.create_backup] = mock_rpc - # It is an error to provide an api_key and a credential. - options = client_options.ClientOptions() - options.api_key = "api_key" - with pytest.raises(ValueError): - client = BigtableTableAdminClient( - client_options=options, credentials=ga_credentials.AnonymousCredentials() - ) - - # It is an error to provide scopes and a transport instance. 
- transport = transports.BigtableTableAdminGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = BigtableTableAdminClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) + request = {} + client.create_backup(request) + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.BigtableTableAdminGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = BigtableTableAdminClient(transport=transport) - assert client.transport is transport + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + client.create_backup(request) -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.BigtableTableAdminGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 - transport = transports.BigtableTableAdminGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel +def test_create_backup_rest_required_fields( + request_type=bigtable_table_admin.CreateBackupRequest, +): + transport_class = transports.BigtableTableAdminRestTransport -@pytest.mark.parametrize( - "transport_class", - [ - transports.BigtableTableAdminGrpcTransport, - transports.BigtableTableAdminGrpcAsyncIOTransport, - transports.BigtableTableAdminRestTransport, - ], -) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, "default") as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() + request_init = {} + request_init["parent"] = "" + request_init["backup_id"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + # verify fields with default values are dropped + assert "backupId" not in jsonified_request -def test_transport_kind_grpc(): - transport = BigtableTableAdminClient.get_transport_class("grpc")( + unset_fields = transport_class( credentials=ga_credentials.AnonymousCredentials() - ) - assert transport.kind == "grpc" + ).create_backup._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + # verify required fields with default values are now present + assert "backupId" in jsonified_request + assert jsonified_request["backupId"] == request_init["backup_id"] -def test_initialize_client_w_grpc(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="grpc" - ) - assert client is not None + jsonified_request["parent"] = "parent_value" + jsonified_request["backupId"] = "backup_id_value" + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_backup._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. 
+ assert not set(unset_fields) - set(("backup_id",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + assert "backupId" in jsonified_request + assert jsonified_request["backupId"] == "backup_id_value" -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_create_table_empty_call_grpc(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", + transport="rest", ) + request = request_type(**request_init) - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.create_table), "__call__") as call: - call.return_value = gba_table.Table() - client.create_table(request=None) + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.CreateTableRequest() + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) - assert args[0] == request_msg + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.create_backup(request) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_create_table_from_snapshot_empty_call_grpc(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) + expected_params = [ + ( + "backupId", + "", + ), + ("$alt", "json;enum-encoding=int"), + ] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.create_table_from_snapshot), "__call__" - ) as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.create_table_from_snapshot(request=None) - # Establish that the underlying stub method was called. 
- call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.CreateTableFromSnapshotRequest() +def test_create_backup_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) - assert args[0] == request_msg + unset_fields = transport.create_backup._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(("backupId",)) + & set( + ( + "parent", + "backupId", + "backup", + ) + ) + ) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_list_tables_empty_call_grpc(): +def test_create_backup_rest_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", + transport="rest", ) - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.list_tables), "__call__") as call: - call.return_value = bigtable_table_admin.ListTablesResponse() - client.list_tables(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.ListTablesRequest() - - assert args[0] == request_msg + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # get arguments that satisfy an http rule for this method + sample_request = { + "parent": "projects/sample1/instances/sample2/clusters/sample3" + } -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_get_table_empty_call_grpc(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + backup_id="backup_id_value", + backup=table.Backup(name="name_value"), + ) + mock_args.update(sample_request) - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.get_table), "__call__") as call: - call.return_value = table.Table() - client.get_table(request=None) + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.GetTableRequest() + client.create_backup(**mock_args) - assert args[0] == request_msg + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/instances/*/clusters/*}/backups" + % client.transport._host, + args[1], + ) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. 
-def test_update_table_empty_call_grpc(): +def test_create_backup_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", + transport=transport, ) - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.update_table), "__call__") as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.update_table(request=None) + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_backup( + bigtable_table_admin.CreateBackupRequest(), + parent="parent_value", + backup_id="backup_id_value", + backup=table.Backup(name="name_value"), + ) - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.UpdateTableRequest() - assert args[0] == request_msg +def test_get_backup_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_delete_table_empty_call_grpc(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) + # Ensure method has been cached + assert client._transport.get_backup in client._transport._wrapped_methods - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.delete_table), "__call__") as call: - call.return_value = None - client.delete_table(request=None) + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.get_backup] = mock_rpc - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.DeleteTableRequest() + request = {} + client.get_backup(request) - assert args[0] == request_msg + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + client.get_backup(request) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_undelete_table_empty_call_grpc(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.undelete_table), "__call__") as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.undelete_table(request=None) - # Establish that the underlying stub method was called. 
- call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.UndeleteTableRequest() +def test_get_backup_rest_required_fields( + request_type=bigtable_table_admin.GetBackupRequest, +): + transport_class = transports.BigtableTableAdminRestTransport - assert args[0] == request_msg + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + # verify fields with default values are dropped -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_create_authorized_view_empty_call_grpc(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_backup._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.create_authorized_view), "__call__" - ) as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.create_authorized_view(request=None) + # verify required fields with default values are now present - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.CreateAuthorizedViewRequest() + jsonified_request["name"] = "name_value" - assert args[0] == request_msg + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_backup._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_list_authorized_views_empty_call_grpc(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", + transport="rest", ) + request = request_type(**request_init) - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.list_authorized_views), "__call__" - ) as call: - call.return_value = bigtable_table_admin.ListAuthorizedViewsResponse() - client.list_authorized_views(request=None) + # Designate an appropriate value for the returned response. + return_value = table.Backup() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result - # Establish that the underlying stub method was called. 
- call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.ListAuthorizedViewsRequest() + response_value = Response() + response_value.status_code = 200 - assert args[0] == request_msg + # Convert return value to protobuf type + return_value = table.Backup.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.get_backup(request) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_get_authorized_view_empty_call_grpc(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.get_authorized_view), "__call__" - ) as call: - call.return_value = table.AuthorizedView() - client.get_authorized_view(request=None) - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.GetAuthorizedViewRequest() +def test_get_backup_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) - assert args[0] == request_msg + unset_fields = transport.get_backup._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_update_authorized_view_empty_call_grpc(): +def test_get_backup_rest_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", + transport="rest", ) - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.update_authorized_view), "__call__" - ) as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.update_authorized_view(request=None) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = table.Backup() - # Establish that the underlying stub method was called. 
- call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.UpdateAuthorizedViewRequest() + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" + } - assert args[0] == request_msg + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = table.Backup.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.get_backup(**mock_args) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_delete_authorized_view_empty_call_grpc(): + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/clusters/*/backups/*}" + % client.transport._host, + args[1], + ) + + +def test_get_backup_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", + transport=transport, ) - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.delete_authorized_view), "__call__" - ) as call: - call.return_value = None - client.delete_authorized_view(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.DeleteAuthorizedViewRequest() - - assert args[0] == request_msg + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_backup( + bigtable_table_admin.GetBackupRequest(), + name="name_value", + ) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_modify_column_families_empty_call_grpc(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) +def test_update_backup_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.modify_column_families), "__call__" - ) as call: - call.return_value = table.Table() - client.modify_column_families(request=None) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - # Establish that the underlying stub method was called. 
- call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.ModifyColumnFamiliesRequest() + # Ensure method has been cached + assert client._transport.update_backup in client._transport._wrapped_methods - assert args[0] == request_msg + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.update_backup] = mock_rpc + request = {} + client.update_backup(request) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_drop_row_range_empty_call_grpc(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: - call.return_value = None - client.drop_row_range(request=None) + client.update_backup(request) - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.DropRowRangeRequest() + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 - assert args[0] == request_msg +def test_update_backup_rest_required_fields( + request_type=bigtable_table_admin.UpdateBackupRequest, +): + transport_class = transports.BigtableTableAdminRestTransport -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_generate_consistency_token_empty_call_grpc(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.generate_consistency_token), "__call__" - ) as call: - call.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() - client.generate_consistency_token(request=None) + # verify fields with default values are dropped - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.GenerateConsistencyTokenRequest() + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_backup._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) - assert args[0] == request_msg + # verify required fields with default values are now present + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_backup._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("update_mask",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. 
-def test_check_consistency_empty_call_grpc(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", + transport="rest", ) + request = request_type(**request_init) - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.check_consistency), "__call__" - ) as call: - call.return_value = bigtable_table_admin.CheckConsistencyResponse() - client.check_consistency(request=None) + # Designate an appropriate value for the returned response. + return_value = table.Backup() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "patch", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.CheckConsistencyRequest() + response_value = Response() + response_value.status_code = 200 - assert args[0] == request_msg + # Convert return value to protobuf type + return_value = table.Backup.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_snapshot_table_empty_call_grpc(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) + response = client.update_backup(request) - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.snapshot_table(request=None) + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.SnapshotTableRequest() - assert args[0] == request_msg +def test_update_backup_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + unset_fields = transport.update_backup._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(("updateMask",)) + & set( + ( + "backup", + "updateMask", + ) + ) + ) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_get_snapshot_empty_call_grpc(): + +def test_update_backup_rest_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", + transport="rest", ) - # Mock the actual call, and fake the request. 
- with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: - call.return_value = table.Snapshot() - client.get_snapshot(request=None) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = table.Backup() - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.GetSnapshotRequest() + # get arguments that satisfy an http rule for this method + sample_request = { + "backup": { + "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" + } + } - assert args[0] == request_msg + # get truthy value for each flattened field + mock_args = dict( + backup=table.Backup(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = table.Backup.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.update_backup(**mock_args) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_list_snapshots_empty_call_grpc(): + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{backup.name=projects/*/instances/*/clusters/*/backups/*}" + % client.transport._host, + args[1], + ) + + +def test_update_backup_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", + transport=transport, ) - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: - call.return_value = bigtable_table_admin.ListSnapshotsResponse() - client.list_snapshots(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.ListSnapshotsRequest() - - assert args[0] == request_msg + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_backup( + bigtable_table_admin.UpdateBackupRequest(), + backup=table.Backup(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. 
-def test_delete_snapshot_empty_call_grpc(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) +def test_delete_backup_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: - call.return_value = None - client.delete_snapshot(request=None) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.DeleteSnapshotRequest() + # Ensure method has been cached + assert client._transport.delete_backup in client._transport._wrapped_methods - assert args[0] == request_msg + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.delete_backup] = mock_rpc + request = {} + client.delete_backup(request) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_create_backup_empty_call_grpc(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.create_backup), "__call__") as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.create_backup(request=None) + client.delete_backup(request) - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.CreateBackupRequest() + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 - assert args[0] == request_msg +def test_delete_backup_rest_required_fields( + request_type=bigtable_table_admin.DeleteBackupRequest, +): + transport_class = transports.BigtableTableAdminRestTransport -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_get_backup_empty_call_grpc(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.get_backup), "__call__") as call: - call.return_value = table.Backup() - client.get_backup(request=None) + # verify fields with default values are dropped - # Establish that the underlying stub method was called. 
- call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.GetBackupRequest() + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_backup._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) - assert args[0] == request_msg + # verify required fields with default values are now present + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_backup._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_update_backup_empty_call_grpc(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", + transport="rest", ) + request = request_type(**request_init) - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.update_backup), "__call__") as call: - call.return_value = table.Backup() - client.update_backup(request=None) + # Designate an appropriate value for the returned response. + return_value = None + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "delete", + "query_params": pb_request, + } + transcode.return_value = transcode_result - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.UpdateBackupRequest() + response_value = Response() + response_value.status_code = 200 + json_return_value = "" - assert args[0] == request_msg + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.delete_backup(request) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_delete_backup_empty_call_grpc(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: - call.return_value = None - client.delete_backup(request=None) - # Establish that the underlying stub method was called. 
- call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.DeleteBackupRequest() +def test_delete_backup_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) - assert args[0] == request_msg + unset_fields = transport.delete_backup._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_list_backups_empty_call_grpc(): +def test_delete_backup_rest_flattened(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", + transport="rest", ) - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.list_backups), "__call__") as call: - call.return_value = bigtable_table_admin.ListBackupsResponse() - client.list_backups(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.ListBackupsRequest() - - assert args[0] == request_msg + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" + } -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_restore_table_empty_call_grpc(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.restore_table), "__call__") as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.restore_table(request=None) + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.RestoreTableRequest() + client.delete_backup(**mock_args) - assert args[0] == request_msg + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/clusters/*/backups/*}" + % client.transport._host, + args[1], + ) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. 
-def test_copy_backup_empty_call_grpc(): +def test_delete_backup_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", + transport=transport, ) - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: - call.return_value = operations_pb2.Operation(name="operations/op") - client.copy_backup(request=None) + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_backup( + bigtable_table_admin.DeleteBackupRequest(), + name="name_value", + ) - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.CopyBackupRequest() - assert args[0] == request_msg +def test_list_backups_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_get_iam_policy_empty_call_grpc(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", - ) + # Ensure method has been cached + assert client._transport.list_backups in client._transport._wrapped_methods - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: - call.return_value = policy_pb2.Policy() - client.get_iam_policy(request=None) + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.list_backups] = mock_rpc - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = iam_policy_pb2.GetIamPolicyRequest() + request = {} + client.list_backups(request) - assert args[0] == request_msg + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + client.list_backups(request) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_set_iam_policy_empty_call_grpc(): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_list_backups_rest_required_fields( + request_type=bigtable_table_admin.ListBackupsRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) - # Mock the actual call, and fake the request. 
- with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: - call.return_value = policy_pb2.Policy() - client.set_iam_policy(request=None) + # verify fields with default values are dropped - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = iam_policy_pb2.SetIamPolicyRequest() + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_backups._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) - assert args[0] == request_msg + # verify required fields with default values are now present + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_backups._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "filter", + "order_by", + "page_size", + "page_token", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -def test_test_iam_permissions_empty_call_grpc(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), - transport="grpc", + transport="rest", ) + request = request_type(**request_init) - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.test_iam_permissions), "__call__" - ) as call: - call.return_value = iam_policy_pb2.TestIamPermissionsResponse() - client.test_iam_permissions(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = iam_policy_pb2.TestIamPermissionsRequest() + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.ListBackupsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result - assert args[0] == request_msg + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) -def test_transport_kind_grpc_asyncio(): - transport = BigtableTableAdminAsyncClient.get_transport_class("grpc_asyncio")( - credentials=async_anonymous_credentials() - ) - assert transport.kind == "grpc_asyncio" + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.list_backups(request) -def test_initialize_client_w_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), transport="grpc_asyncio" - ) - assert client is not None + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_create_table_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", +def test_list_backups_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials ) - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.create_table), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - gba_table.Table( - name="name_value", - granularity=gba_table.Table.TimestampGranularity.MILLIS, - deletion_protection=True, + unset_fields = transport.list_backups._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "filter", + "orderBy", + "pageSize", + "pageToken", ) ) - await client.create_table(request=None) + & set(("parent",)) + ) - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.CreateTableRequest() - assert args[0] == request_msg +def test_list_backups_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.ListBackupsResponse() -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_create_table_from_snapshot_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) + # get arguments that satisfy an http rule for this method + sample_request = { + "parent": "projects/sample1/instances/sample2/clusters/sample3" + } - # Mock the actual call, and fake the request. 
- with mock.patch.object( - type(client.transport.create_table_from_snapshot), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", ) - await client.create_table_from_snapshot(request=None) + mock_args.update(sample_request) - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.CreateTableFromSnapshotRequest() + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - assert args[0] == request_msg + client.list_backups(**mock_args) + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/instances/*/clusters/*}/backups" + % client.transport._host, + args[1], + ) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_list_tables_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", + +def test_list_backups_rest_flattened_error(transport: str = "rest"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.list_tables), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.ListTablesResponse( - next_page_token="next_page_token_value", - ) + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_backups( + bigtable_table_admin.ListBackupsRequest(), + parent="parent_value", ) - await client.list_tables(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.ListTablesRequest() - - assert args[0] == request_msg -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_get_table_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", +def test_list_backups_rest_pager(transport: str = "rest"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.get_table), "__call__") as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - table.Table( - name="name_value", - granularity=table.Table.TimestampGranularity.MILLIS, - deletion_protection=True, - ) + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + bigtable_table_admin.ListBackupsResponse( + backups=[ + table.Backup(), + table.Backup(), + table.Backup(), + ], + next_page_token="abc", + ), + bigtable_table_admin.ListBackupsResponse( + backups=[], + next_page_token="def", + ), + bigtable_table_admin.ListBackupsResponse( + backups=[ + table.Backup(), + ], + next_page_token="ghi", + ), + bigtable_table_admin.ListBackupsResponse( + backups=[ + table.Backup(), + table.Backup(), + ], + ), ) - await client.get_table(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.GetTableRequest() + # Two responses for two calls + response = response + response - assert args[0] == request_msg + # Wrap the values into proper Response objs + response = tuple( + bigtable_table_admin.ListBackupsResponse.to_json(x) for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + sample_request = { + "parent": "projects/sample1/instances/sample2/clusters/sample3" + } -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_update_table_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) + pager = client.list_backups(request=sample_request) - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.update_table), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - await client.update_table(request=None) + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, table.Backup) for i in results) - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.UpdateTableRequest() + pages = list(client.list_backups(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token - assert args[0] == request_msg +def test_restore_table_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. 
-@pytest.mark.asyncio -async def test_delete_table_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.delete_table), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.delete_table(request=None) + # Ensure method has been cached + assert client._transport.restore_table in client._transport._wrapped_methods - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.DeleteTableRequest() + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[client._transport.restore_table] = mock_rpc - assert args[0] == request_msg + request = {} + client.restore_table(request) + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_undelete_table_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.undelete_table), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - await client.undelete_table(request=None) + client.restore_table(request) - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.UndeleteTableRequest() + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 - assert args[0] == request_msg +def test_restore_table_rest_required_fields( + request_type=bigtable_table_admin.RestoreTableRequest, +): + transport_class = transports.BigtableTableAdminRestTransport -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_create_authorized_view_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", + request_init = {} + request_init["parent"] = "" + request_init["table_id"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.create_authorized_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - await client.create_authorized_view(request=None) + # verify fields with default values are dropped - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.CreateAuthorizedViewRequest() + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).restore_table._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) - assert args[0] == request_msg + # verify required fields with default values are now present + jsonified_request["parent"] = "parent_value" + jsonified_request["tableId"] = "table_id_value" -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_list_authorized_views_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).restore_table._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + assert "tableId" in jsonified_request + assert jsonified_request["tableId"] == "table_id_value" + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) + request = request_type(**request_init) - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.list_authorized_views), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.ListAuthorizedViewsResponse( - next_page_token="next_page_token_value", - ) - ) - await client.list_authorized_views(request=None) + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result - # Establish that the underlying stub method was called. 
- call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.ListAuthorizedViewsRequest() + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) - assert args[0] == request_msg + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.restore_table(request) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_get_authorized_view_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_restore_table_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials ) - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.get_authorized_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - table.AuthorizedView( - name="name_value", - etag="etag_value", - deletion_protection=True, + unset_fields = transport.restore_table._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "parent", + "tableId", ) ) - await client.get_authorized_view(request=None) + ) - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.GetAuthorizedViewRequest() - assert args[0] == request_msg +def test_copy_backup_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_update_authorized_view_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) + # Ensure method has been cached + assert client._transport.copy_backup in client._transport._wrapped_methods - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.update_authorized_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. ) - await client.update_authorized_view(request=None) - - # Establish that the underlying stub method was called. 
- call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.UpdateAuthorizedViewRequest() + client._transport._wrapped_methods[client._transport.copy_backup] = mock_rpc - assert args[0] == request_msg + request = {} + client.copy_backup(request) + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_delete_authorized_view_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.delete_authorized_view), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.delete_authorized_view(request=None) + client.copy_backup(request) - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.DeleteAuthorizedViewRequest() + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 - assert args[0] == request_msg +def test_copy_backup_rest_required_fields( + request_type=bigtable_table_admin.CopyBackupRequest, +): + transport_class = transports.BigtableTableAdminRestTransport -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_modify_column_families_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", + request_init = {} + request_init["parent"] = "" + request_init["backup_id"] = "" + request_init["source_backup"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) ) - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.modify_column_families), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - table.Table( - name="name_value", - granularity=table.Table.TimestampGranularity.MILLIS, - deletion_protection=True, - ) - ) - await client.modify_column_families(request=None) + # verify fields with default values are dropped - # Establish that the underlying stub method was called. 
- call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.ModifyColumnFamiliesRequest() + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).copy_backup._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) - assert args[0] == request_msg + # verify required fields with default values are now present + jsonified_request["parent"] = "parent_value" + jsonified_request["backupId"] = "backup_id_value" + jsonified_request["sourceBackup"] = "source_backup_value" -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_drop_row_range_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).copy_backup._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + assert "backupId" in jsonified_request + assert jsonified_request["backupId"] == "backup_id_value" + assert "sourceBackup" in jsonified_request + assert jsonified_request["sourceBackup"] == "source_backup_value" + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) + request = request_type(**request_init) - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.drop_row_range(request=None) + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.DropRowRangeRequest() + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) - assert args[0] == request_msg + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.copy_backup(request) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. 
-@pytest.mark.asyncio -async def test_generate_consistency_token_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_copy_backup_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials ) - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.generate_consistency_token), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.GenerateConsistencyTokenResponse( - consistency_token="consistency_token_value", + unset_fields = transport.copy_backup._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "parent", + "backupId", + "sourceBackup", + "expireTime", ) ) - await client.generate_consistency_token(request=None) + ) - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.GenerateConsistencyTokenRequest() - assert args[0] == request_msg +def test_copy_backup_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_check_consistency_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) + # get arguments that satisfy an http rule for this method + sample_request = { + "parent": "projects/sample1/instances/sample2/clusters/sample3" + } - # Mock the actual call, and fake the request. - with mock.patch.object( - type(client.transport.check_consistency), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.CheckConsistencyResponse( - consistent=True, - ) + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + backup_id="backup_id_value", + source_backup="source_backup_value", + expire_time=timestamp_pb2.Timestamp(seconds=751), ) - await client.check_consistency(request=None) + mock_args.update(sample_request) - # Establish that the underlying stub method was called. 
- call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.CheckConsistencyRequest() + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - assert args[0] == request_msg + client.copy_backup(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/instances/*/clusters/*}/backups:copy" + % client.transport._host, + args[1], + ) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_snapshot_table_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", +def test_copy_backup_rest_flattened_error(transport: str = "rest"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.copy_backup( + bigtable_table_admin.CopyBackupRequest(), + parent="parent_value", + backup_id="backup_id_value", + source_backup="source_backup_value", + expire_time=timestamp_pb2.Timestamp(seconds=751), ) - await client.snapshot_table(request=None) - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.SnapshotTableRequest() - assert args[0] == request_msg +def test_get_iam_policy_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_get_snapshot_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) + # Ensure method has been cached + assert client._transport.get_iam_policy in client._transport._wrapped_methods - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - table.Snapshot( - name="name_value", - data_size_bytes=1594, - state=table.Snapshot.State.READY, - description="description_value", - ) + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. ) - await client.get_snapshot(request=None) + client._transport._wrapped_methods[client._transport.get_iam_policy] = mock_rpc - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.GetSnapshotRequest() + request = {} + client.get_iam_policy(request) - assert args[0] == request_msg + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + client.get_iam_policy(request) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_list_snapshots_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.ListSnapshotsResponse( - next_page_token="next_page_token_value", - ) - ) - await client.list_snapshots(request=None) - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.ListSnapshotsRequest() +def test_get_iam_policy_rest_required_fields( + request_type=iam_policy_pb2.GetIamPolicyRequest, +): + transport_class = transports.BigtableTableAdminRestTransport - assert args[0] == request_msg + request_init = {} + request_init["resource"] = "" + request = request_type(**request_init) + pb_request = request + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + # verify fields with default values are dropped -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_delete_snapshot_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_iam_policy._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.delete_snapshot(request=None) + # verify required fields with default values are now present - # Establish that the underlying stub method was called. 
- call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.DeleteSnapshotRequest() + jsonified_request["resource"] = "resource_value" - assert args[0] == request_msg + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_iam_policy._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + # verify required fields with non-default values are left alone + assert "resource" in jsonified_request + assert jsonified_request["resource"] == "resource_value" -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_create_backup_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) + request = request_type(**request_init) - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.create_backup), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") - ) - await client.create_backup(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.CreateBackupRequest() - - assert args[0] == request_msg + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + response_value = Response() + response_value.status_code = 200 -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_get_backup_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) + json_return_value = json_format.MessageToJson(return_value) - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.get_backup), "__call__") as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - table.Backup( - name="name_value", - source_table="source_table_value", - source_backup="source_backup_value", - size_bytes=1089, - state=table.Backup.State.CREATING, - backup_type=table.Backup.BackupType.STANDARD, - ) - ) - await client.get_backup(request=None) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.GetBackupRequest() + response = client.get_iam_policy(request) - assert args[0] == request_msg + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_update_backup_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", +def test_get_iam_policy_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials ) - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.update_backup), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - table.Backup( - name="name_value", - source_table="source_table_value", - source_backup="source_backup_value", - size_bytes=1089, - state=table.Backup.State.CREATING, - backup_type=table.Backup.BackupType.STANDARD, - ) - ) - await client.update_backup(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.UpdateBackupRequest() - - assert args[0] == request_msg + unset_fields = transport.get_iam_policy._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("resource",))) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_delete_backup_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", +def test_get_iam_policy_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", ) - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.delete_backup(request=None) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy() - # Establish that the underlying stub method was called. 
- call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.DeleteBackupRequest() + # get arguments that satisfy an http rule for this method + sample_request = { + "resource": "projects/sample1/instances/sample2/tables/sample3" + } - assert args[0] == request_msg + # get truthy value for each flattened field + mock_args = dict( + resource="resource_value", + ) + mock_args.update(sample_request) + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_list_backups_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) + client.get_iam_policy(**mock_args) - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.list_backups), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - bigtable_table_admin.ListBackupsResponse( - next_page_token="next_page_token_value", - ) + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{resource=projects/*/instances/*/tables/*}:getIamPolicy" + % client.transport._host, + args[1], ) - await client.list_backups(request=None) - - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.ListBackupsRequest() - - assert args[0] == request_msg -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_restore_table_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", +def test_get_iam_policy_rest_flattened_error(transport: str = "rest"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, ) - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.restore_table), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_iam_policy( + iam_policy_pb2.GetIamPolicyRequest(), + resource="resource_value", ) - await client.restore_table(request=None) - # Establish that the underlying stub method was called. 
- call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.RestoreTableRequest() - assert args[0] == request_msg +def test_set_iam_policy_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_copy_backup_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) + # Ensure method has been cached + assert client._transport.set_iam_policy in client._transport._wrapped_methods - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name="operations/spam") + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. ) - await client.copy_backup(request=None) + client._transport._wrapped_methods[client._transport.set_iam_policy] = mock_rpc - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = bigtable_table_admin.CopyBackupRequest() + request = {} + client.set_iam_policy(request) - assert args[0] == request_msg + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + client.set_iam_policy(request) -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_get_iam_policy_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", - ) + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - policy_pb2.Policy( - version=774, - etag=b"etag_blob", + +def test_set_iam_policy_rest_required_fields( + request_type=iam_policy_pb2.SetIamPolicyRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["resource"] = "" + request = request_type(**request_init) + pb_request = request + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).set_iam_policy._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["resource"] = "resource_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).set_iam_policy._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "resource" in jsonified_request + assert jsonified_request["resource"] == "resource_value" + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.set_iam_policy(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_set_iam_policy_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.set_iam_policy._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "resource", + "policy", ) ) - await client.get_iam_policy(request=None) + ) + + +def test_set_iam_policy_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = policy_pb2.Policy() + + # get arguments that satisfy an http rule for this method + sample_request = { + "resource": "projects/sample1/instances/sample2/tables/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + resource="resource_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.set_iam_policy(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{resource=projects/*/instances/*/tables/*}:setIamPolicy" + % client.transport._host, + args[1], + ) + + +def test_set_iam_policy_rest_flattened_error(transport: str = "rest"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_iam_policy( + iam_policy_pb2.SetIamPolicyRequest(), + resource="resource_value", + ) + + +def test_test_iam_permissions_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.test_iam_permissions in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.test_iam_permissions + ] = mock_rpc + + request = {} + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.test_iam_permissions(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_test_iam_permissions_rest_required_fields( + request_type=iam_policy_pb2.TestIamPermissionsRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["resource"] = "" + request_init["permissions"] = "" + request = request_type(**request_init) + pb_request = request + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).test_iam_permissions._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["resource"] = "resource_value" + jsonified_request["permissions"] = "permissions_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).test_iam_permissions._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "resource" in jsonified_request + assert jsonified_request["resource"] == "resource_value" + assert "permissions" in jsonified_request + assert jsonified_request["permissions"] == "permissions_value" + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = iam_policy_pb2.TestIamPermissionsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.test_iam_permissions(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_test_iam_permissions_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.test_iam_permissions._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "resource", + "permissions", + ) + ) + ) + + +def test_test_iam_permissions_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = iam_policy_pb2.TestIamPermissionsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = { + "resource": "projects/sample1/instances/sample2/tables/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + resource="resource_value", + permissions=["permissions_value"], + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.test_iam_permissions(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{resource=projects/*/instances/*/tables/*}:testIamPermissions" + % client.transport._host, + args[1], + ) + + +def test_test_iam_permissions_rest_flattened_error(transport: str = "rest"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.test_iam_permissions( + iam_policy_pb2.TestIamPermissionsRequest(), + resource="resource_value", + permissions=["permissions_value"], + ) + + +def test_create_schema_bundle_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.create_schema_bundle in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.create_schema_bundle + ] = mock_rpc + + request = {} + client.create_schema_bundle(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.create_schema_bundle(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_create_schema_bundle_rest_required_fields( + request_type=bigtable_table_admin.CreateSchemaBundleRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request_init["schema_bundle_id"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + assert "schemaBundleId" not in jsonified_request + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_schema_bundle._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + assert "schemaBundleId" in jsonified_request + assert jsonified_request["schemaBundleId"] == request_init["schema_bundle_id"] + + jsonified_request["parent"] = "parent_value" + jsonified_request["schemaBundleId"] = "schema_bundle_id_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_schema_bundle._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("schema_bundle_id",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + assert "schemaBundleId" in jsonified_request + assert jsonified_request["schemaBundleId"] == "schema_bundle_id_value" + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. 
+ return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.create_schema_bundle(request) + + expected_params = [ + ( + "schemaBundleId", + "", + ), + ("$alt", "json;enum-encoding=int"), + ] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_create_schema_bundle_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.create_schema_bundle._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(("schemaBundleId",)) + & set( + ( + "parent", + "schemaBundleId", + "schemaBundle", + ) + ) + ) + + +def test_create_schema_bundle_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/instances/sample2/tables/sample3"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + schema_bundle_id="schema_bundle_id_value", + schema_bundle=table.SchemaBundle(name="name_value"), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.create_schema_bundle(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/instances/*/tables/*}/schemaBundles" + % client.transport._host, + args[1], + ) + + +def test_create_schema_bundle_rest_flattened_error(transport: str = "rest"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_schema_bundle( + bigtable_table_admin.CreateSchemaBundleRequest(), + parent="parent_value", + schema_bundle_id="schema_bundle_id_value", + schema_bundle=table.SchemaBundle(name="name_value"), + ) + + +def test_update_schema_bundle_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.update_schema_bundle in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.update_schema_bundle + ] = mock_rpc + + request = {} + client.update_schema_bundle(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + # Operation methods build a cached wrapper on first rpc call + # subsequent calls should use the cached wrapper + wrapper_fn.reset_mock() + + client.update_schema_bundle(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_update_schema_bundle_rest_required_fields( + request_type=bigtable_table_admin.UpdateSchemaBundleRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_schema_bundle._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_schema_bundle._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "ignore_warnings", + "update_mask", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "patch", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.update_schema_bundle(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_update_schema_bundle_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.update_schema_bundle._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "ignoreWarnings", + "updateMask", + ) + ) + & set(("schemaBundle",)) + ) + + +def test_update_schema_bundle_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "schema_bundle": { + "name": "projects/sample1/instances/sample2/tables/sample3/schemaBundles/sample4" + } + } + + # get truthy value for each flattened field + mock_args = dict( + schema_bundle=table.SchemaBundle(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.update_schema_bundle(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{schema_bundle.name=projects/*/instances/*/tables/*/schemaBundles/*}" + % client.transport._host, + args[1], + ) + + +def test_update_schema_bundle_rest_flattened_error(transport: str = "rest"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.update_schema_bundle( + bigtable_table_admin.UpdateSchemaBundleRequest(), + schema_bundle=table.SchemaBundle(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +def test_get_schema_bundle_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.get_schema_bundle in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.get_schema_bundle + ] = mock_rpc + + request = {} + client.get_schema_bundle(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_schema_bundle(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_get_schema_bundle_rest_required_fields( + request_type=bigtable_table_admin.GetSchemaBundleRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_schema_bundle._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_schema_bundle._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = table.SchemaBundle() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = table.SchemaBundle.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.get_schema_bundle(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_schema_bundle_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_schema_bundle._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +def test_get_schema_bundle_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = table.SchemaBundle() + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/instances/sample2/tables/sample3/schemaBundles/sample4" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = table.SchemaBundle.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.get_schema_bundle(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/tables/*/schemaBundles/*}" + % client.transport._host, + args[1], + ) + + +def test_get_schema_bundle_rest_flattened_error(transport: str = "rest"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_schema_bundle( + bigtable_table_admin.GetSchemaBundleRequest(), + name="name_value", + ) + + +def test_list_schema_bundles_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.list_schema_bundles in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.list_schema_bundles + ] = mock_rpc + + request = {} + client.list_schema_bundles(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.list_schema_bundles(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_list_schema_bundles_rest_required_fields( + request_type=bigtable_table_admin.ListSchemaBundlesRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_schema_bundles._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_schema_bundles._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "page_size", + "page_token", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.ListSchemaBundlesResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListSchemaBundlesResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.list_schema_bundles(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_schema_bundles_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_schema_bundles._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "pageSize", + "pageToken", + ) + ) + & set(("parent",)) + ) + + +def test_list_schema_bundles_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.ListSchemaBundlesResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/instances/sample2/tables/sample3"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListSchemaBundlesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.list_schema_bundles(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/instances/*/tables/*}/schemaBundles" + % client.transport._host, + args[1], + ) + + +def test_list_schema_bundles_rest_flattened_error(transport: str = "rest"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_schema_bundles( + bigtable_table_admin.ListSchemaBundlesRequest(), + parent="parent_value", + ) + + +def test_list_schema_bundles_rest_pager(transport: str = "rest"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + bigtable_table_admin.ListSchemaBundlesResponse( + schema_bundles=[ + table.SchemaBundle(), + table.SchemaBundle(), + table.SchemaBundle(), + ], + next_page_token="abc", + ), + bigtable_table_admin.ListSchemaBundlesResponse( + schema_bundles=[], + next_page_token="def", + ), + bigtable_table_admin.ListSchemaBundlesResponse( + schema_bundles=[ + table.SchemaBundle(), + ], + next_page_token="ghi", + ), + bigtable_table_admin.ListSchemaBundlesResponse( + schema_bundles=[ + table.SchemaBundle(), + table.SchemaBundle(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + bigtable_table_admin.ListSchemaBundlesResponse.to_json(x) for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"parent": "projects/sample1/instances/sample2/tables/sample3"} + + pager = client.list_schema_bundles(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, table.SchemaBundle) for i in results) + + pages = list(client.list_schema_bundles(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_delete_schema_bundle_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.delete_schema_bundle in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.delete_schema_bundle + ] = mock_rpc + + request = {} + client.delete_schema_bundle(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert mock_rpc.call_count == 1 + + client.delete_schema_bundle(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_delete_schema_bundle_rest_required_fields( + request_type=bigtable_table_admin.DeleteSchemaBundleRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_schema_bundle._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_schema_bundle._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("etag",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = None + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "delete", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.delete_schema_bundle(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_delete_schema_bundle_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.delete_schema_bundle._get_unset_required_fields({}) + assert set(unset_fields) == (set(("etag",)) & set(("name",))) + + +def test_delete_schema_bundle_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/instances/sample2/tables/sample3/schemaBundles/sample4" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.delete_schema_bundle(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{name=projects/*/instances/*/tables/*/schemaBundles/*}" + % client.transport._host, + args[1], + ) + + +def test_delete_schema_bundle_rest_flattened_error(transport: str = "rest"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_schema_bundle( + bigtable_table_admin.DeleteSchemaBundleRequest(), + name="name_value", + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.BigtableTableAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.BigtableTableAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BigtableTableAdminClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.BigtableTableAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = BigtableTableAdminClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = BigtableTableAdminClient( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.BigtableTableAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BigtableTableAdminClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.BigtableTableAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = BigtableTableAdminClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.BigtableTableAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.BigtableTableAdminGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.BigtableTableAdminGrpcTransport, + transports.BigtableTableAdminGrpcAsyncIOTransport, + transports.BigtableTableAdminRestTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_transport_kind_grpc(): + transport = BigtableTableAdminClient.get_transport_class("grpc")( + credentials=ga_credentials.AnonymousCredentials() + ) + assert transport.kind == "grpc" + + +def test_initialize_client_w_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="grpc" + ) + assert client is not None + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_table_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.create_table), "__call__") as call: + call.return_value = gba_table.Table() + client.create_table(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CreateTableRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_table_from_snapshot_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_table_from_snapshot), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_table_from_snapshot(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CreateTableFromSnapshotRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_tables_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. 
+ with mock.patch.object(type(client.transport.list_tables), "__call__") as call: + call.return_value = bigtable_table_admin.ListTablesResponse() + client.list_tables(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.ListTablesRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_table_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_table), "__call__") as call: + call.return_value = table.Table() + client.get_table(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.GetTableRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_update_table_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.update_table), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.update_table(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.UpdateTableRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_table_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.delete_table), "__call__") as call: + call.return_value = None + client.delete_table(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.DeleteTableRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_undelete_table_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.undelete_table), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.undelete_table(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.UndeleteTableRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+def test_create_authorized_view_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_authorized_view), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_authorized_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CreateAuthorizedViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_authorized_views_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_authorized_views), "__call__" + ) as call: + call.return_value = bigtable_table_admin.ListAuthorizedViewsResponse() + client.list_authorized_views(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.ListAuthorizedViewsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_authorized_view_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.get_authorized_view), "__call__" + ) as call: + call.return_value = table.AuthorizedView() + client.get_authorized_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.GetAuthorizedViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_update_authorized_view_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.update_authorized_view), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.update_authorized_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.UpdateAuthorizedViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_authorized_view_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_authorized_view), "__call__" + ) as call: + call.return_value = None + client.delete_authorized_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.DeleteAuthorizedViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_modify_column_families_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.modify_column_families), "__call__" + ) as call: + call.return_value = table.Table() + client.modify_column_families(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.ModifyColumnFamiliesRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_drop_row_range_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: + call.return_value = None + client.drop_row_range(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.DropRowRangeRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_generate_consistency_token_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.generate_consistency_token), "__call__" + ) as call: + call.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() + client.generate_consistency_token(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.GenerateConsistencyTokenRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_check_consistency_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.check_consistency), "__call__" + ) as call: + call.return_value = bigtable_table_admin.CheckConsistencyResponse() + client.check_consistency(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CheckConsistencyRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. 
request == None and no flattened fields passed, work. +def test_snapshot_table_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.snapshot_table(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.SnapshotTableRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_snapshot_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: + call.return_value = table.Snapshot() + client.get_snapshot(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.GetSnapshotRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_snapshots_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: + call.return_value = bigtable_table_admin.ListSnapshotsResponse() + client.list_snapshots(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.ListSnapshotsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_snapshot_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: + call.return_value = None + client.delete_snapshot(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.DeleteSnapshotRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_backup_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.create_backup), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_backup(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CreateBackupRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_backup_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_backup), "__call__") as call: + call.return_value = table.Backup() + client.get_backup(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.GetBackupRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_update_backup_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.update_backup), "__call__") as call: + call.return_value = table.Backup() + client.update_backup(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.UpdateBackupRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_backup_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: + call.return_value = None + client.delete_backup(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.DeleteBackupRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_backups_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + call.return_value = bigtable_table_admin.ListBackupsResponse() + client.list_backups(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.ListBackupsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_restore_table_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. 
+ with mock.patch.object(type(client.transport.restore_table), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.restore_table(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.RestoreTableRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_copy_backup_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.copy_backup(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CopyBackupRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_iam_policy_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + client.get_iam_policy(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.GetIamPolicyRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_set_iam_policy_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + client.set_iam_policy(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.SetIamPolicyRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_test_iam_permissions_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + client.test_iam_permissions(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.TestIamPermissionsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+def test_create_schema_bundle_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_schema_bundle), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_schema_bundle(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CreateSchemaBundleRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_update_schema_bundle_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.update_schema_bundle), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.update_schema_bundle(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.UpdateSchemaBundleRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_schema_bundle_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.get_schema_bundle), "__call__" + ) as call: + call.return_value = table.SchemaBundle() + client.get_schema_bundle(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.GetSchemaBundleRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_schema_bundles_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_schema_bundles), "__call__" + ) as call: + call.return_value = bigtable_table_admin.ListSchemaBundlesResponse() + client.list_schema_bundles(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.ListSchemaBundlesRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_schema_bundle_empty_call_grpc(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.delete_schema_bundle), "__call__" + ) as call: + call.return_value = None + client.delete_schema_bundle(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.DeleteSchemaBundleRequest() + + assert args[0] == request_msg + + +def test_transport_kind_grpc_asyncio(): + transport = BigtableTableAdminAsyncClient.get_transport_class("grpc_asyncio")( + credentials=async_anonymous_credentials() + ) + assert transport.kind == "grpc_asyncio" + + +def test_initialize_client_w_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), transport="grpc_asyncio" + ) + assert client is not None + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_create_table_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.create_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gba_table.Table( + name="name_value", + granularity=gba_table.Table.TimestampGranularity.MILLIS, + deletion_protection=True, + ) + ) + await client.create_table(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CreateTableRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_create_table_from_snapshot_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_table_from_snapshot), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.create_table_from_snapshot(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CreateTableFromSnapshotRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_list_tables_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_tables), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.ListTablesResponse( + next_page_token="next_page_token_value", + ) + ) + await client.list_tables(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.ListTablesRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. 
request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_get_table_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + table.Table( + name="name_value", + granularity=table.Table.TimestampGranularity.MILLIS, + deletion_protection=True, + ) + ) + await client.get_table(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.GetTableRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_update_table_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.update_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.update_table(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.UpdateTableRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_delete_table_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.delete_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_table(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.DeleteTableRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_undelete_table_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.undelete_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.undelete_table(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.UndeleteTableRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_create_authorized_view_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_authorized_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.create_authorized_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CreateAuthorizedViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_list_authorized_views_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_authorized_views), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.ListAuthorizedViewsResponse( + next_page_token="next_page_token_value", + ) + ) + await client.list_authorized_views(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.ListAuthorizedViewsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_get_authorized_view_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.get_authorized_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + table.AuthorizedView( + name="name_value", + etag="etag_value", + deletion_protection=True, + ) + ) + await client.get_authorized_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.GetAuthorizedViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_update_authorized_view_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. 
+ with mock.patch.object( + type(client.transport.update_authorized_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.update_authorized_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.UpdateAuthorizedViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_delete_authorized_view_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.delete_authorized_view), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_authorized_view(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.DeleteAuthorizedViewRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_modify_column_families_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.modify_column_families), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + table.Table( + name="name_value", + granularity=table.Table.TimestampGranularity.MILLIS, + deletion_protection=True, + ) + ) + await client.modify_column_families(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.ModifyColumnFamiliesRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_drop_row_range_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.drop_row_range), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.drop_row_range(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.DropRowRangeRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+@pytest.mark.asyncio +async def test_generate_consistency_token_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.generate_consistency_token), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.GenerateConsistencyTokenResponse( + consistency_token="consistency_token_value", + ) + ) + await client.generate_consistency_token(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.GenerateConsistencyTokenRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_check_consistency_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.check_consistency), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.CheckConsistencyResponse( + consistent=True, + ) + ) + await client.check_consistency(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CheckConsistencyRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_snapshot_table_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.snapshot_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.snapshot_table(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.SnapshotTableRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_get_snapshot_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_snapshot), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + table.Snapshot( + name="name_value", + data_size_bytes=1594, + state=table.Snapshot.State.READY, + description="description_value", + ) + ) + await client.get_snapshot(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.GetSnapshotRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_list_snapshots_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_snapshots), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.ListSnapshotsResponse( + next_page_token="next_page_token_value", + ) + ) + await client.list_snapshots(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.ListSnapshotsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_delete_snapshot_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.delete_snapshot), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_snapshot(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.DeleteSnapshotRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_create_backup_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.create_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.create_backup(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CreateBackupRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_get_backup_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_backup), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + table.Backup( + name="name_value", + source_table="source_table_value", + source_backup="source_backup_value", + size_bytes=1089, + state=table.Backup.State.CREATING, + backup_type=table.Backup.BackupType.STANDARD, + ) + ) + await client.get_backup(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.GetBackupRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_update_backup_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.update_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + table.Backup( + name="name_value", + source_table="source_table_value", + source_backup="source_backup_value", + size_bytes=1089, + state=table.Backup.State.CREATING, + backup_type=table.Backup.BackupType.STANDARD, + ) + ) + await client.update_backup(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.UpdateBackupRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_delete_backup_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.delete_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_backup(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.DeleteBackupRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_list_backups_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.list_backups), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.ListBackupsResponse( + next_page_token="next_page_token_value", + ) + ) + await client.list_backups(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.ListBackupsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+@pytest.mark.asyncio +async def test_restore_table_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.restore_table), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.restore_table(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.RestoreTableRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_copy_backup_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.copy_backup(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CopyBackupRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_get_iam_policy_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + ) + await client.get_iam_policy(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.GetIamPolicyRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_set_iam_policy_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + ) + await client.set_iam_policy(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.SetIamPolicyRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. 
request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_test_iam_permissions_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + ) + await client.test_iam_permissions(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = iam_policy_pb2.TestIamPermissionsRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_create_schema_bundle_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_schema_bundle), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.create_schema_bundle(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CreateSchemaBundleRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_update_schema_bundle_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.update_schema_bundle), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + await client.update_schema_bundle(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.UpdateSchemaBundleRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_get_schema_bundle_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.get_schema_bundle), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + table.SchemaBundle( + name="name_value", + etag="etag_value", + ) + ) + await client.get_schema_bundle(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.GetSchemaBundleRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_list_schema_bundles_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_schema_bundles), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + bigtable_table_admin.ListSchemaBundlesResponse( + next_page_token="next_page_token_value", + ) + ) + await client.list_schema_bundles(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.ListSchemaBundlesRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_delete_schema_bundle_empty_call_grpc_asyncio(): + client = BigtableTableAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.delete_schema_bundle), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_schema_bundle(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.DeleteSchemaBundleRequest() + + assert args[0] == request_msg + + +def test_transport_kind_rest(): + transport = BigtableTableAdminClient.get_transport_class("rest")( + credentials=ga_credentials.AnonymousCredentials() + ) + assert transport.kind == "rest" + + +def test_create_table_rest_bad_request( + request_type=bigtable_table_admin.CreateTableRequest, +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.create_table(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.CreateTableRequest, + dict, + ], +) +def test_create_table_rest_call_success(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = gba_table.Table( + name="name_value", + granularity=gba_table.Table.TimestampGranularity.MILLIS, + deletion_protection=True, + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = gba_table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.create_table(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gba_table.Table) + assert response.name == "name_value" + assert response.granularity == gba_table.Table.TimestampGranularity.MILLIS + assert response.deletion_protection is True + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_create_table_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_create_table" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_create_table_with_metadata" + ) as post_with_metadata, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_create_table" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = bigtable_table_admin.CreateTableRequest.pb( + bigtable_table_admin.CreateTableRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = gba_table.Table.to_json(gba_table.Table()) + req.return_value.content = return_value + + request = bigtable_table_admin.CreateTableRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = gba_table.Table() + post_with_metadata.return_value = gba_table.Table(), metadata + + client.create_table( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_create_table_from_snapshot_rest_bad_request( + request_type=bigtable_table_admin.CreateTableFromSnapshotRequest, +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.create_table_from_snapshot(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.CreateTableFromSnapshotRequest, + dict, + ], +) +def test_create_table_from_snapshot_rest_call_success(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.create_table_from_snapshot(request) + + # Establish that the response is the type that we expect. + json_return_value = json_format.MessageToJson(return_value) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_create_table_from_snapshot_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_create_table_from_snapshot" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, + "post_create_table_from_snapshot_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_create_table_from_snapshot" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = bigtable_table_admin.CreateTableFromSnapshotRequest.pb( + bigtable_table_admin.CreateTableFromSnapshotRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value + + request = bigtable_table_admin.CreateTableFromSnapshotRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + 
post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata + + client.create_table_from_snapshot( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_list_tables_rest_bad_request( + request_type=bigtable_table_admin.ListTablesRequest, +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.list_tables(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.ListTablesRequest, + dict, + ], +) +def test_list_tables_rest_call_success(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.ListTablesResponse( + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListTablesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.list_tables(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListTablesPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_tables_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_list_tables" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_list_tables_with_metadata" + ) as post_with_metadata, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_list_tables" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = bigtable_table_admin.ListTablesRequest.pb( + bigtable_table_admin.ListTablesRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = bigtable_table_admin.ListTablesResponse.to_json( + bigtable_table_admin.ListTablesResponse() + ) + req.return_value.content = return_value + + request = bigtable_table_admin.ListTablesRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable_table_admin.ListTablesResponse() + post_with_metadata.return_value = ( + bigtable_table_admin.ListTablesResponse(), + metadata, + ) + + client.list_tables( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_get_table_rest_bad_request(request_type=bigtable_table_admin.GetTableRequest): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.get_table(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.GetTableRequest, + dict, + ], +) +def test_get_table_rest_call_success(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = table.Table( + name="name_value", + granularity=table.Table.TimestampGranularity.MILLIS, + deletion_protection=True, + ) + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.get_table(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, table.Table) + assert response.name == "name_value" + assert response.granularity == table.Table.TimestampGranularity.MILLIS + assert response.deletion_protection is True + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_table_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_get_table" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_get_table_with_metadata" + ) as post_with_metadata, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_get_table" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = bigtable_table_admin.GetTableRequest.pb( + bigtable_table_admin.GetTableRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = table.Table.to_json(table.Table()) + req.return_value.content = return_value + + request = bigtable_table_admin.GetTableRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = table.Table() + post_with_metadata.return_value = table.Table(), metadata + + client.get_table( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_update_table_rest_bad_request( + request_type=bigtable_table_admin.UpdateTableRequest, +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = { + "table": {"name": "projects/sample1/instances/sample2/tables/sample3"} + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.update_table(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.UpdateTableRequest, + dict, + ], +) +def test_update_table_rest_call_success(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = { + "table": {"name": "projects/sample1/instances/sample2/tables/sample3"} + } + request_init["table"] = { + "name": "projects/sample1/instances/sample2/tables/sample3", + "cluster_states": {}, + "column_families": {}, + "granularity": 1, + "restore_info": { + "source_type": 1, + "backup_info": { + "backup": "backup_value", + "start_time": {"seconds": 751, "nanos": 543}, + "end_time": {}, + "source_table": "source_table_value", + "source_backup": "source_backup_value", + }, + }, + "change_stream_config": {"retention_period": {"seconds": 751, "nanos": 543}}, + "deletion_protection": True, + "automated_backup_policy": {"retention_period": {}, "frequency": {}}, + "row_key_schema": { + "fields": [ + { + "field_name": "field_name_value", + "type_": { + "bytes_type": {"encoding": {"raw": {}}}, + "string_type": {"encoding": {"utf8_raw": {}, "utf8_bytes": {}}}, + "int64_type": { + "encoding": { + "big_endian_bytes": {"bytes_type": {}}, + "ordered_code_bytes": {}, + } + }, + "float32_type": {}, + "float64_type": {}, + "bool_type": {}, + "timestamp_type": {"encoding": {"unix_micros_int64": {}}}, + "date_type": {}, + "aggregate_type": { + "input_type": {}, + "state_type": {}, + "sum": {}, + "hllpp_unique_count": {}, + "max_": {}, + "min_": {}, + }, + "struct_type": {}, + "array_type": {"element_type": {}}, + "map_type": {"key_type": {}, "value_type": {}}, + "proto_type": { + "schema_bundle_id": "schema_bundle_id_value", + "message_name": "message_name_value", + }, + "enum_type": { + "schema_bundle_id": "schema_bundle_id_value", + "enum_name": "enum_name_value", + }, + }, + } + ], + "encoding": { + "singleton": {}, + "delimited_bytes": {"delimiter": b"delimiter_blob"}, + "ordered_code_bytes": {}, + }, + }, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_table_admin.UpdateTableRequest.meta.fields["table"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = iam_policy_pb2.GetIamPolicyRequest() + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] - assert args[0] == request_msg + subfields_not_in_runtime = [] + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["table"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_set_iam_policy_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["table"][field])): + del request_init["table"][field][i][subfield] + else: + del request_init["table"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.update_table(request) + + # Establish that the response is the type that we expect. 
+ json_return_value = json_format.MessageToJson(return_value) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_update_table_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), ) + client = BigtableTableAdminClient(transport=transport) - # Mock the actual call, and fake the request. - with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - policy_pb2.Policy( - version=774, - etag=b"etag_blob", - ) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_update_table" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_update_table_with_metadata" + ) as post_with_metadata, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_update_table" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = bigtable_table_admin.UpdateTableRequest.pb( + bigtable_table_admin.UpdateTableRequest() ) - await client.set_iam_policy(request=None) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } - # Establish that the underlying stub method was called. - call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = iam_policy_pb2.SetIamPolicyRequest() + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value - assert args[0] == request_msg + request = bigtable_table_admin.UpdateTableRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata + + client.update_table( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() -# This test is a coverage failsafe to make sure that totally empty calls, -# i.e. request == None and no flattened fields passed, work. -@pytest.mark.asyncio -async def test_test_iam_permissions_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( - credentials=async_anonymous_credentials(), - transport="grpc_asyncio", + +def test_delete_table_rest_bad_request( + request_type=bigtable_table_admin.DeleteTableRequest, +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.delete_table(request) + + +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.DeleteTableRequest, + dict, + ], +) +def test_delete_table_rest_call_success(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + json_return_value = "" + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.delete_table(request) + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_delete_table_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) - # Mock the actual call, and fake the request. with mock.patch.object( - type(client.transport.test_iam_permissions), "__call__" - ) as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - iam_policy_pb2.TestIamPermissionsResponse( - permissions=["permissions_value"], - ) + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_delete_table" + ) as pre: + pre.assert_not_called() + pb_message = bigtable_table_admin.DeleteTableRequest.pb( + bigtable_table_admin.DeleteTableRequest() ) - await client.test_iam_permissions(request=None) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } - # Establish that the underlying stub method was called. 
- call.assert_called() - _, args, _ = call.mock_calls[0] - request_msg = iam_policy_pb2.TestIamPermissionsRequest() + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - assert args[0] == request_msg + request = bigtable_table_admin.DeleteTableRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + client.delete_table( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) -def test_transport_kind_rest(): - transport = BigtableTableAdminClient.get_transport_class("rest")( - credentials=ga_credentials.AnonymousCredentials() - ) - assert transport.kind == "rest" + pre.assert_called_once() -def test_create_table_rest_bad_request( - request_type=bigtable_table_admin.CreateTableRequest, +def test_undelete_table_rest_bad_request( + request_type=bigtable_table_admin.UndeleteTableRequest, ): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -19510,55 +23618,45 @@ def test_create_table_rest_bad_request( response_value.request = mock.Mock() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.create_table(request) + client.undelete_table(request) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.CreateTableRequest, + bigtable_table_admin.UndeleteTableRequest, dict, ], ) -def test_create_table_rest_call_success(request_type): +def test_undelete_table_rest_call_success(request_type): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = gba_table.Table( - name="name_value", - granularity=gba_table.Table.TimestampGranularity.MILLIS, - deletion_protection=True, - ) + return_value = operations_pb2.Operation(name="operations/spam") # Wrap the value into a proper Response obj response_value = mock.Mock() response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = gba_table.Table.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.create_table(request) + response = client.undelete_table(request) # Establish that the response is the type that we expect. 
- assert isinstance(response, gba_table.Table) - assert response.name == "name_value" - assert response.granularity == gba_table.Table.TimestampGranularity.MILLIS - assert response.deletion_protection is True + json_return_value = json_format.MessageToJson(return_value) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_create_table_rest_interceptors(null_interceptor): +def test_undelete_table_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -19572,17 +23670,20 @@ def test_create_table_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_create_table" + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_undelete_table" ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_create_table_with_metadata" + transports.BigtableTableAdminRestInterceptor, + "post_undelete_table_with_metadata", ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_create_table" + transports.BigtableTableAdminRestInterceptor, "pre_undelete_table" ) as pre: pre.assert_not_called() post.assert_not_called() post_with_metadata.assert_not_called() - pb_message = bigtable_table_admin.CreateTableRequest.pb( - bigtable_table_admin.CreateTableRequest() + pb_message = bigtable_table_admin.UndeleteTableRequest.pb( + bigtable_table_admin.UndeleteTableRequest() ) transcode.return_value = { "method": "post", @@ -19594,19 +23695,19 @@ def test_create_table_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = gba_table.Table.to_json(gba_table.Table()) + return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value - request = bigtable_table_admin.CreateTableRequest() + request = bigtable_table_admin.UndeleteTableRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = gba_table.Table() - post_with_metadata.return_value = gba_table.Table(), metadata + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata - client.create_table( + client.undelete_table( request, metadata=[ ("key", "val"), @@ -19619,14 +23720,14 @@ def test_create_table_rest_interceptors(null_interceptor): post_with_metadata.assert_called_once() -def test_create_table_from_snapshot_rest_bad_request( - request_type=bigtable_table_admin.CreateTableFromSnapshotRequest, +def test_create_authorized_view_rest_bad_request( + request_type=bigtable_table_admin.CreateAuthorizedViewRequest, ): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} + request_init = {"parent": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
@@ -19641,23 +23742,101 @@ def test_create_table_from_snapshot_rest_bad_request( response_value.request = mock.Mock() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.create_table_from_snapshot(request) + client.create_authorized_view(request) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.CreateTableFromSnapshotRequest, + bigtable_table_admin.CreateAuthorizedViewRequest, dict, ], ) -def test_create_table_from_snapshot_rest_call_success(request_type): +def test_create_authorized_view_rest_call_success(request_type): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} + request_init = {"parent": "projects/sample1/instances/sample2/tables/sample3"} + request_init["authorized_view"] = { + "name": "name_value", + "subset_view": { + "row_prefixes": [b"row_prefixes_blob1", b"row_prefixes_blob2"], + "family_subsets": {}, + }, + "etag": "etag_value", + "deletion_protection": True, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_table_admin.CreateAuthorizedViewRequest.meta.fields[ + "authorized_view" + ] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. + message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["authorized_view"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = 
subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["authorized_view"][field])): + del request_init["authorized_view"][field][i][subfield] + else: + del request_init["authorized_view"][field][subfield] request = request_type(**request_init) # Mock the http request call within the method and fake a response. @@ -19672,14 +23851,14 @@ def test_create_table_from_snapshot_rest_call_success(request_type): response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.create_table_from_snapshot(request) + response = client.create_authorized_view(request) # Establish that the response is the type that we expect. json_return_value = json_format.MessageToJson(return_value) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_create_table_from_snapshot_rest_interceptors(null_interceptor): +def test_create_authorized_view_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -19695,18 +23874,18 @@ def test_create_table_from_snapshot_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( operation.Operation, "_set_result_from_operation" ), mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_create_table_from_snapshot" + transports.BigtableTableAdminRestInterceptor, "post_create_authorized_view" ) as post, mock.patch.object( transports.BigtableTableAdminRestInterceptor, - "post_create_table_from_snapshot_with_metadata", + "post_create_authorized_view_with_metadata", ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_create_table_from_snapshot" + transports.BigtableTableAdminRestInterceptor, "pre_create_authorized_view" ) as pre: pre.assert_not_called() post.assert_not_called() post_with_metadata.assert_not_called() - pb_message = bigtable_table_admin.CreateTableFromSnapshotRequest.pb( - bigtable_table_admin.CreateTableFromSnapshotRequest() + pb_message = bigtable_table_admin.CreateAuthorizedViewRequest.pb( + bigtable_table_admin.CreateAuthorizedViewRequest() ) transcode.return_value = { "method": "post", @@ -19721,7 +23900,7 @@ def test_create_table_from_snapshot_rest_interceptors(null_interceptor): return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value - request = bigtable_table_admin.CreateTableFromSnapshotRequest() + request = bigtable_table_admin.CreateAuthorizedViewRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), @@ -19730,7 +23909,7 @@ def test_create_table_from_snapshot_rest_interceptors(null_interceptor): post.return_value = operations_pb2.Operation() post_with_metadata.return_value = operations_pb2.Operation(), metadata - client.create_table_from_snapshot( + client.create_authorized_view( request, metadata=[ ("key", "val"), @@ -19743,14 +23922,14 @@ def test_create_table_from_snapshot_rest_interceptors(null_interceptor): post_with_metadata.assert_called_once() -def test_list_tables_rest_bad_request( - request_type=bigtable_table_admin.ListTablesRequest, +def test_list_authorized_views_rest_bad_request( + request_type=bigtable_table_admin.ListAuthorizedViewsRequest, ): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that 
will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} + request_init = {"parent": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -19765,29 +23944,29 @@ def test_list_tables_rest_bad_request( response_value.request = mock.Mock() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.list_tables(request) + client.list_authorized_views(request) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.ListTablesRequest, + bigtable_table_admin.ListAuthorizedViewsRequest, dict, ], ) -def test_list_tables_rest_call_success(request_type): +def test_list_authorized_views_rest_call_success(request_type): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} + request_init = {"parent": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.ListTablesResponse( + return_value = bigtable_table_admin.ListAuthorizedViewsResponse( next_page_token="next_page_token_value", ) @@ -19796,20 +23975,20 @@ def test_list_tables_rest_call_success(request_type): response_value.status_code = 200 # Convert return value to protobuf type - return_value = bigtable_table_admin.ListTablesResponse.pb(return_value) + return_value = bigtable_table_admin.ListAuthorizedViewsResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.list_tables(request) + response = client.list_authorized_views(request) # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListTablesPager) + assert isinstance(response, pagers.ListAuthorizedViewsPager) assert response.next_page_token == "next_page_token_value" @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_tables_rest_interceptors(null_interceptor): +def test_list_authorized_views_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -19823,17 +24002,18 @@ def test_list_tables_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_list_tables" + transports.BigtableTableAdminRestInterceptor, "post_list_authorized_views" ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_list_tables_with_metadata" + transports.BigtableTableAdminRestInterceptor, + "post_list_authorized_views_with_metadata", ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_list_tables" + transports.BigtableTableAdminRestInterceptor, "pre_list_authorized_views" ) as pre: pre.assert_not_called() post.assert_not_called() post_with_metadata.assert_not_called() - pb_message = bigtable_table_admin.ListTablesRequest.pb( - bigtable_table_admin.ListTablesRequest() + pb_message = bigtable_table_admin.ListAuthorizedViewsRequest.pb( + bigtable_table_admin.ListAuthorizedViewsRequest() ) transcode.return_value = { "method": "post", @@ -19845,24 +24025,24 @@ def test_list_tables_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = bigtable_table_admin.ListTablesResponse.to_json( - bigtable_table_admin.ListTablesResponse() + return_value = bigtable_table_admin.ListAuthorizedViewsResponse.to_json( + bigtable_table_admin.ListAuthorizedViewsResponse() ) req.return_value.content = return_value - request = bigtable_table_admin.ListTablesRequest() + request = bigtable_table_admin.ListAuthorizedViewsRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = bigtable_table_admin.ListTablesResponse() + post.return_value = bigtable_table_admin.ListAuthorizedViewsResponse() post_with_metadata.return_value = ( - bigtable_table_admin.ListTablesResponse(), + bigtable_table_admin.ListAuthorizedViewsResponse(), metadata, ) - client.list_tables( + client.list_authorized_views( request, metadata=[ ("key", "val"), @@ -19875,12 +24055,16 @@ def test_list_tables_rest_interceptors(null_interceptor): post_with_metadata.assert_called_once() -def test_get_table_rest_bad_request(request_type=bigtable_table_admin.GetTableRequest): +def test_get_authorized_view_rest_bad_request( + request_type=bigtable_table_admin.GetAuthorizedViewRequest, +): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request_init = { + "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
@@ -19895,31 +24079,33 @@ def test_get_table_rest_bad_request(request_type=bigtable_table_admin.GetTableRe response_value.request = mock.Mock() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.get_table(request) + client.get_authorized_view(request) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.GetTableRequest, + bigtable_table_admin.GetAuthorizedViewRequest, dict, ], ) -def test_get_table_rest_call_success(request_type): +def test_get_authorized_view_rest_call_success(request_type): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request_init = { + "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = table.Table( + return_value = table.AuthorizedView( name="name_value", - granularity=table.Table.TimestampGranularity.MILLIS, + etag="etag_value", deletion_protection=True, ) @@ -19928,22 +24114,22 @@ def test_get_table_rest_call_success(request_type): response_value.status_code = 200 # Convert return value to protobuf type - return_value = table.Table.pb(return_value) + return_value = table.AuthorizedView.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.get_table(request) + response = client.get_authorized_view(request) # Establish that the response is the type that we expect. 
- assert isinstance(response, table.Table) + assert isinstance(response, table.AuthorizedView) assert response.name == "name_value" - assert response.granularity == table.Table.TimestampGranularity.MILLIS + assert response.etag == "etag_value" assert response.deletion_protection is True @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_table_rest_interceptors(null_interceptor): +def test_get_authorized_view_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -19957,17 +24143,18 @@ def test_get_table_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_get_table" + transports.BigtableTableAdminRestInterceptor, "post_get_authorized_view" ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_get_table_with_metadata" + transports.BigtableTableAdminRestInterceptor, + "post_get_authorized_view_with_metadata", ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_get_table" + transports.BigtableTableAdminRestInterceptor, "pre_get_authorized_view" ) as pre: pre.assert_not_called() post.assert_not_called() post_with_metadata.assert_not_called() - pb_message = bigtable_table_admin.GetTableRequest.pb( - bigtable_table_admin.GetTableRequest() + pb_message = bigtable_table_admin.GetAuthorizedViewRequest.pb( + bigtable_table_admin.GetAuthorizedViewRequest() ) transcode.return_value = { "method": "post", @@ -19979,19 +24166,19 @@ def test_get_table_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = table.Table.to_json(table.Table()) + return_value = table.AuthorizedView.to_json(table.AuthorizedView()) req.return_value.content = return_value - request = bigtable_table_admin.GetTableRequest() + request = bigtable_table_admin.GetAuthorizedViewRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = table.Table() - post_with_metadata.return_value = table.Table(), metadata + post.return_value = table.AuthorizedView() + post_with_metadata.return_value = table.AuthorizedView(), metadata - client.get_table( + client.get_authorized_view( request, metadata=[ ("key", "val"), @@ -20004,15 +24191,17 @@ def test_get_table_rest_interceptors(null_interceptor): post_with_metadata.assert_called_once() -def test_update_table_rest_bad_request( - request_type=bigtable_table_admin.UpdateTableRequest, +def test_update_authorized_view_rest_bad_request( + request_type=bigtable_table_admin.UpdateAuthorizedViewRequest, ): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding request_init = { - "table": {"name": "projects/sample1/instances/sample2/tables/sample3"} + "authorized_view": { + "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } } request = request_type(**request_init) @@ -20028,88 +24217,44 @@ def test_update_table_rest_bad_request( response_value.request = mock.Mock() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.update_table(request) + client.update_authorized_view(request) 
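Each *_rest_call_success test above stubs the transport session's request method so no network traffic occurs: a mock response carries status 200 and a JSON-encoded payload, which the client decodes back into a typed message and the test then checks field by field. A rough, dependency-free sketch of that shape (FakeTransport/FakeClient are illustrative, and plain dicts stand in for the proto-plus message types):

    import json
    from unittest import mock

    class FakeTransport:
        class _Session:
            def request(self, method, url, **kwargs):
                raise AssertionError("network calls should be mocked in unit tests")
        def __init__(self):
            self._session = self._Session()

    class FakeClient:
        def __init__(self, transport):
            self.transport = transport
        def get_authorized_view(self, name):
            # The real client transcodes the request, issues it through the session,
            # and parses the JSON body back into a message type.
            response = self.transport._session.request("GET", name)
            assert response.status_code == 200
            return json.loads(response.content)

    def test_fake_rest_call_success():
        client = FakeClient(FakeTransport())
        with mock.patch.object(type(client.transport._session), "request") as req:
            response_value = mock.Mock()
            response_value.status_code = 200
            response_value.content = json.dumps(
                {"name": "name_value", "etag": "etag_value", "deletionProtection": True}
            ).encode("utf-8")
            req.return_value = response_value
            view = client.get_authorized_view(
                "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4"
            )
            assert view["etag"] == "etag_value"
            assert view["deletionProtection"] is True

    test_fake_rest_call_success()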
@pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.UpdateTableRequest, + bigtable_table_admin.UpdateAuthorizedViewRequest, dict, ], ) -def test_update_table_rest_call_success(request_type): +def test_update_authorized_view_rest_call_success(request_type): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding request_init = { - "table": {"name": "projects/sample1/instances/sample2/tables/sample3"} + "authorized_view": { + "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } } - request_init["table"] = { - "name": "projects/sample1/instances/sample2/tables/sample3", - "cluster_states": {}, - "column_families": {}, - "granularity": 1, - "restore_info": { - "source_type": 1, - "backup_info": { - "backup": "backup_value", - "start_time": {"seconds": 751, "nanos": 543}, - "end_time": {}, - "source_table": "source_table_value", - "source_backup": "source_backup_value", - }, + request_init["authorized_view"] = { + "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4", + "subset_view": { + "row_prefixes": [b"row_prefixes_blob1", b"row_prefixes_blob2"], + "family_subsets": {}, }, - "change_stream_config": {"retention_period": {"seconds": 751, "nanos": 543}}, + "etag": "etag_value", "deletion_protection": True, - "automated_backup_policy": {"retention_period": {}, "frequency": {}}, - "row_key_schema": { - "fields": [ - { - "field_name": "field_name_value", - "type_": { - "bytes_type": {"encoding": {"raw": {}}}, - "string_type": {"encoding": {"utf8_raw": {}, "utf8_bytes": {}}}, - "int64_type": { - "encoding": { - "big_endian_bytes": {"bytes_type": {}}, - "ordered_code_bytes": {}, - } - }, - "float32_type": {}, - "float64_type": {}, - "bool_type": {}, - "timestamp_type": {"encoding": {"unix_micros_int64": {}}}, - "date_type": {}, - "aggregate_type": { - "input_type": {}, - "state_type": {}, - "sum": {}, - "hllpp_unique_count": {}, - "max_": {}, - "min_": {}, - }, - "struct_type": {}, - "array_type": {"element_type": {}}, - "map_type": {"key_type": {}, "value_type": {}}, - }, - } - ], - "encoding": { - "singleton": {}, - "delimited_bytes": {"delimiter": b"delimiter_blob"}, - "ordered_code_bytes": {}, - }, - }, } # The version of a generated dependency at test runtime may differ from the version used during generation. 
# Delete any fields which are not present in the current runtime dependency # See https://github.com/googleapis/gapic-generator-python/issues/1748 # Determine if the message type is proto-plus or protobuf - test_field = bigtable_table_admin.UpdateTableRequest.meta.fields["table"] + test_field = bigtable_table_admin.UpdateAuthorizedViewRequest.meta.fields[ + "authorized_view" + ] def get_message_fields(field): # Given a field which is a message (composite type), return a list with @@ -20137,7 +24282,7 @@ def get_message_fields(field): # For each item in the sample request, create a list of sub fields which are not present at runtime # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["table"].items(): # pragma: NO COVER + for field, value in request_init["authorized_view"].items(): # pragma: NO COVER result = None is_repeated = False # For repeated fields @@ -20167,10 +24312,10 @@ def get_message_fields(field): subfield = subfield_to_delete.get("subfield") if subfield: if field_repeated: - for i in range(0, len(request_init["table"][field])): - del request_init["table"][field][i][subfield] + for i in range(0, len(request_init["authorized_view"][field])): + del request_init["authorized_view"][field][i][subfield] else: - del request_init["table"][field][subfield] + del request_init["authorized_view"][field][subfield] request = request_type(**request_init) # Mock the http request call within the method and fake a response. @@ -20185,14 +24330,14 @@ def get_message_fields(field): response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.update_table(request) + response = client.update_authorized_view(request) # Establish that the response is the type that we expect. 
json_return_value = json_format.MessageToJson(return_value) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_update_table_rest_interceptors(null_interceptor): +def test_update_authorized_view_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -20208,17 +24353,18 @@ def test_update_table_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( operation.Operation, "_set_result_from_operation" ), mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_update_table" + transports.BigtableTableAdminRestInterceptor, "post_update_authorized_view" ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_update_table_with_metadata" + transports.BigtableTableAdminRestInterceptor, + "post_update_authorized_view_with_metadata", ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_update_table" + transports.BigtableTableAdminRestInterceptor, "pre_update_authorized_view" ) as pre: pre.assert_not_called() post.assert_not_called() post_with_metadata.assert_not_called() - pb_message = bigtable_table_admin.UpdateTableRequest.pb( - bigtable_table_admin.UpdateTableRequest() + pb_message = bigtable_table_admin.UpdateAuthorizedViewRequest.pb( + bigtable_table_admin.UpdateAuthorizedViewRequest() ) transcode.return_value = { "method": "post", @@ -20233,7 +24379,7 @@ def test_update_table_rest_interceptors(null_interceptor): return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value - request = bigtable_table_admin.UpdateTableRequest() + request = bigtable_table_admin.UpdateAuthorizedViewRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), @@ -20242,7 +24388,7 @@ def test_update_table_rest_interceptors(null_interceptor): post.return_value = operations_pb2.Operation() post_with_metadata.return_value = operations_pb2.Operation(), metadata - client.update_table( + client.update_authorized_view( request, metadata=[ ("key", "val"), @@ -20255,123 +24401,16 @@ def test_update_table_rest_interceptors(null_interceptor): post_with_metadata.assert_called_once() -def test_delete_table_rest_bad_request( - request_type=bigtable_table_admin.DeleteTableRequest, -): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, "request") as req, pytest.raises( - core_exceptions.BadRequest - ): - # Wrap the value into a proper Response obj - response_value = mock.Mock() - json_return_value = "" - response_value.json = mock.Mock(return_value={}) - response_value.status_code = 400 - response_value.request = mock.Mock() - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.delete_table(request) - - -@pytest.mark.parametrize( - "request_type", - [ - bigtable_table_admin.DeleteTableRequest, - dict, - ], -) -def test_delete_table_rest_call_success(request_type): - client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" - ) - - # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), "request") as req: - # Designate an appropriate value for the returned response. - return_value = None - - # Wrap the value into a proper Response obj - response_value = mock.Mock() - response_value.status_code = 200 - json_return_value = "" - response_value.content = json_return_value.encode("UTF-8") - req.return_value = response_value - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.delete_table(request) - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_delete_table_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), - interceptor=None - if null_interceptor - else transports.BigtableTableAdminRestInterceptor(), - ) - client = BigtableTableAdminClient(transport=transport) - - with mock.patch.object( - type(client.transport._session), "request" - ) as req, mock.patch.object( - path_template, "transcode" - ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_delete_table" - ) as pre: - pre.assert_not_called() - pb_message = bigtable_table_admin.DeleteTableRequest.pb( - bigtable_table_admin.DeleteTableRequest() - ) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = mock.Mock() - req.return_value.status_code = 200 - req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - - request = bigtable_table_admin.DeleteTableRequest() - metadata = [ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - - client.delete_table( - request, - metadata=[ - ("key", "val"), - ("cephalopod", "squid"), - ], - ) - - pre.assert_called_once() - - -def test_undelete_table_rest_bad_request( - request_type=bigtable_table_admin.UndeleteTableRequest, +def test_delete_authorized_view_rest_bad_request( + request_type=bigtable_table_admin.DeleteAuthorizedViewRequest, ): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request_init = { + "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } request = request_type(**request_init) # Mock the http 
request call within the method and fake a BadRequest error. @@ -20386,45 +24425,47 @@ def test_undelete_table_rest_bad_request( response_value.request = mock.Mock() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.undelete_table(request) + client.delete_authorized_view(request) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.UndeleteTableRequest, + bigtable_table_admin.DeleteAuthorizedViewRequest, dict, ], ) -def test_undelete_table_rest_call_success(request_type): +def test_delete_authorized_view_rest_call_success(request_type): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request_init = { + "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + } request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") + return_value = None # Wrap the value into a proper Response obj response_value = mock.Mock() response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) + json_return_value = "" response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.undelete_table(request) + response = client.delete_authorized_view(request) # Establish that the response is the type that we expect. 
- json_return_value = json_format.MessageToJson(return_value) + assert response is None @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_undelete_table_rest_interceptors(null_interceptor): +def test_delete_authorized_view_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -20438,20 +24479,11 @@ def test_undelete_table_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_undelete_table" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, - "post_undelete_table_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_undelete_table" + transports.BigtableTableAdminRestInterceptor, "pre_delete_authorized_view" ) as pre: pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = bigtable_table_admin.UndeleteTableRequest.pb( - bigtable_table_admin.UndeleteTableRequest() + pb_message = bigtable_table_admin.DeleteAuthorizedViewRequest.pb( + bigtable_table_admin.DeleteAuthorizedViewRequest() ) transcode.return_value = { "method": "post", @@ -20463,19 +24495,15 @@ def test_undelete_table_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = json_format.MessageToJson(operations_pb2.Operation()) - req.return_value.content = return_value - request = bigtable_table_admin.UndeleteTableRequest() + request = bigtable_table_admin.DeleteAuthorizedViewRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - post_with_metadata.return_value = operations_pb2.Operation(), metadata - client.undelete_table( + client.delete_authorized_view( request, metadata=[ ("key", "val"), @@ -20484,18 +24512,16 @@ def test_undelete_table_rest_interceptors(null_interceptor): ) pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() -def test_create_authorized_view_rest_bad_request( - request_type=bigtable_table_admin.CreateAuthorizedViewRequest, +def test_modify_column_families_rest_bad_request( + request_type=bigtable_table_admin.ModifyColumnFamiliesRequest, ): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/tables/sample3"} + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
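The request_init pruning blocks in the create/update tests exist because the sample dict is generated against one protobuf schema, while the schema installed at test time may be older and lack some nested fields, so unknown subfields must be deleted before constructing the request. The idea, reduced to a dependency-free sketch (known_fields stands in for what get_message_fields derives from the runtime message descriptors; the helper name is invented):

    def prune_unknown_subfields(sample, known_fields):
        """Drop nested keys the runtime schema does not know; known_fields is a set of (field, subfield) pairs."""
        for field, value in list(sample.items()):
            # Repeated fields: inspect each element; singular message fields: the dict itself.
            items = value if isinstance(value, list) else [value]
            for item in items:
                if not isinstance(item, dict):
                    continue
                for subfield in list(item.keys()):
                    if (field, subfield) not in known_fields:
                        del item[subfield]
        return sample

    sample = {
        "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4",
        "subset_view": {"row_prefixes": [b"p1"], "family_subsets": {}, "not_in_runtime": 1},
    }
    known = {("subset_view", "row_prefixes"), ("subset_view", "family_subsets")}
    pruned = prune_unknown_subfields(sample, known)
    assert "not_in_runtime" not in pruned["subset_view"]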
@@ -20510,123 +24536,55 @@ def test_create_authorized_view_rest_bad_request( response_value.request = mock.Mock() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.create_authorized_view(request) + client.modify_column_families(request) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.CreateAuthorizedViewRequest, + bigtable_table_admin.ModifyColumnFamiliesRequest, dict, ], ) -def test_create_authorized_view_rest_call_success(request_type): +def test_modify_column_families_rest_call_success(request_type): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/tables/sample3"} - request_init["authorized_view"] = { - "name": "name_value", - "subset_view": { - "row_prefixes": [b"row_prefixes_blob1", b"row_prefixes_blob2"], - "family_subsets": {}, - }, - "etag": "etag_value", - "deletion_protection": True, - } - # The version of a generated dependency at test runtime may differ from the version used during generation. - # Delete any fields which are not present in the current runtime dependency - # See https://github.com/googleapis/gapic-generator-python/issues/1748 - - # Determine if the message type is proto-plus or protobuf - test_field = bigtable_table_admin.CreateAuthorizedViewRequest.meta.fields[ - "authorized_view" - ] - - def get_message_fields(field): - # Given a field which is a message (composite type), return a list with - # all the fields of the message. - # If the field is not a composite type, return an empty list. - message_fields = [] - - if hasattr(field, "message") and field.message: - is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") - - if is_field_type_proto_plus_type: - message_fields = field.message.meta.fields.values() - # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types - else: # pragma: NO COVER - message_fields = field.message.DESCRIPTOR.fields - return message_fields - - runtime_nested_fields = [ - (field.name, nested_field.name) - for field in get_message_fields(test_field) - for nested_field in get_message_fields(field) - ] - - subfields_not_in_runtime = [] - - # For each item in the sample request, create a list of sub fields which are not present at runtime - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["authorized_view"].items(): # pragma: NO COVER - result = None - is_repeated = False - # For repeated fields - if isinstance(value, list) and len(value): - is_repeated = True - result = value[0] - # For fields where the type is another message - if isinstance(value, dict): - result = value - - if result and hasattr(result, "keys"): - for subfield in result.keys(): - if (field, subfield) not in runtime_nested_fields: - subfields_not_in_runtime.append( - { - "field": field, - "subfield": subfield, - "is_repeated": is_repeated, - } - ) - - # Remove fields from the sample request which are not present in the runtime version of the dependency - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER - field = subfield_to_delete.get("field") - field_repeated = subfield_to_delete.get("is_repeated") - subfield = subfield_to_delete.get("subfield") - if subfield: - if 
field_repeated: - for i in range(0, len(request_init["authorized_view"][field])): - del request_init["authorized_view"][field][i][subfield] - else: - del request_init["authorized_view"][field][subfield] + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") + return_value = table.Table( + name="name_value", + granularity=table.Table.TimestampGranularity.MILLIS, + deletion_protection=True, + ) # Wrap the value into a proper Response obj response_value = mock.Mock() response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = table.Table.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.create_authorized_view(request) + response = client.modify_column_families(request) # Establish that the response is the type that we expect. - json_return_value = json_format.MessageToJson(return_value) + assert isinstance(response, table.Table) + assert response.name == "name_value" + assert response.granularity == table.Table.TimestampGranularity.MILLIS + assert response.deletion_protection is True @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_create_authorized_view_rest_interceptors(null_interceptor): +def test_modify_column_families_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -20640,20 +24598,18 @@ def test_create_authorized_view_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_create_authorized_view" + transports.BigtableTableAdminRestInterceptor, "post_modify_column_families" ) as post, mock.patch.object( transports.BigtableTableAdminRestInterceptor, - "post_create_authorized_view_with_metadata", + "post_modify_column_families_with_metadata", ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_create_authorized_view" + transports.BigtableTableAdminRestInterceptor, "pre_modify_column_families" ) as pre: pre.assert_not_called() post.assert_not_called() post_with_metadata.assert_not_called() - pb_message = bigtable_table_admin.CreateAuthorizedViewRequest.pb( - bigtable_table_admin.CreateAuthorizedViewRequest() + pb_message = bigtable_table_admin.ModifyColumnFamiliesRequest.pb( + bigtable_table_admin.ModifyColumnFamiliesRequest() ) transcode.return_value = { "method": "post", @@ -20665,19 +24621,19 @@ def test_create_authorized_view_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = json_format.MessageToJson(operations_pb2.Operation()) + return_value = table.Table.to_json(table.Table()) req.return_value.content = return_value - request = bigtable_table_admin.CreateAuthorizedViewRequest() + request = 
bigtable_table_admin.ModifyColumnFamiliesRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - post_with_metadata.return_value = operations_pb2.Operation(), metadata + post.return_value = table.Table() + post_with_metadata.return_value = table.Table(), metadata - client.create_authorized_view( + client.modify_column_families( request, metadata=[ ("key", "val"), @@ -20690,14 +24646,14 @@ def test_create_authorized_view_rest_interceptors(null_interceptor): post_with_metadata.assert_called_once() -def test_list_authorized_views_rest_bad_request( - request_type=bigtable_table_admin.ListAuthorizedViewsRequest, +def test_drop_row_range_rest_bad_request( + request_type=bigtable_table_admin.DropRowRangeRequest, ): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/tables/sample3"} + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -20712,51 +24668,45 @@ def test_list_authorized_views_rest_bad_request( response_value.request = mock.Mock() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.list_authorized_views(request) + client.drop_row_range(request) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.ListAuthorizedViewsRequest, + bigtable_table_admin.DropRowRangeRequest, dict, ], ) -def test_list_authorized_views_rest_call_success(request_type): +def test_drop_row_range_rest_call_success(request_type): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/tables/sample3"} + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.ListAuthorizedViewsResponse( - next_page_token="next_page_token_value", - ) + return_value = None # Wrap the value into a proper Response obj response_value = mock.Mock() response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable_table_admin.ListAuthorizedViewsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) + json_return_value = "" response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.list_authorized_views(request) + response = client.drop_row_range(request) # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListAuthorizedViewsPager) - assert response.next_page_token == "next_page_token_value" + assert response is None @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_authorized_views_rest_interceptors(null_interceptor): +def test_drop_row_range_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -20770,18 +24720,11 @@ def test_list_authorized_views_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_list_authorized_views" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, - "post_list_authorized_views_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_list_authorized_views" + transports.BigtableTableAdminRestInterceptor, "pre_drop_row_range" ) as pre: pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = bigtable_table_admin.ListAuthorizedViewsRequest.pb( - bigtable_table_admin.ListAuthorizedViewsRequest() + pb_message = bigtable_table_admin.DropRowRangeRequest.pb( + bigtable_table_admin.DropRowRangeRequest() ) transcode.return_value = { "method": "post", @@ -20793,24 +24736,15 @@ def test_list_authorized_views_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = bigtable_table_admin.ListAuthorizedViewsResponse.to_json( - bigtable_table_admin.ListAuthorizedViewsResponse() - ) - req.return_value.content = return_value - request = bigtable_table_admin.ListAuthorizedViewsRequest() + request = bigtable_table_admin.DropRowRangeRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = bigtable_table_admin.ListAuthorizedViewsResponse() - post_with_metadata.return_value = ( - bigtable_table_admin.ListAuthorizedViewsResponse(), - metadata, - ) - client.list_authorized_views( + client.drop_row_range( request, metadata=[ ("key", "val"), @@ -20819,20 +24753,16 @@ def test_list_authorized_views_rest_interceptors(null_interceptor): ) pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() -def test_get_authorized_view_rest_bad_request( - request_type=bigtable_table_admin.GetAuthorizedViewRequest, +def test_generate_consistency_token_rest_bad_request( + request_type=bigtable_table_admin.GenerateConsistencyTokenRequest, ): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" - } + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
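The *_rest_bad_request tests above share one skeleton: the mocked session returns a response with status_code 400, and the client call is expected to surface it as google.api_core's BadRequest. A stripped-down sketch of that skeleton with stand-in BadRequest/SimpleClient classes:

    from unittest import mock
    import pytest

    class BadRequest(Exception):
        """Stand-in for google.api_core.exceptions.BadRequest."""

    class SimpleClient:
        def __init__(self, session):
            self._session = session
        def drop_row_range(self, name):
            response = self._session.request("POST", name)
            if response.status_code == 400:
                raise BadRequest(f"400 for {name}")
            return None

    def test_drop_row_range_bad_request():
        session = mock.Mock()
        bad_response = mock.Mock()
        bad_response.status_code = 400
        bad_response.json = mock.Mock(return_value={})
        session.request.return_value = bad_response
        client = SimpleClient(session)
        with pytest.raises(BadRequest):
            client.drop_row_range("projects/sample1/instances/sample2/tables/sample3")

    test_drop_row_range_bad_request()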
@@ -20847,34 +24777,30 @@ def test_get_authorized_view_rest_bad_request( response_value.request = mock.Mock() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.get_authorized_view(request) + client.generate_consistency_token(request) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.GetAuthorizedViewRequest, + bigtable_table_admin.GenerateConsistencyTokenRequest, dict, ], ) -def test_get_authorized_view_rest_call_success(request_type): +def test_generate_consistency_token_rest_call_success(request_type): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" - } + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = table.AuthorizedView( - name="name_value", - etag="etag_value", - deletion_protection=True, + return_value = bigtable_table_admin.GenerateConsistencyTokenResponse( + consistency_token="consistency_token_value", ) # Wrap the value into a proper Response obj @@ -20882,22 +24808,22 @@ def test_get_authorized_view_rest_call_success(request_type): response_value.status_code = 200 # Convert return value to protobuf type - return_value = table.AuthorizedView.pb(return_value) + return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb( + return_value + ) json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.get_authorized_view(request) + response = client.generate_consistency_token(request) # Establish that the response is the type that we expect. 
- assert isinstance(response, table.AuthorizedView) - assert response.name == "name_value" - assert response.etag == "etag_value" - assert response.deletion_protection is True + assert isinstance(response, bigtable_table_admin.GenerateConsistencyTokenResponse) + assert response.consistency_token == "consistency_token_value" @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_authorized_view_rest_interceptors(null_interceptor): +def test_generate_consistency_token_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -20911,18 +24837,18 @@ def test_get_authorized_view_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_get_authorized_view" + transports.BigtableTableAdminRestInterceptor, "post_generate_consistency_token" ) as post, mock.patch.object( transports.BigtableTableAdminRestInterceptor, - "post_get_authorized_view_with_metadata", + "post_generate_consistency_token_with_metadata", ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_get_authorized_view" + transports.BigtableTableAdminRestInterceptor, "pre_generate_consistency_token" ) as pre: pre.assert_not_called() post.assert_not_called() post_with_metadata.assert_not_called() - pb_message = bigtable_table_admin.GetAuthorizedViewRequest.pb( - bigtable_table_admin.GetAuthorizedViewRequest() + pb_message = bigtable_table_admin.GenerateConsistencyTokenRequest.pb( + bigtable_table_admin.GenerateConsistencyTokenRequest() ) transcode.return_value = { "method": "post", @@ -20934,19 +24860,24 @@ def test_get_authorized_view_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = table.AuthorizedView.to_json(table.AuthorizedView()) + return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.to_json( + bigtable_table_admin.GenerateConsistencyTokenResponse() + ) req.return_value.content = return_value - request = bigtable_table_admin.GetAuthorizedViewRequest() + request = bigtable_table_admin.GenerateConsistencyTokenRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = table.AuthorizedView() - post_with_metadata.return_value = table.AuthorizedView(), metadata + post.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() + post_with_metadata.return_value = ( + bigtable_table_admin.GenerateConsistencyTokenResponse(), + metadata, + ) - client.get_authorized_view( + client.generate_consistency_token( request, metadata=[ ("key", "val"), @@ -20959,18 +24890,14 @@ def test_get_authorized_view_rest_interceptors(null_interceptor): post_with_metadata.assert_called_once() -def test_update_authorized_view_rest_bad_request( - request_type=bigtable_table_admin.UpdateAuthorizedViewRequest, +def test_check_consistency_rest_bad_request( + request_type=bigtable_table_admin.CheckConsistencyRequest, ): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = { - "authorized_view": { - "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" - } - } + request_init = {"name": 
"projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -20985,127 +24912,51 @@ def test_update_authorized_view_rest_bad_request( response_value.request = mock.Mock() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.update_authorized_view(request) + client.check_consistency(request) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.UpdateAuthorizedViewRequest, + bigtable_table_admin.CheckConsistencyRequest, dict, ], ) -def test_update_authorized_view_rest_call_success(request_type): +def test_check_consistency_rest_call_success(request_type): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = { - "authorized_view": { - "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" - } - } - request_init["authorized_view"] = { - "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4", - "subset_view": { - "row_prefixes": [b"row_prefixes_blob1", b"row_prefixes_blob2"], - "family_subsets": {}, - }, - "etag": "etag_value", - "deletion_protection": True, - } - # The version of a generated dependency at test runtime may differ from the version used during generation. - # Delete any fields which are not present in the current runtime dependency - # See https://github.com/googleapis/gapic-generator-python/issues/1748 - - # Determine if the message type is proto-plus or protobuf - test_field = bigtable_table_admin.UpdateAuthorizedViewRequest.meta.fields[ - "authorized_view" - ] - - def get_message_fields(field): - # Given a field which is a message (composite type), return a list with - # all the fields of the message. - # If the field is not a composite type, return an empty list. 
- message_fields = [] - - if hasattr(field, "message") and field.message: - is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") - - if is_field_type_proto_plus_type: - message_fields = field.message.meta.fields.values() - # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types - else: # pragma: NO COVER - message_fields = field.message.DESCRIPTOR.fields - return message_fields - - runtime_nested_fields = [ - (field.name, nested_field.name) - for field in get_message_fields(test_field) - for nested_field in get_message_fields(field) - ] - - subfields_not_in_runtime = [] - - # For each item in the sample request, create a list of sub fields which are not present at runtime - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["authorized_view"].items(): # pragma: NO COVER - result = None - is_repeated = False - # For repeated fields - if isinstance(value, list) and len(value): - is_repeated = True - result = value[0] - # For fields where the type is another message - if isinstance(value, dict): - result = value - - if result and hasattr(result, "keys"): - for subfield in result.keys(): - if (field, subfield) not in runtime_nested_fields: - subfields_not_in_runtime.append( - { - "field": field, - "subfield": subfield, - "is_repeated": is_repeated, - } - ) - - # Remove fields from the sample request which are not present in the runtime version of the dependency - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER - field = subfield_to_delete.get("field") - field_repeated = subfield_to_delete.get("is_repeated") - subfield = subfield_to_delete.get("subfield") - if subfield: - if field_repeated: - for i in range(0, len(request_init["authorized_view"][field])): - del request_init["authorized_view"][field][i][subfield] - else: - del request_init["authorized_view"][field][subfield] + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") + return_value = bigtable_table_admin.CheckConsistencyResponse( + consistent=True, + ) # Wrap the value into a proper Response obj response_value = mock.Mock() response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable_table_admin.CheckConsistencyResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.update_authorized_view(request) + response = client.check_consistency(request) # Establish that the response is the type that we expect. 
- json_return_value = json_format.MessageToJson(return_value) + assert isinstance(response, bigtable_table_admin.CheckConsistencyResponse) + assert response.consistent is True @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_update_authorized_view_rest_interceptors(null_interceptor): +def test_check_consistency_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -21119,20 +24970,18 @@ def test_update_authorized_view_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_update_authorized_view" + transports.BigtableTableAdminRestInterceptor, "post_check_consistency" ) as post, mock.patch.object( transports.BigtableTableAdminRestInterceptor, - "post_update_authorized_view_with_metadata", + "post_check_consistency_with_metadata", ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_update_authorized_view" + transports.BigtableTableAdminRestInterceptor, "pre_check_consistency" ) as pre: pre.assert_not_called() post.assert_not_called() post_with_metadata.assert_not_called() - pb_message = bigtable_table_admin.UpdateAuthorizedViewRequest.pb( - bigtable_table_admin.UpdateAuthorizedViewRequest() + pb_message = bigtable_table_admin.CheckConsistencyRequest.pb( + bigtable_table_admin.CheckConsistencyRequest() ) transcode.return_value = { "method": "post", @@ -21144,19 +24993,24 @@ def test_update_authorized_view_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = json_format.MessageToJson(operations_pb2.Operation()) + return_value = bigtable_table_admin.CheckConsistencyResponse.to_json( + bigtable_table_admin.CheckConsistencyResponse() + ) req.return_value.content = return_value - request = bigtable_table_admin.UpdateAuthorizedViewRequest() + request = bigtable_table_admin.CheckConsistencyRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - post_with_metadata.return_value = operations_pb2.Operation(), metadata + post.return_value = bigtable_table_admin.CheckConsistencyResponse() + post_with_metadata.return_value = ( + bigtable_table_admin.CheckConsistencyResponse(), + metadata, + ) - client.update_authorized_view( + client.check_consistency( request, metadata=[ ("key", "val"), @@ -21169,16 +25023,14 @@ def test_update_authorized_view_rest_interceptors(null_interceptor): post_with_metadata.assert_called_once() -def test_delete_authorized_view_rest_bad_request( - request_type=bigtable_table_admin.DeleteAuthorizedViewRequest, +def test_snapshot_table_rest_bad_request( + request_type=bigtable_table_admin.SnapshotTableRequest, ): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" - } + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
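For context on the two RPCs exercised above: generate_consistency_token returns an opaque token for a table, and check_consistency reports whether replication has caught up to the point at which that token was created, so callers typically poll it. A rough sketch of that loop against a hypothetical client object (the method and field names follow the tests above; the polling helper itself is not part of the generated surface):

    import time

    def wait_for_consistency(client, table_name, poll_seconds=2.0, timeout=60.0):
        """Poll check_consistency until it reports consistent=True or the timeout expires."""
        token = client.generate_consistency_token(name=table_name).consistency_token
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            if client.check_consistency(name=table_name, consistency_token=token).consistent:
                return True
            time.sleep(poll_seconds)
        return False

    class _FakeAdminClient:
        """Minimal stand-in so the helper can be exercised without a real backend."""
        def generate_consistency_token(self, name):
            return type("Resp", (), {"consistency_token": "token123"})()
        def check_consistency(self, name, consistency_token):
            return type("Resp", (), {"consistent": True})()

    assert wait_for_consistency(_FakeAdminClient(), "projects/p/instances/i/tables/t")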
@@ -21193,47 +25045,45 @@ def test_delete_authorized_view_rest_bad_request( response_value.request = mock.Mock() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.delete_authorized_view(request) + client.snapshot_table(request) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.DeleteAuthorizedViewRequest, + bigtable_table_admin.SnapshotTableRequest, dict, ], ) -def test_delete_authorized_view_rest_call_success(request_type): +def test_snapshot_table_rest_call_success(request_type): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" - } + request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = None + return_value = operations_pb2.Operation(name="operations/spam") # Wrap the value into a proper Response obj response_value = mock.Mock() response_value.status_code = 200 - json_return_value = "" + json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.delete_authorized_view(request) + response = client.snapshot_table(request) # Establish that the response is the type that we expect. - assert response is None + json_return_value = json_format.MessageToJson(return_value) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_delete_authorized_view_rest_interceptors(null_interceptor): +def test_snapshot_table_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -21247,11 +25097,20 @@ def test_delete_authorized_view_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_delete_authorized_view" + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_snapshot_table" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, + "post_snapshot_table_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_snapshot_table" ) as pre: pre.assert_not_called() - pb_message = bigtable_table_admin.DeleteAuthorizedViewRequest.pb( - bigtable_table_admin.DeleteAuthorizedViewRequest() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = bigtable_table_admin.SnapshotTableRequest.pb( + bigtable_table_admin.SnapshotTableRequest() ) transcode.return_value = { "method": "post", @@ -21263,15 +25122,19 @@ def test_delete_authorized_view_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(operations_pb2.Operation()) + req.return_value.content = return_value - request = 
bigtable_table_admin.DeleteAuthorizedViewRequest() + request = bigtable_table_admin.SnapshotTableRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata - client.delete_authorized_view( + client.snapshot_table( request, metadata=[ ("key", "val"), @@ -21280,16 +25143,20 @@ def test_delete_authorized_view_rest_interceptors(null_interceptor): ) pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() -def test_modify_column_families_rest_bad_request( - request_type=bigtable_table_admin.ModifyColumnFamiliesRequest, +def test_get_snapshot_rest_bad_request( + request_type=bigtable_table_admin.GetSnapshotRequest, ): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request_init = { + "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4" + } request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -21304,32 +25171,35 @@ def test_modify_column_families_rest_bad_request( response_value.request = mock.Mock() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.modify_column_families(request) + client.get_snapshot(request) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.ModifyColumnFamiliesRequest, + bigtable_table_admin.GetSnapshotRequest, dict, ], ) -def test_modify_column_families_rest_call_success(request_type): +def test_get_snapshot_rest_call_success(request_type): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request_init = { + "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4" + } request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = table.Table( + return_value = table.Snapshot( name="name_value", - granularity=table.Table.TimestampGranularity.MILLIS, - deletion_protection=True, + data_size_bytes=1594, + state=table.Snapshot.State.READY, + description="description_value", ) # Wrap the value into a proper Response obj @@ -21337,22 +25207,23 @@ def test_modify_column_families_rest_call_success(request_type): response_value.status_code = 200 # Convert return value to protobuf type - return_value = table.Table.pb(return_value) + return_value = table.Snapshot.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.modify_column_families(request) + response = client.get_snapshot(request) # Establish that the response is the type that we expect. 
- assert isinstance(response, table.Table) + assert isinstance(response, table.Snapshot) assert response.name == "name_value" - assert response.granularity == table.Table.TimestampGranularity.MILLIS - assert response.deletion_protection is True + assert response.data_size_bytes == 1594 + assert response.state == table.Snapshot.State.READY + assert response.description == "description_value" @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_modify_column_families_rest_interceptors(null_interceptor): +def test_get_snapshot_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -21366,18 +25237,17 @@ def test_modify_column_families_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_modify_column_families" + transports.BigtableTableAdminRestInterceptor, "post_get_snapshot" ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, - "post_modify_column_families_with_metadata", + transports.BigtableTableAdminRestInterceptor, "post_get_snapshot_with_metadata" ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_modify_column_families" + transports.BigtableTableAdminRestInterceptor, "pre_get_snapshot" ) as pre: pre.assert_not_called() post.assert_not_called() post_with_metadata.assert_not_called() - pb_message = bigtable_table_admin.ModifyColumnFamiliesRequest.pb( - bigtable_table_admin.ModifyColumnFamiliesRequest() + pb_message = bigtable_table_admin.GetSnapshotRequest.pb( + bigtable_table_admin.GetSnapshotRequest() ) transcode.return_value = { "method": "post", @@ -21389,19 +25259,19 @@ def test_modify_column_families_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = table.Table.to_json(table.Table()) + return_value = table.Snapshot.to_json(table.Snapshot()) req.return_value.content = return_value - request = bigtable_table_admin.ModifyColumnFamiliesRequest() + request = bigtable_table_admin.GetSnapshotRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = table.Table() - post_with_metadata.return_value = table.Table(), metadata + post.return_value = table.Snapshot() + post_with_metadata.return_value = table.Snapshot(), metadata - client.modify_column_families( + client.get_snapshot( request, metadata=[ ("key", "val"), @@ -21414,14 +25284,14 @@ def test_modify_column_families_rest_interceptors(null_interceptor): post_with_metadata.assert_called_once() -def test_drop_row_range_rest_bad_request( - request_type=bigtable_table_admin.DropRowRangeRequest, +def test_list_snapshots_rest_bad_request( + request_type=bigtable_table_admin.ListSnapshotsRequest, ): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
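The snapshot_table hunks above show the long-running-operation pattern: the faked HTTP body is an Operation JSON payload (name "operations/spam"), and operation.Operation._set_result_from_operation is patched so the test never waits on a real backend. A toy illustration of the future-like object such methods hand back (FakeOperationFuture and its fields are invented, not the real google.api_core type):

    class FakeOperationFuture:
        """Minimal stand-in for the LRO future the real client returns."""
        def __init__(self, name, result_value=None):
            self.operation_name = name
            self._result = result_value
        def result(self, timeout=None):
            return self._result

    def fake_snapshot_table(name):
        # The REST layer would decode an Operation JSON body like {"name": "operations/spam"}.
        return FakeOperationFuture("operations/spam", result_value={"snapshot": name})

    fut = fake_snapshot_table("projects/p/instances/i/clusters/c/snapshots/s")
    assert fut.operation_name == "operations/spam"
    assert fut.result()["snapshot"].endswith("/snapshots/s")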
@@ -21436,45 +25306,51 @@ def test_drop_row_range_rest_bad_request( response_value.request = mock.Mock() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.drop_row_range(request) + client.list_snapshots(request) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.DropRowRangeRequest, + bigtable_table_admin.ListSnapshotsRequest, dict, ], ) -def test_drop_row_range_rest_call_success(request_type): +def test_list_snapshots_rest_call_success(request_type): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = None + return_value = bigtable_table_admin.ListSnapshotsResponse( + next_page_token="next_page_token_value", + ) # Wrap the value into a proper Response obj response_value = mock.Mock() response_value.status_code = 200 - json_return_value = "" + + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListSnapshotsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.drop_row_range(request) + response = client.list_snapshots(request) # Establish that the response is the type that we expect. 
- assert response is None + assert isinstance(response, pagers.ListSnapshotsPager) + assert response.next_page_token == "next_page_token_value" @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_drop_row_range_rest_interceptors(null_interceptor): +def test_list_snapshots_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -21488,11 +25364,18 @@ def test_drop_row_range_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_drop_row_range" + transports.BigtableTableAdminRestInterceptor, "post_list_snapshots" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, + "post_list_snapshots_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_list_snapshots" ) as pre: pre.assert_not_called() - pb_message = bigtable_table_admin.DropRowRangeRequest.pb( - bigtable_table_admin.DropRowRangeRequest() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = bigtable_table_admin.ListSnapshotsRequest.pb( + bigtable_table_admin.ListSnapshotsRequest() ) transcode.return_value = { "method": "post", @@ -21504,15 +25387,24 @@ def test_drop_row_range_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = bigtable_table_admin.ListSnapshotsResponse.to_json( + bigtable_table_admin.ListSnapshotsResponse() + ) + req.return_value.content = return_value - request = bigtable_table_admin.DropRowRangeRequest() + request = bigtable_table_admin.ListSnapshotsRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata + post.return_value = bigtable_table_admin.ListSnapshotsResponse() + post_with_metadata.return_value = ( + bigtable_table_admin.ListSnapshotsResponse(), + metadata, + ) - client.drop_row_range( + client.list_snapshots( request, metadata=[ ("key", "val"), @@ -21521,16 +25413,20 @@ def test_drop_row_range_rest_interceptors(null_interceptor): ) pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() -def test_generate_consistency_token_rest_bad_request( - request_type=bigtable_table_admin.GenerateConsistencyTokenRequest, +def test_delete_snapshot_rest_bad_request( + request_type=bigtable_table_admin.DeleteSnapshotRequest, ): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request_init = { + "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4" + } request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
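# The ListSnapshotsPager assertion above only checks the first page's
# next_page_token. A hedged sketch of the page-token loop that such a pager
# drives conceptually; iterate_pages and the toy fetcher are illustrative and
# not the generated pager API.
def iterate_pages(fetch_page):
    token = ""
    while True:
        items, token = fetch_page(token)
        yield from items
        if not token:  # an empty next_page_token ends iteration
            break


pages = {"": (["snap-1", "snap-2"], "t1"), "t1": (["snap-3"], "")}
assert list(iterate_pages(lambda tok: pages[tok])) == ["snap-1", "snap-2", "snap-3"]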
@@ -21545,53 +25441,47 @@ def test_generate_consistency_token_rest_bad_request( response_value.request = mock.Mock() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.generate_consistency_token(request) + client.delete_snapshot(request) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.GenerateConsistencyTokenRequest, + bigtable_table_admin.DeleteSnapshotRequest, dict, ], ) -def test_generate_consistency_token_rest_call_success(request_type): +def test_delete_snapshot_rest_call_success(request_type): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request_init = { + "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4" + } request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.GenerateConsistencyTokenResponse( - consistency_token="consistency_token_value", - ) + return_value = None # Wrap the value into a proper Response obj response_value = mock.Mock() response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(return_value) + json_return_value = "" response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.generate_consistency_token(request) + response = client.delete_snapshot(request) # Establish that the response is the type that we expect. 
- assert isinstance(response, bigtable_table_admin.GenerateConsistencyTokenResponse) - assert response.consistency_token == "consistency_token_value" + assert response is None @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_generate_consistency_token_rest_interceptors(null_interceptor): +def test_delete_snapshot_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -21605,18 +25495,11 @@ def test_generate_consistency_token_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_generate_consistency_token" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, - "post_generate_consistency_token_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_generate_consistency_token" + transports.BigtableTableAdminRestInterceptor, "pre_delete_snapshot" ) as pre: pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = bigtable_table_admin.GenerateConsistencyTokenRequest.pb( - bigtable_table_admin.GenerateConsistencyTokenRequest() + pb_message = bigtable_table_admin.DeleteSnapshotRequest.pb( + bigtable_table_admin.DeleteSnapshotRequest() ) transcode.return_value = { "method": "post", @@ -21628,24 +25511,15 @@ def test_generate_consistency_token_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.to_json( - bigtable_table_admin.GenerateConsistencyTokenResponse() - ) - req.return_value.content = return_value - request = bigtable_table_admin.GenerateConsistencyTokenRequest() + request = bigtable_table_admin.DeleteSnapshotRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() - post_with_metadata.return_value = ( - bigtable_table_admin.GenerateConsistencyTokenResponse(), - metadata, - ) - client.generate_consistency_token( + client.delete_snapshot( request, metadata=[ ("key", "val"), @@ -21654,18 +25528,16 @@ def test_generate_consistency_token_rest_interceptors(null_interceptor): ) pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() -def test_check_consistency_rest_bad_request( - request_type=bigtable_table_admin.CheckConsistencyRequest, +def test_create_backup_rest_bad_request( + request_type=bigtable_table_admin.CreateBackupRequest, ): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
@@ -21680,51 +25552,138 @@ def test_check_consistency_rest_bad_request( response_value.request = mock.Mock() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.check_consistency(request) + client.create_backup(request) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.CheckConsistencyRequest, + bigtable_table_admin.CreateBackupRequest, dict, ], ) -def test_check_consistency_rest_call_success(request_type): +def test_create_backup_rest_call_success(request_type): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} + request_init["backup"] = { + "name": "name_value", + "source_table": "source_table_value", + "source_backup": "source_backup_value", + "expire_time": {"seconds": 751, "nanos": 543}, + "start_time": {}, + "end_time": {}, + "size_bytes": 1089, + "state": 1, + "encryption_info": { + "encryption_type": 1, + "encryption_status": { + "code": 411, + "message": "message_value", + "details": [ + { + "type_url": "type.googleapis.com/google.protobuf.Duration", + "value": b"\x08\x0c\x10\xdb\x07", + } + ], + }, + "kms_key_version": "kms_key_version_value", + }, + "backup_type": 1, + "hot_to_standard_time": {}, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_table_admin.CreateBackupRequest.meta.fields["backup"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["backup"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["backup"][field])): + del request_init["backup"][field][i][subfield] + else: + del request_init["backup"][field][subfield] request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.CheckConsistencyResponse( - consistent=True, - ) + return_value = operations_pb2.Operation(name="operations/spam") # Wrap the value into a proper Response obj response_value = mock.Mock() response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable_table_admin.CheckConsistencyResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.check_consistency(request) + response = client.create_backup(request) # Establish that the response is the type that we expect. 
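# The pruning block above guards against version skew: sample sub-fields of
# request_init["backup"] that the runtime protobuf version no longer defines
# are deleted before the request is built. A simplified sketch of the same idea
# on a plain dict; known_fields stands in for the runtime message's field
# introspection and is an assumption of this example.
def prune_unknown_fields(sample, known_fields):
    # Keep only the keys that the runtime message version still defines.
    return {key: value for key, value in sample.items() if key in known_fields}


sample_backup = {"name": "name_value", "hot_to_standard_time": {}}
assert prune_unknown_fields(sample_backup, {"name", "expire_time"}) == {
    "name": "name_value"
}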
- assert isinstance(response, bigtable_table_admin.CheckConsistencyResponse) - assert response.consistent is True + json_return_value = json_format.MessageToJson(return_value) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_check_consistency_rest_interceptors(null_interceptor): +def test_create_backup_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -21738,18 +25697,19 @@ def test_check_consistency_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_check_consistency" + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_create_backup" ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, - "post_check_consistency_with_metadata", + transports.BigtableTableAdminRestInterceptor, "post_create_backup_with_metadata" ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_check_consistency" + transports.BigtableTableAdminRestInterceptor, "pre_create_backup" ) as pre: pre.assert_not_called() post.assert_not_called() post_with_metadata.assert_not_called() - pb_message = bigtable_table_admin.CheckConsistencyRequest.pb( - bigtable_table_admin.CheckConsistencyRequest() + pb_message = bigtable_table_admin.CreateBackupRequest.pb( + bigtable_table_admin.CreateBackupRequest() ) transcode.return_value = { "method": "post", @@ -21761,24 +25721,19 @@ def test_check_consistency_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = bigtable_table_admin.CheckConsistencyResponse.to_json( - bigtable_table_admin.CheckConsistencyResponse() - ) + return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value - request = bigtable_table_admin.CheckConsistencyRequest() + request = bigtable_table_admin.CreateBackupRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = bigtable_table_admin.CheckConsistencyResponse() - post_with_metadata.return_value = ( - bigtable_table_admin.CheckConsistencyResponse(), - metadata, - ) + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata - client.check_consistency( + client.create_backup( request, metadata=[ ("key", "val"), @@ -21791,14 +25746,16 @@ def test_check_consistency_rest_interceptors(null_interceptor): post_with_metadata.assert_called_once() -def test_snapshot_table_rest_bad_request( - request_type=bigtable_table_admin.SnapshotTableRequest, +def test_get_backup_rest_bad_request( + request_type=bigtable_table_admin.GetBackupRequest, ): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request_init = { + "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" + } request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
@@ -21813,45 +25770,63 @@ def test_snapshot_table_rest_bad_request( response_value.request = mock.Mock() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.snapshot_table(request) + client.get_backup(request) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.SnapshotTableRequest, + bigtable_table_admin.GetBackupRequest, dict, ], ) -def test_snapshot_table_rest_call_success(request_type): +def test_get_backup_rest_call_success(request_type): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"name": "projects/sample1/instances/sample2/tables/sample3"} + request_init = { + "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" + } request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name="operations/spam") + return_value = table.Backup( + name="name_value", + source_table="source_table_value", + source_backup="source_backup_value", + size_bytes=1089, + state=table.Backup.State.CREATING, + backup_type=table.Backup.BackupType.STANDARD, + ) # Wrap the value into a proper Response obj response_value = mock.Mock() response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = table.Backup.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.snapshot_table(request) + response = client.get_backup(request) # Establish that the response is the type that we expect. 
- json_return_value = json_format.MessageToJson(return_value) + assert isinstance(response, table.Backup) + assert response.name == "name_value" + assert response.source_table == "source_table_value" + assert response.source_backup == "source_backup_value" + assert response.size_bytes == 1089 + assert response.state == table.Backup.State.CREATING + assert response.backup_type == table.Backup.BackupType.STANDARD @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_snapshot_table_rest_interceptors(null_interceptor): +def test_get_backup_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -21865,20 +25840,17 @@ def test_snapshot_table_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - operation.Operation, "_set_result_from_operation" - ), mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_snapshot_table" + transports.BigtableTableAdminRestInterceptor, "post_get_backup" ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, - "post_snapshot_table_with_metadata", + transports.BigtableTableAdminRestInterceptor, "post_get_backup_with_metadata" ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_snapshot_table" + transports.BigtableTableAdminRestInterceptor, "pre_get_backup" ) as pre: pre.assert_not_called() post.assert_not_called() post_with_metadata.assert_not_called() - pb_message = bigtable_table_admin.SnapshotTableRequest.pb( - bigtable_table_admin.SnapshotTableRequest() + pb_message = bigtable_table_admin.GetBackupRequest.pb( + bigtable_table_admin.GetBackupRequest() ) transcode.return_value = { "method": "post", @@ -21890,19 +25862,19 @@ def test_snapshot_table_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = json_format.MessageToJson(operations_pb2.Operation()) + return_value = table.Backup.to_json(table.Backup()) req.return_value.content = return_value - request = bigtable_table_admin.SnapshotTableRequest() + request = bigtable_table_admin.GetBackupRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - post_with_metadata.return_value = operations_pb2.Operation(), metadata + post.return_value = table.Backup() + post_with_metadata.return_value = table.Backup(), metadata - client.snapshot_table( + client.get_backup( request, metadata=[ ("key", "val"), @@ -21915,15 +25887,17 @@ def test_snapshot_table_rest_interceptors(null_interceptor): post_with_metadata.assert_called_once() -def test_get_snapshot_rest_bad_request( - request_type=bigtable_table_admin.GetSnapshotRequest, +def test_update_backup_rest_bad_request( + request_type=bigtable_table_admin.UpdateBackupRequest, ): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding request_init = { - "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4" + "backup": { + "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" + } } request = request_type(**request_init) @@ -21939,35 +25913,132 @@ def test_get_snapshot_rest_bad_request( response_value.request = mock.Mock() 
req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.get_snapshot(request) + client.update_backup(request) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.GetSnapshotRequest, + bigtable_table_admin.UpdateBackupRequest, dict, ], ) -def test_get_snapshot_rest_call_success(request_type): +def test_update_backup_rest_call_success(request_type): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding request_init = { - "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4" + "backup": { + "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" + } + } + request_init["backup"] = { + "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4", + "source_table": "source_table_value", + "source_backup": "source_backup_value", + "expire_time": {"seconds": 751, "nanos": 543}, + "start_time": {}, + "end_time": {}, + "size_bytes": 1089, + "state": 1, + "encryption_info": { + "encryption_type": 1, + "encryption_status": { + "code": 411, + "message": "message_value", + "details": [ + { + "type_url": "type.googleapis.com/google.protobuf.Duration", + "value": b"\x08\x0c\x10\xdb\x07", + } + ], + }, + "kms_key_version": "kms_key_version_value", + }, + "backup_type": 1, + "hot_to_standard_time": {}, } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_table_admin.UpdateBackupRequest.meta.fields["backup"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["backup"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["backup"][field])): + del request_init["backup"][field][i][subfield] + else: + del request_init["backup"][field][subfield] request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = table.Snapshot( + return_value = table.Backup( name="name_value", - data_size_bytes=1594, - state=table.Snapshot.State.READY, - description="description_value", + source_table="source_table_value", + source_backup="source_backup_value", + size_bytes=1089, + state=table.Backup.State.CREATING, + backup_type=table.Backup.BackupType.STANDARD, ) # Wrap the value into a proper Response obj @@ -21975,23 +26046,25 @@ def test_get_snapshot_rest_call_success(request_type): response_value.status_code = 200 # Convert return value to protobuf type - return_value = table.Snapshot.pb(return_value) + return_value = table.Backup.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.get_snapshot(request) + response = client.update_backup(request) # Establish that the response is the type that we expect. 
- assert isinstance(response, table.Snapshot) + assert isinstance(response, table.Backup) assert response.name == "name_value" - assert response.data_size_bytes == 1594 - assert response.state == table.Snapshot.State.READY - assert response.description == "description_value" + assert response.source_table == "source_table_value" + assert response.source_backup == "source_backup_value" + assert response.size_bytes == 1089 + assert response.state == table.Backup.State.CREATING + assert response.backup_type == table.Backup.BackupType.STANDARD @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_snapshot_rest_interceptors(null_interceptor): +def test_update_backup_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -22005,17 +26078,17 @@ def test_get_snapshot_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_get_snapshot" + transports.BigtableTableAdminRestInterceptor, "post_update_backup" ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_get_snapshot_with_metadata" + transports.BigtableTableAdminRestInterceptor, "post_update_backup_with_metadata" ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_get_snapshot" + transports.BigtableTableAdminRestInterceptor, "pre_update_backup" ) as pre: pre.assert_not_called() post.assert_not_called() post_with_metadata.assert_not_called() - pb_message = bigtable_table_admin.GetSnapshotRequest.pb( - bigtable_table_admin.GetSnapshotRequest() + pb_message = bigtable_table_admin.UpdateBackupRequest.pb( + bigtable_table_admin.UpdateBackupRequest() ) transcode.return_value = { "method": "post", @@ -22027,19 +26100,19 @@ def test_get_snapshot_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = table.Snapshot.to_json(table.Snapshot()) + return_value = table.Backup.to_json(table.Backup()) req.return_value.content = return_value - request = bigtable_table_admin.GetSnapshotRequest() + request = bigtable_table_admin.UpdateBackupRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = table.Snapshot() - post_with_metadata.return_value = table.Snapshot(), metadata + post.return_value = table.Backup() + post_with_metadata.return_value = table.Backup(), metadata - client.get_snapshot( + client.update_backup( request, metadata=[ ("key", "val"), @@ -22052,14 +26125,16 @@ def test_get_snapshot_rest_interceptors(null_interceptor): post_with_metadata.assert_called_once() -def test_list_snapshots_rest_bad_request( - request_type=bigtable_table_admin.ListSnapshotsRequest, +def test_delete_backup_rest_bad_request( + request_type=bigtable_table_admin.DeleteBackupRequest, ): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} + request_init = { + "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" + } request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
@@ -22074,51 +26149,47 @@ def test_list_snapshots_rest_bad_request( response_value.request = mock.Mock() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.list_snapshots(request) + client.delete_backup(request) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.ListSnapshotsRequest, + bigtable_table_admin.DeleteBackupRequest, dict, ], ) -def test_list_snapshots_rest_call_success(request_type): +def test_delete_backup_rest_call_success(request_type): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} + request_init = { + "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" + } request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.ListSnapshotsResponse( - next_page_token="next_page_token_value", - ) + return_value = None # Wrap the value into a proper Response obj response_value = mock.Mock() response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable_table_admin.ListSnapshotsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) + json_return_value = "" response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.list_snapshots(request) + response = client.delete_backup(request) # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListSnapshotsPager) - assert response.next_page_token == "next_page_token_value" + assert response is None @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_snapshots_rest_interceptors(null_interceptor): +def test_delete_backup_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -22132,18 +26203,11 @@ def test_list_snapshots_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_list_snapshots" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, - "post_list_snapshots_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_list_snapshots" + transports.BigtableTableAdminRestInterceptor, "pre_delete_backup" ) as pre: pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = bigtable_table_admin.ListSnapshotsRequest.pb( - bigtable_table_admin.ListSnapshotsRequest() + pb_message = bigtable_table_admin.DeleteBackupRequest.pb( + bigtable_table_admin.DeleteBackupRequest() ) transcode.return_value = { "method": "post", @@ -22155,24 +26219,15 @@ def test_list_snapshots_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = bigtable_table_admin.ListSnapshotsResponse.to_json( - bigtable_table_admin.ListSnapshotsResponse() - ) - req.return_value.content = return_value - request = bigtable_table_admin.ListSnapshotsRequest() + request = bigtable_table_admin.DeleteBackupRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = bigtable_table_admin.ListSnapshotsResponse() - post_with_metadata.return_value = ( - bigtable_table_admin.ListSnapshotsResponse(), - metadata, - ) - client.list_snapshots( + client.delete_backup( request, metadata=[ ("key", "val"), @@ -22181,20 +26236,16 @@ def test_list_snapshots_rest_interceptors(null_interceptor): ) pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() -def test_delete_snapshot_rest_bad_request( - request_type=bigtable_table_admin.DeleteSnapshotRequest, +def test_list_backups_rest_bad_request( + request_type=bigtable_table_admin.ListBackupsRequest, ): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4" - } + request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
@@ -22209,47 +26260,51 @@ def test_delete_snapshot_rest_bad_request( response_value.request = mock.Mock() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.delete_snapshot(request) + client.list_backups(request) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.DeleteSnapshotRequest, + bigtable_table_admin.ListBackupsRequest, dict, ], ) -def test_delete_snapshot_rest_call_success(request_type): +def test_list_backups_rest_call_success(request_type): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4" - } + request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = None + return_value = bigtable_table_admin.ListBackupsResponse( + next_page_token="next_page_token_value", + ) # Wrap the value into a proper Response obj response_value = mock.Mock() response_value.status_code = 200 - json_return_value = "" + + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.delete_snapshot(request) + response = client.list_backups(request) # Establish that the response is the type that we expect. 
- assert response is None + assert isinstance(response, pagers.ListBackupsPager) + assert response.next_page_token == "next_page_token_value" @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_delete_snapshot_rest_interceptors(null_interceptor): +def test_list_backups_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -22263,11 +26318,17 @@ def test_delete_snapshot_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_delete_snapshot" + transports.BigtableTableAdminRestInterceptor, "post_list_backups" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_list_backups_with_metadata" + ) as post_with_metadata, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_list_backups" ) as pre: pre.assert_not_called() - pb_message = bigtable_table_admin.DeleteSnapshotRequest.pb( - bigtable_table_admin.DeleteSnapshotRequest() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = bigtable_table_admin.ListBackupsRequest.pb( + bigtable_table_admin.ListBackupsRequest() ) transcode.return_value = { "method": "post", @@ -22279,15 +26340,24 @@ def test_delete_snapshot_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = bigtable_table_admin.ListBackupsResponse.to_json( + bigtable_table_admin.ListBackupsResponse() + ) + req.return_value.content = return_value - request = bigtable_table_admin.DeleteSnapshotRequest() + request = bigtable_table_admin.ListBackupsRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata + post.return_value = bigtable_table_admin.ListBackupsResponse() + post_with_metadata.return_value = ( + bigtable_table_admin.ListBackupsResponse(), + metadata, + ) - client.delete_snapshot( + client.list_backups( request, metadata=[ ("key", "val"), @@ -22296,16 +26366,18 @@ def test_delete_snapshot_rest_interceptors(null_interceptor): ) pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() -def test_create_backup_rest_bad_request( - request_type=bigtable_table_admin.CreateBackupRequest, +def test_restore_table_rest_bad_request( + request_type=bigtable_table_admin.RestoreTableRequest, ): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} + request_init = {"parent": "projects/sample1/instances/sample2"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
@@ -22320,116 +26392,23 @@ def test_create_backup_rest_bad_request( response_value.request = mock.Mock() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.create_backup(request) + client.restore_table(request) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.CreateBackupRequest, + bigtable_table_admin.RestoreTableRequest, dict, ], ) -def test_create_backup_rest_call_success(request_type): +def test_restore_table_rest_call_success(request_type): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} - request_init["backup"] = { - "name": "name_value", - "source_table": "source_table_value", - "source_backup": "source_backup_value", - "expire_time": {"seconds": 751, "nanos": 543}, - "start_time": {}, - "end_time": {}, - "size_bytes": 1089, - "state": 1, - "encryption_info": { - "encryption_type": 1, - "encryption_status": { - "code": 411, - "message": "message_value", - "details": [ - { - "type_url": "type.googleapis.com/google.protobuf.Duration", - "value": b"\x08\x0c\x10\xdb\x07", - } - ], - }, - "kms_key_version": "kms_key_version_value", - }, - "backup_type": 1, - "hot_to_standard_time": {}, - } - # The version of a generated dependency at test runtime may differ from the version used during generation. - # Delete any fields which are not present in the current runtime dependency - # See https://github.com/googleapis/gapic-generator-python/issues/1748 - - # Determine if the message type is proto-plus or protobuf - test_field = bigtable_table_admin.CreateBackupRequest.meta.fields["backup"] - - def get_message_fields(field): - # Given a field which is a message (composite type), return a list with - # all the fields of the message. - # If the field is not a composite type, return an empty list. 
- message_fields = [] - - if hasattr(field, "message") and field.message: - is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") - - if is_field_type_proto_plus_type: - message_fields = field.message.meta.fields.values() - # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types - else: # pragma: NO COVER - message_fields = field.message.DESCRIPTOR.fields - return message_fields - - runtime_nested_fields = [ - (field.name, nested_field.name) - for field in get_message_fields(test_field) - for nested_field in get_message_fields(field) - ] - - subfields_not_in_runtime = [] - - # For each item in the sample request, create a list of sub fields which are not present at runtime - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["backup"].items(): # pragma: NO COVER - result = None - is_repeated = False - # For repeated fields - if isinstance(value, list) and len(value): - is_repeated = True - result = value[0] - # For fields where the type is another message - if isinstance(value, dict): - result = value - - if result and hasattr(result, "keys"): - for subfield in result.keys(): - if (field, subfield) not in runtime_nested_fields: - subfields_not_in_runtime.append( - { - "field": field, - "subfield": subfield, - "is_repeated": is_repeated, - } - ) - - # Remove fields from the sample request which are not present in the runtime version of the dependency - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER - field = subfield_to_delete.get("field") - field_repeated = subfield_to_delete.get("is_repeated") - subfield = subfield_to_delete.get("subfield") - if subfield: - if field_repeated: - for i in range(0, len(request_init["backup"][field])): - del request_init["backup"][field][i][subfield] - else: - del request_init["backup"][field][subfield] + request_init = {"parent": "projects/sample1/instances/sample2"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. @@ -22444,14 +26423,14 @@ def get_message_fields(field): response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.create_backup(request) + response = client.restore_table(request) # Establish that the response is the type that we expect. 
json_return_value = json_format.MessageToJson(return_value) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_create_backup_rest_interceptors(null_interceptor): +def test_restore_table_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -22467,17 +26446,17 @@ def test_create_backup_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( operation.Operation, "_set_result_from_operation" ), mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_create_backup" + transports.BigtableTableAdminRestInterceptor, "post_restore_table" ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_create_backup_with_metadata" + transports.BigtableTableAdminRestInterceptor, "post_restore_table_with_metadata" ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_create_backup" + transports.BigtableTableAdminRestInterceptor, "pre_restore_table" ) as pre: pre.assert_not_called() post.assert_not_called() post_with_metadata.assert_not_called() - pb_message = bigtable_table_admin.CreateBackupRequest.pb( - bigtable_table_admin.CreateBackupRequest() + pb_message = bigtable_table_admin.RestoreTableRequest.pb( + bigtable_table_admin.RestoreTableRequest() ) transcode.return_value = { "method": "post", @@ -22492,7 +26471,7 @@ def test_create_backup_rest_interceptors(null_interceptor): return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value - request = bigtable_table_admin.CreateBackupRequest() + request = bigtable_table_admin.RestoreTableRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), @@ -22501,7 +26480,7 @@ def test_create_backup_rest_interceptors(null_interceptor): post.return_value = operations_pb2.Operation() post_with_metadata.return_value = operations_pb2.Operation(), metadata - client.create_backup( + client.restore_table( request, metadata=[ ("key", "val"), @@ -22514,16 +26493,14 @@ def test_create_backup_rest_interceptors(null_interceptor): post_with_metadata.assert_called_once() -def test_get_backup_rest_bad_request( - request_type=bigtable_table_admin.GetBackupRequest, +def test_copy_backup_rest_bad_request( + request_type=bigtable_table_admin.CopyBackupRequest, ): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" - } + request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
@@ -22538,63 +26515,45 @@ def test_get_backup_rest_bad_request( response_value.request = mock.Mock() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.get_backup(request) + client.copy_backup(request) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.GetBackupRequest, + bigtable_table_admin.CopyBackupRequest, dict, ], ) -def test_get_backup_rest_call_success(request_type): +def test_copy_backup_rest_call_success(request_type): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" - } + request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = table.Backup( - name="name_value", - source_table="source_table_value", - source_backup="source_backup_value", - size_bytes=1089, - state=table.Backup.State.CREATING, - backup_type=table.Backup.BackupType.STANDARD, - ) + return_value = operations_pb2.Operation(name="operations/spam") # Wrap the value into a proper Response obj response_value = mock.Mock() response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = table.Backup.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.get_backup(request) + response = client.copy_backup(request) # Establish that the response is the type that we expect. 
- assert isinstance(response, table.Backup) - assert response.name == "name_value" - assert response.source_table == "source_table_value" - assert response.source_backup == "source_backup_value" - assert response.size_bytes == 1089 - assert response.state == table.Backup.State.CREATING - assert response.backup_type == table.Backup.BackupType.STANDARD + json_return_value = json_format.MessageToJson(return_value) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_backup_rest_interceptors(null_interceptor): +def test_copy_backup_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -22608,17 +26567,19 @@ def test_get_backup_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_get_backup" + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_copy_backup" ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_get_backup_with_metadata" + transports.BigtableTableAdminRestInterceptor, "post_copy_backup_with_metadata" ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_get_backup" + transports.BigtableTableAdminRestInterceptor, "pre_copy_backup" ) as pre: pre.assert_not_called() post.assert_not_called() post_with_metadata.assert_not_called() - pb_message = bigtable_table_admin.GetBackupRequest.pb( - bigtable_table_admin.GetBackupRequest() + pb_message = bigtable_table_admin.CopyBackupRequest.pb( + bigtable_table_admin.CopyBackupRequest() ) transcode.return_value = { "method": "post", @@ -22630,19 +26591,19 @@ def test_get_backup_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = table.Backup.to_json(table.Backup()) + return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value - request = bigtable_table_admin.GetBackupRequest() + request = bigtable_table_admin.CopyBackupRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = table.Backup() - post_with_metadata.return_value = table.Backup(), metadata + post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata - client.get_backup( + client.copy_backup( request, metadata=[ ("key", "val"), @@ -22655,18 +26616,14 @@ def test_get_backup_rest_interceptors(null_interceptor): post_with_metadata.assert_called_once() -def test_update_backup_rest_bad_request( - request_type=bigtable_table_admin.UpdateBackupRequest, +def test_get_iam_policy_rest_bad_request( + request_type=iam_policy_pb2.GetIamPolicyRequest, ): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = { - "backup": { - "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" - } - } + request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
@@ -22681,158 +26638,50 @@ def test_update_backup_rest_bad_request( response_value.request = mock.Mock() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.update_backup(request) + client.get_iam_policy(request) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.UpdateBackupRequest, + iam_policy_pb2.GetIamPolicyRequest, dict, ], ) -def test_update_backup_rest_call_success(request_type): +def test_get_iam_policy_rest_call_success(request_type): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = { - "backup": { - "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" - } - } - request_init["backup"] = { - "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4", - "source_table": "source_table_value", - "source_backup": "source_backup_value", - "expire_time": {"seconds": 751, "nanos": 543}, - "start_time": {}, - "end_time": {}, - "size_bytes": 1089, - "state": 1, - "encryption_info": { - "encryption_type": 1, - "encryption_status": { - "code": 411, - "message": "message_value", - "details": [ - { - "type_url": "type.googleapis.com/google.protobuf.Duration", - "value": b"\x08\x0c\x10\xdb\x07", - } - ], - }, - "kms_key_version": "kms_key_version_value", - }, - "backup_type": 1, - "hot_to_standard_time": {}, - } - # The version of a generated dependency at test runtime may differ from the version used during generation. - # Delete any fields which are not present in the current runtime dependency - # See https://github.com/googleapis/gapic-generator-python/issues/1748 - - # Determine if the message type is proto-plus or protobuf - test_field = bigtable_table_admin.UpdateBackupRequest.meta.fields["backup"] - - def get_message_fields(field): - # Given a field which is a message (composite type), return a list with - # all the fields of the message. - # If the field is not a composite type, return an empty list. 
- message_fields = [] - - if hasattr(field, "message") and field.message: - is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") - - if is_field_type_proto_plus_type: - message_fields = field.message.meta.fields.values() - # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types - else: # pragma: NO COVER - message_fields = field.message.DESCRIPTOR.fields - return message_fields - - runtime_nested_fields = [ - (field.name, nested_field.name) - for field in get_message_fields(test_field) - for nested_field in get_message_fields(field) - ] - - subfields_not_in_runtime = [] - - # For each item in the sample request, create a list of sub fields which are not present at runtime - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["backup"].items(): # pragma: NO COVER - result = None - is_repeated = False - # For repeated fields - if isinstance(value, list) and len(value): - is_repeated = True - result = value[0] - # For fields where the type is another message - if isinstance(value, dict): - result = value - - if result and hasattr(result, "keys"): - for subfield in result.keys(): - if (field, subfield) not in runtime_nested_fields: - subfields_not_in_runtime.append( - { - "field": field, - "subfield": subfield, - "is_repeated": is_repeated, - } - ) - - # Remove fields from the sample request which are not present in the runtime version of the dependency - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER - field = subfield_to_delete.get("field") - field_repeated = subfield_to_delete.get("is_repeated") - subfield = subfield_to_delete.get("subfield") - if subfield: - if field_repeated: - for i in range(0, len(request_init["backup"][field])): - del request_init["backup"][field][i][subfield] - else: - del request_init["backup"][field][subfield] + request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = table.Backup( - name="name_value", - source_table="source_table_value", - source_backup="source_backup_value", - size_bytes=1089, - state=table.Backup.State.CREATING, - backup_type=table.Backup.BackupType.STANDARD, + return_value = policy_pb2.Policy( + version=774, + etag=b"etag_blob", ) # Wrap the value into a proper Response obj response_value = mock.Mock() response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = table.Backup.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.update_backup(request) + response = client.get_iam_policy(request) # Establish that the response is the type that we expect. 
- assert isinstance(response, table.Backup) - assert response.name == "name_value" - assert response.source_table == "source_table_value" - assert response.source_backup == "source_backup_value" - assert response.size_bytes == 1089 - assert response.state == table.Backup.State.CREATING - assert response.backup_type == table.Backup.BackupType.STANDARD + assert isinstance(response, policy_pb2.Policy) + assert response.version == 774 + assert response.etag == b"etag_blob" @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_update_backup_rest_interceptors(null_interceptor): +def test_get_iam_policy_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -22846,18 +26695,17 @@ def test_update_backup_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_update_backup" + transports.BigtableTableAdminRestInterceptor, "post_get_iam_policy" ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_update_backup_with_metadata" + transports.BigtableTableAdminRestInterceptor, + "post_get_iam_policy_with_metadata", ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_update_backup" + transports.BigtableTableAdminRestInterceptor, "pre_get_iam_policy" ) as pre: pre.assert_not_called() post.assert_not_called() post_with_metadata.assert_not_called() - pb_message = bigtable_table_admin.UpdateBackupRequest.pb( - bigtable_table_admin.UpdateBackupRequest() - ) + pb_message = iam_policy_pb2.GetIamPolicyRequest() transcode.return_value = { "method": "post", "uri": "my_uri", @@ -22868,19 +26716,19 @@ def test_update_backup_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = table.Backup.to_json(table.Backup()) + return_value = json_format.MessageToJson(policy_pb2.Policy()) req.return_value.content = return_value - request = bigtable_table_admin.UpdateBackupRequest() + request = iam_policy_pb2.GetIamPolicyRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = table.Backup() - post_with_metadata.return_value = table.Backup(), metadata + post.return_value = policy_pb2.Policy() + post_with_metadata.return_value = policy_pb2.Policy(), metadata - client.update_backup( + client.get_iam_policy( request, metadata=[ ("key", "val"), @@ -22893,16 +26741,14 @@ def test_update_backup_rest_interceptors(null_interceptor): post_with_metadata.assert_called_once() -def test_delete_backup_rest_bad_request( - request_type=bigtable_table_admin.DeleteBackupRequest, +def test_set_iam_policy_rest_bad_request( + request_type=iam_policy_pb2.SetIamPolicyRequest, ): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" - } + request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
@@ -22917,47 +26763,50 @@ def test_delete_backup_rest_bad_request( response_value.request = mock.Mock() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.delete_backup(request) + client.set_iam_policy(request) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.DeleteBackupRequest, + iam_policy_pb2.SetIamPolicyRequest, dict, ], ) -def test_delete_backup_rest_call_success(request_type): +def test_set_iam_policy_rest_call_success(request_type): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = { - "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" - } + request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = None + return_value = policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) # Wrap the value into a proper Response obj response_value = mock.Mock() response_value.status_code = 200 - json_return_value = "" + json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.delete_backup(request) + response = client.set_iam_policy(request) # Establish that the response is the type that we expect. - assert response is None + assert isinstance(response, policy_pb2.Policy) + assert response.version == 774 + assert response.etag == b"etag_blob" @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_delete_backup_rest_interceptors(null_interceptor): +def test_set_iam_policy_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -22971,12 +26820,17 @@ def test_delete_backup_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_delete_backup" + transports.BigtableTableAdminRestInterceptor, "post_set_iam_policy" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, + "post_set_iam_policy_with_metadata", + ) as post_with_metadata, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_set_iam_policy" ) as pre: pre.assert_not_called() - pb_message = bigtable_table_admin.DeleteBackupRequest.pb( - bigtable_table_admin.DeleteBackupRequest() - ) + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = iam_policy_pb2.SetIamPolicyRequest() transcode.return_value = { "method": "post", "uri": "my_uri", @@ -22987,15 +26841,19 @@ def test_delete_backup_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = json_format.MessageToJson(policy_pb2.Policy()) + req.return_value.content = return_value - request = bigtable_table_admin.DeleteBackupRequest() + request = iam_policy_pb2.SetIamPolicyRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = 
request, metadata + post.return_value = policy_pb2.Policy() + post_with_metadata.return_value = policy_pb2.Policy(), metadata - client.delete_backup( + client.set_iam_policy( request, metadata=[ ("key", "val"), @@ -23004,16 +26862,18 @@ def test_delete_backup_rest_interceptors(null_interceptor): ) pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() -def test_list_backups_rest_bad_request( - request_type=bigtable_table_admin.ListBackupsRequest, +def test_test_iam_permissions_rest_bad_request( + request_type=iam_policy_pb2.TestIamPermissionsRequest, ): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} + request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -23028,51 +26888,48 @@ def test_list_backups_rest_bad_request( response_value.request = mock.Mock() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.list_backups(request) + client.test_iam_permissions(request) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.ListBackupsRequest, + iam_policy_pb2.TestIamPermissionsRequest, dict, ], ) -def test_list_backups_rest_call_success(request_type): +def test_test_iam_permissions_rest_call_success(request_type): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} + request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.ListBackupsResponse( - next_page_token="next_page_token_value", + return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], ) # Wrap the value into a proper Response obj response_value = mock.Mock() response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.list_backups(request) + response = client.test_iam_permissions(request) # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListBackupsPager) - assert response.next_page_token == "next_page_token_value" + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + assert response.permissions == ["permissions_value"] @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_backups_rest_interceptors(null_interceptor): +def test_test_iam_permissions_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -23086,18 +26943,17 @@ def test_list_backups_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_list_backups" + transports.BigtableTableAdminRestInterceptor, "post_test_iam_permissions" ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_list_backups_with_metadata" + transports.BigtableTableAdminRestInterceptor, + "post_test_iam_permissions_with_metadata", ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_list_backups" + transports.BigtableTableAdminRestInterceptor, "pre_test_iam_permissions" ) as pre: pre.assert_not_called() post.assert_not_called() post_with_metadata.assert_not_called() - pb_message = bigtable_table_admin.ListBackupsRequest.pb( - bigtable_table_admin.ListBackupsRequest() - ) + pb_message = iam_policy_pb2.TestIamPermissionsRequest() transcode.return_value = { "method": "post", "uri": "my_uri", @@ -23108,24 +26964,24 @@ def test_list_backups_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = bigtable_table_admin.ListBackupsResponse.to_json( - bigtable_table_admin.ListBackupsResponse() + return_value = json_format.MessageToJson( + iam_policy_pb2.TestIamPermissionsResponse() ) req.return_value.content = return_value - request = bigtable_table_admin.ListBackupsRequest() + request = iam_policy_pb2.TestIamPermissionsRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = bigtable_table_admin.ListBackupsResponse() + post.return_value = iam_policy_pb2.TestIamPermissionsResponse() post_with_metadata.return_value = ( - bigtable_table_admin.ListBackupsResponse(), + iam_policy_pb2.TestIamPermissionsResponse(), metadata, ) - client.list_backups( + client.test_iam_permissions( request, metadata=[ ("key", "val"), @@ -23138,14 +26994,14 @@ def test_list_backups_rest_interceptors(null_interceptor): post_with_metadata.assert_called_once() -def test_restore_table_rest_bad_request( - request_type=bigtable_table_admin.RestoreTableRequest, +def test_create_schema_bundle_rest_bad_request( + request_type=bigtable_table_admin.CreateSchemaBundleRequest, ): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} + request_init = {"parent": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
@@ -23160,23 +27016,97 @@ def test_restore_table_rest_bad_request( response_value.request = mock.Mock() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.restore_table(request) + client.create_schema_bundle(request) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.RestoreTableRequest, + bigtable_table_admin.CreateSchemaBundleRequest, dict, ], ) -def test_restore_table_rest_call_success(request_type): +def test_create_schema_bundle_rest_call_success(request_type): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2"} + request_init = {"parent": "projects/sample1/instances/sample2/tables/sample3"} + request_init["schema_bundle"] = { + "name": "name_value", + "proto_schema": {"proto_descriptors": b"proto_descriptors_blob"}, + "etag": "etag_value", + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_table_admin.CreateSchemaBundleRequest.meta.fields[ + "schema_bundle" + ] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. + message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["schema_bundle"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, 
len(request_init["schema_bundle"][field])): + del request_init["schema_bundle"][field][i][subfield] + else: + del request_init["schema_bundle"][field][subfield] request = request_type(**request_init) # Mock the http request call within the method and fake a response. @@ -23191,14 +27121,14 @@ def test_restore_table_rest_call_success(request_type): response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.restore_table(request) + response = client.create_schema_bundle(request) # Establish that the response is the type that we expect. json_return_value = json_format.MessageToJson(return_value) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_restore_table_rest_interceptors(null_interceptor): +def test_create_schema_bundle_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -23214,17 +27144,18 @@ def test_restore_table_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( operation.Operation, "_set_result_from_operation" ), mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_restore_table" + transports.BigtableTableAdminRestInterceptor, "post_create_schema_bundle" ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_restore_table_with_metadata" + transports.BigtableTableAdminRestInterceptor, + "post_create_schema_bundle_with_metadata", ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_restore_table" + transports.BigtableTableAdminRestInterceptor, "pre_create_schema_bundle" ) as pre: pre.assert_not_called() post.assert_not_called() post_with_metadata.assert_not_called() - pb_message = bigtable_table_admin.RestoreTableRequest.pb( - bigtable_table_admin.RestoreTableRequest() + pb_message = bigtable_table_admin.CreateSchemaBundleRequest.pb( + bigtable_table_admin.CreateSchemaBundleRequest() ) transcode.return_value = { "method": "post", @@ -23239,7 +27170,7 @@ def test_restore_table_rest_interceptors(null_interceptor): return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value - request = bigtable_table_admin.RestoreTableRequest() + request = bigtable_table_admin.CreateSchemaBundleRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), @@ -23248,7 +27179,7 @@ def test_restore_table_rest_interceptors(null_interceptor): post.return_value = operations_pb2.Operation() post_with_metadata.return_value = operations_pb2.Operation(), metadata - client.restore_table( + client.create_schema_bundle( request, metadata=[ ("key", "val"), @@ -23261,14 +27192,18 @@ def test_restore_table_rest_interceptors(null_interceptor): post_with_metadata.assert_called_once() -def test_copy_backup_rest_bad_request( - request_type=bigtable_table_admin.CopyBackupRequest, +def test_update_schema_bundle_rest_bad_request( + request_type=bigtable_table_admin.UpdateSchemaBundleRequest, ): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} + request_init = { + "schema_bundle": { + "name": "projects/sample1/instances/sample2/tables/sample3/schemaBundles/sample4" + } + } request = request_type(**request_init) # Mock the http 
request call within the method and fake a BadRequest error. @@ -23283,23 +27218,101 @@ def test_copy_backup_rest_bad_request( response_value.request = mock.Mock() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.copy_backup(request) + client.update_schema_bundle(request) @pytest.mark.parametrize( "request_type", [ - bigtable_table_admin.CopyBackupRequest, + bigtable_table_admin.UpdateSchemaBundleRequest, dict, ], ) -def test_copy_backup_rest_call_success(request_type): +def test_update_schema_bundle_rest_call_success(request_type): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} + request_init = { + "schema_bundle": { + "name": "projects/sample1/instances/sample2/tables/sample3/schemaBundles/sample4" + } + } + request_init["schema_bundle"] = { + "name": "projects/sample1/instances/sample2/tables/sample3/schemaBundles/sample4", + "proto_schema": {"proto_descriptors": b"proto_descriptors_blob"}, + "etag": "etag_value", + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_table_admin.UpdateSchemaBundleRequest.meta.fields[ + "schema_bundle" + ] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["schema_bundle"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["schema_bundle"][field])): + del request_init["schema_bundle"][field][i][subfield] + else: + del request_init["schema_bundle"][field][subfield] request = request_type(**request_init) # Mock the http request call within the method and fake a response. @@ -23314,14 +27327,14 @@ def test_copy_backup_rest_call_success(request_type): response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.copy_backup(request) + response = client.update_schema_bundle(request) # Establish that the response is the type that we expect. 
json_return_value = json_format.MessageToJson(return_value) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_copy_backup_rest_interceptors(null_interceptor): +def test_update_schema_bundle_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -23337,17 +27350,18 @@ def test_copy_backup_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( operation.Operation, "_set_result_from_operation" ), mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_copy_backup" + transports.BigtableTableAdminRestInterceptor, "post_update_schema_bundle" ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_copy_backup_with_metadata" + transports.BigtableTableAdminRestInterceptor, + "post_update_schema_bundle_with_metadata", ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_copy_backup" + transports.BigtableTableAdminRestInterceptor, "pre_update_schema_bundle" ) as pre: pre.assert_not_called() post.assert_not_called() post_with_metadata.assert_not_called() - pb_message = bigtable_table_admin.CopyBackupRequest.pb( - bigtable_table_admin.CopyBackupRequest() + pb_message = bigtable_table_admin.UpdateSchemaBundleRequest.pb( + bigtable_table_admin.UpdateSchemaBundleRequest() ) transcode.return_value = { "method": "post", @@ -23362,7 +27376,7 @@ def test_copy_backup_rest_interceptors(null_interceptor): return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value - request = bigtable_table_admin.CopyBackupRequest() + request = bigtable_table_admin.UpdateSchemaBundleRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), @@ -23371,7 +27385,7 @@ def test_copy_backup_rest_interceptors(null_interceptor): post.return_value = operations_pb2.Operation() post_with_metadata.return_value = operations_pb2.Operation(), metadata - client.copy_backup( + client.update_schema_bundle( request, metadata=[ ("key", "val"), @@ -23384,14 +27398,16 @@ def test_copy_backup_rest_interceptors(null_interceptor): post_with_metadata.assert_called_once() -def test_get_iam_policy_rest_bad_request( - request_type=iam_policy_pb2.GetIamPolicyRequest, +def test_get_schema_bundle_rest_bad_request( + request_type=bigtable_table_admin.GetSchemaBundleRequest, ): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"} + request_init = { + "name": "projects/sample1/instances/sample2/tables/sample3/schemaBundles/sample4" + } request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
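The subfield-pruning loop shared by the schema-bundle call-success tests above can be summarised with a small self-contained sketch (not taken from the patch); the "checksum" subfield below is invented purely to show what gets removed when the sample request contains a field the runtime proto definition does not know:

# Nested (field, subfield) pairs that the runtime dependency actually defines.
runtime_nested_fields = {("proto_schema", "proto_descriptors")}

request_init = {
    "schema_bundle": {
        "name": "name_value",
        # "checksum" is hypothetical: present in the sample, absent at runtime.
        "proto_schema": {"proto_descriptors": b"blob", "checksum": "abc"},
    }
}

# Drop any nested key the runtime proto would reject, mirroring the loop in the tests.
for field, value in list(request_init["schema_bundle"].items()):
    if isinstance(value, dict):
        for subfield in list(value.keys()):
            if (field, subfield) not in runtime_nested_fields:
                del request_init["schema_bundle"][field][subfield]

# Result: {"schema_bundle": {"name": "name_value", "proto_schema": {"proto_descriptors": b"blob"}}}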
@@ -23406,50 +27422,55 @@ def test_get_iam_policy_rest_bad_request( response_value.request = mock.Mock() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.get_iam_policy(request) + client.get_schema_bundle(request) @pytest.mark.parametrize( "request_type", [ - iam_policy_pb2.GetIamPolicyRequest, + bigtable_table_admin.GetSchemaBundleRequest, dict, ], ) -def test_get_iam_policy_rest_call_success(request_type): +def test_get_schema_bundle_rest_call_success(request_type): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"} + request_init = { + "name": "projects/sample1/instances/sample2/tables/sample3/schemaBundles/sample4" + } request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = policy_pb2.Policy( - version=774, - etag=b"etag_blob", + return_value = table.SchemaBundle( + name="name_value", + etag="etag_value", ) # Wrap the value into a proper Response obj response_value = mock.Mock() response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = table.SchemaBundle.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.get_iam_policy(request) + response = client.get_schema_bundle(request) # Establish that the response is the type that we expect. 
- assert isinstance(response, policy_pb2.Policy) - assert response.version == 774 - assert response.etag == b"etag_blob" + assert isinstance(response, table.SchemaBundle) + assert response.name == "name_value" + assert response.etag == "etag_value" @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_iam_policy_rest_interceptors(null_interceptor): +def test_get_schema_bundle_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -23463,17 +27484,19 @@ def test_get_iam_policy_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_get_iam_policy" + transports.BigtableTableAdminRestInterceptor, "post_get_schema_bundle" ) as post, mock.patch.object( transports.BigtableTableAdminRestInterceptor, - "post_get_iam_policy_with_metadata", + "post_get_schema_bundle_with_metadata", ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_get_iam_policy" + transports.BigtableTableAdminRestInterceptor, "pre_get_schema_bundle" ) as pre: pre.assert_not_called() post.assert_not_called() post_with_metadata.assert_not_called() - pb_message = iam_policy_pb2.GetIamPolicyRequest() + pb_message = bigtable_table_admin.GetSchemaBundleRequest.pb( + bigtable_table_admin.GetSchemaBundleRequest() + ) transcode.return_value = { "method": "post", "uri": "my_uri", @@ -23484,19 +27507,19 @@ def test_get_iam_policy_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = json_format.MessageToJson(policy_pb2.Policy()) + return_value = table.SchemaBundle.to_json(table.SchemaBundle()) req.return_value.content = return_value - request = iam_policy_pb2.GetIamPolicyRequest() + request = bigtable_table_admin.GetSchemaBundleRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = policy_pb2.Policy() - post_with_metadata.return_value = policy_pb2.Policy(), metadata + post.return_value = table.SchemaBundle() + post_with_metadata.return_value = table.SchemaBundle(), metadata - client.get_iam_policy( + client.get_schema_bundle( request, metadata=[ ("key", "val"), @@ -23509,14 +27532,14 @@ def test_get_iam_policy_rest_interceptors(null_interceptor): post_with_metadata.assert_called_once() -def test_set_iam_policy_rest_bad_request( - request_type=iam_policy_pb2.SetIamPolicyRequest, +def test_list_schema_bundles_rest_bad_request( + request_type=bigtable_table_admin.ListSchemaBundlesRequest, ): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"} + request_init = {"parent": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
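The interceptor tests above only assert that the pre/post hooks fire in order; as a rough sketch of how an application might actually use them (the hook names come from the tests themselves, while the import path and logging bodies are assumptions rather than part of this patch):

from google.auth import credentials as ga_credentials
from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
    BigtableTableAdminClient,
    transports,
)


class LoggingInterceptor(transports.BigtableTableAdminRestInterceptor):
    def pre_get_schema_bundle(self, request, metadata):
        # Runs before the HTTP request is sent; may rewrite the request or metadata.
        print("fetching schema bundle:", request.name)
        return request, metadata

    def post_get_schema_bundle(self, response):
        # Runs after the response body has been parsed into a SchemaBundle.
        print("fetched:", response.name)
        return response


transport = transports.BigtableTableAdminRestTransport(
    credentials=ga_credentials.AnonymousCredentials(),
    interceptor=LoggingInterceptor(),
)
client = BigtableTableAdminClient(transport=transport)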
@@ -23531,50 +27554,51 @@ def test_set_iam_policy_rest_bad_request( response_value.request = mock.Mock() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.set_iam_policy(request) + client.list_schema_bundles(request) @pytest.mark.parametrize( "request_type", [ - iam_policy_pb2.SetIamPolicyRequest, + bigtable_table_admin.ListSchemaBundlesRequest, dict, ], ) -def test_set_iam_policy_rest_call_success(request_type): +def test_list_schema_bundles_rest_call_success(request_type): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"} + request_init = {"parent": "projects/sample1/instances/sample2/tables/sample3"} request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = policy_pb2.Policy( - version=774, - etag=b"etag_blob", + return_value = bigtable_table_admin.ListSchemaBundlesResponse( + next_page_token="next_page_token_value", ) # Wrap the value into a proper Response obj response_value = mock.Mock() response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListSchemaBundlesResponse.pb(return_value) json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.set_iam_policy(request) + response = client.list_schema_bundles(request) # Establish that the response is the type that we expect. 
- assert isinstance(response, policy_pb2.Policy) - assert response.version == 774 - assert response.etag == b"etag_blob" + assert isinstance(response, pagers.ListSchemaBundlesPager) + assert response.next_page_token == "next_page_token_value" @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_set_iam_policy_rest_interceptors(null_interceptor): +def test_list_schema_bundles_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -23588,17 +27612,19 @@ def test_set_iam_policy_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_set_iam_policy" + transports.BigtableTableAdminRestInterceptor, "post_list_schema_bundles" ) as post, mock.patch.object( transports.BigtableTableAdminRestInterceptor, - "post_set_iam_policy_with_metadata", + "post_list_schema_bundles_with_metadata", ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_set_iam_policy" + transports.BigtableTableAdminRestInterceptor, "pre_list_schema_bundles" ) as pre: pre.assert_not_called() post.assert_not_called() post_with_metadata.assert_not_called() - pb_message = iam_policy_pb2.SetIamPolicyRequest() + pb_message = bigtable_table_admin.ListSchemaBundlesRequest.pb( + bigtable_table_admin.ListSchemaBundlesRequest() + ) transcode.return_value = { "method": "post", "uri": "my_uri", @@ -23609,19 +27635,24 @@ def test_set_iam_policy_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = json_format.MessageToJson(policy_pb2.Policy()) + return_value = bigtable_table_admin.ListSchemaBundlesResponse.to_json( + bigtable_table_admin.ListSchemaBundlesResponse() + ) req.return_value.content = return_value - request = iam_policy_pb2.SetIamPolicyRequest() + request = bigtable_table_admin.ListSchemaBundlesRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = policy_pb2.Policy() - post_with_metadata.return_value = policy_pb2.Policy(), metadata + post.return_value = bigtable_table_admin.ListSchemaBundlesResponse() + post_with_metadata.return_value = ( + bigtable_table_admin.ListSchemaBundlesResponse(), + metadata, + ) - client.set_iam_policy( + client.list_schema_bundles( request, metadata=[ ("key", "val"), @@ -23634,14 +27665,16 @@ def test_set_iam_policy_rest_interceptors(null_interceptor): post_with_metadata.assert_called_once() -def test_test_iam_permissions_rest_bad_request( - request_type=iam_policy_pb2.TestIamPermissionsRequest, +def test_delete_schema_bundle_rest_bad_request( + request_type=bigtable_table_admin.DeleteSchemaBundleRequest, ): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"} + request_init = { + "name": "projects/sample1/instances/sample2/tables/sample3/schemaBundles/sample4" + } request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
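The list_schema_bundles assertions above only inspect the pager type and page token; in ordinary use the returned pager hides pagination. A brief sketch, assuming the usual GAPIC pager behaviour where iteration transparently fetches follow-up pages (the parent value is hypothetical, not from this patch):

from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient
from google.cloud.bigtable_admin_v2.types import bigtable_table_admin

client = BigtableTableAdminClient()
pager = client.list_schema_bundles(
    request=bigtable_table_admin.ListSchemaBundlesRequest(
        parent="projects/p/instances/i/tables/t",
    )
)
# Iterating the pager yields SchemaBundle messages from every page.
for schema_bundle in pager:
    print(schema_bundle.name)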
@@ -23656,48 +27689,47 @@ def test_test_iam_permissions_rest_bad_request( response_value.request = mock.Mock() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.test_iam_permissions(request) + client.delete_schema_bundle(request) @pytest.mark.parametrize( "request_type", [ - iam_policy_pb2.TestIamPermissionsRequest, + bigtable_table_admin.DeleteSchemaBundleRequest, dict, ], ) -def test_test_iam_permissions_rest_call_success(request_type): +def test_delete_schema_bundle_rest_call_success(request_type): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding - request_init = {"resource": "projects/sample1/instances/sample2/tables/sample3"} + request_init = { + "name": "projects/sample1/instances/sample2/tables/sample3/schemaBundles/sample4" + } request = request_type(**request_init) # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. - return_value = iam_policy_pb2.TestIamPermissionsResponse( - permissions=["permissions_value"], - ) + return_value = None # Wrap the value into a proper Response obj response_value = mock.Mock() response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) + json_return_value = "" response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.test_iam_permissions(request) + response = client.delete_schema_bundle(request) # Establish that the response is the type that we expect. 
- assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) - assert response.permissions == ["permissions_value"] + assert response is None @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_test_iam_permissions_rest_interceptors(null_interceptor): +def test_delete_schema_bundle_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None @@ -23711,17 +27743,12 @@ def test_test_iam_permissions_rest_interceptors(null_interceptor): ) as req, mock.patch.object( path_template, "transcode" ) as transcode, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "post_test_iam_permissions" - ) as post, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, - "post_test_iam_permissions_with_metadata", - ) as post_with_metadata, mock.patch.object( - transports.BigtableTableAdminRestInterceptor, "pre_test_iam_permissions" + transports.BigtableTableAdminRestInterceptor, "pre_delete_schema_bundle" ) as pre: pre.assert_not_called() - post.assert_not_called() - post_with_metadata.assert_not_called() - pb_message = iam_policy_pb2.TestIamPermissionsRequest() + pb_message = bigtable_table_admin.DeleteSchemaBundleRequest.pb( + bigtable_table_admin.DeleteSchemaBundleRequest() + ) transcode.return_value = { "method": "post", "uri": "my_uri", @@ -23732,24 +27759,15 @@ def test_test_iam_permissions_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - return_value = json_format.MessageToJson( - iam_policy_pb2.TestIamPermissionsResponse() - ) - req.return_value.content = return_value - request = iam_policy_pb2.TestIamPermissionsRequest() + request = bigtable_table_admin.DeleteSchemaBundleRequest() metadata = [ ("key", "val"), ("cephalopod", "squid"), ] pre.return_value = request, metadata - post.return_value = iam_policy_pb2.TestIamPermissionsResponse() - post_with_metadata.return_value = ( - iam_policy_pb2.TestIamPermissionsResponse(), - metadata, - ) - client.test_iam_permissions( + client.delete_schema_bundle( request, metadata=[ ("key", "val"), @@ -23758,8 +27776,6 @@ def test_test_iam_permissions_rest_interceptors(null_interceptor): ) pre.assert_called_once() - post.assert_called_once() - post_with_metadata.assert_called_once() def test_initialize_client_w_rest(): @@ -24389,6 +28405,116 @@ def test_test_iam_permissions_empty_call_rest(): assert args[0] == request_msg +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_create_schema_bundle_empty_call_rest(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.create_schema_bundle), "__call__" + ) as call: + client.create_schema_bundle(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.CreateSchemaBundleRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. 
+def test_update_schema_bundle_empty_call_rest(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.update_schema_bundle), "__call__" + ) as call: + client.update_schema_bundle(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.UpdateSchemaBundleRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_get_schema_bundle_empty_call_rest(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.get_schema_bundle), "__call__" + ) as call: + client.get_schema_bundle(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.GetSchemaBundleRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_list_schema_bundles_empty_call_rest(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.list_schema_bundles), "__call__" + ) as call: + client.list_schema_bundles(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.ListSchemaBundlesRequest() + + assert args[0] == request_msg + + +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_delete_schema_bundle_empty_call_rest(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object( + type(client.transport.delete_schema_bundle), "__call__" + ) as call: + client.delete_schema_bundle(request=None) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = bigtable_table_admin.DeleteSchemaBundleRequest() + + assert args[0] == request_msg + + def test_bigtable_table_admin_rest_lro_client(): client = BigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), @@ -24469,6 +28595,11 @@ def test_bigtable_table_admin_base_transport(): "get_iam_policy", "set_iam_policy", "test_iam_permissions", + "create_schema_bundle", + "update_schema_bundle", + "get_schema_bundle", + "list_schema_bundles", + "delete_schema_bundle", ) for method in methods: with pytest.raises(NotImplementedError): @@ -24852,6 +28983,21 @@ def test_bigtable_table_admin_client_transport_session_collision(transport_name) session1 = client1.transport.test_iam_permissions._session session2 = client2.transport.test_iam_permissions._session assert session1 != session2 + session1 = client1.transport.create_schema_bundle._session + session2 = client2.transport.create_schema_bundle._session + assert session1 != session2 + session1 = client1.transport.update_schema_bundle._session + session2 = client2.transport.update_schema_bundle._session + assert session1 != session2 + session1 = client1.transport.get_schema_bundle._session + session2 = client2.transport.get_schema_bundle._session + assert session1 != session2 + session1 = client1.transport.list_schema_bundles._session + session2 = client2.transport.list_schema_bundles._session + assert session1 != session2 + session1 = client1.transport.delete_schema_bundle._session + session2 = client2.transport.delete_schema_bundle._session + assert session1 != session2 def test_bigtable_table_admin_grpc_transport_channel(): @@ -25157,11 +29303,42 @@ def test_parse_instance_path(): assert expected == actual -def test_snapshot_path(): +def test_schema_bundle_path(): project = "squid" instance = "clam" - cluster = "whelk" - snapshot = "octopus" + table = "whelk" + schema_bundle = "octopus" + expected = "projects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}".format( + project=project, + instance=instance, + table=table, + schema_bundle=schema_bundle, + ) + actual = BigtableTableAdminClient.schema_bundle_path( + project, instance, table, schema_bundle + ) + assert expected == actual + + +def test_parse_schema_bundle_path(): + expected = { + "project": "oyster", + "instance": "nudibranch", + "table": "cuttlefish", + "schema_bundle": "mussel", + } + path = BigtableTableAdminClient.schema_bundle_path(**expected) + + # Check that the path construction is reversible. 
+ actual = BigtableTableAdminClient.parse_schema_bundle_path(path) + assert expected == actual + + +def test_snapshot_path(): + project = "winkle" + instance = "nautilus" + cluster = "scallop" + snapshot = "abalone" expected = "projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}".format( project=project, instance=instance, @@ -25176,10 +29353,10 @@ def test_snapshot_path(): def test_parse_snapshot_path(): expected = { - "project": "oyster", - "instance": "nudibranch", - "cluster": "cuttlefish", - "snapshot": "mussel", + "project": "squid", + "instance": "clam", + "cluster": "whelk", + "snapshot": "octopus", } path = BigtableTableAdminClient.snapshot_path(**expected) @@ -25189,9 +29366,9 @@ def test_parse_snapshot_path(): def test_table_path(): - project = "winkle" - instance = "nautilus" - table = "scallop" + project = "oyster" + instance = "nudibranch" + table = "cuttlefish" expected = "projects/{project}/instances/{instance}/tables/{table}".format( project=project, instance=instance, @@ -25203,9 +29380,9 @@ def test_table_path(): def test_parse_table_path(): expected = { - "project": "abalone", - "instance": "squid", - "table": "clam", + "project": "mussel", + "instance": "winkle", + "table": "nautilus", } path = BigtableTableAdminClient.table_path(**expected) @@ -25215,7 +29392,7 @@ def test_parse_table_path(): def test_common_billing_account_path(): - billing_account = "whelk" + billing_account = "scallop" expected = "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -25225,7 +29402,7 @@ def test_common_billing_account_path(): def test_parse_common_billing_account_path(): expected = { - "billing_account": "octopus", + "billing_account": "abalone", } path = BigtableTableAdminClient.common_billing_account_path(**expected) @@ -25235,7 +29412,7 @@ def test_parse_common_billing_account_path(): def test_common_folder_path(): - folder = "oyster" + folder = "squid" expected = "folders/{folder}".format( folder=folder, ) @@ -25245,7 +29422,7 @@ def test_common_folder_path(): def test_parse_common_folder_path(): expected = { - "folder": "nudibranch", + "folder": "clam", } path = BigtableTableAdminClient.common_folder_path(**expected) @@ -25255,7 +29432,7 @@ def test_parse_common_folder_path(): def test_common_organization_path(): - organization = "cuttlefish" + organization = "whelk" expected = "organizations/{organization}".format( organization=organization, ) @@ -25265,7 +29442,7 @@ def test_common_organization_path(): def test_parse_common_organization_path(): expected = { - "organization": "mussel", + "organization": "octopus", } path = BigtableTableAdminClient.common_organization_path(**expected) @@ -25275,7 +29452,7 @@ def test_parse_common_organization_path(): def test_common_project_path(): - project = "winkle" + project = "oyster" expected = "projects/{project}".format( project=project, ) @@ -25285,7 +29462,7 @@ def test_common_project_path(): def test_parse_common_project_path(): expected = { - "project": "nautilus", + "project": "nudibranch", } path = BigtableTableAdminClient.common_project_path(**expected) @@ -25295,8 +29472,8 @@ def test_parse_common_project_path(): def test_common_location_path(): - project = "scallop" - location = "abalone" + project = "cuttlefish" + location = "mussel" expected = "projects/{project}/locations/{location}".format( project=project, location=location, @@ -25307,8 +29484,8 @@ def test_common_location_path(): def test_parse_common_location_path(): expected = { - "project": "squid", - "location": 
"clam", + "project": "winkle", + "location": "nautilus", } path = BigtableTableAdminClient.common_location_path(**expected) diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py index 84093a926313..dba535dcc25f 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py @@ -7511,6 +7511,7 @@ def test_execute_query_routing_parameters_request_1_grpc(): ) assert args[0] == request_msg + expected_headers = { "name": "projects/sample1/instances/sample2", "app_profile_id": "", From 9d77a13e22765c1c680e8a37ad5d776b3b6dd890 Mon Sep 17 00:00:00 2001 From: Akshay Joshi Date: Mon, 28 Jul 2025 14:24:03 -0400 Subject: [PATCH 874/892] feat: add support for AddToCell in Data Client (#1147) --- .../google/cloud/bigtable/data/mutations.py | 77 ++++++++++++ .../tests/system/data/__init__.py | 1 + .../tests/system/data/setup_fixtures.py | 3 +- .../tests/system/data/test_system_async.py | 84 ++++++++++++- .../tests/system/data/test_system_autogen.py | 71 ++++++++++- .../tests/unit/data/test_mutations.py | 114 ++++++++++++++++++ 6 files changed, 337 insertions(+), 13 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/mutations.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/mutations.py index 2f4e441ede81..f19b1e49e862 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/mutations.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/mutations.py @@ -123,6 +123,14 @@ def _from_dict(cls, input_dict: dict[str, Any]) -> Mutation: instance = DeleteAllFromFamily(details["family_name"]) elif "delete_from_row" in input_dict: instance = DeleteAllFromRow() + elif "add_to_cell" in input_dict: + details = input_dict["add_to_cell"] + instance = AddToCell( + details["family_name"], + details["column_qualifier"]["raw_value"], + details["input"]["int_value"], + details["timestamp"]["raw_timestamp_micros"], + ) except KeyError as e: raise ValueError("Invalid mutation dictionary") from e if instance is None: @@ -276,6 +284,75 @@ def _to_dict(self) -> dict[str, Any]: } +@dataclass +class AddToCell(Mutation): + """ + Adds an int64 value to an aggregate cell. The column family must be an + aggregate family and have an "int64" input type or this mutation will be + rejected. + + Note: The timestamp values are in microseconds but must match the + granularity of the table (defaults to `MILLIS`). Therefore, the given value + must be a multiple of 1000 (millisecond granularity). For example: + `1571902339435000`. + + Args: + family: The name of the column family to which the cell belongs. + qualifier: The column qualifier of the cell. + value: The value to be accumulated into the cell. + timestamp_micros: The timestamp of the cell. Must be provided for + cell aggregation to work correctly. + + + Raises: + TypeError: If `qualifier` is not `bytes` or `str`. + TypeError: If `value` is not `int`. + TypeError: If `timestamp_micros` is not `int`. + ValueError: If `value` is out of bounds for a 64-bit signed int. + ValueError: If `timestamp_micros` is less than 0. 
+ """ + + def __init__( + self, + family: str, + qualifier: bytes | str, + value: int, + timestamp_micros: int, + ): + qualifier = qualifier.encode() if isinstance(qualifier, str) else qualifier + if not isinstance(qualifier, bytes): + raise TypeError("qualifier must be bytes or str") + if not isinstance(value, int): + raise TypeError("value must be int") + if not isinstance(timestamp_micros, int): + raise TypeError("timestamp_micros must be int") + if abs(value) > _MAX_INCREMENT_VALUE: + raise ValueError( + "int values must be between -2**63 and 2**63 (64-bit signed int)" + ) + + if timestamp_micros < 0: + raise ValueError("timestamp must be non-negative") + + self.family = family + self.qualifier = qualifier + self.value = value + self.timestamp = timestamp_micros + + def _to_dict(self) -> dict[str, Any]: + return { + "add_to_cell": { + "family_name": self.family, + "column_qualifier": {"raw_value": self.qualifier}, + "timestamp": {"raw_timestamp_micros": self.timestamp}, + "input": {"int_value": self.value}, + } + } + + def is_idempotent(self) -> bool: + return False + + class RowMutationEntry: """ A single entry in a `MutateRows` request. diff --git a/packages/google-cloud-bigtable/tests/system/data/__init__.py b/packages/google-cloud-bigtable/tests/system/data/__init__.py index f2952b2cdb1b..2b35cea8f778 100644 --- a/packages/google-cloud-bigtable/tests/system/data/__init__.py +++ b/packages/google-cloud-bigtable/tests/system/data/__init__.py @@ -16,3 +16,4 @@ TEST_FAMILY = "test-family" TEST_FAMILY_2 = "test-family-2" +TEST_AGGREGATE_FAMILY = "test-aggregate-family" diff --git a/packages/google-cloud-bigtable/tests/system/data/setup_fixtures.py b/packages/google-cloud-bigtable/tests/system/data/setup_fixtures.py index a77ffc008b10..169e2396bdea 100644 --- a/packages/google-cloud-bigtable/tests/system/data/setup_fixtures.py +++ b/packages/google-cloud-bigtable/tests/system/data/setup_fixtures.py @@ -20,7 +20,7 @@ import os import uuid -from . import TEST_FAMILY, TEST_FAMILY_2 +from . import TEST_FAMILY, TEST_FAMILY_2, TEST_AGGREGATE_FAMILY # authorized view subset to allow all qualifiers ALLOW_ALL = "" @@ -183,6 +183,7 @@ def authorized_view_id( "family_subsets": { TEST_FAMILY: ALL_QUALIFIERS, TEST_FAMILY_2: ALL_QUALIFIERS, + TEST_AGGREGATE_FAMILY: ALL_QUALIFIERS, }, }, }, diff --git a/packages/google-cloud-bigtable/tests/system/data/test_system_async.py b/packages/google-cloud-bigtable/tests/system/data/test_system_async.py index b59131414a52..0dd6e8100e36 100644 --- a/packages/google-cloud-bigtable/tests/system/data/test_system_async.py +++ b/packages/google-cloud-bigtable/tests/system/data/test_system_async.py @@ -27,7 +27,7 @@ from google.cloud.bigtable.data._cross_sync import CrossSync -from . import TEST_FAMILY, TEST_FAMILY_2 +from . 
import TEST_FAMILY, TEST_FAMILY_2, TEST_AGGREGATE_FAMILY __CROSS_SYNC_OUTPUT__ = "tests.system.data.test_system_autogen" @@ -76,6 +76,27 @@ async def add_row( await self.target.client._gapic_client.mutate_row(request) self.rows.append(row_key) + @CrossSync.convert + async def add_aggregate_row( + self, row_key, *, family=TEST_AGGREGATE_FAMILY, qualifier=b"q", input=0 + ): + request = { + "table_name": self.target.table_name, + "row_key": row_key, + "mutations": [ + { + "add_to_cell": { + "family_name": family, + "column_qualifier": {"raw_value": qualifier}, + "timestamp": {"raw_timestamp_micros": 0}, + "input": {"int_value": input}, + } + } + ], + } + await self.target.client._gapic_client.mutate_row(request) + self.rows.append(row_key) + @CrossSync.convert async def delete_rows(self): if self.rows: @@ -132,7 +153,17 @@ def column_family_config(self): """ from google.cloud.bigtable_admin_v2 import types - return {TEST_FAMILY: types.ColumnFamily(), TEST_FAMILY_2: types.ColumnFamily()} + int_aggregate_type = types.Type.Aggregate( + input_type=types.Type(int64_type={"encoding": {"big_endian_bytes": {}}}), + sum={}, + ) + return { + TEST_FAMILY: types.ColumnFamily(), + TEST_FAMILY_2: types.ColumnFamily(), + TEST_AGGREGATE_FAMILY: types.ColumnFamily( + value_type=types.Type(aggregate_type=int_aggregate_type) + ), + } @pytest.fixture(scope="session") def init_table_id(self): @@ -281,6 +312,37 @@ async def test_mutation_set_cell(self, target, temp_rows): # ensure cell is updated assert (await self._retrieve_cell_value(target, row_key)) == new_value + @CrossSync.pytest + @pytest.mark.usefixtures("target") + @CrossSync.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + async def test_mutation_add_to_cell(self, target, temp_rows): + """ + Test add to cell mutation + """ + from google.cloud.bigtable.data.mutations import AddToCell + + row_key = b"add_to_cell" + family = TEST_AGGREGATE_FAMILY + qualifier = b"test-qualifier" + # add row to temp_rows, for future deletion + await temp_rows.add_aggregate_row(row_key, family=family, qualifier=qualifier) + # set and check cell value + await target.mutate_row( + row_key, AddToCell(family, qualifier, 1, timestamp_micros=0) + ) + encoded_result = await self._retrieve_cell_value(target, row_key) + int_result = int.from_bytes(encoded_result, byteorder="big") + assert int_result == 1 + # update again + await target.mutate_row( + row_key, AddToCell(family, qualifier, 9, timestamp_micros=0) + ) + encoded_result = await self._retrieve_cell_value(target, row_key) + int_result = int.from_bytes(encoded_result, byteorder="big") + assert int_result == 10 + @pytest.mark.skipif( bool(os.environ.get(BIGTABLE_EMULATOR)), reason="emulator doesn't use splits" ) @@ -1123,7 +1185,7 @@ async def test_execute_query_simple(self, client, table_id, instance_id): predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 ) async def test_execute_against_target( - self, client, instance_id, table_id, temp_rows + self, client, instance_id, table_id, temp_rows, column_family_config ): await temp_rows.add_row(b"row_key_1") result = await client.execute_query( @@ -1138,7 +1200,9 @@ async def test_execute_against_target( assert family_map[b"q"] == b"test-value" assert len(rows[0][TEST_FAMILY_2]) == 0 md = result.metadata - assert len(md) == 3 + # we expect it to fetch each column family, plus _key + # add additional families here if column_family_config changes + assert len(md) == len(column_family_config) + 1 assert md["_key"].column_type == 
SqlType.Bytes() assert md[TEST_FAMILY].column_type == SqlType.Map( SqlType.Bytes(), SqlType.Bytes() @@ -1146,6 +1210,9 @@ async def test_execute_against_target( assert md[TEST_FAMILY_2].column_type == SqlType.Map( SqlType.Bytes(), SqlType.Bytes() ) + assert md[TEST_AGGREGATE_FAMILY].column_type == SqlType.Map( + SqlType.Bytes(), SqlType.Int64() + ) @pytest.mark.skipif( bool(os.environ.get(BIGTABLE_EMULATOR)), @@ -1248,7 +1315,7 @@ async def test_execute_query_params(self, client, table_id, instance_id): predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 ) async def test_execute_metadata_on_empty_response( - self, client, instance_id, table_id, temp_rows + self, client, instance_id, table_id, temp_rows, column_family_config ): await temp_rows.add_row(b"row_key_1") result = await client.execute_query( @@ -1258,7 +1325,9 @@ async def test_execute_metadata_on_empty_response( assert len(rows) == 0 md = result.metadata - assert len(md) == 3 + # we expect it to fetch each column family, plus _key + # add additional families here if column_family_config change + assert len(md) == len(column_family_config) + 1 assert md["_key"].column_type == SqlType.Bytes() assert md[TEST_FAMILY].column_type == SqlType.Map( SqlType.Bytes(), SqlType.Bytes() @@ -1266,3 +1335,6 @@ async def test_execute_metadata_on_empty_response( assert md[TEST_FAMILY_2].column_type == SqlType.Map( SqlType.Bytes(), SqlType.Bytes() ) + assert md[TEST_AGGREGATE_FAMILY].column_type == SqlType.Map( + SqlType.Bytes(), SqlType.Int64() + ) diff --git a/packages/google-cloud-bigtable/tests/system/data/test_system_autogen.py b/packages/google-cloud-bigtable/tests/system/data/test_system_autogen.py index 6b2006d7b360..46e9c2215e88 100644 --- a/packages/google-cloud-bigtable/tests/system/data/test_system_autogen.py +++ b/packages/google-cloud-bigtable/tests/system/data/test_system_autogen.py @@ -26,7 +26,7 @@ from google.cloud.environment_vars import BIGTABLE_EMULATOR from google.type import date_pb2 from google.cloud.bigtable.data._cross_sync import CrossSync -from . import TEST_FAMILY, TEST_FAMILY_2 +from . 
import TEST_FAMILY, TEST_FAMILY_2, TEST_AGGREGATE_FAMILY TARGETS = ["table"] if not os.environ.get(BIGTABLE_EMULATOR): @@ -66,6 +66,26 @@ def add_row( self.target.client._gapic_client.mutate_row(request) self.rows.append(row_key) + def add_aggregate_row( + self, row_key, *, family=TEST_AGGREGATE_FAMILY, qualifier=b"q", input=0 + ): + request = { + "table_name": self.target.table_name, + "row_key": row_key, + "mutations": [ + { + "add_to_cell": { + "family_name": family, + "column_qualifier": {"raw_value": qualifier}, + "timestamp": {"raw_timestamp_micros": 0}, + "input": {"int_value": input}, + } + } + ], + } + self.target.client._gapic_client.mutate_row(request) + self.rows.append(row_key) + def delete_rows(self): if self.rows: request = { @@ -106,7 +126,17 @@ def column_family_config(self): """specify column families to create when creating a new test table""" from google.cloud.bigtable_admin_v2 import types - return {TEST_FAMILY: types.ColumnFamily(), TEST_FAMILY_2: types.ColumnFamily()} + int_aggregate_type = types.Type.Aggregate( + input_type=types.Type(int64_type={"encoding": {"big_endian_bytes": {}}}), + sum={}, + ) + return { + TEST_FAMILY: types.ColumnFamily(), + TEST_FAMILY_2: types.ColumnFamily(), + TEST_AGGREGATE_FAMILY: types.ColumnFamily( + value_type=types.Type(aggregate_type=int_aggregate_type) + ), + } @pytest.fixture(scope="session") def init_table_id(self): @@ -225,6 +255,27 @@ def test_mutation_set_cell(self, target, temp_rows): target.mutate_row(row_key, mutation) assert self._retrieve_cell_value(target, row_key) == new_value + @pytest.mark.usefixtures("target") + @CrossSync._Sync_Impl.Retry( + predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 + ) + def test_mutation_add_to_cell(self, target, temp_rows): + """Test add to cell mutation""" + from google.cloud.bigtable.data.mutations import AddToCell + + row_key = b"add_to_cell" + family = TEST_AGGREGATE_FAMILY + qualifier = b"test-qualifier" + temp_rows.add_aggregate_row(row_key, family=family, qualifier=qualifier) + target.mutate_row(row_key, AddToCell(family, qualifier, 1, timestamp_micros=0)) + encoded_result = self._retrieve_cell_value(target, row_key) + int_result = int.from_bytes(encoded_result, byteorder="big") + assert int_result == 1 + target.mutate_row(row_key, AddToCell(family, qualifier, 9, timestamp_micros=0)) + encoded_result = self._retrieve_cell_value(target, row_key) + int_result = int.from_bytes(encoded_result, byteorder="big") + assert int_result == 10 + @pytest.mark.skipif( bool(os.environ.get(BIGTABLE_EMULATOR)), reason="emulator doesn't use splits" ) @@ -915,7 +966,9 @@ def test_execute_query_simple(self, client, table_id, instance_id): @CrossSync._Sync_Impl.Retry( predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 ) - def test_execute_against_target(self, client, instance_id, table_id, temp_rows): + def test_execute_against_target( + self, client, instance_id, table_id, temp_rows, column_family_config + ): temp_rows.add_row(b"row_key_1") result = client.execute_query("SELECT * FROM `" + table_id + "`", instance_id) rows = [r for r in result] @@ -926,7 +979,7 @@ def test_execute_against_target(self, client, instance_id, table_id, temp_rows): assert family_map[b"q"] == b"test-value" assert len(rows[0][TEST_FAMILY_2]) == 0 md = result.metadata - assert len(md) == 3 + assert len(md) == len(column_family_config) + 1 assert md["_key"].column_type == SqlType.Bytes() assert md[TEST_FAMILY].column_type == SqlType.Map( SqlType.Bytes(), SqlType.Bytes() @@ -934,6 +987,9 @@ 
def test_execute_against_target(self, client, instance_id, table_id, temp_rows): assert md[TEST_FAMILY_2].column_type == SqlType.Map( SqlType.Bytes(), SqlType.Bytes() ) + assert md[TEST_AGGREGATE_FAMILY].column_type == SqlType.Map( + SqlType.Bytes(), SqlType.Int64() + ) @pytest.mark.skipif( bool(os.environ.get(BIGTABLE_EMULATOR)), reason="emulator doesn't support SQL" @@ -1023,7 +1079,7 @@ def test_execute_query_params(self, client, table_id, instance_id): predicate=retry.if_exception_type(ClientError), initial=1, maximum=5 ) def test_execute_metadata_on_empty_response( - self, client, instance_id, table_id, temp_rows + self, client, instance_id, table_id, temp_rows, column_family_config ): temp_rows.add_row(b"row_key_1") result = client.execute_query( @@ -1032,7 +1088,7 @@ def test_execute_metadata_on_empty_response( rows = [r for r in result] assert len(rows) == 0 md = result.metadata - assert len(md) == 3 + assert len(md) == len(column_family_config) + 1 assert md["_key"].column_type == SqlType.Bytes() assert md[TEST_FAMILY].column_type == SqlType.Map( SqlType.Bytes(), SqlType.Bytes() @@ -1040,3 +1096,6 @@ def test_execute_metadata_on_empty_response( assert md[TEST_FAMILY_2].column_type == SqlType.Map( SqlType.Bytes(), SqlType.Bytes() ) + assert md[TEST_AGGREGATE_FAMILY].column_type == SqlType.Map( + SqlType.Bytes(), SqlType.Int64() + ) diff --git a/packages/google-cloud-bigtable/tests/unit/data/test_mutations.py b/packages/google-cloud-bigtable/tests/unit/data/test_mutations.py index 485c86e42e3f..17050162c32f 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/test_mutations.py +++ b/packages/google-cloud-bigtable/tests/unit/data/test_mutations.py @@ -117,6 +117,17 @@ def test_size(self, test_dict): {"delete_from_family": {"family_name": "foo"}}, ), (mutations.DeleteAllFromRow, {"delete_from_row": {}}), + ( + mutations.AddToCell, + { + "add_to_cell": { + "family_name": "foo", + "column_qualifier": {"raw_value": b"bar"}, + "timestamp": {"raw_timestamp_micros": 12345}, + "input": {"int_value": 123}, + } + }, + ), ], ) def test__from_dict(self, expected_class, input_dict): @@ -162,6 +173,7 @@ def test__from_dict_wrong_subclass(self): mutations.DeleteRangeFromColumn("foo", b"bar"), mutations.DeleteAllFromFamily("foo"), mutations.DeleteAllFromRow(), + mutations.AddToCell("foo", b"bar", 123, 456), ] for instance in subclasses: others = [other for other in subclasses if other != instance] @@ -706,3 +718,105 @@ def test__from_dict(self): assert len(instance.mutations) == 1 assert isinstance(instance.mutations[0], mutations.DeleteAllFromFamily) assert instance.mutations[0].family_to_delete == "test_family" + + +class TestAddToCell: + def _target_class(self): + from google.cloud.bigtable.data.mutations import AddToCell + + return AddToCell + + def _make_one(self, *args, **kwargs): + return self._target_class()(*args, **kwargs) + + @pytest.mark.parametrize("input_val", [2**64, -(2**64)]) + def test_ctor_large_int(self, input_val): + with pytest.raises(ValueError) as e: + self._make_one( + family="f", qualifier=b"b", value=input_val, timestamp_micros=123 + ) + assert "int values must be between" in str(e.value) + + @pytest.mark.parametrize("input_val", ["", "a", "abc", "hello world!"]) + def test_ctor_str_value(self, input_val): + with pytest.raises(TypeError) as e: + self._make_one( + family="f", qualifier=b"b", value=input_val, timestamp_micros=123 + ) + assert "value must be int" in str(e.value) + + def test_ctor(self): + """Ensure constructor sets expected values""" + expected_family 
= "test-family" + expected_qualifier = b"test-qualifier" + expected_value = 1234 + expected_timestamp = 1234567890 + instance = self._make_one( + expected_family, expected_qualifier, expected_value, expected_timestamp + ) + assert instance.family == expected_family + assert instance.qualifier == expected_qualifier + assert instance.value == expected_value + assert instance.timestamp == expected_timestamp + + def test_ctor_negative_timestamp(self): + """Only non-negative timestamps are valid""" + with pytest.raises(ValueError) as e: + self._make_one("test-family", b"test-qualifier", 1234, -2) + assert "timestamp must be non-negative" in str(e.value) + + def test__to_dict(self): + """ensure dict representation is as expected""" + expected_family = "test-family" + expected_qualifier = b"test-qualifier" + expected_value = 1234 + expected_timestamp = 123456789 + instance = self._make_one( + expected_family, expected_qualifier, expected_value, expected_timestamp + ) + got_dict = instance._to_dict() + assert list(got_dict.keys()) == ["add_to_cell"] + got_inner_dict = got_dict["add_to_cell"] + assert got_inner_dict["family_name"] == expected_family + assert got_inner_dict["column_qualifier"]["raw_value"] == expected_qualifier + assert got_inner_dict["timestamp"]["raw_timestamp_micros"] == expected_timestamp + assert got_inner_dict["input"]["int_value"] == expected_value + assert len(got_inner_dict.keys()) == 4 + + def test__to_pb(self): + """ensure proto representation is as expected""" + import google.cloud.bigtable_v2.types.data as data_pb + + expected_family = "test-family" + expected_qualifier = b"test-qualifier" + expected_value = 1234 + expected_timestamp = 123456789 + instance = self._make_one( + expected_family, expected_qualifier, expected_value, expected_timestamp + ) + got_pb = instance._to_pb() + assert isinstance(got_pb, data_pb.Mutation) + assert got_pb.add_to_cell.family_name == expected_family + assert got_pb.add_to_cell.column_qualifier.raw_value == expected_qualifier + assert got_pb.add_to_cell.timestamp.raw_timestamp_micros == expected_timestamp + assert got_pb.add_to_cell.input.int_value == expected_value + + @pytest.mark.parametrize( + "timestamp", + [ + (1234567890), + (1), + (0), + ], + ) + def test_is_idempotent(self, timestamp): + """is_idempotent is not based on the timestamp""" + instance = self._make_one("test-family", b"test-qualifier", 1234, timestamp) + assert not instance.is_idempotent() + + def test___str__(self): + """Str representation of mutations should be to_dict""" + instance = self._make_one("test-family", b"test-qualifier", 1234, 1234567890) + str_value = instance.__str__() + dict_value = instance._to_dict() + assert str_value == str(dict_value) From 761dee2849266fc65663daab6721ed6c3f2a1378 Mon Sep 17 00:00:00 2001 From: Kevin Zheng <147537668+gkevinzheng@users.noreply.github.com> Date: Fri, 1 Aug 2025 11:55:09 -0400 Subject: [PATCH 875/892] feat: Modernized Bigtable Admin Client featuring selective GAPIC generation (#1177) * chore: Removed old admin_v2 GAPIC layer (#1111) * feat!: Generated Selective GAPIC layer for Admin API (#1112) * chore: Updated service YAML by making all methods in BigtableInstanceAdmin public (#1113) * refactor: Refactored classic client to use new Admin API (#1114) * refactor: Refactored classic client to use new Admin API * added newline after gapic_version files * fix: Made generate_consistency_token and check_consistency public (#1116) methods * feat: Consistency polling + restore table for sync client in admin (#1117) * feat: 
Prototyped handwritten layer * Added newlines * linting * Added docstrings for restore table and consistency token polling; removed gc_rule * docs: owlbot related changes (#1133) * docs: owlbot related changes * Addressed PR feedback + made changes to toc.yml for docs pipeline * Fixed type hint * linting + added validation for admin section * linting + added noqas to owlbot lines * tests: Tests for sync client + fixes + client library versioning (#1132) * tests: Tests for sync client + fixes + client library versioning * Removed raise exception * linting + name changes in tests + added test for timeout * linting * Fixed tests on Python 3.7 * feat: Proto-plus modifications for enforcing strict oneofs (#1126) * feat: Proto-plus modifications for enforcing strict oneofs * Added template directory + changed unit tests to pytest * Finished README * linting * Added source of truth comment * feat: Reworked the wait_for_consistency call (#1144) * feat: Reworked the wait_for_consistency call * linting * Update google/cloud/bigtable/admin_v2/overlay/services/bigtable_table_admin/client.py Co-authored-by: Mattie Fu * Improved documentation * linting again * linting --------- Co-authored-by: Mattie Fu * feat: Async consistency polling harness (#1142) * feat: Async consistency polling harness * Fixed AsyncMock issue in Python 3.7 * Reworked async_consistency and added async client to __init__.py * linting * addressed review feedback * linting * feat: Restore Table LRO rework + async restore table (#1148) * chore(tests): system tests for autogen API (#1151) * tests: system tests for autogen API * Fixed async system tests * addressed review feedback * Fixed system test failure at the end of a test run * Linting * more linting * chore: Moved Admin API from google.cloud.bigtable.admin_v2 back to google.cloud.bigtable_admin_v2 (#1153) * chore: Removed autogenerated files from the feature branch (#1170) * chore: Merged selective GAPIC autogenerated changes into feature branch (#1175) * chore: Merged selective GAPIC owlbot changes into feature branch * linting * changed comment text * Removed redundant items * Fixed owlbot infinitely appending text * Added comments + fixed indentation in Owlbot * Added anonymous credentials to client tests * Fixed project ID issue in system tests * Fixed docstrings and skipped system tests on emulator. 
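
Illustrative usage sketch of the new overlay surface (a minimal sketch only: the
import path follows the bigtable_admin_v2 layout used in this patch, and the
placeholder values and the hypothetical sample_overlay_usage helper are
assumptions drawn from the overlay docstrings added below):

    from google.cloud import bigtable_admin_v2

    async def sample_overlay_usage():
        client = bigtable_admin_v2.BigtableTableAdminAsyncClient()

        # restore_table now returns an AsyncRestoreTableOperation, which also
        # exposes the follow-up OptimizeRestoreTable LRO when one is created.
        operation = await client.restore_table(
            request=bigtable_admin_v2.RestoreTableRequest(
                parent="parent_value",
                table_id="table_id_value",
                backup="backup_value",
            )
        )
        table = await operation.result()

        optimize_operation = await operation.optimize_restore_table_operation()
        if optimize_operation:
            await optimize_operation.result()

        # wait_for_consistency generates a consistency token and polls
        # check_consistency until it reports the table as consistent.
        consistent = await client.wait_for_consistency(name=table.name)
        assert consistent
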
--------- Co-authored-by: Mattie Fu --- .../docs/admin_client/admin_client_usage.rst | 11 + .../admin_client/bigtable_instance_admin.rst | 10 + .../admin_client/bigtable_table_admin.rst | 10 + .../docs/admin_client/services_.rst | 7 + .../docs/admin_client/types_.rst | 10 + packages/google-cloud-bigtable/docs/index.rst | 2 +- .../docs/scripts/patch_devsite_toc.py | 111 +- .../google/cloud/bigtable/backup.py | 19 +- .../google/cloud/bigtable/client.py | 4 +- .../google/cloud/bigtable/table.py | 10 +- .../google/cloud/bigtable_admin/__init__.py | 13 +- .../cloud/bigtable_admin_v2/__init__.py | 12 +- .../bigtable_admin_v2/gapic_metadata.json | 12 +- .../bigtable_admin_v2/overlay/__init__.py | 49 + .../overlay/services/__init__.py | 13 + .../services/bigtable_table_admin/__init__.py | 23 + .../bigtable_table_admin/async_client.py | 375 ++++ .../services/bigtable_table_admin/client.py | 373 ++++ .../overlay/types/__init__.py | 31 + .../overlay/types/async_consistency.py | 104 + .../overlay/types/async_restore_table.py | 99 + .../overlay/types/consistency.py | 101 + .../overlay/types/restore_table.py | 102 + .../types/wait_for_consistency_request.py | 85 + .../services/bigtable_table_admin/__init__.py | 8 +- .../bigtable_table_admin/async_client.py | 94 +- .../services/bigtable_table_admin/client.py | 40 +- .../bigtable_table_admin/transports/rest.py | 130 +- .../cloud/bigtable_admin_v2/types/table.py | 48 +- .../cloud/bigtable_admin_v2/utils/__init__.py | 19 + .../bigtable_admin_v2/utils/oneof_message.py | 108 + packages/google-cloud-bigtable/owlbot.py | 170 +- .../scripts/fixup_admin_v2_keywords.py | 233 ++ packages/google-cloud-bigtable/setup.py | 4 +- .../testing/constraints-3.7.txt | 2 +- .../testing/constraints-3.8.txt | 2 +- .../tests/system/admin_overlay/__init__.py | 0 .../tests/system/admin_overlay/conftest.py | 38 + .../system/admin_overlay/test_system_async.py | 384 ++++ .../admin_overlay/test_system_autogen.py | 291 +++ .../tests/system/conftest.py | 11 + .../tests/system/data/test_system_async.py | 9 - .../tests/system/data/test_system_autogen.py | 28 +- .../unit/admin_overlay/my_oneof_message.py | 45 + .../admin_overlay/test_admin_packaging.py | 41 + .../unit/admin_overlay/test_async_client.py | 297 +++ .../admin_overlay/test_async_consistency.py | 74 + .../admin_overlay/test_async_restore_table.py | 248 +++ .../tests/unit/admin_overlay/test_client.py | 278 +++ .../unit/admin_overlay/test_consistency.py | 68 + .../unit/admin_overlay/test_oneof_message.py | 164 ++ .../unit/admin_overlay/test_restore_table.py | 230 ++ .../test_bigtable_table_admin.py | 1950 +++++++++-------- .../tests/unit/v2_client/test_backup.py | 26 +- .../tests/unit/v2_client/test_client.py | 10 +- .../unit/v2_client/test_column_family.py | 12 +- .../tests/unit/v2_client/test_instance.py | 4 +- .../tests/unit/v2_client/test_table.py | 6 +- 58 files changed, 5430 insertions(+), 1228 deletions(-) create mode 100644 packages/google-cloud-bigtable/docs/admin_client/admin_client_usage.rst create mode 100644 packages/google-cloud-bigtable/docs/admin_client/bigtable_instance_admin.rst create mode 100644 packages/google-cloud-bigtable/docs/admin_client/bigtable_table_admin.rst create mode 100644 packages/google-cloud-bigtable/docs/admin_client/services_.rst create mode 100644 packages/google-cloud-bigtable/docs/admin_client/types_.rst create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/__init__.py create mode 100644 
packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/services/__init__.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/services/bigtable_table_admin/__init__.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/services/bigtable_table_admin/async_client.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/services/bigtable_table_admin/client.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/types/__init__.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/types/async_consistency.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/types/async_restore_table.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/types/consistency.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/types/restore_table.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/types/wait_for_consistency_request.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/utils/__init__.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/utils/oneof_message.py create mode 100644 packages/google-cloud-bigtable/scripts/fixup_admin_v2_keywords.py create mode 100644 packages/google-cloud-bigtable/tests/system/admin_overlay/__init__.py create mode 100644 packages/google-cloud-bigtable/tests/system/admin_overlay/conftest.py create mode 100644 packages/google-cloud-bigtable/tests/system/admin_overlay/test_system_async.py create mode 100644 packages/google-cloud-bigtable/tests/system/admin_overlay/test_system_autogen.py create mode 100644 packages/google-cloud-bigtable/tests/unit/admin_overlay/my_oneof_message.py create mode 100644 packages/google-cloud-bigtable/tests/unit/admin_overlay/test_admin_packaging.py create mode 100644 packages/google-cloud-bigtable/tests/unit/admin_overlay/test_async_client.py create mode 100644 packages/google-cloud-bigtable/tests/unit/admin_overlay/test_async_consistency.py create mode 100644 packages/google-cloud-bigtable/tests/unit/admin_overlay/test_async_restore_table.py create mode 100644 packages/google-cloud-bigtable/tests/unit/admin_overlay/test_client.py create mode 100644 packages/google-cloud-bigtable/tests/unit/admin_overlay/test_consistency.py create mode 100644 packages/google-cloud-bigtable/tests/unit/admin_overlay/test_oneof_message.py create mode 100644 packages/google-cloud-bigtable/tests/unit/admin_overlay/test_restore_table.py diff --git a/packages/google-cloud-bigtable/docs/admin_client/admin_client_usage.rst b/packages/google-cloud-bigtable/docs/admin_client/admin_client_usage.rst new file mode 100644 index 000000000000..8c6f4a5dc508 --- /dev/null +++ b/packages/google-cloud-bigtable/docs/admin_client/admin_client_usage.rst @@ -0,0 +1,11 @@ +Admin Client +============ +.. toctree:: + :maxdepth: 2 + + services_ + types_ + +.. + This should be the only handwritten RST file in this directory. + Everything else should be autogenerated. 
diff --git a/packages/google-cloud-bigtable/docs/admin_client/bigtable_instance_admin.rst b/packages/google-cloud-bigtable/docs/admin_client/bigtable_instance_admin.rst new file mode 100644 index 000000000000..42f7caad7cb1 --- /dev/null +++ b/packages/google-cloud-bigtable/docs/admin_client/bigtable_instance_admin.rst @@ -0,0 +1,10 @@ +BigtableInstanceAdmin +--------------------------------------- + +.. automodule:: google.cloud.bigtable_admin_v2.services.bigtable_instance_admin + :members: + :inherited-members: + +.. automodule:: google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers + :members: + :inherited-members: diff --git a/packages/google-cloud-bigtable/docs/admin_client/bigtable_table_admin.rst b/packages/google-cloud-bigtable/docs/admin_client/bigtable_table_admin.rst new file mode 100644 index 000000000000..0fa4b276a616 --- /dev/null +++ b/packages/google-cloud-bigtable/docs/admin_client/bigtable_table_admin.rst @@ -0,0 +1,10 @@ +BigtableTableAdmin +------------------------------------ + +.. automodule:: google.cloud.bigtable_admin_v2.overlay.services.bigtable_table_admin + :members: + :inherited-members: + +.. automodule:: google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers + :members: + :inherited-members: diff --git a/packages/google-cloud-bigtable/docs/admin_client/services_.rst b/packages/google-cloud-bigtable/docs/admin_client/services_.rst new file mode 100644 index 000000000000..ea55c7da14a3 --- /dev/null +++ b/packages/google-cloud-bigtable/docs/admin_client/services_.rst @@ -0,0 +1,7 @@ +Services for Google Cloud Bigtable Admin v2 API +=============================================== +.. toctree:: + :maxdepth: 2 + + bigtable_instance_admin + bigtable_table_admin diff --git a/packages/google-cloud-bigtable/docs/admin_client/types_.rst b/packages/google-cloud-bigtable/docs/admin_client/types_.rst new file mode 100644 index 000000000000..ef32b9684bd4 --- /dev/null +++ b/packages/google-cloud-bigtable/docs/admin_client/types_.rst @@ -0,0 +1,10 @@ +Types for Google Cloud Bigtable Admin v2 API +============================================ + +.. automodule:: google.cloud.bigtable_admin_v2.types + :members: + :show-inheritance: + +.. automodule:: google.cloud.bigtable_admin_v2.overlay.types + :members: + :show-inheritance: diff --git a/packages/google-cloud-bigtable/docs/index.rst b/packages/google-cloud-bigtable/docs/index.rst index c7f9721f383e..0694c8bb00e0 100644 --- a/packages/google-cloud-bigtable/docs/index.rst +++ b/packages/google-cloud-bigtable/docs/index.rst @@ -9,7 +9,7 @@ Client Types data_client/data_client_usage classic_client/usage - + admin_client/admin_client_usage Changelog --------- diff --git a/packages/google-cloud-bigtable/docs/scripts/patch_devsite_toc.py b/packages/google-cloud-bigtable/docs/scripts/patch_devsite_toc.py index 5889300d265a..fbb753daf6ac 100644 --- a/packages/google-cloud-bigtable/docs/scripts/patch_devsite_toc.py +++ b/packages/google-cloud-bigtable/docs/scripts/patch_devsite_toc.py @@ -20,6 +20,7 @@ """ +import glob import yaml import os import shutil @@ -153,6 +154,81 @@ def copy_markdown(self): f"_build/html/docfx_yaml", ) + def validate_section(self, toc): + # Make sure each rst file is listed in the toc. 
+ items_in_toc = [ + d["items"] for d in toc[0]["items"] if d["name"] == self.title and ".rst" + ][0] + items_in_dir = [f for f in os.listdir(self.dir_name) if f.endswith(".rst")] + # subtract 1 for index + assert len(items_in_toc) == len(items_in_dir) - 1 + for file in items_in_dir: + if file != self.index_file_name: + base_name, _ = os.path.splitext(file) + assert any(d["href"] == f"{base_name}.md" for d in items_in_toc) + # make sure the markdown files are present in the docfx_yaml directory + md_files = [d["href"] for d in items_in_toc] + for file in md_files: + assert os.path.exists(f"_build/html/docfx_yaml/{file}") + + +class UIDFilteredTocSection(TocSection): + def __init__(self, toc_file_path, section_name, title, uid_prefix): + """Creates a filtered section denoted by section_name in the toc_file_path to items with the given UID prefix. + + The section is then renamed to the title. + """ + current_toc = yaml.safe_load(open(toc_file_path, "r")) + self.uid_prefix = uid_prefix + + # Since we are looking for a specific section_name there should only + # be one match. + section_items = [ + d for d in current_toc[0]["items"] if d["name"] == section_name + ][0]["items"] + filtered_items = [d for d in section_items if d["uid"].startswith(uid_prefix)] + self.items = filtered_items + self.title = title + + def copy_markdown(self): + """ + No-op because we are filtering on UIDs, not markdown files. + """ + pass + + def validate_section(self, toc): + uids_in_toc = set() + + # A UID-filtered TOC tree looks like the following: + # - items: + # items: + # name: + # uid: + # + # Walk through the TOC tree to find all UIDs recursively. + def find_uids_in_items(items): + uids_in_toc.add(items["uid"]) + for subitem in items.get("items", []): + find_uids_in_items(subitem) + + items_in_toc = [d["items"] for d in toc[0]["items"] if d["name"] == self.title][ + 0 + ] + for item in items_in_toc: + find_uids_in_items(item) + + # Now that we have all the UIDs, first match all of them + # with corresponding .yml files. + for uid in uids_in_toc: + assert os.path.exists(f"_build/html/docfx_yaml/{uid}.yml") + + # Also validate that every uid yml file that starts with the uid_prefix + # exists in the section. 
+ for filename in glob.glob( + f"{self.uid_prefix}*.yml", root_dir="_build/html/docfx_yaml" + ): + assert filename[:-4] in uids_in_toc + def validate_toc(toc_file_path, expected_section_list, added_sections): current_toc = yaml.safe_load(open(toc_file_path, "r")) @@ -164,43 +240,27 @@ def validate_toc(toc_file_path, expected_section_list, added_sections): # make sure each customs ection is in the toc for section in added_sections: assert section.title in found_sections - # make sure each rst file in each custom section dir is listed in the toc - for section in added_sections: - items_in_toc = [ - d["items"] - for d in current_toc[0]["items"] - if d["name"] == section.title and ".rst" - ][0] - items_in_dir = [f for f in os.listdir(section.dir_name) if f.endswith(".rst")] - # subtract 1 for index - assert len(items_in_toc) == len(items_in_dir) - 1 - for file in items_in_dir: - if file != section.index_file_name: - base_name, _ = os.path.splitext(file) - assert any(d["href"] == f"{base_name}.md" for d in items_in_toc) - # make sure the markdown files are present in the docfx_yaml directory - for section in added_sections: - items_in_toc = [ - d["items"] - for d in current_toc[0]["items"] - if d["name"] == section.title and ".rst" - ][0] - md_files = [d["href"] for d in items_in_toc] - for file in md_files: - assert os.path.exists(f"_build/html/docfx_yaml/{file}") + section.validate_section(current_toc) print("Toc validation passed") if __name__ == "__main__": # Add secrtions for the async_data_client and classic_client directories toc_path = "_build/html/docfx_yaml/toc.yml" + custom_sections = [ TocSection(dir_name="data_client", index_file_name="data_client_usage.rst"), + UIDFilteredTocSection( + toc_file_path=toc_path, + section_name="Bigtable Admin V2", + title="Admin Client", + uid_prefix="google.cloud.bigtable_admin_v2", + ), TocSection(dir_name="classic_client", index_file_name="usage.rst"), ] add_sections(toc_path, custom_sections) # Remove the Bigtable section, since it has duplicated data - remove_sections(toc_path, ["Bigtable"]) + remove_sections(toc_path, ["Bigtable", "Bigtable Admin V2"]) # run validation to make sure yaml is structured as we expect validate_toc( toc_file_path=toc_path, @@ -210,6 +270,7 @@ def validate_toc(toc_file_path, expected_section_list, added_sections): "Changelog", "Multiprocessing", "Data Client", + "Admin Client", "Classic Client", ], added_sections=custom_sections, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/backup.py b/packages/google-cloud-bigtable/google/cloud/bigtable/backup.py index 5b2cafc543e9..f6fa24421f02 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/backup.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/backup.py @@ -17,7 +17,7 @@ import re from google.cloud._helpers import _datetime_to_pb_timestamp # type: ignore -from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient +from google.cloud.bigtable_admin_v2 import BaseBigtableTableAdminClient from google.cloud.bigtable_admin_v2.types import table from google.cloud.bigtable.encryption_info import EncryptionInfo from google.cloud.bigtable.policy import Policy @@ -106,7 +106,7 @@ def name(self): if not self._cluster: raise ValueError('"cluster" parameter must be set') - return BigtableTableAdminClient.backup_path( + return BaseBigtableTableAdminClient.backup_path( project=self._instance._client.project, instance=self._instance.instance_id, cluster=self._cluster, @@ -141,7 +141,7 @@ def parent(self): :returns: A full path to the 
parent cluster. """ if not self._parent and self._cluster: - self._parent = BigtableTableAdminClient.cluster_path( + self._parent = BaseBigtableTableAdminClient.cluster_path( project=self._instance._client.project, instance=self._instance.instance_id, cluster=self._cluster, @@ -163,7 +163,7 @@ def source_table(self): :returns: The Table name. """ if not self._source_table and self.table_id: - self._source_table = BigtableTableAdminClient.table_path( + self._source_table = BaseBigtableTableAdminClient.table_path( project=self._instance._client.project, instance=self._instance.instance_id, table=self.table_id, @@ -226,7 +226,7 @@ def size_bytes(self): def state(self): """The current state of this Backup. - :rtype: :class:`~google.cloud.bigtable_admin_v2.gapic.enums.Backup.State` + :rtype: :class:`~google.cloud.bigtable_admin_v2.types.table.Backup.State` :returns: The current state of this Backup. """ return self._state @@ -305,8 +305,7 @@ def create(self, cluster_id=None): created Backup. :rtype: :class:`~google.api_core.operation.Operation` - :returns: :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` - instance, to be used to poll the status of the 'create' request + :returns: A future to be used to poll the status of the 'create' request :raises Conflict: if the Backup already exists :raises NotFound: if the Instance owning the Backup does not exist :raises BadRequest: if the `table` or `expire_time` values are invalid, @@ -412,7 +411,7 @@ def restore(self, table_id, instance_id=None): :param instance_id: (Optional) The ID of the Instance to restore the backup into, if different from the current one. - :rtype: :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture` + :rtype: :class:`~google.api_core.operation.Operation` :returns: A future to be used to poll the status of the 'restore' request. 
@@ -426,14 +425,14 @@ def restore(self, table_id, instance_id=None): """ api = self._instance._client.table_admin_client if instance_id: - parent = BigtableTableAdminClient.instance_path( + parent = BaseBigtableTableAdminClient.instance_path( project=self._instance._client.project, instance=instance_id, ) else: parent = self._instance.name - return api.restore_table( + return api._restore_table( request={"parent": parent, "table_id": table_id, "backup": self.name} ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py index 0c89ea562097..37de10b6e772 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/client.py @@ -325,11 +325,11 @@ def table_admin_client(self): raise ValueError("Client is not an admin client.") transport = self._create_gapic_client_channel( - bigtable_admin_v2.BigtableTableAdminClient, + bigtable_admin_v2.BaseBigtableTableAdminClient, BigtableTableAdminGrpcTransport, ) klass = _create_gapic_client( - bigtable_admin_v2.BigtableTableAdminClient, + bigtable_admin_v2.BaseBigtableTableAdminClient, client_options=self._admin_client_options, transport=transport, ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py index 7429bd36f251..0009f287ef85 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/table.py @@ -47,7 +47,7 @@ from google.cloud.bigtable.row_set import RowRange from google.cloud.bigtable import enums from google.cloud.bigtable_v2.types import bigtable as data_messages_v2_pb2 -from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient +from google.cloud.bigtable_admin_v2 import BaseBigtableTableAdminClient from google.cloud.bigtable_admin_v2.types import table as admin_messages_v2_pb2 from google.cloud.bigtable_admin_v2.types import ( bigtable_table_admin as table_admin_messages_v2_pb2, @@ -990,7 +990,7 @@ def list_backups(self, cluster_id=None, filter_=None, order_by=None, page_size=0 if filter_: backups_filter = "({}) AND ({})".format(backups_filter, filter_) - parent = BigtableTableAdminClient.cluster_path( + parent = BaseBigtableTableAdminClient.cluster_path( project=self._instance._client.project, instance=self._instance.instance_id, cluster=cluster_id, @@ -1037,7 +1037,7 @@ def restore(self, new_table_id, cluster_id=None, backup_id=None, backup_name=Non and `backup_id` parameters even of such specified. :return: An instance of - :class:`~google.cloud.bigtable_admin_v2.types._OperationFuture`. + :class:`~google.api_core.operation.Operation`. :raises: google.api_core.exceptions.AlreadyExists: If the table already exists. 
@@ -1049,13 +1049,13 @@ def restore(self, new_table_id, cluster_id=None, backup_id=None, backup_name=Non """ api = self._instance._client.table_admin_client if not backup_name: - backup_name = BigtableTableAdminClient.backup_path( + backup_name = BaseBigtableTableAdminClient.backup_path( project=self._instance._client.project, instance=self._instance.instance_id, cluster=cluster_id, backup=backup_id, ) - return api.restore_table( + return api._restore_table( request={ "parent": self._instance.name, "table_id": new_table_id, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/__init__.py index 309d06c7bd51..00353ea96958 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/__init__.py @@ -25,10 +25,10 @@ BigtableInstanceAdminAsyncClient, ) from google.cloud.bigtable_admin_v2.services.bigtable_table_admin.client import ( - BigtableTableAdminClient, + BaseBigtableTableAdminClient, ) from google.cloud.bigtable_admin_v2.services.bigtable_table_admin.async_client import ( - BigtableTableAdminAsyncClient, + BaseBigtableTableAdminAsyncClient, ) from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ( @@ -322,8 +322,8 @@ __all__ = ( "BigtableInstanceAdminClient", "BigtableInstanceAdminAsyncClient", - "BigtableTableAdminClient", - "BigtableTableAdminAsyncClient", + "BaseBigtableTableAdminClient", + "BaseBigtableTableAdminAsyncClient", "CreateAppProfileRequest", "CreateClusterMetadata", "CreateClusterRequest", @@ -444,3 +444,8 @@ "RestoreSourceType", "Type", ) + +import google.cloud.bigtable_admin_v2.overlay # noqa: F401 +from google.cloud.bigtable_admin_v2.overlay import * # noqa: F401, F403 + +__all__ += google.cloud.bigtable_admin_v2.overlay.__all__ diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py index 13f1c2670a4e..713b2408f2a6 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/__init__.py @@ -20,8 +20,8 @@ from .services.bigtable_instance_admin import BigtableInstanceAdminClient from .services.bigtable_instance_admin import BigtableInstanceAdminAsyncClient -from .services.bigtable_table_admin import BigtableTableAdminClient -from .services.bigtable_table_admin import BigtableTableAdminAsyncClient +from .services.bigtable_table_admin import BaseBigtableTableAdminClient +from .services.bigtable_table_admin import BaseBigtableTableAdminAsyncClient from .types.bigtable_instance_admin import CreateAppProfileRequest from .types.bigtable_instance_admin import CreateClusterMetadata @@ -144,16 +144,16 @@ from .types.types import Type __all__ = ( + "BaseBigtableTableAdminAsyncClient", "BigtableInstanceAdminAsyncClient", - "BigtableTableAdminAsyncClient", "AppProfile", "AuthorizedView", "AutoscalingLimits", "AutoscalingTargets", "Backup", "BackupInfo", + "BaseBigtableTableAdminClient", "BigtableInstanceAdminClient", - "BigtableTableAdminClient", "ChangeStreamConfig", "CheckConsistencyRequest", "CheckConsistencyResponse", @@ -268,3 +268,7 @@ "UpdateTableMetadata", "UpdateTableRequest", ) + +from .overlay import * # noqa: F403 + +__all__ += overlay.__all__ # noqa: F405 diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_metadata.json 
b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_metadata.json index 19918190fa43..9725d3384819 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_metadata.json +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_metadata.json @@ -492,7 +492,7 @@ "BigtableTableAdmin": { "clients": { "grpc": { - "libraryClient": "BigtableTableAdminClient", + "libraryClient": "BaseBigtableTableAdminClient", "rpcs": { "CheckConsistency": { "methods": [ @@ -626,7 +626,7 @@ }, "RestoreTable": { "methods": [ - "restore_table" + "_restore_table" ] }, "SetIamPolicy": { @@ -672,7 +672,7 @@ } }, "grpc-async": { - "libraryClient": "BigtableTableAdminAsyncClient", + "libraryClient": "BaseBigtableTableAdminAsyncClient", "rpcs": { "CheckConsistency": { "methods": [ @@ -806,7 +806,7 @@ }, "RestoreTable": { "methods": [ - "restore_table" + "_restore_table" ] }, "SetIamPolicy": { @@ -852,7 +852,7 @@ } }, "rest": { - "libraryClient": "BigtableTableAdminClient", + "libraryClient": "BaseBigtableTableAdminClient", "rpcs": { "CheckConsistency": { "methods": [ @@ -986,7 +986,7 @@ }, "RestoreTable": { "methods": [ - "restore_table" + "_restore_table" ] }, "SetIamPolicy": { diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/__init__.py new file mode 100644 index 000000000000..f66c7f8dd885 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/__init__.py @@ -0,0 +1,49 @@ +# Copyright 2025 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# This directory and all its subdirectories are the only handwritten +# components of the otherwise autogenerated google/cloud/bigtable/admin_v2. +# The purpose of the overlay directory is to add additional functionality to +# the autogenerated library while preserving its developer experience. These +# handwritten additions currently consist of the following: +# +# 1. TODO: Document final GcRule design choice here +# 2. An LRO class for restore_table that exposes an Operation for +# OptimizeRestoreTable, if that LRO exists. +# 3. New methods (wait_for_consistency and wait_for_replication) that return +# a polling future class for automatically polling check_consistency. +# +# This directory is structured to mirror that of a typical autogenerated library (e.g. +# services/types subdirectories), and the aforementioned handwritten additions are +# currently implemented as either types under overlay/types or in methods in an overwritten +# client class under overlay/services. 
+ +from .types import ( + AsyncRestoreTableOperation, + RestoreTableOperation, + WaitForConsistencyRequest, +) + +from .services.bigtable_table_admin import ( + BigtableTableAdminAsyncClient, + BigtableTableAdminClient, +) + +__all__ = ( + "AsyncRestoreTableOperation", + "RestoreTableOperation", + "BigtableTableAdminAsyncClient", + "BigtableTableAdminClient", + "WaitForConsistencyRequest", +) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/services/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/services/__init__.py new file mode 100644 index 000000000000..ab7686e260fc --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/services/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2025 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/services/bigtable_table_admin/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/services/bigtable_table_admin/__init__.py new file mode 100644 index 000000000000..f80e3234f064 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/services/bigtable_table_admin/__init__.py @@ -0,0 +1,23 @@ +# Copyright 2025 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# TODO: Add the async client after owlbot changes. + +from .async_client import BigtableTableAdminAsyncClient +from .client import BigtableTableAdminClient + +__all__ = ( + "BigtableTableAdminAsyncClient", + "BigtableTableAdminClient", +) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/services/bigtable_table_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/services/bigtable_table_admin/async_client.py new file mode 100644 index 000000000000..ee8e5757d23a --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/services/bigtable_table_admin/async_client.py @@ -0,0 +1,375 @@ +# Copyright 2025 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy +import functools + +from typing import Callable, Optional, Sequence, Tuple, Union +from google.api_core import gapic_v1 +from google.api_core import retry as retries + +try: + OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore + +from google.api_core import client_options as client_options_lib +from google.auth import credentials as ga_credentials # type: ignore + +from google.cloud.bigtable_admin_v2.types import bigtable_table_admin + +from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + async_client as base_client, +) +from google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.base import ( + BigtableTableAdminTransport, +) +from google.cloud.bigtable_admin_v2.overlay.types import ( + async_consistency, + async_restore_table, + wait_for_consistency_request, +) + +from google.cloud.bigtable.gapic_version import __version__ as bigtable_version + + +DEFAULT_CLIENT_INFO = copy.copy(base_client.DEFAULT_CLIENT_INFO) +DEFAULT_CLIENT_INFO.client_library_version = f"{bigtable_version}-admin-overlay-async" + + +class BigtableTableAdminAsyncClient(base_client.BaseBigtableTableAdminAsyncClient): + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Optional[ + Union[ + str, + BigtableTableAdminTransport, + Callable[..., BigtableTableAdminTransport], + ] + ] = "grpc_asyncio", + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the Bigtable table admin async client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Optional[Union[str,BigtableTableAdminTransport,Callable[..., BigtableTableAdminTransport]]]): + The transport to use, or a Callable that constructs and returns a new transport to use. + If a Callable is given, it will be called with the same set of initialization + arguments as used in the BigtableTableAdminTransport constructor. + If set to None, a transport is chosen automatically. 
+ client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): + Custom options for the client. + + 1. The ``api_endpoint`` property can be used to override the + default endpoint provided by the client when ``transport`` is + not explicitly provided. Only if this property is not set and + ``transport`` was not explicitly provided, the endpoint is + determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment + variable, which have one of the following values: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto-switch to the + default mTLS endpoint if client certificate is present; this is + the default value). + + 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide a client certificate for mTLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + 3. The ``universe_domain`` property can be used to override the + default "googleapis.com" universe. Note that ``api_endpoint`` + property still takes precedence; and ``universe_domain`` is + currently not supported for mTLS. + + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + super(BigtableTableAdminAsyncClient, self).__init__( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def restore_table( + self, + request: Optional[Union[bigtable_table_admin.RestoreTableRequest, dict]] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> async_restore_table.AsyncRestoreTableOperation: + r"""Create a new table by restoring from a completed backup. The + returned table :class:`long-running operation + ` + can be used to track the progress of the operation, and to cancel it. The + :attr:`metadata ` field type is + :class:`RestoreTableMetadata `. + The :meth:`response ` type is + :class:`google.cloud.bigtable_admin_v2.types.Table`, if successful. + + Additionally, the returned :class:`long-running-operation ` + provides a method, :meth:`google.cloud.bigtable_admin_v2.overlay.types.async_restore_table.AsyncRestoreTableOperation.optimize_restore_table_operation` that + provides access to a :class:`google.api_core.operation_async.AsyncOperation` object representing the OptimizeRestoreTable long-running-operation + after the current one has completed. + + .. code-block:: python + + # This snippet should be regarded as a code template only. + # + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+            # - It may require specifying regional endpoints when creating the service
+            #   client as shown in:
+            #   https://googleapis.dev/python/google-api-core/latest/client_options.html
+            from google.cloud.bigtable import admin_v2
+
+            async def sample_restore_table():
+                # Create a client
+                client = admin_v2.BigtableTableAdminAsyncClient()
+
+                # Initialize request argument(s)
+                request = admin_v2.RestoreTableRequest(
+                    backup="backup_value",
+                    parent="parent_value",
+                    table_id="table_id_value",
+                )
+
+                # Make the request
+                operation = await client.restore_table(request=request)
+
+                print("Waiting for operation to complete...")
+
+                response = await operation.result()
+
+                # Handle the response
+                print(response)
+
+                # Handle LRO2
+                optimize_operation = await operation.optimize_restored_table_operation()
+
+                if optimize_operation:
+                    print("Waiting for table optimization to complete...")
+
+                    response = await optimize_operation.result()
+
+        Args:
+            request (Union[google.cloud.bigtable_admin_v2.types.RestoreTableRequest, dict]):
+                The request object. The request for
+                [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable].
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+                sent along with the request as metadata. Normally, each value must be of type `str`,
+                but for metadata keys ending with the suffix `-bin`, the corresponding values must
+                be of type `bytes`.
+
+        Returns:
+            google.cloud.bigtable_admin_v2.overlay.types.async_restore_table.AsyncRestoreTableOperation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Table` A collection of user data indexed by row, column, and timestamp.
+                Each table is served using the resources of its
+                parent cluster.
+        """
+        operation = await self._restore_table(
+            request=request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        restore_table_operation = async_restore_table.AsyncRestoreTableOperation(
+            self._client._transport.operations_client, operation
+        )
+        return restore_table_operation
+
+    async def wait_for_consistency(
+        self,
+        request: Optional[
+            Union[wait_for_consistency_request.WaitForConsistencyRequest, dict]
+        ] = None,
+        *,
+        name: Optional[str] = None,
+        retry: OptionalRetry = gapic_v1.method.DEFAULT,
+        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+    ) -> bool:
+        r"""Blocks until the mutations made to the specified Table before this call
+        have been replicated, or until reads using an app profile with `DataBoostIsolationReadOnly`
+        can see all writes committed before the token was created. This is done by generating
+        a consistency token for the Table, then polling :meth:`check_consistency`
+        for the specified table until the call returns True.
+
+        .. code-block:: python
+
+            # This snippet should be regarded as a code template only.
+            # It will require modifications to work:
+            # - It may require correct/in-range values for request initialization.
+            # - It may require specifying regional endpoints when creating the service
+            #   client as shown in:
+            #   https://googleapis.dev/python/google-api-core/latest/client_options.html
+            from google.cloud.bigtable import admin_v2
+
+            async def sample_wait_for_consistency():
+                # Create a client
+                client = admin_v2.BigtableTableAdminAsyncClient()
+
+                # Initialize request argument(s)
+                request = admin_v2.WaitForConsistencyRequest(
+                    name="name_value",
+                )
+
+                # Make the request
+                print("Waiting for operation to complete...")
+
+                response = await client.wait_for_consistency(request=request)
+
+                # Handle the response
+                print(response)
+
+        Args:
+            request (Union[google.cloud.bigtable_admin_v2.overlay.types.WaitForConsistencyRequest, dict]):
+                The request object.
+            name (str):
+                Required. The unique name of the Table for which to
+                create a consistency token. Values are of the form
+                ``projects/{project}/instances/{instance}/tables/{table}``.
+
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+                sent along with the request as metadata. Normally, each value must be of type `str`,
+                but for metadata keys ending with the suffix `-bin`, the corresponding values must
+                be of type `bytes`.
+
+        Returns:
+            bool:
+                If the `standard_read_remote_writes` mode is specified in the request object, returns
+                `True` after the mutations of the specified table have been fully replicated. If the
+                `data_boost_read_local_writes` mode is specified in the request object, returns `True`
+                after reads using an app profile with `DataBoostIsolationReadOnly` can see all writes
+                committed before the token was created.
+
+        Raises:
+            google.api_core.GoogleAPICallError: If the operation errors or if
+                the timeout is reached before the operation completes.
+        """
+        # Create or coerce a protobuf request object.
+        # - Quick check: If we got a request object, we should *not* have
+        #   gotten any keyword arguments that map to the request.
+        flattened_params = [name]
+        has_flattened_params = (
+            len([param for param in flattened_params if param is not None]) > 0
+        )
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # - Use the request object if provided (there's no risk of modifying the input as
+        #   there are no flattened fields), or create one.
+        if not isinstance(
+            request, wait_for_consistency_request.WaitForConsistencyRequest
+        ):
+            request = wait_for_consistency_request.WaitForConsistencyRequest(request)
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if name is not None:
+            request.name = name
+
+        # Generate the consistency token.
+        generate_consistency_token_request = (
+            bigtable_table_admin.GenerateConsistencyTokenRequest(
+                name=request.name,
+            )
+        )
+
+        generate_consistency_response = await self.generate_consistency_token(
+            generate_consistency_token_request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Create the CheckConsistencyRequest object.
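+        # The token generated above is baked into this request, so every
+        # CheckConsistency poll below checks visibility of the same set of
+        # writes (those committed before the token was created).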
+ check_consistency_request = bigtable_table_admin.CheckConsistencyRequest( + name=request.name, + consistency_token=generate_consistency_response.consistency_token, + ) + + # Since the default values of StandardReadRemoteWrites and DataBoostReadLocalWrites evaluate to + # False in proto plus, we cannot do a simple "if request.standard_read_remote_writes" to check + # whether or not that field is defined in the original request object. + mode_oneof_field = request._pb.WhichOneof("mode") + if mode_oneof_field: + setattr( + check_consistency_request, + mode_oneof_field, + getattr(request, mode_oneof_field), + ) + + check_consistency_call = functools.partial( + self.check_consistency, + check_consistency_request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Block and wait until the polling harness returns True. + check_consistency_future = ( + async_consistency._AsyncCheckConsistencyPollingFuture( + check_consistency_call + ) + ) + return await check_consistency_future.result() diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/services/bigtable_table_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/services/bigtable_table_admin/client.py new file mode 100644 index 000000000000..1b6770b10195 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/services/bigtable_table_admin/client.py @@ -0,0 +1,373 @@ +# Copyright 2025 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
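+
+# A minimal usage sketch of the synchronous overlay client (illustrative only;
+# the project, instance, and table names below are placeholders):
+#
+#   from google.cloud.bigtable import admin_v2
+#
+#   client = admin_v2.BigtableTableAdminClient()
+#   table_name = client.table_path("my-project", "my-instance", "my-table")
+#   if client.wait_for_consistency(name=table_name):
+#       print("All writes made before the call are now visible.")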
+ +import copy +import functools + +from typing import Callable, Optional, Sequence, Tuple, Union +from google.api_core import gapic_v1 +from google.api_core import retry as retries + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object, None] # type: ignore + +from google.api_core import client_options as client_options_lib +from google.auth import credentials as ga_credentials # type: ignore + +from google.cloud.bigtable_admin_v2.types import bigtable_table_admin + +from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( + client as base_client, +) +from google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.base import ( + BigtableTableAdminTransport, +) +from google.cloud.bigtable_admin_v2.overlay.types import ( + consistency, + restore_table, + wait_for_consistency_request, +) + +from google.cloud.bigtable.gapic_version import __version__ as bigtable_version + + +DEFAULT_CLIENT_INFO = copy.copy(base_client.DEFAULT_CLIENT_INFO) +DEFAULT_CLIENT_INFO.client_library_version = f"{bigtable_version}-admin-overlay" + + +class BigtableTableAdminClient(base_client.BaseBigtableTableAdminClient): + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Optional[ + Union[ + str, + BigtableTableAdminTransport, + Callable[..., BigtableTableAdminTransport], + ] + ] = None, + client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the Bigtable table admin client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Optional[Union[str,BigtableTableAdminTransport,Callable[..., BigtableTableAdminTransport]]]): + The transport to use, or a Callable that constructs and returns a new transport. + If a Callable is given, it will be called with the same set of initialization + arguments as used in the BigtableTableAdminTransport constructor. + If set to None, a transport is chosen automatically. + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): + Custom options for the client. + + 1. The ``api_endpoint`` property can be used to override the + default endpoint provided by the client when ``transport`` is + not explicitly provided. Only if this property is not set and + ``transport`` was not explicitly provided, the endpoint is + determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment + variable, which have one of the following values: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto-switch to the + default mTLS endpoint if client certificate is present; this is + the default value). + + 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide a client certificate for mTLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + 3. 
The ``universe_domain`` property can be used to override the
+                default "googleapis.com" universe. Note that the ``api_endpoint``
+                property still takes precedence; and ``universe_domain`` is
+                currently not supported for mTLS.
+
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+        """
+        super(BigtableTableAdminClient, self).__init__(
+            credentials=credentials,
+            transport=transport,
+            client_options=client_options,
+            client_info=client_info,
+        )
+
+    def restore_table(
+        self,
+        request: Optional[Union[bigtable_table_admin.RestoreTableRequest, dict]] = None,
+        *,
+        retry: OptionalRetry = gapic_v1.method.DEFAULT,
+        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+    ) -> restore_table.RestoreTableOperation:
+        r"""Create a new table by restoring from a completed backup. The
+        returned table :class:`long-running operation
+        `
+        can be used to track the progress of the operation, and to cancel it. The
+        :attr:`metadata ` field type is
+        :class:`RestoreTableMetadata `.
+        The :meth:`response ` type is
+        :class:`google.cloud.bigtable_admin_v2.types.Table`, if successful.
+
+        Additionally, the returned :class:`long-running-operation `
+        provides a method, :meth:`google.cloud.bigtable_admin_v2.overlay.types.restore_table.RestoreTableOperation.optimize_restored_table_operation` that
+        provides access to a :class:`google.api_core.operation.Operation` object representing the OptimizeRestoredTable long-running-operation
+        after the current one has completed.
+
+        .. code-block:: python
+
+            # This snippet should be regarded as a code template only.
+            #
+            # It will require modifications to work:
+            # - It may require correct/in-range values for request initialization.
+            # - It may require specifying regional endpoints when creating the service
+            #   client as shown in:
+            #   https://googleapis.dev/python/google-api-core/latest/client_options.html
+            from google.cloud.bigtable import admin_v2
+
+            def sample_restore_table():
+                # Create a client
+                client = admin_v2.BigtableTableAdminClient()
+
+                # Initialize request argument(s)
+                request = admin_v2.RestoreTableRequest(
+                    backup="backup_value",
+                    parent="parent_value",
+                    table_id="table_id_value",
+                )
+
+                # Make the request
+                operation = client.restore_table(request=request)
+
+                print("Waiting for operation to complete...")
+
+                response = operation.result()
+
+                # Handle the response
+                print(response)
+
+                # Handle LRO2
+                optimize_operation = operation.optimize_restored_table_operation()
+
+                if optimize_operation:
+                    print("Waiting for table optimization to complete...")
+
+                    response = optimize_operation.result()
+
+        Args:
+            request (Union[google.cloud.bigtable_admin_v2.types.RestoreTableRequest, dict]):
+                The request object. The request for
+                [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable].
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+                sent along with the request as metadata. 
Normally, each value must be of type `str`,
+                but for metadata keys ending with the suffix `-bin`, the corresponding values must
+                be of type `bytes`.
+
+        Returns:
+            google.cloud.bigtable_admin_v2.overlay.types.restore_table.RestoreTableOperation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Table` A collection of user data indexed by row, column, and timestamp.
+                Each table is served using the resources of its
+                parent cluster.
+        """
+        operation = self._restore_table(
+            request=request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        restore_table_operation = restore_table.RestoreTableOperation(
+            self._transport.operations_client, operation
+        )
+        return restore_table_operation
+
+    def wait_for_consistency(
+        self,
+        request: Optional[
+            Union[wait_for_consistency_request.WaitForConsistencyRequest, dict]
+        ] = None,
+        *,
+        name: Optional[str] = None,
+        retry: OptionalRetry = gapic_v1.method.DEFAULT,
+        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+    ) -> bool:
+        r"""Blocks until the mutations made to the specified Table before this call
+        have been replicated, or until reads using an app profile with `DataBoostIsolationReadOnly`
+        can see all writes committed before the token was created. This is done by generating
+        a consistency token for the Table, then polling :meth:`check_consistency`
+        for the specified table until the call returns True.
+
+        .. code-block:: python
+
+            # This snippet should be regarded as a code template only.
+            # It will require modifications to work:
+            # - It may require correct/in-range values for request initialization.
+            # - It may require specifying regional endpoints when creating the service
+            #   client as shown in:
+            #   https://googleapis.dev/python/google-api-core/latest/client_options.html
+            from google.cloud.bigtable import admin_v2
+
+            def sample_wait_for_consistency():
+                # Create a client
+                client = admin_v2.BigtableTableAdminClient()
+
+                # Initialize request argument(s)
+                request = admin_v2.WaitForConsistencyRequest(
+                    name="name_value",
+                )
+
+                # Make the request
+                print("Waiting for operation to complete...")
+
+                response = client.wait_for_consistency(request=request)
+
+                # Handle the response
+                print(response)
+
+        Args:
+            request (Union[google.cloud.bigtable_admin_v2.overlay.types.WaitForConsistencyRequest, dict]):
+                The request object.
+            name (str):
+                Required. The unique name of the Table for which to
+                create a consistency token. Values are of the form
+                ``projects/{project}/instances/{instance}/tables/{table}``.
+
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+                sent along with the request as metadata. Normally, each value must be of type `str`,
+                but for metadata keys ending with the suffix `-bin`, the corresponding values must
+                be of type `bytes`.
+
+        Returns:
+            bool:
+                If the `standard_read_remote_writes` mode is specified in the request object, returns
+                `True` after the mutations of the specified table have been fully replicated. 
If the + `data_boost_read_local_writes` mode is specified in the request object, returns `True` + after reads using an app profile with `DataBoostIsolationReadOnly` can see all writes + committed before the token was created. + + Raises: + google.api_core.GoogleAPICallError: If the operation errors or if + the timeout is reached before the operation completes. + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + flattened_params = [name] + has_flattened_params = ( + len([param for param in flattened_params if param is not None]) > 0 + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance( + request, wait_for_consistency_request.WaitForConsistencyRequest + ): + request = wait_for_consistency_request.WaitForConsistencyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Generate the consistency token. + generate_consistency_token_request = ( + bigtable_table_admin.GenerateConsistencyTokenRequest( + name=request.name, + ) + ) + + generate_consistency_response = self.generate_consistency_token( + generate_consistency_token_request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Create the CheckConsistencyRequest object. + check_consistency_request = bigtable_table_admin.CheckConsistencyRequest( + name=request.name, + consistency_token=generate_consistency_response.consistency_token, + ) + + # Since the default values of StandardReadRemoteWrites and DataBoostReadLocalWrites evaluate to + # False in proto plus, we cannot do a simple "if request.standard_read_remote_writes" to check + # whether or not that field is defined in the original request object. + mode_oneof_field = request._pb.WhichOneof("mode") + if mode_oneof_field: + setattr( + check_consistency_request, + mode_oneof_field, + getattr(request, mode_oneof_field), + ) + + check_consistency_call = functools.partial( + self.check_consistency, + check_consistency_request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Block and wait until the polling harness returns True. + check_consistency_future = consistency._CheckConsistencyPollingFuture( + check_consistency_call + ) + return check_consistency_future.result() diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/types/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/types/__init__.py new file mode 100644 index 000000000000..16b032ac4743 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/types/__init__.py @@ -0,0 +1,31 @@ +# Copyright 2025 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from .async_restore_table import ( + AsyncRestoreTableOperation, +) + +from .restore_table import ( + RestoreTableOperation, +) + +from .wait_for_consistency_request import ( + WaitForConsistencyRequest, +) + +__all__ = ( + "AsyncRestoreTableOperation", + "RestoreTableOperation", + "WaitForConsistencyRequest", +) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/types/async_consistency.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/types/async_consistency.py new file mode 100644 index 000000000000..0703940d5138 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/types/async_consistency.py @@ -0,0 +1,104 @@ +# Copyright 2025 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Awaitable, Union, Callable + +from google.api_core.future import async_future +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.cloud.bigtable_admin_v2.types import bigtable_table_admin + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object, None] # type: ignore + + +# The consistency check could take a very long time, so we wait indefinitely. +DEFAULT_RETRY = async_future.DEFAULT_RETRY.with_timeout(None) + + +class _AsyncCheckConsistencyPollingFuture(async_future.AsyncFuture): + """A Future that polls an underlying `check_consistency` operation until it returns True. + + **This class should not be instantiated by users** and should only be instantiated by the admin + client's + :meth:`google.cloud.bigtable_admin_v2.overlay.services.bigtable_table_admin.AsyncBigtableTableAdminClient.wait_for_consistency` + or + :meth:`google.cloud.bigtable_admin_v2.overlay.services.bigtable_table_admin.AsyncBigtableTableAdminClient.wait_for_replication` + methods. + + Args: + check_consistency_call(Callable[ + [Optional[google.api_core.retry.Retry], + google.cloud.bigtable_admin_v2.types.CheckConsistencyResponse]): + A :meth:`check_consistency + ` + call from the admin client. The call should fix every user parameter except for retry, + which will be done via :meth:`functools.partial`. + default_retry(Optional[google.api_core.retry.Retry]): The `retry` parameter passed in to either + :meth:`wait_for_consistency + ` + or :meth:`wait_for_replication + ` + retry (google.api_core.retry.AsyncRetry): The retry configuration used + when polling. This can be used to control how often :meth:`done` + is polled. Regardless of the retry's ``deadline``, it will be + overridden by the ``timeout`` argument to :meth:`result`. 
+ """ + + def __init__( + self, + check_consistency_call: Callable[ + [OptionalRetry], Awaitable[bigtable_table_admin.CheckConsistencyResponse] + ], + retry: retries.AsyncRetry = DEFAULT_RETRY, + **kwargs + ): + super(_AsyncCheckConsistencyPollingFuture, self).__init__(retry=retry, **kwargs) + + # Done is called with two different scenarios, retry is specified or not specified. + # API_call will be a functools partial with everything except retry specified because of + # that. + self._check_consistency_call = check_consistency_call + + async def done(self, retry: OptionalRetry = None): + """Polls the underlying `check_consistency` call to see if the future is complete. + + Args: + retry (google.api_core.retry.Retry): (Optional) How to retry the + polling RPC (to not be confused with polling configuration. See + the documentation for :meth:`result ` + for details). + + Returns: + bool: True if the future is complete, False otherwise. + """ + if self._future.done(): + return True + + try: + check_consistency_response = await self._check_consistency_call() + if check_consistency_response.consistent: + self.set_result(True) + + return check_consistency_response.consistent + except Exception as e: + self.set_exception(e) + + def cancel(self): + raise NotImplementedError("Cannot cancel consistency token operation") + + def cancelled(self): + raise NotImplementedError("Cannot cancel consistency token operation") diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/types/async_restore_table.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/types/async_restore_table.py new file mode 100644 index 000000000000..9edfb4963cd3 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/types/async_restore_table.py @@ -0,0 +1,99 @@ +# Copyright 2025 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Optional + +from google.api_core import exceptions +from google.api_core import operation_async +from google.protobuf import empty_pb2 + +from google.cloud.bigtable_admin_v2.types import OptimizeRestoredTableMetadata + + +class AsyncRestoreTableOperation(operation_async.AsyncOperation): + """A Future for interacting with Bigtable Admin's RestoreTable Long-Running Operation. + + This is needed to expose a potential long-running operation that might run after this operation + finishes, OptimizeRestoreTable. This is exposed via the the :meth:`optimize_restore_table_operation` + method. + + **This class should not be instantiated by users** and should only be instantiated by the admin + client's :meth:`restore_table + ` + method. + + Args: + operations_client (google.api_core.operations_v1.AbstractOperationsClient): The operations + client from the admin client class's transport. 
+        restore_table_operation (google.api_core.operation_async.AsyncOperation): A
+            :class:`google.api_core.operation_async.AsyncOperation`
+            instance resembling a RestoreTable long-running operation
+    """
+
+    def __init__(
+        self, operations_client, restore_table_operation: operation_async.AsyncOperation
+    ):
+        self._operations_client = operations_client
+        self._optimize_restored_table_operation = None
+        super().__init__(
+            restore_table_operation._operation,
+            restore_table_operation._refresh,
+            restore_table_operation._cancel,
+            restore_table_operation._result_type,
+            restore_table_operation._metadata_type,
+            retry=restore_table_operation._retry,
+        )
+
+    async def optimize_restored_table_operation(
+        self,
+    ) -> Optional[operation_async.AsyncOperation]:
+        """Gets the OptimizeRestoredTable long-running operation that runs after this operation finishes.
+
+        The current operation might not trigger a follow-up OptimizeRestoredTable operation, in which case, this
+        method will return `None`.
+
+        This method must not be called before the parent restore_table operation is complete.
+
+        Returns:
+            Optional[google.api_core.operation_async.AsyncOperation]:
+                An object representing a long-running operation, or None if there is no OptimizeRestoredTable operation
+                after this one.
+
+        Raises:
+            google.api_core.GoogleAPIError: raised when accessed before the restore_table operation is complete
+        """
+        if not await self.done():
+            raise exceptions.GoogleAPIError(
+                "optimize_restored_table operation can't be accessed until the restore_table operation is complete"
+            )
+
+        if self._optimize_restored_table_operation is not None:
+            return self._optimize_restored_table_operation
+
+        operation_name = self.metadata.optimize_table_operation_name
+
+        # When the RestoreTable operation finishes, it might not necessarily trigger
+        # an optimize operation.
+        if operation_name:
+            gapic_operation = await self._operations_client.get_operation(
+                name=operation_name
+            )
+            self._optimize_restored_table_operation = operation_async.from_gapic(
+                gapic_operation,
+                self._operations_client,
+                empty_pb2.Empty,
+                metadata_type=OptimizeRestoredTableMetadata,
+            )
+            return self._optimize_restored_table_operation
+        else:
+            # no optimize operation found
+            return None
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/types/consistency.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/types/consistency.py
new file mode 100644
index 000000000000..63a110975442
--- /dev/null
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/types/consistency.py
@@ -0,0 +1,101 @@
+# Copyright 2025 Google LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
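+
+# A rough sketch of how the overlay client drives this future (illustrative
+# only; it mirrors the wiring in the overlay BigtableTableAdminClient):
+#
+#   call = functools.partial(client.check_consistency, check_consistency_request)
+#   future = _CheckConsistencyPollingFuture(call)
+#   consistent = future.result()  # re-invokes `call` until it reports consistent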
+ +from typing import Union, Callable + +from google.api_core.future import polling +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.cloud.bigtable_admin_v2.types import bigtable_table_admin + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object, None] # type: ignore + + +# The consistency check could take a very long time, so we wait indefinitely. +DEFAULT_RETRY = polling.DEFAULT_POLLING.with_timeout(None) + + +class _CheckConsistencyPollingFuture(polling.PollingFuture): + """A Future that polls an underlying `check_consistency` operation until it returns True. + + **This class should not be instantiated by users** and should only be instantiated by the admin + client's + :meth:`google.cloud.bigtable_admin_v2.overlay.services.bigtable_table_admin.BigtableTableAdminClient.wait_for_consistency` + or + :meth:`google.cloud.bigtable_admin_v2.overlay.services.bigtable_table_admin.BigtableTableAdminClient.wait_for_replication` + methods. + + Args: + check_consistency_call(Callable[ + [Optional[google.api_core.retry.Retry], + google.cloud.bigtable_admin_v2.types.CheckConsistencyResponse]): + A :meth:`check_consistency + ` + call from the admin client. The call should fix every user parameter, + which will be done via :meth:`functools.partial`. + polling (google.api_core.retry.Retry): The configuration used for polling. + This parameter controls how often :meth:`done` is polled. If the + ``timeout`` argument is specified in the :meth:`result + ` method it will + override the ``polling.timeout`` property. + """ + + def __init__( + self, + check_consistency_call: Callable[ + [OptionalRetry], bigtable_table_admin.CheckConsistencyResponse + ], + polling: retries.Retry = DEFAULT_RETRY, + **kwargs + ): + super(_CheckConsistencyPollingFuture, self).__init__(polling=polling, **kwargs) + + # Done is called with two different scenarios, retry is specified or not specified. + # API_call will be a functools partial with everything except retry specified because of + # that. + self._check_consistency_call = check_consistency_call + + def done(self, retry: OptionalRetry = None): + """Polls the underlying `check_consistency` call to see if the future is complete. + + Args: + retry (google.api_core.retry.Retry): (Optional) How to retry the + polling RPC (to not be confused with polling configuration. See + the documentation for :meth:`result ` + for details). + + Returns: + bool: True if the future is complete, False otherwise. + """ + + if self._result_set: + return True + + try: + check_consistency_response = self._check_consistency_call() + if check_consistency_response.consistent: + self.set_result(True) + + return check_consistency_response.consistent + except Exception as e: + self.set_exception(e) + + def cancel(self): + raise NotImplementedError("Cannot cancel consistency token operation") + + def cancelled(self): + raise NotImplementedError("Cannot cancel consistency token operation") diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/types/restore_table.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/types/restore_table.py new file mode 100644 index 000000000000..84c9c5d91644 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/types/restore_table.py @@ -0,0 +1,102 @@ +# Copyright 2025 Google LLC. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Optional + +from google.api_core import exceptions +from google.api_core import operation +from google.protobuf import empty_pb2 + +from google.cloud.bigtable_admin_v2.types import OptimizeRestoredTableMetadata + + +class RestoreTableOperation(operation.Operation): + """A Future for interacting with Bigtable Admin's RestoreTable Long-Running Operation. + + This is needed to expose a potential long-running operation that might run after this operation + finishes, OptimizeRestoreTable. This is exposed via the the :meth:`optimize_restore_table_operation` + method. + + **This class should not be instantiated by users** and should only be instantiated by the admin + client's :meth:`restore_table + ` + method. + + Args: + operations_client (google.api_core.operations_v1.AbstractOperationsClient): The operations + client from the admin client class's transport. + restore_table_operation (google.api_core.operation.Operation): A :class:`google.api_core.operation.Operation` + instance resembling a RestoreTable long-running operation + """ + + def __init__(self, operations_client, restore_table_operation: operation.Operation): + self._operations_client = operations_client + self._optimize_restored_table_operation = None + super().__init__( + restore_table_operation._operation, + restore_table_operation._refresh, + restore_table_operation._cancel, + restore_table_operation._result_type, + restore_table_operation._metadata_type, + polling=restore_table_operation._polling, + ) + + def optimize_restored_table_operation(self) -> Optional[operation.Operation]: + """Gets the OptimizeRestoredTable long-running operation that runs after this operation finishes. + + This must not be called before the parent restore_table operation is complete. You can guarantee + this happening by calling this function after this class's :meth:`google.api_core.operation.Operation.result` + method. + + The follow-up operation has + :attr:`metadata ` type + :class:`OptimizeRestoredTableMetadata + ` + and no return value, but can be waited for with `result`. + + The current operation might not trigger a follow-up OptimizeRestoredTable operation, in which case, this + method will return `None`. + + Returns: + Optional[google.api_core.operation.Operation]: + An object representing a long-running operation, or None if there is no OptimizeRestoredTable operation + after this one. + + Raises: + google.api_core.GoogleAPIError: raised when accessed before the restore_table operation is complete + """ + if not self.done(): + raise exceptions.GoogleAPIError( + "optimize_restored_table operation can't be accessed until the restore_table operation is complete" + ) + + if self._optimize_restored_table_operation is not None: + return self._optimize_restored_table_operation + + operation_name = self.metadata.optimize_table_operation_name + + # When the RestoreTable operation finishes, it might not necessarily trigger + # an optimize operation. 
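+        # The follow-up operation's name is surfaced through
+        # RestoreTableMetadata.optimize_table_operation_name; an empty name
+        # means no OptimizeRestoredTable operation was scheduled.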
+ if operation_name: + gapic_operation = self._operations_client.get_operation(name=operation_name) + self._optimize_restored_table_operation = operation.from_gapic( + gapic_operation, + self._operations_client, + empty_pb2.Empty, + metadata_type=OptimizeRestoredTableMetadata, + ) + return self._optimize_restored_table_operation + else: + # no optimize operation found + return None diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/types/wait_for_consistency_request.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/types/wait_for_consistency_request.py new file mode 100644 index 000000000000..51070230a857 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/overlay/types/wait_for_consistency_request.py @@ -0,0 +1,85 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto + +from google.cloud.bigtable_admin_v2.types import bigtable_table_admin + +__protobuf__ = proto.module( + package="google.bigtable.admin.v2", + manifest={ + "WaitForConsistencyRequest", + }, +) + + +# The WaitForConsistencyRequest object is not a real proto. It is a wrapper +# class intended for the handwritten method wait_for_consistency. It is +# constructed by extending a Proto Plus message class to get a developer +# experience closest to that of an autogenerated GAPIC method, and to allow +# developers to manipulate the wrapper class like they would a request proto +# for an autogenerated call. +class WaitForConsistencyRequest(proto.Message): + """Wrapper class for encapsulating parameters for the `wait_for_consistency` method in both + :class:`google.cloud.bigtable_admin_v2.overlay.services.bigtable_table_admin.client.BigtableTableAdminClient` + and :class:`google.cloud.bigtable_admin_v2.overlay.services.bigtable_table_admin.async_client.BigtableTableAdmiAsyncClient`. + + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + name (str): + Required. The unique name of the Table for which to check + replication consistency. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + standard_read_remote_writes (google.cloud.bigtable_admin_v2.types.StandardReadRemoteWrites): + Checks that reads using an app profile with + ``StandardIsolation`` can see all writes committed before + the token was created, even if the read and write target + different clusters. + + This field is a member of `oneof`_ ``mode``. 
+ data_boost_read_local_writes (google.cloud.bigtable_admin_v2.types.DataBoostReadLocalWrites): + Checks that reads using an app profile with + ``DataBoostIsolationReadOnly`` can see all writes committed + before the token was created, but only if the read and write + target the same cluster. + + This field is a member of `oneof`_ ``mode``. + """ + + name: str = proto.Field(proto.STRING, number=1) + standard_read_remote_writes: bigtable_table_admin.StandardReadRemoteWrites = ( + proto.Field( + proto.MESSAGE, + number=2, + oneof="mode", + message=bigtable_table_admin.StandardReadRemoteWrites, + ) + ) + data_boost_read_local_writes: bigtable_table_admin.DataBoostReadLocalWrites = ( + proto.Field( + proto.MESSAGE, + number=3, + oneof="mode", + message=bigtable_table_admin.DataBoostReadLocalWrites, + ) + ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py index cd916a2c8020..c5e8544d6423 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py @@ -13,10 +13,10 @@ # See the License for the specific language governing permissions and # limitations under the License. # -from .client import BigtableTableAdminClient -from .async_client import BigtableTableAdminAsyncClient +from .client import BaseBigtableTableAdminClient +from .async_client import BaseBigtableTableAdminAsyncClient __all__ = ( - "BigtableTableAdminClient", - "BigtableTableAdminAsyncClient", + "BaseBigtableTableAdminClient", + "BaseBigtableTableAdminAsyncClient", ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index ba25264dd031..c3047b3cf33e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -58,7 +58,7 @@ from google.protobuf import timestamp_pb2 # type: ignore from .transports.base import BigtableTableAdminTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import BigtableTableAdminGrpcAsyncIOTransport -from .client import BigtableTableAdminClient +from .client import BaseBigtableTableAdminClient try: from google.api_core import client_logging # type: ignore @@ -70,7 +70,7 @@ _LOGGER = std_logging.getLogger(__name__) -class BigtableTableAdminAsyncClient: +class BaseBigtableTableAdminAsyncClient: """Service for creating, configuring, and deleting Cloud Bigtable tables. @@ -78,62 +78,66 @@ class BigtableTableAdminAsyncClient: within the tables. """ - _client: BigtableTableAdminClient + _client: BaseBigtableTableAdminClient # Copy defaults from the synchronous client for use here. # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead. 
- DEFAULT_ENDPOINT = BigtableTableAdminClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = BigtableTableAdminClient.DEFAULT_MTLS_ENDPOINT - _DEFAULT_ENDPOINT_TEMPLATE = BigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE - _DEFAULT_UNIVERSE = BigtableTableAdminClient._DEFAULT_UNIVERSE + DEFAULT_ENDPOINT = BaseBigtableTableAdminClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = BaseBigtableTableAdminClient.DEFAULT_MTLS_ENDPOINT + _DEFAULT_ENDPOINT_TEMPLATE = BaseBigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE + _DEFAULT_UNIVERSE = BaseBigtableTableAdminClient._DEFAULT_UNIVERSE - authorized_view_path = staticmethod(BigtableTableAdminClient.authorized_view_path) + authorized_view_path = staticmethod( + BaseBigtableTableAdminClient.authorized_view_path + ) parse_authorized_view_path = staticmethod( - BigtableTableAdminClient.parse_authorized_view_path + BaseBigtableTableAdminClient.parse_authorized_view_path ) - backup_path = staticmethod(BigtableTableAdminClient.backup_path) - parse_backup_path = staticmethod(BigtableTableAdminClient.parse_backup_path) - cluster_path = staticmethod(BigtableTableAdminClient.cluster_path) - parse_cluster_path = staticmethod(BigtableTableAdminClient.parse_cluster_path) + backup_path = staticmethod(BaseBigtableTableAdminClient.backup_path) + parse_backup_path = staticmethod(BaseBigtableTableAdminClient.parse_backup_path) + cluster_path = staticmethod(BaseBigtableTableAdminClient.cluster_path) + parse_cluster_path = staticmethod(BaseBigtableTableAdminClient.parse_cluster_path) crypto_key_version_path = staticmethod( - BigtableTableAdminClient.crypto_key_version_path + BaseBigtableTableAdminClient.crypto_key_version_path ) parse_crypto_key_version_path = staticmethod( - BigtableTableAdminClient.parse_crypto_key_version_path + BaseBigtableTableAdminClient.parse_crypto_key_version_path ) - instance_path = staticmethod(BigtableTableAdminClient.instance_path) - parse_instance_path = staticmethod(BigtableTableAdminClient.parse_instance_path) - schema_bundle_path = staticmethod(BigtableTableAdminClient.schema_bundle_path) + instance_path = staticmethod(BaseBigtableTableAdminClient.instance_path) + parse_instance_path = staticmethod(BaseBigtableTableAdminClient.parse_instance_path) + schema_bundle_path = staticmethod(BaseBigtableTableAdminClient.schema_bundle_path) parse_schema_bundle_path = staticmethod( - BigtableTableAdminClient.parse_schema_bundle_path + BaseBigtableTableAdminClient.parse_schema_bundle_path ) - snapshot_path = staticmethod(BigtableTableAdminClient.snapshot_path) - parse_snapshot_path = staticmethod(BigtableTableAdminClient.parse_snapshot_path) - table_path = staticmethod(BigtableTableAdminClient.table_path) - parse_table_path = staticmethod(BigtableTableAdminClient.parse_table_path) + snapshot_path = staticmethod(BaseBigtableTableAdminClient.snapshot_path) + parse_snapshot_path = staticmethod(BaseBigtableTableAdminClient.parse_snapshot_path) + table_path = staticmethod(BaseBigtableTableAdminClient.table_path) + parse_table_path = staticmethod(BaseBigtableTableAdminClient.parse_table_path) common_billing_account_path = staticmethod( - BigtableTableAdminClient.common_billing_account_path + BaseBigtableTableAdminClient.common_billing_account_path ) parse_common_billing_account_path = staticmethod( - BigtableTableAdminClient.parse_common_billing_account_path + BaseBigtableTableAdminClient.parse_common_billing_account_path ) - common_folder_path = staticmethod(BigtableTableAdminClient.common_folder_path) + common_folder_path = 
staticmethod(BaseBigtableTableAdminClient.common_folder_path) parse_common_folder_path = staticmethod( - BigtableTableAdminClient.parse_common_folder_path + BaseBigtableTableAdminClient.parse_common_folder_path ) common_organization_path = staticmethod( - BigtableTableAdminClient.common_organization_path + BaseBigtableTableAdminClient.common_organization_path ) parse_common_organization_path = staticmethod( - BigtableTableAdminClient.parse_common_organization_path + BaseBigtableTableAdminClient.parse_common_organization_path ) - common_project_path = staticmethod(BigtableTableAdminClient.common_project_path) + common_project_path = staticmethod(BaseBigtableTableAdminClient.common_project_path) parse_common_project_path = staticmethod( - BigtableTableAdminClient.parse_common_project_path + BaseBigtableTableAdminClient.parse_common_project_path + ) + common_location_path = staticmethod( + BaseBigtableTableAdminClient.common_location_path ) - common_location_path = staticmethod(BigtableTableAdminClient.common_location_path) parse_common_location_path = staticmethod( - BigtableTableAdminClient.parse_common_location_path + BaseBigtableTableAdminClient.parse_common_location_path ) @classmethod @@ -147,9 +151,9 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): kwargs: Additional arguments to pass to the constructor. Returns: - BigtableTableAdminAsyncClient: The constructed client. + BaseBigtableTableAdminAsyncClient: The constructed client. """ - return BigtableTableAdminClient.from_service_account_info.__func__(BigtableTableAdminAsyncClient, info, *args, **kwargs) # type: ignore + return BaseBigtableTableAdminClient.from_service_account_info.__func__(BaseBigtableTableAdminAsyncClient, info, *args, **kwargs) # type: ignore @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): @@ -163,9 +167,9 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): kwargs: Additional arguments to pass to the constructor. Returns: - BigtableTableAdminAsyncClient: The constructed client. + BaseBigtableTableAdminAsyncClient: The constructed client. """ - return BigtableTableAdminClient.from_service_account_file.__func__(BigtableTableAdminAsyncClient, filename, *args, **kwargs) # type: ignore + return BaseBigtableTableAdminClient.from_service_account_file.__func__(BaseBigtableTableAdminAsyncClient, filename, *args, **kwargs) # type: ignore from_service_account_json = from_service_account_file @@ -203,7 +207,7 @@ def get_mtls_endpoint_and_cert_source( Raises: google.auth.exceptions.MutualTLSChannelError: If any errors happen. """ - return BigtableTableAdminClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + return BaseBigtableTableAdminClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore @property def transport(self) -> BigtableTableAdminTransport: @@ -233,7 +237,7 @@ def universe_domain(self) -> str: """ return self._client._universe_domain - get_transport_class = BigtableTableAdminClient.get_transport_class + get_transport_class = BaseBigtableTableAdminClient.get_transport_class def __init__( self, @@ -249,7 +253,7 @@ def __init__( client_options: Optional[ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiates the bigtable table admin async client. + """Instantiates the base bigtable table admin async client. 
Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -298,7 +302,7 @@ def __init__( google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport creation failed for any reason. """ - self._client = BigtableTableAdminClient( + self._client = BaseBigtableTableAdminClient( credentials=credentials, transport=transport, client_options=client_options, @@ -309,7 +313,7 @@ def __init__( std_logging.DEBUG ): # pragma: NO COVER _LOGGER.debug( - "Created client `google.bigtable.admin_v2.BigtableTableAdminAsyncClient`.", + "Created client `google.bigtable.admin_v2.BaseBigtableTableAdminAsyncClient`.", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "universeDomain": getattr( @@ -2921,7 +2925,7 @@ async def list_backups( # Done; return the response. return response - async def restore_table( + async def _restore_table( self, request: Optional[Union[bigtable_table_admin.RestoreTableRequest, dict]] = None, *, @@ -3967,7 +3971,7 @@ async def delete_schema_bundle( metadata=metadata, ) - async def __aenter__(self) -> "BigtableTableAdminAsyncClient": + async def __aenter__(self) -> "BaseBigtableTableAdminAsyncClient": return self async def __aexit__(self, exc_type, exc, tb): @@ -3982,4 +3986,4 @@ async def __aexit__(self, exc_type, exc, tb): DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ -__all__ = ("BigtableTableAdminAsyncClient",) +__all__ = ("BaseBigtableTableAdminAsyncClient",) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index 812a9366ab33..c1f5a3e64196 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -78,7 +78,7 @@ from .transports.rest import BigtableTableAdminRestTransport -class BigtableTableAdminClientMeta(type): +class BaseBigtableTableAdminClientMeta(type): """Metaclass for the BigtableTableAdmin client. This provides class-level methods for building and retrieving @@ -115,7 +115,7 @@ def get_transport_class( return next(iter(cls._transport_registry.values())) -class BigtableTableAdminClient(metaclass=BigtableTableAdminClientMeta): +class BaseBigtableTableAdminClient(metaclass=BaseBigtableTableAdminClientMeta): """Service for creating, configuring, and deleting Cloud Bigtable tables. @@ -173,7 +173,7 @@ def from_service_account_info(cls, info: dict, *args, **kwargs): kwargs: Additional arguments to pass to the constructor. Returns: - BigtableTableAdminClient: The constructed client. + BaseBigtableTableAdminClient: The constructed client. """ credentials = service_account.Credentials.from_service_account_info(info) kwargs["credentials"] = credentials @@ -191,7 +191,7 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): kwargs: Additional arguments to pass to the constructor. Returns: - BigtableTableAdminClient: The constructed client. + BaseBigtableTableAdminClient: The constructed client. 
""" credentials = service_account.Credentials.from_service_account_file(filename) kwargs["credentials"] = credentials @@ -611,15 +611,17 @@ def _get_api_endpoint( elif use_mtls_endpoint == "always" or ( use_mtls_endpoint == "auto" and client_cert_source ): - _default_universe = BigtableTableAdminClient._DEFAULT_UNIVERSE + _default_universe = BaseBigtableTableAdminClient._DEFAULT_UNIVERSE if universe_domain != _default_universe: raise MutualTLSChannelError( f"mTLS is not supported in any universe other than {_default_universe}." ) - api_endpoint = BigtableTableAdminClient.DEFAULT_MTLS_ENDPOINT + api_endpoint = BaseBigtableTableAdminClient.DEFAULT_MTLS_ENDPOINT else: - api_endpoint = BigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format( - UNIVERSE_DOMAIN=universe_domain + api_endpoint = ( + BaseBigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=universe_domain + ) ) return api_endpoint @@ -639,7 +641,7 @@ def _get_universe_domain( Raises: ValueError: If the universe domain is an empty string. """ - universe_domain = BigtableTableAdminClient._DEFAULT_UNIVERSE + universe_domain = BaseBigtableTableAdminClient._DEFAULT_UNIVERSE if client_universe_domain is not None: universe_domain = client_universe_domain elif universe_domain_env is not None: @@ -720,7 +722,7 @@ def __init__( client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiates the bigtable table admin client. + """Instantiates the base bigtable table admin client. Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -784,11 +786,11 @@ def __init__( self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env, - ) = BigtableTableAdminClient._read_environment_variables() - self._client_cert_source = BigtableTableAdminClient._get_client_cert_source( + ) = BaseBigtableTableAdminClient._read_environment_variables() + self._client_cert_source = BaseBigtableTableAdminClient._get_client_cert_source( self._client_options.client_cert_source, self._use_client_cert ) - self._universe_domain = BigtableTableAdminClient._get_universe_domain( + self._universe_domain = BaseBigtableTableAdminClient._get_universe_domain( universe_domain_opt, self._universe_domain_env ) self._api_endpoint = None # updated below, depending on `transport` @@ -827,7 +829,7 @@ def __init__( self._api_endpoint = ( self._api_endpoint - or BigtableTableAdminClient._get_api_endpoint( + or BaseBigtableTableAdminClient._get_api_endpoint( self._client_options.api_endpoint, self._client_cert_source, self._universe_domain, @@ -849,7 +851,7 @@ def __init__( Type[BigtableTableAdminTransport], Callable[..., BigtableTableAdminTransport], ] = ( - BigtableTableAdminClient.get_transport_class(transport) + BaseBigtableTableAdminClient.get_transport_class(transport) if isinstance(transport, str) or transport is None else cast(Callable[..., BigtableTableAdminTransport], transport) ) @@ -871,7 +873,7 @@ def __init__( std_logging.DEBUG ): # pragma: NO COVER _LOGGER.debug( - "Created client `google.bigtable.admin_v2.BigtableTableAdminClient`.", + "Created client `google.bigtable.admin_v2.BaseBigtableTableAdminClient`.", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "universeDomain": getattr( @@ -3413,7 +3415,7 @@ def list_backups( # Done; return the response. 
return response - def restore_table( + def _restore_table( self, request: Optional[Union[bigtable_table_admin.RestoreTableRequest, dict]] = None, *, @@ -4442,7 +4444,7 @@ def delete_schema_bundle( metadata=metadata, ) - def __enter__(self) -> "BigtableTableAdminClient": + def __enter__(self) -> "BaseBigtableTableAdminClient": return self def __exit__(self, type, value, traceback): @@ -4463,4 +4465,4 @@ def __exit__(self, type, value, traceback): if hasattr(DEFAULT_CLIENT_INFO, "protobuf_runtime_version"): # pragma: NO COVER DEFAULT_CLIENT_INFO.protobuf_runtime_version = google.protobuf.__version__ -__all__ = ("BigtableTableAdminClient",) +__all__ = ("BaseBigtableTableAdminClient",) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py index adf448f823d4..ec2462d4acfb 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py @@ -342,7 +342,7 @@ def post_update_table(self, response): return response transport = BigtableTableAdminRestTransport(interceptor=MyCustomBigtableTableAdminInterceptor()) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) """ @@ -2087,7 +2087,7 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.CheckConsistency", + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.CheckConsistency", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "CheckConsistency", @@ -2138,7 +2138,7 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.check_consistency", + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.check_consistency", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "CheckConsistency", @@ -2243,7 +2243,7 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.CopyBackup", + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.CopyBackup", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "CopyBackup", @@ -2290,7 +2290,7 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.copy_backup", + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.copy_backup", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "CopyBackup", @@ -2398,7 +2398,7 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.CreateAuthorizedView", + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.CreateAuthorizedView", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "CreateAuthorizedView", @@ -2447,7 +2447,7 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.create_authorized_view", + "Received response for 
google.bigtable.admin_v2.BaseBigtableTableAdminClient.create_authorized_view", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "CreateAuthorizedView", @@ -2553,7 +2553,7 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.CreateBackup", + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.CreateBackup", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "CreateBackup", @@ -2600,7 +2600,7 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.create_backup", + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.create_backup", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "CreateBackup", @@ -2708,7 +2708,7 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.CreateSchemaBundle", + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.CreateSchemaBundle", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "CreateSchemaBundle", @@ -2757,7 +2757,7 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.create_schema_bundle", + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.create_schema_bundle", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "CreateSchemaBundle", @@ -2864,7 +2864,7 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.CreateTable", + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.CreateTable", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "CreateTable", @@ -2913,7 +2913,7 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.create_table", + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.create_table", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "CreateTable", @@ -3029,7 +3029,7 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.CreateTableFromSnapshot", + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.CreateTableFromSnapshot", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "CreateTableFromSnapshot", @@ -3078,7 +3078,7 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.create_table_from_snapshot", + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.create_table_from_snapshot", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "CreateTableFromSnapshot", @@ -3174,7 +3174,7 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.DeleteAuthorizedView", + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.DeleteAuthorizedView", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": 
"DeleteAuthorizedView", @@ -3284,7 +3284,7 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.DeleteBackup", + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.DeleteBackup", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "DeleteBackup", @@ -3394,7 +3394,7 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.DeleteSchemaBundle", + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.DeleteSchemaBundle", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "DeleteSchemaBundle", @@ -3511,7 +3511,7 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.DeleteSnapshot", + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.DeleteSnapshot", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "DeleteSnapshot", @@ -3619,7 +3619,7 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.DeleteTable", + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.DeleteTable", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "DeleteTable", @@ -3732,7 +3732,7 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.DropRowRange", + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.DropRowRange", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "DropRowRange", @@ -3855,7 +3855,7 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.GenerateConsistencyToken", + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.GenerateConsistencyToken", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "GenerateConsistencyToken", @@ -3910,7 +3910,7 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.generate_consistency_token", + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.generate_consistency_token", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "GenerateConsistencyToken", @@ -4016,7 +4016,7 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.GetAuthorizedView", + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.GetAuthorizedView", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "GetAuthorizedView", @@ -4064,7 +4064,7 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.get_authorized_view", + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.get_authorized_view", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "GetAuthorizedView", @@ -4161,7 +4161,7 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for 
google.bigtable.admin_v2.BigtableTableAdminClient.GetBackup", + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.GetBackup", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "GetBackup", @@ -4209,7 +4209,7 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.get_backup", + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.get_backup", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "GetBackup", @@ -4386,7 +4386,7 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.GetIamPolicy", + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.GetIamPolicy", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "GetIamPolicy", @@ -4435,7 +4435,7 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.get_iam_policy", + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.get_iam_policy", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "GetIamPolicy", @@ -4537,7 +4537,7 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.GetSchemaBundle", + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.GetSchemaBundle", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "GetSchemaBundle", @@ -4585,7 +4585,7 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.get_schema_bundle", + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.get_schema_bundle", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "GetSchemaBundle", @@ -4703,7 +4703,7 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.GetSnapshot", + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.GetSnapshot", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "GetSnapshot", @@ -4751,7 +4751,7 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.get_snapshot", + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.get_snapshot", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "GetSnapshot", @@ -4852,7 +4852,7 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.GetTable", + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.GetTable", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "GetTable", @@ -4900,7 +4900,7 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.get_table", + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.get_table", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "GetTable", @@ -5002,7 +5002,7 @@ def __call__( "headers": 
dict(metadata), } _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.ListAuthorizedViews", + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.ListAuthorizedViews", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "ListAuthorizedViews", @@ -5056,7 +5056,7 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.list_authorized_views", + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.list_authorized_views", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "ListAuthorizedViews", @@ -5156,7 +5156,7 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.ListBackups", + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.ListBackups", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "ListBackups", @@ -5206,7 +5206,7 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.list_backups", + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.list_backups", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "ListBackups", @@ -5308,7 +5308,7 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.ListSchemaBundles", + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.ListSchemaBundles", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "ListSchemaBundles", @@ -5358,7 +5358,7 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.list_schema_bundles", + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.list_schema_bundles", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "ListSchemaBundles", @@ -5472,7 +5472,7 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.ListSnapshots", + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.ListSnapshots", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "ListSnapshots", @@ -5522,7 +5522,7 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.list_snapshots", + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.list_snapshots", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "ListSnapshots", @@ -5621,7 +5621,7 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.ListTables", + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.ListTables", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "ListTables", @@ -5671,7 +5671,7 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.list_tables", + "Received response for 
google.bigtable.admin_v2.BaseBigtableTableAdminClient.list_tables", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "ListTables", @@ -5780,7 +5780,7 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.ModifyColumnFamilies", + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.ModifyColumnFamilies", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "ModifyColumnFamilies", @@ -5831,7 +5831,7 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.modify_column_families", + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.modify_column_families", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "ModifyColumnFamilies", @@ -5937,7 +5937,7 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.RestoreTable", + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.RestoreTable", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "RestoreTable", @@ -5984,7 +5984,7 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.restore_table", + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.restore_table", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "RestoreTable", @@ -6161,7 +6161,7 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.SetIamPolicy", + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.SetIamPolicy", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "SetIamPolicy", @@ -6210,7 +6210,7 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.set_iam_policy", + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.set_iam_policy", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "SetIamPolicy", @@ -6323,7 +6323,7 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.SnapshotTable", + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.SnapshotTable", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "SnapshotTable", @@ -6370,7 +6370,7 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.snapshot_table", + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.snapshot_table", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "SnapshotTable", @@ -6474,7 +6474,7 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.TestIamPermissions", + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.TestIamPermissions", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "TestIamPermissions", @@ -6525,7 +6525,7 @@ def __call__( "status": 
response.status_code, } _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.test_iam_permissions", + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.test_iam_permissions", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "TestIamPermissions", @@ -6631,7 +6631,7 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.UndeleteTable", + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.UndeleteTable", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "UndeleteTable", @@ -6678,7 +6678,7 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.undelete_table", + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.undelete_table", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "UndeleteTable", @@ -6786,7 +6786,7 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.UpdateAuthorizedView", + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.UpdateAuthorizedView", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "UpdateAuthorizedView", @@ -6835,7 +6835,7 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.update_authorized_view", + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.update_authorized_view", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "UpdateAuthorizedView", @@ -6938,7 +6938,7 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.UpdateBackup", + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.UpdateBackup", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "UpdateBackup", @@ -6987,7 +6987,7 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.update_backup", + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.update_backup", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "UpdateBackup", @@ -7095,7 +7095,7 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.UpdateSchemaBundle", + f"Sending request for google.bigtable.admin_v2.BaseBigtableTableAdminClient.UpdateSchemaBundle", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "UpdateSchemaBundle", @@ -7144,7 +7144,7 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.update_schema_bundle", + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.update_schema_bundle", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "UpdateSchemaBundle", @@ -7250,7 +7250,7 @@ def __call__( "headers": dict(metadata), } _LOGGER.debug( - f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.UpdateTable", + f"Sending request for 
google.bigtable.admin_v2.BaseBigtableTableAdminClient.UpdateTable", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "UpdateTable", @@ -7297,7 +7297,7 @@ def __call__( "status": response.status_code, } _LOGGER.debug( - "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.update_table", + "Received response for google.bigtable.admin_v2.BaseBigtableTableAdminClient.update_table", extra={ "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", "rpcName": "UpdateTable", diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py index 44e9463d4a8b..c15eac7990b5 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py @@ -20,6 +20,7 @@ import proto # type: ignore from google.cloud.bigtable_admin_v2.types import types +from google.cloud.bigtable_admin_v2.utils import oneof_message from google.protobuf import duration_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore from google.rpc import status_pb2 # type: ignore @@ -181,21 +182,36 @@ class Table(proto.Message): For example, if \_key = "some_id#2024-04-30#\x00\x13\x00\xf3" with the following - schema: { fields { field_name: "id" type { string { - encoding: utf8_bytes {} } } } fields { field_name: "date" - type { string { encoding: utf8_bytes {} } } } fields { - field_name: "product_code" type { int64 { encoding: - big_endian_bytes {} } } } encoding { delimited_bytes { - delimiter: "#" } } } - - | The decoded key parts would be: id = "some_id", date = - "2024-04-30", product_code = 1245427 The query "SELECT - \_key, product_code FROM table" will return two columns: - /------------------------------------------------------ - | \| \_key \| product_code \| \| - --------------------------------------|--------------\| \| - "some_id#2024-04-30#\x00\x13\x00\xf3" \| 1245427 \| - ------------------------------------------------------/ + schema: + + .. code-block:: + + { + fields { + field_name: "id" + type { string { encoding: utf8_bytes {} } } + } + fields { + field_name: "date" + type { string { encoding: utf8_bytes {} } } + } + fields { + field_name: "product_code" + type { int64 { encoding: big_endian_bytes {} } } + } + encoding { delimited_bytes { delimiter: "#" } } + } + + The decoded key parts would be: + id = "some_id", date = "2024-04-30", product_code = 1245427 + The query "SELECT \_key, product_code FROM table" will return + two columns: + + +========================================+==============+ + | \_key | product_code | + +========================================+==============+ + | "some_id#2024-04-30#\x00\x13\x00\xf3" | 1245427 | + +----------------------------------------+--------------+ The schema has the following invariants: (1) The decoded field values are order-preserved. For read, the field values @@ -571,7 +587,7 @@ class ColumnFamily(proto.Message): ) -class GcRule(proto.Message): +class GcRule(oneof_message.OneofMessage): r"""Rule for determining which cells to delete during garbage collection. 
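The GcRule change above re-parents the generated message onto the handwritten OneofMessage base added below, so that conflicting assignments to the rule oneof fail fast instead of silently keeping only the last field set. A minimal hand-written sketch of the behavior this subclassing is meant to enforce (max_num_versions and max_age are the standard members of the GcRule rule oneof; the snippet is illustrative, not part of the generated code):

    from datetime import timedelta

    from google.cloud.bigtable_admin_v2.types import GcRule

    # A single oneof field behaves exactly as before.
    rule = GcRule(max_age=timedelta(days=5))

    # Passing two fields of the same oneof to the constructor now raises
    # ValueError instead of letting the later field win silently.
    try:
        GcRule(max_num_versions=2, max_age=timedelta(days=5))
    except ValueError as exc:
        print(exc)

    # Assigning a different field of an already-populated oneof is rejected
    # by __setattr__ as well; rebuild the message to switch fields.
    try:
        rule.max_num_versions = 2
    except ValueError as exc:
        print(exc)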
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/utils/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/utils/__init__.py new file mode 100644 index 000000000000..93d7660568fe --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/utils/__init__.py @@ -0,0 +1,19 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# This directory is a directory for handwritten code, made for inserting +# specifically the oneof_message module into files in the autogenerated +# types directory without causing ImportErrors due to circular imports. +# For other use cases, use the overlay submodule. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/utils/oneof_message.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/utils/oneof_message.py new file mode 100644 index 000000000000..e110d8fa6cf1 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/utils/oneof_message.py @@ -0,0 +1,108 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# +import collections.abc +import proto + + +class OneofMessage(proto.Message): + def _get_oneof_field_from_key(self, key): + """Given a field name, return the corresponding oneof associated with it. If it doesn't exist, return None.""" + + oneof_type = None + + try: + oneof_type = self._meta.fields[key].oneof + except KeyError: + # Underscores may be appended to field names + # that collide with python or proto-plus keywords. + # In case a key only exists with a `_` suffix, coerce the key + # to include the `_` suffix. It's not possible to + # natively define the same field with a trailing underscore in protobuf. + # See related issue + # https://github.com/googleapis/python-api-core/issues/227 + if f"{key}_" in self._meta.fields: + key = f"{key}_" + oneof_type = self._meta.fields[key].oneof + + return oneof_type + + def __init__( + self, + mapping=None, + *, + ignore_unknown_fields=False, + **kwargs, + ): + # We accept several things for `mapping`: + # * An instance of this class. + # * An instance of the underlying protobuf descriptor class. + # * A dict + # * Nothing (keyword arguments only). + # + # + # Check for oneofs collisions in the parameters provided. Extract a set of + # all fields that are set from the mappings + kwargs combined. 
+ mapping_fields = set(kwargs.keys()) + + if mapping is None: + pass + elif isinstance(mapping, collections.abc.Mapping): + mapping_fields.update(mapping.keys()) + elif isinstance(mapping, self._meta.pb): + mapping_fields.update(field.name for field, _ in mapping.ListFields()) + elif isinstance(mapping, type(self)): + mapping_fields.update(field.name for field, _ in mapping._pb.ListFields()) + else: + # Sanity check: Did we get something not a map? Error if so. + raise TypeError( + "Invalid constructor input for %s: %r" + % ( + self.__class__.__name__, + mapping, + ) + ) + + oneofs = set() + + for field in mapping_fields: + oneof_field = self._get_oneof_field_from_key(field) + if oneof_field is not None: + if oneof_field in oneofs: + raise ValueError( + "Invalid constructor input for %s: Multiple fields defined for oneof %s" + % (self.__class__.__name__, oneof_field) + ) + else: + oneofs.add(oneof_field) + + super().__init__(mapping, ignore_unknown_fields=ignore_unknown_fields, **kwargs) + + def __setattr__(self, key, value): + # Oneof check: Only set the value of an existing oneof field + # if the field being overridden is the same as the field already set + # for the oneof. + oneof = self._get_oneof_field_from_key(key) + if ( + oneof is not None + and self._pb.HasField(oneof) + and self._pb.WhichOneof(oneof) != key + ): + raise ValueError( + "Overriding the field set for oneof %s with a different field %s" + % (oneof, key) + ) + super().__setattr__(key, value) diff --git a/packages/google-cloud-bigtable/owlbot.py b/packages/google-cloud-bigtable/owlbot.py index 56573f71eb98..04d871e95a13 100644 --- a/packages/google-cloud-bigtable/owlbot.py +++ b/packages/google-cloud-bigtable/owlbot.py @@ -16,11 +16,13 @@ from pathlib import Path import re +import textwrap from typing import List, Optional import synthtool as s -from synthtool import gcp +from synthtool import gcp, _tracked_paths from synthtool.languages import python +from synthtool.sources import templates common = gcp.CommonTemplates() @@ -69,16 +71,30 @@ def get_staging_dirs( bigtable_default_version = "v2" bigtable_admin_default_version = "v2" +# These flags are needed because certain post-processing operations +# append things after a certain line of text, and can infinitely loop +# in a Github PR. We use these flags to only do those operations +# on fresh copies of files found in googleapis-gen, and not on user-submitted +# changes. 
+is_fresh_admin_copy = False +is_fresh_admin_v2_copy = False +is_fresh_admin_docs_copy = False + for library in get_staging_dirs(bigtable_default_version, "bigtable"): s.move(library / "google/cloud/bigtable_v2", excludes=["**/gapic_version.py"]) s.move(library / "tests") s.move(library / "scripts") for library in get_staging_dirs(bigtable_admin_default_version, "bigtable_admin"): - s.move(library / "google/cloud/bigtable_admin", excludes=["**/gapic_version.py"]) - s.move(library / "google/cloud/bigtable_admin_v2", excludes=["**/gapic_version.py"]) + is_fresh_admin_copy = \ + s.move(library / "google/cloud/bigtable_admin", excludes=["**/gapic_version.py"]) + is_fresh_admin_v2_copy = \ + s.move(library / "google/cloud/bigtable_admin_v2", excludes=["**/gapic_version.py"]) s.move(library / "tests") + s.move(library / "samples") s.move(library / "scripts") + is_fresh_admin_docs_copy = \ + s.move(library / "docs/bigtable_admin_v2", destination="docs/admin_client") s.remove_staging_dirs() @@ -158,4 +174,152 @@ def get_staging_dirs( """# todo(kolea2): temporary workaround to install pinned dep version INSTALL_LIBRARY_FROM_SOURCE = False""") +# -------------------------------------------------------------------------- +# Admin Overlay work +# -------------------------------------------------------------------------- + +# Add overlay imports to top level __init__.py files in admin_v2 and admin at the end +# of each file, after the __all__ definition. These changes should only be done on fresh +# copies of the __init__.py files. +def add_overlay_to_init_py(init_py_location, import_statements, should_add): + if should_add: + s.replace( + init_py_location, + r"(?s)(^__all__ = \(.*\)$)", + r"\1\n\n" + import_statements + ) + +add_overlay_to_init_py( + "google/cloud/bigtable_admin_v2/__init__.py", + """from .overlay import * # noqa: F403 +__all__ += overlay.__all__ # noqa: F405 +""", + is_fresh_admin_v2_copy, +) + +add_overlay_to_init_py( + "google/cloud/bigtable_admin/__init__.py", + """import google.cloud.bigtable_admin_v2.overlay # noqa: F401 +from google.cloud.bigtable_admin_v2.overlay import * # noqa: F401, F403 + +__all__ += google.cloud.bigtable_admin_v2.overlay.__all__ +""", + is_fresh_admin_copy, +) + +# Replace all instances of BaseBigtableTableAdminClient/BaseBigtableAdminAsyncClient +# in samples and docstrings with BigtableTableAdminClient/BigtableTableAdminAsyncClient +s.replace( + [ + "google/cloud/bigtable_admin_v2/services/*/client.py", + "google/cloud/bigtable_admin_v2/services/*/async_client.py", + "samples/generated_samples/bigtableadmin_v2_*.py" + ], + r"client = bigtable_admin_v2\.Base(BigtableTableAdmin(Async)?Client\(\))", + r"client = bigtable_admin_v2.\1" +) + +# Fix an improperly formatted table that breaks nox -s docs. 
+s.replace( + "google/cloud/bigtable_admin_v2/types/table.py", + """ For example, if \\\\_key = + "some_id#2024-04-30#\\\\x00\\\\x13\\\\x00\\\\xf3" with the following + schema: \\{ fields \\{ field_name: "id" type \\{ string \\{ + encoding: utf8_bytes \\{\\} \\} \\} \\} fields \\{ field_name: "date" + type \\{ string \\{ encoding: utf8_bytes \\{\\} \\} \\} \\} fields \\{ + field_name: "product_code" type \\{ int64 \\{ encoding: + big_endian_bytes \\{\\} \\} \\} \\} encoding \\{ delimited_bytes \\{ + delimiter: "#" \\} \\} \\} + + \\| The decoded key parts would be: id = "some_id", date = + "2024-04-30", product_code = 1245427 The query "SELECT + \\\\_key, product_code FROM table" will return two columns: + /------------------------------------------------------ + \\| \\\\\\| \\\\_key \\\\\\| product_code \\\\\\| \\\\\\| + --------------------------------------\\|--------------\\\\\\| \\\\\\| + "some_id#2024-04-30#\\\\x00\\\\x13\\\\x00\\\\xf3" \\\\\\| 1245427 \\\\\\| + ------------------------------------------------------/ +""", + textwrap.indent( + """For example, if \\\\_key = +"some_id#2024-04-30#\\\\x00\\\\x13\\\\x00\\\\xf3" with the following +schema: + +.. code-block:: + + { + fields { + field_name: "id" + type { string { encoding: utf8_bytes {} } } + } + fields { + field_name: "date" + type { string { encoding: utf8_bytes {} } } + } + fields { + field_name: "product_code" + type { int64 { encoding: big_endian_bytes {} } } + } + encoding { delimited_bytes { delimiter: "#" } } + } + +The decoded key parts would be: +id = "some_id", date = "2024-04-30", product_code = 1245427 +The query "SELECT \\\\_key, product_code FROM table" will return +two columns: + ++========================================+==============+ +| \\\\_key | product_code | ++========================================+==============+ +| "some_id#2024-04-30#\\\\x00\\\\x13\\\\x00\\\\xf3" | 1245427 | ++----------------------------------------+--------------+ +""", + " " * 12, + ), +) + +# These changes should only be done on fresh copies of the .rst files +# from googleapis-gen. +if is_fresh_admin_docs_copy: + # Change the subpackage for clients with overridden internal methods in them + # from service to overlay.service. + s.replace( + "docs/admin_client/bigtable_table_admin.rst", + r"^\.\. automodule:: google\.cloud\.bigtable_admin_v2\.services\.bigtable_table_admin$", + ".. automodule:: google.cloud.bigtable_admin_v2.overlay.services.bigtable_table_admin" + ) + + # Add overlay types to types documentation + s.replace( + "docs/admin_client/types_.rst", + r"""(\.\. automodule:: google\.cloud\.bigtable_admin_v2\.types + :members: + :show-inheritance:) +""", + r"""\1 + +.. automodule:: google.cloud.bigtable_admin_v2.overlay.types + :members: + :show-inheritance: +""" + ) + +# These changes should only be done on a fresh copy of table.py +# from googleapis-gen. 
+if is_fresh_admin_v2_copy: + # Add the oneof_message import into table.py for GcRule + s.replace( + "google/cloud/bigtable_admin_v2/types/table.py", + r"^(from google\.cloud\.bigtable_admin_v2\.types import .+)$", + r"""\1 +from google.cloud.bigtable_admin_v2.utils import oneof_message""", + ) + + # Re-subclass GcRule in table.py + s.replace( + "google/cloud/bigtable_admin_v2/types/table.py", + r"class GcRule\(proto\.Message\)\:", + "class GcRule(oneof_message.OneofMessage):", + ) + s.shell.run(["nox", "-s", "blacken"], hide_output=False) diff --git a/packages/google-cloud-bigtable/scripts/fixup_admin_v2_keywords.py b/packages/google-cloud-bigtable/scripts/fixup_admin_v2_keywords.py new file mode 100644 index 000000000000..d287df24f364 --- /dev/null +++ b/packages/google-cloud-bigtable/scripts/fixup_admin_v2_keywords.py @@ -0,0 +1,233 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class adminCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + 'check_consistency': ('name', 'consistency_token', 'standard_read_remote_writes', 'data_boost_read_local_writes', ), + 'copy_backup': ('parent', 'backup_id', 'source_backup', 'expire_time', ), + 'create_app_profile': ('parent', 'app_profile_id', 'app_profile', 'ignore_warnings', ), + 'create_authorized_view': ('parent', 'authorized_view_id', 'authorized_view', ), + 'create_backup': ('parent', 'backup_id', 'backup', ), + 'create_cluster': ('parent', 'cluster_id', 'cluster', ), + 'create_instance': ('parent', 'instance_id', 'instance', 'clusters', ), + 'create_logical_view': ('parent', 'logical_view_id', 'logical_view', ), + 'create_materialized_view': ('parent', 'materialized_view_id', 'materialized_view', ), + 'create_table': ('parent', 'table_id', 'table', 'initial_splits', ), + 'create_table_from_snapshot': ('parent', 'table_id', 'source_snapshot', ), + 'delete_app_profile': ('name', 'ignore_warnings', ), + 'delete_authorized_view': ('name', 'etag', ), + 'delete_backup': ('name', ), + 'delete_cluster': ('name', ), + 'delete_instance': ('name', ), + 'delete_logical_view': ('name', 'etag', ), + 'delete_materialized_view': ('name', 'etag', ), + 'delete_snapshot': ('name', ), + 'delete_table': ('name', ), + 'drop_row_range': ('name', 'row_key_prefix', 'delete_all_data_from_table', ), + 'generate_consistency_token': ('name', ), + 'get_app_profile': ('name', ), + 'get_authorized_view': ('name', 'view', ), + 'get_backup': ('name', ), + 
'get_cluster': ('name', ), + 'get_iam_policy': ('resource', 'options', ), + 'get_instance': ('name', ), + 'get_logical_view': ('name', ), + 'get_materialized_view': ('name', ), + 'get_snapshot': ('name', ), + 'get_table': ('name', 'view', ), + 'list_app_profiles': ('parent', 'page_size', 'page_token', ), + 'list_authorized_views': ('parent', 'page_size', 'page_token', 'view', ), + 'list_backups': ('parent', 'filter', 'order_by', 'page_size', 'page_token', ), + 'list_clusters': ('parent', 'page_token', ), + 'list_hot_tablets': ('parent', 'start_time', 'end_time', 'page_size', 'page_token', ), + 'list_instances': ('parent', 'page_token', ), + 'list_logical_views': ('parent', 'page_size', 'page_token', ), + 'list_materialized_views': ('parent', 'page_size', 'page_token', ), + 'list_snapshots': ('parent', 'page_size', 'page_token', ), + 'list_tables': ('parent', 'view', 'page_size', 'page_token', ), + 'modify_column_families': ('name', 'modifications', 'ignore_warnings', ), + 'partial_update_cluster': ('cluster', 'update_mask', ), + 'partial_update_instance': ('instance', 'update_mask', ), + 'restore_table': ('parent', 'table_id', 'backup', ), + 'set_iam_policy': ('resource', 'policy', 'update_mask', ), + 'snapshot_table': ('name', 'cluster', 'snapshot_id', 'ttl', 'description', ), + 'test_iam_permissions': ('resource', 'permissions', ), + 'undelete_table': ('name', ), + 'update_app_profile': ('app_profile', 'update_mask', 'ignore_warnings', ), + 'update_authorized_view': ('authorized_view', 'update_mask', 'ignore_warnings', ), + 'update_backup': ('backup', 'update_mask', ), + 'update_cluster': ('name', 'location', 'state', 'serve_nodes', 'node_scaling_factor', 'cluster_config', 'default_storage_type', 'encryption_config', ), + 'update_instance': ('display_name', 'name', 'state', 'type_', 'labels', 'create_time', 'satisfies_pzs', 'satisfies_pzi', ), + 'update_logical_view': ('logical_view', 'update_mask', ), + 'update_materialized_view': ('materialized_view', 'update_mask', ), + 'update_table': ('table', 'update_mask', 'ignore_warnings', ), + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. + args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. + return updated + + kwargs, ctrl_kwargs = partition( + lambda a: a.keyword.value not in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), +cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. 
+ for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=adminCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. + updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. + with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the admin client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method. 
+""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 7e89af11b3cf..e7113a6117b8 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -37,7 +37,7 @@ # 'Development Status :: 5 - Production/Stable' release_status = "Development Status :: 5 - Production/Stable" dependencies = [ - "google-api-core[grpc] >= 2.16.0, <3.0.0", + "google-api-core[grpc] >= 2.17.0, <3.0.0", "google-cloud-core >= 1.4.4, <3.0.0", "google-auth >= 2.14.1, <3.0.0,!=2.24.0,!=2.25.0", "grpc-google-iam-v1 >= 0.12.4, <1.0.0", @@ -94,7 +94,7 @@ extras_require=extras, scripts=[ "scripts/fixup_bigtable_v2_keywords.py", - "scripts/fixup_bigtable_admin_v2_keywords.py", + "scripts/fixup_admin_v2_keywords.py", ], python_requires=">=3.7", include_package_data=True, diff --git a/packages/google-cloud-bigtable/testing/constraints-3.7.txt b/packages/google-cloud-bigtable/testing/constraints-3.7.txt index 5a3f3e3fc3f6..ec7a8c807a10 100644 --- a/packages/google-cloud-bigtable/testing/constraints-3.7.txt +++ b/packages/google-cloud-bigtable/testing/constraints-3.7.txt @@ -5,7 +5,7 @@ # # e.g., if setup.py has "foo >= 1.14.0, < 2.0.0dev", # Then this file should have foo==1.14.0 -google-api-core==2.16.0 +google-api-core==2.17.0 google-auth==2.14.1 google-cloud-core==2.0.0 grpc-google-iam-v1==0.12.4 diff --git a/packages/google-cloud-bigtable/testing/constraints-3.8.txt b/packages/google-cloud-bigtable/testing/constraints-3.8.txt index 5ed0c2fb9a10..1c867060d068 100644 --- a/packages/google-cloud-bigtable/testing/constraints-3.8.txt +++ b/packages/google-cloud-bigtable/testing/constraints-3.8.txt @@ -5,7 +5,7 @@ # # e.g., if setup.py has "foo >= 1.14.0, < 2.0.0dev", # Then this file should have foo==1.14.0 -google-api-core==2.16.0 +google-api-core==2.17.0 google-auth==2.14.1 google-cloud-core==2.0.0 grpc-google-iam-v1==0.12.4 diff --git a/packages/google-cloud-bigtable/tests/system/admin_overlay/__init__.py b/packages/google-cloud-bigtable/tests/system/admin_overlay/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/packages/google-cloud-bigtable/tests/system/admin_overlay/conftest.py b/packages/google-cloud-bigtable/tests/system/admin_overlay/conftest.py new file mode 100644 index 000000000000..66baef3f4d7a --- /dev/null +++ b/packages/google-cloud-bigtable/tests/system/admin_overlay/conftest.py @@ -0,0 +1,38 @@ +import google.auth + +import os +import pytest +import uuid + + +INSTANCE_PREFIX = "admin-overlay-instance" +BACKUP_PREFIX = "admin-overlay-backup" +ROW_PREFIX = "test-row" + +DEFAULT_CLUSTER_LOCATIONS = 
["us-east1-b"] +REPLICATION_CLUSTER_LOCATIONS = ["us-east1-b", "us-west1-b"] +TEST_TABLE_NAME = "system-test-table" +TEST_BACKUP_TABLE_NAME = "system-test-backup-table" +TEST_COLUMMN_FAMILY_NAME = "test-column" +TEST_COLUMN_NAME = "value" +NUM_ROWS = 500 +INITIAL_CELL_VALUE = "Hello" +NEW_CELL_VALUE = "World" + + +@pytest.fixture(scope="session") +def admin_overlay_project_id(): + project_id = os.getenv("GOOGLE_CLOUD_PROJECT") + if not project_id: + _, project_id = google.auth.default() + return project_id + + +def generate_unique_suffix(name): + """ + Generates a unique suffix for the name. + + Uses UUID4 because using time.time doesn't guarantee + uniqueness when the time is frozen in containers. + """ + return f"{name}-{uuid.uuid4().hex[:7]}" diff --git a/packages/google-cloud-bigtable/tests/system/admin_overlay/test_system_async.py b/packages/google-cloud-bigtable/tests/system/admin_overlay/test_system_async.py new file mode 100644 index 000000000000..8dea4f5f1461 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/system/admin_overlay/test_system_async.py @@ -0,0 +1,384 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Tuple + +from google.cloud import bigtable_admin_v2 as admin_v2 +from google.cloud.bigtable.data._cross_sync import CrossSync +from google.cloud.bigtable.data import mutations, read_rows_query +from google.cloud.environment_vars import BIGTABLE_EMULATOR + +from .conftest import ( + INSTANCE_PREFIX, + BACKUP_PREFIX, + ROW_PREFIX, + DEFAULT_CLUSTER_LOCATIONS, + REPLICATION_CLUSTER_LOCATIONS, + TEST_TABLE_NAME, + TEST_BACKUP_TABLE_NAME, + TEST_COLUMMN_FAMILY_NAME, + TEST_COLUMN_NAME, + NUM_ROWS, + INITIAL_CELL_VALUE, + NEW_CELL_VALUE, + generate_unique_suffix, +) + +from datetime import datetime, timedelta + +import pytest +import os + + +if CrossSync.is_async: + from google.api_core import operation_async as api_core_operation +else: + from google.api_core import operation as api_core_operation + + +__CROSS_SYNC_OUTPUT__ = "tests.system.admin_overlay.test_system_autogen" + +if os.getenv(BIGTABLE_EMULATOR): + pytest.skip( + allow_module_level=True, + reason="Emulator support for admin client tests unsupported.", + ) + + +@CrossSync.convert +@CrossSync.pytest_fixture(scope="session") +async def data_client(admin_overlay_project_id): + async with CrossSync.DataClient(project=admin_overlay_project_id) as client: + yield client + + +@CrossSync.convert( + replace_symbols={"BigtableTableAdminAsyncClient": "BigtableTableAdminClient"} +) +@CrossSync.pytest_fixture(scope="session") +async def table_admin_client(admin_overlay_project_id): + async with admin_v2.BigtableTableAdminAsyncClient( + client_options={ + "quota_project_id": admin_overlay_project_id, + } + ) as client: + yield client + + +@CrossSync.convert( + replace_symbols={"BigtableInstanceAdminAsyncClient": "BigtableInstanceAdminClient"} +) +@CrossSync.pytest_fixture(scope="session") +async def instance_admin_client(admin_overlay_project_id): + async with 
admin_v2.BigtableInstanceAdminAsyncClient( + client_options={ + "quota_project_id": admin_overlay_project_id, + } + ) as client: + yield client + + +@CrossSync.convert +@CrossSync.pytest_fixture(scope="session") +async def instances_to_delete(instance_admin_client): + instances = [] + + try: + yield instances + finally: + for instance in instances: + await instance_admin_client.delete_instance(name=instance.name) + + +@CrossSync.convert +@CrossSync.pytest_fixture(scope="session") +async def backups_to_delete(table_admin_client): + backups = [] + + try: + yield backups + finally: + for backup in backups: + await table_admin_client.delete_backup(name=backup.name) + + +@CrossSync.convert +async def create_instance( + instance_admin_client, + table_admin_client, + data_client, + project_id, + instances_to_delete, + storage_type=admin_v2.StorageType.HDD, + cluster_locations=DEFAULT_CLUSTER_LOCATIONS, +) -> Tuple[admin_v2.Instance, admin_v2.Table]: + """ + Creates a new Bigtable instance with the specified project_id, storage type, and cluster locations. + + After creating the Bigtable instance, it will create a test table and populate it with dummy data. + This is not defined as a fixture because the different system tests need different kinds of instances. + """ + # Create the instance + clusters = {} + + instance_id = generate_unique_suffix(INSTANCE_PREFIX) + + for idx, location in enumerate(cluster_locations): + clusters[location] = admin_v2.Cluster( + name=instance_admin_client.cluster_path( + project_id, instance_id, f"{instance_id}-{idx}" + ), + location=instance_admin_client.common_location_path(project_id, location), + default_storage_type=storage_type, + ) + + create_instance_request = admin_v2.CreateInstanceRequest( + parent=instance_admin_client.common_project_path(project_id), + instance_id=instance_id, + instance=admin_v2.Instance( + display_name=instance_id[ + :30 + ], # truncate to 30 characters because of character limit + ), + clusters=clusters, + ) + operation = await instance_admin_client.create_instance(create_instance_request) + instance = await operation.result() + + instances_to_delete.append(instance) + + # Create a table within the instance + create_table_request = admin_v2.CreateTableRequest( + parent=instance_admin_client.instance_path(project_id, instance_id), + table_id=TEST_TABLE_NAME, + table=admin_v2.Table( + column_families={ + TEST_COLUMMN_FAMILY_NAME: admin_v2.ColumnFamily(), + } + ), + ) + + table = await table_admin_client.create_table(create_table_request) + + # Populate with dummy data + await populate_table( + table_admin_client, data_client, instance, table, INITIAL_CELL_VALUE + ) + + return instance, table + + +@CrossSync.convert +async def populate_table(table_admin_client, data_client, instance, table, cell_value): + """ + Populates all the test cells in the given table with the given cell value. + + This is used to populate test data when creating an instance, and for testing the + wait_for_consistency call. 
+ """ + data_client_table = data_client.get_table( + table_admin_client.parse_instance_path(instance.name)["instance"], + table_admin_client.parse_table_path(table.name)["table"], + ) + row_mutation_entries = [] + for i in range(0, NUM_ROWS): + row_mutation_entries.append( + mutations.RowMutationEntry( + row_key=f"{ROW_PREFIX}-{i}", + mutations=[ + mutations.SetCell( + family=TEST_COLUMMN_FAMILY_NAME, + qualifier=TEST_COLUMN_NAME, + new_value=cell_value, + timestamp_micros=-1, + ) + ], + ) + ) + + await data_client_table.bulk_mutate_rows(row_mutation_entries) + + +@CrossSync.convert +async def create_backup( + instance_admin_client, table_admin_client, instance, table, backups_to_delete +) -> admin_v2.Backup: + """ + Creates a backup of the given table under the given instance. + + This will be restored to a different instance later on, to test + optimize_restored_table. + """ + # Get a cluster in the instance for the backup + list_clusters_response = await instance_admin_client.list_clusters( + parent=instance.name + ) + cluster_name = list_clusters_response.clusters[0].name + + backup_id = generate_unique_suffix(BACKUP_PREFIX) + + # Create the backup + operation = await table_admin_client.create_backup( + admin_v2.CreateBackupRequest( + parent=cluster_name, + backup_id=backup_id, + backup=admin_v2.Backup( + name=f"{cluster_name}/backups/{backup_id}", + source_table=table.name, + expire_time=datetime.now() + timedelta(hours=7), + ), + ) + ) + + backup = await operation.result() + backups_to_delete.append(backup) + return backup + + +@CrossSync.convert +async def assert_table_cell_value_equal_to( + table_admin_client, data_client, instance, table, value +): + """ + Asserts that all cells in the given table have the given value. + """ + data_client_table = data_client.get_table( + table_admin_client.parse_instance_path(instance.name)["instance"], + table_admin_client.parse_table_path(table.name)["table"], + ) + + # Read all the rows; there shouldn't be that many of them + query = read_rows_query.ReadRowsQuery(limit=NUM_ROWS) + async for row in await data_client_table.read_rows_stream(query): + latest_cell = row[TEST_COLUMMN_FAMILY_NAME, TEST_COLUMN_NAME][0] + assert latest_cell.value.decode("utf-8") == value + + +@CrossSync.convert( + replace_symbols={ + "AsyncRestoreTableOperation": "RestoreTableOperation", + "AsyncOperation": "Operation", + } +) +@CrossSync.pytest +@pytest.mark.parametrize( + "second_instance_storage_type,expect_optimize_operation", + [ + (admin_v2.StorageType.HDD, False), + (admin_v2.StorageType.SSD, True), + ], +) +async def test_optimize_restored_table( + admin_overlay_project_id, + instance_admin_client, + table_admin_client, + data_client, + instances_to_delete, + backups_to_delete, + second_instance_storage_type, + expect_optimize_operation, +): + # Create two instances. We backup a table from the first instance to a new table in the + # second instance. 
This is to test whether or not different scenarios trigger an + # optimize_restored_table operation + instance_with_backup, table_to_backup = await create_instance( + instance_admin_client, + table_admin_client, + data_client, + admin_overlay_project_id, + instances_to_delete, + admin_v2.StorageType.HDD, + ) + + instance_to_restore, _ = await create_instance( + instance_admin_client, + table_admin_client, + data_client, + admin_overlay_project_id, + instances_to_delete, + second_instance_storage_type, + ) + + backup = await create_backup( + instance_admin_client, + table_admin_client, + instance_with_backup, + table_to_backup, + backups_to_delete, + ) + + # Restore to other instance + restore_operation = await table_admin_client.restore_table( + admin_v2.RestoreTableRequest( + parent=instance_to_restore.name, + table_id=TEST_BACKUP_TABLE_NAME, + backup=backup.name, + ) + ) + + assert isinstance(restore_operation, admin_v2.AsyncRestoreTableOperation) + restored_table = await restore_operation.result() + + optimize_operation = await restore_operation.optimize_restored_table_operation() + if expect_optimize_operation: + assert isinstance(optimize_operation, api_core_operation.AsyncOperation) + await optimize_operation.result() + else: + assert optimize_operation is None + + # Test that the new table exists + assert ( + restored_table.name + == f"{instance_to_restore.name}/tables/{TEST_BACKUP_TABLE_NAME}" + ) + await assert_table_cell_value_equal_to( + table_admin_client, + data_client, + instance_to_restore, + restored_table, + INITIAL_CELL_VALUE, + ) + + +@CrossSync.pytest +async def test_wait_for_consistency( + instance_admin_client, + table_admin_client, + data_client, + instances_to_delete, + admin_overlay_project_id, +): + # Create an instance and a table, then try to write NEW_CELL_VALUE + # to each table row instead of INITIAL_CELL_VALUE. + instance, table = await create_instance( + instance_admin_client, + table_admin_client, + data_client, + admin_overlay_project_id, + instances_to_delete, + cluster_locations=REPLICATION_CLUSTER_LOCATIONS, + ) + + await populate_table( + table_admin_client, data_client, instance, table, NEW_CELL_VALUE + ) + + wait_for_consistency_request = admin_v2.WaitForConsistencyRequest( + name=table.name, + standard_read_remote_writes=admin_v2.StandardReadRemoteWrites(), + ) + await table_admin_client.wait_for_consistency(wait_for_consistency_request) + await assert_table_cell_value_equal_to( + table_admin_client, data_client, instance, table, NEW_CELL_VALUE + ) diff --git a/packages/google-cloud-bigtable/tests/system/admin_overlay/test_system_autogen.py b/packages/google-cloud-bigtable/tests/system/admin_overlay/test_system_autogen.py new file mode 100644 index 000000000000..21e4aff3cfc2 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/system/admin_overlay/test_system_autogen.py @@ -0,0 +1,291 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# This file is automatically generated by CrossSync. Do not edit manually. 
+ +from typing import Tuple +from google.cloud import bigtable_admin_v2 as admin_v2 +from google.cloud.bigtable.data._cross_sync import CrossSync +from google.cloud.bigtable.data import mutations, read_rows_query +from google.cloud.environment_vars import BIGTABLE_EMULATOR +from .conftest import ( + INSTANCE_PREFIX, + BACKUP_PREFIX, + ROW_PREFIX, + DEFAULT_CLUSTER_LOCATIONS, + REPLICATION_CLUSTER_LOCATIONS, + TEST_TABLE_NAME, + TEST_BACKUP_TABLE_NAME, + TEST_COLUMMN_FAMILY_NAME, + TEST_COLUMN_NAME, + NUM_ROWS, + INITIAL_CELL_VALUE, + NEW_CELL_VALUE, + generate_unique_suffix, +) +from datetime import datetime, timedelta +import pytest +import os +from google.api_core import operation as api_core_operation + +if os.getenv(BIGTABLE_EMULATOR): + pytest.skip( + allow_module_level=True, + reason="Emulator support for admin client tests unsupported.", + ) + + +@pytest.fixture(scope="session") +def data_client(admin_overlay_project_id): + with CrossSync._Sync_Impl.DataClient(project=admin_overlay_project_id) as client: + yield client + + +@pytest.fixture(scope="session") +def table_admin_client(admin_overlay_project_id): + with admin_v2.BigtableTableAdminClient( + client_options={"quota_project_id": admin_overlay_project_id} + ) as client: + yield client + + +@pytest.fixture(scope="session") +def instance_admin_client(admin_overlay_project_id): + with admin_v2.BigtableInstanceAdminClient( + client_options={"quota_project_id": admin_overlay_project_id} + ) as client: + yield client + + +@pytest.fixture(scope="session") +def instances_to_delete(instance_admin_client): + instances = [] + try: + yield instances + finally: + for instance in instances: + instance_admin_client.delete_instance(name=instance.name) + + +@pytest.fixture(scope="session") +def backups_to_delete(table_admin_client): + backups = [] + try: + yield backups + finally: + for backup in backups: + table_admin_client.delete_backup(name=backup.name) + + +def create_instance( + instance_admin_client, + table_admin_client, + data_client, + project_id, + instances_to_delete, + storage_type=admin_v2.StorageType.HDD, + cluster_locations=DEFAULT_CLUSTER_LOCATIONS, +) -> Tuple[admin_v2.Instance, admin_v2.Table]: + """Creates a new Bigtable instance with the specified project_id, storage type, and cluster locations. + + After creating the Bigtable instance, it will create a test table and populate it with dummy data. + This is not defined as a fixture because the different system tests need different kinds of instances. 
+ """ + clusters = {} + instance_id = generate_unique_suffix(INSTANCE_PREFIX) + for idx, location in enumerate(cluster_locations): + clusters[location] = admin_v2.Cluster( + name=instance_admin_client.cluster_path( + project_id, instance_id, f"{instance_id}-{idx}" + ), + location=instance_admin_client.common_location_path(project_id, location), + default_storage_type=storage_type, + ) + create_instance_request = admin_v2.CreateInstanceRequest( + parent=instance_admin_client.common_project_path(project_id), + instance_id=instance_id, + instance=admin_v2.Instance(display_name=instance_id[:30]), + clusters=clusters, + ) + operation = instance_admin_client.create_instance(create_instance_request) + instance = operation.result() + instances_to_delete.append(instance) + create_table_request = admin_v2.CreateTableRequest( + parent=instance_admin_client.instance_path(project_id, instance_id), + table_id=TEST_TABLE_NAME, + table=admin_v2.Table( + column_families={TEST_COLUMMN_FAMILY_NAME: admin_v2.ColumnFamily()} + ), + ) + table = table_admin_client.create_table(create_table_request) + populate_table(table_admin_client, data_client, instance, table, INITIAL_CELL_VALUE) + return (instance, table) + + +def populate_table(table_admin_client, data_client, instance, table, cell_value): + """Populates all the test cells in the given table with the given cell value. + + This is used to populate test data when creating an instance, and for testing the + wait_for_consistency call.""" + data_client_table = data_client.get_table( + table_admin_client.parse_instance_path(instance.name)["instance"], + table_admin_client.parse_table_path(table.name)["table"], + ) + row_mutation_entries = [] + for i in range(0, NUM_ROWS): + row_mutation_entries.append( + mutations.RowMutationEntry( + row_key=f"{ROW_PREFIX}-{i}", + mutations=[ + mutations.SetCell( + family=TEST_COLUMMN_FAMILY_NAME, + qualifier=TEST_COLUMN_NAME, + new_value=cell_value, + timestamp_micros=-1, + ) + ], + ) + ) + data_client_table.bulk_mutate_rows(row_mutation_entries) + + +def create_backup( + instance_admin_client, table_admin_client, instance, table, backups_to_delete +) -> admin_v2.Backup: + """Creates a backup of the given table under the given instance. 
+ + This will be restored to a different instance later on, to test + optimize_restored_table.""" + list_clusters_response = instance_admin_client.list_clusters(parent=instance.name) + cluster_name = list_clusters_response.clusters[0].name + backup_id = generate_unique_suffix(BACKUP_PREFIX) + operation = table_admin_client.create_backup( + admin_v2.CreateBackupRequest( + parent=cluster_name, + backup_id=backup_id, + backup=admin_v2.Backup( + name=f"{cluster_name}/backups/{backup_id}", + source_table=table.name, + expire_time=datetime.now() + timedelta(hours=7), + ), + ) + ) + backup = operation.result() + backups_to_delete.append(backup) + return backup + + +def assert_table_cell_value_equal_to( + table_admin_client, data_client, instance, table, value +): + """Asserts that all cells in the given table have the given value.""" + data_client_table = data_client.get_table( + table_admin_client.parse_instance_path(instance.name)["instance"], + table_admin_client.parse_table_path(table.name)["table"], + ) + query = read_rows_query.ReadRowsQuery(limit=NUM_ROWS) + for row in data_client_table.read_rows_stream(query): + latest_cell = row[TEST_COLUMMN_FAMILY_NAME, TEST_COLUMN_NAME][0] + assert latest_cell.value.decode("utf-8") == value + + +@pytest.mark.parametrize( + "second_instance_storage_type,expect_optimize_operation", + [(admin_v2.StorageType.HDD, False), (admin_v2.StorageType.SSD, True)], +) +def test_optimize_restored_table( + admin_overlay_project_id, + instance_admin_client, + table_admin_client, + data_client, + instances_to_delete, + backups_to_delete, + second_instance_storage_type, + expect_optimize_operation, +): + instance_with_backup, table_to_backup = create_instance( + instance_admin_client, + table_admin_client, + data_client, + admin_overlay_project_id, + instances_to_delete, + admin_v2.StorageType.HDD, + ) + instance_to_restore, _ = create_instance( + instance_admin_client, + table_admin_client, + data_client, + admin_overlay_project_id, + instances_to_delete, + second_instance_storage_type, + ) + backup = create_backup( + instance_admin_client, + table_admin_client, + instance_with_backup, + table_to_backup, + backups_to_delete, + ) + restore_operation = table_admin_client.restore_table( + admin_v2.RestoreTableRequest( + parent=instance_to_restore.name, + table_id=TEST_BACKUP_TABLE_NAME, + backup=backup.name, + ) + ) + assert isinstance(restore_operation, admin_v2.RestoreTableOperation) + restored_table = restore_operation.result() + optimize_operation = restore_operation.optimize_restored_table_operation() + if expect_optimize_operation: + assert isinstance(optimize_operation, api_core_operation.Operation) + optimize_operation.result() + else: + assert optimize_operation is None + assert ( + restored_table.name + == f"{instance_to_restore.name}/tables/{TEST_BACKUP_TABLE_NAME}" + ) + assert_table_cell_value_equal_to( + table_admin_client, + data_client, + instance_to_restore, + restored_table, + INITIAL_CELL_VALUE, + ) + + +def test_wait_for_consistency( + instance_admin_client, + table_admin_client, + data_client, + instances_to_delete, + admin_overlay_project_id, +): + instance, table = create_instance( + instance_admin_client, + table_admin_client, + data_client, + admin_overlay_project_id, + instances_to_delete, + cluster_locations=REPLICATION_CLUSTER_LOCATIONS, + ) + populate_table(table_admin_client, data_client, instance, table, NEW_CELL_VALUE) + wait_for_consistency_request = admin_v2.WaitForConsistencyRequest( + name=table.name, 
standard_read_remote_writes=admin_v2.StandardReadRemoteWrites() + ) + table_admin_client.wait_for_consistency(wait_for_consistency_request) + assert_table_cell_value_equal_to( + table_admin_client, data_client, instance, table, NEW_CELL_VALUE + ) diff --git a/packages/google-cloud-bigtable/tests/system/conftest.py b/packages/google-cloud-bigtable/tests/system/conftest.py index b8862ea4bc6f..39480942dc4d 100644 --- a/packages/google-cloud-bigtable/tests/system/conftest.py +++ b/packages/google-cloud-bigtable/tests/system/conftest.py @@ -17,9 +17,20 @@ import sys import os +import pytest +import asyncio + script_path = os.path.dirname(os.path.realpath(__file__)) sys.path.append(script_path) pytest_plugins = [ "data.setup_fixtures", ] + + +@pytest.fixture(scope="session") +def event_loop(): + loop = asyncio.get_event_loop() + yield loop + loop.stop() + loop.close() diff --git a/packages/google-cloud-bigtable/tests/system/data/test_system_async.py b/packages/google-cloud-bigtable/tests/system/data/test_system_async.py index 0dd6e8100e36..ed9fbd8b8ce1 100644 --- a/packages/google-cloud-bigtable/tests/system/data/test_system_async.py +++ b/packages/google-cloud-bigtable/tests/system/data/test_system_async.py @@ -13,7 +13,6 @@ # limitations under the License. import pytest -import asyncio import datetime import uuid import os @@ -138,14 +137,6 @@ async def target(self, client, table_id, authorized_view_id, instance_id, reques else: raise ValueError(f"unknown target type: {request.param}") - @CrossSync.drop - @pytest.fixture(scope="session") - def event_loop(self): - loop = asyncio.get_event_loop() - yield loop - loop.stop() - loop.close() - @pytest.fixture(scope="session") def column_family_config(self): """ diff --git a/packages/google-cloud-bigtable/tests/system/data/test_system_autogen.py b/packages/google-cloud-bigtable/tests/system/data/test_system_autogen.py index 46e9c2215e88..693b8d966424 100644 --- a/packages/google-cloud-bigtable/tests/system/data/test_system_autogen.py +++ b/packages/google-cloud-bigtable/tests/system/data/test_system_autogen.py @@ -249,7 +249,7 @@ def test_mutation_set_cell(self, target, temp_rows): """Ensure cells can be set properly""" row_key = b"bulk_mutate" new_value = uuid.uuid4().hex.encode() - (row_key, mutation) = self._create_row_and_mutation( + row_key, mutation = self._create_row_and_mutation( target, temp_rows, new_value=new_value ) target.mutate_row(row_key, mutation) @@ -303,7 +303,7 @@ def test_bulk_mutations_set_cell(self, client, target, temp_rows): from google.cloud.bigtable.data.mutations import RowMutationEntry new_value = uuid.uuid4().hex.encode() - (row_key, mutation) = self._create_row_and_mutation( + row_key, mutation = self._create_row_and_mutation( target, temp_rows, new_value=new_value ) bulk_mutation = RowMutationEntry(row_key, [mutation]) @@ -338,11 +338,11 @@ def test_mutations_batcher_context_manager(self, client, target, temp_rows): """test batcher with context manager. 
Should flush on exit""" from google.cloud.bigtable.data.mutations import RowMutationEntry - (new_value, new_value2) = [uuid.uuid4().hex.encode() for _ in range(2)] - (row_key, mutation) = self._create_row_and_mutation( + new_value, new_value2 = [uuid.uuid4().hex.encode() for _ in range(2)] + row_key, mutation = self._create_row_and_mutation( target, temp_rows, new_value=new_value ) - (row_key2, mutation2) = self._create_row_and_mutation( + row_key2, mutation2 = self._create_row_and_mutation( target, temp_rows, new_value=new_value2 ) bulk_mutation = RowMutationEntry(row_key, [mutation]) @@ -363,7 +363,7 @@ def test_mutations_batcher_timer_flush(self, client, target, temp_rows): from google.cloud.bigtable.data.mutations import RowMutationEntry new_value = uuid.uuid4().hex.encode() - (row_key, mutation) = self._create_row_and_mutation( + row_key, mutation = self._create_row_and_mutation( target, temp_rows, new_value=new_value ) bulk_mutation = RowMutationEntry(row_key, [mutation]) @@ -385,12 +385,12 @@ def test_mutations_batcher_count_flush(self, client, target, temp_rows): """batch should flush after flush_limit_mutation_count mutations""" from google.cloud.bigtable.data.mutations import RowMutationEntry - (new_value, new_value2) = [uuid.uuid4().hex.encode() for _ in range(2)] - (row_key, mutation) = self._create_row_and_mutation( + new_value, new_value2 = [uuid.uuid4().hex.encode() for _ in range(2)] + row_key, mutation = self._create_row_and_mutation( target, temp_rows, new_value=new_value ) bulk_mutation = RowMutationEntry(row_key, [mutation]) - (row_key2, mutation2) = self._create_row_and_mutation( + row_key2, mutation2 = self._create_row_and_mutation( target, temp_rows, new_value=new_value2 ) bulk_mutation2 = RowMutationEntry(row_key2, [mutation2]) @@ -417,12 +417,12 @@ def test_mutations_batcher_bytes_flush(self, client, target, temp_rows): """batch should flush after flush_limit_bytes bytes""" from google.cloud.bigtable.data.mutations import RowMutationEntry - (new_value, new_value2) = [uuid.uuid4().hex.encode() for _ in range(2)] - (row_key, mutation) = self._create_row_and_mutation( + new_value, new_value2 = [uuid.uuid4().hex.encode() for _ in range(2)] + row_key, mutation = self._create_row_and_mutation( target, temp_rows, new_value=new_value ) bulk_mutation = RowMutationEntry(row_key, [mutation]) - (row_key2, mutation2) = self._create_row_and_mutation( + row_key2, mutation2 = self._create_row_and_mutation( target, temp_rows, new_value=new_value2 ) bulk_mutation2 = RowMutationEntry(row_key2, [mutation2]) @@ -448,11 +448,11 @@ def test_mutations_batcher_no_flush(self, client, target, temp_rows): new_value = uuid.uuid4().hex.encode() start_value = b"unchanged" - (row_key, mutation) = self._create_row_and_mutation( + row_key, mutation = self._create_row_and_mutation( target, temp_rows, start_value=start_value, new_value=new_value ) bulk_mutation = RowMutationEntry(row_key, [mutation]) - (row_key2, mutation2) = self._create_row_and_mutation( + row_key2, mutation2 = self._create_row_and_mutation( target, temp_rows, start_value=start_value, new_value=new_value ) bulk_mutation2 = RowMutationEntry(row_key2, [mutation2]) diff --git a/packages/google-cloud-bigtable/tests/unit/admin_overlay/my_oneof_message.py b/packages/google-cloud-bigtable/tests/unit/admin_overlay/my_oneof_message.py new file mode 100644 index 000000000000..25667cfca99a --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/admin_overlay/my_oneof_message.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 
Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import proto + +from google.cloud.bigtable_admin_v2.utils import oneof_message + +__protobuf__ = proto.module( + package="test.oneof.v1", + manifest={ + "MyOneofMessage", + }, +) + + +# Foo and Bar belong to oneof foobar, and baz is independent. +class MyOneofMessage(oneof_message.OneofMessage): + foo: int = proto.Field( + proto.INT32, + number=1, + oneof="foobar", + ) + + bar: int = proto.Field( + proto.INT32, + number=2, + oneof="foobar", + ) + + baz: int = proto.Field( + proto.INT32, + number=3, + ) diff --git a/packages/google-cloud-bigtable/tests/unit/admin_overlay/test_admin_packaging.py b/packages/google-cloud-bigtable/tests/unit/admin_overlay/test_admin_packaging.py new file mode 100644 index 000000000000..729a92b5c308 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/admin_overlay/test_admin_packaging.py @@ -0,0 +1,41 @@ +# Copyright 2025 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import importlib + +import pytest + + +@pytest.mark.parametrize( + "module", ["google.cloud.bigtable_admin", "google.cloud.bigtable_admin_v2"] +) +def test_admin_overlay_imports(module): + # Simulate from import dynamically using importlib + mod = importlib.import_module(module) + + # Check that the import aliasing works as expected for overlay/autogenerated clients/types. + classes_to_modules = { + "BigtableTableAdminClient": "google.cloud.bigtable_admin_v2.overlay.services.bigtable_table_admin.client", + "RestoreTableOperation": "google.cloud.bigtable_admin_v2.overlay.types.restore_table", + "BigtableInstanceAdminClient": "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.client", + "RestoreTableRequest": "google.cloud.bigtable_admin_v2.types.bigtable_table_admin", + } + + for cls_name, submodule_name in classes_to_modules.items(): + cls = getattr(mod, cls_name) + submodule = importlib.import_module(submodule_name) + assert cls == getattr(submodule, cls_name) + + # Check that from import * has the class inside. 
+ assert cls_name in mod.__all__ diff --git a/packages/google-cloud-bigtable/tests/unit/admin_overlay/test_async_client.py b/packages/google-cloud-bigtable/tests/unit/admin_overlay/test_async_client.py new file mode 100644 index 000000000000..0d844a9e4ced --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/admin_overlay/test_async_client.py @@ -0,0 +1,297 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock # pragma: NO COVER # noqa: F401 +except ImportError: # pragma: NO COVER + import mock + +from google.api_core import exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth.credentials import AnonymousCredentials +from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import transports +from google.cloud.bigtable_admin_v2.types import bigtable_table_admin +from google.cloud.bigtable_admin_v2.overlay.services.bigtable_table_admin.async_client import ( + BigtableTableAdminAsyncClient, + DEFAULT_CLIENT_INFO, +) +from google.cloud.bigtable_admin_v2.overlay.types import ( + async_restore_table, + wait_for_consistency_request, +) + +from google.cloud.bigtable import __version__ as bigtable_version + +from test_async_consistency import ( + FALSE_CONSISTENCY_RESPONSE, + TRUE_CONSISTENCY_RESPONSE, +) + +import pytest + + +PARENT_NAME = "my_parent" +TABLE_NAME = "my_table" +CONSISTENCY_TOKEN = "abcdefg" + + +def _make_client(**kwargs): + kwargs["credentials"] = kwargs.get("credentials", AnonymousCredentials()) + return BigtableTableAdminAsyncClient(**kwargs) + + +@pytest.mark.parametrize( + "transport_class,transport_name", + [ + ( + transports.BigtableTableAdminGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_bigtable_table_admin_async_client_client_version( + transport_class, transport_name +): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + _make_client(transport=transport_name) + + # call_args.kwargs is not supported in Python 3.7, so find them from the tuple + # instead. It's always the last item in the call_args tuple. 
+ transport_init_call_kwargs = patched.call_args[-1] + assert transport_init_call_kwargs["client_info"] == DEFAULT_CLIENT_INFO + + assert ( + DEFAULT_CLIENT_INFO.client_library_version + == f"{bigtable_version}-admin-overlay-async" + ) + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "kwargs", + [ + { + "request": bigtable_table_admin.RestoreTableRequest( + parent=PARENT_NAME, + table_id=TABLE_NAME, + ) + }, + { + "request": { + "parent": PARENT_NAME, + "table_id": TABLE_NAME, + }, + }, + { + "request": bigtable_table_admin.RestoreTableRequest( + parent=PARENT_NAME, + table_id=TABLE_NAME, + ), + "retry": mock.Mock(spec=retries.Retry), + "timeout": mock.Mock(spec=retries.Retry), + "metadata": [("foo", "bar")], + }, + ], +) +async def test_bigtable_table_admin_async_client_restore_table(kwargs): + client = _make_client() + + with mock.patch.object( + async_restore_table, "AsyncRestoreTableOperation", new_callable=mock.AsyncMock + ) as future_mock: + with mock.patch.object( + client._client, "_transport", new_callable=mock.AsyncMock + ) as transport_mock: + with mock.patch.object( + client, "_restore_table", new_callable=mock.AsyncMock + ) as restore_table_mock: + operation_mock = mock.Mock() + restore_table_mock.return_value = operation_mock + await client.restore_table(**kwargs) + + restore_table_mock.assert_called_once_with( + request=kwargs["request"], + retry=kwargs.get("retry", gapic_v1.method.DEFAULT), + timeout=kwargs.get("timeout", gapic_v1.method.DEFAULT), + metadata=kwargs.get("metadata", ()), + ) + future_mock.assert_called_once_with( + transport_mock.operations_client, operation_mock + ) + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "kwargs,check_consistency_request_extras", + [ + ( + { + "request": wait_for_consistency_request.WaitForConsistencyRequest( + name=TABLE_NAME, + ) + }, + {}, + ), + ( + { + "request": wait_for_consistency_request.WaitForConsistencyRequest( + name=TABLE_NAME, + standard_read_remote_writes=bigtable_table_admin.StandardReadRemoteWrites(), + ) + }, + { + "standard_read_remote_writes": bigtable_table_admin.StandardReadRemoteWrites(), + }, + ), + ( + { + "request": wait_for_consistency_request.WaitForConsistencyRequest( + name=TABLE_NAME, + data_boost_read_local_writes=bigtable_table_admin.DataBoostReadLocalWrites(), + ) + }, + { + "data_boost_read_local_writes": bigtable_table_admin.DataBoostReadLocalWrites(), + }, + ), + ( + { + "request": { + "name": TABLE_NAME, + "data_boost_read_local_writes": {}, + } + }, + { + "data_boost_read_local_writes": bigtable_table_admin.DataBoostReadLocalWrites(), + }, + ), + ( + { + "name": TABLE_NAME, + }, + {}, + ), + ( + { + "request": wait_for_consistency_request.WaitForConsistencyRequest( + name=TABLE_NAME, + ), + "retry": mock.Mock(spec=retries.Retry), + "timeout": mock.Mock(spec=retries.Retry), + "metadata": [("foo", "bar")], + }, + {}, + ), + ], +) +async def test_bigtable_table_admin_async_client_wait_for_consistency( + kwargs, check_consistency_request_extras +): + client = _make_client() + poll_count = 3 + check_mock_side_effect = [FALSE_CONSISTENCY_RESPONSE] * (poll_count - 1) + check_mock_side_effect.append(TRUE_CONSISTENCY_RESPONSE) + + with mock.patch.object( + client, "generate_consistency_token", new_callable=mock.AsyncMock + ) as generate_mock: + with mock.patch.object( + client, "check_consistency", new_callable=mock.AsyncMock + ) as check_mock: + generate_mock.return_value = ( + bigtable_table_admin.GenerateConsistencyTokenResponse( + consistency_token=CONSISTENCY_TOKEN, + ) + ) + + 
check_mock.side_effect = check_mock_side_effect + result = await client.wait_for_consistency(**kwargs) + + assert result is True + + generate_mock.assert_awaited_once_with( + bigtable_table_admin.GenerateConsistencyTokenRequest( + name=TABLE_NAME, + ), + retry=kwargs.get("retry", gapic_v1.method.DEFAULT), + timeout=kwargs.get("timeout", gapic_v1.method.DEFAULT), + metadata=kwargs.get("metadata", ()), + ) + + expected_check_consistency_request = ( + bigtable_table_admin.CheckConsistencyRequest( + name=TABLE_NAME, + consistency_token=CONSISTENCY_TOKEN, + **check_consistency_request_extras, + ) + ) + + check_mock.assert_awaited_with( + expected_check_consistency_request, + retry=kwargs.get("retry", gapic_v1.method.DEFAULT), + timeout=kwargs.get("timeout", gapic_v1.method.DEFAULT), + metadata=kwargs.get("metadata", ()), + ) + + +@pytest.mark.asyncio +async def test_bigtable_table_admin_async_client_wait_for_consistency_error_in_call(): + client = _make_client() + request = wait_for_consistency_request.WaitForConsistencyRequest( + name=TABLE_NAME, + ) + + with pytest.raises(exceptions.GoogleAPICallError): + with mock.patch.object( + client, "generate_consistency_token", new_callable=mock.AsyncMock + ) as generate_mock: + generate_mock.side_effect = exceptions.DeadlineExceeded( + "Deadline Exceeded." + ) + await client.wait_for_consistency(request) + + with pytest.raises(exceptions.GoogleAPICallError): + with mock.patch.object( + client, "generate_consistency_token", new_callable=mock.AsyncMock + ) as generate_mock: + with mock.patch.object( + client, "check_consistency", new_callable=mock.AsyncMock + ) as check_mock: + generate_mock.return_value = ( + bigtable_table_admin.GenerateConsistencyTokenResponse( + consistency_token=CONSISTENCY_TOKEN, + ) + ) + + check_mock.side_effect = exceptions.DeadlineExceeded( + "Deadline Exceeded." + ) + await client.wait_for_consistency(request) + + +@pytest.mark.asyncio +async def test_bigtable_table_admin_async_client_wait_for_consistency_user_error(): + client = _make_client() + with pytest.raises(ValueError): + await client.wait_for_consistency( + { + "name": TABLE_NAME, + }, + name=TABLE_NAME, + ) diff --git a/packages/google-cloud-bigtable/tests/unit/admin_overlay/test_async_consistency.py b/packages/google-cloud-bigtable/tests/unit/admin_overlay/test_async_consistency.py new file mode 100644 index 000000000000..56978713c5f2 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/admin_overlay/test_async_consistency.py @@ -0,0 +1,74 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock # pragma: NO COVER # noqa: F401 +except ImportError: # pragma: NO COVER + import mock + +from google.cloud.bigtable_admin_v2.overlay.types import async_consistency +from google.cloud.bigtable_admin_v2.types import bigtable_table_admin + +import pytest + + +TRUE_CONSISTENCY_RESPONSE = bigtable_table_admin.CheckConsistencyResponse( + consistent=True +) + +FALSE_CONSISTENCY_RESPONSE = bigtable_table_admin.CheckConsistencyResponse( + consistent=False +) + + +def async_mock_check_consistency_callable(max_poll_count=1): + # Return False max_poll_count - 1 times, then True, for a total of + # max_poll_count calls. + side_effect = [FALSE_CONSISTENCY_RESPONSE] * (max_poll_count - 1) + side_effect.append(TRUE_CONSISTENCY_RESPONSE) + return mock.AsyncMock(spec=["__call__"], side_effect=side_effect) + + +def test_check_consistency_future_cancel(): + check_consistency_call = async_mock_check_consistency_callable() + future = async_consistency._AsyncCheckConsistencyPollingFuture( + check_consistency_call + ) + with pytest.raises(NotImplementedError): + future.cancel() + + with pytest.raises(NotImplementedError): + future.cancelled() + + +@pytest.mark.asyncio +async def test_check_consistency_future_result(): + times = 5 + check_consistency_call = async_mock_check_consistency_callable(times) + future = async_consistency._AsyncCheckConsistencyPollingFuture( + check_consistency_call + ) + is_consistent = await future.result() + + assert is_consistent + check_consistency_call.assert_has_calls([mock.call()] * times) + + # Check that calling result again doesn't produce more calls. + is_consistent = future.result() + + assert is_consistent + check_consistency_call.assert_has_calls([mock.call()] * times) diff --git a/packages/google-cloud-bigtable/tests/unit/admin_overlay/test_async_restore_table.py b/packages/google-cloud-bigtable/tests/unit/admin_overlay/test_async_restore_table.py new file mode 100644 index 000000000000..95799fc147a4 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/admin_overlay/test_async_restore_table.py @@ -0,0 +1,248 @@ +# Copyright 2025 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock # pragma: NO COVER # noqa: F401 +except ImportError: # pragma: NO COVER + import mock + +from google.longrunning import operations_pb2 +from google.rpc import status_pb2, code_pb2 + +from google.api_core import operation_async, exceptions +from google.api_core.future import async_future +from google.api_core.operations_v1 import operations_async_client +from google.cloud.bigtable_admin_v2.types import bigtable_table_admin, table +from google.cloud.bigtable_admin_v2.overlay.types import async_restore_table + +import pytest + + +# Set up the mock operations +DEFAULT_MAX_POLL = 3 +RESTORE_TABLE_OPERATION_TABLE_NAME = "Test Table" +RESTORE_TABLE_OPERATION_NAME = "test/restore_table" +RESTORE_TABLE_OPERATION_METADATA = bigtable_table_admin.RestoreTableMetadata( + name=RESTORE_TABLE_OPERATION_TABLE_NAME, +) +OPTIMIZE_RESTORED_TABLE_OPERATION_NAME = "test/optimize_restore_table" +OPTIMIZE_RESTORED_TABLE_METADATA = bigtable_table_admin.OptimizeRestoredTableMetadata( + name=RESTORE_TABLE_OPERATION_TABLE_NAME, +) + +OPTIMIZE_RESTORED_TABLE_OPERATION_ID = "abcdefg" +RESTORE_TABLE_OPERATION_FINISHED_RESPONSE = table.Table( + name=RESTORE_TABLE_OPERATION_TABLE_NAME, +) +RESTORE_TABLE_OPERATION_FINISHED_ERROR = status_pb2.Status( + code=code_pb2.DEADLINE_EXCEEDED, message="Deadline Exceeded" +) + + +def make_operation_proto( + name, done=False, metadata=None, response=None, error=None, **kwargs +): + operation_proto = operations_pb2.Operation(name=name, done=done, **kwargs) + + if metadata is not None: + operation_proto.metadata.Pack(metadata._pb) + + if response is not None: + operation_proto.response.Pack(response._pb) + + if error is not None: + operation_proto.error.CopyFrom(error) + + return operation_proto + + +RESTORE_TABLE_IN_PROGRESS_OPERATION_PROTO = make_operation_proto( + name=RESTORE_TABLE_OPERATION_NAME, + done=False, + metadata=RESTORE_TABLE_OPERATION_METADATA, +) + +OPTIMIZE_RESTORED_TABLE_OPERATION_PROTO = make_operation_proto( + name=OPTIMIZE_RESTORED_TABLE_OPERATION_NAME, + metadata=OPTIMIZE_RESTORED_TABLE_METADATA, +) + + +# Set up the mock operation client +def mock_restore_table_operation( + max_poll_count=DEFAULT_MAX_POLL, fail=False, has_optimize_operation=True +): + client = mock.AsyncMock(spec=operations_async_client.OperationsAsyncClient) + + # Set up the polling + side_effect = [RESTORE_TABLE_IN_PROGRESS_OPERATION_PROTO] * (max_poll_count - 1) + finished_operation_metadata = bigtable_table_admin.RestoreTableMetadata() + bigtable_table_admin.RestoreTableMetadata.copy_from( + finished_operation_metadata, RESTORE_TABLE_OPERATION_METADATA + ) + if has_optimize_operation: + finished_operation_metadata.optimize_table_operation_name = ( + OPTIMIZE_RESTORED_TABLE_OPERATION_ID + ) + + if fail: + final_operation_proto = make_operation_proto( + name=RESTORE_TABLE_OPERATION_NAME, + done=True, + metadata=finished_operation_metadata, + error=RESTORE_TABLE_OPERATION_FINISHED_ERROR, + ) + else: + final_operation_proto = make_operation_proto( + name=RESTORE_TABLE_OPERATION_NAME, + done=True, + metadata=finished_operation_metadata, + response=RESTORE_TABLE_OPERATION_FINISHED_RESPONSE, + ) + side_effect.append(final_operation_proto) + refresh = mock.AsyncMock(spec=["__call__"], side_effect=side_effect) + cancel = mock.AsyncMock(spec=["__call__"]) + future = operation_async.AsyncOperation( + RESTORE_TABLE_IN_PROGRESS_OPERATION_PROTO, + refresh, + cancel, + 
result_type=table.Table, + metadata_type=bigtable_table_admin.RestoreTableMetadata, + ) + + # Set up the optimize_restore_table_operation + client.get_operation.side_effect = [OPTIMIZE_RESTORED_TABLE_OPERATION_PROTO] + + return async_restore_table.AsyncRestoreTableOperation(client, future) + + +@pytest.mark.asyncio +async def test_async_restore_table_operation_client_success_has_optimize(): + restore_table_operation = mock_restore_table_operation() + + await restore_table_operation.result() + optimize_restored_table_operation = ( + await restore_table_operation.optimize_restored_table_operation() + ) + + assert isinstance(optimize_restored_table_operation, operation_async.AsyncOperation) + assert ( + optimize_restored_table_operation._operation + == OPTIMIZE_RESTORED_TABLE_OPERATION_PROTO + ) + restore_table_operation._operations_client.get_operation.assert_called_with( + name=OPTIMIZE_RESTORED_TABLE_OPERATION_ID + ) + restore_table_operation._refresh.assert_has_calls( + [mock.call(retry=async_future.DEFAULT_RETRY)] * DEFAULT_MAX_POLL + ) + + +@pytest.mark.asyncio +async def test_restore_table_operation_client_success_has_optimize_multiple_calls(): + restore_table_operation = mock_restore_table_operation() + + await restore_table_operation.result() + optimize_restored_table_operation = ( + await restore_table_operation.optimize_restored_table_operation() + ) + + assert isinstance(optimize_restored_table_operation, operation_async.AsyncOperation) + assert ( + optimize_restored_table_operation._operation + == OPTIMIZE_RESTORED_TABLE_OPERATION_PROTO + ) + restore_table_operation._operations_client.get_operation.assert_called_with( + name=OPTIMIZE_RESTORED_TABLE_OPERATION_ID + ) + restore_table_operation._refresh.assert_has_calls( + [mock.call(retry=async_future.DEFAULT_RETRY)] * DEFAULT_MAX_POLL + ) + + await restore_table_operation.optimize_restored_table_operation() + restore_table_operation._refresh.assert_has_calls( + [mock.call(retry=async_future.DEFAULT_RETRY)] * DEFAULT_MAX_POLL + ) + + +@pytest.mark.asyncio +async def test_restore_table_operation_success_has_optimize_call_before_done(): + restore_table_operation = mock_restore_table_operation() + + with pytest.raises(exceptions.GoogleAPIError): + await restore_table_operation.optimize_restored_table_operation() + + restore_table_operation._operations_client.get_operation.assert_not_called() + + +@pytest.mark.asyncio +async def test_restore_table_operation_client_success_only_cache_after_finishing(): + restore_table_operation = mock_restore_table_operation() + + with pytest.raises(exceptions.GoogleAPIError): + await restore_table_operation.optimize_restored_table_operation() + + await restore_table_operation.result() + optimize_restored_table_operation = ( + await restore_table_operation.optimize_restored_table_operation() + ) + + assert isinstance(optimize_restored_table_operation, operation_async.AsyncOperation) + assert ( + optimize_restored_table_operation._operation + == OPTIMIZE_RESTORED_TABLE_OPERATION_PROTO + ) + restore_table_operation._operations_client.get_operation.assert_called_with( + name=OPTIMIZE_RESTORED_TABLE_OPERATION_ID + ) + restore_table_operation._refresh.assert_has_calls( + [mock.call(retry=async_future.DEFAULT_RETRY)] * DEFAULT_MAX_POLL + ) + + restore_table_operation.optimize_restored_table_operation() + restore_table_operation._refresh.assert_has_calls( + [mock.call(retry=async_future.DEFAULT_RETRY)] * DEFAULT_MAX_POLL + ) + + +@pytest.mark.asyncio +async def 
test_restore_table_operation_success_no_optimize(): + restore_table_operation = mock_restore_table_operation(has_optimize_operation=False) + + await restore_table_operation.result() + optimize_restored_table_operation = ( + await restore_table_operation.optimize_restored_table_operation() + ) + + assert optimize_restored_table_operation is None + restore_table_operation._operations_client.get_operation.assert_not_called() + + +@pytest.mark.asyncio +async def test_restore_table_operation_exception(): + restore_table_operation = mock_restore_table_operation( + fail=True, has_optimize_operation=False + ) + + with pytest.raises(exceptions.GoogleAPICallError): + await restore_table_operation.result() + + optimize_restored_table_operation = ( + await restore_table_operation.optimize_restored_table_operation() + ) + + assert optimize_restored_table_operation is None + restore_table_operation._operations_client.get_operation.assert_not_called() diff --git a/packages/google-cloud-bigtable/tests/unit/admin_overlay/test_client.py b/packages/google-cloud-bigtable/tests/unit/admin_overlay/test_client.py new file mode 100644 index 000000000000..07922b349458 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/admin_overlay/test_client.py @@ -0,0 +1,278 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock +except ImportError: # pragma: NO COVER + import mock + +from google.api_core import exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth.credentials import AnonymousCredentials +from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import transports +from google.cloud.bigtable_admin_v2.types import bigtable_table_admin +from google.cloud.bigtable_admin_v2.overlay.services.bigtable_table_admin.client import ( + BigtableTableAdminClient, + DEFAULT_CLIENT_INFO, +) +from google.cloud.bigtable_admin_v2.overlay.types import ( + restore_table, + wait_for_consistency_request, +) + +from google.cloud.bigtable import __version__ as bigtable_version + +from test_consistency import ( + FALSE_CONSISTENCY_RESPONSE, + TRUE_CONSISTENCY_RESPONSE, +) + +import pytest + + +PARENT_NAME = "my_parent" +TABLE_NAME = "my_table" +CONSISTENCY_TOKEN = "abcdefg" + + +def _make_client(**kwargs): + kwargs["credentials"] = kwargs.get("credentials", AnonymousCredentials()) + return BigtableTableAdminClient(**kwargs) + + +@pytest.mark.parametrize( + "transport_class,transport_name", + [ + ( + transports.BigtableTableAdminGrpcTransport, + "grpc", + ), + ( + transports.BigtableTableAdminRestTransport, + "rest", + ), + ], +) +def test_bigtable_table_admin_client_client_version(transport_class, transport_name): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + _make_client(transport=transport_name) + + # call_args.kwargs is not supported in Python 3.7, so find them from the tuple + # instead. It's always the last item in the call_args tuple. + transport_init_call_kwargs = patched.call_args[-1] + assert transport_init_call_kwargs["client_info"] == DEFAULT_CLIENT_INFO + + assert ( + DEFAULT_CLIENT_INFO.client_library_version + == f"{bigtable_version}-admin-overlay" + ) + + +@pytest.mark.parametrize( + "kwargs", + [ + { + "request": bigtable_table_admin.RestoreTableRequest( + parent=PARENT_NAME, + table_id=TABLE_NAME, + ) + }, + { + "request": { + "parent": PARENT_NAME, + "table_id": TABLE_NAME, + }, + }, + { + "request": bigtable_table_admin.RestoreTableRequest( + parent=PARENT_NAME, + table_id=TABLE_NAME, + ), + "retry": mock.Mock(spec=retries.Retry), + "timeout": mock.Mock(spec=retries.Retry), + "metadata": [("foo", "bar")], + }, + ], +) +def test_bigtable_table_admin_client_restore_table(kwargs): + client = _make_client() + + with mock.patch.object(restore_table, "RestoreTableOperation") as future_mock: + with mock.patch.object(client, "_transport") as transport_mock: + with mock.patch.object(client, "_restore_table") as restore_table_mock: + operation_mock = mock.Mock() + restore_table_mock.return_value = operation_mock + client.restore_table(**kwargs) + + restore_table_mock.assert_called_once_with( + request=kwargs["request"], + retry=kwargs.get("retry", gapic_v1.method.DEFAULT), + timeout=kwargs.get("timeout", gapic_v1.method.DEFAULT), + metadata=kwargs.get("metadata", ()), + ) + future_mock.assert_called_once_with( + transport_mock.operations_client, operation_mock + ) + + +@pytest.mark.parametrize( + "kwargs,check_consistency_request_extras", + [ + ( + { + "request": wait_for_consistency_request.WaitForConsistencyRequest( + name=TABLE_NAME, + ) + }, + {}, + ), + ( + { + "request": wait_for_consistency_request.WaitForConsistencyRequest( + name=TABLE_NAME, + 
standard_read_remote_writes=bigtable_table_admin.StandardReadRemoteWrites(), + ) + }, + { + "standard_read_remote_writes": bigtable_table_admin.StandardReadRemoteWrites(), + }, + ), + ( + { + "request": wait_for_consistency_request.WaitForConsistencyRequest( + name=TABLE_NAME, + data_boost_read_local_writes=bigtable_table_admin.DataBoostReadLocalWrites(), + ) + }, + { + "data_boost_read_local_writes": bigtable_table_admin.DataBoostReadLocalWrites(), + }, + ), + ( + { + "request": { + "name": TABLE_NAME, + "data_boost_read_local_writes": {}, + } + }, + { + "data_boost_read_local_writes": bigtable_table_admin.DataBoostReadLocalWrites(), + }, + ), + ( + { + "name": TABLE_NAME, + }, + {}, + ), + ( + { + "request": wait_for_consistency_request.WaitForConsistencyRequest( + name=TABLE_NAME, + ), + "retry": mock.Mock(spec=retries.Retry), + "timeout": mock.Mock(spec=retries.Retry), + "metadata": [("foo", "bar")], + }, + {}, + ), + ], +) +def test_bigtable_table_admin_client_wait_for_consistency( + kwargs, check_consistency_request_extras +): + client = _make_client() + poll_count = 3 + check_mock_side_effect = [FALSE_CONSISTENCY_RESPONSE] * (poll_count - 1) + check_mock_side_effect.append(TRUE_CONSISTENCY_RESPONSE) + + with mock.patch.object(client, "generate_consistency_token") as generate_mock: + with mock.patch.object(client, "check_consistency") as check_mock: + generate_mock.return_value = ( + bigtable_table_admin.GenerateConsistencyTokenResponse( + consistency_token=CONSISTENCY_TOKEN, + ) + ) + + check_mock.side_effect = check_mock_side_effect + result = client.wait_for_consistency(**kwargs) + + assert result is True + + generate_mock.assert_called_once_with( + bigtable_table_admin.GenerateConsistencyTokenRequest( + name=TABLE_NAME, + ), + retry=kwargs.get("retry", gapic_v1.method.DEFAULT), + timeout=kwargs.get("timeout", gapic_v1.method.DEFAULT), + metadata=kwargs.get("metadata", ()), + ) + + expected_check_consistency_request = ( + bigtable_table_admin.CheckConsistencyRequest( + name=TABLE_NAME, + consistency_token=CONSISTENCY_TOKEN, + **check_consistency_request_extras, + ) + ) + + check_mock.assert_called_with( + expected_check_consistency_request, + retry=kwargs.get("retry", gapic_v1.method.DEFAULT), + timeout=kwargs.get("timeout", gapic_v1.method.DEFAULT), + metadata=kwargs.get("metadata", ()), + ) + + +def test_bigtable_table_admin_client_wait_for_consistency_error_in_call(): + client = _make_client() + request = wait_for_consistency_request.WaitForConsistencyRequest( + name=TABLE_NAME, + ) + + with pytest.raises(exceptions.GoogleAPICallError): + with mock.patch.object(client, "generate_consistency_token") as generate_mock: + generate_mock.side_effect = exceptions.DeadlineExceeded( + "Deadline Exceeded." + ) + client.wait_for_consistency(request) + + with pytest.raises(exceptions.GoogleAPICallError): + with mock.patch.object(client, "generate_consistency_token") as generate_mock: + with mock.patch.object(client, "check_consistency") as check_mock: + generate_mock.return_value = ( + bigtable_table_admin.GenerateConsistencyTokenResponse( + consistency_token=CONSISTENCY_TOKEN, + ) + ) + + check_mock.side_effect = exceptions.DeadlineExceeded( + "Deadline Exceeded." 
+ ) + client.wait_for_consistency(request) + + +def test_bigtable_table_admin_client_wait_for_consistency_user_error(): + client = _make_client() + with pytest.raises(ValueError): + client.wait_for_consistency( + { + "name": TABLE_NAME, + }, + name=TABLE_NAME, + ) diff --git a/packages/google-cloud-bigtable/tests/unit/admin_overlay/test_consistency.py b/packages/google-cloud-bigtable/tests/unit/admin_overlay/test_consistency.py new file mode 100644 index 000000000000..29bc0c4817ac --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/admin_overlay/test_consistency.py @@ -0,0 +1,68 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock +except ImportError: # pragma: NO COVER + import mock + +from google.cloud.bigtable_admin_v2.overlay.types import consistency +from google.cloud.bigtable_admin_v2.types import bigtable_table_admin + +import pytest + + +TRUE_CONSISTENCY_RESPONSE = bigtable_table_admin.CheckConsistencyResponse( + consistent=True +) + +FALSE_CONSISTENCY_RESPONSE = bigtable_table_admin.CheckConsistencyResponse( + consistent=False +) + + +def mock_check_consistency_callable(max_poll_count=1): + # Return False max_poll_count - 1 times, then True, for a total of + # max_poll_count calls. + side_effect = [FALSE_CONSISTENCY_RESPONSE] * (max_poll_count - 1) + side_effect.append(TRUE_CONSISTENCY_RESPONSE) + return mock.Mock(spec=["__call__"], side_effect=side_effect) + + +def test_check_consistency_future_cancel(): + check_consistency_call = mock_check_consistency_callable() + future = consistency._CheckConsistencyPollingFuture(check_consistency_call) + with pytest.raises(NotImplementedError): + future.cancel() + + with pytest.raises(NotImplementedError): + future.cancelled() + + +def test_check_consistency_future_result(): + times = 5 + check_consistency_call = mock_check_consistency_callable(times) + future = consistency._CheckConsistencyPollingFuture(check_consistency_call) + is_consistent = future.result() + + assert is_consistent + check_consistency_call.assert_has_calls([mock.call()] * times) + + # Check that calling result again doesn't produce more calls. + is_consistent = future.result() + + assert is_consistent + check_consistency_call.assert_has_calls([mock.call()] * times) diff --git a/packages/google-cloud-bigtable/tests/unit/admin_overlay/test_oneof_message.py b/packages/google-cloud-bigtable/tests/unit/admin_overlay/test_oneof_message.py new file mode 100644 index 000000000000..b9c521235caf --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/admin_overlay/test_oneof_message.py @@ -0,0 +1,164 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from google.cloud.bigtable_admin_v2.types import GcRule +from google.protobuf import duration_pb2 + +import my_oneof_message + +import pytest + + +# The following proto bytestring was constructed running printproto in +# text-to-binary mode on the following textproto for GcRule: +# +# intersection { +# rules { +# max_num_versions: 1234 +# } +# rules { +# max_age { +# seconds: 12345 +# } +# } +# } +GCRULE_RAW_PROTO_BYTESTRING = b"\x1a\x0c\n\x03\x08\xd2\t\n\x05\x12\x03\x08\xb9`" +INITIAL_VALUE = 123 +FINAL_VALUE = 456 + + +@pytest.fixture +def default_msg(): + return my_oneof_message.MyOneofMessage() + + +@pytest.fixture +def foo_msg(): + return my_oneof_message.MyOneofMessage(foo=INITIAL_VALUE) + + +def test_oneof_message_setattr_oneof_no_conflict(default_msg): + default_msg.foo = INITIAL_VALUE + default_msg.baz = INITIAL_VALUE + assert default_msg.foo == INITIAL_VALUE + assert default_msg.baz == INITIAL_VALUE + assert not default_msg.bar + + +def test_oneof_message_setattr_conflict(default_msg, foo_msg): + with pytest.raises(ValueError): + foo_msg.bar = INITIAL_VALUE + assert foo_msg.foo == INITIAL_VALUE + assert not foo_msg.bar + + default_msg.bar = INITIAL_VALUE + with pytest.raises(ValueError): + default_msg.foo = INITIAL_VALUE + assert default_msg.bar == INITIAL_VALUE + assert not default_msg.foo + + +def test_oneof_message_setattr_oneof_same_oneof_field(default_msg, foo_msg): + foo_msg.foo = FINAL_VALUE + assert foo_msg.foo == FINAL_VALUE + assert not foo_msg.bar + + default_msg.bar = INITIAL_VALUE + default_msg.bar = FINAL_VALUE + assert default_msg.bar == FINAL_VALUE + assert not default_msg.foo + + +def test_oneof_message_setattr_oneof_delattr(foo_msg): + del foo_msg.foo + foo_msg.bar = INITIAL_VALUE + assert foo_msg.bar == INITIAL_VALUE + assert not foo_msg.foo + + +def test_oneof_message_init_oneof_conflict(foo_msg): + with pytest.raises(ValueError): + my_oneof_message.MyOneofMessage(foo=INITIAL_VALUE, bar=INITIAL_VALUE) + + with pytest.raises(ValueError): + my_oneof_message.MyOneofMessage( + { + "foo": INITIAL_VALUE, + "bar": INITIAL_VALUE, + } + ) + + with pytest.raises(ValueError): + my_oneof_message.MyOneofMessage(foo_msg._pb, bar=INITIAL_VALUE) + + with pytest.raises(ValueError): + my_oneof_message.MyOneofMessage(foo_msg, bar=INITIAL_VALUE) + + +def test_oneof_message_init_oneof_no_conflict(foo_msg): + msg = my_oneof_message.MyOneofMessage(foo=INITIAL_VALUE, baz=INITIAL_VALUE) + assert msg.foo == INITIAL_VALUE + assert msg.baz == INITIAL_VALUE + assert not msg.bar + + msg = my_oneof_message.MyOneofMessage( + { + "foo": INITIAL_VALUE, + "baz": INITIAL_VALUE, + } + ) + assert msg.foo == INITIAL_VALUE + assert msg.baz == INITIAL_VALUE + assert not msg.bar + + msg = my_oneof_message.MyOneofMessage(foo_msg, baz=INITIAL_VALUE) + assert msg.foo == INITIAL_VALUE + assert msg.baz == INITIAL_VALUE + assert not msg.bar + + msg = my_oneof_message.MyOneofMessage(foo_msg._pb, baz=INITIAL_VALUE) + assert msg.foo == INITIAL_VALUE + assert msg.baz == INITIAL_VALUE + assert not msg.bar + + +def test_oneof_message_init_kwargs_override_same_field_oneof(foo_msg): + # Kwargs take 
precedence over mapping, and this should be OK + msg = my_oneof_message.MyOneofMessage( + { + "foo": INITIAL_VALUE, + }, + foo=FINAL_VALUE, + ) + assert msg.foo == FINAL_VALUE + + msg = my_oneof_message.MyOneofMessage(foo_msg, foo=FINAL_VALUE) + assert msg.foo == FINAL_VALUE + + msg = my_oneof_message.MyOneofMessage(foo_msg._pb, foo=FINAL_VALUE) + assert msg.foo == FINAL_VALUE + + +def test_gcrule_serialize_deserialize(): + test = GcRule( + intersection=GcRule.Intersection( + rules=[ + GcRule(max_num_versions=1234), + GcRule(max_age=duration_pb2.Duration(seconds=12345)), + ] + ) + ) + assert GcRule.serialize(test) == GCRULE_RAW_PROTO_BYTESTRING + assert GcRule.deserialize(GCRULE_RAW_PROTO_BYTESTRING) == test diff --git a/packages/google-cloud-bigtable/tests/unit/admin_overlay/test_restore_table.py b/packages/google-cloud-bigtable/tests/unit/admin_overlay/test_restore_table.py new file mode 100644 index 000000000000..23c6609e46d4 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/admin_overlay/test_restore_table.py @@ -0,0 +1,230 @@ +# Copyright 2025 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock +except ImportError: # pragma: NO COVER + import mock + +from google.longrunning import operations_pb2 +from google.rpc import status_pb2, code_pb2 + +from google.api_core import operation, exceptions +from google.api_core.operations_v1 import operations_client +from google.cloud.bigtable_admin_v2.types import bigtable_table_admin, table +from google.cloud.bigtable_admin_v2.overlay.types import restore_table + +import pytest + + +# Set up the mock operations +DEFAULT_MAX_POLL = 3 +RESTORE_TABLE_OPERATION_TABLE_NAME = "Test Table" +RESTORE_TABLE_OPERATION_NAME = "test/restore_table" +RESTORE_TABLE_OPERATION_METADATA = bigtable_table_admin.RestoreTableMetadata( + name=RESTORE_TABLE_OPERATION_TABLE_NAME, +) +OPTIMIZE_RESTORED_TABLE_OPERATION_NAME = "test/optimize_restore_table" +OPTIMIZE_RESTORED_TABLE_METADATA = bigtable_table_admin.OptimizeRestoredTableMetadata( + name=RESTORE_TABLE_OPERATION_TABLE_NAME, +) + +OPTIMIZE_RESTORED_TABLE_OPERATION_ID = "abcdefg" +RESTORE_TABLE_OPERATION_FINISHED_RESPONSE = table.Table( + name=RESTORE_TABLE_OPERATION_TABLE_NAME, +) +RESTORE_TABLE_OPERATION_FINISHED_ERROR = status_pb2.Status( + code=code_pb2.DEADLINE_EXCEEDED, message="Deadline Exceeded" +) + + +def make_operation_proto( + name, done=False, metadata=None, response=None, error=None, **kwargs +): + operation_proto = operations_pb2.Operation(name=name, done=done, **kwargs) + + if metadata is not None: + operation_proto.metadata.Pack(metadata._pb) + + if response is not None: + operation_proto.response.Pack(response._pb) + + if error is not None: + operation_proto.error.CopyFrom(error) + + return operation_proto + + +RESTORE_TABLE_IN_PROGRESS_OPERATION_PROTO = make_operation_proto( + name=RESTORE_TABLE_OPERATION_NAME, + done=False, + metadata=RESTORE_TABLE_OPERATION_METADATA, +) + 
+OPTIMIZE_RESTORED_TABLE_OPERATION_PROTO = make_operation_proto( + name=OPTIMIZE_RESTORED_TABLE_OPERATION_NAME, + metadata=OPTIMIZE_RESTORED_TABLE_METADATA, +) + + +# Set up the mock operation client +def mock_restore_table_operation( + max_poll_count=DEFAULT_MAX_POLL, fail=False, has_optimize_operation=True +): + client = mock.Mock(spec=operations_client.OperationsClient) + + # Set up the polling + side_effect = [RESTORE_TABLE_IN_PROGRESS_OPERATION_PROTO] * (max_poll_count - 1) + finished_operation_metadata = bigtable_table_admin.RestoreTableMetadata() + bigtable_table_admin.RestoreTableMetadata.copy_from( + finished_operation_metadata, RESTORE_TABLE_OPERATION_METADATA + ) + if has_optimize_operation: + finished_operation_metadata.optimize_table_operation_name = ( + OPTIMIZE_RESTORED_TABLE_OPERATION_ID + ) + + if fail: + final_operation_proto = make_operation_proto( + name=RESTORE_TABLE_OPERATION_NAME, + done=True, + metadata=finished_operation_metadata, + error=RESTORE_TABLE_OPERATION_FINISHED_ERROR, + ) + else: + final_operation_proto = make_operation_proto( + name=RESTORE_TABLE_OPERATION_NAME, + done=True, + metadata=finished_operation_metadata, + response=RESTORE_TABLE_OPERATION_FINISHED_RESPONSE, + ) + side_effect.append(final_operation_proto) + refresh = mock.Mock(spec=["__call__"], side_effect=side_effect) + cancel = mock.Mock(spec=["__call__"]) + future = operation.Operation( + RESTORE_TABLE_IN_PROGRESS_OPERATION_PROTO, + refresh, + cancel, + result_type=table.Table, + metadata_type=bigtable_table_admin.RestoreTableMetadata, + ) + + # Set up the optimize_restore_table_operation + client.get_operation.side_effect = [OPTIMIZE_RESTORED_TABLE_OPERATION_PROTO] + + return restore_table.RestoreTableOperation(client, future) + + +def test_restore_table_operation_client_success_has_optimize(): + restore_table_operation = mock_restore_table_operation() + + restore_table_operation.result() + optimize_restored_table_operation = ( + restore_table_operation.optimize_restored_table_operation() + ) + + assert isinstance(optimize_restored_table_operation, operation.Operation) + assert ( + optimize_restored_table_operation._operation + == OPTIMIZE_RESTORED_TABLE_OPERATION_PROTO + ) + restore_table_operation._operations_client.get_operation.assert_called_with( + name=OPTIMIZE_RESTORED_TABLE_OPERATION_ID + ) + restore_table_operation._refresh.assert_has_calls([mock.call()] * DEFAULT_MAX_POLL) + + +def test_restore_table_operation_client_success_has_optimize_multiple_calls(): + restore_table_operation = mock_restore_table_operation() + + restore_table_operation.result() + optimize_restored_table_operation = ( + restore_table_operation.optimize_restored_table_operation() + ) + + assert isinstance(optimize_restored_table_operation, operation.Operation) + assert ( + optimize_restored_table_operation._operation + == OPTIMIZE_RESTORED_TABLE_OPERATION_PROTO + ) + restore_table_operation._operations_client.get_operation.assert_called_with( + name=OPTIMIZE_RESTORED_TABLE_OPERATION_ID + ) + restore_table_operation._refresh.assert_has_calls([mock.call()] * DEFAULT_MAX_POLL) + + restore_table_operation.optimize_restored_table_operation() + restore_table_operation._refresh.assert_has_calls([mock.call()] * DEFAULT_MAX_POLL) + + +def test_restore_table_operation_success_has_optimize_call_before_done(): + restore_table_operation = mock_restore_table_operation() + + with pytest.raises(exceptions.GoogleAPIError): + restore_table_operation.optimize_restored_table_operation() + + 
restore_table_operation._operations_client.get_operation.assert_not_called() + + +def test_restore_table_operation_client_success_only_cache_after_finishing(): + restore_table_operation = mock_restore_table_operation() + + with pytest.raises(exceptions.GoogleAPIError): + restore_table_operation.optimize_restored_table_operation() + + restore_table_operation.result() + optimize_restored_table_operation = ( + restore_table_operation.optimize_restored_table_operation() + ) + + assert isinstance(optimize_restored_table_operation, operation.Operation) + assert ( + optimize_restored_table_operation._operation + == OPTIMIZE_RESTORED_TABLE_OPERATION_PROTO + ) + restore_table_operation._operations_client.get_operation.assert_called_with( + name=OPTIMIZE_RESTORED_TABLE_OPERATION_ID + ) + restore_table_operation._refresh.assert_has_calls([mock.call()] * DEFAULT_MAX_POLL) + + restore_table_operation.optimize_restored_table_operation() + restore_table_operation._refresh.assert_has_calls([mock.call()] * DEFAULT_MAX_POLL) + + +def test_restore_table_operation_success_no_optimize(): + restore_table_operation = mock_restore_table_operation(has_optimize_operation=False) + + restore_table_operation.result() + optimize_restored_table_operation = ( + restore_table_operation.optimize_restored_table_operation() + ) + + assert optimize_restored_table_operation is None + restore_table_operation._operations_client.get_operation.assert_not_called() + + +def test_restore_table_operation_exception(): + restore_table_operation = mock_restore_table_operation( + fail=True, has_optimize_operation=False + ) + + with pytest.raises(exceptions.GoogleAPICallError): + restore_table_operation.result() + + optimize_restored_table_operation = ( + restore_table_operation.optimize_restored_table_operation() + ) + + assert optimize_restored_table_operation is None + restore_table_operation._operations_client.get_operation.assert_not_called() diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index eba8e8d41ce4..7cbe6f3b13b0 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -58,10 +58,10 @@ from google.auth import credentials as ga_credentials from google.auth.exceptions import MutualTLSChannelError from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - BigtableTableAdminAsyncClient, + BaseBigtableTableAdminAsyncClient, ) from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - BigtableTableAdminClient, + BaseBigtableTableAdminClient, ) from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import pagers from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import transports @@ -138,45 +138,45 @@ def test__get_default_mtls_endpoint(): sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" non_googleapi = "api.example.com" - assert BigtableTableAdminClient._get_default_mtls_endpoint(None) is None + assert BaseBigtableTableAdminClient._get_default_mtls_endpoint(None) is None assert ( - BigtableTableAdminClient._get_default_mtls_endpoint(api_endpoint) + BaseBigtableTableAdminClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint ) assert ( - BigtableTableAdminClient._get_default_mtls_endpoint(api_mtls_endpoint) + 
BaseBigtableTableAdminClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint ) assert ( - BigtableTableAdminClient._get_default_mtls_endpoint(sandbox_endpoint) + BaseBigtableTableAdminClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint ) assert ( - BigtableTableAdminClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + BaseBigtableTableAdminClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint ) assert ( - BigtableTableAdminClient._get_default_mtls_endpoint(non_googleapi) + BaseBigtableTableAdminClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi ) def test__read_environment_variables(): - assert BigtableTableAdminClient._read_environment_variables() == ( + assert BaseBigtableTableAdminClient._read_environment_variables() == ( False, "auto", None, ) with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): - assert BigtableTableAdminClient._read_environment_variables() == ( + assert BaseBigtableTableAdminClient._read_environment_variables() == ( True, "auto", None, ) with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): - assert BigtableTableAdminClient._read_environment_variables() == ( + assert BaseBigtableTableAdminClient._read_environment_variables() == ( False, "auto", None, @@ -186,28 +186,28 @@ def test__read_environment_variables(): os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} ): with pytest.raises(ValueError) as excinfo: - BigtableTableAdminClient._read_environment_variables() + BaseBigtableTableAdminClient._read_environment_variables() assert ( str(excinfo.value) == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" ) with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - assert BigtableTableAdminClient._read_environment_variables() == ( + assert BaseBigtableTableAdminClient._read_environment_variables() == ( False, "never", None, ) with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - assert BigtableTableAdminClient._read_environment_variables() == ( + assert BaseBigtableTableAdminClient._read_environment_variables() == ( False, "always", None, ) with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}): - assert BigtableTableAdminClient._read_environment_variables() == ( + assert BaseBigtableTableAdminClient._read_environment_variables() == ( False, "auto", None, @@ -215,14 +215,14 @@ def test__read_environment_variables(): with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): with pytest.raises(MutualTLSChannelError) as excinfo: - BigtableTableAdminClient._read_environment_variables() + BaseBigtableTableAdminClient._read_environment_variables() assert ( str(excinfo.value) == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" ) with mock.patch.dict(os.environ, {"GOOGLE_CLOUD_UNIVERSE_DOMAIN": "foo.com"}): - assert BigtableTableAdminClient._read_environment_variables() == ( + assert BaseBigtableTableAdminClient._read_environment_variables() == ( False, "auto", "foo.com", @@ -233,15 +233,15 @@ def test__get_client_cert_source(): mock_provided_cert_source = mock.Mock() mock_default_cert_source = mock.Mock() - assert BigtableTableAdminClient._get_client_cert_source(None, False) is None + assert BaseBigtableTableAdminClient._get_client_cert_source(None, False) is None assert ( - BigtableTableAdminClient._get_client_cert_source( + 
BaseBigtableTableAdminClient._get_client_cert_source( mock_provided_cert_source, False ) is None ) assert ( - BigtableTableAdminClient._get_client_cert_source( + BaseBigtableTableAdminClient._get_client_cert_source( mock_provided_cert_source, True ) == mock_provided_cert_source @@ -255,11 +255,11 @@ def test__get_client_cert_source(): return_value=mock_default_cert_source, ): assert ( - BigtableTableAdminClient._get_client_cert_source(None, True) + BaseBigtableTableAdminClient._get_client_cert_source(None, True) is mock_default_cert_source ) assert ( - BigtableTableAdminClient._get_client_cert_source( + BaseBigtableTableAdminClient._get_client_cert_source( mock_provided_cert_source, "true" ) is mock_provided_cert_source @@ -267,68 +267,72 @@ def test__get_client_cert_source(): @mock.patch.object( - BigtableTableAdminClient, + BaseBigtableTableAdminClient, "_DEFAULT_ENDPOINT_TEMPLATE", - modify_default_endpoint_template(BigtableTableAdminClient), + modify_default_endpoint_template(BaseBigtableTableAdminClient), ) @mock.patch.object( - BigtableTableAdminAsyncClient, + BaseBigtableTableAdminAsyncClient, "_DEFAULT_ENDPOINT_TEMPLATE", - modify_default_endpoint_template(BigtableTableAdminAsyncClient), + modify_default_endpoint_template(BaseBigtableTableAdminAsyncClient), ) def test__get_api_endpoint(): api_override = "foo.com" mock_client_cert_source = mock.Mock() - default_universe = BigtableTableAdminClient._DEFAULT_UNIVERSE - default_endpoint = BigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format( + default_universe = BaseBigtableTableAdminClient._DEFAULT_UNIVERSE + default_endpoint = BaseBigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format( UNIVERSE_DOMAIN=default_universe ) mock_universe = "bar.com" - mock_endpoint = BigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format( + mock_endpoint = BaseBigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format( UNIVERSE_DOMAIN=mock_universe ) assert ( - BigtableTableAdminClient._get_api_endpoint( + BaseBigtableTableAdminClient._get_api_endpoint( api_override, mock_client_cert_source, default_universe, "always" ) == api_override ) assert ( - BigtableTableAdminClient._get_api_endpoint( + BaseBigtableTableAdminClient._get_api_endpoint( None, mock_client_cert_source, default_universe, "auto" ) - == BigtableTableAdminClient.DEFAULT_MTLS_ENDPOINT + == BaseBigtableTableAdminClient.DEFAULT_MTLS_ENDPOINT ) assert ( - BigtableTableAdminClient._get_api_endpoint(None, None, default_universe, "auto") + BaseBigtableTableAdminClient._get_api_endpoint( + None, None, default_universe, "auto" + ) == default_endpoint ) assert ( - BigtableTableAdminClient._get_api_endpoint( + BaseBigtableTableAdminClient._get_api_endpoint( None, None, default_universe, "always" ) - == BigtableTableAdminClient.DEFAULT_MTLS_ENDPOINT + == BaseBigtableTableAdminClient.DEFAULT_MTLS_ENDPOINT ) assert ( - BigtableTableAdminClient._get_api_endpoint( + BaseBigtableTableAdminClient._get_api_endpoint( None, mock_client_cert_source, default_universe, "always" ) - == BigtableTableAdminClient.DEFAULT_MTLS_ENDPOINT + == BaseBigtableTableAdminClient.DEFAULT_MTLS_ENDPOINT ) assert ( - BigtableTableAdminClient._get_api_endpoint(None, None, mock_universe, "never") + BaseBigtableTableAdminClient._get_api_endpoint( + None, None, mock_universe, "never" + ) == mock_endpoint ) assert ( - BigtableTableAdminClient._get_api_endpoint( + BaseBigtableTableAdminClient._get_api_endpoint( None, None, default_universe, "never" ) == default_endpoint ) with pytest.raises(MutualTLSChannelError) as excinfo: 
- BigtableTableAdminClient._get_api_endpoint( + BaseBigtableTableAdminClient._get_api_endpoint( None, mock_client_cert_source, mock_universe, "auto" ) assert ( @@ -342,22 +346,22 @@ def test__get_universe_domain(): universe_domain_env = "bar.com" assert ( - BigtableTableAdminClient._get_universe_domain( + BaseBigtableTableAdminClient._get_universe_domain( client_universe_domain, universe_domain_env ) == client_universe_domain ) assert ( - BigtableTableAdminClient._get_universe_domain(None, universe_domain_env) + BaseBigtableTableAdminClient._get_universe_domain(None, universe_domain_env) == universe_domain_env ) assert ( - BigtableTableAdminClient._get_universe_domain(None, None) - == BigtableTableAdminClient._DEFAULT_UNIVERSE + BaseBigtableTableAdminClient._get_universe_domain(None, None) + == BaseBigtableTableAdminClient._DEFAULT_UNIVERSE ) with pytest.raises(ValueError) as excinfo: - BigtableTableAdminClient._get_universe_domain("", None) + BaseBigtableTableAdminClient._get_universe_domain("", None) assert str(excinfo.value) == "Universe Domain cannot be an empty string." @@ -377,7 +381,7 @@ def test__get_universe_domain(): def test__add_cred_info_for_auth_errors(error_code, cred_info_json, show_cred_info): cred = mock.Mock(["get_cred_info"]) cred.get_cred_info = mock.Mock(return_value=cred_info_json) - client = BigtableTableAdminClient(credentials=cred) + client = BaseBigtableTableAdminClient(credentials=cred) client._transport._credentials = cred error = core_exceptions.GoogleAPICallError("message", details=["foo"]) @@ -394,7 +398,7 @@ def test__add_cred_info_for_auth_errors(error_code, cred_info_json, show_cred_in def test__add_cred_info_for_auth_errors_no_get_cred_info(error_code): cred = mock.Mock([]) assert not hasattr(cred, "get_cred_info") - client = BigtableTableAdminClient(credentials=cred) + client = BaseBigtableTableAdminClient(credentials=cred) client._transport._credentials = cred error = core_exceptions.GoogleAPICallError("message", details=[]) @@ -407,12 +411,12 @@ def test__add_cred_info_for_auth_errors_no_get_cred_info(error_code): @pytest.mark.parametrize( "client_class,transport_name", [ - (BigtableTableAdminClient, "grpc"), - (BigtableTableAdminAsyncClient, "grpc_asyncio"), - (BigtableTableAdminClient, "rest"), + (BaseBigtableTableAdminClient, "grpc"), + (BaseBigtableTableAdminAsyncClient, "grpc_asyncio"), + (BaseBigtableTableAdminClient, "rest"), ], ) -def test_bigtable_table_admin_client_from_service_account_info( +def test_base_bigtable_table_admin_client_from_service_account_info( client_class, transport_name ): creds = ga_credentials.AnonymousCredentials() @@ -440,7 +444,7 @@ def test_bigtable_table_admin_client_from_service_account_info( (transports.BigtableTableAdminRestTransport, "rest"), ], ) -def test_bigtable_table_admin_client_service_account_always_use_jwt( +def test_base_bigtable_table_admin_client_service_account_always_use_jwt( transport_class, transport_name ): with mock.patch.object( @@ -461,12 +465,12 @@ def test_bigtable_table_admin_client_service_account_always_use_jwt( @pytest.mark.parametrize( "client_class,transport_name", [ - (BigtableTableAdminClient, "grpc"), - (BigtableTableAdminAsyncClient, "grpc_asyncio"), - (BigtableTableAdminClient, "rest"), + (BaseBigtableTableAdminClient, "grpc"), + (BaseBigtableTableAdminAsyncClient, "grpc_asyncio"), + (BaseBigtableTableAdminClient, "rest"), ], ) -def test_bigtable_table_admin_client_from_service_account_file( +def test_base_bigtable_table_admin_client_from_service_account_file( client_class, 
transport_name ): creds = ga_credentials.AnonymousCredentials() @@ -493,51 +497,59 @@ def test_bigtable_table_admin_client_from_service_account_file( ) -def test_bigtable_table_admin_client_get_transport_class(): - transport = BigtableTableAdminClient.get_transport_class() +def test_base_bigtable_table_admin_client_get_transport_class(): + transport = BaseBigtableTableAdminClient.get_transport_class() available_transports = [ transports.BigtableTableAdminGrpcTransport, transports.BigtableTableAdminRestTransport, ] assert transport in available_transports - transport = BigtableTableAdminClient.get_transport_class("grpc") + transport = BaseBigtableTableAdminClient.get_transport_class("grpc") assert transport == transports.BigtableTableAdminGrpcTransport @pytest.mark.parametrize( "client_class,transport_class,transport_name", [ - (BigtableTableAdminClient, transports.BigtableTableAdminGrpcTransport, "grpc"), ( - BigtableTableAdminAsyncClient, + BaseBigtableTableAdminClient, + transports.BigtableTableAdminGrpcTransport, + "grpc", + ), + ( + BaseBigtableTableAdminAsyncClient, transports.BigtableTableAdminGrpcAsyncIOTransport, "grpc_asyncio", ), - (BigtableTableAdminClient, transports.BigtableTableAdminRestTransport, "rest"), + ( + BaseBigtableTableAdminClient, + transports.BigtableTableAdminRestTransport, + "rest", + ), ], ) @mock.patch.object( - BigtableTableAdminClient, + BaseBigtableTableAdminClient, "_DEFAULT_ENDPOINT_TEMPLATE", - modify_default_endpoint_template(BigtableTableAdminClient), + modify_default_endpoint_template(BaseBigtableTableAdminClient), ) @mock.patch.object( - BigtableTableAdminAsyncClient, + BaseBigtableTableAdminAsyncClient, "_DEFAULT_ENDPOINT_TEMPLATE", - modify_default_endpoint_template(BigtableTableAdminAsyncClient), + modify_default_endpoint_template(BaseBigtableTableAdminAsyncClient), ) -def test_bigtable_table_admin_client_client_options( +def test_base_bigtable_table_admin_client_client_options( client_class, transport_class, transport_name ): # Check that if channel is provided we won't create a new one. - with mock.patch.object(BigtableTableAdminClient, "get_transport_class") as gtc: + with mock.patch.object(BaseBigtableTableAdminClient, "get_transport_class") as gtc: transport = transport_class(credentials=ga_credentials.AnonymousCredentials()) client = client_class(transport=transport) gtc.assert_not_called() # Check that if channel is provided via str we will create a new one. 
- with mock.patch.object(BigtableTableAdminClient, "get_transport_class") as gtc: + with mock.patch.object(BaseBigtableTableAdminClient, "get_transport_class") as gtc: client = client_class(transport=transport_name) gtc.assert_called() @@ -661,37 +673,37 @@ def test_bigtable_table_admin_client_client_options( "client_class,transport_class,transport_name,use_client_cert_env", [ ( - BigtableTableAdminClient, + BaseBigtableTableAdminClient, transports.BigtableTableAdminGrpcTransport, "grpc", "true", ), ( - BigtableTableAdminAsyncClient, + BaseBigtableTableAdminAsyncClient, transports.BigtableTableAdminGrpcAsyncIOTransport, "grpc_asyncio", "true", ), ( - BigtableTableAdminClient, + BaseBigtableTableAdminClient, transports.BigtableTableAdminGrpcTransport, "grpc", "false", ), ( - BigtableTableAdminAsyncClient, + BaseBigtableTableAdminAsyncClient, transports.BigtableTableAdminGrpcAsyncIOTransport, "grpc_asyncio", "false", ), ( - BigtableTableAdminClient, + BaseBigtableTableAdminClient, transports.BigtableTableAdminRestTransport, "rest", "true", ), ( - BigtableTableAdminClient, + BaseBigtableTableAdminClient, transports.BigtableTableAdminRestTransport, "rest", "false", @@ -699,17 +711,17 @@ def test_bigtable_table_admin_client_client_options( ], ) @mock.patch.object( - BigtableTableAdminClient, + BaseBigtableTableAdminClient, "_DEFAULT_ENDPOINT_TEMPLATE", - modify_default_endpoint_template(BigtableTableAdminClient), + modify_default_endpoint_template(BaseBigtableTableAdminClient), ) @mock.patch.object( - BigtableTableAdminAsyncClient, + BaseBigtableTableAdminAsyncClient, "_DEFAULT_ENDPOINT_TEMPLATE", - modify_default_endpoint_template(BigtableTableAdminAsyncClient), + modify_default_endpoint_template(BaseBigtableTableAdminAsyncClient), ) @mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_bigtable_table_admin_client_mtls_env_auto( +def test_base_bigtable_table_admin_client_mtls_env_auto( client_class, transport_class, transport_name, use_client_cert_env ): # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default @@ -812,19 +824,21 @@ def test_bigtable_table_admin_client_mtls_env_auto( @pytest.mark.parametrize( - "client_class", [BigtableTableAdminClient, BigtableTableAdminAsyncClient] + "client_class", [BaseBigtableTableAdminClient, BaseBigtableTableAdminAsyncClient] ) @mock.patch.object( - BigtableTableAdminClient, + BaseBigtableTableAdminClient, "DEFAULT_ENDPOINT", - modify_default_endpoint(BigtableTableAdminClient), + modify_default_endpoint(BaseBigtableTableAdminClient), ) @mock.patch.object( - BigtableTableAdminAsyncClient, + BaseBigtableTableAdminAsyncClient, "DEFAULT_ENDPOINT", - modify_default_endpoint(BigtableTableAdminAsyncClient), + modify_default_endpoint(BaseBigtableTableAdminAsyncClient), ) -def test_bigtable_table_admin_client_get_mtls_endpoint_and_cert_source(client_class): +def test_base_bigtable_table_admin_client_get_mtls_endpoint_and_cert_source( + client_class, +): mock_client_cert_source = mock.Mock() # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". 
@@ -916,27 +930,27 @@ def test_bigtable_table_admin_client_get_mtls_endpoint_and_cert_source(client_cl @pytest.mark.parametrize( - "client_class", [BigtableTableAdminClient, BigtableTableAdminAsyncClient] + "client_class", [BaseBigtableTableAdminClient, BaseBigtableTableAdminAsyncClient] ) @mock.patch.object( - BigtableTableAdminClient, + BaseBigtableTableAdminClient, "_DEFAULT_ENDPOINT_TEMPLATE", - modify_default_endpoint_template(BigtableTableAdminClient), + modify_default_endpoint_template(BaseBigtableTableAdminClient), ) @mock.patch.object( - BigtableTableAdminAsyncClient, + BaseBigtableTableAdminAsyncClient, "_DEFAULT_ENDPOINT_TEMPLATE", - modify_default_endpoint_template(BigtableTableAdminAsyncClient), + modify_default_endpoint_template(BaseBigtableTableAdminAsyncClient), ) -def test_bigtable_table_admin_client_client_api_endpoint(client_class): +def test_base_bigtable_table_admin_client_client_api_endpoint(client_class): mock_client_cert_source = client_cert_source_callback api_override = "foo.com" - default_universe = BigtableTableAdminClient._DEFAULT_UNIVERSE - default_endpoint = BigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format( + default_universe = BaseBigtableTableAdminClient._DEFAULT_UNIVERSE + default_endpoint = BaseBigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format( UNIVERSE_DOMAIN=default_universe ) mock_universe = "bar.com" - mock_endpoint = BigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format( + mock_endpoint = BaseBigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format( UNIVERSE_DOMAIN=mock_universe ) @@ -1004,16 +1018,24 @@ def test_bigtable_table_admin_client_client_api_endpoint(client_class): @pytest.mark.parametrize( "client_class,transport_class,transport_name", [ - (BigtableTableAdminClient, transports.BigtableTableAdminGrpcTransport, "grpc"), ( - BigtableTableAdminAsyncClient, + BaseBigtableTableAdminClient, + transports.BigtableTableAdminGrpcTransport, + "grpc", + ), + ( + BaseBigtableTableAdminAsyncClient, transports.BigtableTableAdminGrpcAsyncIOTransport, "grpc_asyncio", ), - (BigtableTableAdminClient, transports.BigtableTableAdminRestTransport, "rest"), + ( + BaseBigtableTableAdminClient, + transports.BigtableTableAdminRestTransport, + "rest", + ), ], ) -def test_bigtable_table_admin_client_client_options_scopes( +def test_base_bigtable_table_admin_client_client_options_scopes( client_class, transport_class, transport_name ): # Check the case scopes are provided. @@ -1042,26 +1064,26 @@ def test_bigtable_table_admin_client_client_options_scopes( "client_class,transport_class,transport_name,grpc_helpers", [ ( - BigtableTableAdminClient, + BaseBigtableTableAdminClient, transports.BigtableTableAdminGrpcTransport, "grpc", grpc_helpers, ), ( - BigtableTableAdminAsyncClient, + BaseBigtableTableAdminAsyncClient, transports.BigtableTableAdminGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async, ), ( - BigtableTableAdminClient, + BaseBigtableTableAdminClient, transports.BigtableTableAdminRestTransport, "rest", None, ), ], ) -def test_bigtable_table_admin_client_client_options_credentials_file( +def test_base_bigtable_table_admin_client_client_options_credentials_file( client_class, transport_class, transport_name, grpc_helpers ): # Check the case credentials file is provided. 
@@ -1085,12 +1107,12 @@ def test_bigtable_table_admin_client_client_options_credentials_file( ) -def test_bigtable_table_admin_client_client_options_from_dict(): +def test_base_bigtable_table_admin_client_client_options_from_dict(): with mock.patch( "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.BigtableTableAdminGrpcTransport.__init__" ) as grpc_transport: grpc_transport.return_value = None - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( client_options={"api_endpoint": "squid.clam.whelk"} ) grpc_transport.assert_called_once_with( @@ -1110,20 +1132,20 @@ def test_bigtable_table_admin_client_client_options_from_dict(): "client_class,transport_class,transport_name,grpc_helpers", [ ( - BigtableTableAdminClient, + BaseBigtableTableAdminClient, transports.BigtableTableAdminGrpcTransport, "grpc", grpc_helpers, ), ( - BigtableTableAdminAsyncClient, + BaseBigtableTableAdminAsyncClient, transports.BigtableTableAdminGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async, ), ], ) -def test_bigtable_table_admin_client_create_channel_credentials_file( +def test_base_bigtable_table_admin_client_create_channel_credentials_file( client_class, transport_class, transport_name, grpc_helpers ): # Check the case credentials file is provided. @@ -1190,7 +1212,7 @@ def test_bigtable_table_admin_client_create_channel_credentials_file( ], ) def test_create_table(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -1225,7 +1247,7 @@ def test_create_table(request_type, transport: str = "grpc"): def test_create_table_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. 
- client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -1256,7 +1278,7 @@ def test_create_table_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -1294,7 +1316,7 @@ async def test_create_table_async_use_cached_wrapped_rpc( # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -1334,7 +1356,7 @@ async def test_create_table_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.CreateTableRequest, ): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -1374,7 +1396,7 @@ async def test_create_table_async_from_dict(): def test_create_table_field_headers(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -1404,7 +1426,7 @@ def test_create_table_field_headers(): @pytest.mark.asyncio async def test_create_table_field_headers_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -1433,7 +1455,7 @@ async def test_create_table_field_headers_async(): def test_create_table_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -1465,7 +1487,7 @@ def test_create_table_flattened(): def test_create_table_flattened_error(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -1482,7 +1504,7 @@ def test_create_table_flattened_error(): @pytest.mark.asyncio async def test_create_table_flattened_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -1517,7 +1539,7 @@ async def test_create_table_flattened_async(): @pytest.mark.asyncio async def test_create_table_flattened_error_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -1540,7 +1562,7 @@ async def test_create_table_flattened_error_async(): ], ) def test_create_table_from_snapshot(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -1570,7 +1592,7 @@ def test_create_table_from_snapshot(request_type, transport: str = "grpc"): def test_create_table_from_snapshot_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. 
- client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -1605,7 +1627,7 @@ def test_create_table_from_snapshot_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -1653,7 +1675,7 @@ async def test_create_table_from_snapshot_async_use_cached_wrapped_rpc( # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -1698,7 +1720,7 @@ async def test_create_table_from_snapshot_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.CreateTableFromSnapshotRequest, ): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -1733,7 +1755,7 @@ async def test_create_table_from_snapshot_async_from_dict(): def test_create_table_from_snapshot_field_headers(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -1765,7 +1787,7 @@ def test_create_table_from_snapshot_field_headers(): @pytest.mark.asyncio async def test_create_table_from_snapshot_field_headers_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -1798,7 +1820,7 @@ async def test_create_table_from_snapshot_field_headers_async(): def test_create_table_from_snapshot_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -1832,7 +1854,7 @@ def test_create_table_from_snapshot_flattened(): def test_create_table_from_snapshot_flattened_error(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -1849,7 +1871,7 @@ def test_create_table_from_snapshot_flattened_error(): @pytest.mark.asyncio async def test_create_table_from_snapshot_flattened_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -1888,7 +1910,7 @@ async def test_create_table_from_snapshot_flattened_async(): @pytest.mark.asyncio async def test_create_table_from_snapshot_flattened_error_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -1911,7 +1933,7 @@ async def test_create_table_from_snapshot_flattened_error_async(): ], ) def test_list_tables(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -1942,7 +1964,7 @@ def test_list_tables(request_type, transport: str = "grpc"): def test_list_tables_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 
fields are # automatically populated, according to AIP-4235, with non-empty requests. - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -1973,7 +1995,7 @@ def test_list_tables_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -2011,7 +2033,7 @@ async def test_list_tables_async_use_cached_wrapped_rpc( # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -2050,7 +2072,7 @@ async def test_list_tables_async_use_cached_wrapped_rpc( async def test_list_tables_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.ListTablesRequest ): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -2086,7 +2108,7 @@ async def test_list_tables_async_from_dict(): def test_list_tables_field_headers(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -2116,7 +2138,7 @@ def test_list_tables_field_headers(): @pytest.mark.asyncio async def test_list_tables_field_headers_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -2147,7 +2169,7 @@ async def test_list_tables_field_headers_async(): def test_list_tables_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -2171,7 +2193,7 @@ def test_list_tables_flattened(): def test_list_tables_flattened_error(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -2186,7 +2208,7 @@ def test_list_tables_flattened_error(): @pytest.mark.asyncio async def test_list_tables_flattened_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -2215,7 +2237,7 @@ async def test_list_tables_flattened_async(): @pytest.mark.asyncio async def test_list_tables_flattened_error_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -2229,7 +2251,7 @@ async def test_list_tables_flattened_error_async(): def test_list_tables_pager(transport_name: str = "grpc"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport_name, ) @@ -2283,7 +2305,7 @@ def test_list_tables_pager(transport_name: str = "grpc"): def test_list_tables_pages(transport_name: str = "grpc"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport_name, ) @@ -2325,7 +2347,7 @@ def 
test_list_tables_pages(transport_name: str = "grpc"): @pytest.mark.asyncio async def test_list_tables_async_pager(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -2375,7 +2397,7 @@ async def test_list_tables_async_pager(): @pytest.mark.asyncio async def test_list_tables_async_pages(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -2430,7 +2452,7 @@ async def test_list_tables_async_pages(): ], ) def test_get_table(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -2465,7 +2487,7 @@ def test_get_table(request_type, transport: str = "grpc"): def test_get_table_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -2494,7 +2516,7 @@ def test_get_table_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -2530,7 +2552,7 @@ async def test_get_table_async_use_cached_wrapped_rpc(transport: str = "grpc_asy # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -2569,7 +2591,7 @@ async def test_get_table_async_use_cached_wrapped_rpc(transport: str = "grpc_asy async def test_get_table_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.GetTableRequest ): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -2609,7 +2631,7 @@ async def test_get_table_async_from_dict(): def test_get_table_field_headers(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -2639,7 +2661,7 @@ def test_get_table_field_headers(): @pytest.mark.asyncio async def test_get_table_field_headers_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -2668,7 +2690,7 @@ async def test_get_table_field_headers_async(): def test_get_table_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -2692,7 +2714,7 @@ def test_get_table_flattened(): def test_get_table_flattened_error(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -2707,7 +2729,7 @@ def test_get_table_flattened_error(): @pytest.mark.asyncio async def test_get_table_flattened_async(): - 
client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -2734,7 +2756,7 @@ async def test_get_table_flattened_async(): @pytest.mark.asyncio async def test_get_table_flattened_error_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -2755,7 +2777,7 @@ async def test_get_table_flattened_error_async(): ], ) def test_update_table(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -2783,7 +2805,7 @@ def test_update_table(request_type, transport: str = "grpc"): def test_update_table_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -2808,7 +2830,7 @@ def test_update_table_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -2851,7 +2873,7 @@ async def test_update_table_async_use_cached_wrapped_rpc( # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -2896,7 +2918,7 @@ async def test_update_table_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.UpdateTableRequest, ): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -2929,7 +2951,7 @@ async def test_update_table_async_from_dict(): def test_update_table_field_headers(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -2959,7 +2981,7 @@ def test_update_table_field_headers(): @pytest.mark.asyncio async def test_update_table_field_headers_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -2990,7 +3012,7 @@ async def test_update_table_field_headers_async(): def test_update_table_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -3018,7 +3040,7 @@ def test_update_table_flattened(): def test_update_table_flattened_error(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -3034,7 +3056,7 @@ def test_update_table_flattened_error(): @pytest.mark.asyncio async def test_update_table_flattened_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -3067,7 +3089,7 @@ 
async def test_update_table_flattened_async(): @pytest.mark.asyncio async def test_update_table_flattened_error_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -3089,7 +3111,7 @@ async def test_update_table_flattened_error_async(): ], ) def test_delete_table(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -3117,7 +3139,7 @@ def test_delete_table(request_type, transport: str = "grpc"): def test_delete_table_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -3146,7 +3168,7 @@ def test_delete_table_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -3184,7 +3206,7 @@ async def test_delete_table_async_use_cached_wrapped_rpc( # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -3224,7 +3246,7 @@ async def test_delete_table_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.DeleteTableRequest, ): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -3255,7 +3277,7 @@ async def test_delete_table_async_from_dict(): def test_delete_table_field_headers(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -3285,7 +3307,7 @@ def test_delete_table_field_headers(): @pytest.mark.asyncio async def test_delete_table_field_headers_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -3314,7 +3336,7 @@ async def test_delete_table_field_headers_async(): def test_delete_table_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -3338,7 +3360,7 @@ def test_delete_table_flattened(): def test_delete_table_flattened_error(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -3353,7 +3375,7 @@ def test_delete_table_flattened_error(): @pytest.mark.asyncio async def test_delete_table_flattened_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -3380,7 +3402,7 @@ async def test_delete_table_flattened_async(): @pytest.mark.asyncio async def test_delete_table_flattened_error_async(): - client = 
BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -3401,7 +3423,7 @@ async def test_delete_table_flattened_error_async(): ], ) def test_undelete_table(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -3429,7 +3451,7 @@ def test_undelete_table(request_type, transport: str = "grpc"): def test_undelete_table_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -3458,7 +3480,7 @@ def test_undelete_table_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -3501,7 +3523,7 @@ async def test_undelete_table_async_use_cached_wrapped_rpc( # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -3546,7 +3568,7 @@ async def test_undelete_table_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.UndeleteTableRequest, ): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -3579,7 +3601,7 @@ async def test_undelete_table_async_from_dict(): def test_undelete_table_field_headers(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -3609,7 +3631,7 @@ def test_undelete_table_field_headers(): @pytest.mark.asyncio async def test_undelete_table_field_headers_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -3640,7 +3662,7 @@ async def test_undelete_table_field_headers_async(): def test_undelete_table_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -3664,7 +3686,7 @@ def test_undelete_table_flattened(): def test_undelete_table_flattened_error(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -3679,7 +3701,7 @@ def test_undelete_table_flattened_error(): @pytest.mark.asyncio async def test_undelete_table_flattened_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -3708,7 +3730,7 @@ async def test_undelete_table_flattened_async(): @pytest.mark.asyncio async def test_undelete_table_flattened_error_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( 
credentials=async_anonymous_credentials(), ) @@ -3729,7 +3751,7 @@ async def test_undelete_table_flattened_error_async(): ], ) def test_create_authorized_view(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -3759,7 +3781,7 @@ def test_create_authorized_view(request_type, transport: str = "grpc"): def test_create_authorized_view_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -3792,7 +3814,7 @@ def test_create_authorized_view_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -3840,7 +3862,7 @@ async def test_create_authorized_view_async_use_cached_wrapped_rpc( # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -3885,7 +3907,7 @@ async def test_create_authorized_view_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.CreateAuthorizedViewRequest, ): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -3920,7 +3942,7 @@ async def test_create_authorized_view_async_from_dict(): def test_create_authorized_view_field_headers(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -3952,7 +3974,7 @@ def test_create_authorized_view_field_headers(): @pytest.mark.asyncio async def test_create_authorized_view_field_headers_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -3985,7 +4007,7 @@ async def test_create_authorized_view_field_headers_async(): def test_create_authorized_view_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -4019,7 +4041,7 @@ def test_create_authorized_view_flattened(): def test_create_authorized_view_flattened_error(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -4036,7 +4058,7 @@ def test_create_authorized_view_flattened_error(): @pytest.mark.asyncio async def test_create_authorized_view_flattened_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -4075,7 +4097,7 @@ async def test_create_authorized_view_flattened_async(): @pytest.mark.asyncio async def test_create_authorized_view_flattened_error_async(): - client = BigtableTableAdminAsyncClient( + 
client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -4098,7 +4120,7 @@ async def test_create_authorized_view_flattened_error_async(): ], ) def test_list_authorized_views(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -4131,7 +4153,7 @@ def test_list_authorized_views(request_type, transport: str = "grpc"): def test_list_authorized_views_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -4164,7 +4186,7 @@ def test_list_authorized_views_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -4207,7 +4229,7 @@ async def test_list_authorized_views_async_use_cached_wrapped_rpc( # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -4247,7 +4269,7 @@ async def test_list_authorized_views_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.ListAuthorizedViewsRequest, ): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -4285,7 +4307,7 @@ async def test_list_authorized_views_async_from_dict(): def test_list_authorized_views_field_headers(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -4317,7 +4339,7 @@ def test_list_authorized_views_field_headers(): @pytest.mark.asyncio async def test_list_authorized_views_field_headers_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -4350,7 +4372,7 @@ async def test_list_authorized_views_field_headers_async(): def test_list_authorized_views_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -4376,7 +4398,7 @@ def test_list_authorized_views_flattened(): def test_list_authorized_views_flattened_error(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -4391,7 +4413,7 @@ def test_list_authorized_views_flattened_error(): @pytest.mark.asyncio async def test_list_authorized_views_flattened_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -4422,7 +4444,7 @@ async def test_list_authorized_views_flattened_async(): @pytest.mark.asyncio async def test_list_authorized_views_flattened_error_async(): - client = 
BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -4436,7 +4458,7 @@ async def test_list_authorized_views_flattened_error_async(): def test_list_authorized_views_pager(transport_name: str = "grpc"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport_name, ) @@ -4492,7 +4514,7 @@ def test_list_authorized_views_pager(transport_name: str = "grpc"): def test_list_authorized_views_pages(transport_name: str = "grpc"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport_name, ) @@ -4536,7 +4558,7 @@ def test_list_authorized_views_pages(transport_name: str = "grpc"): @pytest.mark.asyncio async def test_list_authorized_views_async_pager(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -4588,7 +4610,7 @@ async def test_list_authorized_views_async_pager(): @pytest.mark.asyncio async def test_list_authorized_views_async_pages(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -4645,7 +4667,7 @@ async def test_list_authorized_views_async_pages(): ], ) def test_get_authorized_view(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -4682,7 +4704,7 @@ def test_get_authorized_view(request_type, transport: str = "grpc"): def test_get_authorized_view_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. 
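The recurring "coverage failsafe" comment above refers to AIP-4235 request-ID auto-population: when a caller leaves a UUID4-annotated request field unset, the generated client fills it in before sending, and these tests make sure that behavior is exercised with non-empty requests. As a rough illustration only (this is not part of the patch, and the request_id field name is a made-up stand-in), the idea looks like this:

    import uuid

    def auto_populate_request_id(request_id=None):
        # Keep a caller-supplied value; otherwise generate a fresh UUID4, so
        # the field that reaches the wire is never empty.
        return request_id if request_id else str(uuid.uuid4())

    assert auto_populate_request_id("explicit-id") == "explicit-id"
    assert auto_populate_request_id() != ""  # auto-populated when omitted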
- client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -4713,7 +4735,7 @@ def test_get_authorized_view_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -4755,7 +4777,7 @@ async def test_get_authorized_view_async_use_cached_wrapped_rpc( # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -4795,7 +4817,7 @@ async def test_get_authorized_view_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.GetAuthorizedViewRequest, ): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -4837,7 +4859,7 @@ async def test_get_authorized_view_async_from_dict(): def test_get_authorized_view_field_headers(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -4869,7 +4891,7 @@ def test_get_authorized_view_field_headers(): @pytest.mark.asyncio async def test_get_authorized_view_field_headers_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -4902,7 +4924,7 @@ async def test_get_authorized_view_field_headers_async(): def test_get_authorized_view_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -4928,7 +4950,7 @@ def test_get_authorized_view_flattened(): def test_get_authorized_view_flattened_error(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -4943,7 +4965,7 @@ def test_get_authorized_view_flattened_error(): @pytest.mark.asyncio async def test_get_authorized_view_flattened_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -4974,7 +4996,7 @@ async def test_get_authorized_view_flattened_async(): @pytest.mark.asyncio async def test_get_authorized_view_flattened_error_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -4995,7 +5017,7 @@ async def test_get_authorized_view_flattened_error_async(): ], ) def test_update_authorized_view(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -5025,7 +5047,7 @@ def test_update_authorized_view(request_type, transport: str = "grpc"): def test_update_authorized_view_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty 
requests. - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -5052,7 +5074,7 @@ def test_update_authorized_view_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -5100,7 +5122,7 @@ async def test_update_authorized_view_async_use_cached_wrapped_rpc( # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -5145,7 +5167,7 @@ async def test_update_authorized_view_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.UpdateAuthorizedViewRequest, ): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -5180,7 +5202,7 @@ async def test_update_authorized_view_async_from_dict(): def test_update_authorized_view_field_headers(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -5212,7 +5234,7 @@ def test_update_authorized_view_field_headers(): @pytest.mark.asyncio async def test_update_authorized_view_field_headers_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -5245,7 +5267,7 @@ async def test_update_authorized_view_field_headers_async(): def test_update_authorized_view_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -5275,7 +5297,7 @@ def test_update_authorized_view_flattened(): def test_update_authorized_view_flattened_error(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -5291,7 +5313,7 @@ def test_update_authorized_view_flattened_error(): @pytest.mark.asyncio async def test_update_authorized_view_flattened_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -5326,7 +5348,7 @@ async def test_update_authorized_view_flattened_async(): @pytest.mark.asyncio async def test_update_authorized_view_flattened_error_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -5348,7 +5370,7 @@ async def test_update_authorized_view_flattened_error_async(): ], ) def test_delete_authorized_view(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -5378,7 +5400,7 @@ def test_delete_authorized_view(request_type, transport: str = "grpc"): def test_delete_authorized_view_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # 
automatically populated, according to AIP-4235, with non-empty requests. - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -5411,7 +5433,7 @@ def test_delete_authorized_view_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -5454,7 +5476,7 @@ async def test_delete_authorized_view_async_use_cached_wrapped_rpc( # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -5494,7 +5516,7 @@ async def test_delete_authorized_view_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.DeleteAuthorizedViewRequest, ): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -5527,7 +5549,7 @@ async def test_delete_authorized_view_async_from_dict(): def test_delete_authorized_view_field_headers(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -5559,7 +5581,7 @@ def test_delete_authorized_view_field_headers(): @pytest.mark.asyncio async def test_delete_authorized_view_field_headers_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -5590,7 +5612,7 @@ async def test_delete_authorized_view_field_headers_async(): def test_delete_authorized_view_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -5616,7 +5638,7 @@ def test_delete_authorized_view_flattened(): def test_delete_authorized_view_flattened_error(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -5631,7 +5653,7 @@ def test_delete_authorized_view_flattened_error(): @pytest.mark.asyncio async def test_delete_authorized_view_flattened_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -5660,7 +5682,7 @@ async def test_delete_authorized_view_flattened_async(): @pytest.mark.asyncio async def test_delete_authorized_view_flattened_error_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -5681,7 +5703,7 @@ async def test_delete_authorized_view_flattened_error_async(): ], ) def test_modify_column_families(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -5718,7 +5740,7 @@ def test_modify_column_families(request_type, transport: str = "grpc"): def test_modify_column_families_non_empty_request_with_auto_populated_field(): # This test is a 
coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -5749,7 +5771,7 @@ def test_modify_column_families_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -5792,7 +5814,7 @@ async def test_modify_column_families_async_use_cached_wrapped_rpc( # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -5832,7 +5854,7 @@ async def test_modify_column_families_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.ModifyColumnFamiliesRequest, ): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -5874,7 +5896,7 @@ async def test_modify_column_families_async_from_dict(): def test_modify_column_families_field_headers(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -5906,7 +5928,7 @@ def test_modify_column_families_field_headers(): @pytest.mark.asyncio async def test_modify_column_families_field_headers_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -5937,7 +5959,7 @@ async def test_modify_column_families_field_headers_async(): def test_modify_column_families_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -5973,7 +5995,7 @@ def test_modify_column_families_flattened(): def test_modify_column_families_flattened_error(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -5993,7 +6015,7 @@ def test_modify_column_families_flattened_error(): @pytest.mark.asyncio async def test_modify_column_families_flattened_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -6032,7 +6054,7 @@ async def test_modify_column_families_flattened_async(): @pytest.mark.asyncio async def test_modify_column_families_flattened_error_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -6058,7 +6080,7 @@ async def test_modify_column_families_flattened_error_async(): ], ) def test_drop_row_range(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -6086,7 +6108,7 @@ def test_drop_row_range(request_type, transport: str = "grpc"): def 
test_drop_row_range_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -6115,7 +6137,7 @@ def test_drop_row_range_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -6153,7 +6175,7 @@ async def test_drop_row_range_async_use_cached_wrapped_rpc( # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -6193,7 +6215,7 @@ async def test_drop_row_range_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.DropRowRangeRequest, ): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -6224,7 +6246,7 @@ async def test_drop_row_range_async_from_dict(): def test_drop_row_range_field_headers(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -6254,7 +6276,7 @@ def test_drop_row_range_field_headers(): @pytest.mark.asyncio async def test_drop_row_range_field_headers_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -6290,7 +6312,7 @@ async def test_drop_row_range_field_headers_async(): ], ) def test_generate_consistency_token(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -6323,7 +6345,7 @@ def test_generate_consistency_token(request_type, transport: str = "grpc"): def test_generate_consistency_token_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. 
- client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -6354,7 +6376,7 @@ def test_generate_consistency_token_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -6397,7 +6419,7 @@ async def test_generate_consistency_token_async_use_cached_wrapped_rpc( # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -6437,7 +6459,7 @@ async def test_generate_consistency_token_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.GenerateConsistencyTokenRequest, ): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -6475,7 +6497,7 @@ async def test_generate_consistency_token_async_from_dict(): def test_generate_consistency_token_field_headers(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -6507,7 +6529,7 @@ def test_generate_consistency_token_field_headers(): @pytest.mark.asyncio async def test_generate_consistency_token_field_headers_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -6540,7 +6562,7 @@ async def test_generate_consistency_token_field_headers_async(): def test_generate_consistency_token_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -6566,7 +6588,7 @@ def test_generate_consistency_token_flattened(): def test_generate_consistency_token_flattened_error(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -6581,7 +6603,7 @@ def test_generate_consistency_token_flattened_error(): @pytest.mark.asyncio async def test_generate_consistency_token_flattened_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -6612,7 +6634,7 @@ async def test_generate_consistency_token_flattened_async(): @pytest.mark.asyncio async def test_generate_consistency_token_flattened_error_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -6633,7 +6655,7 @@ async def test_generate_consistency_token_flattened_error_async(): ], ) def test_check_consistency(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -6666,7 +6688,7 @@ def test_check_consistency(request_type, transport: str = "grpc"): def test_check_consistency_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to 
make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -6699,7 +6721,7 @@ def test_check_consistency_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -6739,7 +6761,7 @@ async def test_check_consistency_async_use_cached_wrapped_rpc( # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -6779,7 +6801,7 @@ async def test_check_consistency_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.CheckConsistencyRequest, ): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -6817,7 +6839,7 @@ async def test_check_consistency_async_from_dict(): def test_check_consistency_field_headers(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -6849,7 +6871,7 @@ def test_check_consistency_field_headers(): @pytest.mark.asyncio async def test_check_consistency_field_headers_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -6882,7 +6904,7 @@ async def test_check_consistency_field_headers_async(): def test_check_consistency_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -6912,7 +6934,7 @@ def test_check_consistency_flattened(): def test_check_consistency_flattened_error(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -6928,7 +6950,7 @@ def test_check_consistency_flattened_error(): @pytest.mark.asyncio async def test_check_consistency_flattened_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -6963,7 +6985,7 @@ async def test_check_consistency_flattened_async(): @pytest.mark.asyncio async def test_check_consistency_flattened_error_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -6985,7 +7007,7 @@ async def test_check_consistency_flattened_error_async(): ], ) def test_snapshot_table(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -7013,7 +7035,7 @@ def test_snapshot_table(request_type, transport: str = "grpc"): def test_snapshot_table_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically 
populated, according to AIP-4235, with non-empty requests. - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -7048,7 +7070,7 @@ def test_snapshot_table_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -7091,7 +7113,7 @@ async def test_snapshot_table_async_use_cached_wrapped_rpc( # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -7136,7 +7158,7 @@ async def test_snapshot_table_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.SnapshotTableRequest, ): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -7169,7 +7191,7 @@ async def test_snapshot_table_async_from_dict(): def test_snapshot_table_field_headers(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -7199,7 +7221,7 @@ def test_snapshot_table_field_headers(): @pytest.mark.asyncio async def test_snapshot_table_field_headers_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -7230,7 +7252,7 @@ async def test_snapshot_table_field_headers_async(): def test_snapshot_table_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -7266,7 +7288,7 @@ def test_snapshot_table_flattened(): def test_snapshot_table_flattened_error(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -7284,7 +7306,7 @@ def test_snapshot_table_flattened_error(): @pytest.mark.asyncio async def test_snapshot_table_flattened_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -7325,7 +7347,7 @@ async def test_snapshot_table_flattened_async(): @pytest.mark.asyncio async def test_snapshot_table_flattened_error_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -7349,7 +7371,7 @@ async def test_snapshot_table_flattened_error_async(): ], ) def test_get_snapshot(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -7386,7 +7408,7 @@ def test_get_snapshot(request_type, transport: str = "grpc"): def test_get_snapshot_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. 
- client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -7415,7 +7437,7 @@ def test_get_snapshot_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -7453,7 +7475,7 @@ async def test_get_snapshot_async_use_cached_wrapped_rpc( # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -7493,7 +7515,7 @@ async def test_get_snapshot_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.GetSnapshotRequest, ): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -7535,7 +7557,7 @@ async def test_get_snapshot_async_from_dict(): def test_get_snapshot_field_headers(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -7565,7 +7587,7 @@ def test_get_snapshot_field_headers(): @pytest.mark.asyncio async def test_get_snapshot_field_headers_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -7594,7 +7616,7 @@ async def test_get_snapshot_field_headers_async(): def test_get_snapshot_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -7618,7 +7640,7 @@ def test_get_snapshot_flattened(): def test_get_snapshot_flattened_error(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -7633,7 +7655,7 @@ def test_get_snapshot_flattened_error(): @pytest.mark.asyncio async def test_get_snapshot_flattened_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -7660,7 +7682,7 @@ async def test_get_snapshot_flattened_async(): @pytest.mark.asyncio async def test_get_snapshot_flattened_error_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -7681,7 +7703,7 @@ async def test_get_snapshot_flattened_error_async(): ], ) def test_list_snapshots(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -7712,7 +7734,7 @@ def test_list_snapshots(request_type, transport: str = "grpc"): def test_list_snapshots_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. 
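The *_use_cached_wrapped_rpc tests in these hunks patch google.api_core.gapic_v1.method.wrap_method and assert the wrapper runs exactly once, i.e. that _prep_wrapped_messages builds the wrapped RPCs at client construction and reuses them on later calls. A minimal sketch of that caching idea follows; it is illustrative only, and _CachingStub / fake_wrap_method are hypothetical names, not the real gapic internals.

    class _CachingStub:
        """Toy stand-in for a client that wraps each RPC once and reuses it."""

        def __init__(self):
            self._wrapped_rpcs = {}

        def get_wrapped(self, name, make_rpc):
            # Wrap only on first use; later lookups hit the cache, so the
            # wrapping step never repeats for the same RPC.
            if name not in self._wrapped_rpcs:
                self._wrapped_rpcs[name] = make_rpc()
            return self._wrapped_rpcs[name]

    wrap_calls = []

    def fake_wrap_method():
        wrap_calls.append("wrapped")
        return "wrapped-rpc"

    stub = _CachingStub()
    stub.get_wrapped("get_snapshot", fake_wrap_method)
    stub.get_wrapped("get_snapshot", fake_wrap_method)
    assert len(wrap_calls) == 1  # mirrors wrapper_fn being called exactly once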
- client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -7743,7 +7765,7 @@ def test_list_snapshots_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -7781,7 +7803,7 @@ async def test_list_snapshots_async_use_cached_wrapped_rpc( # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -7821,7 +7843,7 @@ async def test_list_snapshots_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.ListSnapshotsRequest, ): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -7857,7 +7879,7 @@ async def test_list_snapshots_async_from_dict(): def test_list_snapshots_field_headers(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -7887,7 +7909,7 @@ def test_list_snapshots_field_headers(): @pytest.mark.asyncio async def test_list_snapshots_field_headers_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -7918,7 +7940,7 @@ async def test_list_snapshots_field_headers_async(): def test_list_snapshots_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -7942,7 +7964,7 @@ def test_list_snapshots_flattened(): def test_list_snapshots_flattened_error(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -7957,7 +7979,7 @@ def test_list_snapshots_flattened_error(): @pytest.mark.asyncio async def test_list_snapshots_flattened_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -7986,7 +8008,7 @@ async def test_list_snapshots_flattened_async(): @pytest.mark.asyncio async def test_list_snapshots_flattened_error_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -8000,7 +8022,7 @@ async def test_list_snapshots_flattened_error_async(): def test_list_snapshots_pager(transport_name: str = "grpc"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport_name, ) @@ -8054,7 +8076,7 @@ def test_list_snapshots_pager(transport_name: str = "grpc"): def test_list_snapshots_pages(transport_name: str = "grpc"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport_name, ) @@ -8096,7 +8118,7 @@ def test_list_snapshots_pages(transport_name: str = "grpc"): @pytest.mark.asyncio async 
def test_list_snapshots_async_pager(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -8146,7 +8168,7 @@ async def test_list_snapshots_async_pager(): @pytest.mark.asyncio async def test_list_snapshots_async_pages(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -8201,7 +8223,7 @@ async def test_list_snapshots_async_pages(): ], ) def test_delete_snapshot(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -8229,7 +8251,7 @@ def test_delete_snapshot(request_type, transport: str = "grpc"): def test_delete_snapshot_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -8258,7 +8280,7 @@ def test_delete_snapshot_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -8296,7 +8318,7 @@ async def test_delete_snapshot_async_use_cached_wrapped_rpc( # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -8336,7 +8358,7 @@ async def test_delete_snapshot_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.DeleteSnapshotRequest, ): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -8367,7 +8389,7 @@ async def test_delete_snapshot_async_from_dict(): def test_delete_snapshot_field_headers(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -8397,7 +8419,7 @@ def test_delete_snapshot_field_headers(): @pytest.mark.asyncio async def test_delete_snapshot_field_headers_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -8426,7 +8448,7 @@ async def test_delete_snapshot_field_headers_async(): def test_delete_snapshot_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -8450,7 +8472,7 @@ def test_delete_snapshot_flattened(): def test_delete_snapshot_flattened_error(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -8465,7 +8487,7 @@ def test_delete_snapshot_flattened_error(): @pytest.mark.asyncio async def test_delete_snapshot_flattened_async(): - client = BigtableTableAdminAsyncClient( + client = 
BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -8492,7 +8514,7 @@ async def test_delete_snapshot_flattened_async(): @pytest.mark.asyncio async def test_delete_snapshot_flattened_error_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -8513,7 +8535,7 @@ async def test_delete_snapshot_flattened_error_async(): ], ) def test_create_backup(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -8541,7 +8563,7 @@ def test_create_backup(request_type, transport: str = "grpc"): def test_create_backup_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -8572,7 +8594,7 @@ def test_create_backup_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -8615,7 +8637,7 @@ async def test_create_backup_async_use_cached_wrapped_rpc( # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -8660,7 +8682,7 @@ async def test_create_backup_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.CreateBackupRequest, ): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -8693,7 +8715,7 @@ async def test_create_backup_async_from_dict(): def test_create_backup_field_headers(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -8723,7 +8745,7 @@ def test_create_backup_field_headers(): @pytest.mark.asyncio async def test_create_backup_field_headers_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -8754,7 +8776,7 @@ async def test_create_backup_field_headers_async(): def test_create_backup_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -8786,7 +8808,7 @@ def test_create_backup_flattened(): def test_create_backup_flattened_error(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -8803,7 +8825,7 @@ def test_create_backup_flattened_error(): @pytest.mark.asyncio async def test_create_backup_flattened_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -8840,7 +8862,7 @@ async def 
test_create_backup_flattened_async(): @pytest.mark.asyncio async def test_create_backup_flattened_error_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -8863,7 +8885,7 @@ async def test_create_backup_flattened_error_async(): ], ) def test_get_backup(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -8904,7 +8926,7 @@ def test_get_backup(request_type, transport: str = "grpc"): def test_get_backup_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -8933,7 +8955,7 @@ def test_get_backup_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -8969,7 +8991,7 @@ async def test_get_backup_async_use_cached_wrapped_rpc(transport: str = "grpc_as # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -9008,7 +9030,7 @@ async def test_get_backup_async_use_cached_wrapped_rpc(transport: str = "grpc_as async def test_get_backup_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.GetBackupRequest ): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -9054,7 +9076,7 @@ async def test_get_backup_async_from_dict(): def test_get_backup_field_headers(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -9084,7 +9106,7 @@ def test_get_backup_field_headers(): @pytest.mark.asyncio async def test_get_backup_field_headers_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -9113,7 +9135,7 @@ async def test_get_backup_field_headers_async(): def test_get_backup_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -9137,7 +9159,7 @@ def test_get_backup_flattened(): def test_get_backup_flattened_error(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -9152,7 +9174,7 @@ def test_get_backup_flattened_error(): @pytest.mark.asyncio async def test_get_backup_flattened_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -9179,7 +9201,7 @@ async def test_get_backup_flattened_async(): @pytest.mark.asyncio async def 
test_get_backup_flattened_error_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -9200,7 +9222,7 @@ async def test_get_backup_flattened_error_async(): ], ) def test_update_backup(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -9241,7 +9263,7 @@ def test_update_backup(request_type, transport: str = "grpc"): def test_update_backup_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -9266,7 +9288,7 @@ def test_update_backup_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -9304,7 +9326,7 @@ async def test_update_backup_async_use_cached_wrapped_rpc( # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -9344,7 +9366,7 @@ async def test_update_backup_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.UpdateBackupRequest, ): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -9390,7 +9412,7 @@ async def test_update_backup_async_from_dict(): def test_update_backup_field_headers(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -9420,7 +9442,7 @@ def test_update_backup_field_headers(): @pytest.mark.asyncio async def test_update_backup_field_headers_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -9449,7 +9471,7 @@ async def test_update_backup_field_headers_async(): def test_update_backup_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -9477,7 +9499,7 @@ def test_update_backup_flattened(): def test_update_backup_flattened_error(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -9493,7 +9515,7 @@ def test_update_backup_flattened_error(): @pytest.mark.asyncio async def test_update_backup_flattened_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -9524,7 +9546,7 @@ async def test_update_backup_flattened_async(): @pytest.mark.asyncio async def test_update_backup_flattened_error_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( 
credentials=async_anonymous_credentials(), ) @@ -9546,7 +9568,7 @@ async def test_update_backup_flattened_error_async(): ], ) def test_delete_backup(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -9574,7 +9596,7 @@ def test_delete_backup(request_type, transport: str = "grpc"): def test_delete_backup_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -9603,7 +9625,7 @@ def test_delete_backup_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -9641,7 +9663,7 @@ async def test_delete_backup_async_use_cached_wrapped_rpc( # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -9681,7 +9703,7 @@ async def test_delete_backup_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.DeleteBackupRequest, ): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -9712,7 +9734,7 @@ async def test_delete_backup_async_from_dict(): def test_delete_backup_field_headers(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -9742,7 +9764,7 @@ def test_delete_backup_field_headers(): @pytest.mark.asyncio async def test_delete_backup_field_headers_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -9771,7 +9793,7 @@ async def test_delete_backup_field_headers_async(): def test_delete_backup_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -9795,7 +9817,7 @@ def test_delete_backup_flattened(): def test_delete_backup_flattened_error(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -9810,7 +9832,7 @@ def test_delete_backup_flattened_error(): @pytest.mark.asyncio async def test_delete_backup_flattened_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -9837,7 +9859,7 @@ async def test_delete_backup_flattened_async(): @pytest.mark.asyncio async def test_delete_backup_flattened_error_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -9858,7 +9880,7 @@ async def test_delete_backup_flattened_error_async(): ], ) 
def test_list_backups(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -9889,7 +9911,7 @@ def test_list_backups(request_type, transport: str = "grpc"): def test_list_backups_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -9924,7 +9946,7 @@ def test_list_backups_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -9962,7 +9984,7 @@ async def test_list_backups_async_use_cached_wrapped_rpc( # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -10002,7 +10024,7 @@ async def test_list_backups_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.ListBackupsRequest, ): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -10038,7 +10060,7 @@ async def test_list_backups_async_from_dict(): def test_list_backups_field_headers(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -10068,7 +10090,7 @@ def test_list_backups_field_headers(): @pytest.mark.asyncio async def test_list_backups_field_headers_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -10099,7 +10121,7 @@ async def test_list_backups_field_headers_async(): def test_list_backups_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -10123,7 +10145,7 @@ def test_list_backups_flattened(): def test_list_backups_flattened_error(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -10138,7 +10160,7 @@ def test_list_backups_flattened_error(): @pytest.mark.asyncio async def test_list_backups_flattened_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -10167,7 +10189,7 @@ async def test_list_backups_flattened_async(): @pytest.mark.asyncio async def test_list_backups_flattened_error_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -10181,7 +10203,7 @@ async def test_list_backups_flattened_error_async(): def test_list_backups_pager(transport_name: str = "grpc"): - client = BigtableTableAdminClient( + client = 
BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport_name, ) @@ -10235,7 +10257,7 @@ def test_list_backups_pager(transport_name: str = "grpc"): def test_list_backups_pages(transport_name: str = "grpc"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport_name, ) @@ -10277,7 +10299,7 @@ def test_list_backups_pages(transport_name: str = "grpc"): @pytest.mark.asyncio async def test_list_backups_async_pager(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -10327,7 +10349,7 @@ async def test_list_backups_async_pager(): @pytest.mark.asyncio async def test_list_backups_async_pages(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -10381,8 +10403,8 @@ async def test_list_backups_async_pages(): dict, ], ) -def test_restore_table(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( +def test__restore_table(request_type, transport: str = "grpc"): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -10395,7 +10417,7 @@ def test_restore_table(request_type, transport: str = "grpc"): with mock.patch.object(type(client.transport.restore_table), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name="operations/spam") - response = client.restore_table(request) + response = client._restore_table(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 @@ -10407,10 +10429,10 @@ def test_restore_table(request_type, transport: str = "grpc"): assert isinstance(response, future.Future) -def test_restore_table_non_empty_request_with_auto_populated_field(): +def test__restore_table_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -10429,7 +10451,7 @@ def test_restore_table_non_empty_request_with_auto_populated_field(): call.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. 
) - client.restore_table(request=request) + client._restore_table(request=request) call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == bigtable_table_admin.RestoreTableRequest( @@ -10439,11 +10461,11 @@ def test_restore_table_non_empty_request_with_auto_populated_field(): ) -def test_restore_table_use_cached_wrapped_rpc(): +def test__restore_table_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -10462,7 +10484,7 @@ def test_restore_table_use_cached_wrapped_rpc(): ) client._transport._wrapped_methods[client._transport.restore_table] = mock_rpc request = {} - client.restore_table(request) + client._restore_table(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 @@ -10472,7 +10494,7 @@ def test_restore_table_use_cached_wrapped_rpc(): # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() - client.restore_table(request) + client._restore_table(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 @@ -10480,13 +10502,13 @@ def test_restore_table_use_cached_wrapped_rpc(): @pytest.mark.asyncio -async def test_restore_table_async_use_cached_wrapped_rpc( +async def test__restore_table_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", ): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -10509,7 +10531,7 @@ async def test_restore_table_async_use_cached_wrapped_rpc( ] = mock_rpc request = {} - await client.restore_table(request) + await client._restore_table(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 @@ -10519,7 +10541,7 @@ async def test_restore_table_async_use_cached_wrapped_rpc( # Subsequent calls should use the cached wrapper wrapper_fn.reset_mock() - await client.restore_table(request) + await client._restore_table(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 @@ -10527,11 +10549,11 @@ async def test_restore_table_async_use_cached_wrapped_rpc( @pytest.mark.asyncio -async def test_restore_table_async( +async def test__restore_table_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.RestoreTableRequest, ): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -10546,7 +10568,7 @@ async def test_restore_table_async( call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/spam") ) - response = await client.restore_table(request) + response = await client._restore_table(request) # Establish that the underlying gRPC stub method was called. 
assert len(call.mock_calls) @@ -10559,12 +10581,12 @@ async def test_restore_table_async( @pytest.mark.asyncio -async def test_restore_table_async_from_dict(): - await test_restore_table_async(request_type=dict) +async def test__restore_table_async_from_dict(): + await test__restore_table_async(request_type=dict) -def test_restore_table_field_headers(): - client = BigtableTableAdminClient( +def test__restore_table_field_headers(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -10577,7 +10599,7 @@ def test_restore_table_field_headers(): # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.restore_table), "__call__") as call: call.return_value = operations_pb2.Operation(name="operations/op") - client.restore_table(request) + client._restore_table(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 @@ -10593,8 +10615,8 @@ def test_restore_table_field_headers(): @pytest.mark.asyncio -async def test_restore_table_field_headers_async(): - client = BigtableTableAdminAsyncClient( +async def test__restore_table_field_headers_async(): + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -10609,7 +10631,7 @@ async def test_restore_table_field_headers_async(): call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/op") ) - await client.restore_table(request) + await client._restore_table(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) @@ -10632,7 +10654,7 @@ async def test_restore_table_field_headers_async(): ], ) def test_copy_backup(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -10660,7 +10682,7 @@ def test_copy_backup(request_type, transport: str = "grpc"): def test_copy_backup_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. 
- client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -10693,7 +10715,7 @@ def test_copy_backup_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -10736,7 +10758,7 @@ async def test_copy_backup_async_use_cached_wrapped_rpc( # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -10780,7 +10802,7 @@ async def test_copy_backup_async_use_cached_wrapped_rpc( async def test_copy_backup_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.CopyBackupRequest ): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -10813,7 +10835,7 @@ async def test_copy_backup_async_from_dict(): def test_copy_backup_field_headers(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -10843,7 +10865,7 @@ def test_copy_backup_field_headers(): @pytest.mark.asyncio async def test_copy_backup_field_headers_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -10874,7 +10896,7 @@ async def test_copy_backup_field_headers_async(): def test_copy_backup_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -10910,7 +10932,7 @@ def test_copy_backup_flattened(): def test_copy_backup_flattened_error(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -10928,7 +10950,7 @@ def test_copy_backup_flattened_error(): @pytest.mark.asyncio async def test_copy_backup_flattened_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -10969,7 +10991,7 @@ async def test_copy_backup_flattened_async(): @pytest.mark.asyncio async def test_copy_backup_flattened_error_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -10993,7 +11015,7 @@ async def test_copy_backup_flattened_error_async(): ], ) def test_get_iam_policy(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -11026,7 +11048,7 @@ def test_get_iam_policy(request_type, transport: str = "grpc"): def test_get_iam_policy_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. 
- client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -11055,7 +11077,7 @@ def test_get_iam_policy_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -11093,7 +11115,7 @@ async def test_get_iam_policy_async_use_cached_wrapped_rpc( # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -11132,7 +11154,7 @@ async def test_get_iam_policy_async_use_cached_wrapped_rpc( async def test_get_iam_policy_async( transport: str = "grpc_asyncio", request_type=iam_policy_pb2.GetIamPolicyRequest ): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -11170,7 +11192,7 @@ async def test_get_iam_policy_async_from_dict(): def test_get_iam_policy_field_headers(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -11200,7 +11222,7 @@ def test_get_iam_policy_field_headers(): @pytest.mark.asyncio async def test_get_iam_policy_field_headers_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -11229,7 +11251,7 @@ async def test_get_iam_policy_field_headers_async(): def test_get_iam_policy_from_dict_foreign(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -11246,7 +11268,7 @@ def test_get_iam_policy_from_dict_foreign(): def test_get_iam_policy_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -11270,7 +11292,7 @@ def test_get_iam_policy_flattened(): def test_get_iam_policy_flattened_error(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -11285,7 +11307,7 @@ def test_get_iam_policy_flattened_error(): @pytest.mark.asyncio async def test_get_iam_policy_flattened_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -11312,7 +11334,7 @@ async def test_get_iam_policy_flattened_async(): @pytest.mark.asyncio async def test_get_iam_policy_flattened_error_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -11333,7 +11355,7 @@ async def test_get_iam_policy_flattened_error_async(): ], ) def test_set_iam_policy(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -11366,7 +11388,7 @@ def test_set_iam_policy(request_type, transport: str = "grpc"): def test_set_iam_policy_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -11395,7 +11417,7 @@ def test_set_iam_policy_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -11433,7 +11455,7 @@ async def test_set_iam_policy_async_use_cached_wrapped_rpc( # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -11472,7 +11494,7 @@ async def test_set_iam_policy_async_use_cached_wrapped_rpc( async def test_set_iam_policy_async( transport: str = "grpc_asyncio", request_type=iam_policy_pb2.SetIamPolicyRequest ): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -11510,7 +11532,7 @@ async def test_set_iam_policy_async_from_dict(): def test_set_iam_policy_field_headers(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -11540,7 +11562,7 @@ def test_set_iam_policy_field_headers(): @pytest.mark.asyncio async def test_set_iam_policy_field_headers_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -11569,7 +11591,7 @@ async def 
test_set_iam_policy_field_headers_async(): def test_set_iam_policy_from_dict_foreign(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -11587,7 +11609,7 @@ def test_set_iam_policy_from_dict_foreign(): def test_set_iam_policy_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -11611,7 +11633,7 @@ def test_set_iam_policy_flattened(): def test_set_iam_policy_flattened_error(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -11626,7 +11648,7 @@ def test_set_iam_policy_flattened_error(): @pytest.mark.asyncio async def test_set_iam_policy_flattened_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -11653,7 +11675,7 @@ async def test_set_iam_policy_flattened_async(): @pytest.mark.asyncio async def test_set_iam_policy_flattened_error_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -11674,7 +11696,7 @@ async def test_set_iam_policy_flattened_error_async(): ], ) def test_test_iam_permissions(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -11707,7 +11729,7 @@ def test_test_iam_permissions(request_type, transport: str = "grpc"): def test_test_iam_permissions_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. 
- client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -11738,7 +11760,7 @@ def test_test_iam_permissions_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -11780,7 +11802,7 @@ async def test_test_iam_permissions_async_use_cached_wrapped_rpc( # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -11820,7 +11842,7 @@ async def test_test_iam_permissions_async( transport: str = "grpc_asyncio", request_type=iam_policy_pb2.TestIamPermissionsRequest, ): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -11858,7 +11880,7 @@ async def test_test_iam_permissions_async_from_dict(): def test_test_iam_permissions_field_headers(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -11890,7 +11912,7 @@ def test_test_iam_permissions_field_headers(): @pytest.mark.asyncio async def test_test_iam_permissions_field_headers_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -11923,7 +11945,7 @@ async def test_test_iam_permissions_field_headers_async(): def test_test_iam_permissions_from_dict_foreign(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -11942,7 +11964,7 @@ def test_test_iam_permissions_from_dict_foreign(): def test_test_iam_permissions_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -11972,7 +11994,7 @@ def test_test_iam_permissions_flattened(): def test_test_iam_permissions_flattened_error(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -11988,7 +12010,7 @@ def test_test_iam_permissions_flattened_error(): @pytest.mark.asyncio async def test_test_iam_permissions_flattened_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -12023,7 +12045,7 @@ async def test_test_iam_permissions_flattened_async(): @pytest.mark.asyncio async def test_test_iam_permissions_flattened_error_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -12045,7 +12067,7 @@ async def test_test_iam_permissions_flattened_error_async(): ], ) def test_create_schema_bundle(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -12075,7 +12097,7 @@ def test_create_schema_bundle(request_type, transport: str = "grpc"): def test_create_schema_bundle_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -12108,7 +12130,7 @@ def test_create_schema_bundle_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -12155,7 +12177,7 @@ async def test_create_schema_bundle_async_use_cached_wrapped_rpc( # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -12200,7 +12222,7 @@ async def test_create_schema_bundle_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.CreateSchemaBundleRequest, ): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -12235,7 +12257,7 @@ async def test_create_schema_bundle_async_from_dict(): def test_create_schema_bundle_field_headers(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -12267,7 +12289,7 @@ def test_create_schema_bundle_field_headers(): @pytest.mark.asyncio async def test_create_schema_bundle_field_headers_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( 
credentials=async_anonymous_credentials(), ) @@ -12300,7 +12322,7 @@ async def test_create_schema_bundle_field_headers_async(): def test_create_schema_bundle_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -12334,7 +12356,7 @@ def test_create_schema_bundle_flattened(): def test_create_schema_bundle_flattened_error(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -12351,7 +12373,7 @@ def test_create_schema_bundle_flattened_error(): @pytest.mark.asyncio async def test_create_schema_bundle_flattened_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -12390,7 +12412,7 @@ async def test_create_schema_bundle_flattened_async(): @pytest.mark.asyncio async def test_create_schema_bundle_flattened_error_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -12413,7 +12435,7 @@ async def test_create_schema_bundle_flattened_error_async(): ], ) def test_update_schema_bundle(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -12443,7 +12465,7 @@ def test_update_schema_bundle(request_type, transport: str = "grpc"): def test_update_schema_bundle_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -12470,7 +12492,7 @@ def test_update_schema_bundle_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -12517,7 +12539,7 @@ async def test_update_schema_bundle_async_use_cached_wrapped_rpc( # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -12562,7 +12584,7 @@ async def test_update_schema_bundle_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.UpdateSchemaBundleRequest, ): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -12597,7 +12619,7 @@ async def test_update_schema_bundle_async_from_dict(): def test_update_schema_bundle_field_headers(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -12629,7 +12651,7 @@ def test_update_schema_bundle_field_headers(): @pytest.mark.asyncio async def test_update_schema_bundle_field_headers_async(): - client = BigtableTableAdminAsyncClient( + client = 
BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -12662,7 +12684,7 @@ async def test_update_schema_bundle_field_headers_async(): def test_update_schema_bundle_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -12692,7 +12714,7 @@ def test_update_schema_bundle_flattened(): def test_update_schema_bundle_flattened_error(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -12708,7 +12730,7 @@ def test_update_schema_bundle_flattened_error(): @pytest.mark.asyncio async def test_update_schema_bundle_flattened_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -12743,7 +12765,7 @@ async def test_update_schema_bundle_flattened_async(): @pytest.mark.asyncio async def test_update_schema_bundle_flattened_error_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -12765,7 +12787,7 @@ async def test_update_schema_bundle_flattened_error_async(): ], ) def test_get_schema_bundle(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -12800,7 +12822,7 @@ def test_get_schema_bundle(request_type, transport: str = "grpc"): def test_get_schema_bundle_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. 
- client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -12831,7 +12853,7 @@ def test_get_schema_bundle_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -12871,7 +12893,7 @@ async def test_get_schema_bundle_async_use_cached_wrapped_rpc( # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -12911,7 +12933,7 @@ async def test_get_schema_bundle_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.GetSchemaBundleRequest, ): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -12951,7 +12973,7 @@ async def test_get_schema_bundle_async_from_dict(): def test_get_schema_bundle_field_headers(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -12983,7 +13005,7 @@ def test_get_schema_bundle_field_headers(): @pytest.mark.asyncio async def test_get_schema_bundle_field_headers_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -13014,7 +13036,7 @@ async def test_get_schema_bundle_field_headers_async(): def test_get_schema_bundle_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -13040,7 +13062,7 @@ def test_get_schema_bundle_flattened(): def test_get_schema_bundle_flattened_error(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -13055,7 +13077,7 @@ def test_get_schema_bundle_flattened_error(): @pytest.mark.asyncio async def test_get_schema_bundle_flattened_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -13084,7 +13106,7 @@ async def test_get_schema_bundle_flattened_async(): @pytest.mark.asyncio async def test_get_schema_bundle_flattened_error_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -13105,7 +13127,7 @@ async def test_get_schema_bundle_flattened_error_async(): ], ) def test_list_schema_bundles(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -13138,7 +13160,7 @@ def test_list_schema_bundles(request_type, transport: str = "grpc"): def test_list_schema_bundles_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. 
- client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -13171,7 +13193,7 @@ def test_list_schema_bundles_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -13213,7 +13235,7 @@ async def test_list_schema_bundles_async_use_cached_wrapped_rpc( # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -13253,7 +13275,7 @@ async def test_list_schema_bundles_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.ListSchemaBundlesRequest, ): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -13291,7 +13313,7 @@ async def test_list_schema_bundles_async_from_dict(): def test_list_schema_bundles_field_headers(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -13323,7 +13345,7 @@ def test_list_schema_bundles_field_headers(): @pytest.mark.asyncio async def test_list_schema_bundles_field_headers_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -13356,7 +13378,7 @@ async def test_list_schema_bundles_field_headers_async(): def test_list_schema_bundles_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -13382,7 +13404,7 @@ def test_list_schema_bundles_flattened(): def test_list_schema_bundles_flattened_error(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -13397,7 +13419,7 @@ def test_list_schema_bundles_flattened_error(): @pytest.mark.asyncio async def test_list_schema_bundles_flattened_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -13428,7 +13450,7 @@ async def test_list_schema_bundles_flattened_async(): @pytest.mark.asyncio async def test_list_schema_bundles_flattened_error_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -13442,7 +13464,7 @@ async def test_list_schema_bundles_flattened_error_async(): def test_list_schema_bundles_pager(transport_name: str = "grpc"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport_name, ) @@ -13498,7 +13520,7 @@ def test_list_schema_bundles_pager(transport_name: str = "grpc"): def test_list_schema_bundles_pages(transport_name: str = "grpc"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), 
transport=transport_name, ) @@ -13542,7 +13564,7 @@ def test_list_schema_bundles_pages(transport_name: str = "grpc"): @pytest.mark.asyncio async def test_list_schema_bundles_async_pager(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -13594,7 +13616,7 @@ async def test_list_schema_bundles_async_pager(): @pytest.mark.asyncio async def test_list_schema_bundles_async_pages(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -13651,7 +13673,7 @@ async def test_list_schema_bundles_async_pages(): ], ) def test_delete_schema_bundle(request_type, transport: str = "grpc"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -13681,7 +13703,7 @@ def test_delete_schema_bundle(request_type, transport: str = "grpc"): def test_delete_schema_bundle_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -13714,7 +13736,7 @@ def test_delete_schema_bundle_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -13756,7 +13778,7 @@ async def test_delete_schema_bundle_async_use_cached_wrapped_rpc( # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -13796,7 +13818,7 @@ async def test_delete_schema_bundle_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.DeleteSchemaBundleRequest, ): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport=transport, ) @@ -13829,7 +13851,7 @@ async def test_delete_schema_bundle_async_from_dict(): def test_delete_schema_bundle_field_headers(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -13861,7 +13883,7 @@ def test_delete_schema_bundle_field_headers(): @pytest.mark.asyncio async def test_delete_schema_bundle_field_headers_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -13892,7 +13914,7 @@ async def test_delete_schema_bundle_field_headers_async(): def test_delete_schema_bundle_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -13918,7 +13940,7 @@ def test_delete_schema_bundle_flattened(): def test_delete_schema_bundle_flattened_error(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( 
credentials=ga_credentials.AnonymousCredentials(), ) @@ -13933,7 +13955,7 @@ def test_delete_schema_bundle_flattened_error(): @pytest.mark.asyncio async def test_delete_schema_bundle_flattened_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -13962,7 +13984,7 @@ async def test_delete_schema_bundle_flattened_async(): @pytest.mark.asyncio async def test_delete_schema_bundle_flattened_error_async(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -13979,7 +14001,7 @@ def test_create_table_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -14048,7 +14070,7 @@ def test_create_table_rest_required_fields( assert "tableId" in jsonified_request assert jsonified_request["tableId"] == "table_id_value" - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -14110,7 +14132,7 @@ def test_create_table_rest_unset_required_fields(): def test_create_table_rest_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -14154,7 +14176,7 @@ def test_create_table_rest_flattened(): def test_create_table_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -14174,7 +14196,7 @@ def test_create_table_from_snapshot_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -14256,7 +14278,7 @@ def test_create_table_from_snapshot_rest_required_fields( assert "sourceSnapshot" in jsonified_request assert jsonified_request["sourceSnapshot"] == "source_snapshot_value" - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -14315,7 +14337,7 @@ def test_create_table_from_snapshot_rest_unset_required_fields(): def test_create_table_from_snapshot_rest_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -14358,7 +14380,7 @@ def test_create_table_from_snapshot_rest_flattened(): def test_create_table_from_snapshot_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -14378,7 +14400,7 @@ def test_list_tables_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with 
mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -14451,7 +14473,7 @@ def test_list_tables_rest_required_fields( assert "parent" in jsonified_request assert jsonified_request["parent"] == "parent_value" - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -14512,7 +14534,7 @@ def test_list_tables_rest_unset_required_fields(): def test_list_tables_rest_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -14554,7 +14576,7 @@ def test_list_tables_rest_flattened(): def test_list_tables_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -14569,7 +14591,7 @@ def test_list_tables_rest_flattened_error(transport: str = "rest"): def test_list_tables_rest_pager(transport: str = "rest"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -14635,7 +14657,7 @@ def test_get_table_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -14702,7 +14724,7 @@ def test_get_table_rest_required_fields( assert "name" in jsonified_request assert jsonified_request["name"] == "name_value" - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -14754,7 +14776,7 @@ def test_get_table_rest_unset_required_fields(): def test_get_table_rest_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -14796,7 +14818,7 @@ def test_get_table_rest_flattened(): def test_get_table_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -14814,7 +14836,7 @@ def test_update_table_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -14885,7 +14907,7 @@ def test_update_table_rest_required_fields( # verify required fields with non-default values are left alone - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -14948,7 +14970,7 @@ def test_update_table_rest_unset_required_fields(): def test_update_table_rest_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( 
credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -14992,7 +15014,7 @@ def test_update_table_rest_flattened(): def test_update_table_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -15011,7 +15033,7 @@ def test_delete_table_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -15076,7 +15098,7 @@ def test_delete_table_rest_required_fields( assert "name" in jsonified_request assert jsonified_request["name"] == "name_value" - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -15125,7 +15147,7 @@ def test_delete_table_rest_unset_required_fields(): def test_delete_table_rest_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -15165,7 +15187,7 @@ def test_delete_table_rest_flattened(): def test_delete_table_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -15183,7 +15205,7 @@ def test_undelete_table_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -15252,7 +15274,7 @@ def test_undelete_table_rest_required_fields( assert "name" in jsonified_request assert jsonified_request["name"] == "name_value" - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -15302,7 +15324,7 @@ def test_undelete_table_rest_unset_required_fields(): def test_undelete_table_rest_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -15343,7 +15365,7 @@ def test_undelete_table_rest_flattened(): def test_undelete_table_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -15361,7 +15383,7 @@ def test_create_authorized_view_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -15444,7 +15466,7 @@ def test_create_authorized_view_rest_required_fields( assert "authorizedViewId" in jsonified_request assert jsonified_request["authorizedViewId"] == "authorized_view_id_value" - 
client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -15509,7 +15531,7 @@ def test_create_authorized_view_rest_unset_required_fields(): def test_create_authorized_view_rest_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -15552,7 +15574,7 @@ def test_create_authorized_view_rest_flattened(): def test_create_authorized_view_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -15572,7 +15594,7 @@ def test_list_authorized_views_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -15650,7 +15672,7 @@ def test_list_authorized_views_rest_required_fields( assert "parent" in jsonified_request assert jsonified_request["parent"] == "parent_value" - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -15713,7 +15735,7 @@ def test_list_authorized_views_rest_unset_required_fields(): def test_list_authorized_views_rest_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -15756,7 +15778,7 @@ def test_list_authorized_views_rest_flattened(): def test_list_authorized_views_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -15771,7 +15793,7 @@ def test_list_authorized_views_rest_flattened_error(transport: str = "rest"): def test_list_authorized_views_rest_pager(transport: str = "rest"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -15838,7 +15860,7 @@ def test_get_authorized_view_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -15909,7 +15931,7 @@ def test_get_authorized_view_rest_required_fields( assert "name" in jsonified_request assert jsonified_request["name"] == "name_value" - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -15961,7 +15983,7 @@ def test_get_authorized_view_rest_unset_required_fields(): def test_get_authorized_view_rest_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -16006,7 +16028,7 @@ def test_get_authorized_view_rest_flattened(): def test_get_authorized_view_rest_flattened_error(transport: str = 
"rest"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -16024,7 +16046,7 @@ def test_update_authorized_view_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -16100,7 +16122,7 @@ def test_update_authorized_view_rest_required_fields( # verify required fields with non-default values are left alone - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -16158,7 +16180,7 @@ def test_update_authorized_view_rest_unset_required_fields(): def test_update_authorized_view_rest_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -16204,7 +16226,7 @@ def test_update_authorized_view_rest_flattened(): def test_update_authorized_view_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -16223,7 +16245,7 @@ def test_delete_authorized_view_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -16295,7 +16317,7 @@ def test_delete_authorized_view_rest_required_fields( assert "name" in jsonified_request assert jsonified_request["name"] == "name_value" - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -16344,7 +16366,7 @@ def test_delete_authorized_view_rest_unset_required_fields(): def test_delete_authorized_view_rest_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -16387,7 +16409,7 @@ def test_delete_authorized_view_rest_flattened(): def test_delete_authorized_view_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -16405,7 +16427,7 @@ def test_modify_column_families_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -16475,7 +16497,7 @@ def test_modify_column_families_rest_required_fields( assert "name" in jsonified_request assert jsonified_request["name"] == "name_value" - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) 
@@ -16536,7 +16558,7 @@ def test_modify_column_families_rest_unset_required_fields(): def test_modify_column_families_rest_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -16584,7 +16606,7 @@ def test_modify_column_families_rest_flattened(): def test_modify_column_families_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -16607,7 +16629,7 @@ def test_drop_row_range_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -16672,7 +16694,7 @@ def test_drop_row_range_rest_required_fields( assert "name" in jsonified_request assert jsonified_request["name"] == "name_value" - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -16725,7 +16747,7 @@ def test_generate_consistency_token_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -16795,7 +16817,7 @@ def test_generate_consistency_token_rest_required_fields( assert "name" in jsonified_request assert jsonified_request["name"] == "name_value" - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -16850,7 +16872,7 @@ def test_generate_consistency_token_rest_unset_required_fields(): def test_generate_consistency_token_rest_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -16895,7 +16917,7 @@ def test_generate_consistency_token_rest_flattened(): def test_generate_consistency_token_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -16913,7 +16935,7 @@ def test_check_consistency_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -16984,7 +17006,7 @@ def test_check_consistency_rest_required_fields( assert "consistencyToken" in jsonified_request assert jsonified_request["consistencyToken"] == "consistency_token_value" - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -17047,7 +17069,7 @@ def test_check_consistency_rest_unset_required_fields(): def 
test_check_consistency_rest_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -17091,7 +17113,7 @@ def test_check_consistency_rest_flattened(): def test_check_consistency_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -17110,7 +17132,7 @@ def test_snapshot_table_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -17187,7 +17209,7 @@ def test_snapshot_table_rest_required_fields( assert "snapshotId" in jsonified_request assert jsonified_request["snapshotId"] == "snapshot_id_value" - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -17246,7 +17268,7 @@ def test_snapshot_table_rest_unset_required_fields(): def test_snapshot_table_rest_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -17290,7 +17312,7 @@ def test_snapshot_table_rest_flattened(): def test_snapshot_table_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -17311,7 +17333,7 @@ def test_get_snapshot_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -17376,7 +17398,7 @@ def test_get_snapshot_rest_required_fields( assert "name" in jsonified_request assert jsonified_request["name"] == "name_value" - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -17428,7 +17450,7 @@ def test_get_snapshot_rest_unset_required_fields(): def test_get_snapshot_rest_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -17473,7 +17495,7 @@ def test_get_snapshot_rest_flattened(): def test_get_snapshot_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -17491,7 +17513,7 @@ def test_list_snapshots_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -17563,7 +17585,7 @@ def 
test_list_snapshots_rest_required_fields( assert "parent" in jsonified_request assert jsonified_request["parent"] == "parent_value" - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -17623,7 +17645,7 @@ def test_list_snapshots_rest_unset_required_fields(): def test_list_snapshots_rest_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -17668,7 +17690,7 @@ def test_list_snapshots_rest_flattened(): def test_list_snapshots_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -17683,7 +17705,7 @@ def test_list_snapshots_rest_flattened_error(transport: str = "rest"): def test_list_snapshots_rest_pager(transport: str = "rest"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -17751,7 +17773,7 @@ def test_delete_snapshot_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -17816,7 +17838,7 @@ def test_delete_snapshot_rest_required_fields( assert "name" in jsonified_request assert jsonified_request["name"] == "name_value" - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -17865,7 +17887,7 @@ def test_delete_snapshot_rest_unset_required_fields(): def test_delete_snapshot_rest_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -17908,7 +17930,7 @@ def test_delete_snapshot_rest_flattened(): def test_delete_snapshot_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -17926,7 +17948,7 @@ def test_create_backup_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -18004,7 +18026,7 @@ def test_create_backup_rest_required_fields( assert "backupId" in jsonified_request assert jsonified_request["backupId"] == "backup_id_value" - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -18069,7 +18091,7 @@ def test_create_backup_rest_unset_required_fields(): def test_create_backup_rest_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -18114,7 +18136,7 @@ def test_create_backup_rest_flattened(): def 
test_create_backup_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -18134,7 +18156,7 @@ def test_get_backup_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -18199,7 +18221,7 @@ def test_get_backup_rest_required_fields( assert "name" in jsonified_request assert jsonified_request["name"] == "name_value" - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -18251,7 +18273,7 @@ def test_get_backup_rest_unset_required_fields(): def test_get_backup_rest_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -18296,7 +18318,7 @@ def test_get_backup_rest_flattened(): def test_get_backup_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -18314,7 +18336,7 @@ def test_update_backup_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -18376,7 +18398,7 @@ def test_update_backup_rest_required_fields( # verify required fields with non-default values are left alone - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -18437,7 +18459,7 @@ def test_update_backup_rest_unset_required_fields(): def test_update_backup_rest_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -18485,7 +18507,7 @@ def test_update_backup_rest_flattened(): def test_update_backup_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -18504,7 +18526,7 @@ def test_delete_backup_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -18569,7 +18591,7 @@ def test_delete_backup_rest_required_fields( assert "name" in jsonified_request assert jsonified_request["name"] == "name_value" - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -18618,7 +18640,7 @@ def test_delete_backup_rest_unset_required_fields(): def 
test_delete_backup_rest_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -18661,7 +18683,7 @@ def test_delete_backup_rest_flattened(): def test_delete_backup_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -18679,7 +18701,7 @@ def test_list_backups_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -18753,7 +18775,7 @@ def test_list_backups_rest_required_fields( assert "parent" in jsonified_request assert jsonified_request["parent"] == "parent_value" - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -18815,7 +18837,7 @@ def test_list_backups_rest_unset_required_fields(): def test_list_backups_rest_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -18860,7 +18882,7 @@ def test_list_backups_rest_flattened(): def test_list_backups_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -18875,7 +18897,7 @@ def test_list_backups_rest_flattened_error(transport: str = "rest"): def test_list_backups_rest_pager(transport: str = "rest"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -18939,11 +18961,11 @@ def test_list_backups_rest_pager(transport: str = "rest"): assert page_.raw_page.next_page_token == token -def test_restore_table_rest_use_cached_wrapped_rpc(): +def test__restore_table_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -18963,7 +18985,7 @@ def test_restore_table_rest_use_cached_wrapped_rpc(): client._transport._wrapped_methods[client._transport.restore_table] = mock_rpc request = {} - client.restore_table(request) + client._restore_table(request) # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 @@ -18972,14 +18994,14 @@ def test_restore_table_rest_use_cached_wrapped_rpc(): # subsequent calls should use the cached wrapper wrapper_fn.reset_mock() - client.restore_table(request) + client._restore_table(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 assert mock_rpc.call_count == 2 -def test_restore_table_rest_required_fields( +def test__restore_table_rest_required_fields( request_type=bigtable_table_admin.RestoreTableRequest, ): transport_class = transports.BigtableTableAdminRestTransport @@ -19016,7 +19038,7 @@ def test_restore_table_rest_required_fields( assert "tableId" in jsonified_request assert jsonified_request["tableId"] == "table_id_value" - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -19049,14 +19071,14 @@ def test_restore_table_rest_required_fields( req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.restore_table(request) + response = client._restore_table(request) expected_params = [("$alt", "json;enum-encoding=int")] actual_params = req.call_args.kwargs["params"] assert expected_params == actual_params -def test_restore_table_rest_unset_required_fields(): +def test__restore_table_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials ) @@ -19077,7 +19099,7 @@ def test_copy_backup_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -19154,7 +19176,7 @@ def test_copy_backup_rest_required_fields( assert "sourceBackup" in jsonified_request assert jsonified_request["sourceBackup"] == "source_backup_value" - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -19214,7 +19236,7 @@ def test_copy_backup_rest_unset_required_fields(): def test_copy_backup_rest_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -19260,7 +19282,7 @@ def test_copy_backup_rest_flattened(): def test_copy_backup_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -19281,7 +19303,7 @@ def test_get_iam_policy_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -19346,7 +19368,7 @@ def test_get_iam_policy_rest_required_fields( assert "resource" in jsonified_request assert jsonified_request["resource"] == "resource_value" - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", 
) @@ -19397,7 +19419,7 @@ def test_get_iam_policy_rest_unset_required_fields(): def test_get_iam_policy_rest_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -19440,7 +19462,7 @@ def test_get_iam_policy_rest_flattened(): def test_get_iam_policy_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -19458,7 +19480,7 @@ def test_set_iam_policy_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -19523,7 +19545,7 @@ def test_set_iam_policy_rest_required_fields( assert "resource" in jsonified_request assert jsonified_request["resource"] == "resource_value" - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -19582,7 +19604,7 @@ def test_set_iam_policy_rest_unset_required_fields(): def test_set_iam_policy_rest_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -19625,7 +19647,7 @@ def test_set_iam_policy_rest_flattened(): def test_set_iam_policy_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -19643,7 +19665,7 @@ def test_test_iam_permissions_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -19716,7 +19738,7 @@ def test_test_iam_permissions_rest_required_fields( assert "permissions" in jsonified_request assert jsonified_request["permissions"] == "permissions_value" - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -19775,7 +19797,7 @@ def test_test_iam_permissions_rest_unset_required_fields(): def test_test_iam_permissions_rest_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -19819,7 +19841,7 @@ def test_test_iam_permissions_rest_flattened(): def test_test_iam_permissions_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -19838,7 +19860,7 @@ def test_create_schema_bundle_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = 
BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -19920,7 +19942,7 @@ def test_create_schema_bundle_rest_required_fields( assert "schemaBundleId" in jsonified_request assert jsonified_request["schemaBundleId"] == "schema_bundle_id_value" - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -19985,7 +20007,7 @@ def test_create_schema_bundle_rest_unset_required_fields(): def test_create_schema_bundle_rest_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -20028,7 +20050,7 @@ def test_create_schema_bundle_rest_flattened(): def test_create_schema_bundle_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -20048,7 +20070,7 @@ def test_update_schema_bundle_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -20123,7 +20145,7 @@ def test_update_schema_bundle_rest_required_fields( # verify required fields with non-default values are left alone - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -20181,7 +20203,7 @@ def test_update_schema_bundle_rest_unset_required_fields(): def test_update_schema_bundle_rest_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -20227,7 +20249,7 @@ def test_update_schema_bundle_rest_flattened(): def test_update_schema_bundle_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -20246,7 +20268,7 @@ def test_get_schema_bundle_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -20313,7 +20335,7 @@ def test_get_schema_bundle_rest_required_fields( assert "name" in jsonified_request assert jsonified_request["name"] == "name_value" - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -20365,7 +20387,7 @@ def test_get_schema_bundle_rest_unset_required_fields(): def test_get_schema_bundle_rest_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -20410,7 +20432,7 @@ def test_get_schema_bundle_rest_flattened(): def test_get_schema_bundle_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( + client = 
BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -20428,7 +20450,7 @@ def test_list_schema_bundles_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -20504,7 +20526,7 @@ def test_list_schema_bundles_rest_required_fields( assert "parent" in jsonified_request assert jsonified_request["parent"] == "parent_value" - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -20566,7 +20588,7 @@ def test_list_schema_bundles_rest_unset_required_fields(): def test_list_schema_bundles_rest_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -20609,7 +20631,7 @@ def test_list_schema_bundles_rest_flattened(): def test_list_schema_bundles_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -20624,7 +20646,7 @@ def test_list_schema_bundles_rest_flattened_error(transport: str = "rest"): def test_list_schema_bundles_rest_pager(transport: str = "rest"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -20690,7 +20712,7 @@ def test_delete_schema_bundle_rest_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -20761,7 +20783,7 @@ def test_delete_schema_bundle_rest_required_fields( assert "name" in jsonified_request assert jsonified_request["name"] == "name_value" - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -20810,7 +20832,7 @@ def test_delete_schema_bundle_rest_unset_required_fields(): def test_delete_schema_bundle_rest_flattened(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -20853,7 +20875,7 @@ def test_delete_schema_bundle_rest_flattened(): def test_delete_schema_bundle_rest_flattened_error(transport: str = "rest"): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -20873,7 +20895,7 @@ def test_credentials_transport_error(): credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) @@ -20883,7 +20905,7 @@ def test_credentials_transport_error(): credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): - client = BigtableTableAdminClient( + 
client = BaseBigtableTableAdminClient( client_options={"credentials_file": "credentials.json"}, transport=transport, ) @@ -20895,7 +20917,7 @@ def test_credentials_transport_error(): options = client_options.ClientOptions() options.api_key = "api_key" with pytest.raises(ValueError): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( client_options=options, transport=transport, ) @@ -20904,7 +20926,7 @@ def test_credentials_transport_error(): options = client_options.ClientOptions() options.api_key = "api_key" with pytest.raises(ValueError): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( client_options=options, credentials=ga_credentials.AnonymousCredentials() ) @@ -20913,7 +20935,7 @@ def test_credentials_transport_error(): credentials=ga_credentials.AnonymousCredentials(), ) with pytest.raises(ValueError): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( client_options={"scopes": ["1", "2"]}, transport=transport, ) @@ -20924,7 +20946,7 @@ def test_transport_instance(): transport = transports.BigtableTableAdminGrpcTransport( credentials=ga_credentials.AnonymousCredentials(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) assert client.transport is transport @@ -20960,14 +20982,14 @@ def test_transport_adc(transport_class): def test_transport_kind_grpc(): - transport = BigtableTableAdminClient.get_transport_class("grpc")( + transport = BaseBigtableTableAdminClient.get_transport_class("grpc")( credentials=ga_credentials.AnonymousCredentials() ) assert transport.kind == "grpc" def test_initialize_client_w_grpc(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc" ) assert client is not None @@ -20976,7 +20998,7 @@ def test_initialize_client_w_grpc(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_create_table_empty_call_grpc(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -20997,7 +21019,7 @@ def test_create_table_empty_call_grpc(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_create_table_from_snapshot_empty_call_grpc(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -21020,7 +21042,7 @@ def test_create_table_from_snapshot_empty_call_grpc(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_list_tables_empty_call_grpc(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -21041,7 +21063,7 @@ def test_list_tables_empty_call_grpc(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
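The many `*_use_cached_wrapped_rpc` hunks earlier in this file all pin down the same contract: `_prep_wrapped_messages` wraps each RPC once when the transport is built, and later calls look the wrapper up in `_transport._wrapped_methods` instead of re-wrapping on every invocation. A minimal plain-Python sketch of that caching pattern (names are borrowed from the tests for illustration only; this is not the google.api_core implementation):

import functools

def wrap_method(func):
    """Stand-in for google.api_core.gapic_v1.method.wrap_method (illustration only)."""
    wrap_method.call_count += 1  # count how many wrappers ever get built

    @functools.wraps(func)
    def wrapper(request):
        return func(request)

    return wrapper

wrap_method.call_count = 0


class SketchTransport:
    def __init__(self):
        self._wrapped_methods = {}
        self._prep_wrapped_messages()

    def _prep_wrapped_messages(self):
        # Wrappers are created exactly once, at construction time, and cached.
        self._wrapped_methods["restore_table"] = wrap_method(self.restore_table)

    def restore_table(self, request):
        return {"name": "operations/op", "request": request}


class SketchClient:
    def __init__(self):
        self._transport = SketchTransport()

    def _restore_table(self, request):
        # Every call reuses the cached wrapper; no new wrapper per invocation.
        return self._transport._wrapped_methods["restore_table"](request)


client = SketchClient()
client._restore_table({})
client._restore_table({})
assert wrap_method.call_count == 1  # wrapped once, reused on both calls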
def test_get_table_empty_call_grpc(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -21062,7 +21084,7 @@ def test_get_table_empty_call_grpc(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_update_table_empty_call_grpc(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -21083,7 +21105,7 @@ def test_update_table_empty_call_grpc(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_delete_table_empty_call_grpc(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -21104,7 +21126,7 @@ def test_delete_table_empty_call_grpc(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_undelete_table_empty_call_grpc(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -21125,7 +21147,7 @@ def test_undelete_table_empty_call_grpc(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_create_authorized_view_empty_call_grpc(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -21148,7 +21170,7 @@ def test_create_authorized_view_empty_call_grpc(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_list_authorized_views_empty_call_grpc(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -21171,7 +21193,7 @@ def test_list_authorized_views_empty_call_grpc(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_get_authorized_view_empty_call_grpc(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -21194,7 +21216,7 @@ def test_get_authorized_view_empty_call_grpc(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_update_authorized_view_empty_call_grpc(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -21217,7 +21239,7 @@ def test_update_authorized_view_empty_call_grpc(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_delete_authorized_view_empty_call_grpc(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -21240,7 +21262,7 @@ def test_delete_authorized_view_empty_call_grpc(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. 
request == None and no flattened fields passed, work. def test_modify_column_families_empty_call_grpc(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -21263,7 +21285,7 @@ def test_modify_column_families_empty_call_grpc(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_drop_row_range_empty_call_grpc(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -21284,7 +21306,7 @@ def test_drop_row_range_empty_call_grpc(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_generate_consistency_token_empty_call_grpc(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -21307,7 +21329,7 @@ def test_generate_consistency_token_empty_call_grpc(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_check_consistency_empty_call_grpc(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -21330,7 +21352,7 @@ def test_check_consistency_empty_call_grpc(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_snapshot_table_empty_call_grpc(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -21351,7 +21373,7 @@ def test_snapshot_table_empty_call_grpc(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_get_snapshot_empty_call_grpc(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -21372,7 +21394,7 @@ def test_get_snapshot_empty_call_grpc(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_list_snapshots_empty_call_grpc(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -21393,7 +21415,7 @@ def test_list_snapshots_empty_call_grpc(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_delete_snapshot_empty_call_grpc(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -21414,7 +21436,7 @@ def test_delete_snapshot_empty_call_grpc(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
def test_create_backup_empty_call_grpc(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -21435,7 +21457,7 @@ def test_create_backup_empty_call_grpc(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_get_backup_empty_call_grpc(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -21456,7 +21478,7 @@ def test_get_backup_empty_call_grpc(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_update_backup_empty_call_grpc(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -21477,7 +21499,7 @@ def test_update_backup_empty_call_grpc(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_delete_backup_empty_call_grpc(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -21498,7 +21520,7 @@ def test_delete_backup_empty_call_grpc(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_list_backups_empty_call_grpc(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -21518,8 +21540,8 @@ def test_list_backups_empty_call_grpc(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. -def test_restore_table_empty_call_grpc(): - client = BigtableTableAdminClient( +def test__restore_table_empty_call_grpc(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -21527,7 +21549,7 @@ def test_restore_table_empty_call_grpc(): # Mock the actual call, and fake the request. with mock.patch.object(type(client.transport.restore_table), "__call__") as call: call.return_value = operations_pb2.Operation(name="operations/op") - client.restore_table(request=None) + client._restore_table(request=None) # Establish that the underlying stub method was called. call.assert_called() @@ -21540,7 +21562,7 @@ def test_restore_table_empty_call_grpc(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_copy_backup_empty_call_grpc(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -21561,7 +21583,7 @@ def test_copy_backup_empty_call_grpc(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_get_iam_policy_empty_call_grpc(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -21582,7 +21604,7 @@ def test_get_iam_policy_empty_call_grpc(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. 
request == None and no flattened fields passed, work. def test_set_iam_policy_empty_call_grpc(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -21603,7 +21625,7 @@ def test_set_iam_policy_empty_call_grpc(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_test_iam_permissions_empty_call_grpc(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -21626,7 +21648,7 @@ def test_test_iam_permissions_empty_call_grpc(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_create_schema_bundle_empty_call_grpc(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -21649,7 +21671,7 @@ def test_create_schema_bundle_empty_call_grpc(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_update_schema_bundle_empty_call_grpc(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -21672,7 +21694,7 @@ def test_update_schema_bundle_empty_call_grpc(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_get_schema_bundle_empty_call_grpc(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -21695,7 +21717,7 @@ def test_get_schema_bundle_empty_call_grpc(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_list_schema_bundles_empty_call_grpc(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -21718,7 +21740,7 @@ def test_list_schema_bundles_empty_call_grpc(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_delete_schema_bundle_empty_call_grpc(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -21739,14 +21761,14 @@ def test_delete_schema_bundle_empty_call_grpc(): def test_transport_kind_grpc_asyncio(): - transport = BigtableTableAdminAsyncClient.get_transport_class("grpc_asyncio")( + transport = BaseBigtableTableAdminAsyncClient.get_transport_class("grpc_asyncio")( credentials=async_anonymous_credentials() ) assert transport.kind == "grpc_asyncio" def test_initialize_client_w_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport="grpc_asyncio" ) assert client is not None @@ -21756,7 +21778,7 @@ def test_initialize_client_w_grpc_asyncio(): # i.e. request == None and no flattened fields passed, work. 
@pytest.mark.asyncio async def test_create_table_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport="grpc_asyncio", ) @@ -21785,7 +21807,7 @@ async def test_create_table_empty_call_grpc_asyncio(): # i.e. request == None and no flattened fields passed, work. @pytest.mark.asyncio async def test_create_table_from_snapshot_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport="grpc_asyncio", ) @@ -21812,7 +21834,7 @@ async def test_create_table_from_snapshot_empty_call_grpc_asyncio(): # i.e. request == None and no flattened fields passed, work. @pytest.mark.asyncio async def test_list_tables_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport="grpc_asyncio", ) @@ -21839,7 +21861,7 @@ async def test_list_tables_empty_call_grpc_asyncio(): # i.e. request == None and no flattened fields passed, work. @pytest.mark.asyncio async def test_get_table_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport="grpc_asyncio", ) @@ -21868,7 +21890,7 @@ async def test_get_table_empty_call_grpc_asyncio(): # i.e. request == None and no flattened fields passed, work. @pytest.mark.asyncio async def test_update_table_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport="grpc_asyncio", ) @@ -21893,7 +21915,7 @@ async def test_update_table_empty_call_grpc_asyncio(): # i.e. request == None and no flattened fields passed, work. @pytest.mark.asyncio async def test_delete_table_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport="grpc_asyncio", ) @@ -21916,7 +21938,7 @@ async def test_delete_table_empty_call_grpc_asyncio(): # i.e. request == None and no flattened fields passed, work. @pytest.mark.asyncio async def test_undelete_table_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport="grpc_asyncio", ) @@ -21941,7 +21963,7 @@ async def test_undelete_table_empty_call_grpc_asyncio(): # i.e. request == None and no flattened fields passed, work. @pytest.mark.asyncio async def test_create_authorized_view_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport="grpc_asyncio", ) @@ -21968,7 +21990,7 @@ async def test_create_authorized_view_empty_call_grpc_asyncio(): # i.e. request == None and no flattened fields passed, work. @pytest.mark.asyncio async def test_list_authorized_views_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport="grpc_asyncio", ) @@ -21997,7 +22019,7 @@ async def test_list_authorized_views_empty_call_grpc_asyncio(): # i.e. request == None and no flattened fields passed, work. 
@pytest.mark.asyncio async def test_get_authorized_view_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport="grpc_asyncio", ) @@ -22028,7 +22050,7 @@ async def test_get_authorized_view_empty_call_grpc_asyncio(): # i.e. request == None and no flattened fields passed, work. @pytest.mark.asyncio async def test_update_authorized_view_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport="grpc_asyncio", ) @@ -22055,7 +22077,7 @@ async def test_update_authorized_view_empty_call_grpc_asyncio(): # i.e. request == None and no flattened fields passed, work. @pytest.mark.asyncio async def test_delete_authorized_view_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport="grpc_asyncio", ) @@ -22080,7 +22102,7 @@ async def test_delete_authorized_view_empty_call_grpc_asyncio(): # i.e. request == None and no flattened fields passed, work. @pytest.mark.asyncio async def test_modify_column_families_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport="grpc_asyncio", ) @@ -22111,7 +22133,7 @@ async def test_modify_column_families_empty_call_grpc_asyncio(): # i.e. request == None and no flattened fields passed, work. @pytest.mark.asyncio async def test_drop_row_range_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport="grpc_asyncio", ) @@ -22134,7 +22156,7 @@ async def test_drop_row_range_empty_call_grpc_asyncio(): # i.e. request == None and no flattened fields passed, work. @pytest.mark.asyncio async def test_generate_consistency_token_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport="grpc_asyncio", ) @@ -22163,7 +22185,7 @@ async def test_generate_consistency_token_empty_call_grpc_asyncio(): # i.e. request == None and no flattened fields passed, work. @pytest.mark.asyncio async def test_check_consistency_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport="grpc_asyncio", ) @@ -22192,7 +22214,7 @@ async def test_check_consistency_empty_call_grpc_asyncio(): # i.e. request == None and no flattened fields passed, work. @pytest.mark.asyncio async def test_snapshot_table_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport="grpc_asyncio", ) @@ -22217,7 +22239,7 @@ async def test_snapshot_table_empty_call_grpc_asyncio(): # i.e. request == None and no flattened fields passed, work. @pytest.mark.asyncio async def test_get_snapshot_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport="grpc_asyncio", ) @@ -22247,7 +22269,7 @@ async def test_get_snapshot_empty_call_grpc_asyncio(): # i.e. request == None and no flattened fields passed, work. 
@pytest.mark.asyncio async def test_list_snapshots_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport="grpc_asyncio", ) @@ -22274,7 +22296,7 @@ async def test_list_snapshots_empty_call_grpc_asyncio(): # i.e. request == None and no flattened fields passed, work. @pytest.mark.asyncio async def test_delete_snapshot_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport="grpc_asyncio", ) @@ -22297,7 +22319,7 @@ async def test_delete_snapshot_empty_call_grpc_asyncio(): # i.e. request == None and no flattened fields passed, work. @pytest.mark.asyncio async def test_create_backup_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport="grpc_asyncio", ) @@ -22322,7 +22344,7 @@ async def test_create_backup_empty_call_grpc_asyncio(): # i.e. request == None and no flattened fields passed, work. @pytest.mark.asyncio async def test_get_backup_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport="grpc_asyncio", ) @@ -22354,7 +22376,7 @@ async def test_get_backup_empty_call_grpc_asyncio(): # i.e. request == None and no flattened fields passed, work. @pytest.mark.asyncio async def test_update_backup_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport="grpc_asyncio", ) @@ -22386,7 +22408,7 @@ async def test_update_backup_empty_call_grpc_asyncio(): # i.e. request == None and no flattened fields passed, work. @pytest.mark.asyncio async def test_delete_backup_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport="grpc_asyncio", ) @@ -22409,7 +22431,7 @@ async def test_delete_backup_empty_call_grpc_asyncio(): # i.e. request == None and no flattened fields passed, work. @pytest.mark.asyncio async def test_list_backups_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport="grpc_asyncio", ) @@ -22435,8 +22457,8 @@ async def test_list_backups_empty_call_grpc_asyncio(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. @pytest.mark.asyncio -async def test_restore_table_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( +async def test__restore_table_empty_call_grpc_asyncio(): + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport="grpc_asyncio", ) @@ -22447,7 +22469,7 @@ async def test_restore_table_empty_call_grpc_asyncio(): call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name="operations/spam") ) - await client.restore_table(request=None) + await client._restore_table(request=None) # Establish that the underlying stub method was called. call.assert_called() @@ -22461,7 +22483,7 @@ async def test_restore_table_empty_call_grpc_asyncio(): # i.e. request == None and no flattened fields passed, work. 
@pytest.mark.asyncio async def test_copy_backup_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport="grpc_asyncio", ) @@ -22486,7 +22508,7 @@ async def test_copy_backup_empty_call_grpc_asyncio(): # i.e. request == None and no flattened fields passed, work. @pytest.mark.asyncio async def test_get_iam_policy_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport="grpc_asyncio", ) @@ -22514,7 +22536,7 @@ async def test_get_iam_policy_empty_call_grpc_asyncio(): # i.e. request == None and no flattened fields passed, work. @pytest.mark.asyncio async def test_set_iam_policy_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport="grpc_asyncio", ) @@ -22542,7 +22564,7 @@ async def test_set_iam_policy_empty_call_grpc_asyncio(): # i.e. request == None and no flattened fields passed, work. @pytest.mark.asyncio async def test_test_iam_permissions_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport="grpc_asyncio", ) @@ -22571,7 +22593,7 @@ async def test_test_iam_permissions_empty_call_grpc_asyncio(): # i.e. request == None and no flattened fields passed, work. @pytest.mark.asyncio async def test_create_schema_bundle_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport="grpc_asyncio", ) @@ -22598,7 +22620,7 @@ async def test_create_schema_bundle_empty_call_grpc_asyncio(): # i.e. request == None and no flattened fields passed, work. @pytest.mark.asyncio async def test_update_schema_bundle_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport="grpc_asyncio", ) @@ -22625,7 +22647,7 @@ async def test_update_schema_bundle_empty_call_grpc_asyncio(): # i.e. request == None and no flattened fields passed, work. @pytest.mark.asyncio async def test_get_schema_bundle_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport="grpc_asyncio", ) @@ -22655,7 +22677,7 @@ async def test_get_schema_bundle_empty_call_grpc_asyncio(): # i.e. request == None and no flattened fields passed, work. @pytest.mark.asyncio async def test_list_schema_bundles_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport="grpc_asyncio", ) @@ -22684,7 +22706,7 @@ async def test_list_schema_bundles_empty_call_grpc_asyncio(): # i.e. request == None and no flattened fields passed, work. 
@pytest.mark.asyncio async def test_delete_schema_bundle_empty_call_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport="grpc_asyncio", ) @@ -22706,7 +22728,7 @@ async def test_delete_schema_bundle_empty_call_grpc_asyncio(): def test_transport_kind_rest(): - transport = BigtableTableAdminClient.get_transport_class("rest")( + transport = BaseBigtableTableAdminClient.get_transport_class("rest")( credentials=ga_credentials.AnonymousCredentials() ) assert transport.kind == "rest" @@ -22715,7 +22737,7 @@ def test_transport_kind_rest(): def test_create_table_rest_bad_request( request_type=bigtable_table_admin.CreateTableRequest, ): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding @@ -22745,7 +22767,7 @@ def test_create_table_rest_bad_request( ], ) def test_create_table_rest_call_success(request_type): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -22789,7 +22811,7 @@ def test_create_table_rest_interceptors(null_interceptor): if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) with mock.patch.object( type(client.transport._session), "request" @@ -22846,7 +22868,7 @@ def test_create_table_rest_interceptors(null_interceptor): def test_create_table_from_snapshot_rest_bad_request( request_type=bigtable_table_admin.CreateTableFromSnapshotRequest, ): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding @@ -22876,7 +22898,7 @@ def test_create_table_from_snapshot_rest_bad_request( ], ) def test_create_table_from_snapshot_rest_call_success(request_type): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -22910,7 +22932,7 @@ def test_create_table_from_snapshot_rest_interceptors(null_interceptor): if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) with mock.patch.object( type(client.transport._session), "request" @@ -22970,7 +22992,7 @@ def test_create_table_from_snapshot_rest_interceptors(null_interceptor): def test_list_tables_rest_bad_request( request_type=bigtable_table_admin.ListTablesRequest, ): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding @@ -23000,7 +23022,7 @@ def test_list_tables_rest_bad_request( ], ) def test_list_tables_rest_call_success(request_type): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -23040,7 +23062,7 @@ def test_list_tables_rest_interceptors(null_interceptor): if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) with 
mock.patch.object( type(client.transport._session), "request" @@ -23100,7 +23122,7 @@ def test_list_tables_rest_interceptors(null_interceptor): def test_get_table_rest_bad_request(request_type=bigtable_table_admin.GetTableRequest): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding @@ -23130,7 +23152,7 @@ def test_get_table_rest_bad_request(request_type=bigtable_table_admin.GetTableRe ], ) def test_get_table_rest_call_success(request_type): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -23174,7 +23196,7 @@ def test_get_table_rest_interceptors(null_interceptor): if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) with mock.patch.object( type(client.transport._session), "request" @@ -23231,7 +23253,7 @@ def test_get_table_rest_interceptors(null_interceptor): def test_update_table_rest_bad_request( request_type=bigtable_table_admin.UpdateTableRequest, ): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding @@ -23263,7 +23285,7 @@ def test_update_table_rest_bad_request( ], ) def test_update_table_rest_call_success(request_type): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -23431,7 +23453,7 @@ def test_update_table_rest_interceptors(null_interceptor): if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) with mock.patch.object( type(client.transport._session), "request" @@ -23490,7 +23512,7 @@ def test_update_table_rest_interceptors(null_interceptor): def test_delete_table_rest_bad_request( request_type=bigtable_table_admin.DeleteTableRequest, ): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding @@ -23520,7 +23542,7 @@ def test_delete_table_rest_bad_request( ], ) def test_delete_table_rest_call_success(request_type): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -23554,7 +23576,7 @@ def test_delete_table_rest_interceptors(null_interceptor): if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) with mock.patch.object( type(client.transport._session), "request" @@ -23599,7 +23621,7 @@ def test_delete_table_rest_interceptors(null_interceptor): def test_undelete_table_rest_bad_request( request_type=bigtable_table_admin.UndeleteTableRequest, ): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding @@ -23629,7 +23651,7 @@ def test_undelete_table_rest_bad_request( ], ) def 
test_undelete_table_rest_call_success(request_type): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -23663,7 +23685,7 @@ def test_undelete_table_rest_interceptors(null_interceptor): if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) with mock.patch.object( type(client.transport._session), "request" @@ -23723,7 +23745,7 @@ def test_undelete_table_rest_interceptors(null_interceptor): def test_create_authorized_view_rest_bad_request( request_type=bigtable_table_admin.CreateAuthorizedViewRequest, ): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding @@ -23753,7 +23775,7 @@ def test_create_authorized_view_rest_bad_request( ], ) def test_create_authorized_view_rest_call_success(request_type): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -23865,7 +23887,7 @@ def test_create_authorized_view_rest_interceptors(null_interceptor): if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) with mock.patch.object( type(client.transport._session), "request" @@ -23925,7 +23947,7 @@ def test_create_authorized_view_rest_interceptors(null_interceptor): def test_list_authorized_views_rest_bad_request( request_type=bigtable_table_admin.ListAuthorizedViewsRequest, ): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding @@ -23955,7 +23977,7 @@ def test_list_authorized_views_rest_bad_request( ], ) def test_list_authorized_views_rest_call_success(request_type): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -23995,7 +24017,7 @@ def test_list_authorized_views_rest_interceptors(null_interceptor): if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) with mock.patch.object( type(client.transport._session), "request" @@ -24058,7 +24080,7 @@ def test_list_authorized_views_rest_interceptors(null_interceptor): def test_get_authorized_view_rest_bad_request( request_type=bigtable_table_admin.GetAuthorizedViewRequest, ): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding @@ -24090,7 +24112,7 @@ def test_get_authorized_view_rest_bad_request( ], ) def test_get_authorized_view_rest_call_success(request_type): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -24136,7 +24158,7 @@ def test_get_authorized_view_rest_interceptors(null_interceptor): if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = 
BaseBigtableTableAdminClient(transport=transport) with mock.patch.object( type(client.transport._session), "request" @@ -24194,7 +24216,7 @@ def test_get_authorized_view_rest_interceptors(null_interceptor): def test_update_authorized_view_rest_bad_request( request_type=bigtable_table_admin.UpdateAuthorizedViewRequest, ): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding @@ -24228,7 +24250,7 @@ def test_update_authorized_view_rest_bad_request( ], ) def test_update_authorized_view_rest_call_success(request_type): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -24344,7 +24366,7 @@ def test_update_authorized_view_rest_interceptors(null_interceptor): if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) with mock.patch.object( type(client.transport._session), "request" @@ -24404,7 +24426,7 @@ def test_update_authorized_view_rest_interceptors(null_interceptor): def test_delete_authorized_view_rest_bad_request( request_type=bigtable_table_admin.DeleteAuthorizedViewRequest, ): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding @@ -24436,7 +24458,7 @@ def test_delete_authorized_view_rest_bad_request( ], ) def test_delete_authorized_view_rest_call_success(request_type): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -24472,7 +24494,7 @@ def test_delete_authorized_view_rest_interceptors(null_interceptor): if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) with mock.patch.object( type(client.transport._session), "request" @@ -24517,7 +24539,7 @@ def test_delete_authorized_view_rest_interceptors(null_interceptor): def test_modify_column_families_rest_bad_request( request_type=bigtable_table_admin.ModifyColumnFamiliesRequest, ): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding @@ -24547,7 +24569,7 @@ def test_modify_column_families_rest_bad_request( ], ) def test_modify_column_families_rest_call_success(request_type): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -24591,7 +24613,7 @@ def test_modify_column_families_rest_interceptors(null_interceptor): if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) with mock.patch.object( type(client.transport._session), "request" @@ -24649,7 +24671,7 @@ def test_modify_column_families_rest_interceptors(null_interceptor): def test_drop_row_range_rest_bad_request( request_type=bigtable_table_admin.DropRowRangeRequest, ): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( 
credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding @@ -24679,7 +24701,7 @@ def test_drop_row_range_rest_bad_request( ], ) def test_drop_row_range_rest_call_success(request_type): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -24713,7 +24735,7 @@ def test_drop_row_range_rest_interceptors(null_interceptor): if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) with mock.patch.object( type(client.transport._session), "request" @@ -24758,7 +24780,7 @@ def test_drop_row_range_rest_interceptors(null_interceptor): def test_generate_consistency_token_rest_bad_request( request_type=bigtable_table_admin.GenerateConsistencyTokenRequest, ): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding @@ -24788,7 +24810,7 @@ def test_generate_consistency_token_rest_bad_request( ], ) def test_generate_consistency_token_rest_call_success(request_type): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -24830,7 +24852,7 @@ def test_generate_consistency_token_rest_interceptors(null_interceptor): if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) with mock.patch.object( type(client.transport._session), "request" @@ -24893,7 +24915,7 @@ def test_generate_consistency_token_rest_interceptors(null_interceptor): def test_check_consistency_rest_bad_request( request_type=bigtable_table_admin.CheckConsistencyRequest, ): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding @@ -24923,7 +24945,7 @@ def test_check_consistency_rest_bad_request( ], ) def test_check_consistency_rest_call_success(request_type): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -24963,7 +24985,7 @@ def test_check_consistency_rest_interceptors(null_interceptor): if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) with mock.patch.object( type(client.transport._session), "request" @@ -25026,7 +25048,7 @@ def test_check_consistency_rest_interceptors(null_interceptor): def test_snapshot_table_rest_bad_request( request_type=bigtable_table_admin.SnapshotTableRequest, ): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding @@ -25056,7 +25078,7 @@ def test_snapshot_table_rest_bad_request( ], ) def test_snapshot_table_rest_call_success(request_type): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -25090,7 +25112,7 @@ def 
test_snapshot_table_rest_interceptors(null_interceptor): if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) with mock.patch.object( type(client.transport._session), "request" @@ -25150,7 +25172,7 @@ def test_snapshot_table_rest_interceptors(null_interceptor): def test_get_snapshot_rest_bad_request( request_type=bigtable_table_admin.GetSnapshotRequest, ): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding @@ -25182,7 +25204,7 @@ def test_get_snapshot_rest_bad_request( ], ) def test_get_snapshot_rest_call_success(request_type): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -25230,7 +25252,7 @@ def test_get_snapshot_rest_interceptors(null_interceptor): if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) with mock.patch.object( type(client.transport._session), "request" @@ -25287,7 +25309,7 @@ def test_get_snapshot_rest_interceptors(null_interceptor): def test_list_snapshots_rest_bad_request( request_type=bigtable_table_admin.ListSnapshotsRequest, ): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding @@ -25317,7 +25339,7 @@ def test_list_snapshots_rest_bad_request( ], ) def test_list_snapshots_rest_call_success(request_type): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -25357,7 +25379,7 @@ def test_list_snapshots_rest_interceptors(null_interceptor): if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) with mock.patch.object( type(client.transport._session), "request" @@ -25420,7 +25442,7 @@ def test_list_snapshots_rest_interceptors(null_interceptor): def test_delete_snapshot_rest_bad_request( request_type=bigtable_table_admin.DeleteSnapshotRequest, ): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding @@ -25452,7 +25474,7 @@ def test_delete_snapshot_rest_bad_request( ], ) def test_delete_snapshot_rest_call_success(request_type): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -25488,7 +25510,7 @@ def test_delete_snapshot_rest_interceptors(null_interceptor): if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) with mock.patch.object( type(client.transport._session), "request" @@ -25533,7 +25555,7 @@ def test_delete_snapshot_rest_interceptors(null_interceptor): def test_create_backup_rest_bad_request( request_type=bigtable_table_admin.CreateBackupRequest, ): - client = BigtableTableAdminClient( + client = 
BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding @@ -25563,7 +25585,7 @@ def test_create_backup_rest_bad_request( ], ) def test_create_backup_rest_call_success(request_type): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -25690,7 +25712,7 @@ def test_create_backup_rest_interceptors(null_interceptor): if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) with mock.patch.object( type(client.transport._session), "request" @@ -25749,7 +25771,7 @@ def test_create_backup_rest_interceptors(null_interceptor): def test_get_backup_rest_bad_request( request_type=bigtable_table_admin.GetBackupRequest, ): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding @@ -25781,7 +25803,7 @@ def test_get_backup_rest_bad_request( ], ) def test_get_backup_rest_call_success(request_type): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -25833,7 +25855,7 @@ def test_get_backup_rest_interceptors(null_interceptor): if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) with mock.patch.object( type(client.transport._session), "request" @@ -25890,7 +25912,7 @@ def test_get_backup_rest_interceptors(null_interceptor): def test_update_backup_rest_bad_request( request_type=bigtable_table_admin.UpdateBackupRequest, ): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding @@ -25924,7 +25946,7 @@ def test_update_backup_rest_bad_request( ], ) def test_update_backup_rest_call_success(request_type): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -26071,7 +26093,7 @@ def test_update_backup_rest_interceptors(null_interceptor): if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) with mock.patch.object( type(client.transport._session), "request" @@ -26128,7 +26150,7 @@ def test_update_backup_rest_interceptors(null_interceptor): def test_delete_backup_rest_bad_request( request_type=bigtable_table_admin.DeleteBackupRequest, ): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding @@ -26160,7 +26182,7 @@ def test_delete_backup_rest_bad_request( ], ) def test_delete_backup_rest_call_success(request_type): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -26196,7 +26218,7 @@ def test_delete_backup_rest_interceptors(null_interceptor): if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - 
client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) with mock.patch.object( type(client.transport._session), "request" @@ -26241,7 +26263,7 @@ def test_delete_backup_rest_interceptors(null_interceptor): def test_list_backups_rest_bad_request( request_type=bigtable_table_admin.ListBackupsRequest, ): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding @@ -26271,7 +26293,7 @@ def test_list_backups_rest_bad_request( ], ) def test_list_backups_rest_call_success(request_type): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -26311,7 +26333,7 @@ def test_list_backups_rest_interceptors(null_interceptor): if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) with mock.patch.object( type(client.transport._session), "request" @@ -26370,10 +26392,10 @@ def test_list_backups_rest_interceptors(null_interceptor): post_with_metadata.assert_called_once() -def test_restore_table_rest_bad_request( +def test__restore_table_rest_bad_request( request_type=bigtable_table_admin.RestoreTableRequest, ): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding @@ -26392,7 +26414,7 @@ def test_restore_table_rest_bad_request( response_value.request = mock.Mock() req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - client.restore_table(request) + client._restore_table(request) @pytest.mark.parametrize( @@ -26402,8 +26424,8 @@ def test_restore_table_rest_bad_request( dict, ], ) -def test_restore_table_rest_call_success(request_type): - client = BigtableTableAdminClient( +def test__restore_table_rest_call_success(request_type): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -26423,21 +26445,21 @@ def test_restore_table_rest_call_success(request_type): response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} - response = client.restore_table(request) + response = client._restore_table(request) # Establish that the response is the type that we expect. 
json_return_value = json_format.MessageToJson(return_value) @pytest.mark.parametrize("null_interceptor", [True, False]) -def test_restore_table_rest_interceptors(null_interceptor): +def test__restore_table_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( credentials=ga_credentials.AnonymousCredentials(), interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) with mock.patch.object( type(client.transport._session), "request" @@ -26480,7 +26502,7 @@ def test_restore_table_rest_interceptors(null_interceptor): post.return_value = operations_pb2.Operation() post_with_metadata.return_value = operations_pb2.Operation(), metadata - client.restore_table( + client._restore_table( request, metadata=[ ("key", "val"), @@ -26496,7 +26518,7 @@ def test_restore_table_rest_interceptors(null_interceptor): def test_copy_backup_rest_bad_request( request_type=bigtable_table_admin.CopyBackupRequest, ): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding @@ -26526,7 +26548,7 @@ def test_copy_backup_rest_bad_request( ], ) def test_copy_backup_rest_call_success(request_type): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -26560,7 +26582,7 @@ def test_copy_backup_rest_interceptors(null_interceptor): if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) with mock.patch.object( type(client.transport._session), "request" @@ -26619,7 +26641,7 @@ def test_copy_backup_rest_interceptors(null_interceptor): def test_get_iam_policy_rest_bad_request( request_type=iam_policy_pb2.GetIamPolicyRequest, ): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding @@ -26649,7 +26671,7 @@ def test_get_iam_policy_rest_bad_request( ], ) def test_get_iam_policy_rest_call_success(request_type): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -26688,7 +26710,7 @@ def test_get_iam_policy_rest_interceptors(null_interceptor): if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) with mock.patch.object( type(client.transport._session), "request" @@ -26744,7 +26766,7 @@ def test_get_iam_policy_rest_interceptors(null_interceptor): def test_set_iam_policy_rest_bad_request( request_type=iam_policy_pb2.SetIamPolicyRequest, ): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding @@ -26774,7 +26796,7 @@ def test_set_iam_policy_rest_bad_request( ], ) def test_set_iam_policy_rest_call_success(request_type): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -26813,7 
+26835,7 @@ def test_set_iam_policy_rest_interceptors(null_interceptor): if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) with mock.patch.object( type(client.transport._session), "request" @@ -26869,7 +26891,7 @@ def test_set_iam_policy_rest_interceptors(null_interceptor): def test_test_iam_permissions_rest_bad_request( request_type=iam_policy_pb2.TestIamPermissionsRequest, ): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding @@ -26899,7 +26921,7 @@ def test_test_iam_permissions_rest_bad_request( ], ) def test_test_iam_permissions_rest_call_success(request_type): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -26936,7 +26958,7 @@ def test_test_iam_permissions_rest_interceptors(null_interceptor): if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) with mock.patch.object( type(client.transport._session), "request" @@ -26997,7 +27019,7 @@ def test_test_iam_permissions_rest_interceptors(null_interceptor): def test_create_schema_bundle_rest_bad_request( request_type=bigtable_table_admin.CreateSchemaBundleRequest, ): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding @@ -27027,7 +27049,7 @@ def test_create_schema_bundle_rest_bad_request( ], ) def test_create_schema_bundle_rest_call_success(request_type): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -27135,7 +27157,7 @@ def test_create_schema_bundle_rest_interceptors(null_interceptor): if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) with mock.patch.object( type(client.transport._session), "request" @@ -27195,7 +27217,7 @@ def test_create_schema_bundle_rest_interceptors(null_interceptor): def test_update_schema_bundle_rest_bad_request( request_type=bigtable_table_admin.UpdateSchemaBundleRequest, ): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding @@ -27229,7 +27251,7 @@ def test_update_schema_bundle_rest_bad_request( ], ) def test_update_schema_bundle_rest_call_success(request_type): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -27341,7 +27363,7 @@ def test_update_schema_bundle_rest_interceptors(null_interceptor): if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) with mock.patch.object( type(client.transport._session), "request" @@ -27401,7 +27423,7 @@ def test_update_schema_bundle_rest_interceptors(null_interceptor): def 
test_get_schema_bundle_rest_bad_request( request_type=bigtable_table_admin.GetSchemaBundleRequest, ): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding @@ -27433,7 +27455,7 @@ def test_get_schema_bundle_rest_bad_request( ], ) def test_get_schema_bundle_rest_call_success(request_type): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -27477,7 +27499,7 @@ def test_get_schema_bundle_rest_interceptors(null_interceptor): if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) with mock.patch.object( type(client.transport._session), "request" @@ -27535,7 +27557,7 @@ def test_get_schema_bundle_rest_interceptors(null_interceptor): def test_list_schema_bundles_rest_bad_request( request_type=bigtable_table_admin.ListSchemaBundlesRequest, ): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding @@ -27565,7 +27587,7 @@ def test_list_schema_bundles_rest_bad_request( ], ) def test_list_schema_bundles_rest_call_success(request_type): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -27605,7 +27627,7 @@ def test_list_schema_bundles_rest_interceptors(null_interceptor): if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) with mock.patch.object( type(client.transport._session), "request" @@ -27668,7 +27690,7 @@ def test_list_schema_bundles_rest_interceptors(null_interceptor): def test_delete_schema_bundle_rest_bad_request( request_type=bigtable_table_admin.DeleteSchemaBundleRequest, ): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) # send a request that will satisfy transcoding @@ -27700,7 +27722,7 @@ def test_delete_schema_bundle_rest_bad_request( ], ) def test_delete_schema_bundle_rest_call_success(request_type): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) @@ -27736,7 +27758,7 @@ def test_delete_schema_bundle_rest_interceptors(null_interceptor): if null_interceptor else transports.BigtableTableAdminRestInterceptor(), ) - client = BigtableTableAdminClient(transport=transport) + client = BaseBigtableTableAdminClient(transport=transport) with mock.patch.object( type(client.transport._session), "request" @@ -27779,7 +27801,7 @@ def test_delete_schema_bundle_rest_interceptors(null_interceptor): def test_initialize_client_w_rest(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) assert client is not None @@ -27788,7 +27810,7 @@ def test_initialize_client_w_rest(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
def test_create_table_empty_call_rest(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -27808,7 +27830,7 @@ def test_create_table_empty_call_rest(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_create_table_from_snapshot_empty_call_rest(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -27830,7 +27852,7 @@ def test_create_table_from_snapshot_empty_call_rest(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_list_tables_empty_call_rest(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -27850,7 +27872,7 @@ def test_list_tables_empty_call_rest(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_get_table_empty_call_rest(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -27870,7 +27892,7 @@ def test_get_table_empty_call_rest(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_update_table_empty_call_rest(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -27890,7 +27912,7 @@ def test_update_table_empty_call_rest(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_delete_table_empty_call_rest(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -27910,7 +27932,7 @@ def test_delete_table_empty_call_rest(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_undelete_table_empty_call_rest(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -27930,7 +27952,7 @@ def test_undelete_table_empty_call_rest(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_create_authorized_view_empty_call_rest(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -27952,7 +27974,7 @@ def test_create_authorized_view_empty_call_rest(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_list_authorized_views_empty_call_rest(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -27974,7 +27996,7 @@ def test_list_authorized_views_empty_call_rest(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. 
request == None and no flattened fields passed, work. def test_get_authorized_view_empty_call_rest(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -27996,7 +28018,7 @@ def test_get_authorized_view_empty_call_rest(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_update_authorized_view_empty_call_rest(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -28018,7 +28040,7 @@ def test_update_authorized_view_empty_call_rest(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_delete_authorized_view_empty_call_rest(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -28040,7 +28062,7 @@ def test_delete_authorized_view_empty_call_rest(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_modify_column_families_empty_call_rest(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -28062,7 +28084,7 @@ def test_modify_column_families_empty_call_rest(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_drop_row_range_empty_call_rest(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -28082,7 +28104,7 @@ def test_drop_row_range_empty_call_rest(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_generate_consistency_token_empty_call_rest(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -28104,7 +28126,7 @@ def test_generate_consistency_token_empty_call_rest(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_check_consistency_empty_call_rest(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -28126,7 +28148,7 @@ def test_check_consistency_empty_call_rest(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_snapshot_table_empty_call_rest(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -28146,7 +28168,7 @@ def test_snapshot_table_empty_call_rest(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
def test_get_snapshot_empty_call_rest(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -28166,7 +28188,7 @@ def test_get_snapshot_empty_call_rest(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_list_snapshots_empty_call_rest(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -28186,7 +28208,7 @@ def test_list_snapshots_empty_call_rest(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_delete_snapshot_empty_call_rest(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -28206,7 +28228,7 @@ def test_delete_snapshot_empty_call_rest(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_create_backup_empty_call_rest(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -28226,7 +28248,7 @@ def test_create_backup_empty_call_rest(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_get_backup_empty_call_rest(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -28246,7 +28268,7 @@ def test_get_backup_empty_call_rest(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_update_backup_empty_call_rest(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -28266,7 +28288,7 @@ def test_update_backup_empty_call_rest(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_delete_backup_empty_call_rest(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -28286,7 +28308,7 @@ def test_delete_backup_empty_call_rest(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_list_backups_empty_call_rest(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -28305,15 +28327,15 @@ def test_list_backups_empty_call_rest(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. -def test_restore_table_empty_call_rest(): - client = BigtableTableAdminClient( +def test__restore_table_empty_call_rest(): + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) # Mock the actual call, and fake the request. 
with mock.patch.object(type(client.transport.restore_table), "__call__") as call: - client.restore_table(request=None) + client._restore_table(request=None) # Establish that the underlying stub method was called. call.assert_called() @@ -28326,7 +28348,7 @@ def test_restore_table_empty_call_rest(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_copy_backup_empty_call_rest(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -28346,7 +28368,7 @@ def test_copy_backup_empty_call_rest(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_get_iam_policy_empty_call_rest(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -28366,7 +28388,7 @@ def test_get_iam_policy_empty_call_rest(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_set_iam_policy_empty_call_rest(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -28386,7 +28408,7 @@ def test_set_iam_policy_empty_call_rest(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_test_iam_permissions_empty_call_rest(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -28408,7 +28430,7 @@ def test_test_iam_permissions_empty_call_rest(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_create_schema_bundle_empty_call_rest(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -28430,7 +28452,7 @@ def test_create_schema_bundle_empty_call_rest(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_update_schema_bundle_empty_call_rest(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -28452,7 +28474,7 @@ def test_update_schema_bundle_empty_call_rest(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_get_schema_bundle_empty_call_rest(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -28474,7 +28496,7 @@ def test_get_schema_bundle_empty_call_rest(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
def test_list_schema_bundles_empty_call_rest(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -28496,7 +28518,7 @@ def test_list_schema_bundles_empty_call_rest(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_delete_schema_bundle_empty_call_rest(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -28516,7 +28538,7 @@ def test_delete_schema_bundle_empty_call_rest(): def test_bigtable_table_admin_rest_lro_client(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest", ) @@ -28534,7 +28556,7 @@ def test_bigtable_table_admin_rest_lro_client(): def test_transport_grpc_default(): # A client should use the gRPC transport by default. - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) assert isinstance( @@ -28665,7 +28687,7 @@ def test_bigtable_table_admin_auth_adc(): # If no credentials are provided, we should use ADC credentials. with mock.patch.object(google.auth, "default", autospec=True) as adc: adc.return_value = (ga_credentials.AnonymousCredentials(), None) - BigtableTableAdminClient() + BaseBigtableTableAdminClient() adc.assert_called_once_with( scopes=None, default_scopes=( @@ -28839,7 +28861,7 @@ def test_bigtable_table_admin_http_transport_client_cert_source_for_mtls(): ], ) def test_bigtable_table_admin_host_no_port(transport_name): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="bigtableadmin.googleapis.com" @@ -28862,7 +28884,7 @@ def test_bigtable_table_admin_host_no_port(transport_name): ], ) def test_bigtable_table_admin_host_with_port(transport_name): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), client_options=client_options.ClientOptions( api_endpoint="bigtableadmin.googleapis.com:8000" @@ -28885,11 +28907,11 @@ def test_bigtable_table_admin_host_with_port(transport_name): def test_bigtable_table_admin_client_transport_session_collision(transport_name): creds1 = ga_credentials.AnonymousCredentials() creds2 = ga_credentials.AnonymousCredentials() - client1 = BigtableTableAdminClient( + client1 = BaseBigtableTableAdminClient( credentials=creds1, transport=transport_name, ) - client2 = BigtableTableAdminClient( + client2 = BaseBigtableTableAdminClient( credentials=creds2, transport=transport_name, ) @@ -29127,7 +29149,7 @@ def test_bigtable_table_admin_transport_channel_mtls_with_adc(transport_class): def test_bigtable_table_admin_grpc_lro_client(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc", ) @@ -29144,7 +29166,7 @@ def test_bigtable_table_admin_grpc_lro_client(): def test_bigtable_table_admin_grpc_lro_async_client(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio", ) @@ -29171,7 +29193,7 @@ def test_authorized_view_path(): table=table, authorized_view=authorized_view, ) - 
actual = BigtableTableAdminClient.authorized_view_path( + actual = BaseBigtableTableAdminClient.authorized_view_path( project, instance, table, authorized_view ) assert expected == actual @@ -29184,10 +29206,10 @@ def test_parse_authorized_view_path(): "table": "cuttlefish", "authorized_view": "mussel", } - path = BigtableTableAdminClient.authorized_view_path(**expected) + path = BaseBigtableTableAdminClient.authorized_view_path(**expected) # Check that the path construction is reversible. - actual = BigtableTableAdminClient.parse_authorized_view_path(path) + actual = BaseBigtableTableAdminClient.parse_authorized_view_path(path) assert expected == actual @@ -29202,7 +29224,9 @@ def test_backup_path(): cluster=cluster, backup=backup, ) - actual = BigtableTableAdminClient.backup_path(project, instance, cluster, backup) + actual = BaseBigtableTableAdminClient.backup_path( + project, instance, cluster, backup + ) assert expected == actual @@ -29213,10 +29237,10 @@ def test_parse_backup_path(): "cluster": "whelk", "backup": "octopus", } - path = BigtableTableAdminClient.backup_path(**expected) + path = BaseBigtableTableAdminClient.backup_path(**expected) # Check that the path construction is reversible. - actual = BigtableTableAdminClient.parse_backup_path(path) + actual = BaseBigtableTableAdminClient.parse_backup_path(path) assert expected == actual @@ -29229,7 +29253,7 @@ def test_cluster_path(): instance=instance, cluster=cluster, ) - actual = BigtableTableAdminClient.cluster_path(project, instance, cluster) + actual = BaseBigtableTableAdminClient.cluster_path(project, instance, cluster) assert expected == actual @@ -29239,10 +29263,10 @@ def test_parse_cluster_path(): "instance": "winkle", "cluster": "nautilus", } - path = BigtableTableAdminClient.cluster_path(**expected) + path = BaseBigtableTableAdminClient.cluster_path(**expected) # Check that the path construction is reversible. - actual = BigtableTableAdminClient.parse_cluster_path(path) + actual = BaseBigtableTableAdminClient.parse_cluster_path(path) assert expected == actual @@ -29259,7 +29283,7 @@ def test_crypto_key_version_path(): crypto_key=crypto_key, crypto_key_version=crypto_key_version, ) - actual = BigtableTableAdminClient.crypto_key_version_path( + actual = BaseBigtableTableAdminClient.crypto_key_version_path( project, location, key_ring, crypto_key, crypto_key_version ) assert expected == actual @@ -29273,10 +29297,10 @@ def test_parse_crypto_key_version_path(): "crypto_key": "cuttlefish", "crypto_key_version": "mussel", } - path = BigtableTableAdminClient.crypto_key_version_path(**expected) + path = BaseBigtableTableAdminClient.crypto_key_version_path(**expected) # Check that the path construction is reversible. - actual = BigtableTableAdminClient.parse_crypto_key_version_path(path) + actual = BaseBigtableTableAdminClient.parse_crypto_key_version_path(path) assert expected == actual @@ -29287,7 +29311,7 @@ def test_instance_path(): project=project, instance=instance, ) - actual = BigtableTableAdminClient.instance_path(project, instance) + actual = BaseBigtableTableAdminClient.instance_path(project, instance) assert expected == actual @@ -29296,10 +29320,10 @@ def test_parse_instance_path(): "project": "scallop", "instance": "abalone", } - path = BigtableTableAdminClient.instance_path(**expected) + path = BaseBigtableTableAdminClient.instance_path(**expected) # Check that the path construction is reversible. 
- actual = BigtableTableAdminClient.parse_instance_path(path) + actual = BaseBigtableTableAdminClient.parse_instance_path(path) assert expected == actual @@ -29314,7 +29338,7 @@ def test_schema_bundle_path(): table=table, schema_bundle=schema_bundle, ) - actual = BigtableTableAdminClient.schema_bundle_path( + actual = BaseBigtableTableAdminClient.schema_bundle_path( project, instance, table, schema_bundle ) assert expected == actual @@ -29327,10 +29351,10 @@ def test_parse_schema_bundle_path(): "table": "cuttlefish", "schema_bundle": "mussel", } - path = BigtableTableAdminClient.schema_bundle_path(**expected) + path = BaseBigtableTableAdminClient.schema_bundle_path(**expected) # Check that the path construction is reversible. - actual = BigtableTableAdminClient.parse_schema_bundle_path(path) + actual = BaseBigtableTableAdminClient.parse_schema_bundle_path(path) assert expected == actual @@ -29345,7 +29369,7 @@ def test_snapshot_path(): cluster=cluster, snapshot=snapshot, ) - actual = BigtableTableAdminClient.snapshot_path( + actual = BaseBigtableTableAdminClient.snapshot_path( project, instance, cluster, snapshot ) assert expected == actual @@ -29358,10 +29382,10 @@ def test_parse_snapshot_path(): "cluster": "whelk", "snapshot": "octopus", } - path = BigtableTableAdminClient.snapshot_path(**expected) + path = BaseBigtableTableAdminClient.snapshot_path(**expected) # Check that the path construction is reversible. - actual = BigtableTableAdminClient.parse_snapshot_path(path) + actual = BaseBigtableTableAdminClient.parse_snapshot_path(path) assert expected == actual @@ -29374,7 +29398,7 @@ def test_table_path(): instance=instance, table=table, ) - actual = BigtableTableAdminClient.table_path(project, instance, table) + actual = BaseBigtableTableAdminClient.table_path(project, instance, table) assert expected == actual @@ -29384,10 +29408,10 @@ def test_parse_table_path(): "instance": "winkle", "table": "nautilus", } - path = BigtableTableAdminClient.table_path(**expected) + path = BaseBigtableTableAdminClient.table_path(**expected) # Check that the path construction is reversible. - actual = BigtableTableAdminClient.parse_table_path(path) + actual = BaseBigtableTableAdminClient.parse_table_path(path) assert expected == actual @@ -29396,7 +29420,7 @@ def test_common_billing_account_path(): expected = "billingAccounts/{billing_account}".format( billing_account=billing_account, ) - actual = BigtableTableAdminClient.common_billing_account_path(billing_account) + actual = BaseBigtableTableAdminClient.common_billing_account_path(billing_account) assert expected == actual @@ -29404,10 +29428,10 @@ def test_parse_common_billing_account_path(): expected = { "billing_account": "abalone", } - path = BigtableTableAdminClient.common_billing_account_path(**expected) + path = BaseBigtableTableAdminClient.common_billing_account_path(**expected) # Check that the path construction is reversible. 
- actual = BigtableTableAdminClient.parse_common_billing_account_path(path) + actual = BaseBigtableTableAdminClient.parse_common_billing_account_path(path) assert expected == actual @@ -29416,7 +29440,7 @@ def test_common_folder_path(): expected = "folders/{folder}".format( folder=folder, ) - actual = BigtableTableAdminClient.common_folder_path(folder) + actual = BaseBigtableTableAdminClient.common_folder_path(folder) assert expected == actual @@ -29424,10 +29448,10 @@ def test_parse_common_folder_path(): expected = { "folder": "clam", } - path = BigtableTableAdminClient.common_folder_path(**expected) + path = BaseBigtableTableAdminClient.common_folder_path(**expected) # Check that the path construction is reversible. - actual = BigtableTableAdminClient.parse_common_folder_path(path) + actual = BaseBigtableTableAdminClient.parse_common_folder_path(path) assert expected == actual @@ -29436,7 +29460,7 @@ def test_common_organization_path(): expected = "organizations/{organization}".format( organization=organization, ) - actual = BigtableTableAdminClient.common_organization_path(organization) + actual = BaseBigtableTableAdminClient.common_organization_path(organization) assert expected == actual @@ -29444,10 +29468,10 @@ def test_parse_common_organization_path(): expected = { "organization": "octopus", } - path = BigtableTableAdminClient.common_organization_path(**expected) + path = BaseBigtableTableAdminClient.common_organization_path(**expected) # Check that the path construction is reversible. - actual = BigtableTableAdminClient.parse_common_organization_path(path) + actual = BaseBigtableTableAdminClient.parse_common_organization_path(path) assert expected == actual @@ -29456,7 +29480,7 @@ def test_common_project_path(): expected = "projects/{project}".format( project=project, ) - actual = BigtableTableAdminClient.common_project_path(project) + actual = BaseBigtableTableAdminClient.common_project_path(project) assert expected == actual @@ -29464,10 +29488,10 @@ def test_parse_common_project_path(): expected = { "project": "nudibranch", } - path = BigtableTableAdminClient.common_project_path(**expected) + path = BaseBigtableTableAdminClient.common_project_path(**expected) # Check that the path construction is reversible. - actual = BigtableTableAdminClient.parse_common_project_path(path) + actual = BaseBigtableTableAdminClient.parse_common_project_path(path) assert expected == actual @@ -29478,7 +29502,7 @@ def test_common_location_path(): project=project, location=location, ) - actual = BigtableTableAdminClient.common_location_path(project, location) + actual = BaseBigtableTableAdminClient.common_location_path(project, location) assert expected == actual @@ -29487,10 +29511,10 @@ def test_parse_common_location_path(): "project": "winkle", "location": "nautilus", } - path = BigtableTableAdminClient.common_location_path(**expected) + path = BaseBigtableTableAdminClient.common_location_path(**expected) # Check that the path construction is reversible. 
- actual = BigtableTableAdminClient.parse_common_location_path(path) + actual = BaseBigtableTableAdminClient.parse_common_location_path(path) assert expected == actual @@ -29500,7 +29524,7 @@ def test_client_with_default_client_info(): with mock.patch.object( transports.BigtableTableAdminTransport, "_prep_wrapped_messages" ) as prep: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, ) @@ -29509,7 +29533,7 @@ def test_client_with_default_client_info(): with mock.patch.object( transports.BigtableTableAdminTransport, "_prep_wrapped_messages" ) as prep: - transport_class = BigtableTableAdminClient.get_transport_class() + transport_class = BaseBigtableTableAdminClient.get_transport_class() transport = transport_class( credentials=ga_credentials.AnonymousCredentials(), client_info=client_info, @@ -29518,7 +29542,7 @@ def test_client_with_default_client_info(): def test_transport_close_grpc(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="grpc" ) with mock.patch.object( @@ -29531,7 +29555,7 @@ def test_transport_close_grpc(): @pytest.mark.asyncio async def test_transport_close_grpc_asyncio(): - client = BigtableTableAdminAsyncClient( + client = BaseBigtableTableAdminAsyncClient( credentials=async_anonymous_credentials(), transport="grpc_asyncio" ) with mock.patch.object( @@ -29543,7 +29567,7 @@ async def test_transport_close_grpc_asyncio(): def test_transport_close_rest(): - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" ) with mock.patch.object( @@ -29560,7 +29584,7 @@ def test_client_ctx(): "grpc", ] for transport in transports: - client = BigtableTableAdminClient( + client = BaseBigtableTableAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport ) # Test client calls underlying transport. 
@@ -29574,9 +29598,9 @@ def test_client_ctx(): @pytest.mark.parametrize( "client_class,transport_class", [ - (BigtableTableAdminClient, transports.BigtableTableAdminGrpcTransport), + (BaseBigtableTableAdminClient, transports.BigtableTableAdminGrpcTransport), ( - BigtableTableAdminAsyncClient, + BaseBigtableTableAdminAsyncClient, transports.BigtableTableAdminGrpcAsyncIOTransport, ), ], diff --git a/packages/google-cloud-bigtable/tests/unit/v2_client/test_backup.py b/packages/google-cloud-bigtable/tests/unit/v2_client/test_backup.py index 9882ca339c3b..cc9251a354c2 100644 --- a/packages/google-cloud-bigtable/tests/unit/v2_client/test_backup.py +++ b/packages/google-cloud-bigtable/tests/unit/v2_client/test_backup.py @@ -42,9 +42,9 @@ def _make_timestamp(): def _make_table_admin_client(): - from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient + from google.cloud.bigtable_admin_v2 import BaseBigtableTableAdminClient - return mock.create_autospec(BigtableTableAdminClient, instance=True) + return mock.create_autospec(BaseBigtableTableAdminClient, instance=True) def _make_backup(*args, **kwargs): @@ -735,7 +735,7 @@ def test_backup_restore_w_grpc_error(): client = _Client() api = client.table_admin_client = _make_table_admin_client() - api.restore_table.side_effect = Unknown("testing") + api._restore_table.side_effect = Unknown("testing") timestamp = _make_timestamp() backup = _make_backup( @@ -749,7 +749,7 @@ def test_backup_restore_w_grpc_error(): with pytest.raises(GoogleAPICallError): backup.restore(TABLE_ID) - api.restore_table.assert_called_once_with( + api._restore_table.assert_called_once_with( request={"parent": INSTANCE_NAME, "table_id": TABLE_ID, "backup": BACKUP_NAME} ) @@ -772,7 +772,7 @@ def _restore_helper(instance_id=None, instance_name=None): op_future = object() client = _Client() api = client.table_admin_client = _make_table_admin_client() - api.restore_table.return_value = op_future + api._restore_table.return_value = op_future timestamp = _make_timestamp() backup = _make_backup( @@ -787,14 +787,14 @@ def _restore_helper(instance_id=None, instance_name=None): assert backup._cluster == CLUSTER_ID assert future is op_future - api.restore_table.assert_called_once_with( + api._restore_table.assert_called_once_with( request={ "parent": instance_name or INSTANCE_NAME, "table_id": TABLE_ID, "backup": BACKUP_NAME, } ) - api.restore_table.reset_mock() + api._restore_table.reset_mock() def test_backup_restore_default(): @@ -808,7 +808,7 @@ def test_backup_restore_to_another_instance(): def test_backup_get_iam_policy(): from google.cloud.bigtable.client import Client from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - BigtableTableAdminClient, + BaseBigtableTableAdminClient, ) from google.iam.v1 import policy_pb2 from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE @@ -825,7 +825,7 @@ def test_backup_get_iam_policy(): bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": members}] iam_policy = policy_pb2.Policy(version=version, etag=etag, bindings=bindings) - table_api = mock.create_autospec(BigtableTableAdminClient) + table_api = mock.create_autospec(BaseBigtableTableAdminClient) client._table_admin_client = table_api table_api.get_iam_policy.return_value = iam_policy @@ -844,7 +844,7 @@ def test_backup_get_iam_policy(): def test_backup_set_iam_policy(): from google.cloud.bigtable.client import Client from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - BigtableTableAdminClient, + BaseBigtableTableAdminClient, ) from 
google.iam.v1 import policy_pb2 from google.cloud.bigtable.policy import Policy @@ -862,7 +862,7 @@ def test_backup_set_iam_policy(): bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": sorted(members)}] iam_policy_pb = policy_pb2.Policy(version=version, etag=etag, bindings=bindings) - table_api = mock.create_autospec(BigtableTableAdminClient) + table_api = mock.create_autospec(BaseBigtableTableAdminClient) client._table_admin_client = table_api table_api.set_iam_policy.return_value = iam_policy_pb @@ -889,7 +889,7 @@ def test_backup_set_iam_policy(): def test_backup_test_iam_permissions(): from google.cloud.bigtable.client import Client from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - BigtableTableAdminClient, + BaseBigtableTableAdminClient, ) from google.iam.v1 import iam_policy_pb2 @@ -903,7 +903,7 @@ def test_backup_test_iam_permissions(): response = iam_policy_pb2.TestIamPermissionsResponse(permissions=permissions) - table_api = mock.create_autospec(BigtableTableAdminClient) + table_api = mock.create_autospec(BaseBigtableTableAdminClient) table_api.test_iam_permissions.return_value = response client._table_admin_client = table_api diff --git a/packages/google-cloud-bigtable/tests/unit/v2_client/test_client.py b/packages/google-cloud-bigtable/tests/unit/v2_client/test_client.py index 4338f8553014..a4fc0f9cb40e 100644 --- a/packages/google-cloud-bigtable/tests/unit/v2_client/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/v2_client/test_client.py @@ -449,18 +449,18 @@ def test_client_table_admin_client_not_initialized_no_admin_flag(): def test_client_table_admin_client_not_initialized_w_admin_flag(): - from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient + from google.cloud.bigtable_admin_v2 import BaseBigtableTableAdminClient credentials = _make_credentials() client = _make_client(project=PROJECT, credentials=credentials, admin=True) table_admin_client = client.table_admin_client - assert isinstance(table_admin_client, BigtableTableAdminClient) + assert isinstance(table_admin_client, BaseBigtableTableAdminClient) assert client._table_admin_client is table_admin_client def test_client_table_admin_client_not_initialized_w_client_info(): - from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient + from google.cloud.bigtable_admin_v2 import BaseBigtableTableAdminClient credentials = _make_credentials() client_info = mock.Mock() @@ -472,7 +472,7 @@ def test_client_table_admin_client_not_initialized_w_client_info(): ) table_admin_client = client.table_admin_client - assert isinstance(table_admin_client, BigtableTableAdminClient) + assert isinstance(table_admin_client, BaseBigtableTableAdminClient) assert client._client_info is client_info assert client._table_admin_client is table_admin_client @@ -488,7 +488,7 @@ def test_client_table_admin_client_not_initialized_w_client_options(): ) client._create_gapic_client_channel = mock.Mock() - patch = mock.patch("google.cloud.bigtable_admin_v2.BigtableTableAdminClient") + patch = mock.patch("google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient") with patch as mocked: table_admin_client = client.table_admin_client diff --git a/packages/google-cloud-bigtable/tests/unit/v2_client/test_column_family.py b/packages/google-cloud-bigtable/tests/unit/v2_client/test_column_family.py index e4f74e26463b..2480e11cba11 100644 --- a/packages/google-cloud-bigtable/tests/unit/v2_client/test_column_family.py +++ b/packages/google-cloud-bigtable/tests/unit/v2_client/test_column_family.py @@ 
-338,7 +338,7 @@ def _create_test_helper(gc_rule=None): ) from ._testing import _FakeStub from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - BigtableTableAdminClient, + BaseBigtableTableAdminClient, ) project_id = "project-id" @@ -357,7 +357,7 @@ def _create_test_helper(gc_rule=None): + table_id ) - api = mock.create_autospec(BigtableTableAdminClient) + api = mock.create_autospec(BaseBigtableTableAdminClient) credentials = _make_credentials() client = _make_client(project=project_id, credentials=credentials, admin=True) @@ -409,7 +409,7 @@ def _update_test_helper(gc_rule=None): bigtable_table_admin as table_admin_v2_pb2, ) from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - BigtableTableAdminClient, + BaseBigtableTableAdminClient, ) project_id = "project-id" @@ -428,7 +428,7 @@ def _update_test_helper(gc_rule=None): + table_id ) - api = mock.create_autospec(BigtableTableAdminClient) + api = mock.create_autospec(BaseBigtableTableAdminClient) credentials = _make_credentials() client = _make_client(project=project_id, credentials=credentials, admin=True) table = _Table(table_name, client=client) @@ -480,7 +480,7 @@ def test_column_family_delete(): ) from ._testing import _FakeStub from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - BigtableTableAdminClient, + BaseBigtableTableAdminClient, ) project_id = "project-id" @@ -499,7 +499,7 @@ def test_column_family_delete(): + table_id ) - api = mock.create_autospec(BigtableTableAdminClient) + api = mock.create_autospec(BaseBigtableTableAdminClient) credentials = _make_credentials() client = _make_client(project=project_id, credentials=credentials, admin=True) table = _Table(table_name, client=client) diff --git a/packages/google-cloud-bigtable/tests/unit/v2_client/test_instance.py b/packages/google-cloud-bigtable/tests/unit/v2_client/test_instance.py index de6844a165b6..712fab1f5688 100644 --- a/packages/google-cloud-bigtable/tests/unit/v2_client/test_instance.py +++ b/packages/google-cloud-bigtable/tests/unit/v2_client/test_instance.py @@ -806,7 +806,7 @@ def _list_tables_helper(table_name=None): bigtable_table_admin as table_messages_v1_pb2, ) from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import ( - BigtableTableAdminClient, + BaseBigtableTableAdminClient, ) credentials = _make_credentials() @@ -816,7 +816,7 @@ def _list_tables_helper(table_name=None): instance_api = client._instance_admin_client = _make_instance_admin_api() instance_api.instance_path.return_value = "projects/project/instances/instance-id" table_api = client._table_admin_client = mock.create_autospec( - BigtableTableAdminClient + BaseBigtableTableAdminClient ) if table_name is None: table_name = TABLE_NAME diff --git a/packages/google-cloud-bigtable/tests/unit/v2_client/test_table.py b/packages/google-cloud-bigtable/tests/unit/v2_client/test_table.py index 032363bd70a7..1d183e2fb506 100644 --- a/packages/google-cloud-bigtable/tests/unit/v2_client/test_table.py +++ b/packages/google-cloud-bigtable/tests/unit/v2_client/test_table.py @@ -349,7 +349,7 @@ def _make_table_api(): client as bigtable_table_admin, ) - return mock.create_autospec(bigtable_table_admin.BigtableTableAdminClient) + return mock.create_autospec(bigtable_table_admin.BaseBigtableTableAdminClient) def _create_table_helper(split_keys=[], column_families={}): @@ -1482,7 +1482,7 @@ def _table_restore_helper(backup_name=None): table = _make_table(TABLE_ID, instance) table_api = client._table_admin_client = _make_table_api() - 
table_api.restore_table.return_value = op_future + table_api._restore_table.return_value = op_future if backup_name: future = table.restore(TABLE_ID, backup_name=BACKUP_NAME) @@ -1496,7 +1496,7 @@ def _table_restore_helper(backup_name=None): "table_id": TABLE_ID, "backup": BACKUP_NAME, } - table_api.restore_table.assert_called_once_with(request=expected_request) + table_api._restore_table.assert_called_once_with(request=expected_request) def test_table_restore_table_w_backup_id(): From a44605f4d0d0df0a0645c38a52fcb140d424220c Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Wed, 6 Aug 2025 13:22:49 -0400 Subject: [PATCH 876/892] chore(main): release 2.32.0 (#1152) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- .../.release-please-manifest.json | 2 +- packages/google-cloud-bigtable/CHANGELOG.md | 10 ++++++++++ .../google/cloud/bigtable/gapic_version.py | 2 +- .../google/cloud/bigtable_admin/gapic_version.py | 2 +- .../google/cloud/bigtable_admin_v2/gapic_version.py | 2 +- .../google/cloud/bigtable_v2/gapic_version.py | 2 +- 6 files changed, 15 insertions(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/.release-please-manifest.json b/packages/google-cloud-bigtable/.release-please-manifest.json index 90999b77509c..355f140d6cbc 100644 --- a/packages/google-cloud-bigtable/.release-please-manifest.json +++ b/packages/google-cloud-bigtable/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "2.31.0" + ".": "2.32.0" } \ No newline at end of file diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 9a0b2e013e2a..d0308016471d 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,16 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.32.0](https://github.com/googleapis/python-bigtable/compare/v2.31.0...v2.32.0) (2025-08-01) + + +### Features + +* Add Idempotency to Cloud Bigtable MutateRowsRequest API ([#1143](https://github.com/googleapis/python-bigtable/issues/1143)) ([c3e3eb0](https://github.com/googleapis/python-bigtable/commit/c3e3eb0e4ce44ece72b150dc5822846627074fba)) +* Add support for AddToCell in Data Client ([#1147](https://github.com/googleapis/python-bigtable/issues/1147)) ([1a5b4b5](https://github.com/googleapis/python-bigtable/commit/1a5b4b514cadae5c83d61296314285d3774992c5)) +* Implement SQL support in test proxy ([#1106](https://github.com/googleapis/python-bigtable/issues/1106)) ([7a91bbf](https://github.com/googleapis/python-bigtable/commit/7a91bbfb9df23f7e93c40b88648840342af6f16f)) +* Modernized Bigtable Admin Client featuring selective GAPIC generation ([#1177](https://github.com/googleapis/python-bigtable/issues/1177)) ([58e7d37](https://github.com/googleapis/python-bigtable/commit/58e7d3782df6b13a42af053263afc575222a6b83)) + ## [2.31.0](https://github.com/googleapis/python-bigtable/compare/v2.30.1...v2.31.0) (2025-05-22) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py index 8ab09c42e9c1..3c958586feba 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "2.31.0" # {x-release-please-version} +__version__ = "2.32.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py index 8ab09c42e9c1..3c958586feba 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.31.0" # {x-release-please-version} +__version__ = "2.32.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py index 8ab09c42e9c1..3c958586feba 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.31.0" # {x-release-please-version} +__version__ = "2.32.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py index 8ab09c42e9c1..3c958586feba 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "2.31.0" # {x-release-please-version} +__version__ = "2.32.0" # {x-release-please-version} From 90111e795b85ca3ea9485d9881778abc05ad5bbb Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 7 Aug 2025 13:46:00 -0700 Subject: [PATCH 877/892] chore: update gapic files (#1171) --- .../bigtable_instance_admin/async_client.py | 871 ++ .../bigtable_instance_admin/client.py | 871 ++ .../bigtable_table_admin/async_client.py | 971 ++ .../services/bigtable_table_admin/client.py | 971 ++ .../cloud/bigtable_admin_v2/types/instance.py | 19 + .../services/bigtable/async_client.py | 48 +- .../bigtable_v2/services/bigtable/client.py | 48 +- .../google/cloud/bigtable_v2/types/types.py | 66 + packages/google-cloud-bigtable/owlbot.py | 21 +- ...instance_admin_create_app_profile_async.py | 57 + ..._instance_admin_create_app_profile_sync.py | 57 + ...ble_instance_admin_create_cluster_async.py | 57 + ...able_instance_admin_create_cluster_sync.py | 57 + ...le_instance_admin_create_instance_async.py | 61 + ...ble_instance_admin_create_instance_sync.py | 61 + ...nstance_admin_create_logical_view_async.py | 61 + ...instance_admin_create_logical_view_sync.py | 61 + ...ce_admin_create_materialized_view_async.py | 61 + ...nce_admin_create_materialized_view_sync.py | 61 + ...instance_admin_delete_app_profile_async.py | 51 + ..._instance_admin_delete_app_profile_sync.py | 51 + ...ble_instance_admin_delete_cluster_async.py | 50 + ...able_instance_admin_delete_cluster_sync.py | 50 + ...le_instance_admin_delete_instance_async.py | 50 + ...ble_instance_admin_delete_instance_sync.py | 50 + ...nstance_admin_delete_logical_view_async.py | 50 + ...instance_admin_delete_logical_view_sync.py | 50 + ...ce_admin_delete_materialized_view_async.py | 50 + ...nce_admin_delete_materialized_view_sync.py | 50 + ...le_instance_admin_get_app_profile_async.py | 52 + ...ble_instance_admin_get_app_profile_sync.py | 52 + ...gtable_instance_admin_get_cluster_async.py | 52 + ...igtable_instance_admin_get_cluster_sync.py | 52 + ...ble_instance_admin_get_iam_policy_async.py | 53 + ...able_instance_admin_get_iam_policy_sync.py | 53 + ...table_instance_admin_get_instance_async.py | 52 + ...gtable_instance_admin_get_instance_sync.py | 52 + ...e_instance_admin_get_logical_view_async.py | 52 + ...le_instance_admin_get_logical_view_sync.py | 52 + ...tance_admin_get_materialized_view_async.py | 52 + ...stance_admin_get_materialized_view_sync.py | 52 + ..._instance_admin_list_app_profiles_async.py | 53 + ...e_instance_admin_list_app_profiles_sync.py | 53 + ...able_instance_admin_list_clusters_async.py | 52 + ...table_instance_admin_list_clusters_sync.py | 52 + ...e_instance_admin_list_hot_tablets_async.py | 53 + ...le_instance_admin_list_hot_tablets_sync.py | 53 + ...ble_instance_admin_list_instances_async.py | 52 + ...able_instance_admin_list_instances_sync.py | 52 + ...instance_admin_list_logical_views_async.py | 53 + ..._instance_admin_list_logical_views_sync.py | 53 + ...nce_admin_list_materialized_views_async.py | 53 + ...ance_admin_list_materialized_views_sync.py | 53 + ...ance_admin_partial_update_cluster_async.py | 55 + ...tance_admin_partial_update_cluster_sync.py | 55 + ...nce_admin_partial_update_instance_async.py | 59 + ...ance_admin_partial_update_instance_sync.py | 59 + ...ble_instance_admin_set_iam_policy_async.py | 53 + ...able_instance_admin_set_iam_policy_sync.py | 53 + ...stance_admin_test_iam_permissions_async.py | 54 + 
...nstance_admin_test_iam_permissions_sync.py | 54 + ...instance_admin_update_app_profile_async.py | 59 + ..._instance_admin_update_app_profile_sync.py | 59 + ...ble_instance_admin_update_cluster_async.py | 55 + ...able_instance_admin_update_cluster_sync.py | 55 + ...le_instance_admin_update_instance_async.py | 52 + ...ble_instance_admin_update_instance_sync.py | 52 + ...nstance_admin_update_logical_view_async.py | 59 + ...instance_admin_update_logical_view_sync.py | 59 + ...ce_admin_update_materialized_view_async.py | 59 + ...nce_admin_update_materialized_view_sync.py | 59 + ...ble_table_admin_check_consistency_async.py | 53 + ...able_table_admin_check_consistency_sync.py | 53 + ..._bigtable_table_admin_copy_backup_async.py | 58 + ...d_bigtable_table_admin_copy_backup_sync.py | 58 + ...able_admin_create_authorized_view_async.py | 57 + ...table_admin_create_authorized_view_sync.py | 57 + ...igtable_table_admin_create_backup_async.py | 61 + ...bigtable_table_admin_create_backup_sync.py | 61 + ..._table_admin_create_schema_bundle_async.py | 61 + ...e_table_admin_create_schema_bundle_sync.py | 61 + ...bigtable_table_admin_create_table_async.py | 53 + ..._admin_create_table_from_snapshot_async.py | 58 + ...e_admin_create_table_from_snapshot_sync.py | 58 + ..._bigtable_table_admin_create_table_sync.py | 53 + ...able_admin_delete_authorized_view_async.py | 50 + ...table_admin_delete_authorized_view_sync.py | 50 + ...igtable_table_admin_delete_backup_async.py | 50 + ...bigtable_table_admin_delete_backup_sync.py | 50 + ..._table_admin_delete_schema_bundle_async.py | 50 + ...e_table_admin_delete_schema_bundle_sync.py | 50 + ...table_table_admin_delete_snapshot_async.py | 50 + ...gtable_table_admin_delete_snapshot_sync.py | 50 + ...bigtable_table_admin_delete_table_async.py | 50 + ..._bigtable_table_admin_delete_table_sync.py | 50 + ...gtable_table_admin_drop_row_range_async.py | 51 + ...igtable_table_admin_drop_row_range_sync.py | 51 + ..._admin_generate_consistency_token_async.py | 52 + ...e_admin_generate_consistency_token_sync.py | 52 + ...e_table_admin_get_authorized_view_async.py | 52 + ...le_table_admin_get_authorized_view_sync.py | 52 + ...d_bigtable_table_admin_get_backup_async.py | 52 + ...ed_bigtable_table_admin_get_backup_sync.py | 52 + ...gtable_table_admin_get_iam_policy_async.py | 53 + ...igtable_table_admin_get_iam_policy_sync.py | 53 + ...ble_table_admin_get_schema_bundle_async.py | 52 + ...able_table_admin_get_schema_bundle_sync.py | 52 + ...bigtable_table_admin_get_snapshot_async.py | 52 + ..._bigtable_table_admin_get_snapshot_sync.py | 52 + ...ed_bigtable_table_admin_get_table_async.py | 52 + ...ted_bigtable_table_admin_get_table_sync.py | 52 + ...table_admin_list_authorized_views_async.py | 53 + ..._table_admin_list_authorized_views_sync.py | 53 + ...bigtable_table_admin_list_backups_async.py | 53 + ..._bigtable_table_admin_list_backups_sync.py | 53 + ...e_table_admin_list_schema_bundles_async.py | 53 + ...le_table_admin_list_schema_bundles_sync.py | 53 + ...gtable_table_admin_list_snapshots_async.py | 53 + ...igtable_table_admin_list_snapshots_sync.py | 53 + ..._bigtable_table_admin_list_tables_async.py | 53 + ...d_bigtable_table_admin_list_tables_sync.py | 53 + ...able_admin_modify_column_families_async.py | 52 + ...table_admin_modify_column_families_sync.py | 52 + ...able_admin_restore_table_async_internal.py | 58 + ...table_admin_restore_table_sync_internal.py | 58 + ...gtable_table_admin_set_iam_policy_async.py | 53 + ...igtable_table_admin_set_iam_policy_sync.py | 53 + 
...gtable_table_admin_snapshot_table_async.py | 58 + ...igtable_table_admin_snapshot_table_sync.py | 58 + ..._table_admin_test_iam_permissions_async.py | 54 + ...e_table_admin_test_iam_permissions_sync.py | 54 + ...gtable_table_admin_undelete_table_async.py | 56 + ...igtable_table_admin_undelete_table_sync.py | 56 + ...able_admin_update_authorized_view_async.py | 55 + ...table_admin_update_authorized_view_sync.py | 55 + ...igtable_table_admin_update_backup_async.py | 55 + ...bigtable_table_admin_update_backup_sync.py | 55 + ..._table_admin_update_schema_bundle_async.py | 59 + ...e_table_admin_update_schema_bundle_sync.py | 59 + ...bigtable_table_admin_update_table_async.py | 55 + ..._bigtable_table_admin_update_table_sync.py | 55 + ...pet_metadata_google.bigtable.admin.v2.json | 10871 ++++++++++++++++ .../fixup_bigtable_admin_v2_keywords.py | 2 +- .../tests/unit/data/_async/test_client.py | 16 +- .../unit/data/_sync_autogen/test_client.py | 12 +- .../test_bigtable_instance_admin.py | 1 + .../unit/gapic/bigtable_v2/test_bigtable.py | 657 +- 147 files changed, 22240 insertions(+), 343 deletions(-) create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_app_profile_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_app_profile_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_cluster_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_cluster_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_instance_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_instance_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_logical_view_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_logical_view_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_materialized_view_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_materialized_view_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_app_profile_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_app_profile_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_cluster_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_cluster_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_instance_async.py create mode 100644 
packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_instance_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_logical_view_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_logical_view_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_materialized_view_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_materialized_view_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_app_profile_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_app_profile_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_cluster_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_cluster_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_iam_policy_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_iam_policy_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_instance_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_instance_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_logical_view_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_logical_view_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_materialized_view_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_materialized_view_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_app_profiles_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_app_profiles_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_clusters_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_clusters_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_hot_tablets_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_hot_tablets_sync.py create mode 100644 
packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_instances_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_instances_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_logical_views_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_logical_views_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_materialized_views_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_materialized_views_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_cluster_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_cluster_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_instance_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_instance_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_set_iam_policy_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_set_iam_policy_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_test_iam_permissions_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_test_iam_permissions_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_app_profile_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_app_profile_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_cluster_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_cluster_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_instance_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_instance_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_logical_view_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_logical_view_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_materialized_view_async.py create 
mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_materialized_view_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_check_consistency_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_check_consistency_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_copy_backup_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_copy_backup_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_authorized_view_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_authorized_view_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_backup_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_backup_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_schema_bundle_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_schema_bundle_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_from_snapshot_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_from_snapshot_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_authorized_view_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_authorized_view_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_backup_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_backup_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_schema_bundle_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_schema_bundle_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_snapshot_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_snapshot_sync.py create mode 100644 
packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_table_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_table_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_drop_row_range_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_drop_row_range_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_generate_consistency_token_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_generate_consistency_token_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_authorized_view_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_authorized_view_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_backup_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_backup_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_iam_policy_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_iam_policy_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_schema_bundle_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_schema_bundle_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_snapshot_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_snapshot_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_table_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_table_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_authorized_views_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_authorized_views_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_backups_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_backups_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_schema_bundles_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_schema_bundles_sync.py 
create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_snapshots_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_snapshots_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_tables_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_tables_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_modify_column_families_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_modify_column_families_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_restore_table_async_internal.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_restore_table_sync_internal.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_set_iam_policy_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_set_iam_policy_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_snapshot_table_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_snapshot_table_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_test_iam_permissions_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_test_iam_permissions_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_undelete_table_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_undelete_table_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_authorized_view_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_authorized_view_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_backup_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_backup_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_schema_bundle_async.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_schema_bundle_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_table_async.py create mode 100644 
packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_table_sync.py create mode 100644 packages/google-cloud-bigtable/samples/generated_samples/snippet_metadata_google.bigtable.admin.v2.json diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py index b150b7123c5d..a1aee2370fd3 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ -354,6 +354,41 @@ async def create_instance( scaled. If cluster_config.cluster_autoscaling_config is non-empty, then autoscaling is enabled. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_create_instance(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + instance = bigtable_admin_v2.Instance() + instance.display_name = "display_name_value" + + request = bigtable_admin_v2.CreateInstanceRequest( + parent="parent_value", + instance_id="instance_id_value", + instance=instance, + ) + + # Make the request + operation = client.create_instance(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateInstanceRequest, dict]]): The request object. Request message for @@ -487,6 +522,32 @@ async def get_instance( ) -> instance.Instance: r"""Gets information about an instance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_get_instance(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetInstanceRequest( + name="name_value", + ) + + # Make the request + response = await client.get_instance(request=request) + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetInstanceRequest, dict]]): The request object. Request message for @@ -578,6 +639,32 @@ async def list_instances( ) -> bigtable_instance_admin.ListInstancesResponse: r"""Lists information about instances in a project. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. 
+ # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_list_instances(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListInstancesRequest( + parent="parent_value", + ) + + # Make the request + response = await client.list_instances(request=request) + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListInstancesRequest, dict]]): The request object. Request message for @@ -666,6 +753,32 @@ async def update_instance( To update other Instance properties, such as labels, use PartialUpdateInstance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_update_instance(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.Instance( + display_name="display_name_value", + ) + + # Make the request + response = await client.update_instance(request=request) + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.Instance, dict]]): The request object. A collection of Bigtable @@ -739,6 +852,39 @@ async def partial_update_instance( method can modify all fields of an Instance and is the preferred way to update an Instance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_partial_update_instance(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + instance = bigtable_admin_v2.Instance() + instance.display_name = "display_name_value" + + request = bigtable_admin_v2.PartialUpdateInstanceRequest( + instance=instance, + ) + + # Make the request + operation = client.partial_update_instance(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.PartialUpdateInstanceRequest, dict]]): The request object. Request message for @@ -853,6 +999,29 @@ async def delete_instance( ) -> None: r"""Delete an instance from a project. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. 
+ # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_delete_instance(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteInstanceRequest( + name="name_value", + ) + + # Make the request + await client.delete_instance(request=request) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteInstanceRequest, dict]]): The request object. Request message for @@ -940,6 +1109,37 @@ async def create_cluster( scaled. If cluster_config.cluster_autoscaling_config is non-empty, then autoscaling is enabled. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_create_cluster(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.CreateClusterRequest( + parent="parent_value", + cluster_id="cluster_id_value", + ) + + # Make the request + operation = client.create_cluster(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateClusterRequest, dict]]): The request object. Request message for @@ -1060,6 +1260,32 @@ async def get_cluster( ) -> instance.Cluster: r"""Gets information about a cluster. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_get_cluster(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetClusterRequest( + name="name_value", + ) + + # Make the request + response = await client.get_cluster(request=request) + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetClusterRequest, dict]]): The request object. Request message for @@ -1150,6 +1376,32 @@ async def list_clusters( ) -> bigtable_instance_admin.ListClustersResponse: r"""Lists information about clusters in an instance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_list_clusters(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListClustersRequest( + parent="parent_value", + ) + + # Make the request + response = await client.list_clusters(request=request) + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListClustersRequest, dict]]): The request object. Request message for @@ -1241,6 +1493,35 @@ async def update_cluster( cluster_config.cluster_autoscaling_config. In order to update it, you must use PartialUpdateCluster. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_update_cluster(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.Cluster( + ) + + # Make the request + operation = client.update_cluster(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.Cluster, dict]]): The request object. A resizable group of nodes in a particular cloud @@ -1332,6 +1613,35 @@ async def partial_update_cluster( cluster_config.cluster_autoscaling_config, and explicitly set a serve_node count via the update_mask. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_partial_update_cluster(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.PartialUpdateClusterRequest( + ) + + # Make the request + operation = client.partial_update_cluster(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.PartialUpdateClusterRequest, dict]]): The request object. Request message for @@ -1442,6 +1752,29 @@ async def delete_cluster( ) -> None: r"""Deletes a cluster from an instance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_delete_cluster(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteClusterRequest( + name="name_value", + ) + + # Make the request + await client.delete_cluster(request=request) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteClusterRequest, dict]]): The request object. Request message for @@ -1523,6 +1856,37 @@ async def create_app_profile( ) -> instance.AppProfile: r"""Creates an app profile within an instance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_create_app_profile(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + app_profile = bigtable_admin_v2.AppProfile() + app_profile.priority = "PRIORITY_HIGH" + + request = bigtable_admin_v2.CreateAppProfileRequest( + parent="parent_value", + app_profile_id="app_profile_id_value", + app_profile=app_profile, + ) + + # Make the request + response = await client.create_app_profile(request=request) + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateAppProfileRequest, dict]]): The request object. Request message for @@ -1632,6 +1996,32 @@ async def get_app_profile( ) -> instance.AppProfile: r"""Gets information about an app profile. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_get_app_profile(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetAppProfileRequest( + name="name_value", + ) + + # Make the request + response = await client.get_app_profile(request=request) + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetAppProfileRequest, dict]]): The request object. Request message for @@ -1721,6 +2111,33 @@ async def list_app_profiles( ) -> pagers.ListAppProfilesAsyncPager: r"""Lists information about app profiles in an instance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_list_app_profiles(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListAppProfilesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_app_profiles(request=request) + + # Handle the response + async for response in page_result: + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListAppProfilesRequest, dict]]): The request object. Request message for @@ -1827,6 +2244,39 @@ async def update_app_profile( ) -> operation_async.AsyncOperation: r"""Updates an app profile within an instance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_update_app_profile(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + app_profile = bigtable_admin_v2.AppProfile() + app_profile.priority = "PRIORITY_HIGH" + + request = bigtable_admin_v2.UpdateAppProfileRequest( + app_profile=app_profile, + ) + + # Make the request + operation = client.update_app_profile(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.UpdateAppProfileRequest, dict]]): The request object. Request message for @@ -1937,6 +2387,30 @@ async def delete_app_profile( ) -> None: r"""Deletes an app profile from an instance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_delete_app_profile(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteAppProfileRequest( + name="name_value", + ignore_warnings=True, + ) + + # Make the request + await client.delete_app_profile(request=request) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteAppProfileRequest, dict]]): The request object. Request message for @@ -2025,6 +2499,33 @@ async def get_iam_policy( resource. Returns an empty policy if an instance exists but does not have a policy set. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + from google.iam.v1 import iam_policy_pb2 # type: ignore + + async def sample_get_iam_policy(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = iam_policy_pb2.GetIamPolicyRequest( + resource="resource_value", + ) + + # Make the request + response = await client.get_iam_policy(request=request) + + # Handle the response + print(response) + Args: request (Optional[Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]]): The request object. Request message for ``GetIamPolicy`` method. @@ -2137,6 +2638,33 @@ async def set_iam_policy( r"""Sets the access control policy on an instance resource. Replaces any existing policy. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + from google.iam.v1 import iam_policy_pb2 # type: ignore + + async def sample_set_iam_policy(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = iam_policy_pb2.SetIamPolicyRequest( + resource="resource_value", + ) + + # Make the request + response = await client.set_iam_policy(request=request) + + # Handle the response + print(response) + Args: request (Optional[Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]]): The request object. Request message for ``SetIamPolicy`` method. @@ -2250,6 +2778,34 @@ async def test_iam_permissions( r"""Returns permissions that the caller has on the specified instance resource. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + from google.iam.v1 import iam_policy_pb2 # type: ignore + + async def sample_test_iam_permissions(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = iam_policy_pb2.TestIamPermissionsRequest( + resource="resource_value", + permissions=['permissions_value1', 'permissions_value2'], + ) + + # Make the request + response = await client.test_iam_permissions(request=request) + + # Handle the response + print(response) + Args: request (Optional[Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]]): The request object. Request message for ``TestIamPermissions`` method. @@ -2345,6 +2901,33 @@ async def list_hot_tablets( r"""Lists hot tablets in a cluster, within the time range provided. Hot tablets are ordered based on CPU usage. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. 
+ # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_list_hot_tablets(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListHotTabletsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_hot_tablets(request=request) + + # Handle the response + async for response in page_result: + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListHotTabletsRequest, dict]]): The request object. Request message for @@ -2449,6 +3032,41 @@ async def create_logical_view( ) -> operation_async.AsyncOperation: r"""Creates a logical view within an instance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_create_logical_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + logical_view = bigtable_admin_v2.LogicalView() + logical_view.query = "query_value" + + request = bigtable_admin_v2.CreateLogicalViewRequest( + parent="parent_value", + logical_view_id="logical_view_id_value", + logical_view=logical_view, + ) + + # Make the request + operation = client.create_logical_view(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateLogicalViewRequest, dict]]): The request object. Request message for @@ -2567,6 +3185,32 @@ async def get_logical_view( ) -> instance.LogicalView: r"""Gets information about a logical view. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_get_logical_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetLogicalViewRequest( + name="name_value", + ) + + # Make the request + response = await client.get_logical_view(request=request) + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetLogicalViewRequest, dict]]): The request object. Request message for @@ -2655,6 +3299,33 @@ async def list_logical_views( ) -> pagers.ListLogicalViewsAsyncPager: r"""Lists information about logical views in an instance. + .. 
code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_list_logical_views(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListLogicalViewsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_logical_views(request=request) + + # Handle the response + async for response in page_result: + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListLogicalViewsRequest, dict]]): The request object. Request message for @@ -2758,6 +3429,39 @@ async def update_logical_view( ) -> operation_async.AsyncOperation: r"""Updates a logical view within an instance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_update_logical_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + logical_view = bigtable_admin_v2.LogicalView() + logical_view.query = "query_value" + + request = bigtable_admin_v2.UpdateLogicalViewRequest( + logical_view=logical_view, + ) + + # Make the request + operation = client.update_logical_view(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.UpdateLogicalViewRequest, dict]]): The request object. Request message for @@ -2871,6 +3575,29 @@ async def delete_logical_view( ) -> None: r"""Deletes a logical view from an instance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_delete_logical_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteLogicalViewRequest( + name="name_value", + ) + + # Make the request + await client.delete_logical_view(request=request) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteLogicalViewRequest, dict]]): The request object. Request message for @@ -2952,6 +3679,41 @@ async def create_materialized_view( ) -> operation_async.AsyncOperation: r"""Creates a materialized view within an instance. + .. 
code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_create_materialized_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + materialized_view = bigtable_admin_v2.MaterializedView() + materialized_view.query = "query_value" + + request = bigtable_admin_v2.CreateMaterializedViewRequest( + parent="parent_value", + materialized_view_id="materialized_view_id_value", + materialized_view=materialized_view, + ) + + # Make the request + operation = client.create_materialized_view(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateMaterializedViewRequest, dict]]): The request object. Request message for @@ -3074,6 +3836,32 @@ async def get_materialized_view( ) -> instance.MaterializedView: r"""Gets information about a materialized view. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_get_materialized_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetMaterializedViewRequest( + name="name_value", + ) + + # Make the request + response = await client.get_materialized_view(request=request) + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetMaterializedViewRequest, dict]]): The request object. Request message for @@ -3163,6 +3951,33 @@ async def list_materialized_views( r"""Lists information about materialized views in an instance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_list_materialized_views(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListMaterializedViewsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_materialized_views(request=request) + + # Handle the response + async for response in page_result: + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListMaterializedViewsRequest, dict]]): The request object. Request message for @@ -3268,6 +4083,39 @@ async def update_materialized_view( ) -> operation_async.AsyncOperation: r"""Updates a materialized view within an instance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_update_materialized_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + materialized_view = bigtable_admin_v2.MaterializedView() + materialized_view.query = "query_value" + + request = bigtable_admin_v2.UpdateMaterializedViewRequest( + materialized_view=materialized_view, + ) + + # Make the request + operation = client.update_materialized_view(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.UpdateMaterializedViewRequest, dict]]): The request object. Request message for @@ -3383,6 +4231,29 @@ async def delete_materialized_view( ) -> None: r"""Deletes a materialized view from an instance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_delete_materialized_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteMaterializedViewRequest( + name="name_value", + ) + + # Make the request + await client.delete_materialized_view(request=request) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteMaterializedViewRequest, dict]]): The request object. 
Request message for diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py index accaa1e036ca..84df01058326 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -906,6 +906,41 @@ def create_instance( scaled. If cluster_config.cluster_autoscaling_config is non-empty, then autoscaling is enabled. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_create_instance(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + instance = bigtable_admin_v2.Instance() + instance.display_name = "display_name_value" + + request = bigtable_admin_v2.CreateInstanceRequest( + parent="parent_value", + instance_id="instance_id_value", + instance=instance, + ) + + # Make the request + operation = client.create_instance(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.CreateInstanceRequest, dict]): The request object. Request message for @@ -1035,6 +1070,32 @@ def get_instance( ) -> instance.Instance: r"""Gets information about an instance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_get_instance(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetInstanceRequest( + name="name_value", + ) + + # Make the request + response = client.get_instance(request=request) + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.GetInstanceRequest, dict]): The request object. Request message for @@ -1123,6 +1184,32 @@ def list_instances( ) -> bigtable_instance_admin.ListInstancesResponse: r"""Lists information about instances in a project. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_list_instances(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListInstancesRequest( + parent="parent_value", + ) + + # Make the request + response = client.list_instances(request=request) + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.ListInstancesRequest, dict]): The request object. Request message for @@ -1208,6 +1295,32 @@ def update_instance( To update other Instance properties, such as labels, use PartialUpdateInstance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_update_instance(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.Instance( + display_name="display_name_value", + ) + + # Make the request + response = client.update_instance(request=request) + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.Instance, dict]): The request object. A collection of Bigtable @@ -1279,6 +1392,39 @@ def partial_update_instance( method can modify all fields of an Instance and is the preferred way to update an Instance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_partial_update_instance(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + instance = bigtable_admin_v2.Instance() + instance.display_name = "display_name_value" + + request = bigtable_admin_v2.PartialUpdateInstanceRequest( + instance=instance, + ) + + # Make the request + operation = client.partial_update_instance(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.PartialUpdateInstanceRequest, dict]): The request object. Request message for @@ -1390,6 +1536,29 @@ def delete_instance( ) -> None: r"""Delete an instance from a project. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_delete_instance(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteInstanceRequest( + name="name_value", + ) + + # Make the request + client.delete_instance(request=request) + Args: request (Union[google.cloud.bigtable_admin_v2.types.DeleteInstanceRequest, dict]): The request object. Request message for @@ -1474,6 +1643,37 @@ def create_cluster( scaled. If cluster_config.cluster_autoscaling_config is non-empty, then autoscaling is enabled. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_create_cluster(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.CreateClusterRequest( + parent="parent_value", + cluster_id="cluster_id_value", + ) + + # Make the request + operation = client.create_cluster(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.CreateClusterRequest, dict]): The request object. Request message for @@ -1591,6 +1791,32 @@ def get_cluster( ) -> instance.Cluster: r"""Gets information about a cluster. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_get_cluster(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetClusterRequest( + name="name_value", + ) + + # Make the request + response = client.get_cluster(request=request) + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.GetClusterRequest, dict]): The request object. Request message for @@ -1678,6 +1904,32 @@ def list_clusters( ) -> bigtable_instance_admin.ListClustersResponse: r"""Lists information about clusters in an instance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_list_clusters(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListClustersRequest( + parent="parent_value", + ) + + # Make the request + response = client.list_clusters(request=request) + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.ListClustersRequest, dict]): The request object. Request message for @@ -1766,6 +2018,35 @@ def update_cluster( cluster_config.cluster_autoscaling_config. In order to update it, you must use PartialUpdateCluster. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_update_cluster(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.Cluster( + ) + + # Make the request + operation = client.update_cluster(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.Cluster, dict]): The request object. A resizable group of nodes in a particular cloud @@ -1855,6 +2136,35 @@ def partial_update_cluster( cluster_config.cluster_autoscaling_config, and explicitly set a serve_node count via the update_mask. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_partial_update_cluster(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.PartialUpdateClusterRequest( + ) + + # Make the request + operation = client.partial_update_cluster(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.PartialUpdateClusterRequest, dict]): The request object. Request message for @@ -1962,6 +2272,29 @@ def delete_cluster( ) -> None: r"""Deletes a cluster from an instance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_delete_cluster(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteClusterRequest( + name="name_value", + ) + + # Make the request + client.delete_cluster(request=request) + Args: request (Union[google.cloud.bigtable_admin_v2.types.DeleteClusterRequest, dict]): The request object. Request message for @@ -2040,6 +2373,37 @@ def create_app_profile( ) -> instance.AppProfile: r"""Creates an app profile within an instance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_create_app_profile(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + app_profile = bigtable_admin_v2.AppProfile() + app_profile.priority = "PRIORITY_HIGH" + + request = bigtable_admin_v2.CreateAppProfileRequest( + parent="parent_value", + app_profile_id="app_profile_id_value", + app_profile=app_profile, + ) + + # Make the request + response = client.create_app_profile(request=request) + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.CreateAppProfileRequest, dict]): The request object. Request message for @@ -2146,6 +2510,32 @@ def get_app_profile( ) -> instance.AppProfile: r"""Gets information about an app profile. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_get_app_profile(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetAppProfileRequest( + name="name_value", + ) + + # Make the request + response = client.get_app_profile(request=request) + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.GetAppProfileRequest, dict]): The request object. Request message for @@ -2232,6 +2622,33 @@ def list_app_profiles( ) -> pagers.ListAppProfilesPager: r"""Lists information about app profiles in an instance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_list_app_profiles(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListAppProfilesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_app_profiles(request=request) + + # Handle the response + for response in page_result: + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.ListAppProfilesRequest, dict]): The request object. Request message for @@ -2335,6 +2752,39 @@ def update_app_profile( ) -> operation.Operation: r"""Updates an app profile within an instance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_update_app_profile(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + app_profile = bigtable_admin_v2.AppProfile() + app_profile.priority = "PRIORITY_HIGH" + + request = bigtable_admin_v2.UpdateAppProfileRequest( + app_profile=app_profile, + ) + + # Make the request + operation = client.update_app_profile(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.UpdateAppProfileRequest, dict]): The request object. Request message for @@ -2442,6 +2892,30 @@ def delete_app_profile( ) -> None: r"""Deletes an app profile from an instance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_delete_app_profile(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteAppProfileRequest( + name="name_value", + ignore_warnings=True, + ) + + # Make the request + client.delete_app_profile(request=request) + Args: request (Union[google.cloud.bigtable_admin_v2.types.DeleteAppProfileRequest, dict]): The request object. Request message for @@ -2527,6 +3001,33 @@ def get_iam_policy( resource. Returns an empty policy if an instance exists but does not have a policy set. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + from google.iam.v1 import iam_policy_pb2 # type: ignore + + def sample_get_iam_policy(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = iam_policy_pb2.GetIamPolicyRequest( + resource="resource_value", + ) + + # Make the request + response = client.get_iam_policy(request=request) + + # Handle the response + print(response) + Args: request (Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]): The request object. Request message for ``GetIamPolicy`` method. @@ -2640,6 +3141,33 @@ def set_iam_policy( r"""Sets the access control policy on an instance resource. Replaces any existing policy. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + from google.iam.v1 import iam_policy_pb2 # type: ignore + + def sample_set_iam_policy(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = iam_policy_pb2.SetIamPolicyRequest( + resource="resource_value", + ) + + # Make the request + response = client.set_iam_policy(request=request) + + # Handle the response + print(response) + Args: request (Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]): The request object. Request message for ``SetIamPolicy`` method. @@ -2754,6 +3282,34 @@ def test_iam_permissions( r"""Returns permissions that the caller has on the specified instance resource. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + from google.iam.v1 import iam_policy_pb2 # type: ignore + + def sample_test_iam_permissions(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = iam_policy_pb2.TestIamPermissionsRequest( + resource="resource_value", + permissions=['permissions_value1', 'permissions_value2'], + ) + + # Make the request + response = client.test_iam_permissions(request=request) + + # Handle the response + print(response) + Args: request (Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]): The request object. Request message for ``TestIamPermissions`` method. @@ -2850,6 +3406,33 @@ def list_hot_tablets( r"""Lists hot tablets in a cluster, within the time range provided. Hot tablets are ordered based on CPU usage. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. 
+ # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_list_hot_tablets(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListHotTabletsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_hot_tablets(request=request) + + # Handle the response + for response in page_result: + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.ListHotTabletsRequest, dict]): The request object. Request message for @@ -2951,6 +3534,41 @@ def create_logical_view( ) -> operation.Operation: r"""Creates a logical view within an instance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_create_logical_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + logical_view = bigtable_admin_v2.LogicalView() + logical_view.query = "query_value" + + request = bigtable_admin_v2.CreateLogicalViewRequest( + parent="parent_value", + logical_view_id="logical_view_id_value", + logical_view=logical_view, + ) + + # Make the request + operation = client.create_logical_view(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.CreateLogicalViewRequest, dict]): The request object. Request message for @@ -3066,6 +3684,32 @@ def get_logical_view( ) -> instance.LogicalView: r"""Gets information about a logical view. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_get_logical_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetLogicalViewRequest( + name="name_value", + ) + + # Make the request + response = client.get_logical_view(request=request) + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.GetLogicalViewRequest, dict]): The request object. Request message for @@ -3151,6 +3795,33 @@ def list_logical_views( ) -> pagers.ListLogicalViewsPager: r"""Lists information about logical views in an instance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. 
+ # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_list_logical_views(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListLogicalViewsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_logical_views(request=request) + + # Handle the response + for response in page_result: + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.ListLogicalViewsRequest, dict]): The request object. Request message for @@ -3251,6 +3922,39 @@ def update_logical_view( ) -> operation.Operation: r"""Updates a logical view within an instance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_update_logical_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + logical_view = bigtable_admin_v2.LogicalView() + logical_view.query = "query_value" + + request = bigtable_admin_v2.UpdateLogicalViewRequest( + logical_view=logical_view, + ) + + # Make the request + operation = client.update_logical_view(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.UpdateLogicalViewRequest, dict]): The request object. Request message for @@ -3361,6 +4065,29 @@ def delete_logical_view( ) -> None: r"""Deletes a logical view from an instance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_delete_logical_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteLogicalViewRequest( + name="name_value", + ) + + # Make the request + client.delete_logical_view(request=request) + Args: request (Union[google.cloud.bigtable_admin_v2.types.DeleteLogicalViewRequest, dict]): The request object. Request message for @@ -3439,6 +4166,41 @@ def create_materialized_view( ) -> operation.Operation: r"""Creates a materialized view within an instance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_create_materialized_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + materialized_view = bigtable_admin_v2.MaterializedView() + materialized_view.query = "query_value" + + request = bigtable_admin_v2.CreateMaterializedViewRequest( + parent="parent_value", + materialized_view_id="materialized_view_id_value", + materialized_view=materialized_view, + ) + + # Make the request + operation = client.create_materialized_view(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.CreateMaterializedViewRequest, dict]): The request object. Request message for @@ -3558,6 +4320,32 @@ def get_materialized_view( ) -> instance.MaterializedView: r"""Gets information about a materialized view. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_get_materialized_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetMaterializedViewRequest( + name="name_value", + ) + + # Make the request + response = client.get_materialized_view(request=request) + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.GetMaterializedViewRequest, dict]): The request object. Request message for @@ -3644,6 +4432,33 @@ def list_materialized_views( r"""Lists information about materialized views in an instance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_list_materialized_views(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListMaterializedViewsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_materialized_views(request=request) + + # Handle the response + for response in page_result: + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.ListMaterializedViewsRequest, dict]): The request object. Request message for @@ -3746,6 +4561,39 @@ def update_materialized_view( ) -> operation.Operation: r"""Updates a materialized view within an instance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. 
+ # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_update_materialized_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + materialized_view = bigtable_admin_v2.MaterializedView() + materialized_view.query = "query_value" + + request = bigtable_admin_v2.UpdateMaterializedViewRequest( + materialized_view=materialized_view, + ) + + # Make the request + operation = client.update_materialized_view(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.UpdateMaterializedViewRequest, dict]): The request object. Request message for @@ -3858,6 +4706,29 @@ def delete_materialized_view( ) -> None: r"""Deletes a materialized view from an instance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_delete_materialized_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteMaterializedViewRequest( + name="name_value", + ) + + # Make the request + client.delete_materialized_view(request=request) + Args: request (Union[google.cloud.bigtable_admin_v2.types.DeleteMaterializedViewRequest, dict]): The request object. Request message for diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index c3047b3cf33e..d79d1b088020 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -346,6 +346,33 @@ async def create_table( The table can be created with a full set of initial column families, specified in the request. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_create_table(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.CreateTableRequest( + parent="parent_value", + table_id="table_id_value", + ) + + # Make the request + response = await client.create_table(request=request) + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateTableRequest, dict]]): The request object. Request message for @@ -465,6 +492,38 @@ async def create_table_from_snapshot( recommended for production use. It is not subject to any SLA or deprecation policy. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_create_table_from_snapshot(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.CreateTableFromSnapshotRequest( + parent="parent_value", + table_id="table_id_value", + source_snapshot="source_snapshot_value", + ) + + # Make the request + operation = client.create_table_from_snapshot(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateTableFromSnapshotRequest, dict]]): The request object. Request message for @@ -590,6 +649,33 @@ async def list_tables( ) -> pagers.ListTablesAsyncPager: r"""Lists all tables served from a specified instance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_list_tables(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListTablesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_tables(request=request) + + # Handle the response + async for response in page_result: + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListTablesRequest, dict]]): The request object. Request message for @@ -690,6 +776,32 @@ async def get_table( ) -> table.Table: r"""Gets metadata information about the specified table. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_get_table(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetTableRequest( + name="name_value", + ) + + # Make the request + response = await client.get_table(request=request) + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetTableRequest, dict]]): The request object. Request message for @@ -779,6 +891,35 @@ async def update_table( ) -> operation_async.AsyncOperation: r"""Updates a specified table. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_update_table(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.UpdateTableRequest( + ) + + # Make the request + operation = client.update_table(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.UpdateTableRequest, dict]]): The request object. The request for @@ -900,6 +1041,29 @@ async def delete_table( r"""Permanently deletes a specified table and all of its data. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_delete_table(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteTableRequest( + name="name_value", + ) + + # Make the request + await client.delete_table(request=request) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteTableRequest, dict]]): The request object. Request message for @@ -980,6 +1144,36 @@ async def undelete_table( r"""Restores a specified table which was accidentally deleted. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_undelete_table(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.UndeleteTableRequest( + name="name_value", + ) + + # Make the request + operation = client.undelete_table(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.UndeleteTableRequest, dict]]): The request object. Request message for @@ -1081,6 +1275,37 @@ async def create_authorized_view( ) -> operation_async.AsyncOperation: r"""Creates a new AuthorizedView in a table. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_create_authorized_view(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.CreateAuthorizedViewRequest( + parent="parent_value", + authorized_view_id="authorized_view_id_value", + ) + + # Make the request + operation = client.create_authorized_view(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateAuthorizedViewRequest, dict]]): The request object. The request for @@ -1202,6 +1427,33 @@ async def list_authorized_views( ) -> pagers.ListAuthorizedViewsAsyncPager: r"""Lists all AuthorizedViews from a specific table. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_list_authorized_views(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListAuthorizedViewsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_authorized_views(request=request) + + # Handle the response + async for response in page_result: + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListAuthorizedViewsRequest, dict]]): The request object. Request message for @@ -1304,6 +1556,32 @@ async def get_authorized_view( ) -> table.AuthorizedView: r"""Gets information from a specified AuthorizedView. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. 
+ # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_get_authorized_view(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetAuthorizedViewRequest( + name="name_value", + ) + + # Make the request + response = await client.get_authorized_view(request=request) + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetAuthorizedViewRequest, dict]]): The request object. Request message for @@ -1397,6 +1675,35 @@ async def update_authorized_view( ) -> operation_async.AsyncOperation: r"""Updates an AuthorizedView in a table. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_update_authorized_view(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.UpdateAuthorizedViewRequest( + ) + + # Make the request + operation = client.update_authorized_view(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.UpdateAuthorizedViewRequest, dict]]): The request object. The request for @@ -1515,6 +1822,29 @@ async def delete_authorized_view( ) -> None: r"""Permanently deletes a specified AuthorizedView. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_delete_authorized_view(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteAuthorizedViewRequest( + name="name_value", + ) + + # Make the request + await client.delete_authorized_view(request=request) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteAuthorizedViewRequest, dict]]): The request object. Request message for @@ -1603,6 +1933,32 @@ async def modify_column_families( data requests received prior to that point may see a table where only some modifications have taken effect. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_modify_column_families(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ModifyColumnFamiliesRequest( + name="name_value", + ) + + # Make the request + response = await client.modify_column_families(request=request) + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest, dict]]): The request object. Request message for @@ -1707,6 +2063,30 @@ async def drop_row_range( rows in a table, or only those that match a particular prefix. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_drop_row_range(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DropRowRangeRequest( + row_key_prefix=b'row_key_prefix_blob', + name="name_value", + ) + + # Make the request + await client.drop_row_range(request=request) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.DropRowRangeRequest, dict]]): The request object. Request message for @@ -1765,6 +2145,32 @@ async def generate_consistency_token( been replicated. The tokens will be available for 90 days. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_generate_consistency_token(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GenerateConsistencyTokenRequest( + name="name_value", + ) + + # Make the request + response = await client.generate_consistency_token(request=request) + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenRequest, dict]]): The request object. Request message for @@ -1859,6 +2265,33 @@ async def check_consistency( the conditions specified in the token and the check request. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_check_consistency(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.CheckConsistencyRequest( + name="name_value", + consistency_token="consistency_token_value", + ) + + # Make the request + response = await client.check_consistency(request=request) + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.CheckConsistencyRequest, dict]]): The request object. Request message for @@ -1968,6 +2401,38 @@ async def snapshot_table( recommended for production use. It is not subject to any SLA or deprecation policy. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_snapshot_table(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.SnapshotTableRequest( + name="name_value", + cluster="cluster_value", + snapshot_id="snapshot_id_value", + ) + + # Make the request + operation = client.snapshot_table(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.SnapshotTableRequest, dict]]): The request object. Request message for @@ -2115,6 +2580,32 @@ async def get_snapshot( recommended for production use. It is not subject to any SLA or deprecation policy. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_get_snapshot(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetSnapshotRequest( + name="name_value", + ) + + # Make the request + response = await client.get_snapshot(request=request) + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetSnapshotRequest, dict]]): The request object. Request message for @@ -2228,6 +2719,33 @@ async def list_snapshots( recommended for production use. It is not subject to any SLA or deprecation policy. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_list_snapshots(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListSnapshotsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_snapshots(request=request) + + # Handle the response + async for response in page_result: + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListSnapshotsRequest, dict]]): The request object. Request message for @@ -2354,6 +2872,29 @@ async def delete_snapshot( recommended for production use. It is not subject to any SLA or deprecation policy. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_delete_snapshot(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteSnapshotRequest( + name="name_value", + ) + + # Make the request + await client.delete_snapshot(request=request) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteSnapshotRequest, dict]]): The request object. Request message for @@ -2448,6 +2989,41 @@ async def create_backup( Cancelling the returned operation will stop the creation and delete the backup. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_create_backup(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + backup = bigtable_admin_v2.Backup() + backup.source_table = "source_table_value" + + request = bigtable_admin_v2.CreateBackupRequest( + parent="parent_value", + backup_id="backup_id_value", + backup=backup, + ) + + # Make the request + operation = client.create_backup(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateBackupRequest, dict]]): The request object. The request for @@ -2568,6 +3144,32 @@ async def get_backup( r"""Gets metadata on a pending or completed Cloud Bigtable Backup. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_get_backup(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetBackupRequest( + name="name_value", + ) + + # Make the request + response = await client.get_backup(request=request) + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetBackupRequest, dict]]): The request object. The request for @@ -2652,6 +3254,35 @@ async def update_backup( ) -> table.Backup: r"""Updates a pending or completed Cloud Bigtable Backup. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_update_backup(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + backup = bigtable_admin_v2.Backup() + backup.source_table = "source_table_value" + + request = bigtable_admin_v2.UpdateBackupRequest( + backup=backup, + ) + + # Make the request + response = await client.update_backup(request=request) + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.UpdateBackupRequest, dict]]): The request object. The request for @@ -2755,6 +3386,29 @@ async def delete_backup( ) -> None: r"""Deletes a pending or completed Cloud Bigtable backup. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_delete_backup(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteBackupRequest( + name="name_value", + ) + + # Make the request + await client.delete_backup(request=request) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteBackupRequest, dict]]): The request object. The request for @@ -2833,6 +3487,33 @@ async def list_backups( r"""Lists Cloud Bigtable backups. Returns both completed and pending backups. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_list_backups(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListBackupsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_backups(request=request) + + # Handle the response + async for response in page_result: + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListBackupsRequest, dict]]): The request object. The request for @@ -2942,6 +3623,38 @@ async def _restore_table( The [response][google.longrunning.Operation.response] type is [Table][google.bigtable.admin.v2.Table], if successful. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_restore_table(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.RestoreTableRequest( + backup="backup_value", + parent="parent_value", + table_id="table_id_value", + ) + + # Make the request + operation = client._restore_table(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.RestoreTableRequest, dict]]): The request object. The request for @@ -3019,6 +3732,38 @@ async def copy_backup( destination cluster located in the destination instance and project. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_copy_backup(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.CopyBackupRequest( + parent="parent_value", + backup_id="backup_id_value", + source_backup="source_backup_value", + ) + + # Make the request + operation = client.copy_backup(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.CopyBackupRequest, dict]]): The request object. The request for @@ -3160,6 +3905,33 @@ async def get_iam_policy( resource. Returns an empty policy if the resource exists but does not have a policy set. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. 
+ # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + from google.iam.v1 import iam_policy_pb2 # type: ignore + + async def sample_get_iam_policy(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = iam_policy_pb2.GetIamPolicyRequest( + resource="resource_value", + ) + + # Make the request + response = await client.get_iam_policy(request=request) + + # Handle the response + print(response) + Args: request (Optional[Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]]): The request object. Request message for ``GetIamPolicy`` method. @@ -3272,6 +4044,33 @@ async def set_iam_policy( r"""Sets the access control policy on a Bigtable resource. Replaces any existing policy. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + from google.iam.v1 import iam_policy_pb2 # type: ignore + + async def sample_set_iam_policy(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = iam_policy_pb2.SetIamPolicyRequest( + resource="resource_value", + ) + + # Make the request + response = await client.set_iam_policy(request=request) + + # Handle the response + print(response) + Args: request (Optional[Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]]): The request object. Request message for ``SetIamPolicy`` method. @@ -3385,6 +4184,34 @@ async def test_iam_permissions( r"""Returns permissions that the caller has on the specified Bigtable resource. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + from google.iam.v1 import iam_policy_pb2 # type: ignore + + async def sample_test_iam_permissions(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = iam_policy_pb2.TestIamPermissionsRequest( + resource="resource_value", + permissions=['permissions_value1', 'permissions_value2'], + ) + + # Make the request + response = await client.test_iam_permissions(request=request) + + # Handle the response + print(response) + Args: request (Optional[Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]]): The request object. Request message for ``TestIamPermissions`` method. @@ -3481,6 +4308,41 @@ async def create_schema_bundle( ) -> operation_async.AsyncOperation: r"""Creates a new schema bundle in the specified table. + .. 
code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_create_schema_bundle(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + schema_bundle = bigtable_admin_v2.SchemaBundle() + schema_bundle.proto_schema.proto_descriptors = b'proto_descriptors_blob' + + request = bigtable_admin_v2.CreateSchemaBundleRequest( + parent="parent_value", + schema_bundle_id="schema_bundle_id_value", + schema_bundle=schema_bundle, + ) + + # Make the request + operation = client.create_schema_bundle(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateSchemaBundleRequest, dict]]): The request object. The request for @@ -3601,6 +4463,39 @@ async def update_schema_bundle( ) -> operation_async.AsyncOperation: r"""Updates a schema bundle in the specified table. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_update_schema_bundle(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + schema_bundle = bigtable_admin_v2.SchemaBundle() + schema_bundle.proto_schema.proto_descriptors = b'proto_descriptors_blob' + + request = bigtable_admin_v2.UpdateSchemaBundleRequest( + schema_bundle=schema_bundle, + ) + + # Make the request + operation = client.update_schema_bundle(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.UpdateSchemaBundleRequest, dict]]): The request object. The request for @@ -3714,6 +4609,32 @@ async def get_schema_bundle( r"""Gets metadata information about the specified schema bundle. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_get_schema_bundle(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetSchemaBundleRequest( + name="name_value", + ) + + # Make the request + response = await client.get_schema_bundle(request=request) + + # Handle the response + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetSchemaBundleRequest, dict]]): The request object. The request for @@ -3803,6 +4724,33 @@ async def list_schema_bundles( r"""Lists all schema bundles associated with the specified table. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_list_schema_bundles(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListSchemaBundlesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_schema_bundles(request=request) + + # Handle the response + async for response in page_result: + print(response) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListSchemaBundlesRequest, dict]]): The request object. The request for @@ -3905,6 +4853,29 @@ async def delete_schema_bundle( ) -> None: r"""Deletes a schema bundle in the specified table. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + async def sample_delete_schema_bundle(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteSchemaBundleRequest( + name="name_value", + ) + + # Make the request + await client.delete_schema_bundle(request=request) + Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteSchemaBundleRequest, dict]]): The request object. The request for diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index c1f5a3e64196..d0030af9257b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -906,6 +906,33 @@ def create_table( The table can be created with a full set of initial column families, specified in the request. + .. 
code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_create_table(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.CreateTableRequest( + parent="parent_value", + table_id="table_id_value", + ) + + # Make the request + response = client.create_table(request=request) + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.CreateTableRequest, dict]): The request object. Request message for @@ -1022,6 +1049,38 @@ def create_table_from_snapshot( recommended for production use. It is not subject to any SLA or deprecation policy. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_create_table_from_snapshot(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.CreateTableFromSnapshotRequest( + parent="parent_value", + table_id="table_id_value", + source_snapshot="source_snapshot_value", + ) + + # Make the request + operation = client.create_table_from_snapshot(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.CreateTableFromSnapshotRequest, dict]): The request object. Request message for @@ -1146,6 +1205,33 @@ def list_tables( ) -> pagers.ListTablesPager: r"""Lists all tables served from a specified instance. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_list_tables(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListTablesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_tables(request=request) + + # Handle the response + for response in page_result: + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.ListTablesRequest, dict]): The request object. Request message for @@ -1243,6 +1329,32 @@ def get_table( ) -> table.Table: r"""Gets metadata information about the specified table. + .. 
code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_get_table(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetTableRequest( + name="name_value", + ) + + # Make the request + response = client.get_table(request=request) + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.GetTableRequest, dict]): The request object. Request message for @@ -1329,6 +1441,35 @@ def update_table( ) -> operation.Operation: r"""Updates a specified table. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_update_table(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.UpdateTableRequest( + ) + + # Make the request + operation = client.update_table(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.UpdateTableRequest, dict]): The request object. The request for @@ -1447,6 +1588,29 @@ def delete_table( r"""Permanently deletes a specified table and all of its data. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_delete_table(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteTableRequest( + name="name_value", + ) + + # Make the request + client.delete_table(request=request) + Args: request (Union[google.cloud.bigtable_admin_v2.types.DeleteTableRequest, dict]): The request object. Request message for @@ -1524,6 +1688,36 @@ def undelete_table( r"""Restores a specified table which was accidentally deleted. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_undelete_table(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.UndeleteTableRequest( + name="name_value", + ) + + # Make the request + operation = client.undelete_table(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.UndeleteTableRequest, dict]): The request object. Request message for @@ -1622,6 +1816,37 @@ def create_authorized_view( ) -> operation.Operation: r"""Creates a new AuthorizedView in a table. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_create_authorized_view(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.CreateAuthorizedViewRequest( + parent="parent_value", + authorized_view_id="authorized_view_id_value", + ) + + # Make the request + operation = client.create_authorized_view(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.CreateAuthorizedViewRequest, dict]): The request object. The request for @@ -1740,6 +1965,33 @@ def list_authorized_views( ) -> pagers.ListAuthorizedViewsPager: r"""Lists all AuthorizedViews from a specific table. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_list_authorized_views(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListAuthorizedViewsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_authorized_views(request=request) + + # Handle the response + for response in page_result: + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.ListAuthorizedViewsRequest, dict]): The request object. Request message for @@ -1839,6 +2091,32 @@ def get_authorized_view( ) -> table.AuthorizedView: r"""Gets information from a specified AuthorizedView. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_get_authorized_view(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetAuthorizedViewRequest( + name="name_value", + ) + + # Make the request + response = client.get_authorized_view(request=request) + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.GetAuthorizedViewRequest, dict]): The request object. Request message for @@ -1929,6 +2207,35 @@ def update_authorized_view( ) -> operation.Operation: r"""Updates an AuthorizedView in a table. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_update_authorized_view(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.UpdateAuthorizedViewRequest( + ) + + # Make the request + operation = client.update_authorized_view(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.UpdateAuthorizedViewRequest, dict]): The request object. The request for @@ -2044,6 +2351,29 @@ def delete_authorized_view( ) -> None: r"""Permanently deletes a specified AuthorizedView. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_delete_authorized_view(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteAuthorizedViewRequest( + name="name_value", + ) + + # Make the request + client.delete_authorized_view(request=request) + Args: request (Union[google.cloud.bigtable_admin_v2.types.DeleteAuthorizedViewRequest, dict]): The request object. Request message for @@ -2129,6 +2459,32 @@ def modify_column_families( data requests received prior to that point may see a table where only some modifications have taken effect. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_modify_column_families(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ModifyColumnFamiliesRequest( + name="name_value", + ) + + # Make the request + response = client.modify_column_families(request=request) + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest, dict]): The request object. Request message for @@ -2230,6 +2586,30 @@ def drop_row_range( rows in a table, or only those that match a particular prefix. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_drop_row_range(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DropRowRangeRequest( + row_key_prefix=b'row_key_prefix_blob', + name="name_value", + ) + + # Make the request + client.drop_row_range(request=request) + Args: request (Union[google.cloud.bigtable_admin_v2.types.DropRowRangeRequest, dict]): The request object. Request message for @@ -2286,6 +2666,32 @@ def generate_consistency_token( been replicated. The tokens will be available for 90 days. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_generate_consistency_token(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GenerateConsistencyTokenRequest( + name="name_value", + ) + + # Make the request + response = client.generate_consistency_token(request=request) + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenRequest, dict]): The request object. Request message for @@ -2379,6 +2785,33 @@ def check_consistency( the conditions specified in the token and the check request. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_check_consistency(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.CheckConsistencyRequest( + name="name_value", + consistency_token="consistency_token_value", + ) + + # Make the request + response = client.check_consistency(request=request) + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.CheckConsistencyRequest, dict]): The request object. Request message for @@ -2485,6 +2918,38 @@ def snapshot_table( recommended for production use. It is not subject to any SLA or deprecation policy. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_snapshot_table(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.SnapshotTableRequest( + name="name_value", + cluster="cluster_value", + snapshot_id="snapshot_id_value", + ) + + # Make the request + operation = client.snapshot_table(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.SnapshotTableRequest, dict]): The request object. Request message for @@ -2629,6 +3094,32 @@ def get_snapshot( recommended for production use. It is not subject to any SLA or deprecation policy. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_get_snapshot(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetSnapshotRequest( + name="name_value", + ) + + # Make the request + response = client.get_snapshot(request=request) + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.GetSnapshotRequest, dict]): The request object. Request message for @@ -2739,6 +3230,33 @@ def list_snapshots( recommended for production use. It is not subject to any SLA or deprecation policy. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_list_snapshots(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListSnapshotsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_snapshots(request=request) + + # Handle the response + for response in page_result: + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.ListSnapshotsRequest, dict]): The request object. Request message for @@ -2862,6 +3380,29 @@ def delete_snapshot( recommended for production use. It is not subject to any SLA or deprecation policy. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_delete_snapshot(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteSnapshotRequest( + name="name_value", + ) + + # Make the request + client.delete_snapshot(request=request) + Args: request (Union[google.cloud.bigtable_admin_v2.types.DeleteSnapshotRequest, dict]): The request object. Request message for @@ -2953,6 +3494,41 @@ def create_backup( Cancelling the returned operation will stop the creation and delete the backup. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_create_backup(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + backup = bigtable_admin_v2.Backup() + backup.source_table = "source_table_value" + + request = bigtable_admin_v2.CreateBackupRequest( + parent="parent_value", + backup_id="backup_id_value", + backup=backup, + ) + + # Make the request + operation = client.create_backup(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.CreateBackupRequest, dict]): The request object. The request for @@ -3070,6 +3646,32 @@ def get_backup( r"""Gets metadata on a pending or completed Cloud Bigtable Backup. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_get_backup(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetBackupRequest( + name="name_value", + ) + + # Make the request + response = client.get_backup(request=request) + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.GetBackupRequest, dict]): The request object. The request for @@ -3151,6 +3753,35 @@ def update_backup( ) -> table.Backup: r"""Updates a pending or completed Cloud Bigtable Backup. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_update_backup(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + backup = bigtable_admin_v2.Backup() + backup.source_table = "source_table_value" + + request = bigtable_admin_v2.UpdateBackupRequest( + backup=backup, + ) + + # Make the request + response = client.update_backup(request=request) + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.UpdateBackupRequest, dict]): The request object. The request for @@ -3251,6 +3882,29 @@ def delete_backup( ) -> None: r"""Deletes a pending or completed Cloud Bigtable backup. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_delete_backup(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteBackupRequest( + name="name_value", + ) + + # Make the request + client.delete_backup(request=request) + Args: request (Union[google.cloud.bigtable_admin_v2.types.DeleteBackupRequest, dict]): The request object. The request for @@ -3326,6 +3980,33 @@ def list_backups( r"""Lists Cloud Bigtable backups. Returns both completed and pending backups. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_list_backups(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListBackupsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_backups(request=request) + + # Handle the response + for response in page_result: + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.ListBackupsRequest, dict]): The request object. The request for @@ -3432,6 +4113,38 @@ def _restore_table( The [response][google.longrunning.Operation.response] type is [Table][google.bigtable.admin.v2.Table], if successful. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_restore_table(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.RestoreTableRequest( + backup="backup_value", + parent="parent_value", + table_id="table_id_value", + ) + + # Make the request + operation = client._restore_table(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.RestoreTableRequest, dict]): The request object. The request for @@ -3507,6 +4220,38 @@ def copy_backup( destination cluster located in the destination instance and project. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_copy_backup(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.CopyBackupRequest( + parent="parent_value", + backup_id="backup_id_value", + source_backup="source_backup_value", + ) + + # Make the request + operation = client.copy_backup(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.CopyBackupRequest, dict]): The request object. The request for @@ -3645,6 +4390,33 @@ def get_iam_policy( resource. Returns an empty policy if the resource exists but does not have a policy set. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + from google.iam.v1 import iam_policy_pb2 # type: ignore + + def sample_get_iam_policy(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = iam_policy_pb2.GetIamPolicyRequest( + resource="resource_value", + ) + + # Make the request + response = client.get_iam_policy(request=request) + + # Handle the response + print(response) + Args: request (Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]): The request object. Request message for ``GetIamPolicy`` method. @@ -3758,6 +4530,33 @@ def set_iam_policy( r"""Sets the access control policy on a Bigtable resource. Replaces any existing policy. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + from google.iam.v1 import iam_policy_pb2 # type: ignore + + def sample_set_iam_policy(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = iam_policy_pb2.SetIamPolicyRequest( + resource="resource_value", + ) + + # Make the request + response = client.set_iam_policy(request=request) + + # Handle the response + print(response) + Args: request (Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]): The request object. Request message for ``SetIamPolicy`` method. @@ -3872,6 +4671,34 @@ def test_iam_permissions( r"""Returns permissions that the caller has on the specified Bigtable resource. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + from google.iam.v1 import iam_policy_pb2 # type: ignore + + def sample_test_iam_permissions(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = iam_policy_pb2.TestIamPermissionsRequest( + resource="resource_value", + permissions=['permissions_value1', 'permissions_value2'], + ) + + # Make the request + response = client.test_iam_permissions(request=request) + + # Handle the response + print(response) + Args: request (Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]): The request object. Request message for ``TestIamPermissions`` method. @@ -3969,6 +4796,41 @@ def create_schema_bundle( ) -> operation.Operation: r"""Creates a new schema bundle in the specified table. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_create_schema_bundle(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + schema_bundle = bigtable_admin_v2.SchemaBundle() + schema_bundle.proto_schema.proto_descriptors = b'proto_descriptors_blob' + + request = bigtable_admin_v2.CreateSchemaBundleRequest( + parent="parent_value", + schema_bundle_id="schema_bundle_id_value", + schema_bundle=schema_bundle, + ) + + # Make the request + operation = client.create_schema_bundle(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.CreateSchemaBundleRequest, dict]): The request object. The request for @@ -4086,6 +4948,39 @@ def update_schema_bundle( ) -> operation.Operation: r"""Updates a schema bundle in the specified table. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_update_schema_bundle(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + schema_bundle = bigtable_admin_v2.SchemaBundle() + schema_bundle.proto_schema.proto_descriptors = b'proto_descriptors_blob' + + request = bigtable_admin_v2.UpdateSchemaBundleRequest( + schema_bundle=schema_bundle, + ) + + # Make the request + operation = client.update_schema_bundle(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.UpdateSchemaBundleRequest, dict]): The request object. The request for @@ -4196,6 +5091,32 @@ def get_schema_bundle( r"""Gets metadata information about the specified schema bundle. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_get_schema_bundle(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetSchemaBundleRequest( + name="name_value", + ) + + # Make the request + response = client.get_schema_bundle(request=request) + + # Handle the response + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.GetSchemaBundleRequest, dict]): The request object. The request for @@ -4282,6 +5203,33 @@ def list_schema_bundles( r"""Lists all schema bundles associated with the specified table. + .. 
code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_list_schema_bundles(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListSchemaBundlesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_schema_bundles(request=request) + + # Handle the response + for response in page_result: + print(response) + Args: request (Union[google.cloud.bigtable_admin_v2.types.ListSchemaBundlesRequest, dict]): The request object. The request for @@ -4381,6 +5329,29 @@ def delete_schema_bundle( ) -> None: r"""Deletes a schema bundle in the specified table. + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import bigtable_admin_v2 + + def sample_delete_schema_bundle(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteSchemaBundleRequest( + name="name_value", + ) + + # Make the request + client.delete_schema_bundle(request=request) + Args: request (Union[google.cloud.bigtable_admin_v2.types.DeleteSchemaBundleRequest, dict]): The request object. The request for diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py index 2623b770e42f..865487f0d5e1 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py @@ -89,6 +89,20 @@ class Instance(proto.Message): Output only. Reserved for future use. This field is a member of `oneof`_ ``_satisfies_pzi``. + tags (MutableMapping[str, str]): + Optional. Input only. Immutable. Tag + keys/values directly bound to this resource. For + example: + + - "123/environment": "production", + - "123/costCenter": "marketing" + + Tags and Labels (above) are both used to bind + metadata to resources, with different use-cases. + See + https://cloud.google.com/resource-manager/docs/tags/tags-overview + for an in-depth overview on the difference + between tags and labels. 
""" class State(proto.Enum): @@ -169,6 +183,11 @@ class Type(proto.Enum): number=11, optional=True, ) + tags: MutableMapping[str, str] = proto.MapField( + proto.STRING, + proto.STRING, + number=12, + ) class AutoscalingTargets(proto.Message): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py index 5cb3fbaa4305..103ff141c925 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -391,13 +391,11 @@ def read_rows( header_params["app_profile_id"] = request.app_profile_id routing_param_regex = re.compile( - "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+/authorizedViews/[^/]+)$" + "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+)(?:/.*)?$" ) regex_match = routing_param_regex.match(request.authorized_view_name) - if regex_match and regex_match.group("authorized_view_name"): - header_params["authorized_view_name"] = regex_match.group( - "authorized_view_name" - ) + if regex_match and regex_match.group("table_name"): + header_params["table_name"] = regex_match.group("table_name") if header_params: metadata = tuple(metadata) + ( @@ -515,13 +513,11 @@ def sample_row_keys( header_params["app_profile_id"] = request.app_profile_id routing_param_regex = re.compile( - "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+/authorizedViews/[^/]+)$" + "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+)(?:/.*)?$" ) regex_match = routing_param_regex.match(request.authorized_view_name) - if regex_match and regex_match.group("authorized_view_name"): - header_params["authorized_view_name"] = regex_match.group( - "authorized_view_name" - ) + if regex_match and regex_match.group("table_name"): + header_params["table_name"] = regex_match.group("table_name") if header_params: metadata = tuple(metadata) + ( @@ -660,13 +656,11 @@ async def mutate_row( header_params["app_profile_id"] = request.app_profile_id routing_param_regex = re.compile( - "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+/authorizedViews/[^/]+)$" + "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+)(?:/.*)?$" ) regex_match = routing_param_regex.match(request.authorized_view_name) - if regex_match and regex_match.group("authorized_view_name"): - header_params["authorized_view_name"] = regex_match.group( - "authorized_view_name" - ) + if regex_match and regex_match.group("table_name"): + header_params["table_name"] = regex_match.group("table_name") if header_params: metadata = tuple(metadata) + ( @@ -799,13 +793,11 @@ def mutate_rows( header_params["app_profile_id"] = request.app_profile_id routing_param_regex = re.compile( - "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+/authorizedViews/[^/]+)$" + "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+)(?:/.*)?$" ) regex_match = routing_param_regex.match(request.authorized_view_name) - if regex_match and regex_match.group("authorized_view_name"): - header_params["authorized_view_name"] = regex_match.group( - "authorized_view_name" - ) + if regex_match and regex_match.group("table_name"): + header_params["table_name"] = regex_match.group("table_name") if header_params: metadata = tuple(metadata) + ( @@ -979,13 +971,11 @@ async def check_and_mutate_row( header_params["app_profile_id"] = request.app_profile_id routing_param_regex = re.compile( - "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+/authorizedViews/[^/]+)$" + 
"^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+)(?:/.*)?$" ) regex_match = routing_param_regex.match(request.authorized_view_name) - if regex_match and regex_match.group("authorized_view_name"): - header_params["authorized_view_name"] = regex_match.group( - "authorized_view_name" - ) + if regex_match and regex_match.group("table_name"): + header_params["table_name"] = regex_match.group("table_name") if header_params: metadata = tuple(metadata) + ( @@ -1242,13 +1232,11 @@ async def read_modify_write_row( header_params["app_profile_id"] = request.app_profile_id routing_param_regex = re.compile( - "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+/authorizedViews/[^/]+)$" + "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+)(?:/.*)?$" ) regex_match = routing_param_regex.match(request.authorized_view_name) - if regex_match and regex_match.group("authorized_view_name"): - header_params["authorized_view_name"] = regex_match.group( - "authorized_view_name" - ) + if regex_match and regex_match.group("table_name"): + header_params["table_name"] = regex_match.group("table_name") if header_params: metadata = tuple(metadata) + ( diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py index c35ea1514f85..ffc448c25f0d 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py @@ -867,13 +867,11 @@ def read_rows( header_params["app_profile_id"] = request.app_profile_id routing_param_regex = re.compile( - "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+/authorizedViews/[^/]+)$" + "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+)(?:/.*)?$" ) regex_match = routing_param_regex.match(request.authorized_view_name) - if regex_match and regex_match.group("authorized_view_name"): - header_params["authorized_view_name"] = regex_match.group( - "authorized_view_name" - ) + if regex_match and regex_match.group("table_name"): + header_params["table_name"] = regex_match.group("table_name") if header_params: metadata = tuple(metadata) + ( @@ -988,13 +986,11 @@ def sample_row_keys( header_params["app_profile_id"] = request.app_profile_id routing_param_regex = re.compile( - "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+/authorizedViews/[^/]+)$" + "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+)(?:/.*)?$" ) regex_match = routing_param_regex.match(request.authorized_view_name) - if regex_match and regex_match.group("authorized_view_name"): - header_params["authorized_view_name"] = regex_match.group( - "authorized_view_name" - ) + if regex_match and regex_match.group("table_name"): + header_params["table_name"] = regex_match.group("table_name") if header_params: metadata = tuple(metadata) + ( @@ -1130,13 +1126,11 @@ def mutate_row( header_params["app_profile_id"] = request.app_profile_id routing_param_regex = re.compile( - "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+/authorizedViews/[^/]+)$" + "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+)(?:/.*)?$" ) regex_match = routing_param_regex.match(request.authorized_view_name) - if regex_match and regex_match.group("authorized_view_name"): - header_params["authorized_view_name"] = regex_match.group( - "authorized_view_name" - ) + if regex_match and regex_match.group("table_name"): + header_params["table_name"] = regex_match.group("table_name") if header_params: metadata = tuple(metadata) + ( @@ -1266,13 +1260,11 @@ def 
mutate_rows( header_params["app_profile_id"] = request.app_profile_id routing_param_regex = re.compile( - "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+/authorizedViews/[^/]+)$" + "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+)(?:/.*)?$" ) regex_match = routing_param_regex.match(request.authorized_view_name) - if regex_match and regex_match.group("authorized_view_name"): - header_params["authorized_view_name"] = regex_match.group( - "authorized_view_name" - ) + if regex_match and regex_match.group("table_name"): + header_params["table_name"] = regex_match.group("table_name") if header_params: metadata = tuple(metadata) + ( @@ -1443,13 +1435,11 @@ def check_and_mutate_row( header_params["app_profile_id"] = request.app_profile_id routing_param_regex = re.compile( - "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+/authorizedViews/[^/]+)$" + "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+)(?:/.*)?$" ) regex_match = routing_param_regex.match(request.authorized_view_name) - if regex_match and regex_match.group("authorized_view_name"): - header_params["authorized_view_name"] = regex_match.group( - "authorized_view_name" - ) + if regex_match and regex_match.group("table_name"): + header_params["table_name"] = regex_match.group("table_name") if header_params: metadata = tuple(metadata) + ( @@ -1700,13 +1690,11 @@ def read_modify_write_row( header_params["app_profile_id"] = request.app_profile_id routing_param_regex = re.compile( - "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+/authorizedViews/[^/]+)$" + "^(?Pprojects/[^/]+/instances/[^/]+/tables/[^/]+)(?:/.*)?$" ) regex_match = routing_param_regex.match(request.authorized_view_name) - if regex_match and regex_match.group("authorized_view_name"): - header_params["authorized_view_name"] = regex_match.group( - "authorized_view_name" - ) + if regex_match and regex_match.group("table_name"): + header_params["table_name"] = regex_match.group("table_name") if header_params: metadata = tuple(metadata) + ( diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/types.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/types.py index 7f92a15ae71b..5eae9e526897 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/types.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/types.py @@ -119,6 +119,14 @@ class Type(proto.Message): map_type (google.cloud.bigtable_v2.types.Type.Map): Map + This field is a member of `oneof`_ ``kind``. + proto_type (google.cloud.bigtable_v2.types.Type.Proto): + Proto + + This field is a member of `oneof`_ ``kind``. + enum_type (google.cloud.bigtable_v2.types.Type.Enum): + Enum + This field is a member of `oneof`_ ``kind``. """ @@ -351,6 +359,52 @@ class Field(proto.Message): message="Type.Struct.Field", ) + class Proto(proto.Message): + r"""A protobuf message type. Values of type ``Proto`` are stored in + ``Value.bytes_value``. + + Attributes: + schema_bundle_id (str): + The ID of the schema bundle that this proto + is defined in. + message_name (str): + The fully qualified name of the protobuf + message, including package. In the format of + "foo.bar.Message". + """ + + schema_bundle_id: str = proto.Field( + proto.STRING, + number=1, + ) + message_name: str = proto.Field( + proto.STRING, + number=2, + ) + + class Enum(proto.Message): + r"""A protobuf enum type. Values of type ``Enum`` are stored in + ``Value.int_value``. + + Attributes: + schema_bundle_id (str): + The ID of the schema bundle that this enum is + defined in. 
+ enum_name (str): + The fully qualified name of the protobuf enum + message, including package. In the format of + "foo.bar.EnumMessage". + """ + + schema_bundle_id: str = proto.Field( + proto.STRING, + number=1, + ) + enum_name: str = proto.Field( + proto.STRING, + number=2, + ) + class Array(proto.Message): r"""An ordered list of elements of a given type. Values of type ``Array`` are stored in ``Value.array_value``. @@ -574,6 +628,18 @@ class HyperLogLogPlusPlusUniqueCount(proto.Message): oneof="kind", message=Map, ) + proto_type: Proto = proto.Field( + proto.MESSAGE, + number=13, + oneof="kind", + message=Proto, + ) + enum_type: Enum = proto.Field( + proto.MESSAGE, + number=14, + oneof="kind", + message=Enum, + ) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-bigtable/owlbot.py b/packages/google-cloud-bigtable/owlbot.py index 04d871e95a13..9562b61423a3 100644 --- a/packages/google-cloud-bigtable/owlbot.py +++ b/packages/google-cloud-bigtable/owlbot.py @@ -127,22 +127,17 @@ def get_staging_dirs( # fix tests s.replace( "tests/unit/gapic/bigtable_v2/test_bigtable.py", - 'expected_headers = {"name": "projects/sample1/instances/sample2"}', - 'expected_headers = {"name": "projects/sample1/instances/sample2", "app_profile_id": ""}' + 'assert \(\n\s*gapic_v1\.routing_header\.to_grpc_metadata\(expected_headers\) in kw\["metadata"\]\n.*', + """ + # assert the expected headers are present, in any order + routing_string = next(iter([m[1] for m in kw["metadata"] if m[0] == 'x-goog-request-params'])) + assert all([f"{k}={v}" in routing_string for k,v in expected_headers.items()]) + """ ) s.replace( "tests/unit/gapic/bigtable_v2/test_bigtable.py", - """ - expected_headers = { - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" - } - """, - """ - expected_headers = { - "app_profile_id": "", - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" - } - """ + 'expected_headers = {"name": "projects/sample1/instances/sample2"}', + 'expected_headers = {"name": "projects/sample1/instances/sample2", "app_profile_id": ""}' ) s.replace( "tests/unit/gapic/bigtable_v2/test_bigtable.py", diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_app_profile_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_app_profile_async.py new file mode 100644 index 000000000000..82dafab44fb2 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_app_profile_async.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateAppProfile +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateAppProfile_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_create_app_profile(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + app_profile = bigtable_admin_v2.AppProfile() + app_profile.priority = "PRIORITY_HIGH" + + request = bigtable_admin_v2.CreateAppProfileRequest( + parent="parent_value", + app_profile_id="app_profile_id_value", + app_profile=app_profile, + ) + + # Make the request + response = await client.create_app_profile(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateAppProfile_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_app_profile_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_app_profile_sync.py new file mode 100644 index 000000000000..82ff382b76a8 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_app_profile_sync.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateAppProfile +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateAppProfile_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_create_app_profile(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + app_profile = bigtable_admin_v2.AppProfile() + app_profile.priority = "PRIORITY_HIGH" + + request = bigtable_admin_v2.CreateAppProfileRequest( + parent="parent_value", + app_profile_id="app_profile_id_value", + app_profile=app_profile, + ) + + # Make the request + response = client.create_app_profile(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateAppProfile_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_cluster_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_cluster_async.py new file mode 100644 index 000000000000..fb9fac60f732 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_cluster_async.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateCluster +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateCluster_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_create_cluster(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.CreateClusterRequest( + parent="parent_value", + cluster_id="cluster_id_value", + ) + + # Make the request + operation = client.create_cluster(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateCluster_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_cluster_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_cluster_sync.py new file mode 100644 index 000000000000..d8d5f99582fd --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_cluster_sync.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateCluster +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateCluster_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_create_cluster(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.CreateClusterRequest( + parent="parent_value", + cluster_id="cluster_id_value", + ) + + # Make the request + operation = client.create_cluster(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateCluster_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_instance_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_instance_async.py new file mode 100644 index 000000000000..dbde6c4bc007 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_instance_async.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateInstance +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateInstance_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_create_instance(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + instance = bigtable_admin_v2.Instance() + instance.display_name = "display_name_value" + + request = bigtable_admin_v2.CreateInstanceRequest( + parent="parent_value", + instance_id="instance_id_value", + instance=instance, + ) + + # Make the request + operation = client.create_instance(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateInstance_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_instance_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_instance_sync.py new file mode 100644 index 000000000000..83ec90e53d31 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_instance_sync.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateInstance +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateInstance_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_create_instance(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + instance = bigtable_admin_v2.Instance() + instance.display_name = "display_name_value" + + request = bigtable_admin_v2.CreateInstanceRequest( + parent="parent_value", + instance_id="instance_id_value", + instance=instance, + ) + + # Make the request + operation = client.create_instance(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateInstance_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_logical_view_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_logical_view_async.py new file mode 100644 index 000000000000..6dfb1d6124f9 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_logical_view_async.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateLogicalView +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateLogicalView_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_create_logical_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + logical_view = bigtable_admin_v2.LogicalView() + logical_view.query = "query_value" + + request = bigtable_admin_v2.CreateLogicalViewRequest( + parent="parent_value", + logical_view_id="logical_view_id_value", + logical_view=logical_view, + ) + + # Make the request + operation = client.create_logical_view(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateLogicalView_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_logical_view_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_logical_view_sync.py new file mode 100644 index 000000000000..f0214acbf298 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_logical_view_sync.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateLogicalView +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateLogicalView_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_create_logical_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + logical_view = bigtable_admin_v2.LogicalView() + logical_view.query = "query_value" + + request = bigtable_admin_v2.CreateLogicalViewRequest( + parent="parent_value", + logical_view_id="logical_view_id_value", + logical_view=logical_view, + ) + + # Make the request + operation = client.create_logical_view(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateLogicalView_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_materialized_view_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_materialized_view_async.py new file mode 100644 index 000000000000..30481d2f3186 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_materialized_view_async.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateMaterializedView +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateMaterializedView_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_create_materialized_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + materialized_view = bigtable_admin_v2.MaterializedView() + materialized_view.query = "query_value" + + request = bigtable_admin_v2.CreateMaterializedViewRequest( + parent="parent_value", + materialized_view_id="materialized_view_id_value", + materialized_view=materialized_view, + ) + + # Make the request + operation = client.create_materialized_view(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateMaterializedView_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_materialized_view_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_materialized_view_sync.py new file mode 100644 index 000000000000..45116fb49f2e --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_create_materialized_view_sync.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateMaterializedView +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateMaterializedView_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_create_materialized_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + materialized_view = bigtable_admin_v2.MaterializedView() + materialized_view.query = "query_value" + + request = bigtable_admin_v2.CreateMaterializedViewRequest( + parent="parent_value", + materialized_view_id="materialized_view_id_value", + materialized_view=materialized_view, + ) + + # Make the request + operation = client.create_materialized_view(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateMaterializedView_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_app_profile_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_app_profile_async.py new file mode 100644 index 000000000000..76d272519a64 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_app_profile_async.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteAppProfile +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteAppProfile_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_delete_app_profile(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteAppProfileRequest( + name="name_value", + ignore_warnings=True, + ) + + # Make the request + await client.delete_app_profile(request=request) + + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteAppProfile_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_app_profile_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_app_profile_sync.py new file mode 100644 index 000000000000..47f552fb885f --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_app_profile_sync.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteAppProfile +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteAppProfile_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_delete_app_profile(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteAppProfileRequest( + name="name_value", + ignore_warnings=True, + ) + + # Make the request + client.delete_app_profile(request=request) + + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteAppProfile_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_cluster_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_cluster_async.py new file mode 100644 index 000000000000..6f97b6a5e997 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_cluster_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteCluster +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteCluster_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_delete_cluster(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteClusterRequest( + name="name_value", + ) + + # Make the request + await client.delete_cluster(request=request) + + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteCluster_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_cluster_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_cluster_sync.py new file mode 100644 index 000000000000..d058a08e6aeb --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_cluster_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteCluster +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteCluster_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_delete_cluster(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteClusterRequest( + name="name_value", + ) + + # Make the request + client.delete_cluster(request=request) + + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteCluster_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_instance_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_instance_async.py new file mode 100644 index 000000000000..ecf5583bed44 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_instance_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteInstance +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteInstance_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_delete_instance(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteInstanceRequest( + name="name_value", + ) + + # Make the request + await client.delete_instance(request=request) + + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteInstance_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_instance_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_instance_sync.py new file mode 100644 index 000000000000..e8f568486c47 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_instance_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteInstance +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteInstance_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_delete_instance(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteInstanceRequest( + name="name_value", + ) + + # Make the request + client.delete_instance(request=request) + + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteInstance_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_logical_view_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_logical_view_async.py new file mode 100644 index 000000000000..93f9d8ce8d64 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_logical_view_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteLogicalView +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteLogicalView_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_delete_logical_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteLogicalViewRequest( + name="name_value", + ) + + # Make the request + await client.delete_logical_view(request=request) + + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteLogicalView_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_logical_view_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_logical_view_sync.py new file mode 100644 index 000000000000..fdece2bbc197 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_logical_view_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteLogicalView +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteLogicalView_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_delete_logical_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteLogicalViewRequest( + name="name_value", + ) + + # Make the request + client.delete_logical_view(request=request) + + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteLogicalView_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_materialized_view_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_materialized_view_async.py new file mode 100644 index 000000000000..22a9f0ad402b --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_materialized_view_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteMaterializedView +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteMaterializedView_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_delete_materialized_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteMaterializedViewRequest( + name="name_value", + ) + + # Make the request + await client.delete_materialized_view(request=request) + + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteMaterializedView_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_materialized_view_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_materialized_view_sync.py new file mode 100644 index 000000000000..b6cf3a453c34 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_delete_materialized_view_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteMaterializedView +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteMaterializedView_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_delete_materialized_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteMaterializedViewRequest( + name="name_value", + ) + + # Make the request + client.delete_materialized_view(request=request) + + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteMaterializedView_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_app_profile_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_app_profile_async.py new file mode 100644 index 000000000000..3a59ca599370 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_app_profile_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetAppProfile +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_GetAppProfile_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_get_app_profile(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetAppProfileRequest( + name="name_value", + ) + + # Make the request + response = await client.get_app_profile(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_GetAppProfile_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_app_profile_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_app_profile_sync.py new file mode 100644 index 000000000000..2e54bfcad292 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_app_profile_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetAppProfile +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_GetAppProfile_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_get_app_profile(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetAppProfileRequest( + name="name_value", + ) + + # Make the request + response = client.get_app_profile(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_GetAppProfile_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_cluster_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_cluster_async.py new file mode 100644 index 000000000000..b4d89a11d819 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_cluster_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetCluster +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_GetCluster_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_get_cluster(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetClusterRequest( + name="name_value", + ) + + # Make the request + response = await client.get_cluster(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_GetCluster_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_cluster_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_cluster_sync.py new file mode 100644 index 000000000000..25a80a8718ac --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_cluster_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetCluster +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_GetCluster_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_get_cluster(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetClusterRequest( + name="name_value", + ) + + # Make the request + response = client.get_cluster(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_GetCluster_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_iam_policy_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_iam_policy_async.py new file mode 100644 index 000000000000..b2e479c11d4a --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_iam_policy_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetIamPolicy +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_GetIamPolicy_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 +from google.iam.v1 import iam_policy_pb2 # type: ignore + + +async def sample_get_iam_policy(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = iam_policy_pb2.GetIamPolicyRequest( + resource="resource_value", + ) + + # Make the request + response = await client.get_iam_policy(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_GetIamPolicy_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_iam_policy_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_iam_policy_sync.py new file mode 100644 index 000000000000..ffb2a81b02fa --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_iam_policy_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetIamPolicy +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_GetIamPolicy_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 +from google.iam.v1 import iam_policy_pb2 # type: ignore + + +def sample_get_iam_policy(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = iam_policy_pb2.GetIamPolicyRequest( + resource="resource_value", + ) + + # Make the request + response = client.get_iam_policy(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_GetIamPolicy_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_instance_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_instance_async.py new file mode 100644 index 000000000000..b76fac83a53f --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_instance_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetInstance +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_GetInstance_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_get_instance(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetInstanceRequest( + name="name_value", + ) + + # Make the request + response = await client.get_instance(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_GetInstance_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_instance_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_instance_sync.py new file mode 100644 index 000000000000..711ed99a5ced --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_instance_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetInstance +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_GetInstance_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_get_instance(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetInstanceRequest( + name="name_value", + ) + + # Make the request + response = client.get_instance(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_GetInstance_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_logical_view_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_logical_view_async.py new file mode 100644 index 000000000000..4ce25cdda60a --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_logical_view_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetLogicalView +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_GetLogicalView_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_get_logical_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetLogicalViewRequest( + name="name_value", + ) + + # Make the request + response = await client.get_logical_view(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_GetLogicalView_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_logical_view_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_logical_view_sync.py new file mode 100644 index 000000000000..daaf7fa63580 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_logical_view_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetLogicalView +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_GetLogicalView_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_get_logical_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetLogicalViewRequest( + name="name_value", + ) + + # Make the request + response = client.get_logical_view(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_GetLogicalView_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_materialized_view_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_materialized_view_async.py new file mode 100644 index 000000000000..165fb262c062 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_materialized_view_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetMaterializedView +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_GetMaterializedView_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_get_materialized_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetMaterializedViewRequest( + name="name_value", + ) + + # Make the request + response = await client.get_materialized_view(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_GetMaterializedView_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_materialized_view_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_materialized_view_sync.py new file mode 100644 index 000000000000..1f94e3954879 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_get_materialized_view_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetMaterializedView +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_GetMaterializedView_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_get_materialized_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetMaterializedViewRequest( + name="name_value", + ) + + # Make the request + response = client.get_materialized_view(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_GetMaterializedView_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_app_profiles_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_app_profiles_async.py new file mode 100644 index 000000000000..d377fc6784e7 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_app_profiles_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListAppProfiles +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_ListAppProfiles_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_list_app_profiles(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListAppProfilesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_app_profiles(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_ListAppProfiles_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_app_profiles_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_app_profiles_sync.py new file mode 100644 index 000000000000..07f49ba3977f --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_app_profiles_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListAppProfiles +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_ListAppProfiles_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_list_app_profiles(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListAppProfilesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_app_profiles(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_ListAppProfiles_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_clusters_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_clusters_async.py new file mode 100644 index 000000000000..71532d98aa3c --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_clusters_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListClusters +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_ListClusters_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_list_clusters(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListClustersRequest( + parent="parent_value", + ) + + # Make the request + response = await client.list_clusters(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_ListClusters_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_clusters_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_clusters_sync.py new file mode 100644 index 000000000000..1c36c098d536 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_clusters_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListClusters +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_ListClusters_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_list_clusters(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListClustersRequest( + parent="parent_value", + ) + + # Make the request + response = client.list_clusters(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_ListClusters_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_hot_tablets_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_hot_tablets_async.py new file mode 100644 index 000000000000..cb6d58847903 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_hot_tablets_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListHotTablets +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_ListHotTablets_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_list_hot_tablets(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListHotTabletsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_hot_tablets(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_ListHotTablets_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_hot_tablets_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_hot_tablets_sync.py new file mode 100644 index 000000000000..5add7715d2f4 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_hot_tablets_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListHotTablets +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_ListHotTablets_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_list_hot_tablets(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListHotTabletsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_hot_tablets(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_ListHotTablets_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_instances_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_instances_async.py new file mode 100644 index 000000000000..91c9a823024d --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_instances_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListInstances +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_ListInstances_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_list_instances(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListInstancesRequest( + parent="parent_value", + ) + + # Make the request + response = await client.list_instances(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_ListInstances_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_instances_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_instances_sync.py new file mode 100644 index 000000000000..bbe708c0e3c0 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_instances_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListInstances +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_ListInstances_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_list_instances(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListInstancesRequest( + parent="parent_value", + ) + + # Make the request + response = client.list_instances(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_ListInstances_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_logical_views_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_logical_views_async.py new file mode 100644 index 000000000000..8de9bd06e9d0 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_logical_views_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListLogicalViews +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_ListLogicalViews_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_list_logical_views(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListLogicalViewsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_logical_views(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_ListLogicalViews_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_logical_views_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_logical_views_sync.py new file mode 100644 index 000000000000..b5fb602cd656 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_logical_views_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListLogicalViews +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_ListLogicalViews_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_list_logical_views(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListLogicalViewsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_logical_views(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_ListLogicalViews_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_materialized_views_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_materialized_views_async.py new file mode 100644 index 000000000000..6fa672a2525a --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_materialized_views_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListMaterializedViews +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_ListMaterializedViews_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_list_materialized_views(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListMaterializedViewsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_materialized_views(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_ListMaterializedViews_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_materialized_views_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_materialized_views_sync.py new file mode 100644 index 000000000000..5a25da88ac06 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_list_materialized_views_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListMaterializedViews +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_ListMaterializedViews_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_list_materialized_views(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListMaterializedViewsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_materialized_views(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_ListMaterializedViews_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_cluster_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_cluster_async.py new file mode 100644 index 000000000000..dab73b9cb659 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_cluster_async.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for PartialUpdateCluster +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_PartialUpdateCluster_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_partial_update_cluster(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.PartialUpdateClusterRequest( + ) + + # Make the request + operation = client.partial_update_cluster(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_PartialUpdateCluster_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_cluster_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_cluster_sync.py new file mode 100644 index 000000000000..bab63c6ed1bd --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_cluster_sync.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for PartialUpdateCluster +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_PartialUpdateCluster_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_partial_update_cluster(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.PartialUpdateClusterRequest( + ) + + # Make the request + operation = client.partial_update_cluster(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_PartialUpdateCluster_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_instance_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_instance_async.py new file mode 100644 index 000000000000..4c5e53ebe929 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_instance_async.py @@ -0,0 +1,59 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for PartialUpdateInstance +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_PartialUpdateInstance_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_partial_update_instance(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + instance = bigtable_admin_v2.Instance() + instance.display_name = "display_name_value" + + request = bigtable_admin_v2.PartialUpdateInstanceRequest( + instance=instance, + ) + + # Make the request + operation = client.partial_update_instance(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_PartialUpdateInstance_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_instance_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_instance_sync.py new file mode 100644 index 000000000000..0d2a74cfcc30 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_instance_sync.py @@ -0,0 +1,59 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for PartialUpdateInstance +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_PartialUpdateInstance_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_partial_update_instance(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + instance = bigtable_admin_v2.Instance() + instance.display_name = "display_name_value" + + request = bigtable_admin_v2.PartialUpdateInstanceRequest( + instance=instance, + ) + + # Make the request + operation = client.partial_update_instance(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_PartialUpdateInstance_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_set_iam_policy_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_set_iam_policy_async.py new file mode 100644 index 000000000000..b389b76791b9 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_set_iam_policy_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for SetIamPolicy +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_SetIamPolicy_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 +from google.iam.v1 import iam_policy_pb2 # type: ignore + + +async def sample_set_iam_policy(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = iam_policy_pb2.SetIamPolicyRequest( + resource="resource_value", + ) + + # Make the request + response = await client.set_iam_policy(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_SetIamPolicy_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_set_iam_policy_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_set_iam_policy_sync.py new file mode 100644 index 000000000000..97bc29d65589 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_set_iam_policy_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for SetIamPolicy +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_SetIamPolicy_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 +from google.iam.v1 import iam_policy_pb2 # type: ignore + + +def sample_set_iam_policy(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = iam_policy_pb2.SetIamPolicyRequest( + resource="resource_value", + ) + + # Make the request + response = client.set_iam_policy(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_SetIamPolicy_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_test_iam_permissions_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_test_iam_permissions_async.py new file mode 100644 index 000000000000..977f79d9b148 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_test_iam_permissions_async.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for TestIamPermissions +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_TestIamPermissions_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 +from google.iam.v1 import iam_policy_pb2 # type: ignore + + +async def sample_test_iam_permissions(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = iam_policy_pb2.TestIamPermissionsRequest( + resource="resource_value", + permissions=['permissions_value1', 'permissions_value2'], + ) + + # Make the request + response = await client.test_iam_permissions(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_TestIamPermissions_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_test_iam_permissions_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_test_iam_permissions_sync.py new file mode 100644 index 000000000000..db047d3670f4 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_test_iam_permissions_sync.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for TestIamPermissions +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_TestIamPermissions_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 +from google.iam.v1 import iam_policy_pb2 # type: ignore + + +def sample_test_iam_permissions(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = iam_policy_pb2.TestIamPermissionsRequest( + resource="resource_value", + permissions=['permissions_value1', 'permissions_value2'], + ) + + # Make the request + response = client.test_iam_permissions(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_TestIamPermissions_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_app_profile_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_app_profile_async.py new file mode 100644 index 000000000000..2c55a45bd474 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_app_profile_async.py @@ -0,0 +1,59 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateAppProfile +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateAppProfile_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_update_app_profile(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + app_profile = bigtable_admin_v2.AppProfile() + app_profile.priority = "PRIORITY_HIGH" + + request = bigtable_admin_v2.UpdateAppProfileRequest( + app_profile=app_profile, + ) + + # Make the request + operation = client.update_app_profile(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateAppProfile_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_app_profile_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_app_profile_sync.py new file mode 100644 index 000000000000..a7b683426695 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_app_profile_sync.py @@ -0,0 +1,59 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateAppProfile +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateAppProfile_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_update_app_profile(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + app_profile = bigtable_admin_v2.AppProfile() + app_profile.priority = "PRIORITY_HIGH" + + request = bigtable_admin_v2.UpdateAppProfileRequest( + app_profile=app_profile, + ) + + # Make the request + operation = client.update_app_profile(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateAppProfile_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_cluster_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_cluster_async.py new file mode 100644 index 000000000000..af3abde41cb6 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_cluster_async.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateCluster +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateCluster_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_update_cluster(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.Cluster( + ) + + # Make the request + operation = client.update_cluster(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateCluster_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_cluster_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_cluster_sync.py new file mode 100644 index 000000000000..ec02a64aff7c --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_cluster_sync.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateCluster +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateCluster_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_update_cluster(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.Cluster( + ) + + # Make the request + operation = client.update_cluster(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateCluster_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_instance_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_instance_async.py new file mode 100644 index 000000000000..798afaf801a5 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_instance_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateInstance +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateInstance_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_update_instance(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.Instance( + display_name="display_name_value", + ) + + # Make the request + response = await client.update_instance(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateInstance_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_instance_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_instance_sync.py new file mode 100644 index 000000000000..fb6e5e2d3cc0 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_instance_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateInstance +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateInstance_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_update_instance(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.Instance( + display_name="display_name_value", + ) + + # Make the request + response = client.update_instance(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateInstance_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_logical_view_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_logical_view_async.py new file mode 100644 index 000000000000..9bdd620e6d1f --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_logical_view_async.py @@ -0,0 +1,59 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateLogicalView +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateLogicalView_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_update_logical_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + logical_view = bigtable_admin_v2.LogicalView() + logical_view.query = "query_value" + + request = bigtable_admin_v2.UpdateLogicalViewRequest( + logical_view=logical_view, + ) + + # Make the request + operation = client.update_logical_view(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateLogicalView_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_logical_view_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_logical_view_sync.py new file mode 100644 index 000000000000..10d962205fed --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_logical_view_sync.py @@ -0,0 +1,59 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateLogicalView +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateLogicalView_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_update_logical_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + logical_view = bigtable_admin_v2.LogicalView() + logical_view.query = "query_value" + + request = bigtable_admin_v2.UpdateLogicalViewRequest( + logical_view=logical_view, + ) + + # Make the request + operation = client.update_logical_view(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateLogicalView_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_materialized_view_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_materialized_view_async.py new file mode 100644 index 000000000000..ddd93047521d --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_materialized_view_async.py @@ -0,0 +1,59 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateMaterializedView +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateMaterializedView_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_update_materialized_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminAsyncClient() + + # Initialize request argument(s) + materialized_view = bigtable_admin_v2.MaterializedView() + materialized_view.query = "query_value" + + request = bigtable_admin_v2.UpdateMaterializedViewRequest( + materialized_view=materialized_view, + ) + + # Make the request + operation = client.update_materialized_view(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateMaterializedView_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_materialized_view_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_materialized_view_sync.py new file mode 100644 index 000000000000..a2ef78bd3dad --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_instance_admin_update_materialized_view_sync.py @@ -0,0 +1,59 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateMaterializedView +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateMaterializedView_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_update_materialized_view(): + # Create a client + client = bigtable_admin_v2.BigtableInstanceAdminClient() + + # Initialize request argument(s) + materialized_view = bigtable_admin_v2.MaterializedView() + materialized_view.query = "query_value" + + request = bigtable_admin_v2.UpdateMaterializedViewRequest( + materialized_view=materialized_view, + ) + + # Make the request + operation = client.update_materialized_view(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateMaterializedView_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_check_consistency_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_check_consistency_async.py new file mode 100644 index 000000000000..4cd57edc8245 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_check_consistency_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CheckConsistency +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_CheckConsistency_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_check_consistency(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.CheckConsistencyRequest( + name="name_value", + consistency_token="consistency_token_value", + ) + + # Make the request + response = await client.check_consistency(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_CheckConsistency_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_check_consistency_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_check_consistency_sync.py new file mode 100644 index 000000000000..ec6085b3f596 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_check_consistency_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CheckConsistency +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_CheckConsistency_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_check_consistency(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.CheckConsistencyRequest( + name="name_value", + consistency_token="consistency_token_value", + ) + + # Make the request + response = client.check_consistency(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_CheckConsistency_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_copy_backup_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_copy_backup_async.py new file mode 100644 index 000000000000..9355b7d44196 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_copy_backup_async.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CopyBackup +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_CopyBackup_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_copy_backup(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.CopyBackupRequest( + parent="parent_value", + backup_id="backup_id_value", + source_backup="source_backup_value", + ) + + # Make the request + operation = client.copy_backup(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_CopyBackup_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_copy_backup_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_copy_backup_sync.py new file mode 100644 index 000000000000..25456ad2176f --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_copy_backup_sync.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CopyBackup +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_CopyBackup_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_copy_backup(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.CopyBackupRequest( + parent="parent_value", + backup_id="backup_id_value", + source_backup="source_backup_value", + ) + + # Make the request + operation = client.copy_backup(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_CopyBackup_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_authorized_view_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_authorized_view_async.py new file mode 100644 index 000000000000..135bbe220738 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_authorized_view_async.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateAuthorizedView +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_CreateAuthorizedView_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_create_authorized_view(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.CreateAuthorizedViewRequest( + parent="parent_value", + authorized_view_id="authorized_view_id_value", + ) + + # Make the request + operation = client.create_authorized_view(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_CreateAuthorizedView_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_authorized_view_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_authorized_view_sync.py new file mode 100644 index 000000000000..cafbf56cb3f9 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_authorized_view_sync.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateAuthorizedView +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_CreateAuthorizedView_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_create_authorized_view(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.CreateAuthorizedViewRequest( + parent="parent_value", + authorized_view_id="authorized_view_id_value", + ) + + # Make the request + operation = client.create_authorized_view(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_CreateAuthorizedView_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_backup_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_backup_async.py new file mode 100644 index 000000000000..d9bd402b47c3 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_backup_async.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateBackup +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_CreateBackup_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_create_backup(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + backup = bigtable_admin_v2.Backup() + backup.source_table = "source_table_value" + + request = bigtable_admin_v2.CreateBackupRequest( + parent="parent_value", + backup_id="backup_id_value", + backup=backup, + ) + + # Make the request + operation = client.create_backup(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_CreateBackup_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_backup_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_backup_sync.py new file mode 100644 index 000000000000..835f0573c95e --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_backup_sync.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateBackup +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_CreateBackup_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_create_backup(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + backup = bigtable_admin_v2.Backup() + backup.source_table = "source_table_value" + + request = bigtable_admin_v2.CreateBackupRequest( + parent="parent_value", + backup_id="backup_id_value", + backup=backup, + ) + + # Make the request + operation = client.create_backup(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_CreateBackup_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_schema_bundle_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_schema_bundle_async.py new file mode 100644 index 000000000000..8e4992635e97 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_schema_bundle_async.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateSchemaBundle +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_CreateSchemaBundle_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_create_schema_bundle(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + schema_bundle = bigtable_admin_v2.SchemaBundle() + schema_bundle.proto_schema.proto_descriptors = b'proto_descriptors_blob' + + request = bigtable_admin_v2.CreateSchemaBundleRequest( + parent="parent_value", + schema_bundle_id="schema_bundle_id_value", + schema_bundle=schema_bundle, + ) + + # Make the request + operation = client.create_schema_bundle(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_CreateSchemaBundle_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_schema_bundle_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_schema_bundle_sync.py new file mode 100644 index 000000000000..a5911497d4aa --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_schema_bundle_sync.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateSchemaBundle +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_CreateSchemaBundle_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_create_schema_bundle(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + schema_bundle = bigtable_admin_v2.SchemaBundle() + schema_bundle.proto_schema.proto_descriptors = b'proto_descriptors_blob' + + request = bigtable_admin_v2.CreateSchemaBundleRequest( + parent="parent_value", + schema_bundle_id="schema_bundle_id_value", + schema_bundle=schema_bundle, + ) + + # Make the request + operation = client.create_schema_bundle(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_CreateSchemaBundle_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_async.py new file mode 100644 index 000000000000..3096539b98f3 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateTable +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_CreateTable_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_create_table(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.CreateTableRequest( + parent="parent_value", + table_id="table_id_value", + ) + + # Make the request + response = await client.create_table(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_CreateTable_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_from_snapshot_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_from_snapshot_async.py new file mode 100644 index 000000000000..f7767438ef71 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_from_snapshot_async.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateTableFromSnapshot +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_CreateTableFromSnapshot_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_create_table_from_snapshot(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.CreateTableFromSnapshotRequest( + parent="parent_value", + table_id="table_id_value", + source_snapshot="source_snapshot_value", + ) + + # Make the request + operation = client.create_table_from_snapshot(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_CreateTableFromSnapshot_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_from_snapshot_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_from_snapshot_sync.py new file mode 100644 index 000000000000..ff1dd7899c38 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_from_snapshot_sync.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateTableFromSnapshot +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_CreateTableFromSnapshot_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_create_table_from_snapshot(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.CreateTableFromSnapshotRequest( + parent="parent_value", + table_id="table_id_value", + source_snapshot="source_snapshot_value", + ) + + # Make the request + operation = client.create_table_from_snapshot(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_CreateTableFromSnapshot_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_sync.py new file mode 100644 index 000000000000..552a1095f3ee --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_create_table_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateTable +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_CreateTable_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_create_table(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.CreateTableRequest( + parent="parent_value", + table_id="table_id_value", + ) + + # Make the request + response = client.create_table(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_CreateTable_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_authorized_view_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_authorized_view_async.py new file mode 100644 index 000000000000..cbee06ae1baa --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_authorized_view_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteAuthorizedView +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_DeleteAuthorizedView_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_delete_authorized_view(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteAuthorizedViewRequest( + name="name_value", + ) + + # Make the request + await client.delete_authorized_view(request=request) + + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_DeleteAuthorizedView_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_authorized_view_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_authorized_view_sync.py new file mode 100644 index 000000000000..298e66efb467 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_authorized_view_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteAuthorizedView +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_DeleteAuthorizedView_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_delete_authorized_view(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteAuthorizedViewRequest( + name="name_value", + ) + + # Make the request + client.delete_authorized_view(request=request) + + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_DeleteAuthorizedView_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_backup_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_backup_async.py new file mode 100644 index 000000000000..d2615f7926c0 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_backup_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteBackup +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_DeleteBackup_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_delete_backup(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteBackupRequest( + name="name_value", + ) + + # Make the request + await client.delete_backup(request=request) + + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_DeleteBackup_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_backup_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_backup_sync.py new file mode 100644 index 000000000000..c9888bf39c42 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_backup_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteBackup +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_DeleteBackup_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_delete_backup(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteBackupRequest( + name="name_value", + ) + + # Make the request + client.delete_backup(request=request) + + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_DeleteBackup_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_schema_bundle_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_schema_bundle_async.py new file mode 100644 index 000000000000..7377299d137b --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_schema_bundle_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteSchemaBundle +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_DeleteSchemaBundle_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_delete_schema_bundle(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteSchemaBundleRequest( + name="name_value", + ) + + # Make the request + await client.delete_schema_bundle(request=request) + + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_DeleteSchemaBundle_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_schema_bundle_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_schema_bundle_sync.py new file mode 100644 index 000000000000..5dc12b46464b --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_schema_bundle_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteSchemaBundle +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_DeleteSchemaBundle_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_delete_schema_bundle(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteSchemaBundleRequest( + name="name_value", + ) + + # Make the request + client.delete_schema_bundle(request=request) + + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_DeleteSchemaBundle_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_snapshot_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_snapshot_async.py new file mode 100644 index 000000000000..eb8ca8166279 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_snapshot_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteSnapshot +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_DeleteSnapshot_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_delete_snapshot(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteSnapshotRequest( + name="name_value", + ) + + # Make the request + await client.delete_snapshot(request=request) + + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_DeleteSnapshot_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_snapshot_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_snapshot_sync.py new file mode 100644 index 000000000000..ad979615df01 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_snapshot_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteSnapshot +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_DeleteSnapshot_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_delete_snapshot(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteSnapshotRequest( + name="name_value", + ) + + # Make the request + client.delete_snapshot(request=request) + + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_DeleteSnapshot_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_table_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_table_async.py new file mode 100644 index 000000000000..375e615574bc --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_table_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteTable +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_DeleteTable_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_delete_table(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteTableRequest( + name="name_value", + ) + + # Make the request + await client.delete_table(request=request) + + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_DeleteTable_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_table_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_table_sync.py new file mode 100644 index 000000000000..17397bfabf22 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_delete_table_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteTable +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_DeleteTable_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_delete_table(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DeleteTableRequest( + name="name_value", + ) + + # Make the request + client.delete_table(request=request) + + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_DeleteTable_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_drop_row_range_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_drop_row_range_async.py new file mode 100644 index 000000000000..391205c7c1d8 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_drop_row_range_async.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DropRowRange +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_DropRowRange_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_drop_row_range(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DropRowRangeRequest( + row_key_prefix=b'row_key_prefix_blob', + name="name_value", + ) + + # Make the request + await client.drop_row_range(request=request) + + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_DropRowRange_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_drop_row_range_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_drop_row_range_sync.py new file mode 100644 index 000000000000..bcd528f1ae77 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_drop_row_range_sync.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DropRowRange +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_DropRowRange_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_drop_row_range(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.DropRowRangeRequest( + row_key_prefix=b'row_key_prefix_blob', + name="name_value", + ) + + # Make the request + client.drop_row_range(request=request) + + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_DropRowRange_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_generate_consistency_token_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_generate_consistency_token_async.py new file mode 100644 index 000000000000..1953441b6d61 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_generate_consistency_token_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GenerateConsistencyToken +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_GenerateConsistencyToken_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_generate_consistency_token(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GenerateConsistencyTokenRequest( + name="name_value", + ) + + # Make the request + response = await client.generate_consistency_token(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_GenerateConsistencyToken_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_generate_consistency_token_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_generate_consistency_token_sync.py new file mode 100644 index 000000000000..4ae52264d270 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_generate_consistency_token_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GenerateConsistencyToken +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_GenerateConsistencyToken_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_generate_consistency_token(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GenerateConsistencyTokenRequest( + name="name_value", + ) + + # Make the request + response = client.generate_consistency_token(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_GenerateConsistencyToken_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_authorized_view_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_authorized_view_async.py new file mode 100644 index 000000000000..129948bc5dd3 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_authorized_view_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetAuthorizedView +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_GetAuthorizedView_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_get_authorized_view(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetAuthorizedViewRequest( + name="name_value", + ) + + # Make the request + response = await client.get_authorized_view(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_GetAuthorizedView_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_authorized_view_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_authorized_view_sync.py new file mode 100644 index 000000000000..9cc63538c03a --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_authorized_view_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetAuthorizedView +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_GetAuthorizedView_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_get_authorized_view(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetAuthorizedViewRequest( + name="name_value", + ) + + # Make the request + response = client.get_authorized_view(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_GetAuthorizedView_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_backup_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_backup_async.py new file mode 100644 index 000000000000..524d63e8638b --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_backup_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetBackup +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_GetBackup_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_get_backup(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetBackupRequest( + name="name_value", + ) + + # Make the request + response = await client.get_backup(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_GetBackup_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_backup_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_backup_sync.py new file mode 100644 index 000000000000..5ed91b80c4db --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_backup_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetBackup +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_GetBackup_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_get_backup(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetBackupRequest( + name="name_value", + ) + + # Make the request + response = client.get_backup(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_GetBackup_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_iam_policy_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_iam_policy_async.py new file mode 100644 index 000000000000..a599239d587e --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_iam_policy_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetIamPolicy +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_GetIamPolicy_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 +from google.iam.v1 import iam_policy_pb2 # type: ignore + + +async def sample_get_iam_policy(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = iam_policy_pb2.GetIamPolicyRequest( + resource="resource_value", + ) + + # Make the request + response = await client.get_iam_policy(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_GetIamPolicy_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_iam_policy_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_iam_policy_sync.py new file mode 100644 index 000000000000..2d6e71c27394 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_iam_policy_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetIamPolicy +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_GetIamPolicy_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 +from google.iam.v1 import iam_policy_pb2 # type: ignore + + +def sample_get_iam_policy(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = iam_policy_pb2.GetIamPolicyRequest( + resource="resource_value", + ) + + # Make the request + response = client.get_iam_policy(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_GetIamPolicy_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_schema_bundle_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_schema_bundle_async.py new file mode 100644 index 000000000000..b5e580276409 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_schema_bundle_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetSchemaBundle +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_GetSchemaBundle_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_get_schema_bundle(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetSchemaBundleRequest( + name="name_value", + ) + + # Make the request + response = await client.get_schema_bundle(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_GetSchemaBundle_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_schema_bundle_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_schema_bundle_sync.py new file mode 100644 index 000000000000..1ea7b69b70e9 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_schema_bundle_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetSchemaBundle +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_GetSchemaBundle_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_get_schema_bundle(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetSchemaBundleRequest( + name="name_value", + ) + + # Make the request + response = client.get_schema_bundle(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_GetSchemaBundle_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_snapshot_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_snapshot_async.py new file mode 100644 index 000000000000..ae48060bb882 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_snapshot_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetSnapshot +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_GetSnapshot_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_get_snapshot(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetSnapshotRequest( + name="name_value", + ) + + # Make the request + response = await client.get_snapshot(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_GetSnapshot_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_snapshot_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_snapshot_sync.py new file mode 100644 index 000000000000..8626549fda5c --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_snapshot_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetSnapshot +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_GetSnapshot_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_get_snapshot(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetSnapshotRequest( + name="name_value", + ) + + # Make the request + response = client.get_snapshot(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_GetSnapshot_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_table_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_table_async.py new file mode 100644 index 000000000000..ff8dff1ae962 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_table_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetTable +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_GetTable_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_get_table(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetTableRequest( + name="name_value", + ) + + # Make the request + response = await client.get_table(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_GetTable_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_table_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_table_sync.py new file mode 100644 index 000000000000..ccb68b7664d2 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_get_table_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetTable +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_GetTable_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_get_table(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.GetTableRequest( + name="name_value", + ) + + # Make the request + response = client.get_table(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_GetTable_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_authorized_views_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_authorized_views_async.py new file mode 100644 index 000000000000..658b8f96a136 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_authorized_views_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListAuthorizedViews +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_ListAuthorizedViews_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_list_authorized_views(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListAuthorizedViewsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_authorized_views(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_ListAuthorizedViews_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_authorized_views_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_authorized_views_sync.py new file mode 100644 index 000000000000..a7bf4b6adaa5 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_authorized_views_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListAuthorizedViews +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_ListAuthorizedViews_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_list_authorized_views(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListAuthorizedViewsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_authorized_views(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_ListAuthorizedViews_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_backups_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_backups_async.py new file mode 100644 index 000000000000..368c376f015b --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_backups_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListBackups +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_ListBackups_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_list_backups(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListBackupsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_backups(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_ListBackups_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_backups_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_backups_sync.py new file mode 100644 index 000000000000..ca0e3e0f2329 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_backups_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListBackups +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_ListBackups_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_list_backups(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListBackupsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_backups(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_ListBackups_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_schema_bundles_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_schema_bundles_async.py new file mode 100644 index 000000000000..3daf30e6dd39 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_schema_bundles_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListSchemaBundles +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_ListSchemaBundles_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_list_schema_bundles(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListSchemaBundlesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_schema_bundles(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_ListSchemaBundles_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_schema_bundles_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_schema_bundles_sync.py new file mode 100644 index 000000000000..945d606bbf97 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_schema_bundles_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListSchemaBundles +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_ListSchemaBundles_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_list_schema_bundles(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListSchemaBundlesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_schema_bundles(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_ListSchemaBundles_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_snapshots_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_snapshots_async.py new file mode 100644 index 000000000000..91acb1d9e49e --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_snapshots_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListSnapshots +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_ListSnapshots_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_list_snapshots(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListSnapshotsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_snapshots(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_ListSnapshots_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_snapshots_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_snapshots_sync.py new file mode 100644 index 000000000000..7f809156fa44 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_snapshots_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListSnapshots +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_ListSnapshots_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_list_snapshots(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListSnapshotsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_snapshots(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_ListSnapshots_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_tables_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_tables_async.py new file mode 100644 index 000000000000..191de0fc738f --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_tables_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListTables +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_ListTables_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_list_tables(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListTablesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_tables(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_ListTables_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_tables_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_tables_sync.py new file mode 100644 index 000000000000..5d0f3a2781e4 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_list_tables_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListTables +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_ListTables_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_list_tables(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ListTablesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_tables(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_ListTables_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_modify_column_families_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_modify_column_families_async.py new file mode 100644 index 000000000000..2c206eb44706 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_modify_column_families_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ModifyColumnFamilies +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_ModifyColumnFamilies_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_modify_column_families(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ModifyColumnFamiliesRequest( + name="name_value", + ) + + # Make the request + response = await client.modify_column_families(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_ModifyColumnFamilies_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_modify_column_families_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_modify_column_families_sync.py new file mode 100644 index 000000000000..6224f5c5e62c --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_modify_column_families_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ModifyColumnFamilies +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_ModifyColumnFamilies_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_modify_column_families(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.ModifyColumnFamiliesRequest( + name="name_value", + ) + + # Make the request + response = client.modify_column_families(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_ModifyColumnFamilies_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_restore_table_async_internal.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_restore_table_async_internal.py new file mode 100644 index 000000000000..f70b5da17f49 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_restore_table_async_internal.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for RestoreTable +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_RestoreTable_async_internal] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_restore_table(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.RestoreTableRequest( + backup="backup_value", + parent="parent_value", + table_id="table_id_value", + ) + + # Make the request + operation = client._restore_table(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_RestoreTable_async_internal] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_restore_table_sync_internal.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_restore_table_sync_internal.py new file mode 100644 index 000000000000..45621c22b068 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_restore_table_sync_internal.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for RestoreTable +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_RestoreTable_sync_internal] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_restore_table(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.RestoreTableRequest( + backup="backup_value", + parent="parent_value", + table_id="table_id_value", + ) + + # Make the request + operation = client._restore_table(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_RestoreTable_sync_internal] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_set_iam_policy_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_set_iam_policy_async.py new file mode 100644 index 000000000000..cbfafdc7728c --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_set_iam_policy_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for SetIamPolicy +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_SetIamPolicy_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 +from google.iam.v1 import iam_policy_pb2 # type: ignore + + +async def sample_set_iam_policy(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = iam_policy_pb2.SetIamPolicyRequest( + resource="resource_value", + ) + + # Make the request + response = await client.set_iam_policy(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_SetIamPolicy_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_set_iam_policy_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_set_iam_policy_sync.py new file mode 100644 index 000000000000..9a6c5fcc23da --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_set_iam_policy_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for SetIamPolicy +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_SetIamPolicy_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 +from google.iam.v1 import iam_policy_pb2 # type: ignore + + +def sample_set_iam_policy(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = iam_policy_pb2.SetIamPolicyRequest( + resource="resource_value", + ) + + # Make the request + response = client.set_iam_policy(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_SetIamPolicy_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_snapshot_table_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_snapshot_table_async.py new file mode 100644 index 000000000000..6ff619e85bc8 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_snapshot_table_async.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for SnapshotTable +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_SnapshotTable_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_snapshot_table(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.SnapshotTableRequest( + name="name_value", + cluster="cluster_value", + snapshot_id="snapshot_id_value", + ) + + # Make the request + operation = client.snapshot_table(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_SnapshotTable_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_snapshot_table_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_snapshot_table_sync.py new file mode 100644 index 000000000000..f983f78240c0 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_snapshot_table_sync.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for SnapshotTable +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_SnapshotTable_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_snapshot_table(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.SnapshotTableRequest( + name="name_value", + cluster="cluster_value", + snapshot_id="snapshot_id_value", + ) + + # Make the request + operation = client.snapshot_table(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_SnapshotTable_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_test_iam_permissions_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_test_iam_permissions_async.py new file mode 100644 index 000000000000..ee5fe6027719 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_test_iam_permissions_async.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for TestIamPermissions +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_TestIamPermissions_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 +from google.iam.v1 import iam_policy_pb2 # type: ignore + + +async def sample_test_iam_permissions(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = iam_policy_pb2.TestIamPermissionsRequest( + resource="resource_value", + permissions=['permissions_value1', 'permissions_value2'], + ) + + # Make the request + response = await client.test_iam_permissions(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_TestIamPermissions_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_test_iam_permissions_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_test_iam_permissions_sync.py new file mode 100644 index 000000000000..46f0870b04e0 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_test_iam_permissions_sync.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for TestIamPermissions +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_TestIamPermissions_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 +from google.iam.v1 import iam_policy_pb2 # type: ignore + + +def sample_test_iam_permissions(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = iam_policy_pb2.TestIamPermissionsRequest( + resource="resource_value", + permissions=['permissions_value1', 'permissions_value2'], + ) + + # Make the request + response = client.test_iam_permissions(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_TestIamPermissions_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_undelete_table_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_undelete_table_async.py new file mode 100644 index 000000000000..1e2f6aa5afbf --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_undelete_table_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UndeleteTable +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_UndeleteTable_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_undelete_table(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.UndeleteTableRequest( + name="name_value", + ) + + # Make the request + operation = client.undelete_table(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_UndeleteTable_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_undelete_table_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_undelete_table_sync.py new file mode 100644 index 000000000000..637afee8b270 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_undelete_table_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UndeleteTable +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_UndeleteTable_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_undelete_table(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.UndeleteTableRequest( + name="name_value", + ) + + # Make the request + operation = client.undelete_table(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_UndeleteTable_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_authorized_view_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_authorized_view_async.py new file mode 100644 index 000000000000..541427d4894f --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_authorized_view_async.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateAuthorizedView +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_UpdateAuthorizedView_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_update_authorized_view(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.UpdateAuthorizedViewRequest( + ) + + # Make the request + operation = client.update_authorized_view(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_UpdateAuthorizedView_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_authorized_view_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_authorized_view_sync.py new file mode 100644 index 000000000000..9c8198d9aceb --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_authorized_view_sync.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateAuthorizedView +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_UpdateAuthorizedView_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_update_authorized_view(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.UpdateAuthorizedViewRequest( + ) + + # Make the request + operation = client.update_authorized_view(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_UpdateAuthorizedView_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_backup_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_backup_async.py new file mode 100644 index 000000000000..f98e1e33a771 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_backup_async.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateBackup +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_UpdateBackup_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_update_backup(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + backup = bigtable_admin_v2.Backup() + backup.source_table = "source_table_value" + + request = bigtable_admin_v2.UpdateBackupRequest( + backup=backup, + ) + + # Make the request + response = await client.update_backup(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_UpdateBackup_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_backup_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_backup_sync.py new file mode 100644 index 000000000000..466a3decb94e --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_backup_sync.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateBackup +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_UpdateBackup_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_update_backup(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + backup = bigtable_admin_v2.Backup() + backup.source_table = "source_table_value" + + request = bigtable_admin_v2.UpdateBackupRequest( + backup=backup, + ) + + # Make the request + response = client.update_backup(request=request) + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_UpdateBackup_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_schema_bundle_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_schema_bundle_async.py new file mode 100644 index 000000000000..96447088e398 --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_schema_bundle_async.py @@ -0,0 +1,59 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateSchemaBundle +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_UpdateSchemaBundle_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_update_schema_bundle(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + schema_bundle = bigtable_admin_v2.SchemaBundle() + schema_bundle.proto_schema.proto_descriptors = b'proto_descriptors_blob' + + request = bigtable_admin_v2.UpdateSchemaBundleRequest( + schema_bundle=schema_bundle, + ) + + # Make the request + operation = client.update_schema_bundle(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_UpdateSchemaBundle_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_schema_bundle_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_schema_bundle_sync.py new file mode 100644 index 000000000000..07568306045c --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_schema_bundle_sync.py @@ -0,0 +1,59 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateSchemaBundle +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_UpdateSchemaBundle_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_update_schema_bundle(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + schema_bundle = bigtable_admin_v2.SchemaBundle() + schema_bundle.proto_schema.proto_descriptors = b'proto_descriptors_blob' + + request = bigtable_admin_v2.UpdateSchemaBundleRequest( + schema_bundle=schema_bundle, + ) + + # Make the request + operation = client.update_schema_bundle(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_UpdateSchemaBundle_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_table_async.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_table_async.py new file mode 100644 index 000000000000..93839d36f5ce --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_table_async.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateTable +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_UpdateTable_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +async def sample_update_table(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminAsyncClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.UpdateTableRequest( + ) + + # Make the request + operation = client.update_table(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_UpdateTable_async] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_table_sync.py b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_table_sync.py new file mode 100644 index 000000000000..fea09f6a890c --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/bigtableadmin_v2_generated_bigtable_table_admin_update_table_sync.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateTable +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-bigtable-admin + + +# [START bigtableadmin_v2_generated_BigtableTableAdmin_UpdateTable_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import bigtable_admin_v2 + + +def sample_update_table(): + # Create a client + client = bigtable_admin_v2.BigtableTableAdminClient() + + # Initialize request argument(s) + request = bigtable_admin_v2.UpdateTableRequest( + ) + + # Make the request + operation = client.update_table(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END bigtableadmin_v2_generated_BigtableTableAdmin_UpdateTable_sync] diff --git a/packages/google-cloud-bigtable/samples/generated_samples/snippet_metadata_google.bigtable.admin.v2.json b/packages/google-cloud-bigtable/samples/generated_samples/snippet_metadata_google.bigtable.admin.v2.json new file mode 100644 index 000000000000..3d73099e881a --- /dev/null +++ b/packages/google-cloud-bigtable/samples/generated_samples/snippet_metadata_google.bigtable.admin.v2.json @@ -0,0 +1,10871 @@ +{ + "clientLibrary": { + "apis": [ + { + "id": "google.bigtable.admin.v2", + "version": "v2" + } + ], + "language": "PYTHON", + "name": "google-cloud-bigtable-admin", + "version": "0.1.0" + }, + "snippets": [ + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.create_app_profile", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.CreateAppProfile", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "CreateAppProfile" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.CreateAppProfileRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "app_profile_id", + "type": "str" + }, + { + "name": "app_profile", + "type": "google.cloud.bigtable_admin_v2.types.AppProfile" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.AppProfile", + "shortName": "create_app_profile" + }, + "description": "Sample for CreateAppProfile", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_create_app_profile_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateAppProfile_async", + "segments": [ + { + "end": 56, + "start": 27, + "type": "FULL" + }, + { + "end": 56, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 50, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 53, + "start": 51, + "type": "REQUEST_EXECUTION" + }, + { + "end": 57, + "start": 54, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_create_app_profile_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": 
"google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.create_app_profile", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.CreateAppProfile", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "CreateAppProfile" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.CreateAppProfileRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "app_profile_id", + "type": "str" + }, + { + "name": "app_profile", + "type": "google.cloud.bigtable_admin_v2.types.AppProfile" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.AppProfile", + "shortName": "create_app_profile" + }, + "description": "Sample for CreateAppProfile", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_create_app_profile_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateAppProfile_sync", + "segments": [ + { + "end": 56, + "start": 27, + "type": "FULL" + }, + { + "end": 56, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 50, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 53, + "start": 51, + "type": "REQUEST_EXECUTION" + }, + { + "end": 57, + "start": 54, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_create_app_profile_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.create_cluster", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.CreateCluster", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "CreateCluster" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.CreateClusterRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "cluster_id", + "type": "str" + }, + { + "name": "cluster", + "type": "google.cloud.bigtable_admin_v2.types.Cluster" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_cluster" + }, + "description": "Sample for CreateCluster", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_create_cluster_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateCluster_async", + "segments": [ + { + "end": 56, + "start": 27, + "type": "FULL" + }, + { + "end": 56, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 53, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 57, + "start": 54, + 
"type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_create_cluster_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.create_cluster", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.CreateCluster", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "CreateCluster" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.CreateClusterRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "cluster_id", + "type": "str" + }, + { + "name": "cluster", + "type": "google.cloud.bigtable_admin_v2.types.Cluster" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_cluster" + }, + "description": "Sample for CreateCluster", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_create_cluster_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateCluster_sync", + "segments": [ + { + "end": 56, + "start": 27, + "type": "FULL" + }, + { + "end": 56, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 53, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 57, + "start": 54, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_create_cluster_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.create_instance", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.CreateInstance", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "CreateInstance" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.CreateInstanceRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "instance_id", + "type": "str" + }, + { + "name": "instance", + "type": "google.cloud.bigtable_admin_v2.types.Instance" + }, + { + "name": "clusters", + "type": "MutableMapping[str, google.cloud.bigtable_admin_v2.types.Cluster]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_instance" + }, + "description": "Sample for CreateInstance", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_create_instance_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": 
"bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateInstance_async", + "segments": [ + { + "end": 60, + "start": 27, + "type": "FULL" + }, + { + "end": 60, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 50, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 57, + "start": 51, + "type": "REQUEST_EXECUTION" + }, + { + "end": 61, + "start": 58, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_create_instance_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.create_instance", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.CreateInstance", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "CreateInstance" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.CreateInstanceRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "instance_id", + "type": "str" + }, + { + "name": "instance", + "type": "google.cloud.bigtable_admin_v2.types.Instance" + }, + { + "name": "clusters", + "type": "MutableMapping[str, google.cloud.bigtable_admin_v2.types.Cluster]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_instance" + }, + "description": "Sample for CreateInstance", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_create_instance_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateInstance_sync", + "segments": [ + { + "end": 60, + "start": 27, + "type": "FULL" + }, + { + "end": 60, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 50, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 57, + "start": 51, + "type": "REQUEST_EXECUTION" + }, + { + "end": 61, + "start": 58, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_create_instance_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.create_logical_view", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.CreateLogicalView", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "CreateLogicalView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.CreateLogicalViewRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "logical_view", + "type": "google.cloud.bigtable_admin_v2.types.LogicalView" + }, + { + "name": "logical_view_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + 
"name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_logical_view" + }, + "description": "Sample for CreateLogicalView", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_create_logical_view_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateLogicalView_async", + "segments": [ + { + "end": 60, + "start": 27, + "type": "FULL" + }, + { + "end": 60, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 50, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 57, + "start": 51, + "type": "REQUEST_EXECUTION" + }, + { + "end": 61, + "start": 58, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_create_logical_view_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.create_logical_view", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.CreateLogicalView", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "CreateLogicalView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.CreateLogicalViewRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "logical_view", + "type": "google.cloud.bigtable_admin_v2.types.LogicalView" + }, + { + "name": "logical_view_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_logical_view" + }, + "description": "Sample for CreateLogicalView", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_create_logical_view_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateLogicalView_sync", + "segments": [ + { + "end": 60, + "start": 27, + "type": "FULL" + }, + { + "end": 60, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 50, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 57, + "start": 51, + "type": "REQUEST_EXECUTION" + }, + { + "end": 61, + "start": 58, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_create_logical_view_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.create_materialized_view", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.CreateMaterializedView", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "CreateMaterializedView" 
+ }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.CreateMaterializedViewRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "materialized_view", + "type": "google.cloud.bigtable_admin_v2.types.MaterializedView" + }, + { + "name": "materialized_view_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_materialized_view" + }, + "description": "Sample for CreateMaterializedView", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_create_materialized_view_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateMaterializedView_async", + "segments": [ + { + "end": 60, + "start": 27, + "type": "FULL" + }, + { + "end": 60, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 50, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 57, + "start": 51, + "type": "REQUEST_EXECUTION" + }, + { + "end": 61, + "start": 58, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_create_materialized_view_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.create_materialized_view", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.CreateMaterializedView", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "CreateMaterializedView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.CreateMaterializedViewRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "materialized_view", + "type": "google.cloud.bigtable_admin_v2.types.MaterializedView" + }, + { + "name": "materialized_view_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_materialized_view" + }, + "description": "Sample for CreateMaterializedView", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_create_materialized_view_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_CreateMaterializedView_sync", + "segments": [ + { + "end": 60, + "start": 27, + "type": "FULL" + }, + { + "end": 60, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 50, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 57, + "start": 51, + "type": "REQUEST_EXECUTION" + }, + { + "end": 61, + "start": 58, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_create_materialized_view_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + 
"fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.delete_app_profile", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteAppProfile", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "DeleteAppProfile" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.DeleteAppProfileRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "ignore_warnings", + "type": "bool" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "shortName": "delete_app_profile" + }, + "description": "Sample for DeleteAppProfile", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_app_profile_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteAppProfile_async", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_app_profile_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.delete_app_profile", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteAppProfile", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "DeleteAppProfile" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.DeleteAppProfileRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "ignore_warnings", + "type": "bool" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "shortName": "delete_app_profile" + }, + "description": "Sample for DeleteAppProfile", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_app_profile_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteAppProfile_sync", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_app_profile_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + 
"fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.delete_cluster", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteCluster", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "DeleteCluster" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.DeleteClusterRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "shortName": "delete_cluster" + }, + "description": "Sample for DeleteCluster", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_cluster_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteCluster_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_cluster_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.delete_cluster", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteCluster", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "DeleteCluster" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.DeleteClusterRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "shortName": "delete_cluster" + }, + "description": "Sample for DeleteCluster", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_cluster_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteCluster_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_cluster_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": 
"google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.delete_instance", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteInstance", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "DeleteInstance" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.DeleteInstanceRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "shortName": "delete_instance" + }, + "description": "Sample for DeleteInstance", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_instance_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteInstance_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_instance_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.delete_instance", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteInstance", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "DeleteInstance" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.DeleteInstanceRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "shortName": "delete_instance" + }, + "description": "Sample for DeleteInstance", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_instance_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteInstance_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_instance_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.delete_logical_view", + "method": { + "fullName": 
"google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteLogicalView", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "DeleteLogicalView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.DeleteLogicalViewRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "shortName": "delete_logical_view" + }, + "description": "Sample for DeleteLogicalView", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_logical_view_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteLogicalView_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_logical_view_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.delete_logical_view", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteLogicalView", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "DeleteLogicalView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.DeleteLogicalViewRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "shortName": "delete_logical_view" + }, + "description": "Sample for DeleteLogicalView", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_logical_view_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteLogicalView_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_logical_view_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.delete_materialized_view", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteMaterializedView", + "service": { + 
"fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "DeleteMaterializedView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.DeleteMaterializedViewRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "shortName": "delete_materialized_view" + }, + "description": "Sample for DeleteMaterializedView", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_materialized_view_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteMaterializedView_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_materialized_view_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.delete_materialized_view", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.DeleteMaterializedView", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "DeleteMaterializedView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.DeleteMaterializedViewRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "shortName": "delete_materialized_view" + }, + "description": "Sample for DeleteMaterializedView", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_materialized_view_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_DeleteMaterializedView_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_delete_materialized_view_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.get_app_profile", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.GetAppProfile", + "service": { + "fullName": 
"google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "GetAppProfile" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.GetAppProfileRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.AppProfile", + "shortName": "get_app_profile" + }, + "description": "Sample for GetAppProfile", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_get_app_profile_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_GetAppProfile_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_get_app_profile_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.get_app_profile", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.GetAppProfile", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "GetAppProfile" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.GetAppProfileRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.AppProfile", + "shortName": "get_app_profile" + }, + "description": "Sample for GetAppProfile", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_get_app_profile_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_GetAppProfile_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_get_app_profile_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.get_cluster", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.GetCluster", + "service": { + 
"fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "GetCluster" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.GetClusterRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.Cluster", + "shortName": "get_cluster" + }, + "description": "Sample for GetCluster", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_get_cluster_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_GetCluster_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_get_cluster_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.get_cluster", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.GetCluster", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "GetCluster" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.GetClusterRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.Cluster", + "shortName": "get_cluster" + }, + "description": "Sample for GetCluster", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_get_cluster_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_GetCluster_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_get_cluster_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.get_iam_policy", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.GetIamPolicy", + "service": { + "fullName": 
"google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "GetIamPolicy" + }, + "parameters": [ + { + "name": "request", + "type": "google.iam.v1.iam_policy_pb2.GetIamPolicyRequest" + }, + { + "name": "resource", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.iam.v1.policy_pb2.Policy", + "shortName": "get_iam_policy" + }, + "description": "Sample for GetIamPolicy", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_get_iam_policy_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_GetIamPolicy_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 41, + "start": 39, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 42, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_get_iam_policy_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.get_iam_policy", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.GetIamPolicy", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "GetIamPolicy" + }, + "parameters": [ + { + "name": "request", + "type": "google.iam.v1.iam_policy_pb2.GetIamPolicyRequest" + }, + { + "name": "resource", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.iam.v1.policy_pb2.Policy", + "shortName": "get_iam_policy" + }, + "description": "Sample for GetIamPolicy", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_get_iam_policy_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_GetIamPolicy_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 41, + "start": 39, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 42, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_get_iam_policy_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.get_instance", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.GetInstance", + "service": { + "fullName": 
"google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "GetInstance" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.GetInstanceRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.Instance", + "shortName": "get_instance" + }, + "description": "Sample for GetInstance", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_get_instance_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_GetInstance_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_get_instance_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.get_instance", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.GetInstance", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "GetInstance" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.GetInstanceRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.Instance", + "shortName": "get_instance" + }, + "description": "Sample for GetInstance", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_get_instance_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_GetInstance_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_get_instance_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.get_logical_view", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.GetLogicalView", + "service": { + "fullName": 
"google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "GetLogicalView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.GetLogicalViewRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.LogicalView", + "shortName": "get_logical_view" + }, + "description": "Sample for GetLogicalView", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_get_logical_view_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_GetLogicalView_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_get_logical_view_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.get_logical_view", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.GetLogicalView", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "GetLogicalView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.GetLogicalViewRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.LogicalView", + "shortName": "get_logical_view" + }, + "description": "Sample for GetLogicalView", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_get_logical_view_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_GetLogicalView_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_get_logical_view_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.get_materialized_view", + "method": { + "fullName": 
"google.bigtable.admin.v2.BigtableInstanceAdmin.GetMaterializedView", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "GetMaterializedView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.GetMaterializedViewRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.MaterializedView", + "shortName": "get_materialized_view" + }, + "description": "Sample for GetMaterializedView", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_get_materialized_view_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_GetMaterializedView_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_get_materialized_view_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.get_materialized_view", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.GetMaterializedView", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "GetMaterializedView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.GetMaterializedViewRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.MaterializedView", + "shortName": "get_materialized_view" + }, + "description": "Sample for GetMaterializedView", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_get_materialized_view_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_GetMaterializedView_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_get_materialized_view_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": 
"BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.list_app_profiles", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.ListAppProfiles", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "ListAppProfiles" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.ListAppProfilesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListAppProfilesAsyncPager", + "shortName": "list_app_profiles" + }, + "description": "Sample for ListAppProfiles", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_list_app_profiles_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_ListAppProfiles_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_list_app_profiles_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.list_app_profiles", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.ListAppProfiles", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "ListAppProfiles" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.ListAppProfilesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListAppProfilesPager", + "shortName": "list_app_profiles" + }, + "description": "Sample for ListAppProfiles", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_list_app_profiles_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_ListAppProfiles_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_list_app_profiles_sync.py" + }, + { + "canonical": 
true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.list_clusters", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.ListClusters", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "ListClusters" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.ListClustersRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.ListClustersResponse", + "shortName": "list_clusters" + }, + "description": "Sample for ListClusters", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_list_clusters_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_ListClusters_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_list_clusters_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.list_clusters", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.ListClusters", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "ListClusters" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.ListClustersRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.ListClustersResponse", + "shortName": "list_clusters" + }, + "description": "Sample for ListClusters", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_list_clusters_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_ListClusters_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_list_clusters_sync.py" + }, + 
{ + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.list_hot_tablets", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.ListHotTablets", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "ListHotTablets" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.ListHotTabletsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListHotTabletsAsyncPager", + "shortName": "list_hot_tablets" + }, + "description": "Sample for ListHotTablets", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_list_hot_tablets_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_ListHotTablets_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_list_hot_tablets_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.list_hot_tablets", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.ListHotTablets", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "ListHotTablets" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.ListHotTabletsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListHotTabletsPager", + "shortName": "list_hot_tablets" + }, + "description": "Sample for ListHotTablets", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_list_hot_tablets_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_ListHotTablets_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + 
"type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_list_hot_tablets_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.list_instances", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.ListInstances", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "ListInstances" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.ListInstancesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.ListInstancesResponse", + "shortName": "list_instances" + }, + "description": "Sample for ListInstances", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_list_instances_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_ListInstances_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_list_instances_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.list_instances", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.ListInstances", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "ListInstances" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.ListInstancesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.ListInstancesResponse", + "shortName": "list_instances" + }, + "description": "Sample for ListInstances", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_list_instances_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_ListInstances_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" 
+ }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_list_instances_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.list_logical_views", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.ListLogicalViews", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "ListLogicalViews" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.ListLogicalViewsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListLogicalViewsAsyncPager", + "shortName": "list_logical_views" + }, + "description": "Sample for ListLogicalViews", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_list_logical_views_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_ListLogicalViews_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_list_logical_views_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.list_logical_views", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.ListLogicalViews", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "ListLogicalViews" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.ListLogicalViewsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListLogicalViewsPager", + "shortName": "list_logical_views" + }, + "description": "Sample for ListLogicalViews", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_list_logical_views_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_ListLogicalViews_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + 
"type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_list_logical_views_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.list_materialized_views", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.ListMaterializedViews", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "ListMaterializedViews" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.ListMaterializedViewsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListMaterializedViewsAsyncPager", + "shortName": "list_materialized_views" + }, + "description": "Sample for ListMaterializedViews", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_list_materialized_views_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_ListMaterializedViews_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_list_materialized_views_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.list_materialized_views", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.ListMaterializedViews", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "ListMaterializedViews" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.ListMaterializedViewsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListMaterializedViewsPager", + "shortName": "list_materialized_views" + }, + "description": "Sample for ListMaterializedViews", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_list_materialized_views_sync.py", + "language": "PYTHON", + "origin": 
"API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_ListMaterializedViews_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_list_materialized_views_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.partial_update_cluster", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.PartialUpdateCluster", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "PartialUpdateCluster" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.PartialUpdateClusterRequest" + }, + { + "name": "cluster", + "type": "google.cloud.bigtable_admin_v2.types.Cluster" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "partial_update_cluster" + }, + "description": "Sample for PartialUpdateCluster", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_cluster_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_PartialUpdateCluster_async", + "segments": [ + { + "end": 54, + "start": 27, + "type": "FULL" + }, + { + "end": 54, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 51, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 55, + "start": 52, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_cluster_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.partial_update_cluster", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.PartialUpdateCluster", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "PartialUpdateCluster" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.PartialUpdateClusterRequest" + }, + { + "name": "cluster", + "type": "google.cloud.bigtable_admin_v2.types.Cluster" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": 
"timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "partial_update_cluster" + }, + "description": "Sample for PartialUpdateCluster", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_cluster_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_PartialUpdateCluster_sync", + "segments": [ + { + "end": 54, + "start": 27, + "type": "FULL" + }, + { + "end": 54, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 51, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 55, + "start": 52, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_cluster_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.partial_update_instance", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.PartialUpdateInstance", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "PartialUpdateInstance" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.PartialUpdateInstanceRequest" + }, + { + "name": "instance", + "type": "google.cloud.bigtable_admin_v2.types.Instance" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "partial_update_instance" + }, + "description": "Sample for PartialUpdateInstance", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_instance_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_PartialUpdateInstance_async", + "segments": [ + { + "end": 58, + "start": 27, + "type": "FULL" + }, + { + "end": 58, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 48, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 55, + "start": 49, + "type": "REQUEST_EXECUTION" + }, + { + "end": 59, + "start": 56, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_instance_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.partial_update_instance", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.PartialUpdateInstance", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": 
"PartialUpdateInstance" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.PartialUpdateInstanceRequest" + }, + { + "name": "instance", + "type": "google.cloud.bigtable_admin_v2.types.Instance" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "partial_update_instance" + }, + "description": "Sample for PartialUpdateInstance", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_instance_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_PartialUpdateInstance_sync", + "segments": [ + { + "end": 58, + "start": 27, + "type": "FULL" + }, + { + "end": 58, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 48, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 55, + "start": 49, + "type": "REQUEST_EXECUTION" + }, + { + "end": 59, + "start": 56, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_partial_update_instance_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.set_iam_policy", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.SetIamPolicy", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "SetIamPolicy" + }, + "parameters": [ + { + "name": "request", + "type": "google.iam.v1.iam_policy_pb2.SetIamPolicyRequest" + }, + { + "name": "resource", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.iam.v1.policy_pb2.Policy", + "shortName": "set_iam_policy" + }, + "description": "Sample for SetIamPolicy", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_set_iam_policy_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_SetIamPolicy_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 41, + "start": 39, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 42, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_set_iam_policy_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.set_iam_policy", + "method": { + "fullName": 
"google.bigtable.admin.v2.BigtableInstanceAdmin.SetIamPolicy", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "SetIamPolicy" + }, + "parameters": [ + { + "name": "request", + "type": "google.iam.v1.iam_policy_pb2.SetIamPolicyRequest" + }, + { + "name": "resource", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.iam.v1.policy_pb2.Policy", + "shortName": "set_iam_policy" + }, + "description": "Sample for SetIamPolicy", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_set_iam_policy_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_SetIamPolicy_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 41, + "start": 39, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 42, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_set_iam_policy_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.test_iam_permissions", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.TestIamPermissions", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "TestIamPermissions" + }, + "parameters": [ + { + "name": "request", + "type": "google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest" + }, + { + "name": "resource", + "type": "str" + }, + { + "name": "permissions", + "type": "MutableSequence[str]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse", + "shortName": "test_iam_permissions" + }, + "description": "Sample for TestIamPermissions", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_test_iam_permissions_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_TestIamPermissions_async", + "segments": [ + { + "end": 53, + "start": 27, + "type": "FULL" + }, + { + "end": 53, + "start": 27, + "type": "SHORT" + }, + { + "end": 41, + "start": 39, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 47, + "start": 42, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 50, + "start": 48, + "type": "REQUEST_EXECUTION" + }, + { + "end": 54, + "start": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_test_iam_permissions_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": 
"google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.test_iam_permissions", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.TestIamPermissions", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "TestIamPermissions" + }, + "parameters": [ + { + "name": "request", + "type": "google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest" + }, + { + "name": "resource", + "type": "str" + }, + { + "name": "permissions", + "type": "MutableSequence[str]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse", + "shortName": "test_iam_permissions" + }, + "description": "Sample for TestIamPermissions", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_test_iam_permissions_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_TestIamPermissions_sync", + "segments": [ + { + "end": 53, + "start": 27, + "type": "FULL" + }, + { + "end": 53, + "start": 27, + "type": "SHORT" + }, + { + "end": 41, + "start": 39, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 47, + "start": 42, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 50, + "start": 48, + "type": "REQUEST_EXECUTION" + }, + { + "end": 54, + "start": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_test_iam_permissions_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.update_app_profile", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateAppProfile", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "UpdateAppProfile" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.UpdateAppProfileRequest" + }, + { + "name": "app_profile", + "type": "google.cloud.bigtable_admin_v2.types.AppProfile" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "update_app_profile" + }, + "description": "Sample for UpdateAppProfile", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_update_app_profile_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateAppProfile_async", + "segments": [ + { + "end": 58, + "start": 27, + "type": "FULL" + }, + { + "end": 58, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 48, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 55, + "start": 49, + "type": "REQUEST_EXECUTION" + }, + { + "end": 59, + "start": 56, + "type": "RESPONSE_HANDLING" + } + 
], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_update_app_profile_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.update_app_profile", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateAppProfile", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "UpdateAppProfile" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.UpdateAppProfileRequest" + }, + { + "name": "app_profile", + "type": "google.cloud.bigtable_admin_v2.types.AppProfile" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "update_app_profile" + }, + "description": "Sample for UpdateAppProfile", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_update_app_profile_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateAppProfile_sync", + "segments": [ + { + "end": 58, + "start": 27, + "type": "FULL" + }, + { + "end": 58, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 48, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 55, + "start": 49, + "type": "REQUEST_EXECUTION" + }, + { + "end": 59, + "start": 56, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_update_app_profile_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.update_cluster", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateCluster", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "UpdateCluster" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.Cluster" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "update_cluster" + }, + "description": "Sample for UpdateCluster", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_update_cluster_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateCluster_async", + "segments": [ + { + "end": 54, + "start": 27, + "type": "FULL" + }, + { + "end": 54, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 51, + 
"start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 55, + "start": 52, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_update_cluster_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.update_cluster", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateCluster", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "UpdateCluster" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.Cluster" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "update_cluster" + }, + "description": "Sample for UpdateCluster", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_update_cluster_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateCluster_sync", + "segments": [ + { + "end": 54, + "start": 27, + "type": "FULL" + }, + { + "end": 54, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 51, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 55, + "start": 52, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_update_cluster_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.update_instance", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateInstance", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "UpdateInstance" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.Instance" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.Instance", + "shortName": "update_instance" + }, + "description": "Sample for UpdateInstance", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_update_instance_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateInstance_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": 
"RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_update_instance_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.update_instance", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateInstance", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "UpdateInstance" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.Instance" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.Instance", + "shortName": "update_instance" + }, + "description": "Sample for UpdateInstance", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_update_instance_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateInstance_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_update_instance_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.update_logical_view", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateLogicalView", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "UpdateLogicalView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.UpdateLogicalViewRequest" + }, + { + "name": "logical_view", + "type": "google.cloud.bigtable_admin_v2.types.LogicalView" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "update_logical_view" + }, + "description": "Sample for UpdateLogicalView", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_update_logical_view_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateLogicalView_async", + "segments": [ + { + "end": 58, + "start": 27, + "type": "FULL" + }, + { + "end": 58, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 48, + "start": 41, + "type": 
"REQUEST_INITIALIZATION" + }, + { + "end": 55, + "start": 49, + "type": "REQUEST_EXECUTION" + }, + { + "end": 59, + "start": 56, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_update_logical_view_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.update_logical_view", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateLogicalView", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "UpdateLogicalView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.UpdateLogicalViewRequest" + }, + { + "name": "logical_view", + "type": "google.cloud.bigtable_admin_v2.types.LogicalView" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "update_logical_view" + }, + "description": "Sample for UpdateLogicalView", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_update_logical_view_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateLogicalView_sync", + "segments": [ + { + "end": 58, + "start": 27, + "type": "FULL" + }, + { + "end": 58, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 48, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 55, + "start": 49, + "type": "REQUEST_EXECUTION" + }, + { + "end": 59, + "start": 56, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_update_logical_view_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient", + "shortName": "BigtableInstanceAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminAsyncClient.update_materialized_view", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateMaterializedView", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "UpdateMaterializedView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.UpdateMaterializedViewRequest" + }, + { + "name": "materialized_view", + "type": "google.cloud.bigtable_admin_v2.types.MaterializedView" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "update_materialized_view" + }, + "description": "Sample for UpdateMaterializedView", + "file": 
"bigtableadmin_v2_generated_bigtable_instance_admin_update_materialized_view_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateMaterializedView_async", + "segments": [ + { + "end": 58, + "start": 27, + "type": "FULL" + }, + { + "end": 58, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 48, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 55, + "start": 49, + "type": "REQUEST_EXECUTION" + }, + { + "end": 59, + "start": 56, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_update_materialized_view_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient", + "shortName": "BigtableInstanceAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BigtableInstanceAdminClient.update_materialized_view", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin.UpdateMaterializedView", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "shortName": "BigtableInstanceAdmin" + }, + "shortName": "UpdateMaterializedView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.UpdateMaterializedViewRequest" + }, + { + "name": "materialized_view", + "type": "google.cloud.bigtable_admin_v2.types.MaterializedView" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "update_materialized_view" + }, + "description": "Sample for UpdateMaterializedView", + "file": "bigtableadmin_v2_generated_bigtable_instance_admin_update_materialized_view_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableInstanceAdmin_UpdateMaterializedView_sync", + "segments": [ + { + "end": 58, + "start": 27, + "type": "FULL" + }, + { + "end": 58, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 48, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 55, + "start": 49, + "type": "REQUEST_EXECUTION" + }, + { + "end": 59, + "start": 56, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_instance_admin_update_materialized_view_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.check_consistency", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "CheckConsistency" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.CheckConsistencyRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "consistency_token", + "type": "str" + }, + { + "name": "retry", + "type": 
"google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.CheckConsistencyResponse", + "shortName": "check_consistency" + }, + "description": "Sample for CheckConsistency", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_check_consistency_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CheckConsistency_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_check_consistency_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.check_consistency", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "CheckConsistency" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.CheckConsistencyRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "consistency_token", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.CheckConsistencyResponse", + "shortName": "check_consistency" + }, + "description": "Sample for CheckConsistency", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_check_consistency_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CheckConsistency_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_check_consistency_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.copy_backup", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "CopyBackup" + }, + "parameters": [ + { + "name": "request", + "type": 
"google.cloud.bigtable_admin_v2.types.CopyBackupRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "backup_id", + "type": "str" + }, + { + "name": "source_backup", + "type": "str" + }, + { + "name": "expire_time", + "type": "google.protobuf.timestamp_pb2.Timestamp" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "copy_backup" + }, + "description": "Sample for CopyBackup", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_copy_backup_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CopyBackup_async", + "segments": [ + { + "end": 57, + "start": 27, + "type": "FULL" + }, + { + "end": 57, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 47, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 54, + "start": 48, + "type": "REQUEST_EXECUTION" + }, + { + "end": 58, + "start": 55, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_copy_backup_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.copy_backup", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "CopyBackup" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.CopyBackupRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "backup_id", + "type": "str" + }, + { + "name": "source_backup", + "type": "str" + }, + { + "name": "expire_time", + "type": "google.protobuf.timestamp_pb2.Timestamp" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "copy_backup" + }, + "description": "Sample for CopyBackup", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_copy_backup_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CopyBackup_sync", + "segments": [ + { + "end": 57, + "start": 27, + "type": "FULL" + }, + { + "end": 57, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 47, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 54, + "start": 48, + "type": "REQUEST_EXECUTION" + }, + { + "end": 58, + "start": 55, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_copy_backup_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": 
"google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.create_authorized_view", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CreateAuthorizedView", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "CreateAuthorizedView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.CreateAuthorizedViewRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "authorized_view", + "type": "google.cloud.bigtable_admin_v2.types.AuthorizedView" + }, + { + "name": "authorized_view_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_authorized_view" + }, + "description": "Sample for CreateAuthorizedView", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_create_authorized_view_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CreateAuthorizedView_async", + "segments": [ + { + "end": 56, + "start": 27, + "type": "FULL" + }, + { + "end": 56, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 53, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 57, + "start": 54, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_create_authorized_view_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.create_authorized_view", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CreateAuthorizedView", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "CreateAuthorizedView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.CreateAuthorizedViewRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "authorized_view", + "type": "google.cloud.bigtable_admin_v2.types.AuthorizedView" + }, + { + "name": "authorized_view_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_authorized_view" + }, + "description": "Sample for CreateAuthorizedView", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_create_authorized_view_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CreateAuthorizedView_sync", + "segments": [ + { + "end": 56, + "start": 27, + "type": "FULL" + }, + { + "end": 56, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 53, + "start": 47, + 
"type": "REQUEST_EXECUTION" + }, + { + "end": 57, + "start": 54, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_create_authorized_view_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.create_backup", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "CreateBackup" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.CreateBackupRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "backup_id", + "type": "str" + }, + { + "name": "backup", + "type": "google.cloud.bigtable_admin_v2.types.Backup" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_backup" + }, + "description": "Sample for CreateBackup", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_create_backup_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CreateBackup_async", + "segments": [ + { + "end": 60, + "start": 27, + "type": "FULL" + }, + { + "end": 60, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 50, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 57, + "start": 51, + "type": "REQUEST_EXECUTION" + }, + { + "end": 61, + "start": 58, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_create_backup_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.create_backup", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "CreateBackup" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.CreateBackupRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "backup_id", + "type": "str" + }, + { + "name": "backup", + "type": "google.cloud.bigtable_admin_v2.types.Backup" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_backup" + }, + "description": "Sample for CreateBackup", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_create_backup_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CreateBackup_sync", + "segments": [ + { + "end": 60, + "start": 27, + "type": "FULL" + }, + { 
+ "end": 60, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 50, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 57, + "start": 51, + "type": "REQUEST_EXECUTION" + }, + { + "end": 61, + "start": 58, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_create_backup_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.create_schema_bundle", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CreateSchemaBundle", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "CreateSchemaBundle" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.CreateSchemaBundleRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "schema_bundle_id", + "type": "str" + }, + { + "name": "schema_bundle", + "type": "google.cloud.bigtable_admin_v2.types.SchemaBundle" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_schema_bundle" + }, + "description": "Sample for CreateSchemaBundle", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_create_schema_bundle_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CreateSchemaBundle_async", + "segments": [ + { + "end": 60, + "start": 27, + "type": "FULL" + }, + { + "end": 60, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 50, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 57, + "start": 51, + "type": "REQUEST_EXECUTION" + }, + { + "end": 61, + "start": 58, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_create_schema_bundle_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.create_schema_bundle", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CreateSchemaBundle", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "CreateSchemaBundle" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.CreateSchemaBundleRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "schema_bundle_id", + "type": "str" + }, + { + "name": "schema_bundle", + "type": "google.cloud.bigtable_admin_v2.types.SchemaBundle" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": 
"create_schema_bundle" + }, + "description": "Sample for CreateSchemaBundle", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_create_schema_bundle_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CreateSchemaBundle_sync", + "segments": [ + { + "end": 60, + "start": 27, + "type": "FULL" + }, + { + "end": 60, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 50, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 57, + "start": 51, + "type": "REQUEST_EXECUTION" + }, + { + "end": 61, + "start": 58, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_create_schema_bundle_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.create_table_from_snapshot", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "CreateTableFromSnapshot" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.CreateTableFromSnapshotRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "table_id", + "type": "str" + }, + { + "name": "source_snapshot", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_table_from_snapshot" + }, + "description": "Sample for CreateTableFromSnapshot", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_create_table_from_snapshot_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CreateTableFromSnapshot_async", + "segments": [ + { + "end": 57, + "start": 27, + "type": "FULL" + }, + { + "end": 57, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 47, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 54, + "start": 48, + "type": "REQUEST_EXECUTION" + }, + { + "end": 58, + "start": 55, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_create_table_from_snapshot_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.create_table_from_snapshot", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "CreateTableFromSnapshot" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.CreateTableFromSnapshotRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "table_id", + 
"type": "str" + }, + { + "name": "source_snapshot", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_table_from_snapshot" + }, + "description": "Sample for CreateTableFromSnapshot", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_create_table_from_snapshot_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CreateTableFromSnapshot_sync", + "segments": [ + { + "end": 57, + "start": 27, + "type": "FULL" + }, + { + "end": 57, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 47, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 54, + "start": 48, + "type": "REQUEST_EXECUTION" + }, + { + "end": 58, + "start": 55, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_create_table_from_snapshot_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.create_table", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CreateTable", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "CreateTable" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.CreateTableRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "table_id", + "type": "str" + }, + { + "name": "table", + "type": "google.cloud.bigtable_admin_v2.types.Table" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.Table", + "shortName": "create_table" + }, + "description": "Sample for CreateTable", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_create_table_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CreateTable_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_create_table_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.create_table", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.CreateTable", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" 
+ }, + "shortName": "CreateTable" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.CreateTableRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "table_id", + "type": "str" + }, + { + "name": "table", + "type": "google.cloud.bigtable_admin_v2.types.Table" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.Table", + "shortName": "create_table" + }, + "description": "Sample for CreateTable", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_create_table_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_CreateTable_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_create_table_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.delete_authorized_view", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.DeleteAuthorizedView", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "DeleteAuthorizedView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.DeleteAuthorizedViewRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "shortName": "delete_authorized_view" + }, + "description": "Sample for DeleteAuthorizedView", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_delete_authorized_view_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_DeleteAuthorizedView_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_delete_authorized_view_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.delete_authorized_view", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.DeleteAuthorizedView", + "service": { + "fullName": 
"google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "DeleteAuthorizedView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.DeleteAuthorizedViewRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "shortName": "delete_authorized_view" + }, + "description": "Sample for DeleteAuthorizedView", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_delete_authorized_view_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_DeleteAuthorizedView_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_delete_authorized_view_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.delete_backup", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "DeleteBackup" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.DeleteBackupRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "shortName": "delete_backup" + }, + "description": "Sample for DeleteBackup", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_delete_backup_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_DeleteBackup_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_delete_backup_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.delete_backup", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "DeleteBackup" + }, + "parameters": [ + { + "name": "request", + 
"type": "google.cloud.bigtable_admin_v2.types.DeleteBackupRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "shortName": "delete_backup" + }, + "description": "Sample for DeleteBackup", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_delete_backup_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_DeleteBackup_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_delete_backup_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.delete_schema_bundle", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.DeleteSchemaBundle", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "DeleteSchemaBundle" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.DeleteSchemaBundleRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "shortName": "delete_schema_bundle" + }, + "description": "Sample for DeleteSchemaBundle", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_delete_schema_bundle_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_DeleteSchemaBundle_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_delete_schema_bundle_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.delete_schema_bundle", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.DeleteSchemaBundle", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "DeleteSchemaBundle" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.DeleteSchemaBundleRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + 
"type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "shortName": "delete_schema_bundle" + }, + "description": "Sample for DeleteSchemaBundle", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_delete_schema_bundle_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_DeleteSchemaBundle_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_delete_schema_bundle_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.delete_snapshot", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "DeleteSnapshot" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.DeleteSnapshotRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "shortName": "delete_snapshot" + }, + "description": "Sample for DeleteSnapshot", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_delete_snapshot_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_DeleteSnapshot_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_delete_snapshot_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.delete_snapshot", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "DeleteSnapshot" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.DeleteSnapshotRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" 
+ } + ], + "shortName": "delete_snapshot" + }, + "description": "Sample for DeleteSnapshot", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_delete_snapshot_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_DeleteSnapshot_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_delete_snapshot_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.delete_table", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "DeleteTable" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.DeleteTableRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "shortName": "delete_table" + }, + "description": "Sample for DeleteTable", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_delete_table_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_DeleteTable_async", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_delete_table_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.delete_table", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "DeleteTable" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.DeleteTableRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "shortName": "delete_table" + }, + "description": "Sample for DeleteTable", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_delete_table_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + 
"regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_DeleteTable_sync", + "segments": [ + { + "end": 49, + "start": 27, + "type": "FULL" + }, + { + "end": 49, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_delete_table_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.drop_row_range", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "DropRowRange" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.DropRowRangeRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "shortName": "drop_row_range" + }, + "description": "Sample for DropRowRange", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_drop_row_range_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_DropRowRange_async", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_drop_row_range_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.drop_row_range", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "DropRowRange" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.DropRowRangeRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "shortName": "drop_row_range" + }, + "description": "Sample for DropRowRange", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_drop_row_range_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_DropRowRange_sync", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, 
+ "type": "REQUEST_INITIALIZATION" + }, + { + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_drop_row_range_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.generate_consistency_token", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "GenerateConsistencyToken" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenResponse", + "shortName": "generate_consistency_token" + }, + "description": "Sample for GenerateConsistencyToken", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_generate_consistency_token_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GenerateConsistencyToken_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_generate_consistency_token_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.generate_consistency_token", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "GenerateConsistencyToken" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenResponse", + "shortName": "generate_consistency_token" + }, + "description": "Sample for GenerateConsistencyToken", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_generate_consistency_token_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GenerateConsistencyToken_sync", + "segments": [ + { + "end": 51, + 
"start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_generate_consistency_token_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.get_authorized_view", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GetAuthorizedView", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "GetAuthorizedView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.GetAuthorizedViewRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.AuthorizedView", + "shortName": "get_authorized_view" + }, + "description": "Sample for GetAuthorizedView", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_get_authorized_view_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GetAuthorizedView_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_get_authorized_view_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.get_authorized_view", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GetAuthorizedView", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "GetAuthorizedView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.GetAuthorizedViewRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.AuthorizedView", + "shortName": "get_authorized_view" + }, + "description": "Sample for GetAuthorizedView", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_get_authorized_view_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": 
"bigtableadmin_v2_generated_BigtableTableAdmin_GetAuthorizedView_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_get_authorized_view_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.get_backup", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GetBackup", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "GetBackup" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.GetBackupRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.Backup", + "shortName": "get_backup" + }, + "description": "Sample for GetBackup", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_get_backup_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GetBackup_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_get_backup_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.get_backup", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GetBackup", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "GetBackup" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.GetBackupRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.Backup", + "shortName": "get_backup" + }, + "description": "Sample for GetBackup", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_get_backup_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GetBackup_sync", + "segments": [ + 
{ + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_get_backup_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.get_iam_policy", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GetIamPolicy", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "GetIamPolicy" + }, + "parameters": [ + { + "name": "request", + "type": "google.iam.v1.iam_policy_pb2.GetIamPolicyRequest" + }, + { + "name": "resource", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.iam.v1.policy_pb2.Policy", + "shortName": "get_iam_policy" + }, + "description": "Sample for GetIamPolicy", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_get_iam_policy_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GetIamPolicy_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 41, + "start": 39, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 42, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_get_iam_policy_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.get_iam_policy", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GetIamPolicy", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "GetIamPolicy" + }, + "parameters": [ + { + "name": "request", + "type": "google.iam.v1.iam_policy_pb2.GetIamPolicyRequest" + }, + { + "name": "resource", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.iam.v1.policy_pb2.Policy", + "shortName": "get_iam_policy" + }, + "description": "Sample for GetIamPolicy", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_get_iam_policy_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GetIamPolicy_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + 
"start": 27, + "type": "SHORT" + }, + { + "end": 41, + "start": 39, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 42, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_get_iam_policy_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.get_schema_bundle", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GetSchemaBundle", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "GetSchemaBundle" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.GetSchemaBundleRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.SchemaBundle", + "shortName": "get_schema_bundle" + }, + "description": "Sample for GetSchemaBundle", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_get_schema_bundle_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GetSchemaBundle_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_get_schema_bundle_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.get_schema_bundle", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GetSchemaBundle", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "GetSchemaBundle" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.GetSchemaBundleRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.SchemaBundle", + "shortName": "get_schema_bundle" + }, + "description": "Sample for GetSchemaBundle", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_get_schema_bundle_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GetSchemaBundle_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": 
"FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_get_schema_bundle_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.get_snapshot", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "GetSnapshot" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.GetSnapshotRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.Snapshot", + "shortName": "get_snapshot" + }, + "description": "Sample for GetSnapshot", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_get_snapshot_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GetSnapshot_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_get_snapshot_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.get_snapshot", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "GetSnapshot" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.GetSnapshotRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.Snapshot", + "shortName": "get_snapshot" + }, + "description": "Sample for GetSnapshot", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_get_snapshot_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GetSnapshot_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": 
"SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_get_snapshot_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.get_table", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GetTable", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "GetTable" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.GetTableRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.Table", + "shortName": "get_table" + }, + "description": "Sample for GetTable", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_get_table_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GetTable_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_get_table_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.get_table", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.GetTable", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "GetTable" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.GetTableRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.Table", + "shortName": "get_table" + }, + "description": "Sample for GetTable", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_get_table_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_GetTable_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, 
+ "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_get_table_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.list_authorized_views", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "ListAuthorizedViews" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.ListAuthorizedViewsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListAuthorizedViewsAsyncPager", + "shortName": "list_authorized_views" + }, + "description": "Sample for ListAuthorizedViews", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_list_authorized_views_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_ListAuthorizedViews_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_list_authorized_views_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.list_authorized_views", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.ListAuthorizedViews", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "ListAuthorizedViews" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.ListAuthorizedViewsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListAuthorizedViewsPager", + "shortName": "list_authorized_views" + }, + "description": "Sample for ListAuthorizedViews", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_list_authorized_views_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_ListAuthorizedViews_sync", + "segments": [ + { + "end": 52, + "start": 
27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_list_authorized_views_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.list_backups", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.ListBackups", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "ListBackups" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.ListBackupsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListBackupsAsyncPager", + "shortName": "list_backups" + }, + "description": "Sample for ListBackups", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_list_backups_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_ListBackups_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_list_backups_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.list_backups", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.ListBackups", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "ListBackups" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.ListBackupsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListBackupsPager", + "shortName": "list_backups" + }, + "description": "Sample for ListBackups", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_list_backups_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_ListBackups_sync", + 
"segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_list_backups_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.list_schema_bundles", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.ListSchemaBundles", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "ListSchemaBundles" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.ListSchemaBundlesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListSchemaBundlesAsyncPager", + "shortName": "list_schema_bundles" + }, + "description": "Sample for ListSchemaBundles", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_list_schema_bundles_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_ListSchemaBundles_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_list_schema_bundles_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.list_schema_bundles", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.ListSchemaBundles", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "ListSchemaBundles" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.ListSchemaBundlesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListSchemaBundlesPager", + "shortName": "list_schema_bundles" + }, + "description": "Sample for ListSchemaBundles", + "file": 
"bigtableadmin_v2_generated_bigtable_table_admin_list_schema_bundles_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_ListSchemaBundles_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_list_schema_bundles_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.list_snapshots", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "ListSnapshots" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.ListSnapshotsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListSnapshotsAsyncPager", + "shortName": "list_snapshots" + }, + "description": "Sample for ListSnapshots", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_list_snapshots_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_ListSnapshots_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_list_snapshots_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.list_snapshots", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "ListSnapshots" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.ListSnapshotsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListSnapshotsPager", + 
"shortName": "list_snapshots" + }, + "description": "Sample for ListSnapshots", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_list_snapshots_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_ListSnapshots_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_list_snapshots_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.list_tables", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.ListTables", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "ListTables" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.ListTablesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListTablesAsyncPager", + "shortName": "list_tables" + }, + "description": "Sample for ListTables", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_list_tables_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_ListTables_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_list_tables_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.list_tables", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.ListTables", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "ListTables" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.ListTablesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": 
"google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListTablesPager", + "shortName": "list_tables" + }, + "description": "Sample for ListTables", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_list_tables_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_ListTables_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_list_tables_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.modify_column_families", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "ModifyColumnFamilies" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "modifications", + "type": "MutableSequence[google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest.Modification]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.Table", + "shortName": "modify_column_families" + }, + "description": "Sample for ModifyColumnFamilies", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_modify_column_families_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_ModifyColumnFamilies_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_modify_column_families_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.modify_column_families", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "ModifyColumnFamilies" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest" + }, + { + "name": "name", + 
"type": "str" + }, + { + "name": "modifications", + "type": "MutableSequence[google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest.Modification]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.Table", + "shortName": "modify_column_families" + }, + "description": "Sample for ModifyColumnFamilies", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_modify_column_families_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_ModifyColumnFamilies_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_modify_column_families_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient._restore_table", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "RestoreTable" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.RestoreTableRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "_restore_table" + }, + "description": "Sample for RestoreTable", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_restore_table_async_internal.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_RestoreTable_async_internal", + "segments": [ + { + "end": 57, + "start": 27, + "type": "FULL" + }, + { + "end": 57, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 47, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 54, + "start": 48, + "type": "REQUEST_EXECUTION" + }, + { + "end": 58, + "start": 55, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_restore_table_async_internal.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient._restore_table", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "RestoreTable" + }, + 
"parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.RestoreTableRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "_restore_table" + }, + "description": "Sample for RestoreTable", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_restore_table_sync_internal.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_RestoreTable_sync_internal", + "segments": [ + { + "end": 57, + "start": 27, + "type": "FULL" + }, + { + "end": 57, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 47, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 54, + "start": 48, + "type": "REQUEST_EXECUTION" + }, + { + "end": 58, + "start": 55, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_restore_table_sync_internal.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.set_iam_policy", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.SetIamPolicy", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "SetIamPolicy" + }, + "parameters": [ + { + "name": "request", + "type": "google.iam.v1.iam_policy_pb2.SetIamPolicyRequest" + }, + { + "name": "resource", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.iam.v1.policy_pb2.Policy", + "shortName": "set_iam_policy" + }, + "description": "Sample for SetIamPolicy", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_set_iam_policy_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_SetIamPolicy_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 41, + "start": 39, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 42, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_set_iam_policy_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.set_iam_policy", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.SetIamPolicy", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "SetIamPolicy" + }, + "parameters": [ + { + "name": "request", + "type": 
"google.iam.v1.iam_policy_pb2.SetIamPolicyRequest" + }, + { + "name": "resource", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.iam.v1.policy_pb2.Policy", + "shortName": "set_iam_policy" + }, + "description": "Sample for SetIamPolicy", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_set_iam_policy_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_SetIamPolicy_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 41, + "start": 39, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 42, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_set_iam_policy_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.snapshot_table", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "SnapshotTable" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.SnapshotTableRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "cluster", + "type": "str" + }, + { + "name": "snapshot_id", + "type": "str" + }, + { + "name": "description", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "snapshot_table" + }, + "description": "Sample for SnapshotTable", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_snapshot_table_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_SnapshotTable_async", + "segments": [ + { + "end": 57, + "start": 27, + "type": "FULL" + }, + { + "end": 57, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 47, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 54, + "start": 48, + "type": "REQUEST_EXECUTION" + }, + { + "end": 58, + "start": 55, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_snapshot_table_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.snapshot_table", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": 
"BigtableTableAdmin" + }, + "shortName": "SnapshotTable" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.SnapshotTableRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "cluster", + "type": "str" + }, + { + "name": "snapshot_id", + "type": "str" + }, + { + "name": "description", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "snapshot_table" + }, + "description": "Sample for SnapshotTable", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_snapshot_table_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_SnapshotTable_sync", + "segments": [ + { + "end": 57, + "start": 27, + "type": "FULL" + }, + { + "end": 57, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 47, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 54, + "start": 48, + "type": "REQUEST_EXECUTION" + }, + { + "end": 58, + "start": 55, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_snapshot_table_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.test_iam_permissions", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.TestIamPermissions", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "TestIamPermissions" + }, + "parameters": [ + { + "name": "request", + "type": "google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest" + }, + { + "name": "resource", + "type": "str" + }, + { + "name": "permissions", + "type": "MutableSequence[str]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse", + "shortName": "test_iam_permissions" + }, + "description": "Sample for TestIamPermissions", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_test_iam_permissions_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_TestIamPermissions_async", + "segments": [ + { + "end": 53, + "start": 27, + "type": "FULL" + }, + { + "end": 53, + "start": 27, + "type": "SHORT" + }, + { + "end": 41, + "start": 39, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 47, + "start": 42, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 50, + "start": 48, + "type": "REQUEST_EXECUTION" + }, + { + "end": 54, + "start": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_test_iam_permissions_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": 
"google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.test_iam_permissions", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.TestIamPermissions", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "TestIamPermissions" + }, + "parameters": [ + { + "name": "request", + "type": "google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest" + }, + { + "name": "resource", + "type": "str" + }, + { + "name": "permissions", + "type": "MutableSequence[str]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse", + "shortName": "test_iam_permissions" + }, + "description": "Sample for TestIamPermissions", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_test_iam_permissions_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_TestIamPermissions_sync", + "segments": [ + { + "end": 53, + "start": 27, + "type": "FULL" + }, + { + "end": 53, + "start": 27, + "type": "SHORT" + }, + { + "end": 41, + "start": 39, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 47, + "start": 42, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 50, + "start": 48, + "type": "REQUEST_EXECUTION" + }, + { + "end": 54, + "start": 51, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_test_iam_permissions_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.undelete_table", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "UndeleteTable" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.UndeleteTableRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "undelete_table" + }, + "description": "Sample for UndeleteTable", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_undelete_table_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_UndeleteTable_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_undelete_table_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": 
"google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.undelete_table", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "UndeleteTable" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.UndeleteTableRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "undelete_table" + }, + "description": "Sample for UndeleteTable", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_undelete_table_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_UndeleteTable_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_undelete_table_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.update_authorized_view", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.UpdateAuthorizedView", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "UpdateAuthorizedView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.UpdateAuthorizedViewRequest" + }, + { + "name": "authorized_view", + "type": "google.cloud.bigtable_admin_v2.types.AuthorizedView" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "update_authorized_view" + }, + "description": "Sample for UpdateAuthorizedView", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_update_authorized_view_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_UpdateAuthorizedView_async", + "segments": [ + { + "end": 54, + "start": 27, + "type": "FULL" + }, + { + "end": 54, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 51, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 55, + "start": 52, + "type": "RESPONSE_HANDLING" 
+ } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_update_authorized_view_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.update_authorized_view", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.UpdateAuthorizedView", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "UpdateAuthorizedView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.UpdateAuthorizedViewRequest" + }, + { + "name": "authorized_view", + "type": "google.cloud.bigtable_admin_v2.types.AuthorizedView" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "update_authorized_view" + }, + "description": "Sample for UpdateAuthorizedView", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_update_authorized_view_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_UpdateAuthorizedView_sync", + "segments": [ + { + "end": 54, + "start": 27, + "type": "FULL" + }, + { + "end": 54, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 51, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 55, + "start": 52, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_update_authorized_view_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.update_backup", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "UpdateBackup" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.UpdateBackupRequest" + }, + { + "name": "backup", + "type": "google.cloud.bigtable_admin_v2.types.Backup" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.Backup", + "shortName": "update_backup" + }, + "description": "Sample for UpdateBackup", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_update_backup_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_UpdateBackup_async", + "segments": [ + { + "end": 54, + "start": 27, + "type": "FULL" + }, + { + "end": 54, + 
"start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 48, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 51, + "start": 49, + "type": "REQUEST_EXECUTION" + }, + { + "end": 55, + "start": 52, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_update_backup_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.update_backup", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "UpdateBackup" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.UpdateBackupRequest" + }, + { + "name": "backup", + "type": "google.cloud.bigtable_admin_v2.types.Backup" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.bigtable_admin_v2.types.Backup", + "shortName": "update_backup" + }, + "description": "Sample for UpdateBackup", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_update_backup_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_UpdateBackup_sync", + "segments": [ + { + "end": 54, + "start": 27, + "type": "FULL" + }, + { + "end": 54, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 48, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 51, + "start": 49, + "type": "REQUEST_EXECUTION" + }, + { + "end": 55, + "start": 52, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_update_backup_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.update_schema_bundle", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.UpdateSchemaBundle", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "UpdateSchemaBundle" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.UpdateSchemaBundleRequest" + }, + { + "name": "schema_bundle", + "type": "google.cloud.bigtable_admin_v2.types.SchemaBundle" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "update_schema_bundle" + }, + "description": "Sample for UpdateSchemaBundle", + "file": 
"bigtableadmin_v2_generated_bigtable_table_admin_update_schema_bundle_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_UpdateSchemaBundle_async", + "segments": [ + { + "end": 58, + "start": 27, + "type": "FULL" + }, + { + "end": 58, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 48, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 55, + "start": 49, + "type": "REQUEST_EXECUTION" + }, + { + "end": 59, + "start": 56, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_update_schema_bundle_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.update_schema_bundle", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.UpdateSchemaBundle", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "UpdateSchemaBundle" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.UpdateSchemaBundleRequest" + }, + { + "name": "schema_bundle", + "type": "google.cloud.bigtable_admin_v2.types.SchemaBundle" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "update_schema_bundle" + }, + "description": "Sample for UpdateSchemaBundle", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_update_schema_bundle_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_UpdateSchemaBundle_sync", + "segments": [ + { + "end": 58, + "start": 27, + "type": "FULL" + }, + { + "end": 58, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 48, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 55, + "start": 49, + "type": "REQUEST_EXECUTION" + }, + { + "end": 59, + "start": 56, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_update_schema_bundle_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient", + "shortName": "BaseBigtableTableAdminAsyncClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminAsyncClient.update_table", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.UpdateTable", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "UpdateTable" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.UpdateTableRequest" + }, + { + "name": "table", + "type": "google.cloud.bigtable_admin_v2.types.Table" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + 
}, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "update_table" + }, + "description": "Sample for UpdateTable", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_update_table_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_UpdateTable_async", + "segments": [ + { + "end": 54, + "start": 27, + "type": "FULL" + }, + { + "end": 54, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 51, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 55, + "start": 52, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_update_table_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient", + "shortName": "BaseBigtableTableAdminClient" + }, + "fullName": "google.cloud.bigtable_admin_v2.BaseBigtableTableAdminClient.update_table", + "method": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin.UpdateTable", + "service": { + "fullName": "google.bigtable.admin.v2.BigtableTableAdmin", + "shortName": "BigtableTableAdmin" + }, + "shortName": "UpdateTable" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigtable_admin_v2.types.UpdateTableRequest" + }, + { + "name": "table", + "type": "google.cloud.bigtable_admin_v2.types.Table" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "update_table" + }, + "description": "Sample for UpdateTable", + "file": "bigtableadmin_v2_generated_bigtable_table_admin_update_table_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigtableadmin_v2_generated_BigtableTableAdmin_UpdateTable_sync", + "segments": [ + { + "end": 54, + "start": 27, + "type": "FULL" + }, + { + "end": 54, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 51, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 55, + "start": 52, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigtableadmin_v2_generated_bigtable_table_admin_update_table_sync.py" + } + ] +} diff --git a/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py b/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py index 6265c176871a..1fda056686f9 100644 --- a/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py +++ b/packages/google-cloud-bigtable/scripts/fixup_bigtable_admin_v2_keywords.py @@ -97,7 +97,7 @@ class bigtable_adminCallTransformer(cst.CSTTransformer): 'update_authorized_view': ('authorized_view', 'update_mask', 'ignore_warnings', ), 'update_backup': ('backup', 'update_mask', ), 'update_cluster': ('name', 'location', 'state', 'serve_nodes', 'node_scaling_factor', 'cluster_config', 'default_storage_type', 
'encryption_config', ), - 'update_instance': ('display_name', 'name', 'state', 'type_', 'labels', 'create_time', 'satisfies_pzs', 'satisfies_pzi', ), + 'update_instance': ('display_name', 'name', 'state', 'type_', 'labels', 'create_time', 'satisfies_pzs', 'satisfies_pzi', 'tags', ), 'update_logical_view': ('logical_view', 'update_mask', ), 'update_materialized_view': ('materialized_view', 'update_mask', ), 'update_schema_bundle': ('schema_bundle', 'update_mask', 'ignore_warnings', ), diff --git a/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py b/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py index 5e7302d75380..d47f6b8edbfc 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py @@ -1358,20 +1358,13 @@ async def test_call_metadata(self, include_app_profile, fn_name, fn_args, gapic_ # expect x-goog-request-params tag assert metadata[0][0] == "x-goog-request-params" routing_str = metadata[0][1] - assert self._expected_routing_header(table) in routing_str + assert f"table_name={table.table_name}" in routing_str if include_app_profile: assert "app_profile_id=profile" in routing_str else: # empty app_profile_id should send empty string assert "app_profile_id=" in routing_str - @staticmethod - def _expected_routing_header(table): - """ - the expected routing header for this _ApiSurface type - """ - return f"table_name={table.table_name}" - @CrossSync.convert_class( "TestAuthorizedView", add_mapping_for_name="TestAuthorizedView" @@ -1399,13 +1392,6 @@ def _make_one( client, instance_id, table_id, view_id, app_profile_id, **kwargs ) - @staticmethod - def _expected_routing_header(view): - """ - the expected routing header for this _ApiSurface type - """ - return f"authorized_view_name={view.authorized_view_name}" - @CrossSync.pytest async def test_ctor(self): from google.cloud.bigtable.data._helpers import _WarmedInstanceKey diff --git a/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_client.py b/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_client.py index 38866c9dd540..22ca8ee260fa 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_client.py @@ -1085,17 +1085,12 @@ def test_call_metadata(self, include_app_profile, fn_name, fn_args, gapic_fn): assert len(metadata) == 1 assert metadata[0][0] == "x-goog-request-params" routing_str = metadata[0][1] - assert self._expected_routing_header(table) in routing_str + assert f"table_name={table.table_name}" in routing_str if include_app_profile: assert "app_profile_id=profile" in routing_str else: assert "app_profile_id=" in routing_str - @staticmethod - def _expected_routing_header(table): - """the expected routing header for this _ApiSurface type""" - return f"table_name={table.table_name}" - @CrossSync._Sync_Impl.add_mapping_decorator("TestAuthorizedView") class TestAuthorizedView(CrossSync._Sync_Impl.TestTable): @@ -1120,11 +1115,6 @@ def _make_one( client, instance_id, table_id, view_id, app_profile_id, **kwargs ) - @staticmethod - def _expected_routing_header(view): - """the expected routing header for this _ApiSurface type""" - return f"authorized_view_name={view.authorized_view_name}" - def test_ctor(self): from google.cloud.bigtable.data._helpers import _WarmedInstanceKey diff --git 
a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index 2ad52bf5281f..166f27eb8271 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -20687,6 +20687,7 @@ def test_partial_update_instance_rest_call_success(request_type): "create_time": {"seconds": 751, "nanos": 543}, "satisfies_pzs": True, "satisfies_pzi": True, + "tags": {}, } # The version of a generated dependency at test runtime may differ from the version used during generation. # Delete any fields which are not present in the current runtime dependency diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py index dba535dcc25f..cb78d2b7af30 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py @@ -6852,11 +6852,13 @@ def test_read_rows_routing_parameters_request_1_grpc(): expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", - "app_profile_id": "", } - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) def test_read_rows_routing_parameters_request_2_grpc(): @@ -6878,9 +6880,12 @@ def test_read_rows_routing_parameters_request_2_grpc(): assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) def test_read_rows_routing_parameters_request_3_grpc(): @@ -6894,7 +6899,7 @@ def test_read_rows_routing_parameters_request_3_grpc(): call.return_value = iter([bigtable.ReadRowsResponse()]) client.read_rows( request={ - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" } ) @@ -6903,19 +6908,21 @@ def test_read_rows_routing_parameters_request_3_grpc(): _, args, kw = call.mock_calls[0] request_msg = bigtable.ReadRowsRequest( **{ - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" } ) assert args[0] == request_msg expected_headers = { - "app_profile_id": "", - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4", + "table_name": "projects/sample1/instances/sample2/tables/sample3", } - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert 
all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) def test_sample_row_keys_routing_parameters_request_1_grpc(): @@ -6942,11 +6949,13 @@ def test_sample_row_keys_routing_parameters_request_1_grpc(): expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", - "app_profile_id": "", } - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) def test_sample_row_keys_routing_parameters_request_2_grpc(): @@ -6968,9 +6977,12 @@ def test_sample_row_keys_routing_parameters_request_2_grpc(): assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) def test_sample_row_keys_routing_parameters_request_3_grpc(): @@ -6984,7 +6996,7 @@ def test_sample_row_keys_routing_parameters_request_3_grpc(): call.return_value = iter([bigtable.SampleRowKeysResponse()]) client.sample_row_keys( request={ - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" } ) @@ -6993,19 +7005,21 @@ def test_sample_row_keys_routing_parameters_request_3_grpc(): _, args, kw = call.mock_calls[0] request_msg = bigtable.SampleRowKeysRequest( **{ - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" } ) assert args[0] == request_msg expected_headers = { - "app_profile_id": "", - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4", + "table_name": "projects/sample1/instances/sample2/tables/sample3", } - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) def test_mutate_row_routing_parameters_request_1_grpc(): @@ -7032,11 +7046,13 @@ def test_mutate_row_routing_parameters_request_1_grpc(): expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", - "app_profile_id": "", } - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) def test_mutate_row_routing_parameters_request_2_grpc(): @@ -7058,9 +7074,12 @@ def test_mutate_row_routing_parameters_request_2_grpc(): assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + 
routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) def test_mutate_row_routing_parameters_request_3_grpc(): @@ -7074,7 +7093,7 @@ def test_mutate_row_routing_parameters_request_3_grpc(): call.return_value = bigtable.MutateRowResponse() client.mutate_row( request={ - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" } ) @@ -7083,19 +7102,21 @@ def test_mutate_row_routing_parameters_request_3_grpc(): _, args, kw = call.mock_calls[0] request_msg = bigtable.MutateRowRequest( **{ - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" } ) assert args[0] == request_msg expected_headers = { - "app_profile_id": "", - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4", + "table_name": "projects/sample1/instances/sample2/tables/sample3", } - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) def test_mutate_rows_routing_parameters_request_1_grpc(): @@ -7122,11 +7143,13 @@ def test_mutate_rows_routing_parameters_request_1_grpc(): expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", - "app_profile_id": "", } - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) def test_mutate_rows_routing_parameters_request_2_grpc(): @@ -7148,9 +7171,12 @@ def test_mutate_rows_routing_parameters_request_2_grpc(): assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) def test_mutate_rows_routing_parameters_request_3_grpc(): @@ -7164,7 +7190,7 @@ def test_mutate_rows_routing_parameters_request_3_grpc(): call.return_value = iter([bigtable.MutateRowsResponse()]) client.mutate_rows( request={ - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" } ) @@ -7173,19 +7199,21 @@ def test_mutate_rows_routing_parameters_request_3_grpc(): _, args, kw = call.mock_calls[0] request_msg = bigtable.MutateRowsRequest( **{ - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" } ) assert args[0] == request_msg expected_headers = { - "app_profile_id": "", - "authorized_view_name": 
"projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4", + "table_name": "projects/sample1/instances/sample2/tables/sample3", } - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) def test_check_and_mutate_row_routing_parameters_request_1_grpc(): @@ -7214,11 +7242,13 @@ def test_check_and_mutate_row_routing_parameters_request_1_grpc(): expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", - "app_profile_id": "", } - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) def test_check_and_mutate_row_routing_parameters_request_2_grpc(): @@ -7242,9 +7272,12 @@ def test_check_and_mutate_row_routing_parameters_request_2_grpc(): assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) def test_check_and_mutate_row_routing_parameters_request_3_grpc(): @@ -7260,7 +7293,7 @@ def test_check_and_mutate_row_routing_parameters_request_3_grpc(): call.return_value = bigtable.CheckAndMutateRowResponse() client.check_and_mutate_row( request={ - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" } ) @@ -7269,19 +7302,21 @@ def test_check_and_mutate_row_routing_parameters_request_3_grpc(): _, args, kw = call.mock_calls[0] request_msg = bigtable.CheckAndMutateRowRequest( **{ - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" } ) assert args[0] == request_msg expected_headers = { - "app_profile_id": "", - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4", + "table_name": "projects/sample1/instances/sample2/tables/sample3", } - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) def test_ping_and_warm_routing_parameters_request_1_grpc(): @@ -7306,11 +7341,13 @@ def test_ping_and_warm_routing_parameters_request_1_grpc(): expected_headers = { "name": "projects/sample1/instances/sample2", - "app_profile_id": "", } - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert 
all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) def test_ping_and_warm_routing_parameters_request_2_grpc(): @@ -7332,9 +7369,12 @@ def test_ping_and_warm_routing_parameters_request_2_grpc(): assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) def test_read_modify_write_row_routing_parameters_request_1_grpc(): @@ -7363,11 +7403,13 @@ def test_read_modify_write_row_routing_parameters_request_1_grpc(): expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", - "app_profile_id": "", } - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) def test_read_modify_write_row_routing_parameters_request_2_grpc(): @@ -7393,9 +7435,12 @@ def test_read_modify_write_row_routing_parameters_request_2_grpc(): assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) def test_read_modify_write_row_routing_parameters_request_3_grpc(): @@ -7411,7 +7456,7 @@ def test_read_modify_write_row_routing_parameters_request_3_grpc(): call.return_value = bigtable.ReadModifyWriteRowResponse() client.read_modify_write_row( request={ - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" } ) @@ -7420,19 +7465,21 @@ def test_read_modify_write_row_routing_parameters_request_3_grpc(): _, args, kw = call.mock_calls[0] request_msg = bigtable.ReadModifyWriteRowRequest( **{ - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" } ) assert args[0] == request_msg expected_headers = { - "app_profile_id": "", - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4", + "table_name": "projects/sample1/instances/sample2/tables/sample3", } - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) def test_prepare_query_routing_parameters_request_1_grpc(): @@ -7459,11 +7506,13 @@ def test_prepare_query_routing_parameters_request_1_grpc(): expected_headers = { "name": "projects/sample1/instances/sample2", - "app_profile_id": "", } - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the 
expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) def test_prepare_query_routing_parameters_request_2_grpc(): @@ -7485,9 +7534,12 @@ def test_prepare_query_routing_parameters_request_2_grpc(): assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) def test_execute_query_routing_parameters_request_1_grpc(): @@ -7514,11 +7566,13 @@ def test_execute_query_routing_parameters_request_1_grpc(): expected_headers = { "name": "projects/sample1/instances/sample2", - "app_profile_id": "", } - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) def test_execute_query_routing_parameters_request_2_grpc(): @@ -7540,9 +7594,12 @@ def test_execute_query_routing_parameters_request_2_grpc(): assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) def test_transport_kind_grpc_asyncio(): @@ -7881,11 +7938,13 @@ async def test_read_rows_routing_parameters_request_1_grpc_asyncio(): expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", - "app_profile_id": "", } - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) @pytest.mark.asyncio @@ -7912,9 +7971,12 @@ async def test_read_rows_routing_parameters_request_2_grpc_asyncio(): assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) @pytest.mark.asyncio @@ -7933,7 +7995,7 @@ async def test_read_rows_routing_parameters_request_3_grpc_asyncio(): ) await client.read_rows( request={ - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" } ) @@ -7942,19 +8004,21 @@ async def test_read_rows_routing_parameters_request_3_grpc_asyncio(): _, args, kw = call.mock_calls[0] request_msg = bigtable.ReadRowsRequest( **{ - "authorized_view_name": 
"projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" } ) assert args[0] == request_msg expected_headers = { - "app_profile_id": "", - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4", + "table_name": "projects/sample1/instances/sample2/tables/sample3", } - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) @pytest.mark.asyncio @@ -7986,11 +8050,13 @@ async def test_sample_row_keys_routing_parameters_request_1_grpc_asyncio(): expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", - "app_profile_id": "", } - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) @pytest.mark.asyncio @@ -8017,9 +8083,12 @@ async def test_sample_row_keys_routing_parameters_request_2_grpc_asyncio(): assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) @pytest.mark.asyncio @@ -8038,7 +8107,7 @@ async def test_sample_row_keys_routing_parameters_request_3_grpc_asyncio(): ) await client.sample_row_keys( request={ - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" } ) @@ -8047,19 +8116,21 @@ async def test_sample_row_keys_routing_parameters_request_3_grpc_asyncio(): _, args, kw = call.mock_calls[0] request_msg = bigtable.SampleRowKeysRequest( **{ - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" } ) assert args[0] == request_msg expected_headers = { - "app_profile_id": "", - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4", + "table_name": "projects/sample1/instances/sample2/tables/sample3", } - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) @pytest.mark.asyncio @@ -8090,11 +8161,13 @@ async def test_mutate_row_routing_parameters_request_1_grpc_asyncio(): expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", - "app_profile_id": "", } - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string 
= next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) @pytest.mark.asyncio @@ -8120,9 +8193,12 @@ async def test_mutate_row_routing_parameters_request_2_grpc_asyncio(): assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) @pytest.mark.asyncio @@ -8140,7 +8216,7 @@ async def test_mutate_row_routing_parameters_request_3_grpc_asyncio(): ) await client.mutate_row( request={ - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" } ) @@ -8149,19 +8225,21 @@ async def test_mutate_row_routing_parameters_request_3_grpc_asyncio(): _, args, kw = call.mock_calls[0] request_msg = bigtable.MutateRowRequest( **{ - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" } ) assert args[0] == request_msg expected_headers = { - "app_profile_id": "", - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4", + "table_name": "projects/sample1/instances/sample2/tables/sample3", } - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) @pytest.mark.asyncio @@ -8193,11 +8271,13 @@ async def test_mutate_rows_routing_parameters_request_1_grpc_asyncio(): expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", - "app_profile_id": "", } - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) @pytest.mark.asyncio @@ -8224,9 +8304,12 @@ async def test_mutate_rows_routing_parameters_request_2_grpc_asyncio(): assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) @pytest.mark.asyncio @@ -8245,7 +8328,7 @@ async def test_mutate_rows_routing_parameters_request_3_grpc_asyncio(): ) await client.mutate_rows( request={ - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" } ) @@ -8254,19 +8337,21 @@ async def test_mutate_rows_routing_parameters_request_3_grpc_asyncio(): _, args, kw = call.mock_calls[0] 
request_msg = bigtable.MutateRowsRequest( **{ - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" } ) assert args[0] == request_msg expected_headers = { - "app_profile_id": "", - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4", + "table_name": "projects/sample1/instances/sample2/tables/sample3", } - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) @pytest.mark.asyncio @@ -8301,11 +8386,13 @@ async def test_check_and_mutate_row_routing_parameters_request_1_grpc_asyncio(): expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", - "app_profile_id": "", } - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) @pytest.mark.asyncio @@ -8335,9 +8422,12 @@ async def test_check_and_mutate_row_routing_parameters_request_2_grpc_asyncio(): assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) @pytest.mark.asyncio @@ -8359,7 +8449,7 @@ async def test_check_and_mutate_row_routing_parameters_request_3_grpc_asyncio(): ) await client.check_and_mutate_row( request={ - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" } ) @@ -8368,19 +8458,21 @@ async def test_check_and_mutate_row_routing_parameters_request_3_grpc_asyncio(): _, args, kw = call.mock_calls[0] request_msg = bigtable.CheckAndMutateRowRequest( **{ - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" } ) assert args[0] == request_msg expected_headers = { - "app_profile_id": "", - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4", + "table_name": "projects/sample1/instances/sample2/tables/sample3", } - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) @pytest.mark.asyncio @@ -8411,11 +8503,13 @@ async def test_ping_and_warm_routing_parameters_request_1_grpc_asyncio(): expected_headers = { "name": "projects/sample1/instances/sample2", - "app_profile_id": "", } - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in 
kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) @pytest.mark.asyncio @@ -8441,9 +8535,12 @@ async def test_ping_and_warm_routing_parameters_request_2_grpc_asyncio(): assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) @pytest.mark.asyncio @@ -8476,11 +8573,13 @@ async def test_read_modify_write_row_routing_parameters_request_1_grpc_asyncio() expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", - "app_profile_id": "", } - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) @pytest.mark.asyncio @@ -8510,9 +8609,12 @@ async def test_read_modify_write_row_routing_parameters_request_2_grpc_asyncio() assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) @pytest.mark.asyncio @@ -8532,7 +8634,7 @@ async def test_read_modify_write_row_routing_parameters_request_3_grpc_asyncio() ) await client.read_modify_write_row( request={ - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" } ) @@ -8541,19 +8643,21 @@ async def test_read_modify_write_row_routing_parameters_request_3_grpc_asyncio() _, args, kw = call.mock_calls[0] request_msg = bigtable.ReadModifyWriteRowRequest( **{ - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" } ) assert args[0] == request_msg expected_headers = { - "app_profile_id": "", - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4", + "table_name": "projects/sample1/instances/sample2/tables/sample3", } - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) @pytest.mark.asyncio @@ -8586,11 +8690,13 @@ async def test_prepare_query_routing_parameters_request_1_grpc_asyncio(): expected_headers = { "name": "projects/sample1/instances/sample2", - "app_profile_id": "", } - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the 
expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) @pytest.mark.asyncio @@ -8618,9 +8724,12 @@ async def test_prepare_query_routing_parameters_request_2_grpc_asyncio(): assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) @pytest.mark.asyncio @@ -8652,11 +8761,13 @@ async def test_execute_query_routing_parameters_request_1_grpc_asyncio(): expected_headers = { "name": "projects/sample1/instances/sample2", - "app_profile_id": "", } - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) @pytest.mark.asyncio @@ -8683,9 +8794,12 @@ async def test_execute_query_routing_parameters_request_2_grpc_asyncio(): assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) def test_transport_kind_rest(): @@ -10334,11 +10448,13 @@ def test_read_rows_routing_parameters_request_1_rest(): expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", - "app_profile_id": "", } - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) def test_read_rows_routing_parameters_request_2_rest(): @@ -10359,9 +10475,12 @@ def test_read_rows_routing_parameters_request_2_rest(): assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) def test_read_rows_routing_parameters_request_3_rest(): @@ -10374,7 +10493,7 @@ def test_read_rows_routing_parameters_request_3_rest(): with mock.patch.object(type(client.transport.read_rows), "__call__") as call: client.read_rows( request={ - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" } ) @@ -10383,19 +10502,21 @@ def test_read_rows_routing_parameters_request_3_rest(): _, args, kw = call.mock_calls[0] request_msg = bigtable.ReadRowsRequest( **{ - 
"authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" } ) assert args[0] == request_msg expected_headers = { - "app_profile_id": "", - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4", + "table_name": "projects/sample1/instances/sample2/tables/sample3", } - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) def test_sample_row_keys_routing_parameters_request_1_rest(): @@ -10421,11 +10542,13 @@ def test_sample_row_keys_routing_parameters_request_1_rest(): expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", - "app_profile_id": "", } - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) def test_sample_row_keys_routing_parameters_request_2_rest(): @@ -10446,9 +10569,12 @@ def test_sample_row_keys_routing_parameters_request_2_rest(): assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) def test_sample_row_keys_routing_parameters_request_3_rest(): @@ -10461,7 +10587,7 @@ def test_sample_row_keys_routing_parameters_request_3_rest(): with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: client.sample_row_keys( request={ - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" } ) @@ -10470,19 +10596,21 @@ def test_sample_row_keys_routing_parameters_request_3_rest(): _, args, kw = call.mock_calls[0] request_msg = bigtable.SampleRowKeysRequest( **{ - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" } ) assert args[0] == request_msg expected_headers = { - "app_profile_id": "", - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4", + "table_name": "projects/sample1/instances/sample2/tables/sample3", } - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) def test_mutate_row_routing_parameters_request_1_rest(): @@ -10508,11 +10636,13 @@ def test_mutate_row_routing_parameters_request_1_rest(): expected_headers = { "table_name": 
"projects/sample1/instances/sample2/tables/sample3", - "app_profile_id": "", } - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) def test_mutate_row_routing_parameters_request_2_rest(): @@ -10533,9 +10663,12 @@ def test_mutate_row_routing_parameters_request_2_rest(): assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) def test_mutate_row_routing_parameters_request_3_rest(): @@ -10548,7 +10681,7 @@ def test_mutate_row_routing_parameters_request_3_rest(): with mock.patch.object(type(client.transport.mutate_row), "__call__") as call: client.mutate_row( request={ - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" } ) @@ -10557,19 +10690,21 @@ def test_mutate_row_routing_parameters_request_3_rest(): _, args, kw = call.mock_calls[0] request_msg = bigtable.MutateRowRequest( **{ - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" } ) assert args[0] == request_msg expected_headers = { - "app_profile_id": "", - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4", + "table_name": "projects/sample1/instances/sample2/tables/sample3", } - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) def test_mutate_rows_routing_parameters_request_1_rest(): @@ -10595,11 +10730,13 @@ def test_mutate_rows_routing_parameters_request_1_rest(): expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", - "app_profile_id": "", } - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) def test_mutate_rows_routing_parameters_request_2_rest(): @@ -10620,9 +10757,12 @@ def test_mutate_rows_routing_parameters_request_2_rest(): assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) def test_mutate_rows_routing_parameters_request_3_rest(): @@ -10635,7 
+10775,7 @@ def test_mutate_rows_routing_parameters_request_3_rest(): with mock.patch.object(type(client.transport.mutate_rows), "__call__") as call: client.mutate_rows( request={ - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" } ) @@ -10644,19 +10784,21 @@ def test_mutate_rows_routing_parameters_request_3_rest(): _, args, kw = call.mock_calls[0] request_msg = bigtable.MutateRowsRequest( **{ - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" } ) assert args[0] == request_msg expected_headers = { - "app_profile_id": "", - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4", + "table_name": "projects/sample1/instances/sample2/tables/sample3", } - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) def test_check_and_mutate_row_routing_parameters_request_1_rest(): @@ -10684,11 +10826,13 @@ def test_check_and_mutate_row_routing_parameters_request_1_rest(): expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", - "app_profile_id": "", } - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) def test_check_and_mutate_row_routing_parameters_request_2_rest(): @@ -10711,9 +10855,12 @@ def test_check_and_mutate_row_routing_parameters_request_2_rest(): assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) def test_check_and_mutate_row_routing_parameters_request_3_rest(): @@ -10728,7 +10875,7 @@ def test_check_and_mutate_row_routing_parameters_request_3_rest(): ) as call: client.check_and_mutate_row( request={ - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" } ) @@ -10737,19 +10884,21 @@ def test_check_and_mutate_row_routing_parameters_request_3_rest(): _, args, kw = call.mock_calls[0] request_msg = bigtable.CheckAndMutateRowRequest( **{ - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" } ) assert args[0] == request_msg expected_headers = { - "app_profile_id": "", - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4", + "table_name": "projects/sample1/instances/sample2/tables/sample3", } - assert ( - 
gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) def test_ping_and_warm_routing_parameters_request_1_rest(): @@ -10773,11 +10922,13 @@ def test_ping_and_warm_routing_parameters_request_1_rest(): expected_headers = { "name": "projects/sample1/instances/sample2", - "app_profile_id": "", } - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) def test_ping_and_warm_routing_parameters_request_2_rest(): @@ -10798,9 +10949,12 @@ def test_ping_and_warm_routing_parameters_request_2_rest(): assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) def test_read_modify_write_row_routing_parameters_request_1_rest(): @@ -10828,11 +10982,13 @@ def test_read_modify_write_row_routing_parameters_request_1_rest(): expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", - "app_profile_id": "", } - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) def test_read_modify_write_row_routing_parameters_request_2_rest(): @@ -10857,9 +11013,12 @@ def test_read_modify_write_row_routing_parameters_request_2_rest(): assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) def test_read_modify_write_row_routing_parameters_request_3_rest(): @@ -10874,7 +11033,7 @@ def test_read_modify_write_row_routing_parameters_request_3_rest(): ) as call: client.read_modify_write_row( request={ - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" } ) @@ -10883,19 +11042,21 @@ def test_read_modify_write_row_routing_parameters_request_3_rest(): _, args, kw = call.mock_calls[0] request_msg = bigtable.ReadModifyWriteRowRequest( **{ - "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4" + "authorized_view_name": "projects/sample1/instances/sample2/tables/sample3/sample4" } ) assert args[0] == request_msg expected_headers = { - "app_profile_id": "", - "authorized_view_name": 
"projects/sample1/instances/sample2/tables/sample3/authorizedViews/sample4", + "table_name": "projects/sample1/instances/sample2/tables/sample3", } - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) def test_prepare_query_routing_parameters_request_1_rest(): @@ -10921,11 +11082,13 @@ def test_prepare_query_routing_parameters_request_1_rest(): expected_headers = { "name": "projects/sample1/instances/sample2", - "app_profile_id": "", } - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) def test_prepare_query_routing_parameters_request_2_rest(): @@ -10946,9 +11109,12 @@ def test_prepare_query_routing_parameters_request_2_rest(): assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) def test_execute_query_routing_parameters_request_1_rest(): @@ -10974,11 +11140,13 @@ def test_execute_query_routing_parameters_request_1_rest(): expected_headers = { "name": "projects/sample1/instances/sample2", - "app_profile_id": "", } - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) def test_execute_query_routing_parameters_request_2_rest(): @@ -10999,9 +11167,12 @@ def test_execute_query_routing_parameters_request_2_rest(): assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - assert ( - gapic_v1.routing_header.to_grpc_metadata(expected_headers) in kw["metadata"] + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) def test_transport_grpc_default(): From bfb903ed5daa9f796c11967530eaa85bc1d3951c Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Thu, 7 Aug 2025 14:47:28 -0700 Subject: [PATCH 878/892] feat: expose universe_domain for tpc (#1150) --- .../cloud/bigtable/data/_async/client.py | 32 +++++++ .../bigtable/data/_sync_autogen/client.py | 24 +++++ packages/google-cloud-bigtable/setup.py | 2 +- .../testing/constraints-3.7.txt | 2 +- .../testing/constraints-3.8.txt | 2 +- .../tests/unit/data/_async/test_client.py | 92 +++++++++++++++++++ .../unit/data/_sync_autogen/test_client.py | 75 +++++++++++++++ 7 files changed, 226 insertions(+), 3 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py 
index 6ee21b554608..d7eac2a4dc22 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py @@ -211,6 +211,20 @@ def __init__( *args, **kwargs, channel=custom_channel ), ) + if ( + credentials + and credentials.universe_domain != self.universe_domain + and self._emulator_host is None + ): + # validate that the universe domain of the credentials matches the + # universe domain configured in client_options + raise ValueError( + f"The configured universe domain ({self.universe_domain}) does " + "not match the universe domain found in the credentials " + f"({self._credentials.universe_domain}). If you haven't " + "configured the universe domain explicitly, `googleapis.com` " + "is the default." + ) self._is_closed = CrossSync.Event() self.transport = cast(TransportType, self._gapic_client.transport) # keep track of active instances to for warmup on channel refresh @@ -235,6 +249,24 @@ def __init__( stacklevel=2, ) + @property + def universe_domain(self) -> str: + """Return the universe domain used by the client instance. + + Returns: + str: The universe domain used by the client instance. + """ + return self._gapic_client.universe_domain + + @property + def api_endpoint(self) -> str: + """Return the API endpoint used by the client instance. + + Returns: + str: The API endpoint used by the client instance. + """ + return self._gapic_client.api_endpoint + @staticmethod def _client_version() -> str: """ diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/client.py index b36bf359a952..a7e07e20dfa2 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/client.py @@ -158,6 +158,14 @@ def __init__( *args, **kwargs, channel=custom_channel ), ) + if ( + credentials + and credentials.universe_domain != self.universe_domain + and (self._emulator_host is None) + ): + raise ValueError( + f"The configured universe domain ({self.universe_domain}) does not match the universe domain found in the credentials ({self._credentials.universe_domain}). If you haven't configured the universe domain explicitly, `googleapis.com` is the default." + ) self._is_closed = CrossSync._Sync_Impl.Event() self.transport = cast(TransportType, self._gapic_client.transport) self._active_instances: Set[_WarmedInstanceKey] = set() @@ -179,6 +187,22 @@ def __init__( stacklevel=2, ) + @property + def universe_domain(self) -> str: + """Return the universe domain used by the client instance. + + Returns: + str: The universe domain used by the client instance.""" + return self._gapic_client.universe_domain + + @property + def api_endpoint(self) -> str: + """Return the API endpoint used by the client instance. 
+ + Returns: + str: The API endpoint used by the client instance.""" + return self._gapic_client.api_endpoint + @staticmethod def _client_version() -> str: """Helper function to return the client version string for this client""" diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index e7113a6117b8..3cb9d465dafe 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -39,7 +39,7 @@ dependencies = [ "google-api-core[grpc] >= 2.17.0, <3.0.0", "google-cloud-core >= 1.4.4, <3.0.0", - "google-auth >= 2.14.1, <3.0.0,!=2.24.0,!=2.25.0", + "google-auth >= 2.23.0, <3.0.0,!=2.24.0,!=2.25.0", "grpc-google-iam-v1 >= 0.12.4, <1.0.0", "proto-plus >= 1.22.3, <2.0.0", "proto-plus >= 1.25.0, <2.0.0; python_version>='3.13'", diff --git a/packages/google-cloud-bigtable/testing/constraints-3.7.txt b/packages/google-cloud-bigtable/testing/constraints-3.7.txt index ec7a8c807a10..023133380894 100644 --- a/packages/google-cloud-bigtable/testing/constraints-3.7.txt +++ b/packages/google-cloud-bigtable/testing/constraints-3.7.txt @@ -6,7 +6,7 @@ # e.g., if setup.py has "foo >= 1.14.0, < 2.0.0dev", # Then this file should have foo==1.14.0 google-api-core==2.17.0 -google-auth==2.14.1 +google-auth==2.23.0 google-cloud-core==2.0.0 grpc-google-iam-v1==0.12.4 proto-plus==1.22.3 diff --git a/packages/google-cloud-bigtable/testing/constraints-3.8.txt b/packages/google-cloud-bigtable/testing/constraints-3.8.txt index 1c867060d068..a7e4616c9efb 100644 --- a/packages/google-cloud-bigtable/testing/constraints-3.8.txt +++ b/packages/google-cloud-bigtable/testing/constraints-3.8.txt @@ -6,7 +6,7 @@ # e.g., if setup.py has "foo >= 1.14.0, < 2.0.0dev", # Then this file should have foo==1.14.0 google-api-core==2.17.0 -google-auth==2.14.1 +google-auth==2.23.0 google-cloud-core==2.0.0 grpc-google-iam-v1==0.12.4 proto-plus==1.22.3 diff --git a/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py b/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py index d47f6b8edbfc..97179a3b15ef 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py @@ -26,6 +26,7 @@ from google.cloud.bigtable_v2.types import ReadRowsResponse from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery from google.api_core import exceptions as core_exceptions +from google.api_core import client_options from google.cloud.bigtable.data.exceptions import InvalidChunk from google.cloud.bigtable.data.exceptions import _MutateRowsIncomplete from google.cloud.bigtable.data.mutations import DeleteAllFromRow @@ -1038,6 +1039,97 @@ def test_client_ctor_sync(self): assert client.project == "project-id" assert client._channel_refresh_task is None + @CrossSync.pytest + async def test_default_universe_domain(self): + """ + When not passed, universe_domain should default to googleapis.com + """ + async with self._make_client(project="project-id", credentials=None) as client: + assert client.universe_domain == "googleapis.com" + assert client.api_endpoint == "bigtable.googleapis.com" + + @CrossSync.pytest + async def test_custom_universe_domain(self): + """test with a customized universe domain value and emulator enabled""" + universe_domain = "test-universe.test" + options = client_options.ClientOptions(universe_domain=universe_domain) + async with self._make_client( + project="project_id", + client_options=options, + use_emulator=True, + 
credentials=None, + ) as client: + assert client.universe_domain == universe_domain + assert client.api_endpoint == f"bigtable.{universe_domain}" + + @CrossSync.pytest + async def test_configured_universe_domain_matches_GDU(self): + """that configured universe domain succeeds with matched GDU credentials.""" + universe_domain = "googleapis.com" + options = client_options.ClientOptions(universe_domain=universe_domain) + async with self._make_client( + project="project_id", client_options=options, credentials=None + ) as client: + assert client.universe_domain == "googleapis.com" + assert client.api_endpoint == "bigtable.googleapis.com" + + @CrossSync.pytest + async def test_credential_universe_domain_matches_GDU(self): + """Test with credentials""" + creds = AnonymousCredentials() + creds._universe_domain = "googleapis.com" + async with self._make_client(project="project_id", credentials=creds) as client: + assert client.universe_domain == "googleapis.com" + assert client.api_endpoint == "bigtable.googleapis.com" + + @CrossSync.pytest + async def test_anomynous_credential_universe_domain(self): + """Anomynopus credentials should use default universe domain""" + creds = AnonymousCredentials() + async with self._make_client(project="project_id", credentials=creds) as client: + assert client.universe_domain == "googleapis.com" + assert client.api_endpoint == "bigtable.googleapis.com" + + @CrossSync.pytest + async def test_configured_universe_domain_mismatched_credentials(self): + """Test that configured universe domain errors with mismatched universe + domain credentials. + """ + universe_domain = "test-universe.test" + options = client_options.ClientOptions(universe_domain=universe_domain) + creds = AnonymousCredentials() + creds._universe_domain = "different-universe" + with pytest.raises(ValueError) as exc: + self._make_client( + project="project_id", + client_options=options, + use_emulator=False, + credentials=creds, + ) + err_msg = ( + f"The configured universe domain ({universe_domain}) does " + "not match the universe domain found in the credentials " + f"({creds.universe_domain}). If you haven't " + "configured the universe domain explicitly, `googleapis.com` " + "is the default." + ) + assert exc.value.args[0] == err_msg + + @CrossSync.pytest + async def test_configured_universe_domain_matches_credentials(self): + """Test that configured universe domain succeeds with matching universe + domain credentials. 
+ """ + universe_domain = "test-universe.test" + options = client_options.ClientOptions(universe_domain=universe_domain) + creds = AnonymousCredentials() + creds._universe_domain = universe_domain + async with self._make_client( + project="project_id", credentials=creds, client_options=options + ) as client: + assert client.universe_domain == universe_domain + assert client.api_endpoint == f"bigtable.{universe_domain}" + @CrossSync.convert_class("TestTable", add_mapping_for_name="TestTable") class TestTableAsync: diff --git a/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_client.py b/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_client.py index 22ca8ee260fa..6012a10d3985 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_client.py @@ -25,6 +25,7 @@ from google.cloud.bigtable_v2.types import ReadRowsResponse from google.cloud.bigtable.data.read_rows_query import ReadRowsQuery from google.api_core import exceptions as core_exceptions +from google.api_core import client_options from google.cloud.bigtable.data.exceptions import InvalidChunk from google.cloud.bigtable.data.exceptions import _MutateRowsIncomplete from google.cloud.bigtable.data.mutations import DeleteAllFromRow @@ -836,6 +837,80 @@ def test_context_manager(self): close_mock.assert_called_once() true_close() + def test_default_universe_domain(self): + """When not passed, universe_domain should default to googleapis.com""" + with self._make_client(project="project-id", credentials=None) as client: + assert client.universe_domain == "googleapis.com" + assert client.api_endpoint == "bigtable.googleapis.com" + + def test_custom_universe_domain(self): + """test with a customized universe domain value and emulator enabled""" + universe_domain = "test-universe.test" + options = client_options.ClientOptions(universe_domain=universe_domain) + with self._make_client( + project="project_id", + client_options=options, + use_emulator=True, + credentials=None, + ) as client: + assert client.universe_domain == universe_domain + assert client.api_endpoint == f"bigtable.{universe_domain}" + + def test_configured_universe_domain_matches_GDU(self): + """that configured universe domain succeeds with matched GDU credentials.""" + universe_domain = "googleapis.com" + options = client_options.ClientOptions(universe_domain=universe_domain) + with self._make_client( + project="project_id", client_options=options, credentials=None + ) as client: + assert client.universe_domain == "googleapis.com" + assert client.api_endpoint == "bigtable.googleapis.com" + + def test_credential_universe_domain_matches_GDU(self): + """Test with credentials""" + creds = AnonymousCredentials() + creds._universe_domain = "googleapis.com" + with self._make_client(project="project_id", credentials=creds) as client: + assert client.universe_domain == "googleapis.com" + assert client.api_endpoint == "bigtable.googleapis.com" + + def test_anomynous_credential_universe_domain(self): + """Anomynopus credentials should use default universe domain""" + creds = AnonymousCredentials() + with self._make_client(project="project_id", credentials=creds) as client: + assert client.universe_domain == "googleapis.com" + assert client.api_endpoint == "bigtable.googleapis.com" + + def test_configured_universe_domain_mismatched_credentials(self): + """Test that configured universe domain errors with mismatched universe + domain credentials.""" + 
universe_domain = "test-universe.test" + options = client_options.ClientOptions(universe_domain=universe_domain) + creds = AnonymousCredentials() + creds._universe_domain = "different-universe" + with pytest.raises(ValueError) as exc: + self._make_client( + project="project_id", + client_options=options, + use_emulator=False, + credentials=creds, + ) + err_msg = f"The configured universe domain ({universe_domain}) does not match the universe domain found in the credentials ({creds.universe_domain}). If you haven't configured the universe domain explicitly, `googleapis.com` is the default." + assert exc.value.args[0] == err_msg + + def test_configured_universe_domain_matches_credentials(self): + """Test that configured universe domain succeeds with matching universe + domain credentials.""" + universe_domain = "test-universe.test" + options = client_options.ClientOptions(universe_domain=universe_domain) + creds = AnonymousCredentials() + creds._universe_domain = universe_domain + with self._make_client( + project="project_id", credentials=creds, client_options=options + ) as client: + assert client.universe_domain == universe_domain + assert client.api_endpoint == f"bigtable.{universe_domain}" + @CrossSync._Sync_Impl.add_mapping_decorator("TestTable") class TestTable: From b672c5853caff4f8dbbda62afe7a743924e8c1b5 Mon Sep 17 00:00:00 2001 From: Kevin Zheng <147537668+gkevinzheng@users.noreply.github.com> Date: Mon, 11 Aug 2025 16:09:03 -0400 Subject: [PATCH 879/892] test: Unskipped wait_for_consistency system tests in emulator (#1186) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * test: Unskipped wait_for_consistency system tests in emulator * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * Added sync surface * Actual sync surface * Used pytest.mark.skipif --------- Co-authored-by: Owl Bot --- .../system/admin_overlay/test_system_async.py | 37 ++++++++++++------- .../admin_overlay/test_system_autogen.py | 33 +++++++++++------ 2 files changed, 45 insertions(+), 25 deletions(-) diff --git a/packages/google-cloud-bigtable/tests/system/admin_overlay/test_system_async.py b/packages/google-cloud-bigtable/tests/system/admin_overlay/test_system_async.py index 8dea4f5f1461..aa412569edd8 100644 --- a/packages/google-cloud-bigtable/tests/system/admin_overlay/test_system_async.py +++ b/packages/google-cloud-bigtable/tests/system/admin_overlay/test_system_async.py @@ -143,20 +143,27 @@ async def create_instance( default_storage_type=storage_type, ) - create_instance_request = admin_v2.CreateInstanceRequest( - parent=instance_admin_client.common_project_path(project_id), - instance_id=instance_id, - instance=admin_v2.Instance( - display_name=instance_id[ - :30 - ], # truncate to 30 characters because of character limit - ), - clusters=clusters, - ) - operation = await instance_admin_client.create_instance(create_instance_request) - instance = await operation.result() + # Instance and cluster creation are currently unsupported in the Bigtable emulator + if os.getenv(BIGTABLE_EMULATOR): + # All we need for system tests so far is the instance name. 
+ instance = admin_v2.Instance( + name=instance_admin_client.instance_path(project_id, instance_id), + ) + else: + create_instance_request = admin_v2.CreateInstanceRequest( + parent=instance_admin_client.common_project_path(project_id), + instance_id=instance_id, + instance=admin_v2.Instance( + display_name=instance_id[ + :30 + ], # truncate to 30 characters because of character limit + ), + clusters=clusters, + ) + operation = await instance_admin_client.create_instance(create_instance_request) + instance = await operation.result() - instances_to_delete.append(instance) + instances_to_delete.append(instance) # Create a table within the instance create_table_request = admin_v2.CreateTableRequest( @@ -272,6 +279,10 @@ async def assert_table_cell_value_equal_to( } ) @CrossSync.pytest +@pytest.mark.skipif( + os.getenv(BIGTABLE_EMULATOR), + reason="Backups are not supported in the Bigtable emulator", +) @pytest.mark.parametrize( "second_instance_storage_type,expect_optimize_operation", [ diff --git a/packages/google-cloud-bigtable/tests/system/admin_overlay/test_system_autogen.py b/packages/google-cloud-bigtable/tests/system/admin_overlay/test_system_autogen.py index 21e4aff3cfc2..4fde3571fa7e 100644 --- a/packages/google-cloud-bigtable/tests/system/admin_overlay/test_system_autogen.py +++ b/packages/google-cloud-bigtable/tests/system/admin_overlay/test_system_autogen.py @@ -113,15 +113,20 @@ def create_instance( location=instance_admin_client.common_location_path(project_id, location), default_storage_type=storage_type, ) - create_instance_request = admin_v2.CreateInstanceRequest( - parent=instance_admin_client.common_project_path(project_id), - instance_id=instance_id, - instance=admin_v2.Instance(display_name=instance_id[:30]), - clusters=clusters, - ) - operation = instance_admin_client.create_instance(create_instance_request) - instance = operation.result() - instances_to_delete.append(instance) + if os.getenv(BIGTABLE_EMULATOR): + instance = admin_v2.Instance( + name=instance_admin_client.instance_path(project_id, instance_id) + ) + else: + create_instance_request = admin_v2.CreateInstanceRequest( + parent=instance_admin_client.common_project_path(project_id), + instance_id=instance_id, + instance=admin_v2.Instance(display_name=instance_id[:30]), + clusters=clusters, + ) + operation = instance_admin_client.create_instance(create_instance_request) + instance = operation.result() + instances_to_delete.append(instance) create_table_request = admin_v2.CreateTableRequest( parent=instance_admin_client.instance_path(project_id, instance_id), table_id=TEST_TABLE_NAME, @@ -201,6 +206,10 @@ def assert_table_cell_value_equal_to( assert latest_cell.value.decode("utf-8") == value +@pytest.mark.skipif( + os.getenv(BIGTABLE_EMULATOR), + reason="Backups are not supported in the Bigtable emulator", +) @pytest.mark.parametrize( "second_instance_storage_type,expect_optimize_operation", [(admin_v2.StorageType.HDD, False), (admin_v2.StorageType.SSD, True)], @@ -215,7 +224,7 @@ def test_optimize_restored_table( second_instance_storage_type, expect_optimize_operation, ): - instance_with_backup, table_to_backup = create_instance( + (instance_with_backup, table_to_backup) = create_instance( instance_admin_client, table_admin_client, data_client, @@ -223,7 +232,7 @@ def test_optimize_restored_table( instances_to_delete, admin_v2.StorageType.HDD, ) - instance_to_restore, _ = create_instance( + (instance_to_restore, _) = create_instance( instance_admin_client, table_admin_client, data_client, @@ -273,7 +282,7 @@ 
def test_wait_for_consistency( instances_to_delete, admin_overlay_project_id, ): - instance, table = create_instance( + (instance, table) = create_instance( instance_admin_client, table_admin_client, data_client, From d51088cf4c974fa4d7f3e3303a205ea5feabfae8 Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Mon, 25 Aug 2025 17:43:31 -0700 Subject: [PATCH 880/892] fix: refactor channel refresh (#1174) --- .../data/_async/_swappable_channel.py | 139 ++++++++++++++++++ .../cloud/bigtable/data/_async/client.py | 105 ++++++++----- .../data/_sync_autogen/_swappable_channel.py | 96 ++++++++++++ .../bigtable/data/_sync_autogen/client.py | 79 +++++++--- .../_async/execute_query_iterator.py | 4 +- .../_sync_autogen/execute_query_iterator.py | 4 +- .../bigtable/transports/grpc_asyncio.py | 1 - .../tests/system/data/test_system_async.py | 35 ++++- .../tests/system/data/test_system_autogen.py | 23 ++- .../data/_async/test__swappable_channel.py | 135 +++++++++++++++++ .../tests/unit/data/_async/test_client.py | 80 +++++----- .../_sync_autogen/test__swappable_channel.py | 100 +++++++++++++ .../unit/data/_sync_autogen/test_client.py | 68 ++++----- 13 files changed, 712 insertions(+), 157 deletions(-) create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_swappable_channel.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/_swappable_channel.py create mode 100644 packages/google-cloud-bigtable/tests/unit/data/_async/test__swappable_channel.py create mode 100644 packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test__swappable_channel.py diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_swappable_channel.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_swappable_channel.py new file mode 100644 index 000000000000..bbc9a0d47ec1 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/_swappable_channel.py @@ -0,0 +1,139 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import Callable + +from google.cloud.bigtable.data._cross_sync import CrossSync + +from grpc import ChannelConnectivity + +if CrossSync.is_async: + from grpc.aio import Channel +else: + from grpc import Channel + +__CROSS_SYNC_OUTPUT__ = "google.cloud.bigtable.data._sync_autogen._swappable_channel" + + +@CrossSync.convert_class(sync_name="_WrappedChannel", rm_aio=True) +class _AsyncWrappedChannel(Channel): + """ + A wrapper around a gRPC channel. All methods are passed + through to the underlying channel. 
+ """ + + def __init__(self, channel: Channel): + self._channel = channel + + def unary_unary(self, *args, **kwargs): + return self._channel.unary_unary(*args, **kwargs) + + def unary_stream(self, *args, **kwargs): + return self._channel.unary_stream(*args, **kwargs) + + def stream_unary(self, *args, **kwargs): + return self._channel.stream_unary(*args, **kwargs) + + def stream_stream(self, *args, **kwargs): + return self._channel.stream_stream(*args, **kwargs) + + async def channel_ready(self): + return await self._channel.channel_ready() + + @CrossSync.convert( + sync_name="__enter__", replace_symbols={"__aenter__": "__enter__"} + ) + async def __aenter__(self): + await self._channel.__aenter__() + return self + + @CrossSync.convert(sync_name="__exit__", replace_symbols={"__aexit__": "__exit__"}) + async def __aexit__(self, exc_type, exc_val, exc_tb): + return await self._channel.__aexit__(exc_type, exc_val, exc_tb) + + def get_state(self, try_to_connect: bool = False) -> ChannelConnectivity: + return self._channel.get_state(try_to_connect=try_to_connect) + + async def wait_for_state_change(self, last_observed_state): + return await self._channel.wait_for_state_change(last_observed_state) + + def __getattr__(self, name): + return getattr(self._channel, name) + + async def close(self, grace=None): + if CrossSync.is_async: + return await self._channel.close(grace=grace) + else: + # grace not supported by sync version + return self._channel.close() + + if not CrossSync.is_async: + # add required sync methods + + def subscribe(self, callback, try_to_connect=False): + return self._channel.subscribe(callback, try_to_connect) + + def unsubscribe(self, callback): + return self._channel.unsubscribe(callback) + + +@CrossSync.convert_class( + sync_name="SwappableChannel", + replace_symbols={"_AsyncWrappedChannel": "_WrappedChannel"}, +) +class AsyncSwappableChannel(_AsyncWrappedChannel): + """ + Provides a grpc channel wrapper, that allows the internal channel to be swapped out + + Args: + - channel_fn: a nullary function that returns a new channel instance. + It should be a partial with all channel configuration arguments built-in + """ + + def __init__(self, channel_fn: Callable[[], Channel]): + self._channel_fn = channel_fn + self._channel = channel_fn() + + def create_channel(self) -> Channel: + """ + Create a fresh channel using the stored `channel_fn` partial + """ + new_channel = self._channel_fn() + if CrossSync.is_async: + # copy over interceptors + # this is needed because of how gapic attaches the LoggingClientAIOInterceptor + # sync channels add interceptors by wrapping, so this step isn't needed + new_channel._unary_unary_interceptors = ( + self._channel._unary_unary_interceptors + ) + new_channel._unary_stream_interceptors = ( + self._channel._unary_stream_interceptors + ) + new_channel._stream_unary_interceptors = ( + self._channel._stream_unary_interceptors + ) + new_channel._stream_stream_interceptors = ( + self._channel._stream_stream_interceptors + ) + return new_channel + + def swap_channel(self, new_channel: Channel) -> Channel: + """ + Replace the wrapped channel with a new instance. 
Typically created using `create_channel` + """ + old_channel = self._channel + self._channel = new_channel + return old_channel diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py index d7eac2a4dc22..40f30f1d8c81 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py @@ -92,13 +92,22 @@ from google.cloud.bigtable_v2.services.bigtable.transports import ( BigtableGrpcAsyncIOTransport as TransportType, ) + from google.cloud.bigtable_v2.services.bigtable import ( + BigtableAsyncClient as GapicClient, + ) from google.cloud.bigtable.data._async.mutations_batcher import _MB_SIZE + from google.cloud.bigtable.data._async._swappable_channel import ( + AsyncSwappableChannel, + ) else: from typing import Iterable # noqa: F401 from grpc import insecure_channel - from grpc import intercept_channel from google.cloud.bigtable_v2.services.bigtable.transports import BigtableGrpcTransport as TransportType # type: ignore + from google.cloud.bigtable_v2.services.bigtable import BigtableClient as GapicClient # type: ignore from google.cloud.bigtable.data._sync_autogen.mutations_batcher import _MB_SIZE + from google.cloud.bigtable.data._sync_autogen._swappable_channel import ( # noqa: F401 + SwappableChannel, + ) if TYPE_CHECKING: @@ -182,7 +191,6 @@ def __init__( client_options = cast( Optional[client_options_lib.ClientOptions], client_options ) - custom_channel = None self._emulator_host = os.getenv(BIGTABLE_EMULATOR) if self._emulator_host is not None: warnings.warn( @@ -191,11 +199,11 @@ def __init__( stacklevel=2, ) # use insecure channel if emulator is set - custom_channel = insecure_channel(self._emulator_host) if credentials is None: credentials = google.auth.credentials.AnonymousCredentials() if project is None: project = _DEFAULT_BIGTABLE_EMULATOR_CLIENT + # initialize client ClientWithProject.__init__( self, @@ -203,12 +211,12 @@ def __init__( project=project, client_options=client_options, ) - self._gapic_client = CrossSync.GapicClient( + self._gapic_client = GapicClient( credentials=credentials, client_options=client_options, client_info=self.client_info, transport=lambda *args, **kwargs: TransportType( - *args, **kwargs, channel=custom_channel + *args, **kwargs, channel=self._build_grpc_channel ), ) if ( @@ -234,7 +242,7 @@ def __init__( self._instance_owners: dict[_WarmedInstanceKey, Set[int]] = {} self._channel_init_time = time.monotonic() self._channel_refresh_task: CrossSync.Task[None] | None = None - self._executor = ( + self._executor: concurrent.futures.ThreadPoolExecutor | None = ( concurrent.futures.ThreadPoolExecutor() if not CrossSync.is_async else None ) if self._emulator_host is None: @@ -249,6 +257,29 @@ def __init__( stacklevel=2, ) + @CrossSync.convert(replace_symbols={"AsyncSwappableChannel": "SwappableChannel"}) + def _build_grpc_channel(self, *args, **kwargs) -> AsyncSwappableChannel: + """ + This method is called by the gapic transport to create a grpc channel. 
+
+        The init arguments passed down are captured in a partial used by AsyncSwappableChannel
+        to create new channel instances in the future, as part of the channel refresh logic
+
+        Emulators always use an insecure channel
+
+        Args:
+            - *args: positional arguments passed by the gapic layer to create a new channel with
+            - **kwargs: keyword arguments passed by the gapic layer to create a new channel with
+        Returns:
+            a custom wrapped swappable channel
+        """
+        if self._emulator_host is not None:
+            # emulators use insecure channel
+            create_channel_fn = partial(insecure_channel, self._emulator_host)
+        else:
+            create_channel_fn = partial(TransportType.create_channel, *args, **kwargs)
+        return AsyncSwappableChannel(create_channel_fn)
+
     @property
     def universe_domain(self) -> str:
         """Return the universe domain used by the client instance.
@@ -364,7 +395,12 @@ async def _ping_and_warm_instances(
         )
         return [r or None for r in result_list]

-    @CrossSync.convert
+    def _invalidate_channel_stubs(self):
+        """Helper to reset the cached stubs. Needed when changing out the grpc channel"""
+        self.transport._stubs = {}
+        self.transport._prep_wrapped_messages(self.client_info)
+
+    @CrossSync.convert(replace_symbols={"AsyncSwappableChannel": "SwappableChannel"})
     async def _manage_channel(
         self,
         refresh_interval_min: float = 60 * 35,
@@ -389,13 +425,17 @@ async def _manage_channel(
             grace_period: time to allow previous channel to serve existing requests
                 before closing, in seconds
         """
+        if not isinstance(self.transport.grpc_channel, AsyncSwappableChannel):
+            warnings.warn("Channel does not support auto-refresh.")
+            return
+        super_channel: AsyncSwappableChannel = self.transport.grpc_channel
         first_refresh = self._channel_init_time + random.uniform(
             refresh_interval_min, refresh_interval_max
         )
         next_sleep = max(first_refresh - time.monotonic(), 0)
         if next_sleep > 0:
             # warm the current channel immediately
-            await self._ping_and_warm_instances(channel=self.transport.grpc_channel)
+            await self._ping_and_warm_instances(channel=super_channel)
         # continuously refresh the channel every `refresh_interval` seconds
         while not self._is_closed.is_set():
             await CrossSync.event_wait(
@@ -408,24 +448,11 @@ async def _manage_channel(
                 break
             start_timestamp = time.monotonic()
             # prepare new channel for use
-            # TODO: refactor to avoid using internal references: https://github.com/googleapis/python-bigtable/issues/1094
-            old_channel = self.transport.grpc_channel
-            new_channel = self.transport.create_channel()
-            if CrossSync.is_async:
-                new_channel._unary_unary_interceptors.append(
-                    self.transport._interceptor
-                )
-            else:
-                new_channel = intercept_channel(
-                    new_channel, self.transport._interceptor
-                )
+            new_channel = super_channel.create_channel()
             await self._ping_and_warm_instances(channel=new_channel)
             # cycle channel out of use, with long grace window before closure
-            self.transport._grpc_channel = new_channel
-            self.transport._logged_channel = new_channel
-            # invalidate caches
-            self.transport._stubs = {}
-            self.transport._prep_wrapped_messages(self.client_info)
+            old_channel = super_channel.swap_channel(new_channel)
+            self._invalidate_channel_stubs()
             # give old_channel a chance to complete existing rpcs
             if CrossSync.is_async:
                 await old_channel.close(grace_period)
@@ -433,7 +460,7 @@ async def _manage_channel(
                 if grace_period:
                     self._is_closed.wait(grace_period)  # type: ignore
                 old_channel.close()  # type: ignore
-            # subtract thed time spent waiting for the channel to be
+            # subtract the time spent waiting for the channel to be
replaced next_refresh = random.uniform(refresh_interval_min, refresh_interval_max) next_sleep = max(next_refresh - (time.monotonic() - start_timestamp), 0) @@ -895,24 +922,32 @@ def __init__( self.table_name = self.client._gapic_client.table_path( self.client.project, instance_id, table_id ) - self.app_profile_id = app_profile_id + self.app_profile_id: str | None = app_profile_id - self.default_operation_timeout = default_operation_timeout - self.default_attempt_timeout = default_attempt_timeout - self.default_read_rows_operation_timeout = default_read_rows_operation_timeout - self.default_read_rows_attempt_timeout = default_read_rows_attempt_timeout - self.default_mutate_rows_operation_timeout = ( + self.default_operation_timeout: float = default_operation_timeout + self.default_attempt_timeout: float | None = default_attempt_timeout + self.default_read_rows_operation_timeout: float = ( + default_read_rows_operation_timeout + ) + self.default_read_rows_attempt_timeout: float | None = ( + default_read_rows_attempt_timeout + ) + self.default_mutate_rows_operation_timeout: float = ( default_mutate_rows_operation_timeout ) - self.default_mutate_rows_attempt_timeout = default_mutate_rows_attempt_timeout + self.default_mutate_rows_attempt_timeout: float | None = ( + default_mutate_rows_attempt_timeout + ) - self.default_read_rows_retryable_errors = ( + self.default_read_rows_retryable_errors: Sequence[type[Exception]] = ( default_read_rows_retryable_errors or () ) - self.default_mutate_rows_retryable_errors = ( + self.default_mutate_rows_retryable_errors: Sequence[type[Exception]] = ( default_mutate_rows_retryable_errors or () ) - self.default_retryable_errors = default_retryable_errors or () + self.default_retryable_errors: Sequence[type[Exception]] = ( + default_retryable_errors or () + ) try: self._register_instance_future = CrossSync.create_task( diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/_swappable_channel.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/_swappable_channel.py new file mode 100644 index 000000000000..78ba129d98c5 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/_swappable_channel.py @@ -0,0 +1,96 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This file is automatically generated by CrossSync. Do not edit manually. + +from __future__ import annotations +from typing import Callable +from grpc import ChannelConnectivity +from grpc import Channel + + +class _WrappedChannel(Channel): + """ + A wrapper around a gRPC channel. All methods are passed + through to the underlying channel. 
+ """ + + def __init__(self, channel: Channel): + self._channel = channel + + def unary_unary(self, *args, **kwargs): + return self._channel.unary_unary(*args, **kwargs) + + def unary_stream(self, *args, **kwargs): + return self._channel.unary_stream(*args, **kwargs) + + def stream_unary(self, *args, **kwargs): + return self._channel.stream_unary(*args, **kwargs) + + def stream_stream(self, *args, **kwargs): + return self._channel.stream_stream(*args, **kwargs) + + def channel_ready(self): + return self._channel.channel_ready() + + def __enter__(self): + self._channel.__enter__() + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + return self._channel.__exit__(exc_type, exc_val, exc_tb) + + def get_state(self, try_to_connect: bool = False) -> ChannelConnectivity: + return self._channel.get_state(try_to_connect=try_to_connect) + + def wait_for_state_change(self, last_observed_state): + return self._channel.wait_for_state_change(last_observed_state) + + def __getattr__(self, name): + return getattr(self._channel, name) + + def close(self, grace=None): + return self._channel.close() + + def subscribe(self, callback, try_to_connect=False): + return self._channel.subscribe(callback, try_to_connect) + + def unsubscribe(self, callback): + return self._channel.unsubscribe(callback) + + +class SwappableChannel(_WrappedChannel): + """ + Provides a grpc channel wrapper, that allows the internal channel to be swapped out + + Args: + - channel_fn: a nullary function that returns a new channel instance. + It should be a partial with all channel configuration arguments built-in + """ + + def __init__(self, channel_fn: Callable[[], Channel]): + self._channel_fn = channel_fn + self._channel = channel_fn() + + def create_channel(self) -> Channel: + """Create a fresh channel using the stored `channel_fn` partial""" + new_channel = self._channel_fn() + return new_channel + + def swap_channel(self, new_channel: Channel) -> Channel: + """Replace the wrapped channel with a new instance. 
Typically created using `create_channel`"""
+        old_channel = self._channel
+        self._channel = new_channel
+        return old_channel
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/client.py
index a7e07e20dfa2..1c75823ae5de 100644
--- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/client.py
+++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/client.py
@@ -75,11 +75,12 @@ from google.cloud.bigtable.data._cross_sync import CrossSync
 from typing import Iterable
 from grpc import insecure_channel
-from grpc import intercept_channel
 from google.cloud.bigtable_v2.services.bigtable.transports import (
     BigtableGrpcTransport as TransportType,
 )
+from google.cloud.bigtable_v2.services.bigtable import BigtableClient as GapicClient
 from google.cloud.bigtable.data._sync_autogen.mutations_batcher import _MB_SIZE
+from google.cloud.bigtable.data._sync_autogen._swappable_channel import SwappableChannel

 if TYPE_CHECKING:
     from google.cloud.bigtable.data._helpers import RowKeySamples
@@ -131,7 +132,6 @@ def __init__(
         client_options = cast(
             Optional[client_options_lib.ClientOptions], client_options
         )
-        custom_channel = None
         self._emulator_host = os.getenv(BIGTABLE_EMULATOR)
         if self._emulator_host is not None:
             warnings.warn(
@@ -139,7 +139,6 @@ def __init__(
                 RuntimeWarning,
                 stacklevel=2,
             )
-            custom_channel = insecure_channel(self._emulator_host)
             if credentials is None:
                 credentials = google.auth.credentials.AnonymousCredentials()
             if project is None:
@@ -150,12 +149,12 @@ def __init__(
             project=project,
             client_options=client_options,
         )
-        self._gapic_client = CrossSync._Sync_Impl.GapicClient(
+        self._gapic_client = GapicClient(
             credentials=credentials,
             client_options=client_options,
             client_info=self.client_info,
             transport=lambda *args, **kwargs: TransportType(
-                *args, **kwargs, channel=custom_channel
+                *args, **kwargs, channel=self._build_grpc_channel
             ),
         )
         if (
@@ -172,7 +171,7 @@ def __init__(
         self._instance_owners: dict[_WarmedInstanceKey, Set[int]] = {}
         self._channel_init_time = time.monotonic()
         self._channel_refresh_task: CrossSync._Sync_Impl.Task[None] | None = None
-        self._executor = (
+        self._executor: concurrent.futures.ThreadPoolExecutor | None = (
             concurrent.futures.ThreadPoolExecutor()
             if not CrossSync._Sync_Impl.is_async
             else None
@@ -187,6 +186,25 @@ def __init__(
                 stacklevel=2,
             )

+    def _build_grpc_channel(self, *args, **kwargs) -> SwappableChannel:
+        """This method is called by the gapic transport to create a grpc channel.
+
+        The init arguments passed down are captured in a partial used by SwappableChannel
+        to create new channel instances in the future, as part of the channel refresh logic
+
+        Emulators always use an insecure channel
+
+        Args:
+            - *args: positional arguments passed by the gapic layer to create a new channel with
+            - **kwargs: keyword arguments passed by the gapic layer to create a new channel with
+        Returns:
+            a custom wrapped swappable channel"""
+        if self._emulator_host is not None:
+            create_channel_fn = partial(insecure_channel, self._emulator_host)
+        else:
+            create_channel_fn = partial(TransportType.create_channel, *args, **kwargs)
+        return SwappableChannel(create_channel_fn)
+
     @property
     def universe_domain(self) -> str:
         """Return the universe domain used by the client instance.
@@ -279,6 +297,11 @@ def _ping_and_warm_instances( ) return [r or None for r in result_list] + def _invalidate_channel_stubs(self): + """Helper to reset the cached stubs. Needed when changing out the grpc channel""" + self.transport._stubs = {} + self.transport._prep_wrapped_messages(self.client_info) + def _manage_channel( self, refresh_interval_min: float = 60 * 35, @@ -301,12 +324,16 @@ def _manage_channel( between `refresh_interval_min` and `refresh_interval_max` grace_period: time to allow previous channel to serve existing requests before closing, in seconds""" + if not isinstance(self.transport.grpc_channel, SwappableChannel): + warnings.warn("Channel does not support auto-refresh.") + return + super_channel: SwappableChannel = self.transport.grpc_channel first_refresh = self._channel_init_time + random.uniform( refresh_interval_min, refresh_interval_max ) next_sleep = max(first_refresh - time.monotonic(), 0) if next_sleep > 0: - self._ping_and_warm_instances(channel=self.transport.grpc_channel) + self._ping_and_warm_instances(channel=super_channel) while not self._is_closed.is_set(): CrossSync._Sync_Impl.event_wait( self._is_closed, next_sleep, async_break_early=False @@ -314,14 +341,10 @@ def _manage_channel( if self._is_closed.is_set(): break start_timestamp = time.monotonic() - old_channel = self.transport.grpc_channel - new_channel = self.transport.create_channel() - new_channel = intercept_channel(new_channel, self.transport._interceptor) + new_channel = super_channel.create_channel() self._ping_and_warm_instances(channel=new_channel) - self.transport._grpc_channel = new_channel - self.transport._logged_channel = new_channel - self.transport._stubs = {} - self.transport._prep_wrapped_messages(self.client_info) + old_channel = super_channel.swap_channel(new_channel) + self._invalidate_channel_stubs() if grace_period: self._is_closed.wait(grace_period) old_channel.close() @@ -694,22 +717,30 @@ def __init__( self.table_name = self.client._gapic_client.table_path( self.client.project, instance_id, table_id ) - self.app_profile_id = app_profile_id - self.default_operation_timeout = default_operation_timeout - self.default_attempt_timeout = default_attempt_timeout - self.default_read_rows_operation_timeout = default_read_rows_operation_timeout - self.default_read_rows_attempt_timeout = default_read_rows_attempt_timeout - self.default_mutate_rows_operation_timeout = ( + self.app_profile_id: str | None = app_profile_id + self.default_operation_timeout: float = default_operation_timeout + self.default_attempt_timeout: float | None = default_attempt_timeout + self.default_read_rows_operation_timeout: float = ( + default_read_rows_operation_timeout + ) + self.default_read_rows_attempt_timeout: float | None = ( + default_read_rows_attempt_timeout + ) + self.default_mutate_rows_operation_timeout: float = ( default_mutate_rows_operation_timeout ) - self.default_mutate_rows_attempt_timeout = default_mutate_rows_attempt_timeout - self.default_read_rows_retryable_errors = ( + self.default_mutate_rows_attempt_timeout: float | None = ( + default_mutate_rows_attempt_timeout + ) + self.default_read_rows_retryable_errors: Sequence[type[Exception]] = ( default_read_rows_retryable_errors or () ) - self.default_mutate_rows_retryable_errors = ( + self.default_mutate_rows_retryable_errors: Sequence[type[Exception]] = ( default_mutate_rows_retryable_errors or () ) - self.default_retryable_errors = default_retryable_errors or () + self.default_retryable_errors: Sequence[type[Exception]] = ( + 
default_retryable_errors or () + ) try: self._register_instance_future = CrossSync._Sync_Impl.create_task( self.client._register_instance, diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py index d3ca890b4c61..74f01c60c055 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py @@ -115,8 +115,8 @@ def __init__( self._app_profile_id = app_profile_id self._client = client self._instance_id = instance_id - self._prepare_metadata = prepare_metadata - self._final_metadata = None + self._prepare_metadata: Metadata = prepare_metadata + self._final_metadata: Metadata | None = None self._byte_cursor = _ByteCursor() self._reader: _Reader[QueryResultRow] = _QueryResultRowReader() self.has_received_token = False diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_sync_autogen/execute_query_iterator.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_sync_autogen/execute_query_iterator.py index 9c2d1c6d8ee8..e819acda7281 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_sync_autogen/execute_query_iterator.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_sync_autogen/execute_query_iterator.py @@ -90,8 +90,8 @@ def __init__( self._app_profile_id = app_profile_id self._client = client self._instance_id = instance_id - self._prepare_metadata = prepare_metadata - self._final_metadata = None + self._prepare_metadata: Metadata = prepare_metadata + self._final_metadata: Metadata | None = None self._byte_cursor = _ByteCursor() self._reader: _Reader[QueryResultRow] = _QueryResultRowReader() self.has_received_token = False diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py index 3f7df3c4ef3d..49f981d9a1f3 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py @@ -290,7 +290,6 @@ def __init__( always_use_jwt_access=always_use_jwt_access, api_audience=api_audience, ) - if not self._grpc_channel: # initialize with the provided callable or the default channel channel_init = channel or type(self).create_channel diff --git a/packages/google-cloud-bigtable/tests/system/data/test_system_async.py b/packages/google-cloud-bigtable/tests/system/data/test_system_async.py index ed9fbd8b8ce1..c96570b76af2 100644 --- a/packages/google-cloud-bigtable/tests/system/data/test_system_async.py +++ b/packages/google-cloud-bigtable/tests/system/data/test_system_async.py @@ -28,6 +28,14 @@ from . 
import TEST_FAMILY, TEST_FAMILY_2, TEST_AGGREGATE_FAMILY +if CrossSync.is_async: + from google.cloud.bigtable_v2.services.bigtable.transports.grpc_asyncio import ( + _LoggingClientAIOInterceptor as GapicInterceptor, + ) +else: + from google.cloud.bigtable_v2.services.bigtable.transports.grpc import ( + _LoggingClientInterceptor as GapicInterceptor, + ) __CROSS_SYNC_OUTPUT__ = "tests.system.data.test_system_autogen" @@ -111,11 +119,14 @@ async def delete_rows(self): @CrossSync.convert_class(sync_name="TestSystem") class TestSystemAsync: + def _make_client(self): + project = os.getenv("GOOGLE_CLOUD_PROJECT") or None + return CrossSync.DataClient(project=project) + @CrossSync.convert @CrossSync.pytest_fixture(scope="session") async def client(self): - project = os.getenv("GOOGLE_CLOUD_PROJECT") or None - async with CrossSync.DataClient(project=project) as client: + async with self._make_client() as client: yield client @CrossSync.convert @@ -260,8 +271,7 @@ async def test_channel_refresh(self, table_id, instance_id, temp_rows): """ await temp_rows.add_row(b"row_key_1") await temp_rows.add_row(b"row_key_2") - project = os.getenv("GOOGLE_CLOUD_PROJECT") or None - client = CrossSync.DataClient(project=project) + client = self._make_client() # start custom refresh task try: client._channel_refresh_task = CrossSync.create_task( @@ -274,13 +284,24 @@ async def test_channel_refresh(self, table_id, instance_id, temp_rows): await CrossSync.yield_to_event_loop() async with client.get_table(instance_id, table_id) as table: rows = await table.read_rows({}) - first_channel = client.transport.grpc_channel + channel_wrapper = client.transport.grpc_channel + first_channel = client.transport.grpc_channel._channel assert len(rows) == 2 await CrossSync.sleep(2) rows_after_refresh = await table.read_rows({}) assert len(rows_after_refresh) == 2 - assert client.transport.grpc_channel is not first_channel - print(table) + assert client.transport.grpc_channel is channel_wrapper + assert client.transport.grpc_channel._channel is not first_channel + # ensure gapic's logging interceptor is still active + if CrossSync.is_async: + interceptors = ( + client.transport.grpc_channel._channel._unary_unary_interceptors + ) + assert GapicInterceptor in [type(i) for i in interceptors] + else: + assert isinstance( + client.transport._logged_channel._interceptor, GapicInterceptor + ) finally: await client.close() diff --git a/packages/google-cloud-bigtable/tests/system/data/test_system_autogen.py b/packages/google-cloud-bigtable/tests/system/data/test_system_autogen.py index 693b8d966424..a78a8eb4c141 100644 --- a/packages/google-cloud-bigtable/tests/system/data/test_system_autogen.py +++ b/packages/google-cloud-bigtable/tests/system/data/test_system_autogen.py @@ -27,6 +27,9 @@ from google.type import date_pb2 from google.cloud.bigtable.data._cross_sync import CrossSync from . 
import TEST_FAMILY, TEST_FAMILY_2, TEST_AGGREGATE_FAMILY +from google.cloud.bigtable_v2.services.bigtable.transports.grpc import ( + _LoggingClientInterceptor as GapicInterceptor, +) TARGETS = ["table"] if not os.environ.get(BIGTABLE_EMULATOR): @@ -99,10 +102,13 @@ def delete_rows(self): class TestSystem: + def _make_client(self): + project = os.getenv("GOOGLE_CLOUD_PROJECT") or None + return CrossSync._Sync_Impl.DataClient(project=project) + @pytest.fixture(scope="session") def client(self): - project = os.getenv("GOOGLE_CLOUD_PROJECT") or None - with CrossSync._Sync_Impl.DataClient(project=project) as client: + with self._make_client() as client: yield client @pytest.fixture(scope="session", params=TARGETS) @@ -219,8 +225,7 @@ def test_channel_refresh(self, table_id, instance_id, temp_rows): to ensure new channel works""" temp_rows.add_row(b"row_key_1") temp_rows.add_row(b"row_key_2") - project = os.getenv("GOOGLE_CLOUD_PROJECT") or None - client = CrossSync._Sync_Impl.DataClient(project=project) + client = self._make_client() try: client._channel_refresh_task = CrossSync._Sync_Impl.create_task( client._manage_channel, @@ -231,13 +236,17 @@ def test_channel_refresh(self, table_id, instance_id, temp_rows): CrossSync._Sync_Impl.yield_to_event_loop() with client.get_table(instance_id, table_id) as table: rows = table.read_rows({}) - first_channel = client.transport.grpc_channel + channel_wrapper = client.transport.grpc_channel + first_channel = client.transport.grpc_channel._channel assert len(rows) == 2 CrossSync._Sync_Impl.sleep(2) rows_after_refresh = table.read_rows({}) assert len(rows_after_refresh) == 2 - assert client.transport.grpc_channel is not first_channel - print(table) + assert client.transport.grpc_channel is channel_wrapper + assert client.transport.grpc_channel._channel is not first_channel + assert isinstance( + client.transport._logged_channel._interceptor, GapicInterceptor + ) finally: client.close() diff --git a/packages/google-cloud-bigtable/tests/unit/data/_async/test__swappable_channel.py b/packages/google-cloud-bigtable/tests/unit/data/_async/test__swappable_channel.py new file mode 100644 index 000000000000..14fef2c85944 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/data/_async/test__swappable_channel.py @@ -0,0 +1,135 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock +except ImportError: # pragma: NO COVER + import mock # type: ignore + +import pytest +from grpc import ChannelConnectivity + +from google.cloud.bigtable.data._cross_sync import CrossSync + +if CrossSync.is_async: + from google.cloud.bigtable.data._async._swappable_channel import ( + AsyncSwappableChannel as TargetType, + ) +else: + from google.cloud.bigtable.data._sync_autogen._swappable_channel import ( + SwappableChannel as TargetType, + ) + + +__CROSS_SYNC_OUTPUT__ = "tests.unit.data._sync_autogen.test__swappable_channel" + + +@CrossSync.convert_class(sync_name="TestSwappableChannel") +class TestAsyncSwappableChannel: + @staticmethod + @CrossSync.convert + def _get_target_class(): + return TargetType + + def _make_one(self, *args, **kwargs): + return self._get_target_class()(*args, **kwargs) + + def test_ctor(self): + channel_fn = mock.Mock() + instance = self._make_one(channel_fn) + assert instance._channel_fn == channel_fn + channel_fn.assert_called_once_with() + assert instance._channel == channel_fn.return_value + + def test_swap_channel(self): + channel_fn = mock.Mock() + instance = self._make_one(channel_fn) + old_channel = instance._channel + new_channel = object() + result = instance.swap_channel(new_channel) + assert result == old_channel + assert instance._channel == new_channel + + def test_create_channel(self): + channel_fn = mock.Mock() + instance = self._make_one(channel_fn) + # reset mock from ctor call + channel_fn.reset_mock() + new_channel = instance.create_channel() + channel_fn.assert_called_once_with() + assert new_channel == channel_fn.return_value + + @CrossSync.drop + def test_create_channel_async_interceptors_copied(self): + channel_fn = mock.Mock() + instance = self._make_one(channel_fn) + # reset mock from ctor call + channel_fn.reset_mock() + # mock out interceptors on original channel + instance._channel._unary_unary_interceptors = ["unary_unary"] + instance._channel._unary_stream_interceptors = ["unary_stream"] + instance._channel._stream_unary_interceptors = ["stream_unary"] + instance._channel._stream_stream_interceptors = ["stream_stream"] + + new_channel = instance.create_channel() + channel_fn.assert_called_once_with() + assert new_channel == channel_fn.return_value + assert new_channel._unary_unary_interceptors == ["unary_unary"] + assert new_channel._unary_stream_interceptors == ["unary_stream"] + assert new_channel._stream_unary_interceptors == ["stream_unary"] + assert new_channel._stream_stream_interceptors == ["stream_stream"] + + @pytest.mark.parametrize( + "method_name,args,kwargs", + [ + ("unary_unary", (1,), {"kw": 2}), + ("unary_stream", (3,), {"kw": 4}), + ("stream_unary", (5,), {"kw": 6}), + ("stream_stream", (7,), {"kw": 8}), + ("get_state", (), {"try_to_connect": True}), + ], + ) + def test_forwarded_methods(self, method_name, args, kwargs): + channel_fn = mock.Mock() + instance = self._make_one(channel_fn) + method = getattr(instance, method_name) + result = method(*args, **kwargs) + mock_method = getattr(channel_fn.return_value, method_name) + mock_method.assert_called_once_with(*args, **kwargs) + assert result == mock_method.return_value + + @pytest.mark.parametrize( + "method_name,args,kwargs", + [ + ("channel_ready", (), {}), + ("wait_for_state_change", (ChannelConnectivity.READY,), {}), + ], + ) + @CrossSync.pytest + async def test_forwarded_async_methods(self, method_name, args, kwargs): + async def dummy_coro(*a, **k): + return 
mock.sentinel.result + + channel = mock.Mock() + mock_method = getattr(channel, method_name) + mock_method.side_effect = dummy_coro + + channel_fn = mock.Mock(return_value=channel) + instance = self._make_one(channel_fn) + method = getattr(instance, method_name) + result = await method(*args, **kwargs) + + mock_method.assert_called_once_with(*args, **kwargs) + assert result == mock.sentinel.result diff --git a/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py b/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py index 97179a3b15ef..9e434d12f874 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py @@ -52,13 +52,21 @@ if CrossSync.is_async: from google.api_core import grpc_helpers_async from google.cloud.bigtable.data._async.client import TableAsync + from google.cloud.bigtable.data._async._swappable_channel import ( + AsyncSwappableChannel, + ) CrossSync.add_mapping("grpc_helpers", grpc_helpers_async) + CrossSync.add_mapping("SwappableChannel", AsyncSwappableChannel) else: from google.api_core import grpc_helpers from google.cloud.bigtable.data._sync_autogen.client import Table # noqa: F401 + from google.cloud.bigtable.data._sync_autogen._swappable_channel import ( + SwappableChannel, + ) CrossSync.add_mapping("grpc_helpers", grpc_helpers) + CrossSync.add_mapping("SwappableChannel", SwappableChannel) __CROSS_SYNC_OUTPUT__ = "tests.unit.data._sync_autogen.test_client" @@ -229,6 +237,7 @@ async def test__start_background_channel_refresh(self): client, "_ping_and_warm_instances", CrossSync.Mock() ) as ping_and_warm: client._emulator_host = None + client.transport._grpc_channel = CrossSync.SwappableChannel(mock.Mock) client._start_background_channel_refresh() assert client._channel_refresh_task is not None assert isinstance(client._channel_refresh_task, CrossSync.Task) @@ -384,44 +393,31 @@ async def test__manage_channel_ping_and_warm(self): """ _manage channel should call ping and warm internally """ - import time import threading - if CrossSync.is_async: - from google.cloud.bigtable_v2.services.bigtable.transports.grpc_asyncio import ( - _LoggingClientAIOInterceptor as Interceptor, - ) - else: - from google.cloud.bigtable_v2.services.bigtable.transports.grpc import ( - _LoggingClientInterceptor as Interceptor, - ) - - client_mock = mock.Mock() - client_mock.transport._interceptor = Interceptor() - client_mock._is_closed.is_set.return_value = False - client_mock._channel_init_time = time.monotonic() - orig_channel = client_mock.transport.grpc_channel + client = self._make_client(project="project-id", use_emulator=True) + orig_channel = client.transport.grpc_channel # should ping an warm all new channels, and old channels if sleeping sleep_tuple = ( (asyncio, "sleep") if CrossSync.is_async else (threading.Event, "wait") ) - with mock.patch.object(*sleep_tuple): - # stop process after close is called - orig_channel.close.side_effect = asyncio.CancelledError - ping_and_warm = client_mock._ping_and_warm_instances = CrossSync.Mock() + with mock.patch.object(*sleep_tuple) as sleep_mock: + # stop process after loop + sleep_mock.side_effect = [None, asyncio.CancelledError] + ping_and_warm = client._ping_and_warm_instances = CrossSync.Mock() # should ping and warm old channel then new if sleep > 0 try: - await self._get_target_class()._manage_channel(client_mock, 10) + await client._manage_channel(10) except asyncio.CancelledError: pass # should have called at loop 
start, and after replacement assert ping_and_warm.call_count == 2 # should have replaced channel once - assert client_mock.transport._grpc_channel != orig_channel + assert client.transport.grpc_channel._channel != orig_channel # make sure new and old channels were warmed called_with = [call[1]["channel"] for call in ping_and_warm.call_args_list] assert orig_channel in called_with - assert client_mock.transport.grpc_channel in called_with + assert client.transport.grpc_channel._channel in called_with @CrossSync.pytest @pytest.mark.parametrize( @@ -439,8 +435,6 @@ async def test__manage_channel_sleeps( import time import random - channel = mock.Mock() - channel.close = CrossSync.Mock() with mock.patch.object(random, "uniform") as uniform: uniform.side_effect = lambda min_, max_: min_ with mock.patch.object(time, "time") as time_mock: @@ -449,8 +443,7 @@ async def test__manage_channel_sleeps( sleep.side_effect = [None for i in range(num_cycles - 1)] + [ asyncio.CancelledError ] - client = self._make_client(project="project-id") - client.transport._grpc_channel = channel + client = self._make_client(project="project-id", use_emulator=True) with mock.patch.object( client.transport, "create_channel", CrossSync.Mock ): @@ -506,26 +499,27 @@ async def test__manage_channel_refresh(self, num_cycles): expected_refresh = 0.5 grpc_lib = grpc.aio if CrossSync.is_async else grpc new_channel = grpc_lib.insecure_channel("localhost:8080") + create_channel_mock = mock.Mock() + create_channel_mock.return_value = new_channel + refreshable_channel = CrossSync.SwappableChannel(create_channel_mock) with mock.patch.object(CrossSync, "event_wait") as sleep: sleep.side_effect = [None for i in range(num_cycles)] + [RuntimeError] - with mock.patch.object( - CrossSync.grpc_helpers, "create_channel" - ) as create_channel: - create_channel.return_value = new_channel - client = self._make_client(project="project-id") - create_channel.reset_mock() - try: - await client._manage_channel( - refresh_interval_min=expected_refresh, - refresh_interval_max=expected_refresh, - grace_period=0, - ) - except RuntimeError: - pass - assert sleep.call_count == num_cycles + 1 - assert create_channel.call_count == num_cycles - await client.close() + client = self._make_client(project="project-id") + client.transport._grpc_channel = refreshable_channel + create_channel_mock.reset_mock() + sleep.reset_mock() + try: + await client._manage_channel( + refresh_interval_min=expected_refresh, + refresh_interval_max=expected_refresh, + grace_period=0, + ) + except RuntimeError: + pass + assert sleep.call_count == num_cycles + 1 + assert create_channel_mock.call_count == num_cycles + await client.close() @CrossSync.pytest async def test__register_instance(self): diff --git a/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test__swappable_channel.py b/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test__swappable_channel.py new file mode 100644 index 000000000000..04f3f61c8d86 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test__swappable_channel.py @@ -0,0 +1,100 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# try/except added for compatibility with python < 3.8 + +# This file is automatically generated by CrossSync. Do not edit manually. + +try: + from unittest import mock +except ImportError: + import mock +import pytest +from grpc import ChannelConnectivity +from google.cloud.bigtable.data._sync_autogen._swappable_channel import ( + SwappableChannel as TargetType, +) + + +class TestSwappableChannel: + @staticmethod + def _get_target_class(): + return TargetType + + def _make_one(self, *args, **kwargs): + return self._get_target_class()(*args, **kwargs) + + def test_ctor(self): + channel_fn = mock.Mock() + instance = self._make_one(channel_fn) + assert instance._channel_fn == channel_fn + channel_fn.assert_called_once_with() + assert instance._channel == channel_fn.return_value + + def test_swap_channel(self): + channel_fn = mock.Mock() + instance = self._make_one(channel_fn) + old_channel = instance._channel + new_channel = object() + result = instance.swap_channel(new_channel) + assert result == old_channel + assert instance._channel == new_channel + + def test_create_channel(self): + channel_fn = mock.Mock() + instance = self._make_one(channel_fn) + channel_fn.reset_mock() + new_channel = instance.create_channel() + channel_fn.assert_called_once_with() + assert new_channel == channel_fn.return_value + + @pytest.mark.parametrize( + "method_name,args,kwargs", + [ + ("unary_unary", (1,), {"kw": 2}), + ("unary_stream", (3,), {"kw": 4}), + ("stream_unary", (5,), {"kw": 6}), + ("stream_stream", (7,), {"kw": 8}), + ("get_state", (), {"try_to_connect": True}), + ], + ) + def test_forwarded_methods(self, method_name, args, kwargs): + channel_fn = mock.Mock() + instance = self._make_one(channel_fn) + method = getattr(instance, method_name) + result = method(*args, **kwargs) + mock_method = getattr(channel_fn.return_value, method_name) + mock_method.assert_called_once_with(*args, **kwargs) + assert result == mock_method.return_value + + @pytest.mark.parametrize( + "method_name,args,kwargs", + [ + ("channel_ready", (), {}), + ("wait_for_state_change", (ChannelConnectivity.READY,), {}), + ], + ) + def test_forwarded_async_methods(self, method_name, args, kwargs): + def dummy_coro(*a, **k): + return mock.sentinel.result + + channel = mock.Mock() + mock_method = getattr(channel, method_name) + mock_method.side_effect = dummy_coro + channel_fn = mock.Mock(return_value=channel) + instance = self._make_one(channel_fn) + method = getattr(instance, method_name) + result = method(*args, **kwargs) + mock_method.assert_called_once_with(*args, **kwargs) + assert result == mock.sentinel.result diff --git a/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_client.py b/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_client.py index 6012a10d3985..506ad7e94d87 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_client.py @@ -46,8 +46,10 @@ str_val, ) from google.api_core import grpc_helpers +from google.cloud.bigtable.data._sync_autogen._swappable_channel import 
SwappableChannel CrossSync._Sync_Impl.add_mapping("grpc_helpers", grpc_helpers) +CrossSync._Sync_Impl.add_mapping("SwappableChannel", SwappableChannel) @CrossSync._Sync_Impl.add_mapping_decorator("TestBigtableDataClient") @@ -182,6 +184,9 @@ def test__start_background_channel_refresh(self): client, "_ping_and_warm_instances", CrossSync._Sync_Impl.Mock() ) as ping_and_warm: client._emulator_host = None + client.transport._grpc_channel = CrossSync._Sync_Impl.SwappableChannel( + mock.Mock + ) client._start_background_channel_refresh() assert client._channel_refresh_task is not None assert isinstance(client._channel_refresh_task, CrossSync._Sync_Impl.Task) @@ -297,36 +302,29 @@ def test__manage_channel_first_sleep( def test__manage_channel_ping_and_warm(self): """_manage channel should call ping and warm internally""" - import time import threading - from google.cloud.bigtable_v2.services.bigtable.transports.grpc import ( - _LoggingClientInterceptor as Interceptor, - ) - client_mock = mock.Mock() - client_mock.transport._interceptor = Interceptor() - client_mock._is_closed.is_set.return_value = False - client_mock._channel_init_time = time.monotonic() - orig_channel = client_mock.transport.grpc_channel + client = self._make_client(project="project-id", use_emulator=True) + orig_channel = client.transport.grpc_channel sleep_tuple = ( (asyncio, "sleep") if CrossSync._Sync_Impl.is_async else (threading.Event, "wait") ) - with mock.patch.object(*sleep_tuple): - orig_channel.close.side_effect = asyncio.CancelledError + with mock.patch.object(*sleep_tuple) as sleep_mock: + sleep_mock.side_effect = [None, asyncio.CancelledError] ping_and_warm = ( - client_mock._ping_and_warm_instances + client._ping_and_warm_instances ) = CrossSync._Sync_Impl.Mock() try: - self._get_target_class()._manage_channel(client_mock, 10) + client._manage_channel(10) except asyncio.CancelledError: pass assert ping_and_warm.call_count == 2 - assert client_mock.transport._grpc_channel != orig_channel + assert client.transport.grpc_channel._channel != orig_channel called_with = [call[1]["channel"] for call in ping_and_warm.call_args_list] assert orig_channel in called_with - assert client_mock.transport.grpc_channel in called_with + assert client.transport.grpc_channel._channel in called_with @pytest.mark.parametrize( "refresh_interval, num_cycles, expected_sleep", @@ -336,8 +334,6 @@ def test__manage_channel_sleeps(self, refresh_interval, num_cycles, expected_sle import time import random - channel = mock.Mock() - channel.close = CrossSync._Sync_Impl.Mock() with mock.patch.object(random, "uniform") as uniform: uniform.side_effect = lambda min_, max_: min_ with mock.patch.object(time, "time") as time_mock: @@ -346,8 +342,7 @@ def test__manage_channel_sleeps(self, refresh_interval, num_cycles, expected_sle sleep.side_effect = [None for i in range(num_cycles - 1)] + [ asyncio.CancelledError ] - client = self._make_client(project="project-id") - client.transport._grpc_channel = channel + client = self._make_client(project="project-id", use_emulator=True) with mock.patch.object( client.transport, "create_channel", CrossSync._Sync_Impl.Mock ): @@ -400,25 +395,26 @@ def test__manage_channel_refresh(self, num_cycles): expected_refresh = 0.5 grpc_lib = grpc.aio if CrossSync._Sync_Impl.is_async else grpc new_channel = grpc_lib.insecure_channel("localhost:8080") + create_channel_mock = mock.Mock() + create_channel_mock.return_value = new_channel + refreshable_channel = CrossSync._Sync_Impl.SwappableChannel(create_channel_mock) with 
mock.patch.object(CrossSync._Sync_Impl, "event_wait") as sleep: sleep.side_effect = [None for i in range(num_cycles)] + [RuntimeError] - with mock.patch.object( - CrossSync._Sync_Impl.grpc_helpers, "create_channel" - ) as create_channel: - create_channel.return_value = new_channel - client = self._make_client(project="project-id") - create_channel.reset_mock() - try: - client._manage_channel( - refresh_interval_min=expected_refresh, - refresh_interval_max=expected_refresh, - grace_period=0, - ) - except RuntimeError: - pass - assert sleep.call_count == num_cycles + 1 - assert create_channel.call_count == num_cycles - client.close() + client = self._make_client(project="project-id") + client.transport._grpc_channel = refreshable_channel + create_channel_mock.reset_mock() + sleep.reset_mock() + try: + client._manage_channel( + refresh_interval_min=expected_refresh, + refresh_interval_max=expected_refresh, + grace_period=0, + ) + except RuntimeError: + pass + assert sleep.call_count == num_cycles + 1 + assert create_channel_mock.call_count == num_cycles + client.close() def test__register_instance(self): """test instance registration""" From 6f9504b903e827c96d8ed1669fa56c14e784b719 Mon Sep 17 00:00:00 2001 From: Lixia Chen Date: Fri, 12 Sep 2025 15:06:04 -0400 Subject: [PATCH 881/892] feat: Add support for Proto and Enum types (#1202) --- .../cloud/bigtable/data/_async/client.py | 61 + .../bigtable/data/_sync_autogen/client.py | 61 + .../_async/execute_query_iterator.py | 10 +- .../_query_result_parsing_utils.py | 155 ++- .../bigtable/data/execute_query/_reader.py | 30 +- .../_sync_autogen/execute_query_iterator.py | 10 +- .../bigtable/data/execute_query/metadata.py | 24 + .../samples/testdata/README.md | 5 + .../samples/testdata/descriptors.pb | Bin 0 -> 182 bytes .../samples/testdata/singer.proto | 15 + .../samples/testdata/singer_pb2.py | 27 + .../tests/system/data/test_system_autogen.py | 28 +- .../unit/data/execute_query/sql_helpers.py | 12 + .../test_execute_query_parameters_parsing.py | 13 + .../test_query_result_parsing_utils.py | 1030 ++++++++++++++++- .../test_query_result_row_reader.py | 42 +- 16 files changed, 1462 insertions(+), 61 deletions(-) create mode 100644 packages/google-cloud-bigtable/samples/testdata/README.md create mode 100644 packages/google-cloud-bigtable/samples/testdata/descriptors.pb create mode 100644 packages/google-cloud-bigtable/samples/testdata/singer.proto create mode 100644 packages/google-cloud-bigtable/samples/testdata/singer_pb2.py diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py index 40f30f1d8c81..516e20eb34e9 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py @@ -58,6 +58,8 @@ from google.api_core.exceptions import DeadlineExceeded from google.api_core.exceptions import ServiceUnavailable from google.api_core.exceptions import Aborted +from google.protobuf.message import Message +from google.protobuf.internal.enum_type_wrapper import EnumTypeWrapper import google.auth.credentials import google.auth._default @@ -684,6 +686,7 @@ async def execute_query( DeadlineExceeded, ServiceUnavailable, ), + column_info: dict[str, Message | EnumTypeWrapper] | None = None, ) -> "ExecuteQueryIteratorAsync": """ Executes an SQL query on an instance. 
@@ -732,6 +735,62 @@ async def execute_query( If None, defaults to prepare_operation_timeout. prepare_retryable_errors: a list of errors that will be retried if encountered during prepareQuery. Defaults to 4 (DeadlineExceeded) and 14 (ServiceUnavailable) + column_info: (Optional) A dictionary mapping column names to Protobuf message classes or EnumTypeWrapper objects. + This dictionary provides the necessary type information for deserializing PROTO and + ENUM column values from the query results. When an entry is provided + for a PROTO or ENUM column, the client library will attempt to deserialize the raw data. + + - For PROTO columns: The value in the dictionary should be the + Protobuf Message class (e.g., ``my_pb2.MyMessage``). + - For ENUM columns: The value should be the Protobuf EnumTypeWrapper + object (e.g., ``my_pb2.MyEnum``). + + Example:: + + import my_pb2 + + column_info = { + "my_proto_column": my_pb2.MyMessage, + "my_enum_column": my_pb2.MyEnum + } + + If ``column_info`` is not provided, or if a specific column name is not found + in the dictionary: + + - PROTO columns will be returned as raw bytes. + - ENUM columns will be returned as integers. + + Note for Nested PROTO or ENUM Fields: + + To specify types for PROTO or ENUM fields within STRUCTs or MAPs, use a dot-separated + path from the top-level column name. + + - For STRUCTs: ``struct_column_name.field_name`` + - For MAPs: ``map_column_name.key`` or ``map_column_name.value`` to specify types + for the map keys or values, respectively. + + Example:: + + import my_pb2 + + column_info = { + # Top-level column + "my_proto_column": my_pb2.MyMessage, + "my_enum_column": my_pb2.MyEnum, + + # Nested field in a STRUCT column named 'my_struct' + "my_struct.nested_proto_field": my_pb2.OtherMessage, + "my_struct.nested_enum_field": my_pb2.AnotherEnum, + + # Nested field in a MAP column named 'my_map' + "my_map.key": my_pb2.MapKeyEnum, # If map keys were enums + "my_map.value": my_pb2.MapValueMessage, + + # PROTO field inside a STRUCT, where the STRUCT is the value in a MAP column + "struct_map.value.nested_proto_field": my_pb2.DeeplyNestedProto, + "struct_map.value.nested_enum_field": my_pb2.DeeplyNestedEnum + } + Returns: ExecuteQueryIteratorAsync: an asynchronous iterator that yields rows returned by the query Raises: @@ -741,6 +800,7 @@ async def execute_query( google.api_core.exceptions.GoogleAPIError: raised if the request encounters an unrecoverable error google.cloud.bigtable.data.exceptions.ParameterTypeInferenceFailed: Raised if a parameter is passed without an explicit type, and the type cannot be infered + google.protobuf.message.DecodeError: raised if the deserialization of a PROTO/ENUM value fails. 
""" instance_name = self._gapic_client.instance_path(self.project, instance_id) converted_param_types = _to_param_types(parameters, parameter_types) @@ -798,6 +858,7 @@ async def execute_query( attempt_timeout, operation_timeout, retryable_excs=retryable_excs, + column_info=column_info, ) @CrossSync.convert(sync_name="__enter__") diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/client.py index 1c75823ae5de..a168f360db77 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/client.py @@ -49,6 +49,8 @@ from google.api_core.exceptions import DeadlineExceeded from google.api_core.exceptions import ServiceUnavailable from google.api_core.exceptions import Aborted +from google.protobuf.message import Message +from google.protobuf.internal.enum_type_wrapper import EnumTypeWrapper import google.auth.credentials import google.auth._default from google.api_core import client_options as client_options_lib @@ -508,6 +510,7 @@ def execute_query( DeadlineExceeded, ServiceUnavailable, ), + column_info: dict[str, Message | EnumTypeWrapper] | None = None, ) -> "ExecuteQueryIterator": """Executes an SQL query on an instance. Returns an iterator to asynchronously stream back columns from selected rows. @@ -555,6 +558,62 @@ def execute_query( If None, defaults to prepare_operation_timeout. prepare_retryable_errors: a list of errors that will be retried if encountered during prepareQuery. Defaults to 4 (DeadlineExceeded) and 14 (ServiceUnavailable) + column_info: (Optional) A dictionary mapping column names to Protobuf message classes or EnumTypeWrapper objects. + This dictionary provides the necessary type information for deserializing PROTO and + ENUM column values from the query results. When an entry is provided + for a PROTO or ENUM column, the client library will attempt to deserialize the raw data. + + - For PROTO columns: The value in the dictionary should be the + Protobuf Message class (e.g., ``my_pb2.MyMessage``). + - For ENUM columns: The value should be the Protobuf EnumTypeWrapper + object (e.g., ``my_pb2.MyEnum``). + + Example:: + + import my_pb2 + + column_info = { + "my_proto_column": my_pb2.MyMessage, + "my_enum_column": my_pb2.MyEnum + } + + If ``column_info`` is not provided, or if a specific column name is not found + in the dictionary: + + - PROTO columns will be returned as raw bytes. + - ENUM columns will be returned as integers. + + Note for Nested PROTO or ENUM Fields: + + To specify types for PROTO or ENUM fields within STRUCTs or MAPs, use a dot-separated + path from the top-level column name. + + - For STRUCTs: ``struct_column_name.field_name`` + - For MAPs: ``map_column_name.key`` or ``map_column_name.value`` to specify types + for the map keys or values, respectively. 
+ + Example:: + + import my_pb2 + + column_info = { + # Top-level column + "my_proto_column": my_pb2.MyMessage, + "my_enum_column": my_pb2.MyEnum, + + # Nested field in a STRUCT column named 'my_struct' + "my_struct.nested_proto_field": my_pb2.OtherMessage, + "my_struct.nested_enum_field": my_pb2.AnotherEnum, + + # Nested field in a MAP column named 'my_map' + "my_map.key": my_pb2.MapKeyEnum, # If map keys were enums + "my_map.value": my_pb2.MapValueMessage, + + # PROTO field inside a STRUCT, where the STRUCT is the value in a MAP column + "struct_map.value.nested_proto_field": my_pb2.DeeplyNestedProto, + "struct_map.value.nested_enum_field": my_pb2.DeeplyNestedEnum + } + Returns: ExecuteQueryIterator: an asynchronous iterator that yields rows returned by the query Raises: @@ -564,6 +623,7 @@ def execute_query( google.api_core.exceptions.GoogleAPIError: raised if the request encounters an unrecoverable error google.cloud.bigtable.data.exceptions.ParameterTypeInferenceFailed: Raised if a parameter is passed without an explicit type, and the type cannot be infered + google.protobuf.message.DecodeError: raised if the deserialization of a PROTO/ENUM value fails. """ instance_name = self._gapic_client.instance_path(self.project, instance_id) converted_param_types = _to_param_types(parameters, parameter_types) @@ -615,6 +675,7 @@ def execute_query( attempt_timeout, operation_timeout, retryable_excs=retryable_excs, + column_info=column_info, ) def __enter__(self): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py index 74f01c60c055..41900bb12317 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py @@ -23,6 +23,8 @@ TYPE_CHECKING, ) from google.api_core import retry as retries +from google.protobuf.message import Message +from google.protobuf.internal.enum_type_wrapper import EnumTypeWrapper from google.cloud.bigtable.data.execute_query._byte_cursor import _ByteCursor from google.cloud.bigtable.data._helpers import ( @@ -87,6 +89,7 @@ def __init__( operation_timeout: float, req_metadata: Sequence[Tuple[str, str]] = (), retryable_excs: Sequence[type[Exception]] = (), + column_info: dict[str, Message | EnumTypeWrapper] | None = None, ) -> None: """ Collects responses from ExecuteQuery requests and parses them into QueryResultRows. @@ -107,6 +110,8 @@ def __init__( Failed requests will be retried within the budget req_metadata: metadata used while sending the gRPC request retryable_excs: a list of errors that will be retried if encountered. + column_info: dict with mappings between column names and additional column information + for protobuf deserialization. 
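+                Keys follow the same scheme as ``Client.execute_query``: top-level
+                column names, optionally extended with dotted paths such as
+                ``"col.field"``, ``"col.key"`` or ``"col.value"`` for nested
+                STRUCT/MAP members.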
Raises: {NO_LOOP} :class:`ValueError ` as a safeguard if data is processed in an unexpected state @@ -135,6 +140,7 @@ def __init__( exception_factory=_retry_exception_factory, ) self._req_metadata = req_metadata + self._column_info = column_info try: self._register_instance_task = CrossSync.create_task( self._client._register_instance, @@ -202,7 +208,9 @@ async def _next_impl(self) -> CrossSync.Iterator[QueryResultRow]: raise ValueError( "Error parsing response before finalizing metadata" ) - results = self._reader.consume(batches_to_parse, self.metadata) + results = self._reader.consume( + batches_to_parse, self.metadata, self._column_info + ) if results is None: continue diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_query_result_parsing_utils.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_query_result_parsing_utils.py index 4cb5db2911de..a43539e55de0 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_query_result_parsing_utils.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_query_result_parsing_utils.py @@ -11,8 +11,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations -from typing import Any, Callable, Dict, Type +from typing import Any, Callable, Dict, Type, Optional, Union + +from google.protobuf.message import Message +from google.protobuf.internal.enum_type_wrapper import EnumTypeWrapper from google.cloud.bigtable.data.execute_query.values import Struct from google.cloud.bigtable.data.execute_query.metadata import SqlType from google.cloud.bigtable_v2 import Value as PBValue @@ -30,24 +34,36 @@ SqlType.Struct: "array_value", SqlType.Array: "array_value", SqlType.Map: "array_value", + SqlType.Proto: "bytes_value", + SqlType.Enum: "int_value", } -def _parse_array_type(value: PBValue, metadata_type: SqlType.Array) -> Any: +def _parse_array_type( + value: PBValue, + metadata_type: SqlType.Array, + column_name: str | None, + column_info: dict[str, Message | EnumTypeWrapper] | None = None, +) -> list[Any]: """ used for parsing an array represented as a protobuf to a python list. """ return list( map( lambda val: _parse_pb_value_to_python_value( - val, metadata_type.element_type + val, metadata_type.element_type, column_name, column_info ), value.array_value.values, ) ) -def _parse_map_type(value: PBValue, metadata_type: SqlType.Map) -> Any: +def _parse_map_type( + value: PBValue, + metadata_type: SqlType.Map, + column_name: str | None, + column_info: dict[str, Message | EnumTypeWrapper] | None = None, +) -> dict[Any, Any]: """ used for parsing a map represented as a protobuf to a python dict. 
@@ -64,10 +80,16 @@ def _parse_map_type(value: PBValue, metadata_type: SqlType.Map) -> Any: map( lambda map_entry: ( _parse_pb_value_to_python_value( - map_entry.array_value.values[0], metadata_type.key_type + map_entry.array_value.values[0], + metadata_type.key_type, + f"{column_name}.key" if column_name is not None else None, + column_info, ), _parse_pb_value_to_python_value( - map_entry.array_value.values[1], metadata_type.value_type + map_entry.array_value.values[1], + metadata_type.value_type, + f"{column_name}.value" if column_name is not None else None, + column_info, ), ), value.array_value.values, @@ -77,7 +99,12 @@ def _parse_map_type(value: PBValue, metadata_type: SqlType.Map) -> Any: raise ValueError("Invalid map entry - less or more than two values.") -def _parse_struct_type(value: PBValue, metadata_type: SqlType.Struct) -> Struct: +def _parse_struct_type( + value: PBValue, + metadata_type: SqlType.Struct, + column_name: str | None, + column_info: dict[str, Message | EnumTypeWrapper] | None = None, +) -> Struct: """ used for parsing a struct represented as a protobuf to a google.cloud.bigtable.data.execute_query.Struct @@ -88,13 +115,27 @@ def _parse_struct_type(value: PBValue, metadata_type: SqlType.Struct) -> Struct: struct = Struct() for value, field in zip(value.array_value.values, metadata_type.fields): field_name, field_type = field - struct.add_field(field_name, _parse_pb_value_to_python_value(value, field_type)) + nested_column_name: str | None + if column_name and field_name: + # qualify the column name for nested lookups + nested_column_name = f"{column_name}.{field_name}" + else: + nested_column_name = None + struct.add_field( + field_name, + _parse_pb_value_to_python_value( + value, field_type, nested_column_name, column_info + ), + ) return struct def _parse_timestamp_type( - value: PBValue, metadata_type: SqlType.Timestamp + value: PBValue, + metadata_type: SqlType.Timestamp, + column_name: str | None, + column_info: dict[str, Message | EnumTypeWrapper] | None = None, ) -> DatetimeWithNanoseconds: """ used for parsing a timestamp represented as a protobuf to DatetimeWithNanoseconds @@ -102,15 +143,105 @@ def _parse_timestamp_type( return DatetimeWithNanoseconds.from_timestamp_pb(value.timestamp_value) -_TYPE_PARSERS: Dict[Type[SqlType.Type], Callable[[PBValue, Any], Any]] = { +def _parse_proto_type( + value: PBValue, + metadata_type: SqlType.Proto, + column_name: str | None, + column_info: dict[str, Message | EnumTypeWrapper] | None = None, +) -> Message | bytes: + """ + Parses a serialized protobuf message into a Message object using type information + provided in column_info. + + Args: + value: The value to parse, expected to have a bytes_value attribute. + metadata_type: The expected SQL type (Proto). + column_name: The name of the column. + column_info: (Optional) A dictionary mapping column names to their + corresponding Protobuf Message classes. This information is used + to deserialize the raw bytes. + + Returns: + A deserialized Protobuf Message object if parsing is successful. + If the required type information is not found in column_info, the function + returns the original serialized data as bytes (value.bytes_value). + This fallback ensures that the raw data is still accessible. + + Raises: + google.protobuf.message.DecodeError: If `value.bytes_value` cannot be + parsed as the Message type specified in `column_info`. 
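+
+    Example (illustrative; ``my_pb2`` is a hypothetical generated module and
+    ``value`` is a ``PBValue`` whose ``bytes_value`` holds a serialized message)::
+
+        # matching entry (a message instance) -> returns a deserialized my_pb2.MyMessage
+        _parse_proto_type(value, SqlType.Proto(), "col", {"col": my_pb2.MyMessage()})
+        # no entry for "col" (or no column_info at all) -> returns value.bytes_value unchanged
+        _parse_proto_type(value, SqlType.Proto(), "col", None)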
+ """ + if ( + column_name is not None + and column_info is not None + and column_info.get(column_name) is not None + ): + default_proto_message = column_info.get(column_name) + if isinstance(default_proto_message, Message): + proto_message = type(default_proto_message)() + proto_message.ParseFromString(value.bytes_value) + return proto_message + return value.bytes_value + + +def _parse_enum_type( + value: PBValue, + metadata_type: SqlType.Enum, + column_name: str | None, + column_info: dict[str, Message | EnumTypeWrapper] | None = None, +) -> int | str: + """ + Parses an integer value into a Protobuf enum name string using type information + provided in column_info. + + Args: + value: The value to parse, expected to have an int_value attribute. + metadata_type: The expected SQL type (Enum). + column_name: The name of the column. + column_info: (Optional) A dictionary mapping column names to their + corresponding Protobuf EnumTypeWrapper objects. This information + is used to convert the integer to an enum name. + + Returns: + A string representing the name of the enum value if conversion is successful. + If conversion fails for any reason, such as the required EnumTypeWrapper + not being found in column_info, or if an error occurs during the name lookup + (e.g., the integer is not a valid enum value), the function returns the + original integer value (value.int_value). This fallback ensures the + raw integer representation is still accessible. + """ + if ( + column_name is not None + and column_info is not None + and column_info.get(column_name) is not None + ): + proto_enum = column_info.get(column_name) + if isinstance(proto_enum, EnumTypeWrapper): + return proto_enum.Name(value.int_value) + return value.int_value + + +ParserCallable = Callable[ + [PBValue, Any, Optional[str], Optional[Dict[str, Union[Message, EnumTypeWrapper]]]], + Any, +] + +_TYPE_PARSERS: Dict[Type[SqlType.Type], ParserCallable] = { SqlType.Timestamp: _parse_timestamp_type, SqlType.Struct: _parse_struct_type, SqlType.Array: _parse_array_type, SqlType.Map: _parse_map_type, + SqlType.Proto: _parse_proto_type, + SqlType.Enum: _parse_enum_type, } -def _parse_pb_value_to_python_value(value: PBValue, metadata_type: SqlType.Type) -> Any: +def _parse_pb_value_to_python_value( + value: PBValue, + metadata_type: SqlType.Type, + column_name: str | None, + column_info: dict[str, Message | EnumTypeWrapper] | None = None, +) -> Any: """ used for converting the value represented as a protobufs to a python object. """ @@ -126,7 +257,7 @@ def _parse_pb_value_to_python_value(value: PBValue, metadata_type: SqlType.Type) if kind in _TYPE_PARSERS: parser = _TYPE_PARSERS[kind] - return parser(value, metadata_type) + return parser(value, metadata_type, column_name, column_info) elif kind in _REQUIRED_PROTO_FIELDS: field_name = _REQUIRED_PROTO_FIELDS[kind] return getattr(value, field_name) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_reader.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_reader.py index d9507fe350ca..467c2030fe67 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_reader.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_reader.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+from __future__ import annotations from typing import ( List, @@ -21,6 +22,8 @@ Sequence, ) from abc import ABC, abstractmethod +from google.protobuf.message import Message +from google.protobuf.internal.enum_type_wrapper import EnumTypeWrapper from google.cloud.bigtable_v2 import ProtoRows, Value as PBValue @@ -54,7 +57,10 @@ class _Reader(ABC, Generic[T]): @abstractmethod def consume( - self, batches_to_consume: List[bytes], metadata: Metadata + self, + batches_to_consume: List[bytes], + metadata: Metadata, + column_info: dict[str, Message | EnumTypeWrapper] | None = None, ) -> Optional[Iterable[T]]: """This method receives a list of batches of bytes to be parsed as ProtoRows messages. It then uses the metadata to group the values in the parsed messages into rows. Returns @@ -64,6 +70,8 @@ def consume( :meth:`google.cloud.bigtable.byte_cursor._ByteCursor.consume` method. metadata: metadata used to transform values to rows + column_info: (Optional) dict with mappings between column names and additional column information + for protobuf deserialization. Returns: Iterable[T] or None: Iterable if gathered values can form one or more instances of T, @@ -89,7 +97,10 @@ def _parse_proto_rows(self, bytes_to_parse: bytes) -> Iterable[PBValue]: return proto_rows.values def _construct_query_result_row( - self, values: Sequence[PBValue], metadata: Metadata + self, + values: Sequence[PBValue], + metadata: Metadata, + column_info: dict[str, Message | EnumTypeWrapper] | None = None, ) -> QueryResultRow: result = QueryResultRow() columns = metadata.columns @@ -99,12 +110,17 @@ def _construct_query_result_row( ), "This function should be called only when count of values matches count of columns." for column, value in zip(columns, values): - parsed_value = _parse_pb_value_to_python_value(value, column.column_type) + parsed_value = _parse_pb_value_to_python_value( + value, column.column_type, column.column_name, column_info + ) result.add_field(column.column_name, parsed_value) return result def consume( - self, batches_to_consume: List[bytes], metadata: Metadata + self, + batches_to_consume: List[bytes], + metadata: Metadata, + column_info: dict[str, Message | EnumTypeWrapper] | None = None, ) -> Optional[Iterable[QueryResultRow]]: num_columns = len(metadata.columns) rows = [] @@ -112,7 +128,11 @@ def consume( values = self._parse_proto_rows(batch_bytes) for row_data in batched(values, n=num_columns): if len(row_data) == num_columns: - rows.append(self._construct_query_result_row(row_data, metadata)) + rows.append( + self._construct_query_result_row( + row_data, metadata, column_info + ) + ) else: raise ValueError( "Unexpected error, recieved bad number of values. 
" diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_sync_autogen/execute_query_iterator.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_sync_autogen/execute_query_iterator.py index e819acda7281..6b29cbfe77f4 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_sync_autogen/execute_query_iterator.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_sync_autogen/execute_query_iterator.py @@ -18,6 +18,8 @@ from __future__ import annotations from typing import Any, Dict, Optional, Sequence, Tuple, TYPE_CHECKING from google.api_core import retry as retries +from google.protobuf.message import Message +from google.protobuf.internal.enum_type_wrapper import EnumTypeWrapper from google.cloud.bigtable.data.execute_query._byte_cursor import _ByteCursor from google.cloud.bigtable.data._helpers import ( _attempt_timeout_generator, @@ -63,6 +65,7 @@ def __init__( operation_timeout: float, req_metadata: Sequence[Tuple[str, str]] = (), retryable_excs: Sequence[type[Exception]] = (), + column_info: dict[str, Message | EnumTypeWrapper] | None = None, ) -> None: """Collects responses from ExecuteQuery requests and parses them into QueryResultRows. @@ -82,6 +85,8 @@ def __init__( Failed requests will be retried within the budget req_metadata: metadata used while sending the gRPC request retryable_excs: a list of errors that will be retried if encountered. + column_info: dict with mappings between column names and additional column information + for protobuf deserialization. Raises: None :class:`ValueError ` as a safeguard if data is processed in an unexpected state @@ -110,6 +115,7 @@ def __init__( exception_factory=_retry_exception_factory, ) self._req_metadata = req_metadata + self._column_info = column_info try: self._register_instance_task = CrossSync._Sync_Impl.create_task( self._client._register_instance, @@ -164,7 +170,9 @@ def _next_impl(self) -> CrossSync._Sync_Impl.Iterator[QueryResultRow]: raise ValueError( "Error parsing response before finalizing metadata" ) - results = self._reader.consume(batches_to_parse, self.metadata) + results = self._reader.consume( + batches_to_parse, self.metadata, self._column_info + ) if results is None: continue except ValueError as e: diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/metadata.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/metadata.py index 2fd66947d339..74b6cb836688 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/metadata.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/metadata.py @@ -296,6 +296,28 @@ def _to_value_pb_dict(self, value: Any) -> Dict[str, Any]: ) } + class Proto(Type): + """Proto SQL type.""" + + type_field_name = "proto_type" + + def _to_value_pb_dict(self, value: Any): + raise NotImplementedError("Proto is not supported as a query parameter") + + def _to_type_pb_dict(self) -> Dict[str, Any]: + raise NotImplementedError("Proto is not supported as a query parameter") + + class Enum(Type): + """Enum SQL type.""" + + type_field_name = "enum_type" + + def _to_value_pb_dict(self, value: Any): + raise NotImplementedError("Enum is not supported as a query parameter") + + def _to_type_pb_dict(self) -> Dict[str, Any]: + raise NotImplementedError("Enum is not supported as a query parameter") + class Metadata: """ @@ -388,6 +410,8 @@ def _pb_metadata_to_metadata_types( "bool_type": 
SqlType.Bool, "timestamp_type": SqlType.Timestamp, "date_type": SqlType.Date, + "proto_type": SqlType.Proto, + "enum_type": SqlType.Enum, "struct_type": SqlType.Struct, "array_type": SqlType.Array, "map_type": SqlType.Map, diff --git a/packages/google-cloud-bigtable/samples/testdata/README.md b/packages/google-cloud-bigtable/samples/testdata/README.md new file mode 100644 index 000000000000..57520179f2dc --- /dev/null +++ b/packages/google-cloud-bigtable/samples/testdata/README.md @@ -0,0 +1,5 @@ +#### To generate singer_pb2.py and descriptors.pb file from singer.proto using `protoc` +```shell +cd samples +protoc --proto_path=testdata/ --include_imports --descriptor_set_out=testdata/descriptors.pb --python_out=testdata/ testdata/singer.proto +``` \ No newline at end of file diff --git a/packages/google-cloud-bigtable/samples/testdata/descriptors.pb b/packages/google-cloud-bigtable/samples/testdata/descriptors.pb new file mode 100644 index 0000000000000000000000000000000000000000..bddf04de378263f791d1d7f558e97f934b281d2b GIT binary patch literal 182 zcmd5Zl#{BLTUwl%tQ5q>77SJ> zB*ev%mzbL>!KlEf!5IW*3z=}Srl;l=rAjaX1^JBR^l%uX=MGX81W~M|$HfZf3$b%C o2lxjFFbHvQv3NN~MF}v1SZ@A4-U3V@R*=85w*Yez8`zD;07g_Xr2qf` literal 0 HcmV?d00001 diff --git a/packages/google-cloud-bigtable/samples/testdata/singer.proto b/packages/google-cloud-bigtable/samples/testdata/singer.proto new file mode 100644 index 000000000000..d60e0dfb3b2a --- /dev/null +++ b/packages/google-cloud-bigtable/samples/testdata/singer.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; + +package examples.bigtable.music; + +enum Genre { + POP = 0; + JAZZ = 1; + FOLK = 2; + ROCK = 3; +} + +message Singer { + string name = 1; + Genre genre = 2; +} diff --git a/packages/google-cloud-bigtable/samples/testdata/singer_pb2.py b/packages/google-cloud-bigtable/samples/testdata/singer_pb2.py new file mode 100644 index 000000000000..d2a328df0e9a --- /dev/null +++ b/packages/google-cloud-bigtable/samples/testdata/singer_pb2.py @@ -0,0 +1,27 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: singer.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0csinger.proto\x12\x17\x65xamples.bigtable.music\"E\n\x06Singer\x12\x0c\n\x04name\x18\x01 \x01(\t\x12-\n\x05genre\x18\x02 \x01(\x0e\x32\x1e.examples.bigtable.music.Genre*.\n\x05Genre\x12\x07\n\x03POP\x10\x00\x12\x08\n\x04JAZZ\x10\x01\x12\x08\n\x04\x46OLK\x10\x02\x12\x08\n\x04ROCK\x10\x03\x62\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'singer_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + _GENRE._serialized_start=112 + _GENRE._serialized_end=158 + _SINGER._serialized_start=41 + _SINGER._serialized_end=110 +# @@protoc_insertion_point(module_scope) diff --git a/packages/google-cloud-bigtable/tests/system/data/test_system_autogen.py b/packages/google-cloud-bigtable/tests/system/data/test_system_autogen.py index a78a8eb4c141..44895808a9ab 100644 --- a/packages/google-cloud-bigtable/tests/system/data/test_system_autogen.py +++ b/packages/google-cloud-bigtable/tests/system/data/test_system_autogen.py @@ -258,7 +258,7 @@ def test_mutation_set_cell(self, target, temp_rows): """Ensure cells can be set properly""" row_key = b"bulk_mutate" new_value = uuid.uuid4().hex.encode() - row_key, mutation = self._create_row_and_mutation( + (row_key, mutation) = self._create_row_and_mutation( target, temp_rows, new_value=new_value ) target.mutate_row(row_key, mutation) @@ -312,7 +312,7 @@ def test_bulk_mutations_set_cell(self, client, target, temp_rows): from google.cloud.bigtable.data.mutations import RowMutationEntry new_value = uuid.uuid4().hex.encode() - row_key, mutation = self._create_row_and_mutation( + (row_key, mutation) = self._create_row_and_mutation( target, temp_rows, new_value=new_value ) bulk_mutation = RowMutationEntry(row_key, [mutation]) @@ -347,11 +347,11 @@ def test_mutations_batcher_context_manager(self, client, target, temp_rows): """test batcher with context manager. 
Should flush on exit""" from google.cloud.bigtable.data.mutations import RowMutationEntry - new_value, new_value2 = [uuid.uuid4().hex.encode() for _ in range(2)] - row_key, mutation = self._create_row_and_mutation( + (new_value, new_value2) = [uuid.uuid4().hex.encode() for _ in range(2)] + (row_key, mutation) = self._create_row_and_mutation( target, temp_rows, new_value=new_value ) - row_key2, mutation2 = self._create_row_and_mutation( + (row_key2, mutation2) = self._create_row_and_mutation( target, temp_rows, new_value=new_value2 ) bulk_mutation = RowMutationEntry(row_key, [mutation]) @@ -372,7 +372,7 @@ def test_mutations_batcher_timer_flush(self, client, target, temp_rows): from google.cloud.bigtable.data.mutations import RowMutationEntry new_value = uuid.uuid4().hex.encode() - row_key, mutation = self._create_row_and_mutation( + (row_key, mutation) = self._create_row_and_mutation( target, temp_rows, new_value=new_value ) bulk_mutation = RowMutationEntry(row_key, [mutation]) @@ -394,12 +394,12 @@ def test_mutations_batcher_count_flush(self, client, target, temp_rows): """batch should flush after flush_limit_mutation_count mutations""" from google.cloud.bigtable.data.mutations import RowMutationEntry - new_value, new_value2 = [uuid.uuid4().hex.encode() for _ in range(2)] - row_key, mutation = self._create_row_and_mutation( + (new_value, new_value2) = [uuid.uuid4().hex.encode() for _ in range(2)] + (row_key, mutation) = self._create_row_and_mutation( target, temp_rows, new_value=new_value ) bulk_mutation = RowMutationEntry(row_key, [mutation]) - row_key2, mutation2 = self._create_row_and_mutation( + (row_key2, mutation2) = self._create_row_and_mutation( target, temp_rows, new_value=new_value2 ) bulk_mutation2 = RowMutationEntry(row_key2, [mutation2]) @@ -426,12 +426,12 @@ def test_mutations_batcher_bytes_flush(self, client, target, temp_rows): """batch should flush after flush_limit_bytes bytes""" from google.cloud.bigtable.data.mutations import RowMutationEntry - new_value, new_value2 = [uuid.uuid4().hex.encode() for _ in range(2)] - row_key, mutation = self._create_row_and_mutation( + (new_value, new_value2) = [uuid.uuid4().hex.encode() for _ in range(2)] + (row_key, mutation) = self._create_row_and_mutation( target, temp_rows, new_value=new_value ) bulk_mutation = RowMutationEntry(row_key, [mutation]) - row_key2, mutation2 = self._create_row_and_mutation( + (row_key2, mutation2) = self._create_row_and_mutation( target, temp_rows, new_value=new_value2 ) bulk_mutation2 = RowMutationEntry(row_key2, [mutation2]) @@ -457,11 +457,11 @@ def test_mutations_batcher_no_flush(self, client, target, temp_rows): new_value = uuid.uuid4().hex.encode() start_value = b"unchanged" - row_key, mutation = self._create_row_and_mutation( + (row_key, mutation) = self._create_row_and_mutation( target, temp_rows, start_value=start_value, new_value=new_value ) bulk_mutation = RowMutationEntry(row_key, [mutation]) - row_key2, mutation2 = self._create_row_and_mutation( + (row_key2, mutation2) = self._create_row_and_mutation( target, temp_rows, start_value=start_value, new_value=new_value ) bulk_mutation2 = RowMutationEntry(row_key2, [mutation2]) diff --git a/packages/google-cloud-bigtable/tests/unit/data/execute_query/sql_helpers.py b/packages/google-cloud-bigtable/tests/unit/data/execute_query/sql_helpers.py index 5d5569dba5ef..119bb2d50862 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/execute_query/sql_helpers.py +++ b/packages/google-cloud-bigtable/tests/unit/data/execute_query/sql_helpers.py @@ 
-204,6 +204,18 @@ def date_type() -> Type: return t +def proto_type() -> Type: + t = Type() + t.proto_type = {} + return t + + +def enum_type() -> Type: + t = Type() + t.enum_type = {} + return t + + def array_type(elem_type: Type) -> Type: t = Type() arr_type = Type.Array() diff --git a/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_execute_query_parameters_parsing.py b/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_execute_query_parameters_parsing.py index ee03222725be..0a1be14232c9 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_execute_query_parameters_parsing.py +++ b/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_execute_query_parameters_parsing.py @@ -25,6 +25,7 @@ from google.cloud.bigtable.data.execute_query.metadata import SqlType from google.cloud.bigtable.data.execute_query.values import Struct from google.protobuf import timestamp_pb2 +from samples.testdata import singer_pb2 timestamp = int( datetime.datetime(2024, 5, 12, 17, 44, 12, tzinfo=datetime.timezone.utc).timestamp() @@ -267,6 +268,18 @@ def test_execute_query_parameters_not_supported_types(): {"test1": SqlType.Struct([("field1", SqlType.Int64())])}, ) + with pytest.raises(NotImplementedError, match="not supported"): + _format_execute_query_params( + {"test1": singer_pb2.Singer()}, + {"test1": SqlType.Proto()}, + ) + + with pytest.raises(NotImplementedError, match="not supported"): + _format_execute_query_params( + {"test1": singer_pb2.Genre.ROCK}, + {"test1": SqlType.Enum()}, + ) + def test_instance_execute_query_parameters_not_match(): with pytest.raises(ValueError, match="test2"): diff --git a/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_query_result_parsing_utils.py b/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_query_result_parsing_utils.py index 627570c3782f..ea03dfe9ae17 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_query_result_parsing_utils.py +++ b/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_query_result_parsing_utils.py @@ -28,7 +28,8 @@ import datetime -from tests.unit.data.execute_query.sql_helpers import int64_type +from tests.unit.data.execute_query.sql_helpers import int64_type, proto_type, enum_type +from samples.testdata import singer_pb2 TYPE_BYTES = {"bytes_type": {}} TYPE_TIMESTAMP = {"timestamp_type": {}} @@ -82,9 +83,61 @@ def test_basic_types( assert type(metadata_type) is expected_metadata_type value = PBValue(value_dict) assert ( - _parse_pb_value_to_python_value(value._pb, metadata_type) == expected_value + _parse_pb_value_to_python_value(value._pb, metadata_type, "my_field") + == expected_value ) + def test__proto(self): + _type = PBType({"proto_type": {}}) + metadata_type = _pb_type_to_metadata_type(_type) + assert type(metadata_type) is SqlType.Proto + + singer = singer_pb2.Singer(name="John") + value = PBValue({"bytes_value": singer.SerializeToString()}) + + # without proto definition + result = _parse_pb_value_to_python_value( + value._pb, metadata_type, "proto_field" + ) + assert result == singer.SerializeToString() + result = _parse_pb_value_to_python_value( + value._pb, + metadata_type, + None, + {"proto_field": singer_pb2.Singer()}, + ) + assert result == singer.SerializeToString() + + # with proto definition + result = _parse_pb_value_to_python_value( + value._pb, + metadata_type, + "proto_field", + {"proto_field": singer_pb2.Singer()}, + ) + assert result == singer + + def test__enum(self): + _type = 
PBType({"enum_type": {}}) + metadata_type = _pb_type_to_metadata_type(_type) + assert type(metadata_type) is SqlType.Enum + + value = PBValue({"int_value": 1}) + + # without enum definition + result = _parse_pb_value_to_python_value(value._pb, metadata_type, "enum_field") + assert result == 1 + result = _parse_pb_value_to_python_value( + value._pb, metadata_type, None, {"enum_field": singer_pb2.Genre} + ) + assert result == 1 + + # with enum definition + result = _parse_pb_value_to_python_value( + value._pb, metadata_type, "enum_field", {"enum_field": singer_pb2.Genre} + ) + assert result == "JAZZ" + # Larger test cases were extracted for readability def test__array(self): _type = PBType({"array_type": {"element_type": int64_type()}}) @@ -103,7 +156,79 @@ def test__array(self): } } ) - assert _parse_pb_value_to_python_value(value._pb, metadata_type) == [1, 2, 3, 4] + assert _parse_pb_value_to_python_value( + value._pb, metadata_type, "array_field" + ) == [1, 2, 3, 4] + + def test__array_of_protos(self): + _type = PBType({"array_type": {"element_type": proto_type()}}) + metadata_type = _pb_type_to_metadata_type(_type) + assert type(metadata_type) is SqlType.Array + assert type(metadata_type.element_type) is SqlType.Proto + + singer1 = singer_pb2.Singer(name="John") + singer2 = singer_pb2.Singer(name="Taylor") + value = PBValue( + { + "array_value": { + "values": [ + {"bytes_value": singer1.SerializeToString()}, + {"bytes_value": singer2.SerializeToString()}, + ] + } + } + ) + + # without proto definition + result = _parse_pb_value_to_python_value( + value._pb, metadata_type, "array_field" + ) + assert result == [singer1.SerializeToString(), singer2.SerializeToString()] + result = _parse_pb_value_to_python_value( + value._pb, metadata_type, None, {"array_field": singer_pb2.Singer()} + ) + assert result == [singer1.SerializeToString(), singer2.SerializeToString()] + + # with proto definition + result = _parse_pb_value_to_python_value( + value._pb, + metadata_type, + "array_field", + {"array_field": singer_pb2.Singer()}, + ) + assert result == [singer1, singer2] + + def test__array_of_enums(self): + _type = PBType({"array_type": {"element_type": enum_type()}}) + metadata_type = _pb_type_to_metadata_type(_type) + assert type(metadata_type) is SqlType.Array + assert type(metadata_type.element_type) is SqlType.Enum + + value = PBValue( + { + "array_value": { + "values": [ + {"int_value": 0}, # POP + {"int_value": 1}, # JAZZ + ] + } + } + ) + + # without enum definition + result = _parse_pb_value_to_python_value( + value._pb, metadata_type, "array_field" + ) + assert result == [0, 1] + + # with enum definition + result = _parse_pb_value_to_python_value( + value._pb, + metadata_type, + "array_field", + {"array_field": singer_pb2.Genre}, + ) + assert result == ["POP", "JAZZ"] def test__struct(self): _type = PBType( @@ -164,7 +289,9 @@ def test__struct(self): with pytest.raises(KeyError, match="Ambigious field name"): metadata_type["field3"] - result = _parse_pb_value_to_python_value(value._pb, metadata_type) + result = _parse_pb_value_to_python_value( + value._pb, metadata_type, "struct_field" + ) assert isinstance(result, Struct) assert result["field1"] == result[0] == 1 assert result[1] == "test2" @@ -177,6 +304,87 @@ def test__struct(self): assert result[2] == [2, 3, 4, 5] assert result[3] == "test4" + def test__struct_with_proto_and_enum(self): + singer1 = singer_pb2.Singer(name="John") + singer2 = singer_pb2.Singer(name="Taylor") + _type = PBType( + { + "struct_type": { + "fields": [ + { + 
"field_name": "field1", + "type_": proto_type(), + }, + { + "field_name": None, + "type_": proto_type(), + }, + { + "field_name": "field2", + "type_": enum_type(), + }, + { + "field_name": None, + "type_": enum_type(), + }, + ] + } + } + ) + value = PBValue( + { + "array_value": { + "values": [ + {"bytes_value": singer1.SerializeToString()}, + {"bytes_value": singer2.SerializeToString()}, + {"int_value": 0}, + {"int_value": 1}, + ] + } + } + ) + + metadata_type = _pb_type_to_metadata_type(_type) + assert type(metadata_type) is SqlType.Struct + assert type(metadata_type["field1"]) is SqlType.Proto + assert type(metadata_type["field2"]) is SqlType.Enum + assert type(metadata_type[0]) is SqlType.Proto + assert type(metadata_type[1]) is SqlType.Proto + assert type(metadata_type[2]) is SqlType.Enum + assert type(metadata_type[3]) is SqlType.Enum + + # without proto definition + result = _parse_pb_value_to_python_value( + value._pb, metadata_type, "struct_field" + ) + assert isinstance(result, Struct) + assert result["field1"] == singer1.SerializeToString() + assert result["field2"] == 0 + assert result[0] == singer1.SerializeToString() + assert result[1] == singer2.SerializeToString() + assert result[2] == 0 + assert result[3] == 1 + + # with proto definition + result = _parse_pb_value_to_python_value( + value._pb, + metadata_type, + "struct_field", + { + "struct_field.field1": singer_pb2.Singer(), + "struct_field.field2": singer_pb2.Genre, + }, + ) + assert isinstance(result, Struct) + assert result["field1"] == singer1 + assert result["field2"] == "POP" + assert result[0] == singer1 + # unnamed proto fields won't get parsed + assert result[1] == singer2.SerializeToString() + assert result[2] == "POP" + # unnamed enum fields won't get parsed + assert result[3] == 1 + def test__array_of_structs(self): _type = PBType( { @@ -254,7 +462,9 @@ def test__array_of_structs(self): assert type(metadata_type.element_type[1]) is SqlType.String assert type(metadata_type.element_type["field3"]) is SqlType.Bool - result = _parse_pb_value_to_python_value(value._pb, metadata_type) + result = _parse_pb_value_to_python_value( + value._pb, metadata_type, "array_field" + ) assert isinstance(result, list) assert len(result) == 4 @@ -278,6 +488,106 @@ def test__array_of_structs(self): assert result[3][1] == "test4" assert not result[3]["field3"] + def test__array_of_structs_with_proto_and_enum(self): + singer1 = singer_pb2.Singer(name="John") + singer2 = singer_pb2.Singer(name="Taylor") + _type = PBType( + { + "array_type": { + "element_type": { + "struct_type": { + "fields": [ + { + "field_name": "proto_field", + "type_": proto_type(), + }, + { + "field_name": "enum_field", + "type_": enum_type(), + }, + { + "field_name": None, + "type_": proto_type(), + }, + ] + } + } + } + } + ) + value = PBValue( + { + "array_value": { + "values": [ + { + "array_value": { + "values": [ + {"bytes_value": singer1.SerializeToString()}, + {"int_value": 0}, # POP + {"bytes_value": singer1.SerializeToString()}, + ] + } + }, + { + "array_value": { + "values": [ + {"bytes_value": singer2.SerializeToString()}, + {"int_value": 1}, # JAZZ + {"bytes_value": singer2.SerializeToString()}, + ] + } + }, + ] + } + } + ) + + metadata_type = _pb_type_to_metadata_type(_type) + assert type(metadata_type) is SqlType.Array + assert type(metadata_type.element_type) is SqlType.Struct + assert type(metadata_type.element_type["proto_field"]) is SqlType.Proto + assert type(metadata_type.element_type["enum_field"]) is SqlType.Enum + assert 
type(metadata_type.element_type[2]) is SqlType.Proto + + # without proto definition + result = _parse_pb_value_to_python_value( + value._pb, metadata_type, "array_field" + ) + assert isinstance(result, list) + assert len(result) == 2 + assert isinstance(result[0], Struct) + assert result[0]["proto_field"] == singer1.SerializeToString() + assert result[0]["enum_field"] == 0 + assert result[0][2] == singer1.SerializeToString() + assert isinstance(result[1], Struct) + assert result[1]["proto_field"] == singer2.SerializeToString() + assert result[1]["enum_field"] == 1 + assert result[1][2] == singer2.SerializeToString() + + # with proto definition + result = _parse_pb_value_to_python_value( + value._pb, + metadata_type, + "array_field", + { + "array_field.proto_field": singer_pb2.Singer(), + "array_field.enum_field": singer_pb2.Genre, + "array_field": singer_pb2.Singer(), # unused + }, + ) + assert isinstance(result, list) + assert len(result) == 2 + assert isinstance(result[0], Struct) + assert result[0]["proto_field"] == singer1 + assert result[0]["enum_field"] == "POP" + # unnamed proto fields won't get parsed + assert result[0][2] == singer1.SerializeToString() + assert isinstance(result[1], Struct) + assert result[1]["proto_field"] == singer2 + assert result[1]["enum_field"] == "JAZZ" + # unnamed proto fields won't get parsed + assert result[1][2] == singer2.SerializeToString() + def test__map(self): _type = PBType( { @@ -333,7 +643,7 @@ def test__map(self): assert type(metadata_type.key_type) is SqlType.Int64 assert type(metadata_type.value_type) is SqlType.String - result = _parse_pb_value_to_python_value(value._pb, metadata_type) + result = _parse_pb_value_to_python_value(value._pb, metadata_type, "map_field") assert isinstance(result, dict) assert len(result) == 4 @@ -387,13 +697,135 @@ def test__map_repeated_values(self): ) metadata_type = _pb_type_to_metadata_type(_type) - result = _parse_pb_value_to_python_value(value._pb, metadata_type) + result = _parse_pb_value_to_python_value(value._pb, metadata_type, "map_field") assert len(result) == 1 assert result == { 1: "test3", } + def test__map_with_protos(self): + singer1 = singer_pb2.Singer(name="John") + singer2 = singer_pb2.Singer(name="Taylor") + _type = PBType( + { + "map_type": { + "key_type": int64_type(), + "value_type": proto_type(), + } + } + ) + value = PBValue( + { + "array_value": { + "values": [ + { + "array_value": { + "values": [ + {"int_value": 1}, + {"bytes_value": singer1.SerializeToString()}, + ] + } + }, + { + "array_value": { + "values": [ + {"int_value": 2}, + {"bytes_value": singer2.SerializeToString()}, + ] + } + }, + ] + } + } + ) + + metadata_type = _pb_type_to_metadata_type(_type) + assert type(metadata_type) is SqlType.Map + assert type(metadata_type.key_type) is SqlType.Int64 + assert type(metadata_type.value_type) is SqlType.Proto + + # without proto definition + result = _parse_pb_value_to_python_value(value._pb, metadata_type, "map_field") + assert isinstance(result, dict) + assert len(result) == 2 + assert result[1] == singer1.SerializeToString() + assert result[2] == singer2.SerializeToString() + + # with proto definition + result = _parse_pb_value_to_python_value( + value._pb, + metadata_type, + "map_field", + { + "map_field.value": singer_pb2.Singer(), + }, + ) + assert isinstance(result, dict) + assert len(result) == 2 + assert result[1] == singer1 + assert result[2] == singer2 + + def test__map_with_enums(self): + _type = PBType( + { + "map_type": { + "key_type": int64_type(), + "value_type": 
enum_type(), + } + } + ) + value = PBValue( + { + "array_value": { + "values": [ + { + "array_value": { + "values": [ + {"int_value": 1}, + {"int_value": 0}, # POP + ] + } + }, + { + "array_value": { + "values": [ + {"int_value": 2}, + {"int_value": 1}, # JAZZ + ] + } + }, + ] + } + } + ) + + metadata_type = _pb_type_to_metadata_type(_type) + assert type(metadata_type) is SqlType.Map + assert type(metadata_type.key_type) is SqlType.Int64 + assert type(metadata_type.value_type) is SqlType.Enum + + # without enum definition + result = _parse_pb_value_to_python_value(value._pb, metadata_type, "map_field") + assert isinstance(result, dict) + assert len(result) == 2 + assert result[1] == 0 + assert result[2] == 1 + + # with enum definition + result = _parse_pb_value_to_python_value( + value._pb, + metadata_type, + "map_field", + { + "map_field.value": singer_pb2.Genre, + }, + ) + assert isinstance(result, dict) + assert len(result) == 2 + assert result[1] == "POP" + assert result[2] == "JAZZ" + def test__map_of_maps_of_structs(self): _type = PBType( { @@ -539,7 +971,7 @@ def test__map_of_maps_of_structs(self): assert type(metadata_type.value_type.value_type) is SqlType.Struct assert type(metadata_type.value_type.value_type["field1"]) is SqlType.Int64 assert type(metadata_type.value_type.value_type["field2"]) is SqlType.String - result = _parse_pb_value_to_python_value(value._pb, metadata_type) + result = _parse_pb_value_to_python_value(value._pb, metadata_type, "map_field") assert result[1]["1_1"]["field1"] == 1 assert result[1]["1_1"]["field2"] == "test1" @@ -553,23 +985,31 @@ def test__map_of_maps_of_structs(self): assert result[2]["2_2"]["field1"] == 4 assert result[2]["2_2"]["field2"] == "test4" - def test__map_of_lists_of_structs(self): + def test__map_of_maps_of_structs_with_proto_and_enum(self): + singer1 = singer_pb2.Singer(name="John") + singer2 = singer_pb2.Singer(name="Taylor") + _type = PBType( { "map_type": { - "key_type": TYPE_BYTES, + "key_type": int64_type(), "value_type": { - "array_type": { - "element_type": { + "map_type": { + "key_type": {"string_type": {}}, + "value_type": { "struct_type": { "fields": [ { - "field_name": "timestamp", - "type_": TYPE_TIMESTAMP, + "field_name": "int_field", + "type_": int64_type(), }, { - "field_name": "value", - "type_": TYPE_BYTES, + "field_name": "singer", + "type_": proto_type(), + }, + { + "field_name": "genre", + "type_": enum_type(), }, ] } @@ -582,20 +1022,225 @@ def test__map_of_lists_of_structs(self): value = PBValue( { "array_value": { - "values": [ # list of (byte, list) tuples + "values": [ # list of (int, map) tuples { "array_value": { - "values": [ # (byte, list) tuple - {"bytes_value": b"key1"}, + "values": [ # (int, map) tuples + {"int_value": 1}, { "array_value": { - "values": [ # list of structs + "values": [ # list of (str, struct) tuples { "array_value": { - "values": [ # (timestamp, bytes) tuple + "values": [ # (str, struct) tuples + {"string_value": "1_1"}, { - "timestamp_value": { - "seconds": 1111111111 + "array_value": { + "values": [ + { + "int_value": 12 + }, + { + "bytes_value": singer1.SerializeToString() + }, + { + "int_value": 0 + }, + ] + } + }, + ] + } + }, + { + "array_value": { + "values": [ # (str, struct) tuples + {"string_value": "1_2"}, + { + "array_value": { + "values": [ + { + "int_value": 34 + }, + { + "bytes_value": singer2.SerializeToString() + }, + { + "int_value": 1 + }, + ] + } + }, + ] + } + }, + ] + } + }, + ] + } + }, + { + "array_value": { + "values": [ # (int, map) tuples + {"int_value": 
2}, + { + "array_value": { + "values": [ # list of (str, struct) tuples + { + "array_value": { + "values": [ # (str, struct) tuples + {"string_value": "2_1"}, + { + "array_value": { + "values": [ + { + "int_value": 56 + }, + { + "bytes_value": singer1.SerializeToString() + }, + { + "int_value": 2 + }, + ] + } + }, + ] + } + }, + { + "array_value": { + "values": [ # (str, struct) tuples + {"string_value": "2_2"}, + { + "array_value": { + "values": [ + { + "int_value": 78 + }, + { + "bytes_value": singer2.SerializeToString() + }, + { + "int_value": 3 + }, + ] + } + }, + ] + } + }, + ] + } + }, + ] + } + }, + ] + } + } + ) + + metadata_type = _pb_type_to_metadata_type(_type) + assert type(metadata_type) is SqlType.Map + assert type(metadata_type.key_type) is SqlType.Int64 + assert type(metadata_type.value_type) is SqlType.Map + assert type(metadata_type.value_type.key_type) is SqlType.String + assert type(metadata_type.value_type.value_type) is SqlType.Struct + assert type(metadata_type.value_type.value_type["int_field"]) is SqlType.Int64 + assert type(metadata_type.value_type.value_type["singer"]) is SqlType.Proto + assert type(metadata_type.value_type.value_type["genre"]) is SqlType.Enum + + # without proto definition + result = _parse_pb_value_to_python_value(value._pb, metadata_type, "map_field") + + assert result[1]["1_1"]["int_field"] == 12 + assert result[1]["1_1"]["singer"] == singer1.SerializeToString() + assert result[1]["1_1"]["genre"] == 0 + + assert result[1]["1_2"]["int_field"] == 34 + assert result[1]["1_2"]["singer"] == singer2.SerializeToString() + assert result[1]["1_2"]["genre"] == 1 + + assert result[2]["2_1"]["int_field"] == 56 + assert result[2]["2_1"]["singer"] == singer1.SerializeToString() + assert result[2]["2_1"]["genre"] == 2 + + assert result[2]["2_2"]["int_field"] == 78 + assert result[2]["2_2"]["singer"] == singer2.SerializeToString() + assert result[2]["2_2"]["genre"] == 3 + + # with proto definition + result = _parse_pb_value_to_python_value( + value._pb, + metadata_type, + "map_field", + { + "map_field.value.value.singer": singer_pb2.Singer(), + "map_field.value.value.genre": singer_pb2.Genre, + }, + ) + + assert result[1]["1_1"]["int_field"] == 12 + assert result[1]["1_1"]["singer"] == singer1 + assert result[1]["1_1"]["genre"] == "POP" + + assert result[1]["1_2"]["int_field"] == 34 + assert result[1]["1_2"]["singer"] == singer2 + assert result[1]["1_2"]["genre"] == "JAZZ" + + assert result[2]["2_1"]["int_field"] == 56 + assert result[2]["2_1"]["singer"] == singer1 + assert result[2]["2_1"]["genre"] == "FOLK" + + assert result[2]["2_2"]["int_field"] == 78 + assert result[2]["2_2"]["singer"] == singer2 + assert result[2]["2_2"]["genre"] == "ROCK" + + def test__map_of_lists_of_structs(self): + _type = PBType( + { + "map_type": { + "key_type": TYPE_BYTES, + "value_type": { + "array_type": { + "element_type": { + "struct_type": { + "fields": [ + { + "field_name": "timestamp", + "type_": TYPE_TIMESTAMP, + }, + { + "field_name": "value", + "type_": TYPE_BYTES, + }, + ] + } + }, + } + }, + } + } + ) + value = PBValue( + { + "array_value": { + "values": [ # list of (byte, list) tuples + { + "array_value": { + "values": [ # (byte, list) tuple + {"bytes_value": b"key1"}, + { + "array_value": { + "values": [ # list of structs + { + "array_value": { + "values": [ # (timestamp, bytes) tuple + { + "timestamp_value": { + "seconds": 1111111111 } }, { @@ -679,7 +1324,7 @@ def test__map_of_lists_of_structs(self): is SqlType.Timestamp ) assert 
type(metadata_type.value_type.element_type["value"]) is SqlType.Bytes - result = _parse_pb_value_to_python_value(value._pb, metadata_type) + result = _parse_pb_value_to_python_value(value._pb, metadata_type, "map_field") timestamp1 = DatetimeWithNanoseconds( 2005, 3, 18, 1, 58, 31, tzinfo=datetime.timezone.utc @@ -703,6 +1348,341 @@ def test__map_of_lists_of_structs(self): assert result[b"key2"][1]["timestamp"] == timestamp4 assert result[b"key2"][1]["value"] == b"key2-value2" + def test__map_of_lists_of_structs_with_protos(self): + singer1 = singer_pb2.Singer(name="John") + singer2 = singer_pb2.Singer(name="Taylor") + singer3 = singer_pb2.Singer(name="Jay") + singer4 = singer_pb2.Singer(name="Eric") + + _type = PBType( + { + "map_type": { + "key_type": TYPE_BYTES, + "value_type": { + "array_type": { + "element_type": { + "struct_type": { + "fields": [ + { + "field_name": "timestamp", + "type_": TYPE_TIMESTAMP, + }, + { + "field_name": "value", + "type_": proto_type(), + }, + ] + } + }, + } + }, + } + } + ) + value = PBValue( + { + "array_value": { + "values": [ # list of (byte, list) tuples + { + "array_value": { + "values": [ # (byte, list) tuple + {"bytes_value": b"key1"}, + { + "array_value": { + "values": [ # list of structs + { + "array_value": { + "values": [ # (timestamp, bytes) tuple + { + "timestamp_value": { + "seconds": 1111111111 + } + }, + { + "bytes_value": singer1.SerializeToString() + }, + ] + } + }, + { + "array_value": { + "values": [ # (timestamp, bytes) tuple + { + "timestamp_value": { + "seconds": 2222222222 + } + }, + { + "bytes_value": singer2.SerializeToString() + }, + ] + } + }, + ] + } + }, + ] + } + }, + { + "array_value": { + "values": [ # (byte, list) tuple + {"bytes_value": b"key2"}, + { + "array_value": { + "values": [ # list of structs + { + "array_value": { + "values": [ # (timestamp, bytes) tuple + { + "timestamp_value": { + "seconds": 3333333333 + } + }, + { + "bytes_value": singer3.SerializeToString() + }, + ] + } + }, + { + "array_value": { + "values": [ # (timestamp, bytes) tuple + { + "timestamp_value": { + "seconds": 4444444444 + } + }, + { + "bytes_value": singer4.SerializeToString() + }, + ] + } + }, + ] + } + }, + ] + } + }, + ] + } + } + ) + metadata_type = _pb_type_to_metadata_type(_type) + assert type(metadata_type) is SqlType.Map + assert type(metadata_type.key_type) is SqlType.Bytes + assert type(metadata_type.value_type) is SqlType.Array + assert type(metadata_type.value_type.element_type) is SqlType.Struct + assert ( + type(metadata_type.value_type.element_type["timestamp"]) + is SqlType.Timestamp + ) + assert type(metadata_type.value_type.element_type["value"]) is SqlType.Proto + + timestamp1 = DatetimeWithNanoseconds( + 2005, 3, 18, 1, 58, 31, tzinfo=datetime.timezone.utc + ) + timestamp2 = DatetimeWithNanoseconds( + 2040, 6, 2, 3, 57, 2, tzinfo=datetime.timezone.utc + ) + timestamp3 = DatetimeWithNanoseconds( + 2075, 8, 18, 5, 55, 33, tzinfo=datetime.timezone.utc + ) + timestamp4 = DatetimeWithNanoseconds( + 2110, 11, 3, 7, 54, 4, tzinfo=datetime.timezone.utc + ) + + # without proto definition + result = _parse_pb_value_to_python_value(value._pb, metadata_type, "map_field") + assert result[b"key1"][0]["timestamp"] == timestamp1 + assert result[b"key1"][0]["value"] == singer1.SerializeToString() + assert result[b"key1"][1]["timestamp"] == timestamp2 + assert result[b"key1"][1]["value"] == singer2.SerializeToString() + assert result[b"key2"][0]["timestamp"] == timestamp3 + assert result[b"key2"][0]["value"] == singer3.SerializeToString() 
+ assert result[b"key2"][1]["timestamp"] == timestamp4 + assert result[b"key2"][1]["value"] == singer4.SerializeToString() + + # with proto definition + result = _parse_pb_value_to_python_value( + value._pb, + metadata_type, + "map_field", + { + "map_field.value.value": singer_pb2.Singer(), + }, + ) + assert result[b"key1"][0]["timestamp"] == timestamp1 + assert result[b"key1"][0]["value"] == singer1 + assert result[b"key1"][1]["timestamp"] == timestamp2 + assert result[b"key1"][1]["value"] == singer2 + assert result[b"key2"][0]["timestamp"] == timestamp3 + assert result[b"key2"][0]["value"] == singer3 + assert result[b"key2"][1]["timestamp"] == timestamp4 + assert result[b"key2"][1]["value"] == singer4 + + def test__map_of_lists_of_structs_with_enums(self): + _type = PBType( + { + "map_type": { + "key_type": TYPE_BYTES, + "value_type": { + "array_type": { + "element_type": { + "struct_type": { + "fields": [ + { + "field_name": "timestamp", + "type_": TYPE_TIMESTAMP, + }, + { + "field_name": "value", + "type_": enum_type(), + }, + ] + } + }, + } + }, + } + } + ) + value = PBValue( + { + "array_value": { + "values": [ # list of (byte, list) tuples + { + "array_value": { + "values": [ # (byte, list) tuple + {"bytes_value": b"key1"}, + { + "array_value": { + "values": [ # list of structs + { + "array_value": { + "values": [ # (timestamp, bytes) tuple + { + "timestamp_value": { + "seconds": 1111111111 + } + }, + {"int_value": 0}, + ] + } + }, + { + "array_value": { + "values": [ # (timestamp, bytes) tuple + { + "timestamp_value": { + "seconds": 2222222222 + } + }, + {"int_value": 1}, + ] + } + }, + ] + } + }, + ] + } + }, + { + "array_value": { + "values": [ # (byte, list) tuple + {"bytes_value": b"key2"}, + { + "array_value": { + "values": [ # list of structs + { + "array_value": { + "values": [ # (timestamp, bytes) tuple + { + "timestamp_value": { + "seconds": 3333333333 + } + }, + {"int_value": 2}, + ] + } + }, + { + "array_value": { + "values": [ # (timestamp, bytes) tuple + { + "timestamp_value": { + "seconds": 4444444444 + } + }, + {"int_value": 3}, + ] + } + }, + ] + } + }, + ] + } + }, + ] + } + } + ) + metadata_type = _pb_type_to_metadata_type(_type) + assert type(metadata_type) is SqlType.Map + assert type(metadata_type.key_type) is SqlType.Bytes + assert type(metadata_type.value_type) is SqlType.Array + assert type(metadata_type.value_type.element_type) is SqlType.Struct + assert ( + type(metadata_type.value_type.element_type["timestamp"]) + is SqlType.Timestamp + ) + assert type(metadata_type.value_type.element_type["value"]) is SqlType.Enum + + timestamp1 = DatetimeWithNanoseconds( + 2005, 3, 18, 1, 58, 31, tzinfo=datetime.timezone.utc + ) + timestamp2 = DatetimeWithNanoseconds( + 2040, 6, 2, 3, 57, 2, tzinfo=datetime.timezone.utc + ) + timestamp3 = DatetimeWithNanoseconds( + 2075, 8, 18, 5, 55, 33, tzinfo=datetime.timezone.utc + ) + timestamp4 = DatetimeWithNanoseconds( + 2110, 11, 3, 7, 54, 4, tzinfo=datetime.timezone.utc + ) + + # without enum definition + result = _parse_pb_value_to_python_value(value._pb, metadata_type, "map_field") + assert result[b"key1"][0]["timestamp"] == timestamp1 + assert result[b"key1"][0]["value"] == 0 + assert result[b"key1"][1]["timestamp"] == timestamp2 + assert result[b"key1"][1]["value"] == 1 + assert result[b"key2"][0]["timestamp"] == timestamp3 + assert result[b"key2"][0]["value"] == 2 + assert result[b"key2"][1]["timestamp"] == timestamp4 + assert result[b"key2"][1]["value"] == 3 + + # with enum definition + result = 
_parse_pb_value_to_python_value( + value._pb, + metadata_type, + "map_field", + { + "map_field.value.value": singer_pb2.Genre, + }, + ) + assert result[b"key1"][0]["timestamp"] == timestamp1 + assert result[b"key1"][0]["value"] == "POP" + assert result[b"key1"][1]["timestamp"] == timestamp2 + assert result[b"key1"][1]["value"] == "JAZZ" + assert result[b"key2"][0]["timestamp"] == timestamp3 + assert result[b"key2"][0]["value"] == "FOLK" + assert result[b"key2"][1]["timestamp"] == timestamp4 + assert result[b"key2"][1]["value"] == "ROCK" + def test__invalid_type_throws_exception(self): _type = PBType({"string_type": {}}) value = PBValue({"int_value": 1}) @@ -712,4 +1692,4 @@ def test__invalid_type_throws_exception(self): ValueError, match="string_value field for String type not found in a Value.", ): - _parse_pb_value_to_python_value(value._pb, metadata_type) + _parse_pb_value_to_python_value(value._pb, metadata_type, "string_field") diff --git a/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_query_result_row_reader.py b/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_query_result_row_reader.py index 6adb1e3c73ae..ab98b54bda96 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_query_result_row_reader.py +++ b/packages/google-cloud-bigtable/tests/unit/data/execute_query/test_query_result_row_reader.py @@ -32,7 +32,9 @@ metadata, proto_rows_bytes, str_val, + bytes_val, ) +from samples.testdata import singer_pb2 class TestQueryResultRowReader: @@ -116,8 +118,8 @@ def test__received_values_are_passed_to_parser_in_batches(self): reader.consume([proto_rows_bytes(int_val(1), int_val(2))], metadata) parse_mock.assert_has_calls( [ - mock.call(PBValue(int_val(1)), SqlType.Int64()), - mock.call(PBValue(int_val(2)), SqlType.Int64()), + mock.call(PBValue(int_val(1)), SqlType.Int64(), "test1", None), + mock.call(PBValue(int_val(2)), SqlType.Int64(), "test2", None), ] ) @@ -137,7 +139,7 @@ def test__parser_errors_are_forwarded(self): parse_mock.assert_has_calls( [ - mock.call(PBValue(values[0]), SqlType.Int64()), + mock.call(PBValue(values[0]), SqlType.Int64(), "test1", None), ] ) @@ -243,6 +245,40 @@ def test_multiple_batches(self): assert row4["test1"] == 7 assert row4["test2"] == 8 + def test_multiple_batches_with_proto_and_enum_types(self): + singer1 = singer_pb2.Singer(name="John") + singer2 = singer_pb2.Singer(name="Taylor") + singer3 = singer_pb2.Singer(name="Jay") + singer4 = singer_pb2.Singer(name="Eric") + + reader = _QueryResultRowReader() + batches = [ + proto_rows_bytes( + bytes_val(singer1.SerializeToString()), + int_val(0), + bytes_val(singer2.SerializeToString()), + int_val(1), + ), + proto_rows_bytes(bytes_val(singer3.SerializeToString()), int_val(2)), + proto_rows_bytes(bytes_val(singer4.SerializeToString()), int_val(3)), + ] + + results = reader.consume( + batches, + Metadata([("singer", SqlType.Proto()), ("genre", SqlType.Enum())]), + {"singer": singer_pb2.Singer(), "genre": singer_pb2.Genre}, + ) + assert len(results) == 4 + [row1, row2, row3, row4] = results + assert row1["singer"] == singer1 + assert row1["genre"] == "POP" + assert row2["singer"] == singer2 + assert row2["genre"] == "JAZZ" + assert row3["singer"] == singer3 + assert row3["genre"] == "FOLK" + assert row4["singer"] == singer4 + assert row4["genre"] == "ROCK" + class TestMetadata: def test__duplicate_column_names(self): From 199ab28e1222a65ac5f146e8006d3d3610c0a938 Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Fri, 26 Sep 2025 11:12:39 -0700 Subject: 
[PATCH 882/892] chore(tests): prefer bytes for mutation fields (#1207) * chore(tests): prefer bytes for mutation fields * decode instead of encode --- .../samples/hello/async_main.py | 8 ++++---- .../google-cloud-bigtable/samples/hello/main.py | 17 +++++++++-------- 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/packages/google-cloud-bigtable/samples/hello/async_main.py b/packages/google-cloud-bigtable/samples/hello/async_main.py index 34159bedb4c9..af95898e57bd 100644 --- a/packages/google-cloud-bigtable/samples/hello/async_main.py +++ b/packages/google-cloud-bigtable/samples/hello/async_main.py @@ -57,7 +57,7 @@ async def main(project_id, instance_id, table_id): # Create a column family with GC policy : most recent N versions # Define the GC policy to retain only the most recent 2 versions max_versions_rule = column_family.MaxVersionsGCRule(2) - column_family_id = "cf1" + column_family_id = b"cf1" column_families = {column_family_id: max_versions_rule} if not admin_table.exists(): admin_table.create(column_families=column_families) @@ -70,9 +70,9 @@ async def main(project_id, instance_id, table_id): wait_for_table(admin_table) # [START bigtable_async_hw_write_rows] print("Writing some greetings to the table.") - greetings = ["Hello World!", "Hello Cloud Bigtable!", "Hello Python!"] + greetings = [b"Hello World!", b"Hello Cloud Bigtable!", b"Hello Python!"] mutations = [] - column = "greeting" + column = b"greeting" for i, value in enumerate(greetings): # Note: This example uses sequential numeric IDs for simplicity, # but this can result in poor performance in a production @@ -84,7 +84,7 @@ async def main(project_id, instance_id, table_id): # the best performance, see the documentation: # # https://cloud.google.com/bigtable/docs/schema-design - row_key = "greeting{}".format(i).encode() + row_key = f"greeting{i}".encode() row_mutation = bigtable.data.RowMutationEntry( row_key, bigtable.data.SetCell(column_family_id, column, value) ) diff --git a/packages/google-cloud-bigtable/samples/hello/main.py b/packages/google-cloud-bigtable/samples/hello/main.py index 41124e82675b..7a193ba6f6b2 100644 --- a/packages/google-cloud-bigtable/samples/hello/main.py +++ b/packages/google-cloud-bigtable/samples/hello/main.py @@ -57,7 +57,7 @@ def main(project_id, instance_id, table_id): # Create a column family with GC policy : most recent N versions # Define the GC policy to retain only the most recent 2 versions max_versions_rule = bigtable.column_family.MaxVersionsGCRule(2) - column_family_id = "cf1" + column_family_id = b"cf1" column_families = {column_family_id: max_versions_rule} if not table.exists(): table.create(column_families=column_families) @@ -71,9 +71,9 @@ def main(project_id, instance_id, table_id): # [START bigtable_hw_write_rows] print("Writing some greetings to the table.") - greetings = ["Hello World!", "Hello Cloud Bigtable!", "Hello Python!"] + greetings = [b"Hello World!", b"Hello Cloud Bigtable!", b"Hello Python!"] rows = [] - column = "greeting".encode() + column = b"greeting" for i, value in enumerate(greetings): # Note: This example uses sequential numeric IDs for simplicity, # but this can result in poor performance in a production @@ -85,10 +85,10 @@ def main(project_id, instance_id, table_id): # the best performance, see the documentation: # # https://cloud.google.com/bigtable/docs/schema-design - row_key = "greeting{}".format(i).encode() + row_key = f"greeting{i}".encode() row = table.direct_row(row_key) row.set_cell( - column_family_id, column, value, 
timestamp=datetime.datetime.utcnow() + column_family_id, column, value, timestamp=datetime.datetime.utcnow(), ) rows.append(row) table.mutate_rows(rows) @@ -103,10 +103,10 @@ def main(project_id, instance_id, table_id): # [START bigtable_hw_get_with_filter] # [START bigtable_hw_get_by_key] print("Getting a single greeting by row key.") - key = "greeting0".encode() + key = b"greeting0" row = table.read_row(key, row_filter) - cell = row.cells[column_family_id][column][0] + cell = row.cells[column_family_id.decode("utf-8")][column][0] print(cell.value.decode("utf-8")) # [END bigtable_hw_get_by_key] # [END bigtable_hw_get_with_filter] @@ -117,7 +117,8 @@ def main(project_id, instance_id, table_id): partial_rows = table.read_rows(filter_=row_filter) for row in partial_rows: - cell = row.cells[column_family_id][column][0] + column_family_id_str = column_family_id.decode("utf-8") + cell = row.cells[column_family_id_str][column][0] print(cell.value.decode("utf-8")) # [END bigtable_hw_scan_all] # [END bigtable_hw_scan_with_filter] From 94b47ed8d4399477968fd03241162a6e8de891b2 Mon Sep 17 00:00:00 2001 From: Jack Dingilian Date: Mon, 6 Oct 2025 18:33:05 -0400 Subject: [PATCH 883/892] fix: Fix instance registration cleanup on early iterator termination (#1216) --- .../cloud/bigtable/data/_async/client.py | 35 +++-- .../bigtable/data/_sync_autogen/client.py | 31 +++-- .../_async/execute_query_iterator.py | 88 +++++++------ .../_sync_autogen/execute_query_iterator.py | 68 ++++++---- .../tests/unit/data/_async/test_client.py | 20 +-- .../unit/data/_sync_autogen/test_client.py | 20 +-- .../_async/test_query_iterator.py | 121 ++++++++++++++++++ .../_sync_autogen/test_query_iterator.py | 103 +++++++++++++++ 8 files changed, 377 insertions(+), 109 deletions(-) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py index 516e20eb34e9..0af7154a64f7 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py @@ -476,7 +476,8 @@ async def _manage_channel( async def _register_instance( self, instance_id: str, - owner: _DataApiTargetAsync | ExecuteQueryIteratorAsync, + app_profile_id: Optional[str], + owner_id: int, ) -> None: """ Registers an instance with the client, and warms the channel for the instance @@ -486,13 +487,15 @@ async def _register_instance( Args: instance_id: id of the instance to register. - owner: table that owns the instance. Owners will be tracked in + app_profile_id: id of the app profile calling the instance. + owner_id: integer id of the object owning the instance. Owners will be tracked in _instance_owners, and instances will only be unregistered when all - owners call _remove_instance_registration + owners call _remove_instance_registration. 
Can be obtained by calling + `id` identity funcion, using `id(owner)` """ instance_name = self._gapic_client.instance_path(self.project, instance_id) - instance_key = _WarmedInstanceKey(instance_name, owner.app_profile_id) - self._instance_owners.setdefault(instance_key, set()).add(id(owner)) + instance_key = _WarmedInstanceKey(instance_name, app_profile_id) + self._instance_owners.setdefault(instance_key, set()).add(owner_id) if instance_key not in self._active_instances: self._active_instances.add(instance_key) if self._channel_refresh_task: @@ -510,10 +513,11 @@ async def _register_instance( "_DataApiTargetAsync": "_DataApiTarget", } ) - async def _remove_instance_registration( + def _remove_instance_registration( self, instance_id: str, - owner: _DataApiTargetAsync | ExecuteQueryIteratorAsync, + app_profile_id: Optional[str], + owner_id: int, ) -> bool: """ Removes an instance from the client's registered instances, to prevent @@ -523,17 +527,17 @@ async def _remove_instance_registration( Args: instance_id: id of the instance to remove - owner: table that owns the instance. Owners will be tracked in - _instance_owners, and instances will only be unregistered when all - owners call _remove_instance_registration + app_profile_id: id of the app profile calling the instance. + owner_id: integer id of the object owning the instance. Can be + obtained by the `id` identity funcion, using `id(owner)`. Returns: bool: True if instance was removed, else False """ instance_name = self._gapic_client.instance_path(self.project, instance_id) - instance_key = _WarmedInstanceKey(instance_name, owner.app_profile_id) + instance_key = _WarmedInstanceKey(instance_name, app_profile_id) owner_list = self._instance_owners.get(instance_key, set()) try: - owner_list.remove(id(owner)) + owner_list.remove(owner_id) if len(owner_list) == 0: self._active_instances.remove(instance_key) return True @@ -1014,7 +1018,8 @@ def __init__( self._register_instance_future = CrossSync.create_task( self.client._register_instance, self.instance_id, - self, + self.app_profile_id, + id(self), sync_executor=self.client._executor, ) except RuntimeError as e: @@ -1725,7 +1730,9 @@ async def close(self): """ if self._register_instance_future: self._register_instance_future.cancel() - await self.client._remove_instance_registration(self.instance_id, self) + self.client._remove_instance_registration( + self.instance_id, self.app_profile_id, id(self) + ) @CrossSync.convert(sync_name="__enter__") async def __aenter__(self): diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/client.py index a168f360db77..adc849649eb5 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/client.py @@ -354,7 +354,7 @@ def _manage_channel( next_sleep = max(next_refresh - (time.monotonic() - start_timestamp), 0) def _register_instance( - self, instance_id: str, owner: _DataApiTarget | ExecuteQueryIterator + self, instance_id: str, app_profile_id: Optional[str], owner_id: int ) -> None: """Registers an instance with the client, and warms the channel for the instance The client will periodically refresh grpc channel used to make @@ -363,12 +363,14 @@ def _register_instance( Args: instance_id: id of the instance to register. - owner: table that owns the instance. 
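For context, a minimal sketch of the registration pattern this refactor enables, assuming the sync client and a hypothetical owner class (neither appears in the patch): owners now pass their app profile id and their own id() instead of handing the client a reference to themselves, so tables and query iterators can share one registration code path.

class ExampleOwner:
    # hypothetical owner mirroring how _DataApiTarget and the
    # ExecuteQueryIterator use the refactored private API
    def __init__(self, client, instance_id, app_profile_id=None):
        self.client = client
        self.instance_id = instance_id
        self.app_profile_id = app_profile_id
        # register with the app profile id plus this object's identity
        client._register_instance(instance_id, app_profile_id, id(self))

    def close(self):
        # unregister with the same (instance, app profile, id) triple
        self.client._remove_instance_registration(
            self.instance_id, self.app_profile_id, id(self)
        )
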
Owners will be tracked in + app_profile_id: id of the app profile calling the instance. + owner_id: integer id of the object owning the instance. Owners will be tracked in _instance_owners, and instances will only be unregistered when all - owners call _remove_instance_registration""" + owners call _remove_instance_registration. Can be obtained by calling + `id` identity funcion, using `id(owner)`""" instance_name = self._gapic_client.instance_path(self.project, instance_id) - instance_key = _WarmedInstanceKey(instance_name, owner.app_profile_id) - self._instance_owners.setdefault(instance_key, set()).add(id(owner)) + instance_key = _WarmedInstanceKey(instance_name, app_profile_id) + self._instance_owners.setdefault(instance_key, set()).add(owner_id) if instance_key not in self._active_instances: self._active_instances.add(instance_key) if self._channel_refresh_task: @@ -377,7 +379,7 @@ def _register_instance( self._start_background_channel_refresh() def _remove_instance_registration( - self, instance_id: str, owner: _DataApiTarget | ExecuteQueryIterator + self, instance_id: str, app_profile_id: Optional[str], owner_id: int ) -> bool: """Removes an instance from the client's registered instances, to prevent warming new channels for the instance @@ -386,16 +388,16 @@ def _remove_instance_registration( Args: instance_id: id of the instance to remove - owner: table that owns the instance. Owners will be tracked in - _instance_owners, and instances will only be unregistered when all - owners call _remove_instance_registration + app_profile_id: id of the app profile calling the instance. + owner_id: integer id of the object owning the instance. Can be + obtained by the `id` identity funcion, using `id(owner)`. Returns: bool: True if instance was removed, else False""" instance_name = self._gapic_client.instance_path(self.project, instance_id) - instance_key = _WarmedInstanceKey(instance_name, owner.app_profile_id) + instance_key = _WarmedInstanceKey(instance_name, app_profile_id) owner_list = self._instance_owners.get(instance_key, set()) try: - owner_list.remove(id(owner)) + owner_list.remove(owner_id) if len(owner_list) == 0: self._active_instances.remove(instance_key) return True @@ -806,7 +808,8 @@ def __init__( self._register_instance_future = CrossSync._Sync_Impl.create_task( self.client._register_instance, self.instance_id, - self, + self.app_profile_id, + id(self), sync_executor=self.client._executor, ) except RuntimeError as e: @@ -1460,7 +1463,9 @@ def close(self): """Called to close the Table instance and release any resources held by it.""" if self._register_instance_future: self._register_instance_future.cancel() - self.client._remove_instance_registration(self.instance_id, self) + self.client._remove_instance_registration( + self.instance_id, self.app_profile_id, id(self) + ) def __enter__(self): """Implement async context manager protocol diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py index 41900bb12317..2beda4cd65be 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_async/execute_query_iterator.py @@ -127,6 +127,7 @@ def __init__( self.has_received_token = False self._result_generator = self._next_impl() self._register_instance_task = None + 
self._fully_consumed = False self._is_closed = False self._request_body = request_body self._attempt_timeout_gen = _attempt_timeout_generator( @@ -145,7 +146,8 @@ def __init__( self._register_instance_task = CrossSync.create_task( self._client._register_instance, self._instance_id, - self, + self.app_profile_id, + id(self), sync_executor=self._client._executor, ) except RuntimeError as e: @@ -193,39 +195,42 @@ async def _next_impl(self) -> CrossSync.Iterator[QueryResultRow]: Generator wrapping the response stream which parses the stream results and returns full `QueryResultRow`s. """ - async for response in self._stream: - try: - # we've received a resume token, so we can finalize the metadata - if self._final_metadata is None and _has_resume_token(response): - self._finalize_metadata() - - batches_to_parse = self._byte_cursor.consume(response) - if not batches_to_parse: - continue - # metadata must be set at this point since there must be a resume_token - # for byte_cursor to yield data - if not self.metadata: - raise ValueError( - "Error parsing response before finalizing metadata" + try: + async for response in self._stream: + try: + # we've received a resume token, so we can finalize the metadata + if self._final_metadata is None and _has_resume_token(response): + self._finalize_metadata() + + batches_to_parse = self._byte_cursor.consume(response) + if not batches_to_parse: + continue + # metadata must be set at this point since there must be a resume_token + # for byte_cursor to yield data + if not self.metadata: + raise ValueError( + "Error parsing response before finalizing metadata" + ) + results = self._reader.consume( + batches_to_parse, self.metadata, self._column_info ) - results = self._reader.consume( - batches_to_parse, self.metadata, self._column_info - ) - if results is None: - continue - - except ValueError as e: - raise InvalidExecuteQueryResponse( - "Invalid ExecuteQuery response received" - ) from e - - for result in results: - yield result - # this means the stream has finished with no responses. In that case we know the - # latest_prepare_reponses was used successfully so we can finalize the metadata - if self._final_metadata is None: - self._finalize_metadata() - await self.close() + if results is None: + continue + + except ValueError as e: + raise InvalidExecuteQueryResponse( + "Invalid ExecuteQuery response received" + ) from e + + for result in results: + yield result + # this means the stream has finished with no responses. In that case we know the + # latest_prepare_reponses was used successfully so we can finalize the metadata + if self._final_metadata is None: + self._finalize_metadata() + self._fully_consumed = True + finally: + self._close_internal() @CrossSync.convert(sync_name="__next__", replace_symbols={"__anext__": "__next__"}) async def __anext__(self) -> QueryResultRow: @@ -285,15 +290,26 @@ def metadata(self) -> Metadata: @CrossSync.convert async def close(self) -> None: """ - Cancel all background tasks. Should be called all rows were processed. + Cancel all background tasks. Should be called after all rows were processed. 
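A caller-side sketch of the guarantee the try/finally above provides, assuming a BigtableDataClientAsync and a query string (the table name is made up): whether the stream is fully consumed, abandoned early, or interrupted by an error, finalizing the wrapped generator runs _close_internal() and drops the instance registration, so an explicit close() is no longer required to avoid leaking warmed channels.

async def first_row(client, instance_id):
    iterator = await client.execute_query(
        "SELECT * FROM example_table", instance_id
    )
    async for row in iterator:
        # returning here abandons the stream; once the iterator and its
        # wrapped generator are finalized (as the new unit tests verify
        # with gc.collect()), the finally block unregisters the instance
        return row
    # full consumption reaches the same finally block and closes the
    # iterator automatically
    return None
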
+ + Called automatically by iterator :raises: :class:`ValueError ` if called in an invalid state """ + # this doesn't need to be async anymore but we wrap the sync api to avoid a breaking + # change + self._close_internal() + + def _close_internal(self) -> None: if self._is_closed: return - if not self._byte_cursor.empty(): + # Throw an error if the iterator has been successfully consumed but there is + # still buffered data + if self._fully_consumed and not self._byte_cursor.empty(): raise ValueError("Unexpected buffered data at end of executeQuery reqest") self._is_closed = True if self._register_instance_task is not None: self._register_instance_task.cancel() - await self._client._remove_instance_registration(self._instance_id, self) + self._client._remove_instance_registration( + self._instance_id, self.app_profile_id, id(self) + ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_sync_autogen/execute_query_iterator.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_sync_autogen/execute_query_iterator.py index 6b29cbfe77f4..68594d0e867a 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_sync_autogen/execute_query_iterator.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/execute_query/_sync_autogen/execute_query_iterator.py @@ -102,6 +102,7 @@ def __init__( self.has_received_token = False self._result_generator = self._next_impl() self._register_instance_task = None + self._fully_consumed = False self._is_closed = False self._request_body = request_body self._attempt_timeout_gen = _attempt_timeout_generator( @@ -120,7 +121,8 @@ def __init__( self._register_instance_task = CrossSync._Sync_Impl.create_task( self._client._register_instance, self._instance_id, - self, + self.app_profile_id, + id(self), sync_executor=self._client._executor, ) except RuntimeError as e: @@ -159,31 +161,34 @@ def _make_request_with_resume_token(self): def _next_impl(self) -> CrossSync._Sync_Impl.Iterator[QueryResultRow]: """Generator wrapping the response stream which parses the stream results and returns full `QueryResultRow`s.""" - for response in self._stream: - try: - if self._final_metadata is None and _has_resume_token(response): - self._finalize_metadata() - batches_to_parse = self._byte_cursor.consume(response) - if not batches_to_parse: - continue - if not self.metadata: - raise ValueError( - "Error parsing response before finalizing metadata" + try: + for response in self._stream: + try: + if self._final_metadata is None and _has_resume_token(response): + self._finalize_metadata() + batches_to_parse = self._byte_cursor.consume(response) + if not batches_to_parse: + continue + if not self.metadata: + raise ValueError( + "Error parsing response before finalizing metadata" + ) + results = self._reader.consume( + batches_to_parse, self.metadata, self._column_info ) - results = self._reader.consume( - batches_to_parse, self.metadata, self._column_info - ) - if results is None: - continue - except ValueError as e: - raise InvalidExecuteQueryResponse( - "Invalid ExecuteQuery response received" - ) from e - for result in results: - yield result - if self._final_metadata is None: - self._finalize_metadata() - self.close() + if results is None: + continue + except ValueError as e: + raise InvalidExecuteQueryResponse( + "Invalid ExecuteQuery response received" + ) from e + for result in results: + yield result + if self._final_metadata is None: + self._finalize_metadata() + self._fully_consumed = 
True + finally: + self._close_internal() def __next__(self) -> QueryResultRow: """Yields QueryResultRows representing the results of the query. @@ -233,15 +238,22 @@ def metadata(self) -> Metadata: return self._final_metadata def close(self) -> None: - """Cancel all background tasks. Should be called all rows were processed. + """Cancel all background tasks. Should be called after all rows were processed. + + Called automatically by iterator :raises: :class:`ValueError ` if called in an invalid state """ + self._close_internal() + + def _close_internal(self) -> None: if self._is_closed: return - if not self._byte_cursor.empty(): + if self._fully_consumed and (not self._byte_cursor.empty()): raise ValueError("Unexpected buffered data at end of executeQuery reqest") self._is_closed = True if self._register_instance_task is not None: self._register_instance_task.cancel() - self._client._remove_instance_registration(self._instance_id, self) + self._client._remove_instance_registration( + self._instance_id, self.app_profile_id, id(self) + ) diff --git a/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py b/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py index 9e434d12f874..a5ec1d02d3d1 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/data/_async/test_client.py @@ -537,7 +537,7 @@ async def test__register_instance(self): client_mock._ping_and_warm_instances = CrossSync.Mock() table_mock = mock.Mock() await self._get_target_class()._register_instance( - client_mock, "instance-1", table_mock + client_mock, "instance-1", table_mock.app_profile_id, id(table_mock) ) # first call should start background refresh assert client_mock._start_background_channel_refresh.call_count == 1 @@ -555,7 +555,7 @@ async def test__register_instance(self): # next call should not call _start_background_channel_refresh again table_mock2 = mock.Mock() await self._get_target_class()._register_instance( - client_mock, "instance-2", table_mock2 + client_mock, "instance-2", table_mock2.app_profile_id, id(table_mock2) ) assert client_mock._start_background_channel_refresh.call_count == 1 assert ( @@ -607,7 +607,7 @@ async def test__register_instance_duplicate(self): ) # fake first registration await self._get_target_class()._register_instance( - client_mock, "instance-1", table_mock + client_mock, "instance-1", table_mock.app_profile_id, id(table_mock) ) assert len(active_instances) == 1 assert expected_key == tuple(list(active_instances)[0]) @@ -617,7 +617,7 @@ async def test__register_instance_duplicate(self): assert client_mock._ping_and_warm_instances.call_count == 1 # next call should do nothing await self._get_target_class()._register_instance( - client_mock, "instance-1", table_mock + client_mock, "instance-1", table_mock.app_profile_id, id(table_mock) ) assert len(active_instances) == 1 assert expected_key == tuple(list(active_instances)[0]) @@ -659,7 +659,7 @@ async def test__register_instance_state( for instance, profile in insert_instances: table_mock.app_profile_id = profile await self._get_target_class()._register_instance( - client_mock, instance, table_mock + client_mock, instance, profile, id(table_mock) ) assert len(active_instances) == len(expected_active) assert len(instance_owners) == len(expected_owner_keys) @@ -682,8 +682,8 @@ async def test__register_instance_state( async def test__remove_instance_registration(self): client = self._make_client(project="project-id") table = mock.Mock() - 
await client._register_instance("instance-1", table) - await client._register_instance("instance-2", table) + await client._register_instance("instance-1", table.app_profile_id, id(table)) + await client._register_instance("instance-2", table.app_profile_id, id(table)) assert len(client._active_instances) == 2 assert len(client._instance_owners.keys()) == 2 instance_1_path = client._gapic_client.instance_path( @@ -698,13 +698,15 @@ async def test__remove_instance_registration(self): assert list(client._instance_owners[instance_1_key])[0] == id(table) assert len(client._instance_owners[instance_2_key]) == 1 assert list(client._instance_owners[instance_2_key])[0] == id(table) - success = await client._remove_instance_registration("instance-1", table) + success = client._remove_instance_registration( + "instance-1", table.app_profile_id, id(table) + ) assert success assert len(client._active_instances) == 1 assert len(client._instance_owners[instance_1_key]) == 0 assert len(client._instance_owners[instance_2_key]) == 1 assert client._active_instances == {instance_2_key} - success = await client._remove_instance_registration("fake-key", table) + success = client._remove_instance_registration("fake-key", "profile", id(table)) assert not success assert len(client._active_instances) == 1 await client.close() diff --git a/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_client.py b/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_client.py index 506ad7e94d87..6ad6c10632d4 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_client.py +++ b/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_client.py @@ -428,7 +428,7 @@ def test__register_instance(self): client_mock._ping_and_warm_instances = CrossSync._Sync_Impl.Mock() table_mock = mock.Mock() self._get_target_class()._register_instance( - client_mock, "instance-1", table_mock + client_mock, "instance-1", table_mock.app_profile_id, id(table_mock) ) assert client_mock._start_background_channel_refresh.call_count == 1 expected_key = ("prefix/instance-1", table_mock.app_profile_id) @@ -439,7 +439,7 @@ def test__register_instance(self): client_mock._channel_refresh_task = mock.Mock() table_mock2 = mock.Mock() self._get_target_class()._register_instance( - client_mock, "instance-2", table_mock2 + client_mock, "instance-2", table_mock2.app_profile_id, id(table_mock2) ) assert client_mock._start_background_channel_refresh.call_count == 1 assert ( @@ -478,7 +478,7 @@ def test__register_instance_duplicate(self): table_mock = mock.Mock() expected_key = ("prefix/instance-1", table_mock.app_profile_id) self._get_target_class()._register_instance( - client_mock, "instance-1", table_mock + client_mock, "instance-1", table_mock.app_profile_id, id(table_mock) ) assert len(active_instances) == 1 assert expected_key == tuple(list(active_instances)[0]) @@ -486,7 +486,7 @@ def test__register_instance_duplicate(self): assert expected_key == tuple(list(instance_owners)[0]) assert client_mock._ping_and_warm_instances.call_count == 1 self._get_target_class()._register_instance( - client_mock, "instance-1", table_mock + client_mock, "instance-1", table_mock.app_profile_id, id(table_mock) ) assert len(active_instances) == 1 assert expected_key == tuple(list(active_instances)[0]) @@ -523,7 +523,7 @@ def test__register_instance_state( for instance, profile in insert_instances: table_mock.app_profile_id = profile self._get_target_class()._register_instance( - client_mock, instance, table_mock + 
client_mock, instance, profile, id(table_mock) ) assert len(active_instances) == len(expected_active) assert len(instance_owners) == len(expected_owner_keys) @@ -545,8 +545,8 @@ def test__register_instance_state( def test__remove_instance_registration(self): client = self._make_client(project="project-id") table = mock.Mock() - client._register_instance("instance-1", table) - client._register_instance("instance-2", table) + client._register_instance("instance-1", table.app_profile_id, id(table)) + client._register_instance("instance-2", table.app_profile_id, id(table)) assert len(client._active_instances) == 2 assert len(client._instance_owners.keys()) == 2 instance_1_path = client._gapic_client.instance_path( @@ -561,13 +561,15 @@ def test__remove_instance_registration(self): assert list(client._instance_owners[instance_1_key])[0] == id(table) assert len(client._instance_owners[instance_2_key]) == 1 assert list(client._instance_owners[instance_2_key])[0] == id(table) - success = client._remove_instance_registration("instance-1", table) + success = client._remove_instance_registration( + "instance-1", table.app_profile_id, id(table) + ) assert success assert len(client._active_instances) == 1 assert len(client._instance_owners[instance_1_key]) == 0 assert len(client._instance_owners[instance_2_key]) == 1 assert client._active_instances == {instance_2_key} - success = client._remove_instance_registration("fake-key", table) + success = client._remove_instance_registration("fake-key", "profile", id(table)) assert not success assert len(client._active_instances) == 1 client.close() diff --git a/packages/google-cloud-bigtable/tests/unit/data/execute_query/_async/test_query_iterator.py b/packages/google-cloud-bigtable/tests/unit/data/execute_query/_async/test_query_iterator.py index 9823655569c5..df6321f7f5e0 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/execute_query/_async/test_query_iterator.py +++ b/packages/google-cloud-bigtable/tests/unit/data/execute_query/_async/test_query_iterator.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import gc from google.cloud.bigtable.data import exceptions from google.cloud.bigtable.data.execute_query.metadata import ( _pb_metadata_to_metadata_types, @@ -284,3 +285,123 @@ async def test_iterator_returns_error_if_metadata_requested_too_early( with pytest.raises(exceptions.EarlyMetadataCallError): iterator.metadata + + @CrossSync.pytest + async def test_iterator_closes_on_full_consumption(self, proto_byte_stream): + """ + Tests that the iterator's close() method is called after all results + have been successfully consumed. 
+ """ + client_mock = mock.Mock() + client_mock._register_instance = CrossSync.Mock() + client_mock._remove_instance_registration = CrossSync.Mock() + client_mock._executor = concurrent.futures.ThreadPoolExecutor() + mock_async_iterator = MockIterator(proto_byte_stream) + + with mock.patch.object( + CrossSync, "retry_target_stream", return_value=mock_async_iterator + ): + iterator = self._make_one( + client=client_mock, + instance_id="test-instance", + app_profile_id="test_profile", + request_body={}, + prepare_metadata=_pb_metadata_to_metadata_types( + metadata( + column("test1", int64_type()), column("test2", int64_type()) + ) + ), + attempt_timeout=10, + operation_timeout=10, + ) + # Consume the entire iterator + results = [row async for row in iterator] + assert len(results) == 3 + + # The close method should be called automatically by the finally block + client_mock._remove_instance_registration.assert_called_once() + assert iterator.is_closed + + @CrossSync.pytest + async def test_iterator_closes_on_early_break(self, proto_byte_stream): + """ + Tests that the iterator's close() method is called if the user breaks + out of the iteration loop early. + """ + client_mock = mock.Mock() + client_mock._register_instance = CrossSync.Mock() + client_mock._remove_instance_registration = CrossSync.Mock() + mock_async_iterator = MockIterator(proto_byte_stream) + iterator = None + with mock.patch.object( + CrossSync, "retry_target_stream", return_value=mock_async_iterator + ): + iterator = CrossSync.ExecuteQueryIterator( + client=client_mock, + instance_id="test-instance", + app_profile_id="test_profile", + request_body={}, + prepare_metadata=_pb_metadata_to_metadata_types( + metadata( + column("test1", int64_type()), column("test2", int64_type()) + ) + ), + attempt_timeout=10, + operation_timeout=10, + ) + async for _ in iterator: + break + + del iterator + await CrossSync.sleep(1) + # GC outside the loop bc the mock ends up holding a reference to + # the iterator + gc.collect() + await CrossSync.sleep(1) + + # The close method should be called by the finally block when the + # generator is closed + client_mock._remove_instance_registration.assert_called_once() + + @CrossSync.pytest + async def test_iterator_closes_on_error(self, proto_byte_stream): + """ + Tests that the iterator's close() method is called if an exception + is raised during iteration. 
+ """ + client_mock = mock.Mock() + client_mock._register_instance = CrossSync.Mock() + client_mock._remove_instance_registration = CrossSync.Mock() + + class MockErrorIterator(MockIterator): + @CrossSync.convert( + sync_name="__next__", replace_symbols={"__anext__": "__next__"} + ) + async def __anext__(self): + if self.idx >= 1: + raise ValueError("Injected-test-error") + return await super().__anext__() + + mock_async_iterator = MockErrorIterator(proto_byte_stream) + with mock.patch.object( + CrossSync, "retry_target_stream", return_value=mock_async_iterator + ): + iterator = self._make_one( + client=client_mock, + instance_id="test-instance", + app_profile_id="test_profile", + request_body={}, + prepare_metadata=_pb_metadata_to_metadata_types( + metadata( + column("test1", int64_type()), column("test2", int64_type()) + ) + ), + attempt_timeout=10, + operation_timeout=10, + ) + with pytest.raises(ValueError, match="Injected-test-error"): + async for _ in iterator: + pass + + # The close method should be called by the finally block on error + client_mock._remove_instance_registration.assert_called_once() diff --git a/packages/google-cloud-bigtable/tests/unit/data/execute_query/_sync_autogen/test_query_iterator.py b/packages/google-cloud-bigtable/tests/unit/data/execute_query/_sync_autogen/test_query_iterator.py index d4f3ec26f3c7..3915693cd2cc 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/execute_query/_sync_autogen/test_query_iterator.py +++ b/packages/google-cloud-bigtable/tests/unit/data/execute_query/_sync_autogen/test_query_iterator.py @@ -15,6 +15,7 @@ # This file is automatically generated by CrossSync. Do not edit manually. +import gc from google.cloud.bigtable.data import exceptions from google.cloud.bigtable.data.execute_query.metadata import ( _pb_metadata_to_metadata_types, @@ -248,3 +249,105 @@ def test_iterator_returns_error_if_metadata_requested_too_early( ) with pytest.raises(exceptions.EarlyMetadataCallError): iterator.metadata + + def test_iterator_closes_on_full_consumption(self, proto_byte_stream): + """Tests that the iterator's close() method is called after all results + have been successfully consumed.""" + client_mock = mock.Mock() + client_mock._register_instance = CrossSync._Sync_Impl.Mock() + client_mock._remove_instance_registration = CrossSync._Sync_Impl.Mock() + client_mock._executor = concurrent.futures.ThreadPoolExecutor() + mock_async_iterator = MockIterator(proto_byte_stream) + with mock.patch.object( + CrossSync._Sync_Impl, + "retry_target_stream", + return_value=mock_async_iterator, + ): + iterator = self._make_one( + client=client_mock, + instance_id="test-instance", + app_profile_id="test_profile", + request_body={}, + prepare_metadata=_pb_metadata_to_metadata_types( + metadata( + column("test1", int64_type()), column("test2", int64_type()) + ) + ), + attempt_timeout=10, + operation_timeout=10, + ) + results = [row for row in iterator] + assert len(results) == 3 + client_mock._remove_instance_registration.assert_called_once() + assert iterator.is_closed + + def test_iterator_closes_on_early_break(self, proto_byte_stream): + """Tests that the iterator's close() method is called if the user breaks + out of the iteration loop early.""" + client_mock = mock.Mock() + client_mock._register_instance = CrossSync._Sync_Impl.Mock() + client_mock._remove_instance_registration = CrossSync._Sync_Impl.Mock() + mock_async_iterator = MockIterator(proto_byte_stream) + iterator = None + with mock.patch.object( + CrossSync._Sync_Impl, + 
"retry_target_stream", + return_value=mock_async_iterator, + ): + iterator = CrossSync._Sync_Impl.ExecuteQueryIterator( + client=client_mock, + instance_id="test-instance", + app_profile_id="test_profile", + request_body={}, + prepare_metadata=_pb_metadata_to_metadata_types( + metadata( + column("test1", int64_type()), column("test2", int64_type()) + ) + ), + attempt_timeout=10, + operation_timeout=10, + ) + for _ in iterator: + break + del iterator + CrossSync._Sync_Impl.sleep(1) + gc.collect() + CrossSync._Sync_Impl.sleep(1) + client_mock._remove_instance_registration.assert_called_once() + + def test_iterator_closes_on_error(self, proto_byte_stream): + """Tests that the iterator's close() method is called if an exception + is raised during iteration.""" + client_mock = mock.Mock() + client_mock._register_instance = CrossSync._Sync_Impl.Mock() + client_mock._remove_instance_registration = CrossSync._Sync_Impl.Mock() + + class MockErrorIterator(MockIterator): + def __next__(self): + if self.idx >= 1: + raise ValueError("Injected-test-error") + return super().__next__() + + mock_async_iterator = MockErrorIterator(proto_byte_stream) + with mock.patch.object( + CrossSync._Sync_Impl, + "retry_target_stream", + return_value=mock_async_iterator, + ): + iterator = self._make_one( + client=client_mock, + instance_id="test-instance", + app_profile_id="test_profile", + request_body={}, + prepare_metadata=_pb_metadata_to_metadata_types( + metadata( + column("test1", int64_type()), column("test2", int64_type()) + ) + ), + attempt_timeout=10, + operation_timeout=10, + ) + with pytest.raises(ValueError, match="Injected-test-error"): + for _ in iterator: + pass + client_mock._remove_instance_registration.assert_called_once() From 0b065005a2923692a24f8dadb26461c038a087cf Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Tue, 7 Oct 2025 16:46:42 -0700 Subject: [PATCH 884/892] chore(main): release 2.33.0 (#1185) --- .../.release-please-manifest.json | 2 +- packages/google-cloud-bigtable/CHANGELOG.md | 14 ++++++++++++++ .../google/cloud/bigtable/gapic_version.py | 2 +- .../google/cloud/bigtable_admin/gapic_version.py | 2 +- .../cloud/bigtable_admin_v2/gapic_version.py | 2 +- .../google/cloud/bigtable_v2/gapic_version.py | 2 +- 6 files changed, 19 insertions(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/.release-please-manifest.json b/packages/google-cloud-bigtable/.release-please-manifest.json index 355f140d6cbc..12401d7d2d45 100644 --- a/packages/google-cloud-bigtable/.release-please-manifest.json +++ b/packages/google-cloud-bigtable/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "2.32.0" + ".": "2.33.0" } \ No newline at end of file diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index d0308016471d..7dc43a9a28f3 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,20 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.33.0](https://github.com/googleapis/python-bigtable/compare/v2.32.0...v2.33.0) (2025-10-06) + + +### Features + +* Add support for Proto and Enum types ([#1202](https://github.com/googleapis/python-bigtable/issues/1202)) ([34ceb86](https://github.com/googleapis/python-bigtable/commit/34ceb86007db08d453fa25cca4968d5b498ffcd6)) +* Expose universe_domain for tpc ([#1150](https://github.com/googleapis/python-bigtable/issues/1150)) 
([451fd97](https://github.com/googleapis/python-bigtable/commit/451fd97e435218ffed47d39423680ffc4feccac4)) + + +### Bug Fixes + +* Fix instance registration cleanup on early iterator termination ([#1216](https://github.com/googleapis/python-bigtable/issues/1216)) ([bbfd746](https://github.com/googleapis/python-bigtable/commit/bbfd746c61a6362efa42c7899ec3e34ceb541c83)) +* Refactor channel refresh ([#1174](https://github.com/googleapis/python-bigtable/issues/1174)) ([6fa3008](https://github.com/googleapis/python-bigtable/commit/6fa30084058bc34d4487d1fee5c87d7795ff167a)) + ## [2.32.0](https://github.com/googleapis/python-bigtable/compare/v2.31.0...v2.32.0) (2025-08-01) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py index 3c958586feba..0c5de5c03afe 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.32.0" # {x-release-please-version} +__version__ = "2.33.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py index 3c958586feba..0c5de5c03afe 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.32.0" # {x-release-please-version} +__version__ = "2.33.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py index 3c958586feba..0c5de5c03afe 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.32.0" # {x-release-please-version} +__version__ = "2.33.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py index 3c958586feba..0c5de5c03afe 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "2.32.0" # {x-release-please-version} +__version__ = "2.33.0" # {x-release-please-version} From 4649eae6f324502b25c05975b4dc9b3597f1740e Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Thu, 16 Oct 2025 12:08:00 -0700 Subject: [PATCH 885/892] feat: Add support for Python 3.14 (#1217) --- .../.cross_sync/transformers.py | 13 +++--- .../.github/.OwlBot.lock.yaml | 4 +- .../.github/sync-repo-settings.yaml | 12 +++++- .../.github/workflows/conformance.yaml | 2 +- .../.github/workflows/mypy.yml | 2 +- .../.github/workflows/system_emulated.yml | 2 +- .../.github/workflows/unittest.yml | 2 +- .../{system-3.8.cfg => system-3.9.cfg} | 2 +- .../.kokoro/presubmit/system.cfg | 7 ++++ .../.kokoro/samples/python3.14/common.cfg | 40 +++++++++++++++++++ .../.kokoro/samples/python3.14/continuous.cfg | 6 +++ .../samples/python3.14/periodic-head.cfg | 11 +++++ .../.kokoro/samples/python3.14/periodic.cfg | 6 +++ .../.kokoro/samples/python3.14/presubmit.cfg | 6 +++ .../google-cloud-bigtable/CONTRIBUTING.rst | 10 +++-- .../google/cloud/bigtable/data/exceptions.py | 2 +- .../google/cloud/bigtable/data/row.py | 2 +- packages/google-cloud-bigtable/mypy.ini | 12 +++++- packages/google-cloud-bigtable/noxfile.py | 40 ++++++++----------- packages/google-cloud-bigtable/owlbot.py | 2 + packages/google-cloud-bigtable/setup.py | 2 + .../testing/constraints-3.14.txt | 0 .../tests/system/conftest.py | 2 +- .../admin_overlay/test_async_consistency.py | 3 +- 24 files changed, 144 insertions(+), 46 deletions(-) rename packages/google-cloud-bigtable/.kokoro/presubmit/{system-3.8.cfg => system-3.9.cfg} (83%) create mode 100644 packages/google-cloud-bigtable/.kokoro/presubmit/system.cfg create mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.14/common.cfg create mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.14/continuous.cfg create mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.14/periodic-head.cfg create mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.14/periodic.cfg create mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.14/presubmit.cfg create mode 100644 packages/google-cloud-bigtable/testing/constraints-3.14.txt diff --git a/packages/google-cloud-bigtable/.cross_sync/transformers.py b/packages/google-cloud-bigtable/.cross_sync/transformers.py index 42ba3f83c4e7..9adadd0aa727 100644 --- a/packages/google-cloud-bigtable/.cross_sync/transformers.py +++ b/packages/google-cloud-bigtable/.cross_sync/transformers.py @@ -71,18 +71,19 @@ def visit_FunctionDef(self, node): Replace function docstrings """ docstring = ast.get_docstring(node) - if docstring and isinstance(node.body[0], ast.Expr) and isinstance( - node.body[0].value, ast.Str - ): + if docstring and isinstance(node.body[0], ast.Expr) \ + and isinstance(node.body[0].value, ast.Constant) \ + and isinstance(node.body[0].value.value, str) \ + : for key_word, replacement in self.replacements.items(): docstring = docstring.replace(key_word, replacement) - node.body[0].value.s = docstring + node.body[0].value.value = docstring return self.generic_visit(node) def visit_Constant(self, node): """Replace string type annotations""" try: - node.s = self.replacements.get(node.s, node.s) + node.value = self.replacements.get(node.value, node.value) except TypeError: # ignore unhashable types (e.g. 
list) pass @@ -264,7 +265,7 @@ def get_output_path(self, node): for target in n.targets: if isinstance(target, ast.Name) and target.id == self.FILE_ANNOTATION: # return the output path - return n.value.s.replace(".", "/") + ".py" + return n.value.value.replace(".", "/") + ".py" def visit_Module(self, node): # look for __CROSS_SYNC_OUTPUT__ Assign statement diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml index c631e1f7d7e9..9a7846675f55 100644 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:5581906b957284864632cde4e9c51d1cc66b0094990b27e689132fe5cd036046 -# created: 2025-03-05 + digest: sha256:4a9e5d44b98e8672e2037ee22bc6b4f8e844a2d75fcb78ea8a4b38510112abc6 +# created: 2025-10-07 diff --git a/packages/google-cloud-bigtable/.github/sync-repo-settings.yaml b/packages/google-cloud-bigtable/.github/sync-repo-settings.yaml index df49eafcc962..14e32d6fcb64 100644 --- a/packages/google-cloud-bigtable/.github/sync-repo-settings.yaml +++ b/packages/google-cloud-bigtable/.github/sync-repo-settings.yaml @@ -29,9 +29,19 @@ branchProtectionRules: # List of required status check contexts that must pass for commits to be accepted to matching branches. requiredStatusCheckContexts: - 'Kokoro' - - 'Kokoro system-3.8' + - 'Kokoro system' - 'cla/google' - 'OwlBot Post Processor' + - 'lint' + - 'mypy' + - 'docs' + - 'docfx' + - 'unit-3.9' + - 'unit-3.10' + - 'unit-3.11' + - 'unit-3.12' + - 'unit-3.13' + - 'unit-3.14' # List of explicit permissions to add (additive only) permissionRules: # Team slug to add to repository permissions diff --git a/packages/google-cloud-bigtable/.github/workflows/conformance.yaml b/packages/google-cloud-bigtable/.github/workflows/conformance.yaml index 6a96a87d3f6a..f7396eaa997f 100644 --- a/packages/google-cloud-bigtable/.github/workflows/conformance.yaml +++ b/packages/google-cloud-bigtable/.github/workflows/conformance.yaml @@ -25,7 +25,7 @@ jobs: strategy: matrix: test-version: [ "v0.0.4" ] - py-version: [ 3.8 ] + py-version: [ 3.13 ] client-type: [ "async", "sync"] # None of the clients currently support reverse scans, execute query plan refresh, retry info, or routing cookie include: diff --git a/packages/google-cloud-bigtable/.github/workflows/mypy.yml b/packages/google-cloud-bigtable/.github/workflows/mypy.yml index 3915cddd3d1c..f2b78a5363f3 100644 --- a/packages/google-cloud-bigtable/.github/workflows/mypy.yml +++ b/packages/google-cloud-bigtable/.github/workflows/mypy.yml @@ -12,7 +12,7 @@ jobs: - name: Setup Python uses: actions/setup-python@v5 with: - python-version: "3.8" + python-version: "3.13" - name: Install nox run: | python -m pip install --upgrade setuptools pip wheel diff --git a/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml b/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml index c9dab998c992..d8bbbb639a08 100644 --- a/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml +++ b/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml @@ -17,7 +17,7 @@ jobs: - name: Setup Python uses: actions/setup-python@v5 with: - python-version: '3.8' + python-version: '3.13' - name: Setup GCloud SDK uses: google-github-actions/setup-gcloud@v2.1.1 diff --git 
a/packages/google-cloud-bigtable/.github/workflows/unittest.yml b/packages/google-cloud-bigtable/.github/workflows/unittest.yml index 6a0429d96101..d59bbb1b82a6 100644 --- a/packages/google-cloud-bigtable/.github/workflows/unittest.yml +++ b/packages/google-cloud-bigtable/.github/workflows/unittest.yml @@ -11,7 +11,7 @@ jobs: runs-on: ubuntu-22.04 strategy: matrix: - python: ['3.7', '3.8', '3.9', '3.10', '3.11', '3.12', '3.13'] + python: ['3.7', '3.8', '3.9', '3.10', '3.11', '3.12', '3.13', '3.14'] steps: - name: Checkout uses: actions/checkout@v4 diff --git a/packages/google-cloud-bigtable/.kokoro/presubmit/system-3.8.cfg b/packages/google-cloud-bigtable/.kokoro/presubmit/system-3.9.cfg similarity index 83% rename from packages/google-cloud-bigtable/.kokoro/presubmit/system-3.8.cfg rename to packages/google-cloud-bigtable/.kokoro/presubmit/system-3.9.cfg index f4bcee3db0f0..b8ae66b376ff 100644 --- a/packages/google-cloud-bigtable/.kokoro/presubmit/system-3.8.cfg +++ b/packages/google-cloud-bigtable/.kokoro/presubmit/system-3.9.cfg @@ -3,5 +3,5 @@ # Only run this nox session. env_vars: { key: "NOX_SESSION" - value: "system-3.8" + value: "system-3.9" } \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/presubmit/system.cfg b/packages/google-cloud-bigtable/.kokoro/presubmit/system.cfg new file mode 100644 index 000000000000..b8ae66b376ff --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/presubmit/system.cfg @@ -0,0 +1,7 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Only run this nox session. +env_vars: { + key: "NOX_SESSION" + value: "system-3.9" +} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.14/common.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.14/common.cfg new file mode 100644 index 000000000000..a9ea06119a94 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.14/common.cfg @@ -0,0 +1,40 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Specify which tests to run +env_vars: { + key: "RUN_TESTS_SESSION" + value: "py-3.14" +} + +# Declare build specific Cloud project. +env_vars: { + key: "BUILD_SPECIFIC_GCLOUD_PROJECT" + value: "python-docs-samples-tests-314" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-bigtable/.kokoro/test-samples.sh" +} + +# Configure the docker image for kokoro-trampoline. +env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker" +} + +# Download secrets for samples +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. 
+build_file: "python-bigtable/.kokoro/trampoline_v2.sh" diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.14/continuous.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.14/continuous.cfg new file mode 100644 index 000000000000..a1c8d9759c88 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.14/continuous.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.14/periodic-head.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.14/periodic-head.cfg new file mode 100644 index 000000000000..be25a34f9ad3 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.14/periodic-head.cfg @@ -0,0 +1,11 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-bigtable/.kokoro/test-samples-against-head.sh" +} diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.14/periodic.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.14/periodic.cfg new file mode 100644 index 000000000000..71cd1e597e38 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.14/periodic.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "False" +} diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.14/presubmit.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.14/presubmit.cfg new file mode 100644 index 000000000000..a1c8d9759c88 --- /dev/null +++ b/packages/google-cloud-bigtable/.kokoro/samples/python3.14/presubmit.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/CONTRIBUTING.rst b/packages/google-cloud-bigtable/CONTRIBUTING.rst index 985538f489d5..07ac8f2187fc 100644 --- a/packages/google-cloud-bigtable/CONTRIBUTING.rst +++ b/packages/google-cloud-bigtable/CONTRIBUTING.rst @@ -22,7 +22,7 @@ In order to add a feature: documentation. - The feature must work fully on the following CPython versions: - 3.7, 3.8, 3.9, 3.10, 3.11, 3.12 and 3.13 on both UNIX and Windows. + 3.7, 3.8, 3.9, 3.10, 3.11, 3.12, 3.13 and 3.14 on both UNIX and Windows. - The feature must not add unnecessary dependencies (where "unnecessary" is of course subjective, but new dependencies should @@ -72,7 +72,7 @@ We use `nox `__ to instrument our tests. - To run a single unit test:: - $ nox -s unit-3.13 -- -k + $ nox -s unit-3.14 -- -k .. note:: @@ -143,12 +143,12 @@ Running System Tests $ nox -s system # Run a single system test - $ nox -s system-3.8 -- -k + $ nox -s system-3.9 -- -k .. note:: - System tests are only configured to run under Python 3.8. + System tests are only configured to run under Python 3.9. For expediency, we do not run them in older versions of Python 3. This alone will not run the tests. You'll need to change some local @@ -228,6 +228,7 @@ We support: - `Python 3.11`_ - `Python 3.12`_ - `Python 3.13`_ +- `Python 3.14`_ .. _Python 3.7: https://docs.python.org/3.7/ .. _Python 3.8: https://docs.python.org/3.8/ @@ -236,6 +237,7 @@ We support: .. _Python 3.11: https://docs.python.org/3.11/ .. 
_Python 3.12: https://docs.python.org/3.12/ .. _Python 3.13: https://docs.python.org/3.13/ +.. _Python 3.14: https://docs.python.org/3.14/ Supported versions can be found in our ``noxfile.py`` `config`_. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/exceptions.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/exceptions.py index 5645ae3aa228..b19e0e5ea126 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/exceptions.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/exceptions.py @@ -90,7 +90,7 @@ def __init__(self, message, excs): # apply index header if idx != 0: message_parts.append( - f"+---------------- {str(idx+1).rjust(2)} ----------------" + f"+---------------- {str(idx + 1).rjust(2)} ----------------" ) cause = e.__cause__ # if this exception was had a cause, print the cause first diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/row.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/row.py index a5575b83ac2b..50e65a958c51 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/row.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/row.py @@ -190,7 +190,7 @@ def __str__(self) -> str: elif len(cell_list) == 1: line.append(f"[{cell_list[0]}],") else: - line.append(f"[{cell_list[0]}, (+{len(cell_list)-1} more)],") + line.append(f"[{cell_list[0]}, (+{len(cell_list) - 1} more)],") output.append("".join(line)) output.append("}") return "\n".join(output) diff --git a/packages/google-cloud-bigtable/mypy.ini b/packages/google-cloud-bigtable/mypy.ini index 31cc24223c7a..701b7587ce6f 100644 --- a/packages/google-cloud-bigtable/mypy.ini +++ b/packages/google-cloud-bigtable/mypy.ini @@ -1,6 +1,9 @@ [mypy] -python_version = 3.8 +python_version = 3.13 namespace_packages = True +check_untyped_defs = True +warn_unreachable = True +disallow_any_generics = True exclude = tests/unit/gapic/ [mypy-grpc.*] @@ -26,3 +29,10 @@ ignore_missing_imports = True [mypy-pytest] ignore_missing_imports = True + +[mypy-google.cloud.*] +ignore_errors = True + +# only verify data client +[mypy-google.cloud.bigtable.data.*] +ignore_errors = False diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index 548bfd0ec97c..a182bafba993 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -32,7 +32,7 @@ ISORT_VERSION = "isort==5.11.0" LINT_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"] -DEFAULT_PYTHON_VERSION = "3.8" +DEFAULT_PYTHON_VERSION = "3.13" UNIT_TEST_PYTHON_VERSIONS: List[str] = [ "3.7", @@ -42,6 +42,7 @@ "3.11", "3.12", "3.13", + "3.14", ] UNIT_TEST_STANDARD_DEPENDENCIES = [ "mock", @@ -58,7 +59,7 @@ UNIT_TEST_EXTRAS: List[str] = [] UNIT_TEST_EXTRAS_BY_PYTHON: Dict[str, List[str]] = {} -SYSTEM_TEST_PYTHON_VERSIONS: List[str] = ["3.8", "3.12"] +SYSTEM_TEST_PYTHON_VERSIONS: List[str] = ["3.9", "3.14"] SYSTEM_TEST_STANDARD_DEPENDENCIES: List[str] = [ "mock", "pytest", @@ -78,7 +79,12 @@ # 'docfx' is excluded since it only needs to run in 'docs-presubmit' nox.options.sessions = [ - "unit", + "unit-3.9", + "unit-3.10", + "unit-3.11", + "unit-3.12", + "unit-3.13", + "unit-3.14", "system_emulated", "system", "mypy", @@ -148,26 +154,13 @@ def mypy(session): "mypy", "types-setuptools", "types-protobuf", "types-mock", "types-requests" ) session.install("google-cloud-testutils") - session.run( - "mypy", - "-p", - "google.cloud.bigtable.data", - "--check-untyped-defs", - 
"--warn-unreachable", - "--disallow-any-generics", - "--exclude", - "tests/system/v2_client", - "--exclude", - "tests/unit/v2_client", - "--disable-error-code", - "func-returns-value", # needed for CrossSync.rm_aio - ) + session.run("mypy", "-p", "google.cloud.bigtable.data") @nox.session(python=DEFAULT_PYTHON_VERSION) def lint_setup_py(session): """Verify that setup.py is valid (including RST check).""" - session.install("docutils", "pygments") + session.install("setuptools", "docutils", "pygments") session.run("python", "setup.py", "check", "--restructuredtext", "--strict") @@ -206,8 +199,8 @@ def install_unittest_dependencies(session, *constraints): ) def unit(session, protobuf_implementation): # Install all test dependencies, then install this package in-place. - - if protobuf_implementation == "cpp" and session.python in ("3.11", "3.12", "3.13"): + py_version = tuple([int(v) for v in session.python.split(".")]) + if protobuf_implementation == "cpp" and py_version >= (3, 11): session.skip("cpp implementation is not supported in python 3.11+") constraints_path = str( @@ -270,7 +263,7 @@ def install_systemtest_dependencies(session, *constraints): session.install("-e", ".", *constraints) -@nox.session(python="3.8") +@nox.session(python=DEFAULT_PYTHON_VERSION) def system_emulated(session): import subprocess import signal @@ -456,7 +449,7 @@ def docfx(session): session.run("python", "docs/scripts/patch_devsite_toc.py") -@nox.session(python="3.12") +@nox.session(python="3.14") @nox.parametrize( "protobuf_implementation", ["python", "upb", "cpp"], @@ -464,7 +457,8 @@ def docfx(session): def prerelease_deps(session, protobuf_implementation): """Run all tests with prerelease versions of dependencies installed.""" - if protobuf_implementation == "cpp" and session.python in ("3.11", "3.12", "3.13"): + py_version = tuple([int(v) for v in session.python.split(".")]) + if protobuf_implementation == "cpp" and py_version >= (3, 11): session.skip("cpp implementation is not supported in python 3.11+") # Install all dependencies diff --git a/packages/google-cloud-bigtable/owlbot.py b/packages/google-cloud-bigtable/owlbot.py index 9562b61423a3..b6b741b542e5 100644 --- a/packages/google-cloud-bigtable/owlbot.py +++ b/packages/google-cloud-bigtable/owlbot.py @@ -109,6 +109,8 @@ def get_staging_dirs( system_test_external_dependencies=[ "pytest-asyncio==0.21.2", ], + system_test_python_versions=["3.9"], + unit_test_python_versions=["3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13", "3.14"], ) s.move(templated_files, excludes=[".coveragerc", "README.rst", ".github/release-please.yml", "noxfile.py", "renovate.json"]) diff --git a/packages/google-cloud-bigtable/setup.py b/packages/google-cloud-bigtable/setup.py index 3cb9d465dafe..cac533db6427 100644 --- a/packages/google-cloud-bigtable/setup.py +++ b/packages/google-cloud-bigtable/setup.py @@ -85,6 +85,8 @@ "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", + "Programming Language :: Python :: 3.14", "Operating System :: OS Independent", "Topic :: Internet", ], diff --git a/packages/google-cloud-bigtable/testing/constraints-3.14.txt b/packages/google-cloud-bigtable/testing/constraints-3.14.txt new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/packages/google-cloud-bigtable/tests/system/conftest.py b/packages/google-cloud-bigtable/tests/system/conftest.py index 39480942dc4d..8c0eb30b1565 100644 --- 
a/packages/google-cloud-bigtable/tests/system/conftest.py +++ b/packages/google-cloud-bigtable/tests/system/conftest.py @@ -30,7 +30,7 @@ @pytest.fixture(scope="session") def event_loop(): - loop = asyncio.get_event_loop() + loop = asyncio.new_event_loop() yield loop loop.stop() loop.close() diff --git a/packages/google-cloud-bigtable/tests/unit/admin_overlay/test_async_consistency.py b/packages/google-cloud-bigtable/tests/unit/admin_overlay/test_async_consistency.py index 56978713c5f2..b64ae1a117ff 100644 --- a/packages/google-cloud-bigtable/tests/unit/admin_overlay/test_async_consistency.py +++ b/packages/google-cloud-bigtable/tests/unit/admin_overlay/test_async_consistency.py @@ -43,7 +43,8 @@ def async_mock_check_consistency_callable(max_poll_count=1): return mock.AsyncMock(spec=["__call__"], side_effect=side_effect) -def test_check_consistency_future_cancel(): +@pytest.mark.asyncio +async def test_check_consistency_future_cancel(): check_consistency_call = async_mock_check_consistency_callable() future = async_consistency._AsyncCheckConsistencyPollingFuture( check_consistency_call From 6777666328050931ddb58b48f327f593900d7119 Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Wed, 22 Oct 2025 11:53:30 -0700 Subject: [PATCH 886/892] chore(main): release 2.34.0 (#1219) --- .../google-cloud-bigtable/.release-please-manifest.json | 2 +- packages/google-cloud-bigtable/CHANGELOG.md | 7 +++++++ .../google/cloud/bigtable/gapic_version.py | 2 +- .../google/cloud/bigtable_admin/gapic_version.py | 2 +- .../google/cloud/bigtable_admin_v2/gapic_version.py | 2 +- .../google/cloud/bigtable_v2/gapic_version.py | 2 +- 6 files changed, 12 insertions(+), 5 deletions(-) diff --git a/packages/google-cloud-bigtable/.release-please-manifest.json b/packages/google-cloud-bigtable/.release-please-manifest.json index 12401d7d2d45..7887ba9321f0 100644 --- a/packages/google-cloud-bigtable/.release-please-manifest.json +++ b/packages/google-cloud-bigtable/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "2.33.0" + ".": "2.34.0" } \ No newline at end of file diff --git a/packages/google-cloud-bigtable/CHANGELOG.md b/packages/google-cloud-bigtable/CHANGELOG.md index 7dc43a9a28f3..2a0251dc158f 100644 --- a/packages/google-cloud-bigtable/CHANGELOG.md +++ b/packages/google-cloud-bigtable/CHANGELOG.md @@ -4,6 +4,13 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.34.0](https://github.com/googleapis/python-bigtable/compare/v2.33.0...v2.34.0) (2025-10-16) + + +### Features + +* Add support for Python 3.14 ([#1217](https://github.com/googleapis/python-bigtable/issues/1217)) ([263332a](https://github.com/googleapis/python-bigtable/commit/263332af71a229cb4fa598008a708137086a6f67)) + ## [2.33.0](https://github.com/googleapis/python-bigtable/compare/v2.32.0...v2.33.0) (2025-10-06) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py index 0c5de5c03afe..4800b05591a5 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "2.33.0" # {x-release-please-version} +__version__ = "2.34.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py index 0c5de5c03afe..4800b05591a5 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.33.0" # {x-release-please-version} +__version__ = "2.34.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py index 0c5de5c03afe..4800b05591a5 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.33.0" # {x-release-please-version} +__version__ = "2.34.0" # {x-release-please-version} diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py index 0c5de5c03afe..4800b05591a5 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.33.0" # {x-release-please-version} +__version__ = "2.34.0" # {x-release-please-version} From bd528498573a7d0fa676948277ac5e58a7dcb30a Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Wed, 12 Nov 2025 13:04:28 -0800 Subject: [PATCH 887/892] feat: add PeerInfo proto in Bigtable API (#1190) - [ ] Regenerate this pull request now. 
BEGIN_COMMIT_OVERRIDE feat: add PeerInfo proto in Bigtable API fix: Add ReadRows/SampleRowKeys bindings for materialized views fix: Deprecate credentials_file argument feat: Add Type API updates needed to support structured keys in materialized views feat: Add encodings for STRUCT and the Timestamp type END_COMMIT_OVERRIDE PiperOrigin-RevId: 829585900 Source-Link: https://github.com/googleapis/googleapis/commit/1b5f8632487bce889ce05366647addc6ef5ee36d Source-Link: https://github.com/googleapis/googleapis-gen/commit/1a692875003e2754729dc2a4cca88055051d2aae Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiMWE2OTI4NzUwMDNlMjc1NDcyOWRjMmE0Y2NhODgwNTUwNTFkMmFhZSJ9 BEGIN_NESTED_COMMIT fix: Add ReadRows/SampleRowKeys bindings for materialized views fix: Deprecate credentials_file argument chore: Update gapic-generator-python to 1.28.0 PiperOrigin-RevId: 816753840 Source-Link: https://github.com/googleapis/googleapis/commit/d06cf27a47074d1de3fde6f0ca48680a96229306 Source-Link: https://github.com/googleapis/googleapis-gen/commit/a524e7310882bbb99bfe1399b18bed328979211c Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiYTUyNGU3MzEwODgyYmJiOTliZmUxMzk5YjE4YmVkMzI4OTc5MjExYyJ9 END_NESTED_COMMIT BEGIN_NESTED_COMMIT feat: Add Type API updates needed to support structured keys in materialized views feat: Add encodings for STRUCT and the Timestamp type PiperOrigin-RevId: 805031861 Source-Link: https://github.com/googleapis/googleapis/commit/6d1dca2b8e3d50914609414e219df2778b2b20ba Source-Link: https://github.com/googleapis/googleapis-gen/commit/ecd9d8860bae8bb37b452bfc6eefbdd22d028f09 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiZWNkOWQ4ODYwYmFlOGJiMzdiNDUyYmZjNmVlZmJkZDIyZDAyOGYwOSJ9 END_NESTED_COMMIT BEGIN_NESTED_COMMIT chore: Update gapic-generator-python to 1.26.2 PiperOrigin-RevId: 802200836 Source-Link: https://github.com/googleapis/googleapis/commit/d300b151a973ce0425ae4ad07b3de957ca31bec6 Source-Link: https://github.com/googleapis/googleapis-gen/commit/a1ff0ae72ddcb68a259215d8c77661e2cdbb9b02 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiYTFmZjBhZTcyZGRjYjY4YTI1OTIxNWQ4Yzc3NjYxZTJjZGJiOWIwMiJ9 END_NESTED_COMMIT BEGIN_NESTED_COMMIT chore: update Python generator version to 1.25.1 PiperOrigin-RevId: 800535761 Source-Link: https://github.com/googleapis/googleapis/commit/4cf1f99cccc014627af5e8a6c0f80a3e6ec0d268 Source-Link: https://github.com/googleapis/googleapis-gen/commit/133d25b68e712116e1c5dc71fc3eb3c5e717022a Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiMTMzZDI1YjY4ZTcxMjExNmUxYzVkYzcxZmMzZWIzYzVlNzE3MDIyYSJ9 END_NESTED_COMMIT BEGIN_NESTED_COMMIT fix: Add ReadRows/SampleRowKeys bindings for materialized views PiperOrigin-RevId: 793800781 Source-Link: https://github.com/googleapis/googleapis/commit/fe06a492944dc3a8360ed5b426942d34631eeca7 Source-Link: https://github.com/googleapis/googleapis-gen/commit/c4550f60725dc4c07adfe68cee0ac72eb4a5b1bb Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiYzQ1NTBmNjA3MjVkYzRjMDdhZGZlNjhjZWUwYWM3MmViNGE1YjFiYiJ9 END_NESTED_COMMIT --------- Co-authored-by: Owl Bot Co-authored-by: Anthonios Partheniou --- .../.github/workflows/lint.yml | 2 +- .../.github/workflows/unittest.yml | 2 +- .../bigtable_instance_admin/async_client.py | 16 +- .../bigtable_instance_admin/client.py | 16 +- .../transports/base.py | 5 +- .../transports/grpc.py | 8 +- .../transports/grpc_asyncio.py | 8 +- .../transports/rest.py | 5 +- .../bigtable_table_admin/async_client.py | 32 +- .../services/bigtable_table_admin/client.py | 32 +- 
.../bigtable_table_admin/transports/base.py | 5 +- .../bigtable_table_admin/transports/grpc.py | 8 +- .../transports/grpc_asyncio.py | 8 +- .../bigtable_table_admin/transports/rest.py | 5 +- .../types/bigtable_table_admin.py | 106 +++--- .../cloud/bigtable_admin_v2/types/instance.py | 18 +- .../cloud/bigtable_admin_v2/types/table.py | 79 ++-- .../cloud/bigtable_admin_v2/types/types.py | 95 +++-- .../google/cloud/bigtable_v2/__init__.py | 2 + .../services/bigtable/async_client.py | 14 + .../bigtable_v2/services/bigtable/client.py | 14 + .../services/bigtable/transports/base.py | 5 +- .../services/bigtable/transports/grpc.py | 8 +- .../bigtable/transports/grpc_asyncio.py | 9 +- .../services/bigtable/transports/rest.py | 101 ++++- .../services/bigtable/transports/rest_base.py | 9 + .../cloud/bigtable_v2/types/__init__.py | 4 + .../cloud/bigtable_v2/types/bigtable.py | 8 +- .../google/cloud/bigtable_v2/types/data.py | 69 ++-- .../cloud/bigtable_v2/types/feature_flags.py | 7 + .../cloud/bigtable_v2/types/peer_info.py | 118 ++++++ .../bigtable_v2/types/response_params.py | 10 + .../google/cloud/bigtable_v2/types/types.py | 354 +++++++++++++++--- packages/google-cloud-bigtable/owlbot.py | 1 + .../samples/beam/requirements.txt | 3 +- ...pet_metadata_google.bigtable.admin.v2.json | 2 +- .../unit/gapic/bigtable_v2/test_bigtable.py | 269 +++++++++++++ 37 files changed, 1117 insertions(+), 340 deletions(-) create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/peer_info.py diff --git a/packages/google-cloud-bigtable/.github/workflows/lint.yml b/packages/google-cloud-bigtable/.github/workflows/lint.yml index 4866193af2a9..9a0598202bb2 100644 --- a/packages/google-cloud-bigtable/.github/workflows/lint.yml +++ b/packages/google-cloud-bigtable/.github/workflows/lint.yml @@ -12,7 +12,7 @@ jobs: - name: Setup Python uses: actions/setup-python@v5 with: - python-version: "3.8" + python-version: "3.13" - name: Install nox run: | python -m pip install --upgrade setuptools pip wheel diff --git a/packages/google-cloud-bigtable/.github/workflows/unittest.yml b/packages/google-cloud-bigtable/.github/workflows/unittest.yml index d59bbb1b82a6..dad646c6b9e6 100644 --- a/packages/google-cloud-bigtable/.github/workflows/unittest.yml +++ b/packages/google-cloud-bigtable/.github/workflows/unittest.yml @@ -45,7 +45,7 @@ jobs: - name: Setup Python uses: actions/setup-python@v5 with: - python-version: "3.8" + python-version: "3.13" - name: Install coverage run: | python -m pip install --upgrade setuptools pip wheel diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py index a1aee2370fd3..632496543912 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ -2565,19 +2565,19 @@ async def sample_get_iam_policy(): constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM - documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). + documentation](https://cloud.google.com/iam/help/conditions/resource-policies). 
**JSON example:** - :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` + :literal:`` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` **YAML example:** - :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` + :literal:`` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` For a description of IAM and its features, see the [IAM - documentation](\ https://cloud.google.com/iam/docs/). + documentation](https://cloud.google.com/iam/docs/). """ # Create or coerce a protobuf request object. @@ -2704,19 +2704,19 @@ async def sample_set_iam_policy(): constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM - documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). + documentation](https://cloud.google.com/iam/help/conditions/resource-policies). 
**JSON example:** - :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` + :literal:`` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` **YAML example:** - :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` + :literal:`` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` For a description of IAM and its features, see the [IAM - documentation](\ https://cloud.google.com/iam/docs/). + documentation](https://cloud.google.com/iam/docs/). """ # Create or coerce a protobuf request object. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py index 84df01058326..7c72be99730b 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -3067,19 +3067,19 @@ def sample_get_iam_policy(): constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM - documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). + documentation](https://cloud.google.com/iam/help/conditions/resource-policies). 
**JSON example:** - :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` + :literal:`` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` **YAML example:** - :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` + :literal:`` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` For a description of IAM and its features, see the [IAM - documentation](\ https://cloud.google.com/iam/docs/). + documentation](https://cloud.google.com/iam/docs/). """ # Create or coerce a protobuf request object. @@ -3207,19 +3207,19 @@ def sample_set_iam_policy(): constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM - documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). + documentation](https://cloud.google.com/iam/help/conditions/resource-policies). 
**JSON example:** - :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` + :literal:`` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` **YAML example:** - :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` + :literal:`` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` For a description of IAM and its features, see the [IAM - documentation](\ https://cloud.google.com/iam/docs/). + documentation](https://cloud.google.com/iam/docs/). """ # Create or coerce a protobuf request object. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py index f5ceeeb687aa..3a05dd6631ca 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py @@ -81,9 +81,10 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. 
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py index a294144efd68..d5d5cf1e53b9 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py @@ -160,9 +160,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if a ``channel`` instance is provided. channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]): @@ -296,9 +297,10 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py index aae0f44c4bf2..7ce7627649fe 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py @@ -157,8 +157,9 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -209,9 +210,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if a ``channel`` instance is provided. 
- credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py index 12af0792b075..9879c4c45360 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py @@ -1719,9 +1719,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. + This argument is ignored if ``channel`` is provided. This argument will be + removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if ``channel`` is provided. client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index d79d1b088020..7f772c87c77f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -936,14 +936,14 @@ async def sample_update_table(): specifying which fields (e.g. ``change_stream_config``) in the ``table`` field should be updated. This mask is relative to the ``table`` field, not to the request - message. The wildcard (*) path is currently not + message. The wildcard (\*) path is currently not supported. Currently UpdateTable is only supported for the following fields: - - ``change_stream_config`` - - ``change_stream_config.retention_period`` - - ``deletion_protection`` - - ``row_key_schema`` + - ``change_stream_config`` + - ``change_stream_config.retention_period`` + - ``deletion_protection`` + - ``row_key_schema`` If ``column_families`` is set in ``update_mask``, it will return an UNIMPLEMENTED error. @@ -3044,7 +3044,7 @@ async def sample_create_backup(): full backup name, of the form: ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``. This string must be between 1 and 50 characters in - length and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]*. + length and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]\*. This corresponds to the ``backup_id`` field on the ``request`` instance; if ``request`` is provided, this @@ -3293,7 +3293,7 @@ async def sample_update_backup(): required. Other fields are ignored. 
Update is only supported for the following fields: - - ``backup.expire_time``. + - ``backup.expire_time``. This corresponds to the ``backup`` field on the ``request`` instance; if ``request`` is provided, this @@ -3784,7 +3784,7 @@ async def sample_copy_backup(): name, of the form: ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``. This string must be between 1 and 50 characters in - length and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]*. + length and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]\*. This corresponds to the ``backup_id`` field on the ``request`` instance; if ``request`` is provided, this @@ -3971,19 +3971,19 @@ async def sample_get_iam_policy(): constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM - documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). + documentation](https://cloud.google.com/iam/help/conditions/resource-policies). **JSON example:** - :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` + :literal:`` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` **YAML example:** - :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` + :literal:`` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` For a description of IAM and its features, see the [IAM - documentation](\ https://cloud.google.com/iam/docs/). + documentation](https://cloud.google.com/iam/docs/). """ # Create or coerce a protobuf request object. @@ -4110,19 +4110,19 @@ async def sample_set_iam_policy(): constraints based on attributes of the request, the resource, or both. 
To learn which resources support conditions in their IAM policies, see the [IAM - documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). + documentation](https://cloud.google.com/iam/help/conditions/resource-policies). **JSON example:** - :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` + :literal:`` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` **YAML example:** - :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` + :literal:`` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` For a description of IAM and its features, see the [IAM - documentation](\ https://cloud.google.com/iam/docs/). + documentation](https://cloud.google.com/iam/docs/). """ # Create or coerce a protobuf request object. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index d0030af9257b..4c6aff187ae0 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -1486,14 +1486,14 @@ def sample_update_table(): specifying which fields (e.g. ``change_stream_config``) in the ``table`` field should be updated. This mask is relative to the ``table`` field, not to the request - message. The wildcard (*) path is currently not + message. The wildcard (\*) path is currently not supported. 
Currently UpdateTable is only supported for the following fields: - - ``change_stream_config`` - - ``change_stream_config.retention_period`` - - ``deletion_protection`` - - ``row_key_schema`` + - ``change_stream_config`` + - ``change_stream_config.retention_period`` + - ``deletion_protection`` + - ``row_key_schema`` If ``column_families`` is set in ``update_mask``, it will return an UNIMPLEMENTED error. @@ -3549,7 +3549,7 @@ def sample_create_backup(): full backup name, of the form: ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``. This string must be between 1 and 50 characters in - length and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]*. + length and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]\*. This corresponds to the ``backup_id`` field on the ``request`` instance; if ``request`` is provided, this @@ -3792,7 +3792,7 @@ def sample_update_backup(): required. Other fields are ignored. Update is only supported for the following fields: - - ``backup.expire_time``. + - ``backup.expire_time``. This corresponds to the ``backup`` field on the ``request`` instance; if ``request`` is provided, this @@ -4272,7 +4272,7 @@ def sample_copy_backup(): name, of the form: ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``. This string must be between 1 and 50 characters in - length and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]*. + length and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]\*. This corresponds to the ``backup_id`` field on the ``request`` instance; if ``request`` is provided, this @@ -4456,19 +4456,19 @@ def sample_get_iam_policy(): constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM - documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). + documentation](https://cloud.google.com/iam/help/conditions/resource-policies). 
**JSON example:** - :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` + :literal:`` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` **YAML example:** - :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` + :literal:`` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` For a description of IAM and its features, see the [IAM - documentation](\ https://cloud.google.com/iam/docs/). + documentation](https://cloud.google.com/iam/docs/). """ # Create or coerce a protobuf request object. @@ -4596,19 +4596,19 @@ def sample_set_iam_policy(): constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM - documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). + documentation](https://cloud.google.com/iam/help/conditions/resource-policies). 
**JSON example:** - :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` + :literal:`` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` **YAML example:** - :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` + :literal:`` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` For a description of IAM and its features, see the [IAM - documentation](\ https://cloud.google.com/iam/docs/). + documentation](https://cloud.google.com/iam/docs/). """ # Create or coerce a protobuf request object. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py index 8e2cb7304429..8ad08df3ffa0 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py @@ -81,9 +81,10 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. 
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py index 5f46c3aa3929..f8d1058c8c32 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py @@ -162,9 +162,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if a ``channel`` instance is provided. channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]): @@ -298,9 +299,10 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py index 159a96edae02..5017f17d0575 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py @@ -159,8 +159,9 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -211,9 +212,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if a ``channel`` instance is provided. 
- credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py index ec2462d4acfb..6c3815f79437 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py @@ -1896,9 +1896,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. + This argument is ignored if ``channel`` is provided. This argument will be + removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if ``channel`` is provided. client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py index d6403fc2a72a..69de07a2ab8e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py @@ -235,20 +235,20 @@ class CreateTableRequest(proto.Message): Example: - - Row keys := - ``["a", "apple", "custom", "customer_1", "customer_2",`` - ``"other", "zz"]`` - - initial_split_keys := - ``["apple", "customer_1", "customer_2", "other"]`` - - Key assignment: - - - Tablet 1 ``[, apple) => {"a"}.`` - - Tablet 2 - ``[apple, customer_1) => {"apple", "custom"}.`` - - Tablet 3 - ``[customer_1, customer_2) => {"customer_1"}.`` - - Tablet 4 ``[customer_2, other) => {"customer_2"}.`` - - Tablet 5 ``[other, ) => {"other", "zz"}.`` + - Row keys := + ``["a", "apple", "custom", "customer_1", "customer_2",`` + ``"other", "zz"]`` + - initial_split_keys := + ``["apple", "customer_1", "customer_2", "other"]`` + - Key assignment: + + - Tablet 1 ``[, apple) => {"a"}.`` + - Tablet 2 + ``[apple, customer_1) => {"apple", "custom"}.`` + - Tablet 3 ``[customer_1, customer_2) => {"customer_1"}.`` + - Tablet 4 ``[customer_2, other) => {"customer_2"}.`` + - Tablet 5 + ``[other, ) => {"other", "zz"}.`` """ class Split(proto.Message): @@ -482,13 +482,13 @@ class UpdateTableRequest(proto.Message): which fields (e.g. ``change_stream_config``) in the ``table`` field should be updated. This mask is relative to the ``table`` field, not to the request message. The - wildcard (*) path is currently not supported. Currently + wildcard (\*) path is currently not supported. 
Currently UpdateTable is only supported for the following fields: - - ``change_stream_config`` - - ``change_stream_config.retention_period`` - - ``deletion_protection`` - - ``row_key_schema`` + - ``change_stream_config`` + - ``change_stream_config.retention_period`` + - ``deletion_protection`` + - ``row_key_schema`` If ``column_families`` is set in ``update_mask``, it will return an UNIMPLEMENTED error. @@ -1099,7 +1099,7 @@ class CreateBackupRequest(proto.Message): name, of the form: ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``. This string must be between 1 and 50 characters in length - and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]*. + and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]\*. backup (google.cloud.bigtable_admin_v2.types.Backup): Required. The backup to create. """ @@ -1167,7 +1167,7 @@ class UpdateBackupRequest(proto.Message): required. Other fields are ignored. Update is only supported for the following fields: - - ``backup.expire_time``. + - ``backup.expire_time``. update_mask (google.protobuf.field_mask_pb2.FieldMask): Required. A mask specifying which fields (e.g. ``expire_time``) in the Backup resource should be updated. @@ -1246,16 +1246,16 @@ class ListBackupsRequest(proto.Message): The fields eligible for filtering are: - - ``name`` - - ``source_table`` - - ``state`` - - ``start_time`` (and values are of the format - YYYY-MM-DDTHH:MM:SSZ) - - ``end_time`` (and values are of the format - YYYY-MM-DDTHH:MM:SSZ) - - ``expire_time`` (and values are of the format - YYYY-MM-DDTHH:MM:SSZ) - - ``size_bytes`` + - ``name`` + - ``source_table`` + - ``state`` + - ``start_time`` (and values are of the format + YYYY-MM-DDTHH:MM:SSZ) + - ``end_time`` (and values are of the format + YYYY-MM-DDTHH:MM:SSZ) + - ``expire_time`` (and values are of the format + YYYY-MM-DDTHH:MM:SSZ) + - ``size_bytes`` To filter on multiple expressions, provide each separate expression within parentheses. By default, each expression @@ -1264,20 +1264,20 @@ class ListBackupsRequest(proto.Message): Some examples of using filters are: - - ``name:"exact"`` --> The backup's name is the string - "exact". - - ``name:howl`` --> The backup's name contains the string - "howl". - - ``source_table:prod`` --> The source_table's name - contains the string "prod". - - ``state:CREATING`` --> The backup is pending creation. - - ``state:READY`` --> The backup is fully created and ready - for use. - - ``(name:howl) AND (start_time < \"2018-03-28T14:50:00Z\")`` - --> The backup name contains the string "howl" and - start_time of the backup is before 2018-03-28T14:50:00Z. - - ``size_bytes > 10000000000`` --> The backup's size is - greater than 10GB + - ``name:"exact"`` --> The backup's name is the string + "exact". + - ``name:howl`` --> The backup's name contains the string + "howl". + - ``source_table:prod`` --> The source_table's name contains + the string "prod". + - ``state:CREATING`` --> The backup is pending creation. + - ``state:READY`` --> The backup is fully created and ready + for use. + - ``(name:howl) AND (start_time < \"2018-03-28T14:50:00Z\")`` + --> The backup name contains the string "howl" and + start_time of the backup is before 2018-03-28T14:50:00Z. + - ``size_bytes > 10000000000`` --> The backup's size is + greater than 10GB order_by (str): An expression for specifying the sort order of the results of the request. 
The string value should specify one or more @@ -1286,13 +1286,13 @@ class ListBackupsRequest(proto.Message): Fields supported are: - - name - - source_table - - expire_time - - start_time - - end_time - - size_bytes - - state + - name + - source_table + - expire_time + - start_time + - end_time + - size_bytes + - state For example, "start_time". The default sorting order is ascending. To specify descending order for the field, a @@ -1381,7 +1381,7 @@ class CopyBackupRequest(proto.Message): to create the full backup name, of the form: ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``. This string must be between 1 and 50 characters in length - and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]*. + and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]\*. source_backup (str): Required. The source backup to be copied from. The source backup needs to be in READY state for it to be copied. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py index 865487f0d5e1..f07414d56957 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/instance.py @@ -67,15 +67,15 @@ class Instance(proto.Message): customer's organizational needs and deployment strategies. They can be used to filter resources and aggregate metrics. - - Label keys must be between 1 and 63 characters long and - must conform to the regular expression: - ``[\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}``. - - Label values must be between 0 and 63 characters long and - must conform to the regular expression: - ``[\p{Ll}\p{Lo}\p{N}_-]{0,63}``. - - No more than 64 labels can be associated with a given - resource. - - Keys and values must both be under 128 bytes. + - Label keys must be between 1 and 63 characters long and + must conform to the regular expression: + ``[\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}``. + - Label values must be between 0 and 63 characters long and + must conform to the regular expression: + ``[\p{Ll}\p{Lo}\p{N}_-]{0,63}``. + - No more than 64 labels can be associated with a given + resource. + - Keys and values must both be under 128 bytes. create_time (google.protobuf.timestamp_pb2.Timestamp): Output only. A commit timestamp representing when this Instance was created. For instances created before this diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py index c15eac7990b5..f6d1fe729ab5 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/table.py @@ -154,9 +154,9 @@ class Table(proto.Message): i.e. deleting the following resources through Admin APIs are prohibited: - - The table. - - The column families in the table. - - The instance containing the table. + - The table. + - The column families in the table. + - The instance containing the table. Note one can still delete the data stored in the table through Data APIs. @@ -181,37 +181,22 @@ class Table(proto.Message): they encounter an invalid row key. For example, if \_key = - "some_id#2024-04-30#\x00\x13\x00\xf3" with the following - schema: - - .. 
code-block:: - - { - fields { - field_name: "id" - type { string { encoding: utf8_bytes {} } } - } - fields { - field_name: "date" - type { string { encoding: utf8_bytes {} } } - } - fields { - field_name: "product_code" - type { int64 { encoding: big_endian_bytes {} } } - } - encoding { delimited_bytes { delimiter: "#" } } - } - - The decoded key parts would be: - id = "some_id", date = "2024-04-30", product_code = 1245427 - The query "SELECT \_key, product_code FROM table" will return - two columns: - - +========================================+==============+ - | \_key | product_code | - +========================================+==============+ - | "some_id#2024-04-30#\x00\x13\x00\xf3" | 1245427 | - +----------------------------------------+--------------+ + "some_id#2024-04-30#\\x00\\x13\\x00\\xf3" with the following + schema: { fields { field_name: "id" type { string { + encoding: utf8_bytes {} } } } fields { field_name: "date" + type { string { encoding: utf8_bytes {} } } } fields { + field_name: "product_code" type { int64 { encoding: + big_endian_bytes {} } } } encoding { delimited_bytes { + delimiter: "#" } } } + + | The decoded key parts would be: id = "some_id", date = + "2024-04-30", product_code = 1245427 The query "SELECT + \_key, product_code FROM table" will return two columns: + /------------------------------------------------------ + | \| \_key \| product_code \| \| + --------------------------------------\|--------------\| + \| "some_id#2024-04-30#\\x00\\x13\\x00\\xf3" \| 1245427 \| + ------------------------------------------------------/ The schema has the following invariants: (1) The decoded field values are order-preserved. For read, the field values @@ -221,19 +206,19 @@ class Table(proto.Message): type is limited to scalar types only: Array, Map, Aggregate, and Struct are not allowed. (4) The field names must not collide with existing column family names and reserved - keywords "_key" and "_timestamp". + keywords "\_key" and "\_timestamp". The following update operations are allowed for row_key_schema: - - Update from an empty schema to a new schema. - - Remove the existing schema. This operation requires - setting the ``ignore_warnings`` flag to ``true``, since - it might be a backward incompatible change. Without the - flag, the update request will fail with an - INVALID_ARGUMENT error. Any other row key schema update - operation (e.g. update existing schema columns names or - types) is currently unsupported. + - Update from an empty schema to a new schema. + - Remove the existing schema. This operation requires + setting the ``ignore_warnings`` flag to ``true``, since it + might be a backward incompatible change. Without the flag, + the update request will fail with an INVALID_ARGUMENT + error. Any other row key schema update operation (e.g. + update existing schema columns names or types) is + currently unsupported. """ class TimestampGranularity(proto.Enum): @@ -572,7 +557,7 @@ class ColumnFamily(proto.Message): If ``value_type`` is ``Aggregate``, written data must be compatible with: - - ``value_type.input_type`` for ``AddInput`` mutations + - ``value_type.input_type`` for ``AddInput`` mutations """ gc_rule: "GcRule" = proto.Field( @@ -864,8 +849,8 @@ class Backup(proto.Message): backup or updating its ``expire_time``, the value must be greater than the backup creation time by: - - At least 6 hours - - At most 90 days + - At least 6 hours + - At most 90 days Once the ``expire_time`` has passed, Cloud Bigtable will delete the backup. 
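
As a rough illustration of the ``expire_time`` bounds documented above for ``Backup`` (the value must exceed the backup creation time by at least 6 hours and at most 90 days), here is a minimal standard-library sketch; the helper name and the plain ``datetime`` arithmetic are illustrative only, not part of the generated client::

    import datetime

    MIN_RETENTION = datetime.timedelta(hours=6)
    MAX_RETENTION = datetime.timedelta(days=90)

    def is_valid_expire_time(create_time, expire_time):
        # True only if expire_time falls within the documented window
        # relative to the backup creation time.
        delta = expire_time - create_time
        return MIN_RETENTION <= delta <= MAX_RETENTION

    now = datetime.datetime.now(datetime.timezone.utc)
    assert is_valid_expire_time(now, now + datetime.timedelta(days=7))
    assert not is_valid_expire_time(now, now + datetime.timedelta(hours=1))
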
@@ -895,7 +880,7 @@ class Backup(proto.Message): standard backup. This value must be greater than the backup creation time by: - - At least 24 hours + - At least 24 hours This field only applies for hot backups. When creating or updating a standard backup, attempting to set this field diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/types.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/types.py index b6ea5341d362..4f56429dabff 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/types.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/types/types.py @@ -40,15 +40,15 @@ class Type(proto.Message): Each encoding can operate in one of two modes: - - Sorted: In this mode, Bigtable guarantees that - ``Encode(X) <= Encode(Y)`` if and only if ``X <= Y``. This is - useful anywhere sort order is important, for example when - encoding keys. - - Distinct: In this mode, Bigtable guarantees that if ``X != Y`` - then ``Encode(X) != Encode(Y)``. However, the converse is not - guaranteed. For example, both "{'foo': '1', 'bar': '2'}" and - "{'bar': '2', 'foo': '1'}" are valid encodings of the same JSON - value. + - Sorted: In this mode, Bigtable guarantees that + ``Encode(X) <= Encode(Y)`` if and only if ``X <= Y``. This is + useful anywhere sort order is important, for example when encoding + keys. + - Distinct: In this mode, Bigtable guarantees that if ``X != Y`` + then ``Encode(X) != Encode(Y)``. However, the converse is not + guaranteed. For example, both "{'foo': '1', 'bar': '2'}" and + "{'bar': '2', 'foo': '1'}" are valid encodings of the same JSON + value. The API clearly documents which mode is used wherever an encoding can be configured. Each encoding also documents which values are @@ -205,16 +205,16 @@ class Utf8Bytes(proto.Message): Sorted mode: - - All values are supported. - - Code point order is preserved. + - All values are supported. + - Code point order is preserved. Distinct mode: all values are supported. Compatible with: - - BigQuery ``TEXT`` encoding - - HBase ``Bytes.toBytes`` - - Java ``String#getBytes(StandardCharsets.UTF_8)`` + - BigQuery ``TEXT`` encoding + - HBase ``Bytes.toBytes`` + - Java ``String#getBytes(StandardCharsets.UTF_8)`` """ @@ -276,9 +276,9 @@ class BigEndianBytes(proto.Message): Compatible with: - - BigQuery ``BINARY`` encoding - - HBase ``Bytes.toBytes`` - - Java ``ByteBuffer.putLong()`` with ``ByteOrder.BIG_ENDIAN`` + - BigQuery ``BINARY`` encoding + - HBase ``Bytes.toBytes`` + - Java ``ByteBuffer.putLong()`` with ``ByteOrder.BIG_ENDIAN`` Attributes: bytes_type (google.cloud.bigtable_admin_v2.types.Type.Bytes): @@ -358,7 +358,7 @@ class Encoding(proto.Message): Compatible with: - - Java ``Instant.truncatedTo()`` with ``ChronoUnit.MICROS`` + - Java ``Instant.truncatedTo()`` with ``ChronoUnit.MICROS`` This field is a member of `oneof`_ ``encoding``. """ @@ -455,17 +455,17 @@ class DelimitedBytes(proto.Message): Sorted mode: - - Fields are encoded in sorted mode. - - Encoded field values must not contain any bytes <= - ``delimiter[0]`` - - Element-wise order is preserved: ``A < B`` if ``A[0] < B[0]``, or - if ``A[0] == B[0] && A[1] < B[1]``, etc. Strict prefixes sort - first. + - Fields are encoded in sorted mode. + - Encoded field values must not contain any bytes <= + ``delimiter[0]`` + - Element-wise order is preserved: ``A < B`` if ``A[0] < B[0]``, or + if ``A[0] == B[0] && A[1] < B[1]``, etc. Strict prefixes sort + first. 
Distinct mode: - - Fields are encoded in distinct mode. - - Encoded field values must not contain ``delimiter[0]``. + - Fields are encoded in distinct mode. + - Encoded field values must not contain ``delimiter[0]``. Attributes: delimiter (bytes): @@ -488,24 +488,23 @@ class OrderedCodeBytes(proto.Message): Fields that encode to the empty string "" have special handling: - - If *every* field encodes to "", or if the STRUCT has no fields - defined, then the STRUCT is encoded as the fixed byte pair {0x00, - 0x00}. - - Otherwise, the STRUCT only encodes until the last non-empty - field, omitting any trailing empty fields. Any empty fields that - aren't omitted are replaced with the fixed byte pair {0x00, - 0x00}. + - If *every* field encodes to "", or if the STRUCT has no fields + defined, then the STRUCT is encoded as the fixed byte pair {0x00, + 0x00}. + - Otherwise, the STRUCT only encodes until the last non-empty field, + omitting any trailing empty fields. Any empty fields that aren't + omitted are replaced with the fixed byte pair {0x00, 0x00}. Examples: - - STRUCT() -> "\00\00" - - STRUCT("") -> "\00\00" - - STRUCT("", "") -> "\00\00" - - STRUCT("", "B") -> "\00\00" + "\00\01" + "B" - - STRUCT("A", "") -> "A" - - STRUCT("", "B", "") -> "\00\00" + "\00\01" + "B" - - STRUCT("A", "", "C") -> "A" + "\00\01" + "\00\00" + "\00\01" + - "C" + - STRUCT() -> "\\00\\00" + - STRUCT("") -> "\\00\\00" + - STRUCT("", "") -> "\\00\\00" + - STRUCT("", "B") -> "\\00\\00" + "\\00\\01" + "B" + - STRUCT("A", "") -> "A" + - STRUCT("", "B", "") -> "\\00\\00" + "\\00\\01" + "B" + - STRUCT("A", "", "C") -> "A" + "\\00\\01" + "\\00\\00" + "\\00\\01" + + "C" Since null bytes are always escaped, this encoding can cause size blowup for encodings like ``Int64.BigEndianBytes`` that are likely @@ -513,16 +512,16 @@ class OrderedCodeBytes(proto.Message): Sorted mode: - - Fields are encoded in sorted mode. - - All values supported by the field encodings are allowed - - Element-wise order is preserved: ``A < B`` if ``A[0] < B[0]``, or - if ``A[0] == B[0] && A[1] < B[1]``, etc. Strict prefixes sort - first. + - Fields are encoded in sorted mode. + - All values supported by the field encodings are allowed + - Element-wise order is preserved: ``A < B`` if ``A[0] < B[0]``, or + if ``A[0] == B[0] && A[1] < B[1]``, etc. Strict prefixes sort + first. Distinct mode: - - Fields are encoded in distinct mode. - - All values supported by the field encodings are allowed. + - Fields are encoded in distinct mode. + - All values supported by the field encodings are allowed. 
""" diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py index 3a5a72c9c2a4..a14cdab5feef 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/__init__.py @@ -70,6 +70,7 @@ from .types.data import Value from .types.data import ValueRange from .types.feature_flags import FeatureFlags +from .types.peer_info import PeerInfo from .types.request_stats import FullReadStatsView from .types.request_stats import ReadIterationStats from .types.request_stats import RequestLatencyStats @@ -101,6 +102,7 @@ "MutateRowsResponse", "Mutation", "PartialResultSet", + "PeerInfo", "PingAndWarmRequest", "PingAndWarmResponse", "PrepareQueryRequest", diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py index 103ff141c925..0a9442287e22 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -397,6 +397,13 @@ def read_rows( if regex_match and regex_match.group("table_name"): header_params["table_name"] = regex_match.group("table_name") + routing_param_regex = re.compile( + "^(?Pprojects/[^/]+/instances/[^/]+)(?:/.*)?$" + ) + regex_match = routing_param_regex.match(request.materialized_view_name) + if regex_match and regex_match.group("name"): + header_params["name"] = regex_match.group("name") + if header_params: metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata(header_params), @@ -519,6 +526,13 @@ def sample_row_keys( if regex_match and regex_match.group("table_name"): header_params["table_name"] = regex_match.group("table_name") + routing_param_regex = re.compile( + "^(?Pprojects/[^/]+/instances/[^/]+)(?:/.*)?$" + ) + regex_match = routing_param_regex.match(request.materialized_view_name) + if regex_match and regex_match.group("name"): + header_params["name"] = regex_match.group("name") + if header_params: metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata(header_params), diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py index ffc448c25f0d..d8e85f54ea69 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/client.py @@ -873,6 +873,13 @@ def read_rows( if regex_match and regex_match.group("table_name"): header_params["table_name"] = regex_match.group("table_name") + routing_param_regex = re.compile( + "^(?Pprojects/[^/]+/instances/[^/]+)(?:/.*)?$" + ) + regex_match = routing_param_regex.match(request.materialized_view_name) + if regex_match and regex_match.group("name"): + header_params["name"] = regex_match.group("name") + if header_params: metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata(header_params), @@ -992,6 +999,13 @@ def sample_row_keys( if regex_match and regex_match.group("table_name"): header_params["table_name"] = regex_match.group("table_name") + routing_param_regex = re.compile( + "^(?Pprojects/[^/]+/instances/[^/]+)(?:/.*)?$" + ) + regex_match = routing_param_regex.match(request.materialized_view_name) + if 
regex_match and regex_match.group("name"): + header_params["name"] = regex_match.group("name") + if header_params: metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata(header_params), diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py index 4d25d8b3090f..f08bca73ede0 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/base.py @@ -74,9 +74,10 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py index 309e72662282..8ddbf15a20c0 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py @@ -152,9 +152,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if a ``channel`` instance is provided. channel (Optional[Union[grpc.Channel, Callable[..., grpc.Channel]]]): @@ -287,9 +288,10 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. + This argument is mutually exclusive with credentials. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. 
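
The routing-parameter extraction added in the hunks above derives an additional ``name`` header from ``request.materialized_view_name``. A standalone sketch of that extraction follows; it assumes the pattern's named group is ``name`` (the angle-bracketed group name in ``(?P<name>...)`` appears to have been dropped in the rendering of the hunks above), and the sample resource path is illustrative::

    import re

    routing_param_regex = re.compile(
        r"^(?P<name>projects/[^/]+/instances/[^/]+)(?:/.*)?$"
    )

    materialized_view_name = (
        "projects/sample1/instances/sample2/materializedViews/sample3"
    )

    header_params = {}
    match = routing_param_regex.match(materialized_view_name)
    if match and match.group("name"):
        # Only the project/instance prefix is sent as the routing header.
        header_params["name"] = match.group("name")

    assert header_params == {"name": "projects/sample1/instances/sample2"}
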
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py index 49f981d9a1f3..3e6b70832307 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py @@ -149,8 +149,9 @@ def create_channel( credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. + credentials_file (Optional[str]): Deprecated. A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. This argument will be + removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -201,9 +202,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if a ``channel`` instance is provided. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if a ``channel`` instance is provided. + This argument will be removed in the next major version of this library. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. @@ -290,6 +292,7 @@ def __init__( always_use_jwt_access=always_use_jwt_access, api_audience=api_audience, ) + if not self._grpc_channel: # initialize with the provided callable or the default channel channel_init = channel or type(self).create_channel diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py index c84ef147fca0..f0a761a360c3 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest.py @@ -750,9 +750,10 @@ def __init__( are specified, the client will attempt to ascertain the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can + credentials_file (Optional[str]): Deprecated. A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. + This argument is ignored if ``channel`` is provided. This argument will be + removed in the next major version of this library. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if ``channel`` is provided. 
client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client @@ -1080,6 +1081,22 @@ def __call__( resp, _ = self._interceptor.post_execute_query_with_metadata( resp, response_metadata ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + http_response = { + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable_v2.BigtableClient.execute_query", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "ExecuteQuery", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _GenerateInitialChangeStreamPartitions( @@ -1228,6 +1245,22 @@ def __call__( ) = self._interceptor.post_generate_initial_change_stream_partitions_with_metadata( resp, response_metadata ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + http_response = { + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable_v2.BigtableClient.generate_initial_change_stream_partitions", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "GenerateInitialChangeStreamPartitions", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _MutateRow(_BaseBigtableRestTransport._BaseMutateRow, BigtableRestStub): @@ -1515,6 +1548,22 @@ def __call__( resp, _ = self._interceptor.post_mutate_rows_with_metadata( resp, response_metadata ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + http_response = { + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable_v2.BigtableClient.mutate_rows", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "MutateRows", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _PingAndWarm(_BaseBigtableRestTransport._BasePingAndWarm, BigtableRestStub): @@ -1966,6 +2015,22 @@ def __call__( resp, _ = self._interceptor.post_read_change_stream_with_metadata( resp, response_metadata ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + http_response = { + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable_v2.BigtableClient.read_change_stream", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "ReadChangeStream", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _ReadModifyWriteRow( @@ -2253,6 +2318,22 @@ def __call__( resp, _ = self._interceptor.post_read_rows_with_metadata( resp, response_metadata ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + http_response = { + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable_v2.BigtableClient.read_rows", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "ReadRows", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _SampleRowKeys( @@ -2383,6 +2464,22 @@ def __call__( resp, _ = self._interceptor.post_sample_row_keys_with_metadata( resp, response_metadata ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # 
pragma: NO COVER + http_response = { + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable_v2.BigtableClient.sample_row_keys", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "SampleRowKeys", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp @property diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest_base.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest_base.py index b2080f4a49ba..5eab0ded45e2 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest_base.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/services/bigtable/transports/rest_base.py @@ -641,6 +641,11 @@ def _get_http_options(): "uri": "/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:readRows", "body": "*", }, + { + "method": "post", + "uri": "/v2/{materialized_view_name=projects/*/instances/*/materializedViews/*}:readRows", + "body": "*", + }, ] return http_options @@ -686,6 +691,10 @@ def _get_http_options(): "method": "get", "uri": "/v2/{authorized_view_name=projects/*/instances/*/tables/*/authorizedViews/*}:sampleRowKeys", }, + { + "method": "get", + "uri": "/v2/{materialized_view_name=projects/*/instances/*/materializedViews/*}:sampleRowKeys", + }, ] return http_options diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py index bd3c361549ee..b13c076a2cf6 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/__init__.py @@ -68,6 +68,9 @@ from .feature_flags import ( FeatureFlags, ) +from .peer_info import ( + PeerInfo, +) from .request_stats import ( FullReadStatsView, ReadIterationStats, @@ -131,6 +134,7 @@ "Value", "ValueRange", "FeatureFlags", + "PeerInfo", "FullReadStatsView", "ReadIterationStats", "RequestLatencyStats", diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py index 0e7ac1df3a1e..19abba67b7d6 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/bigtable.py @@ -1330,10 +1330,10 @@ class ExecuteQueryRequest(proto.Message): Setting this field also places restrictions on several other fields: - - ``data_format`` must be empty. - - ``validate_only`` must be false. - - ``params`` must match the ``param_types`` set in the - ``PrepareQueryRequest``. + - ``data_format`` must be empty. + - ``validate_only`` must be false. + - ``params`` must match the ``param_types`` set in the + ``PrepareQueryRequest``. proto_format (google.cloud.bigtable_v2.types.ProtoFormat): Protocol buffer format as described by ProtoSchema and ProtoRows messages. 
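
The REST transport hunks above add the same response-logging guard to each call: the debug payload is only assembled when client logging is supported and the logger is enabled for ``DEBUG``. A condensed sketch of that pattern, with a hypothetical helper name and a stand-in for the generated ``CLIENT_LOGGING_SUPPORTED`` flag::

    import logging

    _LOGGER = logging.getLogger(__name__)
    CLIENT_LOGGING_SUPPORTED = True  # stand-in for the generated flag

    def log_received_response(response, rpc_name):
        # Build the payload only when DEBUG logging is actually enabled,
        # mirroring the guard used in the generated transport.
        if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(logging.DEBUG):
            http_response = {
                "headers": dict(response.headers),
                "status": response.status_code,
            }
            _LOGGER.debug(
                "Received response for google.bigtable_v2.BigtableClient.%s",
                rpc_name,
                extra={
                    "serviceName": "google.bigtable.v2.Bigtable",
                    "rpcName": rpc_name,
                    "metadata": http_response["headers"],
                    "httpResponse": http_response,
                },
            )
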
diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py index ad7e382f7579..12ac8b2b1cbb 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/data.py @@ -573,26 +573,26 @@ class RowFilter(proto.Message): transformers), as well as two ways to compose simple filters into more complex ones (chains and interleaves). They work as follows: - - True filters alter the input row by excluding some of its cells - wholesale from the output row. An example of a true filter is the - ``value_regex_filter``, which excludes cells whose values don't - match the specified pattern. All regex true filters use RE2 - syntax (https://github.com/google/re2/wiki/Syntax) in raw byte - mode (RE2::Latin1), and are evaluated as full matches. An - important point to keep in mind is that ``RE2(.)`` is equivalent - by default to ``RE2([^\n])``, meaning that it does not match - newlines. When attempting to match an arbitrary byte, you should - therefore use the escape sequence ``\C``, which may need to be - further escaped as ``\\C`` in your client language. - - - Transformers alter the input row by changing the values of some - of its cells in the output, without excluding them completely. - Currently, the only supported transformer is the - ``strip_value_transformer``, which replaces every cell's value - with the empty string. - - - Chains and interleaves are described in more detail in the - RowFilter.Chain and RowFilter.Interleave documentation. + - True filters alter the input row by excluding some of its cells + wholesale from the output row. An example of a true filter is the + ``value_regex_filter``, which excludes cells whose values don't + match the specified pattern. All regex true filters use RE2 syntax + (https://github.com/google/re2/wiki/Syntax) in raw byte mode + (RE2::Latin1), and are evaluated as full matches. An important + point to keep in mind is that ``RE2(.)`` is equivalent by default + to ``RE2([^\n])``, meaning that it does not match newlines. When + attempting to match an arbitrary byte, you should therefore use + the escape sequence ``\C``, which may need to be further escaped + as ``\\C`` in your client language. + + - Transformers alter the input row by changing the values of some of + its cells in the output, without excluding them completely. + Currently, the only supported transformer is the + ``strip_value_transformer``, which replaces every cell's value + with the empty string. + + - Chains and interleaves are described in more detail in the + RowFilter.Chain and RowFilter.Interleave documentation. 
The total serialized size of a RowFilter message must not exceed 20480 bytes, and RowFilters may not be nested within each other (in @@ -1493,21 +1493,20 @@ class PartialResultSet(proto.Message): Having: - - queue of row results waiting to be returned ``queue`` - - extensible buffer of bytes ``buffer`` - - a place to keep track of the most recent ``resume_token`` for - each PartialResultSet ``p`` received { if p.reset { ensure - ``queue`` is empty ensure ``buffer`` is empty } if - p.estimated_batch_size != 0 { (optional) ensure ``buffer`` is - sized to at least ``p.estimated_batch_size`` } if - ``p.proto_rows_batch`` is set { append - ``p.proto_rows_batch.bytes`` to ``buffer`` } if p.batch_checksum - is set and ``buffer`` is not empty { validate the checksum - matches the contents of ``buffer`` (see comments on - ``batch_checksum``) parse ``buffer`` as ``ProtoRows`` message, - clearing ``buffer`` add parsed rows to end of ``queue`` } if - p.resume_token is set { release results in ``queue`` save - ``p.resume_token`` in ``resume_token`` } } + - queue of row results waiting to be returned ``queue`` + - extensible buffer of bytes ``buffer`` + - a place to keep track of the most recent ``resume_token`` for each + PartialResultSet ``p`` received { if p.reset { ensure ``queue`` is + empty ensure ``buffer`` is empty } if p.estimated_batch_size != 0 + { (optional) ensure ``buffer`` is sized to at least + ``p.estimated_batch_size`` } if ``p.proto_rows_batch`` is set { + append ``p.proto_rows_batch.bytes`` to ``buffer`` } if + p.batch_checksum is set and ``buffer`` is not empty { validate the + checksum matches the contents of ``buffer`` (see comments on + ``batch_checksum``) parse ``buffer`` as ``ProtoRows`` message, + clearing ``buffer`` add parsed rows to end of ``queue`` } if + p.resume_token is set { release results in ``queue`` save + ``p.resume_token`` in ``resume_token`` } } .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/feature_flags.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/feature_flags.py index 69cfe1cf459e..2c8ea8732746 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/feature_flags.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/feature_flags.py @@ -76,6 +76,9 @@ class FeatureFlags(proto.Message): direct_access_requested (bool): Notify the server that the client explicitly opted in for Direct Access. + peer_info (bool): + If the client can support using + BigtablePeerInfo. """ reverse_scans: bool = proto.Field( @@ -114,6 +117,10 @@ class FeatureFlags(proto.Message): proto.BOOL, number=10, ) + peer_info: bool = proto.Field( + proto.BOOL, + number=11, + ) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/peer_info.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/peer_info.py new file mode 100644 index 000000000000..b3f1203cc9e4 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/peer_info.py @@ -0,0 +1,118 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.bigtable.v2", + manifest={ + "PeerInfo", + }, +) + + +class PeerInfo(proto.Message): + r"""PeerInfo contains information about the peer that the client + is connecting to. + + Attributes: + google_frontend_id (int): + An opaque identifier for the Google Frontend + which serviced this request. Only set when not + using DirectAccess. + application_frontend_id (int): + An opaque identifier for the application + frontend which serviced this request. + application_frontend_zone (str): + The Cloud zone of the application frontend + that served this request. + application_frontend_subzone (str): + The subzone of the application frontend that + served this request, e.g. an identifier for + where within the zone the application frontend + is. + transport_type (google.cloud.bigtable_v2.types.PeerInfo.TransportType): + + """ + + class TransportType(proto.Enum): + r"""The transport type that the client used to connect to this + peer. + + Values: + TRANSPORT_TYPE_UNKNOWN (0): + The transport type is unknown. + TRANSPORT_TYPE_EXTERNAL (1): + The client connected to this peer via an + external network (e.g. outside Google Coud). + TRANSPORT_TYPE_CLOUD_PATH (2): + The client connected to this peer via + CloudPath. + TRANSPORT_TYPE_DIRECT_ACCESS (3): + The client connected to this peer via + DirectAccess. + TRANSPORT_TYPE_SESSION_UNKNOWN (4): + The client connected to this peer via + Bigtable Sessions using an unknown transport + type. + TRANSPORT_TYPE_SESSION_EXTERNAL (5): + The client connected to this peer via + Bigtable Sessions on an external network (e.g. + outside Google Cloud). + TRANSPORT_TYPE_SESSION_CLOUD_PATH (6): + The client connected to this peer via + Bigtable Sessions using CloudPath. + TRANSPORT_TYPE_SESSION_DIRECT_ACCESS (7): + The client connected to this peer via + Bigtable Sessions using DirectAccess. 
+ """ + TRANSPORT_TYPE_UNKNOWN = 0 + TRANSPORT_TYPE_EXTERNAL = 1 + TRANSPORT_TYPE_CLOUD_PATH = 2 + TRANSPORT_TYPE_DIRECT_ACCESS = 3 + TRANSPORT_TYPE_SESSION_UNKNOWN = 4 + TRANSPORT_TYPE_SESSION_EXTERNAL = 5 + TRANSPORT_TYPE_SESSION_CLOUD_PATH = 6 + TRANSPORT_TYPE_SESSION_DIRECT_ACCESS = 7 + + google_frontend_id: int = proto.Field( + proto.INT64, + number=1, + ) + application_frontend_id: int = proto.Field( + proto.INT64, + number=2, + ) + application_frontend_zone: str = proto.Field( + proto.STRING, + number=3, + ) + application_frontend_subzone: str = proto.Field( + proto.STRING, + number=4, + ) + transport_type: TransportType = proto.Field( + proto.ENUM, + number=5, + enum=TransportType, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/response_params.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/response_params.py index fb373d0559df..cc6384ab3465 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/response_params.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/response_params.py @@ -44,6 +44,11 @@ class ResponseParams(proto.Message): of bigtable resources. This field is a member of `oneof`_ ``_cluster_id``. + afe_id (int): + The AFE ID for the AFE that is served this + request. + + This field is a member of `oneof`_ ``_afe_id``. """ zone_id: str = proto.Field( @@ -56,6 +61,11 @@ class ResponseParams(proto.Message): number=2, optional=True, ) + afe_id: int = proto.Field( + proto.INT64, + number=3, + optional=True, + ) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/types.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/types.py index 5eae9e526897..0b4ddb57a6f5 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/types.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/types/types.py @@ -35,34 +35,27 @@ class Type(proto.Message): features. For compatibility with Bigtable's existing untyped APIs, each - ``Type`` includes an ``Encoding`` which describes how to convert - to/from the underlying data. - - Each encoding also defines the following properties: - - - Order-preserving: Does the encoded value sort consistently with - the original typed value? Note that Bigtable will always sort - data based on the raw encoded value, *not* the decoded type. - - - Example: BYTES values sort in the same order as their raw - encodings. - - Counterexample: Encoding INT64 as a fixed-width decimal string - does *not* preserve sort order when dealing with negative - numbers. ``INT64(1) > INT64(-1)``, but - ``STRING("-00001") > STRING("00001)``. - - - Self-delimiting: If we concatenate two encoded values, can we - always tell where the first one ends and the second one begins? - - - Example: If we encode INT64s to fixed-width STRINGs, the first - value will always contain exactly N digits, possibly preceded - by a sign. - - Counterexample: If we concatenate two UTF-8 encoded STRINGs, - we have no way to tell where the first one ends. - - - Compatibility: Which other systems have matching encoding - schemes? For example, does this encoding have a GoogleSQL - equivalent? HBase? Java? + ``Type`` includes an ``Encoding`` which describes how to convert to + or from the underlying data. 
+ + Each encoding can operate in one of two modes: + + - Sorted: In this mode, Bigtable guarantees that + ``Encode(X) <= Encode(Y)`` if and only if ``X <= Y``. This is + useful anywhere sort order is important, for example when encoding + keys. + - Distinct: In this mode, Bigtable guarantees that if ``X != Y`` + then ``Encode(X) != Encode(Y)``. However, the converse is not + guaranteed. For example, both ``{'foo': '1', 'bar': '2'}`` and + ``{'bar': '2', 'foo': '1'}`` are valid encodings of the same JSON + value. + + The API clearly documents which mode is used wherever an encoding + can be configured. Each encoding also documents which values are + supported in which modes. For example, when encoding INT64 as a + numeric STRING, negative numbers cannot be encoded in sorted mode. + This is because ``INT64(1) > INT64(-1)``, but + ``STRING("-00001") > STRING("00001")``. This message has `oneof`_ fields (mutually exclusive fields). For each oneof, at most one member field can be set at the same time. @@ -135,12 +128,12 @@ class Bytes(proto.Message): Attributes: encoding (google.cloud.bigtable_v2.types.Type.Bytes.Encoding): - The encoding to use when converting to/from - lower level types. + The encoding to use when converting to or + from lower level types. """ class Encoding(proto.Message): - r"""Rules used to convert to/from lower level types. + r"""Rules used to convert to or from lower level types. .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields @@ -152,14 +145,26 @@ class Encoding(proto.Message): """ class Raw(proto.Message): - r"""Leaves the value "as-is" + r"""Leaves the value as-is. + + Sorted mode: all values are supported. + + Distinct mode: all values are supported. - - Order-preserving? Yes - - Self-delimiting? No - - Compatibility? N/A + Attributes: + escape_nulls (bool): + If set, allows NULL values to be encoded as the empty string + "". + The actual empty string, or any value which only contains + the null byte ``0x00``, has one more null byte appended. """ + escape_nulls: bool = proto.Field( + proto.BOOL, + number=1, + ) + raw: "Type.Bytes.Encoding.Raw" = proto.Field( proto.MESSAGE, number=1, @@ -179,12 +184,12 @@ class String(proto.Message): Attributes: encoding (google.cloud.bigtable_v2.types.Type.String.Encoding): - The encoding to use when converting to/from - lower level types. + The encoding to use when converting to or + from lower level types. """ class Encoding(proto.Message): - r"""Rules used to convert to/from lower level types. + r"""Rules used to convert to or from lower level types. This message has `oneof`_ fields (mutually exclusive fields). For each oneof, at most one member field can be set at the same time. @@ -208,18 +213,45 @@ class Utf8Raw(proto.Message): r"""Deprecated: prefer the equivalent ``Utf8Bytes``.""" class Utf8Bytes(proto.Message): - r"""UTF-8 encoding + r"""UTF-8 encoding. - - Order-preserving? Yes (code point order) - - Self-delimiting? No - - Compatibility? + Sorted mode: - - BigQuery Federation ``TEXT`` encoding - - HBase ``Bytes.toBytes`` - - Java ``String#getBytes(StandardCharsets.UTF_8)`` + - All values are supported. + - Code point order is preserved. + Distinct mode: all values are supported. + + Compatible with: + + - BigQuery ``TEXT`` encoding + - HBase ``Bytes.toBytes`` + - Java ``String#getBytes(StandardCharsets.UTF_8)`` + + Attributes: + null_escape_char (str): + Single-character escape sequence used to support NULL + values. 
+ + If set, allows NULL values to be encoded as the empty string + "". + + The actual empty string, or any value where every character + equals ``null_escape_char``, has one more + ``null_escape_char`` appended. + + If ``null_escape_char`` is set and does not equal the ASCII + null character ``0x00``, then the encoding will not support + sorted mode. + + . """ + null_escape_char: str = proto.Field( + proto.STRING, + number=1, + ) + utf8_raw: "Type.String.Encoding.Utf8Raw" = proto.Field( proto.MESSAGE, number=1, @@ -244,12 +276,17 @@ class Int64(proto.Message): Attributes: encoding (google.cloud.bigtable_v2.types.Type.Int64.Encoding): - The encoding to use when converting to/from - lower level types. + The encoding to use when converting to or + from lower level types. """ class Encoding(proto.Message): - r"""Rules used to convert to/from lower level types. + r"""Rules used to convert to or from lower level types. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields @@ -257,20 +294,25 @@ class Encoding(proto.Message): big_endian_bytes (google.cloud.bigtable_v2.types.Type.Int64.Encoding.BigEndianBytes): Use ``BigEndianBytes`` encoding. + This field is a member of `oneof`_ ``encoding``. + ordered_code_bytes (google.cloud.bigtable_v2.types.Type.Int64.Encoding.OrderedCodeBytes): + Use ``OrderedCodeBytes`` encoding. + This field is a member of `oneof`_ ``encoding``. """ class BigEndianBytes(proto.Message): - r"""Encodes the value as an 8-byte big endian twos complement ``Bytes`` - value. + r"""Encodes the value as an 8-byte big-endian two's complement value. + + Sorted mode: non-negative values are supported. - - Order-preserving? No (positive values only) - - Self-delimiting? Yes - - Compatibility? + Distinct mode: all values are supported. - - BigQuery Federation ``BINARY`` encoding - - HBase ``Bytes.toBytes`` - - Java ``ByteBuffer.putLong()`` with ``ByteOrder.BIG_ENDIAN`` + Compatible with: + + - BigQuery ``BINARY`` encoding + - HBase ``Bytes.toBytes`` + - Java ``ByteBuffer.putLong()`` with ``ByteOrder.BIG_ENDIAN`` Attributes: bytes_type (google.cloud.bigtable_v2.types.Type.Bytes): @@ -283,12 +325,28 @@ class BigEndianBytes(proto.Message): message="Type.Bytes", ) + class OrderedCodeBytes(proto.Message): + r"""Encodes the value in a variable length binary format of up to + 10 bytes. Values that are closer to zero use fewer bytes. + + Sorted mode: all values are supported. + + Distinct mode: all values are supported. + + """ + big_endian_bytes: "Type.Int64.Encoding.BigEndianBytes" = proto.Field( proto.MESSAGE, number=1, oneof="encoding", message="Type.Int64.Encoding.BigEndianBytes", ) + ordered_code_bytes: "Type.Int64.Encoding.OrderedCodeBytes" = proto.Field( + proto.MESSAGE, + number=2, + oneof="encoding", + message="Type.Int64.Encoding.OrderedCodeBytes", + ) encoding: "Type.Int64.Encoding" = proto.Field( proto.MESSAGE, @@ -315,8 +373,43 @@ class Timestamp(proto.Message): r"""Timestamp Values of type ``Timestamp`` are stored in ``Value.timestamp_value``. + Attributes: + encoding (google.cloud.bigtable_v2.types.Type.Timestamp.Encoding): + The encoding to use when converting to or + from lower level types. """ + class Encoding(proto.Message): + r"""Rules used to convert to or from lower level types. + + .. 
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + unix_micros_int64 (google.cloud.bigtable_v2.types.Type.Int64.Encoding): + Encodes the number of microseconds since the Unix epoch + using the given ``Int64`` encoding. Values must be + microsecond-aligned. + + Compatible with: + + - Java ``Instant.truncatedTo()`` with ``ChronoUnit.MICROS`` + + This field is a member of `oneof`_ ``encoding``. + """ + + unix_micros_int64: "Type.Int64.Encoding" = proto.Field( + proto.MESSAGE, + number=1, + oneof="encoding", + message="Type.Int64.Encoding", + ) + + encoding: "Type.Timestamp.Encoding" = proto.Field( + proto.MESSAGE, + number=1, + message="Type.Timestamp.Encoding", + ) + class Date(proto.Message): r"""Date Values of type ``Date`` are stored in ``Value.date_value``.""" @@ -330,6 +423,9 @@ class Struct(proto.Message): fields (MutableSequence[google.cloud.bigtable_v2.types.Type.Struct.Field]): The names and types of the fields in this struct. + encoding (google.cloud.bigtable_v2.types.Type.Struct.Encoding): + The encoding to use when converting to or + from lower level types. """ class Field(proto.Message): @@ -353,11 +449,146 @@ class Field(proto.Message): message="Type", ) + class Encoding(proto.Message): + r"""Rules used to convert to or from lower level types. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + singleton (google.cloud.bigtable_v2.types.Type.Struct.Encoding.Singleton): + Use ``Singleton`` encoding. + + This field is a member of `oneof`_ ``encoding``. + delimited_bytes (google.cloud.bigtable_v2.types.Type.Struct.Encoding.DelimitedBytes): + Use ``DelimitedBytes`` encoding. + + This field is a member of `oneof`_ ``encoding``. + ordered_code_bytes (google.cloud.bigtable_v2.types.Type.Struct.Encoding.OrderedCodeBytes): + User ``OrderedCodeBytes`` encoding. + + This field is a member of `oneof`_ ``encoding``. + """ + + class Singleton(proto.Message): + r"""Uses the encoding of ``fields[0].type`` as-is. Only valid if + ``fields.size == 1``. + + """ + + class DelimitedBytes(proto.Message): + r"""Fields are encoded independently and concatenated with a + configurable ``delimiter`` in between. + + A struct with no fields defined is encoded as a single + ``delimiter``. + + Sorted mode: + + - Fields are encoded in sorted mode. + - Encoded field values must not contain any bytes <= + ``delimiter[0]`` + - Element-wise order is preserved: ``A < B`` if ``A[0] < B[0]``, or + if ``A[0] == B[0] && A[1] < B[1]``, etc. Strict prefixes sort + first. + + Distinct mode: + + - Fields are encoded in distinct mode. + - Encoded field values must not contain ``delimiter[0]``. + + Attributes: + delimiter (bytes): + Byte sequence used to delimit concatenated + fields. The delimiter must contain at least 1 + character and at most 50 characters. + """ + + delimiter: bytes = proto.Field( + proto.BYTES, + number=1, + ) + + class OrderedCodeBytes(proto.Message): + r"""Fields are encoded independently and concatenated with the fixed + byte pair ``{0x00, 0x01}`` in between. + + Any null ``(0x00)`` byte in an encoded field is replaced by the + fixed byte pair ``{0x00, 0xFF}``. 
+ + Fields that encode to the empty string "" have special handling: + + - If *every* field encodes to "", or if the STRUCT has no fields + defined, then the STRUCT is encoded as the fixed byte pair + ``{0x00, 0x00}``. + - Otherwise, the STRUCT only encodes until the last non-empty field, + omitting any trailing empty fields. Any empty fields that aren't + omitted are replaced with the fixed byte pair ``{0x00, 0x00}``. + + Examples: + + :: + + - STRUCT() -> "\00\00" + - STRUCT("") -> "\00\00" + - STRUCT("", "") -> "\00\00" + - STRUCT("", "B") -> "\00\00" + "\00\01" + "B" + - STRUCT("A", "") -> "A" + - STRUCT("", "B", "") -> "\00\00" + "\00\01" + "B" + - STRUCT("A", "", "C") -> "A" + "\00\01" + "\00\00" + "\00\01" + "C" + + Since null bytes are always escaped, this encoding can cause size + blowup for encodings like ``Int64.BigEndianBytes`` that are likely + to produce many such bytes. + + Sorted mode: + + - Fields are encoded in sorted mode. + - All values supported by the field encodings are allowed + - Element-wise order is preserved: ``A < B`` if ``A[0] < B[0]``, or + if ``A[0] == B[0] && A[1] < B[1]``, etc. Strict prefixes sort + first. + + Distinct mode: + + - Fields are encoded in distinct mode. + - All values supported by the field encodings are allowed. + + """ + + singleton: "Type.Struct.Encoding.Singleton" = proto.Field( + proto.MESSAGE, + number=1, + oneof="encoding", + message="Type.Struct.Encoding.Singleton", + ) + delimited_bytes: "Type.Struct.Encoding.DelimitedBytes" = proto.Field( + proto.MESSAGE, + number=2, + oneof="encoding", + message="Type.Struct.Encoding.DelimitedBytes", + ) + ordered_code_bytes: "Type.Struct.Encoding.OrderedCodeBytes" = proto.Field( + proto.MESSAGE, + number=3, + oneof="encoding", + message="Type.Struct.Encoding.OrderedCodeBytes", + ) + fields: MutableSequence["Type.Struct.Field"] = proto.RepeatedField( proto.MESSAGE, number=1, message="Type.Struct.Field", ) + encoding: "Type.Struct.Encoding" = proto.Field( + proto.MESSAGE, + number=2, + message="Type.Struct.Encoding", + ) class Proto(proto.Message): r"""A protobuf message type. Values of type ``Proto`` are stored in @@ -453,8 +684,8 @@ class Aggregate(proto.Message): r"""A value that combines incremental updates into a summarized value. Data is never directly written or read using type ``Aggregate``. - Writes will provide either the ``input_type`` or ``state_type``, and - reads will always return the ``state_type`` . + Writes provide either the ``input_type`` or ``state_type``, and + reads always return the ``state_type`` . This message has `oneof`_ fields (mutually exclusive fields). For each oneof, at most one member field can be set at the same time. @@ -466,13 +697,12 @@ class Aggregate(proto.Message): Attributes: input_type (google.cloud.bigtable_v2.types.Type): Type of the inputs that are accumulated by this - ``Aggregate``, which must specify a full encoding. Use - ``AddInput`` mutations to accumulate new inputs. + ``Aggregate``. Use ``AddInput`` mutations to accumulate new + inputs. state_type (google.cloud.bigtable_v2.types.Type): Output only. Type that holds the internal accumulator state for the ``Aggregate``. This is a function of the - ``input_type`` and ``aggregator`` chosen, and will always - specify a full encoding. + ``input_type`` and ``aggregator`` chosen. sum (google.cloud.bigtable_v2.types.Type.Aggregate.Sum): Sum aggregator. 
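
The ``Struct.Encoding.OrderedCodeBytes`` rules documented above are mechanical enough to check directly. A rough pure-Python sketch of the concatenation rules follows, verified against the documented examples; the function name is illustrative and the inputs are assumed to be already-encoded field values::

    def ordered_code_concat(encoded_fields):
        # Escape any null byte inside an encoded field as {0x00, 0xFF}.
        escaped = [f.replace(b"\x00", b"\x00\xff") for f in encoded_fields]
        # Trailing empty fields are omitted; if nothing remains (or there
        # were no fields at all), the struct encodes to {0x00, 0x00}.
        while escaped and escaped[-1] == b"":
            escaped.pop()
        if not escaped:
            return b"\x00\x00"
        # Remaining empty fields become {0x00, 0x00}; fields are joined
        # with the fixed delimiter pair {0x00, 0x01}.
        parts = [f if f else b"\x00\x00" for f in escaped]
        return b"\x00\x01".join(parts)

    assert ordered_code_concat([]) == b"\x00\x00"
    assert ordered_code_concat([b"", b"B"]) == b"\x00\x00" + b"\x00\x01" + b"B"
    assert ordered_code_concat([b"A", b""]) == b"A"
    assert ordered_code_concat([b"A", b"", b"C"]) == (
        b"A" + b"\x00\x01" + b"\x00\x00" + b"\x00\x01" + b"C"
    )
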
diff --git a/packages/google-cloud-bigtable/owlbot.py b/packages/google-cloud-bigtable/owlbot.py index b6b741b542e5..6b2e1ea4fc33 100644 --- a/packages/google-cloud-bigtable/owlbot.py +++ b/packages/google-cloud-bigtable/owlbot.py @@ -111,6 +111,7 @@ def get_staging_dirs( ], system_test_python_versions=["3.9"], unit_test_python_versions=["3.7", "3.8", "3.9", "3.10", "3.11", "3.12", "3.13", "3.14"], + default_python_version="3.13", ) s.move(templated_files, excludes=[".coveragerc", "README.rst", ".github/release-please.yml", "noxfile.py", "renovate.json"]) diff --git a/packages/google-cloud-bigtable/samples/beam/requirements.txt b/packages/google-cloud-bigtable/samples/beam/requirements.txt index 55b3ae719e62..4b84ddec38cc 100644 --- a/packages/google-cloud-bigtable/samples/beam/requirements.txt +++ b/packages/google-cloud-bigtable/samples/beam/requirements.txt @@ -1,3 +1,4 @@ -apache-beam==2.65.0 +apache-beam===2.60.0; python_version == '3.8' +apache-beam==2.65.0; python_version >= '3.9' google-cloud-bigtable==2.30.1 google-cloud-core==2.4.3 diff --git a/packages/google-cloud-bigtable/samples/generated_samples/snippet_metadata_google.bigtable.admin.v2.json b/packages/google-cloud-bigtable/samples/generated_samples/snippet_metadata_google.bigtable.admin.v2.json index 3d73099e881a..66b5c8f679e9 100644 --- a/packages/google-cloud-bigtable/samples/generated_samples/snippet_metadata_google.bigtable.admin.v2.json +++ b/packages/google-cloud-bigtable/samples/generated_samples/snippet_metadata_google.bigtable.admin.v2.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-cloud-bigtable-admin", - "version": "0.1.0" + "version": "0.0.0" }, "snippets": [ { diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py index cb78d2b7af30..24db8e2695a7 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py @@ -6852,6 +6852,7 @@ def test_read_rows_routing_parameters_request_1_grpc(): expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", } # assert the expected headers are present, in any order @@ -6916,6 +6917,43 @@ def test_read_rows_routing_parameters_request_3_grpc(): expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", + } + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +def test_read_rows_routing_parameters_request_4_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.read_rows), "__call__") as call: + call.return_value = iter([bigtable.ReadRowsResponse()]) + client.read_rows( + request={ + "materialized_view_name": "projects/sample1/instances/sample2/sample3" + } + ) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ReadRowsRequest( + **{"materialized_view_name": "projects/sample1/instances/sample2/sample3"} + ) + + assert args[0] == request_msg + + expected_headers = { + "name": "projects/sample1/instances/sample2", + "app_profile_id": "", } # assert the expected headers are present, in any order @@ -6949,6 +6987,7 @@ def test_sample_row_keys_routing_parameters_request_1_grpc(): expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", } # assert the expected headers are present, in any order @@ -7013,6 +7052,43 @@ def test_sample_row_keys_routing_parameters_request_3_grpc(): expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", + } + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +def test_sample_row_keys_routing_parameters_request_4_grpc(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: + call.return_value = iter([bigtable.SampleRowKeysResponse()]) + client.sample_row_keys( + request={ + "materialized_view_name": "projects/sample1/instances/sample2/sample3" + } + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.SampleRowKeysRequest( + **{"materialized_view_name": "projects/sample1/instances/sample2/sample3"} + ) + + assert args[0] == request_msg + + expected_headers = { + "name": "projects/sample1/instances/sample2", + "app_profile_id": "", } # assert the expected headers are present, in any order @@ -7046,6 +7122,7 @@ def test_mutate_row_routing_parameters_request_1_grpc(): expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", } # assert the expected headers are present, in any order @@ -7110,6 +7187,7 @@ def test_mutate_row_routing_parameters_request_3_grpc(): expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", } # assert the expected headers are present, in any order @@ -7143,6 +7221,7 @@ def test_mutate_rows_routing_parameters_request_1_grpc(): expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", } # assert the expected headers are present, in any order @@ -7207,6 +7286,7 @@ def test_mutate_rows_routing_parameters_request_3_grpc(): expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", } # assert the expected headers are present, in any order @@ -7242,6 +7322,7 @@ def test_check_and_mutate_row_routing_parameters_request_1_grpc(): expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", } # assert the expected headers are present, in any order @@ -7310,6 +7391,7 @@ def test_check_and_mutate_row_routing_parameters_request_3_grpc(): expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", } # assert the expected headers are present, in any order @@ -7341,6 +7423,7 @@ def 
test_ping_and_warm_routing_parameters_request_1_grpc(): expected_headers = { "name": "projects/sample1/instances/sample2", + "app_profile_id": "", } # assert the expected headers are present, in any order @@ -7403,6 +7486,7 @@ def test_read_modify_write_row_routing_parameters_request_1_grpc(): expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", } # assert the expected headers are present, in any order @@ -7473,6 +7557,7 @@ def test_read_modify_write_row_routing_parameters_request_3_grpc(): expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", } # assert the expected headers are present, in any order @@ -7506,6 +7591,7 @@ def test_prepare_query_routing_parameters_request_1_grpc(): expected_headers = { "name": "projects/sample1/instances/sample2", + "app_profile_id": "", } # assert the expected headers are present, in any order @@ -7566,6 +7652,7 @@ def test_execute_query_routing_parameters_request_1_grpc(): expected_headers = { "name": "projects/sample1/instances/sample2", + "app_profile_id": "", } # assert the expected headers are present, in any order @@ -7938,6 +8025,7 @@ async def test_read_rows_routing_parameters_request_1_grpc_asyncio(): expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", } # assert the expected headers are present, in any order @@ -8012,6 +8100,48 @@ async def test_read_rows_routing_parameters_request_3_grpc_asyncio(): expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", + } + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +@pytest.mark.asyncio +async def test_read_rows_routing_parameters_request_4_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.read_rows), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.ReadRowsResponse()] + ) + await client.read_rows( + request={ + "materialized_view_name": "projects/sample1/instances/sample2/sample3" + } + ) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ReadRowsRequest( + **{"materialized_view_name": "projects/sample1/instances/sample2/sample3"} + ) + + assert args[0] == request_msg + + expected_headers = { + "name": "projects/sample1/instances/sample2", + "app_profile_id": "", } # assert the expected headers are present, in any order @@ -8050,6 +8180,7 @@ async def test_sample_row_keys_routing_parameters_request_1_grpc_asyncio(): expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", } # assert the expected headers are present, in any order @@ -8124,6 +8255,48 @@ async def test_sample_row_keys_routing_parameters_request_3_grpc_asyncio(): expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", + } + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +@pytest.mark.asyncio +async def test_sample_row_keys_routing_parameters_request_4_grpc_asyncio(): + client = BigtableAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[bigtable.SampleRowKeysResponse()] + ) + await client.sample_row_keys( + request={ + "materialized_view_name": "projects/sample1/instances/sample2/sample3" + } + ) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.SampleRowKeysRequest( + **{"materialized_view_name": "projects/sample1/instances/sample2/sample3"} + ) + + assert args[0] == request_msg + + expected_headers = { + "name": "projects/sample1/instances/sample2", + "app_profile_id": "", } # assert the expected headers are present, in any order @@ -8161,6 +8334,7 @@ async def test_mutate_row_routing_parameters_request_1_grpc_asyncio(): expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", } # assert the expected headers are present, in any order @@ -8233,6 +8407,7 @@ async def test_mutate_row_routing_parameters_request_3_grpc_asyncio(): expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", } # assert the expected headers are present, in any order @@ -8271,6 +8446,7 @@ async def test_mutate_rows_routing_parameters_request_1_grpc_asyncio(): expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", } # assert the expected headers are present, in any order @@ -8345,6 +8521,7 @@ async def test_mutate_rows_routing_parameters_request_3_grpc_asyncio(): expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", } # assert the expected headers are present, in any order @@ -8386,6 +8563,7 @@ async def test_check_and_mutate_row_routing_parameters_request_1_grpc_asyncio(): expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", } # assert the expected headers are present, in any order @@ -8466,6 +8644,7 @@ async def test_check_and_mutate_row_routing_parameters_request_3_grpc_asyncio(): expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", } # assert the expected headers are present, in any order @@ -8503,6 +8682,7 @@ async def test_ping_and_warm_routing_parameters_request_1_grpc_asyncio(): expected_headers = { "name": "projects/sample1/instances/sample2", + "app_profile_id": "", } # assert the expected headers are present, in any order @@ -8573,6 +8753,7 @@ async def test_read_modify_write_row_routing_parameters_request_1_grpc_asyncio() expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", } # assert the expected headers are present, in any order @@ -8651,6 +8832,7 @@ async def test_read_modify_write_row_routing_parameters_request_3_grpc_asyncio() expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", } # assert the expected headers are present, in any order @@ -8690,6 +8872,7 @@ async def test_prepare_query_routing_parameters_request_1_grpc_asyncio(): expected_headers = { "name": "projects/sample1/instances/sample2", + "app_profile_id": "", } # assert the expected headers are present, in any order @@ -8761,6 +8944,7 @@ async def test_execute_query_routing_parameters_request_1_grpc_asyncio(): expected_headers = { "name": "projects/sample1/instances/sample2", + "app_profile_id": "", } # assert the expected headers are present, in any order @@ -10448,6 +10632,7 @@ def test_read_rows_routing_parameters_request_1_rest(): expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", } # assert the expected headers are present, in any order @@ -10510,6 +10695,42 @@ def 
test_read_rows_routing_parameters_request_3_rest(): expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", + } + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +def test_read_rows_routing_parameters_request_4_rest(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.read_rows), "__call__") as call: + client.read_rows( + request={ + "materialized_view_name": "projects/sample1/instances/sample2/sample3" + } + ) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.ReadRowsRequest( + **{"materialized_view_name": "projects/sample1/instances/sample2/sample3"} + ) + + assert args[0] == request_msg + + expected_headers = { + "name": "projects/sample1/instances/sample2", + "app_profile_id": "", } # assert the expected headers are present, in any order @@ -10542,6 +10763,7 @@ def test_sample_row_keys_routing_parameters_request_1_rest(): expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", } # assert the expected headers are present, in any order @@ -10604,6 +10826,42 @@ def test_sample_row_keys_routing_parameters_request_3_rest(): expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", + } + + # assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()]) + + +def test_sample_row_keys_routing_parameters_request_4_rest(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.sample_row_keys), "__call__") as call: + client.sample_row_keys( + request={ + "materialized_view_name": "projects/sample1/instances/sample2/sample3" + } + ) + + # Establish that the underlying stub method was called. 
+ call.assert_called() + _, args, kw = call.mock_calls[0] + request_msg = bigtable.SampleRowKeysRequest( + **{"materialized_view_name": "projects/sample1/instances/sample2/sample3"} + ) + + assert args[0] == request_msg + + expected_headers = { + "name": "projects/sample1/instances/sample2", + "app_profile_id": "", } # assert the expected headers are present, in any order @@ -10636,6 +10894,7 @@ def test_mutate_row_routing_parameters_request_1_rest(): expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", } # assert the expected headers are present, in any order @@ -10698,6 +10957,7 @@ def test_mutate_row_routing_parameters_request_3_rest(): expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", } # assert the expected headers are present, in any order @@ -10730,6 +10990,7 @@ def test_mutate_rows_routing_parameters_request_1_rest(): expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", } # assert the expected headers are present, in any order @@ -10792,6 +11053,7 @@ def test_mutate_rows_routing_parameters_request_3_rest(): expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", } # assert the expected headers are present, in any order @@ -10826,6 +11088,7 @@ def test_check_and_mutate_row_routing_parameters_request_1_rest(): expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", } # assert the expected headers are present, in any order @@ -10892,6 +11155,7 @@ def test_check_and_mutate_row_routing_parameters_request_3_rest(): expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", } # assert the expected headers are present, in any order @@ -10922,6 +11186,7 @@ def test_ping_and_warm_routing_parameters_request_1_rest(): expected_headers = { "name": "projects/sample1/instances/sample2", + "app_profile_id": "", } # assert the expected headers are present, in any order @@ -10982,6 +11247,7 @@ def test_read_modify_write_row_routing_parameters_request_1_rest(): expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", } # assert the expected headers are present, in any order @@ -11050,6 +11316,7 @@ def test_read_modify_write_row_routing_parameters_request_3_rest(): expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", + "app_profile_id": "", } # assert the expected headers are present, in any order @@ -11082,6 +11349,7 @@ def test_prepare_query_routing_parameters_request_1_rest(): expected_headers = { "name": "projects/sample1/instances/sample2", + "app_profile_id": "", } # assert the expected headers are present, in any order @@ -11140,6 +11408,7 @@ def test_execute_query_routing_parameters_request_1_rest(): expected_headers = { "name": "projects/sample1/instances/sample2", + "app_profile_id": "", } # assert the expected headers are present, in any order From 215f6eee498258c68d7c6542e4efd86c3d07ad73 Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Thu, 20 Nov 2025 01:59:16 -0800 Subject: [PATCH 888/892] feat: add basic interceptor to client (#1206) --- .../cloud/bigtable/data/_async/client.py | 50 ++++-- .../data/_async/metrics_interceptor.py | 78 ++++++++ .../bigtable/data/_sync_autogen/client.py | 30 +++- .../data/_sync_autogen/metrics_interceptor.py | 59 ++++++ 
.../tests/system/data/test_system_async.py | 19 +- .../tests/system/data/test_system_autogen.py | 6 +- .../data/_async/test_metrics_interceptor.py | 168 ++++++++++++++++++ .../_sync_autogen/test_metrics_interceptor.py | 140 +++++++++++++++ .../tests/unit/data/test_sync_up_to_date.py | 2 +- 9 files changed, 522 insertions(+), 30 deletions(-) create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/metrics_interceptor.py create mode 100644 packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/metrics_interceptor.py create mode 100644 packages/google-cloud-bigtable/tests/unit/data/_async/test_metrics_interceptor.py create mode 100644 packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_metrics_interceptor.py diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py index 0af7154a64f7..1c98f56abc9e 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/client.py @@ -19,6 +19,7 @@ cast, Any, AsyncIterable, + Callable, Optional, Set, Sequence, @@ -99,18 +100,24 @@ ) from google.cloud.bigtable.data._async.mutations_batcher import _MB_SIZE from google.cloud.bigtable.data._async._swappable_channel import ( - AsyncSwappableChannel, + AsyncSwappableChannel as SwappableChannelType, + ) + from google.cloud.bigtable.data._async.metrics_interceptor import ( + AsyncBigtableMetricsInterceptor as MetricsInterceptorType, ) else: from typing import Iterable # noqa: F401 from grpc import insecure_channel + from grpc import intercept_channel from google.cloud.bigtable_v2.services.bigtable.transports import BigtableGrpcTransport as TransportType # type: ignore from google.cloud.bigtable_v2.services.bigtable import BigtableClient as GapicClient # type: ignore from google.cloud.bigtable.data._sync_autogen.mutations_batcher import _MB_SIZE from google.cloud.bigtable.data._sync_autogen._swappable_channel import ( # noqa: F401 - SwappableChannel, + SwappableChannel as SwappableChannelType, + ) + from google.cloud.bigtable.data._sync_autogen.metrics_interceptor import ( # noqa: F401 + BigtableMetricsInterceptor as MetricsInterceptorType, ) - if TYPE_CHECKING: from google.cloud.bigtable.data._helpers import RowKeySamples @@ -205,7 +212,7 @@ def __init__( credentials = google.auth.credentials.AnonymousCredentials() if project is None: project = _DEFAULT_BIGTABLE_EMULATOR_CLIENT - + self._metrics_interceptor = MetricsInterceptorType() # initialize client ClientWithProject.__init__( self, @@ -259,12 +266,11 @@ def __init__( stacklevel=2, ) - @CrossSync.convert(replace_symbols={"AsyncSwappableChannel": "SwappableChannel"}) - def _build_grpc_channel(self, *args, **kwargs) -> AsyncSwappableChannel: + def _build_grpc_channel(self, *args, **kwargs) -> SwappableChannelType: """ This method is called by the gapic transport to create a grpc channel. 
- The init arguments passed down are captured in a partial used by AsyncSwappableChannel + The init arguments passed down are captured in a partial used by SwappableChannel to create new channel instances in the future, as part of the channel refresh logic Emulators always use an inseucre channel @@ -275,12 +281,30 @@ def _build_grpc_channel(self, *args, **kwargs) -> AsyncSwappableChannel: Returns: a custom wrapped swappable channel """ + create_channel_fn: Callable[[], Channel] if self._emulator_host is not None: - # emulators use insecure channel + # Emulators use insecure channels create_channel_fn = partial(insecure_channel, self._emulator_host) - else: + elif CrossSync.is_async: + # For async client, use the default create_channel. create_channel_fn = partial(TransportType.create_channel, *args, **kwargs) - return AsyncSwappableChannel(create_channel_fn) + else: + # For sync client, wrap create_channel with interceptors. + def sync_create_channel_fn(): + return intercept_channel( + TransportType.create_channel(*args, **kwargs), + self._metrics_interceptor, + ) + + create_channel_fn = sync_create_channel_fn + + # Instantiate SwappableChannelType with the determined creation function. + new_channel = SwappableChannelType(create_channel_fn) + if CrossSync.is_async: + # Attach async interceptors to the channel instance itself. + new_channel._unary_unary_interceptors.append(self._metrics_interceptor) + new_channel._unary_stream_interceptors.append(self._metrics_interceptor) + return new_channel @property def universe_domain(self) -> str: @@ -402,7 +426,7 @@ def _invalidate_channel_stubs(self): self.transport._stubs = {} self.transport._prep_wrapped_messages(self.client_info) - @CrossSync.convert(replace_symbols={"AsyncSwappableChannel": "SwappableChannel"}) + @CrossSync.convert async def _manage_channel( self, refresh_interval_min: float = 60 * 35, @@ -427,10 +451,10 @@ async def _manage_channel( grace_period: time to allow previous channel to serve existing requests before closing, in seconds """ - if not isinstance(self.transport.grpc_channel, AsyncSwappableChannel): + if not isinstance(self.transport.grpc_channel, SwappableChannelType): warnings.warn("Channel does not support auto-refresh.") return - super_channel: AsyncSwappableChannel = self.transport.grpc_channel + super_channel: SwappableChannelType = self.transport.grpc_channel first_refresh = self._channel_init_time + random.uniform( refresh_interval_min, refresh_interval_max ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/metrics_interceptor.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/metrics_interceptor.py new file mode 100644 index 000000000000..a154c0083dd6 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_async/metrics_interceptor.py @@ -0,0 +1,78 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
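The sync branch of ``_build_grpc_channel`` above relies on ``grpc.intercept_channel`` to attach the metrics interceptor. A minimal, self-contained sketch of that pattern (not taken from this patch; the target address and the no-op interceptor are placeholders):

    import grpc

    class NoopInterceptor(grpc.UnaryUnaryClientInterceptor):
        def intercept_unary_unary(self, continuation, client_call_details, request):
            # A real metrics hook would record latency/status here;
            # this simply passes the call through unchanged.
            return continuation(client_call_details, request)

    raw_channel = grpc.insecure_channel("localhost:8086")
    wrapped_channel = grpc.intercept_channel(raw_channel, NoopInterceptor())

Because ``intercept_channel`` returns a wrapper around the original channel, the sync client bakes the wrapping into ``create_channel_fn`` so that every channel produced during a refresh is intercepted the same way.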
+from __future__ import annotations + +from google.cloud.bigtable.data._cross_sync import CrossSync + +if CrossSync.is_async: + from grpc.aio import UnaryUnaryClientInterceptor + from grpc.aio import UnaryStreamClientInterceptor +else: + from grpc import UnaryUnaryClientInterceptor + from grpc import UnaryStreamClientInterceptor + + +__CROSS_SYNC_OUTPUT__ = "google.cloud.bigtable.data._sync_autogen.metrics_interceptor" + + +@CrossSync.convert_class(sync_name="BigtableMetricsInterceptor") +class AsyncBigtableMetricsInterceptor( + UnaryUnaryClientInterceptor, UnaryStreamClientInterceptor +): + """ + An async gRPC interceptor to add client metadata and print server metadata. + """ + + @CrossSync.convert + async def intercept_unary_unary(self, continuation, client_call_details, request): + """ + Interceptor for unary rpcs: + - MutateRow + - CheckAndMutateRow + - ReadModifyWriteRow + """ + try: + call = await continuation(client_call_details, request) + return call + except Exception as rpc_error: + raise rpc_error + + @CrossSync.convert + async def intercept_unary_stream(self, continuation, client_call_details, request): + """ + Interceptor for streaming rpcs: + - ReadRows + - MutateRows + - SampleRowKeys + """ + try: + return self._streaming_generator_wrapper( + await continuation(client_call_details, request) + ) + except Exception as rpc_error: + # handle errors while intializing stream + raise rpc_error + + @staticmethod + @CrossSync.convert + async def _streaming_generator_wrapper(call): + """ + Wrapped generator to be returned by intercept_unary_stream. + """ + try: + async for response in call: + yield response + except Exception as e: + # handle errors while processing stream + raise e diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/client.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/client.py index adc849649eb5..a403643f5027 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/client.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/client.py @@ -17,7 +17,7 @@ # This file is automatically generated by CrossSync. Do not edit manually. 
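For the async client, the patch instead appends the interceptor to the swappable channel's ``_unary_unary_interceptors`` and ``_unary_stream_interceptors`` lists. The public equivalent in ``grpc.aio`` is to pass interceptors when a channel is created; a minimal sketch (not from this patch, with a placeholder target):

    from grpc import aio

    class NoopAioInterceptor(aio.UnaryUnaryClientInterceptor):
        async def intercept_unary_unary(self, continuation, client_call_details, request):
            # A metrics hook would wrap the call here; this awaits it and returns it as-is.
            return await continuation(client_call_details, request)

    channel = aio.insecure_channel(
        "localhost:8086", interceptors=[NoopAioInterceptor()]
    )

Attaching the interceptors to the swappable wrapper rather than to one concrete channel keeps them in place when the refresh logic swaps the underlying channel, which is what the channel-refresh system test later in this patch checks.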
from __future__ import annotations -from typing import cast, Any, Optional, Set, Sequence, TYPE_CHECKING +from typing import cast, Any, Callable, Optional, Set, Sequence, TYPE_CHECKING import abc import time import warnings @@ -77,12 +77,18 @@ from google.cloud.bigtable.data._cross_sync import CrossSync from typing import Iterable from grpc import insecure_channel +from grpc import intercept_channel from google.cloud.bigtable_v2.services.bigtable.transports import ( BigtableGrpcTransport as TransportType, ) from google.cloud.bigtable_v2.services.bigtable import BigtableClient as GapicClient from google.cloud.bigtable.data._sync_autogen.mutations_batcher import _MB_SIZE -from google.cloud.bigtable.data._sync_autogen._swappable_channel import SwappableChannel +from google.cloud.bigtable.data._sync_autogen._swappable_channel import ( + SwappableChannel as SwappableChannelType, +) +from google.cloud.bigtable.data._sync_autogen.metrics_interceptor import ( + BigtableMetricsInterceptor as MetricsInterceptorType, +) if TYPE_CHECKING: from google.cloud.bigtable.data._helpers import RowKeySamples @@ -145,6 +151,7 @@ def __init__( credentials = google.auth.credentials.AnonymousCredentials() if project is None: project = _DEFAULT_BIGTABLE_EMULATOR_CLIENT + self._metrics_interceptor = MetricsInterceptorType() ClientWithProject.__init__( self, credentials=credentials, @@ -188,7 +195,7 @@ def __init__( stacklevel=2, ) - def _build_grpc_channel(self, *args, **kwargs) -> SwappableChannel: + def _build_grpc_channel(self, *args, **kwargs) -> SwappableChannelType: """This method is called by the gapic transport to create a grpc channel. The init arguments passed down are captured in a partial used by SwappableChannel @@ -201,11 +208,20 @@ def _build_grpc_channel(self, *args, **kwargs) -> SwappableChannel: - **kwargs: keyword arguments passed by the gapic layer to create a new channel with Returns: a custom wrapped swappable channel""" + create_channel_fn: Callable[[], Channel] if self._emulator_host is not None: create_channel_fn = partial(insecure_channel, self._emulator_host) else: - create_channel_fn = partial(TransportType.create_channel, *args, **kwargs) - return SwappableChannel(create_channel_fn) + + def sync_create_channel_fn(): + return intercept_channel( + TransportType.create_channel(*args, **kwargs), + self._metrics_interceptor, + ) + + create_channel_fn = sync_create_channel_fn + new_channel = SwappableChannelType(create_channel_fn) + return new_channel @property def universe_domain(self) -> str: @@ -326,10 +342,10 @@ def _manage_channel( between `refresh_interval_min` and `refresh_interval_max` grace_period: time to allow previous channel to serve existing requests before closing, in seconds""" - if not isinstance(self.transport.grpc_channel, SwappableChannel): + if not isinstance(self.transport.grpc_channel, SwappableChannelType): warnings.warn("Channel does not support auto-refresh.") return - super_channel: SwappableChannel = self.transport.grpc_channel + super_channel: SwappableChannelType = self.transport.grpc_channel first_refresh = self._channel_init_time + random.uniform( refresh_interval_min, refresh_interval_max ) diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/metrics_interceptor.py b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/metrics_interceptor.py new file mode 100644 index 000000000000..9e47313b07b9 --- /dev/null +++ b/packages/google-cloud-bigtable/google/cloud/bigtable/data/_sync_autogen/metrics_interceptor.py @@ 
-0,0 +1,59 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This file is automatically generated by CrossSync. Do not edit manually. + +from __future__ import annotations +from grpc import UnaryUnaryClientInterceptor +from grpc import UnaryStreamClientInterceptor + + +class BigtableMetricsInterceptor( + UnaryUnaryClientInterceptor, UnaryStreamClientInterceptor +): + """ + An async gRPC interceptor to add client metadata and print server metadata. + """ + + def intercept_unary_unary(self, continuation, client_call_details, request): + """Interceptor for unary rpcs: + - MutateRow + - CheckAndMutateRow + - ReadModifyWriteRow""" + try: + call = continuation(client_call_details, request) + return call + except Exception as rpc_error: + raise rpc_error + + def intercept_unary_stream(self, continuation, client_call_details, request): + """Interceptor for streaming rpcs: + - ReadRows + - MutateRows + - SampleRowKeys""" + try: + return self._streaming_generator_wrapper( + continuation(client_call_details, request) + ) + except Exception as rpc_error: + raise rpc_error + + @staticmethod + def _streaming_generator_wrapper(call): + """Wrapped generator to be returned by intercept_unary_stream.""" + try: + for response in call: + yield response + except Exception as e: + raise e diff --git a/packages/google-cloud-bigtable/tests/system/data/test_system_async.py b/packages/google-cloud-bigtable/tests/system/data/test_system_async.py index c96570b76af2..39c454996267 100644 --- a/packages/google-cloud-bigtable/tests/system/data/test_system_async.py +++ b/packages/google-cloud-bigtable/tests/system/data/test_system_async.py @@ -285,23 +285,28 @@ async def test_channel_refresh(self, table_id, instance_id, temp_rows): async with client.get_table(instance_id, table_id) as table: rows = await table.read_rows({}) channel_wrapper = client.transport.grpc_channel - first_channel = client.transport.grpc_channel._channel + first_channel = channel_wrapper._channel assert len(rows) == 2 await CrossSync.sleep(2) rows_after_refresh = await table.read_rows({}) assert len(rows_after_refresh) == 2 assert client.transport.grpc_channel is channel_wrapper - assert client.transport.grpc_channel._channel is not first_channel - # ensure gapic's logging interceptor is still active + updated_channel = channel_wrapper._channel + assert updated_channel is not first_channel + # ensure interceptors are kept (gapic's logging interceptor, and metric interceptor) if CrossSync.is_async: - interceptors = ( - client.transport.grpc_channel._channel._unary_unary_interceptors - ) - assert GapicInterceptor in [type(i) for i in interceptors] + unary_interceptors = updated_channel._unary_unary_interceptors + assert len(unary_interceptors) == 2 + assert GapicInterceptor in [type(i) for i in unary_interceptors] + assert client._metrics_interceptor in unary_interceptors + stream_interceptors = updated_channel._unary_stream_interceptors + assert len(stream_interceptors) == 1 + assert client._metrics_interceptor in 
stream_interceptors else: assert isinstance( client.transport._logged_channel._interceptor, GapicInterceptor ) + assert updated_channel._interceptor == client._metrics_interceptor finally: await client.close() diff --git a/packages/google-cloud-bigtable/tests/system/data/test_system_autogen.py b/packages/google-cloud-bigtable/tests/system/data/test_system_autogen.py index 44895808a9ab..37c00f2ae803 100644 --- a/packages/google-cloud-bigtable/tests/system/data/test_system_autogen.py +++ b/packages/google-cloud-bigtable/tests/system/data/test_system_autogen.py @@ -237,16 +237,18 @@ def test_channel_refresh(self, table_id, instance_id, temp_rows): with client.get_table(instance_id, table_id) as table: rows = table.read_rows({}) channel_wrapper = client.transport.grpc_channel - first_channel = client.transport.grpc_channel._channel + first_channel = channel_wrapper._channel assert len(rows) == 2 CrossSync._Sync_Impl.sleep(2) rows_after_refresh = table.read_rows({}) assert len(rows_after_refresh) == 2 assert client.transport.grpc_channel is channel_wrapper - assert client.transport.grpc_channel._channel is not first_channel + updated_channel = channel_wrapper._channel + assert updated_channel is not first_channel assert isinstance( client.transport._logged_channel._interceptor, GapicInterceptor ) + assert updated_channel._interceptor == client._metrics_interceptor finally: client.close() diff --git a/packages/google-cloud-bigtable/tests/unit/data/_async/test_metrics_interceptor.py b/packages/google-cloud-bigtable/tests/unit/data/_async/test_metrics_interceptor.py new file mode 100644 index 000000000000..6ea958358df3 --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/data/_async/test_metrics_interceptor.py @@ -0,0 +1,168 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import pytest +from grpc import RpcError + +from google.cloud.bigtable.data._cross_sync import CrossSync + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock +except ImportError: # pragma: NO COVER + import mock # type: ignore + +if CrossSync.is_async: + from google.cloud.bigtable.data._async.metrics_interceptor import ( + AsyncBigtableMetricsInterceptor, + ) +else: + from google.cloud.bigtable.data._sync_autogen.metrics_interceptor import ( # noqa: F401 + BigtableMetricsInterceptor, + ) + + +__CROSS_SYNC_OUTPUT__ = "tests.unit.data._sync_autogen.test_metrics_interceptor" + + +@CrossSync.convert(replace_symbols={"__aiter__": "__iter__"}) +def _make_mock_stream_call(values, exc=None): + """ + Create a mock call object that can be used for streaming calls + """ + call = CrossSync.Mock() + + async def gen(): + for val in values: + yield val + if exc: + raise exc + + call.__aiter__ = mock.Mock(return_value=gen()) + return call + + +@CrossSync.convert_class(sync_name="TestMetricsInterceptor") +class TestMetricsInterceptorAsync: + @staticmethod + @CrossSync.convert( + replace_symbols={ + "AsyncBigtableMetricsInterceptor": "BigtableMetricsInterceptor" + } + ) + def _get_target_class(): + return AsyncBigtableMetricsInterceptor + + def _make_one(self, *args, **kwargs): + return self._get_target_class()(*args, **kwargs) + + @CrossSync.pytest + async def test_unary_unary_interceptor_success(self): + """Test that interceptor handles successful unary-unary calls""" + instance = self._make_one() + continuation = CrossSync.Mock() + call = continuation.return_value + details = mock.Mock() + request = mock.Mock() + result = await instance.intercept_unary_unary(continuation, details, request) + assert result == call + continuation.assert_called_once_with(details, request) + + @CrossSync.pytest + async def test_unary_unary_interceptor_failure(self): + """Test a failed RpcError with metadata""" + + instance = self._make_one() + exc = RpcError("test") + continuation = CrossSync.Mock(side_effect=exc) + details = mock.Mock() + request = mock.Mock() + with pytest.raises(RpcError) as e: + await instance.intercept_unary_unary(continuation, details, request) + assert e.value == exc + continuation.assert_called_once_with(details, request) + + @CrossSync.pytest + async def test_unary_unary_interceptor_failure_generic(self): + """Test generic exception""" + + instance = self._make_one() + exc = ValueError("test") + continuation = CrossSync.Mock(side_effect=exc) + details = mock.Mock() + request = mock.Mock() + with pytest.raises(ValueError) as e: + await instance.intercept_unary_unary(continuation, details, request) + assert e.value == exc + continuation.assert_called_once_with(details, request) + + @CrossSync.pytest + async def test_unary_stream_interceptor_success(self): + """Test that interceptor handles successful unary-stream calls""" + + instance = self._make_one() + + continuation = CrossSync.Mock(return_value=_make_mock_stream_call([1, 2])) + details = mock.Mock() + request = mock.Mock() + wrapper = await instance.intercept_unary_stream(continuation, details, request) + results = [val async for val in wrapper] + assert results == [1, 2] + continuation.assert_called_once_with(details, request) + + @CrossSync.pytest + async def test_unary_stream_interceptor_failure_mid_stream(self): + """Test that interceptor handles failures mid-stream""" + instance = self._make_one() + exc = ValueError("test") + continuation = CrossSync.Mock(return_value=_make_mock_stream_call([1], 
exc=exc)) + details = mock.Mock() + request = mock.Mock() + wrapper = await instance.intercept_unary_stream(continuation, details, request) + with pytest.raises(ValueError) as e: + [val async for val in wrapper] + assert e.value == exc + continuation.assert_called_once_with(details, request) + + @CrossSync.pytest + async def test_unary_stream_interceptor_failure_start_stream(self): + """Test that interceptor handles failures at start of stream with RpcError with metadata""" + + instance = self._make_one() + exc = RpcError("test") + + continuation = CrossSync.Mock() + continuation.side_effect = exc + details = mock.Mock() + request = mock.Mock() + with pytest.raises(RpcError) as e: + await instance.intercept_unary_stream(continuation, details, request) + assert e.value == exc + continuation.assert_called_once_with(details, request) + + @CrossSync.pytest + async def test_unary_stream_interceptor_failure_start_stream_generic(self): + """Test that interceptor handles failures at start of stream with generic exception""" + + instance = self._make_one() + exc = ValueError("test") + + continuation = CrossSync.Mock() + continuation.side_effect = exc + details = mock.Mock() + request = mock.Mock() + with pytest.raises(ValueError) as e: + await instance.intercept_unary_stream(continuation, details, request) + assert e.value == exc + continuation.assert_called_once_with(details, request) diff --git a/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_metrics_interceptor.py b/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_metrics_interceptor.py new file mode 100644 index 000000000000..56a6f365013f --- /dev/null +++ b/packages/google-cloud-bigtable/tests/unit/data/_sync_autogen/test_metrics_interceptor.py @@ -0,0 +1,140 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# This file is automatically generated by CrossSync. Do not edit manually. 
+ +import pytest +from grpc import RpcError +from google.cloud.bigtable.data._cross_sync import CrossSync + +try: + from unittest import mock +except ImportError: + import mock +from google.cloud.bigtable.data._sync_autogen.metrics_interceptor import ( + BigtableMetricsInterceptor, +) + + +def _make_mock_stream_call(values, exc=None): + """Create a mock call object that can be used for streaming calls""" + call = CrossSync._Sync_Impl.Mock() + + def gen(): + for val in values: + yield val + if exc: + raise exc + + call.__iter__ = mock.Mock(return_value=gen()) + return call + + +class TestMetricsInterceptor: + @staticmethod + def _get_target_class(): + return BigtableMetricsInterceptor + + def _make_one(self, *args, **kwargs): + return self._get_target_class()(*args, **kwargs) + + def test_unary_unary_interceptor_success(self): + """Test that interceptor handles successful unary-unary calls""" + instance = self._make_one() + continuation = CrossSync._Sync_Impl.Mock() + call = continuation.return_value + details = mock.Mock() + request = mock.Mock() + result = instance.intercept_unary_unary(continuation, details, request) + assert result == call + continuation.assert_called_once_with(details, request) + + def test_unary_unary_interceptor_failure(self): + """Test a failed RpcError with metadata""" + instance = self._make_one() + exc = RpcError("test") + continuation = CrossSync._Sync_Impl.Mock(side_effect=exc) + details = mock.Mock() + request = mock.Mock() + with pytest.raises(RpcError) as e: + instance.intercept_unary_unary(continuation, details, request) + assert e.value == exc + continuation.assert_called_once_with(details, request) + + def test_unary_unary_interceptor_failure_generic(self): + """Test generic exception""" + instance = self._make_one() + exc = ValueError("test") + continuation = CrossSync._Sync_Impl.Mock(side_effect=exc) + details = mock.Mock() + request = mock.Mock() + with pytest.raises(ValueError) as e: + instance.intercept_unary_unary(continuation, details, request) + assert e.value == exc + continuation.assert_called_once_with(details, request) + + def test_unary_stream_interceptor_success(self): + """Test that interceptor handles successful unary-stream calls""" + instance = self._make_one() + continuation = CrossSync._Sync_Impl.Mock( + return_value=_make_mock_stream_call([1, 2]) + ) + details = mock.Mock() + request = mock.Mock() + wrapper = instance.intercept_unary_stream(continuation, details, request) + results = [val for val in wrapper] + assert results == [1, 2] + continuation.assert_called_once_with(details, request) + + def test_unary_stream_interceptor_failure_mid_stream(self): + """Test that interceptor handles failures mid-stream""" + instance = self._make_one() + exc = ValueError("test") + continuation = CrossSync._Sync_Impl.Mock( + return_value=_make_mock_stream_call([1], exc=exc) + ) + details = mock.Mock() + request = mock.Mock() + wrapper = instance.intercept_unary_stream(continuation, details, request) + with pytest.raises(ValueError) as e: + [val for val in wrapper] + assert e.value == exc + continuation.assert_called_once_with(details, request) + + def test_unary_stream_interceptor_failure_start_stream(self): + """Test that interceptor handles failures at start of stream with RpcError with metadata""" + instance = self._make_one() + exc = RpcError("test") + continuation = CrossSync._Sync_Impl.Mock() + continuation.side_effect = exc + details = mock.Mock() + request = mock.Mock() + with pytest.raises(RpcError) as e: + 
instance.intercept_unary_stream(continuation, details, request) + assert e.value == exc + continuation.assert_called_once_with(details, request) + + def test_unary_stream_interceptor_failure_start_stream_generic(self): + """Test that interceptor handles failures at start of stream with generic exception""" + instance = self._make_one() + exc = ValueError("test") + continuation = CrossSync._Sync_Impl.Mock() + continuation.side_effect = exc + details = mock.Mock() + request = mock.Mock() + with pytest.raises(ValueError) as e: + instance.intercept_unary_stream(continuation, details, request) + assert e.value == exc + continuation.assert_called_once_with(details, request) diff --git a/packages/google-cloud-bigtable/tests/unit/data/test_sync_up_to_date.py b/packages/google-cloud-bigtable/tests/unit/data/test_sync_up_to_date.py index d4623a6c8c84..e6bce9cf6266 100644 --- a/packages/google-cloud-bigtable/tests/unit/data/test_sync_up_to_date.py +++ b/packages/google-cloud-bigtable/tests/unit/data/test_sync_up_to_date.py @@ -90,7 +90,7 @@ def test_verify_headers(sync_file): \#\ distributed\ under\ the\ License\ is\ distributed\ on\ an\ \"AS\ IS\"\ BASIS,\n \#\ WITHOUT\ WARRANTIES\ OR\ CONDITIONS\ OF\ ANY\ KIND,\ either\ express\ or\ implied\.\n \#\ See\ the\ License\ for\ the\ specific\ language\ governing\ permissions\ and\n - \#\ limitations\ under\ the\ License\. + \#\ limitations\ under\ the\ License """ pattern = re.compile(license_regex, re.VERBOSE) From 7fac0c36eb5ca46642fd69c8e022c015889a932c Mon Sep 17 00:00:00 2001 From: Anthonios Partheniou Date: Thu, 20 Nov 2025 11:24:36 -0500 Subject: [PATCH 889/892] chore(librarian): onboard to librarian (#1232) Towards https://github.com/googleapis/librarian/issues/2455 --- .../.github/.OwlBot.lock.yaml | 17 - .../.github/.OwlBot.yaml | 28 - .../.github/auto-approve.yml | 3 - .../.github/release-please.yml | 12 - .../.github/release-trigger.yml | 2 - .../.github/sync-repo-settings.yaml | 58 -- .../generator-input/.repo-metadata.json | 80 +++ .../generator-input/librarian.py} | 101 +--- .../.librarian/generator-input/noxfile.py | 569 ++++++++++++++++++ .../.librarian/generator-input/setup.py | 104 ++++ .../.librarian/state.yaml | 40 ++ .../.release-please-manifest.json | 3 - .../cloud/bigtable_admin/gapic_version.py | 2 +- .../cloud/bigtable_admin_v2/gapic_version.py | 2 +- .../google/cloud/bigtable_v2/gapic_version.py | 2 +- packages/google-cloud-bigtable/noxfile.py | 2 +- .../release-please-config.json | 22 - ...pet_metadata_google.bigtable.admin.v2.json | 2 +- .../unit/gapic/bigtable_v2/test_bigtable.py | 78 --- 19 files changed, 820 insertions(+), 307 deletions(-) delete mode 100644 packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml delete mode 100644 packages/google-cloud-bigtable/.github/.OwlBot.yaml delete mode 100644 packages/google-cloud-bigtable/.github/auto-approve.yml delete mode 100644 packages/google-cloud-bigtable/.github/release-please.yml delete mode 100644 packages/google-cloud-bigtable/.github/release-trigger.yml delete mode 100644 packages/google-cloud-bigtable/.github/sync-repo-settings.yaml create mode 100644 packages/google-cloud-bigtable/.librarian/generator-input/.repo-metadata.json rename packages/google-cloud-bigtable/{owlbot.py => .librarian/generator-input/librarian.py} (73%) create mode 100644 packages/google-cloud-bigtable/.librarian/generator-input/noxfile.py create mode 100644 packages/google-cloud-bigtable/.librarian/generator-input/setup.py create mode 100644 packages/google-cloud-bigtable/.librarian/state.yaml 
delete mode 100644 packages/google-cloud-bigtable/.release-please-manifest.json delete mode 100644 packages/google-cloud-bigtable/release-please-config.json diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml deleted file mode 100644 index 9a7846675f55..000000000000 --- a/packages/google-cloud-bigtable/.github/.OwlBot.lock.yaml +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright 2025 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -docker: - image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:4a9e5d44b98e8672e2037ee22bc6b4f8e844a2d75fcb78ea8a4b38510112abc6 -# created: 2025-10-07 diff --git a/packages/google-cloud-bigtable/.github/.OwlBot.yaml b/packages/google-cloud-bigtable/.github/.OwlBot.yaml deleted file mode 100644 index fe2f7841a3d4..000000000000 --- a/packages/google-cloud-bigtable/.github/.OwlBot.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2021 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -docker: - image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - -deep-remove-regex: - - /owl-bot-staging - -deep-copy-regex: - - source: /google/bigtable/admin/(v.*)/.*-py/(.*) - dest: /owl-bot-staging/bigtable_admin/$1/$2 - - source: /google/bigtable/(v.*)/.*-py/(.*) - dest: /owl-bot-staging/bigtable/$1/$2 - -begin-after-commit-hash: a21f1091413a260393548c1b2ac44b7347923f08 - diff --git a/packages/google-cloud-bigtable/.github/auto-approve.yml b/packages/google-cloud-bigtable/.github/auto-approve.yml deleted file mode 100644 index 311ebbb853a9..000000000000 --- a/packages/google-cloud-bigtable/.github/auto-approve.yml +++ /dev/null @@ -1,3 +0,0 @@ -# https://github.com/googleapis/repo-automation-bots/tree/main/packages/auto-approve -processes: - - "OwlBotTemplateChanges" diff --git a/packages/google-cloud-bigtable/.github/release-please.yml b/packages/google-cloud-bigtable/.github/release-please.yml deleted file mode 100644 index 593e83f9fea2..000000000000 --- a/packages/google-cloud-bigtable/.github/release-please.yml +++ /dev/null @@ -1,12 +0,0 @@ -releaseType: python -handleGHRelease: true -# NOTE: this section is generated by synthtool.languages.python -# See https://github.com/googleapis/synthtool/blob/master/synthtool/languages/python.py -manifest: true -branches: -- branch: v1 - handleGHRelease: true - releaseType: python -- branch: v0 - handleGHRelease: true - releaseType: python diff --git a/packages/google-cloud-bigtable/.github/release-trigger.yml b/packages/google-cloud-bigtable/.github/release-trigger.yml deleted file mode 100644 index 0bbdd8e4cabb..000000000000 --- a/packages/google-cloud-bigtable/.github/release-trigger.yml +++ /dev/null @@ -1,2 +0,0 @@ -enabled: true -multiScmName: python-bigtable diff --git a/packages/google-cloud-bigtable/.github/sync-repo-settings.yaml b/packages/google-cloud-bigtable/.github/sync-repo-settings.yaml deleted file mode 100644 index 14e32d6fcb64..000000000000 --- a/packages/google-cloud-bigtable/.github/sync-repo-settings.yaml +++ /dev/null @@ -1,58 +0,0 @@ -# Whether or not rebase-merging is enabled on this repository. -# Defaults to `true` -rebaseMergeAllowed: true - -# Whether or not squash-merging is enabled on this repository. -# Defaults to `true` -squashMergeAllowed: true - -# Whether or not PRs are merged with a merge commit on this repository. -# Defaults to `false` -mergeCommitAllowed: false - -# Rules for main branch protection -branchProtectionRules: -# Identifies the protection rule pattern. Name of the branch to be protected. -# Defaults to `main` -- pattern: main - # Can admins overwrite branch protection. - # Defaults to `true` - isAdminEnforced: true - # Number of approving reviews required to update matching branches. - # Defaults to `1` - requiredApprovingReviewCount: 1 - # Are reviews from code owners required to update matching branches. - # Defaults to `false` - requiresCodeOwnerReviews: true - # Require up to date branches - requiresStrictStatusChecks: false - # List of required status check contexts that must pass for commits to be accepted to matching branches. 
- requiredStatusCheckContexts: - - 'Kokoro' - - 'Kokoro system' - - 'cla/google' - - 'OwlBot Post Processor' - - 'lint' - - 'mypy' - - 'docs' - - 'docfx' - - 'unit-3.9' - - 'unit-3.10' - - 'unit-3.11' - - 'unit-3.12' - - 'unit-3.13' - - 'unit-3.14' -# List of explicit permissions to add (additive only) -permissionRules: - # Team slug to add to repository permissions - - team: yoshi-admins - # Access level required, one of push|pull|admin|maintain|triage - permission: admin - # Team slug to add to repository permissions - - team: yoshi-python-admins - # Access level required, one of push|pull|admin|maintain|triage - permission: admin - # Team slug to add to repository permissions - - team: yoshi-python - # Access level required, one of push|pull|admin|maintain|triage - permission: push diff --git a/packages/google-cloud-bigtable/.librarian/generator-input/.repo-metadata.json b/packages/google-cloud-bigtable/.librarian/generator-input/.repo-metadata.json new file mode 100644 index 000000000000..9de4b5f92bf5 --- /dev/null +++ b/packages/google-cloud-bigtable/.librarian/generator-input/.repo-metadata.json @@ -0,0 +1,80 @@ +{ + "name": "bigtable", + "name_pretty": "Cloud Bigtable", + "product_documentation": "https://cloud.google.com/bigtable", + "client_documentation": "https://cloud.google.com/python/docs/reference/bigtable/latest", + "issue_tracker": "https://issuetracker.google.com/savedsearches/559777", + "release_level": "stable", + "language": "python", + "library_type": "GAPIC_COMBO", + "repo": "googleapis/python-bigtable", + "distribution_name": "google-cloud-bigtable", + "api_id": "bigtable.googleapis.com", + "requires_billing": true, + "samples": [ + { + "name": "Hello World in Cloud Bigtable", + "description": "Demonstrates how to connect to Cloud Bigtable and run some basic operations. More information available at: https://cloud.google.com/bigtable/docs/samples-python-hello", + "file": "main.py", + "runnable": true, + "custom_content": "
usage: main.py [-h] [--table TABLE] project_id instance_id
Demonstrates how to connect to Cloud Bigtable and run some basic operations.
Prerequisites: - Create a Cloud Bigtable cluster.
https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google
Application Default Credentials.
https://developers.google.com/identity/protocols/application-default-
credentials


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Table to create and destroy. (default: Hello-Bigtable)
", + "override_path": "hello" + }, + { + "name": "Hello World using HappyBase", + "description": "This sample demonstrates using the Google Cloud Client Library HappyBase package, an implementation of the HappyBase API to connect to and interact with Cloud Bigtable. More information available at: https://cloud.google.com/bigtable/docs/samples-python-hello-happybase", + "file": "main.py", + "runnable": true, + "custom_content": "
usage: main.py [-h] [--table TABLE] project_id instance_id
Demonstrates how to connect to Cloud Bigtable and run some basic operations.
Prerequisites: - Create a Cloud Bigtable cluster.
https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google
Application Default Credentials.
https://developers.google.com/identity/protocols/application-default-
credentials


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Table to create and destroy. (default: Hello-Bigtable)
", + "override_path": "hello_happybase" + }, + { + "name": "cbt Command Demonstration", + "description": "This page explains how to use the cbt command to connect to a Cloud Bigtable instance, perform basic administrative tasks, and read and write data in a table. More information about this quickstart is available at https://cloud.google.com/bigtable/docs/quickstart-cbt", + "file": "instanceadmin.py", + "runnable": true, + "custom_content": "
usage: instanceadmin.py [-h] [run] [dev-instance] [del-instance] [add-cluster] [del-cluster] project_id instance_id cluster_id
Demonstrates how to connect to Cloud Bigtable and run some basic operations.
Prerequisites: - Create a Cloud Bigtable cluster.
https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google
Application Default Credentials.
https://developers.google.com/identity/protocols/application-default-
credentials


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Table to create and destroy. (default: Hello-Bigtable)
", + "override_path": "instanceadmin" + }, + { + "name": "Metric Scaler", + "description": "This sample demonstrates how to use Stackdriver Monitoring to scale Cloud Bigtable based on CPU usage.", + "file": "metricscaler.py", + "runnable": true, + "custom_content": "
usage: metricscaler.py [-h] [--high_cpu_threshold HIGH_CPU_THRESHOLD]
                       [--low_cpu_threshold LOW_CPU_THRESHOLD]
                       [--short_sleep SHORT_SLEEP] [--long_sleep LONG_SLEEP]
                       bigtable_instance bigtable_cluster


Scales Cloud Bigtable clusters based on CPU usage.


positional arguments:
  bigtable_instance     ID of the Cloud Bigtable instance to connect to.
  bigtable_cluster      ID of the Cloud Bigtable cluster to connect to.


optional arguments:
  -h, --help            show this help message and exit
  --high_cpu_threshold HIGH_CPU_THRESHOLD
                        If Cloud Bigtable CPU usage is above this threshold,
                        scale up
  --low_cpu_threshold LOW_CPU_THRESHOLD
                        If Cloud Bigtable CPU usage is below this threshold,
                        scale down
  --short_sleep SHORT_SLEEP
                        How long to sleep in seconds between checking metrics
                        after no scale operation
  --long_sleep LONG_SLEEP
                        How long to sleep in seconds between checking metrics
                        after a scaling operation
", + "override_path": "metricscaler" + }, + { + "name": "Quickstart", + "description": "Demonstrates of Cloud Bigtable. This sample creates a Bigtable client, connects to an instance and then to a table, then closes the connection.", + "file": "main.py", + "runnable": true, + "custom_content": "
usage: main.py [-h] [--table TABLE] project_id instance_id 


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Existing table used in the quickstart. (default: my-table)
", + "override_path": "quickstart" + }, + { + "name": "Quickstart using HappyBase", + "description": "Demonstrates of Cloud Bigtable using HappyBase. This sample creates a Bigtable client, connects to an instance and then to a table, then closes the connection.", + "file": "main.py", + "runnable": true, + "custom_content": "
usage: main.py [-h] [--table TABLE] project_id instance_id


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Existing table used in the quickstart. (default: my-table)
usage: tableadmin.py [-h] [run] [delete] [--table TABLE] project_id instance_id


Demonstrates how to connect to Cloud Bigtable and run some basic operations.
Prerequisites: - Create a Cloud Bigtable cluster.
https://cloud.google.com/bigtable/docs/creating-cluster - Set your Google
Application Default Credentials.
https://developers.google.com/identity/protocols/application-default-
credentials


positional arguments:
  project_id     Your Cloud Platform project ID.
  instance_id    ID of the Cloud Bigtable instance to connect to.


optional arguments:
  -h, --help     show this help message and exit
  --table TABLE  Table to create and destroy. (default: Hello-Bigtable)
", + "override_path": "tableadmin" + } + ], + "default_version": "v2", + "codeowner_team": "@googleapis/api-bigtable @googleapis/api-bigtable-partners", + "api_shortname": "bigtable" +} diff --git a/packages/google-cloud-bigtable/owlbot.py b/packages/google-cloud-bigtable/.librarian/generator-input/librarian.py similarity index 73% rename from packages/google-cloud-bigtable/owlbot.py rename to packages/google-cloud-bigtable/.librarian/generator-input/librarian.py index 6b2e1ea4fc33..5b943d24bd96 100644 --- a/packages/google-cloud-bigtable/owlbot.py +++ b/packages/google-cloud-bigtable/.librarian/generator-input/librarian.py @@ -26,51 +26,6 @@ common = gcp.CommonTemplates() -# This is a customized version of the s.get_staging_dirs() function from synthtool to -# cater for copying 2 different folders from googleapis-gen -# which are bigtable and bigtable/admin. -# Source https://github.com/googleapis/synthtool/blob/master/synthtool/transforms.py#L280 -def get_staging_dirs( - default_version: Optional[str] = None, sub_directory: Optional[str] = None -) -> List[Path]: - """Returns the list of directories, one per version, copied from - https://github.com/googleapis/googleapis-gen. Will return in lexical sorting - order with the exception of the default_version which will be last (if specified). - - Args: - default_version (str): the default version of the API. The directory for this version - will be the last item in the returned list if specified. - sub_directory (str): if a `sub_directory` is provided, only the directories within the - specified `sub_directory` will be returned. - - Returns: the empty list if no file were copied. - """ - - staging = Path("owl-bot-staging") - - if sub_directory: - staging /= sub_directory - - if staging.is_dir(): - # Collect the subdirectories of the staging directory. - versions = [v.name for v in staging.iterdir() if v.is_dir()] - # Reorder the versions so the default version always comes last. - versions = [v for v in versions if v != default_version] - versions.sort() - if default_version is not None: - versions += [default_version] - dirs = [staging / v for v in versions] - for dir in dirs: - s._tracked_paths.add(dir) - return dirs - else: - return [] - -# This library ships clients for two different APIs, -# BigTable and BigTable Admin -bigtable_default_version = "v2" -bigtable_admin_default_version = "v2" - # These flags are needed because certain post-processing operations # append things after a certain line of text, and can infinitely loop # in a Github PR. 
We use these flags to only do those operations @@ -80,16 +35,12 @@ def get_staging_dirs( is_fresh_admin_v2_copy = False is_fresh_admin_docs_copy = False -for library in get_staging_dirs(bigtable_default_version, "bigtable"): - s.move(library / "google/cloud/bigtable_v2", excludes=["**/gapic_version.py"]) - s.move(library / "tests") - s.move(library / "scripts") - -for library in get_staging_dirs(bigtable_admin_default_version, "bigtable_admin"): +for library in s.get_staging_dirs("v2"): + s.move(library / "google/cloud/bigtable_v2") is_fresh_admin_copy = \ - s.move(library / "google/cloud/bigtable_admin", excludes=["**/gapic_version.py"]) + s.move(library / "google/cloud/bigtable_admin") is_fresh_admin_v2_copy = \ - s.move(library / "google/cloud/bigtable_admin_v2", excludes=["**/gapic_version.py"]) + s.move(library / "google/cloud/bigtable_admin_v2") s.move(library / "tests") s.move(library / "samples") s.move(library / "scripts") @@ -114,8 +65,10 @@ def get_staging_dirs( default_python_version="3.13", ) -s.move(templated_files, excludes=[".coveragerc", "README.rst", ".github/release-please.yml", "noxfile.py", "renovate.json"]) +s.move(templated_files, excludes=[".coveragerc", "README.rst", ".github/**", ".kokoro/**", "noxfile.py", "renovate.json"]) + +s.shell.run(["nox", "-s", "blacken"], hide_output=False) # ---------------------------------------------------------------------------- # Always supply app_profile_id in routing headers: https://github.com/googleapis/python-bigtable/pull/1109 @@ -131,16 +84,19 @@ def get_staging_dirs( s.replace( "tests/unit/gapic/bigtable_v2/test_bigtable.py", 'assert \(\n\s*gapic_v1\.routing_header\.to_grpc_metadata\(expected_headers\) in kw\["metadata"\]\n.*', - """ - # assert the expected headers are present, in any order - routing_string = next(iter([m[1] for m in kw["metadata"] if m[0] == 'x-goog-request-params'])) - assert all([f"{k}={v}" in routing_string for k,v in expected_headers.items()]) - """ + """# assert the expected headers are present, in any order + routing_string = next( + iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) + ) + assert all([f"{k}={v}" in routing_string for k, v in expected_headers.items()])""" ) s.replace( "tests/unit/gapic/bigtable_v2/test_bigtable.py", 'expected_headers = {"name": "projects/sample1/instances/sample2"}', - 'expected_headers = {"name": "projects/sample1/instances/sample2", "app_profile_id": ""}' + """expected_headers = { + "name": "projects/sample1/instances/sample2", + "app_profile_id": "", + }""" ) s.replace( "tests/unit/gapic/bigtable_v2/test_bigtable.py", @@ -148,13 +104,13 @@ def get_staging_dirs( expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3" } - """, +""", """ expected_headers = { "table_name": "projects/sample1/instances/sample2/tables/sample3", - "app_profile_id": "" + "app_profile_id": "", } - """ +""" ) # ---------------------------------------------------------------------------- @@ -163,15 +119,6 @@ def get_staging_dirs( python.py_samples(skip_readmes=True) -s.replace( - "samples/beam/noxfile.py", - """INSTALL_LIBRARY_FROM_SOURCE \= os.environ.get\("INSTALL_LIBRARY_FROM_SOURCE", False\) in \( - "True", - "true", -\)""", - """# todo(kolea2): temporary workaround to install pinned dep version -INSTALL_LIBRARY_FROM_SOURCE = False""") - # -------------------------------------------------------------------------- # Admin Overlay work # -------------------------------------------------------------------------- @@ -189,9 +136,8 @@ def 
add_overlay_to_init_py(init_py_location, import_statements, should_add): add_overlay_to_init_py( "google/cloud/bigtable_admin_v2/__init__.py", - """from .overlay import * # noqa: F403 -__all__ += overlay.__all__ # noqa: F405 -""", + """from .overlay import * # noqa: F403\n +__all__ += overlay.__all__ # noqa: F405""", is_fresh_admin_v2_copy, ) @@ -200,8 +146,7 @@ def add_overlay_to_init_py(init_py_location, import_statements, should_add): """import google.cloud.bigtable_admin_v2.overlay # noqa: F401 from google.cloud.bigtable_admin_v2.overlay import * # noqa: F401, F403 -__all__ += google.cloud.bigtable_admin_v2.overlay.__all__ -""", +__all__ += google.cloud.bigtable_admin_v2.overlay.__all__""", is_fresh_admin_copy, ) @@ -319,5 +264,3 @@ def add_overlay_to_init_py(init_py_location, import_statements, should_add): r"class GcRule\(proto\.Message\)\:", "class GcRule(oneof_message.OneofMessage):", ) - -s.shell.run(["nox", "-s", "blacken"], hide_output=False) diff --git a/packages/google-cloud-bigtable/.librarian/generator-input/noxfile.py b/packages/google-cloud-bigtable/.librarian/generator-input/noxfile.py new file mode 100644 index 000000000000..16c8a6327788 --- /dev/null +++ b/packages/google-cloud-bigtable/.librarian/generator-input/noxfile.py @@ -0,0 +1,569 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Generated by synthtool. DO NOT EDIT! 
+ +from __future__ import absolute_import + +import os +import pathlib +import re +import shutil +from typing import Dict, List +import warnings + +import nox + +FLAKE8_VERSION = "flake8==6.1.0" +BLACK_VERSION = "black[jupyter]==23.3.0" +ISORT_VERSION = "isort==5.11.0" +LINT_PATHS = ["google", "tests", "noxfile.py", "setup.py"] + +DEFAULT_PYTHON_VERSION = "3.13" + +UNIT_TEST_PYTHON_VERSIONS: List[str] = [ + "3.7", + "3.8", + "3.9", + "3.10", + "3.11", + "3.12", + "3.13", + "3.14", +] +UNIT_TEST_STANDARD_DEPENDENCIES = [ + "mock", + "asyncmock", + "pytest", + "pytest-cov", + "pytest-asyncio", + BLACK_VERSION, + "autoflake", +] +UNIT_TEST_EXTERNAL_DEPENDENCIES: List[str] = [] +UNIT_TEST_LOCAL_DEPENDENCIES: List[str] = [] +UNIT_TEST_DEPENDENCIES: List[str] = [] +UNIT_TEST_EXTRAS: List[str] = [] +UNIT_TEST_EXTRAS_BY_PYTHON: Dict[str, List[str]] = {} + +SYSTEM_TEST_PYTHON_VERSIONS: List[str] = ["3.9", "3.14"] +SYSTEM_TEST_STANDARD_DEPENDENCIES: List[str] = [ + "mock", + "pytest", + "google-cloud-testutils", +] +SYSTEM_TEST_EXTERNAL_DEPENDENCIES: List[str] = [ + "pytest-asyncio==0.21.2", + BLACK_VERSION, + "pyyaml==6.0.2", +] +SYSTEM_TEST_LOCAL_DEPENDENCIES: List[str] = [] +SYSTEM_TEST_DEPENDENCIES: List[str] = [] +SYSTEM_TEST_EXTRAS: List[str] = [] +SYSTEM_TEST_EXTRAS_BY_PYTHON: Dict[str, List[str]] = {} + +CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute() + +# 'docfx' is excluded since it only needs to run in 'docs-presubmit' +nox.options.sessions = [ + "unit-3.9", + "unit-3.10", + "unit-3.11", + "unit-3.12", + "unit-3.13", + "unit-3.14", + "system_emulated", + "system", + "mypy", + "cover", + "lint", + "lint_setup_py", + "blacken", + "docs", + "format", +] + +# Error if a python version is missing +nox.options.error_on_missing_interpreters = True + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def lint(session): + """Run linters. + + Returns a failure if the linters find linting errors or sufficiently + serious code quality issues. + """ + session.install(FLAKE8_VERSION, BLACK_VERSION) + session.run( + "black", + "--check", + *LINT_PATHS, + ) + session.run("flake8", "google", "tests") + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def blacken(session): + """Run black. Format code to uniform standard.""" + session.install(BLACK_VERSION) + session.run( + "black", + *LINT_PATHS, + ) + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def format(session): + """ + Run isort to sort imports. Then run black + to format code to uniform standard. + """ + session.install(BLACK_VERSION, ISORT_VERSION) + # Use the --fss option to sort imports using strict alphabetical order. 
+ # See https://pycqa.github.io/isort/docs/configuration/options.html#force-sort-within-sections + session.run( + "isort", + "--fss", + *LINT_PATHS, + ) + session.run( + "black", + *LINT_PATHS, + ) + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def mypy(session): + """Verify type hints are mypy compatible.""" + session.install("-e", ".") + session.install( + "mypy", "types-setuptools", "types-protobuf", "types-mock", "types-requests" + ) + session.install("google-cloud-testutils") + session.run("mypy", "-p", "google.cloud.bigtable.data") + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def lint_setup_py(session): + """Verify that setup.py is valid (including RST check).""" + session.install("setuptools", "docutils", "pygments") + session.run("python", "setup.py", "check", "--restructuredtext", "--strict") + + +def install_unittest_dependencies(session, *constraints): + standard_deps = UNIT_TEST_STANDARD_DEPENDENCIES + UNIT_TEST_DEPENDENCIES + session.install(*standard_deps, *constraints) + + if UNIT_TEST_EXTERNAL_DEPENDENCIES: + warnings.warn( + "'unit_test_external_dependencies' is deprecated. Instead, please " + "use 'unit_test_dependencies' or 'unit_test_local_dependencies'.", + DeprecationWarning, + ) + session.install(*UNIT_TEST_EXTERNAL_DEPENDENCIES, *constraints) + + if UNIT_TEST_LOCAL_DEPENDENCIES: + session.install(*UNIT_TEST_LOCAL_DEPENDENCIES, *constraints) + + if UNIT_TEST_EXTRAS_BY_PYTHON: + extras = UNIT_TEST_EXTRAS_BY_PYTHON.get(session.python, []) + elif UNIT_TEST_EXTRAS: + extras = UNIT_TEST_EXTRAS + else: + extras = [] + + if extras: + session.install("-e", f".[{','.join(extras)}]", *constraints) + else: + session.install("-e", ".", *constraints) + + +@nox.session(python=UNIT_TEST_PYTHON_VERSIONS) +@nox.parametrize( + "protobuf_implementation", + ["python", "upb", "cpp"], +) +def unit(session, protobuf_implementation): + # Install all test dependencies, then install this package in-place. + py_version = tuple([int(v) for v in session.python.split(".")]) + if protobuf_implementation == "cpp" and py_version >= (3, 11): + session.skip("cpp implementation is not supported in python 3.11+") + + constraints_path = str( + CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt" + ) + install_unittest_dependencies(session, "-c", constraints_path) + + # TODO(https://github.com/googleapis/synthtool/issues/1976): + # Remove the 'cpp' implementation once support for Protobuf 3.x is dropped. + # The 'cpp' implementation requires Protobuf<4. + if protobuf_implementation == "cpp": + session.install("protobuf<4") + + # Run py.test against the unit tests. + session.run( + "py.test", + "--quiet", + f"--junitxml=unit_{session.python}_sponge_log.xml", + "--cov=google", + "--cov=tests/unit", + "--cov-append", + "--cov-config=.coveragerc", + "--cov-report=", + "--cov-fail-under=0", + os.path.join("tests", "unit"), + *session.posargs, + env={ + "PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION": protobuf_implementation, + }, + ) + + +def install_systemtest_dependencies(session, *constraints): + # Use pre-release gRPC for system tests. + # Exclude version 1.52.0rc1 which has a known issue. 
+ # See https://github.com/grpc/grpc/issues/32163 + session.install("--pre", "grpcio!=1.52.0rc1") + + session.install(*SYSTEM_TEST_STANDARD_DEPENDENCIES, *constraints) + + if SYSTEM_TEST_EXTERNAL_DEPENDENCIES: + session.install(*SYSTEM_TEST_EXTERNAL_DEPENDENCIES, *constraints) + + if SYSTEM_TEST_LOCAL_DEPENDENCIES: + session.install("-e", *SYSTEM_TEST_LOCAL_DEPENDENCIES, *constraints) + + if SYSTEM_TEST_DEPENDENCIES: + session.install("-e", *SYSTEM_TEST_DEPENDENCIES, *constraints) + + if SYSTEM_TEST_EXTRAS_BY_PYTHON: + extras = SYSTEM_TEST_EXTRAS_BY_PYTHON.get(session.python, []) + elif SYSTEM_TEST_EXTRAS: + extras = SYSTEM_TEST_EXTRAS + else: + extras = [] + + if extras: + session.install("-e", f".[{','.join(extras)}]", *constraints) + else: + session.install("-e", ".", *constraints) + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def system_emulated(session): + import subprocess + import signal + + try: + subprocess.call(["gcloud", "--version"]) + except OSError: + session.skip("gcloud not found but required for emulator support") + + # Currently, CI/CD doesn't have beta component of gcloud. + subprocess.call(["gcloud", "components", "install", "beta", "bigtable"]) + + hostport = "localhost:8789" + session.env["BIGTABLE_EMULATOR_HOST"] = hostport + + p = subprocess.Popen( + ["gcloud", "beta", "emulators", "bigtable", "start", "--host-port", hostport] + ) + + try: + system(session) + finally: + # Stop Emulator + os.killpg(os.getpgid(p.pid), signal.SIGKILL) + + +@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS) +@nox.parametrize("client_type", ["async", "sync", "legacy"]) +def conformance(session, client_type): + # install dependencies + constraints_path = str( + CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt" + ) + install_unittest_dependencies(session, "-c", constraints_path) + with session.chdir("test_proxy"): + # download the conformance test suite + session.run( + "bash", + "-e", + "run_tests.sh", + external=True, + env={"CLIENT_TYPE": client_type}, + ) + + +@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS) +def system(session): + """Run the system test suite.""" + constraints_path = str( + CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt" + ) + system_test_path = os.path.join("tests", "system.py") + system_test_folder_path = os.path.join("tests", "system") + + # Check the value of `RUN_SYSTEM_TESTS` env var. It defaults to true. + if os.environ.get("RUN_SYSTEM_TESTS", "true") == "false": + session.skip("RUN_SYSTEM_TESTS is set to false, skipping") + # Install pyopenssl for mTLS testing. + if os.environ.get("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true": + session.install("pyopenssl") + + system_test_exists = os.path.exists(system_test_path) + system_test_folder_exists = os.path.exists(system_test_folder_path) + # Sanity check: only run tests if found. + if not system_test_exists and not system_test_folder_exists: + session.skip("System tests were not found") + + install_systemtest_dependencies(session, "-c", constraints_path) + + # Run py.test against the system tests. + if system_test_exists: + session.run( + "py.test", + "--quiet", + f"--junitxml=system_{session.python}_sponge_log.xml", + system_test_path, + *session.posargs, + ) + if system_test_folder_exists: + session.run( + "py.test", + "--quiet", + f"--junitxml=system_{session.python}_sponge_log.xml", + system_test_folder_path, + *session.posargs, + ) + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def cover(session): + """Run the final coverage report. 
+ + This outputs the coverage report aggregating coverage from the unit + test runs (not system test runs), and then erases coverage data. + """ + session.install("coverage", "pytest-cov") + session.run("coverage", "report", "--show-missing", "--fail-under=99") + + session.run("coverage", "erase") + + +@nox.session(python="3.10") +def docs(session): + """Build the docs for this library.""" + + session.install("-e", ".") + session.install( + # We need to pin to specific versions of the `sphinxcontrib-*` packages + # which still support sphinx 4.x. + # See https://github.com/googleapis/sphinx-docfx-yaml/issues/344 + # and https://github.com/googleapis/sphinx-docfx-yaml/issues/345. + "sphinxcontrib-applehelp==1.0.4", + "sphinxcontrib-devhelp==1.0.2", + "sphinxcontrib-htmlhelp==2.0.1", + "sphinxcontrib-qthelp==1.0.3", + "sphinxcontrib-serializinghtml==1.1.5", + "sphinx==4.5.0", + "alabaster", + "recommonmark", + ) + + shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) + session.run( + "sphinx-build", + "-W", # warnings as errors + "-T", # show full traceback on exception + "-N", # no colors + "-b", + "html", + "-d", + os.path.join("docs", "_build", "doctrees", ""), + os.path.join("docs", ""), + os.path.join("docs", "_build", "html", ""), + ) + + +@nox.session(python="3.10") +def docfx(session): + """Build the docfx yaml files for this library.""" + + session.install("-e", ".") + session.install( + # We need to pin to specific versions of the `sphinxcontrib-*` packages + # which still support sphinx 4.x. + # See https://github.com/googleapis/sphinx-docfx-yaml/issues/344 + # and https://github.com/googleapis/sphinx-docfx-yaml/issues/345. + "sphinxcontrib-applehelp==1.0.4", + "sphinxcontrib-devhelp==1.0.2", + "sphinxcontrib-htmlhelp==2.0.1", + "sphinxcontrib-qthelp==1.0.3", + "sphinxcontrib-serializinghtml==1.1.5", + "gcp-sphinx-docfx-yaml", + "alabaster", + "recommonmark", + ) + + shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) + session.run( + "sphinx-build", + "-T", # show full traceback on exception + "-N", # no colors + "-D", + ( + "extensions=sphinx.ext.autodoc," + "sphinx.ext.autosummary," + "docfx_yaml.extension," + "sphinx.ext.intersphinx," + "sphinx.ext.coverage," + "sphinx.ext.napoleon," + "sphinx.ext.todo," + "sphinx.ext.viewcode," + "recommonmark" + ), + "-b", + "html", + "-d", + os.path.join("docs", "_build", "doctrees", ""), + os.path.join("docs", ""), + os.path.join("docs", "_build", "html", ""), + ) + # Customization: Add extra sections to the table of contents for the Classic vs Async clients + session.install("pyyaml") + session.run("python", "docs/scripts/patch_devsite_toc.py") + + +@nox.session(python="3.14") +@nox.parametrize( + "protobuf_implementation", + ["python", "upb", "cpp"], +) +def prerelease_deps(session, protobuf_implementation): + """Run all tests with prerelease versions of dependencies installed.""" + + py_version = tuple([int(v) for v in session.python.split(".")]) + if protobuf_implementation == "cpp" and py_version >= (3, 11): + session.skip("cpp implementation is not supported in python 3.11+") + + # Install all dependencies + session.install("-e", ".[all, tests, tracing]") + unit_deps_all = UNIT_TEST_STANDARD_DEPENDENCIES + UNIT_TEST_EXTERNAL_DEPENDENCIES + session.install(*unit_deps_all) + system_deps_all = ( + SYSTEM_TEST_STANDARD_DEPENDENCIES + SYSTEM_TEST_EXTERNAL_DEPENDENCIES + ) + session.install(*system_deps_all) + + # Because we test minimum dependency versions on the minimum Python + # version, the first version we 
test with in the unit tests sessions has a + # constraints file containing all dependencies and extras. + with open( + CURRENT_DIRECTORY + / "testing" + / f"constraints-{UNIT_TEST_PYTHON_VERSIONS[0]}.txt", + encoding="utf-8", + ) as constraints_file: + constraints_text = constraints_file.read() + + # Ignore leading whitespace and comment lines. + constraints_deps = [ + match.group(1) + for match in re.finditer( + r"^\s*(\S+)(?===\S+)", constraints_text, flags=re.MULTILINE + ) + ] + + session.install(*constraints_deps) + + prerel_deps = [ + "protobuf", + # dependency of grpc + "six", + "grpc-google-iam-v1", + "googleapis-common-protos", + "grpcio", + "grpcio-status", + "google-api-core", + "google-auth", + "proto-plus", + "google-cloud-testutils", + # dependencies of google-cloud-testutils" + "click", + ] + + for dep in prerel_deps: + session.install("--pre", "--no-deps", "--upgrade", dep) + + # Remaining dependencies + other_deps = [ + "requests", + ] + session.install(*other_deps) + + # Print out prerelease package versions + session.run( + "python", "-c", "import google.protobuf; print(google.protobuf.__version__)" + ) + session.run("python", "-c", "import grpc; print(grpc.__version__)") + session.run("python", "-c", "import google.auth; print(google.auth.__version__)") + + session.run( + "py.test", + "tests/unit", + env={ + "PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION": protobuf_implementation, + }, + ) + + system_test_path = os.path.join("tests", "system.py") + system_test_folder_path = os.path.join("tests", "system") + + # Only run system tests if found. + if os.path.exists(system_test_path): + session.run( + "py.test", + "--verbose", + f"--junitxml=system_{session.python}_sponge_log.xml", + system_test_path, + *session.posargs, + env={ + "PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION": protobuf_implementation, + }, + ) + if os.path.exists(system_test_folder_path): + session.run( + "py.test", + "--verbose", + f"--junitxml=system_{session.python}_sponge_log.xml", + system_test_folder_path, + *session.posargs, + env={ + "PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION": protobuf_implementation, + }, + ) + + +@nox.session(python="3.10") +def generate_sync(session): + """ + Re-generate sync files for the library from CrossSync-annotated async source + """ + session.install(BLACK_VERSION) + session.install("autoflake") + session.run("python", ".cross_sync/generate.py", ".") diff --git a/packages/google-cloud-bigtable/.librarian/generator-input/setup.py b/packages/google-cloud-bigtable/.librarian/generator-input/setup.py new file mode 100644 index 000000000000..cac533db6427 --- /dev/null +++ b/packages/google-cloud-bigtable/.librarian/generator-input/setup.py @@ -0,0 +1,104 @@ +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import io +import os + +import setuptools + + +package_root = os.path.abspath(os.path.dirname(__file__)) + +# Package metadata. 
+ +name = "google-cloud-bigtable" +description = "Google Cloud Bigtable API client library" + +version = {} +with open(os.path.join(package_root, "google/cloud/bigtable/gapic_version.py")) as fp: + exec(fp.read(), version) +version = version["__version__"] + + +# Should be one of: +# 'Development Status :: 3 - Alpha' +# 'Development Status :: 4 - Beta' +# 'Development Status :: 5 - Production/Stable' +release_status = "Development Status :: 5 - Production/Stable" +dependencies = [ + "google-api-core[grpc] >= 2.17.0, <3.0.0", + "google-cloud-core >= 1.4.4, <3.0.0", + "google-auth >= 2.23.0, <3.0.0,!=2.24.0,!=2.25.0", + "grpc-google-iam-v1 >= 0.12.4, <1.0.0", + "proto-plus >= 1.22.3, <2.0.0", + "proto-plus >= 1.25.0, <2.0.0; python_version>='3.13'", + "protobuf>=3.20.2,<7.0.0,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5", + "google-crc32c>=1.5.0, <2.0.0dev", +] +extras = {"libcst": "libcst >= 0.2.5"} + + +# Setup boilerplate below this line. + +package_root = os.path.abspath(os.path.dirname(__file__)) + +readme_filename = os.path.join(package_root, "README.rst") +with io.open(readme_filename, encoding="utf-8") as readme_file: + readme = readme_file.read() + +# Only include packages under the 'google' namespace. Do not include tests, +# benchmarks, etc. +packages = [ + package + for package in setuptools.find_namespace_packages() + if package.startswith("google") +] + +setuptools.setup( + name=name, + version=version, + description=description, + long_description=readme, + author="Google LLC", + author_email="googleapis-packages@google.com", + license="Apache 2.0", + url="https://github.com/googleapis/python-bigtable", + classifiers=[ + release_status, + "Intended Audience :: Developers", + "License :: OSI Approved :: Apache Software License", + "Programming Language :: Python", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", + "Programming Language :: Python :: 3.14", + "Operating System :: OS Independent", + "Topic :: Internet", + ], + platforms="Posix; MacOS X; Windows", + packages=packages, + install_requires=dependencies, + extras_require=extras, + scripts=[ + "scripts/fixup_bigtable_v2_keywords.py", + "scripts/fixup_admin_v2_keywords.py", + ], + python_requires=">=3.7", + include_package_data=True, + zip_safe=False, +) diff --git a/packages/google-cloud-bigtable/.librarian/state.yaml b/packages/google-cloud-bigtable/.librarian/state.yaml new file mode 100644 index 000000000000..049e7b1cf7b6 --- /dev/null +++ b/packages/google-cloud-bigtable/.librarian/state.yaml @@ -0,0 +1,40 @@ +image: us-central1-docker.pkg.dev/cloud-sdk-librarian-prod/images-prod/python-librarian-generator@sha256:8e2c32496077054105bd06c54a59d6a6694287bc053588e24debe6da6920ad91 +libraries: + - id: google-cloud-bigtable + version: 2.34.0 + last_generated_commit: a17b84add8318f780fcc8a027815d5fee644b9f7 + apis: + - path: google/bigtable/v2 + service_config: bigtable_v2.yaml + - path: google/bigtable/admin/v2 + service_config: bigtableadmin_v2.yaml + source_roots: + - . 
+ preserve_regex: [] + remove_regex: + - ^.pre-commit-config.yaml + - ^.repo-metadata.json + - ^.trampolinerc + - ^docs/admin_client/bigtable + - ^docs/admin_client/services_.rst + - ^docs/admin_client/types_.rst + - ^docs/summary_overview.md + - ^google/cloud/bigtable_v2 + - ^google/cloud/bigtable_admin/ + - ^google/cloud/bigtable_admin_v2/services + - ^google/cloud/bigtable_admin_v2/types + - ^google/cloud/bigtable_admin_v2/__init__.py + - ^google/cloud/bigtable_admin_v2/gapic + - ^google/cloud/bigtable_admin_v2/py.typed + - ^samples/AUTHORING_GUIDE.md + - ^samples/CONTRIBUTING.md + - ^samples/generated_samples + - ^tests/unit/gapic + - ^noxfile.py + - ^scripts/fixup_bigtable + - ^setup.py + - ^SECURITY.md + - ^tests/__init__.py + - ^tests/unit/__init__.py + - ^tests/unit/gapic + tag_format: v{version} diff --git a/packages/google-cloud-bigtable/.release-please-manifest.json b/packages/google-cloud-bigtable/.release-please-manifest.json deleted file mode 100644 index 7887ba9321f0..000000000000 --- a/packages/google-cloud-bigtable/.release-please-manifest.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - ".": "2.34.0" -} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py index 4800b05591a5..b31b170e1e8f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin/gapic_version.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py index 4800b05591a5..b31b170e1e8f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_admin_v2/gapic_version.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py index 4800b05591a5..b31b170e1e8f 100644 --- a/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py +++ b/packages/google-cloud-bigtable/google/cloud/bigtable_v2/gapic_version.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/packages/google-cloud-bigtable/noxfile.py b/packages/google-cloud-bigtable/noxfile.py index a182bafba993..16c8a6327788 100644 --- a/packages/google-cloud-bigtable/noxfile.py +++ b/packages/google-cloud-bigtable/noxfile.py @@ -30,7 +30,7 @@ FLAKE8_VERSION = "flake8==6.1.0" BLACK_VERSION = "black[jupyter]==23.3.0" ISORT_VERSION = "isort==5.11.0" -LINT_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"] +LINT_PATHS = ["google", "tests", "noxfile.py", "setup.py"] DEFAULT_PYTHON_VERSION = "3.13" diff --git a/packages/google-cloud-bigtable/release-please-config.json b/packages/google-cloud-bigtable/release-please-config.json deleted file mode 100644 index 33d5a7e21784..000000000000 --- a/packages/google-cloud-bigtable/release-please-config.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "$schema": -"https://raw.githubusercontent.com/googleapis/release-please/main/schemas/config.json", - "packages": { - ".": { - "release-type": "python", - "extra-files": [ - "google/cloud/bigtable/gapic_version.py", - "google/cloud/bigtable_admin/gapic_version.py", - "google/cloud/bigtable_v2/gapic_version.py", - "google/cloud/bigtable_admin_v2/gapic_version.py" - ] - } - }, - "release-type": "python", - "plugins": [ - { - "type": "sentence-case" - } - ], - "initial-version": "2.13.2" -} diff --git a/packages/google-cloud-bigtable/samples/generated_samples/snippet_metadata_google.bigtable.admin.v2.json b/packages/google-cloud-bigtable/samples/generated_samples/snippet_metadata_google.bigtable.admin.v2.json index 66b5c8f679e9..8b4f59fbd875 100644 --- a/packages/google-cloud-bigtable/samples/generated_samples/snippet_metadata_google.bigtable.admin.v2.json +++ b/packages/google-cloud-bigtable/samples/generated_samples/snippet_metadata_google.bigtable.admin.v2.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-cloud-bigtable-admin", - "version": "0.0.0" + "version": "2.34.0" }, "snippets": [ { diff --git a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py index 24db8e2695a7..9922999d6012 100644 --- a/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py +++ b/packages/google-cloud-bigtable/tests/unit/gapic/bigtable_v2/test_bigtable.py @@ -6854,7 +6854,6 @@ def test_read_rows_routing_parameters_request_1_grpc(): "table_name": "projects/sample1/instances/sample2/tables/sample3", "app_profile_id": "", } - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -6881,7 +6880,6 @@ def test_read_rows_routing_parameters_request_2_grpc(): assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -6919,7 +6917,6 @@ def test_read_rows_routing_parameters_request_3_grpc(): "table_name": "projects/sample1/instances/sample2/tables/sample3", "app_profile_id": "", } - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -6955,7 +6952,6 @@ def test_read_rows_routing_parameters_request_4_grpc(): "name": "projects/sample1/instances/sample2", "app_profile_id": "", } - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == 
"x-goog-request-params"]) @@ -6989,7 +6985,6 @@ def test_sample_row_keys_routing_parameters_request_1_grpc(): "table_name": "projects/sample1/instances/sample2/tables/sample3", "app_profile_id": "", } - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -7016,7 +7011,6 @@ def test_sample_row_keys_routing_parameters_request_2_grpc(): assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -7054,7 +7048,6 @@ def test_sample_row_keys_routing_parameters_request_3_grpc(): "table_name": "projects/sample1/instances/sample2/tables/sample3", "app_profile_id": "", } - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -7090,7 +7083,6 @@ def test_sample_row_keys_routing_parameters_request_4_grpc(): "name": "projects/sample1/instances/sample2", "app_profile_id": "", } - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -7124,7 +7116,6 @@ def test_mutate_row_routing_parameters_request_1_grpc(): "table_name": "projects/sample1/instances/sample2/tables/sample3", "app_profile_id": "", } - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -7151,7 +7142,6 @@ def test_mutate_row_routing_parameters_request_2_grpc(): assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -7189,7 +7179,6 @@ def test_mutate_row_routing_parameters_request_3_grpc(): "table_name": "projects/sample1/instances/sample2/tables/sample3", "app_profile_id": "", } - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -7223,7 +7212,6 @@ def test_mutate_rows_routing_parameters_request_1_grpc(): "table_name": "projects/sample1/instances/sample2/tables/sample3", "app_profile_id": "", } - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -7250,7 +7238,6 @@ def test_mutate_rows_routing_parameters_request_2_grpc(): assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -7288,7 +7275,6 @@ def test_mutate_rows_routing_parameters_request_3_grpc(): "table_name": "projects/sample1/instances/sample2/tables/sample3", "app_profile_id": "", } - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -7324,7 +7310,6 @@ def test_check_and_mutate_row_routing_parameters_request_1_grpc(): "table_name": "projects/sample1/instances/sample2/tables/sample3", "app_profile_id": "", } - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) 
@@ -7353,7 +7338,6 @@ def test_check_and_mutate_row_routing_parameters_request_2_grpc(): assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -7393,7 +7377,6 @@ def test_check_and_mutate_row_routing_parameters_request_3_grpc(): "table_name": "projects/sample1/instances/sample2/tables/sample3", "app_profile_id": "", } - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -7425,7 +7408,6 @@ def test_ping_and_warm_routing_parameters_request_1_grpc(): "name": "projects/sample1/instances/sample2", "app_profile_id": "", } - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -7452,7 +7434,6 @@ def test_ping_and_warm_routing_parameters_request_2_grpc(): assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -7488,7 +7469,6 @@ def test_read_modify_write_row_routing_parameters_request_1_grpc(): "table_name": "projects/sample1/instances/sample2/tables/sample3", "app_profile_id": "", } - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -7519,7 +7499,6 @@ def test_read_modify_write_row_routing_parameters_request_2_grpc(): assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -7559,7 +7538,6 @@ def test_read_modify_write_row_routing_parameters_request_3_grpc(): "table_name": "projects/sample1/instances/sample2/tables/sample3", "app_profile_id": "", } - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -7593,7 +7571,6 @@ def test_prepare_query_routing_parameters_request_1_grpc(): "name": "projects/sample1/instances/sample2", "app_profile_id": "", } - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -7620,7 +7597,6 @@ def test_prepare_query_routing_parameters_request_2_grpc(): assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -7654,7 +7630,6 @@ def test_execute_query_routing_parameters_request_1_grpc(): "name": "projects/sample1/instances/sample2", "app_profile_id": "", } - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -7681,7 +7656,6 @@ def test_execute_query_routing_parameters_request_2_grpc(): assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -8027,7 +8001,6 @@ async def 
test_read_rows_routing_parameters_request_1_grpc_asyncio(): "table_name": "projects/sample1/instances/sample2/tables/sample3", "app_profile_id": "", } - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -8059,7 +8032,6 @@ async def test_read_rows_routing_parameters_request_2_grpc_asyncio(): assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -8102,7 +8074,6 @@ async def test_read_rows_routing_parameters_request_3_grpc_asyncio(): "table_name": "projects/sample1/instances/sample2/tables/sample3", "app_profile_id": "", } - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -8143,7 +8114,6 @@ async def test_read_rows_routing_parameters_request_4_grpc_asyncio(): "name": "projects/sample1/instances/sample2", "app_profile_id": "", } - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -8182,7 +8152,6 @@ async def test_sample_row_keys_routing_parameters_request_1_grpc_asyncio(): "table_name": "projects/sample1/instances/sample2/tables/sample3", "app_profile_id": "", } - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -8214,7 +8183,6 @@ async def test_sample_row_keys_routing_parameters_request_2_grpc_asyncio(): assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -8257,7 +8225,6 @@ async def test_sample_row_keys_routing_parameters_request_3_grpc_asyncio(): "table_name": "projects/sample1/instances/sample2/tables/sample3", "app_profile_id": "", } - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -8298,7 +8265,6 @@ async def test_sample_row_keys_routing_parameters_request_4_grpc_asyncio(): "name": "projects/sample1/instances/sample2", "app_profile_id": "", } - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -8336,7 +8302,6 @@ async def test_mutate_row_routing_parameters_request_1_grpc_asyncio(): "table_name": "projects/sample1/instances/sample2/tables/sample3", "app_profile_id": "", } - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -8367,7 +8332,6 @@ async def test_mutate_row_routing_parameters_request_2_grpc_asyncio(): assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -8409,7 +8373,6 @@ async def test_mutate_row_routing_parameters_request_3_grpc_asyncio(): "table_name": "projects/sample1/instances/sample2/tables/sample3", "app_profile_id": "", } - # assert the expected headers are present, in any order routing_string = next( iter([m[1] 
for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -8448,7 +8411,6 @@ async def test_mutate_rows_routing_parameters_request_1_grpc_asyncio(): "table_name": "projects/sample1/instances/sample2/tables/sample3", "app_profile_id": "", } - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -8480,7 +8442,6 @@ async def test_mutate_rows_routing_parameters_request_2_grpc_asyncio(): assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -8523,7 +8484,6 @@ async def test_mutate_rows_routing_parameters_request_3_grpc_asyncio(): "table_name": "projects/sample1/instances/sample2/tables/sample3", "app_profile_id": "", } - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -8565,7 +8525,6 @@ async def test_check_and_mutate_row_routing_parameters_request_1_grpc_asyncio(): "table_name": "projects/sample1/instances/sample2/tables/sample3", "app_profile_id": "", } - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -8600,7 +8559,6 @@ async def test_check_and_mutate_row_routing_parameters_request_2_grpc_asyncio(): assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -8646,7 +8604,6 @@ async def test_check_and_mutate_row_routing_parameters_request_3_grpc_asyncio(): "table_name": "projects/sample1/instances/sample2/tables/sample3", "app_profile_id": "", } - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -8684,7 +8641,6 @@ async def test_ping_and_warm_routing_parameters_request_1_grpc_asyncio(): "name": "projects/sample1/instances/sample2", "app_profile_id": "", } - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -8715,7 +8671,6 @@ async def test_ping_and_warm_routing_parameters_request_2_grpc_asyncio(): assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -8755,7 +8710,6 @@ async def test_read_modify_write_row_routing_parameters_request_1_grpc_asyncio() "table_name": "projects/sample1/instances/sample2/tables/sample3", "app_profile_id": "", } - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -8790,7 +8744,6 @@ async def test_read_modify_write_row_routing_parameters_request_2_grpc_asyncio() assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -8834,7 +8787,6 @@ async def test_read_modify_write_row_routing_parameters_request_3_grpc_asyncio() "table_name": 
"projects/sample1/instances/sample2/tables/sample3", "app_profile_id": "", } - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -8874,7 +8826,6 @@ async def test_prepare_query_routing_parameters_request_1_grpc_asyncio(): "name": "projects/sample1/instances/sample2", "app_profile_id": "", } - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -8907,7 +8858,6 @@ async def test_prepare_query_routing_parameters_request_2_grpc_asyncio(): assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -8946,7 +8896,6 @@ async def test_execute_query_routing_parameters_request_1_grpc_asyncio(): "name": "projects/sample1/instances/sample2", "app_profile_id": "", } - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -8978,7 +8927,6 @@ async def test_execute_query_routing_parameters_request_2_grpc_asyncio(): assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -10634,7 +10582,6 @@ def test_read_rows_routing_parameters_request_1_rest(): "table_name": "projects/sample1/instances/sample2/tables/sample3", "app_profile_id": "", } - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -10660,7 +10607,6 @@ def test_read_rows_routing_parameters_request_2_rest(): assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -10697,7 +10643,6 @@ def test_read_rows_routing_parameters_request_3_rest(): "table_name": "projects/sample1/instances/sample2/tables/sample3", "app_profile_id": "", } - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -10732,7 +10677,6 @@ def test_read_rows_routing_parameters_request_4_rest(): "name": "projects/sample1/instances/sample2", "app_profile_id": "", } - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -10765,7 +10709,6 @@ def test_sample_row_keys_routing_parameters_request_1_rest(): "table_name": "projects/sample1/instances/sample2/tables/sample3", "app_profile_id": "", } - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -10791,7 +10734,6 @@ def test_sample_row_keys_routing_parameters_request_2_rest(): assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -10828,7 +10770,6 @@ def test_sample_row_keys_routing_parameters_request_3_rest(): "table_name": 
"projects/sample1/instances/sample2/tables/sample3", "app_profile_id": "", } - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -10863,7 +10804,6 @@ def test_sample_row_keys_routing_parameters_request_4_rest(): "name": "projects/sample1/instances/sample2", "app_profile_id": "", } - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -10896,7 +10836,6 @@ def test_mutate_row_routing_parameters_request_1_rest(): "table_name": "projects/sample1/instances/sample2/tables/sample3", "app_profile_id": "", } - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -10922,7 +10861,6 @@ def test_mutate_row_routing_parameters_request_2_rest(): assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -10959,7 +10897,6 @@ def test_mutate_row_routing_parameters_request_3_rest(): "table_name": "projects/sample1/instances/sample2/tables/sample3", "app_profile_id": "", } - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -10992,7 +10929,6 @@ def test_mutate_rows_routing_parameters_request_1_rest(): "table_name": "projects/sample1/instances/sample2/tables/sample3", "app_profile_id": "", } - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -11018,7 +10954,6 @@ def test_mutate_rows_routing_parameters_request_2_rest(): assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -11055,7 +10990,6 @@ def test_mutate_rows_routing_parameters_request_3_rest(): "table_name": "projects/sample1/instances/sample2/tables/sample3", "app_profile_id": "", } - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -11090,7 +11024,6 @@ def test_check_and_mutate_row_routing_parameters_request_1_rest(): "table_name": "projects/sample1/instances/sample2/tables/sample3", "app_profile_id": "", } - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -11118,7 +11051,6 @@ def test_check_and_mutate_row_routing_parameters_request_2_rest(): assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -11157,7 +11089,6 @@ def test_check_and_mutate_row_routing_parameters_request_3_rest(): "table_name": "projects/sample1/instances/sample2/tables/sample3", "app_profile_id": "", } - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -11188,7 +11119,6 @@ def test_ping_and_warm_routing_parameters_request_1_rest(): "name": 
"projects/sample1/instances/sample2", "app_profile_id": "", } - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -11214,7 +11144,6 @@ def test_ping_and_warm_routing_parameters_request_2_rest(): assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -11249,7 +11178,6 @@ def test_read_modify_write_row_routing_parameters_request_1_rest(): "table_name": "projects/sample1/instances/sample2/tables/sample3", "app_profile_id": "", } - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -11279,7 +11207,6 @@ def test_read_modify_write_row_routing_parameters_request_2_rest(): assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -11318,7 +11245,6 @@ def test_read_modify_write_row_routing_parameters_request_3_rest(): "table_name": "projects/sample1/instances/sample2/tables/sample3", "app_profile_id": "", } - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -11351,7 +11277,6 @@ def test_prepare_query_routing_parameters_request_1_rest(): "name": "projects/sample1/instances/sample2", "app_profile_id": "", } - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -11377,7 +11302,6 @@ def test_prepare_query_routing_parameters_request_2_rest(): assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -11410,7 +11334,6 @@ def test_execute_query_routing_parameters_request_1_rest(): "name": "projects/sample1/instances/sample2", "app_profile_id": "", } - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) @@ -11436,7 +11359,6 @@ def test_execute_query_routing_parameters_request_2_rest(): assert args[0] == request_msg expected_headers = {"app_profile_id": "sample1"} - # assert the expected headers are present, in any order routing_string = next( iter([m[1] for m in kw["metadata"] if m[0] == "x-goog-request-params"]) From 391f563607f738e09c0ab9832dee5f1b59dbd4d9 Mon Sep 17 00:00:00 2001 From: Anthonios Partheniou Date: Mon, 24 Nov 2025 18:02:07 -0500 Subject: [PATCH 890/892] chore: remove releases.md (#1237) Remove [releases.md](https://github.com/googleapis/python-bigtable/blob/main/releases.md) which is unused. 
This points to CHANGELOG.md at `../../bigtable/CHANGELOG.md` which doesn't exist --- packages/google-cloud-bigtable/releases.md | 1 - 1 file changed, 1 deletion(-) delete mode 120000 packages/google-cloud-bigtable/releases.md diff --git a/packages/google-cloud-bigtable/releases.md b/packages/google-cloud-bigtable/releases.md deleted file mode 120000 index 4c43d49320dc..000000000000 --- a/packages/google-cloud-bigtable/releases.md +++ /dev/null @@ -1 +0,0 @@ -../../bigtable/CHANGELOG.md \ No newline at end of file From d6cd120ad145dba775b3c5dc7df17fccffe2e869 Mon Sep 17 00:00:00 2001 From: Anthonios Partheniou Date: Tue, 25 Nov 2025 16:18:41 +0000 Subject: [PATCH 891/892] Trigger owlbot post-processor --- .../google-cloud-bigtable/google-cloud-bigtable.txt | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 owl-bot-staging/google-cloud-bigtable/google-cloud-bigtable/google-cloud-bigtable.txt diff --git a/owl-bot-staging/google-cloud-bigtable/google-cloud-bigtable/google-cloud-bigtable.txt b/owl-bot-staging/google-cloud-bigtable/google-cloud-bigtable/google-cloud-bigtable.txt new file mode 100644 index 000000000000..e69de29bb2d1 From 34d45ee579d9a4006348fa0e27a226b460e9f261 Mon Sep 17 00:00:00 2001 From: Anthonios Partheniou Date: Tue, 25 Nov 2025 16:18:49 +0000 Subject: [PATCH 892/892] build: google-cloud-bigtable migration: adjust owlbot-related files --- .../google-cloud-bigtable/.github/CODEOWNERS | 12 - .../.github/CONTRIBUTING.md | 28 - .../.github/ISSUE_TEMPLATE/bug_report.md | 43 -- .../.github/ISSUE_TEMPLATE/feature_request.md | 18 - .../.github/ISSUE_TEMPLATE/support_request.md | 7 - .../.github/PULL_REQUEST_TEMPLATE.md | 7 - .../.github/auto-label.yaml | 20 - .../.github/blunderbuss.yml | 20 - .../.github/flakybot.yaml | 15 - .../.github/header-checker-lint.yml | 15 - .../.github/snippet-bot.yml | 0 .../.github/workflows/conformance.yaml | 64 --- .../.github/workflows/docs.yml | 38 -- .../.github/workflows/lint.yml | 25 - .../.github/workflows/mypy.yml | 22 - .../.github/workflows/system_emulated.yml | 29 -- .../.github/workflows/unittest.yml | 61 --- .../google-cloud-bigtable/.kokoro/build.sh | 60 --- .../.kokoro/conformance.sh | 43 -- .../.kokoro/continuous/common.cfg | 27 - .../.kokoro/continuous/continuous.cfg | 1 - .../.kokoro/continuous/prerelease-deps.cfg | 7 - .../.kokoro/populate-secrets.sh | 43 -- .../.kokoro/presubmit/common.cfg | 27 - .../.kokoro/presubmit/conformance.cfg | 6 - .../.kokoro/presubmit/prerelease-deps.cfg | 7 - .../.kokoro/presubmit/presubmit.cfg | 7 - .../.kokoro/presubmit/system-3.9.cfg | 7 - .../.kokoro/presubmit/system.cfg | 7 - .../.kokoro/samples/lint/common.cfg | 34 -- .../.kokoro/samples/lint/continuous.cfg | 6 - .../.kokoro/samples/lint/periodic.cfg | 6 - .../.kokoro/samples/lint/presubmit.cfg | 6 - .../.kokoro/samples/python3.10/common.cfg | 40 -- .../.kokoro/samples/python3.10/continuous.cfg | 6 - .../samples/python3.10/periodic-head.cfg | 11 - .../.kokoro/samples/python3.10/periodic.cfg | 6 - .../.kokoro/samples/python3.10/presubmit.cfg | 6 - .../.kokoro/samples/python3.11/common.cfg | 40 -- .../.kokoro/samples/python3.11/continuous.cfg | 6 - .../samples/python3.11/periodic-head.cfg | 11 - .../.kokoro/samples/python3.11/periodic.cfg | 6 - .../.kokoro/samples/python3.11/presubmit.cfg | 6 - .../.kokoro/samples/python3.12/common.cfg | 40 -- .../.kokoro/samples/python3.12/continuous.cfg | 6 - .../samples/python3.12/periodic-head.cfg | 11 - .../.kokoro/samples/python3.12/periodic.cfg | 6 - 
.../.kokoro/samples/python3.12/presubmit.cfg | 6 - .../.kokoro/samples/python3.13/common.cfg | 40 -- .../.kokoro/samples/python3.13/continuous.cfg | 6 - .../samples/python3.13/periodic-head.cfg | 11 - .../.kokoro/samples/python3.13/periodic.cfg | 6 - .../.kokoro/samples/python3.13/presubmit.cfg | 6 - .../.kokoro/samples/python3.14/common.cfg | 40 -- .../.kokoro/samples/python3.14/continuous.cfg | 6 - .../samples/python3.14/periodic-head.cfg | 11 - .../.kokoro/samples/python3.14/periodic.cfg | 6 - .../.kokoro/samples/python3.14/presubmit.cfg | 6 - .../.kokoro/samples/python3.7/common.cfg | 40 -- .../.kokoro/samples/python3.7/continuous.cfg | 6 - .../samples/python3.7/periodic-head.cfg | 11 - .../.kokoro/samples/python3.7/periodic.cfg | 6 - .../.kokoro/samples/python3.7/presubmit.cfg | 6 - .../.kokoro/samples/python3.8/common.cfg | 40 -- .../.kokoro/samples/python3.8/continuous.cfg | 6 - .../samples/python3.8/periodic-head.cfg | 11 - .../.kokoro/samples/python3.8/periodic.cfg | 6 - .../.kokoro/samples/python3.8/presubmit.cfg | 6 - .../.kokoro/samples/python3.9/common.cfg | 40 -- .../.kokoro/samples/python3.9/continuous.cfg | 6 - .../samples/python3.9/periodic-head.cfg | 11 - .../.kokoro/samples/python3.9/periodic.cfg | 6 - .../.kokoro/samples/python3.9/presubmit.cfg | 6 - .../.kokoro/test-samples-against-head.sh | 26 - .../.kokoro/test-samples-impl.sh | 103 ---- .../.kokoro/test-samples.sh | 44 -- .../.kokoro/trampoline.sh | 28 - .../.kokoro/trampoline_v2.sh | 487 ------------------ packages/google-cloud-bigtable/.trampolinerc | 61 --- .../google-cloud-bigtable/docs/changelog.md | 1 - 80 files changed, 2020 deletions(-) delete mode 100644 packages/google-cloud-bigtable/.github/CODEOWNERS delete mode 100644 packages/google-cloud-bigtable/.github/CONTRIBUTING.md delete mode 100644 packages/google-cloud-bigtable/.github/ISSUE_TEMPLATE/bug_report.md delete mode 100644 packages/google-cloud-bigtable/.github/ISSUE_TEMPLATE/feature_request.md delete mode 100644 packages/google-cloud-bigtable/.github/ISSUE_TEMPLATE/support_request.md delete mode 100644 packages/google-cloud-bigtable/.github/PULL_REQUEST_TEMPLATE.md delete mode 100644 packages/google-cloud-bigtable/.github/auto-label.yaml delete mode 100644 packages/google-cloud-bigtable/.github/blunderbuss.yml delete mode 100644 packages/google-cloud-bigtable/.github/flakybot.yaml delete mode 100644 packages/google-cloud-bigtable/.github/header-checker-lint.yml delete mode 100644 packages/google-cloud-bigtable/.github/snippet-bot.yml delete mode 100644 packages/google-cloud-bigtable/.github/workflows/conformance.yaml delete mode 100644 packages/google-cloud-bigtable/.github/workflows/docs.yml delete mode 100644 packages/google-cloud-bigtable/.github/workflows/lint.yml delete mode 100644 packages/google-cloud-bigtable/.github/workflows/mypy.yml delete mode 100644 packages/google-cloud-bigtable/.github/workflows/system_emulated.yml delete mode 100644 packages/google-cloud-bigtable/.github/workflows/unittest.yml delete mode 100755 packages/google-cloud-bigtable/.kokoro/build.sh delete mode 100644 packages/google-cloud-bigtable/.kokoro/conformance.sh delete mode 100644 packages/google-cloud-bigtable/.kokoro/continuous/common.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/continuous/continuous.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/continuous/prerelease-deps.cfg delete mode 100755 packages/google-cloud-bigtable/.kokoro/populate-secrets.sh delete mode 100644 packages/google-cloud-bigtable/.kokoro/presubmit/common.cfg 
delete mode 100644 packages/google-cloud-bigtable/.kokoro/presubmit/conformance.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/presubmit/prerelease-deps.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/presubmit/presubmit.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/presubmit/system-3.9.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/presubmit/system.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/samples/lint/common.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/samples/lint/continuous.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/samples/lint/periodic.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/samples/lint/presubmit.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.10/common.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.10/continuous.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.10/periodic-head.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.10/periodic.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.10/presubmit.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.11/common.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.11/continuous.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.11/periodic-head.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.11/periodic.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.11/presubmit.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.12/common.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.12/continuous.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.12/periodic-head.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.12/periodic.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.12/presubmit.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.13/common.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.13/continuous.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.13/periodic-head.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.13/periodic.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.13/presubmit.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.14/common.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.14/continuous.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.14/periodic-head.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.14/periodic.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.14/presubmit.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.7/common.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.7/continuous.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.7/periodic-head.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.7/periodic.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.7/presubmit.cfg delete mode 100644 
packages/google-cloud-bigtable/.kokoro/samples/python3.8/common.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.8/continuous.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.8/periodic-head.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.8/periodic.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.8/presubmit.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.9/common.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.9/continuous.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.9/periodic-head.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.9/periodic.cfg delete mode 100644 packages/google-cloud-bigtable/.kokoro/samples/python3.9/presubmit.cfg delete mode 100755 packages/google-cloud-bigtable/.kokoro/test-samples-against-head.sh delete mode 100755 packages/google-cloud-bigtable/.kokoro/test-samples-impl.sh delete mode 100755 packages/google-cloud-bigtable/.kokoro/test-samples.sh delete mode 100755 packages/google-cloud-bigtable/.kokoro/trampoline.sh delete mode 100755 packages/google-cloud-bigtable/.kokoro/trampoline_v2.sh delete mode 100644 packages/google-cloud-bigtable/.trampolinerc delete mode 120000 packages/google-cloud-bigtable/docs/changelog.md diff --git a/packages/google-cloud-bigtable/.github/CODEOWNERS b/packages/google-cloud-bigtable/.github/CODEOWNERS deleted file mode 100644 index 8e8f088b7f44..000000000000 --- a/packages/google-cloud-bigtable/.github/CODEOWNERS +++ /dev/null @@ -1,12 +0,0 @@ -# Code owners file. -# This file controls who is tagged for review for any given pull request. -# -# For syntax help see: -# https://help.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners#codeowners-syntax -# Note: This file is autogenerated. To make changes to the codeowner team, please update .repo-metadata.json. - -# @googleapis/yoshi-python @googleapis/api-bigtable @googleapis/api-bigtable-partners are the default owners for changes in this repo -* @googleapis/yoshi-python @googleapis/api-bigtable @googleapis/api-bigtable-partners - -# @googleapis/python-samples-reviewers @googleapis/api-bigtable @googleapis/api-bigtable-partners are the default owners for samples changes -/samples/ @googleapis/python-samples-reviewers @googleapis/api-bigtable @googleapis/api-bigtable-partners diff --git a/packages/google-cloud-bigtable/.github/CONTRIBUTING.md b/packages/google-cloud-bigtable/.github/CONTRIBUTING.md deleted file mode 100644 index 939e5341e74d..000000000000 --- a/packages/google-cloud-bigtable/.github/CONTRIBUTING.md +++ /dev/null @@ -1,28 +0,0 @@ -# How to Contribute - -We'd love to accept your patches and contributions to this project. There are -just a few small guidelines you need to follow. - -## Contributor License Agreement - -Contributions to this project must be accompanied by a Contributor License -Agreement. You (or your employer) retain the copyright to your contribution; -this simply gives us permission to use and redistribute your contributions as -part of the project. Head over to to see -your current agreements on file or to sign a new one. - -You generally only need to submit a CLA once, so if you've already submitted one -(even if it was for a different project), you probably don't need to do it -again. - -## Code reviews - -All submissions, including submissions by project members, require review. 
We -use GitHub pull requests for this purpose. Consult -[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more -information on using pull requests. - -## Community Guidelines - -This project follows [Google's Open Source Community -Guidelines](https://opensource.google.com/conduct/). diff --git a/packages/google-cloud-bigtable/.github/ISSUE_TEMPLATE/bug_report.md b/packages/google-cloud-bigtable/.github/ISSUE_TEMPLATE/bug_report.md deleted file mode 100644 index e372a064e0f0..000000000000 --- a/packages/google-cloud-bigtable/.github/ISSUE_TEMPLATE/bug_report.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -name: Bug report -about: Create a report to help us improve - ---- - -Thanks for stopping by to let us know something could be better! - -**PLEASE READ**: If you have a support contract with Google, please create an issue in the [support console](https://cloud.google.com/support/) instead of filing on GitHub. This will ensure a timely response. - -Please run down the following list and make sure you've tried the usual "quick fixes": - - - Search the issues already opened: https://github.com/googleapis/python-bigtable/issues - - Search StackOverflow: https://stackoverflow.com/questions/tagged/google-cloud-platform+python - -If you are still having issues, please be sure to include as much information as possible: - -#### Environment details - - - OS type and version: - - Python version: `python --version` - - pip version: `pip --version` - - `google-cloud-bigtable` version: `pip show google-cloud-bigtable` - -#### Steps to reproduce - - 1. ? - 2. ? - -#### Code example - -```python -# example -``` - -#### Stack trace -``` -# example -``` - -Making sure to follow these steps will guarantee the quickest resolution possible. - -Thanks! diff --git a/packages/google-cloud-bigtable/.github/ISSUE_TEMPLATE/feature_request.md b/packages/google-cloud-bigtable/.github/ISSUE_TEMPLATE/feature_request.md deleted file mode 100644 index 6365857f33c6..000000000000 --- a/packages/google-cloud-bigtable/.github/ISSUE_TEMPLATE/feature_request.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -name: Feature request -about: Suggest an idea for this library - ---- - -Thanks for stopping by to let us know something could be better! - -**PLEASE READ**: If you have a support contract with Google, please create an issue in the [support console](https://cloud.google.com/support/) instead of filing on GitHub. This will ensure a timely response. - - **Is your feature request related to a problem? Please describe.** -A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] - **Describe the solution you'd like** -A clear and concise description of what you want to happen. - **Describe alternatives you've considered** -A clear and concise description of any alternative solutions or features you've considered. - **Additional context** -Add any other context or screenshots about the feature request here. diff --git a/packages/google-cloud-bigtable/.github/ISSUE_TEMPLATE/support_request.md b/packages/google-cloud-bigtable/.github/ISSUE_TEMPLATE/support_request.md deleted file mode 100644 index 995869032125..000000000000 --- a/packages/google-cloud-bigtable/.github/ISSUE_TEMPLATE/support_request.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -name: Support request -about: If you have a support contract with Google, please create an issue in the Google Cloud Support console. 
- ---- - -**PLEASE READ**: If you have a support contract with Google, please create an issue in the [support console](https://cloud.google.com/support/) instead of filing on GitHub. This will ensure a timely response. diff --git a/packages/google-cloud-bigtable/.github/PULL_REQUEST_TEMPLATE.md b/packages/google-cloud-bigtable/.github/PULL_REQUEST_TEMPLATE.md deleted file mode 100644 index 4499ce89ac43..000000000000 --- a/packages/google-cloud-bigtable/.github/PULL_REQUEST_TEMPLATE.md +++ /dev/null @@ -1,7 +0,0 @@ -Thank you for opening a Pull Request! Before submitting your PR, there are a few things you can do to make sure it goes smoothly: -- [ ] Make sure to open an issue as a [bug/issue](https://github.com/googleapis/python-bigtable/issues/new/choose) before writing your code! That way we can discuss the change, evaluate designs, and agree on the general idea -- [ ] Ensure the tests and linter pass -- [ ] Code coverage does not decrease (if any source code was changed) -- [ ] Appropriate docs were updated (if necessary) - -Fixes # 🦕 diff --git a/packages/google-cloud-bigtable/.github/auto-label.yaml b/packages/google-cloud-bigtable/.github/auto-label.yaml deleted file mode 100644 index 21786a4eb085..000000000000 --- a/packages/google-cloud-bigtable/.github/auto-label.yaml +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -requestsize: - enabled: true - -path: - pullrequest: true - paths: - samples: "samples" diff --git a/packages/google-cloud-bigtable/.github/blunderbuss.yml b/packages/google-cloud-bigtable/.github/blunderbuss.yml deleted file mode 100644 index 1e27e789aaa0..000000000000 --- a/packages/google-cloud-bigtable/.github/blunderbuss.yml +++ /dev/null @@ -1,20 +0,0 @@ -# Blunderbuss config -# -# This file controls who is assigned for pull requests and issues. -# Note: This file is autogenerated. To make changes to the assignee -# team, please update `codeowner_team` in `.repo-metadata.json`. -assign_issues: - - googleapis/api-bigtable - - googleapis/api-bigtable-partners - -assign_issues_by: - - labels: - - "samples" - to: - - googleapis/python-samples-reviewers - - googleapis/api-bigtable - - googleapis/api-bigtable-partners - -assign_prs: - - googleapis/api-bigtable - - googleapis/api-bigtable-partners diff --git a/packages/google-cloud-bigtable/.github/flakybot.yaml b/packages/google-cloud-bigtable/.github/flakybot.yaml deleted file mode 100644 index 2159a1bca569..000000000000 --- a/packages/google-cloud-bigtable/.github/flakybot.yaml +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -issuePriority: p2 \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.github/header-checker-lint.yml b/packages/google-cloud-bigtable/.github/header-checker-lint.yml deleted file mode 100644 index 6fe78aa7987a..000000000000 --- a/packages/google-cloud-bigtable/.github/header-checker-lint.yml +++ /dev/null @@ -1,15 +0,0 @@ -{"allowedCopyrightHolders": ["Google LLC"], - "allowedLicenses": ["Apache-2.0", "MIT", "BSD-3"], - "ignoreFiles": ["**/requirements.txt", "**/requirements-test.txt", "**/__init__.py", "samples/**/constraints.txt", "samples/**/constraints-test.txt"], - "sourceFileExtensions": [ - "ts", - "js", - "java", - "sh", - "Dockerfile", - "yaml", - "py", - "html", - "txt" - ] -} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.github/snippet-bot.yml b/packages/google-cloud-bigtable/.github/snippet-bot.yml deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/packages/google-cloud-bigtable/.github/workflows/conformance.yaml b/packages/google-cloud-bigtable/.github/workflows/conformance.yaml deleted file mode 100644 index f7396eaa997f..000000000000 --- a/packages/google-cloud-bigtable/.github/workflows/conformance.yaml +++ /dev/null @@ -1,64 +0,0 @@ -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# Github action job to test core java library features on -# downstream client libraries before they are released. -on: - push: - branches: - - main - pull_request: -name: Conformance -jobs: - conformance: - runs-on: ubuntu-latest - strategy: - matrix: - test-version: [ "v0.0.4" ] - py-version: [ 3.13 ] - client-type: [ "async", "sync"] - # None of the clients currently support reverse scans, execute query plan refresh, retry info, or routing cookie - include: - - client-type: "async" - test_args: "-skip \"PlanRefresh|_Reverse|_WithRetryInfo|_WithRoutingCookie\"" - - client-type: "sync" - test_args: "-skip \"PlanRefresh|_Reverse|_WithRetryInfo|_WithRoutingCookie|_Generic_MultiStream\"" - fail-fast: false - name: "${{ matrix.client-type }} client / python ${{ matrix.py-version }} / test tag ${{ matrix.test-version }}" - steps: - - uses: actions/checkout@v4 - name: "Checkout python-bigtable" - - uses: actions/checkout@v4 - name: "Checkout conformance tests" - with: - repository: googleapis/cloud-bigtable-clients-test - ref: ${{ matrix.test-version }} - path: cloud-bigtable-clients-test - - uses: actions/setup-python@v5 - with: - python-version: ${{ matrix.py-version }} - - uses: actions/setup-go@v5 - with: - go-version: '>=1.20.2' - - run: chmod +x .kokoro/conformance.sh - - run: pip install -e . 
- name: "Install python-bigtable from HEAD" - - run: go version - - run: .kokoro/conformance.sh - name: "Run tests" - env: - CLIENT_TYPE: ${{ matrix.client-type }} - PYTHONUNBUFFERED: 1 - TEST_ARGS: ${{ matrix.test_args }} - PROXY_PORT: 9999 - diff --git a/packages/google-cloud-bigtable/.github/workflows/docs.yml b/packages/google-cloud-bigtable/.github/workflows/docs.yml deleted file mode 100644 index 2833fe98fff0..000000000000 --- a/packages/google-cloud-bigtable/.github/workflows/docs.yml +++ /dev/null @@ -1,38 +0,0 @@ -on: - pull_request: - branches: - - main -name: docs -jobs: - docs: - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@v4 - - name: Setup Python - uses: actions/setup-python@v5 - with: - python-version: "3.10" - - name: Install nox - run: | - python -m pip install --upgrade setuptools pip wheel - python -m pip install nox - - name: Run docs - run: | - nox -s docs - docfx: - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@v4 - - name: Setup Python - uses: actions/setup-python@v5 - with: - python-version: "3.10" - - name: Install nox - run: | - python -m pip install --upgrade setuptools pip wheel - python -m pip install nox - - name: Run docfx - run: | - nox -s docfx diff --git a/packages/google-cloud-bigtable/.github/workflows/lint.yml b/packages/google-cloud-bigtable/.github/workflows/lint.yml deleted file mode 100644 index 9a0598202bb2..000000000000 --- a/packages/google-cloud-bigtable/.github/workflows/lint.yml +++ /dev/null @@ -1,25 +0,0 @@ -on: - pull_request: - branches: - - main -name: lint -jobs: - lint: - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@v4 - - name: Setup Python - uses: actions/setup-python@v5 - with: - python-version: "3.13" - - name: Install nox - run: | - python -m pip install --upgrade setuptools pip wheel - python -m pip install nox - - name: Run lint - run: | - nox -s lint - - name: Run lint_setup_py - run: | - nox -s lint_setup_py diff --git a/packages/google-cloud-bigtable/.github/workflows/mypy.yml b/packages/google-cloud-bigtable/.github/workflows/mypy.yml deleted file mode 100644 index f2b78a5363f3..000000000000 --- a/packages/google-cloud-bigtable/.github/workflows/mypy.yml +++ /dev/null @@ -1,22 +0,0 @@ -on: - pull_request: - branches: - - main -name: mypy -jobs: - mypy: - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@v4 - - name: Setup Python - uses: actions/setup-python@v5 - with: - python-version: "3.13" - - name: Install nox - run: | - python -m pip install --upgrade setuptools pip wheel - python -m pip install nox - - name: Run mypy - run: | - nox -s mypy diff --git a/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml b/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml deleted file mode 100644 index d8bbbb639a08..000000000000 --- a/packages/google-cloud-bigtable/.github/workflows/system_emulated.yml +++ /dev/null @@ -1,29 +0,0 @@ -name: "Run systests on emulator" -on: - pull_request: - branches: - - main - -jobs: - - run-systests: - runs-on: ubuntu-22.04 - - steps: - - - name: Checkout - uses: actions/checkout@v4 - - - name: Setup Python - uses: actions/setup-python@v5 - with: - python-version: '3.13' - - - name: Setup GCloud SDK - uses: google-github-actions/setup-gcloud@v2.1.1 - - - name: Install / run Nox - run: | - python -m pip install --upgrade setuptools pip - python -m pip install nox - nox -s system_emulated diff --git 
a/packages/google-cloud-bigtable/.github/workflows/unittest.yml b/packages/google-cloud-bigtable/.github/workflows/unittest.yml deleted file mode 100644 index dad646c6b9e6..000000000000 --- a/packages/google-cloud-bigtable/.github/workflows/unittest.yml +++ /dev/null @@ -1,61 +0,0 @@ -on: - pull_request: - branches: - - main -name: unittest -jobs: - unit: - # TODO(https://github.com/googleapis/gapic-generator-python/issues/2303): use `ubuntu-latest` once this bug is fixed. - # Use ubuntu-22.04 until Python 3.7 is removed from the test matrix - # https://docs.github.com/en/actions/using-github-hosted-runners/using-github-hosted-runners/about-github-hosted-runners#standard-github-hosted-runners-for-public-repositories - runs-on: ubuntu-22.04 - strategy: - matrix: - python: ['3.7', '3.8', '3.9', '3.10', '3.11', '3.12', '3.13', '3.14'] - steps: - - name: Checkout - uses: actions/checkout@v4 - - name: Setup Python - uses: actions/setup-python@v5 - with: - python-version: ${{ matrix.python }} - - name: Install nox - run: | - python -m pip install --upgrade setuptools pip wheel - python -m pip install nox - - name: Run unit tests - env: - COVERAGE_FILE: .coverage-${{ matrix.python }} - run: | - nox -s unit-${{ matrix.python }} - - name: Upload coverage results - uses: actions/upload-artifact@v4 - with: - name: coverage-artifact-${{ matrix.python }} - path: .coverage-${{ matrix.python }} - include-hidden-files: true - - cover: - runs-on: ubuntu-latest - needs: - - unit - steps: - - name: Checkout - uses: actions/checkout@v4 - - name: Setup Python - uses: actions/setup-python@v5 - with: - python-version: "3.13" - - name: Install coverage - run: | - python -m pip install --upgrade setuptools pip wheel - python -m pip install coverage - - name: Download coverage results - uses: actions/download-artifact@v4 - with: - path: .coverage-results/ - - name: Report coverage results - run: | - find .coverage-results -type f -name '*.zip' -exec unzip {} \; - coverage combine .coverage-results/**/.coverage* - coverage report --show-missing --fail-under=99 diff --git a/packages/google-cloud-bigtable/.kokoro/build.sh b/packages/google-cloud-bigtable/.kokoro/build.sh deleted file mode 100755 index d41b45aa1dd0..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/build.sh +++ /dev/null @@ -1,60 +0,0 @@ -#!/bin/bash -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -eo pipefail - -CURRENT_DIR=$(dirname "${BASH_SOURCE[0]}") - -if [[ -z "${PROJECT_ROOT:-}" ]]; then - PROJECT_ROOT=$(realpath "${CURRENT_DIR}/..") -fi - -pushd "${PROJECT_ROOT}" - -# Disable buffering, so that the logs stream through. -export PYTHONUNBUFFERED=1 - -# Debug: show build environment -env | grep KOKORO - -# Setup service account credentials. -if [[ -f "${KOKORO_GFILE_DIR}/service-account.json" ]] -then - export GOOGLE_APPLICATION_CREDENTIALS=${KOKORO_GFILE_DIR}/service-account.json -fi - -# Setup project id. 
-if [[ -f "${KOKORO_GFILE_DIR}/project-id.json" ]] -then - export PROJECT_ID=$(cat "${KOKORO_GFILE_DIR}/project-id.json") -fi - -# If this is a continuous build, send the test log to the FlakyBot. -# See https://github.com/googleapis/repo-automation-bots/tree/main/packages/flakybot. -if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"continuous"* ]]; then - cleanup() { - chmod +x $KOKORO_GFILE_DIR/linux_amd64/flakybot - $KOKORO_GFILE_DIR/linux_amd64/flakybot - } - trap cleanup EXIT HUP -fi - -# If NOX_SESSION is set, it only runs the specified session, -# otherwise run all the sessions. -if [[ -n "${NOX_SESSION:-}" ]]; then - python3 -m nox -s ${NOX_SESSION:-} -else - python3 -m nox -fi diff --git a/packages/google-cloud-bigtable/.kokoro/conformance.sh b/packages/google-cloud-bigtable/.kokoro/conformance.sh deleted file mode 100644 index fd585142ec27..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/conformance.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/bin/bash - -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -eo pipefail - -## cd to the parent directory, i.e. the root of the git repo -cd $(dirname $0)/.. - -# Build and start the proxy in a separate process -pushd test_proxy -nohup python test_proxy.py --port $PROXY_PORT --client_type=$CLIENT_TYPE & -proxyPID=$! -popd - -# Kill proxy on exit -function cleanup() { - echo "Cleanup testbench"; - kill $proxyPID -} -trap cleanup EXIT - -# Run the conformance test -echo "running tests with args: $TEST_ARGS" -pushd cloud-bigtable-clients-test/tests -eval "go test -v -proxy_addr=:$PROXY_PORT $TEST_ARGS" -RETURN_CODE=$? -popd - -echo "exiting with ${RETURN_CODE}" -exit ${RETURN_CODE} diff --git a/packages/google-cloud-bigtable/.kokoro/continuous/common.cfg b/packages/google-cloud-bigtable/.kokoro/continuous/common.cfg deleted file mode 100644 index 69e0570b844b..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/continuous/common.cfg +++ /dev/null @@ -1,27 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -# Build logs will be here -action { - define_artifacts { - regex: "**/*sponge_log.xml" - } -} - -# Download trampoline resources. -gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" - -# Download resources for system tests (service account key, etc.) -gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/google-cloud-python" - -# Use the trampoline script to run in docker. -build_file: "python-bigtable/.kokoro/trampoline.sh" - -# Configure the docker image for kokoro-trampoline. 
-env_vars: { - key: "TRAMPOLINE_IMAGE" - value: "gcr.io/cloud-devrel-kokoro-resources/python-multi" -} -env_vars: { - key: "TRAMPOLINE_BUILD_FILE" - value: "github/python-bigtable/.kokoro/build.sh" -} diff --git a/packages/google-cloud-bigtable/.kokoro/continuous/continuous.cfg b/packages/google-cloud-bigtable/.kokoro/continuous/continuous.cfg deleted file mode 100644 index 8f43917d92fe..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/continuous/continuous.cfg +++ /dev/null @@ -1 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/continuous/prerelease-deps.cfg b/packages/google-cloud-bigtable/.kokoro/continuous/prerelease-deps.cfg deleted file mode 100644 index 3595fb43f5c0..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/continuous/prerelease-deps.cfg +++ /dev/null @@ -1,7 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -# Only run this nox session. -env_vars: { - key: "NOX_SESSION" - value: "prerelease_deps" -} diff --git a/packages/google-cloud-bigtable/.kokoro/populate-secrets.sh b/packages/google-cloud-bigtable/.kokoro/populate-secrets.sh deleted file mode 100755 index c435402f473e..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/populate-secrets.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/bin/bash -# Copyright 2024 Google LLC. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -eo pipefail - -function now { date +"%Y-%m-%d %H:%M:%S" | tr -d '\n' ;} -function msg { println "$*" >&2 ;} -function println { printf '%s\n' "$(now) $*" ;} - - -# Populates requested secrets set in SECRET_MANAGER_KEYS from service account: -# kokoro-trampoline@cloud-devrel-kokoro-resources.iam.gserviceaccount.com -SECRET_LOCATION="${KOKORO_GFILE_DIR}/secret_manager" -msg "Creating folder on disk for secrets: ${SECRET_LOCATION}" -mkdir -p ${SECRET_LOCATION} -for key in $(echo ${SECRET_MANAGER_KEYS} | sed "s/,/ /g") -do - msg "Retrieving secret ${key}" - docker run --entrypoint=gcloud \ - --volume=${KOKORO_GFILE_DIR}:${KOKORO_GFILE_DIR} \ - gcr.io/google.com/cloudsdktool/cloud-sdk \ - secrets versions access latest \ - --project cloud-devrel-kokoro-resources \ - --secret ${key} > \ - "${SECRET_LOCATION}/${key}" - if [[ $? == 0 ]]; then - msg "Secret written to ${SECRET_LOCATION}/${key}" - else - msg "Error retrieving secret ${key}" - fi -done diff --git a/packages/google-cloud-bigtable/.kokoro/presubmit/common.cfg b/packages/google-cloud-bigtable/.kokoro/presubmit/common.cfg deleted file mode 100644 index 69e0570b844b..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/presubmit/common.cfg +++ /dev/null @@ -1,27 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -# Build logs will be here -action { - define_artifacts { - regex: "**/*sponge_log.xml" - } -} - -# Download trampoline resources. -gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" - -# Download resources for system tests (service account key, etc.) 
-gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/google-cloud-python" - -# Use the trampoline script to run in docker. -build_file: "python-bigtable/.kokoro/trampoline.sh" - -# Configure the docker image for kokoro-trampoline. -env_vars: { - key: "TRAMPOLINE_IMAGE" - value: "gcr.io/cloud-devrel-kokoro-resources/python-multi" -} -env_vars: { - key: "TRAMPOLINE_BUILD_FILE" - value: "github/python-bigtable/.kokoro/build.sh" -} diff --git a/packages/google-cloud-bigtable/.kokoro/presubmit/conformance.cfg b/packages/google-cloud-bigtable/.kokoro/presubmit/conformance.cfg deleted file mode 100644 index 4f44e8a78df0..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/presubmit/conformance.cfg +++ /dev/null @@ -1,6 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -env_vars: { - key: "NOX_SESSION" - value: "conformance" -} diff --git a/packages/google-cloud-bigtable/.kokoro/presubmit/prerelease-deps.cfg b/packages/google-cloud-bigtable/.kokoro/presubmit/prerelease-deps.cfg deleted file mode 100644 index 3595fb43f5c0..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/presubmit/prerelease-deps.cfg +++ /dev/null @@ -1,7 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -# Only run this nox session. -env_vars: { - key: "NOX_SESSION" - value: "prerelease_deps" -} diff --git a/packages/google-cloud-bigtable/.kokoro/presubmit/presubmit.cfg b/packages/google-cloud-bigtable/.kokoro/presubmit/presubmit.cfg deleted file mode 100644 index b158096f0ae2..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/presubmit/presubmit.cfg +++ /dev/null @@ -1,7 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -# Disable system tests. -env_vars: { - key: "RUN_SYSTEM_TESTS" - value: "false" -} diff --git a/packages/google-cloud-bigtable/.kokoro/presubmit/system-3.9.cfg b/packages/google-cloud-bigtable/.kokoro/presubmit/system-3.9.cfg deleted file mode 100644 index b8ae66b376ff..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/presubmit/system-3.9.cfg +++ /dev/null @@ -1,7 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -# Only run this nox session. -env_vars: { - key: "NOX_SESSION" - value: "system-3.9" -} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/presubmit/system.cfg b/packages/google-cloud-bigtable/.kokoro/presubmit/system.cfg deleted file mode 100644 index b8ae66b376ff..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/presubmit/system.cfg +++ /dev/null @@ -1,7 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -# Only run this nox session. -env_vars: { - key: "NOX_SESSION" - value: "system-3.9" -} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/lint/common.cfg b/packages/google-cloud-bigtable/.kokoro/samples/lint/common.cfg deleted file mode 100644 index 54b069fd0d4f..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/samples/lint/common.cfg +++ /dev/null @@ -1,34 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -# Build logs will be here -action { - define_artifacts { - regex: "**/*sponge_log.xml" - } -} - -# Specify which tests to run -env_vars: { - key: "RUN_TESTS_SESSION" - value: "lint" -} - -env_vars: { - key: "TRAMPOLINE_BUILD_FILE" - value: "github/python-bigtable/.kokoro/test-samples.sh" -} - -# Configure the docker image for kokoro-trampoline. 
-env_vars: { - key: "TRAMPOLINE_IMAGE" - value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker" -} - -# Download secrets for samples -gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" - -# Download trampoline resources. -gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" - -# Use the trampoline script to run in docker. -build_file: "python-bigtable/.kokoro/trampoline_v2.sh" \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/lint/continuous.cfg b/packages/google-cloud-bigtable/.kokoro/samples/lint/continuous.cfg deleted file mode 100644 index a1c8d9759c88..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/samples/lint/continuous.cfg +++ /dev/null @@ -1,6 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -env_vars: { - key: "INSTALL_LIBRARY_FROM_SOURCE" - value: "True" -} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/lint/periodic.cfg b/packages/google-cloud-bigtable/.kokoro/samples/lint/periodic.cfg deleted file mode 100644 index 50fec9649732..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/samples/lint/periodic.cfg +++ /dev/null @@ -1,6 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -env_vars: { - key: "INSTALL_LIBRARY_FROM_SOURCE" - value: "False" -} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/lint/presubmit.cfg b/packages/google-cloud-bigtable/.kokoro/samples/lint/presubmit.cfg deleted file mode 100644 index a1c8d9759c88..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/samples/lint/presubmit.cfg +++ /dev/null @@ -1,6 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -env_vars: { - key: "INSTALL_LIBRARY_FROM_SOURCE" - value: "True" -} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.10/common.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.10/common.cfg deleted file mode 100644 index 0dc18096b8cc..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.10/common.cfg +++ /dev/null @@ -1,40 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -# Build logs will be here -action { - define_artifacts { - regex: "**/*sponge_log.xml" - } -} - -# Specify which tests to run -env_vars: { - key: "RUN_TESTS_SESSION" - value: "py-3.10" -} - -# Declare build specific Cloud project. -env_vars: { - key: "BUILD_SPECIFIC_GCLOUD_PROJECT" - value: "python-docs-samples-tests-310" -} - -env_vars: { - key: "TRAMPOLINE_BUILD_FILE" - value: "github/python-bigtable/.kokoro/test-samples.sh" -} - -# Configure the docker image for kokoro-trampoline. -env_vars: { - key: "TRAMPOLINE_IMAGE" - value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker" -} - -# Download secrets for samples -gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" - -# Download trampoline resources. -gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" - -# Use the trampoline script to run in docker. 
-build_file: "python-bigtable/.kokoro/trampoline_v2.sh" \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.10/continuous.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.10/continuous.cfg deleted file mode 100644 index a1c8d9759c88..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.10/continuous.cfg +++ /dev/null @@ -1,6 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -env_vars: { - key: "INSTALL_LIBRARY_FROM_SOURCE" - value: "True" -} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.10/periodic-head.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.10/periodic-head.cfg deleted file mode 100644 index be25a34f9ad3..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.10/periodic-head.cfg +++ /dev/null @@ -1,11 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -env_vars: { - key: "INSTALL_LIBRARY_FROM_SOURCE" - value: "True" -} - -env_vars: { - key: "TRAMPOLINE_BUILD_FILE" - value: "github/python-bigtable/.kokoro/test-samples-against-head.sh" -} diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.10/periodic.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.10/periodic.cfg deleted file mode 100644 index 71cd1e597e38..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.10/periodic.cfg +++ /dev/null @@ -1,6 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -env_vars: { - key: "INSTALL_LIBRARY_FROM_SOURCE" - value: "False" -} diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.10/presubmit.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.10/presubmit.cfg deleted file mode 100644 index a1c8d9759c88..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.10/presubmit.cfg +++ /dev/null @@ -1,6 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -env_vars: { - key: "INSTALL_LIBRARY_FROM_SOURCE" - value: "True" -} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.11/common.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.11/common.cfg deleted file mode 100644 index 467d405ae833..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.11/common.cfg +++ /dev/null @@ -1,40 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -# Build logs will be here -action { - define_artifacts { - regex: "**/*sponge_log.xml" - } -} - -# Specify which tests to run -env_vars: { - key: "RUN_TESTS_SESSION" - value: "py-3.11" -} - -# Declare build specific Cloud project. -env_vars: { - key: "BUILD_SPECIFIC_GCLOUD_PROJECT" - value: "python-docs-samples-tests-311" -} - -env_vars: { - key: "TRAMPOLINE_BUILD_FILE" - value: "github/python-bigtable/.kokoro/test-samples.sh" -} - -# Configure the docker image for kokoro-trampoline. -env_vars: { - key: "TRAMPOLINE_IMAGE" - value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker" -} - -# Download secrets for samples -gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" - -# Download trampoline resources. -gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" - -# Use the trampoline script to run in docker. 
-build_file: "python-bigtable/.kokoro/trampoline_v2.sh" \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.11/continuous.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.11/continuous.cfg deleted file mode 100644 index a1c8d9759c88..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.11/continuous.cfg +++ /dev/null @@ -1,6 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -env_vars: { - key: "INSTALL_LIBRARY_FROM_SOURCE" - value: "True" -} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.11/periodic-head.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.11/periodic-head.cfg deleted file mode 100644 index be25a34f9ad3..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.11/periodic-head.cfg +++ /dev/null @@ -1,11 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -env_vars: { - key: "INSTALL_LIBRARY_FROM_SOURCE" - value: "True" -} - -env_vars: { - key: "TRAMPOLINE_BUILD_FILE" - value: "github/python-bigtable/.kokoro/test-samples-against-head.sh" -} diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.11/periodic.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.11/periodic.cfg deleted file mode 100644 index 71cd1e597e38..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.11/periodic.cfg +++ /dev/null @@ -1,6 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -env_vars: { - key: "INSTALL_LIBRARY_FROM_SOURCE" - value: "False" -} diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.11/presubmit.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.11/presubmit.cfg deleted file mode 100644 index a1c8d9759c88..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.11/presubmit.cfg +++ /dev/null @@ -1,6 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -env_vars: { - key: "INSTALL_LIBRARY_FROM_SOURCE" - value: "True" -} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.12/common.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.12/common.cfg deleted file mode 100644 index 34e0a95f3cf4..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.12/common.cfg +++ /dev/null @@ -1,40 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -# Build logs will be here -action { - define_artifacts { - regex: "**/*sponge_log.xml" - } -} - -# Specify which tests to run -env_vars: { - key: "RUN_TESTS_SESSION" - value: "py-3.12" -} - -# Declare build specific Cloud project. -env_vars: { - key: "BUILD_SPECIFIC_GCLOUD_PROJECT" - value: "python-docs-samples-tests-312" -} - -env_vars: { - key: "TRAMPOLINE_BUILD_FILE" - value: "github/python-bigtable/.kokoro/test-samples.sh" -} - -# Configure the docker image for kokoro-trampoline. -env_vars: { - key: "TRAMPOLINE_IMAGE" - value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker" -} - -# Download secrets for samples -gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" - -# Download trampoline resources. -gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" - -# Use the trampoline script to run in docker. 
-build_file: "python-bigtable/.kokoro/trampoline_v2.sh" \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.12/continuous.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.12/continuous.cfg deleted file mode 100644 index a1c8d9759c88..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.12/continuous.cfg +++ /dev/null @@ -1,6 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -env_vars: { - key: "INSTALL_LIBRARY_FROM_SOURCE" - value: "True" -} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.12/periodic-head.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.12/periodic-head.cfg deleted file mode 100644 index be25a34f9ad3..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.12/periodic-head.cfg +++ /dev/null @@ -1,11 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -env_vars: { - key: "INSTALL_LIBRARY_FROM_SOURCE" - value: "True" -} - -env_vars: { - key: "TRAMPOLINE_BUILD_FILE" - value: "github/python-bigtable/.kokoro/test-samples-against-head.sh" -} diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.12/periodic.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.12/periodic.cfg deleted file mode 100644 index 71cd1e597e38..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.12/periodic.cfg +++ /dev/null @@ -1,6 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -env_vars: { - key: "INSTALL_LIBRARY_FROM_SOURCE" - value: "False" -} diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.12/presubmit.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.12/presubmit.cfg deleted file mode 100644 index a1c8d9759c88..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.12/presubmit.cfg +++ /dev/null @@ -1,6 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -env_vars: { - key: "INSTALL_LIBRARY_FROM_SOURCE" - value: "True" -} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.13/common.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.13/common.cfg deleted file mode 100644 index 15ba807cb6cd..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.13/common.cfg +++ /dev/null @@ -1,40 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -# Build logs will be here -action { - define_artifacts { - regex: "**/*sponge_log.xml" - } -} - -# Specify which tests to run -env_vars: { - key: "RUN_TESTS_SESSION" - value: "py-3.13" -} - -# Declare build specific Cloud project. -env_vars: { - key: "BUILD_SPECIFIC_GCLOUD_PROJECT" - value: "python-docs-samples-tests-313" -} - -env_vars: { - key: "TRAMPOLINE_BUILD_FILE" - value: "github/python-bigtable/.kokoro/test-samples.sh" -} - -# Configure the docker image for kokoro-trampoline. -env_vars: { - key: "TRAMPOLINE_IMAGE" - value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker" -} - -# Download secrets for samples -gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" - -# Download trampoline resources. -gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" - -# Use the trampoline script to run in docker. 
-build_file: "python-bigtable/.kokoro/trampoline_v2.sh" diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.13/continuous.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.13/continuous.cfg deleted file mode 100644 index a1c8d9759c88..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.13/continuous.cfg +++ /dev/null @@ -1,6 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -env_vars: { - key: "INSTALL_LIBRARY_FROM_SOURCE" - value: "True" -} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.13/periodic-head.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.13/periodic-head.cfg deleted file mode 100644 index be25a34f9ad3..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.13/periodic-head.cfg +++ /dev/null @@ -1,11 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -env_vars: { - key: "INSTALL_LIBRARY_FROM_SOURCE" - value: "True" -} - -env_vars: { - key: "TRAMPOLINE_BUILD_FILE" - value: "github/python-bigtable/.kokoro/test-samples-against-head.sh" -} diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.13/periodic.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.13/periodic.cfg deleted file mode 100644 index 71cd1e597e38..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.13/periodic.cfg +++ /dev/null @@ -1,6 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -env_vars: { - key: "INSTALL_LIBRARY_FROM_SOURCE" - value: "False" -} diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.13/presubmit.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.13/presubmit.cfg deleted file mode 100644 index a1c8d9759c88..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.13/presubmit.cfg +++ /dev/null @@ -1,6 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -env_vars: { - key: "INSTALL_LIBRARY_FROM_SOURCE" - value: "True" -} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.14/common.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.14/common.cfg deleted file mode 100644 index a9ea06119a94..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.14/common.cfg +++ /dev/null @@ -1,40 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -# Build logs will be here -action { - define_artifacts { - regex: "**/*sponge_log.xml" - } -} - -# Specify which tests to run -env_vars: { - key: "RUN_TESTS_SESSION" - value: "py-3.14" -} - -# Declare build specific Cloud project. -env_vars: { - key: "BUILD_SPECIFIC_GCLOUD_PROJECT" - value: "python-docs-samples-tests-314" -} - -env_vars: { - key: "TRAMPOLINE_BUILD_FILE" - value: "github/python-bigtable/.kokoro/test-samples.sh" -} - -# Configure the docker image for kokoro-trampoline. -env_vars: { - key: "TRAMPOLINE_IMAGE" - value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker" -} - -# Download secrets for samples -gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" - -# Download trampoline resources. -gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" - -# Use the trampoline script to run in docker. 
-build_file: "python-bigtable/.kokoro/trampoline_v2.sh" diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.14/continuous.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.14/continuous.cfg deleted file mode 100644 index a1c8d9759c88..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.14/continuous.cfg +++ /dev/null @@ -1,6 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -env_vars: { - key: "INSTALL_LIBRARY_FROM_SOURCE" - value: "True" -} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.14/periodic-head.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.14/periodic-head.cfg deleted file mode 100644 index be25a34f9ad3..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.14/periodic-head.cfg +++ /dev/null @@ -1,11 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -env_vars: { - key: "INSTALL_LIBRARY_FROM_SOURCE" - value: "True" -} - -env_vars: { - key: "TRAMPOLINE_BUILD_FILE" - value: "github/python-bigtable/.kokoro/test-samples-against-head.sh" -} diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.14/periodic.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.14/periodic.cfg deleted file mode 100644 index 71cd1e597e38..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.14/periodic.cfg +++ /dev/null @@ -1,6 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -env_vars: { - key: "INSTALL_LIBRARY_FROM_SOURCE" - value: "False" -} diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.14/presubmit.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.14/presubmit.cfg deleted file mode 100644 index a1c8d9759c88..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.14/presubmit.cfg +++ /dev/null @@ -1,6 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -env_vars: { - key: "INSTALL_LIBRARY_FROM_SOURCE" - value: "True" -} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.7/common.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.7/common.cfg deleted file mode 100644 index 7db66bb86d7a..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.7/common.cfg +++ /dev/null @@ -1,40 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -# Build logs will be here -action { - define_artifacts { - regex: "**/*sponge_log.xml" - } -} - -# Specify which tests to run -env_vars: { - key: "RUN_TESTS_SESSION" - value: "py-3.7" -} - -# Declare build specific Cloud project. -env_vars: { - key: "BUILD_SPECIFIC_GCLOUD_PROJECT" - value: "python-docs-samples-tests-py37" -} - -env_vars: { - key: "TRAMPOLINE_BUILD_FILE" - value: "github/python-bigtable/.kokoro/test-samples.sh" -} - -# Configure the docker image for kokoro-trampoline. -env_vars: { - key: "TRAMPOLINE_IMAGE" - value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker" -} - -# Download secrets for samples -gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" - -# Download trampoline resources. -gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" - -# Use the trampoline script to run in docker. 
-build_file: "python-bigtable/.kokoro/trampoline_v2.sh" \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.7/continuous.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.7/continuous.cfg deleted file mode 100644 index a1c8d9759c88..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.7/continuous.cfg +++ /dev/null @@ -1,6 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -env_vars: { - key: "INSTALL_LIBRARY_FROM_SOURCE" - value: "True" -} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.7/periodic-head.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.7/periodic-head.cfg deleted file mode 100644 index be25a34f9ad3..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.7/periodic-head.cfg +++ /dev/null @@ -1,11 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -env_vars: { - key: "INSTALL_LIBRARY_FROM_SOURCE" - value: "True" -} - -env_vars: { - key: "TRAMPOLINE_BUILD_FILE" - value: "github/python-bigtable/.kokoro/test-samples-against-head.sh" -} diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.7/periodic.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.7/periodic.cfg deleted file mode 100644 index 71cd1e597e38..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.7/periodic.cfg +++ /dev/null @@ -1,6 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -env_vars: { - key: "INSTALL_LIBRARY_FROM_SOURCE" - value: "False" -} diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.7/presubmit.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.7/presubmit.cfg deleted file mode 100644 index a1c8d9759c88..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.7/presubmit.cfg +++ /dev/null @@ -1,6 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -env_vars: { - key: "INSTALL_LIBRARY_FROM_SOURCE" - value: "True" -} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.8/common.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.8/common.cfg deleted file mode 100644 index 482008891964..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.8/common.cfg +++ /dev/null @@ -1,40 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -# Build logs will be here -action { - define_artifacts { - regex: "**/*sponge_log.xml" - } -} - -# Specify which tests to run -env_vars: { - key: "RUN_TESTS_SESSION" - value: "py-3.8" -} - -# Declare build specific Cloud project. -env_vars: { - key: "BUILD_SPECIFIC_GCLOUD_PROJECT" - value: "python-docs-samples-tests-py38" -} - -env_vars: { - key: "TRAMPOLINE_BUILD_FILE" - value: "github/python-bigtable/.kokoro/test-samples.sh" -} - -# Configure the docker image for kokoro-trampoline. -env_vars: { - key: "TRAMPOLINE_IMAGE" - value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker" -} - -# Download secrets for samples -gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" - -# Download trampoline resources. -gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" - -# Use the trampoline script to run in docker. 
-build_file: "python-bigtable/.kokoro/trampoline_v2.sh" \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.8/continuous.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.8/continuous.cfg deleted file mode 100644 index a1c8d9759c88..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.8/continuous.cfg +++ /dev/null @@ -1,6 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -env_vars: { - key: "INSTALL_LIBRARY_FROM_SOURCE" - value: "True" -} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.8/periodic-head.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.8/periodic-head.cfg deleted file mode 100644 index be25a34f9ad3..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.8/periodic-head.cfg +++ /dev/null @@ -1,11 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -env_vars: { - key: "INSTALL_LIBRARY_FROM_SOURCE" - value: "True" -} - -env_vars: { - key: "TRAMPOLINE_BUILD_FILE" - value: "github/python-bigtable/.kokoro/test-samples-against-head.sh" -} diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.8/periodic.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.8/periodic.cfg deleted file mode 100644 index 71cd1e597e38..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.8/periodic.cfg +++ /dev/null @@ -1,6 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -env_vars: { - key: "INSTALL_LIBRARY_FROM_SOURCE" - value: "False" -} diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.8/presubmit.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.8/presubmit.cfg deleted file mode 100644 index a1c8d9759c88..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.8/presubmit.cfg +++ /dev/null @@ -1,6 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -env_vars: { - key: "INSTALL_LIBRARY_FROM_SOURCE" - value: "True" -} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.9/common.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.9/common.cfg deleted file mode 100644 index 4e3b12fcc4ce..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.9/common.cfg +++ /dev/null @@ -1,40 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -# Build logs will be here -action { - define_artifacts { - regex: "**/*sponge_log.xml" - } -} - -# Specify which tests to run -env_vars: { - key: "RUN_TESTS_SESSION" - value: "py-3.9" -} - -# Declare build specific Cloud project. -env_vars: { - key: "BUILD_SPECIFIC_GCLOUD_PROJECT" - value: "python-docs-samples-tests-py39" -} - -env_vars: { - key: "TRAMPOLINE_BUILD_FILE" - value: "github/python-bigtable/.kokoro/test-samples.sh" -} - -# Configure the docker image for kokoro-trampoline. -env_vars: { - key: "TRAMPOLINE_IMAGE" - value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker" -} - -# Download secrets for samples -gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" - -# Download trampoline resources. -gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" - -# Use the trampoline script to run in docker. 
-build_file: "python-bigtable/.kokoro/trampoline_v2.sh" \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.9/continuous.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.9/continuous.cfg deleted file mode 100644 index a1c8d9759c88..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.9/continuous.cfg +++ /dev/null @@ -1,6 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -env_vars: { - key: "INSTALL_LIBRARY_FROM_SOURCE" - value: "True" -} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.9/periodic-head.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.9/periodic-head.cfg deleted file mode 100644 index be25a34f9ad3..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.9/periodic-head.cfg +++ /dev/null @@ -1,11 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -env_vars: { - key: "INSTALL_LIBRARY_FROM_SOURCE" - value: "True" -} - -env_vars: { - key: "TRAMPOLINE_BUILD_FILE" - value: "github/python-bigtable/.kokoro/test-samples-against-head.sh" -} diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.9/periodic.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.9/periodic.cfg deleted file mode 100644 index 71cd1e597e38..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.9/periodic.cfg +++ /dev/null @@ -1,6 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -env_vars: { - key: "INSTALL_LIBRARY_FROM_SOURCE" - value: "False" -} diff --git a/packages/google-cloud-bigtable/.kokoro/samples/python3.9/presubmit.cfg b/packages/google-cloud-bigtable/.kokoro/samples/python3.9/presubmit.cfg deleted file mode 100644 index a1c8d9759c88..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/samples/python3.9/presubmit.cfg +++ /dev/null @@ -1,6 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -env_vars: { - key: "INSTALL_LIBRARY_FROM_SOURCE" - value: "True" -} \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/test-samples-against-head.sh b/packages/google-cloud-bigtable/.kokoro/test-samples-against-head.sh deleted file mode 100755 index e9d8bd79a644..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/test-samples-against-head.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# A customized test runner for samples. -# -# For periodic builds, you can specify this file for testing against head. 
- -# `-e` enables the script to automatically fail when a command fails -# `-o pipefail` sets the exit code to the rightmost comment to exit with a non-zero -set -eo pipefail -# Enables `**` to include files nested inside sub-folders -shopt -s globstar - -exec .kokoro/test-samples-impl.sh diff --git a/packages/google-cloud-bigtable/.kokoro/test-samples-impl.sh b/packages/google-cloud-bigtable/.kokoro/test-samples-impl.sh deleted file mode 100755 index 53e365bc4e79..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/test-samples-impl.sh +++ /dev/null @@ -1,103 +0,0 @@ -#!/bin/bash -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -# `-e` enables the script to automatically fail when a command fails -# `-o pipefail` sets the exit code to the rightmost comment to exit with a non-zero -set -eo pipefail -# Enables `**` to include files nested inside sub-folders -shopt -s globstar - -# Exit early if samples don't exist -if ! find samples -name 'requirements.txt' | grep -q .; then - echo "No tests run. './samples/**/requirements.txt' not found" - exit 0 -fi - -# Disable buffering, so that the logs stream through. -export PYTHONUNBUFFERED=1 - -# Debug: show build environment -env | grep KOKORO - -# Install nox -# `virtualenv==20.26.6` is added for Python 3.7 compatibility -python3.9 -m pip install --upgrade --quiet nox virtualenv==20.26.6 - -# Use secrets acessor service account to get secrets -if [[ -f "${KOKORO_GFILE_DIR}/secrets_viewer_service_account.json" ]]; then - gcloud auth activate-service-account \ - --key-file="${KOKORO_GFILE_DIR}/secrets_viewer_service_account.json" \ - --project="cloud-devrel-kokoro-resources" -fi - -# This script will create 3 files: -# - testing/test-env.sh -# - testing/service-account.json -# - testing/client-secrets.json -./scripts/decrypt-secrets.sh - -source ./testing/test-env.sh -export GOOGLE_APPLICATION_CREDENTIALS=$(pwd)/testing/service-account.json - -# For cloud-run session, we activate the service account for gcloud sdk. -gcloud auth activate-service-account \ - --key-file "${GOOGLE_APPLICATION_CREDENTIALS}" - -export GOOGLE_CLIENT_SECRETS=$(pwd)/testing/client-secrets.json - -echo -e "\n******************** TESTING PROJECTS ********************" - -# Switch to 'fail at end' to allow all tests to complete before exiting. -set +e -# Use RTN to return a non-zero value if the test fails. -RTN=0 -ROOT=$(pwd) -# Find all requirements.txt in the samples directory (may break on whitespace). -for file in samples/**/requirements.txt; do - cd "$ROOT" - # Navigate to the project folder. - file=$(dirname "$file") - cd "$file" - - echo "------------------------------------------------------------" - echo "- testing $file" - echo "------------------------------------------------------------" - - # Use nox to execute the tests for the project. - python3.9 -m nox -s "$RUN_TESTS_SESSION" - EXIT=$? - - # If this is a periodic build, send the test log to the FlakyBot. 
- # See https://github.com/googleapis/repo-automation-bots/tree/main/packages/flakybot. - if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"periodic"* ]]; then - chmod +x $KOKORO_GFILE_DIR/linux_amd64/flakybot - $KOKORO_GFILE_DIR/linux_amd64/flakybot - fi - - if [[ $EXIT -ne 0 ]]; then - RTN=1 - echo -e "\n Testing failed: Nox returned a non-zero exit code. \n" - else - echo -e "\n Testing completed.\n" - fi - -done -cd "$ROOT" - -# Workaround for Kokoro permissions issue: delete secrets -rm testing/{test-env.sh,client-secrets.json,service-account.json} - -exit "$RTN" diff --git a/packages/google-cloud-bigtable/.kokoro/test-samples.sh b/packages/google-cloud-bigtable/.kokoro/test-samples.sh deleted file mode 100755 index 7933d820149a..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/test-samples.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/bash -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# The default test runner for samples. -# -# For periodic builds, we rewinds the repo to the latest release, and -# run test-samples-impl.sh. - -# `-e` enables the script to automatically fail when a command fails -# `-o pipefail` sets the exit code to the rightmost comment to exit with a non-zero -set -eo pipefail -# Enables `**` to include files nested inside sub-folders -shopt -s globstar - -# Run periodic samples tests at latest release -if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"periodic"* ]]; then - # preserving the test runner implementation. - cp .kokoro/test-samples-impl.sh "${TMPDIR}/test-samples-impl.sh" - echo "--- IMPORTANT IMPORTANT IMPORTANT ---" - echo "Now we rewind the repo back to the latest release..." - LATEST_RELEASE=$(git describe --abbrev=0 --tags) - git checkout $LATEST_RELEASE - echo "The current head is: " - echo $(git rev-parse --verify HEAD) - echo "--- IMPORTANT IMPORTANT IMPORTANT ---" - # move back the test runner implementation if there's no file. - if [ ! -f .kokoro/test-samples-impl.sh ]; then - cp "${TMPDIR}/test-samples-impl.sh" .kokoro/test-samples-impl.sh - fi -fi - -exec .kokoro/test-samples-impl.sh diff --git a/packages/google-cloud-bigtable/.kokoro/trampoline.sh b/packages/google-cloud-bigtable/.kokoro/trampoline.sh deleted file mode 100755 index 48f79699706e..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/trampoline.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -set -eo pipefail - -# Always run the cleanup script, regardless of the success of bouncing into -# the container. -function cleanup() { - chmod +x ${KOKORO_GFILE_DIR}/trampoline_cleanup.sh - ${KOKORO_GFILE_DIR}/trampoline_cleanup.sh - echo "cleanup"; -} -trap cleanup EXIT - -$(dirname $0)/populate-secrets.sh # Secret Manager secrets. -python3 "${KOKORO_GFILE_DIR}/trampoline_v1.py" \ No newline at end of file diff --git a/packages/google-cloud-bigtable/.kokoro/trampoline_v2.sh b/packages/google-cloud-bigtable/.kokoro/trampoline_v2.sh deleted file mode 100755 index 35fa529231dc..000000000000 --- a/packages/google-cloud-bigtable/.kokoro/trampoline_v2.sh +++ /dev/null @@ -1,487 +0,0 @@ -#!/usr/bin/env bash -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# trampoline_v2.sh -# -# This script does 3 things. -# -# 1. Prepare the Docker image for the test -# 2. Run the Docker with appropriate flags to run the test -# 3. Upload the newly built Docker image -# -# in a way that is somewhat compatible with trampoline_v1. -# -# To run this script, first download few files from gcs to /dev/shm. -# (/dev/shm is passed into the container as KOKORO_GFILE_DIR). -# -# gsutil cp gs://cloud-devrel-kokoro-resources/python-docs-samples/secrets_viewer_service_account.json /dev/shm -# gsutil cp gs://cloud-devrel-kokoro-resources/python-docs-samples/automl_secrets.txt /dev/shm -# -# Then run the script. -# .kokoro/trampoline_v2.sh -# -# These environment variables are required: -# TRAMPOLINE_IMAGE: The docker image to use. -# TRAMPOLINE_DOCKERFILE: The location of the Dockerfile. -# -# You can optionally change these environment variables: -# TRAMPOLINE_IMAGE_UPLOAD: -# (true|false): Whether to upload the Docker image after the -# successful builds. -# TRAMPOLINE_BUILD_FILE: The script to run in the docker container. -# TRAMPOLINE_WORKSPACE: The workspace path in the docker container. -# Defaults to /workspace. -# Potentially there are some repo specific envvars in .trampolinerc in -# the project root. - - -set -euo pipefail - -TRAMPOLINE_VERSION="2.0.5" - -if command -v tput >/dev/null && [[ -n "${TERM:-}" ]]; then - readonly IO_COLOR_RED="$(tput setaf 1)" - readonly IO_COLOR_GREEN="$(tput setaf 2)" - readonly IO_COLOR_YELLOW="$(tput setaf 3)" - readonly IO_COLOR_RESET="$(tput sgr0)" -else - readonly IO_COLOR_RED="" - readonly IO_COLOR_GREEN="" - readonly IO_COLOR_YELLOW="" - readonly IO_COLOR_RESET="" -fi - -function function_exists { - [ $(LC_ALL=C type -t $1)"" == "function" ] -} - -# Logs a message using the given color. The first argument must be one -# of the IO_COLOR_* variables defined above, such as -# "${IO_COLOR_YELLOW}". The remaining arguments will be logged in the -# given color. The log message will also have an RFC-3339 timestamp -# prepended (in UTC). You can disable the color output by setting -# TERM=vt100. 
-function log_impl() { - local color="$1" - shift - local timestamp="$(date -u "+%Y-%m-%dT%H:%M:%SZ")" - echo "================================================================" - echo "${color}${timestamp}:" "$@" "${IO_COLOR_RESET}" - echo "================================================================" -} - -# Logs the given message with normal coloring and a timestamp. -function log() { - log_impl "${IO_COLOR_RESET}" "$@" -} - -# Logs the given message in green with a timestamp. -function log_green() { - log_impl "${IO_COLOR_GREEN}" "$@" -} - -# Logs the given message in yellow with a timestamp. -function log_yellow() { - log_impl "${IO_COLOR_YELLOW}" "$@" -} - -# Logs the given message in red with a timestamp. -function log_red() { - log_impl "${IO_COLOR_RED}" "$@" -} - -readonly tmpdir=$(mktemp -d -t ci-XXXXXXXX) -readonly tmphome="${tmpdir}/h" -mkdir -p "${tmphome}" - -function cleanup() { - rm -rf "${tmpdir}" -} -trap cleanup EXIT - -RUNNING_IN_CI="${RUNNING_IN_CI:-false}" - -# The workspace in the container, defaults to /workspace. -TRAMPOLINE_WORKSPACE="${TRAMPOLINE_WORKSPACE:-/workspace}" - -pass_down_envvars=( - # TRAMPOLINE_V2 variables. - # Tells scripts whether they are running as part of CI or not. - "RUNNING_IN_CI" - # Indicates which CI system we're in. - "TRAMPOLINE_CI" - # Indicates the version of the script. - "TRAMPOLINE_VERSION" -) - -log_yellow "Building with Trampoline ${TRAMPOLINE_VERSION}" - -# Detect which CI systems we're in. If we're in any of the CI systems -# we support, `RUNNING_IN_CI` will be true and `TRAMPOLINE_CI` will be -# the name of the CI system. Both envvars will be passing down to the -# container for telling which CI system we're in. -if [[ -n "${KOKORO_BUILD_ID:-}" ]]; then - # descriptive env var for indicating it's on CI. - RUNNING_IN_CI="true" - TRAMPOLINE_CI="kokoro" - if [[ "${TRAMPOLINE_USE_LEGACY_SERVICE_ACCOUNT:-}" == "true" ]]; then - if [[ ! -f "${KOKORO_GFILE_DIR}/kokoro-trampoline.service-account.json" ]]; then - log_red "${KOKORO_GFILE_DIR}/kokoro-trampoline.service-account.json does not exist. Did you forget to mount cloud-devrel-kokoro-resources/trampoline? Aborting." - exit 1 - fi - # This service account will be activated later. - TRAMPOLINE_SERVICE_ACCOUNT="${KOKORO_GFILE_DIR}/kokoro-trampoline.service-account.json" - else - if [[ "${TRAMPOLINE_VERBOSE:-}" == "true" ]]; then - gcloud auth list - fi - log_yellow "Configuring Container Registry access" - gcloud auth configure-docker --quiet - fi - pass_down_envvars+=( - # KOKORO dynamic variables. 
- "KOKORO_BUILD_NUMBER" - "KOKORO_BUILD_ID" - "KOKORO_JOB_NAME" - "KOKORO_GIT_COMMIT" - "KOKORO_GITHUB_COMMIT" - "KOKORO_GITHUB_PULL_REQUEST_NUMBER" - "KOKORO_GITHUB_PULL_REQUEST_COMMIT" - # For FlakyBot - "KOKORO_GITHUB_COMMIT_URL" - "KOKORO_GITHUB_PULL_REQUEST_URL" - ) -elif [[ "${TRAVIS:-}" == "true" ]]; then - RUNNING_IN_CI="true" - TRAMPOLINE_CI="travis" - pass_down_envvars+=( - "TRAVIS_BRANCH" - "TRAVIS_BUILD_ID" - "TRAVIS_BUILD_NUMBER" - "TRAVIS_BUILD_WEB_URL" - "TRAVIS_COMMIT" - "TRAVIS_COMMIT_MESSAGE" - "TRAVIS_COMMIT_RANGE" - "TRAVIS_JOB_NAME" - "TRAVIS_JOB_NUMBER" - "TRAVIS_JOB_WEB_URL" - "TRAVIS_PULL_REQUEST" - "TRAVIS_PULL_REQUEST_BRANCH" - "TRAVIS_PULL_REQUEST_SHA" - "TRAVIS_PULL_REQUEST_SLUG" - "TRAVIS_REPO_SLUG" - "TRAVIS_SECURE_ENV_VARS" - "TRAVIS_TAG" - ) -elif [[ -n "${GITHUB_RUN_ID:-}" ]]; then - RUNNING_IN_CI="true" - TRAMPOLINE_CI="github-workflow" - pass_down_envvars+=( - "GITHUB_WORKFLOW" - "GITHUB_RUN_ID" - "GITHUB_RUN_NUMBER" - "GITHUB_ACTION" - "GITHUB_ACTIONS" - "GITHUB_ACTOR" - "GITHUB_REPOSITORY" - "GITHUB_EVENT_NAME" - "GITHUB_EVENT_PATH" - "GITHUB_SHA" - "GITHUB_REF" - "GITHUB_HEAD_REF" - "GITHUB_BASE_REF" - ) -elif [[ "${CIRCLECI:-}" == "true" ]]; then - RUNNING_IN_CI="true" - TRAMPOLINE_CI="circleci" - pass_down_envvars+=( - "CIRCLE_BRANCH" - "CIRCLE_BUILD_NUM" - "CIRCLE_BUILD_URL" - "CIRCLE_COMPARE_URL" - "CIRCLE_JOB" - "CIRCLE_NODE_INDEX" - "CIRCLE_NODE_TOTAL" - "CIRCLE_PREVIOUS_BUILD_NUM" - "CIRCLE_PROJECT_REPONAME" - "CIRCLE_PROJECT_USERNAME" - "CIRCLE_REPOSITORY_URL" - "CIRCLE_SHA1" - "CIRCLE_STAGE" - "CIRCLE_USERNAME" - "CIRCLE_WORKFLOW_ID" - "CIRCLE_WORKFLOW_JOB_ID" - "CIRCLE_WORKFLOW_UPSTREAM_JOB_IDS" - "CIRCLE_WORKFLOW_WORKSPACE_ID" - ) -fi - -# Configure the service account for pulling the docker image. -function repo_root() { - local dir="$1" - while [[ ! -d "${dir}/.git" ]]; do - dir="$(dirname "$dir")" - done - echo "${dir}" -} - -# Detect the project root. In CI builds, we assume the script is in -# the git tree and traverse from there, otherwise, traverse from `pwd` -# to find `.git` directory. -if [[ "${RUNNING_IN_CI:-}" == "true" ]]; then - PROGRAM_PATH="$(realpath "$0")" - PROGRAM_DIR="$(dirname "${PROGRAM_PATH}")" - PROJECT_ROOT="$(repo_root "${PROGRAM_DIR}")" -else - PROJECT_ROOT="$(repo_root $(pwd))" -fi - -log_yellow "Changing to the project root: ${PROJECT_ROOT}." -cd "${PROJECT_ROOT}" - -# To support relative path for `TRAMPOLINE_SERVICE_ACCOUNT`, we need -# to use this environment variable in `PROJECT_ROOT`. -if [[ -n "${TRAMPOLINE_SERVICE_ACCOUNT:-}" ]]; then - - mkdir -p "${tmpdir}/gcloud" - gcloud_config_dir="${tmpdir}/gcloud" - - log_yellow "Using isolated gcloud config: ${gcloud_config_dir}." - export CLOUDSDK_CONFIG="${gcloud_config_dir}" - - log_yellow "Using ${TRAMPOLINE_SERVICE_ACCOUNT} for authentication." - gcloud auth activate-service-account \ - --key-file "${TRAMPOLINE_SERVICE_ACCOUNT}" - log_yellow "Configuring Container Registry access" - gcloud auth configure-docker --quiet -fi - -required_envvars=( - # The basic trampoline configurations. - "TRAMPOLINE_IMAGE" - "TRAMPOLINE_BUILD_FILE" -) - -if [[ -f "${PROJECT_ROOT}/.trampolinerc" ]]; then - source "${PROJECT_ROOT}/.trampolinerc" -fi - -log_yellow "Checking environment variables." -for e in "${required_envvars[@]}" -do - if [[ -z "${!e:-}" ]]; then - log "Missing ${e} env var. Aborting." - exit 1 - fi -done - -# We want to support legacy style TRAMPOLINE_BUILD_FILE used with V1 -# script: e.g. 
"github/repo-name/.kokoro/run_tests.sh" -TRAMPOLINE_BUILD_FILE="${TRAMPOLINE_BUILD_FILE#github/*/}" -log_yellow "Using TRAMPOLINE_BUILD_FILE: ${TRAMPOLINE_BUILD_FILE}" - -# ignore error on docker operations and test execution -set +e - -log_yellow "Preparing Docker image." -# We only download the docker image in CI builds. -if [[ "${RUNNING_IN_CI:-}" == "true" ]]; then - # Download the docker image specified by `TRAMPOLINE_IMAGE` - - # We may want to add --max-concurrent-downloads flag. - - log_yellow "Start pulling the Docker image: ${TRAMPOLINE_IMAGE}." - if docker pull "${TRAMPOLINE_IMAGE}"; then - log_green "Finished pulling the Docker image: ${TRAMPOLINE_IMAGE}." - has_image="true" - else - log_red "Failed pulling the Docker image: ${TRAMPOLINE_IMAGE}." - has_image="false" - fi -else - # For local run, check if we have the image. - if docker images "${TRAMPOLINE_IMAGE}:latest" | grep "${TRAMPOLINE_IMAGE}"; then - has_image="true" - else - has_image="false" - fi -fi - - -# The default user for a Docker container has uid 0 (root). To avoid -# creating root-owned files in the build directory we tell docker to -# use the current user ID. -user_uid="$(id -u)" -user_gid="$(id -g)" -user_name="$(id -un)" - -# To allow docker in docker, we add the user to the docker group in -# the host os. -docker_gid=$(cut -d: -f3 < <(getent group docker)) - -update_cache="false" -if [[ "${TRAMPOLINE_DOCKERFILE:-none}" != "none" ]]; then - # Build the Docker image from the source. - context_dir=$(dirname "${TRAMPOLINE_DOCKERFILE}") - docker_build_flags=( - "-f" "${TRAMPOLINE_DOCKERFILE}" - "-t" "${TRAMPOLINE_IMAGE}" - "--build-arg" "UID=${user_uid}" - "--build-arg" "USERNAME=${user_name}" - ) - if [[ "${has_image}" == "true" ]]; then - docker_build_flags+=("--cache-from" "${TRAMPOLINE_IMAGE}") - fi - - log_yellow "Start building the docker image." - if [[ "${TRAMPOLINE_VERBOSE:-false}" == "true" ]]; then - echo "docker build" "${docker_build_flags[@]}" "${context_dir}" - fi - - # ON CI systems, we want to suppress docker build logs, only - # output the logs when it fails. - if [[ "${RUNNING_IN_CI:-}" == "true" ]]; then - if docker build "${docker_build_flags[@]}" "${context_dir}" \ - > "${tmpdir}/docker_build.log" 2>&1; then - if [[ "${TRAMPOLINE_VERBOSE:-}" == "true" ]]; then - cat "${tmpdir}/docker_build.log" - fi - - log_green "Finished building the docker image." - update_cache="true" - else - log_red "Failed to build the Docker image, aborting." - log_yellow "Dumping the build logs:" - cat "${tmpdir}/docker_build.log" - exit 1 - fi - else - if docker build "${docker_build_flags[@]}" "${context_dir}"; then - log_green "Finished building the docker image." - update_cache="true" - else - log_red "Failed to build the Docker image, aborting." - exit 1 - fi - fi -else - if [[ "${has_image}" != "true" ]]; then - log_red "We do not have ${TRAMPOLINE_IMAGE} locally, aborting." - exit 1 - fi -fi - -# We use an array for the flags so they are easier to document. -docker_flags=( - # Remove the container after it exists. - "--rm" - - # Use the host network. - "--network=host" - - # Run in priviledged mode. We are not using docker for sandboxing or - # isolation, just for packaging our dev tools. - "--privileged" - - # Run the docker script with the user id. Because the docker image gets to - # write in ${PWD} you typically want this to be your user id. - # To allow docker in docker, we need to use docker gid on the host. - "--user" "${user_uid}:${docker_gid}" - - # Pass down the USER. 
- "--env" "USER=${user_name}" - - # Mount the project directory inside the Docker container. - "--volume" "${PROJECT_ROOT}:${TRAMPOLINE_WORKSPACE}" - "--workdir" "${TRAMPOLINE_WORKSPACE}" - "--env" "PROJECT_ROOT=${TRAMPOLINE_WORKSPACE}" - - # Mount the temporary home directory. - "--volume" "${tmphome}:/h" - "--env" "HOME=/h" - - # Allow docker in docker. - "--volume" "/var/run/docker.sock:/var/run/docker.sock" - - # Mount the /tmp so that docker in docker can mount the files - # there correctly. - "--volume" "/tmp:/tmp" - # Pass down the KOKORO_GFILE_DIR and KOKORO_KEYSTORE_DIR - # TODO(tmatsuo): This part is not portable. - "--env" "TRAMPOLINE_SECRET_DIR=/secrets" - "--volume" "${KOKORO_GFILE_DIR:-/dev/shm}:/secrets/gfile" - "--env" "KOKORO_GFILE_DIR=/secrets/gfile" - "--volume" "${KOKORO_KEYSTORE_DIR:-/dev/shm}:/secrets/keystore" - "--env" "KOKORO_KEYSTORE_DIR=/secrets/keystore" -) - -# Add an option for nicer output if the build gets a tty. -if [[ -t 0 ]]; then - docker_flags+=("-it") -fi - -# Passing down env vars -for e in "${pass_down_envvars[@]}" -do - if [[ -n "${!e:-}" ]]; then - docker_flags+=("--env" "${e}=${!e}") - fi -done - -# If arguments are given, all arguments will become the commands run -# in the container, otherwise run TRAMPOLINE_BUILD_FILE. -if [[ $# -ge 1 ]]; then - log_yellow "Running the given commands '" "${@:1}" "' in the container." - readonly commands=("${@:1}") - if [[ "${TRAMPOLINE_VERBOSE:-}" == "true" ]]; then - echo docker run "${docker_flags[@]}" "${TRAMPOLINE_IMAGE}" "${commands[@]}" - fi - docker run "${docker_flags[@]}" "${TRAMPOLINE_IMAGE}" "${commands[@]}" -else - log_yellow "Running the tests in a Docker container." - docker_flags+=("--entrypoint=${TRAMPOLINE_BUILD_FILE}") - if [[ "${TRAMPOLINE_VERBOSE:-}" == "true" ]]; then - echo docker run "${docker_flags[@]}" "${TRAMPOLINE_IMAGE}" - fi - docker run "${docker_flags[@]}" "${TRAMPOLINE_IMAGE}" -fi - - -test_retval=$? - -if [[ ${test_retval} -eq 0 ]]; then - log_green "Build finished with ${test_retval}" -else - log_red "Build finished with ${test_retval}" -fi - -# Only upload it when the test passes. -if [[ "${update_cache}" == "true" ]] && \ - [[ $test_retval == 0 ]] && \ - [[ "${TRAMPOLINE_IMAGE_UPLOAD:-false}" == "true" ]]; then - log_yellow "Uploading the Docker image." - if docker push "${TRAMPOLINE_IMAGE}"; then - log_green "Finished uploading the Docker image." - else - log_red "Failed uploading the Docker image." - fi - # Call trampoline_after_upload_hook if it's defined. - if function_exists trampoline_after_upload_hook; then - trampoline_after_upload_hook - fi - -fi - -exit "${test_retval}" diff --git a/packages/google-cloud-bigtable/.trampolinerc b/packages/google-cloud-bigtable/.trampolinerc deleted file mode 100644 index 0080152373d5..000000000000 --- a/packages/google-cloud-bigtable/.trampolinerc +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright 2024 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Add required env vars here. 
-required_envvars+=( -) - -# Add env vars which are passed down into the container here. -pass_down_envvars+=( - "NOX_SESSION" - ############### - # Docs builds - ############### - "STAGING_BUCKET" - "V2_STAGING_BUCKET" - ################## - # Samples builds - ################## - "INSTALL_LIBRARY_FROM_SOURCE" - "RUN_TESTS_SESSION" - "BUILD_SPECIFIC_GCLOUD_PROJECT" - # Target directories. - "RUN_TESTS_DIRS" - # The nox session to run. - "RUN_TESTS_SESSION" -) - -# Prevent unintentional override on the default image. -if [[ "${TRAMPOLINE_IMAGE_UPLOAD:-false}" == "true" ]] && \ - [[ -z "${TRAMPOLINE_IMAGE:-}" ]]; then - echo "Please set TRAMPOLINE_IMAGE if you want to upload the Docker image." - exit 1 -fi - -# Define the default value if it makes sense. -if [[ -z "${TRAMPOLINE_IMAGE_UPLOAD:-}" ]]; then - TRAMPOLINE_IMAGE_UPLOAD="" -fi - -if [[ -z "${TRAMPOLINE_IMAGE:-}" ]]; then - TRAMPOLINE_IMAGE="" -fi - -if [[ -z "${TRAMPOLINE_DOCKERFILE:-}" ]]; then - TRAMPOLINE_DOCKERFILE="" -fi - -if [[ -z "${TRAMPOLINE_BUILD_FILE:-}" ]]; then - TRAMPOLINE_BUILD_FILE="" -fi diff --git a/packages/google-cloud-bigtable/docs/changelog.md b/packages/google-cloud-bigtable/docs/changelog.md deleted file mode 120000 index 04c99a55caae..000000000000 --- a/packages/google-cloud-bigtable/docs/changelog.md +++ /dev/null @@ -1 +0,0 @@ -../CHANGELOG.md \ No newline at end of file
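
For context on the files removed above: the deleted trampoline_v2.sh documents that it requires the environment variables TRAMPOLINE_IMAGE and TRAMPOLINE_BUILD_FILE (with optional TRAMPOLINE_IMAGE_UPLOAD, TRAMPOLINE_DOCKERFILE, and TRAMPOLINE_WORKSPACE), and that .trampolinerc may append repo-specific entries to required_envvars and pass_down_envvars. The sketch below is illustrative only, not part of the patch; the two values are copied from the deleted python3.9 common.cfg, and any local run would also need the GFILE secrets the scripts expect.

    # Sketch of a local invocation of the removed runner, assuming the
    # values taken from the deleted .kokoro/samples/python3.9/common.cfg:
    export TRAMPOLINE_IMAGE="gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker"
    export TRAMPOLINE_BUILD_FILE="github/python-bigtable/.kokoro/test-samples.sh"
    # trampoline_v2.sh strips the legacy "github/<repo>/" prefix
    # (TRAMPOLINE_BUILD_FILE#github/*/) before running the file inside Docker.
    .kokoro/trampoline_v2.sh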